From ac4e03fd61ecc377e00573e86551b996ecdccbc6 Mon Sep 17 00:00:00 2001
From: a11e99 <87661275+a11e99@users.noreply.github.com>
Date: Tue, 20 Jul 2021 13:29:41 -0400
Subject: [PATCH] CFS-7074 - Terraform Provider AWS - QuickSight Integration (#24)

* Rectify inconsistent vendoring

  Addresses error:

    go: inconsistent vendoring in /code:
        github.com/aws/aws-sdk-go@v1.35.8: is explicitly required in go.mod, but vendor/modules.txt indicates github.com/aws/aws-sdk-go@v1.29.24

    To ignore the vendor directory, use -mod=readonly or -mod=mod.
    To sync the vendor directory, run:
        go mod vendor

* Add AWS QuickSight Group Membership

  Cribbed from prior art implementation here:
  https://github.com/hashicorp/terraform-provider-aws/pull/11160
  (see the schema sketch after the file summary below)

* Add AWS QuickSight IAM Policy Assignment

  Cribbed from prior art implementation here:
  https://github.com/hashicorp/terraform-provider-aws/pull/12279

* Add AWS QuickSight Namespaces
---
 aws/provider.go | 3 +
 ...esource_aws_quicksight_group_membership.go | 177 +
 ...ce_aws_quicksight_iam_policy_assignment.go | 272 +
 aws/resource_aws_quicksight_namespace.go | 206 +
 vendor/cloud.google.com/go/.gitignore | 11 +
 vendor/cloud.google.com/go/CHANGES.md | 1566 +
 vendor/cloud.google.com/go/CODE_OF_CONDUCT.md | 44 +
 vendor/cloud.google.com/go/CONTRIBUTING.md | 250 +
 vendor/cloud.google.com/go/README.md | 178 +
 vendor/cloud.google.com/go/RELEASING.md | 128 +
 .../go/compute/metadata/metadata.go | 54 +-
 vendor/cloud.google.com/go/doc.go | 100 +
 vendor/cloud.google.com/go/go.mod | 24 +
 vendor/cloud.google.com/go/go.sum | 484 +
 vendor/cloud.google.com/go/iam/iam.go | 76 +-
 .../go/internal/.repo-metadata-full.json | 770 +
 vendor/cloud.google.com/go/internal/README.md | 18 +
 .../go/internal/version/version.go | 2 +-
 vendor/cloud.google.com/go/storage/CHANGES.md | 83 +
 .../go/storage}/LICENSE | 0
 vendor/cloud.google.com/go/storage/bucket.go | 160 +-
 vendor/cloud.google.com/go/storage/copy.go | 10 +
 vendor/cloud.google.com/go/storage/doc.go | 42 +
 vendor/cloud.google.com/go/storage/go.mod | 18 +
 vendor/cloud.google.com/go/storage/go.sum | 450 +
 vendor/cloud.google.com/go/storage/go110.go | 18 +-
 .../go/storage/go_mod_tidy_hack.go | 22 +
 vendor/cloud.google.com/go/storage/hmac.go | 129 +-
 vendor/cloud.google.com/go/storage/iam.go | 42 +-
 .../go/storage/post_policy_v4.go | 377 +
 vendor/cloud.google.com/go/storage/reader.go | 36 +-
 vendor/cloud.google.com/go/storage/storage.go | 321 +-
 vendor/cloud.google.com/go/storage/writer.go | 36 +-
 vendor/cloud.google.com/go/tools.go | 31 +
 vendor/github.com/agl/ed25519/ed25519.go | 127 -
 .../agl/ed25519/edwards25519/const.go | 1411 -
 .../agl/ed25519/edwards25519/edwards25519.go | 1773 -
 .../go-textseg/textseg/grapheme_clusters.go | 10308 +++--
 .../github.com/aws/aws-sdk-go/aws/config.go | 9 +-
 .../aws-sdk-go/aws/corehandlers/handlers.go | 2 +
 .../aws-sdk-go/aws/credentials/credentials.go | 39 +-
 .../ec2rolecreds/ec2_role_provider.go | 20 +-
 .../aws/credentials/endpointcreds/provider.go | 11 +-
 .../shared_credentials_provider.go | 5 +-
 .../stscreds/assume_role_provider.go | 44 +-
 .../stscreds/web_identity_provider.go | 66 +-
 .../aws/aws-sdk-go/aws/ec2metadata/api.go | 69 +-
 .../aws/aws-sdk-go/aws/ec2metadata/service.go | 17 +
 .../aws/ec2metadata/token_provider.go | 3 +-
 .../aws/aws-sdk-go/aws/endpoints/decode.go | 2 +-
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 4191 +-
 .../aws/aws-sdk-go/aws/endpoints/v3model.go | 16 +-
 .../aws/request/connection_reset_error.go | 3 +-
 .../aws/aws-sdk-go/aws/session/doc.go | 17 +
 .../aws/aws-sdk-go/aws/session/env_config.go
| 10 + .../aws/aws-sdk-go/aws/session/session.go | 61 +- vendor/github.com/aws/aws-sdk-go/aws/types.go | 23 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../s3shared}/arn/accesspoint_arn.go | 15 +- .../internal => internal/s3shared}/arn/arn.go | 13 +- .../internal/s3shared/arn/outpost_arn.go | 126 + .../internal/s3shared/endpoint_errors.go | 189 + .../internal/s3shared/resource_request.go | 62 + .../internal/{ => s3shared}/s3err/error.go | 0 .../private/checksum/content_md5.go | 53 + .../eventstream/eventstreamapi/reader.go | 15 +- .../private/protocol/eventstream/header.go | 9 + .../private/protocol/eventstream/message.go | 14 + .../protocol/json/jsonutil/unmarshal.go | 36 +- .../aws-sdk-go/private/protocol/timestamp.go | 9 +- .../private/protocol/xml/xmlutil/build.go | 9 + .../private/protocol/xml/xmlutil/unmarshal.go | 8 + .../aws-sdk-go/service/accessanalyzer/api.go | 663 +- .../service/accessanalyzer/service.go | 2 +- .../aws/aws-sdk-go/service/acm/api.go | 463 +- .../aws/aws-sdk-go/service/acm/errors.go | 2 +- .../aws/aws-sdk-go/service/acm/service.go | 2 +- .../aws/aws-sdk-go/service/acmpca/api.go | 1866 +- .../aws/aws-sdk-go/service/acmpca/doc.go | 7 +- .../aws/aws-sdk-go/service/acmpca/errors.go | 27 +- .../aws/aws-sdk-go/service/acmpca/service.go | 2 +- .../aws/aws-sdk-go/service/amplify/api.go | 1766 +- .../aws/aws-sdk-go/service/amplify/doc.go | 9 +- .../aws/aws-sdk-go/service/amplify/errors.go | 17 +- .../aws/aws-sdk-go/service/amplify/service.go | 2 +- .../aws/aws-sdk-go/service/apigateway/api.go | 580 +- .../aws-sdk-go/service/apigateway/service.go | 2 +- .../aws-sdk-go/service/apigatewayv2/api.go | 1242 +- .../service/apigatewayv2/service.go | 2 +- .../service/applicationautoscaling/api.go | 1083 +- .../service/applicationautoscaling/doc.go | 15 +- .../service/applicationautoscaling/service.go | 2 +- .../service/applicationinsights/api.go | 472 +- .../service/applicationinsights/service.go | 2 +- .../aws/aws-sdk-go/service/appmesh/api.go | 9412 +++-- .../aws/aws-sdk-go/service/appmesh/doc.go | 2 +- .../aws/aws-sdk-go/service/appmesh/errors.go | 2 +- .../aws/aws-sdk-go/service/appmesh/service.go | 2 +- .../aws/aws-sdk-go/service/appstream/api.go | 797 +- .../aws-sdk-go/service/appstream/errors.go | 9 + .../aws-sdk-go/service/appstream/service.go | 2 +- .../aws/aws-sdk-go/service/appsync/api.go | 570 +- .../aws/aws-sdk-go/service/appsync/service.go | 2 +- .../aws/aws-sdk-go/service/athena/api.go | 2975 +- .../aws/aws-sdk-go/service/athena/errors.go | 11 + .../aws/aws-sdk-go/service/athena/service.go | 2 +- .../aws/aws-sdk-go/service/autoscaling/api.go | 1653 +- .../aws-sdk-go/service/autoscaling/errors.go | 17 +- .../aws-sdk-go/service/autoscaling/service.go | 2 +- .../service/autoscalingplans/api.go | 262 +- .../service/autoscalingplans/service.go | 2 +- .../aws/aws-sdk-go/service/backup/api.go | 1003 +- .../aws/aws-sdk-go/service/backup/service.go | 2 +- .../aws/aws-sdk-go/service/batch/api.go | 1345 +- .../aws/aws-sdk-go/service/batch/service.go | 2 +- .../aws/aws-sdk-go/service/budgets/api.go | 5209 ++- .../aws/aws-sdk-go/service/budgets/errors.go | 8 + .../aws/aws-sdk-go/service/budgets/service.go | 2 +- .../aws/aws-sdk-go/service/cloud9/api.go | 299 +- .../aws/aws-sdk-go/service/cloud9/errors.go | 7 + .../aws/aws-sdk-go/service/cloud9/service.go | 2 +- .../aws-sdk-go/service/cloudformation/api.go | 1134 +- .../service/cloudformation/service.go | 2 +- .../service/cloudformation/waiters.go | 66 + .../aws/aws-sdk-go/service/cloudfront/api.go | 19396 ++++++---- 
.../aws/aws-sdk-go/service/cloudfront/doc.go | 2 +- .../aws-sdk-go/service/cloudfront/errors.go | 169 +- .../aws-sdk-go/service/cloudfront/service.go | 4 +- .../aws/aws-sdk-go/service/cloudhsmv2/api.go | 204 +- .../aws-sdk-go/service/cloudhsmv2/errors.go | 3 + .../aws-sdk-go/service/cloudhsmv2/service.go | 2 +- .../aws/aws-sdk-go/service/cloudsearch/api.go | 109 + .../aws-sdk-go/service/cloudsearch/service.go | 2 +- .../aws/aws-sdk-go/service/cloudtrail/api.go | 961 +- .../aws-sdk-go/service/cloudtrail/service.go | 2 +- .../aws/aws-sdk-go/service/cloudwatch/api.go | 300 +- .../aws-sdk-go/service/cloudwatch/service.go | 2 +- .../service/cloudwatchevents/api.go | 652 +- .../service/cloudwatchevents/errors.go | 7 + .../service/cloudwatchevents/service.go | 2 +- .../aws-sdk-go/service/cloudwatchlogs/api.go | 1112 +- .../aws-sdk-go/service/cloudwatchlogs/doc.go | 12 +- .../service/cloudwatchlogs/errors.go | 2 +- .../service/cloudwatchlogs/service.go | 2 +- .../aws/aws-sdk-go/service/codebuild/api.go | 12134 ++++-- .../aws-sdk-go/service/codebuild/service.go | 2 +- .../aws/aws-sdk-go/service/codecommit/api.go | 4968 ++- .../aws/aws-sdk-go/service/codecommit/doc.go | 6 + .../aws-sdk-go/service/codecommit/errors.go | 33 +- .../aws-sdk-go/service/codecommit/service.go | 2 +- .../aws/aws-sdk-go/service/codedeploy/api.go | 3200 +- .../aws-sdk-go/service/codedeploy/errors.go | 12 +- .../aws-sdk-go/service/codedeploy/service.go | 2 +- .../aws-sdk-go/service/codepipeline/api.go | 908 +- .../service/codepipeline/service.go | 2 +- .../service/codestarnotifications/api.go | 230 +- .../service/codestarnotifications/service.go | 2 +- .../aws-sdk-go/service/cognitoidentity/api.go | 276 +- .../service/cognitoidentity/service.go | 2 +- .../service/cognitoidentityprovider/api.go | 1585 +- .../cognitoidentityprovider/service.go | 2 +- .../aws-sdk-go/service/configservice/api.go | 1728 +- .../service/configservice/service.go | 2 +- .../service/costandusagereportservice/api.go | 240 +- .../costandusagereportservice/service.go | 2 +- .../service/databasemigrationservice/api.go | 5019 ++- .../databasemigrationservice/errors.go | 22 + .../databasemigrationservice/service.go | 2 +- .../aws-sdk-go/service/dataexchange/api.go | 343 +- .../service/dataexchange/service.go | 2 +- .../aws-sdk-go/service/datapipeline/api.go | 130 +- .../service/datapipeline/service.go | 2 +- .../aws/aws-sdk-go/service/datasync/api.go | 1202 +- .../aws-sdk-go/service/datasync/service.go | 2 +- .../aws/aws-sdk-go/service/dax/api.go | 616 +- .../aws/aws-sdk-go/service/dax/service.go | 2 +- .../aws/aws-sdk-go/service/devicefarm/api.go | 625 +- .../aws-sdk-go/service/devicefarm/service.go | 2 +- .../aws-sdk-go/service/directconnect/api.go | 938 +- .../service/directconnect/service.go | 2 +- .../service/directoryservice/api.go | 919 +- .../service/directoryservice/service.go | 2 +- .../aws/aws-sdk-go/service/dlm/api.go | 178 +- .../aws/aws-sdk-go/service/dlm/service.go | 2 +- .../aws/aws-sdk-go/service/docdb/api.go | 430 +- .../aws/aws-sdk-go/service/docdb/service.go | 2 +- .../aws/aws-sdk-go/service/dynamodb/api.go | 1234 +- .../aws/aws-sdk-go/service/dynamodb/errors.go | 6 +- .../aws-sdk-go/service/dynamodb/service.go | 2 +- .../aws/aws-sdk-go/service/ec2/api.go | 9651 ++++- .../aws-sdk-go/service/ec2/customizations.go | 6 +- .../aws/aws-sdk-go/service/ec2/service.go | 2 +- .../aws/aws-sdk-go/service/ecr/api.go | 1151 +- .../aws/aws-sdk-go/service/ecr/doc.go | 11 +- .../aws/aws-sdk-go/service/ecr/errors.go | 38 +- 
.../aws/aws-sdk-go/service/ecr/service.go | 2 +- .../aws/aws-sdk-go/service/ecs/api.go | 1697 +- .../aws/aws-sdk-go/service/ecs/service.go | 2 +- .../aws/aws-sdk-go/service/efs/api.go | 1195 +- .../aws/aws-sdk-go/service/efs/errors.go | 8 + .../aws/aws-sdk-go/service/efs/service.go | 2 +- .../aws/aws-sdk-go/service/eks/api.go | 725 +- .../aws/aws-sdk-go/service/eks/service.go | 2 +- .../aws/aws-sdk-go/service/elasticache/api.go | 15218 +++++--- .../aws-sdk-go/service/elasticache/errors.go | 91 + .../aws-sdk-go/service/elasticache/service.go | 2 +- .../service/elasticbeanstalk/api.go | 1437 +- .../service/elasticbeanstalk/doc.go | 4 +- .../service/elasticbeanstalk/service.go | 2 +- .../service/elasticbeanstalk/waiters.go | 163 + .../service/elasticsearchservice/api.go | 7275 +++- .../service/elasticsearchservice/errors.go | 40 +- .../service/elasticsearchservice/service.go | 2 +- .../service/elastictranscoder/api.go | 154 +- .../service/elastictranscoder/service.go | 2 +- .../aws/aws-sdk-go/service/elb/api.go | 83 +- .../aws/aws-sdk-go/service/elb/doc.go | 2 +- .../aws/aws-sdk-go/service/elb/service.go | 2 +- .../aws/aws-sdk-go/service/elbv2/api.go | 397 +- .../aws/aws-sdk-go/service/elbv2/errors.go | 6 + .../aws/aws-sdk-go/service/elbv2/service.go | 2 +- .../aws/aws-sdk-go/service/emr/api.go | 2744 +- .../aws/aws-sdk-go/service/emr/service.go | 2 +- .../aws/aws-sdk-go/service/firehose/api.go | 1584 +- .../aws-sdk-go/service/firehose/service.go | 2 +- .../aws/aws-sdk-go/service/fms/api.go | 3093 +- .../aws/aws-sdk-go/service/fms/errors.go | 9 +- .../aws/aws-sdk-go/service/fms/service.go | 2 +- .../aws-sdk-go/service/forecastservice/api.go | 1238 +- .../service/forecastservice/service.go | 2 +- .../aws/aws-sdk-go/service/fsx/api.go | 1834 +- .../aws/aws-sdk-go/service/fsx/service.go | 5 +- .../aws/aws-sdk-go/service/gamelift/api.go | 8894 ++++- .../aws/aws-sdk-go/service/gamelift/doc.go | 54 +- .../aws/aws-sdk-go/service/gamelift/errors.go | 9 + .../aws-sdk-go/service/gamelift/service.go | 2 +- .../aws/aws-sdk-go/service/glacier/api.go | 268 +- .../aws/aws-sdk-go/service/glacier/service.go | 2 +- .../service/globalaccelerator/api.go | 456 +- .../service/globalaccelerator/service.go | 2 +- .../aws/aws-sdk-go/service/glue/api.go | 5157 ++- .../aws/aws-sdk-go/service/glue/errors.go | 14 + .../aws/aws-sdk-go/service/glue/service.go | 2 +- .../aws/aws-sdk-go/service/greengrass/api.go | 585 +- .../aws-sdk-go/service/greengrass/service.go | 2 +- .../aws/aws-sdk-go/service/guardduty/api.go | 4093 +- .../aws/aws-sdk-go/service/guardduty/doc.go | 25 +- .../aws-sdk-go/service/guardduty/errors.go | 4 +- .../aws-sdk-go/service/guardduty/service.go | 2 +- .../aws/aws-sdk-go/service/iam/api.go | 608 +- .../aws/aws-sdk-go/service/iam/doc.go | 59 +- .../aws/aws-sdk-go/service/iam/errors.go | 3 +- .../aws/aws-sdk-go/service/iam/service.go | 2 +- .../aws-sdk-go/service/imagebuilder/api.go | 826 +- .../aws-sdk-go/service/imagebuilder/errors.go | 8 + .../service/imagebuilder/service.go | 2 +- .../aws/aws-sdk-go/service/inspector/api.go | 496 +- .../aws-sdk-go/service/inspector/service.go | 2 +- .../aws/aws-sdk-go/service/iot/api.go | 7484 +++- .../aws/aws-sdk-go/service/iot/doc.go | 8 + .../aws/aws-sdk-go/service/iot/service.go | 2 +- .../aws-sdk-go/service/iotanalytics/api.go | 223 +- .../service/iotanalytics/service.go | 2 +- .../aws/aws-sdk-go/service/iotevents/api.go | 1050 +- .../aws/aws-sdk-go/service/iotevents/doc.go | 4 +- .../aws-sdk-go/service/iotevents/service.go | 2 +- 
.../aws/aws-sdk-go/service/kafka/api.go | 2423 +- .../aws/aws-sdk-go/service/kafka/service.go | 2 +- .../aws/aws-sdk-go/service/kinesis/api.go | 879 +- .../aws/aws-sdk-go/service/kinesis/errors.go | 11 +- .../aws/aws-sdk-go/service/kinesis/service.go | 2 +- .../service/kinesisanalytics/api.go | 293 +- .../service/kinesisanalytics/service.go | 2 +- .../service/kinesisanalyticsv2/api.go | 983 +- .../service/kinesisanalyticsv2/doc.go | 8 +- .../service/kinesisanalyticsv2/errors.go | 6 +- .../service/kinesisanalyticsv2/service.go | 2 +- .../aws-sdk-go/service/kinesisvideo/api.go | 411 +- .../aws-sdk-go/service/kinesisvideo/errors.go | 2 +- .../service/kinesisvideo/service.go | 2 +- .../aws/aws-sdk-go/service/kms/api.go | 1228 +- .../aws/aws-sdk-go/service/kms/service.go | 2 +- .../aws-sdk-go/service/lakeformation/api.go | 401 +- .../service/lakeformation/service.go | 2 +- .../aws/aws-sdk-go/service/lambda/api.go | 1221 +- .../aws/aws-sdk-go/service/lambda/errors.go | 30 + .../aws/aws-sdk-go/service/lambda/service.go | 2 +- .../service/lexmodelbuildingservice/api.go | 623 +- .../lexmodelbuildingservice/service.go | 2 +- .../aws-sdk-go/service/licensemanager/api.go | 311 +- .../service/licensemanager/service.go | 2 +- .../aws/aws-sdk-go/service/lightsail/api.go | 16078 +++++--- .../aws/aws-sdk-go/service/lightsail/doc.go | 30 +- .../aws-sdk-go/service/lightsail/service.go | 2 +- .../aws/aws-sdk-go/service/macie/api.go | 219 +- .../aws/aws-sdk-go/service/macie/doc.go | 18 +- .../aws/aws-sdk-go/service/macie/service.go | 2 +- .../service/managedblockchain/api.go | 922 +- .../service/managedblockchain/service.go | 2 +- .../service/marketplacecatalog/api.go | 273 +- .../service/marketplacecatalog/doc.go | 4 +- .../service/marketplacecatalog/service.go | 2 +- .../aws-sdk-go/service/mediaconnect/api.go | 5993 ++- .../service/mediaconnect/service.go | 2 +- .../aws-sdk-go/service/mediaconvert/api.go | 7416 +++- .../service/mediaconvert/service.go | 2 +- .../aws/aws-sdk-go/service/medialive/api.go | 6315 ++- .../aws-sdk-go/service/medialive/service.go | 2 +- .../aws-sdk-go/service/medialive/waiters.go | 177 +- .../aws-sdk-go/service/mediapackage/api.go | 674 +- .../service/mediapackage/service.go | 2 +- .../aws/aws-sdk-go/service/mediastore/api.go | 795 +- .../aws-sdk-go/service/mediastore/service.go | 2 +- .../aws-sdk-go/service/mediastoredata/api.go | 113 +- .../service/mediastoredata/service.go | 2 +- .../aws/aws-sdk-go/service/mq/api.go | 636 +- .../aws/aws-sdk-go/service/mq/service.go | 2 +- .../aws/aws-sdk-go/service/neptune/api.go | 20 + .../aws/aws-sdk-go/service/neptune/service.go | 2 +- .../aws/aws-sdk-go/service/opsworks/api.go | 306 +- .../aws-sdk-go/service/opsworks/service.go | 2 +- .../aws-sdk-go/service/organizations/api.go | 8586 +++-- .../service/organizations/errors.go | 134 +- .../service/organizations/service.go | 2 +- .../aws/aws-sdk-go/service/personalize/api.go | 1275 +- .../aws-sdk-go/service/personalize/service.go | 2 +- .../aws/aws-sdk-go/service/pinpoint/api.go | 1489 +- .../aws/aws-sdk-go/service/pinpoint/errors.go | 7 + .../aws-sdk-go/service/pinpoint/service.go | 2 +- .../aws/aws-sdk-go/service/pricing/api.go | 117 +- .../aws/aws-sdk-go/service/pricing/service.go | 2 +- .../aws/aws-sdk-go/service/qldb/api.go | 1287 +- .../aws/aws-sdk-go/service/qldb/service.go | 2 +- .../aws/aws-sdk-go/service/quicksight/api.go | 31644 ++++++++++------ .../aws-sdk-go/service/quicksight/errors.go | 4 +- .../aws-sdk-go/service/quicksight/service.go | 2 +- .../aws/aws-sdk-go/service/ram/api.go 
| 756 +- .../aws/aws-sdk-go/service/ram/service.go | 2 +- .../aws/aws-sdk-go/service/rds/api.go | 2082 +- .../aws/aws-sdk-go/service/rds/errors.go | 8 + .../aws/aws-sdk-go/service/rds/service.go | 2 +- .../aws/aws-sdk-go/service/redshift/api.go | 1402 +- .../aws/aws-sdk-go/service/redshift/errors.go | 18 + .../aws-sdk-go/service/redshift/service.go | 2 +- .../aws-sdk-go/service/resourcegroups/api.go | 1701 +- .../aws-sdk-go/service/resourcegroups/doc.go | 2 +- .../service/resourcegroups/errors.go | 18 +- .../service/resourcegroups/service.go | 2 +- .../aws/aws-sdk-go/service/route53/api.go | 1056 +- .../aws/aws-sdk-go/service/route53/errors.go | 36 +- .../aws/aws-sdk-go/service/route53/service.go | 2 +- .../aws-sdk-go/service/route53resolver/api.go | 4343 ++- .../aws-sdk-go/service/route53resolver/doc.go | 71 +- .../service/route53resolver/errors.go | 10 +- .../service/route53resolver/service.go | 2 +- .../aws/aws-sdk-go/service/s3/api.go | 5523 ++- .../aws/aws-sdk-go/service/s3/body_hash.go | 49 +- .../aws-sdk-go/service/s3/customizations.go | 12 +- .../aws/aws-sdk-go/service/s3/doc_custom.go | 13 - .../aws/aws-sdk-go/service/s3/endpoint.go | 218 +- .../aws-sdk-go/service/s3/endpoint_builder.go | 177 + .../aws-sdk-go/service/s3/endpoint_errors.go | 151 - .../aws/aws-sdk-go/service/s3/errors.go | 2 +- .../aws/aws-sdk-go/service/s3/service.go | 2 +- .../aws/aws-sdk-go/service/s3/sse.go | 2 +- .../aws-sdk-go/service/s3/statusok_error.go | 16 +- .../aws-sdk-go/service/s3/unmarshal_error.go | 40 +- .../aws/aws-sdk-go/service/s3control/api.go | 9110 ++++- .../service/s3control/customizations.go | 32 +- .../aws-sdk-go/service/s3control/endpoint.go | 216 + .../service/s3control/endpoint_builder.go | 152 + .../aws-sdk-go/service/s3control/errors.go | 20 + .../aws-sdk-go/service/s3control/service.go | 2 +- .../aws-sdk-go/service/s3control/validate.go | 44 + .../aws/aws-sdk-go/service/sagemaker/api.go | 5835 ++- .../aws/aws-sdk-go/service/sagemaker/doc.go | 6 + .../aws-sdk-go/service/sagemaker/service.go | 2 +- .../aws-sdk-go/service/secretsmanager/api.go | 1080 +- .../aws-sdk-go/service/secretsmanager/doc.go | 53 +- .../service/secretsmanager/errors.go | 7 + .../service/secretsmanager/service.go | 2 +- .../aws/aws-sdk-go/service/securityhub/api.go | 13275 ++++++- .../aws/aws-sdk-go/service/securityhub/doc.go | 6 + .../aws-sdk-go/service/securityhub/service.go | 2 +- .../serverlessapplicationrepository/api.go | 151 +- .../service.go | 2 +- .../aws-sdk-go/service/servicecatalog/api.go | 1308 +- .../service/servicecatalog/service.go | 2 +- .../service/servicediscovery/api.go | 1547 +- .../service/servicediscovery/errors.go | 49 +- .../service/servicediscovery/service.go | 2 +- .../aws-sdk-go/service/servicequotas/api.go | 372 +- .../service/servicequotas/service.go | 2 +- .../aws/aws-sdk-go/service/ses/api.go | 161 + .../aws/aws-sdk-go/service/ses/service.go | 2 +- .../aws/aws-sdk-go/service/sfn/api.go | 989 +- .../aws/aws-sdk-go/service/sfn/errors.go | 8 + .../aws/aws-sdk-go/service/sfn/service.go | 2 +- .../aws/aws-sdk-go/service/shield/api.go | 1059 +- .../aws/aws-sdk-go/service/shield/errors.go | 8 +- .../aws/aws-sdk-go/service/shield/service.go | 2 +- .../aws-sdk-go/service/simpledb/service.go | 2 +- .../aws/aws-sdk-go/service/sns/api.go | 171 +- .../aws/aws-sdk-go/service/sns/service.go | 2 +- .../aws/aws-sdk-go/service/sqs/api.go | 487 +- .../aws/aws-sdk-go/service/sqs/doc.go | 6 +- .../aws/aws-sdk-go/service/sqs/service.go | 2 +- .../aws/aws-sdk-go/service/ssm/api.go | 6710 +++- 
.../aws/aws-sdk-go/service/ssm/doc.go | 15 +- .../aws/aws-sdk-go/service/ssm/errors.go | 31 +- .../aws/aws-sdk-go/service/ssm/service.go | 2 +- .../aws/aws-sdk-go/service/ssm/waiters.go | 91 + .../aws-sdk-go/service/storagegateway/api.go | 2881 +- .../aws-sdk-go/service/storagegateway/doc.go | 14 +- .../service/storagegateway/service.go | 2 +- .../aws/aws-sdk-go/service/sts/api.go | 6 +- .../aws/aws-sdk-go/service/sts/doc.go | 86 +- .../aws/aws-sdk-go/service/sts/service.go | 2 +- .../aws/aws-sdk-go/service/swf/api.go | 524 +- .../aws/aws-sdk-go/service/swf/service.go | 2 +- .../aws/aws-sdk-go/service/transfer/api.go | 1877 +- .../aws/aws-sdk-go/service/transfer/doc.go | 29 +- .../aws/aws-sdk-go/service/transfer/errors.go | 19 +- .../aws-sdk-go/service/transfer/service.go | 4 +- .../aws/aws-sdk-go/service/waf/api.go | 2686 +- .../aws/aws-sdk-go/service/waf/doc.go | 26 +- .../aws/aws-sdk-go/service/waf/errors.go | 27 + .../aws/aws-sdk-go/service/waf/service.go | 2 +- .../aws/aws-sdk-go/service/wafregional/api.go | 1968 +- .../aws/aws-sdk-go/service/wafregional/doc.go | 28 +- .../aws-sdk-go/service/wafregional/errors.go | 27 + .../aws-sdk-go/service/wafregional/service.go | 2 +- .../aws/aws-sdk-go/service/wafv2/api.go | 2864 +- .../aws/aws-sdk-go/service/wafv2/doc.go | 27 +- .../aws/aws-sdk-go/service/wafv2/errors.go | 30 + .../aws/aws-sdk-go/service/wafv2/service.go | 2 +- .../aws/aws-sdk-go/service/worklink/api.go | 670 +- .../aws/aws-sdk-go/service/worklink/doc.go | 8 +- .../aws-sdk-go/service/worklink/service.go | 2 +- .../aws/aws-sdk-go/service/workmail/api.go | 3457 +- .../aws/aws-sdk-go/service/workmail/errors.go | 14 +- .../aws-sdk-go/service/workmail/service.go | 2 +- .../aws/aws-sdk-go/service/workspaces/api.go | 2749 +- .../aws-sdk-go/service/workspaces/service.go | 2 +- .../aws/aws-sdk-go/service/xray/api.go | 1089 +- .../aws/aws-sdk-go/service/xray/errors.go | 15 + .../aws/aws-sdk-go/service/xray/service.go | 2 +- vendor/github.com/beevik/etree/.travis.yml | 14 - vendor/github.com/beevik/etree/CONTRIBUTORS | 10 - vendor/github.com/beevik/etree/LICENSE | 24 - vendor/github.com/beevik/etree/README.md | 205 - .../github.com/beevik/etree/RELEASE_NOTES.md | 109 - vendor/github.com/beevik/etree/etree.go | 1453 - vendor/github.com/beevik/etree/helpers.go | 276 - vendor/github.com/beevik/etree/path.go | 582 - .../testmatchresourceattrcallexpr.go | 13 - .../resourcedatagetokexistscallexpr.go | 14 - .../resourceinforesourceonly.go | 35 - .../tfproviderlint/xpasses/XR001/README.md | 28 - .../tfproviderlint/xpasses/XR001/XR001.go | 43 - .../tfproviderlint/xpasses/XR002/README.md | 39 - .../tfproviderlint/xpasses/XR002/XR002.go | 43 - .../tfproviderlint/xpasses/XR003/README.md | 39 - .../tfproviderlint/xpasses/XR003/XR003.go | 43 - .../tfproviderlint/xpasses/XR004/README.md | 38 - .../tfproviderlint/xpasses/XR004/XR004.go | 108 - .../tfproviderlint/xpasses/XS001/README.md | 40 - .../tfproviderlint/xpasses/XS001/XS001.go | 58 - .../bflad/tfproviderlint/xpasses/checks.go | 21 - .../github.com/boombuler/barcode/.gitignore | 1 - vendor/github.com/boombuler/barcode/README.md | 53 - .../github.com/boombuler/barcode/barcode.go | 42 - vendor/github.com/boombuler/barcode/go.mod | 1 - .../boombuler/barcode/qr/alphanumeric.go | 66 - .../boombuler/barcode/qr/automatic.go | 23 - .../github.com/boombuler/barcode/qr/blocks.go | 59 - .../boombuler/barcode/qr/encoder.go | 416 - .../boombuler/barcode/qr/errorcorrection.go | 29 - .../boombuler/barcode/qr/numeric.go | 56 - 
.../github.com/boombuler/barcode/qr/qrcode.go | 166 - .../boombuler/barcode/qr/unicode.go | 27 - .../boombuler/barcode/qr/versioninfo.go | 310 - .../boombuler/barcode/scaledbarcode.go | 134 - .../boombuler/barcode/utils/base1dcode.go | 57 - .../boombuler/barcode/utils/bitlist.go | 119 - .../boombuler/barcode/utils/galoisfield.go | 65 - .../boombuler/barcode/utils/gfpoly.go | 103 - .../boombuler/barcode/utils/reedsolomon.go | 44 - .../boombuler/barcode/utils/runeint.go | 19 - vendor/github.com/gobwas/glob/match/any.go | 1 - vendor/github.com/gobwas/glob/match/list.go | 3 +- vendor/github.com/gobwas/glob/match/single.go | 3 +- .../gobwas/glob/syntax/ast/parser.go | 3 +- .../gobwas/glob/syntax/lexer/lexer.go | 3 +- vendor/github.com/golang/groupcache/LICENSE | 191 + .../github.com/golang/groupcache/lru/lru.go | 133 + .../protobuf/internal/gengogrpc/grpc.go | 398 + .../golang/protobuf/proto/buffer.go | 324 + .../github.com/golang/protobuf/proto/clone.go | 253 - .../golang/protobuf/proto/decode.go | 427 - .../golang/protobuf/proto/defaults.go | 63 + .../golang/protobuf/proto/deprecated.go | 126 +- .../golang/protobuf/proto/discard.go | 356 +- .../golang/protobuf/proto/encode.go | 203 - .../github.com/golang/protobuf/proto/equal.go | 301 - .../golang/protobuf/proto/extensions.go | 771 +- .../github.com/golang/protobuf/proto/lib.go | 965 - .../golang/protobuf/proto/message_set.go | 181 - .../golang/protobuf/proto/pointer_reflect.go | 360 - .../golang/protobuf/proto/pointer_unsafe.go | 313 - .../golang/protobuf/proto/properties.go | 648 +- .../github.com/golang/protobuf/proto/proto.go | 167 + .../golang/protobuf/proto/registry.go | 323 + .../golang/protobuf/proto/table_marshal.go | 2776 -- .../golang/protobuf/proto/table_merge.go | 654 - .../golang/protobuf/proto/table_unmarshal.go | 2053 - .../github.com/golang/protobuf/proto/text.go | 843 - .../golang/protobuf/proto/text_decode.go | 801 + .../golang/protobuf/proto/text_encode.go | 560 + .../golang/protobuf/proto/text_parser.go | 880 - .../github.com/golang/protobuf/proto/wire.go | 78 + .../golang/protobuf/proto/wrappers.go | 34 + .../protoc-gen-go/descriptor/descriptor.pb.go | 3034 +- .../protoc-gen-go/descriptor/descriptor.proto | 883 - .../golang/protobuf/protoc-gen-go/main.go | 74 + .../github.com/golang/protobuf/ptypes/any.go | 214 +- .../golang/protobuf/ptypes/any/any.pb.go | 231 +- .../golang/protobuf/ptypes/any/any.proto | 154 - .../github.com/golang/protobuf/ptypes/doc.go | 37 +- .../golang/protobuf/ptypes/duration.go | 116 +- .../protobuf/ptypes/duration/duration.pb.go | 193 +- .../protobuf/ptypes/duration/duration.proto | 117 - .../golang/protobuf/ptypes/empty/empty.pb.go | 62 + .../golang/protobuf/ptypes/timestamp.go | 109 +- .../protobuf/ptypes/timestamp/timestamp.pb.go | 212 +- .../protobuf/ptypes/timestamp/timestamp.proto | 135 - vendor/github.com/golang/snappy/.gitignore | 16 - vendor/github.com/golang/snappy/AUTHORS | 15 - vendor/github.com/golang/snappy/CONTRIBUTORS | 37 - vendor/github.com/golang/snappy/README | 107 - vendor/github.com/golang/snappy/decode.go | 237 - .../github.com/golang/snappy/decode_amd64.go | 14 - .../github.com/golang/snappy/decode_amd64.s | 490 - .../github.com/golang/snappy/decode_other.go | 101 - vendor/github.com/golang/snappy/encode.go | 285 - .../github.com/golang/snappy/encode_amd64.go | 29 - .../github.com/golang/snappy/encode_amd64.s | 730 - .../github.com/golang/snappy/encode_other.go | 238 - vendor/github.com/golang/snappy/go.mod | 1 - vendor/github.com/golang/snappy/snappy.go | 98 - 
.../github.com/google/go-cmp/cmp/compare.go | 162 +- .../google/go-cmp/cmp/export_panic.go | 6 +- .../google/go-cmp/cmp/export_unsafe.go | 20 +- .../google/go-cmp/cmp/internal/diff/diff.go | 22 +- .../google/go-cmp/cmp/internal/value/name.go | 157 + .../cmp/internal/value/pointer_purego.go | 10 + .../cmp/internal/value/pointer_unsafe.go | 10 + .../github.com/google/go-cmp/cmp/options.go | 55 +- vendor/github.com/google/go-cmp/cmp/path.go | 78 +- vendor/github.com/google/go-cmp/cmp/report.go | 5 +- .../google/go-cmp/cmp/report_compare.go | 200 +- .../google/go-cmp/cmp/report_references.go | 264 + .../google/go-cmp/cmp/report_reflect.go | 239 +- .../google/go-cmp/cmp/report_slices.go | 135 +- .../google/go-cmp/cmp/report_text.go | 86 +- vendor/github.com/gookit/color/color_16.go | 6 +- .../github.com/gookit/color/color_windows.go | 2 +- .../{golang-lru => go-checkpoint}/LICENSE | 302 +- .../hashicorp/go-checkpoint/README.md | 22 + .../hashicorp/go-checkpoint/check.go | 368 + .../github.com/hashicorp/go-checkpoint/go.mod | 6 + .../github.com/hashicorp/go-checkpoint/go.sum | 4 + .../hashicorp/go-checkpoint/telemetry.go | 118 + .../hashicorp/go-checkpoint/versions.go | 90 + .../barcode => hashicorp/go-cty}/LICENSE | 4 +- .../hashicorp/go-cty/cty/capsule.go | 128 + .../hashicorp/go-cty/cty/capsule_ops.go | 132 + .../hashicorp/go-cty/cty/collection.go | 34 + .../go-cty/cty/convert/compare_types.go | 165 + .../go-cty/cty/convert/conversion.go | 190 + .../go-cty/cty/convert/conversion_capsule.go | 31 + .../cty/convert/conversion_collection.go | 488 + .../go-cty/cty/convert/conversion_dynamic.go | 33 + .../go-cty/cty/convert/conversion_object.go | 76 + .../cty/convert/conversion_primitive.go | 57 + .../go-cty/cty/convert/conversion_tuple.go | 71 + .../hashicorp/go-cty/cty/convert/doc.go | 15 + .../go-cty/cty/convert/mismatch_msg.go | 220 + .../hashicorp/go-cty/cty/convert/public.go | 83 + .../go-cty/cty/convert/sort_types.go | 69 + .../hashicorp/go-cty/cty/convert/unify.go | 357 + vendor/github.com/hashicorp/go-cty/cty/doc.go | 18 + .../hashicorp/go-cty/cty/element_iterator.go | 194 + .../github.com/hashicorp/go-cty/cty/error.go | 55 + vendor/github.com/hashicorp/go-cty/cty/gob.go | 204 + .../hashicorp/go-cty/cty/gocty/doc.go | 7 + .../hashicorp/go-cty/cty/gocty/helpers.go | 43 + .../hashicorp/go-cty/cty/gocty/in.go | 548 + .../hashicorp/go-cty/cty/gocty/out.go | 686 + .../go-cty/cty/gocty/type_implied.go | 108 + .../github.com/hashicorp/go-cty/cty/helper.go | 99 + .../github.com/hashicorp/go-cty/cty/json.go | 176 + .../hashicorp/go-cty/cty/json/doc.go | 11 + .../hashicorp/go-cty/cty/json/marshal.go | 193 + .../hashicorp/go-cty/cty/json/simple.go | 41 + .../hashicorp/go-cty/cty/json/type.go | 23 + .../hashicorp/go-cty/cty/json/type_implied.go | 170 + .../hashicorp/go-cty/cty/json/unmarshal.go | 459 + .../hashicorp/go-cty/cty/json/value.go | 65 + .../hashicorp/go-cty/cty/list_type.go | 74 + .../hashicorp/go-cty/cty/map_type.go | 74 + .../github.com/hashicorp/go-cty/cty/marks.go | 296 + .../hashicorp/go-cty/cty/msgpack/doc.go | 14 + .../hashicorp/go-cty/cty/msgpack/dynamic.go | 31 + .../hashicorp/go-cty/cty/msgpack/infinity.go | 8 + .../hashicorp/go-cty/cty/msgpack/marshal.go | 211 + .../go-cty/cty/msgpack/type_implied.go | 167 + .../hashicorp/go-cty/cty/msgpack/unknown.go | 16 + .../hashicorp/go-cty/cty/msgpack/unmarshal.go | 334 + .../github.com/hashicorp/go-cty/cty/null.go | 14 + .../hashicorp/go-cty/cty/object_type.go | 135 + .../github.com/hashicorp/go-cty/cty/path.go | 270 + 
.../hashicorp/go-cty/cty/path_set.go | 198 + .../hashicorp/go-cty/cty/primitive_type.go | 122 + .../hashicorp/go-cty/cty/set/gob.go | 76 + .../hashicorp/go-cty/cty/set/iterator.go | 15 + .../hashicorp/go-cty/cty/set/ops.go | 210 + .../hashicorp/go-cty/cty/set/rules.go | 43 + .../hashicorp/go-cty/cty/set/set.go | 62 + .../hashicorp/go-cty/cty/set_helper.go | 132 + .../hashicorp/go-cty/cty/set_internals.go | 244 + .../hashicorp/go-cty/cty/set_type.go | 72 + .../hashicorp/go-cty/cty/tuple_type.go | 121 + .../github.com/hashicorp/go-cty/cty/type.go | 120 + .../hashicorp/go-cty/cty/type_conform.go | 139 + .../hashicorp/go-cty/cty/types_to_register.go | 57 + .../hashicorp/go-cty/cty/unknown.go | 84 + .../hashicorp/go-cty/cty/unknown_as_null.go | 64 + .../github.com/hashicorp/go-cty/cty/value.go | 108 + .../hashicorp/go-cty/cty/value_init.go | 324 + .../hashicorp/go-cty/cty/value_ops.go | 1290 + .../github.com/hashicorp/go-cty/cty/walk.go | 182 + .../hashicorp/go-getter/.travis.yml | 24 - .../github.com/hashicorp/go-getter/README.md | 8 +- .../github.com/hashicorp/go-getter/get_git.go | 28 +- .../hashicorp/go-getter/get_http.go | 24 +- .../github.com/hashicorp/go-plugin/README.md | 5 - .../github.com/hashicorp/go-plugin/client.go | 34 +- vendor/github.com/hashicorp/go-plugin/go.mod | 14 +- vendor/github.com/hashicorp/go-plugin/go.sum | 55 +- .../hashicorp/go-plugin/grpc_client.go | 7 + .../hashicorp/go-plugin/grpc_server.go | 19 +- .../hashicorp/go-plugin/grpc_stdio.go | 207 + .../go-plugin/internal/plugin/gen.go | 2 +- .../internal/plugin/grpc_broker.pb.go | 49 +- .../internal/plugin/grpc_broker.proto | 2 - .../internal/plugin/grpc_controller.pb.go | 43 +- .../internal/plugin/grpc_stdio.pb.go | 233 + .../internal/plugin/grpc_stdio.proto | 30 + .../github.com/hashicorp/go-plugin/server.go | 254 +- vendor/github.com/hashicorp/go-uuid/uuid.go | 2 +- .../hashicorp/golang-lru/simplelru/lru.go | 161 - .../golang-lru/simplelru/lru_interface.go | 36 - .../tfconfig/load_hcl.go | 2 +- .../hashicorp/terraform-exec/LICENSE | 373 + .../terraform-exec/tfinstall/pubkey.go | 32 + .../terraform-exec/tfinstall/tfinstall.go | 287 + .../terraform-exec/tfinstall/version.go | 4 + .../hashicorp/terraform-json/schemas.go | 29 +- .../helper/customdiff/compose.go | 72 - .../helper/customdiff/computed.go | 16 - .../helper/customdiff/condition.go | 60 - .../helper/customdiff/doc.go | 11 - .../helper/customdiff/force_new.go | 40 - .../helper/customdiff/validate.go | 38 - .../helper/encryption/encryption.go | 40 - .../helper/mutexkv/mutexkv.go | 51 - .../internal/initwd/from_module.go | 3 +- .../internal/registry/client.go | 2 +- .../internal/registry/regsrc/friendly_host.go | 2 +- .../registry/regsrc/terraform_provider.go | 2 +- .../internal/tfplugin5/tfplugin5.pb.go | 3 +- .../vault/helper/pgpkeys/encrypt_decrypt.go | 85 - .../internal/vault/helper/pgpkeys/flag.go | 112 - .../internal/vault/helper/pgpkeys/keybase.go | 117 - .../vault/sdk/helper/compressutil/compress.go | 207 - .../vault/sdk/helper/jsonutil/json.go | 100 - .../v2}/LICENSE | 301 +- .../v2/diag/diagnostic.go | 104 + .../terraform-plugin-sdk/v2/diag/helpers.go | 36 + .../v2/helper/logging/logging.go | 116 + .../v2/helper/logging/transport.go | 70 + .../v2/helper/resource/error.go | 79 + .../v2/helper/resource/id.go | 45 + .../v2/helper/resource/plugin.go | 154 + .../v2/helper/resource/state.go | 280 + .../v2/helper/resource/state_shim.go | 288 + .../v2/helper/resource/testing.go | 1040 + .../v2/helper/resource/testing_config.go | 25 + 
.../v2/helper/resource/testing_new.go | 242 + .../v2/helper/resource/testing_new_config.go | 182 + .../resource/testing_new_import_state.go | 185 + .../v2/helper/resource/wait.go | 109 + .../v2/helper/schema/README.md | 11 + .../v2/helper/schema/core_schema.go | 368 + .../schema/data_source_resource_shim.go | 59 + .../v2/helper/schema/equal.go | 6 + .../v2/helper/schema/field_reader.go | 335 + .../v2/helper/schema/field_reader_config.go | 328 + .../v2/helper/schema/field_reader_diff.go | 245 + .../v2/helper/schema/field_reader_map.go | 202 + .../v2/helper/schema/field_reader_multi.go | 63 + .../v2/helper/schema/field_writer.go | 8 + .../v2/helper/schema/field_writer_map.go | 357 + .../v2/helper/schema/getsource_string.go | 46 + .../v2/helper/schema/provider.go | 469 + .../v2/helper/schema/resource.go | 809 + .../v2/helper/schema/resource_data.go | 540 + .../helper/schema/resource_data_get_source.go | 17 + .../v2/helper/schema/resource_diff.go | 559 + .../v2/helper/schema/resource_importer.go | 79 + .../v2/helper/schema/resource_timeout.go | 264 + .../v2/helper/schema/schema.go | 2121 ++ .../v2/helper/schema/serialize.go | 130 + .../v2/helper/schema/set.go | 276 + .../v2/helper/schema/shims.go | 116 + .../v2/helper/schema/testing.go | 29 + .../v2/helper/schema/valuetype.go | 21 + .../v2/helper/schema/valuetype_string.go | 31 + .../v2/internal/addrs/doc.go | 17 + .../v2/internal/addrs/instance_key.go | 47 + .../v2/internal/addrs/module.go | 13 + .../v2/internal/addrs/module_instance.go | 241 + .../v2/internal/addrs/resource.go | 130 + .../v2/internal/addrs/resourcemode_string.go | 33 + .../configs/configschema/coerce_value.go | 250 + .../v2/internal/configs/configschema/doc.go | 14 + .../configs/configschema/empty_value.go | 59 + .../configs/configschema/implied_type.go | 68 + .../configschema/nestingmode_string.go | 28 + .../internal/configs/configschema/schema.go | 155 + .../v2/internal/configs/hcl2shim/flatmap.go | 423 + .../v2/internal/configs/hcl2shim/paths.go | 276 + .../configs/hcl2shim/single_attr_body.go | 85 + .../v2/internal/configs/hcl2shim/values.go | 230 + .../internal/configs/hcl2shim/values_equiv.go | 214 + .../v2/internal/helper/hashcode/hashcode.go | 35 + .../internal/helper/plugin/context/context.go | 7 + .../v2/internal/helper/plugin/doc.go | 6 + .../internal/helper/plugin/grpc_provider.go | 1464 + .../v2/internal/helper/plugin/unknown.go | 132 + .../internal/plans/objchange/normalize_obj.go | 133 + .../v2/internal/plugin/convert/diagnostics.go | 142 + .../v2/internal/plugin/convert/schema.go | 183 + .../v2/internal/tfdiags/config_traversals.go | 56 + .../v2/internal/tfdiags/contextual.go | 81 + .../v2/internal/tfdiags/diagnostic.go | 20 + .../v2/internal/tfdiags/diagnostic_base.go | 31 + .../v2/internal/tfdiags/diagnostics.go | 211 + .../v2/internal/tfdiags/doc.go | 16 + .../v2/internal/tfdiags/error.go | 24 + .../v2/internal/tfdiags/rpc_friendly.go | 41 + .../v2/internal/tfdiags/severity_string.go | 29 + .../v2/internal/tfdiags/simple_warning.go | 20 + .../v2/internal/tfplugin5/generate.sh | 16 + .../v2/internal/tfplugin5/tfplugin5.pb.go | 3634 ++ .../v2/internal/tfplugin5/tfplugin5.proto | 368 + .../terraform-plugin-sdk/v2/meta/meta.go | 36 + .../terraform-plugin-sdk/v2/plugin/debug.go | 102 + .../v2/plugin/grpc_provider.go | 41 + .../terraform-plugin-sdk/v2/plugin/serve.go | 82 + .../terraform-plugin-sdk/v2/terraform/diff.go | 990 + .../v2/terraform/instancetype.go | 13 + .../v2/terraform/instancetype_string.go | 26 + .../v2/terraform/resource.go | 333 + 
.../v2/terraform/resource_address.go | 226 + .../v2/terraform/resource_mode.go | 12 + .../v2/terraform/resource_mode_string.go | 24 + .../v2/terraform/resource_provider.go | 26 + .../v2/terraform/schemas.go | 26 + .../v2/terraform/state.go | 1685 + .../v2/terraform/state_filter.go | 267 + .../terraform-plugin-sdk/v2/terraform/util.go | 22 + .../terraform-plugin-test/terraform.go | 3 +- .../terraform-plugin-test/v2/CHANGELOG.md | 30 + .../terraform-plugin-test/v2/LICENSE | 353 + .../terraform-plugin-test/v2/README.md | 4 + .../terraform-plugin-test/v2/config.go | 53 + .../hashicorp/terraform-plugin-test/v2/doc.go | 7 + .../hashicorp/terraform-plugin-test/v2/go.mod | 9 + .../hashicorp/terraform-plugin-test/v2/go.sum | 177 + .../terraform-plugin-test/v2/guard.go | 94 + .../terraform-plugin-test/v2/helper.go | 222 + .../terraform-plugin-test/v2/plugin.go | 15 + .../terraform-plugin-test/v2/terraform.go | 106 + .../terraform-plugin-test/v2/util.go | 95 + .../terraform-plugin-test/v2/working_dir.go | 426 + .../hashicorp/terraform-svchost/auth/cache.go | 2 +- .../terraform-svchost/auth/credentials.go | 3 +- .../terraform-svchost/auth/helper_program.go | 3 +- .../terraform-svchost/auth/static.go | 2 +- .../terraform-svchost/disco/disco.go | 2 +- .../vault/helper/compressutil/compress.go | 192 - .../hashicorp/vault/helper/jsonutil/json.go | 100 - .../vault/helper/pgpkeys/encrypt_decrypt.go | 118 - .../hashicorp/vault/helper/pgpkeys/flag.go | 140 - .../hashicorp/vault/helper/pgpkeys/keybase.go | 117 - .../vault/helper/pgpkeys/test_keys.go | 271 - .../jmespath/go-jmespath/.travis.yml | 25 +- .../github.com/jmespath/go-jmespath/Makefile | 21 +- .../github.com/jmespath/go-jmespath/README.md | 82 +- vendor/github.com/jmespath/go-jmespath/api.go | 2 +- vendor/github.com/jmespath/go-jmespath/go.mod | 5 + vendor/github.com/jmespath/go-jmespath/go.sum | 11 + .../github.com/jmespath/go-jmespath/parser.go | 2 +- .../jstemmer/go-junit-report/.gitignore | 1 + .../jstemmer/go-junit-report/.travis.yml | 16 + .../jstemmer/go-junit-report/LICENSE | 20 + .../jstemmer/go-junit-report/README.md | 49 + .../go-junit-report/formatter/formatter.go | 182 + .../go-junit-report/go-junit-report.go | 45 + .../jstemmer/go-junit-report/go.mod | 3 + .../jstemmer/go-junit-report/parser/parser.go | 319 + .../keybase/go-crypto/brainpool/brainpool.go | 134 - .../keybase/go-crypto/brainpool/rcurve.go | 83 - .../keybase/go-crypto/cast5/cast5.go | 526 - .../keybase/go-crypto/openpgp/armor/armor.go | 226 - .../keybase/go-crypto/openpgp/armor/encode.go | 160 - .../go-crypto/openpgp/canonical_text.go | 59 - .../go-crypto/openpgp/elgamal/elgamal.go | 122 - .../go-crypto/openpgp/errors/errors.go | 72 - .../keybase/go-crypto/openpgp/keys.go | 813 - .../go-crypto/openpgp/packet/compressed.go | 124 - .../go-crypto/openpgp/packet/config.go | 98 - .../keybase/go-crypto/openpgp/packet/ecdh.go | 18 - .../go-crypto/openpgp/packet/encrypted_key.go | 205 - .../go-crypto/openpgp/packet/literal.go | 89 - .../keybase/go-crypto/openpgp/packet/ocfb.go | 143 - .../openpgp/packet/one_pass_signature.go | 74 - .../go-crypto/openpgp/packet/opaque.go | 162 - .../go-crypto/openpgp/packet/packet.go | 542 - .../go-crypto/openpgp/packet/private_key.go | 412 - .../go-crypto/openpgp/packet/public_key.go | 870 - .../go-crypto/openpgp/packet/public_key_v3.go | 280 - .../go-crypto/openpgp/packet/reader.go | 76 - .../go-crypto/openpgp/packet/signature.go | 781 - .../go-crypto/openpgp/packet/signature_v3.go | 146 - .../openpgp/packet/symmetric_key_encrypted.go | 158 - 
.../openpgp/packet/symmetrically_encrypted.go | 291 - .../go-crypto/openpgp/packet/userattribute.go | 91 - .../go-crypto/openpgp/packet/userid.go | 160 - .../keybase/go-crypto/openpgp/patch.sh | 7 - .../keybase/go-crypto/openpgp/read.go | 451 - .../keybase/go-crypto/openpgp/s2k/s2k.go | 326 - .../keybase/go-crypto/openpgp/sig-v3.patch | 135 - .../keybase/go-crypto/openpgp/write.go | 495 - .../keybase/go-crypto/rsa/pkcs1v15.go | 325 - .../github.com/keybase/go-crypto/rsa/pss.go | 297 - .../github.com/keybase/go-crypto/rsa/rsa.go | 646 - vendor/github.com/mitchellh/cli/cli.go | 4 +- .../mitchellh/go-testing-interface/testing.go | 37 +- .../go-testing-interface/testing_go19.go | 7 +- .../zxcvbn-go/entropy/entropyCalculator.go | 7 +- .../nbutton23/zxcvbn-go/scoring/scoring.go | 7 +- .../github.com/nbutton23/zxcvbn-go/zxcvbn.go | 2 +- vendor/github.com/pierrec/lz4/.gitignore | 33 - vendor/github.com/pierrec/lz4/.travis.yml | 18 - vendor/github.com/pierrec/lz4/LICENSE | 28 - vendor/github.com/pierrec/lz4/README.md | 24 - vendor/github.com/pierrec/lz4/block.go | 397 - vendor/github.com/pierrec/lz4/debug.go | 23 - vendor/github.com/pierrec/lz4/debug_stub.go | 7 - .../pierrec/lz4/internal/xxh32/xxh32zero.go | 222 - vendor/github.com/pierrec/lz4/lz4.go | 68 - vendor/github.com/pierrec/lz4/lz4_go1.10.go | 29 - .../github.com/pierrec/lz4/lz4_notgo1.10.go | 29 - vendor/github.com/pierrec/lz4/reader.go | 295 - vendor/github.com/pierrec/lz4/writer.go | 267 - vendor/github.com/pquerna/otp/.travis.yml | 7 - vendor/github.com/pquerna/otp/NOTICE | 5 - vendor/github.com/pquerna/otp/README.md | 60 - vendor/github.com/pquerna/otp/doc.go | 70 - vendor/github.com/pquerna/otp/go.mod | 8 - vendor/github.com/pquerna/otp/go.sum | 11 - vendor/github.com/pquerna/otp/hotp/hotp.go | 204 - vendor/github.com/pquerna/otp/otp.go | 207 - vendor/github.com/pquerna/otp/totp/totp.go | 199 - .../sirupsen/logrus/terminal_check_bsd.go | 1 + .../sirupsen/logrus/terminal_check_unix.go | 1 + .../sourcegraph/go-diff/diff/diff.pb.go | 17 +- vendor/github.com/spf13/afero/mem/file.go | 3 +- vendor/github.com/ulikunitz/xz/LICENSE | 2 +- vendor/github.com/ulikunitz/xz/TODO.md | 13 + vendor/github.com/ulikunitz/xz/bits.go | 2 +- vendor/github.com/ulikunitz/xz/crc.go | 2 +- vendor/github.com/ulikunitz/xz/format.go | 10 +- .../github.com/ulikunitz/xz/fox-check-none.xz | Bin 0 -> 96 bytes vendor/github.com/ulikunitz/xz/go.mod | 3 + .../ulikunitz/xz/internal/hash/cyclic_poly.go | 2 +- .../ulikunitz/xz/internal/hash/doc.go | 2 +- .../ulikunitz/xz/internal/hash/rabin_karp.go | 2 +- .../ulikunitz/xz/internal/hash/roller.go | 2 +- .../ulikunitz/xz/internal/xlog/xlog.go | 2 +- .../github.com/ulikunitz/xz/lzma/bintree.go | 2 +- vendor/github.com/ulikunitz/xz/lzma/bitops.go | 2 +- .../github.com/ulikunitz/xz/lzma/breader.go | 2 +- vendor/github.com/ulikunitz/xz/lzma/buffer.go | 2 +- .../ulikunitz/xz/lzma/bytewriter.go | 2 +- .../github.com/ulikunitz/xz/lzma/decoder.go | 2 +- .../ulikunitz/xz/lzma/decoderdict.go | 2 +- .../ulikunitz/xz/lzma/directcodec.go | 2 +- .../github.com/ulikunitz/xz/lzma/distcodec.go | 2 +- .../github.com/ulikunitz/xz/lzma/encoder.go | 2 +- .../ulikunitz/xz/lzma/encoderdict.go | 2 +- .../github.com/ulikunitz/xz/lzma/hashtable.go | 2 +- vendor/github.com/ulikunitz/xz/lzma/header.go | 2 +- .../github.com/ulikunitz/xz/lzma/header2.go | 2 +- .../ulikunitz/xz/lzma/lengthcodec.go | 2 +- .../ulikunitz/xz/lzma/literalcodec.go | 2 +- .../ulikunitz/xz/lzma/matchalgorithm.go | 2 +- .../github.com/ulikunitz/xz/lzma/operation.go | 2 +- 
vendor/github.com/ulikunitz/xz/lzma/prob.go | 2 +- .../ulikunitz/xz/lzma/properties.go | 2 +- .../ulikunitz/xz/lzma/rangecodec.go | 2 +- vendor/github.com/ulikunitz/xz/lzma/reader.go | 2 +- .../github.com/ulikunitz/xz/lzma/reader2.go | 2 +- vendor/github.com/ulikunitz/xz/lzma/state.go | 2 +- .../ulikunitz/xz/lzma/treecodecs.go | 2 +- vendor/github.com/ulikunitz/xz/lzma/writer.go | 2 +- .../github.com/ulikunitz/xz/lzma/writer2.go | 2 +- vendor/github.com/ulikunitz/xz/lzmafilter.go | 2 +- vendor/github.com/ulikunitz/xz/none-check.go | 23 + vendor/github.com/ulikunitz/xz/reader.go | 8 +- vendor/github.com/ulikunitz/xz/writer.go | 15 +- .../github.com/zclconf/go-cty-yaml/readerc.go | 2 +- vendor/go.opencensus.io/Gopkg.lock | 231 - vendor/go.opencensus.io/Gopkg.toml | 36 - vendor/go.opencensus.io/Makefile | 27 +- vendor/go.opencensus.io/README.md | 6 +- vendor/go.opencensus.io/appveyor.yml | 7 +- vendor/go.opencensus.io/go.mod | 7 +- vendor/go.opencensus.io/go.sum | 21 +- vendor/go.opencensus.io/internal/internal.go | 2 +- vendor/go.opencensus.io/opencensus.go | 2 +- .../plugin/ochttp/propagation/b3/b3.go | 4 +- .../go.opencensus.io/plugin/ochttp/server.go | 12 +- .../go.opencensus.io/plugin/ochttp/stats.go | 18 +- .../go.opencensus.io/plugin/ochttp/trace.go | 5 + vendor/go.opencensus.io/stats/record.go | 20 + vendor/go.opencensus.io/stats/units.go | 1 + .../stats/view/aggregation.go | 11 +- .../stats/view/aggregation_data.go | 6 +- vendor/go.opencensus.io/stats/view/doc.go | 2 +- vendor/go.opencensus.io/stats/view/export.go | 17 +- vendor/go.opencensus.io/stats/view/view.go | 6 +- .../stats/view/view_to_metric.go | 16 +- vendor/go.opencensus.io/stats/view/worker.go | 188 +- .../stats/view/worker_commands.go | 6 +- vendor/go.opencensus.io/tag/key.go | 5 +- vendor/go.opencensus.io/tag/map_codec.go | 2 +- vendor/go.opencensus.io/trace/lrumap.go | 40 +- vendor/go.opencensus.io/trace/trace.go | 18 +- .../x/crypto/chacha20/chacha_arm64.go | 16 + .../asm_arm64.s => chacha20/chacha_arm64.s} | 3 +- .../x/crypto/chacha20/chacha_generic.go | 398 + .../{internal => }/chacha20/chacha_noasm.go | 11 +- .../x/crypto/chacha20/chacha_ppc64le.go | 16 + .../chacha_ppc64le.s} | 30 +- .../x/crypto/chacha20/chacha_s390x.go | 26 + .../{internal => }/chacha20/chacha_s390x.s | 40 +- .../x/crypto/{internal => }/chacha20/xor.go | 21 +- .../x/crypto/curve25519/const_amd64.h | 8 - .../x/crypto/curve25519/const_amd64.s | 20 - .../x/crypto/curve25519/cswap_amd64.s | 65 - .../x/crypto/curve25519/curve25519.go | 881 +- ...mont25519_amd64.go => curve25519_amd64.go} | 2 +- ...{ladderstep_amd64.s => curve25519_amd64.s} | 420 +- .../x/crypto/curve25519/curve25519_generic.go | 828 + .../x/crypto/curve25519/curve25519_noasm.go | 11 + vendor/golang.org/x/crypto/curve25519/doc.go | 23 - .../x/crypto/curve25519/freeze_amd64.s | 73 - .../x/crypto/curve25519/mul_amd64.s | 169 - .../x/crypto/curve25519/square_amd64.s | 132 - .../crypto/internal/chacha20/chacha_arm64.go | 31 - .../internal/chacha20/chacha_generic.go | 264 - .../internal/chacha20/chacha_ppc64le.go | 53 - .../crypto/internal/chacha20/chacha_s390x.go | 29 - .../x/crypto/openpgp/armor/armor.go | 24 +- .../x/crypto/openpgp/elgamal/elgamal.go | 4 +- .../x/crypto/openpgp/packet/compressed.go | 3 +- .../openpgp/packet/one_pass_signature.go | 5 +- .../x/crypto/openpgp/packet/packet.go | 67 +- .../x/crypto/openpgp/packet/reader.go | 3 +- .../openpgp/packet/symmetrically_encrypted.go | 3 +- .../x/crypto/poly1305/bits_compat.go | 39 + .../x/crypto/poly1305/bits_go1.13.go | 21 + 
.../golang.org/x/crypto/poly1305/mac_noasm.go | 4 +- .../golang.org/x/crypto/poly1305/poly1305.go | 34 +- .../golang.org/x/crypto/poly1305/sum_amd64.go | 65 +- .../golang.org/x/crypto/poly1305/sum_amd64.s | 42 +- .../golang.org/x/crypto/poly1305/sum_arm.go | 22 - vendor/golang.org/x/crypto/poly1305/sum_arm.s | 427 - .../x/crypto/poly1305/sum_generic.go | 398 +- .../golang.org/x/crypto/poly1305/sum_noasm.go | 16 - .../x/crypto/poly1305/sum_ppc64le.go | 65 +- .../x/crypto/poly1305/sum_ppc64le.s | 68 +- .../golang.org/x/crypto/poly1305/sum_s390x.go | 77 +- .../golang.org/x/crypto/poly1305/sum_s390x.s | 667 +- .../x/crypto/poly1305/sum_vmsl_s390x.s | 909 - vendor/golang.org/x/crypto/ssh/certs.go | 39 +- vendor/golang.org/x/crypto/ssh/cipher.go | 53 +- vendor/golang.org/x/crypto/ssh/client_auth.go | 22 +- .../ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go | 93 + vendor/golang.org/x/crypto/ssh/kex.go | 6 +- vendor/golang.org/x/crypto/ssh/keys.go | 482 +- vendor/golang.org/x/crypto/ssh/mux.go | 23 +- vendor/golang.org/x/crypto/ssh/server.go | 4 +- vendor/golang.org/x/lint/.travis.yml | 19 + .../x/lint}/CONTRIBUTING.md | 8 +- .../agl/ed25519 => golang.org/x/lint}/LICENSE | 2 +- vendor/golang.org/x/lint/README.md | 88 + vendor/golang.org/x/lint/go.mod | 5 + vendor/golang.org/x/lint/go.sum | 12 + vendor/golang.org/x/lint/golint/golint.go | 159 + vendor/golang.org/x/lint/golint/import.go | 309 + .../golang.org/x/lint/golint/importcomment.go | 13 + vendor/golang.org/x/lint/lint.go | 1615 + vendor/golang.org/x/mod/module/module.go | 6 +- .../x/net/http2/client_conn_pool.go | 8 +- vendor/golang.org/x/net/http2/flow.go | 2 + .../golang.org/x/net/http2/hpack/huffman.go | 7 + vendor/golang.org/x/net/http2/http2.go | 13 +- vendor/golang.org/x/net/http2/server.go | 19 +- vendor/golang.org/x/net/http2/transport.go | 100 +- vendor/golang.org/x/oauth2/README.md | 13 +- vendor/golang.org/x/oauth2/transport.go | 79 +- vendor/golang.org/x/sys/cpu/cpu.go | 9 + .../sys/cpu/{cpu_aix_ppc64.go => cpu_aix.go} | 2 +- vendor/golang.org/x/sys/cpu/cpu_arm64.go | 144 + vendor/golang.org/x/sys/cpu/cpu_arm64.s | 31 + vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go | 11 + .../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 11 + .../sys/cpu/{cpu_gccgo.c => cpu_gccgo_x86.c} | 0 .../cpu/{cpu_gccgo.go => cpu_gccgo_x86.go} | 0 vendor/golang.org/x/sys/cpu/cpu_linux.go | 48 +- .../golang.org/x/sys/cpu/cpu_linux_arm64.go | 8 +- .../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 22 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 9 + vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 2 - vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 2 - .../golang.org/x/sys/cpu/cpu_other_arm64.go | 2 - vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 9 + vendor/golang.org/x/sys/cpu/cpu_wasm.go | 2 - vendor/golang.org/x/sys/cpu/hwcap_linux.go | 56 + .../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 27 + .../sys/internal/unsafeheader/unsafeheader.go | 30 + vendor/golang.org/x/sys/unix/README.md | 15 +- .../golang.org/x/sys/unix/asm_linux_riscv64.s | 7 - .../golang.org/x/sys/unix/bluetooth_linux.go | 1 + .../x/sys/unix/errors_freebsd_386.go | 6 + .../x/sys/unix/errors_freebsd_amd64.go | 6 + .../x/sys/unix/errors_freebsd_arm64.go | 17 + vendor/golang.org/x/sys/unix/fcntl.go | 12 +- vendor/golang.org/x/sys/unix/fdset.go | 29 + vendor/golang.org/x/sys/unix/mkall.sh | 15 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 25 +- .../x/sys/unix/sockcmsg_dragonfly.go | 16 + .../golang.org/x/sys/unix/sockcmsg_linux.go | 2 +- vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 36 +- 
.../x/sys/unix/sockcmsg_unix_other.go | 38 + vendor/golang.org/x/sys/unix/syscall_bsd.go | 21 +- .../x/sys/unix/syscall_darwin.1_13.go | 23 +- .../golang.org/x/sys/unix/syscall_darwin.go | 22 +- .../x/sys/unix/syscall_darwin_386.go | 12 - .../x/sys/unix/syscall_darwin_amd64.go | 12 - .../x/sys/unix/syscall_darwin_arm.1_11.go | 2 +- .../x/sys/unix/syscall_darwin_arm.go | 15 - .../x/sys/unix/syscall_darwin_arm64.go | 15 - .../x/sys/unix/syscall_dragonfly.go | 24 +- .../golang.org/x/sys/unix/syscall_freebsd.go | 23 +- .../x/sys/unix/syscall_freebsd_386.go | 10 + .../x/sys/unix/syscall_freebsd_amd64.go | 10 + .../x/sys/unix/syscall_freebsd_arm.go | 6 + .../x/sys/unix/syscall_freebsd_arm64.go | 6 + .../golang.org/x/sys/unix/syscall_illumos.go | 57 + vendor/golang.org/x/sys/unix/syscall_linux.go | 269 +- .../x/sys/unix/syscall_linux_386.go | 6 +- .../x/sys/unix/syscall_linux_amd64.go | 6 +- .../x/sys/unix/syscall_linux_arm.go | 6 +- .../x/sys/unix/syscall_linux_arm64.go | 32 +- .../x/sys/unix/syscall_linux_mips64x.go | 10 +- .../x/sys/unix/syscall_linux_mipsx.go | 6 +- .../x/sys/unix/syscall_linux_ppc64x.go | 6 +- .../x/sys/unix/syscall_linux_riscv64.go | 12 +- .../x/sys/unix/syscall_linux_s390x.go | 6 +- .../x/sys/unix/syscall_linux_sparc64.go | 6 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 32 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 33 +- .../golang.org/x/sys/unix/syscall_solaris.go | 2 +- vendor/golang.org/x/sys/unix/syscall_unix.go | 19 +- .../golang.org/x/sys/unix/zerrors_aix_ppc.go | 12 +- .../x/sys/unix/zerrors_aix_ppc64.go | 12 +- .../x/sys/unix/zerrors_freebsd_386.go | 160 +- .../x/sys/unix/zerrors_freebsd_amd64.go | 158 +- .../x/sys/unix/zerrors_freebsd_arm.go | 16 + .../x/sys/unix/zerrors_freebsd_arm64.go | 159 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 2471 ++ .../x/sys/unix/zerrors_linux_386.go | 3269 +- .../x/sys/unix/zerrors_linux_amd64.go | 3269 +- .../x/sys/unix/zerrors_linux_arm.go | 3281 +- .../x/sys/unix/zerrors_linux_arm64.go | 3255 +- .../x/sys/unix/zerrors_linux_mips.go | 3273 +- .../x/sys/unix/zerrors_linux_mips64.go | 3273 +- .../x/sys/unix/zerrors_linux_mips64le.go | 3273 +- .../x/sys/unix/zerrors_linux_mipsle.go | 3273 +- .../x/sys/unix/zerrors_linux_ppc64.go | 3392 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 3392 +- .../x/sys/unix/zerrors_linux_riscv64.go | 3243 +- .../x/sys/unix/zerrors_linux_s390x.go | 3389 +- .../x/sys/unix/zerrors_linux_sparc64.go | 3370 +- ...acearm_linux.go => zptrace_armnn_linux.go} | 2 +- .../x/sys/unix/zptrace_linux_arm64.go | 17 + ...emips_linux.go => zptrace_mipsnn_linux.go} | 2 +- ...sle_linux.go => zptrace_mipsnnle_linux.go} | 2 +- ...trace386_linux.go => zptrace_x86_linux.go} | 2 +- .../x/sys/unix/zsyscall_darwin_386.1_11.go | 76 +- .../x/sys/unix/zsyscall_darwin_386.go | 106 +- .../x/sys/unix/zsyscall_darwin_386.s | 8 +- .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 76 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 106 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 8 +- .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 60 +- .../x/sys/unix/zsyscall_darwin_arm.go | 85 +- .../x/sys/unix/zsyscall_darwin_arm.s | 6 +- .../x/sys/unix/zsyscall_darwin_arm64.1_11.go | 60 +- .../x/sys/unix/zsyscall_darwin_arm64.go | 85 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 6 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 11 - .../x/sys/unix/zsyscall_freebsd_386.go | 63 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 43 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 11 - .../x/sys/unix/zsyscall_freebsd_arm64.go | 45 +- 
.../x/sys/unix/zsyscall_illumos_amd64.go | 87 + .../golang.org/x/sys/unix/zsyscall_linux.go | 1856 + .../x/sys/unix/zsyscall_linux_386.go | 1744 +- .../x/sys/unix/zsyscall_linux_amd64.go | 1744 +- .../x/sys/unix/zsyscall_linux_arm.go | 1744 +- .../x/sys/unix/zsyscall_linux_arm64.go | 1746 +- .../x/sys/unix/zsyscall_linux_mips.go | 1744 +- .../x/sys/unix/zsyscall_linux_mips64.go | 1744 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 1744 +- .../x/sys/unix/zsyscall_linux_mipsle.go | 1744 +- .../x/sys/unix/zsyscall_linux_ppc64.go | 1744 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 1744 +- .../x/sys/unix/zsyscall_linux_riscv64.go | 1742 +- .../x/sys/unix/zsyscall_linux_s390x.go | 1744 +- .../x/sys/unix/zsyscall_linux_sparc64.go | 1744 +- .../x/sys/unix/zsyscall_netbsd_386.go | 78 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 78 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 78 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 78 +- .../x/sys/unix/zsyscall_openbsd_386.go | 57 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 57 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 57 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 57 +- .../x/sys/unix/zsysctl_openbsd_386.go | 3 +- .../x/sys/unix/zsysctl_openbsd_amd64.go | 1 + .../x/sys/unix/zsysctl_openbsd_arm.go | 1 + .../x/sys/unix/zsysnum_linux_386.go | 2 + .../x/sys/unix/zsysnum_linux_amd64.go | 2 + .../x/sys/unix/zsysnum_linux_arm.go | 2 + .../x/sys/unix/zsysnum_linux_arm64.go | 3 + .../x/sys/unix/zsysnum_linux_mips.go | 3 + .../x/sys/unix/zsysnum_linux_mips64.go | 3 + .../x/sys/unix/zsysnum_linux_mips64le.go | 3 + .../x/sys/unix/zsysnum_linux_mipsle.go | 3 + .../x/sys/unix/zsysnum_linux_ppc64.go | 2 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 2 + .../x/sys/unix/zsysnum_linux_riscv64.go | 2 + .../x/sys/unix/zsysnum_linux_s390x.go | 2 + .../x/sys/unix/zsysnum_linux_sparc64.go | 2 + .../x/sys/unix/ztypes_dragonfly_amd64.go | 10 + .../x/sys/unix/ztypes_freebsd_386.go | 51 +- .../x/sys/unix/ztypes_freebsd_amd64.go | 36 +- .../x/sys/unix/ztypes_freebsd_arm.go | 12 +- .../x/sys/unix/ztypes_freebsd_arm64.go | 66 +- vendor/golang.org/x/sys/unix/ztypes_linux.go | 2345 ++ .../golang.org/x/sys/unix/ztypes_linux_386.go | 2037 +- .../x/sys/unix/ztypes_linux_amd64.go | 2038 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2038 +- .../x/sys/unix/ztypes_linux_arm64.go | 2038 +- .../x/sys/unix/ztypes_linux_mips.go | 2038 +- .../x/sys/unix/ztypes_linux_mips64.go | 2039 +- .../x/sys/unix/ztypes_linux_mips64le.go | 2039 +- .../x/sys/unix/ztypes_linux_mipsle.go | 2038 +- .../x/sys/unix/ztypes_linux_ppc64.go | 2038 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 2038 +- .../x/sys/unix/ztypes_linux_riscv64.go | 2038 +- .../x/sys/unix/ztypes_linux_s390x.go | 2038 +- .../x/sys/unix/ztypes_linux_sparc64.go | 2038 +- .../x/sys/unix/ztypes_netbsd_386.go | 32 + .../x/sys/unix/ztypes_netbsd_amd64.go | 33 + .../x/sys/unix/ztypes_netbsd_arm.go | 32 + .../x/sys/unix/ztypes_netbsd_arm64.go | 33 + .../x/sys/unix/ztypes_solaris_amd64.go | 7 + .../golang.org/x/text/transform/transform.go | 6 +- vendor/golang.org/x/text/unicode/bidi/core.go | 8 +- .../x/text/unicode/bidi/tables11.0.0.go | 2 +- .../x/text/unicode/bidi/tables12.0.0.go | 1923 + .../x/text/unicode/norm/tables11.0.0.go | 2 +- .../x/text/unicode/norm/tables12.0.0.go | 7710 ++++ .../golang.org/x/text/width/tables11.0.0.go | 2 +- .../golang.org/x/text/width/tables12.0.0.go | 1350 + .../golang.org/x/tools/cmd/goimports/doc.go | 43 + .../x/tools/cmd/goimports/goimports.go | 380 + .../x/tools/cmd/goimports/goimports_gc.go | 26 + 
.../x/tools/cmd/goimports/goimports_not_gc.go | 11 + .../x/tools/go/analysis/analysis.go | 16 + .../go/analysis/analysistest/analysistest.go | 400 - vendor/golang.org/x/tools/go/analysis/doc.go | 9 + .../analysis/internal/analysisflags/flags.go | 2 +- .../go/analysis/internal/checker/checker.go | 41 +- .../go/analysis/passes/errorsas/errorsas.go | 2 +- .../go/analysis/passes/findcall/findcall.go | 14 +- .../go/analysis/passes/nilness/nilness.go | 103 +- .../tools/go/analysis/passes/printf/printf.go | 63 +- .../passes/unreachable/unreachable.go | 13 +- .../x/tools/go/ast/inspector/inspector.go | 6 +- .../tools/go/internal/packagesdriver/sizes.go | 109 +- .../golang.org/x/tools/go/packages/golist.go | 107 +- .../x/tools/go/packages/golist_overlay.go | 145 +- .../x/tools/go/packages/loadmode_string.go | 2 +- .../x/tools/go/packages/packages.go | 80 +- .../x/tools/go/types/objectpath/objectpath.go | 5 +- vendor/golang.org/x/tools/imports/forward.go | 40 +- .../internal/analysisinternal/analysis.go | 200 + .../x/tools/internal/event/core/event.go | 85 + .../x/tools/internal/event/core/export.go | 70 + .../x/tools/internal/event/core/fast.go | 77 + .../golang.org/x/tools/internal/event/doc.go | 7 + .../x/tools/internal/event/event.go | 127 + .../x/tools/internal/event/keys/keys.go | 564 + .../x/tools/internal/event/keys/standard.go | 22 + .../x/tools/internal/event/label/label.go | 213 + .../tools/internal/fastwalk/fastwalk_unix.go | 5 +- .../x/tools/internal/gocommand/invoke.go | 230 + .../x/tools/internal/gocommand/vendor.go | 102 + .../x/tools/internal/gopathwalk/walk.go | 32 +- .../x/tools/internal/imports/fix.go | 227 +- .../x/tools/internal/imports/imports.go | 95 +- .../x/tools/internal/imports/mod.go | 125 +- .../x/tools/internal/imports/sortimports.go | 20 +- .../x/tools/internal/imports/zstdlib.go | 87 + .../internal/packagesinternal/packages.go | 10 + .../golang.org/x/tools/internal/span/parse.go | 100 + .../golang.org/x/tools/internal/span/span.go | 285 + .../golang.org/x/tools/internal/span/token.go | 182 + .../x/tools/internal/span/token111.go | 39 + .../x/tools/internal/span/token112.go | 16 + .../golang.org/x/tools/internal/span/uri.go | 169 + .../golang.org/x/tools/internal/span/utf16.go | 94 + .../x/tools/internal/testenv/testenv.go | 185 - .../x/tools/internal/testenv/testenv_112.go | 27 - .../x/tools/internal/typesinternal/types.go | 28 + vendor/golang.org/x/xerrors/fmt.go | 138 +- vendor/google.golang.org/api/AUTHORS | 1 + vendor/google.golang.org/api/CONTRIBUTORS | 1 + .../api/googleapi/googleapi.go | 35 +- .../googleapi/internal/uritemplates/LICENSE | 18 - .../api/googleapi/transport/apikey.go | 8 +- .../google.golang.org/api/googleapi/types.go | 2 +- .../api/internal/conn_pool.go | 30 + .../google.golang.org/api/internal/creds.go | 29 +- .../api/{ => internal}/gensupport/buffer.go | 0 .../api/{ => internal}/gensupport/doc.go | 0 .../api/{ => internal}/gensupport/json.go | 0 .../{ => internal}/gensupport/jsonfloat.go | 16 +- .../api/{ => internal}/gensupport/media.go | 11 +- .../api/{ => internal}/gensupport/params.go | 0 .../{ => internal}/gensupport/resumable.go | 47 +- .../internal/gensupport/retryable_linux.go | 15 + .../api/{ => internal}/gensupport/send.go | 85 + .../api/internal/gensupport/version.go | 53 + vendor/google.golang.org/api/internal/pool.go | 61 - .../api/internal/settings.go | 62 +- .../third_party/uritemplates}/LICENSE | 2 +- .../third_party/uritemplates/METADATA | 14 + .../third_party}/uritemplates/uritemplates.go | 2 +- 
.../third_party}/uritemplates/utils.go | 0 .../api/iterator/iterator.go | 36 +- .../api/option/credentials_go19.go | 16 +- .../api/option/credentials_notgo19.go | 16 +- .../option/internaloption/internaloption.go | 40 + vendor/google.golang.org/api/option/option.go | 68 +- .../api/storage/v1/storage-api.json | 90 +- .../api/storage/v1/storage-gen.go | 281 +- .../api/transport/cert/default_cert.go | 110 + .../transport/http/default_transport_go113.go | 20 + .../http/default_transport_not_go113.go | 15 + .../api/transport/http/dial.go | 205 +- .../api/transport/http/dial_appengine.go | 16 +- .../http/internal/propagation/http.go | 16 +- .../google.golang.org/appengine/.travis.yml | 2 - vendor/google.golang.org/appengine/go.mod | 5 +- vendor/google.golang.org/appengine/go.sum | 11 - .../appengine/internal/api.go | 7 +- .../app_identity/app_identity_service.pb.go | 10 +- .../appengine/internal/base/api_base.pb.go | 10 +- .../internal/datastore/datastore_v3.pb.go | 10 +- .../appengine/internal/log/log_service.pb.go | 10 +- .../internal/modules/modules_service.pb.go | 10 +- .../appengine/internal/net.go | 2 +- .../internal/remote_api/remote_api.pb.go | 10 +- .../internal/urlfetch/urlfetch_service.pb.go | 10 +- .../api/annotations/annotations.pb.go | 147 +- .../googleapis/api/annotations/client.pb.go | 256 +- .../api/annotations/field_behavior.pb.go | 238 +- .../googleapis/api/annotations/http.pb.go | 554 +- .../googleapis/api/annotations/resource.pb.go | 691 +- .../googleapis/iam/v1/iam_policy.pb.go | 558 +- .../genproto/googleapis/iam/v1/options.pb.go | 214 +- .../genproto/googleapis/iam/v1/policy.pb.go | 806 +- .../genproto/googleapis/rpc/code/code.pb.go | 252 +- .../googleapis/rpc/status/status.pb.go | 277 +- .../genproto/googleapis/type/expr/expr.pb.go | 220 +- vendor/google.golang.org/grpc/.travis.yml | 29 +- .../google.golang.org/grpc/CODE-OF-CONDUCT.md | 3 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 4 +- vendor/google.golang.org/grpc/GOVERNANCE.md | 1 + vendor/google.golang.org/grpc/MAINTAINERS.md | 27 + vendor/google.golang.org/grpc/Makefile | 3 + vendor/google.golang.org/grpc/README.md | 25 +- .../grpc/attributes/attributes.go | 76 + vendor/google.golang.org/grpc/backoff.go | 20 + .../google.golang.org/grpc/backoff/backoff.go | 52 + vendor/google.golang.org/grpc/balancer.go | 391 - .../grpc/balancer/balancer.go | 180 +- .../grpc/balancer/base/balancer.go | 113 +- .../grpc/balancer/base/base.go | 36 +- .../grpc/balancer/grpclb/state/state.go | 51 + .../grpc/balancer/roundrobin/roundrobin.go | 16 +- .../grpc/balancer_conn_wrappers.go | 158 +- .../grpc/balancer_v1_wrapper.go | 334 - .../grpc_binarylog_v1/binarylog.pb.go | 314 +- vendor/google.golang.org/grpc/clientconn.go | 431 +- .../grpc/credentials/credentials.go | 319 +- .../grpc/credentials/{tls13.go => go12.go} | 0 .../google.golang.org/grpc/credentials/tls.go | 235 + vendor/google.golang.org/grpc/dialoptions.go | 144 +- vendor/google.golang.org/grpc/doc.go | 2 + .../grpc/encoding/encoding.go | 4 + vendor/google.golang.org/grpc/go.mod | 18 +- vendor/google.golang.org/grpc/go.sum | 41 +- .../google.golang.org/grpc/grpclog/grpclog.go | 44 +- .../google.golang.org/grpc/grpclog/logger.go | 4 +- .../grpc/grpclog/loggerv2.go | 21 +- .../google.golang.org/grpc/health/client.go | 42 +- .../grpc/health/grpc_health_v1/health.pb.go | 201 +- .../health/grpc_health_v1/health_grpc.pb.go | 186 + .../grpc/health/regenerate.sh | 33 - .../google.golang.org/grpc/health/server.go | 13 +- .../grpc/internal/backoff/backoff.go | 27 +- 
.../grpc/internal/binarylog/binarylog.go | 15 +- .../grpc/internal/binarylog/env_config.go | 4 +- .../grpc/internal/binarylog/regenerate.sh | 33 - .../grpc/internal/binarylog/sink.go | 2 +- .../grpc/internal/buffer/unbounded.go | 85 + .../grpc/internal/channelz/funcs.go | 22 +- .../grpc/internal/channelz/logging.go | 100 + .../grpc/internal/envconfig/envconfig.go | 7 +- .../grpc/internal/grpclog/grpclog.go | 118 + .../grpc/internal/grpclog/prefixLogger.go | 63 + .../{binarylog/util.go => grpcutil/method.go} | 7 +- .../grpc/internal/grpcutil/target.go | 55 + .../grpc/internal/internal.go | 29 +- .../resolver/dns/dns_resolver.go | 219 +- .../grpc/internal/resolver/dns/go113.go | 33 + .../resolver/passthrough/passthrough.go | 4 +- .../internal/serviceconfig/serviceconfig.go | 90 + .../grpc/internal/status/status.go | 162 + .../grpc/internal/syscall/syscall_nonlinux.go | 2 + .../grpc/internal/transport/controlbuf.go | 76 +- .../grpc/internal/transport/handler_server.go | 83 +- .../grpc/internal/transport/http2_client.go | 259 +- .../grpc/internal/transport/http2_server.go | 178 +- .../grpc/internal/transport/http_util.go | 1 + .../grpc/internal/transport/transport.go | 82 +- .../grpc/naming/dns_resolver.go | 293 - .../google.golang.org/grpc/naming/naming.go | 68 - .../google.golang.org/grpc/picker_wrapper.go | 135 +- vendor/google.golang.org/grpc/pickfirst.go | 77 +- .../grpc/reflection/README.md | 18 + .../grpc_reflection_v1alpha/reflection.pb.go | 634 + .../grpc_reflection_v1alpha/reflection.proto | 138 + .../reflection_grpc.pb.go | 124 + .../grpc/reflection/serverreflection.go | 453 + vendor/google.golang.org/grpc/regenerate.sh | 79 + .../grpc/resolver/resolver.go | 93 +- .../grpc/resolver_conn_wrapper.go | 198 +- vendor/google.golang.org/grpc/rpc_util.go | 119 +- vendor/google.golang.org/grpc/server.go | 374 +- .../google.golang.org/grpc/service_config.go | 97 +- .../grpc/serviceconfig/serviceconfig.go | 21 +- vendor/google.golang.org/grpc/stats/stats.go | 24 +- .../google.golang.org/grpc/status/status.go | 119 +- vendor/google.golang.org/grpc/stream.go | 58 +- .../grpc/test/bufconn/bufconn.go | 74 +- vendor/google.golang.org/grpc/trace.go | 3 - vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 147 +- .../protobuf}/AUTHORS | 2 +- .../protobuf}/CONTRIBUTORS | 2 +- .../protobuf}/LICENSE | 2 +- .../protobuf}/PATENTS | 0 .../cmd/protoc-gen-go/internal_gengo/init.go | 168 + .../cmd/protoc-gen-go/internal_gengo/main.go | 901 + .../protoc-gen-go/internal_gengo/reflect.go | 351 + .../internal_gengo/well_known_types.go | 1077 + .../protobuf/compiler/protogen/protogen.go | 1419 + .../protobuf/encoding/prototext/decode.go | 791 + .../protobuf/encoding/prototext/doc.go | 7 + .../protobuf/encoding/prototext/encode.go | 433 + .../protobuf/encoding/protowire/wire.go | 538 + .../protobuf/internal/descfmt/stringer.go | 316 + .../protobuf/internal/descopts/options.go | 29 + .../protobuf/internal/detrand/rand.go | 61 + .../internal/encoding/defval/default.go | 213 + .../encoding/messageset/messageset.go | 258 + .../protobuf/internal/encoding/tag/tag.go | 207 + .../protobuf/internal/encoding/text/decode.go | 665 + .../internal/encoding/text/decode_number.go | 190 + .../internal/encoding/text/decode_string.go | 161 + .../internal/encoding/text/decode_token.go | 373 + .../protobuf/internal/encoding/text/doc.go | 29 + .../protobuf/internal/encoding/text/encode.go | 267 + .../protobuf/internal/errors/errors.go | 89 + .../protobuf/internal/errors/is_go112.go | 39 + 
.../protobuf/internal/errors/is_go113.go | 12 + .../protobuf/internal/fieldsort/fieldsort.go | 40 + .../protobuf/internal/filedesc/build.go | 155 + .../protobuf/internal/filedesc/desc.go | 614 + .../protobuf/internal/filedesc/desc_init.go | 471 + .../protobuf/internal/filedesc/desc_lazy.go | 704 + .../protobuf/internal/filedesc/desc_list.go | 282 + .../internal/filedesc/desc_list_gen.go | 345 + .../protobuf/internal/filedesc/placeholder.go | 107 + .../protobuf/internal/filetype/build.go | 297 + .../protobuf/internal/flags/flags.go | 24 + .../internal/flags/proto_legacy_disable.go | 9 + .../internal/flags/proto_legacy_enable.go | 9 + .../protobuf/internal/genid/any_gen.go | 34 + .../protobuf/internal/genid/api_gen.go | 106 + .../protobuf/internal/genid/descriptor_gen.go | 829 + .../protobuf/internal/genid/doc.go | 11 + .../protobuf/internal/genid/duration_gen.go | 34 + .../protobuf/internal/genid/empty_gen.go | 19 + .../protobuf/internal/genid/field_mask_gen.go | 31 + .../protobuf/internal/genid/goname.go | 25 + .../protobuf/internal/genid/map_entry.go | 16 + .../internal/genid/source_context_gen.go | 31 + .../protobuf/internal/genid/struct_gen.go | 116 + .../protobuf/internal/genid/timestamp_gen.go | 34 + .../protobuf/internal/genid/type_gen.go | 184 + .../protobuf/internal/genid/wrappers.go | 13 + .../protobuf/internal/genid/wrappers_gen.go | 175 + .../protobuf/internal/impl/api_export.go | 177 + .../protobuf/internal/impl/checkinit.go | 141 + .../protobuf/internal/impl/codec_extension.go | 223 + .../protobuf/internal/impl/codec_field.go | 828 + .../protobuf/internal/impl/codec_gen.go | 5637 +++ .../protobuf/internal/impl/codec_map.go | 389 + .../protobuf/internal/impl/codec_map_go111.go | 37 + .../protobuf/internal/impl/codec_map_go112.go | 11 + .../protobuf/internal/impl/codec_message.go | 159 + .../internal/impl/codec_messageset.go | 120 + .../protobuf/internal/impl/codec_reflect.go | 209 + .../protobuf/internal/impl/codec_tables.go | 557 + .../protobuf/internal/impl/codec_unsafe.go | 17 + .../protobuf/internal/impl/convert.go | 467 + .../protobuf/internal/impl/convert_list.go | 141 + .../protobuf/internal/impl/convert_map.go | 121 + .../protobuf/internal/impl/decode.go | 274 + .../protobuf/internal/impl/encode.go | 199 + .../protobuf/internal/impl/enum.go | 21 + .../protobuf/internal/impl/extension.go | 156 + .../protobuf/internal/impl/legacy_enum.go | 219 + .../protobuf/internal/impl/legacy_export.go | 92 + .../internal/impl/legacy_extension.go | 175 + .../protobuf/internal/impl/legacy_file.go | 81 + .../protobuf/internal/impl/legacy_message.go | 502 + .../protobuf/internal/impl/merge.go | 176 + .../protobuf/internal/impl/merge_gen.go | 209 + .../protobuf/internal/impl/message.go | 215 + .../protobuf/internal/impl/message_reflect.go | 364 + .../internal/impl/message_reflect_field.go | 466 + .../internal/impl/message_reflect_gen.go | 249 + .../protobuf/internal/impl/pointer_reflect.go | 177 + .../protobuf/internal/impl/pointer_unsafe.go | 173 + .../protobuf/internal/impl/validate.go | 576 + .../protobuf/internal/impl/weak.go | 74 + .../protobuf/internal/mapsort/mapsort.go | 43 + .../protobuf/internal/pragma/pragma.go | 29 + .../protobuf/internal/set/ints.go | 58 + .../protobuf/internal/strs/strings.go | 196 + .../protobuf/internal/strs/strings_pure.go | 27 + .../protobuf/internal/strs/strings_unsafe.go | 94 + .../protobuf/internal/version/version.go | 79 + .../protobuf/proto/checkinit.go | 71 + .../protobuf/proto/decode.go | 274 + .../protobuf/proto/decode_gen.go | 603 + 
.../google.golang.org/protobuf/proto/doc.go | 94 + .../protobuf/proto/encode.go | 346 + .../protobuf/proto/encode_gen.go | 97 + .../google.golang.org/protobuf/proto/equal.go | 154 + .../protobuf/proto/extension.go | 92 + .../google.golang.org/protobuf/proto/merge.go | 139 + .../protobuf/proto/messageset.go | 88 + .../google.golang.org/protobuf/proto/proto.go | 34 + .../protobuf/proto/proto_methods.go | 19 + .../protobuf/proto/proto_reflect.go | 19 + .../google.golang.org/protobuf/proto/reset.go | 43 + .../google.golang.org/protobuf/proto/size.go | 97 + .../protobuf/proto/size_gen.go | 55 + .../protobuf/proto/wrappers.go | 29 + .../protobuf/reflect/protodesc/desc.go | 275 + .../protobuf/reflect/protodesc/desc_init.go | 248 + .../reflect/protodesc/desc_resolve.go | 286 + .../reflect/protodesc/desc_validate.go | 371 + .../protobuf/reflect/protodesc/proto.go | 242 + .../protobuf/reflect/protoreflect/methods.go | 77 + .../protobuf/reflect/protoreflect/proto.go | 504 + .../protobuf/reflect/protoreflect/source.go | 52 + .../protobuf/reflect/protoreflect/type.go | 631 + .../protobuf/reflect/protoreflect/value.go | 285 + .../reflect/protoreflect/value_pure.go | 59 + .../reflect/protoreflect/value_union.go | 411 + .../reflect/protoreflect/value_unsafe.go | 98 + .../reflect/protoregistry/registry.go | 800 + .../protobuf/runtime/protoiface/legacy.go | 15 + .../protobuf/runtime/protoiface/methods.go | 167 + .../protobuf/runtime/protoimpl/impl.go | 44 + .../protobuf/runtime/protoimpl/version.go | 56 + .../types/descriptorpb/descriptor.pb.go | 4040 ++ .../protobuf/types/known/anypb/any.pb.go | 494 + .../types/known/durationpb/duration.pb.go | 379 + .../protobuf/types/known/emptypb/empty.pb.go | 168 + .../types/known/timestamppb/timestamp.pb.go | 381 + .../protobuf/types/pluginpb/plugin.pb.go | 636 + vendor/gopkg.in/yaml.v2/readerc.go | 2 +- vendor/gopkg.in/yaml.v2/resolve.go | 2 +- vendor/gopkg.in/yaml.v2/sorter.go | 2 +- vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY | 60 +- vendor/honnef.co/go/tools/code/code.go | 481 + vendor/honnef.co/go/tools/config/config.go | 79 +- .../honnef.co/go/tools/deprecated/stdlib.go | 77 +- vendor/honnef.co/go/tools/edit/edit.go | 67 + vendor/honnef.co/go/tools/facts/generated.go | 7 + vendor/honnef.co/go/tools/facts/purity.go | 78 +- vendor/honnef.co/go/tools/functions/loops.go | 12 +- vendor/honnef.co/go/tools/functions/pure.go | 46 - vendor/honnef.co/go/tools/functions/stub.go | 32 + .../go/tools/functions/terminates.go | 60 +- .../go/tools/internal/cache/cache.go | 32 +- .../tools/internal/passes/buildir/buildir.go | 113 + .../internal/passes/buildssa/buildssa.go | 116 - .../go/tools/internal/renameio/renameio.go | 56 +- .../go/tools/internal/robustio/robustio.go | 53 + .../internal/robustio/robustio_darwin.go | 29 + .../tools/internal/robustio/robustio_flaky.go | 93 + .../tools/internal/robustio/robustio_other.go | 28 + .../internal/robustio/robustio_windows.go | 33 + .../go/tools/internal/sharedcheck/lint.go | 21 +- vendor/honnef.co/go/tools/{ssa => ir}/LICENSE | 0 .../go/tools/{ssa => ir}/blockopt.go | 50 +- .../honnef.co/go/tools/{ssa => ir}/builder.go | 1095 +- .../honnef.co/go/tools/{ssa => ir}/const.go | 48 +- .../honnef.co/go/tools/{ssa => ir}/create.go | 65 +- vendor/honnef.co/go/tools/{ssa => ir}/doc.go | 58 +- vendor/honnef.co/go/tools/{ssa => ir}/dom.go | 350 +- vendor/honnef.co/go/tools/{ssa => ir}/emit.go | 181 +- vendor/honnef.co/go/tools/ir/exits.go | 271 + vendor/honnef.co/go/tools/{ssa => ir}/func.go | 516 +- vendor/honnef.co/go/tools/ir/html.go | 
1124 + .../go/tools/{ssa => ir}/identical.go | 2 +- .../go/tools/{ssa => ir}/identical_17.go | 2 +- vendor/honnef.co/go/tools/ir/irutil/load.go | 183 + vendor/honnef.co/go/tools/ir/irutil/switch.go | 264 + vendor/honnef.co/go/tools/ir/irutil/util.go | 70 + vendor/honnef.co/go/tools/ir/irutil/visit.go | 79 + vendor/honnef.co/go/tools/ir/lift.go | 1063 + .../honnef.co/go/tools/{ssa => ir}/lvalue.go | 55 +- .../honnef.co/go/tools/{ssa => ir}/methods.go | 2 +- vendor/honnef.co/go/tools/{ssa => ir}/mode.go | 40 +- .../honnef.co/go/tools/{ssa => ir}/print.go | 243 +- .../honnef.co/go/tools/{ssa => ir}/sanity.go | 70 +- .../honnef.co/go/tools/{ssa => ir}/source.go | 69 +- vendor/honnef.co/go/tools/{ssa => ir}/ssa.go | 643 +- .../go/tools/{ssa => ir}/staticcheck.conf | 0 vendor/honnef.co/go/tools/{ssa => ir}/util.go | 32 +- .../go/tools/{ssa => ir}/wrappers.go | 54 +- vendor/honnef.co/go/tools/ir/write.go | 5 + vendor/honnef.co/go/tools/lint/lint.go | 78 +- .../go/tools/lint/lintdsl/lintdsl.go | 402 +- .../go/tools/lint/lintutil/format/format.go | 43 +- .../honnef.co/go/tools/lint/lintutil/util.go | 70 +- vendor/honnef.co/go/tools/lint/runner.go | 320 +- vendor/honnef.co/go/tools/lint/stats.go | 18 + vendor/honnef.co/go/tools/loader/loader.go | 43 +- vendor/honnef.co/go/tools/pattern/convert.go | 242 + vendor/honnef.co/go/tools/pattern/doc.go | 273 + vendor/honnef.co/go/tools/pattern/fuzz.go | 50 + vendor/honnef.co/go/tools/pattern/lexer.go | 221 + vendor/honnef.co/go/tools/pattern/match.go | 513 + vendor/honnef.co/go/tools/pattern/parser.go | 455 + vendor/honnef.co/go/tools/pattern/pattern.go | 497 + vendor/honnef.co/go/tools/report/report.go | 184 + vendor/honnef.co/go/tools/simple/analysis.go | 183 +- vendor/honnef.co/go/tools/simple/doc.go | 118 +- vendor/honnef.co/go/tools/simple/lint.go | 1518 +- vendor/honnef.co/go/tools/ssa/lift.go | 657 - vendor/honnef.co/go/tools/ssa/testmain.go | 271 - vendor/honnef.co/go/tools/ssa/write.go | 5 - vendor/honnef.co/go/tools/ssautil/ssautil.go | 58 - .../go/tools/staticcheck/CONTRIBUTING.md | 15 - .../go/tools/staticcheck/analysis.go | 382 +- .../go/tools/staticcheck/buildtag.go | 4 +- vendor/honnef.co/go/tools/staticcheck/doc.go | 264 +- .../go/tools/staticcheck/knowledge.go | 25 - vendor/honnef.co/go/tools/staticcheck/lint.go | 1635 +- .../honnef.co/go/tools/staticcheck/rules.go | 72 +- .../go/tools/staticcheck/vrp/channel.go | 73 - .../honnef.co/go/tools/staticcheck/vrp/int.go | 476 - .../go/tools/staticcheck/vrp/slice.go | 273 - .../go/tools/staticcheck/vrp/string.go | 258 - .../honnef.co/go/tools/staticcheck/vrp/vrp.go | 1056 - .../honnef.co/go/tools/stylecheck/analysis.go | 90 +- vendor/honnef.co/go/tools/stylecheck/doc.go | 103 +- vendor/honnef.co/go/tools/stylecheck/lint.go | 493 +- vendor/honnef.co/go/tools/stylecheck/names.go | 180 +- vendor/honnef.co/go/tools/unused/unused.go | 302 +- vendor/honnef.co/go/tools/version/version.go | 2 +- vendor/modules.txt | 232 +- vendor/sourcegraph.com/sqs/pbtypes/html.pb.go | 14 +- .../sqs/pbtypes/timestamp.pb.go | 14 +- vendor/sourcegraph.com/sqs/pbtypes/void.pb.go | 14 +- 1645 files changed, 430423 insertions(+), 230463 deletions(-) create mode 100644 aws/resource_aws_quicksight_group_membership.go create mode 100644 aws/resource_aws_quicksight_iam_policy_assignment.go create mode 100644 aws/resource_aws_quicksight_namespace.go create mode 100644 vendor/cloud.google.com/go/.gitignore create mode 100644 vendor/cloud.google.com/go/CHANGES.md create mode 100644 vendor/cloud.google.com/go/CODE_OF_CONDUCT.md 
create mode 100644 vendor/cloud.google.com/go/CONTRIBUTING.md create mode 100644 vendor/cloud.google.com/go/README.md create mode 100644 vendor/cloud.google.com/go/RELEASING.md create mode 100644 vendor/cloud.google.com/go/doc.go create mode 100644 vendor/cloud.google.com/go/go.mod create mode 100644 vendor/cloud.google.com/go/go.sum create mode 100644 vendor/cloud.google.com/go/internal/.repo-metadata-full.json create mode 100644 vendor/cloud.google.com/go/internal/README.md create mode 100644 vendor/cloud.google.com/go/storage/CHANGES.md rename vendor/{github.com/pquerna/otp => cloud.google.com/go/storage}/LICENSE (100%) create mode 100644 vendor/cloud.google.com/go/storage/go.mod create mode 100644 vendor/cloud.google.com/go/storage/go.sum create mode 100644 vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go create mode 100644 vendor/cloud.google.com/go/storage/post_policy_v4.go create mode 100644 vendor/cloud.google.com/go/tools.go delete mode 100644 vendor/github.com/agl/ed25519/ed25519.go delete mode 100644 vendor/github.com/agl/ed25519/edwards25519/const.go delete mode 100644 vendor/github.com/agl/ed25519/edwards25519/edwards25519.go rename vendor/github.com/aws/aws-sdk-go/{service/s3/internal => internal/s3shared}/arn/accesspoint_arn.go (54%) rename vendor/github.com/aws/aws-sdk-go/{service/s3/internal => internal/s3shared}/arn/arn.go (75%) create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go rename vendor/github.com/aws/aws-sdk-go/internal/{ => s3shared}/s3err/error.go (100%) create mode 100644 vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3control/endpoint.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3control/endpoint_builder.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3control/validate.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ssm/waiters.go delete mode 100644 vendor/github.com/beevik/etree/.travis.yml delete mode 100644 vendor/github.com/beevik/etree/CONTRIBUTORS delete mode 100644 vendor/github.com/beevik/etree/LICENSE delete mode 100644 vendor/github.com/beevik/etree/README.md delete mode 100644 vendor/github.com/beevik/etree/RELEASE_NOTES.md delete mode 100644 vendor/github.com/beevik/etree/etree.go delete mode 100644 vendor/github.com/beevik/etree/helpers.go delete mode 100644 vendor/github.com/beevik/etree/path.go delete mode 100644 vendor/github.com/bflad/tfproviderlint/passes/helper/resource/testmatchresourceattrcallexpr/testmatchresourceattrcallexpr.go delete mode 100644 vendor/github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatagetokexistscallexpr/resourcedatagetokexistscallexpr.go delete mode 100644 vendor/github.com/bflad/tfproviderlint/passes/helper/schema/resourceinforesourceonly/resourceinforesourceonly.go delete mode 100644 vendor/github.com/bflad/tfproviderlint/xpasses/XR001/README.md delete mode 100644 vendor/github.com/bflad/tfproviderlint/xpasses/XR001/XR001.go delete mode 100644 vendor/github.com/bflad/tfproviderlint/xpasses/XR002/README.md 
delete mode 100644 vendor/github.com/bflad/tfproviderlint/xpasses/XR002/XR002.go delete mode 100644 vendor/github.com/bflad/tfproviderlint/xpasses/XR003/README.md delete mode 100644 vendor/github.com/bflad/tfproviderlint/xpasses/XR003/XR003.go delete mode 100644 vendor/github.com/bflad/tfproviderlint/xpasses/XR004/README.md delete mode 100644 vendor/github.com/bflad/tfproviderlint/xpasses/XR004/XR004.go delete mode 100644 vendor/github.com/bflad/tfproviderlint/xpasses/XS001/README.md delete mode 100644 vendor/github.com/bflad/tfproviderlint/xpasses/XS001/XS001.go delete mode 100644 vendor/github.com/bflad/tfproviderlint/xpasses/checks.go delete mode 100644 vendor/github.com/boombuler/barcode/.gitignore delete mode 100644 vendor/github.com/boombuler/barcode/README.md delete mode 100644 vendor/github.com/boombuler/barcode/barcode.go delete mode 100644 vendor/github.com/boombuler/barcode/go.mod delete mode 100644 vendor/github.com/boombuler/barcode/qr/alphanumeric.go delete mode 100644 vendor/github.com/boombuler/barcode/qr/automatic.go delete mode 100644 vendor/github.com/boombuler/barcode/qr/blocks.go delete mode 100644 vendor/github.com/boombuler/barcode/qr/encoder.go delete mode 100644 vendor/github.com/boombuler/barcode/qr/errorcorrection.go delete mode 100644 vendor/github.com/boombuler/barcode/qr/numeric.go delete mode 100644 vendor/github.com/boombuler/barcode/qr/qrcode.go delete mode 100644 vendor/github.com/boombuler/barcode/qr/unicode.go delete mode 100644 vendor/github.com/boombuler/barcode/qr/versioninfo.go delete mode 100644 vendor/github.com/boombuler/barcode/scaledbarcode.go delete mode 100644 vendor/github.com/boombuler/barcode/utils/base1dcode.go delete mode 100644 vendor/github.com/boombuler/barcode/utils/bitlist.go delete mode 100644 vendor/github.com/boombuler/barcode/utils/galoisfield.go delete mode 100644 vendor/github.com/boombuler/barcode/utils/gfpoly.go delete mode 100644 vendor/github.com/boombuler/barcode/utils/reedsolomon.go delete mode 100644 vendor/github.com/boombuler/barcode/utils/runeint.go create mode 100644 vendor/github.com/golang/groupcache/LICENSE create mode 100644 vendor/github.com/golang/groupcache/lru/lru.go create mode 100644 vendor/github.com/golang/protobuf/internal/gengogrpc/grpc.go create mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go delete mode 100644 vendor/github.com/golang/protobuf/proto/clone.go delete mode 100644 vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go delete mode 100644 vendor/github.com/golang/protobuf/proto/encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/equal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/lib.go delete mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/proto.go create mode 100644 vendor/github.com/golang/protobuf/proto/registry.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_marshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_merge.go delete mode 100644 vendor/github.com/golang/protobuf/proto/table_unmarshal.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go create mode 100644 
vendor/github.com/golang/protobuf/proto/text_encode.go delete mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/golang/protobuf/proto/wire.go create mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go delete mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto create mode 100644 vendor/github.com/golang/protobuf/protoc-gen-go/main.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.proto delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.proto create mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto delete mode 100644 vendor/github.com/golang/snappy/.gitignore delete mode 100644 vendor/github.com/golang/snappy/AUTHORS delete mode 100644 vendor/github.com/golang/snappy/CONTRIBUTORS delete mode 100644 vendor/github.com/golang/snappy/README delete mode 100644 vendor/github.com/golang/snappy/decode.go delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.go delete mode 100644 vendor/github.com/golang/snappy/decode_amd64.s delete mode 100644 vendor/github.com/golang/snappy/decode_other.go delete mode 100644 vendor/github.com/golang/snappy/encode.go delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.go delete mode 100644 vendor/github.com/golang/snappy/encode_amd64.s delete mode 100644 vendor/github.com/golang/snappy/encode_other.go delete mode 100644 vendor/github.com/golang/snappy/go.mod delete mode 100644 vendor/github.com/golang/snappy/snappy.go create mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/name.go create mode 100644 vendor/github.com/google/go-cmp/cmp/report_references.go rename vendor/github.com/hashicorp/{golang-lru => go-checkpoint}/LICENSE (50%) create mode 100644 vendor/github.com/hashicorp/go-checkpoint/README.md create mode 100644 vendor/github.com/hashicorp/go-checkpoint/check.go create mode 100644 vendor/github.com/hashicorp/go-checkpoint/go.mod create mode 100644 vendor/github.com/hashicorp/go-checkpoint/go.sum create mode 100644 vendor/github.com/hashicorp/go-checkpoint/telemetry.go create mode 100644 vendor/github.com/hashicorp/go-checkpoint/versions.go rename vendor/github.com/{boombuler/barcode => hashicorp/go-cty}/LICENSE (94%) create mode 100644 vendor/github.com/hashicorp/go-cty/cty/capsule.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/collection.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/doc.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/public.go create mode 100644 
vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/convert/unify.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/doc.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/element_iterator.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/error.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/gob.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/gocty/in.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/gocty/out.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/helper.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/doc.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/marshal.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/simple.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/json/value.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/list_type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/map_type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/marks.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/null.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/object_type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/path.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/path_set.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/primitive_type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set/gob.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set/iterator.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set/ops.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set/rules.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set/set.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set_helper.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set_internals.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/set_type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/tuple_type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/type.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/type_conform.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/types_to_register.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/unknown.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go create mode 100644 
vendor/github.com/hashicorp/go-cty/cty/value.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/value_init.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/value_ops.go create mode 100644 vendor/github.com/hashicorp/go-cty/cty/walk.go delete mode 100644 vendor/github.com/hashicorp/go-getter/.travis.yml create mode 100644 vendor/github.com/hashicorp/go-plugin/grpc_stdio.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go create mode 100644 vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto delete mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go delete mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/LICENSE create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfinstall/pubkey.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfinstall/tfinstall.go create mode 100644 vendor/github.com/hashicorp/terraform-exec/tfinstall/version.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/doc.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/encryption/encryption.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/helper/mutexkv/mutexkv.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/encrypt_decrypt.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/flag.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/keybase.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/compressutil/compress.go delete mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/jsonutil/json.go rename vendor/github.com/hashicorp/{vault => terraform-plugin-sdk/v2}/LICENSE (50%) create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/id.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go create mode 
100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/README.md create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/data_source_resource_shim.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/equal.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_multi.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/getsource_string.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data_get_source.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype_string.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/doc.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resource.go create 
mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/doc.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/nestingmode_string.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode/hashcode.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/context/context.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/doc.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/grpc_provider.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/unknown.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/doc.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/severity_string.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/generate.sh create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/tfplugin5.pb.go create mode 100644 
vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/tfplugin5.proto create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/grpc_provider.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode_string.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/CHANGELOG.md create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/LICENSE create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/README.md create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/config.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/doc.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/go.mod create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/go.sum create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/guard.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/helper.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/plugin.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/terraform.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/util.go create mode 100644 vendor/github.com/hashicorp/terraform-plugin-test/v2/working_dir.go delete mode 100644 vendor/github.com/hashicorp/vault/helper/compressutil/compress.go delete mode 100644 vendor/github.com/hashicorp/vault/helper/jsonutil/json.go delete mode 100644 vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go delete mode 100644 vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go delete mode 100644 vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go delete mode 100644 vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go create mode 100644 vendor/github.com/jmespath/go-jmespath/go.mod create mode 100644 vendor/github.com/jmespath/go-jmespath/go.sum create mode 100644 vendor/github.com/jstemmer/go-junit-report/.gitignore create mode 100644 vendor/github.com/jstemmer/go-junit-report/.travis.yml create mode 100644 vendor/github.com/jstemmer/go-junit-report/LICENSE create mode 100644 
vendor/github.com/jstemmer/go-junit-report/README.md create mode 100644 vendor/github.com/jstemmer/go-junit-report/formatter/formatter.go create mode 100644 vendor/github.com/jstemmer/go-junit-report/go-junit-report.go create mode 100644 vendor/github.com/jstemmer/go-junit-report/go.mod create mode 100644 vendor/github.com/jstemmer/go-junit-report/parser/parser.go delete mode 100644 vendor/github.com/keybase/go-crypto/brainpool/brainpool.go delete mode 100644 vendor/github.com/keybase/go-crypto/brainpool/rcurve.go delete mode 100644 vendor/github.com/keybase/go-crypto/cast5/cast5.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/keys.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/config.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/patch.sh delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/read.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/sig-v3.patch delete mode 100644 vendor/github.com/keybase/go-crypto/openpgp/write.go delete mode 100644 vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go delete mode 100644 vendor/github.com/keybase/go-crypto/rsa/pss.go delete mode 100644 vendor/github.com/keybase/go-crypto/rsa/rsa.go delete mode 100644 vendor/github.com/pierrec/lz4/.gitignore delete mode 100644 vendor/github.com/pierrec/lz4/.travis.yml delete mode 100644 vendor/github.com/pierrec/lz4/LICENSE delete mode 100644 vendor/github.com/pierrec/lz4/README.md delete mode 100644 vendor/github.com/pierrec/lz4/block.go delete mode 100644 
vendor/github.com/pierrec/lz4/debug.go delete mode 100644 vendor/github.com/pierrec/lz4/debug_stub.go delete mode 100644 vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4_go1.10.go delete mode 100644 vendor/github.com/pierrec/lz4/lz4_notgo1.10.go delete mode 100644 vendor/github.com/pierrec/lz4/reader.go delete mode 100644 vendor/github.com/pierrec/lz4/writer.go delete mode 100644 vendor/github.com/pquerna/otp/.travis.yml delete mode 100644 vendor/github.com/pquerna/otp/NOTICE delete mode 100644 vendor/github.com/pquerna/otp/README.md delete mode 100644 vendor/github.com/pquerna/otp/doc.go delete mode 100644 vendor/github.com/pquerna/otp/go.mod delete mode 100644 vendor/github.com/pquerna/otp/go.sum delete mode 100644 vendor/github.com/pquerna/otp/hotp/hotp.go delete mode 100644 vendor/github.com/pquerna/otp/otp.go delete mode 100644 vendor/github.com/pquerna/otp/totp/totp.go create mode 100644 vendor/github.com/ulikunitz/xz/fox-check-none.xz create mode 100644 vendor/github.com/ulikunitz/xz/go.mod create mode 100644 vendor/github.com/ulikunitz/xz/none-check.go delete mode 100644 vendor/go.opencensus.io/Gopkg.lock delete mode 100644 vendor/go.opencensus.io/Gopkg.toml create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_arm64.go rename vendor/golang.org/x/crypto/{internal/chacha20/asm_arm64.s => chacha20/chacha_arm64.s} (99%) create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_generic.go rename vendor/golang.org/x/crypto/{internal => }/chacha20/chacha_noasm.go (50%) create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go rename vendor/golang.org/x/crypto/{internal/chacha20/asm_ppc64le.s => chacha20/chacha_ppc64le.s} (95%) create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.go rename vendor/golang.org/x/crypto/{internal => }/chacha20/chacha_s390x.s (87%) rename vendor/golang.org/x/crypto/{internal => }/chacha20/xor.go (73%) delete mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.h delete mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s rename vendor/golang.org/x/crypto/curve25519/{mont25519_amd64.go => curve25519_amd64.go} (99%) rename vendor/golang.org/x/crypto/curve25519/{ladderstep_amd64.s => curve25519_amd64.s} (76%) create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_generic.go create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/doc.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/freeze_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/mul_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/square_amd64.s delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go delete mode 100644 vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go create mode 100644 vendor/golang.org/x/crypto/poly1305/bits_compat.go create mode 100644 vendor/golang.org/x/crypto/poly1305/bits_go1.13.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.s delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_noasm.go delete mode 100644 
vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s create mode 100644 vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go create mode 100644 vendor/golang.org/x/lint/.travis.yml rename vendor/{honnef.co/go/tools/simple => golang.org/x/lint}/CONTRIBUTING.md (54%) rename vendor/{github.com/agl/ed25519 => golang.org/x/lint}/LICENSE (96%) create mode 100644 vendor/golang.org/x/lint/README.md create mode 100644 vendor/golang.org/x/lint/go.mod create mode 100644 vendor/golang.org/x/lint/go.sum create mode 100644 vendor/golang.org/x/lint/golint/golint.go create mode 100644 vendor/golang.org/x/lint/golint/import.go create mode 100644 vendor/golang.org/x/lint/golint/importcomment.go create mode 100644 vendor/golang.org/x/lint/lint.go rename vendor/golang.org/x/sys/cpu/{cpu_aix_ppc64.go => cpu_aix.go} (96%) create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go rename vendor/golang.org/x/sys/cpu/{cpu_gccgo.c => cpu_gccgo_x86.c} (100%) rename vendor/golang.org/x/sys/cpu/{cpu_gccgo.go => cpu_gccgo_x86.go} (100%) create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go create mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go create mode 100644 vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go create mode 100644 vendor/golang.org/x/sys/unix/fdset.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_illumos.go create mode 100644 vendor/golang.org/x/sys/unix/zerrors_linux.go rename vendor/golang.org/x/sys/unix/{zptracearm_linux.go => zptrace_armnn_linux.go} (93%) create mode 100644 vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go rename vendor/golang.org/x/sys/unix/{zptracemips_linux.go => zptrace_mipsnn_linux.go} (93%) rename vendor/golang.org/x/sys/unix/{zptracemipsle_linux.go => zptrace_mipsnnle_linux.go} (93%) rename vendor/golang.org/x/sys/unix/{zptrace386_linux.go => zptrace_x86_linux.go} (95%) create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_linux.go create mode 100644 vendor/golang.org/x/sys/unix/ztypes_linux.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables12.0.0.go create mode 100644 vendor/golang.org/x/text/width/tables12.0.0.go create mode 100644 vendor/golang.org/x/tools/cmd/goimports/doc.go create mode 100644 vendor/golang.org/x/tools/cmd/goimports/goimports.go create mode 100644 vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go create mode 100644 vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go delete mode 100644 vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go create mode 100644 vendor/golang.org/x/tools/internal/analysisinternal/analysis.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/event.go create mode 100644 vendor/golang.org/x/tools/internal/event/core/export.go create mode 100644 
vendor/golang.org/x/tools/internal/event/core/fast.go create mode 100644 vendor/golang.org/x/tools/internal/event/doc.go create mode 100644 vendor/golang.org/x/tools/internal/event/event.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/keys.go create mode 100644 vendor/golang.org/x/tools/internal/event/keys/standard.go create mode 100644 vendor/golang.org/x/tools/internal/event/label/label.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/vendor.go create mode 100644 vendor/golang.org/x/tools/internal/span/parse.go create mode 100644 vendor/golang.org/x/tools/internal/span/span.go create mode 100644 vendor/golang.org/x/tools/internal/span/token.go create mode 100644 vendor/golang.org/x/tools/internal/span/token111.go create mode 100644 vendor/golang.org/x/tools/internal/span/token112.go create mode 100644 vendor/golang.org/x/tools/internal/span/uri.go create mode 100644 vendor/golang.org/x/tools/internal/span/utf16.go delete mode 100644 vendor/golang.org/x/tools/internal/testenv/testenv.go delete mode 100644 vendor/golang.org/x/tools/internal/testenv/testenv_112.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/types.go delete mode 100644 vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE create mode 100644 vendor/google.golang.org/api/internal/conn_pool.go rename vendor/google.golang.org/api/{ => internal}/gensupport/buffer.go (100%) rename vendor/google.golang.org/api/{ => internal}/gensupport/doc.go (100%) rename vendor/google.golang.org/api/{ => internal}/gensupport/json.go (100%) rename vendor/google.golang.org/api/{ => internal}/gensupport/jsonfloat.go (65%) rename vendor/google.golang.org/api/{ => internal}/gensupport/media.go (98%) rename vendor/google.golang.org/api/{ => internal}/gensupport/params.go (100%) rename vendor/google.golang.org/api/{ => internal}/gensupport/resumable.go (88%) create mode 100644 vendor/google.golang.org/api/internal/gensupport/retryable_linux.go rename vendor/google.golang.org/api/{ => internal}/gensupport/send.go (54%) create mode 100644 vendor/google.golang.org/api/internal/gensupport/version.go delete mode 100644 vendor/google.golang.org/api/internal/pool.go rename vendor/{github.com/keybase/go-crypto => google.golang.org/api/internal/third_party/uritemplates}/LICENSE (96%) create mode 100644 vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA rename vendor/google.golang.org/api/{googleapi/internal => internal/third_party}/uritemplates/uritemplates.go (98%) rename vendor/google.golang.org/api/{googleapi/internal => internal/third_party}/uritemplates/utils.go (100%) create mode 100644 vendor/google.golang.org/api/option/internaloption/internaloption.go create mode 100644 vendor/google.golang.org/api/transport/cert/default_cert.go create mode 100644 vendor/google.golang.org/api/transport/http/default_transport_go113.go create mode 100644 vendor/google.golang.org/api/transport/http/default_transport_not_go113.go create mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md create mode 100644 vendor/google.golang.org/grpc/GOVERNANCE.md create mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md create mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go delete mode 100644 vendor/google.golang.org/grpc/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/state/state.go delete 
mode 100644 vendor/google.golang.org/grpc/balancer_v1_wrapper.go rename vendor/google.golang.org/grpc/credentials/{tls13.go => go12.go} (100%) create mode 100644 vendor/google.golang.org/grpc/credentials/tls.go create mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go delete mode 100644 vendor/google.golang.org/grpc/health/regenerate.sh delete mode 100644 vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go rename vendor/google.golang.org/grpc/internal/{binarylog/util.go => grpcutil/method.go} (82%) create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/target.go rename vendor/google.golang.org/grpc/{ => internal}/resolver/dns/dns_resolver.go (71%) create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/go113.go rename vendor/google.golang.org/grpc/{ => internal}/resolver/passthrough/passthrough.go (94%) create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/status/status.go delete mode 100644 vendor/google.golang.org/grpc/naming/dns_resolver.go delete mode 100644 vendor/google.golang.org/grpc/naming/naming.go create mode 100644 vendor/google.golang.org/grpc/reflection/README.md create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto create mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go create mode 100644 vendor/google.golang.org/grpc/reflection/serverreflection.go create mode 100644 vendor/google.golang.org/grpc/regenerate.sh rename vendor/{github.com/keybase/go-crypto => google.golang.org/protobuf}/AUTHORS (74%) rename vendor/{github.com/keybase/go-crypto => google.golang.org/protobuf}/CONTRIBUTORS (70%) rename vendor/{github.com/golang/snappy => google.golang.org/protobuf}/LICENSE (95%) rename vendor/{github.com/keybase/go-crypto => google.golang.org/protobuf}/PATENTS (100%) create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go create mode 100644 vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go create mode 100644 vendor/google.golang.org/protobuf/compiler/protogen/protogen.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go create mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go create mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go create mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go create mode 100644 
vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/errors.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go create mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/api_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/duration_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/empty_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/goname.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/map_entry.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/type_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go create mode 100644 
vendor/google.golang.org/protobuf/internal/impl/codec_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go create mode 100644 vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go create mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go create mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/doc.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go create mode 100644 
vendor/google.golang.org/protobuf/proto/equal.go create mode 100644 vendor/google.golang.org/protobuf/proto/extension.go create mode 100644 vendor/google.golang.org/protobuf/proto/merge.go create mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go create mode 100644 vendor/google.golang.org/protobuf/proto/reset.go create mode 100644 vendor/google.golang.org/protobuf/proto/size.go create mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go create mode 100644 vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go create mode 100644 vendor/honnef.co/go/tools/code/code.go create mode 100644 vendor/honnef.co/go/tools/edit/edit.go delete mode 100644 vendor/honnef.co/go/tools/functions/pure.go create mode 100644 vendor/honnef.co/go/tools/functions/stub.go create mode 100644 vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go delete mode 100644 vendor/honnef.co/go/tools/internal/passes/buildssa/buildssa.go create mode 100644 vendor/honnef.co/go/tools/internal/robustio/robustio.go create mode 100644 vendor/honnef.co/go/tools/internal/robustio/robustio_darwin.go create mode 100644 vendor/honnef.co/go/tools/internal/robustio/robustio_flaky.go create mode 100644 vendor/honnef.co/go/tools/internal/robustio/robustio_other.go create mode 100644 
vendor/honnef.co/go/tools/internal/robustio/robustio_windows.go rename vendor/honnef.co/go/tools/{ssa => ir}/LICENSE (100%) rename vendor/honnef.co/go/tools/{ssa => ir}/blockopt.go (83%) rename vendor/honnef.co/go/tools/{ssa => ir}/builder.go (71%) rename vendor/honnef.co/go/tools/{ssa => ir}/const.go (85%) rename vendor/honnef.co/go/tools/{ssa => ir}/create.go (82%) rename vendor/honnef.co/go/tools/{ssa => ir}/doc.go (74%) rename vendor/honnef.co/go/tools/{ssa => ir}/dom.go (51%) rename vendor/honnef.co/go/tools/{ssa => ir}/emit.go (75%) create mode 100644 vendor/honnef.co/go/tools/ir/exits.go rename vendor/honnef.co/go/tools/{ssa => ir}/func.go (65%) create mode 100644 vendor/honnef.co/go/tools/ir/html.go rename vendor/honnef.co/go/tools/{ssa => ir}/identical.go (88%) rename vendor/honnef.co/go/tools/{ssa => ir}/identical_17.go (87%) create mode 100644 vendor/honnef.co/go/tools/ir/irutil/load.go create mode 100644 vendor/honnef.co/go/tools/ir/irutil/switch.go create mode 100644 vendor/honnef.co/go/tools/ir/irutil/util.go create mode 100644 vendor/honnef.co/go/tools/ir/irutil/visit.go create mode 100644 vendor/honnef.co/go/tools/ir/lift.go rename vendor/honnef.co/go/tools/{ssa => ir}/lvalue.go (59%) rename vendor/honnef.co/go/tools/{ssa => ir}/methods.go (99%) rename vendor/honnef.co/go/tools/{ssa => ir}/mode.go (65%) rename vendor/honnef.co/go/tools/{ssa => ir}/print.go (54%) rename vendor/honnef.co/go/tools/{ssa => ir}/sanity.go (89%) rename vendor/honnef.co/go/tools/{ssa => ir}/source.go (80%) rename vendor/honnef.co/go/tools/{ssa => ir}/ssa.go (78%) rename vendor/honnef.co/go/tools/{ssa => ir}/staticcheck.conf (100%) rename vendor/honnef.co/go/tools/{ssa => ir}/util.go (76%) rename vendor/honnef.co/go/tools/{ssa => ir}/wrappers.go (89%) create mode 100644 vendor/honnef.co/go/tools/ir/write.go create mode 100644 vendor/honnef.co/go/tools/pattern/convert.go create mode 100644 vendor/honnef.co/go/tools/pattern/doc.go create mode 100644 vendor/honnef.co/go/tools/pattern/fuzz.go create mode 100644 vendor/honnef.co/go/tools/pattern/lexer.go create mode 100644 vendor/honnef.co/go/tools/pattern/match.go create mode 100644 vendor/honnef.co/go/tools/pattern/parser.go create mode 100644 vendor/honnef.co/go/tools/pattern/pattern.go create mode 100644 vendor/honnef.co/go/tools/report/report.go delete mode 100644 vendor/honnef.co/go/tools/ssa/lift.go delete mode 100644 vendor/honnef.co/go/tools/ssa/testmain.go delete mode 100644 vendor/honnef.co/go/tools/ssa/write.go delete mode 100644 vendor/honnef.co/go/tools/ssautil/ssautil.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/CONTRIBUTING.md delete mode 100644 vendor/honnef.co/go/tools/staticcheck/knowledge.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/vrp/channel.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/vrp/int.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/vrp/slice.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/vrp/string.go delete mode 100644 vendor/honnef.co/go/tools/staticcheck/vrp/vrp.go diff --git a/aws/provider.go b/aws/provider.go index 1fbfbb493..f3792104c 100644 --- a/aws/provider.go +++ b/aws/provider.go @@ -172,6 +172,9 @@ func Provider() terraform.ResourceProvider { "aws_iam_role_policy": resourceAwsIamRolePolicy(), "aws_iam_role_policy_attachment": resourceAwsIamRolePolicyAttachment(), "aws_quicksight_data_source": resourceAwsQuickSightDataSource(), + "aws_quicksight_group_membership": resourceAwsQuickSightGroupMembership(), + 
"aws_quicksight_iam_policy_assignment": resourceAwsQuickSightIAMPolicyAssignment(), + "aws_quicksight_namespace": resourceAwsQuickSightNamespace(), "aws_internet_gateway_detach": resourceAwsInternetGatewayDetach(), "aws_internet_gateway_delete": resourceAwsInternetGatewayDelete(), }, diff --git a/aws/resource_aws_quicksight_group_membership.go b/aws/resource_aws_quicksight_group_membership.go new file mode 100644 index 000000000..96be63cd8 --- /dev/null +++ b/aws/resource_aws_quicksight_group_membership.go @@ -0,0 +1,177 @@ +package aws + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/quicksight" +) + +func resourceAwsQuickSightGroupMembership() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsQuickSightGroupMembershipCreate, + Read: resourceAwsQuickSightGroupMembershipRead, + Delete: resourceAwsQuickSightGroupMembershipDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + + "aws_account_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "member_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "group_name": { + Type: schema.TypeString, + Required: true, + //Optional: true, + ForceNew: true, + }, + + "namespace": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "default", + ValidateFunc: validation.StringInSlice([]string{ + "default", + }, false), + }, + }, + } +} + +func resourceAwsQuickSightGroupMembershipCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID := meta.(*AWSClient).accountid + namespace := d.Get("namespace").(string) + groupName := d.Get("group_name").(string) + + if v, ok := d.GetOk("aws_account_id"); ok { + awsAccountID = v.(string) + } + + createOpts := &quicksight.CreateGroupMembershipInput{ + AwsAccountId: aws.String(awsAccountID), + GroupName: aws.String(groupName), + MemberName: aws.String(d.Get("member_name").(string)), + Namespace: aws.String(namespace), + } + + resp, err := conn.CreateGroupMembership(createOpts) + if err != nil { + return fmt.Errorf("Error adding QuickSight user to group: %s", err) + } + + d.SetId(fmt.Sprintf("%s/%s/%s/%s", awsAccountID, namespace, groupName, aws.StringValue(resp.GroupMember.MemberName))) + + return resourceAwsQuickSightGroupMembershipRead(d, meta) +} + +func resourceAwsQuickSightGroupMembershipRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID, namespace, groupName, userName, err := resourceAwsQuickSightGroupMembershipParseID(d.Id()) + if err != nil { + return err + } + + listOpts := &quicksight.ListUserGroupsInput{ + AwsAccountId: aws.String(awsAccountID), + Namespace: aws.String(namespace), + UserName: aws.String(userName), + } + + found := false + + for { + resp, err := conn.ListUserGroups(listOpts) + if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] QuickSight User %s is not found", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error listing QuickSight User groups (%s): %s", d.Id(), err) + } + + for _, group := range resp.GroupList { + if *group.GroupName == groupName { + found = true + 
break + } + } + + if found || resp.NextToken == nil { + break + } + + listOpts.NextToken = resp.NextToken + } + + if found { + d.Set("aws_account_id", awsAccountID) + d.Set("namespace", namespace) + d.Set("member_name", userName) + d.Set("group_name", groupName) + } else { + log.Printf("[WARN] QuickSight User-group membership %s is not found", d.Id()) + d.SetId("") + } + + return nil +} + +func resourceAwsQuickSightGroupMembershipDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID, namespace, groupName, userName, err := resourceAwsQuickSightGroupMembershipParseID(d.Id()) + if err != nil { + return err + } + + deleteOpts := &quicksight.DeleteGroupMembershipInput{ + AwsAccountId: aws.String(awsAccountID), + Namespace: aws.String(namespace), + MemberName: aws.String(userName), + GroupName: aws.String(groupName), + } + + if _, err := conn.DeleteGroupMembership(deleteOpts); err != nil { + if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") { + return nil + } + return fmt.Errorf("Error deleting QuickSight User-group membership %s: %s", d.Id(), err) + } + + return nil +} + +func resourceAwsQuickSightGroupMembershipParseID(id string) (string, string, string, string, error) { + parts := strings.SplitN(id, "/", 4) + if len(parts) < 4 || parts[0] == "" || parts[1] == "" || parts[2] == "" || parts[3] == "" { + return "", "", "", "", fmt.Errorf("unexpected format of ID (%s), expected AWS_ACCOUNT_ID/NAMESPACE/GROUP_NAME/USER_NAME", id) + } + return parts[0], parts[1], parts[2], parts[3], nil +} \ No newline at end of file diff --git a/aws/resource_aws_quicksight_iam_policy_assignment.go b/aws/resource_aws_quicksight_iam_policy_assignment.go new file mode 100644 index 000000000..616d9186b --- /dev/null +++ b/aws/resource_aws_quicksight_iam_policy_assignment.go @@ -0,0 +1,272 @@ +package aws + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "log" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/quicksight" +) + +var resourceAwsQuickSighIAMPolicyAttachmentCUPendingStates = []string{ + quicksight.AssignmentStatusDisabled, + quicksight.AssignmentStatusDraft, + "", +} + +func resourceAwsQuickSightIAMPolicyAssignment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsQuickSightIAMPolicyAssignmentCreate, + Read: resourceAwsQuickSightIAMPolicyAssignmentRead, + Update: resourceAwsQuickSightIAMPolicyAssignmentUpdate, + Delete: resourceAwsQuickSightIAMPolicyAssignmentDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(60 * time.Second), + Read: schema.DefaultTimeout(60 * time.Second), + Update: schema.DefaultTimeout(60 * time.Second), + Delete: schema.DefaultTimeout(60 * time.Second), + }, + + Schema: map[string]*schema.Schema{ + "assignment_name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile("^[a-zA-Z0-9]*$"), + "The value may only contain alphanumeric value. 
Special characters are not allowed"),
+			},
+
+			"assignment_status": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"aws_account_id": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ForceNew: true,
+			},
+
+			"groups": {
+				Type:     schema.TypeSet,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+				Optional: true,
+			},
+
+			"users": {
+				Type:     schema.TypeSet,
+				Elem:     &schema.Schema{Type: schema.TypeString},
+				Set:      schema.HashString,
+				Optional: true,
+			},
+
+			"policy_arn": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+
+			"namespace": {
+				Type:     schema.TypeString,
+				Default:  "default",
+				Optional: true,
+				ValidateFunc: validation.StringInSlice([]string{
+					"default",
+				}, false),
+			},
+		},
+	}
+}
+
+func resourceAwsQuickSightIAMPolicyAssignmentCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).quicksightconn
+
+	// Default to the provider's account ID unless aws_account_id is set explicitly,
+	// matching the other QuickSight resources in this change.
+	awsAccountId := meta.(*AWSClient).accountid
+	namespace := d.Get("namespace").(string)
+	assignmentName := d.Get("assignment_name").(string)
+
+	if v, ok := d.GetOk("aws_account_id"); ok {
+		awsAccountId = v.(string)
+	}
+
+	identities := make(map[string][]*string)
+	if groupAttr := d.Get("groups").(*schema.Set); groupAttr.Len() > 0 {
+		identities["Group"] = expandStringList(groupAttr.List())
+	}
+
+	if userAttr := d.Get("users").(*schema.Set); userAttr.Len() > 0 {
+		identities["User"] = expandStringList(userAttr.List())
+	}
+
+	createOpts := &quicksight.CreateIAMPolicyAssignmentInput{
+		AssignmentName:   aws.String(assignmentName),
+		AssignmentStatus: aws.String(d.Get("assignment_status").(string)),
+		AwsAccountId:     aws.String(awsAccountId),
+		Identities:       identities,
+		Namespace:        aws.String(namespace),
+		PolicyArn:        aws.String(d.Get("policy_arn").(string)),
+	}
+
+	resp, err := conn.CreateIAMPolicyAssignment(createOpts)
+	if err != nil {
+		return fmt.Errorf("Error creating QuickSight IAM Policy Assignment: %s", err)
+	}
+
+	_, err = waitIAMPolicyAttachmentCreate(conn, awsAccountId, assignmentName, namespace, d.Timeout(schema.TimeoutCreate))
+	if err != nil {
+		return fmt.Errorf("Error waiting for QuickSight IAM Policy Assignment (%s) to become available: %s", assignmentName, err)
+	}
+
+	d.SetId(fmt.Sprintf("%s/%s/%s", awsAccountId, namespace, aws.StringValue(resp.AssignmentName)))
+	return resourceAwsQuickSightIAMPolicyAssignmentRead(d, meta)
+}
+
+// waitIAMPolicyAttachmentCreate blocks until the assignment reports ENABLED or the timeout elapses.
+func waitIAMPolicyAttachmentCreate(conn *quicksight.QuickSight, awsAccountId, assignmentName, namespace string, timeout time.Duration) (interface{}, error) {
+	stateChangeConf := &resource.StateChangeConf{
+		Pending: resourceAwsQuickSighIAMPolicyAttachmentCUPendingStates,
+		Target:  []string{quicksight.AssignmentStatusEnabled},
+		Refresh: iamPolicyAttachmentStateRefreshFunc(conn, awsAccountId, assignmentName, namespace),
+		Timeout: timeout,
+		Delay:   5 * time.Second,
+	}
+	return stateChangeConf.WaitForState()
+}
+
+// iamPolicyAttachmentStateRefreshFunc polls DescribeIAMPolicyAssignment and reports ENABLED once the assignment is active.
+func iamPolicyAttachmentStateRefreshFunc(conn *quicksight.QuickSight, awsAccountId, assignmentName, namespace string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		req := &quicksight.DescribeIAMPolicyAssignmentInput{
+			AssignmentName: aws.String(assignmentName),
+			AwsAccountId:   aws.String(awsAccountId),
+			Namespace:      aws.String(namespace),
+		}
+		resp, err := conn.DescribeIAMPolicyAssignment(req)
+		if err != nil {
+			return nil, "", err
+		}
+
+		assignmentId := resp.IAMPolicyAssignment.AssignmentId
+		state := ""
+		if aws.StringValue(resp.IAMPolicyAssignment.AssignmentStatus) == quicksight.AssignmentStatusEnabled {
+			state =
quicksight.AssignmentStatusEnabled + } + return assignmentId, state, nil + } +} + +func resourceAwsQuickSightIAMPolicyAssignmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID, namespace, assignmentName, err := resourceAwsQuickSightIAMPolicyAssignmentParseID(d.Id()) + if err != nil { + return err + } + + descOpts := &quicksight.DescribeIAMPolicyAssignmentInput{ + AssignmentName: aws.String(assignmentName), + AwsAccountId: aws.String(awsAccountID), + Namespace: aws.String(namespace), + } + + resp, err := conn.DescribeIAMPolicyAssignment(descOpts) + if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") { + log.Printf("[WARN] QuickSight IAM Policy Assignment %s not found", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error describing QuickSight IAM Policy Assignment (%s): %s", d.Id(), err) + } + + d.Set("aws_account_id", resp.IAMPolicyAssignment.AwsAccountId) + d.Set("namespace", namespace) + d.Set("assignment_id", resp.IAMPolicyAssignment.AssignmentId) + d.Set("assignment_name", resp.IAMPolicyAssignment.AssignmentName) + d.Set("assignment_status", resp.IAMPolicyAssignment.AssignmentStatus) + d.Set("identities", resp.IAMPolicyAssignment.Identities) + d.Set("groups", resp.IAMPolicyAssignment.Identities["group"]) + d.Set("users", resp.IAMPolicyAssignment.Identities["user"]) + d.Set("policy_arn", resp.IAMPolicyAssignment.PolicyArn) + + return nil +} + +func resourceAwsQuickSightIAMPolicyAssignmentUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + awsAccountID, namespace, assignmentName, err := resourceAwsQuickSightIAMPolicyAssignmentParseID(d.Id()) + if err != nil { + return err + } + + identities := make(map[string][]*string) + if groupAttr := d.Get("groups").(*schema.Set); groupAttr.Len() > 0 { + identities["Group"] = expandStringList(groupAttr.List()) + } + if userAttr := d.Get("users").(*schema.Set); userAttr.Len() > 0 { + identities["User"] = expandStringList(userAttr.List()) + } + + updateOpts := &quicksight.UpdateIAMPolicyAssignmentInput{ + AssignmentName: aws.String(assignmentName), + AssignmentStatus: aws.String(d.Get("assignment_status").(string)), + AwsAccountId: aws.String(awsAccountID), + Identities: identities, + Namespace: aws.String(namespace), + PolicyArn: aws.String(d.Get("policy_arn").(string)), + } + + _, err = conn.UpdateIAMPolicyAssignment(updateOpts) + if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") { + log.Printf("[ERROR] QuickSight IAM Policy Assignment %s not found", d.Id()) + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error updating QuickSight IAM Policy Assignment %s: %s", d.Id(), err) + } + return resourceAwsQuickSightIAMPolicyAssignmentRead(d, meta) +} + +func resourceAwsQuickSightIAMPolicyAssignmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID, namespace, assignmentName, err := resourceAwsQuickSightIAMPolicyAssignmentParseID(d.Id()) + if err != nil { + return err + } + + deleteOpts := &quicksight.DeleteIAMPolicyAssignmentInput{ + AssignmentName: aws.String(assignmentName), + AwsAccountId: aws.String(awsAccountID), + Namespace: aws.String(namespace), + } + + if _, err := conn.DeleteIAMPolicyAssignment(deleteOpts); err != nil { + if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") { + return nil + } + return fmt.Errorf("Error deleting QuickSight IAM Policy Assignment %s: %s", 
d.Id(), err) + } + + return nil +} + +func resourceAwsQuickSightIAMPolicyAssignmentParseID(id string) (string, string, string, error) { + parts := strings.SplitN(id, "/", 3) + if len(parts) < 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" { + return "", "", "", fmt.Errorf("unexpected format of ID (%s), expected AWS_ACCOUNT_ID/NAMESPACE/GROUP_NAME", id) + } + return parts[0], parts[1], parts[2], nil +} \ No newline at end of file diff --git a/aws/resource_aws_quicksight_namespace.go b/aws/resource_aws_quicksight_namespace.go new file mode 100644 index 000000000..8171499ec --- /dev/null +++ b/aws/resource_aws_quicksight_namespace.go @@ -0,0 +1,206 @@ +package aws + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/quicksight" + + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" +) + +func resourceAwsQuickSightNamespace() *schema.Resource { + return &schema.Resource{ + // NOTE: It is possible for a namespace to get stuck in "CREATING" status if an account has + // not completed QuickSight signup. + Create: resourceAwsQuickSightNamespaceCreate, + Read: resourceAwsQuickSightNamespaceRead, + + // NOTE: AWS QuickSight Namespace does not have a dedicated edit/update endpoint. + //Update: resourceAwsQuickSightNamespaceUpdate, + + // NOTE: Deleting an AWS QuickSight Namespace will also delete users and groups + // associated with that namespace. + // ref: https://docs.aws.amazon.com/sdk-for-go/api/service/quicksight/#QuickSight.DeleteNamespace + Delete: resourceAwsQuickSightNamespaceDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "aws_account_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "identity_store": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + quicksight.IdentityTypeQuicksight, + }, false), + }, + + "namespace": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + //"tags": tagsSchemaForceNew(), // TODO use this helper later in place of inline below + "tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} +func resourceAwsQuickSightNamespaceCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).quicksightconn + + awsAccountID := meta.(*AWSClient).accountid + namespace := d.Get("namespace").(string) + + if v, ok := d.GetOk("aws_account_id"); ok { + awsAccountID = v.(string) + } + + createOpts := &quicksight.CreateNamespaceInput{ + AwsAccountId: aws.String(awsAccountID), + Namespace: aws.String(namespace), + IdentityStore: aws.String(d.Get("identity_store").(string)), + } + + if attr, ok := d.GetOk("tags"); ok { + createOpts.Tags = keyvaluetags.New(attr.(map[string]interface{})).IgnoreAws().QuicksightTags() + } + + _, err := conn.CreateNamespace(createOpts) + if err != nil { + return fmt.Errorf("Error creating QuickSight Namespace: %s", err) + } + + d.SetId(fmt.Sprintf("%s/%s", awsAccountID, namespace)) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"CREATING"}, + Target: []string{"CREATED"}, + Refresh: stateRefresh(conn, awsAccountID, namespace), + 
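+		// Namespace provisioning is asynchronous: DescribeNamespace keeps returning
+		// CREATING until QuickSight finishes (or gets stuck, per the note on the
+		// resource above), hence the generous timeout below.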
Timeout:    15 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for QuickSight Namespace (%s) to be created: %s", d.Id(), err)
+	}
+	return resourceAwsQuickSightNamespaceRead(d, meta)
+}
+
+func resourceAwsQuickSightNamespaceRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).quicksightconn
+
+	awsAccountID, namespace, err := resourceAwsQuickSightNamespaceParseID(d.Id())
+	if err != nil {
+		return err
+	}
+
+	descOpts := &quicksight.DescribeNamespaceInput{
+		AwsAccountId: aws.String(awsAccountID),
+		Namespace:    aws.String(namespace),
+	}
+
+	resp, err := conn.DescribeNamespace(descOpts)
+	if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") {
+		log.Printf("[WARN] QuickSight Namespace %s is not found", d.Id())
+		d.SetId("")
+		return nil
+	}
+	if err != nil {
+		return fmt.Errorf("Error describing QuickSight Namespace (%s): %s", d.Id(), err)
+	}
+
+	d.Set("namespace", resp.Namespace.Name)
+	d.Set("aws_account_id", awsAccountID)
+
+	return nil
+}
+
+func resourceAwsQuickSightNamespaceDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).quicksightconn
+
+	awsAccountID, namespace, err := resourceAwsQuickSightNamespaceParseID(d.Id())
+	if err != nil {
+		return err
+	}
+
+	deleteOpts := &quicksight.DeleteNamespaceInput{
+		AwsAccountId: aws.String(awsAccountID),
+		Namespace:    aws.String(namespace),
+	}
+
+	if _, err := conn.DeleteNamespace(deleteOpts); err != nil {
+		if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") {
+			return nil
+		}
+		return fmt.Errorf("Error deleting QuickSight Namespace %s: %s", d.Id(), err)
+	}
+
+	stateConf := &resource.StateChangeConf{
+		Pending:    []string{"DELETING", "CREATING"},
+		Target:     []string{"DELETED"},
+		Refresh:    stateRefresh(conn, awsAccountID, namespace),
+		Timeout:    15 * time.Minute,
+		Delay:      10 * time.Second,
+		MinTimeout: 3 * time.Second,
+	}
+
+	_, err = stateConf.WaitForState()
+	if err != nil {
+		return fmt.Errorf("Error waiting for QuickSight Namespace (%s) to become deleted: %s", d.Id(), err)
+	}
+
+	return nil
+}
+
+func resourceAwsQuickSightNamespaceParseID(id string) (string, string, error) {
+	parts := strings.SplitN(id, "/", 2)
+	if len(parts) < 2 || parts[0] == "" || parts[1] == "" {
+		return "", "", fmt.Errorf("unexpected format of ID (%s), expected AWS_ACCOUNT_ID/NAMESPACE", id)
+	}
+	return parts[0], parts[1], nil
+}
+
+// stateRefresh reports the namespace's CreationStatus, mapping a missing namespace to DELETED.
+func stateRefresh(conn *quicksight.QuickSight, awsAccountID, namespace string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		emptyResp := &quicksight.DescribeNamespaceOutput{}
+
+		resp, err := conn.DescribeNamespace(&quicksight.DescribeNamespaceInput{
+			AwsAccountId: aws.String(awsAccountID),
+			Namespace:    aws.String(namespace),
+		})
+		if isAWSErr(err, quicksight.ErrCodeResourceNotFoundException, "") {
+			return emptyResp, "DELETED", nil
+		}
+		if err != nil {
+			return nil, "", err
+		}
+
+		return resp, aws.StringValue(resp.Namespace.CreationStatus), nil
+	}
+}
\ No newline at end of file
diff --git a/vendor/cloud.google.com/go/.gitignore b/vendor/cloud.google.com/go/.gitignore
new file mode 100644
index 000000000..ee9694b87
--- /dev/null
+++ b/vendor/cloud.google.com/go/.gitignore
@@ -0,0 +1,11 @@
+# Editors
+.idea
+.vscode
+*.swp
+
+# Test files
+*.test
+coverage.txt
+
+# Other
+.DS_Store
diff --git a/vendor/cloud.google.com/go/CHANGES.md
b/vendor/cloud.google.com/go/CHANGES.md new file mode 100644 index 000000000..3e9fca4a7 --- /dev/null +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -0,0 +1,1566 @@ +# Changes + +## v0.61.0 + +### Changes + +- all: + - Update all direct dependencies. +- dashboard: + - Start generating client for apiv1. +- policytroubleshooter: + - Start generating client for apiv1. +- profiler: + - Disable OpenCensus Telemetry for requests made by the profiler package by default. You can re-enable it using `profiler.Config.EnableOCTelemetry`. +- Various updates to autogenerated clients. + +## v0.60.0 + +### Changes + +- all: + - Refactored examples to reduce module dependencies. + - Update sub-modules to use cloud.google.com/go v0.59.0. +- internal: + - Start generating client for gaming apiv1beta. +- Various updates to autogenerated clients. + +## v0.59.0 + +### Announcements + +goolgeapis/google-cloud-go has moved its source of truth to GitHub and is no longer a mirror. This means that our +contributing process has changed a bit. We will now be conducting all code reviews on GitHub which means we now accept +pull requests! If you have a version of the codebase previously checked out you may wish to update your git remote to +point to GitHub. + +### Changes + +- all: + - Remove dependency on honnef.co/go/tools. + - Update our contributing instructions now that we use GitHub for reviews. + - Remove some un-inclusive terminology. +- compute/metadata: + - Pass cancelable context to DNS lookup. +- .github: + - Update templates issue/PR templates. +- internal: + - Bump several clients to GA. + - Fix GoDoc badge source. + - Several automation changes related to the move to GitHub. + - Start generating a client for asset v1p5beta1. +- Various updates to autogenerated clients. + +## v0.58.0 + +### Deprecation notice + +- `cloud.google.com/go/monitoring/apiv3` has been deprecated due to breaking + changes in the API. Please migrate to `cloud.google.com/go/monitoring/apiv3/v2`. + +### Changes + +- all: + - The remaining uses of gtransport.Dial have been removed. + - The `genproto` dependency has been updated to a version that makes use of + new `protoreflect` library. For more information on these protobuf changes + please see the following post from the official Go blog: + https://blog.golang.org/protobuf-apiv2. +- internal: + - Started generation of datastore admin v1 client. + - Updated protofuf version used for generation to 3.12.X. + - Update the release levels for several APIs. + - Generate clients with protoc-gen-go@v1.4.1. +- monitoring: + - Re-enable generation of monitoring/apiv3 under v2 directory (see deprecation + notice above). +- profiler: + - Fixed flakiness in tests. +- Various updates to autogenerated clients. + +## v0.57.0 + +- all: + - Update module dependency `google.golang.org/api` to `v0.21.0`. +- errorreporting: + - Add exported SetGoogleClientInfo wrappers to manual file. +- expr/v1alpha1: + - Deprecate client. This client will be removed in a future release. +- internal: + - Fix possible data race in TestTracer. + - Pin versions of tools used for generation. + - Correct the release levels for BigQuery APIs. + - Start generation osconfig v1. +- longrunning: + - Add exported SetGoogleClientInfo wrappers to manual file. +- monitoring: + - Stop generation of monitoring/apiv3 because of incoming breaking change. +- trace: + - Add exported SetGoogleClientInfo wrappers to manual file. +- Various updates to autogenerated clients. 
+ +## v0.56.0 + +- secretmanager: + - add IAM helper +- profiler: + - try all us-west1 zones for integration tests +- internal: + - add config to generate webrisk v1 + - add repo and commit to buildcop invocation + - add recaptchaenterprise v1 generation config + - update microgenerator to v0.12.5 + - add datacatalog client + - start generating security center settings v1beta + - start generating osconfig agentendpoint v1 + - setup generation for bigquery/connection/v1beta1 +- all: + - increase continous testing timeout to 45m + - various updates to autogenerated clients. + +## v0.55.0 + +- Various updates to autogenerated clients. + +## v0.54.0 + +- all: + - remove unused golang.org/x/exp from mod file + - update godoc.org links to pkg.go.dev +- compute/metadata: + - use defaultClient when http.Client is nil + - remove subscribeClient +- iam: + - add support for v3 policy and IAM conditions +- Various updates to autogenerated clients. + +## v0.53.0 + +- all: most clients now use transport/grpc.DialPool rather than Dial (see #1777 for outliers). + - Connection pooling now does not use the deprecated (and soon to be removed) gRPC load balancer API. +- profiler: remove symbolization (drops support for go1.10) +- Various updates to autogenerated clients. + +## v0.52.0 + +- internal/gapicgen: multiple improvements related to library generation. +- compute/metadata: unset ResponseHeaderTimeout in defaultClient +- docs: fix link to KMS in README.md +- Various updates to autogenerated clients. + +## v0.51.0 + +- secretmanager: + - add IAM helper for generic resource IAM handle +- cloudbuild: + - migrate to microgen in a major version +- Various updates to autogenerated clients. + +## v0.50.0 + +- profiler: + - Support disabling CPU profile collection. + - Log when a profile creation attempt begins. +- compute/metadata: + - Fix panic on malformed URLs. + - InstanceName returns actual instance name. +- Various updates to autogenerated clients. + +## v0.49.0 + +- functions/metadata: + - Handle string resources in JSON unmarshaller. +- Various updates to autogenerated clients. + +## v0.48.0 + +- Various updates to autogenerated clients + +## v0.47.0 + +This release drops support for Go 1.9 and Go 1.10: we continue to officially +support Go 1.11, Go 1.12, and Go 1.13. + +- Various updates to autogenerated clients. +- Add cloudbuild/apiv1 client. + +## v0.46.3 + +This is an empty release that was created solely to aid in storage's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.46.2 + +This is an empty release that was created solely to aid in spanner's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.46.1 + +This is an empty release that was created solely to aid in firestore's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.46.0 + +- spanner: + - Retry "Session not found" for read-only transactions. + - Retry aborted PDMLs. +- spanner/spannertest: + - Fix a bug that was causing 0X-prefixed number to be parsed incorrectly. +- storage: + - Add HMACKeyOptions. + - Remove *REGIONAL from StorageClass documentation. Using MULTI_REGIONAL, + DURABLE_REDUCED_AVAILABILITY, and REGIONAL are no longer best practice + StorageClasses but they are still acceptable values. +- trace: + - Remove cloud.google.com/go/trace. 
Package cloud.google.com/go/trace has been + marked OBSOLETE for several years: it is now no longer provided. If you + relied on this package, please vendor it or switch to using + https://cloud.google.com/trace/docs/setup/go (which obsoleted it). + +## v0.45.1 + +This is an empty release that was created solely to aid in pubsub's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.45.0 + +- compute/metadata: + - Add Email method. +- storage: + - Fix duplicated retry logic. + - Add ReaderObjectAttrs.StartOffset. + - Support reading last N bytes of a file when a negative range is given, such + as `obj.NewRangeReader(ctx, -10, -1)`. + - Add HMACKey listing functionality. +- spanner/spannertest: + - Support primary keys with no columns. + - Fix MinInt64 parsing. + - Implement deletion of key ranges. + - Handle reads during a read-write transaction. + - Handle returning DATE values. +- pubsub: + - Fix Ack/Modack request size calculation. +- logging: + - Add auto-detection of monitored resources on GAE Standard. + +## v0.44.3 + +This is an empty release that was created solely to aid in bigtable's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.44.2 + +This is an empty release that was created solely to aid in bigquery's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.44.1 + +This is an empty release that was created solely to aid in datastore's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.44.0 + +- datastore: + - Interface elements whose underlying types are supported, are now supported. + - Reduce time to initial retry from 1s to 100ms. +- firestore: + - Add Increment transformation. +- storage: + - Allow emulator with STORAGE_EMULATOR_HOST. + - Add methods for HMAC key management. +- pubsub: + - Add PublishCount and PublishLatency measurements. + - Add DefaultPublishViews and DefaultSubscribeViews for convenience of + importing all views. + - Add add Subscription.PushConfig.AuthenticationMethod. +- spanner: + - Allow emulator usage with SPANNER_EMULATOR_HOST. + - Add cloud.google.com/go/spanner/spannertest, a spanner emulator. + - Add cloud.google.com/go/spanner/spansql which contains types and a parser + for the Cloud Spanner SQL dialect. +- asset: + - Add apiv1p2beta1 client. + +## v0.43.0 + +This is an empty release that was created solely to aid in logging's module +carve-out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## v0.42.0 + +- bigtable: + - Add an admin method to update an instance and clusters. + - Fix bttest regex matching behavior for alternations (things like `|a`). + - Expose BlockAllFilter filter. +- bigquery: + - Add Routines API support. +- storage: + - Add read-only Bucket.LocationType. +- logging: + - Add TraceSampled to Entry. + - Fix to properly extract {Trace, Span}Id from X-Cloud-Trace-Context. +- pubsub: + - Add Cloud Key Management to TopicConfig. + - Change ExpirationPolicy to optional.Duration. +- automl: + - Add apiv1beta1 client. +- iam: + - Fix compilation problem with iam/credentials/apiv1. + +## v0.41.0 + +- bigtable: + - Check results from PredicateFilter in bttest, which fixes certain false matches. 
+- profiler: + - debugLog checks user defined logging options before logging. +- spanner: + - PartitionedUpdates respect query parameters. + - StartInstance allows specifying cloud API access scopes. +- bigquery: + - Use empty slice instead of nil for ValueSaver, fixing an issue with zero-length, repeated, nested fields causing panics. +- firestore: + - Return same number of snapshots as doc refs (in the form of duplicate records) during GetAll. +- replay: + - Change references to IPv4 addresses to localhost, making replay compatible with IPv6. + +## v0.40.0 + +- all: + - Update to protobuf-golang v1.3.1. +- datastore: + - Attempt to decode GAE-encoded keys if initial decoding attempt fails. + - Support integer time conversion. +- pubsub: + - Add PublishSettings.BundlerByteLimit. If users receive pubsub.ErrOverflow, + this value should be adjusted higher. + - Use IPv6 compatible target in testutil. +- bigtable: + - Fix Latin-1 regexp filters in bttest, allowing \C. + - Expose PassAllFilter. +- profiler: + - Add log messages for slow path in start. + - Fix start to allow retry until success. +- firestore: + - Add admin client. +- containeranalysis: + - Add apiv1 client. +- grafeas: + - Add apiv1 client. + +## 0.39.0 + +- bigtable: + - Implement DeleteInstance in bttest. + - Return an error on invalid ReadRowsRequest.RowRange key ranges in bttest. +- bigquery: + - Move RequirePartitionFilter outside of TimePartioning. + - Expose models API. +- firestore: + - Allow array values in create and update calls. + - Add CollectionGroup method. +- pubsub: + - Add ExpirationPolicy to Subscription. +- storage: + - Add V4 signing. +- rpcreplay: + - Match streams by first sent request. This further improves rpcreplay's + ability to distinguish streams. +- httpreplay: + - Set up Man-In-The-Middle config only once. This should improve proxy + creation when multiple proxies are used in a single process. + - Remove error on empty Content-Type, allowing requests with no Content-Type + header but a non-empty body. +- all: + - Fix an edge case bug in auto-generated library pagination by properly + propagating pagetoken. + +## 0.38.0 + +This update includes a substantial reduction in our transitive dependency list +by way of updating to opencensus@v0.21.0. + +- spanner: + - Error implements GRPCStatus, allowing status.Convert. +- bigtable: + - Fix a bug in bttest that prevents single column queries returning results + that match other filters. + - Remove verbose retry logging. +- logging: + - Ensure RequestUrl has proper UTF-8, removing the need for users to wrap and + rune replace manually. +- recaptchaenterprise: + - Add v1beta1 client. +- phishingprotection: + - Add v1beta1 client. + +## 0.37.4 + +This patch releases re-builds the go.sum. This was not possible in the +previous release. + +- firestore: + - Add sentinel value DetectProjectID for auto-detecting project ID. + - Add OpenCensus tracing for public methods. + - Marked stable. All future changes come with a backwards compatibility + guarantee. + - Removed firestore/apiv1beta1. All users relying on this low-level library + should migrate to firestore/apiv1. Note that most users should use the + high-level firestore package instead. +- pubsub: + - Allow large messages in synchronous pull case. + - Cap bundler byte limit. This should prevent OOM conditions when there are + a very large number of message publishes occurring. +- storage: + - Add ETag to BucketAttrs and ObjectAttrs. +- datastore: + - Removed some non-sensical OpenCensus traces. 
+- webrisk: + - Add v1 client. +- asset: + - Add v1 client. +- cloudtasks: + - Add v2 client. + +## 0.37.3 + +This patch release removes github.com/golang/lint from the transitive +dependency list, resolving `go get -u` problems. + +Note: this release intentionally has a broken go.sum. Please use v0.37.4. + +## 0.37.2 + +This patch release is mostly intended to bring in v0.3.0 of +google.golang.org/api, which fixes a GCF deployment issue. + +Note: we had to-date accidentally marked Redis as stable. In this release, we've +fixed it by downgrading its documentation to alpha, as it is in other languages +and docs. + +- all: + - Document context in generated libraries. + +## 0.37.1 + +Small go.mod version bumps to bring in v0.2.0 of google.golang.org/api, which +introduces a new oauth2 url. + +## 0.37.0 + +- spanner: + - Add BatchDML method. + - Reduced initial time between retries. +- bigquery: + - Produce better error messages for InferSchema. + - Add logical type control for avro loads. + - Add support for the GEOGRAPHY type. +- datastore: + - Add sentinel value DetectProjectID for auto-detecting project ID. + - Allow flatten tag on struct pointers. + - Fixed a bug that caused queries to panic with invalid queries. Instead they + will now return an error. +- profiler: + - Add ability to override GCE zone and instance. +- pubsub: + - BEHAVIOR CHANGE: Refactor error code retry logic. RPCs should now more + consistently retry specific error codes based on whether they're idempotent + or non-idempotent. +- httpreplay: Fixed a bug when a non-GET request had a zero-length body causing + the Content-Length header to be dropped. +- iot: + - Add new apiv1 client. +- securitycenter: + - Add new apiv1 client. +- cloudscheduler: + - Add new apiv1 client. + +## 0.36.0 + +- spanner: + - Reduce minimum retry backoff from 1s to 100ms. This makes time between + retries much faster and should improve latency. +- storage: + - Add support for Bucket Policy Only. +- kms: + - Add ResourceIAM helper method. + - Deprecate KeyRingIAM and CryptoKeyIAM. Please use ResourceIAM. +- firestore: + - Switch from v1beta1 API to v1 API. + - Allow emulator with FIRESTORE_EMULATOR_HOST. +- bigquery: + - Add NumLongTermBytes to Table. + - Add TotalBytesProcessedAccuracy to QueryStatistics. +- irm: + - Add new v1alpha2 client. +- talent: + - Add new v4beta1 client. +- rpcreplay: + - Fix connection to work with grpc >= 1.17. + - It is now required for an actual gRPC server to be running for Dial to + succeed. + +## 0.35.1 + +- spanner: + - Adds OpenCensus views back to public API. + +## v0.35.0 + +- all: + - Add go.mod and go.sum. + - Switch usage of gax-go to gax-go/v2. +- bigquery: + - Fix bug where time partitioning could not be removed from a table. + - Fix panic that occurred with empty query parameters. +- bttest: + - Fix bug where deleted rows were returned by ReadRows. +- bigtable/emulator: + - Configure max message size to 256 MiB. +- firestore: + - Allow non-transactional queries in transactions. + - Allow StartAt/EndBefore on direct children at any depth. + - QuerySnapshotIterator.Stop may be called in an error state. + - Fix bug the prevented reset of transaction write state in between retries. +- functions/metadata: + - Make Metadata.Resource a pointer. +- logging: + - Make SpanID available in logging.Entry. +- metadata: + - Wrap !200 error code in a typed err. +- profiler: + - Add function to check if function name is within a particular file in the + profile. + - Set parent field in create profile request. 
+ - Return kubernetes client to start cluster, so client can be used to poll + cluster. + - Add function for checking if filename is in profile. +- pubsub: + - Fix bug where messages expired without an initial modack in + synchronous=true mode. + - Receive does not retry ResourceExhausted errors. +- spanner: + - client.Close now cancels existing requests and should be much faster for + large amounts of sessions. + - Correctly allow MinOpened sessions to be spun up. + +## v0.34.0 + +- functions/metadata: + - Switch to using JSON in context. + - Make Resource a value. +- vision: Fix ProductSearch return type. +- datastore: Add an example for how to handle MultiError. + +## v0.33.1 + +- compute: Removes an erroneously added go.mod. +- logging: Populate source location in fromLogEntry. + +## v0.33.0 + +- bttest: + - Add support for apply_label_transformer. +- expr: + - Add expr library. +- firestore: + - Support retrieval of missing documents. +- kms: + - Add IAM methods. +- pubsub: + - Clarify extension documentation. +- scheduler: + - Add v1beta1 client. +- vision: + - Add product search helper. + - Add new product search client. + +## v0.32.0 + +Note: This release is the last to support Go 1.6 and 1.8. + +- bigquery: + - Add support for removing an expiration. + - Ignore NeverExpire in Table.Create. + - Validate table expiration time. +- cbt: + - Add note about not supporting arbitrary bytes. +- datastore: + - Align key checks. +- firestore: + - Return an error when using Start/End without providing values. +- pubsub: + - Add pstest Close method. + - Clarify MaxExtension documentation. +- securitycenter: + - Add v1beta1 client. +- spanner: + - Allow nil in mutations. + - Improve doc of SessionPoolConfig.MaxOpened. + - Increase session deletion timeout from 5s to 15s. + +## v0.31.0 + +- bigtable: + - Group mutations across multiple requests. +- bigquery: + - Link to bigquery troubleshooting errors page in bigquery.Error comment. +- cbt: + - Fix go generate command. + - Document usage of both maxage + maxversions. +- datastore: + - Passing nil keys results in ErrInvalidKey. +- firestore: + - Clarify what Document.DataTo does with untouched struct fields. +- profile: + - Validate service name in agent. +- pubsub: + - Fix deadlock with pstest and ctx.Cancel. + - Fix a possible deadlock in pstest. +- trace: + - Update doc URL with new fragment. + +Special thanks to @fastest963 for going above and beyond helping us to debug +hard-to-reproduce Pub/Sub issues. + +## v0.30.0 + +- spanner: DML support added. See https://godoc.org/cloud.google.com/go/spanner#hdr-DML_and_Partitioned_DML for more information. +- bigtable: bttest supports row sample filter. +- functions: metadata package added for accessing Cloud Functions resource metadata. + +## v0.29.0 + +- bigtable: + - Add retry to all idempotent RPCs. + - cbt supports complex GC policies. + - Emulator supports arbitrary bytes in regex filters. +- firestore: Add ArrayUnion and ArrayRemove. +- logging: Add the ContextFunc option to supply the context used for + asynchronous RPCs. +- profiler: Ignore NotDefinedError when fetching the instance name +- pubsub: + - BEHAVIOR CHANGE: Receive doesn't retry if an RPC returns codes.Cancelled. + - BEHAVIOR CHANGE: Receive retries on Unavailable intead of returning. + - Fix deadlock. + - Restore Ack/Nack/Modacks metrics. + - Improve context handling in iterator. + - Implement synchronous mode for Receive. + - pstest: add Pull. +- spanner: Add a metric for the number of sessions currently opened. 
+- storage: + - Canceling the context releases all resources. + - Add additional RetentionPolicy attributes. +- vision/apiv1: Add LocalizeObjects method. + +## v0.28.0 + +- bigtable: + - Emulator returns Unimplemented for snapshot RPCs. +- bigquery: + - Support zero-length repeated, nested fields. +- cloud assets: + - Add v1beta client. +- datastore: + - Don't nil out transaction ID on retry. +- firestore: + - BREAKING CHANGE: When watching a query with Query.Snapshots, QuerySnapshotIterator.Next + returns a QuerySnapshot which contains read time, result size, change list and the DocumentIterator + (previously, QuerySnapshotIterator.Next returned just the DocumentIterator). See: https://godoc.org/cloud.google.com/go/firestore#Query.Snapshots. + - Add array-contains operator. +- IAM: + - Add iam/credentials/apiv1 client. +- pubsub: + - Canceling the context passed to Subscription.Receive causes Receive to return when + processing finishes on all messages currently in progress, even if new messages are arriving. +- redis: + - Add redis/apiv1 client. +- storage: + - Add Reader.Attrs. + - Deprecate several Reader getter methods: please use Reader.Attrs for these instead. + - Add ObjectHandle.Bucket and ObjectHandle.Object methods. + +## v0.27.0 + +- bigquery: + - Allow modification of encryption configuration and partitioning options to a table via the Update call. + - Add a SchemaFromJSON function that converts a JSON table schema. +- bigtable: + - Restore cbt count functionality. +- containeranalysis: + - Add v1beta client. +- spanner: + - Fix a case where an iterator might not be closed correctly. +- storage: + - Add ServiceAccount method https://godoc.org/cloud.google.com/go/storage#Client.ServiceAccount. + - Add a method to Reader that returns the parsed value of the Last-Modified header. + +## v0.26.0 + +- bigquery: + - Support filtering listed jobs by min/max creation time. + - Support data clustering (https://godoc.org/cloud.google.com/go/bigquery#Clustering). + - Include job creator email in Job struct. +- bigtable: + - Add `RowSampleFilter`. + - emulator: BREAKING BEHAVIOR CHANGE: Regexps in row, family, column and value filters + must match the entire target string to succeed. Previously, the emulator was + succeeding on partial matches. + NOTE: As of this release, this change only affects the emulator when run + from this repo (bigtable/cmd/emulator/cbtemulator.go). The version launched + from `gcloud` will be updated in a subsequent `gcloud` release. +- dataproc: Add apiv1beta2 client. +- datastore: Save non-nil pointer fields on omitempty. +- logging: populate Entry.Trace from the HTTP X-Cloud-Trace-Context header. +- logging/logadmin: Support writer_identity and include_children. +- pubsub: + - Support labels on topics and subscriptions. + - Support message storage policy for topics. + - Use the distribution of ack times to determine when to extend ack deadlines. + The only user-visible effect of this change should be that programs that + call only `Subscription.Receive` need no IAM permissions other than `Pub/Sub + Subscriber`. +- storage: + - Support predefined ACLs. + - Support additional ACL fields other than Entity and Role. + - Support bucket websites. + - Support bucket logging. + + +## v0.25.0 + +- Added [Code of Conduct](https://github.com/googleapis/google-cloud-go/blob/master/CODE_OF_CONDUCT.md) +- bigtable: + - cbt: Support a GC policy of "never". +- errorreporting: + - Support User. + - Close now calls Flush. + - Use OnError (previously ignored). 
+ - Pass through the RPC error as-is to OnError. +- httpreplay: A tool for recording and replaying HTTP requests + (for the bigquery and storage clients in this repo). +- kms: v1 client added +- logging: add SourceLocation to Entry. +- storage: improve CRC checking on read. + +## v0.24.0 + +- bigquery: Support for the NUMERIC type. +- bigtable: + - cbt: Optionally specify columns for read/lookup + - Support instance-level administration. +- oslogin: New client for the OS Login API. +- pubsub: + - The package is now stable. There will be no further breaking changes. + - Internal changes to improve Subscription.Receive behavior. +- storage: Support updating bucket lifecycle config. +- spanner: Support struct-typed parameter bindings. +- texttospeech: New client for the Text-to-Speech API. + +## v0.23.0 + +- bigquery: Add DDL stats to query statistics. +- bigtable: + - cbt: Add cells-per-column limit for row lookup. + - cbt: Make it possible to combine read filters. +- dlp: v2beta2 client removed. Use the v2 client instead. +- firestore, spanner: Fix compilation errors due to protobuf changes. + +## v0.22.0 + +- bigtable: + - cbt: Support cells per column limit for row read. + - bttest: Correctly handle empty RowSet. + - Fix ReadModifyWrite operation in emulator. + - Fix API path in GetCluster. + +- bigquery: + - BEHAVIOR CHANGE: Retry on 503 status code. + - Add dataset.DeleteWithContents. + - Add SchemaUpdateOptions for query jobs. + - Add Timeline to QueryStatistics. + - Add more stats to ExplainQueryStage. + - Support Parquet data format. + +- datastore: + - Support omitempty for times. + +- dlp: + - **BREAKING CHANGE:** Remove v1beta1 client. Please migrate to the v2 client, + which is now out of beta. + - Add v2 client. + +- firestore: + - BEHAVIOR CHANGE: Treat set({}, MergeAll) as valid. + +- iam: + - Support JWT signing via SignJwt callopt. + +- profiler: + - BEHAVIOR CHANGE: PollForSerialOutput returns an error when context.Done. + - BEHAVIOR CHANGE: Increase the initial backoff to 1 minute. + - Avoid returning empty serial port output. + +- pubsub: + - BEHAVIOR CHANGE: Don't backoff during next retryable error once stream is healthy. + - BEHAVIOR CHANGE: Don't backoff on EOF. + - pstest: Support Acknowledge and ModifyAckDeadline RPCs. + +- redis: + - Add v1 beta Redis client. + +- spanner: + - Support SessionLabels. + +- speech: + - Add api v1 beta1 client. + +- storage: + - BEHAVIOR CHANGE: Retry reads when retryable error occurs. + - Fix delete of object in requester-pays bucket. + - Support KMS integration. + +## v0.21.0 + +- bigquery: + - Add OpenCensus tracing. + +- firestore: + - **BREAKING CHANGE:** If a document does not exist, return a DocumentSnapshot + whose Exists method returns false. DocumentRef.Get and Transaction.Get + return the non-nil DocumentSnapshot in addition to a NotFound error. + **DocumentRef.GetAll and Transaction.GetAll return a non-nil + DocumentSnapshot instead of nil.** + - Add DocumentIterator.Stop. **Call Stop whenever you are done with a + DocumentIterator.** + - Added Query.Snapshots and DocumentRef.Snapshots, which provide realtime + notification of updates. See https://cloud.google.com/firestore/docs/query-data/listen. + - Canceling an RPC now always returns a grpc.Status with codes.Canceled. + +- spanner: + - Add `CommitTimestamp`, which supports inserting the commit timestamp of a + transaction into a column. + +## v0.20.0 + +- bigquery: Support SchemaUpdateOptions for load jobs. + +- bigtable: + - Add SampleRowKeys. 
+ - cbt: Support union, intersection GCPolicy. + - Retry admin RPCS. + - Add trace spans to retries. + +- datastore: Add OpenCensus tracing. + +- firestore: + - Fix queries involving Null and NaN. + - Allow Timestamp protobuffers for time values. + +- logging: Add a WriteTimeout option. + +- spanner: Support Batch API. + +- storage: Add OpenCensus tracing. + +## v0.19.0 + +- bigquery: + - Support customer-managed encryption keys. + +- bigtable: + - Improved emulator support. + - Support GetCluster. + +- datastore: + - Add general mutations. + - Support pointer struct fields. + - Support transaction options. + +- firestore: + - Add Transaction.GetAll. + - Support document cursors. + +- logging: + - Support concurrent RPCs to the service. + - Support per-entry resources. + +- profiler: + - Add config options to disable heap and thread profiling. + - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set. + +- pubsub: + - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the + callback returns). + - Add SubscriptionInProject. + - Add OpenCensus instrumentation for streaming pull. + +- storage: + - Support CORS. + +## v0.18.0 + +- bigquery: + - Marked stable. + - Schema inference of nullable fields supported. + - Added TimePartitioning to QueryConfig. + +- firestore: Data provided to DocumentRef.Set with a Merge option can contain + Delete sentinels. + +- logging: Clients can accept parent resources other than projects. + +- pubsub: + - pubsub/pstest: A lighweight fake for pubsub. Experimental; feedback welcome. + - Support updating more subscription metadata: AckDeadline, + RetainAckedMessages and RetentionDuration. + +- oslogin/apiv1beta: New client for the Cloud OS Login API. + +- rpcreplay: A package for recording and replaying gRPC traffic. + +- spanner: + - Add a ReadWithOptions that supports a row limit, as well as an index. + - Support query plan and execution statistics. + - Added [OpenCensus](http://opencensus.io) support. + +- storage: Clarify checksum validation for gzipped files (it is not validated + when the file is served uncompressed). + + +## v0.17.0 + +- firestore BREAKING CHANGES: + - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update. + Change + `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})` + to + `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})` + + Change + `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)` + to + `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})` + - Rename MergePaths to Merge; require args to be FieldPaths + - A value stored as an integer can be read into a floating-point field, and vice versa. +- bigtable/cmd/cbt: + - Support deleting a column. + - Add regex option for row read. +- spanner: Mark stable. +- storage: + - Add Reader.ContentEncoding method. + - Fix handling of SignedURL headers. +- bigquery: + - If Uploader.Put is called with no rows, it returns nil without making a + call. + - Schema inference supports the "nullable" option in struct tags for + non-required fields. + - TimePartitioning supports "Field". + + +## v0.16.0 + +- Other bigquery changes: + - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE). + - UseStandardSQL is deprecated; set UseLegacySQL to true if you need + Legacy SQL. + - Uploader.Put will generate a random insert ID if you do not provide one. + - Support time partitioning for load jobs. + - Support dry-run queries. + - A `Job` remembers its last retrieved status. 
+ - Support retrieving job configuration. + - Support labels for jobs and tables. + - Support dataset access lists. + - Improve support for external data sources, including data from Bigtable and + Google Sheets, and tables with external data. + - Support updating a table's view configuration. + - Fix uploading civil times with nanoseconds. + +- storage: + - Support PubSub notifications. + - Support Requester Pays buckets. + +- profiler: Support goroutine and mutex profile types. + +## v0.15.0 + +- firestore: beta release. See the + [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html). + +- errorreporting: The existing package has been redesigned. + +- errors: This package has been removed. Use errorreporting. + + +## v0.14.0 + +- bigquery BREAKING CHANGES: + - Standard SQL is the default for queries and views. + - `Table.Create` takes `TableMetadata` as a second argument, instead of + options. + - `Dataset.Create` takes `DatasetMetadata` as a second argument. + - `DatasetMetadata` field `ID` renamed to `FullID` + - `TableMetadata` field `ID` renamed to `FullID` + +- Other bigquery changes: + - The client will append a random suffix to a provided job ID if you set + `AddJobIDSuffix` to true in a job config. + - Listing jobs is supported. + - Better retry logic. + +- vision, language, speech: clients are now stable + +- monitoring: client is now beta + +- profiler: + - Rename InstanceName to Instance, ZoneName to Zone + - Auto-detect service name and version on AppEngine. + +## v0.13.0 + +- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these + options to continue using Legacy SQL after the client switches its default + to Standard SQL. + +- bigquery: Support for updating dataset labels. + +- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other + than the client's. DatasetsInProject is no longer needed and is deprecated. + +- bigtable: Fail ListInstances when any zones fail. + +- spanner: support decoding of slices of basic types (e.g. []string, []int64, + etc.) + +- logging/logadmin: UpdateSink no longer creates a sink if it is missing + (actually a change to the underlying service, not the client) + +- profiler: Service and ServiceVersion replace Target in Config. + +## v0.12.0 + +- pubsub: Subscription.Receive now uses streaming pull. + +- pubsub: add Client.TopicInProject to access topics in a different project + than the client. + +- errors: renamed errorreporting. The errors package will be removed shortly. + +- datastore: improved retry behavior. + +- bigquery: support updates to dataset metadata, with etags. + +- bigquery: add etag support to Table.Update (BREAKING: etag argument added). + +- bigquery: generate all job IDs on the client. + +- storage: support bucket lifecycle configurations. + + +## v0.11.0 + +- Clients for spanner, pubsub and video are now in beta. + +- New client for DLP. + +- spanner: performance and testing improvements. + +- storage: requester-pays buckets are supported. + +- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements. + +- pubsub: bug fixes and other minor improvements + +## v0.10.0 + +- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update. + +- pubsub: Subscription.Receive now runs concurrently for higher throughput. + +- vision: cloud.google.com/go/vision is deprecated. Use +cloud.google.com/go/vision/apiv1 instead. + +- translation: now stable. + +- trace: several changes to the surface. See the link below. 
+
+### Code changes required from v0.9.0
+
+- pubsub: Replace
+
+  ```
+  sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"})
+  ```
+
+  with
+
+  ```
+  sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
+    PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
+  })
+  ```
+
+- trace: traceGRPCServerInterceptor will be provided from *trace.Client.
+Given an initialized `*trace.Client` named `tc`, instead of
+
+  ```
+  s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc)))
+  ```
+
+  write
+
+  ```
+  s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
+  ```
+
+- trace: trace.GRPCClientInterceptor will also be provided from *trace.Client.
+Instead of
+
+  ```
+  conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor()))
+  ```
+
+  write
+
+  ```
+  conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))
+  ```
+
+- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC
+interceptor as a dial option as shown below when initializing Cloud package
+clients:
+
+  ```
+  c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())))
+  if err != nil {
+    ...
+  }
+  ```
+
+
+## v0.9.0
+
+- Breaking changes to some autogenerated clients.
+- rpcreplay package added.
+
+## v0.8.0
+
+- profiler package added.
+- storage:
+  - Retry Objects.Insert call.
+  - Add ProgressFunc to Writer.
+- pubsub: breaking changes:
+  - Publish is now asynchronous ([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)).
+  - Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)).
+  - Message.Done replaced with Message.Ack and Message.Nack.
+
+## v0.7.0
+
+- Release of a client library for Spanner. See the
+[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
+Note that although the Spanner service is beta, the Go client library is alpha.
+
+## v0.6.0
+
+- Beta release of BigQuery, DataStore, Logging and Storage. See the
+[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html).
+
+- bigquery:
+  - struct support. Read a row directly into a struct with
+`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`.
+You can also use field tags. See the [package documentation][cloud-bigquery-ref]
+for details.
+
+  - The `ValueList` type was removed. It is no longer necessary. Instead of
+  ```go
+  var v ValueList
+  ... it.Next(&v) ...
+  ```
+  use
+
+  ```go
+  var v []Value
+  ... it.Next(&v) ...
+  ```
+
+  - Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
+  `ValueList` would append to the slice. Now each call resets the size to zero first.
+
+  - Schema inference will infer the SQL type BYTES for a struct field of
+  type []byte. Previously it inferred STRING.
+
+  - The types `uint`, `uint64` and `uintptr` are no longer supported in schema
+  inference. BigQuery's integer type is INT64, and those types may hold values
+  that are not correctly represented in a 64-bit signed integer.
+
+## v0.5.0
+
+- bigquery:
+  - The SQL types DATE, TIME and DATETIME are now supported. They correspond to
+    the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
+    package.
+ - Support for query parameters. + - Support deleting a dataset. + - Values from INTEGER columns will now be returned as int64, not int. This + will avoid errors arising from large values on 32-bit systems. +- datastore: + - Nested Go structs encoded as Entity values, instead of a +flattened list of the embedded struct's fields. This means that you may now have twice-nested slices, eg. + ```go + type State struct { + Cities []struct{ + Populations []int + } + } + ``` + See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for +more details. + - Contexts no longer hold namespaces; instead you must set a key's namespace + explicitly. Also, key functions have been changed and renamed. + - The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method: + ```go + q := datastore.NewQuery("Kind").Namespace("ns") + ``` + - All the fields of Key are exported. That means you can construct any Key with a struct literal: + ```go + k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"} + ``` + - As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed. + - `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace + ```go + NewIncompleteKey(ctx, kind, parent) + ``` + with + ```go + IncompleteKey(kind, parent) + ``` + and if you do use namespaces, make sure you set the namespace on the returned key. + - `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace + ```go + NewKey(ctx, kind, name, 0, parent) + NewKey(ctx, kind, "", id, parent) + ``` + with + ```go + NameKey(kind, name, parent) + IDKey(kind, id, parent) + ``` + and if you do use namespaces, make sure you set the namespace on the returned key. + - The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`. + - The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection. + - See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for +more details. + +## v0.4.0 + +- bigquery: + -`NewGCSReference` is now a function, not a method on `Client`. + - `Table.LoaderFrom` now accepts a `ReaderSource`, enabling + loading data into a table from a file or any `io.Reader`. + * Client.Table and Client.OpenTable have been removed. + Replace + ```go + client.OpenTable("project", "dataset", "table") + ``` + with + ```go + client.DatasetInProject("project", "dataset").Table("table") + ``` + + * Client.CreateTable has been removed. + Replace + ```go + client.CreateTable(ctx, "project", "dataset", "table") + ``` + with + ```go + client.DatasetInProject("project", "dataset").Table("table").Create(ctx) + ``` + + * Dataset.ListTables have been replaced with Dataset.Tables. + Replace + ```go + tables, err := ds.ListTables(ctx) + ``` + with + ```go + it := ds.Tables(ctx) + for { + table, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: use table. + } + ``` + + * Client.Read has been replaced with Job.Read, Table.Read and Query.Read. + Replace + ```go + it, err := client.Read(ctx, job) + ``` + with + ```go + it, err := job.Read(ctx) + ``` + and similarly for reading from tables or queries. + + * The iterator returned from the Read methods is now named RowIterator. Its + behavior is closer to the other iterators in these libraries. 
It no longer
+    supports the Schema method; see the next item.
+    Replace
+    ```go
+    for it.Next(ctx) {
+      var vals ValueList
+      if err := it.Get(&vals); err != nil {
+        // TODO: Handle error.
+      }
+      // TODO: use vals.
+    }
+    if err := it.Err(); err != nil {
+      // TODO: Handle error.
+    }
+    ```
+    with
+    ```go
+    for {
+      var vals ValueList
+      err := it.Next(&vals)
+      if err == iterator.Done {
+        break
+      }
+      if err != nil {
+        // TODO: Handle error.
+      }
+      // TODO: use vals.
+    }
+    ```
+    Instead of the `RecordsPerRequest(n)` option, write
+    ```go
+    it.PageInfo().MaxSize = n
+    ```
+    Instead of the `StartIndex(i)` option, write
+    ```go
+    it.StartIndex = i
+    ```
+
+  * ValueLoader.Load now takes a Schema in addition to a slice of Values.
+    Replace
+    ```go
+    func (vl *myValueLoader) Load(v []bigquery.Value)
+    ```
+    with
+    ```go
+    func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema)
+    ```
+
+
+  * Table.Patch is replaced by Table.Update.
+    Replace
+    ```go
+    p := table.Patch()
+    p.Description("new description")
+    metadata, err := p.Apply(ctx)
+    ```
+    with
+    ```go
+    metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{
+      Description: "new description",
+    })
+    ```
+
+  * Client.Copy is replaced by separate methods for each of its four functions.
+    All options have been replaced by struct fields.
+
+    * To load data from Google Cloud Storage into a table, use Table.LoaderFrom.
+
+      Replace
+      ```go
+      client.Copy(ctx, table, gcsRef)
+      ```
+      with
+      ```go
+      table.LoaderFrom(gcsRef).Run(ctx)
+      ```
+      Instead of passing options to Copy, set fields on the Loader:
+      ```go
+      loader := table.LoaderFrom(gcsRef)
+      loader.WriteDisposition = bigquery.WriteTruncate
+      ```
+
+    * To extract data from a table into Google Cloud Storage, use
+      Table.ExtractorTo. Set fields on the returned Extractor instead of
+      passing options.
+
+      Replace
+      ```go
+      client.Copy(ctx, gcsRef, table)
+      ```
+      with
+      ```go
+      table.ExtractorTo(gcsRef).Run(ctx)
+      ```
+
+    * To copy data into a table from one or more other tables, use
+      Table.CopierFrom. Set fields on the returned Copier instead of passing options.
+
+      Replace
+      ```go
+      client.Copy(ctx, dstTable, srcTable)
+      ```
+      with
+      ```go
+      dstTable.CopierFrom(srcTable).Run(ctx)
+      ```
+
+    * To start a query job, create a Query and call its Run method. Set fields
+      on the query instead of passing options.
+
+      Replace
+      ```go
+      client.Copy(ctx, table, query)
+      ```
+      with
+      ```go
+      query.Run(ctx)
+      ```
+
+  * Table.NewUploader has been renamed to Table.Uploader. Instead of options,
+    configure an Uploader by setting its fields.
+    Replace
+    ```go
+    u := table.NewUploader(bigquery.UploadIgnoreUnknownValues())
+    ```
+    with
+    ```go
+    u := table.Uploader()
+    u.IgnoreUnknownValues = true
+    ```
+
+- pubsub: remove `pubsub.Done`. Use `iterator.Done` instead, where `iterator` is the package
+`google.golang.org/api/iterator`.
+
+## v0.3.0
+
+- storage:
+  * AdminClient replaced by methods on Client.
+    Replace
+    ```go
+    adminClient.CreateBucket(ctx, bucketName, attrs)
+    ```
+    with
+    ```go
+    client.Bucket(bucketName).Create(ctx, projectID, attrs)
+    ```
+
+  * BucketHandle.List replaced by BucketHandle.Objects.
+    Replace
+    ```go
+    for query != nil {
+      objs, err := bucket.List(d.ctx, query)
+      if err != nil { ... }
+      query = objs.Next
+      for _, obj := range objs.Results {
+        fmt.Println(obj)
+      }
+    }
+    ```
+    with
+    ```go
+    iter := bucket.Objects(d.ctx, query)
+    for {
+      obj, err := iter.Next()
+      if err == iterator.Done {
+        break
+      }
+      if err != nil { ... }
+      fmt.Println(obj)
+    }
+    ```
+    (The `iterator` package is at `google.golang.org/api/iterator`.)
+
+    Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`.
+
+    Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`.
+
+
+  * ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom.
+    Replace
+    ```go
+    attrs, err := src.CopyTo(ctx, dst, nil)
+    ```
+    with
+    ```go
+    attrs, err := dst.CopierFrom(src).Run(ctx)
+    ```
+
+    Replace
+    ```go
+    attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContentType: "text/html"})
+    ```
+    with
+    ```go
+    c := dst.CopierFrom(src)
+    c.ContentType = "text/html"
+    attrs, err := c.Run(ctx)
+    ```
+
+  * ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom.
+    Replace
+    ```go
+    attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil)
+    ```
+    with
+    ```go
+    attrs, err := dst.ComposerFrom(src1, src2).Run(ctx)
+    ```
+
+  * ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate.
+    Replace
+    ```go
+    attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContentType: "text/html"})
+    ```
+    with
+    ```go
+    attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContentType: "text/html"})
+    ```
+
+  * ObjectHandle.WithConditions replaced by ObjectHandle.If.
+    Replace
+    ```go
+    obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen))
+    ```
+    with
+    ```go
+    obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen})
+    ```
+
+    Replace
+    ```go
+    obj.WithConditions(storage.IfGenerationMatch(0))
+    ```
+    with
+    ```go
+    obj.If(storage.Conditions{DoesNotExist: true})
+    ```
+
+  * `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`).
+
+- Package preview/logging deleted. Use logging instead.
+
+## v0.2.0
+
+- Logging client replaced with preview version (see below).
+
+- New clients for some of Google's Machine Learning APIs: Vision, Speech, and
+Natural Language.
+
+- Preview version of a new [Stackdriver Logging][cloud-logging] client in
+[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging).
+This client uses gRPC as its transport layer, and supports log reading, sinks
+and metrics. It will replace the current client at `cloud.google.com/go/logging` shortly.
+
diff --git a/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md b/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..8fd1bc9c2
--- /dev/null
+++ b/vendor/cloud.google.com/go/CODE_OF_CONDUCT.md
@@ -0,0 +1,44 @@
+# Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing other's private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+ +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) + diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md new file mode 100644 index 000000000..109ca5c43 --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -0,0 +1,250 @@ +# Contributing + +1. [File an issue](https://github.com/googleapis/google-cloud-go/issues/new/choose). + The issue will be used to discuss the bug or feature and should be created + before sending a CL. + +1. [Install Go](https://golang.org/dl/). + 1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`) + is in your `PATH`. + 1. Check it's working by running `go version`. + * If it doesn't work, check the install location, usually + `/usr/local/go`, is on your `PATH`. + +1. Sign one of the +[contributor license agreements](#contributor-license-agreements) below. + +1. Clone the repo: + `git clone https://github.com/googleapis/google-cloud-go` + +1. Change into the checked out source: + `cd google-cloud-go` + +1. Fork the repo. + +1. Set your fork as a remote: + `git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git` + +1. Make changes (see [Formatting](#formatting) and [Style](#style)), commit to + your fork. + + Commit messages should follow the + [Go project style](https://github.com/golang/go/wiki/CommitMessage). For example: + ``` + functions: add gophers codelab + ``` + +1. Send a pull request with your changes. + +1. A maintainer will review the pull request and make comments. + + Prefer adding additional commits over amending and force-pushing since it can + be difficult to follow code reviews when the commit history changes. + + Commits will be squashed when they're merged. + +## Integration Tests + +In addition to the unit tests, you may run the integration test suite. These +directions describe setting up your environment to run integration tests for +_all_ packages: note that many of these instructions may be redundant if you +intend only to run integration tests on a single package. + +#### GCP Setup + +To run the integrations tests, creation and configuration of two projects in +the Google Developers Console is required: one specifically for Firestore +integration tests, and another for all other integration tests. We'll refer to +these projects as "general project" and "Firestore project". + +After creating each project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount) +for each project. 
Ensure the project-level **Owner**
+[IAM role](https://console.cloud.google.com/iam-admin/iam/project) is added to
+each service account. During the creation of the service account, you should
+download the JSON credential file for use later.
+
+Next, ensure the following APIs are enabled in the general project:
+
+- BigQuery API
+- BigQuery Data Transfer API
+- Cloud Dataproc API
+- Cloud Dataproc Control API Private
+- Cloud Datastore API
+- Cloud Firestore API
+- Cloud Key Management Service (KMS) API
+- Cloud Natural Language API
+- Cloud OS Login API
+- Cloud Pub/Sub API
+- Cloud Resource Manager API
+- Cloud Spanner API
+- Cloud Speech API
+- Cloud Translation API
+- Cloud Video Intelligence API
+- Cloud Vision API
+- Compute Engine API
+- Compute Engine Instance Group Manager API
+- Container Registry API
+- Firebase Rules API
+- Google Cloud APIs
+- Google Cloud Deployment Manager V2 API
+- Google Cloud SQL
+- Google Cloud Storage
+- Google Cloud Storage JSON API
+- Google Compute Engine Instance Group Updater API
+- Google Compute Engine Instance Groups API
+- Kubernetes Engine API
+- Stackdriver Error Reporting API
+
+Next, create a Datastore database in the general project, and a Firestore
+database in the Firestore project.
+
+Finally, in the general project, create an API key for the translate API:
+
+- Go to GCP Developer Console.
+- Navigate to APIs & Services > Credentials.
+- Click Create Credentials > API Key.
+- Save this key for use in `GCLOUD_TESTS_API_KEY` as described below.
+
+#### Local Setup
+
+Once the two projects are created and configured, set the following environment
+variables:
+
+- `GCLOUD_TESTS_GOLANG_PROJECT_ID`: Developers Console project's ID (e.g.
+bamboo-shift-455) for the general project.
+- `GCLOUD_TESTS_GOLANG_KEY`: The path to the JSON key file of the general
+project's service account.
+- `GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID`: Developers Console project's ID
+(e.g. doorway-cliff-677) for the Firestore project.
+- `GCLOUD_TESTS_GOLANG_FIRESTORE_KEY`: The path to the JSON key file of the
+Firestore project's service account.
+- `GCLOUD_TESTS_GOLANG_KEYRING`: The full name of the keyring for the tests,
+in the form
+"projects/P/locations/L/keyRings/R". The creation of this is described below.
+- `GCLOUD_TESTS_API_KEY`: API key for using the Translate API.
+- `GCLOUD_TESTS_GOLANG_ZONE`: Compute Engine zone.
+
+Install the [gcloud command-line tool][gcloudcli] to your machine and use it to
+create some resources used in integration tests.
+
+From the project's root directory:
+
+``` sh
+# Sets the default project in your env.
+$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
+
+# Authenticates the gcloud tool with your account.
+$ gcloud auth login
+
+# Create the indexes used in the datastore integration tests.
+$ gcloud datastore indexes create datastore/testdata/index.yaml
+
+# Creates a Google Cloud storage bucket with the same name as your test project,
+# and with the Stackdriver Logging service account as owner, for the sink
+# integration tests in logging.
+$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
+$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
+
+# Creates a PubSub topic for integration tests of storage notifications.
+$ gcloud beta pubsub topics create go-storage-notification-test
+# Next, go to the Pub/Sub dashboard in GCP console. Authorize the user
+# "service-@gs-project-accounts.iam.gserviceaccount.com"
+# as a publisher to that topic.
+ +# Creates a Spanner instance for the spanner integration tests. +$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 10 --description 'Instance for go client test' +# NOTE: Spanner instances are priced by the node-hour, so you may want to +# delete the instance after testing with 'gcloud beta spanner instances delete'. + +$ export MY_KEYRING=some-keyring-name +$ export MY_LOCATION=global +# Creates a KMS keyring, in the same location as the default location for your +# project's buckets. +$ gcloud kms keyrings create $MY_KEYRING --location $MY_LOCATION +# Creates two keys in the keyring, named key1 and key2. +$ gcloud kms keys create key1 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption +$ gcloud kms keys create key2 --keyring $MY_KEYRING --location $MY_LOCATION --purpose encryption +# Sets the GCLOUD_TESTS_GOLANG_KEYRING environment variable. +$ export GCLOUD_TESTS_GOLANG_KEYRING=projects/$GCLOUD_TESTS_GOLANG_PROJECT_ID/locations/$MY_LOCATION/keyRings/$MY_KEYRING +# Authorizes Google Cloud Storage to encrypt and decrypt using key1. +gsutil kms authorize -p $GCLOUD_TESTS_GOLANG_PROJECT_ID -k $GCLOUD_TESTS_GOLANG_KEYRING/cryptoKeys/key1 +``` + +#### Running + +Once you've done the necessary setup, you can run the integration tests by +running: + +``` sh +$ go test -v cloud.google.com/go/... +``` + +#### Replay + +Some packages can record the RPCs during integration tests to a file for +subsequent replay. To record, pass the `-record` flag to `go test`. The +recording will be saved to the _package_`.replay` file. To replay integration +tests from a saved recording, the replay file must be present, the `-short` +flag must be passed to `go test`, and the `GCLOUD_TESTS_GOLANG_ENABLE_REPLAY` +environment variable must have a non-empty value. + +## Contributor License Agreements + +Before we can accept your pull requests you'll need to sign a Contributor +License Agreement (CLA): + +- **If you are an individual writing original source code** and **you own the +intellectual property**, then you'll need to sign an [individual CLA][indvcla]. +- **If you work for a company that wants to allow you to contribute your +work**, then you'll need to sign a [corporate CLA][corpcla]. + +You can sign these electronically (just scroll to the bottom). After that, +we'll be able to accept your pull requests. + +## Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. 
+ +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) + +[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ +[indvcla]: https://developers.google.com/open-source/cla/individual +[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md new file mode 100644 index 000000000..b115812c2 --- /dev/null +++ b/vendor/cloud.google.com/go/README.md @@ -0,0 +1,178 @@ +# Google Cloud Client Libraries for Go + +[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://pkg.go.dev/cloud.google.com/go) + +Go packages for [Google Cloud Platform](https://cloud.google.com) services. + +``` go +import "cloud.google.com/go" +``` + +To install the packages on your system, *do not clone the repo*. Instead: + +1. Change to your project directory: + + ``` + cd /my/cloud/project + ``` +1. Get the package you want to use. Some products have their own module, so it's + best to `go get` the package(s) you want to use: + + ``` + $ go get cloud.google.com/go/firestore # Replace with the package you want to use. + ``` + +**NOTE:** Some of these packages are under development, and may occasionally +make backwards-incompatible changes. + +**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). 
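+
+For illustration, here is a minimal sketch of using one of these packages once
+it has been fetched with `go get` (the `firestore` package and the
+`"my-project-id"` project ID are placeholders; substitute the package and
+project you actually use):
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"cloud.google.com/go/firestore"
+)
+
+func main() {
+	ctx := context.Background()
+	// NewClient uses Application Default Credentials; see the Authorization
+	// section below for other credential options.
+	client, err := firestore.NewClient(ctx, "my-project-id")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer client.Close()
+	// Use client here.
+}
+```
+
+The same pattern (create a client, check the error, close it when done) applies
+to the other packages listed below.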
+ +## Supported APIs + +Google API | Status | Package +------------------------------------------------|--------------|----------------------------------------------------------- +[Asset][cloud-asset] | stable | [`cloud.google.com/go/asset/apiv1`](https://pkg.go.dev/cloud.google.com/go/asset/v1beta) +[Automl][cloud-automl] | stable | [`cloud.google.com/go/automl/apiv1`](https://pkg.go.dev/cloud.google.com/go/automl/apiv1) +[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) +[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`](https://pkg.go.dev/cloud.google.com/go/bigtable) +[Cloudbuild][cloud-build] | stable | [`cloud.google.com/go/cloudbuild/apiv1`](https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1) +[Cloudtasks][cloud-tasks] | stable | [`cloud.google.com/go/cloudtasks/apiv2`](https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2) +[Container][cloud-container] | stable | [`cloud.google.com/go/container/apiv1`](https://pkg.go.dev/cloud.google.com/go/container/apiv1) +[ContainerAnalysis][cloud-containeranalysis] | beta | [`cloud.google.com/go/containeranalysis/apiv1`](https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1) +[Dataproc][cloud-dataproc] | stable | [`cloud.google.com/go/dataproc/apiv1`](https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1) +[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`](https://pkg.go.dev/cloud.google.com/go/datastore) +[Debugger][cloud-debugger] | stable | [`cloud.google.com/go/debugger/apiv2`](https://pkg.go.dev/cloud.google.com/go/debugger/apiv2) +[Dialogflow][cloud-dialogflow] | stable | [`cloud.google.com/go/dialogflow/apiv2`](https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2) +[Data Loss Prevention][cloud-dlp] | stable | [`cloud.google.com/go/dlp/apiv2`](https://pkg.go.dev/cloud.google.com/go/dlp/apiv2) +[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`](https://pkg.go.dev/cloud.google.com/go/errorreporting) +[Firestore][cloud-firestore] | stable | [`cloud.google.com/go/firestore`](https://pkg.go.dev/cloud.google.com/go/firestore) +[IAM][cloud-iam] | stable | [`cloud.google.com/go/iam`](https://pkg.go.dev/cloud.google.com/go/iam) +[IoT][cloud-iot] | stable | [`cloud.google.com/go/iot/apiv1`](https://pkg.go.dev/cloud.google.com/go/iot/apiv1) +[IRM][cloud-irm] | alpha | [`cloud.google.com/go/irm/apiv1alpha2`](https://pkg.go.dev/cloud.google.com/go/irm/apiv1alpha2) +[KMS][cloud-kms] | stable | [`cloud.google.com/go/kms/apiv1`](https://pkg.go.dev/cloud.google.com/go/kms/apiv1) +[Natural Language][cloud-natural-language] | stable | [`cloud.google.com/go/language/apiv1`](https://pkg.go.dev/cloud.google.com/go/language/apiv1) +[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`](https://pkg.go.dev/cloud.google.com/go/logging) +[Memorystore][cloud-memorystore] | alpha | [`cloud.google.com/go/redis/apiv1`](https://pkg.go.dev/cloud.google.com/go/redis/apiv1) +[Monitoring][cloud-monitoring] | stable | [`cloud.google.com/go/monitoring/apiv3`](https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3) +[OS Login][cloud-oslogin] | stable | [`cloud.google.com/go/oslogin/apiv1`](https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1) +[Pub/Sub][cloud-pubsub] | stable | [`cloud.google.com/go/pubsub`](https://pkg.go.dev/cloud.google.com/go/pubsub) +[Phishing Protection][cloud-phishingprotection] | alpha | 
[`cloud.google.com/go/phishingprotection/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1) +[reCAPTCHA Enterprise][cloud-recaptcha] | alpha | [`cloud.google.com/go/recaptchaenterprise/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1) +[Recommender][cloud-recommender] | beta | [`cloud.google.com/go/recommender/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1) +[Scheduler][cloud-scheduler] | stable | [`cloud.google.com/go/scheduler/apiv1`](https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1) +[Securitycenter][cloud-securitycenter] | stable | [`cloud.google.com/go/securitycenter/apiv1`](https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1) +[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`](https://pkg.go.dev/cloud.google.com/go/spanner) +[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`](https://pkg.go.dev/cloud.google.com/go/speech/apiv1) +[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`](https://pkg.go.dev/cloud.google.com/go/storage) +[Talent][cloud-talent] | alpha | [`cloud.google.com/go/talent/apiv4beta1`](https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1) +[Text To Speech][cloud-texttospeech] | stable | [`cloud.google.com/go/texttospeech/apiv1`](https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1) +[Trace][cloud-trace] | stable | [`cloud.google.com/go/trace/apiv2`](https://pkg.go.dev/cloud.google.com/go/trace/apiv2) +[Translate][cloud-translate] | stable | [`cloud.google.com/go/translate`](https://pkg.go.dev/cloud.google.com/go/translate) +[Video Intelligence][cloud-video] | beta | [`cloud.google.com/go/videointelligence/apiv1beta2`](https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2) +[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`](https://pkg.go.dev/cloud.google.com/go/vision/apiv1) +[Webrisk][cloud-webrisk] | alpha | [`cloud.google.com/go/webrisk/apiv1beta1`](https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1) + +> **Alpha status**: the API is still being actively developed. As a +> result, it might change in backward-incompatible ways and is not recommended +> for production use. +> +> **Beta status**: the API is largely complete, but still has outstanding +> features and bugs to be addressed. There may be minor backwards-incompatible +> changes where necessary. +> +> **Stable status**: the API is mature and ready for production use. We will +> continue addressing bugs and feature requests. + +Documentation and examples are available at [pkg.go.dev/cloud.google.com/go](https://pkg.go.dev/cloud.google.com/go) + +## Go Versions Supported + +We support the two most recent major versions of Go. If Google App Engine uses +an older version, we support that as well. + +## Authorization + +By default, each API will use [Google Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials) +for authorization credentials used in calling the API endpoints. This will allow your +application to run in many environments without requiring explicit configuration. + +[snip]:# (auth) +```go +client, err := storage.NewClient(ctx) +``` + +To authorize using a +[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), +pass +[`option.WithCredentialsFile`](https://pkg.go.dev/google.golang.org/api/option#WithCredentialsFile) +to the `NewClient` function of the desired package. 
For example: + +[snip]:# (auth-JSON) +```go +client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json")) +``` + +You can exert more control over authorization by using the +[`golang.org/x/oauth2`](https://pkg.go.dev/golang.org/x/oauth2) package to +create an `oauth2.TokenSource`. Then pass +[`option.WithTokenSource`](https://pkg.go.dev/google.golang.org/api/option#WithTokenSource) +to the `NewClient` function: +[snip]:# (auth-ts) +```go +tokenSource := ... +client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) +``` + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. +See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. + +[cloud-asset]: https://cloud.google.com/security-command-center/docs/how-to-asset-inventory +[cloud-automl]: https://cloud.google.com/automl +[cloud-build]: https://cloud.google.com/cloud-build/ +[cloud-bigquery]: https://cloud.google.com/bigquery/ +[cloud-bigtable]: https://cloud.google.com/bigtable/ +[cloud-container]: https://cloud.google.com/containers/ +[cloud-containeranalysis]: https://cloud.google.com/container-registry/docs/container-analysis +[cloud-dataproc]: https://cloud.google.com/dataproc/ +[cloud-datastore]: https://cloud.google.com/datastore/ +[cloud-dialogflow]: https://cloud.google.com/dialogflow-enterprise/ +[cloud-debugger]: https://cloud.google.com/debugger/ +[cloud-dlp]: https://cloud.google.com/dlp/ +[cloud-errors]: https://cloud.google.com/error-reporting/ +[cloud-firestore]: https://cloud.google.com/firestore/ +[cloud-iam]: https://cloud.google.com/iam/ +[cloud-iot]: https://cloud.google.com/iot-core/ +[cloud-irm]: https://cloud.google.com/incident-response/docs/concepts +[cloud-kms]: https://cloud.google.com/kms/ +[cloud-pubsub]: https://cloud.google.com/pubsub/ +[cloud-storage]: https://cloud.google.com/storage/ +[cloud-language]: https://cloud.google.com/natural-language +[cloud-logging]: https://cloud.google.com/logging/ +[cloud-natural-language]: https://cloud.google.com/natural-language/ +[cloud-memorystore]: https://cloud.google.com/memorystore/ +[cloud-monitoring]: https://cloud.google.com/monitoring/ +[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest +[cloud-phishingprotection]: https://cloud.google.com/phishing-protection/ +[cloud-securitycenter]: https://cloud.google.com/security-command-center/ +[cloud-scheduler]: https://cloud.google.com/scheduler +[cloud-spanner]: https://cloud.google.com/spanner/ +[cloud-speech]: https://cloud.google.com/speech +[cloud-talent]: https://cloud.google.com/solutions/talent-solution/ +[cloud-tasks]: https://cloud.google.com/tasks/ +[cloud-texttospeech]: https://cloud.google.com/texttospeech/ +[cloud-talent]: https://cloud.google.com/solutions/talent-solution/ +[cloud-trace]: https://cloud.google.com/trace/ +[cloud-translate]: https://cloud.google.com/translate +[cloud-recaptcha]: https://cloud.google.com/recaptcha-enterprise/ +[cloud-recommender]: https://cloud.google.com/recommendations/ +[cloud-video]: https://cloud.google.com/video-intelligence/ +[cloud-vision]: https://cloud.google.com/vision +[cloud-webrisk]: 
https://cloud.google.com/web-risk/ diff --git a/vendor/cloud.google.com/go/RELEASING.md b/vendor/cloud.google.com/go/RELEASING.md new file mode 100644 index 000000000..c8c7f9335 --- /dev/null +++ b/vendor/cloud.google.com/go/RELEASING.md @@ -0,0 +1,128 @@ +# Setup from scratch + +1. [Install Go](https://golang.org/dl/). + 1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`) + is in your `PATH`. + 1. Check it's working by running `go version`. + * If it doesn't work, check the install location, usually + `/usr/local/go`, is on your `PATH`. + +1. Sign one of the +[contributor license agreements](#contributor-license-agreements) below. + +1. Clone the repo: + `git clone https://github.com/googleapis/google-cloud-go` + +1. Change into the checked out source: + `cd google-cloud-go` + +1. Fork the repo and add your fork as a secondary remote (this is necessary in + order to create PRs). + +# Which module to release? + +The Go client libraries have several modules. Each module does not strictly +correspond to a single library - they correspond to trees of directories. If a +file needs to be released, you must release the closest ancestor module. + +To see all modules: + +``` +$ cat `find . -name go.mod` | grep module +module cloud.google.com/go +module cloud.google.com/go/bigtable +module cloud.google.com/go/firestore +module cloud.google.com/go/bigquery +module cloud.google.com/go/storage +module cloud.google.com/go/datastore +module cloud.google.com/go/pubsub +module cloud.google.com/go/spanner +module cloud.google.com/go/logging +``` + +The `cloud.google.com/go` is the repository root module. Each other module is +a submodule. + +So, if you need to release a change in `bigtable/bttest/inmem.go`, the closest +ancestor module is `cloud.google.com/go/bigtable` - so you should release a new +version of the `cloud.google.com/go/bigtable` submodule. + +If you need to release a change in `asset/apiv1/asset_client.go`, the closest +ancestor module is `cloud.google.com/go` - so you should release a new version +of the `cloud.google.com/go` repository root module. Note: releasing +`cloud.google.com/go` has no impact on any of the submodules, and vice-versa. +They are released entirely independently. + +# Test failures + +If there are any test failures in the Kokoro build, releases are blocked until +the failures have been resolved. + +# How to release `cloud.google.com/go` + +1. Check for failures in the + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any + failures in the most recent build, address them before proceeding with the + release. +1. Navigate to `~/code/gocloud/` and switch to master. +1. `git pull` +1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases. + The current latest tag `$CV` is the largest tag. It should look something + like `vX.Y.Z` (note: ignore all `LIB/vX.Y.Z` tags - these are tags for a + specific library, not the module root). We'll call the current version `$CV` + and the new version `$NV`. +1. On master, run `git log $CV...` to list all the changes since the last + release. NOTE: You must manually visually parse out changes to submodules [1] + (the `git log` is going to show you things in submodules, which are not going + to be part of your release). +1. Edit `CHANGES.md` to include a summary of the changes. +1. `cd internal/version && go generate && cd -` +1. Commit the changes, push to your fork, and create a PR. +1. Wait for the PR to be reviewed and merged. 
Once it's merged, and without + merging any other PRs in the meantime: + a. Switch to master. + b. `git pull` + c. Tag the repo with the next version: `git tag $NV`. + d. Push the tag to origin: + `git push origin $NV` +2. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) + with the new release, copying the contents of `CHANGES.md`. + +# How to release a submodule + +We have several submodules, including `cloud.google.com/go/logging`, +`cloud.google.com/go/datastore`, and so on. + +To release a submodule: + +(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly) + +1. Check for failures in the + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any + failures in the most recent build, address them before proceeding with the + release. (This applies even if the failures are in a different submodule from the one + being released.) +1. Navigate to `~/code/gocloud/` and switch to master. +1. `git pull` +1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all + existing releases. The current latest tag `$CV` is the largest tag. It + should look something like `datastore/vX.Y.Z`. We'll call the current version + `$CV` and the new version `$NV`. +1. On master, run `git log $CV.. -- datastore/` to list all the changes to the + submodule directory since the last release. +1. Edit `datastore/CHANGES.md` to include a summary of the changes. +1. `cd internal/version && go generate && cd -` +1. Commit the changes, push to your fork, and create a PR. +1. Wait for the PR to be reviewed and merged. Once it's merged, and without + merging any other PRs in the meantime: + a. Switch to master. + b. `git pull` + c. Tag the repo with the next version: `git tag $NV`. + d. Push the tag to origin: + `git push origin $NV` +1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) + with the new release, copying the contents of `datastore/CHANGES.md`. + +# Appendix + +1: This should get better as submodule tooling matures. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 4ff4e2f1c..6b13424fd 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -61,25 +61,14 @@ var ( instID = &cachedValue{k: "instance/id", trim: true} ) -var ( - defaultClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, - }, - }} - subscribeClient = &Client{hc: &http.Client{ - Transport: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, - }} -) +var defaultClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, +}} // NotDefinedError is returned when requested metadata is not defined. 
// @@ -151,7 +140,7 @@ func testOnGCE() bool { }() go func() { - addrs, err := net.LookupHost("metadata.google.internal") + addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal") if err != nil || len(addrs) == 0 { resc <- false return @@ -206,10 +195,9 @@ func systemInfoSuggestsGCE() bool { return name == "Google" || name == "Google Compute Engine" } -// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no -// ResponseHeaderTimeout). +// Subscribe calls Client.Subscribe on the default client. func Subscribe(suffix string, fn func(v string, ok bool) error) error { - return subscribeClient.Subscribe(suffix, fn) + return defaultClient.Subscribe(suffix, fn) } // Get calls Client.Get on the default client. @@ -280,9 +268,14 @@ type Client struct { hc *http.Client } -// NewClient returns a Client that can be used to fetch metadata. All HTTP requests -// will use the given http.Client instead of the default client. +// NewClient returns a Client that can be used to fetch metadata. +// Returns the client that uses the specified http.Client for HTTP requests. +// If nil is specified, returns the default client. func NewClient(c *http.Client) *Client { + if c == nil { + return defaultClient + } + return &Client{hc: c} } @@ -304,7 +297,10 @@ func (c *Client) getETag(suffix string) (value, etag string, err error) { host = metadataIP } u := "http://" + host + "/computeMetadata/v1/" + suffix - req, _ := http.NewRequest("GET", u, nil) + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return "", "", err + } req.Header.Set("Metadata-Flavor", "Google") req.Header.Set("User-Agent", userAgent) res, err := c.hc.Do(req) @@ -407,11 +403,7 @@ func (c *Client) InstanceTags() ([]string, error) { // InstanceName returns the current VM's instance ID string. func (c *Client) InstanceName() (string, error) { - host, err := c.Hostname() - if err != nil { - return "", err - } - return strings.Split(host, ".")[0], nil + return c.getTrimmed("instance/name") } // Zone returns the current VM's zone, such as "us-central1-b". diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go new file mode 100644 index 000000000..237d84561 --- /dev/null +++ b/vendor/cloud.google.com/go/doc.go @@ -0,0 +1,100 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package cloud is the root of the packages used to access Google Cloud +Services. See https://godoc.org/cloud.google.com/go for a full list +of sub-packages. + + +Client Options + +All clients in sub-packages are configurable via client options. These options are +described here: https://godoc.org/google.golang.org/api/option. + + +Authentication and Authorization + +All the clients in sub-packages support authentication via Google Application Default +Credentials (see https://cloud.google.com/docs/authentication/production), or +by providing a JSON key file for a Service Account. See the authentication examples +in this package for details. 
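For example, a minimal sketch of an authenticated client, assuming the `storage` sub-package and a service account key at the hypothetical path `path/to/keyfile.json` (omitting the option falls back to Application Default Credentials):

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()

	// Authenticate with an explicit service account key file (hypothetical
	// path). With no options, Application Default Credentials are used.
	client, err := storage.NewClient(ctx, option.WithCredentialsFile("path/to/keyfile.json"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```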
+ + +Timeouts and Cancellation + +By default, all requests in sub-packages will run indefinitely, retrying on transient +errors when correctness allows. To set timeouts or arrange for cancellation, use +contexts. See the examples for details. + +Do not attempt to control the initial connection (dialing) of a service by setting a +timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts +would be ineffective and would only interfere with credential refreshing, which uses +the same context. + + +Connection Pooling + +Connection pooling differs in clients based on their transport. Cloud +clients either rely on HTTP or gRPC transports to communicate +with Google Cloud. + +Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the +underlying HTTP transport to cache connections for later re-use. These are cached to +the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in +http.DefaultTransport. + +For gRPC clients (all others in this repo), connection pooling is configurable. Users +of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client +option to NewClient calls. This configures the underlying gRPC connections to be +pooled and addressed in a round robin fashion. + + +Using the Libraries with Docker + +Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to +hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928 +for more information. + + +Debugging + +To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See +https://godoc.org/google.golang.org/grpc/grpclog for more information. + +For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2". + + +Client Stability + +Clients in this repository are considered alpha or beta unless otherwise +marked as stable in the README.md. Semver is not used to communicate stability +of clients. + +Alpha and beta clients may change or go away without notice. + +Clients marked stable will maintain compatibility with future versions for as +long as we can reasonably sustain. Incompatible changes might be made in some +situations, including: + +- Security bugs may prompt backwards-incompatible changes. + +- Situations in which components are no longer feasible to maintain without +making breaking changes, including removal. + +- Parts of the client surface may be outright unstable and subject to change. +These parts of the surface will be labeled with the note, "It is EXPERIMENTAL +and subject to change or removal without notice." 
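Tying the Timeouts and Connection Pooling notes together, a minimal sketch, assuming a gRPC-based client such as `pubsub` and placeholder `my-project`/`my-topic` identifiers: the per-call timeout goes on the request context rather than on the context passed to NewClient, and option.WithGRPCConnectionPool sets the pool size.

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/pubsub"
	"google.golang.org/api/option"
)

func main() {
	// Dialing is non-blocking, so no timeout is set on this context.
	ctx := context.Background()

	// Pool size 4 is an arbitrary illustration of WithGRPCConnectionPool.
	client, err := pubsub.NewClient(ctx, "my-project", option.WithGRPCConnectionPool(4))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Bound an individual RPC with a context timeout instead.
	callCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()
	if _, err := client.CreateTopic(callCtx, "my-topic"); err != nil {
		log.Fatal(err)
	}
}
```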
+*/ +package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/go.mod b/vendor/cloud.google.com/go/go.mod new file mode 100644 index 000000000..5bdc67a97 --- /dev/null +++ b/vendor/cloud.google.com/go/go.mod @@ -0,0 +1,24 @@ +module cloud.google.com/go + +go 1.11 + +require ( + cloud.google.com/go/storage v1.10.0 + github.com/golang/mock v1.4.3 + github.com/golang/protobuf v1.4.2 + github.com/google/go-cmp v0.5.0 + github.com/google/martian/v3 v3.0.0 + github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 + github.com/googleapis/gax-go/v2 v2.0.5 + github.com/jstemmer/go-junit-report v0.9.1 + go.opencensus.io v0.22.4 + golang.org/x/lint v0.0.0-20200302205851-738671d3881b + golang.org/x/net v0.0.0-20200707034311-ab3426394381 + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/text v0.3.3 + golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed + google.golang.org/api v0.29.0 + google.golang.org/genproto v0.0.0-20200711021454-869866162049 + google.golang.org/grpc v1.30.0 + google.golang.org/protobuf v1.25.0 // indirect +) diff --git a/vendor/cloud.google.com/go/go.sum b/vendor/cloud.google.com/go/go.sum new file mode 100644 index 000000000..3736901a5 --- /dev/null +++ b/vendor/cloud.google.com/go/go.sum @@ -0,0 +1,484 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0 h1:a/O/bK/vWrYGOTFtH8di4rBxMZnmkjy+Y5LxpDwo+dA= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= 
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0 h1:86K1Gel7BQ9/WmNWn7dTKMvTLFzwtBe5FNqYbi9X35g= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= 
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57 h1:eqyIo2HjKhKe/mJzTG8n4VqvLXIOEG+SLdDqX7xGtkY= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f h1:Jnx61latede7zDD3DiiP4gmNz33uK0U5HDUaF0a/HVQ= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 
h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522 h1:OeRHuibLsmZkFj773W4LcfAGsSxJgfPONhr8cmO+eLA= +golang.org/x/exp 
v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979 h1:Agxu5KLo8o7Bb634SVDnhIfpTvxmzUwhbYAzBvXt6h4= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f h1:hX65Cu3JDlGH3uEdK7I99Ii+9kjD6mvnnpfLdEAH0x4= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 h1:WQ8q63x+f/zpC8Ac1s9wLElVoHhm32p6tudrU72n1QA= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f h1:QBjCr1Fz5kw158VqdE9JfI9cJnl/ymnJWAdMuinqL7Y= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
+golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= +golang.org/x/sys 
v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e h1:hq86ru83GdWTlfQFZGO4nZJTU4Bs2wfHl8oFHRaXsfc= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25 h1:OKbAoGs4fGM5cPLlVQLZGYkFC8OnOfgo6tt0Smf9XhM= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138 h1:H3uGjxCR/6Ds0Mjgyp7LMK81+LvmbvWWEnJhzk1Pi9E= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c h1:97SnQk1GYRXJgvwZ8fadnxDOWfKvkNQHH3CtZntPSrM= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff h1:On1qIo75ByTwFJ4/W2bIqHcwJ9XAqtSWUs8GwRrIhtc= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= 
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d h1:lzLdP95xJmMpwQ6LUHwrc5V7js93hTiY7gkznu0BgmY= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88 h1:4j84u0sokprDu3IdSYHJMmou+YSLflMz8p7yAx/QI4g= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed h1:+qzWo37K31KxduIYaBeMqJ8MUOyTayOQKpH9aDPLMSY= +golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0 h1:VGGbLNyPF7dvYHhcUGYBBGCRDDK0RRJAI6KCvo0CL+E= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0 
h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64 h1:iKtrH9Y8mcbADOP0YFaEMth7OfuHY9xHOwNj4znpM1A= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380 h1:xriR1EgvKfkKxIoU2uUvrMVl+H26359loFFUleSMXFo= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200711021454-869866162049 h1:YFTFpQhgvrLrmxtiIncJxFXeCyq84ixuKWVCaCAi9Oc= +google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf 
v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc h1:TnonUr8u3himcMY0vSh23jFOXA+cnucl1gB6EQTReBI= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a h1:/8zB6iBfHCl1qAnEAWwGPNrUvapuy6CPla1VM0k8hQw= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a h1:LJwr7TCTghdatWv40WobzlKXc9c4s8oGa7QKJUtHhWA= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go index 5232cb673..0a06ea2e8 100644 --- a/vendor/cloud.google.com/go/iam/iam.go +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -38,6 +38,7 @@ type client 
interface { Get(ctx context.Context, resource string) (*pb.Policy, error) Set(ctx context.Context, resource string, p *pb.Policy) error Test(ctx context.Context, resource string, perms []string) ([]string, error) + GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) } // grpcClient implements client for the standard gRPC-based IAMPolicy service. @@ -57,13 +58,22 @@ var withRetry = gax.WithRetry(func() gax.Retryer { }) func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { + return g.GetWithVersion(ctx, resource, 1) +} + +func (g *grpcClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (*pb.Policy, error) { var proto *pb.Policy md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) ctx = insertMetadata(ctx, md) err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { var err error - proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) + proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{ + Resource: resource, + Options: &pb.GetPolicyOptions{ + RequestedPolicyVersion: requestedPolicyVersion, + }, + }) return err }, withRetry) if err != nil { @@ -110,11 +120,18 @@ type Handle struct { resource string } +// A Handle3 provides IAM operations for a resource. It is similar to a Handle, but provides access to newer IAM features (e.g., conditions). +type Handle3 struct { + c client + resource string + version int32 +} + // InternalNewHandle is for use by the Google Cloud Libraries only. // // InternalNewHandle returns a Handle for resource. // The conn parameter refers to a server that must support the IAMPolicy service. -func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle { +func InternalNewHandle(conn grpc.ClientConnInterface, resource string) *Handle { return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource) } @@ -137,6 +154,17 @@ func InternalNewHandleClient(c client, resource string) *Handle { } } +// V3 returns a Handle3, which is like Handle except it sets +// requestedPolicyVersion to 3 when retrieving a policy and policy.version to 3 +// when storing a policy. +func (h *Handle) V3() *Handle3 { + return &Handle3{ + c: h.c, + resource: h.resource, + version: 3, + } +} + // Policy retrieves the IAM policy for the resource. func (h *Handle) Policy(ctx context.Context) (*Policy, error) { proto, err := h.c.Get(ctx, h.resource) @@ -313,3 +341,47 @@ func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { } return metadata.NewOutgoingContext(ctx, out) } + +// A Policy3 is a list of Bindings representing roles granted to members. +// +// The zero Policy3 is a valid policy with no bindings. +// +// It is similar to a Policy, except a Policy3 provides direct access to the +// list of Bindings. +// +// The policy version is always set to 3. +type Policy3 struct { + etag []byte + Bindings []*pb.Binding +} + +// Policy retrieves the IAM policy for the resource. +// +// requestedPolicyVersion is always set to 3. +func (h *Handle3) Policy(ctx context.Context) (*Policy3, error) { + proto, err := h.c.GetWithVersion(ctx, h.resource, h.version) + if err != nil { + return nil, err + } + return &Policy3{ + Bindings: proto.Bindings, + etag: proto.Etag, + }, nil +} + +// SetPolicy replaces the resource's current policy with the supplied Policy. 
+// +// If policy was created from a prior call to Get, then the modification will +// only succeed if the policy has not changed since the Get. +func (h *Handle3) SetPolicy(ctx context.Context, policy *Policy3) error { + return h.c.Set(ctx, h.resource, &pb.Policy{ + Bindings: policy.Bindings, + Etag: policy.etag, + Version: h.version, + }) +} + +// TestPermissions returns the subset of permissions that the caller has on the resource. +func (h *Handle3) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { + return h.c.Test(ctx, h.resource, permissions) +} diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json new file mode 100644 index 000000000..77368c010 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -0,0 +1,770 @@ +{ + "cloud.google.com/go/asset/apiv1": { + "distribution_name": "cloud.google.com/go/asset/apiv1", + "description": "Cloud Asset API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/asset/apiv1beta1": { + "distribution_name": "cloud.google.com/go/asset/apiv1beta1", + "description": "Cloud Asset API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/asset/apiv1p2beta1": { + "distribution_name": "cloud.google.com/go/asset/apiv1p2beta1", + "description": "Cloud Asset API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p2beta1", + "release_level": "beta" + }, + "cloud.google.com/go/asset/apiv1p5beta1": { + "distribution_name": "cloud.google.com/go/asset/apiv1p5beta1", + "description": "Cloud Asset API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1", + "release_level": "beta" + }, + "cloud.google.com/go/automl/apiv1": { + "distribution_name": "cloud.google.com/go/automl/apiv1", + "description": "Cloud AutoML API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/automl/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/automl/apiv1beta1": { + "distribution_name": "cloud.google.com/go/automl/apiv1beta1", + "description": "Cloud AutoML API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/automl/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/bigquery": { + "distribution_name": "cloud.google.com/go/bigquery", + "description": "BigQuery", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery", + "release_level": "ga" + }, + "cloud.google.com/go/bigquery/connection/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", + "description": "BigQuery Connection API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/connection/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/bigquery/connection/apiv1beta1": { + "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1beta1", + "description": "BigQuery Connection API", + "language": "Go", + "client_library_type": "generated", + "docs_url": 
"https://pkg.go.dev/cloud.google.com/go/bigquery/connection/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/bigquery/datatransfer/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/datatransfer/apiv1", + "description": "BigQuery Data Transfer API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/datatransfer/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/bigquery/reservation/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1", + "description": "BigQuery Reservation API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/reservation/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/bigquery/reservation/apiv1beta1": { + "distribution_name": "cloud.google.com/go/bigquery/reservation/apiv1beta1", + "description": "BigQuery Reservation API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/reservation/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/bigquery/storage/apiv1": { + "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1", + "description": "BigQuery Storage API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/storage/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/bigquery/storage/apiv1alpha2": { + "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1alpha2", + "description": "BigQuery Storage API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/storage/apiv1alpha2", + "release_level": "alpha" + }, + "cloud.google.com/go/bigquery/storage/apiv1beta1": { + "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1", + "description": "BigQuery Storage API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/storage/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/bigquery/storage/apiv1beta2": { + "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta2", + "description": "BigQuery Storage API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/storage/apiv1beta2", + "release_level": "beta" + }, + "cloud.google.com/go/bigtable": { + "distribution_name": "cloud.google.com/go/bigtable", + "description": "Cloud BigTable", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/bigtable", + "release_level": "ga" + }, + "cloud.google.com/go/billing/apiv1": { + "distribution_name": "cloud.google.com/go/billing/apiv1", + "description": "Cloud Billing API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/cloudbuild/apiv1/v2": { + "distribution_name": "cloud.google.com/go/cloudbuild/apiv1/v2", + "description": "Cloud Build API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/cloudbuild/apiv1/v2", + "release_level": "ga" + }, + "cloud.google.com/go/cloudtasks/apiv2": { + "distribution_name": "cloud.google.com/go/cloudtasks/apiv2", + "description": "Cloud Tasks API", + "language": 
"Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/cloudtasks/apiv2beta2": { + "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta2", + "description": "Cloud Tasks API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2beta2", + "release_level": "beta" + }, + "cloud.google.com/go/cloudtasks/apiv2beta3": { + "distribution_name": "cloud.google.com/go/cloudtasks/apiv2beta3", + "description": "Cloud Tasks API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/cloudtasks/apiv2beta3", + "release_level": "beta" + }, + "cloud.google.com/go/container/apiv1": { + "distribution_name": "cloud.google.com/go/container/apiv1", + "description": "Kubernetes Engine API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/container/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/containeranalysis/apiv1beta1": { + "distribution_name": "cloud.google.com/go/containeranalysis/apiv1beta1", + "description": "Container Analysis API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/containeranalysis/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/datacatalog/apiv1": { + "distribution_name": "cloud.google.com/go/datacatalog/apiv1", + "description": "Google Cloud Data Catalog API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/datacatalog/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/datacatalog/apiv1beta1": { + "distribution_name": "cloud.google.com/go/datacatalog/apiv1beta1", + "description": "Google Cloud Data Catalog API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/datacatalog/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/dataproc/apiv1": { + "distribution_name": "cloud.google.com/go/dataproc/apiv1", + "description": "Cloud Dataproc API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/dataproc/apiv1beta2": { + "distribution_name": "cloud.google.com/go/dataproc/apiv1beta2", + "description": "Cloud Dataproc API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/dataproc/apiv1beta2", + "release_level": "beta" + }, + "cloud.google.com/go/datastore": { + "distribution_name": "cloud.google.com/go/datastore", + "description": "Cloud Datastore", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/datastore", + "release_level": "ga" + }, + "cloud.google.com/go/datastore/admin/apiv1": { + "distribution_name": "cloud.google.com/go/datastore/admin/apiv1", + "description": "Cloud Datastore API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/datastore/admin/apiv1", + "release_level": "alpha" + }, + "cloud.google.com/go/debugger/apiv2": { + "distribution_name": "cloud.google.com/go/debugger/apiv2", + "description": "Stackdriver Debugger API", + "language": "Go", + "client_library_type": "generated", + "docs_url": 
"https://pkg.go.dev/cloud.google.com/go/debugger/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/dialogflow/apiv2": { + "distribution_name": "cloud.google.com/go/dialogflow/apiv2", + "description": "Dialogflow API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/dlp/apiv2": { + "distribution_name": "cloud.google.com/go/dlp/apiv2", + "description": "Cloud Data Loss Prevention (DLP) API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/dlp/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/errorreporting": { + "distribution_name": "cloud.google.com/go/errorreporting", + "description": "Stackdriver Error Reporting API", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/errorreporting", + "release_level": "beta" + }, + "cloud.google.com/go/errorreporting/apiv1beta1": { + "distribution_name": "cloud.google.com/go/errorreporting/apiv1beta1", + "description": "Stackdriver Error Reporting API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/errorreporting/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/firestore": { + "distribution_name": "cloud.google.com/go/firestore", + "description": "Cloud Firestore API", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/firestore", + "release_level": "ga" + }, + "cloud.google.com/go/firestore/apiv1": { + "distribution_name": "cloud.google.com/go/firestore/apiv1", + "description": "Cloud Firestore API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/firestore/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/firestore/apiv1/admin": { + "distribution_name": "cloud.google.com/go/firestore/apiv1/admin", + "description": "Google Cloud Firestore Admin API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/firestore/apiv1/admin", + "release_level": "ga" + }, + "cloud.google.com/go/gaming/apiv1beta": { + "distribution_name": "cloud.google.com/go/gaming/apiv1beta", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1beta", + "release_level": "beta" + }, + "cloud.google.com/go/iam": { + "distribution_name": "cloud.google.com/go/iam", + "description": "Cloud IAM", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/iam", + "release_level": "ga" + }, + "cloud.google.com/go/iam/credentials/apiv1": { + "distribution_name": "cloud.google.com/go/iam/credentials/apiv1", + "description": "IAM Service Account Credentials API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/iam/credentials/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/iot/apiv1": { + "distribution_name": "cloud.google.com/go/iot/apiv1", + "description": "Cloud IoT API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/iot/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/kms/apiv1": { + "distribution_name": "cloud.google.com/go/kms/apiv1", + "description": "Cloud 
Key Management Service (KMS) API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/kms/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/language/apiv1": { + "distribution_name": "cloud.google.com/go/language/apiv1", + "description": "Cloud Natural Language API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/language/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/language/apiv1beta2": { + "distribution_name": "cloud.google.com/go/language/apiv1beta2", + "description": "Cloud Natural Language API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/language/apiv1beta2", + "release_level": "beta" + }, + "cloud.google.com/go/logging": { + "distribution_name": "cloud.google.com/go/logging", + "description": "Stackdriver Logging API", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/logging", + "release_level": "ga" + }, + "cloud.google.com/go/logging/apiv2": { + "distribution_name": "cloud.google.com/go/logging/apiv2", + "description": "Cloud Logging API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/logging/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/longrunning/autogen": { + "distribution_name": "cloud.google.com/go/longrunning/autogen", + "description": "Long Running Operations API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/longrunning/autogen", + "release_level": "alpha" + }, + "cloud.google.com/go/memcache/apiv1beta2": { + "distribution_name": "cloud.google.com/go/memcache/apiv1beta2", + "description": "Cloud Memorystore for Memcached API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/memcache/apiv1beta2", + "release_level": "beta" + }, + "cloud.google.com/go/monitoring/apiv3/v2": { + "distribution_name": "cloud.google.com/go/monitoring/apiv3/v2", + "description": "Cloud Monitoring API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3/v2", + "release_level": "ga" + }, + "cloud.google.com/go/monitoring/dashboard/apiv1": { + "distribution_name": "cloud.google.com/go/monitoring/dashboard/apiv1", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/dashboard/apiv1", + "release_level": "beta" + }, + "cloud.google.com/go/osconfig/agentendpoint/apiv1": { + "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1", + "description": "Cloud OS Config API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/agentendpoint/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/osconfig/agentendpoint/apiv1beta": { + "distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta", + "description": "Cloud OS Config API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/agentendpoint/apiv1beta", + "release_level": "beta" + }, + "cloud.google.com/go/osconfig/apiv1": { + "distribution_name": "cloud.google.com/go/osconfig/apiv1", + "description": "Cloud OS Config API", + "language": 
"Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/osconfig/apiv1beta": { + "distribution_name": "cloud.google.com/go/osconfig/apiv1beta", + "description": "Cloud OS Config API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/apiv1beta", + "release_level": "beta" + }, + "cloud.google.com/go/oslogin/apiv1": { + "distribution_name": "cloud.google.com/go/oslogin/apiv1", + "description": "Cloud OS Login API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/oslogin/apiv1beta": { + "distribution_name": "cloud.google.com/go/oslogin/apiv1beta", + "description": "Cloud OS Login API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/oslogin/apiv1beta", + "release_level": "beta" + }, + "cloud.google.com/go/phishingprotection/apiv1beta1": { + "distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1", + "description": "Phishing Protection API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/phishingprotection/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/policytroubleshooter/apiv1": { + "distribution_name": "cloud.google.com/go/policytroubleshooter/apiv1", + "description": "Policy Troubleshooter API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/policytroubleshooter/apiv1", + "release_level": "beta" + }, + "cloud.google.com/go/pubsub": { + "distribution_name": "cloud.google.com/go/pubsub", + "description": "Cloud PubSub", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsub", + "release_level": "ga" + }, + "cloud.google.com/go/pubsub/apiv1": { + "distribution_name": "cloud.google.com/go/pubsub/apiv1", + "description": "Cloud Pub/Sub API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/pubsub/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/recaptchaenterprise/apiv1": { + "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1", + "description": "reCAPTCHA Enterprise API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1", + "release_level": "beta" + }, + "cloud.google.com/go/recaptchaenterprise/apiv1beta1": { + "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1beta1", + "description": "reCAPTCHA Enterprise API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/recommender/apiv1": { + "distribution_name": "cloud.google.com/go/recommender/apiv1", + "description": "Recommender API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/recommender/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/recommender/apiv1beta1": { + "distribution_name": "cloud.google.com/go/recommender/apiv1beta1", + "description": "Recommender API", + "language": "Go", + "client_library_type": "generated", + "docs_url": 
"https://pkg.go.dev/cloud.google.com/go/recommender/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/redis/apiv1": { + "distribution_name": "cloud.google.com/go/redis/apiv1", + "description": "Google Cloud Memorystore for Redis API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/redis/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/redis/apiv1beta1": { + "distribution_name": "cloud.google.com/go/redis/apiv1beta1", + "description": "Google Cloud Memorystore for Redis API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/redis/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/rpcreplay": { + "distribution_name": "cloud.google.com/go/rpcreplay", + "description": "RPC Replay", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/rpcreplay", + "release_level": "ga" + }, + "cloud.google.com/go/scheduler/apiv1": { + "distribution_name": "cloud.google.com/go/scheduler/apiv1", + "description": "Cloud Scheduler API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/scheduler/apiv1beta1": { + "distribution_name": "cloud.google.com/go/scheduler/apiv1beta1", + "description": "Cloud Scheduler API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/scheduler/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/secretmanager/apiv1": { + "distribution_name": "cloud.google.com/go/secretmanager/apiv1", + "description": "Secret Manager API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/secretmanager/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/secretmanager/apiv1beta1": { + "distribution_name": "cloud.google.com/go/secretmanager/apiv1beta1", + "description": "Secret Manager API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/secretmanager/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/securitycenter/apiv1": { + "distribution_name": "cloud.google.com/go/securitycenter/apiv1", + "description": "Security Command Center API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/securitycenter/apiv1beta1": { + "distribution_name": "cloud.google.com/go/securitycenter/apiv1beta1", + "description": "Cloud Security Command Center API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/securitycenter/apiv1p1beta1": { + "distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1", + "description": "Security Command Center API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1p1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/securitycenter/settings/apiv1beta1": { + "distribution_name": "cloud.google.com/go/securitycenter/settings/apiv1beta1", + "description": "Cloud Security Command Center API", + "language": "Go", + "client_library_type": 
"generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/settings/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/servicedirectory/apiv1beta1": { + "distribution_name": "cloud.google.com/go/servicedirectory/apiv1beta1", + "description": "Service Directory API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/servicedirectory/apiv1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/spanner": { + "distribution_name": "cloud.google.com/go/spanner", + "description": "Cloud Spanner", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/spanner", + "release_level": "ga" + }, + "cloud.google.com/go/spanner/admin/database/apiv1": { + "distribution_name": "cloud.google.com/go/spanner/admin/database/apiv1", + "description": "Cloud Spanner Database Admin API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/spanner/admin/database/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/spanner/admin/instance/apiv1": { + "distribution_name": "cloud.google.com/go/spanner/admin/instance/apiv1", + "description": "Cloud Spanner Instance Admin API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/spanner/admin/instance/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/spanner/apiv1": { + "distribution_name": "cloud.google.com/go/spanner/apiv1", + "description": "Cloud Spanner API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/spanner/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/speech/apiv1": { + "distribution_name": "cloud.google.com/go/speech/apiv1", + "description": "Cloud Speech-to-Text API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/speech/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/speech/apiv1p1beta1": { + "distribution_name": "cloud.google.com/go/speech/apiv1p1beta1", + "description": "Cloud Speech-to-Text API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/speech/apiv1p1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/storage": { + "distribution_name": "cloud.google.com/go/storage", + "description": "Cloud Storage (GCS)", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/storage", + "release_level": "ga" + }, + "cloud.google.com/go/talent/apiv4beta1": { + "distribution_name": "cloud.google.com/go/talent/apiv4beta1", + "description": "Cloud Talent Solution API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/talent/apiv4beta1", + "release_level": "beta" + }, + "cloud.google.com/go/texttospeech/apiv1": { + "distribution_name": "cloud.google.com/go/texttospeech/apiv1", + "description": "Cloud Text-to-Speech API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/texttospeech/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/trace": { + "distribution_name": "cloud.google.com/go/trace", + "description": "Stackdriver Trace", + "language": "Go", + "client_library_type": "manual", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/trace", + "release_level": 
"ga" + }, + "cloud.google.com/go/trace/apiv1": { + "distribution_name": "cloud.google.com/go/trace/apiv1", + "description": "Stackdriver Trace API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/trace/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/trace/apiv2": { + "distribution_name": "cloud.google.com/go/trace/apiv2", + "description": "Stackdriver Trace API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/trace/apiv2", + "release_level": "ga" + }, + "cloud.google.com/go/translate/apiv3": { + "distribution_name": "cloud.google.com/go/translate/apiv3", + "description": "Cloud Translation API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/translate/apiv3", + "release_level": "ga" + }, + "cloud.google.com/go/videointelligence/apiv1": { + "distribution_name": "cloud.google.com/go/videointelligence/apiv1", + "description": "Cloud Video Intelligence API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/videointelligence/apiv1beta2": { + "distribution_name": "cloud.google.com/go/videointelligence/apiv1beta2", + "description": "Google Cloud Video Intelligence API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/videointelligence/apiv1beta2", + "release_level": "beta" + }, + "cloud.google.com/go/vision/apiv1": { + "distribution_name": "cloud.google.com/go/vision/apiv1", + "description": "Cloud Vision API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/vision/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/vision/apiv1p1beta1": { + "distribution_name": "cloud.google.com/go/vision/apiv1p1beta1", + "description": "Cloud Vision API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/vision/apiv1p1beta1", + "release_level": "beta" + }, + "cloud.google.com/go/webrisk/apiv1": { + "distribution_name": "cloud.google.com/go/webrisk/apiv1", + "description": "Web Risk API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1", + "release_level": "ga" + }, + "cloud.google.com/go/webrisk/apiv1beta1": { + "distribution_name": "cloud.google.com/go/webrisk/apiv1beta1", + "description": "Web Risk API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1", + "release_level": "beta" + } +} diff --git a/vendor/cloud.google.com/go/internal/README.md b/vendor/cloud.google.com/go/internal/README.md new file mode 100644 index 000000000..8857c8f6f --- /dev/null +++ b/vendor/cloud.google.com/go/internal/README.md @@ -0,0 +1,18 @@ +# Internal + +This directory contains internal code for cloud.google.com/go packages. + +## .repo-metadata-full.json + +`.repo-metadata-full.json` contains metadata about the packages in this repo. It +is generated by `internal/gapicgen/generator`. It's processed by external tools +to build lists of all of the packages. + +Don't make breaking changes to the format without consulting with the external +tools. 
+ +One day, we may want to create individual `.repo-metadata.json` files next to +each package, which is the pattern followed by some other languages. External +tools would then talk to pkg.go.dev or some other service to get the overall +list of packages and use the `.repo-metadata.json` files to get the additional +metadata required. For now, `.repo-metadata-full.json` includes everything. \ No newline at end of file diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go index d291921b1..3328019a3 100644 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -26,7 +26,7 @@ import ( // Repo is the current version of the client libraries in this // repo. It should be a date in YYYYMMDD format. -const Repo = "20190802" +const Repo = "20200706" // Go returns the Go runtime version. The returned string // has no whitespace. diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md new file mode 100644 index 000000000..f6d57be50 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -0,0 +1,83 @@ +# Changes + +## v1.10.0 +- Bump dependency on google.golang.org/api to capture changes to retry logic + which will make retries on writes more resilient. +- Improve documentation for Writer.ChunkSize. +- Fix a bug in lifecycle to allow callers to clear lifecycle rules on a bucket. + +## v1.9.0 +- Add retry for transient network errors on most operations (with the exception + of writes). +- Bump dependency for google.golang.org/api to capture a change in the default + HTTP transport which will improve performance for reads under heavy load. +- Add CRC32C checksum validation option to Composer. + +## v1.8.0 +- Add support for V4 signed post policies. + +## v1.7.0 +- V4 signed URL support: + - Add support for bucket-bound domains and virtual hosted style URLs. + - Add support for query parameters in the signature. + - Fix text encoding to align with standards. +- Add the object name to query parameters for write calls. +- Fix retry behavior when reading files with Content-Encoding gzip. +- Fix response header in reader. +- New code examples: + - Error handling for `ObjectHandle` preconditions. + - Existence checks for buckets and objects. + +## v1.6.0 + +- Updated option handling: + - Don't drop custom scopes (#1756) + - Don't drop port in provided endpoint (#1737) + +## v1.5.0 + +- Honor WithEndpoint client option for reads as well as writes. +- Add archive storage class to docs. +- Make fixes to storage benchwrapper. + +## v1.4.0 + +- When listing objects in a bucket, allow callers to specify which attributes + are queried. This allows for performance optimization. + +## v1.3.0 + +- Use `storage.googleapis.com/storage/v1` by default for GCS requests + instead of `www.googleapis.com/storage/v1`. + +## v1.2.1 + +- Fixed a bug where UniformBucketLevelAccess and BucketPolicyOnly were not + being sent in all cases. + +## v1.2.0 + +- Add support for UniformBucketLevelAccess. This configures access checks + to use only bucket-level IAM policies. + See: https://godoc.org/cloud.google.com/go/storage#UniformBucketLevelAccess. +- Fix userAgent to use correct version. + +## v1.1.2 + +- Fix memory leak in BucketIterator and ObjectIterator. + +## v1.1.1 + +- Send BucketPolicyOnly even when it's disabled. + +## v1.1.0 + +- Performance improvements for ObjectIterator and BucketIterator. 
+- Fix Bucket.ObjectIterator size calculation checks. +- Added HMACKeyOptions to all the methods which allows for options such as + UserProject to be set per invocation and optionally be used. + +## v1.0.0 + +This is the first tag to carve out storage as its own module. See: +https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. diff --git a/vendor/github.com/pquerna/otp/LICENSE b/vendor/cloud.google.com/go/storage/LICENSE similarity index 100% rename from vendor/github.com/pquerna/otp/LICENSE rename to vendor/cloud.google.com/go/storage/LICENSE diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 07c470d3e..478482645 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -232,10 +232,18 @@ type BucketAttrs struct { // ACL is the list of access control rules on the bucket. ACL []ACLRule - // BucketPolicyOnly configures access checks to use only bucket-level IAM - // policies. + // BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of + // UniformBucketLevelAccess is recommended above the use of this field. + // Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to + // true, will enable UniformBucketLevelAccess. BucketPolicyOnly BucketPolicyOnly + // UniformBucketLevelAccess configures access checks to use only bucket-level IAM + // policies and ignore any ACL rules for the bucket. + // See https://cloud.google.com/storage/docs/uniform-bucket-level-access + // for more information. + UniformBucketLevelAccess UniformBucketLevelAccess + // DefaultObjectACL is the list of access controls to // apply to new objects when no object ACL is provided. DefaultObjectACL []ACLRule @@ -267,14 +275,10 @@ type BucketAttrs struct { // StorageClass is the default storage class of the bucket. This defines // how objects in the bucket are stored and determines the SLA - // and the cost of storage. Typical values are "MULTI_REGIONAL", - // "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and - // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which - // is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on - // the bucket's location settings. - // - // "DURABLE_REDUCED_AVAILABILITY", "MULTI_REGIONAL" and "REGIONAL" - // are considered legacy storage classes. + // and the cost of storage. Typical values are "STANDARD", "NEARLINE", + // "COLDLINE" and "ARCHIVE". Defaults to "STANDARD". + // See https://cloud.google.com/storage/docs/storage-classes for all + // valid values. StorageClass string // Created is the creation time of the bucket. @@ -327,8 +331,8 @@ type BucketAttrs struct { LocationType string } -// BucketPolicyOnly configures access checks to use only bucket-level IAM -// policies. +// BucketPolicyOnly is an alias for UniformBucketLevelAccess. +// Use of UniformBucketLevelAccess is preferred above BucketPolicyOnly. type BucketPolicyOnly struct { // Enabled specifies whether access checks use only bucket-level IAM // policies. Enabled may be disabled until the locked time. @@ -338,6 +342,17 @@ type BucketPolicyOnly struct { LockedTime time.Time } +// UniformBucketLevelAccess configures access checks to use only bucket-level IAM +// policies. +type UniformBucketLevelAccess struct { + // Enabled specifies whether access checks use only bucket-level IAM + // policies. Enabled may be disabled until the locked time. 
+ Enabled bool + // LockedTime specifies the deadline for changing Enabled from true to + // false. + LockedTime time.Time +} + // Lifecycle is the lifecycle configuration for objects in the bucket. type Lifecycle struct { Rules []LifecycleRule @@ -446,8 +461,7 @@ type LifecycleCondition struct { // MatchesStorageClasses is the condition matching the object's storage // class. // - // Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", - // "STANDARD", and "DURABLE_REDUCED_AVAILABILITY". + // Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". MatchesStorageClasses []string // NumNewerVersions is the condition matching objects with a number of newer versions. @@ -495,26 +509,27 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) { return nil, err } return &BucketAttrs{ - Name: b.Name, - Location: b.Location, - MetaGeneration: b.Metageneration, - DefaultEventBasedHold: b.DefaultEventBasedHold, - StorageClass: b.StorageClass, - Created: convertTime(b.TimeCreated), - VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, - ACL: toBucketACLRules(b.Acl), - DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl), - Labels: b.Labels, - RequesterPays: b.Billing != nil && b.Billing.RequesterPays, - Lifecycle: toLifecycle(b.Lifecycle), - RetentionPolicy: rp, - CORS: toCORS(b.Cors), - Encryption: toBucketEncryption(b.Encryption), - Logging: toBucketLogging(b.Logging), - Website: toBucketWebsite(b.Website), - BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration), - Etag: b.Etag, - LocationType: b.LocationType, + Name: b.Name, + Location: b.Location, + MetaGeneration: b.Metageneration, + DefaultEventBasedHold: b.DefaultEventBasedHold, + StorageClass: b.StorageClass, + Created: convertTime(b.TimeCreated), + VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, + ACL: toBucketACLRules(b.Acl), + DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl), + Labels: b.Labels, + RequesterPays: b.Billing != nil && b.Billing.RequesterPays, + Lifecycle: toLifecycle(b.Lifecycle), + RetentionPolicy: rp, + CORS: toCORS(b.Cors), + Encryption: toBucketEncryption(b.Encryption), + Logging: toBucketLogging(b.Logging), + Website: toBucketWebsite(b.Website), + BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration), + UniformBucketLevelAccess: toUniformBucketLevelAccess(b.IamConfiguration), + Etag: b.Etag, + LocationType: b.LocationType, }, nil } @@ -540,9 +555,9 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { bb = &raw.BucketBilling{RequesterPays: true} } var bktIAM *raw.BucketIamConfiguration - if b.BucketPolicyOnly.Enabled { + if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled { bktIAM = &raw.BucketIamConfiguration{ - BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{ + UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ Enabled: true, }, } @@ -609,10 +624,20 @@ type BucketAttrsToUpdate struct { // newly created objects in this bucket. DefaultEventBasedHold optional.Bool - // BucketPolicyOnly configures access checks to use only bucket-level IAM - // policies. + // BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of + // UniformBucketLevelAccess is recommended above the use of this field. + // Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to + // true, will enable UniformBucketLevelAccess. If both BucketPolicyOnly and + // UniformBucketLevelAccess are set, the value of UniformBucketLevelAccess + // will take precedence. 
BucketPolicyOnly *BucketPolicyOnly + // UniformBucketLevelAccess configures access checks to use only bucket-level IAM + // policies and ignore any ACL rules for the bucket. + // See https://cloud.google.com/storage/docs/uniform-bucket-level-access + // for more information. + UniformBucketLevelAccess *UniformBucketLevelAccess + // If set, updates the retention policy of the bucket. Using // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. // @@ -701,8 +726,17 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } if ua.BucketPolicyOnly != nil { rb.IamConfiguration = &raw.BucketIamConfiguration{ - BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{ - Enabled: ua.BucketPolicyOnly.Enabled, + UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ + Enabled: ua.BucketPolicyOnly.Enabled, + ForceSendFields: []string{"Enabled"}, + }, + } + } + if ua.UniformBucketLevelAccess != nil { + rb.IamConfiguration = &raw.BucketIamConfiguration{ + UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{ + Enabled: ua.UniformBucketLevelAccess.Enabled, + ForceSendFields: []string{"Enabled"}, }, } } @@ -716,6 +750,7 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } if ua.Lifecycle != nil { rb.Lifecycle = toRawLifecycle(*ua.Lifecycle) + rb.ForceSendFields = append(rb.ForceSendFields, "Lifecycle") } if ua.Logging != nil { if *ua.Logging == (BucketLogging{}) { @@ -902,7 +937,7 @@ func toCORS(rc []*raw.BucketCors) []CORS { func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle { var rl raw.BucketLifecycle if len(l.Rules) == 0 { - return nil + rl.ForceSendFields = []string{"Rule"} } for _, r := range l.Rules { rr := &raw.BucketLifecycleRule{ @@ -952,12 +987,11 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { }, } - switch { - case rr.Condition.IsLive == nil: + if rr.Condition.IsLive == nil { r.Condition.Liveness = LiveAndArchived - case *rr.Condition.IsLive == true: + } else if *rr.Condition.IsLive { r.Condition.Liveness = Live - case *rr.Condition.IsLive == false: + } else { r.Condition.Liveness = Archived } @@ -1041,8 +1075,26 @@ func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly { } } +func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLevelAccess { + if b == nil || b.UniformBucketLevelAccess == nil || !b.UniformBucketLevelAccess.Enabled { + return UniformBucketLevelAccess{} + } + lt, err := time.Parse(time.RFC3339, b.UniformBucketLevelAccess.LockedTime) + if err != nil { + return UniformBucketLevelAccess{ + Enabled: true, + } + } + return UniformBucketLevelAccess{ + Enabled: true, + LockedTime: lt, + } +} + // Objects returns an iterator over the objects in the bucket that match the Query q. // If q is nil, no filtering is done. +// +// Note: The returned iterator is not safe for concurrent operations without explicit synchronization. func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { it := &ObjectIterator{ ctx: ctx, @@ -1059,6 +1111,8 @@ func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { } // An ObjectIterator is an iterator over ObjectAttrs. +// +// Note: This iterator is not safe for concurrent operations without explicit synchronization. type ObjectIterator struct { ctx context.Context bucket *BucketHandle @@ -1069,6 +1123,8 @@ type ObjectIterator struct { } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
+// +// Note: This method is not safe for concurrent operations without explicit synchronization. func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // Next returns the next result. Its second return value is iterator.Done if @@ -1078,6 +1134,8 @@ func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } // If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will // have a non-empty Prefix field, and a zero value for all other fields. These // represent prefixes. +// +// Note: This method is not safe for concurrent operations without explicit synchronization. func (it *ObjectIterator) Next() (*ObjectAttrs, error) { if err := it.nextFunc(); err != nil { return nil, err @@ -1094,6 +1152,9 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) req.Delimiter(it.query.Delimiter) req.Prefix(it.query.Prefix) req.Versions(it.query.Versions) + if len(it.query.fieldSelection) > 0 { + req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection)) + } req.PageToken(pageToken) if it.bucket.userProject != "" { req.UserProject(it.bucket.userProject) @@ -1126,6 +1187,8 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) // optionally set the iterator's Prefix field to restrict the list to buckets // whose names begin with the prefix. By default, all buckets in the project // are returned. +// +// Note: The returned iterator is not safe for concurrent operations without explicit synchronization. func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { it := &BucketIterator{ ctx: ctx, @@ -1136,10 +1199,13 @@ func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator it.fetch, func() int { return len(it.buckets) }, func() interface{} { b := it.buckets; it.buckets = nil; return b }) + return it } // A BucketIterator is an iterator over BucketAttrs. +// +// Note: This iterator is not safe for concurrent operations without explicit synchronization. type BucketIterator struct { // Prefix restricts the iterator to buckets whose names begin with it. Prefix string @@ -1155,6 +1221,8 @@ type BucketIterator struct { // Next returns the next result. Its second return value is iterator.Done if // there are no more results. Once Next returns iterator.Done, all subsequent // calls will return iterator.Done. +// +// Note: This method is not safe for concurrent operations without explicit synchronization. func (it *BucketIterator) Next() (*BucketAttrs, error) { if err := it.nextFunc(); err != nil { return nil, err @@ -1165,6 +1233,8 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) { } // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +// +// Note: This method is not safe for concurrent operations without explicit synchronization. func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) { diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go index 52162e72d..61983df5a 100644 --- a/vendor/cloud.google.com/go/storage/copy.go +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -166,6 +166,13 @@ type Composer struct { // or zero-valued attributes are ignored. ObjectAttrs + // SendCRC specifies whether to transmit a CRC32C field. 
It should be set + // to true in addition to setting the Composer's CRC32C field, because zero + // is a valid CRC and normally a zero would not be transmitted. + // If a CRC32C is sent, and the data in the destination object does not match + // the checksum, the compose will be rejected. + SendCRC32C bool + dst *ObjectHandle srcs []*ObjectHandle } @@ -186,6 +193,9 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { // Compose requires a non-empty Destination, so we always set it, // even if the caller-provided ObjectAttrs is the zero value. req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) + if c.SendCRC32C { + req.Destination.Crc32c = encodeUint32(c.ObjectAttrs.CRC32C) + } for _, src := range c.srcs { if err := src.validate(); err != nil { return nil, err diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 88f645904..614ea11a5 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -117,6 +117,33 @@ Objects also have attributes, which you can fetch with Attrs: fmt.Printf("object %s has size %d and can be read using %s\n", objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) +Listing objects + +Listing objects in a bucket is done with the Bucket.Objects method: + + query := &storage.Query{Prefix: ""} + + var names []string + it := bkt.Objects(ctx, query) + for { + attrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + log.Fatal(err) + } + names = append(names, attrs.Name) + } + +If only a subset of object attributes is needed when listing, specifying this +subset using Query.SetAttrSelection may speed up the listing process: + + query := &storage.Query{Prefix: ""} + query.SetAttrSelection([]string{"Name"}) + + // ... as before + ACLs Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of @@ -164,6 +191,21 @@ SignedURL for details. } fmt.Println(url) +Post Policy V4 Signed Request + +A type of signed request that allows uploads through HTML forms directly to Cloud Storage with +temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised +by a user. + +For more information, please see https://cloud.google.com/storage/docs/xml-api/post-object as well +as the documentation of GenerateSignedPostPolicyV4. + + pv4, err := storage.GenerateSignedPostPolicyV4(bucketName, objectName, opts) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields) + Errors Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error). 
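Note on the storage/bucket.go hunks above: UniformBucketLevelAccess supersedes BucketPolicyOnly, and BucketAttrsToUpdate now carries ForceSendFields so that an explicit Enabled value is always transmitted. The patch itself ships no usage example, so the following is a minimal sketch (not part of the patch) of how a caller might enable the feature through the new BucketAttrsToUpdate field; the bucket name is a placeholder and the vendored cloud.google.com/go/storage package from this patch is assumed.

    package main

    import (
        "context"
        "fmt"
        "log"

        "cloud.google.com/go/storage"
    )

    func main() {
        ctx := context.Background()
        client, err := storage.NewClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        // "example-bucket" is a placeholder. Enabling UniformBucketLevelAccess
        // turns off per-object ACL evaluation for the whole bucket.
        attrs, err := client.Bucket("example-bucket").Update(ctx, storage.BucketAttrsToUpdate{
            UniformBucketLevelAccess: &storage.UniformBucketLevelAccess{Enabled: true},
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("uniform bucket-level access enabled:", attrs.UniformBucketLevelAccess.Enabled)
    }

Once enabled and past the LockedTime reported in the returned BucketAttrs, the setting can no longer be switched back off, which is why the library sends the field explicitly rather than omitting a false value.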
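Note on the iam/iam.go hunk earlier in this patch: it introduces a version-3 surface (Handle.V3, Handle3, Policy3, GetWithVersion) but no usage example. The following is a minimal sketch (not part of the patch) of how a caller might read a v3 policy, append a conditional binding, and write it back; the bucket name and member are placeholders, and it assumes the vendored storage, genproto iam/v1, and type/expr packages referenced by these files.

    package main

    import (
        "context"
        "log"

        "cloud.google.com/go/storage"
        iampb "google.golang.org/genproto/googleapis/iam/v1"
        expr "google.golang.org/genproto/googleapis/type/expr"
    )

    func main() {
        ctx := context.Background()
        client, err := storage.NewClient(ctx)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        // V3() requests policy version 3 so conditional bindings are visible.
        // "example-bucket" is a placeholder name.
        h := client.Bucket("example-bucket").IAM().V3()

        policy, err := h.Policy(ctx)
        if err != nil {
            log.Fatal(err)
        }

        // Append a conditional binding. The etag stored inside Policy3 makes
        // SetPolicy fail if the policy changed since Policy() was called.
        policy.Bindings = append(policy.Bindings, &iampb.Binding{
            Role:    "roles/storage.objectViewer",
            Members: []string{"user:someone@example.com"}, // placeholder member
            Condition: &expr.Expr{
                Title:      "expires-2022",
                Expression: `request.time < timestamp("2022-01-01T00:00:00Z")`,
            },
        })
        if err := h.SetPolicy(ctx, policy); err != nil {
            log.Fatal(err)
        }
    }

Handle3 exposes the binding list directly instead of the role-based helpers on Handle, which is what allows conditions to survive a read-modify-write cycle.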
diff --git a/vendor/cloud.google.com/go/storage/go.mod b/vendor/cloud.google.com/go/storage/go.mod new file mode 100644 index 000000000..2eb6df3cb --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go.mod @@ -0,0 +1,18 @@ +module cloud.google.com/go/storage + +go 1.11 + +require ( + cloud.google.com/go v0.57.0 + cloud.google.com/go/bigquery v1.8.0 // indirect + github.com/golang/protobuf v1.4.2 + github.com/google/go-cmp v0.4.1 + github.com/googleapis/gax-go/v2 v2.0.5 + golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 // indirect + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/sys v0.0.0-20200523222454-059865788121 // indirect + golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2 // indirect + google.golang.org/api v0.28.0 + google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790 + google.golang.org/grpc v1.29.1 +) diff --git a/vendor/cloud.google.com/go/storage/go.sum b/vendor/cloud.google.com/go/storage/go.sum new file mode 100644 index 000000000..5d3fca5f8 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go.sum @@ -0,0 +1,450 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0 h1:GGslhk/BU052LPlnI1vpp3fcbUs+hQ3E+Doti/3/vF8= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0 h1:EpMNVUorLiZIELdMZbCYX/ByTFCdoYopYAGxaGVz9ms= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0 h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0 h1:K2NyuHRuv15ku6eUpe0DQk5ZykPMnSOnvuVf6IHcjaE= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0 h1:a/O/bK/vWrYGOTFtH8di4rBxMZnmkjy+Y5LxpDwo+dA= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= 
+cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0 h1:9/vpR43S4aJaROxqQHQ3nH9lfyKKV0dC3vOmnw8ebQQ= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979 h1:Agxu5KLo8o7Bb634SVDnhIfpTvxmzUwhbYAzBvXt6h4= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a h1:7Wlg8L54In96HTWOaI4sreLJ6qfyGuvSau5el3fK41Y= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa h1:F+8P+gmewFQYRk6JoLQLwjBCTu3mcIURZfNkVweuRKA= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net 
v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5 h1:WQ8q63x+f/zpC8Ac1s9wLElVoHhm32p6tudrU72n1QA= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 h1:eDrdRpKgkcCqKZQwyZRyeFZgfqt37SL7Kv3tok06cKE= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d h1:nc5K6ox/4lTFbMVSL9WRR81ixkcwXThoiF6yf+R9scA= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e h1:hq86ru83GdWTlfQFZGO4nZJTU4Bs2wfHl8oFHRaXsfc= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff h1:On1qIo75ByTwFJ4/W2bIqHcwJ9XAqtSWUs8GwRrIhtc= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c h1:2EA2K0k9bcvvEDlqD8xdlOhCOqq+O/p9Voqi4x9W1YU= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a 
h1:7YaEqUc1tUg0yDwvdX+3U5bwrBg7u3FFAZ5D8gUs4/c= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74 h1:KW20qMcLRWuIgjdCpHFJbVZA7zsDKtFXPNcm7/eI5ZA= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56 h1:DFtSed2q3HtNuVazwVDZ4nSRS/JrZEig0gz2BY4VNrg= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d h1:7M9AXzLrJWWGdDYtBblPHBTnHtaN6KKQ98OYb35mLlY= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb h1:iKlO7ROJc6SttHKlxzwGytRtBUqX4VARrNTgP2YLX5M= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d h1:3K34ovZAOnVaUPxanr0j4ghTZTPTA0CnXvjCl+5lZqk= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4 h1:kDtqNkeBrZb8B+atrj50B5XLHpzXXqcCdZPP/ApQ5NY= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d h1:lzLdP95xJmMpwQ6LUHwrc5V7js93hTiY7gkznu0BgmY= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2 h1:FD4wDsP+CQUqh2V12OBOt90pLHVToe58P++fUu3ggV4= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= +google.golang.org/api 
v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0 h1:GwFK8+l5/gdsOYKz5p6M4UK+QT8OvmHWZPJCnf+5DjA= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba 
h1:pRj9OXZbwNtbtZtOB4dLwfK4u+EVRMvP+e9zKkg2grM= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150 h1:VPpdpQkGvFicX9yo4G5oxZPi9ALBnEOZblPSa/Wa2m4= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90 h1:7THRSvPuzF1bql5kyFzX0JM0vpGhwuhskgJrJsbZ80Y= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce h1:1mbrb1tUU+Zmt5C94IGKADBTJZjZXAd+BubWi7r9EiI= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383 h1:Vo0fD5w0fUKriWlZLyrim2GXbumyN0D6euW79T9PgEE= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171 h1:xes2Q2k+d/+YNXVw0FpZkIDJiaux4OVrRKXRAzH6A0U= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672 h1:jiDSspVssiikoRPFHT6pYrL+CL6/yIc3b9AuHO/4xik= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940 h1:MRHtG0U6SnaUb+s+LhNE1qt1FQ1wlhqr5E4usBKC0uA= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790 h1:FGjyjrQGURdc98leD1P65IdQD9Zlr4McvRcqIlV6OSs= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 
h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc h1:TnonUr8u3himcMY0vSh23jFOXA+cnucl1gB6EQTReBI= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= 
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/cloud.google.com/go/storage/go110.go b/vendor/cloud.google.com/go/storage/go110.go index 206813f0c..c1273d59a 100644 --- a/vendor/cloud.google.com/go/storage/go110.go +++ b/vendor/cloud.google.com/go/storage/go110.go @@ -16,7 +16,12 @@ package storage -import "google.golang.org/api/googleapi" +import ( + "net/url" + "strings" + + "google.golang.org/api/googleapi" +) func shouldRetry(err error) bool { switch e := err.(type) { @@ -24,6 +29,17 @@ func shouldRetry(err error) bool { // Retry on 429 and 5xx, according to // https://cloud.google.com/storage/docs/exponential-backoff. return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case *url.Error: + // Retry socket-level errors ECONNREFUSED and ENETUNREACH (from syscall). + // Unfortunately the error type is unexported, so we resort to string + // matching. + retriable := []string{"connection refused", "connection reset"} + for _, s := range retriable { + if strings.Contains(e.Error(), s) { + return true + } + } + return false case interface{ Temporary() bool }: return e.Temporary() default: diff --git a/vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go b/vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go new file mode 100644 index 000000000..7df7a1d71 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go_mod_tidy_hack.go @@ -0,0 +1,22 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the cloud.google.com/go import, won't actually become part of +// the resultant binary. +// +build modhack + +package storage + +// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index 4a5c1b512..7d8185f37 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -25,6 +25,8 @@ import ( ) // HMACState is the state of the HMAC key. +// +// This type is EXPERIMENTAL and subject to change or removal without notice. type HMACState string const ( @@ -105,9 +107,21 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle { // Get invokes an RPC to retrieve the HMAC key referenced by the // HMACKeyHandle's accessID. // +// Options such as UserProjectForHMACKeys can be used to set the +// userProject to be billed against for operations. +// // This method is EXPERIMENTAL and subject to change or removal without notice. 
-func (hkh *HMACKeyHandle) Get(ctx context.Context) (*HMACKey, error) { +func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) { call := hkh.raw.Get(hkh.projectID, hkh.accessID) + + desc := new(hmacKeyDesc) + for _, opt := range opts { + opt.withHMACKeyDesc(desc) + } + if desc.userProjectID != "" { + call = call.UserProject(desc.userProjectID) + } + setClientHeader(call.Header()) var metadata *raw.HmacKeyMetadata @@ -131,8 +145,15 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context) (*HMACKey, error) { // After deletion, a key cannot be used to authenticate requests. // // This method is EXPERIMENTAL and subject to change or removal without notice. -func (hkh *HMACKeyHandle) Delete(ctx context.Context) error { +func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error { delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID) + desc := new(hmacKeyDesc) + for _, opt := range opts { + opt.withHMACKeyDesc(desc) + } + if desc.userProjectID != "" { + delCall = delCall.UserProject(desc.userProjectID) + } setClientHeader(delCall.Header()) return runWithRetry(ctx, func() error { @@ -173,7 +194,7 @@ func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, er // CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey. // // This method is EXPERIMENTAL and subject to change or removal without notice. -func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string) (*HMACKey, error) { +func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) { if projectID == "" { return nil, errors.New("storage: expecting a non-blank projectID") } @@ -183,6 +204,14 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma svc := raw.NewProjectsHmacKeysService(c.raw) call := svc.Create(projectID, serviceAccountEmail) + desc := new(hmacKeyDesc) + for _, opt := range opts { + opt.withHMACKeyDesc(desc) + } + if desc.userProjectID != "" { + call = call.UserProject(desc.userProjectID) + } + setClientHeader(call.Header()) var hkPb *raw.HmacKey @@ -212,7 +241,7 @@ type HMACKeyAttrsToUpdate struct { // Update mutates the HMACKey referred to by accessID. // // This method is EXPERIMENTAL and subject to change or removal without notice. -func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate) (*HMACKey, error) { +func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) { if au.State != Active && au.State != Inactive { return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive) } @@ -221,6 +250,14 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate) (*H Etag: au.Etag, State: string(au.State), }) + + desc := new(hmacKeyDesc) + for _, opt := range opts { + opt.withHMACKeyDesc(desc) + } + if desc.userProjectID != "" { + call = call.UserProject(desc.userProjectID) + } setClientHeader(call.Header()) var metadata *raw.HmacKeyMetadata @@ -241,6 +278,8 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate) (*H // An HMACKeysIterator is an iterator over HMACKeys. // +// Note: This iterator is not safe for concurrent operations without explicit synchronization. +// // This type is EXPERIMENTAL and subject to change or removal without notice. 
type HMACKeysIterator struct { ctx context.Context @@ -250,18 +289,25 @@ type HMACKeysIterator struct { pageInfo *iterator.PageInfo nextFunc func() error index int + desc hmacKeyDesc } // ListHMACKeys returns an iterator for listing HMACKeys. // +// Note: This iterator is not safe for concurrent operations without explicit synchronization. +// // This method is EXPERIMENTAL and subject to change or removal without notice. -func (c *Client) ListHMACKeys(ctx context.Context, projectID string) *HMACKeysIterator { +func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator { it := &HMACKeysIterator{ ctx: ctx, raw: raw.NewProjectsHmacKeysService(c.raw), projectID: projectID, } + for _, opt := range opts { + opt.withHMACKeyDesc(&it.desc) + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( it.fetch, func() int { return len(it.hmacKeys) - it.index }, @@ -278,6 +324,8 @@ func (c *Client) ListHMACKeys(ctx context.Context, projectID string) *HMACKeysIt // there are no more results. Once Next returns iterator.Done, all subsequent // calls will return iterator.Done. // +// Note: This iterator is not safe for concurrent operations without explicit synchronization. +// // This method is EXPERIMENTAL and subject to change or removal without notice. func (it *HMACKeysIterator) Next() (*HMACKey, error) { if err := it.nextFunc(); err != nil { @@ -292,16 +340,26 @@ func (it *HMACKeysIterator) Next() (*HMACKey, error) { // PageInfo supports pagination. See the google.golang.org/api/iterator package for details. // +// Note: This iterator is not safe for concurrent operations without explicit synchronization. +// // This method is EXPERIMENTAL and subject to change or removal without notice. func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) { call := it.raw.List(it.projectID) setClientHeader(call.Header()) - call = call.PageToken(pageToken) - // By default we'll also show deleted keys and then - // let users filter on their own. - call = call.ShowDeletedKeys(true) + if pageToken != "" { + call = call.PageToken(pageToken) + } + if it.desc.showDeletedKeys { + call = call.ShowDeletedKeys(true) + } + if it.desc.userProjectID != "" { + call = call.UserProject(it.desc.userProjectID) + } + if it.desc.forServiceAccountEmail != "" { + call = call.ServiceAccountEmail(it.desc.forServiceAccountEmail) + } if pageSize > 0 { call = call.MaxResults(int64(pageSize)) } @@ -328,3 +386,56 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, } return resp.NextPageToken, nil } + +type hmacKeyDesc struct { + forServiceAccountEmail string + showDeletedKeys bool + userProjectID string +} + +// HMACKeyOption configures the behavior of HMACKey related methods and actions. +// +// This interface is EXPERIMENTAL and subject to change or removal without notice. +type HMACKeyOption interface { + withHMACKeyDesc(*hmacKeyDesc) +} + +type hmacKeyDescFunc func(*hmacKeyDesc) + +func (hkdf hmacKeyDescFunc) withHMACKeyDesc(hkd *hmacKeyDesc) { + hkdf(hkd) +} + +// ForHMACKeyServiceAccountEmail returns HMAC Keys that are +// associated with the email address of a service account in the project. +// +// Only one service account email can be used as a filter, so if multiple +// of these options are applied, the last email to be set will be used. +// +// This option is EXPERIMENTAL and subject to change or removal without notice. 
+func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption { + return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { + hkd.forServiceAccountEmail = serviceAccountEmail + }) +} + +// ShowDeletedHMACKeys will also list keys whose state is "DELETED". +// +// This option is EXPERIMENTAL and subject to change or removal without notice. +func ShowDeletedHMACKeys() HMACKeyOption { + return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { + hkd.showDeletedKeys = true + }) +} + +// UserProjectForHMACKeys will bill the request against userProjectID +// if userProjectID is non-empty. +// +// Note: This is a noop right now and only provided for API compatibility. +// +// This option is EXPERIMENTAL and subject to change or removal without notice. +func UserProjectForHMACKeys(userProjectID string) HMACKeyOption { + return hmacKeyDescFunc(func(hkd *hmacKeyDesc) { + hkd.userProjectID = userProjectID + }) +} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go index 9d9360671..5caefb059 100644 --- a/vendor/cloud.google.com/go/storage/iam.go +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -21,6 +21,7 @@ import ( "cloud.google.com/go/internal/trace" raw "google.golang.org/api/storage/v1" iampb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/genproto/googleapis/type/expr" ) // IAM provides access to IAM access control for the bucket. @@ -38,10 +39,14 @@ type iamClient struct { } func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { + return c.GetWithVersion(ctx, resource, 1) +} + +func (c *iamClient) GetWithVersion(ctx context.Context, resource string, requestedPolicyVersion int32) (p *iampb.Policy, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") defer func() { trace.EndSpan(ctx, err) }() - call := c.raw.Buckets.GetIamPolicy(resource) + call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(requestedPolicyVersion)) setClientHeader(call.Header()) if c.userProject != "" { call.UserProject(c.userProject) @@ -97,6 +102,7 @@ func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { return &raw.Policy{ Bindings: iamToStorageBindings(ip.Bindings), Etag: string(ip.Etag), + Version: int64(ip.Version), } } @@ -104,13 +110,26 @@ func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { var rbs []*raw.PolicyBindings for _, ib := range ibs { rbs = append(rbs, &raw.PolicyBindings{ - Role: ib.Role, - Members: ib.Members, + Role: ib.Role, + Members: ib.Members, + Condition: iamToStorageCondition(ib.Condition), }) } return rbs } +func iamToStorageCondition(exprpb *expr.Expr) *raw.Expr { + if exprpb == nil { + return nil + } + return &raw.Expr{ + Expression: exprpb.Expression, + Description: exprpb.Description, + Location: exprpb.Location, + Title: exprpb.Title, + } +} + func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { return &iampb.Policy{ Bindings: iamFromStorageBindings(rp.Bindings), @@ -122,9 +141,22 @@ func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { var ibs []*iampb.Binding for _, rb := range rbs { ibs = append(ibs, &iampb.Binding{ - Role: rb.Role, - Members: rb.Members, + Role: rb.Role, + Members: rb.Members, + Condition: iamFromStorageCondition(rb.Condition), }) } return ibs } + +func iamFromStorageCondition(rawexpr *raw.Expr) *expr.Expr { + if rawexpr == nil { + return nil + } + return &expr.Expr{ + Expression: rawexpr.Expression, + Description: rawexpr.Description, + Location: rawexpr.Location, + Title: 
rawexpr.Title, + } +} diff --git a/vendor/cloud.google.com/go/storage/post_policy_v4.go b/vendor/cloud.google.com/go/storage/post_policy_v4.go new file mode 100644 index 000000000..b9df7db95 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/post_policy_v4.go @@ -0,0 +1,377 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" +) + +// PostPolicyV4Options are used to construct a signed post policy. +// Please see https://cloud.google.com/storage/docs/xml-api/post-object +// for reference about the fields. +type PostPolicyV4Options struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. Click on the "Generate new P12 key" to generate and download + // a new private key. Once you download the P12 file, use the following command + // to convert it into a PEM file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // Provide the contents of the PEM file as a byte slice. + // Exactly one of PrivateKey or SignBytes must be non-nil. + PrivateKey []byte + + // SignBytes is a function for implementing custom signing. For example, if + // your application is running on Google App Engine, you can use + // appengine's internal signing function: + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // url, err := SignedURL("bucket", "object", &SignedURLOptions{ + // GoogleAccessID: acc, + // SignBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) + // + // Exactly one of PrivateKey or SignBytes must be non-nil. + SignBytes func(hashBytes []byte) (signature []byte, err error) + + // Expires is the expiration time on the signed URL. + // It must be a time in the future. + // Required. + Expires time.Time + + // Style provides options for the type of URL to use. Options are + // PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See + // https://cloud.google.com/storage/docs/request-endpoints for details. + // Optional. + Style URLStyle + + // Insecure when set indicates that the generated URL's scheme + // will use "http" instead of "https" (default). + // Optional. + Insecure bool + + // Fields specifies the attributes of a PostPolicyV4 request. 
+ // When Fields is non-nil, its attributes must match those that will + // passed into field Conditions. + // Optional. + Fields *PolicyV4Fields + + // The conditions that the uploaded file will be expected to conform to. + // When used, the failure of an upload to satisfy a condition will result in + // a 4XX status code, back with the message describing the problem. + // Optional. + Conditions []PostPolicyV4Condition +} + +// PolicyV4Fields describes the attributes for a PostPolicyV4 request. +type PolicyV4Fields struct { + // ACL specifies the access control permissions for the object. + // Optional. + ACL string + // CacheControl specifies the caching directives for the object. + // Optional. + CacheControl string + // ContentType specifies the media type of the object. + // Optional. + ContentType string + // ContentDisposition specifies how the file will be served back to requesters. + // Optional. + ContentDisposition string + // ContentEncoding specifies the decompressive transcoding that the object. + // This field is complementary to ContentType in that the file could be + // compressed but ContentType specifies the file's original media type. + // Optional. + ContentEncoding string + // Metadata specifies custom metadata for the object. + // If any key doesn't begin with "x-goog-meta-", an error will be returned. + // Optional. + Metadata map[string]string + // StatusCodeOnSuccess when set, specifies the status code that Cloud Storage + // will serve back on successful upload of the object. + // Optional. + StatusCodeOnSuccess int + // RedirectToURLOnSuccess when set, specifies the URL that Cloud Storage + // will serve back on successful upload of the object. + // Optional. + RedirectToURLOnSuccess string +} + +// PostPolicyV4 describes the URL and respective form fields for a generated PostPolicyV4 request. +type PostPolicyV4 struct { + // URL is the generated URL that the file upload will be made to. + URL string + // Fields specifies the generated key-values that the file uploader + // must include in their multipart upload form. + Fields map[string]string +} + +// PostPolicyV4Condition describes the constraints that the subsequent +// object upload's multipart form fields will be expected to conform to. +type PostPolicyV4Condition interface { + isEmpty() bool + json.Marshaler +} + +type startsWith struct { + key, value string +} + +func (sw *startsWith) MarshalJSON() ([]byte, error) { + return json.Marshal([]string{"starts-with", sw.key, sw.value}) +} +func (sw *startsWith) isEmpty() bool { + return sw.value == "" +} + +// ConditionStartsWith checks that an attributes starts with value. +// An empty value will cause this condition to be ignored. +func ConditionStartsWith(key, value string) PostPolicyV4Condition { + return &startsWith{key, value} +} + +type contentLengthRangeCondition struct { + start, end uint64 +} + +func (clr *contentLengthRangeCondition) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{"content-length-range", clr.start, clr.end}) +} +func (clr *contentLengthRangeCondition) isEmpty() bool { + return clr.start == 0 && clr.end == 0 +} + +type singleValueCondition struct { + name, value string +} + +func (svc *singleValueCondition) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]string{svc.name: svc.value}) +} +func (svc *singleValueCondition) isEmpty() bool { + return svc.value == "" +} + +// ConditionContentLengthRange constraints the limits that the +// multipart upload's range header will be expected to be within. 
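Taken together, the options, fields, and condition helpers above compose the policy document for a browser-style POST upload. A minimal sketch of how a caller might assemble them; the access ID, key material, prefix, and size cap below are illustrative placeholders, not values from this patch:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	// Conditions marshal straight into the JSON policy document, e.g. a
	// starts-with restriction on the object key and a 10 MiB size cap.
	conds := []storage.PostPolicyV4Condition{
		storage.ConditionStartsWith("$key", "uploads/"),
		storage.ConditionContentLengthRange(0, 10<<20),
	}
	b, _ := json.Marshal(conds[0])
	fmt.Println(string(b)) // ["starts-with","$key","uploads/"]

	// Options bind the conditions to a signer and an expiry.
	_ = &storage.PostPolicyV4Options{
		GoogleAccessID: "uploader@my-project.iam.gserviceaccount.com",
		PrivateKey:     []byte("-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n"),
		Expires:        time.Now().Add(15 * time.Minute),
		Fields: &storage.PolicyV4Fields{
			ContentType:         "image/png",
			StatusCodeOnSuccess: 201,
		},
		Conditions: conds,
	}
}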
+func ConditionContentLengthRange(start, end uint64) PostPolicyV4Condition { + return &contentLengthRangeCondition{start, end} +} + +func conditionRedirectToURLOnSuccess(redirectURL string) PostPolicyV4Condition { + return &singleValueCondition{"success_action_redirect", redirectURL} +} + +func conditionStatusCodeOnSuccess(statusCode int) PostPolicyV4Condition { + svc := &singleValueCondition{name: "success_action_status"} + if statusCode > 0 { + svc.value = fmt.Sprintf("%d", statusCode) + } + return svc +} + +// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts. +// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads. +func GenerateSignedPostPolicyV4(bucket, object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) { + if bucket == "" { + return nil, errors.New("storage: bucket must be non-empty") + } + if object == "" { + return nil, errors.New("storage: object must be non-empty") + } + now := utcNow() + if err := validatePostPolicyV4Options(opts, now); err != nil { + return nil, err + } + + var signingFn func(hashedBytes []byte) ([]byte, error) + switch { + case opts.SignBytes != nil: + signingFn = opts.SignBytes + + case len(opts.PrivateKey) != 0: + parsedRSAPrivKey, err := parseKey(opts.PrivateKey) + if err != nil { + return nil, err + } + signingFn = func(hashedBytes []byte) ([]byte, error) { + return rsa.SignPKCS1v15(rand.Reader, parsedRSAPrivKey, crypto.SHA256, hashedBytes) + } + + default: + return nil, errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + + var descFields PolicyV4Fields + if opts.Fields != nil { + descFields = *opts.Fields + } + + if err := validateMetadata(descFields.Metadata); err != nil { + return nil, err + } + + // Build the policy. + conds := make([]PostPolicyV4Condition, len(opts.Conditions)) + copy(conds, opts.Conditions) + conds = append(conds, + conditionRedirectToURLOnSuccess(descFields.RedirectToURLOnSuccess), + conditionStatusCodeOnSuccess(descFields.StatusCodeOnSuccess), + &singleValueCondition{"acl", descFields.ACL}, + &singleValueCondition{"cache-control", descFields.CacheControl}, + ) + + YYYYMMDD := now.Format(yearMonthDay) + policyFields := map[string]string{ + "key": object, + "x-goog-date": now.Format(iso8601), + "x-goog-credential": opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", + "x-goog-algorithm": "GOOG4-RSA-SHA256", + "success_action_redirect": descFields.RedirectToURLOnSuccess, + "acl": descFields.ACL, + } + for key, value := range descFields.Metadata { + conds = append(conds, &singleValueCondition{key, value}) + policyFields[key] = value + } + + // Following from the order expected by the conformance test cases, + // hence manually inserting these fields in a specific order. 
+ conds = append(conds, + &singleValueCondition{"bucket", bucket}, + &singleValueCondition{"key", object}, + &singleValueCondition{"x-goog-date", now.Format(iso8601)}, + &singleValueCondition{ + name: "x-goog-credential", + value: opts.GoogleAccessID + "/" + YYYYMMDD + "/auto/storage/goog4_request", + }, + &singleValueCondition{"x-goog-algorithm", "GOOG4-RSA-SHA256"}, + ) + + nonEmptyConds := make([]PostPolicyV4Condition, 0, len(opts.Conditions)) + for _, cond := range conds { + if cond == nil || !cond.isEmpty() { + nonEmptyConds = append(nonEmptyConds, cond) + } + } + condsAsJSON, err := json.Marshal(map[string]interface{}{ + "conditions": nonEmptyConds, + "expiration": opts.Expires.Format(time.RFC3339), + }) + if err != nil { + return nil, fmt.Errorf("storage: PostPolicyV4 JSON serialization failed: %v", err) + } + + b64Policy := base64.StdEncoding.EncodeToString(condsAsJSON) + shaSum := sha256.Sum256([]byte(b64Policy)) + signature, err := signingFn(shaSum[:]) + if err != nil { + return nil, err + } + + policyFields["policy"] = b64Policy + policyFields["x-goog-signature"] = fmt.Sprintf("%x", signature) + + // Construct the URL. + scheme := "https" + if opts.Insecure { + scheme = "http" + } + path := opts.Style.path(bucket, "") + "/" + u := &url.URL{ + Path: path, + RawPath: pathEncodeV4(path), + Host: opts.Style.host(bucket), + Scheme: scheme, + } + + if descFields.StatusCodeOnSuccess > 0 { + policyFields["success_action_status"] = fmt.Sprintf("%d", descFields.StatusCodeOnSuccess) + } + + // Clear out fields with blanks values. + for key, value := range policyFields { + if value == "" { + delete(policyFields, key) + } + } + pp4 := &PostPolicyV4{ + Fields: policyFields, + URL: u.String(), + } + return pp4, nil +} + +// validatePostPolicyV4Options checks that: +// * GoogleAccessID is set +// * either but not both PrivateKey and SignBytes are set or nil, but not both +// * Expires, the deadline is not in the past +// * if Style is not set, it'll use PathStyle +func validatePostPolicyV4Options(opts *PostPolicyV4Options, now time.Time) error { + if opts == nil || opts.GoogleAccessID == "" { + return errors.New("storage: missing required GoogleAccessID") + } + if privBlank, signBlank := len(opts.PrivateKey) == 0, opts.SignBytes == nil; privBlank == signBlank { + return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + if opts.Expires.Before(now) { + return errors.New("storage: expecting Expires to be in the future") + } + if opts.Style == nil { + opts.Style = PathStyle() + } + return nil +} + +// validateMetadata ensures that all keys passed in have a prefix of "x-goog-meta-", +// otherwise it will return an error. +func validateMetadata(hdrs map[string]string) (err error) { + if len(hdrs) == 0 { + return nil + } + + badKeys := make([]string, 0, len(hdrs)) + for key := range hdrs { + if !strings.HasPrefix(key, "x-goog-meta-") { + badKeys = append(badKeys, key) + } + } + if len(badKeys) != 0 { + err = errors.New("storage: expected metadata to begin with x-goog-meta-, got " + strings.Join(badKeys, ", ")) + } + return +} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 5c83651bd..d64f5ec77 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -86,6 +86,11 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { // until the end. 
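To round out GenerateSignedPostPolicyV4 above, a rough usage sketch; the bucket, object, and key file path are placeholders, and the PEM file must contain a real service-account RSA key for signing to succeed:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	key, err := ioutil.ReadFile("service-account.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	pp, err := storage.GenerateSignedPostPolicyV4("my-bucket", "uploads/report.csv",
		&storage.PostPolicyV4Options{
			GoogleAccessID: "uploader@my-project.iam.gserviceaccount.com",
			PrivateKey:     key,
			Expires:        time.Now().Add(10 * time.Minute),
		})
	if err != nil {
		log.Fatal(err)
	}
	// pp.URL is the form action; pp.Fields are the form fields an otherwise
	// unauthenticated client must echo back in its multipart POST.
	fmt.Println(pp.URL)
	for k, v := range pp.Fields {
		fmt.Printf("%s=%s\n", k, v)
	}
}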
If offset is negative, the object is read abs(offset) bytes // from the end, and length must also be negative to indicate all remaining // bytes will be read. +// +// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies +// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding +// that file will be served back whole, regardless of the requested range as +// Google Cloud Storage dictates. func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader") defer func() { trace.EndSpan(ctx, err) }() @@ -160,10 +165,25 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) Body: string(body), } } - if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { + + partialContentNotSatisfied := + !decompressiveTranscoding(res) && + start > 0 && length != 0 && + res.StatusCode != http.StatusPartialContent + + if partialContentNotSatisfied { res.Body.Close() return errors.New("storage: partial request not satisfied") } + + // With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves + // back the whole file regardless of the range count passed in as per: + // https://cloud.google.com/storage/docs/transcoding#range, + // thus we have to manually move the body forward by seen bytes. + if decompressiveTranscoding(res) && seen > 0 { + _, _ = io.CopyN(ioutil.Discard, res.Body, seen) + } + // If a generation hasn't been specified, and this is the first response we get, let's record the // generation. In future requests we'll use this generation as a precondition to avoid data races. if gen < 0 && res.Header.Get("X-Goog-Generation") != "" { @@ -232,7 +252,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) body = emptyBody } var metaGen int64 - if res.Header.Get("X-Goog-Generation") != "" { + if res.Header.Get("X-Goog-Metageneration") != "" { metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64) if err != nil { return nil, err @@ -268,6 +288,18 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) }, nil } +// decompressiveTranscoding returns true if the request was served decompressed +// and different than its original storage form. This happens when the "Content-Encoding" +// header is "gzip". +// See: +// * https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip +// * https://github.com/googleapis/google-cloud-go/issues/1800 +func decompressiveTranscoding(res *http.Response) bool { + // Decompressive Transcoding. + return res.Header.Get("Content-Encoding") == "gzip" || + res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" +} + func uncompressedByServer(res *http.Response) bool { // If the data is stored as gzip but is not encoded as gzip, then it // was uncompressed by the server. diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index d35bd7568..20d9518a4 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -47,14 +47,19 @@ import ( htransport "google.golang.org/api/transport/http" ) +// Methods which can be used in signed URLs. +var signedURLMethods = map[string]bool{"DELETE": true, "GET": true, "HEAD": true, "POST": true, "PUT": true} + var ( // ErrBucketNotExist indicates that the bucket does not exist. 
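The decompressive-transcoding note above changes what callers of NewRangeReader get back. A small sketch of a ranged read under those rules; the bucket and object names are placeholders and default application credentials are assumed:

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("my-bucket").Object("logs/app.log") // placeholder names

	// Read 1 KiB starting at offset 4096. If the object is stored with
	// Content-Encoding: gzip, the server returns the whole decompressed
	// object and the requested range is not honored.
	r, err := obj.NewRangeReader(ctx, 4096, 1024)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	data, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes\n", len(data))
}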
ErrBucketNotExist = errors.New("storage: bucket doesn't exist") // ErrObjectNotExist indicates that the object does not exist. ErrObjectNotExist = errors.New("storage: object doesn't exist") + // errMethodNotValid indicates that given HTTP method is not valid. + errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys()) ) -const userAgent = "gcloud-golang-storage/20151204" +var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", version.Repo) const ( // ScopeFullControl grants permissions to manage your @@ -94,30 +99,44 @@ type Client struct { // NewClient creates a new Google Cloud Storage client. // The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - o := []option.ClientOption{ - option.WithScopes(ScopeFullControl), - option.WithUserAgent(userAgent), + var host, readHost, scheme string + + if host = os.Getenv("STORAGE_EMULATOR_HOST"); host == "" { + scheme = "https" + readHost = "storage.googleapis.com" + + // Prepend default options to avoid overriding options passed by the user. + opts = append([]option.ClientOption{option.WithScopes(ScopeFullControl), option.WithUserAgent(userAgent)}, opts...) + } else { + scheme = "http" + readHost = host + + opts = append([]option.ClientOption{option.WithoutAuthentication()}, opts...) } - opts = append(o, opts...) + hc, ep, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } - rawService, err := raw.New(hc) + rawService, err := raw.NewService(ctx, option.WithHTTPClient(hc)) if err != nil { return nil, fmt.Errorf("storage client: %v", err) } - if ep != "" { - rawService.BasePath = ep - } - scheme := "https" - var host, readHost string - if host = os.Getenv("STORAGE_EMULATOR_HOST"); host != "" { - scheme = "http" - readHost = host + if ep == "" { + // Override the default value for BasePath from the raw client. + // TODO: remove when the raw client uses this endpoint as its default (~end of 2020) + rawService.BasePath = "https://storage.googleapis.com/storage/v1/" } else { - readHost = "storage.googleapis.com" + // If the endpoint has been set explicitly, use this for the BasePath + // as well as readHost + rawService.BasePath = ep + u, err := url.Parse(ep) + if err != nil { + return nil, fmt.Errorf("supplied endpoint %v is not valid: %v", ep, err) + } + readHost = u.Host } + return &Client{ hc: hc, raw: rawService, @@ -151,6 +170,80 @@ const ( SigningSchemeV4 ) +// URLStyle determines the style to use for the signed URL. pathStyle is the +// default. All non-default options work with V4 scheme only. See +// https://cloud.google.com/storage/docs/request-endpoints for details. +type URLStyle interface { + // host should return the host portion of the signed URL, not including + // the scheme (e.g. storage.googleapis.com). + host(bucket string) string + + // path should return the path portion of the signed URL, which may include + // both the bucket and object name or only the object name depending on the + // style. 
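The rewritten NewClient above keys its behavior off STORAGE_EMULATOR_HOST; a sketch of exercising that switch, with a placeholder emulator address:

package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func main() {
	// With the variable set, NewClient talks plain HTTP to the emulator and
	// skips authentication; unset, it uses ScopeFullControl against
	// storage.googleapis.com over HTTPS.
	os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9023") // placeholder address

	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	fmt.Println("client ready")
}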
+ path(bucket, object string) string +} + +type pathStyle struct{} + +type virtualHostedStyle struct{} + +type bucketBoundHostname struct { + hostname string +} + +func (s pathStyle) host(bucket string) string { + return "storage.googleapis.com" +} + +func (s virtualHostedStyle) host(bucket string) string { + return bucket + ".storage.googleapis.com" +} + +func (s bucketBoundHostname) host(bucket string) string { + return s.hostname +} + +func (s pathStyle) path(bucket, object string) string { + p := bucket + if object != "" { + p += "/" + object + } + return p +} + +func (s virtualHostedStyle) path(bucket, object string) string { + return object +} + +func (s bucketBoundHostname) path(bucket, object string) string { + return object +} + +// PathStyle is the default style, and will generate a URL of the form +// "storage.googleapis.com//". +func PathStyle() URLStyle { + return pathStyle{} +} + +// VirtualHostedStyle generates a URL relative to the bucket's virtual +// hostname, e.g. ".storage.googleapis.com/". +func VirtualHostedStyle() URLStyle { + return virtualHostedStyle{} +} + +// BucketBoundHostname generates a URL with a custom hostname tied to a +// specific GCS bucket. The desired hostname should be passed in using the +// hostname argument. Generated urls will be of the form +// "/". See +// https://cloud.google.com/storage/docs/request-endpoints#cname and +// https://cloud.google.com/load-balancing/docs/https/adding-backend-buckets-to-load-balancers +// for details. Note that for CNAMEs, only HTTP is supported, so Insecure must +// be set to true. +func BucketBoundHostname(hostname string) URLStyle { + return bucketBoundHostname{hostname: hostname} +} + // SignedURLOptions allows you to restrict the access to the signed URL. type SignedURLOptions struct { // GoogleAccessID represents the authorizer of the signed URL generation. @@ -207,16 +300,37 @@ type SignedURLOptions struct { ContentType string // Headers is a list of extension headers the client must provide - // in order to use the generated signed URL. + // in order to use the generated signed URL. Each must be a string of the + // form "key:values", with multiple values separated by a semicolon. // Optional. Headers []string + // QueryParameters is a map of additional query parameters. When + // SigningScheme is V4, this is used in computing the signature, and the + // client must use the same query parameters when using the generated signed + // URL. + // Optional. + QueryParameters url.Values + // MD5 is the base64 encoded MD5 checksum of the file. // If provided, the client should provide the exact value on the request // header in order to use the signed URL. // Optional. MD5 string + // Style provides options for the type of URL to use. Options are + // PathStyle (default), BucketBoundHostname, and VirtualHostedStyle. See + // https://cloud.google.com/storage/docs/request-endpoints for details. + // Only supported for V4 signing. + // Optional. + Style URLStyle + + // Insecure determines whether the signed URL should use HTTPS (default) or + // HTTP. + // Only supported for V4 signing. + // Optional. + Insecure bool + // Scheme determines the version of URL signing to use. Default is // SigningSchemeV2. 
Scheme SigningScheme @@ -368,8 +482,9 @@ func validateOptions(opts *SignedURLOptions, now time.Time) error { if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") } - if opts.Method == "" { - return errors.New("storage: missing required method option") + opts.Method = strings.ToUpper(opts.Method) + if _, ok := signedURLMethods[opts.Method]; !ok { + return errMethodNotValid } if opts.Expires.IsZero() { return errors.New("storage: missing required expires option") @@ -380,6 +495,12 @@ func validateOptions(opts *SignedURLOptions, now time.Time) error { return errors.New("storage: invalid MD5 checksum") } } + if opts.Style == nil { + opts.Style = PathStyle() + } + if _, ok := opts.Style.(pathStyle); !ok && opts.Scheme == SigningSchemeV2 { + return errors.New("storage: only path-style URLs are permitted with SigningSchemeV2") + } if opts.Scheme == SigningSchemeV4 { cutoff := now.Add(604801 * time.Second) // 7 days + 1 second if !opts.Expires.Before(cutoff) { @@ -411,19 +532,33 @@ func extractHeaderNames(kvs []string) []string { return res } +// pathEncodeV4 creates an encoded string that matches the v4 signature spec. +// Following the spec precisely is necessary in order to ensure that the URL +// and signing string are correctly formed, and Go's url.PathEncode and +// url.QueryEncode don't generate an exact match without some additional logic. +func pathEncodeV4(path string) string { + segments := strings.Split(path, "/") + var encodedSegments []string + for _, s := range segments { + encodedSegments = append(encodedSegments, url.QueryEscape(s)) + } + encodedStr := strings.Join(encodedSegments, "/") + encodedStr = strings.Replace(encodedStr, "+", "%20", -1) + return encodedStr +} + // signedURLV4 creates a signed URL using the sigV4 algorithm. func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (string, error) { buf := &bytes.Buffer{} fmt.Fprintf(buf, "%s\n", opts.Method) - u := &url.URL{Path: bucket} - if name != "" { - u.Path += "/" + name - } + + u := &url.URL{Path: opts.Style.path(bucket, name)} + u.RawPath = pathEncodeV4(u.Path) // Note: we have to add a / here because GCS does so auto-magically, despite - // Go's EscapedPath not doing so (and we have to exactly match their + // our encoding not doing so (and we have to exactly match their // canonical query). - fmt.Fprintf(buf, "/%s\n", u.EscapedPath()) + fmt.Fprintf(buf, "/%s\n", u.RawPath) headerNames := append(extractHeaderNames(opts.Headers), "host") if opts.ContentType != "" { @@ -443,23 +578,55 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st "X-Goog-Expires": {fmt.Sprintf("%d", int(opts.Expires.Sub(now).Seconds()))}, "X-Goog-SignedHeaders": {signedHeaders}, } + // Add user-supplied query parameters to the canonical query string. For V4, + // it's necessary to include these. + for k, v := range opts.QueryParameters { + canonicalQueryString[k] = append(canonicalQueryString[k], v...) + } + fmt.Fprintf(buf, "%s\n", canonicalQueryString.Encode()) - u.Host = "storage.googleapis.com" + // Fill in the hostname based on the desired URL style. + u.Host = opts.Style.host(bucket) + + // Fill in the URL scheme. + if opts.Insecure { + u.Scheme = "http" + } else { + u.Scheme = "https" + } var headersWithValue []string headersWithValue = append(headersWithValue, "host:"+u.Host) headersWithValue = append(headersWithValue, opts.Headers...) 
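A usage sketch for the V4 signing additions above, going through the package's SignedURL helper; the bucket, object, access ID, and key file are placeholders, and a valid RSA key is assumed for the signature itself:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/url"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	key, err := ioutil.ReadFile("service-account.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	u, err := storage.SignedURL("my-bucket", "reports/2020.csv", &storage.SignedURLOptions{
		GoogleAccessID: "signer@my-project.iam.gserviceaccount.com",
		PrivateKey:     key,
		Method:         "GET",
		Expires:        time.Now().Add(1 * time.Hour),
		Scheme:         storage.SigningSchemeV4,
		// Non-default URL styles and extra query parameters are V4-only,
		// per the validation added above.
		Style:           storage.VirtualHostedStyle(),
		QueryParameters: url.Values{"response-content-disposition": {"attachment"}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u)
}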
if opts.ContentType != "" { - headersWithValue = append(headersWithValue, "content-type:"+strings.TrimSpace(opts.ContentType)) + headersWithValue = append(headersWithValue, "content-type:"+opts.ContentType) } if opts.MD5 != "" { - headersWithValue = append(headersWithValue, "content-md5:"+strings.TrimSpace(opts.MD5)) + headersWithValue = append(headersWithValue, "content-md5:"+opts.MD5) + } + // Trim extra whitespace from headers and replace with a single space. + var trimmedHeaders []string + for _, h := range headersWithValue { + trimmedHeaders = append(trimmedHeaders, strings.Join(strings.Fields(h), " ")) } - canonicalHeaders := strings.Join(sortHeadersByKey(headersWithValue), "\n") + canonicalHeaders := strings.Join(sortHeadersByKey(trimmedHeaders), "\n") fmt.Fprintf(buf, "%s\n\n", canonicalHeaders) fmt.Fprintf(buf, "%s\n", signedHeaders) - fmt.Fprint(buf, "UNSIGNED-PAYLOAD") + + // If the user provides a value for X-Goog-Content-SHA256, we must use + // that value in the request string. If not, we use UNSIGNED-PAYLOAD. + sha256Header := false + for _, h := range trimmedHeaders { + if strings.HasPrefix(strings.ToLower(h), "x-goog-content-sha256") && strings.Contains(h, ":") { + sha256Header = true + fmt.Fprintf(buf, "%s", strings.SplitN(h, ":", 2)[1]) + break + } + } + if !sha256Header { + fmt.Fprint(buf, "UNSIGNED-PAYLOAD") + } sum := sha256.Sum256(buf.Bytes()) hexDigest := hex.EncodeToString(sum[:]) @@ -491,7 +658,6 @@ func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (st } signature := hex.EncodeToString(b) canonicalQueryString.Set("X-Goog-Signature", string(signature)) - u.Scheme = "https" u.RawQuery = canonicalQueryString.Encode() return u.String(), nil } @@ -964,11 +1130,11 @@ type ObjectAttrs struct { // data is rejected if its MD5 hash does not match this field. MD5 []byte - // CRC32C is the CRC32 checksum of the object's content using - // the Castagnoli93 polynomial. This field is read-only, except when - // used from a Writer. If set on a Writer and Writer.SendCRC32C - // is true, the uploaded data is rejected if its CRC32c hash does not - // match this field. + // CRC32C is the CRC32 checksum of the object's content using the Castagnoli93 + // polynomial. This field is read-only, except when used from a Writer or + // Composer. In those cases, if the SendCRC32C field in the Writer or Composer + // is set to is true, the uploaded data is rejected if its CRC32C hash does + // not match this field. CRC32C uint32 // MediaLink is an URL to the object's content. This field is read-only. @@ -989,13 +1155,12 @@ type ObjectAttrs struct { // of a particular object. This field is read-only. Metageneration int64 - // StorageClass is the storage class of the object. - // This value defines how objects in the bucket are stored and - // determines the SLA and the cost of storage. Typical values are - // "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" - // and "DURABLE_REDUCED_AVAILABILITY". - // It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL" - // or "REGIONAL" depending on the bucket's location settings. + // StorageClass is the storage class of the object. This defines + // how objects are stored and determines the SLA and the cost of storage. + // Typical values are "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE". + // Defaults to "STANDARD". + // See https://cloud.google.com/storage/docs/storage-classes for all + // valid values. StorageClass string // Created is the time the object was created. This field is read-only. 
@@ -1127,6 +1292,78 @@ type Query struct { // Versions indicates whether multiple versions of the same // object will be included in the results. Versions bool + + // fieldSelection is used to select only specific fields to be returned by + // the query. It's used internally and is populated for the user by + // calling Query.SetAttrSelection + fieldSelection string +} + +// attrToFieldMap maps the field names of ObjectAttrs to the underlying field +// names in the API call. Only the ObjectAttrs field names are visible to users +// because they are already part of the public API of the package. +var attrToFieldMap = map[string]string{ + "Bucket": "bucket", + "Name": "name", + "ContentType": "contentType", + "ContentLanguage": "contentLanguage", + "CacheControl": "cacheControl", + "EventBasedHold": "eventBasedHold", + "TemporaryHold": "temporaryHold", + "RetentionExpirationTime": "retentionExpirationTime", + "ACL": "acl", + "Owner": "owner", + "ContentEncoding": "contentEncoding", + "ContentDisposition": "contentDisposition", + "Size": "size", + "MD5": "md5Hash", + "CRC32C": "crc32c", + "MediaLink": "mediaLink", + "Metadata": "metadata", + "Generation": "generation", + "Metageneration": "metageneration", + "StorageClass": "storageClass", + "CustomerKeySHA256": "customerEncryption", + "KMSKeyName": "kmsKeyName", + "Created": "timeCreated", + "Deleted": "timeDeleted", + "Updated": "updated", + "Etag": "etag", +} + +// SetAttrSelection makes the query populate only specific attributes of +// objects. When iterating over objects, if you only need each object's name +// and size, pass []string{"Name", "Size"} to this method. Only these fields +// will be fetched for each object across the network; the other fields of +// ObjectAttr will remain at their default values. This is a performance +// optimization; for more information, see +// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance +func (q *Query) SetAttrSelection(attrs []string) error { + fieldSet := make(map[string]bool) + + for _, attr := range attrs { + field, ok := attrToFieldMap[attr] + if !ok { + return fmt.Errorf("storage: attr %v is not valid", attr) + } + fieldSet[field] = true + } + + if len(fieldSet) > 0 { + var b bytes.Buffer + b.WriteString("items(") + first := true + for field := range fieldSet { + if !first { + b.WriteString(",") + } + first = false + b.WriteString(field) + } + b.WriteString(")") + q.fieldSelection = b.String() + } + return nil } // Conditions constrain methods to act on specific generations of diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index a11659212..1843a8141 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -45,12 +45,20 @@ type Writer struct { // Writer will attempt to send to the server in a single request. Objects // smaller than the size will be sent in a single request, while larger // objects will be split over multiple requests. The size will be rounded up - // to the nearest multiple of 256K. If zero, chunking will be disabled and - // the object will be uploaded in a single request. + // to the nearest multiple of 256K. // - // ChunkSize will default to a reasonable value. If you perform many concurrent - // writes of small objects, you may wish set ChunkSize to a value that matches - // your objects' sizes to avoid consuming large amounts of memory. + // ChunkSize will default to a reasonable value. 
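SetAttrSelection above narrows what each listed object carries over the wire; a sketch of pairing it with a bucket iterator, with placeholder bucket and prefix:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Only Name and Size are fetched per object; the other ObjectAttrs
	// fields stay at their zero values.
	q := &storage.Query{Prefix: "logs/"}
	if err := q.SetAttrSelection([]string{"Name", "Size"}); err != nil {
		log.Fatal(err)
	}

	it := client.Bucket("my-bucket").Objects(ctx, q) // placeholder bucket
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name, attrs.Size)
	}
}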
If you perform many + // concurrent writes of small objects (under ~8MB), you may wish set ChunkSize + // to a value that matches your objects' sizes to avoid consuming large + // amounts of memory. See + // https://cloud.google.com/storage/docs/json_api/v1/how-tos/upload#size + // for more information about performance trade-offs related to ChunkSize. + // + // If ChunkSize is set to zero, chunking will be disabled and the object will + // be uploaded in a single request without the use of a buffer. This will + // further reduce memory used during uploads, but will also prevent the writer + // from retrying in case of a transient error from the server, since a buffer + // is required in order to retry the failed request. // // ChunkSize must be set before the first Write call. ChunkSize int @@ -123,7 +131,8 @@ func (w *Writer) open() error { call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj). Media(pr, mediaOpts...). Projection("full"). - Context(w.ctx) + Context(w.ctx). + Name(w.o.object) if w.ProgressFunc != nil { call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) }) @@ -149,14 +158,10 @@ func (w *Writer) open() error { } setClientHeader(call.Header()) - // The internals that perform call.Do automatically retry - // uploading chunks, hence no need to add retries here. - // See issue https://github.com/googleapis/google-cloud-go/issues/1507. - // - // However, since this whole call's internals involve making the initial - // resumable upload session, the first HTTP request is not retried. - // TODO: Follow-up with google.golang.org/gensupport to solve - // https://github.com/googleapis/google-api-go-client/issues/392. + // The internals that perform call.Do automatically retry both the initial + // call to set up the upload as well as calls to upload individual chunks + // for a resumable upload (as long as the chunk size is non-zero). Hence + // there is no need to add retries here. resp, err = call.Do() } if err != nil { @@ -177,6 +182,9 @@ func (w *Writer) open() error { // error even though the write failed (or will fail). Always // use the error returned from Writer.Close to determine if // the upload was successful. +// +// Writes will be retried on transient errors from the server, unless +// Writer.ChunkSize has been set to zero. func (w *Writer) Write(p []byte) (n int, err error) { w.mu.Lock() werr := w.err diff --git a/vendor/cloud.google.com/go/tools.go b/vendor/cloud.google.com/go/tools.go new file mode 100644 index 000000000..da5ca585d --- /dev/null +++ b/vendor/cloud.google.com/go/tools.go @@ -0,0 +1,31 @@ +// +build tools + +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This package exists to cause `go mod` and `go get` to believe these tools +// are dependencies, even though they are not runtime dependencies of any +// package (these are tools used by our CI builds). This means they will appear +// in our `go.mod` file, but will not be a part of the build. 
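The ChunkSize trade-off documented above has to be decided before the first Write; a minimal sketch with placeholder bucket and object names:

package main

import (
	"context"
	"io"
	"log"
	"strings"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	w := client.Bucket("my-bucket").Object("notes/hello.txt").NewWriter(ctx)
	// ChunkSize = 0 uploads in a single unbuffered request: lowest memory
	// use, but, as noted above, no retries on transient server errors.
	w.ChunkSize = 0

	if _, err := io.Copy(w, strings.NewReader("hello, world\n")); err != nil {
		log.Fatal(err)
	}
	// Close reports whether the upload actually succeeded.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}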
Also, since the +// build target is something non-existent, these should not be included in any +// binaries. + +package cloud + +import ( + _ "github.com/golang/protobuf/protoc-gen-go" + _ "github.com/jstemmer/go-junit-report" + _ "golang.org/x/lint/golint" + _ "golang.org/x/tools/cmd/goimports" +) diff --git a/vendor/github.com/agl/ed25519/ed25519.go b/vendor/github.com/agl/ed25519/ed25519.go deleted file mode 100644 index e5f873f52..000000000 --- a/vendor/github.com/agl/ed25519/ed25519.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// http://ed25519.cr.yp.to/. -package ed25519 - -// This code is a port of the public domain, "ref10" implementation of ed25519 -// from SUPERCOP. - -import ( - "crypto/sha512" - "crypto/subtle" - "io" - - "github.com/agl/ed25519/edwards25519" -) - -const ( - PublicKeySize = 32 - PrivateKeySize = 64 - SignatureSize = 64 -) - -// GenerateKey generates a public/private key pair using randomness from rand. -func GenerateKey(rand io.Reader) (publicKey *[PublicKeySize]byte, privateKey *[PrivateKeySize]byte, err error) { - privateKey = new([64]byte) - publicKey = new([32]byte) - _, err = io.ReadFull(rand, privateKey[:32]) - if err != nil { - return nil, nil, err - } - - h := sha512.New() - h.Write(privateKey[:32]) - digest := h.Sum(nil) - - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - var A edwards25519.ExtendedGroupElement - var hBytes [32]byte - copy(hBytes[:], digest) - edwards25519.GeScalarMultBase(&A, &hBytes) - A.ToBytes(publicKey) - - copy(privateKey[32:], publicKey[:]) - return -} - -// Sign signs the message with privateKey and returns a signature. -func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte { - h := sha512.New() - h.Write(privateKey[:32]) - - var digest1, messageDigest, hramDigest [64]byte - var expandedSecretKey [32]byte - h.Sum(digest1[:0]) - copy(expandedSecretKey[:], digest1[:]) - expandedSecretKey[0] &= 248 - expandedSecretKey[31] &= 63 - expandedSecretKey[31] |= 64 - - h.Reset() - h.Write(digest1[32:]) - h.Write(message) - h.Sum(messageDigest[:0]) - - var messageDigestReduced [32]byte - edwards25519.ScReduce(&messageDigestReduced, &messageDigest) - var R edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&R, &messageDigestReduced) - - var encodedR [32]byte - R.ToBytes(&encodedR) - - h.Reset() - h.Write(encodedR[:]) - h.Write(privateKey[32:]) - h.Write(message) - h.Sum(hramDigest[:0]) - var hramDigestReduced [32]byte - edwards25519.ScReduce(&hramDigestReduced, &hramDigest) - - var s [32]byte - edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) - - signature := new([64]byte) - copy(signature[:], encodedR[:]) - copy(signature[32:], s[:]) - return signature -} - -// Verify returns true iff sig is a valid signature of message by publicKey. 
-func Verify(publicKey *[PublicKeySize]byte, message []byte, sig *[SignatureSize]byte) bool { - if sig[63]&224 != 0 { - return false - } - - var A edwards25519.ExtendedGroupElement - if !A.FromBytes(publicKey) { - return false - } - edwards25519.FeNeg(&A.X, &A.X) - edwards25519.FeNeg(&A.T, &A.T) - - h := sha512.New() - h.Write(sig[:32]) - h.Write(publicKey[:]) - h.Write(message) - var digest [64]byte - h.Sum(digest[:0]) - - var hReduced [32]byte - edwards25519.ScReduce(&hReduced, &digest) - - var R edwards25519.ProjectiveGroupElement - var b [32]byte - copy(b[:], sig[32:]) - edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b) - - var checkR [32]byte - R.ToBytes(&checkR) - return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1 -} diff --git a/vendor/github.com/agl/ed25519/edwards25519/const.go b/vendor/github.com/agl/ed25519/edwards25519/const.go deleted file mode 100644 index ea5b77a71..000000000 --- a/vendor/github.com/agl/ed25519/edwards25519/const.go +++ /dev/null @@ -1,1411 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package edwards25519 - -var d = FieldElement{ - -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, -} - -var d2 = FieldElement{ - -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, -} - -var SqrtM1 = FieldElement{ - -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, -} - -var A = FieldElement{ - 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, -} - -var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, 
-9423865, -12437364, -663000, -31111463, -16132436}, - FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - 
}, - { - FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 
9208236, 15886429, 16489664}, - }, - { - FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 
[vendored edwards25519 precomputed FieldElement constant tables elided: the remaining removed lines of numeric group-element literals from the deleted agl/ed25519 vendored package]
FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, 
-4388953, -8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 
334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git a/vendor/github.com/agl/ed25519/edwards25519/edwards25519.go b/vendor/github.com/agl/ed25519/edwards25519/edwards25519.go deleted file mode 100644 index 907981855..000000000 --- a/vendor/github.com/agl/ed25519/edwards25519/edwards25519.go +++ /dev/null @@ -1,1773 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package edwards25519 implements operations in GF(2**255-19) and on an -// Edwards curve that is isomorphic to curve25519. See -// http://ed25519.cr.yp.to/. -package edwards25519 - -// This code is a port of the public domain, "ref10" implementation of ed25519 -// from SUPERCOP. - -// FieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type FieldElement [10]int32 - -var zero FieldElement - -func FeZero(fe *FieldElement) { - copy(fe[:], zero[:]) -} - -func FeOne(fe *FieldElement) { - FeZero(fe) - fe[0] = 1 -} - -func FeAdd(dst, a, b *FieldElement) { - dst[0] = a[0] + b[0] - dst[1] = a[1] + b[1] - dst[2] = a[2] + b[2] - dst[3] = a[3] + b[3] - dst[4] = a[4] + b[4] - dst[5] = a[5] + b[5] - dst[6] = a[6] + b[6] - dst[7] = a[7] + b[7] - dst[8] = a[8] + b[8] - dst[9] = a[9] + b[9] -} - -func FeSub(dst, a, b *FieldElement) { - dst[0] = a[0] - b[0] - dst[1] = a[1] - b[1] - dst[2] = a[2] - b[2] - dst[3] = a[3] - b[3] - dst[4] = a[4] - b[4] - dst[5] = a[5] - b[5] - dst[6] = a[6] - b[6] - dst[7] = a[7] - b[7] - dst[8] = a[8] - b[8] - dst[9] = a[9] - b[9] -} - -func FeCopy(dst, src *FieldElement) { - copy(dst[:], src[:]) -} - -// Replace (f,g) with (g,g) if b == 1; -// replace (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func FeCMove(f, g *FieldElement, b int32) { - b = -b - f[0] ^= b & (f[0] ^ g[0]) - f[1] ^= b & (f[1] ^ g[1]) - f[2] ^= b & (f[2] ^ g[2]) - f[3] ^= b & (f[3] ^ g[3]) - f[4] ^= b & (f[4] ^ g[4]) - f[5] ^= b & (f[5] ^ g[5]) - f[6] ^= b & (f[6] ^ g[6]) - f[7] ^= b & (f[7] ^ g[7]) - f[8] ^= b & (f[8] ^ g[8]) - f[9] ^= b & (f[9] ^ g[9]) -} - -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func FeFromBytes(dst *FieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 8388607) << 2 - - FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). 
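The FieldElement doc comment above fixes the limb layout t[0] + 2^26 t[1] + 2^51 t[2] + ... + 2^230 t[9]. The stand-alone sketch below is not part of the vendored file being removed; it is an illustration only (helper names such as feToBig are made up), showing with math/big how those ten int32 limbs map to an integer modulo p = 2^255 - 19.

// Illustration only (not part of the removed vendor file): how the ten int32
// limbs documented above map to an integer modulo p = 2^255 - 19.
package main

import (
	"fmt"
	"math/big"
)

// Bit positions from the FieldElement doc comment:
// t[0] + 2^26 t[1] + 2^51 t[2] + 2^77 t[3] + ... + 2^230 t[9].
var limbExponents = [10]uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}

var p = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

// feToBig evaluates the limb polynomial and reduces it into [0, p).
func feToBig(t [10]int32) *big.Int {
	acc := new(big.Int)
	for i, limb := range t {
		term := new(big.Int).Lsh(big.NewInt(1), limbExponents[i])
		term.Mul(term, big.NewInt(int64(limb)))
		acc.Add(acc, term)
	}
	return acc.Mod(acc, p)
}

func main() {
	fmt.Println(feToBig([10]int32{1})) // 1

	// Limbs that spell out p itself (alternating 26/25-bit limbs at their
	// maximum, with the bottom limb short by 18) reduce to zero.
	pLimbs := [10]int32{
		1<<26 - 19, 1<<25 - 1, 1<<26 - 1, 1<<25 - 1, 1<<26 - 1,
		1<<25 - 1, 1<<26 - 1, 1<<25 - 1, 1<<26 - 1, 1<<25 - 1,
	}
	fmt.Println(feToBig(pLimbs)) // 0
}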
-// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. - - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f *FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - h[0] = -f[0] - h[1] = -f[1] - h[2] = -f[2] - h[3] = -f[3] - h[4] = -f[4] - h[5] = -f[5] - h[6] = -f[6] - h[7] = -f[7] - h[8] = -f[8] - h[9] = -f[9] -} - -func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.51*2^58 */ - /* |h5| <= 1.51*2^58 */ - - c1 = (h1 + (1 << 24)) >> 25 - h2 += c1 - h1 -= c1 << 25 - c5 = (h5 + (1 << 24)) >> 25 - h6 += c5 - h5 -= c5 << 25 - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.21*2^59 */ - /* |h6| <= 1.21*2^59 */ - - c2 = (h2 + (1 << 25)) >> 26 - h3 += c2 - h2 -= c2 << 26 - c6 = (h6 + (1 << 25)) >> 26 - h7 += c6 - h6 -= c6 << 26 - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.51*2^58 */ - /* |h7| <= 1.51*2^58 */ - - c3 = (h3 + (1 << 24)) >> 25 - h4 += c3 - h3 -= c3 << 25 - c7 = (h7 + (1 << 24)) >> 25 - h8 += c7 - h7 -= c7 << 25 - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.52*2^33 */ - /* |h8| <= 1.52*2^33 */ - - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - c8 = (h8 + (1 << 25)) >> 26 - h9 += c8 - h8 -= c8 << 26 - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.51*2^58 */ - - c9 = (h9 + (1 << 24)) >> 25 - h0 += c9 * 19 - h9 -= c9 << 25 - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.8*2^37 */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. 
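The FeMul notes above rely on precomputed multiplications by 19 (the g*_19 terms). They work because 2^255 ≡ 19 (mod p), so the part of a double-width product that overflows bit 255 folds back into the low half with a factor of 19 instead of a division. A stand-alone check of that identity, illustration only and not part of the patch:

// Illustration only: the identity behind FeMul's precomputed *19 terms.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))

	// 2^255 mod p is exactly 19.
	fmt.Println(new(big.Int).Mod(new(big.Int).Lsh(one, 255), p)) // 19

	// Split a wide product into hi*2^255 + lo and confirm hi*19 + lo is
	// congruent to it mod p.
	a := new(big.Int).Sub(p, big.NewInt(1))
	b := new(big.Int).Sub(p, big.NewInt(2))
	prod := new(big.Int).Mul(a, b)

	hi := new(big.Int).Rsh(prod, 255)
	lo := new(big.Int).And(prod, new(big.Int).Sub(new(big.Int).Lsh(one, 255), one))
	folded := new(big.Int).Add(lo, new(big.Int).Mul(hi, big.NewInt(19)))

	fmt.Println(new(big.Int).Mod(prod, p).Cmp(new(big.Int).Mod(folded, p)) == 0) // true
}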
-func FeMul(h, f, g *FieldElement) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - - f1_2 := int64(2 * f[1]) - f3_2 := int64(2 * f[3]) - f5_2 := int64(2 * f[5]) - f7_2 := int64(2 * f[7]) - f9_2 := int64(2 * f[9]) - - g0 := int64(g[0]) - g1 := int64(g[1]) - g2 := int64(g[2]) - g3 := int64(g[3]) - g4 := int64(g[4]) - g5 := int64(g[5]) - g6 := int64(g[6]) - g7 := int64(g[7]) - g8 := int64(g[8]) - g9 := int64(g[9]) - - g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ - g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ - g3_19 := int64(19 * g[3]) - g4_19 := int64(19 * g[4]) - g5_19 := int64(19 * g[5]) - g6_19 := int64(19 * g[6]) - g7_19 := int64(19 * g[7]) - g8_19 := int64(19 * g[8]) - g9_19 := int64(19 * g[9]) - - h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 - h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 - h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 - h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 - h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 - h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 - h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 - h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 - h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 - h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - f0_2 := int64(2 * f[0]) - f1_2 := int64(2 * f[1]) - f2_2 := int64(2 * f[2]) - f3_2 := int64(2 * f[3]) - f4_2 := int64(2 * f[4]) - f5_2 := int64(2 * f[5]) - f6_2 := int64(2 * f[6]) - f7_2 := int64(2 * f[7]) - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - - h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 - h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 - h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 - h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 - h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 - h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 - h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 - h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 - h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 - h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 - - return -} - -// FeSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
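The bound comments in FeCombine and feSquare above (for example "|h0| <= 1.2*2^59") are what justify accumulating the schoolbook products in int64 before the carry chain runs. The short program below, an illustration only and not part of the vendored code, evaluates the exact expression quoted in that comment and confirms the headroom against the int64 limit.

// Illustration only: the overflow headroom claimed in the FeCombine comment.
package main

import (
	"fmt"
	"math"
)

func main() {
	// Expression copied from the comment:
	// |h0| <= 1.1*1.1*2^52*(1+19+19+19+19) + 1.1*1.1*2^50*(38+38+38+38+38)
	h0Bound := 1.1*1.1*math.Pow(2, 52)*(1+19+19+19+19) +
		1.1*1.1*math.Pow(2, 50)*(38+38+38+38+38)

	fmt.Printf("|h0| bound ~ 2^%.2f\n", math.Log2(h0Bound)) // about 2^59.2
	fmt.Println("fits in int64:", h0Bound < math.Pow(2, 63)) // true
}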
-func FeSquare(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. 
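FeInvert above computes the inverse with a fixed square-and-multiply chain whose exponent bit pattern (254..5,3,1,0 in the comments) is p-2 = 2^255 - 21; the underlying identity is Fermat's little theorem, since p is prime. A direct cross-check with math/big, illustration only and not part of the patch:

// Illustration only: FeInvert's addition chain computes z^(p-2), which is the
// modular inverse of z because p = 2^255 - 19 is prime.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	z := big.NewInt(1234567891011)

	viaFermat := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)
	viaEuclid := new(big.Int).ModInverse(z, p)

	fmt.Println(viaFermat.Cmp(viaEuclid) == 0) // true
	product := new(big.Int).Mul(z, viaFermat)
	fmt.Println(product.Mod(product, p)) // 1
}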
-// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) != (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31]. -// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise. 
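The equal and negative helpers above produce 0/1 results without data-dependent branches, which is what lets selectPoint scan the entire precomputed table and pick one entry in constant time. The stand-alone demo below is illustration only, not part of the vendored package, and holds for the small non-negative window indices these helpers are actually called with.

// Illustration only: branch-free comparison and conditional move, in the
// style of equal/negative/FeCMove above.
package main

import "fmt"

func equal(b, c int32) int32 {
	x := uint32(b ^ c) // 0 only when b == c
	x--                // wraps to 0xFFFFFFFF exactly when x was 0
	return int32(x >> 31)
}

func negative(b int32) int32 {
	return (b >> 31) & 1 // arithmetic shift copies the sign bit
}

func main() {
	fmt.Println(equal(7, 7), equal(7, 8))  // 1 0
	fmt.Println(negative(-5), negative(5)) // 1 0

	// A conditional move built from the 0/1 result: the mask is all ones when
	// the condition holds, all zeros otherwise.
	var dst, src int32 = 111, 222
	mask := -equal(1, 1)
	dst ^= mask & (dst ^ src)
	fmt.Println(dst) // 222
}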
-func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
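ScMulAdd below (and ScReduce further down) reduce modulo the group order l = 2^252 + 27742317777372353535851937790883648493 quoted in the comment above, working limb-wise on 21-bit limbs and emitting the result as 32 little-endian bytes. The sketch below is illustration only, not part of the patch; it restates the operation (ab + c) mod l with math/big so the intended output is easy to verify.

// Illustration only: the quantity ScMulAdd encodes, computed with math/big.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// l = 2^252 + 27742317777372353535851937790883648493
	l, ok := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	if !ok {
		panic("bad constant")
	}
	l.Add(l, new(big.Int).Lsh(big.NewInt(1), 252))

	a := new(big.Int).Sub(l, big.NewInt(1))
	b := new(big.Int).Sub(l, big.NewInt(2))
	c := big.NewInt(5)

	// (a*b + c) mod l
	s := new(big.Int).Mod(new(big.Int).Add(new(big.Int).Mul(a, b), c), l)
	fmt.Println(s)            // 7, since (l-1)(l-2) ≡ 2 (mod l)
	fmt.Println(s.Cmp(l) < 0) // true: fully reduced
}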
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go index 2e99df517..012bc690a 100644 --- a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go @@ -1,4936 +1,4937 @@ + // line 1 "grapheme_clusters.rl" package textseg import ( - "errors" - "unicode/utf8" + "errors" + "unicode/utf8" ) // Generated from grapheme_clusters.rl. DO NOT EDIT // line 13 "grapheme_clusters.go" var _graphclust_actions []byte = []byte{ - 0, 1, 0, 1, 4, 1, 9, 1, 10, - 1, 11, 1, 12, 1, 13, 1, 14, - 1, 15, 1, 16, 1, 17, 1, 18, - 1, 19, 1, 20, 1, 21, 2, 1, - 7, 2, 1, 8, 2, 2, 3, 2, - 5, 1, 3, 0, 1, 8, 3, 5, - 0, 1, 3, 5, 1, 6, + 0, 1, 0, 1, 4, 1, 9, 1, 10, + 1, 11, 1, 12, 1, 13, 1, 14, + 1, 15, 1, 16, 1, 17, 1, 18, + 1, 19, 1, 20, 1, 21, 2, 1, + 7, 2, 1, 8, 2, 2, 3, 2, + 5, 1, 3, 0, 1, 8, 3, 5, + 0, 1, 3, 5, 1, 6, } var _graphclust_key_offsets []int16 = []int16{ - 0, 0, 1, 3, 5, 7, 10, 15, - 17, 20, 28, 31, 33, 35, 37, 67, - 75, 77, 81, 84, 89, 94, 104, 116, - 122, 127, 137, 140, 147, 151, 159, 169, - 173, 181, 183, 191, 194, 196, 201, 203, - 210, 212, 220, 221, 242, 246, 252, 257, - 259, 263, 267, 269, 273, 275, 278, 282, - 284, 291, 293, 297, 301, 305, 307, 309, - 318, 322, 327, 329, 335, 337, 338, 340, - 341, 343, 345, 347, 349, 364, 368, 370, - 372, 377, 381, 385, 387, 389, 393, 397, - 399, 403, 410, 415, 419, 422, 423, 427, - 434, 439, 440, 441, 443, 452, 454, 477, - 481, 483, 487, 491, 492, 496, 500, 503, - 505, 510, 523, 525, 527, 529, 531, 535, - 539, 541, 543, 545, 549, 553, 557, 559, - 561, 563, 565, 566, 568, 574, 580, 586, - 588, 592, 596, 601, 604, 614, 616, 618, - 621, 623, 625, 627, 629, 632, 637, 639, - 642, 650, 653, 655, 657, 659, 690, 698, - 700, 704, 711, 723, 730, 744, 750, 768, - 779, 785, 797, 800, 809, 814, 824, 830, - 844, 850, 862, 874, 878, 880, 886, 888, - 895, 898, 906, 907, 928, 937, 945, 951, - 953, 957, 961, 966, 972, 974, 977, 990, - 995, 1009, 1011, 1020, 1027, 1038, 1048, 1056, - 1067, 1071, 1076, 1078, 1080, 1082, 1083, 1085, - 1087, 1089, 1091, 1106, 1110, 1112, 1114, 1122, - 1130, 1132, 1136, 1147, 1150, 1160, 1164, 1171, - 1179, 1185, 1188, 1189, 1193, 1200, 1205, 1206, - 1207, 1209, 1218, 1220, 1243, 1248, 1250, 1259, - 1264, 1265, 1274, 1280, 1290, 1295, 1302, 1316, - 1320, 1325, 1336, 1339, 1349, 1353, 1362, 1364, - 1372, 1379, 1385, 1392, 1396, 1398, 1400, 1402, - 1403, 1405, 1411, 1419, 1425, 1427, 1431, 1435, - 1440, 1443, 1453, 1455, 1457, 1458, 1460, 1461, - 1467, 1469, 1471, 1471, 1472, 1473, 1474, 1480, - 1482, 1484, 1484, 1490, 1492, 1497, 1502, 1504, - 1506, 1508, 1511, 1516, 1518, 1521, 1529, 1532, - 1534, 1536, 1538, 1568, 1576, 1578, 1582, 1585, - 1590, 1595, 1605, 1617, 1623, 1628, 1638, 1641, - 1648, 1652, 1660, 1670, 1674, 1682, 1684, 1692, - 1695, 1697, 1702, 1704, 1711, 1713, 1721, 1722, - 1743, 1747, 1753, 1758, 1760, 1764, 1768, 1770, - 1774, 1776, 1779, 1783, 1785, 1792, 1794, 1798, - 1802, 1806, 1808, 1810, 1819, 1823, 1828, 1830, - 1836, 1838, 1839, 1841, 1842, 
1844, 1846, 1848, - 1850, 1865, 1869, 1871, 1873, 1878, 1882, 1886, - 1888, 1890, 1894, 1898, 1900, 1904, 1911, 1916, - 1920, 1923, 1924, 1928, 1935, 1940, 1941, 1942, - 1944, 1953, 1955, 1978, 1982, 1984, 1988, 1992, - 1993, 1997, 2001, 2004, 2006, 2011, 2024, 2026, - 2028, 2030, 2032, 2036, 2040, 2042, 2044, 2046, - 2050, 2054, 2058, 2060, 2062, 2064, 2066, 2067, - 2069, 2075, 2081, 2087, 2089, 2093, 2097, 2102, - 2105, 2115, 2117, 2119, 2122, 2124, 2126, 2128, - 2130, 2133, 2138, 2140, 2143, 2151, 2154, 2156, - 2158, 2160, 2191, 2199, 2201, 2205, 2212, 2224, - 2231, 2245, 2251, 2269, 2280, 2286, 2298, 2301, - 2310, 2315, 2325, 2331, 2345, 2351, 2363, 2375, - 2379, 2381, 2387, 2389, 2396, 2399, 2407, 2408, - 2429, 2438, 2446, 2452, 2454, 2458, 2462, 2467, - 2473, 2475, 2478, 2491, 2496, 2510, 2512, 2521, - 2528, 2539, 2549, 2557, 2568, 2572, 2577, 2579, - 2581, 2583, 2584, 2586, 2588, 2590, 2592, 2607, - 2611, 2613, 2615, 2623, 2631, 2633, 2637, 2648, - 2651, 2661, 2665, 2672, 2680, 2686, 2689, 2690, - 2694, 2701, 2706, 2707, 2708, 2710, 2719, 2721, - 2744, 2749, 2751, 2760, 2765, 2766, 2775, 2781, - 2791, 2796, 2803, 2817, 2821, 2826, 2837, 2840, - 2850, 2854, 2863, 2865, 2873, 2880, 2886, 2893, - 2897, 2899, 2901, 2903, 2904, 2906, 2912, 2920, - 2926, 2928, 2932, 2936, 2941, 2944, 2954, 2956, - 2958, 2959, 2961, 2962, 2968, 2970, 2972, 2972, - 2973, 2974, 2975, 2981, 2983, 2985, 2985, 2991, - 2993, 2997, 3003, 3006, 3009, 3013, 3016, 3019, - 3026, 3028, 3052, 3054, 3078, 3080, 3082, 3105, - 3107, 3109, 3110, 3112, 3114, 3116, 3122, 3124, - 3156, 3160, 3165, 3188, 3190, 3192, 3194, 3196, - 3199, 3201, 3203, 3207, 3207, 3263, 3319, 3350, - 3355, 3359, 3366, 3374, 3378, 3381, 3384, 3390, - 3392, 3412, 3418, 3423, 3425, 3427, 3430, 3432, - 3434, 3438, 3494, 3550, 3581, 3586, 3594, 3598, - 3600, 3605, 3611, 3615, 3618, 3624, 3627, 3631, - 3634, 3638, 3651, 3655, 3662, 3663, 3665, 3668, - 3678, 3698, 3705, 3709, 3716, 3726, 3733, 3736, - 3751, 3753, 3756, 3761, 3763, 3766, 3769, 3773, - 3776, 3779, 3786, 3788, 3790, 3792, 3794, 3797, - 3802, 3804, 3807, 3815, 3818, 3820, 3822, 3824, - 3854, 3862, 3864, 3868, 3871, 3876, 3881, 3891, - 3903, 3909, 3914, 3924, 3927, 3934, 3938, 3946, - 3956, 3960, 3968, 3970, 3978, 3981, 3983, 3988, - 3990, 3997, 3999, 4007, 4008, 4029, 4033, 4039, - 4044, 4046, 4050, 4054, 4056, 4060, 4062, 4065, - 4069, 4071, 4078, 4080, 4084, 4088, 4092, 4094, - 4096, 4105, 4109, 4114, 4116, 4122, 4124, 4125, - 4127, 4128, 4130, 4132, 4134, 4136, 4151, 4155, - 4157, 4159, 4164, 4168, 4172, 4174, 4176, 4180, - 4184, 4186, 4190, 4197, 4202, 4206, 4209, 4210, - 4214, 4221, 4226, 4227, 4228, 4230, 4239, 4241, - 4264, 4268, 4270, 4274, 4278, 4279, 4283, 4287, - 4290, 4292, 4297, 4310, 4312, 4314, 4316, 4318, - 4322, 4326, 4328, 4330, 4332, 4336, 4340, 4344, - 4346, 4348, 4350, 4352, 4353, 4355, 4361, 4367, - 4373, 4375, 4379, 4383, 4388, 4391, 4401, 4403, - 4405, 4408, 4410, 4412, 4414, 4416, 4419, 4424, - 4426, 4429, 4437, 4440, 4442, 4444, 4446, 4477, - 4485, 4487, 4491, 4498, 4510, 4517, 4531, 4537, - 4555, 4566, 4572, 4584, 4587, 4596, 4601, 4611, - 4617, 4631, 4637, 4649, 4661, 4665, 4667, 4673, - 4675, 4682, 4685, 4693, 4694, 4715, 4724, 4732, - 4738, 4740, 4744, 4748, 4753, 4759, 4761, 4764, - 4777, 4782, 4796, 4798, 4807, 4814, 4825, 4835, - 4843, 4854, 4858, 4863, 4865, 4867, 4869, 4870, - 4872, 4874, 4876, 4878, 4893, 4897, 4899, 4901, - 4909, 4917, 4919, 4923, 4934, 4937, 4947, 4951, - 4958, 4966, 4972, 4975, 4976, 4980, 4987, 4992, - 4993, 4994, 4996, 5005, 5007, 
5030, 5035, 5037, - 5046, 5051, 5052, 5061, 5067, 5077, 5082, 5089, - 5103, 5107, 5112, 5123, 5126, 5136, 5140, 5149, - 5151, 5159, 5166, 5172, 5179, 5183, 5185, 5187, - 5189, 5190, 5192, 5198, 5206, 5212, 5214, 5218, - 5222, 5227, 5230, 5240, 5242, 5244, 5245, 5247, - 5248, 5254, 5256, 5258, 5258, 5259, 5260, 5261, - 5267, 5269, 5271, 5271, 5277, 5301, 5303, 5327, - 5329, 5331, 5354, 5356, 5358, 5359, 5361, 5363, - 5365, 5371, 5373, 5405, 5409, 5414, 5437, 5439, - 5441, 5443, 5445, 5448, 5450, 5452, 5456, 5456, - 5512, 5568, 5599, 5604, 5607, 5614, 5626, 5628, - 5630, 5632, 5635, 5640, 5642, 5645, 5653, 5656, - 5658, 5660, 5662, 5692, 5700, 5702, 5706, 5709, - 5714, 5719, 5729, 5741, 5747, 5752, 5762, 5765, - 5772, 5776, 5784, 5794, 5798, 5806, 5808, 5816, - 5819, 5821, 5826, 5828, 5835, 5837, 5845, 5846, - 5867, 5871, 5877, 5882, 5884, 5888, 5892, 5894, - 5898, 5900, 5903, 5907, 5909, 5916, 5918, 5922, - 5926, 5930, 5932, 5934, 5943, 5947, 5952, 5954, - 5956, 5958, 5959, 5961, 5963, 5965, 5967, 5982, - 5986, 5988, 5990, 5995, 5999, 6003, 6005, 6007, - 6011, 6015, 6017, 6021, 6028, 6033, 6037, 6040, - 6041, 6045, 6051, 6056, 6057, 6058, 6060, 6069, - 6071, 6094, 6098, 6100, 6104, 6108, 6109, 6113, - 6117, 6120, 6122, 6127, 6140, 6142, 6144, 6146, - 6148, 6152, 6156, 6158, 6160, 6162, 6166, 6170, - 6174, 6176, 6178, 6180, 6182, 6183, 6185, 6191, - 6197, 6203, 6205, 6209, 6213, 6218, 6221, 6231, - 6233, 6235, 6236, 6242, 6244, 6246, 6246, 6252, - 6253, 6260, 6263, 6265, 6267, 6269, 6271, 6274, - 6279, 6281, 6284, 6292, 6295, 6297, 6299, 6301, - 6332, 6340, 6342, 6346, 6353, 6365, 6372, 6386, - 6392, 6410, 6421, 6427, 6439, 6442, 6451, 6456, - 6466, 6472, 6486, 6492, 6504, 6516, 6520, 6522, - 6528, 6530, 6537, 6540, 6548, 6549, 6570, 6579, - 6587, 6593, 6595, 6599, 6603, 6608, 6614, 6616, - 6619, 6632, 6637, 6651, 6653, 6662, 6669, 6680, - 6690, 6698, 6709, 6713, 6718, 6720, 6722, 6724, - 6725, 6727, 6729, 6731, 6733, 6748, 6752, 6754, - 6756, 6764, 6772, 6774, 6778, 6789, 6792, 6802, - 6806, 6813, 6821, 6827, 6830, 6831, 6835, 6842, - 6847, 6848, 6849, 6851, 6860, 6862, 6885, 6890, - 6892, 6901, 6906, 6907, 6916, 6922, 6932, 6937, - 6944, 6958, 6962, 6967, 6978, 6981, 6991, 6995, - 7004, 7006, 7014, 7021, 7027, 7034, 7038, 7040, - 7042, 7044, 7045, 7047, 7053, 7061, 7067, 7069, - 7073, 7077, 7082, 7085, 7095, 7097, 7099, 7100, - 7102, 7103, 7109, 7111, 7113, 7113, 7114, 7115, - 7121, 7124, 7126, 7128, 7130, 7133, 7138, 7140, - 7143, 7151, 7154, 7156, 7158, 7160, 7191, 7199, - 7201, 7205, 7212, 7214, 7216, 7218, 7221, 7226, - 7228, 7231, 7239, 7242, 7244, 7246, 7248, 7278, - 7286, 7288, 7292, 7295, 7300, 7305, 7315, 7327, - 7333, 7338, 7348, 7351, 7358, 7362, 7370, 7380, - 7384, 7392, 7394, 7402, 7405, 7407, 7412, 7414, - 7421, 7423, 7431, 7432, 7453, 7457, 7463, 7468, - 7470, 7474, 7478, 7480, 7484, 7486, 7489, 7493, - 7495, 7502, 7504, 7508, 7512, 7516, 7518, 7520, - 7529, 7533, 7538, 7540, 7546, 7548, 7549, 7551, - 7552, 7554, 7556, 7558, 7560, 7575, 7579, 7581, - 7583, 7588, 7592, 7596, 7598, 7600, 7604, 7608, - 7610, 7614, 7621, 7626, 7630, 7633, 7634, 7638, - 7645, 7650, 7651, 7652, 7654, 7663, 7665, 7688, - 7692, 7694, 7698, 7702, 7703, 7707, 7711, 7714, - 7716, 7721, 7734, 7736, 7738, 7740, 7742, 7746, - 7750, 7752, 7754, 7756, 7760, 7764, 7768, 7770, - 7772, 7774, 7776, 7777, 7779, 7785, 7791, 7797, - 7799, 7803, 7807, 7812, 7815, 7825, 7827, 7829, - 7832, 7834, 7835, 7836, 7837, 7843, 7845, 7847, - 7847, 7853, 7865, 7872, 7886, 7892, 7910, 7921, - 7927, 7939, 7942, 7951, 7956, 
7966, 7972, 7986, - 7992, 8004, 8016, 8020, 8022, 8028, 8030, 8037, - 8040, 8048, 8049, 8070, 8079, 8087, 8093, 8095, - 8099, 8103, 8108, 8114, 8116, 8119, 8132, 8137, - 8151, 8153, 8162, 8169, 8180, 8190, 8198, 8209, - 8213, 8218, 8220, 8222, 8224, 8225, 8227, 8229, - 8231, 8233, 8248, 8252, 8254, 8256, 8264, 8272, - 8274, 8278, 8289, 8292, 8302, 8306, 8313, 8321, - 8327, 8330, 8331, 8335, 8342, 8347, 8348, 8349, - 8351, 8360, 8362, 8385, 8390, 8392, 8401, 8406, - 8407, 8416, 8422, 8432, 8437, 8444, 8458, 8462, - 8467, 8478, 8481, 8491, 8495, 8504, 8506, 8514, - 8521, 8527, 8534, 8538, 8540, 8542, 8544, 8545, - 8547, 8553, 8561, 8567, 8569, 8573, 8577, 8582, - 8585, 8595, 8597, 8599, 8600, 8602, 8603, 8609, - 8611, 8613, 8613, 8616, 8622, 8624, 8644, 8650, - 8655, 8657, 8659, 8662, 8664, 8666, 8670, 8726, - 8782, 8817, 8822, 8830, 8832, 8832, 8834, 8838, - 8841, 8848, 8854, 8858, 8861, 8867, 8870, 8876, - 8879, 8885, 8898, 8902, 8904, 8906, 8908, 8911, - 8916, 8918, 8921, 8929, 8932, 8934, 8936, 8938, - 8968, 8976, 8978, 8982, 8985, 8990, 8995, 9005, - 9017, 9023, 9028, 9038, 9041, 9048, 9052, 9060, - 9070, 9074, 9082, 9084, 9092, 9095, 9097, 9102, - 9104, 9111, 9113, 9121, 9122, 9143, 9147, 9153, - 9158, 9160, 9164, 9168, 9170, 9174, 9176, 9179, - 9183, 9185, 9192, 9194, 9198, 9202, 9206, 9208, - 9210, 9219, 9223, 9228, 9230, 9236, 9238, 9239, - 9241, 9242, 9244, 9246, 9248, 9250, 9265, 9269, - 9271, 9273, 9278, 9282, 9286, 9288, 9290, 9294, - 9298, 9300, 9304, 9311, 9316, 9320, 9323, 9324, - 9328, 9335, 9340, 9341, 9342, 9344, 9353, 9355, - 9378, 9382, 9384, 9388, 9392, 9393, 9397, 9401, - 9404, 9406, 9411, 9424, 9426, 9428, 9430, 9432, - 9436, 9440, 9442, 9444, 9446, 9450, 9454, 9458, - 9460, 9462, 9464, 9466, 9467, 9469, 9475, 9481, - 9487, 9489, 9493, 9497, 9502, 9505, 9515, 9517, - 9519, 9522, 9524, 9526, 9528, 9530, 9533, 9538, - 9540, 9543, 9551, 9554, 9556, 9558, 9560, 9591, - 9599, 9601, 9605, 9612, 9624, 9631, 9645, 9651, - 9669, 9680, 9686, 9698, 9701, 9710, 9715, 9725, - 9731, 9745, 9751, 9763, 9775, 9779, 9781, 9787, - 9789, 9796, 9799, 9807, 9808, 9829, 9838, 9846, - 9852, 9854, 9858, 9862, 9867, 9873, 9875, 9878, - 9891, 9896, 9910, 9912, 9921, 9928, 9939, 9949, - 9957, 9968, 9972, 9977, 9979, 9981, 9983, 9984, - 9986, 9988, 9990, 9992, 10007, 10011, 10013, 10015, - 10023, 10031, 10033, 10037, 10048, 10051, 10061, 10065, - 10072, 10080, 10086, 10089, 10090, 10094, 10101, 10106, - 10107, 10108, 10110, 10119, 10121, 10144, 10149, 10151, - 10160, 10165, 10166, 10175, 10181, 10191, 10196, 10203, - 10217, 10221, 10226, 10237, 10240, 10250, 10254, 10263, - 10265, 10273, 10280, 10286, 10293, 10297, 10299, 10301, - 10303, 10304, 10306, 10312, 10320, 10326, 10328, 10332, - 10336, 10341, 10344, 10354, 10356, 10358, 10359, 10361, - 10362, 10368, 10370, 10372, 10372, 10373, 10374, 10375, - 10381, 10383, 10385, 10385, 10391, 10398, 10399, 10401, - 10404, 10414, 10434, 10441, 10445, 10452, 10462, 10469, - 10472, 10487, 10489, 10492, 10501, 10505, 10509, 10538, - 10558, 10578, 10598, 10620, 10640, 10660, 10680, 10703, - 10724, 10745, 10766, 10786, 10809, 10829, 10849, 10869, - 10890, 10911, 10932, 10952, 10972, 10992, 11012, 11032, - 11052, 11072, 11092, 11112, + 0, 0, 1, 3, 5, 7, 10, 15, + 17, 20, 28, 31, 33, 35, 37, 67, + 75, 77, 81, 84, 89, 94, 104, 116, + 122, 127, 137, 140, 147, 151, 159, 169, + 173, 181, 183, 191, 194, 196, 201, 203, + 210, 212, 220, 221, 242, 246, 252, 257, + 259, 263, 267, 269, 273, 275, 278, 282, + 284, 291, 293, 297, 301, 305, 307, 309, + 318, 322, 327, 329, 
335, 337, 338, 340, + 341, 343, 345, 347, 349, 364, 368, 370, + 372, 377, 381, 385, 387, 389, 393, 397, + 399, 403, 410, 415, 419, 422, 423, 427, + 434, 439, 440, 441, 443, 452, 454, 477, + 481, 483, 487, 491, 492, 496, 500, 503, + 505, 510, 523, 525, 527, 529, 531, 535, + 539, 541, 543, 545, 549, 553, 557, 559, + 561, 563, 565, 566, 568, 574, 580, 586, + 588, 592, 596, 601, 604, 614, 616, 618, + 621, 623, 625, 627, 629, 632, 637, 639, + 642, 650, 653, 655, 657, 659, 690, 698, + 700, 704, 711, 723, 730, 744, 750, 768, + 779, 785, 797, 800, 809, 814, 824, 830, + 844, 850, 862, 874, 878, 880, 886, 888, + 895, 898, 906, 907, 928, 937, 945, 951, + 953, 957, 961, 966, 972, 974, 977, 990, + 995, 1009, 1011, 1020, 1027, 1038, 1048, 1056, + 1067, 1071, 1076, 1078, 1080, 1082, 1083, 1085, + 1087, 1089, 1091, 1106, 1110, 1112, 1114, 1122, + 1130, 1132, 1136, 1147, 1150, 1160, 1164, 1171, + 1179, 1185, 1188, 1189, 1193, 1200, 1205, 1206, + 1207, 1209, 1218, 1220, 1243, 1248, 1250, 1259, + 1264, 1265, 1274, 1280, 1290, 1295, 1302, 1316, + 1320, 1325, 1336, 1339, 1349, 1353, 1362, 1364, + 1372, 1379, 1385, 1392, 1396, 1398, 1400, 1402, + 1403, 1405, 1411, 1419, 1425, 1427, 1431, 1435, + 1440, 1443, 1453, 1455, 1457, 1458, 1460, 1461, + 1467, 1469, 1471, 1471, 1472, 1473, 1474, 1480, + 1482, 1484, 1484, 1490, 1492, 1497, 1502, 1504, + 1506, 1508, 1511, 1516, 1518, 1521, 1529, 1532, + 1534, 1536, 1538, 1568, 1576, 1578, 1582, 1585, + 1590, 1595, 1605, 1617, 1623, 1628, 1638, 1641, + 1648, 1652, 1660, 1670, 1674, 1682, 1684, 1692, + 1695, 1697, 1702, 1704, 1711, 1713, 1721, 1722, + 1743, 1747, 1753, 1758, 1760, 1764, 1768, 1770, + 1774, 1776, 1779, 1783, 1785, 1792, 1794, 1798, + 1802, 1806, 1808, 1810, 1819, 1823, 1828, 1830, + 1836, 1838, 1839, 1841, 1842, 1844, 1846, 1848, + 1850, 1865, 1869, 1871, 1873, 1878, 1882, 1886, + 1888, 1890, 1894, 1898, 1900, 1904, 1911, 1916, + 1920, 1923, 1924, 1928, 1935, 1940, 1941, 1942, + 1944, 1953, 1955, 1978, 1982, 1984, 1988, 1992, + 1993, 1997, 2001, 2004, 2006, 2011, 2024, 2026, + 2028, 2030, 2032, 2036, 2040, 2042, 2044, 2046, + 2050, 2054, 2058, 2060, 2062, 2064, 2066, 2067, + 2069, 2075, 2081, 2087, 2089, 2093, 2097, 2102, + 2105, 2115, 2117, 2119, 2122, 2124, 2126, 2128, + 2130, 2133, 2138, 2140, 2143, 2151, 2154, 2156, + 2158, 2160, 2191, 2199, 2201, 2205, 2212, 2224, + 2231, 2245, 2251, 2269, 2280, 2286, 2298, 2301, + 2310, 2315, 2325, 2331, 2345, 2351, 2363, 2375, + 2379, 2381, 2387, 2389, 2396, 2399, 2407, 2408, + 2429, 2438, 2446, 2452, 2454, 2458, 2462, 2467, + 2473, 2475, 2478, 2491, 2496, 2510, 2512, 2521, + 2528, 2539, 2549, 2557, 2568, 2572, 2577, 2579, + 2581, 2583, 2584, 2586, 2588, 2590, 2592, 2607, + 2611, 2613, 2615, 2623, 2631, 2633, 2637, 2648, + 2651, 2661, 2665, 2672, 2680, 2686, 2689, 2690, + 2694, 2701, 2706, 2707, 2708, 2710, 2719, 2721, + 2744, 2749, 2751, 2760, 2765, 2766, 2775, 2781, + 2791, 2796, 2803, 2817, 2821, 2826, 2837, 2840, + 2850, 2854, 2863, 2865, 2873, 2880, 2886, 2893, + 2897, 2899, 2901, 2903, 2904, 2906, 2912, 2920, + 2926, 2928, 2932, 2936, 2941, 2944, 2954, 2956, + 2958, 2959, 2961, 2962, 2968, 2970, 2972, 2972, + 2973, 2974, 2975, 2981, 2983, 2985, 2985, 2991, + 2993, 2997, 3003, 3006, 3009, 3013, 3016, 3019, + 3026, 3028, 3052, 3054, 3078, 3080, 3082, 3105, + 3107, 3109, 3110, 3112, 3114, 3116, 3122, 3124, + 3156, 3160, 3165, 3188, 3190, 3192, 3194, 3196, + 3199, 3201, 3203, 3207, 3207, 3263, 3319, 3350, + 3355, 3359, 3366, 3374, 3378, 3381, 3384, 3390, + 3392, 3412, 3418, 3423, 3425, 3427, 3430, 3432, + 3434, 3438, 
3494, 3550, 3581, 3586, 3594, 3598, + 3600, 3605, 3611, 3615, 3618, 3624, 3627, 3631, + 3634, 3638, 3651, 3655, 3662, 3663, 3665, 3668, + 3678, 3698, 3705, 3709, 3716, 3726, 3733, 3736, + 3751, 3753, 3756, 3761, 3763, 3766, 3769, 3773, + 3776, 3779, 3786, 3788, 3790, 3792, 3794, 3797, + 3802, 3804, 3807, 3815, 3818, 3820, 3822, 3824, + 3854, 3862, 3864, 3868, 3871, 3876, 3881, 3891, + 3903, 3909, 3914, 3924, 3927, 3934, 3938, 3946, + 3956, 3960, 3968, 3970, 3978, 3981, 3983, 3988, + 3990, 3997, 3999, 4007, 4008, 4029, 4033, 4039, + 4044, 4046, 4050, 4054, 4056, 4060, 4062, 4065, + 4069, 4071, 4078, 4080, 4084, 4088, 4092, 4094, + 4096, 4105, 4109, 4114, 4116, 4122, 4124, 4125, + 4127, 4128, 4130, 4132, 4134, 4136, 4151, 4155, + 4157, 4159, 4164, 4168, 4172, 4174, 4176, 4180, + 4184, 4186, 4190, 4197, 4202, 4206, 4209, 4210, + 4214, 4221, 4226, 4227, 4228, 4230, 4239, 4241, + 4264, 4268, 4270, 4274, 4278, 4279, 4283, 4287, + 4290, 4292, 4297, 4310, 4312, 4314, 4316, 4318, + 4322, 4326, 4328, 4330, 4332, 4336, 4340, 4344, + 4346, 4348, 4350, 4352, 4353, 4355, 4361, 4367, + 4373, 4375, 4379, 4383, 4388, 4391, 4401, 4403, + 4405, 4408, 4410, 4412, 4414, 4416, 4419, 4424, + 4426, 4429, 4437, 4440, 4442, 4444, 4446, 4477, + 4485, 4487, 4491, 4498, 4510, 4517, 4531, 4537, + 4555, 4566, 4572, 4584, 4587, 4596, 4601, 4611, + 4617, 4631, 4637, 4649, 4661, 4665, 4667, 4673, + 4675, 4682, 4685, 4693, 4694, 4715, 4724, 4732, + 4738, 4740, 4744, 4748, 4753, 4759, 4761, 4764, + 4777, 4782, 4796, 4798, 4807, 4814, 4825, 4835, + 4843, 4854, 4858, 4863, 4865, 4867, 4869, 4870, + 4872, 4874, 4876, 4878, 4893, 4897, 4899, 4901, + 4909, 4917, 4919, 4923, 4934, 4937, 4947, 4951, + 4958, 4966, 4972, 4975, 4976, 4980, 4987, 4992, + 4993, 4994, 4996, 5005, 5007, 5030, 5035, 5037, + 5046, 5051, 5052, 5061, 5067, 5077, 5082, 5089, + 5103, 5107, 5112, 5123, 5126, 5136, 5140, 5149, + 5151, 5159, 5166, 5172, 5179, 5183, 5185, 5187, + 5189, 5190, 5192, 5198, 5206, 5212, 5214, 5218, + 5222, 5227, 5230, 5240, 5242, 5244, 5245, 5247, + 5248, 5254, 5256, 5258, 5258, 5259, 5260, 5261, + 5267, 5269, 5271, 5271, 5277, 5301, 5303, 5327, + 5329, 5331, 5354, 5356, 5358, 5359, 5361, 5363, + 5365, 5371, 5373, 5405, 5409, 5414, 5437, 5439, + 5441, 5443, 5445, 5448, 5450, 5452, 5456, 5456, + 5512, 5568, 5599, 5604, 5607, 5614, 5626, 5628, + 5630, 5632, 5635, 5640, 5642, 5645, 5653, 5656, + 5658, 5660, 5662, 5692, 5700, 5702, 5706, 5709, + 5714, 5719, 5729, 5741, 5747, 5752, 5762, 5765, + 5772, 5776, 5784, 5794, 5798, 5806, 5808, 5816, + 5819, 5821, 5826, 5828, 5835, 5837, 5845, 5846, + 5867, 5871, 5877, 5882, 5884, 5888, 5892, 5894, + 5898, 5900, 5903, 5907, 5909, 5916, 5918, 5922, + 5926, 5930, 5932, 5934, 5943, 5947, 5952, 5954, + 5956, 5958, 5959, 5961, 5963, 5965, 5967, 5982, + 5986, 5988, 5990, 5995, 5999, 6003, 6005, 6007, + 6011, 6015, 6017, 6021, 6028, 6033, 6037, 6040, + 6041, 6045, 6051, 6056, 6057, 6058, 6060, 6069, + 6071, 6094, 6098, 6100, 6104, 6108, 6109, 6113, + 6117, 6120, 6122, 6127, 6140, 6142, 6144, 6146, + 6148, 6152, 6156, 6158, 6160, 6162, 6166, 6170, + 6174, 6176, 6178, 6180, 6182, 6183, 6185, 6191, + 6197, 6203, 6205, 6209, 6213, 6218, 6221, 6231, + 6233, 6235, 6236, 6242, 6244, 6246, 6246, 6252, + 6253, 6260, 6263, 6265, 6267, 6269, 6271, 6274, + 6279, 6281, 6284, 6292, 6295, 6297, 6299, 6301, + 6332, 6340, 6342, 6346, 6353, 6365, 6372, 6386, + 6392, 6410, 6421, 6427, 6439, 6442, 6451, 6456, + 6466, 6472, 6486, 6492, 6504, 6516, 6520, 6522, + 6528, 6530, 6537, 6540, 6548, 6549, 6570, 6579, + 6587, 6593, 
6595, 6599, 6603, 6608, 6614, 6616, + 6619, 6632, 6637, 6651, 6653, 6662, 6669, 6680, + 6690, 6698, 6709, 6713, 6718, 6720, 6722, 6724, + 6725, 6727, 6729, 6731, 6733, 6748, 6752, 6754, + 6756, 6764, 6772, 6774, 6778, 6789, 6792, 6802, + 6806, 6813, 6821, 6827, 6830, 6831, 6835, 6842, + 6847, 6848, 6849, 6851, 6860, 6862, 6885, 6890, + 6892, 6901, 6906, 6907, 6916, 6922, 6932, 6937, + 6944, 6958, 6962, 6967, 6978, 6981, 6991, 6995, + 7004, 7006, 7014, 7021, 7027, 7034, 7038, 7040, + 7042, 7044, 7045, 7047, 7053, 7061, 7067, 7069, + 7073, 7077, 7082, 7085, 7095, 7097, 7099, 7100, + 7102, 7103, 7109, 7111, 7113, 7113, 7114, 7115, + 7121, 7124, 7126, 7128, 7130, 7133, 7138, 7140, + 7143, 7151, 7154, 7156, 7158, 7160, 7191, 7199, + 7201, 7205, 7212, 7214, 7216, 7218, 7221, 7226, + 7228, 7231, 7239, 7242, 7244, 7246, 7248, 7278, + 7286, 7288, 7292, 7295, 7300, 7305, 7315, 7327, + 7333, 7338, 7348, 7351, 7358, 7362, 7370, 7380, + 7384, 7392, 7394, 7402, 7405, 7407, 7412, 7414, + 7421, 7423, 7431, 7432, 7453, 7457, 7463, 7468, + 7470, 7474, 7478, 7480, 7484, 7486, 7489, 7493, + 7495, 7502, 7504, 7508, 7512, 7516, 7518, 7520, + 7529, 7533, 7538, 7540, 7546, 7548, 7549, 7551, + 7552, 7554, 7556, 7558, 7560, 7575, 7579, 7581, + 7583, 7588, 7592, 7596, 7598, 7600, 7604, 7608, + 7610, 7614, 7621, 7626, 7630, 7633, 7634, 7638, + 7645, 7650, 7651, 7652, 7654, 7663, 7665, 7688, + 7692, 7694, 7698, 7702, 7703, 7707, 7711, 7714, + 7716, 7721, 7734, 7736, 7738, 7740, 7742, 7746, + 7750, 7752, 7754, 7756, 7760, 7764, 7768, 7770, + 7772, 7774, 7776, 7777, 7779, 7785, 7791, 7797, + 7799, 7803, 7807, 7812, 7815, 7825, 7827, 7829, + 7832, 7834, 7835, 7836, 7837, 7843, 7845, 7847, + 7847, 7853, 7865, 7872, 7886, 7892, 7910, 7921, + 7927, 7939, 7942, 7951, 7956, 7966, 7972, 7986, + 7992, 8004, 8016, 8020, 8022, 8028, 8030, 8037, + 8040, 8048, 8049, 8070, 8079, 8087, 8093, 8095, + 8099, 8103, 8108, 8114, 8116, 8119, 8132, 8137, + 8151, 8153, 8162, 8169, 8180, 8190, 8198, 8209, + 8213, 8218, 8220, 8222, 8224, 8225, 8227, 8229, + 8231, 8233, 8248, 8252, 8254, 8256, 8264, 8272, + 8274, 8278, 8289, 8292, 8302, 8306, 8313, 8321, + 8327, 8330, 8331, 8335, 8342, 8347, 8348, 8349, + 8351, 8360, 8362, 8385, 8390, 8392, 8401, 8406, + 8407, 8416, 8422, 8432, 8437, 8444, 8458, 8462, + 8467, 8478, 8481, 8491, 8495, 8504, 8506, 8514, + 8521, 8527, 8534, 8538, 8540, 8542, 8544, 8545, + 8547, 8553, 8561, 8567, 8569, 8573, 8577, 8582, + 8585, 8595, 8597, 8599, 8600, 8602, 8603, 8609, + 8611, 8613, 8613, 8616, 8622, 8624, 8644, 8650, + 8655, 8657, 8659, 8662, 8664, 8666, 8670, 8726, + 8782, 8817, 8822, 8830, 8832, 8832, 8834, 8838, + 8841, 8848, 8854, 8858, 8861, 8867, 8870, 8876, + 8879, 8885, 8898, 8902, 8904, 8906, 8908, 8911, + 8916, 8918, 8921, 8929, 8932, 8934, 8936, 8938, + 8968, 8976, 8978, 8982, 8985, 8990, 8995, 9005, + 9017, 9023, 9028, 9038, 9041, 9048, 9052, 9060, + 9070, 9074, 9082, 9084, 9092, 9095, 9097, 9102, + 9104, 9111, 9113, 9121, 9122, 9143, 9147, 9153, + 9158, 9160, 9164, 9168, 9170, 9174, 9176, 9179, + 9183, 9185, 9192, 9194, 9198, 9202, 9206, 9208, + 9210, 9219, 9223, 9228, 9230, 9236, 9238, 9239, + 9241, 9242, 9244, 9246, 9248, 9250, 9265, 9269, + 9271, 9273, 9278, 9282, 9286, 9288, 9290, 9294, + 9298, 9300, 9304, 9311, 9316, 9320, 9323, 9324, + 9328, 9335, 9340, 9341, 9342, 9344, 9353, 9355, + 9378, 9382, 9384, 9388, 9392, 9393, 9397, 9401, + 9404, 9406, 9411, 9424, 9426, 9428, 9430, 9432, + 9436, 9440, 9442, 9444, 9446, 9450, 9454, 9458, + 9460, 9462, 9464, 9466, 9467, 9469, 9475, 9481, + 9487, 9489, 
9493, 9497, 9502, 9505, 9515, 9517, + 9519, 9522, 9524, 9526, 9528, 9530, 9533, 9538, + 9540, 9543, 9551, 9554, 9556, 9558, 9560, 9591, + 9599, 9601, 9605, 9612, 9624, 9631, 9645, 9651, + 9669, 9680, 9686, 9698, 9701, 9710, 9715, 9725, + 9731, 9745, 9751, 9763, 9775, 9779, 9781, 9787, + 9789, 9796, 9799, 9807, 9808, 9829, 9838, 9846, + 9852, 9854, 9858, 9862, 9867, 9873, 9875, 9878, + 9891, 9896, 9910, 9912, 9921, 9928, 9939, 9949, + 9957, 9968, 9972, 9977, 9979, 9981, 9983, 9984, + 9986, 9988, 9990, 9992, 10007, 10011, 10013, 10015, + 10023, 10031, 10033, 10037, 10048, 10051, 10061, 10065, + 10072, 10080, 10086, 10089, 10090, 10094, 10101, 10106, + 10107, 10108, 10110, 10119, 10121, 10144, 10149, 10151, + 10160, 10165, 10166, 10175, 10181, 10191, 10196, 10203, + 10217, 10221, 10226, 10237, 10240, 10250, 10254, 10263, + 10265, 10273, 10280, 10286, 10293, 10297, 10299, 10301, + 10303, 10304, 10306, 10312, 10320, 10326, 10328, 10332, + 10336, 10341, 10344, 10354, 10356, 10358, 10359, 10361, + 10362, 10368, 10370, 10372, 10372, 10373, 10374, 10375, + 10381, 10383, 10385, 10385, 10391, 10398, 10399, 10401, + 10404, 10414, 10434, 10441, 10445, 10452, 10462, 10469, + 10472, 10487, 10489, 10492, 10501, 10505, 10509, 10538, + 10558, 10578, 10598, 10620, 10640, 10660, 10680, 10703, + 10724, 10745, 10766, 10786, 10809, 10829, 10849, 10869, + 10890, 10911, 10932, 10952, 10972, 10992, 11012, 11032, + 11052, 11072, 11092, 11112, } var _graphclust_trans_keys []byte = []byte{ - 10, 128, 255, 176, 255, 131, 137, 191, - 145, 189, 135, 129, 130, 132, 133, 144, - 154, 176, 139, 159, 150, 156, 159, 164, - 167, 168, 170, 173, 145, 176, 255, 139, - 255, 166, 176, 171, 179, 160, 161, 163, - 164, 165, 167, 169, 171, 173, 174, 175, - 176, 177, 179, 180, 181, 182, 183, 184, - 185, 186, 187, 188, 189, 190, 191, 166, - 170, 172, 178, 150, 153, 155, 163, 165, - 167, 169, 173, 153, 155, 148, 161, 163, - 255, 189, 132, 185, 144, 152, 161, 164, - 255, 188, 129, 131, 190, 255, 133, 134, - 137, 138, 142, 150, 152, 161, 164, 255, - 131, 134, 137, 138, 142, 144, 146, 175, - 178, 180, 182, 255, 134, 138, 142, 161, - 164, 255, 188, 129, 131, 190, 191, 128, - 132, 135, 136, 139, 141, 150, 151, 162, - 163, 130, 190, 191, 151, 128, 130, 134, - 136, 138, 141, 128, 131, 190, 255, 133, - 137, 142, 148, 151, 161, 164, 255, 128, - 132, 134, 136, 138, 141, 149, 150, 162, - 163, 129, 131, 190, 255, 133, 137, 142, - 150, 152, 161, 164, 255, 130, 131, 138, - 150, 143, 148, 152, 159, 178, 179, 177, - 179, 186, 135, 142, 177, 179, 185, 187, - 188, 136, 141, 181, 183, 185, 152, 153, - 190, 191, 177, 191, 128, 132, 134, 135, - 141, 151, 153, 188, 134, 128, 129, 130, - 141, 156, 157, 158, 159, 160, 162, 164, - 168, 169, 170, 172, 173, 174, 175, 176, - 179, 183, 173, 183, 185, 190, 150, 153, - 158, 160, 177, 180, 130, 141, 157, 132, - 134, 157, 159, 146, 148, 178, 180, 146, - 147, 178, 179, 180, 255, 148, 156, 158, - 255, 139, 141, 169, 133, 134, 160, 171, - 176, 187, 151, 155, 160, 162, 191, 149, - 158, 165, 188, 176, 190, 128, 132, 180, - 255, 133, 170, 180, 255, 128, 130, 161, - 173, 166, 179, 164, 183, 173, 144, 146, - 148, 168, 178, 180, 184, 185, 128, 181, - 187, 191, 128, 131, 179, 181, 183, 140, - 141, 128, 131, 157, 179, 181, 183, 144, - 176, 164, 175, 177, 191, 160, 191, 128, - 130, 170, 175, 153, 154, 153, 154, 155, - 160, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 175, 175, 178, 180, 189, - 158, 159, 176, 177, 130, 134, 139, 163, - 167, 128, 129, 180, 255, 134, 159, 178, - 255, 166, 173, 135, 147, 128, 131, 179, - 255, 129, 
164, 166, 255, 169, 182, 131, - 188, 140, 141, 176, 178, 180, 183, 184, - 190, 191, 129, 171, 175, 181, 182, 163, - 170, 172, 173, 172, 184, 190, 158, 128, - 143, 160, 175, 144, 145, 150, 155, 157, - 158, 159, 135, 139, 141, 168, 171, 189, - 160, 182, 186, 191, 129, 131, 133, 134, - 140, 143, 184, 186, 165, 166, 128, 129, - 130, 132, 133, 134, 135, 136, 139, 140, - 141, 144, 145, 146, 147, 150, 151, 152, - 153, 154, 156, 176, 178, 128, 130, 184, - 255, 135, 190, 131, 175, 187, 255, 128, - 130, 167, 180, 179, 128, 130, 179, 255, - 129, 137, 141, 255, 190, 172, 183, 159, - 170, 188, 128, 131, 190, 191, 151, 128, - 132, 135, 136, 139, 141, 162, 163, 166, - 172, 176, 180, 181, 191, 128, 134, 176, - 255, 132, 255, 175, 181, 184, 255, 129, - 155, 158, 255, 129, 255, 171, 183, 157, - 171, 175, 182, 184, 191, 146, 167, 169, - 182, 171, 172, 189, 190, 176, 180, 176, - 182, 145, 190, 143, 146, 178, 157, 158, - 133, 134, 137, 168, 169, 170, 165, 169, - 173, 178, 187, 255, 131, 132, 140, 169, - 174, 255, 130, 132, 128, 182, 187, 255, - 173, 180, 182, 255, 132, 155, 159, 161, - 175, 128, 163, 165, 128, 134, 136, 152, - 155, 161, 163, 164, 166, 170, 144, 150, - 132, 138, 145, 146, 151, 166, 169, 0, - 127, 176, 255, 131, 137, 191, 145, 189, - 135, 129, 130, 132, 133, 144, 154, 176, - 139, 159, 150, 156, 159, 164, 167, 168, - 170, 173, 145, 176, 255, 139, 255, 166, - 176, 171, 179, 160, 161, 163, 164, 165, - 166, 167, 169, 171, 172, 173, 174, 175, - 176, 177, 178, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, - 168, 170, 150, 153, 155, 163, 165, 167, - 169, 173, 153, 155, 148, 161, 163, 255, - 131, 187, 189, 132, 185, 190, 255, 141, - 144, 129, 136, 145, 151, 152, 161, 162, - 163, 164, 255, 129, 188, 190, 130, 131, - 191, 255, 141, 151, 129, 132, 133, 134, - 137, 138, 142, 161, 162, 163, 164, 255, - 131, 188, 129, 130, 190, 255, 145, 181, - 129, 130, 131, 134, 135, 136, 137, 138, - 139, 141, 142, 175, 176, 177, 178, 255, - 134, 138, 141, 129, 136, 142, 161, 162, - 163, 164, 255, 129, 188, 130, 131, 190, - 191, 128, 141, 129, 132, 135, 136, 139, - 140, 150, 151, 162, 163, 130, 190, 191, - 128, 141, 151, 129, 130, 134, 136, 138, - 140, 128, 129, 131, 190, 255, 133, 137, - 129, 132, 142, 148, 151, 161, 164, 255, - 129, 188, 190, 191, 130, 131, 130, 134, - 128, 132, 135, 136, 138, 139, 140, 141, - 149, 150, 162, 163, 129, 190, 130, 131, - 191, 255, 133, 137, 141, 151, 129, 132, - 142, 161, 162, 163, 164, 255, 138, 143, - 150, 159, 144, 145, 146, 148, 152, 158, - 178, 179, 177, 179, 180, 186, 135, 142, - 177, 179, 180, 185, 187, 188, 136, 141, - 181, 183, 185, 152, 153, 190, 191, 191, - 177, 190, 128, 132, 134, 135, 141, 151, - 153, 188, 134, 128, 129, 130, 141, 156, - 157, 158, 159, 160, 162, 164, 168, 169, - 170, 172, 173, 174, 175, 176, 179, 183, - 177, 173, 183, 185, 186, 187, 188, 189, - 190, 150, 151, 152, 153, 158, 160, 177, - 180, 130, 132, 141, 157, 133, 134, 157, - 159, 146, 148, 178, 180, 146, 147, 178, - 179, 182, 180, 189, 190, 255, 134, 157, - 137, 147, 148, 255, 139, 141, 169, 133, - 134, 178, 160, 162, 163, 166, 167, 168, - 169, 171, 176, 184, 185, 187, 155, 151, - 152, 153, 154, 150, 160, 162, 191, 149, - 151, 152, 158, 165, 172, 173, 178, 179, - 188, 176, 190, 132, 181, 187, 128, 131, - 180, 188, 189, 255, 130, 133, 170, 171, - 179, 180, 255, 130, 161, 170, 128, 129, - 162, 165, 166, 167, 168, 173, 167, 173, - 166, 169, 170, 174, 175, 177, 178, 179, - 164, 171, 172, 179, 180, 181, 182, 183, - 161, 173, 180, 144, 146, 148, 168, 178, - 179, 184, 185, 128, 181, 187, 191, 
128, - 131, 179, 181, 183, 140, 141, 144, 176, - 175, 177, 191, 160, 191, 128, 130, 170, - 175, 153, 154, 153, 154, 155, 160, 162, - 163, 164, 165, 166, 167, 168, 169, 170, - 171, 175, 175, 178, 180, 189, 158, 159, - 176, 177, 130, 134, 139, 167, 163, 164, - 165, 166, 132, 133, 134, 159, 160, 177, - 178, 255, 166, 173, 135, 145, 146, 147, - 131, 179, 188, 128, 130, 180, 181, 182, - 185, 186, 255, 165, 129, 255, 169, 174, - 175, 176, 177, 178, 179, 180, 181, 182, - 131, 140, 141, 188, 176, 178, 180, 183, - 184, 190, 191, 129, 171, 181, 182, 172, - 173, 174, 175, 165, 168, 172, 173, 163, - 170, 172, 184, 190, 158, 128, 143, 160, - 175, 144, 145, 150, 155, 157, 158, 159, - 135, 139, 141, 168, 171, 189, 160, 182, - 186, 191, 129, 131, 133, 134, 140, 143, - 184, 186, 165, 166, 128, 129, 130, 132, - 133, 134, 135, 136, 139, 140, 141, 144, - 145, 146, 147, 150, 151, 152, 153, 154, - 156, 176, 178, 129, 128, 130, 184, 255, - 135, 190, 130, 131, 175, 176, 178, 183, - 184, 187, 255, 172, 128, 130, 167, 180, - 179, 130, 128, 129, 179, 181, 182, 190, - 191, 255, 129, 137, 138, 140, 141, 255, - 180, 190, 172, 174, 175, 177, 178, 181, - 182, 183, 159, 160, 162, 163, 170, 188, - 190, 191, 128, 129, 130, 131, 128, 151, - 129, 132, 135, 136, 139, 141, 162, 163, - 166, 172, 176, 180, 181, 183, 184, 191, - 133, 128, 129, 130, 134, 176, 185, 189, - 177, 178, 179, 186, 187, 190, 191, 255, - 129, 132, 255, 175, 190, 176, 177, 178, - 181, 184, 187, 188, 255, 129, 155, 158, - 255, 189, 176, 178, 179, 186, 187, 190, - 191, 255, 129, 255, 172, 182, 171, 173, - 174, 175, 176, 183, 166, 157, 159, 160, - 161, 162, 171, 175, 190, 176, 182, 184, - 191, 169, 177, 180, 146, 167, 170, 182, - 171, 172, 189, 190, 176, 180, 176, 182, - 143, 146, 178, 157, 158, 133, 134, 137, - 168, 169, 170, 166, 173, 165, 169, 174, - 178, 187, 255, 131, 132, 140, 169, 174, - 255, 130, 132, 128, 182, 187, 255, 173, - 180, 182, 255, 132, 155, 159, 161, 175, - 128, 163, 165, 128, 134, 136, 152, 155, - 161, 163, 164, 166, 170, 144, 150, 132, - 138, 143, 187, 191, 160, 128, 129, 132, - 135, 133, 134, 160, 255, 192, 255, 139, - 168, 160, 128, 129, 132, 135, 133, 134, - 160, 255, 192, 255, 144, 145, 150, 155, - 157, 158, 128, 191, 173, 128, 159, 160, - 191, 156, 128, 133, 134, 191, 0, 127, - 176, 255, 131, 137, 191, 145, 189, 135, - 129, 130, 132, 133, 144, 154, 176, 139, - 159, 150, 156, 159, 164, 167, 168, 170, - 173, 145, 176, 255, 139, 255, 166, 176, - 171, 179, 160, 161, 163, 164, 165, 167, - 169, 171, 173, 174, 175, 176, 177, 179, - 180, 181, 182, 183, 184, 185, 186, 187, - 188, 189, 190, 191, 166, 170, 172, 178, - 150, 153, 155, 163, 165, 167, 169, 173, - 153, 155, 148, 161, 163, 255, 189, 132, - 185, 144, 152, 161, 164, 255, 188, 129, - 131, 190, 255, 133, 134, 137, 138, 142, - 150, 152, 161, 164, 255, 131, 134, 137, - 138, 142, 144, 146, 175, 178, 180, 182, - 255, 134, 138, 142, 161, 164, 255, 188, - 129, 131, 190, 191, 128, 132, 135, 136, - 139, 141, 150, 151, 162, 163, 130, 190, - 191, 151, 128, 130, 134, 136, 138, 141, - 128, 131, 190, 255, 133, 137, 142, 148, - 151, 161, 164, 255, 128, 132, 134, 136, - 138, 141, 149, 150, 162, 163, 129, 131, - 190, 255, 133, 137, 142, 150, 152, 161, - 164, 255, 130, 131, 138, 150, 143, 148, - 152, 159, 178, 179, 177, 179, 186, 135, - 142, 177, 179, 185, 187, 188, 136, 141, - 181, 183, 185, 152, 153, 190, 191, 177, - 191, 128, 132, 134, 135, 141, 151, 153, - 188, 134, 128, 129, 130, 141, 156, 157, - 158, 159, 160, 162, 164, 168, 169, 170, - 172, 173, 174, 175, 176, 179, 183, 173, - 183, 185, 190, 150, 
153, 158, 160, 177, - 180, 130, 141, 157, 132, 134, 157, 159, - 146, 148, 178, 180, 146, 147, 178, 179, - 180, 255, 148, 156, 158, 255, 139, 141, - 169, 133, 134, 160, 171, 176, 187, 151, - 155, 160, 162, 191, 149, 158, 165, 188, - 176, 190, 128, 132, 180, 255, 133, 170, - 180, 255, 128, 130, 161, 173, 166, 179, - 164, 183, 173, 144, 146, 148, 168, 178, - 180, 184, 185, 128, 181, 187, 191, 128, - 131, 179, 181, 183, 140, 141, 128, 131, - 157, 179, 181, 183, 144, 176, 164, 175, - 177, 191, 160, 191, 128, 130, 170, 175, - 153, 154, 153, 154, 155, 160, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, - 175, 175, 178, 180, 189, 158, 159, 176, - 177, 130, 134, 139, 163, 167, 128, 129, - 180, 255, 134, 159, 178, 255, 166, 173, - 135, 147, 128, 131, 179, 255, 129, 164, - 166, 255, 169, 182, 131, 188, 140, 141, - 176, 178, 180, 183, 184, 190, 191, 129, - 171, 175, 181, 182, 163, 170, 172, 173, - 172, 184, 190, 158, 128, 143, 160, 175, - 144, 145, 150, 155, 157, 158, 159, 135, - 139, 141, 168, 171, 189, 160, 182, 186, - 191, 129, 131, 133, 134, 140, 143, 184, - 186, 165, 166, 128, 129, 130, 132, 133, - 134, 135, 136, 139, 140, 141, 144, 145, - 146, 147, 150, 151, 152, 153, 154, 156, - 176, 178, 128, 130, 184, 255, 135, 190, - 131, 175, 187, 255, 128, 130, 167, 180, - 179, 128, 130, 179, 255, 129, 137, 141, - 255, 190, 172, 183, 159, 170, 188, 128, - 131, 190, 191, 151, 128, 132, 135, 136, - 139, 141, 162, 163, 166, 172, 176, 180, - 181, 191, 128, 134, 176, 255, 132, 255, - 175, 181, 184, 255, 129, 155, 158, 255, - 129, 255, 171, 183, 157, 171, 175, 182, - 184, 191, 146, 167, 169, 182, 171, 172, - 189, 190, 176, 180, 176, 182, 145, 190, - 143, 146, 178, 157, 158, 133, 134, 137, - 168, 169, 170, 165, 169, 173, 178, 187, - 255, 131, 132, 140, 169, 174, 255, 130, - 132, 128, 182, 187, 255, 173, 180, 182, - 255, 132, 155, 159, 161, 175, 128, 163, - 165, 128, 134, 136, 152, 155, 161, 163, - 164, 166, 170, 144, 150, 132, 138, 145, - 146, 151, 166, 169, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 166, 167, 169, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 187, 188, 189, 190, 191, 168, 170, 150, - 153, 155, 163, 165, 167, 169, 173, 153, - 155, 148, 161, 163, 255, 131, 187, 189, - 132, 185, 190, 255, 141, 144, 129, 136, - 145, 151, 152, 161, 162, 163, 164, 255, - 129, 188, 190, 130, 131, 191, 255, 141, - 151, 129, 132, 133, 134, 137, 138, 142, - 161, 162, 163, 164, 255, 131, 188, 129, - 130, 190, 255, 145, 181, 129, 130, 131, - 134, 135, 136, 137, 138, 139, 141, 142, - 175, 176, 177, 178, 255, 134, 138, 141, - 129, 136, 142, 161, 162, 163, 164, 255, - 129, 188, 130, 131, 190, 191, 128, 141, - 129, 132, 135, 136, 139, 140, 150, 151, - 162, 163, 130, 190, 191, 128, 141, 151, - 129, 130, 134, 136, 138, 140, 128, 129, - 131, 190, 255, 133, 137, 129, 132, 142, - 148, 151, 161, 164, 255, 129, 188, 190, - 191, 130, 131, 130, 134, 128, 132, 135, - 136, 138, 139, 140, 141, 149, 150, 162, - 163, 129, 190, 130, 131, 191, 255, 133, - 137, 141, 151, 129, 132, 142, 161, 162, - 163, 164, 255, 138, 143, 150, 159, 144, - 145, 146, 148, 152, 158, 178, 179, 177, - 179, 180, 186, 135, 142, 177, 179, 180, - 185, 187, 188, 136, 141, 181, 183, 185, - 152, 153, 190, 191, 191, 177, 190, 128, - 132, 134, 135, 141, 151, 153, 188, 134, - 128, 129, 130, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 
175, 176, 179, 183, 177, 173, 183, - 185, 186, 187, 188, 189, 190, 150, 151, - 152, 153, 158, 160, 177, 180, 130, 132, - 141, 157, 133, 134, 157, 159, 146, 148, - 178, 180, 146, 147, 178, 179, 182, 180, - 189, 190, 255, 134, 157, 137, 147, 148, - 255, 139, 141, 169, 133, 134, 178, 160, - 162, 163, 166, 167, 168, 169, 171, 176, - 184, 185, 187, 155, 151, 152, 153, 154, - 150, 160, 162, 191, 149, 151, 152, 158, - 165, 172, 173, 178, 179, 188, 176, 190, - 132, 181, 187, 128, 131, 180, 188, 189, - 255, 130, 133, 170, 171, 179, 180, 255, - 130, 161, 170, 128, 129, 162, 165, 166, - 167, 168, 173, 167, 173, 166, 169, 170, - 174, 175, 177, 178, 179, 164, 171, 172, - 179, 180, 181, 182, 183, 161, 173, 180, - 144, 146, 148, 168, 178, 179, 184, 185, - 128, 181, 187, 191, 128, 131, 179, 181, - 183, 140, 141, 144, 176, 175, 177, 191, - 160, 191, 128, 130, 170, 175, 153, 154, - 153, 154, 155, 160, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 175, 175, - 178, 180, 189, 158, 159, 176, 177, 130, - 134, 139, 167, 163, 164, 165, 166, 132, - 133, 134, 159, 160, 177, 178, 255, 166, - 173, 135, 145, 146, 147, 131, 179, 188, - 128, 130, 180, 181, 182, 185, 186, 255, - 165, 129, 255, 169, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 131, 140, 141, - 188, 176, 178, 180, 183, 184, 190, 191, - 129, 171, 181, 182, 172, 173, 174, 175, - 165, 168, 172, 173, 163, 170, 172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 189, 160, 182, 186, 191, 129, - 131, 133, 134, 140, 143, 184, 186, 165, - 166, 128, 129, 130, 132, 133, 134, 135, - 136, 139, 140, 141, 144, 145, 146, 147, - 150, 151, 152, 153, 154, 156, 176, 178, - 129, 128, 130, 184, 255, 135, 190, 130, - 131, 175, 176, 178, 183, 184, 187, 255, - 172, 128, 130, 167, 180, 179, 130, 128, - 129, 179, 181, 182, 190, 191, 255, 129, - 137, 138, 140, 141, 255, 180, 190, 172, - 174, 175, 177, 178, 181, 182, 183, 159, - 160, 162, 163, 170, 188, 190, 191, 128, - 129, 130, 131, 128, 151, 129, 132, 135, - 136, 139, 141, 162, 163, 166, 172, 176, - 180, 181, 183, 184, 191, 133, 128, 129, - 130, 134, 176, 185, 189, 177, 178, 179, - 186, 187, 190, 191, 255, 129, 132, 255, - 175, 190, 176, 177, 178, 181, 184, 187, - 188, 255, 129, 155, 158, 255, 189, 176, - 178, 179, 186, 187, 190, 191, 255, 129, - 255, 172, 182, 171, 173, 174, 175, 176, - 183, 166, 157, 159, 160, 161, 162, 171, - 175, 190, 176, 182, 184, 191, 169, 177, - 180, 146, 167, 170, 182, 171, 172, 189, - 190, 176, 180, 176, 182, 143, 146, 178, - 157, 158, 133, 134, 137, 168, 169, 170, - 166, 173, 165, 169, 174, 178, 187, 255, - 131, 132, 140, 169, 174, 255, 130, 132, - 128, 182, 187, 255, 173, 180, 182, 255, - 132, 155, 159, 161, 175, 128, 163, 165, - 128, 134, 136, 152, 155, 161, 163, 164, - 166, 170, 144, 150, 132, 138, 143, 187, - 191, 160, 128, 129, 132, 135, 133, 134, - 160, 255, 192, 255, 139, 168, 160, 128, - 129, 132, 135, 133, 134, 160, 255, 192, - 255, 144, 145, 150, 155, 157, 158, 128, - 191, 160, 172, 174, 191, 128, 133, 134, - 155, 157, 191, 157, 128, 191, 143, 128, - 191, 163, 181, 128, 191, 162, 128, 191, - 142, 128, 191, 132, 133, 134, 135, 160, - 128, 191, 128, 255, 128, 129, 130, 132, - 133, 134, 141, 156, 157, 158, 159, 160, - 162, 164, 168, 169, 170, 172, 173, 174, - 175, 176, 179, 183, 160, 255, 128, 129, - 130, 133, 134, 135, 141, 156, 157, 158, - 159, 160, 162, 164, 168, 169, 170, 172, - 173, 174, 175, 176, 179, 183, 160, 255, - 168, 255, 128, 129, 130, 134, 135, 141, - 156, 157, 158, 159, 160, 162, 164, 168, - 169, 170, 172, 173, 174, 175, 
176, 179, - 183, 168, 255, 192, 255, 159, 139, 187, - 158, 159, 176, 255, 135, 138, 139, 187, - 188, 255, 168, 255, 153, 154, 155, 160, - 162, 163, 164, 165, 166, 167, 168, 169, - 170, 171, 175, 177, 178, 179, 180, 181, - 182, 184, 185, 186, 187, 188, 189, 191, - 176, 190, 192, 255, 135, 147, 160, 188, - 128, 156, 184, 129, 255, 128, 129, 130, - 133, 134, 141, 156, 157, 158, 159, 160, - 162, 164, 168, 169, 170, 172, 173, 174, - 175, 176, 179, 183, 158, 159, 135, 255, - 148, 176, 140, 168, 132, 160, 188, 152, - 180, 144, 172, 136, 164, 192, 255, 129, - 130, 131, 132, 133, 134, 136, 137, 138, - 139, 140, 141, 143, 144, 145, 146, 147, - 148, 150, 151, 152, 153, 154, 155, 157, - 158, 159, 160, 161, 162, 164, 165, 166, - 167, 168, 169, 171, 172, 173, 174, 175, - 176, 178, 179, 180, 181, 182, 183, 185, - 186, 187, 188, 189, 190, 128, 191, 129, - 130, 131, 132, 133, 134, 136, 137, 138, - 139, 140, 141, 143, 144, 145, 146, 147, - 148, 150, 151, 152, 153, 154, 155, 157, - 158, 159, 160, 161, 162, 164, 165, 166, - 167, 168, 169, 171, 172, 173, 174, 175, - 176, 178, 179, 180, 181, 182, 183, 185, - 186, 187, 188, 189, 190, 128, 191, 129, - 130, 131, 132, 133, 134, 136, 137, 138, - 139, 140, 141, 143, 144, 145, 146, 147, - 148, 150, 151, 152, 153, 154, 155, 157, - 158, 159, 128, 156, 160, 255, 136, 164, - 175, 176, 255, 128, 141, 143, 191, 128, - 129, 152, 155, 156, 130, 191, 140, 141, - 128, 138, 144, 167, 175, 191, 128, 159, - 176, 191, 157, 128, 191, 185, 128, 191, - 128, 137, 138, 141, 142, 191, 128, 191, - 165, 177, 178, 179, 180, 181, 182, 184, - 185, 186, 187, 188, 189, 191, 128, 175, - 176, 190, 192, 255, 128, 159, 160, 188, - 189, 191, 128, 156, 184, 129, 255, 148, - 176, 140, 168, 132, 160, 188, 152, 180, - 144, 172, 136, 164, 192, 255, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 161, 162, 164, 165, 166, 167, - 168, 169, 171, 172, 173, 174, 175, 176, - 178, 179, 180, 181, 182, 183, 185, 186, - 187, 188, 189, 190, 128, 191, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 161, 162, 164, 165, 166, 167, - 168, 169, 171, 172, 173, 174, 175, 176, - 178, 179, 180, 181, 182, 183, 185, 186, - 187, 188, 189, 190, 128, 191, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 128, 156, 160, 255, 136, 164, 175, - 176, 255, 135, 138, 139, 187, 188, 191, - 192, 255, 187, 191, 128, 190, 128, 190, - 188, 128, 175, 190, 191, 145, 155, 157, - 159, 128, 191, 130, 135, 128, 191, 189, - 128, 191, 128, 129, 130, 131, 132, 191, - 178, 128, 191, 128, 159, 164, 191, 133, - 128, 191, 128, 178, 187, 191, 135, 142, - 143, 145, 146, 149, 150, 153, 154, 155, - 164, 128, 191, 128, 165, 166, 191, 144, - 145, 150, 155, 157, 158, 159, 135, 166, - 191, 133, 128, 191, 128, 130, 131, 132, - 133, 137, 138, 139, 140, 191, 174, 188, - 128, 129, 130, 131, 132, 133, 134, 144, - 145, 165, 166, 169, 170, 175, 176, 184, - 185, 191, 128, 132, 170, 129, 135, 136, - 191, 181, 186, 128, 191, 144, 128, 148, - 149, 150, 151, 191, 128, 132, 133, 135, - 136, 138, 139, 143, 144, 191, 163, 128, - 179, 180, 182, 183, 191, 128, 129, 191, - 166, 176, 191, 128, 151, 152, 158, 159, - 178, 179, 185, 186, 187, 188, 190, 128, - 191, 160, 128, 191, 128, 129, 135, 132, - 134, 128, 175, 157, 128, 191, 143, 128, - 191, 163, 181, 128, 191, 162, 128, 191, - 142, 128, 191, 
132, 133, 134, 135, 160, - 128, 191, 0, 127, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 167, 169, 171, - 173, 174, 175, 176, 177, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 166, 170, 172, 178, 150, 153, - 155, 163, 165, 167, 169, 173, 153, 155, - 148, 161, 163, 255, 189, 132, 185, 144, - 152, 161, 164, 255, 188, 129, 131, 190, - 255, 133, 134, 137, 138, 142, 150, 152, - 161, 164, 255, 131, 134, 137, 138, 142, - 144, 146, 175, 178, 180, 182, 255, 134, - 138, 142, 161, 164, 255, 188, 129, 131, - 190, 191, 128, 132, 135, 136, 139, 141, - 150, 151, 162, 163, 130, 190, 191, 151, - 128, 130, 134, 136, 138, 141, 128, 131, - 190, 255, 133, 137, 142, 148, 151, 161, - 164, 255, 128, 132, 134, 136, 138, 141, - 149, 150, 162, 163, 129, 131, 190, 255, - 133, 137, 142, 150, 152, 161, 164, 255, - 130, 131, 138, 150, 143, 148, 152, 159, - 178, 179, 177, 179, 186, 135, 142, 177, - 179, 185, 187, 188, 136, 141, 181, 183, - 185, 152, 153, 190, 191, 177, 191, 128, - 132, 134, 135, 141, 151, 153, 188, 134, - 128, 129, 130, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 173, 183, 185, - 190, 150, 153, 158, 160, 177, 180, 130, - 141, 157, 132, 134, 157, 159, 146, 148, - 178, 180, 146, 147, 178, 179, 180, 255, - 148, 156, 158, 255, 139, 141, 169, 133, - 134, 160, 171, 176, 187, 151, 155, 160, - 162, 191, 149, 158, 165, 188, 176, 190, - 128, 132, 180, 255, 133, 170, 180, 255, - 128, 130, 161, 173, 166, 179, 164, 183, - 173, 144, 146, 148, 168, 178, 180, 184, - 185, 128, 181, 187, 191, 128, 131, 179, - 181, 183, 140, 141, 128, 131, 157, 179, - 181, 183, 144, 176, 164, 175, 177, 191, - 160, 191, 128, 130, 170, 175, 153, 154, - 153, 154, 155, 160, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 175, 175, - 178, 180, 189, 158, 159, 176, 177, 130, - 134, 139, 163, 167, 128, 129, 180, 255, - 134, 159, 178, 255, 166, 173, 135, 147, - 128, 131, 179, 255, 129, 164, 166, 255, - 169, 182, 131, 188, 140, 141, 176, 178, - 180, 183, 184, 190, 191, 129, 171, 175, - 181, 182, 163, 170, 172, 173, 172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 189, 160, 182, 186, 191, 129, - 131, 133, 134, 140, 143, 184, 186, 165, - 166, 128, 129, 130, 132, 133, 134, 135, - 136, 139, 140, 141, 144, 145, 146, 147, - 150, 151, 152, 153, 154, 156, 176, 178, - 128, 130, 184, 255, 135, 190, 131, 175, - 187, 255, 128, 130, 167, 180, 179, 128, - 130, 179, 255, 129, 137, 141, 255, 190, - 172, 183, 159, 170, 188, 128, 131, 190, - 191, 151, 128, 132, 135, 136, 139, 141, - 162, 163, 166, 172, 176, 180, 181, 191, - 128, 134, 176, 255, 132, 255, 175, 181, - 184, 255, 129, 155, 158, 255, 129, 255, - 171, 183, 157, 171, 175, 182, 184, 191, - 146, 167, 169, 182, 171, 172, 189, 190, - 176, 180, 176, 182, 145, 190, 143, 146, - 178, 157, 158, 133, 134, 137, 168, 169, - 170, 165, 169, 173, 178, 187, 255, 131, - 132, 140, 169, 174, 255, 130, 132, 128, - 182, 187, 255, 173, 180, 182, 255, 132, - 155, 159, 161, 175, 128, 163, 165, 128, - 134, 136, 152, 155, 161, 163, 164, 166, - 170, 144, 150, 132, 138, 145, 146, 151, - 166, 169, 128, 255, 176, 255, 131, 137, - 191, 145, 189, 135, 129, 130, 132, 133, - 144, 154, 176, 139, 159, 150, 156, 159, - 164, 167, 168, 170, 173, 145, 176, 255, - 139, 255, 166, 176, 171, 179, 160, 161, - 163, 164, 165, 166, 167, 169, 171, 172, - 
173, 174, 175, 176, 177, 178, 179, 180, - 181, 182, 183, 184, 185, 186, 187, 188, - 189, 190, 191, 168, 170, 150, 153, 155, - 163, 165, 167, 169, 173, 153, 155, 148, - 161, 163, 255, 131, 187, 189, 132, 185, - 190, 255, 141, 144, 129, 136, 145, 151, - 152, 161, 162, 163, 164, 255, 129, 188, - 190, 130, 131, 191, 255, 141, 151, 129, - 132, 133, 134, 137, 138, 142, 161, 162, - 163, 164, 255, 131, 188, 129, 130, 190, - 255, 145, 181, 129, 130, 131, 134, 135, - 136, 137, 138, 139, 141, 142, 175, 176, - 177, 178, 255, 134, 138, 141, 129, 136, - 142, 161, 162, 163, 164, 255, 129, 188, - 130, 131, 190, 191, 128, 141, 129, 132, - 135, 136, 139, 140, 150, 151, 162, 163, - 130, 190, 191, 128, 141, 151, 129, 130, - 134, 136, 138, 140, 128, 129, 131, 190, - 255, 133, 137, 129, 132, 142, 148, 151, - 161, 164, 255, 129, 188, 190, 191, 130, - 131, 130, 134, 128, 132, 135, 136, 138, - 139, 140, 141, 149, 150, 162, 163, 129, - 190, 130, 131, 191, 255, 133, 137, 141, - 151, 129, 132, 142, 161, 162, 163, 164, - 255, 138, 143, 150, 159, 144, 145, 146, - 148, 152, 158, 178, 179, 177, 179, 180, - 186, 135, 142, 177, 179, 180, 185, 187, - 188, 136, 141, 181, 183, 185, 152, 153, - 190, 191, 191, 177, 190, 128, 132, 134, - 135, 141, 151, 153, 188, 134, 128, 129, - 130, 141, 156, 157, 158, 159, 160, 162, - 164, 168, 169, 170, 172, 173, 174, 175, - 176, 179, 183, 177, 173, 183, 185, 186, - 187, 188, 189, 190, 150, 151, 152, 153, - 158, 160, 177, 180, 130, 132, 141, 157, - 133, 134, 157, 159, 146, 148, 178, 180, - 146, 147, 178, 179, 182, 180, 189, 190, - 255, 134, 157, 137, 147, 148, 255, 139, - 141, 169, 133, 134, 178, 160, 162, 163, - 166, 167, 168, 169, 171, 176, 184, 185, - 187, 155, 151, 152, 153, 154, 150, 160, - 162, 191, 149, 151, 152, 158, 165, 172, - 173, 178, 179, 188, 176, 190, 132, 181, - 187, 128, 131, 180, 188, 189, 255, 130, - 133, 170, 171, 179, 180, 255, 130, 161, - 170, 128, 129, 162, 165, 166, 167, 168, - 173, 167, 173, 166, 169, 170, 174, 175, - 177, 178, 179, 164, 171, 172, 179, 180, - 181, 182, 183, 161, 173, 180, 144, 146, - 148, 168, 178, 179, 184, 185, 128, 181, - 187, 191, 128, 131, 179, 181, 183, 140, - 141, 144, 176, 175, 177, 191, 160, 191, - 128, 130, 170, 175, 153, 154, 153, 154, - 155, 160, 162, 163, 164, 165, 166, 167, - 168, 169, 170, 171, 175, 175, 178, 180, - 189, 158, 159, 176, 177, 130, 134, 139, - 167, 163, 164, 165, 166, 132, 133, 134, - 159, 160, 177, 178, 255, 166, 173, 135, - 145, 146, 147, 131, 179, 188, 128, 130, - 180, 181, 182, 185, 186, 255, 165, 129, - 255, 169, 174, 175, 176, 177, 178, 179, - 180, 181, 182, 131, 140, 141, 188, 176, - 178, 180, 183, 184, 190, 191, 129, 171, - 181, 182, 172, 173, 174, 175, 165, 168, - 172, 173, 163, 170, 172, 184, 190, 158, - 128, 143, 160, 175, 144, 145, 150, 155, - 157, 158, 159, 135, 139, 141, 168, 171, - 189, 160, 182, 186, 191, 129, 131, 133, - 134, 140, 143, 184, 186, 165, 166, 128, - 129, 130, 132, 133, 134, 135, 136, 139, - 140, 141, 144, 145, 146, 147, 150, 151, - 152, 153, 154, 156, 176, 178, 129, 128, - 130, 184, 255, 135, 190, 130, 131, 175, - 176, 178, 183, 184, 187, 255, 172, 128, - 130, 167, 180, 179, 130, 128, 129, 179, - 181, 182, 190, 191, 255, 129, 137, 138, - 140, 141, 255, 180, 190, 172, 174, 175, - 177, 178, 181, 182, 183, 159, 160, 162, - 163, 170, 188, 190, 191, 128, 129, 130, - 131, 128, 151, 129, 132, 135, 136, 139, - 141, 162, 163, 166, 172, 176, 180, 181, - 183, 184, 191, 133, 128, 129, 130, 134, - 176, 185, 189, 177, 178, 179, 186, 187, - 190, 191, 255, 129, 132, 255, 175, 190, - 176, 177, 178, 181, 184, 
187, 188, 255, - 129, 155, 158, 255, 189, 176, 178, 179, - 186, 187, 190, 191, 255, 129, 255, 172, - 182, 171, 173, 174, 175, 176, 183, 166, - 157, 159, 160, 161, 162, 171, 175, 190, - 176, 182, 184, 191, 169, 177, 180, 146, - 167, 170, 182, 171, 172, 189, 190, 176, - 180, 176, 182, 143, 146, 178, 157, 158, - 133, 134, 137, 168, 169, 170, 166, 173, - 165, 169, 174, 178, 187, 255, 131, 132, - 140, 169, 174, 255, 130, 132, 128, 182, - 187, 255, 173, 180, 182, 255, 132, 155, - 159, 161, 175, 128, 163, 165, 128, 134, - 136, 152, 155, 161, 163, 164, 166, 170, - 144, 150, 132, 138, 143, 187, 191, 160, - 128, 129, 132, 135, 133, 134, 160, 255, - 192, 255, 139, 168, 160, 128, 129, 132, - 135, 133, 134, 160, 255, 192, 255, 144, - 145, 150, 155, 157, 158, 128, 129, 130, - 132, 133, 134, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 160, 255, 128, - 129, 130, 133, 134, 135, 141, 156, 157, - 158, 159, 160, 162, 164, 168, 169, 170, - 172, 173, 174, 175, 176, 179, 183, 160, - 255, 168, 255, 128, 129, 130, 134, 135, - 141, 156, 157, 158, 159, 160, 162, 164, - 168, 169, 170, 172, 173, 174, 175, 176, - 179, 183, 168, 255, 192, 255, 159, 139, - 187, 158, 159, 176, 255, 135, 138, 139, - 187, 188, 255, 168, 255, 153, 154, 155, - 160, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 175, 177, 178, 179, 180, - 181, 182, 184, 185, 186, 187, 188, 189, - 191, 176, 190, 192, 255, 135, 147, 160, - 188, 128, 156, 184, 129, 255, 128, 129, - 130, 133, 134, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 158, 159, 135, - 255, 148, 176, 140, 168, 132, 160, 188, - 152, 180, 144, 172, 136, 164, 192, 255, - 129, 130, 131, 132, 133, 134, 136, 137, - 138, 139, 140, 141, 143, 144, 145, 146, - 147, 148, 150, 151, 152, 153, 154, 155, - 157, 158, 159, 160, 161, 162, 164, 165, - 166, 167, 168, 169, 171, 172, 173, 174, - 175, 176, 178, 179, 180, 181, 182, 183, - 185, 186, 187, 188, 189, 190, 128, 191, - 129, 130, 131, 132, 133, 134, 136, 137, - 138, 139, 140, 141, 143, 144, 145, 146, - 147, 148, 150, 151, 152, 153, 154, 155, - 157, 158, 159, 160, 161, 162, 164, 165, - 166, 167, 168, 169, 171, 172, 173, 174, - 175, 176, 178, 179, 180, 181, 182, 183, - 185, 186, 187, 188, 189, 190, 128, 191, - 129, 130, 131, 132, 133, 134, 136, 137, - 138, 139, 140, 141, 143, 144, 145, 146, - 147, 148, 150, 151, 152, 153, 154, 155, - 157, 158, 159, 128, 156, 160, 255, 136, - 164, 175, 176, 255, 142, 128, 191, 128, - 129, 152, 155, 156, 130, 191, 139, 141, - 128, 140, 142, 143, 144, 167, 168, 174, - 175, 191, 128, 255, 176, 255, 131, 137, - 191, 145, 189, 135, 129, 130, 132, 133, - 144, 154, 176, 139, 159, 150, 156, 159, - 164, 167, 168, 170, 173, 145, 176, 255, - 139, 255, 166, 176, 171, 179, 160, 161, - 163, 164, 165, 167, 169, 171, 173, 174, - 175, 176, 177, 179, 180, 181, 182, 183, - 184, 185, 186, 187, 188, 189, 190, 191, - 166, 170, 172, 178, 150, 153, 155, 163, - 165, 167, 169, 173, 153, 155, 148, 161, - 163, 255, 189, 132, 185, 144, 152, 161, - 164, 255, 188, 129, 131, 190, 255, 133, - 134, 137, 138, 142, 150, 152, 161, 164, - 255, 131, 134, 137, 138, 142, 144, 146, - 175, 178, 180, 182, 255, 134, 138, 142, - 161, 164, 255, 188, 129, 131, 190, 191, - 128, 132, 135, 136, 139, 141, 150, 151, - 162, 163, 130, 190, 191, 151, 128, 130, - 134, 136, 138, 141, 128, 131, 190, 255, - 133, 137, 142, 148, 151, 161, 164, 255, - 128, 132, 134, 136, 138, 141, 149, 150, - 162, 163, 129, 131, 190, 255, 133, 137, - 142, 150, 152, 161, 164, 255, 130, 131, - 138, 150, 
143, 148, 152, 159, 178, 179, - 177, 179, 186, 135, 142, 177, 179, 185, - 187, 188, 136, 141, 181, 183, 185, 152, - 153, 190, 191, 177, 191, 128, 132, 134, - 135, 141, 151, 153, 188, 134, 128, 129, - 130, 141, 156, 157, 158, 159, 160, 162, - 164, 168, 169, 170, 172, 173, 174, 175, - 176, 179, 183, 173, 183, 185, 190, 150, - 153, 158, 160, 177, 180, 130, 141, 157, - 132, 134, 157, 159, 146, 148, 178, 180, - 146, 147, 178, 179, 180, 255, 148, 156, - 158, 255, 139, 141, 169, 133, 134, 160, - 171, 176, 187, 151, 155, 160, 162, 191, - 149, 158, 165, 188, 176, 190, 128, 132, - 180, 255, 133, 170, 180, 255, 128, 130, - 161, 173, 166, 179, 164, 183, 173, 144, - 146, 148, 168, 178, 180, 184, 185, 128, - 181, 187, 191, 128, 131, 179, 181, 183, - 140, 141, 144, 176, 175, 177, 191, 160, - 191, 128, 130, 170, 175, 153, 154, 153, - 154, 155, 160, 162, 163, 164, 165, 166, - 167, 168, 169, 170, 171, 175, 175, 178, - 180, 189, 158, 159, 176, 177, 130, 134, - 139, 163, 167, 128, 129, 180, 255, 134, - 159, 178, 255, 166, 173, 135, 147, 128, - 131, 179, 255, 129, 164, 166, 255, 169, - 182, 131, 188, 140, 141, 176, 178, 180, - 183, 184, 190, 191, 129, 171, 175, 181, - 182, 163, 170, 172, 173, 172, 184, 190, - 158, 128, 143, 160, 175, 144, 145, 150, - 155, 157, 158, 135, 139, 141, 168, 171, - 189, 160, 182, 186, 191, 129, 131, 133, - 134, 140, 143, 184, 186, 165, 166, 128, - 129, 130, 132, 133, 134, 135, 136, 139, - 140, 141, 144, 145, 146, 147, 150, 151, - 152, 153, 154, 156, 176, 178, 128, 130, - 184, 255, 135, 190, 131, 175, 187, 255, - 128, 130, 167, 180, 179, 128, 130, 179, - 255, 129, 137, 141, 255, 190, 172, 183, - 159, 170, 188, 128, 131, 190, 191, 151, - 128, 132, 135, 136, 139, 141, 162, 163, - 166, 172, 176, 180, 181, 191, 128, 134, - 176, 255, 132, 255, 175, 181, 184, 255, - 129, 155, 158, 255, 129, 255, 171, 183, - 157, 171, 175, 182, 184, 191, 146, 167, - 169, 182, 171, 172, 189, 190, 176, 180, - 176, 182, 145, 190, 143, 146, 178, 157, - 158, 133, 134, 137, 168, 169, 170, 165, - 169, 173, 178, 187, 255, 131, 132, 140, - 169, 174, 255, 130, 132, 128, 182, 187, - 255, 173, 180, 182, 255, 132, 155, 159, - 161, 175, 128, 163, 165, 128, 134, 136, - 152, 155, 161, 163, 164, 166, 170, 144, - 150, 132, 138, 160, 128, 129, 132, 135, - 133, 134, 160, 255, 192, 255, 128, 131, - 157, 179, 181, 183, 164, 144, 145, 150, - 155, 157, 158, 159, 145, 146, 151, 166, - 169, 128, 255, 176, 255, 131, 137, 191, - 145, 189, 135, 129, 130, 132, 133, 144, - 154, 176, 139, 159, 150, 156, 159, 164, - 167, 168, 170, 173, 145, 176, 255, 139, - 255, 166, 176, 171, 179, 160, 161, 163, - 164, 165, 166, 167, 169, 171, 172, 173, - 174, 175, 176, 177, 178, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 168, 170, 150, 153, 155, 163, - 165, 167, 169, 173, 153, 155, 148, 161, - 163, 255, 131, 187, 189, 132, 185, 190, - 255, 141, 144, 129, 136, 145, 151, 152, - 161, 162, 163, 164, 255, 129, 188, 190, - 130, 131, 191, 255, 141, 151, 129, 132, - 133, 134, 137, 138, 142, 161, 162, 163, - 164, 255, 131, 188, 129, 130, 190, 255, - 145, 181, 129, 130, 131, 134, 135, 136, - 137, 138, 139, 141, 142, 175, 176, 177, - 178, 255, 134, 138, 141, 129, 136, 142, - 161, 162, 163, 164, 255, 129, 188, 130, - 131, 190, 191, 128, 141, 129, 132, 135, - 136, 139, 140, 150, 151, 162, 163, 130, - 190, 191, 128, 141, 151, 129, 130, 134, - 136, 138, 140, 128, 129, 131, 190, 255, - 133, 137, 129, 132, 142, 148, 151, 161, - 164, 255, 129, 188, 190, 191, 130, 131, - 130, 134, 128, 132, 135, 136, 138, 139, - 140, 141, 149, 150, 162, 163, 129, 
190, - 130, 131, 191, 255, 133, 137, 141, 151, - 129, 132, 142, 161, 162, 163, 164, 255, - 138, 143, 150, 159, 144, 145, 146, 148, - 152, 158, 178, 179, 177, 179, 180, 186, - 135, 142, 177, 179, 180, 185, 187, 188, - 136, 141, 181, 183, 185, 152, 153, 190, - 191, 191, 177, 190, 128, 132, 134, 135, - 141, 151, 153, 188, 134, 128, 129, 130, - 141, 156, 157, 158, 159, 160, 162, 164, - 168, 169, 170, 172, 173, 174, 175, 176, - 179, 183, 177, 173, 183, 185, 186, 187, - 188, 189, 190, 150, 151, 152, 153, 158, - 160, 177, 180, 130, 132, 141, 157, 133, - 134, 157, 159, 146, 148, 178, 180, 146, - 147, 178, 179, 182, 180, 189, 190, 255, - 134, 157, 137, 147, 148, 255, 139, 141, - 169, 133, 134, 178, 160, 162, 163, 166, - 167, 168, 169, 171, 176, 184, 185, 187, - 155, 151, 152, 153, 154, 150, 160, 162, - 191, 149, 151, 152, 158, 165, 172, 173, - 178, 179, 188, 176, 190, 132, 181, 187, - 128, 131, 180, 188, 189, 255, 130, 133, - 170, 171, 179, 180, 255, 130, 161, 170, - 128, 129, 162, 165, 166, 167, 168, 173, - 167, 173, 166, 169, 170, 174, 175, 177, - 178, 179, 164, 171, 172, 179, 180, 181, - 182, 183, 161, 173, 180, 144, 146, 148, - 168, 178, 179, 184, 185, 128, 181, 187, - 191, 128, 131, 179, 181, 183, 140, 141, - 144, 176, 175, 177, 191, 160, 191, 128, - 130, 170, 175, 153, 154, 153, 154, 155, - 160, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 175, 175, 178, 180, 189, - 158, 159, 176, 177, 130, 134, 139, 167, - 163, 164, 165, 166, 132, 133, 134, 159, - 160, 177, 178, 255, 166, 173, 135, 145, - 146, 147, 131, 179, 188, 128, 130, 180, - 181, 182, 185, 186, 255, 165, 129, 255, - 169, 174, 175, 176, 177, 178, 179, 180, - 181, 182, 131, 140, 141, 188, 176, 178, - 180, 183, 184, 190, 191, 129, 171, 181, - 182, 172, 173, 174, 175, 165, 168, 172, - 173, 163, 170, 172, 184, 190, 158, 128, - 143, 160, 175, 144, 145, 150, 155, 157, - 158, 159, 135, 139, 141, 168, 171, 189, - 160, 182, 186, 191, 129, 131, 133, 134, - 140, 143, 184, 186, 165, 166, 128, 129, - 130, 132, 133, 134, 135, 136, 139, 140, - 141, 144, 145, 146, 147, 150, 151, 152, - 153, 154, 156, 176, 178, 129, 128, 130, - 184, 255, 135, 190, 130, 131, 175, 176, - 178, 183, 184, 187, 255, 172, 128, 130, - 167, 180, 179, 130, 128, 129, 179, 181, - 182, 190, 191, 255, 129, 137, 138, 140, - 141, 255, 180, 190, 172, 174, 175, 177, - 178, 181, 182, 183, 159, 160, 162, 163, - 170, 188, 190, 191, 128, 129, 130, 131, - 128, 151, 129, 132, 135, 136, 139, 141, - 162, 163, 166, 172, 176, 180, 181, 183, - 184, 191, 133, 128, 129, 130, 134, 176, - 185, 189, 177, 178, 179, 186, 187, 190, - 191, 255, 129, 132, 255, 175, 190, 176, - 177, 178, 181, 184, 187, 188, 255, 129, - 155, 158, 255, 189, 176, 178, 179, 186, - 187, 190, 191, 255, 129, 255, 172, 182, - 171, 173, 174, 175, 176, 183, 166, 157, - 159, 160, 161, 162, 171, 175, 190, 176, - 182, 184, 191, 169, 177, 180, 146, 167, - 170, 182, 171, 172, 189, 190, 176, 180, - 176, 182, 143, 146, 178, 157, 158, 133, - 134, 137, 168, 169, 170, 166, 173, 165, - 169, 174, 178, 187, 255, 131, 132, 140, - 169, 174, 255, 130, 132, 128, 182, 187, - 255, 173, 180, 182, 255, 132, 155, 159, - 161, 175, 128, 163, 165, 128, 134, 136, - 152, 155, 161, 163, 164, 166, 170, 144, - 150, 132, 138, 143, 187, 191, 160, 128, - 129, 132, 135, 133, 134, 160, 255, 192, - 255, 139, 168, 128, 159, 160, 175, 176, - 191, 157, 128, 191, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 
165, 166, 167, 169, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 187, 188, 189, 190, 191, 168, 170, 150, - 153, 155, 163, 165, 167, 169, 173, 153, - 155, 148, 161, 163, 255, 131, 187, 189, - 132, 185, 190, 255, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 167, 169, 171, - 173, 174, 175, 176, 177, 179, 180, 181, - 182, 183, 184, 185, 186, 187, 188, 189, - 190, 191, 166, 170, 172, 178, 150, 153, - 155, 163, 165, 167, 169, 173, 153, 155, - 148, 161, 163, 255, 189, 132, 185, 144, - 152, 161, 164, 255, 188, 129, 131, 190, - 255, 133, 134, 137, 138, 142, 150, 152, - 161, 164, 255, 131, 134, 137, 138, 142, - 144, 146, 175, 178, 180, 182, 255, 134, - 138, 142, 161, 164, 255, 188, 129, 131, - 190, 191, 128, 132, 135, 136, 139, 141, - 150, 151, 162, 163, 130, 190, 191, 151, - 128, 130, 134, 136, 138, 141, 128, 131, - 190, 255, 133, 137, 142, 148, 151, 161, - 164, 255, 128, 132, 134, 136, 138, 141, - 149, 150, 162, 163, 129, 131, 190, 255, - 133, 137, 142, 150, 152, 161, 164, 255, - 130, 131, 138, 150, 143, 148, 152, 159, - 178, 179, 177, 179, 186, 135, 142, 177, - 179, 185, 187, 188, 136, 141, 181, 183, - 185, 152, 153, 190, 191, 177, 191, 128, - 132, 134, 135, 141, 151, 153, 188, 134, - 128, 129, 130, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 173, 183, 185, - 190, 150, 153, 158, 160, 177, 180, 130, - 141, 157, 132, 134, 157, 159, 146, 148, - 178, 180, 146, 147, 178, 179, 180, 255, - 148, 156, 158, 255, 139, 141, 169, 133, - 134, 160, 171, 176, 187, 151, 155, 160, - 162, 191, 149, 158, 165, 188, 176, 190, - 128, 132, 180, 255, 133, 170, 180, 255, - 128, 130, 161, 173, 166, 179, 164, 183, - 173, 144, 146, 148, 168, 178, 180, 184, - 185, 128, 181, 187, 191, 128, 131, 179, - 181, 183, 140, 141, 128, 131, 157, 179, - 181, 183, 144, 176, 164, 175, 177, 191, - 160, 191, 128, 130, 170, 175, 153, 154, - 153, 154, 155, 160, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 175, 175, - 178, 180, 189, 158, 159, 176, 177, 130, - 134, 139, 163, 167, 128, 129, 180, 255, - 134, 159, 178, 255, 166, 173, 135, 147, - 128, 131, 179, 255, 129, 164, 166, 255, - 169, 182, 131, 188, 140, 141, 176, 178, - 180, 183, 184, 190, 191, 129, 171, 175, - 181, 182, 163, 170, 172, 173, 172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 189, 160, 182, 186, 191, 129, - 131, 133, 134, 140, 143, 184, 186, 165, - 166, 128, 129, 130, 132, 133, 134, 135, - 136, 139, 140, 141, 144, 145, 146, 147, - 150, 151, 152, 153, 154, 156, 176, 178, - 128, 130, 184, 255, 135, 190, 131, 175, - 187, 255, 128, 130, 167, 180, 179, 128, - 130, 179, 255, 129, 137, 141, 255, 190, - 172, 183, 159, 170, 188, 128, 131, 190, - 191, 151, 128, 132, 135, 136, 139, 141, - 162, 163, 166, 172, 176, 180, 181, 191, - 128, 134, 176, 255, 132, 255, 175, 181, - 184, 255, 129, 155, 158, 255, 129, 255, - 171, 183, 157, 171, 175, 182, 184, 191, - 146, 167, 169, 182, 171, 172, 189, 190, - 176, 180, 176, 182, 145, 190, 143, 146, - 178, 157, 158, 133, 134, 137, 168, 169, - 170, 165, 169, 173, 178, 187, 255, 131, - 132, 140, 169, 174, 255, 130, 132, 128, - 182, 187, 255, 173, 180, 182, 255, 132, - 155, 159, 161, 175, 128, 163, 165, 128, - 134, 136, 152, 155, 161, 163, 164, 166, - 170, 144, 150, 132, 138, 145, 146, 151, - 166, 169, 139, 168, 160, 128, 129, 132, - 135, 
133, 134, 160, 255, 192, 255, 144, - 145, 150, 155, 157, 158, 141, 144, 129, - 136, 145, 151, 152, 161, 162, 163, 164, - 255, 129, 188, 190, 130, 131, 191, 255, - 141, 151, 129, 132, 133, 134, 137, 138, - 142, 161, 162, 163, 164, 255, 131, 188, - 129, 130, 190, 255, 145, 181, 129, 130, - 131, 134, 135, 136, 137, 138, 139, 141, - 142, 175, 176, 177, 178, 255, 134, 138, - 141, 129, 136, 142, 161, 162, 163, 164, - 255, 129, 188, 130, 131, 190, 191, 128, - 141, 129, 132, 135, 136, 139, 140, 150, - 151, 162, 163, 130, 190, 191, 128, 141, - 151, 129, 130, 134, 136, 138, 140, 128, - 129, 131, 190, 255, 133, 137, 129, 132, - 142, 148, 151, 161, 164, 255, 129, 188, - 190, 191, 130, 131, 130, 134, 128, 132, - 135, 136, 138, 139, 140, 141, 149, 150, - 162, 163, 129, 190, 130, 131, 191, 255, - 133, 137, 141, 151, 129, 132, 142, 161, - 162, 163, 164, 255, 138, 143, 150, 159, - 144, 145, 146, 148, 152, 158, 178, 179, - 177, 179, 180, 186, 135, 142, 177, 179, - 180, 185, 187, 188, 136, 141, 181, 183, - 185, 152, 153, 190, 191, 191, 177, 190, - 128, 132, 134, 135, 141, 151, 153, 188, - 134, 128, 129, 130, 141, 156, 157, 158, - 159, 160, 162, 164, 168, 169, 170, 172, - 173, 174, 175, 176, 179, 183, 177, 173, - 183, 185, 186, 187, 188, 189, 190, 150, - 151, 152, 153, 158, 160, 177, 180, 130, - 132, 141, 157, 133, 134, 157, 159, 146, - 148, 178, 180, 146, 147, 178, 179, 182, - 180, 189, 190, 255, 134, 157, 137, 147, - 148, 255, 139, 141, 169, 133, 134, 178, - 160, 162, 163, 166, 167, 168, 169, 171, - 176, 184, 185, 187, 155, 151, 152, 153, - 154, 150, 160, 162, 191, 149, 151, 152, - 158, 165, 172, 173, 178, 179, 188, 176, - 190, 132, 181, 187, 128, 131, 180, 188, - 189, 255, 130, 133, 170, 171, 179, 180, - 255, 130, 161, 170, 128, 129, 162, 165, - 166, 167, 168, 173, 167, 173, 166, 169, - 170, 174, 175, 177, 178, 179, 164, 171, - 172, 179, 180, 181, 182, 183, 161, 173, - 180, 144, 146, 148, 168, 178, 179, 184, - 185, 128, 181, 187, 191, 128, 131, 179, - 181, 183, 140, 141, 144, 176, 175, 177, - 191, 160, 191, 128, 130, 170, 175, 153, - 154, 153, 154, 155, 160, 162, 163, 164, - 165, 166, 167, 168, 169, 170, 171, 175, - 175, 178, 180, 189, 158, 159, 176, 177, - 130, 134, 139, 167, 163, 164, 165, 166, - 132, 133, 134, 159, 160, 177, 178, 255, - 166, 173, 135, 145, 146, 147, 131, 179, - 188, 128, 130, 180, 181, 182, 185, 186, - 255, 165, 129, 255, 169, 174, 175, 176, - 177, 178, 179, 180, 181, 182, 131, 140, - 141, 188, 176, 178, 180, 183, 184, 190, - 191, 129, 171, 181, 182, 172, 173, 174, - 175, 165, 168, 172, 173, 163, 170, 172, - 184, 190, 158, 128, 143, 160, 175, 144, - 145, 150, 155, 157, 158, 159, 135, 139, - 141, 168, 171, 189, 160, 182, 186, 191, - 129, 131, 133, 134, 140, 143, 184, 186, - 165, 166, 128, 129, 130, 132, 133, 134, - 135, 136, 139, 140, 141, 144, 145, 146, - 147, 150, 151, 152, 153, 154, 156, 176, - 178, 129, 128, 130, 184, 255, 135, 190, - 130, 131, 175, 176, 178, 183, 184, 187, - 255, 172, 128, 130, 167, 180, 179, 130, - 128, 129, 179, 181, 182, 190, 191, 255, - 129, 137, 138, 140, 141, 255, 180, 190, - 172, 174, 175, 177, 178, 181, 182, 183, - 159, 160, 162, 163, 170, 188, 190, 191, - 128, 129, 130, 131, 128, 151, 129, 132, - 135, 136, 139, 141, 162, 163, 166, 172, - 176, 180, 181, 183, 184, 191, 133, 128, - 129, 130, 134, 176, 185, 189, 177, 178, - 179, 186, 187, 190, 191, 255, 129, 132, - 255, 175, 190, 176, 177, 178, 181, 184, - 187, 188, 255, 129, 155, 158, 255, 189, - 176, 178, 179, 186, 187, 190, 191, 255, - 129, 255, 172, 182, 171, 173, 174, 175, - 176, 183, 166, 157, 159, 160, 
161, 162, - 171, 175, 190, 176, 182, 184, 191, 169, - 177, 180, 146, 167, 170, 182, 171, 172, - 189, 190, 176, 180, 176, 182, 143, 146, - 178, 157, 158, 133, 134, 137, 168, 169, - 170, 166, 173, 165, 169, 174, 178, 187, - 255, 131, 132, 140, 169, 174, 255, 130, - 132, 128, 182, 187, 255, 173, 180, 182, - 255, 132, 155, 159, 161, 175, 128, 163, - 165, 128, 134, 136, 152, 155, 161, 163, - 164, 166, 170, 144, 150, 132, 138, 143, - 187, 191, 160, 128, 129, 132, 135, 133, - 134, 160, 255, 192, 255, 185, 128, 191, - 128, 137, 138, 141, 142, 191, 128, 191, - 165, 177, 178, 179, 180, 181, 182, 184, - 185, 186, 187, 188, 189, 191, 128, 175, - 176, 190, 192, 255, 128, 159, 160, 188, - 189, 191, 128, 156, 184, 129, 255, 148, - 176, 140, 168, 132, 160, 188, 152, 180, - 144, 172, 136, 164, 192, 255, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 161, 162, 164, 165, 166, 167, - 168, 169, 171, 172, 173, 174, 175, 176, - 178, 179, 180, 181, 182, 183, 185, 186, - 187, 188, 189, 190, 128, 191, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 161, 162, 164, 165, 166, 167, - 168, 169, 171, 172, 173, 174, 175, 176, - 178, 179, 180, 181, 182, 183, 185, 186, - 187, 188, 189, 190, 128, 191, 129, 130, - 131, 132, 133, 134, 136, 137, 138, 139, - 140, 141, 143, 144, 145, 146, 147, 148, - 150, 151, 152, 153, 154, 155, 157, 158, - 159, 160, 191, 128, 156, 161, 190, 192, - 255, 136, 164, 175, 176, 255, 135, 138, - 139, 187, 188, 191, 192, 255, 0, 127, - 192, 255, 187, 191, 128, 190, 191, 128, - 190, 188, 128, 175, 176, 189, 190, 191, - 145, 155, 157, 159, 128, 191, 130, 135, - 128, 191, 189, 128, 191, 128, 129, 130, - 131, 132, 191, 178, 128, 191, 128, 159, - 160, 163, 164, 191, 133, 128, 191, 128, - 178, 179, 186, 187, 191, 135, 142, 143, - 145, 146, 149, 150, 153, 154, 155, 164, - 128, 191, 128, 165, 166, 191, 128, 255, - 176, 255, 131, 137, 191, 145, 189, 135, - 129, 130, 132, 133, 144, 154, 176, 139, - 159, 150, 156, 159, 164, 167, 168, 170, - 173, 145, 176, 255, 139, 255, 166, 176, - 171, 179, 160, 161, 163, 164, 165, 167, - 169, 171, 173, 174, 175, 176, 177, 179, - 180, 181, 182, 183, 184, 185, 186, 187, - 188, 189, 190, 191, 166, 170, 172, 178, - 150, 153, 155, 163, 165, 167, 169, 173, - 153, 155, 148, 161, 163, 255, 189, 132, - 185, 144, 152, 161, 164, 255, 188, 129, - 131, 190, 255, 133, 134, 137, 138, 142, - 150, 152, 161, 164, 255, 131, 134, 137, - 138, 142, 144, 146, 175, 178, 180, 182, - 255, 134, 138, 142, 161, 164, 255, 188, - 129, 131, 190, 191, 128, 132, 135, 136, - 139, 141, 150, 151, 162, 163, 130, 190, - 191, 151, 128, 130, 134, 136, 138, 141, - 128, 131, 190, 255, 133, 137, 142, 148, - 151, 161, 164, 255, 128, 132, 134, 136, - 138, 141, 149, 150, 162, 163, 129, 131, - 190, 255, 133, 137, 142, 150, 152, 161, - 164, 255, 130, 131, 138, 150, 143, 148, - 152, 159, 178, 179, 177, 179, 186, 135, - 142, 177, 179, 185, 187, 188, 136, 141, - 181, 183, 185, 152, 153, 190, 191, 177, - 191, 128, 132, 134, 135, 141, 151, 153, - 188, 134, 128, 129, 130, 141, 156, 157, - 158, 159, 160, 162, 164, 168, 169, 170, - 172, 173, 174, 175, 176, 179, 183, 173, - 183, 185, 190, 150, 153, 158, 160, 177, - 180, 130, 141, 157, 132, 134, 157, 159, - 146, 148, 178, 180, 146, 147, 178, 179, - 180, 255, 148, 156, 158, 255, 139, 141, - 169, 133, 134, 160, 171, 176, 187, 151, - 155, 160, 162, 191, 149, 158, 165, 188, - 176, 190, 128, 
132, 180, 255, 133, 170, - 180, 255, 128, 130, 161, 173, 166, 179, - 164, 183, 173, 144, 146, 148, 168, 178, - 180, 184, 185, 128, 181, 187, 191, 128, - 131, 179, 181, 183, 140, 141, 128, 131, - 157, 179, 181, 183, 144, 176, 164, 175, - 177, 191, 160, 191, 128, 130, 170, 175, - 153, 154, 153, 154, 155, 160, 162, 163, - 164, 165, 166, 167, 168, 169, 170, 171, - 175, 175, 178, 180, 189, 158, 159, 176, - 177, 130, 134, 139, 163, 167, 128, 129, - 180, 255, 134, 159, 178, 255, 166, 173, - 135, 147, 128, 131, 179, 255, 129, 164, - 166, 255, 169, 182, 131, 188, 140, 141, - 176, 178, 180, 183, 184, 190, 191, 129, - 171, 175, 181, 182, 163, 170, 172, 173, - 172, 184, 190, 158, 128, 143, 160, 175, - 144, 145, 150, 155, 157, 158, 159, 135, - 139, 141, 168, 171, 189, 160, 182, 186, - 191, 129, 131, 133, 134, 140, 143, 184, - 186, 165, 166, 128, 129, 130, 132, 133, - 134, 135, 136, 139, 140, 141, 144, 145, - 146, 147, 150, 151, 152, 153, 154, 156, - 176, 178, 128, 130, 184, 255, 135, 190, - 131, 175, 187, 255, 128, 130, 167, 180, - 179, 128, 130, 179, 255, 129, 137, 141, - 255, 190, 172, 183, 159, 170, 188, 128, - 131, 190, 191, 151, 128, 132, 135, 136, - 139, 141, 162, 163, 166, 172, 176, 180, - 181, 191, 128, 134, 176, 255, 132, 255, - 175, 181, 184, 255, 129, 155, 158, 255, - 129, 255, 171, 183, 157, 171, 175, 182, - 184, 191, 146, 167, 169, 182, 171, 172, - 189, 190, 176, 180, 176, 182, 145, 190, - 143, 146, 178, 157, 158, 133, 134, 137, - 168, 169, 170, 165, 169, 173, 178, 187, - 255, 131, 132, 140, 169, 174, 255, 130, - 132, 128, 182, 187, 255, 173, 180, 182, - 255, 132, 155, 159, 161, 175, 128, 163, - 165, 128, 134, 136, 152, 155, 161, 163, - 164, 166, 170, 144, 150, 132, 138, 145, - 146, 151, 166, 169, 128, 255, 176, 255, - 131, 137, 191, 145, 189, 135, 129, 130, - 132, 133, 144, 154, 176, 139, 159, 150, - 156, 159, 164, 167, 168, 170, 173, 145, - 176, 255, 139, 255, 166, 176, 171, 179, - 160, 161, 163, 164, 165, 166, 167, 169, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 187, 188, 189, 190, 191, 168, 170, 150, - 153, 155, 163, 165, 167, 169, 173, 153, - 155, 148, 161, 163, 255, 131, 187, 189, - 132, 185, 190, 255, 141, 144, 129, 136, - 145, 151, 152, 161, 162, 163, 164, 255, - 129, 188, 190, 130, 131, 191, 255, 141, - 151, 129, 132, 133, 134, 137, 138, 142, - 161, 162, 163, 164, 255, 131, 188, 129, - 130, 190, 255, 145, 181, 129, 130, 131, - 134, 135, 136, 137, 138, 139, 141, 142, - 175, 176, 177, 178, 255, 134, 138, 141, - 129, 136, 142, 161, 162, 163, 164, 255, - 129, 188, 130, 131, 190, 191, 128, 141, - 129, 132, 135, 136, 139, 140, 150, 151, - 162, 163, 130, 190, 191, 128, 141, 151, - 129, 130, 134, 136, 138, 140, 128, 129, - 131, 190, 255, 133, 137, 129, 132, 142, - 148, 151, 161, 164, 255, 129, 188, 190, - 191, 130, 131, 130, 134, 128, 132, 135, - 136, 138, 139, 140, 141, 149, 150, 162, - 163, 129, 190, 130, 131, 191, 255, 133, - 137, 141, 151, 129, 132, 142, 161, 162, - 163, 164, 255, 138, 143, 150, 159, 144, - 145, 146, 148, 152, 158, 178, 179, 177, - 179, 180, 186, 135, 142, 177, 179, 180, - 185, 187, 188, 136, 141, 181, 183, 185, - 152, 153, 190, 191, 191, 177, 190, 128, - 132, 134, 135, 141, 151, 153, 188, 134, - 128, 129, 130, 141, 156, 157, 158, 159, - 160, 162, 164, 168, 169, 170, 172, 173, - 174, 175, 176, 179, 183, 177, 173, 183, - 185, 186, 187, 188, 189, 190, 150, 151, - 152, 153, 158, 160, 177, 180, 130, 132, - 141, 157, 133, 134, 157, 159, 146, 148, - 178, 180, 146, 147, 178, 179, 182, 180, - 189, 190, 255, 134, 157, 137, 147, 148, - 
255, 139, 141, 169, 133, 134, 178, 160, - 162, 163, 166, 167, 168, 169, 171, 176, - 184, 185, 187, 155, 151, 152, 153, 154, - 150, 160, 162, 191, 149, 151, 152, 158, - 165, 172, 173, 178, 179, 188, 176, 190, - 132, 181, 187, 128, 131, 180, 188, 189, - 255, 130, 133, 170, 171, 179, 180, 255, - 130, 161, 170, 128, 129, 162, 165, 166, - 167, 168, 173, 167, 173, 166, 169, 170, - 174, 175, 177, 178, 179, 164, 171, 172, - 179, 180, 181, 182, 183, 161, 173, 180, - 144, 146, 148, 168, 178, 179, 184, 185, - 128, 181, 187, 191, 128, 131, 179, 181, - 183, 140, 141, 144, 176, 175, 177, 191, - 160, 191, 128, 130, 170, 175, 153, 154, - 153, 154, 155, 160, 162, 163, 164, 165, - 166, 167, 168, 169, 170, 171, 175, 175, - 178, 180, 189, 158, 159, 176, 177, 130, - 134, 139, 167, 163, 164, 165, 166, 132, - 133, 134, 159, 160, 177, 178, 255, 166, - 173, 135, 145, 146, 147, 131, 179, 188, - 128, 130, 180, 181, 182, 185, 186, 255, - 165, 129, 255, 169, 174, 175, 176, 177, - 178, 179, 180, 181, 182, 131, 140, 141, - 188, 176, 178, 180, 183, 184, 190, 191, - 129, 171, 181, 182, 172, 173, 174, 175, - 165, 168, 172, 173, 163, 170, 172, 184, - 190, 158, 128, 143, 160, 175, 144, 145, - 150, 155, 157, 158, 159, 135, 139, 141, - 168, 171, 189, 160, 182, 186, 191, 129, - 131, 133, 134, 140, 143, 184, 186, 165, - 166, 128, 129, 130, 132, 133, 134, 135, - 136, 139, 140, 141, 144, 145, 146, 147, - 150, 151, 152, 153, 154, 156, 176, 178, - 129, 128, 130, 184, 255, 135, 190, 130, - 131, 175, 176, 178, 183, 184, 187, 255, - 172, 128, 130, 167, 180, 179, 130, 128, - 129, 179, 181, 182, 190, 191, 255, 129, - 137, 138, 140, 141, 255, 180, 190, 172, - 174, 175, 177, 178, 181, 182, 183, 159, - 160, 162, 163, 170, 188, 190, 191, 128, - 129, 130, 131, 128, 151, 129, 132, 135, - 136, 139, 141, 162, 163, 166, 172, 176, - 180, 181, 183, 184, 191, 133, 128, 129, - 130, 134, 176, 185, 189, 177, 178, 179, - 186, 187, 190, 191, 255, 129, 132, 255, - 175, 190, 176, 177, 178, 181, 184, 187, - 188, 255, 129, 155, 158, 255, 189, 176, - 178, 179, 186, 187, 190, 191, 255, 129, - 255, 172, 182, 171, 173, 174, 175, 176, - 183, 166, 157, 159, 160, 161, 162, 171, - 175, 190, 176, 182, 184, 191, 169, 177, - 180, 146, 167, 170, 182, 171, 172, 189, - 190, 176, 180, 176, 182, 143, 146, 178, - 157, 158, 133, 134, 137, 168, 169, 170, - 166, 173, 165, 169, 174, 178, 187, 255, - 131, 132, 140, 169, 174, 255, 130, 132, - 128, 182, 187, 255, 173, 180, 182, 255, - 132, 155, 159, 161, 175, 128, 163, 165, - 128, 134, 136, 152, 155, 161, 163, 164, - 166, 170, 144, 150, 132, 138, 143, 187, - 191, 160, 128, 129, 132, 135, 133, 134, - 160, 255, 192, 255, 139, 168, 160, 128, - 129, 132, 135, 133, 134, 160, 255, 192, - 255, 144, 145, 150, 155, 157, 158, 144, - 145, 150, 155, 157, 158, 159, 135, 166, - 191, 133, 128, 191, 128, 130, 131, 132, - 133, 137, 138, 139, 140, 191, 174, 188, - 128, 129, 130, 131, 132, 133, 134, 144, - 145, 165, 166, 169, 170, 175, 176, 184, - 185, 191, 128, 132, 170, 129, 135, 136, - 191, 181, 186, 128, 191, 144, 128, 148, - 149, 150, 151, 191, 128, 132, 133, 135, - 136, 138, 139, 143, 144, 191, 163, 128, - 179, 180, 182, 183, 191, 128, 129, 191, - 166, 176, 191, 128, 151, 152, 158, 159, - 178, 179, 185, 186, 187, 188, 190, 128, - 191, 160, 128, 191, 128, 130, 131, 135, - 191, 129, 134, 136, 190, 128, 159, 160, - 191, 128, 175, 176, 255, 10, 13, 127, - 194, 216, 219, 220, 224, 225, 226, 234, - 235, 236, 237, 239, 240, 243, 0, 31, - 128, 191, 192, 223, 227, 238, 241, 247, - 248, 255, 204, 205, 210, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 
224, 225, - 226, 227, 234, 239, 240, 243, 204, 205, - 210, 214, 215, 216, 217, 219, 220, 221, - 222, 223, 224, 225, 226, 227, 234, 239, - 240, 243, 204, 205, 210, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 234, 239, 240, 243, 194, 216, - 219, 220, 224, 225, 226, 234, 235, 236, - 237, 239, 240, 243, 32, 126, 192, 223, - 227, 238, 241, 247, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 235, 236, 237, 239, 240, 243, 204, - 205, 210, 214, 215, 216, 217, 219, 220, - 221, 222, 223, 224, 225, 226, 227, 234, - 237, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 237, 239, 240, - 243, 204, 205, 210, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 234, 237, 239, 240, 243, 204, 205, - 210, 214, 215, 216, 217, 219, 220, 221, - 222, 223, 224, 225, 226, 227, 234, 239, - 240, 243, 204, 205, 210, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 234, 235, 236, 237, 239, 240, - 243, 204, 205, 210, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 234, 239, 240, 243, 204, 205, 210, - 214, 215, 216, 217, 219, 220, 221, 222, - 223, 224, 225, 226, 227, 234, 239, 240, - 243, 204, 205, 210, 214, 215, 216, 217, - 219, 220, 221, 222, 223, 224, 225, 226, - 227, 234, 239, 240, 243, 204, 205, 210, - 214, 215, 216, 217, 219, 220, 221, 222, - 223, 224, 225, 226, 227, 234, 237, 239, - 240, 243, 204, 205, 210, 214, 215, 216, - 217, 219, 220, 221, 222, 223, 224, 225, - 226, 227, 234, 237, 239, 240, 243, 204, - 205, 210, 214, 215, 216, 217, 219, 220, - 221, 222, 223, 224, 225, 226, 227, 234, - 237, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, 204, 205, 210, 214, - 215, 216, 217, 219, 220, 221, 222, 223, - 224, 225, 226, 227, 234, 239, 240, 243, - 204, 205, 210, 214, 215, 216, 217, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 234, 239, 240, 243, + 10, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 167, 169, 171, 173, 174, 175, + 176, 177, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 166, + 170, 172, 178, 150, 153, 155, 163, 165, + 167, 169, 173, 153, 155, 148, 161, 163, + 255, 189, 132, 185, 144, 152, 161, 164, + 255, 188, 129, 131, 190, 255, 133, 
134, + 137, 138, 142, 150, 152, 161, 164, 255, + 131, 134, 137, 138, 142, 144, 146, 175, + 178, 180, 182, 255, 134, 138, 142, 161, + 164, 255, 188, 129, 131, 190, 191, 128, + 132, 135, 136, 139, 141, 150, 151, 162, + 163, 130, 190, 191, 151, 128, 130, 134, + 136, 138, 141, 128, 131, 190, 255, 133, + 137, 142, 148, 151, 161, 164, 255, 128, + 132, 134, 136, 138, 141, 149, 150, 162, + 163, 129, 131, 190, 255, 133, 137, 142, + 150, 152, 161, 164, 255, 130, 131, 138, + 150, 143, 148, 152, 159, 178, 179, 177, + 179, 186, 135, 142, 177, 179, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 177, 191, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 173, 183, 185, 190, 150, 153, + 158, 160, 177, 180, 130, 141, 157, 132, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 178, 179, 180, 255, 148, 156, 158, + 255, 139, 141, 169, 133, 134, 160, 171, + 176, 187, 151, 155, 160, 162, 191, 149, + 158, 165, 188, 176, 190, 128, 132, 180, + 255, 133, 170, 180, 255, 128, 130, 161, + 173, 166, 179, 164, 183, 173, 144, 146, + 148, 168, 178, 180, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 128, 131, 157, 179, 181, 183, 144, + 176, 164, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 175, 178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 163, + 167, 128, 129, 180, 255, 134, 159, 178, + 255, 166, 173, 135, 147, 128, 131, 179, + 255, 129, 164, 166, 255, 169, 182, 131, + 188, 140, 141, 176, 178, 180, 183, 184, + 190, 191, 129, 171, 175, 181, 182, 163, + 170, 172, 173, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 128, 130, 184, + 255, 135, 190, 131, 175, 187, 255, 128, + 130, 167, 180, 179, 128, 130, 179, 255, + 129, 137, 141, 255, 190, 172, 183, 159, + 170, 188, 128, 131, 190, 191, 151, 128, + 132, 135, 136, 139, 141, 162, 163, 166, + 172, 176, 180, 181, 191, 128, 134, 176, + 255, 132, 255, 175, 181, 184, 255, 129, + 155, 158, 255, 129, 255, 171, 183, 157, + 171, 175, 182, 184, 191, 146, 167, 169, + 182, 171, 172, 189, 190, 176, 180, 176, + 182, 145, 190, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 128, 182, 187, 255, + 173, 180, 182, 255, 132, 155, 159, 161, + 175, 128, 163, 165, 128, 134, 136, 152, + 155, 161, 163, 164, 166, 170, 144, 150, + 132, 138, 145, 146, 151, 166, 169, 0, + 127, 176, 255, 131, 137, 191, 145, 189, + 135, 129, 130, 132, 133, 144, 154, 176, + 139, 159, 150, 156, 159, 164, 167, 168, + 170, 173, 145, 176, 255, 139, 255, 166, + 176, 171, 179, 160, 161, 163, 164, 165, + 166, 167, 169, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 168, 170, 150, 153, 155, 163, 165, 167, + 169, 173, 153, 155, 148, 161, 163, 255, + 131, 187, 189, 132, 185, 190, 255, 141, + 144, 129, 136, 145, 151, 152, 161, 162, + 163, 164, 255, 129, 188, 190, 130, 131, + 191, 255, 141, 151, 129, 132, 133, 134, + 137, 138, 142, 161, 162, 163, 164, 255, + 131, 188, 129, 130, 190, 255, 145, 181, + 129, 130, 131, 134, 135, 136, 137, 138, + 139, 141, 142, 175, 176, 177, 178, 255, + 134, 138, 141, 129, 
136, 142, 161, 162, + 163, 164, 255, 129, 188, 130, 131, 190, + 191, 128, 141, 129, 132, 135, 136, 139, + 140, 150, 151, 162, 163, 130, 190, 191, + 128, 141, 151, 129, 130, 134, 136, 138, + 140, 128, 129, 131, 190, 255, 133, 137, + 129, 132, 142, 148, 151, 161, 164, 255, + 129, 188, 190, 191, 130, 131, 130, 134, + 128, 132, 135, 136, 138, 139, 140, 141, + 149, 150, 162, 163, 129, 190, 130, 131, + 191, 255, 133, 137, 141, 151, 129, 132, + 142, 161, 162, 163, 164, 255, 138, 143, + 150, 159, 144, 145, 146, 148, 152, 158, + 178, 179, 177, 179, 180, 186, 135, 142, + 177, 179, 180, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 191, + 177, 190, 128, 132, 134, 135, 141, 151, + 153, 188, 134, 128, 129, 130, 141, 156, + 157, 158, 159, 160, 162, 164, 168, 169, + 170, 172, 173, 174, 175, 176, 179, 183, + 177, 173, 183, 185, 186, 187, 188, 189, + 190, 150, 151, 152, 153, 158, 160, 177, + 180, 130, 132, 141, 157, 133, 134, 157, + 159, 146, 148, 178, 180, 146, 147, 178, + 179, 182, 180, 189, 190, 255, 134, 157, + 137, 147, 148, 255, 139, 141, 169, 133, + 134, 178, 160, 162, 163, 166, 167, 168, + 169, 171, 176, 184, 185, 187, 155, 151, + 152, 153, 154, 150, 160, 162, 191, 149, + 151, 152, 158, 165, 172, 173, 178, 179, + 188, 176, 190, 132, 181, 187, 128, 131, + 180, 188, 189, 255, 130, 133, 170, 171, + 179, 180, 255, 130, 161, 170, 128, 129, + 162, 165, 166, 167, 168, 173, 167, 173, + 166, 169, 170, 174, 175, 177, 178, 179, + 164, 171, 172, 179, 180, 181, 182, 183, + 161, 173, 180, 144, 146, 148, 168, 178, + 179, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 144, 176, + 175, 177, 191, 160, 191, 128, 130, 170, + 175, 153, 154, 153, 154, 155, 160, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 175, 175, 178, 180, 189, 158, 159, + 176, 177, 130, 134, 139, 167, 163, 164, + 165, 166, 132, 133, 134, 159, 160, 177, + 178, 255, 166, 173, 135, 145, 146, 147, + 131, 179, 188, 128, 130, 180, 181, 182, + 185, 186, 255, 165, 129, 255, 169, 174, + 175, 176, 177, 178, 179, 180, 181, 182, + 131, 140, 141, 188, 176, 178, 180, 183, + 184, 190, 191, 129, 171, 181, 182, 172, + 173, 174, 175, 165, 168, 172, 173, 163, + 170, 172, 184, 190, 158, 128, 143, 160, + 175, 144, 145, 150, 155, 157, 158, 159, + 135, 139, 141, 168, 171, 189, 160, 182, + 186, 191, 129, 131, 133, 134, 140, 143, + 184, 186, 165, 166, 128, 129, 130, 132, + 133, 134, 135, 136, 139, 140, 141, 144, + 145, 146, 147, 150, 151, 152, 153, 154, + 156, 176, 178, 129, 128, 130, 184, 255, + 135, 190, 130, 131, 175, 176, 178, 183, + 184, 187, 255, 172, 128, 130, 167, 180, + 179, 130, 128, 129, 179, 181, 182, 190, + 191, 255, 129, 137, 138, 140, 141, 255, + 180, 190, 172, 174, 175, 177, 178, 181, + 182, 183, 159, 160, 162, 163, 170, 188, + 190, 191, 128, 129, 130, 131, 128, 151, + 129, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 183, 184, 191, + 133, 128, 129, 130, 134, 176, 185, 189, + 177, 178, 179, 186, 187, 190, 191, 255, + 129, 132, 255, 175, 190, 176, 177, 178, + 181, 184, 187, 188, 255, 129, 155, 158, + 255, 189, 176, 178, 179, 186, 187, 190, + 191, 255, 129, 255, 172, 182, 171, 173, + 174, 175, 176, 183, 166, 157, 159, 160, + 161, 162, 171, 175, 190, 176, 182, 184, + 191, 169, 177, 180, 146, 167, 170, 182, + 171, 172, 189, 190, 176, 180, 176, 182, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 166, 173, 165, 169, 174, + 178, 187, 255, 131, 132, 140, 169, 174, + 255, 130, 132, 128, 182, 187, 255, 173, + 180, 182, 255, 132, 155, 159, 161, 175, + 128, 163, 165, 128, 134, 136, 152, 155, + 161, 
163, 164, 166, 170, 144, 150, 132, + 138, 143, 187, 191, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 139, + 168, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 144, 145, 150, 155, + 157, 158, 128, 191, 173, 128, 159, 160, + 191, 156, 128, 133, 134, 191, 0, 127, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 131, 190, 191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 
176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 170, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 129, 128, 130, 184, 255, 135, 190, 130, + 131, 175, 176, 178, 183, 184, 187, 255, + 172, 128, 130, 167, 180, 179, 130, 128, + 129, 179, 181, 182, 190, 191, 255, 129, + 137, 138, 140, 141, 255, 180, 190, 172, + 174, 175, 177, 178, 181, 182, 183, 159, + 160, 162, 163, 170, 188, 190, 191, 128, + 129, 130, 131, 
128, 151, 129, 132, 135, + 136, 139, 141, 162, 163, 166, 172, 176, + 180, 181, 183, 184, 191, 133, 128, 129, + 130, 134, 176, 185, 189, 177, 178, 179, + 186, 187, 190, 191, 255, 129, 132, 255, + 175, 190, 176, 177, 178, 181, 184, 187, + 188, 255, 129, 155, 158, 255, 189, 176, + 178, 179, 186, 187, 190, 191, 255, 129, + 255, 172, 182, 171, 173, 174, 175, 176, + 183, 166, 157, 159, 160, 161, 162, 171, + 175, 190, 176, 182, 184, 191, 169, 177, + 180, 146, 167, 170, 182, 171, 172, 189, + 190, 176, 180, 176, 182, 143, 146, 178, + 157, 158, 133, 134, 137, 168, 169, 170, + 166, 173, 165, 169, 174, 178, 187, 255, + 131, 132, 140, 169, 174, 255, 130, 132, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 163, 165, + 128, 134, 136, 152, 155, 161, 163, 164, + 166, 170, 144, 150, 132, 138, 143, 187, + 191, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 139, 168, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 144, 145, 150, 155, 157, 158, 128, + 191, 160, 172, 174, 191, 128, 133, 134, + 155, 157, 191, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 128, 255, 128, 129, 130, 132, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 160, 255, 128, 129, + 130, 133, 134, 135, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 160, 255, + 168, 255, 128, 129, 130, 134, 135, 141, + 156, 157, 158, 159, 160, 162, 164, 168, + 169, 170, 172, 173, 174, 175, 176, 179, + 183, 168, 255, 192, 255, 159, 139, 187, + 158, 159, 176, 255, 135, 138, 139, 187, + 188, 255, 168, 255, 153, 154, 155, 160, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 175, 177, 178, 179, 180, 181, + 182, 184, 185, 186, 187, 188, 189, 191, + 176, 190, 192, 255, 135, 147, 160, 188, + 128, 156, 184, 129, 255, 128, 129, 130, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 158, 159, 135, 255, + 148, 176, 140, 168, 132, 160, 188, 152, + 180, 144, 172, 136, 164, 192, 255, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 128, 156, 160, 255, 136, 164, + 175, 176, 255, 128, 141, 143, 191, 128, + 129, 152, 155, 156, 130, 191, 140, 141, + 128, 138, 144, 167, 175, 191, 128, 159, + 176, 191, 157, 128, 191, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 
178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 128, 156, 160, 255, 136, 164, 175, + 176, 255, 135, 138, 139, 187, 188, 191, + 192, 255, 187, 191, 128, 190, 128, 190, + 188, 128, 175, 190, 191, 145, 155, 157, + 159, 128, 191, 130, 135, 128, 191, 189, + 128, 191, 128, 129, 130, 131, 132, 191, + 178, 128, 191, 128, 159, 164, 191, 133, + 128, 191, 128, 178, 187, 191, 135, 142, + 143, 145, 146, 149, 150, 153, 154, 155, + 164, 128, 191, 128, 165, 166, 191, 144, + 145, 150, 155, 157, 158, 159, 135, 166, + 191, 133, 128, 191, 128, 130, 131, 132, + 133, 137, 138, 139, 140, 191, 174, 188, + 128, 129, 130, 131, 132, 133, 134, 144, + 145, 165, 166, 169, 170, 175, 176, 184, + 185, 191, 128, 132, 170, 129, 135, 136, + 191, 181, 186, 128, 191, 144, 128, 148, + 149, 150, 151, 191, 128, 132, 133, 135, + 136, 138, 139, 143, 144, 191, 163, 128, + 179, 180, 182, 183, 191, 128, 129, 191, + 166, 176, 191, 128, 151, 152, 158, 159, + 178, 179, 185, 186, 187, 188, 190, 128, + 191, 160, 128, 191, 128, 129, 135, 132, + 134, 128, 175, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 0, 127, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 130, + 141, 157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 
176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 166, 167, 169, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 168, 170, 150, 153, 155, + 163, 165, 167, 169, 173, 153, 155, 148, + 161, 163, 255, 131, 187, 189, 132, 185, + 190, 255, 141, 144, 129, 136, 145, 151, + 152, 161, 162, 163, 164, 255, 129, 188, + 190, 130, 131, 191, 255, 141, 151, 129, + 132, 133, 134, 137, 138, 142, 161, 162, + 163, 164, 255, 131, 188, 129, 130, 190, + 255, 145, 181, 129, 130, 131, 134, 135, + 136, 137, 138, 139, 141, 142, 175, 176, + 177, 178, 255, 134, 138, 141, 129, 136, + 142, 161, 162, 163, 164, 255, 129, 188, + 130, 131, 190, 191, 128, 141, 129, 132, + 135, 136, 139, 140, 150, 151, 162, 163, + 130, 190, 191, 128, 141, 151, 129, 130, + 134, 136, 138, 140, 128, 129, 131, 190, + 255, 133, 137, 129, 132, 142, 148, 151, + 161, 164, 255, 129, 188, 190, 191, 130, + 131, 130, 134, 128, 132, 135, 136, 138, + 139, 140, 141, 149, 150, 162, 163, 129, + 190, 130, 131, 191, 255, 133, 137, 141, + 151, 129, 132, 142, 161, 162, 163, 164, + 255, 138, 143, 150, 159, 144, 145, 146, + 148, 152, 158, 178, 179, 177, 179, 180, + 186, 135, 142, 177, 179, 180, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 191, 177, 190, 128, 132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 177, 173, 183, 185, 186, + 187, 188, 189, 190, 150, 151, 152, 153, + 158, 160, 177, 180, 130, 132, 141, 157, + 133, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 182, 180, 189, 190, + 255, 134, 157, 137, 147, 148, 255, 139, + 141, 169, 133, 134, 178, 160, 162, 163, + 166, 167, 168, 169, 171, 176, 184, 185, + 187, 155, 151, 152, 153, 154, 150, 160, + 162, 191, 149, 151, 152, 158, 165, 172, + 173, 178, 179, 188, 176, 190, 132, 181, + 187, 128, 131, 180, 188, 189, 255, 130, + 133, 170, 171, 179, 180, 255, 130, 161, + 170, 128, 129, 162, 165, 166, 167, 168, + 173, 167, 173, 166, 169, 170, 174, 175, + 177, 178, 
179, 164, 171, 172, 179, 180, + 181, 182, 183, 161, 173, 180, 144, 146, + 148, 168, 178, 179, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 144, 176, 175, 177, 191, 160, 191, + 128, 130, 170, 175, 153, 154, 153, 154, + 155, 160, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 175, 175, 178, 180, + 189, 158, 159, 176, 177, 130, 134, 139, + 167, 163, 164, 165, 166, 132, 133, 134, + 159, 160, 177, 178, 255, 166, 173, 135, + 145, 146, 147, 131, 179, 188, 128, 130, + 180, 181, 182, 185, 186, 255, 165, 129, + 255, 169, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 131, 140, 141, 188, 176, + 178, 180, 183, 184, 190, 191, 129, 171, + 181, 182, 172, 173, 174, 175, 165, 168, + 172, 173, 163, 170, 172, 184, 190, 158, + 128, 143, 160, 175, 144, 145, 150, 155, + 157, 158, 159, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 186, 165, 166, 128, + 129, 130, 132, 133, 134, 135, 136, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 176, 178, 129, 128, + 130, 184, 255, 135, 190, 130, 131, 175, + 176, 178, 183, 184, 187, 255, 172, 128, + 130, 167, 180, 179, 130, 128, 129, 179, + 181, 182, 190, 191, 255, 129, 137, 138, + 140, 141, 255, 180, 190, 172, 174, 175, + 177, 178, 181, 182, 183, 159, 160, 162, + 163, 170, 188, 190, 191, 128, 129, 130, + 131, 128, 151, 129, 132, 135, 136, 139, + 141, 162, 163, 166, 172, 176, 180, 181, + 183, 184, 191, 133, 128, 129, 130, 134, + 176, 185, 189, 177, 178, 179, 186, 187, + 190, 191, 255, 129, 132, 255, 175, 190, + 176, 177, 178, 181, 184, 187, 188, 255, + 129, 155, 158, 255, 189, 176, 178, 179, + 186, 187, 190, 191, 255, 129, 255, 172, + 182, 171, 173, 174, 175, 176, 183, 166, + 157, 159, 160, 161, 162, 171, 175, 190, + 176, 182, 184, 191, 169, 177, 180, 146, + 167, 170, 182, 171, 172, 189, 190, 176, + 180, 176, 182, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 166, 173, + 165, 169, 174, 178, 187, 255, 131, 132, + 140, 169, 174, 255, 130, 132, 128, 182, + 187, 255, 173, 180, 182, 255, 132, 155, + 159, 161, 175, 128, 163, 165, 128, 134, + 136, 152, 155, 161, 163, 164, 166, 170, + 144, 150, 132, 138, 143, 187, 191, 160, + 128, 129, 132, 135, 133, 134, 160, 255, + 192, 255, 139, 168, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 144, + 145, 150, 155, 157, 158, 128, 129, 130, + 132, 133, 134, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 160, 255, 128, + 129, 130, 133, 134, 135, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 160, + 255, 168, 255, 128, 129, 130, 134, 135, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 168, 255, 192, 255, 159, 139, + 187, 158, 159, 176, 255, 135, 138, 139, + 187, 188, 255, 168, 255, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 177, 178, 179, 180, + 181, 182, 184, 185, 186, 187, 188, 189, + 191, 176, 190, 192, 255, 135, 147, 160, + 188, 128, 156, 184, 129, 255, 128, 129, + 130, 133, 134, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 158, 159, 135, + 255, 148, 176, 140, 168, 132, 160, 188, + 152, 180, 144, 172, 136, 164, 192, 255, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 171, 172, 173, 174, + 175, 176, 178, 179, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 128, 
191, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 160, 161, 162, 164, 165, + 166, 167, 168, 169, 171, 172, 173, 174, + 175, 176, 178, 179, 180, 181, 182, 183, + 185, 186, 187, 188, 189, 190, 128, 191, + 129, 130, 131, 132, 133, 134, 136, 137, + 138, 139, 140, 141, 143, 144, 145, 146, + 147, 148, 150, 151, 152, 153, 154, 155, + 157, 158, 159, 128, 156, 160, 255, 136, + 164, 175, 176, 255, 142, 128, 191, 128, + 129, 152, 155, 156, 130, 191, 139, 141, + 128, 140, 142, 143, 144, 167, 168, 174, + 175, 191, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 167, 169, 171, 173, 174, + 175, 176, 177, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 166, 170, 172, 178, 150, 153, 155, 163, + 165, 167, 169, 173, 153, 155, 148, 161, + 163, 255, 189, 132, 185, 144, 152, 161, + 164, 255, 188, 129, 131, 190, 255, 133, + 134, 137, 138, 142, 150, 152, 161, 164, + 255, 131, 134, 137, 138, 142, 144, 146, + 175, 178, 180, 182, 255, 134, 138, 142, + 161, 164, 255, 188, 129, 131, 190, 191, + 128, 132, 135, 136, 139, 141, 150, 151, + 162, 163, 130, 190, 191, 151, 128, 130, + 134, 136, 138, 141, 128, 131, 190, 255, + 133, 137, 142, 148, 151, 161, 164, 255, + 128, 132, 134, 136, 138, 141, 149, 150, + 162, 163, 129, 131, 190, 255, 133, 137, + 142, 150, 152, 161, 164, 255, 130, 131, + 138, 150, 143, 148, 152, 159, 178, 179, + 177, 179, 186, 135, 142, 177, 179, 185, + 187, 188, 136, 141, 181, 183, 185, 152, + 153, 190, 191, 177, 191, 128, 132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 173, 183, 185, 190, 150, + 153, 158, 160, 177, 180, 130, 141, 157, + 132, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 180, 255, 148, 156, + 158, 255, 139, 141, 169, 133, 134, 160, + 171, 176, 187, 151, 155, 160, 162, 191, + 149, 158, 165, 188, 176, 190, 128, 132, + 180, 255, 133, 170, 180, 255, 128, 130, + 161, 173, 166, 179, 164, 183, 173, 144, + 146, 148, 168, 178, 180, 184, 185, 128, + 181, 187, 191, 128, 131, 179, 181, 183, + 140, 141, 144, 176, 175, 177, 191, 160, + 191, 128, 130, 170, 175, 153, 154, 153, + 154, 155, 160, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 175, 175, 178, + 180, 189, 158, 159, 176, 177, 130, 134, + 139, 163, 167, 128, 129, 180, 255, 134, + 159, 178, 255, 166, 173, 135, 147, 128, + 131, 179, 255, 129, 164, 166, 255, 169, + 182, 131, 188, 140, 141, 176, 178, 180, + 183, 184, 190, 191, 129, 171, 175, 181, + 182, 163, 170, 172, 173, 172, 184, 190, + 158, 128, 143, 160, 175, 144, 145, 150, + 155, 157, 158, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 186, 165, 166, 128, + 129, 130, 132, 133, 134, 135, 136, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 176, 178, 128, 130, + 184, 255, 135, 190, 131, 175, 187, 255, + 128, 130, 167, 180, 179, 128, 130, 179, + 255, 129, 137, 141, 255, 190, 172, 183, + 159, 170, 188, 128, 131, 190, 191, 151, + 128, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 191, 128, 134, + 176, 255, 132, 255, 175, 181, 184, 255, + 129, 155, 158, 255, 129, 255, 171, 183, + 157, 171, 175, 182, 184, 191, 146, 167, + 169, 182, 171, 172, 189, 190, 176, 180, + 176, 182, 145, 190, 143, 146, 178, 157, + 158, 133, 134, 137, 
168, 169, 170, 165, + 169, 173, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 163, 165, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 144, + 150, 132, 138, 160, 128, 129, 132, 135, + 133, 134, 160, 255, 192, 255, 128, 131, + 157, 179, 181, 183, 164, 144, 145, 150, + 155, 157, 158, 159, 145, 146, 151, 166, + 169, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 166, 167, 169, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 168, 170, 150, 153, 155, 163, + 165, 167, 169, 173, 153, 155, 148, 161, + 163, 255, 131, 187, 189, 132, 185, 190, + 255, 141, 144, 129, 136, 145, 151, 152, + 161, 162, 163, 164, 255, 129, 188, 190, + 130, 131, 191, 255, 141, 151, 129, 132, + 133, 134, 137, 138, 142, 161, 162, 163, + 164, 255, 131, 188, 129, 130, 190, 255, + 145, 181, 129, 130, 131, 134, 135, 136, + 137, 138, 139, 141, 142, 175, 176, 177, + 178, 255, 134, 138, 141, 129, 136, 142, + 161, 162, 163, 164, 255, 129, 188, 130, + 131, 190, 191, 128, 141, 129, 132, 135, + 136, 139, 140, 150, 151, 162, 163, 130, + 190, 191, 128, 141, 151, 129, 130, 134, + 136, 138, 140, 128, 129, 131, 190, 255, + 133, 137, 129, 132, 142, 148, 151, 161, + 164, 255, 129, 188, 190, 191, 130, 131, + 130, 134, 128, 132, 135, 136, 138, 139, + 140, 141, 149, 150, 162, 163, 129, 190, + 130, 131, 191, 255, 133, 137, 141, 151, + 129, 132, 142, 161, 162, 163, 164, 255, + 138, 143, 150, 159, 144, 145, 146, 148, + 152, 158, 178, 179, 177, 179, 180, 186, + 135, 142, 177, 179, 180, 185, 187, 188, + 136, 141, 181, 183, 185, 152, 153, 190, + 191, 191, 177, 190, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 177, 173, 183, 185, 186, 187, + 188, 189, 190, 150, 151, 152, 153, 158, + 160, 177, 180, 130, 132, 141, 157, 133, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 178, 179, 182, 180, 189, 190, 255, + 134, 157, 137, 147, 148, 255, 139, 141, + 169, 133, 134, 178, 160, 162, 163, 166, + 167, 168, 169, 171, 176, 184, 185, 187, + 155, 151, 152, 153, 154, 150, 160, 162, + 191, 149, 151, 152, 158, 165, 172, 173, + 178, 179, 188, 176, 190, 132, 181, 187, + 128, 131, 180, 188, 189, 255, 130, 133, + 170, 171, 179, 180, 255, 130, 161, 170, + 128, 129, 162, 165, 166, 167, 168, 173, + 167, 173, 166, 169, 170, 174, 175, 177, + 178, 179, 164, 171, 172, 179, 180, 181, + 182, 183, 161, 173, 180, 144, 146, 148, + 168, 178, 179, 184, 185, 128, 181, 187, + 191, 128, 131, 179, 181, 183, 140, 141, + 144, 176, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 175, 178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 167, + 163, 164, 165, 166, 132, 133, 134, 159, + 160, 177, 178, 255, 166, 173, 135, 145, + 146, 147, 131, 179, 188, 128, 130, 180, + 181, 182, 185, 186, 255, 165, 129, 255, + 169, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 131, 140, 141, 188, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 181, + 182, 172, 173, 174, 175, 165, 168, 172, + 173, 163, 170, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 
132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 129, 128, 130, + 184, 255, 135, 190, 130, 131, 175, 176, + 178, 183, 184, 187, 255, 172, 128, 130, + 167, 180, 179, 130, 128, 129, 179, 181, + 182, 190, 191, 255, 129, 137, 138, 140, + 141, 255, 180, 190, 172, 174, 175, 177, + 178, 181, 182, 183, 159, 160, 162, 163, + 170, 188, 190, 191, 128, 129, 130, 131, + 128, 151, 129, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 183, + 184, 191, 133, 128, 129, 130, 134, 176, + 185, 189, 177, 178, 179, 186, 187, 190, + 191, 255, 129, 132, 255, 175, 190, 176, + 177, 178, 181, 184, 187, 188, 255, 129, + 155, 158, 255, 189, 176, 178, 179, 186, + 187, 190, 191, 255, 129, 255, 172, 182, + 171, 173, 174, 175, 176, 183, 166, 157, + 159, 160, 161, 162, 171, 175, 190, 176, + 182, 184, 191, 169, 177, 180, 146, 167, + 170, 182, 171, 172, 189, 190, 176, 180, + 176, 182, 143, 146, 178, 157, 158, 133, + 134, 137, 168, 169, 170, 166, 173, 165, + 169, 174, 178, 187, 255, 131, 132, 140, + 169, 174, 255, 130, 132, 128, 182, 187, + 255, 173, 180, 182, 255, 132, 155, 159, + 161, 175, 128, 163, 165, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 144, + 150, 132, 138, 143, 187, 191, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 139, 168, 128, 159, 160, 175, 176, + 191, 157, 128, 191, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 130, + 141, 157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 
157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 139, 168, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 144, + 145, 150, 155, 157, 158, 141, 144, 129, + 136, 145, 151, 152, 161, 162, 163, 164, + 255, 129, 188, 190, 130, 131, 191, 255, + 141, 151, 129, 132, 133, 134, 137, 138, + 142, 161, 162, 163, 164, 255, 131, 188, + 129, 130, 190, 255, 145, 181, 129, 130, + 131, 134, 135, 136, 137, 138, 139, 141, + 142, 175, 176, 177, 178, 255, 134, 138, + 141, 129, 136, 142, 161, 162, 163, 164, + 255, 129, 188, 130, 131, 190, 191, 128, + 141, 129, 132, 135, 136, 139, 140, 150, + 151, 162, 163, 130, 190, 191, 128, 141, + 151, 129, 130, 134, 136, 138, 140, 128, + 129, 131, 190, 255, 133, 137, 129, 132, + 142, 148, 151, 161, 164, 255, 129, 188, + 190, 191, 130, 131, 130, 134, 128, 132, + 135, 136, 138, 139, 140, 141, 149, 150, + 162, 163, 129, 190, 130, 131, 191, 255, + 133, 137, 141, 151, 129, 132, 142, 161, + 162, 163, 164, 255, 138, 143, 150, 159, + 144, 145, 146, 148, 152, 158, 178, 179, + 177, 179, 180, 186, 135, 142, 177, 179, + 180, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 191, 177, 190, + 128, 132, 134, 135, 141, 151, 153, 188, + 134, 128, 129, 130, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 177, 173, + 183, 185, 186, 187, 188, 189, 190, 150, + 151, 152, 153, 158, 160, 177, 180, 130, + 132, 141, 157, 133, 134, 157, 159, 146, + 148, 178, 180, 146, 147, 178, 179, 182, + 180, 189, 190, 255, 134, 157, 137, 147, + 148, 255, 139, 141, 169, 133, 134, 178, + 160, 162, 163, 166, 167, 168, 169, 171, + 176, 184, 185, 187, 155, 151, 152, 153, + 154, 150, 160, 162, 191, 149, 151, 152, + 158, 165, 172, 173, 178, 179, 188, 176, + 190, 132, 181, 187, 128, 131, 180, 188, + 189, 255, 130, 133, 170, 171, 179, 180, + 255, 130, 161, 170, 128, 129, 162, 165, + 166, 167, 168, 173, 167, 173, 166, 169, + 170, 174, 175, 177, 178, 179, 164, 171, + 172, 179, 180, 181, 182, 183, 161, 173, + 180, 144, 146, 148, 168, 178, 179, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 
141, 144, 176, 175, 177, + 191, 160, 191, 128, 130, 170, 175, 153, + 154, 153, 154, 155, 160, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 175, + 175, 178, 180, 189, 158, 159, 176, 177, + 130, 134, 139, 167, 163, 164, 165, 166, + 132, 133, 134, 159, 160, 177, 178, 255, + 166, 173, 135, 145, 146, 147, 131, 179, + 188, 128, 130, 180, 181, 182, 185, 186, + 255, 165, 129, 255, 169, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 131, 140, + 141, 188, 176, 178, 180, 183, 184, 190, + 191, 129, 171, 181, 182, 172, 173, 174, + 175, 165, 168, 172, 173, 163, 170, 172, + 184, 190, 158, 128, 143, 160, 175, 144, + 145, 150, 155, 157, 158, 159, 135, 139, + 141, 168, 171, 189, 160, 182, 186, 191, + 129, 131, 133, 134, 140, 143, 184, 186, + 165, 166, 128, 129, 130, 132, 133, 134, + 135, 136, 139, 140, 141, 144, 145, 146, + 147, 150, 151, 152, 153, 154, 156, 176, + 178, 129, 128, 130, 184, 255, 135, 190, + 130, 131, 175, 176, 178, 183, 184, 187, + 255, 172, 128, 130, 167, 180, 179, 130, + 128, 129, 179, 181, 182, 190, 191, 255, + 129, 137, 138, 140, 141, 255, 180, 190, + 172, 174, 175, 177, 178, 181, 182, 183, + 159, 160, 162, 163, 170, 188, 190, 191, + 128, 129, 130, 131, 128, 151, 129, 132, + 135, 136, 139, 141, 162, 163, 166, 172, + 176, 180, 181, 183, 184, 191, 133, 128, + 129, 130, 134, 176, 185, 189, 177, 178, + 179, 186, 187, 190, 191, 255, 129, 132, + 255, 175, 190, 176, 177, 178, 181, 184, + 187, 188, 255, 129, 155, 158, 255, 189, + 176, 178, 179, 186, 187, 190, 191, 255, + 129, 255, 172, 182, 171, 173, 174, 175, + 176, 183, 166, 157, 159, 160, 161, 162, + 171, 175, 190, 176, 182, 184, 191, 169, + 177, 180, 146, 167, 170, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 166, 173, 165, 169, 174, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 143, + 187, 191, 160, 128, 129, 132, 135, 133, + 134, 160, 255, 192, 255, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 191, 128, 156, 161, 190, 192, + 255, 136, 164, 175, 176, 255, 135, 138, + 139, 187, 188, 191, 192, 255, 0, 127, + 192, 255, 187, 191, 128, 190, 191, 128, + 190, 188, 128, 175, 176, 189, 190, 191, + 145, 155, 157, 159, 128, 191, 130, 135, + 128, 191, 189, 128, 191, 128, 129, 130, + 131, 132, 191, 178, 128, 191, 128, 159, + 160, 163, 164, 191, 133, 128, 191, 128, + 178, 179, 186, 187, 191, 135, 142, 143, + 145, 146, 149, 150, 153, 154, 155, 164, + 
128, 191, 128, 165, 166, 191, 128, 255, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 131, 190, 191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 
176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 170, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 129, 128, 130, 184, 255, 135, 190, 130, + 131, 175, 176, 178, 183, 184, 187, 255, + 172, 128, 130, 167, 180, 179, 130, 128, + 129, 179, 181, 182, 190, 191, 255, 129, + 137, 138, 140, 141, 255, 180, 190, 172, + 174, 175, 177, 178, 181, 182, 183, 159, + 160, 162, 163, 170, 188, 190, 191, 128, + 129, 130, 131, 128, 151, 129, 132, 135, + 136, 139, 141, 162, 163, 166, 172, 176, + 180, 181, 183, 184, 191, 133, 128, 129, + 130, 134, 176, 185, 189, 177, 178, 179, + 186, 187, 190, 191, 255, 129, 132, 255, + 175, 190, 176, 177, 178, 181, 184, 187, + 188, 255, 
129, 155, 158, 255, 189, 176, + 178, 179, 186, 187, 190, 191, 255, 129, + 255, 172, 182, 171, 173, 174, 175, 176, + 183, 166, 157, 159, 160, 161, 162, 171, + 175, 190, 176, 182, 184, 191, 169, 177, + 180, 146, 167, 170, 182, 171, 172, 189, + 190, 176, 180, 176, 182, 143, 146, 178, + 157, 158, 133, 134, 137, 168, 169, 170, + 166, 173, 165, 169, 174, 178, 187, 255, + 131, 132, 140, 169, 174, 255, 130, 132, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 163, 165, + 128, 134, 136, 152, 155, 161, 163, 164, + 166, 170, 144, 150, 132, 138, 143, 187, + 191, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 139, 168, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 144, 145, 150, 155, 157, 158, 144, + 145, 150, 155, 157, 158, 159, 135, 166, + 191, 133, 128, 191, 128, 130, 131, 132, + 133, 137, 138, 139, 140, 191, 174, 188, + 128, 129, 130, 131, 132, 133, 134, 144, + 145, 165, 166, 169, 170, 175, 176, 184, + 185, 191, 128, 132, 170, 129, 135, 136, + 191, 181, 186, 128, 191, 144, 128, 148, + 149, 150, 151, 191, 128, 132, 133, 135, + 136, 138, 139, 143, 144, 191, 163, 128, + 179, 180, 182, 183, 191, 128, 129, 191, + 166, 176, 191, 128, 151, 152, 158, 159, + 178, 179, 185, 186, 187, 188, 190, 128, + 191, 160, 128, 191, 128, 130, 131, 135, + 191, 129, 134, 136, 190, 128, 159, 160, + 191, 128, 175, 176, 255, 10, 13, 127, + 194, 216, 219, 220, 224, 225, 226, 234, + 235, 236, 237, 239, 240, 243, 0, 31, + 128, 191, 192, 223, 227, 238, 241, 247, + 248, 255, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 239, 240, 243, 204, 205, + 210, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 234, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 239, 240, 243, 194, 216, + 219, 220, 224, 225, 226, 234, 235, 236, + 237, 239, 240, 243, 32, 126, 192, 223, + 227, 238, 241, 247, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 235, 236, 237, 239, 240, 243, 204, + 205, 210, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 234, + 237, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 237, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 237, 239, 240, 243, 204, 205, + 210, 214, 215, 216, 217, 219, 220, 221, + 222, 223, 224, 225, 226, 227, 234, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 235, 236, 237, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 239, 240, 243, 204, 205, 210, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 234, 239, 240, + 243, 204, 205, 210, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 234, 239, 240, 243, 204, 205, 210, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 234, 237, 239, + 240, 243, 204, 205, 210, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 234, 237, 239, 240, 243, 204, + 205, 210, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 234, + 
237, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, 204, 205, 210, 214, + 215, 216, 217, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 234, 239, 240, 243, + 204, 205, 210, 214, 215, 216, 217, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 234, 239, 240, 243, } var _graphclust_single_lengths []byte = []byte{ - 0, 1, 0, 0, 0, 1, 1, 0, - 1, 0, 1, 0, 0, 0, 26, 0, - 0, 0, 1, 1, 1, 0, 0, 2, - 1, 0, 1, 1, 0, 2, 0, 0, - 2, 0, 2, 1, 0, 1, 0, 3, - 0, 0, 1, 21, 0, 0, 3, 0, - 0, 0, 0, 0, 0, 1, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 1, - 0, 5, 2, 6, 0, 1, 0, 1, - 0, 2, 0, 0, 15, 0, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 0, - 2, 1, 1, 0, 3, 1, 0, 7, - 5, 1, 1, 0, 1, 0, 23, 0, - 0, 0, 0, 1, 0, 0, 1, 0, - 1, 1, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 4, 0, 0, - 0, 0, 1, 0, 6, 0, 0, 0, - 0, 0, 1, 3, 0, 0, 0, 3, - 0, 0, 0, 0, 1, 1, 0, 1, - 0, 1, 0, 0, 0, 29, 0, 0, - 0, 3, 2, 3, 2, 2, 2, 3, - 2, 2, 3, 3, 1, 2, 4, 2, - 2, 4, 4, 2, 0, 2, 0, 3, - 1, 0, 1, 21, 1, 0, 4, 0, - 0, 0, 1, 2, 0, 1, 1, 1, - 4, 0, 3, 1, 3, 2, 0, 3, - 0, 5, 2, 0, 0, 1, 0, 2, - 0, 0, 15, 0, 0, 0, 4, 0, - 0, 0, 3, 1, 0, 4, 1, 4, - 4, 3, 1, 0, 7, 5, 1, 1, - 0, 1, 0, 23, 1, 0, 1, 1, - 1, 1, 0, 2, 1, 3, 2, 0, - 1, 3, 1, 2, 0, 1, 0, 2, - 1, 2, 3, 4, 0, 0, 0, 1, - 0, 6, 2, 0, 0, 0, 0, 1, - 3, 0, 0, 0, 1, 0, 1, 4, - 0, 0, 0, 1, 1, 1, 4, 0, - 0, 0, 6, 0, 1, 1, 0, 0, - 0, 1, 1, 0, 1, 0, 1, 0, - 0, 0, 26, 0, 0, 0, 1, 1, - 1, 0, 0, 2, 1, 0, 1, 1, - 0, 2, 0, 0, 2, 0, 2, 1, - 0, 1, 0, 3, 0, 0, 1, 21, - 0, 0, 3, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 3, 0, 0, 0, - 0, 0, 0, 1, 0, 5, 2, 6, - 0, 1, 0, 1, 0, 2, 0, 0, - 15, 0, 0, 0, 3, 0, 0, 0, - 0, 0, 0, 0, 2, 1, 1, 0, - 3, 1, 0, 7, 5, 1, 1, 0, - 1, 0, 23, 0, 0, 0, 0, 1, - 0, 0, 1, 0, 1, 1, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 4, 0, 0, 0, 0, 1, 0, - 6, 0, 0, 0, 0, 0, 1, 3, - 0, 0, 0, 3, 0, 0, 0, 0, - 1, 1, 0, 1, 0, 1, 0, 0, - 0, 29, 0, 0, 0, 3, 2, 3, - 2, 2, 2, 3, 2, 2, 3, 3, - 1, 2, 4, 2, 2, 4, 4, 2, - 0, 2, 0, 3, 1, 0, 1, 21, - 1, 0, 4, 0, 0, 0, 1, 2, - 0, 1, 1, 1, 4, 0, 3, 1, - 3, 2, 0, 3, 0, 5, 2, 0, - 0, 1, 0, 2, 0, 0, 15, 0, - 0, 0, 4, 0, 0, 0, 3, 1, - 0, 4, 1, 4, 4, 3, 1, 0, - 7, 5, 1, 1, 0, 1, 0, 23, - 1, 0, 1, 1, 1, 1, 0, 2, - 1, 3, 2, 0, 1, 3, 1, 2, - 0, 1, 0, 2, 1, 2, 3, 4, - 0, 0, 0, 1, 0, 6, 2, 0, - 0, 0, 0, 1, 3, 0, 0, 0, - 1, 0, 1, 4, 0, 0, 0, 1, - 1, 1, 4, 0, 0, 0, 6, 0, - 0, 0, 1, 1, 2, 1, 1, 5, - 0, 24, 0, 24, 0, 0, 23, 0, - 0, 1, 0, 2, 0, 0, 0, 28, - 0, 3, 23, 2, 0, 2, 2, 3, - 2, 2, 2, 0, 54, 54, 27, 1, - 0, 5, 2, 0, 1, 1, 0, 0, - 14, 0, 3, 2, 2, 3, 2, 2, - 2, 54, 54, 27, 1, 0, 2, 0, - 1, 4, 2, 1, 0, 1, 0, 1, - 0, 11, 0, 7, 1, 0, 1, 0, - 2, 3, 2, 1, 0, 1, 1, 3, - 0, 1, 3, 0, 1, 1, 2, 1, - 1, 5, 0, 0, 0, 0, 1, 1, - 0, 1, 0, 1, 0, 0, 0, 26, - 0, 0, 0, 1, 1, 1, 0, 0, - 2, 1, 0, 1, 1, 0, 2, 0, - 0, 2, 0, 2, 1, 0, 1, 0, - 3, 0, 0, 
1, 21, 0, 0, 3, - 0, 0, 0, 0, 0, 0, 1, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 1, 0, 5, 2, 6, 0, 1, 0, - 1, 0, 2, 0, 0, 15, 0, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 0, 2, 1, 1, 0, 3, 1, 0, - 7, 5, 1, 1, 0, 1, 0, 23, - 0, 0, 0, 0, 1, 0, 0, 1, - 0, 1, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 4, 0, - 0, 0, 0, 1, 0, 6, 0, 0, - 0, 0, 0, 1, 3, 0, 0, 0, - 3, 0, 0, 0, 0, 1, 1, 0, - 1, 0, 1, 0, 0, 0, 29, 0, - 0, 0, 3, 2, 3, 2, 2, 2, - 3, 2, 2, 3, 3, 1, 2, 4, - 2, 2, 4, 4, 2, 0, 2, 0, - 3, 1, 0, 1, 21, 1, 0, 4, - 0, 0, 0, 1, 2, 0, 1, 1, - 1, 4, 0, 3, 1, 3, 2, 0, - 3, 0, 5, 2, 0, 0, 1, 0, - 2, 0, 0, 15, 0, 0, 0, 4, - 0, 0, 0, 3, 1, 0, 4, 1, - 4, 4, 3, 1, 0, 7, 5, 1, - 1, 0, 1, 0, 23, 1, 0, 1, - 1, 1, 1, 0, 2, 1, 3, 2, - 0, 1, 3, 1, 2, 0, 1, 0, - 2, 1, 2, 3, 4, 0, 0, 0, - 1, 0, 6, 2, 0, 0, 0, 0, - 1, 3, 0, 0, 0, 1, 0, 1, - 4, 0, 0, 0, 1, 1, 1, 4, - 0, 0, 0, 6, 24, 0, 24, 0, - 0, 23, 0, 0, 1, 0, 2, 0, - 0, 0, 28, 0, 3, 23, 2, 0, - 2, 2, 3, 2, 2, 2, 0, 54, - 54, 27, 1, 1, 5, 2, 0, 0, - 0, 1, 1, 0, 1, 0, 1, 0, - 0, 0, 26, 0, 0, 0, 1, 1, - 1, 0, 0, 2, 1, 0, 1, 1, - 0, 2, 0, 0, 2, 0, 2, 1, - 0, 1, 0, 3, 0, 0, 1, 21, - 0, 0, 3, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 3, 0, 0, 0, - 0, 0, 0, 1, 0, 5, 2, 0, - 0, 1, 0, 2, 0, 0, 15, 0, - 0, 0, 3, 0, 0, 0, 0, 0, - 0, 0, 2, 1, 1, 0, 3, 1, - 0, 6, 5, 1, 1, 0, 1, 0, - 23, 0, 0, 0, 0, 1, 0, 0, - 1, 0, 1, 1, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 4, - 0, 0, 0, 0, 1, 0, 6, 0, - 0, 0, 0, 0, 1, 3, 0, 0, - 0, 1, 4, 0, 0, 0, 6, 1, - 7, 3, 0, 0, 0, 0, 1, 1, - 0, 1, 0, 1, 0, 0, 0, 29, - 0, 0, 0, 3, 2, 3, 2, 2, - 2, 3, 2, 2, 3, 3, 1, 2, - 4, 2, 2, 4, 4, 2, 0, 2, - 0, 3, 1, 0, 1, 21, 1, 0, - 4, 0, 0, 0, 1, 2, 0, 1, - 1, 1, 4, 0, 3, 1, 3, 2, - 0, 3, 0, 5, 2, 0, 0, 1, - 0, 2, 0, 0, 15, 0, 0, 0, - 4, 0, 0, 0, 3, 1, 0, 4, - 1, 4, 4, 3, 1, 0, 7, 5, - 1, 1, 0, 1, 0, 23, 1, 0, - 1, 1, 1, 1, 0, 2, 1, 3, - 2, 0, 1, 3, 1, 2, 0, 1, - 0, 2, 1, 2, 3, 4, 0, 0, - 0, 1, 0, 6, 2, 0, 0, 0, - 0, 1, 3, 0, 0, 0, 1, 0, - 1, 4, 0, 0, 0, 1, 1, 0, - 1, 0, 0, 0, 1, 1, 0, 1, - 0, 1, 0, 0, 0, 29, 0, 0, - 0, 3, 0, 0, 0, 1, 1, 0, - 1, 0, 1, 0, 0, 0, 26, 0, - 0, 0, 1, 1, 1, 0, 0, 2, - 1, 0, 1, 1, 0, 2, 0, 0, - 2, 0, 2, 1, 0, 1, 0, 3, - 0, 0, 1, 21, 0, 0, 3, 0, - 0, 0, 0, 0, 0, 1, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 1, - 0, 5, 2, 6, 0, 1, 0, 1, - 0, 2, 0, 0, 15, 0, 0, 0, - 3, 0, 0, 0, 0, 0, 0, 0, - 2, 1, 1, 0, 3, 1, 0, 7, - 5, 1, 1, 0, 1, 0, 23, 0, - 0, 0, 0, 1, 0, 0, 1, 0, - 1, 1, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 4, 0, 0, - 0, 0, 1, 0, 6, 0, 0, 0, - 0, 0, 1, 3, 0, 0, 0, 3, - 0, 1, 1, 1, 4, 0, 0, 0, - 6, 2, 3, 2, 2, 2, 3, 2, - 2, 3, 3, 1, 2, 4, 2, 2, - 4, 4, 2, 0, 2, 0, 3, 1, - 0, 1, 21, 1, 0, 4, 0, 0, - 0, 1, 2, 0, 1, 1, 1, 4, - 0, 3, 1, 3, 2, 0, 3, 0, - 5, 2, 0, 0, 1, 0, 2, 0, - 0, 15, 0, 0, 0, 4, 0, 0, - 0, 3, 1, 0, 4, 1, 4, 4, - 3, 1, 0, 7, 5, 1, 1, 0, - 1, 0, 23, 1, 0, 1, 1, 1, - 1, 0, 2, 1, 3, 2, 0, 1, - 3, 1, 2, 0, 1, 0, 2, 1, - 2, 3, 4, 0, 0, 0, 1, 0, - 6, 2, 0, 0, 0, 0, 1, 3, - 0, 0, 0, 1, 0, 1, 4, 0, - 0, 0, 1, 0, 0, 14, 0, 3, - 2, 2, 3, 2, 2, 2, 54, 54, - 29, 1, 0, 0, 0, 0, 2, 1, - 1, 4, 2, 1, 0, 1, 0, 1, - 0, 11, 0, 0, 0, 0, 1, 1, - 0, 1, 0, 1, 0, 0, 0, 26, - 0, 0, 0, 1, 1, 1, 0, 0, - 2, 1, 0, 1, 1, 0, 2, 0, - 0, 2, 0, 2, 1, 0, 1, 0, - 3, 0, 0, 1, 21, 0, 0, 3, - 0, 0, 0, 0, 0, 0, 1, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 1, 0, 5, 2, 6, 0, 1, 0, - 1, 0, 2, 0, 0, 15, 0, 0, - 0, 3, 0, 0, 0, 0, 0, 0, - 0, 2, 1, 1, 0, 3, 1, 0, - 7, 5, 1, 1, 0, 1, 0, 23, - 0, 0, 0, 0, 1, 0, 0, 1, - 0, 1, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 4, 0, - 0, 0, 0, 1, 0, 6, 0, 0, - 0, 0, 0, 1, 3, 0, 0, 0, - 3, 0, 0, 0, 0, 1, 1, 0, - 1, 0, 1, 0, 0, 0, 29, 0, - 0, 0, 3, 2, 
3, 2, 2, 2, - 3, 2, 2, 3, 3, 1, 2, 4, - 2, 2, 4, 4, 2, 0, 2, 0, - 3, 1, 0, 1, 21, 1, 0, 4, - 0, 0, 0, 1, 2, 0, 1, 1, - 1, 4, 0, 3, 1, 3, 2, 0, - 3, 0, 5, 2, 0, 0, 1, 0, - 2, 0, 0, 15, 0, 0, 0, 4, - 0, 0, 0, 3, 1, 0, 4, 1, - 4, 4, 3, 1, 0, 7, 5, 1, - 1, 0, 1, 0, 23, 1, 0, 1, - 1, 1, 1, 0, 2, 1, 3, 2, - 0, 1, 3, 1, 2, 0, 1, 0, - 2, 1, 2, 3, 4, 0, 0, 0, - 1, 0, 6, 2, 0, 0, 0, 0, - 1, 3, 0, 0, 0, 1, 0, 1, - 4, 0, 0, 0, 1, 1, 1, 4, - 0, 0, 0, 6, 7, 1, 0, 1, - 0, 2, 3, 2, 1, 0, 1, 1, - 3, 0, 1, 5, 0, 0, 17, 20, - 20, 20, 14, 20, 20, 20, 23, 21, - 21, 21, 20, 23, 20, 20, 20, 21, - 21, 21, 20, 20, 20, 20, 20, 20, - 20, 20, 20, 20, + 0, 1, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 26, 0, + 0, 0, 1, 1, 1, 0, 0, 2, + 1, 0, 1, 1, 0, 2, 0, 0, + 2, 0, 2, 1, 0, 1, 0, 3, + 0, 0, 1, 21, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 1, + 0, 5, 2, 6, 0, 1, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 2, 1, 1, 0, 3, 1, 0, 7, + 5, 1, 1, 0, 1, 0, 23, 0, + 0, 0, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 0, 0, + 0, 0, 1, 0, 6, 0, 0, 0, + 0, 0, 1, 3, 0, 0, 0, 3, + 0, 0, 0, 0, 1, 1, 0, 1, + 0, 1, 0, 0, 0, 29, 0, 0, + 0, 3, 2, 3, 2, 2, 2, 3, + 2, 2, 3, 3, 1, 2, 4, 2, + 2, 4, 4, 2, 0, 2, 0, 3, + 1, 0, 1, 21, 1, 0, 4, 0, + 0, 0, 1, 2, 0, 1, 1, 1, + 4, 0, 3, 1, 3, 2, 0, 3, + 0, 5, 2, 0, 0, 1, 0, 2, + 0, 0, 15, 0, 0, 0, 4, 0, + 0, 0, 3, 1, 0, 4, 1, 4, + 4, 3, 1, 0, 7, 5, 1, 1, + 0, 1, 0, 23, 1, 0, 1, 1, + 1, 1, 0, 2, 1, 3, 2, 0, + 1, 3, 1, 2, 0, 1, 0, 2, + 1, 2, 3, 4, 0, 0, 0, 1, + 0, 6, 2, 0, 0, 0, 0, 1, + 3, 0, 0, 0, 1, 0, 1, 4, + 0, 0, 0, 1, 1, 1, 4, 0, + 0, 0, 6, 0, 1, 1, 0, 0, + 0, 1, 1, 0, 1, 0, 1, 0, + 0, 0, 26, 0, 0, 0, 1, 1, + 1, 0, 0, 2, 1, 0, 1, 1, + 0, 2, 0, 0, 2, 0, 2, 1, + 0, 1, 0, 3, 0, 0, 1, 21, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 1, 0, 5, 2, 6, + 0, 1, 0, 1, 0, 2, 0, 0, + 15, 0, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 0, 2, 1, 1, 0, + 3, 1, 0, 7, 5, 1, 1, 0, + 1, 0, 23, 0, 0, 0, 0, 1, + 0, 0, 1, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 4, 0, 0, 0, 0, 1, 0, + 6, 0, 0, 0, 0, 0, 1, 3, + 0, 0, 0, 3, 0, 0, 0, 0, + 1, 1, 0, 1, 0, 1, 0, 0, + 0, 29, 0, 0, 0, 3, 2, 3, + 2, 2, 2, 3, 2, 2, 3, 3, + 1, 2, 4, 2, 2, 4, 4, 2, + 0, 2, 0, 3, 1, 0, 1, 21, + 1, 0, 4, 0, 0, 0, 1, 2, + 0, 1, 1, 1, 4, 0, 3, 1, + 3, 2, 0, 3, 0, 5, 2, 0, + 0, 1, 0, 2, 0, 0, 15, 0, + 0, 0, 4, 0, 0, 0, 3, 1, + 0, 4, 1, 4, 4, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 1, 0, 1, 1, 1, 1, 0, 2, + 1, 3, 2, 0, 1, 3, 1, 2, + 0, 1, 0, 2, 1, 2, 3, 4, + 0, 0, 0, 1, 0, 6, 2, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 1, 0, 1, 4, 0, 0, 0, 1, + 1, 1, 4, 0, 0, 0, 6, 0, + 0, 0, 1, 1, 2, 1, 1, 5, + 0, 24, 0, 24, 0, 0, 23, 0, + 0, 1, 0, 2, 0, 0, 0, 28, + 0, 3, 23, 2, 0, 2, 2, 3, + 2, 2, 2, 0, 54, 54, 27, 1, + 0, 5, 2, 0, 1, 1, 0, 0, + 14, 0, 3, 2, 2, 3, 2, 2, + 2, 54, 54, 27, 1, 0, 2, 0, + 1, 4, 2, 1, 0, 1, 0, 1, + 0, 11, 0, 7, 1, 0, 1, 0, + 2, 3, 2, 1, 0, 1, 1, 3, + 0, 1, 3, 0, 1, 1, 2, 1, + 1, 5, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 26, + 0, 0, 0, 1, 1, 1, 0, 0, + 2, 1, 0, 1, 1, 0, 2, 0, + 0, 2, 0, 2, 1, 0, 1, 0, + 3, 0, 0, 1, 21, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 1, 0, 5, 2, 6, 0, 1, 0, + 1, 0, 2, 0, 0, 15, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 2, 1, 1, 0, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 0, 0, 0, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 0, + 0, 0, 0, 1, 0, 6, 0, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 3, 0, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 29, 0, + 0, 0, 3, 2, 3, 2, 2, 2, + 3, 2, 2, 3, 3, 1, 2, 4, + 2, 2, 4, 4, 2, 0, 2, 0, + 3, 1, 0, 1, 21, 1, 0, 4, + 
0, 0, 0, 1, 2, 0, 1, 1, + 1, 4, 0, 3, 1, 3, 2, 0, + 3, 0, 5, 2, 0, 0, 1, 0, + 2, 0, 0, 15, 0, 0, 0, 4, + 0, 0, 0, 3, 1, 0, 4, 1, + 4, 4, 3, 1, 0, 7, 5, 1, + 1, 0, 1, 0, 23, 1, 0, 1, + 1, 1, 1, 0, 2, 1, 3, 2, + 0, 1, 3, 1, 2, 0, 1, 0, + 2, 1, 2, 3, 4, 0, 0, 0, + 1, 0, 6, 2, 0, 0, 0, 0, + 1, 3, 0, 0, 0, 1, 0, 1, + 4, 0, 0, 0, 1, 1, 1, 4, + 0, 0, 0, 6, 24, 0, 24, 0, + 0, 23, 0, 0, 1, 0, 2, 0, + 0, 0, 28, 0, 3, 23, 2, 0, + 2, 2, 3, 2, 2, 2, 0, 54, + 54, 27, 1, 1, 5, 2, 0, 0, + 0, 1, 1, 0, 1, 0, 1, 0, + 0, 0, 26, 0, 0, 0, 1, 1, + 1, 0, 0, 2, 1, 0, 1, 1, + 0, 2, 0, 0, 2, 0, 2, 1, + 0, 1, 0, 3, 0, 0, 1, 21, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 3, 0, 0, 0, + 0, 0, 0, 1, 0, 5, 2, 0, + 0, 1, 0, 2, 0, 0, 15, 0, + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0, 2, 1, 1, 0, 3, 1, + 0, 6, 5, 1, 1, 0, 1, 0, + 23, 0, 0, 0, 0, 1, 0, 0, + 1, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 4, + 0, 0, 0, 0, 1, 0, 6, 0, + 0, 0, 0, 0, 1, 3, 0, 0, + 0, 1, 4, 0, 0, 0, 6, 1, + 7, 3, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 29, + 0, 0, 0, 3, 2, 3, 2, 2, + 2, 3, 2, 2, 3, 3, 1, 2, + 4, 2, 2, 4, 4, 2, 0, 2, + 0, 3, 1, 0, 1, 21, 1, 0, + 4, 0, 0, 0, 1, 2, 0, 1, + 1, 1, 4, 0, 3, 1, 3, 2, + 0, 3, 0, 5, 2, 0, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 4, 0, 0, 0, 3, 1, 0, 4, + 1, 4, 4, 3, 1, 0, 7, 5, + 1, 1, 0, 1, 0, 23, 1, 0, + 1, 1, 1, 1, 0, 2, 1, 3, + 2, 0, 1, 3, 1, 2, 0, 1, + 0, 2, 1, 2, 3, 4, 0, 0, + 0, 1, 0, 6, 2, 0, 0, 0, + 0, 1, 3, 0, 0, 0, 1, 0, + 1, 4, 0, 0, 0, 1, 1, 0, + 1, 0, 0, 0, 1, 1, 0, 1, + 0, 1, 0, 0, 0, 29, 0, 0, + 0, 3, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 26, 0, + 0, 0, 1, 1, 1, 0, 0, 2, + 1, 0, 1, 1, 0, 2, 0, 0, + 2, 0, 2, 1, 0, 1, 0, 3, + 0, 0, 1, 21, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 1, + 0, 5, 2, 6, 0, 1, 0, 1, + 0, 2, 0, 0, 15, 0, 0, 0, + 3, 0, 0, 0, 0, 0, 0, 0, + 2, 1, 1, 0, 3, 1, 0, 7, + 5, 1, 1, 0, 1, 0, 23, 0, + 0, 0, 0, 1, 0, 0, 1, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 0, 0, + 0, 0, 1, 0, 6, 0, 0, 0, + 0, 0, 1, 3, 0, 0, 0, 3, + 0, 1, 1, 1, 4, 0, 0, 0, + 6, 2, 3, 2, 2, 2, 3, 2, + 2, 3, 3, 1, 2, 4, 2, 2, + 4, 4, 2, 0, 2, 0, 3, 1, + 0, 1, 21, 1, 0, 4, 0, 0, + 0, 1, 2, 0, 1, 1, 1, 4, + 0, 3, 1, 3, 2, 0, 3, 0, + 5, 2, 0, 0, 1, 0, 2, 0, + 0, 15, 0, 0, 0, 4, 0, 0, + 0, 3, 1, 0, 4, 1, 4, 4, + 3, 1, 0, 7, 5, 1, 1, 0, + 1, 0, 23, 1, 0, 1, 1, 1, + 1, 0, 2, 1, 3, 2, 0, 1, + 3, 1, 2, 0, 1, 0, 2, 1, + 2, 3, 4, 0, 0, 0, 1, 0, + 6, 2, 0, 0, 0, 0, 1, 3, + 0, 0, 0, 1, 0, 1, 4, 0, + 0, 0, 1, 0, 0, 14, 0, 3, + 2, 2, 3, 2, 2, 2, 54, 54, + 29, 1, 0, 0, 0, 0, 2, 1, + 1, 4, 2, 1, 0, 1, 0, 1, + 0, 11, 0, 0, 0, 0, 1, 1, + 0, 1, 0, 1, 0, 0, 0, 26, + 0, 0, 0, 1, 1, 1, 0, 0, + 2, 1, 0, 1, 1, 0, 2, 0, + 0, 2, 0, 2, 1, 0, 1, 0, + 3, 0, 0, 1, 21, 0, 0, 3, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 1, 0, 5, 2, 6, 0, 1, 0, + 1, 0, 2, 0, 0, 15, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 2, 1, 1, 0, 3, 1, 0, + 7, 5, 1, 1, 0, 1, 0, 23, + 0, 0, 0, 0, 1, 0, 0, 1, + 0, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 0, + 0, 0, 0, 1, 0, 6, 0, 0, + 0, 0, 0, 1, 3, 0, 0, 0, + 3, 0, 0, 0, 0, 1, 1, 0, + 1, 0, 1, 0, 0, 0, 29, 0, + 0, 0, 3, 2, 3, 2, 2, 2, + 3, 2, 2, 3, 3, 1, 2, 4, + 2, 2, 4, 4, 2, 0, 2, 0, + 3, 1, 0, 1, 21, 1, 0, 4, + 0, 0, 0, 1, 2, 0, 1, 1, + 1, 4, 0, 3, 1, 3, 2, 0, + 3, 0, 5, 2, 0, 0, 1, 0, + 2, 0, 0, 15, 0, 0, 0, 4, + 0, 0, 0, 3, 1, 0, 4, 1, + 4, 4, 3, 1, 0, 7, 5, 1, + 1, 0, 1, 0, 23, 1, 0, 1, + 1, 1, 1, 0, 2, 1, 3, 2, + 0, 1, 3, 1, 2, 0, 1, 0, + 2, 1, 2, 3, 4, 0, 0, 0, + 1, 0, 6, 2, 0, 0, 0, 0, + 1, 3, 0, 0, 0, 1, 0, 1, + 4, 0, 0, 0, 1, 1, 1, 4, + 0, 0, 0, 6, 7, 1, 0, 1, + 0, 2, 3, 2, 1, 0, 1, 1, + 3, 0, 
1, 5, 0, 0, 17, 20, + 20, 20, 14, 20, 20, 20, 23, 21, + 21, 21, 20, 23, 20, 20, 20, 21, + 21, 21, 20, 20, 20, 20, 20, 20, + 20, 20, 20, 20, } var _graphclust_range_lengths []byte = []byte{ - 0, 0, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 2, 4, - 1, 2, 1, 2, 2, 5, 6, 2, - 2, 5, 1, 3, 2, 3, 5, 2, - 3, 1, 3, 1, 1, 2, 1, 2, - 1, 4, 0, 0, 2, 3, 1, 1, - 2, 2, 1, 2, 1, 1, 2, 1, - 2, 1, 2, 2, 2, 1, 1, 4, - 2, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 1, 1, 0, 2, 1, 1, - 1, 2, 2, 1, 1, 2, 2, 1, - 1, 3, 2, 2, 0, 0, 2, 0, - 0, 0, 0, 1, 4, 1, 0, 2, - 1, 2, 2, 0, 2, 2, 1, 1, - 2, 6, 1, 1, 1, 1, 2, 2, - 1, 1, 1, 2, 2, 0, 1, 1, - 1, 1, 0, 1, 0, 3, 3, 1, - 2, 2, 2, 0, 5, 1, 1, 0, - 1, 1, 1, 1, 1, 2, 1, 1, - 4, 1, 1, 1, 1, 1, 4, 1, - 2, 2, 5, 2, 6, 2, 8, 4, - 2, 5, 0, 3, 2, 4, 1, 6, - 2, 4, 4, 1, 1, 2, 1, 2, - 1, 4, 0, 0, 4, 4, 1, 1, - 2, 2, 2, 2, 1, 1, 6, 2, - 5, 1, 3, 3, 4, 4, 4, 4, - 2, 0, 0, 1, 1, 0, 1, 0, - 1, 1, 0, 2, 1, 1, 2, 4, - 1, 2, 4, 1, 5, 0, 3, 2, - 1, 0, 0, 2, 0, 0, 0, 0, - 1, 4, 1, 0, 2, 1, 4, 2, - 0, 4, 3, 4, 2, 2, 6, 2, - 2, 4, 1, 4, 2, 4, 1, 3, - 3, 2, 2, 0, 1, 1, 1, 0, - 1, 0, 3, 3, 1, 2, 2, 2, - 0, 5, 1, 1, 0, 1, 0, 1, - 1, 1, 0, 0, 0, 0, 1, 1, - 1, 0, 0, 1, 2, 2, 1, 1, - 1, 1, 2, 1, 1, 4, 1, 1, - 1, 1, 2, 4, 1, 2, 1, 2, - 2, 5, 6, 2, 2, 5, 1, 3, - 2, 3, 5, 2, 3, 1, 3, 1, - 1, 2, 1, 2, 1, 4, 0, 0, - 2, 3, 1, 1, 2, 2, 1, 2, - 1, 1, 2, 1, 2, 1, 2, 2, - 2, 1, 1, 4, 2, 0, 0, 0, - 1, 0, 1, 0, 1, 0, 1, 1, - 0, 2, 1, 1, 1, 2, 2, 1, - 1, 2, 2, 1, 1, 3, 2, 2, - 0, 0, 2, 0, 0, 0, 0, 1, - 4, 1, 0, 2, 1, 2, 2, 0, - 2, 2, 1, 1, 2, 6, 1, 1, - 1, 1, 2, 2, 1, 1, 1, 2, - 2, 0, 1, 1, 1, 1, 0, 1, - 0, 3, 3, 1, 2, 2, 2, 0, - 5, 1, 1, 0, 1, 1, 1, 1, - 1, 2, 1, 1, 4, 1, 1, 1, - 1, 1, 4, 1, 2, 2, 5, 2, - 6, 2, 8, 4, 2, 5, 0, 3, - 2, 4, 1, 6, 2, 4, 4, 1, - 1, 2, 1, 2, 1, 4, 0, 0, - 4, 4, 1, 1, 2, 2, 2, 2, - 1, 1, 6, 2, 5, 1, 3, 3, - 4, 4, 4, 4, 2, 0, 0, 1, - 1, 0, 1, 0, 1, 1, 0, 2, - 1, 1, 2, 4, 1, 2, 4, 1, - 5, 0, 3, 2, 1, 0, 0, 2, - 0, 0, 0, 0, 1, 4, 1, 0, - 2, 1, 4, 2, 0, 4, 3, 4, - 2, 2, 6, 2, 2, 4, 1, 4, - 2, 4, 1, 3, 3, 2, 2, 0, - 1, 1, 1, 0, 1, 0, 3, 3, - 1, 2, 2, 2, 0, 5, 1, 1, - 0, 1, 0, 1, 1, 1, 0, 0, - 0, 0, 1, 1, 1, 0, 0, 1, - 2, 3, 1, 1, 1, 1, 1, 1, - 1, 0, 1, 0, 1, 1, 0, 1, - 1, 0, 1, 0, 1, 3, 1, 2, - 2, 1, 0, 0, 1, 0, 0, 0, - 0, 0, 1, 0, 1, 1, 2, 2, - 2, 1, 3, 2, 1, 1, 3, 1, - 3, 3, 1, 0, 0, 0, 0, 0, - 1, 1, 1, 2, 2, 4, 1, 1, - 2, 1, 1, 1, 3, 1, 2, 1, - 2, 1, 2, 0, 0, 1, 1, 5, - 9, 2, 1, 3, 5, 3, 1, 6, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 2, - 1, 1, 4, 1, 1, 1, 1, 2, - 4, 1, 2, 1, 2, 2, 5, 6, - 2, 2, 5, 1, 3, 2, 3, 5, - 2, 3, 1, 3, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 2, 3, 1, - 1, 2, 2, 1, 2, 1, 1, 2, - 1, 2, 1, 2, 2, 2, 1, 1, - 4, 2, 0, 0, 0, 1, 0, 1, - 0, 1, 0, 1, 1, 0, 2, 1, - 1, 1, 2, 2, 1, 1, 2, 2, - 1, 1, 3, 2, 2, 0, 0, 2, - 0, 0, 0, 0, 1, 4, 1, 0, - 2, 1, 2, 2, 0, 2, 2, 1, - 1, 2, 6, 1, 1, 1, 1, 2, - 2, 1, 1, 1, 2, 2, 0, 1, - 1, 1, 1, 0, 1, 0, 3, 3, - 1, 2, 2, 2, 0, 5, 1, 1, - 0, 1, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 1, 4, - 1, 2, 2, 5, 2, 6, 2, 8, - 4, 2, 5, 0, 3, 2, 4, 1, - 6, 2, 4, 4, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 4, 4, 1, - 1, 2, 2, 2, 2, 1, 1, 6, - 2, 5, 1, 3, 3, 4, 4, 4, - 4, 2, 0, 0, 1, 1, 0, 1, - 0, 1, 1, 0, 2, 1, 1, 2, - 4, 1, 2, 4, 1, 5, 0, 3, - 2, 1, 0, 0, 2, 0, 0, 0, - 0, 1, 4, 1, 0, 2, 1, 4, - 2, 0, 4, 3, 4, 2, 2, 6, - 2, 2, 4, 1, 4, 2, 4, 1, - 3, 3, 2, 2, 0, 1, 1, 1, - 0, 1, 0, 3, 3, 1, 2, 2, - 2, 0, 5, 1, 1, 0, 1, 0, - 1, 1, 1, 0, 0, 0, 0, 1, - 1, 1, 0, 0, 0, 1, 0, 1, - 1, 0, 1, 1, 0, 1, 0, 1, - 3, 1, 2, 2, 1, 0, 0, 1, - 0, 0, 0, 0, 0, 1, 0, 1, - 1, 2, 2, 1, 1, 5, 1, 1, - 1, 1, 2, 
1, 1, 4, 1, 1, - 1, 1, 2, 4, 1, 2, 1, 2, - 2, 5, 6, 2, 2, 5, 1, 3, - 2, 3, 5, 2, 3, 1, 3, 1, - 1, 2, 1, 2, 1, 4, 0, 0, - 2, 3, 1, 1, 2, 2, 1, 2, - 1, 1, 2, 1, 2, 1, 2, 2, - 2, 1, 1, 4, 2, 0, 0, 1, - 1, 0, 1, 0, 1, 1, 0, 2, - 1, 1, 1, 2, 2, 1, 1, 2, - 2, 1, 1, 3, 2, 2, 0, 0, - 2, 0, 0, 0, 0, 1, 4, 1, - 0, 2, 1, 2, 2, 0, 2, 2, - 1, 1, 2, 6, 1, 1, 1, 1, - 2, 2, 1, 1, 1, 2, 2, 0, - 1, 1, 1, 1, 0, 1, 0, 3, - 3, 1, 2, 2, 2, 0, 5, 1, - 1, 0, 1, 1, 1, 0, 0, 0, - 0, 0, 1, 1, 1, 1, 1, 2, - 1, 1, 4, 1, 1, 1, 1, 1, - 4, 1, 2, 2, 5, 2, 6, 2, - 8, 4, 2, 5, 0, 3, 2, 4, - 1, 6, 2, 4, 4, 1, 1, 2, - 1, 2, 1, 4, 0, 0, 4, 4, - 1, 1, 2, 2, 2, 2, 1, 1, - 6, 2, 5, 1, 3, 3, 4, 4, - 4, 4, 2, 0, 0, 1, 1, 0, - 1, 0, 1, 1, 0, 2, 1, 1, - 2, 4, 1, 2, 4, 1, 5, 0, - 3, 2, 1, 0, 0, 2, 0, 0, - 0, 0, 1, 4, 1, 0, 2, 1, - 4, 2, 0, 4, 3, 4, 2, 2, - 6, 2, 2, 4, 1, 4, 2, 4, - 1, 3, 3, 2, 2, 0, 1, 1, - 1, 0, 1, 0, 3, 3, 1, 2, - 2, 2, 0, 5, 1, 1, 0, 1, - 0, 1, 1, 1, 0, 0, 0, 3, - 1, 1, 1, 1, 1, 2, 1, 1, - 4, 1, 1, 1, 1, 1, 4, 1, - 2, 2, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 2, 4, - 1, 2, 1, 2, 2, 5, 6, 2, - 2, 5, 1, 3, 2, 3, 5, 2, - 3, 1, 3, 1, 1, 2, 1, 2, - 1, 4, 0, 0, 2, 3, 1, 1, - 2, 2, 1, 2, 1, 1, 2, 1, - 2, 1, 2, 2, 2, 1, 1, 4, - 2, 0, 0, 0, 1, 0, 1, 0, - 1, 0, 1, 1, 0, 2, 1, 1, - 1, 2, 2, 1, 1, 2, 2, 1, - 1, 3, 2, 2, 0, 0, 2, 0, - 0, 0, 0, 1, 4, 1, 0, 2, - 1, 2, 2, 0, 2, 2, 1, 1, - 2, 6, 1, 1, 1, 1, 2, 2, - 1, 1, 1, 2, 2, 0, 1, 1, - 1, 1, 0, 1, 0, 3, 3, 1, - 2, 2, 2, 0, 5, 1, 1, 0, - 1, 0, 0, 0, 1, 1, 1, 0, - 0, 5, 2, 6, 2, 8, 4, 2, - 5, 0, 3, 2, 4, 1, 6, 2, - 4, 4, 1, 1, 2, 1, 2, 1, - 4, 0, 0, 4, 4, 1, 1, 2, - 2, 2, 2, 1, 1, 6, 2, 5, - 1, 3, 3, 4, 4, 4, 4, 2, - 0, 0, 1, 1, 0, 1, 0, 1, - 1, 0, 2, 1, 1, 2, 4, 1, - 2, 4, 1, 5, 0, 3, 2, 1, - 0, 0, 2, 0, 0, 0, 0, 1, - 4, 1, 0, 2, 1, 4, 2, 0, - 4, 3, 4, 2, 2, 6, 2, 2, - 4, 1, 4, 2, 4, 1, 3, 3, - 2, 2, 0, 1, 1, 1, 0, 1, - 0, 3, 3, 1, 2, 2, 2, 0, - 5, 1, 1, 0, 1, 0, 1, 1, - 1, 0, 1, 3, 1, 3, 3, 1, - 0, 0, 0, 0, 0, 1, 1, 1, - 3, 2, 4, 1, 0, 1, 1, 1, - 3, 1, 1, 1, 3, 1, 3, 1, - 3, 1, 2, 1, 1, 1, 1, 2, - 1, 1, 4, 1, 1, 1, 1, 2, - 4, 1, 2, 1, 2, 2, 5, 6, - 2, 2, 5, 1, 3, 2, 3, 5, - 2, 3, 1, 3, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 2, 3, 1, - 1, 2, 2, 1, 2, 1, 1, 2, - 1, 2, 1, 2, 2, 2, 1, 1, - 4, 2, 0, 0, 0, 1, 0, 1, - 0, 1, 0, 1, 1, 0, 2, 1, - 1, 1, 2, 2, 1, 1, 2, 2, - 1, 1, 3, 2, 2, 0, 0, 2, - 0, 0, 0, 0, 1, 4, 1, 0, - 2, 1, 2, 2, 0, 2, 2, 1, - 1, 2, 6, 1, 1, 1, 1, 2, - 2, 1, 1, 1, 2, 2, 0, 1, - 1, 1, 1, 0, 1, 0, 3, 3, - 1, 2, 2, 2, 0, 5, 1, 1, - 0, 1, 1, 1, 1, 1, 2, 1, - 1, 4, 1, 1, 1, 1, 1, 4, - 1, 2, 2, 5, 2, 6, 2, 8, - 4, 2, 5, 0, 3, 2, 4, 1, - 6, 2, 4, 4, 1, 1, 2, 1, - 2, 1, 4, 0, 0, 4, 4, 1, - 1, 2, 2, 2, 2, 1, 1, 6, - 2, 5, 1, 3, 3, 4, 4, 4, - 4, 2, 0, 0, 1, 1, 0, 1, - 0, 1, 1, 0, 2, 1, 1, 2, - 4, 1, 2, 4, 1, 5, 0, 3, - 2, 1, 0, 0, 2, 0, 0, 0, - 0, 1, 4, 1, 0, 2, 1, 4, - 2, 0, 4, 3, 4, 2, 2, 6, - 2, 2, 4, 1, 4, 2, 4, 1, - 3, 3, 2, 2, 0, 1, 1, 1, - 0, 1, 0, 3, 3, 1, 2, 2, - 2, 0, 5, 1, 1, 0, 1, 0, - 1, 1, 1, 0, 0, 0, 0, 1, - 1, 1, 0, 0, 0, 0, 1, 1, - 5, 9, 2, 1, 3, 5, 3, 1, - 6, 1, 1, 2, 2, 2, 6, 0, - 0, 0, 4, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 2, 4, + 1, 2, 1, 2, 2, 5, 6, 2, + 2, 5, 1, 3, 2, 3, 5, 2, + 3, 1, 3, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 2, 3, 1, 1, + 2, 2, 1, 2, 1, 1, 2, 1, + 2, 1, 2, 2, 2, 1, 1, 4, + 2, 0, 0, 0, 1, 0, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 1, 2, 2, 1, 1, 2, 2, 1, + 1, 3, 2, 2, 0, 0, 2, 0, + 0, 0, 0, 1, 4, 1, 0, 2, + 1, 2, 2, 0, 2, 2, 1, 1, + 2, 6, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 2, 
2, 0, 1, 1, + 1, 1, 0, 1, 0, 3, 3, 1, + 2, 2, 2, 0, 5, 1, 1, 0, + 1, 1, 1, 1, 1, 2, 1, 1, + 4, 1, 1, 1, 1, 1, 4, 1, + 2, 2, 5, 2, 6, 2, 8, 4, + 2, 5, 0, 3, 2, 4, 1, 6, + 2, 4, 4, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 4, 4, 1, 1, + 2, 2, 2, 2, 1, 1, 6, 2, + 5, 1, 3, 3, 4, 4, 4, 4, + 2, 0, 0, 1, 1, 0, 1, 0, + 1, 1, 0, 2, 1, 1, 2, 4, + 1, 2, 4, 1, 5, 0, 3, 2, + 1, 0, 0, 2, 0, 0, 0, 0, + 1, 4, 1, 0, 2, 1, 4, 2, + 0, 4, 3, 4, 2, 2, 6, 2, + 2, 4, 1, 4, 2, 4, 1, 3, + 3, 2, 2, 0, 1, 1, 1, 0, + 1, 0, 3, 3, 1, 2, 2, 2, + 0, 5, 1, 1, 0, 1, 0, 1, + 1, 1, 0, 0, 0, 0, 1, 1, + 1, 0, 0, 1, 2, 2, 1, 1, + 1, 1, 2, 1, 1, 4, 1, 1, + 1, 1, 2, 4, 1, 2, 1, 2, + 2, 5, 6, 2, 2, 5, 1, 3, + 2, 3, 5, 2, 3, 1, 3, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 2, 3, 1, 1, 2, 2, 1, 2, + 1, 1, 2, 1, 2, 1, 2, 2, + 2, 1, 1, 4, 2, 0, 0, 0, + 1, 0, 1, 0, 1, 0, 1, 1, + 0, 2, 1, 1, 1, 2, 2, 1, + 1, 2, 2, 1, 1, 3, 2, 2, + 0, 0, 2, 0, 0, 0, 0, 1, + 4, 1, 0, 2, 1, 2, 2, 0, + 2, 2, 1, 1, 2, 6, 1, 1, + 1, 1, 2, 2, 1, 1, 1, 2, + 2, 0, 1, 1, 1, 1, 0, 1, + 0, 3, 3, 1, 2, 2, 2, 0, + 5, 1, 1, 0, 1, 1, 1, 1, + 1, 2, 1, 1, 4, 1, 1, 1, + 1, 1, 4, 1, 2, 2, 5, 2, + 6, 2, 8, 4, 2, 5, 0, 3, + 2, 4, 1, 6, 2, 4, 4, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 4, 4, 1, 1, 2, 2, 2, 2, + 1, 1, 6, 2, 5, 1, 3, 3, + 4, 4, 4, 4, 2, 0, 0, 1, + 1, 0, 1, 0, 1, 1, 0, 2, + 1, 1, 2, 4, 1, 2, 4, 1, + 5, 0, 3, 2, 1, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 4, 2, 0, 4, 3, 4, + 2, 2, 6, 2, 2, 4, 1, 4, + 2, 4, 1, 3, 3, 2, 2, 0, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 0, 1, 1, 1, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 1, + 2, 3, 1, 1, 1, 1, 1, 1, + 1, 0, 1, 0, 1, 1, 0, 1, + 1, 0, 1, 0, 1, 3, 1, 2, + 2, 1, 0, 0, 1, 0, 0, 0, + 0, 0, 1, 0, 1, 1, 2, 2, + 2, 1, 3, 2, 1, 1, 3, 1, + 3, 3, 1, 0, 0, 0, 0, 0, + 1, 1, 1, 2, 2, 4, 1, 1, + 2, 1, 1, 1, 3, 1, 2, 1, + 2, 1, 2, 0, 0, 1, 1, 5, + 9, 2, 1, 3, 5, 3, 1, 6, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 2, + 4, 1, 2, 1, 2, 2, 5, 6, + 2, 2, 5, 1, 3, 2, 3, 5, + 2, 3, 1, 3, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 2, 3, 1, + 1, 2, 2, 1, 2, 1, 1, 2, + 1, 2, 1, 2, 2, 2, 1, 1, + 4, 2, 0, 0, 0, 1, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 1, + 1, 1, 2, 2, 1, 1, 2, 2, + 1, 1, 3, 2, 2, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 2, 2, 0, 2, 2, 1, + 1, 2, 6, 1, 1, 1, 1, 2, + 2, 1, 1, 1, 2, 2, 0, 1, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 1, 4, + 1, 2, 2, 5, 2, 6, 2, 8, + 4, 2, 5, 0, 3, 2, 4, 1, + 6, 2, 4, 4, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 4, 4, 1, + 1, 2, 2, 2, 2, 1, 1, 6, + 2, 5, 1, 3, 3, 4, 4, 4, + 4, 2, 0, 0, 1, 1, 0, 1, + 0, 1, 1, 0, 2, 1, 1, 2, + 4, 1, 2, 4, 1, 5, 0, 3, + 2, 1, 0, 0, 2, 0, 0, 0, + 0, 1, 4, 1, 0, 2, 1, 4, + 2, 0, 4, 3, 4, 2, 2, 6, + 2, 2, 4, 1, 4, 2, 4, 1, + 3, 3, 2, 2, 0, 1, 1, 1, + 0, 1, 0, 3, 3, 1, 2, 2, + 2, 0, 5, 1, 1, 0, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 0, 1, 0, 1, + 1, 0, 1, 1, 0, 1, 0, 1, + 3, 1, 2, 2, 1, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 0, 1, + 1, 2, 2, 1, 1, 5, 1, 1, + 1, 1, 2, 1, 1, 4, 1, 1, + 1, 1, 2, 4, 1, 2, 1, 2, + 2, 5, 6, 2, 2, 5, 1, 3, + 2, 3, 5, 2, 3, 1, 3, 1, + 1, 2, 1, 2, 1, 4, 0, 0, + 2, 3, 1, 1, 2, 2, 1, 2, + 1, 1, 2, 1, 2, 1, 2, 2, + 2, 1, 1, 4, 2, 0, 0, 1, + 1, 0, 1, 0, 1, 1, 0, 2, + 1, 1, 1, 2, 2, 1, 1, 2, + 2, 1, 1, 3, 2, 2, 0, 0, + 2, 0, 0, 0, 0, 1, 4, 1, + 0, 2, 1, 2, 2, 0, 2, 2, + 1, 1, 2, 6, 1, 1, 1, 1, + 2, 2, 1, 1, 1, 2, 2, 0, + 1, 1, 1, 1, 0, 1, 0, 3, + 3, 1, 2, 2, 2, 0, 5, 1, + 1, 0, 1, 1, 1, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 1, + 4, 1, 2, 2, 5, 2, 6, 2, + 8, 4, 2, 5, 0, 3, 2, 4, + 1, 6, 2, 4, 4, 1, 1, 2, + 1, 
2, 1, 4, 0, 0, 4, 4, + 1, 1, 2, 2, 2, 2, 1, 1, + 6, 2, 5, 1, 3, 3, 4, 4, + 4, 4, 2, 0, 0, 1, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 2, 4, 1, 2, 4, 1, 5, 0, + 3, 2, 1, 0, 0, 2, 0, 0, + 0, 0, 1, 4, 1, 0, 2, 1, + 4, 2, 0, 4, 3, 4, 2, 2, + 6, 2, 2, 4, 1, 4, 2, 4, + 1, 3, 3, 2, 2, 0, 1, 1, + 1, 0, 1, 0, 3, 3, 1, 2, + 2, 2, 0, 5, 1, 1, 0, 1, + 0, 1, 1, 1, 0, 0, 0, 3, + 1, 1, 1, 1, 1, 2, 1, 1, + 4, 1, 1, 1, 1, 1, 4, 1, + 2, 2, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 2, 4, + 1, 2, 1, 2, 2, 5, 6, 2, + 2, 5, 1, 3, 2, 3, 5, 2, + 3, 1, 3, 1, 1, 2, 1, 2, + 1, 4, 0, 0, 2, 3, 1, 1, + 2, 2, 1, 2, 1, 1, 2, 1, + 2, 1, 2, 2, 2, 1, 1, 4, + 2, 0, 0, 0, 1, 0, 1, 0, + 1, 0, 1, 1, 0, 2, 1, 1, + 1, 2, 2, 1, 1, 2, 2, 1, + 1, 3, 2, 2, 0, 0, 2, 0, + 0, 0, 0, 1, 4, 1, 0, 2, + 1, 2, 2, 0, 2, 2, 1, 1, + 2, 6, 1, 1, 1, 1, 2, 2, + 1, 1, 1, 2, 2, 0, 1, 1, + 1, 1, 0, 1, 0, 3, 3, 1, + 2, 2, 2, 0, 5, 1, 1, 0, + 1, 0, 0, 0, 1, 1, 1, 0, + 0, 5, 2, 6, 2, 8, 4, 2, + 5, 0, 3, 2, 4, 1, 6, 2, + 4, 4, 1, 1, 2, 1, 2, 1, + 4, 0, 0, 4, 4, 1, 1, 2, + 2, 2, 2, 1, 1, 6, 2, 5, + 1, 3, 3, 4, 4, 4, 4, 2, + 0, 0, 1, 1, 0, 1, 0, 1, + 1, 0, 2, 1, 1, 2, 4, 1, + 2, 4, 1, 5, 0, 3, 2, 1, + 0, 0, 2, 0, 0, 0, 0, 1, + 4, 1, 0, 2, 1, 4, 2, 0, + 4, 3, 4, 2, 2, 6, 2, 2, + 4, 1, 4, 2, 4, 1, 3, 3, + 2, 2, 0, 1, 1, 1, 0, 1, + 0, 3, 3, 1, 2, 2, 2, 0, + 5, 1, 1, 0, 1, 0, 1, 1, + 1, 0, 1, 3, 1, 3, 3, 1, + 0, 0, 0, 0, 0, 1, 1, 1, + 3, 2, 4, 1, 0, 1, 1, 1, + 3, 1, 1, 1, 3, 1, 3, 1, + 3, 1, 2, 1, 1, 1, 1, 2, + 1, 1, 4, 1, 1, 1, 1, 2, + 4, 1, 2, 1, 2, 2, 5, 6, + 2, 2, 5, 1, 3, 2, 3, 5, + 2, 3, 1, 3, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 2, 3, 1, + 1, 2, 2, 1, 2, 1, 1, 2, + 1, 2, 1, 2, 2, 2, 1, 1, + 4, 2, 0, 0, 0, 1, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 1, + 1, 1, 2, 2, 1, 1, 2, 2, + 1, 1, 3, 2, 2, 0, 0, 2, + 0, 0, 0, 0, 1, 4, 1, 0, + 2, 1, 2, 2, 0, 2, 2, 1, + 1, 2, 6, 1, 1, 1, 1, 2, + 2, 1, 1, 1, 2, 2, 0, 1, + 1, 1, 1, 0, 1, 0, 3, 3, + 1, 2, 2, 2, 0, 5, 1, 1, + 0, 1, 1, 1, 1, 1, 2, 1, + 1, 4, 1, 1, 1, 1, 1, 4, + 1, 2, 2, 5, 2, 6, 2, 8, + 4, 2, 5, 0, 3, 2, 4, 1, + 6, 2, 4, 4, 1, 1, 2, 1, + 2, 1, 4, 0, 0, 4, 4, 1, + 1, 2, 2, 2, 2, 1, 1, 6, + 2, 5, 1, 3, 3, 4, 4, 4, + 4, 2, 0, 0, 1, 1, 0, 1, + 0, 1, 1, 0, 2, 1, 1, 2, + 4, 1, 2, 4, 1, 5, 0, 3, + 2, 1, 0, 0, 2, 0, 0, 0, + 0, 1, 4, 1, 0, 2, 1, 4, + 2, 0, 4, 3, 4, 2, 2, 6, + 2, 2, 4, 1, 4, 2, 4, 1, + 3, 3, 2, 2, 0, 1, 1, 1, + 0, 1, 0, 3, 3, 1, 2, 2, + 2, 0, 5, 1, 1, 0, 1, 0, + 1, 1, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 0, 0, 1, 1, + 5, 9, 2, 1, 3, 5, 3, 1, + 6, 1, 1, 2, 2, 2, 6, 0, + 0, 0, 4, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, } var _graphclust_index_offsets []int16 = []int16{ - 0, 0, 2, 4, 6, 8, 11, 15, - 17, 20, 25, 28, 30, 32, 34, 63, - 68, 70, 73, 76, 80, 84, 90, 97, - 102, 106, 112, 115, 120, 123, 129, 135, - 138, 144, 146, 152, 155, 157, 161, 163, - 169, 171, 176, 178, 200, 203, 207, 212, - 214, 217, 220, 222, 225, 227, 230, 233, - 235, 241, 243, 246, 249, 252, 254, 256, - 262, 265, 271, 274, 281, 283, 285, 287, - 289, 291, 294, 296, 298, 314, 317, 319, - 321, 326, 329, 332, 334, 336, 339, 342, - 344, 348, 353, 357, 360, 364, 366, 369, - 377, 383, 385, 387, 389, 395, 397, 421, - 424, 426, 429, 432, 434, 437, 440, 443, - 445, 449, 457, 459, 461, 463, 465, 468, - 471, 473, 475, 477, 480, 483, 488, 490, - 492, 494, 496, 498, 500, 507, 511, 515, - 517, 520, 523, 527, 531, 537, 539, 541, - 545, 547, 549, 551, 553, 556, 560, 562, - 565, 570, 573, 575, 577, 579, 610, 615, - 617, 620, 626, 634, 640, 649, 654, 665, - 673, 678, 686, 690, 697, 701, 708, 714, - 723, 728, 737, 746, 750, 752, 757, 759, - 765, 
768, 773, 775, 797, 803, 808, 814, - 816, 819, 822, 826, 831, 833, 836, 844, - 848, 858, 860, 867, 872, 880, 887, 892, - 900, 903, 909, 912, 914, 916, 918, 920, - 923, 925, 927, 943, 946, 948, 950, 957, - 962, 964, 967, 975, 978, 984, 989, 994, - 1001, 1007, 1011, 1013, 1016, 1024, 1030, 1032, - 1034, 1036, 1042, 1044, 1068, 1072, 1074, 1080, - 1084, 1086, 1092, 1096, 1103, 1107, 1113, 1122, - 1125, 1129, 1137, 1140, 1147, 1150, 1156, 1158, - 1164, 1169, 1174, 1180, 1185, 1187, 1189, 1191, - 1193, 1195, 1202, 1208, 1212, 1214, 1217, 1220, - 1224, 1228, 1234, 1236, 1238, 1240, 1242, 1244, - 1250, 1252, 1254, 1255, 1257, 1259, 1261, 1267, - 1269, 1271, 1272, 1279, 1281, 1285, 1289, 1291, - 1293, 1295, 1298, 1302, 1304, 1307, 1312, 1315, - 1317, 1319, 1321, 1350, 1355, 1357, 1360, 1363, - 1367, 1371, 1377, 1384, 1389, 1393, 1399, 1402, - 1407, 1410, 1416, 1422, 1425, 1431, 1433, 1439, - 1442, 1444, 1448, 1450, 1456, 1458, 1463, 1465, - 1487, 1490, 1494, 1499, 1501, 1504, 1507, 1509, - 1512, 1514, 1517, 1520, 1522, 1528, 1530, 1533, - 1536, 1539, 1541, 1543, 1549, 1552, 1558, 1561, - 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, - 1585, 1601, 1604, 1606, 1608, 1613, 1616, 1619, - 1621, 1623, 1626, 1629, 1631, 1635, 1640, 1644, - 1647, 1651, 1653, 1656, 1664, 1670, 1672, 1674, - 1676, 1682, 1684, 1708, 1711, 1713, 1716, 1719, - 1721, 1724, 1727, 1730, 1732, 1736, 1744, 1746, - 1748, 1750, 1752, 1755, 1758, 1760, 1762, 1764, - 1767, 1770, 1775, 1777, 1779, 1781, 1783, 1785, - 1787, 1794, 1798, 1802, 1804, 1807, 1810, 1814, - 1818, 1824, 1826, 1828, 1832, 1834, 1836, 1838, - 1840, 1843, 1847, 1849, 1852, 1857, 1860, 1862, - 1864, 1866, 1897, 1902, 1904, 1907, 1913, 1921, - 1927, 1936, 1941, 1952, 1960, 1965, 1973, 1977, - 1984, 1988, 1995, 2001, 2010, 2015, 2024, 2033, - 2037, 2039, 2044, 2046, 2052, 2055, 2060, 2062, - 2084, 2090, 2095, 2101, 2103, 2106, 2109, 2113, - 2118, 2120, 2123, 2131, 2135, 2145, 2147, 2154, - 2159, 2167, 2174, 2179, 2187, 2190, 2196, 2199, - 2201, 2203, 2205, 2207, 2210, 2212, 2214, 2230, - 2233, 2235, 2237, 2244, 2249, 2251, 2254, 2262, - 2265, 2271, 2276, 2281, 2288, 2294, 2298, 2300, - 2303, 2311, 2317, 2319, 2321, 2323, 2329, 2331, - 2355, 2359, 2361, 2367, 2371, 2373, 2379, 2383, - 2390, 2394, 2400, 2409, 2412, 2416, 2424, 2427, - 2434, 2437, 2443, 2445, 2451, 2456, 2461, 2467, - 2472, 2474, 2476, 2478, 2480, 2482, 2489, 2495, - 2499, 2501, 2504, 2507, 2511, 2515, 2521, 2523, - 2525, 2527, 2529, 2531, 2537, 2539, 2541, 2542, - 2544, 2546, 2548, 2554, 2556, 2558, 2559, 2566, - 2568, 2571, 2575, 2578, 2581, 2585, 2588, 2591, - 2598, 2600, 2625, 2627, 2652, 2654, 2656, 2680, - 2682, 2684, 2686, 2688, 2691, 2693, 2697, 2699, - 2730, 2733, 2738, 2762, 2765, 2767, 2770, 2773, - 2777, 2780, 2783, 2787, 2788, 2844, 2900, 2930, - 2934, 2937, 2944, 2950, 2953, 2956, 2959, 2963, - 2965, 2983, 2987, 2992, 2995, 2998, 3002, 3005, - 3008, 3012, 3068, 3124, 3154, 3158, 3163, 3167, - 3169, 3173, 3179, 3183, 3186, 3190, 3193, 3196, - 3199, 3202, 3215, 3218, 3226, 3228, 3230, 3233, - 3239, 3251, 3257, 3261, 3266, 3272, 3277, 3280, - 3290, 3292, 3295, 3300, 3302, 3305, 3308, 3312, - 3315, 3318, 3325, 3327, 3329, 3331, 3333, 3336, - 3340, 3342, 3345, 3350, 3353, 3355, 3357, 3359, - 3388, 3393, 3395, 3398, 3401, 3405, 3409, 3415, - 3422, 3427, 3431, 3437, 3440, 3445, 3448, 3454, - 3460, 3463, 3469, 3471, 3477, 3480, 3482, 3486, - 3488, 3494, 3496, 3501, 3503, 3525, 3528, 3532, - 3537, 3539, 3542, 3545, 3547, 3550, 3552, 3555, - 3558, 3560, 3566, 3568, 3571, 3574, 3577, 3579, - 3581, 
3587, 3590, 3596, 3599, 3606, 3608, 3610, - 3612, 3614, 3616, 3619, 3621, 3623, 3639, 3642, - 3644, 3646, 3651, 3654, 3657, 3659, 3661, 3664, - 3667, 3669, 3673, 3678, 3682, 3685, 3689, 3691, - 3694, 3702, 3708, 3710, 3712, 3714, 3720, 3722, - 3746, 3749, 3751, 3754, 3757, 3759, 3762, 3765, - 3768, 3770, 3774, 3782, 3784, 3786, 3788, 3790, - 3793, 3796, 3798, 3800, 3802, 3805, 3808, 3813, - 3815, 3817, 3819, 3821, 3823, 3825, 3832, 3836, - 3840, 3842, 3845, 3848, 3852, 3856, 3862, 3864, - 3866, 3870, 3872, 3874, 3876, 3878, 3881, 3885, - 3887, 3890, 3895, 3898, 3900, 3902, 3904, 3935, - 3940, 3942, 3945, 3951, 3959, 3965, 3974, 3979, - 3990, 3998, 4003, 4011, 4015, 4022, 4026, 4033, - 4039, 4048, 4053, 4062, 4071, 4075, 4077, 4082, - 4084, 4090, 4093, 4098, 4100, 4122, 4128, 4133, - 4139, 4141, 4144, 4147, 4151, 4156, 4158, 4161, - 4169, 4173, 4183, 4185, 4192, 4197, 4205, 4212, - 4217, 4225, 4228, 4234, 4237, 4239, 4241, 4243, - 4245, 4248, 4250, 4252, 4268, 4271, 4273, 4275, - 4282, 4287, 4289, 4292, 4300, 4303, 4309, 4314, - 4319, 4326, 4332, 4336, 4338, 4341, 4349, 4355, - 4357, 4359, 4361, 4367, 4369, 4393, 4397, 4399, - 4405, 4409, 4411, 4417, 4421, 4428, 4432, 4438, - 4447, 4450, 4454, 4462, 4465, 4472, 4475, 4481, - 4483, 4489, 4494, 4499, 4505, 4510, 4512, 4514, - 4516, 4518, 4520, 4527, 4533, 4537, 4539, 4542, - 4545, 4549, 4553, 4559, 4561, 4563, 4565, 4567, - 4569, 4575, 4577, 4579, 4580, 4582, 4584, 4586, - 4592, 4594, 4596, 4597, 4604, 4629, 4631, 4656, - 4658, 4660, 4684, 4686, 4688, 4690, 4692, 4695, - 4697, 4701, 4703, 4734, 4737, 4742, 4766, 4769, - 4771, 4774, 4777, 4781, 4784, 4787, 4791, 4792, - 4848, 4904, 4934, 4938, 4941, 4948, 4956, 4958, - 4960, 4962, 4965, 4969, 4971, 4974, 4979, 4982, - 4984, 4986, 4988, 5017, 5022, 5024, 5027, 5030, - 5034, 5038, 5044, 5051, 5056, 5060, 5066, 5069, - 5074, 5077, 5083, 5089, 5092, 5098, 5100, 5106, - 5109, 5111, 5115, 5117, 5123, 5125, 5130, 5132, - 5154, 5157, 5161, 5166, 5168, 5171, 5174, 5176, - 5179, 5181, 5184, 5187, 5189, 5195, 5197, 5200, - 5203, 5206, 5208, 5210, 5216, 5219, 5225, 5228, - 5230, 5232, 5234, 5236, 5239, 5241, 5243, 5259, - 5262, 5264, 5266, 5271, 5274, 5277, 5279, 5281, - 5284, 5287, 5289, 5293, 5298, 5302, 5305, 5309, - 5311, 5314, 5321, 5327, 5329, 5331, 5333, 5339, - 5341, 5365, 5368, 5370, 5373, 5376, 5378, 5381, - 5384, 5387, 5389, 5393, 5401, 5403, 5405, 5407, - 5409, 5412, 5415, 5417, 5419, 5421, 5424, 5427, - 5432, 5434, 5436, 5438, 5440, 5442, 5444, 5451, - 5455, 5459, 5461, 5464, 5467, 5471, 5475, 5481, - 5483, 5485, 5487, 5493, 5495, 5497, 5498, 5505, - 5507, 5515, 5519, 5521, 5523, 5525, 5527, 5530, - 5534, 5536, 5539, 5544, 5547, 5549, 5551, 5553, - 5584, 5589, 5591, 5594, 5600, 5608, 5614, 5623, - 5628, 5639, 5647, 5652, 5660, 5664, 5671, 5675, - 5682, 5688, 5697, 5702, 5711, 5720, 5724, 5726, - 5731, 5733, 5739, 5742, 5747, 5749, 5771, 5777, - 5782, 5788, 5790, 5793, 5796, 5800, 5805, 5807, - 5810, 5818, 5822, 5832, 5834, 5841, 5846, 5854, - 5861, 5866, 5874, 5877, 5883, 5886, 5888, 5890, - 5892, 5894, 5897, 5899, 5901, 5917, 5920, 5922, - 5924, 5931, 5936, 5938, 5941, 5949, 5952, 5958, - 5963, 5968, 5975, 5981, 5985, 5987, 5990, 5998, - 6004, 6006, 6008, 6010, 6016, 6018, 6042, 6046, - 6048, 6054, 6058, 6060, 6066, 6070, 6077, 6081, - 6087, 6096, 6099, 6103, 6111, 6114, 6121, 6124, - 6130, 6132, 6138, 6143, 6148, 6154, 6159, 6161, - 6163, 6165, 6167, 6169, 6176, 6182, 6186, 6188, - 6191, 6194, 6198, 6202, 6208, 6210, 6212, 6214, - 6216, 6218, 6224, 6226, 6228, 6229, 6231, 6233, - 6237, 
6240, 6242, 6244, 6246, 6249, 6253, 6255, - 6258, 6263, 6266, 6268, 6270, 6272, 6303, 6308, - 6310, 6313, 6319, 6321, 6323, 6325, 6328, 6332, - 6334, 6337, 6342, 6345, 6347, 6349, 6351, 6380, - 6385, 6387, 6390, 6393, 6397, 6401, 6407, 6414, - 6419, 6423, 6429, 6432, 6437, 6440, 6446, 6452, - 6455, 6461, 6463, 6469, 6472, 6474, 6478, 6480, - 6486, 6488, 6493, 6495, 6517, 6520, 6524, 6529, - 6531, 6534, 6537, 6539, 6542, 6544, 6547, 6550, - 6552, 6558, 6560, 6563, 6566, 6569, 6571, 6573, - 6579, 6582, 6588, 6591, 6598, 6600, 6602, 6604, - 6606, 6608, 6611, 6613, 6615, 6631, 6634, 6636, - 6638, 6643, 6646, 6649, 6651, 6653, 6656, 6659, - 6661, 6665, 6670, 6674, 6677, 6681, 6683, 6686, - 6694, 6700, 6702, 6704, 6706, 6712, 6714, 6738, - 6741, 6743, 6746, 6749, 6751, 6754, 6757, 6760, - 6762, 6766, 6774, 6776, 6778, 6780, 6782, 6785, - 6788, 6790, 6792, 6794, 6797, 6800, 6805, 6807, - 6809, 6811, 6813, 6815, 6817, 6824, 6828, 6832, - 6834, 6837, 6840, 6844, 6848, 6854, 6856, 6858, - 6862, 6864, 6866, 6868, 6870, 6876, 6878, 6880, - 6881, 6888, 6896, 6902, 6911, 6916, 6927, 6935, - 6940, 6948, 6952, 6959, 6963, 6970, 6976, 6985, - 6990, 6999, 7008, 7012, 7014, 7019, 7021, 7027, - 7030, 7035, 7037, 7059, 7065, 7070, 7076, 7078, - 7081, 7084, 7088, 7093, 7095, 7098, 7106, 7110, - 7120, 7122, 7129, 7134, 7142, 7149, 7154, 7162, - 7165, 7171, 7174, 7176, 7178, 7180, 7182, 7185, - 7187, 7189, 7205, 7208, 7210, 7212, 7219, 7224, - 7226, 7229, 7237, 7240, 7246, 7251, 7256, 7263, - 7269, 7273, 7275, 7278, 7286, 7292, 7294, 7296, - 7298, 7304, 7306, 7330, 7334, 7336, 7342, 7346, - 7348, 7354, 7358, 7365, 7369, 7375, 7384, 7387, - 7391, 7399, 7402, 7409, 7412, 7418, 7420, 7426, - 7431, 7436, 7442, 7447, 7449, 7451, 7453, 7455, - 7457, 7464, 7470, 7474, 7476, 7479, 7482, 7486, - 7490, 7496, 7498, 7500, 7502, 7504, 7506, 7512, - 7514, 7516, 7517, 7520, 7524, 7526, 7544, 7548, - 7553, 7556, 7559, 7563, 7566, 7569, 7573, 7629, - 7685, 7718, 7722, 7727, 7729, 7730, 7732, 7736, - 7739, 7744, 7750, 7754, 7757, 7761, 7764, 7768, - 7771, 7775, 7788, 7791, 7793, 7795, 7797, 7800, - 7804, 7806, 7809, 7814, 7817, 7819, 7821, 7823, - 7852, 7857, 7859, 7862, 7865, 7869, 7873, 7879, - 7886, 7891, 7895, 7901, 7904, 7909, 7912, 7918, - 7924, 7927, 7933, 7935, 7941, 7944, 7946, 7950, - 7952, 7958, 7960, 7965, 7967, 7989, 7992, 7996, - 8001, 8003, 8006, 8009, 8011, 8014, 8016, 8019, - 8022, 8024, 8030, 8032, 8035, 8038, 8041, 8043, - 8045, 8051, 8054, 8060, 8063, 8070, 8072, 8074, - 8076, 8078, 8080, 8083, 8085, 8087, 8103, 8106, - 8108, 8110, 8115, 8118, 8121, 8123, 8125, 8128, - 8131, 8133, 8137, 8142, 8146, 8149, 8153, 8155, - 8158, 8166, 8172, 8174, 8176, 8178, 8184, 8186, - 8210, 8213, 8215, 8218, 8221, 8223, 8226, 8229, - 8232, 8234, 8238, 8246, 8248, 8250, 8252, 8254, - 8257, 8260, 8262, 8264, 8266, 8269, 8272, 8277, - 8279, 8281, 8283, 8285, 8287, 8289, 8296, 8300, - 8304, 8306, 8309, 8312, 8316, 8320, 8326, 8328, - 8330, 8334, 8336, 8338, 8340, 8342, 8345, 8349, - 8351, 8354, 8359, 8362, 8364, 8366, 8368, 8399, - 8404, 8406, 8409, 8415, 8423, 8429, 8438, 8443, - 8454, 8462, 8467, 8475, 8479, 8486, 8490, 8497, - 8503, 8512, 8517, 8526, 8535, 8539, 8541, 8546, - 8548, 8554, 8557, 8562, 8564, 8586, 8592, 8597, - 8603, 8605, 8608, 8611, 8615, 8620, 8622, 8625, - 8633, 8637, 8647, 8649, 8656, 8661, 8669, 8676, - 8681, 8689, 8692, 8698, 8701, 8703, 8705, 8707, - 8709, 8712, 8714, 8716, 8732, 8735, 8737, 8739, - 8746, 8751, 8753, 8756, 8764, 8767, 8773, 8778, - 8783, 8790, 8796, 8800, 8802, 8805, 8813, 8819, - 8821, 
8823, 8825, 8831, 8833, 8857, 8861, 8863, - 8869, 8873, 8875, 8881, 8885, 8892, 8896, 8902, - 8911, 8914, 8918, 8926, 8929, 8936, 8939, 8945, - 8947, 8953, 8958, 8963, 8969, 8974, 8976, 8978, - 8980, 8982, 8984, 8991, 8997, 9001, 9003, 9006, - 9009, 9013, 9017, 9023, 9025, 9027, 9029, 9031, - 9033, 9039, 9041, 9043, 9044, 9046, 9048, 9050, - 9056, 9058, 9060, 9061, 9068, 9076, 9078, 9080, - 9083, 9089, 9101, 9107, 9111, 9116, 9122, 9127, - 9130, 9140, 9142, 9145, 9153, 9156, 9159, 9183, - 9204, 9225, 9246, 9265, 9286, 9307, 9328, 9352, - 9374, 9396, 9418, 9439, 9463, 9484, 9505, 9526, - 9548, 9570, 9592, 9613, 9634, 9655, 9676, 9697, - 9718, 9739, 9760, 9781, + 0, 0, 2, 4, 6, 8, 11, 15, + 17, 20, 25, 28, 30, 32, 34, 63, + 68, 70, 73, 76, 80, 84, 90, 97, + 102, 106, 112, 115, 120, 123, 129, 135, + 138, 144, 146, 152, 155, 157, 161, 163, + 169, 171, 176, 178, 200, 203, 207, 212, + 214, 217, 220, 222, 225, 227, 230, 233, + 235, 241, 243, 246, 249, 252, 254, 256, + 262, 265, 271, 274, 281, 283, 285, 287, + 289, 291, 294, 296, 298, 314, 317, 319, + 321, 326, 329, 332, 334, 336, 339, 342, + 344, 348, 353, 357, 360, 364, 366, 369, + 377, 383, 385, 387, 389, 395, 397, 421, + 424, 426, 429, 432, 434, 437, 440, 443, + 445, 449, 457, 459, 461, 463, 465, 468, + 471, 473, 475, 477, 480, 483, 488, 490, + 492, 494, 496, 498, 500, 507, 511, 515, + 517, 520, 523, 527, 531, 537, 539, 541, + 545, 547, 549, 551, 553, 556, 560, 562, + 565, 570, 573, 575, 577, 579, 610, 615, + 617, 620, 626, 634, 640, 649, 654, 665, + 673, 678, 686, 690, 697, 701, 708, 714, + 723, 728, 737, 746, 750, 752, 757, 759, + 765, 768, 773, 775, 797, 803, 808, 814, + 816, 819, 822, 826, 831, 833, 836, 844, + 848, 858, 860, 867, 872, 880, 887, 892, + 900, 903, 909, 912, 914, 916, 918, 920, + 923, 925, 927, 943, 946, 948, 950, 957, + 962, 964, 967, 975, 978, 984, 989, 994, + 1001, 1007, 1011, 1013, 1016, 1024, 1030, 1032, + 1034, 1036, 1042, 1044, 1068, 1072, 1074, 1080, + 1084, 1086, 1092, 1096, 1103, 1107, 1113, 1122, + 1125, 1129, 1137, 1140, 1147, 1150, 1156, 1158, + 1164, 1169, 1174, 1180, 1185, 1187, 1189, 1191, + 1193, 1195, 1202, 1208, 1212, 1214, 1217, 1220, + 1224, 1228, 1234, 1236, 1238, 1240, 1242, 1244, + 1250, 1252, 1254, 1255, 1257, 1259, 1261, 1267, + 1269, 1271, 1272, 1279, 1281, 1285, 1289, 1291, + 1293, 1295, 1298, 1302, 1304, 1307, 1312, 1315, + 1317, 1319, 1321, 1350, 1355, 1357, 1360, 1363, + 1367, 1371, 1377, 1384, 1389, 1393, 1399, 1402, + 1407, 1410, 1416, 1422, 1425, 1431, 1433, 1439, + 1442, 1444, 1448, 1450, 1456, 1458, 1463, 1465, + 1487, 1490, 1494, 1499, 1501, 1504, 1507, 1509, + 1512, 1514, 1517, 1520, 1522, 1528, 1530, 1533, + 1536, 1539, 1541, 1543, 1549, 1552, 1558, 1561, + 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, + 1585, 1601, 1604, 1606, 1608, 1613, 1616, 1619, + 1621, 1623, 1626, 1629, 1631, 1635, 1640, 1644, + 1647, 1651, 1653, 1656, 1664, 1670, 1672, 1674, + 1676, 1682, 1684, 1708, 1711, 1713, 1716, 1719, + 1721, 1724, 1727, 1730, 1732, 1736, 1744, 1746, + 1748, 1750, 1752, 1755, 1758, 1760, 1762, 1764, + 1767, 1770, 1775, 1777, 1779, 1781, 1783, 1785, + 1787, 1794, 1798, 1802, 1804, 1807, 1810, 1814, + 1818, 1824, 1826, 1828, 1832, 1834, 1836, 1838, + 1840, 1843, 1847, 1849, 1852, 1857, 1860, 1862, + 1864, 1866, 1897, 1902, 1904, 1907, 1913, 1921, + 1927, 1936, 1941, 1952, 1960, 1965, 1973, 1977, + 1984, 1988, 1995, 2001, 2010, 2015, 2024, 2033, + 2037, 2039, 2044, 2046, 2052, 2055, 2060, 2062, + 2084, 2090, 2095, 2101, 2103, 2106, 2109, 2113, + 2118, 2120, 2123, 2131, 2135, 2145, 2147, 
2154, + 2159, 2167, 2174, 2179, 2187, 2190, 2196, 2199, + 2201, 2203, 2205, 2207, 2210, 2212, 2214, 2230, + 2233, 2235, 2237, 2244, 2249, 2251, 2254, 2262, + 2265, 2271, 2276, 2281, 2288, 2294, 2298, 2300, + 2303, 2311, 2317, 2319, 2321, 2323, 2329, 2331, + 2355, 2359, 2361, 2367, 2371, 2373, 2379, 2383, + 2390, 2394, 2400, 2409, 2412, 2416, 2424, 2427, + 2434, 2437, 2443, 2445, 2451, 2456, 2461, 2467, + 2472, 2474, 2476, 2478, 2480, 2482, 2489, 2495, + 2499, 2501, 2504, 2507, 2511, 2515, 2521, 2523, + 2525, 2527, 2529, 2531, 2537, 2539, 2541, 2542, + 2544, 2546, 2548, 2554, 2556, 2558, 2559, 2566, + 2568, 2571, 2575, 2578, 2581, 2585, 2588, 2591, + 2598, 2600, 2625, 2627, 2652, 2654, 2656, 2680, + 2682, 2684, 2686, 2688, 2691, 2693, 2697, 2699, + 2730, 2733, 2738, 2762, 2765, 2767, 2770, 2773, + 2777, 2780, 2783, 2787, 2788, 2844, 2900, 2930, + 2934, 2937, 2944, 2950, 2953, 2956, 2959, 2963, + 2965, 2983, 2987, 2992, 2995, 2998, 3002, 3005, + 3008, 3012, 3068, 3124, 3154, 3158, 3163, 3167, + 3169, 3173, 3179, 3183, 3186, 3190, 3193, 3196, + 3199, 3202, 3215, 3218, 3226, 3228, 3230, 3233, + 3239, 3251, 3257, 3261, 3266, 3272, 3277, 3280, + 3290, 3292, 3295, 3300, 3302, 3305, 3308, 3312, + 3315, 3318, 3325, 3327, 3329, 3331, 3333, 3336, + 3340, 3342, 3345, 3350, 3353, 3355, 3357, 3359, + 3388, 3393, 3395, 3398, 3401, 3405, 3409, 3415, + 3422, 3427, 3431, 3437, 3440, 3445, 3448, 3454, + 3460, 3463, 3469, 3471, 3477, 3480, 3482, 3486, + 3488, 3494, 3496, 3501, 3503, 3525, 3528, 3532, + 3537, 3539, 3542, 3545, 3547, 3550, 3552, 3555, + 3558, 3560, 3566, 3568, 3571, 3574, 3577, 3579, + 3581, 3587, 3590, 3596, 3599, 3606, 3608, 3610, + 3612, 3614, 3616, 3619, 3621, 3623, 3639, 3642, + 3644, 3646, 3651, 3654, 3657, 3659, 3661, 3664, + 3667, 3669, 3673, 3678, 3682, 3685, 3689, 3691, + 3694, 3702, 3708, 3710, 3712, 3714, 3720, 3722, + 3746, 3749, 3751, 3754, 3757, 3759, 3762, 3765, + 3768, 3770, 3774, 3782, 3784, 3786, 3788, 3790, + 3793, 3796, 3798, 3800, 3802, 3805, 3808, 3813, + 3815, 3817, 3819, 3821, 3823, 3825, 3832, 3836, + 3840, 3842, 3845, 3848, 3852, 3856, 3862, 3864, + 3866, 3870, 3872, 3874, 3876, 3878, 3881, 3885, + 3887, 3890, 3895, 3898, 3900, 3902, 3904, 3935, + 3940, 3942, 3945, 3951, 3959, 3965, 3974, 3979, + 3990, 3998, 4003, 4011, 4015, 4022, 4026, 4033, + 4039, 4048, 4053, 4062, 4071, 4075, 4077, 4082, + 4084, 4090, 4093, 4098, 4100, 4122, 4128, 4133, + 4139, 4141, 4144, 4147, 4151, 4156, 4158, 4161, + 4169, 4173, 4183, 4185, 4192, 4197, 4205, 4212, + 4217, 4225, 4228, 4234, 4237, 4239, 4241, 4243, + 4245, 4248, 4250, 4252, 4268, 4271, 4273, 4275, + 4282, 4287, 4289, 4292, 4300, 4303, 4309, 4314, + 4319, 4326, 4332, 4336, 4338, 4341, 4349, 4355, + 4357, 4359, 4361, 4367, 4369, 4393, 4397, 4399, + 4405, 4409, 4411, 4417, 4421, 4428, 4432, 4438, + 4447, 4450, 4454, 4462, 4465, 4472, 4475, 4481, + 4483, 4489, 4494, 4499, 4505, 4510, 4512, 4514, + 4516, 4518, 4520, 4527, 4533, 4537, 4539, 4542, + 4545, 4549, 4553, 4559, 4561, 4563, 4565, 4567, + 4569, 4575, 4577, 4579, 4580, 4582, 4584, 4586, + 4592, 4594, 4596, 4597, 4604, 4629, 4631, 4656, + 4658, 4660, 4684, 4686, 4688, 4690, 4692, 4695, + 4697, 4701, 4703, 4734, 4737, 4742, 4766, 4769, + 4771, 4774, 4777, 4781, 4784, 4787, 4791, 4792, + 4848, 4904, 4934, 4938, 4941, 4948, 4956, 4958, + 4960, 4962, 4965, 4969, 4971, 4974, 4979, 4982, + 4984, 4986, 4988, 5017, 5022, 5024, 5027, 5030, + 5034, 5038, 5044, 5051, 5056, 5060, 5066, 5069, + 5074, 5077, 5083, 5089, 5092, 5098, 5100, 5106, + 5109, 5111, 5115, 5117, 5123, 5125, 5130, 
5132, + 5154, 5157, 5161, 5166, 5168, 5171, 5174, 5176, + 5179, 5181, 5184, 5187, 5189, 5195, 5197, 5200, + 5203, 5206, 5208, 5210, 5216, 5219, 5225, 5228, + 5230, 5232, 5234, 5236, 5239, 5241, 5243, 5259, + 5262, 5264, 5266, 5271, 5274, 5277, 5279, 5281, + 5284, 5287, 5289, 5293, 5298, 5302, 5305, 5309, + 5311, 5314, 5321, 5327, 5329, 5331, 5333, 5339, + 5341, 5365, 5368, 5370, 5373, 5376, 5378, 5381, + 5384, 5387, 5389, 5393, 5401, 5403, 5405, 5407, + 5409, 5412, 5415, 5417, 5419, 5421, 5424, 5427, + 5432, 5434, 5436, 5438, 5440, 5442, 5444, 5451, + 5455, 5459, 5461, 5464, 5467, 5471, 5475, 5481, + 5483, 5485, 5487, 5493, 5495, 5497, 5498, 5505, + 5507, 5515, 5519, 5521, 5523, 5525, 5527, 5530, + 5534, 5536, 5539, 5544, 5547, 5549, 5551, 5553, + 5584, 5589, 5591, 5594, 5600, 5608, 5614, 5623, + 5628, 5639, 5647, 5652, 5660, 5664, 5671, 5675, + 5682, 5688, 5697, 5702, 5711, 5720, 5724, 5726, + 5731, 5733, 5739, 5742, 5747, 5749, 5771, 5777, + 5782, 5788, 5790, 5793, 5796, 5800, 5805, 5807, + 5810, 5818, 5822, 5832, 5834, 5841, 5846, 5854, + 5861, 5866, 5874, 5877, 5883, 5886, 5888, 5890, + 5892, 5894, 5897, 5899, 5901, 5917, 5920, 5922, + 5924, 5931, 5936, 5938, 5941, 5949, 5952, 5958, + 5963, 5968, 5975, 5981, 5985, 5987, 5990, 5998, + 6004, 6006, 6008, 6010, 6016, 6018, 6042, 6046, + 6048, 6054, 6058, 6060, 6066, 6070, 6077, 6081, + 6087, 6096, 6099, 6103, 6111, 6114, 6121, 6124, + 6130, 6132, 6138, 6143, 6148, 6154, 6159, 6161, + 6163, 6165, 6167, 6169, 6176, 6182, 6186, 6188, + 6191, 6194, 6198, 6202, 6208, 6210, 6212, 6214, + 6216, 6218, 6224, 6226, 6228, 6229, 6231, 6233, + 6237, 6240, 6242, 6244, 6246, 6249, 6253, 6255, + 6258, 6263, 6266, 6268, 6270, 6272, 6303, 6308, + 6310, 6313, 6319, 6321, 6323, 6325, 6328, 6332, + 6334, 6337, 6342, 6345, 6347, 6349, 6351, 6380, + 6385, 6387, 6390, 6393, 6397, 6401, 6407, 6414, + 6419, 6423, 6429, 6432, 6437, 6440, 6446, 6452, + 6455, 6461, 6463, 6469, 6472, 6474, 6478, 6480, + 6486, 6488, 6493, 6495, 6517, 6520, 6524, 6529, + 6531, 6534, 6537, 6539, 6542, 6544, 6547, 6550, + 6552, 6558, 6560, 6563, 6566, 6569, 6571, 6573, + 6579, 6582, 6588, 6591, 6598, 6600, 6602, 6604, + 6606, 6608, 6611, 6613, 6615, 6631, 6634, 6636, + 6638, 6643, 6646, 6649, 6651, 6653, 6656, 6659, + 6661, 6665, 6670, 6674, 6677, 6681, 6683, 6686, + 6694, 6700, 6702, 6704, 6706, 6712, 6714, 6738, + 6741, 6743, 6746, 6749, 6751, 6754, 6757, 6760, + 6762, 6766, 6774, 6776, 6778, 6780, 6782, 6785, + 6788, 6790, 6792, 6794, 6797, 6800, 6805, 6807, + 6809, 6811, 6813, 6815, 6817, 6824, 6828, 6832, + 6834, 6837, 6840, 6844, 6848, 6854, 6856, 6858, + 6862, 6864, 6866, 6868, 6870, 6876, 6878, 6880, + 6881, 6888, 6896, 6902, 6911, 6916, 6927, 6935, + 6940, 6948, 6952, 6959, 6963, 6970, 6976, 6985, + 6990, 6999, 7008, 7012, 7014, 7019, 7021, 7027, + 7030, 7035, 7037, 7059, 7065, 7070, 7076, 7078, + 7081, 7084, 7088, 7093, 7095, 7098, 7106, 7110, + 7120, 7122, 7129, 7134, 7142, 7149, 7154, 7162, + 7165, 7171, 7174, 7176, 7178, 7180, 7182, 7185, + 7187, 7189, 7205, 7208, 7210, 7212, 7219, 7224, + 7226, 7229, 7237, 7240, 7246, 7251, 7256, 7263, + 7269, 7273, 7275, 7278, 7286, 7292, 7294, 7296, + 7298, 7304, 7306, 7330, 7334, 7336, 7342, 7346, + 7348, 7354, 7358, 7365, 7369, 7375, 7384, 7387, + 7391, 7399, 7402, 7409, 7412, 7418, 7420, 7426, + 7431, 7436, 7442, 7447, 7449, 7451, 7453, 7455, + 7457, 7464, 7470, 7474, 7476, 7479, 7482, 7486, + 7490, 7496, 7498, 7500, 7502, 7504, 7506, 7512, + 7514, 7516, 7517, 7520, 7524, 7526, 7544, 7548, + 7553, 7556, 7559, 7563, 7566, 7569, 7573, 
7629, + 7685, 7718, 7722, 7727, 7729, 7730, 7732, 7736, + 7739, 7744, 7750, 7754, 7757, 7761, 7764, 7768, + 7771, 7775, 7788, 7791, 7793, 7795, 7797, 7800, + 7804, 7806, 7809, 7814, 7817, 7819, 7821, 7823, + 7852, 7857, 7859, 7862, 7865, 7869, 7873, 7879, + 7886, 7891, 7895, 7901, 7904, 7909, 7912, 7918, + 7924, 7927, 7933, 7935, 7941, 7944, 7946, 7950, + 7952, 7958, 7960, 7965, 7967, 7989, 7992, 7996, + 8001, 8003, 8006, 8009, 8011, 8014, 8016, 8019, + 8022, 8024, 8030, 8032, 8035, 8038, 8041, 8043, + 8045, 8051, 8054, 8060, 8063, 8070, 8072, 8074, + 8076, 8078, 8080, 8083, 8085, 8087, 8103, 8106, + 8108, 8110, 8115, 8118, 8121, 8123, 8125, 8128, + 8131, 8133, 8137, 8142, 8146, 8149, 8153, 8155, + 8158, 8166, 8172, 8174, 8176, 8178, 8184, 8186, + 8210, 8213, 8215, 8218, 8221, 8223, 8226, 8229, + 8232, 8234, 8238, 8246, 8248, 8250, 8252, 8254, + 8257, 8260, 8262, 8264, 8266, 8269, 8272, 8277, + 8279, 8281, 8283, 8285, 8287, 8289, 8296, 8300, + 8304, 8306, 8309, 8312, 8316, 8320, 8326, 8328, + 8330, 8334, 8336, 8338, 8340, 8342, 8345, 8349, + 8351, 8354, 8359, 8362, 8364, 8366, 8368, 8399, + 8404, 8406, 8409, 8415, 8423, 8429, 8438, 8443, + 8454, 8462, 8467, 8475, 8479, 8486, 8490, 8497, + 8503, 8512, 8517, 8526, 8535, 8539, 8541, 8546, + 8548, 8554, 8557, 8562, 8564, 8586, 8592, 8597, + 8603, 8605, 8608, 8611, 8615, 8620, 8622, 8625, + 8633, 8637, 8647, 8649, 8656, 8661, 8669, 8676, + 8681, 8689, 8692, 8698, 8701, 8703, 8705, 8707, + 8709, 8712, 8714, 8716, 8732, 8735, 8737, 8739, + 8746, 8751, 8753, 8756, 8764, 8767, 8773, 8778, + 8783, 8790, 8796, 8800, 8802, 8805, 8813, 8819, + 8821, 8823, 8825, 8831, 8833, 8857, 8861, 8863, + 8869, 8873, 8875, 8881, 8885, 8892, 8896, 8902, + 8911, 8914, 8918, 8926, 8929, 8936, 8939, 8945, + 8947, 8953, 8958, 8963, 8969, 8974, 8976, 8978, + 8980, 8982, 8984, 8991, 8997, 9001, 9003, 9006, + 9009, 9013, 9017, 9023, 9025, 9027, 9029, 9031, + 9033, 9039, 9041, 9043, 9044, 9046, 9048, 9050, + 9056, 9058, 9060, 9061, 9068, 9076, 9078, 9080, + 9083, 9089, 9101, 9107, 9111, 9116, 9122, 9127, + 9130, 9140, 9142, 9145, 9153, 9156, 9159, 9183, + 9204, 9225, 9246, 9265, 9286, 9307, 9328, 9352, + 9374, 9396, 9418, 9439, 9463, 9484, 9505, 9526, + 9548, 9570, 9592, 9613, 9634, 9655, 9676, 9697, + 9718, 9739, 9760, 9781, } var _graphclust_indicies []int16 = []int16{ - 0, 1, 3, 2, 2, 3, 3, 2, - 3, 3, 2, 3, 3, 3, 2, 3, - 2, 3, 3, 2, 3, 3, 3, 3, - 2, 3, 3, 2, 2, 3, 3, 2, - 3, 2, 4, 5, 6, 7, 8, 10, - 11, 12, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 9, 13, 2, 3, - 3, 3, 3, 2, 3, 2, 3, 3, - 2, 2, 2, 3, 2, 2, 2, 3, - 3, 3, 3, 2, 2, 2, 2, 2, - 2, 3, 2, 2, 2, 2, 2, 2, - 3, 2, 2, 2, 2, 3, 3, 3, - 3, 2, 3, 3, 3, 3, 3, 2, - 3, 3, 2, 3, 3, 3, 3, 2, - 3, 3, 2, 2, 2, 2, 2, 2, - 3, 3, 3, 3, 3, 3, 2, 3, - 3, 2, 2, 2, 2, 2, 2, 3, - 3, 2, 3, 3, 3, 3, 3, 2, - 3, 3, 2, 3, 2, 3, 3, 3, - 2, 3, 2, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 3, 3, 3, 2, - 3, 2, 32, 33, 34, 35, 36, 37, - 38, 39, 40, 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, 51, 52, 2, - 3, 3, 2, 3, 3, 3, 2, 3, - 3, 3, 3, 2, 3, 2, 3, 3, - 2, 3, 3, 2, 3, 2, 2, 2, - 3, 3, 2, 3, 3, 2, 3, 3, - 2, 3, 2, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 3, 2, 2, 2, - 3, 3, 3, 2, 3, 2, 3, 2, - 3, 3, 3, 3, 3, 2, 3, 3, - 2, 53, 54, 55, 56, 57, 2, 3, - 58, 2, 53, 54, 59, 55, 56, 57, - 2, 3, 2, 3, 2, 3, 2, 3, - 2, 3, 2, 60, 61, 2, 3, 2, - 3, 2, 62, 63, 64, 65, 66, 67, - 68, 69, 70, 71, 72, 73, 74, 75, - 76, 2, 3, 3, 2, 3, 2, 3, - 2, 3, 3, 3, 3, 2, 3, 3, - 2, 2, 2, 3, 3, 2, 3, 2, - 3, 3, 2, 2, 2, 3, 3, 2, - 3, 3, 3, 2, 3, 3, 3, 3, - 2, 3, 3, 
3, 2, 3, 3, 2, - 77, 78, 63, 2, 3, 2, 3, 3, - 2, 79, 80, 81, 82, 83, 84, 85, - 2, 86, 87, 88, 89, 90, 2, 3, - 2, 3, 2, 3, 2, 3, 3, 3, - 3, 3, 2, 3, 2, 91, 92, 93, - 94, 95, 96, 97, 98, 99, 100, 101, - 102, 103, 104, 105, 106, 107, 104, 108, - 109, 110, 111, 112, 2, 3, 3, 2, - 2, 3, 2, 2, 3, 3, 3, 2, - 3, 2, 3, 3, 2, 2, 2, 3, - 3, 3, 2, 3, 2, 3, 3, 3, - 2, 3, 3, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 2, 3, 2, 2, - 3, 3, 3, 2, 2, 2, 3, 2, - 3, 3, 2, 3, 2, 3, 3, 2, - 3, 3, 2, 113, 114, 115, 116, 2, - 3, 2, 3, 2, 3, 2, 3, 2, - 117, 2, 3, 2, 118, 119, 120, 121, - 122, 123, 2, 3, 3, 3, 2, 2, - 2, 2, 3, 3, 2, 3, 3, 2, - 2, 2, 3, 3, 3, 3, 2, 124, - 125, 126, 2, 3, 3, 3, 3, 3, - 2, 3, 2, 3, 2, 127, 128, 129, - 2, 130, 2, 2, 130, 2, 130, 130, - 2, 130, 130, 2, 130, 130, 130, 2, - 130, 2, 130, 130, 2, 130, 130, 130, - 130, 2, 130, 130, 2, 2, 130, 130, - 2, 130, 2, 131, 132, 133, 134, 135, - 136, 137, 139, 140, 141, 142, 143, 144, - 145, 146, 147, 148, 149, 150, 22, 151, - 152, 153, 154, 155, 156, 157, 158, 159, - 138, 2, 130, 130, 130, 130, 2, 130, - 2, 130, 130, 2, 3, 3, 2, 2, - 3, 130, 130, 2, 130, 130, 2, 130, - 2, 3, 130, 130, 130, 3, 3, 2, - 130, 130, 130, 2, 2, 2, 130, 2, - 3, 3, 130, 130, 3, 2, 130, 130, - 130, 2, 130, 2, 130, 2, 130, 2, - 3, 2, 2, 130, 130, 2, 130, 2, - 3, 130, 130, 3, 130, 2, 3, 130, - 130, 3, 3, 130, 130, 2, 130, 130, - 3, 2, 130, 130, 130, 3, 3, 3, - 2, 130, 3, 130, 2, 2, 2, 3, - 2, 2, 2, 130, 130, 130, 3, 130, - 3, 2, 130, 130, 3, 3, 3, 130, - 130, 130, 2, 130, 130, 3, 3, 2, - 2, 2, 130, 130, 130, 2, 130, 2, - 3, 130, 130, 130, 130, 3, 130, 3, - 3, 2, 130, 3, 130, 2, 130, 2, - 130, 3, 130, 130, 2, 130, 2, 130, - 130, 130, 130, 3, 2, 3, 130, 2, - 130, 130, 130, 130, 2, 130, 2, 160, - 161, 162, 163, 164, 165, 166, 167, 168, - 169, 170, 171, 172, 173, 174, 175, 176, - 177, 178, 179, 180, 2, 3, 130, 130, - 3, 130, 2, 3, 130, 130, 130, 2, - 130, 3, 130, 130, 130, 2, 130, 2, - 130, 130, 2, 130, 130, 2, 3, 130, - 3, 2, 130, 130, 130, 2, 3, 130, - 2, 130, 130, 2, 130, 130, 3, 130, - 3, 3, 130, 2, 130, 130, 3, 2, - 130, 130, 130, 130, 3, 130, 130, 3, - 130, 2, 130, 2, 3, 3, 3, 130, - 130, 3, 2, 130, 2, 130, 2, 3, - 3, 3, 3, 130, 130, 3, 130, 2, - 3, 130, 130, 3, 130, 3, 2, 3, - 130, 3, 130, 2, 3, 130, 130, 130, - 130, 3, 130, 2, 130, 130, 2, 181, - 182, 183, 184, 185, 2, 130, 58, 2, - 130, 2, 130, 2, 130, 2, 130, 2, - 186, 187, 2, 130, 2, 130, 2, 188, - 189, 190, 191, 66, 192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 2, 130, - 130, 2, 130, 2, 130, 2, 130, 130, - 130, 3, 3, 130, 2, 130, 2, 130, - 2, 3, 130, 2, 130, 3, 2, 3, - 130, 130, 130, 3, 130, 3, 2, 130, - 2, 3, 130, 3, 130, 3, 130, 2, - 130, 130, 3, 130, 2, 130, 130, 130, - 130, 2, 130, 3, 3, 130, 130, 3, - 2, 130, 130, 3, 130, 3, 2, 202, - 203, 189, 2, 130, 2, 130, 130, 2, - 204, 205, 206, 207, 208, 209, 210, 2, - 211, 212, 213, 214, 215, 2, 130, 2, - 130, 2, 130, 2, 130, 130, 130, 130, - 130, 2, 130, 2, 216, 217, 218, 219, - 220, 221, 222, 223, 224, 225, 226, 227, - 228, 229, 230, 231, 232, 233, 234, 235, - 236, 237, 238, 2, 130, 3, 130, 2, - 2, 130, 3, 2, 3, 3, 2, 130, - 3, 130, 130, 2, 130, 2, 3, 130, - 3, 130, 3, 2, 2, 130, 2, 3, - 130, 130, 3, 130, 3, 130, 2, 130, - 3, 130, 2, 130, 130, 3, 130, 3, - 2, 130, 130, 3, 3, 3, 3, 130, - 130, 2, 3, 130, 2, 3, 3, 130, - 2, 130, 3, 130, 3, 130, 3, 130, - 2, 3, 2, 130, 130, 3, 3, 130, - 3, 130, 2, 2, 2, 130, 130, 3, - 130, 3, 130, 2, 2, 130, 3, 3, - 130, 3, 130, 2, 3, 130, 3, 130, - 2, 3, 3, 130, 130, 2, 3, 3, - 3, 130, 130, 2, 239, 240, 115, 241, - 2, 130, 2, 130, 2, 130, 2, 242, - 
2, 130, 2, 243, 244, 245, 246, 247, - 248, 2, 3, 3, 130, 130, 130, 2, - 2, 2, 2, 130, 130, 2, 130, 130, - 2, 2, 2, 130, 130, 130, 130, 2, - 249, 250, 251, 2, 130, 130, 130, 130, - 130, 2, 130, 2, 130, 2, 252, 2, - 3, 2, 253, 2, 254, 255, 256, 258, - 257, 2, 130, 2, 2, 130, 130, 3, - 2, 3, 2, 259, 2, 260, 261, 262, - 264, 263, 2, 3, 2, 2, 3, 3, - 79, 80, 81, 82, 83, 84, 2, 3, - 1, 265, 265, 3, 1, 265, 266, 3, - 1, 267, 268, 267, 268, 268, 267, 268, - 268, 267, 268, 268, 268, 267, 268, 267, - 268, 268, 267, 268, 268, 268, 268, 267, - 268, 268, 267, 267, 268, 268, 267, 268, - 267, 269, 270, 271, 272, 273, 275, 276, - 277, 279, 280, 281, 282, 283, 284, 285, - 286, 287, 288, 289, 290, 291, 292, 293, - 294, 295, 296, 274, 278, 267, 268, 268, - 268, 268, 267, 268, 267, 268, 268, 267, - 267, 267, 268, 267, 267, 267, 268, 268, - 268, 268, 267, 267, 267, 267, 267, 267, - 268, 267, 267, 267, 267, 267, 267, 268, - 267, 267, 267, 267, 268, 268, 268, 268, - 267, 268, 268, 268, 268, 268, 267, 268, - 268, 267, 268, 268, 268, 268, 267, 268, - 268, 267, 267, 267, 267, 267, 267, 268, - 268, 268, 268, 268, 268, 267, 268, 268, - 267, 267, 267, 267, 267, 267, 268, 268, - 267, 268, 268, 268, 268, 268, 267, 268, - 268, 267, 268, 267, 268, 268, 268, 267, - 268, 267, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 268, 268, 268, 267, 268, - 267, 297, 298, 299, 300, 301, 302, 303, - 304, 305, 306, 307, 308, 309, 310, 311, - 312, 313, 314, 315, 316, 317, 267, 268, - 268, 267, 268, 268, 268, 267, 268, 268, - 268, 268, 267, 268, 267, 268, 268, 267, - 268, 268, 267, 268, 267, 267, 267, 268, - 268, 267, 268, 268, 267, 268, 268, 267, - 268, 267, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 268, 267, 267, 267, 268, - 268, 268, 267, 268, 267, 268, 267, 268, - 268, 268, 268, 268, 267, 268, 268, 267, - 318, 319, 320, 321, 322, 267, 268, 323, - 267, 318, 319, 324, 320, 321, 322, 267, - 268, 267, 268, 267, 268, 267, 268, 267, - 268, 267, 325, 326, 267, 268, 267, 268, - 267, 327, 328, 329, 330, 331, 332, 333, - 334, 335, 336, 337, 338, 339, 340, 341, - 267, 268, 268, 267, 268, 267, 268, 267, - 268, 268, 268, 268, 267, 268, 268, 267, - 267, 267, 268, 268, 267, 268, 267, 268, - 268, 267, 267, 267, 268, 268, 267, 268, - 268, 268, 267, 268, 268, 268, 268, 267, - 268, 268, 268, 267, 268, 268, 267, 342, - 343, 328, 267, 268, 267, 268, 268, 267, - 344, 345, 346, 347, 348, 349, 350, 267, - 351, 352, 353, 354, 355, 267, 268, 267, - 268, 267, 268, 267, 268, 268, 268, 268, - 268, 267, 268, 267, 356, 357, 358, 359, - 360, 361, 362, 363, 364, 365, 366, 367, - 368, 369, 370, 371, 372, 369, 373, 374, - 375, 376, 377, 267, 268, 268, 267, 267, - 268, 267, 267, 268, 268, 268, 267, 268, - 267, 268, 268, 267, 267, 267, 268, 268, - 268, 267, 268, 267, 268, 268, 268, 267, - 268, 268, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 267, 268, 267, 267, 268, - 268, 268, 267, 267, 267, 268, 267, 268, - 268, 267, 268, 267, 268, 268, 267, 268, - 268, 267, 378, 379, 380, 381, 267, 268, - 267, 268, 267, 268, 267, 268, 267, 382, - 267, 268, 267, 383, 384, 385, 386, 387, - 388, 267, 268, 268, 268, 267, 267, 267, - 267, 268, 268, 267, 268, 268, 267, 267, - 267, 268, 268, 268, 268, 267, 389, 390, - 391, 267, 268, 268, 268, 268, 268, 267, - 268, 267, 268, 267, 392, 393, 394, 267, - 395, 267, 395, 267, 267, 395, 395, 267, - 395, 395, 267, 395, 395, 395, 267, 395, - 267, 395, 395, 267, 395, 395, 395, 395, - 267, 395, 395, 267, 267, 395, 395, 267, - 395, 267, 396, 397, 398, 399, 400, 401, - 402, 404, 405, 406, 407, 408, 409, 410, - 411, 412, 413, 414, 415, 287, 416, 
417, - 418, 419, 420, 421, 422, 423, 424, 403, - 267, 395, 395, 395, 395, 267, 395, 267, - 395, 395, 267, 268, 268, 267, 267, 268, - 395, 395, 267, 395, 395, 267, 395, 267, - 268, 395, 395, 395, 268, 268, 267, 395, - 395, 395, 267, 267, 267, 395, 267, 268, - 268, 395, 395, 268, 267, 395, 395, 395, - 267, 395, 267, 395, 267, 395, 267, 268, - 267, 267, 395, 395, 267, 395, 267, 268, - 395, 395, 268, 395, 267, 268, 395, 395, - 268, 268, 395, 395, 267, 395, 395, 268, - 267, 395, 395, 395, 268, 268, 268, 267, - 395, 268, 395, 267, 267, 267, 268, 267, - 267, 267, 395, 395, 395, 268, 395, 268, - 267, 395, 395, 268, 268, 268, 395, 395, - 395, 267, 395, 395, 268, 268, 267, 267, - 267, 395, 395, 395, 267, 395, 267, 268, - 395, 395, 395, 395, 268, 395, 268, 268, - 267, 395, 268, 395, 267, 395, 267, 395, - 268, 395, 395, 267, 395, 267, 395, 395, - 395, 395, 268, 267, 268, 395, 267, 395, - 395, 395, 395, 267, 395, 267, 425, 426, - 427, 428, 429, 430, 431, 432, 433, 434, - 435, 436, 437, 438, 439, 440, 441, 442, - 443, 444, 445, 267, 268, 395, 395, 268, - 395, 267, 268, 395, 395, 395, 267, 395, - 268, 395, 395, 395, 267, 395, 267, 395, - 395, 267, 395, 395, 267, 268, 395, 268, - 267, 395, 395, 395, 267, 268, 395, 267, - 395, 395, 267, 395, 395, 268, 395, 268, - 268, 395, 267, 395, 395, 268, 267, 395, - 395, 395, 395, 268, 395, 395, 268, 395, - 267, 395, 267, 268, 268, 268, 395, 395, - 268, 267, 395, 267, 395, 267, 268, 268, - 268, 268, 395, 395, 268, 395, 267, 268, - 395, 395, 268, 395, 268, 267, 268, 395, - 268, 395, 267, 268, 395, 395, 395, 395, - 268, 395, 267, 395, 395, 267, 446, 447, - 448, 449, 450, 267, 395, 323, 267, 395, - 267, 395, 267, 395, 267, 395, 267, 451, - 452, 267, 395, 267, 395, 267, 453, 454, - 455, 456, 331, 457, 458, 459, 460, 461, - 462, 463, 464, 465, 466, 267, 395, 395, - 267, 395, 267, 395, 267, 395, 395, 395, - 268, 268, 395, 267, 395, 267, 395, 267, - 268, 395, 267, 395, 268, 267, 268, 395, - 395, 395, 268, 395, 268, 267, 395, 267, - 268, 395, 268, 395, 268, 395, 267, 395, - 395, 268, 395, 267, 395, 395, 395, 395, - 267, 395, 268, 268, 395, 395, 268, 267, - 395, 395, 268, 395, 268, 267, 467, 468, - 454, 267, 395, 267, 395, 395, 267, 469, - 470, 471, 472, 473, 474, 475, 267, 476, - 477, 478, 479, 480, 267, 395, 267, 395, - 267, 395, 267, 395, 395, 395, 395, 395, - 267, 395, 267, 481, 482, 483, 484, 485, - 486, 487, 488, 489, 490, 491, 492, 493, - 494, 495, 496, 497, 498, 499, 500, 501, - 502, 503, 267, 395, 268, 395, 267, 267, - 395, 268, 267, 268, 268, 267, 395, 268, - 395, 395, 267, 395, 267, 268, 395, 268, - 395, 268, 267, 267, 395, 267, 268, 395, - 395, 268, 395, 268, 395, 267, 395, 268, - 395, 267, 395, 395, 268, 395, 268, 267, - 395, 395, 268, 268, 268, 268, 395, 395, - 267, 268, 395, 267, 268, 268, 395, 267, - 395, 268, 395, 268, 395, 268, 395, 267, - 268, 267, 395, 395, 268, 268, 395, 268, - 395, 267, 267, 267, 395, 395, 268, 395, - 268, 395, 267, 267, 395, 268, 268, 395, - 268, 395, 267, 268, 395, 268, 395, 267, - 268, 268, 395, 395, 267, 268, 268, 268, - 395, 395, 267, 504, 505, 380, 506, 267, - 395, 267, 395, 267, 395, 267, 507, 267, - 395, 267, 508, 509, 510, 511, 512, 513, - 267, 268, 268, 395, 395, 395, 267, 267, - 267, 267, 395, 395, 267, 395, 395, 267, - 267, 267, 395, 395, 395, 395, 267, 514, - 515, 516, 267, 395, 395, 395, 395, 395, - 267, 395, 267, 395, 267, 517, 267, 268, - 267, 518, 267, 519, 520, 521, 523, 522, - 267, 395, 267, 267, 395, 395, 268, 267, - 268, 267, 524, 267, 525, 526, 527, 529, - 528, 267, 268, 267, 267, 268, 268, 344, - 345, 346, 347, 348, 
349, 267, 268, 267, - 268, 268, 267, 266, 268, 268, 267, 266, - 268, 267, 266, 268, 267, 531, 532, 530, - 267, 266, 268, 267, 266, 268, 267, 533, - 534, 535, 536, 537, 530, 267, 538, 267, - 297, 298, 299, 533, 534, 539, 300, 301, - 302, 303, 304, 305, 306, 307, 308, 309, - 310, 311, 312, 313, 314, 315, 316, 317, - 267, 540, 538, 297, 298, 299, 541, 535, - 536, 300, 301, 302, 303, 304, 305, 306, - 307, 308, 309, 310, 311, 312, 313, 314, - 315, 316, 317, 267, 540, 267, 542, 540, - 297, 298, 299, 543, 536, 300, 301, 302, - 303, 304, 305, 306, 307, 308, 309, 310, - 311, 312, 313, 314, 315, 316, 317, 267, - 542, 267, 267, 542, 544, 267, 542, 267, - 545, 546, 267, 540, 267, 267, 542, 267, - 540, 267, 540, 327, 328, 329, 330, 331, - 332, 333, 547, 335, 336, 337, 338, 339, - 340, 341, 549, 550, 551, 552, 553, 554, - 549, 550, 551, 552, 553, 554, 549, 548, - 555, 267, 268, 538, 267, 556, 556, 556, - 542, 267, 297, 298, 299, 541, 539, 300, - 301, 302, 303, 304, 305, 306, 307, 308, - 309, 310, 311, 312, 313, 314, 315, 316, - 317, 267, 545, 557, 267, 267, 540, 556, - 556, 542, 556, 556, 542, 556, 556, 556, - 542, 556, 556, 542, 556, 556, 542, 556, - 556, 267, 542, 542, 551, 552, 553, 554, - 548, 549, 551, 552, 553, 554, 548, 549, - 551, 552, 553, 554, 548, 549, 551, 552, - 553, 554, 548, 549, 551, 552, 553, 554, - 548, 549, 551, 552, 553, 554, 548, 549, - 551, 552, 553, 554, 548, 549, 551, 552, - 553, 554, 548, 549, 551, 552, 553, 554, - 548, 549, 550, 555, 552, 553, 554, 548, - 549, 550, 552, 553, 554, 548, 549, 550, - 552, 553, 554, 548, 549, 550, 552, 553, - 554, 548, 549, 550, 552, 553, 554, 548, - 549, 550, 552, 553, 554, 548, 549, 550, - 552, 553, 554, 548, 549, 550, 552, 553, - 554, 548, 549, 550, 552, 553, 554, 548, - 549, 550, 551, 555, 553, 554, 548, 549, - 550, 551, 553, 554, 548, 549, 550, 551, - 553, 554, 548, 549, 550, 551, 553, 554, - 548, 549, 550, 551, 553, 558, 557, 552, - 267, 555, 556, 267, 540, 542, 268, 268, - 267, 559, 560, 561, 562, 563, 530, 267, - 268, 323, 268, 268, 268, 267, 268, 268, - 267, 395, 268, 267, 395, 268, 267, 268, - 395, 268, 267, 530, 267, 564, 566, 567, - 568, 569, 570, 571, 566, 567, 568, 569, - 570, 571, 566, 530, 565, 555, 267, 268, - 538, 268, 267, 540, 540, 540, 542, 267, - 540, 540, 542, 540, 540, 542, 540, 540, - 540, 542, 540, 540, 542, 540, 540, 542, - 540, 540, 267, 542, 568, 569, 570, 571, - 565, 566, 568, 569, 570, 571, 565, 566, - 568, 569, 570, 571, 565, 566, 568, 569, - 570, 571, 565, 566, 568, 569, 570, 571, - 565, 566, 568, 569, 570, 571, 565, 566, - 568, 569, 570, 571, 565, 566, 568, 569, - 570, 571, 565, 566, 568, 569, 570, 571, - 565, 566, 567, 555, 569, 570, 571, 565, - 566, 567, 569, 570, 571, 565, 566, 567, - 569, 570, 571, 565, 566, 567, 569, 570, - 571, 565, 566, 567, 569, 570, 571, 565, - 566, 567, 569, 570, 571, 565, 566, 567, - 569, 570, 571, 565, 566, 567, 569, 570, - 571, 565, 566, 567, 569, 570, 571, 565, - 566, 567, 568, 555, 570, 571, 565, 566, - 567, 568, 570, 571, 565, 566, 567, 568, - 570, 571, 565, 566, 567, 568, 570, 571, - 565, 566, 567, 568, 570, 572, 573, 569, - 267, 555, 540, 268, 540, 542, 268, 542, - 268, 267, 540, 574, 575, 530, 267, 268, - 267, 268, 268, 268, 267, 577, 578, 579, - 580, 576, 267, 581, 582, 530, 267, 266, - 268, 267, 268, 266, 268, 267, 583, 530, - 267, 268, 268, 267, 584, 530, 267, 268, - 268, 267, 585, 586, 587, 588, 589, 590, - 591, 592, 593, 594, 595, 530, 267, 268, - 596, 267, 344, 345, 346, 347, 348, 349, - 597, 267, 598, 267, 268, 267, 395, 268, - 267, 268, 395, 268, 395, 268, 267, 395, - 395, 
268, 395, 268, 395, 268, 395, 268, - 395, 268, 267, 268, 268, 395, 395, 268, - 267, 395, 395, 268, 267, 395, 268, 395, - 268, 267, 268, 395, 268, 395, 268, 267, - 395, 268, 395, 268, 267, 395, 268, 267, - 395, 395, 268, 268, 395, 268, 395, 268, - 395, 267, 576, 267, 599, 576, 267, 322, - 530, 600, 530, 267, 268, 267, 266, 3, - 1, 266, 3, 1, 602, 603, 601, 1, - 266, 3, 1, 266, 3, 1, 604, 605, - 606, 607, 608, 601, 1, 609, 610, 612, - 611, 611, 612, 612, 611, 612, 612, 611, - 612, 612, 612, 611, 612, 611, 612, 612, - 611, 612, 612, 612, 612, 611, 612, 612, - 611, 611, 612, 612, 611, 612, 611, 613, - 614, 615, 616, 617, 619, 620, 621, 623, - 624, 625, 626, 627, 628, 629, 630, 631, - 632, 633, 634, 635, 636, 637, 638, 639, - 640, 618, 622, 611, 612, 612, 612, 612, - 611, 612, 611, 612, 612, 611, 611, 611, - 612, 611, 611, 611, 612, 612, 612, 612, - 611, 611, 611, 611, 611, 611, 612, 611, - 611, 611, 611, 611, 611, 612, 611, 611, - 611, 611, 612, 612, 612, 612, 611, 612, - 612, 612, 612, 612, 611, 612, 612, 611, - 612, 612, 612, 612, 611, 612, 612, 611, - 611, 611, 611, 611, 611, 612, 612, 612, - 612, 612, 612, 611, 612, 612, 611, 611, - 611, 611, 611, 611, 612, 612, 611, 612, - 612, 612, 612, 612, 611, 612, 612, 611, - 612, 611, 612, 612, 612, 611, 612, 611, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 612, 612, 612, 611, 612, 611, 641, - 642, 643, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, - 658, 659, 660, 661, 611, 612, 612, 611, - 612, 612, 612, 611, 612, 612, 612, 612, - 611, 612, 611, 612, 612, 611, 612, 612, - 611, 612, 611, 611, 611, 612, 612, 611, - 612, 612, 611, 612, 612, 611, 612, 611, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 612, 611, 611, 611, 612, 612, 612, - 611, 612, 611, 612, 611, 612, 612, 612, - 612, 612, 611, 612, 612, 611, 662, 663, - 664, 665, 666, 611, 612, 667, 611, 662, - 663, 668, 664, 665, 666, 611, 612, 611, - 612, 611, 612, 611, 612, 611, 612, 611, - 669, 670, 611, 612, 611, 612, 611, 671, - 672, 673, 674, 675, 676, 677, 678, 679, - 680, 681, 682, 683, 684, 685, 611, 612, - 612, 611, 612, 611, 612, 611, 612, 612, - 612, 612, 611, 612, 612, 611, 611, 611, - 612, 612, 611, 612, 611, 612, 612, 611, - 611, 611, 612, 612, 611, 612, 612, 612, - 611, 612, 612, 612, 612, 611, 612, 612, - 612, 611, 612, 612, 611, 686, 687, 672, - 611, 612, 611, 612, 612, 611, 688, 689, - 690, 691, 692, 693, 694, 611, 695, 696, - 697, 698, 699, 611, 612, 611, 612, 611, - 612, 611, 612, 612, 612, 612, 612, 611, - 612, 611, 700, 701, 702, 703, 704, 705, - 706, 707, 708, 709, 710, 711, 712, 713, - 714, 715, 716, 713, 717, 718, 719, 720, - 721, 611, 612, 612, 611, 611, 612, 611, - 611, 612, 612, 612, 611, 612, 611, 612, - 612, 611, 611, 611, 612, 612, 612, 611, - 612, 611, 612, 612, 612, 611, 612, 612, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 611, 612, 611, 611, 612, 612, 612, - 611, 611, 611, 612, 611, 612, 612, 611, - 612, 611, 612, 612, 611, 612, 612, 611, - 722, 723, 724, 725, 611, 612, 611, 612, - 611, 612, 611, 612, 611, 726, 611, 612, - 611, 727, 728, 729, 730, 731, 732, 611, - 612, 612, 612, 611, 611, 611, 611, 612, - 612, 611, 612, 612, 611, 611, 611, 612, - 612, 612, 612, 611, 733, 734, 735, 611, - 612, 612, 612, 612, 612, 611, 612, 611, - 612, 611, 736, 737, 738, 611, 739, 611, - 739, 611, 611, 739, 739, 611, 739, 739, - 611, 739, 739, 739, 611, 739, 611, 739, - 739, 611, 739, 739, 739, 739, 611, 739, - 739, 611, 611, 739, 739, 611, 739, 611, - 740, 741, 742, 743, 744, 745, 746, 748, - 749, 750, 751, 752, 753, 754, 755, 756, - 757, 758, 
759, 631, 760, 761, 762, 763, - 764, 765, 766, 767, 768, 747, 611, 739, - 739, 739, 739, 611, 739, 611, 739, 739, - 611, 612, 612, 611, 611, 612, 739, 739, - 611, 739, 739, 611, 739, 611, 612, 739, - 739, 739, 612, 612, 611, 739, 739, 739, - 611, 611, 611, 739, 611, 612, 612, 739, - 739, 612, 611, 739, 739, 739, 611, 739, - 611, 739, 611, 739, 611, 612, 611, 611, - 739, 739, 611, 739, 611, 612, 739, 739, - 612, 739, 611, 612, 739, 739, 612, 612, - 739, 739, 611, 739, 739, 612, 611, 739, - 739, 739, 612, 612, 612, 611, 739, 612, - 739, 611, 611, 611, 612, 611, 611, 611, - 739, 739, 739, 612, 739, 612, 611, 739, - 739, 612, 612, 612, 739, 739, 739, 611, - 739, 739, 612, 612, 611, 611, 611, 739, - 739, 739, 611, 739, 611, 612, 739, 739, - 739, 739, 612, 739, 612, 612, 611, 739, - 612, 739, 611, 739, 611, 739, 612, 739, - 739, 611, 739, 611, 739, 739, 739, 739, - 612, 611, 612, 739, 611, 739, 739, 739, - 739, 611, 739, 611, 769, 770, 771, 772, - 773, 774, 775, 776, 777, 778, 779, 780, - 781, 782, 783, 784, 785, 786, 787, 788, - 789, 611, 612, 739, 739, 612, 739, 611, - 612, 739, 739, 739, 611, 739, 612, 739, - 739, 739, 611, 739, 611, 739, 739, 611, - 739, 739, 611, 612, 739, 612, 611, 739, - 739, 739, 611, 612, 739, 611, 739, 739, - 611, 739, 739, 612, 739, 612, 612, 739, - 611, 739, 739, 612, 611, 739, 739, 739, - 739, 612, 739, 739, 612, 739, 611, 739, - 611, 612, 612, 612, 739, 739, 612, 611, - 739, 611, 739, 611, 612, 612, 612, 612, - 739, 739, 612, 739, 611, 612, 739, 739, - 612, 739, 612, 611, 612, 739, 612, 739, - 611, 612, 739, 739, 739, 739, 612, 739, - 611, 739, 739, 611, 790, 791, 792, 793, - 794, 611, 739, 667, 611, 739, 611, 739, - 611, 739, 611, 739, 611, 795, 796, 611, - 739, 611, 739, 611, 797, 798, 799, 800, - 675, 801, 802, 803, 804, 805, 806, 807, - 808, 809, 810, 611, 739, 739, 611, 739, - 611, 739, 611, 739, 739, 739, 612, 612, - 739, 611, 739, 611, 739, 611, 612, 739, - 611, 739, 612, 611, 612, 739, 739, 739, - 612, 739, 612, 611, 739, 611, 612, 739, - 612, 739, 612, 739, 611, 739, 739, 612, - 739, 611, 739, 739, 739, 739, 611, 739, - 612, 612, 739, 739, 612, 611, 739, 739, - 612, 739, 612, 611, 811, 812, 798, 611, - 739, 611, 739, 739, 611, 813, 814, 815, - 816, 817, 818, 819, 611, 820, 821, 822, - 823, 824, 611, 739, 611, 739, 611, 739, - 611, 739, 739, 739, 739, 739, 611, 739, - 611, 825, 826, 827, 828, 829, 830, 831, - 832, 833, 834, 835, 836, 837, 838, 839, - 840, 841, 842, 843, 844, 845, 846, 847, - 611, 739, 612, 739, 611, 611, 739, 612, - 611, 612, 612, 611, 739, 612, 739, 739, - 611, 739, 611, 612, 739, 612, 739, 612, - 611, 611, 739, 611, 612, 739, 739, 612, - 739, 612, 739, 611, 739, 612, 739, 611, - 739, 739, 612, 739, 612, 611, 739, 739, - 612, 612, 612, 612, 739, 739, 611, 612, - 739, 611, 612, 612, 739, 611, 739, 612, - 739, 612, 739, 612, 739, 611, 612, 611, - 739, 739, 612, 612, 739, 612, 739, 611, - 611, 611, 739, 739, 612, 739, 612, 739, - 611, 611, 739, 612, 612, 739, 612, 739, - 611, 612, 739, 612, 739, 611, 612, 612, - 739, 739, 611, 612, 612, 612, 739, 739, - 611, 848, 849, 724, 850, 611, 739, 611, - 739, 611, 739, 611, 851, 611, 739, 611, - 852, 853, 854, 855, 856, 857, 611, 612, - 612, 739, 739, 739, 611, 611, 611, 611, - 739, 739, 611, 739, 739, 611, 611, 611, - 739, 739, 739, 739, 611, 858, 859, 860, - 611, 739, 739, 739, 739, 739, 611, 739, - 611, 739, 611, 861, 611, 612, 611, 862, - 611, 863, 864, 865, 867, 866, 611, 739, - 611, 611, 739, 739, 612, 611, 612, 611, - 868, 611, 869, 870, 871, 873, 872, 611, - 612, 611, 611, 612, 612, 688, 689, 
690, - 691, 692, 693, 611, 641, 642, 643, 604, - 605, 874, 644, 645, 646, 647, 648, 649, - 650, 651, 652, 653, 654, 655, 656, 657, - 658, 659, 660, 661, 611, 875, 610, 641, - 642, 643, 876, 606, 607, 644, 645, 646, - 647, 648, 649, 650, 651, 652, 653, 654, - 655, 656, 657, 658, 659, 660, 661, 611, - 875, 611, 877, 875, 641, 642, 643, 878, - 607, 644, 645, 646, 647, 648, 649, 650, - 651, 652, 653, 654, 655, 656, 657, 658, - 659, 660, 661, 611, 877, 611, 609, 877, - 879, 611, 877, 611, 880, 881, 611, 875, - 611, 611, 877, 611, 875, 611, 875, 671, - 672, 673, 674, 675, 676, 677, 882, 679, - 680, 681, 682, 683, 684, 685, 884, 885, - 886, 887, 888, 889, 884, 885, 886, 887, - 888, 889, 884, 883, 890, 611, 612, 610, - 611, 891, 891, 891, 877, 611, 641, 642, - 643, 876, 874, 644, 645, 646, 647, 648, - 649, 650, 651, 652, 653, 654, 655, 656, - 657, 658, 659, 660, 661, 611, 880, 892, - 611, 611, 875, 891, 891, 877, 891, 891, - 877, 891, 891, 891, 877, 891, 891, 877, - 891, 891, 877, 891, 891, 611, 877, 877, - 886, 887, 888, 889, 883, 884, 886, 887, - 888, 889, 883, 884, 886, 887, 888, 889, - 883, 884, 886, 887, 888, 889, 883, 884, - 886, 887, 888, 889, 883, 884, 886, 887, - 888, 889, 883, 884, 886, 887, 888, 889, - 883, 884, 886, 887, 888, 889, 883, 884, - 886, 887, 888, 889, 883, 884, 885, 890, - 887, 888, 889, 883, 884, 885, 887, 888, - 889, 883, 884, 885, 887, 888, 889, 883, - 884, 885, 887, 888, 889, 883, 884, 885, - 887, 888, 889, 883, 884, 885, 887, 888, - 889, 883, 884, 885, 887, 888, 889, 883, - 884, 885, 887, 888, 889, 883, 884, 885, - 887, 888, 889, 883, 884, 885, 886, 890, - 888, 889, 883, 884, 885, 886, 888, 889, - 883, 884, 885, 886, 888, 889, 883, 884, - 885, 886, 888, 889, 883, 884, 885, 886, - 888, 893, 892, 887, 611, 890, 891, 611, - 875, 877, 265, 3, 1, 894, 895, 896, - 897, 898, 601, 1, 265, 899, 3, 265, - 3, 265, 3, 1, 901, 900, 900, 901, - 901, 900, 901, 901, 900, 901, 901, 901, - 900, 901, 900, 901, 901, 900, 901, 901, - 901, 901, 900, 901, 901, 900, 900, 901, - 901, 900, 901, 900, 902, 903, 904, 905, - 906, 908, 909, 910, 912, 913, 914, 915, - 916, 917, 918, 919, 920, 921, 922, 923, - 924, 925, 926, 927, 928, 929, 907, 911, - 900, 901, 901, 901, 901, 900, 901, 900, - 901, 901, 900, 900, 900, 901, 900, 900, - 900, 901, 901, 901, 901, 900, 900, 900, - 900, 900, 900, 901, 900, 900, 900, 900, - 900, 900, 901, 900, 900, 900, 900, 901, - 901, 901, 901, 900, 901, 901, 901, 901, - 901, 900, 901, 901, 900, 901, 901, 901, - 901, 900, 901, 901, 900, 900, 900, 900, - 900, 900, 901, 901, 901, 901, 901, 901, - 900, 901, 901, 900, 900, 900, 900, 900, - 900, 901, 901, 900, 901, 901, 901, 901, - 901, 900, 901, 901, 900, 901, 900, 901, - 901, 901, 900, 901, 900, 901, 901, 901, - 901, 901, 900, 901, 900, 901, 901, 901, - 901, 900, 901, 900, 930, 931, 932, 933, - 934, 935, 936, 937, 938, 939, 940, 941, - 942, 943, 944, 945, 946, 947, 948, 949, - 950, 900, 901, 901, 900, 901, 901, 901, - 900, 901, 901, 901, 901, 900, 901, 900, - 901, 901, 900, 901, 901, 900, 901, 900, - 900, 900, 901, 901, 900, 901, 901, 900, - 901, 901, 900, 901, 900, 901, 901, 901, - 901, 901, 900, 901, 900, 901, 901, 900, - 900, 900, 901, 901, 901, 900, 901, 900, - 901, 900, 901, 901, 901, 901, 901, 900, - 901, 901, 900, 951, 952, 953, 954, 955, - 900, 901, 899, 900, 901, 900, 901, 900, - 901, 900, 901, 900, 956, 957, 900, 901, - 900, 901, 900, 958, 959, 960, 961, 962, - 963, 964, 965, 966, 967, 968, 969, 970, - 971, 972, 900, 901, 901, 900, 901, 900, - 901, 900, 901, 901, 901, 901, 900, 901, - 901, 900, 900, 900, 901, 901, 
900, 901, - 900, 901, 901, 900, 900, 900, 901, 901, - 900, 901, 901, 901, 900, 901, 901, 901, - 901, 900, 901, 901, 901, 900, 901, 901, - 900, 973, 974, 959, 900, 901, 900, 901, - 901, 900, 975, 976, 977, 978, 979, 980, - 900, 981, 982, 983, 984, 985, 900, 901, - 900, 901, 900, 901, 900, 901, 901, 901, - 901, 901, 900, 901, 900, 986, 987, 988, - 989, 990, 991, 992, 993, 994, 995, 996, - 997, 998, 999, 1000, 1001, 1002, 999, 1003, - 1004, 1005, 1006, 1007, 900, 901, 901, 900, - 900, 901, 900, 900, 901, 901, 901, 900, - 901, 900, 901, 901, 900, 900, 900, 901, - 901, 901, 900, 901, 900, 901, 901, 901, - 900, 901, 901, 901, 901, 901, 901, 901, - 900, 901, 900, 901, 900, 901, 900, 900, - 901, 901, 901, 900, 900, 900, 901, 900, - 901, 901, 900, 901, 900, 901, 901, 900, - 901, 901, 900, 1008, 1009, 1010, 1011, 900, - 901, 900, 901, 900, 901, 900, 901, 900, - 1012, 900, 901, 900, 1013, 1014, 1015, 1016, - 1017, 1018, 900, 901, 901, 901, 900, 900, - 900, 900, 901, 901, 900, 901, 901, 900, - 900, 900, 901, 901, 901, 901, 900, 1019, - 1020, 1021, 900, 901, 901, 901, 901, 901, - 900, 901, 900, 901, 900, 1022, 900, 1023, - 1024, 1025, 1027, 1026, 900, 901, 900, 900, - 901, 901, 951, 952, 1028, 953, 954, 955, - 900, 901, 900, 975, 976, 977, 978, 979, - 980, 1029, 900, 1030, 1031, 1032, 900, 1033, - 900, 1033, 900, 900, 1033, 1033, 900, 1033, - 1033, 900, 1033, 1033, 1033, 900, 1033, 900, - 1033, 1033, 900, 1033, 1033, 1033, 1033, 900, - 1033, 1033, 900, 900, 1033, 1033, 900, 1033, - 900, 1034, 1035, 1036, 1037, 1038, 1039, 1040, - 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, - 1050, 1051, 1052, 1053, 920, 1054, 1055, 1056, - 1057, 1058, 1059, 1060, 1061, 1062, 1041, 900, - 1033, 1033, 1033, 1033, 900, 1033, 900, 1033, - 1033, 900, 901, 901, 900, 900, 901, 1033, - 1033, 900, 1033, 1033, 900, 1033, 900, 901, - 1033, 1033, 1033, 901, 901, 900, 1033, 1033, - 1033, 900, 900, 900, 1033, 900, 901, 901, - 1033, 1033, 901, 900, 1033, 1033, 1033, 900, - 1033, 900, 1033, 900, 1033, 900, 901, 900, - 900, 1033, 1033, 900, 1033, 900, 901, 1033, - 1033, 901, 1033, 900, 901, 1033, 1033, 901, - 901, 1033, 1033, 900, 1033, 1033, 901, 900, - 1033, 1033, 1033, 901, 901, 901, 900, 1033, - 901, 1033, 900, 900, 900, 901, 900, 900, - 900, 1033, 1033, 1033, 901, 1033, 901, 900, - 1033, 1033, 901, 901, 901, 1033, 1033, 1033, - 900, 1033, 1033, 901, 901, 900, 900, 900, - 1033, 1033, 1033, 900, 1033, 900, 901, 1033, - 1033, 1033, 1033, 901, 1033, 901, 901, 900, - 1033, 901, 1033, 900, 1033, 900, 1033, 901, - 1033, 1033, 900, 1033, 900, 1033, 1033, 1033, - 1033, 901, 900, 901, 1033, 900, 1033, 1033, - 1033, 1033, 900, 1033, 900, 1063, 1064, 1065, - 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, - 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, - 1082, 1083, 900, 901, 1033, 1033, 901, 1033, - 900, 901, 1033, 1033, 1033, 900, 1033, 901, - 1033, 1033, 1033, 900, 1033, 900, 1033, 1033, - 900, 1033, 1033, 900, 901, 1033, 901, 900, - 1033, 1033, 1033, 900, 901, 1033, 900, 1033, - 1033, 900, 1033, 1033, 901, 1033, 901, 901, - 1033, 900, 1033, 1033, 901, 900, 1033, 1033, - 1033, 1033, 901, 1033, 1033, 901, 1033, 900, - 1033, 900, 901, 901, 901, 1033, 1033, 901, - 900, 1033, 900, 1033, 900, 901, 901, 901, - 901, 1033, 1033, 901, 1033, 900, 901, 1033, - 1033, 901, 1033, 901, 900, 901, 1033, 901, - 1033, 900, 901, 1033, 1033, 1033, 1033, 901, - 1033, 900, 1033, 1033, 900, 1084, 1085, 1086, - 1087, 1088, 900, 1033, 899, 900, 1033, 900, - 1033, 900, 1033, 900, 1033, 900, 1089, 1090, - 900, 1033, 900, 1033, 900, 1091, 1092, 1093, - 1094, 
962, 1095, 1096, 1097, 1098, 1099, 1100, - 1101, 1102, 1103, 1104, 900, 1033, 1033, 900, - 1033, 900, 1033, 900, 1033, 1033, 1033, 901, - 901, 1033, 900, 1033, 900, 1033, 900, 901, - 1033, 900, 1033, 901, 900, 901, 1033, 1033, - 1033, 901, 1033, 901, 900, 1033, 900, 901, - 1033, 901, 1033, 901, 1033, 900, 1033, 1033, - 901, 1033, 900, 1033, 1033, 1033, 1033, 900, - 1033, 901, 901, 1033, 1033, 901, 900, 1033, - 1033, 901, 1033, 901, 900, 1105, 1106, 1092, - 900, 1033, 900, 1033, 1033, 900, 1107, 1108, - 1109, 1110, 1111, 1112, 1113, 900, 1114, 1115, - 1116, 1117, 1118, 900, 1033, 900, 1033, 900, - 1033, 900, 1033, 1033, 1033, 1033, 1033, 900, - 1033, 900, 1119, 1120, 1121, 1122, 1123, 1124, - 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, - 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, - 1141, 900, 1033, 901, 1033, 900, 900, 1033, - 901, 900, 901, 901, 900, 1033, 901, 1033, - 1033, 900, 1033, 900, 901, 1033, 901, 1033, - 901, 900, 900, 1033, 900, 901, 1033, 1033, - 901, 1033, 901, 1033, 900, 1033, 901, 1033, - 900, 1033, 1033, 901, 1033, 901, 900, 1033, - 1033, 901, 901, 901, 901, 1033, 1033, 900, - 901, 1033, 900, 901, 901, 1033, 900, 1033, - 901, 1033, 901, 1033, 901, 1033, 900, 901, - 900, 1033, 1033, 901, 901, 1033, 901, 1033, - 900, 900, 900, 1033, 1033, 901, 1033, 901, - 1033, 900, 900, 1033, 901, 901, 1033, 901, - 1033, 900, 901, 1033, 901, 1033, 900, 901, - 901, 1033, 1033, 900, 901, 901, 901, 1033, - 1033, 900, 1142, 1143, 1010, 1144, 900, 1033, - 900, 1033, 900, 1033, 900, 1145, 900, 1033, - 900, 1146, 1147, 1148, 1149, 1150, 1151, 900, - 901, 901, 1033, 1033, 1033, 900, 900, 900, - 900, 1033, 1033, 900, 1033, 1033, 900, 900, - 900, 1033, 1033, 1033, 1033, 900, 1152, 1153, - 1154, 900, 1033, 1033, 1033, 1033, 1033, 900, - 1033, 900, 1033, 900, 1155, 900, 901, 900, - 1156, 900, 1157, 1158, 1159, 1161, 1160, 900, - 1033, 900, 900, 1033, 1033, 901, 900, 901, - 900, 3, 265, 3, 1, 1162, 3, 1, - 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1162, - 1163, 1162, 1162, 1162, 1163, 1162, 1163, 1162, - 1162, 1163, 1162, 1162, 1162, 1162, 1163, 1162, - 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1163, - 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1172, - 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, - 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, - 1189, 1190, 1191, 1192, 1193, 1171, 1163, 1162, - 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, - 1163, 1194, 1194, 1163, 1163, 1194, 1162, 1194, - 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1163, - 1194, 1194, 1194, 1163, 1194, 1163, 1194, 1194, - 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, - 1163, 1163, 1194, 1194, 1163, 1194, 1163, 1195, - 1196, 1197, 1198, 1199, 1201, 1202, 1203, 1205, - 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1184, - 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, - 1221, 1200, 1204, 1163, 1194, 1194, 1194, 1194, - 1163, 1194, 1163, 1194, 1194, 1163, 1163, 1163, - 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1194, - 1163, 1163, 1163, 1163, 1163, 1163, 1194, 1163, - 1163, 1163, 1163, 1163, 1163, 1194, 1163, 1163, - 1163, 1163, 1194, 1194, 1194, 1194, 1163, 1194, - 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, - 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, - 1163, 1163, 1163, 1163, 1163, 1194, 1194, 1194, - 1194, 1194, 1194, 1163, 1194, 1194, 1163, 1163, - 1163, 1163, 1163, 1163, 1194, 1194, 1163, 1194, - 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, - 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1194, 1194, 1163, 1194, 1163, 1222, - 1223, 1224, 1225, 
1226, 1227, 1228, 1229, 1230, - 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, - 1239, 1240, 1241, 1242, 1163, 1194, 1194, 1163, - 1194, 1194, 1194, 1163, 1194, 1194, 1194, 1194, - 1163, 1194, 1163, 1194, 1194, 1163, 1194, 1194, - 1163, 1194, 1163, 1163, 1163, 1194, 1194, 1163, - 1194, 1194, 1163, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1194, 1163, 1163, 1163, 1194, 1194, 1194, - 1163, 1194, 1163, 1194, 1163, 1194, 1194, 1194, - 1194, 1194, 1163, 1194, 1194, 1163, 1243, 1244, - 1245, 1246, 1247, 1163, 1194, 1248, 1163, 1243, - 1244, 1249, 1245, 1246, 1247, 1163, 1194, 1163, - 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1163, - 1250, 1251, 1163, 1194, 1163, 1194, 1163, 1252, - 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, - 1261, 1262, 1263, 1264, 1265, 1266, 1163, 1194, - 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1194, - 1194, 1194, 1163, 1194, 1194, 1163, 1163, 1163, - 1194, 1194, 1163, 1194, 1163, 1194, 1194, 1163, - 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1194, - 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, - 1194, 1163, 1194, 1194, 1163, 1267, 1268, 1253, - 1163, 1194, 1163, 1194, 1194, 1163, 1269, 1270, - 1271, 1272, 1273, 1274, 1275, 1163, 1276, 1277, - 1278, 1279, 1280, 1163, 1194, 1163, 1194, 1163, - 1194, 1163, 1194, 1194, 1194, 1194, 1194, 1163, - 1194, 1163, 1281, 1282, 1283, 1284, 1285, 1286, - 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, - 1295, 1296, 1297, 1294, 1298, 1299, 1300, 1301, - 1302, 1163, 1194, 1194, 1163, 1163, 1194, 1163, - 1163, 1194, 1194, 1194, 1163, 1194, 1163, 1194, - 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1163, - 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1194, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1163, 1194, 1163, 1163, 1194, 1194, 1194, - 1163, 1163, 1163, 1194, 1163, 1194, 1194, 1163, - 1194, 1163, 1194, 1194, 1163, 1194, 1194, 1163, - 1303, 1304, 1305, 1306, 1163, 1194, 1163, 1194, - 1163, 1194, 1163, 1194, 1163, 1307, 1163, 1194, - 1163, 1308, 1309, 1310, 1311, 1312, 1313, 1163, - 1194, 1194, 1194, 1163, 1163, 1163, 1163, 1194, - 1194, 1163, 1194, 1194, 1163, 1163, 1163, 1194, - 1194, 1194, 1194, 1163, 1314, 1315, 1316, 1163, - 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, - 1194, 1163, 1317, 1318, 1319, 1163, 1162, 1163, - 1194, 1163, 1194, 1163, 1320, 1163, 1321, 1322, - 1323, 1325, 1324, 1163, 1194, 1163, 1163, 1194, - 1194, 1269, 1270, 1271, 1272, 1273, 1274, 1163, - 1162, 1163, 1162, 1162, 1163, 1162, 1163, 1194, - 1162, 1162, 1162, 1194, 1194, 1163, 1162, 1162, - 1162, 1163, 1163, 1163, 1162, 1163, 1194, 1194, - 1162, 1162, 1194, 1163, 1162, 1162, 1162, 1163, - 1162, 1163, 1162, 1163, 1162, 1163, 1194, 1163, - 1163, 1162, 1162, 1163, 1162, 1163, 1194, 1162, - 1162, 1194, 1162, 1163, 1194, 1162, 1162, 1194, - 1194, 1162, 1162, 1163, 1162, 1162, 1194, 1163, - 1162, 1162, 1162, 1194, 1194, 1194, 1163, 1162, - 1194, 1162, 1163, 1163, 1163, 1194, 1163, 1163, - 1163, 1162, 1162, 1162, 1194, 1162, 1194, 1163, - 1162, 1162, 1194, 1194, 1194, 1162, 1162, 1162, - 1163, 1162, 1162, 1194, 1194, 1163, 1163, 1163, - 1162, 1162, 1162, 1163, 1162, 1163, 1194, 1162, - 1162, 1162, 1162, 1194, 1162, 1194, 1194, 1163, - 1162, 1194, 1162, 1163, 1162, 1163, 1162, 1194, - 1162, 1162, 1163, 1162, 1163, 1162, 1162, 1162, - 1162, 1194, 1163, 1194, 1162, 1163, 1162, 1162, - 1162, 1162, 1163, 1162, 1163, 1326, 1327, 1328, - 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, - 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, - 1345, 1346, 1163, 1194, 1162, 1162, 1194, 1162, - 1163, 1194, 1162, 
1162, 1162, 1163, 1162, 1194, - 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, - 1163, 1162, 1162, 1163, 1194, 1162, 1194, 1163, - 1162, 1162, 1162, 1163, 1194, 1162, 1163, 1162, - 1162, 1163, 1162, 1162, 1194, 1162, 1194, 1194, - 1162, 1163, 1162, 1162, 1194, 1163, 1162, 1162, - 1162, 1162, 1194, 1162, 1162, 1194, 1162, 1163, - 1162, 1163, 1194, 1194, 1194, 1162, 1162, 1194, - 1163, 1162, 1163, 1162, 1163, 1194, 1194, 1194, - 1194, 1162, 1162, 1194, 1162, 1163, 1194, 1162, - 1162, 1194, 1162, 1194, 1163, 1194, 1162, 1194, - 1162, 1163, 1194, 1162, 1162, 1162, 1162, 1194, - 1162, 1163, 1162, 1162, 1163, 1347, 1348, 1349, - 1350, 1351, 1163, 1162, 1248, 1163, 1162, 1163, - 1162, 1163, 1162, 1163, 1162, 1163, 1352, 1353, - 1163, 1162, 1163, 1162, 1163, 1354, 1355, 1356, - 1357, 1256, 1358, 1359, 1360, 1361, 1362, 1363, - 1364, 1365, 1366, 1367, 1163, 1162, 1162, 1163, - 1162, 1163, 1162, 1163, 1162, 1162, 1162, 1194, - 1194, 1162, 1163, 1162, 1163, 1162, 1163, 1194, - 1162, 1163, 1162, 1194, 1163, 1194, 1162, 1162, - 1162, 1194, 1162, 1194, 1163, 1162, 1163, 1194, - 1162, 1194, 1162, 1194, 1162, 1163, 1162, 1162, - 1194, 1162, 1163, 1162, 1162, 1162, 1162, 1163, - 1162, 1194, 1194, 1162, 1162, 1194, 1163, 1162, - 1162, 1194, 1162, 1194, 1163, 1368, 1369, 1355, - 1163, 1162, 1163, 1162, 1162, 1163, 1370, 1371, - 1372, 1373, 1374, 1375, 1376, 1163, 1377, 1378, - 1379, 1380, 1381, 1163, 1162, 1163, 1162, 1163, - 1162, 1163, 1162, 1162, 1162, 1162, 1162, 1163, - 1162, 1163, 1382, 1383, 1384, 1385, 1386, 1387, - 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, - 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, - 1404, 1163, 1162, 1194, 1162, 1163, 1163, 1162, - 1194, 1163, 1194, 1194, 1163, 1162, 1194, 1162, - 1162, 1163, 1162, 1163, 1194, 1162, 1194, 1162, - 1194, 1163, 1163, 1162, 1163, 1194, 1162, 1162, - 1194, 1162, 1194, 1162, 1163, 1162, 1194, 1162, - 1163, 1162, 1162, 1194, 1162, 1194, 1163, 1162, - 1162, 1194, 1194, 1194, 1194, 1162, 1162, 1163, - 1194, 1162, 1163, 1194, 1194, 1162, 1163, 1162, - 1194, 1162, 1194, 1162, 1194, 1162, 1163, 1194, - 1163, 1162, 1162, 1194, 1194, 1162, 1194, 1162, - 1163, 1163, 1163, 1162, 1162, 1194, 1162, 1194, - 1162, 1163, 1163, 1162, 1194, 1194, 1162, 1194, - 1162, 1163, 1194, 1162, 1194, 1162, 1163, 1194, - 1194, 1162, 1162, 1163, 1194, 1194, 1194, 1162, - 1162, 1163, 1405, 1406, 1305, 1407, 1163, 1162, - 1163, 1162, 1163, 1162, 1163, 1408, 1163, 1162, - 1163, 1409, 1410, 1411, 1412, 1413, 1414, 1163, - 1194, 1194, 1162, 1162, 1162, 1163, 1163, 1163, - 1163, 1162, 1162, 1163, 1162, 1162, 1163, 1163, - 1163, 1162, 1162, 1162, 1162, 1163, 1415, 1416, - 1417, 1163, 1162, 1162, 1162, 1162, 1162, 1163, - 1162, 1163, 1162, 1163, 1418, 1163, 1194, 1163, - 1419, 1163, 1420, 1421, 1422, 1424, 1423, 1163, - 1162, 1163, 1163, 1162, 1162, 1162, 3, 1, - 3, 1162, 3, 1, 601, 1, 1425, 1427, - 1428, 1429, 1430, 1431, 1432, 1427, 1428, 1429, - 1430, 1431, 1432, 1427, 601, 1426, 890, 1, - 3, 610, 3, 1, 875, 875, 875, 877, - 1, 875, 875, 877, 875, 875, 877, 875, - 875, 875, 877, 875, 875, 877, 875, 875, - 877, 875, 875, 1, 877, 1429, 1430, 1431, - 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, - 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, - 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, - 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, - 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, - 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, - 1432, 1426, 1427, 1428, 890, 1430, 1431, 1432, - 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, - 1428, 1430, 1431, 1432, 1426, 1427, 
1428, 1430, - 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, - 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, - 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, - 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, - 1426, 1427, 1428, 1429, 890, 1431, 1432, 1426, - 1427, 1428, 1429, 1431, 1432, 1426, 1427, 1428, - 1429, 1431, 1432, 1426, 1427, 1428, 1429, 1431, - 1432, 1426, 1427, 1428, 1429, 1431, 1433, 1434, - 1435, 1437, 1430, 1436, 1, 890, 875, 3, - 875, 877, 3, 877, 3, 1, 875, 1, - 265, 265, 1, 265, 1438, 1439, 601, 1, - 265, 3, 1, 3, 3, 265, 3, 1, - 1441, 1442, 1443, 1444, 1440, 1, 1445, 1446, - 601, 1, 266, 3, 1, 3, 266, 3, - 1, 1447, 601, 1, 3, 265, 3, 1, - 1448, 601, 1, 3, 265, 3, 1, 1449, - 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, - 1458, 1459, 601, 1, 3, 1460, 1, 1462, - 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1461, - 1462, 1462, 1462, 1461, 1462, 1461, 1462, 1462, - 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, - 1461, 1461, 1462, 1462, 1461, 1462, 1461, 1463, - 1464, 1465, 1466, 1467, 1469, 1470, 1471, 1473, - 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, - 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, - 1490, 1468, 1472, 1461, 1462, 1462, 1462, 1462, - 1461, 1462, 1461, 1462, 1462, 1461, 1461, 1461, - 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1462, - 1461, 1461, 1461, 1461, 1461, 1461, 1462, 1461, - 1461, 1461, 1461, 1461, 1461, 1462, 1461, 1461, - 1461, 1461, 1462, 1462, 1462, 1462, 1461, 1462, - 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, - 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, - 1461, 1461, 1461, 1461, 1461, 1462, 1462, 1462, - 1462, 1462, 1462, 1461, 1462, 1462, 1461, 1461, - 1461, 1461, 1461, 1461, 1462, 1462, 1461, 1462, - 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, - 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1462, 1462, 1461, 1462, 1461, 1491, - 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, - 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, - 1508, 1509, 1510, 1511, 1461, 1462, 1462, 1461, - 1462, 1462, 1462, 1461, 1462, 1462, 1462, 1462, - 1461, 1462, 1461, 1462, 1462, 1461, 1462, 1462, - 1461, 1462, 1461, 1461, 1461, 1462, 1462, 1461, - 1462, 1462, 1461, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1462, 1461, 1461, 1461, 1462, 1462, 1462, - 1461, 1462, 1461, 1462, 1461, 1462, 1462, 1462, - 1462, 1462, 1461, 1462, 1462, 1461, 1512, 1513, - 1514, 1515, 1516, 1461, 1462, 1517, 1461, 1512, - 1513, 1518, 1514, 1515, 1516, 1461, 1462, 1461, - 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1461, - 1519, 1520, 1461, 1462, 1461, 1462, 1461, 1521, - 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, - 1530, 1531, 1532, 1533, 1534, 1535, 1461, 1462, - 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1462, - 1462, 1462, 1461, 1462, 1462, 1461, 1461, 1461, - 1462, 1462, 1461, 1462, 1461, 1462, 1462, 1461, - 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1462, - 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, - 1462, 1461, 1462, 1462, 1461, 1536, 1537, 1522, - 1461, 1462, 1461, 1462, 1462, 1461, 1538, 1539, - 1540, 1541, 1542, 1543, 1544, 1461, 1545, 1546, - 1547, 1548, 1549, 1461, 1462, 1461, 1462, 1461, - 1462, 1461, 1462, 1462, 1462, 1462, 1462, 1461, - 1462, 1461, 1550, 1551, 1552, 1553, 1554, 1555, - 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, - 1564, 1565, 1566, 1563, 1567, 1568, 1569, 1570, - 1571, 1461, 1462, 1462, 1461, 1461, 1462, 1461, - 1461, 1462, 1462, 1462, 1461, 1462, 1461, 1462, - 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1461, - 1462, 
1461, 1462, 1462, 1462, 1461, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1461, 1462, 1461, 1461, 1462, 1462, 1462, - 1461, 1461, 1461, 1462, 1461, 1462, 1462, 1461, - 1462, 1461, 1462, 1462, 1461, 1462, 1462, 1461, - 1572, 1573, 1574, 1575, 1461, 1462, 1461, 1462, - 1461, 1462, 1461, 1462, 1461, 1576, 1461, 1462, - 1461, 1577, 1578, 1579, 1580, 1581, 1582, 1461, - 1462, 1462, 1462, 1461, 1461, 1461, 1461, 1462, - 1462, 1461, 1462, 1462, 1461, 1461, 1461, 1462, - 1462, 1462, 1462, 1461, 1583, 1584, 1585, 1461, - 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, - 1462, 1461, 1586, 1587, 1588, 1461, 1589, 1461, - 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1589, - 1461, 1589, 1589, 1589, 1461, 1589, 1461, 1589, - 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, - 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1461, - 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1598, - 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, - 1607, 1608, 1609, 1481, 1610, 1611, 1612, 1613, - 1614, 1615, 1616, 1617, 1618, 1597, 1461, 1589, - 1589, 1589, 1589, 1461, 1589, 1461, 1589, 1589, - 1461, 1462, 1462, 1461, 1461, 1462, 1589, 1589, - 1461, 1589, 1589, 1461, 1589, 1461, 1462, 1589, - 1589, 1589, 1462, 1462, 1461, 1589, 1589, 1589, - 1461, 1461, 1461, 1589, 1461, 1462, 1462, 1589, - 1589, 1462, 1461, 1589, 1589, 1589, 1461, 1589, - 1461, 1589, 1461, 1589, 1461, 1462, 1461, 1461, - 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, - 1462, 1589, 1461, 1462, 1589, 1589, 1462, 1462, - 1589, 1589, 1461, 1589, 1589, 1462, 1461, 1589, - 1589, 1589, 1462, 1462, 1462, 1461, 1589, 1462, - 1589, 1461, 1461, 1461, 1462, 1461, 1461, 1461, - 1589, 1589, 1589, 1462, 1589, 1462, 1461, 1589, - 1589, 1462, 1462, 1462, 1589, 1589, 1589, 1461, - 1589, 1589, 1462, 1462, 1461, 1461, 1461, 1589, - 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, - 1589, 1589, 1462, 1589, 1462, 1462, 1461, 1589, - 1462, 1589, 1461, 1589, 1461, 1589, 1462, 1589, - 1589, 1461, 1589, 1461, 1589, 1589, 1589, 1589, - 1462, 1461, 1462, 1589, 1461, 1589, 1589, 1589, - 1589, 1461, 1589, 1461, 1619, 1620, 1621, 1622, - 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, - 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, - 1639, 1461, 1462, 1589, 1589, 1462, 1589, 1461, - 1462, 1589, 1589, 1589, 1461, 1589, 1462, 1589, - 1589, 1589, 1461, 1589, 1461, 1589, 1589, 1461, - 1589, 1589, 1461, 1462, 1589, 1462, 1461, 1589, - 1589, 1589, 1461, 1462, 1589, 1461, 1589, 1589, - 1461, 1589, 1589, 1462, 1589, 1462, 1462, 1589, - 1461, 1589, 1589, 1462, 1461, 1589, 1589, 1589, - 1589, 1462, 1589, 1589, 1462, 1589, 1461, 1589, - 1461, 1462, 1462, 1462, 1589, 1589, 1462, 1461, - 1589, 1461, 1589, 1461, 1462, 1462, 1462, 1462, - 1589, 1589, 1462, 1589, 1461, 1462, 1589, 1589, - 1462, 1589, 1462, 1461, 1462, 1589, 1462, 1589, - 1461, 1462, 1589, 1589, 1589, 1589, 1462, 1589, - 1461, 1589, 1589, 1461, 1640, 1641, 1642, 1643, - 1644, 1461, 1589, 1517, 1461, 1589, 1461, 1589, - 1461, 1589, 1461, 1589, 1461, 1645, 1646, 1461, - 1589, 1461, 1589, 1461, 1647, 1648, 1649, 1650, - 1525, 1651, 1652, 1653, 1654, 1655, 1656, 1657, - 1658, 1659, 1660, 1461, 1589, 1589, 1461, 1589, - 1461, 1589, 1461, 1589, 1589, 1589, 1462, 1462, - 1589, 1461, 1589, 1461, 1589, 1461, 1462, 1589, - 1461, 1589, 1462, 1461, 1462, 1589, 1589, 1589, - 1462, 1589, 1462, 1461, 1589, 1461, 1462, 1589, - 1462, 1589, 1462, 1589, 1461, 1589, 1589, 1462, - 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, - 1462, 1462, 1589, 1589, 1462, 1461, 1589, 1589, - 1462, 1589, 1462, 1461, 1661, 1662, 1648, 1461, - 1589, 
1461, 1589, 1589, 1461, 1663, 1664, 1665, - 1666, 1667, 1668, 1669, 1461, 1670, 1671, 1672, - 1673, 1674, 1461, 1589, 1461, 1589, 1461, 1589, - 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, - 1461, 1675, 1676, 1677, 1678, 1679, 1680, 1681, - 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, - 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, - 1461, 1589, 1462, 1589, 1461, 1461, 1589, 1462, - 1461, 1462, 1462, 1461, 1589, 1462, 1589, 1589, - 1461, 1589, 1461, 1462, 1589, 1462, 1589, 1462, - 1461, 1461, 1589, 1461, 1462, 1589, 1589, 1462, - 1589, 1462, 1589, 1461, 1589, 1462, 1589, 1461, - 1589, 1589, 1462, 1589, 1462, 1461, 1589, 1589, - 1462, 1462, 1462, 1462, 1589, 1589, 1461, 1462, - 1589, 1461, 1462, 1462, 1589, 1461, 1589, 1462, - 1589, 1462, 1589, 1462, 1589, 1461, 1462, 1461, - 1589, 1589, 1462, 1462, 1589, 1462, 1589, 1461, - 1461, 1461, 1589, 1589, 1462, 1589, 1462, 1589, - 1461, 1461, 1589, 1462, 1462, 1589, 1462, 1589, - 1461, 1462, 1589, 1462, 1589, 1461, 1462, 1462, - 1589, 1589, 1461, 1462, 1462, 1462, 1589, 1589, - 1461, 1698, 1699, 1574, 1700, 1461, 1589, 1461, - 1589, 1461, 1589, 1461, 1701, 1461, 1589, 1461, - 1702, 1703, 1704, 1705, 1706, 1707, 1461, 1462, - 1462, 1589, 1589, 1589, 1461, 1461, 1461, 1461, - 1589, 1589, 1461, 1589, 1589, 1461, 1461, 1461, - 1589, 1589, 1589, 1589, 1461, 1708, 1709, 1710, - 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, - 1461, 1589, 1461, 1711, 1461, 1462, 1461, 1712, - 1461, 1713, 1714, 1715, 1717, 1716, 1461, 1589, - 1461, 1461, 1589, 1589, 1462, 1461, 1462, 1461, - 1718, 1461, 1719, 1720, 1721, 1723, 1722, 1461, - 1462, 1461, 1461, 1462, 1462, 1538, 1539, 1540, - 1541, 1542, 1543, 1461, 1538, 1539, 1540, 1541, - 1542, 1543, 1724, 1461, 1725, 1461, 1462, 1461, - 1162, 3, 1, 3, 1162, 3, 1162, 3, - 1, 1162, 1162, 3, 1162, 3, 1162, 3, - 1162, 3, 1162, 3, 1, 3, 3, 1162, - 1162, 3, 1, 1162, 1162, 3, 1, 1162, - 3, 1162, 3, 1, 3, 1162, 3, 1162, - 3, 1, 1162, 3, 1162, 3, 1, 1162, - 3, 1, 1162, 1162, 3, 3, 1162, 3, - 1162, 3, 1162, 1, 1440, 1, 1726, 1440, - 1, 1727, 1435, 1437, 1728, 1437, 601, 1436, - 1, 265, 3, 1, 3, 265, 1, 1, - 1730, 1729, 1733, 1734, 1735, 1736, 1737, 1738, - 1739, 1741, 1742, 1743, 1744, 1745, 1746, 1748, - 1729, 1, 1732, 1740, 1747, 1, 1731, 262, - 264, 1750, 1751, 1752, 1753, 1754, 1755, 1756, - 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, - 1765, 1766, 1767, 1749, 262, 264, 1750, 1751, - 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, - 1760, 1761, 1768, 1763, 1764, 1765, 1769, 1767, - 1749, 256, 258, 1770, 1771, 1772, 1773, 1774, - 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, - 1783, 1784, 1785, 1786, 1787, 1749, 1789, 1790, - 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, - 1799, 1800, 1801, 1803, 268, 530, 576, 1802, - 1788, 527, 529, 1804, 1805, 1806, 1807, 1808, - 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, - 1817, 1818, 1819, 1820, 1821, 1788, 527, 529, - 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, - 1812, 1813, 1814, 1815, 1822, 1817, 1818, 1819, - 1823, 1821, 1788, 521, 523, 1824, 1825, 1826, - 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, - 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1788, - 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, - 1810, 1811, 1812, 1813, 1814, 1842, 1816, 1817, - 1843, 1844, 1845, 1846, 1819, 1820, 1821, 1788, - 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, - 1810, 1811, 1812, 1813, 1814, 1847, 1816, 1817, - 1818, 1848, 1819, 1820, 1821, 1788, 527, 529, - 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, - 1812, 1813, 1814, 1849, 1816, 1817, 1818, 1850, - 1819, 1820, 1821, 
1788, 527, 529, 1804, 1805, - 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, - 1814, 1851, 1816, 1817, 1818, 1852, 1819, 1820, - 1821, 1788, 527, 529, 1804, 1805, 1806, 1807, - 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, - 1816, 1817, 1818, 1819, 1853, 1821, 1788, 871, - 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, - 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, - 1870, 1871, 1872, 1873, 1874, 1875, 1854, 871, - 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, - 1862, 1863, 1864, 1865, 1876, 1867, 1868, 1877, - 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, - 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, - 1865, 1876, 1878, 1868, 1877, 1873, 1879, 1875, - 1854, 865, 867, 1880, 1881, 1882, 1883, 1884, - 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, - 1893, 1894, 1895, 1896, 1897, 1854, 871, 873, - 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, - 1863, 1864, 1865, 1898, 1867, 1868, 1877, 1899, - 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, - 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, - 1865, 1900, 1867, 1868, 1877, 1901, 1873, 1874, - 1875, 1854, 871, 873, 1855, 1856, 1857, 1858, - 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1902, - 1867, 1868, 1877, 1903, 1873, 1874, 1875, 1854, - 1025, 1027, 1905, 1906, 1907, 1908, 1909, 1910, - 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, - 1919, 1920, 1921, 1922, 1904, 1025, 1027, 1905, - 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, - 1914, 1915, 1916, 1923, 1918, 1919, 1920, 1924, - 1922, 1904, 1159, 1161, 1925, 1926, 1927, 1928, - 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, - 1937, 1938, 1939, 1940, 1941, 1942, 1904, 1422, - 1424, 1944, 1945, 1946, 1947, 1948, 1949, 1950, - 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, - 1959, 1960, 1961, 1943, 1323, 1325, 1962, 1963, - 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, - 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, - 1943, 1323, 1325, 1962, 1963, 1964, 1965, 1966, - 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1980, - 1975, 1976, 1977, 1981, 1979, 1943, 1721, 1723, - 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, - 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, - 1999, 2000, 1982, 1721, 1723, 1983, 1984, 1985, - 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, - 1994, 1995, 1996, 1997, 1998, 2001, 2000, 1982, - 1721, 1723, 1983, 1984, 1985, 1986, 1987, 1988, - 1989, 1990, 1991, 1992, 1993, 1994, 2002, 1996, - 1997, 1998, 2003, 2000, 1982, 1715, 1717, 2004, - 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, - 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, - 2021, 1982, + 0, 1, 3, 2, 2, 3, 3, 2, + 3, 3, 2, 3, 3, 3, 2, 3, + 2, 3, 3, 2, 3, 3, 3, 3, + 2, 3, 3, 2, 2, 3, 3, 2, + 3, 2, 4, 5, 6, 7, 8, 10, + 11, 12, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 9, 13, 2, 3, + 3, 3, 3, 2, 3, 2, 3, 3, + 2, 2, 2, 3, 2, 2, 2, 3, + 3, 3, 3, 2, 2, 2, 2, 2, + 2, 3, 2, 2, 2, 2, 2, 2, + 3, 2, 2, 2, 2, 3, 3, 3, + 3, 2, 3, 3, 3, 3, 3, 2, + 3, 3, 2, 3, 3, 3, 3, 2, + 3, 3, 2, 2, 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 2, 3, + 3, 2, 2, 2, 2, 2, 2, 3, + 3, 2, 3, 3, 3, 3, 3, 2, + 3, 3, 2, 3, 2, 3, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 2, + 3, 2, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 2, + 3, 3, 2, 3, 3, 3, 2, 3, + 3, 3, 3, 2, 3, 2, 3, 3, + 2, 3, 3, 2, 3, 2, 2, 2, + 3, 3, 2, 3, 3, 2, 3, 3, + 2, 3, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 3, 2, 2, 2, + 3, 3, 3, 2, 3, 2, 3, 2, + 3, 3, 3, 3, 3, 2, 3, 3, + 2, 53, 54, 55, 56, 57, 2, 3, + 58, 2, 53, 54, 59, 55, 56, 57, + 2, 3, 2, 3, 2, 3, 2, 3, + 2, 3, 2, 60, 61, 2, 3, 2, + 3, 2, 
62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 72, 73, 74, 75, + 76, 2, 3, 3, 2, 3, 2, 3, + 2, 3, 3, 3, 3, 2, 3, 3, + 2, 2, 2, 3, 3, 2, 3, 2, + 3, 3, 2, 2, 2, 3, 3, 2, + 3, 3, 3, 2, 3, 3, 3, 3, + 2, 3, 3, 3, 2, 3, 3, 2, + 77, 78, 63, 2, 3, 2, 3, 3, + 2, 79, 80, 81, 82, 83, 84, 85, + 2, 86, 87, 88, 89, 90, 2, 3, + 2, 3, 2, 3, 2, 3, 3, 3, + 3, 3, 2, 3, 2, 91, 92, 93, + 94, 95, 96, 97, 98, 99, 100, 101, + 102, 103, 104, 105, 106, 107, 104, 108, + 109, 110, 111, 112, 2, 3, 3, 2, + 2, 3, 2, 2, 3, 3, 3, 2, + 3, 2, 3, 3, 2, 2, 2, 3, + 3, 3, 2, 3, 2, 3, 3, 3, + 2, 3, 3, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 2, 3, 2, 2, + 3, 3, 3, 2, 2, 2, 3, 2, + 3, 3, 2, 3, 2, 3, 3, 2, + 3, 3, 2, 113, 114, 115, 116, 2, + 3, 2, 3, 2, 3, 2, 3, 2, + 117, 2, 3, 2, 118, 119, 120, 121, + 122, 123, 2, 3, 3, 3, 2, 2, + 2, 2, 3, 3, 2, 3, 3, 2, + 2, 2, 3, 3, 3, 3, 2, 124, + 125, 126, 2, 3, 3, 3, 3, 3, + 2, 3, 2, 3, 2, 127, 128, 129, + 2, 130, 2, 2, 130, 2, 130, 130, + 2, 130, 130, 2, 130, 130, 130, 2, + 130, 2, 130, 130, 2, 130, 130, 130, + 130, 2, 130, 130, 2, 2, 130, 130, + 2, 130, 2, 131, 132, 133, 134, 135, + 136, 137, 139, 140, 141, 142, 143, 144, + 145, 146, 147, 148, 149, 150, 22, 151, + 152, 153, 154, 155, 156, 157, 158, 159, + 138, 2, 130, 130, 130, 130, 2, 130, + 2, 130, 130, 2, 3, 3, 2, 2, + 3, 130, 130, 2, 130, 130, 2, 130, + 2, 3, 130, 130, 130, 3, 3, 2, + 130, 130, 130, 2, 2, 2, 130, 2, + 3, 3, 130, 130, 3, 2, 130, 130, + 130, 2, 130, 2, 130, 2, 130, 2, + 3, 2, 2, 130, 130, 2, 130, 2, + 3, 130, 130, 3, 130, 2, 3, 130, + 130, 3, 3, 130, 130, 2, 130, 130, + 3, 2, 130, 130, 130, 3, 3, 3, + 2, 130, 3, 130, 2, 2, 2, 3, + 2, 2, 2, 130, 130, 130, 3, 130, + 3, 2, 130, 130, 3, 3, 3, 130, + 130, 130, 2, 130, 130, 3, 3, 2, + 2, 2, 130, 130, 130, 2, 130, 2, + 3, 130, 130, 130, 130, 3, 130, 3, + 3, 2, 130, 3, 130, 2, 130, 2, + 130, 3, 130, 130, 2, 130, 2, 130, + 130, 130, 130, 3, 2, 3, 130, 2, + 130, 130, 130, 130, 2, 130, 2, 160, + 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 2, 3, 130, 130, + 3, 130, 2, 3, 130, 130, 130, 2, + 130, 3, 130, 130, 130, 2, 130, 2, + 130, 130, 2, 130, 130, 2, 3, 130, + 3, 2, 130, 130, 130, 2, 3, 130, + 2, 130, 130, 2, 130, 130, 3, 130, + 3, 3, 130, 2, 130, 130, 3, 2, + 130, 130, 130, 130, 3, 130, 130, 3, + 130, 2, 130, 2, 3, 3, 3, 130, + 130, 3, 2, 130, 2, 130, 2, 3, + 3, 3, 3, 130, 130, 3, 130, 2, + 3, 130, 130, 3, 130, 3, 2, 3, + 130, 3, 130, 2, 3, 130, 130, 130, + 130, 3, 130, 2, 130, 130, 2, 181, + 182, 183, 184, 185, 2, 130, 58, 2, + 130, 2, 130, 2, 130, 2, 130, 2, + 186, 187, 2, 130, 2, 130, 2, 188, + 189, 190, 191, 66, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 2, 130, + 130, 2, 130, 2, 130, 2, 130, 130, + 130, 3, 3, 130, 2, 130, 2, 130, + 2, 3, 130, 2, 130, 3, 2, 3, + 130, 130, 130, 3, 130, 3, 2, 130, + 2, 3, 130, 3, 130, 3, 130, 2, + 130, 130, 3, 130, 2, 130, 130, 130, + 130, 2, 130, 3, 3, 130, 130, 3, + 2, 130, 130, 3, 130, 3, 2, 202, + 203, 189, 2, 130, 2, 130, 130, 2, + 204, 205, 206, 207, 208, 209, 210, 2, + 211, 212, 213, 214, 215, 2, 130, 2, + 130, 2, 130, 2, 130, 130, 130, 130, + 130, 2, 130, 2, 216, 217, 218, 219, + 220, 221, 222, 223, 224, 225, 226, 227, + 228, 229, 230, 231, 232, 233, 234, 235, + 236, 237, 238, 2, 130, 3, 130, 2, + 2, 130, 3, 2, 3, 3, 2, 130, + 3, 130, 130, 2, 130, 2, 3, 130, + 3, 130, 3, 2, 2, 130, 2, 3, + 130, 130, 3, 130, 3, 130, 2, 130, + 3, 130, 2, 130, 130, 3, 130, 3, + 2, 130, 130, 3, 3, 3, 3, 130, + 130, 2, 3, 130, 2, 3, 3, 130, + 2, 130, 3, 130, 3, 130, 3, 130, + 2, 3, 2, 130, 130, 3, 3, 130, + 
3, 130, 2, 2, 2, 130, 130, 3, + 130, 3, 130, 2, 2, 130, 3, 3, + 130, 3, 130, 2, 3, 130, 3, 130, + 2, 3, 3, 130, 130, 2, 3, 3, + 3, 130, 130, 2, 239, 240, 115, 241, + 2, 130, 2, 130, 2, 130, 2, 242, + 2, 130, 2, 243, 244, 245, 246, 247, + 248, 2, 3, 3, 130, 130, 130, 2, + 2, 2, 2, 130, 130, 2, 130, 130, + 2, 2, 2, 130, 130, 130, 130, 2, + 249, 250, 251, 2, 130, 130, 130, 130, + 130, 2, 130, 2, 130, 2, 252, 2, + 3, 2, 253, 2, 254, 255, 256, 258, + 257, 2, 130, 2, 2, 130, 130, 3, + 2, 3, 2, 259, 2, 260, 261, 262, + 264, 263, 2, 3, 2, 2, 3, 3, + 79, 80, 81, 82, 83, 84, 2, 3, + 1, 265, 265, 3, 1, 265, 266, 3, + 1, 267, 268, 267, 268, 268, 267, 268, + 268, 267, 268, 268, 268, 267, 268, 267, + 268, 268, 267, 268, 268, 268, 268, 267, + 268, 268, 267, 267, 268, 268, 267, 268, + 267, 269, 270, 271, 272, 273, 275, 276, + 277, 279, 280, 281, 282, 283, 284, 285, + 286, 287, 288, 289, 290, 291, 292, 293, + 294, 295, 296, 274, 278, 267, 268, 268, + 268, 268, 267, 268, 267, 268, 268, 267, + 267, 267, 268, 267, 267, 267, 268, 268, + 268, 268, 267, 267, 267, 267, 267, 267, + 268, 267, 267, 267, 267, 267, 267, 268, + 267, 267, 267, 267, 268, 268, 268, 268, + 267, 268, 268, 268, 268, 268, 267, 268, + 268, 267, 268, 268, 268, 268, 267, 268, + 268, 267, 267, 267, 267, 267, 267, 268, + 268, 268, 268, 268, 268, 267, 268, 268, + 267, 267, 267, 267, 267, 267, 268, 268, + 267, 268, 268, 268, 268, 268, 267, 268, + 268, 267, 268, 267, 268, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 267, 268, + 267, 297, 298, 299, 300, 301, 302, 303, + 304, 305, 306, 307, 308, 309, 310, 311, + 312, 313, 314, 315, 316, 317, 267, 268, + 268, 267, 268, 268, 268, 267, 268, 268, + 268, 268, 267, 268, 267, 268, 268, 267, + 268, 268, 267, 268, 267, 267, 267, 268, + 268, 267, 268, 268, 267, 268, 268, 267, + 268, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 268, 267, 267, 267, 268, + 268, 268, 267, 268, 267, 268, 267, 268, + 268, 268, 268, 268, 267, 268, 268, 267, + 318, 319, 320, 321, 322, 267, 268, 323, + 267, 318, 319, 324, 320, 321, 322, 267, + 268, 267, 268, 267, 268, 267, 268, 267, + 268, 267, 325, 326, 267, 268, 267, 268, + 267, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 267, 268, 268, 267, 268, 267, 268, 267, + 268, 268, 268, 268, 267, 268, 268, 267, + 267, 267, 268, 268, 267, 268, 267, 268, + 268, 267, 267, 267, 268, 268, 267, 268, + 268, 268, 267, 268, 268, 268, 268, 267, + 268, 268, 268, 267, 268, 268, 267, 342, + 343, 328, 267, 268, 267, 268, 268, 267, + 344, 345, 346, 347, 348, 349, 350, 267, + 351, 352, 353, 354, 355, 267, 268, 267, + 268, 267, 268, 267, 268, 268, 268, 268, + 268, 267, 268, 267, 356, 357, 358, 359, + 360, 361, 362, 363, 364, 365, 366, 367, + 368, 369, 370, 371, 372, 369, 373, 374, + 375, 376, 377, 267, 268, 268, 267, 267, + 268, 267, 267, 268, 268, 268, 267, 268, + 267, 268, 268, 267, 267, 267, 268, 268, + 268, 267, 268, 267, 268, 268, 268, 267, + 268, 268, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 267, 268, 267, 267, 268, + 268, 268, 267, 267, 267, 268, 267, 268, + 268, 267, 268, 267, 268, 268, 267, 268, + 268, 267, 378, 379, 380, 381, 267, 268, + 267, 268, 267, 268, 267, 268, 267, 382, + 267, 268, 267, 383, 384, 385, 386, 387, + 388, 267, 268, 268, 268, 267, 267, 267, + 267, 268, 268, 267, 268, 268, 267, 267, + 267, 268, 268, 268, 268, 267, 389, 390, + 391, 267, 268, 268, 268, 268, 268, 267, + 268, 267, 268, 267, 392, 393, 394, 267, + 395, 267, 395, 267, 267, 395, 395, 267, + 395, 395, 267, 395, 395, 395, 267, 395, + 
267, 395, 395, 267, 395, 395, 395, 395, + 267, 395, 395, 267, 267, 395, 395, 267, + 395, 267, 396, 397, 398, 399, 400, 401, + 402, 404, 405, 406, 407, 408, 409, 410, + 411, 412, 413, 414, 415, 287, 416, 417, + 418, 419, 420, 421, 422, 423, 424, 403, + 267, 395, 395, 395, 395, 267, 395, 267, + 395, 395, 267, 268, 268, 267, 267, 268, + 395, 395, 267, 395, 395, 267, 395, 267, + 268, 395, 395, 395, 268, 268, 267, 395, + 395, 395, 267, 267, 267, 395, 267, 268, + 268, 395, 395, 268, 267, 395, 395, 395, + 267, 395, 267, 395, 267, 395, 267, 268, + 267, 267, 395, 395, 267, 395, 267, 268, + 395, 395, 268, 395, 267, 268, 395, 395, + 268, 268, 395, 395, 267, 395, 395, 268, + 267, 395, 395, 395, 268, 268, 268, 267, + 395, 268, 395, 267, 267, 267, 268, 267, + 267, 267, 395, 395, 395, 268, 395, 268, + 267, 395, 395, 268, 268, 268, 395, 395, + 395, 267, 395, 395, 268, 268, 267, 267, + 267, 395, 395, 395, 267, 395, 267, 268, + 395, 395, 395, 395, 268, 395, 268, 268, + 267, 395, 268, 395, 267, 395, 267, 395, + 268, 395, 395, 267, 395, 267, 395, 395, + 395, 395, 268, 267, 268, 395, 267, 395, + 395, 395, 395, 267, 395, 267, 425, 426, + 427, 428, 429, 430, 431, 432, 433, 434, + 435, 436, 437, 438, 439, 440, 441, 442, + 443, 444, 445, 267, 268, 395, 395, 268, + 395, 267, 268, 395, 395, 395, 267, 395, + 268, 395, 395, 395, 267, 395, 267, 395, + 395, 267, 395, 395, 267, 268, 395, 268, + 267, 395, 395, 395, 267, 268, 395, 267, + 395, 395, 267, 395, 395, 268, 395, 268, + 268, 395, 267, 395, 395, 268, 267, 395, + 395, 395, 395, 268, 395, 395, 268, 395, + 267, 395, 267, 268, 268, 268, 395, 395, + 268, 267, 395, 267, 395, 267, 268, 268, + 268, 268, 395, 395, 268, 395, 267, 268, + 395, 395, 268, 395, 268, 267, 268, 395, + 268, 395, 267, 268, 395, 395, 395, 395, + 268, 395, 267, 395, 395, 267, 446, 447, + 448, 449, 450, 267, 395, 323, 267, 395, + 267, 395, 267, 395, 267, 395, 267, 451, + 452, 267, 395, 267, 395, 267, 453, 454, + 455, 456, 331, 457, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 267, 395, 395, + 267, 395, 267, 395, 267, 395, 395, 395, + 268, 268, 395, 267, 395, 267, 395, 267, + 268, 395, 267, 395, 268, 267, 268, 395, + 395, 395, 268, 395, 268, 267, 395, 267, + 268, 395, 268, 395, 268, 395, 267, 395, + 395, 268, 395, 267, 395, 395, 395, 395, + 267, 395, 268, 268, 395, 395, 268, 267, + 395, 395, 268, 395, 268, 267, 467, 468, + 454, 267, 395, 267, 395, 395, 267, 469, + 470, 471, 472, 473, 474, 475, 267, 476, + 477, 478, 479, 480, 267, 395, 267, 395, + 267, 395, 267, 395, 395, 395, 395, 395, + 267, 395, 267, 481, 482, 483, 484, 485, + 486, 487, 488, 489, 490, 491, 492, 493, + 494, 495, 496, 497, 498, 499, 500, 501, + 502, 503, 267, 395, 268, 395, 267, 267, + 395, 268, 267, 268, 268, 267, 395, 268, + 395, 395, 267, 395, 267, 268, 395, 268, + 395, 268, 267, 267, 395, 267, 268, 395, + 395, 268, 395, 268, 395, 267, 395, 268, + 395, 267, 395, 395, 268, 395, 268, 267, + 395, 395, 268, 268, 268, 268, 395, 395, + 267, 268, 395, 267, 268, 268, 395, 267, + 395, 268, 395, 268, 395, 268, 395, 267, + 268, 267, 395, 395, 268, 268, 395, 268, + 395, 267, 267, 267, 395, 395, 268, 395, + 268, 395, 267, 267, 395, 268, 268, 395, + 268, 395, 267, 268, 395, 268, 395, 267, + 268, 268, 395, 395, 267, 268, 268, 268, + 395, 395, 267, 504, 505, 380, 506, 267, + 395, 267, 395, 267, 395, 267, 507, 267, + 395, 267, 508, 509, 510, 511, 512, 513, + 267, 268, 268, 395, 395, 395, 267, 267, + 267, 267, 395, 395, 267, 395, 395, 267, + 267, 267, 395, 395, 395, 395, 267, 514, + 515, 516, 267, 395, 395, 395, 395, 395, + 267, 395, 267, 395, 267, 
517, 267, 268, + 267, 518, 267, 519, 520, 521, 523, 522, + 267, 395, 267, 267, 395, 395, 268, 267, + 268, 267, 524, 267, 525, 526, 527, 529, + 528, 267, 268, 267, 267, 268, 268, 344, + 345, 346, 347, 348, 349, 267, 268, 267, + 268, 268, 267, 266, 268, 268, 267, 266, + 268, 267, 266, 268, 267, 531, 532, 530, + 267, 266, 268, 267, 266, 268, 267, 533, + 534, 535, 536, 537, 530, 267, 538, 267, + 297, 298, 299, 533, 534, 539, 300, 301, + 302, 303, 304, 305, 306, 307, 308, 309, + 310, 311, 312, 313, 314, 315, 316, 317, + 267, 540, 538, 297, 298, 299, 541, 535, + 536, 300, 301, 302, 303, 304, 305, 306, + 307, 308, 309, 310, 311, 312, 313, 314, + 315, 316, 317, 267, 540, 267, 542, 540, + 297, 298, 299, 543, 536, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, 317, 267, + 542, 267, 267, 542, 544, 267, 542, 267, + 545, 546, 267, 540, 267, 267, 542, 267, + 540, 267, 540, 327, 328, 329, 330, 331, + 332, 333, 547, 335, 336, 337, 338, 339, + 340, 341, 549, 550, 551, 552, 553, 554, + 549, 550, 551, 552, 553, 554, 549, 548, + 555, 267, 268, 538, 267, 556, 556, 556, + 542, 267, 297, 298, 299, 541, 539, 300, + 301, 302, 303, 304, 305, 306, 307, 308, + 309, 310, 311, 312, 313, 314, 315, 316, + 317, 267, 545, 557, 267, 267, 540, 556, + 556, 542, 556, 556, 542, 556, 556, 556, + 542, 556, 556, 542, 556, 556, 542, 556, + 556, 267, 542, 542, 551, 552, 553, 554, + 548, 549, 551, 552, 553, 554, 548, 549, + 551, 552, 553, 554, 548, 549, 551, 552, + 553, 554, 548, 549, 551, 552, 553, 554, + 548, 549, 551, 552, 553, 554, 548, 549, + 551, 552, 553, 554, 548, 549, 551, 552, + 553, 554, 548, 549, 551, 552, 553, 554, + 548, 549, 550, 555, 552, 553, 554, 548, + 549, 550, 552, 553, 554, 548, 549, 550, + 552, 553, 554, 548, 549, 550, 552, 553, + 554, 548, 549, 550, 552, 553, 554, 548, + 549, 550, 552, 553, 554, 548, 549, 550, + 552, 553, 554, 548, 549, 550, 552, 553, + 554, 548, 549, 550, 552, 553, 554, 548, + 549, 550, 551, 555, 553, 554, 548, 549, + 550, 551, 553, 554, 548, 549, 550, 551, + 553, 554, 548, 549, 550, 551, 553, 554, + 548, 549, 550, 551, 553, 558, 557, 552, + 267, 555, 556, 267, 540, 542, 268, 268, + 267, 559, 560, 561, 562, 563, 530, 267, + 268, 323, 268, 268, 268, 267, 268, 268, + 267, 395, 268, 267, 395, 268, 267, 268, + 395, 268, 267, 530, 267, 564, 566, 567, + 568, 569, 570, 571, 566, 567, 568, 569, + 570, 571, 566, 530, 565, 555, 267, 268, + 538, 268, 267, 540, 540, 540, 542, 267, + 540, 540, 542, 540, 540, 542, 540, 540, + 540, 542, 540, 540, 542, 540, 540, 542, + 540, 540, 267, 542, 568, 569, 570, 571, + 565, 566, 568, 569, 570, 571, 565, 566, + 568, 569, 570, 571, 565, 566, 568, 569, + 570, 571, 565, 566, 568, 569, 570, 571, + 565, 566, 568, 569, 570, 571, 565, 566, + 568, 569, 570, 571, 565, 566, 568, 569, + 570, 571, 565, 566, 568, 569, 570, 571, + 565, 566, 567, 555, 569, 570, 571, 565, + 566, 567, 569, 570, 571, 565, 566, 567, + 569, 570, 571, 565, 566, 567, 569, 570, + 571, 565, 566, 567, 569, 570, 571, 565, + 566, 567, 569, 570, 571, 565, 566, 567, + 569, 570, 571, 565, 566, 567, 569, 570, + 571, 565, 566, 567, 569, 570, 571, 565, + 566, 567, 568, 555, 570, 571, 565, 566, + 567, 568, 570, 571, 565, 566, 567, 568, + 570, 571, 565, 566, 567, 568, 570, 571, + 565, 566, 567, 568, 570, 572, 573, 569, + 267, 555, 540, 268, 540, 542, 268, 542, + 268, 267, 540, 574, 575, 530, 267, 268, + 267, 268, 268, 268, 267, 577, 578, 579, + 580, 576, 267, 581, 582, 530, 267, 266, + 268, 267, 268, 266, 268, 267, 583, 530, + 267, 268, 268, 267, 584, 530, 267, 268, + 268, 267, 
585, 586, 587, 588, 589, 590, + 591, 592, 593, 594, 595, 530, 267, 268, + 596, 267, 344, 345, 346, 347, 348, 349, + 597, 267, 598, 267, 268, 267, 395, 268, + 267, 268, 395, 268, 395, 268, 267, 395, + 395, 268, 395, 268, 395, 268, 395, 268, + 395, 268, 267, 268, 268, 395, 395, 268, + 267, 395, 395, 268, 267, 395, 268, 395, + 268, 267, 268, 395, 268, 395, 268, 267, + 395, 268, 395, 268, 267, 395, 268, 267, + 395, 395, 268, 268, 395, 268, 395, 268, + 395, 267, 576, 267, 599, 576, 267, 322, + 530, 600, 530, 267, 268, 267, 266, 3, + 1, 266, 3, 1, 602, 603, 601, 1, + 266, 3, 1, 266, 3, 1, 604, 605, + 606, 607, 608, 601, 1, 609, 610, 612, + 611, 611, 612, 612, 611, 612, 612, 611, + 612, 612, 612, 611, 612, 611, 612, 612, + 611, 612, 612, 612, 612, 611, 612, 612, + 611, 611, 612, 612, 611, 612, 611, 613, + 614, 615, 616, 617, 619, 620, 621, 623, + 624, 625, 626, 627, 628, 629, 630, 631, + 632, 633, 634, 635, 636, 637, 638, 639, + 640, 618, 622, 611, 612, 612, 612, 612, + 611, 612, 611, 612, 612, 611, 611, 611, + 612, 611, 611, 611, 612, 612, 612, 612, + 611, 611, 611, 611, 611, 611, 612, 611, + 611, 611, 611, 611, 611, 612, 611, 611, + 611, 611, 612, 612, 612, 612, 611, 612, + 612, 612, 612, 612, 611, 612, 612, 611, + 612, 612, 612, 612, 611, 612, 612, 611, + 611, 611, 611, 611, 611, 612, 612, 612, + 612, 612, 612, 611, 612, 612, 611, 611, + 611, 611, 611, 611, 612, 612, 611, 612, + 612, 612, 612, 612, 611, 612, 612, 611, + 612, 611, 612, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 611, 612, 611, 641, + 642, 643, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 611, 612, 612, 611, + 612, 612, 612, 611, 612, 612, 612, 612, + 611, 612, 611, 612, 612, 611, 612, 612, + 611, 612, 611, 611, 611, 612, 612, 611, + 612, 612, 611, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 612, 611, 611, 611, 612, 612, 612, + 611, 612, 611, 612, 611, 612, 612, 612, + 612, 612, 611, 612, 612, 611, 662, 663, + 664, 665, 666, 611, 612, 667, 611, 662, + 663, 668, 664, 665, 666, 611, 612, 611, + 612, 611, 612, 611, 612, 611, 612, 611, + 669, 670, 611, 612, 611, 612, 611, 671, + 672, 673, 674, 675, 676, 677, 678, 679, + 680, 681, 682, 683, 684, 685, 611, 612, + 612, 611, 612, 611, 612, 611, 612, 612, + 612, 612, 611, 612, 612, 611, 611, 611, + 612, 612, 611, 612, 611, 612, 612, 611, + 611, 611, 612, 612, 611, 612, 612, 612, + 611, 612, 612, 612, 612, 611, 612, 612, + 612, 611, 612, 612, 611, 686, 687, 672, + 611, 612, 611, 612, 612, 611, 688, 689, + 690, 691, 692, 693, 694, 611, 695, 696, + 697, 698, 699, 611, 612, 611, 612, 611, + 612, 611, 612, 612, 612, 612, 612, 611, + 612, 611, 700, 701, 702, 703, 704, 705, + 706, 707, 708, 709, 710, 711, 712, 713, + 714, 715, 716, 713, 717, 718, 719, 720, + 721, 611, 612, 612, 611, 611, 612, 611, + 611, 612, 612, 612, 611, 612, 611, 612, + 612, 611, 611, 611, 612, 612, 612, 611, + 612, 611, 612, 612, 612, 611, 612, 612, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 611, 612, 611, 611, 612, 612, 612, + 611, 611, 611, 612, 611, 612, 612, 611, + 612, 611, 612, 612, 611, 612, 612, 611, + 722, 723, 724, 725, 611, 612, 611, 612, + 611, 612, 611, 612, 611, 726, 611, 612, + 611, 727, 728, 729, 730, 731, 732, 611, + 612, 612, 612, 611, 611, 611, 611, 612, + 612, 611, 612, 612, 611, 611, 611, 612, + 612, 612, 612, 611, 733, 734, 735, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 611, 736, 737, 738, 611, 739, 611, + 739, 611, 611, 739, 739, 611, 739, 739, + 611, 739, 739, 
739, 611, 739, 611, 739, + 739, 611, 739, 739, 739, 739, 611, 739, + 739, 611, 611, 739, 739, 611, 739, 611, + 740, 741, 742, 743, 744, 745, 746, 748, + 749, 750, 751, 752, 753, 754, 755, 756, + 757, 758, 759, 631, 760, 761, 762, 763, + 764, 765, 766, 767, 768, 747, 611, 739, + 739, 739, 739, 611, 739, 611, 739, 739, + 611, 612, 612, 611, 611, 612, 739, 739, + 611, 739, 739, 611, 739, 611, 612, 739, + 739, 739, 612, 612, 611, 739, 739, 739, + 611, 611, 611, 739, 611, 612, 612, 739, + 739, 612, 611, 739, 739, 739, 611, 739, + 611, 739, 611, 739, 611, 612, 611, 611, + 739, 739, 611, 739, 611, 612, 739, 739, + 612, 739, 611, 612, 739, 739, 612, 612, + 739, 739, 611, 739, 739, 612, 611, 739, + 739, 739, 612, 612, 612, 611, 739, 612, + 739, 611, 611, 611, 612, 611, 611, 611, + 739, 739, 739, 612, 739, 612, 611, 739, + 739, 612, 612, 612, 739, 739, 739, 611, + 739, 739, 612, 612, 611, 611, 611, 739, + 739, 739, 611, 739, 611, 612, 739, 739, + 739, 739, 612, 739, 612, 612, 611, 739, + 612, 739, 611, 739, 611, 739, 612, 739, + 739, 611, 739, 611, 739, 739, 739, 739, + 612, 611, 612, 739, 611, 739, 739, 739, + 739, 611, 739, 611, 769, 770, 771, 772, + 773, 774, 775, 776, 777, 778, 779, 780, + 781, 782, 783, 784, 785, 786, 787, 788, + 789, 611, 612, 739, 739, 612, 739, 611, + 612, 739, 739, 739, 611, 739, 612, 739, + 739, 739, 611, 739, 611, 739, 739, 611, + 739, 739, 611, 612, 739, 612, 611, 739, + 739, 739, 611, 612, 739, 611, 739, 739, + 611, 739, 739, 612, 739, 612, 612, 739, + 611, 739, 739, 612, 611, 739, 739, 739, + 739, 612, 739, 739, 612, 739, 611, 739, + 611, 612, 612, 612, 739, 739, 612, 611, + 739, 611, 739, 611, 612, 612, 612, 612, + 739, 739, 612, 739, 611, 612, 739, 739, + 612, 739, 612, 611, 612, 739, 612, 739, + 611, 612, 739, 739, 739, 739, 612, 739, + 611, 739, 739, 611, 790, 791, 792, 793, + 794, 611, 739, 667, 611, 739, 611, 739, + 611, 739, 611, 739, 611, 795, 796, 611, + 739, 611, 739, 611, 797, 798, 799, 800, + 675, 801, 802, 803, 804, 805, 806, 807, + 808, 809, 810, 611, 739, 739, 611, 739, + 611, 739, 611, 739, 739, 739, 612, 612, + 739, 611, 739, 611, 739, 611, 612, 739, + 611, 739, 612, 611, 612, 739, 739, 739, + 612, 739, 612, 611, 739, 611, 612, 739, + 612, 739, 612, 739, 611, 739, 739, 612, + 739, 611, 739, 739, 739, 739, 611, 739, + 612, 612, 739, 739, 612, 611, 739, 739, + 612, 739, 612, 611, 811, 812, 798, 611, + 739, 611, 739, 739, 611, 813, 814, 815, + 816, 817, 818, 819, 611, 820, 821, 822, + 823, 824, 611, 739, 611, 739, 611, 739, + 611, 739, 739, 739, 739, 739, 611, 739, + 611, 825, 826, 827, 828, 829, 830, 831, + 832, 833, 834, 835, 836, 837, 838, 839, + 840, 841, 842, 843, 844, 845, 846, 847, + 611, 739, 612, 739, 611, 611, 739, 612, + 611, 612, 612, 611, 739, 612, 739, 739, + 611, 739, 611, 612, 739, 612, 739, 612, + 611, 611, 739, 611, 612, 739, 739, 612, + 739, 612, 739, 611, 739, 612, 739, 611, + 739, 739, 612, 739, 612, 611, 739, 739, + 612, 612, 612, 612, 739, 739, 611, 612, + 739, 611, 612, 612, 739, 611, 739, 612, + 739, 612, 739, 612, 739, 611, 612, 611, + 739, 739, 612, 612, 739, 612, 739, 611, + 611, 611, 739, 739, 612, 739, 612, 739, + 611, 611, 739, 612, 612, 739, 612, 739, + 611, 612, 739, 612, 739, 611, 612, 612, + 739, 739, 611, 612, 612, 612, 739, 739, + 611, 848, 849, 724, 850, 611, 739, 611, + 739, 611, 739, 611, 851, 611, 739, 611, + 852, 853, 854, 855, 856, 857, 611, 612, + 612, 739, 739, 739, 611, 611, 611, 611, + 739, 739, 611, 739, 739, 611, 611, 611, + 739, 739, 739, 739, 611, 858, 859, 860, + 611, 739, 739, 739, 739, 739, 611, 739, + 
611, 739, 611, 861, 611, 612, 611, 862, + 611, 863, 864, 865, 867, 866, 611, 739, + 611, 611, 739, 739, 612, 611, 612, 611, + 868, 611, 869, 870, 871, 873, 872, 611, + 612, 611, 611, 612, 612, 688, 689, 690, + 691, 692, 693, 611, 641, 642, 643, 604, + 605, 874, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 611, 875, 610, 641, + 642, 643, 876, 606, 607, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 611, + 875, 611, 877, 875, 641, 642, 643, 878, + 607, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 661, 611, 877, 611, 609, 877, + 879, 611, 877, 611, 880, 881, 611, 875, + 611, 611, 877, 611, 875, 611, 875, 671, + 672, 673, 674, 675, 676, 677, 882, 679, + 680, 681, 682, 683, 684, 685, 884, 885, + 886, 887, 888, 889, 884, 885, 886, 887, + 888, 889, 884, 883, 890, 611, 612, 610, + 611, 891, 891, 891, 877, 611, 641, 642, + 643, 876, 874, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 611, 880, 892, + 611, 611, 875, 891, 891, 877, 891, 891, + 877, 891, 891, 891, 877, 891, 891, 877, + 891, 891, 877, 891, 891, 611, 877, 877, + 886, 887, 888, 889, 883, 884, 886, 887, + 888, 889, 883, 884, 886, 887, 888, 889, + 883, 884, 886, 887, 888, 889, 883, 884, + 886, 887, 888, 889, 883, 884, 886, 887, + 888, 889, 883, 884, 886, 887, 888, 889, + 883, 884, 886, 887, 888, 889, 883, 884, + 886, 887, 888, 889, 883, 884, 885, 890, + 887, 888, 889, 883, 884, 885, 887, 888, + 889, 883, 884, 885, 887, 888, 889, 883, + 884, 885, 887, 888, 889, 883, 884, 885, + 887, 888, 889, 883, 884, 885, 887, 888, + 889, 883, 884, 885, 887, 888, 889, 883, + 884, 885, 887, 888, 889, 883, 884, 885, + 887, 888, 889, 883, 884, 885, 886, 890, + 888, 889, 883, 884, 885, 886, 888, 889, + 883, 884, 885, 886, 888, 889, 883, 884, + 885, 886, 888, 889, 883, 884, 885, 886, + 888, 893, 892, 887, 611, 890, 891, 611, + 875, 877, 265, 3, 1, 894, 895, 896, + 897, 898, 601, 1, 265, 899, 3, 265, + 3, 265, 3, 1, 901, 900, 900, 901, + 901, 900, 901, 901, 900, 901, 901, 901, + 900, 901, 900, 901, 901, 900, 901, 901, + 901, 901, 900, 901, 901, 900, 900, 901, + 901, 900, 901, 900, 902, 903, 904, 905, + 906, 908, 909, 910, 912, 913, 914, 915, + 916, 917, 918, 919, 920, 921, 922, 923, + 924, 925, 926, 927, 928, 929, 907, 911, + 900, 901, 901, 901, 901, 900, 901, 900, + 901, 901, 900, 900, 900, 901, 900, 900, + 900, 901, 901, 901, 901, 900, 900, 900, + 900, 900, 900, 901, 900, 900, 900, 900, + 900, 900, 901, 900, 900, 900, 900, 901, + 901, 901, 901, 900, 901, 901, 901, 901, + 901, 900, 901, 901, 900, 901, 901, 901, + 901, 900, 901, 901, 900, 900, 900, 900, + 900, 900, 901, 901, 901, 901, 901, 901, + 900, 901, 901, 900, 900, 900, 900, 900, + 900, 901, 901, 900, 901, 901, 901, 901, + 901, 900, 901, 901, 900, 901, 900, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 900, 901, 900, 930, 931, 932, 933, + 934, 935, 936, 937, 938, 939, 940, 941, + 942, 943, 944, 945, 946, 947, 948, 949, + 950, 900, 901, 901, 900, 901, 901, 901, + 900, 901, 901, 901, 901, 900, 901, 900, + 901, 901, 900, 901, 901, 900, 901, 900, + 900, 900, 901, 901, 900, 901, 901, 900, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 901, 901, 900, + 900, 900, 901, 901, 901, 900, 901, 900, + 901, 900, 901, 901, 901, 901, 901, 900, + 901, 901, 900, 951, 952, 953, 954, 955, + 900, 901, 899, 900, 901, 900, 901, 900, + 901, 900, 901, 900, 956, 957, 900, 901, 
+ 900, 901, 900, 958, 959, 960, 961, 962, + 963, 964, 965, 966, 967, 968, 969, 970, + 971, 972, 900, 901, 901, 900, 901, 900, + 901, 900, 901, 901, 901, 901, 900, 901, + 901, 900, 900, 900, 901, 901, 900, 901, + 900, 901, 901, 900, 900, 900, 901, 901, + 900, 901, 901, 901, 900, 901, 901, 901, + 901, 900, 901, 901, 901, 900, 901, 901, + 900, 973, 974, 959, 900, 901, 900, 901, + 901, 900, 975, 976, 977, 978, 979, 980, + 900, 981, 982, 983, 984, 985, 900, 901, + 900, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 986, 987, 988, + 989, 990, 991, 992, 993, 994, 995, 996, + 997, 998, 999, 1000, 1001, 1002, 999, 1003, + 1004, 1005, 1006, 1007, 900, 901, 901, 900, + 900, 901, 900, 900, 901, 901, 901, 900, + 901, 900, 901, 901, 900, 900, 900, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 900, 901, 901, 901, 901, 901, 901, 901, + 900, 901, 900, 901, 900, 901, 900, 900, + 901, 901, 901, 900, 900, 900, 901, 900, + 901, 901, 900, 901, 900, 901, 901, 900, + 901, 901, 900, 1008, 1009, 1010, 1011, 900, + 901, 900, 901, 900, 901, 900, 901, 900, + 1012, 900, 901, 900, 1013, 1014, 1015, 1016, + 1017, 1018, 900, 901, 901, 901, 900, 900, + 900, 900, 901, 901, 900, 901, 901, 900, + 900, 900, 901, 901, 901, 901, 900, 1019, + 1020, 1021, 900, 901, 901, 901, 901, 901, + 900, 901, 900, 901, 900, 1022, 900, 1023, + 1024, 1025, 1027, 1026, 900, 901, 900, 900, + 901, 901, 951, 952, 1028, 953, 954, 955, + 900, 901, 900, 975, 976, 977, 978, 979, + 980, 1029, 900, 1030, 1031, 1032, 900, 1033, + 900, 1033, 900, 900, 1033, 1033, 900, 1033, + 1033, 900, 1033, 1033, 1033, 900, 1033, 900, + 1033, 1033, 900, 1033, 1033, 1033, 1033, 900, + 1033, 1033, 900, 900, 1033, 1033, 900, 1033, + 900, 1034, 1035, 1036, 1037, 1038, 1039, 1040, + 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, + 1050, 1051, 1052, 1053, 920, 1054, 1055, 1056, + 1057, 1058, 1059, 1060, 1061, 1062, 1041, 900, + 1033, 1033, 1033, 1033, 900, 1033, 900, 1033, + 1033, 900, 901, 901, 900, 900, 901, 1033, + 1033, 900, 1033, 1033, 900, 1033, 900, 901, + 1033, 1033, 1033, 901, 901, 900, 1033, 1033, + 1033, 900, 900, 900, 1033, 900, 901, 901, + 1033, 1033, 901, 900, 1033, 1033, 1033, 900, + 1033, 900, 1033, 900, 1033, 900, 901, 900, + 900, 1033, 1033, 900, 1033, 900, 901, 1033, + 1033, 901, 1033, 900, 901, 1033, 1033, 901, + 901, 1033, 1033, 900, 1033, 1033, 901, 900, + 1033, 1033, 1033, 901, 901, 901, 900, 1033, + 901, 1033, 900, 900, 900, 901, 900, 900, + 900, 1033, 1033, 1033, 901, 1033, 901, 900, + 1033, 1033, 901, 901, 901, 1033, 1033, 1033, + 900, 1033, 1033, 901, 901, 900, 900, 900, + 1033, 1033, 1033, 900, 1033, 900, 901, 1033, + 1033, 1033, 1033, 901, 1033, 901, 901, 900, + 1033, 901, 1033, 900, 1033, 900, 1033, 901, + 1033, 1033, 900, 1033, 900, 1033, 1033, 1033, + 1033, 901, 900, 901, 1033, 900, 1033, 1033, + 1033, 1033, 900, 1033, 900, 1063, 1064, 1065, + 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, + 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, + 1082, 1083, 900, 901, 1033, 1033, 901, 1033, + 900, 901, 1033, 1033, 1033, 900, 1033, 901, + 1033, 1033, 1033, 900, 1033, 900, 1033, 1033, + 900, 1033, 1033, 900, 901, 1033, 901, 900, + 1033, 1033, 1033, 900, 901, 1033, 900, 1033, + 1033, 900, 1033, 1033, 901, 1033, 901, 901, + 1033, 900, 1033, 1033, 901, 900, 1033, 1033, + 1033, 1033, 901, 1033, 1033, 901, 1033, 900, + 1033, 900, 901, 901, 901, 1033, 1033, 901, + 900, 1033, 900, 1033, 900, 901, 901, 901, + 901, 1033, 1033, 901, 1033, 900, 901, 1033, + 1033, 901, 1033, 901, 900, 901, 1033, 901, + 1033, 900, 901, 1033, 1033, 1033, 1033, 901, 
+ 1033, 900, 1033, 1033, 900, 1084, 1085, 1086, + 1087, 1088, 900, 1033, 899, 900, 1033, 900, + 1033, 900, 1033, 900, 1033, 900, 1089, 1090, + 900, 1033, 900, 1033, 900, 1091, 1092, 1093, + 1094, 962, 1095, 1096, 1097, 1098, 1099, 1100, + 1101, 1102, 1103, 1104, 900, 1033, 1033, 900, + 1033, 900, 1033, 900, 1033, 1033, 1033, 901, + 901, 1033, 900, 1033, 900, 1033, 900, 901, + 1033, 900, 1033, 901, 900, 901, 1033, 1033, + 1033, 901, 1033, 901, 900, 1033, 900, 901, + 1033, 901, 1033, 901, 1033, 900, 1033, 1033, + 901, 1033, 900, 1033, 1033, 1033, 1033, 900, + 1033, 901, 901, 1033, 1033, 901, 900, 1033, + 1033, 901, 1033, 901, 900, 1105, 1106, 1092, + 900, 1033, 900, 1033, 1033, 900, 1107, 1108, + 1109, 1110, 1111, 1112, 1113, 900, 1114, 1115, + 1116, 1117, 1118, 900, 1033, 900, 1033, 900, + 1033, 900, 1033, 1033, 1033, 1033, 1033, 900, + 1033, 900, 1119, 1120, 1121, 1122, 1123, 1124, + 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, + 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, + 1141, 900, 1033, 901, 1033, 900, 900, 1033, + 901, 900, 901, 901, 900, 1033, 901, 1033, + 1033, 900, 1033, 900, 901, 1033, 901, 1033, + 901, 900, 900, 1033, 900, 901, 1033, 1033, + 901, 1033, 901, 1033, 900, 1033, 901, 1033, + 900, 1033, 1033, 901, 1033, 901, 900, 1033, + 1033, 901, 901, 901, 901, 1033, 1033, 900, + 901, 1033, 900, 901, 901, 1033, 900, 1033, + 901, 1033, 901, 1033, 901, 1033, 900, 901, + 900, 1033, 1033, 901, 901, 1033, 901, 1033, + 900, 900, 900, 1033, 1033, 901, 1033, 901, + 1033, 900, 900, 1033, 901, 901, 1033, 901, + 1033, 900, 901, 1033, 901, 1033, 900, 901, + 901, 1033, 1033, 900, 901, 901, 901, 1033, + 1033, 900, 1142, 1143, 1010, 1144, 900, 1033, + 900, 1033, 900, 1033, 900, 1145, 900, 1033, + 900, 1146, 1147, 1148, 1149, 1150, 1151, 900, + 901, 901, 1033, 1033, 1033, 900, 900, 900, + 900, 1033, 1033, 900, 1033, 1033, 900, 900, + 900, 1033, 1033, 1033, 1033, 900, 1152, 1153, + 1154, 900, 1033, 1033, 1033, 1033, 1033, 900, + 1033, 900, 1033, 900, 1155, 900, 901, 900, + 1156, 900, 1157, 1158, 1159, 1161, 1160, 900, + 1033, 900, 900, 1033, 1033, 901, 900, 901, + 900, 3, 265, 3, 1, 1162, 3, 1, + 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1162, + 1163, 1162, 1162, 1162, 1163, 1162, 1163, 1162, + 1162, 1163, 1162, 1162, 1162, 1162, 1163, 1162, + 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1163, + 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1172, + 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, + 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, + 1189, 1190, 1191, 1192, 1193, 1171, 1163, 1162, + 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, + 1163, 1194, 1194, 1163, 1163, 1194, 1162, 1194, + 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1163, 1194, 1163, 1194, 1194, + 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, + 1163, 1163, 1194, 1194, 1163, 1194, 1163, 1195, + 1196, 1197, 1198, 1199, 1201, 1202, 1203, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1184, + 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, + 1221, 1200, 1204, 1163, 1194, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1194, 1163, 1163, 1163, + 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1194, + 1163, 1163, 1163, 1163, 1163, 1163, 1194, 1163, + 1163, 1163, 1163, 1163, 1163, 1194, 1163, 1163, + 1163, 1163, 1194, 1194, 1194, 1194, 1163, 1194, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1163, 1163, 1163, 1163, 1163, 1194, 1194, 1194, + 1194, 1194, 1194, 1163, 1194, 1194, 1163, 1163, + 1163, 1163, 1163, 1163, 1194, 1194, 1163, 1194, + 1194, 1194, 1194, 1194, 
1163, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1163, 1194, 1163, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1163, 1194, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1194, 1163, 1194, 1194, + 1163, 1194, 1163, 1163, 1163, 1194, 1194, 1163, + 1194, 1194, 1163, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1163, 1163, 1163, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1163, 1194, 1194, 1194, + 1194, 1194, 1163, 1194, 1194, 1163, 1243, 1244, + 1245, 1246, 1247, 1163, 1194, 1248, 1163, 1243, + 1244, 1249, 1245, 1246, 1247, 1163, 1194, 1163, + 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1163, + 1250, 1251, 1163, 1194, 1163, 1194, 1163, 1252, + 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, + 1261, 1262, 1263, 1264, 1265, 1266, 1163, 1194, + 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1194, + 1194, 1194, 1163, 1194, 1194, 1163, 1163, 1163, + 1194, 1194, 1163, 1194, 1163, 1194, 1194, 1163, + 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1194, + 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, + 1194, 1163, 1194, 1194, 1163, 1267, 1268, 1253, + 1163, 1194, 1163, 1194, 1194, 1163, 1269, 1270, + 1271, 1272, 1273, 1274, 1275, 1163, 1276, 1277, + 1278, 1279, 1280, 1163, 1194, 1163, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1194, 1194, 1163, + 1194, 1163, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, + 1295, 1296, 1297, 1294, 1298, 1299, 1300, 1301, + 1302, 1163, 1194, 1194, 1163, 1163, 1194, 1163, + 1163, 1194, 1194, 1194, 1163, 1194, 1163, 1194, + 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1194, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1163, 1194, 1163, 1163, 1194, 1194, 1194, + 1163, 1163, 1163, 1194, 1163, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1163, 1194, 1194, 1163, + 1303, 1304, 1305, 1306, 1163, 1194, 1163, 1194, + 1163, 1194, 1163, 1194, 1163, 1307, 1163, 1194, + 1163, 1308, 1309, 1310, 1311, 1312, 1313, 1163, + 1194, 1194, 1194, 1163, 1163, 1163, 1163, 1194, + 1194, 1163, 1194, 1194, 1163, 1163, 1163, 1194, + 1194, 1194, 1194, 1163, 1314, 1315, 1316, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1163, 1317, 1318, 1319, 1163, 1162, 1163, + 1194, 1163, 1194, 1163, 1320, 1163, 1321, 1322, + 1323, 1325, 1324, 1163, 1194, 1163, 1163, 1194, + 1194, 1269, 1270, 1271, 1272, 1273, 1274, 1163, + 1162, 1163, 1162, 1162, 1163, 1162, 1163, 1194, + 1162, 1162, 1162, 1194, 1194, 1163, 1162, 1162, + 1162, 1163, 1163, 1163, 1162, 1163, 1194, 1194, + 1162, 1162, 1194, 1163, 1162, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1163, 1194, 1163, + 1163, 1162, 1162, 1163, 1162, 1163, 1194, 1162, + 1162, 1194, 1162, 1163, 1194, 1162, 1162, 1194, + 1194, 1162, 1162, 1163, 1162, 1162, 1194, 1163, + 1162, 1162, 1162, 1194, 1194, 1194, 1163, 1162, + 1194, 1162, 1163, 1163, 1163, 1194, 1163, 1163, + 1163, 1162, 1162, 1162, 1194, 1162, 1194, 1163, + 1162, 1162, 1194, 1194, 1194, 1162, 1162, 1162, + 1163, 1162, 1162, 1194, 1194, 1163, 1163, 1163, + 1162, 1162, 1162, 1163, 1162, 1163, 1194, 1162, + 1162, 1162, 1162, 1194, 1162, 1194, 1194, 1163, + 1162, 1194, 1162, 1163, 1162, 1163, 1162, 1194, + 1162, 1162, 1163, 1162, 1163, 1162, 1162, 1162, + 1162, 1194, 1163, 1194, 1162, 1163, 1162, 1162, + 1162, 1162, 1163, 1162, 
1163, 1326, 1327, 1328, + 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, + 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, + 1345, 1346, 1163, 1194, 1162, 1162, 1194, 1162, + 1163, 1194, 1162, 1162, 1162, 1163, 1162, 1194, + 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, + 1163, 1162, 1162, 1163, 1194, 1162, 1194, 1163, + 1162, 1162, 1162, 1163, 1194, 1162, 1163, 1162, + 1162, 1163, 1162, 1162, 1194, 1162, 1194, 1194, + 1162, 1163, 1162, 1162, 1194, 1163, 1162, 1162, + 1162, 1162, 1194, 1162, 1162, 1194, 1162, 1163, + 1162, 1163, 1194, 1194, 1194, 1162, 1162, 1194, + 1163, 1162, 1163, 1162, 1163, 1194, 1194, 1194, + 1194, 1162, 1162, 1194, 1162, 1163, 1194, 1162, + 1162, 1194, 1162, 1194, 1163, 1194, 1162, 1194, + 1162, 1163, 1194, 1162, 1162, 1162, 1162, 1194, + 1162, 1163, 1162, 1162, 1163, 1347, 1348, 1349, + 1350, 1351, 1163, 1162, 1248, 1163, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1163, 1352, 1353, + 1163, 1162, 1163, 1162, 1163, 1354, 1355, 1356, + 1357, 1256, 1358, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1163, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1162, 1162, 1194, + 1194, 1162, 1163, 1162, 1163, 1162, 1163, 1194, + 1162, 1163, 1162, 1194, 1163, 1194, 1162, 1162, + 1162, 1194, 1162, 1194, 1163, 1162, 1163, 1194, + 1162, 1194, 1162, 1194, 1162, 1163, 1162, 1162, + 1194, 1162, 1163, 1162, 1162, 1162, 1162, 1163, + 1162, 1194, 1194, 1162, 1162, 1194, 1163, 1162, + 1162, 1194, 1162, 1194, 1163, 1368, 1369, 1355, + 1163, 1162, 1163, 1162, 1162, 1163, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1163, 1377, 1378, + 1379, 1380, 1381, 1163, 1162, 1163, 1162, 1163, + 1162, 1163, 1162, 1162, 1162, 1162, 1162, 1163, + 1162, 1163, 1382, 1383, 1384, 1385, 1386, 1387, + 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, + 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, + 1404, 1163, 1162, 1194, 1162, 1163, 1163, 1162, + 1194, 1163, 1194, 1194, 1163, 1162, 1194, 1162, + 1162, 1163, 1162, 1163, 1194, 1162, 1194, 1162, + 1194, 1163, 1163, 1162, 1163, 1194, 1162, 1162, + 1194, 1162, 1194, 1162, 1163, 1162, 1194, 1162, + 1163, 1162, 1162, 1194, 1162, 1194, 1163, 1162, + 1162, 1194, 1194, 1194, 1194, 1162, 1162, 1163, + 1194, 1162, 1163, 1194, 1194, 1162, 1163, 1162, + 1194, 1162, 1194, 1162, 1194, 1162, 1163, 1194, + 1163, 1162, 1162, 1194, 1194, 1162, 1194, 1162, + 1163, 1163, 1163, 1162, 1162, 1194, 1162, 1194, + 1162, 1163, 1163, 1162, 1194, 1194, 1162, 1194, + 1162, 1163, 1194, 1162, 1194, 1162, 1163, 1194, + 1194, 1162, 1162, 1163, 1194, 1194, 1194, 1162, + 1162, 1163, 1405, 1406, 1305, 1407, 1163, 1162, + 1163, 1162, 1163, 1162, 1163, 1408, 1163, 1162, + 1163, 1409, 1410, 1411, 1412, 1413, 1414, 1163, + 1194, 1194, 1162, 1162, 1162, 1163, 1163, 1163, + 1163, 1162, 1162, 1163, 1162, 1162, 1163, 1163, + 1163, 1162, 1162, 1162, 1162, 1163, 1415, 1416, + 1417, 1163, 1162, 1162, 1162, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1418, 1163, 1194, 1163, + 1419, 1163, 1420, 1421, 1422, 1424, 1423, 1163, + 1162, 1163, 1163, 1162, 1162, 1162, 3, 1, + 3, 1162, 3, 1, 601, 1, 1425, 1427, + 1428, 1429, 1430, 1431, 1432, 1427, 1428, 1429, + 1430, 1431, 1432, 1427, 601, 1426, 890, 1, + 3, 610, 3, 1, 875, 875, 875, 877, + 1, 875, 875, 877, 875, 875, 877, 875, + 875, 875, 877, 875, 875, 877, 875, 875, + 877, 875, 875, 1, 877, 1429, 1430, 1431, + 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, + 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, + 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, + 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, + 1427, 1429, 1430, 1431, 1432, 1426, 1427, 
1429, + 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, + 1432, 1426, 1427, 1428, 890, 1430, 1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1429, 890, 1431, 1432, 1426, + 1427, 1428, 1429, 1431, 1432, 1426, 1427, 1428, + 1429, 1431, 1432, 1426, 1427, 1428, 1429, 1431, + 1432, 1426, 1427, 1428, 1429, 1431, 1433, 1434, + 1435, 1437, 1430, 1436, 1, 890, 875, 3, + 875, 877, 3, 877, 3, 1, 875, 1, + 265, 265, 1, 265, 1438, 1439, 601, 1, + 265, 3, 1, 3, 3, 265, 3, 1, + 1441, 1442, 1443, 1444, 1440, 1, 1445, 1446, + 601, 1, 266, 3, 1, 3, 266, 3, + 1, 1447, 601, 1, 3, 265, 3, 1, + 1448, 601, 1, 3, 265, 3, 1, 1449, + 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, + 1458, 1459, 601, 1, 3, 1460, 1, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1461, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1461, 1463, + 1464, 1465, 1466, 1467, 1469, 1470, 1471, 1473, + 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, + 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, + 1490, 1468, 1472, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1462, + 1461, 1461, 1461, 1461, 1461, 1461, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1461, 1461, + 1461, 1461, 1462, 1462, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1462, 1462, + 1462, 1462, 1462, 1461, 1462, 1462, 1461, 1461, + 1461, 1461, 1461, 1461, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1461, 1491, + 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, + 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, + 1508, 1509, 1510, 1511, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1462, 1462, + 1461, 1462, 1461, 1461, 1461, 1462, 1462, 1461, + 1462, 1462, 1461, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1461, 1461, 1461, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1461, 1462, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1512, 1513, + 1514, 1515, 1516, 1461, 1462, 1517, 1461, 1512, + 1513, 1518, 1514, 1515, 1516, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1461, + 1519, 1520, 1461, 1462, 1461, 1462, 1461, 1521, + 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, + 1530, 1531, 1532, 1533, 1534, 1535, 1461, 1462, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1462, 1461, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1461, 1462, 1462, 1461, 1536, 1537, 1522, + 1461, 1462, 1461, 1462, 1462, 1461, 1538, 1539, + 1540, 1541, 1542, 1543, 1544, 1461, 1545, 1546, + 1547, 1548, 1549, 1461, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1462, 1462, 1461, + 1462, 1461, 1550, 1551, 1552, 1553, 1554, 1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, + 1564, 1565, 
1566, 1563, 1567, 1568, 1569, 1570, + 1571, 1461, 1462, 1462, 1461, 1461, 1462, 1461, + 1461, 1462, 1462, 1462, 1461, 1462, 1461, 1462, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1461, 1462, 1462, 1462, + 1461, 1461, 1461, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1572, 1573, 1574, 1575, 1461, 1462, 1461, 1462, + 1461, 1462, 1461, 1462, 1461, 1576, 1461, 1462, + 1461, 1577, 1578, 1579, 1580, 1581, 1582, 1461, + 1462, 1462, 1462, 1461, 1461, 1461, 1461, 1462, + 1462, 1461, 1462, 1462, 1461, 1461, 1461, 1462, + 1462, 1462, 1462, 1461, 1583, 1584, 1585, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1586, 1587, 1588, 1461, 1589, 1461, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1589, 1461, 1589, 1461, 1589, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1461, + 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1598, + 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, + 1607, 1608, 1609, 1481, 1610, 1611, 1612, 1613, + 1614, 1615, 1616, 1617, 1618, 1597, 1461, 1589, + 1589, 1589, 1589, 1461, 1589, 1461, 1589, 1589, + 1461, 1462, 1462, 1461, 1461, 1462, 1589, 1589, + 1461, 1589, 1589, 1461, 1589, 1461, 1462, 1589, + 1589, 1589, 1462, 1462, 1461, 1589, 1589, 1589, + 1461, 1461, 1461, 1589, 1461, 1462, 1462, 1589, + 1589, 1462, 1461, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1462, 1461, 1461, + 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1461, 1462, 1589, 1589, 1462, 1462, + 1589, 1589, 1461, 1589, 1589, 1462, 1461, 1589, + 1589, 1589, 1462, 1462, 1462, 1461, 1589, 1462, + 1589, 1461, 1461, 1461, 1462, 1461, 1461, 1461, + 1589, 1589, 1589, 1462, 1589, 1462, 1461, 1589, + 1589, 1462, 1462, 1462, 1589, 1589, 1589, 1461, + 1589, 1589, 1462, 1462, 1461, 1461, 1461, 1589, + 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, + 1589, 1589, 1462, 1589, 1462, 1462, 1461, 1589, + 1462, 1589, 1461, 1589, 1461, 1589, 1462, 1589, + 1589, 1461, 1589, 1461, 1589, 1589, 1589, 1589, + 1462, 1461, 1462, 1589, 1461, 1589, 1589, 1589, + 1589, 1461, 1589, 1461, 1619, 1620, 1621, 1622, + 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, + 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, + 1639, 1461, 1462, 1589, 1589, 1462, 1589, 1461, + 1462, 1589, 1589, 1589, 1461, 1589, 1462, 1589, + 1589, 1589, 1461, 1589, 1461, 1589, 1589, 1461, + 1589, 1589, 1461, 1462, 1589, 1462, 1461, 1589, + 1589, 1589, 1461, 1462, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1462, 1589, 1462, 1462, 1589, + 1461, 1589, 1589, 1462, 1461, 1589, 1589, 1589, + 1589, 1462, 1589, 1589, 1462, 1589, 1461, 1589, + 1461, 1462, 1462, 1462, 1589, 1589, 1462, 1461, + 1589, 1461, 1589, 1461, 1462, 1462, 1462, 1462, + 1589, 1589, 1462, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1462, 1461, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1589, 1589, 1589, 1462, 1589, + 1461, 1589, 1589, 1461, 1640, 1641, 1642, 1643, + 1644, 1461, 1589, 1517, 1461, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1645, 1646, 1461, + 1589, 1461, 1589, 1461, 1647, 1648, 1649, 1650, + 1525, 1651, 1652, 1653, 1654, 1655, 1656, 1657, + 1658, 1659, 1660, 1461, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1589, 1589, 1462, 1462, + 1589, 1461, 1589, 1461, 1589, 1461, 1462, 1589, + 1461, 1589, 1462, 1461, 1462, 1589, 1589, 1589, + 1462, 1589, 1462, 1461, 1589, 1461, 1462, 1589, + 1462, 1589, 
1462, 1589, 1461, 1589, 1589, 1462, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1462, 1462, 1589, 1589, 1462, 1461, 1589, 1589, + 1462, 1589, 1462, 1461, 1661, 1662, 1648, 1461, + 1589, 1461, 1589, 1589, 1461, 1663, 1664, 1665, + 1666, 1667, 1668, 1669, 1461, 1670, 1671, 1672, + 1673, 1674, 1461, 1589, 1461, 1589, 1461, 1589, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1675, 1676, 1677, 1678, 1679, 1680, 1681, + 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, + 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, + 1461, 1589, 1462, 1589, 1461, 1461, 1589, 1462, + 1461, 1462, 1462, 1461, 1589, 1462, 1589, 1589, + 1461, 1589, 1461, 1462, 1589, 1462, 1589, 1462, + 1461, 1461, 1589, 1461, 1462, 1589, 1589, 1462, + 1589, 1462, 1589, 1461, 1589, 1462, 1589, 1461, + 1589, 1589, 1462, 1589, 1462, 1461, 1589, 1589, + 1462, 1462, 1462, 1462, 1589, 1589, 1461, 1462, + 1589, 1461, 1462, 1462, 1589, 1461, 1589, 1462, + 1589, 1462, 1589, 1462, 1589, 1461, 1462, 1461, + 1589, 1589, 1462, 1462, 1589, 1462, 1589, 1461, + 1461, 1461, 1589, 1589, 1462, 1589, 1462, 1589, + 1461, 1461, 1589, 1462, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1462, 1589, 1461, 1462, 1462, + 1589, 1589, 1461, 1462, 1462, 1462, 1589, 1589, + 1461, 1698, 1699, 1574, 1700, 1461, 1589, 1461, + 1589, 1461, 1589, 1461, 1701, 1461, 1589, 1461, + 1702, 1703, 1704, 1705, 1706, 1707, 1461, 1462, + 1462, 1589, 1589, 1589, 1461, 1461, 1461, 1461, + 1589, 1589, 1461, 1589, 1589, 1461, 1461, 1461, + 1589, 1589, 1589, 1589, 1461, 1708, 1709, 1710, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1711, 1461, 1462, 1461, 1712, + 1461, 1713, 1714, 1715, 1717, 1716, 1461, 1589, + 1461, 1461, 1589, 1589, 1462, 1461, 1462, 1461, + 1718, 1461, 1719, 1720, 1721, 1723, 1722, 1461, + 1462, 1461, 1461, 1462, 1462, 1538, 1539, 1540, + 1541, 1542, 1543, 1461, 1538, 1539, 1540, 1541, + 1542, 1543, 1724, 1461, 1725, 1461, 1462, 1461, + 1162, 3, 1, 3, 1162, 3, 1162, 3, + 1, 1162, 1162, 3, 1162, 3, 1162, 3, + 1162, 3, 1162, 3, 1, 3, 3, 1162, + 1162, 3, 1, 1162, 1162, 3, 1, 1162, + 3, 1162, 3, 1, 3, 1162, 3, 1162, + 3, 1, 1162, 3, 1162, 3, 1, 1162, + 3, 1, 1162, 1162, 3, 3, 1162, 3, + 1162, 3, 1162, 1, 1440, 1, 1726, 1440, + 1, 1727, 1435, 1437, 1728, 1437, 601, 1436, + 1, 265, 3, 1, 3, 265, 1, 1, + 1730, 1729, 1733, 1734, 1735, 1736, 1737, 1738, + 1739, 1741, 1742, 1743, 1744, 1745, 1746, 1748, + 1729, 1, 1732, 1740, 1747, 1, 1731, 262, + 264, 1750, 1751, 1752, 1753, 1754, 1755, 1756, + 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, + 1765, 1766, 1767, 1749, 262, 264, 1750, 1751, + 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, + 1760, 1761, 1768, 1763, 1764, 1765, 1769, 1767, + 1749, 256, 258, 1770, 1771, 1772, 1773, 1774, + 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, + 1783, 1784, 1785, 1786, 1787, 1749, 1789, 1790, + 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, + 1799, 1800, 1801, 1803, 268, 530, 576, 1802, + 1788, 527, 529, 1804, 1805, 1806, 1807, 1808, + 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1815, 1822, 1817, 1818, 1819, + 1823, 1821, 1788, 521, 523, 1824, 1825, 1826, + 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, + 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1842, 1816, 1817, + 1843, 1844, 1845, 1846, 1819, 1820, 1821, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 
1814, 1847, 1816, 1817, + 1818, 1848, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1849, 1816, 1817, 1818, 1850, + 1819, 1820, 1821, 1788, 527, 529, 1804, 1805, + 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, + 1814, 1851, 1816, 1817, 1818, 1852, 1819, 1820, + 1821, 1788, 527, 529, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, + 1816, 1817, 1818, 1819, 1853, 1821, 1788, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, + 1870, 1871, 1872, 1873, 1874, 1875, 1854, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1876, 1867, 1868, 1877, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1876, 1878, 1868, 1877, 1873, 1879, 1875, + 1854, 865, 867, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, + 1893, 1894, 1895, 1896, 1897, 1854, 871, 873, + 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, + 1863, 1864, 1865, 1898, 1867, 1868, 1877, 1899, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1900, 1867, 1868, 1877, 1901, 1873, 1874, + 1875, 1854, 871, 873, 1855, 1856, 1857, 1858, + 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1902, + 1867, 1868, 1877, 1903, 1873, 1874, 1875, 1854, + 1025, 1027, 1905, 1906, 1907, 1908, 1909, 1910, + 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, + 1919, 1920, 1921, 1922, 1904, 1025, 1027, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, + 1914, 1915, 1916, 1923, 1918, 1919, 1920, 1924, + 1922, 1904, 1159, 1161, 1925, 1926, 1927, 1928, + 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, + 1937, 1938, 1939, 1940, 1941, 1942, 1904, 1422, + 1424, 1944, 1945, 1946, 1947, 1948, 1949, 1950, + 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, + 1959, 1960, 1961, 1943, 1323, 1325, 1962, 1963, + 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, + 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, + 1943, 1323, 1325, 1962, 1963, 1964, 1965, 1966, + 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1980, + 1975, 1976, 1977, 1981, 1979, 1943, 1721, 1723, + 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, + 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, + 1999, 2000, 1982, 1721, 1723, 1983, 1984, 1985, + 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, + 1994, 1995, 1996, 1997, 1998, 2001, 2000, 1982, + 1721, 1723, 1983, 1984, 1985, 1986, 1987, 1988, + 1989, 1990, 1991, 1992, 1993, 1994, 2002, 1996, + 1997, 1998, 2003, 2000, 1982, 1715, 1717, 2004, + 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, + 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, + 2021, 1982, } var _graphclust_trans_targs []int16 = []int16{ - 1974, 0, 1974, 1975, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, - 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 66, 68, 70, - 71, 72, 1976, 69, 74, 75, 77, 78, - 79, 80, 81, 82, 83, 84, 85, 86, - 87, 88, 89, 90, 91, 93, 94, 96, - 102, 125, 130, 132, 139, 143, 97, 98, - 99, 100, 101, 103, 104, 105, 106, 107, - 108, 109, 110, 111, 112, 113, 114, 115, - 116, 117, 118, 119, 120, 121, 122, 123, - 124, 126, 127, 128, 129, 131, 133, 134, - 135, 136, 137, 138, 140, 141, 142, 144, - 291, 292, 1977, 158, 159, 160, 161, 162, - 163, 164, 165, 166, 167, 168, 169, 170, - 171, 172, 173, 174, 175, 176, 177, 178, - 179, 180, 181, 182, 183, 184, 185, 186, - 188, 189, 190, 191, 
192, 193, 194, 195, - 196, 197, 198, 199, 200, 201, 202, 203, - 204, 205, 206, 207, 208, 210, 211, 212, - 213, 214, 216, 217, 219, 220, 221, 222, - 223, 224, 225, 226, 227, 228, 229, 230, - 231, 232, 234, 235, 237, 243, 267, 271, - 273, 280, 284, 238, 239, 240, 241, 242, - 244, 245, 246, 247, 248, 249, 250, 251, - 252, 253, 254, 255, 256, 257, 258, 259, - 260, 261, 262, 263, 264, 265, 266, 268, - 269, 270, 272, 274, 275, 276, 277, 278, - 279, 281, 282, 283, 285, 287, 288, 289, - 145, 290, 146, 294, 295, 296, 2, 297, - 3, 1974, 1978, 1974, 1979, 315, 316, 317, - 318, 319, 320, 321, 322, 323, 324, 325, - 326, 327, 328, 329, 330, 331, 332, 333, - 334, 335, 336, 337, 338, 339, 340, 341, - 342, 344, 345, 346, 347, 348, 349, 350, - 351, 352, 353, 354, 355, 356, 357, 358, - 359, 360, 361, 362, 363, 364, 366, 368, - 370, 371, 372, 1980, 369, 374, 375, 377, - 378, 379, 380, 381, 382, 383, 384, 385, - 386, 387, 388, 389, 390, 391, 393, 394, - 396, 402, 425, 430, 432, 439, 443, 397, - 398, 399, 400, 401, 403, 404, 405, 406, - 407, 408, 409, 410, 411, 412, 413, 414, - 415, 416, 417, 418, 419, 420, 421, 422, - 423, 424, 426, 427, 428, 429, 431, 433, - 434, 435, 436, 437, 438, 440, 441, 442, - 444, 591, 592, 1981, 458, 459, 460, 461, - 462, 463, 464, 465, 466, 467, 468, 469, - 470, 471, 472, 473, 474, 475, 476, 477, - 478, 479, 480, 481, 482, 483, 484, 485, - 486, 488, 489, 490, 491, 492, 493, 494, - 495, 496, 497, 498, 499, 500, 501, 502, - 503, 504, 505, 506, 507, 508, 510, 511, - 512, 513, 514, 516, 517, 519, 520, 521, - 522, 523, 524, 525, 526, 527, 528, 529, - 530, 531, 532, 534, 535, 537, 543, 567, - 571, 573, 580, 584, 538, 539, 540, 541, - 542, 544, 545, 546, 547, 548, 549, 550, - 551, 552, 553, 554, 555, 556, 557, 558, - 559, 560, 561, 562, 563, 564, 565, 566, - 568, 569, 570, 572, 574, 575, 576, 577, - 578, 579, 581, 582, 583, 585, 587, 588, - 589, 445, 590, 446, 594, 595, 596, 302, - 597, 303, 599, 605, 606, 608, 610, 613, - 616, 640, 1982, 622, 1983, 612, 1984, 615, - 618, 620, 621, 624, 625, 629, 630, 631, - 632, 633, 634, 635, 1985, 628, 639, 642, - 643, 644, 645, 646, 649, 650, 651, 652, - 653, 654, 655, 656, 660, 661, 663, 664, - 647, 666, 669, 671, 673, 667, 668, 670, - 672, 674, 678, 679, 680, 681, 682, 683, - 684, 685, 686, 687, 1986, 676, 677, 690, - 691, 299, 695, 696, 698, 997, 1000, 1003, - 1027, 1974, 1987, 1974, 1988, 712, 713, 714, - 715, 716, 717, 718, 719, 720, 721, 722, - 723, 724, 725, 726, 727, 728, 729, 730, - 731, 732, 733, 734, 735, 736, 737, 738, - 739, 741, 742, 743, 744, 745, 746, 747, - 748, 749, 750, 751, 752, 753, 754, 755, - 756, 757, 758, 759, 760, 761, 763, 765, - 767, 768, 769, 1989, 766, 771, 772, 774, - 775, 776, 777, 778, 779, 780, 781, 782, - 783, 784, 785, 786, 787, 788, 790, 791, - 793, 799, 822, 827, 829, 836, 840, 794, - 795, 796, 797, 798, 800, 801, 802, 803, - 804, 805, 806, 807, 808, 809, 810, 811, - 812, 813, 814, 815, 816, 817, 818, 819, - 820, 821, 823, 824, 825, 826, 828, 830, - 831, 832, 833, 834, 835, 837, 838, 839, - 841, 988, 989, 1990, 855, 856, 857, 858, - 859, 860, 861, 862, 863, 864, 865, 866, - 867, 868, 869, 870, 871, 872, 873, 874, - 875, 876, 877, 878, 879, 880, 881, 882, - 883, 885, 886, 887, 888, 889, 890, 891, - 892, 893, 894, 895, 896, 897, 898, 899, - 900, 901, 902, 903, 904, 905, 907, 908, - 909, 910, 911, 913, 914, 916, 917, 918, - 919, 920, 921, 922, 923, 924, 925, 926, - 927, 928, 929, 931, 932, 934, 940, 964, - 968, 970, 977, 981, 935, 936, 937, 938, - 939, 941, 942, 943, 944, 945, 946, 947, - 948, 949, 950, 951, 952, 953, 
954, 955, - 956, 957, 958, 959, 960, 961, 962, 963, - 965, 966, 967, 969, 971, 972, 973, 974, - 975, 976, 978, 979, 980, 982, 984, 985, - 986, 842, 987, 843, 991, 992, 993, 699, - 994, 700, 1009, 1991, 999, 1992, 1002, 1005, - 1007, 1008, 1011, 1012, 1016, 1017, 1018, 1019, - 1020, 1021, 1022, 1993, 1015, 1026, 1029, 1327, - 1328, 1626, 1627, 1994, 1974, 1995, 1043, 1044, - 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, - 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, - 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, - 1069, 1070, 1072, 1073, 1074, 1075, 1076, 1077, - 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, - 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1094, - 1095, 1096, 1097, 1098, 1100, 1101, 1103, 1104, - 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, - 1113, 1114, 1115, 1116, 1117, 1119, 1120, 1122, - 1128, 1151, 1156, 1158, 1165, 1123, 1124, 1125, - 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134, - 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, - 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, - 1152, 1153, 1154, 1155, 1157, 1159, 1160, 1161, - 1162, 1163, 1164, 1166, 1167, 1168, 1170, 1171, - 1172, 1030, 1173, 1031, 1175, 1177, 1178, 1325, - 1326, 1996, 1192, 1193, 1194, 1195, 1196, 1197, - 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, - 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, - 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1222, - 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, - 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, - 1239, 1240, 1241, 1242, 1244, 1245, 1246, 1247, - 1248, 1250, 1251, 1253, 1254, 1255, 1256, 1257, - 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, - 1266, 1268, 1269, 1271, 1277, 1301, 1305, 1307, - 1314, 1318, 1272, 1273, 1274, 1275, 1276, 1278, - 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, - 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, - 1295, 1296, 1297, 1298, 1299, 1300, 1302, 1303, - 1304, 1306, 1308, 1309, 1310, 1311, 1312, 1313, - 1315, 1316, 1317, 1319, 1321, 1322, 1323, 1179, - 1324, 1180, 1997, 1974, 1342, 1343, 1344, 1345, - 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, - 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, - 1377, 1513, 1514, 1515, 1516, 1517, 1518, 1519, - 1520, 1521, 1998, 1359, 1360, 1361, 1362, 1363, - 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, - 1372, 1373, 1374, 1375, 1376, 1378, 1379, 1380, - 1381, 1382, 1383, 1384, 1385, 1386, 1388, 1389, - 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, - 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, - 1406, 1407, 1408, 1410, 1412, 1414, 1415, 1416, - 1999, 1413, 1418, 1419, 1421, 1422, 1423, 1424, - 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, - 1433, 1434, 1435, 1437, 1438, 1440, 1446, 1469, - 1474, 1476, 1483, 1487, 1441, 1442, 1443, 1444, - 1445, 1447, 1448, 1449, 1450, 1451, 1452, 1453, - 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, - 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1470, - 1471, 1472, 1473, 1475, 1477, 1478, 1479, 1480, - 1481, 1482, 1484, 1485, 1486, 1488, 1489, 1490, - 1492, 1493, 1494, 1346, 1495, 1347, 1523, 1524, - 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, - 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, - 1541, 1542, 1543, 1545, 1546, 1547, 1548, 1549, - 1551, 1552, 1554, 1555, 1556, 1557, 1558, 1559, - 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, - 1569, 1570, 1572, 1578, 1602, 1606, 1608, 1615, - 1619, 1573, 1574, 1575, 1576, 1577, 1579, 1580, - 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, - 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, - 1597, 1598, 1599, 1600, 1601, 1603, 1604, 1605, - 1607, 1609, 1610, 1611, 
1612, 1613, 1614, 1616, - 1617, 1618, 1620, 1622, 1623, 1624, 1329, 1625, - 1330, 1630, 1631, 1632, 1633, 1634, 1635, 1636, - 1637, 1641, 1642, 1643, 1644, 1645, 1647, 1648, - 1628, 1650, 1653, 1655, 1657, 1651, 1652, 1654, - 1656, 1658, 1959, 1960, 1961, 1962, 1963, 1964, - 1965, 1966, 1967, 1968, 2000, 1974, 2001, 1672, - 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, - 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, - 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, - 1697, 1698, 1699, 1701, 1702, 1703, 1704, 1705, - 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, - 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, - 1723, 1725, 1727, 1728, 1729, 2002, 1726, 1731, - 1732, 1734, 1735, 1736, 1737, 1738, 1739, 1740, - 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, - 1750, 1751, 1753, 1759, 1782, 1787, 1789, 1796, - 1800, 1754, 1755, 1756, 1757, 1758, 1760, 1761, - 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, - 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, - 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, - 1788, 1790, 1791, 1792, 1793, 1794, 1795, 1797, - 1798, 1799, 1801, 1948, 1949, 2003, 1815, 1816, - 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, - 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, - 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, - 1841, 1842, 1843, 1845, 1846, 1847, 1848, 1849, - 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, - 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, - 1867, 1868, 1869, 1870, 1871, 1873, 1874, 1876, - 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, - 1885, 1886, 1887, 1888, 1889, 1891, 1892, 1894, - 1900, 1924, 1928, 1930, 1937, 1941, 1895, 1896, - 1897, 1898, 1899, 1901, 1902, 1903, 1904, 1905, - 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, - 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, - 1922, 1923, 1925, 1926, 1927, 1929, 1931, 1932, - 1933, 1934, 1935, 1936, 1938, 1939, 1940, 1942, - 1944, 1945, 1946, 1802, 1947, 1803, 1951, 1952, - 1953, 1659, 1954, 1660, 1957, 1958, 1971, 1972, - 1973, 1974, 1, 1975, 299, 300, 301, 692, - 693, 694, 697, 1028, 1628, 1629, 1638, 1639, - 1640, 1646, 1649, 1969, 1970, 1974, 4, 5, - 6, 7, 8, 9, 10, 11, 12, 13, - 14, 43, 65, 73, 76, 92, 298, 293, - 67, 95, 147, 148, 149, 150, 151, 152, - 153, 154, 155, 156, 157, 187, 209, 215, - 218, 233, 236, 286, 1974, 600, 601, 602, - 603, 604, 607, 641, 648, 657, 658, 659, - 662, 665, 688, 689, 304, 305, 306, 307, - 308, 309, 310, 311, 312, 313, 314, 343, - 365, 373, 376, 392, 598, 593, 367, 395, - 447, 448, 449, 450, 451, 452, 453, 454, - 455, 456, 457, 487, 509, 515, 518, 533, - 536, 586, 609, 623, 636, 637, 638, 611, - 619, 614, 617, 626, 627, 675, 1974, 701, - 702, 703, 704, 705, 706, 707, 708, 709, - 710, 711, 996, 762, 770, 1010, 1023, 1024, - 1025, 789, 995, 990, 740, 773, 764, 792, - 844, 845, 846, 847, 848, 849, 850, 851, - 852, 853, 854, 884, 906, 912, 915, 930, - 933, 983, 998, 1006, 1001, 1004, 1013, 1014, - 1974, 1032, 1033, 1034, 1035, 1036, 1037, 1038, - 1039, 1040, 1041, 1042, 1071, 1174, 1099, 1102, - 1118, 1176, 1169, 1093, 1121, 1181, 1182, 1183, - 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, - 1221, 1243, 1249, 1252, 1267, 1270, 1320, 1974, - 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, - 1339, 1340, 1341, 1522, 1544, 1550, 1553, 1568, - 1571, 1621, 1348, 1349, 1350, 1351, 1352, 1353, - 1354, 1355, 1356, 1357, 1358, 1387, 1409, 1417, - 1420, 1436, 1496, 1491, 1411, 1439, 1974, 1661, - 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, - 1670, 1671, 1700, 1722, 1730, 1733, 1749, 1956, - 1950, 1955, 1724, 1752, 1804, 1805, 1806, 1807, - 1808, 
1809, 1810, 1811, 1812, 1813, 1814, 1844, - 1866, 1872, 1875, 1890, 1893, 1943, + 1974, 0, 1974, 1975, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 66, 68, 70, + 71, 72, 1976, 69, 74, 75, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 93, 94, 96, + 102, 125, 130, 132, 139, 143, 97, 98, + 99, 100, 101, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, + 124, 126, 127, 128, 129, 131, 133, 134, + 135, 136, 137, 138, 140, 141, 142, 144, + 291, 292, 1977, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 210, 211, 212, + 213, 214, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 234, 235, 237, 243, 267, 271, + 273, 280, 284, 238, 239, 240, 241, 242, + 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 268, + 269, 270, 272, 274, 275, 276, 277, 278, + 279, 281, 282, 283, 285, 287, 288, 289, + 145, 290, 146, 294, 295, 296, 2, 297, + 3, 1974, 1978, 1974, 1979, 315, 316, 317, + 318, 319, 320, 321, 322, 323, 324, 325, + 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 342, 344, 345, 346, 347, 348, 349, 350, + 351, 352, 353, 354, 355, 356, 357, 358, + 359, 360, 361, 362, 363, 364, 366, 368, + 370, 371, 372, 1980, 369, 374, 375, 377, + 378, 379, 380, 381, 382, 383, 384, 385, + 386, 387, 388, 389, 390, 391, 393, 394, + 396, 402, 425, 430, 432, 439, 443, 397, + 398, 399, 400, 401, 403, 404, 405, 406, + 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 420, 421, 422, + 423, 424, 426, 427, 428, 429, 431, 433, + 434, 435, 436, 437, 438, 440, 441, 442, + 444, 591, 592, 1981, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 467, 468, 469, + 470, 471, 472, 473, 474, 475, 476, 477, + 478, 479, 480, 481, 482, 483, 484, 485, + 486, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, + 503, 504, 505, 506, 507, 508, 510, 511, + 512, 513, 514, 516, 517, 519, 520, 521, + 522, 523, 524, 525, 526, 527, 528, 529, + 530, 531, 532, 534, 535, 537, 543, 567, + 571, 573, 580, 584, 538, 539, 540, 541, + 542, 544, 545, 546, 547, 548, 549, 550, + 551, 552, 553, 554, 555, 556, 557, 558, + 559, 560, 561, 562, 563, 564, 565, 566, + 568, 569, 570, 572, 574, 575, 576, 577, + 578, 579, 581, 582, 583, 585, 587, 588, + 589, 445, 590, 446, 594, 595, 596, 302, + 597, 303, 599, 605, 606, 608, 610, 613, + 616, 640, 1982, 622, 1983, 612, 1984, 615, + 618, 620, 621, 624, 625, 629, 630, 631, + 632, 633, 634, 635, 1985, 628, 639, 642, + 643, 644, 645, 646, 649, 650, 651, 652, + 653, 654, 655, 656, 660, 661, 663, 664, + 647, 666, 669, 671, 673, 667, 668, 670, + 672, 674, 678, 679, 680, 681, 682, 683, + 684, 685, 686, 687, 1986, 676, 677, 690, + 691, 299, 695, 696, 698, 997, 1000, 1003, + 1027, 1974, 1987, 1974, 1988, 712, 713, 714, + 715, 716, 717, 718, 719, 720, 721, 722, + 723, 724, 725, 726, 727, 728, 729, 730, + 731, 732, 733, 734, 735, 736, 737, 738, + 739, 741, 742, 743, 744, 745, 746, 747, + 748, 749, 750, 751, 752, 753, 754, 755, + 756, 757, 758, 759, 760, 761, 763, 765, + 767, 768, 769, 1989, 766, 771, 772, 774, + 775, 
776, 777, 778, 779, 780, 781, 782, + 783, 784, 785, 786, 787, 788, 790, 791, + 793, 799, 822, 827, 829, 836, 840, 794, + 795, 796, 797, 798, 800, 801, 802, 803, + 804, 805, 806, 807, 808, 809, 810, 811, + 812, 813, 814, 815, 816, 817, 818, 819, + 820, 821, 823, 824, 825, 826, 828, 830, + 831, 832, 833, 834, 835, 837, 838, 839, + 841, 988, 989, 1990, 855, 856, 857, 858, + 859, 860, 861, 862, 863, 864, 865, 866, + 867, 868, 869, 870, 871, 872, 873, 874, + 875, 876, 877, 878, 879, 880, 881, 882, + 883, 885, 886, 887, 888, 889, 890, 891, + 892, 893, 894, 895, 896, 897, 898, 899, + 900, 901, 902, 903, 904, 905, 907, 908, + 909, 910, 911, 913, 914, 916, 917, 918, + 919, 920, 921, 922, 923, 924, 925, 926, + 927, 928, 929, 931, 932, 934, 940, 964, + 968, 970, 977, 981, 935, 936, 937, 938, + 939, 941, 942, 943, 944, 945, 946, 947, + 948, 949, 950, 951, 952, 953, 954, 955, + 956, 957, 958, 959, 960, 961, 962, 963, + 965, 966, 967, 969, 971, 972, 973, 974, + 975, 976, 978, 979, 980, 982, 984, 985, + 986, 842, 987, 843, 991, 992, 993, 699, + 994, 700, 1009, 1991, 999, 1992, 1002, 1005, + 1007, 1008, 1011, 1012, 1016, 1017, 1018, 1019, + 1020, 1021, 1022, 1993, 1015, 1026, 1029, 1327, + 1328, 1626, 1627, 1994, 1974, 1995, 1043, 1044, + 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, + 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, + 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, + 1069, 1070, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1094, + 1095, 1096, 1097, 1098, 1100, 1101, 1103, 1104, + 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, + 1113, 1114, 1115, 1116, 1117, 1119, 1120, 1122, + 1128, 1151, 1156, 1158, 1165, 1123, 1124, 1125, + 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134, + 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, + 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, + 1152, 1153, 1154, 1155, 1157, 1159, 1160, 1161, + 1162, 1163, 1164, 1166, 1167, 1168, 1170, 1171, + 1172, 1030, 1173, 1031, 1175, 1177, 1178, 1325, + 1326, 1996, 1192, 1193, 1194, 1195, 1196, 1197, + 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, + 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1244, 1245, 1246, 1247, + 1248, 1250, 1251, 1253, 1254, 1255, 1256, 1257, + 1258, 1259, 1260, 1261, 1262, 1263, 1264, 1265, + 1266, 1268, 1269, 1271, 1277, 1301, 1305, 1307, + 1314, 1318, 1272, 1273, 1274, 1275, 1276, 1278, + 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, + 1295, 1296, 1297, 1298, 1299, 1300, 1302, 1303, + 1304, 1306, 1308, 1309, 1310, 1311, 1312, 1313, + 1315, 1316, 1317, 1319, 1321, 1322, 1323, 1179, + 1324, 1180, 1997, 1974, 1342, 1343, 1344, 1345, + 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, + 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, + 1377, 1513, 1514, 1515, 1516, 1517, 1518, 1519, + 1520, 1521, 1998, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1378, 1379, 1380, + 1381, 1382, 1383, 1384, 1385, 1386, 1388, 1389, + 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, + 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, + 1406, 1407, 1408, 1410, 1412, 1414, 1415, 1416, + 1999, 1413, 1418, 1419, 1421, 1422, 1423, 1424, + 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, + 1433, 1434, 1435, 1437, 1438, 1440, 1446, 1469, + 1474, 1476, 
1483, 1487, 1441, 1442, 1443, 1444, + 1445, 1447, 1448, 1449, 1450, 1451, 1452, 1453, + 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, + 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1470, + 1471, 1472, 1473, 1475, 1477, 1478, 1479, 1480, + 1481, 1482, 1484, 1485, 1486, 1488, 1489, 1490, + 1492, 1493, 1494, 1346, 1495, 1347, 1523, 1524, + 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, + 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, + 1541, 1542, 1543, 1545, 1546, 1547, 1548, 1549, + 1551, 1552, 1554, 1555, 1556, 1557, 1558, 1559, + 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, + 1569, 1570, 1572, 1578, 1602, 1606, 1608, 1615, + 1619, 1573, 1574, 1575, 1576, 1577, 1579, 1580, + 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, + 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, + 1597, 1598, 1599, 1600, 1601, 1603, 1604, 1605, + 1607, 1609, 1610, 1611, 1612, 1613, 1614, 1616, + 1617, 1618, 1620, 1622, 1623, 1624, 1329, 1625, + 1330, 1630, 1631, 1632, 1633, 1634, 1635, 1636, + 1637, 1641, 1642, 1643, 1644, 1645, 1647, 1648, + 1628, 1650, 1653, 1655, 1657, 1651, 1652, 1654, + 1656, 1658, 1959, 1960, 1961, 1962, 1963, 1964, + 1965, 1966, 1967, 1968, 2000, 1974, 2001, 1672, + 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, + 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, + 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, + 1697, 1698, 1699, 1701, 1702, 1703, 1704, 1705, + 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, + 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, + 1723, 1725, 1727, 1728, 1729, 2002, 1726, 1731, + 1732, 1734, 1735, 1736, 1737, 1738, 1739, 1740, + 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, + 1750, 1751, 1753, 1759, 1782, 1787, 1789, 1796, + 1800, 1754, 1755, 1756, 1757, 1758, 1760, 1761, + 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, + 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, + 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, + 1788, 1790, 1791, 1792, 1793, 1794, 1795, 1797, + 1798, 1799, 1801, 1948, 1949, 2003, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, + 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, + 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, + 1841, 1842, 1843, 1845, 1846, 1847, 1848, 1849, + 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, + 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, + 1867, 1868, 1869, 1870, 1871, 1873, 1874, 1876, + 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1891, 1892, 1894, + 1900, 1924, 1928, 1930, 1937, 1941, 1895, 1896, + 1897, 1898, 1899, 1901, 1902, 1903, 1904, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, + 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, + 1922, 1923, 1925, 1926, 1927, 1929, 1931, 1932, + 1933, 1934, 1935, 1936, 1938, 1939, 1940, 1942, + 1944, 1945, 1946, 1802, 1947, 1803, 1951, 1952, + 1953, 1659, 1954, 1660, 1957, 1958, 1971, 1972, + 1973, 1974, 1, 1975, 299, 300, 301, 692, + 693, 694, 697, 1028, 1628, 1629, 1638, 1639, + 1640, 1646, 1649, 1969, 1970, 1974, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, + 14, 43, 65, 73, 76, 92, 298, 293, + 67, 95, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 187, 209, 215, + 218, 233, 236, 286, 1974, 600, 601, 602, + 603, 604, 607, 641, 648, 657, 658, 659, + 662, 665, 688, 689, 304, 305, 306, 307, + 308, 309, 310, 311, 312, 313, 314, 343, + 365, 373, 376, 392, 598, 593, 367, 395, + 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 487, 509, 515, 518, 533, + 536, 586, 609, 623, 636, 637, 638, 611, + 619, 614, 617, 626, 627, 675, 1974, 701, + 702, 703, 704, 705, 706, 707, 708, 709, + 710, 711, 
996, 762, 770, 1010, 1023, 1024, + 1025, 789, 995, 990, 740, 773, 764, 792, + 844, 845, 846, 847, 848, 849, 850, 851, + 852, 853, 854, 884, 906, 912, 915, 930, + 933, 983, 998, 1006, 1001, 1004, 1013, 1014, + 1974, 1032, 1033, 1034, 1035, 1036, 1037, 1038, + 1039, 1040, 1041, 1042, 1071, 1174, 1099, 1102, + 1118, 1176, 1169, 1093, 1121, 1181, 1182, 1183, + 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, + 1221, 1243, 1249, 1252, 1267, 1270, 1320, 1974, + 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, + 1339, 1340, 1341, 1522, 1544, 1550, 1553, 1568, + 1571, 1621, 1348, 1349, 1350, 1351, 1352, 1353, + 1354, 1355, 1356, 1357, 1358, 1387, 1409, 1417, + 1420, 1436, 1496, 1491, 1411, 1439, 1974, 1661, + 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, + 1670, 1671, 1700, 1722, 1730, 1733, 1749, 1956, + 1950, 1955, 1724, 1752, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1844, + 1866, 1872, 1875, 1890, 1893, 1943, } var _graphclust_trans_actions []byte = []byte{ - 31, 0, 27, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 34, 40, 25, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 40, 0, 40, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 29, 51, 17, 40, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 
0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 51, 0, 51, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 40, 0, 0, 0, 0, - 0, 0, 0, 40, 21, 40, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 40, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 19, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 40, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 40, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 40, 23, 40, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 40, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 40, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 43, 1, 47, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 15, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 13, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 
0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 9, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 7, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 11, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, + 31, 0, 27, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 34, 40, 25, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 40, 0, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 29, 51, 17, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 51, 0, 51, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 40, 21, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 40, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 19, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 40, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 23, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 43, 1, 47, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 15, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 13, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 7, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 0, } var _graphclust_to_state_actions []byte = []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 
0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 37, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 
0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 37, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, } var _graphclust_from_state_actions []byte = []byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 
0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 
0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 3, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 
0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, } var _graphclust_eof_trans []int16 = []int16{ - 0, 0, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 0, 0, 0, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 
268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 268, 268, 268, 268, - 268, 268, 268, 268, 0, 0, 0, 0, - 0, 0, 610, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 610, 612, 612, - 610, 612, 612, 610, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 612, 612, - 612, 612, 612, 612, 612, 612, 610, 612, - 612, 612, 612, 0, 0, 0, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 
901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 901, - 901, 901, 901, 901, 901, 901, 901, 0, - 0, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, - 1164, 1164, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 
1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, - 1462, 1462, 1462, 1462, 1462, 1462, 1462, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1750, - 1750, 1750, 1789, 1789, 1789, 1789, 1789, 1789, - 1789, 1789, 1789, 1855, 1855, 1855, 1855, 1855, - 1855, 1855, 1905, 1905, 1905, 1944, 1944, 1944, - 1983, 1983, 1983, 1983, + 0, 0, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 0, 0, 0, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 
268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 0, 0, 0, 0, + 0, 0, 610, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 610, 612, 612, + 610, 612, 612, 610, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 610, 612, + 612, 612, 612, 0, 0, 0, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 
901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 0, + 0, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 
1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1750, + 1750, 1750, 1789, 1789, 1789, 1789, 1789, 1789, + 1789, 1789, 1789, 1855, 1855, 1855, 1855, 1855, + 1855, 1855, 1905, 1905, 1905, 1944, 1944, 1944, + 1983, 1983, 1983, 1983, } const graphclust_start int = 1974 @@ -4939,340 +4940,337 @@ const graphclust_error int = 0 const graphclust_en_main int = 1974 + // line 14 "grapheme_clusters.rl" + var Error = errors.New("invalid UTF8 text") // ScanGraphemeClusters is a split function for bufio.Scanner that splits // on grapheme cluster boundaries. func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { - if len(data) == 0 { - return 0, nil, nil - } + if len(data) == 0 { + return 0, nil, nil + } - // Ragel state - cs := 0 // Current State - p := 0 // "Pointer" into data + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data pe := len(data) // End-of-data "pointer" - ts := 0 - te := 0 - act := 0 - eof := pe + ts := 0 + te := 0 + act := 0 + eof := pe - // Make Go compiler happy - _ = ts - _ = te - _ = act - _ = eof + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof - startPos := 0 - endPos := 0 + startPos := 0 + endPos := 0 - // line 4976 "grapheme_clusters.go" + +// line 4976 "grapheme_clusters.go" { - cs = graphclust_start - ts = 0 - te = 0 - act = 0 + cs = graphclust_start + ts = 0 + te = 0 + act = 0 } - // line 4984 "grapheme_clusters.go" +// line 4984 "grapheme_clusters.go" { - var _klen int - var _trans int - var _acts int - var _nacts uint - var _keys int - if p == pe { - goto _test_eof - } - if cs == 0 { - goto _out - } - _resume: - _acts = int(_graphclust_from_state_actions[cs]) - _nacts = uint(_graphclust_actions[_acts]) - _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 4: - // line 1 "NONE" + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } +_resume: + _acts = int(_graphclust_from_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts - 1] { + case 4: +// line 1 "NONE" - ts = p +ts = p - // line 5008 "grapheme_clusters.go" - } +// line 5008 "grapheme_clusters.go" } + } - _keys = int(_graphclust_key_offsets[cs]) - _trans = int(_graphclust_index_offsets[cs]) + _keys = int(_graphclust_key_offsets[cs]) + _trans = int(_graphclust_index_offsets[cs]) - _klen = int(_graphclust_single_lengths[cs]) - if _klen > 0 { - _lower := int(_keys) - var _mid int - _upper := int(_keys + _klen - 1) - for { - if _upper < _lower { - break - } + _klen = int(_graphclust_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } - _mid = _lower + ((_upper - _lower) >> 1) - switch { - case 
data[p] < _graphclust_trans_keys[_mid]: - _upper = _mid - 1 - case data[p] > _graphclust_trans_keys[_mid]: - _lower = _mid + 1 - default: - _trans += int(_mid - int(_keys)) - goto _match - } + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _graphclust_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match } - _keys += _klen - _trans += _klen } + _keys += _klen + _trans += _klen + } - _klen = int(_graphclust_range_lengths[cs]) - if _klen > 0 { - _lower := int(_keys) - var _mid int - _upper := int(_keys + (_klen << 1) - 2) - for { - if _upper < _lower { - break - } + _klen = int(_graphclust_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } - _mid = _lower + (((_upper - _lower) >> 1) & ^1) - switch { - case data[p] < _graphclust_trans_keys[_mid]: - _upper = _mid - 2 - case data[p] > _graphclust_trans_keys[_mid+1]: - _lower = _mid + 2 - default: - _trans += int((_mid - int(_keys)) >> 1) - goto _match - } + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _graphclust_trans_keys[_mid + 1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match } - _trans += _klen } + _trans += _klen + } - _match: - _trans = int(_graphclust_indicies[_trans]) - _eof_trans: - cs = int(_graphclust_trans_targs[_trans]) +_match: + _trans = int(_graphclust_indicies[_trans]) +_eof_trans: + cs = int(_graphclust_trans_targs[_trans]) - if _graphclust_trans_actions[_trans] == 0 { - goto _again - } + if _graphclust_trans_actions[_trans] == 0 { + goto _again + } - _acts = int(_graphclust_trans_actions[_trans]) - _nacts = uint(_graphclust_actions[_acts]) + _acts = int(_graphclust_trans_actions[_trans]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 0: - // line 46 "grapheme_clusters.rl" + switch _graphclust_actions[_acts-1] { + case 0: +// line 46 "grapheme_clusters.rl" - startPos = p - case 1: - // line 50 "grapheme_clusters.rl" + startPos = p + + case 1: +// line 50 "grapheme_clusters.rl" - endPos = p - case 5: - // line 1 "NONE" + endPos = p + + case 5: +// line 1 "NONE" - te = p + 1 +te = p+1 - case 6: - // line 54 "grapheme_clusters.rl" + case 6: +// line 54 "grapheme_clusters.rl" - act = 3 - case 7: - // line 54 "grapheme_clusters.rl" +act = 3; + case 7: +// line 54 "grapheme_clusters.rl" - te = p + 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 8: - // line 54 "grapheme_clusters.rl" +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 8: +// line 54 "grapheme_clusters.rl" - te = p + 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 9: - // line 54 "grapheme_clusters.rl" +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 9: +// line 54 "grapheme_clusters.rl" - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 10: - // line 54 "grapheme_clusters.rl" +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 10: +// line 54 "grapheme_clusters.rl" - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 11: - // line 54 "grapheme_clusters.rl" +te = p +p-- +{ + return endPos+1, 
data[startPos:endPos+1], nil + } + case 11: +// line 54 "grapheme_clusters.rl" - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 12: - // line 54 "grapheme_clusters.rl" +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 12: +// line 54 "grapheme_clusters.rl" - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 13: - // line 54 "grapheme_clusters.rl" +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 13: +// line 54 "grapheme_clusters.rl" - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 14: - // line 54 "grapheme_clusters.rl" +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 14: +// line 54 "grapheme_clusters.rl" - te = p - p-- - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 15: - // line 54 "grapheme_clusters.rl" +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 15: +// line 54 "grapheme_clusters.rl" - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 16: - // line 54 "grapheme_clusters.rl" +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 16: +// line 54 "grapheme_clusters.rl" - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 17: - // line 54 "grapheme_clusters.rl" +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 17: +// line 54 "grapheme_clusters.rl" - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 18: - // line 54 "grapheme_clusters.rl" +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 18: +// line 54 "grapheme_clusters.rl" - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 19: - // line 54 "grapheme_clusters.rl" +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 19: +// line 54 "grapheme_clusters.rl" - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 20: - // line 54 "grapheme_clusters.rl" +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 20: +// line 54 "grapheme_clusters.rl" - p = (te) - 1 - { - return endPos + 1, data[startPos : endPos+1], nil - } - case 21: - // line 1 "NONE" +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 21: +// line 1 "NONE" - switch act { - case 0: - { - cs = 0 - goto _again - } - case 3: - { - p = (te) - 1 - - return endPos + 1, data[startPos : endPos+1], nil - } - } + switch act { + case 0: + {cs = 0 +goto _again +} + case 3: + {p = (te) - 1 - // line 5218 "grapheme_clusters.go" - } + return endPos+1, data[startPos:endPos+1], nil + } + } + +// line 5218 "grapheme_clusters.go" } + } - _again: - _acts = int(_graphclust_to_state_actions[cs]) - _nacts = uint(_graphclust_actions[_acts]) +_again: + _acts = int(_graphclust_to_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { _acts++ - for ; _nacts > 0; _nacts-- { - _acts++ - switch _graphclust_actions[_acts-1] { - case 2: - // line 1 "NONE" + switch _graphclust_actions[_acts-1] { + case 2: +// line 1 "NONE" - ts = 0 +ts = 0 - case 3: - // line 1 "NONE" + case 3: +// line 1 "NONE" - act = 0 +act = 0 - // line 5238 "grapheme_clusters.go" - } +// line 5238 "grapheme_clusters.go" } + } - if cs == 0 { - goto _out - } - p++ - if p != pe { - goto _resume - } - _test_eof: - { - } - if p == eof { - if _graphclust_eof_trans[cs] > 0 { - _trans = 
int(_graphclust_eof_trans[cs] - 1) - goto _eof_trans - } + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: {} + if p == eof { + if _graphclust_eof_trans[cs] > 0 { + _trans = int(_graphclust_eof_trans[cs] - 1) + goto _eof_trans } + } - _out: - { - } + _out: {} } - // line 116 "grapheme_clusters.rl" +// line 116 "grapheme_clusters.rl" - // If we fall out here then we were unable to complete a sequence. - // If we weren't able to complete a sequence then either we've - // reached the end of a partial buffer (so there's more data to come) - // or we have an isolated symbol that would normally be part of a - // grapheme cluster but has appeared in isolation here. - if !atEOF { - // Request more - return 0, nil, nil - } + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } - // Just take the first UTF-8 sequence and return that. - _, seqLen := utf8.DecodeRune(data) - return seqLen, data[:seqLen], nil + // Just take the first UTF-8 sequence and return that. + _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 2def23fa1..3b809e847 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -43,7 +43,7 @@ type Config struct { // An optional endpoint URL (hostname only or fully qualified URI) // that overrides the default generated endpoint for a client. Set this - // to `""` to use the default generated endpoint. + // to `nil` or the value to `""` to use the default generated endpoint. // // Note: You must still provide a `Region` value when specifying an // endpoint for a client. @@ -138,7 +138,7 @@ type Config struct { // `ExpectContinueTimeout` for information on adjusting the continue wait // timeout. https://golang.org/pkg/net/http/#Transport // - // You should use this flag to disble 100-Continue if you experience issues + // You should use this flag to disable 100-Continue if you experience issues // with proxies or third party S3 compatible services. S3Disable100Continue *bool @@ -183,7 +183,7 @@ type Config struct { // // Example: // sess := session.Must(session.NewSession(aws.NewConfig() - // .WithEC2MetadataDiableTimeoutOverride(true))) + // .WithEC2MetadataDisableTimeoutOverride(true))) // // svc := s3.New(sess) // @@ -194,7 +194,7 @@ type Config struct { // both IPv4 and IPv6 addressing. // // Setting this for a service which does not support dual stack will fail - // to make requets. It is not recommended to set this value on the session + // to make requests. It is not recommended to set this value on the session // as it will apply to all service clients created with the session. Even // services which don't support dual stack endpoints. // @@ -238,6 +238,7 @@ type Config struct { // EnableEndpointDiscovery will allow for endpoint discovery on operations that // have the definition in its model. By default, endpoint discovery is off. + // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. 
// // Example: // sess := session.Must(session.NewSession(&aws.Config{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index aa902d708..d95a5eb54 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -225,6 +225,8 @@ var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointH if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { r.Error = aws.ErrMissingRegion } else if r.ClientInfo.Endpoint == "" { + // Was any endpoint provided by the user, or one was derived by the + // SDK's endpoint resolver? r.Error = aws.ErrMissingEndpoint } }} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index c75d7bba0..c43b1bc0a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -107,6 +107,13 @@ type Provider interface { IsExpired() bool } +// ProviderWithContext is a Provider that can retrieve credentials with a Context +type ProviderWithContext interface { + Provider + + RetrieveWithContext(Context) (Value, error) +} + // An Expirer is an interface that Providers can implement to expose the expiration // time, if known. If the Provider cannot accurately provide this info, // it should not implement this interface. @@ -166,7 +173,9 @@ type Expiry struct { // the expiration time given to ensure no requests are made with expired // tokens. func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - e.expiration = expiration + // Passed in expirations should have the monotonic clock values stripped. + // This ensures time comparisons will be based on wall-time. + e.expiration = expiration.Round(0) if window > 0 { e.expiration = e.expiration.Add(-window) } @@ -233,7 +242,9 @@ func (c *Credentials) GetWithContext(ctx Context) (Value, error) { // Cannot pass context down to the actual retrieve, because the first // context would cancel the whole group when there is not direct // association of items in the group. 
- resCh := c.sf.DoChan("", c.singleRetrieve) + resCh := c.sf.DoChan("", func() (interface{}, error) { + return c.singleRetrieve(&suppressedContext{ctx}) + }) select { case res := <-resCh: return res.Val.(Value), res.Err @@ -243,12 +254,16 @@ func (c *Credentials) GetWithContext(ctx Context) (Value, error) { } } -func (c *Credentials) singleRetrieve() (interface{}, error) { +func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) { if curCreds := c.creds.Load(); !c.isExpired(curCreds) { return curCreds.(Value), nil } - creds, err := c.provider.Retrieve() + if p, ok := c.provider.(ProviderWithContext); ok { + creds, err = p.RetrieveWithContext(ctx) + } else { + creds, err = c.provider.Retrieve() + } if err == nil { c.creds.Store(creds) } @@ -308,3 +323,19 @@ func (c *Credentials) ExpiresAt() (time.Time, error) { } return expirer.ExpiresAt(), nil } + +type suppressedContext struct { + Context +} + +func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +func (s *suppressedContext) Done() <-chan struct{} { + return nil +} + +func (s *suppressedContext) Err() error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index 43d4ed386..92af5b725 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -7,6 +7,7 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" @@ -87,7 +88,14 @@ func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(* // Error will be returned if the request fails, or unable to extract // the desired credentials. func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - credsList, err := requestCredList(m.Client) + return m.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + credsList, err := requestCredList(ctx, m.Client) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } @@ -97,7 +105,7 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { } credsName := credsList[0] - roleCreds, err := requestCred(m.Client, credsName) + roleCreds, err := requestCred(ctx, m.Client, credsName) if err != nil { return credentials.Value{ProviderName: ProviderName}, err } @@ -130,8 +138,8 @@ const iamSecurityCredsPath = "iam/security-credentials/" // requestCredList requests a list of credentials from the EC2 service. 
// If there are no credentials, or there is an error making or receiving the request -func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadata(iamSecurityCredsPath) +func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath) if err != nil { return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) } @@ -154,8 +162,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { // // If the credentials cannot be found, or there is an error reading the response // and error will be returned. -func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) +func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName)) if err != nil { return ec2RoleCredRespBody{}, awserr.New("EC2RoleRequestError", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index 1a7af53a4..785f30d8e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -116,7 +116,13 @@ func (p *Provider) IsExpired() bool { // Retrieve will attempt to request the credentials from the endpoint the Provider // was configured for. And error will be returned if the retrieval fails. func (p *Provider) Retrieve() (credentials.Value, error) { - resp, err := p.getCredentials() + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. 
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + resp, err := p.getCredentials(ctx) if err != nil { return credentials.Value{ProviderName: ProviderName}, awserr.New("CredentialsEndpointError", "failed to load credentials", err) @@ -148,7 +154,7 @@ type errorOutput struct { Message string `json:"message"` } -func (p *Provider) getCredentials() (*getCredentialsOutput, error) { +func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { op := &request.Operation{ Name: "GetCredentials", HTTPMethod: "GET", @@ -156,6 +162,7 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) { out := &getCredentialsOutput{} req := p.Client.NewRequest(op, nil, out) + req.SetContext(ctx) req.HTTPRequest.Header.Set("Accept", "application/json") if authToken := p.AuthorizationToken; len(authToken) != 0 { req.HTTPRequest.Header.Set("Authorization", authToken) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go index e15514958..22b5c5d9f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -17,8 +17,9 @@ var ( ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) ) -// A SharedCredentialsProvider retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. +// A SharedCredentialsProvider retrieves access key pair (access key ID, +// secret access key, and session token if present) credentials from the current +// user's home directory, and keeps track if those credentials are expired. // // Profile ini file example: $HOME/.aws/credentials type SharedCredentialsProvider struct { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go index 9f37f44bc..6846ef6f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -87,6 +87,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkrand" "github.com/aws/aws-sdk-go/service/sts" ) @@ -118,6 +119,10 @@ type AssumeRoler interface { AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) } +type assumeRolerWithContext interface { + AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error) +} + // DefaultDuration is the default amount of time in minutes that the credentials // will be valid for. var DefaultDuration = time.Duration(15) * time.Minute @@ -164,6 +169,29 @@ type AssumeRoleProvider struct { // size. Policy *string + // The ARNs of IAM managed policies you want to use as managed session policies. + // The policies must exist in the same account as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies can't exceed 2,048 characters. 
+ // + // An AWS conversion compresses the passed session policies and session tags + // into a packed binary format that has a separate limit. Your request can fail + // for this limit even if your plain text meets the other requirements. The + // PackedPolicySize response element indicates by percentage how close the policies + // and tags for your request are to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*sts.PolicyDescriptorType + // The identification number of the MFA device that is associated with the user // who is making the AssumeRole call. Specify this value if the trust policy // of the role being assumed includes a condition that requires MFA authentication. @@ -265,6 +293,11 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(* // Retrieve generates a new set of temporary credentials using STS. func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { // Apply defaults where parameters are not set. if p.RoleSessionName == "" { // Try to work out a role name that will hopefully end up unique. @@ -281,6 +314,7 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { RoleSessionName: aws.String(p.RoleSessionName), ExternalId: p.ExternalID, Tags: p.Tags, + PolicyArns: p.PolicyArns, TransitiveTagKeys: p.TransitiveTagKeys, } if p.Policy != nil { @@ -304,7 +338,15 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { } } - roleOutput, err := p.Client.AssumeRole(input) + var roleOutput *sts.AssumeRoleOutput + var err error + + if c, ok := p.Client.(assumeRolerWithContext); ok { + roleOutput, err = c.AssumeRoleWithContext(ctx, input) + } else { + roleOutput, err = p.Client.AssumeRole(input) + } + if err != nil { return credentials.Value{ProviderName: ProviderName}, err } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go index b20b63394..cefe2a76d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go @@ -28,15 +28,46 @@ const ( // compare test values. 
var now = time.Now +// TokenFetcher should return WebIdentity token bytes or an error +type TokenFetcher interface { + FetchToken(credentials.Context) ([]byte, error) +} + +// FetchTokenPath is a path to a WebIdentity token file +type FetchTokenPath string + +// FetchToken returns a token by reading from the filesystem +func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) { + data, err := ioutil.ReadFile(string(f)) + if err != nil { + errMsg := fmt.Sprintf("unable to read file at %s", f) + return nil, awserr.New(ErrCodeWebIdentity, errMsg, err) + } + return data, nil +} + // WebIdentityRoleProvider is used to retrieve credentials using // an OIDC token. type WebIdentityRoleProvider struct { credentials.Expiry + PolicyArns []*sts.PolicyDescriptorType + + // Duration the STS credentials will be valid for. Truncated to seconds. + // If unset, the assumed role will use AssumeRoleWithWebIdentity's default + // expiry duration. See + // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity + // for more information. + Duration time.Duration - client stsiface.STSAPI + // The amount of time the credentials will be refreshed before they expire. + // This is useful to refresh credentials before they expire to reduce risk of + // using credentials as they expire. If unset, will default to no expiry + // window. ExpiryWindow time.Duration - tokenFilePath string + client stsiface.STSAPI + + tokenFetcher TokenFetcher roleARN string roleSessionName string } @@ -52,9 +83,15 @@ func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName // NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the // provided stsiface.STSAPI func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider { + return NewWebIdentityRoleProviderWithToken(svc, roleARN, roleSessionName, FetchTokenPath(path)) +} + +// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the +// provided stsiface.STSAPI and a TokenFetcher +func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider { return &WebIdentityRoleProvider{ client: svc, - tokenFilePath: path, + tokenFetcher: tokenFetcher, roleARN: roleARN, roleSessionName: roleSessionName, } @@ -64,10 +101,16 @@ func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, p // 'WebIdentityTokenFilePath' specified destination and if that is empty an // error will be returned. func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { - b, err := ioutil.ReadFile(p.tokenFilePath) + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to assume a role from a token which is located at +// 'WebIdentityTokenFilePath' specified destination and if that is empty an +// error will be returned. 
+func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + b, err := p.tokenFetcher.FetchToken(ctx) if err != nil { - errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath) - return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err) + return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err) } sessionName := p.roleSessionName @@ -76,11 +119,22 @@ func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) { // uses unix time in nanoseconds to uniquely identify sessions. sessionName = strconv.FormatInt(now().UnixNano(), 10) } + + var duration *int64 + if p.Duration != 0 { + duration = aws.Int64(int64(p.Duration / time.Second)) + } + req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{ + PolicyArns: p.PolicyArns, RoleArn: &p.roleARN, RoleSessionName: &sessionName, WebIdentityToken: aws.String(string(b)), + DurationSeconds: duration, }) + + req.SetContext(ctx) + // InvalidIdentityToken error is a temporary error that can occur // when assuming an Role with a JWT web identity token. req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index 12897eef6..69fa63dc0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -8,6 +8,7 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/internal/sdkuri" @@ -15,15 +16,16 @@ import ( // getToken uses the duration to return a token for EC2 metadata service, // or an error if the request failed. -func (c *EC2Metadata) getToken(duration time.Duration) (tokenOutput, error) { +func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) { op := &request.Operation{ Name: "GetToken", HTTPMethod: "PUT", - HTTPPath: "/api/token", + HTTPPath: "/latest/api/token", } var output tokenOutput req := c.NewRequest(op, nil, &output) + req.SetContext(ctx) // remove the fetch token handler from the request handlers to avoid infinite recursion req.Handlers.Sign.RemoveByName(fetchTokenHandlerName) @@ -50,15 +52,24 @@ func (c *EC2Metadata) getToken(duration time.Duration) (tokenOutput, error) { // instance metadata service. The content will be returned as a string, or // error if the request failed. func (c *EC2Metadata) GetMetadata(p string) (string, error) { + return c.GetMetadataWithContext(aws.BackgroundContext(), p) +} + +// GetMetadataWithContext uses the path provided to request information from the EC2 +// instance metadata service. The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { op := &request.Operation{ Name: "GetMetadata", HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/meta-data", p), + HTTPPath: sdkuri.PathJoin("/latest/meta-data", p), } output := &metadataOutput{} req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + err := req.Send() return output.Content, err } @@ -67,14 +78,22 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { // there is no user-data setup for the EC2 instance a "NotFoundError" error // code will be returned. 
func (c *EC2Metadata) GetUserData() (string, error) { + return c.GetUserDataWithContext(aws.BackgroundContext()) +} + +// GetUserDataWithContext returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { op := &request.Operation{ Name: "GetUserData", HTTPMethod: "GET", - HTTPPath: "/user-data", + HTTPPath: "/latest/user-data", } output := &metadataOutput{} req := c.NewRequest(op, nil, output) + req.SetContext(ctx) err := req.Send() return output.Content, err @@ -84,14 +103,22 @@ func (c *EC2Metadata) GetUserData() (string, error) { // instance metadata service for dynamic data. The content will be returned // as a string, or error if the request failed. func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) +} + +// GetDynamicDataWithContext uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { op := &request.Operation{ Name: "GetDynamicData", HTTPMethod: "GET", - HTTPPath: sdkuri.PathJoin("/dynamic", p), + HTTPPath: sdkuri.PathJoin("/latest/dynamic", p), } output := &metadataOutput{} req := c.NewRequest(op, nil, output) + req.SetContext(ctx) err := req.Send() return output.Content, err @@ -101,7 +128,14 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { // instance. Error is returned if the request fails or is unable to parse // the response. func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicData("instance-identity/document") + return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) +} + +// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") if err != nil { return EC2InstanceIdentityDocument{}, awserr.New("EC2MetadataRequestError", @@ -120,7 +154,12 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument // IAMInfo retrieves IAM info from the metadata API func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - resp, err := c.GetMetadata("iam/info") + return c.IAMInfoWithContext(aws.BackgroundContext()) +} + +// IAMInfoWithContext retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { + resp, err := c.GetMetadataWithContext(ctx, "iam/info") if err != nil { return EC2IAMInfo{}, awserr.New("EC2MetadataRequestError", @@ -145,7 +184,12 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { // Region returns the region the instance is running in. func (c *EC2Metadata) Region() (string, error) { - ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocument() + return c.RegionWithContext(aws.BackgroundContext()) +} + +// RegionWithContext returns the region the instance is running in. 
+func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) if err != nil { return "", err } @@ -162,7 +206,14 @@ func (c *EC2Metadata) Region() (string, error) { // Can be used to determine if application is running within an EC2 Instance and // the metadata service is available. func (c *EC2Metadata) Available() bool { - if _, err := c.GetMetadata("instance-id"); err != nil { + return c.AvailableWithContext(aws.BackgroundContext()) +} + +// AvailableWithContext returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { + if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { return false } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index dc7e051e0..8f35b3464 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -5,6 +5,10 @@ // variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to // true instructs the SDK to disable the EC2 Metadata client. The client cannot // be used while the environment variable is set to true, (case insensitive). +// +// The endpoint of the EC2 IMDS client can be configured via the environment +// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +// Session. See aws/session#Options.EC2IMDSEndpoint for more details. package ec2metadata import ( @@ -12,6 +16,7 @@ import ( "errors" "io" "net/http" + "net/url" "os" "strconv" "strings" @@ -69,6 +74,9 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { // a client when not using a session. Generally using just New with a session // is preferred. // +// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS +// client is able to communicate with the EC2 IMDS API. +// // If an unmodified HTTP client is provided from the stdlib default, or no client // the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. // To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. @@ -86,6 +94,15 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg.MaxRetries = aws.Int(2) } + if u, err := url.Parse(endpoint); err == nil { + // Remove path from the endpoint since it will be added by requests. + // This is an artifact of the SDK adding `/latest` to the endpoint for + // EC2 IMDS, but this is now moved to the operation definition. 
+ u.Path = "" + u.RawPath = "" + endpoint = u.String() + } + svc := &EC2Metadata{ Client: client.New( cfg, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go index 663372a91..4b29f190b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go @@ -46,7 +46,7 @@ func (t *tokenProvider) fetchTokenHandler(r *request.Request) { return } - output, err := t.client.getToken(t.configuredTTL) + output, err := t.client.getToken(r.Context(), t.configuredTTL) if err != nil { @@ -87,6 +87,7 @@ func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) { // If the error code status is 401, we enable the token provider if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil && e.StatusCode() == http.StatusUnauthorized { + t.token.Store(ec2Token{}) atomic.StoreUint32(&t.disabled, 0) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 343a2106f..654fb1ad5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -93,7 +93,7 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol } func custAddS3DualStack(p *partition) { - if p.ID != "aws" { + if !(p.ID == "aws" || p.ID == "aws-cn" || p.ID == "aws-us-gov") { return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 0a0278209..8b9fce363 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -17,6 +17,7 @@ const ( // AWS Standard partition's regions. const ( + AfSouth1RegionID = "af-south-1" // Africa (Cape Town). ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). @@ -24,11 +25,12 @@ const ( ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). CaCentral1RegionID = "ca-central-1" // Canada (Central). - EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). - EuNorth1RegionID = "eu-north-1" // EU (Stockholm). - EuWest1RegionID = "eu-west-1" // EU (Ireland). - EuWest2RegionID = "eu-west-2" // EU (London). - EuWest3RegionID = "eu-west-3" // EU (Paris). + EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). + EuSouth1RegionID = "eu-south-1" // Europe (Milan). + EuWest1RegionID = "eu-west-1" // Europe (Ireland). + EuWest2RegionID = "eu-west-2" // Europe (London). + EuWest3RegionID = "eu-west-3" // Europe (Paris). MeSouth1RegionID = "me-south-1" // Middle East (Bahrain). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). @@ -46,7 +48,7 @@ const ( // AWS GovCloud (US) partition's regions. const ( UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). - UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). ) // AWS ISO (US) partition's regions. 
@@ -97,7 +99,7 @@ var awsPartition = partition{ DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me)\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$") return reg }(), }, @@ -107,6 +109,9 @@ var awsPartition = partition{ SignatureVersions: []string{"v4"}, }, Regions: regions{ + "af-south-1": region{ + Description: "Africa (Cape Town)", + }, "ap-east-1": region{ Description: "Asia Pacific (Hong Kong)", }, @@ -129,19 +134,22 @@ var awsPartition = partition{ Description: "Canada (Central)", }, "eu-central-1": region{ - Description: "EU (Frankfurt)", + Description: "Europe (Frankfurt)", }, "eu-north-1": region{ - Description: "EU (Stockholm)", + Description: "Europe (Stockholm)", + }, + "eu-south-1": region{ + Description: "Europe (Milan)", }, "eu-west-1": region{ - Description: "EU (Ireland)", + Description: "Europe (Ireland)", }, "eu-west-2": region{ - Description: "EU (London)", + Description: "Europe (London)", }, "eu-west-3": region{ - Description: "EU (Paris)", + Description: "Europe (Paris)", }, "me-south-1": region{ Description: "Middle East (Bahrain)", @@ -172,6 +180,7 @@ var awsPartition = partition{ "access-analyzer": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -181,20 +190,52 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "acm": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -210,6 +251,7 @@ var awsPartition = partition{ }, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -250,6 +292,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -259,6 +302,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": 
endpoint{}, "eu-west-3": endpoint{}, @@ -300,9 +344,42 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "api.detective": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "api.ecr": service{ Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, "ap-east-1": endpoint{ Hostname: "api.ecr.ap-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -357,6 +434,12 @@ var awsPartition = partition{ Region: "eu-north-1", }, }, + "eu-south-1": endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, "eu-west-1": endpoint{ Hostname: "api.ecr.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -375,6 +458,54 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + "fips-dkr-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-dkr-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-dkr-us-west-1": endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-dkr-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "me-south-1": endpoint{ Hostname: "api.ecr.me-south-1.amazonaws.com", CredentialScope: credentialScope{ @@ -413,6 +544,29 @@ var awsPartition = partition{ }, }, }, + "api.elastic-inference": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + "ap-northeast-2": endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + }, + "eu-west-1": endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + }, + "us-east-1": endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", + }, + "us-east-2": endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + "us-west-2": endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", + }, + }, + }, "api.mediatailor": service{ Endpoints: endpoints{ @@ 
-439,6 +593,7 @@ var awsPartition = partition{ "api.sagemaker": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -448,6 +603,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -486,6 +642,7 @@ var awsPartition = partition{ "apigateway": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -495,6 +652,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -506,11 +664,32 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "appflow": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "application-autoscaling": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -520,6 +699,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -564,6 +744,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, @@ -586,11 +767,17 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -606,6 +793,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -622,6 +810,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -631,6 +820,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -647,6 +837,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": 
endpoint{}, @@ -654,8 +845,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -697,15 +892,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.batch.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.batch.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.batch.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.batch.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "budgets": service{ @@ -754,6 +974,7 @@ var awsPartition = partition{ "cloud9": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -762,10 +983,15 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -786,6 +1012,7 @@ var awsPartition = partition{ "cloudformation": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -795,15 +1022,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "cloudformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "cloudfront": service{ @@ -842,6 +1094,7 @@ var 
awsPartition = partition{ }, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -851,6 +1104,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -880,6 +1134,7 @@ var awsPartition = partition{ "cloudtrail": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -889,14 +1144,54 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cloudtrail-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cloudtrail-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "cloudtrail-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cloudtrail-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codeartifact": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -912,6 +1207,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -959,6 +1255,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -979,6 +1276,7 @@ var awsPartition = partition{ "codedeploy": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -988,6 +1286,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1037,11 +1336,41 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "codepipeline-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "codepipeline-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "codepipeline-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "codepipeline-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "codepipeline-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "codestar": service{ @@ -1062,7 +1391,7 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "cognito-identity": service{ + "codestar-connections": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -1072,13 +1401,52 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-identity-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-identity-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "cognito-idp": service{ Endpoints: endpoints{ @@ -1091,9 +1459,27 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "cognito-idp-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "cognito-idp-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "cognito-sync": service{ @@ -1126,9 +1512,27 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehend-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehend-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: 
"comprehend-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "comprehendmedical": service{ @@ -1138,9 +1542,27 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "config": service{ @@ -1193,6 +1615,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -1226,6 +1649,7 @@ var awsPartition = partition{ "datasync": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1235,6 +1659,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1303,6 +1728,7 @@ var awsPartition = partition{ "directconnect": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1312,27 +1738,58 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "discovery": service{ Endpoints: endpoints{ - "eu-central-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "dms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": 
endpoint{}, @@ -1340,17 +1797,24 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "dms-fips": endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "docdb": service{ @@ -1439,6 +1903,7 @@ var awsPartition = partition{ "ds": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1448,15 +1913,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "dynamodb": service{ @@ -1464,6 +1960,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1479,6 +1976,7 @@ var awsPartition = partition{ }, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1521,11 +2019,10 @@ var awsPartition = partition{ }, }, }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "ebs": service{ + Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1535,6 +2032,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -1546,20 +2044,12 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - 
"ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, - }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, }, - }, - "ecs": service{ - Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1569,38 +2059,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticache": service{ - - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "fips": endpoint{ - Hostname: "elasticache-fips.us-west-1.amazonaws.com", + "fips-ca-central-1": endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, }, + "fips-us-west-2": endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, @@ -1609,32 +2101,21 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "elasticbeanstalk": service{ + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, }, }, - "elasticfilesystem": service{ + "ecs": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1644,22 +2125,48 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - 
"us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "elasticloadbalancing": service{ + "eks": service{ Defaults: endpoint{ - Protocols: []string{"https"}, + Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1669,84 +2176,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticmapreduce": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-east-1": endpoint{}, - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", + "fips-us-east-1": endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, }, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", + "fips-us-east-2": endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elastictranscoder": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "email": service{ - - Endpoints: endpoints{ - "ap-south-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "entitlement.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", + "fips-us-west-2": endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + 
"us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "es": service{ + "elasticache": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1756,11 +2219,12 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, "fips": endpoint{ - Hostname: "es-fips.us-west-1.amazonaws.com", + Hostname: "elasticache-fips.us-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-1", }, @@ -1773,9 +2237,10 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "events": service{ + "elasticbeanstalk": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1785,20 +2250,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "firehose": service{ + "elasticfilesystem": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1808,46 +2299,540 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "fms": service{ - Defaults: endpoint{ - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, + "fips-af-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: 
"elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": 
endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, + "email": service{ + + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + 
"ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + 
"fips-ap-northeast-1": endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "forecast": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1858,7 +2843,11 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1868,13 +2857,19 @@ var awsPartition = partition{ "fsx": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, 
"us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1905,6 +2900,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1914,15 +2910,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "glue": service{ @@ -1937,15 +2964,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "greengrass": service{ @@ -1970,7 +3022,22 @@ var awsPartition = partition{ "groundstation": service{ Endpoints: endpoints{ - "eu-north-1": endpoint{}, + "af-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "fips-us-east-2": endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, "me-south-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -1982,6 +3049,7 @@ var awsPartition = partition{ Protocols: 
[]string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1991,6 +3059,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2032,6 +3101,12 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "honeycode": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, "iam": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -2043,6 +3118,30 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + "iam-fips": endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "identitystore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "importexport": service{ @@ -2071,10 +3170,34 @@ var awsPartition = partition{ "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "iot": service{ @@ -2108,6 +3231,7 @@ var awsPartition = partition{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, @@ -2245,6 +3369,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2259,6 +3384,7 @@ var awsPartition = partition{ "kinesis": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2268,15 +3394,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + 
Hostname: "kinesis-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "kinesisanalytics": service{ @@ -2291,6 +3442,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2324,6 +3476,7 @@ var awsPartition = partition{ "kms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2333,6 +3486,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2354,8 +3508,12 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -2365,6 +3523,7 @@ var awsPartition = partition{ "lambda": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2374,20 +3533,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "license-manager": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2397,15 +3582,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": 
endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "lightsail": service{ @@ -2429,6 +3639,7 @@ var awsPartition = partition{ "logs": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2438,15 +3649,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "machinelearning": service{ @@ -2456,10 +3692,79 @@ var awsPartition = partition{ "us-east-1": endpoint{}, }, }, + "macie": service{ + + Endpoints: endpoints{ + "fips-us-east-1": endpoint{ + Hostname: "macie-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "macie2": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: 
"macie2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "managedblockchain": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, }, }, @@ -2500,14 +3805,45 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "medialive": service{ @@ -2523,10 +3859,28 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, "mediapackage": service{ @@ -2538,6 +3892,7 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2556,6 +3911,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -2567,6 +3923,7 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2576,6 +3933,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, 
"eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2590,8 +3948,13 @@ var awsPartition = partition{ "mgh": service{ Endpoints: endpoints{ - "eu-central-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "mobileanalytics": service{ @@ -2607,8 +3970,12 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -2618,6 +3985,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -2627,15 +3995,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "mq": service{ @@ -2650,6 +4043,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -2698,6 +4092,12 @@ var awsPartition = partition{ "neptune": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "rds.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, "ap-northeast-1": endpoint{ Hostname: "rds.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2770,6 +4170,12 @@ var awsPartition = partition{ Region: "me-south-1", }, }, + "sa-east-1": endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, "us-east-1": endpoint{ Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2782,6 +4188,12 @@ var awsPartition = partition{ Region: "us-east-2", }, }, + "us-west-1": endpoint{ + Hostname: "rds.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, "us-west-2": endpoint{ Hostname: "rds.us-west-2.amazonaws.com", CredentialScope: credentialScope{ @@ -2793,6 +4205,24 @@ var awsPartition = partition{ "oidc": service{ Endpoints: endpoints{ + 
"ap-northeast-1": endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, "ap-southeast-1": endpoint{ Hostname: "oidc.ap-southeast-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2817,6 +4247,12 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + "eu-north-1": endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, "eu-west-1": endpoint{ Hostname: "oidc.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2894,27 +4330,67 @@ var awsPartition = partition{ Region: "us-east-1", }, }, + "fips-aws-global": endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, "outposts": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "pinpoint": service{ @@ -2924,10 +4400,14 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "fips-us-east-1": endpoint{ Hostname: "pinpoint-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2969,12 +4449,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: 
"polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "portal.sso": service{ @@ -3065,6 +4569,7 @@ var awsPartition = partition{ "ram": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3074,6 +4579,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3088,6 +4594,7 @@ var awsPartition = partition{ "rds": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3097,11 +4604,42 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, + "rds-fips.ca-central-1": endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rds-fips.us-east-1": endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rds-fips.us-east-2": endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rds-fips.us-west-1": endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rds-fips.us-west-2": endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, "us-east-1": endpoint{ SSLCommonName: "{service}.{dnsSuffix}", }, @@ -3113,6 +4651,7 @@ var awsPartition = partition{ "redshift": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3122,15 +4661,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "rekognition": service{ @@ -3141,18 +4711,50 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "rekognition-fips.ca-central-1": endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "rekognition-fips.us-east-1": endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "rekognition-fips.us-east-2": endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "rekognition-fips.us-west-1": endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "rekognition-fips.us-west-2": endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "resource-groups": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3162,6 +4764,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3233,6 +4836,7 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3242,9 +4846,12 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -3258,8 +4865,12 @@ var awsPartition = partition{ }, }, Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, @@ -3267,6 +4878,7 @@ var awsPartition = partition{ "runtime.sagemaker": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3276,6 +4888,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3322,7 +4935,8 @@ var awsPartition = partition{ DualStackHostname: 
"{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ - "ap-east-1": endpoint{}, + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{ Hostname: "s3.ap-northeast-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -3347,6 +4961,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{ Hostname: "s3.eu-west-1.amazonaws.com", SignatureVersions: []string{"s3", "s3v4"}, @@ -3431,6 +5046,13 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + "ca-central-1-fips": endpoint{ + Hostname: "s3-control-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, "eu-central-1": endpoint{ Hostname: "s3-control.eu-central-1.amazonaws.com", SignatureVersions: []string{"s3v4"}, @@ -3547,10 +5169,22 @@ var awsPartition = partition{ "schemas": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -3575,6 +5209,7 @@ var awsPartition = partition{ "secretsmanager": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3584,6 +5219,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3622,6 +5258,7 @@ var awsPartition = partition{ "securityhub": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3631,15 +5268,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "securityhub-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "securityhub-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "securityhub-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "serverlessrepo": service{ @@ -3706,6 +5368,7 @@ var awsPartition = partition{ "servicecatalog": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, 
"ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3715,6 +5378,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3773,6 +5437,33 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "servicequotas": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "af-south-1": endpoint{}, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "session.qldb": service{ Endpoints: endpoints{ @@ -3788,18 +5479,31 @@ var awsPartition = partition{ }, }, "shield": service{ - IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, Defaults: endpoint{ SSLCommonName: "shield.us-east-1.amazonaws.com", Protocols: []string{"https"}, }, Endpoints: endpoints{ - "us-east-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-aws-global": endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, "sms": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3809,20 +5513,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "snowball": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -3830,14 +5560,112 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": 
endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ap-northeast-1": endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-northeast-3": endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "sns": service{ @@ -3845,6 +5673,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3854,15 +5683,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": 
endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "sqs": service{ @@ -3871,6 +5725,7 @@ var awsPartition = partition{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3880,6 +5735,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3920,6 +5776,7 @@ var awsPartition = partition{ "ssm": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3929,20 +5786,70 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "ssm-facade-fips-us-east-1": endpoint{ + Hostname: "ssm-facade-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "ssm-facade-fips-us-east-2": endpoint{ + Hostname: "ssm-facade-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "ssm-facade-fips-us-west-1": endpoint{ + Hostname: "ssm-facade-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "ssm-facade-fips-us-west-2": endpoint{ + Hostname: "ssm-facade-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "states": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3952,20 +5859,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - 
"us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "storagegateway": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3975,15 +5908,22 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips": endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "streams.dynamodb": service{ @@ -4054,6 +5994,7 @@ var awsPartition = partition{ PartitionEndpoint: "aws-global", Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4069,6 +6010,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -4119,6 +6061,7 @@ var awsPartition = partition{ "swf": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4128,20 +6071,46 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + 
"us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "tagging": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4151,6 +6120,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -4178,12 +6148,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "transcribestreaming": service{ @@ -4211,11 +6205,41 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-ca-central-1": endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "translate": service{ @@ -4242,55 +6266,285 @@ var awsPartition = partition{ Region: "us-east-1", }, }, - "us-east-2": endpoint{}, - "us-east-2-fips": endpoint{ - Hostname: "translate-fips.us-east-2.amazonaws.com", + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-fips": endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-east-1", + }, + }, + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "af-south-1": endpoint{ + Hostname: "waf-regional.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "ap-east-1": endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-south-1": endpoint{ + Hostname: "waf-regional.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-af-south-1": endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "fips-ap-northeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "fips-ap-northeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "fips-ap-south-1": endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "fips-ap-southeast-1": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "fips-ap-southeast-2": endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "fips-ca-central-1": endpoint{ + 
Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "fips-eu-central-1": endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "fips-eu-north-1": endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "fips-eu-south-1": endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + "fips-eu-west-1": endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "fips-eu-west-2": endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "fips-eu-west-3": endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "fips-me-south-1": endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "fips-sa-east-1": endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "fips-us-east-1": endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-2", }, }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - "us-west-2-fips": endpoint{ - Hostname: "translate-fips.us-west-2.amazonaws.com", + "fips-us-west-1": endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", CredentialScope: credentialScope{ Region: "us-west-2", }, }, - }, - }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", + "me-south-1": endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - }, - }, - "waf-regional": service{ - - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-2": endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, "workdocs": service{ @@ -4300,8 +6554,20 @@ var awsPartition = partition{ "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "workmail": service{ @@ -4325,14 +6591,27 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "xray": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -4342,6 +6621,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -4385,6 +6665,13 @@ var awscnPartition = partition{ }, }, Services: services{ + "access-analyzer": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "acm": service{ Endpoints: endpoints{ @@ -4409,6 +6696,13 @@ var awscnPartition = partition{ }, }, }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "apigateway": service{ Endpoints: endpoints{ @@ -4434,6 +6728,7 @@ var awscnPartition = partition{ "athena": service{ Endpoints: endpoints{ + "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, }, @@ -4446,6 +6741,15 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "autoscaling-plans": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "backup": service{ Endpoints: endpoints{ @@ -4460,6 +6764,32 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "budgets": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "budgets.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "cloudformation": service{ Endpoints: endpoints{ @@ -4495,6 +6825,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "codecommit": service{ + + Endpoints: 
endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "codedeploy": service{ Endpoints: endpoints{ @@ -4515,6 +6852,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "cur": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "dax": service{ Endpoints: endpoints{ @@ -4551,6 +6894,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "ebs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "ec2": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, @@ -4578,6 +6928,15 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "elasticache": service{ Endpoints: endpoints{ @@ -4597,6 +6956,18 @@ var awscnPartition = partition{ Endpoints: endpoints{ "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "elasticloadbalancing": service{ @@ -4656,6 +7027,7 @@ var awscnPartition = partition{ "glue": service{ Endpoints: endpoints{ + "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, }, @@ -4699,6 +7071,37 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "iotevents": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ioteventsdata": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "kinesis": service{ Endpoints: endpoints{ @@ -4706,6 +7109,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "kms": service{ Endpoints: endpoints{ @@ -4765,20 +7175,66 @@ var awscnPartition = partition{ }, }, }, + "organizations": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + "fips-aws-cn-global": endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "polly": service{ Endpoints: endpoints{ "cn-northwest-1": endpoint{}, }, }, - "rds": service{ + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "resource-groups": service{ Endpoints: endpoints{ "cn-north-1": endpoint{}, "cn-northwest-1": endpoint{}, }, }, - "redshift": service{ + 
"route53": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "runtime.sagemaker": service{ Endpoints: endpoints{ "cn-north-1": endpoint{}, @@ -4789,6 +7245,9 @@ var awscnPartition = partition{ Defaults: endpoint{ Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ "cn-north-1": endpoint{}, @@ -4799,6 +7258,9 @@ var awscnPartition = partition{ Defaults: endpoint{ Protocols: []string{"https"}, SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ "cn-north-1": endpoint{ @@ -4837,6 +7299,13 @@ var awscnPartition = partition{ }, }, }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "sms": service{ Endpoints: endpoints{ @@ -4847,7 +7316,20 @@ var awscnPartition = partition{ "snowball": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + "fips-cn-north-1": endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "fips-cn-northwest-1": endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, }, }, "sns": service{ @@ -4995,15 +7477,25 @@ var awsusgovPartition = partition{ Description: "AWS GovCloud (US-East)", }, "us-gov-west-1": region{ - Description: "AWS GovCloud (US)", + Description: "AWS GovCloud (US-West)", }, }, Services: services{ "access-analyzer": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "acm": service{ @@ -5018,6 +7510,18 @@ var awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5025,6 +7529,30 @@ var awsusgovPartition = partition{ "api.ecr": service{ Endpoints: endpoints{ + "fips-dkr-us-gov-east-1": endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-dkr-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-us-gov-east-1": endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{ Hostname: "api.ecr.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -5043,6 +7571,18 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips-secondary": endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "apigateway": service{ @@ -5085,6 +7625,18 @@ var awsusgovPartition = partition{ "athena": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5092,7 +7644,9 @@ var awsusgovPartition = partition{ "autoscaling": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Protocols: []string{"http", "https"}, + }, "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, }, @@ -5107,9 +7661,28 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "backup": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "batch": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5123,8 +7696,18 @@ var awsusgovPartition = partition{ "cloudformation": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "cloudhsm": service{ @@ -5147,20 +7730,48 @@ var awsusgovPartition = partition{ "cloudtrail": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "codebuild": service{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "codecommit": service{ Endpoints: endpoints{ + "fips": endpoint{ + Hostname: 
"codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5184,17 +7795,59 @@ var awsusgovPartition = partition{ }, }, }, + "codepipeline": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "comprehend": service{ Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, "comprehendmedical": service{ Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -5227,20 +7880,59 @@ var awsusgovPartition = partition{ "directconnect": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "directconnect.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "directconnect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "dms": service{ Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, + "docdb": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "ds": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5264,13 +7956,30 @@ var awsusgovPartition = partition{ }, }, }, - "ec2": service{ - - Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, - }, - }, + "ebs": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "ec2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "ec2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "ec2metadata": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -5284,6 +7993,27 @@ var 
awsusgovPartition = partition{ }, "ecs": service{ + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "eks": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, @@ -5293,7 +8023,7 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "fips": endpoint{ - Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + Hostname: "elasticache.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, @@ -5305,13 +8035,35 @@ var awsusgovPartition = partition{ "elasticbeanstalk": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "elasticfilesystem": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5319,6 +8071,18 @@ var awsusgovPartition = partition{ "elasticloadbalancing": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, @@ -5328,12 +8092,36 @@ var awsusgovPartition = partition{ "elasticmapreduce": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Protocols: []string{"https"}, }, }, }, + "email": service{ + + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{}, + }, + }, "es": service{ Endpoints: endpoints{ @@ -5350,13 +8138,35 @@ var awsusgovPartition = partition{ "events": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + 
"us-gov-west-1": endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "firehose": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5364,15 +8174,36 @@ var awsusgovPartition = partition{ "glacier": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, "glue": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5383,7 +8214,12 @@ var awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-gov-west-1": endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "guardduty": service{ @@ -5393,12 +8229,23 @@ var awsusgovPartition = partition{ }, Endpoints: endpoints{ "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "health": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "fips-us-gov-west-1": endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "iam": service{ @@ -5412,11 +8259,29 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + "iam-govcloud-fips": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "inspector": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5428,17 +8293,43 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "iotsecuredtunneling": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "kinesis": service{ + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kinesisanalytics": service{ + Endpoints: endpoints{ "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, @@ -5460,6 +8351,18 @@ var awsusgovPartition = partition{ "lambda": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5467,6 +8370,18 @@ var awsusgovPartition = partition{ "license-manager": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5474,14 +8389,29 @@ var awsusgovPartition = partition{ "logs": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "mediaconvert": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "us-gov-west-1": endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "metering.marketplace": service{ @@ -5498,6 +8428,18 @@ var awsusgovPartition = partition{ "monitoring": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5524,17 +8466,62 @@ var awsusgovPartition = partition{ IsRegionalized: boxedFalse, Endpoints: endpoints{ - "aws-us-gov-global": endpoint{ - Hostname: "organizations.us-gov-west-1.amazonaws.com", + "aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "fips-aws-us-gov-global": endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "outposts": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: 
credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "pinpoint.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, + "us-gov-west-1": endpoint{}, }, }, "polly": service{ Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -5548,6 +8535,18 @@ var awsusgovPartition = partition{ "rds": service{ Endpoints: endpoints{ + "rds.us-gov-east-1": endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "rds.us-gov-west-1": endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5555,13 +8554,29 @@ var awsusgovPartition = partition{ "redshift": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "redshift.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "redshift.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "rekognition": service{ Endpoints: endpoints{ + "rekognition-fips.us-gov-west-1": endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -5613,10 +8628,13 @@ var awsusgovPartition = partition{ "s3": service{ Defaults: endpoint{ SignatureVersions: []string{"s3", "s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ "fips-us-gov-west-1": endpoint{ - Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, @@ -5635,6 +8653,9 @@ var awsusgovPartition = partition{ Defaults: endpoint{ Protocols: []string{"https"}, SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", }, Endpoints: endpoints{ "us-gov-east-1": endpoint{ @@ -5686,22 +8707,56 @@ var awsusgovPartition = partition{ }, }, }, + "securityhub": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "serverlessrepo": service{ Defaults: endpoint{ Protocols: []string{"https"}, }, Endpoints: endpoints{ "us-gov-east-1": endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, }, "us-gov-west-1": endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, "servicecatalog": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + 
"us-gov-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", @@ -5714,6 +8769,18 @@ var awsusgovPartition = partition{ "sms": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "sms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5721,6 +8788,18 @@ var awsusgovPartition = partition{ "snowball": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5728,25 +8807,67 @@ var awsusgovPartition = partition{ "sns": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, "sqs": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "sqs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ + Hostname: "sqs.us-gov-west-1.amazonaws.com", SSLCommonName: "{region}.queue.{dnsSuffix}", Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, }, }, "ssm": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "ssm-facade-fips-us-gov-east-1": endpoint{ + Hostname: "ssm-facade.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "ssm-facade-fips-us-gov-west-1": endpoint{ + Hostname: "ssm-facade.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5754,6 +8875,18 @@ var awsusgovPartition = partition{ "states": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5761,6 +8894,12 @@ var awsusgovPartition = partition{ "storagegateway": service{ Endpoints: endpoints{ + "fips": endpoint{ + Hostname: 
"storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5792,7 +8931,19 @@ var awsusgovPartition = partition{ Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "support": service{ @@ -5805,13 +8956,29 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + "fips-us-gov-west-1": endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "swf": service{ Endpoints: endpoints{ - "us-gov-east-1": endpoint{}, - "us-gov-west-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "tagging": service{ @@ -5826,6 +8993,37 @@ var awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -5847,12 +9045,29 @@ var awsusgovPartition = partition{ "waf-regional": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "fips-us-gov-west-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "workspaces": service{ Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-west-1": endpoint{}, }, }, @@ -5949,6 +9164,14 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ @@ -5970,6 +9193,12 @@ var awsisoPartition = partition{ "dms": service{ Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, "us-iso-east-1": endpoint{}, }, 
}, @@ -6032,6 +9261,12 @@ var awsisoPartition = partition{ }, }, }, + "es": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "events": service{ Endpoints: endpoints{ @@ -6208,6 +9443,20 @@ var awsisoPartition = partition{ "us-iso-east-1": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "workspaces": service{ Endpoints: endpoints{ @@ -6286,6 +9535,12 @@ var awsisobPartition = partition{ "dms": service{ Endpoints: endpoints{ + "dms-fips": endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, "us-isob-east-1": endpoint{}, }, }, @@ -6385,6 +9640,12 @@ var awsisobPartition = partition{ "us-isob-east-1": endpoint{}, }, }, + "lambda": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, "license-manager": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index eb2ac83c9..773613722 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -7,6 +7,8 @@ import ( "strings" ) +var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) + type partitions []partition func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { @@ -124,7 +126,7 @@ func (p partition) EndpointFor(service, region string, opts ...func(*Options)) ( defs := []endpoint{p.Defaults, s.Defaults} - return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt) } func serviceList(ss services) []string { @@ -233,7 +235,7 @@ func getByPriority(s []string, p []string, def string) string { return s[0] } -func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { var merged endpoint for _, def := range defs { merged.mergeIn(def) @@ -260,6 +262,10 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [ region = signingRegion } + if !validateInputRegion(region) { + return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") + } + u := strings.Replace(hostname, "{service}", service, 1) u = strings.Replace(u, "{region}", region, 1) u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) @@ -274,7 +280,7 @@ func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs [ SigningName: signingName, SigningNameDerived: signingNameDerived, SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), - } + }, nil } func getEndpointScheme(protocols []string, disableSSL bool) string { @@ -339,3 +345,7 @@ const ( boxedFalse boxedTrue ) + +func validateInputRegion(region string) bool { + return regionValidationRegex.MatchString(region) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go index d9b37f4d3..2ba3c56c1 100644 --- 
a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -9,7 +9,8 @@ func isErrConnectionReset(err error) bool { return false } - if strings.Contains(err.Error(), "connection reset") || + if strings.Contains(err.Error(), "use of closed network connection") || + strings.Contains(err.Error(), "connection reset") || strings.Contains(err.Error(), "broken pipe") { return true } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 7ec66e7e5..cc461bd32 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -241,5 +241,22 @@ over the AWS_CA_BUNDLE environment variable, and will be used if both are set. Setting a custom HTTPClient in the aws.Config options will override this setting. To use this option and custom HTTP client, the HTTP client needs to be provided when creating the session. Not the service client. + +The endpoint of the EC2 IMDS client can be configured via the environment +variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +Session. See Options.EC2IMDSEndpoint for more details. + + AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 + +If using an URL with an IPv6 address literal, the IPv6 address +component must be enclosed in square brackets. + + AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + +The custom EC2 IMDS endpoint can also be specified via the Session options. + + sess, err := session.NewSessionWithOptions(session.Options{ + EC2IMDSEndpoint: "http://[::1]", + }) */ package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index c1e0e9c95..d67c261d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -148,6 +148,11 @@ type envConfig struct { // // AWS_S3_USE_ARN_REGION=true S3UseARNRegion bool + + // Specifies the alternative endpoint to use for EC2 IMDS. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + EC2IMDSEndpoint string } var ( @@ -211,6 +216,9 @@ var ( s3UseARNRegionEnvKey = []string{ "AWS_S3_USE_ARN_REGION", } + ec2IMDSEndpointEnvKey = []string{ + "AWS_EC2_METADATA_SERVICE_ENDPOINT", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -332,6 +340,8 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) { } } + setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey) + return cfg, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 0ff499605..6430a7f15 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -48,6 +48,8 @@ var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credenti type Session struct { Config *aws.Config Handlers request.Handlers + + options Options } // New creates a new instance of the handlers merging in the provided configs @@ -99,7 +101,7 @@ func New(cfgs ...*aws.Config) *Session { return s } - s := deprecatedNewSession(cfgs...) + s := deprecatedNewSession(envCfg, cfgs...) 
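The session/doc.go, env_config.go, and session.go hunks above wire in an override for the EC2 IMDS endpoint: the environment variable is read into envConfig, and a wrapped endpoint resolver redirects the "ec2metadata" service to the supplied URL. A minimal consumer-side sketch (not part of the vendored patch; the loopback URL is only a placeholder) of the two ways to supply that endpoint once this SDK version is vendored:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Option 1: environment variable, read by envConfigLoad above.
        os.Setenv("AWS_EC2_METADATA_SERVICE_ENDPOINT", "http://[::1]")

        // Option 2: programmatic override; per mergeConfigSrcs the session
        // option takes precedence over the environment variable.
        sess, err := session.NewSessionWithOptions(session.Options{
            EC2IMDSEndpoint: "http://[::1]", // placeholder endpoint
        })
        if err != nil {
            log.Fatal(err)
        }

        // The metadata client resolves "ec2metadata" through the wrapped
        // resolver, so its requests go to the custom endpoint.
        svc := ec2metadata.New(sess)
        fmt.Println("IMDS reachable:", svc.Available())
    }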
if envErr != nil { msg := "failed to load env config" s.logDeprecatedNewSessionError(msg, envErr, cfgs) @@ -243,6 +245,23 @@ type Options struct { // function to initialize this value before changing the handlers to be // used by the SDK. Handlers request.Handlers + + // Allows specifying a custom endpoint to be used by the EC2 IMDS client + // when making requests to the EC2 IMDS API. The must endpoint value must + // include protocol prefix. + // + // If unset, will the EC2 IMDS client will use its default endpoint. + // + // Can also be specified via the environment variable, + // AWS_EC2_METADATA_SERVICE_ENDPOINT. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254 + // + // If using an URL with an IPv6 address literal, the IPv6 address + // component must be enclosed in square brackets. + // + // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1] + EC2IMDSEndpoint string } // NewSessionWithOptions returns a new Session created from SDK defaults, config files, @@ -329,7 +348,25 @@ func Must(sess *Session, err error) *Session { return sess } -func deprecatedNewSession(cfgs ...*aws.Config) *Session { +// Wraps the endpoint resolver with a resolver that will return a custom +// endpoint for EC2 IMDS. +func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string) endpoints.Resolver { + return endpoints.ResolverFunc( + func(service, region string, opts ...func(*endpoints.Options)) ( + endpoints.ResolvedEndpoint, error, + ) { + if service == ec2MetadataServiceID { + return endpoints.ResolvedEndpoint{ + URL: endpoint, + SigningName: ec2MetadataServiceID, + SigningRegion: region, + }, nil + } + return resolver.EndpointFor(service, region) + }) +} + +func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session { cfg := defaults.Config() handlers := defaults.Handlers() @@ -341,6 +378,11 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { // endpoints for service client configurations. cfg.EndpointResolver = endpoints.DefaultResolver() } + + if len(envCfg.EC2IMDSEndpoint) != 0 { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint) + } + cfg.Credentials = defaults.CredChain(cfg, handlers) // Reapply any passed in configs to override credentials if set @@ -349,6 +391,9 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session { s := &Session{ Config: cfg, Handlers: handlers, + options: Options{ + EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint, + }, } initHandlers(s) @@ -418,6 +463,7 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, s := &Session{ Config: cfg, Handlers: handlers, + options: opts, } initHandlers(s) @@ -570,6 +616,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, endpoints.LegacyS3UsEast1Endpoint, }) + ec2IMDSEndpoint := sessOpts.EC2IMDSEndpoint + if len(ec2IMDSEndpoint) == 0 { + ec2IMDSEndpoint = envCfg.EC2IMDSEndpoint + } + if len(ec2IMDSEndpoint) != 0 { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint) + } + // Configure credentials if not already set by the user when creating the // Session. 
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { @@ -627,6 +681,7 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { newSession := &Session{ Config: s.Config.Copy(cfgs...), Handlers: s.Handlers.Copy(), + options: s.options, } initHandlers(newSession) @@ -665,6 +720,8 @@ func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Confi } } +const ec2MetadataServiceID = "ec2metadata" + func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go index d542ef01b..98751ee84 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -239,3 +239,26 @@ func (es errors) Error() string { return strings.Join(parts, "\n") } + +// CopySeekableBody copies the seekable body to an io.Writer +func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // copy errors may be assumed to be from the body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + // seek back to the first position after reading to reset + // the body for transmission. + _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index c563a089b..d30eed811 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.29.24" +const SDKVersion = "1.35.8" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go similarity index 54% rename from vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go rename to vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go index 2f93f96fd..bf18031a3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/accesspoint_arn.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go @@ -19,23 +19,28 @@ func (a AccessPointARN) GetARN() arn.ARN { // ParseAccessPointResource attempts to parse the ARN's resource as an // AccessPoint resource. 
+// +// Supported Access point resource format: +// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName} +// - example: arn.aws.s3.us-west-2.012345678901:accesspoint/myaccesspoint +// func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) { if len(a.Region) == 0 { - return AccessPointARN{}, InvalidARNError{a, "region not set"} + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "region not set"} } if len(a.AccountID) == 0 { - return AccessPointARN{}, InvalidARNError{a, "account-id not set"} + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"} } if len(resParts) == 0 { - return AccessPointARN{}, InvalidARNError{a, "resource-id not set"} + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} } if len(resParts) > 1 { - return AccessPointARN{}, InvalidARNError{a, "sub resource not supported"} + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"} } resID := resParts[0] if len(strings.TrimSpace(resID)) == 0 { - return AccessPointARN{}, InvalidARNError{a, "resource-id not set"} + return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"} } return AccessPointARN{ diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go similarity index 75% rename from vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go rename to vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go index a942d887f..7a8e46fbd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/internal/arn/arn.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go @@ -1,6 +1,7 @@ package arn import ( + "fmt" "strings" "github.com/aws/aws-sdk-go/aws/arn" @@ -25,13 +26,14 @@ func ParseResource(s string, resParser ResourceParser) (resARN Resource, err err } if len(a.Partition) == 0 { - return nil, InvalidARNError{a, "partition not set"} + return nil, InvalidARNError{ARN: a, Reason: "partition not set"} } - if a.Service != "s3" { - return nil, InvalidARNError{a, "service is not S3"} + + if a.Service != "s3" && a.Service != "s3-outposts" { + return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} } if len(a.Resource) == 0 { - return nil, InvalidARNError{a, "resource not set"} + return nil, InvalidARNError{ARN: a, Reason: "resource not set"} } return resParser(a) @@ -66,6 +68,7 @@ type InvalidARNError struct { Reason string } +// Error returns a string denoting the occurred InvalidARNError func (e InvalidARNError) Error() string { - return "invalid Amazon S3 ARN, " + e.Reason + ", " + e.ARN.String() + return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String()) } diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go new file mode 100644 index 000000000..1e10f8de0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go @@ -0,0 +1,126 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// OutpostARN interface that should be satisfied by outpost ARNs +type OutpostARN interface { + Resource + GetOutpostID() string +} + +// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format +// and return a specific OutpostARN type +// +// Currently supported outpost ARN formats: +// * Outpost AccessPoint ARN 
format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +// * Outpost Bucket ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket +// +// Other outpost ARN formats may be supported and added in the future. +// +func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) { + if len(a.Region) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "region not set"} + } + + if len(a.AccountID) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "account-id not set"} + } + + // verify if outpost id is present and valid + if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + // verify possible resource type exists + if len(resParts) < 3 { + return nil, InvalidARNError{ + ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present", + } + } + + // Since we know this is a OutpostARN fetch outpostID + outpostID := strings.TrimSpace(resParts[0]) + + switch resParts[1] { + case "accesspoint": + accesspointARN, err := ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return OutpostAccessPointARN{}, err + } + return OutpostAccessPointARN{ + AccessPointARN: accesspointARN, + OutpostID: outpostID, + }, nil + + case "bucket": + bucketName, err := parseBucketResource(a, resParts[2:]) + if err != nil { + return nil, err + } + return OutpostBucketARN{ + ARN: a, + BucketName: bucketName, + OutpostID: outpostID, + }, nil + + default: + return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"} + } +} + +// OutpostAccessPointARN represents outpost access point ARN. +type OutpostAccessPointARN struct { + AccessPointARN + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost access point arn +func (o OutpostAccessPointARN) GetOutpostID() string { + return o.OutpostID +} + +// OutpostBucketARN represents the outpost bucket ARN. +type OutpostBucketARN struct { + arn.ARN + BucketName string + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost bucket arn +func (o OutpostBucketARN) GetOutpostID() string { + return o.OutpostID +} + +// GetARN retrives the base ARN from outpost bucket ARN resource +func (o OutpostBucketARN) GetARN() arn.ARN { + return o.ARN +} + +// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the +// bucket resource id. +// +// parseBucketResource only parses the bucket resource id. 
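ParseOutpostARNResource above dispatches on the documented resource shape outpost/{outpostId}/{type}/{name}. As an illustration only (not part of the patch, and not the SDK's public parsing API, which lives in this internal package), the same format can be walked with the exported aws/arn package using the example ARN from the doc comment:

    package main

    import (
        "fmt"
        "log"
        "strings"

        "github.com/aws/aws-sdk-go/aws/arn"
    )

    func main() {
        // Example ARN taken from the ParseOutpostARNResource doc comment.
        const s = "arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint"

        a, err := arn.Parse(s)
        if err != nil {
            log.Fatal(err)
        }

        // Resource follows "outpost/{outpostId}/{type}/{name}" per the
        // format documented above; split it to recover the pieces.
        parts := strings.SplitN(a.Resource, "/", 4)
        if len(parts) < 4 || parts[0] != "outpost" {
            log.Fatalf("not an outpost resource: %q", a.Resource)
        }

        fmt.Println("service:   ", a.Service) // s3-outposts
        fmt.Println("outpost id:", parts[1])  // op-1234567890123456
        fmt.Println("type:      ", parts[2])  // accesspoint
        fmt.Println("name:      ", parts[3])  // myaccesspoint
    }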
+// +func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) { + if len(resParts) == 0 { + return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} + } + if len(resParts) > 1 { + return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"} + } + + bucketName = strings.TrimSpace(resParts[0]) + if len(bucketName) == 0 { + return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} + } + return bucketName, err +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go new file mode 100644 index 000000000..e756b2f87 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go @@ -0,0 +1,189 @@ +package s3shared + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" +) + +const ( + invalidARNErrorErrCode = "InvalidARNError" + configurationErrorErrCode = "ConfigurationError" +) + +// InvalidARNError denotes the error for Invalid ARN +type InvalidARNError struct { + message string + resource arn.Resource + origErr error +} + +// Error returns the InvalidARNError +func (e InvalidARNError) Error() string { + var extra string + if e.resource != nil { + extra = "ARN: " + e.resource.String() + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +// Code returns the invalid ARN error code +func (e InvalidARNError) Code() string { + return invalidARNErrorErrCode +} + +// Message returns the message for Invalid ARN error +func (e InvalidARNError) Message() string { + return e.message +} + +// OrigErr is the original error wrapped by Invalid ARN Error +func (e InvalidARNError) OrigErr() error { + return e.origErr +} + +// NewInvalidARNError denotes invalid arn error +func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "invalid ARN", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithCustomEndpointError ARN not supported for custom clients endpoints +func NewInvalidARNWithCustomEndpointError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported with custom client endpoints", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition +func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported for the target ARN partition", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithFIPSError ARN not supported for FIPS region +func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported for FIPS region", + resource: resource, + origErr: err, + } +} + +// ConfigurationError is used to denote a client configuration error +type ConfigurationError struct { + message string + resource arn.Resource + clientPartitionID string + clientRegion string + origErr error +} + +// Error returns the Configuration error string +func (e ConfigurationError) Error() string { + extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", + e.resource, e.clientPartitionID, e.clientRegion) + + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +// Code returns configuration error's 
error-code +func (e ConfigurationError) Code() string { + return configurationErrorErrCode +} + +// Message returns the configuration error message +func (e ConfigurationError) Message() string { + return e.message +} + +// OrigErr is the original error wrapped by Configuration Error +func (e ConfigurationError) OrigErr() error { + return e.origErr +} + +// NewClientPartitionMismatchError stub +func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client partition does not match provided ARN partition", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientRegionMismatchError denotes cross region access error +func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client region does not match provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewFailedToResolveEndpointError denotes endpoint resolving error +func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "endpoint resolver failed to find an endpoint for the provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access +func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for fips but cross-region resource ARN provided", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate +func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for S3 Accelerate but is not supported with resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request +func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack +func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for S3 Dual-stack but is not supported with resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go new file mode 100644 index 000000000..9f70a64ec --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go @@ -0,0 +1,62 @@ +package s3shared + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + awsarn "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" +) + +// ResourceRequest represents the request and arn resource +type ResourceRequest struct { + Resource arn.Resource + Request *request.Request +} + +// ARN returns the resource ARN +func (r ResourceRequest) ARN() awsarn.ARN { + return r.Resource.GetARN() +} + +// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set +func (r ResourceRequest) AllowCrossRegion() bool { + return aws.BoolValue(r.Request.Config.S3UseARNRegion) +} + +// UseFIPS returns true if request config region is FIPS +func (r ResourceRequest) UseFIPS() bool { + return IsFIPS(aws.StringValue(r.Request.Config.Region)) +} + +// ResourceConfiguredForFIPS returns true if resource ARNs region is FIPS +func (r ResourceRequest) ResourceConfiguredForFIPS() bool { + return IsFIPS(r.ARN().Region) +} + +// IsCrossPartition returns true if client is configured for another partition, than +// the partition that resource ARN region resolves to. +func (r ResourceRequest) IsCrossPartition() bool { + return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition +} + +// IsCrossRegion returns true if ARN region is different than client configured region +func (r ResourceRequest) IsCrossRegion() bool { + return IsCrossRegion(r.Request, r.Resource.GetARN().Region) +} + +// HasCustomEndpoint returns true if custom client endpoint is provided +func (r ResourceRequest) HasCustomEndpoint() bool { + return len(aws.StringValue(r.Request.Config.Endpoint)) > 0 +} + +// IsFIPS returns true if region is a fips region +func IsFIPS(clientRegion string) bool { + return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips") +} + +// IsCrossRegion returns true if request signing region is not same as configured region +func IsCrossRegion(req *request.Request, otherRegion string) bool { + return req.ClientInfo.SigningRegion != otherRegion +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go rename to vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go diff --git a/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go new file mode 100644 index 000000000..e045f38d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go @@ -0,0 +1,53 @@ +package checksum + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const contentMD5Header = "Content-Md5" + +// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that +// require it. 
+func AddBodyContentMD5Handler(r *request.Request) { + // if Content-MD5 header is already present, return + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 { + return + } + + // if S3DisableContentMD5Validation flag is set, return + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + + // if request is presigned, return + if r.IsPresigned() { + return + } + + // if body is not seekable, return + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + h := md5.New() + + if _, err := aws.CopySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // encode the md5 checksum in base64 and set the request header. + v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go index bb8ea5da1..0e4aa42f3 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go @@ -69,10 +69,23 @@ func (r *EventReader) ReadEvent() (event interface{}, err error) { case ErrorMessageType: return nil, r.unmarshalErrorMessage(msg) default: - return nil, fmt.Errorf("unknown eventstream message type, %v", typ) + return nil, &UnknownMessageTypeError{ + Type: typ, Message: msg.Clone(), + } } } +// UnknownMessageTypeError provides an error when a message is received from +// the stream, but the reader is unable to determine what kind of message it is. +type UnknownMessageTypeError struct { + Type string + Message eventstream.Message +} + +func (e *UnknownMessageTypeError) Error() string { + return "unknown eventstream message type, " + e.Type +} + func (r *EventReader) unmarshalEventMessage( msg eventstream.Message, ) (event interface{}, err error) { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go index 3b44dde2f..f6f8c5674 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -52,6 +52,15 @@ func (hs *Headers) Del(name string) { } } +// Clone returns a deep copy of the headers +func (hs Headers) Clone() Headers { + o := make(Headers, 0, len(hs)) + for _, h := range hs { + o.Set(h.Name, h.Value) + } + return o +} + func decodeHeaders(r io.Reader) (Headers, error) { hs := Headers{} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go index 25c9783cd..f7427da03 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -57,6 +57,20 @@ func (m *Message) rawMessage() (rawMessage, error) { return raw, nil } +// Clone returns a deep copy of the message. 
+func (m Message) Clone() Message { + var payload []byte + if m.Payload != nil { + payload = make([]byte, len(m.Payload)) + copy(payload, m.Payload) + } + + return Message{ + Headers: m.Headers.Clone(), + Payload: payload, + } +} + type messagePrelude struct { Length uint32 HeadersLen uint32 diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go index 5e9499699..8b2c9bbeb 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "math/big" "reflect" "strings" "time" @@ -15,6 +16,8 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) +var millisecondsFloat = new(big.Float).SetInt64(1e3) + // UnmarshalJSONError unmarshal's the reader's JSON document into the passed in // type. The value to unmarshal the json document into must be a pointer to the // type. @@ -39,7 +42,9 @@ func UnmarshalJSONError(v interface{}, stream io.Reader) error { func UnmarshalJSON(v interface{}, stream io.Reader) error { var out interface{} - err := json.NewDecoder(stream).Decode(&out) + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) if err == io.EOF { return nil } else if err != nil { @@ -54,7 +59,9 @@ func UnmarshalJSON(v interface{}, stream io.Reader) error { func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { var out interface{} - err := json.NewDecoder(stream).Decode(&out) + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) if err == io.EOF { return nil } else if err != nil { @@ -254,16 +261,31 @@ func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag default: return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) } - case float64: + case json.Number: switch value.Interface().(type) { case *int64: - di := int64(d) + // Retain the old behavior where we would just truncate the float64 + // calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt + f, err := d.Float64() + if err != nil { + return err + } + di := int64(f) value.Set(reflect.ValueOf(&di)) case *float64: - value.Set(reflect.ValueOf(&d)) + f, err := d.Float64() + if err != nil { + return err + } + value.Set(reflect.ValueOf(&f)) case *time.Time: - // Time unmarshaled from a float64 can only be epoch seconds - t := time.Unix(int64(d), 0).UTC() + float, ok := new(big.Float).SetString(d.String()) + if !ok { + return fmt.Errorf("unsupported float time representation: %v", d.String()) + } + float = float.Mul(float, millisecondsFloat) + ms, _ := float.Int64() + t := time.Unix(0, ms*1e6).UTC() value.Set(reflect.ValueOf(&t)) default: return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go index 05d4ff519..98f4caed9 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -27,8 +27,8 @@ const ( // RFC3339 a subset of the ISO8601 timestamp format. 
e.g 2014-04-29T18:30:38Z ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" - // This format is used for output time without seconds precision - ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z" + // This format is used for output time with fractional second precision up to milliseconds + ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z" ) // IsKnownTimestampFormat returns if the timestamp format name @@ -48,7 +48,7 @@ func IsKnownTimestampFormat(name string) bool { // FormatTime returns a string value of the time. func FormatTime(name string, t time.Time) string { - t = t.UTC() + t = t.UTC().Truncate(time.Millisecond) switch name { case RFC822TimeFormatName: @@ -56,7 +56,8 @@ func FormatTime(name string, t time.Time) string { case ISO8601TimeFormatName: return t.Format(ISO8601OutputTimeFormat) case UnixTimeFormatName: - return strconv.FormatInt(t.Unix(), 10) + ms := t.UnixNano() / int64(time.Millisecond) + return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64) default: panic("unknown timestamp format name, " + name) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index cf981fe95..09ad95159 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -8,6 +8,7 @@ import ( "reflect" "sort" "strconv" + "strings" "time" "github.com/aws/aws-sdk-go/private/protocol" @@ -60,6 +61,14 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle return nil } + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + t := tag.Get("type") if t == "" { switch value.Kind() { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 7108d3800..107c053f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -64,6 +64,14 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { // parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect // will be used to determine the type from r. func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + rtype := r.Type() if rtype.Kind() == reflect.Ptr { rtype = rtype.Elem() // check kind of actual element type diff --git a/vendor/github.com/aws/aws-sdk-go/service/accessanalyzer/api.go b/vendor/github.com/aws/aws-sdk-go/service/accessanalyzer/api.go index 8565aeada..e78b6fcfc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/accessanalyzer/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/accessanalyzer/api.go @@ -13,6 +13,99 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restjson" ) +const opApplyArchiveRule = "ApplyArchiveRule" + +// ApplyArchiveRuleRequest generates a "aws/request.Request" representing the +// client's request for the ApplyArchiveRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ApplyArchiveRule for more information on using the ApplyArchiveRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ApplyArchiveRuleRequest method. +// req, resp := client.ApplyArchiveRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/accessanalyzer-2019-11-01/ApplyArchiveRule +func (c *AccessAnalyzer) ApplyArchiveRuleRequest(input *ApplyArchiveRuleInput) (req *request.Request, output *ApplyArchiveRuleOutput) { + op := &request.Operation{ + Name: opApplyArchiveRule, + HTTPMethod: "PUT", + HTTPPath: "/archive-rule", + } + + if input == nil { + input = &ApplyArchiveRuleInput{} + } + + output = &ApplyArchiveRuleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ApplyArchiveRule API operation for Access Analyzer. +// +// Retroactively applies the archive rule to existing findings that meet the +// archive rule criteria. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Access Analyzer's +// API operation ApplyArchiveRule for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource could not be found. +// +// * ValidationException +// Validation exception error. +// +// * InternalServerException +// Internal server error. +// +// * ThrottlingException +// Throttling limit exceeded error. +// +// * AccessDeniedException +// You do not have sufficient access to perform this action. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/accessanalyzer-2019-11-01/ApplyArchiveRule +func (c *AccessAnalyzer) ApplyArchiveRule(input *ApplyArchiveRuleInput) (*ApplyArchiveRuleOutput, error) { + req, out := c.ApplyArchiveRuleRequest(input) + return out, req.Send() +} + +// ApplyArchiveRuleWithContext is the same as ApplyArchiveRule with the addition of +// the ability to pass a context and additional request options. +// +// See ApplyArchiveRule for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AccessAnalyzer) ApplyArchiveRuleWithContext(ctx aws.Context, input *ApplyArchiveRuleInput, opts ...request.Option) (*ApplyArchiveRuleOutput, error) { + req, out := c.ApplyArchiveRuleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateAnalyzer = "CreateAnalyzer" // CreateAnalyzerRequest generates a "aws/request.Request" representing the @@ -153,7 +246,8 @@ func (c *AccessAnalyzer) CreateArchiveRuleRequest(input *CreateArchiveRuleInput) // CreateArchiveRule API operation for Access Analyzer. // // Creates an archive rule for the specified analyzer. 
Archive rules automatically -// archive findings that meet the criteria you define when you create the rule. +// archive new findings that meet the criteria you define when you create the +// rule. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1900,8 +1994,8 @@ func (c *AccessAnalyzer) UpdateFindingsWithContext(ctx aws.Context, input *Updat // You do not have sufficient access to perform this action. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -1918,17 +2012,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1936,22 +2030,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about the analyzed resource. @@ -1986,12 +2080,18 @@ type AnalyzedResource struct { // ResourceArn is a required field ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + // The AWS account ID that owns the resource. + // + // ResourceOwnerAccount is a required field + ResourceOwnerAccount *string `locationName:"resourceOwnerAccount" type:"string" required:"true"` + // The type of the resource that was analyzed. // // ResourceType is a required field ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"` - // Indicates how the access that generated the finding is granted. + // Indicates how the access that generated the finding is granted. This is populated + // for Amazon S3 bucket findings. SharedVia []*string `locationName:"sharedVia" type:"list"` // The current status of the finding generated from the analyzed resource. @@ -2049,6 +2149,12 @@ func (s *AnalyzedResource) SetResourceArn(v string) *AnalyzedResource { return s } +// SetResourceOwnerAccount sets the ResourceOwnerAccount field's value. 
+func (s *AnalyzedResource) SetResourceOwnerAccount(v string) *AnalyzedResource { + s.ResourceOwnerAccount = &v + return s +} + // SetResourceType sets the ResourceType field's value. func (s *AnalyzedResource) SetResourceType(v string) *AnalyzedResource { s.ResourceType = &v @@ -2082,6 +2188,11 @@ type AnalyzedResourceSummary struct { // ResourceArn is a required field ResourceArn *string `locationName:"resourceArn" type:"string" required:"true"` + // The AWS account ID that owns the resource. + // + // ResourceOwnerAccount is a required field + ResourceOwnerAccount *string `locationName:"resourceOwnerAccount" type:"string" required:"true"` + // The type of resource that was analyzed. // // ResourceType is a required field @@ -2104,6 +2215,12 @@ func (s *AnalyzedResourceSummary) SetResourceArn(v string) *AnalyzedResourceSumm return s } +// SetResourceOwnerAccount sets the ResourceOwnerAccount field's value. +func (s *AnalyzedResourceSummary) SetResourceOwnerAccount(v string) *AnalyzedResourceSummary { + s.ResourceOwnerAccount = &v + return s +} + // SetResourceType sets the ResourceType field's value. func (s *AnalyzedResourceSummary) SetResourceType(v string) *AnalyzedResourceSummary { s.ResourceType = &v @@ -2135,6 +2252,23 @@ type AnalyzerSummary struct { // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` + // The status of the analyzer. An Active analyzer successfully monitors supported + // resources and generates new findings. The analyzer is Disabled when a user + // action, such as removing trusted access for IAM Access Analyzer from AWS + // Organizations, causes the analyzer to stop generating new findings. The status + // is Creating when the analyzer creation is in progress and Failed when the + // analyzer creation has failed. + // + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"AnalyzerStatus"` + + // The statusReason provides more details about the current status of the analyzer. + // For example, if the creation for the analyzer fails, a Failed status is displayed. + // For an analyzer with organization as the type, this failure can be due to + // an issue with creating the service-linked roles required in the member accounts + // of the AWS organization. + StatusReason *StatusReason `locationName:"statusReason" type:"structure"` + // The tags added to the analyzer. Tags map[string]*string `locationName:"tags" type:"map"` @@ -2185,6 +2319,18 @@ func (s *AnalyzerSummary) SetName(v string) *AnalyzerSummary { return s } +// SetStatus sets the Status field's value. +func (s *AnalyzerSummary) SetStatus(v string) *AnalyzerSummary { + s.Status = &v + return s +} + +// SetStatusReason sets the StatusReason field's value. +func (s *AnalyzerSummary) SetStatusReason(v *StatusReason) *AnalyzerSummary { + s.StatusReason = v + return s +} + // SetTags sets the Tags field's value. func (s *AnalyzerSummary) SetTags(v map[string]*string) *AnalyzerSummary { s.Tags = v @@ -2197,6 +2343,85 @@ func (s *AnalyzerSummary) SetType(v string) *AnalyzerSummary { return s } +// Retroactively applies an archive rule. +type ApplyArchiveRuleInput struct { + _ struct{} `type:"structure"` + + // The Amazon resource name (ARN) of the analyzer. + // + // AnalyzerArn is a required field + AnalyzerArn *string `locationName:"analyzerArn" type:"string" required:"true"` + + // A client token. 
+ ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` + + // The name of the rule to apply. + // + // RuleName is a required field + RuleName *string `locationName:"ruleName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ApplyArchiveRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplyArchiveRuleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ApplyArchiveRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ApplyArchiveRuleInput"} + if s.AnalyzerArn == nil { + invalidParams.Add(request.NewErrParamRequired("AnalyzerArn")) + } + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleName != nil && len(*s.RuleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RuleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnalyzerArn sets the AnalyzerArn field's value. +func (s *ApplyArchiveRuleInput) SetAnalyzerArn(v string) *ApplyArchiveRuleInput { + s.AnalyzerArn = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *ApplyArchiveRuleInput) SetClientToken(v string) *ApplyArchiveRuleInput { + s.ClientToken = &v + return s +} + +// SetRuleName sets the RuleName field's value. +func (s *ApplyArchiveRuleInput) SetRuleName(v string) *ApplyArchiveRuleInput { + s.RuleName = &v + return s +} + +type ApplyArchiveRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ApplyArchiveRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ApplyArchiveRuleOutput) GoString() string { + return s.String() +} + // Contains information about an archive rule. type ArchiveRuleSummary struct { _ struct{} `type:"structure"` @@ -2258,8 +2483,8 @@ func (s *ArchiveRuleSummary) SetUpdatedAt(v time.Time) *ArchiveRuleSummary { // A conflict exception error. type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -2286,17 +2511,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2304,22 +2529,22 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } // Creates an analyzer. @@ -2805,11 +3030,20 @@ type Finding struct { // The resource that an external principal has access to. Resource *string `locationName:"resource" type:"string"` + // The AWS account ID that owns the resource. + // + // ResourceOwnerAccount is a required field + ResourceOwnerAccount *string `locationName:"resourceOwnerAccount" type:"string" required:"true"` + // The type of the resource reported in the finding. // // ResourceType is a required field ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"` + // The sources of the finding. This indicates how the access that generated + // the finding is granted. It is populated for Amazon S3 bucket findings. + Sources []*FindingSource `locationName:"sources" type:"list"` + // The current status of the finding. // // Status is a required field @@ -2885,12 +3119,24 @@ func (s *Finding) SetResource(v string) *Finding { return s } +// SetResourceOwnerAccount sets the ResourceOwnerAccount field's value. +func (s *Finding) SetResourceOwnerAccount(v string) *Finding { + s.ResourceOwnerAccount = &v + return s +} + // SetResourceType sets the ResourceType field's value. func (s *Finding) SetResourceType(v string) *Finding { s.ResourceType = &v return s } +// SetSources sets the Sources field's value. +func (s *Finding) SetSources(v []*FindingSource) *Finding { + s.Sources = v + return s +} + // SetStatus sets the Status field's value. func (s *Finding) SetStatus(v string) *Finding { s.Status = &v @@ -2903,6 +3149,68 @@ func (s *Finding) SetUpdatedAt(v time.Time) *Finding { return s } +// The source of the finding. This indicates how the access that generated the +// finding is granted. It is populated for Amazon S3 bucket findings. +type FindingSource struct { + _ struct{} `type:"structure"` + + // Includes details about how the access that generated the finding is granted. + // This is populated for Amazon S3 bucket findings. + Detail *FindingSourceDetail `locationName:"detail" type:"structure"` + + // Indicates the type of access that generated the finding. + // + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"FindingSourceType"` +} + +// String returns the string representation +func (s FindingSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FindingSource) GoString() string { + return s.String() +} + +// SetDetail sets the Detail field's value. +func (s *FindingSource) SetDetail(v *FindingSourceDetail) *FindingSource { + s.Detail = v + return s +} + +// SetType sets the Type field's value. +func (s *FindingSource) SetType(v string) *FindingSource { + s.Type = &v + return s +} + +// Includes details about how the access that generated the finding is granted. +// This is populated for Amazon S3 bucket findings. +type FindingSourceDetail struct { + _ struct{} `type:"structure"` + + // The ARN of the access point that generated the finding. 
+ AccessPointArn *string `locationName:"accessPointArn" type:"string"` +} + +// String returns the string representation +func (s FindingSourceDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FindingSourceDetail) GoString() string { + return s.String() +} + +// SetAccessPointArn sets the AccessPointArn field's value. +func (s *FindingSourceDetail) SetAccessPointArn(v string) *FindingSourceDetail { + s.AccessPointArn = &v + return s +} + // Contains information about a finding. type FindingSummary struct { _ struct{} `type:"structure"` @@ -2945,11 +3253,20 @@ type FindingSummary struct { // The resource that the external principal has access to. Resource *string `locationName:"resource" type:"string"` + // The AWS account ID that owns the resource. + // + // ResourceOwnerAccount is a required field + ResourceOwnerAccount *string `locationName:"resourceOwnerAccount" type:"string" required:"true"` + // The type of the resource that the external principal has access to. // // ResourceType is a required field ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"` + // The sources of the finding. This indicates how the access that generated + // the finding is granted. It is populated for Amazon S3 bucket findings. + Sources []*FindingSource `locationName:"sources" type:"list"` + // The status of the finding. // // Status is a required field @@ -3025,12 +3342,24 @@ func (s *FindingSummary) SetResource(v string) *FindingSummary { return s } +// SetResourceOwnerAccount sets the ResourceOwnerAccount field's value. +func (s *FindingSummary) SetResourceOwnerAccount(v string) *FindingSummary { + s.ResourceOwnerAccount = &v + return s +} + // SetResourceType sets the ResourceType field's value. func (s *FindingSummary) SetResourceType(v string) *FindingSummary { s.ResourceType = &v return s } +// SetSources sets the Sources field's value. +func (s *FindingSummary) SetSources(v []*FindingSource) *FindingSummary { + s.Sources = v + return s +} + // SetStatus sets the Status field's value. func (s *FindingSummary) SetStatus(v string) *FindingSummary { s.Status = &v @@ -3423,8 +3752,8 @@ func (s *InlineArchiveRule) SetRuleName(v string) *InlineArchiveRule { // Internal server error. type InternalServerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -3444,17 +3773,17 @@ func (s InternalServerException) GoString() string { func newErrorInternalServerException(v protocol.ResponseMetadata) error { return &InternalServerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerException) Code() string { +func (s *InternalServerException) Code() string { return "InternalServerException" } // Message returns the exception's message. -func (s InternalServerException) Message() string { +func (s *InternalServerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3462,22 +3791,22 @@ func (s InternalServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InternalServerException) OrigErr() error { +func (s *InternalServerException) OrigErr() error { return nil } -func (s InternalServerException) Error() string { +func (s *InternalServerException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID } // Retrieves a list of resources that have been analyzed. @@ -3942,8 +4271,8 @@ func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForRe // The specified resource could not be found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -3970,17 +4299,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3988,28 +4317,28 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Service quote met error. type ServiceQuotaExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -4036,17 +4365,17 @@ func (s ServiceQuotaExceededException) GoString() string { func newErrorServiceQuotaExceededException(v protocol.ResponseMetadata) error { return &ServiceQuotaExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ServiceQuotaExceededException) Code() string { +func (s *ServiceQuotaExceededException) Code() string { return "ServiceQuotaExceededException" } // Message returns the exception's message. -func (s ServiceQuotaExceededException) Message() string { +func (s *ServiceQuotaExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4054,22 +4383,22 @@ func (s ServiceQuotaExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceQuotaExceededException) OrigErr() error { +func (s *ServiceQuotaExceededException) OrigErr() error { return nil } -func (s ServiceQuotaExceededException) Error() string { +func (s *ServiceQuotaExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceQuotaExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceQuotaExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceQuotaExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceQuotaExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The criteria used to sort. @@ -4173,6 +4502,36 @@ func (s StartResourceScanOutput) GoString() string { return s.String() } +// Provides more details about the current status of the analyzer. For example, +// if the creation for the analyzer fails, a Failed status is displayed. For +// an analyzer with organization as the type, this failure can be due to an +// issue with creating the service-linked roles required in the member accounts +// of the AWS organization. +type StatusReason struct { + _ struct{} `type:"structure"` + + // The reason code for the current status of the analyzer. + // + // Code is a required field + Code *string `locationName:"code" type:"string" required:"true" enum:"ReasonCode"` +} + +// String returns the string representation +func (s StatusReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatusReason) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *StatusReason) SetCode(v string) *StatusReason { + s.Code = &v + return s +} + // Adds a tag to the specified resource. type TagResourceInput struct { _ struct{} `type:"structure"` @@ -4246,8 +4605,8 @@ func (s TagResourceOutput) GoString() string { // Throttling limit exceeded error. type ThrottlingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -4267,17 +4626,17 @@ func (s ThrottlingException) GoString() string { func newErrorThrottlingException(v protocol.ResponseMetadata) error { return &ThrottlingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ThrottlingException) Code() string { +func (s *ThrottlingException) Code() string { return "ThrottlingException" } // Message returns the exception's message. 
-func (s ThrottlingException) Message() string { +func (s *ThrottlingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4285,22 +4644,22 @@ func (s ThrottlingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThrottlingException) OrigErr() error { +func (s *ThrottlingException) OrigErr() error { return nil } -func (s ThrottlingException) Error() string { +func (s *ThrottlingException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ThrottlingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ThrottlingException) RequestID() string { - return s.respMetadata.RequestID +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID } // Removes a tag from the specified resource. @@ -4579,8 +4938,8 @@ func (s UpdateFindingsOutput) GoString() string { // Validation exception error. type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A list of fields that didn't validate. FieldList []*ValidationExceptionField `locationName:"fieldList" type:"list"` @@ -4605,17 +4964,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. -func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4623,22 +4982,22 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about a validation exception. 
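
A minimal sketch of consuming these modeled exceptions, assuming a caller already holding an error returned by one of the AccessAnalyzer operations above. The generated documentation in this file recommends asserting to awserr.Error and branching on Code() and Message(), and that pattern is unchanged by the pointer-receiver refactor in this update; the AccessDeniedException code used below is one of the error types listed in the operation docs, while the error value constructed in main is only a stand-in for illustration.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

// inspectError follows the pattern the generated docs recommend: assert to
// awserr.Error and branch on the error code rather than the concrete type.
func inspectError(err error) {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case "AccessDeniedException":
			fmt.Println("access denied:", aerr.Message())
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
		return
	}
	fmt.Println(err)
}

func main() {
	// Stand-in error; a real one would come from an AccessAnalyzer API call.
	inspectError(awserr.New("AccessDeniedException", "not authorized", nil))
}
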
@@ -4678,6 +5037,50 @@ func (s *ValidationExceptionField) SetName(v string) *ValidationExceptionField { return s } +const ( + // AnalyzerStatusActive is a AnalyzerStatus enum value + AnalyzerStatusActive = "ACTIVE" + + // AnalyzerStatusCreating is a AnalyzerStatus enum value + AnalyzerStatusCreating = "CREATING" + + // AnalyzerStatusDisabled is a AnalyzerStatus enum value + AnalyzerStatusDisabled = "DISABLED" + + // AnalyzerStatusFailed is a AnalyzerStatus enum value + AnalyzerStatusFailed = "FAILED" +) + +// AnalyzerStatus_Values returns all elements of the AnalyzerStatus enum +func AnalyzerStatus_Values() []string { + return []string{ + AnalyzerStatusActive, + AnalyzerStatusCreating, + AnalyzerStatusDisabled, + AnalyzerStatusFailed, + } +} + +const ( + // FindingSourceTypePolicy is a FindingSourceType enum value + FindingSourceTypePolicy = "POLICY" + + // FindingSourceTypeBucketAcl is a FindingSourceType enum value + FindingSourceTypeBucketAcl = "BUCKET_ACL" + + // FindingSourceTypeS3AccessPoint is a FindingSourceType enum value + FindingSourceTypeS3AccessPoint = "S3_ACCESS_POINT" +) + +// FindingSourceType_Values returns all elements of the FindingSourceType enum +func FindingSourceType_Values() []string { + return []string{ + FindingSourceTypePolicy, + FindingSourceTypeBucketAcl, + FindingSourceTypeS3AccessPoint, + } +} + const ( // FindingStatusActive is a FindingStatus enum value FindingStatusActive = "ACTIVE" @@ -4689,6 +5092,15 @@ const ( FindingStatusResolved = "RESOLVED" ) +// FindingStatus_Values returns all elements of the FindingStatus enum +func FindingStatus_Values() []string { + return []string{ + FindingStatusActive, + FindingStatusArchived, + FindingStatusResolved, + } +} + const ( // FindingStatusUpdateActive is a FindingStatusUpdate enum value FindingStatusUpdateActive = "ACTIVE" @@ -4697,6 +5109,14 @@ const ( FindingStatusUpdateArchived = "ARCHIVED" ) +// FindingStatusUpdate_Values returns all elements of the FindingStatusUpdate enum +func FindingStatusUpdate_Values() []string { + return []string{ + FindingStatusUpdateActive, + FindingStatusUpdateArchived, + } +} + const ( // OrderByAsc is a OrderBy enum value OrderByAsc = "ASC" @@ -4705,12 +5125,47 @@ const ( OrderByDesc = "DESC" ) +// OrderBy_Values returns all elements of the OrderBy enum +func OrderBy_Values() []string { + return []string{ + OrderByAsc, + OrderByDesc, + } +} + const ( + // ReasonCodeAwsServiceAccessDisabled is a ReasonCode enum value + ReasonCodeAwsServiceAccessDisabled = "AWS_SERVICE_ACCESS_DISABLED" + + // ReasonCodeDelegatedAdministratorDeregistered is a ReasonCode enum value + ReasonCodeDelegatedAdministratorDeregistered = "DELEGATED_ADMINISTRATOR_DEREGISTERED" + + // ReasonCodeOrganizationDeleted is a ReasonCode enum value + ReasonCodeOrganizationDeleted = "ORGANIZATION_DELETED" + + // ReasonCodeServiceLinkedRoleCreationFailed is a ReasonCode enum value + ReasonCodeServiceLinkedRoleCreationFailed = "SERVICE_LINKED_ROLE_CREATION_FAILED" +) + +// ReasonCode_Values returns all elements of the ReasonCode enum +func ReasonCode_Values() []string { + return []string{ + ReasonCodeAwsServiceAccessDisabled, + ReasonCodeDelegatedAdministratorDeregistered, + ReasonCodeOrganizationDeleted, + ReasonCodeServiceLinkedRoleCreationFailed, + } +} + +const ( + // ResourceTypeAwsS3Bucket is a ResourceType enum value + ResourceTypeAwsS3Bucket = "AWS::S3::Bucket" + // ResourceTypeAwsIamRole is a ResourceType enum value ResourceTypeAwsIamRole = "AWS::IAM::Role" - // ResourceTypeAwsKmsKey is a ResourceType enum value 
- ResourceTypeAwsKmsKey = "AWS::KMS::Key" + // ResourceTypeAwsSqsQueue is a ResourceType enum value + ResourceTypeAwsSqsQueue = "AWS::SQS::Queue" // ResourceTypeAwsLambdaFunction is a ResourceType enum value ResourceTypeAwsLambdaFunction = "AWS::Lambda::Function" @@ -4718,19 +5173,42 @@ const ( // ResourceTypeAwsLambdaLayerVersion is a ResourceType enum value ResourceTypeAwsLambdaLayerVersion = "AWS::Lambda::LayerVersion" - // ResourceTypeAwsS3Bucket is a ResourceType enum value - ResourceTypeAwsS3Bucket = "AWS::S3::Bucket" - - // ResourceTypeAwsSqsQueue is a ResourceType enum value - ResourceTypeAwsSqsQueue = "AWS::SQS::Queue" + // ResourceTypeAwsKmsKey is a ResourceType enum value + ResourceTypeAwsKmsKey = "AWS::KMS::Key" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeAwsS3Bucket, + ResourceTypeAwsIamRole, + ResourceTypeAwsSqsQueue, + ResourceTypeAwsLambdaFunction, + ResourceTypeAwsLambdaLayerVersion, + ResourceTypeAwsKmsKey, + } +} + const ( // TypeAccount is a Type enum value TypeAccount = "ACCOUNT" + + // TypeOrganization is a Type enum value + TypeOrganization = "ORGANIZATION" ) +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeAccount, + TypeOrganization, + } +} + const ( + // ValidationExceptionReasonUnknownOperation is a ValidationExceptionReason enum value + ValidationExceptionReasonUnknownOperation = "unknownOperation" + // ValidationExceptionReasonCannotParse is a ValidationExceptionReason enum value ValidationExceptionReasonCannotParse = "cannotParse" @@ -4739,7 +5217,14 @@ const ( // ValidationExceptionReasonOther is a ValidationExceptionReason enum value ValidationExceptionReasonOther = "other" - - // ValidationExceptionReasonUnknownOperation is a ValidationExceptionReason enum value - ValidationExceptionReasonUnknownOperation = "unknownOperation" ) + +// ValidationExceptionReason_Values returns all elements of the ValidationExceptionReason enum +func ValidationExceptionReason_Values() []string { + return []string{ + ValidationExceptionReasonUnknownOperation, + ValidationExceptionReasonCannotParse, + ValidationExceptionReasonFieldValidationFailed, + ValidationExceptionReasonOther, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/accessanalyzer/service.go b/vendor/github.com/aws/aws-sdk-go/service/accessanalyzer/service.go index 30a3026dc..05fabf7ec 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/accessanalyzer/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/accessanalyzer/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/acm/api.go b/vendor/github.com/aws/aws-sdk-go/service/acm/api.go index 0135a159d..621308de6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/acm/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/acm/api.go @@ -443,12 +443,11 @@ func (c *ACM) GetCertificateRequest(input *GetCertificateInput) (req *request.Re // GetCertificate API operation for AWS Certificate Manager. // -// Retrieves a certificate specified by an ARN and its certificate chain . 
The -// chain is an ordered list of certificates that contains the end entity certificate, -// intermediate certificates of subordinate CAs, and the root certificate in -// that order. The certificate and certificate chain are base64 encoded. If -// you want to decode the certificate to see the individual fields, you can -// use OpenSSL. +// Retrieves an Amazon-issued certificate and its certificate chain. The chain +// consists of the certificate of the issuing CA and the intermediate certificates +// of any other subordinate CAs. All of the certificates are base64 encoded. +// You can use OpenSSL (https://wiki.openssl.org/index.php/Command_Line_Utilities) +// to decode the certificates and inspect individual fields. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -604,7 +603,7 @@ func (c *ACM) ImportCertificateRequest(input *ImportCertificateInput) (req *requ // caller's account cannot be found. // // * LimitExceededException -// An ACM limit has been exceeded. +// An ACM quota has been exceeded. // // * InvalidTagException // One or both of the values that make up the key-value pair is not valid. For @@ -1123,7 +1122,7 @@ func (c *ACM) RequestCertificateRequest(input *RequestCertificateInput) (req *re // // Returned Error Types: // * LimitExceededException -// An ACM limit has been exceeded. +// An ACM quota has been exceeded. // // * InvalidDomainValidationOptionsException // One or more values in the DomainValidationOption structure is incorrect. @@ -1329,7 +1328,7 @@ func (c *ACM) UpdateCertificateOptionsRequest(input *UpdateCertificateOptionsInp // caller's account cannot be found. // // * LimitExceededException -// An ACM limit has been exceeded. +// An ACM quota has been exceeded. // // * InvalidStateException // Processing has reached an invalid state. @@ -1951,6 +1950,11 @@ type DomainValidation struct { // Contains the CNAME record that you add to your DNS database for domain validation. // For more information, see Use DNS to Validate Domain Ownership (https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-validate-dns.html). + // + // Note: The CNAME information that you need does not include the name of your + // domain. If you include your domain name in the DNS database CNAME record, + // validation fails. For example, if the name is "_a79865eb4cd1a6ab990a45779b4e0b96.yourdomain.com", + // only "_a79865eb4cd1a6ab990a45779b4e0b96" must be used. ResourceRecord *ResourceRecord `type:"structure"` // The domain name that ACM used to send domain validation emails. @@ -2356,12 +2360,12 @@ func (s *GetCertificateInput) SetCertificateArn(v string) *GetCertificateInput { type GetCertificateOutput struct { _ struct{} `type:"structure"` - // String that contains the ACM certificate represented by the ARN specified - // at input. + // The ACM-issued certificate corresponding to the ARN specified as input. Certificate *string `min:"1" type:"string"` - // The certificate chain that contains the root certificate issued by the certificate - // authority (CA). + // Certificates forming the requested certificate's chain of trust. The chain + // consists of the certificate of the issuing CA and the intermediate certificates + // of any other subordinate CAs. 
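[Editorial aside, not part of the vendored files: a minimal sketch of consuming the two GetCertificateOutput fields documented here, assuming an existing ACM client and a certificate ARN; the helper name is hypothetical.]

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/acm"
)

// printCertificateChain fetches an issued certificate and prints the leaf
// followed by its chain of trust, per the updated field docs above.
func printCertificateChain(conn *acm.ACM, arn string) error {
	out, err := conn.GetCertificate(&acm.GetCertificateInput{
		CertificateArn: aws.String(arn),
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.Certificate))      // the ACM-issued certificate
	fmt.Println(aws.StringValue(out.CertificateChain)) // issuing CA plus any intermediates
	return nil
}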
CertificateChain *string `min:"1" type:"string"` } @@ -2527,8 +2531,8 @@ func (s *ImportCertificateOutput) SetCertificateArn(v string) *ImportCertificate // One or more of of request parameters specified is not valid. type InvalidArgsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2545,17 +2549,17 @@ func (s InvalidArgsException) GoString() string { func newErrorInvalidArgsException(v protocol.ResponseMetadata) error { return &InvalidArgsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArgsException) Code() string { +func (s *InvalidArgsException) Code() string { return "InvalidArgsException" } // Message returns the exception's message. -func (s InvalidArgsException) Message() string { +func (s *InvalidArgsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2563,28 +2567,28 @@ func (s InvalidArgsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArgsException) OrigErr() error { +func (s *InvalidArgsException) OrigErr() error { return nil } -func (s InvalidArgsException) Error() string { +func (s *InvalidArgsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArgsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArgsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArgsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArgsException) RequestID() string { + return s.RespMetadata.RequestID } // The requested Amazon Resource Name (ARN) does not refer to an existing resource. type InvalidArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2601,17 +2605,17 @@ func (s InvalidArnException) GoString() string { func newErrorInvalidArnException(v protocol.ResponseMetadata) error { return &InvalidArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArnException) Code() string { +func (s *InvalidArnException) Code() string { return "InvalidArnException" } // Message returns the exception's message. -func (s InvalidArnException) Message() string { +func (s *InvalidArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2619,28 +2623,28 @@ func (s InvalidArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArnException) OrigErr() error { +func (s *InvalidArnException) OrigErr() error { return nil } -func (s InvalidArnException) Error() string { +func (s *InvalidArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArnException) RequestID() string { + return s.RespMetadata.RequestID } // One or more values in the DomainValidationOption structure is incorrect. type InvalidDomainValidationOptionsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2657,17 +2661,17 @@ func (s InvalidDomainValidationOptionsException) GoString() string { func newErrorInvalidDomainValidationOptionsException(v protocol.ResponseMetadata) error { return &InvalidDomainValidationOptionsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDomainValidationOptionsException) Code() string { +func (s *InvalidDomainValidationOptionsException) Code() string { return "InvalidDomainValidationOptionsException" } // Message returns the exception's message. -func (s InvalidDomainValidationOptionsException) Message() string { +func (s *InvalidDomainValidationOptionsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2675,28 +2679,28 @@ func (s InvalidDomainValidationOptionsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDomainValidationOptionsException) OrigErr() error { +func (s *InvalidDomainValidationOptionsException) OrigErr() error { return nil } -func (s InvalidDomainValidationOptionsException) Error() string { +func (s *InvalidDomainValidationOptionsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDomainValidationOptionsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDomainValidationOptionsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDomainValidationOptionsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDomainValidationOptionsException) RequestID() string { + return s.RespMetadata.RequestID } // An input parameter was invalid. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2713,17 +2717,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. 
-func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2731,28 +2735,28 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // Processing has reached an invalid state. type InvalidStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2769,17 +2773,17 @@ func (s InvalidStateException) GoString() string { func newErrorInvalidStateException(v protocol.ResponseMetadata) error { return &InvalidStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidStateException) Code() string { +func (s *InvalidStateException) Code() string { return "InvalidStateException" } // Message returns the exception's message. -func (s InvalidStateException) Message() string { +func (s *InvalidStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2787,29 +2791,29 @@ func (s InvalidStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidStateException) OrigErr() error { +func (s *InvalidStateException) OrigErr() error { return nil } -func (s InvalidStateException) Error() string { +func (s *InvalidStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidStateException) RequestID() string { + return s.RespMetadata.RequestID } // One or both of the values that make up the key-value pair is not valid. For // example, you cannot specify a tag value that begins with aws:. type InvalidTagException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2826,17 +2830,17 @@ func (s InvalidTagException) GoString() string { func newErrorInvalidTagException(v protocol.ResponseMetadata) error { return &InvalidTagException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidTagException) Code() string { +func (s *InvalidTagException) Code() string { return "InvalidTagException" } // Message returns the exception's message. -func (s InvalidTagException) Message() string { +func (s *InvalidTagException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2844,22 +2848,22 @@ func (s InvalidTagException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTagException) OrigErr() error { +func (s *InvalidTagException) OrigErr() error { return nil } -func (s InvalidTagException) Error() string { +func (s *InvalidTagException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTagException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTagException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTagException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTagException) RequestID() string { + return s.RespMetadata.RequestID } // The Key Usage X.509 v3 extension defines the purpose of the public key contained @@ -2887,10 +2891,10 @@ func (s *KeyUsage) SetName(v string) *KeyUsage { return s } -// An ACM limit has been exceeded. +// An ACM quota has been exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2907,17 +2911,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2925,22 +2929,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListCertificatesInput struct { @@ -3379,9 +3383,9 @@ type RequestCertificateInput struct { // of the ACM certificate. For example, add the name www.example.net to a certificate // for which the DomainName field is www.example.com if users can reach your // site by using either name. 
The maximum number of domain names that you can - // add to an ACM certificate is 100. However, the initial limit is 10 domain - // names. If you need more than 10 names, you must request a limit increase. - // For more information, see Limits (https://docs.aws.amazon.com/acm/latest/userguide/acm-limits.html). + // add to an ACM certificate is 100. However, the initial quota is 10 domain + // names. If you need more than 10 names, you must request a quota increase. + // For more information, see Quotas (https://docs.aws.amazon.com/acm/latest/userguide/acm-limits.html). // // The maximum length of a SAN DNS name is 253 octets. The name is made up of // multiple labels separated by periods. No label can be longer than 63 octets. @@ -3547,8 +3551,8 @@ func (s *RequestCertificateOutput) SetCertificateArn(v string) *RequestCertifica // The certificate request is in process and the certificate in your account // has not yet been issued. type RequestInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3565,17 +3569,17 @@ func (s RequestInProgressException) GoString() string { func newErrorRequestInProgressException(v protocol.ResponseMetadata) error { return &RequestInProgressException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RequestInProgressException) Code() string { +func (s *RequestInProgressException) Code() string { return "RequestInProgressException" } // Message returns the exception's message. -func (s RequestInProgressException) Message() string { +func (s *RequestInProgressException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3583,22 +3587,22 @@ func (s RequestInProgressException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RequestInProgressException) OrigErr() error { +func (s *RequestInProgressException) OrigErr() error { return nil } -func (s RequestInProgressException) Error() string { +func (s *RequestInProgressException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RequestInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RequestInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RequestInProgressException) RequestID() string { - return s.respMetadata.RequestID +func (s *RequestInProgressException) RequestID() string { + return s.RespMetadata.RequestID } type ResendValidationEmailInput struct { @@ -3715,8 +3719,8 @@ func (s ResendValidationEmailOutput) GoString() string { // The certificate is in use by another AWS service in the caller's account. // Remove the association and try again. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3733,17 +3737,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3751,29 +3755,29 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The specified certificate cannot be found in the caller's account or the // caller's account cannot be found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3790,17 +3794,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3808,22 +3812,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains a DNS record value that you can use to can use to validate ownership @@ -3930,8 +3934,8 @@ func (s *Tag) SetValue(v string) *Tag { // A specified tag did not comply with an existing tag policy and was rejected. 
type TagPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3948,17 +3952,17 @@ func (s TagPolicyException) GoString() string { func newErrorTagPolicyException(v protocol.ResponseMetadata) error { return &TagPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagPolicyException) Code() string { +func (s *TagPolicyException) Code() string { return "TagPolicyException" } // Message returns the exception's message. -func (s TagPolicyException) Message() string { +func (s *TagPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3966,28 +3970,28 @@ func (s TagPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagPolicyException) OrigErr() error { +func (s *TagPolicyException) OrigErr() error { return nil } -func (s TagPolicyException) Error() string { +func (s *TagPolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagPolicyException) RequestID() string { + return s.RespMetadata.RequestID } // The request contains too many tags. Try the request again with fewer tags. type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4004,17 +4008,17 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4022,22 +4026,22 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { +func (s *TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *TooManyTagsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
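[Editorial aside, not part of the vendored files: with the exception types refactored to pointer receivers and an exported RespMetadata, callers can match the concrete type and read the request metadata directly. A minimal sketch, assuming the SDK surfaces the typed exception as the returned error; the helper name is hypothetical.]

package example

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/acm"
)

// tagCertificate adds one tag and reports when the tag limit is exceeded,
// using errors.As against the refactored *acm.TooManyTagsException type.
func tagCertificate(conn *acm.ACM, arn, key, value string) {
	_, err := conn.AddTagsToCertificate(&acm.AddTagsToCertificateInput{
		CertificateArn: aws.String(arn),
		Tags:           []*acm.Tag{{Key: aws.String(key), Value: aws.String(value)}},
	})

	var tooMany *acm.TooManyTagsException
	if errors.As(err, &tooMany) {
		log.Printf("too many tags (HTTP %d, request %s): %s",
			tooMany.StatusCode(), tooMany.RequestID(), tooMany.Message())
	}
}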
-func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID } type UpdateCertificateOptionsInput struct { @@ -4138,6 +4142,19 @@ const ( CertificateStatusFailed = "FAILED" ) +// CertificateStatus_Values returns all elements of the CertificateStatus enum +func CertificateStatus_Values() []string { + return []string{ + CertificateStatusPendingValidation, + CertificateStatusIssued, + CertificateStatusInactive, + CertificateStatusExpired, + CertificateStatusValidationTimedOut, + CertificateStatusRevoked, + CertificateStatusFailed, + } +} + const ( // CertificateTransparencyLoggingPreferenceEnabled is a CertificateTransparencyLoggingPreference enum value CertificateTransparencyLoggingPreferenceEnabled = "ENABLED" @@ -4146,6 +4163,14 @@ const ( CertificateTransparencyLoggingPreferenceDisabled = "DISABLED" ) +// CertificateTransparencyLoggingPreference_Values returns all elements of the CertificateTransparencyLoggingPreference enum +func CertificateTransparencyLoggingPreference_Values() []string { + return []string{ + CertificateTransparencyLoggingPreferenceEnabled, + CertificateTransparencyLoggingPreferenceDisabled, + } +} + const ( // CertificateTypeImported is a CertificateType enum value CertificateTypeImported = "IMPORTED" @@ -4157,6 +4182,15 @@ const ( CertificateTypePrivate = "PRIVATE" ) +// CertificateType_Values returns all elements of the CertificateType enum +func CertificateType_Values() []string { + return []string{ + CertificateTypeImported, + CertificateTypeAmazonIssued, + CertificateTypePrivate, + } +} + const ( // DomainStatusPendingValidation is a DomainStatus enum value DomainStatusPendingValidation = "PENDING_VALIDATION" @@ -4168,6 +4202,15 @@ const ( DomainStatusFailed = "FAILED" ) +// DomainStatus_Values returns all elements of the DomainStatus enum +func DomainStatus_Values() []string { + return []string{ + DomainStatusPendingValidation, + DomainStatusSuccess, + DomainStatusFailed, + } +} + const ( // ExtendedKeyUsageNameTlsWebServerAuthentication is a ExtendedKeyUsageName enum value ExtendedKeyUsageNameTlsWebServerAuthentication = "TLS_WEB_SERVER_AUTHENTICATION" @@ -4206,6 +4249,24 @@ const ( ExtendedKeyUsageNameCustom = "CUSTOM" ) +// ExtendedKeyUsageName_Values returns all elements of the ExtendedKeyUsageName enum +func ExtendedKeyUsageName_Values() []string { + return []string{ + ExtendedKeyUsageNameTlsWebServerAuthentication, + ExtendedKeyUsageNameTlsWebClientAuthentication, + ExtendedKeyUsageNameCodeSigning, + ExtendedKeyUsageNameEmailProtection, + ExtendedKeyUsageNameTimeStamping, + ExtendedKeyUsageNameOcspSigning, + ExtendedKeyUsageNameIpsecEndSystem, + ExtendedKeyUsageNameIpsecTunnel, + ExtendedKeyUsageNameIpsecUser, + ExtendedKeyUsageNameAny, + ExtendedKeyUsageNameNone, + ExtendedKeyUsageNameCustom, + } +} + const ( // FailureReasonNoAvailableContacts is a FailureReason enum value FailureReasonNoAvailableContacts = "NO_AVAILABLE_CONTACTS" @@ -4252,10 +4313,36 @@ const ( // FailureReasonPcaAccessDenied is a FailureReason enum value FailureReasonPcaAccessDenied = "PCA_ACCESS_DENIED" + // FailureReasonSlrNotFound is a FailureReason enum value + FailureReasonSlrNotFound = "SLR_NOT_FOUND" + // FailureReasonOther is a FailureReason enum value FailureReasonOther = "OTHER" ) +// FailureReason_Values returns all elements of the FailureReason enum +func FailureReason_Values() []string { + return []string{ + FailureReasonNoAvailableContacts, + 
FailureReasonAdditionalVerificationRequired, + FailureReasonDomainNotAllowed, + FailureReasonInvalidPublicDomain, + FailureReasonDomainValidationDenied, + FailureReasonCaaError, + FailureReasonPcaLimitExceeded, + FailureReasonPcaInvalidArn, + FailureReasonPcaInvalidState, + FailureReasonPcaRequestFailed, + FailureReasonPcaNameConstraintsValidation, + FailureReasonPcaResourceNotFound, + FailureReasonPcaInvalidArgs, + FailureReasonPcaInvalidDuration, + FailureReasonPcaAccessDenied, + FailureReasonSlrNotFound, + FailureReasonOther, + } +} + const ( // KeyAlgorithmRsa2048 is a KeyAlgorithm enum value KeyAlgorithmRsa2048 = "RSA_2048" @@ -4276,6 +4363,18 @@ const ( KeyAlgorithmEcSecp521r1 = "EC_secp521r1" ) +// KeyAlgorithm_Values returns all elements of the KeyAlgorithm enum +func KeyAlgorithm_Values() []string { + return []string{ + KeyAlgorithmRsa2048, + KeyAlgorithmRsa1024, + KeyAlgorithmRsa4096, + KeyAlgorithmEcPrime256v1, + KeyAlgorithmEcSecp384r1, + KeyAlgorithmEcSecp521r1, + } +} + const ( // KeyUsageNameDigitalSignature is a KeyUsageName enum value KeyUsageNameDigitalSignature = "DIGITAL_SIGNATURE" @@ -4311,11 +4410,35 @@ const ( KeyUsageNameCustom = "CUSTOM" ) +// KeyUsageName_Values returns all elements of the KeyUsageName enum +func KeyUsageName_Values() []string { + return []string{ + KeyUsageNameDigitalSignature, + KeyUsageNameNonRepudiation, + KeyUsageNameKeyEncipherment, + KeyUsageNameDataEncipherment, + KeyUsageNameKeyAgreement, + KeyUsageNameCertificateSigning, + KeyUsageNameCrlSigning, + KeyUsageNameEncipherOnly, + KeyUsageNameDecipherOnly, + KeyUsageNameAny, + KeyUsageNameCustom, + } +} + const ( // RecordTypeCname is a RecordType enum value RecordTypeCname = "CNAME" ) +// RecordType_Values returns all elements of the RecordType enum +func RecordType_Values() []string { + return []string{ + RecordTypeCname, + } +} + const ( // RenewalEligibilityEligible is a RenewalEligibility enum value RenewalEligibilityEligible = "ELIGIBLE" @@ -4324,6 +4447,14 @@ const ( RenewalEligibilityIneligible = "INELIGIBLE" ) +// RenewalEligibility_Values returns all elements of the RenewalEligibility enum +func RenewalEligibility_Values() []string { + return []string{ + RenewalEligibilityEligible, + RenewalEligibilityIneligible, + } +} + const ( // RenewalStatusPendingAutoRenewal is a RenewalStatus enum value RenewalStatusPendingAutoRenewal = "PENDING_AUTO_RENEWAL" @@ -4338,6 +4469,16 @@ const ( RenewalStatusFailed = "FAILED" ) +// RenewalStatus_Values returns all elements of the RenewalStatus enum +func RenewalStatus_Values() []string { + return []string{ + RenewalStatusPendingAutoRenewal, + RenewalStatusPendingValidation, + RenewalStatusSuccess, + RenewalStatusFailed, + } +} + const ( // RevocationReasonUnspecified is a RevocationReason enum value RevocationReasonUnspecified = "UNSPECIFIED" @@ -4370,6 +4511,22 @@ const ( RevocationReasonAACompromise = "A_A_COMPROMISE" ) +// RevocationReason_Values returns all elements of the RevocationReason enum +func RevocationReason_Values() []string { + return []string{ + RevocationReasonUnspecified, + RevocationReasonKeyCompromise, + RevocationReasonCaCompromise, + RevocationReasonAffiliationChanged, + RevocationReasonSuperceded, + RevocationReasonCessationOfOperation, + RevocationReasonCertificateHold, + RevocationReasonRemoveFromCrl, + RevocationReasonPrivilegeWithdrawn, + RevocationReasonAACompromise, + } +} + const ( // ValidationMethodEmail is a ValidationMethod enum value ValidationMethodEmail = "EMAIL" @@ -4377,3 +4534,11 @@ const ( // 
ValidationMethodDns is a ValidationMethod enum value ValidationMethodDns = "DNS" ) + +// ValidationMethod_Values returns all elements of the ValidationMethod enum +func ValidationMethod_Values() []string { + return []string{ + ValidationMethodEmail, + ValidationMethodDns, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/acm/errors.go b/vendor/github.com/aws/aws-sdk-go/service/acm/errors.go index 54de486c3..f054cb405 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/acm/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/acm/errors.go @@ -48,7 +48,7 @@ const ( // ErrCodeLimitExceededException for service response error code // "LimitExceededException". // - // An ACM limit has been exceeded. + // An ACM quota has been exceeded. ErrCodeLimitExceededException = "LimitExceededException" // ErrCodeRequestInProgressException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/acm/service.go b/vendor/github.com/aws/aws-sdk-go/service/acm/service.go index 7dc5165f9..d5a56d01e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/acm/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/acm/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/acmpca/api.go b/vendor/github.com/aws/aws-sdk-go/service/acmpca/api.go index d339900f2..2f07f7f1e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/acmpca/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/acmpca/api.go @@ -68,6 +68,14 @@ func (c *ACMPCA) CreateCertificateAuthorityRequest(input *CreateCertificateAutho // S3 bucket that is included in certificates issued by the CA. If successful, // this action returns the Amazon Resource Name (ARN) of the CA. // +// ACM Private CAA assets that are stored in Amazon S3 can be protected with +// encryption. For more information, see Encrypting Your CRLs (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaCreateCa.html#crl-encryption). +// +// Both PCA and the IAM principal must have permission to write to the S3 bucket +// that you specify. If the IAM principal making the call does not have permission +// to write to the bucket, then an exception is thrown. For more information, +// see Configure Access to ACM Private CA (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaAuthAccess.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -80,16 +88,17 @@ func (c *ACMPCA) CreateCertificateAuthorityRequest(input *CreateCertificateAutho // One or more of the specified arguments was not valid. // // * InvalidPolicyException -// The S3 bucket policy is not valid. The policy must give ACM Private CA rights -// to read from and write to the bucket and find the bucket location. +// The resource policy is invalid or is missing a required statement. For general +// information about IAM policy and statement structure, see Overview of JSON +// Policies (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policies-json). // // * InvalidTagException // The tag associated with the CA is not valid. 
The invalid argument is contained // in the message field. // // * LimitExceededException -// An ACM Private CA limit has been exceeded. See the exception message returned -// to determine the limit that was exceeded. +// An ACM Private CA quota has been exceeded. See the exception message returned +// to determine the quota that was exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/CreateCertificateAuthority func (c *ACMPCA) CreateCertificateAuthority(input *CreateCertificateAuthorityInput) (*CreateCertificateAuthorityOutput, error) { @@ -159,7 +168,17 @@ func (c *ACMPCA) CreateCertificateAuthorityAuditReportRequest(input *CreateCerti // // Creates an audit report that lists every time that your CA private key is // used. The report is saved in the Amazon S3 bucket that you specify on input. -// The IssueCertificate and RevokeCertificate actions use the private key. +// The IssueCertificate (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_IssueCertificate.html) +// and RevokeCertificate (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_RevokeCertificate.html) +// actions use the private key. +// +// Both PCA and the IAM principal must have permission to write to the S3 bucket +// that you specify. If the IAM principal making the call does not have permission +// to write to the bucket, then an exception is thrown. For more information, +// see Configure Access to ACM Private CA (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaAuthAccess.html). +// +// ACM Private CAA assets that are stored in Amazon S3 can be protected with +// encryption. For more information, see Encrypting Your Audit Reports (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaAuditReport.html#audit-report-encryption). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -176,8 +195,8 @@ func (c *ACMPCA) CreateCertificateAuthorityAuditReportRequest(input *CreateCerti // The request has failed for an unspecified reason. // // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. @@ -186,8 +205,7 @@ func (c *ACMPCA) CreateCertificateAuthorityAuditReportRequest(input *CreateCerti // One or more of the specified arguments was not valid. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. // // See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/CreateCertificateAuthorityAuditReport func (c *ACMPCA) CreateCertificateAuthorityAuditReport(input *CreateCertificateAuthorityAuditReportInput) (*CreateCertificateAuthorityAuditReportOutput, error) { @@ -256,16 +274,29 @@ func (c *ACMPCA) CreatePermissionRequest(input *CreatePermissionInput) (req *req // CreatePermission API operation for AWS Certificate Manager Private Certificate Authority. // -// Assigns permissions from a private CA to a designated AWS service. Services -// are specified by their service principals and can be given permission to -// create and retrieve certificates on a private CA. 
Services can also be given -// permission to list the active permissions that the private CA has granted. -// For ACM to automatically renew your private CA's certificates, you must assign -// all possible permissions from the CA to the ACM service principal. +// Grants one or more permissions on a private CA to the AWS Certificate Manager +// (ACM) service principal (acm.amazonaws.com). These permissions allow ACM +// to issue and renew ACM certificates that reside in the same AWS account as +// the CA. +// +// You can list current permissions with the ListPermissions (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListPermissions.html) +// action and revoke them with the DeletePermission (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeletePermission.html) +// action. +// +// About Permissions +// +// * If the private CA and the certificates it issues reside in the same +// account, you can use CreatePermission to grant permissions for ACM to +// carry out automatic certificate renewals. // -// At this time, you can only assign permissions to ACM (acm.amazonaws.com). -// Permissions can be revoked with the DeletePermission action and listed with -// the ListPermissions action. +// * For automatic certificate renewal to succeed, the ACM service principal +// needs permissions to create, retrieve, and list certificates. +// +// * If the private CA and the ACM certificates reside in different accounts, +// then permissions cannot be used to enable automatic renewals. Instead, +// the ACM certificate owner must set up a resource-based policy to enable +// cross-account issuance and renewals. For more information, see Using a +// Resource Based Policy with ACM Private CA (acm-pca/latest/userguide/pca-rbp.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -276,8 +307,8 @@ func (c *ACMPCA) CreatePermissionRequest(input *CreatePermissionInput) (req *req // // Returned Error Types: // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. @@ -286,12 +317,11 @@ func (c *ACMPCA) CreatePermissionRequest(input *CreatePermissionInput) (req *req // The designated permission has already been given to the user. // // * LimitExceededException -// An ACM Private CA limit has been exceeded. See the exception message returned -// to determine the limit that was exceeded. +// An ACM Private CA quota has been exceeded. See the exception message returned +// to determine the quota that was exceeded. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. // // * RequestFailedException // The request has failed for an unspecified reason. @@ -365,27 +395,31 @@ func (c *ACMPCA) DeleteCertificateAuthorityRequest(input *DeleteCertificateAutho // // Deletes a private certificate authority (CA). You must provide the Amazon // Resource Name (ARN) of the private CA that you want to delete. You can find -// the ARN by calling the ListCertificateAuthorities action. 
+// the ARN by calling the ListCertificateAuthorities (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html) +// action. // // Deleting a CA will invalidate other CAs and certificates below it in your // CA hierarchy. // // Before you can delete a CA that you have created and activated, you must -// disable it. To do this, call the UpdateCertificateAuthority action and set -// the CertificateAuthorityStatus parameter to DISABLED. +// disable it. To do this, call the UpdateCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_UpdateCertificateAuthority.html) +// action and set the CertificateAuthorityStatus parameter to DISABLED. // // Additionally, you can delete a CA if you are waiting for it to be created // (that is, the status of the CA is CREATING). You can also delete it if the // CA has been created but you haven't yet imported the signed certificate into // ACM Private CA (that is, the status of the CA is PENDING_CERTIFICATE). // -// When you successfully call DeleteCertificateAuthority, the CA's status changes -// to DELETED. However, the CA won't be permanently deleted until the restoration -// period has passed. By default, if you do not set the PermanentDeletionTimeInDays -// parameter, the CA remains restorable for 30 days. You can set the parameter -// from 7 to 30 days. The DescribeCertificateAuthority action returns the time -// remaining in the restoration window of a private CA in the DELETED state. -// To restore an eligible CA, call the RestoreCertificateAuthority action. +// When you successfully call DeleteCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeleteCertificateAuthority.html), +// the CA's status changes to DELETED. However, the CA won't be permanently +// deleted until the restoration period has passed. By default, if you do not +// set the PermanentDeletionTimeInDays parameter, the CA remains restorable +// for 30 days. You can set the parameter from 7 to 30 days. The DescribeCertificateAuthority +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DescribeCertificateAuthority.html) +// action returns the time remaining in the restoration window of a private +// CA in the DELETED state. To restore an eligible CA, call the RestoreCertificateAuthority +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_RestoreCertificateAuthority.html) +// action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -399,15 +433,14 @@ func (c *ACMPCA) DeleteCertificateAuthorityRequest(input *DeleteCertificateAutho // A previous update to your private CA is still ongoing. // // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. 
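[Editorial aside, not part of the vendored files: a minimal sketch of the deletion sequence described above, assuming an existing ACM PCA client; the helper name is hypothetical.]

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/acmpca"
)

// deletePrivateCA follows the documented flow: disable the CA first, then
// delete it with the shortest allowed restoration window.
func deletePrivateCA(conn *acmpca.ACMPCA, arn string) error {
	if _, err := conn.UpdateCertificateAuthority(&acmpca.UpdateCertificateAuthorityInput{
		CertificateAuthorityArn: aws.String(arn),
		Status:                  aws.String(acmpca.CertificateAuthorityStatusDisabled),
	}); err != nil {
		return err
	}

	_, err := conn.DeleteCertificateAuthority(&acmpca.DeleteCertificateAuthorityInput{
		CertificateAuthorityArn:     aws.String(arn),
		PermanentDeletionTimeInDays: aws.Int64(7), // restorable for 7 days instead of the 30-day default
	})
	return err
}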
// // See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/DeleteCertificateAuthority func (c *ACMPCA) DeleteCertificateAuthority(input *DeleteCertificateAuthorityInput) (*DeleteCertificateAuthorityOutput, error) { @@ -476,9 +509,31 @@ func (c *ACMPCA) DeletePermissionRequest(input *DeletePermissionInput) (req *req // DeletePermission API operation for AWS Certificate Manager Private Certificate Authority. // -// Revokes permissions that a private CA assigned to a designated AWS service. -// Permissions can be created with the CreatePermission action and listed with -// the ListPermissions action. +// Revokes permissions on a private CA granted to the AWS Certificate Manager +// (ACM) service principal (acm.amazonaws.com). +// +// These permissions allow ACM to issue and renew ACM certificates that reside +// in the same AWS account as the CA. If you revoke these permissions, ACM will +// no longer renew the affected certificates automatically. +// +// Permissions can be granted with the CreatePermission (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreatePermission.html) +// action and listed with the ListPermissions (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListPermissions.html) +// action. +// +// About Permissions +// +// * If the private CA and the certificates it issues reside in the same +// account, you can use CreatePermission to grant permissions for ACM to +// carry out automatic certificate renewals. +// +// * For automatic certificate renewal to succeed, the ACM service principal +// needs permissions to create, retrieve, and list certificates. +// +// * If the private CA and the ACM certificates reside in different accounts, +// then permissions cannot be used to enable automatic renewals. Instead, +// the ACM certificate owner must set up a resource-based policy to enable +// cross-account issuance and renewals. For more information, see Using a +// Resource Based Policy with ACM Private CA (acm-pca/latest/userguide/pca-rbp.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -489,15 +544,14 @@ func (c *ACMPCA) DeletePermissionRequest(input *DeletePermissionInput) (req *req // // Returned Error Types: // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. // // * RequestFailedException // The request has failed for an unspecified reason. @@ -524,6 +578,135 @@ func (c *ACMPCA) DeletePermissionWithContext(ctx aws.Context, input *DeletePermi return out, req.Send() } +const opDeletePolicy = "DeletePolicy" + +// DeletePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeletePolicy for more information on using the DeletePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePolicyRequest method. +// req, resp := client.DeletePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/DeletePolicy +func (c *ACMPCA) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { + op := &request.Operation{ + Name: opDeletePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyInput{} + } + + output = &DeletePolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePolicy API operation for AWS Certificate Manager Private Certificate Authority. +// +// Deletes the resource-based policy attached to a private CA. Deletion will +// remove any access that the policy has granted. If there is no policy attached +// to the private CA, this action will return successful. +// +// If you delete a policy that was applied through AWS Resource Access Manager +// (RAM), the CA will be removed from all shares in which it was included. +// +// The AWS Certificate Manager Service Linked Role that the policy supports +// is not affected when you delete the policy. +// +// The current policy can be shown with GetPolicy (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetPolicy.html) +// and updated with PutPolicy (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_PutPolicy.html). +// +// About Policies +// +// * A policy grants access on a private CA to an AWS customer account, to +// AWS Organizations, or to an AWS Organizations unit. Policies are under +// the control of a CA administrator. For more information, see Using a Resource +// Based Policy with ACM Private CA (acm-pca/latest/userguide/pca-rbp.html). +// +// * A policy permits a user of AWS Certificate Manager (ACM) to issue ACM +// certificates signed by a CA in another account. +// +// * For ACM to manage automatic renewal of these certificates, the ACM user +// must configure a Service Linked Role (SLR). The SLR allows the ACM service +// to assume the identity of the user, subject to confirmation against the +// ACM Private CA policy. For more information, see Using a Service Linked +// Role with ACM (https://docs.aws.amazon.com/acm/latest/userguide/acm-slr.html). +// +// * Updates made in AWS Resource Manager (RAM) are reflected in policies. +// For more information, see Using AWS Resource Access Manager (RAM) with +// ACM Private CA (acm-pca/latest/userguide/pca-ram.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Certificate Manager Private Certificate Authority's +// API operation DeletePolicy for usage and error information. +// +// Returned Error Types: +// * ConcurrentModificationException +// A previous update to your private CA is still ongoing. 
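[Editorial aside, not part of the vendored files: a minimal sketch of calling the DeletePolicy operation added in this SDK bump. The DeletePolicyInput struct is not shown in this hunk, so the ResourceArn field name here is an assumption based on the other acm-pca policy APIs; the helper name is hypothetical.]

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/acmpca"
)

// detachPolicy removes the resource-based policy attached to the given
// private CA, revoking any cross-account access that policy granted.
func detachPolicy(conn *acmpca.ACMPCA, caARN string) error {
	_, err := conn.DeletePolicy(&acmpca.DeletePolicyInput{
		ResourceArn: aws.String(caARN), // assumed field name; see the input struct later in this file
	})
	return err
}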
+// +// * InvalidArnException +// The requested Amazon Resource Name (ARN) does not refer to an existing resource. +// +// * InvalidStateException +// The state of the private CA does not allow this action to occur. +// +// * LockoutPreventedException +// The current action was prevented because it would lock the caller out from +// performing subsequent actions. Verify that the specified parameters would +// not result in the caller being denied access to the resource. +// +// * RequestFailedException +// The request has failed for an unspecified reason. +// +// * ResourceNotFoundException +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/DeletePolicy +func (c *ACMPCA) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + return out, req.Send() +} + +// DeletePolicyWithContext is the same as DeletePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ACMPCA) DeletePolicyWithContext(ctx aws.Context, input *DeletePolicyInput, opts ...request.Option) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeCertificateAuthority = "DescribeCertificateAuthority" // DescribeCertificateAuthorityRequest generates a "aws/request.Request" representing the @@ -568,9 +751,10 @@ func (c *ACMPCA) DescribeCertificateAuthorityRequest(input *DescribeCertificateA // DescribeCertificateAuthority API operation for AWS Certificate Manager Private Certificate Authority. // -// Lists information about your private certificate authority (CA). You specify -// the private CA on input by its ARN (Amazon Resource Name). The output contains -// the status of your CA. This can be any of the following: +// Lists information about your private certificate authority (CA) or one that +// has been shared with you. You specify the private CA on input by its ARN +// (Amazon Resource Name). The output contains the status of your CA. This can +// be any of the following: // // * CREATING - ACM Private CA is creating your private certificate authority. // @@ -601,8 +785,8 @@ func (c *ACMPCA) DescribeCertificateAuthorityRequest(input *DescribeCertificateA // // Returned Error Types: // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. @@ -674,9 +858,12 @@ func (c *ACMPCA) DescribeCertificateAuthorityAuditReportRequest(input *DescribeC // DescribeCertificateAuthorityAuditReport API operation for AWS Certificate Manager Private Certificate Authority. 
// // Lists information about a specific audit report created by calling the CreateCertificateAuthorityAuditReport +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthorityAuditReport.html) // action. Audit information is created every time the certificate authority // (CA) private key is used. The private key is used when you call the IssueCertificate -// action or the RevokeCertificate action. +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_IssueCertificate.html) +// action or the RevokeCertificate (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_RevokeCertificate.html) +// action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -687,8 +874,8 @@ func (c *ACMPCA) DescribeCertificateAuthorityAuditReportRequest(input *DescribeC // // Returned Error Types: // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. @@ -762,12 +949,14 @@ func (c *ACMPCA) GetCertificateRequest(input *GetCertificateInput) (req *request // GetCertificate API operation for AWS Certificate Manager Private Certificate Authority. // -// Retrieves a certificate from your private CA. The ARN of the certificate -// is returned when you call the IssueCertificate action. You must specify both -// the ARN of your private CA and the ARN of the issued certificate when calling -// the GetCertificate action. You can retrieve the certificate if it is in the -// ISSUED state. You can call the CreateCertificateAuthorityAuditReport action -// to create a report that contains information about all of the certificates +// Retrieves a certificate from your private CA or one that has been shared +// with you. The ARN of the certificate is returned when you call the IssueCertificate +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_IssueCertificate.html) +// action. You must specify both the ARN of your private CA and the ARN of the +// issued certificate when calling the GetCertificate action. You can retrieve +// the certificate if it is in the ISSUED state. You can call the CreateCertificateAuthorityAuditReport +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthorityAuditReport.html) +// action to create a report that contains information about all of the certificates // issued and revoked by your private CA. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -785,15 +974,14 @@ func (c *ACMPCA) GetCertificateRequest(input *GetCertificateInput) (req *request // The request has failed for an unspecified reason. // // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. 
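// A hedged usage sketch for GetCertificate as described above (illustrative only,
// not generated code). It assumes an existing *acmpca.ACMPCA client and the
// "github.com/aws/aws-sdk-go/aws" helpers; both ARNs are placeholders.
func exampleGetCertificate(svc *acmpca.ACMPCA, caARN, certARN string) (string, error) {
	// Both the issuing CA ARN and the issued certificate ARN are required.
	out, err := svc.GetCertificate(&acmpca.GetCertificateInput{
		CertificateAuthorityArn: aws.String(caARN),
		CertificateArn:          aws.String(certARN),
	})
	if err != nil {
		return "", err
	}
	// The PEM-encoded certificate body; the output also carries the chain.
	return aws.StringValue(out.Certificate), nil
}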
// // See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/GetCertificate func (c *ACMPCA) GetCertificate(input *GetCertificateInput) (*GetCertificateOutput, error) { @@ -862,9 +1050,9 @@ func (c *ACMPCA) GetCertificateAuthorityCertificateRequest(input *GetCertificate // GetCertificateAuthorityCertificate API operation for AWS Certificate Manager Private Certificate Authority. // // Retrieves the certificate and certificate chain for your private certificate -// authority (CA). Both the certificate and the chain are base64 PEM-encoded. -// The chain does not include the CA certificate. Each certificate in the chain -// signs the one before it. +// authority (CA) or one that has been shared with you. Both the certificate +// and the chain are base64 PEM-encoded. The chain does not include the CA certificate. +// Each certificate in the chain signs the one before it. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -875,12 +1063,11 @@ func (c *ACMPCA) GetCertificateAuthorityCertificateRequest(input *GetCertificate // // Returned Error Types: // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. @@ -953,10 +1140,11 @@ func (c *ACMPCA) GetCertificateAuthorityCsrRequest(input *GetCertificateAuthorit // // Retrieves the certificate signing request (CSR) for your private certificate // authority (CA). The CSR is created when you call the CreateCertificateAuthority +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html) // action. Sign the CSR with your ACM Private CA-hosted or on-premises root // or subordinate CA. Then import the signed certificate back into ACM Private -// CA by calling the ImportCertificateAuthorityCertificate action. The CSR is -// returned as a base64 PEM-encoded string. +// CA by calling the ImportCertificateAuthorityCertificate (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ImportCertificateAuthorityCertificate.html) +// action. The CSR is returned as a base64 PEM-encoded string. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -973,15 +1161,14 @@ func (c *ACMPCA) GetCertificateAuthorityCsrRequest(input *GetCertificateAuthorit // The request has failed for an unspecified reason. // // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. 
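// An illustrative sketch (not generated code) of fetching the CSR described above
// so it can be handed to an external signer. It assumes an existing
// *acmpca.ACMPCA client; the Csr output field name follows the service API and
// the ARN is a placeholder.
func exampleGetCSR(svc *acmpca.ACMPCA, caARN string) (string, error) {
	out, err := svc.GetCertificateAuthorityCsr(&acmpca.GetCertificateAuthorityCsrInput{
		CertificateAuthorityArn: aws.String(caARN),
	})
	if err != nil {
		return "", err
	}
	// The CSR is returned as a base64 PEM-encoded string, per the comment above.
	return aws.StringValue(out.Csr), nil
}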
// // See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/GetCertificateAuthorityCsr func (c *ACMPCA) GetCertificateAuthorityCsr(input *GetCertificateAuthorityCsrInput) (*GetCertificateAuthorityCsrOutput, error) { @@ -1005,6 +1192,120 @@ func (c *ACMPCA) GetCertificateAuthorityCsrWithContext(ctx aws.Context, input *G return out, req.Send() } +const opGetPolicy = "GetPolicy" + +// GetPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPolicy for more information on using the GetPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPolicyRequest method. +// req, resp := client.GetPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/GetPolicy +func (c *ACMPCA) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, output *GetPolicyOutput) { + op := &request.Operation{ + Name: opGetPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPolicyInput{} + } + + output = &GetPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPolicy API operation for AWS Certificate Manager Private Certificate Authority. +// +// Retrieves the resource-based policy attached to a private CA. If either the +// private CA resource or the policy cannot be found, this action returns a +// ResourceNotFoundException. +// +// The policy can be attached or updated with PutPolicy (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_PutPolicy.html) +// and removed with DeletePolicy (acm-pca/latest/APIReference/API_DeletePolicy.html). +// +// About Policies +// +// * A policy grants access on a private CA to an AWS customer account, to +// AWS Organizations, or to an AWS Organizations unit. Policies are under +// the control of a CA administrator. For more information, see Using a Resource +// Based Policy with ACM Private CA (acm-pca/latest/userguide/pca-rbp.html). +// +// * A policy permits a user of AWS Certificate Manager (ACM) to issue ACM +// certificates signed by a CA in another account. +// +// * For ACM to manage automatic renewal of these certificates, the ACM user +// must configure a Service Linked Role (SLR). The SLR allows the ACM service +// to assume the identity of the user, subject to confirmation against the +// ACM Private CA policy. For more information, see Using a Service Linked +// Role with ACM (https://docs.aws.amazon.com/acm/latest/userguide/acm-slr.html). +// +// * Updates made in AWS Resource Manager (RAM) are reflected in policies. +// For more information, see Using AWS Resource Access Manager (RAM) with +// ACM Private CA (acm-pca/latest/userguide/pca-ram.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
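// A hedged sketch of the error-handling pattern described above (illustrative,
// not generated code): call GetPolicy and inspect the awserr.Error code. It
// assumes an existing client plus the "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/aws/awserr" packages; the ErrCode constant name
// follows the SDK's generated error codes and the ARN is a placeholder.
func exampleGetPolicy(svc *acmpca.ACMPCA, caARN string) (string, error) {
	out, err := svc.GetPolicy(&acmpca.GetPolicyInput{
		ResourceArn: aws.String(caARN),
	})
	if err != nil {
		// Runtime type assertion on awserr.Error, as the comment above suggests.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == acmpca.ErrCodeResourceNotFoundException {
			return "", nil // no policy (or no CA) found; treat as empty
		}
		return "", err
	}
	return aws.StringValue(out.Policy), nil
}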
+// +// See the AWS API reference guide for AWS Certificate Manager Private Certificate Authority's +// API operation GetPolicy for usage and error information. +// +// Returned Error Types: +// * InvalidArnException +// The requested Amazon Resource Name (ARN) does not refer to an existing resource. +// +// * InvalidStateException +// The state of the private CA does not allow this action to occur. +// +// * RequestFailedException +// The request has failed for an unspecified reason. +// +// * ResourceNotFoundException +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/GetPolicy +func (c *ACMPCA) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) { + req, out := c.GetPolicyRequest(input) + return out, req.Send() +} + +// GetPolicyWithContext is the same as GetPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ACMPCA) GetPolicyWithContext(ctx aws.Context, input *GetPolicyInput, opts ...request.Option) (*GetPolicyOutput, error) { + req, out := c.GetPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opImportCertificateAuthorityCertificate = "ImportCertificateAuthorityCertificate" // ImportCertificateAuthorityCertificateRequest generates a "aws/request.Request" representing the @@ -1055,14 +1356,15 @@ func (c *ACMPCA) ImportCertificateAuthorityCertificateRequest(input *ImportCerti // ACM Private CA. Before you can call this action, the following preparations // must in place: // -// In ACM Private CA, call the CreateCertificateAuthority action to create the -// private CA that that you plan to back with the imported certificate. +// In ACM Private CA, call the CreateCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html) +// action to create the private CA that that you plan to back with the imported +// certificate. // -// Call the GetCertificateAuthorityCsr action to generate a certificate signing -// request (CSR). +// Call the GetCertificateAuthorityCsr (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetCertificateAuthorityCsr.html) +// action to generate a certificate signing request (CSR). // -// Sign the CSR using a root or intermediate CA hosted either by an on-premises -// PKI hierarchy or a commercial CA.. +// Sign the CSR using a root or intermediate CA hosted by either an on-premises +// PKI hierarchy or by a commercial CA. // // Create a certificate chain and copy the signed certificate and the certificate // chain to your working directory. @@ -1085,6 +1387,54 @@ func (c *ACMPCA) ImportCertificateAuthorityCertificateRequest(input *ImportCerti // // * The chain must be PEM-encoded. // +// * The maximum allowed size of a certificate is 32 KB. +// +// * The maximum allowed size of a certificate chain is 2 MB. +// +// Enforcement of Critical Constraints +// +// ACM Private CA allows the following extensions to be marked critical in the +// imported CA certificate or chain. 
+// +// * Basic constraints (must be marked critical) +// +// * Subject alternative names +// +// * Key usage +// +// * Extended key usage +// +// * Authority key identifier +// +// * Subject key identifier +// +// * Issuer alternative name +// +// * Subject directory attributes +// +// * Subject information access +// +// * Certificate policies +// +// * Policy mappings +// +// * Inhibit anyPolicy +// +// ACM Private CA rejects the following extensions when they are marked critical +// in an imported CA certificate or chain. +// +// * Name constraints +// +// * Policy constraints +// +// * CRL distribution points +// +// * Authority information access +// +// * Freshest CRL +// +// * Any other extension +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1103,8 +1453,8 @@ func (c *ACMPCA) ImportCertificateAuthorityCertificateRequest(input *ImportCerti // The request has failed for an unspecified reason. // // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. @@ -1113,8 +1463,7 @@ func (c *ACMPCA) ImportCertificateAuthorityCertificateRequest(input *ImportCerti // The request action cannot be performed or is prohibited. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. // // * MalformedCertificateException // One or more fields in the certificate are invalid. @@ -1189,10 +1538,11 @@ func (c *ACMPCA) IssueCertificateRequest(input *IssueCertificateInput) (req *req // IssueCertificate API operation for AWS Certificate Manager Private Certificate Authority. // -// Uses your private certificate authority (CA) to issue a client certificate. -// This action returns the Amazon Resource Name (ARN) of the certificate. You -// can retrieve the certificate by calling the GetCertificate action and specifying -// the ARN. +// Uses your private certificate authority (CA), or one that has been shared +// with you, to issue a client certificate. This action returns the Amazon Resource +// Name (ARN) of the certificate. You can retrieve the certificate by calling +// the GetCertificate (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetCertificate.html) +// action and specifying the ARN. // // You cannot use the ACM ListCertificateAuthorities action to retrieve the // ARNs of the certificates that you issue by using ACM Private CA. @@ -1206,16 +1556,15 @@ func (c *ACMPCA) IssueCertificateRequest(input *IssueCertificateInput) (req *req // // Returned Error Types: // * LimitExceededException -// An ACM Private CA limit has been exceeded. See the exception message returned -// to determine the limit that was exceeded. +// An ACM Private CA quota has been exceeded. See the exception message returned +// to determine the quota that was exceeded. // // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. 
// // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. @@ -1299,6 +1648,7 @@ func (c *ACMPCA) ListCertificateAuthoritiesRequest(input *ListCertificateAuthori // ListCertificateAuthorities API operation for AWS Certificate Manager Private Certificate Authority. // // Lists the private certificate authorities that you created by using the CreateCertificateAuthority +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html) // action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1311,7 +1661,7 @@ func (c *ACMPCA) ListCertificateAuthoritiesRequest(input *ListCertificateAuthori // Returned Error Types: // * InvalidNextTokenException // The token specified in the NextToken argument is not valid. Use the token -// returned from your previous call to ListCertificateAuthorities. +// returned from your previous call to ListCertificateAuthorities (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html). // // See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/ListCertificateAuthorities func (c *ACMPCA) ListCertificateAuthorities(input *ListCertificateAuthoritiesInput) (*ListCertificateAuthoritiesOutput, error) { @@ -1437,9 +1787,30 @@ func (c *ACMPCA) ListPermissionsRequest(input *ListPermissionsInput) (req *reque // ListPermissions API operation for AWS Certificate Manager Private Certificate Authority. // -// Lists all the permissions, if any, that have been assigned by a private CA. -// Permissions can be granted with the CreatePermission action and revoked with -// the DeletePermission action. +// List all permissions on a private CA, if any, granted to the AWS Certificate +// Manager (ACM) service principal (acm.amazonaws.com). +// +// These permissions allow ACM to issue and renew ACM certificates that reside +// in the same AWS account as the CA. +// +// Permissions can be granted with the CreatePermission (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreatePermission.html) +// action and revoked with the DeletePermission (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeletePermission.html) +// action. +// +// About Permissions +// +// * If the private CA and the certificates it issues reside in the same +// account, you can use CreatePermission to grant permissions for ACM to +// carry out automatic certificate renewals. +// +// * For automatic certificate renewal to succeed, the ACM service principal +// needs permissions to create, retrieve, and list certificates. +// +// * If the private CA and the ACM certificates reside in different accounts, +// then permissions cannot be used to enable automatic renewals. Instead, +// the ACM certificate owner must set up a resource-based policy to enable +// cross-account issuance and renewals. For more information, see Using a +// Resource Based Policy with ACM Private CA (acm-pca/latest/userguide/pca-rbp.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1450,19 +1821,18 @@ func (c *ACMPCA) ListPermissionsRequest(input *ListPermissionsInput) (req *reque // // Returned Error Types: // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. // // * InvalidNextTokenException // The token specified in the NextToken argument is not valid. Use the token -// returned from your previous call to ListCertificateAuthorities. +// returned from your previous call to ListCertificateAuthorities (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html). // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. // // * RequestFailedException // The request has failed for an unspecified reason. @@ -1591,11 +1961,13 @@ func (c *ACMPCA) ListTagsRequest(input *ListTagsInput) (req *request.Request, ou // ListTags API operation for AWS Certificate Manager Private Certificate Authority. // -// Lists the tags, if any, that are associated with your private CA. Tags are -// labels that you can use to identify and organize your CAs. Each tag consists -// of a key and an optional value. Call the TagCertificateAuthority action to -// add one or more tags to your CA. Call the UntagCertificateAuthority action -// to remove tags. +// Lists the tags, if any, that are associated with your private CA or one that +// has been shared with you. Tags are labels that you can use to identify and +// organize your CAs. Each tag consists of a key and an optional value. Call +// the TagCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_TagCertificateAuthority.html) +// action to add one or more tags to your CA. Call the UntagCertificateAuthority +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_UntagCertificateAuthority.html) +// action to remove tags. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1606,15 +1978,14 @@ func (c *ACMPCA) ListTagsRequest(input *ListTagsInput) (req *request.Request, ou // // Returned Error Types: // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. 
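// An illustrative caller sketch for ListTags as described above (not generated
// code). It assumes an existing *acmpca.ACMPCA client; the input/output field
// names follow the service API and the ARN is a placeholder.
func exampleListTags(svc *acmpca.ACMPCA, caARN string) (map[string]string, error) {
	out, err := svc.ListTags(&acmpca.ListTagsInput{
		CertificateAuthorityArn: aws.String(caARN),
	})
	if err != nil {
		return nil, err
	}
	// Each tag is a key with an optional value, per the description above.
	tags := map[string]string{}
	for _, t := range out.Tags {
		tags[aws.StringValue(t.Key)] = aws.StringValue(t.Value)
	}
	return tags, nil
}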
// // See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/ListTags func (c *ACMPCA) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { @@ -1690,6 +2061,135 @@ func (c *ACMPCA) ListTagsPagesWithContext(ctx aws.Context, input *ListTagsInput, return p.Err() } +const opPutPolicy = "PutPolicy" + +// PutPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPolicy for more information on using the PutPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutPolicyRequest method. +// req, resp := client.PutPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/PutPolicy +func (c *ACMPCA) PutPolicyRequest(input *PutPolicyInput) (req *request.Request, output *PutPolicyOutput) { + op := &request.Operation{ + Name: opPutPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutPolicyInput{} + } + + output = &PutPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutPolicy API operation for AWS Certificate Manager Private Certificate Authority. +// +// Attaches a resource-based policy to a private CA. +// +// A policy can also be applied by sharing (https://docs.aws.amazon.com/acm-pca/latest/userguide/pca-ram.html) +// a private CA through AWS Resource Access Manager (RAM). +// +// The policy can be displayed with GetPolicy (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetPolicy.html) +// and removed with DeletePolicy (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeletePolicy.html). +// +// About Policies +// +// * A policy grants access on a private CA to an AWS customer account, to +// AWS Organizations, or to an AWS Organizations unit. Policies are under +// the control of a CA administrator. For more information, see Using a Resource +// Based Policy with ACM Private CA (acm-pca/latest/userguide/pca-rbp.html). +// +// * A policy permits a user of AWS Certificate Manager (ACM) to issue ACM +// certificates signed by a CA in another account. +// +// * For ACM to manage automatic renewal of these certificates, the ACM user +// must configure a Service Linked Role (SLR). The SLR allows the ACM service +// to assume the identity of the user, subject to confirmation against the +// ACM Private CA policy. For more information, see Using a Service Linked +// Role with ACM (https://docs.aws.amazon.com/acm/latest/userguide/acm-slr.html). +// +// * Updates made in AWS Resource Manager (RAM) are reflected in policies. +// For more information, see Using AWS Resource Access Manager (RAM) with +// ACM Private CA (acm-pca/latest/userguide/pca-ram.html). +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Certificate Manager Private Certificate Authority's +// API operation PutPolicy for usage and error information. +// +// Returned Error Types: +// * ConcurrentModificationException +// A previous update to your private CA is still ongoing. +// +// * InvalidArnException +// The requested Amazon Resource Name (ARN) does not refer to an existing resource. +// +// * InvalidStateException +// The state of the private CA does not allow this action to occur. +// +// * InvalidPolicyException +// The resource policy is invalid or is missing a required statement. For general +// information about IAM policy and statement structure, see Overview of JSON +// Policies (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policies-json). +// +// * LockoutPreventedException +// The current action was prevented because it would lock the caller out from +// performing subsequent actions. Verify that the specified parameters would +// not result in the caller being denied access to the resource. +// +// * RequestFailedException +// The request has failed for an unspecified reason. +// +// * ResourceNotFoundException +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/PutPolicy +func (c *ACMPCA) PutPolicy(input *PutPolicyInput) (*PutPolicyOutput, error) { + req, out := c.PutPolicyRequest(input) + return out, req.Send() +} + +// PutPolicyWithContext is the same as PutPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ACMPCA) PutPolicyWithContext(ctx aws.Context, input *PutPolicyInput, opts ...request.Option) (*PutPolicyOutput, error) { + req, out := c.PutPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRestoreCertificateAuthority = "RestoreCertificateAuthority" // RestoreCertificateAuthorityRequest generates a "aws/request.Request" representing the @@ -1737,15 +2237,18 @@ func (c *ACMPCA) RestoreCertificateAuthorityRequest(input *RestoreCertificateAut // // Restores a certificate authority (CA) that is in the DELETED state. You can // restore a CA during the period that you defined in the PermanentDeletionTimeInDays -// parameter of the DeleteCertificateAuthority action. Currently, you can specify -// 7 to 30 days. If you did not specify a PermanentDeletionTimeInDays value, -// by default you can restore the CA at any time in a 30 day period. You can -// check the time remaining in the restoration period of a private CA in the -// DELETED state by calling the DescribeCertificateAuthority or ListCertificateAuthorities +// parameter of the DeleteCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeleteCertificateAuthority.html) +// action. Currently, you can specify 7 to 30 days. 
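// A hedged sketch (illustrative only) of attaching a resource-based policy with
// the PutPolicy operation added above. It assumes an existing *acmpca.ACMPCA
// client; the Policy/ResourceArn field names follow the service API, and both
// the ARN and the policy document are placeholders.
func examplePutPolicy(svc *acmpca.ACMPCA, caARN, policyJSON string) error {
	// policyJSON is a resource policy document, for example one granting
	// cross-account certificate issuance as described in the PutPolicy docs.
	_, err := svc.PutPolicy(&acmpca.PutPolicyInput{
		ResourceArn: aws.String(caARN),
		Policy:      aws.String(policyJSON),
	})
	return err
}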
If you did not specify a +// PermanentDeletionTimeInDays value, by default you can restore the CA at any +// time in a 30 day period. You can check the time remaining in the restoration +// period of a private CA in the DELETED state by calling the DescribeCertificateAuthority +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DescribeCertificateAuthority.html) +// or ListCertificateAuthorities (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html) // actions. The status of a restored CA is set to its pre-deletion status when // the RestoreCertificateAuthority action returns. To change its status to ACTIVE, -// call the UpdateCertificateAuthority action. If the private CA was in the -// PENDING_CERTIFICATE state at deletion, you must use the ImportCertificateAuthorityCertificate +// call the UpdateCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_UpdateCertificateAuthority.html) +// action. If the private CA was in the PENDING_CERTIFICATE state at deletion, +// you must use the ImportCertificateAuthorityCertificate (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ImportCertificateAuthorityCertificate.html) // action to import a certificate authority into the private CA before it can // be activated. You cannot restore a CA after the restoration period has ended. // @@ -1758,12 +2261,11 @@ func (c *ACMPCA) RestoreCertificateAuthorityRequest(input *RestoreCertificateAut // // Returned Error Types: // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. @@ -1838,10 +2340,20 @@ func (c *ACMPCA) RevokeCertificateRequest(input *RevokeCertificateInput) (req *r // Revokes a certificate that was issued inside ACM Private CA. If you enable // a certificate revocation list (CRL) when you create or update your private // CA, information about the revoked certificates will be included in the CRL. -// ACM Private CA writes the CRL to an S3 bucket that you specify. For more -// information about revocation, see the CrlConfiguration structure. ACM Private -// CA also writes revocation information to the audit report. For more information, -// see CreateCertificateAuthorityAuditReport. +// ACM Private CA writes the CRL to an S3 bucket that you specify. A CRL is +// typically updated approximately 30 minutes after a certificate is revoked. +// If for any reason the CRL update fails, ACM Private CA attempts makes further +// attempts every 15 minutes. With Amazon CloudWatch, you can create alarms +// for the metrics CRLGenerated and MisconfiguredCRLBucket. For more information, +// see Supported CloudWatch Metrics (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaCloudWatch.html). +// +// Both PCA and the IAM principal must have permission to write to the S3 bucket +// that you specify. If the IAM principal making the call does not have permission +// to write to the bucket, then an exception is thrown. 
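// An illustrative sketch of revoking a certificate via the RevokeCertificate
// operation described above (not generated code). It assumes an existing
// *acmpca.ACMPCA client; the CertificateSerial/RevocationReason field names
// follow the service API, and the arguments are placeholders.
func exampleRevokeCertificate(svc *acmpca.ACMPCA, caARN, serialHex string) error {
	_, err := svc.RevokeCertificate(&acmpca.RevokeCertificateInput{
		CertificateAuthorityArn: aws.String(caARN),
		CertificateSerial:       aws.String(serialHex),
		// One of the RevocationReason enum values defined by the service.
		RevocationReason: aws.String("CESSATION_OF_OPERATION"),
	})
	return err
}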
For more information, +// see Configure Access to ACM Private CA (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaAuthAccess.html). +// +// ACM Private CA also writes revocation information to the audit report. For +// more information, see CreateCertificateAuthorityAuditReport (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthorityAuditReport.html). // // You cannot revoke a root CA self-signed certificate. // @@ -1863,16 +2375,15 @@ func (c *ACMPCA) RevokeCertificateRequest(input *RevokeCertificateInput) (req *r // The request action cannot be performed or is prohibited. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. // // * LimitExceededException -// An ACM Private CA limit has been exceeded. See the exception message returned -// to determine the limit that was exceeded. +// An ACM Private CA quota has been exceeded. See the exception message returned +// to determine the quota that was exceeded. // // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * RequestAlreadyProcessedException // Your request has already been completed. @@ -1957,8 +2468,9 @@ func (c *ACMPCA) TagCertificateAuthorityRequest(input *TagCertificateAuthorityIn // a tag to just one private CA if you want to identify a specific characteristic // of that CA, or you can apply the same tag to multiple private CAs if you // want to filter for a common relationship among those CAs. To remove one or -// more tags, use the UntagCertificateAuthority action. Call the ListTags action -// to see what tags are associated with your CA. +// more tags, use the UntagCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_UntagCertificateAuthority.html) +// action. Call the ListTags (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListTags.html) +// action to see what tags are associated with your CA. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1969,15 +2481,14 @@ func (c *ACMPCA) TagCertificateAuthorityRequest(input *TagCertificateAuthorityIn // // Returned Error Types: // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. // // * InvalidTagException // The tag associated with the CA is not valid. The invalid argument is contained @@ -2058,7 +2569,8 @@ func (c *ACMPCA) UntagCertificateAuthorityRequest(input *UntagCertificateAuthori // pair. If you do not specify the value portion of the tag when calling this // action, the tag will be removed regardless of value. If you specify a value, // the tag is removed only if it is associated with the specified value. To -// add tags to a private CA, use the TagCertificateAuthority. 
Call the ListTags +// add tags to a private CA, use the TagCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_TagCertificateAuthority.html). +// Call the ListTags (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListTags.html) // action to see what tags are associated with your CA. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2070,15 +2582,14 @@ func (c *ACMPCA) UntagCertificateAuthorityRequest(input *UntagCertificateAuthori // // Returned Error Types: // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArnException // The requested Amazon Resource Name (ARN) does not refer to an existing resource. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. // // * InvalidTagException // The tag associated with the CA is not valid. The invalid argument is contained @@ -2156,6 +2667,11 @@ func (c *ACMPCA) UpdateCertificateAuthorityRequest(input *UpdateCertificateAutho // it. You can disable a private CA that is in the ACTIVE state or make a CA // that is in the DISABLED state active again. // +// Both PCA and the IAM principal must have permission to write to the S3 bucket +// that you specify. If the IAM principal making the call does not have permission +// to write to the bucket, then an exception is thrown. For more information, +// see Configure Access to ACM Private CA (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaAuthAccess.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2168,8 +2684,8 @@ func (c *ACMPCA) UpdateCertificateAuthorityRequest(input *UpdateCertificateAutho // A previous update to your private CA is still ongoing. // // * ResourceNotFoundException -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. // // * InvalidArgsException // One or more of the specified arguments was not valid. @@ -2178,12 +2694,12 @@ func (c *ACMPCA) UpdateCertificateAuthorityRequest(input *UpdateCertificateAutho // The requested Amazon Resource Name (ARN) does not refer to an existing resource. // // * InvalidStateException -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. // // * InvalidPolicyException -// The S3 bucket policy is not valid. The policy must give ACM Private CA rights -// to read from and write to the bucket and find the bucket location. +// The resource policy is invalid or is missing a required statement. For general +// information about IAM policy and statement structure, see Overview of JSON +// Policies (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policies-json). 
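// A hedged usage sketch for UpdateCertificateAuthority as described above
// (illustrative, not generated code). It assumes an existing *acmpca.ACMPCA
// client; the Status value and ARN are placeholders, and the field names
// follow the service API.
func exampleUpdateCAStatus(svc *acmpca.ACMPCA, caARN string) error {
	// Re-enable a CA that was previously moved to the DISABLED state.
	_, err := svc.UpdateCertificateAuthority(&acmpca.UpdateCertificateAuthorityInput{
		CertificateAuthorityArn: aws.String(caARN),
		Status:                  aws.String("ACTIVE"),
	})
	return err
}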
// // See also, https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22/UpdateCertificateAuthority func (c *ACMPCA) UpdateCertificateAuthority(input *UpdateCertificateAuthorityInput) (*UpdateCertificateAuthorityOutput, error) { @@ -2224,7 +2740,7 @@ type ASN1Subject struct { // Two-digit code that specifies the country in which the certificate subject // located. - Country *string `type:"string"` + Country *string `min:"2" type:"string"` // Disambiguating information for the certificate subject. DistinguishedNameQualifier *string `type:"string"` @@ -2282,6 +2798,19 @@ func (s ASN1Subject) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ASN1Subject) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ASN1Subject"} + if s.Country != nil && len(*s.Country) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Country", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetCommonName sets the CommonName field's value. func (s *ASN1Subject) SetCommonName(v string) *ASN1Subject { s.CommonName = &v @@ -2370,11 +2899,13 @@ func (s *ASN1Subject) SetTitle(v string) *ASN1Subject { // private CA can issue and revoke X.509 digital certificates. Digital certificates // verify that the entity named in the certificate Subject field owns or controls // the public key contained in the Subject Public Key Info field. Call the CreateCertificateAuthority +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html) // action to create your private CA. You must then call the GetCertificateAuthorityCertificate +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetCertificateAuthorityCertificate.html) // action to retrieve a private CA certificate signing request (CSR). Sign the // CSR with your ACM Private CA-hosted or on-premises root or subordinate CA -// certificate. Call the ImportCertificateAuthorityCertificate action to import -// the signed certificate into AWS Certificate Manager (ACM). +// certificate. Call the ImportCertificateAuthorityCertificate (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ImportCertificateAuthorityCertificate.html) +// action to import the signed certificate into AWS Certificate Manager (ACM). type CertificateAuthority struct { _ struct{} `type:"structure"` @@ -2400,8 +2931,12 @@ type CertificateAuthority struct { // Date and time before which your private CA certificate is not valid. NotBefore *time.Time `type:"timestamp"` + // The AWS account ID that owns the certificate authority. + OwnerAccount *string `min:"12" type:"string"` + // The period during which a deleted CA can be restored. For more information, // see the PermanentDeletionTimeInDays parameter of the DeleteCertificateAuthorityRequest + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeleteCertificateAuthorityRequest.html) // action. RestorableUntil *time.Time `type:"timestamp"` @@ -2471,6 +3006,12 @@ func (s *CertificateAuthority) SetNotBefore(v time.Time) *CertificateAuthority { return s } +// SetOwnerAccount sets the OwnerAccount field's value. +func (s *CertificateAuthority) SetOwnerAccount(v string) *CertificateAuthority { + s.OwnerAccount = &v + return s +} + // SetRestorableUntil sets the RestorableUntil field's value. 
func (s *CertificateAuthority) SetRestorableUntil(v time.Time) *CertificateAuthority { s.RestorableUntil = &v @@ -2506,7 +3047,8 @@ func (s *CertificateAuthority) SetType(v string) *CertificateAuthority { // the key pair that your private CA creates when it issues a certificate. It // also includes the signature algorithm that it uses when issuing certificates, // and its X.500 distinguished name. You must specify this information when -// you call the CreateCertificateAuthority action. +// you call the CreateCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html) +// action. type CertificateAuthorityConfiguration struct { _ struct{} `type:"structure"` @@ -2519,6 +3061,9 @@ type CertificateAuthorityConfiguration struct { // Name of the algorithm your private CA uses to sign certificate requests. // + // This parameter should not be confused with the SigningAlgorithm parameter + // used to sign certificates when they are issued. + // // SigningAlgorithm is a required field SigningAlgorithm *string `type:"string" required:"true" enum:"SigningAlgorithm"` @@ -2551,6 +3096,11 @@ func (s *CertificateAuthorityConfiguration) Validate() error { if s.Subject == nil { invalidParams.Add(request.NewErrParamRequired("Subject")) } + if s.Subject != nil { + if err := s.Subject.Validate(); err != nil { + invalidParams.AddNested("Subject", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2579,8 +3129,8 @@ func (s *CertificateAuthorityConfiguration) SetSubject(v *ASN1Subject) *Certific // The certificate authority certificate you are importing does not comply with // conditions specified in the certificate that signed it. type CertificateMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2597,17 +3147,17 @@ func (s CertificateMismatchException) GoString() string { func newErrorCertificateMismatchException(v protocol.ResponseMetadata) error { return &CertificateMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CertificateMismatchException) Code() string { +func (s *CertificateMismatchException) Code() string { return "CertificateMismatchException" } // Message returns the exception's message. -func (s CertificateMismatchException) Message() string { +func (s *CertificateMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2615,28 +3165,28 @@ func (s CertificateMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CertificateMismatchException) OrigErr() error { +func (s *CertificateMismatchException) OrigErr() error { return nil } -func (s CertificateMismatchException) Error() string { +func (s *CertificateMismatchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CertificateMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CertificateMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s CertificateMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *CertificateMismatchException) RequestID() string { + return s.RespMetadata.RequestID } // A previous update to your private CA is still ongoing. type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2653,17 +3203,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. -func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2671,22 +3221,22 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } type CreateCertificateAuthorityAuditReportInput struct { @@ -2707,7 +3257,7 @@ type CreateCertificateAuthorityAuditReportInput struct { // The name of the S3 bucket that will contain the audit report. // // S3BucketName is a required field - S3BucketName *string `type:"string" required:"true"` + S3BucketName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2735,6 +3285,9 @@ func (s *CreateCertificateAuthorityAuditReportInput) Validate() error { if s.S3BucketName == nil { invalidParams.Add(request.NewErrParamRequired("S3BucketName")) } + if s.S3BucketName != nil && len(*s.S3BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("S3BucketName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2807,25 +3360,24 @@ type CreateCertificateAuthorityInput struct { CertificateAuthorityType *string `type:"string" required:"true" enum:"CertificateAuthorityType"` // Alphanumeric string that can be used to distinguish between calls to CreateCertificateAuthority. - // Idempotency tokens time out after five minutes. Therefore, if you call CreateCertificateAuthority - // multiple times with the same idempotency token within a five minute period, - // ACM Private CA recognizes that you are requesting only one certificate. As - // a result, ACM Private CA issues only one. 
If you change the idempotency token - // for each call, however, ACM Private CA recognizes that you are requesting - // multiple certificates. + // For a given token, ACM Private CA creates exactly one CA. If you issue a + // subsequent call using the same token, ACM Private CA returns the ARN of the + // existing CA and takes no further action. If you change the idempotency token + // across multiple calls, ACM Private CA creates a unique CA for each unique + // token. IdempotencyToken *string `min:"1" type:"string"` // Contains a Boolean value that you can use to enable a certification revocation // list (CRL) for the CA, the name of the S3 bucket to which ACM Private CA // will write the CRL, and an optional CNAME alias that you can use to hide // the name of your bucket in the CRL Distribution Points extension of your - // CA certificate. For more information, see the CrlConfiguration structure. + // CA certificate. For more information, see the CrlConfiguration (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CrlConfiguration.html) + // structure. RevocationConfiguration *RevocationConfiguration `type:"structure"` // Key-value pairs that will be attached to the new private CA. You can associate - // up to 50 tags with a private CA. For information using tags with - // - // IAM to manage permissions, see Controlling Access Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). + // up to 50 tags with a private CA. For information using tags with IAM to manage + // permissions, see Controlling Access Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_iam-tags.html). Tags []*Tag `min:"1" type:"list"` } @@ -2947,8 +3499,8 @@ type CreatePermissionInput struct { Actions []*string `min:"1" type:"list" required:"true"` // The Amazon Resource Name (ARN) of the CA that grants the permissions. You - // can find the ARN by calling the ListCertificateAuthorities action. This must - // have the following form: + // can find the ARN by calling the ListCertificateAuthorities (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html) + // action. This must have the following form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . // @@ -3051,6 +3603,9 @@ func (s CreatePermissionOutput) GoString() string { // Points extension of each certificate it issues. Your S3 bucket policy must // give write permission to ACM Private CA. // +// ACM Private CAA assets that are stored in Amazon S3 can be protected with +// encryption. For more information, see Encrypting Your CRLs (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaCreateCa.html#crl-encryption). +// // Your private CA uses the value in the ExpirationInDays parameter to calculate // the nextUpdate field in the CRL. The CRL is refreshed at 1/2 the age of next // update or when a certificate is revoked. When a certificate is revoked, it @@ -3101,8 +3656,10 @@ type CrlConfiguration struct { // Boolean value that specifies whether certificate revocation lists (CRLs) // are enabled. You can use this value to enable certificate revocation for - // a new CA when you call the CreateCertificateAuthority action or for an existing - // CA when you call the UpdateCertificateAuthority action. 
+ // a new CA when you call the CreateCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html) + // action or for an existing CA when you call the UpdateCertificateAuthority + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_UpdateCertificateAuthority.html) + // action. // // Enabled is a required field Enabled *bool `type:"boolean" required:"true"` @@ -3113,9 +3670,9 @@ type CrlConfiguration struct { // Name of the S3 bucket that contains the CRL. If you do not provide a value // for the CustomCname argument, the name of your S3 bucket is placed into the // CRL Distribution Points extension of the issued certificate. You can change - // the name of your bucket by calling the UpdateCertificateAuthority action. - // You must specify a bucket policy that allows ACM Private CA to write the - // CRL to your bucket. + // the name of your bucket by calling the UpdateCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_UpdateCertificateAuthority.html) + // action. You must specify a bucket policy that allows ACM Private CA to write + // the CRL to your bucket. S3BucketName *string `min:"3" type:"string"` } @@ -3175,7 +3732,8 @@ func (s *CrlConfiguration) SetS3BucketName(v string) *CrlConfiguration { type DeleteCertificateAuthorityInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. + // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html). // This must have the following form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . @@ -3247,8 +3805,8 @@ type DeletePermissionInput struct { _ struct{} `type:"structure"` // The Amazon Resource Number (ARN) of the private CA that issued the permissions. - // You can find the CA's ARN by calling the ListCertificateAuthorities action. - // This must have the following form: + // You can find the CA's ARN by calling the ListCertificateAuthorities (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html) + // action. This must have the following form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . // @@ -3329,10 +3887,69 @@ func (s DeletePermissionOutput) GoString() string { return s.String() } +type DeletePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Number (ARN) of the private CA that will have its policy + // deleted. You can find the CA's ARN by calling the ListCertificateAuthorities + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html) + // action. The ARN value must have the form arn:aws:acm-pca:region:account:certificate-authority/01234567-89ab-cdef-0123-0123456789ab. + // + // ResourceArn is a required field + ResourceArn *string `min:"5" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeletePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePolicyInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 5 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *DeletePolicyInput) SetResourceArn(v string) *DeletePolicyInput { + s.ResourceArn = &v + return s +} + +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyOutput) GoString() string { + return s.String() +} + type DescribeCertificateAuthorityAuditReportInput struct { _ struct{} `type:"structure"` // The report ID returned by calling the CreateCertificateAuthorityAuditReport + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthorityAuditReport.html) // action. // // AuditReportId is a required field @@ -3400,7 +4017,7 @@ type DescribeCertificateAuthorityAuditReportOutput struct { CreatedAt *time.Time `type:"timestamp"` // Name of the S3 bucket that contains the report. - S3BucketName *string `type:"string"` + S3BucketName *string `min:"3" type:"string"` // S3 key that uniquely identifies the report file in your S3 bucket. S3Key *string `type:"string"` @@ -3443,7 +4060,8 @@ func (s *DescribeCertificateAuthorityAuditReportOutput) SetS3Key(v string) *Desc type DescribeCertificateAuthorityInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. + // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html). // This must be of the form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . @@ -3487,8 +4105,8 @@ func (s *DescribeCertificateAuthorityInput) SetCertificateAuthorityArn(v string) type DescribeCertificateAuthorityOutput struct { _ struct{} `type:"structure"` - // A CertificateAuthority structure that contains information about your private - // CA. + // A CertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CertificateAuthority.html) + // structure that contains information about your private CA. CertificateAuthority *CertificateAuthority `type:"structure"` } @@ -3590,6 +4208,7 @@ type GetCertificateAuthorityCsrInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html) // action. This must be of the form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 @@ -3665,7 +4284,8 @@ type GetCertificateInput struct { // CertificateArn is a required field CertificateArn *string `min:"5" type:"string" required:"true"` - // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. 
+ // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html). // This must be of the form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 . @@ -3751,6 +4371,72 @@ func (s *GetCertificateOutput) SetCertificateChain(v string) *GetCertificateOutp return s } +type GetPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Number (ARN) of the private CA that will have its policy + // retrieved. You can find the CA's ARN by calling the ListCertificateAuthorities + // action. + // + // ResourceArn is a required field + ResourceArn *string `min:"5" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPolicyInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 5 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *GetPolicyInput) SetResourceArn(v string) *GetPolicyInput { + s.ResourceArn = &v + return s +} + +type GetPolicyOutput struct { + _ struct{} `type:"structure"` + + // The policy attached to the private CA as a JSON document. + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetPolicyOutput) SetPolicy(v string) *GetPolicyOutput { + s.Policy = &v + return s +} + type ImportCertificateAuthorityCertificateInput struct { _ struct{} `type:"structure"` @@ -3762,7 +4448,8 @@ type ImportCertificateAuthorityCertificateInput struct { // Certificate is a required field Certificate []byte `min:"1" type:"blob" required:"true"` - // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. + // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html). // This must be of the form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 @@ -3848,8 +4535,8 @@ func (s ImportCertificateAuthorityCertificateOutput) GoString() string { // One or more of the specified arguments was not valid. 
type InvalidArgsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3866,17 +4553,17 @@ func (s InvalidArgsException) GoString() string { func newErrorInvalidArgsException(v protocol.ResponseMetadata) error { return &InvalidArgsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArgsException) Code() string { +func (s *InvalidArgsException) Code() string { return "InvalidArgsException" } // Message returns the exception's message. -func (s InvalidArgsException) Message() string { +func (s *InvalidArgsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3884,28 +4571,28 @@ func (s InvalidArgsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArgsException) OrigErr() error { +func (s *InvalidArgsException) OrigErr() error { return nil } -func (s InvalidArgsException) Error() string { +func (s *InvalidArgsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArgsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArgsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArgsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArgsException) RequestID() string { + return s.RespMetadata.RequestID } // The requested Amazon Resource Name (ARN) does not refer to an existing resource. type InvalidArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3922,17 +4609,17 @@ func (s InvalidArnException) GoString() string { func newErrorInvalidArnException(v protocol.ResponseMetadata) error { return &InvalidArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArnException) Code() string { +func (s *InvalidArnException) Code() string { return "InvalidArnException" } // Message returns the exception's message. -func (s InvalidArnException) Message() string { +func (s *InvalidArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3940,29 +4627,29 @@ func (s InvalidArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArnException) OrigErr() error { +func (s *InvalidArnException) OrigErr() error { return nil } -func (s InvalidArnException) Error() string { +func (s *InvalidArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidArnException) RequestID() string {
-	return s.respMetadata.RequestID
+func (s *InvalidArnException) RequestID() string {
+	return s.RespMetadata.RequestID
 }
 
 // The token specified in the NextToken argument is not valid. Use the token
-// returned from your previous call to ListCertificateAuthorities.
+// returned from your previous call to ListCertificateAuthorities (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html).
 type InvalidNextTokenException struct {
-	_            struct{} `type:"structure"`
-	respMetadata protocol.ResponseMetadata
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
 
 	Message_ *string `locationName:"message" type:"string"`
 }
 
@@ -3979,17 +4666,17 @@ func (s InvalidNextTokenException) GoString() string {
 
 func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error {
 	return &InvalidNextTokenException{
-		respMetadata: v,
+		RespMetadata: v,
 	}
 }
 
 // Code returns the exception type name.
-func (s InvalidNextTokenException) Code() string {
+func (s *InvalidNextTokenException) Code() string {
 	return "InvalidNextTokenException"
 }
 
 // Message returns the exception's message.
-func (s InvalidNextTokenException) Message() string {
+func (s *InvalidNextTokenException) Message() string {
 	if s.Message_ != nil {
 		return *s.Message_
 	}
@@ -3997,29 +4684,30 @@ func (s InvalidNextTokenException) Message() string {
 }
 
 // OrigErr always returns nil, satisfies awserr.Error interface.
-func (s InvalidNextTokenException) OrigErr() error {
+func (s *InvalidNextTokenException) OrigErr() error {
 	return nil
 }
 
-func (s InvalidNextTokenException) Error() string {
+func (s *InvalidNextTokenException) Error() string {
 	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
 }
 
 // Status code returns the HTTP status code for the request's response error.
-func (s InvalidNextTokenException) StatusCode() int {
-	return s.respMetadata.StatusCode
+func (s *InvalidNextTokenException) StatusCode() int {
+	return s.RespMetadata.StatusCode
 }
 
 // RequestID returns the service's response RequestID for request.
-func (s InvalidNextTokenException) RequestID() string {
-	return s.respMetadata.RequestID
+func (s *InvalidNextTokenException) RequestID() string {
+	return s.RespMetadata.RequestID
 }
 
-// The S3 bucket policy is not valid. The policy must give ACM Private CA rights
-// to read from and write to the bucket and find the bucket location.
+// The resource policy is invalid or is missing a required statement. For general
+// information about IAM policy and statement structure, see Overview of JSON
+// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policies-json).
 type InvalidPolicyException struct {
-	_            struct{} `type:"structure"`
-	respMetadata protocol.ResponseMetadata
+	_            struct{}                  `type:"structure"`
+	RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
 
 	Message_ *string `locationName:"message" type:"string"`
 }
 
@@ -4036,17 +4724,17 @@ func (s InvalidPolicyException) GoString() string {
 
 func newErrorInvalidPolicyException(v protocol.ResponseMetadata) error {
 	return &InvalidPolicyException{
-		respMetadata: v,
+		RespMetadata: v,
 	}
 }
 
 // Code returns the exception type name.
-func (s InvalidPolicyException) Code() string {
+func (s *InvalidPolicyException) Code() string {
 	return "InvalidPolicyException"
 }
 
 // Message returns the exception's message.
-func (s InvalidPolicyException) Message() string { +func (s *InvalidPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4054,28 +4742,28 @@ func (s InvalidPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPolicyException) OrigErr() error { +func (s *InvalidPolicyException) OrigErr() error { return nil } -func (s InvalidPolicyException) Error() string { +func (s *InvalidPolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPolicyException) RequestID() string { + return s.RespMetadata.RequestID } // The request action cannot be performed or is prohibited. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4092,17 +4780,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4110,29 +4798,28 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } -// The private CA is in a state during which a report or certificate cannot -// be generated. +// The state of the private CA does not allow this action to occur. 
type InvalidStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4149,17 +4836,17 @@ func (s InvalidStateException) GoString() string { func newErrorInvalidStateException(v protocol.ResponseMetadata) error { return &InvalidStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidStateException) Code() string { +func (s *InvalidStateException) Code() string { return "InvalidStateException" } // Message returns the exception's message. -func (s InvalidStateException) Message() string { +func (s *InvalidStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4167,29 +4854,29 @@ func (s InvalidStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidStateException) OrigErr() error { +func (s *InvalidStateException) OrigErr() error { return nil } -func (s InvalidStateException) Error() string { +func (s *InvalidStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidStateException) RequestID() string { + return s.RespMetadata.RequestID } // The tag associated with the CA is not valid. The invalid argument is contained // in the message field. type InvalidTagException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4206,17 +4893,17 @@ func (s InvalidTagException) GoString() string { func newErrorInvalidTagException(v protocol.ResponseMetadata) error { return &InvalidTagException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTagException) Code() string { +func (s *InvalidTagException) Code() string { return "InvalidTagException" } // Message returns the exception's message. -func (s InvalidTagException) Message() string { +func (s *InvalidTagException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4224,28 +4911,29 @@ func (s InvalidTagException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTagException) OrigErr() error { +func (s *InvalidTagException) OrigErr() error { return nil } -func (s InvalidTagException) Error() string { +func (s *InvalidTagException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTagException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTagException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidTagException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTagException) RequestID() string { + return s.RespMetadata.RequestID } type IssueCertificateInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. + // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html). // This must be of the form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 @@ -4267,6 +4955,9 @@ type IssueCertificateInput struct { // openssl req -new -config openssl_rsa.cnf -extensions usr_cert -newkey rsa:2048 // -days -365 -keyout private/test_cert_priv_key.pem -out csr/test_cert_.csr // + // Note: A CSR must provide either a subject name or a subject alternative name + // or the request will be rejected. + // // Csr is automatically base64 encoded/decoded by the SDK. // // Csr is a required field @@ -4283,18 +4974,46 @@ type IssueCertificateInput struct { // The name of the algorithm that will be used to sign the certificate to be // issued. // + // This parameter should not be confused with the SigningAlgorithm parameter + // used to sign a CSR. + // // SigningAlgorithm is a required field SigningAlgorithm *string `type:"string" required:"true" enum:"SigningAlgorithm"` // Specifies a custom configuration template to use when issuing a certificate. // If this parameter is not provided, ACM Private CA defaults to the EndEntityCertificate/V1 - // template. + // template. For CA certificates, you should choose the shortest path length + // that meets your needs. The path length is indicated by the PathLenN portion + // of the ARN, where N is the CA depth (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaTerms.html#terms-cadepth). + // + // Note: The CA depth configured on a subordinate CA certificate must not exceed + // the limit set by its parents in the CA hierarchy. // // The following service-owned TemplateArn values are supported by ACM Private // CA: // + // * arn:aws:acm-pca:::template/CodeSigningCertificate/V1 + // + // * arn:aws:acm-pca:::template/CodeSigningCertificate_CSRPassthrough/V1 + // // * arn:aws:acm-pca:::template/EndEntityCertificate/V1 // + // * arn:aws:acm-pca:::template/EndEntityCertificate_CSRPassthrough/V1 + // + // * arn:aws:acm-pca:::template/EndEntityClientAuthCertificate/V1 + // + // * arn:aws:acm-pca:::template/EndEntityClientAuthCertificate_CSRPassthrough/V1 + // + // * arn:aws:acm-pca:::template/EndEntityServerAuthCertificate/V1 + // + // * arn:aws:acm-pca:::template/EndEntityServerAuthCertificate_CSRPassthrough/V1 + // + // * arn:aws:acm-pca:::template/OCSPSigningCertificate/V1 + // + // * arn:aws:acm-pca:::template/OCSPSigningCertificate_CSRPassthrough/V1 + // + // * arn:aws:acm-pca:::template/RootCACertificate/V1 + // // * arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen0/V1 // // * arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen1/V1 @@ -4303,12 +5022,17 @@ type IssueCertificateInput struct { // // * arn:aws:acm-pca:::template/SubordinateCACertificate_PathLen3/V1 // - // * arn:aws:acm-pca:::template/RootCACertificate/V1 - // // For more information, see Using Templates (https://docs.aws.amazon.com/acm-pca/latest/userguide/UsingTemplates.html). TemplateArn *string `min:"5" type:"string"` - // The type of the validity period. 
+ // Information describing the validity period of the certificate. + // + // When issuing a certificate, ACM Private CA sets the "Not Before" date in + // the validity field to date and time minus 60 minutes. This is intended to + // compensate for time inconsistencies across systems of 60 minutes or less. + // + // The validity period configured on a certificate must not exceed the limit + // set by its parents in the CA hierarchy. // // Validity is a required field Validity *Validity `type:"structure" required:"true"` @@ -4425,11 +5149,11 @@ func (s *IssueCertificateOutput) SetCertificateArn(v string) *IssueCertificateOu return s } -// An ACM Private CA limit has been exceeded. See the exception message returned -// to determine the limit that was exceeded. +// An ACM Private CA quota has been exceeded. See the exception message returned +// to determine the quota that was exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4446,17 +5170,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4464,22 +5188,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListCertificateAuthoritiesInput struct { @@ -4495,6 +5219,10 @@ type ListCertificateAuthoritiesInput struct { // you receive a response with truncated results. Set it to the value of the // NextToken parameter from the response you just received. NextToken *string `min:"1" type:"string"` + + // Use this parameter to filter the returned set of certificate authorities + // based on their owner. The default is SELF. + ResourceOwner *string `type:"string" enum:"ResourceOwner"` } // String returns the string representation @@ -4535,6 +5263,12 @@ func (s *ListCertificateAuthoritiesInput) SetNextToken(v string) *ListCertificat return s } +// SetResourceOwner sets the ResourceOwner field's value. 
+func (s *ListCertificateAuthoritiesInput) SetResourceOwner(v string) *ListCertificateAuthoritiesInput { + s.ResourceOwner = &v + return s +} + type ListCertificateAuthoritiesOutput struct { _ struct{} `type:"structure"` @@ -4572,9 +5306,10 @@ type ListPermissionsInput struct { _ struct{} `type:"structure"` // The Amazon Resource Number (ARN) of the private CA to inspect. You can find - // the ARN by calling the ListCertificateAuthorities action. This must be of - // the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 + // the ARN by calling the ListCertificateAuthorities (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html) + // action. This must be of the form: arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 // You can get a private CA's ARN by running the ListCertificateAuthorities + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html) // action. // // CertificateAuthorityArn is a required field @@ -4680,6 +5415,7 @@ type ListTagsInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html) // action. This must be of the form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 @@ -4782,10 +5518,68 @@ func (s *ListTagsOutput) SetTags(v []*Tag) *ListTagsOutput { return s } +// The current action was prevented because it would lock the caller out from +// performing subsequent actions. Verify that the specified parameters would +// not result in the caller being denied access to the resource. +type LockoutPreventedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s LockoutPreventedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LockoutPreventedException) GoString() string { + return s.String() +} + +func newErrorLockoutPreventedException(v protocol.ResponseMetadata) error { + return &LockoutPreventedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *LockoutPreventedException) Code() string { + return "LockoutPreventedException" +} + +// Message returns the exception's message. +func (s *LockoutPreventedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LockoutPreventedException) OrigErr() error { + return nil +} + +func (s *LockoutPreventedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LockoutPreventedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LockoutPreventedException) RequestID() string { + return s.RespMetadata.RequestID +} + // The certificate signing request is invalid. 
type MalformedCSRException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4802,17 +5596,17 @@ func (s MalformedCSRException) GoString() string { func newErrorMalformedCSRException(v protocol.ResponseMetadata) error { return &MalformedCSRException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MalformedCSRException) Code() string { +func (s *MalformedCSRException) Code() string { return "MalformedCSRException" } // Message returns the exception's message. -func (s MalformedCSRException) Message() string { +func (s *MalformedCSRException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4820,28 +5614,28 @@ func (s MalformedCSRException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MalformedCSRException) OrigErr() error { +func (s *MalformedCSRException) OrigErr() error { return nil } -func (s MalformedCSRException) Error() string { +func (s *MalformedCSRException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MalformedCSRException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MalformedCSRException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MalformedCSRException) RequestID() string { - return s.respMetadata.RequestID +func (s *MalformedCSRException) RequestID() string { + return s.RespMetadata.RequestID } // One or more fields in the certificate are invalid. type MalformedCertificateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4858,17 +5652,17 @@ func (s MalformedCertificateException) GoString() string { func newErrorMalformedCertificateException(v protocol.ResponseMetadata) error { return &MalformedCertificateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MalformedCertificateException) Code() string { +func (s *MalformedCertificateException) Code() string { return "MalformedCertificateException" } // Message returns the exception's message. -func (s MalformedCertificateException) Message() string { +func (s *MalformedCertificateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4876,30 +5670,32 @@ func (s MalformedCertificateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MalformedCertificateException) OrigErr() error { +func (s *MalformedCertificateException) OrigErr() error { return nil } -func (s MalformedCertificateException) Error() string { +func (s *MalformedCertificateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MalformedCertificateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MalformedCertificateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s MalformedCertificateException) RequestID() string { - return s.respMetadata.RequestID +func (s *MalformedCertificateException) RequestID() string { + return s.RespMetadata.RequestID } // Permissions designate which private CA actions can be performed by an AWS // service or entity. In order for ACM to automatically renew private certificates, // you must give the ACM service principal all available permissions (IssueCertificate, // GetCertificate, and ListPermissions). Permissions can be assigned with the -// CreatePermission action, removed with the DeletePermission action, and listed -// with the ListPermissions action. +// CreatePermission (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreatePermission.html) +// action, removed with the DeletePermission (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeletePermission.html) +// action, and listed with the ListPermissions (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListPermissions.html) +// action. type Permission struct { _ struct{} `type:"structure"` @@ -4914,14 +5710,14 @@ type Permission struct { CreatedAt *time.Time `type:"timestamp"` // The name of the policy that is associated with the permission. - Policy *string `type:"string"` + Policy *string `min:"1" type:"string"` // The AWS service or entity that holds the permission. At this time, the only // valid principal is acm.amazonaws.com. Principal *string `type:"string"` // The ID of the account that assigned the permission. - SourceAccount *string `type:"string"` + SourceAccount *string `min:"12" type:"string"` } // String returns the string representation @@ -4972,8 +5768,8 @@ func (s *Permission) SetSourceAccount(v string) *Permission { // The designated permission has already been given to the user. type PermissionAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4990,17 +5786,17 @@ func (s PermissionAlreadyExistsException) GoString() string { func newErrorPermissionAlreadyExistsException(v protocol.ResponseMetadata) error { return &PermissionAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PermissionAlreadyExistsException) Code() string { +func (s *PermissionAlreadyExistsException) Code() string { return "PermissionAlreadyExistsException" } // Message returns the exception's message. -func (s PermissionAlreadyExistsException) Message() string { +func (s *PermissionAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5008,28 +5804,107 @@ func (s PermissionAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PermissionAlreadyExistsException) OrigErr() error { +func (s *PermissionAlreadyExistsException) OrigErr() error { return nil } -func (s PermissionAlreadyExistsException) Error() string { +func (s *PermissionAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PermissionAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PermissionAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s PermissionAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *PermissionAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID +} + +type PutPolicyInput struct { + _ struct{} `type:"structure"` + + // The path and filename of a JSON-formatted IAM policy to attach to the specified + // private CA resource. If this policy does not contain all required statements + // or if it includes any statement that is not allowed, the PutPolicy action + // returns an InvalidPolicyException. For information about IAM policy and statement + // structure, see Overview of JSON Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policies-json). + // + // Policy is a required field + Policy *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Number (ARN) of the private CA to associate with the + // policy. The ARN of the CA can be found by calling the ListCertificateAuthorities + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html) + // action. + // + // ResourceArn is a required field + ResourceArn *string `min:"5" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutPolicyInput"} + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 5 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicy sets the Policy field's value. +func (s *PutPolicyInput) SetPolicy(v string) *PutPolicyInput { + s.Policy = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *PutPolicyInput) SetResourceArn(v string) *PutPolicyInput { + s.ResourceArn = &v + return s +} + +type PutPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPolicyOutput) GoString() string { + return s.String() } // Your request has already been completed. type RequestAlreadyProcessedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5046,17 +5921,17 @@ func (s RequestAlreadyProcessedException) GoString() string { func newErrorRequestAlreadyProcessedException(v protocol.ResponseMetadata) error { return &RequestAlreadyProcessedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s RequestAlreadyProcessedException) Code() string { +func (s *RequestAlreadyProcessedException) Code() string { return "RequestAlreadyProcessedException" } // Message returns the exception's message. -func (s RequestAlreadyProcessedException) Message() string { +func (s *RequestAlreadyProcessedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5064,28 +5939,28 @@ func (s RequestAlreadyProcessedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RequestAlreadyProcessedException) OrigErr() error { +func (s *RequestAlreadyProcessedException) OrigErr() error { return nil } -func (s RequestAlreadyProcessedException) Error() string { +func (s *RequestAlreadyProcessedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RequestAlreadyProcessedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RequestAlreadyProcessedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RequestAlreadyProcessedException) RequestID() string { - return s.respMetadata.RequestID +func (s *RequestAlreadyProcessedException) RequestID() string { + return s.RespMetadata.RequestID } // The request has failed for an unspecified reason. type RequestFailedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5102,17 +5977,17 @@ func (s RequestFailedException) GoString() string { func newErrorRequestFailedException(v protocol.ResponseMetadata) error { return &RequestFailedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RequestFailedException) Code() string { +func (s *RequestFailedException) Code() string { return "RequestFailedException" } // Message returns the exception's message. -func (s RequestFailedException) Message() string { +func (s *RequestFailedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5120,28 +5995,28 @@ func (s RequestFailedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RequestFailedException) OrigErr() error { +func (s *RequestFailedException) OrigErr() error { return nil } -func (s RequestFailedException) Error() string { +func (s *RequestFailedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RequestFailedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RequestFailedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RequestFailedException) RequestID() string { - return s.respMetadata.RequestID +func (s *RequestFailedException) RequestID() string { + return s.RespMetadata.RequestID } // Your request is already in progress. 
type RequestInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5158,17 +6033,17 @@ func (s RequestInProgressException) GoString() string { func newErrorRequestInProgressException(v protocol.ResponseMetadata) error { return &RequestInProgressException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RequestInProgressException) Code() string { +func (s *RequestInProgressException) Code() string { return "RequestInProgressException" } // Message returns the exception's message. -func (s RequestInProgressException) Message() string { +func (s *RequestInProgressException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5176,29 +6051,29 @@ func (s RequestInProgressException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RequestInProgressException) OrigErr() error { +func (s *RequestInProgressException) OrigErr() error { return nil } -func (s RequestInProgressException) Error() string { +func (s *RequestInProgressException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RequestInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RequestInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RequestInProgressException) RequestID() string { - return s.respMetadata.RequestID +func (s *RequestInProgressException) RequestID() string { + return s.RespMetadata.RequestID } -// A resource such as a private CA, S3 bucket, certificate, or audit report -// cannot be found. +// A resource such as a private CA, S3 bucket, certificate, audit report, or +// policy cannot be found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5215,17 +6090,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5233,28 +6108,29 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type RestoreCertificateAuthorityInput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) that was returned when you called the CreateCertificateAuthority + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html) // action. This must be of the form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 @@ -5310,10 +6186,11 @@ func (s RestoreCertificateAuthorityOutput) GoString() string { } // Certificate revocation information used by the CreateCertificateAuthority -// and UpdateCertificateAuthority actions. Your private certificate authority -// (CA) can create and maintain a certificate revocation list (CRL). A CRL contains -// information about certificates revoked by your CA. For more information, -// see RevokeCertificate. +// (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html) +// and UpdateCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_UpdateCertificateAuthority.html) +// actions. Your private certificate authority (CA) can create and maintain +// a certificate revocation list (CRL). A CRL contains information about certificates +// revoked by your CA. For more information, see RevokeCertificate (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_RevokeCertificate.html). type RevocationConfiguration struct { _ struct{} `type:"structure"` @@ -5365,10 +6242,10 @@ type RevokeCertificateInput struct { CertificateAuthorityArn *string `min:"5" type:"string" required:"true"` // Serial number of the certificate to be revoked. This must be in hexadecimal - // format. You can retrieve the serial number by calling GetCertificate with - // the Amazon Resource Name (ARN) of the certificate you want and the ARN of - // your private CA. The GetCertificate action retrieves the certificate in the - // PEM format. You can use the following OpenSSL command to list the certificate + // format. You can retrieve the serial number by calling GetCertificate (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetCertificate.html) + // with the Amazon Resource Name (ARN) of the certificate you want and the ARN + // of your private CA. The GetCertificate action retrieves the certificate in + // the PEM format. You can use the following OpenSSL command to list the certificate // in text format and copy the hexadecimal serial number. // // openssl x509 -in file_path -text -noout @@ -5453,7 +6330,8 @@ func (s RevokeCertificateOutput) GoString() string { // Tags are labels that you can use to identify and organize your private CAs. // Each tag consists of a key and an optional value. You can associate up to // 50 tags with a private CA. To add one or more tags to a private CA, call -// the TagCertificateAuthority action. To remove a tag, call the UntagCertificateAuthority +// the TagCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_TagCertificateAuthority.html) +// action. 
To remove a tag, call the UntagCertificateAuthority (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_UntagCertificateAuthority.html) // action. type Tag struct { _ struct{} `type:"structure"` @@ -5508,7 +6386,8 @@ func (s *Tag) SetValue(v string) *Tag { type TagCertificateAuthorityInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. + // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html). // This must be of the form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 @@ -5593,8 +6472,8 @@ func (s TagCertificateAuthorityOutput) GoString() string { // You can associate up to 50 tags with a private CA. Exception information // is contained in the exception message field. type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5611,17 +6490,17 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5629,28 +6508,29 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { +func (s *TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *TooManyTagsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID } type UntagCertificateAuthorityInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority. + // The Amazon Resource Name (ARN) that was returned when you called CreateCertificateAuthority + // (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_CreateCertificateAuthority.html). // This must be of the form: // // arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 @@ -5813,18 +6693,51 @@ func (s UpdateCertificateAuthorityOutput) GoString() string { return s.String() } -// Length of time for which the certificate issued by your private certificate -// authority (CA), or by the private CA itself, is valid in days, months, or -// years. You can issue a certificate by calling the IssueCertificate action. 
+// Validity specifies the period of time during which a certificate is valid.
+// Validity can be expressed as an explicit date and time when the certificate
+// expires, or as a span of time after issuance, stated in days, months, or
+// years. For more information, see Validity (https://tools.ietf.org/html/rfc5280#section-4.1.2.5)
+// in RFC 5280.
+//
+// You can issue a certificate by calling the IssueCertificate (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_IssueCertificate.html)
+// action.
 type Validity struct {
 	_ struct{} `type:"structure"`
 
-	// Specifies whether the Value parameter represents days, months, or years.
+	// Determines how ACM Private CA interprets the Value parameter, an integer.
+	// Supported validity types include those listed below. Type definitions with
+	// values include a sample input value and the resulting output.
+	//
+	// END_DATE: The specific date and time when the certificate will expire, expressed
+	// using UTCTime (YYMMDDHHMMSS) or GeneralizedTime (YYYYMMDDHHMMSS) format.
+	// When UTCTime is used, if the year field (YY) is greater than or equal to
+	// 50, the year is interpreted as 19YY. If the year field is less than 50, the
+	// year is interpreted as 20YY.
+	//
+	//    * Sample input value: 491231235959 (UTCTime format)
+	//
+	//    * Output expiration date/time: 12/31/2049 23:59:59
+	//
+	// ABSOLUTE: The specific date and time when the certificate will expire, expressed
+	// in seconds since the Unix Epoch.
+	//
+	//    * Sample input value: 2524608000
+	//
+	//    * Output expiration date/time: 01/01/2050 00:00:00
+	//
+	// DAYS, MONTHS, YEARS: The relative time from the moment of issuance until
+	// the certificate will expire, expressed in days, months, or years.
+	//
+	// For example, if DAYS is specified and the certificate is issued on
+	// 10/12/2020 at 12:34:54 UTC:
+	//
+	//    * Sample input value: 90
+	//
+	//    * Output expiration date: 01/10/2021 12:34:54 UTC
 	//
 	// Type is a required field
 	Type *string `type:"string" required:"true" enum:"ValidityPeriodType"`
 
-	// Time period.
+	// A long integer interpreted according to the value of Type.
// // Value is a required field Value *int64 `min:"1" type:"long" required:"true"` @@ -5882,6 +6795,15 @@ const ( ActionTypeListPermissions = "ListPermissions" ) +// ActionType_Values returns all elements of the ActionType enum +func ActionType_Values() []string { + return []string{ + ActionTypeIssueCertificate, + ActionTypeGetCertificate, + ActionTypeListPermissions, + } +} + const ( // AuditReportResponseFormatJson is a AuditReportResponseFormat enum value AuditReportResponseFormatJson = "JSON" @@ -5890,6 +6812,14 @@ const ( AuditReportResponseFormatCsv = "CSV" ) +// AuditReportResponseFormat_Values returns all elements of the AuditReportResponseFormat enum +func AuditReportResponseFormat_Values() []string { + return []string{ + AuditReportResponseFormatJson, + AuditReportResponseFormatCsv, + } +} + const ( // AuditReportStatusCreating is a AuditReportStatus enum value AuditReportStatusCreating = "CREATING" @@ -5901,6 +6831,15 @@ const ( AuditReportStatusFailed = "FAILED" ) +// AuditReportStatus_Values returns all elements of the AuditReportStatus enum +func AuditReportStatus_Values() []string { + return []string{ + AuditReportStatusCreating, + AuditReportStatusSuccess, + AuditReportStatusFailed, + } +} + const ( // CertificateAuthorityStatusCreating is a CertificateAuthorityStatus enum value CertificateAuthorityStatusCreating = "CREATING" @@ -5924,6 +6863,19 @@ const ( CertificateAuthorityStatusFailed = "FAILED" ) +// CertificateAuthorityStatus_Values returns all elements of the CertificateAuthorityStatus enum +func CertificateAuthorityStatus_Values() []string { + return []string{ + CertificateAuthorityStatusCreating, + CertificateAuthorityStatusPendingCertificate, + CertificateAuthorityStatusActive, + CertificateAuthorityStatusDeleted, + CertificateAuthorityStatusDisabled, + CertificateAuthorityStatusExpired, + CertificateAuthorityStatusFailed, + } +} + const ( // CertificateAuthorityTypeRoot is a CertificateAuthorityType enum value CertificateAuthorityTypeRoot = "ROOT" @@ -5932,6 +6884,14 @@ const ( CertificateAuthorityTypeSubordinate = "SUBORDINATE" ) +// CertificateAuthorityType_Values returns all elements of the CertificateAuthorityType enum +func CertificateAuthorityType_Values() []string { + return []string{ + CertificateAuthorityTypeRoot, + CertificateAuthorityTypeSubordinate, + } +} + const ( // FailureReasonRequestTimedOut is a FailureReason enum value FailureReasonRequestTimedOut = "REQUEST_TIMED_OUT" @@ -5943,6 +6903,15 @@ const ( FailureReasonOther = "OTHER" ) +// FailureReason_Values returns all elements of the FailureReason enum +func FailureReason_Values() []string { + return []string{ + FailureReasonRequestTimedOut, + FailureReasonUnsupportedAlgorithm, + FailureReasonOther, + } +} + const ( // KeyAlgorithmRsa2048 is a KeyAlgorithm enum value KeyAlgorithmRsa2048 = "RSA_2048" @@ -5957,6 +6926,32 @@ const ( KeyAlgorithmEcSecp384r1 = "EC_secp384r1" ) +// KeyAlgorithm_Values returns all elements of the KeyAlgorithm enum +func KeyAlgorithm_Values() []string { + return []string{ + KeyAlgorithmRsa2048, + KeyAlgorithmRsa4096, + KeyAlgorithmEcPrime256v1, + KeyAlgorithmEcSecp384r1, + } +} + +const ( + // ResourceOwnerSelf is a ResourceOwner enum value + ResourceOwnerSelf = "SELF" + + // ResourceOwnerOtherAccounts is a ResourceOwner enum value + ResourceOwnerOtherAccounts = "OTHER_ACCOUNTS" +) + +// ResourceOwner_Values returns all elements of the ResourceOwner enum +func ResourceOwner_Values() []string { + return []string{ + ResourceOwnerSelf, + ResourceOwnerOtherAccounts, + } +} 
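The *_Values() helpers added above simply return the complete list of enum strings the SDK accepts, which makes them handy for validating user input before a request is built. A minimal sketch (not part of the vendored code) of one way they might be consumed; the containsValue helper and the sample strings are illustrative only:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/acmpca"
)

// containsValue reports whether candidate appears in the slice returned by
// one of the generated *_Values() helpers.
func containsValue(allowed []string, candidate string) bool {
	for _, v := range allowed {
		if v == candidate {
			return true
		}
	}
	return false
}

func main() {
	// Validate user-supplied enum strings against the SDK's own lists.
	fmt.Println(containsValue(acmpca.ResourceOwner_Values(), "SELF"))    // true
	fmt.Println(containsValue(acmpca.KeyAlgorithm_Values(), "RSA_2048")) // true
	fmt.Println(containsValue(acmpca.KeyAlgorithm_Values(), "DSA_1024")) // false
}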
+ const ( // RevocationReasonUnspecified is a RevocationReason enum value RevocationReasonUnspecified = "UNSPECIFIED" @@ -5983,6 +6978,20 @@ const ( RevocationReasonAACompromise = "A_A_COMPROMISE" ) +// RevocationReason_Values returns all elements of the RevocationReason enum +func RevocationReason_Values() []string { + return []string{ + RevocationReasonUnspecified, + RevocationReasonKeyCompromise, + RevocationReasonCertificateAuthorityCompromise, + RevocationReasonAffiliationChanged, + RevocationReasonSuperseded, + RevocationReasonCessationOfOperation, + RevocationReasonPrivilegeWithdrawn, + RevocationReasonAACompromise, + } +} + const ( // SigningAlgorithmSha256withecdsa is a SigningAlgorithm enum value SigningAlgorithmSha256withecdsa = "SHA256WITHECDSA" @@ -6003,6 +7012,18 @@ const ( SigningAlgorithmSha512withrsa = "SHA512WITHRSA" ) +// SigningAlgorithm_Values returns all elements of the SigningAlgorithm enum +func SigningAlgorithm_Values() []string { + return []string{ + SigningAlgorithmSha256withecdsa, + SigningAlgorithmSha384withecdsa, + SigningAlgorithmSha512withecdsa, + SigningAlgorithmSha256withrsa, + SigningAlgorithmSha384withrsa, + SigningAlgorithmSha512withrsa, + } +} + const ( // ValidityPeriodTypeEndDate is a ValidityPeriodType enum value ValidityPeriodTypeEndDate = "END_DATE" @@ -6019,3 +7040,14 @@ const ( // ValidityPeriodTypeYears is a ValidityPeriodType enum value ValidityPeriodTypeYears = "YEARS" ) + +// ValidityPeriodType_Values returns all elements of the ValidityPeriodType enum +func ValidityPeriodType_Values() []string { + return []string{ + ValidityPeriodTypeEndDate, + ValidityPeriodTypeAbsolute, + ValidityPeriodTypeDays, + ValidityPeriodTypeMonths, + ValidityPeriodTypeYears, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/acmpca/doc.go b/vendor/github.com/aws/aws-sdk-go/service/acmpca/doc.go index ca360e0e4..bc8c55b61 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/acmpca/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/acmpca/doc.go @@ -3,6 +3,7 @@ // Package acmpca provides the client and types for making API // requests to AWS Certificate Manager Private Certificate Authority. // +// // This is the ACM Private CA API Reference. It provides descriptions, syntax, // and usage examples for each of the actions and data types involved in creating // and managing private certificate authorities (CA) for your organization. @@ -12,9 +13,9 @@ // an API that's tailored to the programming language or platform that you're // using. For more information, see AWS SDKs (https://aws.amazon.com/tools/#SDKs). // -// Each ACM Private CA API action has a throttling limit which determines the -// number of times the action can be called per second. For more information, -// see API Rate Limits in ACM Private CA (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaLimits.html#PcaLimits-api) +// Each ACM Private CA API action has a quota that determines the number of +// times the action can be called per second. For more information, see API +// Rate Quotas in ACM Private CA (https://docs.aws.amazon.com/acm-pca/latest/userguide/PcaLimits.html#PcaLimits-api) // in the ACM Private CA user guide. // // See https://docs.aws.amazon.com/goto/WebAPI/acm-pca-2017-08-22 for more information on this service. 
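Note on the new enum helpers: the `_Values()` functions added in the api.go hunk above (for example `acmpca.CertificateAuthorityType_Values()` and `acmpca.SigningAlgorithm_Values()`) return every member of their enum as a `[]string`, which lets provider code validate arguments without hard-coding the allowed values. A minimal sketch of that pattern follows, assuming the terraform-plugin-sdk v1 import paths (use the `/v2/` paths if that is what this tree vendors); the attribute itself is illustrative and is not part of this patch:

package aws

import (
	"github.com/aws/aws-sdk-go/service/acmpca"
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)

// certificateAuthorityTypeSchema builds a string attribute whose allowed
// values come straight from the generated enum helper, so the schema stays
// in sync with the vendored SDK instead of a hand-maintained list.
func certificateAuthorityTypeSchema() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Required:     true,
		ForceNew:     true,
		ValidateFunc: validation.StringInSlice(acmpca.CertificateAuthorityType_Values(), false),
	}
}

The same approach applies to the other enums introduced in this update (RevocationReason, ValidityPeriodType, and so on).
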
diff --git a/vendor/github.com/aws/aws-sdk-go/service/acmpca/errors.go b/vendor/github.com/aws/aws-sdk-go/service/acmpca/errors.go index 62564f2a8..a95d1ccca 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/acmpca/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/acmpca/errors.go @@ -37,14 +37,15 @@ const ( // "InvalidNextTokenException". // // The token specified in the NextToken argument is not valid. Use the token - // returned from your previous call to ListCertificateAuthorities. + // returned from your previous call to ListCertificateAuthorities (https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_ListCertificateAuthorities.html). ErrCodeInvalidNextTokenException = "InvalidNextTokenException" // ErrCodeInvalidPolicyException for service response error code // "InvalidPolicyException". // - // The S3 bucket policy is not valid. The policy must give ACM Private CA rights - // to read from and write to the bucket and find the bucket location. + // The resource policy is invalid or is missing a required statement. For general + // information about IAM policy and statement structure, see Overview of JSON + // Policies (https://docs.aws.amazon.com/https:/docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#access_policies-json). ErrCodeInvalidPolicyException = "InvalidPolicyException" // ErrCodeInvalidRequestException for service response error code @@ -56,8 +57,7 @@ const ( // ErrCodeInvalidStateException for service response error code // "InvalidStateException". // - // The private CA is in a state during which a report or certificate cannot - // be generated. + // The state of the private CA does not allow this action to occur. ErrCodeInvalidStateException = "InvalidStateException" // ErrCodeInvalidTagException for service response error code @@ -70,10 +70,18 @@ const ( // ErrCodeLimitExceededException for service response error code // "LimitExceededException". // - // An ACM Private CA limit has been exceeded. See the exception message returned - // to determine the limit that was exceeded. + // An ACM Private CA quota has been exceeded. See the exception message returned + // to determine the quota that was exceeded. ErrCodeLimitExceededException = "LimitExceededException" + // ErrCodeLockoutPreventedException for service response error code + // "LockoutPreventedException". + // + // The current action was prevented because it would lock the caller out from + // performing subsequent actions. Verify that the specified parameters would + // not result in the caller being denied access to the resource. + ErrCodeLockoutPreventedException = "LockoutPreventedException" + // ErrCodeMalformedCSRException for service response error code // "MalformedCSRException". // @@ -113,8 +121,8 @@ const ( // ErrCodeResourceNotFoundException for service response error code // "ResourceNotFoundException". // - // A resource such as a private CA, S3 bucket, certificate, or audit report - // cannot be found. + // A resource such as a private CA, S3 bucket, certificate, audit report, or + // policy cannot be found. 
ErrCodeResourceNotFoundException = "ResourceNotFoundException" // ErrCodeTooManyTagsException for service response error code @@ -136,6 +144,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidStateException": newErrorInvalidStateException, "InvalidTagException": newErrorInvalidTagException, "LimitExceededException": newErrorLimitExceededException, + "LockoutPreventedException": newErrorLockoutPreventedException, "MalformedCSRException": newErrorMalformedCSRException, "MalformedCertificateException": newErrorMalformedCertificateException, "PermissionAlreadyExistsException": newErrorPermissionAlreadyExistsException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go b/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go index 8c7602b4c..57c1d570b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/amplify/api.go b/vendor/github.com/aws/aws-sdk-go/service/amplify/api.go index 0eac1f76a..32cf844ef 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/amplify/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/amplify/api.go @@ -57,7 +57,7 @@ func (c *Amplify) CreateAppRequest(input *CreateAppInput) (req *request.Request, // CreateApp API operation for AWS Amplify. // -// Creates a new Amplify App. +// Creates a new Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -68,22 +68,19 @@ func (c *Amplify) CreateAppRequest(input *CreateAppInput) (req *request.Request, // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // * DependentServiceFailureException -// Exception thrown when an operation fails due to a dependent service throwing -// an exception. +// An operation failed because a dependent service threw an exception. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateApp func (c *Amplify) CreateApp(input *CreateAppInput) (*CreateAppOutput, error) { @@ -151,7 +148,7 @@ func (c *Amplify) CreateBackendEnvironmentRequest(input *CreateBackendEnvironmen // CreateBackendEnvironment API operation for AWS Amplify. // -// Creates a new backend environment for an Amplify App. +// Creates a new backend environment for an Amplify app. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -162,21 +159,19 @@ func (c *Amplify) CreateBackendEnvironmentRequest(input *CreateBackendEnvironmen // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateBackendEnvironment func (c *Amplify) CreateBackendEnvironment(input *CreateBackendEnvironmentInput) (*CreateBackendEnvironmentOutput, error) { @@ -244,7 +239,7 @@ func (c *Amplify) CreateBranchRequest(input *CreateBranchInput) (req *request.Re // CreateBranch API operation for AWS Amplify. // -// Creates a new Branch for an Amplify App. +// Creates a new branch for an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -255,25 +250,22 @@ func (c *Amplify) CreateBranchRequest(input *CreateBranchInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // * DependentServiceFailureException -// Exception thrown when an operation fails due to a dependent service throwing -// an exception. +// An operation failed because a dependent service threw an exception. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateBranch func (c *Amplify) CreateBranch(input *CreateBranchInput) (*CreateBranchOutput, error) { @@ -341,7 +333,8 @@ func (c *Amplify) CreateDeploymentRequest(input *CreateDeploymentInput) (req *re // CreateDeployment API operation for AWS Amplify. // -// Create a deployment for manual deploy apps. (Apps are not connected to repository) +// Creates a deployment for a manually deployed Amplify app. Manually deployed +// apps are not connected to a repository. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -352,18 +345,16 @@ func (c *Amplify) CreateDeploymentRequest(input *CreateDeploymentInput) (req *re // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateDeployment func (c *Amplify) CreateDeployment(input *CreateDeploymentInput) (*CreateDeploymentOutput, error) { @@ -431,7 +422,8 @@ func (c *Amplify) CreateDomainAssociationRequest(input *CreateDomainAssociationI // CreateDomainAssociation API operation for AWS Amplify. // -// Create a new DomainAssociation on an App +// Creates a new domain association for an Amplify app. This action associates +// a custom domain with the Amplify app // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -442,25 +434,22 @@ func (c *Amplify) CreateDomainAssociationRequest(input *CreateDomainAssociationI // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // * DependentServiceFailureException -// Exception thrown when an operation fails due to a dependent service throwing -// an exception. +// An operation failed because a dependent service threw an exception. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateDomainAssociation func (c *Amplify) CreateDomainAssociation(input *CreateDomainAssociationInput) (*CreateDomainAssociationOutput, error) { @@ -528,7 +517,7 @@ func (c *Amplify) CreateWebhookRequest(input *CreateWebhookInput) (req *request. // CreateWebhook API operation for AWS Amplify. // -// Create a new webhook on an App. +// Creates a new webhook on an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -539,25 +528,22 @@ func (c *Amplify) CreateWebhookRequest(input *CreateWebhookInput) (req *request. 
// // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // * DependentServiceFailureException -// Exception thrown when an operation fails due to a dependent service throwing -// an exception. +// An operation failed because a dependent service threw an exception. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/CreateWebhook func (c *Amplify) CreateWebhook(input *CreateWebhookInput) (*CreateWebhookOutput, error) { @@ -625,7 +611,7 @@ func (c *Amplify) DeleteAppRequest(input *DeleteAppInput) (req *request.Request, // DeleteApp API operation for AWS Amplify. // -// Delete an existing Amplify App by appId. +// Deletes an existing Amplify app specified by an app ID. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -636,21 +622,19 @@ func (c *Amplify) DeleteAppRequest(input *DeleteAppInput) (req *request.Request, // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * DependentServiceFailureException -// Exception thrown when an operation fails due to a dependent service throwing -// an exception. +// An operation failed because a dependent service threw an exception. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteApp func (c *Amplify) DeleteApp(input *DeleteAppInput) (*DeleteAppOutput, error) { @@ -718,7 +702,7 @@ func (c *Amplify) DeleteBackendEnvironmentRequest(input *DeleteBackendEnvironmen // DeleteBackendEnvironment API operation for AWS Amplify. // -// Delete backend environment for an Amplify App. +// Deletes a backend environment for an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -729,21 +713,19 @@ func (c *Amplify) DeleteBackendEnvironmentRequest(input *DeleteBackendEnvironmen // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. 
// // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * DependentServiceFailureException -// Exception thrown when an operation fails due to a dependent service throwing -// an exception. +// An operation failed because a dependent service threw an exception. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteBackendEnvironment func (c *Amplify) DeleteBackendEnvironment(input *DeleteBackendEnvironmentInput) (*DeleteBackendEnvironmentOutput, error) { @@ -811,7 +793,7 @@ func (c *Amplify) DeleteBranchRequest(input *DeleteBranchInput) (req *request.Re // DeleteBranch API operation for AWS Amplify. // -// Deletes a branch for an Amplify App. +// Deletes a branch for an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -822,21 +804,19 @@ func (c *Amplify) DeleteBranchRequest(input *DeleteBranchInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * DependentServiceFailureException -// Exception thrown when an operation fails due to a dependent service throwing -// an exception. +// An operation failed because a dependent service threw an exception. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteBranch func (c *Amplify) DeleteBranch(input *DeleteBranchInput) (*DeleteBranchOutput, error) { @@ -904,7 +884,7 @@ func (c *Amplify) DeleteDomainAssociationRequest(input *DeleteDomainAssociationI // DeleteDomainAssociation API operation for AWS Amplify. // -// Deletes a DomainAssociation. +// Deletes a domain association for an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -915,21 +895,19 @@ func (c *Amplify) DeleteDomainAssociationRequest(input *DeleteDomainAssociationI // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. 
// // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * DependentServiceFailureException -// Exception thrown when an operation fails due to a dependent service throwing -// an exception. +// An operation failed because a dependent service threw an exception. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteDomainAssociation func (c *Amplify) DeleteDomainAssociation(input *DeleteDomainAssociationInput) (*DeleteDomainAssociationOutput, error) { @@ -997,7 +975,7 @@ func (c *Amplify) DeleteJobRequest(input *DeleteJobInput) (req *request.Request, // DeleteJob API operation for AWS Amplify. // -// Delete a job, for an Amplify branch, part of Amplify App. +// Deletes a job for a branch of an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1008,21 +986,19 @@ func (c *Amplify) DeleteJobRequest(input *DeleteJobInput) (req *request.Request, // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteJob func (c *Amplify) DeleteJob(input *DeleteJobInput) (*DeleteJobOutput, error) { @@ -1101,21 +1077,19 @@ func (c *Amplify) DeleteWebhookRequest(input *DeleteWebhookInput) (req *request. // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/DeleteWebhook func (c *Amplify) DeleteWebhook(input *DeleteWebhookInput) (*DeleteWebhookOutput, error) { @@ -1183,7 +1157,8 @@ func (c *Amplify) GenerateAccessLogsRequest(input *GenerateAccessLogsInput) (req // GenerateAccessLogs API operation for AWS Amplify. 
// -// Retrieve website access logs for a specific time range via a pre-signed URL. +// Returns the website access logs for a specific time range using a presigned +// URL. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1194,17 +1169,16 @@ func (c *Amplify) GenerateAccessLogsRequest(input *GenerateAccessLogsInput) (req // // Returned Error Types: // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GenerateAccessLogs func (c *Amplify) GenerateAccessLogs(input *GenerateAccessLogsInput) (*GenerateAccessLogsOutput, error) { @@ -1272,7 +1246,7 @@ func (c *Amplify) GetAppRequest(input *GetAppInput) (req *request.Request, outpu // GetApp API operation for AWS Amplify. // -// Retrieves an existing Amplify App by appId. +// Returns an existing Amplify app by appID. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1283,17 +1257,16 @@ func (c *Amplify) GetAppRequest(input *GetAppInput) (req *request.Request, outpu // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetApp func (c *Amplify) GetApp(input *GetAppInput) (*GetAppOutput, error) { @@ -1361,7 +1334,7 @@ func (c *Amplify) GetArtifactUrlRequest(input *GetArtifactUrlInput) (req *reques // GetArtifactUrl API operation for AWS Amplify. // -// Retrieves artifact info that corresponds to a artifactId. +// Returns the artifact info that corresponds to an artifact id. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1372,21 +1345,19 @@ func (c *Amplify) GetArtifactUrlRequest(input *GetArtifactUrlInput) (req *reques // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. 
// // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetArtifactUrl func (c *Amplify) GetArtifactUrl(input *GetArtifactUrlInput) (*GetArtifactUrlOutput, error) { @@ -1454,7 +1425,7 @@ func (c *Amplify) GetBackendEnvironmentRequest(input *GetBackendEnvironmentInput // GetBackendEnvironment API operation for AWS Amplify. // -// Retrieves a backend environment for an Amplify App. +// Returns a backend environment for an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1465,17 +1436,16 @@ func (c *Amplify) GetBackendEnvironmentRequest(input *GetBackendEnvironmentInput // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetBackendEnvironment func (c *Amplify) GetBackendEnvironment(input *GetBackendEnvironmentInput) (*GetBackendEnvironmentOutput, error) { @@ -1543,7 +1513,7 @@ func (c *Amplify) GetBranchRequest(input *GetBranchInput) (req *request.Request, // GetBranch API operation for AWS Amplify. // -// Retrieves a branch for an Amplify App. +// Returns a branch for an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1554,17 +1524,16 @@ func (c *Amplify) GetBranchRequest(input *GetBranchInput) (req *request.Request, // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetBranch func (c *Amplify) GetBranch(input *GetBranchInput) (*GetBranchOutput, error) { @@ -1632,7 +1601,7 @@ func (c *Amplify) GetDomainAssociationRequest(input *GetDomainAssociationInput) // GetDomainAssociation API operation for AWS Amplify. // -// Retrieves domain info that corresponds to an appId and domainName. +// Returns the domain information for an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1643,17 +1612,16 @@ func (c *Amplify) GetDomainAssociationRequest(input *GetDomainAssociationInput) // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetDomainAssociation func (c *Amplify) GetDomainAssociation(input *GetDomainAssociationInput) (*GetDomainAssociationOutput, error) { @@ -1721,7 +1689,7 @@ func (c *Amplify) GetJobRequest(input *GetJobInput) (req *request.Request, outpu // GetJob API operation for AWS Amplify. // -// Get a job for a branch, part of an Amplify App. +// Returns a job for a branch of an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1732,21 +1700,19 @@ func (c *Amplify) GetJobRequest(input *GetJobInput) (req *request.Request, outpu // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetJob func (c *Amplify) GetJob(input *GetJobInput) (*GetJobOutput, error) { @@ -1814,7 +1780,7 @@ func (c *Amplify) GetWebhookRequest(input *GetWebhookInput) (req *request.Reques // GetWebhook API operation for AWS Amplify. // -// Retrieves webhook info that corresponds to a webhookId. +// Returns the webhook information that corresponds to a specified webhook ID. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1825,21 +1791,19 @@ func (c *Amplify) GetWebhookRequest(input *GetWebhookInput) (req *request.Reques // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/GetWebhook func (c *Amplify) GetWebhook(input *GetWebhookInput) (*GetWebhookOutput, error) { @@ -1907,7 +1871,7 @@ func (c *Amplify) ListAppsRequest(input *ListAppsInput) (req *request.Request, o // ListApps API operation for AWS Amplify. // -// Lists existing Amplify Apps. +// Returns a list of the existing Amplify apps. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1918,14 +1882,13 @@ func (c *Amplify) ListAppsRequest(input *ListAppsInput) (req *request.Request, o // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListApps func (c *Amplify) ListApps(input *ListAppsInput) (*ListAppsOutput, error) { @@ -1993,7 +1956,7 @@ func (c *Amplify) ListArtifactsRequest(input *ListArtifactsInput) (req *request. // ListArtifacts API operation for AWS Amplify. // -// List artifacts with an app, a branch, a job and an artifact type. +// Returns a list of artifacts for a specified app, branch, and job. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2004,18 +1967,16 @@ func (c *Amplify) ListArtifactsRequest(input *ListArtifactsInput) (req *request. // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. 
// // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListArtifacts func (c *Amplify) ListArtifacts(input *ListArtifactsInput) (*ListArtifactsOutput, error) { @@ -2083,7 +2044,7 @@ func (c *Amplify) ListBackendEnvironmentsRequest(input *ListBackendEnvironmentsI // ListBackendEnvironments API operation for AWS Amplify. // -// Lists backend environments for an Amplify App. +// Lists the backend environments for an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2094,14 +2055,13 @@ func (c *Amplify) ListBackendEnvironmentsRequest(input *ListBackendEnvironmentsI // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListBackendEnvironments func (c *Amplify) ListBackendEnvironments(input *ListBackendEnvironmentsInput) (*ListBackendEnvironmentsOutput, error) { @@ -2169,7 +2129,7 @@ func (c *Amplify) ListBranchesRequest(input *ListBranchesInput) (req *request.Re // ListBranches API operation for AWS Amplify. // -// Lists branches for an Amplify App. +// Lists the branches of an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2180,14 +2140,13 @@ func (c *Amplify) ListBranchesRequest(input *ListBranchesInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListBranches func (c *Amplify) ListBranches(input *ListBranchesInput) (*ListBranchesOutput, error) { @@ -2255,7 +2214,7 @@ func (c *Amplify) ListDomainAssociationsRequest(input *ListDomainAssociationsInp // ListDomainAssociations API operation for AWS Amplify. // -// List domains with an app +// Returns the domain associations for an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2266,14 +2225,13 @@ func (c *Amplify) ListDomainAssociationsRequest(input *ListDomainAssociationsInp // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. 
// // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListDomainAssociations func (c *Amplify) ListDomainAssociations(input *ListDomainAssociationsInput) (*ListDomainAssociationsOutput, error) { @@ -2341,7 +2299,7 @@ func (c *Amplify) ListJobsRequest(input *ListJobsInput) (req *request.Request, o // ListJobs API operation for AWS Amplify. // -// List Jobs for a branch, part of an Amplify App. +// Lists the jobs for a branch of an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2352,18 +2310,16 @@ func (c *Amplify) ListJobsRequest(input *ListJobsInput) (req *request.Request, o // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListJobs func (c *Amplify) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) { @@ -2431,7 +2387,7 @@ func (c *Amplify) ListTagsForResourceRequest(input *ListTagsForResourceInput) (r // ListTagsForResource API operation for AWS Amplify. // -// List tags for resource. +// Returns a list of tags for a specified Amazon Resource Name (ARN). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2442,14 +2398,13 @@ func (c *Amplify) ListTagsForResourceRequest(input *ListTagsForResourceInput) (r // // Returned Error Types: // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * ResourceNotFoundException -// Exception thrown when an operation fails due to non-existent resource. +// An operation failed due to a non-existent resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListTagsForResource func (c *Amplify) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { @@ -2517,7 +2472,7 @@ func (c *Amplify) ListWebhooksRequest(input *ListWebhooksInput) (req *request.Re // ListWebhooks API operation for AWS Amplify. // -// List webhooks with an app. +// Returns a list of webhooks for an Amplify app. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2528,18 +2483,16 @@ func (c *Amplify) ListWebhooksRequest(input *ListWebhooksInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/ListWebhooks func (c *Amplify) ListWebhooks(input *ListWebhooksInput) (*ListWebhooksOutput, error) { @@ -2607,7 +2560,8 @@ func (c *Amplify) StartDeploymentRequest(input *StartDeploymentInput) (req *requ // StartDeployment API operation for AWS Amplify. // -// Start a deployment for manual deploy apps. (Apps are not connected to repository) +// Starts a deployment for a manually deployed app. Manually deployed apps are +// not connected to a repository. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2618,21 +2572,19 @@ func (c *Amplify) StartDeploymentRequest(input *StartDeploymentInput) (req *requ // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StartDeployment func (c *Amplify) StartDeployment(input *StartDeploymentInput) (*StartDeploymentOutput, error) { @@ -2700,7 +2652,7 @@ func (c *Amplify) StartJobRequest(input *StartJobInput) (req *request.Request, o // StartJob API operation for AWS Amplify. // -// Starts a new job for a branch, part of an Amplify App. +// Starts a new job for a branch of an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2711,21 +2663,19 @@ func (c *Amplify) StartJobRequest(input *StartJobInput) (req *request.Request, o // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. 
+// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StartJob func (c *Amplify) StartJob(input *StartJobInput) (*StartJobOutput, error) { @@ -2793,7 +2743,7 @@ func (c *Amplify) StopJobRequest(input *StopJobInput) (req *request.Request, out // StopJob API operation for AWS Amplify. // -// Stop a job that is in progress, for an Amplify branch, part of Amplify App. +// Stops a job that is in progress for a branch of an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2804,21 +2754,19 @@ func (c *Amplify) StopJobRequest(input *StopJobInput) (req *request.Request, out // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * LimitExceededException -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/StopJob func (c *Amplify) StopJob(input *StopJobInput) (*StopJobOutput, error) { @@ -2887,7 +2835,7 @@ func (c *Amplify) TagResourceRequest(input *TagResourceInput) (req *request.Requ // TagResource API operation for AWS Amplify. // -// Tag resource with tag key and value. +// Tags the resource with a tag key and value. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2898,14 +2846,13 @@ func (c *Amplify) TagResourceRequest(input *TagResourceInput) (req *request.Requ // // Returned Error Types: // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * ResourceNotFoundException -// Exception thrown when an operation fails due to non-existent resource. +// An operation failed due to a non-existent resource. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/TagResource func (c *Amplify) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { @@ -2974,7 +2921,7 @@ func (c *Amplify) UntagResourceRequest(input *UntagResourceInput) (req *request. // UntagResource API operation for AWS Amplify. // -// Untag resource with resourceArn. +// Untags a resource with a specified Amazon Resource Name (ARN). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2985,14 +2932,13 @@ func (c *Amplify) UntagResourceRequest(input *UntagResourceInput) (req *request. // // Returned Error Types: // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * ResourceNotFoundException -// Exception thrown when an operation fails due to non-existent resource. +// An operation failed due to a non-existent resource. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UntagResource func (c *Amplify) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { @@ -3060,7 +3006,7 @@ func (c *Amplify) UpdateAppRequest(input *UpdateAppInput) (req *request.Request, // UpdateApp API operation for AWS Amplify. // -// Updates an existing Amplify App. +// Updates an existing Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3071,17 +3017,16 @@ func (c *Amplify) UpdateAppRequest(input *UpdateAppInput) (req *request.Request, // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateApp func (c *Amplify) UpdateApp(input *UpdateAppInput) (*UpdateAppOutput, error) { @@ -3149,7 +3094,7 @@ func (c *Amplify) UpdateBranchRequest(input *UpdateBranchInput) (req *request.Re // UpdateBranch API operation for AWS Amplify. // -// Updates a branch for an Amplify App. +// Updates a branch for an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3160,21 +3105,19 @@ func (c *Amplify) UpdateBranchRequest(input *UpdateBranchInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. 
// // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * DependentServiceFailureException -// Exception thrown when an operation fails due to a dependent service throwing -// an exception. +// An operation failed because a dependent service threw an exception. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateBranch func (c *Amplify) UpdateBranch(input *UpdateBranchInput) (*UpdateBranchOutput, error) { @@ -3242,7 +3185,7 @@ func (c *Amplify) UpdateDomainAssociationRequest(input *UpdateDomainAssociationI // UpdateDomainAssociation API operation for AWS Amplify. // -// Create a new DomainAssociation on an App +// Creates a new domain association for an Amplify app. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3253,21 +3196,19 @@ func (c *Amplify) UpdateDomainAssociationRequest(input *UpdateDomainAssociationI // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. // // * DependentServiceFailureException -// Exception thrown when an operation fails due to a dependent service throwing -// an exception. +// An operation failed because a dependent service threw an exception. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateDomainAssociation func (c *Amplify) UpdateDomainAssociation(input *UpdateDomainAssociationInput) (*UpdateDomainAssociationOutput, error) { @@ -3335,7 +3276,7 @@ func (c *Amplify) UpdateWebhookRequest(input *UpdateWebhookInput) (req *request. // UpdateWebhook API operation for AWS Amplify. // -// Update a webhook. +// Updates a webhook. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3346,21 +3287,19 @@ func (c *Amplify) UpdateWebhookRequest(input *UpdateWebhookInput) (req *request. // // Returned Error Types: // * BadRequestException -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. // // * UnauthorizedException -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. // // * NotFoundException -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. // // * InternalFailureException -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. 
// // * DependentServiceFailureException -// Exception thrown when an operation fails due to a dependent service throwing -// an exception. +// An operation failed because a dependent service threw an exception. // // See also, https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25/UpdateWebhook func (c *Amplify) UpdateWebhook(input *UpdateWebhookInput) (*UpdateWebhookOutput, error) { @@ -3384,94 +3323,100 @@ func (c *Amplify) UpdateWebhookWithContext(ctx aws.Context, input *UpdateWebhook return out, req.Send() } -// Amplify App represents different branches of a repository for building, deploying, -// and hosting. +// Represents the different branches of a repository for building, deploying, +// and hosting an Amplify app. type App struct { _ struct{} `type:"structure"` - // ARN for the Amplify App. + // The Amazon Resource Name (ARN) of the Amplify app. // // AppArn is a required field AppArn *string `locationName:"appArn" type:"string" required:"true"` - // Unique Id for the Amplify App. + // The unique ID of the Amplify app. // // AppId is a required field AppId *string `locationName:"appId" min:"1" type:"string" required:"true"` - // Automated branch creation config for the Amplify App. + // Describes the automated branch creation configuration for the Amplify app. AutoBranchCreationConfig *AutoBranchCreationConfig `locationName:"autoBranchCreationConfig" type:"structure"` - // Automated branch creation glob patterns for the Amplify App. + // Describes the automated branch creation glob patterns for the Amplify app. AutoBranchCreationPatterns []*string `locationName:"autoBranchCreationPatterns" type:"list"` - // Basic Authorization credentials for branches for the Amplify App. - BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"` + // The basic authorization credentials for branches for the Amplify app. + BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"` - // BuildSpec content for Amplify App. + // Describes the content of the build specification (build spec) for the Amplify + // app. BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"` - // Create date / time for the Amplify App. + // Creates a date and time for the Amplify app. // // CreateTime is a required field CreateTime *time.Time `locationName:"createTime" type:"timestamp" required:"true"` - // Custom redirect / rewrite rules for the Amplify App. + // Describes the custom redirect and rewrite rules for the Amplify app. CustomRules []*CustomRule `locationName:"customRules" type:"list"` - // Default domain for the Amplify App. + // The default domain for the Amplify app. // // DefaultDomain is a required field DefaultDomain *string `locationName:"defaultDomain" min:"1" type:"string" required:"true"` - // Description for the Amplify App. + // The description for the Amplify app. // // Description is a required field Description *string `locationName:"description" type:"string" required:"true"` - // Enables automated branch creation for the Amplify App. + // Enables automated branch creation for the Amplify app. EnableAutoBranchCreation *bool `locationName:"enableAutoBranchCreation" type:"boolean"` - // Enables Basic Authorization for branches for the Amplify App. + // Enables basic authorization for the Amplify app's branches. // // EnableBasicAuth is a required field EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean" required:"true"` - // Enables auto-building of branches for the Amplify App. 
+ // Enables the auto-building of branches for the Amplify app. // // EnableBranchAutoBuild is a required field EnableBranchAutoBuild *bool `locationName:"enableBranchAutoBuild" type:"boolean" required:"true"` - // Environment Variables for the Amplify App. + // Automatically disconnect a branch in the Amplify Console when you delete + // a branch from your Git repository. + EnableBranchAutoDeletion *bool `locationName:"enableBranchAutoDeletion" type:"boolean"` + + // The environment variables for the Amplify app. // // EnvironmentVariables is a required field EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map" required:"true"` - // IAM service role ARN for the Amplify App. + // The AWS Identity and Access Management (IAM) service role for the Amazon + // Resource Name (ARN) of the Amplify app. IamServiceRoleArn *string `locationName:"iamServiceRoleArn" min:"1" type:"string"` - // Name for the Amplify App. + // The name for the Amplify app. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // Platform for the Amplify App. + // The platform for the Amplify app. // // Platform is a required field Platform *string `locationName:"platform" type:"string" required:"true" enum:"Platform"` - // Structure with Production Branch information. + // Describes the information about a production branch of the Amplify app. ProductionBranch *ProductionBranch `locationName:"productionBranch" type:"structure"` - // Repository for the Amplify App. + // The repository for the Amplify app. // // Repository is a required field Repository *string `locationName:"repository" type:"string" required:"true"` - // Tag for Amplify App. + // The tag for the Amplify app. Tags map[string]*string `locationName:"tags" min:"1" type:"map"` - // Update date / time for the Amplify App. + // Updates the date and time for the Amplify app. // // UpdateTime is a required field UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" required:"true"` @@ -3565,6 +3510,12 @@ func (s *App) SetEnableBranchAutoBuild(v bool) *App { return s } +// SetEnableBranchAutoDeletion sets the EnableBranchAutoDeletion field's value. +func (s *App) SetEnableBranchAutoDeletion(v bool) *App { + s.EnableBranchAutoDeletion = &v + return s +} + // SetEnvironmentVariables sets the EnvironmentVariables field's value. func (s *App) SetEnvironmentVariables(v map[string]*string) *App { s.EnvironmentVariables = v @@ -3613,16 +3564,16 @@ func (s *App) SetUpdateTime(v time.Time) *App { return s } -// Structure for artifact. +// Describes an artifact. type Artifact struct { _ struct{} `type:"structure"` - // File name for the artifact. + // The file name for the artifact. // // ArtifactFileName is a required field ArtifactFileName *string `locationName:"artifactFileName" type:"string" required:"true"` - // Unique Id for a artifact. + // The unique ID for the artifact. // // ArtifactId is a required field ArtifactId *string `locationName:"artifactId" type:"string" required:"true"` @@ -3650,35 +3601,41 @@ func (s *Artifact) SetArtifactId(v string) *Artifact { return s } -// Structure with auto branch creation config. +// Describes the automated branch creation configuration. type AutoBranchCreationConfig struct { _ struct{} `type:"structure"` - // Basic Authorization credentials for the auto created branch. - BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"` + // The basic authorization credentials for the autocreated branch. 
+ BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"` - // BuildSpec for the auto created branch. + // The build specification (build spec) for the autocreated branch. BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"` - // Enables auto building for the auto created branch. + // Enables auto building for the autocreated branch. EnableAutoBuild *bool `locationName:"enableAutoBuild" type:"boolean"` - // Enables Basic Auth for the auto created branch. + // Enables basic authorization for the autocreated branch. EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"` - // Enables Pull Request Preview for auto created branch. + // Performance mode optimizes for faster hosting performance by keeping content + // cached at the edge for a longer interval. Enabling performance mode will + // mean that hosting configuration or code changes can take up to 10 minutes + // to roll out. + EnablePerformanceMode *bool `locationName:"enablePerformanceMode" type:"boolean"` + + // Enables pull request preview for the autocreated branch. EnablePullRequestPreview *bool `locationName:"enablePullRequestPreview" type:"boolean"` - // Environment Variables for the auto created branch. + // The environment variables for the autocreated branch. EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"` - // Framework for the auto created branch. + // The framework for the autocreated branch. Framework *string `locationName:"framework" type:"string"` - // The Amplify Environment name for the pull request. + // The Amplify environment name for the pull request. PullRequestEnvironmentName *string `locationName:"pullRequestEnvironmentName" type:"string"` - // Stage for the auto created branch. + // Describes the current stage for the autocreated branch. Stage *string `locationName:"stage" type:"string" enum:"Stage"` } @@ -3729,6 +3686,12 @@ func (s *AutoBranchCreationConfig) SetEnableBasicAuth(v bool) *AutoBranchCreatio return s } +// SetEnablePerformanceMode sets the EnablePerformanceMode field's value. +func (s *AutoBranchCreationConfig) SetEnablePerformanceMode(v bool) *AutoBranchCreationConfig { + s.EnablePerformanceMode = &v + return s +} + // SetEnablePullRequestPreview sets the EnablePullRequestPreview field's value. func (s *AutoBranchCreationConfig) SetEnablePullRequestPreview(v bool) *AutoBranchCreationConfig { s.EnablePullRequestPreview = &v @@ -3759,33 +3722,35 @@ func (s *AutoBranchCreationConfig) SetStage(v string) *AutoBranchCreationConfig return s } -// Backend environment for an Amplify App. +// Describes the backend environment for an Amplify app. type BackendEnvironment struct { _ struct{} `type:"structure"` - // Arn for a backend environment, part of an Amplify App. + // The Amazon Resource Name (ARN) for a backend environment that is part of + // an Amplify app. // // BackendEnvironmentArn is a required field BackendEnvironmentArn *string `locationName:"backendEnvironmentArn" min:"1" type:"string" required:"true"` - // Creation date and time for a backend environment, part of an Amplify App. + // The creation date and time for a backend environment that is part of an Amplify + // app. // // CreateTime is a required field CreateTime *time.Time `locationName:"createTime" type:"timestamp" required:"true"` - // Name of deployment artifacts. + // The name of deployment artifacts. 
DeploymentArtifacts *string `locationName:"deploymentArtifacts" min:"1" type:"string"` - // Name for a backend environment, part of an Amplify App. + // The name for a backend environment that is part of an Amplify app. // // EnvironmentName is a required field EnvironmentName *string `locationName:"environmentName" min:"1" type:"string" required:"true"` - // CloudFormation stack name of backend environment. + // The AWS CloudFormation stack name of a backend environment. StackName *string `locationName:"stackName" min:"1" type:"string"` - // Last updated date and time for a backend environment, part of an Amplify - // App. + // The last updated date and time for a backend environment that is part of + // an Amplify app. // // UpdateTime is a required field UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" required:"true"` @@ -3837,10 +3802,10 @@ func (s *BackendEnvironment) SetUpdateTime(v time.Time) *BackendEnvironment { return s } -// Exception thrown when a request contains unexpected data. +// A request contains unexpected data. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3857,17 +3822,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3875,66 +3840,68 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } -// Branch for an Amplify App, which maps to a 3rd party repository branch. +// The branch for an Amplify app, which maps to a third-party repository branch. type Branch struct { _ struct{} `type:"structure"` - // Id of the active job for a branch, part of an Amplify App. + // The ID of the active job for a branch of an Amplify app. // // ActiveJobId is a required field ActiveJobId *string `locationName:"activeJobId" type:"string" required:"true"` - // List of custom resources that are linked to this branch. + // A list of custom resources that are linked to this branch. AssociatedResources []*string `locationName:"associatedResources" type:"list"` - // ARN for a Backend Environment, part of an Amplify App. 
+ // The Amazon Resource Name (ARN) for a backend environment that is part of + // an Amplify app. BackendEnvironmentArn *string `locationName:"backendEnvironmentArn" min:"1" type:"string"` - // Basic Authorization credentials for a branch, part of an Amplify App. - BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"` + // The basic authorization credentials for a branch of an Amplify app. + BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"` - // ARN for a branch, part of an Amplify App. + // The Amazon Resource Name (ARN) for a branch that is part of an Amplify app. // // BranchArn is a required field BranchArn *string `locationName:"branchArn" type:"string" required:"true"` - // Name for a branch, part of an Amplify App. + // The name for the branch that is part of an Amplify app. // // BranchName is a required field BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` - // BuildSpec content for branch for Amplify App. + // The build specification (build spec) content for the branch of an Amplify + // app. BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"` - // Creation date and time for a branch, part of an Amplify App. + // The creation date and time for a branch that is part of an Amplify app. // // CreateTime is a required field CreateTime *time.Time `locationName:"createTime" type:"timestamp" required:"true"` - // Custom domains for a branch, part of an Amplify App. + // The custom domains for a branch of an Amplify app. // // CustomDomains is a required field CustomDomains []*string `locationName:"customDomains" type:"list" required:"true"` - // Description for a branch, part of an Amplify App. + // The description for the branch that is part of an Amplify app. // // Description is a required field Description *string `locationName:"description" type:"string" required:"true"` @@ -3942,69 +3909,75 @@ type Branch struct { // The destination branch if the branch is a pull request branch. DestinationBranch *string `locationName:"destinationBranch" min:"1" type:"string"` - // Display name for a branch, will use as the default domain prefix. + // The display name for the branch. This is used as the default domain prefix. // // DisplayName is a required field DisplayName *string `locationName:"displayName" type:"string" required:"true"` - // Enables auto-building on push for a branch, part of an Amplify App. + // Enables auto-building on push for a branch of an Amplify app. // // EnableAutoBuild is a required field EnableAutoBuild *bool `locationName:"enableAutoBuild" type:"boolean" required:"true"` - // Enables Basic Authorization for a branch, part of an Amplify App. + // Enables basic authorization for a branch of an Amplify app. // // EnableBasicAuth is a required field EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean" required:"true"` - // Enables notifications for a branch, part of an Amplify App. + // Enables notifications for a branch that is part of an Amplify app. // // EnableNotification is a required field EnableNotification *bool `locationName:"enableNotification" type:"boolean" required:"true"` - // Enables Pull Request Preview for this branch. + // Performance mode optimizes for faster hosting performance by keeping content + // cached at the edge for a longer interval. Enabling performance mode will + // mean that hosting configuration or code changes can take up to 10 minutes + // to roll out. 
+ EnablePerformanceMode *bool `locationName:"enablePerformanceMode" type:"boolean"` + + // Enables pull request preview for the branch. // // EnablePullRequestPreview is a required field EnablePullRequestPreview *bool `locationName:"enablePullRequestPreview" type:"boolean" required:"true"` - // Environment Variables specific to a branch, part of an Amplify App. + // The environment variables specific to a branch of an Amplify app. // // EnvironmentVariables is a required field EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map" required:"true"` - // Framework for a branch, part of an Amplify App. + // The framework for a branch of an Amplify app. // // Framework is a required field Framework *string `locationName:"framework" type:"string" required:"true"` - // The Amplify Environment name for the pull request. + // The Amplify environment name for the pull request. PullRequestEnvironmentName *string `locationName:"pullRequestEnvironmentName" type:"string"` // The source branch if the branch is a pull request branch. SourceBranch *string `locationName:"sourceBranch" min:"1" type:"string"` - // Stage for a branch, part of an Amplify App. + // The current stage for the branch that is part of an Amplify app. // // Stage is a required field Stage *string `locationName:"stage" type:"string" required:"true" enum:"Stage"` - // Tag for branch for Amplify App. + // The tag for the branch of an Amplify app. Tags map[string]*string `locationName:"tags" min:"1" type:"map"` - // Thumbnail URL for the branch. + // The thumbnail URL for the branch of an Amplify app. ThumbnailUrl *string `locationName:"thumbnailUrl" min:"1" type:"string"` - // Total number of Jobs part of an Amplify App. + // The total number of jobs that are part of an Amplify app. // // TotalNumberOfJobs is a required field TotalNumberOfJobs *string `locationName:"totalNumberOfJobs" type:"string" required:"true"` - // The content TTL for the website in seconds. + // The content Time to Live (TTL) for the website in seconds. // // Ttl is a required field Ttl *string `locationName:"ttl" type:"string" required:"true"` - // Last updated date and time for a branch, part of an Amplify App. + // The last updated date and time for a branch that is part of an Amplify app. // // UpdateTime is a required field UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" required:"true"` @@ -4110,6 +4083,12 @@ func (s *Branch) SetEnableNotification(v bool) *Branch { return s } +// SetEnablePerformanceMode sets the EnablePerformanceMode field's value. +func (s *Branch) SetEnablePerformanceMode(v bool) *Branch { + s.EnablePerformanceMode = &v + return s +} + // SetEnablePullRequestPreview sets the EnablePullRequestPreview field's value. func (s *Branch) SetEnablePullRequestPreview(v bool) *Branch { s.EnablePullRequestPreview = &v @@ -4176,64 +4155,71 @@ func (s *Branch) SetUpdateTime(v time.Time) *Branch { return s } -// Request structure used to create Apps in Amplify. +// The request structure used to create apps in Amplify. type CreateAppInput struct { _ struct{} `type:"structure"` - // Personal Access token for 3rd party source control system for an Amplify - // App, used to create webhook and read-only deploy key. Token is not stored. - AccessToken *string `locationName:"accessToken" min:"1" type:"string"` + // The personal access token for a third-party source control system for an + // Amplify app. The personal access token is used to create a webhook and a + // read-only deploy key. 
The token is not stored. + AccessToken *string `locationName:"accessToken" min:"1" type:"string" sensitive:"true"` - // Automated branch creation config for the Amplify App. + // The automated branch creation configuration for the Amplify app. AutoBranchCreationConfig *AutoBranchCreationConfig `locationName:"autoBranchCreationConfig" type:"structure"` - // Automated branch creation glob patterns for the Amplify App. + // The automated branch creation glob patterns for the Amplify app. AutoBranchCreationPatterns []*string `locationName:"autoBranchCreationPatterns" type:"list"` - // Credentials for Basic Authorization for an Amplify App. - BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"` + // The credentials for basic authorization for an Amplify app. + BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"` - // BuildSpec for an Amplify App + // The build specification (build spec) for an Amplify app. BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"` - // Custom rewrite / redirect rules for an Amplify App. + // The custom rewrite and redirect rules for an Amplify app. CustomRules []*CustomRule `locationName:"customRules" type:"list"` - // Description for an Amplify App + // The description for an Amplify app. Description *string `locationName:"description" type:"string"` - // Enables automated branch creation for the Amplify App. + // Enables automated branch creation for the Amplify app. EnableAutoBranchCreation *bool `locationName:"enableAutoBranchCreation" type:"boolean"` - // Enable Basic Authorization for an Amplify App, this will apply to all branches - // part of this App. + // Enables basic authorization for an Amplify app. This will apply to all branches + // that are part of this app. EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"` - // Enable the auto building of branches for an Amplify App. + // Enables the auto building of branches for an Amplify app. EnableBranchAutoBuild *bool `locationName:"enableBranchAutoBuild" type:"boolean"` - // Environment variables map for an Amplify App. + // Automatically disconnects a branch in the Amplify Console when you delete + // a branch from your Git repository. + EnableBranchAutoDeletion *bool `locationName:"enableBranchAutoDeletion" type:"boolean"` + + // The environment variables map for an Amplify app. EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"` - // AWS IAM service role for an Amplify App + // The AWS Identity and Access Management (IAM) service role for an Amplify + // app. IamServiceRoleArn *string `locationName:"iamServiceRoleArn" min:"1" type:"string"` - // Name for the Amplify App + // The name for the Amplify app. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // OAuth token for 3rd party source control system for an Amplify App, used - // to create webhook and read-only deploy key. OAuth token is not stored. - OauthToken *string `locationName:"oauthToken" type:"string"` + // The OAuth token for a third-party source control system for an Amplify app. + // The OAuth token is used to create a webhook and a read-only deploy key. The + // OAuth token is not stored. + OauthToken *string `locationName:"oauthToken" type:"string" sensitive:"true"` - // Platform / framework for an Amplify App + // The platform or framework for an Amplify app. 
Platform *string `locationName:"platform" type:"string" enum:"Platform"` - // Repository for an Amplify App + // The repository for an Amplify app. Repository *string `locationName:"repository" type:"string"` - // Tag for an Amplify App + // The tag for an Amplify app. Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } @@ -4350,6 +4336,12 @@ func (s *CreateAppInput) SetEnableBranchAutoBuild(v bool) *CreateAppInput { return s } +// SetEnableBranchAutoDeletion sets the EnableBranchAutoDeletion field's value. +func (s *CreateAppInput) SetEnableBranchAutoDeletion(v bool) *CreateAppInput { + s.EnableBranchAutoDeletion = &v + return s +} + // SetEnvironmentVariables sets the EnvironmentVariables field's value. func (s *CreateAppInput) SetEnvironmentVariables(v map[string]*string) *CreateAppInput { s.EnvironmentVariables = v @@ -4395,8 +4387,8 @@ func (s *CreateAppInput) SetTags(v map[string]*string) *CreateAppInput { type CreateAppOutput struct { _ struct{} `type:"structure"` - // Amplify App represents different branches of a repository for building, deploying, - // and hosting. + // Represents the different branches of a repository for building, deploying, + // and hosting an Amplify app. // // App is a required field App *App `locationName:"app" type:"structure" required:"true"` @@ -4418,24 +4410,24 @@ func (s *CreateAppOutput) SetApp(v *App) *CreateAppOutput { return s } -// Request structure for a backend environment create request. +// The request structure for the backend environment create request. type CreateBackendEnvironmentInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name of deployment artifacts. + // The name of deployment artifacts. DeploymentArtifacts *string `locationName:"deploymentArtifacts" min:"1" type:"string"` - // Name for the backend environment. + // The name for the backend environment. // // EnvironmentName is a required field EnvironmentName *string `locationName:"environmentName" min:"1" type:"string" required:"true"` - // CloudFormation stack name of backend environment. + // The AWS CloudFormation stack name of a backend environment. StackName *string `locationName:"stackName" min:"1" type:"string"` } @@ -4501,11 +4493,11 @@ func (s *CreateBackendEnvironmentInput) SetStackName(v string) *CreateBackendEnv return s } -// Result structure for create backend environment. +// The result structure for the create backend environment request. type CreateBackendEnvironmentOutput struct { _ struct{} `type:"structure"` - // Backend environment structure for an amplify App. + // Describes the backend environment for an Amplify app. // // BackendEnvironment is a required field BackendEnvironment *BackendEnvironment `locationName:"backendEnvironment" type:"structure" required:"true"` @@ -4527,63 +4519,70 @@ func (s *CreateBackendEnvironmentOutput) SetBackendEnvironment(v *BackendEnviron return s } -// Request structure for a branch create request. +// The request structure for the create branch request. type CreateBranchInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // ARN for a Backend Environment, part of an Amplify App. 
+ // The Amazon Resource Name (ARN) for a backend environment that is part of + // an Amplify app. BackendEnvironmentArn *string `locationName:"backendEnvironmentArn" min:"1" type:"string"` - // Basic Authorization credentials for the branch. - BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"` + // The basic authorization credentials for the branch. + BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"` - // Name for the branch. + // The name for the branch. // // BranchName is a required field BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` - // BuildSpec for the branch. + // The build specification (build spec) for the branch. BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"` - // Description for the branch. + // The description for the branch. Description *string `locationName:"description" type:"string"` - // Display name for a branch, will use as the default domain prefix. + // The display name for a branch. This is used as the default domain prefix. DisplayName *string `locationName:"displayName" type:"string"` // Enables auto building for the branch. EnableAutoBuild *bool `locationName:"enableAutoBuild" type:"boolean"` - // Enables Basic Auth for the branch. + // Enables basic authorization for the branch. EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"` // Enables notifications for the branch. EnableNotification *bool `locationName:"enableNotification" type:"boolean"` - // Enables Pull Request Preview for this branch. + // Performance mode optimizes for faster hosting performance by keeping content + // cached at the edge for a longer interval. Enabling performance mode will + // mean that hosting configuration or code changes can take up to 10 minutes + // to roll out. + EnablePerformanceMode *bool `locationName:"enablePerformanceMode" type:"boolean"` + + // Enables pull request preview for this branch. EnablePullRequestPreview *bool `locationName:"enablePullRequestPreview" type:"boolean"` - // Environment Variables for the branch. + // The environment variables for the branch. EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"` - // Framework for the branch. + // The framework for the branch. Framework *string `locationName:"framework" type:"string"` - // The Amplify Environment name for the pull request. + // The Amplify environment name for the pull request. PullRequestEnvironmentName *string `locationName:"pullRequestEnvironmentName" type:"string"` - // Stage for the branch. + // Describes the current stage for the branch. Stage *string `locationName:"stage" type:"string" enum:"Stage"` - // Tag for the branch. + // The tag for the branch. Tags map[string]*string `locationName:"tags" min:"1" type:"map"` - // The content TTL for the website in seconds. + // The content Time To Live (TTL) for the website in seconds. Ttl *string `locationName:"ttl" type:"string"` } @@ -4688,6 +4687,12 @@ func (s *CreateBranchInput) SetEnableNotification(v bool) *CreateBranchInput { return s } +// SetEnablePerformanceMode sets the EnablePerformanceMode field's value. +func (s *CreateBranchInput) SetEnablePerformanceMode(v bool) *CreateBranchInput { + s.EnablePerformanceMode = &v + return s +} + // SetEnablePullRequestPreview sets the EnablePullRequestPreview field's value. 
func (s *CreateBranchInput) SetEnablePullRequestPreview(v bool) *CreateBranchInput { s.EnablePullRequestPreview = &v @@ -4730,11 +4735,12 @@ func (s *CreateBranchInput) SetTtl(v string) *CreateBranchInput { return s } -// Result structure for create branch request. +// The result structure for create branch request. type CreateBranchOutput struct { _ struct{} `type:"structure"` - // Branch structure for an Amplify App. + // Describes the branch for an Amplify app, which maps to a third-party repository + // branch. // // Branch is a required field Branch *Branch `locationName:"branch" type:"structure" required:"true"` @@ -4756,24 +4762,24 @@ func (s *CreateBranchOutput) SetBranch(v *Branch) *CreateBranchOutput { return s } -// Request structure for create a new deployment. +// The request structure for the create a new deployment request. type CreateDeploymentInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name for the branch, for the Job. + // The name for the branch, for the job. // // BranchName is a required field BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` - // Optional file map that contains file name as the key and file content md5 - // hash as the value. If this argument is provided, the service will generate - // different upload url per file. Otherwise, the service will only generate - // a single upload url for the zipped files. + // An optional file map that contains the file name as the key and the file + // content md5 hash as the value. If this argument is provided, the service + // will generate a unique upload URL per file. Otherwise, the service will only + // generate a single upload URL for the zipped files. FileMap map[string]*string `locationName:"fileMap" type:"map"` } @@ -4827,20 +4833,21 @@ func (s *CreateDeploymentInput) SetFileMap(v map[string]*string) *CreateDeployme return s } -// Result structure for create a new deployment. +// The result structure for the create a new deployment request. type CreateDeploymentOutput struct { _ struct{} `type:"structure"` - // When the fileMap argument is provided in the request, the fileUploadUrls - // will contain a map of file names to upload url. + // When the fileMap argument is provided in the request, fileUploadUrls will + // contain a map of file names to upload URLs. // // FileUploadUrls is a required field FileUploadUrls map[string]*string `locationName:"fileUploadUrls" type:"map" required:"true"` - // The jobId for this deployment, will supply to start deployment api. + // The job ID for this deployment. will supply to start deployment api. JobId *string `locationName:"jobId" type:"string"` - // When the fileMap argument is NOT provided. This zipUploadUrl will be returned. + // When the fileMap argument is not provided in the request, this zipUploadUrl + // is returned. // // ZipUploadUrl is a required field ZipUploadUrl *string `locationName:"zipUploadUrl" type:"string" required:"true"` @@ -4874,24 +4881,31 @@ func (s *CreateDeploymentOutput) SetZipUploadUrl(v string) *CreateDeploymentOutp return s } -// Request structure for create Domain Association request. +// The request structure for the create domain association request. type CreateDomainAssociationInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. 
+ // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Domain name for the Domain Association. + // Sets the branch patterns for automatic subdomain creation. + AutoSubDomainCreationPatterns []*string `locationName:"autoSubDomainCreationPatterns" type:"list"` + + // The required AWS Identity and Access Management (IAM) service role for the + // Amazon Resource Name (ARN) for automatically creating subdomains. + AutoSubDomainIAMRole *string `locationName:"autoSubDomainIAMRole" type:"string"` + + // The domain name for the domain association. // // DomainName is a required field DomainName *string `locationName:"domainName" type:"string" required:"true"` - // Enables automated creation of Subdomains for branches. (Currently not supported) + // Enables the automated creation of subdomains for branches. EnableAutoSubDomain *bool `locationName:"enableAutoSubDomain" type:"boolean"` - // Setting structure for the Subdomain. + // The setting for the subdomain. // // SubDomainSettings is a required field SubDomainSettings []*SubDomainSetting `locationName:"subDomainSettings" type:"list" required:"true"` @@ -4945,6 +4959,18 @@ func (s *CreateDomainAssociationInput) SetAppId(v string) *CreateDomainAssociati return s } +// SetAutoSubDomainCreationPatterns sets the AutoSubDomainCreationPatterns field's value. +func (s *CreateDomainAssociationInput) SetAutoSubDomainCreationPatterns(v []*string) *CreateDomainAssociationInput { + s.AutoSubDomainCreationPatterns = v + return s +} + +// SetAutoSubDomainIAMRole sets the AutoSubDomainIAMRole field's value. +func (s *CreateDomainAssociationInput) SetAutoSubDomainIAMRole(v string) *CreateDomainAssociationInput { + s.AutoSubDomainIAMRole = &v + return s +} + // SetDomainName sets the DomainName field's value. func (s *CreateDomainAssociationInput) SetDomainName(v string) *CreateDomainAssociationInput { s.DomainName = &v @@ -4963,11 +4989,12 @@ func (s *CreateDomainAssociationInput) SetSubDomainSettings(v []*SubDomainSettin return s } -// Result structure for the create Domain Association request. +// The result structure for the create domain association request. type CreateDomainAssociationOutput struct { _ struct{} `type:"structure"` - // Domain Association structure. + // Describes the structure of a domain association, which associates a custom + // domain with an Amplify app. // // DomainAssociation is a required field DomainAssociation *DomainAssociation `locationName:"domainAssociation" type:"structure" required:"true"` @@ -4989,21 +5016,21 @@ func (s *CreateDomainAssociationOutput) SetDomainAssociation(v *DomainAssociatio return s } -// Request structure for create webhook request. +// The request structure for the create webhook request. type CreateWebhookInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name for a branch, part of an Amplify App. + // The name for a branch that is part of an Amplify app. // // BranchName is a required field BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` - // Description for a webhook. + // The description for a webhook. 
Description *string `locationName:"description" type:"string"` } @@ -5057,11 +5084,11 @@ func (s *CreateWebhookInput) SetDescription(v string) *CreateWebhookInput { return s } -// Result structure for the create webhook request. +// The result structure for the create webhook request. type CreateWebhookOutput struct { _ struct{} `type:"structure"` - // Webhook structure. + // Describes a webhook that connects repository events to an Amplify app. // // Webhook is a required field Webhook *Webhook `locationName:"webhook" type:"structure" required:"true"` @@ -5083,11 +5110,11 @@ func (s *CreateWebhookOutput) SetWebhook(v *Webhook) *CreateWebhookOutput { return s } -// Custom rewrite / redirect rule. +// Describes a custom rewrite or redirect rule. type CustomRule struct { _ struct{} `type:"structure"` - // The condition for a URL rewrite or redirect rule, e.g. country code. + // The condition for a URL rewrite or redirect rule, such as a country code. Condition *string `locationName:"condition" min:"1" type:"string"` // The source pattern for a URL rewrite or redirect rule. @@ -5096,6 +5123,27 @@ type CustomRule struct { Source *string `locationName:"source" min:"1" type:"string" required:"true"` // The status code for a URL rewrite or redirect rule. + // + // 200 + // + // Represents a 200 rewrite rule. + // + // 301 + // + // Represents a 301 (moved pemanently) redirect rule. This and all future requests + // should be directed to the target URL. + // + // 302 + // + // Represents a 302 temporary redirect rule. + // + // 404 + // + // Represents a 404 redirect rule. + // + // 404-200 + // + // Represents a 404 rewrite rule. Status *string `locationName:"status" min:"3" type:"string"` // The target pattern for a URL rewrite or redirect rule. @@ -5166,11 +5214,11 @@ func (s *CustomRule) SetTarget(v string) *CustomRule { return s } -// Request structure for an Amplify App delete request. +// Describes the request structure for the delete app request. type DeleteAppInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` @@ -5208,12 +5256,12 @@ func (s *DeleteAppInput) SetAppId(v string) *DeleteAppInput { return s } -// Result structure for an Amplify App delete request. +// The result structure for the delete app request. type DeleteAppOutput struct { _ struct{} `type:"structure"` - // Amplify App represents different branches of a repository for building, deploying, - // and hosting. + // Represents the different branches of a repository for building, deploying, + // and hosting an Amplify app. // // App is a required field App *App `locationName:"app" type:"structure" required:"true"` @@ -5235,16 +5283,16 @@ func (s *DeleteAppOutput) SetApp(v *App) *DeleteAppOutput { return s } -// Request structure for delete backend environment request. +// The request structure for the delete backend environment request. type DeleteBackendEnvironmentInput struct { _ struct{} `type:"structure"` - // Unique Id of an Amplify App. + // The unique ID of an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name of a backend environment of an Amplify App. + // The name of a backend environment of an Amplify app. 
// // EnvironmentName is a required field EnvironmentName *string `location:"uri" locationName:"environmentName" min:"1" type:"string" required:"true"` @@ -5294,11 +5342,11 @@ func (s *DeleteBackendEnvironmentInput) SetEnvironmentName(v string) *DeleteBack return s } -// Result structure of a delete backend environment result. +// The result structure of the delete backend environment result. type DeleteBackendEnvironmentOutput struct { _ struct{} `type:"structure"` - // Backend environment structure for an Amplify App. + // Describes the backend environment for an Amplify app. // // BackendEnvironment is a required field BackendEnvironment *BackendEnvironment `locationName:"backendEnvironment" type:"structure" required:"true"` @@ -5320,16 +5368,16 @@ func (s *DeleteBackendEnvironmentOutput) SetBackendEnvironment(v *BackendEnviron return s } -// Request structure for delete branch request. +// The request structure for the delete branch request. type DeleteBranchInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name for the branch. + // The name for the branch. // // BranchName is a required field BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` @@ -5379,11 +5427,11 @@ func (s *DeleteBranchInput) SetBranchName(v string) *DeleteBranchInput { return s } -// Result structure for delete branch request. +// The result structure for the delete branch request. type DeleteBranchOutput struct { _ struct{} `type:"structure"` - // Branch structure for an Amplify App. + // The branch for an Amplify app, which maps to a third-party repository branch. // // Branch is a required field Branch *Branch `locationName:"branch" type:"structure" required:"true"` @@ -5405,16 +5453,16 @@ func (s *DeleteBranchOutput) SetBranch(v *Branch) *DeleteBranchOutput { return s } -// Request structure for the delete Domain Association request. +// The request structure for the delete domain association request. type DeleteDomainAssociationInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique id for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name of the domain. + // The name of the domain. // // DomainName is a required field DomainName *string `location:"uri" locationName:"domainName" type:"string" required:"true"` @@ -5467,8 +5515,8 @@ func (s *DeleteDomainAssociationInput) SetDomainName(v string) *DeleteDomainAsso type DeleteDomainAssociationOutput struct { _ struct{} `type:"structure"` - // Structure for Domain Association, which associates a custom domain with an - // Amplify App. + // Describes a domain association that associates a custom domain with an Amplify + // app. // // DomainAssociation is a required field DomainAssociation *DomainAssociation `locationName:"domainAssociation" type:"structure" required:"true"` @@ -5490,21 +5538,21 @@ func (s *DeleteDomainAssociationOutput) SetDomainAssociation(v *DomainAssociatio return s } -// Request structure for delete job request. +// The request structure for the delete job request. type DeleteJobInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. 
// // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name for the branch, for the Job. + // The name for the branch, for the job. // // BranchName is a required field BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` - // Unique Id for the Job. + // The unique ID for the job. // // JobId is a required field JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"` @@ -5566,11 +5614,11 @@ func (s *DeleteJobInput) SetJobId(v string) *DeleteJobInput { return s } -// Result structure for the delete job request. +// The result structure for the delete job request. type DeleteJobOutput struct { _ struct{} `type:"structure"` - // Structure for the summary of a Job. + // Describes the summary for an execution job for an Amplify app. // // JobSummary is a required field JobSummary *JobSummary `locationName:"jobSummary" type:"structure" required:"true"` @@ -5592,11 +5640,11 @@ func (s *DeleteJobOutput) SetJobSummary(v *JobSummary) *DeleteJobOutput { return s } -// Request structure for the delete webhook request. +// The request structure for the delete webhook request. type DeleteWebhookInput struct { _ struct{} `type:"structure"` - // Unique Id for a webhook. + // The unique ID for a webhook. // // WebhookId is a required field WebhookId *string `location:"uri" locationName:"webhookId" type:"string" required:"true"` @@ -5634,11 +5682,11 @@ func (s *DeleteWebhookInput) SetWebhookId(v string) *DeleteWebhookInput { return s } -// Result structure for the delete webhook request. +// The result structure for the delete webhook request. type DeleteWebhookOutput struct { _ struct{} `type:"structure"` - // Webhook structure. + // Describes a webhook that connects repository events to an Amplify app. // // Webhook is a required field Webhook *Webhook `locationName:"webhook" type:"structure" required:"true"` @@ -5660,11 +5708,10 @@ func (s *DeleteWebhookOutput) SetWebhook(v *Webhook) *DeleteWebhookOutput { return s } -// Exception thrown when an operation fails due to a dependent service throwing -// an exception. +// An operation failed because a dependent service threw an exception. type DependentServiceFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5681,17 +5728,17 @@ func (s DependentServiceFailureException) GoString() string { func newErrorDependentServiceFailureException(v protocol.ResponseMetadata) error { return &DependentServiceFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DependentServiceFailureException) Code() string { +func (s *DependentServiceFailureException) Code() string { return "DependentServiceFailureException" } // Message returns the exception's message. -func (s DependentServiceFailureException) Message() string { +func (s *DependentServiceFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5699,58 +5746,65 @@ func (s DependentServiceFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s DependentServiceFailureException) OrigErr() error { +func (s *DependentServiceFailureException) OrigErr() error { return nil } -func (s DependentServiceFailureException) Error() string { +func (s *DependentServiceFailureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DependentServiceFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DependentServiceFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DependentServiceFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *DependentServiceFailureException) RequestID() string { + return s.RespMetadata.RequestID } -// Structure for Domain Association, which associates a custom domain with an -// Amplify App. +// Describes a domain association that associates a custom domain with an Amplify +// app. type DomainAssociation struct { _ struct{} `type:"structure"` - // DNS Record for certificate verification. + // Sets branch patterns for automatic subdomain creation. + AutoSubDomainCreationPatterns []*string `locationName:"autoSubDomainCreationPatterns" type:"list"` + + // The required AWS Identity and Access Management (IAM) service role for the + // Amazon Resource Name (ARN) for automatically creating subdomains. + AutoSubDomainIAMRole *string `locationName:"autoSubDomainIAMRole" type:"string"` + + // The DNS record for certificate verification. CertificateVerificationDNSRecord *string `locationName:"certificateVerificationDNSRecord" type:"string"` - // ARN for the Domain Association. + // The Amazon Resource Name (ARN) for the domain association. // // DomainAssociationArn is a required field DomainAssociationArn *string `locationName:"domainAssociationArn" type:"string" required:"true"` - // Name of the domain. + // The name of the domain. // // DomainName is a required field DomainName *string `locationName:"domainName" type:"string" required:"true"` - // Status fo the Domain Association. + // The current status of the domain association. // // DomainStatus is a required field DomainStatus *string `locationName:"domainStatus" type:"string" required:"true" enum:"DomainStatus"` - // Enables automated creation of Subdomains for branches. (Currently not supported) + // Enables the automated creation of subdomains for branches. // // EnableAutoSubDomain is a required field EnableAutoSubDomain *bool `locationName:"enableAutoSubDomain" type:"boolean" required:"true"` - // Reason for the current status of the Domain Association. + // The reason for the current status of the domain association. // // StatusReason is a required field StatusReason *string `locationName:"statusReason" type:"string" required:"true"` - // Subdomains for the Domain Association. + // The subdomains for the domain association. // // SubDomains is a required field SubDomains []*SubDomain `locationName:"subDomains" type:"list" required:"true"` @@ -5766,6 +5820,18 @@ func (s DomainAssociation) GoString() string { return s.String() } +// SetAutoSubDomainCreationPatterns sets the AutoSubDomainCreationPatterns field's value. +func (s *DomainAssociation) SetAutoSubDomainCreationPatterns(v []*string) *DomainAssociation { + s.AutoSubDomainCreationPatterns = v + return s +} + +// SetAutoSubDomainIAMRole sets the AutoSubDomainIAMRole field's value. 
+func (s *DomainAssociation) SetAutoSubDomainIAMRole(v string) *DomainAssociation { + s.AutoSubDomainIAMRole = &v + return s +} + // SetCertificateVerificationDNSRecord sets the CertificateVerificationDNSRecord field's value. func (s *DomainAssociation) SetCertificateVerificationDNSRecord(v string) *DomainAssociation { s.CertificateVerificationDNSRecord = &v @@ -5808,24 +5874,26 @@ func (s *DomainAssociation) SetSubDomains(v []*SubDomain) *DomainAssociation { return s } -// Request structure for the generate access logs request. +// The request structure for the generate access logs request. type GenerateAccessLogsInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name of the domain. + // The name of the domain. // // DomainName is a required field DomainName *string `locationName:"domainName" type:"string" required:"true"` - // The time at which the logs should end, inclusive. + // The time at which the logs should end. The time range specified is inclusive + // of the end time. EndTime *time.Time `locationName:"endTime" type:"timestamp"` - // The time at which the logs should start, inclusive. + // The time at which the logs should start. The time range specified is inclusive + // of the start time. StartTime *time.Time `locationName:"startTime" type:"timestamp"` } @@ -5882,11 +5950,11 @@ func (s *GenerateAccessLogsInput) SetStartTime(v time.Time) *GenerateAccessLogsI return s } -// Result structure for the generate access logs request. +// The result structure for the generate access logs request. type GenerateAccessLogsOutput struct { _ struct{} `type:"structure"` - // Pre-signed URL for the requested access logs. + // The pre-signed URL for the requested access logs. LogUrl *string `locationName:"logUrl" type:"string"` } @@ -5906,11 +5974,11 @@ func (s *GenerateAccessLogsOutput) SetLogUrl(v string) *GenerateAccessLogsOutput return s } -// Request structure for get App request. +// The request structure for the get app request. type GetAppInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` @@ -5951,8 +6019,8 @@ func (s *GetAppInput) SetAppId(v string) *GetAppInput { type GetAppOutput struct { _ struct{} `type:"structure"` - // Amplify App represents different branches of a repository for building, deploying, - // and hosting. + // Represents the different branches of a repository for building, deploying, + // and hosting an Amplify app. // // App is a required field App *App `locationName:"app" type:"structure" required:"true"` @@ -5974,11 +6042,11 @@ func (s *GetAppOutput) SetApp(v *App) *GetAppOutput { return s } -// Request structure for the get artifact request. +// Returns the request structure for the get artifact request. type GetArtifactUrlInput struct { _ struct{} `type:"structure"` - // Unique Id for a artifact. + // The unique ID for an artifact. // // ArtifactId is a required field ArtifactId *string `location:"uri" locationName:"artifactId" type:"string" required:"true"` @@ -6016,16 +6084,16 @@ func (s *GetArtifactUrlInput) SetArtifactId(v string) *GetArtifactUrlInput { return s } -// Result structure for the get artifact request. +// Returns the result structure for the get artifact request. 
type GetArtifactUrlOutput struct { _ struct{} `type:"structure"` - // Unique Id for a artifact. + // The unique ID for an artifact. // // ArtifactId is a required field ArtifactId *string `locationName:"artifactId" type:"string" required:"true"` - // Presigned url for the artifact. + // The presigned URL for the artifact. // // ArtifactUrl is a required field ArtifactUrl *string `locationName:"artifactUrl" type:"string" required:"true"` @@ -6053,16 +6121,16 @@ func (s *GetArtifactUrlOutput) SetArtifactUrl(v string) *GetArtifactUrlOutput { return s } -// Request structure for get backend environment request. +// The request structure for the get backend environment request. type GetBackendEnvironmentInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique id for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name for the backend environment. + // The name for the backend environment. // // EnvironmentName is a required field EnvironmentName *string `location:"uri" locationName:"environmentName" min:"1" type:"string" required:"true"` @@ -6112,11 +6180,11 @@ func (s *GetBackendEnvironmentInput) SetEnvironmentName(v string) *GetBackendEnv return s } -// Result structure for get backend environment result. +// The result structure for the get backend environment result. type GetBackendEnvironmentOutput struct { _ struct{} `type:"structure"` - // Backend environment structure for an an Amplify App. + // Describes the backend environment for an Amplify app. // // BackendEnvironment is a required field BackendEnvironment *BackendEnvironment `locationName:"backendEnvironment" type:"structure" required:"true"` @@ -6138,16 +6206,16 @@ func (s *GetBackendEnvironmentOutput) SetBackendEnvironment(v *BackendEnvironmen return s } -// Request structure for get branch request. +// The request structure for the get branch request. type GetBranchInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name for the branch. + // The name for the branch. // // BranchName is a required field BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` @@ -6200,7 +6268,7 @@ func (s *GetBranchInput) SetBranchName(v string) *GetBranchInput { type GetBranchOutput struct { _ struct{} `type:"structure"` - // Branch for an Amplify App, which maps to a 3rd party repository branch. + // The branch for an Amplify app, which maps to a third-party repository branch. // // Branch is a required field Branch *Branch `locationName:"branch" type:"structure" required:"true"` @@ -6222,16 +6290,16 @@ func (s *GetBranchOutput) SetBranch(v *Branch) *GetBranchOutput { return s } -// Request structure for the get Domain Association request. +// The request structure for the get domain association request. type GetDomainAssociationInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique id for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name of the domain. + // The name of the domain. 
// // DomainName is a required field DomainName *string `location:"uri" locationName:"domainName" type:"string" required:"true"` @@ -6281,11 +6349,12 @@ func (s *GetDomainAssociationInput) SetDomainName(v string) *GetDomainAssociatio return s } -// Result structure for the get Domain Association request. +// The result structure for the get domain association request. type GetDomainAssociationOutput struct { _ struct{} `type:"structure"` - // Domain Association structure. + // Describes the structure of a domain association, which associates a custom + // domain with an Amplify app. // // DomainAssociation is a required field DomainAssociation *DomainAssociation `locationName:"domainAssociation" type:"structure" required:"true"` @@ -6307,21 +6376,21 @@ func (s *GetDomainAssociationOutput) SetDomainAssociation(v *DomainAssociation) return s } -// Request structure for get job request. +// The request structure for the get job request. type GetJobInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name for the branch, for the Job. + // The branch name for the job. // // BranchName is a required field BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` - // Unique Id for the Job. + // The unique ID for the job. // // JobId is a required field JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"` @@ -6386,7 +6455,7 @@ func (s *GetJobInput) SetJobId(v string) *GetJobInput { type GetJobOutput struct { _ struct{} `type:"structure"` - // Structure for an execution job for an Amplify App. + // Describes an execution job for an Amplify app. // // Job is a required field Job *Job `locationName:"job" type:"structure" required:"true"` @@ -6408,11 +6477,11 @@ func (s *GetJobOutput) SetJob(v *Job) *GetJobOutput { return s } -// Request structure for the get webhook request. +// The request structure for the get webhook request. type GetWebhookInput struct { _ struct{} `type:"structure"` - // Unique Id for a webhook. + // The unique ID for a webhook. // // WebhookId is a required field WebhookId *string `location:"uri" locationName:"webhookId" type:"string" required:"true"` @@ -6450,11 +6519,11 @@ func (s *GetWebhookInput) SetWebhookId(v string) *GetWebhookInput { return s } -// Result structure for the get webhook request. +// The result structure for the get webhook request. type GetWebhookOutput struct { _ struct{} `type:"structure"` - // Webhook structure. + // Describes the structure of a webhook. // // Webhook is a required field Webhook *Webhook `locationName:"webhook" type:"structure" required:"true"` @@ -6476,11 +6545,10 @@ func (s *GetWebhookOutput) SetWebhook(v *Webhook) *GetWebhookOutput { return s } -// Exception thrown when the service fails to perform an operation due to an -// internal issue. +// The service failed to perform an operation due to an internal issue. 
type InternalFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6497,17 +6565,17 @@ func (s InternalFailureException) GoString() string { func newErrorInternalFailureException(v protocol.ResponseMetadata) error { return &InternalFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalFailureException) Code() string { +func (s *InternalFailureException) Code() string { return "InternalFailureException" } // Message returns the exception's message. -func (s InternalFailureException) Message() string { +func (s *InternalFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6515,34 +6583,34 @@ func (s InternalFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalFailureException) OrigErr() error { +func (s *InternalFailureException) OrigErr() error { return nil } -func (s InternalFailureException) Error() string { +func (s *InternalFailureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalFailureException) RequestID() string { + return s.RespMetadata.RequestID } -// Structure for an execution job for an Amplify App. +// Describes an execution job for an Amplify app. type Job struct { _ struct{} `type:"structure"` - // Execution steps for an execution job, for an Amplify App. + // The execution steps for an execution job, for an Amplify app. // // Steps is a required field Steps []*Step `locationName:"steps" type:"list" required:"true"` - // Summary for an execution job for an Amplify App. + // Describes the summary for an execution job for an Amplify app. // // Summary is a required field Summary *JobSummary `locationName:"summary" type:"structure" required:"true"` @@ -6570,51 +6638,52 @@ func (s *Job) SetSummary(v *JobSummary) *Job { return s } -// Structure for the summary of a Job. +// Describes the summary for an execution job for an Amplify app. type JobSummary struct { _ struct{} `type:"structure"` - // Commit Id from 3rd party repository provider for the Job. + // The commit ID from a third-party repository provider for the job. // // CommitId is a required field CommitId *string `locationName:"commitId" type:"string" required:"true"` - // Commit message from 3rd party repository provider for the Job. + // The commit message from a third-party repository provider for the job. // // CommitMessage is a required field CommitMessage *string `locationName:"commitMessage" type:"string" required:"true"` - // Commit date / time for the Job. + // The commit date and time for the job. // // CommitTime is a required field CommitTime *time.Time `locationName:"commitTime" type:"timestamp" required:"true"` - // End date / time for the Job. + // The end date and time for the job. EndTime *time.Time `locationName:"endTime" type:"timestamp"` - // Arn for the Job. 
+ // The Amazon Resource Name (ARN) for the job. // // JobArn is a required field JobArn *string `locationName:"jobArn" type:"string" required:"true"` - // Unique Id for the Job. + // The unique ID for the job. // // JobId is a required field JobId *string `locationName:"jobId" type:"string" required:"true"` - // Type for the Job. \n "RELEASE": Manually released from source by using StartJob - // API. "RETRY": Manually retried by using StartJob API. "WEB_HOOK": Automatically - // triggered by WebHooks. + // The type for the job. If the value is RELEASE, the job was manually released + // from its source by using the StartJob API. If the value is RETRY, the job + // was manually retried using the StartJob API. If the value is WEB_HOOK, the + // job was automatically triggered by webhooks. // // JobType is a required field JobType *string `locationName:"jobType" type:"string" required:"true" enum:"JobType"` - // Start date / time for the Job. + // The start date and time for the job. // // StartTime is a required field StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"` - // Status for the Job. + // The current status for the job. // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"JobStatus"` @@ -6684,11 +6753,10 @@ func (s *JobSummary) SetStatus(v string) *JobSummary { return s } -// Exception thrown when a resource could not be created because of service -// limits. +// A resource could not be created because service quotas were exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6705,17 +6773,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6723,33 +6791,33 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } -// Request structure for an Amplify App list request. +// The request structure for the list apps request. type ListAppsInput struct { _ struct{} `type:"structure"` - // Maximum number of records to list in a single response. 
+ // The maximum number of records to list in a single response. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // Pagination token. If non-null pagination token is returned in a result, then - // pass its value in another request to fetch more entries. + // A pagination token. If non-null, the pagination token is returned in a result. + // Pass its value in another request to retrieve more entries. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -6788,18 +6856,18 @@ func (s *ListAppsInput) SetNextToken(v string) *ListAppsInput { return s } -// Result structure for an Amplify App list request. +// The result structure for an Amplify app list request. type ListAppsOutput struct { _ struct{} `type:"structure"` - // List of Amplify Apps. + // A list of Amplify apps. // // Apps is a required field Apps []*App `locationName:"apps" type:"list" required:"true"` - // Pagination token. Set to null to start listing Apps from start. If non-null - // pagination token is returned in a result, then pass its value in here to - // list more projects. + // A pagination token. Set to null to start listing apps from start. If non-null, + // the pagination token is returned in a result. Pass its value in here to list + // more projects. NextToken *string `locationName:"nextToken" type:"string"` } @@ -6825,31 +6893,31 @@ func (s *ListAppsOutput) SetNextToken(v string) *ListAppsOutput { return s } -// Request structure for the list artifacts request. +// Describes the request structure for the list artifacts request. type ListArtifactsInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name for a branch, part of an Amplify App. + // The name of a branch that is part of an Amplify app. // // BranchName is a required field BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` - // Unique Id for an Job. + // The unique ID for a job. // // JobId is a required field JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"` - // Maximum number of records to list in a single response. + // The maximum number of records to list in a single response. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // Pagination token. Set to null to start listing artifacts from start. If non-null - // pagination token is returned in a result, then pass its value in here to - // list more artifacts. + // A pagination token. Set to null to start listing artifacts from start. If + // a non-null pagination token is returned in a result, pass its value in here + // to list more artifacts. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -6924,17 +6992,17 @@ func (s *ListArtifactsInput) SetNextToken(v string) *ListArtifactsInput { return s } -// Result structure for the list artifacts request. +// The result structure for the list artifacts request. type ListArtifactsOutput struct { _ struct{} `type:"structure"` - // List of artifacts. + // A list of artifacts. // // Artifacts is a required field Artifacts []*Artifact `locationName:"artifacts" type:"list" required:"true"` - // Pagination token. If non-null pagination token is returned in a result, then - // pass its value in another request to fetch more entries. 
+ // A pagination token. If a non-null pagination token is returned in a result, + // pass its value in another request to retrieve more entries. NextToken *string `locationName:"nextToken" type:"string"` } @@ -6960,24 +7028,24 @@ func (s *ListArtifactsOutput) SetNextToken(v string) *ListArtifactsOutput { return s } -// Request structure for list backend environments request. +// The request structure for the list backend environments request. type ListBackendEnvironmentsInput struct { _ struct{} `type:"structure"` - // Unique Id for an amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name of the backend environment - EnvironmentName *string `locationName:"environmentName" min:"1" type:"string"` + // The name of the backend environment + EnvironmentName *string `location:"querystring" locationName:"environmentName" min:"1" type:"string"` - // Maximum number of records to list in a single response. + // The maximum number of records to list in a single response. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // Pagination token. Set to null to start listing backen environments from start. - // If a non-null pagination token is returned in a result, then pass its value - // in here to list more backend environments. + // A pagination token. Set to null to start listing backend environments from + // the start. If a non-null pagination token is returned in a result, pass its + // value in here to list more backend environments. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -7037,17 +7105,17 @@ func (s *ListBackendEnvironmentsInput) SetNextToken(v string) *ListBackendEnviro return s } -// Result structure for list backend environments result. +// The result structure for the list backend environments result. type ListBackendEnvironmentsOutput struct { _ struct{} `type:"structure"` - // List of backend environments for an Amplify App. + // The list of backend environments for an Amplify app. // // BackendEnvironments is a required field BackendEnvironments []*BackendEnvironment `locationName:"backendEnvironments" type:"list" required:"true"` - // Pagination token. If non-null pagination token is returned in a result, then - // pass its value in another request to fetch more entries. + // A pagination token. If a non-null pagination token is returned in a result, + // pass its value in another request to retrieve more entries. NextToken *string `locationName:"nextToken" type:"string"` } @@ -7073,20 +7141,20 @@ func (s *ListBackendEnvironmentsOutput) SetNextToken(v string) *ListBackendEnvir return s } -// Request structure for list branches request. +// The request structure for the list branches request. type ListBranchesInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Maximum number of records to list in a single response. + // The maximum number of records to list in a single response. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // Pagination token. Set to null to start listing branches from start. If a - // non-null pagination token is returned in a result, then pass its value in + // A pagination token. 
Set to null to start listing branches from the start. + // If a non-null pagination token is returned in a result, pass its value in // here to list more branches. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -7138,17 +7206,17 @@ func (s *ListBranchesInput) SetNextToken(v string) *ListBranchesInput { return s } -// Result structure for list branches request. +// The result structure for the list branches request. type ListBranchesOutput struct { _ struct{} `type:"structure"` - // List of branches for an Amplify App. + // A list of branches for an Amplify app. // // Branches is a required field Branches []*Branch `locationName:"branches" type:"list" required:"true"` - // Pagination token. If non-null pagination token is returned in a result, then - // pass its value in another request to fetch more entries. + // A pagination token. If a non-null pagination token is returned in a result, + // pass its value in another request to retrieve more entries. NextToken *string `locationName:"nextToken" type:"string"` } @@ -7174,21 +7242,21 @@ func (s *ListBranchesOutput) SetNextToken(v string) *ListBranchesOutput { return s } -// Request structure for the list Domain Associations request. +// The request structure for the list domain associations request. type ListDomainAssociationsInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Maximum number of records to list in a single response. + // The maximum number of records to list in a single response. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // Pagination token. Set to null to start listing Apps from start. If non-null - // pagination token is returned in a result, then pass its value in here to - // list more projects. + // A pagination token. Set to null to start listing apps from the start. If + // non-null, a pagination token is returned in a result. Pass its value in here + // to list more projects. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -7239,17 +7307,17 @@ func (s *ListDomainAssociationsInput) SetNextToken(v string) *ListDomainAssociat return s } -// Result structure for the list Domain Association request. +// The result structure for the list domain association request. type ListDomainAssociationsOutput struct { _ struct{} `type:"structure"` - // List of Domain Associations. + // A list of domain associations. // // DomainAssociations is a required field DomainAssociations []*DomainAssociation `locationName:"domainAssociations" type:"list" required:"true"` - // Pagination token. If non-null pagination token is returned in a result, then - // pass its value in another request to fetch more entries. + // A pagination token. If non-null, a pagination token is returned in a result. + // Pass its value in another request to retrieve more entries. NextToken *string `locationName:"nextToken" type:"string"` } @@ -7275,26 +7343,26 @@ func (s *ListDomainAssociationsOutput) SetNextToken(v string) *ListDomainAssocia return s } -// Request structure for list job request. +// The request structure for the list jobs request. type ListJobsInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. 
// // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name for a branch. + // The name for a branch. // // BranchName is a required field BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` - // Maximum number of records to list in a single response. + // The maximum number of records to list in a single response. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // Pagination token. Set to null to start listing steps from start. If a non-null - // pagination token is returned in a result, then pass its value in here to - // list more steps. + // A pagination token. Set to null to start listing steps from the start. If + // a non-null pagination token is returned in a result, pass its value in here + // to list more steps. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -7357,17 +7425,17 @@ func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput { return s } -// Maximum number of records to list in a single response. +// The maximum number of records to list in a single response. type ListJobsOutput struct { _ struct{} `type:"structure"` - // Result structure for list job result request. + // The result structure for the list job result request. // // JobSummaries is a required field JobSummaries []*JobSummary `locationName:"jobSummaries" type:"list" required:"true"` - // Pagination token. If non-null pagination token is returned in a result, then - // pass its value in another request to fetch more entries. + // A pagination token. If non-null the pagination token is returned in a result. + // Pass its value in another request to retrieve more entries. NextToken *string `locationName:"nextToken" type:"string"` } @@ -7393,11 +7461,11 @@ func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput { return s } -// Request structure used to list tags for resource. +// The request structure to use to list tags for a resource. type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // Resource arn used to list tags. + // The Amazon Resource Name (ARN) to use to list tags. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` @@ -7435,11 +7503,11 @@ func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResource return s } -// Response for list tags. +// The response for the list tags for resource request. type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // Tags result for response. + // A list of tags for the specified The Amazon Resource Name (ARN). Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } @@ -7459,21 +7527,21 @@ func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForRe return s } -// Request structure for the list webhooks request. +// The request structure for the list webhooks request. type ListWebhooksInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Maximum number of records to list in a single response. + // The maximum number of records to list in a single response. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // Pagination token. 
Set to null to start listing webhooks from start. If non-null - // pagination token is returned in a result, then pass its value in here to - // list more webhooks. + // A pagination token. Set to null to start listing webhooks from the start. + // If non-null,the pagination token is returned in a result. Pass its value + // in here to list more webhooks. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -7524,15 +7592,15 @@ func (s *ListWebhooksInput) SetNextToken(v string) *ListWebhooksInput { return s } -// Result structure for the list webhooks request. +// The result structure for the list webhooks request. type ListWebhooksOutput struct { _ struct{} `type:"structure"` - // Pagination token. If non-null pagination token is returned in a result, then - // pass its value in another request to fetch more entries. + // A pagination token. If non-null, the pagination token is returned in a result. + // Pass its value in another request to retrieve more entries. NextToken *string `locationName:"nextToken" type:"string"` - // List of webhooks. + // A list of webhooks. // // Webhooks is a required field Webhooks []*Webhook `locationName:"webhooks" type:"list" required:"true"` @@ -7560,10 +7628,10 @@ func (s *ListWebhooksOutput) SetWebhooks(v []*Webhook) *ListWebhooksOutput { return s } -// Exception thrown when an entity has not been found during an operation. +// An entity was not found during an operation. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7580,17 +7648,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7598,38 +7666,38 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } -// Structure with Production Branch information. +// Describes the information about a production branch for an Amplify app. type ProductionBranch struct { _ struct{} `type:"structure"` - // Branch Name for Production Branch. + // The branch name for the production branch. BranchName *string `locationName:"branchName" min:"1" type:"string"` - // Last Deploy Time of Production Branch. 
+ // The last deploy time of the production branch. LastDeployTime *time.Time `locationName:"lastDeployTime" type:"timestamp"` - // Status of Production Branch. + // The status of the production branch. Status *string `locationName:"status" min:"3" type:"string"` - // Thumbnail URL for Production Branch. + // The thumbnail URL for the production branch. ThumbnailUrl *string `locationName:"thumbnailUrl" min:"1" type:"string"` } @@ -7667,10 +7735,10 @@ func (s *ProductionBranch) SetThumbnailUrl(v string) *ProductionBranch { return s } -// Exception thrown when an operation fails due to non-existent resource. +// An operation failed due to a non-existent resource. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"code" type:"string"` @@ -7689,17 +7757,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7707,44 +7775,44 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } -// Request structure for start a deployment. +// The request structure for the start a deployment request. type StartDeploymentInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name for the branch, for the Job. + // The name for the branch, for the job. // // BranchName is a required field BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` - // The job id for this deployment, generated by create deployment request. + // The job ID for this deployment, generated by the create deployment request. JobId *string `locationName:"jobId" type:"string"` - // The sourceUrl for this deployment, used when calling start deployment without - // create deployment. SourceUrl can be any HTTP GET url that is public accessible - // and downloads a single zip. 
+ // The source URL for this deployment, used when calling start deployment without + // create deployment. The source URL can be any HTTP GET URL that is publicly + // accessible and downloads a single .zip file. SourceUrl *string `locationName:"sourceUrl" type:"string"` } @@ -7804,11 +7872,11 @@ func (s *StartDeploymentInput) SetSourceUrl(v string) *StartDeploymentInput { return s } -// Result structure for start a deployment. +// The result structure for the start a deployment request. type StartDeploymentOutput struct { _ struct{} `type:"structure"` - // Summary for the Job. + // The summary for the job. // // JobSummary is a required field JobSummary *JobSummary `locationName:"jobSummary" type:"structure" required:"true"` @@ -7830,39 +7898,40 @@ func (s *StartDeploymentOutput) SetJobSummary(v *JobSummary) *StartDeploymentOut return s } -// Request structure for Start job request. +// The request structure for the start job request. type StartJobInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name for the branch, for the Job. + // The branch name for the job. // // BranchName is a required field BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` - // Commit Id from 3rd party repository provider for the Job. + // The commit ID from a third-party repository provider for the job. CommitId *string `locationName:"commitId" type:"string"` - // Commit message from 3rd party repository provider for the Job. + // The commit message from a third-party repository provider for the job. CommitMessage *string `locationName:"commitMessage" type:"string"` - // Commit date / time for the Job. + // The commit date and time for the job. CommitTime *time.Time `locationName:"commitTime" type:"timestamp"` - // Unique Id for an existing job. Required for "RETRY" JobType. + // The unique ID for an existing job. This is required if the value of jobType + // is RETRY. JobId *string `locationName:"jobId" type:"string"` - // Descriptive reason for starting this job. + // A descriptive reason for starting this job. JobReason *string `locationName:"jobReason" type:"string"` - // Type for the Job. Available JobTypes are: \n "RELEASE": Start a new job with - // the latest change from the specified branch. Only available for apps that - // have connected to a repository. "RETRY": Retry an existing job. JobId is - // required for this type of job. + // Describes the type for the job. The job type RELEASE starts a new job with + // the latest change from the specified branch. This value is available only + // for apps that are connected to a repository. The job type RETRY retries an + // existing job. If the job type value is RETRY, the jobId is also required. // // JobType is a required field JobType *string `locationName:"jobType" type:"string" required:"true" enum:"JobType"` @@ -7951,11 +8020,11 @@ func (s *StartJobInput) SetJobType(v string) *StartJobInput { return s } -// Result structure for run job request. +// The result structure for the run job request. type StartJobOutput struct { _ struct{} `type:"structure"` - // Summary for the Job. + // The summary for the job. 
// // JobSummary is a required field JobSummary *JobSummary `locationName:"jobSummary" type:"structure" required:"true"` @@ -7977,49 +8046,49 @@ func (s *StartJobOutput) SetJobSummary(v *JobSummary) *StartJobOutput { return s } -// Structure for an execution step for an execution job, for an Amplify App. +// Describes an execution step, for an execution job, for an Amplify app. type Step struct { _ struct{} `type:"structure"` - // URL to the artifact for the execution step. + // The URL to the artifact for the execution step. ArtifactsUrl *string `locationName:"artifactsUrl" type:"string"` - // The context for current step, will include build image if step is build. + // The context for the current step. Includes a build image if the step is build. Context *string `locationName:"context" type:"string"` - // End date/ time of the execution step. + // The end date and time of the execution step. // // EndTime is a required field EndTime *time.Time `locationName:"endTime" type:"timestamp" required:"true"` - // URL to the logs for the execution step. + // The URL to the logs for the execution step. LogUrl *string `locationName:"logUrl" type:"string"` - // List of screenshot URLs for the execution step, if relevant. + // The list of screenshot URLs for the execution step, if relevant. Screenshots map[string]*string `locationName:"screenshots" type:"map"` - // Start date/ time of the execution step. + // The start date and time of the execution step. // // StartTime is a required field StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"` - // Status of the execution step. + // The status of the execution step. // // Status is a required field Status *string `locationName:"status" type:"string" required:"true" enum:"JobStatus"` - // The reason for current step status. + // The reason for the current step status. StatusReason *string `locationName:"statusReason" type:"string"` - // Name of the execution step. + // The name of the execution step. // // StepName is a required field StepName *string `locationName:"stepName" type:"string" required:"true"` - // URL to the test artifact for the execution step. + // The URL to the test artifact for the execution step. TestArtifactsUrl *string `locationName:"testArtifactsUrl" type:"string"` - // URL to the test config for the execution step. + // The URL to the test configuration for the execution step. TestConfigUrl *string `locationName:"testConfigUrl" type:"string"` } @@ -8099,21 +8168,21 @@ func (s *Step) SetTestConfigUrl(v string) *Step { return s } -// Request structure for stop job request. +// The request structure for the stop job request. type StopJobInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name for the branch, for the Job. + // The name for the branch, for the job. // // BranchName is a required field BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` - // Unique Id for the Job. + // The unique id for the job. // // JobId is a required field JobId *string `location:"uri" locationName:"jobId" type:"string" required:"true"` @@ -8175,11 +8244,11 @@ func (s *StopJobInput) SetJobId(v string) *StopJobInput { return s } -// Result structure for the stop job request. +// The result structure for the stop job request. 
type StopJobOutput struct { _ struct{} `type:"structure"` - // Summary for the Job. + // The summary for the job. // // JobSummary is a required field JobSummary *JobSummary `locationName:"jobSummary" type:"structure" required:"true"` @@ -8201,21 +8270,21 @@ func (s *StopJobOutput) SetJobSummary(v *JobSummary) *StopJobOutput { return s } -// Subdomain for the Domain Association. +// The subdomain for the domain association. type SubDomain struct { _ struct{} `type:"structure"` - // DNS record for the Subdomain. + // The DNS record for the subdomain. // // DnsRecord is a required field DnsRecord *string `locationName:"dnsRecord" type:"string" required:"true"` - // Setting structure for the Subdomain. + // Describes the settings for the subdomain. // // SubDomainSetting is a required field SubDomainSetting *SubDomainSetting `locationName:"subDomainSetting" type:"structure" required:"true"` - // Verified status of the Subdomain + // The verified status of the subdomain // // Verified is a required field Verified *bool `locationName:"verified" type:"boolean" required:"true"` @@ -8249,16 +8318,16 @@ func (s *SubDomain) SetVerified(v bool) *SubDomain { return s } -// Setting for the Subdomain. +// Describes the settings for the subdomain. type SubDomainSetting struct { _ struct{} `type:"structure"` - // Branch name setting for the Subdomain. + // The branch name setting for the subdomain. // // BranchName is a required field BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` - // Prefix setting for the Subdomain. + // The prefix setting for the subdomain. // // Prefix is a required field Prefix *string `locationName:"prefix" type:"string" required:"true"` @@ -8305,16 +8374,16 @@ func (s *SubDomainSetting) SetPrefix(v string) *SubDomainSetting { return s } -// Request structure used to tag resource. +// The request structure to tag a resource with a tag key and value. type TagResourceInput struct { _ struct{} `type:"structure"` - // Resource arn used to tag resource. + // The Amazon Resource Name (ARN) to use to tag a resource. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` - // Tags used to tag resource. + // The tags used to tag the resource. // // Tags is a required field Tags map[string]*string `locationName:"tags" min:"1" type:"map" required:"true"` @@ -8364,7 +8433,7 @@ func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { return s } -// Response for tag resource. +// The response for the tag resource request. type TagResourceOutput struct { _ struct{} `type:"structure"` } @@ -8379,10 +8448,10 @@ func (s TagResourceOutput) GoString() string { return s.String() } -// Exception thrown when an operation fails due to a lack of access. +// An operation failed due to a lack of access. type UnauthorizedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8399,17 +8468,17 @@ func (s UnauthorizedException) GoString() string { func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { return &UnauthorizedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s UnauthorizedException) Code() string { +func (s *UnauthorizedException) Code() string { return "UnauthorizedException" } // Message returns the exception's message. -func (s UnauthorizedException) Message() string { +func (s *UnauthorizedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8417,34 +8486,34 @@ func (s UnauthorizedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnauthorizedException) OrigErr() error { +func (s *UnauthorizedException) OrigErr() error { return nil } -func (s UnauthorizedException) Error() string { +func (s *UnauthorizedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnauthorizedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnauthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnauthorizedException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnauthorizedException) RequestID() string { + return s.RespMetadata.RequestID } -// Request structure used to untag resource. +// The request structure for the untag resource request. type UntagResourceInput struct { _ struct{} `type:"structure"` - // Resource arn used to untag resource. + // The Amazon Resource Name (ARN) to use to untag a resource. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` - // Tag keys used to untag resource. + // The tag keys to use to untag a resource. // // TagKeys is a required field TagKeys []*string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` @@ -8494,7 +8563,7 @@ func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { return s } -// Response for untag resource. +// The response for the untag resource request. type UntagResourceOutput struct { _ struct{} `type:"structure"` } @@ -8509,63 +8578,70 @@ func (s UntagResourceOutput) GoString() string { return s.String() } -// Request structure for update App request. +// The request structure for the update app request. type UpdateAppInput struct { _ struct{} `type:"structure"` - // Personal Access token for 3rd party source control system for an Amplify - // App, used to create webhook and read-only deploy key. Token is not stored. - AccessToken *string `locationName:"accessToken" min:"1" type:"string"` + // The personal access token for a third-party source control system for an + // Amplify app. The token is used to create webhook and a read-only deploy key. + // The token is not stored. + AccessToken *string `locationName:"accessToken" min:"1" type:"string" sensitive:"true"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Automated branch creation branchConfig for the Amplify App. + // The automated branch creation configuration for the Amplify app. AutoBranchCreationConfig *AutoBranchCreationConfig `locationName:"autoBranchCreationConfig" type:"structure"` - // Automated branch creation glob patterns for the Amplify App. + // Describes the automated branch creation glob patterns for the Amplify app. 
AutoBranchCreationPatterns []*string `locationName:"autoBranchCreationPatterns" type:"list"` - // Basic Authorization credentials for an Amplify App. - BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"` + // The basic authorization credentials for an Amplify app. + BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"` - // BuildSpec for an Amplify App. + // The build specification (build spec) for an Amplify app. BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"` - // Custom redirect / rewrite rules for an Amplify App. + // The custom redirect and rewrite rules for an Amplify app. CustomRules []*CustomRule `locationName:"customRules" type:"list"` - // Description for an Amplify App. + // The description for an Amplify app. Description *string `locationName:"description" type:"string"` - // Enables automated branch creation for the Amplify App. + // Enables automated branch creation for the Amplify app. EnableAutoBranchCreation *bool `locationName:"enableAutoBranchCreation" type:"boolean"` - // Enables Basic Authorization for an Amplify App. + // Enables basic authorization for an Amplify app. EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"` - // Enables branch auto-building for an Amplify App. + // Enables branch auto-building for an Amplify app. EnableBranchAutoBuild *bool `locationName:"enableBranchAutoBuild" type:"boolean"` - // Environment Variables for an Amplify App. + // Automatically disconnects a branch in the Amplify Console when you delete + // a branch from your Git repository. + EnableBranchAutoDeletion *bool `locationName:"enableBranchAutoDeletion" type:"boolean"` + + // The environment variables for an Amplify app. EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"` - // IAM service role for an Amplify App. + // The AWS Identity and Access Management (IAM) service role for an Amplify + // app. IamServiceRoleArn *string `locationName:"iamServiceRoleArn" min:"1" type:"string"` - // Name for an Amplify App. + // The name for an Amplify app. Name *string `locationName:"name" min:"1" type:"string"` - // OAuth token for 3rd party source control system for an Amplify App, used - // to create webhook and read-only deploy key. OAuth token is not stored. - OauthToken *string `locationName:"oauthToken" type:"string"` + // The OAuth token for a third-party source control system for an Amplify app. + // The token is used to create a webhook and a read-only deploy key. The OAuth + // token is not stored. + OauthToken *string `locationName:"oauthToken" type:"string" sensitive:"true"` - // Platform for an Amplify App. + // The platform for an Amplify app. Platform *string `locationName:"platform" type:"string" enum:"Platform"` - // Repository for an Amplify App + // The name of the repository for an Amplify app Repository *string `locationName:"repository" type:"string"` } @@ -8688,6 +8764,12 @@ func (s *UpdateAppInput) SetEnableBranchAutoBuild(v bool) *UpdateAppInput { return s } +// SetEnableBranchAutoDeletion sets the EnableBranchAutoDeletion field's value. +func (s *UpdateAppInput) SetEnableBranchAutoDeletion(v bool) *UpdateAppInput { + s.EnableBranchAutoDeletion = &v + return s +} + // SetEnvironmentVariables sets the EnvironmentVariables field's value. 
func (s *UpdateAppInput) SetEnvironmentVariables(v map[string]*string) *UpdateAppInput { s.EnvironmentVariables = v @@ -8724,11 +8806,11 @@ func (s *UpdateAppInput) SetRepository(v string) *UpdateAppInput { return s } -// Result structure for an Amplify App update request. +// The result structure for an Amplify app update request. type UpdateAppOutput struct { _ struct{} `type:"structure"` - // App structure for the updated App. + // Represents the updated Amplify app. // // App is a required field App *App `locationName:"app" type:"structure" required:"true"` @@ -8750,60 +8832,67 @@ func (s *UpdateAppOutput) SetApp(v *App) *UpdateAppOutput { return s } -// Request structure for update branch request. +// The request structure for the update branch request. type UpdateBranchInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // ARN for a Backend Environment, part of an Amplify App. + // The Amazon Resource Name (ARN) for a backend environment that is part of + // an Amplify app. BackendEnvironmentArn *string `locationName:"backendEnvironmentArn" min:"1" type:"string"` - // Basic Authorization credentials for the branch. - BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string"` + // The basic authorization credentials for the branch. + BasicAuthCredentials *string `locationName:"basicAuthCredentials" type:"string" sensitive:"true"` - // Name for the branch. + // The name for the branch. // // BranchName is a required field BranchName *string `location:"uri" locationName:"branchName" min:"1" type:"string" required:"true"` - // BuildSpec for the branch. + // The build specification (build spec) for the branch. BuildSpec *string `locationName:"buildSpec" min:"1" type:"string"` - // Description for the branch. + // The description for the branch. Description *string `locationName:"description" type:"string"` - // Display name for a branch, will use as the default domain prefix. + // The display name for a branch. This is used as the default domain prefix. DisplayName *string `locationName:"displayName" type:"string"` // Enables auto building for the branch. EnableAutoBuild *bool `locationName:"enableAutoBuild" type:"boolean"` - // Enables Basic Auth for the branch. + // Enables basic authorization for the branch. EnableBasicAuth *bool `locationName:"enableBasicAuth" type:"boolean"` // Enables notifications for the branch. EnableNotification *bool `locationName:"enableNotification" type:"boolean"` - // Enables Pull Request Preview for this branch. + // Performance mode optimizes for faster hosting performance by keeping content + // cached at the edge for a longer interval. Enabling performance mode will + // mean that hosting configuration or code changes can take up to 10 minutes + // to roll out. + EnablePerformanceMode *bool `locationName:"enablePerformanceMode" type:"boolean"` + + // Enables pull request preview for this branch. EnablePullRequestPreview *bool `locationName:"enablePullRequestPreview" type:"boolean"` - // Environment Variables for the branch. + // The environment variables for the branch. EnvironmentVariables map[string]*string `locationName:"environmentVariables" type:"map"` - // Framework for the branch. + // The framework for the branch. Framework *string `locationName:"framework" type:"string"` - // The Amplify Environment name for the pull request. 
+ // The Amplify environment name for the pull request. PullRequestEnvironmentName *string `locationName:"pullRequestEnvironmentName" type:"string"` - // Stage for the branch. + // Describes the current stage for the branch. Stage *string `locationName:"stage" type:"string" enum:"Stage"` - // The content TTL for the website in seconds. + // The content Time to Live (TTL) for the website in seconds. Ttl *string `locationName:"ttl" type:"string"` } @@ -8905,6 +8994,12 @@ func (s *UpdateBranchInput) SetEnableNotification(v bool) *UpdateBranchInput { return s } +// SetEnablePerformanceMode sets the EnablePerformanceMode field's value. +func (s *UpdateBranchInput) SetEnablePerformanceMode(v bool) *UpdateBranchInput { + s.EnablePerformanceMode = &v + return s +} + // SetEnablePullRequestPreview sets the EnablePullRequestPreview field's value. func (s *UpdateBranchInput) SetEnablePullRequestPreview(v bool) *UpdateBranchInput { s.EnablePullRequestPreview = &v @@ -8941,11 +9036,11 @@ func (s *UpdateBranchInput) SetTtl(v string) *UpdateBranchInput { return s } -// Result structure for update branch request. +// The result structure for the update branch request. type UpdateBranchOutput struct { _ struct{} `type:"structure"` - // Branch structure for an Amplify App. + // The branch for an Amplify app, which maps to a third-party repository branch. // // Branch is a required field Branch *Branch `locationName:"branch" type:"structure" required:"true"` @@ -8967,24 +9062,31 @@ func (s *UpdateBranchOutput) SetBranch(v *Branch) *UpdateBranchOutput { return s } -// Request structure for update Domain Association request. +// The request structure for the update domain association request. type UpdateDomainAssociationInput struct { _ struct{} `type:"structure"` - // Unique Id for an Amplify App. + // The unique ID for an Amplify app. // // AppId is a required field AppId *string `location:"uri" locationName:"appId" min:"1" type:"string" required:"true"` - // Name of the domain. + // Sets the branch patterns for automatic subdomain creation. + AutoSubDomainCreationPatterns []*string `locationName:"autoSubDomainCreationPatterns" type:"list"` + + // The required AWS Identity and Access Management (IAM) service role for the + // Amazon Resource Name (ARN) for automatically creating subdomains. + AutoSubDomainIAMRole *string `locationName:"autoSubDomainIAMRole" type:"string"` + + // The name of the domain. // // DomainName is a required field DomainName *string `location:"uri" locationName:"domainName" type:"string" required:"true"` - // Enables automated creation of Subdomains for branches. (Currently not supported) + // Enables the automated creation of subdomains for branches. EnableAutoSubDomain *bool `locationName:"enableAutoSubDomain" type:"boolean"` - // Setting structure for the Subdomain. + // Describes the settings for the subdomain. // // SubDomainSettings is a required field SubDomainSettings []*SubDomainSetting `locationName:"subDomainSettings" type:"list" required:"true"` @@ -9041,6 +9143,18 @@ func (s *UpdateDomainAssociationInput) SetAppId(v string) *UpdateDomainAssociati return s } +// SetAutoSubDomainCreationPatterns sets the AutoSubDomainCreationPatterns field's value. +func (s *UpdateDomainAssociationInput) SetAutoSubDomainCreationPatterns(v []*string) *UpdateDomainAssociationInput { + s.AutoSubDomainCreationPatterns = v + return s +} + +// SetAutoSubDomainIAMRole sets the AutoSubDomainIAMRole field's value. 
+func (s *UpdateDomainAssociationInput) SetAutoSubDomainIAMRole(v string) *UpdateDomainAssociationInput { + s.AutoSubDomainIAMRole = &v + return s +} + // SetDomainName sets the DomainName field's value. func (s *UpdateDomainAssociationInput) SetDomainName(v string) *UpdateDomainAssociationInput { s.DomainName = &v @@ -9059,11 +9173,12 @@ func (s *UpdateDomainAssociationInput) SetSubDomainSettings(v []*SubDomainSettin return s } -// Result structure for the update Domain Association request. +// The result structure for the update domain association request. type UpdateDomainAssociationOutput struct { _ struct{} `type:"structure"` - // Domain Association structure. + // Describes a domain association, which associates a custom domain with an + // Amplify app. // // DomainAssociation is a required field DomainAssociation *DomainAssociation `locationName:"domainAssociation" type:"structure" required:"true"` @@ -9085,17 +9200,17 @@ func (s *UpdateDomainAssociationOutput) SetDomainAssociation(v *DomainAssociatio return s } -// Request structure for update webhook request. +// The request structure for the update webhook request. type UpdateWebhookInput struct { _ struct{} `type:"structure"` - // Name for a branch, part of an Amplify App. + // The name for a branch that is part of an Amplify app. BranchName *string `locationName:"branchName" min:"1" type:"string"` - // Description for a webhook. + // The description for a webhook. Description *string `locationName:"description" type:"string"` - // Unique Id for a webhook. + // The unique ID for a webhook. // // WebhookId is a required field WebhookId *string `location:"uri" locationName:"webhookId" type:"string" required:"true"` @@ -9148,11 +9263,11 @@ func (s *UpdateWebhookInput) SetWebhookId(v string) *UpdateWebhookInput { return s } -// Result structure for the update webhook request. +// The result structure for the update webhook request. type UpdateWebhookOutput struct { _ struct{} `type:"structure"` - // Webhook structure. + // Describes a webhook that connects repository events to an Amplify app. // // Webhook is a required field Webhook *Webhook `locationName:"webhook" type:"structure" required:"true"` @@ -9174,41 +9289,41 @@ func (s *UpdateWebhookOutput) SetWebhook(v *Webhook) *UpdateWebhookOutput { return s } -// Structure for webhook, which associates a webhook with an Amplify App. +// Describes a webhook that connects repository events to an Amplify app. type Webhook struct { _ struct{} `type:"structure"` - // Name for a branch, part of an Amplify App. + // The name for a branch that is part of an Amplify app. // // BranchName is a required field BranchName *string `locationName:"branchName" min:"1" type:"string" required:"true"` - // Create date / time for a webhook. + // The create date and time for a webhook. // // CreateTime is a required field CreateTime *time.Time `locationName:"createTime" type:"timestamp" required:"true"` - // Description for a webhook. + // The description for a webhook. // // Description is a required field Description *string `locationName:"description" type:"string" required:"true"` - // Update date / time for a webhook. + // Updates the date and time for a webhook. // // UpdateTime is a required field UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" required:"true"` - // ARN for the webhook. + // The Amazon Resource Name (ARN) for the webhook. // // WebhookArn is a required field WebhookArn *string `locationName:"webhookArn" type:"string" required:"true"` - // Id of the webhook. 
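// A short sketch of the automatic-subdomain options added to UpdateDomainAssociationInput
// above (AutoSubDomainCreationPatterns and AutoSubDomainIAMRole); the app ID, domain name,
// branch patterns, and IAM role ARN are placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/amplify"
)

func main() {
	svc := amplify.New(session.Must(session.NewSession()))

	_, err := svc.UpdateDomainAssociation(&amplify.UpdateDomainAssociationInput{
		AppId:      aws.String("d1a2b3c4example"),
		DomainName: aws.String("example.com"),
		// Automatically create a subdomain for every branch matching these patterns.
		EnableAutoSubDomain:           aws.Bool(true),
		AutoSubDomainCreationPatterns: aws.StringSlice([]string{"feature/*", "release/*"}),
		AutoSubDomainIAMRole:          aws.String("arn:aws:iam::123456789012:role/AmplifyAutoSubDomainRole"),
		SubDomainSettings: []*amplify.SubDomainSetting{
			{BranchName: aws.String("main"), Prefix: aws.String("www")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}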
+ // The ID of the webhook. // // WebhookId is a required field WebhookId *string `locationName:"webhookId" type:"string" required:"true"` - // Url of the webhook. + // The URL of the webhook. // // WebhookUrl is a required field WebhookUrl *string `locationName:"webhookUrl" type:"string" required:"true"` @@ -9292,6 +9407,20 @@ const ( DomainStatusUpdating = "UPDATING" ) +// DomainStatus_Values returns all elements of the DomainStatus enum +func DomainStatus_Values() []string { + return []string{ + DomainStatusPendingVerification, + DomainStatusInProgress, + DomainStatusAvailable, + DomainStatusPendingDeployment, + DomainStatusFailed, + DomainStatusCreating, + DomainStatusRequestingCertificate, + DomainStatusUpdating, + } +} + const ( // JobStatusPending is a JobStatus enum value JobStatusPending = "PENDING" @@ -9315,6 +9444,19 @@ const ( JobStatusCancelled = "CANCELLED" ) +// JobStatus_Values returns all elements of the JobStatus enum +func JobStatus_Values() []string { + return []string{ + JobStatusPending, + JobStatusProvisioning, + JobStatusRunning, + JobStatusFailed, + JobStatusSucceed, + JobStatusCancelling, + JobStatusCancelled, + } +} + const ( // JobTypeRelease is a JobType enum value JobTypeRelease = "RELEASE" @@ -9329,11 +9471,28 @@ const ( JobTypeWebHook = "WEB_HOOK" ) +// JobType_Values returns all elements of the JobType enum +func JobType_Values() []string { + return []string{ + JobTypeRelease, + JobTypeRetry, + JobTypeManual, + JobTypeWebHook, + } +} + const ( // PlatformWeb is a Platform enum value PlatformWeb = "WEB" ) +// Platform_Values returns all elements of the Platform enum +func Platform_Values() []string { + return []string{ + PlatformWeb, + } +} + const ( // StageProduction is a Stage enum value StageProduction = "PRODUCTION" @@ -9350,3 +9509,14 @@ const ( // StagePullRequest is a Stage enum value StagePullRequest = "PULL_REQUEST" ) + +// Stage_Values returns all elements of the Stage enum +func Stage_Values() []string { + return []string{ + StageProduction, + StageBeta, + StageDevelopment, + StageExperimental, + StagePullRequest, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/amplify/doc.go b/vendor/github.com/aws/aws-sdk-go/service/amplify/doc.go index d00cb08eb..e1395d3fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/amplify/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/amplify/doc.go @@ -3,8 +3,13 @@ // Package amplify provides the client and types for making API // requests to AWS Amplify. // -// Amplify is a fully managed continuous deployment and hosting service for -// modern web apps. +// Amplify enables developers to develop and deploy cloud-powered mobile and +// web apps. The Amplify Console provides a continuous delivery and hosting +// service for web applications. For more information, see the Amplify Console +// User Guide (https://docs.aws.amazon.com/amplify/latest/userguide/welcome.html). +// The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and +// documentation for client app development. For more information, see the Amplify +// Framework. (https://docs.amplify.aws/) // // See https://docs.aws.amazon.com/goto/WebAPI/amplify-2017-07-25 for more information on this service. 
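// The *_Values helpers added above (DomainStatus_Values, Stage_Values, and so on) return
// every member of the corresponding enum. A minimal sketch of how such helpers are typically
// consumed for input validation; the plugin-SDK validator mentioned in the comment is only an
// illustrative pointer, the code itself uses a plain membership check.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/amplify"
)

// contains reports whether value is one of the enum members.
func contains(valid []string, value string) bool {
	for _, v := range valid {
		if v == value {
			return true
		}
	}
	return false
}

func main() {
	// e.g. validation.StringInSlice(amplify.Stage_Values(), false) in a Terraform resource
	// schema, or a plain membership check as below.
	fmt.Println(contains(amplify.Stage_Values(), "PRODUCTION")) // true
	fmt.Println(contains(amplify.Stage_Values(), "STAGING"))    // false
}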
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/amplify/errors.go b/vendor/github.com/aws/aws-sdk-go/service/amplify/errors.go index 4e406d9ce..f79cee839 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/amplify/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/amplify/errors.go @@ -11,46 +11,43 @@ const ( // ErrCodeBadRequestException for service response error code // "BadRequestException". // - // Exception thrown when a request contains unexpected data. + // A request contains unexpected data. ErrCodeBadRequestException = "BadRequestException" // ErrCodeDependentServiceFailureException for service response error code // "DependentServiceFailureException". // - // Exception thrown when an operation fails due to a dependent service throwing - // an exception. + // An operation failed because a dependent service threw an exception. ErrCodeDependentServiceFailureException = "DependentServiceFailureException" // ErrCodeInternalFailureException for service response error code // "InternalFailureException". // - // Exception thrown when the service fails to perform an operation due to an - // internal issue. + // The service failed to perform an operation due to an internal issue. ErrCodeInternalFailureException = "InternalFailureException" // ErrCodeLimitExceededException for service response error code // "LimitExceededException". // - // Exception thrown when a resource could not be created because of service - // limits. + // A resource could not be created because service quotas were exceeded. ErrCodeLimitExceededException = "LimitExceededException" // ErrCodeNotFoundException for service response error code // "NotFoundException". // - // Exception thrown when an entity has not been found during an operation. + // An entity was not found during an operation. ErrCodeNotFoundException = "NotFoundException" // ErrCodeResourceNotFoundException for service response error code // "ResourceNotFoundException". // - // Exception thrown when an operation fails due to non-existent resource. + // An operation failed due to a non-existent resource. ErrCodeResourceNotFoundException = "ResourceNotFoundException" // ErrCodeUnauthorizedException for service response error code // "UnauthorizedException". // - // Exception thrown when an operation fails due to a lack of access. + // An operation failed due to a lack of access. 
ErrCodeUnauthorizedException = "UnauthorizedException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go b/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go index 35ff98cf3..340a68be1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go index e2e1306b9..a842a6c79 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go @@ -4624,6 +4624,9 @@ func (c *APIGateway) GetDeploymentsRequest(input *GetDeploymentsInput) (req *req // The submitted request is not valid, for example, the input is incomplete // or incorrect. See the accompanying error message for details. // +// * NotFoundException +// The requested resource is not found. Make sure that the request URI is correct. +// // * UnauthorizedException // The request is denied because the caller has insufficient permissions. // @@ -11339,7 +11342,9 @@ func (c *APIGateway) UpdateVpcLinkWithContext(ctx aws.Context, input *UpdateVpcL type AccessLogSettings struct { _ struct{} `type:"structure"` - // The ARN of the CloudWatch Logs log group to receive access logs. + // The Amazon Resource Name (ARN) of the CloudWatch Logs log group or Kinesis + // Data Firehose delivery stream to receive access logs. If you specify a Kinesis + // Data Firehose delivery stream, the stream name must begin with amazon-apigateway-. DestinationArn *string `locationName:"destinationArn" type:"string"` // A single line format of the access logs of data, as specified by selected @@ -11767,8 +11772,8 @@ func (s *Authorizer) SetType(v string) *Authorizer { // The submitted request is not valid, for example, the input is incomplete // or incorrect. See the accompanying error message for details. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11785,17 +11790,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11803,22 +11808,22 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the base path that callers of the API must provide as part of @@ -12004,8 +12009,8 @@ func (s *ClientCertificate) SetTags(v map[string]*string) *ClientCertificate { // The request configuration has conflicts. For details, see the accompanying // error message. type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12022,17 +12027,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12040,22 +12045,22 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } // Request to create an ApiKey resource. @@ -12073,7 +12078,8 @@ type CreateApiKeyInput struct { Enabled *bool `locationName:"enabled" type:"boolean"` // Specifies whether (true) or not (false) the key identifier is distinct from - // the created API key value. + // the created API key value. This parameter is deprecated and should not be + // used. GenerateDistinctId *bool `locationName:"generateDistinctId" type:"boolean"` // The name of the ApiKey. @@ -12346,8 +12352,8 @@ type CreateBasePathMappingInput struct { RestApiId *string `locationName:"restApiId" type:"string" required:"true"` // The name of the API's stage that you want to use for this mapping. Specify - // '(none)' if you do not want callers to explicitly specify the stage name - // after any base path name. 
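// The receiver changes above (value receivers with an unexported respMetadata field become
// pointer receivers with an exported RespMetadata field) mean the modeled API Gateway
// exceptions surface as pointer types. A minimal sketch of matching one with errors.As; the
// REST API ID is a placeholder.
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

func main() {
	svc := apigateway.New(session.Must(session.NewSession()))

	_, err := svc.GetRestApi(&apigateway.GetRestApiInput{
		RestApiId: aws.String("abc123example"),
	})

	var nfe *apigateway.NotFoundException
	if errors.As(err, &nfe) {
		// The exported RespMetadata now carries the HTTP status and request ID.
		fmt.Println("not found:", nfe.Message(), nfe.StatusCode(), nfe.RequestID())
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}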
+ // '(none)' if you want callers to explicitly specify the stage name after any + // base path name. Stage *string `locationName:"stage" type:"string"` } @@ -12713,6 +12719,11 @@ type CreateDomainNameInput struct { // of the domain name. EndpointConfiguration *EndpointConfiguration `locationName:"endpointConfiguration" type:"structure"` + // If specified, API Gateway performs two-way authentication between the client + // and the server. Clients must present a trusted certificate to access your + // custom domain name. + MutualTlsAuthentication *MutualTlsAuthenticationInput `locationName:"mutualTlsAuthentication" type:"structure"` + // The reference to an AWS-managed certificate that will be used by regional // endpoint for this domain name. AWS Certificate Manager is the only supported // source. @@ -12797,6 +12808,12 @@ func (s *CreateDomainNameInput) SetEndpointConfiguration(v *EndpointConfiguratio return s } +// SetMutualTlsAuthentication sets the MutualTlsAuthentication field's value. +func (s *CreateDomainNameInput) SetMutualTlsAuthentication(v *MutualTlsAuthenticationInput) *CreateDomainNameInput { + s.MutualTlsAuthentication = v + return s +} + // SetRegionalCertificateArn sets the RegionalCertificateArn field's value. func (s *CreateDomainNameInput) SetRegionalCertificateArn(v string) *CreateDomainNameInput { s.RegionalCertificateArn = &v @@ -13516,8 +13533,8 @@ type CreateVpcLinkInput struct { // tag value can be up to 256 characters. Tags map[string]*string `locationName:"tags" type:"map"` - // [Required] The ARNs of network load balancers of the VPC targeted by the - // VPC link. The network load balancers must be owned by the same AWS account + // [Required] The ARN of the network load balancer of the VPC targeted by the + // VPC link. The network load balancer must be owned by the same AWS account // of the API owner. // // TargetArns is a required field @@ -15533,6 +15550,12 @@ type DomainName struct { // of the domain name. EndpointConfiguration *EndpointConfiguration `locationName:"endpointConfiguration" type:"structure"` + // The mutual TLS authentication configuration for a custom domain name. If + // specified, API Gateway performs two-way authentication between the client + // and the server. Clients must present a trusted certificate to access your + // API. + MutualTlsAuthentication *MutualTlsAuthentication `locationName:"mutualTlsAuthentication" type:"structure"` + // The reference to an AWS-managed certificate that will be used for validating // the regional domain name. AWS Certificate Manager is the only supported source. RegionalCertificateArn *string `locationName:"regionalCertificateArn" type:"string"` @@ -15624,6 +15647,12 @@ func (s *DomainName) SetEndpointConfiguration(v *EndpointConfiguration) *DomainN return s } +// SetMutualTlsAuthentication sets the MutualTlsAuthentication field's value. +func (s *DomainName) SetMutualTlsAuthentication(v *MutualTlsAuthentication) *DomainName { + s.MutualTlsAuthentication = v + return s +} + // SetRegionalCertificateArn sets the RegionalCertificateArn field's value. 
func (s *DomainName) SetRegionalCertificateArn(v string) *DomainName { s.RegionalCertificateArn = &v @@ -18092,7 +18121,7 @@ func (s *GetModelTemplateInput) SetRestApiId(v string) *GetModelTemplateInput { type GetModelTemplateOutput struct { _ struct{} `type:"structure"` - // The Apache Velocity Template Language (VTL) (https://velocity.apache.org/engine/devel/vtl-reference-guide.html) + // The Apache Velocity Template Language (VTL) (https://velocity.apache.org/engine/devel/vtl-reference.html) // template content used for the template resource. Value *string `locationName:"value" type:"string"` } @@ -19047,8 +19076,7 @@ type GetTagsInput struct { // set. Position *string `location:"querystring" locationName:"position" type:"string"` - // [Required] The ARN of a resource that can be tagged. The resource ARN must - // be URL-encoded. + // [Required] The ARN of a resource that can be tagged. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resource_arn" type:"string" required:"true"` @@ -19845,7 +19873,7 @@ type ImportRestApiInput struct { // [Required] The POST request body containing external API definitions. Currently, // only OpenAPI definition JSON/YAML files are supported. The maximum size of - // the API definition file is 2MB. + // the API definition file is 6MB. // // Body is a required field Body []byte `locationName:"body" type:"blob" required:"true"` @@ -19934,9 +19962,10 @@ type Integration struct { // Method requestParameters. CacheKeyParameters []*string `locationName:"cacheKeyParameters" type:"list"` - // An API-specific tag group of related cached parameters. To be valid values - // for cacheKeyParameters, these parameters must also be specified for Method - // requestParameters. + // Specifies a group of related cached parameters. By default, API Gateway uses + // the resource ID as the cacheNamespace. You can specify the same cacheNamespace + // across resources to return the same cached data for requests to different + // resources. CacheNamespace *string `locationName:"cacheNamespace" type:"string"` // The (id (https://docs.aws.amazon.com/apigateway/api-reference/resource/vpc-link/#id)) @@ -20041,6 +20070,9 @@ type Integration struct { // milliseconds or 29 seconds. TimeoutInMillis *int64 `locationName:"timeoutInMillis" type:"integer"` + // Specifies the TLS configuration for an integration. + TlsConfig *TlsConfig `locationName:"tlsConfig" type:"structure"` + // Specifies an API method integration type. The valid value is one of the following: // // * AWS: for integrating the API method request with an AWS service action, @@ -20176,6 +20208,12 @@ func (s *Integration) SetTimeoutInMillis(v int64) *Integration { return s } +// SetTlsConfig sets the TlsConfig field's value. +func (s *Integration) SetTlsConfig(v *TlsConfig) *Integration { + s.TlsConfig = v + return s +} + // SetType sets the Type field's value. func (s *Integration) SetType(v string) *Integration { s.Type = &v @@ -20283,8 +20321,8 @@ func (s *IntegrationResponse) SetStatusCode(v string) *IntegrationResponse { // The request exceeded the rate limit. Retry after the specified time period. 
type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -20303,17 +20341,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20321,22 +20359,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Represents a client-facing interface by which the client calls the API to @@ -20720,7 +20758,9 @@ type MethodSetting struct { // Specifies the logging level for this method, which affects the log entries // pushed to Amazon CloudWatch Logs. The PATCH path for this setting is /{method_setting_key}/logging/loglevel, - // and the available levels are OFF, ERROR, and INFO. + // and the available levels are OFF, ERROR, and INFO. Choose ERROR to write + // only error-level entries to CloudWatch Logs, or choose INFO to include all + // ERROR events as well as extra informational events. LoggingLevel *string `locationName:"loggingLevel" type:"string"` // Specifies whether Amazon CloudWatch metrics are enabled for this method. @@ -20928,10 +20968,104 @@ func (s *Model) SetSchema(v string) *Model { return s } +// If specified, API Gateway performs two-way authentication between the client +// and the server. Clients must present a trusted certificate to access your +// custom domain name. +type MutualTlsAuthentication struct { + _ struct{} `type:"structure"` + + // An Amazon S3 URL that specifies the truststore for mutual TLS authentication, + // for example s3://bucket-name/key-name. The truststore can contain certificates + // from public or private certificate authorities. To update the truststore, + // upload a new version to S3, and then update your custom domain name to use + // the new version. To update the truststore, you must have permissions to access + // the S3 object. + TruststoreUri *string `locationName:"truststoreUri" type:"string"` + + // The version of the S3 object that contains your truststore. To specify a + // version, you must have versioning enabled for the S3 bucket. 
+ TruststoreVersion *string `locationName:"truststoreVersion" type:"string"` + + // A list of warnings that API Gateway returns while processing your truststore. + // Invalid certificates produce warnings. Mutual TLS is still enabled, but some + // clients might not be able to access your API. To resolve warnings, upload + // a new truststore to S3, and then update you domain name to use the new version. + TruststoreWarnings []*string `locationName:"truststoreWarnings" type:"list"` +} + +// String returns the string representation +func (s MutualTlsAuthentication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MutualTlsAuthentication) GoString() string { + return s.String() +} + +// SetTruststoreUri sets the TruststoreUri field's value. +func (s *MutualTlsAuthentication) SetTruststoreUri(v string) *MutualTlsAuthentication { + s.TruststoreUri = &v + return s +} + +// SetTruststoreVersion sets the TruststoreVersion field's value. +func (s *MutualTlsAuthentication) SetTruststoreVersion(v string) *MutualTlsAuthentication { + s.TruststoreVersion = &v + return s +} + +// SetTruststoreWarnings sets the TruststoreWarnings field's value. +func (s *MutualTlsAuthentication) SetTruststoreWarnings(v []*string) *MutualTlsAuthentication { + s.TruststoreWarnings = v + return s +} + +// If specified, API Gateway performs two-way authentication between the client +// and the server. Clients must present a trusted certificate to access your +// custom domain name. +type MutualTlsAuthenticationInput struct { + _ struct{} `type:"structure"` + + // An Amazon S3 resource ARN that specifies the truststore for mutual TLS authentication, + // for example, s3://bucket-name/key-name. The truststore can contain certificates + // from public or private certificate authorities. To update the truststore, + // upload a new version to S3, and then update your custom domain name to use + // the new version. To update the truststore, you must have permissions to access + // the S3 object. + TruststoreUri *string `locationName:"truststoreUri" type:"string"` + + // The version of the S3 object that contains your truststore. To specify a + // version, you must have versioning enabled for the S3 bucket. + TruststoreVersion *string `locationName:"truststoreVersion" type:"string"` +} + +// String returns the string representation +func (s MutualTlsAuthenticationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MutualTlsAuthenticationInput) GoString() string { + return s.String() +} + +// SetTruststoreUri sets the TruststoreUri field's value. +func (s *MutualTlsAuthenticationInput) SetTruststoreUri(v string) *MutualTlsAuthenticationInput { + s.TruststoreUri = &v + return s +} + +// SetTruststoreVersion sets the TruststoreVersion field's value. +func (s *MutualTlsAuthenticationInput) SetTruststoreVersion(v string) *MutualTlsAuthenticationInput { + s.TruststoreVersion = &v + return s +} + // The requested resource is not found. Make sure that the request URI is correct. 
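// A hedged sketch of configuring the mutual TLS support documented above when creating a
// custom domain name, assuming CreateDomainName returns the DomainName resource shown
// earlier in this diff. The domain, certificate ARN, and S3 truststore URI are placeholders;
// the truststore object must already exist in S3.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

func main() {
	svc := apigateway.New(session.Must(session.NewSession()))

	out, err := svc.CreateDomainName(&apigateway.CreateDomainNameInput{
		DomainName:             aws.String("api.example.com"),
		RegionalCertificateArn: aws.String("arn:aws:acm:us-west-2:123456789012:certificate/example"),
		EndpointConfiguration: &apigateway.EndpointConfiguration{
			Types: aws.StringSlice([]string{apigateway.EndpointTypeRegional}),
		},
		MutualTlsAuthentication: &apigateway.MutualTlsAuthenticationInput{
			TruststoreUri: aws.String("s3://my-truststore-bucket/truststore.pem"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Warnings about invalid certificates in the truststore surface on the returned domain.
	if out.MutualTlsAuthentication != nil {
		fmt.Println(aws.StringValueSlice(out.MutualTlsAuthentication.TruststoreWarnings))
	}
}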
type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20948,17 +21082,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20966,22 +21100,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // A single patch operation to apply to the specified resource. Please refer @@ -21188,10 +21322,15 @@ func (s *PutGatewayResponseInput) SetStatusCode(v string) *PutGatewayResponseInp type PutIntegrationInput struct { _ struct{} `type:"structure"` - // An API-specific tag group of related cached parameters. + // A list of request parameters whose values API Gateway caches. To be valid + // values for cacheKeyParameters, these parameters must also be specified for + // Method requestParameters. CacheKeyParameters []*string `locationName:"cacheKeyParameters" type:"list"` - // A list of request parameters whose values are to be cached. + // Specifies a group of related cached parameters. By default, API Gateway uses + // the resource ID as the cacheNamespace. You can specify the same cacheNamespace + // across resources to return the same cached data for requests to different + // resources. CacheNamespace *string `locationName:"cacheNamespace" type:"string"` // The (id (https://docs.aws.amazon.com/apigateway/api-reference/resource/vpc-link/#id)) @@ -21276,6 +21415,8 @@ type PutIntegrationInput struct { // milliseconds or 29 seconds. TimeoutInMillis *int64 `locationName:"timeoutInMillis" type:"integer"` + TlsConfig *TlsConfig `locationName:"tlsConfig" type:"structure"` + // [Required] Specifies a put integration input's type. // // Type is a required field @@ -21430,6 +21571,12 @@ func (s *PutIntegrationInput) SetTimeoutInMillis(v int64) *PutIntegrationInput { return s } +// SetTlsConfig sets the TlsConfig field's value. +func (s *PutIntegrationInput) SetTlsConfig(v *TlsConfig) *PutIntegrationInput { + s.TlsConfig = v + return s +} + // SetType sets the Type field's value. 
func (s *PutIntegrationInput) SetType(v string) *PutIntegrationInput { s.Type = &v @@ -21896,7 +22043,7 @@ type PutRestApiInput struct { // [Required] The PUT request body containing external API definitions. Currently, // only OpenAPI definition JSON/YAML files are supported. The maximum size of - // the API definition file is 2MB. + // the API definition file is 6MB. // // Body is a required field Body []byte `locationName:"body" type:"blob" required:"true"` @@ -22395,8 +22542,8 @@ func (s *SdkType) SetId(v string) *SdkType { // The requested service is not available. For details see the accompanying // error message. Retry after the specified time period. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -22415,17 +22562,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22433,22 +22580,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // Represents a unique identifier for a version of a deployed RestApi that is @@ -22666,8 +22813,7 @@ func (s *StageKey) SetStageName(v string) *StageKey { type TagResourceInput struct { _ struct{} `type:"structure"` - // [Required] The ARN of a resource that can be tagged. The resource ARN must - // be URL-encoded. + // [Required] The ARN of a resource that can be tagged. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resource_arn" type:"string" required:"true"` @@ -23175,11 +23321,41 @@ func (s *ThrottleSettings) SetRateLimit(v float64) *ThrottleSettings { return s } +type TlsConfig struct { + _ struct{} `type:"structure"` + + // Specifies whether or not API Gateway skips verification that the certificate + // for an integration endpoint is issued by a supported certificate authority + // (https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-supported-certificate-authorities-for-http-endpoints.html). 
+ // This isn’t recommended, but it enables you to use certificates that are + // signed by private certificate authorities, or certificates that are self-signed. + // If enabled, API Gateway still performs basic certificate validation, which + // includes checking the certificate's expiration date, hostname, and presence + // of a root certificate authority. Supported only for HTTP and HTTP_PROXY integrations. + InsecureSkipVerification *bool `locationName:"insecureSkipVerification" type:"boolean"` +} + +// String returns the string representation +func (s TlsConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TlsConfig) GoString() string { + return s.String() +} + +// SetInsecureSkipVerification sets the InsecureSkipVerification field's value. +func (s *TlsConfig) SetInsecureSkipVerification(v bool) *TlsConfig { + s.InsecureSkipVerification = &v + return s +} + // The request has reached its throttling limit. Retry after the specified time // period. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -23198,17 +23374,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23216,28 +23392,28 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } // The request is denied because the caller has insufficient permissions. type UnauthorizedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -23254,17 +23430,17 @@ func (s UnauthorizedException) GoString() string { func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { return &UnauthorizedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
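// A hedged sketch of the new TlsConfig field on PutIntegrationInput shown above, used here
// to reach an HTTP_PROXY backend whose certificate is signed by a private CA. The REST API,
// resource, method, and backend URI values are placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

func main() {
	svc := apigateway.New(session.Must(session.NewSession()))

	_, err := svc.PutIntegration(&apigateway.PutIntegrationInput{
		RestApiId:             aws.String("abc123example"),
		ResourceId:            aws.String("resource123"),
		HttpMethod:            aws.String("ANY"),
		IntegrationHttpMethod: aws.String("ANY"),
		Type:                  aws.String(apigateway.IntegrationTypeHttpProxy),
		Uri:                   aws.String("https://backend.internal.example.com"),
		TlsConfig: &apigateway.TlsConfig{
			// Skip CA verification only for trusted private or self-signed backends;
			// basic checks (expiry, hostname, root CA presence) still run, per the docs above.
			InsecureSkipVerification: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}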
-func (s UnauthorizedException) Code() string { +func (s *UnauthorizedException) Code() string { return "UnauthorizedException" } // Message returns the exception's message. -func (s UnauthorizedException) Message() string { +func (s *UnauthorizedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23272,30 +23448,29 @@ func (s UnauthorizedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnauthorizedException) OrigErr() error { +func (s *UnauthorizedException) OrigErr() error { return nil } -func (s UnauthorizedException) Error() string { +func (s *UnauthorizedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnauthorizedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnauthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnauthorizedException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnauthorizedException) RequestID() string { + return s.RespMetadata.RequestID } // Removes a tag from a given resource. type UntagResourceInput struct { _ struct{} `type:"structure"` - // [Required] The ARN of a resource that can be tagged. The resource ARN must - // be URL-encoded. + // [Required] The ARN of a resource that can be tagged. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resource_arn" type:"string" required:"true"` @@ -25083,7 +25258,7 @@ func (s *UpdateVpcLinkInput) SetVpcLinkId(v string) *UpdateVpcLinkInput { return s } -// A API Gateway VPC link for a RestApi to access resources in an Amazon Virtual +// An API Gateway VPC link for a RestApi to access resources in an Amazon Virtual // Private Cloud (VPC). // // To enable access to a resource in an Amazon Virtual Private Cloud through @@ -25117,8 +25292,9 @@ type UpdateVpcLinkOutput struct { // The collection of tags. Each tag element is associated with a given resource. Tags map[string]*string `locationName:"tags" type:"map"` - // The ARNs of network load balancers of the VPC targeted by the VPC link. The - // network load balancers must be owned by the same AWS account of the API owner. + // The ARN of the network load balancer of the VPC targeted by the VPC link. + // The network load balancer must be owned by the same AWS account of the API + // owner. TargetArns []*string `locationName:"targetArns" type:"list"` } @@ -25400,11 +25576,26 @@ const ( ApiKeySourceTypeAuthorizer = "AUTHORIZER" ) +// ApiKeySourceType_Values returns all elements of the ApiKeySourceType enum +func ApiKeySourceType_Values() []string { + return []string{ + ApiKeySourceTypeHeader, + ApiKeySourceTypeAuthorizer, + } +} + const ( // ApiKeysFormatCsv is a ApiKeysFormat enum value ApiKeysFormatCsv = "csv" ) +// ApiKeysFormat_Values returns all elements of the ApiKeysFormat enum +func ApiKeysFormat_Values() []string { + return []string{ + ApiKeysFormatCsv, + } +} + // The authorizer type. 
Valid values are TOKEN for a Lambda function using a // single authorization token submitted in a custom header, REQUEST for a Lambda // function using incoming request parameters, and COGNITO_USER_POOLS for using @@ -25420,6 +25611,15 @@ const ( AuthorizerTypeCognitoUserPools = "COGNITO_USER_POOLS" ) +// AuthorizerType_Values returns all elements of the AuthorizerType enum +func AuthorizerType_Values() []string { + return []string{ + AuthorizerTypeToken, + AuthorizerTypeRequest, + AuthorizerTypeCognitoUserPools, + } +} + // Returns the size of the CacheCluster. const ( // CacheClusterSize05 is a CacheClusterSize enum value @@ -25447,6 +25647,20 @@ const ( CacheClusterSize237 = "237" ) +// CacheClusterSize_Values returns all elements of the CacheClusterSize enum +func CacheClusterSize_Values() []string { + return []string{ + CacheClusterSize05, + CacheClusterSize16, + CacheClusterSize61, + CacheClusterSize135, + CacheClusterSize284, + CacheClusterSize582, + CacheClusterSize118, + CacheClusterSize237, + } +} + // Returns the status of the CacheCluster. const ( // CacheClusterStatusCreateInProgress is a CacheClusterStatus enum value @@ -25465,6 +25679,17 @@ const ( CacheClusterStatusFlushInProgress = "FLUSH_IN_PROGRESS" ) +// CacheClusterStatus_Values returns all elements of the CacheClusterStatus enum +func CacheClusterStatus_Values() []string { + return []string{ + CacheClusterStatusCreateInProgress, + CacheClusterStatusAvailable, + CacheClusterStatusDeleteInProgress, + CacheClusterStatusNotAvailable, + CacheClusterStatusFlushInProgress, + } +} + const ( // ConnectionTypeInternet is a ConnectionType enum value ConnectionTypeInternet = "INTERNET" @@ -25473,6 +25698,14 @@ const ( ConnectionTypeVpcLink = "VPC_LINK" ) +// ConnectionType_Values returns all elements of the ConnectionType enum +func ConnectionType_Values() []string { + return []string{ + ConnectionTypeInternet, + ConnectionTypeVpcLink, + } +} + const ( // ContentHandlingStrategyConvertToBinary is a ContentHandlingStrategy enum value ContentHandlingStrategyConvertToBinary = "CONVERT_TO_BINARY" @@ -25481,6 +25714,14 @@ const ( ContentHandlingStrategyConvertToText = "CONVERT_TO_TEXT" ) +// ContentHandlingStrategy_Values returns all elements of the ContentHandlingStrategy enum +func ContentHandlingStrategy_Values() []string { + return []string{ + ContentHandlingStrategyConvertToBinary, + ContentHandlingStrategyConvertToText, + } +} + const ( // DocumentationPartTypeApi is a DocumentationPartType enum value DocumentationPartTypeApi = "API" @@ -25519,6 +25760,24 @@ const ( DocumentationPartTypeResponseBody = "RESPONSE_BODY" ) +// DocumentationPartType_Values returns all elements of the DocumentationPartType enum +func DocumentationPartType_Values() []string { + return []string{ + DocumentationPartTypeApi, + DocumentationPartTypeAuthorizer, + DocumentationPartTypeModel, + DocumentationPartTypeResource, + DocumentationPartTypeMethod, + DocumentationPartTypePathParameter, + DocumentationPartTypeQueryParameter, + DocumentationPartTypeRequestHeader, + DocumentationPartTypeRequestBody, + DocumentationPartTypeResponse, + DocumentationPartTypeResponseHeader, + DocumentationPartTypeResponseBody, + } +} + const ( // DomainNameStatusAvailable is a DomainNameStatus enum value DomainNameStatusAvailable = "AVAILABLE" @@ -25530,6 +25789,15 @@ const ( DomainNameStatusPending = "PENDING" ) +// DomainNameStatus_Values returns all elements of the DomainNameStatus enum +func DomainNameStatus_Values() []string { + return []string{ + 
DomainNameStatusAvailable, + DomainNameStatusUpdating, + DomainNameStatusPending, + } +} + // The endpoint type. The valid values are EDGE for edge-optimized API setup, // most suitable for mobile applications; REGIONAL for regional API endpoint // setup, most suitable for calling from AWS Region; and PRIVATE for private @@ -25545,6 +25813,15 @@ const ( EndpointTypePrivate = "PRIVATE" ) +// EndpointType_Values returns all elements of the EndpointType enum +func EndpointType_Values() []string { + return []string{ + EndpointTypeRegional, + EndpointTypeEdge, + EndpointTypePrivate, + } +} + const ( // GatewayResponseTypeDefault4xx is a GatewayResponseType enum value GatewayResponseTypeDefault4xx = "DEFAULT_4XX" @@ -25607,6 +25884,32 @@ const ( GatewayResponseTypeQuotaExceeded = "QUOTA_EXCEEDED" ) +// GatewayResponseType_Values returns all elements of the GatewayResponseType enum +func GatewayResponseType_Values() []string { + return []string{ + GatewayResponseTypeDefault4xx, + GatewayResponseTypeDefault5xx, + GatewayResponseTypeResourceNotFound, + GatewayResponseTypeUnauthorized, + GatewayResponseTypeInvalidApiKey, + GatewayResponseTypeAccessDenied, + GatewayResponseTypeAuthorizerFailure, + GatewayResponseTypeAuthorizerConfigurationError, + GatewayResponseTypeInvalidSignature, + GatewayResponseTypeExpiredToken, + GatewayResponseTypeMissingAuthenticationToken, + GatewayResponseTypeIntegrationFailure, + GatewayResponseTypeIntegrationTimeout, + GatewayResponseTypeApiConfigurationError, + GatewayResponseTypeUnsupportedMediaType, + GatewayResponseTypeBadRequestParameters, + GatewayResponseTypeBadRequestBody, + GatewayResponseTypeRequestTooLarge, + GatewayResponseTypeThrottled, + GatewayResponseTypeQuotaExceeded, + } +} + // The integration type. The valid value is HTTP for integrating an API method // with an HTTP backend; AWS with any AWS service endpoints; MOCK for testing // without actually invoking the backend; HTTP_PROXY for integrating with the @@ -25628,6 +25931,17 @@ const ( IntegrationTypeAwsProxy = "AWS_PROXY" ) +// IntegrationType_Values returns all elements of the IntegrationType enum +func IntegrationType_Values() []string { + return []string{ + IntegrationTypeHttp, + IntegrationTypeAws, + IntegrationTypeMock, + IntegrationTypeHttpProxy, + IntegrationTypeAwsProxy, + } +} + const ( // LocationStatusTypeDocumented is a LocationStatusType enum value LocationStatusTypeDocumented = "DOCUMENTED" @@ -25636,6 +25950,14 @@ const ( LocationStatusTypeUndocumented = "UNDOCUMENTED" ) +// LocationStatusType_Values returns all elements of the LocationStatusType enum +func LocationStatusType_Values() []string { + return []string{ + LocationStatusTypeDocumented, + LocationStatusTypeUndocumented, + } +} + const ( // OpAdd is a Op enum value OpAdd = "add" @@ -25656,6 +25978,18 @@ const ( OpTest = "test" ) +// Op_Values returns all elements of the Op enum +func Op_Values() []string { + return []string{ + OpAdd, + OpRemove, + OpReplace, + OpMove, + OpCopy, + OpTest, + } +} + const ( // PutModeMerge is a PutMode enum value PutModeMerge = "merge" @@ -25664,6 +25998,14 @@ const ( PutModeOverwrite = "overwrite" ) +// PutMode_Values returns all elements of the PutMode enum +func PutMode_Values() []string { + return []string{ + PutModeMerge, + PutModeOverwrite, + } +} + const ( // QuotaPeriodTypeDay is a QuotaPeriodType enum value QuotaPeriodTypeDay = "DAY" @@ -25675,6 +26017,15 @@ const ( QuotaPeriodTypeMonth = "MONTH" ) +// QuotaPeriodType_Values returns all elements of the QuotaPeriodType enum +func 
QuotaPeriodType_Values() []string { + return []string{ + QuotaPeriodTypeDay, + QuotaPeriodTypeWeek, + QuotaPeriodTypeMonth, + } +} + const ( // SecurityPolicyTls10 is a SecurityPolicy enum value SecurityPolicyTls10 = "TLS_1_0" @@ -25683,6 +26034,14 @@ const ( SecurityPolicyTls12 = "TLS_1_2" ) +// SecurityPolicy_Values returns all elements of the SecurityPolicy enum +func SecurityPolicy_Values() []string { + return []string{ + SecurityPolicyTls10, + SecurityPolicyTls12, + } +} + const ( // UnauthorizedCacheControlHeaderStrategyFailWith403 is a UnauthorizedCacheControlHeaderStrategy enum value UnauthorizedCacheControlHeaderStrategyFailWith403 = "FAIL_WITH_403" @@ -25694,6 +26053,15 @@ const ( UnauthorizedCacheControlHeaderStrategySucceedWithoutResponseHeader = "SUCCEED_WITHOUT_RESPONSE_HEADER" ) +// UnauthorizedCacheControlHeaderStrategy_Values returns all elements of the UnauthorizedCacheControlHeaderStrategy enum +func UnauthorizedCacheControlHeaderStrategy_Values() []string { + return []string{ + UnauthorizedCacheControlHeaderStrategyFailWith403, + UnauthorizedCacheControlHeaderStrategySucceedWithResponseHeader, + UnauthorizedCacheControlHeaderStrategySucceedWithoutResponseHeader, + } +} + const ( // VpcLinkStatusAvailable is a VpcLinkStatus enum value VpcLinkStatusAvailable = "AVAILABLE" @@ -25707,3 +26075,13 @@ const ( // VpcLinkStatusFailed is a VpcLinkStatus enum value VpcLinkStatusFailed = "FAILED" ) + +// VpcLinkStatus_Values returns all elements of the VpcLinkStatus enum +func VpcLinkStatus_Values() []string { + return []string{ + VpcLinkStatusAvailable, + VpcLinkStatusPending, + VpcLinkStatusDeleting, + VpcLinkStatusFailed, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go index 7f34e0c95..2f7f7f30c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/api.go b/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/api.go index 7d9fd1e99..d86d09fd3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/api.go @@ -2459,6 +2459,93 @@ func (c *ApiGatewayV2) DeleteVpcLinkWithContext(ctx aws.Context, input *DeleteVp return out, req.Send() } +const opExportApi = "ExportApi" + +// ExportApiRequest generates a "aws/request.Request" representing the +// client's request for the ExportApi operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExportApi for more information on using the ExportApi +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExportApiRequest method. 
+// req, resp := client.ExportApiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/apigatewayv2-2018-11-29/ExportApi +func (c *ApiGatewayV2) ExportApiRequest(input *ExportApiInput) (req *request.Request, output *ExportApiOutput) { + op := &request.Operation{ + Name: opExportApi, + HTTPMethod: "GET", + HTTPPath: "/v2/apis/{apiId}/exports/{specification}", + } + + if input == nil { + input = &ExportApiInput{} + } + + output = &ExportApiOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExportApi API operation for AmazonApiGatewayV2. +// +// Exports a definition of an API in a particular output format and specification. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonApiGatewayV2's +// API operation ExportApi for usage and error information. +// +// Returned Error Types: +// * NotFoundException +// The resource specified in the request was not found. See the message field +// for more information. +// +// * TooManyRequestsException +// A limit has been exceeded. See the accompanying error message for details. +// +// * BadRequestException +// The request is not valid, for example, the input is incomplete or incorrect. +// See the accompanying error message for details. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/apigatewayv2-2018-11-29/ExportApi +func (c *ApiGatewayV2) ExportApi(input *ExportApiInput) (*ExportApiOutput, error) { + req, out := c.ExportApiRequest(input) + return out, req.Send() +} + +// ExportApiWithContext is the same as ExportApi with the addition of +// the ability to pass a context and additional request options. +// +// See ExportApi for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApiGatewayV2) ExportApiWithContext(ctx aws.Context, input *ExportApiInput, opts ...request.Option) (*ExportApiOutput, error) { + req, out := c.ExportApiRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetApi = "GetApi" // GetApiRequest generates a "aws/request.Request" representing the @@ -4858,6 +4945,91 @@ func (c *ApiGatewayV2) ReimportApiWithContext(ctx aws.Context, input *ReimportAp return out, req.Send() } +const opResetAuthorizersCache = "ResetAuthorizersCache" + +// ResetAuthorizersCacheRequest generates a "aws/request.Request" representing the +// client's request for the ResetAuthorizersCache operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetAuthorizersCache for more information on using the ResetAuthorizersCache +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
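// A minimal sketch of calling the ExportApi operation added above to export an HTTP API's
// OpenAPI 3.0 definition as YAML. The API ID and output path are placeholders, and the
// "OAS30"/"YAML" values for Specification and OutputType are assumptions about the accepted
// enum values rather than something stated in this diff.
package main

import (
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apigatewayv2"
)

func main() {
	svc := apigatewayv2.New(session.Must(session.NewSession()))

	out, err := svc.ExportApi(&apigatewayv2.ExportApiInput{
		ApiId:         aws.String("a1b2c3d4"),
		Specification: aws.String("OAS30"),
		OutputType:    aws.String("YAML"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The exported definition is returned as a raw blob in the response body.
	if err := ioutil.WriteFile("api.yaml", out.Body, 0644); err != nil {
		log.Fatal(err)
	}
}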
+// +// +// // Example sending a request using the ResetAuthorizersCacheRequest method. +// req, resp := client.ResetAuthorizersCacheRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/apigatewayv2-2018-11-29/ResetAuthorizersCache +func (c *ApiGatewayV2) ResetAuthorizersCacheRequest(input *ResetAuthorizersCacheInput) (req *request.Request, output *ResetAuthorizersCacheOutput) { + op := &request.Operation{ + Name: opResetAuthorizersCache, + HTTPMethod: "DELETE", + HTTPPath: "/v2/apis/{apiId}/stages/{stageName}/cache/authorizers", + } + + if input == nil { + input = &ResetAuthorizersCacheInput{} + } + + output = &ResetAuthorizersCacheOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ResetAuthorizersCache API operation for AmazonApiGatewayV2. +// +// Resets all authorizer cache entries for the specified stage. Supported only +// for HTTP API Lambda authorizers. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AmazonApiGatewayV2's +// API operation ResetAuthorizersCache for usage and error information. +// +// Returned Error Types: +// * NotFoundException +// The resource specified in the request was not found. See the message field +// for more information. +// +// * TooManyRequestsException +// A limit has been exceeded. See the accompanying error message for details. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/apigatewayv2-2018-11-29/ResetAuthorizersCache +func (c *ApiGatewayV2) ResetAuthorizersCache(input *ResetAuthorizersCacheInput) (*ResetAuthorizersCacheOutput, error) { + req, out := c.ResetAuthorizersCacheRequest(input) + return out, req.Send() +} + +// ResetAuthorizersCacheWithContext is the same as ResetAuthorizersCache with the addition of +// the ability to pass a context and additional request options. +// +// See ResetAuthorizersCache for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ApiGatewayV2) ResetAuthorizersCacheWithContext(ctx aws.Context, input *ResetAuthorizersCacheInput, opts ...request.Option) (*ResetAuthorizersCacheOutput, error) { + req, out := c.ResetAuthorizersCacheRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opTagResource = "TagResource" // TagResourceRequest generates a "aws/request.Request" representing the @@ -6144,8 +6316,8 @@ func (c *ApiGatewayV2) UpdateVpcLinkWithContext(ctx aws.Context, input *UpdateVp } type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6162,17 +6334,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6180,22 +6352,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // Settings for logging access in a stage. @@ -6241,6 +6413,11 @@ type Api struct { // to a deployed API stage. ApiEndpoint *string `locationName:"apiEndpoint" type:"string"` + // Specifies whether an API is managed by API Gateway. You can't update or delete + // a managed API by using API Gateway. A managed API can be deleted only through + // the tooling or service that created it. + ApiGatewayManaged *bool `locationName:"apiGatewayManaged" type:"boolean"` + // The API ID. ApiId *string `locationName:"apiId" type:"string"` @@ -6257,6 +6434,12 @@ type Api struct { // The description of the API. Description *string `locationName:"description" type:"string"` + // Specifies whether clients can invoke your API by using the default execute-api + // endpoint. By default, clients can invoke your API with the default https://{api_id}.execute-api.{region}.amazonaws.com + // endpoint. To require that clients use a custom domain name to invoke your + // API, disable the default endpoint. + DisableExecuteApiEndpoint *bool `locationName:"disableExecuteApiEndpoint" type:"boolean"` + // Avoid validating models when creating a deployment. Supported only for WebSocket // APIs. DisableSchemaValidation *bool `locationName:"disableSchemaValidation" type:"boolean"` @@ -6310,6 +6493,12 @@ func (s *Api) SetApiEndpoint(v string) *Api { return s } +// SetApiGatewayManaged sets the ApiGatewayManaged field's value. +func (s *Api) SetApiGatewayManaged(v bool) *Api { + s.ApiGatewayManaged = &v + return s +} + // SetApiId sets the ApiId field's value. 
func (s *Api) SetApiId(v string) *Api { s.ApiId = &v @@ -6340,6 +6529,12 @@ func (s *Api) SetDescription(v string) *Api { return s } +// SetDisableExecuteApiEndpoint sets the DisableExecuteApiEndpoint field's value. +func (s *Api) SetDisableExecuteApiEndpoint(v bool) *Api { + s.DisableExecuteApiEndpoint = &v + return s +} + // SetDisableSchemaValidation sets the DisableSchemaValidation field's value. func (s *Api) SetDisableSchemaValidation(v bool) *Api { s.DisableSchemaValidation = &v @@ -6450,22 +6645,30 @@ type Authorizer struct { // Specifies the required credentials as an IAM role for API Gateway to invoke // the authorizer. To specify an IAM role for API Gateway to assume, use the // role's Amazon Resource Name (ARN). To use resource-based permissions on the - // Lambda function, specify null. Supported only for REQUEST authorizers. + // Lambda function, don't specify this parameter. Supported only for REQUEST + // authorizers. AuthorizerCredentialsArn *string `locationName:"authorizerCredentialsArn" type:"string"` // The authorizer identifier. AuthorizerId *string `locationName:"authorizerId" type:"string"` - // Authorizer caching is not currently supported. Don't specify this value for - // authorizers. + // Specifies the format of the payload sent to an HTTP API Lambda authorizer. + // Required for HTTP API Lambda authorizers. Supported values are 1.0 and 2.0. + // To learn more, see Working with AWS Lambda authorizers for HTTP APIs (https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-lambda-authorizer.html). + AuthorizerPayloadFormatVersion *string `locationName:"authorizerPayloadFormatVersion" type:"string"` + + // The time to live (TTL) for cached authorizer results, in seconds. If it equals + // 0, authorization caching is disabled. If it is greater than 0, API Gateway + // caches authorizer responses. The maximum value is 3600, or 1 hour. Supported + // only for HTTP API Lambda authorizers. AuthorizerResultTtlInSeconds *int64 `locationName:"authorizerResultTtlInSeconds" type:"integer"` - // The authorizer type. For WebSocket APIs, specify REQUEST for a Lambda function - // using incoming request parameters. For HTTP APIs, specify JWT to use JSON - // Web Tokens. + // The authorizer type. Specify REQUEST for a Lambda function using incoming + // request parameters. Specify JWT to use JSON Web Tokens (supported only for + // HTTP APIs). AuthorizerType *string `locationName:"authorizerType" type:"string" enum:"AuthorizerType"` - // The authorizer's Uniform Resource Identifier (URI). ForREQUEST authorizers, + // The authorizer's Uniform Resource Identifier (URI). For REQUEST authorizers, // this must be a well-formed Lambda function URI, for example, arn:aws:apigateway:us-west-2:lambda:path/2015-03-31/functions/arn:aws:lambda:us-west-2:{account_id}:function:{lambda_function_name}/invocations. // In general, the URI has this form: arn:aws:apigateway:{region}:lambda:path/{service_api} // , where {region} is the same as the region hosting the Lambda function, path @@ -6475,23 +6678,32 @@ type Authorizer struct { // only for REQUEST authorizers. AuthorizerUri *string `locationName:"authorizerUri" type:"string"` + // Specifies whether a Lambda authorizer returns a response in a simple format. + // If enabled, the Lambda authorizer can return a boolean value instead of an + // IAM policy. Supported only for HTTP APIs. 
To learn more, see Working with + // AWS Lambda authorizers for HTTP APIs (https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-lambda-authorizer.html) + EnableSimpleResponses *bool `locationName:"enableSimpleResponses" type:"boolean"` + // The identity source for which authorization is requested. // // For a REQUEST authorizer, this is optional. The value is a set of one or - // more mapping expressions of the specified request parameters. Currently, - // the identity source can be headers, query string parameters, stage variables, - // and context parameters. For example, if an Auth header and a Name query string - // parameter are defined as identity sources, this value is route.request.header.Auth, - // route.request.querystring.Name. These parameters will be used to perform - // runtime validation for Lambda-based authorizers by verifying all of the identity-related - // request parameters are present in the request, not null, and non-empty. Only - // when this is true does the authorizer invoke the authorizer Lambda function. - // Otherwise, it returns a 401 Unauthorized response without calling the Lambda - // function. + // more mapping expressions of the specified request parameters. The identity + // source can be headers, query string parameters, stage variables, and context + // parameters. For example, if an Auth header and a Name query string parameter + // are defined as identity sources, this value is route.request.header.Auth, + // route.request.querystring.Name for WebSocket APIs. For HTTP APIs, use selection + // expressions prefixed with $, for example, $request.header.Auth, $request.querystring.Name. + // These parameters are used to perform runtime validation for Lambda-based + // authorizers by verifying all of the identity-related request parameters are + // present in the request, not null, and non-empty. Only when this is true does + // the authorizer invoke the authorizer Lambda function. Otherwise, it returns + // a 401 Unauthorized response without calling the Lambda function. For HTTP + // APIs, identity sources are also used as the cache key when caching is enabled. + // To learn more, see Working with AWS Lambda authorizers for HTTP APIs (https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-lambda-authorizer.html). // // For JWT, a single entry that specifies where to extract the JSON Web Token // (JWT) from inbound requests. Currently only header-based and query parameter-based - // selections are supported, for example "$request.header.Authorization". + // selections are supported, for example $request.header.Authorization. IdentitySource []*string `locationName:"identitySource" type:"list"` // The validation expression does not apply to the REQUEST authorizer. @@ -6529,6 +6741,12 @@ func (s *Authorizer) SetAuthorizerId(v string) *Authorizer { return s } +// SetAuthorizerPayloadFormatVersion sets the AuthorizerPayloadFormatVersion field's value. +func (s *Authorizer) SetAuthorizerPayloadFormatVersion(v string) *Authorizer { + s.AuthorizerPayloadFormatVersion = &v + return s +} + // SetAuthorizerResultTtlInSeconds sets the AuthorizerResultTtlInSeconds field's value. func (s *Authorizer) SetAuthorizerResultTtlInSeconds(v int64) *Authorizer { s.AuthorizerResultTtlInSeconds = &v @@ -6547,6 +6765,12 @@ func (s *Authorizer) SetAuthorizerUri(v string) *Authorizer { return s } +// SetEnableSimpleResponses sets the EnableSimpleResponses field's value. 
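The new HTTP API Lambda authorizer fields are easiest to read together. A hedged sketch of creating one via CreateAuthorizer (whose input shape appears further below in this file); svc, apiID, and lambdaInvokeURI are assumed placeholders:

    _, err := svc.CreateAuthorizer(&apigatewayv2.CreateAuthorizerInput{
        ApiId:                          aws.String(apiID),
        Name:                           aws.String("lambda-authorizer"),
        AuthorizerType:                 aws.String("REQUEST"),       // Lambda authorizer
        AuthorizerUri:                  aws.String(lambdaInvokeURI), // arn:aws:apigateway:{region}:lambda:path/... invoke URI
        AuthorizerPayloadFormatVersion: aws.String("2.0"),           // 1.0 and 2.0 are the supported values
        EnableSimpleResponses:          aws.Bool(true),              // allow boolean responses instead of IAM policies
        AuthorizerResultTtlInSeconds:   aws.Int64(300),              // cache results for five minutes
        IdentitySource:                 aws.StringSlice([]string{"$request.header.Authorization"}),
    })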
+func (s *Authorizer) SetEnableSimpleResponses(v bool) *Authorizer { + s.EnableSimpleResponses = &v + return s +} + // SetIdentitySource sets the IdentitySource field's value. func (s *Authorizer) SetIdentitySource(v []*string) *Authorizer { s.IdentitySource = v @@ -6574,8 +6798,8 @@ func (s *Authorizer) SetName(v string) *Authorizer { // The request is not valid, for example, the input is incomplete or incorrect. // See the accompanying error message for details. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Describes the error encountered. Message_ *string `locationName:"message" type:"string"` @@ -6593,17 +6817,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6611,30 +6835,30 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } // The requested operation would cause a conflict with the current state of // a service resource associated with the request. Resolve the conflict before // retrying this request. See the accompanying error message for details. type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Describes the error encountered. Message_ *string `locationName:"message" type:"string"` @@ -6652,17 +6876,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6670,22 +6894,22 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } // Represents a CORS configuration. Supported only for HTTP APIs. See Configuring @@ -6794,6 +7018,8 @@ type CreateApiInput struct { // A string with a length between [0-1024]. Description *string `locationName:"description" type:"string"` + DisableExecuteApiEndpoint *bool `locationName:"disableExecuteApiEndpoint" type:"boolean"` + DisableSchemaValidation *bool `locationName:"disableSchemaValidation" type:"boolean"` // A string with a length between [1-128]. @@ -6883,6 +7109,12 @@ func (s *CreateApiInput) SetDescription(v string) *CreateApiInput { return s } +// SetDisableExecuteApiEndpoint sets the DisableExecuteApiEndpoint field's value. +func (s *CreateApiInput) SetDisableExecuteApiEndpoint(v bool) *CreateApiInput { + s.DisableExecuteApiEndpoint = &v + return s +} + // SetDisableSchemaValidation sets the DisableSchemaValidation field's value. func (s *CreateApiInput) SetDisableSchemaValidation(v bool) *CreateApiInput { s.DisableSchemaValidation = &v @@ -7070,6 +7302,8 @@ type CreateApiOutput struct { ApiEndpoint *string `locationName:"apiEndpoint" type:"string"` + ApiGatewayManaged *bool `locationName:"apiGatewayManaged" type:"boolean"` + // The identifier. ApiId *string `locationName:"apiId" type:"string"` @@ -7088,6 +7322,8 @@ type CreateApiOutput struct { // A string with a length between [0-1024]. Description *string `locationName:"description" type:"string"` + DisableExecuteApiEndpoint *bool `locationName:"disableExecuteApiEndpoint" type:"boolean"` + DisableSchemaValidation *bool `locationName:"disableSchemaValidation" type:"boolean"` ImportInfo []*string `locationName:"importInfo" type:"list"` @@ -7128,6 +7364,12 @@ func (s *CreateApiOutput) SetApiEndpoint(v string) *CreateApiOutput { return s } +// SetApiGatewayManaged sets the ApiGatewayManaged field's value. +func (s *CreateApiOutput) SetApiGatewayManaged(v bool) *CreateApiOutput { + s.ApiGatewayManaged = &v + return s +} + // SetApiId sets the ApiId field's value. func (s *CreateApiOutput) SetApiId(v string) *CreateApiOutput { s.ApiId = &v @@ -7158,6 +7400,12 @@ func (s *CreateApiOutput) SetDescription(v string) *CreateApiOutput { return s } +// SetDisableExecuteApiEndpoint sets the DisableExecuteApiEndpoint field's value. +func (s *CreateApiOutput) SetDisableExecuteApiEndpoint(v bool) *CreateApiOutput { + s.DisableExecuteApiEndpoint = &v + return s +} + // SetDisableSchemaValidation sets the DisableSchemaValidation field's value. func (s *CreateApiOutput) SetDisableSchemaValidation(v bool) *CreateApiOutput { s.DisableSchemaValidation = &v @@ -7215,12 +7463,15 @@ type CreateAuthorizerInput struct { // Represents an Amazon Resource Name (ARN). AuthorizerCredentialsArn *string `locationName:"authorizerCredentialsArn" type:"string"` + // A string with a length between [1-64]. 
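disableExecuteApiEndpoint is a plain boolean on CreateApiInput, so forcing clients through a custom domain name is a one-field change at creation time. A minimal sketch (svc as above; the API name is illustrative, and Name/ProtocolType are the pre-existing required fields):

    api, err := svc.CreateApi(&apigatewayv2.CreateApiInput{
        Name:                      aws.String("orders-http-api"),
        ProtocolType:              aws.String("HTTP"),
        DisableExecuteApiEndpoint: aws.Bool(true), // disable the default execute-api URL; callers must use a custom domain
    })
    if err == nil {
        fmt.Println(aws.StringValue(api.ApiEndpoint), aws.BoolValue(api.ApiGatewayManaged))
    }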
+ AuthorizerPayloadFormatVersion *string `locationName:"authorizerPayloadFormatVersion" type:"string"` + // An integer with a value between [0-3600]. AuthorizerResultTtlInSeconds *int64 `locationName:"authorizerResultTtlInSeconds" type:"integer"` - // The authorizer type. For WebSocket APIs, specify REQUEST for a Lambda function - // using incoming request parameters. For HTTP APIs, specify JWT to use JSON - // Web Tokens. + // The authorizer type. Specify REQUEST for a Lambda function using incoming + // request parameters. Specify JWT to use JSON Web Tokens (supported only for + // HTTP APIs). // // AuthorizerType is a required field AuthorizerType *string `locationName:"authorizerType" type:"string" required:"true" enum:"AuthorizerType"` @@ -7228,6 +7479,8 @@ type CreateAuthorizerInput struct { // A string representation of a URI with a length between [1-2048]. AuthorizerUri *string `locationName:"authorizerUri" type:"string"` + EnableSimpleResponses *bool `locationName:"enableSimpleResponses" type:"boolean"` + // The identity source for which authorization is requested. For the REQUEST // authorizer, this is required when authorization caching is enabled. The value // is a comma-separated string of one or more mapping expressions of the specified @@ -7305,6 +7558,12 @@ func (s *CreateAuthorizerInput) SetAuthorizerCredentialsArn(v string) *CreateAut return s } +// SetAuthorizerPayloadFormatVersion sets the AuthorizerPayloadFormatVersion field's value. +func (s *CreateAuthorizerInput) SetAuthorizerPayloadFormatVersion(v string) *CreateAuthorizerInput { + s.AuthorizerPayloadFormatVersion = &v + return s +} + // SetAuthorizerResultTtlInSeconds sets the AuthorizerResultTtlInSeconds field's value. func (s *CreateAuthorizerInput) SetAuthorizerResultTtlInSeconds(v int64) *CreateAuthorizerInput { s.AuthorizerResultTtlInSeconds = &v @@ -7323,6 +7582,12 @@ func (s *CreateAuthorizerInput) SetAuthorizerUri(v string) *CreateAuthorizerInpu return s } +// SetEnableSimpleResponses sets the EnableSimpleResponses field's value. +func (s *CreateAuthorizerInput) SetEnableSimpleResponses(v bool) *CreateAuthorizerInput { + s.EnableSimpleResponses = &v + return s +} + // SetIdentitySource sets the IdentitySource field's value. func (s *CreateAuthorizerInput) SetIdentitySource(v []*string) *CreateAuthorizerInput { s.IdentitySource = v @@ -7356,17 +7621,22 @@ type CreateAuthorizerOutput struct { // The identifier. AuthorizerId *string `locationName:"authorizerId" type:"string"` + // A string with a length between [1-64]. + AuthorizerPayloadFormatVersion *string `locationName:"authorizerPayloadFormatVersion" type:"string"` + // An integer with a value between [0-3600]. AuthorizerResultTtlInSeconds *int64 `locationName:"authorizerResultTtlInSeconds" type:"integer"` - // The authorizer type. For WebSocket APIs, specify REQUEST for a Lambda function - // using incoming request parameters. For HTTP APIs, specify JWT to use JSON - // Web Tokens. + // The authorizer type. Specify REQUEST for a Lambda function using incoming + // request parameters. Specify JWT to use JSON Web Tokens (supported only for + // HTTP APIs). AuthorizerType *string `locationName:"authorizerType" type:"string" enum:"AuthorizerType"` // A string representation of a URI with a length between [1-2048]. AuthorizerUri *string `locationName:"authorizerUri" type:"string"` + EnableSimpleResponses *bool `locationName:"enableSimpleResponses" type:"boolean"` + // The identity source for which authorization is requested. 
For the REQUEST // authorizer, this is required when authorization caching is enabled. The value // is a comma-separated string of one or more mapping expressions of the specified @@ -7415,6 +7685,12 @@ func (s *CreateAuthorizerOutput) SetAuthorizerId(v string) *CreateAuthorizerOutp return s } +// SetAuthorizerPayloadFormatVersion sets the AuthorizerPayloadFormatVersion field's value. +func (s *CreateAuthorizerOutput) SetAuthorizerPayloadFormatVersion(v string) *CreateAuthorizerOutput { + s.AuthorizerPayloadFormatVersion = &v + return s +} + // SetAuthorizerResultTtlInSeconds sets the AuthorizerResultTtlInSeconds field's value. func (s *CreateAuthorizerOutput) SetAuthorizerResultTtlInSeconds(v int64) *CreateAuthorizerOutput { s.AuthorizerResultTtlInSeconds = &v @@ -7433,6 +7709,12 @@ func (s *CreateAuthorizerOutput) SetAuthorizerUri(v string) *CreateAuthorizerOut return s } +// SetEnableSimpleResponses sets the EnableSimpleResponses field's value. +func (s *CreateAuthorizerOutput) SetEnableSimpleResponses(v bool) *CreateAuthorizerOutput { + s.EnableSimpleResponses = &v + return s +} + // SetIdentitySource sets the IdentitySource field's value. func (s *CreateAuthorizerOutput) SetIdentitySource(v []*string) *CreateAuthorizerOutput { s.IdentitySource = v @@ -7590,6 +7872,11 @@ type CreateDomainNameInput struct { // The domain name configurations. DomainNameConfigurations []*DomainNameConfiguration `locationName:"domainNameConfigurations" type:"list"` + // If specified, API Gateway performs two-way authentication between the client + // and the server. Clients must present a trusted certificate to access your + // API. + MutualTlsAuthentication *MutualTlsAuthenticationInput `locationName:"mutualTlsAuthentication" type:"structure"` + // Represents a collection of tags associated with the resource. Tags map[string]*string `locationName:"tags" type:"map"` } @@ -7629,6 +7916,12 @@ func (s *CreateDomainNameInput) SetDomainNameConfigurations(v []*DomainNameConfi return s } +// SetMutualTlsAuthentication sets the MutualTlsAuthentication field's value. +func (s *CreateDomainNameInput) SetMutualTlsAuthentication(v *MutualTlsAuthenticationInput) *CreateDomainNameInput { + s.MutualTlsAuthentication = v + return s +} + // SetTags sets the Tags field's value. func (s *CreateDomainNameInput) SetTags(v map[string]*string) *CreateDomainNameInput { s.Tags = v @@ -7649,6 +7942,11 @@ type CreateDomainNameOutput struct { // The domain name configurations. DomainNameConfigurations []*DomainNameConfiguration `locationName:"domainNameConfigurations" type:"list"` + // If specified, API Gateway performs two-way authentication between the client + // and the server. Clients must present a trusted certificate to access your + // API. + MutualTlsAuthentication *MutualTlsAuthentication `locationName:"mutualTlsAuthentication" type:"structure"` + // Represents a collection of tags associated with the resource. Tags map[string]*string `locationName:"tags" type:"map"` } @@ -7681,6 +7979,12 @@ func (s *CreateDomainNameOutput) SetDomainNameConfigurations(v []*DomainNameConf return s } +// SetMutualTlsAuthentication sets the MutualTlsAuthentication field's value. +func (s *CreateDomainNameOutput) SetMutualTlsAuthentication(v *MutualTlsAuthentication) *CreateDomainNameOutput { + s.MutualTlsAuthentication = v + return s +} + // SetTags sets the Tags field's value. 
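MutualTlsAuthenticationInput only carries the truststore location and an optional S3 object version; the certificate for the domain itself still comes from the usual domain name configuration. A sketch assuming an ACM certificate ARN (certARN) and a truststore already uploaded to S3; CertificateArn and EndpointType are the standard DomainNameConfiguration fields, used here on that assumption:

    _, err := svc.CreateDomainName(&apigatewayv2.CreateDomainNameInput{
        DomainName: aws.String("api.example.com"),
        DomainNameConfigurations: []*apigatewayv2.DomainNameConfiguration{{
            CertificateArn: aws.String(certARN), // ACM certificate covering api.example.com
            EndpointType:   aws.String("REGIONAL"),
        }},
        MutualTlsAuthentication: &apigatewayv2.MutualTlsAuthenticationInput{
            TruststoreUri:     aws.String("s3://bucket-name/truststore.pem"), // CA bundle, per the field docs below
            TruststoreVersion: aws.String(truststoreVersion),                 // optional; requires S3 versioning
        },
    })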
func (s *CreateDomainNameOutput) SetTags(v map[string]*string) *CreateDomainNameOutput { s.Tags = v @@ -7712,6 +8016,9 @@ type CreateIntegrationInput struct { // A string with a length between [1-64]. IntegrationMethod *string `locationName:"integrationMethod" type:"string"` + // A string with a length between [1-128]. + IntegrationSubtype *string `locationName:"integrationSubtype" type:"string"` + // Represents an API method integration type. // // IntegrationType is a required field @@ -7749,7 +8056,7 @@ type CreateIntegrationInput struct { // for more information. TemplateSelectionExpression *string `locationName:"templateSelectionExpression" type:"string"` - // An integer with a value between [50-29000]. + // An integer with a value between [50-30000]. TimeoutInMillis *int64 `locationName:"timeoutInMillis" min:"50" type:"integer"` // The TLS configuration for a private integration. If you specify a TLS configuration, @@ -7832,6 +8139,12 @@ func (s *CreateIntegrationInput) SetIntegrationMethod(v string) *CreateIntegrati return s } +// SetIntegrationSubtype sets the IntegrationSubtype field's value. +func (s *CreateIntegrationInput) SetIntegrationSubtype(v string) *CreateIntegrationInput { + s.IntegrationSubtype = &v + return s +} + // SetIntegrationType sets the IntegrationType field's value. func (s *CreateIntegrationInput) SetIntegrationType(v string) *CreateIntegrationInput { s.IntegrationType = &v @@ -7918,6 +8231,9 @@ type CreateIntegrationOutput struct { // for more information. IntegrationResponseSelectionExpression *string `locationName:"integrationResponseSelectionExpression" type:"string"` + // A string with a length between [1-128]. + IntegrationSubtype *string `locationName:"integrationSubtype" type:"string"` + // Represents an API method integration type. IntegrationType *string `locationName:"integrationType" type:"string" enum:"IntegrationType"` @@ -7953,7 +8269,7 @@ type CreateIntegrationOutput struct { // for more information. TemplateSelectionExpression *string `locationName:"templateSelectionExpression" type:"string"` - // An integer with a value between [50-29000]. + // An integer with a value between [50-30000]. TimeoutInMillis *int64 `locationName:"timeoutInMillis" min:"50" type:"integer"` // The TLS configuration for a private integration. If you specify a TLS configuration, @@ -8026,6 +8342,12 @@ func (s *CreateIntegrationOutput) SetIntegrationResponseSelectionExpression(v st return s } +// SetIntegrationSubtype sets the IntegrationSubtype field's value. +func (s *CreateIntegrationOutput) SetIntegrationSubtype(v string) *CreateIntegrationOutput { + s.IntegrationSubtype = &v + return s +} + // SetIntegrationType sets the IntegrationType field's value. func (s *CreateIntegrationOutput) SetIntegrationType(v string) *CreateIntegrationOutput { s.IntegrationType = &v @@ -8451,8 +8773,9 @@ type CreateRouteInput struct { // The authorization type. For WebSocket APIs, valid values are NONE for open // access, AWS_IAM for using AWS IAM permissions, and CUSTOM for using a Lambda - // authorizer. For HTTP APIs, valid values are NONE for open access, or JWT - // for using JSON Web Tokens. + // authorizer. For HTTP APIs, valid values are NONE for open access, JWT for + // using JSON Web Tokens, AWS_IAM for using AWS IAM permissions, and CUSTOM + // for using a Lambda authorizer. AuthorizationType *string `locationName:"authorizationType" type:"string" enum:"AuthorizationType"` // The identifier. @@ -8609,8 +8932,9 @@ type CreateRouteOutput struct { // The authorization type. 
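integrationSubtype plus requestParameters is what turns an HTTP API AWS_PROXY integration into a first-class AWS service integration. The sketch below is an assumption-laden illustration rather than a recipe: the subtype name, the parameter keys, and the use of CredentialsArn and payloadFormatVersion 1.0 follow the AWS service integration reference linked in these docs, not anything in this diff, and apiID/roleARN are placeholders:

    _, err := svc.CreateIntegration(&apigatewayv2.CreateIntegrationInput{
        ApiId:                aws.String(apiID),
        IntegrationType:      aws.String("AWS_PROXY"),
        IntegrationSubtype:   aws.String("SQS-SendMessage"), // documented subtype for sending to an SQS queue
        PayloadFormatVersion: aws.String("1.0"),
        CredentialsArn:       aws.String(roleARN), // role API Gateway assumes to call the service
        RequestParameters: map[string]*string{
            "QueueUrl":    aws.String("${stageVariables.queueUrl}"),
            "MessageBody": aws.String("$request.body"),
        },
        TimeoutInMillis: aws.Int64(30000), // HTTP APIs now accept up to 30,000 ms
    })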
For WebSocket APIs, valid values are NONE for open // access, AWS_IAM for using AWS IAM permissions, and CUSTOM for using a Lambda - // authorizer. For HTTP APIs, valid values are NONE for open access, or JWT - // for using JSON Web Tokens. + // authorizer. For HTTP APIs, valid values are NONE for open access, JWT for + // using JSON Web Tokens, AWS_IAM for using AWS IAM permissions, and CUSTOM + // for using a Lambda authorizer. AuthorizationType *string `locationName:"authorizationType" type:"string" enum:"AuthorizationType"` // The identifier. @@ -10508,6 +10832,9 @@ type DomainName struct { // The domain name configurations. DomainNameConfigurations []*DomainNameConfiguration `locationName:"domainNameConfigurations" type:"list"` + // The mutual TLS authentication configuration for a custom domain name. + MutualTlsAuthentication *MutualTlsAuthentication `locationName:"mutualTlsAuthentication" type:"structure"` + // The collection of tags associated with a domain name. Tags map[string]*string `locationName:"tags" type:"map"` } @@ -10540,6 +10867,12 @@ func (s *DomainName) SetDomainNameConfigurations(v []*DomainNameConfiguration) * return s } +// SetMutualTlsAuthentication sets the MutualTlsAuthentication field's value. +func (s *DomainName) SetMutualTlsAuthentication(v *MutualTlsAuthentication) *DomainName { + s.MutualTlsAuthentication = v + return s +} + // SetTags sets the Tags field's value. func (s *DomainName) SetTags(v map[string]*string) *DomainName { s.Tags = v @@ -10650,32 +10983,53 @@ func (s *DomainNameConfiguration) SetSecurityPolicy(v string) *DomainNameConfigu return s } -type GetApiInput struct { +type ExportApiInput struct { _ struct{} `type:"structure"` // ApiId is a required field ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` + + ExportVersion *string `location:"querystring" locationName:"exportVersion" type:"string"` + + IncludeExtensions *bool `location:"querystring" locationName:"includeExtensions" type:"boolean"` + + // OutputType is a required field + OutputType *string `location:"querystring" locationName:"outputType" type:"string" required:"true"` + + // Specification is a required field + Specification *string `location:"uri" locationName:"specification" type:"string" required:"true"` + + StageName *string `location:"querystring" locationName:"stageName" type:"string"` } // String returns the string representation -func (s GetApiInput) String() string { +func (s ExportApiInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetApiInput) GoString() string { +func (s ExportApiInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
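As the updated authorizationType docs above note, HTTP API routes now accept AWS_IAM and CUSTOM in addition to NONE and JWT. A short sketch attaching IAM authorization to a route; integrationID is a placeholder for an existing integration:

    _, err := svc.CreateRoute(&apigatewayv2.CreateRouteInput{
        ApiId:             aws.String(apiID),
        RouteKey:          aws.String("GET /orders"),
        AuthorizationType: aws.String("AWS_IAM"),                       // require SigV4-signed requests
        Target:            aws.String("integrations/" + integrationID), // route to an existing integration
    })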
-func (s *GetApiInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetApiInput"} +func (s *ExportApiInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportApiInput"} if s.ApiId == nil { invalidParams.Add(request.NewErrParamRequired("ApiId")) } if s.ApiId != nil && len(*s.ApiId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ApiId", 1)) } + if s.OutputType == nil { + invalidParams.Add(request.NewErrParamRequired("OutputType")) + } + if s.Specification == nil { + invalidParams.Add(request.NewErrParamRequired("Specification")) + } + if s.Specification != nil && len(*s.Specification) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Specification", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10684,46 +11038,140 @@ func (s *GetApiInput) Validate() error { } // SetApiId sets the ApiId field's value. -func (s *GetApiInput) SetApiId(v string) *GetApiInput { +func (s *ExportApiInput) SetApiId(v string) *ExportApiInput { s.ApiId = &v return s } -type GetApiMappingInput struct { - _ struct{} `type:"structure"` +// SetExportVersion sets the ExportVersion field's value. +func (s *ExportApiInput) SetExportVersion(v string) *ExportApiInput { + s.ExportVersion = &v + return s +} - // ApiMappingId is a required field - ApiMappingId *string `location:"uri" locationName:"apiMappingId" type:"string" required:"true"` +// SetIncludeExtensions sets the IncludeExtensions field's value. +func (s *ExportApiInput) SetIncludeExtensions(v bool) *ExportApiInput { + s.IncludeExtensions = &v + return s +} - // DomainName is a required field - DomainName *string `location:"uri" locationName:"domainName" type:"string" required:"true"` +// SetOutputType sets the OutputType field's value. +func (s *ExportApiInput) SetOutputType(v string) *ExportApiInput { + s.OutputType = &v + return s +} + +// SetSpecification sets the Specification field's value. +func (s *ExportApiInput) SetSpecification(v string) *ExportApiInput { + s.Specification = &v + return s +} + +// SetStageName sets the StageName field's value. +func (s *ExportApiInput) SetStageName(v string) *ExportApiInput { + s.StageName = &v + return s +} + +type ExportApiOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // Represents an exported definition of an API in a particular output format, + // for example, YAML. The API is serialized to the requested specification, + // for example, OpenAPI 3.0. + Body []byte `locationName:"body" type:"blob"` } // String returns the string representation -func (s GetApiMappingInput) String() string { +func (s ExportApiOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetApiMappingInput) GoString() string { +func (s ExportApiOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetApiMappingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetApiMappingInput"} - if s.ApiMappingId == nil { - invalidParams.Add(request.NewErrParamRequired("ApiMappingId")) - } - if s.ApiMappingId != nil && len(*s.ApiMappingId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ApiMappingId", 1)) - } - if s.DomainName == nil { - invalidParams.Add(request.NewErrParamRequired("DomainName")) - } - if s.DomainName != nil && len(*s.DomainName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) - } +// SetBody sets the Body field's value. 
+func (s *ExportApiOutput) SetBody(v []byte) *ExportApiOutput { + s.Body = v + return s +} + +type GetApiInput struct { + _ struct{} `type:"structure"` + + // ApiId is a required field + ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetApiInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApiInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetApiInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetApiInput"} + if s.ApiId == nil { + invalidParams.Add(request.NewErrParamRequired("ApiId")) + } + if s.ApiId != nil && len(*s.ApiId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApiId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApiId sets the ApiId field's value. +func (s *GetApiInput) SetApiId(v string) *GetApiInput { + s.ApiId = &v + return s +} + +type GetApiMappingInput struct { + _ struct{} `type:"structure"` + + // ApiMappingId is a required field + ApiMappingId *string `location:"uri" locationName:"apiMappingId" type:"string" required:"true"` + + // DomainName is a required field + DomainName *string `location:"uri" locationName:"domainName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetApiMappingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetApiMappingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetApiMappingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetApiMappingInput"} + if s.ApiMappingId == nil { + invalidParams.Add(request.NewErrParamRequired("ApiMappingId")) + } + if s.ApiMappingId != nil && len(*s.ApiMappingId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApiMappingId", 1)) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10889,6 +11337,8 @@ type GetApiOutput struct { ApiEndpoint *string `locationName:"apiEndpoint" type:"string"` + ApiGatewayManaged *bool `locationName:"apiGatewayManaged" type:"boolean"` + // The identifier. ApiId *string `locationName:"apiId" type:"string"` @@ -10907,6 +11357,8 @@ type GetApiOutput struct { // A string with a length between [0-1024]. Description *string `locationName:"description" type:"string"` + DisableExecuteApiEndpoint *bool `locationName:"disableExecuteApiEndpoint" type:"boolean"` + DisableSchemaValidation *bool `locationName:"disableSchemaValidation" type:"boolean"` ImportInfo []*string `locationName:"importInfo" type:"list"` @@ -10947,6 +11399,12 @@ func (s *GetApiOutput) SetApiEndpoint(v string) *GetApiOutput { return s } +// SetApiGatewayManaged sets the ApiGatewayManaged field's value. +func (s *GetApiOutput) SetApiGatewayManaged(v bool) *GetApiOutput { + s.ApiGatewayManaged = &v + return s +} + // SetApiId sets the ApiId field's value. 
func (s *GetApiOutput) SetApiId(v string) *GetApiOutput { s.ApiId = &v @@ -10977,6 +11435,12 @@ func (s *GetApiOutput) SetDescription(v string) *GetApiOutput { return s } +// SetDisableExecuteApiEndpoint sets the DisableExecuteApiEndpoint field's value. +func (s *GetApiOutput) SetDisableExecuteApiEndpoint(v bool) *GetApiOutput { + s.DisableExecuteApiEndpoint = &v + return s +} + // SetDisableSchemaValidation sets the DisableSchemaValidation field's value. func (s *GetApiOutput) SetDisableSchemaValidation(v bool) *GetApiOutput { s.DisableSchemaValidation = &v @@ -11150,17 +11614,22 @@ type GetAuthorizerOutput struct { // The identifier. AuthorizerId *string `locationName:"authorizerId" type:"string"` + // A string with a length between [1-64]. + AuthorizerPayloadFormatVersion *string `locationName:"authorizerPayloadFormatVersion" type:"string"` + // An integer with a value between [0-3600]. AuthorizerResultTtlInSeconds *int64 `locationName:"authorizerResultTtlInSeconds" type:"integer"` - // The authorizer type. For WebSocket APIs, specify REQUEST for a Lambda function - // using incoming request parameters. For HTTP APIs, specify JWT to use JSON - // Web Tokens. + // The authorizer type. Specify REQUEST for a Lambda function using incoming + // request parameters. Specify JWT to use JSON Web Tokens (supported only for + // HTTP APIs). AuthorizerType *string `locationName:"authorizerType" type:"string" enum:"AuthorizerType"` // A string representation of a URI with a length between [1-2048]. AuthorizerUri *string `locationName:"authorizerUri" type:"string"` + EnableSimpleResponses *bool `locationName:"enableSimpleResponses" type:"boolean"` + // The identity source for which authorization is requested. For the REQUEST // authorizer, this is required when authorization caching is enabled. The value // is a comma-separated string of one or more mapping expressions of the specified @@ -11209,6 +11678,12 @@ func (s *GetAuthorizerOutput) SetAuthorizerId(v string) *GetAuthorizerOutput { return s } +// SetAuthorizerPayloadFormatVersion sets the AuthorizerPayloadFormatVersion field's value. +func (s *GetAuthorizerOutput) SetAuthorizerPayloadFormatVersion(v string) *GetAuthorizerOutput { + s.AuthorizerPayloadFormatVersion = &v + return s +} + // SetAuthorizerResultTtlInSeconds sets the AuthorizerResultTtlInSeconds field's value. func (s *GetAuthorizerOutput) SetAuthorizerResultTtlInSeconds(v int64) *GetAuthorizerOutput { s.AuthorizerResultTtlInSeconds = &v @@ -11227,6 +11702,12 @@ func (s *GetAuthorizerOutput) SetAuthorizerUri(v string) *GetAuthorizerOutput { return s } +// SetEnableSimpleResponses sets the EnableSimpleResponses field's value. +func (s *GetAuthorizerOutput) SetEnableSimpleResponses(v bool) *GetAuthorizerOutput { + s.EnableSimpleResponses = &v + return s +} + // SetIdentitySource sets the IdentitySource field's value. func (s *GetAuthorizerOutput) SetIdentitySource(v []*string) *GetAuthorizerOutput { s.IdentitySource = v @@ -11597,6 +12078,11 @@ type GetDomainNameOutput struct { // The domain name configurations. DomainNameConfigurations []*DomainNameConfiguration `locationName:"domainNameConfigurations" type:"list"` + // If specified, API Gateway performs two-way authentication between the client + // and the server. Clients must present a trusted certificate to access your + // API. + MutualTlsAuthentication *MutualTlsAuthentication `locationName:"mutualTlsAuthentication" type:"structure"` + // Represents a collection of tags associated with the resource. 
Tags map[string]*string `locationName:"tags" type:"map"` } @@ -11629,6 +12115,12 @@ func (s *GetDomainNameOutput) SetDomainNameConfigurations(v []*DomainNameConfigu return s } +// SetMutualTlsAuthentication sets the MutualTlsAuthentication field's value. +func (s *GetDomainNameOutput) SetMutualTlsAuthentication(v *MutualTlsAuthentication) *GetDomainNameOutput { + s.MutualTlsAuthentication = v + return s +} + // SetTags sets the Tags field's value. func (s *GetDomainNameOutput) SetTags(v map[string]*string) *GetDomainNameOutput { s.Tags = v @@ -11783,6 +12275,9 @@ type GetIntegrationOutput struct { // for more information. IntegrationResponseSelectionExpression *string `locationName:"integrationResponseSelectionExpression" type:"string"` + // A string with a length between [1-128]. + IntegrationSubtype *string `locationName:"integrationSubtype" type:"string"` + // Represents an API method integration type. IntegrationType *string `locationName:"integrationType" type:"string" enum:"IntegrationType"` @@ -11818,7 +12313,7 @@ type GetIntegrationOutput struct { // for more information. TemplateSelectionExpression *string `locationName:"templateSelectionExpression" type:"string"` - // An integer with a value between [50-29000]. + // An integer with a value between [50-30000]. TimeoutInMillis *int64 `locationName:"timeoutInMillis" min:"50" type:"integer"` // The TLS configuration for a private integration. If you specify a TLS configuration, @@ -11891,6 +12386,12 @@ func (s *GetIntegrationOutput) SetIntegrationResponseSelectionExpression(v strin return s } +// SetIntegrationSubtype sets the IntegrationSubtype field's value. +func (s *GetIntegrationOutput) SetIntegrationSubtype(v string) *GetIntegrationOutput { + s.IntegrationSubtype = &v + return s +} + // SetIntegrationType sets the IntegrationType field's value. func (s *GetIntegrationOutput) SetIntegrationType(v string) *GetIntegrationOutput { s.IntegrationType = &v @@ -12637,8 +13138,9 @@ type GetRouteOutput struct { // The authorization type. For WebSocket APIs, valid values are NONE for open // access, AWS_IAM for using AWS IAM permissions, and CUSTOM for using a Lambda - // authorizer. For HTTP APIs, valid values are NONE for open access, or JWT - // for using JSON Web Tokens. + // authorizer. For HTTP APIs, valid values are NONE for open access, JWT for + // using JSON Web Tokens, AWS_IAM for using AWS IAM permissions, and CUSTOM + // for using a Lambda authorizer. AuthorizationType *string `locationName:"authorizationType" type:"string" enum:"AuthorizationType"` // The identifier. @@ -13678,6 +14180,8 @@ type ImportApiOutput struct { ApiEndpoint *string `locationName:"apiEndpoint" type:"string"` + ApiGatewayManaged *bool `locationName:"apiGatewayManaged" type:"boolean"` + // The identifier. ApiId *string `locationName:"apiId" type:"string"` @@ -13696,6 +14200,8 @@ type ImportApiOutput struct { // A string with a length between [0-1024]. Description *string `locationName:"description" type:"string"` + DisableExecuteApiEndpoint *bool `locationName:"disableExecuteApiEndpoint" type:"boolean"` + DisableSchemaValidation *bool `locationName:"disableSchemaValidation" type:"boolean"` ImportInfo []*string `locationName:"importInfo" type:"list"` @@ -13736,6 +14242,12 @@ func (s *ImportApiOutput) SetApiEndpoint(v string) *ImportApiOutput { return s } +// SetApiGatewayManaged sets the ApiGatewayManaged field's value. 
+func (s *ImportApiOutput) SetApiGatewayManaged(v bool) *ImportApiOutput { + s.ApiGatewayManaged = &v + return s +} + // SetApiId sets the ApiId field's value. func (s *ImportApiOutput) SetApiId(v string) *ImportApiOutput { s.ApiId = &v @@ -13766,6 +14278,12 @@ func (s *ImportApiOutput) SetDescription(v string) *ImportApiOutput { return s } +// SetDisableExecuteApiEndpoint sets the DisableExecuteApiEndpoint field's value. +func (s *ImportApiOutput) SetDisableExecuteApiEndpoint(v bool) *ImportApiOutput { + s.DisableExecuteApiEndpoint = &v + return s +} + // SetDisableSchemaValidation sets the DisableSchemaValidation field's value. func (s *ImportApiOutput) SetDisableSchemaValidation(v bool) *ImportApiOutput { s.DisableSchemaValidation = &v @@ -13868,6 +14386,10 @@ type Integration struct { // only for WebSocket APIs. See Integration Response Selection Expressions (https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-websocket-api-selection-expressions.html#apigateway-websocket-api-integration-response-selection-expressions). IntegrationResponseSelectionExpression *string `locationName:"integrationResponseSelectionExpression" type:"string"` + // Supported only for HTTP API AWS_PROXY integrations. Specifies the AWS service + // action to invoke. To learn more, see Integration subtype reference (https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-aws-services-reference.html). + IntegrationSubtype *string `locationName:"integrationSubtype" type:"string"` + // The integration type of an integration. One of the following: // // AWS: for integrating the route or method request with an AWS service action, @@ -13876,9 +14398,9 @@ type Integration struct { // AWS service action, this is known as AWS integration. Supported only for // WebSocket APIs. // - // AWS_PROXY: for integrating the route or method request with the Lambda function-invoking - // action with the client request passed through as-is. This integration is - // also referred to as Lambda proxy integration. + // AWS_PROXY: for integrating the route or method request with a Lambda function + // or other AWS service action. This integration is also referred to as a Lambda + // proxy integration. // // HTTP: for integrating the route or method request with an HTTP endpoint. // This integration is also referred to as the HTTP custom integration. Supported @@ -13900,7 +14422,7 @@ type Integration struct { // Balancer listener, Network Load Balancer listener, or AWS Cloud Map service. // If you specify the ARN of an AWS Cloud Map service, API Gateway uses DiscoverInstances // to identify resources. You can use query parameters to target specific resources. - // To learn more, see DiscoverInstances (https://alpha-docs-aws.amazon.com/cloud-map/latest/api/API_DiscoverInstances.html). + // To learn more, see DiscoverInstances (https://docs.aws.amazon.com/cloud-map/latest/api/API_DiscoverInstances.html). // For private integrations, all resources must be owned by the same AWS account. IntegrationUri *string `locationName:"integrationUri" type:"string"` @@ -13926,14 +14448,20 @@ type Integration struct { // HTTP APIs. PayloadFormatVersion *string `locationName:"payloadFormatVersion" type:"string"` - // A key-value map specifying request parameters that are passed from the method - // request to the backend. 
The key is an integration request parameter name - // and the associated value is a method request parameter value or static value - // that must be enclosed within single quotes and pre-encoded as required by - // the backend. The method request parameter value must match the pattern of - // method.request.{location}.{name} , where {location} is querystring, path, - // or header; and {name} must be a valid and unique method request parameter - // name. Supported only for WebSocket APIs. + // For WebSocket APIs, a key-value map specifying request parameters that are + // passed from the method request to the backend. The key is an integration + // request parameter name and the associated value is a method request parameter + // value or static value that must be enclosed within single quotes and pre-encoded + // as required by the backend. The method request parameter value must match + // the pattern of method.request.{location}.{name} , where {location} is querystring, + // path, or header; and {name} must be a valid and unique method request parameter + // name. + // + // For HTTP APIs, request parameters are a key-value map specifying parameters + // that are passed to AWS_PROXY integrations with a specified integrationSubtype. + // You can provide static values, or map request data, stage variables, or context + // variables that are evaluated at runtime. To learn more, see Working with + // AWS service integrations for HTTP APIs (https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-aws-services.html). RequestParameters map[string]*string `locationName:"requestParameters" type:"map"` // Represents a map of Velocity templates that are applied on the request payload @@ -13946,9 +14474,9 @@ type Integration struct { // WebSocket APIs. TemplateSelectionExpression *string `locationName:"templateSelectionExpression" type:"string"` - // Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 - // milliseconds or 29 seconds for WebSocket APIs. The default value is 5,000 - // milliseconds, or 5 seconds for HTTP APIs. + // Custom timeout between 50 and 29,000 milliseconds for WebSocket APIs and + // between 50 and 30,000 milliseconds for HTTP APIs. The default timeout is + // 29 seconds for WebSocket APIs and 30 seconds for HTTP APIs. TimeoutInMillis *int64 `locationName:"timeoutInMillis" min:"50" type:"integer"` // The TLS configuration for a private integration. If you specify a TLS configuration, @@ -14021,6 +14549,12 @@ func (s *Integration) SetIntegrationResponseSelectionExpression(v string) *Integ return s } +// SetIntegrationSubtype sets the IntegrationSubtype field's value. +func (s *Integration) SetIntegrationSubtype(v string) *Integration { + s.IntegrationSubtype = &v + return s +} + // SetIntegrationType sets the IntegrationType field's value. func (s *Integration) SetIntegrationType(v string) *Integration { s.IntegrationType = &v @@ -14271,11 +14805,105 @@ func (s *Model) SetSchema(v string) *Model { return s } +// If specified, API Gateway performs two-way authentication between the client +// and the server. Clients must present a trusted certificate to access your +// API. +type MutualTlsAuthentication struct { + _ struct{} `type:"structure"` + + // An Amazon S3 URL that specifies the truststore for mutual TLS authentication, + // for example, s3://bucket-name/key-name. The truststore can contain certificates + // from public or private certificate authorities. 
To update the truststore, + // upload a new version to S3, and then update your custom domain name to use + // the new version. To update the truststore, you must have permissions to access + // the S3 object. + TruststoreUri *string `locationName:"truststoreUri" type:"string"` + + // The version of the S3 object that contains your truststore. To specify a + // version, you must have versioning enabled for the S3 bucket. + TruststoreVersion *string `locationName:"truststoreVersion" type:"string"` + + // A list of warnings that API Gateway returns while processing your truststore. + // Invalid certificates produce warnings. Mutual TLS is still enabled, but some + // clients might not be able to access your API. To resolve warnings, upload + // a new truststore to S3, and then update you domain name to use the new version. + TruststoreWarnings []*string `locationName:"truststoreWarnings" type:"list"` +} + +// String returns the string representation +func (s MutualTlsAuthentication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MutualTlsAuthentication) GoString() string { + return s.String() +} + +// SetTruststoreUri sets the TruststoreUri field's value. +func (s *MutualTlsAuthentication) SetTruststoreUri(v string) *MutualTlsAuthentication { + s.TruststoreUri = &v + return s +} + +// SetTruststoreVersion sets the TruststoreVersion field's value. +func (s *MutualTlsAuthentication) SetTruststoreVersion(v string) *MutualTlsAuthentication { + s.TruststoreVersion = &v + return s +} + +// SetTruststoreWarnings sets the TruststoreWarnings field's value. +func (s *MutualTlsAuthentication) SetTruststoreWarnings(v []*string) *MutualTlsAuthentication { + s.TruststoreWarnings = v + return s +} + +// If specified, API Gateway performs two-way authentication between the client +// and the server. Clients must present a trusted certificate to access your +// API. +type MutualTlsAuthenticationInput struct { + _ struct{} `type:"structure"` + + // An Amazon S3 URL that specifies the truststore for mutual TLS authentication, + // for example, s3://bucket-name/key-name. The truststore can contain certificates + // from public or private certificate authorities. To update the truststore, + // upload a new version to S3, and then update your custom domain name to use + // the new version. To update the truststore, you must have permissions to access + // the S3 object. + TruststoreUri *string `locationName:"truststoreUri" type:"string"` + + // The version of the S3 object that contains your truststore. To specify a + // version, you must have versioning enabled for the S3 bucket. + TruststoreVersion *string `locationName:"truststoreVersion" type:"string"` +} + +// String returns the string representation +func (s MutualTlsAuthenticationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MutualTlsAuthenticationInput) GoString() string { + return s.String() +} + +// SetTruststoreUri sets the TruststoreUri field's value. +func (s *MutualTlsAuthenticationInput) SetTruststoreUri(v string) *MutualTlsAuthenticationInput { + s.TruststoreUri = &v + return s +} + +// SetTruststoreVersion sets the TruststoreVersion field's value. +func (s *MutualTlsAuthenticationInput) SetTruststoreVersion(v string) *MutualTlsAuthenticationInput { + s.TruststoreVersion = &v + return s +} + // The resource specified in the request was not found. See the message field // for more information. 
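Truststore problems surface as warnings on the domain name rather than as API errors, so after uploading or rotating a truststore it is worth reading them back. A small sketch using only fields shown in this file (svc and the domain name as before):

    dn, err := svc.GetDomainName(&apigatewayv2.GetDomainNameInput{
        DomainName: aws.String("api.example.com"),
    })
    if err == nil && dn.MutualTlsAuthentication != nil {
        for _, w := range dn.MutualTlsAuthentication.TruststoreWarnings {
            log.Printf("truststore warning: %s", aws.StringValue(w))
        }
    }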
type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Describes the error encountered. Message_ *string `locationName:"message" type:"string"` @@ -14296,17 +14924,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14314,22 +14942,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Validation constraints imposed on parameters of a request (path, query string, @@ -14429,6 +15057,8 @@ type ReimportApiOutput struct { ApiEndpoint *string `locationName:"apiEndpoint" type:"string"` + ApiGatewayManaged *bool `locationName:"apiGatewayManaged" type:"boolean"` + // The identifier. ApiId *string `locationName:"apiId" type:"string"` @@ -14447,6 +15077,8 @@ type ReimportApiOutput struct { // A string with a length between [0-1024]. Description *string `locationName:"description" type:"string"` + DisableExecuteApiEndpoint *bool `locationName:"disableExecuteApiEndpoint" type:"boolean"` + DisableSchemaValidation *bool `locationName:"disableSchemaValidation" type:"boolean"` ImportInfo []*string `locationName:"importInfo" type:"list"` @@ -14487,6 +15119,12 @@ func (s *ReimportApiOutput) SetApiEndpoint(v string) *ReimportApiOutput { return s } +// SetApiGatewayManaged sets the ApiGatewayManaged field's value. +func (s *ReimportApiOutput) SetApiGatewayManaged(v bool) *ReimportApiOutput { + s.ApiGatewayManaged = &v + return s +} + // SetApiId sets the ApiId field's value. func (s *ReimportApiOutput) SetApiId(v string) *ReimportApiOutput { s.ApiId = &v @@ -14517,6 +15155,12 @@ func (s *ReimportApiOutput) SetDescription(v string) *ReimportApiOutput { return s } +// SetDisableExecuteApiEndpoint sets the DisableExecuteApiEndpoint field's value. +func (s *ReimportApiOutput) SetDisableExecuteApiEndpoint(v bool) *ReimportApiOutput { + s.DisableExecuteApiEndpoint = &v + return s +} + // SetDisableSchemaValidation sets the DisableSchemaValidation field's value. 
func (s *ReimportApiOutput) SetDisableSchemaValidation(v bool) *ReimportApiOutput { s.DisableSchemaValidation = &v @@ -14565,6 +15209,74 @@ func (s *ReimportApiOutput) SetWarnings(v []*string) *ReimportApiOutput { return s } +type ResetAuthorizersCacheInput struct { + _ struct{} `type:"structure"` + + // ApiId is a required field + ApiId *string `location:"uri" locationName:"apiId" type:"string" required:"true"` + + // StageName is a required field + StageName *string `location:"uri" locationName:"stageName" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResetAuthorizersCacheInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetAuthorizersCacheInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetAuthorizersCacheInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetAuthorizersCacheInput"} + if s.ApiId == nil { + invalidParams.Add(request.NewErrParamRequired("ApiId")) + } + if s.ApiId != nil && len(*s.ApiId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ApiId", 1)) + } + if s.StageName == nil { + invalidParams.Add(request.NewErrParamRequired("StageName")) + } + if s.StageName != nil && len(*s.StageName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StageName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApiId sets the ApiId field's value. +func (s *ResetAuthorizersCacheInput) SetApiId(v string) *ResetAuthorizersCacheInput { + s.ApiId = &v + return s +} + +// SetStageName sets the StageName field's value. +func (s *ResetAuthorizersCacheInput) SetStageName(v string) *ResetAuthorizersCacheInput { + s.StageName = &v + return s +} + +type ResetAuthorizersCacheOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResetAuthorizersCacheOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetAuthorizersCacheOutput) GoString() string { + return s.String() +} + // Represents a route. type Route struct { _ struct{} `type:"structure"` @@ -14590,7 +15302,8 @@ type Route struct { // The authorization type for the route. For WebSocket APIs, valid values are // NONE for open access, AWS_IAM for using AWS IAM permissions, and CUSTOM for // using a Lambda authorizer For HTTP APIs, valid values are NONE for open access, - // or JWT for using JSON Web Tokens. + // JWT for using JSON Web Tokens, AWS_IAM for using AWS IAM permissions, and + // CUSTOM for using a Lambda authorizer. AuthorizationType *string `locationName:"authorizationType" type:"string" enum:"AuthorizationType"` // The identifier of the Authorizer resource to be associated with this route. @@ -15113,8 +15826,8 @@ func (s *TlsConfigInput) SetServerNameToVerify(v string) *TlsConfigInput { // A limit has been exceeded. See the accompanying error message for details. 
type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` LimitType *string `locationName:"limitType" type:"string"` @@ -15133,17 +15846,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15151,22 +15864,22 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -15256,6 +15969,8 @@ type UpdateApiInput struct { // A string with a length between [0-1024]. Description *string `locationName:"description" type:"string"` + DisableExecuteApiEndpoint *bool `locationName:"disableExecuteApiEndpoint" type:"boolean"` + DisableSchemaValidation *bool `locationName:"disableSchemaValidation" type:"boolean"` // A string with a length between [1-128]. @@ -15341,6 +16056,12 @@ func (s *UpdateApiInput) SetDescription(v string) *UpdateApiInput { return s } +// SetDisableExecuteApiEndpoint sets the DisableExecuteApiEndpoint field's value. +func (s *UpdateApiInput) SetDisableExecuteApiEndpoint(v bool) *UpdateApiInput { + s.DisableExecuteApiEndpoint = &v + return s +} + // SetDisableSchemaValidation sets the DisableSchemaValidation field's value. func (s *UpdateApiInput) SetDisableSchemaValidation(v bool) *UpdateApiInput { s.DisableSchemaValidation = &v @@ -15526,6 +16247,8 @@ type UpdateApiOutput struct { ApiEndpoint *string `locationName:"apiEndpoint" type:"string"` + ApiGatewayManaged *bool `locationName:"apiGatewayManaged" type:"boolean"` + // The identifier. ApiId *string `locationName:"apiId" type:"string"` @@ -15544,6 +16267,8 @@ type UpdateApiOutput struct { // A string with a length between [0-1024]. 
Description *string `locationName:"description" type:"string"` + DisableExecuteApiEndpoint *bool `locationName:"disableExecuteApiEndpoint" type:"boolean"` + DisableSchemaValidation *bool `locationName:"disableSchemaValidation" type:"boolean"` ImportInfo []*string `locationName:"importInfo" type:"list"` @@ -15584,6 +16309,12 @@ func (s *UpdateApiOutput) SetApiEndpoint(v string) *UpdateApiOutput { return s } +// SetApiGatewayManaged sets the ApiGatewayManaged field's value. +func (s *UpdateApiOutput) SetApiGatewayManaged(v bool) *UpdateApiOutput { + s.ApiGatewayManaged = &v + return s +} + // SetApiId sets the ApiId field's value. func (s *UpdateApiOutput) SetApiId(v string) *UpdateApiOutput { s.ApiId = &v @@ -15614,6 +16345,12 @@ func (s *UpdateApiOutput) SetDescription(v string) *UpdateApiOutput { return s } +// SetDisableExecuteApiEndpoint sets the DisableExecuteApiEndpoint field's value. +func (s *UpdateApiOutput) SetDisableExecuteApiEndpoint(v bool) *UpdateApiOutput { + s.DisableExecuteApiEndpoint = &v + return s +} + // SetDisableSchemaValidation sets the DisableSchemaValidation field's value. func (s *UpdateApiOutput) SetDisableSchemaValidation(v bool) *UpdateApiOutput { s.DisableSchemaValidation = &v @@ -15674,17 +16411,22 @@ type UpdateAuthorizerInput struct { // AuthorizerId is a required field AuthorizerId *string `location:"uri" locationName:"authorizerId" type:"string" required:"true"` + // A string with a length between [1-64]. + AuthorizerPayloadFormatVersion *string `locationName:"authorizerPayloadFormatVersion" type:"string"` + // An integer with a value between [0-3600]. AuthorizerResultTtlInSeconds *int64 `locationName:"authorizerResultTtlInSeconds" type:"integer"` - // The authorizer type. For WebSocket APIs, specify REQUEST for a Lambda function - // using incoming request parameters. For HTTP APIs, specify JWT to use JSON - // Web Tokens. + // The authorizer type. Specify REQUEST for a Lambda function using incoming + // request parameters. Specify JWT to use JSON Web Tokens (supported only for + // HTTP APIs). AuthorizerType *string `locationName:"authorizerType" type:"string" enum:"AuthorizerType"` // A string representation of a URI with a length between [1-2048]. AuthorizerUri *string `locationName:"authorizerUri" type:"string"` + EnableSimpleResponses *bool `locationName:"enableSimpleResponses" type:"boolean"` + // The identity source for which authorization is requested. For the REQUEST // authorizer, this is required when authorization caching is enabled. The value // is a comma-separated string of one or more mapping expressions of the specified @@ -15761,6 +16503,12 @@ func (s *UpdateAuthorizerInput) SetAuthorizerId(v string) *UpdateAuthorizerInput return s } +// SetAuthorizerPayloadFormatVersion sets the AuthorizerPayloadFormatVersion field's value. +func (s *UpdateAuthorizerInput) SetAuthorizerPayloadFormatVersion(v string) *UpdateAuthorizerInput { + s.AuthorizerPayloadFormatVersion = &v + return s +} + // SetAuthorizerResultTtlInSeconds sets the AuthorizerResultTtlInSeconds field's value. func (s *UpdateAuthorizerInput) SetAuthorizerResultTtlInSeconds(v int64) *UpdateAuthorizerInput { s.AuthorizerResultTtlInSeconds = &v @@ -15779,6 +16527,12 @@ func (s *UpdateAuthorizerInput) SetAuthorizerUri(v string) *UpdateAuthorizerInpu return s } +// SetEnableSimpleResponses sets the EnableSimpleResponses field's value. 
+func (s *UpdateAuthorizerInput) SetEnableSimpleResponses(v bool) *UpdateAuthorizerInput { + s.EnableSimpleResponses = &v + return s +} + // SetIdentitySource sets the IdentitySource field's value. func (s *UpdateAuthorizerInput) SetIdentitySource(v []*string) *UpdateAuthorizerInput { s.IdentitySource = v @@ -15812,17 +16566,22 @@ type UpdateAuthorizerOutput struct { // The identifier. AuthorizerId *string `locationName:"authorizerId" type:"string"` + // A string with a length between [1-64]. + AuthorizerPayloadFormatVersion *string `locationName:"authorizerPayloadFormatVersion" type:"string"` + // An integer with a value between [0-3600]. AuthorizerResultTtlInSeconds *int64 `locationName:"authorizerResultTtlInSeconds" type:"integer"` - // The authorizer type. For WebSocket APIs, specify REQUEST for a Lambda function - // using incoming request parameters. For HTTP APIs, specify JWT to use JSON - // Web Tokens. + // The authorizer type. Specify REQUEST for a Lambda function using incoming + // request parameters. Specify JWT to use JSON Web Tokens (supported only for + // HTTP APIs). AuthorizerType *string `locationName:"authorizerType" type:"string" enum:"AuthorizerType"` // A string representation of a URI with a length between [1-2048]. AuthorizerUri *string `locationName:"authorizerUri" type:"string"` + EnableSimpleResponses *bool `locationName:"enableSimpleResponses" type:"boolean"` + // The identity source for which authorization is requested. For the REQUEST // authorizer, this is required when authorization caching is enabled. The value // is a comma-separated string of one or more mapping expressions of the specified @@ -15871,6 +16630,12 @@ func (s *UpdateAuthorizerOutput) SetAuthorizerId(v string) *UpdateAuthorizerOutp return s } +// SetAuthorizerPayloadFormatVersion sets the AuthorizerPayloadFormatVersion field's value. +func (s *UpdateAuthorizerOutput) SetAuthorizerPayloadFormatVersion(v string) *UpdateAuthorizerOutput { + s.AuthorizerPayloadFormatVersion = &v + return s +} + // SetAuthorizerResultTtlInSeconds sets the AuthorizerResultTtlInSeconds field's value. func (s *UpdateAuthorizerOutput) SetAuthorizerResultTtlInSeconds(v int64) *UpdateAuthorizerOutput { s.AuthorizerResultTtlInSeconds = &v @@ -15889,6 +16654,12 @@ func (s *UpdateAuthorizerOutput) SetAuthorizerUri(v string) *UpdateAuthorizerOut return s } +// SetEnableSimpleResponses sets the EnableSimpleResponses field's value. +func (s *UpdateAuthorizerOutput) SetEnableSimpleResponses(v bool) *UpdateAuthorizerOutput { + s.EnableSimpleResponses = &v + return s +} + // SetIdentitySource sets the IdentitySource field's value. func (s *UpdateAuthorizerOutput) SetIdentitySource(v []*string) *UpdateAuthorizerOutput { s.IdentitySource = v @@ -16049,6 +16820,11 @@ type UpdateDomainNameInput struct { // The domain name configurations. DomainNameConfigurations []*DomainNameConfiguration `locationName:"domainNameConfigurations" type:"list"` + + // If specified, API Gateway performs two-way authentication between the client + // and the server. Clients must present a trusted certificate to access your + // API. + MutualTlsAuthentication *MutualTlsAuthenticationInput `locationName:"mutualTlsAuthentication" type:"structure"` } // String returns the string representation @@ -16089,6 +16865,12 @@ func (s *UpdateDomainNameInput) SetDomainNameConfigurations(v []*DomainNameConfi return s } +// SetMutualTlsAuthentication sets the MutualTlsAuthentication field's value. 
+func (s *UpdateDomainNameInput) SetMutualTlsAuthentication(v *MutualTlsAuthenticationInput) *UpdateDomainNameInput { + s.MutualTlsAuthentication = v + return s +} + type UpdateDomainNameOutput struct { _ struct{} `type:"structure"` @@ -16103,6 +16885,11 @@ type UpdateDomainNameOutput struct { // The domain name configurations. DomainNameConfigurations []*DomainNameConfiguration `locationName:"domainNameConfigurations" type:"list"` + // If specified, API Gateway performs two-way authentication between the client + // and the server. Clients must present a trusted certificate to access your + // API. + MutualTlsAuthentication *MutualTlsAuthentication `locationName:"mutualTlsAuthentication" type:"structure"` + // Represents a collection of tags associated with the resource. Tags map[string]*string `locationName:"tags" type:"map"` } @@ -16135,6 +16922,12 @@ func (s *UpdateDomainNameOutput) SetDomainNameConfigurations(v []*DomainNameConf return s } +// SetMutualTlsAuthentication sets the MutualTlsAuthentication field's value. +func (s *UpdateDomainNameOutput) SetMutualTlsAuthentication(v *MutualTlsAuthentication) *UpdateDomainNameOutput { + s.MutualTlsAuthentication = v + return s +} + // SetTags sets the Tags field's value. func (s *UpdateDomainNameOutput) SetTags(v map[string]*string) *UpdateDomainNameOutput { s.Tags = v @@ -16169,6 +16962,9 @@ type UpdateIntegrationInput struct { // A string with a length between [1-64]. IntegrationMethod *string `locationName:"integrationMethod" type:"string"` + // A string with a length between [1-128]. + IntegrationSubtype *string `locationName:"integrationSubtype" type:"string"` + // Represents an API method integration type. IntegrationType *string `locationName:"integrationType" type:"string" enum:"IntegrationType"` @@ -16204,7 +17000,7 @@ type UpdateIntegrationInput struct { // for more information. TemplateSelectionExpression *string `locationName:"templateSelectionExpression" type:"string"` - // An integer with a value between [50-29000]. + // An integer with a value between [50-30000]. TimeoutInMillis *int64 `locationName:"timeoutInMillis" min:"50" type:"integer"` // The TLS configuration for a private integration. If you specify a TLS configuration, @@ -16296,6 +17092,12 @@ func (s *UpdateIntegrationInput) SetIntegrationMethod(v string) *UpdateIntegrati return s } +// SetIntegrationSubtype sets the IntegrationSubtype field's value. +func (s *UpdateIntegrationInput) SetIntegrationSubtype(v string) *UpdateIntegrationInput { + s.IntegrationSubtype = &v + return s +} + // SetIntegrationType sets the IntegrationType field's value. func (s *UpdateIntegrationInput) SetIntegrationType(v string) *UpdateIntegrationInput { s.IntegrationType = &v @@ -16382,6 +17184,9 @@ type UpdateIntegrationOutput struct { // for more information. IntegrationResponseSelectionExpression *string `locationName:"integrationResponseSelectionExpression" type:"string"` + // A string with a length between [1-128]. + IntegrationSubtype *string `locationName:"integrationSubtype" type:"string"` + // Represents an API method integration type. IntegrationType *string `locationName:"integrationType" type:"string" enum:"IntegrationType"` @@ -16417,7 +17222,7 @@ type UpdateIntegrationOutput struct { // for more information. TemplateSelectionExpression *string `locationName:"templateSelectionExpression" type:"string"` - // An integer with a value between [50-29000]. + // An integer with a value between [50-30000]. 
TimeoutInMillis *int64 `locationName:"timeoutInMillis" min:"50" type:"integer"` // The TLS configuration for a private integration. If you specify a TLS configuration, @@ -16490,6 +17295,12 @@ func (s *UpdateIntegrationOutput) SetIntegrationResponseSelectionExpression(v st return s } +// SetIntegrationSubtype sets the IntegrationSubtype field's value. +func (s *UpdateIntegrationOutput) SetIntegrationSubtype(v string) *UpdateIntegrationOutput { + s.IntegrationSubtype = &v + return s +} + // SetIntegrationType sets the IntegrationType field's value. func (s *UpdateIntegrationOutput) SetIntegrationType(v string) *UpdateIntegrationOutput { s.IntegrationType = &v @@ -16930,8 +17741,9 @@ type UpdateRouteInput struct { // The authorization type. For WebSocket APIs, valid values are NONE for open // access, AWS_IAM for using AWS IAM permissions, and CUSTOM for using a Lambda - // authorizer. For HTTP APIs, valid values are NONE for open access, or JWT - // for using JSON Web Tokens. + // authorizer. For HTTP APIs, valid values are NONE for open access, JWT for + // using JSON Web Tokens, AWS_IAM for using AWS IAM permissions, and CUSTOM + // for using a Lambda authorizer. AuthorizationType *string `locationName:"authorizationType" type:"string" enum:"AuthorizationType"` // The identifier. @@ -17098,8 +17910,9 @@ type UpdateRouteOutput struct { // The authorization type. For WebSocket APIs, valid values are NONE for open // access, AWS_IAM for using AWS IAM permissions, and CUSTOM for using a Lambda - // authorizer. For HTTP APIs, valid values are NONE for open access, or JWT - // for using JSON Web Tokens. + // authorizer. For HTTP APIs, valid values are NONE for open access, JWT for + // using JSON Web Tokens, AWS_IAM for using AWS IAM permissions, and CUSTOM + // for using a Lambda authorizer. AuthorizationType *string `locationName:"authorizationType" type:"string" enum:"AuthorizationType"` // The identifier. @@ -17910,8 +18723,9 @@ func (s *VpcLink) SetVpcLinkVersion(v string) *VpcLink { // The authorization type. For WebSocket APIs, valid values are NONE for open // access, AWS_IAM for using AWS IAM permissions, and CUSTOM for using a Lambda -// authorizer. For HTTP APIs, valid values are NONE for open access, or JWT -// for using JSON Web Tokens. +// authorizer. For HTTP APIs, valid values are NONE for open access, JWT for +// using JSON Web Tokens, AWS_IAM for using AWS IAM permissions, and CUSTOM +// for using a Lambda authorizer. const ( // AuthorizationTypeNone is a AuthorizationType enum value AuthorizationTypeNone = "NONE" @@ -17926,9 +18740,19 @@ const ( AuthorizationTypeJwt = "JWT" ) -// The authorizer type. For WebSocket APIs, specify REQUEST for a Lambda function -// using incoming request parameters. For HTTP APIs, specify JWT to use JSON -// Web Tokens. +// AuthorizationType_Values returns all elements of the AuthorizationType enum +func AuthorizationType_Values() []string { + return []string{ + AuthorizationTypeNone, + AuthorizationTypeAwsIam, + AuthorizationTypeCustom, + AuthorizationTypeJwt, + } +} + +// The authorizer type. Specify REQUEST for a Lambda function using incoming +// request parameters. Specify JWT to use JSON Web Tokens (supported only for +// HTTP APIs). 
const ( // AuthorizerTypeRequest is a AuthorizerType enum value AuthorizerTypeRequest = "REQUEST" @@ -17937,6 +18761,14 @@ const ( AuthorizerTypeJwt = "JWT" ) +// AuthorizerType_Values returns all elements of the AuthorizerType enum +func AuthorizerType_Values() []string { + return []string{ + AuthorizerTypeRequest, + AuthorizerTypeJwt, + } +} + // Represents a connection type. const ( // ConnectionTypeInternet is a ConnectionType enum value @@ -17946,6 +18778,14 @@ const ( ConnectionTypeVpcLink = "VPC_LINK" ) +// ConnectionType_Values returns all elements of the ConnectionType enum +func ConnectionType_Values() []string { + return []string{ + ConnectionTypeInternet, + ConnectionTypeVpcLink, + } +} + // Specifies how to handle response payload content type conversions. Supported // only for WebSocket APIs. const ( @@ -17956,6 +18796,14 @@ const ( ContentHandlingStrategyConvertToText = "CONVERT_TO_TEXT" ) +// ContentHandlingStrategy_Values returns all elements of the ContentHandlingStrategy enum +func ContentHandlingStrategy_Values() []string { + return []string{ + ContentHandlingStrategyConvertToBinary, + ContentHandlingStrategyConvertToText, + } +} + // Represents a deployment status. const ( // DeploymentStatusPending is a DeploymentStatus enum value @@ -17968,6 +18816,15 @@ const ( DeploymentStatusDeployed = "DEPLOYED" ) +// DeploymentStatus_Values returns all elements of the DeploymentStatus enum +func DeploymentStatus_Values() []string { + return []string{ + DeploymentStatusPending, + DeploymentStatusFailed, + DeploymentStatusDeployed, + } +} + // The status of the domain name migration. The valid values are AVAILABLE and // UPDATING. If the status is UPDATING, the domain cannot be modified further // until the existing operation is complete. If it is AVAILABLE, the domain @@ -17980,6 +18837,14 @@ const ( DomainNameStatusUpdating = "UPDATING" ) +// DomainNameStatus_Values returns all elements of the DomainNameStatus enum +func DomainNameStatus_Values() []string { + return []string{ + DomainNameStatusAvailable, + DomainNameStatusUpdating, + } +} + // Represents an endpoint type. const ( // EndpointTypeRegional is a EndpointType enum value @@ -17989,6 +18854,14 @@ const ( EndpointTypeEdge = "EDGE" ) +// EndpointType_Values returns all elements of the EndpointType enum +func EndpointType_Values() []string { + return []string{ + EndpointTypeRegional, + EndpointTypeEdge, + } +} + // Represents an API method integration type. const ( // IntegrationTypeAws is a IntegrationType enum value @@ -18007,6 +18880,17 @@ const ( IntegrationTypeAwsProxy = "AWS_PROXY" ) +// IntegrationType_Values returns all elements of the IntegrationType enum +func IntegrationType_Values() []string { + return []string{ + IntegrationTypeAws, + IntegrationTypeHttp, + IntegrationTypeMock, + IntegrationTypeHttpProxy, + IntegrationTypeAwsProxy, + } +} + // The logging level. const ( // LoggingLevelError is a LoggingLevel enum value @@ -18019,6 +18903,15 @@ const ( LoggingLevelOff = "OFF" ) +// LoggingLevel_Values returns all elements of the LoggingLevel enum +func LoggingLevel_Values() []string { + return []string{ + LoggingLevelError, + LoggingLevelInfo, + LoggingLevelOff, + } +} + // Represents passthrough behavior for an integration response. Supported only // for WebSocket APIs. 
const ( @@ -18032,6 +18925,15 @@ const ( PassthroughBehaviorWhenNoTemplates = "WHEN_NO_TEMPLATES" ) +// PassthroughBehavior_Values returns all elements of the PassthroughBehavior enum +func PassthroughBehavior_Values() []string { + return []string{ + PassthroughBehaviorWhenNoMatch, + PassthroughBehaviorNever, + PassthroughBehaviorWhenNoTemplates, + } +} + // Represents a protocol type. const ( // ProtocolTypeWebsocket is a ProtocolType enum value @@ -18041,6 +18943,14 @@ const ( ProtocolTypeHttp = "HTTP" ) +// ProtocolType_Values returns all elements of the ProtocolType enum +func ProtocolType_Values() []string { + return []string{ + ProtocolTypeWebsocket, + ProtocolTypeHttp, + } +} + // The Transport Layer Security (TLS) version of the security policy for this // domain name. The valid values are TLS_1_0 and TLS_1_2. const ( @@ -18051,6 +18961,14 @@ const ( SecurityPolicyTls12 = "TLS_1_2" ) +// SecurityPolicy_Values returns all elements of the SecurityPolicy enum +func SecurityPolicy_Values() []string { + return []string{ + SecurityPolicyTls10, + SecurityPolicyTls12, + } +} + // The status of the VPC link. const ( // VpcLinkStatusPending is a VpcLinkStatus enum value @@ -18069,8 +18987,26 @@ const ( VpcLinkStatusInactive = "INACTIVE" ) +// VpcLinkStatus_Values returns all elements of the VpcLinkStatus enum +func VpcLinkStatus_Values() []string { + return []string{ + VpcLinkStatusPending, + VpcLinkStatusAvailable, + VpcLinkStatusDeleting, + VpcLinkStatusFailed, + VpcLinkStatusInactive, + } +} + // The version of the VPC link. const ( // VpcLinkVersionV2 is a VpcLinkVersion enum value VpcLinkVersionV2 = "V2" ) + +// VpcLinkVersion_Values returns all elements of the VpcLinkVersion enum +func VpcLinkVersion_Values() []string { + return []string{ + VpcLinkVersionV2, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go index fb0c683ee..d248211d2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go index 78ddc3443..4cd69a330 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go @@ -69,8 +69,6 @@ func (c *ApplicationAutoScaling) DeleteScalingPolicyRequest(input *DeleteScaling // and Delete a Target Tracking Scaling Policy (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html#delete-target-tracking-policy) // in the Application Auto Scaling User Guide. // -// To create a scaling policy or update an existing one, see PutScalingPolicy. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
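[Editor's note on the generated enum helpers above] The new `*_Values()` functions added to the apigatewayv2 API exist so that callers no longer hand-maintain lists of valid enum strings. In a provider like this one they would typically feed a schema validator. The sketch below is illustrative only: the function name, attribute shape, and plugin-SDK import path are assumptions, not part of this patch.

package aws

import (
	"github.com/aws/aws-sdk-go/service/apigatewayv2"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// exampleAuthorizationTypeSchema sketches a route "authorization_type" attribute
// whose accepted values come straight from the generated enum helper, so a later
// SDK upgrade (e.g. new authorization types) updates validation automatically.
// Hypothetical example; the plugin-SDK major version used by this repo may differ.
func exampleAuthorizationTypeSchema() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Optional:     true,
		Default:      apigatewayv2.AuthorizationTypeNone,
		ValidateFunc: validation.StringInSlice(apigatewayv2.AuthorizationType_Values(), false),
	}
}
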
@@ -263,12 +261,12 @@ func (c *ApplicationAutoScaling) DeregisterScalableTargetRequest(input *Deregist // DeregisterScalableTarget API operation for Application Auto Scaling. // -// Deregisters an Application Auto Scaling scalable target. -// -// Deregistering a scalable target deletes the scaling policies that are associated -// with it. +// Deregisters an Application Auto Scaling scalable target when you have finished +// using it. To see which resources have been registered, use DescribeScalableTargets +// (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_DescribeScalableTargets.html). // -// To create a scalable target or update an existing one, see RegisterScalableTarget. +// Deregistering a scalable target deletes the scaling policies and the scheduled +// actions that are associated with it. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -372,10 +370,6 @@ func (c *ApplicationAutoScaling) DescribeScalableTargetsRequest(input *DescribeS // // You can filter the results using ResourceIds and ScalableDimension. // -// To create a scalable target or update an existing one, see RegisterScalableTarget. -// If you are no longer using a scalable target, you can deregister it using -// DeregisterScalableTarget. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -527,11 +521,6 @@ func (c *ApplicationAutoScaling) DescribeScalingActivitiesRequest(input *Describ // // You can filter the results using ResourceId and ScalableDimension. // -// Scaling activities are triggered by CloudWatch alarms that are associated -// with scaling policies. To view the scaling policies for a service namespace, -// see DescribeScalingPolicies. To create a scaling policy or update an existing -// one, see PutScalingPolicy. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -683,8 +672,9 @@ func (c *ApplicationAutoScaling) DescribeScalingPoliciesRequest(input *DescribeS // // You can filter the results using ResourceId, ScalableDimension, and PolicyNames. // -// To create a scaling policy or update an existing one, see PutScalingPolicy. -// If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy. +// For more information, see Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) +// and Step Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) +// in the Application Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -846,8 +836,8 @@ func (c *ApplicationAutoScaling) DescribeScheduledActionsRequest(input *Describe // You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames // parameters. // -// To create a scheduled action or update an existing one, see PutScheduledAction. -// If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction. 
+// For more information, see Scheduled Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html) +// in the Application Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -989,19 +979,13 @@ func (c *ApplicationAutoScaling) PutScalingPolicyRequest(input *PutScalingPolicy // PutScalingPolicy API operation for Application Auto Scaling. // -// Creates or updates a policy for an Application Auto Scaling scalable target. +// Creates or updates a scaling policy for an Application Auto Scaling scalable +// target. // // Each scalable target is identified by a service namespace, resource ID, and // scalable dimension. A scaling policy applies to the scalable target identified // by those three attributes. You cannot create a scaling policy until you have -// registered the resource as a scalable target using RegisterScalableTarget. -// -// To update a policy, specify its policy name and the parameters that you want -// to change. Any parameters that you don't specify are not changed by this -// update request. -// -// You can view the scaling policies for a service namespace using DescribeScalingPolicies. -// If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy. +// registered the resource as a scalable target. // // Multiple scaling policies can be in force at the same time for the same scalable // target. You can have one or more target tracking scaling policies, one or @@ -1014,8 +998,21 @@ func (c *ApplicationAutoScaling) PutScalingPolicyRequest(input *PutScalingPolicy // uses the policy with the highest calculated capacity (200% of 10 = 20) and // scales out to 30. // -// Learn more about how to work with scaling policies in the Application Auto -// Scaling User Guide (https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html). +// We recommend caution, however, when using target tracking scaling policies +// with step scaling policies because conflicts between these policies can cause +// undesirable behavior. For example, if the step scaling policy initiates a +// scale-in activity before the target tracking policy is ready to scale in, +// the scale-in activity will not be blocked. After the scale-in activity completes, +// the target tracking policy could instruct the scalable target to scale out +// again. +// +// For more information, see Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) +// and Step Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) +// in the Application Auto Scaling User Guide. +// +// If a scalable target is deregistered, the scalable target is no longer available +// to execute scaling policies. Any scaling policies that were specified for +// the scalable target are deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1128,18 +1125,21 @@ func (c *ApplicationAutoScaling) PutScheduledActionRequest(input *PutScheduledAc // Each scalable target is identified by a service namespace, resource ID, and // scalable dimension. 
A scheduled action applies to the scalable target identified // by those three attributes. You cannot create a scheduled action until you -// have registered the resource as a scalable target using RegisterScalableTarget. +// have registered the resource as a scalable target. // -// To update an action, specify its name and the parameters that you want to -// change. If you don't specify start and end times, the old values are deleted. -// Any other parameters that you don't specify are not changed by this update -// request. +// When start and end times are specified with a recurring schedule using a +// cron expression or rates, they form the boundaries of when the recurring +// action starts and stops. // -// You can view the scheduled actions using DescribeScheduledActions. If you -// are no longer using a scheduled action, you can delete it using DeleteScheduledAction. +// To update a scheduled action, specify the parameters that you want to change. +// If you don't specify start and end times, the old values are deleted. // -// Learn more about how to work with scheduled actions in the Application Auto -// Scaling User Guide (https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html). +// For more information, see Scheduled Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html) +// in the Application Auto Scaling User Guide. +// +// If a scalable target is deregistered, the scalable target is no longer available +// to run scheduled actions. Any scheduled actions that were specified for the +// scalable target are deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1238,26 +1238,29 @@ func (c *ApplicationAutoScaling) RegisterScalableTargetRequest(input *RegisterSc // RegisterScalableTarget API operation for Application Auto Scaling. // -// Registers or updates a scalable target. A scalable target is a resource that -// Application Auto Scaling can scale out and scale in. Scalable targets are -// uniquely identified by the combination of resource ID, scalable dimension, -// and namespace. +// Registers or updates a scalable target. // -// When you register a new scalable target, you must specify values for minimum -// and maximum capacity. Application Auto Scaling will not scale capacity to -// values that are outside of this range. +// A scalable target is a resource that Application Auto Scaling can scale out +// and scale in. Scalable targets are uniquely identified by the combination +// of resource ID, scalable dimension, and namespace. // -// To update a scalable target, specify the parameter that you want to change -// as well as the following parameters that identify the scalable target: resource -// ID, scalable dimension, and namespace. Any parameters that you don't specify -// are not changed by this update request. +// When you register a new scalable target, you must specify values for minimum +// and maximum capacity. Current capacity will be adjusted within the specified +// range when scaling starts. Application Auto Scaling scaling policies will +// not scale capacity to values that are outside of this range. // // After you register a scalable target, you do not need to register it again // to use other Application Auto Scaling operations. To see which resources -// have been registered, use DescribeScalableTargets. 
You can also view the -// scaling policies for a service namespace by using DescribeScalableTargets. +// have been registered, use DescribeScalableTargets (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_DescribeScalableTargets.html). +// You can also view the scaling policies for a service namespace by using DescribeScalableTargets +// (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_DescribeScalableTargets.html). +// If you no longer need a scalable target, you can deregister it by using DeregisterScalableTarget +// (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_DeregisterScalableTarget.html). // -// If you no longer need a scalable target, you can deregister it by using DeregisterScalableTarget. +// To update a scalable target, specify the parameters that you want to change. +// Include the parameters that identify the scalable target: resource ID, scalable +// dimension, and namespace. Any parameters that you don't specify are not changed +// by this update request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1344,8 +1347,8 @@ func (s *Alarm) SetAlarmName(v string) *Alarm { // Concurrent updates caused an exception, for example, if you request an update // to an Application Auto Scaling resource that already has a pending update. type ConcurrentUpdateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1362,17 +1365,17 @@ func (s ConcurrentUpdateException) GoString() string { func newErrorConcurrentUpdateException(v protocol.ResponseMetadata) error { return &ConcurrentUpdateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentUpdateException) Code() string { +func (s *ConcurrentUpdateException) Code() string { return "ConcurrentUpdateException" } // Message returns the exception's message. -func (s ConcurrentUpdateException) Message() string { +func (s *ConcurrentUpdateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1380,27 +1383,31 @@ func (s ConcurrentUpdateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentUpdateException) OrigErr() error { +func (s *ConcurrentUpdateException) OrigErr() error { return nil } -func (s ConcurrentUpdateException) Error() string { +func (s *ConcurrentUpdateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentUpdateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentUpdateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentUpdateException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentUpdateException) RequestID() string { + return s.RespMetadata.RequestID } // Represents a CloudWatch metric of your choosing for a target tracking scaling // policy to use with Application Auto Scaling. 
// +// For information about the available metrics for a service, see AWS Services +// That Publish CloudWatch Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/aws-services-cloudwatch-metrics.html) +// in the Amazon CloudWatch User Guide. +// // To create your customized metric specification: // // * Add values for each required parameter from CloudWatch. You can use @@ -1412,7 +1419,7 @@ func (s ConcurrentUpdateException) RequestID() string { // * Choose a metric that changes proportionally with capacity. The value // of the metric should increase or decrease in inverse proportion to the // number of capacity units. That is, the value of the metric should decrease -// when capacity increases. +// when capacity increases, and increase when capacity decreases. // // For more information about CloudWatch, see Amazon CloudWatch Concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html). type CustomizedMetricSpecification struct { @@ -1555,11 +1562,20 @@ type DeleteScalingPolicyInput struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -1603,16 +1619,26 @@ type DeleteScalingPolicyInput struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. 
+ // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -1732,11 +1758,20 @@ type DeleteScheduledActionInput struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -1780,9 +1815,21 @@ type DeleteScheduledActionInput struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` @@ -1791,10 +1838,8 @@ type DeleteScheduledActionInput struct { // ScheduledActionName is a required field ScheduledActionName *string `min:"1" type:"string" required:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -1914,11 +1959,20 @@ type DeregisterScalableTargetInput struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. 
Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -1962,16 +2016,26 @@ type DeregisterScalableTargetInput struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -2092,10 +2156,19 @@ type DescribeScalableTargetsInput struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. + // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. 
+ // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. ResourceIds []*string `type:"list"` // The scalable dimension associated with the scalable target. This string consists @@ -2139,14 +2212,24 @@ type DescribeScalableTargetsInput struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. + // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. ScalableDimension *string `type:"string" enum:"ScalableDimension"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -2289,10 +2372,19 @@ type DescribeScalingActivitiesInput struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. + // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. ResourceId *string `min:"1" type:"string"` // The scalable dimension. This string consists of the service namespace, resource @@ -2336,14 +2428,24 @@ type DescribeScalingActivitiesInput struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. 
+ // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. ScalableDimension *string `type:"string" enum:"ScalableDimension"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -2492,10 +2594,19 @@ type DescribeScalingPoliciesInput struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. + // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. ResourceId *string `min:"1" type:"string"` // The scalable dimension. This string consists of the service namespace, resource @@ -2539,14 +2650,24 @@ type DescribeScalingPoliciesInput struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. + // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. ScalableDimension *string `type:"string" enum:"ScalableDimension"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. 
For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -2698,10 +2819,19 @@ type DescribeScheduledActionsInput struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. + // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. ResourceId *string `min:"1" type:"string"` // The scalable dimension. This string consists of the service namespace, resource @@ -2745,17 +2875,27 @@ type DescribeScheduledActionsInput struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. + // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. ScalableDimension *string `type:"string" enum:"ScalableDimension"` // The names of the scheduled actions to describe. ScheduledActionNames []*string `type:"list"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. 
// // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -2863,8 +3003,8 @@ func (s *DescribeScheduledActionsOutput) SetScheduledActions(v []*ScheduledActio // DescribeAlarms (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html) // on your behalf. type FailedResourceAccessException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2881,17 +3021,17 @@ func (s FailedResourceAccessException) GoString() string { func newErrorFailedResourceAccessException(v protocol.ResponseMetadata) error { return &FailedResourceAccessException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FailedResourceAccessException) Code() string { +func (s *FailedResourceAccessException) Code() string { return "FailedResourceAccessException" } // Message returns the exception's message. -func (s FailedResourceAccessException) Message() string { +func (s *FailedResourceAccessException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2899,28 +3039,28 @@ func (s FailedResourceAccessException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FailedResourceAccessException) OrigErr() error { +func (s *FailedResourceAccessException) OrigErr() error { return nil } -func (s FailedResourceAccessException) Error() string { +func (s *FailedResourceAccessException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FailedResourceAccessException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FailedResourceAccessException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FailedResourceAccessException) RequestID() string { - return s.respMetadata.RequestID +func (s *FailedResourceAccessException) RequestID() string { + return s.RespMetadata.RequestID } // The service encountered an internal error. type InternalServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2937,17 +3077,17 @@ func (s InternalServiceException) GoString() string { func newErrorInternalServiceException(v protocol.ResponseMetadata) error { return &InternalServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceException) Code() string { +func (s *InternalServiceException) Code() string { return "InternalServiceException" } // Message returns the exception's message. -func (s InternalServiceException) Message() string { +func (s *InternalServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2955,28 +3095,28 @@ func (s InternalServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InternalServiceException) OrigErr() error { +func (s *InternalServiceException) OrigErr() error { return nil } -func (s InternalServiceException) Error() string { +func (s *InternalServiceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceException) RequestID() string { + return s.RespMetadata.RequestID } // The next token supplied was invalid. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2993,17 +3133,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3011,29 +3151,29 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // A per-account resource limit is exceeded. For more information, see Application // Auto Scaling Limits (https://docs.aws.amazon.com/ApplicationAutoScaling/latest/userguide/application-auto-scaling-limits.html). type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3050,17 +3190,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3068,22 +3208,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Describes the dimension names and values associated with a metric. @@ -3145,8 +3285,8 @@ func (s *MetricDimension) SetValue(v string) *MetricDimension { // does not exist. For any operation that deletes or deregisters a resource, // this exception is thrown if the resource cannot be found. type ObjectNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3163,17 +3303,17 @@ func (s ObjectNotFoundException) GoString() string { func newErrorObjectNotFoundException(v protocol.ResponseMetadata) error { return &ObjectNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ObjectNotFoundException) Code() string { +func (s *ObjectNotFoundException) Code() string { return "ObjectNotFoundException" } // Message returns the exception's message. -func (s ObjectNotFoundException) Message() string { +func (s *ObjectNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3181,26 +3321,32 @@ func (s ObjectNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ObjectNotFoundException) OrigErr() error { +func (s *ObjectNotFoundException) OrigErr() error { return nil } -func (s ObjectNotFoundException) Error() string { +func (s *ObjectNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ObjectNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ObjectNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ObjectNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ObjectNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Represents a predefined metric for a target tracking scaling policy to use // with Application Auto Scaling. +// +// Only the AWS services that you're using send metrics to Amazon CloudWatch. 
+// To determine whether a desired metric already exists by looking up its namespace +// and dimension using the CloudWatch metrics dashboard in the console, follow +// the procedure in Building Dashboards with CloudWatch (https://docs.aws.amazon.com/autoscaling/application/userguide/monitoring-cloudwatch.html) +// in the Application Auto Scaling User Guide. type PredefinedMetricSpecification struct { _ struct{} `type:"structure"` @@ -3214,7 +3360,9 @@ type PredefinedMetricSpecification struct { // a resource label unless the metric type is ALBRequestCountPerTarget and there // is a target group attached to the Spot Fleet request or ECS service. // - // The format is app///targetgroup//, + // You create the resource label by appending the final portion of the load + // balancer ARN and the final portion of the target group ARN into a single + // value, separated by a forward slash (/). The format is app///targetgroup//, // where: // // * app// is the final portion of @@ -3222,6 +3370,14 @@ type PredefinedMetricSpecification struct { // // * targetgroup// is the final portion // of the target group ARN. + // + // This is an example: app/EC2Co-EcsEl-1TKLTMITMM0EO/f37c06a68c1748aa/targetgroup/EC2Co-Defau-LDNM7Q3ZH1ZN/6d4ea56ca2d6a18d. + // + // To find the ARN for an Application Load Balancer, use the DescribeLoadBalancers + // (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) + // API operation. To find the ARN for the target group, use the DescribeTargetGroups + // (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeTargetGroups.html) + // API operation. ResourceLabel *string `min:"1" type:"string"` } @@ -3278,7 +3434,8 @@ type PutScalingPolicyInput struct { // // TargetTrackingScaling—Not supported for Amazon EMR // - // StepScaling—Not supported for DynamoDB, Amazon Comprehend, or AWS Lambda + // StepScaling—Not supported for DynamoDB, Amazon Comprehend, Lambda, Amazon + // Keyspaces (for Apache Cassandra), or Amazon MSK. // // For more information, see Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) // and Step Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) @@ -3320,11 +3477,20 @@ type PutScalingPolicyInput struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. 
+ // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -3368,16 +3534,26 @@ type PutScalingPolicyInput struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -3523,7 +3699,7 @@ func (s *PutScalingPolicyOutput) SetPolicyARN(v string) *PutScalingPolicyOutput type PutScheduledActionInput struct { _ struct{} `type:"structure"` - // The date and time for the scheduled action to end. + // The date and time for the recurring schedule to end. EndTime *time.Time `type:"timestamp"` // The identifier of the resource associated with the scheduled action. This @@ -3561,11 +3737,20 @@ type PutScheduledActionInput struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -3609,14 +3794,26 @@ type PutScheduledActionInput struct { // number of inference units for an Amazon Comprehend document classification // endpoint. 
// + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` // The new minimum and maximum capacity. You can set both values or just one. - // During the scheduled time, if the current capacity is below the minimum capacity, + // At the scheduled time, if the current capacity is below the minimum capacity, // Application Auto Scaling scales out to the minimum capacity. If the current // capacity is above the maximum capacity, Application Auto Scaling scales in // to the maximum capacity. @@ -3630,29 +3827,31 @@ type PutScheduledActionInput struct { // // * Cron expressions - "cron(fields)" // - // At expressions are useful for one-time schedules. Specify the time, in UTC. + // At expressions are useful for one-time schedules. Specify the time in UTC. // // For rate expressions, value is a positive integer and unit is minute | minutes // | hour | hours | day | days. // // For more information about cron expressions, see Cron Expressions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) // in the Amazon CloudWatch Events User Guide. + // + // For examples of using these expressions, see Scheduled Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html) + // in the Application Auto Scaling User Guide. Schedule *string `min:"1" type:"string"` - // The name of the scheduled action. + // The name of the scheduled action. This name must be unique among all other + // scheduled actions on the specified scalable target. // // ScheduledActionName is a required field ScheduledActionName *string `min:"1" type:"string" required:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` - // The date and time for the scheduled action to start. + // The date and time for this scheduled action to start. StartTime *time.Time `type:"timestamp"` } @@ -3762,12 +3961,30 @@ func (s PutScheduledActionOutput) GoString() string { type RegisterScalableTargetInput struct { _ struct{} `type:"structure"` - // The maximum value to scale to in response to a scale-out event. MaxCapacity - // is required to register a scalable target. + // The maximum value that you plan to scale out to. 
When a scaling policy is + // in effect, Application Auto Scaling can scale out (expand) as needed to the + // maximum capacity limit in response to changing demand. + // + // This parameter is required if you are registering a scalable target. + // + // Although you can specify a large maximum capacity, note that service quotas + // may impose lower limits. Each service has its own default quotas for the + // maximum capacity of the resource. If you want to specify a higher limit, + // you can request an increase. For more information, consult the documentation + // for that service. For information about the default quotas for each service, + // see Service Endpoints and Quotas (https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html) + // in the Amazon Web Services General Reference. MaxCapacity *int64 `type:"integer"` - // The minimum value to scale to in response to a scale-in event. MinCapacity - // is required to register a scalable target. + // The minimum value that you plan to scale in to. When a scaling policy is + // in effect, Application Auto Scaling can scale in (contract) as needed to + // the minimum capacity limit in response to changing demand. + // + // This parameter is required if you are registering a scalable target. For + // certain resources, the minimum value allowed is 0. This includes Lambda provisioned + // concurrency, Spot Fleet, ECS services, Aurora DB clusters, EMR clusters, + // and custom resources. For all other resources, the minimum value allowed + // is 1. MinCapacity *int64 `type:"integer"` // The identifier of the resource that is associated with the scalable target. @@ -3805,21 +4022,30 @@ type RegisterScalableTargetInput struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` - // Application Auto Scaling creates a service-linked role that grants it permissions - // to modify the scalable target on your behalf. For more information, see Service-Linked - // Roles for Application Auto Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-service-linked-roles.html). + // This parameter is required for services that do not support service-linked + // roles (such as Amazon EMR), and it must specify the ARN of an IAM role that + // allows Application Auto Scaling to modify the scalable target on your behalf. 
// - // For Amazon EMR, this parameter is required, and it must specify the ARN of - // an IAM role that allows Application Auto Scaling to modify the scalable target - // on your behalf. + // If the service supports service-linked roles, Application Auto Scaling uses + // a service-linked role, which it creates if it does not yet exist. For more + // information, see Application Auto Scaling IAM Roles (https://docs.aws.amazon.com/autoscaling/application/userguide/security_iam_service-with-iam.html#security_iam_service-with-iam-roles). RoleARN *string `min:"1" type:"string"` // The scalable dimension associated with the scalable target. This string consists @@ -3862,16 +4088,26 @@ type RegisterScalableTargetInput struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource. For a resource + // provided by your own application or service, use custom-resource instead. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -3997,12 +4233,12 @@ type ScalableTarget struct { // CreationTime is a required field CreationTime *time.Time `type:"timestamp" required:"true"` - // The maximum value to scale to in response to a scale-out event. + // The maximum value to scale to in response to a scale-out activity. // // MaxCapacity is a required field MaxCapacity *int64 `type:"integer" required:"true"` - // The minimum value to scale to in response to a scale-in event. + // The minimum value to scale to in response to a scale-in activity. // // MinCapacity is a required field MinCapacity *int64 `type:"integer" required:"true"` @@ -4042,11 +4278,20 @@ type ScalableTarget struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. 
Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -4096,16 +4341,25 @@ type ScalableTarget struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource, or a custom-resource. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -4178,9 +4432,22 @@ type ScalableTargetAction struct { _ struct{} `type:"structure"` // The maximum capacity. + // + // Although you can specify a large maximum capacity, note that service quotas + // may impose lower limits. Each service has its own default quotas for the + // maximum capacity of the resource. If you want to specify a higher limit, + // you can request an increase. For more information, consult the documentation + // for that service. For information about the default quotas for each service, + // see Service Endpoints and Quotas (https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html) + // in the Amazon Web Services General Reference. MaxCapacity *int64 `type:"integer"` // The minimum capacity. + // + // For certain resources, the minimum value allowed is 0. This includes Lambda + // provisioned concurrency, Spot Fleet, ECS services, Aurora DB clusters, EMR + // clusters, and custom resources. For all other resources, the minimum value + // allowed is 1. MinCapacity *int64 `type:"integer"` } @@ -4266,11 +4533,20 @@ type ScalingActivity struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. 
Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -4314,16 +4590,25 @@ type ScalingActivity struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource, or a custom-resource. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -4419,6 +4704,10 @@ func (s *ScalingActivity) SetStatusMessage(v string) *ScalingActivity { } // Represents a scaling policy to use with Application Auto Scaling. +// +// For more information about configuring scaling policies for a specific service, +// see Getting started with Application Auto Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/getting-started.html) +// in the Application Auto Scaling User Guide. type ScalingPolicy struct { _ struct{} `type:"structure"` @@ -4480,11 +4769,20 @@ type ScalingPolicy struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. 
// + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -4528,16 +4826,25 @@ type ScalingPolicy struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. + // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. + // // ScalableDimension is a required field ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource, or a custom-resource. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -4666,11 +4973,20 @@ type ScheduledAction struct { // * Amazon Comprehend document classification endpoint - The resource type // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. // + // * Amazon Comprehend entity recognizer endpoint - The resource type and + // unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:entity-recognizer-endpoint/EXAMPLE. + // // * Lambda provisioned concurrency - The resource type is function and the // unique identifier is the function name with a function version or alias // name suffix that is not $LATEST. Example: function:my-function:prod or // function:my-function:1. // + // * Amazon Keyspaces table - The resource type is table and the unique identifier + // is the table name. Example: keyspace/mykeyspace/table/mytable. + // + // * Amazon MSK cluster - The resource type and unique identifier are specified + // using the cluster ARN. Example: arn:aws:kafka:us-east-1:123456789012:cluster/demo-cluster-1/6357e0b2-0e6a-4b86-a0b4-70df934c2e31-5. + // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -4714,12 +5030,24 @@ type ScheduledAction struct { // number of inference units for an Amazon Comprehend document classification // endpoint. // + // * comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number + // of inference units for an Amazon Comprehend entity recognizer endpoint. 
+ // // * lambda:function:ProvisionedConcurrency - The provisioned concurrency // for a Lambda function. + // + // * cassandra:table:ReadCapacityUnits - The provisioned read capacity for + // an Amazon Keyspaces table. + // + // * cassandra:table:WriteCapacityUnits - The provisioned write capacity + // for an Amazon Keyspaces table. + // + // * kafka:broker-storage:VolumeSize - The provisioned volume size (in GiB) + // for brokers in an Amazon MSK cluster. ScalableDimension *string `type:"string" enum:"ScalableDimension"` // The new minimum and maximum capacity. You can set both values or just one. - // During the scheduled time, if the current capacity is below the minimum capacity, + // At the scheduled time, if the current capacity is below the minimum capacity, // Application Auto Scaling scales out to the minimum capacity. If the current // capacity is above the maximum capacity, Application Auto Scaling scales in // to the maximum capacity. @@ -4733,7 +5061,7 @@ type ScheduledAction struct { // // * Cron expressions - "cron(fields)" // - // At expressions are useful for one-time schedules. Specify the time, in UTC. + // At expressions are useful for one-time schedules. Specify the time in UTC. // // For rate expressions, value is a positive integer and unit is minute | minutes // | hour | hours | day | days. @@ -4741,6 +5069,9 @@ type ScheduledAction struct { // For more information about cron expressions, see Cron Expressions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) // in the Amazon CloudWatch Events User Guide. // + // For examples of using these expressions, see Scheduled Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html) + // in the Application Auto Scaling User Guide. + // // Schedule is a required field Schedule *string `min:"1" type:"string" required:"true"` @@ -4754,10 +5085,7 @@ type ScheduledAction struct { // ScheduledActionName is a required field ScheduledActionName *string `min:"1" type:"string" required:"true"` - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. + // The namespace of the AWS service that provides the resource, or a custom-resource. // // ServiceNamespace is a required field ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` @@ -4836,9 +5164,10 @@ func (s *ScheduledAction) SetStartTime(v time.Time) *ScheduledAction { return s } -// Represents a step adjustment for a StepScalingPolicyConfiguration. Describes -// an adjustment based on the difference between the value of the aggregated -// CloudWatch metric and the breach threshold that you've defined for the alarm. +// Represents a step adjustment for a StepScalingPolicyConfiguration (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_StepScalingPolicyConfiguration.html). +// Describes an adjustment based on the difference between the value of the +// aggregated CloudWatch metric and the breach threshold that you've defined +// for the alarm. 
// // For the following examples, suppose that you have an alarm with a breach // threshold of 50: @@ -4885,8 +5214,8 @@ type StepAdjustment struct { MetricIntervalUpperBound *float64 `type:"double"` // The amount by which to scale, based on the specified adjustment type. A positive - // value adds to the current scalable dimension while a negative number removes - // from the current scalable dimension. + // value adds to the current capacity while a negative number removes from the + // current capacity. For exact capacity, you must specify a positive value. // // ScalingAdjustment is a required field ScalingAdjustment *int64 `type:"integer" required:"true"` @@ -4938,31 +5267,66 @@ func (s *StepAdjustment) SetScalingAdjustment(v int64) *StepAdjustment { type StepScalingPolicyConfiguration struct { _ struct{} `type:"structure"` - // Specifies whether the ScalingAdjustment value in a StepAdjustment is an absolute - // number or a percentage of the current capacity. + // Specifies how the ScalingAdjustment value in a StepAdjustment (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_StepAdjustment.html) + // is interpreted (for example, an absolute number or a percentage). The valid + // values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + // + // AdjustmentType is required if you are adding a new step scaling policy configuration. AdjustmentType *string `type:"string" enum:"AdjustmentType"` - // The amount of time, in seconds, after a scaling activity completes where - // previous trigger-related scaling activities can influence future scaling - // events. - // - // For scale-out policies, while the cooldown period is in effect, the capacity - // that has been added by the previous scale-out event that initiated the cooldown - // is calculated as part of the desired capacity for the next scale out. The - // intention is to continuously (but not excessively) scale out. For example, - // an alarm triggers a step scaling policy to scale out an Amazon ECS service - // by 2 tasks, the scaling activity completes successfully, and a cooldown period - // of 5 minutes starts. During the cooldown period, if the alarm triggers the - // same policy again but at a more aggressive step adjustment to scale out the - // service by 3 tasks, the 2 tasks that were added in the previous scale-out - // event are considered part of that capacity and only 1 additional task is - // added to the desired count. - // - // For scale-in policies, the cooldown period is used to block subsequent scale-in - // requests until it has expired. The intention is to scale in conservatively - // to protect your application's availability. However, if another alarm triggers - // a scale-out policy during the cooldown period after a scale-in, Application - // Auto Scaling scales out your scalable target immediately. + // The amount of time, in seconds, to wait for a previous scaling activity to + // take effect. + // + // With scale-out policies, the intention is to continuously (but not excessively) + // scale out. After Application Auto Scaling successfully scales out using a + // step scaling policy, it starts to calculate the cooldown time. The scaling + // policy won't increase the desired capacity again unless either a larger scale + // out is triggered or the cooldown period ends. While the cooldown period is + // in effect, capacity added by the initiating scale-out activity is calculated + // as part of the desired capacity for the next scale-out activity. 
For example, + // when an alarm triggers a step scaling policy to increase the capacity by + // 2, the scaling activity completes successfully, and a cooldown period starts. + // If the alarm triggers again during the cooldown period but at a more aggressive + // step adjustment of 3, the previous increase of 2 is considered part of the + // current capacity. Therefore, only 1 is added to the capacity. + // + // With scale-in policies, the intention is to scale in conservatively to protect + // your application’s availability, so scale-in activities are blocked until + // the cooldown period has expired. However, if another alarm triggers a scale-out + // activity during the cooldown period after a scale-in activity, Application + // Auto Scaling scales out the target immediately. In this case, the cooldown + // period for the scale-in activity stops and doesn't complete. + // + // Application Auto Scaling provides a default value of 300 for the following + // scalable targets: + // + // * ECS services + // + // * Spot Fleet requests + // + // * EMR clusters + // + // * AppStream 2.0 fleets + // + // * Aurora DB clusters + // + // * Amazon SageMaker endpoint variants + // + // * Custom resources + // + // For all other scalable targets, the default value is 0: + // + // * DynamoDB tables + // + // * DynamoDB global secondary indexes + // + // * Amazon Comprehend document classification and entity recognizer endpoints + // + // * Lambda provisioned concurrency + // + // * Amazon Keyspaces tables + // + // * Amazon MSK cluster storage Cooldown *int64 `type:"integer"` // The aggregation type for the CloudWatch metrics. Valid values are Minimum, @@ -4970,10 +5334,7 @@ type StepScalingPolicyConfiguration struct { // as Average. MetricAggregationType *string `type:"string" enum:"MetricAggregationType"` - // The minimum number to adjust your scalable dimension as a result of a scaling - // activity. If the adjustment type is PercentChangeInCapacity, the scaling - // policy changes the scalable dimension of the scalable target by this amount. - // + // The minimum value to scale by when the adjustment type is PercentChangeInCapacity. // For example, suppose that you create a step scaling policy to scale out an // Amazon ECS service by 25 percent and you specify a MinAdjustmentMagnitude // of 2. If the service has 4 tasks and the scaling policy is performed, 25 @@ -4983,6 +5344,9 @@ type StepScalingPolicyConfiguration struct { // A set of adjustments that enable you to scale based on the size of the alarm // breach. + // + // At least one step adjustment is required if you are adding a new step scaling + // policy configuration. StepAdjustments []*StepAdjustment `type:"list"` } @@ -5106,9 +5470,9 @@ type TargetTrackingScalingPolicyConfiguration struct { // Indicates whether scale in by the target tracking scaling policy is disabled. // If the value is true, scale in is disabled and the target tracking scaling - // policy won't remove capacity from the scalable resource. Otherwise, scale - // in is enabled and the target tracking scaling policy can remove capacity - // from the scalable resource. The default value is false. + // policy won't remove capacity from the scalable target. Otherwise, scale in + // is enabled and the target tracking scaling policy can remove capacity from + // the scalable target. The default value is false. DisableScaleIn *bool `type:"boolean"` // A predefined metric. 
You can specify either a predefined metric or a customized @@ -5116,22 +5480,89 @@ type TargetTrackingScalingPolicyConfiguration struct { PredefinedMetricSpecification *PredefinedMetricSpecification `type:"structure"` // The amount of time, in seconds, after a scale-in activity completes before - // another scale in activity can start. + // another scale-in activity can start. + // + // With the scale-in cooldown period, the intention is to scale in conservatively + // to protect your application’s availability, so scale-in activities are + // blocked until the cooldown period has expired. However, if another alarm + // triggers a scale-out activity during the scale-in cooldown period, Application + // Auto Scaling scales out the target immediately. In this case, the scale-in + // cooldown period stops and doesn't complete. + // + // Application Auto Scaling provides a default value of 300 for the following + // scalable targets: + // + // * ECS services + // + // * Spot Fleet requests + // + // * EMR clusters + // + // * AppStream 2.0 fleets // - // The cooldown period is used to block subsequent scale-in requests until it - // has expired. The intention is to scale in conservatively to protect your - // application's availability. However, if another alarm triggers a scale-out - // policy during the cooldown period after a scale-in, Application Auto Scaling - // scales out your scalable target immediately. + // * Aurora DB clusters + // + // * Amazon SageMaker endpoint variants + // + // * Custom resources + // + // For all other scalable targets, the default value is 0: + // + // * DynamoDB tables + // + // * DynamoDB global secondary indexes + // + // * Amazon Comprehend document classification and entity recognizer endpoints + // + // * Lambda provisioned concurrency + // + // * Amazon Keyspaces tables + // + // * Amazon MSK cluster storage ScaleInCooldown *int64 `type:"integer"` - // The amount of time, in seconds, after a scale-out activity completes before - // another scale-out activity can start. + // The amount of time, in seconds, to wait for a previous scale-out activity + // to take effect. + // + // With the scale-out cooldown period, the intention is to continuously (but + // not excessively) scale out. After Application Auto Scaling successfully scales + // out using a target tracking scaling policy, it starts to calculate the cooldown + // time. The scaling policy won't increase the desired capacity again unless + // either a larger scale out is triggered or the cooldown period ends. While + // the cooldown period is in effect, the capacity added by the initiating scale-out + // activity is calculated as part of the desired capacity for the next scale-out + // activity. + // + // Application Auto Scaling provides a default value of 300 for the following + // scalable targets: + // + // * ECS services + // + // * Spot Fleet requests + // + // * EMR clusters + // + // * AppStream 2.0 fleets // - // While the cooldown period is in effect, the capacity that has been added - // by the previous scale-out event that initiated the cooldown is calculated - // as part of the desired capacity for the next scale out. The intention is - // to continuously (but not excessively) scale out. 
+ // * Aurora DB clusters + // + // * Amazon SageMaker endpoint variants + // + // * Custom resources + // + // For all other scalable targets, the default value is 0: + // + // * DynamoDB tables + // + // * DynamoDB global secondary indexes + // + // * Amazon Comprehend document classification and entity recognizer endpoints + // + // * Lambda provisioned concurrency + // + // * Amazon Keyspaces tables + // + // * Amazon MSK cluster storage ScaleOutCooldown *int64 `type:"integer"` // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 @@ -5213,8 +5644,8 @@ func (s *TargetTrackingScalingPolicyConfiguration) SetTargetValue(v float64) *Ta // An exception was thrown for a validation issue. Review the available parameters // for the API request. type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5231,17 +5662,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. -func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5249,22 +5680,22 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } const ( @@ -5278,6 +5709,15 @@ const ( AdjustmentTypeExactCapacity = "ExactCapacity" ) +// AdjustmentType_Values returns all elements of the AdjustmentType enum +func AdjustmentType_Values() []string { + return []string{ + AdjustmentTypeChangeInCapacity, + AdjustmentTypePercentChangeInCapacity, + AdjustmentTypeExactCapacity, + } +} + const ( // MetricAggregationTypeAverage is a MetricAggregationType enum value MetricAggregationTypeAverage = "Average" @@ -5289,6 +5729,15 @@ const ( MetricAggregationTypeMaximum = "Maximum" ) +// MetricAggregationType_Values returns all elements of the MetricAggregationType enum +func MetricAggregationType_Values() []string { + return []string{ + MetricAggregationTypeAverage, + MetricAggregationTypeMinimum, + MetricAggregationTypeMaximum, + } +} + const ( // MetricStatisticAverage is a MetricStatistic enum value MetricStatisticAverage = "Average" @@ -5306,6 +5755,17 @@ const ( MetricStatisticSum = "Sum" ) +// MetricStatistic_Values returns all elements of the MetricStatistic enum +func MetricStatistic_Values() []string { + return []string{ + MetricStatisticAverage, + MetricStatisticMinimum, + MetricStatisticMaximum, + MetricStatisticSampleCount, + MetricStatisticSum, + } +} + const ( // MetricTypeDynamoDbreadCapacityUtilization is a MetricType enum value MetricTypeDynamoDbreadCapacityUtilization = "DynamoDBReadCapacityUtilization" @@ -5348,8 +5808,40 @@ const ( // MetricTypeLambdaProvisionedConcurrencyUtilization is a MetricType enum value MetricTypeLambdaProvisionedConcurrencyUtilization = "LambdaProvisionedConcurrencyUtilization" + + // MetricTypeCassandraReadCapacityUtilization is a MetricType enum value + MetricTypeCassandraReadCapacityUtilization = "CassandraReadCapacityUtilization" + + // MetricTypeCassandraWriteCapacityUtilization is a MetricType enum value + MetricTypeCassandraWriteCapacityUtilization = "CassandraWriteCapacityUtilization" + + // MetricTypeKafkaBrokerStorageUtilization is a MetricType enum value + MetricTypeKafkaBrokerStorageUtilization = "KafkaBrokerStorageUtilization" ) +// MetricType_Values returns all elements of the MetricType enum +func MetricType_Values() []string { + return []string{ + MetricTypeDynamoDbreadCapacityUtilization, + MetricTypeDynamoDbwriteCapacityUtilization, + MetricTypeAlbrequestCountPerTarget, + MetricTypeRdsreaderAverageCpuutilization, + MetricTypeRdsreaderAverageDatabaseConnections, + MetricTypeEc2spotFleetRequestAverageCpuutilization, + MetricTypeEc2spotFleetRequestAverageNetworkIn, + MetricTypeEc2spotFleetRequestAverageNetworkOut, + MetricTypeSageMakerVariantInvocationsPerInstance, + MetricTypeEcsserviceAverageCpuutilization, + MetricTypeEcsserviceAverageMemoryUtilization, + MetricTypeAppStreamAverageCapacityUtilization, + MetricTypeComprehendInferenceUtilization, + MetricTypeLambdaProvisionedConcurrencyUtilization, + MetricTypeCassandraReadCapacityUtilization, + MetricTypeCassandraWriteCapacityUtilization, + MetricTypeKafkaBrokerStorageUtilization, + } +} + const ( // PolicyTypeStepScaling is a PolicyType enum value PolicyTypeStepScaling = "StepScaling" @@ -5358,6 +5850,14 @@ const ( PolicyTypeTargetTrackingScaling = "TargetTrackingScaling" ) +// PolicyType_Values returns all elements of the PolicyType enum +func PolicyType_Values() []string { + return []string{ + PolicyTypeStepScaling, + PolicyTypeTargetTrackingScaling, + } +} + const 
( // ScalableDimensionEcsServiceDesiredCount is a ScalableDimension enum value ScalableDimensionEcsServiceDesiredCount = "ecs:service:DesiredCount" @@ -5395,10 +5895,45 @@ const ( // ScalableDimensionComprehendDocumentClassifierEndpointDesiredInferenceUnits is a ScalableDimension enum value ScalableDimensionComprehendDocumentClassifierEndpointDesiredInferenceUnits = "comprehend:document-classifier-endpoint:DesiredInferenceUnits" + // ScalableDimensionComprehendEntityRecognizerEndpointDesiredInferenceUnits is a ScalableDimension enum value + ScalableDimensionComprehendEntityRecognizerEndpointDesiredInferenceUnits = "comprehend:entity-recognizer-endpoint:DesiredInferenceUnits" + // ScalableDimensionLambdaFunctionProvisionedConcurrency is a ScalableDimension enum value ScalableDimensionLambdaFunctionProvisionedConcurrency = "lambda:function:ProvisionedConcurrency" + + // ScalableDimensionCassandraTableReadCapacityUnits is a ScalableDimension enum value + ScalableDimensionCassandraTableReadCapacityUnits = "cassandra:table:ReadCapacityUnits" + + // ScalableDimensionCassandraTableWriteCapacityUnits is a ScalableDimension enum value + ScalableDimensionCassandraTableWriteCapacityUnits = "cassandra:table:WriteCapacityUnits" + + // ScalableDimensionKafkaBrokerStorageVolumeSize is a ScalableDimension enum value + ScalableDimensionKafkaBrokerStorageVolumeSize = "kafka:broker-storage:VolumeSize" ) +// ScalableDimension_Values returns all elements of the ScalableDimension enum +func ScalableDimension_Values() []string { + return []string{ + ScalableDimensionEcsServiceDesiredCount, + ScalableDimensionEc2SpotFleetRequestTargetCapacity, + ScalableDimensionElasticmapreduceInstancegroupInstanceCount, + ScalableDimensionAppstreamFleetDesiredCapacity, + ScalableDimensionDynamodbTableReadCapacityUnits, + ScalableDimensionDynamodbTableWriteCapacityUnits, + ScalableDimensionDynamodbIndexReadCapacityUnits, + ScalableDimensionDynamodbIndexWriteCapacityUnits, + ScalableDimensionRdsClusterReadReplicaCount, + ScalableDimensionSagemakerVariantDesiredInstanceCount, + ScalableDimensionCustomResourceResourceTypeProperty, + ScalableDimensionComprehendDocumentClassifierEndpointDesiredInferenceUnits, + ScalableDimensionComprehendEntityRecognizerEndpointDesiredInferenceUnits, + ScalableDimensionLambdaFunctionProvisionedConcurrency, + ScalableDimensionCassandraTableReadCapacityUnits, + ScalableDimensionCassandraTableWriteCapacityUnits, + ScalableDimensionKafkaBrokerStorageVolumeSize, + } +} + const ( // ScalingActivityStatusCodePending is a ScalingActivityStatusCode enum value ScalingActivityStatusCodePending = "Pending" @@ -5419,6 +5954,18 @@ const ( ScalingActivityStatusCodeFailed = "Failed" ) +// ScalingActivityStatusCode_Values returns all elements of the ScalingActivityStatusCode enum +func ScalingActivityStatusCode_Values() []string { + return []string{ + ScalingActivityStatusCodePending, + ScalingActivityStatusCodeInProgress, + ScalingActivityStatusCodeSuccessful, + ScalingActivityStatusCodeOverridden, + ScalingActivityStatusCodeUnfulfilled, + ScalingActivityStatusCodeFailed, + } +} + const ( // ServiceNamespaceEcs is a ServiceNamespace enum value ServiceNamespaceEcs = "ecs" @@ -5449,4 +5996,28 @@ const ( // ServiceNamespaceLambda is a ServiceNamespace enum value ServiceNamespaceLambda = "lambda" + + // ServiceNamespaceCassandra is a ServiceNamespace enum value + ServiceNamespaceCassandra = "cassandra" + + // ServiceNamespaceKafka is a ServiceNamespace enum value + ServiceNamespaceKafka = "kafka" ) + +// 
ServiceNamespace_Values returns all elements of the ServiceNamespace enum +func ServiceNamespace_Values() []string { + return []string{ + ServiceNamespaceEcs, + ServiceNamespaceElasticmapreduce, + ServiceNamespaceEc2, + ServiceNamespaceAppstream, + ServiceNamespaceDynamodb, + ServiceNamespaceRds, + ServiceNamespaceSagemaker, + ServiceNamespaceCustomResource, + ServiceNamespaceComprehend, + ServiceNamespaceLambda, + ServiceNamespaceCassandra, + ServiceNamespaceKafka, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go index 4a1a89ee7..0d163aead 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go @@ -22,10 +22,14 @@ // // * Custom resources provided by your own applications or services // -// * Amazon Comprehend document classification endpoints +// * Amazon Comprehend document classification and entity recognizer endpoints // // * AWS Lambda function provisioned concurrency // +// * Amazon Keyspaces (for Apache Cassandra) tables +// +// * Amazon Managed Streaming for Apache Kafka cluster storage +// // API Summary // // The Application Auto Scaling service API includes three key sets of actions: @@ -41,10 +45,11 @@ // activity history. // // * Suspend and resume scaling - Temporarily suspend and later resume automatic -// scaling by calling the RegisterScalableTarget action for any Application -// Auto Scaling scalable target. You can suspend and resume, individually -// or in combination, scale-out activities triggered by a scaling policy, -// scale-in activities triggered by a scaling policy, and scheduled scaling. +// scaling by calling the RegisterScalableTarget (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html) +// API action for any Application Auto Scaling scalable target. You can suspend +// and resume (individually or in combination) scale-out activities that +// are triggered by a scaling policy, scale-in activities that are triggered +// by a scaling policy, and scheduled scaling. 
// // To learn more about Application Auto Scaling, including information about // granting IAM users required permissions for Application Auto Scaling actions, diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go index 6930dd735..68e93cefc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/api.go b/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/api.go index 94be2102e..3c3302563 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/api.go @@ -2764,6 +2764,11 @@ func (s *ApplicationComponent) SetTier(v string) *ApplicationComponent { type ApplicationInfo struct { _ struct{} `type:"structure"` + // Indicates whether Application Insights can listen to CloudWatch events for + // the application resources, such as instance terminated, failed deployment, + // and others. + CWEMonitorEnabled *bool `type:"boolean"` + // The lifecycle of the application. LifeCycle *string `type:"string"` @@ -2797,6 +2802,12 @@ func (s ApplicationInfo) GoString() string { return s.String() } +// SetCWEMonitorEnabled sets the CWEMonitorEnabled field's value. +func (s *ApplicationInfo) SetCWEMonitorEnabled(v bool) *ApplicationInfo { + s.CWEMonitorEnabled = &v + return s +} + // SetLifeCycle sets the LifeCycle field's value. func (s *ApplicationInfo) SetLifeCycle(v string) *ApplicationInfo { s.LifeCycle = &v @@ -2829,8 +2840,8 @@ func (s *ApplicationInfo) SetResourceGroupName(v string) *ApplicationInfo { // The request is not understood by the server. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2847,17 +2858,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2865,22 +2876,22 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } // The event information. @@ -2957,6 +2968,11 @@ func (s *ConfigurationEvent) SetMonitoredResourceARN(v string) *ConfigurationEve type CreateApplicationInput struct { _ struct{} `type:"structure"` + // Indicates whether Application Insights can listen to CloudWatch events for + // the application resources, such as instance terminated, failed deployment, + // and others. + CWEMonitorEnabled *bool `type:"boolean"` + // When set to true, creates opsItems for any problems detected on an application. OpsCenterEnabled *bool `type:"boolean"` @@ -3014,6 +3030,12 @@ func (s *CreateApplicationInput) Validate() error { return nil } +// SetCWEMonitorEnabled sets the CWEMonitorEnabled field's value. +func (s *CreateApplicationInput) SetCWEMonitorEnabled(v bool) *CreateApplicationInput { + s.CWEMonitorEnabled = &v + return s +} + // SetOpsCenterEnabled sets the OpsCenterEnabled field's value. func (s *CreateApplicationInput) SetOpsCenterEnabled(v bool) *CreateApplicationInput { s.OpsCenterEnabled = &v @@ -4143,8 +4165,8 @@ func (s *DescribeProblemOutput) SetProblem(v *Problem) *DescribeProblemOutput { // The server encountered an internal error and is unable to complete the request. type InternalServerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4161,17 +4183,17 @@ func (s InternalServerException) GoString() string { func newErrorInternalServerException(v protocol.ResponseMetadata) error { return &InternalServerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerException) Code() string { +func (s *InternalServerException) Code() string { return "InternalServerException" } // Message returns the exception's message. -func (s InternalServerException) Message() string { +func (s *InternalServerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4179,22 +4201,22 @@ func (s InternalServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerException) OrigErr() error { +func (s *InternalServerException) OrigErr() error { return nil } -func (s InternalServerException) Error() string { +func (s *InternalServerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InternalServerException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID } type ListApplicationsInput struct { @@ -4960,9 +4982,55 @@ func (s *LogPattern) SetRank(v int64) *LogPattern { type Observation struct { _ struct{} `type:"structure"` + // The detail type of the CloudWatch Event-based observation, for example, EC2 + // Instance State-change Notification. + CloudWatchEventDetailType *string `type:"string"` + + // The ID of the CloudWatch Event-based observation related to the detected + // problem. + CloudWatchEventId *string `type:"string"` + + // The source of the CloudWatch Event. + CloudWatchEventSource *string `type:"string" enum:"CloudWatchEventSource"` + + // The CodeDeploy application to which the deployment belongs. + CodeDeployApplication *string `type:"string"` + + // The deployment group to which the CodeDeploy deployment belongs. + CodeDeployDeploymentGroup *string `type:"string"` + + // The deployment ID of the CodeDeploy-based observation related to the detected + // problem. + CodeDeployDeploymentId *string `type:"string"` + + // The instance group to which the CodeDeploy instance belongs. + CodeDeployInstanceGroupId *string `type:"string"` + + // The status of the CodeDeploy deployment, for example SUCCESS or FAILURE. + CodeDeployState *string `type:"string"` + + // The state of the instance, such as STOPPING or TERMINATING. + Ec2State *string `type:"string"` + // The time when the observation ended, in epoch seconds. EndTime *time.Time `type:"timestamp"` + // The Amazon Resource Name (ARN) of the AWS Health Event-based observation. + HealthEventArn *string `type:"string"` + + // The description of the AWS Health event provided by the service, such as + // Amazon EC2. + HealthEventDescription *string `type:"string"` + + // The category of the AWS Health event, such as issue. + HealthEventTypeCategory *string `type:"string"` + + // The type of the AWS Health event, for example, AWS_EC2_POWER_CONNECTIVITY_ISSUE. + HealthEventTypeCode *string `type:"string"` + + // The service to which the AWS Health Event belongs, such as EC2. + HealthService *string `type:"string"` + // The ID of the observation type. Id *string `min:"38" type:"string"` @@ -4999,6 +5067,27 @@ type Observation struct { // The value of the source observation metric. Value *float64 `type:"double"` + + // The X-Ray request error percentage for this node. + XRayErrorPercent *int64 `type:"integer"` + + // The X-Ray request fault percentage for this node. + XRayFaultPercent *int64 `type:"integer"` + + // The name of the X-Ray node. + XRayNodeName *string `type:"string"` + + // The type of the X-Ray node. + XRayNodeType *string `type:"string"` + + // The X-Ray node request average latency for this node. + XRayRequestAverageLatency *int64 `type:"long"` + + // The X-Ray request count for this node. + XRayRequestCount *int64 `type:"integer"` + + // The X-Ray request throttle percentage for this node. + XRayThrottlePercent *int64 `type:"integer"` } // String returns the string representation @@ -5011,12 +5100,96 @@ func (s Observation) GoString() string { return s.String() } +// SetCloudWatchEventDetailType sets the CloudWatchEventDetailType field's value. +func (s *Observation) SetCloudWatchEventDetailType(v string) *Observation { + s.CloudWatchEventDetailType = &v + return s +} + +// SetCloudWatchEventId sets the CloudWatchEventId field's value. 
+func (s *Observation) SetCloudWatchEventId(v string) *Observation { + s.CloudWatchEventId = &v + return s +} + +// SetCloudWatchEventSource sets the CloudWatchEventSource field's value. +func (s *Observation) SetCloudWatchEventSource(v string) *Observation { + s.CloudWatchEventSource = &v + return s +} + +// SetCodeDeployApplication sets the CodeDeployApplication field's value. +func (s *Observation) SetCodeDeployApplication(v string) *Observation { + s.CodeDeployApplication = &v + return s +} + +// SetCodeDeployDeploymentGroup sets the CodeDeployDeploymentGroup field's value. +func (s *Observation) SetCodeDeployDeploymentGroup(v string) *Observation { + s.CodeDeployDeploymentGroup = &v + return s +} + +// SetCodeDeployDeploymentId sets the CodeDeployDeploymentId field's value. +func (s *Observation) SetCodeDeployDeploymentId(v string) *Observation { + s.CodeDeployDeploymentId = &v + return s +} + +// SetCodeDeployInstanceGroupId sets the CodeDeployInstanceGroupId field's value. +func (s *Observation) SetCodeDeployInstanceGroupId(v string) *Observation { + s.CodeDeployInstanceGroupId = &v + return s +} + +// SetCodeDeployState sets the CodeDeployState field's value. +func (s *Observation) SetCodeDeployState(v string) *Observation { + s.CodeDeployState = &v + return s +} + +// SetEc2State sets the Ec2State field's value. +func (s *Observation) SetEc2State(v string) *Observation { + s.Ec2State = &v + return s +} + // SetEndTime sets the EndTime field's value. func (s *Observation) SetEndTime(v time.Time) *Observation { s.EndTime = &v return s } +// SetHealthEventArn sets the HealthEventArn field's value. +func (s *Observation) SetHealthEventArn(v string) *Observation { + s.HealthEventArn = &v + return s +} + +// SetHealthEventDescription sets the HealthEventDescription field's value. +func (s *Observation) SetHealthEventDescription(v string) *Observation { + s.HealthEventDescription = &v + return s +} + +// SetHealthEventTypeCategory sets the HealthEventTypeCategory field's value. +func (s *Observation) SetHealthEventTypeCategory(v string) *Observation { + s.HealthEventTypeCategory = &v + return s +} + +// SetHealthEventTypeCode sets the HealthEventTypeCode field's value. +func (s *Observation) SetHealthEventTypeCode(v string) *Observation { + s.HealthEventTypeCode = &v + return s +} + +// SetHealthService sets the HealthService field's value. +func (s *Observation) SetHealthService(v string) *Observation { + s.HealthService = &v + return s +} + // SetId sets the Id field's value. func (s *Observation) SetId(v string) *Observation { s.Id = &v @@ -5089,6 +5262,48 @@ func (s *Observation) SetValue(v float64) *Observation { return s } +// SetXRayErrorPercent sets the XRayErrorPercent field's value. +func (s *Observation) SetXRayErrorPercent(v int64) *Observation { + s.XRayErrorPercent = &v + return s +} + +// SetXRayFaultPercent sets the XRayFaultPercent field's value. +func (s *Observation) SetXRayFaultPercent(v int64) *Observation { + s.XRayFaultPercent = &v + return s +} + +// SetXRayNodeName sets the XRayNodeName field's value. +func (s *Observation) SetXRayNodeName(v string) *Observation { + s.XRayNodeName = &v + return s +} + +// SetXRayNodeType sets the XRayNodeType field's value. +func (s *Observation) SetXRayNodeType(v string) *Observation { + s.XRayNodeType = &v + return s +} + +// SetXRayRequestAverageLatency sets the XRayRequestAverageLatency field's value. 
+func (s *Observation) SetXRayRequestAverageLatency(v int64) *Observation { + s.XRayRequestAverageLatency = &v + return s +} + +// SetXRayRequestCount sets the XRayRequestCount field's value. +func (s *Observation) SetXRayRequestCount(v int64) *Observation { + s.XRayRequestCount = &v + return s +} + +// SetXRayThrottlePercent sets the XRayThrottlePercent field's value. +func (s *Observation) SetXRayThrottlePercent(v int64) *Observation { + s.XRayThrottlePercent = &v + return s +} + // Describes a problem that is detected by correlating observations. type Problem struct { _ struct{} `type:"structure"` @@ -5220,8 +5435,8 @@ func (s *RelatedObservations) SetObservationList(v []*Observation) *RelatedObser // The resource is already created or in use. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5238,17 +5453,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5256,28 +5471,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The resource does not exist in the customer account. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5294,17 +5509,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. 
-func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5312,22 +5527,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // An object that defines the tags associated with an application. A tag is @@ -5494,8 +5709,8 @@ func (s TagResourceOutput) GoString() string { // Tags are already registered for the specified application ARN. type TagsAlreadyExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5512,17 +5727,17 @@ func (s TagsAlreadyExistException) GoString() string { func newErrorTagsAlreadyExistException(v protocol.ResponseMetadata) error { return &TagsAlreadyExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagsAlreadyExistException) Code() string { +func (s *TagsAlreadyExistException) Code() string { return "TagsAlreadyExistException" } // Message returns the exception's message. -func (s TagsAlreadyExistException) Message() string { +func (s *TagsAlreadyExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5530,29 +5745,29 @@ func (s TagsAlreadyExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagsAlreadyExistException) OrigErr() error { +func (s *TagsAlreadyExistException) OrigErr() error { return nil } -func (s TagsAlreadyExistException) Error() string { +func (s *TagsAlreadyExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagsAlreadyExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagsAlreadyExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagsAlreadyExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagsAlreadyExistException) RequestID() string { + return s.RespMetadata.RequestID } // The number of the provided tags is beyond the limit, or the number of total // tags you are trying to attach to the specified resource exceeds the limit. 
type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -5572,17 +5787,17 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5590,22 +5805,22 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { +func (s *TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *TooManyTagsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -5685,6 +5900,11 @@ func (s UntagResourceOutput) GoString() string { type UpdateApplicationInput struct { _ struct{} `type:"structure"` + // Indicates whether Application Insights can listen to CloudWatch events for + // the application resources, such as instance terminated, failed deployment, + // and others. + CWEMonitorEnabled *bool `type:"boolean"` + // When set to true, creates opsItems for any problems detected on an application. OpsCenterEnabled *bool `type:"boolean"` @@ -5730,6 +5950,12 @@ func (s *UpdateApplicationInput) Validate() error { return nil } +// SetCWEMonitorEnabled sets the CWEMonitorEnabled field's value. +func (s *UpdateApplicationInput) SetCWEMonitorEnabled(v bool) *UpdateApplicationInput { + s.CWEMonitorEnabled = &v + return s +} + // SetOpsCenterEnabled sets the OpsCenterEnabled field's value. func (s *UpdateApplicationInput) SetOpsCenterEnabled(v bool) *UpdateApplicationInput { s.OpsCenterEnabled = &v @@ -6102,8 +6328,8 @@ func (s *UpdateLogPatternOutput) SetResourceGroupName(v string) *UpdateLogPatter // The parameter is not valid. type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6120,17 +6346,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. 
-func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6138,22 +6364,42 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID +} + +const ( + // CloudWatchEventSourceEc2 is a CloudWatchEventSource enum value + CloudWatchEventSourceEc2 = "EC2" + + // CloudWatchEventSourceCodeDeploy is a CloudWatchEventSource enum value + CloudWatchEventSourceCodeDeploy = "CODE_DEPLOY" + + // CloudWatchEventSourceHealth is a CloudWatchEventSource enum value + CloudWatchEventSourceHealth = "HEALTH" +) + +// CloudWatchEventSource_Values returns all elements of the CloudWatchEventSource enum +func CloudWatchEventSource_Values() []string { + return []string{ + CloudWatchEventSourceEc2, + CloudWatchEventSourceCodeDeploy, + CloudWatchEventSourceHealth, + } } const ( @@ -6167,6 +6413,15 @@ const ( ConfigurationEventResourceTypeSsmAssociation = "SSM_ASSOCIATION" ) +// ConfigurationEventResourceType_Values returns all elements of the ConfigurationEventResourceType enum +func ConfigurationEventResourceType_Values() []string { + return []string{ + ConfigurationEventResourceTypeCloudwatchAlarm, + ConfigurationEventResourceTypeCloudformation, + ConfigurationEventResourceTypeSsmAssociation, + } +} + const ( // ConfigurationEventStatusInfo is a ConfigurationEventStatus enum value ConfigurationEventStatusInfo = "INFO" @@ -6178,11 +6433,27 @@ const ( ConfigurationEventStatusError = "ERROR" ) +// ConfigurationEventStatus_Values returns all elements of the ConfigurationEventStatus enum +func ConfigurationEventStatus_Values() []string { + return []string{ + ConfigurationEventStatusInfo, + ConfigurationEventStatusWarn, + ConfigurationEventStatusError, + } +} + const ( // FeedbackKeyInsightsFeedback is a FeedbackKey enum value FeedbackKeyInsightsFeedback = "INSIGHTS_FEEDBACK" ) +// FeedbackKey_Values returns all elements of the FeedbackKey enum +func FeedbackKey_Values() []string { + return []string{ + FeedbackKeyInsightsFeedback, + } +} + const ( // FeedbackValueNotSpecified is a FeedbackValue enum value FeedbackValueNotSpecified = "NOT_SPECIFIED" @@ -6194,6 +6465,15 @@ const ( FeedbackValueNotUseful = "NOT_USEFUL" ) +// FeedbackValue_Values returns all elements of the FeedbackValue enum +func FeedbackValue_Values() []string { + return []string{ + FeedbackValueNotSpecified, + FeedbackValueUseful, + FeedbackValueNotUseful, + } +} + const ( // LogFilterError is a LogFilter enum value LogFilterError = "ERROR" @@ -6205,6 +6485,15 @@ const ( LogFilterInfo = "INFO" ) +// LogFilter_Values returns all elements of the LogFilter enum +func LogFilter_Values() []string { + return []string{ + LogFilterError, + LogFilterWarn, + LogFilterInfo, + } +} + const 
( // SeverityLevelLow is a SeverityLevel enum value SeverityLevelLow = "Low" @@ -6216,6 +6505,15 @@ const ( SeverityLevelHigh = "High" ) +// SeverityLevel_Values returns all elements of the SeverityLevel enum +func SeverityLevel_Values() []string { + return []string{ + SeverityLevelLow, + SeverityLevelMedium, + SeverityLevelHigh, + } +} + const ( // StatusIgnore is a Status enum value StatusIgnore = "IGNORE" @@ -6227,6 +6525,15 @@ const ( StatusPending = "PENDING" ) +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusIgnore, + StatusResolved, + StatusPending, + } +} + const ( // TierDefault is a Tier enum value TierDefault = "DEFAULT" @@ -6243,3 +6550,14 @@ const ( // TierSqlServer is a Tier enum value TierSqlServer = "SQL_SERVER" ) + +// Tier_Values returns all elements of the Tier enum +func Tier_Values() []string { + return []string{ + TierDefault, + TierDotNetCore, + TierDotNetWorker, + TierDotNetWeb, + TierSqlServer, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go b/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go index a39b98938..641d9d10a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go b/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go index 89e83d2a1..8f436c9ae 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go @@ -13,6 +13,119 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restjson" ) +const opCreateGatewayRoute = "CreateGatewayRoute" + +// CreateGatewayRouteRequest generates a "aws/request.Request" representing the +// client's request for the CreateGatewayRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateGatewayRoute for more information on using the CreateGatewayRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateGatewayRouteRequest method. 
+// req, resp := client.CreateGatewayRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/CreateGatewayRoute +func (c *AppMesh) CreateGatewayRouteRequest(input *CreateGatewayRouteInput) (req *request.Request, output *CreateGatewayRouteOutput) { + op := &request.Operation{ + Name: opCreateGatewayRoute, + HTTPMethod: "PUT", + HTTPPath: "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes", + } + + if input == nil { + input = &CreateGatewayRouteInput{} + } + + output = &CreateGatewayRouteOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateGatewayRoute API operation for AWS App Mesh. +// +// Creates a gateway route. +// +// A gateway route is attached to a virtual gateway and routes traffic to an +// existing virtual service. If a route matches a request, it can distribute +// traffic to a target virtual service. +// +// For more information about gateway routes, see Gateway routes (https://docs.aws.amazon.com/app-mesh/latest/userguide/gateway-routes.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS App Mesh's +// API operation CreateGatewayRoute for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request syntax was malformed. Check your request syntax and try again. +// +// * ConflictException +// The request contains a client token that was used for a previous update resource +// call with different specifications. Try the request again with a new client +// token. +// +// * ForbiddenException +// You don't have permissions to perform this action. +// +// * InternalServerErrorException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * LimitExceededException +// You have exceeded a service limit for your account. For more information, +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) +// in the AWS App Mesh User Guide. +// +// * NotFoundException +// The specified resource doesn't exist. Check your request syntax and try again. +// +// * ServiceUnavailableException +// The request has failed due to a temporary failure of the service. +// +// * TooManyRequestsException +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/CreateGatewayRoute +func (c *AppMesh) CreateGatewayRoute(input *CreateGatewayRouteInput) (*CreateGatewayRouteOutput, error) { + req, out := c.CreateGatewayRouteRequest(input) + return out, req.Send() +} + +// CreateGatewayRouteWithContext is the same as CreateGatewayRoute with the addition of +// the ability to pass a context and additional request options. +// +// See CreateGatewayRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *AppMesh) CreateGatewayRouteWithContext(ctx aws.Context, input *CreateGatewayRouteInput, opts ...request.Option) (*CreateGatewayRouteOutput, error) { + req, out := c.CreateGatewayRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateMesh = "CreateMesh" // CreateMeshRequest generates a "aws/request.Request" representing the @@ -57,12 +170,14 @@ func (c *AppMesh) CreateMeshRequest(input *CreateMeshInput) (req *request.Reques // CreateMesh API operation for AWS App Mesh. // -// Creates a service mesh. A service mesh is a logical boundary for network -// traffic between the services that reside within it. +// Creates a service mesh. // -// After you create your service mesh, you can create virtual services, virtual -// nodes, virtual routers, and routes to distribute traffic between the applications -// in your mesh. +// A service mesh is a logical boundary for network traffic between services +// that are represented by resources within the mesh. After you create your +// service mesh, you can create virtual services, virtual nodes, virtual routers, +// and routes to distribute traffic between the applications in your mesh. +// +// For more information about service meshes, see Service meshes (https://docs.aws.amazon.com/app-mesh/latest/userguide/meshes.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -89,7 +204,7 @@ func (c *AppMesh) CreateMeshRequest(input *CreateMeshInput) (req *request.Reques // // * LimitExceededException // You have exceeded a service limit for your account. For more information, -// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service_limits.html) +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) // in the AWS App Mesh User Guide. // // * NotFoundException @@ -171,13 +286,8 @@ func (c *AppMesh) CreateRouteRequest(input *CreateRouteInput) (req *request.Requ // // Creates a route that is associated with a virtual router. // -// You can use the prefix parameter in your route specification for path-based -// routing of requests. For example, if your virtual service name is my-service.local -// and you want the route to match requests to my-service.local/metrics, your -// prefix should be /metrics. -// -// If your route matches a request, you can distribute traffic to one or more -// target virtual nodes with relative weighting. +// You can route several different protocols and define a retry policy for a +// route. Traffic can be routed to one or more virtual nodes. // // For more information about routes, see Routes (https://docs.aws.amazon.com/app-mesh/latest/userguide/routes.html). // @@ -206,7 +316,7 @@ func (c *AppMesh) CreateRouteRequest(input *CreateRouteInput) (req *request.Requ // // * LimitExceededException // You have exceeded a service limit for your account. For more information, -// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service_limits.html) +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) // in the AWS App Mesh User Guide. 
// // * NotFoundException @@ -242,6 +352,121 @@ func (c *AppMesh) CreateRouteWithContext(ctx aws.Context, input *CreateRouteInpu return out, req.Send() } +const opCreateVirtualGateway = "CreateVirtualGateway" + +// CreateVirtualGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateVirtualGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateVirtualGateway for more information on using the CreateVirtualGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateVirtualGatewayRequest method. +// req, resp := client.CreateVirtualGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/CreateVirtualGateway +func (c *AppMesh) CreateVirtualGatewayRequest(input *CreateVirtualGatewayInput) (req *request.Request, output *CreateVirtualGatewayOutput) { + op := &request.Operation{ + Name: opCreateVirtualGateway, + HTTPMethod: "PUT", + HTTPPath: "/v20190125/meshes/{meshName}/virtualGateways", + } + + if input == nil { + input = &CreateVirtualGatewayInput{} + } + + output = &CreateVirtualGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateVirtualGateway API operation for AWS App Mesh. +// +// Creates a virtual gateway. +// +// A virtual gateway allows resources outside your mesh to communicate to resources +// that are inside your mesh. The virtual gateway represents an Envoy proxy +// running in an Amazon ECS task, in a Kubernetes service, or on an Amazon EC2 +// instance. Unlike a virtual node, which represents an Envoy running with an +// application, a virtual gateway represents Envoy deployed by itself. +// +// For more information about virtual gateways, see Virtual gateways (https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual_gateways.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS App Mesh's +// API operation CreateVirtualGateway for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request syntax was malformed. Check your request syntax and try again. +// +// * ConflictException +// The request contains a client token that was used for a previous update resource +// call with different specifications. Try the request again with a new client +// token. +// +// * ForbiddenException +// You don't have permissions to perform this action. +// +// * InternalServerErrorException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * LimitExceededException +// You have exceeded a service limit for your account. For more information, +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) +// in the AWS App Mesh User Guide. +// +// * NotFoundException +// The specified resource doesn't exist. 
Check your request syntax and try again. +// +// * ServiceUnavailableException +// The request has failed due to a temporary failure of the service. +// +// * TooManyRequestsException +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/CreateVirtualGateway +func (c *AppMesh) CreateVirtualGateway(input *CreateVirtualGatewayInput) (*CreateVirtualGatewayOutput, error) { + req, out := c.CreateVirtualGatewayRequest(input) + return out, req.Send() +} + +// CreateVirtualGatewayWithContext is the same as CreateVirtualGateway with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVirtualGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppMesh) CreateVirtualGatewayWithContext(ctx aws.Context, input *CreateVirtualGatewayInput, opts ...request.Option) (*CreateVirtualGatewayOutput, error) { + req, out := c.CreateVirtualGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateVirtualNode = "CreateVirtualNode" // CreateVirtualNodeRequest generates a "aws/request.Request" representing the @@ -290,11 +515,13 @@ func (c *AppMesh) CreateVirtualNodeRequest(input *CreateVirtualNodeInput) (req * // // A virtual node acts as a logical pointer to a particular task group, such // as an Amazon ECS service or a Kubernetes deployment. When you create a virtual -// node, you can specify the service discovery information for your task group. +// node, you can specify the service discovery information for your task group, +// and whether the proxy running in a task group will communicate with other +// proxies using Transport Layer Security (TLS). // -// Any inbound traffic that your virtual node expects should be specified as -// a listener. Any outbound traffic that your virtual node expects to reach -// should be specified as a backend. +// You define a listener for any inbound traffic that your virtual node expects. +// Any virtual service that your virtual node expects to communicate to is specified +// as a backend. // // The response metadata for your new virtual node contains the arn that is // associated with the virtual node. Set this value (either the full ARN or @@ -307,7 +534,7 @@ func (c *AppMesh) CreateVirtualNodeRequest(input *CreateVirtualNodeInput) (req * // override the node.cluster value that is set by APPMESH_VIRTUAL_NODE_NAME // with the APPMESH_VIRTUAL_NODE_CLUSTER environment variable. // -// For more information about virtual nodes, see Virtual Nodes (https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual_nodes.html). +// For more information about virtual nodes, see Virtual nodes (https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual_nodes.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -334,7 +561,7 @@ func (c *AppMesh) CreateVirtualNodeRequest(input *CreateVirtualNodeInput) (req * // // * LimitExceededException // You have exceeded a service limit for your account. For more information, -// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service_limits.html) +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) // in the AWS App Mesh User Guide. // // * NotFoundException @@ -416,14 +643,13 @@ func (c *AppMesh) CreateVirtualRouterRequest(input *CreateVirtualRouterInput) (r // // Creates a virtual router within a service mesh. // -// Any inbound traffic that your virtual router expects should be specified -// as a listener. -// +// Specify a listener for any inbound traffic that your virtual router receives. +// Create a virtual router for each protocol and port that you need to route. // Virtual routers handle traffic for one or more virtual services within your // mesh. After you create your virtual router, create and associate routes for // your virtual router that direct incoming requests to different virtual nodes. // -// For more information about virtual routers, see Virtual Routers (https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual_routers.html). +// For more information about virtual routers, see Virtual routers (https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual_routers.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -450,7 +676,7 @@ func (c *AppMesh) CreateVirtualRouterRequest(input *CreateVirtualRouterInput) (r // // * LimitExceededException // You have exceeded a service limit for your account. For more information, -// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service_limits.html) +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) // in the AWS App Mesh User Guide. // // * NotFoundException @@ -538,7 +764,7 @@ func (c *AppMesh) CreateVirtualServiceRequest(input *CreateVirtualServiceInput) // are routed to the virtual node or virtual router that is specified as the // provider for the virtual service. // -// For more information about virtual services, see Virtual Services (https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual_services.html). +// For more information about virtual services, see Virtual services (https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual_services.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -565,7 +791,7 @@ func (c *AppMesh) CreateVirtualServiceRequest(input *CreateVirtualServiceInput) // // * LimitExceededException // You have exceeded a service limit for your account. For more information, -// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service_limits.html) +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) // in the AWS App Mesh User Guide. 
// // * NotFoundException @@ -601,6 +827,107 @@ func (c *AppMesh) CreateVirtualServiceWithContext(ctx aws.Context, input *Create return out, req.Send() } +const opDeleteGatewayRoute = "DeleteGatewayRoute" + +// DeleteGatewayRouteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGatewayRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteGatewayRoute for more information on using the DeleteGatewayRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteGatewayRouteRequest method. +// req, resp := client.DeleteGatewayRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/DeleteGatewayRoute +func (c *AppMesh) DeleteGatewayRouteRequest(input *DeleteGatewayRouteInput) (req *request.Request, output *DeleteGatewayRouteOutput) { + op := &request.Operation{ + Name: opDeleteGatewayRoute, + HTTPMethod: "DELETE", + HTTPPath: "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes/{gatewayRouteName}", + } + + if input == nil { + input = &DeleteGatewayRouteInput{} + } + + output = &DeleteGatewayRouteOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteGatewayRoute API operation for AWS App Mesh. +// +// Deletes an existing gateway route. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS App Mesh's +// API operation DeleteGatewayRoute for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request syntax was malformed. Check your request syntax and try again. +// +// * ForbiddenException +// You don't have permissions to perform this action. +// +// * InternalServerErrorException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * NotFoundException +// The specified resource doesn't exist. Check your request syntax and try again. +// +// * ResourceInUseException +// You can't delete the specified resource because it's in use or required by +// another resource. +// +// * ServiceUnavailableException +// The request has failed due to a temporary failure of the service. +// +// * TooManyRequestsException +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/DeleteGatewayRoute +func (c *AppMesh) DeleteGatewayRoute(input *DeleteGatewayRouteInput) (*DeleteGatewayRouteOutput, error) { + req, out := c.DeleteGatewayRouteRequest(input) + return out, req.Send() +} + +// DeleteGatewayRouteWithContext is the same as DeleteGatewayRoute with the addition of +// the ability to pass a context and additional request options. 
+// +// See DeleteGatewayRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppMesh) DeleteGatewayRouteWithContext(ctx aws.Context, input *DeleteGatewayRouteInput, opts ...request.Option) (*DeleteGatewayRouteOutput, error) { + req, out := c.DeleteGatewayRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteMesh = "DeleteMesh" // DeleteMeshRequest generates a "aws/request.Request" representing the @@ -806,9 +1133,111 @@ func (c *AppMesh) DeleteRouteWithContext(ctx aws.Context, input *DeleteRouteInpu return out, req.Send() } -const opDeleteVirtualNode = "DeleteVirtualNode" +const opDeleteVirtualGateway = "DeleteVirtualGateway" -// DeleteVirtualNodeRequest generates a "aws/request.Request" representing the +// DeleteVirtualGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVirtualGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVirtualGateway for more information on using the DeleteVirtualGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVirtualGatewayRequest method. +// req, resp := client.DeleteVirtualGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/DeleteVirtualGateway +func (c *AppMesh) DeleteVirtualGatewayRequest(input *DeleteVirtualGatewayInput) (req *request.Request, output *DeleteVirtualGatewayOutput) { + op := &request.Operation{ + Name: opDeleteVirtualGateway, + HTTPMethod: "DELETE", + HTTPPath: "/v20190125/meshes/{meshName}/virtualGateways/{virtualGatewayName}", + } + + if input == nil { + input = &DeleteVirtualGatewayInput{} + } + + output = &DeleteVirtualGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteVirtualGateway API operation for AWS App Mesh. +// +// Deletes an existing virtual gateway. You cannot delete a virtual gateway +// if any gateway routes are associated to it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS App Mesh's +// API operation DeleteVirtualGateway for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request syntax was malformed. Check your request syntax and try again. +// +// * ForbiddenException +// You don't have permissions to perform this action. +// +// * InternalServerErrorException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * NotFoundException +// The specified resource doesn't exist. 
Check your request syntax and try again. +// +// * ResourceInUseException +// You can't delete the specified resource because it's in use or required by +// another resource. +// +// * ServiceUnavailableException +// The request has failed due to a temporary failure of the service. +// +// * TooManyRequestsException +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/DeleteVirtualGateway +func (c *AppMesh) DeleteVirtualGateway(input *DeleteVirtualGatewayInput) (*DeleteVirtualGatewayOutput, error) { + req, out := c.DeleteVirtualGatewayRequest(input) + return out, req.Send() +} + +// DeleteVirtualGatewayWithContext is the same as DeleteVirtualGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVirtualGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppMesh) DeleteVirtualGatewayWithContext(ctx aws.Context, input *DeleteVirtualGatewayInput, opts ...request.Option) (*DeleteVirtualGatewayOutput, error) { + req, out := c.DeleteVirtualGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVirtualNode = "DeleteVirtualNode" + +// DeleteVirtualNodeRequest generates a "aws/request.Request" representing the // client's request for the DeleteVirtualNode operation. The "output" return // value will be populated with the request's response once the request completes // successfully. @@ -1081,6 +1510,10 @@ func (c *AppMesh) DeleteVirtualServiceRequest(input *DeleteVirtualServiceInput) // * NotFoundException // The specified resource doesn't exist. Check your request syntax and try again. // +// * ResourceInUseException +// You can't delete the specified resource because it's in use or required by +// another resource. +// // * ServiceUnavailableException // The request has failed due to a temporary failure of the service. // @@ -1111,6 +1544,103 @@ func (c *AppMesh) DeleteVirtualServiceWithContext(ctx aws.Context, input *Delete return out, req.Send() } +const opDescribeGatewayRoute = "DescribeGatewayRoute" + +// DescribeGatewayRouteRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGatewayRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeGatewayRoute for more information on using the DescribeGatewayRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeGatewayRouteRequest method. 
+// req, resp := client.DescribeGatewayRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/DescribeGatewayRoute +func (c *AppMesh) DescribeGatewayRouteRequest(input *DescribeGatewayRouteInput) (req *request.Request, output *DescribeGatewayRouteOutput) { + op := &request.Operation{ + Name: opDescribeGatewayRoute, + HTTPMethod: "GET", + HTTPPath: "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes/{gatewayRouteName}", + } + + if input == nil { + input = &DescribeGatewayRouteInput{} + } + + output = &DescribeGatewayRouteOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeGatewayRoute API operation for AWS App Mesh. +// +// Describes an existing gateway route. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS App Mesh's +// API operation DescribeGatewayRoute for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request syntax was malformed. Check your request syntax and try again. +// +// * ForbiddenException +// You don't have permissions to perform this action. +// +// * InternalServerErrorException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * NotFoundException +// The specified resource doesn't exist. Check your request syntax and try again. +// +// * ServiceUnavailableException +// The request has failed due to a temporary failure of the service. +// +// * TooManyRequestsException +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/DescribeGatewayRoute +func (c *AppMesh) DescribeGatewayRoute(input *DescribeGatewayRouteInput) (*DescribeGatewayRouteOutput, error) { + req, out := c.DescribeGatewayRouteRequest(input) + return out, req.Send() +} + +// DescribeGatewayRouteWithContext is the same as DescribeGatewayRoute with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeGatewayRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppMesh) DescribeGatewayRouteWithContext(ctx aws.Context, input *DescribeGatewayRouteInput, opts ...request.Option) (*DescribeGatewayRouteOutput, error) { + req, out := c.DescribeGatewayRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeMesh = "DescribeMesh" // DescribeMeshRequest generates a "aws/request.Request" representing the @@ -1305,6 +1835,103 @@ func (c *AppMesh) DescribeRouteWithContext(ctx aws.Context, input *DescribeRoute return out, req.Send() } +const opDescribeVirtualGateway = "DescribeVirtualGateway" + +// DescribeVirtualGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVirtualGateway operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeVirtualGateway for more information on using the DescribeVirtualGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeVirtualGatewayRequest method. +// req, resp := client.DescribeVirtualGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/DescribeVirtualGateway +func (c *AppMesh) DescribeVirtualGatewayRequest(input *DescribeVirtualGatewayInput) (req *request.Request, output *DescribeVirtualGatewayOutput) { + op := &request.Operation{ + Name: opDescribeVirtualGateway, + HTTPMethod: "GET", + HTTPPath: "/v20190125/meshes/{meshName}/virtualGateways/{virtualGatewayName}", + } + + if input == nil { + input = &DescribeVirtualGatewayInput{} + } + + output = &DescribeVirtualGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeVirtualGateway API operation for AWS App Mesh. +// +// Describes an existing virtual gateway. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS App Mesh's +// API operation DescribeVirtualGateway for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request syntax was malformed. Check your request syntax and try again. +// +// * ForbiddenException +// You don't have permissions to perform this action. +// +// * InternalServerErrorException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * NotFoundException +// The specified resource doesn't exist. Check your request syntax and try again. +// +// * ServiceUnavailableException +// The request has failed due to a temporary failure of the service. +// +// * TooManyRequestsException +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/DescribeVirtualGateway +func (c *AppMesh) DescribeVirtualGateway(input *DescribeVirtualGatewayInput) (*DescribeVirtualGatewayOutput, error) { + req, out := c.DescribeVirtualGatewayRequest(input) + return out, req.Send() +} + +// DescribeVirtualGatewayWithContext is the same as DescribeVirtualGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeVirtualGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *AppMesh) DescribeVirtualGatewayWithContext(ctx aws.Context, input *DescribeVirtualGatewayInput, opts ...request.Option) (*DescribeVirtualGatewayOutput, error) { + req, out := c.DescribeVirtualGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeVirtualNode = "DescribeVirtualNode" // DescribeVirtualNodeRequest generates a "aws/request.Request" representing the @@ -1596,37 +2223,37 @@ func (c *AppMesh) DescribeVirtualServiceWithContext(ctx aws.Context, input *Desc return out, req.Send() } -const opListMeshes = "ListMeshes" +const opListGatewayRoutes = "ListGatewayRoutes" -// ListMeshesRequest generates a "aws/request.Request" representing the -// client's request for the ListMeshes operation. The "output" return +// ListGatewayRoutesRequest generates a "aws/request.Request" representing the +// client's request for the ListGatewayRoutes operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListMeshes for more information on using the ListMeshes +// See ListGatewayRoutes for more information on using the ListGatewayRoutes // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListMeshesRequest method. -// req, resp := client.ListMeshesRequest(params) +// // Example sending a request using the ListGatewayRoutesRequest method. +// req, resp := client.ListGatewayRoutesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListMeshes -func (c *AppMesh) ListMeshesRequest(input *ListMeshesInput) (req *request.Request, output *ListMeshesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListGatewayRoutes +func (c *AppMesh) ListGatewayRoutesRequest(input *ListGatewayRoutesInput) (req *request.Request, output *ListGatewayRoutesOutput) { op := &request.Operation{ - Name: opListMeshes, + Name: opListGatewayRoutes, HTTPMethod: "GET", - HTTPPath: "/v20190125/meshes", + HTTPPath: "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, @@ -1636,24 +2263,25 @@ func (c *AppMesh) ListMeshesRequest(input *ListMeshesInput) (req *request.Reques } if input == nil { - input = &ListMeshesInput{} + input = &ListGatewayRoutesInput{} } - output = &ListMeshesOutput{} + output = &ListGatewayRoutesOutput{} req = c.newRequest(op, input, output) return } -// ListMeshes API operation for AWS App Mesh. +// ListGatewayRoutes API operation for AWS App Mesh. // -// Returns a list of existing service meshes. +// Returns a list of existing gateway routes that are associated to a virtual +// gateway. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS App Mesh's -// API operation ListMeshes for usage and error information. 
+// API operation ListGatewayRoutes for usage and error information. // // Returned Error Types: // * BadRequestException @@ -1677,22 +2305,177 @@ func (c *AppMesh) ListMeshesRequest(input *ListMeshesInput) (req *request.Reques // for your account. For best results, use an increasing or variable sleep interval // between requests. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListMeshes -func (c *AppMesh) ListMeshes(input *ListMeshesInput) (*ListMeshesOutput, error) { - req, out := c.ListMeshesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListGatewayRoutes +func (c *AppMesh) ListGatewayRoutes(input *ListGatewayRoutesInput) (*ListGatewayRoutesOutput, error) { + req, out := c.ListGatewayRoutesRequest(input) return out, req.Send() } -// ListMeshesWithContext is the same as ListMeshes with the addition of +// ListGatewayRoutesWithContext is the same as ListGatewayRoutes with the addition of // the ability to pass a context and additional request options. // -// See ListMeshes for details on how to use this API operation. +// See ListGatewayRoutes for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *AppMesh) ListMeshesWithContext(ctx aws.Context, input *ListMeshesInput, opts ...request.Option) (*ListMeshesOutput, error) { +func (c *AppMesh) ListGatewayRoutesWithContext(ctx aws.Context, input *ListGatewayRoutesInput, opts ...request.Option) (*ListGatewayRoutesOutput, error) { + req, out := c.ListGatewayRoutesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListGatewayRoutesPages iterates over the pages of a ListGatewayRoutes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGatewayRoutes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGatewayRoutes operation. +// pageNum := 0 +// err := client.ListGatewayRoutesPages(params, +// func(page *appmesh.ListGatewayRoutesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *AppMesh) ListGatewayRoutesPages(input *ListGatewayRoutesInput, fn func(*ListGatewayRoutesOutput, bool) bool) error { + return c.ListGatewayRoutesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListGatewayRoutesPagesWithContext same as ListGatewayRoutesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
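// Illustrative sketch (not part of the vendored SDK source): paginating the
// gateway routes of one virtual gateway with ListGatewayRoutesPages, in the
// style of the embedded example above. Mesh and gateway names are placeholders,
// and the Limit field is assumed to mirror the other List*Input types.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	client := appmesh.New(session.Must(session.NewSession()))

	pageNum := 0
	err := client.ListGatewayRoutesPages(&appmesh.ListGatewayRoutesInput{
		MeshName:           aws.String("example-mesh"),    // placeholder
		VirtualGatewayName: aws.String("example-gateway"), // placeholder
		Limit:              aws.Int64(10),                 // page size, carried in the "limit" token
	}, func(page *appmesh.ListGatewayRoutesOutput, lastPage bool) bool {
		pageNum++
		fmt.Println(page)
		return pageNum <= 3 // stop after at most 3 pages, as in the example above
	})
	if err != nil {
		fmt.Println("listing gateway routes failed:", err)
	}
}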
+func (c *AppMesh) ListGatewayRoutesPagesWithContext(ctx aws.Context, input *ListGatewayRoutesInput, fn func(*ListGatewayRoutesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListGatewayRoutesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListGatewayRoutesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListGatewayRoutesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListMeshes = "ListMeshes" + +// ListMeshesRequest generates a "aws/request.Request" representing the +// client's request for the ListMeshes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMeshes for more information on using the ListMeshes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListMeshesRequest method. +// req, resp := client.ListMeshesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListMeshes +func (c *AppMesh) ListMeshesRequest(input *ListMeshesInput) (req *request.Request, output *ListMeshesOutput) { + op := &request.Operation{ + Name: opListMeshes, + HTTPMethod: "GET", + HTTPPath: "/v20190125/meshes", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListMeshesInput{} + } + + output = &ListMeshesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMeshes API operation for AWS App Mesh. +// +// Returns a list of existing service meshes. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS App Mesh's +// API operation ListMeshes for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request syntax was malformed. Check your request syntax and try again. +// +// * ForbiddenException +// You don't have permissions to perform this action. +// +// * InternalServerErrorException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * NotFoundException +// The specified resource doesn't exist. Check your request syntax and try again. +// +// * ServiceUnavailableException +// The request has failed due to a temporary failure of the service. +// +// * TooManyRequestsException +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListMeshes +func (c *AppMesh) ListMeshes(input *ListMeshesInput) (*ListMeshesOutput, error) { + req, out := c.ListMeshesRequest(input) + return out, req.Send() +} + +// ListMeshesWithContext is the same as ListMeshes with the addition of +// the ability to pass a context and additional request options. +// +// See ListMeshes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppMesh) ListMeshesWithContext(ctx aws.Context, input *ListMeshesInput, opts ...request.Option) (*ListMeshesOutput, error) { req, out := c.ListMeshesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) @@ -2061,37 +2844,37 @@ func (c *AppMesh) ListTagsForResourcePagesWithContext(ctx aws.Context, input *Li return p.Err() } -const opListVirtualNodes = "ListVirtualNodes" +const opListVirtualGateways = "ListVirtualGateways" -// ListVirtualNodesRequest generates a "aws/request.Request" representing the -// client's request for the ListVirtualNodes operation. The "output" return +// ListVirtualGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the ListVirtualGateways operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListVirtualNodes for more information on using the ListVirtualNodes +// See ListVirtualGateways for more information on using the ListVirtualGateways // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListVirtualNodesRequest method. -// req, resp := client.ListVirtualNodesRequest(params) +// // Example sending a request using the ListVirtualGatewaysRequest method. 
+// req, resp := client.ListVirtualGatewaysRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListVirtualNodes -func (c *AppMesh) ListVirtualNodesRequest(input *ListVirtualNodesInput) (req *request.Request, output *ListVirtualNodesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListVirtualGateways +func (c *AppMesh) ListVirtualGatewaysRequest(input *ListVirtualGatewaysInput) (req *request.Request, output *ListVirtualGatewaysOutput) { op := &request.Operation{ - Name: opListVirtualNodes, + Name: opListVirtualGateways, HTTPMethod: "GET", - HTTPPath: "/v20190125/meshes/{meshName}/virtualNodes", + HTTPPath: "/v20190125/meshes/{meshName}/virtualGateways", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, @@ -2101,24 +2884,24 @@ func (c *AppMesh) ListVirtualNodesRequest(input *ListVirtualNodesInput) (req *re } if input == nil { - input = &ListVirtualNodesInput{} + input = &ListVirtualGatewaysInput{} } - output = &ListVirtualNodesOutput{} + output = &ListVirtualGatewaysOutput{} req = c.newRequest(op, input, output) return } -// ListVirtualNodes API operation for AWS App Mesh. +// ListVirtualGateways API operation for AWS App Mesh. // -// Returns a list of existing virtual nodes. +// Returns a list of existing virtual gateways in a service mesh. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS App Mesh's -// API operation ListVirtualNodes for usage and error information. +// API operation ListVirtualGateways for usage and error information. // // Returned Error Types: // * BadRequestException @@ -2142,65 +2925,65 @@ func (c *AppMesh) ListVirtualNodesRequest(input *ListVirtualNodesInput) (req *re // for your account. For best results, use an increasing or variable sleep interval // between requests. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListVirtualNodes -func (c *AppMesh) ListVirtualNodes(input *ListVirtualNodesInput) (*ListVirtualNodesOutput, error) { - req, out := c.ListVirtualNodesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListVirtualGateways +func (c *AppMesh) ListVirtualGateways(input *ListVirtualGatewaysInput) (*ListVirtualGatewaysOutput, error) { + req, out := c.ListVirtualGatewaysRequest(input) return out, req.Send() } -// ListVirtualNodesWithContext is the same as ListVirtualNodes with the addition of +// ListVirtualGatewaysWithContext is the same as ListVirtualGateways with the addition of // the ability to pass a context and additional request options. // -// See ListVirtualNodes for details on how to use this API operation. +// See ListVirtualGateways for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
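// Illustrative sketch (not part of the vendored SDK source): ListVirtualGateways
// also gains Pages/PagesWithContext variants below; this shows the paginated
// call bounded by a cancellable context. The mesh name is a placeholder.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	client := appmesh.New(session.Must(session.NewSession()))

	// The timeout bounds the whole pagination, not just a single page request.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	err := client.ListVirtualGatewaysPagesWithContext(ctx,
		&appmesh.ListVirtualGatewaysInput{MeshName: aws.String("example-mesh")}, // placeholder
		func(page *appmesh.ListVirtualGatewaysOutput, lastPage bool) bool {
			fmt.Println(page)
			return true // keep going until the service reports the last page
		})
	if err != nil {
		fmt.Println("listing virtual gateways failed:", err)
	}
}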
-func (c *AppMesh) ListVirtualNodesWithContext(ctx aws.Context, input *ListVirtualNodesInput, opts ...request.Option) (*ListVirtualNodesOutput, error) { - req, out := c.ListVirtualNodesRequest(input) +func (c *AppMesh) ListVirtualGatewaysWithContext(ctx aws.Context, input *ListVirtualGatewaysInput, opts ...request.Option) (*ListVirtualGatewaysOutput, error) { + req, out := c.ListVirtualGatewaysRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListVirtualNodesPages iterates over the pages of a ListVirtualNodes operation, +// ListVirtualGatewaysPages iterates over the pages of a ListVirtualGateways operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListVirtualNodes method for more information on how to use this operation. +// See ListVirtualGateways method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListVirtualNodes operation. +// // Example iterating over at most 3 pages of a ListVirtualGateways operation. // pageNum := 0 -// err := client.ListVirtualNodesPages(params, -// func(page *appmesh.ListVirtualNodesOutput, lastPage bool) bool { +// err := client.ListVirtualGatewaysPages(params, +// func(page *appmesh.ListVirtualGatewaysOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *AppMesh) ListVirtualNodesPages(input *ListVirtualNodesInput, fn func(*ListVirtualNodesOutput, bool) bool) error { - return c.ListVirtualNodesPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *AppMesh) ListVirtualGatewaysPages(input *ListVirtualGatewaysInput, fn func(*ListVirtualGatewaysOutput, bool) bool) error { + return c.ListVirtualGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListVirtualNodesPagesWithContext same as ListVirtualNodesPages except +// ListVirtualGatewaysPagesWithContext same as ListVirtualGatewaysPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *AppMesh) ListVirtualNodesPagesWithContext(ctx aws.Context, input *ListVirtualNodesInput, fn func(*ListVirtualNodesOutput, bool) bool, opts ...request.Option) error { +func (c *AppMesh) ListVirtualGatewaysPagesWithContext(ctx aws.Context, input *ListVirtualGatewaysInput, fn func(*ListVirtualGatewaysOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListVirtualNodesInput + var inCpy *ListVirtualGatewaysInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListVirtualNodesRequest(inCpy) + req, _ := c.ListVirtualGatewaysRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) 
return req, nil @@ -2208,7 +2991,7 @@ func (c *AppMesh) ListVirtualNodesPagesWithContext(ctx aws.Context, input *ListV } for p.Next() { - if !fn(p.Page().(*ListVirtualNodesOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListVirtualGatewaysOutput), !p.HasNextPage()) { break } } @@ -2216,37 +2999,37 @@ func (c *AppMesh) ListVirtualNodesPagesWithContext(ctx aws.Context, input *ListV return p.Err() } -const opListVirtualRouters = "ListVirtualRouters" +const opListVirtualNodes = "ListVirtualNodes" -// ListVirtualRoutersRequest generates a "aws/request.Request" representing the -// client's request for the ListVirtualRouters operation. The "output" return +// ListVirtualNodesRequest generates a "aws/request.Request" representing the +// client's request for the ListVirtualNodes operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListVirtualRouters for more information on using the ListVirtualRouters +// See ListVirtualNodes for more information on using the ListVirtualNodes // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListVirtualRoutersRequest method. -// req, resp := client.ListVirtualRoutersRequest(params) +// // Example sending a request using the ListVirtualNodesRequest method. +// req, resp := client.ListVirtualNodesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListVirtualRouters -func (c *AppMesh) ListVirtualRoutersRequest(input *ListVirtualRoutersInput) (req *request.Request, output *ListVirtualRoutersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListVirtualNodes +func (c *AppMesh) ListVirtualNodesRequest(input *ListVirtualNodesInput) (req *request.Request, output *ListVirtualNodesOutput) { op := &request.Operation{ - Name: opListVirtualRouters, + Name: opListVirtualNodes, HTTPMethod: "GET", - HTTPPath: "/v20190125/meshes/{meshName}/virtualRouters", + HTTPPath: "/v20190125/meshes/{meshName}/virtualNodes", Paginator: &request.Paginator{ InputTokens: []string{"nextToken"}, OutputTokens: []string{"nextToken"}, @@ -2256,24 +3039,24 @@ func (c *AppMesh) ListVirtualRoutersRequest(input *ListVirtualRoutersInput) (req } if input == nil { - input = &ListVirtualRoutersInput{} + input = &ListVirtualNodesInput{} } - output = &ListVirtualRoutersOutput{} + output = &ListVirtualNodesOutput{} req = c.newRequest(op, input, output) return } -// ListVirtualRouters API operation for AWS App Mesh. +// ListVirtualNodes API operation for AWS App Mesh. // -// Returns a list of existing virtual routers in a service mesh. +// Returns a list of existing virtual nodes. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS App Mesh's -// API operation ListVirtualRouters for usage and error information. +// API operation ListVirtualNodes for usage and error information. 
// // Returned Error Types: // * BadRequestException @@ -2297,65 +3080,65 @@ func (c *AppMesh) ListVirtualRoutersRequest(input *ListVirtualRoutersInput) (req // for your account. For best results, use an increasing or variable sleep interval // between requests. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListVirtualRouters -func (c *AppMesh) ListVirtualRouters(input *ListVirtualRoutersInput) (*ListVirtualRoutersOutput, error) { - req, out := c.ListVirtualRoutersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListVirtualNodes +func (c *AppMesh) ListVirtualNodes(input *ListVirtualNodesInput) (*ListVirtualNodesOutput, error) { + req, out := c.ListVirtualNodesRequest(input) return out, req.Send() } -// ListVirtualRoutersWithContext is the same as ListVirtualRouters with the addition of +// ListVirtualNodesWithContext is the same as ListVirtualNodes with the addition of // the ability to pass a context and additional request options. // -// See ListVirtualRouters for details on how to use this API operation. +// See ListVirtualNodes for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *AppMesh) ListVirtualRoutersWithContext(ctx aws.Context, input *ListVirtualRoutersInput, opts ...request.Option) (*ListVirtualRoutersOutput, error) { - req, out := c.ListVirtualRoutersRequest(input) +func (c *AppMesh) ListVirtualNodesWithContext(ctx aws.Context, input *ListVirtualNodesInput, opts ...request.Option) (*ListVirtualNodesOutput, error) { + req, out := c.ListVirtualNodesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListVirtualRoutersPages iterates over the pages of a ListVirtualRouters operation, +// ListVirtualNodesPages iterates over the pages of a ListVirtualNodes operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListVirtualRouters method for more information on how to use this operation. +// See ListVirtualNodes method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListVirtualRouters operation. +// // Example iterating over at most 3 pages of a ListVirtualNodes operation. 
// pageNum := 0 -// err := client.ListVirtualRoutersPages(params, -// func(page *appmesh.ListVirtualRoutersOutput, lastPage bool) bool { +// err := client.ListVirtualNodesPages(params, +// func(page *appmesh.ListVirtualNodesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *AppMesh) ListVirtualRoutersPages(input *ListVirtualRoutersInput, fn func(*ListVirtualRoutersOutput, bool) bool) error { - return c.ListVirtualRoutersPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *AppMesh) ListVirtualNodesPages(input *ListVirtualNodesInput, fn func(*ListVirtualNodesOutput, bool) bool) error { + return c.ListVirtualNodesPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListVirtualRoutersPagesWithContext same as ListVirtualRoutersPages except +// ListVirtualNodesPagesWithContext same as ListVirtualNodesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *AppMesh) ListVirtualRoutersPagesWithContext(ctx aws.Context, input *ListVirtualRoutersInput, fn func(*ListVirtualRoutersOutput, bool) bool, opts ...request.Option) error { +func (c *AppMesh) ListVirtualNodesPagesWithContext(ctx aws.Context, input *ListVirtualNodesInput, fn func(*ListVirtualNodesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListVirtualRoutersInput + var inCpy *ListVirtualNodesInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListVirtualRoutersRequest(inCpy) + req, _ := c.ListVirtualNodesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -2363,7 +3146,7 @@ func (c *AppMesh) ListVirtualRoutersPagesWithContext(ctx aws.Context, input *Lis } for p.Next() { - if !fn(p.Page().(*ListVirtualRoutersOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListVirtualNodesOutput), !p.HasNextPage()) { break } } @@ -2371,25 +3154,180 @@ func (c *AppMesh) ListVirtualRoutersPagesWithContext(ctx aws.Context, input *Lis return p.Err() } -const opListVirtualServices = "ListVirtualServices" +const opListVirtualRouters = "ListVirtualRouters" -// ListVirtualServicesRequest generates a "aws/request.Request" representing the -// client's request for the ListVirtualServices operation. The "output" return +// ListVirtualRoutersRequest generates a "aws/request.Request" representing the +// client's request for the ListVirtualRouters operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListVirtualServices for more information on using the ListVirtualServices +// See ListVirtualRouters for more information on using the ListVirtualRouters // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListVirtualServicesRequest method. 
-// req, resp := client.ListVirtualServicesRequest(params) +// // Example sending a request using the ListVirtualRoutersRequest method. +// req, resp := client.ListVirtualRoutersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListVirtualRouters +func (c *AppMesh) ListVirtualRoutersRequest(input *ListVirtualRoutersInput) (req *request.Request, output *ListVirtualRoutersOutput) { + op := &request.Operation{ + Name: opListVirtualRouters, + HTTPMethod: "GET", + HTTPPath: "/v20190125/meshes/{meshName}/virtualRouters", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListVirtualRoutersInput{} + } + + output = &ListVirtualRoutersOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListVirtualRouters API operation for AWS App Mesh. +// +// Returns a list of existing virtual routers in a service mesh. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS App Mesh's +// API operation ListVirtualRouters for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request syntax was malformed. Check your request syntax and try again. +// +// * ForbiddenException +// You don't have permissions to perform this action. +// +// * InternalServerErrorException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * NotFoundException +// The specified resource doesn't exist. Check your request syntax and try again. +// +// * ServiceUnavailableException +// The request has failed due to a temporary failure of the service. +// +// * TooManyRequestsException +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/ListVirtualRouters +func (c *AppMesh) ListVirtualRouters(input *ListVirtualRoutersInput) (*ListVirtualRoutersOutput, error) { + req, out := c.ListVirtualRoutersRequest(input) + return out, req.Send() +} + +// ListVirtualRoutersWithContext is the same as ListVirtualRouters with the addition of +// the ability to pass a context and additional request options. +// +// See ListVirtualRouters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppMesh) ListVirtualRoutersWithContext(ctx aws.Context, input *ListVirtualRoutersInput, opts ...request.Option) (*ListVirtualRoutersOutput, error) { + req, out := c.ListVirtualRoutersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListVirtualRoutersPages iterates over the pages of a ListVirtualRouters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See ListVirtualRouters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListVirtualRouters operation. +// pageNum := 0 +// err := client.ListVirtualRoutersPages(params, +// func(page *appmesh.ListVirtualRoutersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *AppMesh) ListVirtualRoutersPages(input *ListVirtualRoutersInput, fn func(*ListVirtualRoutersOutput, bool) bool) error { + return c.ListVirtualRoutersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListVirtualRoutersPagesWithContext same as ListVirtualRoutersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppMesh) ListVirtualRoutersPagesWithContext(ctx aws.Context, input *ListVirtualRoutersInput, fn func(*ListVirtualRoutersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListVirtualRoutersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListVirtualRoutersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListVirtualRoutersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListVirtualServices = "ListVirtualServices" + +// ListVirtualServicesRequest generates a "aws/request.Request" representing the +// client's request for the ListVirtualServices operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListVirtualServices for more information on using the ListVirtualServices +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListVirtualServicesRequest method. +// req, resp := client.ListVirtualServicesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled @@ -2730,6 +3668,114 @@ func (c *AppMesh) UntagResourceWithContext(ctx aws.Context, input *UntagResource return out, req.Send() } +const opUpdateGatewayRoute = "UpdateGatewayRoute" + +// UpdateGatewayRouteRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGatewayRoute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGatewayRoute for more information on using the UpdateGatewayRoute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateGatewayRouteRequest method. +// req, resp := client.UpdateGatewayRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/UpdateGatewayRoute +func (c *AppMesh) UpdateGatewayRouteRequest(input *UpdateGatewayRouteInput) (req *request.Request, output *UpdateGatewayRouteOutput) { + op := &request.Operation{ + Name: opUpdateGatewayRoute, + HTTPMethod: "PUT", + HTTPPath: "/v20190125/meshes/{meshName}/virtualGateway/{virtualGatewayName}/gatewayRoutes/{gatewayRouteName}", + } + + if input == nil { + input = &UpdateGatewayRouteInput{} + } + + output = &UpdateGatewayRouteOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateGatewayRoute API operation for AWS App Mesh. +// +// Updates an existing gateway route that is associated to a specified virtual +// gateway in a service mesh. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS App Mesh's +// API operation UpdateGatewayRoute for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request syntax was malformed. Check your request syntax and try again. +// +// * ConflictException +// The request contains a client token that was used for a previous update resource +// call with different specifications. Try the request again with a new client +// token. +// +// * ForbiddenException +// You don't have permissions to perform this action. +// +// * InternalServerErrorException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * LimitExceededException +// You have exceeded a service limit for your account. For more information, +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) +// in the AWS App Mesh User Guide. +// +// * NotFoundException +// The specified resource doesn't exist. Check your request syntax and try again. +// +// * ServiceUnavailableException +// The request has failed due to a temporary failure of the service. +// +// * TooManyRequestsException +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/UpdateGatewayRoute +func (c *AppMesh) UpdateGatewayRoute(input *UpdateGatewayRouteInput) (*UpdateGatewayRouteOutput, error) { + req, out := c.UpdateGatewayRouteRequest(input) + return out, req.Send() +} + +// UpdateGatewayRouteWithContext is the same as UpdateGatewayRoute with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateGatewayRoute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
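// Illustrative sketch (not part of the vendored SDK source): the comment above
// mentions injecting custom headers into the request lifecycle but does not show
// it. One way is to build the request with UpdateGatewayRouteRequest, adjust it,
// and then Send it. Names and the header are placeholders, and spec stands in
// for a *appmesh.GatewayRouteSpec built elsewhere.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	client := appmesh.New(session.Must(session.NewSession()))

	// Placeholder: a real call would populate the gateway route spec here.
	var spec *appmesh.GatewayRouteSpec

	req, out := client.UpdateGatewayRouteRequest(&appmesh.UpdateGatewayRouteInput{
		MeshName:           aws.String("example-mesh"),    // placeholder
		VirtualGatewayName: aws.String("example-gateway"), // placeholder
		GatewayRouteName:   aws.String("example-route"),   // placeholder
		Spec:               spec,
	})

	// Custom logic injected before the call goes out, per the comment above.
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "placeholder") // hypothetical header
	req.SetContext(context.Background())

	if err := req.Send(); err != nil {
		fmt.Println("update gateway route failed:", err)
		return
	}
	fmt.Println(out)
}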
+func (c *AppMesh) UpdateGatewayRouteWithContext(ctx aws.Context, input *UpdateGatewayRouteInput, opts ...request.Option) (*UpdateGatewayRouteOutput, error) { + req, out := c.UpdateGatewayRouteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateMesh = "UpdateMesh" // UpdateMeshRequest generates a "aws/request.Request" representing the @@ -2903,7 +3949,7 @@ func (c *AppMesh) UpdateRouteRequest(input *UpdateRouteInput) (req *request.Requ // // * LimitExceededException // You have exceeded a service limit for your account. For more information, -// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service_limits.html) +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) // in the AWS App Mesh User Guide. // // * NotFoundException @@ -2939,58 +3985,58 @@ func (c *AppMesh) UpdateRouteWithContext(ctx aws.Context, input *UpdateRouteInpu return out, req.Send() } -const opUpdateVirtualNode = "UpdateVirtualNode" +const opUpdateVirtualGateway = "UpdateVirtualGateway" -// UpdateVirtualNodeRequest generates a "aws/request.Request" representing the -// client's request for the UpdateVirtualNode operation. The "output" return +// UpdateVirtualGatewayRequest generates a "aws/request.Request" representing the +// client's request for the UpdateVirtualGateway operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateVirtualNode for more information on using the UpdateVirtualNode +// See UpdateVirtualGateway for more information on using the UpdateVirtualGateway // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateVirtualNodeRequest method. -// req, resp := client.UpdateVirtualNodeRequest(params) +// // Example sending a request using the UpdateVirtualGatewayRequest method. +// req, resp := client.UpdateVirtualGatewayRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/UpdateVirtualNode -func (c *AppMesh) UpdateVirtualNodeRequest(input *UpdateVirtualNodeInput) (req *request.Request, output *UpdateVirtualNodeOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/UpdateVirtualGateway +func (c *AppMesh) UpdateVirtualGatewayRequest(input *UpdateVirtualGatewayInput) (req *request.Request, output *UpdateVirtualGatewayOutput) { op := &request.Operation{ - Name: opUpdateVirtualNode, + Name: opUpdateVirtualGateway, HTTPMethod: "PUT", - HTTPPath: "/v20190125/meshes/{meshName}/virtualNodes/{virtualNodeName}", + HTTPPath: "/v20190125/meshes/{meshName}/virtualGateways/{virtualGatewayName}", } if input == nil { - input = &UpdateVirtualNodeInput{} + input = &UpdateVirtualGatewayInput{} } - output = &UpdateVirtualNodeOutput{} + output = &UpdateVirtualGatewayOutput{} req = c.newRequest(op, input, output) return } -// UpdateVirtualNode API operation for AWS App Mesh. +// UpdateVirtualGateway API operation for AWS App Mesh. // -// Updates an existing virtual node in a specified service mesh. 
+// Updates an existing virtual gateway in a specified service mesh. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS App Mesh's -// API operation UpdateVirtualNode for usage and error information. +// API operation UpdateVirtualGateway for usage and error information. // // Returned Error Types: // * BadRequestException @@ -3010,7 +4056,7 @@ func (c *AppMesh) UpdateVirtualNodeRequest(input *UpdateVirtualNodeInput) (req * // // * LimitExceededException // You have exceeded a service limit for your account. For more information, -// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service_limits.html) +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) // in the AWS App Mesh User Guide. // // * NotFoundException @@ -3024,8 +4070,115 @@ func (c *AppMesh) UpdateVirtualNodeRequest(input *UpdateVirtualNodeInput) (req * // for your account. For best results, use an increasing or variable sleep interval // between requests. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/UpdateVirtualNode -func (c *AppMesh) UpdateVirtualNode(input *UpdateVirtualNodeInput) (*UpdateVirtualNodeOutput, error) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/UpdateVirtualGateway +func (c *AppMesh) UpdateVirtualGateway(input *UpdateVirtualGatewayInput) (*UpdateVirtualGatewayOutput, error) { + req, out := c.UpdateVirtualGatewayRequest(input) + return out, req.Send() +} + +// UpdateVirtualGatewayWithContext is the same as UpdateVirtualGateway with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateVirtualGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AppMesh) UpdateVirtualGatewayWithContext(ctx aws.Context, input *UpdateVirtualGatewayInput, opts ...request.Option) (*UpdateVirtualGatewayOutput, error) { + req, out := c.UpdateVirtualGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateVirtualNode = "UpdateVirtualNode" + +// UpdateVirtualNodeRequest generates a "aws/request.Request" representing the +// client's request for the UpdateVirtualNode operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateVirtualNode for more information on using the UpdateVirtualNode +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateVirtualNodeRequest method. 
+// req, resp := client.UpdateVirtualNodeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/UpdateVirtualNode +func (c *AppMesh) UpdateVirtualNodeRequest(input *UpdateVirtualNodeInput) (req *request.Request, output *UpdateVirtualNodeOutput) { + op := &request.Operation{ + Name: opUpdateVirtualNode, + HTTPMethod: "PUT", + HTTPPath: "/v20190125/meshes/{meshName}/virtualNodes/{virtualNodeName}", + } + + if input == nil { + input = &UpdateVirtualNodeInput{} + } + + output = &UpdateVirtualNodeOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateVirtualNode API operation for AWS App Mesh. +// +// Updates an existing virtual node in a specified service mesh. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS App Mesh's +// API operation UpdateVirtualNode for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request syntax was malformed. Check your request syntax and try again. +// +// * ConflictException +// The request contains a client token that was used for a previous update resource +// call with different specifications. Try the request again with a new client +// token. +// +// * ForbiddenException +// You don't have permissions to perform this action. +// +// * InternalServerErrorException +// The request processing has failed because of an unknown error, exception, +// or failure. +// +// * LimitExceededException +// You have exceeded a service limit for your account. For more information, +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) +// in the AWS App Mesh User Guide. +// +// * NotFoundException +// The specified resource doesn't exist. Check your request syntax and try again. +// +// * ServiceUnavailableException +// The request has failed due to a temporary failure of the service. +// +// * TooManyRequestsException +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/appmesh-2019-01-25/UpdateVirtualNode +func (c *AppMesh) UpdateVirtualNode(input *UpdateVirtualNodeInput) (*UpdateVirtualNodeOutput, error) { req, out := c.UpdateVirtualNodeRequest(input) return out, req.Send() } @@ -3117,7 +4270,7 @@ func (c *AppMesh) UpdateVirtualRouterRequest(input *UpdateVirtualRouterInput) (r // // * LimitExceededException // You have exceeded a service limit for your account. For more information, -// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service_limits.html) +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) // in the AWS App Mesh User Guide. // // * NotFoundException @@ -3224,7 +4377,7 @@ func (c *AppMesh) UpdateVirtualServiceRequest(input *UpdateVirtualServiceInput) // // * LimitExceededException // You have exceeded a service limit for your account. 
For more information, -// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service_limits.html) +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) // in the AWS App Mesh User Guide. // // * NotFoundException @@ -3510,8 +4663,8 @@ func (s *BackendDefaults) SetClientPolicy(v *ClientPolicy) *BackendDefaults { // The request syntax was malformed. Check your request syntax and try again. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3528,17 +4681,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3546,22 +4699,22 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } // An object that represents a client policy. @@ -3667,8 +4820,8 @@ func (s *ClientPolicyTls) SetValidation(v *TlsValidationContext) *ClientPolicyTl // call with different specifications. Try the request again with a new client // token. type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3685,17 +4838,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3703,22 +4856,173 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateGatewayRouteInput struct { + _ struct{} `type:"structure"` + + ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` + + // GatewayRouteName is a required field + GatewayRouteName *string `locationName:"gatewayRouteName" min:"1" type:"string" required:"true"` + + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + // An object that represents a gateway route specification. Specify one gateway + // route type. + // + // Spec is a required field + Spec *GatewayRouteSpec `locationName:"spec" type:"structure" required:"true"` + + Tags []*TagRef `locationName:"tags" type:"list"` + + // VirtualGatewayName is a required field + VirtualGatewayName *string `location:"uri" locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateGatewayRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGatewayRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateGatewayRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGatewayRouteInput"} + if s.GatewayRouteName == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayRouteName")) + } + if s.GatewayRouteName != nil && len(*s.GatewayRouteName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GatewayRouteName", 1)) + } + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + if s.Spec == nil { + invalidParams.Add(request.NewErrParamRequired("Spec")) + } + if s.VirtualGatewayName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayName")) + } + if s.VirtualGatewayName != nil && len(*s.VirtualGatewayName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualGatewayName", 1)) + } + if s.Spec != nil { + if err := s.Spec.Validate(); err != nil { + invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateGatewayRouteInput) SetClientToken(v string) *CreateGatewayRouteInput { + s.ClientToken = &v + return s +} + +// SetGatewayRouteName sets the GatewayRouteName field's value. +func (s *CreateGatewayRouteInput) SetGatewayRouteName(v string) *CreateGatewayRouteInput { + s.GatewayRouteName = &v + return s +} + +// SetMeshName sets the MeshName field's value. +func (s *CreateGatewayRouteInput) SetMeshName(v string) *CreateGatewayRouteInput { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. +func (s *CreateGatewayRouteInput) SetMeshOwner(v string) *CreateGatewayRouteInput { + s.MeshOwner = &v + return s +} + +// SetSpec sets the Spec field's value. +func (s *CreateGatewayRouteInput) SetSpec(v *GatewayRouteSpec) *CreateGatewayRouteInput { + s.Spec = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateGatewayRouteInput) SetTags(v []*TagRef) *CreateGatewayRouteInput { + s.Tags = v + return s +} + +// SetVirtualGatewayName sets the VirtualGatewayName field's value. +func (s *CreateGatewayRouteInput) SetVirtualGatewayName(v string) *CreateGatewayRouteInput { + s.VirtualGatewayName = &v + return s +} + +type CreateGatewayRouteOutput struct { + _ struct{} `type:"structure" payload:"GatewayRoute"` + + // An object that represents a gateway route returned by a describe operation. + // + // GatewayRoute is a required field + GatewayRoute *GatewayRouteData `locationName:"gatewayRoute" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateGatewayRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGatewayRouteOutput) GoString() string { + return s.String() +} + +// SetGatewayRoute sets the GatewayRoute field's value. 
+func (s *CreateGatewayRouteOutput) SetGatewayRoute(v *GatewayRouteData) *CreateGatewayRouteOutput { + s.GatewayRoute = v + return s } type CreateMeshInput struct { @@ -3975,7 +5279,7 @@ func (s *CreateRouteOutput) SetRoute(v *RouteData) *CreateRouteOutput { return s } -type CreateVirtualNodeInput struct { +type CreateVirtualGatewayInput struct { _ struct{} `type:"structure"` ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` @@ -3985,30 +5289,30 @@ type CreateVirtualNodeInput struct { MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` - // An object that represents the specification of a virtual node. + // An object that represents the specification of a service mesh resource. // // Spec is a required field - Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"` + Spec *VirtualGatewaySpec `locationName:"spec" type:"structure" required:"true"` Tags []*TagRef `locationName:"tags" type:"list"` - // VirtualNodeName is a required field - VirtualNodeName *string `locationName:"virtualNodeName" min:"1" type:"string" required:"true"` + // VirtualGatewayName is a required field + VirtualGatewayName *string `locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateVirtualNodeInput) String() string { +func (s CreateVirtualGatewayInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateVirtualNodeInput) GoString() string { +func (s CreateVirtualGatewayInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateVirtualNodeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateVirtualNodeInput"} +func (s *CreateVirtualGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVirtualGatewayInput"} if s.MeshName == nil { invalidParams.Add(request.NewErrParamRequired("MeshName")) } @@ -4021,11 +5325,11 @@ func (s *CreateVirtualNodeInput) Validate() error { if s.Spec == nil { invalidParams.Add(request.NewErrParamRequired("Spec")) } - if s.VirtualNodeName == nil { - invalidParams.Add(request.NewErrParamRequired("VirtualNodeName")) + if s.VirtualGatewayName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayName")) } - if s.VirtualNodeName != nil && len(*s.VirtualNodeName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VirtualNodeName", 1)) + if s.VirtualGatewayName != nil && len(*s.VirtualGatewayName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualGatewayName", 1)) } if s.Spec != nil { if err := s.Spec.Validate(); err != nil { @@ -4050,67 +5354,67 @@ func (s *CreateVirtualNodeInput) Validate() error { } // SetClientToken sets the ClientToken field's value. -func (s *CreateVirtualNodeInput) SetClientToken(v string) *CreateVirtualNodeInput { +func (s *CreateVirtualGatewayInput) SetClientToken(v string) *CreateVirtualGatewayInput { s.ClientToken = &v return s } // SetMeshName sets the MeshName field's value. -func (s *CreateVirtualNodeInput) SetMeshName(v string) *CreateVirtualNodeInput { +func (s *CreateVirtualGatewayInput) SetMeshName(v string) *CreateVirtualGatewayInput { s.MeshName = &v return s } // SetMeshOwner sets the MeshOwner field's value. 
-func (s *CreateVirtualNodeInput) SetMeshOwner(v string) *CreateVirtualNodeInput { +func (s *CreateVirtualGatewayInput) SetMeshOwner(v string) *CreateVirtualGatewayInput { s.MeshOwner = &v return s } // SetSpec sets the Spec field's value. -func (s *CreateVirtualNodeInput) SetSpec(v *VirtualNodeSpec) *CreateVirtualNodeInput { +func (s *CreateVirtualGatewayInput) SetSpec(v *VirtualGatewaySpec) *CreateVirtualGatewayInput { s.Spec = v return s } // SetTags sets the Tags field's value. -func (s *CreateVirtualNodeInput) SetTags(v []*TagRef) *CreateVirtualNodeInput { +func (s *CreateVirtualGatewayInput) SetTags(v []*TagRef) *CreateVirtualGatewayInput { s.Tags = v return s } -// SetVirtualNodeName sets the VirtualNodeName field's value. -func (s *CreateVirtualNodeInput) SetVirtualNodeName(v string) *CreateVirtualNodeInput { - s.VirtualNodeName = &v +// SetVirtualGatewayName sets the VirtualGatewayName field's value. +func (s *CreateVirtualGatewayInput) SetVirtualGatewayName(v string) *CreateVirtualGatewayInput { + s.VirtualGatewayName = &v return s } -type CreateVirtualNodeOutput struct { - _ struct{} `type:"structure" payload:"VirtualNode"` +type CreateVirtualGatewayOutput struct { + _ struct{} `type:"structure" payload:"VirtualGateway"` - // An object that represents a virtual node returned by a describe operation. + // An object that represents a virtual gateway returned by a describe operation. // - // VirtualNode is a required field - VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` + // VirtualGateway is a required field + VirtualGateway *VirtualGatewayData `locationName:"virtualGateway" type:"structure" required:"true"` } // String returns the string representation -func (s CreateVirtualNodeOutput) String() string { +func (s CreateVirtualGatewayOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateVirtualNodeOutput) GoString() string { +func (s CreateVirtualGatewayOutput) GoString() string { return s.String() } -// SetVirtualNode sets the VirtualNode field's value. -func (s *CreateVirtualNodeOutput) SetVirtualNode(v *VirtualNodeData) *CreateVirtualNodeOutput { - s.VirtualNode = v +// SetVirtualGateway sets the VirtualGateway field's value. +func (s *CreateVirtualGatewayOutput) SetVirtualGateway(v *VirtualGatewayData) *CreateVirtualGatewayOutput { + s.VirtualGateway = v return s } -type CreateVirtualRouterInput struct { +type CreateVirtualNodeInput struct { _ struct{} `type:"structure"` ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` @@ -4120,24 +5424,159 @@ type CreateVirtualRouterInput struct { MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` - // An object that represents the specification of a virtual router. + // An object that represents the specification of a virtual node. 
// // Spec is a required field - Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"` + Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"` Tags []*TagRef `locationName:"tags" type:"list"` - // VirtualRouterName is a required field - VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"` + // VirtualNodeName is a required field + VirtualNodeName *string `locationName:"virtualNodeName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateVirtualRouterInput) String() string { +func (s CreateVirtualNodeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateVirtualRouterInput) GoString() string { +func (s CreateVirtualNodeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVirtualNodeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVirtualNodeInput"} + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + if s.Spec == nil { + invalidParams.Add(request.NewErrParamRequired("Spec")) + } + if s.VirtualNodeName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualNodeName")) + } + if s.VirtualNodeName != nil && len(*s.VirtualNodeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualNodeName", 1)) + } + if s.Spec != nil { + if err := s.Spec.Validate(); err != nil { + invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateVirtualNodeInput) SetClientToken(v string) *CreateVirtualNodeInput { + s.ClientToken = &v + return s +} + +// SetMeshName sets the MeshName field's value. +func (s *CreateVirtualNodeInput) SetMeshName(v string) *CreateVirtualNodeInput { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. +func (s *CreateVirtualNodeInput) SetMeshOwner(v string) *CreateVirtualNodeInput { + s.MeshOwner = &v + return s +} + +// SetSpec sets the Spec field's value. +func (s *CreateVirtualNodeInput) SetSpec(v *VirtualNodeSpec) *CreateVirtualNodeInput { + s.Spec = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateVirtualNodeInput) SetTags(v []*TagRef) *CreateVirtualNodeInput { + s.Tags = v + return s +} + +// SetVirtualNodeName sets the VirtualNodeName field's value. +func (s *CreateVirtualNodeInput) SetVirtualNodeName(v string) *CreateVirtualNodeInput { + s.VirtualNodeName = &v + return s +} + +type CreateVirtualNodeOutput struct { + _ struct{} `type:"structure" payload:"VirtualNode"` + + // An object that represents a virtual node returned by a describe operation. 
+ // + // VirtualNode is a required field + VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateVirtualNodeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVirtualNodeOutput) GoString() string { + return s.String() +} + +// SetVirtualNode sets the VirtualNode field's value. +func (s *CreateVirtualNodeOutput) SetVirtualNode(v *VirtualNodeData) *CreateVirtualNodeOutput { + s.VirtualNode = v + return s +} + +type CreateVirtualRouterInput struct { + _ struct{} `type:"structure"` + + ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` + + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + // An object that represents the specification of a virtual router. + // + // Spec is a required field + Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"` + + Tags []*TagRef `locationName:"tags" type:"list"` + + // VirtualRouterName is a required field + VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVirtualRouterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVirtualRouterInput) GoString() string { return s.String() } @@ -4377,6 +5816,111 @@ func (s *CreateVirtualServiceOutput) SetVirtualService(v *VirtualServiceData) *C return s } +type DeleteGatewayRouteInput struct { + _ struct{} `type:"structure"` + + // GatewayRouteName is a required field + GatewayRouteName *string `location:"uri" locationName:"gatewayRouteName" min:"1" type:"string" required:"true"` + + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + // VirtualGatewayName is a required field + VirtualGatewayName *string `location:"uri" locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGatewayRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGatewayRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteGatewayRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGatewayRouteInput"} + if s.GatewayRouteName == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayRouteName")) + } + if s.GatewayRouteName != nil && len(*s.GatewayRouteName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GatewayRouteName", 1)) + } + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + if s.VirtualGatewayName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayName")) + } + if s.VirtualGatewayName != nil && len(*s.VirtualGatewayName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualGatewayName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGatewayRouteName sets the GatewayRouteName field's value. +func (s *DeleteGatewayRouteInput) SetGatewayRouteName(v string) *DeleteGatewayRouteInput { + s.GatewayRouteName = &v + return s +} + +// SetMeshName sets the MeshName field's value. +func (s *DeleteGatewayRouteInput) SetMeshName(v string) *DeleteGatewayRouteInput { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. +func (s *DeleteGatewayRouteInput) SetMeshOwner(v string) *DeleteGatewayRouteInput { + s.MeshOwner = &v + return s +} + +// SetVirtualGatewayName sets the VirtualGatewayName field's value. +func (s *DeleteGatewayRouteInput) SetVirtualGatewayName(v string) *DeleteGatewayRouteInput { + s.VirtualGatewayName = &v + return s +} + +type DeleteGatewayRouteOutput struct { + _ struct{} `type:"structure" payload:"GatewayRoute"` + + // An object that represents a gateway route returned by a describe operation. + // + // GatewayRoute is a required field + GatewayRoute *GatewayRouteData `locationName:"gatewayRoute" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteGatewayRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGatewayRouteOutput) GoString() string { + return s.String() +} + +// SetGatewayRoute sets the GatewayRoute field's value. 
+func (s *DeleteGatewayRouteOutput) SetGatewayRoute(v *GatewayRouteData) *DeleteGatewayRouteOutput { + s.GatewayRoute = v + return s +} + type DeleteMeshInput struct { _ struct{} `type:"structure"` @@ -4546,7 +6090,7 @@ func (s *DeleteRouteOutput) SetRoute(v *RouteData) *DeleteRouteOutput { return s } -type DeleteVirtualNodeInput struct { +type DeleteVirtualGatewayInput struct { _ struct{} `type:"structure"` // MeshName is a required field @@ -4554,23 +6098,23 @@ type DeleteVirtualNodeInput struct { MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` - // VirtualNodeName is a required field - VirtualNodeName *string `location:"uri" locationName:"virtualNodeName" min:"1" type:"string" required:"true"` + // VirtualGatewayName is a required field + VirtualGatewayName *string `location:"uri" locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteVirtualNodeInput) String() string { +func (s DeleteVirtualGatewayInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteVirtualNodeInput) GoString() string { +func (s DeleteVirtualGatewayInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteVirtualNodeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteVirtualNodeInput"} +func (s *DeleteVirtualGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVirtualGatewayInput"} if s.MeshName == nil { invalidParams.Add(request.NewErrParamRequired("MeshName")) } @@ -4580,11 +6124,11 @@ func (s *DeleteVirtualNodeInput) Validate() error { if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) } - if s.VirtualNodeName == nil { - invalidParams.Add(request.NewErrParamRequired("VirtualNodeName")) + if s.VirtualGatewayName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayName")) } - if s.VirtualNodeName != nil && len(*s.VirtualNodeName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VirtualNodeName", 1)) + if s.VirtualGatewayName != nil && len(*s.VirtualGatewayName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualGatewayName", 1)) } if invalidParams.Len() > 0 { @@ -4594,49 +6138,49 @@ func (s *DeleteVirtualNodeInput) Validate() error { } // SetMeshName sets the MeshName field's value. -func (s *DeleteVirtualNodeInput) SetMeshName(v string) *DeleteVirtualNodeInput { +func (s *DeleteVirtualGatewayInput) SetMeshName(v string) *DeleteVirtualGatewayInput { s.MeshName = &v return s } // SetMeshOwner sets the MeshOwner field's value. -func (s *DeleteVirtualNodeInput) SetMeshOwner(v string) *DeleteVirtualNodeInput { +func (s *DeleteVirtualGatewayInput) SetMeshOwner(v string) *DeleteVirtualGatewayInput { s.MeshOwner = &v return s } -// SetVirtualNodeName sets the VirtualNodeName field's value. -func (s *DeleteVirtualNodeInput) SetVirtualNodeName(v string) *DeleteVirtualNodeInput { - s.VirtualNodeName = &v +// SetVirtualGatewayName sets the VirtualGatewayName field's value. 
+func (s *DeleteVirtualGatewayInput) SetVirtualGatewayName(v string) *DeleteVirtualGatewayInput { + s.VirtualGatewayName = &v return s } -type DeleteVirtualNodeOutput struct { - _ struct{} `type:"structure" payload:"VirtualNode"` +type DeleteVirtualGatewayOutput struct { + _ struct{} `type:"structure" payload:"VirtualGateway"` - // An object that represents a virtual node returned by a describe operation. + // An object that represents a virtual gateway returned by a describe operation. // - // VirtualNode is a required field - VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` + // VirtualGateway is a required field + VirtualGateway *VirtualGatewayData `locationName:"virtualGateway" type:"structure" required:"true"` } // String returns the string representation -func (s DeleteVirtualNodeOutput) String() string { +func (s DeleteVirtualGatewayOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteVirtualNodeOutput) GoString() string { +func (s DeleteVirtualGatewayOutput) GoString() string { return s.String() } -// SetVirtualNode sets the VirtualNode field's value. -func (s *DeleteVirtualNodeOutput) SetVirtualNode(v *VirtualNodeData) *DeleteVirtualNodeOutput { - s.VirtualNode = v +// SetVirtualGateway sets the VirtualGateway field's value. +func (s *DeleteVirtualGatewayOutput) SetVirtualGateway(v *VirtualGatewayData) *DeleteVirtualGatewayOutput { + s.VirtualGateway = v return s } -type DeleteVirtualRouterInput struct { +type DeleteVirtualNodeInput struct { _ struct{} `type:"structure"` // MeshName is a required field @@ -4644,23 +6188,23 @@ type DeleteVirtualRouterInput struct { MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` - // VirtualRouterName is a required field - VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"` + // VirtualNodeName is a required field + VirtualNodeName *string `location:"uri" locationName:"virtualNodeName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteVirtualRouterInput) String() string { +func (s DeleteVirtualNodeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteVirtualRouterInput) GoString() string { +func (s DeleteVirtualNodeInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteVirtualRouterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteVirtualRouterInput"} +func (s *DeleteVirtualNodeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVirtualNodeInput"} if s.MeshName == nil { invalidParams.Add(request.NewErrParamRequired("MeshName")) } @@ -4670,11 +6214,11 @@ func (s *DeleteVirtualRouterInput) Validate() error { if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) } - if s.VirtualRouterName == nil { - invalidParams.Add(request.NewErrParamRequired("VirtualRouterName")) + if s.VirtualNodeName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualNodeName")) } - if s.VirtualRouterName != nil && len(*s.VirtualRouterName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VirtualRouterName", 1)) + if s.VirtualNodeName != nil && len(*s.VirtualNodeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualNodeName", 1)) } if invalidParams.Len() > 0 { @@ -4684,13 +6228,103 @@ func (s *DeleteVirtualRouterInput) Validate() error { } // SetMeshName sets the MeshName field's value. -func (s *DeleteVirtualRouterInput) SetMeshName(v string) *DeleteVirtualRouterInput { +func (s *DeleteVirtualNodeInput) SetMeshName(v string) *DeleteVirtualNodeInput { s.MeshName = &v return s } // SetMeshOwner sets the MeshOwner field's value. -func (s *DeleteVirtualRouterInput) SetMeshOwner(v string) *DeleteVirtualRouterInput { +func (s *DeleteVirtualNodeInput) SetMeshOwner(v string) *DeleteVirtualNodeInput { + s.MeshOwner = &v + return s +} + +// SetVirtualNodeName sets the VirtualNodeName field's value. +func (s *DeleteVirtualNodeInput) SetVirtualNodeName(v string) *DeleteVirtualNodeInput { + s.VirtualNodeName = &v + return s +} + +type DeleteVirtualNodeOutput struct { + _ struct{} `type:"structure" payload:"VirtualNode"` + + // An object that represents a virtual node returned by a describe operation. + // + // VirtualNode is a required field + VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteVirtualNodeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualNodeOutput) GoString() string { + return s.String() +} + +// SetVirtualNode sets the VirtualNode field's value. +func (s *DeleteVirtualNodeOutput) SetVirtualNode(v *VirtualNodeData) *DeleteVirtualNodeOutput { + s.VirtualNode = v + return s +} + +type DeleteVirtualRouterInput struct { + _ struct{} `type:"structure"` + + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + // VirtualRouterName is a required field + VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVirtualRouterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualRouterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteVirtualRouterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVirtualRouterInput"} + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + if s.VirtualRouterName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualRouterName")) + } + if s.VirtualRouterName != nil && len(*s.VirtualRouterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualRouterName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMeshName sets the MeshName field's value. +func (s *DeleteVirtualRouterInput) SetMeshName(v string) *DeleteVirtualRouterInput { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. +func (s *DeleteVirtualRouterInput) SetMeshOwner(v string) *DeleteVirtualRouterInput { s.MeshOwner = &v return s } @@ -4816,6 +6450,111 @@ func (s *DeleteVirtualServiceOutput) SetVirtualService(v *VirtualServiceData) *D return s } +type DescribeGatewayRouteInput struct { + _ struct{} `type:"structure"` + + // GatewayRouteName is a required field + GatewayRouteName *string `location:"uri" locationName:"gatewayRouteName" min:"1" type:"string" required:"true"` + + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + // VirtualGatewayName is a required field + VirtualGatewayName *string `location:"uri" locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeGatewayRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGatewayRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeGatewayRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGatewayRouteInput"} + if s.GatewayRouteName == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayRouteName")) + } + if s.GatewayRouteName != nil && len(*s.GatewayRouteName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GatewayRouteName", 1)) + } + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + if s.VirtualGatewayName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayName")) + } + if s.VirtualGatewayName != nil && len(*s.VirtualGatewayName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualGatewayName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGatewayRouteName sets the GatewayRouteName field's value. +func (s *DescribeGatewayRouteInput) SetGatewayRouteName(v string) *DescribeGatewayRouteInput { + s.GatewayRouteName = &v + return s +} + +// SetMeshName sets the MeshName field's value. 
+func (s *DescribeGatewayRouteInput) SetMeshName(v string) *DescribeGatewayRouteInput { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. +func (s *DescribeGatewayRouteInput) SetMeshOwner(v string) *DescribeGatewayRouteInput { + s.MeshOwner = &v + return s +} + +// SetVirtualGatewayName sets the VirtualGatewayName field's value. +func (s *DescribeGatewayRouteInput) SetVirtualGatewayName(v string) *DescribeGatewayRouteInput { + s.VirtualGatewayName = &v + return s +} + +type DescribeGatewayRouteOutput struct { + _ struct{} `type:"structure" payload:"GatewayRoute"` + + // An object that represents a gateway route returned by a describe operation. + // + // GatewayRoute is a required field + GatewayRoute *GatewayRouteData `locationName:"gatewayRoute" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeGatewayRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGatewayRouteOutput) GoString() string { + return s.String() +} + +// SetGatewayRoute sets the GatewayRoute field's value. +func (s *DescribeGatewayRouteOutput) SetGatewayRoute(v *GatewayRouteData) *DescribeGatewayRouteOutput { + s.GatewayRoute = v + return s +} + type DescribeMeshInput struct { _ struct{} `type:"structure"` @@ -4996,6 +6735,96 @@ func (s *DescribeRouteOutput) SetRoute(v *RouteData) *DescribeRouteOutput { return s } +type DescribeVirtualGatewayInput struct { + _ struct{} `type:"structure"` + + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + // VirtualGatewayName is a required field + VirtualGatewayName *string `location:"uri" locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeVirtualGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVirtualGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeVirtualGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeVirtualGatewayInput"} + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + if s.VirtualGatewayName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayName")) + } + if s.VirtualGatewayName != nil && len(*s.VirtualGatewayName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualGatewayName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMeshName sets the MeshName field's value. +func (s *DescribeVirtualGatewayInput) SetMeshName(v string) *DescribeVirtualGatewayInput { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. +func (s *DescribeVirtualGatewayInput) SetMeshOwner(v string) *DescribeVirtualGatewayInput { + s.MeshOwner = &v + return s +} + +// SetVirtualGatewayName sets the VirtualGatewayName field's value. 
+func (s *DescribeVirtualGatewayInput) SetVirtualGatewayName(v string) *DescribeVirtualGatewayInput { + s.VirtualGatewayName = &v + return s +} + +type DescribeVirtualGatewayOutput struct { + _ struct{} `type:"structure" payload:"VirtualGateway"` + + // An object that represents a virtual gateway returned by a describe operation. + // + // VirtualGateway is a required field + VirtualGateway *VirtualGatewayData `locationName:"virtualGateway" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeVirtualGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVirtualGatewayOutput) GoString() string { + return s.String() +} + +// SetVirtualGateway sets the VirtualGateway field's value. +func (s *DescribeVirtualGatewayOutput) SetVirtualGateway(v *VirtualGatewayData) *DescribeVirtualGatewayOutput { + s.VirtualGateway = v + return s +} + type DescribeVirtualNodeInput struct { _ struct{} `type:"structure"` @@ -5414,8 +7243,8 @@ func (s *FileAccessLog) SetPath(v string) *FileAccessLog { // You don't have permissions to perform this action. type ForbiddenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5432,17 +7261,17 @@ func (s ForbiddenException) GoString() string { func newErrorForbiddenException(v protocol.ResponseMetadata) error { return &ForbiddenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ForbiddenException) Code() string { +func (s *ForbiddenException) Code() string { return "ForbiddenException" } // Message returns the exception's message. -func (s ForbiddenException) Message() string { +func (s *ForbiddenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5450,162 +7279,237 @@ func (s ForbiddenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ForbiddenException) OrigErr() error { +func (s *ForbiddenException) OrigErr() error { return nil } -func (s ForbiddenException) Error() string { +func (s *ForbiddenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ForbiddenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ForbiddenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ForbiddenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ForbiddenException) RequestID() string { + return s.RespMetadata.RequestID } -// An object that represents a retry policy. Specify at least one value for -// at least one of the types of RetryEvents, a value for maxRetries, and a value -// for perRetryTimeout. -type GrpcRetryPolicy struct { +// An object that represents a gateway route returned by a describe operation. 
+type GatewayRouteData struct { _ struct{} `type:"structure"` - GrpcRetryEvents []*string `locationName:"grpcRetryEvents" min:"1" type:"list"` + // GatewayRouteName is a required field + GatewayRouteName *string `locationName:"gatewayRouteName" min:"1" type:"string" required:"true"` - HttpRetryEvents []*string `locationName:"httpRetryEvents" min:"1" type:"list"` + // MeshName is a required field + MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // MaxRetries is a required field - MaxRetries *int64 `locationName:"maxRetries" type:"long" required:"true"` + // An object that represents metadata for a resource. + // + // Metadata is a required field + Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // An object that represents a duration of time. + // An object that represents a gateway route specification. Specify one gateway + // route type. // - // PerRetryTimeout is a required field - PerRetryTimeout *Duration `locationName:"perRetryTimeout" type:"structure" required:"true"` + // Spec is a required field + Spec *GatewayRouteSpec `locationName:"spec" type:"structure" required:"true"` - TcpRetryEvents []*string `locationName:"tcpRetryEvents" min:"1" type:"list"` + // An object that represents the current status of a gateway route. + // + // Status is a required field + Status *GatewayRouteStatus `locationName:"status" type:"structure" required:"true"` + + // VirtualGatewayName is a required field + VirtualGatewayName *string `locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GrpcRetryPolicy) String() string { +func (s GatewayRouteData) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GrpcRetryPolicy) GoString() string { +func (s GatewayRouteData) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GrpcRetryPolicy) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GrpcRetryPolicy"} - if s.GrpcRetryEvents != nil && len(s.GrpcRetryEvents) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrpcRetryEvents", 1)) - } - if s.HttpRetryEvents != nil && len(s.HttpRetryEvents) < 1 { - invalidParams.Add(request.NewErrParamMinLen("HttpRetryEvents", 1)) - } - if s.MaxRetries == nil { - invalidParams.Add(request.NewErrParamRequired("MaxRetries")) - } - if s.PerRetryTimeout == nil { - invalidParams.Add(request.NewErrParamRequired("PerRetryTimeout")) - } - if s.TcpRetryEvents != nil && len(s.TcpRetryEvents) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TcpRetryEvents", 1)) - } +// SetGatewayRouteName sets the GatewayRouteName field's value. +func (s *GatewayRouteData) SetGatewayRouteName(v string) *GatewayRouteData { + s.GatewayRouteName = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMeshName sets the MeshName field's value. +func (s *GatewayRouteData) SetMeshName(v string) *GatewayRouteData { + s.MeshName = &v + return s } -// SetGrpcRetryEvents sets the GrpcRetryEvents field's value. -func (s *GrpcRetryPolicy) SetGrpcRetryEvents(v []*string) *GrpcRetryPolicy { - s.GrpcRetryEvents = v +// SetMetadata sets the Metadata field's value. +func (s *GatewayRouteData) SetMetadata(v *ResourceMetadata) *GatewayRouteData { + s.Metadata = v return s } -// SetHttpRetryEvents sets the HttpRetryEvents field's value. 
-func (s *GrpcRetryPolicy) SetHttpRetryEvents(v []*string) *GrpcRetryPolicy { - s.HttpRetryEvents = v +// SetSpec sets the Spec field's value. +func (s *GatewayRouteData) SetSpec(v *GatewayRouteSpec) *GatewayRouteData { + s.Spec = v return s } -// SetMaxRetries sets the MaxRetries field's value. -func (s *GrpcRetryPolicy) SetMaxRetries(v int64) *GrpcRetryPolicy { - s.MaxRetries = &v +// SetStatus sets the Status field's value. +func (s *GatewayRouteData) SetStatus(v *GatewayRouteStatus) *GatewayRouteData { + s.Status = v return s } -// SetPerRetryTimeout sets the PerRetryTimeout field's value. -func (s *GrpcRetryPolicy) SetPerRetryTimeout(v *Duration) *GrpcRetryPolicy { - s.PerRetryTimeout = v +// SetVirtualGatewayName sets the VirtualGatewayName field's value. +func (s *GatewayRouteData) SetVirtualGatewayName(v string) *GatewayRouteData { + s.VirtualGatewayName = &v return s } -// SetTcpRetryEvents sets the TcpRetryEvents field's value. -func (s *GrpcRetryPolicy) SetTcpRetryEvents(v []*string) *GrpcRetryPolicy { - s.TcpRetryEvents = v +// An object that represents a gateway route returned by a list operation. +type GatewayRouteRef struct { + _ struct{} `type:"structure"` + + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` + + // CreatedAt is a required field + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"` + + // GatewayRouteName is a required field + GatewayRouteName *string `locationName:"gatewayRouteName" min:"1" type:"string" required:"true"` + + // LastUpdatedAt is a required field + LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" required:"true"` + + // MeshName is a required field + MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` + + // MeshOwner is a required field + MeshOwner *string `locationName:"meshOwner" min:"12" type:"string" required:"true"` + + // ResourceOwner is a required field + ResourceOwner *string `locationName:"resourceOwner" min:"12" type:"string" required:"true"` + + // Version is a required field + Version *int64 `locationName:"version" type:"long" required:"true"` + + // VirtualGatewayName is a required field + VirtualGatewayName *string `locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GatewayRouteRef) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GatewayRouteRef) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *GatewayRouteRef) SetArn(v string) *GatewayRouteRef { + s.Arn = &v return s } -// An object that represents a gRPC route type. -type GrpcRoute struct { +// SetCreatedAt sets the CreatedAt field's value. +func (s *GatewayRouteRef) SetCreatedAt(v time.Time) *GatewayRouteRef { + s.CreatedAt = &v + return s +} + +// SetGatewayRouteName sets the GatewayRouteName field's value. +func (s *GatewayRouteRef) SetGatewayRouteName(v string) *GatewayRouteRef { + s.GatewayRouteName = &v + return s +} + +// SetLastUpdatedAt sets the LastUpdatedAt field's value. +func (s *GatewayRouteRef) SetLastUpdatedAt(v time.Time) *GatewayRouteRef { + s.LastUpdatedAt = &v + return s +} + +// SetMeshName sets the MeshName field's value. +func (s *GatewayRouteRef) SetMeshName(v string) *GatewayRouteRef { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. 
+func (s *GatewayRouteRef) SetMeshOwner(v string) *GatewayRouteRef { + s.MeshOwner = &v + return s +} + +// SetResourceOwner sets the ResourceOwner field's value. +func (s *GatewayRouteRef) SetResourceOwner(v string) *GatewayRouteRef { + s.ResourceOwner = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *GatewayRouteRef) SetVersion(v int64) *GatewayRouteRef { + s.Version = &v + return s +} + +// SetVirtualGatewayName sets the VirtualGatewayName field's value. +func (s *GatewayRouteRef) SetVirtualGatewayName(v string) *GatewayRouteRef { + s.VirtualGatewayName = &v + return s +} + +// An object that represents a gateway route specification. Specify one gateway +// route type. +type GatewayRouteSpec struct { _ struct{} `type:"structure"` - // An object that represents the action to take if a match is determined. - // - // Action is a required field - Action *GrpcRouteAction `locationName:"action" type:"structure" required:"true"` + // An object that represents a gRPC gateway route. + GrpcRoute *GrpcGatewayRoute `locationName:"grpcRoute" type:"structure"` - // An object that represents the criteria for determining a request match. - // - // Match is a required field - Match *GrpcRouteMatch `locationName:"match" type:"structure" required:"true"` + // An object that represents an HTTP gateway route. + Http2Route *HttpGatewayRoute `locationName:"http2Route" type:"structure"` - // An object that represents a retry policy. Specify at least one value for - // at least one of the types of RetryEvents, a value for maxRetries, and a value - // for perRetryTimeout. - RetryPolicy *GrpcRetryPolicy `locationName:"retryPolicy" type:"structure"` + // An object that represents an HTTP gateway route. + HttpRoute *HttpGatewayRoute `locationName:"httpRoute" type:"structure"` } // String returns the string representation -func (s GrpcRoute) String() string { +func (s GatewayRouteSpec) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GrpcRoute) GoString() string { +func (s GatewayRouteSpec) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GrpcRoute) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GrpcRoute"} - if s.Action == nil { - invalidParams.Add(request.NewErrParamRequired("Action")) - } - if s.Match == nil { - invalidParams.Add(request.NewErrParamRequired("Match")) - } - if s.Action != nil { - if err := s.Action.Validate(); err != nil { - invalidParams.AddNested("Action", err.(request.ErrInvalidParams)) +func (s *GatewayRouteSpec) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GatewayRouteSpec"} + if s.GrpcRoute != nil { + if err := s.GrpcRoute.Validate(); err != nil { + invalidParams.AddNested("GrpcRoute", err.(request.ErrInvalidParams)) } } - if s.Match != nil { - if err := s.Match.Validate(); err != nil { - invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) + if s.Http2Route != nil { + if err := s.Http2Route.Validate(); err != nil { + invalidParams.AddNested("Http2Route", err.(request.ErrInvalidParams)) } } - if s.RetryPolicy != nil { - if err := s.RetryPolicy.Validate(); err != nil { - invalidParams.AddNested("RetryPolicy", err.(request.ErrInvalidParams)) + if s.HttpRoute != nil { + if err := s.HttpRoute.Validate(); err != nil { + invalidParams.AddNested("HttpRoute", err.(request.ErrInvalidParams)) } } @@ -5615,59 +7519,77 @@ func (s *GrpcRoute) Validate() error { return nil } -// SetAction sets the Action field's value. -func (s *GrpcRoute) SetAction(v *GrpcRouteAction) *GrpcRoute { - s.Action = v +// SetGrpcRoute sets the GrpcRoute field's value. +func (s *GatewayRouteSpec) SetGrpcRoute(v *GrpcGatewayRoute) *GatewayRouteSpec { + s.GrpcRoute = v return s } -// SetMatch sets the Match field's value. -func (s *GrpcRoute) SetMatch(v *GrpcRouteMatch) *GrpcRoute { - s.Match = v +// SetHttp2Route sets the Http2Route field's value. +func (s *GatewayRouteSpec) SetHttp2Route(v *HttpGatewayRoute) *GatewayRouteSpec { + s.Http2Route = v return s } -// SetRetryPolicy sets the RetryPolicy field's value. -func (s *GrpcRoute) SetRetryPolicy(v *GrpcRetryPolicy) *GrpcRoute { - s.RetryPolicy = v +// SetHttpRoute sets the HttpRoute field's value. +func (s *GatewayRouteSpec) SetHttpRoute(v *HttpGatewayRoute) *GatewayRouteSpec { + s.HttpRoute = v return s } -// An object that represents the action to take if a match is determined. -type GrpcRouteAction struct { +// An object that represents the current status of a gateway route. +type GatewayRouteStatus struct { _ struct{} `type:"structure"` - // WeightedTargets is a required field - WeightedTargets []*WeightedTarget `locationName:"weightedTargets" min:"1" type:"list" required:"true"` + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"GatewayRouteStatusCode"` } // String returns the string representation -func (s GrpcRouteAction) String() string { +func (s GatewayRouteStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GrpcRouteAction) GoString() string { +func (s GatewayRouteStatus) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *GatewayRouteStatus) SetStatus(v string) *GatewayRouteStatus { + s.Status = &v + return s +} + +// An object that represents a gateway route target. +type GatewayRouteTarget struct { + _ struct{} `type:"structure"` + + // An object that represents the virtual service that traffic is routed to. 
+ // + // VirtualService is a required field + VirtualService *GatewayRouteVirtualService `locationName:"virtualService" type:"structure" required:"true"` +} + +// String returns the string representation +func (s GatewayRouteTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GatewayRouteTarget) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GrpcRouteAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GrpcRouteAction"} - if s.WeightedTargets == nil { - invalidParams.Add(request.NewErrParamRequired("WeightedTargets")) +func (s *GatewayRouteTarget) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GatewayRouteTarget"} + if s.VirtualService == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualService")) } - if s.WeightedTargets != nil && len(s.WeightedTargets) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WeightedTargets", 1)) - } - if s.WeightedTargets != nil { - for i, v := range s.WeightedTargets { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "WeightedTargets", i), err.(request.ErrInvalidParams)) - } + if s.VirtualService != nil { + if err := s.VirtualService.Validate(); err != nil { + invalidParams.AddNested("VirtualService", err.(request.ErrInvalidParams)) } } @@ -5677,51 +7599,38 @@ func (s *GrpcRouteAction) Validate() error { return nil } -// SetWeightedTargets sets the WeightedTargets field's value. -func (s *GrpcRouteAction) SetWeightedTargets(v []*WeightedTarget) *GrpcRouteAction { - s.WeightedTargets = v +// SetVirtualService sets the VirtualService field's value. +func (s *GatewayRouteTarget) SetVirtualService(v *GatewayRouteVirtualService) *GatewayRouteTarget { + s.VirtualService = v return s } -// An object that represents the criteria for determining a request match. -type GrpcRouteMatch struct { +// An object that represents the virtual service that traffic is routed to. +type GatewayRouteVirtualService struct { _ struct{} `type:"structure"` - Metadata []*GrpcRouteMetadata `locationName:"metadata" min:"1" type:"list"` - - MethodName *string `locationName:"methodName" min:"1" type:"string"` - - ServiceName *string `locationName:"serviceName" type:"string"` + // VirtualServiceName is a required field + VirtualServiceName *string `locationName:"virtualServiceName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GrpcRouteMatch) String() string { +func (s GatewayRouteVirtualService) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GrpcRouteMatch) GoString() string { +func (s GatewayRouteVirtualService) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GrpcRouteMatch) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMatch"} - if s.Metadata != nil && len(s.Metadata) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Metadata", 1)) - } - if s.MethodName != nil && len(*s.MethodName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MethodName", 1)) +func (s *GatewayRouteVirtualService) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GatewayRouteVirtualService"} + if s.VirtualServiceName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualServiceName")) } - if s.Metadata != nil { - for i, v := range s.Metadata { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Metadata", i), err.(request.ErrInvalidParams)) - } - } + if s.VirtualServiceName != nil && len(*s.VirtualServiceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualServiceName", 1)) } if invalidParams.Len() > 0 { @@ -5730,59 +7639,49 @@ func (s *GrpcRouteMatch) Validate() error { return nil } -// SetMetadata sets the Metadata field's value. -func (s *GrpcRouteMatch) SetMetadata(v []*GrpcRouteMetadata) *GrpcRouteMatch { - s.Metadata = v +// SetVirtualServiceName sets the VirtualServiceName field's value. +func (s *GatewayRouteVirtualService) SetVirtualServiceName(v string) *GatewayRouteVirtualService { + s.VirtualServiceName = &v return s } -// SetMethodName sets the MethodName field's value. -func (s *GrpcRouteMatch) SetMethodName(v string) *GrpcRouteMatch { - s.MethodName = &v - return s -} - -// SetServiceName sets the ServiceName field's value. -func (s *GrpcRouteMatch) SetServiceName(v string) *GrpcRouteMatch { - s.ServiceName = &v - return s -} - -// An object that represents the match metadata for the route. -type GrpcRouteMetadata struct { +// An object that represents a gRPC gateway route. +type GrpcGatewayRoute struct { _ struct{} `type:"structure"` - Invert *bool `locationName:"invert" type:"boolean"` - - // An object that represents the match method. Specify one of the match values. - Match *GrpcRouteMetadataMatchMethod `locationName:"match" type:"structure"` + // An object that represents the action to take if a match is determined. + // + // Action is a required field + Action *GrpcGatewayRouteAction `locationName:"action" type:"structure" required:"true"` - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` + // An object that represents the criteria for determining a request match. + // + // Match is a required field + Match *GrpcGatewayRouteMatch `locationName:"match" type:"structure" required:"true"` } // String returns the string representation -func (s GrpcRouteMetadata) String() string { +func (s GrpcGatewayRoute) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GrpcRouteMetadata) GoString() string { +func (s GrpcGatewayRoute) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GrpcRouteMetadata) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMetadata"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *GrpcGatewayRoute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcGatewayRoute"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.Match == nil { + invalidParams.Add(request.NewErrParamRequired("Match")) } - if s.Match != nil { - if err := s.Match.Validate(); err != nil { - invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) + if s.Action != nil { + if err := s.Action.Validate(); err != nil { + invalidParams.AddNested("Action", err.(request.ErrInvalidParams)) } } @@ -5792,71 +7691,47 @@ func (s *GrpcRouteMetadata) Validate() error { return nil } -// SetInvert sets the Invert field's value. -func (s *GrpcRouteMetadata) SetInvert(v bool) *GrpcRouteMetadata { - s.Invert = &v +// SetAction sets the Action field's value. +func (s *GrpcGatewayRoute) SetAction(v *GrpcGatewayRouteAction) *GrpcGatewayRoute { + s.Action = v return s } // SetMatch sets the Match field's value. -func (s *GrpcRouteMetadata) SetMatch(v *GrpcRouteMetadataMatchMethod) *GrpcRouteMetadata { +func (s *GrpcGatewayRoute) SetMatch(v *GrpcGatewayRouteMatch) *GrpcGatewayRoute { s.Match = v return s } -// SetName sets the Name field's value. -func (s *GrpcRouteMetadata) SetName(v string) *GrpcRouteMetadata { - s.Name = &v - return s -} - -// An object that represents the match method. Specify one of the match values. -type GrpcRouteMetadataMatchMethod struct { +// An object that represents the action to take if a match is determined. +type GrpcGatewayRouteAction struct { _ struct{} `type:"structure"` - Exact *string `locationName:"exact" min:"1" type:"string"` - - Prefix *string `locationName:"prefix" min:"1" type:"string"` - - // An object that represents the range of values to match on. The first character - // of the range is included in the range, though the last character is not. - // For example, if the range specified were 1-100, only values 1-99 would be - // matched. - Range *MatchRange `locationName:"range" type:"structure"` - - Regex *string `locationName:"regex" min:"1" type:"string"` - - Suffix *string `locationName:"suffix" min:"1" type:"string"` + // An object that represents a gateway route target. + // + // Target is a required field + Target *GatewayRouteTarget `locationName:"target" type:"structure" required:"true"` } // String returns the string representation -func (s GrpcRouteMetadataMatchMethod) String() string { +func (s GrpcGatewayRouteAction) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GrpcRouteMetadataMatchMethod) GoString() string { +func (s GrpcGatewayRouteAction) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GrpcRouteMetadataMatchMethod) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMetadataMatchMethod"} - if s.Exact != nil && len(*s.Exact) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Exact", 1)) - } - if s.Prefix != nil && len(*s.Prefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Prefix", 1)) - } - if s.Regex != nil && len(*s.Regex) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Regex", 1)) - } - if s.Suffix != nil && len(*s.Suffix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Suffix", 1)) - } - if s.Range != nil { - if err := s.Range.Validate(); err != nil { - invalidParams.AddNested("Range", err.(request.ErrInvalidParams)) +func (s *GrpcGatewayRouteAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcGatewayRouteAction"} + if s.Target == nil { + invalidParams.Add(request.NewErrParamRequired("Target")) + } + if s.Target != nil { + if err := s.Target.Validate(); err != nil { + invalidParams.AddNested("Target", err.(request.ErrInvalidParams)) } } @@ -5866,85 +7741,83 @@ func (s *GrpcRouteMetadataMatchMethod) Validate() error { return nil } -// SetExact sets the Exact field's value. -func (s *GrpcRouteMetadataMatchMethod) SetExact(v string) *GrpcRouteMetadataMatchMethod { - s.Exact = &v +// SetTarget sets the Target field's value. +func (s *GrpcGatewayRouteAction) SetTarget(v *GatewayRouteTarget) *GrpcGatewayRouteAction { + s.Target = v return s } -// SetPrefix sets the Prefix field's value. -func (s *GrpcRouteMetadataMatchMethod) SetPrefix(v string) *GrpcRouteMetadataMatchMethod { - s.Prefix = &v - return s +// An object that represents the criteria for determining a request match. +type GrpcGatewayRouteMatch struct { + _ struct{} `type:"structure"` + + ServiceName *string `locationName:"serviceName" type:"string"` } -// SetRange sets the Range field's value. -func (s *GrpcRouteMetadataMatchMethod) SetRange(v *MatchRange) *GrpcRouteMetadataMatchMethod { - s.Range = v - return s +// String returns the string representation +func (s GrpcGatewayRouteMatch) String() string { + return awsutil.Prettify(s) } -// SetRegex sets the Regex field's value. -func (s *GrpcRouteMetadataMatchMethod) SetRegex(v string) *GrpcRouteMetadataMatchMethod { - s.Regex = &v - return s +// GoString returns the string representation +func (s GrpcGatewayRouteMatch) GoString() string { + return s.String() } -// SetSuffix sets the Suffix field's value. -func (s *GrpcRouteMetadataMatchMethod) SetSuffix(v string) *GrpcRouteMetadataMatchMethod { - s.Suffix = &v +// SetServiceName sets the ServiceName field's value. +func (s *GrpcGatewayRouteMatch) SetServiceName(v string) *GrpcGatewayRouteMatch { + s.ServiceName = &v return s } -// An object that represents the method and value to match with the header value -// sent in a request. Specify one match method. -type HeaderMatchMethod struct { +// An object that represents a retry policy. Specify at least one value for +// at least one of the types of RetryEvents, a value for maxRetries, and a value +// for perRetryTimeout. +type GrpcRetryPolicy struct { _ struct{} `type:"structure"` - Exact *string `locationName:"exact" min:"1" type:"string"` + GrpcRetryEvents []*string `locationName:"grpcRetryEvents" min:"1" type:"list"` - Prefix *string `locationName:"prefix" min:"1" type:"string"` + HttpRetryEvents []*string `locationName:"httpRetryEvents" min:"1" type:"list"` - // An object that represents the range of values to match on. 
The first character - // of the range is included in the range, though the last character is not. - // For example, if the range specified were 1-100, only values 1-99 would be - // matched. - Range *MatchRange `locationName:"range" type:"structure"` + // MaxRetries is a required field + MaxRetries *int64 `locationName:"maxRetries" type:"long" required:"true"` - Regex *string `locationName:"regex" min:"1" type:"string"` + // An object that represents a duration of time. + // + // PerRetryTimeout is a required field + PerRetryTimeout *Duration `locationName:"perRetryTimeout" type:"structure" required:"true"` - Suffix *string `locationName:"suffix" min:"1" type:"string"` + TcpRetryEvents []*string `locationName:"tcpRetryEvents" min:"1" type:"list"` } // String returns the string representation -func (s HeaderMatchMethod) String() string { +func (s GrpcRetryPolicy) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s HeaderMatchMethod) GoString() string { +func (s GrpcRetryPolicy) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *HeaderMatchMethod) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HeaderMatchMethod"} - if s.Exact != nil && len(*s.Exact) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Exact", 1)) +func (s *GrpcRetryPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRetryPolicy"} + if s.GrpcRetryEvents != nil && len(s.GrpcRetryEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GrpcRetryEvents", 1)) } - if s.Prefix != nil && len(*s.Prefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Prefix", 1)) + if s.HttpRetryEvents != nil && len(s.HttpRetryEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HttpRetryEvents", 1)) } - if s.Regex != nil && len(*s.Regex) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Regex", 1)) + if s.MaxRetries == nil { + invalidParams.Add(request.NewErrParamRequired("MaxRetries")) } - if s.Suffix != nil && len(*s.Suffix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Suffix", 1)) + if s.PerRetryTimeout == nil { + invalidParams.Add(request.NewErrParamRequired("PerRetryTimeout")) } - if s.Range != nil { - if err := s.Range.Validate(); err != nil { - invalidParams.AddNested("Range", err.(request.ErrInvalidParams)) - } + if s.TcpRetryEvents != nil && len(s.TcpRetryEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TcpRetryEvents", 1)) } if invalidParams.Len() > 0 { @@ -5953,102 +7826,92 @@ func (s *HeaderMatchMethod) Validate() error { return nil } -// SetExact sets the Exact field's value. -func (s *HeaderMatchMethod) SetExact(v string) *HeaderMatchMethod { - s.Exact = &v +// SetGrpcRetryEvents sets the GrpcRetryEvents field's value. +func (s *GrpcRetryPolicy) SetGrpcRetryEvents(v []*string) *GrpcRetryPolicy { + s.GrpcRetryEvents = v return s } -// SetPrefix sets the Prefix field's value. -func (s *HeaderMatchMethod) SetPrefix(v string) *HeaderMatchMethod { - s.Prefix = &v +// SetHttpRetryEvents sets the HttpRetryEvents field's value. +func (s *GrpcRetryPolicy) SetHttpRetryEvents(v []*string) *GrpcRetryPolicy { + s.HttpRetryEvents = v return s } -// SetRange sets the Range field's value. -func (s *HeaderMatchMethod) SetRange(v *MatchRange) *HeaderMatchMethod { - s.Range = v +// SetMaxRetries sets the MaxRetries field's value. 
+func (s *GrpcRetryPolicy) SetMaxRetries(v int64) *GrpcRetryPolicy { + s.MaxRetries = &v return s } -// SetRegex sets the Regex field's value. -func (s *HeaderMatchMethod) SetRegex(v string) *HeaderMatchMethod { - s.Regex = &v +// SetPerRetryTimeout sets the PerRetryTimeout field's value. +func (s *GrpcRetryPolicy) SetPerRetryTimeout(v *Duration) *GrpcRetryPolicy { + s.PerRetryTimeout = v return s } -// SetSuffix sets the Suffix field's value. -func (s *HeaderMatchMethod) SetSuffix(v string) *HeaderMatchMethod { - s.Suffix = &v +// SetTcpRetryEvents sets the TcpRetryEvents field's value. +func (s *GrpcRetryPolicy) SetTcpRetryEvents(v []*string) *GrpcRetryPolicy { + s.TcpRetryEvents = v return s } -// An object that represents the health check policy for a virtual node's listener. -type HealthCheckPolicy struct { +// An object that represents a gRPC route type. +type GrpcRoute struct { _ struct{} `type:"structure"` - // HealthyThreshold is a required field - HealthyThreshold *int64 `locationName:"healthyThreshold" min:"2" type:"integer" required:"true"` - - // IntervalMillis is a required field - IntervalMillis *int64 `locationName:"intervalMillis" min:"5000" type:"long" required:"true"` - - Path *string `locationName:"path" type:"string"` - - Port *int64 `locationName:"port" min:"1" type:"integer"` + // An object that represents the action to take if a match is determined. + // + // Action is a required field + Action *GrpcRouteAction `locationName:"action" type:"structure" required:"true"` - // Protocol is a required field - Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"PortProtocol"` + // An object that represents the criteria for determining a request match. + // + // Match is a required field + Match *GrpcRouteMatch `locationName:"match" type:"structure" required:"true"` - // TimeoutMillis is a required field - TimeoutMillis *int64 `locationName:"timeoutMillis" min:"2000" type:"long" required:"true"` + // An object that represents a retry policy. Specify at least one value for + // at least one of the types of RetryEvents, a value for maxRetries, and a value + // for perRetryTimeout. + RetryPolicy *GrpcRetryPolicy `locationName:"retryPolicy" type:"structure"` - // UnhealthyThreshold is a required field - UnhealthyThreshold *int64 `locationName:"unhealthyThreshold" min:"2" type:"integer" required:"true"` + // An object that represents types of timeouts. + Timeout *GrpcTimeout `locationName:"timeout" type:"structure"` } // String returns the string representation -func (s HealthCheckPolicy) String() string { +func (s GrpcRoute) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s HealthCheckPolicy) GoString() string { +func (s GrpcRoute) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *HealthCheckPolicy) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HealthCheckPolicy"} - if s.HealthyThreshold == nil { - invalidParams.Add(request.NewErrParamRequired("HealthyThreshold")) +func (s *GrpcRoute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRoute"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) } - if s.HealthyThreshold != nil && *s.HealthyThreshold < 2 { - invalidParams.Add(request.NewErrParamMinValue("HealthyThreshold", 2)) + if s.Match == nil { + invalidParams.Add(request.NewErrParamRequired("Match")) } - if s.IntervalMillis == nil { - invalidParams.Add(request.NewErrParamRequired("IntervalMillis")) + if s.Action != nil { + if err := s.Action.Validate(); err != nil { + invalidParams.AddNested("Action", err.(request.ErrInvalidParams)) + } } - if s.IntervalMillis != nil && *s.IntervalMillis < 5000 { - invalidParams.Add(request.NewErrParamMinValue("IntervalMillis", 5000)) - } - if s.Port != nil && *s.Port < 1 { - invalidParams.Add(request.NewErrParamMinValue("Port", 1)) - } - if s.Protocol == nil { - invalidParams.Add(request.NewErrParamRequired("Protocol")) - } - if s.TimeoutMillis == nil { - invalidParams.Add(request.NewErrParamRequired("TimeoutMillis")) - } - if s.TimeoutMillis != nil && *s.TimeoutMillis < 2000 { - invalidParams.Add(request.NewErrParamMinValue("TimeoutMillis", 2000)) - } - if s.UnhealthyThreshold == nil { - invalidParams.Add(request.NewErrParamRequired("UnhealthyThreshold")) + if s.Match != nil { + if err := s.Match.Validate(); err != nil { + invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) + } } - if s.UnhealthyThreshold != nil && *s.UnhealthyThreshold < 2 { - invalidParams.Add(request.NewErrParamMinValue("UnhealthyThreshold", 2)) + if s.RetryPolicy != nil { + if err := s.RetryPolicy.Validate(); err != nil { + invalidParams.AddNested("RetryPolicy", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -6057,91 +7920,119 @@ func (s *HealthCheckPolicy) Validate() error { return nil } -// SetHealthyThreshold sets the HealthyThreshold field's value. -func (s *HealthCheckPolicy) SetHealthyThreshold(v int64) *HealthCheckPolicy { - s.HealthyThreshold = &v +// SetAction sets the Action field's value. +func (s *GrpcRoute) SetAction(v *GrpcRouteAction) *GrpcRoute { + s.Action = v return s } -// SetIntervalMillis sets the IntervalMillis field's value. -func (s *HealthCheckPolicy) SetIntervalMillis(v int64) *HealthCheckPolicy { - s.IntervalMillis = &v +// SetMatch sets the Match field's value. +func (s *GrpcRoute) SetMatch(v *GrpcRouteMatch) *GrpcRoute { + s.Match = v return s } -// SetPath sets the Path field's value. -func (s *HealthCheckPolicy) SetPath(v string) *HealthCheckPolicy { - s.Path = &v +// SetRetryPolicy sets the RetryPolicy field's value. +func (s *GrpcRoute) SetRetryPolicy(v *GrpcRetryPolicy) *GrpcRoute { + s.RetryPolicy = v return s } -// SetPort sets the Port field's value. -func (s *HealthCheckPolicy) SetPort(v int64) *HealthCheckPolicy { - s.Port = &v +// SetTimeout sets the Timeout field's value. +func (s *GrpcRoute) SetTimeout(v *GrpcTimeout) *GrpcRoute { + s.Timeout = v return s } -// SetProtocol sets the Protocol field's value. -func (s *HealthCheckPolicy) SetProtocol(v string) *HealthCheckPolicy { - s.Protocol = &v - return s +// An object that represents the action to take if a match is determined. 
+type GrpcRouteAction struct { + _ struct{} `type:"structure"` + + // WeightedTargets is a required field + WeightedTargets []*WeightedTarget `locationName:"weightedTargets" min:"1" type:"list" required:"true"` } -// SetTimeoutMillis sets the TimeoutMillis field's value. -func (s *HealthCheckPolicy) SetTimeoutMillis(v int64) *HealthCheckPolicy { - s.TimeoutMillis = &v - return s +// String returns the string representation +func (s GrpcRouteAction) String() string { + return awsutil.Prettify(s) } -// SetUnhealthyThreshold sets the UnhealthyThreshold field's value. -func (s *HealthCheckPolicy) SetUnhealthyThreshold(v int64) *HealthCheckPolicy { - s.UnhealthyThreshold = &v +// GoString returns the string representation +func (s GrpcRouteAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GrpcRouteAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRouteAction"} + if s.WeightedTargets == nil { + invalidParams.Add(request.NewErrParamRequired("WeightedTargets")) + } + if s.WeightedTargets != nil && len(s.WeightedTargets) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WeightedTargets", 1)) + } + if s.WeightedTargets != nil { + for i, v := range s.WeightedTargets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "WeightedTargets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWeightedTargets sets the WeightedTargets field's value. +func (s *GrpcRouteAction) SetWeightedTargets(v []*WeightedTarget) *GrpcRouteAction { + s.WeightedTargets = v return s } -// An object that represents a retry policy. Specify at least one value for -// at least one of the types of RetryEvents, a value for maxRetries, and a value -// for perRetryTimeout. -type HttpRetryPolicy struct { +// An object that represents the criteria for determining a request match. +type GrpcRouteMatch struct { _ struct{} `type:"structure"` - HttpRetryEvents []*string `locationName:"httpRetryEvents" min:"1" type:"list"` - - // MaxRetries is a required field - MaxRetries *int64 `locationName:"maxRetries" type:"long" required:"true"` + Metadata []*GrpcRouteMetadata `locationName:"metadata" min:"1" type:"list"` - // An object that represents a duration of time. - // - // PerRetryTimeout is a required field - PerRetryTimeout *Duration `locationName:"perRetryTimeout" type:"structure" required:"true"` + MethodName *string `locationName:"methodName" min:"1" type:"string"` - TcpRetryEvents []*string `locationName:"tcpRetryEvents" min:"1" type:"list"` + ServiceName *string `locationName:"serviceName" type:"string"` } // String returns the string representation -func (s HttpRetryPolicy) String() string { +func (s GrpcRouteMatch) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s HttpRetryPolicy) GoString() string { +func (s GrpcRouteMatch) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *HttpRetryPolicy) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HttpRetryPolicy"} - if s.HttpRetryEvents != nil && len(s.HttpRetryEvents) < 1 { - invalidParams.Add(request.NewErrParamMinLen("HttpRetryEvents", 1)) - } - if s.MaxRetries == nil { - invalidParams.Add(request.NewErrParamRequired("MaxRetries")) +func (s *GrpcRouteMatch) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMatch"} + if s.Metadata != nil && len(s.Metadata) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Metadata", 1)) } - if s.PerRetryTimeout == nil { - invalidParams.Add(request.NewErrParamRequired("PerRetryTimeout")) + if s.MethodName != nil && len(*s.MethodName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MethodName", 1)) } - if s.TcpRetryEvents != nil && len(s.TcpRetryEvents) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TcpRetryEvents", 1)) + if s.Metadata != nil { + for i, v := range s.Metadata { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Metadata", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -6150,85 +8041,61 @@ func (s *HttpRetryPolicy) Validate() error { return nil } -// SetHttpRetryEvents sets the HttpRetryEvents field's value. -func (s *HttpRetryPolicy) SetHttpRetryEvents(v []*string) *HttpRetryPolicy { - s.HttpRetryEvents = v - return s -} - -// SetMaxRetries sets the MaxRetries field's value. -func (s *HttpRetryPolicy) SetMaxRetries(v int64) *HttpRetryPolicy { - s.MaxRetries = &v +// SetMetadata sets the Metadata field's value. +func (s *GrpcRouteMatch) SetMetadata(v []*GrpcRouteMetadata) *GrpcRouteMatch { + s.Metadata = v return s } -// SetPerRetryTimeout sets the PerRetryTimeout field's value. -func (s *HttpRetryPolicy) SetPerRetryTimeout(v *Duration) *HttpRetryPolicy { - s.PerRetryTimeout = v +// SetMethodName sets the MethodName field's value. +func (s *GrpcRouteMatch) SetMethodName(v string) *GrpcRouteMatch { + s.MethodName = &v return s } -// SetTcpRetryEvents sets the TcpRetryEvents field's value. -func (s *HttpRetryPolicy) SetTcpRetryEvents(v []*string) *HttpRetryPolicy { - s.TcpRetryEvents = v +// SetServiceName sets the ServiceName field's value. +func (s *GrpcRouteMatch) SetServiceName(v string) *GrpcRouteMatch { + s.ServiceName = &v return s } -// An object that represents an HTTP or HTTP/2 route type. -type HttpRoute struct { +// An object that represents the match metadata for the route. +type GrpcRouteMetadata struct { _ struct{} `type:"structure"` - // An object that represents the action to take if a match is determined. - // - // Action is a required field - Action *HttpRouteAction `locationName:"action" type:"structure" required:"true"` + Invert *bool `locationName:"invert" type:"boolean"` - // An object that represents the requirements for a route to match HTTP requests - // for a virtual router. - // - // Match is a required field - Match *HttpRouteMatch `locationName:"match" type:"structure" required:"true"` + // An object that represents the match method. Specify one of the match values. + Match *GrpcRouteMetadataMatchMethod `locationName:"match" type:"structure"` - // An object that represents a retry policy. Specify at least one value for - // at least one of the types of RetryEvents, a value for maxRetries, and a value - // for perRetryTimeout. 
- RetryPolicy *HttpRetryPolicy `locationName:"retryPolicy" type:"structure"` + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s HttpRoute) String() string { +func (s GrpcRouteMetadata) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s HttpRoute) GoString() string { +func (s GrpcRouteMetadata) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *HttpRoute) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HttpRoute"} - if s.Action == nil { - invalidParams.Add(request.NewErrParamRequired("Action")) - } - if s.Match == nil { - invalidParams.Add(request.NewErrParamRequired("Match")) +func (s *GrpcRouteMetadata) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMetadata"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Action != nil { - if err := s.Action.Validate(); err != nil { - invalidParams.AddNested("Action", err.(request.ErrInvalidParams)) - } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if s.Match != nil { if err := s.Match.Validate(); err != nil { invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) } } - if s.RetryPolicy != nil { - if err := s.RetryPolicy.Validate(); err != nil { - invalidParams.AddNested("RetryPolicy", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -6236,59 +8103,71 @@ func (s *HttpRoute) Validate() error { return nil } -// SetAction sets the Action field's value. -func (s *HttpRoute) SetAction(v *HttpRouteAction) *HttpRoute { - s.Action = v +// SetInvert sets the Invert field's value. +func (s *GrpcRouteMetadata) SetInvert(v bool) *GrpcRouteMetadata { + s.Invert = &v return s } // SetMatch sets the Match field's value. -func (s *HttpRoute) SetMatch(v *HttpRouteMatch) *HttpRoute { +func (s *GrpcRouteMetadata) SetMatch(v *GrpcRouteMetadataMatchMethod) *GrpcRouteMetadata { s.Match = v return s } -// SetRetryPolicy sets the RetryPolicy field's value. -func (s *HttpRoute) SetRetryPolicy(v *HttpRetryPolicy) *HttpRoute { - s.RetryPolicy = v +// SetName sets the Name field's value. +func (s *GrpcRouteMetadata) SetName(v string) *GrpcRouteMetadata { + s.Name = &v return s } -// An object that represents the action to take if a match is determined. -type HttpRouteAction struct { +// An object that represents the match method. Specify one of the match values. +type GrpcRouteMetadataMatchMethod struct { _ struct{} `type:"structure"` - // WeightedTargets is a required field - WeightedTargets []*WeightedTarget `locationName:"weightedTargets" min:"1" type:"list" required:"true"` + Exact *string `locationName:"exact" min:"1" type:"string"` + + Prefix *string `locationName:"prefix" min:"1" type:"string"` + + // An object that represents the range of values to match on. The first character + // of the range is included in the range, though the last character is not. + // For example, if the range specified were 1-100, only values 1-99 would be + // matched. 
+ Range *MatchRange `locationName:"range" type:"structure"` + + Regex *string `locationName:"regex" min:"1" type:"string"` + + Suffix *string `locationName:"suffix" min:"1" type:"string"` } // String returns the string representation -func (s HttpRouteAction) String() string { +func (s GrpcRouteMetadataMatchMethod) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s HttpRouteAction) GoString() string { +func (s GrpcRouteMetadataMatchMethod) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *HttpRouteAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HttpRouteAction"} - if s.WeightedTargets == nil { - invalidParams.Add(request.NewErrParamRequired("WeightedTargets")) +func (s *GrpcRouteMetadataMatchMethod) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMetadataMatchMethod"} + if s.Exact != nil && len(*s.Exact) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Exact", 1)) } - if s.WeightedTargets != nil && len(s.WeightedTargets) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WeightedTargets", 1)) + if s.Prefix != nil && len(*s.Prefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Prefix", 1)) } - if s.WeightedTargets != nil { - for i, v := range s.WeightedTargets { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "WeightedTargets", i), err.(request.ErrInvalidParams)) - } + if s.Regex != nil && len(*s.Regex) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Regex", 1)) + } + if s.Suffix != nil && len(*s.Suffix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Suffix", 1)) + } + if s.Range != nil { + if err := s.Range.Validate(); err != nil { + invalidParams.AddNested("Range", err.(request.ErrInvalidParams)) } } @@ -6298,117 +8177,117 @@ func (s *HttpRouteAction) Validate() error { return nil } -// SetWeightedTargets sets the WeightedTargets field's value. -func (s *HttpRouteAction) SetWeightedTargets(v []*WeightedTarget) *HttpRouteAction { - s.WeightedTargets = v +// SetExact sets the Exact field's value. +func (s *GrpcRouteMetadataMatchMethod) SetExact(v string) *GrpcRouteMetadataMatchMethod { + s.Exact = &v return s } -// An object that represents the HTTP header in the request. -type HttpRouteHeader struct { - _ struct{} `type:"structure"` +// SetPrefix sets the Prefix field's value. +func (s *GrpcRouteMetadataMatchMethod) SetPrefix(v string) *GrpcRouteMetadataMatchMethod { + s.Prefix = &v + return s +} - Invert *bool `locationName:"invert" type:"boolean"` +// SetRange sets the Range field's value. +func (s *GrpcRouteMetadataMatchMethod) SetRange(v *MatchRange) *GrpcRouteMetadataMatchMethod { + s.Range = v + return s +} - // An object that represents the method and value to match with the header value - // sent in a request. Specify one match method. - Match *HeaderMatchMethod `locationName:"match" type:"structure"` +// SetRegex sets the Regex field's value. +func (s *GrpcRouteMetadataMatchMethod) SetRegex(v string) *GrpcRouteMetadataMatchMethod { + s.Regex = &v + return s +} - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` +// SetSuffix sets the Suffix field's value. +func (s *GrpcRouteMetadataMatchMethod) SetSuffix(v string) *GrpcRouteMetadataMatchMethod { + s.Suffix = &v + return s +} + +// An object that represents types of timeouts. 
+type GrpcTimeout struct { + _ struct{} `type:"structure"` + + // An object that represents a duration of time. + Idle *Duration `locationName:"idle" type:"structure"` + + // An object that represents a duration of time. + PerRequest *Duration `locationName:"perRequest" type:"structure"` } // String returns the string representation -func (s HttpRouteHeader) String() string { +func (s GrpcTimeout) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s HttpRouteHeader) GoString() string { +func (s GrpcTimeout) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *HttpRouteHeader) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HttpRouteHeader"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Match != nil { - if err := s.Match.Validate(); err != nil { - invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetInvert sets the Invert field's value. -func (s *HttpRouteHeader) SetInvert(v bool) *HttpRouteHeader { - s.Invert = &v - return s -} - -// SetMatch sets the Match field's value. -func (s *HttpRouteHeader) SetMatch(v *HeaderMatchMethod) *HttpRouteHeader { - s.Match = v +// SetIdle sets the Idle field's value. +func (s *GrpcTimeout) SetIdle(v *Duration) *GrpcTimeout { + s.Idle = v return s } -// SetName sets the Name field's value. -func (s *HttpRouteHeader) SetName(v string) *HttpRouteHeader { - s.Name = &v +// SetPerRequest sets the PerRequest field's value. +func (s *GrpcTimeout) SetPerRequest(v *Duration) *GrpcTimeout { + s.PerRequest = v return s } -// An object that represents the requirements for a route to match HTTP requests -// for a virtual router. -type HttpRouteMatch struct { +// An object that represents the method and value to match with the header value +// sent in a request. Specify one match method. +type HeaderMatchMethod struct { _ struct{} `type:"structure"` - Headers []*HttpRouteHeader `locationName:"headers" min:"1" type:"list"` + Exact *string `locationName:"exact" min:"1" type:"string"` - Method *string `locationName:"method" type:"string" enum:"HttpMethod"` + Prefix *string `locationName:"prefix" min:"1" type:"string"` - // Prefix is a required field - Prefix *string `locationName:"prefix" type:"string" required:"true"` + // An object that represents the range of values to match on. The first character + // of the range is included in the range, though the last character is not. + // For example, if the range specified were 1-100, only values 1-99 would be + // matched. + Range *MatchRange `locationName:"range" type:"structure"` - Scheme *string `locationName:"scheme" type:"string" enum:"HttpScheme"` + Regex *string `locationName:"regex" min:"1" type:"string"` + + Suffix *string `locationName:"suffix" min:"1" type:"string"` } // String returns the string representation -func (s HttpRouteMatch) String() string { +func (s HeaderMatchMethod) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s HttpRouteMatch) GoString() string { +func (s HeaderMatchMethod) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *HttpRouteMatch) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "HttpRouteMatch"} - if s.Headers != nil && len(s.Headers) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Headers", 1)) +func (s *HeaderMatchMethod) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeaderMatchMethod"} + if s.Exact != nil && len(*s.Exact) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Exact", 1)) } - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) + if s.Prefix != nil && len(*s.Prefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Prefix", 1)) } - if s.Headers != nil { - for i, v := range s.Headers { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Headers", i), err.(request.ErrInvalidParams)) - } + if s.Regex != nil && len(*s.Regex) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Regex", 1)) + } + if s.Suffix != nil && len(*s.Suffix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Suffix", 1)) + } + if s.Range != nil { + if err := s.Range.Validate(); err != nil { + invalidParams.AddNested("Range", err.(request.ErrInvalidParams)) } } @@ -6418,168 +8297,195 @@ func (s *HttpRouteMatch) Validate() error { return nil } -// SetHeaders sets the Headers field's value. -func (s *HttpRouteMatch) SetHeaders(v []*HttpRouteHeader) *HttpRouteMatch { - s.Headers = v - return s -} - -// SetMethod sets the Method field's value. -func (s *HttpRouteMatch) SetMethod(v string) *HttpRouteMatch { - s.Method = &v +// SetExact sets the Exact field's value. +func (s *HeaderMatchMethod) SetExact(v string) *HeaderMatchMethod { + s.Exact = &v return s } // SetPrefix sets the Prefix field's value. -func (s *HttpRouteMatch) SetPrefix(v string) *HttpRouteMatch { +func (s *HeaderMatchMethod) SetPrefix(v string) *HeaderMatchMethod { s.Prefix = &v return s } -// SetScheme sets the Scheme field's value. -func (s *HttpRouteMatch) SetScheme(v string) *HttpRouteMatch { - s.Scheme = &v +// SetRange sets the Range field's value. +func (s *HeaderMatchMethod) SetRange(v *MatchRange) *HeaderMatchMethod { + s.Range = v return s } -// The request processing has failed because of an unknown error, exception, -// or failure. -type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation -func (s InternalServerErrorException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InternalServerErrorException) GoString() string { - return s.String() +// SetRegex sets the Regex field's value. +func (s *HeaderMatchMethod) SetRegex(v string) *HeaderMatchMethod { + s.Regex = &v + return s } -func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { - return &InternalServerErrorException{ - respMetadata: v, - } +// SetSuffix sets the Suffix field's value. +func (s *HeaderMatchMethod) SetSuffix(v string) *HeaderMatchMethod { + s.Suffix = &v + return s } -// Code returns the exception type name. -func (s InternalServerErrorException) Code() string { - return "InternalServerErrorException" -} +// An object that represents the health check policy for a virtual node's listener. +type HealthCheckPolicy struct { + _ struct{} `type:"structure"` -// Message returns the exception's message. 
-func (s InternalServerErrorException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} + // HealthyThreshold is a required field + HealthyThreshold *int64 `locationName:"healthyThreshold" min:"2" type:"integer" required:"true"` -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerErrorException) OrigErr() error { - return nil -} + // IntervalMillis is a required field + IntervalMillis *int64 `locationName:"intervalMillis" min:"5000" type:"long" required:"true"` -func (s InternalServerErrorException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} + Path *string `locationName:"path" type:"string"` -// Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode -} + Port *int64 `locationName:"port" min:"1" type:"integer"` -// RequestID returns the service's response RequestID for request. -func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID -} + // Protocol is a required field + Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"PortProtocol"` -// You have exceeded a service limit for your account. For more information, -// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service_limits.html) -// in the AWS App Mesh User Guide. -type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + // TimeoutMillis is a required field + TimeoutMillis *int64 `locationName:"timeoutMillis" min:"2000" type:"long" required:"true"` - Message_ *string `locationName:"message" type:"string"` + // UnhealthyThreshold is a required field + UnhealthyThreshold *int64 `locationName:"unhealthyThreshold" min:"2" type:"integer" required:"true"` } // String returns the string representation -func (s LimitExceededException) String() string { +func (s HealthCheckPolicy) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LimitExceededException) GoString() string { +func (s HealthCheckPolicy) GoString() string { return s.String() } -func newErrorLimitExceededException(v protocol.ResponseMetadata) error { - return &LimitExceededException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *HealthCheckPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HealthCheckPolicy"} + if s.HealthyThreshold == nil { + invalidParams.Add(request.NewErrParamRequired("HealthyThreshold")) } + if s.HealthyThreshold != nil && *s.HealthyThreshold < 2 { + invalidParams.Add(request.NewErrParamMinValue("HealthyThreshold", 2)) + } + if s.IntervalMillis == nil { + invalidParams.Add(request.NewErrParamRequired("IntervalMillis")) + } + if s.IntervalMillis != nil && *s.IntervalMillis < 5000 { + invalidParams.Add(request.NewErrParamMinValue("IntervalMillis", 5000)) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + if s.TimeoutMillis == nil { + invalidParams.Add(request.NewErrParamRequired("TimeoutMillis")) + } + if s.TimeoutMillis != nil && *s.TimeoutMillis < 2000 { + invalidParams.Add(request.NewErrParamMinValue("TimeoutMillis", 2000)) + } + if s.UnhealthyThreshold == nil { + invalidParams.Add(request.NewErrParamRequired("UnhealthyThreshold")) + } + if s.UnhealthyThreshold != nil && *s.UnhealthyThreshold < 2 { + invalidParams.Add(request.NewErrParamMinValue("UnhealthyThreshold", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// Code returns the exception type name. -func (s LimitExceededException) Code() string { - return "LimitExceededException" +// SetHealthyThreshold sets the HealthyThreshold field's value. +func (s *HealthCheckPolicy) SetHealthyThreshold(v int64) *HealthCheckPolicy { + s.HealthyThreshold = &v + return s } -// Message returns the exception's message. -func (s LimitExceededException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetIntervalMillis sets the IntervalMillis field's value. +func (s *HealthCheckPolicy) SetIntervalMillis(v int64) *HealthCheckPolicy { + s.IntervalMillis = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { - return nil +// SetPath sets the Path field's value. +func (s *HealthCheckPolicy) SetPath(v string) *HealthCheckPolicy { + s.Path = &v + return s } -func (s LimitExceededException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetPort sets the Port field's value. +func (s *HealthCheckPolicy) SetPort(v int64) *HealthCheckPolicy { + s.Port = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +// SetProtocol sets the Protocol field's value. +func (s *HealthCheckPolicy) SetProtocol(v string) *HealthCheckPolicy { + s.Protocol = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +// SetTimeoutMillis sets the TimeoutMillis field's value. +func (s *HealthCheckPolicy) SetTimeoutMillis(v int64) *HealthCheckPolicy { + s.TimeoutMillis = &v + return s } -type ListMeshesInput struct { +// SetUnhealthyThreshold sets the UnhealthyThreshold field's value. +func (s *HealthCheckPolicy) SetUnhealthyThreshold(v int64) *HealthCheckPolicy { + s.UnhealthyThreshold = &v + return s +} + +// An object that represents an HTTP gateway route. 
+type HttpGatewayRoute struct { _ struct{} `type:"structure"` - Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` + // An object that represents the action to take if a match is determined. + // + // Action is a required field + Action *HttpGatewayRouteAction `locationName:"action" type:"structure" required:"true"` - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + // An object that represents the criteria for determining a request match. + // + // Match is a required field + Match *HttpGatewayRouteMatch `locationName:"match" type:"structure" required:"true"` } // String returns the string representation -func (s ListMeshesInput) String() string { +func (s HttpGatewayRoute) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListMeshesInput) GoString() string { +func (s HttpGatewayRoute) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListMeshesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListMeshesInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) +func (s *HttpGatewayRoute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpGatewayRoute"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.Match == nil { + invalidParams.Add(request.NewErrParamRequired("Match")) + } + if s.Action != nil { + if err := s.Action.Validate(); err != nil { + invalidParams.AddNested("Action", err.(request.ErrInvalidParams)) + } + } + if s.Match != nil { + if err := s.Match.Validate(); err != nil { + invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -6588,95 +8494,85 @@ func (s *ListMeshesInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListMeshesInput) SetLimit(v int64) *ListMeshesInput { - s.Limit = &v +// SetAction sets the Action field's value. +func (s *HttpGatewayRoute) SetAction(v *HttpGatewayRouteAction) *HttpGatewayRoute { + s.Action = v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListMeshesInput) SetNextToken(v string) *ListMeshesInput { - s.NextToken = &v +// SetMatch sets the Match field's value. +func (s *HttpGatewayRoute) SetMatch(v *HttpGatewayRouteMatch) *HttpGatewayRoute { + s.Match = v return s } -type ListMeshesOutput struct { +// An object that represents the action to take if a match is determined. +type HttpGatewayRouteAction struct { _ struct{} `type:"structure"` - // Meshes is a required field - Meshes []*MeshRef `locationName:"meshes" type:"list" required:"true"` - - NextToken *string `locationName:"nextToken" type:"string"` + // An object that represents a gateway route target. + // + // Target is a required field + Target *GatewayRouteTarget `locationName:"target" type:"structure" required:"true"` } // String returns the string representation -func (s ListMeshesOutput) String() string { +func (s HttpGatewayRouteAction) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListMeshesOutput) GoString() string { +func (s HttpGatewayRouteAction) GoString() string { return s.String() } -// SetMeshes sets the Meshes field's value. 
-func (s *ListMeshesOutput) SetMeshes(v []*MeshRef) *ListMeshesOutput { - s.Meshes = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpGatewayRouteAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpGatewayRouteAction"} + if s.Target == nil { + invalidParams.Add(request.NewErrParamRequired("Target")) + } + if s.Target != nil { + if err := s.Target.Validate(); err != nil { + invalidParams.AddNested("Target", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetNextToken sets the NextToken field's value. -func (s *ListMeshesOutput) SetNextToken(v string) *ListMeshesOutput { - s.NextToken = &v +// SetTarget sets the Target field's value. +func (s *HttpGatewayRouteAction) SetTarget(v *GatewayRouteTarget) *HttpGatewayRouteAction { + s.Target = v return s } -type ListRoutesInput struct { +// An object that represents the criteria for determining a request match. +type HttpGatewayRouteMatch struct { _ struct{} `type:"structure"` - Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` - - // MeshName is a required field - MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - - MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` - - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - - // VirtualRouterName is a required field - VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"` + // Prefix is a required field + Prefix *string `locationName:"prefix" type:"string" required:"true"` } // String returns the string representation -func (s ListRoutesInput) String() string { +func (s HttpGatewayRouteMatch) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListRoutesInput) GoString() string { +func (s HttpGatewayRouteMatch) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListRoutesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListRoutesInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.MeshName == nil { - invalidParams.Add(request.NewErrParamRequired("MeshName")) - } - if s.MeshName != nil && len(*s.MeshName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) - } - if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { - invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) - } - if s.VirtualRouterName == nil { - invalidParams.Add(request.NewErrParamRequired("VirtualRouterName")) - } - if s.VirtualRouterName != nil && len(*s.VirtualRouterName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VirtualRouterName", 1)) +func (s *HttpGatewayRouteMatch) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpGatewayRouteMatch"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) } if invalidParams.Len() > 0 { @@ -6685,96 +8581,144 @@ func (s *ListRoutesInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListRoutesInput) SetLimit(v int64) *ListRoutesInput { - s.Limit = &v - return s -} - -// SetMeshName sets the MeshName field's value. 
-func (s *ListRoutesInput) SetMeshName(v string) *ListRoutesInput { - s.MeshName = &v - return s -} - -// SetMeshOwner sets the MeshOwner field's value. -func (s *ListRoutesInput) SetMeshOwner(v string) *ListRoutesInput { - s.MeshOwner = &v +// SetPrefix sets the Prefix field's value. +func (s *HttpGatewayRouteMatch) SetPrefix(v string) *HttpGatewayRouteMatch { + s.Prefix = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListRoutesInput) SetNextToken(v string) *ListRoutesInput { - s.NextToken = &v - return s -} +// An object that represents a retry policy. Specify at least one value for +// at least one of the types of RetryEvents, a value for maxRetries, and a value +// for perRetryTimeout. +type HttpRetryPolicy struct { + _ struct{} `type:"structure"` -// SetVirtualRouterName sets the VirtualRouterName field's value. -func (s *ListRoutesInput) SetVirtualRouterName(v string) *ListRoutesInput { - s.VirtualRouterName = &v - return s -} + HttpRetryEvents []*string `locationName:"httpRetryEvents" min:"1" type:"list"` -type ListRoutesOutput struct { - _ struct{} `type:"structure"` + // MaxRetries is a required field + MaxRetries *int64 `locationName:"maxRetries" type:"long" required:"true"` - NextToken *string `locationName:"nextToken" type:"string"` + // An object that represents a duration of time. + // + // PerRetryTimeout is a required field + PerRetryTimeout *Duration `locationName:"perRetryTimeout" type:"structure" required:"true"` - // Routes is a required field - Routes []*RouteRef `locationName:"routes" type:"list" required:"true"` + TcpRetryEvents []*string `locationName:"tcpRetryEvents" min:"1" type:"list"` } // String returns the string representation -func (s ListRoutesOutput) String() string { +func (s HttpRetryPolicy) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListRoutesOutput) GoString() string { +func (s HttpRetryPolicy) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListRoutesOutput) SetNextToken(v string) *ListRoutesOutput { - s.NextToken = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpRetryPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpRetryPolicy"} + if s.HttpRetryEvents != nil && len(s.HttpRetryEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HttpRetryEvents", 1)) + } + if s.MaxRetries == nil { + invalidParams.Add(request.NewErrParamRequired("MaxRetries")) + } + if s.PerRetryTimeout == nil { + invalidParams.Add(request.NewErrParamRequired("PerRetryTimeout")) + } + if s.TcpRetryEvents != nil && len(s.TcpRetryEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TcpRetryEvents", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetRoutes sets the Routes field's value. -func (s *ListRoutesOutput) SetRoutes(v []*RouteRef) *ListRoutesOutput { - s.Routes = v +// SetHttpRetryEvents sets the HttpRetryEvents field's value. +func (s *HttpRetryPolicy) SetHttpRetryEvents(v []*string) *HttpRetryPolicy { + s.HttpRetryEvents = v return s } -type ListTagsForResourceInput struct { +// SetMaxRetries sets the MaxRetries field's value. +func (s *HttpRetryPolicy) SetMaxRetries(v int64) *HttpRetryPolicy { + s.MaxRetries = &v + return s +} + +// SetPerRetryTimeout sets the PerRetryTimeout field's value. 
+func (s *HttpRetryPolicy) SetPerRetryTimeout(v *Duration) *HttpRetryPolicy { + s.PerRetryTimeout = v + return s +} + +// SetTcpRetryEvents sets the TcpRetryEvents field's value. +func (s *HttpRetryPolicy) SetTcpRetryEvents(v []*string) *HttpRetryPolicy { + s.TcpRetryEvents = v + return s +} + +// An object that represents an HTTP or HTTP/2 route type. +type HttpRoute struct { _ struct{} `type:"structure"` - Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` + // An object that represents the action to take if a match is determined. + // + // Action is a required field + Action *HttpRouteAction `locationName:"action" type:"structure" required:"true"` - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + // An object that represents the requirements for a route to match HTTP requests + // for a virtual router. + // + // Match is a required field + Match *HttpRouteMatch `locationName:"match" type:"structure" required:"true"` - // ResourceArn is a required field - ResourceArn *string `location:"querystring" locationName:"resourceArn" type:"string" required:"true"` + // An object that represents a retry policy. Specify at least one value for + // at least one of the types of RetryEvents, a value for maxRetries, and a value + // for perRetryTimeout. + RetryPolicy *HttpRetryPolicy `locationName:"retryPolicy" type:"structure"` + + // An object that represents types of timeouts. + Timeout *HttpTimeout `locationName:"timeout" type:"structure"` } // String returns the string representation -func (s ListTagsForResourceInput) String() string { +func (s HttpRoute) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTagsForResourceInput) GoString() string { +func (s HttpRoute) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListTagsForResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) +func (s *HttpRoute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpRoute"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) } - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + if s.Match == nil { + invalidParams.Add(request.NewErrParamRequired("Match")) + } + if s.Action != nil { + if err := s.Action.Validate(); err != nil { + invalidParams.AddNested("Action", err.(request.ErrInvalidParams)) + } + } + if s.Match != nil { + if err := s.Match.Validate(); err != nil { + invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) + } + } + if s.RetryPolicy != nil { + if err := s.RetryPolicy.Validate(); err != nil { + invalidParams.AddNested("RetryPolicy", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -6783,92 +8727,117 @@ func (s *ListTagsForResourceInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListTagsForResourceInput) SetLimit(v int64) *ListTagsForResourceInput { - s.Limit = &v +// SetAction sets the Action field's value. +func (s *HttpRoute) SetAction(v *HttpRouteAction) *HttpRoute { + s.Action = v return s } -// SetNextToken sets the NextToken field's value. 
-func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { - s.NextToken = &v +// SetMatch sets the Match field's value. +func (s *HttpRoute) SetMatch(v *HttpRouteMatch) *HttpRoute { + s.Match = v return s } -// SetResourceArn sets the ResourceArn field's value. -func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { - s.ResourceArn = &v +// SetRetryPolicy sets the RetryPolicy field's value. +func (s *HttpRoute) SetRetryPolicy(v *HttpRetryPolicy) *HttpRoute { + s.RetryPolicy = v return s } -type ListTagsForResourceOutput struct { - _ struct{} `type:"structure"` +// SetTimeout sets the Timeout field's value. +func (s *HttpRoute) SetTimeout(v *HttpTimeout) *HttpRoute { + s.Timeout = v + return s +} - NextToken *string `locationName:"nextToken" type:"string"` +// An object that represents the action to take if a match is determined. +type HttpRouteAction struct { + _ struct{} `type:"structure"` - // Tags is a required field - Tags []*TagRef `locationName:"tags" type:"list" required:"true"` + // WeightedTargets is a required field + WeightedTargets []*WeightedTarget `locationName:"weightedTargets" min:"1" type:"list" required:"true"` } // String returns the string representation -func (s ListTagsForResourceOutput) String() string { +func (s HttpRouteAction) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTagsForResourceOutput) GoString() string { +func (s HttpRouteAction) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { - s.NextToken = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpRouteAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpRouteAction"} + if s.WeightedTargets == nil { + invalidParams.Add(request.NewErrParamRequired("WeightedTargets")) + } + if s.WeightedTargets != nil && len(s.WeightedTargets) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WeightedTargets", 1)) + } + if s.WeightedTargets != nil { + for i, v := range s.WeightedTargets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "WeightedTargets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetTags sets the Tags field's value. -func (s *ListTagsForResourceOutput) SetTags(v []*TagRef) *ListTagsForResourceOutput { - s.Tags = v +// SetWeightedTargets sets the WeightedTargets field's value. +func (s *HttpRouteAction) SetWeightedTargets(v []*WeightedTarget) *HttpRouteAction { + s.WeightedTargets = v return s } -type ListVirtualNodesInput struct { +// An object that represents the HTTP header in the request. +type HttpRouteHeader struct { _ struct{} `type:"structure"` - Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` - - // MeshName is a required field - MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + Invert *bool `locationName:"invert" type:"boolean"` - MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + // An object that represents the method and value to match with the header value + // sent in a request. Specify one match method. 
+ Match *HeaderMatchMethod `locationName:"match" type:"structure"` - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ListVirtualNodesInput) String() string { +func (s HttpRouteHeader) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListVirtualNodesInput) GoString() string { +func (s HttpRouteHeader) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListVirtualNodesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListVirtualNodesInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.MeshName == nil { - invalidParams.Add(request.NewErrParamRequired("MeshName")) +func (s *HttpRouteHeader) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpRouteHeader"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.MeshName != nil && len(*s.MeshName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { - invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + if s.Match != nil { + if err := s.Match.Validate(); err != nil { + invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -6877,162 +8846,248 @@ func (s *ListVirtualNodesInput) Validate() error { return nil } -// SetLimit sets the Limit field's value. -func (s *ListVirtualNodesInput) SetLimit(v int64) *ListVirtualNodesInput { - s.Limit = &v - return s -} - -// SetMeshName sets the MeshName field's value. -func (s *ListVirtualNodesInput) SetMeshName(v string) *ListVirtualNodesInput { - s.MeshName = &v +// SetInvert sets the Invert field's value. +func (s *HttpRouteHeader) SetInvert(v bool) *HttpRouteHeader { + s.Invert = &v return s } -// SetMeshOwner sets the MeshOwner field's value. -func (s *ListVirtualNodesInput) SetMeshOwner(v string) *ListVirtualNodesInput { - s.MeshOwner = &v +// SetMatch sets the Match field's value. +func (s *HttpRouteHeader) SetMatch(v *HeaderMatchMethod) *HttpRouteHeader { + s.Match = v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListVirtualNodesInput) SetNextToken(v string) *ListVirtualNodesInput { - s.NextToken = &v +// SetName sets the Name field's value. +func (s *HttpRouteHeader) SetName(v string) *HttpRouteHeader { + s.Name = &v return s } -type ListVirtualNodesOutput struct { +// An object that represents the requirements for a route to match HTTP requests +// for a virtual router. 
+type HttpRouteMatch struct { _ struct{} `type:"structure"` - NextToken *string `locationName:"nextToken" type:"string"` + Headers []*HttpRouteHeader `locationName:"headers" min:"1" type:"list"` - // VirtualNodes is a required field - VirtualNodes []*VirtualNodeRef `locationName:"virtualNodes" type:"list" required:"true"` + Method *string `locationName:"method" type:"string" enum:"HttpMethod"` + + // Prefix is a required field + Prefix *string `locationName:"prefix" type:"string" required:"true"` + + Scheme *string `locationName:"scheme" type:"string" enum:"HttpScheme"` } // String returns the string representation -func (s ListVirtualNodesOutput) String() string { +func (s HttpRouteMatch) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListVirtualNodesOutput) GoString() string { +func (s HttpRouteMatch) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListVirtualNodesOutput) SetNextToken(v string) *ListVirtualNodesOutput { - s.NextToken = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpRouteMatch) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpRouteMatch"} + if s.Headers != nil && len(s.Headers) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Headers", 1)) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Headers != nil { + for i, v := range s.Headers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Headers", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetVirtualNodes sets the VirtualNodes field's value. -func (s *ListVirtualNodesOutput) SetVirtualNodes(v []*VirtualNodeRef) *ListVirtualNodesOutput { - s.VirtualNodes = v +// SetHeaders sets the Headers field's value. +func (s *HttpRouteMatch) SetHeaders(v []*HttpRouteHeader) *HttpRouteMatch { + s.Headers = v return s } -type ListVirtualRoutersInput struct { - _ struct{} `type:"structure"` - - Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` +// SetMethod sets the Method field's value. +func (s *HttpRouteMatch) SetMethod(v string) *HttpRouteMatch { + s.Method = &v + return s +} - // MeshName is a required field - MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` +// SetPrefix sets the Prefix field's value. +func (s *HttpRouteMatch) SetPrefix(v string) *HttpRouteMatch { + s.Prefix = &v + return s +} - MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` +// SetScheme sets the Scheme field's value. +func (s *HttpRouteMatch) SetScheme(v string) *HttpRouteMatch { + s.Scheme = &v + return s +} - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +// An object that represents types of timeouts. +type HttpTimeout struct { + _ struct{} `type:"structure"` + + // An object that represents a duration of time. + Idle *Duration `locationName:"idle" type:"structure"` + + // An object that represents a duration of time. 
+ PerRequest *Duration `locationName:"perRequest" type:"structure"` } // String returns the string representation -func (s ListVirtualRoutersInput) String() string { +func (s HttpTimeout) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListVirtualRoutersInput) GoString() string { +func (s HttpTimeout) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListVirtualRoutersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListVirtualRoutersInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.MeshName == nil { - invalidParams.Add(request.NewErrParamRequired("MeshName")) - } - if s.MeshName != nil && len(*s.MeshName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) - } - if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { - invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) - } +// SetIdle sets the Idle field's value. +func (s *HttpTimeout) SetIdle(v *Duration) *HttpTimeout { + s.Idle = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams +// SetPerRequest sets the PerRequest field's value. +func (s *HttpTimeout) SetPerRequest(v *Duration) *HttpTimeout { + s.PerRequest = v + return s +} + +// The request processing has failed because of an unknown error, exception, +// or failure. +type InternalServerErrorException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InternalServerErrorException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InternalServerErrorException) GoString() string { + return s.String() +} + +func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { + return &InternalServerErrorException{ + RespMetadata: v, } - return nil } -// SetLimit sets the Limit field's value. -func (s *ListVirtualRoutersInput) SetLimit(v int64) *ListVirtualRoutersInput { - s.Limit = &v - return s +// Code returns the exception type name. +func (s *InternalServerErrorException) Code() string { + return "InternalServerErrorException" } -// SetMeshName sets the MeshName field's value. -func (s *ListVirtualRoutersInput) SetMeshName(v string) *ListVirtualRoutersInput { - s.MeshName = &v - return s +// Message returns the exception's message. +func (s *InternalServerErrorException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -// SetMeshOwner sets the MeshOwner field's value. -func (s *ListVirtualRoutersInput) SetMeshOwner(v string) *ListVirtualRoutersInput { - s.MeshOwner = &v - return s +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerErrorException) OrigErr() error { + return nil } -// SetNextToken sets the NextToken field's value. -func (s *ListVirtualRoutersInput) SetNextToken(v string) *ListVirtualRoutersInput { - s.NextToken = &v - return s +func (s *InternalServerErrorException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } -type ListVirtualRoutersOutput struct { - _ struct{} `type:"structure"` +// Status code returns the HTTP status code for the request's response error. 
+func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode +} - NextToken *string `locationName:"nextToken" type:"string"` +// RequestID returns the service's response RequestID for request. +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID +} - // VirtualRouters is a required field - VirtualRouters []*VirtualRouterRef `locationName:"virtualRouters" type:"list" required:"true"` +// You have exceeded a service limit for your account. For more information, +// see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) +// in the AWS App Mesh User Guide. +type LimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` } // String returns the string representation -func (s ListVirtualRoutersOutput) String() string { +func (s LimitExceededException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListVirtualRoutersOutput) GoString() string { +func (s LimitExceededException) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListVirtualRoutersOutput) SetNextToken(v string) *ListVirtualRoutersOutput { - s.NextToken = &v - return s +func newErrorLimitExceededException(v protocol.ResponseMetadata) error { + return &LimitExceededException{ + RespMetadata: v, + } } -// SetVirtualRouters sets the VirtualRouters field's value. -func (s *ListVirtualRoutersOutput) SetVirtualRouters(v []*VirtualRouterRef) *ListVirtualRoutersOutput { - s.VirtualRouters = v - return s +// Code returns the exception type name. +func (s *LimitExceededException) Code() string { + return "LimitExceededException" } -type ListVirtualServicesInput struct { +// Message returns the exception's message. +func (s *LimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LimitExceededException) OrigErr() error { + return nil +} + +func (s *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListGatewayRoutesInput struct { _ struct{} `type:"structure"` Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` @@ -7043,21 +9098,24 @@ type ListVirtualServicesInput struct { MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // VirtualGatewayName is a required field + VirtualGatewayName *string `location:"uri" locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ListVirtualServicesInput) String() string { +func (s ListGatewayRoutesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListVirtualServicesInput) GoString() string { +func (s ListGatewayRoutesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListVirtualServicesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListVirtualServicesInput"} +func (s *ListGatewayRoutesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGatewayRoutesInput"} if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } @@ -7070,6 +9128,12 @@ func (s *ListVirtualServicesInput) Validate() error { if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) } + if s.VirtualGatewayName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayName")) + } + if s.VirtualGatewayName != nil && len(*s.VirtualGatewayName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualGatewayName", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7078,107 +9142,89 @@ func (s *ListVirtualServicesInput) Validate() error { } // SetLimit sets the Limit field's value. -func (s *ListVirtualServicesInput) SetLimit(v int64) *ListVirtualServicesInput { +func (s *ListGatewayRoutesInput) SetLimit(v int64) *ListGatewayRoutesInput { s.Limit = &v return s } // SetMeshName sets the MeshName field's value. -func (s *ListVirtualServicesInput) SetMeshName(v string) *ListVirtualServicesInput { +func (s *ListGatewayRoutesInput) SetMeshName(v string) *ListGatewayRoutesInput { s.MeshName = &v return s } // SetMeshOwner sets the MeshOwner field's value. -func (s *ListVirtualServicesInput) SetMeshOwner(v string) *ListVirtualServicesInput { +func (s *ListGatewayRoutesInput) SetMeshOwner(v string) *ListGatewayRoutesInput { s.MeshOwner = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListVirtualServicesInput) SetNextToken(v string) *ListVirtualServicesInput { +func (s *ListGatewayRoutesInput) SetNextToken(v string) *ListGatewayRoutesInput { s.NextToken = &v return s } -type ListVirtualServicesOutput struct { +// SetVirtualGatewayName sets the VirtualGatewayName field's value. 
+func (s *ListGatewayRoutesInput) SetVirtualGatewayName(v string) *ListGatewayRoutesInput { + s.VirtualGatewayName = &v + return s +} + +type ListGatewayRoutesOutput struct { _ struct{} `type:"structure"` - NextToken *string `locationName:"nextToken" type:"string"` + // GatewayRoutes is a required field + GatewayRoutes []*GatewayRouteRef `locationName:"gatewayRoutes" type:"list" required:"true"` - // VirtualServices is a required field - VirtualServices []*VirtualServiceRef `locationName:"virtualServices" type:"list" required:"true"` + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s ListVirtualServicesOutput) String() string { +func (s ListGatewayRoutesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListVirtualServicesOutput) GoString() string { +func (s ListGatewayRoutesOutput) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListVirtualServicesOutput) SetNextToken(v string) *ListVirtualServicesOutput { - s.NextToken = &v +// SetGatewayRoutes sets the GatewayRoutes field's value. +func (s *ListGatewayRoutesOutput) SetGatewayRoutes(v []*GatewayRouteRef) *ListGatewayRoutesOutput { + s.GatewayRoutes = v return s } -// SetVirtualServices sets the VirtualServices field's value. -func (s *ListVirtualServicesOutput) SetVirtualServices(v []*VirtualServiceRef) *ListVirtualServicesOutput { - s.VirtualServices = v +// SetNextToken sets the NextToken field's value. +func (s *ListGatewayRoutesOutput) SetNextToken(v string) *ListGatewayRoutesOutput { + s.NextToken = &v return s } -// An object that represents a listener for a virtual node. -type Listener struct { +type ListMeshesInput struct { _ struct{} `type:"structure"` - // An object that represents the health check policy for a virtual node's listener. - HealthCheck *HealthCheckPolicy `locationName:"healthCheck" type:"structure"` - - // An object that represents a port mapping. - // - // PortMapping is a required field - PortMapping *PortMapping `locationName:"portMapping" type:"structure" required:"true"` + Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` - // An object that represents the Transport Layer Security (TLS) properties for - // a listener. - Tls *ListenerTls `locationName:"tls" type:"structure"` + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s Listener) String() string { +func (s ListMeshesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Listener) GoString() string { +func (s ListMeshesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *Listener) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Listener"} - if s.PortMapping == nil { - invalidParams.Add(request.NewErrParamRequired("PortMapping")) - } - if s.HealthCheck != nil { - if err := s.HealthCheck.Validate(); err != nil { - invalidParams.AddNested("HealthCheck", err.(request.ErrInvalidParams)) - } - } - if s.PortMapping != nil { - if err := s.PortMapping.Validate(); err != nil { - invalidParams.AddNested("PortMapping", err.(request.ErrInvalidParams)) - } - } - if s.Tls != nil { - if err := s.Tls.Validate(); err != nil { - invalidParams.AddNested("Tls", err.(request.ErrInvalidParams)) - } +func (s *ListMeshesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMeshesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } if invalidParams.Len() > 0 { @@ -7187,104 +9233,95 @@ func (s *Listener) Validate() error { return nil } -// SetHealthCheck sets the HealthCheck field's value. -func (s *Listener) SetHealthCheck(v *HealthCheckPolicy) *Listener { - s.HealthCheck = v - return s -} - -// SetPortMapping sets the PortMapping field's value. -func (s *Listener) SetPortMapping(v *PortMapping) *Listener { - s.PortMapping = v +// SetLimit sets the Limit field's value. +func (s *ListMeshesInput) SetLimit(v int64) *ListMeshesInput { + s.Limit = &v return s } -// SetTls sets the Tls field's value. -func (s *Listener) SetTls(v *ListenerTls) *Listener { - s.Tls = v +// SetNextToken sets the NextToken field's value. +func (s *ListMeshesInput) SetNextToken(v string) *ListMeshesInput { + s.NextToken = &v return s } -// An object that represents the Transport Layer Security (TLS) properties for -// a listener. -type ListenerTls struct { +type ListMeshesOutput struct { _ struct{} `type:"structure"` - // An object that represents a listener's Transport Layer Security (TLS) certificate. - // - // Certificate is a required field - Certificate *ListenerTlsCertificate `locationName:"certificate" type:"structure" required:"true"` + // Meshes is a required field + Meshes []*MeshRef `locationName:"meshes" type:"list" required:"true"` - // Mode is a required field - Mode *string `locationName:"mode" type:"string" required:"true" enum:"ListenerTlsMode"` + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s ListenerTls) String() string { +func (s ListMeshesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListenerTls) GoString() string { +func (s ListMeshesOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListenerTls) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListenerTls"} - if s.Certificate == nil { - invalidParams.Add(request.NewErrParamRequired("Certificate")) - } - if s.Mode == nil { - invalidParams.Add(request.NewErrParamRequired("Mode")) - } - if s.Certificate != nil { - if err := s.Certificate.Validate(); err != nil { - invalidParams.AddNested("Certificate", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCertificate sets the Certificate field's value. -func (s *ListenerTls) SetCertificate(v *ListenerTlsCertificate) *ListenerTls { - s.Certificate = v +// SetMeshes sets the Meshes field's value. 
+func (s *ListMeshesOutput) SetMeshes(v []*MeshRef) *ListMeshesOutput { + s.Meshes = v return s } -// SetMode sets the Mode field's value. -func (s *ListenerTls) SetMode(v string) *ListenerTls { - s.Mode = &v +// SetNextToken sets the NextToken field's value. +func (s *ListMeshesOutput) SetNextToken(v string) *ListMeshesOutput { + s.NextToken = &v return s } -// An object that represents an AWS Certicate Manager (ACM) certificate. -type ListenerTlsAcmCertificate struct { +type ListRoutesInput struct { _ struct{} `type:"structure"` - // CertificateArn is a required field - CertificateArn *string `locationName:"certificateArn" type:"string" required:"true"` + Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` + + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // VirtualRouterName is a required field + VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ListenerTlsAcmCertificate) String() string { +func (s ListRoutesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListenerTlsAcmCertificate) GoString() string { +func (s ListRoutesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListenerTlsAcmCertificate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListenerTlsAcmCertificate"} - if s.CertificateArn == nil { - invalidParams.Add(request.NewErrParamRequired("CertificateArn")) +func (s *ListRoutesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRoutesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + if s.VirtualRouterName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualRouterName")) + } + if s.VirtualRouterName != nil && len(*s.VirtualRouterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualRouterName", 1)) } if invalidParams.Len() > 0 { @@ -7293,104 +9330,96 @@ func (s *ListenerTlsAcmCertificate) Validate() error { return nil } -// SetCertificateArn sets the CertificateArn field's value. -func (s *ListenerTlsAcmCertificate) SetCertificateArn(v string) *ListenerTlsAcmCertificate { - s.CertificateArn = &v +// SetLimit sets the Limit field's value. +func (s *ListRoutesInput) SetLimit(v int64) *ListRoutesInput { + s.Limit = &v return s } -// An object that represents a listener's Transport Layer Security (TLS) certificate. -type ListenerTlsCertificate struct { +// SetMeshName sets the MeshName field's value. +func (s *ListRoutesInput) SetMeshName(v string) *ListRoutesInput { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. 
+func (s *ListRoutesInput) SetMeshOwner(v string) *ListRoutesInput { + s.MeshOwner = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListRoutesInput) SetNextToken(v string) *ListRoutesInput { + s.NextToken = &v + return s +} + +// SetVirtualRouterName sets the VirtualRouterName field's value. +func (s *ListRoutesInput) SetVirtualRouterName(v string) *ListRoutesInput { + s.VirtualRouterName = &v + return s +} + +type ListRoutesOutput struct { _ struct{} `type:"structure"` - // An object that represents an AWS Certicate Manager (ACM) certificate. - Acm *ListenerTlsAcmCertificate `locationName:"acm" type:"structure"` + NextToken *string `locationName:"nextToken" type:"string"` - // An object that represents a local file certificate. The certificate must - // meet specific requirements and you must have proxy authorization enabled. - // For more information, see Transport Layer Security (TLS) (https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual-node-tls.html#virtual-node-tls-prerequisites). - File *ListenerTlsFileCertificate `locationName:"file" type:"structure"` + // Routes is a required field + Routes []*RouteRef `locationName:"routes" type:"list" required:"true"` } // String returns the string representation -func (s ListenerTlsCertificate) String() string { +func (s ListRoutesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListenerTlsCertificate) GoString() string { +func (s ListRoutesOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListenerTlsCertificate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListenerTlsCertificate"} - if s.Acm != nil { - if err := s.Acm.Validate(); err != nil { - invalidParams.AddNested("Acm", err.(request.ErrInvalidParams)) - } - } - if s.File != nil { - if err := s.File.Validate(); err != nil { - invalidParams.AddNested("File", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAcm sets the Acm field's value. -func (s *ListenerTlsCertificate) SetAcm(v *ListenerTlsAcmCertificate) *ListenerTlsCertificate { - s.Acm = v +// SetNextToken sets the NextToken field's value. +func (s *ListRoutesOutput) SetNextToken(v string) *ListRoutesOutput { + s.NextToken = &v return s } -// SetFile sets the File field's value. -func (s *ListenerTlsCertificate) SetFile(v *ListenerTlsFileCertificate) *ListenerTlsCertificate { - s.File = v +// SetRoutes sets the Routes field's value. +func (s *ListRoutesOutput) SetRoutes(v []*RouteRef) *ListRoutesOutput { + s.Routes = v return s } -// An object that represents a local file certificate. The certificate must -// meet specific requirements and you must have proxy authorization enabled. -// For more information, see Transport Layer Security (TLS) (https://docs.aws.amazon.com/app-mesh/latest/userguide/virtual-node-tls.html#virtual-node-tls-prerequisites). 
-type ListenerTlsFileCertificate struct { +type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // CertificateChain is a required field - CertificateChain *string `locationName:"certificateChain" min:"1" type:"string" required:"true"` + Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` - // PrivateKey is a required field - PrivateKey *string `locationName:"privateKey" min:"1" type:"string" required:"true"` + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // ResourceArn is a required field + ResourceArn *string `location:"querystring" locationName:"resourceArn" type:"string" required:"true"` } // String returns the string representation -func (s ListenerTlsFileCertificate) String() string { +func (s ListTagsForResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListenerTlsFileCertificate) GoString() string { +func (s ListTagsForResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListenerTlsFileCertificate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListenerTlsFileCertificate"} - if s.CertificateChain == nil { - invalidParams.Add(request.NewErrParamRequired("CertificateChain")) - } - if s.CertificateChain != nil && len(*s.CertificateChain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CertificateChain", 1)) - } - if s.PrivateKey == nil { - invalidParams.Add(request.NewErrParamRequired("PrivateKey")) +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } - if s.PrivateKey != nil && len(*s.PrivateKey) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PrivateKey", 1)) + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) } if invalidParams.Len() > 0 { @@ -7399,89 +9428,92 @@ func (s *ListenerTlsFileCertificate) Validate() error { return nil } -// SetCertificateChain sets the CertificateChain field's value. -func (s *ListenerTlsFileCertificate) SetCertificateChain(v string) *ListenerTlsFileCertificate { - s.CertificateChain = &v +// SetLimit sets the Limit field's value. +func (s *ListTagsForResourceInput) SetLimit(v int64) *ListTagsForResourceInput { + s.Limit = &v return s } -// SetPrivateKey sets the PrivateKey field's value. -func (s *ListenerTlsFileCertificate) SetPrivateKey(v string) *ListenerTlsFileCertificate { - s.PrivateKey = &v +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { + s.NextToken = &v return s } -// An object that represents the logging information for a virtual node. -type Logging struct { +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // An object that represents the access logging information for a virtual node. 
- AccessLog *AccessLog `locationName:"accessLog" type:"structure"` + NextToken *string `locationName:"nextToken" type:"string"` + + // Tags is a required field + Tags []*TagRef `locationName:"tags" type:"list" required:"true"` } // String returns the string representation -func (s Logging) String() string { +func (s ListTagsForResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Logging) GoString() string { +func (s ListTagsForResourceOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *Logging) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Logging"} - if s.AccessLog != nil { - if err := s.AccessLog.Validate(); err != nil { - invalidParams.AddNested("AccessLog", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { + s.NextToken = &v + return s } -// SetAccessLog sets the AccessLog field's value. -func (s *Logging) SetAccessLog(v *AccessLog) *Logging { - s.AccessLog = v +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*TagRef) *ListTagsForResourceOutput { + s.Tags = v return s } -// An object that represents the range of values to match on. The first character -// of the range is included in the range, though the last character is not. -// For example, if the range specified were 1-100, only values 1-99 would be -// matched. -type MatchRange struct { +type ListVirtualGatewaysInput struct { _ struct{} `type:"structure"` - // End is a required field - End *int64 `locationName:"end" type:"long" required:"true"` + Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` - // Start is a required field - Start *int64 `locationName:"start" type:"long" required:"true"` + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s MatchRange) String() string { +func (s ListVirtualGatewaysInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MatchRange) GoString() string { +func (s ListVirtualGatewaysInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *MatchRange) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MatchRange"} - if s.End == nil { - invalidParams.Add(request.NewErrParamRequired("End")) +func (s *ListVirtualGatewaysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListVirtualGatewaysInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } - if s.Start == nil { - invalidParams.Add(request.NewErrParamRequired("Start")) + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) } if invalidParams.Len() > 0 { @@ -7490,625 +9522,2876 @@ func (s *MatchRange) Validate() error { return nil } -// SetEnd sets the End field's value. -func (s *MatchRange) SetEnd(v int64) *MatchRange { - s.End = &v +// SetLimit sets the Limit field's value. +func (s *ListVirtualGatewaysInput) SetLimit(v int64) *ListVirtualGatewaysInput { + s.Limit = &v return s } -// SetStart sets the Start field's value. -func (s *MatchRange) SetStart(v int64) *MatchRange { - s.Start = &v +// SetMeshName sets the MeshName field's value. +func (s *ListVirtualGatewaysInput) SetMeshName(v string) *ListVirtualGatewaysInput { + s.MeshName = &v return s } -// An object that represents a service mesh returned by a describe operation. -type MeshData struct { - _ struct{} `type:"structure"` +// SetMeshOwner sets the MeshOwner field's value. +func (s *ListVirtualGatewaysInput) SetMeshOwner(v string) *ListVirtualGatewaysInput { + s.MeshOwner = &v + return s +} - // MeshName is a required field - MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` +// SetNextToken sets the NextToken field's value. +func (s *ListVirtualGatewaysInput) SetNextToken(v string) *ListVirtualGatewaysInput { + s.NextToken = &v + return s +} - // An object that represents metadata for a resource. - // - // Metadata is a required field - Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` +type ListVirtualGatewaysOutput struct { + _ struct{} `type:"structure"` - // An object that represents the specification of a service mesh. - // - // Spec is a required field - Spec *MeshSpec `locationName:"spec" type:"structure" required:"true"` + NextToken *string `locationName:"nextToken" type:"string"` - // An object that represents the status of a service mesh. - // - // Status is a required field - Status *MeshStatus `locationName:"status" type:"structure" required:"true"` + // VirtualGateways is a required field + VirtualGateways []*VirtualGatewayRef `locationName:"virtualGateways" type:"list" required:"true"` } // String returns the string representation -func (s MeshData) String() string { +func (s ListVirtualGatewaysOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MeshData) GoString() string { +func (s ListVirtualGatewaysOutput) GoString() string { return s.String() } -// SetMeshName sets the MeshName field's value. -func (s *MeshData) SetMeshName(v string) *MeshData { - s.MeshName = &v - return s -} - -// SetMetadata sets the Metadata field's value. -func (s *MeshData) SetMetadata(v *ResourceMetadata) *MeshData { - s.Metadata = v - return s -} - -// SetSpec sets the Spec field's value. 
-func (s *MeshData) SetSpec(v *MeshSpec) *MeshData { - s.Spec = v +// SetNextToken sets the NextToken field's value. +func (s *ListVirtualGatewaysOutput) SetNextToken(v string) *ListVirtualGatewaysOutput { + s.NextToken = &v return s } -// SetStatus sets the Status field's value. -func (s *MeshData) SetStatus(v *MeshStatus) *MeshData { - s.Status = v +// SetVirtualGateways sets the VirtualGateways field's value. +func (s *ListVirtualGatewaysOutput) SetVirtualGateways(v []*VirtualGatewayRef) *ListVirtualGatewaysOutput { + s.VirtualGateways = v return s } -// An object that represents a service mesh returned by a list operation. -type MeshRef struct { +type ListVirtualNodesInput struct { _ struct{} `type:"structure"` - // Arn is a required field - Arn *string `locationName:"arn" type:"string" required:"true"` + Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` // MeshName is a required field - MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // MeshOwner is a required field - MeshOwner *string `locationName:"meshOwner" min:"12" type:"string" required:"true"` + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` - // ResourceOwner is a required field - ResourceOwner *string `locationName:"resourceOwner" min:"12" type:"string" required:"true"` + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s MeshRef) String() string { +func (s ListVirtualNodesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MeshRef) GoString() string { +func (s ListVirtualNodesInput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *MeshRef) SetArn(v string) *MeshRef { - s.Arn = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListVirtualNodesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListVirtualNodesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *ListVirtualNodesInput) SetLimit(v int64) *ListVirtualNodesInput { + s.Limit = &v return s } // SetMeshName sets the MeshName field's value. -func (s *MeshRef) SetMeshName(v string) *MeshRef { +func (s *ListVirtualNodesInput) SetMeshName(v string) *ListVirtualNodesInput { s.MeshName = &v return s } // SetMeshOwner sets the MeshOwner field's value. -func (s *MeshRef) SetMeshOwner(v string) *MeshRef { +func (s *ListVirtualNodesInput) SetMeshOwner(v string) *ListVirtualNodesInput { s.MeshOwner = &v return s } -// SetResourceOwner sets the ResourceOwner field's value. -func (s *MeshRef) SetResourceOwner(v string) *MeshRef { - s.ResourceOwner = &v +// SetNextToken sets the NextToken field's value. 
+func (s *ListVirtualNodesInput) SetNextToken(v string) *ListVirtualNodesInput { + s.NextToken = &v return s } -// An object that represents the specification of a service mesh. -type MeshSpec struct { +type ListVirtualNodesOutput struct { _ struct{} `type:"structure"` - // An object that represents the egress filter rules for a service mesh. - EgressFilter *EgressFilter `locationName:"egressFilter" type:"structure"` + NextToken *string `locationName:"nextToken" type:"string"` + + // VirtualNodes is a required field + VirtualNodes []*VirtualNodeRef `locationName:"virtualNodes" type:"list" required:"true"` } // String returns the string representation -func (s MeshSpec) String() string { +func (s ListVirtualNodesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MeshSpec) GoString() string { +func (s ListVirtualNodesOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *MeshSpec) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MeshSpec"} - if s.EgressFilter != nil { - if err := s.EgressFilter.Validate(); err != nil { - invalidParams.AddNested("EgressFilter", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetNextToken sets the NextToken field's value. +func (s *ListVirtualNodesOutput) SetNextToken(v string) *ListVirtualNodesOutput { + s.NextToken = &v + return s } -// SetEgressFilter sets the EgressFilter field's value. -func (s *MeshSpec) SetEgressFilter(v *EgressFilter) *MeshSpec { - s.EgressFilter = v +// SetVirtualNodes sets the VirtualNodes field's value. +func (s *ListVirtualNodesOutput) SetVirtualNodes(v []*VirtualNodeRef) *ListVirtualNodesOutput { + s.VirtualNodes = v return s } -// An object that represents the status of a service mesh. -type MeshStatus struct { +type ListVirtualRoutersInput struct { _ struct{} `type:"structure"` - Status *string `locationName:"status" type:"string" enum:"MeshStatusCode"` -} - -// String returns the string representation -func (s MeshStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MeshStatus) GoString() string { - return s.String() -} + Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` -// SetStatus sets the Status field's value. -func (s *MeshStatus) SetStatus(v string) *MeshStatus { - s.Status = &v - return s -} + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` -// The specified resource doesn't exist. Check your request syntax and try again. 
-type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` - Message_ *string `locationName:"message" type:"string"` + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s NotFoundException) String() string { +func (s ListVirtualRoutersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NotFoundException) GoString() string { +func (s ListVirtualRoutersInput) GoString() string { return s.String() } -func newErrorNotFoundException(v protocol.ResponseMetadata) error { - return &NotFoundException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListVirtualRoutersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListVirtualRoutersInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) } -} - -// Code returns the exception type name. -func (s NotFoundException) Code() string { - return "NotFoundException" -} -// Message returns the exception's message. -func (s NotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ + if invalidParams.Len() > 0 { + return invalidParams } - return "" + return nil } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { - return nil +// SetLimit sets the Limit field's value. +func (s *ListVirtualRoutersInput) SetLimit(v int64) *ListVirtualRoutersInput { + s.Limit = &v + return s } -func (s NotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetMeshName sets the MeshName field's value. +func (s *ListVirtualRoutersInput) SetMeshName(v string) *ListVirtualRoutersInput { + s.MeshName = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +// SetMeshOwner sets the MeshOwner field's value. +func (s *ListVirtualRoutersInput) SetMeshOwner(v string) *ListVirtualRoutersInput { + s.MeshOwner = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +// SetNextToken sets the NextToken field's value. +func (s *ListVirtualRoutersInput) SetNextToken(v string) *ListVirtualRoutersInput { + s.NextToken = &v + return s } -// An object that represents a port mapping. 
-type PortMapping struct { +type ListVirtualRoutersOutput struct { _ struct{} `type:"structure"` - // Port is a required field - Port *int64 `locationName:"port" min:"1" type:"integer" required:"true"` + NextToken *string `locationName:"nextToken" type:"string"` - // Protocol is a required field - Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"PortProtocol"` + // VirtualRouters is a required field + VirtualRouters []*VirtualRouterRef `locationName:"virtualRouters" type:"list" required:"true"` } // String returns the string representation -func (s PortMapping) String() string { +func (s ListVirtualRoutersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PortMapping) GoString() string { +func (s ListVirtualRoutersOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *PortMapping) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PortMapping"} - if s.Port == nil { - invalidParams.Add(request.NewErrParamRequired("Port")) - } - if s.Port != nil && *s.Port < 1 { - invalidParams.Add(request.NewErrParamMinValue("Port", 1)) - } - if s.Protocol == nil { - invalidParams.Add(request.NewErrParamRequired("Protocol")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPort sets the Port field's value. -func (s *PortMapping) SetPort(v int64) *PortMapping { - s.Port = &v +// SetNextToken sets the NextToken field's value. +func (s *ListVirtualRoutersOutput) SetNextToken(v string) *ListVirtualRoutersOutput { + s.NextToken = &v return s } -// SetProtocol sets the Protocol field's value. -func (s *PortMapping) SetProtocol(v string) *PortMapping { - s.Protocol = &v +// SetVirtualRouters sets the VirtualRouters field's value. +func (s *ListVirtualRoutersOutput) SetVirtualRouters(v []*VirtualRouterRef) *ListVirtualRoutersOutput { + s.VirtualRouters = v return s } -// You can't delete the specified resource because it's in use or required by -// another resource. -type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +type ListVirtualServicesInput struct { + _ struct{} `type:"structure"` - Message_ *string `locationName:"message" type:"string"` + Limit *int64 `location:"querystring" locationName:"limit" min:"1" type:"integer"` + + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s ResourceInUseException) String() string { +func (s ListVirtualServicesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ResourceInUseException) GoString() string { +func (s ListVirtualServicesInput) GoString() string { return s.String() } -func newErrorResourceInUseException(v protocol.ResponseMetadata) error { - return &ResourceInUseException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListVirtualServicesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListVirtualServicesInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// Code returns the exception type name. -func (s ResourceInUseException) Code() string { - return "ResourceInUseException" +// SetLimit sets the Limit field's value. +func (s *ListVirtualServicesInput) SetLimit(v int64) *ListVirtualServicesInput { + s.Limit = &v + return s } -// Message returns the exception's message. -func (s ResourceInUseException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetMeshName sets the MeshName field's value. +func (s *ListVirtualServicesInput) SetMeshName(v string) *ListVirtualServicesInput { + s.MeshName = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { - return nil +// SetMeshOwner sets the MeshOwner field's value. +func (s *ListVirtualServicesInput) SetMeshOwner(v string) *ListVirtualServicesInput { + s.MeshOwner = &v + return s } -func (s ResourceInUseException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetNextToken sets the NextToken field's value. +func (s *ListVirtualServicesInput) SetNextToken(v string) *ListVirtualServicesInput { + s.NextToken = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +type ListVirtualServicesOutput struct { + _ struct{} `type:"structure"` + + NextToken *string `locationName:"nextToken" type:"string"` + + // VirtualServices is a required field + VirtualServices []*VirtualServiceRef `locationName:"virtualServices" type:"list" required:"true"` } -// RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +// String returns the string representation +func (s ListVirtualServicesOutput) String() string { + return awsutil.Prettify(s) } -// An object that represents metadata for a resource. -type ResourceMetadata struct { - _ struct{} `type:"structure"` +// GoString returns the string representation +func (s ListVirtualServicesOutput) GoString() string { + return s.String() +} - // Arn is a required field - Arn *string `locationName:"arn" type:"string" required:"true"` +// SetNextToken sets the NextToken field's value. +func (s *ListVirtualServicesOutput) SetNextToken(v string) *ListVirtualServicesOutput { + s.NextToken = &v + return s +} - // CreatedAt is a required field - CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"` +// SetVirtualServices sets the VirtualServices field's value. 
+func (s *ListVirtualServicesOutput) SetVirtualServices(v []*VirtualServiceRef) *ListVirtualServicesOutput { + s.VirtualServices = v + return s +} - // LastUpdatedAt is a required field - LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" required:"true"` +// An object that represents a listener for a virtual node. +type Listener struct { + _ struct{} `type:"structure"` - // MeshOwner is a required field - MeshOwner *string `locationName:"meshOwner" min:"12" type:"string" required:"true"` + // An object that represents the health check policy for a virtual node's listener. + HealthCheck *HealthCheckPolicy `locationName:"healthCheck" type:"structure"` - // ResourceOwner is a required field - ResourceOwner *string `locationName:"resourceOwner" min:"12" type:"string" required:"true"` + // An object that represents a port mapping. + // + // PortMapping is a required field + PortMapping *PortMapping `locationName:"portMapping" type:"structure" required:"true"` - // Uid is a required field - Uid *string `locationName:"uid" type:"string" required:"true"` + // An object that represents timeouts for different protocols. + Timeout *ListenerTimeout `locationName:"timeout" type:"structure"` - // Version is a required field - Version *int64 `locationName:"version" type:"long" required:"true"` + // An object that represents the Transport Layer Security (TLS) properties for + // a listener. + Tls *ListenerTls `locationName:"tls" type:"structure"` } // String returns the string representation -func (s ResourceMetadata) String() string { +func (s Listener) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ResourceMetadata) GoString() string { +func (s Listener) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *ResourceMetadata) SetArn(v string) *ResourceMetadata { - s.Arn = &v - return s -} - -// SetCreatedAt sets the CreatedAt field's value. -func (s *ResourceMetadata) SetCreatedAt(v time.Time) *ResourceMetadata { - s.CreatedAt = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *Listener) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Listener"} + if s.PortMapping == nil { + invalidParams.Add(request.NewErrParamRequired("PortMapping")) + } + if s.HealthCheck != nil { + if err := s.HealthCheck.Validate(); err != nil { + invalidParams.AddNested("HealthCheck", err.(request.ErrInvalidParams)) + } + } + if s.PortMapping != nil { + if err := s.PortMapping.Validate(); err != nil { + invalidParams.AddNested("PortMapping", err.(request.ErrInvalidParams)) + } + } + if s.Tls != nil { + if err := s.Tls.Validate(); err != nil { + invalidParams.AddNested("Tls", err.(request.ErrInvalidParams)) + } + } -// SetLastUpdatedAt sets the LastUpdatedAt field's value. -func (s *ResourceMetadata) SetLastUpdatedAt(v time.Time) *ResourceMetadata { - s.LastUpdatedAt = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetMeshOwner sets the MeshOwner field's value. -func (s *ResourceMetadata) SetMeshOwner(v string) *ResourceMetadata { - s.MeshOwner = &v +// SetHealthCheck sets the HealthCheck field's value. +func (s *Listener) SetHealthCheck(v *HealthCheckPolicy) *Listener { + s.HealthCheck = v return s } -// SetResourceOwner sets the ResourceOwner field's value. 
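Editor's note: a hedged sketch of wiring the Listener and PortMapping shapes from this hunk together with their generated setters; the package path, port, and protocol string are illustrative, and the PortProtocol enum constants are assumed to be defined elsewhere in this package.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/appmesh" // assumed import path
)

func main() {
	// A listener needs at least a PortMapping; HealthCheck, Timeout, and Tls are optional.
	listener := (&appmesh.Listener{}).
		SetPortMapping((&appmesh.PortMapping{}).
			SetPort(8080).       // must be >= 1 per the min:"1" tag
			SetProtocol("http")) // plain string used here for illustration

	// Validate cascades into PortMapping.Validate (and HealthCheck/Tls when set).
	if err := listener.Validate(); err != nil {
		log.Fatal(err)
	}
}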
-func (s *ResourceMetadata) SetResourceOwner(v string) *ResourceMetadata { - s.ResourceOwner = &v +// SetPortMapping sets the PortMapping field's value. +func (s *Listener) SetPortMapping(v *PortMapping) *Listener { + s.PortMapping = v return s } -// SetUid sets the Uid field's value. -func (s *ResourceMetadata) SetUid(v string) *ResourceMetadata { - s.Uid = &v +// SetTimeout sets the Timeout field's value. +func (s *Listener) SetTimeout(v *ListenerTimeout) *Listener { + s.Timeout = v return s } -// SetVersion sets the Version field's value. -func (s *ResourceMetadata) SetVersion(v int64) *ResourceMetadata { - s.Version = &v +// SetTls sets the Tls field's value. +func (s *Listener) SetTls(v *ListenerTls) *Listener { + s.Tls = v return s } -// An object that represents a route returned by a describe operation. -type RouteData struct { +// An object that represents timeouts for different protocols. +type ListenerTimeout struct { _ struct{} `type:"structure"` - // MeshName is a required field - MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - - // An object that represents metadata for a resource. - // - // Metadata is a required field - Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - - // RouteName is a required field - RouteName *string `locationName:"routeName" min:"1" type:"string" required:"true"` + // An object that represents types of timeouts. + Grpc *GrpcTimeout `locationName:"grpc" type:"structure"` - // An object that represents a route specification. Specify one route type. - // - // Spec is a required field - Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` + // An object that represents types of timeouts. + Http *HttpTimeout `locationName:"http" type:"structure"` - // An object that represents the current status of a route. - // - // Status is a required field - Status *RouteStatus `locationName:"status" type:"structure" required:"true"` + // An object that represents types of timeouts. + Http2 *HttpTimeout `locationName:"http2" type:"structure"` - // VirtualRouterName is a required field - VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"` + // An object that represents types of timeouts. + Tcp *TcpTimeout `locationName:"tcp" type:"structure"` } // String returns the string representation -func (s RouteData) String() string { +func (s ListenerTimeout) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RouteData) GoString() string { +func (s ListenerTimeout) GoString() string { return s.String() } -// SetMeshName sets the MeshName field's value. -func (s *RouteData) SetMeshName(v string) *RouteData { - s.MeshName = &v +// SetGrpc sets the Grpc field's value. +func (s *ListenerTimeout) SetGrpc(v *GrpcTimeout) *ListenerTimeout { + s.Grpc = v return s } -// SetMetadata sets the Metadata field's value. -func (s *RouteData) SetMetadata(v *ResourceMetadata) *RouteData { - s.Metadata = v +// SetHttp sets the Http field's value. +func (s *ListenerTimeout) SetHttp(v *HttpTimeout) *ListenerTimeout { + s.Http = v return s } -// SetRouteName sets the RouteName field's value. -func (s *RouteData) SetRouteName(v string) *RouteData { - s.RouteName = &v +// SetHttp2 sets the Http2 field's value. +func (s *ListenerTimeout) SetHttp2(v *HttpTimeout) *ListenerTimeout { + s.Http2 = v + return s +} + +// SetTcp sets the Tcp field's value. 
+func (s *ListenerTimeout) SetTcp(v *TcpTimeout) *ListenerTimeout { + s.Tcp = v + return s +} + +// An object that represents the Transport Layer Security (TLS) properties for +// a listener. +type ListenerTls struct { + _ struct{} `type:"structure"` + + // An object that represents a listener's Transport Layer Security (TLS) certificate. + // + // Certificate is a required field + Certificate *ListenerTlsCertificate `locationName:"certificate" type:"structure" required:"true"` + + // Mode is a required field + Mode *string `locationName:"mode" type:"string" required:"true" enum:"ListenerTlsMode"` +} + +// String returns the string representation +func (s ListenerTls) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListenerTls) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListenerTls) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListenerTls"} + if s.Certificate == nil { + invalidParams.Add(request.NewErrParamRequired("Certificate")) + } + if s.Mode == nil { + invalidParams.Add(request.NewErrParamRequired("Mode")) + } + if s.Certificate != nil { + if err := s.Certificate.Validate(); err != nil { + invalidParams.AddNested("Certificate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificate sets the Certificate field's value. +func (s *ListenerTls) SetCertificate(v *ListenerTlsCertificate) *ListenerTls { + s.Certificate = v + return s +} + +// SetMode sets the Mode field's value. +func (s *ListenerTls) SetMode(v string) *ListenerTls { + s.Mode = &v + return s +} + +// An object that represents an AWS Certicate Manager (ACM) certificate. +type ListenerTlsAcmCertificate struct { + _ struct{} `type:"structure"` + + // CertificateArn is a required field + CertificateArn *string `locationName:"certificateArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListenerTlsAcmCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListenerTlsAcmCertificate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListenerTlsAcmCertificate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListenerTlsAcmCertificate"} + if s.CertificateArn == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *ListenerTlsAcmCertificate) SetCertificateArn(v string) *ListenerTlsAcmCertificate { + s.CertificateArn = &v + return s +} + +// An object that represents a listener's Transport Layer Security (TLS) certificate. +type ListenerTlsCertificate struct { + _ struct{} `type:"structure"` + + // An object that represents an AWS Certicate Manager (ACM) certificate. + Acm *ListenerTlsAcmCertificate `locationName:"acm" type:"structure"` + + // An object that represents a local file certificate. The certificate must + // meet specific requirements and you must have proxy authorization enabled. + // For more information, see Transport Layer Security (TLS) (https://docs.aws.amazon.com/app-mesh/latest/userguide/tls.html#virtual-node-tls-prerequisites). 
+ File *ListenerTlsFileCertificate `locationName:"file" type:"structure"` +} + +// String returns the string representation +func (s ListenerTlsCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListenerTlsCertificate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListenerTlsCertificate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListenerTlsCertificate"} + if s.Acm != nil { + if err := s.Acm.Validate(); err != nil { + invalidParams.AddNested("Acm", err.(request.ErrInvalidParams)) + } + } + if s.File != nil { + if err := s.File.Validate(); err != nil { + invalidParams.AddNested("File", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcm sets the Acm field's value. +func (s *ListenerTlsCertificate) SetAcm(v *ListenerTlsAcmCertificate) *ListenerTlsCertificate { + s.Acm = v + return s +} + +// SetFile sets the File field's value. +func (s *ListenerTlsCertificate) SetFile(v *ListenerTlsFileCertificate) *ListenerTlsCertificate { + s.File = v + return s +} + +// An object that represents a local file certificate. The certificate must +// meet specific requirements and you must have proxy authorization enabled. +// For more information, see Transport Layer Security (TLS) (https://docs.aws.amazon.com/app-mesh/latest/userguide/tls.html#virtual-node-tls-prerequisites). +type ListenerTlsFileCertificate struct { + _ struct{} `type:"structure"` + + // CertificateChain is a required field + CertificateChain *string `locationName:"certificateChain" min:"1" type:"string" required:"true"` + + // PrivateKey is a required field + PrivateKey *string `locationName:"privateKey" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListenerTlsFileCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListenerTlsFileCertificate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListenerTlsFileCertificate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListenerTlsFileCertificate"} + if s.CertificateChain == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateChain")) + } + if s.CertificateChain != nil && len(*s.CertificateChain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateChain", 1)) + } + if s.PrivateKey == nil { + invalidParams.Add(request.NewErrParamRequired("PrivateKey")) + } + if s.PrivateKey != nil && len(*s.PrivateKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PrivateKey", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateChain sets the CertificateChain field's value. +func (s *ListenerTlsFileCertificate) SetCertificateChain(v string) *ListenerTlsFileCertificate { + s.CertificateChain = &v + return s +} + +// SetPrivateKey sets the PrivateKey field's value. +func (s *ListenerTlsFileCertificate) SetPrivateKey(v string) *ListenerTlsFileCertificate { + s.PrivateKey = &v + return s +} + +// An object that represents the logging information for a virtual node. +type Logging struct { + _ struct{} `type:"structure"` + + // An object that represents the access logging information for a virtual node. 
+ AccessLog *AccessLog `locationName:"accessLog" type:"structure"` +} + +// String returns the string representation +func (s Logging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Logging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Logging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Logging"} + if s.AccessLog != nil { + if err := s.AccessLog.Validate(); err != nil { + invalidParams.AddNested("AccessLog", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessLog sets the AccessLog field's value. +func (s *Logging) SetAccessLog(v *AccessLog) *Logging { + s.AccessLog = v + return s +} + +// An object that represents the range of values to match on. The first character +// of the range is included in the range, though the last character is not. +// For example, if the range specified were 1-100, only values 1-99 would be +// matched. +type MatchRange struct { + _ struct{} `type:"structure"` + + // End is a required field + End *int64 `locationName:"end" type:"long" required:"true"` + + // Start is a required field + Start *int64 `locationName:"start" type:"long" required:"true"` +} + +// String returns the string representation +func (s MatchRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MatchRange) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MatchRange) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MatchRange"} + if s.End == nil { + invalidParams.Add(request.NewErrParamRequired("End")) + } + if s.Start == nil { + invalidParams.Add(request.NewErrParamRequired("Start")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnd sets the End field's value. +func (s *MatchRange) SetEnd(v int64) *MatchRange { + s.End = &v + return s +} + +// SetStart sets the Start field's value. +func (s *MatchRange) SetStart(v int64) *MatchRange { + s.Start = &v + return s +} + +// An object that represents a service mesh returned by a describe operation. +type MeshData struct { + _ struct{} `type:"structure"` + + // MeshName is a required field + MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` + + // An object that represents metadata for a resource. + // + // Metadata is a required field + Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` + + // An object that represents the specification of a service mesh. + // + // Spec is a required field + Spec *MeshSpec `locationName:"spec" type:"structure" required:"true"` + + // An object that represents the status of a service mesh. + // + // Status is a required field + Status *MeshStatus `locationName:"status" type:"structure" required:"true"` +} + +// String returns the string representation +func (s MeshData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MeshData) GoString() string { + return s.String() +} + +// SetMeshName sets the MeshName field's value. +func (s *MeshData) SetMeshName(v string) *MeshData { + s.MeshName = &v + return s +} + +// SetMetadata sets the Metadata field's value. 
+func (s *MeshData) SetMetadata(v *ResourceMetadata) *MeshData { + s.Metadata = v + return s +} + +// SetSpec sets the Spec field's value. +func (s *MeshData) SetSpec(v *MeshSpec) *MeshData { + s.Spec = v + return s +} + +// SetStatus sets the Status field's value. +func (s *MeshData) SetStatus(v *MeshStatus) *MeshData { + s.Status = v + return s +} + +// An object that represents a service mesh returned by a list operation. +type MeshRef struct { + _ struct{} `type:"structure"` + + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` + + // CreatedAt is a required field + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"` + + // LastUpdatedAt is a required field + LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" required:"true"` + + // MeshName is a required field + MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` + + // MeshOwner is a required field + MeshOwner *string `locationName:"meshOwner" min:"12" type:"string" required:"true"` + + // ResourceOwner is a required field + ResourceOwner *string `locationName:"resourceOwner" min:"12" type:"string" required:"true"` + + // Version is a required field + Version *int64 `locationName:"version" type:"long" required:"true"` +} + +// String returns the string representation +func (s MeshRef) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MeshRef) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *MeshRef) SetArn(v string) *MeshRef { + s.Arn = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *MeshRef) SetCreatedAt(v time.Time) *MeshRef { + s.CreatedAt = &v + return s +} + +// SetLastUpdatedAt sets the LastUpdatedAt field's value. +func (s *MeshRef) SetLastUpdatedAt(v time.Time) *MeshRef { + s.LastUpdatedAt = &v + return s +} + +// SetMeshName sets the MeshName field's value. +func (s *MeshRef) SetMeshName(v string) *MeshRef { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. +func (s *MeshRef) SetMeshOwner(v string) *MeshRef { + s.MeshOwner = &v + return s +} + +// SetResourceOwner sets the ResourceOwner field's value. +func (s *MeshRef) SetResourceOwner(v string) *MeshRef { + s.ResourceOwner = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *MeshRef) SetVersion(v int64) *MeshRef { + s.Version = &v + return s +} + +// An object that represents the specification of a service mesh. +type MeshSpec struct { + _ struct{} `type:"structure"` + + // An object that represents the egress filter rules for a service mesh. + EgressFilter *EgressFilter `locationName:"egressFilter" type:"structure"` +} + +// String returns the string representation +func (s MeshSpec) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MeshSpec) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MeshSpec) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MeshSpec"} + if s.EgressFilter != nil { + if err := s.EgressFilter.Validate(); err != nil { + invalidParams.AddNested("EgressFilter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEgressFilter sets the EgressFilter field's value. 
+func (s *MeshSpec) SetEgressFilter(v *EgressFilter) *MeshSpec { + s.EgressFilter = v + return s +} + +// An object that represents the status of a service mesh. +type MeshStatus struct { + _ struct{} `type:"structure"` + + Status *string `locationName:"status" type:"string" enum:"MeshStatusCode"` +} + +// String returns the string representation +func (s MeshStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MeshStatus) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *MeshStatus) SetStatus(v string) *MeshStatus { + s.Status = &v + return s +} + +// The specified resource doesn't exist. Check your request syntax and try again. +type NotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s NotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotFoundException) GoString() string { + return s.String() +} + +func newErrorNotFoundException(v protocol.ResponseMetadata) error { + return &NotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *NotFoundException) Code() string { + return "NotFoundException" +} + +// Message returns the exception's message. +func (s *NotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *NotFoundException) OrigErr() error { + return nil +} + +func (s *NotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An object that represents a port mapping. +type PortMapping struct { + _ struct{} `type:"structure"` + + // Port is a required field + Port *int64 `locationName:"port" min:"1" type:"integer" required:"true"` + + // Protocol is a required field + Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"PortProtocol"` +} + +// String returns the string representation +func (s PortMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PortMapping) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PortMapping) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PortMapping"} + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPort sets the Port field's value. +func (s *PortMapping) SetPort(v int64) *PortMapping { + s.Port = &v + return s +} + +// SetProtocol sets the Protocol field's value. 
+func (s *PortMapping) SetProtocol(v string) *PortMapping { + s.Protocol = &v + return s +} + +// You can't delete the specified resource because it's in use or required by +// another resource. +type ResourceInUseException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ResourceInUseException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceInUseException) GoString() string { + return s.String() +} + +func newErrorResourceInUseException(v protocol.ResponseMetadata) error { + return &ResourceInUseException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceInUseException) Code() string { + return "ResourceInUseException" +} + +// Message returns the exception's message. +func (s *ResourceInUseException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceInUseException) OrigErr() error { + return nil +} + +func (s *ResourceInUseException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An object that represents metadata for a resource. +type ResourceMetadata struct { + _ struct{} `type:"structure"` + + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` + + // CreatedAt is a required field + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"` + + // LastUpdatedAt is a required field + LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" required:"true"` + + // MeshOwner is a required field + MeshOwner *string `locationName:"meshOwner" min:"12" type:"string" required:"true"` + + // ResourceOwner is a required field + ResourceOwner *string `locationName:"resourceOwner" min:"12" type:"string" required:"true"` + + // Uid is a required field + Uid *string `locationName:"uid" type:"string" required:"true"` + + // Version is a required field + Version *int64 `locationName:"version" type:"long" required:"true"` +} + +// String returns the string representation +func (s ResourceMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceMetadata) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *ResourceMetadata) SetArn(v string) *ResourceMetadata { + s.Arn = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *ResourceMetadata) SetCreatedAt(v time.Time) *ResourceMetadata { + s.CreatedAt = &v + return s +} + +// SetLastUpdatedAt sets the LastUpdatedAt field's value. +func (s *ResourceMetadata) SetLastUpdatedAt(v time.Time) *ResourceMetadata { + s.LastUpdatedAt = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. +func (s *ResourceMetadata) SetMeshOwner(v string) *ResourceMetadata { + s.MeshOwner = &v + return s +} + +// SetResourceOwner sets the ResourceOwner field's value. 
+func (s *ResourceMetadata) SetResourceOwner(v string) *ResourceMetadata { + s.ResourceOwner = &v + return s +} + +// SetUid sets the Uid field's value. +func (s *ResourceMetadata) SetUid(v string) *ResourceMetadata { + s.Uid = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *ResourceMetadata) SetVersion(v int64) *ResourceMetadata { + s.Version = &v + return s +} + +// An object that represents a route returned by a describe operation. +type RouteData struct { + _ struct{} `type:"structure"` + + // MeshName is a required field + MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` + + // An object that represents metadata for a resource. + // + // Metadata is a required field + Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` + + // RouteName is a required field + RouteName *string `locationName:"routeName" min:"1" type:"string" required:"true"` + + // An object that represents a route specification. Specify one route type. + // + // Spec is a required field + Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` + + // An object that represents the current status of a route. + // + // Status is a required field + Status *RouteStatus `locationName:"status" type:"structure" required:"true"` + + // VirtualRouterName is a required field + VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RouteData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteData) GoString() string { + return s.String() +} + +// SetMeshName sets the MeshName field's value. +func (s *RouteData) SetMeshName(v string) *RouteData { + s.MeshName = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *RouteData) SetMetadata(v *ResourceMetadata) *RouteData { + s.Metadata = v + return s +} + +// SetRouteName sets the RouteName field's value. +func (s *RouteData) SetRouteName(v string) *RouteData { + s.RouteName = &v + return s +} + +// SetSpec sets the Spec field's value. +func (s *RouteData) SetSpec(v *RouteSpec) *RouteData { + s.Spec = v + return s +} + +// SetStatus sets the Status field's value. +func (s *RouteData) SetStatus(v *RouteStatus) *RouteData { + s.Status = v + return s +} + +// SetVirtualRouterName sets the VirtualRouterName field's value. +func (s *RouteData) SetVirtualRouterName(v string) *RouteData { + s.VirtualRouterName = &v + return s +} + +// An object that represents a route returned by a list operation. 
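Editor's note: the exception shapes in this hunk expose exported RespMetadata and pointer receivers, so callers can unwrap them directly. A sketch of inspecting one follows; the package path is assumed, and err stands in for whatever an App Mesh operation returned.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/service/appmesh" // assumed import path
)

func report(err error) {
	// The pointer receiver on Error() means *ResourceInUseException satisfies
	// the error interface and works with errors.As.
	var inUse *appmesh.ResourceInUseException
	if errors.As(err, &inUse) {
		fmt.Printf("%s (HTTP %d, request %s): %s\n",
			inUse.Code(), inUse.StatusCode(), inUse.RequestID(), inUse.Message())
	}
}

func main() {
	report(nil) // no-op here; in practice pass the error from an App Mesh call
}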
+type RouteRef struct { + _ struct{} `type:"structure"` + + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` + + // CreatedAt is a required field + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"` + + // LastUpdatedAt is a required field + LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" required:"true"` + + // MeshName is a required field + MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` + + // MeshOwner is a required field + MeshOwner *string `locationName:"meshOwner" min:"12" type:"string" required:"true"` + + // ResourceOwner is a required field + ResourceOwner *string `locationName:"resourceOwner" min:"12" type:"string" required:"true"` + + // RouteName is a required field + RouteName *string `locationName:"routeName" min:"1" type:"string" required:"true"` + + // Version is a required field + Version *int64 `locationName:"version" type:"long" required:"true"` + + // VirtualRouterName is a required field + VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RouteRef) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteRef) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *RouteRef) SetArn(v string) *RouteRef { + s.Arn = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *RouteRef) SetCreatedAt(v time.Time) *RouteRef { + s.CreatedAt = &v + return s +} + +// SetLastUpdatedAt sets the LastUpdatedAt field's value. +func (s *RouteRef) SetLastUpdatedAt(v time.Time) *RouteRef { + s.LastUpdatedAt = &v + return s +} + +// SetMeshName sets the MeshName field's value. +func (s *RouteRef) SetMeshName(v string) *RouteRef { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. +func (s *RouteRef) SetMeshOwner(v string) *RouteRef { + s.MeshOwner = &v + return s +} + +// SetResourceOwner sets the ResourceOwner field's value. +func (s *RouteRef) SetResourceOwner(v string) *RouteRef { + s.ResourceOwner = &v + return s +} + +// SetRouteName sets the RouteName field's value. +func (s *RouteRef) SetRouteName(v string) *RouteRef { + s.RouteName = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *RouteRef) SetVersion(v int64) *RouteRef { + s.Version = &v + return s +} + +// SetVirtualRouterName sets the VirtualRouterName field's value. +func (s *RouteRef) SetVirtualRouterName(v string) *RouteRef { + s.VirtualRouterName = &v + return s +} + +// An object that represents a route specification. Specify one route type. +type RouteSpec struct { + _ struct{} `type:"structure"` + + // An object that represents a gRPC route type. + GrpcRoute *GrpcRoute `locationName:"grpcRoute" type:"structure"` + + // An object that represents an HTTP or HTTP/2 route type. + Http2Route *HttpRoute `locationName:"http2Route" type:"structure"` + + // An object that represents an HTTP or HTTP/2 route type. + HttpRoute *HttpRoute `locationName:"httpRoute" type:"structure"` + + Priority *int64 `locationName:"priority" type:"integer"` + + // An object that represents a TCP route type. 
+ TcpRoute *TcpRoute `locationName:"tcpRoute" type:"structure"` +} + +// String returns the string representation +func (s RouteSpec) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteSpec) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RouteSpec) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RouteSpec"} + if s.GrpcRoute != nil { + if err := s.GrpcRoute.Validate(); err != nil { + invalidParams.AddNested("GrpcRoute", err.(request.ErrInvalidParams)) + } + } + if s.Http2Route != nil { + if err := s.Http2Route.Validate(); err != nil { + invalidParams.AddNested("Http2Route", err.(request.ErrInvalidParams)) + } + } + if s.HttpRoute != nil { + if err := s.HttpRoute.Validate(); err != nil { + invalidParams.AddNested("HttpRoute", err.(request.ErrInvalidParams)) + } + } + if s.TcpRoute != nil { + if err := s.TcpRoute.Validate(); err != nil { + invalidParams.AddNested("TcpRoute", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrpcRoute sets the GrpcRoute field's value. +func (s *RouteSpec) SetGrpcRoute(v *GrpcRoute) *RouteSpec { + s.GrpcRoute = v + return s +} + +// SetHttp2Route sets the Http2Route field's value. +func (s *RouteSpec) SetHttp2Route(v *HttpRoute) *RouteSpec { + s.Http2Route = v + return s +} + +// SetHttpRoute sets the HttpRoute field's value. +func (s *RouteSpec) SetHttpRoute(v *HttpRoute) *RouteSpec { + s.HttpRoute = v + return s +} + +// SetPriority sets the Priority field's value. +func (s *RouteSpec) SetPriority(v int64) *RouteSpec { + s.Priority = &v + return s +} + +// SetTcpRoute sets the TcpRoute field's value. +func (s *RouteSpec) SetTcpRoute(v *TcpRoute) *RouteSpec { + s.TcpRoute = v + return s +} + +// An object that represents the current status of a route. +type RouteStatus struct { + _ struct{} `type:"structure"` + + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"RouteStatusCode"` +} + +// String returns the string representation +func (s RouteStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RouteStatus) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *RouteStatus) SetStatus(v string) *RouteStatus { + s.Status = &v + return s +} + +// An object that represents the service discovery information for a virtual +// node. +type ServiceDiscovery struct { + _ struct{} `type:"structure"` + + // An object that represents the AWS Cloud Map service discovery information + // for your virtual node. + AwsCloudMap *AwsCloudMapServiceDiscovery `locationName:"awsCloudMap" type:"structure"` + + // An object that represents the DNS service discovery information for your + // virtual node. + Dns *DnsServiceDiscovery `locationName:"dns" type:"structure"` +} + +// String returns the string representation +func (s ServiceDiscovery) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceDiscovery) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ServiceDiscovery) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServiceDiscovery"} + if s.AwsCloudMap != nil { + if err := s.AwsCloudMap.Validate(); err != nil { + invalidParams.AddNested("AwsCloudMap", err.(request.ErrInvalidParams)) + } + } + if s.Dns != nil { + if err := s.Dns.Validate(); err != nil { + invalidParams.AddNested("Dns", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsCloudMap sets the AwsCloudMap field's value. +func (s *ServiceDiscovery) SetAwsCloudMap(v *AwsCloudMapServiceDiscovery) *ServiceDiscovery { + s.AwsCloudMap = v + return s +} + +// SetDns sets the Dns field's value. +func (s *ServiceDiscovery) SetDns(v *DnsServiceDiscovery) *ServiceDiscovery { + s.Dns = v + return s +} + +// The request has failed due to a temporary failure of the service. +type ServiceUnavailableException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ServiceUnavailableException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceUnavailableException) GoString() string { + return s.String() +} + +func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { + return &ServiceUnavailableException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ServiceUnavailableException) Code() string { + return "ServiceUnavailableException" +} + +// Message returns the exception's message. +func (s *ServiceUnavailableException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ServiceUnavailableException) OrigErr() error { + return nil +} + +func (s *ServiceUnavailableException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Optional metadata that you apply to a resource to assist with categorization +// and organization. Each tag consists of a key and an optional value, both +// of which you define. Tag keys can have a maximum character length of 128 +// characters, and tag values can have a maximum length of 256 characters. +type TagRef struct { + _ struct{} `type:"structure"` + + // Key is a required field + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s TagRef) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagRef) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TagRef) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagRef"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *TagRef) SetKey(v string) *TagRef { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *TagRef) SetValue(v string) *TagRef { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"querystring" locationName:"resourceArn" type:"string" required:"true"` + + // Tags is a required field + Tags []*TagRef `locationName:"tags" type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*TagRef) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// An object that represents a TCP route type. +type TcpRoute struct { + _ struct{} `type:"structure"` + + // An object that represents the action to take if a match is determined. + // + // Action is a required field + Action *TcpRouteAction `locationName:"action" type:"structure" required:"true"` + + // An object that represents types of timeouts. + Timeout *TcpTimeout `locationName:"timeout" type:"structure"` +} + +// String returns the string representation +func (s TcpRoute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TcpRoute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
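Editor's note: a hedged sketch of composing the tagging input defined above; the package path and ARN are placeholders, and the TagResource client operation itself is defined elsewhere in this file.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/appmesh" // assumed import path
)

func main() {
	// Each TagRef needs a Key of at least one character; Value is optional.
	tag := (&appmesh.TagRef{}).SetKey("Environment").SetValue("staging")

	input := (&appmesh.TagResourceInput{}).
		SetResourceArn("arn:aws:appmesh:us-east-1:111122223333:mesh/example-mesh"). // hypothetical ARN
		SetTags([]*appmesh.TagRef{tag})

	// Validate walks the Tags slice and surfaces nested TagRef errors as
	// "Tags[i]" entries in the ErrInvalidParams aggregate.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
}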
+func (s *TcpRoute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TcpRoute"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.Action != nil { + if err := s.Action.Validate(); err != nil { + invalidParams.AddNested("Action", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *TcpRoute) SetAction(v *TcpRouteAction) *TcpRoute { + s.Action = v + return s +} + +// SetTimeout sets the Timeout field's value. +func (s *TcpRoute) SetTimeout(v *TcpTimeout) *TcpRoute { + s.Timeout = v + return s +} + +// An object that represents the action to take if a match is determined. +type TcpRouteAction struct { + _ struct{} `type:"structure"` + + // WeightedTargets is a required field + WeightedTargets []*WeightedTarget `locationName:"weightedTargets" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TcpRouteAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TcpRouteAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TcpRouteAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TcpRouteAction"} + if s.WeightedTargets == nil { + invalidParams.Add(request.NewErrParamRequired("WeightedTargets")) + } + if s.WeightedTargets != nil && len(s.WeightedTargets) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WeightedTargets", 1)) + } + if s.WeightedTargets != nil { + for i, v := range s.WeightedTargets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "WeightedTargets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWeightedTargets sets the WeightedTargets field's value. +func (s *TcpRouteAction) SetWeightedTargets(v []*WeightedTarget) *TcpRouteAction { + s.WeightedTargets = v + return s +} + +// An object that represents types of timeouts. +type TcpTimeout struct { + _ struct{} `type:"structure"` + + // An object that represents a duration of time. + Idle *Duration `locationName:"idle" type:"structure"` +} + +// String returns the string representation +func (s TcpTimeout) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TcpTimeout) GoString() string { + return s.String() +} + +// SetIdle sets the Idle field's value. +func (s *TcpTimeout) SetIdle(v *Duration) *TcpTimeout { + s.Idle = v + return s +} + +// An object that represents a Transport Layer Security (TLS) validation context. +type TlsValidationContext struct { + _ struct{} `type:"structure"` + + // An object that represents a Transport Layer Security (TLS) validation context + // trust. + // + // Trust is a required field + Trust *TlsValidationContextTrust `locationName:"trust" type:"structure" required:"true"` +} + +// String returns the string representation +func (s TlsValidationContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TlsValidationContext) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *TlsValidationContext) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TlsValidationContext"} + if s.Trust == nil { + invalidParams.Add(request.NewErrParamRequired("Trust")) + } + if s.Trust != nil { + if err := s.Trust.Validate(); err != nil { + invalidParams.AddNested("Trust", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTrust sets the Trust field's value. +func (s *TlsValidationContext) SetTrust(v *TlsValidationContextTrust) *TlsValidationContext { + s.Trust = v + return s +} + +// An object that represents a TLS validation context trust for an AWS Certicate +// Manager (ACM) certificate. +type TlsValidationContextAcmTrust struct { + _ struct{} `type:"structure"` + + // CertificateAuthorityArns is a required field + CertificateAuthorityArns []*string `locationName:"certificateAuthorityArns" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TlsValidationContextAcmTrust) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TlsValidationContextAcmTrust) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TlsValidationContextAcmTrust) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TlsValidationContextAcmTrust"} + if s.CertificateAuthorityArns == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateAuthorityArns")) + } + if s.CertificateAuthorityArns != nil && len(s.CertificateAuthorityArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateAuthorityArns", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateAuthorityArns sets the CertificateAuthorityArns field's value. +func (s *TlsValidationContextAcmTrust) SetCertificateAuthorityArns(v []*string) *TlsValidationContextAcmTrust { + s.CertificateAuthorityArns = v + return s +} + +// An object that represents a Transport Layer Security (TLS) validation context +// trust for a local file. +type TlsValidationContextFileTrust struct { + _ struct{} `type:"structure"` + + // CertificateChain is a required field + CertificateChain *string `locationName:"certificateChain" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TlsValidationContextFileTrust) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TlsValidationContextFileTrust) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TlsValidationContextFileTrust) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TlsValidationContextFileTrust"} + if s.CertificateChain == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateChain")) + } + if s.CertificateChain != nil && len(*s.CertificateChain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateChain", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateChain sets the CertificateChain field's value. +func (s *TlsValidationContextFileTrust) SetCertificateChain(v string) *TlsValidationContextFileTrust { + s.CertificateChain = &v + return s +} + +// An object that represents a Transport Layer Security (TLS) validation context +// trust. 
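Editor's note: to tie the TLS shapes in this hunk together, a sketch of a strict-mode listener certificate backed by ACM; the package path, ARN, and literal mode string are illustrative, and the generated ListenerTlsMode constants are assumed to be defined elsewhere in this package.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/appmesh" // assumed import path
)

func main() {
	tls := (&appmesh.ListenerTls{}).
		SetMode("STRICT"). // one of the ListenerTlsMode enum values
		SetCertificate((&appmesh.ListenerTlsCertificate{}).
			SetAcm((&appmesh.ListenerTlsAcmCertificate{}).
				SetCertificateArn("arn:aws:acm:us-east-1:111122223333:certificate/example"))) // hypothetical ARN

	// Validate requires both Certificate and Mode, and cascades into the
	// ACM (or file) certificate's own Validate.
	if err := tls.Validate(); err != nil {
		log.Fatal(err)
	}
}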
+type TlsValidationContextTrust struct { + _ struct{} `type:"structure"` + + // An object that represents a TLS validation context trust for an AWS Certicate + // Manager (ACM) certificate. + Acm *TlsValidationContextAcmTrust `locationName:"acm" type:"structure"` + + // An object that represents a Transport Layer Security (TLS) validation context + // trust for a local file. + File *TlsValidationContextFileTrust `locationName:"file" type:"structure"` +} + +// String returns the string representation +func (s TlsValidationContextTrust) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TlsValidationContextTrust) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TlsValidationContextTrust) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TlsValidationContextTrust"} + if s.Acm != nil { + if err := s.Acm.Validate(); err != nil { + invalidParams.AddNested("Acm", err.(request.ErrInvalidParams)) + } + } + if s.File != nil { + if err := s.File.Validate(); err != nil { + invalidParams.AddNested("File", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcm sets the Acm field's value. +func (s *TlsValidationContextTrust) SetAcm(v *TlsValidationContextAcmTrust) *TlsValidationContextTrust { + s.Acm = v + return s +} + +// SetFile sets the File field's value. +func (s *TlsValidationContextTrust) SetFile(v *TlsValidationContextFileTrust) *TlsValidationContextTrust { + s.File = v + return s +} + +// The maximum request rate permitted by the App Mesh APIs has been exceeded +// for your account. For best results, use an increasing or variable sleep interval +// between requests. +type TooManyRequestsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s TooManyRequestsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TooManyRequestsException) GoString() string { + return s.String() +} + +func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { + return &TooManyRequestsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyRequestsException) Code() string { + return "TooManyRequestsException" +} + +// Message returns the exception's message. +func (s *TooManyRequestsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyRequestsException) OrigErr() error { + return nil +} + +func (s *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The request exceeds the maximum allowed number of tags allowed per resource. +// The current limit is 50 user tags per resource. You must reduce the number +// of tags in the request. None of the tags in this request were applied. 
+type TooManyTagsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s TooManyTagsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TooManyTagsException) GoString() string { + return s.String() +} + +func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { + return &TooManyTagsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyTagsException) Code() string { + return "TooManyTagsException" +} + +// Message returns the exception's message. +func (s *TooManyTagsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyTagsException) OrigErr() error { + return nil +} + +func (s *TooManyTagsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // ResourceArn is a required field + ResourceArn *string `location:"querystring" locationName:"resourceArn" type:"string" required:"true"` + + // TagKeys is a required field + TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. 
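The throttling and tagging exceptions above surface through the usual awserr pattern; a hedged sketch (ListMeshes and the ErrCode* constants come from other parts of this generated package):

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	svc := appmesh.New(session.Must(session.NewSession()))

	if _, err := svc.ListMeshes(&appmesh.ListMeshesInput{}); err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case appmesh.ErrCodeTooManyRequestsException:
				// Throttled: back off with a variable sleep, as the doc
				// comment for TooManyRequestsException recommends.
				time.Sleep(2 * time.Second)
			case appmesh.ErrCodeTooManyTagsException:
				fmt.Println("tag limit exceeded:", aerr.Message())
			default:
				fmt.Println("request failed:", aerr)
			}
		}
	}
}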
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +type UpdateGatewayRouteInput struct { + _ struct{} `type:"structure"` + + ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` + + // GatewayRouteName is a required field + GatewayRouteName *string `location:"uri" locationName:"gatewayRouteName" min:"1" type:"string" required:"true"` + + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + // An object that represents a gateway route specification. Specify one gateway + // route type. + // + // Spec is a required field + Spec *GatewayRouteSpec `locationName:"spec" type:"structure" required:"true"` + + // VirtualGatewayName is a required field + VirtualGatewayName *string `location:"uri" locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateGatewayRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGatewayRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGatewayRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGatewayRouteInput"} + if s.GatewayRouteName == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayRouteName")) + } + if s.GatewayRouteName != nil && len(*s.GatewayRouteName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GatewayRouteName", 1)) + } + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + if s.Spec == nil { + invalidParams.Add(request.NewErrParamRequired("Spec")) + } + if s.VirtualGatewayName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayName")) + } + if s.VirtualGatewayName != nil && len(*s.VirtualGatewayName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualGatewayName", 1)) + } + if s.Spec != nil { + if err := s.Spec.Validate(); err != nil { + invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *UpdateGatewayRouteInput) SetClientToken(v string) *UpdateGatewayRouteInput { + s.ClientToken = &v + return s +} + +// SetGatewayRouteName sets the GatewayRouteName field's value. +func (s *UpdateGatewayRouteInput) SetGatewayRouteName(v string) *UpdateGatewayRouteInput { + s.GatewayRouteName = &v + return s +} + +// SetMeshName sets the MeshName field's value. +func (s *UpdateGatewayRouteInput) SetMeshName(v string) *UpdateGatewayRouteInput { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. 
+func (s *UpdateGatewayRouteInput) SetMeshOwner(v string) *UpdateGatewayRouteInput { + s.MeshOwner = &v return s } // SetSpec sets the Spec field's value. -func (s *RouteData) SetSpec(v *RouteSpec) *RouteData { +func (s *UpdateGatewayRouteInput) SetSpec(v *GatewayRouteSpec) *UpdateGatewayRouteInput { + s.Spec = v + return s +} + +// SetVirtualGatewayName sets the VirtualGatewayName field's value. +func (s *UpdateGatewayRouteInput) SetVirtualGatewayName(v string) *UpdateGatewayRouteInput { + s.VirtualGatewayName = &v + return s +} + +type UpdateGatewayRouteOutput struct { + _ struct{} `type:"structure" payload:"GatewayRoute"` + + // An object that represents a gateway route returned by a describe operation. + // + // GatewayRoute is a required field + GatewayRoute *GatewayRouteData `locationName:"gatewayRoute" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateGatewayRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGatewayRouteOutput) GoString() string { + return s.String() +} + +// SetGatewayRoute sets the GatewayRoute field's value. +func (s *UpdateGatewayRouteOutput) SetGatewayRoute(v *GatewayRouteData) *UpdateGatewayRouteOutput { + s.GatewayRoute = v + return s +} + +type UpdateMeshInput struct { + _ struct{} `type:"structure"` + + ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` + + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + // An object that represents the specification of a service mesh. + Spec *MeshSpec `locationName:"spec" type:"structure"` +} + +// String returns the string representation +func (s UpdateMeshInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMeshInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateMeshInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateMeshInput"} + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.Spec != nil { + if err := s.Spec.Validate(); err != nil { + invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *UpdateMeshInput) SetClientToken(v string) *UpdateMeshInput { + s.ClientToken = &v + return s +} + +// SetMeshName sets the MeshName field's value. +func (s *UpdateMeshInput) SetMeshName(v string) *UpdateMeshInput { + s.MeshName = &v + return s +} + +// SetSpec sets the Spec field's value. +func (s *UpdateMeshInput) SetSpec(v *MeshSpec) *UpdateMeshInput { s.Spec = v return s } -// SetStatus sets the Status field's value. -func (s *RouteData) SetStatus(v *RouteStatus) *RouteData { - s.Status = v +type UpdateMeshOutput struct { + _ struct{} `type:"structure" payload:"Mesh"` + + // An object that represents a service mesh returned by a describe operation. 
+ // + // Mesh is a required field + Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateMeshOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMeshOutput) GoString() string { + return s.String() +} + +// SetMesh sets the Mesh field's value. +func (s *UpdateMeshOutput) SetMesh(v *MeshData) *UpdateMeshOutput { + s.Mesh = v + return s +} + +type UpdateRouteInput struct { + _ struct{} `type:"structure"` + + ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` + + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + // RouteName is a required field + RouteName *string `location:"uri" locationName:"routeName" min:"1" type:"string" required:"true"` + + // An object that represents a route specification. Specify one route type. + // + // Spec is a required field + Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` + + // VirtualRouterName is a required field + VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRouteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRouteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRouteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRouteInput"} + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + if s.RouteName == nil { + invalidParams.Add(request.NewErrParamRequired("RouteName")) + } + if s.RouteName != nil && len(*s.RouteName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RouteName", 1)) + } + if s.Spec == nil { + invalidParams.Add(request.NewErrParamRequired("Spec")) + } + if s.VirtualRouterName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualRouterName")) + } + if s.VirtualRouterName != nil && len(*s.VirtualRouterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualRouterName", 1)) + } + if s.Spec != nil { + if err := s.Spec.Validate(); err != nil { + invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *UpdateRouteInput) SetClientToken(v string) *UpdateRouteInput { + s.ClientToken = &v + return s +} + +// SetMeshName sets the MeshName field's value. +func (s *UpdateRouteInput) SetMeshName(v string) *UpdateRouteInput { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. +func (s *UpdateRouteInput) SetMeshOwner(v string) *UpdateRouteInput { + s.MeshOwner = &v + return s +} + +// SetRouteName sets the RouteName field's value. 
+func (s *UpdateRouteInput) SetRouteName(v string) *UpdateRouteInput { + s.RouteName = &v + return s +} + +// SetSpec sets the Spec field's value. +func (s *UpdateRouteInput) SetSpec(v *RouteSpec) *UpdateRouteInput { + s.Spec = v return s } // SetVirtualRouterName sets the VirtualRouterName field's value. -func (s *RouteData) SetVirtualRouterName(v string) *RouteData { +func (s *UpdateRouteInput) SetVirtualRouterName(v string) *UpdateRouteInput { s.VirtualRouterName = &v return s } -// An object that represents a route returned by a list operation. -type RouteRef struct { +type UpdateRouteOutput struct { + _ struct{} `type:"structure" payload:"Route"` + + // An object that represents a route returned by a describe operation. + // + // Route is a required field + Route *RouteData `locationName:"route" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateRouteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRouteOutput) GoString() string { + return s.String() +} + +// SetRoute sets the Route field's value. +func (s *UpdateRouteOutput) SetRoute(v *RouteData) *UpdateRouteOutput { + s.Route = v + return s +} + +type UpdateVirtualGatewayInput struct { _ struct{} `type:"structure"` - // Arn is a required field - Arn *string `locationName:"arn" type:"string" required:"true"` + ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` // MeshName is a required field - MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // MeshOwner is a required field - MeshOwner *string `locationName:"meshOwner" min:"12" type:"string" required:"true"` + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` - // ResourceOwner is a required field - ResourceOwner *string `locationName:"resourceOwner" min:"12" type:"string" required:"true"` + // An object that represents the specification of a service mesh resource. + // + // Spec is a required field + Spec *VirtualGatewaySpec `locationName:"spec" type:"structure" required:"true"` - // RouteName is a required field - RouteName *string `locationName:"routeName" min:"1" type:"string" required:"true"` + // VirtualGatewayName is a required field + VirtualGatewayName *string `location:"uri" locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` +} - // VirtualRouterName is a required field - VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"` +// String returns the string representation +func (s UpdateVirtualGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateVirtualGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
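The Update*Input types in this hunk largely share the same shape: URI-bound names, an optional meshOwner query parameter, an idempotency client token, and a Spec. A rough sketch of driving one of them (UpdateRoute) through the generated client, assuming credentials and region come from the environment; the mesh, router, and route names are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	svc := appmesh.New(session.Must(session.NewSession()))

	// ClientToken is tagged idempotencyToken, so the SDK fills it in when
	// it is left unset.
	input := (&appmesh.UpdateRouteInput{}).
		SetMeshName("example-mesh").
		SetVirtualRouterName("example-router").
		SetRouteName("example-route").
		SetSpec(&appmesh.RouteSpec{})

	// Client-side validation of required fields and minimum lengths.
	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}

	out, err := svc.UpdateRoute(input)
	if err != nil {
		fmt.Println("UpdateRoute failed:", err)
		return
	}
	fmt.Println(out.Route)
}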
+func (s *UpdateVirtualGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateVirtualGatewayInput"} + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + if s.Spec == nil { + invalidParams.Add(request.NewErrParamRequired("Spec")) + } + if s.VirtualGatewayName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualGatewayName")) + } + if s.VirtualGatewayName != nil && len(*s.VirtualGatewayName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualGatewayName", 1)) + } + if s.Spec != nil { + if err := s.Spec.Validate(); err != nil { + invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *UpdateVirtualGatewayInput) SetClientToken(v string) *UpdateVirtualGatewayInput { + s.ClientToken = &v + return s +} + +// SetMeshName sets the MeshName field's value. +func (s *UpdateVirtualGatewayInput) SetMeshName(v string) *UpdateVirtualGatewayInput { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. +func (s *UpdateVirtualGatewayInput) SetMeshOwner(v string) *UpdateVirtualGatewayInput { + s.MeshOwner = &v + return s +} + +// SetSpec sets the Spec field's value. +func (s *UpdateVirtualGatewayInput) SetSpec(v *VirtualGatewaySpec) *UpdateVirtualGatewayInput { + s.Spec = v + return s +} + +// SetVirtualGatewayName sets the VirtualGatewayName field's value. +func (s *UpdateVirtualGatewayInput) SetVirtualGatewayName(v string) *UpdateVirtualGatewayInput { + s.VirtualGatewayName = &v + return s +} + +type UpdateVirtualGatewayOutput struct { + _ struct{} `type:"structure" payload:"VirtualGateway"` + + // An object that represents a virtual gateway returned by a describe operation. + // + // VirtualGateway is a required field + VirtualGateway *VirtualGatewayData `locationName:"virtualGateway" type:"structure" required:"true"` } // String returns the string representation -func (s RouteRef) String() string { +func (s UpdateVirtualGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateVirtualGatewayOutput) GoString() string { + return s.String() +} + +// SetVirtualGateway sets the VirtualGateway field's value. +func (s *UpdateVirtualGatewayOutput) SetVirtualGateway(v *VirtualGatewayData) *UpdateVirtualGatewayOutput { + s.VirtualGateway = v + return s +} + +type UpdateVirtualNodeInput struct { + _ struct{} `type:"structure"` + + ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` + + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + // An object that represents the specification of a virtual node. 
+ // + // Spec is a required field + Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"` + + // VirtualNodeName is a required field + VirtualNodeName *string `location:"uri" locationName:"virtualNodeName" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateVirtualNodeInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation -func (s RouteRef) GoString() string { - return s.String() +// GoString returns the string representation +func (s UpdateVirtualNodeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateVirtualNodeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateVirtualNodeInput"} + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) + } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + if s.Spec == nil { + invalidParams.Add(request.NewErrParamRequired("Spec")) + } + if s.VirtualNodeName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualNodeName")) + } + if s.VirtualNodeName != nil && len(*s.VirtualNodeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualNodeName", 1)) + } + if s.Spec != nil { + if err := s.Spec.Validate(); err != nil { + invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetArn sets the Arn field's value. -func (s *RouteRef) SetArn(v string) *RouteRef { - s.Arn = &v +// SetClientToken sets the ClientToken field's value. +func (s *UpdateVirtualNodeInput) SetClientToken(v string) *UpdateVirtualNodeInput { + s.ClientToken = &v return s } // SetMeshName sets the MeshName field's value. -func (s *RouteRef) SetMeshName(v string) *RouteRef { +func (s *UpdateVirtualNodeInput) SetMeshName(v string) *UpdateVirtualNodeInput { s.MeshName = &v return s } // SetMeshOwner sets the MeshOwner field's value. -func (s *RouteRef) SetMeshOwner(v string) *RouteRef { +func (s *UpdateVirtualNodeInput) SetMeshOwner(v string) *UpdateVirtualNodeInput { s.MeshOwner = &v return s } -// SetResourceOwner sets the ResourceOwner field's value. -func (s *RouteRef) SetResourceOwner(v string) *RouteRef { - s.ResourceOwner = &v +// SetSpec sets the Spec field's value. +func (s *UpdateVirtualNodeInput) SetSpec(v *VirtualNodeSpec) *UpdateVirtualNodeInput { + s.Spec = v return s } -// SetRouteName sets the RouteName field's value. -func (s *RouteRef) SetRouteName(v string) *RouteRef { - s.RouteName = &v +// SetVirtualNodeName sets the VirtualNodeName field's value. +func (s *UpdateVirtualNodeInput) SetVirtualNodeName(v string) *UpdateVirtualNodeInput { + s.VirtualNodeName = &v return s } -// SetVirtualRouterName sets the VirtualRouterName field's value. -func (s *RouteRef) SetVirtualRouterName(v string) *RouteRef { - s.VirtualRouterName = &v +type UpdateVirtualNodeOutput struct { + _ struct{} `type:"structure" payload:"VirtualNode"` + + // An object that represents a virtual node returned by a describe operation. 
+ // + // VirtualNode is a required field + VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateVirtualNodeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateVirtualNodeOutput) GoString() string { + return s.String() +} + +// SetVirtualNode sets the VirtualNode field's value. +func (s *UpdateVirtualNodeOutput) SetVirtualNode(v *VirtualNodeData) *UpdateVirtualNodeOutput { + s.VirtualNode = v return s } -// An object that represents a route specification. Specify one route type. -type RouteSpec struct { +type UpdateVirtualRouterInput struct { _ struct{} `type:"structure"` - // An object that represents a gRPC route type. - GrpcRoute *GrpcRoute `locationName:"grpcRoute" type:"structure"` + ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // An object that represents an HTTP or HTTP/2 route type. - Http2Route *HttpRoute `locationName:"http2Route" type:"structure"` + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object that represents an HTTP or HTTP/2 route type. - HttpRoute *HttpRoute `locationName:"httpRoute" type:"structure"` + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` - Priority *int64 `locationName:"priority" type:"integer"` + // An object that represents the specification of a virtual router. + // + // Spec is a required field + Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"` - // An object that represents a TCP route type. - TcpRoute *TcpRoute `locationName:"tcpRoute" type:"structure"` + // VirtualRouterName is a required field + VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s RouteSpec) String() string { +func (s UpdateVirtualRouterInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RouteSpec) GoString() string { +func (s UpdateVirtualRouterInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *RouteSpec) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RouteSpec"} - if s.GrpcRoute != nil { - if err := s.GrpcRoute.Validate(); err != nil { - invalidParams.AddNested("GrpcRoute", err.(request.ErrInvalidParams)) - } +func (s *UpdateVirtualRouterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateVirtualRouterInput"} + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) } - if s.Http2Route != nil { - if err := s.Http2Route.Validate(); err != nil { - invalidParams.AddNested("Http2Route", err.(request.ErrInvalidParams)) - } + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) } - if s.HttpRoute != nil { - if err := s.HttpRoute.Validate(); err != nil { - invalidParams.AddNested("HttpRoute", err.(request.ErrInvalidParams)) - } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) } - if s.TcpRoute != nil { - if err := s.TcpRoute.Validate(); err != nil { - invalidParams.AddNested("TcpRoute", err.(request.ErrInvalidParams)) + if s.Spec == nil { + invalidParams.Add(request.NewErrParamRequired("Spec")) + } + if s.VirtualRouterName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualRouterName")) + } + if s.VirtualRouterName != nil && len(*s.VirtualRouterName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualRouterName", 1)) + } + if s.Spec != nil { + if err := s.Spec.Validate(); err != nil { + invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) } } @@ -8118,95 +12401,114 @@ func (s *RouteSpec) Validate() error { return nil } -// SetGrpcRoute sets the GrpcRoute field's value. -func (s *RouteSpec) SetGrpcRoute(v *GrpcRoute) *RouteSpec { - s.GrpcRoute = v +// SetClientToken sets the ClientToken field's value. +func (s *UpdateVirtualRouterInput) SetClientToken(v string) *UpdateVirtualRouterInput { + s.ClientToken = &v return s } -// SetHttp2Route sets the Http2Route field's value. -func (s *RouteSpec) SetHttp2Route(v *HttpRoute) *RouteSpec { - s.Http2Route = v +// SetMeshName sets the MeshName field's value. +func (s *UpdateVirtualRouterInput) SetMeshName(v string) *UpdateVirtualRouterInput { + s.MeshName = &v return s } -// SetHttpRoute sets the HttpRoute field's value. -func (s *RouteSpec) SetHttpRoute(v *HttpRoute) *RouteSpec { - s.HttpRoute = v +// SetMeshOwner sets the MeshOwner field's value. +func (s *UpdateVirtualRouterInput) SetMeshOwner(v string) *UpdateVirtualRouterInput { + s.MeshOwner = &v return s } -// SetPriority sets the Priority field's value. -func (s *RouteSpec) SetPriority(v int64) *RouteSpec { - s.Priority = &v +// SetSpec sets the Spec field's value. +func (s *UpdateVirtualRouterInput) SetSpec(v *VirtualRouterSpec) *UpdateVirtualRouterInput { + s.Spec = v return s } -// SetTcpRoute sets the TcpRoute field's value. -func (s *RouteSpec) SetTcpRoute(v *TcpRoute) *RouteSpec { - s.TcpRoute = v +// SetVirtualRouterName sets the VirtualRouterName field's value. +func (s *UpdateVirtualRouterInput) SetVirtualRouterName(v string) *UpdateVirtualRouterInput { + s.VirtualRouterName = &v return s } -// An object that represents the current status of a route. 
-type RouteStatus struct { - _ struct{} `type:"structure"` +type UpdateVirtualRouterOutput struct { + _ struct{} `type:"structure" payload:"VirtualRouter"` - // Status is a required field - Status *string `locationName:"status" type:"string" required:"true" enum:"RouteStatusCode"` + // An object that represents a virtual router returned by a describe operation. + // + // VirtualRouter is a required field + VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"` } // String returns the string representation -func (s RouteStatus) String() string { +func (s UpdateVirtualRouterOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RouteStatus) GoString() string { +func (s UpdateVirtualRouterOutput) GoString() string { return s.String() } -// SetStatus sets the Status field's value. -func (s *RouteStatus) SetStatus(v string) *RouteStatus { - s.Status = &v +// SetVirtualRouter sets the VirtualRouter field's value. +func (s *UpdateVirtualRouterOutput) SetVirtualRouter(v *VirtualRouterData) *UpdateVirtualRouterOutput { + s.VirtualRouter = v return s } -// An object that represents the service discovery information for a virtual -// node. -type ServiceDiscovery struct { +type UpdateVirtualServiceInput struct { _ struct{} `type:"structure"` - // An object that represents the AWS Cloud Map service discovery information - // for your virtual node. - AwsCloudMap *AwsCloudMapServiceDiscovery `locationName:"awsCloudMap" type:"structure"` + ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // An object that represents the DNS service discovery information for your - // virtual node. - Dns *DnsServiceDiscovery `locationName:"dns" type:"structure"` + // MeshName is a required field + MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + + MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + + // An object that represents the specification of a virtual service. + // + // Spec is a required field + Spec *VirtualServiceSpec `locationName:"spec" type:"structure" required:"true"` + + // VirtualServiceName is a required field + VirtualServiceName *string `location:"uri" locationName:"virtualServiceName" type:"string" required:"true"` } // String returns the string representation -func (s ServiceDiscovery) String() string { +func (s UpdateVirtualServiceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ServiceDiscovery) GoString() string { +func (s UpdateVirtualServiceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ServiceDiscovery) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ServiceDiscovery"} - if s.AwsCloudMap != nil { - if err := s.AwsCloudMap.Validate(); err != nil { - invalidParams.AddNested("AwsCloudMap", err.(request.ErrInvalidParams)) - } +func (s *UpdateVirtualServiceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateVirtualServiceInput"} + if s.MeshName == nil { + invalidParams.Add(request.NewErrParamRequired("MeshName")) } - if s.Dns != nil { - if err := s.Dns.Validate(); err != nil { - invalidParams.AddNested("Dns", err.(request.ErrInvalidParams)) + if s.MeshName != nil && len(*s.MeshName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) + } + if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { + invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) + } + if s.Spec == nil { + invalidParams.Add(request.NewErrParamRequired("Spec")) + } + if s.VirtualServiceName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualServiceName")) + } + if s.VirtualServiceName != nil && len(*s.VirtualServiceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualServiceName", 1)) + } + if s.Spec != nil { + if err := s.Spec.Validate(); err != nil { + invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) } } @@ -8216,105 +12518,125 @@ func (s *ServiceDiscovery) Validate() error { return nil } -// SetAwsCloudMap sets the AwsCloudMap field's value. -func (s *ServiceDiscovery) SetAwsCloudMap(v *AwsCloudMapServiceDiscovery) *ServiceDiscovery { - s.AwsCloudMap = v +// SetClientToken sets the ClientToken field's value. +func (s *UpdateVirtualServiceInput) SetClientToken(v string) *UpdateVirtualServiceInput { + s.ClientToken = &v + return s +} + +// SetMeshName sets the MeshName field's value. +func (s *UpdateVirtualServiceInput) SetMeshName(v string) *UpdateVirtualServiceInput { + s.MeshName = &v + return s +} + +// SetMeshOwner sets the MeshOwner field's value. +func (s *UpdateVirtualServiceInput) SetMeshOwner(v string) *UpdateVirtualServiceInput { + s.MeshOwner = &v + return s +} + +// SetSpec sets the Spec field's value. +func (s *UpdateVirtualServiceInput) SetSpec(v *VirtualServiceSpec) *UpdateVirtualServiceInput { + s.Spec = v return s } -// SetDns sets the Dns field's value. -func (s *ServiceDiscovery) SetDns(v *DnsServiceDiscovery) *ServiceDiscovery { - s.Dns = v +// SetVirtualServiceName sets the VirtualServiceName field's value. +func (s *UpdateVirtualServiceInput) SetVirtualServiceName(v string) *UpdateVirtualServiceInput { + s.VirtualServiceName = &v return s } -// The request has failed due to a temporary failure of the service. -type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +type UpdateVirtualServiceOutput struct { + _ struct{} `type:"structure" payload:"VirtualService"` - Message_ *string `locationName:"message" type:"string"` + // An object that represents a virtual service returned by a describe operation. 
+ // + // VirtualService is a required field + VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"` } // String returns the string representation -func (s ServiceUnavailableException) String() string { +func (s UpdateVirtualServiceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ServiceUnavailableException) GoString() string { +func (s UpdateVirtualServiceOutput) GoString() string { return s.String() } -func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { - return &ServiceUnavailableException{ - respMetadata: v, - } +// SetVirtualService sets the VirtualService field's value. +func (s *UpdateVirtualServiceOutput) SetVirtualService(v *VirtualServiceData) *UpdateVirtualServiceOutput { + s.VirtualService = v + return s } -// Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { - return "ServiceUnavailableException" -} +// The access log configuration for a virtual gateway. +type VirtualGatewayAccessLog struct { + _ struct{} `type:"structure"` -// Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" + // An object that represents an access log file. + File *VirtualGatewayFileAccessLog `locationName:"file" type:"structure"` } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { - return nil +// String returns the string representation +func (s VirtualGatewayAccessLog) String() string { + return awsutil.Prettify(s) } -func (s ServiceUnavailableException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// GoString returns the string representation +func (s VirtualGatewayAccessLog) GoString() string { + return s.String() } -// Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +// Validate inspects the fields of the type to determine if they are valid. +func (s *VirtualGatewayAccessLog) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayAccessLog"} + if s.File != nil { + if err := s.File.Validate(); err != nil { + invalidParams.AddNested("File", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +// SetFile sets the File field's value. +func (s *VirtualGatewayAccessLog) SetFile(v *VirtualGatewayFileAccessLog) *VirtualGatewayAccessLog { + s.File = v + return s } -// Optional metadata that you apply to a resource to assist with categorization -// and organization. Each tag consists of a key and an optional value, both -// of which you define. Tag keys can have a maximum character length of 128 -// characters, and tag values can have a maximum length of 256 characters. -type TagRef struct { +// An object that represents the default properties for a backend. +type VirtualGatewayBackendDefaults struct { _ struct{} `type:"structure"` - // Key is a required field - Key *string `locationName:"key" min:"1" type:"string" required:"true"` - - Value *string `locationName:"value" type:"string"` + // An object that represents a client policy. 
+ ClientPolicy *VirtualGatewayClientPolicy `locationName:"clientPolicy" type:"structure"` } // String returns the string representation -func (s TagRef) String() string { +func (s VirtualGatewayBackendDefaults) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TagRef) GoString() string { +func (s VirtualGatewayBackendDefaults) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *TagRef) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagRef"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) +func (s *VirtualGatewayBackendDefaults) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayBackendDefaults"} + if s.ClientPolicy != nil { + if err := s.ClientPolicy.Validate(); err != nil { + invalidParams.AddNested("ClientPolicy", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -8323,55 +12645,36 @@ func (s *TagRef) Validate() error { return nil } -// SetKey sets the Key field's value. -func (s *TagRef) SetKey(v string) *TagRef { - s.Key = &v - return s -} - -// SetValue sets the Value field's value. -func (s *TagRef) SetValue(v string) *TagRef { - s.Value = &v +// SetClientPolicy sets the ClientPolicy field's value. +func (s *VirtualGatewayBackendDefaults) SetClientPolicy(v *VirtualGatewayClientPolicy) *VirtualGatewayBackendDefaults { + s.ClientPolicy = v return s } -type TagResourceInput struct { +// An object that represents a client policy. +type VirtualGatewayClientPolicy struct { _ struct{} `type:"structure"` - // ResourceArn is a required field - ResourceArn *string `location:"querystring" locationName:"resourceArn" type:"string" required:"true"` - - // Tags is a required field - Tags []*TagRef `locationName:"tags" type:"list" required:"true"` + // An object that represents a Transport Layer Security (TLS) client policy. + Tls *VirtualGatewayClientPolicyTls `locationName:"tls" type:"structure"` } // String returns the string representation -func (s TagResourceInput) String() string { +func (s VirtualGatewayClientPolicy) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TagResourceInput) GoString() string { +func (s VirtualGatewayClientPolicy) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *TagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } +func (s *VirtualGatewayClientPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayClientPolicy"} + if s.Tls != nil { + if err := s.Tls.Validate(); err != nil { + invalidParams.AddNested("Tls", err.(request.ErrInvalidParams)) } } @@ -8381,61 +12684,45 @@ func (s *TagResourceInput) Validate() error { return nil } -// SetResourceArn sets the ResourceArn field's value. 
-func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { - s.ResourceArn = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *TagResourceInput) SetTags(v []*TagRef) *TagResourceInput { - s.Tags = v +// SetTls sets the Tls field's value. +func (s *VirtualGatewayClientPolicy) SetTls(v *VirtualGatewayClientPolicyTls) *VirtualGatewayClientPolicy { + s.Tls = v return s } -type TagResourceOutput struct { +// An object that represents a Transport Layer Security (TLS) client policy. +type VirtualGatewayClientPolicyTls struct { _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s TagResourceOutput) String() string { - return awsutil.Prettify(s) -} -// GoString returns the string representation -func (s TagResourceOutput) GoString() string { - return s.String() -} + Enforce *bool `locationName:"enforce" type:"boolean"` -// An object that represents a TCP route type. -type TcpRoute struct { - _ struct{} `type:"structure"` + Ports []*int64 `locationName:"ports" type:"list"` - // An object that represents the action to take if a match is determined. + // An object that represents a Transport Layer Security (TLS) validation context. // - // Action is a required field - Action *TcpRouteAction `locationName:"action" type:"structure" required:"true"` + // Validation is a required field + Validation *VirtualGatewayTlsValidationContext `locationName:"validation" type:"structure" required:"true"` } // String returns the string representation -func (s TcpRoute) String() string { +func (s VirtualGatewayClientPolicyTls) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TcpRoute) GoString() string { +func (s VirtualGatewayClientPolicyTls) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *TcpRoute) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TcpRoute"} - if s.Action == nil { - invalidParams.Add(request.NewErrParamRequired("Action")) +func (s *VirtualGatewayClientPolicyTls) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayClientPolicyTls"} + if s.Validation == nil { + invalidParams.Add(request.NewErrParamRequired("Validation")) } - if s.Action != nil { - if err := s.Action.Validate(); err != nil { - invalidParams.AddNested("Action", err.(request.ErrInvalidParams)) + if s.Validation != nil { + if err := s.Validation.Validate(); err != nil { + invalidParams.AddNested("Validation", err.(request.ErrInvalidParams)) } } @@ -8445,93 +12732,116 @@ func (s *TcpRoute) Validate() error { return nil } -// SetAction sets the Action field's value. -func (s *TcpRoute) SetAction(v *TcpRouteAction) *TcpRoute { - s.Action = v +// SetEnforce sets the Enforce field's value. +func (s *VirtualGatewayClientPolicyTls) SetEnforce(v bool) *VirtualGatewayClientPolicyTls { + s.Enforce = &v return s } -// An object that represents the action to take if a match is determined. -type TcpRouteAction struct { +// SetPorts sets the Ports field's value. +func (s *VirtualGatewayClientPolicyTls) SetPorts(v []*int64) *VirtualGatewayClientPolicyTls { + s.Ports = v + return s +} + +// SetValidation sets the Validation field's value. +func (s *VirtualGatewayClientPolicyTls) SetValidation(v *VirtualGatewayTlsValidationContext) *VirtualGatewayClientPolicyTls { + s.Validation = v + return s +} + +// An object that represents a virtual gateway returned by a describe operation. 
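A small sketch of how the virtual gateway client policy types above nest; the empty VirtualGatewayTlsValidationContext is only a placeholder, since its trust configuration comes from types defined elsewhere in this file:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/appmesh"
)

func main() {
	tls := (&appmesh.VirtualGatewayClientPolicyTls{}).
		SetEnforce(true).
		SetPorts([]*int64{aws.Int64(8443)}).
		// Placeholder: a real policy would populate the validation
		// context's trust settings.
		SetValidation(&appmesh.VirtualGatewayTlsValidationContext{})

	policy := (&appmesh.VirtualGatewayClientPolicy{}).SetTls(tls)

	defaults := (&appmesh.VirtualGatewayBackendDefaults{}).SetClientPolicy(policy)

	// Validate cascades into the nested client policy and TLS settings.
	if err := defaults.Validate(); err != nil {
		fmt.Println("client policy not yet complete:", err)
		return
	}
	fmt.Println(defaults)
}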
+type VirtualGatewayData struct { _ struct{} `type:"structure"` - // WeightedTargets is a required field - WeightedTargets []*WeightedTarget `locationName:"weightedTargets" min:"1" type:"list" required:"true"` + // MeshName is a required field + MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` + + // An object that represents metadata for a resource. + // + // Metadata is a required field + Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` + + // An object that represents the specification of a service mesh resource. + // + // Spec is a required field + Spec *VirtualGatewaySpec `locationName:"spec" type:"structure" required:"true"` + + // An object that represents the status of the mesh resource. + // + // Status is a required field + Status *VirtualGatewayStatus `locationName:"status" type:"structure" required:"true"` + + // VirtualGatewayName is a required field + VirtualGatewayName *string `locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s TcpRouteAction) String() string { +func (s VirtualGatewayData) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TcpRouteAction) GoString() string { +func (s VirtualGatewayData) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *TcpRouteAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TcpRouteAction"} - if s.WeightedTargets == nil { - invalidParams.Add(request.NewErrParamRequired("WeightedTargets")) - } - if s.WeightedTargets != nil && len(s.WeightedTargets) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WeightedTargets", 1)) - } - if s.WeightedTargets != nil { - for i, v := range s.WeightedTargets { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "WeightedTargets", i), err.(request.ErrInvalidParams)) - } - } - } +// SetMeshName sets the MeshName field's value. +func (s *VirtualGatewayData) SetMeshName(v string) *VirtualGatewayData { + s.MeshName = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMetadata sets the Metadata field's value. +func (s *VirtualGatewayData) SetMetadata(v *ResourceMetadata) *VirtualGatewayData { + s.Metadata = v + return s } -// SetWeightedTargets sets the WeightedTargets field's value. -func (s *TcpRouteAction) SetWeightedTargets(v []*WeightedTarget) *TcpRouteAction { - s.WeightedTargets = v +// SetSpec sets the Spec field's value. +func (s *VirtualGatewayData) SetSpec(v *VirtualGatewaySpec) *VirtualGatewayData { + s.Spec = v + return s +} + +// SetStatus sets the Status field's value. +func (s *VirtualGatewayData) SetStatus(v *VirtualGatewayStatus) *VirtualGatewayData { + s.Status = v + return s +} + +// SetVirtualGatewayName sets the VirtualGatewayName field's value. +func (s *VirtualGatewayData) SetVirtualGatewayName(v string) *VirtualGatewayData { + s.VirtualGatewayName = &v return s } -// An object that represents a Transport Layer Security (TLS) validation context. -type TlsValidationContext struct { +// An object that represents an access log file. +type VirtualGatewayFileAccessLog struct { _ struct{} `type:"structure"` - // An object that represents a Transport Layer Security (TLS) validation context - // trust. 
- // - // Trust is a required field - Trust *TlsValidationContextTrust `locationName:"trust" type:"structure" required:"true"` + // Path is a required field + Path *string `locationName:"path" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s TlsValidationContext) String() string { +func (s VirtualGatewayFileAccessLog) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TlsValidationContext) GoString() string { +func (s VirtualGatewayFileAccessLog) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *TlsValidationContext) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TlsValidationContext"} - if s.Trust == nil { - invalidParams.Add(request.NewErrParamRequired("Trust")) +func (s *VirtualGatewayFileAccessLog) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayFileAccessLog"} + if s.Path == nil { + invalidParams.Add(request.NewErrParamRequired("Path")) } - if s.Trust != nil { - if err := s.Trust.Validate(); err != nil { - invalidParams.AddNested("Trust", err.(request.ErrInvalidParams)) - } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) } if invalidParams.Len() > 0 { @@ -8540,39 +12850,79 @@ func (s *TlsValidationContext) Validate() error { return nil } -// SetTrust sets the Trust field's value. -func (s *TlsValidationContext) SetTrust(v *TlsValidationContextTrust) *TlsValidationContext { - s.Trust = v +// SetPath sets the Path field's value. +func (s *VirtualGatewayFileAccessLog) SetPath(v string) *VirtualGatewayFileAccessLog { + s.Path = &v return s } -// An object that represents a TLS validation context trust for an AWS Certicate -// Manager (ACM) certificate. -type TlsValidationContextAcmTrust struct { +// An object that represents the health check policy for a virtual gateway's +// listener. +type VirtualGatewayHealthCheckPolicy struct { _ struct{} `type:"structure"` - // CertificateAuthorityArns is a required field - CertificateAuthorityArns []*string `locationName:"certificateAuthorityArns" min:"1" type:"list" required:"true"` + // HealthyThreshold is a required field + HealthyThreshold *int64 `locationName:"healthyThreshold" min:"2" type:"integer" required:"true"` + + // IntervalMillis is a required field + IntervalMillis *int64 `locationName:"intervalMillis" min:"5000" type:"long" required:"true"` + + Path *string `locationName:"path" type:"string"` + + Port *int64 `locationName:"port" min:"1" type:"integer"` + + // Protocol is a required field + Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"VirtualGatewayPortProtocol"` + + // TimeoutMillis is a required field + TimeoutMillis *int64 `locationName:"timeoutMillis" min:"2000" type:"long" required:"true"` + + // UnhealthyThreshold is a required field + UnhealthyThreshold *int64 `locationName:"unhealthyThreshold" min:"2" type:"integer" required:"true"` } // String returns the string representation -func (s TlsValidationContextAcmTrust) String() string { +func (s VirtualGatewayHealthCheckPolicy) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TlsValidationContextAcmTrust) GoString() string { +func (s VirtualGatewayHealthCheckPolicy) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *TlsValidationContextAcmTrust) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TlsValidationContextAcmTrust"} - if s.CertificateAuthorityArns == nil { - invalidParams.Add(request.NewErrParamRequired("CertificateAuthorityArns")) +func (s *VirtualGatewayHealthCheckPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayHealthCheckPolicy"} + if s.HealthyThreshold == nil { + invalidParams.Add(request.NewErrParamRequired("HealthyThreshold")) } - if s.CertificateAuthorityArns != nil && len(s.CertificateAuthorityArns) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CertificateAuthorityArns", 1)) + if s.HealthyThreshold != nil && *s.HealthyThreshold < 2 { + invalidParams.Add(request.NewErrParamMinValue("HealthyThreshold", 2)) + } + if s.IntervalMillis == nil { + invalidParams.Add(request.NewErrParamRequired("IntervalMillis")) + } + if s.IntervalMillis != nil && *s.IntervalMillis < 5000 { + invalidParams.Add(request.NewErrParamMinValue("IntervalMillis", 5000)) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + if s.TimeoutMillis == nil { + invalidParams.Add(request.NewErrParamRequired("TimeoutMillis")) + } + if s.TimeoutMillis != nil && *s.TimeoutMillis < 2000 { + invalidParams.Add(request.NewErrParamMinValue("TimeoutMillis", 2000)) + } + if s.UnhealthyThreshold == nil { + invalidParams.Add(request.NewErrParamRequired("UnhealthyThreshold")) + } + if s.UnhealthyThreshold != nil && *s.UnhealthyThreshold < 2 { + invalidParams.Add(request.NewErrParamMinValue("UnhealthyThreshold", 2)) } if invalidParams.Len() > 0 { @@ -8581,88 +12931,95 @@ func (s *TlsValidationContextAcmTrust) Validate() error { return nil } -// SetCertificateAuthorityArns sets the CertificateAuthorityArns field's value. -func (s *TlsValidationContextAcmTrust) SetCertificateAuthorityArns(v []*string) *TlsValidationContextAcmTrust { - s.CertificateAuthorityArns = v +// SetHealthyThreshold sets the HealthyThreshold field's value. +func (s *VirtualGatewayHealthCheckPolicy) SetHealthyThreshold(v int64) *VirtualGatewayHealthCheckPolicy { + s.HealthyThreshold = &v return s } -// An object that represents a Transport Layer Security (TLS) validation context -// trust for a local file. -type TlsValidationContextFileTrust struct { - _ struct{} `type:"structure"` - - // CertificateChain is a required field - CertificateChain *string `locationName:"certificateChain" min:"1" type:"string" required:"true"` +// SetIntervalMillis sets the IntervalMillis field's value. +func (s *VirtualGatewayHealthCheckPolicy) SetIntervalMillis(v int64) *VirtualGatewayHealthCheckPolicy { + s.IntervalMillis = &v + return s } -// String returns the string representation -func (s TlsValidationContextFileTrust) String() string { - return awsutil.Prettify(s) +// SetPath sets the Path field's value. +func (s *VirtualGatewayHealthCheckPolicy) SetPath(v string) *VirtualGatewayHealthCheckPolicy { + s.Path = &v + return s } -// GoString returns the string representation -func (s TlsValidationContextFileTrust) GoString() string { - return s.String() +// SetPort sets the Port field's value. +func (s *VirtualGatewayHealthCheckPolicy) SetPort(v int64) *VirtualGatewayHealthCheckPolicy { + s.Port = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *TlsValidationContextFileTrust) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TlsValidationContextFileTrust"} - if s.CertificateChain == nil { - invalidParams.Add(request.NewErrParamRequired("CertificateChain")) - } - if s.CertificateChain != nil && len(*s.CertificateChain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("CertificateChain", 1)) - } +// SetProtocol sets the Protocol field's value. +func (s *VirtualGatewayHealthCheckPolicy) SetProtocol(v string) *VirtualGatewayHealthCheckPolicy { + s.Protocol = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetTimeoutMillis sets the TimeoutMillis field's value. +func (s *VirtualGatewayHealthCheckPolicy) SetTimeoutMillis(v int64) *VirtualGatewayHealthCheckPolicy { + s.TimeoutMillis = &v + return s } -// SetCertificateChain sets the CertificateChain field's value. -func (s *TlsValidationContextFileTrust) SetCertificateChain(v string) *TlsValidationContextFileTrust { - s.CertificateChain = &v +// SetUnhealthyThreshold sets the UnhealthyThreshold field's value. +func (s *VirtualGatewayHealthCheckPolicy) SetUnhealthyThreshold(v int64) *VirtualGatewayHealthCheckPolicy { + s.UnhealthyThreshold = &v return s } -// An object that represents a Transport Layer Security (TLS) validation context -// trust. -type TlsValidationContextTrust struct { +// An object that represents a listener for a virtual gateway. +type VirtualGatewayListener struct { _ struct{} `type:"structure"` - // An object that represents a TLS validation context trust for an AWS Certicate - // Manager (ACM) certificate. - Acm *TlsValidationContextAcmTrust `locationName:"acm" type:"structure"` + // An object that represents the health check policy for a virtual gateway's + // listener. + HealthCheck *VirtualGatewayHealthCheckPolicy `locationName:"healthCheck" type:"structure"` - // An object that represents a Transport Layer Security (TLS) validation context - // trust for a local file. - File *TlsValidationContextFileTrust `locationName:"file" type:"structure"` + // An object that represents a port mapping. + // + // PortMapping is a required field + PortMapping *VirtualGatewayPortMapping `locationName:"portMapping" type:"structure" required:"true"` + + // An object that represents the Transport Layer Security (TLS) properties for + // a listener. + Tls *VirtualGatewayListenerTls `locationName:"tls" type:"structure"` } // String returns the string representation -func (s TlsValidationContextTrust) String() string { +func (s VirtualGatewayListener) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TlsValidationContextTrust) GoString() string { +func (s VirtualGatewayListener) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *TlsValidationContextTrust) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TlsValidationContextTrust"} - if s.Acm != nil { - if err := s.Acm.Validate(); err != nil { - invalidParams.AddNested("Acm", err.(request.ErrInvalidParams)) +func (s *VirtualGatewayListener) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayListener"} + if s.PortMapping == nil { + invalidParams.Add(request.NewErrParamRequired("PortMapping")) + } + if s.HealthCheck != nil { + if err := s.HealthCheck.Validate(); err != nil { + invalidParams.AddNested("HealthCheck", err.(request.ErrInvalidParams)) } } - if s.File != nil { - if err := s.File.Validate(); err != nil { - invalidParams.AddNested("File", err.(request.ErrInvalidParams)) + if s.PortMapping != nil { + if err := s.PortMapping.Validate(); err != nil { + invalidParams.AddNested("PortMapping", err.(request.ErrInvalidParams)) + } + } + if s.Tls != nil { + if err := s.Tls.Validate(); err != nil { + invalidParams.AddNested("Tls", err.(request.ErrInvalidParams)) } } @@ -8672,162 +13029,153 @@ func (s *TlsValidationContextTrust) Validate() error { return nil } -// SetAcm sets the Acm field's value. -func (s *TlsValidationContextTrust) SetAcm(v *TlsValidationContextAcmTrust) *TlsValidationContextTrust { - s.Acm = v +// SetHealthCheck sets the HealthCheck field's value. +func (s *VirtualGatewayListener) SetHealthCheck(v *VirtualGatewayHealthCheckPolicy) *VirtualGatewayListener { + s.HealthCheck = v return s } -// SetFile sets the File field's value. -func (s *TlsValidationContextTrust) SetFile(v *TlsValidationContextFileTrust) *TlsValidationContextTrust { - s.File = v +// SetPortMapping sets the PortMapping field's value. +func (s *VirtualGatewayListener) SetPortMapping(v *VirtualGatewayPortMapping) *VirtualGatewayListener { + s.PortMapping = v return s } -// The maximum request rate permitted by the App Mesh APIs has been exceeded -// for your account. For best results, use an increasing or variable sleep interval -// between requests. -type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// SetTls sets the Tls field's value. +func (s *VirtualGatewayListener) SetTls(v *VirtualGatewayListenerTls) *VirtualGatewayListener { + s.Tls = v + return s +} - Message_ *string `locationName:"message" type:"string"` +// An object that represents the Transport Layer Security (TLS) properties for +// a listener. +type VirtualGatewayListenerTls struct { + _ struct{} `type:"structure"` + + // An object that represents a listener's Transport Layer Security (TLS) certificate. + // + // Certificate is a required field + Certificate *VirtualGatewayListenerTlsCertificate `locationName:"certificate" type:"structure" required:"true"` + + // Mode is a required field + Mode *string `locationName:"mode" type:"string" required:"true" enum:"VirtualGatewayListenerTlsMode"` } // String returns the string representation -func (s TooManyRequestsException) String() string { +func (s VirtualGatewayListenerTls) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TooManyRequestsException) GoString() string { +func (s VirtualGatewayListenerTls) GoString() string { return s.String() } -func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { - return &TooManyRequestsException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *VirtualGatewayListenerTls) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayListenerTls"} + if s.Certificate == nil { + invalidParams.Add(request.NewErrParamRequired("Certificate")) } -} - -// Code returns the exception type name. -func (s TooManyRequestsException) Code() string { - return "TooManyRequestsException" -} - -// Message returns the exception's message. -func (s TooManyRequestsException) Message() string { - if s.Message_ != nil { - return *s.Message_ + if s.Mode == nil { + invalidParams.Add(request.NewErrParamRequired("Mode")) + } + if s.Certificate != nil { + if err := s.Certificate.Validate(); err != nil { + invalidParams.AddNested("Certificate", err.(request.ErrInvalidParams)) + } } - return "" -} -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { + if invalidParams.Len() > 0 { + return invalidParams + } return nil } -func (s TooManyRequestsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +// SetCertificate sets the Certificate field's value. +func (s *VirtualGatewayListenerTls) SetCertificate(v *VirtualGatewayListenerTlsCertificate) *VirtualGatewayListenerTls { + s.Certificate = v + return s } -// RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +// SetMode sets the Mode field's value. +func (s *VirtualGatewayListenerTls) SetMode(v string) *VirtualGatewayListenerTls { + s.Mode = &v + return s } -// The request exceeds the maximum allowed number of tags allowed per resource. -// The current limit is 50 user tags per resource. You must reduce the number -// of tags in the request. None of the tags in this request were applied. -type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// An object that represents an AWS Certicate Manager (ACM) certificate. +type VirtualGatewayListenerTlsAcmCertificate struct { + _ struct{} `type:"structure"` - Message_ *string `locationName:"message" type:"string"` + // CertificateArn is a required field + CertificateArn *string `locationName:"certificateArn" type:"string" required:"true"` } // String returns the string representation -func (s TooManyTagsException) String() string { +func (s VirtualGatewayListenerTlsAcmCertificate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TooManyTagsException) GoString() string { +func (s VirtualGatewayListenerTlsAcmCertificate) GoString() string { return s.String() } -func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { - return &TooManyTagsException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *VirtualGatewayListenerTlsAcmCertificate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayListenerTlsAcmCertificate"} + if s.CertificateArn == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateArn")) } -} -// Code returns the exception type name. -func (s TooManyTagsException) Code() string { - return "TooManyTagsException" -} - -// Message returns the exception's message. 
-func (s TooManyTagsException) Message() string { - if s.Message_ != nil { - return *s.Message_ + if invalidParams.Len() > 0 { + return invalidParams } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +// SetCertificateArn sets the CertificateArn field's value. +func (s *VirtualGatewayListenerTlsAcmCertificate) SetCertificateArn(v string) *VirtualGatewayListenerTlsAcmCertificate { + s.CertificateArn = &v + return s } -type UntagResourceInput struct { +// An object that represents a listener's Transport Layer Security (TLS) certificate. +type VirtualGatewayListenerTlsCertificate struct { _ struct{} `type:"structure"` - // ResourceArn is a required field - ResourceArn *string `location:"querystring" locationName:"resourceArn" type:"string" required:"true"` + // An object that represents an AWS Certicate Manager (ACM) certificate. + Acm *VirtualGatewayListenerTlsAcmCertificate `locationName:"acm" type:"structure"` - // TagKeys is a required field - TagKeys []*string `locationName:"tagKeys" type:"list" required:"true"` + // An object that represents a local file certificate. The certificate must + // meet specific requirements and you must have proxy authorization enabled. + // For more information, see Transport Layer Security (TLS) (https://docs.aws.amazon.com/app-mesh/latest/userguide/tls.html#virtual-node-tls-prerequisites). + File *VirtualGatewayListenerTlsFileCertificate `locationName:"file" type:"structure"` } // String returns the string representation -func (s UntagResourceInput) String() string { +func (s VirtualGatewayListenerTlsCertificate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UntagResourceInput) GoString() string { +func (s VirtualGatewayListenerTlsCertificate) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *UntagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) +func (s *VirtualGatewayListenerTlsCertificate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayListenerTlsCertificate"} + if s.Acm != nil { + if err := s.Acm.Validate(); err != nil { + invalidParams.AddNested("Acm", err.(request.ErrInvalidParams)) + } } - if s.TagKeys == nil { - invalidParams.Add(request.NewErrParamRequired("TagKeys")) + if s.File != nil { + if err := s.File.Validate(); err != nil { + invalidParams.AddNested("File", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -8836,66 +13184,99 @@ func (s *UntagResourceInput) Validate() error { return nil } -// SetResourceArn sets the ResourceArn field's value. -func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { - s.ResourceArn = &v +// SetAcm sets the Acm field's value. 
+func (s *VirtualGatewayListenerTlsCertificate) SetAcm(v *VirtualGatewayListenerTlsAcmCertificate) *VirtualGatewayListenerTlsCertificate { + s.Acm = v return s } -// SetTagKeys sets the TagKeys field's value. -func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { - s.TagKeys = v +// SetFile sets the File field's value. +func (s *VirtualGatewayListenerTlsCertificate) SetFile(v *VirtualGatewayListenerTlsFileCertificate) *VirtualGatewayListenerTlsCertificate { + s.File = v return s } -type UntagResourceOutput struct { +// An object that represents a local file certificate. The certificate must +// meet specific requirements and you must have proxy authorization enabled. +// For more information, see Transport Layer Security (TLS) (https://docs.aws.amazon.com/app-mesh/latest/userguide/tls.html#virtual-node-tls-prerequisites). +type VirtualGatewayListenerTlsFileCertificate struct { _ struct{} `type:"structure"` + + // CertificateChain is a required field + CertificateChain *string `locationName:"certificateChain" min:"1" type:"string" required:"true"` + + // PrivateKey is a required field + PrivateKey *string `locationName:"privateKey" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s UntagResourceOutput) String() string { +func (s VirtualGatewayListenerTlsFileCertificate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UntagResourceOutput) GoString() string { +func (s VirtualGatewayListenerTlsFileCertificate) GoString() string { return s.String() } -type UpdateMeshInput struct { - _ struct{} `type:"structure"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *VirtualGatewayListenerTlsFileCertificate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayListenerTlsFileCertificate"} + if s.CertificateChain == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateChain")) + } + if s.CertificateChain != nil && len(*s.CertificateChain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateChain", 1)) + } + if s.PrivateKey == nil { + invalidParams.Add(request.NewErrParamRequired("PrivateKey")) + } + if s.PrivateKey != nil && len(*s.PrivateKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PrivateKey", 1)) + } - ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // MeshName is a required field - MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` +// SetCertificateChain sets the CertificateChain field's value. +func (s *VirtualGatewayListenerTlsFileCertificate) SetCertificateChain(v string) *VirtualGatewayListenerTlsFileCertificate { + s.CertificateChain = &v + return s +} - // An object that represents the specification of a service mesh. - Spec *MeshSpec `locationName:"spec" type:"structure"` +// SetPrivateKey sets the PrivateKey field's value. +func (s *VirtualGatewayListenerTlsFileCertificate) SetPrivateKey(v string) *VirtualGatewayListenerTlsFileCertificate { + s.PrivateKey = &v + return s +} + +// An object that represents logging information. +type VirtualGatewayLogging struct { + _ struct{} `type:"structure"` + + // The access log configuration for a virtual gateway. 
+ AccessLog *VirtualGatewayAccessLog `locationName:"accessLog" type:"structure"` } // String returns the string representation -func (s UpdateMeshInput) String() string { +func (s VirtualGatewayLogging) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateMeshInput) GoString() string { +func (s VirtualGatewayLogging) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateMeshInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateMeshInput"} - if s.MeshName == nil { - invalidParams.Add(request.NewErrParamRequired("MeshName")) - } - if s.MeshName != nil && len(*s.MeshName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) - } - if s.Spec != nil { - if err := s.Spec.Validate(); err != nil { - invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) +func (s *VirtualGatewayLogging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayLogging"} + if s.AccessLog != nil { + if err := s.AccessLog.Validate(); err != nil { + invalidParams.AddNested("AccessLog", err.(request.ErrInvalidParams)) } } @@ -8905,234 +13286,199 @@ func (s *UpdateMeshInput) Validate() error { return nil } -// SetClientToken sets the ClientToken field's value. -func (s *UpdateMeshInput) SetClientToken(v string) *UpdateMeshInput { - s.ClientToken = &v - return s -} - -// SetMeshName sets the MeshName field's value. -func (s *UpdateMeshInput) SetMeshName(v string) *UpdateMeshInput { - s.MeshName = &v +// SetAccessLog sets the AccessLog field's value. +func (s *VirtualGatewayLogging) SetAccessLog(v *VirtualGatewayAccessLog) *VirtualGatewayLogging { + s.AccessLog = v return s } -// SetSpec sets the Spec field's value. -func (s *UpdateMeshInput) SetSpec(v *MeshSpec) *UpdateMeshInput { - s.Spec = v - return s -} +// An object that represents a port mapping. +type VirtualGatewayPortMapping struct { + _ struct{} `type:"structure"` -type UpdateMeshOutput struct { - _ struct{} `type:"structure" payload:"Mesh"` + // Port is a required field + Port *int64 `locationName:"port" min:"1" type:"integer" required:"true"` - // An object that represents a service mesh returned by a describe operation. - // - // Mesh is a required field - Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"` + // Protocol is a required field + Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"VirtualGatewayPortProtocol"` } // String returns the string representation -func (s UpdateMeshOutput) String() string { +func (s VirtualGatewayPortMapping) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateMeshOutput) GoString() string { +func (s VirtualGatewayPortMapping) GoString() string { return s.String() } -// SetMesh sets the Mesh field's value. -func (s *UpdateMeshOutput) SetMesh(v *MeshData) *UpdateMeshOutput { - s.Mesh = v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *VirtualGatewayPortMapping) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayPortMapping"} + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPort sets the Port field's value. +func (s *VirtualGatewayPortMapping) SetPort(v int64) *VirtualGatewayPortMapping { + s.Port = &v return s } -type UpdateRouteInput struct { +// SetProtocol sets the Protocol field's value. +func (s *VirtualGatewayPortMapping) SetProtocol(v string) *VirtualGatewayPortMapping { + s.Protocol = &v + return s +} + +// An object that represents a virtual gateway returned by a list operation. +type VirtualGatewayRef struct { _ struct{} `type:"structure"` - ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` + + // CreatedAt is a required field + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"` + + // LastUpdatedAt is a required field + LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" required:"true"` // MeshName is a required field - MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + // MeshOwner is a required field + MeshOwner *string `locationName:"meshOwner" min:"12" type:"string" required:"true"` - // RouteName is a required field - RouteName *string `location:"uri" locationName:"routeName" min:"1" type:"string" required:"true"` + // ResourceOwner is a required field + ResourceOwner *string `locationName:"resourceOwner" min:"12" type:"string" required:"true"` - // An object that represents a route specification. Specify one route type. - // - // Spec is a required field - Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` + // Version is a required field + Version *int64 `locationName:"version" type:"long" required:"true"` - // VirtualRouterName is a required field - VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"` + // VirtualGatewayName is a required field + VirtualGatewayName *string `locationName:"virtualGatewayName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s UpdateRouteInput) String() string { +func (s VirtualGatewayRef) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateRouteInput) GoString() string { +func (s VirtualGatewayRef) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateRouteInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateRouteInput"} - if s.MeshName == nil { - invalidParams.Add(request.NewErrParamRequired("MeshName")) - } - if s.MeshName != nil && len(*s.MeshName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) - } - if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { - invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) - } - if s.RouteName == nil { - invalidParams.Add(request.NewErrParamRequired("RouteName")) - } - if s.RouteName != nil && len(*s.RouteName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RouteName", 1)) - } - if s.Spec == nil { - invalidParams.Add(request.NewErrParamRequired("Spec")) - } - if s.VirtualRouterName == nil { - invalidParams.Add(request.NewErrParamRequired("VirtualRouterName")) - } - if s.VirtualRouterName != nil && len(*s.VirtualRouterName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VirtualRouterName", 1)) - } - if s.Spec != nil { - if err := s.Spec.Validate(); err != nil { - invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) - } - } +// SetArn sets the Arn field's value. +func (s *VirtualGatewayRef) SetArn(v string) *VirtualGatewayRef { + s.Arn = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCreatedAt sets the CreatedAt field's value. +func (s *VirtualGatewayRef) SetCreatedAt(v time.Time) *VirtualGatewayRef { + s.CreatedAt = &v + return s } -// SetClientToken sets the ClientToken field's value. -func (s *UpdateRouteInput) SetClientToken(v string) *UpdateRouteInput { - s.ClientToken = &v +// SetLastUpdatedAt sets the LastUpdatedAt field's value. +func (s *VirtualGatewayRef) SetLastUpdatedAt(v time.Time) *VirtualGatewayRef { + s.LastUpdatedAt = &v return s } // SetMeshName sets the MeshName field's value. -func (s *UpdateRouteInput) SetMeshName(v string) *UpdateRouteInput { +func (s *VirtualGatewayRef) SetMeshName(v string) *VirtualGatewayRef { s.MeshName = &v return s } // SetMeshOwner sets the MeshOwner field's value. -func (s *UpdateRouteInput) SetMeshOwner(v string) *UpdateRouteInput { +func (s *VirtualGatewayRef) SetMeshOwner(v string) *VirtualGatewayRef { s.MeshOwner = &v return s } -// SetRouteName sets the RouteName field's value. -func (s *UpdateRouteInput) SetRouteName(v string) *UpdateRouteInput { - s.RouteName = &v - return s -} - -// SetSpec sets the Spec field's value. -func (s *UpdateRouteInput) SetSpec(v *RouteSpec) *UpdateRouteInput { - s.Spec = v +// SetResourceOwner sets the ResourceOwner field's value. +func (s *VirtualGatewayRef) SetResourceOwner(v string) *VirtualGatewayRef { + s.ResourceOwner = &v return s } -// SetVirtualRouterName sets the VirtualRouterName field's value. -func (s *UpdateRouteInput) SetVirtualRouterName(v string) *UpdateRouteInput { - s.VirtualRouterName = &v +// SetVersion sets the Version field's value. +func (s *VirtualGatewayRef) SetVersion(v int64) *VirtualGatewayRef { + s.Version = &v return s } -type UpdateRouteOutput struct { - _ struct{} `type:"structure" payload:"Route"` - - // An object that represents a route returned by a describe operation. 
- // - // Route is a required field - Route *RouteData `locationName:"route" type:"structure" required:"true"` -} - -// String returns the string representation -func (s UpdateRouteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UpdateRouteOutput) GoString() string { - return s.String() -} - -// SetRoute sets the Route field's value. -func (s *UpdateRouteOutput) SetRoute(v *RouteData) *UpdateRouteOutput { - s.Route = v +// SetVirtualGatewayName sets the VirtualGatewayName field's value. +func (s *VirtualGatewayRef) SetVirtualGatewayName(v string) *VirtualGatewayRef { + s.VirtualGatewayName = &v return s } -type UpdateVirtualNodeInput struct { +// An object that represents the specification of a service mesh resource. +type VirtualGatewaySpec struct { _ struct{} `type:"structure"` - ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - - // MeshName is a required field - MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - - MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` + // An object that represents the default properties for a backend. + BackendDefaults *VirtualGatewayBackendDefaults `locationName:"backendDefaults" type:"structure"` - // An object that represents the specification of a virtual node. - // - // Spec is a required field - Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"` + // Listeners is a required field + Listeners []*VirtualGatewayListener `locationName:"listeners" type:"list" required:"true"` - // VirtualNodeName is a required field - VirtualNodeName *string `location:"uri" locationName:"virtualNodeName" min:"1" type:"string" required:"true"` + // An object that represents logging information. + Logging *VirtualGatewayLogging `locationName:"logging" type:"structure"` } // String returns the string representation -func (s UpdateVirtualNodeInput) String() string { +func (s VirtualGatewaySpec) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateVirtualNodeInput) GoString() string { +func (s VirtualGatewaySpec) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateVirtualNodeInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateVirtualNodeInput"} - if s.MeshName == nil { - invalidParams.Add(request.NewErrParamRequired("MeshName")) - } - if s.MeshName != nil && len(*s.MeshName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) - } - if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { - invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) +func (s *VirtualGatewaySpec) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewaySpec"} + if s.Listeners == nil { + invalidParams.Add(request.NewErrParamRequired("Listeners")) } - if s.Spec == nil { - invalidParams.Add(request.NewErrParamRequired("Spec")) - } - if s.VirtualNodeName == nil { - invalidParams.Add(request.NewErrParamRequired("VirtualNodeName")) + if s.BackendDefaults != nil { + if err := s.BackendDefaults.Validate(); err != nil { + invalidParams.AddNested("BackendDefaults", err.(request.ErrInvalidParams)) + } } - if s.VirtualNodeName != nil && len(*s.VirtualNodeName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VirtualNodeName", 1)) + if s.Listeners != nil { + for i, v := range s.Listeners { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Listeners", i), err.(request.ErrInvalidParams)) + } + } } - if s.Spec != nil { - if err := s.Spec.Validate(); err != nil { - invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) + if s.Logging != nil { + if err := s.Logging.Validate(); err != nil { + invalidParams.AddNested("Logging", err.(request.ErrInvalidParams)) } } @@ -9142,114 +13488,78 @@ func (s *UpdateVirtualNodeInput) Validate() error { return nil } -// SetClientToken sets the ClientToken field's value. -func (s *UpdateVirtualNodeInput) SetClientToken(v string) *UpdateVirtualNodeInput { - s.ClientToken = &v - return s -} - -// SetMeshName sets the MeshName field's value. -func (s *UpdateVirtualNodeInput) SetMeshName(v string) *UpdateVirtualNodeInput { - s.MeshName = &v - return s -} - -// SetMeshOwner sets the MeshOwner field's value. -func (s *UpdateVirtualNodeInput) SetMeshOwner(v string) *UpdateVirtualNodeInput { - s.MeshOwner = &v +// SetBackendDefaults sets the BackendDefaults field's value. +func (s *VirtualGatewaySpec) SetBackendDefaults(v *VirtualGatewayBackendDefaults) *VirtualGatewaySpec { + s.BackendDefaults = v return s } -// SetSpec sets the Spec field's value. -func (s *UpdateVirtualNodeInput) SetSpec(v *VirtualNodeSpec) *UpdateVirtualNodeInput { - s.Spec = v +// SetListeners sets the Listeners field's value. +func (s *VirtualGatewaySpec) SetListeners(v []*VirtualGatewayListener) *VirtualGatewaySpec { + s.Listeners = v return s } -// SetVirtualNodeName sets the VirtualNodeName field's value. -func (s *UpdateVirtualNodeInput) SetVirtualNodeName(v string) *UpdateVirtualNodeInput { - s.VirtualNodeName = &v +// SetLogging sets the Logging field's value. +func (s *VirtualGatewaySpec) SetLogging(v *VirtualGatewayLogging) *VirtualGatewaySpec { + s.Logging = v return s } -type UpdateVirtualNodeOutput struct { - _ struct{} `type:"structure" payload:"VirtualNode"` +// An object that represents the status of the mesh resource. +type VirtualGatewayStatus struct { + _ struct{} `type:"structure"` - // An object that represents a virtual node returned by a describe operation. 
- // - // VirtualNode is a required field - VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"VirtualGatewayStatusCode"` } // String returns the string representation -func (s UpdateVirtualNodeOutput) String() string { +func (s VirtualGatewayStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateVirtualNodeOutput) GoString() string { +func (s VirtualGatewayStatus) GoString() string { return s.String() } -// SetVirtualNode sets the VirtualNode field's value. -func (s *UpdateVirtualNodeOutput) SetVirtualNode(v *VirtualNodeData) *UpdateVirtualNodeOutput { - s.VirtualNode = v +// SetStatus sets the Status field's value. +func (s *VirtualGatewayStatus) SetStatus(v string) *VirtualGatewayStatus { + s.Status = &v return s } -type UpdateVirtualRouterInput struct { +// An object that represents a Transport Layer Security (TLS) validation context. +type VirtualGatewayTlsValidationContext struct { _ struct{} `type:"structure"` - ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - - // MeshName is a required field - MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - - MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` - - // An object that represents the specification of a virtual router. + // An object that represents a Transport Layer Security (TLS) validation context + // trust. // - // Spec is a required field - Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"` - - // VirtualRouterName is a required field - VirtualRouterName *string `location:"uri" locationName:"virtualRouterName" min:"1" type:"string" required:"true"` + // Trust is a required field + Trust *VirtualGatewayTlsValidationContextTrust `locationName:"trust" type:"structure" required:"true"` } // String returns the string representation -func (s UpdateVirtualRouterInput) String() string { +func (s VirtualGatewayTlsValidationContext) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateVirtualRouterInput) GoString() string { +func (s VirtualGatewayTlsValidationContext) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateVirtualRouterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateVirtualRouterInput"} - if s.MeshName == nil { - invalidParams.Add(request.NewErrParamRequired("MeshName")) - } - if s.MeshName != nil && len(*s.MeshName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) - } - if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { - invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) - } - if s.Spec == nil { - invalidParams.Add(request.NewErrParamRequired("Spec")) - } - if s.VirtualRouterName == nil { - invalidParams.Add(request.NewErrParamRequired("VirtualRouterName")) - } - if s.VirtualRouterName != nil && len(*s.VirtualRouterName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VirtualRouterName", 1)) +func (s *VirtualGatewayTlsValidationContext) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayTlsValidationContext"} + if s.Trust == nil { + invalidParams.Add(request.NewErrParamRequired("Trust")) } - if s.Spec != nil { - if err := s.Spec.Validate(); err != nil { - invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) + if s.Trust != nil { + if err := s.Trust.Validate(); err != nil { + invalidParams.AddNested("Trust", err.(request.ErrInvalidParams)) } } @@ -9259,115 +13569,80 @@ func (s *UpdateVirtualRouterInput) Validate() error { return nil } -// SetClientToken sets the ClientToken field's value. -func (s *UpdateVirtualRouterInput) SetClientToken(v string) *UpdateVirtualRouterInput { - s.ClientToken = &v - return s -} - -// SetMeshName sets the MeshName field's value. -func (s *UpdateVirtualRouterInput) SetMeshName(v string) *UpdateVirtualRouterInput { - s.MeshName = &v - return s -} - -// SetMeshOwner sets the MeshOwner field's value. -func (s *UpdateVirtualRouterInput) SetMeshOwner(v string) *UpdateVirtualRouterInput { - s.MeshOwner = &v - return s -} - -// SetSpec sets the Spec field's value. -func (s *UpdateVirtualRouterInput) SetSpec(v *VirtualRouterSpec) *UpdateVirtualRouterInput { - s.Spec = v - return s -} - -// SetVirtualRouterName sets the VirtualRouterName field's value. -func (s *UpdateVirtualRouterInput) SetVirtualRouterName(v string) *UpdateVirtualRouterInput { - s.VirtualRouterName = &v +// SetTrust sets the Trust field's value. +func (s *VirtualGatewayTlsValidationContext) SetTrust(v *VirtualGatewayTlsValidationContextTrust) *VirtualGatewayTlsValidationContext { + s.Trust = v return s } -type UpdateVirtualRouterOutput struct { - _ struct{} `type:"structure" payload:"VirtualRouter"` +// An object that represents a TLS validation context trust for an AWS Certicate +// Manager (ACM) certificate. +type VirtualGatewayTlsValidationContextAcmTrust struct { + _ struct{} `type:"structure"` - // An object that represents a virtual router returned by a describe operation. 
- // - // VirtualRouter is a required field - VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"` + // CertificateAuthorityArns is a required field + CertificateAuthorityArns []*string `locationName:"certificateAuthorityArns" min:"1" type:"list" required:"true"` } // String returns the string representation -func (s UpdateVirtualRouterOutput) String() string { +func (s VirtualGatewayTlsValidationContextAcmTrust) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateVirtualRouterOutput) GoString() string { +func (s VirtualGatewayTlsValidationContextAcmTrust) GoString() string { return s.String() } -// SetVirtualRouter sets the VirtualRouter field's value. -func (s *UpdateVirtualRouterOutput) SetVirtualRouter(v *VirtualRouterData) *UpdateVirtualRouterOutput { - s.VirtualRouter = v - return s -} - -type UpdateVirtualServiceInput struct { - _ struct{} `type:"structure"` - - ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *VirtualGatewayTlsValidationContextAcmTrust) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayTlsValidationContextAcmTrust"} + if s.CertificateAuthorityArns == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateAuthorityArns")) + } + if s.CertificateAuthorityArns != nil && len(s.CertificateAuthorityArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateAuthorityArns", 1)) + } - // MeshName is a required field - MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - MeshOwner *string `location:"querystring" locationName:"meshOwner" min:"12" type:"string"` +// SetCertificateAuthorityArns sets the CertificateAuthorityArns field's value. +func (s *VirtualGatewayTlsValidationContextAcmTrust) SetCertificateAuthorityArns(v []*string) *VirtualGatewayTlsValidationContextAcmTrust { + s.CertificateAuthorityArns = v + return s +} - // An object that represents the specification of a virtual service. - // - // Spec is a required field - Spec *VirtualServiceSpec `locationName:"spec" type:"structure" required:"true"` +// An object that represents a Transport Layer Security (TLS) validation context +// trust for a local file. +type VirtualGatewayTlsValidationContextFileTrust struct { + _ struct{} `type:"structure"` - // VirtualServiceName is a required field - VirtualServiceName *string `location:"uri" locationName:"virtualServiceName" type:"string" required:"true"` + // CertificateChain is a required field + CertificateChain *string `locationName:"certificateChain" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s UpdateVirtualServiceInput) String() string { +func (s VirtualGatewayTlsValidationContextFileTrust) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateVirtualServiceInput) GoString() string { +func (s VirtualGatewayTlsValidationContextFileTrust) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateVirtualServiceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateVirtualServiceInput"} - if s.MeshName == nil { - invalidParams.Add(request.NewErrParamRequired("MeshName")) - } - if s.MeshName != nil && len(*s.MeshName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MeshName", 1)) - } - if s.MeshOwner != nil && len(*s.MeshOwner) < 12 { - invalidParams.Add(request.NewErrParamMinLen("MeshOwner", 12)) - } - if s.Spec == nil { - invalidParams.Add(request.NewErrParamRequired("Spec")) - } - if s.VirtualServiceName == nil { - invalidParams.Add(request.NewErrParamRequired("VirtualServiceName")) - } - if s.VirtualServiceName != nil && len(*s.VirtualServiceName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VirtualServiceName", 1)) +func (s *VirtualGatewayTlsValidationContextFileTrust) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayTlsValidationContextFileTrust"} + if s.CertificateChain == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateChain")) } - if s.Spec != nil { - if err := s.Spec.Validate(); err != nil { - invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) - } + if s.CertificateChain != nil && len(*s.CertificateChain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateChain", 1)) } if invalidParams.Len() > 0 { @@ -9376,58 +13651,65 @@ func (s *UpdateVirtualServiceInput) Validate() error { return nil } -// SetClientToken sets the ClientToken field's value. -func (s *UpdateVirtualServiceInput) SetClientToken(v string) *UpdateVirtualServiceInput { - s.ClientToken = &v - return s -} - -// SetMeshName sets the MeshName field's value. -func (s *UpdateVirtualServiceInput) SetMeshName(v string) *UpdateVirtualServiceInput { - s.MeshName = &v - return s -} - -// SetMeshOwner sets the MeshOwner field's value. -func (s *UpdateVirtualServiceInput) SetMeshOwner(v string) *UpdateVirtualServiceInput { - s.MeshOwner = &v - return s -} - -// SetSpec sets the Spec field's value. -func (s *UpdateVirtualServiceInput) SetSpec(v *VirtualServiceSpec) *UpdateVirtualServiceInput { - s.Spec = v +// SetCertificateChain sets the CertificateChain field's value. +func (s *VirtualGatewayTlsValidationContextFileTrust) SetCertificateChain(v string) *VirtualGatewayTlsValidationContextFileTrust { + s.CertificateChain = &v return s } -// SetVirtualServiceName sets the VirtualServiceName field's value. -func (s *UpdateVirtualServiceInput) SetVirtualServiceName(v string) *UpdateVirtualServiceInput { - s.VirtualServiceName = &v - return s -} +// An object that represents a Transport Layer Security (TLS) validation context +// trust. +type VirtualGatewayTlsValidationContextTrust struct { + _ struct{} `type:"structure"` -type UpdateVirtualServiceOutput struct { - _ struct{} `type:"structure" payload:"VirtualService"` + // An object that represents a TLS validation context trust for an AWS Certicate + // Manager (ACM) certificate. + Acm *VirtualGatewayTlsValidationContextAcmTrust `locationName:"acm" type:"structure"` - // An object that represents a virtual service returned by a describe operation. - // - // VirtualService is a required field - VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"` + // An object that represents a Transport Layer Security (TLS) validation context + // trust for a local file. 
+ File *VirtualGatewayTlsValidationContextFileTrust `locationName:"file" type:"structure"` } // String returns the string representation -func (s UpdateVirtualServiceOutput) String() string { +func (s VirtualGatewayTlsValidationContextTrust) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateVirtualServiceOutput) GoString() string { +func (s VirtualGatewayTlsValidationContextTrust) GoString() string { return s.String() } -// SetVirtualService sets the VirtualService field's value. -func (s *UpdateVirtualServiceOutput) SetVirtualService(v *VirtualServiceData) *UpdateVirtualServiceOutput { - s.VirtualService = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *VirtualGatewayTlsValidationContextTrust) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VirtualGatewayTlsValidationContextTrust"} + if s.Acm != nil { + if err := s.Acm.Validate(); err != nil { + invalidParams.AddNested("Acm", err.(request.ErrInvalidParams)) + } + } + if s.File != nil { + if err := s.File.Validate(); err != nil { + invalidParams.AddNested("File", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcm sets the Acm field's value. +func (s *VirtualGatewayTlsValidationContextTrust) SetAcm(v *VirtualGatewayTlsValidationContextAcmTrust) *VirtualGatewayTlsValidationContextTrust { + s.Acm = v + return s +} + +// SetFile sets the File field's value. +func (s *VirtualGatewayTlsValidationContextTrust) SetFile(v *VirtualGatewayTlsValidationContextFileTrust) *VirtualGatewayTlsValidationContextTrust { + s.File = v return s } @@ -9504,6 +13786,12 @@ type VirtualNodeRef struct { // Arn is a required field Arn *string `locationName:"arn" type:"string" required:"true"` + // CreatedAt is a required field + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"` + + // LastUpdatedAt is a required field + LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" required:"true"` + // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` @@ -9513,6 +13801,9 @@ type VirtualNodeRef struct { // ResourceOwner is a required field ResourceOwner *string `locationName:"resourceOwner" min:"12" type:"string" required:"true"` + // Version is a required field + Version *int64 `locationName:"version" type:"long" required:"true"` + // VirtualNodeName is a required field VirtualNodeName *string `locationName:"virtualNodeName" min:"1" type:"string" required:"true"` } @@ -9533,6 +13824,18 @@ func (s *VirtualNodeRef) SetArn(v string) *VirtualNodeRef { return s } +// SetCreatedAt sets the CreatedAt field's value. +func (s *VirtualNodeRef) SetCreatedAt(v time.Time) *VirtualNodeRef { + s.CreatedAt = &v + return s +} + +// SetLastUpdatedAt sets the LastUpdatedAt field's value. +func (s *VirtualNodeRef) SetLastUpdatedAt(v time.Time) *VirtualNodeRef { + s.LastUpdatedAt = &v + return s +} + // SetMeshName sets the MeshName field's value. func (s *VirtualNodeRef) SetMeshName(v string) *VirtualNodeRef { s.MeshName = &v @@ -9551,6 +13854,12 @@ func (s *VirtualNodeRef) SetResourceOwner(v string) *VirtualNodeRef { return s } +// SetVersion sets the Version field's value. +func (s *VirtualNodeRef) SetVersion(v int64) *VirtualNodeRef { + s.Version = &v + return s +} + // SetVirtualNodeName sets the VirtualNodeName field's value. 
func (s *VirtualNodeRef) SetVirtualNodeName(v string) *VirtualNodeRef { s.VirtualNodeName = &v @@ -9842,6 +14151,12 @@ type VirtualRouterRef struct { // Arn is a required field Arn *string `locationName:"arn" type:"string" required:"true"` + // CreatedAt is a required field + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"` + + // LastUpdatedAt is a required field + LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" required:"true"` + // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` @@ -9851,6 +14166,9 @@ type VirtualRouterRef struct { // ResourceOwner is a required field ResourceOwner *string `locationName:"resourceOwner" min:"12" type:"string" required:"true"` + // Version is a required field + Version *int64 `locationName:"version" type:"long" required:"true"` + // VirtualRouterName is a required field VirtualRouterName *string `locationName:"virtualRouterName" min:"1" type:"string" required:"true"` } @@ -9871,6 +14189,18 @@ func (s *VirtualRouterRef) SetArn(v string) *VirtualRouterRef { return s } +// SetCreatedAt sets the CreatedAt field's value. +func (s *VirtualRouterRef) SetCreatedAt(v time.Time) *VirtualRouterRef { + s.CreatedAt = &v + return s +} + +// SetLastUpdatedAt sets the LastUpdatedAt field's value. +func (s *VirtualRouterRef) SetLastUpdatedAt(v time.Time) *VirtualRouterRef { + s.LastUpdatedAt = &v + return s +} + // SetMeshName sets the MeshName field's value. func (s *VirtualRouterRef) SetMeshName(v string) *VirtualRouterRef { s.MeshName = &v @@ -9889,6 +14219,12 @@ func (s *VirtualRouterRef) SetResourceOwner(v string) *VirtualRouterRef { return s } +// SetVersion sets the Version field's value. +func (s *VirtualRouterRef) SetVersion(v int64) *VirtualRouterRef { + s.Version = &v + return s +} + // SetVirtualRouterName sets the VirtualRouterName field's value. func (s *VirtualRouterRef) SetVirtualRouterName(v string) *VirtualRouterRef { s.VirtualRouterName = &v @@ -10182,6 +14518,12 @@ type VirtualServiceRef struct { // Arn is a required field Arn *string `locationName:"arn" type:"string" required:"true"` + // CreatedAt is a required field + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" required:"true"` + + // LastUpdatedAt is a required field + LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp" required:"true"` + // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` @@ -10191,6 +14533,9 @@ type VirtualServiceRef struct { // ResourceOwner is a required field ResourceOwner *string `locationName:"resourceOwner" min:"12" type:"string" required:"true"` + // Version is a required field + Version *int64 `locationName:"version" type:"long" required:"true"` + // VirtualServiceName is a required field VirtualServiceName *string `locationName:"virtualServiceName" type:"string" required:"true"` } @@ -10211,6 +14556,18 @@ func (s *VirtualServiceRef) SetArn(v string) *VirtualServiceRef { return s } +// SetCreatedAt sets the CreatedAt field's value. +func (s *VirtualServiceRef) SetCreatedAt(v time.Time) *VirtualServiceRef { + s.CreatedAt = &v + return s +} + +// SetLastUpdatedAt sets the LastUpdatedAt field's value. +func (s *VirtualServiceRef) SetLastUpdatedAt(v time.Time) *VirtualServiceRef { + s.LastUpdatedAt = &v + return s +} + // SetMeshName sets the MeshName field's value. 
func (s *VirtualServiceRef) SetMeshName(v string) *VirtualServiceRef { s.MeshName = &v @@ -10229,6 +14586,12 @@ func (s *VirtualServiceRef) SetResourceOwner(v string) *VirtualServiceRef { return s } +// SetVersion sets the Version field's value. +func (s *VirtualServiceRef) SetVersion(v int64) *VirtualServiceRef { + s.Version = &v + return s +} + // SetVirtualServiceName sets the VirtualServiceName field's value. func (s *VirtualServiceRef) SetVirtualServiceName(v string) *VirtualServiceRef { s.VirtualServiceName = &v @@ -10362,6 +14725,14 @@ const ( DurationUnitS = "s" ) +// DurationUnit_Values returns all elements of the DurationUnit enum +func DurationUnit_Values() []string { + return []string{ + DurationUnitMs, + DurationUnitS, + } +} + const ( // EgressFilterTypeAllowAll is a EgressFilterType enum value EgressFilterTypeAllowAll = "ALLOW_ALL" @@ -10370,6 +14741,34 @@ const ( EgressFilterTypeDropAll = "DROP_ALL" ) +// EgressFilterType_Values returns all elements of the EgressFilterType enum +func EgressFilterType_Values() []string { + return []string{ + EgressFilterTypeAllowAll, + EgressFilterTypeDropAll, + } +} + +const ( + // GatewayRouteStatusCodeActive is a GatewayRouteStatusCode enum value + GatewayRouteStatusCodeActive = "ACTIVE" + + // GatewayRouteStatusCodeDeleted is a GatewayRouteStatusCode enum value + GatewayRouteStatusCodeDeleted = "DELETED" + + // GatewayRouteStatusCodeInactive is a GatewayRouteStatusCode enum value + GatewayRouteStatusCodeInactive = "INACTIVE" +) + +// GatewayRouteStatusCode_Values returns all elements of the GatewayRouteStatusCode enum +func GatewayRouteStatusCode_Values() []string { + return []string{ + GatewayRouteStatusCodeActive, + GatewayRouteStatusCodeDeleted, + GatewayRouteStatusCodeInactive, + } +} + const ( // GrpcRetryPolicyEventCancelled is a GrpcRetryPolicyEvent enum value GrpcRetryPolicyEventCancelled = "cancelled" @@ -10387,6 +14786,17 @@ const ( GrpcRetryPolicyEventUnavailable = "unavailable" ) +// GrpcRetryPolicyEvent_Values returns all elements of the GrpcRetryPolicyEvent enum +func GrpcRetryPolicyEvent_Values() []string { + return []string{ + GrpcRetryPolicyEventCancelled, + GrpcRetryPolicyEventDeadlineExceeded, + GrpcRetryPolicyEventInternal, + GrpcRetryPolicyEventResourceExhausted, + GrpcRetryPolicyEventUnavailable, + } +} + const ( // HttpMethodConnect is a HttpMethod enum value HttpMethodConnect = "CONNECT" @@ -10416,6 +14826,21 @@ const ( HttpMethodTrace = "TRACE" ) +// HttpMethod_Values returns all elements of the HttpMethod enum +func HttpMethod_Values() []string { + return []string{ + HttpMethodConnect, + HttpMethodDelete, + HttpMethodGet, + HttpMethodHead, + HttpMethodOptions, + HttpMethodPatch, + HttpMethodPost, + HttpMethodPut, + HttpMethodTrace, + } +} + const ( // HttpSchemeHttp is a HttpScheme enum value HttpSchemeHttp = "http" @@ -10424,6 +14849,14 @@ const ( HttpSchemeHttps = "https" ) +// HttpScheme_Values returns all elements of the HttpScheme enum +func HttpScheme_Values() []string { + return []string{ + HttpSchemeHttp, + HttpSchemeHttps, + } +} + const ( // ListenerTlsModeDisabled is a ListenerTlsMode enum value ListenerTlsModeDisabled = "DISABLED" @@ -10435,6 +14868,15 @@ const ( ListenerTlsModeStrict = "STRICT" ) +// ListenerTlsMode_Values returns all elements of the ListenerTlsMode enum +func ListenerTlsMode_Values() []string { + return []string{ + ListenerTlsModeDisabled, + ListenerTlsModePermissive, + ListenerTlsModeStrict, + } +} + const ( // MeshStatusCodeActive is a MeshStatusCode enum value 
MeshStatusCodeActive = "ACTIVE" @@ -10446,6 +14888,15 @@ const ( MeshStatusCodeInactive = "INACTIVE" ) +// MeshStatusCode_Values returns all elements of the MeshStatusCode enum +func MeshStatusCode_Values() []string { + return []string{ + MeshStatusCodeActive, + MeshStatusCodeDeleted, + MeshStatusCodeInactive, + } +} + const ( // PortProtocolGrpc is a PortProtocol enum value PortProtocolGrpc = "grpc" @@ -10460,6 +14911,16 @@ const ( PortProtocolTcp = "tcp" ) +// PortProtocol_Values returns all elements of the PortProtocol enum +func PortProtocol_Values() []string { + return []string{ + PortProtocolGrpc, + PortProtocolHttp, + PortProtocolHttp2, + PortProtocolTcp, + } +} + const ( // RouteStatusCodeActive is a RouteStatusCode enum value RouteStatusCodeActive = "ACTIVE" @@ -10471,11 +14932,87 @@ const ( RouteStatusCodeInactive = "INACTIVE" ) +// RouteStatusCode_Values returns all elements of the RouteStatusCode enum +func RouteStatusCode_Values() []string { + return []string{ + RouteStatusCodeActive, + RouteStatusCodeDeleted, + RouteStatusCodeInactive, + } +} + const ( // TcpRetryPolicyEventConnectionError is a TcpRetryPolicyEvent enum value TcpRetryPolicyEventConnectionError = "connection-error" ) +// TcpRetryPolicyEvent_Values returns all elements of the TcpRetryPolicyEvent enum +func TcpRetryPolicyEvent_Values() []string { + return []string{ + TcpRetryPolicyEventConnectionError, + } +} + +const ( + // VirtualGatewayListenerTlsModeDisabled is a VirtualGatewayListenerTlsMode enum value + VirtualGatewayListenerTlsModeDisabled = "DISABLED" + + // VirtualGatewayListenerTlsModePermissive is a VirtualGatewayListenerTlsMode enum value + VirtualGatewayListenerTlsModePermissive = "PERMISSIVE" + + // VirtualGatewayListenerTlsModeStrict is a VirtualGatewayListenerTlsMode enum value + VirtualGatewayListenerTlsModeStrict = "STRICT" +) + +// VirtualGatewayListenerTlsMode_Values returns all elements of the VirtualGatewayListenerTlsMode enum +func VirtualGatewayListenerTlsMode_Values() []string { + return []string{ + VirtualGatewayListenerTlsModeDisabled, + VirtualGatewayListenerTlsModePermissive, + VirtualGatewayListenerTlsModeStrict, + } +} + +const ( + // VirtualGatewayPortProtocolGrpc is a VirtualGatewayPortProtocol enum value + VirtualGatewayPortProtocolGrpc = "grpc" + + // VirtualGatewayPortProtocolHttp is a VirtualGatewayPortProtocol enum value + VirtualGatewayPortProtocolHttp = "http" + + // VirtualGatewayPortProtocolHttp2 is a VirtualGatewayPortProtocol enum value + VirtualGatewayPortProtocolHttp2 = "http2" +) + +// VirtualGatewayPortProtocol_Values returns all elements of the VirtualGatewayPortProtocol enum +func VirtualGatewayPortProtocol_Values() []string { + return []string{ + VirtualGatewayPortProtocolGrpc, + VirtualGatewayPortProtocolHttp, + VirtualGatewayPortProtocolHttp2, + } +} + +const ( + // VirtualGatewayStatusCodeActive is a VirtualGatewayStatusCode enum value + VirtualGatewayStatusCodeActive = "ACTIVE" + + // VirtualGatewayStatusCodeDeleted is a VirtualGatewayStatusCode enum value + VirtualGatewayStatusCodeDeleted = "DELETED" + + // VirtualGatewayStatusCodeInactive is a VirtualGatewayStatusCode enum value + VirtualGatewayStatusCodeInactive = "INACTIVE" +) + +// VirtualGatewayStatusCode_Values returns all elements of the VirtualGatewayStatusCode enum +func VirtualGatewayStatusCode_Values() []string { + return []string{ + VirtualGatewayStatusCodeActive, + VirtualGatewayStatusCodeDeleted, + VirtualGatewayStatusCodeInactive, + } +} + const ( // VirtualNodeStatusCodeActive is a 
VirtualNodeStatusCode enum value VirtualNodeStatusCodeActive = "ACTIVE" @@ -10487,6 +15024,15 @@ const ( VirtualNodeStatusCodeInactive = "INACTIVE" ) +// VirtualNodeStatusCode_Values returns all elements of the VirtualNodeStatusCode enum +func VirtualNodeStatusCode_Values() []string { + return []string{ + VirtualNodeStatusCodeActive, + VirtualNodeStatusCodeDeleted, + VirtualNodeStatusCodeInactive, + } +} + const ( // VirtualRouterStatusCodeActive is a VirtualRouterStatusCode enum value VirtualRouterStatusCodeActive = "ACTIVE" @@ -10498,6 +15044,15 @@ const ( VirtualRouterStatusCodeInactive = "INACTIVE" ) +// VirtualRouterStatusCode_Values returns all elements of the VirtualRouterStatusCode enum +func VirtualRouterStatusCode_Values() []string { + return []string{ + VirtualRouterStatusCodeActive, + VirtualRouterStatusCodeDeleted, + VirtualRouterStatusCodeInactive, + } +} + const ( // VirtualServiceStatusCodeActive is a VirtualServiceStatusCode enum value VirtualServiceStatusCodeActive = "ACTIVE" @@ -10508,3 +15063,12 @@ const ( // VirtualServiceStatusCodeInactive is a VirtualServiceStatusCode enum value VirtualServiceStatusCodeInactive = "INACTIVE" ) + +// VirtualServiceStatusCode_Values returns all elements of the VirtualServiceStatusCode enum +func VirtualServiceStatusCode_Values() []string { + return []string{ + VirtualServiceStatusCodeActive, + VirtualServiceStatusCodeDeleted, + VirtualServiceStatusCodeInactive, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/appmesh/doc.go b/vendor/github.com/aws/aws-sdk-go/service/appmesh/doc.go index 09a029538..66f3509f3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appmesh/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appmesh/doc.go @@ -14,7 +14,7 @@ // // App Mesh supports microservice applications that use service discovery naming // for their components. For more information about service discovery on Amazon -// ECS, see Service Discovery (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html) +// ECS, see Service Discovery (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html) // in the Amazon Elastic Container Service Developer Guide. Kubernetes kube-dns // and coredns are supported. For more information, see DNS for Services and // Pods (https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) diff --git a/vendor/github.com/aws/aws-sdk-go/service/appmesh/errors.go b/vendor/github.com/aws/aws-sdk-go/service/appmesh/errors.go index 9e60da8d1..96973b8da 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appmesh/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appmesh/errors.go @@ -39,7 +39,7 @@ const ( // "LimitExceededException". // // You have exceeded a service limit for your account. For more information, - // see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service_limits.html) + // see Service Limits (https://docs.aws.amazon.com/app-mesh/latest/userguide/service-quotas.html) // in the AWS App Mesh User Guide. 
ErrCodeLimitExceededException = "LimitExceededException" diff --git a/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go b/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go index a30311ba9..2116834b9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go b/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go index 18f4e0a36..2de24156e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go @@ -168,6 +168,9 @@ func (c *AppStream) BatchAssociateUserStackRequest(input *BatchAssociateUserStac // * OperationNotPermittedException // The attempted operation is not permitted. // +// * InvalidParameterCombinationException +// Indicates an incorrect combination of parameters, or a missing parameter. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/BatchAssociateUserStack func (c *AppStream) BatchAssociateUserStack(input *BatchAssociateUserStackInput) (*BatchAssociateUserStackOutput, error) { req, out := c.BatchAssociateUserStackRequest(input) @@ -242,6 +245,14 @@ func (c *AppStream) BatchDisassociateUserStackRequest(input *BatchDisassociateUs // // See the AWS API reference guide for Amazon AppStream's // API operation BatchDisassociateUserStack for usage and error information. +// +// Returned Error Types: +// * OperationNotPermittedException +// The attempted operation is not permitted. +// +// * InvalidParameterCombinationException +// Indicates an incorrect combination of parameters, or a missing parameter. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/BatchDisassociateUserStack func (c *AppStream) BatchDisassociateUserStack(input *BatchDisassociateUserStackInput) (*BatchDisassociateUserStackOutput, error) { req, out := c.BatchDisassociateUserStackRequest(input) @@ -416,6 +427,9 @@ func (c *AppStream) CreateDirectoryConfigRequest(input *CreateDirectoryConfigInp // API operation CreateDirectoryConfig for usage and error information. // // Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// // * ResourceAlreadyExistsException // The specified resource already exists. // @@ -426,6 +440,12 @@ func (c *AppStream) CreateDirectoryConfigRequest(input *CreateDirectoryConfigInp // The resource cannot be created because your AWS account is suspended. For // assistance, contact AWS Support. // +// * OperationNotPermittedException +// The attempted operation is not permitted. +// +// * InvalidRoleException +// The specified role is invalid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/CreateDirectoryConfig func (c *AppStream) CreateDirectoryConfig(input *CreateDirectoryConfigInput) (*CreateDirectoryConfigOutput, error) { req, out := c.CreateDirectoryConfigRequest(input) @@ -515,6 +535,11 @@ func (c *AppStream) CreateFleetRequest(input *CreateFleetInput) (req *request.Re // * LimitExceededException // The requested limit exceeds the permitted limit for an account. 
// +// * RequestLimitExceededException +// AppStream 2.0 can’t process the request right now because the Describe +// calls from your AWS account are being throttled by Amazon EC2. Try again +// later. +// // * InvalidAccountStatusException // The resource cannot be created because your AWS account is suspended. For // assistance, contact AWS Support. @@ -617,6 +642,11 @@ func (c *AppStream) CreateImageBuilderRequest(input *CreateImageBuilderInput) (r // * LimitExceededException // The requested limit exceeds the permitted limit for an account. // +// * RequestLimitExceededException +// AppStream 2.0 can’t process the request right now because the Describe +// calls from your AWS account are being throttled by Amazon EC2. Try again +// later. +// // * InvalidAccountStatusException // The resource cannot be created because your AWS account is suspended. For // assistance, contact AWS Support. @@ -2635,6 +2665,9 @@ func (c *AppStream) DescribeUserStackAssociationsRequest(input *DescribeUserStac // * InvalidParameterCombinationException // Indicates an incorrect combination of parameters, or a missing parameter. // +// * OperationNotPermittedException +// The attempted operation is not permitted. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DescribeUserStackAssociations func (c *AppStream) DescribeUserStackAssociations(input *DescribeUserStackAssociationsInput) (*DescribeUserStackAssociationsOutput, error) { req, out := c.DescribeUserStackAssociationsRequest(input) @@ -2884,6 +2917,9 @@ func (c *AppStream) DisassociateFleetRequest(input *DisassociateFleetInput) (req // * ConcurrentModificationException // An API error occurred. Wait a few minutes and try again. // +// * OperationNotPermittedException +// The attempted operation is not permitted. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/DisassociateFleet func (c *AppStream) DisassociateFleet(input *DisassociateFleetInput) (*DisassociateFleetOutput, error) { req, out := c.DisassociateFleetRequest(input) @@ -3361,6 +3397,11 @@ func (c *AppStream) StartFleetRequest(input *StartFleetInput) (req *request.Requ // * LimitExceededException // The requested limit exceeds the permitted limit for an account. // +// * RequestLimitExceededException +// AppStream 2.0 can’t process the request right now because the Describe +// calls from your AWS account are being throttled by Amazon EC2. Try again +// later. +// // * InvalidAccountStatusException // The resource cannot be created because your AWS account is suspended. For // assistance, contact AWS Support. @@ -3904,6 +3945,12 @@ func (c *AppStream) UpdateDirectoryConfigRequest(input *UpdateDirectoryConfigInp // * ConcurrentModificationException // An API error occurred. Wait a few minutes and try again. // +// * OperationNotPermittedException +// The attempted operation is not permitted. +// +// * InvalidRoleException +// The specified role is invalid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/appstream-2016-12-01/UpdateDirectoryConfig func (c *AppStream) UpdateDirectoryConfig(input *UpdateDirectoryConfigInput) (*UpdateDirectoryConfigOutput, error) { req, out := c.UpdateDirectoryConfigRequest(input) @@ -3992,6 +4039,11 @@ func (c *AppStream) UpdateFleetRequest(input *UpdateFleetInput) (req *request.Re // * LimitExceededException // The requested limit exceeds the permitted limit for an account. 
// +// * RequestLimitExceededException +// AppStream 2.0 can’t process the request right now because the Describe +// calls from your AWS account are being throttled by Amazon EC2. Try again +// later. +// // * InvalidAccountStatusException // The resource cannot be created because your AWS account is suspended. For // assistance, contact AWS Support. @@ -4776,8 +4828,8 @@ func (s *ComputeCapacityStatus) SetRunning(v int64) *ComputeCapacityStatus { // An API error occurred. Wait a few minutes and try again. type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message in the exception. Message_ *string `locationName:"Message" type:"string"` @@ -4795,17 +4847,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. -func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4813,22 +4865,22 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } type CopyImageInput struct { @@ -4948,9 +5000,7 @@ type CreateDirectoryConfigInput struct { // The credentials for the service account used by the fleet or image builder // to connect to the directory. 
- // - // ServiceAccountCredentials is a required field - ServiceAccountCredentials *ServiceAccountCredentials `type:"structure" required:"true"` + ServiceAccountCredentials *ServiceAccountCredentials `type:"structure"` } // String returns the string representation @@ -4972,9 +5022,6 @@ func (s *CreateDirectoryConfigInput) Validate() error { if s.OrganizationalUnitDistinguishedNames == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationalUnitDistinguishedNames")) } - if s.ServiceAccountCredentials == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceAccountCredentials")) - } if s.ServiceAccountCredentials != nil { if err := s.ServiceAccountCredentials.Validate(); err != nil { invalidParams.AddNested("ServiceAccountCredentials", err.(request.ErrInvalidParams)) @@ -5076,7 +5123,7 @@ type CreateFleetInput struct { // assume a role, a fleet instance calls the AWS Security Token Service (STS) // AssumeRole API operation and passes the ARN of the role to use. The operation // creates a new session with temporary credentials. AppStream 2.0 retrieves - // the temporary credentials and creates the AppStream_Machine_Role credential + // the temporary credentials and creates the appstream_machine_role credential // profile on the instance. // // For more information, see Using an IAM Role to Grant Permissions to Applications @@ -5141,6 +5188,18 @@ type CreateFleetInput struct { // // * stream.memory.8xlarge // + // * stream.memory.z1d.large + // + // * stream.memory.z1d.xlarge + // + // * stream.memory.z1d.2xlarge + // + // * stream.memory.z1d.3xlarge + // + // * stream.memory.z1d.6xlarge + // + // * stream.memory.z1d.12xlarge + // // * stream.graphics-design.large // // * stream.graphics-design.xlarge @@ -5151,6 +5210,18 @@ type CreateFleetInput struct { // // * stream.graphics-desktop.2xlarge // + // * stream.graphics.g4dn.xlarge + // + // * stream.graphics.g4dn.2xlarge + // + // * stream.graphics.g4dn.4xlarge + // + // * stream.graphics.g4dn.8xlarge + // + // * stream.graphics.g4dn.12xlarge + // + // * stream.graphics.g4dn.16xlarge + // // * stream.graphics-pro.4xlarge // // * stream.graphics-pro.8xlarge @@ -5174,6 +5245,14 @@ type CreateFleetInput struct { // Name is a required field Name *string `type:"string" required:"true"` + // The AppStream 2.0 view that is displayed to your users when they stream from + // the fleet. When APP is specified, only the windows of applications opened + // by users display. When DESKTOP is specified, the standard desktop that is + // provided by the operating system displays. + // + // The default value is APP. + StreamView *string `type:"string" enum:"StreamView"` + // The tags to associate with the fleet. A tag is a key-value pair, and the // value is optional. For example, Environment=Test. If you do not specify a // value, Environment=. @@ -5320,6 +5399,12 @@ func (s *CreateFleetInput) SetName(v string) *CreateFleetInput { return s } +// SetStreamView sets the StreamView field's value. +func (s *CreateFleetInput) SetStreamView(v string) *CreateFleetInput { + s.StreamView = &v + return s +} + // SetTags sets the Tags field's value. func (s *CreateFleetInput) SetTags(v map[string]*string) *CreateFleetInput { s.Tags = v @@ -5383,7 +5468,7 @@ type CreateImageBuilderInput struct { // To assume a role, the image builder calls the AWS Security Token Service // (STS) AssumeRole API operation and passes the ARN of the role to use. The // operation creates a new session with temporary credentials. 
AppStream 2.0 - // retrieves the temporary credentials and creates the AppStream_Machine_Role + // retrieves the temporary credentials and creates the appstream_machine_role // credential profile on the instance. // // For more information, see Using an IAM Role to Grant Permissions to Applications @@ -5424,6 +5509,18 @@ type CreateImageBuilderInput struct { // // * stream.memory.8xlarge // + // * stream.memory.z1d.large + // + // * stream.memory.z1d.xlarge + // + // * stream.memory.z1d.2xlarge + // + // * stream.memory.z1d.3xlarge + // + // * stream.memory.z1d.6xlarge + // + // * stream.memory.z1d.12xlarge + // // * stream.graphics-design.large // // * stream.graphics-design.xlarge @@ -5434,6 +5531,18 @@ type CreateImageBuilderInput struct { // // * stream.graphics-desktop.2xlarge // + // * stream.graphics.g4dn.xlarge + // + // * stream.graphics.g4dn.2xlarge + // + // * stream.graphics.g4dn.4xlarge + // + // * stream.graphics.g4dn.8xlarge + // + // * stream.graphics.g4dn.12xlarge + // + // * stream.graphics.g4dn.16xlarge + // // * stream.graphics-pro.4xlarge // // * stream.graphics-pro.8xlarge @@ -7177,7 +7286,8 @@ type DescribeSessionsInput struct { // StackName is a required field StackName *string `min:"1" type:"string" required:"true"` - // The user identifier. + // The user identifier (ID). If you specify a user ID, you must also specify + // the authentication type. UserId *string `min:"2" type:"string"` } @@ -8076,7 +8186,7 @@ type Fleet struct { // fleet instance calls the AWS Security Token Service (STS) AssumeRole API // operation and passes the ARN of the role to use. The operation creates a // new session with temporary credentials. AppStream 2.0 retrieves the temporary - // credentials and creates the AppStream_Machine_Role credential profile on + // credentials and creates the appstream_machine_role credential profile on // the instance. // // For more information, see Using an IAM Role to Grant Permissions to Applications @@ -8141,6 +8251,18 @@ type Fleet struct { // // * stream.memory.8xlarge // + // * stream.memory.z1d.large + // + // * stream.memory.z1d.xlarge + // + // * stream.memory.z1d.2xlarge + // + // * stream.memory.z1d.3xlarge + // + // * stream.memory.z1d.6xlarge + // + // * stream.memory.z1d.12xlarge + // // * stream.graphics-design.large // // * stream.graphics-design.xlarge @@ -8151,6 +8273,18 @@ type Fleet struct { // // * stream.graphics-desktop.2xlarge // + // * stream.graphics.g4dn.xlarge + // + // * stream.graphics.g4dn.2xlarge + // + // * stream.graphics.g4dn.4xlarge + // + // * stream.graphics.g4dn.8xlarge + // + // * stream.graphics.g4dn.12xlarge + // + // * stream.graphics.g4dn.16xlarge + // // * stream.graphics-pro.4xlarge // // * stream.graphics-pro.8xlarge @@ -8179,6 +8313,14 @@ type Fleet struct { // State is a required field State *string `type:"string" required:"true" enum:"FleetState"` + // The AppStream 2.0 view that is displayed to your users when they stream from + // the fleet. When APP is specified, only the windows of applications opened + // by users display. When DESKTOP is specified, the standard desktop that is + // provided by the operating system displays. + // + // The default value is APP. + StreamView *string `type:"string" enum:"StreamView"` + // The VPC configuration for the fleet. VpcConfig *VpcConfig `type:"structure"` } @@ -8301,6 +8443,12 @@ func (s *Fleet) SetState(v string) *Fleet { return s } +// SetStreamView sets the StreamView field's value. 
+func (s *Fleet) SetStreamView(v string) *Fleet { + s.StreamView = &v + return s +} + // SetVpcConfig sets the VpcConfig field's value. func (s *Fleet) SetVpcConfig(v *VpcConfig) *Fleet { s.VpcConfig = v @@ -8541,7 +8689,7 @@ type ImageBuilder struct { // role, the image builder calls the AWS Security Token Service (STS) AssumeRole // API operation and passes the ARN of the role to use. The operation creates // a new session with temporary credentials. AppStream 2.0 retrieves the temporary - // credentials and creates the AppStream_Machine_Role credential profile on + // credentials and creates the appstream_machine_role credential profile on // the instance. // // For more information, see Using an IAM Role to Grant Permissions to Applications @@ -8582,6 +8730,18 @@ type ImageBuilder struct { // // * stream.memory.8xlarge // + // * stream.memory.z1d.large + // + // * stream.memory.z1d.xlarge + // + // * stream.memory.z1d.2xlarge + // + // * stream.memory.z1d.3xlarge + // + // * stream.memory.z1d.6xlarge + // + // * stream.memory.z1d.12xlarge + // // * stream.graphics-design.large // // * stream.graphics-design.xlarge @@ -8592,6 +8752,18 @@ type ImageBuilder struct { // // * stream.graphics-desktop.2xlarge // + // * stream.graphics.g4dn.xlarge + // + // * stream.graphics.g4dn.2xlarge + // + // * stream.graphics.g4dn.4xlarge + // + // * stream.graphics.g4dn.8xlarge + // + // * stream.graphics.g4dn.12xlarge + // + // * stream.graphics.g4dn.16xlarge + // // * stream.graphics-pro.4xlarge // // * stream.graphics-pro.8xlarge @@ -8839,8 +9011,8 @@ func (s *ImageStateChangeReason) SetMessage(v string) *ImageStateChangeReason { // The image does not support storage connectors. type IncompatibleImageException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message in the exception. Message_ *string `locationName:"Message" type:"string"` @@ -8858,17 +9030,17 @@ func (s IncompatibleImageException) GoString() string { func newErrorIncompatibleImageException(v protocol.ResponseMetadata) error { return &IncompatibleImageException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncompatibleImageException) Code() string { +func (s *IncompatibleImageException) Code() string { return "IncompatibleImageException" } // Message returns the exception's message. -func (s IncompatibleImageException) Message() string { +func (s *IncompatibleImageException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8876,29 +9048,29 @@ func (s IncompatibleImageException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IncompatibleImageException) OrigErr() error { +func (s *IncompatibleImageException) OrigErr() error { return nil } -func (s IncompatibleImageException) Error() string { +func (s *IncompatibleImageException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IncompatibleImageException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncompatibleImageException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s IncompatibleImageException) RequestID() string { - return s.respMetadata.RequestID +func (s *IncompatibleImageException) RequestID() string { + return s.RespMetadata.RequestID } // The resource cannot be created because your AWS account is suspended. For // assistance, contact AWS Support. type InvalidAccountStatusException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message in the exception. Message_ *string `locationName:"Message" type:"string"` @@ -8916,17 +9088,17 @@ func (s InvalidAccountStatusException) GoString() string { func newErrorInvalidAccountStatusException(v protocol.ResponseMetadata) error { return &InvalidAccountStatusException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAccountStatusException) Code() string { +func (s *InvalidAccountStatusException) Code() string { return "InvalidAccountStatusException" } // Message returns the exception's message. -func (s InvalidAccountStatusException) Message() string { +func (s *InvalidAccountStatusException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8934,28 +9106,28 @@ func (s InvalidAccountStatusException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAccountStatusException) OrigErr() error { +func (s *InvalidAccountStatusException) OrigErr() error { return nil } -func (s InvalidAccountStatusException) Error() string { +func (s *InvalidAccountStatusException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAccountStatusException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAccountStatusException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAccountStatusException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAccountStatusException) RequestID() string { + return s.RespMetadata.RequestID } // Indicates an incorrect combination of parameters, or a missing parameter. type InvalidParameterCombinationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message in the exception. Message_ *string `locationName:"Message" type:"string"` @@ -8973,17 +9145,17 @@ func (s InvalidParameterCombinationException) GoString() string { func newErrorInvalidParameterCombinationException(v protocol.ResponseMetadata) error { return &InvalidParameterCombinationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterCombinationException) Code() string { +func (s *InvalidParameterCombinationException) Code() string { return "InvalidParameterCombinationException" } // Message returns the exception's message. -func (s InvalidParameterCombinationException) Message() string { +func (s *InvalidParameterCombinationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8991,28 +9163,28 @@ func (s InvalidParameterCombinationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidParameterCombinationException) OrigErr() error { +func (s *InvalidParameterCombinationException) OrigErr() error { return nil } -func (s InvalidParameterCombinationException) Error() string { +func (s *InvalidParameterCombinationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterCombinationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterCombinationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterCombinationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterCombinationException) RequestID() string { + return s.RespMetadata.RequestID } // The specified role is invalid. type InvalidRoleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message in the exception. Message_ *string `locationName:"Message" type:"string"` @@ -9030,17 +9202,17 @@ func (s InvalidRoleException) GoString() string { func newErrorInvalidRoleException(v protocol.ResponseMetadata) error { return &InvalidRoleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRoleException) Code() string { +func (s *InvalidRoleException) Code() string { return "InvalidRoleException" } // Message returns the exception's message. -func (s InvalidRoleException) Message() string { +func (s *InvalidRoleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9048,22 +9220,22 @@ func (s InvalidRoleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRoleException) OrigErr() error { +func (s *InvalidRoleException) OrigErr() error { return nil } -func (s InvalidRoleException) Error() string { +func (s *InvalidRoleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRoleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRoleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRoleException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRoleException) RequestID() string { + return s.RespMetadata.RequestID } // Describes the error that is returned when a usage report can't be generated. @@ -9103,8 +9275,8 @@ func (s *LastReportGenerationExecutionError) SetErrorMessage(v string) *LastRepo // The requested limit exceeds the permitted limit for an account. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message in the exception. Message_ *string `locationName:"Message" type:"string"` @@ -9122,17 +9294,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9140,22 +9312,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListAssociatedFleetsInput struct { @@ -9431,8 +9603,8 @@ func (s *NetworkAccessConfiguration) SetEniPrivateIpAddress(v string) *NetworkAc // The attempted operation is not permitted. type OperationNotPermittedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message in the exception. Message_ *string `locationName:"Message" type:"string"` @@ -9450,17 +9622,76 @@ func (s OperationNotPermittedException) GoString() string { func newErrorOperationNotPermittedException(v protocol.ResponseMetadata) error { return &OperationNotPermittedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationNotPermittedException) Code() string { +func (s *OperationNotPermittedException) Code() string { return "OperationNotPermittedException" } // Message returns the exception's message. -func (s OperationNotPermittedException) Message() string { +func (s *OperationNotPermittedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *OperationNotPermittedException) OrigErr() error { + return nil +} + +func (s *OperationNotPermittedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *OperationNotPermittedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *OperationNotPermittedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// AppStream 2.0 can’t process the request right now because the Describe +// calls from your AWS account are being throttled by Amazon EC2. Try again +// later. +type RequestLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message in the exception. 
+ Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s RequestLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestLimitExceededException) GoString() string { + return s.String() +} + +func newErrorRequestLimitExceededException(v protocol.ResponseMetadata) error { + return &RequestLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *RequestLimitExceededException) Code() string { + return "RequestLimitExceededException" +} + +// Message returns the exception's message. +func (s *RequestLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9468,28 +9699,28 @@ func (s OperationNotPermittedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OperationNotPermittedException) OrigErr() error { +func (s *RequestLimitExceededException) OrigErr() error { return nil } -func (s OperationNotPermittedException) Error() string { +func (s *RequestLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OperationNotPermittedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RequestLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationNotPermittedException) RequestID() string { - return s.respMetadata.RequestID +func (s *RequestLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource already exists. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message in the exception. Message_ *string `locationName:"Message" type:"string"` @@ -9507,17 +9738,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9525,22 +9756,22 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // Describes a resource error. @@ -9587,8 +9818,8 @@ func (s *ResourceError) SetErrorTimestamp(v time.Time) *ResourceError { // The specified resource is in use. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message in the exception. Message_ *string `locationName:"Message" type:"string"` @@ -9606,17 +9837,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9624,28 +9855,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource exists and is not in use, but isn't available. type ResourceNotAvailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message in the exception. Message_ *string `locationName:"Message" type:"string"` @@ -9663,17 +9894,17 @@ func (s ResourceNotAvailableException) GoString() string { func newErrorResourceNotAvailableException(v protocol.ResponseMetadata) error { return &ResourceNotAvailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotAvailableException) Code() string { +func (s *ResourceNotAvailableException) Code() string { return "ResourceNotAvailableException" } // Message returns the exception's message. -func (s ResourceNotAvailableException) Message() string { +func (s *ResourceNotAvailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9681,28 +9912,28 @@ func (s ResourceNotAvailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceNotAvailableException) OrigErr() error { +func (s *ResourceNotAvailableException) OrigErr() error { return nil } -func (s ResourceNotAvailableException) Error() string { +func (s *ResourceNotAvailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotAvailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotAvailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotAvailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotAvailableException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource was not found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message in the exception. Message_ *string `locationName:"Message" type:"string"` @@ -9720,17 +9951,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9738,22 +9969,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Describes the credentials for the service account used by the fleet or image @@ -10732,7 +10963,7 @@ type UpdateFleetInput struct { // assume a role, a fleet instance calls the AWS Security Token Service (STS) // AssumeRole API operation and passes the ARN of the role to use. The operation // creates a new session with temporary credentials. AppStream 2.0 retrieves - // the temporary credentials and creates the AppStream_Machine_Role credential + // the temporary credentials and creates the appstream_machine_role credential // profile on the instance. 
// // For more information, see Using an IAM Role to Grant Permissions to Applications @@ -10797,6 +11028,18 @@ type UpdateFleetInput struct { // // * stream.memory.8xlarge // + // * stream.memory.z1d.large + // + // * stream.memory.z1d.xlarge + // + // * stream.memory.z1d.2xlarge + // + // * stream.memory.z1d.3xlarge + // + // * stream.memory.z1d.6xlarge + // + // * stream.memory.z1d.12xlarge + // // * stream.graphics-design.large // // * stream.graphics-design.xlarge @@ -10807,6 +11050,18 @@ type UpdateFleetInput struct { // // * stream.graphics-desktop.2xlarge // + // * stream.graphics.g4dn.xlarge + // + // * stream.graphics.g4dn.2xlarge + // + // * stream.graphics.g4dn.4xlarge + // + // * stream.graphics.g4dn.8xlarge + // + // * stream.graphics.g4dn.12xlarge + // + // * stream.graphics.g4dn.16xlarge + // // * stream.graphics-pro.4xlarge // // * stream.graphics-pro.8xlarge @@ -10826,6 +11081,14 @@ type UpdateFleetInput struct { // A unique name for the fleet. Name *string `min:"1" type:"string"` + // The AppStream 2.0 view that is displayed to your users when they stream from + // the fleet. When APP is specified, only the windows of applications opened + // by users display. When DESKTOP is specified, the standard desktop that is + // provided by the operating system displays. + // + // The default value is APP. + StreamView *string `type:"string" enum:"StreamView"` + // The VPC configuration for the fleet. VpcConfig *VpcConfig `type:"structure"` } @@ -10954,6 +11217,12 @@ func (s *UpdateFleetInput) SetName(v string) *UpdateFleetInput { return s } +// SetStreamView sets the StreamView field's value. +func (s *UpdateFleetInput) SetStreamView(v string) *UpdateFleetInput { + s.StreamView = &v + return s +} + // SetVpcConfig sets the VpcConfig field's value. func (s *UpdateFleetInput) SetVpcConfig(v *VpcConfig) *UpdateFleetInput { s.VpcConfig = v @@ -11664,6 +11933,13 @@ const ( AccessEndpointTypeStreaming = "STREAMING" ) +// AccessEndpointType_Values returns all elements of the AccessEndpointType enum +func AccessEndpointType_Values() []string { + return []string{ + AccessEndpointTypeStreaming, + } +} + const ( // ActionClipboardCopyFromLocalDevice is a Action enum value ActionClipboardCopyFromLocalDevice = "CLIPBOARD_COPY_FROM_LOCAL_DEVICE" @@ -11681,6 +11957,17 @@ const ( ActionPrintingToLocalDevice = "PRINTING_TO_LOCAL_DEVICE" ) +// Action_Values returns all elements of the Action enum +func Action_Values() []string { + return []string{ + ActionClipboardCopyFromLocalDevice, + ActionClipboardCopyToLocalDevice, + ActionFileUpload, + ActionFileDownload, + ActionPrintingToLocalDevice, + } +} + const ( // AuthenticationTypeApi is a AuthenticationType enum value AuthenticationTypeApi = "API" @@ -11692,6 +11979,15 @@ const ( AuthenticationTypeUserpool = "USERPOOL" ) +// AuthenticationType_Values returns all elements of the AuthenticationType enum +func AuthenticationType_Values() []string { + return []string{ + AuthenticationTypeApi, + AuthenticationTypeSaml, + AuthenticationTypeUserpool, + } +} + // The fleet attribute. 
const ( // FleetAttributeVpcConfiguration is a FleetAttribute enum value @@ -11707,6 +12003,16 @@ const ( FleetAttributeIamRoleArn = "IAM_ROLE_ARN" ) +// FleetAttribute_Values returns all elements of the FleetAttribute enum +func FleetAttribute_Values() []string { + return []string{ + FleetAttributeVpcConfiguration, + FleetAttributeVpcConfigurationSecurityGroupIds, + FleetAttributeDomainJoinInfo, + FleetAttributeIamRoleArn, + } +} + const ( // FleetErrorCodeIamServiceRoleMissingEniDescribeAction is a FleetErrorCode enum value FleetErrorCodeIamServiceRoleMissingEniDescribeAction = "IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION" @@ -11793,6 +12099,40 @@ const ( FleetErrorCodeDomainJoinInternalServiceError = "DOMAIN_JOIN_INTERNAL_SERVICE_ERROR" ) +// FleetErrorCode_Values returns all elements of the FleetErrorCode enum +func FleetErrorCode_Values() []string { + return []string{ + FleetErrorCodeIamServiceRoleMissingEniDescribeAction, + FleetErrorCodeIamServiceRoleMissingEniCreateAction, + FleetErrorCodeIamServiceRoleMissingEniDeleteAction, + FleetErrorCodeNetworkInterfaceLimitExceeded, + FleetErrorCodeInternalServiceError, + FleetErrorCodeIamServiceRoleIsMissing, + FleetErrorCodeMachineRoleIsMissing, + FleetErrorCodeStsDisabledInRegion, + FleetErrorCodeSubnetHasInsufficientIpAddresses, + FleetErrorCodeIamServiceRoleMissingDescribeSubnetAction, + FleetErrorCodeSubnetNotFound, + FleetErrorCodeImageNotFound, + FleetErrorCodeInvalidSubnetConfiguration, + FleetErrorCodeSecurityGroupsNotFound, + FleetErrorCodeIgwNotAttached, + FleetErrorCodeIamServiceRoleMissingDescribeSecurityGroupsAction, + FleetErrorCodeDomainJoinErrorFileNotFound, + FleetErrorCodeDomainJoinErrorAccessDenied, + FleetErrorCodeDomainJoinErrorLogonFailure, + FleetErrorCodeDomainJoinErrorInvalidParameter, + FleetErrorCodeDomainJoinErrorMoreData, + FleetErrorCodeDomainJoinErrorNoSuchDomain, + FleetErrorCodeDomainJoinErrorNotSupported, + FleetErrorCodeDomainJoinNerrInvalidWorkgroupName, + FleetErrorCodeDomainJoinNerrWorkstationNotStarted, + FleetErrorCodeDomainJoinErrorDsMachineAccountQuotaExceeded, + FleetErrorCodeDomainJoinNerrPasswordExpired, + FleetErrorCodeDomainJoinInternalServiceError, + } +} + const ( // FleetStateStarting is a FleetState enum value FleetStateStarting = "STARTING" @@ -11807,6 +12147,16 @@ const ( FleetStateStopped = "STOPPED" ) +// FleetState_Values returns all elements of the FleetState enum +func FleetState_Values() []string { + return []string{ + FleetStateStarting, + FleetStateRunning, + FleetStateStopping, + FleetStateStopped, + } +} + const ( // FleetTypeAlwaysOn is a FleetType enum value FleetTypeAlwaysOn = "ALWAYS_ON" @@ -11815,6 +12165,14 @@ const ( FleetTypeOnDemand = "ON_DEMAND" ) +// FleetType_Values returns all elements of the FleetType enum +func FleetType_Values() []string { + return []string{ + FleetTypeAlwaysOn, + FleetTypeOnDemand, + } +} + const ( // ImageBuilderStatePending is a ImageBuilderState enum value ImageBuilderStatePending = "PENDING" @@ -11844,6 +12202,21 @@ const ( ImageBuilderStateFailed = "FAILED" ) +// ImageBuilderState_Values returns all elements of the ImageBuilderState enum +func ImageBuilderState_Values() []string { + return []string{ + ImageBuilderStatePending, + ImageBuilderStateUpdatingAgent, + ImageBuilderStateRunning, + ImageBuilderStateStopping, + ImageBuilderStateStopped, + ImageBuilderStateRebooting, + ImageBuilderStateSnapshotting, + ImageBuilderStateDeleting, + ImageBuilderStateFailed, + } +} + const ( // ImageBuilderStateChangeReasonCodeInternalError is a 
ImageBuilderStateChangeReasonCode enum value ImageBuilderStateChangeReasonCodeInternalError = "INTERNAL_ERROR" @@ -11852,6 +12225,14 @@ const ( ImageBuilderStateChangeReasonCodeImageUnavailable = "IMAGE_UNAVAILABLE" ) +// ImageBuilderStateChangeReasonCode_Values returns all elements of the ImageBuilderStateChangeReasonCode enum +func ImageBuilderStateChangeReasonCode_Values() []string { + return []string{ + ImageBuilderStateChangeReasonCodeInternalError, + ImageBuilderStateChangeReasonCodeImageUnavailable, + } +} + const ( // ImageStatePending is a ImageState enum value ImageStatePending = "PENDING" @@ -11869,6 +12250,17 @@ const ( ImageStateDeleting = "DELETING" ) +// ImageState_Values returns all elements of the ImageState enum +func ImageState_Values() []string { + return []string{ + ImageStatePending, + ImageStateAvailable, + ImageStateFailed, + ImageStateCopying, + ImageStateDeleting, + } +} + const ( // ImageStateChangeReasonCodeInternalError is a ImageStateChangeReasonCode enum value ImageStateChangeReasonCodeInternalError = "INTERNAL_ERROR" @@ -11880,6 +12272,15 @@ const ( ImageStateChangeReasonCodeImageCopyFailure = "IMAGE_COPY_FAILURE" ) +// ImageStateChangeReasonCode_Values returns all elements of the ImageStateChangeReasonCode enum +func ImageStateChangeReasonCode_Values() []string { + return []string{ + ImageStateChangeReasonCodeInternalError, + ImageStateChangeReasonCodeImageBuilderNotAvailable, + ImageStateChangeReasonCodeImageCopyFailure, + } +} + const ( // MessageActionSuppress is a MessageAction enum value MessageActionSuppress = "SUPPRESS" @@ -11888,6 +12289,14 @@ const ( MessageActionResend = "RESEND" ) +// MessageAction_Values returns all elements of the MessageAction enum +func MessageAction_Values() []string { + return []string{ + MessageActionSuppress, + MessageActionResend, + } +} + const ( // PermissionEnabled is a Permission enum value PermissionEnabled = "ENABLED" @@ -11896,6 +12305,14 @@ const ( PermissionDisabled = "DISABLED" ) +// Permission_Values returns all elements of the Permission enum +func Permission_Values() []string { + return []string{ + PermissionEnabled, + PermissionDisabled, + } +} + const ( // PlatformTypeWindows is a PlatformType enum value PlatformTypeWindows = "WINDOWS" @@ -11907,6 +12324,15 @@ const ( PlatformTypeWindowsServer2019 = "WINDOWS_SERVER_2019" ) +// PlatformType_Values returns all elements of the PlatformType enum +func PlatformType_Values() []string { + return []string{ + PlatformTypeWindows, + PlatformTypeWindowsServer2016, + PlatformTypeWindowsServer2019, + } +} + const ( // SessionConnectionStateConnected is a SessionConnectionState enum value SessionConnectionStateConnected = "CONNECTED" @@ -11915,6 +12341,14 @@ const ( SessionConnectionStateNotConnected = "NOT_CONNECTED" ) +// SessionConnectionState_Values returns all elements of the SessionConnectionState enum +func SessionConnectionState_Values() []string { + return []string{ + SessionConnectionStateConnected, + SessionConnectionStateNotConnected, + } +} + // Possible values for the state of a streaming session. 
const ( // SessionStateActive is a SessionState enum value @@ -11927,6 +12361,15 @@ const ( SessionStateExpired = "EXPIRED" ) +// SessionState_Values returns all elements of the SessionState enum +func SessionState_Values() []string { + return []string{ + SessionStateActive, + SessionStatePending, + SessionStateExpired, + } +} + const ( // StackAttributeStorageConnectors is a StackAttribute enum value StackAttributeStorageConnectors = "STORAGE_CONNECTORS" @@ -11962,6 +12405,23 @@ const ( StackAttributeAccessEndpoints = "ACCESS_ENDPOINTS" ) +// StackAttribute_Values returns all elements of the StackAttribute enum +func StackAttribute_Values() []string { + return []string{ + StackAttributeStorageConnectors, + StackAttributeStorageConnectorHomefolders, + StackAttributeStorageConnectorGoogleDrive, + StackAttributeStorageConnectorOneDrive, + StackAttributeRedirectUrl, + StackAttributeFeedbackUrl, + StackAttributeThemeName, + StackAttributeUserSettings, + StackAttributeEmbedHostDomains, + StackAttributeIamRoleArn, + StackAttributeAccessEndpoints, + } +} + const ( // StackErrorCodeStorageConnectorError is a StackErrorCode enum value StackErrorCodeStorageConnectorError = "STORAGE_CONNECTOR_ERROR" @@ -11970,6 +12430,14 @@ const ( StackErrorCodeInternalServiceError = "INTERNAL_SERVICE_ERROR" ) +// StackErrorCode_Values returns all elements of the StackErrorCode enum +func StackErrorCode_Values() []string { + return []string{ + StackErrorCodeStorageConnectorError, + StackErrorCodeInternalServiceError, + } +} + // The type of storage connector. const ( // StorageConnectorTypeHomefolders is a StorageConnectorType enum value @@ -11982,6 +12450,31 @@ const ( StorageConnectorTypeOneDrive = "ONE_DRIVE" ) +// StorageConnectorType_Values returns all elements of the StorageConnectorType enum +func StorageConnectorType_Values() []string { + return []string{ + StorageConnectorTypeHomefolders, + StorageConnectorTypeGoogleDrive, + StorageConnectorTypeOneDrive, + } +} + +const ( + // StreamViewApp is a StreamView enum value + StreamViewApp = "APP" + + // StreamViewDesktop is a StreamView enum value + StreamViewDesktop = "DESKTOP" +) + +// StreamView_Values returns all elements of the StreamView enum +func StreamView_Values() []string { + return []string{ + StreamViewApp, + StreamViewDesktop, + } +} + const ( // UsageReportExecutionErrorCodeResourceNotFound is a UsageReportExecutionErrorCode enum value UsageReportExecutionErrorCodeResourceNotFound = "RESOURCE_NOT_FOUND" @@ -11993,11 +12486,27 @@ const ( UsageReportExecutionErrorCodeInternalServiceError = "INTERNAL_SERVICE_ERROR" ) +// UsageReportExecutionErrorCode_Values returns all elements of the UsageReportExecutionErrorCode enum +func UsageReportExecutionErrorCode_Values() []string { + return []string{ + UsageReportExecutionErrorCodeResourceNotFound, + UsageReportExecutionErrorCodeAccessDenied, + UsageReportExecutionErrorCodeInternalServiceError, + } +} + const ( // UsageReportScheduleDaily is a UsageReportSchedule enum value UsageReportScheduleDaily = "DAILY" ) +// UsageReportSchedule_Values returns all elements of the UsageReportSchedule enum +func UsageReportSchedule_Values() []string { + return []string{ + UsageReportScheduleDaily, + } +} + const ( // UserStackAssociationErrorCodeStackNotFound is a UserStackAssociationErrorCode enum value UserStackAssociationErrorCodeStackNotFound = "STACK_NOT_FOUND" @@ -12005,10 +12514,23 @@ const ( // UserStackAssociationErrorCodeUserNameNotFound is a UserStackAssociationErrorCode enum value 
UserStackAssociationErrorCodeUserNameNotFound = "USER_NAME_NOT_FOUND" + // UserStackAssociationErrorCodeDirectoryNotFound is a UserStackAssociationErrorCode enum value + UserStackAssociationErrorCodeDirectoryNotFound = "DIRECTORY_NOT_FOUND" + // UserStackAssociationErrorCodeInternalError is a UserStackAssociationErrorCode enum value UserStackAssociationErrorCodeInternalError = "INTERNAL_ERROR" ) +// UserStackAssociationErrorCode_Values returns all elements of the UserStackAssociationErrorCode enum +func UserStackAssociationErrorCode_Values() []string { + return []string{ + UserStackAssociationErrorCodeStackNotFound, + UserStackAssociationErrorCodeUserNameNotFound, + UserStackAssociationErrorCodeDirectoryNotFound, + UserStackAssociationErrorCodeInternalError, + } +} + const ( // VisibilityTypePublic is a VisibilityType enum value VisibilityTypePublic = "PUBLIC" @@ -12019,3 +12541,12 @@ const ( // VisibilityTypeShared is a VisibilityType enum value VisibilityTypeShared = "SHARED" ) + +// VisibilityType_Values returns all elements of the VisibilityType enum +func VisibilityType_Values() []string { + return []string{ + VisibilityTypePublic, + VisibilityTypePrivate, + VisibilityTypeShared, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/appstream/errors.go b/vendor/github.com/aws/aws-sdk-go/service/appstream/errors.go index 285b79bde..f1570a479 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appstream/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appstream/errors.go @@ -51,6 +51,14 @@ const ( // The attempted operation is not permitted. ErrCodeOperationNotPermittedException = "OperationNotPermittedException" + // ErrCodeRequestLimitExceededException for service response error code + // "RequestLimitExceededException". + // + // AppStream 2.0 can’t process the request right now because the Describe + // calls from your AWS account are being throttled by Amazon EC2. Try again + // later. + ErrCodeRequestLimitExceededException = "RequestLimitExceededException" + // ErrCodeResourceAlreadyExistsException for service response error code // "ResourceAlreadyExistsException". 
// @@ -84,6 +92,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidRoleException": newErrorInvalidRoleException, "LimitExceededException": newErrorLimitExceededException, "OperationNotPermittedException": newErrorOperationNotPermittedException, + "RequestLimitExceededException": newErrorRequestLimitExceededException, "ResourceAlreadyExistsException": newErrorResourceAlreadyExistsException, "ResourceInUseException": newErrorResourceInUseException, "ResourceNotAvailableException": newErrorResourceNotAvailableException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go b/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go index 836ee2a6f..c9f50f968 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/appsync/api.go b/vendor/github.com/aws/aws-sdk-go/service/appsync/api.go index 38075bece..a1557319c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appsync/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appsync/api.go @@ -2205,7 +2205,7 @@ func (c *AppSync) ListApiKeysRequest(input *ListApiKeysInput) (req *request.Requ // // Lists the API keys for a given API. // -// API keys are deleted automatically sometime after they expire. However, they +// API keys are deleted automatically 60 days after they expire. However, they // may still be included in the response until they have actually been deleted. // You can safely call DeleteApiKey to manually delete a key before it's automatically // deleted. @@ -3318,7 +3318,7 @@ func (c *AppSync) UpdateApiKeyRequest(input *UpdateApiKeyInput) (req *request.Re // UpdateApiKey API operation for AWS AppSync. // -// Updates an API key. +// Updates an API key. The key can be updated while it is not deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3838,8 +3838,8 @@ func (c *AppSync) UpdateTypeWithContext(ctx aws.Context, input *UpdateTypeInput, // You do not have access to perform this operation on this resource. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3856,17 +3856,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. 
-func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3874,22 +3874,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // Describes an additional authentication provider. @@ -3990,7 +3990,29 @@ type ApiCache struct { // Valid values are between 1 and 3600 seconds. Ttl *int64 `locationName:"ttl" type:"long"` - // The cache instance type. + // The cache instance type. Valid values are + // + // * SMALL + // + // * MEDIUM + // + // * LARGE + // + // * XLARGE + // + // * LARGE_2X + // + // * LARGE_4X + // + // * LARGE_8X (not available in all regions) + // + // * LARGE_12X + // + // Historically, instance types were identified by an EC2-style value. As of + // July 2020, this is deprecated, and the generic identifiers above should be + // used. + // + // The following legacy instance types are available, but their use is discouraged: // // * T2_SMALL: A t2.small instance type. // @@ -4080,21 +4102,31 @@ func (s *ApiCache) SetType(v string) *ApiCache { // da2: This version was introduced in February 2018 when AppSync added support // to extend key expiration. // -// * ListApiKeys returns the expiration time in seconds. +// * ListApiKeys returns the expiration time and deletion time in seconds. // -// * CreateApiKey returns the expiration time in seconds and accepts a user-provided -// expiration time in seconds. +// * CreateApiKey returns the expiration time and deletion time in seconds +// and accepts a user-provided expiration time in seconds. // -// * UpdateApiKey returns the expiration time in seconds and accepts a user-provided -// expiration time in seconds. Key expiration can only be updated while the -// key has not expired. +// * UpdateApiKey returns the expiration time and deletion time in seconds +// and accepts a user-provided expiration time in seconds. Expired API keys +// are kept for 60 days after the expiration time. Key expiration time can +// be updated while the key is not deleted. // // * DeleteApiKey deletes the item from the table. // -// * Expiration is stored in Amazon DynamoDB as seconds. +// * Expiration is stored in Amazon DynamoDB as seconds. After the expiration +// time, using the key to authenticate will fail. But the key can be reinstated +// before deletion. +// +// * Deletion is stored in Amazon DynamoDB as seconds. The key will be deleted +// after deletion time. type ApiKey struct { _ struct{} `type:"structure"` + // The time after which the API key is deleted. The date is represented as seconds + // since the epoch, rounded down to the nearest hour.
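The new Deletes field documented above complements Expires on ApiKey; both are epoch-second timestamps, with deletion landing roughly 60 days after expiry. A short sketch of reading both via ListApiKeys (the session setup and API ID are illustrative placeholders):

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appsync"
)

func main() {
	conn := appsync.New(session.Must(session.NewSession()))

	out, err := conn.ListApiKeys(&appsync.ListApiKeysInput{
		ApiId: aws.String("example-api-id"), // placeholder API ID
	})
	if err != nil {
		panic(err)
	}
	for _, k := range out.ApiKeys {
		// Expires and Deletes are both stored as epoch seconds.
		fmt.Printf("key %s expires %s, deleted %s\n",
			aws.StringValue(k.Id),
			time.Unix(aws.Int64Value(k.Expires), 0).UTC(),
			time.Unix(aws.Int64Value(k.Deletes), 0).UTC(),
		)
	}
}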
+ Deletes *int64 `locationName:"deletes" type:"long"` + // A description of the purpose of the API key. Description *string `locationName:"description" type:"string"` @@ -4116,6 +4148,12 @@ func (s ApiKey) GoString() string { return s.String() } +// SetDeletes sets the Deletes field's value. +func (s *ApiKey) SetDeletes(v int64) *ApiKey { + s.Deletes = &v + return s +} + // SetDescription sets the Description field's value. func (s *ApiKey) SetDescription(v string) *ApiKey { s.Description = &v @@ -4136,8 +4174,8 @@ func (s *ApiKey) SetId(v string) *ApiKey { // The API key exceeded a limit. Try your request again. type ApiKeyLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4154,17 +4192,17 @@ func (s ApiKeyLimitExceededException) GoString() string { func newErrorApiKeyLimitExceededException(v protocol.ResponseMetadata) error { return &ApiKeyLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApiKeyLimitExceededException) Code() string { +func (s *ApiKeyLimitExceededException) Code() string { return "ApiKeyLimitExceededException" } // Message returns the exception's message. -func (s ApiKeyLimitExceededException) Message() string { +func (s *ApiKeyLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4172,29 +4210,29 @@ func (s ApiKeyLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApiKeyLimitExceededException) OrigErr() error { +func (s *ApiKeyLimitExceededException) OrigErr() error { return nil } -func (s ApiKeyLimitExceededException) Error() string { +func (s *ApiKeyLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApiKeyLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApiKeyLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApiKeyLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApiKeyLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The API key expiration must be set to a value between 1 and 365 days from // creation (for CreateApiKey) or from update (for UpdateApiKey). type ApiKeyValidityOutOfBoundsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4211,17 +4249,17 @@ func (s ApiKeyValidityOutOfBoundsException) GoString() string { func newErrorApiKeyValidityOutOfBoundsException(v protocol.ResponseMetadata) error { return &ApiKeyValidityOutOfBoundsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApiKeyValidityOutOfBoundsException) Code() string { +func (s *ApiKeyValidityOutOfBoundsException) Code() string { return "ApiKeyValidityOutOfBoundsException" } // Message returns the exception's message. 
-func (s ApiKeyValidityOutOfBoundsException) Message() string { +func (s *ApiKeyValidityOutOfBoundsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4229,28 +4267,28 @@ func (s ApiKeyValidityOutOfBoundsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApiKeyValidityOutOfBoundsException) OrigErr() error { +func (s *ApiKeyValidityOutOfBoundsException) OrigErr() error { return nil } -func (s ApiKeyValidityOutOfBoundsException) Error() string { +func (s *ApiKeyValidityOutOfBoundsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApiKeyValidityOutOfBoundsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApiKeyValidityOutOfBoundsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApiKeyValidityOutOfBoundsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApiKeyValidityOutOfBoundsException) RequestID() string { + return s.RespMetadata.RequestID } // The GraphQL API exceeded a limit. Try your request again. type ApiLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4267,17 +4305,17 @@ func (s ApiLimitExceededException) GoString() string { func newErrorApiLimitExceededException(v protocol.ResponseMetadata) error { return &ApiLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApiLimitExceededException) Code() string { +func (s *ApiLimitExceededException) Code() string { return "ApiLimitExceededException" } // Message returns the exception's message. -func (s ApiLimitExceededException) Message() string { +func (s *ApiLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4285,22 +4323,22 @@ func (s ApiLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApiLimitExceededException) OrigErr() error { +func (s *ApiLimitExceededException) OrigErr() error { return nil } -func (s ApiLimitExceededException) Error() string { +func (s *ApiLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApiLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApiLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApiLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApiLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The authorization config in case the HTTP endpoint requires authorization. @@ -4389,8 +4427,8 @@ func (s *AwsIamConfig) SetSigningServiceName(v string) *AwsIamConfig { // The request is not well formed. For example, a value is invalid or a required // field is missing. Check the field values, and then try again. 
type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4407,17 +4445,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4425,22 +4463,22 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } // The caching configuration for a resolver that has caching enabled. @@ -4449,8 +4487,8 @@ type CachingConfig struct { // The caching keys for a resolver that has caching enabled. // - // Valid values are entries from the $context.identity and $context.arguments - // maps. + // Valid values are entries from the $context.arguments, $context.source, and + // $context.identity maps. CachingKeys []*string `locationName:"cachingKeys" type:"list"` // The TTL in seconds for a resolver that has caching enabled. @@ -4547,8 +4585,8 @@ func (s *CognitoUserPoolConfig) SetUserPoolId(v string) *CognitoUserPoolConfig { // Another modification is in progress at this time and it must complete before // you can make your change. type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4565,17 +4603,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. -func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4583,22 +4621,22 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the input of a CreateApiCache operation. @@ -4633,7 +4671,29 @@ type CreateApiCacheInput struct { // Ttl is a required field Ttl *int64 `locationName:"ttl" type:"long" required:"true"` - // The cache instance type. + // The cache instance type. Valid values are + // + // * SMALL + // + // * MEDIUM + // + // * LARGE + // + // * XLARGE + // + // * LARGE_2X + // + // * LARGE_4X + // + // * LARGE_8X (not available in all regions) + // + // * LARGE_12X + // + // Historically, instance types were identified by an EC2-style value. As of + // July 2020, this is deprecated, and the generic identifiers above should be + // used. + // + // The following legacy instance types are available, but their use is discouraged: // // * T2_SMALL: A t2.small instance type. // @@ -5041,9 +5101,7 @@ type CreateFunctionInput struct { // The Function request mapping template. Functions support only the 2018-05-29 // version of the request mapping template. - // - // RequestMappingTemplate is a required field - RequestMappingTemplate *string `locationName:"requestMappingTemplate" min:"1" type:"string" required:"true"` + RequestMappingTemplate *string `locationName:"requestMappingTemplate" min:"1" type:"string"` // The Function response mapping template. ResponseMappingTemplate *string `locationName:"responseMappingTemplate" min:"1" type:"string"` @@ -5083,9 +5141,6 @@ func (s *CreateFunctionInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.RequestMappingTemplate == nil { - invalidParams.Add(request.NewErrParamRequired("RequestMappingTemplate")) - } if s.RequestMappingTemplate != nil && len(*s.RequestMappingTemplate) < 1 { invalidParams.Add(request.NewErrParamMinLen("RequestMappingTemplate", 1)) } @@ -5360,8 +5415,10 @@ type CreateResolverInput struct { // into a format that a data source can understand. Mapping templates are written // in Apache Velocity Template Language (VTL). // - // RequestMappingTemplate is a required field - RequestMappingTemplate *string `locationName:"requestMappingTemplate" min:"1" type:"string" required:"true"` + // VTL request mapping templates are optional when using a Lambda data source. + // For all other data sources, VTL request and response mapping templates are + // required. + RequestMappingTemplate *string `locationName:"requestMappingTemplate" min:"1" type:"string"` // The mapping template to be used for responses from the data source.
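With RequestMappingTemplate no longer required above, a function or resolver backed by a Lambda data source can be created without VTL templates. A hedged sketch of a CreateFunction call under that assumption (API ID, function name, and data source name are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appsync"
)

func main() {
	conn := appsync.New(session.Must(session.NewSession()))

	// Both mapping templates are omitted, which the relaxed Validate() above
	// now allows; the data source is assumed to be a Lambda function.
	out, err := conn.CreateFunction(&appsync.CreateFunctionInput{
		ApiId:           aws.String("example-api-id"),          // placeholder
		Name:            aws.String("exampleFunction"),         // placeholder
		DataSourceName:  aws.String("exampleLambdaDataSource"), // placeholder
		FunctionVersion: aws.String("2018-05-29"),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.FunctionConfiguration.FunctionId))
}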
ResponseMappingTemplate *string `locationName:"responseMappingTemplate" min:"1" type:"string"` @@ -5403,9 +5460,6 @@ func (s *CreateResolverInput) Validate() error { if s.FieldName != nil && len(*s.FieldName) < 1 { invalidParams.Add(request.NewErrParamMinLen("FieldName", 1)) } - if s.RequestMappingTemplate == nil { - invalidParams.Add(request.NewErrParamRequired("RequestMappingTemplate")) - } if s.RequestMappingTemplate != nil && len(*s.RequestMappingTemplate) < 1 { invalidParams.Add(request.NewErrParamMinLen("RequestMappingTemplate", 1)) } @@ -7186,8 +7240,8 @@ func (s *GetTypeOutput) SetType(v *Type) *GetTypeOutput { // The GraphQL schema is not valid. type GraphQLSchemaException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7204,17 +7258,17 @@ func (s GraphQLSchemaException) GoString() string { func newErrorGraphQLSchemaException(v protocol.ResponseMetadata) error { return &GraphQLSchemaException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s GraphQLSchemaException) Code() string { +func (s *GraphQLSchemaException) Code() string { return "GraphQLSchemaException" } // Message returns the exception's message. -func (s GraphQLSchemaException) Message() string { +func (s *GraphQLSchemaException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7222,22 +7276,22 @@ func (s GraphQLSchemaException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s GraphQLSchemaException) OrigErr() error { +func (s *GraphQLSchemaException) OrigErr() error { return nil } -func (s GraphQLSchemaException) Error() string { +func (s *GraphQLSchemaException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s GraphQLSchemaException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *GraphQLSchemaException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s GraphQLSchemaException) RequestID() string { - return s.respMetadata.RequestID +func (s *GraphQLSchemaException) RequestID() string { + return s.RespMetadata.RequestID } // Describes a GraphQL API. @@ -7274,6 +7328,9 @@ type GraphqlApi struct { // The Amazon Cognito user pool configuration. UserPoolConfig *UserPoolConfig `locationName:"userPoolConfig" type:"structure"` + // The ARN of the AWS WAF ACL associated with this GraphqlApi if one exists. + WafWebAclArn *string `locationName:"wafWebAclArn" type:"string"` + // A flag representing whether X-Ray tracing is enabled for this GraphqlApi. XrayEnabled *bool `locationName:"xrayEnabled" type:"boolean"` } @@ -7348,6 +7405,12 @@ func (s *GraphqlApi) SetUserPoolConfig(v *UserPoolConfig) *GraphqlApi { return s } +// SetWafWebAclArn sets the WafWebAclArn field's value. +func (s *GraphqlApi) SetWafWebAclArn(v string) *GraphqlApi { + s.WafWebAclArn = &v + return s +} + // SetXrayEnabled sets the XrayEnabled field's value. func (s *GraphqlApi) SetXrayEnabled(v bool) *GraphqlApi { s.XrayEnabled = &v @@ -7407,8 +7470,8 @@ func (s *HttpDataSourceConfig) SetEndpoint(v string) *HttpDataSourceConfig { // An internal AWS AppSync error occurred. Try your request again. 
type InternalFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7425,17 +7488,17 @@ func (s InternalFailureException) GoString() string { func newErrorInternalFailureException(v protocol.ResponseMetadata) error { return &InternalFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalFailureException) Code() string { +func (s *InternalFailureException) Code() string { return "InternalFailureException" } // Message returns the exception's message. -func (s InternalFailureException) Message() string { +func (s *InternalFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7443,22 +7506,22 @@ func (s InternalFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalFailureException) OrigErr() error { +func (s *InternalFailureException) OrigErr() error { return nil } -func (s InternalFailureException) Error() string { +func (s *InternalFailureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalFailureException) RequestID() string { + return s.RespMetadata.RequestID } // The LambdaConflictHandlerConfig object when configuring LAMBDA as the Conflict @@ -7527,8 +7590,8 @@ func (s *LambdaDataSourceConfig) SetLambdaFunctionArn(v string) *LambdaDataSourc // The request exceeded a limit. Try your request again. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7545,17 +7608,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7563,22 +7626,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListApiKeysInput struct { @@ -8424,8 +8487,8 @@ func (s *LogConfig) SetFieldLogLevel(v string) *LogConfig { // The resource specified in the request was not found. Check the resource, // and then try again. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8442,17 +8505,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8460,22 +8523,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Describes an OpenID Connect configuration. @@ -8585,7 +8648,7 @@ type RdsHttpEndpointConfig struct { // Logical database name. DatabaseName *string `locationName:"databaseName" type:"string"` - // Amazon RDS cluster identifier. + // Amazon RDS cluster ARN. DbClusterIdentifier *string `locationName:"dbClusterIdentifier" type:"string"` // Logical schema name. @@ -9053,8 +9116,8 @@ func (s *Type) SetName(v string) *Type { // You are not authorized to perform this operation. type UnauthorizedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9071,17 +9134,17 @@ func (s UnauthorizedException) GoString() string { func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { return &UnauthorizedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnauthorizedException) Code() string { +func (s *UnauthorizedException) Code() string { return "UnauthorizedException" } // Message returns the exception's message. 
-func (s UnauthorizedException) Message() string { +func (s *UnauthorizedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9089,22 +9152,22 @@ func (s UnauthorizedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnauthorizedException) OrigErr() error { +func (s *UnauthorizedException) OrigErr() error { return nil } -func (s UnauthorizedException) Error() string { +func (s *UnauthorizedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnauthorizedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnauthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnauthorizedException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnauthorizedException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -9204,7 +9267,29 @@ type UpdateApiCacheInput struct { // Ttl is a required field Ttl *int64 `locationName:"ttl" type:"long" required:"true"` - // The cache instance type. + // The cache instance type. Valid values are + // + // * SMALL + // + // * MEDIUM + // + // * LARGE + // + // * XLARGE + // + // * LARGE_2X + // + // * LARGE_4X + // + // * LARGE_8X (not available in all regions) + // + // * LARGE_12X + // + // Historically, instance types were identified by an EC2-style value. As of + // July 2020, this is deprecated, and the generic identifiers above should be + // used. + // + // The following legacy instance types are available, but their use is discouraged: // // * T2_SMALL: A t2.small instance type. // @@ -9619,9 +9704,7 @@ type UpdateFunctionInput struct { // The Function request mapping template. Functions support only the 2018-05-29 // version of the request mapping template. - // - // RequestMappingTemplate is a required field - RequestMappingTemplate *string `locationName:"requestMappingTemplate" min:"1" type:"string" required:"true"` + RequestMappingTemplate *string `locationName:"requestMappingTemplate" min:"1" type:"string"` // The Function response mapping template. ResponseMappingTemplate *string `locationName:"responseMappingTemplate" min:"1" type:"string"` @@ -9667,9 +9750,6 @@ func (s *UpdateFunctionInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.RequestMappingTemplate == nil { - invalidParams.Add(request.NewErrParamRequired("RequestMappingTemplate")) - } if s.RequestMappingTemplate != nil && len(*s.RequestMappingTemplate) < 1 { invalidParams.Add(request.NewErrParamMinLen("RequestMappingTemplate", 1)) } @@ -9946,8 +10026,14 @@ type UpdateResolverInput struct { // The new request mapping template. // - // RequestMappingTemplate is a required field - RequestMappingTemplate *string `locationName:"requestMappingTemplate" min:"1" type:"string" required:"true"` + // A resolver uses a request mapping template to convert a GraphQL expression + // into a format that a data source can understand. Mapping templates are written + // in Apache Velocity Template Language (VTL). + // + // VTL request mapping templates are optional when using a Lambda data source. + // For all other data sources, VTL request and response mapping templates are + // required.
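The generic cache instance identifiers listed above (SMALL through LARGE_12X) supersede the EC2-style names such as T2_SMALL and R4_LARGE. A brief sketch of an UpdateApiCache call using one of the new identifiers (the API ID and sizing are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appsync"
)

func main() {
	conn := appsync.New(session.Must(session.NewSession()))

	// Uses the generic LARGE identifier instead of a legacy EC2-style value.
	out, err := conn.UpdateApiCache(&appsync.UpdateApiCacheInput{
		ApiId:              aws.String("example-api-id"), // placeholder
		Ttl:                aws.Int64(300),
		ApiCachingBehavior: aws.String(appsync.ApiCachingBehaviorFullRequestCaching),
		Type:               aws.String(appsync.ApiCacheTypeLarge),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(aws.StringValue(out.ApiCache.Status))
}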
+ RequestMappingTemplate *string `locationName:"requestMappingTemplate" min:"1" type:"string"` // The new response mapping template. ResponseMappingTemplate *string `locationName:"responseMappingTemplate" min:"1" type:"string"` @@ -9989,9 +10075,6 @@ func (s *UpdateResolverInput) Validate() error { if s.FieldName != nil && len(*s.FieldName) < 1 { invalidParams.Add(request.NewErrParamMinLen("FieldName", 1)) } - if s.RequestMappingTemplate == nil { - invalidParams.Add(request.NewErrParamRequired("RequestMappingTemplate")) - } if s.RequestMappingTemplate != nil && len(*s.RequestMappingTemplate) < 1 { invalidParams.Add(request.NewErrParamMinLen("RequestMappingTemplate", 1)) } @@ -10294,6 +10377,17 @@ const ( ApiCacheStatusFailed = "FAILED" ) +// ApiCacheStatus_Values returns all elements of the ApiCacheStatus enum +func ApiCacheStatus_Values() []string { + return []string{ + ApiCacheStatusAvailable, + ApiCacheStatusCreating, + ApiCacheStatusDeleting, + ApiCacheStatusModifying, + ApiCacheStatusFailed, + } +} + const ( // ApiCacheTypeT2Small is a ApiCacheType enum value ApiCacheTypeT2Small = "T2_SMALL" @@ -10315,8 +10409,53 @@ const ( // ApiCacheTypeR48xlarge is a ApiCacheType enum value ApiCacheTypeR48xlarge = "R4_8XLARGE" + + // ApiCacheTypeSmall is a ApiCacheType enum value + ApiCacheTypeSmall = "SMALL" + + // ApiCacheTypeMedium is a ApiCacheType enum value + ApiCacheTypeMedium = "MEDIUM" + + // ApiCacheTypeLarge is a ApiCacheType enum value + ApiCacheTypeLarge = "LARGE" + + // ApiCacheTypeXlarge is a ApiCacheType enum value + ApiCacheTypeXlarge = "XLARGE" + + // ApiCacheTypeLarge2x is a ApiCacheType enum value + ApiCacheTypeLarge2x = "LARGE_2X" + + // ApiCacheTypeLarge4x is a ApiCacheType enum value + ApiCacheTypeLarge4x = "LARGE_4X" + + // ApiCacheTypeLarge8x is a ApiCacheType enum value + ApiCacheTypeLarge8x = "LARGE_8X" + + // ApiCacheTypeLarge12x is a ApiCacheType enum value + ApiCacheTypeLarge12x = "LARGE_12X" ) +// ApiCacheType_Values returns all elements of the ApiCacheType enum +func ApiCacheType_Values() []string { + return []string{ + ApiCacheTypeT2Small, + ApiCacheTypeT2Medium, + ApiCacheTypeR4Large, + ApiCacheTypeR4Xlarge, + ApiCacheTypeR42xlarge, + ApiCacheTypeR44xlarge, + ApiCacheTypeR48xlarge, + ApiCacheTypeSmall, + ApiCacheTypeMedium, + ApiCacheTypeLarge, + ApiCacheTypeXlarge, + ApiCacheTypeLarge2x, + ApiCacheTypeLarge4x, + ApiCacheTypeLarge8x, + ApiCacheTypeLarge12x, + } +} + const ( // ApiCachingBehaviorFullRequestCaching is a ApiCachingBehavior enum value ApiCachingBehaviorFullRequestCaching = "FULL_REQUEST_CACHING" @@ -10325,6 +10464,14 @@ const ( ApiCachingBehaviorPerResolverCaching = "PER_RESOLVER_CACHING" ) +// ApiCachingBehavior_Values returns all elements of the ApiCachingBehavior enum +func ApiCachingBehavior_Values() []string { + return []string{ + ApiCachingBehaviorFullRequestCaching, + ApiCachingBehaviorPerResolverCaching, + } +} + const ( // AuthenticationTypeApiKey is a AuthenticationType enum value AuthenticationTypeApiKey = "API_KEY" @@ -10339,11 +10486,28 @@ const ( AuthenticationTypeOpenidConnect = "OPENID_CONNECT" ) +// AuthenticationType_Values returns all elements of the AuthenticationType enum +func AuthenticationType_Values() []string { + return []string{ + AuthenticationTypeApiKey, + AuthenticationTypeAwsIam, + AuthenticationTypeAmazonCognitoUserPools, + AuthenticationTypeOpenidConnect, + } +} + const ( // AuthorizationTypeAwsIam is a AuthorizationType enum value AuthorizationTypeAwsIam = "AWS_IAM" ) +// AuthorizationType_Values returns all elements 
of the AuthorizationType enum +func AuthorizationType_Values() []string { + return []string{ + AuthorizationTypeAwsIam, + } +} + const ( // ConflictDetectionTypeVersion is a ConflictDetectionType enum value ConflictDetectionTypeVersion = "VERSION" @@ -10352,6 +10516,14 @@ const ( ConflictDetectionTypeNone = "NONE" ) +// ConflictDetectionType_Values returns all elements of the ConflictDetectionType enum +func ConflictDetectionType_Values() []string { + return []string{ + ConflictDetectionTypeVersion, + ConflictDetectionTypeNone, + } +} + const ( // ConflictHandlerTypeOptimisticConcurrency is a ConflictHandlerType enum value ConflictHandlerTypeOptimisticConcurrency = "OPTIMISTIC_CONCURRENCY" @@ -10366,6 +10538,16 @@ const ( ConflictHandlerTypeNone = "NONE" ) +// ConflictHandlerType_Values returns all elements of the ConflictHandlerType enum +func ConflictHandlerType_Values() []string { + return []string{ + ConflictHandlerTypeOptimisticConcurrency, + ConflictHandlerTypeLambda, + ConflictHandlerTypeAutomerge, + ConflictHandlerTypeNone, + } +} + const ( // DataSourceTypeAwsLambda is a DataSourceType enum value DataSourceTypeAwsLambda = "AWS_LAMBDA" @@ -10386,6 +10568,18 @@ const ( DataSourceTypeRelationalDatabase = "RELATIONAL_DATABASE" ) +// DataSourceType_Values returns all elements of the DataSourceType enum +func DataSourceType_Values() []string { + return []string{ + DataSourceTypeAwsLambda, + DataSourceTypeAmazonDynamodb, + DataSourceTypeAmazonElasticsearch, + DataSourceTypeNone, + DataSourceTypeHttp, + DataSourceTypeRelationalDatabase, + } +} + const ( // DefaultActionAllow is a DefaultAction enum value DefaultActionAllow = "ALLOW" @@ -10394,6 +10588,14 @@ const ( DefaultActionDeny = "DENY" ) +// DefaultAction_Values returns all elements of the DefaultAction enum +func DefaultAction_Values() []string { + return []string{ + DefaultActionAllow, + DefaultActionDeny, + } +} + const ( // FieldLogLevelNone is a FieldLogLevel enum value FieldLogLevelNone = "NONE" @@ -10405,6 +10607,15 @@ const ( FieldLogLevelAll = "ALL" ) +// FieldLogLevel_Values returns all elements of the FieldLogLevel enum +func FieldLogLevel_Values() []string { + return []string{ + FieldLogLevelNone, + FieldLogLevelError, + FieldLogLevelAll, + } +} + const ( // OutputTypeSdl is a OutputType enum value OutputTypeSdl = "SDL" @@ -10413,11 +10624,26 @@ const ( OutputTypeJson = "JSON" ) +// OutputType_Values returns all elements of the OutputType enum +func OutputType_Values() []string { + return []string{ + OutputTypeSdl, + OutputTypeJson, + } +} + const ( // RelationalDatabaseSourceTypeRdsHttpEndpoint is a RelationalDatabaseSourceType enum value RelationalDatabaseSourceTypeRdsHttpEndpoint = "RDS_HTTP_ENDPOINT" ) +// RelationalDatabaseSourceType_Values returns all elements of the RelationalDatabaseSourceType enum +func RelationalDatabaseSourceType_Values() []string { + return []string{ + RelationalDatabaseSourceTypeRdsHttpEndpoint, + } +} + const ( // ResolverKindUnit is a ResolverKind enum value ResolverKindUnit = "UNIT" @@ -10426,6 +10652,14 @@ const ( ResolverKindPipeline = "PIPELINE" ) +// ResolverKind_Values returns all elements of the ResolverKind enum +func ResolverKind_Values() []string { + return []string{ + ResolverKindUnit, + ResolverKindPipeline, + } +} + const ( // SchemaStatusProcessing is a SchemaStatus enum value SchemaStatusProcessing = "PROCESSING" @@ -10446,6 +10680,18 @@ const ( SchemaStatusNotApplicable = "NOT_APPLICABLE" ) +// SchemaStatus_Values returns all elements of the SchemaStatus enum +func 
SchemaStatus_Values() []string { + return []string{ + SchemaStatusProcessing, + SchemaStatusActive, + SchemaStatusDeleting, + SchemaStatusFailed, + SchemaStatusSuccess, + SchemaStatusNotApplicable, + } +} + const ( // TypeDefinitionFormatSdl is a TypeDefinitionFormat enum value TypeDefinitionFormatSdl = "SDL" @@ -10453,3 +10699,11 @@ const ( // TypeDefinitionFormatJson is a TypeDefinitionFormat enum value TypeDefinitionFormatJson = "JSON" ) + +// TypeDefinitionFormat_Values returns all elements of the TypeDefinitionFormat enum +func TypeDefinitionFormat_Values() []string { + return []string{ + TypeDefinitionFormatSdl, + TypeDefinitionFormatJson, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go b/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go index 545b5dea7..da4d67cb1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/api.go b/vendor/github.com/aws/aws-sdk-go/service/athena/api.go index e1931f6e1..7e5303a78 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/athena/api.go @@ -194,6 +194,92 @@ func (c *Athena) BatchGetQueryExecutionWithContext(ctx aws.Context, input *Batch return out, req.Send() } +const opCreateDataCatalog = "CreateDataCatalog" + +// CreateDataCatalogRequest generates a "aws/request.Request" representing the +// client's request for the CreateDataCatalog operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDataCatalog for more information on using the CreateDataCatalog +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDataCatalogRequest method. +// req, resp := client.CreateDataCatalogRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateDataCatalog +func (c *Athena) CreateDataCatalogRequest(input *CreateDataCatalogInput) (req *request.Request, output *CreateDataCatalogOutput) { + op := &request.Operation{ + Name: opCreateDataCatalog, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDataCatalogInput{} + } + + output = &CreateDataCatalogOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateDataCatalog API operation for Amazon Athena. +// +// Creates (registers) a data catalog with the specified name and properties. +// Catalogs created are visible to all users of the same AWS account. 
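CreateDataCatalog, introduced above, registers an external catalog with the calling account. A hedged sketch of a minimal call follows; the catalog name, Lambda ARN, and the "function" parameter key follow the Athena documentation for a single-function LAMBDA catalog and are assumptions here, not values taken from this patch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	conn := athena.New(session.Must(session.NewSession()))

	// Name and Type are required; the Parameters map is interpreted by the
	// service rather than validated client-side.
	_, err := conn.CreateDataCatalog(&athena.CreateDataCatalogInput{
		Name: aws.String("example_catalog"), // placeholder
		Type: aws.String("LAMBDA"),
		Parameters: map[string]*string{
			// Assumed key for a single composite Lambda catalog function.
			"function": aws.String("arn:aws:lambda:us-east-1:111122223333:function:example"),
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("data catalog registered")
}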
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation CreateDataCatalog for usage and error information. +// +// Returned Error Types: +// * InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// * InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/CreateDataCatalog +func (c *Athena) CreateDataCatalog(input *CreateDataCatalogInput) (*CreateDataCatalogOutput, error) { + req, out := c.CreateDataCatalogRequest(input) + return out, req.Send() +} + +// CreateDataCatalogWithContext is the same as CreateDataCatalog with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDataCatalog for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) CreateDataCatalogWithContext(ctx aws.Context, input *CreateDataCatalogInput, opts ...request.Option) (*CreateDataCatalogOutput, error) { + req, out := c.CreateDataCatalogRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateNamedQuery = "CreateNamedQuery" // CreateNamedQueryRequest generates a "aws/request.Request" representing the @@ -368,6 +454,91 @@ func (c *Athena) CreateWorkGroupWithContext(ctx aws.Context, input *CreateWorkGr return out, req.Send() } +const opDeleteDataCatalog = "DeleteDataCatalog" + +// DeleteDataCatalogRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDataCatalog operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDataCatalog for more information on using the DeleteDataCatalog +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDataCatalogRequest method. 
+// req, resp := client.DeleteDataCatalogRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteDataCatalog +func (c *Athena) DeleteDataCatalogRequest(input *DeleteDataCatalogInput) (req *request.Request, output *DeleteDataCatalogOutput) { + op := &request.Operation{ + Name: opDeleteDataCatalog, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDataCatalogInput{} + } + + output = &DeleteDataCatalogOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDataCatalog API operation for Amazon Athena. +// +// Deletes a data catalog. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation DeleteDataCatalog for usage and error information. +// +// Returned Error Types: +// * InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// * InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/DeleteDataCatalog +func (c *Athena) DeleteDataCatalog(input *DeleteDataCatalogInput) (*DeleteDataCatalogOutput, error) { + req, out := c.DeleteDataCatalogRequest(input) + return out, req.Send() +} + +// DeleteDataCatalogWithContext is the same as DeleteDataCatalog with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDataCatalog for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) DeleteDataCatalogWithContext(ctx aws.Context, input *DeleteDataCatalogInput, opts ...request.Option) (*DeleteDataCatalogOutput, error) { + req, out := c.DeleteDataCatalogRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteNamedQuery = "DeleteNamedQuery" // DeleteNamedQueryRequest generates a "aws/request.Request" representing the @@ -544,6 +715,181 @@ func (c *Athena) DeleteWorkGroupWithContext(ctx aws.Context, input *DeleteWorkGr return out, req.Send() } +const opGetDataCatalog = "GetDataCatalog" + +// GetDataCatalogRequest generates a "aws/request.Request" representing the +// client's request for the GetDataCatalog operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDataCatalog for more information on using the GetDataCatalog +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the GetDataCatalogRequest method. +// req, resp := client.GetDataCatalogRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDataCatalog +func (c *Athena) GetDataCatalogRequest(input *GetDataCatalogInput) (req *request.Request, output *GetDataCatalogOutput) { + op := &request.Operation{ + Name: opGetDataCatalog, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDataCatalogInput{} + } + + output = &GetDataCatalogOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDataCatalog API operation for Amazon Athena. +// +// Returns the specified data catalog. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation GetDataCatalog for usage and error information. +// +// Returned Error Types: +// * InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// * InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDataCatalog +func (c *Athena) GetDataCatalog(input *GetDataCatalogInput) (*GetDataCatalogOutput, error) { + req, out := c.GetDataCatalogRequest(input) + return out, req.Send() +} + +// GetDataCatalogWithContext is the same as GetDataCatalog with the addition of +// the ability to pass a context and additional request options. +// +// See GetDataCatalog for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) GetDataCatalogWithContext(ctx aws.Context, input *GetDataCatalogInput, opts ...request.Option) (*GetDataCatalogOutput, error) { + req, out := c.GetDataCatalogRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetDatabase = "GetDatabase" + +// GetDatabaseRequest generates a "aws/request.Request" representing the +// client's request for the GetDatabase operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDatabase for more information on using the GetDatabase +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetDatabaseRequest method. 
+// req, resp := client.GetDatabaseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDatabase +func (c *Athena) GetDatabaseRequest(input *GetDatabaseInput) (req *request.Request, output *GetDatabaseOutput) { + op := &request.Operation{ + Name: opGetDatabase, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDatabaseInput{} + } + + output = &GetDatabaseOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDatabase API operation for Amazon Athena. +// +// Returns a database object for the specified database and data catalog. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation GetDatabase for usage and error information. +// +// Returned Error Types: +// * InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// * InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// * MetadataException +// An exception that Athena received when it called a custom metastore. Occurs +// if the error is not caused by user input (InvalidRequestException) or from +// the Athena platform (InternalServerException). For example, if a user-created +// Lambda function is missing permissions, the Lambda 4XX exception is returned +// in a MetadataException. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetDatabase +func (c *Athena) GetDatabase(input *GetDatabaseInput) (*GetDatabaseOutput, error) { + req, out := c.GetDatabaseRequest(input) + return out, req.Send() +} + +// GetDatabaseWithContext is the same as GetDatabase with the addition of +// the ability to pass a context and additional request options. +// +// See GetDatabase for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) GetDatabaseWithContext(ctx aws.Context, input *GetDatabaseInput, opts ...request.Option) (*GetDatabaseOutput, error) { + req, out := c.GetDatabaseRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetNamedQuery = "GetNamedQuery" // GetNamedQueryRequest generates a "aws/request.Request" representing the @@ -871,7 +1217,98 @@ func (c *Athena) GetQueryResultsPagesWithContext(ctx aws.Context, input *GetQuer return p.Err() } -const opGetWorkGroup = "GetWorkGroup" +const opGetTableMetadata = "GetTableMetadata" + +// GetTableMetadataRequest generates a "aws/request.Request" representing the +// client's request for the GetTableMetadata operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error.
+// +// See GetTableMetadata for more information on using the GetTableMetadata +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetTableMetadataRequest method. +// req, resp := client.GetTableMetadataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetTableMetadata +func (c *Athena) GetTableMetadataRequest(input *GetTableMetadataInput) (req *request.Request, output *GetTableMetadataOutput) { + op := &request.Operation{ + Name: opGetTableMetadata, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetTableMetadataInput{} + } + + output = &GetTableMetadataOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTableMetadata API operation for Amazon Athena. +// +// Returns table metadata for the specified catalog, database, and table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation GetTableMetadata for usage and error information. +// +// Returned Error Types: +// * InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// * InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// * MetadataException +// An exception that Athena received when it called a custom metastore. Occurs +// if the error is not caused by user input (InvalidRequestException) or from +// the Athena platform (InternalServerException). For example, if a user-created +// Lambda function is missing permissions, the Lambda 4XX exception is returned +// in a MetadataException. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/GetTableMetadata +func (c *Athena) GetTableMetadata(input *GetTableMetadataInput) (*GetTableMetadataOutput, error) { + req, out := c.GetTableMetadataRequest(input) + return out, req.Send() +} + +// GetTableMetadataWithContext is the same as GetTableMetadata with the addition of +// the ability to pass a context and additional request options. +// +// See GetTableMetadata for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) GetTableMetadataWithContext(ctx aws.Context, input *GetTableMetadataInput, opts ...request.Option) (*GetTableMetadataOutput, error) { + req, out := c.GetTableMetadataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetWorkGroup = "GetWorkGroup" // GetWorkGroupRequest generates a "aws/request.Request" representing the // client's request for the GetWorkGroup operation. 
The "output" return @@ -955,35 +1392,35 @@ func (c *Athena) GetWorkGroupWithContext(ctx aws.Context, input *GetWorkGroupInp return out, req.Send() } -const opListNamedQueries = "ListNamedQueries" +const opListDataCatalogs = "ListDataCatalogs" -// ListNamedQueriesRequest generates a "aws/request.Request" representing the -// client's request for the ListNamedQueries operation. The "output" return +// ListDataCatalogsRequest generates a "aws/request.Request" representing the +// client's request for the ListDataCatalogs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListNamedQueries for more information on using the ListNamedQueries +// See ListDataCatalogs for more information on using the ListDataCatalogs // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListNamedQueriesRequest method. -// req, resp := client.ListNamedQueriesRequest(params) +// // Example sending a request using the ListDataCatalogsRequest method. +// req, resp := client.ListDataCatalogsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNamedQueries -func (c *Athena) ListNamedQueriesRequest(input *ListNamedQueriesInput) (req *request.Request, output *ListNamedQueriesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDataCatalogs +func (c *Athena) ListDataCatalogsRequest(input *ListDataCatalogsInput) (req *request.Request, output *ListDataCatalogsOutput) { op := &request.Operation{ - Name: opListNamedQueries, + Name: opListDataCatalogs, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -995,29 +1432,24 @@ func (c *Athena) ListNamedQueriesRequest(input *ListNamedQueriesInput) (req *req } if input == nil { - input = &ListNamedQueriesInput{} + input = &ListDataCatalogsInput{} } - output = &ListNamedQueriesOutput{} + output = &ListDataCatalogsOutput{} req = c.newRequest(op, input, output) return } -// ListNamedQueries API operation for Amazon Athena. -// -// Provides a list of available query IDs only for queries saved in the specified -// workgroup. Requires that you have access to the workgroup. +// ListDataCatalogs API operation for Amazon Athena. // -// For code samples using the AWS SDK for Java, see Examples and Code Samples -// (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the Amazon -// Athena User Guide. +// Lists the data catalogs in the current AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation ListNamedQueries for usage and error information. +// API operation ListDataCatalogs for usage and error information. // // Returned Error Types: // * InternalServerException @@ -1028,65 +1460,65 @@ func (c *Athena) ListNamedQueriesRequest(input *ListNamedQueriesInput) (req *req // Indicates that something is wrong with the input to the request. 
For example, // a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNamedQueries -func (c *Athena) ListNamedQueries(input *ListNamedQueriesInput) (*ListNamedQueriesOutput, error) { - req, out := c.ListNamedQueriesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDataCatalogs +func (c *Athena) ListDataCatalogs(input *ListDataCatalogsInput) (*ListDataCatalogsOutput, error) { + req, out := c.ListDataCatalogsRequest(input) return out, req.Send() } -// ListNamedQueriesWithContext is the same as ListNamedQueries with the addition of +// ListDataCatalogsWithContext is the same as ListDataCatalogs with the addition of // the ability to pass a context and additional request options. // -// See ListNamedQueries for details on how to use this API operation. +// See ListDataCatalogs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListNamedQueriesWithContext(ctx aws.Context, input *ListNamedQueriesInput, opts ...request.Option) (*ListNamedQueriesOutput, error) { - req, out := c.ListNamedQueriesRequest(input) +func (c *Athena) ListDataCatalogsWithContext(ctx aws.Context, input *ListDataCatalogsInput, opts ...request.Option) (*ListDataCatalogsOutput, error) { + req, out := c.ListDataCatalogsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListNamedQueriesPages iterates over the pages of a ListNamedQueries operation, +// ListDataCatalogsPages iterates over the pages of a ListDataCatalogs operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListNamedQueries method for more information on how to use this operation. +// See ListDataCatalogs method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListNamedQueries operation. +// // Example iterating over at most 3 pages of a ListDataCatalogs operation. // pageNum := 0 -// err := client.ListNamedQueriesPages(params, -// func(page *athena.ListNamedQueriesOutput, lastPage bool) bool { +// err := client.ListDataCatalogsPages(params, +// func(page *athena.ListDataCatalogsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Athena) ListNamedQueriesPages(input *ListNamedQueriesInput, fn func(*ListNamedQueriesOutput, bool) bool) error { - return c.ListNamedQueriesPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Athena) ListDataCatalogsPages(input *ListDataCatalogsInput, fn func(*ListDataCatalogsOutput, bool) bool) error { + return c.ListDataCatalogsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListNamedQueriesPagesWithContext same as ListNamedQueriesPages except +// ListDataCatalogsPagesWithContext same as ListDataCatalogsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListNamedQueriesPagesWithContext(ctx aws.Context, input *ListNamedQueriesInput, fn func(*ListNamedQueriesOutput, bool) bool, opts ...request.Option) error { +func (c *Athena) ListDataCatalogsPagesWithContext(ctx aws.Context, input *ListDataCatalogsInput, fn func(*ListDataCatalogsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListNamedQueriesInput + var inCpy *ListDataCatalogsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListNamedQueriesRequest(inCpy) + req, _ := c.ListDataCatalogsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -1094,7 +1526,7 @@ func (c *Athena) ListNamedQueriesPagesWithContext(ctx aws.Context, input *ListNa } for p.Next() { - if !fn(p.Page().(*ListNamedQueriesOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListDataCatalogsOutput), !p.HasNextPage()) { break } } @@ -1102,35 +1534,35 @@ func (c *Athena) ListNamedQueriesPagesWithContext(ctx aws.Context, input *ListNa return p.Err() } -const opListQueryExecutions = "ListQueryExecutions" +const opListDatabases = "ListDatabases" -// ListQueryExecutionsRequest generates a "aws/request.Request" representing the -// client's request for the ListQueryExecutions operation. The "output" return +// ListDatabasesRequest generates a "aws/request.Request" representing the +// client's request for the ListDatabases operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListQueryExecutions for more information on using the ListQueryExecutions +// See ListDatabases for more information on using the ListDatabases // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListQueryExecutionsRequest method. -// req, resp := client.ListQueryExecutionsRequest(params) +// // Example sending a request using the ListDatabasesRequest method. +// req, resp := client.ListDatabasesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListQueryExecutions -func (c *Athena) ListQueryExecutionsRequest(input *ListQueryExecutionsInput) (req *request.Request, output *ListQueryExecutionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDatabases +func (c *Athena) ListDatabasesRequest(input *ListDatabasesInput) (req *request.Request, output *ListDatabasesOutput) { op := &request.Operation{ - Name: opListQueryExecutions, + Name: opListDatabases, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -1142,30 +1574,24 @@ func (c *Athena) ListQueryExecutionsRequest(input *ListQueryExecutionsInput) (re } if input == nil { - input = &ListQueryExecutionsInput{} + input = &ListDatabasesInput{} } - output = &ListQueryExecutionsOutput{} + output = &ListDatabasesOutput{} req = c.newRequest(op, input, output) return } -// ListQueryExecutions API operation for Amazon Athena. 
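For the ListDataCatalogs pagination helpers shown above, a short sketch of iterating every page with ListDataCatalogsPages; the session setup is an assumption and the callback simply prints each page.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/athena"
    )

    func main() {
        svc := athena.New(session.Must(session.NewSession()))

        // Walk every page of catalogs; returning true keeps iterating.
        err := svc.ListDataCatalogsPages(&athena.ListDataCatalogsInput{},
            func(page *athena.ListDataCatalogsOutput, lastPage bool) bool {
                fmt.Println(page)
                return !lastPage
            })
        if err != nil {
            log.Fatal(err)
        }
    }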
-// -// Provides a list of available query execution IDs for the queries in the specified -// workgroup. Requires you to have access to the workgroup in which the queries -// ran. +// ListDatabases API operation for Amazon Athena. // -// For code samples using the AWS SDK for Java, see Examples and Code Samples -// (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the Amazon -// Athena User Guide. +// Lists the databases in the specified data catalog. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation ListQueryExecutions for usage and error information. +// API operation ListDatabases for usage and error information. // // Returned Error Types: // * InternalServerException @@ -1176,65 +1602,72 @@ func (c *Athena) ListQueryExecutionsRequest(input *ListQueryExecutionsInput) (re // Indicates that something is wrong with the input to the request. For example, // a required parameter may be missing or out of range. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListQueryExecutions -func (c *Athena) ListQueryExecutions(input *ListQueryExecutionsInput) (*ListQueryExecutionsOutput, error) { - req, out := c.ListQueryExecutionsRequest(input) +// * MetadataException +// An exception that Athena received when it called a custom metastore. Occurs +// if the error is not caused by user input (InvalidRequestException) or from +// the Athena platform (InternalServerException). For example, if a user-created +// Lambda function is missing permissions, the Lambda 4XX exception is returned +// in a MetadataException. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListDatabases +func (c *Athena) ListDatabases(input *ListDatabasesInput) (*ListDatabasesOutput, error) { + req, out := c.ListDatabasesRequest(input) return out, req.Send() } -// ListQueryExecutionsWithContext is the same as ListQueryExecutions with the addition of +// ListDatabasesWithContext is the same as ListDatabases with the addition of // the ability to pass a context and additional request options. // -// See ListQueryExecutions for details on how to use this API operation. +// See ListDatabases for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListQueryExecutionsWithContext(ctx aws.Context, input *ListQueryExecutionsInput, opts ...request.Option) (*ListQueryExecutionsOutput, error) { - req, out := c.ListQueryExecutionsRequest(input) +func (c *Athena) ListDatabasesWithContext(ctx aws.Context, input *ListDatabasesInput, opts ...request.Option) (*ListDatabasesOutput, error) { + req, out := c.ListDatabasesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListQueryExecutionsPages iterates over the pages of a ListQueryExecutions operation, +// ListDatabasesPages iterates over the pages of a ListDatabases operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListQueryExecutions method for more information on how to use this operation. 
+// See ListDatabases method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListQueryExecutions operation. +// // Example iterating over at most 3 pages of a ListDatabases operation. // pageNum := 0 -// err := client.ListQueryExecutionsPages(params, -// func(page *athena.ListQueryExecutionsOutput, lastPage bool) bool { +// err := client.ListDatabasesPages(params, +// func(page *athena.ListDatabasesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Athena) ListQueryExecutionsPages(input *ListQueryExecutionsInput, fn func(*ListQueryExecutionsOutput, bool) bool) error { - return c.ListQueryExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Athena) ListDatabasesPages(input *ListDatabasesInput, fn func(*ListDatabasesOutput, bool) bool) error { + return c.ListDatabasesPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListQueryExecutionsPagesWithContext same as ListQueryExecutionsPages except +// ListDatabasesPagesWithContext same as ListDatabasesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListQueryExecutionsPagesWithContext(ctx aws.Context, input *ListQueryExecutionsInput, fn func(*ListQueryExecutionsOutput, bool) bool, opts ...request.Option) error { +func (c *Athena) ListDatabasesPagesWithContext(ctx aws.Context, input *ListDatabasesInput, fn func(*ListDatabasesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListQueryExecutionsInput + var inCpy *ListDatabasesInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListQueryExecutionsRequest(inCpy) + req, _ := c.ListDatabasesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -1242,7 +1675,7 @@ func (c *Athena) ListQueryExecutionsPagesWithContext(ctx aws.Context, input *Lis } for p.Next() { - if !fn(p.Page().(*ListQueryExecutionsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListDatabasesOutput), !p.HasNextPage()) { break } } @@ -1250,58 +1683,70 @@ func (c *Athena) ListQueryExecutionsPagesWithContext(ctx aws.Context, input *Lis return p.Err() } -const opListTagsForResource = "ListTagsForResource" +const opListNamedQueries = "ListNamedQueries" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// ListNamedQueriesRequest generates a "aws/request.Request" representing the +// client's request for the ListNamedQueries operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See ListNamedQueries for more information on using the ListNamedQueries // API call, and error handling. 
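A paging sketch for the ListDatabases operation covered above. The catalog name is a placeholder assumption; ListDatabases requires a catalog, unlike ListDataCatalogs.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/athena"
    )

    func main() {
        svc := athena.New(session.Must(session.NewSession()))

        // "AwsDataCatalog" is a placeholder catalog name.
        input := &athena.ListDatabasesInput{CatalogName: aws.String("AwsDataCatalog")}
        err := svc.ListDatabasesPages(input,
            func(page *athena.ListDatabasesOutput, lastPage bool) bool {
                fmt.Println(page)
                return !lastPage
            })
        if err != nil {
            log.Fatal(err)
        }
    }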
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the ListNamedQueriesRequest method. +// req, resp := client.ListNamedQueriesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTagsForResource -func (c *Athena) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNamedQueries +func (c *Athena) ListNamedQueriesRequest(input *ListNamedQueriesInput) (req *request.Request, output *ListNamedQueriesOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opListNamedQueries, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &ListTagsForResourceInput{} + input = &ListNamedQueriesInput{} } - output = &ListTagsForResourceOutput{} + output = &ListNamedQueriesOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for Amazon Athena. +// ListNamedQueries API operation for Amazon Athena. +// +// Provides a list of available query IDs only for queries saved in the specified +// workgroup. Requires that you have access to the specified workgroup. If a +// workgroup is not specified, lists the saved queries for the primary workgroup. // -// Lists the tags associated with this workgroup. +// For code samples using the AWS SDK for Java, see Examples and Code Samples +// (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the Amazon +// Athena User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation ListTagsForResource for usage and error information. +// API operation ListNamedQueries for usage and error information. // // Returned Error Types: // * InternalServerException @@ -1312,50 +1757,542 @@ func (c *Athena) ListTagsForResourceRequest(input *ListTagsForResourceInput) (re // Indicates that something is wrong with the input to the request. For example, // a required parameter may be missing or out of range. // -// * ResourceNotFoundException -// A resource, such as a workgroup, was not found. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTagsForResource -func (c *Athena) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListNamedQueries +func (c *Athena) ListNamedQueries(input *ListNamedQueriesInput) (*ListNamedQueriesOutput, error) { + req, out := c.ListNamedQueriesRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// ListNamedQueriesWithContext is the same as ListNamedQueries with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation. +// See ListNamedQueries for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *Athena) ListNamedQueriesWithContext(ctx aws.Context, input *ListNamedQueriesInput, opts ...request.Option) (*ListNamedQueriesOutput, error) { + req, out := c.ListNamedQueriesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListWorkGroups = "ListWorkGroups" +// ListNamedQueriesPages iterates over the pages of a ListNamedQueries operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListNamedQueries method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListNamedQueries operation. +// pageNum := 0 +// err := client.ListNamedQueriesPages(params, +// func(page *athena.ListNamedQueriesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Athena) ListNamedQueriesPages(input *ListNamedQueriesInput, fn func(*ListNamedQueriesOutput, bool) bool) error { + return c.ListNamedQueriesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// ListWorkGroupsRequest generates a "aws/request.Request" representing the -// client's request for the ListWorkGroups operation. The "output" return +// ListNamedQueriesPagesWithContext same as ListNamedQueriesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Athena) ListNamedQueriesPagesWithContext(ctx aws.Context, input *ListNamedQueriesInput, fn func(*ListNamedQueriesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListNamedQueriesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListNamedQueriesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListNamedQueriesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListQueryExecutions = "ListQueryExecutions" + +// ListQueryExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the ListQueryExecutions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListWorkGroups for more information on using the ListWorkGroups +// See ListQueryExecutions for more information on using the ListQueryExecutions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListWorkGroupsRequest method. -// req, resp := client.ListWorkGroupsRequest(params) +// // Example sending a request using the ListQueryExecutionsRequest method. +// req, resp := client.ListQueryExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListQueryExecutions +func (c *Athena) ListQueryExecutionsRequest(input *ListQueryExecutionsInput) (req *request.Request, output *ListQueryExecutionsOutput) { + op := &request.Operation{ + Name: opListQueryExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListQueryExecutionsInput{} + } + + output = &ListQueryExecutionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListQueryExecutions API operation for Amazon Athena. +// +// Provides a list of available query execution IDs for the queries in the specified +// workgroup. If a workgroup is not specified, returns a list of query execution +// IDs for the primary workgroup. Requires you to have access to the workgroup +// in which the queries ran. +// +// For code samples using the AWS SDK for Java, see Examples and Code Samples +// (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the Amazon +// Athena User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation ListQueryExecutions for usage and error information. +// +// Returned Error Types: +// * InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// * InvalidRequestException +// Indicates that something is wrong with the input to the request. 
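A single-page sketch of ListNamedQueries scoped to a workgroup, as described above; omitting WorkGroup falls back to the primary workgroup. The workgroup name is an assumption.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/athena"
    )

    func main() {
        svc := athena.New(session.Must(session.NewSession()))

        // "primary" is shown explicitly; leaving WorkGroup unset has the same effect.
        out, err := svc.ListNamedQueries(&athena.ListNamedQueriesInput{
            WorkGroup: aws.String("primary"),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out)
    }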
For example, +// a required parameter may be missing or out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListQueryExecutions +func (c *Athena) ListQueryExecutions(input *ListQueryExecutionsInput) (*ListQueryExecutionsOutput, error) { + req, out := c.ListQueryExecutionsRequest(input) + return out, req.Send() +} + +// ListQueryExecutionsWithContext is the same as ListQueryExecutions with the addition of +// the ability to pass a context and additional request options. +// +// See ListQueryExecutions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListQueryExecutionsWithContext(ctx aws.Context, input *ListQueryExecutionsInput, opts ...request.Option) (*ListQueryExecutionsOutput, error) { + req, out := c.ListQueryExecutionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListQueryExecutionsPages iterates over the pages of a ListQueryExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListQueryExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListQueryExecutions operation. +// pageNum := 0 +// err := client.ListQueryExecutionsPages(params, +// func(page *athena.ListQueryExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Athena) ListQueryExecutionsPages(input *ListQueryExecutionsInput, fn func(*ListQueryExecutionsOutput, bool) bool) error { + return c.ListQueryExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListQueryExecutionsPagesWithContext same as ListQueryExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListQueryExecutionsPagesWithContext(ctx aws.Context, input *ListQueryExecutionsInput, fn func(*ListQueryExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListQueryExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListQueryExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListQueryExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTableMetadata = "ListTableMetadata" + +// ListTableMetadataRequest generates a "aws/request.Request" representing the +// client's request for the ListTableMetadata operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
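A sketch combining context propagation with pagination for ListQueryExecutions, per the wrappers above. The workgroup name is a placeholder; any non-nil context works.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/athena"
    )

    func main() {
        svc := athena.New(session.Must(session.NewSession()))

        input := &athena.ListQueryExecutionsInput{WorkGroup: aws.String("primary")}
        err := svc.ListQueryExecutionsPagesWithContext(context.Background(), input,
            func(page *athena.ListQueryExecutionsOutput, lastPage bool) bool {
                fmt.Println(page)
                return !lastPage
            })
        if err != nil {
            log.Fatal(err)
        }
    }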
+// the "output" return value is not valid until after Send returns without error. +// +// See ListTableMetadata for more information on using the ListTableMetadata +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTableMetadataRequest method. +// req, resp := client.ListTableMetadataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTableMetadata +func (c *Athena) ListTableMetadataRequest(input *ListTableMetadataInput) (req *request.Request, output *ListTableMetadataOutput) { + op := &request.Operation{ + Name: opListTableMetadata, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTableMetadataInput{} + } + + output = &ListTableMetadataOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTableMetadata API operation for Amazon Athena. +// +// Lists the metadata for the tables in the specified data catalog database. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation ListTableMetadata for usage and error information. +// +// Returned Error Types: +// * InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// * InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// * MetadataException +// An exception that Athena received when it called a custom metastore. Occurs +// if the error is not caused by user input (InvalidRequestException) or from +// the Athena platform (InternalServerException). For example, if a user-created +// Lambda function is missing permissions, the Lambda 4XX exception is returned +// in a MetadataException. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTableMetadata +func (c *Athena) ListTableMetadata(input *ListTableMetadataInput) (*ListTableMetadataOutput, error) { + req, out := c.ListTableMetadataRequest(input) + return out, req.Send() +} + +// ListTableMetadataWithContext is the same as ListTableMetadata with the addition of +// the ability to pass a context and additional request options. +// +// See ListTableMetadata for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListTableMetadataWithContext(ctx aws.Context, input *ListTableMetadataInput, opts ...request.Option) (*ListTableMetadataOutput, error) { + req, out := c.ListTableMetadataRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListTableMetadataPages iterates over the pages of a ListTableMetadata operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTableMetadata method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTableMetadata operation. +// pageNum := 0 +// err := client.ListTableMetadataPages(params, +// func(page *athena.ListTableMetadataOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Athena) ListTableMetadataPages(input *ListTableMetadataInput, fn func(*ListTableMetadataOutput, bool) bool) error { + return c.ListTableMetadataPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTableMetadataPagesWithContext same as ListTableMetadataPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListTableMetadataPagesWithContext(ctx aws.Context, input *ListTableMetadataInput, fn func(*ListTableMetadataOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTableMetadataInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTableMetadataRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTableMetadataOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. 
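For the ListTableMetadata pagination helpers above, a sketch that walks the tables of one database. Catalog and database names are placeholder assumptions.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/athena"
    )

    func main() {
        svc := athena.New(session.Must(session.NewSession()))

        // Placeholder catalog and database names.
        input := &athena.ListTableMetadataInput{
            CatalogName:  aws.String("AwsDataCatalog"),
            DatabaseName: aws.String("sampledb"),
        }
        err := svc.ListTableMetadataPages(input,
            func(page *athena.ListTableMetadataOutput, lastPage bool) bool {
                fmt.Println(page)
                return !lastPage
            })
        if err != nil {
            log.Fatal(err)
        }
    }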
+// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTagsForResource +func (c *Athena) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon Athena. +// +// Lists the tags associated with an Athena workgroup or data catalog resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// * InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// * ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/ListTagsForResource +func (c *Athena) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTagsForResourcePages iterates over the pages of a ListTagsForResource operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTagsForResource method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTagsForResource operation. 
+// pageNum := 0 +// err := client.ListTagsForResourcePages(params, +// func(page *athena.ListTagsForResourceOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Athena) ListTagsForResourcePages(input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool) error { + return c.ListTagsForResourcePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTagsForResourcePagesWithContext same as ListTagsForResourcePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) ListTagsForResourcePagesWithContext(ctx aws.Context, input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTagsForResourceInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTagsForResourceRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListWorkGroups = "ListWorkGroups" + +// ListWorkGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListWorkGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListWorkGroups for more information on using the ListWorkGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListWorkGroupsRequest method. +// req, resp := client.ListWorkGroupsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled @@ -1524,11 +2461,10 @@ func (c *Athena) StartQueryExecutionRequest(input *StartQueryExecutionInput) (re // StartQueryExecution API operation for Amazon Athena. // // Runs the SQL query statements contained in the Query. Requires you to have -// access to the workgroup in which the query ran. -// -// For code samples using the AWS SDK for Java, see Examples and Code Samples -// (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) in the Amazon -// Athena User Guide. +// access to the workgroup in which the query ran. Running queries against an +// external catalog requires GetDataCatalog permission to the catalog. For code +// samples using the AWS SDK for Java, see Examples and Code Samples (http://docs.aws.amazon.com/athena/latest/ug/code-samples.html) +// in the Amazon Athena User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1706,19 +2642,18 @@ func (c *Athena) TagResourceRequest(input *TagResourceInput) (req *request.Reque // TagResource API operation for Amazon Athena. 
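A sketch of the newly paginated ListTagsForResource call above. The workgroup ARN, region, and account ID are placeholders, not values from this patch.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/athena"
    )

    func main() {
        svc := athena.New(session.Must(session.NewSession()))

        // The workgroup ARN below is a placeholder.
        input := &athena.ListTagsForResourceInput{
            ResourceARN: aws.String("arn:aws:athena:us-east-1:111122223333:workgroup/primary"),
        }
        err := svc.ListTagsForResourcePages(input,
            func(page *athena.ListTagsForResourceOutput, lastPage bool) bool {
                fmt.Println(page)
                return !lastPage
            })
        if err != nil {
            log.Fatal(err)
        }
    }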
// -// Adds one or more tags to the resource, such as a workgroup. A tag is a label -// that you assign to an AWS Athena resource (a workgroup). Each tag consists -// of a key and an optional value, both of which you define. Tags enable you -// to categorize resources (workgroups) in Athena, for example, by purpose, -// owner, or environment. Use a consistent set of tag keys to make it easier -// to search and filter workgroups in your account. For best practices, see -// AWS Tagging Strategies (https://aws.amazon.com/answers/account-management/aws-tagging-strategies/). -// The key length is from 1 (minimum) to 128 (maximum) Unicode characters in -// UTF-8. The tag value length is from 0 (minimum) to 256 (maximum) Unicode -// characters in UTF-8. You can use letters and numbers representable in UTF-8, -// and the following characters: + - = . _ : / @. Tag keys and values are case-sensitive. -// Tag keys must be unique per resource. If you specify more than one, separate -// them by commas. +// Adds one or more tags to an Athena resource. A tag is a label that you assign +// to a resource. In Athena, a resource can be a workgroup or data catalog. +// Each tag consists of a key and an optional value, both of which you define. +// For example, you can use tags to categorize Athena workgroups or data catalogs +// by purpose, owner, or environment. Use a consistent set of tag keys to make +// it easier to search and filter workgroups or data catalogs in your account. +// For best practices, see Tagging Best Practices (https://aws.amazon.com/answers/account-management/aws-tagging-strategies/). +// Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can +// be from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers +// representable in UTF-8, and the following characters: + - = . _ : / @. Tag +// keys and values are case-sensitive. Tag keys must be unique per resource. +// If you specify more than one tag, separate them by commas. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1786,36 +2721,122 @@ const opUntagResource = "UntagResource" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UntagResource -func (c *Athena) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UntagResource +func (c *Athena) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon Athena. +// +// Removes one or more tags from a data catalog or workgroup resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Athena's +// API operation UntagResource for usage and error information. 
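A sketch of TagResource following the tagging rules described above (keys 1-128 and values 0-256 UTF-8 characters, case-sensitive, unique per resource). The data catalog ARN and tag values are placeholder assumptions.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/athena"
    )

    func main() {
        svc := athena.New(session.Must(session.NewSession()))

        // Placeholder data catalog ARN and tag values.
        out, err := svc.TagResource(&athena.TagResourceInput{
            ResourceARN: aws.String("arn:aws:athena:us-east-1:111122223333:datacatalog/example"),
            Tags: []*athena.Tag{
                {Key: aws.String("Environment"), Value: aws.String("dev")},
                {Key: aws.String("Owner"), Value: aws.String("analytics")},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out)
    }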
+// +// Returned Error Types: +// * InternalServerException +// Indicates a platform issue, which may be due to a transient condition or +// outage. +// +// * InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter may be missing or out of range. +// +// * ResourceNotFoundException +// A resource, such as a workgroup, was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UntagResource +func (c *Athena) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Athena) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDataCatalog = "UpdateDataCatalog" + +// UpdateDataCatalogRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDataCatalog operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDataCatalog for more information on using the UpdateDataCatalog +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDataCatalogRequest method. +// req, resp := client.UpdateDataCatalogRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateDataCatalog +func (c *Athena) UpdateDataCatalogRequest(input *UpdateDataCatalogInput) (req *request.Request, output *UpdateDataCatalogOutput) { op := &request.Operation{ - Name: opUntagResource, + Name: opUpdateDataCatalog, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UntagResourceInput{} + input = &UpdateDataCatalogInput{} } - output = &UntagResourceOutput{} + output = &UpdateDataCatalogOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UntagResource API operation for Amazon Athena. +// UpdateDataCatalog API operation for Amazon Athena. // -// Removes one or more tags from the workgroup resource. Takes as an input a -// list of TagKey Strings separated by commas, and removes their tags at the -// same time. +// Updates the data catalog that has the specified name. // // Returns awserr.Error for service API and SDK errors. 
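A sketch of UntagResource, the inverse of the call above: only the listed tag keys are removed from the resource. The ARN and keys are placeholder assumptions.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/athena"
    )

    func main() {
        svc := athena.New(session.Must(session.NewSession()))

        // Placeholder ARN; only the listed tag keys are removed.
        out, err := svc.UntagResource(&athena.UntagResourceInput{
            ResourceARN: aws.String("arn:aws:athena:us-east-1:111122223333:workgroup/primary"),
            TagKeys:     []*string{aws.String("Environment"), aws.String("Owner")},
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out)
    }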
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Athena's -// API operation UntagResource for usage and error information. +// API operation UpdateDataCatalog for usage and error information. // // Returned Error Types: // * InternalServerException @@ -1826,26 +2847,23 @@ func (c *Athena) UntagResourceRequest(input *UntagResourceInput) (req *request.R // Indicates that something is wrong with the input to the request. For example, // a required parameter may be missing or out of range. // -// * ResourceNotFoundException -// A resource, such as a workgroup, was not found. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UntagResource -func (c *Athena) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/athena-2017-05-18/UpdateDataCatalog +func (c *Athena) UpdateDataCatalog(input *UpdateDataCatalogInput) (*UpdateDataCatalogOutput, error) { + req, out := c.UpdateDataCatalogRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// UpdateDataCatalogWithContext is the same as UpdateDataCatalog with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See UpdateDataCatalog for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Athena) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *Athena) UpdateDataCatalogWithContext(ctx aws.Context, input *UpdateDataCatalogInput, opts ...request.Option) (*UpdateDataCatalogOutput, error) { + req, out := c.UpdateDataCatalogRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -2083,6 +3101,50 @@ func (s *BatchGetQueryExecutionOutput) SetUnprocessedQueryExecutionIds(v []*Unpr return s } +// Contains metadata for a column in a table. +type Column struct { + _ struct{} `type:"structure"` + + // Optional information about the column. + Comment *string `type:"string"` + + // The name of the column. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The data type of the column. + Type *string `type:"string"` +} + +// String returns the string representation +func (s Column) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Column) GoString() string { + return s.String() +} + +// SetComment sets the Comment field's value. +func (s *Column) SetComment(v string) *Column { + s.Comment = &v + return s +} + +// SetName sets the Name field's value. +func (s *Column) SetName(v string) *Column { + s.Name = &v + return s +} + +// SetType sets the Type field's value. +func (s *Column) SetType(v string) *Column { + s.Type = &v + return s +} + // Information about the columns in a query execution result. 
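A sketch of UpdateDataCatalog, which updates the catalog with the given name as described above. The catalog name, type, and Lambda function ARN are placeholder assumptions.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/athena"
    )

    func main() {
        svc := athena.New(session.Must(session.NewSession()))

        // Placeholder catalog name and Lambda ARN; "LAMBDA" catalogs take a
        // single "function" parameter for a composite connector.
        out, err := svc.UpdateDataCatalog(&athena.UpdateDataCatalogInput{
            Name:        aws.String("example_catalog"),
            Type:        aws.String("LAMBDA"),
            Description: aws.String("Updated description"),
            Parameters: map[string]*string{
                "function": aws.String("arn:aws:lambda:us-east-1:111122223333:function:example-connector"),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out)
    }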
type ColumnInfo struct { _ struct{} `type:"structure"` @@ -2194,6 +3256,132 @@ func (s *ColumnInfo) SetType(v string) *ColumnInfo { return s } +type CreateDataCatalogInput struct { + _ struct{} `type:"structure"` + + // A description of the data catalog to be created. + Description *string `min:"1" type:"string"` + + // The name of the data catalog to create. The catalog name must be unique for + // the AWS account and can use a maximum of 128 alphanumeric, underscore, at + // sign, or hyphen characters. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Specifies the Lambda function or functions to use for creating the data catalog. + // This is a mapping whose values depend on the catalog type. + // + // * For the HIVE data catalog type, use the following syntax. The metadata-function + // parameter is required. The sdk-version parameter is optional and defaults + // to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number + // + // * For the LAMBDA data catalog type, use one of the following sets of required + // parameters, but not both. If you have one Lambda function that processes + // metadata and another for reading the actual data, use the following syntax. + // Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn + // If you have a composite Lambda function that processes both metadata and + // data, use the following syntax to specify your Lambda function. function=lambda_arn + // + // * The GLUE type has no parameters. + Parameters map[string]*string `type:"map"` + + // A list of comma separated tags to add to the data catalog that is created. + Tags []*Tag `type:"list"` + + // The type of data catalog to create: LAMBDA for a federated catalog, GLUE + // for AWS Glue Catalog, or HIVE for an external hive metastore. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"DataCatalogType"` +} + +// String returns the string representation +func (s CreateDataCatalogInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataCatalogInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDataCatalogInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDataCatalogInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreateDataCatalogInput) SetDescription(v string) *CreateDataCatalogInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateDataCatalogInput) SetName(v string) *CreateDataCatalogInput { + s.Name = &v + return s +} + +// SetParameters sets the Parameters field's value. 
+func (s *CreateDataCatalogInput) SetParameters(v map[string]*string) *CreateDataCatalogInput { + s.Parameters = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDataCatalogInput) SetTags(v []*Tag) *CreateDataCatalogInput { + s.Tags = v + return s +} + +// SetType sets the Type field's value. +func (s *CreateDataCatalogInput) SetType(v string) *CreateDataCatalogInput { + s.Type = &v + return s +} + +type CreateDataCatalogOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateDataCatalogOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataCatalogOutput) GoString() string { + return s.String() +} + type CreateNamedQueryInput struct { _ struct{} `type:"structure"` @@ -2352,8 +3540,7 @@ type CreateWorkGroupInput struct { // Name is a required field Name *string `type:"string" required:"true"` - // One or more tags, separated by commas, that you want to attach to the workgroup - // as you create it. + // A list of comma separated tags to add to the workgroup that is created. Tags []*Tag `type:"list"` } @@ -2395,68 +3582,273 @@ func (s *CreateWorkGroupInput) Validate() error { return nil } -// SetConfiguration sets the Configuration field's value. -func (s *CreateWorkGroupInput) SetConfiguration(v *WorkGroupConfiguration) *CreateWorkGroupInput { - s.Configuration = v - return s +// SetConfiguration sets the Configuration field's value. +func (s *CreateWorkGroupInput) SetConfiguration(v *WorkGroupConfiguration) *CreateWorkGroupInput { + s.Configuration = v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateWorkGroupInput) SetDescription(v string) *CreateWorkGroupInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateWorkGroupInput) SetName(v string) *CreateWorkGroupInput { + s.Name = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateWorkGroupInput) SetTags(v []*Tag) *CreateWorkGroupInput { + s.Tags = v + return s +} + +type CreateWorkGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateWorkGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWorkGroupOutput) GoString() string { + return s.String() +} + +// Contains information about a data catalog in an AWS account. +type DataCatalog struct { + _ struct{} `type:"structure"` + + // An optional description of the data catalog. + Description *string `min:"1" type:"string"` + + // The name of the data catalog. The catalog name must be unique for the AWS + // account and can use a maximum of 128 alphanumeric, underscore, at sign, or + // hyphen characters. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Specifies the Lambda function or functions to use for the data catalog. This + // is a mapping whose values depend on the catalog type. + // + // * For the HIVE data catalog type, use the following syntax. The metadata-function + // parameter is required. The sdk-version parameter is optional and defaults + // to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number + // + // * For the LAMBDA data catalog type, use one of the following sets of required + // parameters, but not both. 
If you have one Lambda function that processes + // metadata and another for reading the actual data, use the following syntax. + // Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn + // If you have a composite Lambda function that processes both metadata and + // data, use the following syntax to specify your Lambda function. function=lambda_arn + // + // * The GLUE type has no parameters. + Parameters map[string]*string `type:"map"` + + // The type of data catalog: LAMBDA for a federated catalog, GLUE for AWS Glue + // Catalog, or HIVE for an external hive metastore. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"DataCatalogType"` +} + +// String returns the string representation +func (s DataCatalog) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataCatalog) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *DataCatalog) SetDescription(v string) *DataCatalog { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *DataCatalog) SetName(v string) *DataCatalog { + s.Name = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *DataCatalog) SetParameters(v map[string]*string) *DataCatalog { + s.Parameters = v + return s +} + +// SetType sets the Type field's value. +func (s *DataCatalog) SetType(v string) *DataCatalog { + s.Type = &v + return s +} + +// The summary information for the data catalog, which includes its name and +// type. +type DataCatalogSummary struct { + _ struct{} `type:"structure"` + + // The name of the data catalog. + CatalogName *string `min:"1" type:"string"` + + // The data catalog type. + Type *string `type:"string" enum:"DataCatalogType"` +} + +// String returns the string representation +func (s DataCatalogSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataCatalogSummary) GoString() string { + return s.String() +} + +// SetCatalogName sets the CatalogName field's value. +func (s *DataCatalogSummary) SetCatalogName(v string) *DataCatalogSummary { + s.CatalogName = &v + return s +} + +// SetType sets the Type field's value. +func (s *DataCatalogSummary) SetType(v string) *DataCatalogSummary { + s.Type = &v + return s +} + +// Contains metadata information for a database in a data catalog. +type Database struct { + _ struct{} `type:"structure"` + + // An optional description of the database. + Description *string `min:"1" type:"string"` + + // The name of the database. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A set of custom key/value pairs. + Parameters map[string]*string `type:"map"` +} + +// String returns the string representation +func (s Database) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Database) GoString() string { + return s.String() } // SetDescription sets the Description field's value. -func (s *CreateWorkGroupInput) SetDescription(v string) *CreateWorkGroupInput { +func (s *Database) SetDescription(v string) *Database { s.Description = &v return s } // SetName sets the Name field's value. -func (s *CreateWorkGroupInput) SetName(v string) *CreateWorkGroupInput { +func (s *Database) SetName(v string) *Database { s.Name = &v return s } -// SetTags sets the Tags field's value. 
-func (s *CreateWorkGroupInput) SetTags(v []*Tag) *CreateWorkGroupInput { - s.Tags = v +// SetParameters sets the Parameters field's value. +func (s *Database) SetParameters(v map[string]*string) *Database { + s.Parameters = v return s } -type CreateWorkGroupOutput struct { +// A piece of data (a field in the table). +type Datum struct { _ struct{} `type:"structure"` + + // The value of the datum. + VarCharValue *string `type:"string"` } // String returns the string representation -func (s CreateWorkGroupOutput) String() string { +func (s Datum) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateWorkGroupOutput) GoString() string { +func (s Datum) GoString() string { return s.String() } -// A piece of data (a field in the table). -type Datum struct { +// SetVarCharValue sets the VarCharValue field's value. +func (s *Datum) SetVarCharValue(v string) *Datum { + s.VarCharValue = &v + return s +} + +type DeleteDataCatalogInput struct { _ struct{} `type:"structure"` - // The value of the datum. - VarCharValue *string `type:"string"` + // The name of the data catalog to delete. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s Datum) String() string { +func (s DeleteDataCatalogInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Datum) GoString() string { +func (s DeleteDataCatalogInput) GoString() string { return s.String() } -// SetVarCharValue sets the VarCharValue field's value. -func (s *Datum) SetVarCharValue(v string) *Datum { - s.VarCharValue = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDataCatalogInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDataCatalogInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteDataCatalogInput) SetName(v string) *DeleteDataCatalogInput { + s.Name = &v return s } +type DeleteDataCatalogOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDataCatalogOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDataCatalogOutput) GoString() string { + return s.String() +} + type DeleteNamedQueryInput struct { _ struct{} `type:"structure"` @@ -2611,6 +4003,151 @@ func (s *EncryptionConfiguration) SetKmsKey(v string) *EncryptionConfiguration { return s } +type GetDataCatalogInput struct { + _ struct{} `type:"structure"` + + // The name of the data catalog to return. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDataCatalogInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDataCatalogInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetDataCatalogInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDataCatalogInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *GetDataCatalogInput) SetName(v string) *GetDataCatalogInput { + s.Name = &v + return s +} + +type GetDataCatalogOutput struct { + _ struct{} `type:"structure"` + + // The data catalog returned. + DataCatalog *DataCatalog `type:"structure"` +} + +// String returns the string representation +func (s GetDataCatalogOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDataCatalogOutput) GoString() string { + return s.String() +} + +// SetDataCatalog sets the DataCatalog field's value. +func (s *GetDataCatalogOutput) SetDataCatalog(v *DataCatalog) *GetDataCatalogOutput { + s.DataCatalog = v + return s +} + +type GetDatabaseInput struct { + _ struct{} `type:"structure"` + + // The name of the data catalog that contains the database to return. + // + // CatalogName is a required field + CatalogName *string `min:"1" type:"string" required:"true"` + + // The name of the database to return. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDatabaseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDatabaseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDatabaseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDatabaseInput"} + if s.CatalogName == nil { + invalidParams.Add(request.NewErrParamRequired("CatalogName")) + } + if s.CatalogName != nil && len(*s.CatalogName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogName sets the CatalogName field's value. +func (s *GetDatabaseInput) SetCatalogName(v string) *GetDatabaseInput { + s.CatalogName = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetDatabaseInput) SetDatabaseName(v string) *GetDatabaseInput { + s.DatabaseName = &v + return s +} + +type GetDatabaseOutput struct { + _ struct{} `type:"structure"` + + // The database returned. + Database *Database `type:"structure"` +} + +// String returns the string representation +func (s GetDatabaseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDatabaseOutput) GoString() string { + return s.String() +} + +// SetDatabase sets the Database field's value. +func (s *GetDatabaseOutput) SetDatabase(v *Database) *GetDatabaseOutput { + s.Database = v + return s +} + type GetNamedQueryInput struct { _ struct{} `type:"structure"` @@ -2739,8 +4276,10 @@ type GetQueryResultsInput struct { // The maximum number of results (rows) to return in this request. 
MaxResults *int64 `min:"1" type:"integer"` - // The token that specifies where to start pagination if a previous request - // was truncated. + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. NextToken *string `min:"1" type:"string"` // The unique ID of the query execution. @@ -2765,11 +4304,131 @@ func (s *GetQueryResultsInput) Validate() error { if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.QueryExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetQueryResultsInput) SetMaxResults(v int64) *GetQueryResultsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetQueryResultsInput) SetNextToken(v string) *GetQueryResultsInput { + s.NextToken = &v + return s +} + +// SetQueryExecutionId sets the QueryExecutionId field's value. +func (s *GetQueryResultsInput) SetQueryExecutionId(v string) *GetQueryResultsInput { + s.QueryExecutionId = &v + return s +} + +type GetQueryResultsOutput struct { + _ struct{} `type:"structure"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` + + // The results of the query execution. + ResultSet *ResultSet `type:"structure"` + + // The number of rows inserted with a CREATE TABLE AS SELECT statement. + UpdateCount *int64 `type:"long"` +} + +// String returns the string representation +func (s GetQueryResultsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetQueryResultsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetQueryResultsOutput) SetNextToken(v string) *GetQueryResultsOutput { + s.NextToken = &v + return s +} + +// SetResultSet sets the ResultSet field's value. +func (s *GetQueryResultsOutput) SetResultSet(v *ResultSet) *GetQueryResultsOutput { + s.ResultSet = v + return s +} + +// SetUpdateCount sets the UpdateCount field's value. +func (s *GetQueryResultsOutput) SetUpdateCount(v int64) *GetQueryResultsOutput { + s.UpdateCount = &v + return s +} + +type GetTableMetadataInput struct { + _ struct{} `type:"structure"` + + // The name of the data catalog that contains the database and table metadata + // to return. + // + // CatalogName is a required field + CatalogName *string `min:"1" type:"string" required:"true"` + + // The name of the database that contains the table metadata to return. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The name of the table for which metadata is returned. 
+ // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTableMetadataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTableMetadataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTableMetadataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTableMetadataInput"} + if s.CatalogName == nil { + invalidParams.Add(request.NewErrParamRequired("CatalogName")) + } + if s.CatalogName != nil && len(*s.CatalogName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) } - if s.QueryExecutionId == nil { - invalidParams.Add(request.NewErrParamRequired("QueryExecutionId")) + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) } if invalidParams.Len() > 0 { @@ -2778,62 +4437,44 @@ func (s *GetQueryResultsInput) Validate() error { return nil } -// SetMaxResults sets the MaxResults field's value. -func (s *GetQueryResultsInput) SetMaxResults(v int64) *GetQueryResultsInput { - s.MaxResults = &v +// SetCatalogName sets the CatalogName field's value. +func (s *GetTableMetadataInput) SetCatalogName(v string) *GetTableMetadataInput { + s.CatalogName = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *GetQueryResultsInput) SetNextToken(v string) *GetQueryResultsInput { - s.NextToken = &v +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetTableMetadataInput) SetDatabaseName(v string) *GetTableMetadataInput { + s.DatabaseName = &v return s } -// SetQueryExecutionId sets the QueryExecutionId field's value. -func (s *GetQueryResultsInput) SetQueryExecutionId(v string) *GetQueryResultsInput { - s.QueryExecutionId = &v +// SetTableName sets the TableName field's value. +func (s *GetTableMetadataInput) SetTableName(v string) *GetTableMetadataInput { + s.TableName = &v return s } -type GetQueryResultsOutput struct { +type GetTableMetadataOutput struct { _ struct{} `type:"structure"` - // A token to be used by the next request if this request is truncated. - NextToken *string `min:"1" type:"string"` - - // The results of the query execution. - ResultSet *ResultSet `type:"structure"` - - // The number of rows inserted with a CREATE TABLE AS SELECT statement. - UpdateCount *int64 `type:"long"` + // An object that contains table metadata. + TableMetadata *TableMetadata `type:"structure"` } // String returns the string representation -func (s GetQueryResultsOutput) String() string { +func (s GetTableMetadataOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetQueryResultsOutput) GoString() string { +func (s GetTableMetadataOutput) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *GetQueryResultsOutput) SetNextToken(v string) *GetQueryResultsOutput { - s.NextToken = &v - return s -} - -// SetResultSet sets the ResultSet field's value. 
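Usage sketch showing how the NextToken documented above drives GetQueryResults pagination (illustrative only). The query execution ID is a placeholder; for typical SELECT queries the first row of the first page is the column header row.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	svc := athena.New(session.Must(session.NewSession()))

	// Page through the results of a finished query by feeding each response's
	// NextToken back into the next request, as the doc comment above describes.
	input := &athena.GetQueryResultsInput{
		QueryExecutionId: aws.String("00000000-0000-0000-0000-000000000000"), // placeholder
		MaxResults:       aws.Int64(50),
	}
	for {
		out, err := svc.GetQueryResults(input)
		if err != nil {
			log.Fatal(err)
		}
		if out.ResultSet != nil {
			for _, row := range out.ResultSet.Rows {
				for _, d := range row.Data {
					fmt.Printf("%s\t", aws.StringValue(d.VarCharValue))
				}
				fmt.Println()
			}
		}
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}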
-func (s *GetQueryResultsOutput) SetResultSet(v *ResultSet) *GetQueryResultsOutput { - s.ResultSet = v - return s -} - -// SetUpdateCount sets the UpdateCount field's value. -func (s *GetQueryResultsOutput) SetUpdateCount(v int64) *GetQueryResultsOutput { - s.UpdateCount = &v +// SetTableMetadata sets the TableMetadata field's value. +func (s *GetTableMetadataOutput) SetTableMetadata(v *TableMetadata) *GetTableMetadataOutput { + s.TableMetadata = v return s } @@ -2901,8 +4542,8 @@ func (s *GetWorkGroupOutput) SetWorkGroup(v *WorkGroup) *GetWorkGroupOutput { // Indicates a platform issue, which may be due to a transient condition or // outage. type InternalServerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2919,17 +4560,17 @@ func (s InternalServerException) GoString() string { func newErrorInternalServerException(v protocol.ResponseMetadata) error { return &InternalServerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerException) Code() string { +func (s *InternalServerException) Code() string { return "InternalServerException" } // Message returns the exception's message. -func (s InternalServerException) Message() string { +func (s *InternalServerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2937,29 +4578,29 @@ func (s InternalServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerException) OrigErr() error { +func (s *InternalServerException) OrigErr() error { return nil } -func (s InternalServerException) Error() string { +func (s *InternalServerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID } // Indicates that something is wrong with the input to the request. For example, // a required parameter may be missing or out of range. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error code returned when the query execution failed to process, or when // the processing request for the named query failed. @@ -2980,17 +4621,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. 
-func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2998,22 +4639,211 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListDataCatalogsInput struct { + _ struct{} `type:"structure"` + + // Specifies the maximum number of data catalogs to return. + MaxResults *int64 `min:"2" type:"integer"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListDataCatalogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDataCatalogsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDataCatalogsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDataCatalogsInput"} + if s.MaxResults != nil && *s.MaxResults < 2 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 2)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDataCatalogsInput) SetMaxResults(v int64) *ListDataCatalogsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDataCatalogsInput) SetNextToken(v string) *ListDataCatalogsInput { + s.NextToken = &v + return s +} + +type ListDataCatalogsOutput struct { + _ struct{} `type:"structure"` + + // A summary list of data catalogs. + DataCatalogsSummary []*DataCatalogSummary `type:"list"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListDataCatalogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDataCatalogsOutput) GoString() string { + return s.String() +} + +// SetDataCatalogsSummary sets the DataCatalogsSummary field's value. 
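Usage sketch combining the new ListDataCatalogs and GetDataCatalog operations (illustrative only). Only the first page is read here; larger accounts would loop on NextToken as in the GetQueryResults sketch above.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	svc := athena.New(session.Must(session.NewSession()))

	// List the catalogs registered in the account, then fetch the full
	// definition of each one to inspect its type and parameters.
	list, err := svc.ListDataCatalogs(&athena.ListDataCatalogsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, summary := range list.DataCatalogsSummary {
		got, err := svc.GetDataCatalog(&athena.GetDataCatalogInput{
			Name: summary.CatalogName,
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s (%s): %d parameters\n",
			aws.StringValue(got.DataCatalog.Name),
			aws.StringValue(got.DataCatalog.Type),
			len(got.DataCatalog.Parameters))
	}
}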
+func (s *ListDataCatalogsOutput) SetDataCatalogsSummary(v []*DataCatalogSummary) *ListDataCatalogsOutput { + s.DataCatalogsSummary = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDataCatalogsOutput) SetNextToken(v string) *ListDataCatalogsOutput { + s.NextToken = &v + return s +} + +type ListDatabasesInput struct { + _ struct{} `type:"structure"` + + // The name of the data catalog that contains the databases to return. + // + // CatalogName is a required field + CatalogName *string `min:"1" type:"string" required:"true"` + + // Specifies the maximum number of results to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListDatabasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatabasesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDatabasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDatabasesInput"} + if s.CatalogName == nil { + invalidParams.Add(request.NewErrParamRequired("CatalogName")) + } + if s.CatalogName != nil && len(*s.CatalogName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogName sets the CatalogName field's value. +func (s *ListDatabasesInput) SetCatalogName(v string) *ListDatabasesInput { + s.CatalogName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDatabasesInput) SetMaxResults(v int64) *ListDatabasesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDatabasesInput) SetNextToken(v string) *ListDatabasesInput { + s.NextToken = &v + return s +} + +type ListDatabasesOutput struct { + _ struct{} `type:"structure"` + + // A list of databases from a data catalog. + DatabaseList []*Database `type:"list"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListDatabasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDatabasesOutput) GoString() string { + return s.String() +} + +// SetDatabaseList sets the DatabaseList field's value. +func (s *ListDatabasesOutput) SetDatabaseList(v []*Database) *ListDatabasesOutput { + s.DatabaseList = v + return s +} + +// SetNextToken sets the NextToken field's value. 
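Usage sketch for the new ListDatabases operation (illustrative only, first page only). AwsDataCatalog is the account's default Glue-backed catalog; any catalog registered in the account can be substituted.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	svc := athena.New(session.Must(session.NewSession()))

	// Enumerate the databases in one catalog and print their names and
	// descriptions; NextToken pagination is omitted for brevity.
	out, err := svc.ListDatabases(&athena.ListDatabasesInput{
		CatalogName: aws.String("AwsDataCatalog"),
		MaxResults:  aws.Int64(50),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, db := range out.DatabaseList {
		fmt.Printf("%s\t%s\n", aws.StringValue(db.Name), aws.StringValue(db.Description))
	}
}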
+func (s *ListDatabasesOutput) SetNextToken(v string) *ListDatabasesOutput { + s.NextToken = &v + return s } type ListNamedQueriesInput struct { @@ -3022,11 +4852,15 @@ type ListNamedQueriesInput struct { // The maximum number of queries to return in this request. MaxResults *int64 `type:"integer"` - // The token that specifies where to start pagination if a previous request - // was truncated. + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. NextToken *string `min:"1" type:"string"` // The name of the workgroup from which the named queries are being returned. + // If a workgroup is not specified, the saved queries for the primary workgroup + // are returned. WorkGroup *string `type:"string"` } @@ -3077,59 +4911,181 @@ type ListNamedQueriesOutput struct { // The list of unique query IDs. NamedQueryIds []*string `min:"1" type:"list"` + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListNamedQueriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListNamedQueriesOutput) GoString() string { + return s.String() +} + +// SetNamedQueryIds sets the NamedQueryIds field's value. +func (s *ListNamedQueriesOutput) SetNamedQueryIds(v []*string) *ListNamedQueriesOutput { + s.NamedQueryIds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListNamedQueriesOutput) SetNextToken(v string) *ListNamedQueriesOutput { + s.NextToken = &v + return s +} + +type ListQueryExecutionsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of query executions to return in this request. + MaxResults *int64 `type:"integer"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. + NextToken *string `min:"1" type:"string"` + + // The name of the workgroup from which queries are being returned. If a workgroup + // is not specified, a list of available query execution IDs for the queries + // in the primary workgroup is returned. + WorkGroup *string `type:"string"` +} + +// String returns the string representation +func (s ListQueryExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListQueryExecutionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListQueryExecutionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListQueryExecutionsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *ListQueryExecutionsInput) SetMaxResults(v int64) *ListQueryExecutionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListQueryExecutionsInput) SetNextToken(v string) *ListQueryExecutionsInput { + s.NextToken = &v + return s +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *ListQueryExecutionsInput) SetWorkGroup(v string) *ListQueryExecutionsInput { + s.WorkGroup = &v + return s +} + +type ListQueryExecutionsOutput struct { + _ struct{} `type:"structure"` + // A token to be used by the next request if this request is truncated. NextToken *string `min:"1" type:"string"` + + // The unique IDs of each query execution as an array of strings. + QueryExecutionIds []*string `min:"1" type:"list"` } // String returns the string representation -func (s ListNamedQueriesOutput) String() string { +func (s ListQueryExecutionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListNamedQueriesOutput) GoString() string { +func (s ListQueryExecutionsOutput) GoString() string { return s.String() } -// SetNamedQueryIds sets the NamedQueryIds field's value. -func (s *ListNamedQueriesOutput) SetNamedQueryIds(v []*string) *ListNamedQueriesOutput { - s.NamedQueryIds = v +// SetNextToken sets the NextToken field's value. +func (s *ListQueryExecutionsOutput) SetNextToken(v string) *ListQueryExecutionsOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListNamedQueriesOutput) SetNextToken(v string) *ListNamedQueriesOutput { - s.NextToken = &v +// SetQueryExecutionIds sets the QueryExecutionIds field's value. +func (s *ListQueryExecutionsOutput) SetQueryExecutionIds(v []*string) *ListQueryExecutionsOutput { + s.QueryExecutionIds = v return s } -type ListQueryExecutionsInput struct { +type ListTableMetadataInput struct { _ struct{} `type:"structure"` - // The maximum number of query executions to return in this request. - MaxResults *int64 `type:"integer"` + // The name of the data catalog for which table metadata should be returned. + // + // CatalogName is a required field + CatalogName *string `min:"1" type:"string" required:"true"` - // The token that specifies where to start pagination if a previous request - // was truncated. - NextToken *string `min:"1" type:"string"` + // The name of the database for which table metadata should be returned. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` - // The name of the workgroup from which queries are being returned. - WorkGroup *string `type:"string"` + // A regex filter that pattern-matches table names. If no expression is supplied, + // metadata for all tables are listed. + Expression *string `type:"string"` + + // Specifies the maximum number of results to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. 
+ NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s ListQueryExecutionsInput) String() string { +func (s ListTableMetadataInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListQueryExecutionsInput) GoString() string { +func (s ListTableMetadataInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListQueryExecutionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListQueryExecutionsInput"} +func (s *ListTableMetadataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTableMetadataInput"} + if s.CatalogName == nil { + invalidParams.Add(request.NewErrParamRequired("CatalogName")) + } + if s.CatalogName != nil && len(*s.CatalogName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogName", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } if s.NextToken != nil && len(*s.NextToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } @@ -3140,53 +5096,68 @@ func (s *ListQueryExecutionsInput) Validate() error { return nil } +// SetCatalogName sets the CatalogName field's value. +func (s *ListTableMetadataInput) SetCatalogName(v string) *ListTableMetadataInput { + s.CatalogName = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *ListTableMetadataInput) SetDatabaseName(v string) *ListTableMetadataInput { + s.DatabaseName = &v + return s +} + +// SetExpression sets the Expression field's value. +func (s *ListTableMetadataInput) SetExpression(v string) *ListTableMetadataInput { + s.Expression = &v + return s +} + // SetMaxResults sets the MaxResults field's value. -func (s *ListQueryExecutionsInput) SetMaxResults(v int64) *ListQueryExecutionsInput { +func (s *ListTableMetadataInput) SetMaxResults(v int64) *ListTableMetadataInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListQueryExecutionsInput) SetNextToken(v string) *ListQueryExecutionsInput { +func (s *ListTableMetadataInput) SetNextToken(v string) *ListTableMetadataInput { s.NextToken = &v return s } -// SetWorkGroup sets the WorkGroup field's value. -func (s *ListQueryExecutionsInput) SetWorkGroup(v string) *ListQueryExecutionsInput { - s.WorkGroup = &v - return s -} - -type ListQueryExecutionsOutput struct { +type ListTableMetadataOutput struct { _ struct{} `type:"structure"` - // A token to be used by the next request if this request is truncated. + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. NextToken *string `min:"1" type:"string"` - // The unique IDs of each query execution as an array of strings. - QueryExecutionIds []*string `min:"1" type:"list"` + // A list of table metadata. 
+ TableMetadataList []*TableMetadata `type:"list"` } // String returns the string representation -func (s ListQueryExecutionsOutput) String() string { +func (s ListTableMetadataOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListQueryExecutionsOutput) GoString() string { +func (s ListTableMetadataOutput) GoString() string { return s.String() } // SetNextToken sets the NextToken field's value. -func (s *ListQueryExecutionsOutput) SetNextToken(v string) *ListQueryExecutionsOutput { +func (s *ListTableMetadataOutput) SetNextToken(v string) *ListTableMetadataOutput { s.NextToken = &v return s } -// SetQueryExecutionIds sets the QueryExecutionIds field's value. -func (s *ListQueryExecutionsOutput) SetQueryExecutionIds(v []*string) *ListQueryExecutionsOutput { - s.QueryExecutionIds = v +// SetTableMetadataList sets the TableMetadataList field's value. +func (s *ListTableMetadataOutput) SetTableMetadataList(v []*TableMetadata) *ListTableMetadataOutput { + s.TableMetadataList = v return s } @@ -3194,15 +5165,15 @@ type ListTagsForResourceInput struct { _ struct{} `type:"structure"` // The maximum number of results to be returned per request that lists the tags - // for the workgroup resource. + // for the resource. MaxResults *int64 `min:"75" type:"integer"` // The token for the next set of results, or null if there are no additional - // results for this request, where the request lists the tags for the workgroup - // resource with the specified ARN. + // results for this request, where the request lists the tags for the resource + // with the specified ARN. NextToken *string `min:"1" type:"string"` - // Lists the tags for the workgroup resource with the specified ARN. + // Lists the tags for the resource with the specified ARN. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -3264,7 +5235,7 @@ type ListTagsForResourceOutput struct { // A token to be used by the next request if this request is truncated. NextToken *string `min:"1" type:"string"` - // The list of tags associated with this workgroup. + // The list of tags associated with the specified resource. Tags []*Tag `type:"list"` } @@ -3296,7 +5267,10 @@ type ListWorkGroupsInput struct { // The maximum number of workgroups to return in this request. MaxResults *int64 `min:"1" type:"integer"` - // A token to be used by the next request if this request is truncated. + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. NextToken *string `min:"1" type:"string"` } @@ -3341,7 +5315,10 @@ func (s *ListWorkGroupsInput) SetNextToken(v string) *ListWorkGroupsInput { type ListWorkGroupsOutput struct { _ struct{} `type:"structure"` - // A token to be used by the next request if this request is truncated. + // A token generated by the Athena service that specifies where to continue + // pagination if a previous request was truncated. To obtain the next set of + // pages, pass in the NextToken from the response object of the previous page + // call. 
NextToken *string `min:"1" type:"string"` // The list of workgroups, including their names, descriptions, creation times, @@ -3371,6 +5348,66 @@ func (s *ListWorkGroupsOutput) SetWorkGroups(v []*WorkGroupSummary) *ListWorkGro return s } +// An exception that Athena received when it called a custom metastore. Occurs +// if the error is not caused by user input (InvalidRequestException) or from +// the Athena platform (InternalServerException). For example, if a user-created +// Lambda function is missing permissions, the Lambda 4XX exception is returned +// in a MetadataException. +type MetadataException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s MetadataException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetadataException) GoString() string { + return s.String() +} + +func newErrorMetadataException(v protocol.ResponseMetadata) error { + return &MetadataException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *MetadataException) Code() string { + return "MetadataException" +} + +// Message returns the exception's message. +func (s *MetadataException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *MetadataException) OrigErr() error { + return nil +} + +func (s *MetadataException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *MetadataException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *MetadataException) RequestID() string { + return s.RespMetadata.RequestID +} + // A query, where QueryString is the list of SQL query statements that comprise // the query. type NamedQuery struct { @@ -3473,8 +5510,9 @@ type QueryExecution struct { // and DML, such as SHOW CREATE TABLE, or DESCRIBE . StatementType *string `type:"string" enum:"StatementType"` - // The amount of data scanned during the query execution and the amount of time - // that it took to execute, and the type of statement that was run. + // Query execution statistics, such as the amount of data scanned, the amount + // of time that the query took to process, and the type of statement that was + // run. Statistics *QueryExecutionStatistics `type:"structure"` // The completion date, current state, submission time, and state change reason @@ -3543,11 +5581,14 @@ func (s *QueryExecution) SetWorkGroup(v string) *QueryExecution { return s } -// The database in which the query execution occurs. +// The database and data catalog context in which the query execution occurs. type QueryExecutionContext struct { _ struct{} `type:"structure"` - // The name of the database. + // The name of the data catalog used in the query execution. + Catalog *string `min:"1" type:"string"` + + // The name of the database used in the query execution. Database *string `min:"1" type:"string"` } @@ -3564,6 +5605,9 @@ func (s QueryExecutionContext) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
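Usage sketch for the new Catalog field on QueryExecutionContext (illustrative only); the catalog, database, S3 bucket, and query text are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	svc := athena.New(session.Must(session.NewSession()))

	// Route a query to a specific data catalog by setting the new Catalog
	// field alongside Database in the query execution context.
	out, err := svc.StartQueryExecution(&athena.StartQueryExecutionInput{
		QueryString: aws.String("SELECT * FROM example_table LIMIT 10"),
		QueryExecutionContext: &athena.QueryExecutionContext{
			Catalog:  aws.String("example_lambda_catalog"),
			Database: aws.String("example_db"),
		},
		ResultConfiguration: &athena.ResultConfiguration{
			OutputLocation: aws.String("s3://example-athena-results/"),
		},
		WorkGroup: aws.String("primary"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started query execution:", aws.StringValue(out.QueryExecutionId))
}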
func (s *QueryExecutionContext) Validate() error { invalidParams := request.ErrInvalidParams{Context: "QueryExecutionContext"} + if s.Catalog != nil && len(*s.Catalog) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Catalog", 1)) + } if s.Database != nil && len(*s.Database) < 1 { invalidParams.Add(request.NewErrParamMinLen("Database", 1)) } @@ -3574,6 +5618,12 @@ func (s *QueryExecutionContext) Validate() error { return nil } +// SetCatalog sets the Catalog field's value. +func (s *QueryExecutionContext) SetCatalog(v string) *QueryExecutionContext { + s.Catalog = &v + return s +} + // SetDatabase sets the Database field's value. func (s *QueryExecutionContext) SetDatabase(v string) *QueryExecutionContext { s.Database = &v @@ -3680,12 +5730,16 @@ type QueryExecutionStatus struct { // The date and time that the query completed. CompletionDateTime *time.Time `type:"timestamp"` - // The state of query execution. QUEUED state is listed but is not used by Athena - // and is reserved for future use. RUNNING indicates that the query has been - // submitted to the service, and Athena will execute the query as soon as resources - // are available. SUCCEEDED indicates that the query completed without errors. - // FAILED indicates that the query experienced an error and did not complete - // processing. CANCELLED indicates that a user input interrupted query execution. + // The state of query execution. QUEUED indicates that the query has been submitted + // to the service, and Athena will execute the query as soon as resources are + // available. RUNNING indicates that the query is in execution phase. SUCCEEDED + // indicates that the query completed without errors. FAILED indicates that + // the query experienced an error and did not complete processing. CANCELLED + // indicates that a user input interrupted query execution. + // + // Athena automatically retries your queries in cases of certain transient errors. + // As a result, you may see the query state transition from RUNNING or FAILED + // to QUEUED. State *string `type:"string" enum:"QueryExecutionState"` // Further detail about the status of the query. @@ -3731,8 +5785,8 @@ func (s *QueryExecutionStatus) SetSubmissionDateTime(v time.Time) *QueryExecutio // A resource, such as a workgroup, was not found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -3751,17 +5805,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3769,22 +5823,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
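Sketch of error inspection after the pointer-receiver change above: the exception types now satisfy the error and awserr.Error interfaces on pointer receivers, so errors.As works directly. The workgroup name is a placeholder, and which exception type a given operation returns depends on the service.

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	svc := athena.New(session.Must(session.NewSession()))

	_, err := svc.GetWorkGroup(&athena.GetWorkGroupInput{
		WorkGroup: aws.String("does-not-exist"), // placeholder
	})

	// Unwrap the typed exceptions defined in this file.
	var nfe *athena.ResourceNotFoundException
	var ire *athena.InvalidRequestException
	switch {
	case errors.As(err, &nfe):
		fmt.Println("not found:", nfe.Message(), "status:", nfe.StatusCode())
	case errors.As(err, &ire):
		fmt.Println("invalid request:", ire.Message())
	case err != nil:
		log.Fatal(err)
	}
}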
-func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The location in Amazon S3 where query results are stored and the encryption @@ -3937,7 +5991,7 @@ func (s *ResultConfigurationUpdates) SetRemoveOutputLocation(v bool) *ResultConf } // The metadata and rows that comprise a query result set. The metadata describes -// the column structure and data types. +// the column structure and data types. To return a ResultSet object, use GetQueryResults. type ResultSet struct { _ struct{} `type:"structure"` @@ -3972,7 +6026,7 @@ func (s *ResultSet) SetRows(v []*Row) *ResultSet { } // The metadata that describes the column structure and data types of a table -// of query results. +// of query results. To return a ResultSetMetadata object, use GetQueryResults. type ResultSetMetadata struct { _ struct{} `type:"structure"` @@ -4181,16 +6235,97 @@ func (s StopQueryExecutionOutput) GoString() string { return s.String() } -// A tag that you can add to a resource. A tag is a label that you assign to -// an AWS Athena resource (a workgroup). Each tag consists of a key and an optional -// value, both of which you define. Tags enable you to categorize workgroups -// in Athena, for example, by purpose, owner, or environment. Use a consistent -// set of tag keys to make it easier to search and filter workgroups in your -// account. The maximum tag key length is 128 Unicode characters in UTF-8. The -// maximum tag value length is 256 Unicode characters in UTF-8. You can use -// letters and numbers representable in UTF-8, and the following characters: -// + - = . _ : / @. Tag keys and values are case-sensitive. Tag keys must be -// unique per resource. +// Contains metadata for a table. +type TableMetadata struct { + _ struct{} `type:"structure"` + + // A list of the columns in the table. + Columns []*Column `type:"list"` + + // The time that the table was created. + CreateTime *time.Time `type:"timestamp"` + + // The last time the table was accessed. + LastAccessTime *time.Time `type:"timestamp"` + + // The name of the table. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A set of custom key/value pairs for table properties. + Parameters map[string]*string `type:"map"` + + // A list of the partition keys in the table. + PartitionKeys []*Column `type:"list"` + + // The type of table. In Athena, only EXTERNAL_TABLE is supported. + TableType *string `type:"string"` +} + +// String returns the string representation +func (s TableMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TableMetadata) GoString() string { + return s.String() +} + +// SetColumns sets the Columns field's value. 
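Usage sketch for GetTableMetadata and the TableMetadata/Column types above (illustrative only); catalog, database, and table names are placeholders, and ListTableMetadata with an Expression filter follows the same request shape.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	svc := athena.New(session.Must(session.NewSession()))

	// Fetch the metadata for a single table and print its columns and
	// partition keys.
	out, err := svc.GetTableMetadata(&athena.GetTableMetadataInput{
		CatalogName:  aws.String("AwsDataCatalog"),
		DatabaseName: aws.String("example_db"),
		TableName:    aws.String("example_table"),
	})
	if err != nil {
		log.Fatal(err)
	}
	t := out.TableMetadata
	fmt.Printf("table %s (%s)\n", aws.StringValue(t.Name), aws.StringValue(t.TableType))
	for _, c := range t.Columns {
		fmt.Printf("  column %s %s\n", aws.StringValue(c.Name), aws.StringValue(c.Type))
	}
	for _, p := range t.PartitionKeys {
		fmt.Printf("  partition key %s %s\n", aws.StringValue(p.Name), aws.StringValue(p.Type))
	}
}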
+func (s *TableMetadata) SetColumns(v []*Column) *TableMetadata { + s.Columns = v + return s +} + +// SetCreateTime sets the CreateTime field's value. +func (s *TableMetadata) SetCreateTime(v time.Time) *TableMetadata { + s.CreateTime = &v + return s +} + +// SetLastAccessTime sets the LastAccessTime field's value. +func (s *TableMetadata) SetLastAccessTime(v time.Time) *TableMetadata { + s.LastAccessTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *TableMetadata) SetName(v string) *TableMetadata { + s.Name = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *TableMetadata) SetParameters(v map[string]*string) *TableMetadata { + s.Parameters = v + return s +} + +// SetPartitionKeys sets the PartitionKeys field's value. +func (s *TableMetadata) SetPartitionKeys(v []*Column) *TableMetadata { + s.PartitionKeys = v + return s +} + +// SetTableType sets the TableType field's value. +func (s *TableMetadata) SetTableType(v string) *TableMetadata { + s.TableType = &v + return s +} + +// A label that you assign to a resource. In Athena, a resource can be a workgroup +// or data catalog. Each tag consists of a key and an optional value, both of +// which you define. For example, you can use tags to categorize Athena workgroups +// or data catalogs by purpose, owner, or environment. Use a consistent set +// of tag keys to make it easier to search and filter workgroups or data catalogs +// in your account. For best practices, see Tagging Best Practices (https://aws.amazon.com/answers/account-management/aws-tagging-strategies/). +// Tag keys can be from 1 to 128 UTF-8 Unicode characters, and tag values can +// be from 0 to 256 UTF-8 Unicode characters. Tags can use letters and numbers +// representable in UTF-8, and the following characters: + - = . _ : / @. Tag +// keys and values are case-sensitive. Tag keys must be unique per resource. +// If you specify more than one tag, separate them by commas. type Tag struct { _ struct{} `type:"structure"` @@ -4244,14 +6379,14 @@ func (s *Tag) SetValue(v string) *Tag { type TagResourceInput struct { _ struct{} `type:"structure"` - // Requests that one or more tags are added to the resource (such as a workgroup) - // for the specified ARN. + // Specifies the ARN of the Athena resource (workgroup or data catalog) to which + // tags are to be added. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` - // One or more tags, separated by commas, to be added to the resource, such - // as a workgroup. + // A collection of one or more tags, separated by commas, to be added to an + // Athena workgroup or data catalog resource. // // Tags is a required field Tags []*Tag `type:"list" required:"true"` @@ -4324,8 +6459,8 @@ func (s TagResourceOutput) GoString() string { // Indicates that the request was throttled. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -4346,17 +6481,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
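Usage sketch for tagging a data catalog, which the updated Tag documentation above now covers alongside workgroups (illustrative only); the account ID and the assumed datacatalog ARN format are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
)

func main() {
	svc := athena.New(session.Must(session.NewSession()))

	// Tag a data catalog by ARN; the same call works for workgroup ARNs.
	_, err := svc.TagResource(&athena.TagResourceInput{
		ResourceARN: aws.String("arn:aws:athena:us-east-1:111122223333:datacatalog/example_dynamo_catalog"), // placeholder ARN
		Tags: []*athena.Tag{
			{Key: aws.String("env"), Value: aws.String("dev")},
			{Key: aws.String("owner"), Value: aws.String("data-platform")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("tags added")
}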
-func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4364,22 +6499,22 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a named query ID that could not be processed. @@ -4472,13 +6607,13 @@ func (s *UnprocessedQueryExecutionId) SetQueryExecutionId(v string) *Unprocessed type UntagResourceInput struct { _ struct{} `type:"structure"` - // Removes one or more tags from the workgroup resource for the specified ARN. + // Specifies the ARN of the resource from which tags are to be removed. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` - // Removes the tags associated with one or more tag keys from the workgroup - // resource. + // A comma-separated list of one or more tag keys whose tags are to be removed + // from the specified resource. // // TagKeys is a required field TagKeys []*string `type:"list" required:"true"` @@ -4539,6 +6674,113 @@ func (s UntagResourceOutput) GoString() string { return s.String() } +type UpdateDataCatalogInput struct { + _ struct{} `type:"structure"` + + // New or modified text that describes the data catalog. + Description *string `min:"1" type:"string"` + + // The name of the data catalog to update. The catalog name must be unique for + // the AWS account and can use a maximum of 128 alphanumeric, underscore, at + // sign, or hyphen characters. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Specifies the Lambda function or functions to use for updating the data catalog. + // This is a mapping whose values depend on the catalog type. + // + // * For the HIVE data catalog type, use the following syntax. The metadata-function + // parameter is required. The sdk-version parameter is optional and defaults + // to the currently supported version. metadata-function=lambda_arn, sdk-version=version_number + // + // * For the LAMBDA data catalog type, use one of the following sets of required + // parameters, but not both. If you have one Lambda function that processes + // metadata and another for reading the actual data, use the following syntax. + // Both parameters are required. metadata-function=lambda_arn, record-function=lambda_arn + // If you have a composite Lambda function that processes both metadata and + // data, use the following syntax to specify your Lambda function. 
function=lambda_arn + // + // * The GLUE type has no parameters. + Parameters map[string]*string `type:"map"` + + // Specifies the type of data catalog to update. Specify LAMBDA for a federated + // catalog, GLUE for AWS Glue Catalog, or HIVE for an external hive metastore. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"DataCatalogType"` +} + +// String returns the string representation +func (s UpdateDataCatalogInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDataCatalogInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDataCatalogInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDataCatalogInput"} + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *UpdateDataCatalogInput) SetDescription(v string) *UpdateDataCatalogInput { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateDataCatalogInput) SetName(v string) *UpdateDataCatalogInput { + s.Name = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *UpdateDataCatalogInput) SetParameters(v map[string]*string) *UpdateDataCatalogInput { + s.Parameters = v + return s +} + +// SetType sets the Type field's value. 
+func (s *UpdateDataCatalogInput) SetType(v string) *UpdateDataCatalogInput { + s.Type = &v + return s +} + +type UpdateDataCatalogOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateDataCatalogOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDataCatalogOutput) GoString() string { + return s.String() +} + type UpdateWorkGroupInput struct { _ struct{} `type:"structure"` @@ -4964,6 +7206,35 @@ const ( ColumnNullableUnknown = "UNKNOWN" ) +// ColumnNullable_Values returns all elements of the ColumnNullable enum +func ColumnNullable_Values() []string { + return []string{ + ColumnNullableNotNull, + ColumnNullableNullable, + ColumnNullableUnknown, + } +} + +const ( + // DataCatalogTypeLambda is a DataCatalogType enum value + DataCatalogTypeLambda = "LAMBDA" + + // DataCatalogTypeGlue is a DataCatalogType enum value + DataCatalogTypeGlue = "GLUE" + + // DataCatalogTypeHive is a DataCatalogType enum value + DataCatalogTypeHive = "HIVE" +) + +// DataCatalogType_Values returns all elements of the DataCatalogType enum +func DataCatalogType_Values() []string { + return []string{ + DataCatalogTypeLambda, + DataCatalogTypeGlue, + DataCatalogTypeHive, + } +} + const ( // EncryptionOptionSseS3 is a EncryptionOption enum value EncryptionOptionSseS3 = "SSE_S3" @@ -4975,6 +7246,15 @@ const ( EncryptionOptionCseKms = "CSE_KMS" ) +// EncryptionOption_Values returns all elements of the EncryptionOption enum +func EncryptionOption_Values() []string { + return []string{ + EncryptionOptionSseS3, + EncryptionOptionSseKms, + EncryptionOptionCseKms, + } +} + const ( // QueryExecutionStateQueued is a QueryExecutionState enum value QueryExecutionStateQueued = "QUEUED" @@ -4992,6 +7272,17 @@ const ( QueryExecutionStateCancelled = "CANCELLED" ) +// QueryExecutionState_Values returns all elements of the QueryExecutionState enum +func QueryExecutionState_Values() []string { + return []string{ + QueryExecutionStateQueued, + QueryExecutionStateRunning, + QueryExecutionStateSucceeded, + QueryExecutionStateFailed, + QueryExecutionStateCancelled, + } +} + const ( // StatementTypeDdl is a StatementType enum value StatementTypeDdl = "DDL" @@ -5003,6 +7294,15 @@ const ( StatementTypeUtility = "UTILITY" ) +// StatementType_Values returns all elements of the StatementType enum +func StatementType_Values() []string { + return []string{ + StatementTypeDdl, + StatementTypeDml, + StatementTypeUtility, + } +} + // The reason for the query throttling, for example, when it exceeds the concurrent // query limit. 
const ( @@ -5010,6 +7310,13 @@ const ( ThrottleReasonConcurrentQueryLimitExceeded = "CONCURRENT_QUERY_LIMIT_EXCEEDED" ) +// ThrottleReason_Values returns all elements of the ThrottleReason enum +func ThrottleReason_Values() []string { + return []string{ + ThrottleReasonConcurrentQueryLimitExceeded, + } +} + const ( // WorkGroupStateEnabled is a WorkGroupState enum value WorkGroupStateEnabled = "ENABLED" @@ -5017,3 +7324,11 @@ const ( // WorkGroupStateDisabled is a WorkGroupState enum value WorkGroupStateDisabled = "DISABLED" ) + +// WorkGroupState_Values returns all elements of the WorkGroupState enum +func WorkGroupState_Values() []string { + return []string{ + WorkGroupStateEnabled, + WorkGroupStateDisabled, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/errors.go b/vendor/github.com/aws/aws-sdk-go/service/athena/errors.go index 5da82bf6a..a0939ed6d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/athena/errors.go @@ -22,6 +22,16 @@ const ( // a required parameter may be missing or out of range. ErrCodeInvalidRequestException = "InvalidRequestException" + // ErrCodeMetadataException for service response error code + // "MetadataException". + // + // An exception that Athena received when it called a custom metastore. Occurs + // if the error is not caused by user input (InvalidRequestException) or from + // the Athena platform (InternalServerException). For example, if a user-created + // Lambda function is missing permissions, the Lambda 4XX exception is returned + // in a MetadataException. + ErrCodeMetadataException = "MetadataException" + // ErrCodeResourceNotFoundException for service response error code // "ResourceNotFoundException". // @@ -38,6 +48,7 @@ const ( var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InternalServerException": newErrorInternalServerException, "InvalidRequestException": newErrorInvalidRequestException, + "MetadataException": newErrorMetadataException, "ResourceNotFoundException": newErrorResourceNotFoundException, "TooManyRequestsException": newErrorTooManyRequestsException, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/service.go b/vendor/github.com/aws/aws-sdk-go/service/athena/service.go index b42906d68..3b624a855 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/athena/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go index d80c2f819..bea102a88 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -158,8 +158,9 @@ func (c *AutoScaling) AttachLoadBalancerTargetGroupsRequest(input *AttachLoadBal // // Attaches one or more target groups to the specified Auto Scaling group. // -// To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. -// To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups. 
+// To describe the target groups for an Auto Scaling group, call the DescribeLoadBalancerTargetGroups +// API. To detach the target group from the Auto Scaling group, call the DetachLoadBalancerTargetGroups +// API. // // With Application Load Balancers and Network Load Balancers, instances are // registered as targets with a target group. With Classic Load Balancers, instances @@ -249,14 +250,17 @@ func (c *AutoScaling) AttachLoadBalancersRequest(input *AttachLoadBalancersInput // AttachLoadBalancers API operation for Auto Scaling. // -// Attaches one or more Classic Load Balancers to the specified Auto Scaling -// group. // -// To attach an Application Load Balancer or a Network Load Balancer instead, -// see AttachLoadBalancerTargetGroups. +// To attach an Application Load Balancer or a Network Load Balancer, use the +// AttachLoadBalancerTargetGroups API operation instead. +// +// Attaches one or more Classic Load Balancers to the specified Auto Scaling +// group. Amazon EC2 Auto Scaling registers the running instances with these +// Classic Load Balancers. // -// To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. -// To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers. +// To describe the load balancers for an Auto Scaling group, call the DescribeLoadBalancers +// API. To detach the load balancer from the Auto Scaling group, call the DetachLoadBalancers +// API. // // For more information, see Attaching a Load Balancer to Your Auto Scaling // Group (https://docs.aws.amazon.com/autoscaling/ec2/userguide/attach-load-balancer-asg.html) @@ -442,7 +446,8 @@ func (c *AutoScaling) BatchPutScheduledUpdateGroupActionRequest(input *BatchPutS // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -470,6 +475,101 @@ func (c *AutoScaling) BatchPutScheduledUpdateGroupActionWithContext(ctx aws.Cont return out, req.Send() } +const opCancelInstanceRefresh = "CancelInstanceRefresh" + +// CancelInstanceRefreshRequest generates a "aws/request.Request" representing the +// client's request for the CancelInstanceRefresh operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelInstanceRefresh for more information on using the CancelInstanceRefresh +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelInstanceRefreshRequest method. 
+// req, resp := client.CancelInstanceRefreshRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CancelInstanceRefresh +func (c *AutoScaling) CancelInstanceRefreshRequest(input *CancelInstanceRefreshInput) (req *request.Request, output *CancelInstanceRefreshOutput) { + op := &request.Operation{ + Name: opCancelInstanceRefresh, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelInstanceRefreshInput{} + } + + output = &CancelInstanceRefreshOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelInstanceRefresh API operation for Auto Scaling. +// +// Cancels an instance refresh operation in progress. Cancellation does not +// roll back any replacements that have already been completed, but it prevents +// new replacements from being started. +// +// For more information, see Replacing Auto Scaling Instances Based on an Instance +// Refresh (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Auto Scaling's +// API operation CancelInstanceRefresh for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededFault "LimitExceeded" +// You have already reached a limit for your Amazon EC2 Auto Scaling resources +// (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. +// +// * ErrCodeResourceContentionFault "ResourceContention" +// You already have a pending update to an Amazon EC2 Auto Scaling resource +// (for example, an Auto Scaling group, instance, or load balancer). +// +// * ErrCodeActiveInstanceRefreshNotFoundFault "ActiveInstanceRefreshNotFound" +// The request failed because an active instance refresh for the specified Auto +// Scaling group was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/CancelInstanceRefresh +func (c *AutoScaling) CancelInstanceRefresh(input *CancelInstanceRefreshInput) (*CancelInstanceRefreshOutput, error) { + req, out := c.CancelInstanceRefreshRequest(input) + return out, req.Send() +} + +// CancelInstanceRefreshWithContext is the same as CancelInstanceRefresh with the addition of +// the ability to pass a context and additional request options. +// +// See CancelInstanceRefresh for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) CancelInstanceRefreshWithContext(ctx aws.Context, input *CancelInstanceRefreshInput, opts ...request.Option) (*CancelInstanceRefreshOutput, error) { + req, out := c.CancelInstanceRefreshRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCompleteLifecycleAction = "CompleteLifecycleAction" // CompleteLifecycleActionRequest generates a "aws/request.Request" representing the @@ -622,11 +722,23 @@ func (c *AutoScaling) CreateAutoScalingGroupRequest(input *CreateAutoScalingGrou // Creates an Auto Scaling group with the specified name and attributes. // // If you exceed your maximum limit of Auto Scaling groups, the call fails. -// For information about viewing this limit, see DescribeAccountLimits. For -// information about updating this limit, see Amazon EC2 Auto Scaling Service -// Quotas (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html) +// To query this limit, call the DescribeAccountLimits API. For information +// about updating this limit, see Amazon EC2 Auto Scaling Service Quotas (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html) +// in the Amazon EC2 Auto Scaling User Guide. +// +// For introductory exercises for creating an Auto Scaling group, see Getting +// Started with Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/GettingStartedTutorial.html) +// and Tutorial: Set Up a Scaled and Load-Balanced Application (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-register-lbs-with-asg.html) +// in the Amazon EC2 Auto Scaling User Guide. For more information, see Auto +// Scaling Groups (https://docs.aws.amazon.com/autoscaling/ec2/userguide/AutoScalingGroup.html) // in the Amazon EC2 Auto Scaling User Guide. // +// Every Auto Scaling group has three size parameters (DesiredCapacity, MaxSize, +// and MinSize). Usually, you set these sizes based on a specific number of +// instances. However, if you configure a mixed instances policy that defines +// weights for the instance types, you must specify these sizes with the same +// units that you use for weighting instances. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -642,7 +754,8 @@ func (c *AutoScaling) CreateAutoScalingGroupRequest(input *CreateAutoScalingGrou // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -721,9 +834,8 @@ func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfig // Creates a launch configuration. // // If you exceed your maximum limit of launch configurations, the call fails. -// For information about viewing this limit, see DescribeAccountLimits. For -// information about updating this limit, see Amazon EC2 Auto Scaling Service -// Quotas (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html) +// To query this limit, call the DescribeAccountLimits API. For information +// about updating this limit, see Amazon EC2 Auto Scaling Service Quotas (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html) // in the Amazon EC2 Auto Scaling User Guide. 
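As an aside to the CreateAutoScalingGroup documentation updated above, the following is a minimal usage sketch of the vendored client; it is not part of the diff itself, and the group name, launch configuration name, sizes, and Availability Zone are illustrative assumptions.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession()))

	// The three size parameters described above: DesiredCapacity must lie
	// between MinSize and MaxSize. All names and values here are hypothetical.
	_, err := svc.CreateAutoScalingGroup(&autoscaling.CreateAutoScalingGroupInput{
		AutoScalingGroupName:    aws.String("example-asg"),
		LaunchConfigurationName: aws.String("example-launch-config"),
		MinSize:                 aws.Int64(1),
		MaxSize:                 aws.Int64(3),
		DesiredCapacity:         aws.Int64(2),
		AvailabilityZones:       aws.StringSlice([]string{"us-east-1a"}),
	})
	if err != nil {
		log.Fatalf("CreateAutoScalingGroup: %v", err)
	}
}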
// // For more information, see Launch Configurations (https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html) @@ -744,7 +856,8 @@ func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfig // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -836,7 +949,8 @@ func (c *AutoScaling) CreateOrUpdateTagsRequest(input *CreateOrUpdateTagsInput) // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeAlreadyExistsFault "AlreadyExists" // You already have an Auto Scaling group or launch configuration with this @@ -925,13 +1039,13 @@ func (c *AutoScaling) DeleteAutoScalingGroupRequest(input *DeleteAutoScalingGrou // alarm actions, and any alarm that no longer has an associated action. // // To remove instances from the Auto Scaling group before deleting it, call -// DetachInstances with the list of instances and the option to decrement the -// desired capacity. This ensures that Amazon EC2 Auto Scaling does not launch -// replacement instances. +// the DetachInstances API with the list of instances and the option to decrement +// the desired capacity. This ensures that Amazon EC2 Auto Scaling does not +// launch replacement instances. // -// To terminate all instances before deleting the Auto Scaling group, call UpdateAutoScalingGroup -// and set the minimum size and desired capacity of the Auto Scaling group to -// zero. +// To terminate all instances before deleting the Auto Scaling group, call the +// UpdateAutoScalingGroup API and set the minimum size and desired capacity +// of the Auto Scaling group to zero. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1612,7 +1726,17 @@ func (c *AutoScaling) DescribeAdjustmentTypesRequest(input *DescribeAdjustmentTy // DescribeAdjustmentTypes API operation for Auto Scaling. // -// Describes the policy adjustment types for use with PutScalingPolicy. +// Describes the available adjustment types for Amazon EC2 Auto Scaling scaling +// policies. These settings apply to step scaling policies and simple scaling +// policies; they do not apply to target tracking scaling policies. +// +// The following adjustment types are supported: +// +// * ChangeInCapacity +// +// * ExactCapacity +// +// * PercentChangeInCapacity // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2010,6 +2134,110 @@ func (c *AutoScaling) DescribeAutoScalingNotificationTypesWithContext(ctx aws.Co return out, req.Send() } +const opDescribeInstanceRefreshes = "DescribeInstanceRefreshes" + +// DescribeInstanceRefreshesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceRefreshes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceRefreshes for more information on using the DescribeInstanceRefreshes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstanceRefreshesRequest method. +// req, resp := client.DescribeInstanceRefreshesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeInstanceRefreshes +func (c *AutoScaling) DescribeInstanceRefreshesRequest(input *DescribeInstanceRefreshesInput) (req *request.Request, output *DescribeInstanceRefreshesOutput) { + op := &request.Operation{ + Name: opDescribeInstanceRefreshes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceRefreshesInput{} + } + + output = &DescribeInstanceRefreshesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceRefreshes API operation for Auto Scaling. +// +// Describes one or more instance refreshes. +// +// You can determine the status of a request by looking at the Status parameter. +// The following are the possible statuses: +// +// * Pending - The request was created, but the operation has not started. +// +// * InProgress - The operation is in progress. +// +// * Successful - The operation completed successfully. +// +// * Failed - The operation failed to complete. You can troubleshoot using +// the status reason and the scaling activities. +// +// * Cancelling - An ongoing operation is being cancelled. Cancellation does +// not roll back any replacements that have already been completed, but it +// prevents new replacements from being started. +// +// * Cancelled - The operation is cancelled. +// +// For more information, see Replacing Auto Scaling Instances Based on an Instance +// Refresh (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Auto Scaling's +// API operation DescribeInstanceRefreshes for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidNextToken "InvalidNextToken" +// The NextToken value is not valid. +// +// * ErrCodeResourceContentionFault "ResourceContention" +// You already have a pending update to an Amazon EC2 Auto Scaling resource +// (for example, an Auto Scaling group, instance, or load balancer). 
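To make the status values listed above concrete, here is a hedged sketch of inspecting instance refreshes with the vendored client; the group name is an illustrative assumption, and the InstanceRefreshId and Status fields are assumed to be those exposed by the SDK's InstanceRefresh type.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession()))

	// "example-asg" is a hypothetical group name.
	out, err := svc.DescribeInstanceRefreshes(&autoscaling.DescribeInstanceRefreshesInput{
		AutoScalingGroupName: aws.String("example-asg"),
	})
	if err != nil {
		log.Fatalf("DescribeInstanceRefreshes: %v", err)
	}

	// Status is one of Pending, InProgress, Successful, Failed, Cancelling,
	// or Cancelled, as documented above.
	for _, r := range out.InstanceRefreshes {
		fmt.Printf("%s: %s\n", aws.StringValue(r.InstanceRefreshId), aws.StringValue(r.Status))
	}
}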
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/DescribeInstanceRefreshes +func (c *AutoScaling) DescribeInstanceRefreshes(input *DescribeInstanceRefreshesInput) (*DescribeInstanceRefreshesOutput, error) { + req, out := c.DescribeInstanceRefreshesRequest(input) + return out, req.Send() +} + +// DescribeInstanceRefreshesWithContext is the same as DescribeInstanceRefreshes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceRefreshes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *AutoScaling) DescribeInstanceRefreshesWithContext(ctx aws.Context, input *DescribeInstanceRefreshesInput, opts ...request.Option) (*DescribeInstanceRefreshesOutput, error) { + req, out := c.DescribeInstanceRefreshesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeLaunchConfigurations = "DescribeLaunchConfigurations" // DescribeLaunchConfigurationsRequest generates a "aws/request.Request" representing the @@ -2444,8 +2672,8 @@ func (c *AutoScaling) DescribeLoadBalancersRequest(input *DescribeLoadBalancersI // Describes the load balancers for the specified Auto Scaling group. // // This operation describes only Classic Load Balancers. If you have Application -// Load Balancers or Network Load Balancers, use DescribeLoadBalancerTargetGroups -// instead. +// Load Balancers or Network Load Balancers, use the DescribeLoadBalancerTargetGroups +// API instead. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2528,7 +2756,7 @@ func (c *AutoScaling) DescribeMetricCollectionTypesRequest(input *DescribeMetric // Describes the available CloudWatch metrics for Amazon EC2 Auto Scaling. // // The GroupStandbyInstances metric is not returned by default. You must explicitly -// request this metric when calling EnableMetricsCollection. +// request this metric when calling the EnableMetricsCollection API. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3035,7 +3263,8 @@ func (c *AutoScaling) DescribeScalingProcessTypesRequest(input *DescribeScalingP // DescribeScalingProcessTypes API operation for Auto Scaling. // -// Describes the scaling process types for use with ResumeProcesses and SuspendProcesses. +// Describes the scaling process types for use with the ResumeProcesses and +// SuspendProcesses APIs. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3123,7 +3352,7 @@ func (c *AutoScaling) DescribeScheduledActionsRequest(input *DescribeScheduledAc // // Describes the actions scheduled for your Auto Scaling group that haven't // run or that have not reached their end time. To describe the actions that -// have already run, use DescribeScalingActivities. +// have already run, call the DescribeScalingActivities API. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3275,6 +3504,9 @@ func (c *AutoScaling) DescribeTagsRequest(input *DescribeTagsInput) (req *reques // a particular tag only if it matches all the filters. If there's no match, // no special message is returned. // +// For more information, see Tagging Auto Scaling Groups and Instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-tagging.html) +// in the Amazon EC2 Auto Scaling User Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3673,13 +3905,13 @@ func (c *AutoScaling) DetachLoadBalancersRequest(input *DetachLoadBalancersInput // group. // // This operation detaches only Classic Load Balancers. If you have Application -// Load Balancers or Network Load Balancers, use DetachLoadBalancerTargetGroups -// instead. +// Load Balancers or Network Load Balancers, use the DetachLoadBalancerTargetGroups +// API instead. // // When you detach a load balancer, it enters the Removing state while deregistering // the instances in the group. When all instances are deregistered, then you -// can no longer describe the load balancer using DescribeLoadBalancers. The -// instances remain running. +// can no longer describe the load balancer using the DescribeLoadBalancers +// API call. The instances remain running. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4017,7 +4249,8 @@ func (c *AutoScaling) ExecutePolicyRequest(input *ExecutePolicyInput) (req *requ // ExecutePolicy API operation for Auto Scaling. // -// Executes the specified policy. +// Executes the specified policy. This can be useful for testing the design +// of your scaling policy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4209,10 +4442,11 @@ func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req // launch or terminate. // // If you need more time, record the lifecycle action heartbeat to keep the -// instance in a pending state using RecordLifecycleActionHeartbeat. +// instance in a pending state using the RecordLifecycleActionHeartbeat API +// call. // // If you finish before the timeout period ends, complete the lifecycle action -// using CompleteLifecycleAction. +// using the CompleteLifecycleAction API call. // // For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks (https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html) // in the Amazon EC2 Auto Scaling User Guide. @@ -4220,8 +4454,9 @@ func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req // If you exceed your maximum limit of lifecycle hooks, which by default is // 50 per Auto Scaling group, the call fails. // -// You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. -// If you are no longer using a lifecycle hook, you can delete it using DeleteLifecycleHook. +// You can view the lifecycle hooks for an Auto Scaling group using the DescribeLifecycleHooks +// API call. If you are no longer using a lifecycle hook, you can delete it +// by calling the DeleteLifecycleHook API. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4234,7 +4469,8 @@ func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -4328,7 +4564,8 @@ func (c *AutoScaling) PutNotificationConfigurationRequest(input *PutNotification // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -4406,7 +4643,8 @@ func (c *AutoScaling) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req // Creates or updates a scaling policy for an Auto Scaling group. // // For more information about using scaling policies to scale your Auto Scaling -// group automatically, see Dynamic Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scale-based-on-demand.html) +// group, see Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-target-tracking.html) +// and Step and Simple Scaling Policies (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4420,7 +4658,8 @@ func (c *AutoScaling) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -4518,7 +4757,8 @@ func (c *AutoScaling) PutScheduledUpdateGroupActionRequest(input *PutScheduledUp // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. 
// // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -4593,7 +4833,7 @@ func (c *AutoScaling) RecordLifecycleActionHeartbeatRequest(input *RecordLifecyc // // Records a heartbeat for the lifecycle action associated with the specified // token or instance. This extends the timeout by the length of time defined -// using PutLifecycleHook. +// using the PutLifecycleHook API call. // // This step is a part of the procedure for adding a lifecycle hook to an Auto // Scaling group: @@ -4786,8 +5026,11 @@ func (c *AutoScaling) SetDesiredCapacityRequest(input *SetDesiredCapacityInput) // // Sets the size of the specified Auto Scaling group. // -// For more information about desired capacity, see What Is Amazon EC2 Auto -// Scaling? (https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html) +// If a scale-in activity occurs as a result of a new DesiredCapacity value +// that is lower than the current size of the group, the Auto Scaling group +// uses its termination policy to determine which instances to terminate. +// +// For more information, see Manual Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-manual-scaling.html) // in the Amazon EC2 Auto Scaling User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4974,7 +5217,8 @@ func (c *AutoScaling) SetInstanceProtectionRequest(input *SetInstanceProtectionI // * ErrCodeLimitExceededFault "LimitExceeded" // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). -// For more information, see DescribeAccountLimits. +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. // // * ErrCodeResourceContentionFault "ResourceContention" // You already have a pending update to an Amazon EC2 Auto Scaling resource @@ -5002,6 +5246,107 @@ func (c *AutoScaling) SetInstanceProtectionWithContext(ctx aws.Context, input *S return out, req.Send() } +const opStartInstanceRefresh = "StartInstanceRefresh" + +// StartInstanceRefreshRequest generates a "aws/request.Request" representing the +// client's request for the StartInstanceRefresh operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartInstanceRefresh for more information on using the StartInstanceRefresh +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartInstanceRefreshRequest method. 
+// req, resp := client.StartInstanceRefreshRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/StartInstanceRefresh +func (c *AutoScaling) StartInstanceRefreshRequest(input *StartInstanceRefreshInput) (req *request.Request, output *StartInstanceRefreshOutput) { + op := &request.Operation{ + Name: opStartInstanceRefresh, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartInstanceRefreshInput{} + } + + output = &StartInstanceRefreshOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartInstanceRefresh API operation for Auto Scaling. +// +// Starts a new instance refresh operation, which triggers a rolling replacement +// of all previously launched instances in the Auto Scaling group with a new +// group of instances. +// +// If successful, this call creates a new instance refresh request with a unique +// ID that you can use to track its progress. To query its status, call the +// DescribeInstanceRefreshes API. To describe the instance refreshes that have +// already run, call the DescribeInstanceRefreshes API. To cancel an instance +// refresh operation in progress, use the CancelInstanceRefresh API. +// +// For more information, see Replacing Auto Scaling Instances Based on an Instance +// Refresh (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Auto Scaling's +// API operation StartInstanceRefresh for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededFault "LimitExceeded" +// You have already reached a limit for your Amazon EC2 Auto Scaling resources +// (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). +// For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) +// in the Amazon EC2 Auto Scaling API Reference. +// +// * ErrCodeResourceContentionFault "ResourceContention" +// You already have a pending update to an Amazon EC2 Auto Scaling resource +// (for example, an Auto Scaling group, instance, or load balancer). +// +// * ErrCodeInstanceRefreshInProgressFault "InstanceRefreshInProgress" +// The request failed because an active instance refresh operation already exists +// for the specified Auto Scaling group. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/autoscaling-2011-01-01/StartInstanceRefresh +func (c *AutoScaling) StartInstanceRefresh(input *StartInstanceRefreshInput) (*StartInstanceRefreshOutput, error) { + req, out := c.StartInstanceRefreshRequest(input) + return out, req.Send() +} + +// StartInstanceRefreshWithContext is the same as StartInstanceRefresh with the addition of +// the ability to pass a context and additional request options. +// +// See StartInstanceRefresh for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *AutoScaling) StartInstanceRefreshWithContext(ctx aws.Context, input *StartInstanceRefreshInput, opts ...request.Option) (*StartInstanceRefreshOutput, error) { + req, out := c.StartInstanceRefreshRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opSuspendProcesses = "SuspendProcesses" // SuspendProcessesRequest generates a "aws/request.Request" representing the @@ -5051,13 +5396,12 @@ func (c *AutoScaling) SuspendProcessesRequest(input *ScalingProcessQuery) (req * // the specified Auto Scaling group. // // If you suspend either the Launch or Terminate process types, it can prevent -// other process types from functioning properly. -// -// To resume processes that have been suspended, use ResumeProcesses. -// -// For more information, see Suspending and Resuming Scaling Processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html) +// other process types from functioning properly. For more information, see +// Suspending and Resuming Scaling Processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html) // in the Amazon EC2 Auto Scaling User Guide. // +// To resume processes that have been suspended, call the ResumeProcesses API. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5140,7 +5484,9 @@ func (c *AutoScaling) TerminateInstanceInAutoScalingGroupRequest(input *Terminat // TerminateInstanceInAutoScalingGroup API operation for Auto Scaling. // // Terminates the specified instance and optionally adjusts the desired group -// size. This call simply makes a termination request. The instance is not terminated +// size. +// +// This call simply makes a termination request. The instance is not terminated // immediately. When an instance is terminated, the instance status changes // to terminated. You can't connect to or start an instance after you've terminated // it. @@ -5260,7 +5606,7 @@ func (c *AutoScaling) UpdateAutoScalingGroupRequest(input *UpdateAutoScalingGrou // // Note the following about changing DesiredCapacity, MaxSize, or MinSize: // -// * If a scale-in event occurs as a result of a new DesiredCapacity value +// * If a scale-in activity occurs as a result of a new DesiredCapacity value // that is lower than the current size of the group, the Auto Scaling group // uses its termination policy to determine which instances to terminate. // @@ -5273,9 +5619,10 @@ func (c *AutoScaling) UpdateAutoScalingGroupRequest(input *UpdateAutoScalingGrou // of the group, this sets the group's DesiredCapacity to the new MaxSize // value. // -// To see which parameters have been set, use DescribeAutoScalingGroups. You -// can also view the scaling policies for an Auto Scaling group using DescribePolicies. -// If the group has scaling policies, you can update them using PutScalingPolicy. +// To see which parameters have been set, call the DescribeAutoScalingGroups +// API. To view the scaling policies for an Auto Scaling group, call the DescribePolicies +// API. If the group has scaling policies, you can update them by calling the +// PutScalingPolicy API. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5876,17 +6223,23 @@ type BlockDeviceMapping struct { // DeviceName is a required field DeviceName *string `min:"1" type:"string" required:"true"` - // The information about the Amazon EBS volume. + // Parameters used to automatically set up EBS volumes when an instance is launched. + // + // You can specify either VirtualName or Ebs, but not both. Ebs *Ebs `type:"structure"` - // Suppresses a device mapping. + // Setting this value to true suppresses the specified device included in the + // block device mapping of the AMI. // - // If this parameter is true for the root device, the instance might fail the - // EC2 health check. In that case, Amazon EC2 Auto Scaling launches a replacement - // instance. + // If NoDevice is true for the root device, instances might fail the EC2 health + // check. In that case, Amazon EC2 Auto Scaling launches replacement instances. + // + // If you specify NoDevice, you cannot specify Ebs. NoDevice *bool `type:"boolean"` // The name of the virtual device (for example, ephemeral0). + // + // You can specify either VirtualName or Ebs, but not both. VirtualName *string `min:"1" type:"string"` } @@ -5948,58 +6301,122 @@ func (s *BlockDeviceMapping) SetVirtualName(v string) *BlockDeviceMapping { return s } -type CompleteLifecycleActionInput struct { +type CancelInstanceRefreshInput struct { _ struct{} `type:"structure"` // The name of the Auto Scaling group. // // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - - // The ID of the instance. - InstanceId *string `min:"1" type:"string"` - - // The action for the group to take. This parameter can be either CONTINUE or - // ABANDON. - // - // LifecycleActionResult is a required field - LifecycleActionResult *string `type:"string" required:"true"` - - // A universally unique identifier (UUID) that identifies a specific lifecycle - // action associated with an instance. Amazon EC2 Auto Scaling sends this token - // to the notification target you specified when you created the lifecycle hook. - LifecycleActionToken *string `min:"36" type:"string"` - - // The name of the lifecycle hook. - // - // LifecycleHookName is a required field - LifecycleHookName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CompleteLifecycleActionInput) String() string { +func (s CancelInstanceRefreshInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CompleteLifecycleActionInput) GoString() string { +func (s CancelInstanceRefreshInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CompleteLifecycleActionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CompleteLifecycleActionInput"} +func (s *CancelInstanceRefreshInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelInstanceRefreshInput"} if s.AutoScalingGroupName == nil { invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) } if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) } - if s.InstanceId != nil && len(*s.InstanceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) - } - if s.LifecycleActionResult == nil { - invalidParams.Add(request.NewErrParamRequired("LifecycleActionResult")) + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoScalingGroupName sets the AutoScalingGroupName field's value. +func (s *CancelInstanceRefreshInput) SetAutoScalingGroupName(v string) *CancelInstanceRefreshInput { + s.AutoScalingGroupName = &v + return s +} + +type CancelInstanceRefreshOutput struct { + _ struct{} `type:"structure"` + + // The instance refresh ID. + InstanceRefreshId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CancelInstanceRefreshOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelInstanceRefreshOutput) GoString() string { + return s.String() +} + +// SetInstanceRefreshId sets the InstanceRefreshId field's value. +func (s *CancelInstanceRefreshOutput) SetInstanceRefreshId(v string) *CancelInstanceRefreshOutput { + s.InstanceRefreshId = &v + return s +} + +type CompleteLifecycleActionInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + // + // AutoScalingGroupName is a required field + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The ID of the instance. + InstanceId *string `min:"1" type:"string"` + + // The action for the group to take. This parameter can be either CONTINUE or + // ABANDON. + // + // LifecycleActionResult is a required field + LifecycleActionResult *string `type:"string" required:"true"` + + // A universally unique identifier (UUID) that identifies a specific lifecycle + // action associated with an instance. Amazon EC2 Auto Scaling sends this token + // to the notification target you specified when you created the lifecycle hook. + LifecycleActionToken *string `min:"36" type:"string"` + + // The name of the lifecycle hook. + // + // LifecycleHookName is a required field + LifecycleHookName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteLifecycleActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteLifecycleActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CompleteLifecycleActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteLifecycleActionInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.InstanceId != nil && len(*s.InstanceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) + } + if s.LifecycleActionResult == nil { + invalidParams.Add(request.NewErrParamRequired("LifecycleActionResult")) } if s.LifecycleActionToken != nil && len(*s.LifecycleActionToken) < 36 { invalidParams.Add(request.NewErrParamMinLen("LifecycleActionToken", 36)) @@ -6080,15 +6497,19 @@ type CreateAutoScalingGroupInput struct { // The amount of time, in seconds, after a scaling activity completes before // another scaling activity can start. The default value is 300. // - // For more information, see Scaling Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // This setting applies when using simple scaling policies, but not when using + // other scaling policies or scheduled scaling. For more information, see Scaling + // Cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) // in the Amazon EC2 Auto Scaling User Guide. DefaultCooldown *int64 `type:"integer"` - // The number of Amazon EC2 instances that the Auto Scaling group attempts to - // maintain. This number must be greater than or equal to the minimum size of - // the group and less than or equal to the maximum size of the group. If you - // do not specify a desired capacity, the default is the minimum size of the - // group. + // The desired capacity is the initial capacity of the Auto Scaling group at + // the time of its creation and the capacity it attempts to maintain. It can + // scale beyond this capacity if you configure automatic scaling. + // + // This number must be greater than or equal to the minimum size of the group + // and less than or equal to the maximum size of the group. If you do not specify + // a desired capacity, the default is the minimum size of the group. DesiredCapacity *int64 `type:"integer"` // The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before @@ -6099,7 +6520,7 @@ type CreateAutoScalingGroupInput struct { // For more information, see Health Check Grace Period (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html#health-check-grace-period) // in the Amazon EC2 Auto Scaling User Guide. // - // Conditional: This parameter is required if you are adding an ELB health check. + // Required if you are adding an ELB health check. HealthCheckGracePeriod *int64 `type:"integer"` // The service to use for the health checks. The valid values are EC2 and ELB. @@ -6112,33 +6533,38 @@ type CreateAutoScalingGroupInput struct { HealthCheckType *string `min:"1" type:"string"` // The ID of the instance used to create a launch configuration for the group. + // To get the instance ID, use the Amazon EC2 DescribeInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) + // API operation. // // When you specify an ID of an instance, Amazon EC2 Auto Scaling creates a // new launch configuration and associates it with the group. This launch configuration // derives its attributes from the specified instance, except for the block // device mapping. 
// - // For more information, see Create an Auto Scaling Group Using an EC2 Instance - // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-asg-from-instance.html) - // in the Amazon EC2 Auto Scaling User Guide. - // // You must specify one of the following parameters in your request: LaunchConfigurationName, // LaunchTemplate, InstanceId, or MixedInstancesPolicy. InstanceId *string `min:"1" type:"string"` - // The name of the launch configuration. + // The name of the launch configuration to use when an instance is launched. + // To get the launch configuration name, use the DescribeLaunchConfigurations + // API operation. New launch configurations can be created with the CreateLaunchConfiguration + // API. // - // If you do not specify LaunchConfigurationName, you must specify one of the - // following parameters: InstanceId, LaunchTemplate, or MixedInstancesPolicy. + // You must specify one of the following parameters in your request: LaunchConfigurationName, + // LaunchTemplate, InstanceId, or MixedInstancesPolicy. LaunchConfigurationName *string `min:"1" type:"string"` - // The launch template to use to launch instances. + // Parameters used to specify the launch template and version to use when an + // instance is launched. // // For more information, see LaunchTemplateSpecification (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_LaunchTemplateSpecification.html) // in the Amazon EC2 Auto Scaling API Reference. // - // If you do not specify LaunchTemplate, you must specify one of the following - // parameters: InstanceId, LaunchConfigurationName, or MixedInstancesPolicy. + // You can alternatively associate a launch template to the Auto Scaling group + // by using the MixedInstancesPolicy parameter. + // + // You must specify one of the following parameters in your request: LaunchConfigurationName, + // LaunchTemplate, InstanceId, or MixedInstancesPolicy. LaunchTemplate *LaunchTemplateSpecification `type:"structure"` // One or more lifecycle hooks. @@ -6154,16 +6580,27 @@ type CreateAutoScalingGroupInput struct { LoadBalancerNames []*string `type:"list"` // The maximum amount of time, in seconds, that an instance can be in service. + // The default is null. + // + // This parameter is optional, but if you specify a value for it, you must specify + // a value of at least 604,800 seconds (7 days). To clear a previously set value, + // specify a new value of 0. // // For more information, see Replacing Auto Scaling Instances Based on Maximum // Instance Lifetime (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html) // in the Amazon EC2 Auto Scaling User Guide. // - // Valid Range: Minimum value of 604800. + // Valid Range: Minimum value of 0. MaxInstanceLifetime *int64 `type:"integer"` // The maximum size of the group. // + // With a mixed instances policy that uses instance weighting, Amazon EC2 Auto + // Scaling may need to go above MaxSize to meet your capacity requirements. + // In this event, Amazon EC2 Auto Scaling will never go above MaxSize by more + // than your largest instance weight (weights that define how many units each + // instance contributes to the desired capacity of the group). + // // MaxSize is a required field MaxSize *int64 `type:"integer" required:"true"` @@ -6214,7 +6651,14 @@ type CreateAutoScalingGroupInput struct { // in the Amazon EC2 Auto Scaling User Guide. ServiceLinkedRoleARN *string `min:"1" type:"string"` - // One or more tags. + // One or more tags. 
You can tag your Auto Scaling group and propagate the tags + // to the Amazon EC2 instances it launches. + // + // Tags are not propagated to Amazon EBS volumes. To add tags to Amazon EBS + // volumes, specify the tags in a launch template but use caution. If the launch + // template specifies an instance tag with a key that is also specified for + // the Auto Scaling group, Amazon EC2 Auto Scaling overrides the value of that + // instance tag with the value specified by the Auto Scaling group. // // For more information, see Tagging Auto Scaling Groups and Instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-tagging.html) // in the Amazon EC2 Auto Scaling User Guide. @@ -6597,6 +7041,11 @@ type CreateLaunchConfigurationInput struct { // LaunchConfigurationName is a required field LaunchConfigurationName *string `min:"1" type:"string" required:"true"` + // The metadata options for the instances. For more information, see Instance + // Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + // in the Amazon EC2 User Guide for Linux Instances. + MetadataOptions *InstanceMetadataOptions `type:"structure"` + // The tenancy of the instance. An instance with dedicated tenancy runs on isolated, // single-tenant hardware and can only be launched into a VPC. // @@ -6704,6 +7153,11 @@ func (s *CreateLaunchConfigurationInput) Validate() error { } } } + if s.MetadataOptions != nil { + if err := s.MetadataOptions.Validate(); err != nil { + invalidParams.AddNested("MetadataOptions", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6789,6 +7243,12 @@ func (s *CreateLaunchConfigurationInput) SetLaunchConfigurationName(v string) *C return s } +// SetMetadataOptions sets the MetadataOptions field's value. +func (s *CreateLaunchConfigurationInput) SetMetadataOptions(v *InstanceMetadataOptions) *CreateLaunchConfigurationInput { + s.MetadataOptions = v + return s +} + // SetPlacementTenancy sets the PlacementTenancy field's value. func (s *CreateLaunchConfigurationInput) SetPlacementTenancy(v string) *CreateLaunchConfigurationInput { s.PlacementTenancy = &v @@ -7780,6 +8240,111 @@ func (s *DescribeAutoScalingNotificationTypesOutput) SetAutoScalingNotificationT return s } +type DescribeInstanceRefreshesInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + // + // AutoScalingGroupName is a required field + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // One or more instance refresh IDs. + InstanceRefreshIds []*string `type:"list"` + + // The maximum number of items to return with this call. The default value is + // 50 and the maximum value is 100. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceRefreshesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceRefreshesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
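// Editor's sketch (hedged): paging through DescribeInstanceRefreshes results with the
// MaxRecords/NextToken fields documented above. The group name is a placeholder.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func listRefreshes(svc *autoscaling.AutoScaling) error {
	input := &autoscaling.DescribeInstanceRefreshesInput{
		AutoScalingGroupName: aws.String("example-asg"), // placeholder
		MaxRecords:           aws.Int64(50),
	}
	for {
		out, err := svc.DescribeInstanceRefreshes(input)
		if err != nil {
			return err
		}
		for _, r := range out.InstanceRefreshes {
			fmt.Println(aws.StringValue(r.InstanceRefreshId), aws.StringValue(r.Status))
		}
		if out.NextToken == nil {
			return nil // no more pages
		}
		input.NextToken = out.NextToken
	}
}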
+func (s *DescribeInstanceRefreshesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstanceRefreshesInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoScalingGroupName sets the AutoScalingGroupName field's value. +func (s *DescribeInstanceRefreshesInput) SetAutoScalingGroupName(v string) *DescribeInstanceRefreshesInput { + s.AutoScalingGroupName = &v + return s +} + +// SetInstanceRefreshIds sets the InstanceRefreshIds field's value. +func (s *DescribeInstanceRefreshesInput) SetInstanceRefreshIds(v []*string) *DescribeInstanceRefreshesInput { + s.InstanceRefreshIds = v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeInstanceRefreshesInput) SetMaxRecords(v int64) *DescribeInstanceRefreshesInput { + s.MaxRecords = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceRefreshesInput) SetNextToken(v string) *DescribeInstanceRefreshesInput { + s.NextToken = &v + return s +} + +type DescribeInstanceRefreshesOutput struct { + _ struct{} `type:"structure"` + + // The instance refreshes for the specified group. + InstanceRefreshes []*InstanceRefresh `type:"list"` + + // A string that indicates that the response contains more items than can be + // returned in a single response. To receive additional items, specify this + // string for the NextToken value when requesting the next set of items. This + // value is null when there are no more items to return. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstanceRefreshesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceRefreshesOutput) GoString() string { + return s.String() +} + +// SetInstanceRefreshes sets the InstanceRefreshes field's value. +func (s *DescribeInstanceRefreshesOutput) SetInstanceRefreshes(v []*InstanceRefresh) *DescribeInstanceRefreshesOutput { + s.InstanceRefreshes = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInstanceRefreshesOutput) SetNextToken(v string) *DescribeInstanceRefreshesOutput { + s.NextToken = &v + return s +} + type DescribeLaunchConfigurationsInput struct { _ struct{} `type:"structure"` @@ -9022,8 +9587,7 @@ type DisableMetricsCollectionInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // One or more of the following metrics. If you omit this parameter, all metrics - // are disabled. + // Specifies one or more of the following metrics: // // * GroupMinSize // @@ -9040,6 +9604,18 @@ type DisableMetricsCollectionInput struct { // * GroupTerminatingInstances // // * GroupTotalInstances + // + // * GroupInServiceCapacity + // + // * GroupPendingCapacity + // + // * GroupStandbyCapacity + // + // * GroupTerminatingCapacity + // + // * GroupTotalCapacity + // + // If you omit this parameter, all metrics are disabled. Metrics []*string `type:"list"` } @@ -9095,7 +9671,8 @@ func (s DisableMetricsCollectionOutput) GoString() string { return s.String() } -// Describes an Amazon EBS volume. Used in combination with BlockDeviceMapping. 
+// Describes information used to set up an Amazon EBS volume specified in a +// block device mapping. type Ebs struct { _ struct{} `type:"structure"` @@ -9133,15 +9710,13 @@ type Ebs struct { // see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon EC2 User Guide for Linux Instances. // - // Conditional: This parameter is required when the volume type is io1. (Not - // used with standard, gp2, st1, or sc1 volumes.) + // Required when the volume type is io1. (Not used with standard, gp2, st1, + // or sc1 volumes.) Iops *int64 `min:"100" type:"integer"` // The snapshot ID of the volume to use. // - // Conditional: This parameter is optional if you specify a volume size. If - // you specify both SnapshotId and VolumeSize, VolumeSize must be equal or greater - // than the size of the snapshot. + // You must specify either a VolumeSize or a SnapshotId. SnapshotId *string `min:"1" type:"string"` // The volume size, in Gibibytes (GiB). @@ -9153,7 +9728,9 @@ type Ebs struct { // Default: If you create a volume from a snapshot and you don't specify a volume // size, the default is the snapshot size. // - // At least one of VolumeSize or SnapshotId is required. + // You must specify either a VolumeSize or a SnapshotId. If you specify both + // SnapshotId and VolumeSize, the volume size must be equal or greater than + // the size of the snapshot. VolumeSize *int64 `min:"1" type:"integer"` // The volume type, which can be standard for Magnetic, io1 for Provisioned @@ -9247,8 +9824,8 @@ type EnableMetricsCollectionInput struct { // Granularity is a required field Granularity *string `min:"1" type:"string" required:"true"` - // One or more of the following metrics. If you omit this parameter, all metrics - // are enabled. + // Specifies which group-level metrics to start collecting. You can specify + // one or more of the following metrics: // // * GroupMinSize // @@ -9265,6 +9842,20 @@ type EnableMetricsCollectionInput struct { // * GroupTerminatingInstances // // * GroupTotalInstances + // + // The instance weighting feature supports the following additional metrics: + // + // * GroupInServiceCapacity + // + // * GroupPendingCapacity + // + // * GroupStandbyCapacity + // + // * GroupTerminatingCapacity + // + // * GroupTotalCapacity + // + // If you omit this parameter, all metrics are enabled. Metrics []*string `type:"list"` } @@ -9356,6 +9947,16 @@ type EnabledMetric struct { // * GroupTerminatingInstances // // * GroupTotalInstances + // + // * GroupInServiceCapacity + // + // * GroupPendingCapacity + // + // * GroupStandbyCapacity + // + // * GroupTerminatingCapacity + // + // * GroupTotalCapacity Metric *string `min:"1" type:"string"` } @@ -9477,16 +10078,14 @@ type ExecutePolicyInput struct { // The breach threshold for the alarm. // - // Conditional: This parameter is required if the policy type is StepScaling - // and not supported otherwise. + // Required if the policy type is StepScaling and not supported otherwise. BreachThreshold *float64 `type:"double"` // Indicates whether Amazon EC2 Auto Scaling waits for the cooldown period to // complete before executing the policy. // - // This parameter is not supported if the policy type is StepScaling or TargetTrackingScaling. - // - // For more information, see Scaling Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // Valid only if the policy type is SimpleScaling. 
For more information, see + // Scaling Cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) // in the Amazon EC2 Auto Scaling User Guide. HonorCooldown *bool `type:"boolean"` @@ -9499,8 +10098,7 @@ type ExecutePolicyInput struct { // If you specify a metric value that doesn't correspond to a step adjustment // for the policy, the call returns an error. // - // Conditional: This parameter is required if the policy type is StepScaling - // and not supported otherwise. + // Required if the policy type is StepScaling and not supported otherwise. MetricValue *float64 `type:"double"` // The name or ARN of the policy. @@ -9699,15 +10297,19 @@ func (s *FailedScheduledUpdateGroupActionRequest) SetScheduledActionName(v strin return s } -// Describes a filter. +// Describes a filter that is used to return a more specific list of results +// when describing tags. +// +// For more information, see Tagging Auto Scaling Groups and Instances (https://docs.aws.amazon.com/autoscaling/ec2/userguide/autoscaling-tagging.html) +// in the Amazon EC2 Auto Scaling User Guide. type Filter struct { _ struct{} `type:"structure"` - // The name of the filter. The valid values are: "auto-scaling-group", "key", - // "value", and "propagate-at-launch". + // The name of the filter. The valid values are: auto-scaling-group, key, value, + // and propagate-at-launch. Name *string `type:"string"` - // The value of the filter. + // One or more filter values. Filter values are case-sensitive. Values []*string `type:"list"` } @@ -9755,8 +10357,7 @@ type Group struct { // CreatedTime is a required field CreatedTime *time.Time `type:"timestamp" required:"true"` - // The amount of time, in seconds, after a scaling activity completes before - // another scaling activity can start. + // The duration of the default cooldown period, in seconds. // // DefaultCooldown is a required field DefaultCooldown *int64 `type:"integer" required:"true"` @@ -9795,7 +10396,7 @@ type Group struct { // The maximum amount of time, in seconds, that an instance can be in service. // - // Valid Range: Minimum value of 604800. + // Valid Range: Minimum value of 0. MaxInstanceLifetime *int64 `type:"integer"` // The maximum size of the group. @@ -9822,7 +10423,8 @@ type Group struct { // group uses to call other AWS services on your behalf. ServiceLinkedRoleARN *string `min:"1" type:"string"` - // The current state of the group when DeleteAutoScalingGroup is in progress. + // The current state of the group when the DeleteAutoScalingGroup operation + // is in progress. Status *string `min:"1" type:"string"` // The suspended processes associated with the group. @@ -10246,6 +10848,84 @@ func (s *InstanceDetails) SetWeightedCapacity(v string) *InstanceDetails { return s } +// The metadata options for the instances. For more information, see Instance +// Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) +// in the Amazon EC2 User Guide for Linux Instances. +type InstanceMetadataOptions struct { + _ struct{} `type:"structure"` + + // This parameter enables or disables the HTTP metadata endpoint on your instances. + // If the parameter is not specified, the default state is enabled. + // + // If you specify a value of disabled, you will not be able to access your instance + // metadata. + HttpEndpoint *string `type:"string" enum:"InstanceMetadataEndpointState"` + + // The desired HTTP PUT response hop limit for instance metadata requests. 
The + // larger the number, the further instance metadata requests can travel. + // + // Default: 1 + // + // Possible values: Integers from 1 to 64 + HttpPutResponseHopLimit *int64 `min:"1" type:"integer"` + + // The state of token usage for your instance metadata requests. If the parameter + // is not specified in the request, the default state is optional. + // + // If the state is optional, you can choose to retrieve instance metadata with + // or without a signed token header on your request. If you retrieve the IAM + // role credentials without a token, the version 1.0 role credentials are returned. + // If you retrieve the IAM role credentials using a valid signed token, the + // version 2.0 role credentials are returned. + // + // If the state is required, you must send a signed token header with any instance + // metadata retrieval requests. In this state, retrieving the IAM role credentials + // always returns the version 2.0 credentials; the version 1.0 credentials are + // not available. + HttpTokens *string `type:"string" enum:"InstanceMetadataHttpTokensState"` +} + +// String returns the string representation +func (s InstanceMetadataOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceMetadataOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InstanceMetadataOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InstanceMetadataOptions"} + if s.HttpPutResponseHopLimit != nil && *s.HttpPutResponseHopLimit < 1 { + invalidParams.Add(request.NewErrParamMinValue("HttpPutResponseHopLimit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHttpEndpoint sets the HttpEndpoint field's value. +func (s *InstanceMetadataOptions) SetHttpEndpoint(v string) *InstanceMetadataOptions { + s.HttpEndpoint = &v + return s +} + +// SetHttpPutResponseHopLimit sets the HttpPutResponseHopLimit field's value. +func (s *InstanceMetadataOptions) SetHttpPutResponseHopLimit(v int64) *InstanceMetadataOptions { + s.HttpPutResponseHopLimit = &v + return s +} + +// SetHttpTokens sets the HttpTokens field's value. +func (s *InstanceMetadataOptions) SetHttpTokens(v string) *InstanceMetadataOptions { + s.HttpTokens = &v + return s +} + // Describes whether detailed monitoring is enabled for the Auto Scaling instances. type InstanceMonitoring struct { _ struct{} `type:"structure"` @@ -10270,7 +10950,114 @@ func (s *InstanceMonitoring) SetEnabled(v bool) *InstanceMonitoring { return s } -// Describes an instances distribution for an Auto Scaling group with MixedInstancesPolicy. +// Describes an instance refresh for an Auto Scaling group. +type InstanceRefresh struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string"` + + // The date and time at which the instance refresh ended. + EndTime *time.Time `type:"timestamp"` + + // The instance refresh ID. + InstanceRefreshId *string `min:"1" type:"string"` + + // The number of instances remaining to update before the instance refresh is + // complete. + InstancesToUpdate *int64 `type:"integer"` + + // The percentage of the instance refresh that is complete. For each instance + // replacement, Amazon EC2 Auto Scaling tracks the instance's health status + // and warm-up time. 
When the instance's health status changes to healthy and + // the specified warm-up time passes, the instance is considered updated and + // added to the percentage complete. + PercentageComplete *int64 `type:"integer"` + + // The date and time at which the instance refresh began. + StartTime *time.Time `type:"timestamp"` + + // The current status for the instance refresh operation: + // + // * Pending - The request was created, but the operation has not started. + // + // * InProgress - The operation is in progress. + // + // * Successful - The operation completed successfully. + // + // * Failed - The operation failed to complete. You can troubleshoot using + // the status reason and the scaling activities. + // + // * Cancelling - An ongoing operation is being cancelled. Cancellation does + // not roll back any replacements that have already been completed, but it + // prevents new replacements from being started. + // + // * Cancelled - The operation is cancelled. + Status *string `type:"string" enum:"InstanceRefreshStatus"` + + // Provides more details about the current status of the instance refresh. + StatusReason *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s InstanceRefresh) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceRefresh) GoString() string { + return s.String() +} + +// SetAutoScalingGroupName sets the AutoScalingGroupName field's value. +func (s *InstanceRefresh) SetAutoScalingGroupName(v string) *InstanceRefresh { + s.AutoScalingGroupName = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *InstanceRefresh) SetEndTime(v time.Time) *InstanceRefresh { + s.EndTime = &v + return s +} + +// SetInstanceRefreshId sets the InstanceRefreshId field's value. +func (s *InstanceRefresh) SetInstanceRefreshId(v string) *InstanceRefresh { + s.InstanceRefreshId = &v + return s +} + +// SetInstancesToUpdate sets the InstancesToUpdate field's value. +func (s *InstanceRefresh) SetInstancesToUpdate(v int64) *InstanceRefresh { + s.InstancesToUpdate = &v + return s +} + +// SetPercentageComplete sets the PercentageComplete field's value. +func (s *InstanceRefresh) SetPercentageComplete(v int64) *InstanceRefresh { + s.PercentageComplete = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *InstanceRefresh) SetStartTime(v time.Time) *InstanceRefresh { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *InstanceRefresh) SetStatus(v string) *InstanceRefresh { + s.Status = &v + return s +} + +// SetStatusReason sets the StatusReason field's value. +func (s *InstanceRefresh) SetStatusReason(v string) *InstanceRefresh { + s.StatusReason = &v + return s +} + +// Describes an instances distribution for an Auto Scaling group with a MixedInstancesPolicy. // // The instances distribution specifies the distribution of On-Demand Instances // and Spot Instances, the maximum price to pay for Spot Instances, and how @@ -10501,6 +11288,11 @@ type LaunchConfiguration struct { // LaunchConfigurationName is a required field LaunchConfigurationName *string `min:"1" type:"string" required:"true"` + // The metadata options for the instances. For more information, see Instance + // Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html) + // in the Amazon EC2 User Guide for Linux Instances. 
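// Editor's sketch (hedged): creating a launch configuration that pins instances to
// IMDSv2 via the MetadataOptions plumbing shown above. The launch configuration name,
// AMI ID, and instance type are placeholders.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func createLaunchConfigWithIMDSv2(svc *autoscaling.AutoScaling) error {
	_, err := svc.CreateLaunchConfiguration(&autoscaling.CreateLaunchConfigurationInput{
		LaunchConfigurationName: aws.String("example-lc"),            // placeholder
		ImageId:                 aws.String("ami-0123456789abcdef0"), // placeholder
		InstanceType:            aws.String("t3.micro"),              // placeholder
		MetadataOptions: &autoscaling.InstanceMetadataOptions{
			HttpEndpoint:            aws.String("enabled"),
			HttpTokens:              aws.String("required"), // require IMDSv2 session tokens
			HttpPutResponseHopLimit: aws.Int64(1),
		},
	})
	return err
}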
+ MetadataOptions *InstanceMetadataOptions `type:"structure"` + // The tenancy of the instance, either default or dedicated. An instance with // dedicated tenancy runs on isolated, single-tenant hardware and can only be // launched into a VPC. @@ -10629,6 +11421,12 @@ func (s *LaunchConfiguration) SetLaunchConfigurationName(v string) *LaunchConfig return s } +// SetMetadataOptions sets the MetadataOptions field's value. +func (s *LaunchConfiguration) SetMetadataOptions(v *InstanceMetadataOptions) *LaunchConfiguration { + s.MetadataOptions = v + return s +} + // SetPlacementTenancy sets the PlacementTenancy field's value. func (s *LaunchConfiguration) SetPlacementTenancy(v string) *LaunchConfiguration { s.PlacementTenancy = &v @@ -10676,9 +11474,12 @@ type LaunchTemplate struct { // or launch template name in the request. LaunchTemplateSpecification *LaunchTemplateSpecification `type:"structure"` - // An optional setting. Any parameters that you specify override the same parameters - // in the launch template. Currently, the only supported override is instance - // type. You can specify between 1 and 20 instance types. + // Any parameters that you specify override the same parameters in the launch + // template. Currently, the only supported override is instance type. You can + // specify between 1 and 20 instance types. + // + // If not provided, Amazon EC2 Auto Scaling will use the instance type specified + // in the launch template to launch instances. Overrides []*LaunchTemplateOverrides `type:"list"` } @@ -10729,11 +11530,16 @@ func (s *LaunchTemplate) SetOverrides(v []*LaunchTemplateOverrides) *LaunchTempl return s } -// Describes an override for a launch template. +// Describes an override for a launch template. Currently, the only supported +// override is instance type. +// +// The maximum number of instance type overrides that can be associated with +// an Auto Scaling group is 20. type LaunchTemplateOverrides struct { _ struct{} `type:"structure"` - // The instance type. + // The instance type. You must use an instance type that is supported in your + // requested Region and Availability Zones. // // For information about available instance types, see Available Instance Types // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes) @@ -10792,7 +11598,8 @@ func (s *LaunchTemplateOverrides) SetWeightedCapacity(v string) *LaunchTemplateO return s } -// Describes a launch template and the launch template version. +// Describes the Amazon EC2 launch template and the launch template version +// that can be used by an Auto Scaling group to configure Amazon EC2 instances. // // The launch template that is specified must be configured for use with an // Auto Scaling group. For more information, see Creating a Launch Template @@ -10801,19 +11608,34 @@ func (s *LaunchTemplateOverrides) SetWeightedCapacity(v string) *LaunchTemplateO type LaunchTemplateSpecification struct { _ struct{} `type:"structure"` - // The ID of the launch template. You must specify either a template ID or a - // template name. + // The ID of the launch template. To get the template ID, use the Amazon EC2 + // DescribeLaunchTemplates (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLaunchTemplates.html) + // API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html) + // API. 
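// Editor's sketch (hedged): referencing a launch template by ID and version when
// creating a group, per the LaunchTemplateSpecification fields described above.
// The group name, template ID, version, sizes, and zone are placeholders.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func createGroupFromTemplate(svc *autoscaling.AutoScaling) error {
	_, err := svc.CreateAutoScalingGroup(&autoscaling.CreateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String("example-asg"), // placeholder
		MinSize:              aws.Int64(1),
		MaxSize:              aws.Int64(3),
		LaunchTemplate: &autoscaling.LaunchTemplateSpecification{
			LaunchTemplateId: aws.String("lt-0123456789abcdef0"), // placeholder
			Version:          aws.String("$Latest"),
		},
		AvailabilityZones: []*string{aws.String("us-east-1a")}, // placeholder
	})
	return err
}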
+ // + // You must specify either a template ID or a template name. LaunchTemplateId *string `min:"1" type:"string"` - // The name of the launch template. You must specify either a template name - // or a template ID. + // The name of the launch template. To get the template name, use the Amazon + // EC2 DescribeLaunchTemplates (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLaunchTemplates.html) + // API operation. New launch templates can be created using the Amazon EC2 CreateLaunchTemplate + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html) + // API. + // + // You must specify either a template ID or a template name. LaunchTemplateName *string `min:"3" type:"string"` - // The version number, $Latest, or $Default. If the value is $Latest, Amazon - // EC2 Auto Scaling selects the latest version of the launch template when launching - // instances. If the value is $Default, Amazon EC2 Auto Scaling selects the - // default version of the launch template when launching instances. The default - // value is $Default. + // The version number, $Latest, or $Default. To get the version number, use + // the Amazon EC2 DescribeLaunchTemplateVersions (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeLaunchTemplateVersions.html) + // API operation. New launch template versions can be created using the Amazon + // EC2 CreateLaunchTemplateVersion (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplateVersion.html) + // API. + // + // If the value is $Latest, Amazon EC2 Auto Scaling selects the latest version + // of the launch template when launching instances. If the value is $Default, + // Amazon EC2 Auto Scaling selects the default version of the launch template + // when launching instances. The default value is $Default. Version *string `min:"1" type:"string"` } @@ -10866,7 +11688,6 @@ func (s *LaunchTemplateSpecification) SetVersion(v string) *LaunchTemplateSpecif // Describes a lifecycle hook, which tells Amazon EC2 Auto Scaling that you // want to perform an action whenever it launches instances or terminates instances. -// Used in response to DescribeLifecycleHooks. type LifecycleHook struct { _ struct{} `type:"structure"` @@ -10977,7 +11798,8 @@ func (s *LifecycleHook) SetRoleARN(v string) *LifecycleHook { return s } -// Describes a lifecycle hook. Used in combination with CreateAutoScalingGroup. +// Describes information used to specify a lifecycle hook for an Auto Scaling +// group. // // A lifecycle hook tells Amazon EC2 Auto Scaling to perform an action on an // instance when the instance launches (before it is put into service) or as @@ -10998,18 +11820,12 @@ func (s *LifecycleHook) SetRoleARN(v string) *LifecycleHook { // launch or terminate. // // If you need more time, record the lifecycle action heartbeat to keep the -// instance in a pending state using RecordLifecycleActionHeartbeat. +// instance in a pending state. // -// If you finish before the timeout period ends, complete the lifecycle action -// using CompleteLifecycleAction. +// If you finish before the timeout period ends, complete the lifecycle action. // // For more information, see Amazon EC2 Auto Scaling Lifecycle Hooks (https://docs.aws.amazon.com/autoscaling/ec2/userguide/lifecycle-hooks.html) // in the Amazon EC2 Auto Scaling User Guide. -// -// You can view the lifecycle hooks for an Auto Scaling group using DescribeLifecycleHooks. 
-// You can modify an existing lifecycle hook or create new lifecycle hooks using -// PutLifecycleHook. If you are no longer using a lifecycle hook, you can delete -// it using DeleteLifecycleHook. type LifecycleHookSpecification struct { _ struct{} `type:"structure"` @@ -11268,6 +12084,16 @@ type MetricCollectionType struct { // * GroupTerminatingInstances // // * GroupTotalInstances + // + // * GroupInServiceCapacity + // + // * GroupPendingCapacity + // + // * GroupStandbyCapacity + // + // * GroupTerminatingCapacity + // + // * GroupTotalCapacity Metric *string `min:"1" type:"string"` } @@ -11374,7 +12200,8 @@ func (s *MetricGranularityType) SetGranularity(v string) *MetricGranularityType // You can create a mixed instances policy for a new Auto Scaling group, or // you can create it for an existing group by updating the group to specify // MixedInstancesPolicy as the top-level parameter instead of a launch configuration -// or template. For more information, see CreateAutoScalingGroup and UpdateAutoScalingGroup. +// or launch template. For more information, see CreateAutoScalingGroup and +// UpdateAutoScalingGroup. type MixedInstancesPolicy struct { _ struct{} `type:"structure"` @@ -11386,7 +12213,7 @@ type MixedInstancesPolicy struct { // The launch template and instance types (overrides). // - // This parameter must be specified when creating a mixed instances policy. + // Required when creating a mixed instances policy. LaunchTemplate *LaunchTemplate `type:"structure"` } @@ -11506,7 +12333,9 @@ type PredefinedMetricSpecification struct { // a resource label unless the metric type is ALBRequestCountPerTarget and there // is a target group attached to the Auto Scaling group. // - // The format is app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id + // Elastic Load Balancing sends data about your load balancers to Amazon CloudWatch. + // CloudWatch collects the data and specifies the format to use to access the + // data. The format is app/load-balancer-name/load-balancer-id/targetgroup/target-group-name/target-group-id // , where // // * app/load-balancer-name/load-balancer-id is the final portion of the @@ -11514,6 +12343,12 @@ type PredefinedMetricSpecification struct { // // * targetgroup/target-group-name/target-group-id is the final portion of // the target group ARN. + // + // To find the ARN for an Application Load Balancer, use the DescribeLoadBalancers + // (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) + // API operation. To find the ARN for the target group, use the DescribeTargetGroups + // (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeTargetGroups.html) + // API operation. ResourceLabel *string `min:"1" type:"string"` } @@ -11576,6 +12411,8 @@ type ProcessType struct { // // * HealthCheck // + // * InstanceRefresh + // // * ReplaceUnhealthy // // * ScheduledActions @@ -11619,7 +12456,7 @@ type PutLifecycleHookInput struct { // // If the lifecycle hook times out, Amazon EC2 Auto Scaling performs the action // that you specified in the DefaultResult parameter. You can prevent the lifecycle - // hook from timing out by calling RecordLifecycleActionHeartbeat. + // hook from timing out by calling the RecordLifecycleActionHeartbeat API. HeartbeatTimeout *int64 `type:"integer"` // The name of the lifecycle hook. 
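// Editor's sketch (hedged): registering a termination lifecycle hook using the
// PutLifecycleHook fields documented above. The group name, hook name, and timeout
// are placeholders.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func putTerminationHook(svc *autoscaling.AutoScaling) error {
	_, err := svc.PutLifecycleHook(&autoscaling.PutLifecycleHookInput{
		AutoScalingGroupName: aws.String("example-asg"),   // placeholder
		LifecycleHookName:    aws.String("drain-on-exit"), // placeholder
		LifecycleTransition:  aws.String("autoscaling:EC2_INSTANCE_TERMINATING"),
		HeartbeatTimeout:     aws.Int64(300),
		DefaultResult:        aws.String("CONTINUE"),
	})
	return err
}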
@@ -11634,8 +12471,7 @@ type PutLifecycleHookInput struct { // // * autoscaling:EC2_INSTANCE_TERMINATING // - // Conditional: This parameter is required for new lifecycle hooks, but optional - // when updating existing hooks. + // Required for new lifecycle hooks, but optional when updating existing hooks. LifecycleTransition *string `type:"string"` // Additional information that you want to include any time Amazon EC2 Auto @@ -11661,8 +12497,7 @@ type PutLifecycleHookInput struct { // the specified notification target, for example, an Amazon SNS topic or an // Amazon SQS queue. // - // Conditional: This parameter is required for new lifecycle hooks, but optional - // when updating existing hooks. + // Required for new lifecycle hooks, but optional when updating existing hooks. RoleARN *string `min:"1" type:"string"` } @@ -11774,8 +12609,9 @@ type PutNotificationConfigurationInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The type of event that causes the notification to be sent. For more information - // about notification types supported by Amazon EC2 Auto Scaling, see DescribeAutoScalingNotificationTypes. + // The type of event that causes the notification to be sent. To query the notification + // types supported by Amazon EC2 Auto Scaling, call the DescribeAutoScalingNotificationTypes + // API. // // NotificationTypes is a required field NotificationTypes []*string `type:"list" required:"true"` @@ -11857,11 +12693,11 @@ func (s PutNotificationConfigurationOutput) GoString() string { type PutScalingPolicyInput struct { _ struct{} `type:"structure"` - // Specifies whether the ScalingAdjustment parameter is an absolute number or - // a percentage of the current capacity. The valid values are ChangeInCapacity, - // ExactCapacity, and PercentChangeInCapacity. + // Specifies how the scaling adjustment is interpreted (for example, an absolute + // number or a percentage). The valid values are ChangeInCapacity, ExactCapacity, + // and PercentChangeInCapacity. // - // Valid only if the policy type is StepScaling or SimpleScaling. For more information, + // Required if the policy type is StepScaling or SimpleScaling. For more information, // see Scaling Adjustment Types (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html#as-scaling-adjustment) // in the Amazon EC2 Auto Scaling User Guide. AdjustmentType *string `min:"1" type:"string"` @@ -11871,12 +12707,12 @@ type PutScalingPolicyInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The amount of time, in seconds, after a scaling activity completes before - // any further dynamic scaling activities can start. If this parameter is not - // specified, the default cooldown period for the group applies. + // The duration of the policy's cooldown period, in seconds. When a cooldown + // period is specified here, it overrides the default cooldown period defined + // for the Auto Scaling group. // // Valid only if the policy type is SimpleScaling. For more information, see - // Scaling Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // Scaling Cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) // in the Amazon EC2 Auto Scaling User Guide. 
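// Editor's sketch (hedged): a simple scaling policy using the AdjustmentType and
// policy-level Cooldown semantics described above. Names and values are placeholders.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func putSimpleScalingPolicy(svc *autoscaling.AutoScaling) error {
	_, err := svc.PutScalingPolicy(&autoscaling.PutScalingPolicyInput{
		AutoScalingGroupName: aws.String("example-asg"),    // placeholder
		PolicyName:           aws.String("scale-out-by-1"), // placeholder
		PolicyType:           aws.String("SimpleScaling"),
		AdjustmentType:       aws.String("ChangeInCapacity"),
		ScalingAdjustment:    aws.Int64(1),
		Cooldown:             aws.Int64(300), // overrides the group's default cooldown
	})
	return err
}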
Cooldown *int64 `type:"integer"` @@ -11887,10 +12723,10 @@ type PutScalingPolicyInput struct { Enabled *bool `type:"boolean"` // The estimated time, in seconds, until a newly launched instance can contribute - // to the CloudWatch metrics. The default is to use the value specified for - // the default cooldown period for the group. + // to the CloudWatch metrics. If not provided, the default is to use the value + // from the default cooldown period for the Auto Scaling group. // - // Valid only if the policy type is StepScaling or TargetTrackingScaling. + // Valid only if the policy type is TargetTrackingScaling or StepScaling. EstimatedInstanceWarmup *int64 `type:"integer"` // The aggregation type for the CloudWatch metrics. The valid values are Minimum, @@ -11900,19 +12736,19 @@ type PutScalingPolicyInput struct { // Valid only if the policy type is StepScaling. MetricAggregationType *string `min:"1" type:"string"` - // The minimum number of instances to scale. If the value of AdjustmentType - // is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity - // of the Auto Scaling group by at least this many instances. Otherwise, the - // error is ValidationError. + // The minimum value to scale by when the adjustment type is PercentChangeInCapacity. + // For example, suppose that you create a step scaling policy to scale out an + // Auto Scaling group by 25 percent and you specify a MinAdjustmentMagnitude + // of 2. If the group has 4 instances and the scaling policy is performed, 25 + // percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude + // of 2, Amazon EC2 Auto Scaling scales out the group by 2 instances. // - // This property replaces the MinAdjustmentStep property. For example, suppose - // that you create a step scaling policy to scale out an Auto Scaling group - // by 25 percent and you specify a MinAdjustmentMagnitude of 2. If the group - // has 4 instances and the scaling policy is performed, 25 percent of 4 is 1. - // However, because you specified a MinAdjustmentMagnitude of 2, Amazon EC2 - // Auto Scaling scales out the group by 2 instances. + // Valid only if the policy type is StepScaling or SimpleScaling. For more information, + // see Scaling Adjustment Types (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html#as-scaling-adjustment) + // in the Amazon EC2 Auto Scaling User Guide. // - // Valid only if the policy type is SimpleScaling or StepScaling. + // Some Auto Scaling groups use instance weights. In this case, set the MinAdjustmentMagnitude + // to a value that is at least as large as your largest instance weight. MinAdjustmentMagnitude *int64 `type:"integer"` // Available for backward compatibility. Use MinAdjustmentMagnitude instead. @@ -11923,36 +12759,50 @@ type PutScalingPolicyInput struct { // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` - // The policy type. The valid values are SimpleScaling, StepScaling, and TargetTrackingScaling. - // If the policy type is null, the value is treated as SimpleScaling. + // One of the following policy types: + // + // * TargetTrackingScaling + // + // * StepScaling + // + // * SimpleScaling (default) PolicyType *string `min:"1" type:"string"` - // The amount by which a simple scaling policy scales the Auto Scaling group - // in response to an alarm breach. The adjustment is based on the value that - // you specified in the AdjustmentType parameter (either an absolute number - // or a percentage). 
A positive value adds to the current capacity and a negative - // value subtracts from the current capacity. For exact capacity, you must specify - // a positive value. + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current capacity while a negative number removes from the + // current capacity. For exact capacity, you must specify a positive value. // - // Conditional: If you specify SimpleScaling for the policy type, you must specify - // this parameter. (Not used with any other policy type.) + // Required if the policy type is SimpleScaling. (Not used with any other policy + // type.) ScalingAdjustment *int64 `type:"integer"` // A set of adjustments that enable you to scale based on the size of the alarm // breach. // - // Conditional: If you specify StepScaling for the policy type, you must specify - // this parameter. (Not used with any other policy type.) + // Required if the policy type is StepScaling. (Not used with any other policy + // type.) StepAdjustments []*StepAdjustment `type:"list"` // A target tracking scaling policy. Includes support for predefined or customized // metrics. // + // The following predefined metrics are available: + // + // * ASGAverageCPUUtilization + // + // * ASGAverageNetworkIn + // + // * ASGAverageNetworkOut + // + // * ALBRequestCountPerTarget + // + // If you specify ALBRequestCountPerTarget for the metric, you must specify + // the ResourceLabel parameter with the PredefinedMetricSpecification. + // // For more information, see TargetTrackingConfiguration (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_TargetTrackingConfiguration.html) // in the Amazon EC2 Auto Scaling API Reference. // - // Conditional: If you specify TargetTrackingScaling for the policy type, you - // must specify this parameter. (Not used with any other policy type.) + // Required if the policy type is TargetTrackingScaling. TargetTrackingConfiguration *TargetTrackingConfiguration `type:"structure"` } @@ -12131,17 +12981,19 @@ type PutScheduledUpdateGroupActionInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The number of EC2 instances that should be running in the Auto Scaling group. + // The desired capacity is the initial capacity of the Auto Scaling group after + // the scheduled action runs and the capacity it attempts to maintain. It can + // scale beyond this capacity if you add more scaling conditions. DesiredCapacity *int64 `type:"integer"` // The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling // does not perform the action after this time. EndTime *time.Time `type:"timestamp"` - // The maximum number of instances in the Auto Scaling group. + // The maximum size of the Auto Scaling group. MaxSize *int64 `type:"integer"` - // The minimum number of instances in the Auto Scaling group. + // The minimum size of the Auto Scaling group. MinSize *int64 `type:"integer"` // The recurring schedule for this action, in Unix cron syntax format. This @@ -12375,6 +13227,45 @@ func (s RecordLifecycleActionHeartbeatOutput) GoString() string { return s.String() } +// Describes information used to start an instance refresh. +type RefreshPreferences struct { + _ struct{} `type:"structure"` + + // The number of seconds until a newly launched instance is configured and ready + // to use. During this time, Amazon EC2 Auto Scaling does not immediately move + // on to the next replacement. 
The default is to use the value for the health + // check grace period defined for the group. + InstanceWarmup *int64 `type:"integer"` + + // The amount of capacity in the Auto Scaling group that must remain healthy + // during an instance refresh to allow the operation to continue, as a percentage + // of the desired capacity of the Auto Scaling group (rounded up to the nearest + // integer). The default is 90. + MinHealthyPercentage *int64 `type:"integer"` +} + +// String returns the string representation +func (s RefreshPreferences) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RefreshPreferences) GoString() string { + return s.String() +} + +// SetInstanceWarmup sets the InstanceWarmup field's value. +func (s *RefreshPreferences) SetInstanceWarmup(v int64) *RefreshPreferences { + s.InstanceWarmup = &v + return s +} + +// SetMinHealthyPercentage sets the MinHealthyPercentage field's value. +func (s *RefreshPreferences) SetMinHealthyPercentage(v int64) *RefreshPreferences { + s.MinHealthyPercentage = &v + return s +} + type ResumeProcessesOutput struct { _ struct{} `type:"structure"` } @@ -12393,8 +13284,9 @@ func (s ResumeProcessesOutput) GoString() string { type ScalingPolicy struct { _ struct{} `type:"structure"` - // The adjustment type, which specifies how ScalingAdjustment is interpreted. - // The valid values are ChangeInCapacity, ExactCapacity, and PercentChangeInCapacity. + // Specifies how the scaling adjustment is interpreted (for example, an absolute + // number or a percentage). The valid values are ChangeInCapacity, ExactCapacity, + // and PercentChangeInCapacity. AdjustmentType *string `min:"1" type:"string"` // The CloudWatch alarms related to the policy. @@ -12403,8 +13295,7 @@ type ScalingPolicy struct { // The name of the Auto Scaling group. AutoScalingGroupName *string `min:"1" type:"string"` - // The amount of time, in seconds, after a scaling activity completes before - // any further dynamic scaling activities can start. + // The duration of the policy's cooldown period, in seconds. Cooldown *int64 `type:"integer"` // Indicates whether the policy is enabled (true) or disabled (false). @@ -12418,10 +13309,7 @@ type ScalingPolicy struct { // Maximum, and Average. MetricAggregationType *string `min:"1" type:"string"` - // The minimum number of instances to scale. If the value of AdjustmentType - // is PercentChangeInCapacity, the scaling policy changes the DesiredCapacity - // of the Auto Scaling group by at least this many instances. Otherwise, the - // error is ValidationError. + // The minimum value to scale by when the adjustment type is PercentChangeInCapacity. MinAdjustmentMagnitude *int64 `type:"integer"` // Available for backward compatibility. Use MinAdjustmentMagnitude instead. @@ -12433,7 +13321,17 @@ type ScalingPolicy struct { // The name of the scaling policy. PolicyName *string `min:"1" type:"string"` - // The policy type. The valid values are SimpleScaling, StepScaling, and TargetTrackingScaling. + // One of the following policy types: + // + // * TargetTrackingScaling + // + // * StepScaling + // + // * SimpleScaling (default) + // + // For more information, see Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-target-tracking.html) + // and Step and Simple Scaling Policies (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html) + // in the Amazon EC2 Auto Scaling User Guide. 
PolicyType *string `min:"1" type:"string"` // The amount by which to scale, based on the specified adjustment type. A positive @@ -12557,24 +13455,27 @@ type ScalingProcessQuery struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // One or more of the following processes. If you omit this parameter, all processes - // are specified. + // One or more of the following processes: // // * Launch // // * Terminate // - // * HealthCheck + // * AddToLoadBalancer // - // * ReplaceUnhealthy + // * AlarmNotification // // * AZRebalance // - // * AlarmNotification + // * HealthCheck + // + // * InstanceRefresh + // + // * ReplaceUnhealthy // // * ScheduledActions // - // * AddToLoadBalancer + // If you omit this parameter, all processes are specified. ScalingProcesses []*string `type:"list"` } @@ -12616,24 +13517,25 @@ func (s *ScalingProcessQuery) SetScalingProcesses(v []*string) *ScalingProcessQu return s } -// Describes a scheduled scaling action. Used in response to DescribeScheduledActions. +// Describes a scheduled scaling action. type ScheduledUpdateGroupAction struct { _ struct{} `type:"structure"` // The name of the Auto Scaling group. AutoScalingGroupName *string `min:"1" type:"string"` - // The number of instances you prefer to maintain in the group. + // The desired capacity is the initial capacity of the Auto Scaling group after + // the scheduled action runs and the capacity it attempts to maintain. DesiredCapacity *int64 `type:"integer"` // The date and time in UTC for the recurring schedule to end. For example, // "2019-06-01T00:00:00Z". EndTime *time.Time `type:"timestamp"` - // The maximum number of instances in the Auto Scaling group. + // The maximum size of the Auto Scaling group. MaxSize *int64 `type:"integer"` - // The minimum number of instances in the Auto Scaling group. + // The minimum size of the Auto Scaling group. MinSize *int64 `type:"integer"` // The recurring schedule for the action, in Unix cron syntax format. @@ -12725,25 +13627,26 @@ func (s *ScheduledUpdateGroupAction) SetTime(v time.Time) *ScheduledUpdateGroupA return s } -// Describes one or more scheduled scaling action updates for a specified Auto -// Scaling group. Used in combination with BatchPutScheduledUpdateGroupAction. +// Describes information used for one or more scheduled scaling action updates +// in a BatchPutScheduledUpdateGroupAction operation. // // When updating a scheduled scaling action, all optional parameters are left // unchanged if not specified. type ScheduledUpdateGroupActionRequest struct { _ struct{} `type:"structure"` - // The number of EC2 instances that should be running in the group. + // The desired capacity is the initial capacity of the Auto Scaling group after + // the scheduled action runs and the capacity it attempts to maintain. DesiredCapacity *int64 `type:"integer"` // The date and time for the recurring schedule to end. Amazon EC2 Auto Scaling // does not perform the action after this time. EndTime *time.Time `type:"timestamp"` - // The maximum number of instances in the Auto Scaling group. + // The maximum size of the Auto Scaling group. MaxSize *int64 `type:"integer"` - // The minimum number of instances in the Auto Scaling group. + // The minimum size of the Auto Scaling group. MinSize *int64 `type:"integer"` // The recurring schedule for the action, in Unix cron syntax format. 
This format @@ -12851,7 +13754,8 @@ type SetDesiredCapacityInput struct { // AutoScalingGroupName is a required field AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The number of EC2 instances that should be running in the Auto Scaling group. + // The desired capacity is the initial capacity of the Auto Scaling group after + // this operation completes and the capacity it attempts to maintain. // // DesiredCapacity is a required field DesiredCapacity *int64 `type:"integer" required:"true"` @@ -12944,7 +13848,9 @@ type SetInstanceHealthInput struct { // Set this to False, to have the call not respect the grace period associated // with the group. // - // For more information about the health check grace period, see CreateAutoScalingGroup. + // For more information about the health check grace period, see CreateAutoScalingGroup + // (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_CreateAutoScalingGroup.html) + // in the Amazon EC2 Auto Scaling API Reference. ShouldRespectGracePeriod *bool `type:"boolean"` } @@ -13096,9 +14002,103 @@ func (s SetInstanceProtectionOutput) GoString() string { return s.String() } -// Describes an adjustment based on the difference between the value of the -// aggregated CloudWatch metric and the breach threshold that you've defined -// for the alarm. Used in combination with PutScalingPolicy. +type StartInstanceRefreshInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + // + // AutoScalingGroupName is a required field + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // Set of preferences associated with the instance refresh request. + // + // If not provided, the default values are used. For MinHealthyPercentage, the + // default value is 90. For InstanceWarmup, the default is to use the value + // specified for the health check grace period for the Auto Scaling group. + // + // For more information, see RefreshPreferences (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_RefreshPreferences.html) + // in the Amazon EC2 Auto Scaling API Reference. + Preferences *RefreshPreferences `type:"structure"` + + // The strategy to use for the instance refresh. The only valid value is Rolling. + // + // A rolling update is an update that is applied to all instances in an Auto + // Scaling group until all instances have been updated. A rolling update can + // fail due to failed health checks or if instances are on standby or are protected + // from scale in. If the rolling update process fails, any instances that were + // already replaced are not rolled back to their previous configuration. + Strategy *string `type:"string" enum:"RefreshStrategy"` +} + +// String returns the string representation +func (s StartInstanceRefreshInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstanceRefreshInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
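// Editor's sketch (hedged): starting a rolling instance refresh with the preferences
// documented above. The group name and tuning values are placeholders.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func startRollingRefresh(svc *autoscaling.AutoScaling) error {
	out, err := svc.StartInstanceRefresh(&autoscaling.StartInstanceRefreshInput{
		AutoScalingGroupName: aws.String("example-asg"), // placeholder
		Strategy:             aws.String("Rolling"),     // the only valid strategy
		Preferences: &autoscaling.RefreshPreferences{
			InstanceWarmup:       aws.Int64(120), // placeholder warm-up, in seconds
			MinHealthyPercentage: aws.Int64(90),
		},
	})
	if err != nil {
		return err
	}
	fmt.Println("started refresh:", aws.StringValue(out.InstanceRefreshId))
	return nil
}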
+func (s *StartInstanceRefreshInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartInstanceRefreshInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoScalingGroupName sets the AutoScalingGroupName field's value. +func (s *StartInstanceRefreshInput) SetAutoScalingGroupName(v string) *StartInstanceRefreshInput { + s.AutoScalingGroupName = &v + return s +} + +// SetPreferences sets the Preferences field's value. +func (s *StartInstanceRefreshInput) SetPreferences(v *RefreshPreferences) *StartInstanceRefreshInput { + s.Preferences = v + return s +} + +// SetStrategy sets the Strategy field's value. +func (s *StartInstanceRefreshInput) SetStrategy(v string) *StartInstanceRefreshInput { + s.Strategy = &v + return s +} + +type StartInstanceRefreshOutput struct { + _ struct{} `type:"structure"` + + // A unique ID for tracking the progress of the request. + InstanceRefreshId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s StartInstanceRefreshOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartInstanceRefreshOutput) GoString() string { + return s.String() +} + +// SetInstanceRefreshId sets the InstanceRefreshId field's value. +func (s *StartInstanceRefreshOutput) SetInstanceRefreshId(v string) *StartInstanceRefreshOutput { + s.InstanceRefreshId = &v + return s +} + +// Describes information used to create a step adjustment for a step scaling +// policy. // // For the following examples, suppose that you have an alarm with a breach // threshold of 50: @@ -13124,6 +14124,9 @@ func (s SetInstanceProtectionOutput) GoString() string { // with a null upper bound. // // * The upper and lower bound can't be null in the same step adjustment. +// +// For more information, see Step Adjustments (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-scaling-simple-step.html#as-scaling-steps) +// in the Amazon EC2 Auto Scaling User Guide. type StepAdjustment struct { _ struct{} `type:"structure"` @@ -13207,8 +14210,10 @@ func (s SuspendProcessesOutput) GoString() string { return s.String() } -// Describes an automatic scaling process that has been suspended. For more -// information, see ProcessType. +// Describes an automatic scaling process that has been suspended. +// +// For more information, see Scaling Processes (https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html#process-types) +// in the Amazon EC2 Auto Scaling User Guide. type SuspendedProcess struct { _ struct{} `type:"structure"` @@ -13554,16 +14559,17 @@ type UpdateAutoScalingGroupInput struct { AvailabilityZones []*string `min:"1" type:"list"` // The amount of time, in seconds, after a scaling activity completes before - // another scaling activity can start. The default value is 300. This cooldown - // period is not used when a scaling-specific cooldown is specified. + // another scaling activity can start. The default value is 300. // - // Cooldown periods are not supported for target tracking scaling policies, - // step scaling policies, or scheduled scaling. 
For more information, see Scaling - // Cooldowns (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) + // This setting applies when using simple scaling policies, but not when using + // other scaling policies or scheduled scaling. For more information, see Scaling + // Cooldowns for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/Cooldown.html) // in the Amazon EC2 Auto Scaling User Guide. DefaultCooldown *int64 `type:"integer"` - // The number of EC2 instances that should be running in the Auto Scaling group. + // The desired capacity is the initial capacity of the Auto Scaling group after + // this operation completes and the capacity it attempts to maintain. + // // This number must be greater than or equal to the minimum size of the group // and less than or equal to the maximum size of the group. DesiredCapacity *int64 `type:"integer"` @@ -13575,7 +14581,7 @@ type UpdateAutoScalingGroupInput struct { // For more information, see Health Check Grace Period (https://docs.aws.amazon.com/autoscaling/ec2/userguide/healthcheck.html#health-check-grace-period) // in the Amazon EC2 Auto Scaling User Guide. // - // Conditional: This parameter is required if you are adding an ELB health check. + // Required if you are adding an ELB health check. HealthCheckGracePeriod *int64 `type:"integer"` // The service to use for the health checks. The valid values are EC2 and ELB. @@ -13597,15 +14603,26 @@ type UpdateAutoScalingGroupInput struct { LaunchTemplate *LaunchTemplateSpecification `type:"structure"` // The maximum amount of time, in seconds, that an instance can be in service. + // The default is null. + // + // This parameter is optional, but if you specify a value for it, you must specify + // a value of at least 604,800 seconds (7 days). To clear a previously set value, + // specify a new value of 0. // // For more information, see Replacing Auto Scaling Instances Based on Maximum // Instance Lifetime (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-max-instance-lifetime.html) // in the Amazon EC2 Auto Scaling User Guide. // - // Valid Range: Minimum value of 604800. + // Valid Range: Minimum value of 0. MaxInstanceLifetime *int64 `type:"integer"` // The maximum size of the Auto Scaling group. + // + // With a mixed instances policy that uses instance weighting, Amazon EC2 Auto + // Scaling may need to go above MaxSize to meet your capacity requirements. + // In this event, Amazon EC2 Auto Scaling will never go above MaxSize by more + // than your largest instance weight (weights that define how many units each + // instance contributes to the desired capacity of the group). MaxSize *int64 `type:"integer"` // The minimum size of the Auto Scaling group. 
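A minimal sketch of how the UpdateAutoScalingGroup fields described above fit together. It is not part of this patch; the group name and numeric values are placeholders, and per the documentation MaxInstanceLifetime must be at least 604,800 seconds (or 0 to clear a previously set value).

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession()))

	_, err := svc.UpdateAutoScalingGroup(&autoscaling.UpdateAutoScalingGroupInput{
		AutoScalingGroupName: aws.String("my-asg"), // placeholder
		MinSize:              aws.Int64(1),
		MaxSize:              aws.Int64(6),
		DesiredCapacity:      aws.Int64(3),      // must be between MinSize and MaxSize
		DefaultCooldown:      aws.Int64(300),    // applies to simple scaling policies only
		MaxInstanceLifetime:  aws.Int64(604800), // 7 days, the minimum non-zero value
	})
	if err != nil {
		log.Fatal(err)
	}
}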
@@ -13829,6 +14846,70 @@ func (s UpdateAutoScalingGroupOutput) GoString() string { return s.String() } +const ( + // InstanceMetadataEndpointStateDisabled is a InstanceMetadataEndpointState enum value + InstanceMetadataEndpointStateDisabled = "disabled" + + // InstanceMetadataEndpointStateEnabled is a InstanceMetadataEndpointState enum value + InstanceMetadataEndpointStateEnabled = "enabled" +) + +// InstanceMetadataEndpointState_Values returns all elements of the InstanceMetadataEndpointState enum +func InstanceMetadataEndpointState_Values() []string { + return []string{ + InstanceMetadataEndpointStateDisabled, + InstanceMetadataEndpointStateEnabled, + } +} + +const ( + // InstanceMetadataHttpTokensStateOptional is a InstanceMetadataHttpTokensState enum value + InstanceMetadataHttpTokensStateOptional = "optional" + + // InstanceMetadataHttpTokensStateRequired is a InstanceMetadataHttpTokensState enum value + InstanceMetadataHttpTokensStateRequired = "required" +) + +// InstanceMetadataHttpTokensState_Values returns all elements of the InstanceMetadataHttpTokensState enum +func InstanceMetadataHttpTokensState_Values() []string { + return []string{ + InstanceMetadataHttpTokensStateOptional, + InstanceMetadataHttpTokensStateRequired, + } +} + +const ( + // InstanceRefreshStatusPending is a InstanceRefreshStatus enum value + InstanceRefreshStatusPending = "Pending" + + // InstanceRefreshStatusInProgress is a InstanceRefreshStatus enum value + InstanceRefreshStatusInProgress = "InProgress" + + // InstanceRefreshStatusSuccessful is a InstanceRefreshStatus enum value + InstanceRefreshStatusSuccessful = "Successful" + + // InstanceRefreshStatusFailed is a InstanceRefreshStatus enum value + InstanceRefreshStatusFailed = "Failed" + + // InstanceRefreshStatusCancelling is a InstanceRefreshStatus enum value + InstanceRefreshStatusCancelling = "Cancelling" + + // InstanceRefreshStatusCancelled is a InstanceRefreshStatus enum value + InstanceRefreshStatusCancelled = "Cancelled" +) + +// InstanceRefreshStatus_Values returns all elements of the InstanceRefreshStatus enum +func InstanceRefreshStatus_Values() []string { + return []string{ + InstanceRefreshStatusPending, + InstanceRefreshStatusInProgress, + InstanceRefreshStatusSuccessful, + InstanceRefreshStatusFailed, + InstanceRefreshStatusCancelling, + InstanceRefreshStatusCancelled, + } +} + const ( // LifecycleStatePending is a LifecycleState enum value LifecycleStatePending = "Pending" @@ -13870,6 +14951,25 @@ const ( LifecycleStateStandby = "Standby" ) +// LifecycleState_Values returns all elements of the LifecycleState enum +func LifecycleState_Values() []string { + return []string{ + LifecycleStatePending, + LifecycleStatePendingWait, + LifecycleStatePendingProceed, + LifecycleStateQuarantined, + LifecycleStateInService, + LifecycleStateTerminating, + LifecycleStateTerminatingWait, + LifecycleStateTerminatingProceed, + LifecycleStateTerminated, + LifecycleStateDetaching, + LifecycleStateDetached, + LifecycleStateEnteringStandby, + LifecycleStateStandby, + } +} + const ( // MetricStatisticAverage is a MetricStatistic enum value MetricStatisticAverage = "Average" @@ -13887,6 +14987,17 @@ const ( MetricStatisticSum = "Sum" ) +// MetricStatistic_Values returns all elements of the MetricStatistic enum +func MetricStatistic_Values() []string { + return []string{ + MetricStatisticAverage, + MetricStatisticMinimum, + MetricStatisticMaximum, + MetricStatisticSampleCount, + MetricStatisticSum, + } +} + const ( // MetricTypeAsgaverageCpuutilization is a 
MetricType enum value MetricTypeAsgaverageCpuutilization = "ASGAverageCPUUtilization" @@ -13901,6 +15012,28 @@ const ( MetricTypeAlbrequestCountPerTarget = "ALBRequestCountPerTarget" ) +// MetricType_Values returns all elements of the MetricType enum +func MetricType_Values() []string { + return []string{ + MetricTypeAsgaverageCpuutilization, + MetricTypeAsgaverageNetworkIn, + MetricTypeAsgaverageNetworkOut, + MetricTypeAlbrequestCountPerTarget, + } +} + +const ( + // RefreshStrategyRolling is a RefreshStrategy enum value + RefreshStrategyRolling = "Rolling" +) + +// RefreshStrategy_Values returns all elements of the RefreshStrategy enum +func RefreshStrategy_Values() []string { + return []string{ + RefreshStrategyRolling, + } +} + const ( // ScalingActivityStatusCodePendingSpotBidPlacement is a ScalingActivityStatusCode enum value ScalingActivityStatusCodePendingSpotBidPlacement = "PendingSpotBidPlacement" @@ -13938,3 +15071,21 @@ const ( // ScalingActivityStatusCodeCancelled is a ScalingActivityStatusCode enum value ScalingActivityStatusCodeCancelled = "Cancelled" ) + +// ScalingActivityStatusCode_Values returns all elements of the ScalingActivityStatusCode enum +func ScalingActivityStatusCode_Values() []string { + return []string{ + ScalingActivityStatusCodePendingSpotBidPlacement, + ScalingActivityStatusCodeWaitingForSpotInstanceRequestId, + ScalingActivityStatusCodeWaitingForSpotInstanceId, + ScalingActivityStatusCodeWaitingForInstanceId, + ScalingActivityStatusCodePreInService, + ScalingActivityStatusCodeInProgress, + ScalingActivityStatusCodeWaitingForElbconnectionDraining, + ScalingActivityStatusCodeMidLifecycleAction, + ScalingActivityStatusCodeWaitingForInstanceWarmup, + ScalingActivityStatusCodeSuccessful, + ScalingActivityStatusCodeFailed, + ScalingActivityStatusCodeCancelled, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go index 2e65ee3d9..85e907df7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/errors.go @@ -4,6 +4,13 @@ package autoscaling const ( + // ErrCodeActiveInstanceRefreshNotFoundFault for service response error code + // "ActiveInstanceRefreshNotFound". + // + // The request failed because an active instance refresh for the specified Auto + // Scaling group was not found. + ErrCodeActiveInstanceRefreshNotFoundFault = "ActiveInstanceRefreshNotFound" + // ErrCodeAlreadyExistsFault for service response error code // "AlreadyExists". // @@ -11,6 +18,13 @@ const ( // name. ErrCodeAlreadyExistsFault = "AlreadyExists" + // ErrCodeInstanceRefreshInProgressFault for service response error code + // "InstanceRefreshInProgress". + // + // The request failed because an active instance refresh operation already exists + // for the specified Auto Scaling group. + ErrCodeInstanceRefreshInProgressFault = "InstanceRefreshInProgress" + // ErrCodeInvalidNextToken for service response error code // "InvalidNextToken". // @@ -22,7 +36,8 @@ const ( // // You have already reached a limit for your Amazon EC2 Auto Scaling resources // (for example, Auto Scaling groups, launch configurations, or lifecycle hooks). - // For more information, see DescribeAccountLimits. + // For more information, see DescribeAccountLimits (https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_DescribeAccountLimits.html) + // in the Amazon EC2 Auto Scaling API Reference. 
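The StartInstanceRefresh operation, RefreshStrategyRolling constant, and ErrCodeInstanceRefreshInProgressFault added in these hunks might be used roughly as follows. This sketch is outside the patch; the group name and preference values are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.Must(session.NewSession()))

	out, err := svc.StartInstanceRefresh(&autoscaling.StartInstanceRefreshInput{
		AutoScalingGroupName: aws.String("my-asg"), // placeholder
		Strategy:             aws.String(autoscaling.RefreshStrategyRolling),
		Preferences: &autoscaling.RefreshPreferences{
			MinHealthyPercentage: aws.Int64(90),  // the documented default
			InstanceWarmup:       aws.Int64(300), // falls back to the health check grace period if omitted
		},
	})
	if err != nil {
		// Only one instance refresh can be active per Auto Scaling group at a time.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == autoscaling.ErrCodeInstanceRefreshInProgressFault {
			log.Fatalf("refresh already in progress: %s", aerr.Message())
		}
		log.Fatal(err)
	}
	fmt.Println("started refresh", aws.StringValue(out.InstanceRefreshId))
}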
ErrCodeLimitExceededFault = "LimitExceeded" // ErrCodeResourceContentionFault for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go index e89bc3d9d..644838dd3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/api.go b/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/api.go index 017b19196..da115a42b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/api.go @@ -612,8 +612,8 @@ func (s *ApplicationSource) SetTagFilters(v []*TagFilter) *ApplicationSource { // Concurrent updates caused an exception, for example, if you request an update // to a scaling plan that already has a pending update. type ConcurrentUpdateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -630,17 +630,17 @@ func (s ConcurrentUpdateException) GoString() string { func newErrorConcurrentUpdateException(v protocol.ResponseMetadata) error { return &ConcurrentUpdateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentUpdateException) Code() string { +func (s *ConcurrentUpdateException) Code() string { return "ConcurrentUpdateException" } // Message returns the exception's message. -func (s ConcurrentUpdateException) Message() string { +func (s *ConcurrentUpdateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -648,22 +648,22 @@ func (s ConcurrentUpdateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentUpdateException) OrigErr() error { +func (s *ConcurrentUpdateException) OrigErr() error { return nil } -func (s ConcurrentUpdateException) Error() string { +func (s *ConcurrentUpdateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentUpdateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentUpdateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentUpdateException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentUpdateException) RequestID() string { + return s.RespMetadata.RequestID } type CreateScalingPlanInput struct { @@ -1539,8 +1539,8 @@ func (s *GetScalingPlanResourceForecastDataOutput) SetDatapoints(v []*Datapoint) // The service encountered an internal error. 
type InternalServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1557,17 +1557,17 @@ func (s InternalServiceException) GoString() string { func newErrorInternalServiceException(v protocol.ResponseMetadata) error { return &InternalServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceException) Code() string { +func (s *InternalServiceException) Code() string { return "InternalServiceException" } // Message returns the exception's message. -func (s InternalServiceException) Message() string { +func (s *InternalServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1575,28 +1575,28 @@ func (s InternalServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServiceException) OrigErr() error { +func (s *InternalServiceException) OrigErr() error { return nil } -func (s InternalServiceException) Error() string { +func (s *InternalServiceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceException) RequestID() string { + return s.RespMetadata.RequestID } // The token provided is not valid. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1613,17 +1613,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1631,29 +1631,29 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // Your account exceeded a limit. This exception is thrown when a per-account // resource limit is exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1670,17 +1670,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1688,22 +1688,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Represents a dimension for a customized metric. @@ -1761,8 +1761,8 @@ func (s *MetricDimension) SetValue(v string) *MetricDimension { // The specified object could not be found. type ObjectNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1779,17 +1779,17 @@ func (s ObjectNotFoundException) GoString() string { func newErrorObjectNotFoundException(v protocol.ResponseMetadata) error { return &ObjectNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ObjectNotFoundException) Code() string { +func (s *ObjectNotFoundException) Code() string { return "ObjectNotFoundException" } // Message returns the exception's message. -func (s ObjectNotFoundException) Message() string { +func (s *ObjectNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1797,22 +1797,22 @@ func (s ObjectNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ObjectNotFoundException) OrigErr() error { +func (s *ObjectNotFoundException) OrigErr() error { return nil } -func (s ObjectNotFoundException) Error() string { +func (s *ObjectNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ObjectNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ObjectNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ObjectNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ObjectNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Represents a predefined metric that can be used for predictive scaling. @@ -2878,8 +2878,8 @@ func (s UpdateScalingPlanOutput) GoString() string { // An exception was thrown for a validation issue. Review the parameters provided. type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2896,17 +2896,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. -func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2914,22 +2914,22 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
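The pointer-receiver and exported RespMetadata changes above follow the SDK-wide move to typed service exceptions; callers generally keep inspecting them through the awserr interfaces, roughly as in this sketch (the scaling plan name is a placeholder and the error cases shown are only examples).

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscalingplans"
)

func main() {
	svc := autoscalingplans.New(session.Must(session.NewSession()))

	_, err := svc.DescribeScalingPlans(&autoscalingplans.DescribeScalingPlansInput{
		ScalingPlanNames: []*string{aws.String("my-plan")}, // placeholder
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case autoscalingplans.ErrCodeValidationException,
				autoscalingplans.ErrCodeInvalidNextTokenException:
				log.Fatalf("bad request: %s", aerr.Message())
			}
		}
		// RequestFailure exposes the HTTP status and request ID now carried in RespMetadata.
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
		}
		log.Fatal(err)
	}
}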
-func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } const ( @@ -2946,6 +2946,16 @@ const ( ForecastDataTypeScheduledActionMaxCapacity = "ScheduledActionMaxCapacity" ) +// ForecastDataType_Values returns all elements of the ForecastDataType enum +func ForecastDataType_Values() []string { + return []string{ + ForecastDataTypeCapacityForecast, + ForecastDataTypeLoadForecast, + ForecastDataTypeScheduledActionMinCapacity, + ForecastDataTypeScheduledActionMaxCapacity, + } +} + const ( // LoadMetricTypeAsgtotalCpuutilization is a LoadMetricType enum value LoadMetricTypeAsgtotalCpuutilization = "ASGTotalCPUUtilization" @@ -2960,6 +2970,16 @@ const ( LoadMetricTypeAlbtargetGroupRequestCount = "ALBTargetGroupRequestCount" ) +// LoadMetricType_Values returns all elements of the LoadMetricType enum +func LoadMetricType_Values() []string { + return []string{ + LoadMetricTypeAsgtotalCpuutilization, + LoadMetricTypeAsgtotalNetworkIn, + LoadMetricTypeAsgtotalNetworkOut, + LoadMetricTypeAlbtargetGroupRequestCount, + } +} + const ( // MetricStatisticAverage is a MetricStatistic enum value MetricStatisticAverage = "Average" @@ -2977,11 +2997,29 @@ const ( MetricStatisticSum = "Sum" ) +// MetricStatistic_Values returns all elements of the MetricStatistic enum +func MetricStatistic_Values() []string { + return []string{ + MetricStatisticAverage, + MetricStatisticMinimum, + MetricStatisticMaximum, + MetricStatisticSampleCount, + MetricStatisticSum, + } +} + const ( // PolicyTypeTargetTrackingScaling is a PolicyType enum value PolicyTypeTargetTrackingScaling = "TargetTrackingScaling" ) +// PolicyType_Values returns all elements of the PolicyType enum +func PolicyType_Values() []string { + return []string{ + PolicyTypeTargetTrackingScaling, + } +} + const ( // PredictiveScalingMaxCapacityBehaviorSetForecastCapacityToMaxCapacity is a PredictiveScalingMaxCapacityBehavior enum value PredictiveScalingMaxCapacityBehaviorSetForecastCapacityToMaxCapacity = "SetForecastCapacityToMaxCapacity" @@ -2993,6 +3031,15 @@ const ( PredictiveScalingMaxCapacityBehaviorSetMaxCapacityAboveForecastCapacity = "SetMaxCapacityAboveForecastCapacity" ) +// PredictiveScalingMaxCapacityBehavior_Values returns all elements of the PredictiveScalingMaxCapacityBehavior enum +func PredictiveScalingMaxCapacityBehavior_Values() []string { + return []string{ + PredictiveScalingMaxCapacityBehaviorSetForecastCapacityToMaxCapacity, + PredictiveScalingMaxCapacityBehaviorSetMaxCapacityToForecastCapacity, + PredictiveScalingMaxCapacityBehaviorSetMaxCapacityAboveForecastCapacity, + } +} + const ( // PredictiveScalingModeForecastAndScale is a PredictiveScalingMode enum value PredictiveScalingModeForecastAndScale = "ForecastAndScale" @@ -3001,6 +3048,14 @@ const ( PredictiveScalingModeForecastOnly = "ForecastOnly" ) +// PredictiveScalingMode_Values returns all elements of the PredictiveScalingMode enum +func PredictiveScalingMode_Values() []string { + return []string{ + PredictiveScalingModeForecastAndScale, + PredictiveScalingModeForecastOnly, + } +} + const ( // ScalableDimensionAutoscalingAutoScalingGroupDesiredCapacity is a ScalableDimension enum value ScalableDimensionAutoscalingAutoScalingGroupDesiredCapacity = "autoscaling:autoScalingGroup:DesiredCapacity" @@ -3027,6 +3082,20 @@ const ( ScalableDimensionDynamodbIndexWriteCapacityUnits = "dynamodb:index:WriteCapacityUnits" ) +// ScalableDimension_Values returns all elements of the 
ScalableDimension enum +func ScalableDimension_Values() []string { + return []string{ + ScalableDimensionAutoscalingAutoScalingGroupDesiredCapacity, + ScalableDimensionEcsServiceDesiredCount, + ScalableDimensionEc2SpotFleetRequestTargetCapacity, + ScalableDimensionRdsClusterReadReplicaCount, + ScalableDimensionDynamodbTableReadCapacityUnits, + ScalableDimensionDynamodbTableWriteCapacityUnits, + ScalableDimensionDynamodbIndexReadCapacityUnits, + ScalableDimensionDynamodbIndexWriteCapacityUnits, + } +} + const ( // ScalingMetricTypeAsgaverageCpuutilization is a ScalingMetricType enum value ScalingMetricTypeAsgaverageCpuutilization = "ASGAverageCPUUtilization" @@ -3068,6 +3137,25 @@ const ( ScalingMetricTypeEc2spotFleetRequestAverageNetworkOut = "EC2SpotFleetRequestAverageNetworkOut" ) +// ScalingMetricType_Values returns all elements of the ScalingMetricType enum +func ScalingMetricType_Values() []string { + return []string{ + ScalingMetricTypeAsgaverageCpuutilization, + ScalingMetricTypeAsgaverageNetworkIn, + ScalingMetricTypeAsgaverageNetworkOut, + ScalingMetricTypeDynamoDbreadCapacityUtilization, + ScalingMetricTypeDynamoDbwriteCapacityUtilization, + ScalingMetricTypeEcsserviceAverageCpuutilization, + ScalingMetricTypeEcsserviceAverageMemoryUtilization, + ScalingMetricTypeAlbrequestCountPerTarget, + ScalingMetricTypeRdsreaderAverageCpuutilization, + ScalingMetricTypeRdsreaderAverageDatabaseConnections, + ScalingMetricTypeEc2spotFleetRequestAverageCpuutilization, + ScalingMetricTypeEc2spotFleetRequestAverageNetworkIn, + ScalingMetricTypeEc2spotFleetRequestAverageNetworkOut, + } +} + const ( // ScalingPlanStatusCodeActive is a ScalingPlanStatusCode enum value ScalingPlanStatusCodeActive = "Active" @@ -3094,6 +3182,20 @@ const ( ScalingPlanStatusCodeUpdateFailed = "UpdateFailed" ) +// ScalingPlanStatusCode_Values returns all elements of the ScalingPlanStatusCode enum +func ScalingPlanStatusCode_Values() []string { + return []string{ + ScalingPlanStatusCodeActive, + ScalingPlanStatusCodeActiveWithProblems, + ScalingPlanStatusCodeCreationInProgress, + ScalingPlanStatusCodeCreationFailed, + ScalingPlanStatusCodeDeletionInProgress, + ScalingPlanStatusCodeDeletionFailed, + ScalingPlanStatusCodeUpdateInProgress, + ScalingPlanStatusCodeUpdateFailed, + } +} + const ( // ScalingPolicyUpdateBehaviorKeepExternalPolicies is a ScalingPolicyUpdateBehavior enum value ScalingPolicyUpdateBehaviorKeepExternalPolicies = "KeepExternalPolicies" @@ -3102,6 +3204,14 @@ const ( ScalingPolicyUpdateBehaviorReplaceExternalPolicies = "ReplaceExternalPolicies" ) +// ScalingPolicyUpdateBehavior_Values returns all elements of the ScalingPolicyUpdateBehavior enum +func ScalingPolicyUpdateBehavior_Values() []string { + return []string{ + ScalingPolicyUpdateBehaviorKeepExternalPolicies, + ScalingPolicyUpdateBehaviorReplaceExternalPolicies, + } +} + const ( // ScalingStatusCodeInactive is a ScalingStatusCode enum value ScalingStatusCodeInactive = "Inactive" @@ -3113,6 +3223,15 @@ const ( ScalingStatusCodeActive = "Active" ) +// ScalingStatusCode_Values returns all elements of the ScalingStatusCode enum +func ScalingStatusCode_Values() []string { + return []string{ + ScalingStatusCodeInactive, + ScalingStatusCodePartiallyActive, + ScalingStatusCodeActive, + } +} + const ( // ServiceNamespaceAutoscaling is a ServiceNamespace enum value ServiceNamespaceAutoscaling = "autoscaling" @@ -3129,3 +3248,14 @@ const ( // ServiceNamespaceDynamodb is a ServiceNamespace enum value ServiceNamespaceDynamodb = "dynamodb" ) + +// 
ServiceNamespace_Values returns all elements of the ServiceNamespace enum +func ServiceNamespace_Values() []string { + return []string{ + ServiceNamespaceAutoscaling, + ServiceNamespaceEcs, + ServiceNamespaceEc2, + ServiceNamespaceRds, + ServiceNamespaceDynamodb, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go b/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go index cd75a0299..9f5c1db6b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/backup/api.go b/vendor/github.com/aws/aws-sdk-go/service/backup/api.go index 827543e75..299d29150 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/backup/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/backup/api.go @@ -57,8 +57,9 @@ func (c *Backup) CreateBackupPlanRequest(input *CreateBackupPlanInput) (req *req // CreateBackupPlan API operation for AWS Backup. // -// Backup plans are documents that contain information that AWS Backup uses -// to schedule tasks that create recovery points of resources. +// Creates a backup plan using a backup plan name and backup rules. A backup +// plan is a document that contains information that AWS Backup uses to schedule +// tasks that create recovery points for resources. // // If you call CreateBackupPlan with a plan that already exists, an AlreadyExistsException // is returned. @@ -162,17 +163,17 @@ func (c *Backup) CreateBackupSelectionRequest(input *CreateBackupSelectionInput) // // * Resources: "arn:aws:ec2:region:account-id:volume/volume-id" // -// * ConditionKey:"department" ConditionValue:"finance" ConditionType:"STRINGEQUALS" +// * ConditionKey:"department" ConditionValue:"finance" ConditionType:"StringEquals" // -// * ConditionKey:"importance" ConditionValue:"critical" ConditionType:"STRINGEQUALS" +// * ConditionKey:"importance" ConditionValue:"critical" ConditionType:"StringEquals" // // Using these patterns would back up all Amazon Elastic Block Store (Amazon // EBS) volumes that are tagged as "department=finance", "importance=critical", -// in addition to an EBS volume with the specified volume Id. +// in addition to an EBS volume with the specified volume ID. // // Resources and conditions are additive in that all resources that match the // pattern are selected. This shouldn't be confused with a logical AND, where -// all conditions must match. The matching patterns are logically 'put together +// all conditions must match. The matching patterns are logically put together // using the OR operator. In other words, all patterns that match are selected // for backup. // @@ -921,7 +922,7 @@ func (c *Backup) DescribeBackupJobRequest(input *DescribeBackupJobInput) (req *r // DescribeBackupJob API operation for AWS Backup. // -// Returns metadata associated with creating a backup of a resource. +// Returns backup job details for the specified BackupJobId. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1193,7 +1194,7 @@ func (c *Backup) DescribeProtectedResourceRequest(input *DescribeProtectedResour // DescribeProtectedResource API operation for AWS Backup. // // Returns information about a saved resource, including the last time it was -// backed-up, its Amazon Resource Name (ARN), and the AWS service type of the +// backed up, its Amazon Resource Name (ARN), and the AWS service type of the // saved resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1329,6 +1330,89 @@ func (c *Backup) DescribeRecoveryPointWithContext(ctx aws.Context, input *Descri return out, req.Send() } +const opDescribeRegionSettings = "DescribeRegionSettings" + +// DescribeRegionSettingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRegionSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeRegionSettings for more information on using the DescribeRegionSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeRegionSettingsRequest method. +// req, resp := client.DescribeRegionSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeRegionSettings +func (c *Backup) DescribeRegionSettingsRequest(input *DescribeRegionSettingsInput) (req *request.Request, output *DescribeRegionSettingsOutput) { + op := &request.Operation{ + Name: opDescribeRegionSettings, + HTTPMethod: "GET", + HTTPPath: "/account-settings", + } + + if input == nil { + input = &DescribeRegionSettingsInput{} + } + + output = &DescribeRegionSettingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeRegionSettings API operation for AWS Backup. +// +// Returns the current service opt-in settings for the Region. If the service +// has a value set to true, AWS Backup tries to protect that service's resources +// in this Region, when included in an on-demand backup or scheduled backup +// plan. If the value is set to false for a service, AWS Backup does not try +// to protect that service's resources in this Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Backup's +// API operation DescribeRegionSettings for usage and error information. +// +// Returned Error Types: +// * ServiceUnavailableException +// The request failed due to a temporary failure of the server. 
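The DescribeRegionSettings operation being added here takes an empty input and returns the per-service opt-in map. A minimal sketch, with session setup assumed:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/backup"
)

func main() {
	svc := backup.New(session.Must(session.NewSession()))

	out, err := svc.DescribeRegionSettings(&backup.DescribeRegionSettingsInput{})
	if err != nil {
		log.Fatal(err)
	}
	// ResourceTypeOptInPreference maps a service name (for example "EFS") to whether
	// AWS Backup protects that service's resources in this Region.
	for service, optedIn := range out.ResourceTypeOptInPreference {
		fmt.Printf("%s: %t\n", service, aws.BoolValue(optedIn))
	}
}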
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/DescribeRegionSettings +func (c *Backup) DescribeRegionSettings(input *DescribeRegionSettingsInput) (*DescribeRegionSettingsOutput, error) { + req, out := c.DescribeRegionSettingsRequest(input) + return out, req.Send() +} + +// DescribeRegionSettingsWithContext is the same as DescribeRegionSettings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeRegionSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Backup) DescribeRegionSettingsWithContext(ctx aws.Context, input *DescribeRegionSettingsInput, opts ...request.Option) (*DescribeRegionSettingsOutput, error) { + req, out := c.DescribeRegionSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeRestoreJob = "DescribeRestoreJob" // DescribeRestoreJobRequest generates a "aws/request.Request" representing the @@ -1556,7 +1640,8 @@ func (c *Backup) GetBackupPlanRequest(input *GetBackupPlanInput) (req *request.R // GetBackupPlan API operation for AWS Backup. // -// Returns the body of a backup plan in JSON format, in addition to plan metadata. +// Returns BackupPlan details for the specified BackupPlanId. Returns the body +// of a backup plan in JSON format, in addition to plan metadata. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2271,7 +2356,7 @@ func (c *Backup) ListBackupJobsRequest(input *ListBackupJobsInput) (req *request // ListBackupJobs API operation for AWS Backup. // -// Returns metadata about your backup jobs. +// Returns a list of existing backup jobs for an authenticated account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2285,10 +2370,6 @@ func (c *Backup) ListBackupJobsRequest(input *ListBackupJobsInput) (req *request // Indicates that something is wrong with a parameter's value. For example, // the value is out of range. // -// * InvalidRequestException -// Indicates that something is wrong with the input to the request. For example, -// a parameter is of the wrong type. -// // * ServiceUnavailableException // The request failed due to a temporary failure of the server. // @@ -2713,9 +2794,11 @@ func (c *Backup) ListBackupPlansRequest(input *ListBackupPlansInput) (req *reque // ListBackupPlans API operation for AWS Backup. // -// Returns metadata of your saved backup plans, including Amazon Resource Names -// (ARNs), plan IDs, creation and deletion dates, version IDs, plan names, and -// creator request IDs. +// Returns a list of existing backup plans for an authenticated account. The +// list is populated only if the advanced option is set for the backup plan. +// The list contains information such as Amazon Resource Names (ARNs), plan +// IDs, creation and deletion dates, version IDs, plan names, and creator request +// IDs. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3889,6 +3972,8 @@ func (c *Backup) ListTagsRequest(input *ListTagsInput) (req *request.Request, ou // Returns a list of key-value pairs assigned to a target recovery point, backup // plan, or backup vault. // +// ListTags are currently only supported with Amazon EFS backups. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4210,7 +4295,7 @@ func (c *Backup) StartBackupJobRequest(input *StartBackupJobInput) (req *request // StartBackupJob API operation for AWS Backup. // -// Starts a job to create a one-time backup of the specified resource. +// Starts an on-demand backup job for the specified resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4769,9 +4854,8 @@ func (c *Backup) UpdateBackupPlanRequest(input *UpdateBackupPlanInput) (req *req // UpdateBackupPlan API operation for AWS Backup. // -// Replaces the body of a saved backup plan identified by its backupPlanId with -// the input document in JSON format. The new version is uniquely identified -// by a VersionId. +// Updates an existing backup plan identified by its backupPlanId with the input +// document in JSON format. The new version is uniquely identified by a VersionId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4915,10 +4999,147 @@ func (c *Backup) UpdateRecoveryPointLifecycleWithContext(ctx aws.Context, input return out, req.Send() } +const opUpdateRegionSettings = "UpdateRegionSettings" + +// UpdateRegionSettingsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRegionSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateRegionSettings for more information on using the UpdateRegionSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateRegionSettingsRequest method. +// req, resp := client.UpdateRegionSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/UpdateRegionSettings +func (c *Backup) UpdateRegionSettingsRequest(input *UpdateRegionSettingsInput) (req *request.Request, output *UpdateRegionSettingsOutput) { + op := &request.Operation{ + Name: opUpdateRegionSettings, + HTTPMethod: "PUT", + HTTPPath: "/account-settings", + } + + if input == nil { + input = &UpdateRegionSettingsInput{} + } + + output = &UpdateRegionSettingsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateRegionSettings API operation for AWS Backup. 
+// +// Updates the current service opt-in settings for the Region. If the service +// has a value set to true, AWS Backup tries to protect that service's resources +// in this Region, when included in an on-demand backup or scheduled backup +// plan. If the value is set to false for a service, AWS Backup does not try +// to protect that service's resources in this Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Backup's +// API operation UpdateRegionSettings for usage and error information. +// +// Returned Error Types: +// * ServiceUnavailableException +// The request failed due to a temporary failure of the server. +// +// * MissingParameterValueException +// Indicates that a required parameter is missing. +// +// * InvalidParameterValueException +// Indicates that something is wrong with a parameter's value. For example, +// the value is out of range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/backup-2018-11-15/UpdateRegionSettings +func (c *Backup) UpdateRegionSettings(input *UpdateRegionSettingsInput) (*UpdateRegionSettingsOutput, error) { + req, out := c.UpdateRegionSettingsRequest(input) + return out, req.Send() +} + +// UpdateRegionSettingsWithContext is the same as UpdateRegionSettings with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateRegionSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Backup) UpdateRegionSettingsWithContext(ctx aws.Context, input *UpdateRegionSettingsInput, opts ...request.Option) (*UpdateRegionSettingsOutput, error) { + req, out := c.UpdateRegionSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// A list of backup options for each resource type. +type AdvancedBackupSetting struct { + _ struct{} `type:"structure"` + + // Specifies the backup option for a selected resource. This option is only + // available for Windows VSS backup jobs. + // + // Valid value: "WindowsVSS”:“enabled". If enabled, creates a VSS Windows + // backup; otherwise, creates a regular backup. + // + // If you specify an invalid option, you get an InvalidParameterValueException + // exception. + // + // For more information about Windows VSS backups, see Creating a VSS-Enabled + // Windows Backup (https://docs.aws.amazon.com/aws-backup/latest/devguide/windows-backups.html). + BackupOptions map[string]*string `type:"map"` + + // The type of AWS resource to be backed up. For VSS Windows backups, the only + // supported resource type is Amazon EC2. + // + // Valid values: EC2. + ResourceType *string `type:"string"` +} + +// String returns the string representation +func (s AdvancedBackupSetting) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdvancedBackupSetting) GoString() string { + return s.String() +} + +// SetBackupOptions sets the BackupOptions field's value. 
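The AdvancedBackupSetting type introduced above carries the Windows VSS option; constructing one looks roughly like the sketch below. Attaching it to a backup plan (for example through an AdvancedBackupSettings field on the plan input passed to CreateBackupPlan) is assumed here rather than shown in this hunk.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/backup"
)

func main() {
	// Enable VSS-consistent backups for EC2 resources; an invalid value for
	// "WindowsVSS" results in an InvalidParameterValueException at request time.
	setting := &backup.AdvancedBackupSetting{
		ResourceType: aws.String("EC2"),
		BackupOptions: map[string]*string{
			"WindowsVSS": aws.String("enabled"),
		},
	}
	fmt.Println(setting.GoString())
}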
+func (s *AdvancedBackupSetting) SetBackupOptions(v map[string]*string) *AdvancedBackupSetting { + s.BackupOptions = v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *AdvancedBackupSetting) SetResourceType(v string) *AdvancedBackupSetting { + s.ResourceType = &v + return s +} + // The required resource already exists. type AlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Arn *string `type:"string"` @@ -4945,17 +5166,17 @@ func (s AlreadyExistsException) GoString() string { func newErrorAlreadyExistsException(v protocol.ResponseMetadata) error { return &AlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AlreadyExistsException) Code() string { +func (s *AlreadyExistsException) Code() string { return "AlreadyExistsException" } // Message returns the exception's message. -func (s AlreadyExistsException) Message() string { +func (s *AlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4963,22 +5184,22 @@ func (s AlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AlreadyExistsException) OrigErr() error { +func (s *AlreadyExistsException) OrigErr() error { return nil } -func (s AlreadyExistsException) Error() string { +func (s *AlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s AlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *AlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // Contains DeleteAt and MoveToColdStorageAt timestamps, which are used to specify @@ -5025,7 +5246,7 @@ func (s *CalculatedLifecycle) SetMoveToColdStorageAt(v time.Time) *CalculatedLif return s } -// Contains an array of triplets made up of a condition type (such as STRINGEQUALS), +// Contains an array of triplets made up of a condition type (such as StringEquals), // a key, and a value. Conditions are used to filter resources in a selection // that is assigned to a backup plan. type Condition struct { @@ -5037,7 +5258,7 @@ type Condition struct { // ConditionKey is a required field ConditionKey *string `type:"string" required:"true"` - // An operation, such as STRINGEQUALS, that is applied to a key-value pair used + // An operation, such as StringEquals, that is applied to a key-value pair used // to filter resources in a selection. // // ConditionType is a required field @@ -5157,16 +5378,19 @@ func (s *CopyAction) SetLifecycle(v *Lifecycle) *CopyAction { type CopyJob struct { _ struct{} `type:"structure"` + // The account ID that owns the copy job. + AccountId *string `type:"string"` + // The size, in bytes, of a copy job. BackupSizeInBytes *int64 `type:"long"` - // The date and time a job to create a copy job is completed, in Unix format - // and Coordinated Universal Time (UTC). The value of CompletionDate is accurate - // to milliseconds. 
For example, the value 1516925490.087 represents Friday, - // January 26, 2018 12:11:30.087 AM. + // The date and time a copy job is completed, in Unix format and Coordinated + // Universal Time (UTC). The value of CompletionDate is accurate to milliseconds. + // For example, the value 1516925490.087 represents Friday, January 26, 2018 + // 12:11:30.087 AM. CompletionDate *time.Time `type:"timestamp"` - // Uniquely identifies a request to AWS Backup to copy a resource. + // Uniquely identifies a copy job. CopyJobId *string `type:"string"` // Contains information about the backup plan and rule that AWS Backup used @@ -5191,9 +5415,9 @@ type CopyJob struct { // arn:aws:iam::123456789012:role/S3Access. IamRoleArn *string `type:"string"` - // The type of AWS resource to be copied; for example, an Amazon Elastic Block - // Store (Amazon EBS) volume or an Amazon Relational Database Service (Amazon - // RDS) database. + // The AWS resource to be copied; for example, an Amazon Elastic Block Store + // (Amazon EBS) volume or an Amazon Relational Database Service (Amazon RDS) + // database. ResourceArn *string `type:"string"` // The type of AWS resource to be copied; for example, an Amazon Elastic Block @@ -5208,10 +5432,10 @@ type CopyJob struct { // An ARN that uniquely identifies a source recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. SourceRecoveryPointArn *string `type:"string"` - // The current state of a resource recovery point. + // The current state of a copy job. State *string `type:"string" enum:"CopyJobState"` - // A detailed message explaining the status of the job that to copy a resource. + // A detailed message explaining the status of the job to copy a resource. StatusMessage *string `type:"string"` } @@ -5225,6 +5449,12 @@ func (s CopyJob) GoString() string { return s.String() } +// SetAccountId sets the AccountId field's value. +func (s *CopyJob) SetAccountId(v string) *CopyJob { + s.AccountId = &v + return s +} + // SetBackupSizeInBytes sets the BackupSizeInBytes field's value. func (s *CopyJob) SetBackupSizeInBytes(v int64) *CopyJob { s.BackupSizeInBytes = &v @@ -5324,7 +5554,7 @@ type CreateBackupPlanInput struct { BackupPlanTags map[string]*string `type:"map" sensitive:"true"` // Identifies the request and allows failed requests to be retried without the - // risk of executing the operation twice. If the request includes a CreatorRequestId + // risk of running the operation twice. If the request includes a CreatorRequestId // that matches an existing backup plan, that plan is returned. This parameter // is optional. CreatorRequestId *string `type:"string"` @@ -5379,6 +5609,10 @@ func (s *CreateBackupPlanInput) SetCreatorRequestId(v string) *CreateBackupPlanI type CreateBackupPlanOutput struct { _ struct{} `type:"structure"` + // A list of BackupOptions settings for a resource type. This option is only + // available for Windows VSS backup jobs. + AdvancedBackupSettings []*AdvancedBackupSetting `type:"list"` + // An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for // example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50. BackupPlanArn *string `type:"string"` @@ -5393,7 +5627,7 @@ type CreateBackupPlanOutput struct { CreationDate *time.Time `type:"timestamp"` // Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most - // 1024 bytes long. They cannot be edited. + // 1,024 bytes long. They cannot be edited. 
VersionId *string `type:"string"` } @@ -5407,6 +5641,12 @@ func (s CreateBackupPlanOutput) GoString() string { return s.String() } +// SetAdvancedBackupSettings sets the AdvancedBackupSettings field's value. +func (s *CreateBackupPlanOutput) SetAdvancedBackupSettings(v []*AdvancedBackupSetting) *CreateBackupPlanOutput { + s.AdvancedBackupSettings = v + return s +} + // SetBackupPlanArn sets the BackupPlanArn field's value. func (s *CreateBackupPlanOutput) SetBackupPlanArn(v string) *CreateBackupPlanOutput { s.BackupPlanArn = &v @@ -5447,7 +5687,7 @@ type CreateBackupSelectionInput struct { BackupSelection *Selection `type:"structure" required:"true"` // A unique string that identifies the request and allows failed requests to - // be retried without the risk of executing the operation twice. + // be retried without the risk of running the operation twice. CreatorRequestId *string `type:"string"` } @@ -5564,7 +5804,7 @@ type CreateBackupVaultInput struct { BackupVaultTags map[string]*string `type:"map" sensitive:"true"` // A unique string that identifies the request and allows failed requests to - // be retried without the risk of executing the operation twice. + // be retried without the risk of running the operation twice. CreatorRequestId *string `type:"string"` // The server-side encryption key that is used to protect your backups; for @@ -5722,13 +5962,13 @@ type DeleteBackupPlanOutput struct { BackupPlanId *string `type:"string"` // The date and time a backup plan is deleted, in Unix format and Coordinated - // Universal Time (UTC). The value of CreationDate is accurate to milliseconds. + // Universal Time (UTC). The value of DeletionDate is accurate to milliseconds. // For example, the value 1516925490.087 represents Friday, January 26, 2018 // 12:11:30.087 AM. DeletionDate *time.Time `type:"timestamp"` // Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most - // 1,024 bytes long. Version Ids cannot be edited. + // 1,024 bytes long. Version IDs cannot be edited. VersionId *string `type:"string"` } @@ -5902,7 +6142,7 @@ type DeleteBackupVaultInput struct { // The name of a logical container where backups are stored. Backup vaults are // identified by names that are unique to the account used to create them and - // theAWS Region where they are created. They consist of lowercase letters, + // the AWS Region where they are created. They consist of lowercase letters, // numbers, and hyphens. // // BackupVaultName is a required field @@ -6092,8 +6332,8 @@ func (s DeleteRecoveryPointOutput) GoString() string { // A dependent AWS service or resource returned an error to the AWS Backup service, // and the action cannot be completed. type DependencyFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -6116,17 +6356,17 @@ func (s DependencyFailureException) GoString() string { func newErrorDependencyFailureException(v protocol.ResponseMetadata) error { return &DependencyFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DependencyFailureException) Code() string { +func (s *DependencyFailureException) Code() string { return "DependencyFailureException" } // Message returns the exception's message. 
-func (s DependencyFailureException) Message() string { +func (s *DependencyFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6134,22 +6374,22 @@ func (s DependencyFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DependencyFailureException) OrigErr() error { +func (s *DependencyFailureException) OrigErr() error { return nil } -func (s DependencyFailureException) Error() string { +func (s *DependencyFailureException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s DependencyFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DependencyFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DependencyFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *DependencyFailureException) RequestID() string { + return s.RespMetadata.RequestID } type DescribeBackupJobInput struct { @@ -6196,12 +6436,24 @@ func (s *DescribeBackupJobInput) SetBackupJobId(v string) *DescribeBackupJobInpu type DescribeBackupJobOutput struct { _ struct{} `type:"structure"` + // Returns the account ID that owns the backup job. + AccountId *string `type:"string"` + // Uniquely identifies a request to AWS Backup to back up a resource. BackupJobId *string `type:"string"` + // Represents the options specified as part of backup plan or on-demand backup + // job. + BackupOptions map[string]*string `type:"map"` + // The size, in bytes, of a backup. BackupSizeInBytes *int64 `type:"long"` + // Represents the actual backup type selected for a backup job. For example, + // if a successful WindowsVSS backup was taken, BackupType returns “WindowsVSS”. + // If BackupType is empty, then it is a regular backup. + BackupType *string `type:"string"` + // An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for // example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault. BackupVaultArn *string `type:"string"` @@ -6217,8 +6469,8 @@ type DescribeBackupJobOutput struct { BytesTransferred *int64 `type:"long"` // The date and time that a job to create a backup job is completed, in Unix - // format and Coordinated Universal Time (UTC). The value of CreationDate is - // accurate to milliseconds. For example, the value 1516925490.087 represents + // format and Coordinated Universal Time (UTC). The value of CompletionDate + // is accurate to milliseconds. For example, the value 1516925490.087 represents // Friday, January 26, 2018 12:11:30.087 AM. CompletionDate *time.Time `type:"timestamp"` @@ -6254,7 +6506,7 @@ type DescribeBackupJobOutput struct { // on the resource type. ResourceArn *string `type:"string"` - // The type of AWS resource to be backed-up; for example, an Amazon Elastic + // The type of AWS resource to be backed up; for example, an Amazon Elastic // Block Store (Amazon EBS) volume or an Amazon Relational Database Service // (Amazon RDS) database. ResourceType *string `type:"string"` @@ -6285,18 +6537,36 @@ func (s DescribeBackupJobOutput) GoString() string { return s.String() } +// SetAccountId sets the AccountId field's value. +func (s *DescribeBackupJobOutput) SetAccountId(v string) *DescribeBackupJobOutput { + s.AccountId = &v + return s +} + // SetBackupJobId sets the BackupJobId field's value. 
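The new AccountId, BackupOptions, and BackupType fields on DescribeBackupJobOutput can be read as in this sketch (not part of the patch; the backup job ID is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/backup"
)

func main() {
	svc := backup.New(session.Must(session.NewSession()))

	out, err := svc.DescribeBackupJob(&backup.DescribeBackupJobInput{
		BackupJobId: aws.String("example-backup-job-id"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// BackupType is "WindowsVSS" for successful VSS backups and empty for regular backups.
	fmt.Println(aws.StringValue(out.AccountId), aws.StringValue(out.BackupType))
	for k, v := range out.BackupOptions {
		fmt.Printf("option %s=%s\n", k, aws.StringValue(v))
	}
}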
func (s *DescribeBackupJobOutput) SetBackupJobId(v string) *DescribeBackupJobOutput { s.BackupJobId = &v return s } +// SetBackupOptions sets the BackupOptions field's value. +func (s *DescribeBackupJobOutput) SetBackupOptions(v map[string]*string) *DescribeBackupJobOutput { + s.BackupOptions = v + return s +} + // SetBackupSizeInBytes sets the BackupSizeInBytes field's value. func (s *DescribeBackupJobOutput) SetBackupSizeInBytes(v int64) *DescribeBackupJobOutput { s.BackupSizeInBytes = &v return s } +// SetBackupType sets the BackupType field's value. +func (s *DescribeBackupJobOutput) SetBackupType(v string) *DescribeBackupJobOutput { + s.BackupType = &v + return s +} + // SetBackupVaultArn sets the BackupVaultArn field's value. func (s *DescribeBackupJobOutput) SetBackupVaultArn(v string) *DescribeBackupJobOutput { s.BackupVaultArn = &v @@ -6451,7 +6721,7 @@ type DescribeBackupVaultOutput struct { CreationDate *time.Time `type:"timestamp"` // A unique string that identifies the request and allows failed requests to - // be retried without the risk of executing the operation twice. + // be retried without the risk of running the operation twice. CreatorRequestId *string `type:"string"` // The server-side encryption key that is used to protect your backups; for @@ -6511,7 +6781,7 @@ func (s *DescribeBackupVaultOutput) SetNumberOfRecoveryPoints(v int64) *Describe type DescribeCopyJobInput struct { _ struct{} `type:"structure"` - // Uniquely identifies a request to AWS Backup to copy a resource. + // Uniquely identifies a copy job. // // CopyJobId is a required field CopyJobId *string `location:"uri" locationName:"copyJobId" type:"string" required:"true"` @@ -6922,6 +7192,43 @@ func (s *DescribeRecoveryPointOutput) SetStorageClass(v string) *DescribeRecover return s } +type DescribeRegionSettingsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeRegionSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRegionSettingsInput) GoString() string { + return s.String() +} + +type DescribeRegionSettingsOutput struct { + _ struct{} `type:"structure"` + + // Returns a list of all services along with the opt-in preferences in the Region. + ResourceTypeOptInPreference map[string]*bool `type:"map"` +} + +// String returns the string representation +func (s DescribeRegionSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeRegionSettingsOutput) GoString() string { + return s.String() +} + +// SetResourceTypeOptInPreference sets the ResourceTypeOptInPreference field's value. +func (s *DescribeRegionSettingsOutput) SetResourceTypeOptInPreference(v map[string]*bool) *DescribeRegionSettingsOutput { + s.ResourceTypeOptInPreference = v + return s +} + type DescribeRestoreJobInput struct { _ struct{} `type:"structure"` @@ -6966,6 +7273,9 @@ func (s *DescribeRestoreJobInput) SetRestoreJobId(v string) *DescribeRestoreJobI type DescribeRestoreJobOutput struct { _ struct{} `type:"structure"` + // Returns the account ID that owns the restore job. + AccountId *string `type:"string"` + // The size, in bytes, of the restored resource. BackupSizeInBytes *int64 `type:"long"` @@ -7001,6 +7311,9 @@ type DescribeRestoreJobOutput struct { // An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. 
RecoveryPointArn *string `type:"string"` + // Returns metadata associated with a restore job listed by resource type. + ResourceType *string `type:"string"` + // Uniquely identifies the job that restores a recovery point. RestoreJobId *string `type:"string"` @@ -7008,7 +7321,7 @@ type DescribeRestoreJobOutput struct { // to restore a recovery point. Status *string `type:"string" enum:"RestoreJobStatus"` - // A detailed message explaining the status of a job to restore a recovery point. + // A message showing the status of a job to restore a recovery point. StatusMessage *string `type:"string"` } @@ -7022,6 +7335,12 @@ func (s DescribeRestoreJobOutput) GoString() string { return s.String() } +// SetAccountId sets the AccountId field's value. +func (s *DescribeRestoreJobOutput) SetAccountId(v string) *DescribeRestoreJobOutput { + s.AccountId = &v + return s +} + // SetBackupSizeInBytes sets the BackupSizeInBytes field's value. func (s *DescribeRestoreJobOutput) SetBackupSizeInBytes(v int64) *DescribeRestoreJobOutput { s.BackupSizeInBytes = &v @@ -7070,6 +7389,12 @@ func (s *DescribeRestoreJobOutput) SetRecoveryPointArn(v string) *DescribeRestor return s } +// SetResourceType sets the ResourceType field's value. +func (s *DescribeRestoreJobOutput) SetResourceType(v string) *DescribeRestoreJobOutput { + s.ResourceType = &v + return s +} + // SetRestoreJobId sets the RestoreJobId field's value. func (s *DescribeRestoreJobOutput) SetRestoreJobId(v string) *DescribeRestoreJobOutput { s.RestoreJobId = &v @@ -7336,6 +7661,10 @@ func (s *GetBackupPlanInput) SetVersionId(v string) *GetBackupPlanInput { type GetBackupPlanOutput struct { _ struct{} `type:"structure"` + // Contains a list of BackupOptions for each resource type. The list is populated + // only if the advanced option is set for the backup plan. + AdvancedBackupSettings []*AdvancedBackupSetting `type:"list"` + // Specifies the body of a backup plan. Includes a BackupPlanName and one or // more sets of Rules. BackupPlan *Plan `type:"structure"` @@ -7354,19 +7683,19 @@ type GetBackupPlanOutput struct { CreationDate *time.Time `type:"timestamp"` // A unique string that identifies the request and allows failed requests to - // be retried without the risk of executing the operation twice. + // be retried without the risk of running the operation twice. CreatorRequestId *string `type:"string"` // The date and time that a backup plan is deleted, in Unix format and Coordinated - // Universal Time (UTC). The value of CreationDate is accurate to milliseconds. + // Universal Time (UTC). The value of DeletionDate is accurate to milliseconds. // For example, the value 1516925490.087 represents Friday, January 26, 2018 // 12:11:30.087 AM. DeletionDate *time.Time `type:"timestamp"` - // The last time a job to back up resources was executed with this backup plan. - // A date and time, in Unix format and Coordinated Universal Time (UTC). The - // value of LastExecutionDate is accurate to milliseconds. For example, the - // value 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. + // The last time a job to back up resources was run with this backup plan. A + // date and time, in Unix format and Coordinated Universal Time (UTC). The value + // of LastExecutionDate is accurate to milliseconds. For example, the value + // 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. 
LastExecutionDate *time.Time `type:"timestamp"` // Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most @@ -7384,6 +7713,12 @@ func (s GetBackupPlanOutput) GoString() string { return s.String() } +// SetAdvancedBackupSettings sets the AdvancedBackupSettings field's value. +func (s *GetBackupPlanOutput) SetAdvancedBackupSettings(v []*AdvancedBackupSetting) *GetBackupPlanOutput { + s.AdvancedBackupSettings = v + return s +} + // SetBackupPlan sets the BackupPlan field's value. func (s *GetBackupPlanOutput) SetBackupPlan(v *Plan) *GetBackupPlanOutput { s.BackupPlan = v @@ -7508,7 +7843,7 @@ type GetBackupSelectionOutput struct { CreationDate *time.Time `type:"timestamp"` // A unique string that identifies the request and allows failed requests to - // be retried without the risk of executing the operation twice. + // be retried without the risk of running the operation twice. CreatorRequestId *string `type:"string"` // Uniquely identifies the body of a request to assign a set of resources to @@ -7816,7 +8151,7 @@ type GetRecoveryPointRestoreMetadataOutput struct { // An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. RecoveryPointArn *string `type:"string"` - // The set of metadata key-value pairs that describes the original configuration + // The set of metadata key-value pairs that describe the original configuration // of the backed-up resource. These values vary depending on the service that // is being restored. RestoreMetadata map[string]*string `type:"map" sensitive:"true"` @@ -7869,15 +8204,17 @@ type GetSupportedResourceTypesOutput struct { // Contains a string with the supported AWS resource types: // + // * DynamoDB for Amazon DynamoDB + // // * EBS for Amazon Elastic Block Store // - // * Storage Gateway for AWS Storage Gateway + // * EC2 for Amazon Elastic Compute Cloud // - // * RDS for Amazon Relational Database Service + // * EFS for Amazon Elastic File System // - // * DDB for Amazon DynamoDB + // * RDS for Amazon Relational Database Service // - // * EFS for Amazon Elastic File System + // * Storage Gateway for AWS Storage Gateway ResourceTypes []*string `type:"list"` } @@ -7900,8 +8237,8 @@ func (s *GetSupportedResourceTypesOutput) SetResourceTypes(v []*string) *GetSupp // Indicates that something is wrong with a parameter's value. For example, // the value is out of range. type InvalidParameterValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -7924,17 +8261,17 @@ func (s InvalidParameterValueException) GoString() string { func newErrorInvalidParameterValueException(v protocol.ResponseMetadata) error { return &InvalidParameterValueException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterValueException) Code() string { +func (s *InvalidParameterValueException) Code() string { return "InvalidParameterValueException" } // Message returns the exception's message. -func (s InvalidParameterValueException) Message() string { +func (s *InvalidParameterValueException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7942,29 +8279,29 @@ func (s InvalidParameterValueException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidParameterValueException) OrigErr() error { +func (s *InvalidParameterValueException) OrigErr() error { return nil } -func (s InvalidParameterValueException) Error() string { +func (s *InvalidParameterValueException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterValueException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterValueException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterValueException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterValueException) RequestID() string { + return s.RespMetadata.RequestID } // Indicates that something is wrong with the input to the request. For example, // a parameter is of the wrong type. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -7987,17 +8324,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8005,34 +8342,48 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Contains detailed information about a backup job. type Job struct { _ struct{} `type:"structure"` + // The account ID that owns the backup job. + AccountId *string `type:"string"` + // Uniquely identifies a request to AWS Backup to back up a resource. BackupJobId *string `type:"string"` + // Specifies the backup option for a selected resource. This option is only + // available for Windows VSS backup jobs. + // + // Valid value: "WindowsVSS”:“enabled". If enabled, creates a VSS Windows + // backup; otherwise, creates a regular backup. If you specify an invalid option, + // you get an InvalidParameterValueException exception. + BackupOptions map[string]*string `type:"map"` + // The size, in bytes, of a backup. 
BackupSizeInBytes *int64 `type:"long"` + // Represents the type of backup for a backup job. + BackupType *string `type:"string"` + // An Amazon Resource Name (ARN) that uniquely identifies a backup vault; for // example, arn:aws:backup:us-east-1:123456789012:vault:aBackupVault. BackupVaultArn *string `type:"string"` @@ -8085,9 +8436,10 @@ type Job struct { // on the resource type. ResourceArn *string `type:"string"` - // The type of AWS resource to be backed-up; for example, an Amazon Elastic + // The type of AWS resource to be backed up; for example, an Amazon Elastic // Block Store (Amazon EBS) volume or an Amazon Relational Database Service - // (Amazon RDS) database. + // (Amazon RDS) database. For VSS Windows backups, the only supported resource + // type is Amazon EC2. ResourceType *string `type:"string"` // Specifies the time in Unix format and Coordinated Universal Time (UTC) when @@ -8116,18 +8468,36 @@ func (s Job) GoString() string { return s.String() } +// SetAccountId sets the AccountId field's value. +func (s *Job) SetAccountId(v string) *Job { + s.AccountId = &v + return s +} + // SetBackupJobId sets the BackupJobId field's value. func (s *Job) SetBackupJobId(v string) *Job { s.BackupJobId = &v return s } +// SetBackupOptions sets the BackupOptions field's value. +func (s *Job) SetBackupOptions(v map[string]*string) *Job { + s.BackupOptions = v + return s +} + // SetBackupSizeInBytes sets the BackupSizeInBytes field's value. func (s *Job) SetBackupSizeInBytes(v int64) *Job { s.BackupSizeInBytes = &v return s } +// SetBackupType sets the BackupType field's value. +func (s *Job) SetBackupType(v string) *Job { + s.BackupType = &v + return s +} + // SetBackupVaultArn sets the BackupVaultArn field's value. func (s *Job) SetBackupVaultArn(v string) *Job { s.BackupVaultArn = &v @@ -8263,8 +8633,8 @@ func (s *Lifecycle) SetMoveToColdStorageAfterDays(v int64) *Lifecycle { // A limit in the request has been exceeded; for example, a maximum number of // items allowed in a request. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -8287,17 +8657,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8305,27 +8675,31 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListBackupJobsInput struct { _ struct{} `type:"structure"` + // The account ID to list the jobs from. Returns only backup jobs associated + // with the specified account ID. + ByAccountId *string `location:"querystring" locationName:"accountId" type:"string"` + // Returns only backup jobs that will be stored in the specified backup vault. // Backup vaults are identified by names that are unique to the account used // to create them and the AWS Region where they are created. They consist of @@ -8348,6 +8722,8 @@ type ListBackupJobsInput struct { // // * EBS for Amazon Elastic Block Store // + // * EC2 for Amazon Elastic Compute Cloud + // // * EFS for Amazon Elastic File System // // * RDS for Amazon Relational Database Service @@ -8391,6 +8767,12 @@ func (s *ListBackupJobsInput) Validate() error { return nil } +// SetByAccountId sets the ByAccountId field's value. +func (s *ListBackupJobsInput) SetByAccountId(v string) *ListBackupJobsInput { + s.ByAccountId = &v + return s +} + // SetByBackupVaultName sets the ByBackupVaultName field's value. func (s *ListBackupJobsInput) SetByBackupVaultName(v string) *ListBackupJobsInput { s.ByBackupVaultName = &v @@ -8942,6 +9324,10 @@ func (s *ListBackupVaultsOutput) SetNextToken(v string) *ListBackupVaultsOutput type ListCopyJobsInput struct { _ struct{} `type:"structure"` + // The account ID to list the jobs from. Returns only copy jobs associated with + // the specified account ID. + ByAccountId *string `location:"querystring" locationName:"accountId" type:"string"` + // Returns only copy jobs that were created after the specified date. ByCreatedAfter *time.Time `location:"querystring" locationName:"createdAfter" type:"timestamp"` @@ -8962,6 +9348,8 @@ type ListCopyJobsInput struct { // // * EBS for Amazon Elastic Block Store // + // * EC2 for Amazon Elastic Compute Cloud + // // * EFS for Amazon Elastic File System // // * RDS for Amazon Relational Database Service @@ -9005,6 +9393,12 @@ func (s *ListCopyJobsInput) Validate() error { return nil } +// SetByAccountId sets the ByAccountId field's value. +func (s *ListCopyJobsInput) SetByAccountId(v string) *ListCopyJobsInput { + s.ByAccountId = &v + return s +} + // SetByCreatedAfter sets the ByCreatedAfter field's value. func (s *ListCopyJobsInput) SetByCreatedAfter(v time.Time) *ListCopyJobsInput { s.ByCreatedAfter = &v @@ -9429,6 +9823,19 @@ func (s *ListRecoveryPointsByResourceOutput) SetRecoveryPoints(v []*RecoveryPoin type ListRestoreJobsInput struct { _ struct{} `type:"structure"` + // The account ID to list the jobs from. Returns only restore jobs associated + // with the specified account ID. + ByAccountId *string `location:"querystring" locationName:"accountId" type:"string"` + + // Returns only restore jobs that were created after the specified date. + ByCreatedAfter *time.Time `location:"querystring" locationName:"createdAfter" type:"timestamp"` + + // Returns only restore jobs that were created before the specified date. 
+ ByCreatedBefore *time.Time `location:"querystring" locationName:"createdBefore" type:"timestamp"` + + // Returns only restore jobs associated with the specified job status. + ByStatus *string `location:"querystring" locationName:"status" type:"string" enum:"RestoreJobStatus"` + // The maximum number of items to be returned. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` @@ -9462,6 +9869,30 @@ func (s *ListRestoreJobsInput) Validate() error { return nil } +// SetByAccountId sets the ByAccountId field's value. +func (s *ListRestoreJobsInput) SetByAccountId(v string) *ListRestoreJobsInput { + s.ByAccountId = &v + return s +} + +// SetByCreatedAfter sets the ByCreatedAfter field's value. +func (s *ListRestoreJobsInput) SetByCreatedAfter(v time.Time) *ListRestoreJobsInput { + s.ByCreatedAfter = &v + return s +} + +// SetByCreatedBefore sets the ByCreatedBefore field's value. +func (s *ListRestoreJobsInput) SetByCreatedBefore(v time.Time) *ListRestoreJobsInput { + s.ByCreatedBefore = &v + return s +} + +// SetByStatus sets the ByStatus field's value. +func (s *ListRestoreJobsInput) SetByStatus(v string) *ListRestoreJobsInput { + s.ByStatus = &v + return s +} + // SetMaxResults sets the MaxResults field's value. func (s *ListRestoreJobsInput) SetMaxResults(v int64) *ListRestoreJobsInput { s.MaxResults = &v @@ -9615,8 +10046,8 @@ func (s *ListTagsOutput) SetTags(v map[string]*string) *ListTagsOutput { // Indicates that a required parameter is missing. type MissingParameterValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -9639,17 +10070,17 @@ func (s MissingParameterValueException) GoString() string { func newErrorMissingParameterValueException(v protocol.ResponseMetadata) error { return &MissingParameterValueException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MissingParameterValueException) Code() string { +func (s *MissingParameterValueException) Code() string { return "MissingParameterValueException" } // Message returns the exception's message. -func (s MissingParameterValueException) Message() string { +func (s *MissingParameterValueException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9657,22 +10088,22 @@ func (s MissingParameterValueException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MissingParameterValueException) OrigErr() error { +func (s *MissingParameterValueException) OrigErr() error { return nil } -func (s MissingParameterValueException) Error() string { +func (s *MissingParameterValueException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s MissingParameterValueException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MissingParameterValueException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s MissingParameterValueException) RequestID() string { - return s.respMetadata.RequestID +func (s *MissingParameterValueException) RequestID() string { + return s.RespMetadata.RequestID } // Contains an optional backup plan display name and an array of BackupRule @@ -9682,6 +10113,9 @@ func (s MissingParameterValueException) RequestID() string { type Plan struct { _ struct{} `type:"structure"` + // Contains a list of BackupOptions for each resource type. + AdvancedBackupSettings []*AdvancedBackupSetting `type:"list"` + // The display name of a backup plan. // // BackupPlanName is a required field @@ -9704,6 +10138,12 @@ func (s Plan) GoString() string { return s.String() } +// SetAdvancedBackupSettings sets the AdvancedBackupSettings field's value. +func (s *Plan) SetAdvancedBackupSettings(v []*AdvancedBackupSetting) *Plan { + s.AdvancedBackupSettings = v + return s +} + // SetBackupPlanName sets the BackupPlanName field's value. func (s *Plan) SetBackupPlanName(v string) *Plan { s.BackupPlanName = &v @@ -9723,7 +10163,11 @@ func (s *Plan) SetRules(v []*Rule) *Plan { type PlanInput struct { _ struct{} `type:"structure"` - // The display name of a backup plan. + // Specifies a list of BackupOptions for each resource type. These settings + // are only available for Windows VSS backup jobs. + AdvancedBackupSettings []*AdvancedBackupSetting `type:"list"` + + // The optional display name of a backup plan. // // BackupPlanName is a required field BackupPlanName *string `type:"string" required:"true"` @@ -9771,6 +10215,12 @@ func (s *PlanInput) Validate() error { return nil } +// SetAdvancedBackupSettings sets the AdvancedBackupSettings field's value. +func (s *PlanInput) SetAdvancedBackupSettings(v []*AdvancedBackupSetting) *PlanInput { + s.AdvancedBackupSettings = v + return s +} + // SetBackupPlanName sets the BackupPlanName field's value. func (s *PlanInput) SetBackupPlanName(v string) *PlanInput { s.BackupPlanName = &v @@ -9820,6 +10270,9 @@ func (s *PlanTemplatesListMember) SetBackupPlanTemplateName(v string) *PlanTempl type PlansListMember struct { _ struct{} `type:"structure"` + // Contains a list of BackupOptions for a resource type. + AdvancedBackupSettings []*AdvancedBackupSetting `type:"list"` + // An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for // example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50. BackupPlanArn *string `type:"string"` @@ -9837,7 +10290,7 @@ type PlansListMember struct { CreationDate *time.Time `type:"timestamp"` // A unique string that identifies the request and allows failed requests to - // be retried without the risk of executing the operation twice. + // be retried without the risk of running the operation twice. CreatorRequestId *string `type:"string"` // The date and time a backup plan is deleted, in Unix format and Coordinated @@ -9846,10 +10299,10 @@ type PlansListMember struct { // 12:11:30.087 AM. DeletionDate *time.Time `type:"timestamp"` - // The last time a job to back up resources was executed with this rule. A date - // and time, in Unix format and Coordinated Universal Time (UTC). The value - // of LastExecutionDate is accurate to milliseconds. For example, the value - // 1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM. + // The last time a job to back up resources was run with this rule. A date and + // time, in Unix format and Coordinated Universal Time (UTC). The value of LastExecutionDate + // is accurate to milliseconds. 
For example, the value 1516925490.087 represents + // Friday, January 26, 2018 12:11:30.087 AM. LastExecutionDate *time.Time `type:"timestamp"` // Unique, randomly generated, Unicode, UTF-8 encoded strings that are at most @@ -9867,6 +10320,12 @@ func (s PlansListMember) GoString() string { return s.String() } +// SetAdvancedBackupSettings sets the AdvancedBackupSettings field's value. +func (s *PlansListMember) SetAdvancedBackupSettings(v []*AdvancedBackupSetting) *PlansListMember { + s.AdvancedBackupSettings = v + return s +} + // SetBackupPlanArn sets the BackupPlanArn field's value. func (s *PlansListMember) SetBackupPlanArn(v string) *PlansListMember { s.BackupPlanArn = &v @@ -9931,6 +10390,7 @@ type ProtectedResource struct { // The type of AWS resource; for example, an Amazon Elastic Block Store (Amazon // EBS) volume or an Amazon Relational Database Service (Amazon RDS) database. + // For VSS Windows backups, the only supported resource type is Amazon EC2. ResourceType *string `type:"string"` } @@ -10194,7 +10654,8 @@ type RecoveryPointByBackupVault struct { // The type of AWS resource saved as a recovery point; for example, an Amazon // Elastic Block Store (Amazon EBS) volume or an Amazon Relational Database - // Service (Amazon RDS) database. + // Service (Amazon RDS) database. For VSS Windows backups, the only supported + // resource type is Amazon EC2. ResourceType *string `type:"string"` // A status code specifying the state of the recovery point. @@ -10441,8 +10902,8 @@ func (s *RecoveryPointCreator) SetBackupRuleId(v string) *RecoveryPointCreator { // A resource that is required for the action doesn't exist. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -10465,17 +10926,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10483,28 +10944,31 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains metadata about a restore job. 
type RestoreJobsListMember struct { _ struct{} `type:"structure"` + // The account ID that owns the restore job. + AccountId *string `type:"string"` + // The size, in bytes, of the restored resource. BackupSizeInBytes *int64 `type:"long"` @@ -10539,6 +11003,12 @@ type RestoreJobsListMember struct { // An ARN that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. RecoveryPointArn *string `type:"string"` + // The resource type of the listed restore jobs; for example, an Amazon Elastic + // Block Store (Amazon EBS) volume or an Amazon Relational Database Service + // (Amazon RDS) database. For VSS Windows backups, the only supported resource + // type is Amazon EC2. + ResourceType *string `type:"string"` + // Uniquely identifies the job that restores a recovery point. RestoreJobId *string `type:"string"` @@ -10561,6 +11031,12 @@ func (s RestoreJobsListMember) GoString() string { return s.String() } +// SetAccountId sets the AccountId field's value. +func (s *RestoreJobsListMember) SetAccountId(v string) *RestoreJobsListMember { + s.AccountId = &v + return s +} + // SetBackupSizeInBytes sets the BackupSizeInBytes field's value. func (s *RestoreJobsListMember) SetBackupSizeInBytes(v int64) *RestoreJobsListMember { s.BackupSizeInBytes = &v @@ -10609,6 +11085,12 @@ func (s *RestoreJobsListMember) SetRecoveryPointArn(v string) *RestoreJobsListMe return s } +// SetResourceType sets the ResourceType field's value. +func (s *RestoreJobsListMember) SetResourceType(v string) *RestoreJobsListMember { + s.ResourceType = &v + return s +} + // SetRestoreJobId sets the RestoreJobId field's value. func (s *RestoreJobsListMember) SetRestoreJobId(v string) *RestoreJobsListMember { s.RestoreJobId = &v @@ -10632,7 +11114,7 @@ type Rule struct { _ struct{} `type:"structure"` // A value in minutes after a backup job is successfully started before it must - // be completed or it is canceled by AWS Backup. This value is optional. + // be completed or it will be canceled by AWS Backup. This value is optional. CompletionWindowMinutes *int64 `type:"long"` // An array of CopyAction objects, which contains the details of the copy operation. @@ -10665,8 +11147,8 @@ type Rule struct { // A CRON expression specifying when AWS Backup initiates a backup job. ScheduleExpression *string `type:"string"` - // An optional value that specifies a period of time in minutes after a backup - // is scheduled before a job is canceled if it doesn't start successfully. + // A value in minutes after a backup is scheduled before a job will be canceled + // if it doesn't start successfully. This value is optional. StartWindowMinutes *int64 `type:"long"` // The name of a logical container where backups are stored. Backup vaults are @@ -10746,8 +11228,8 @@ func (s *Rule) SetTargetBackupVaultName(v string) *Rule { type RuleInput struct { _ struct{} `type:"structure"` - // The amount of time AWS Backup attempts a backup before canceling the job - // and returning an error. + // A value in minutes after a backup job is successfully started before it must + // be completed or it will be canceled by AWS Backup. This value is optional. CompletionWindowMinutes *int64 `type:"long"` // An array of CopyAction objects, which contains the details of the copy operation. @@ -10759,16 +11241,16 @@ type RuleInput struct { // // Backups transitioned to cold storage must be stored in cold storage for a // minimum of 90 days. 
Therefore, the “expire after days” setting must be - // 90 days greater than the “transition to cold after days”. The “transition - // to cold after days” setting cannot be changed after a backup has been transitioned - // to cold. + // 90 days greater than the “transition to cold after days” setting. The + // “transition to cold after days” setting cannot be changed after a backup + // has been transitioned to cold. Lifecycle *Lifecycle `type:"structure"` // To help organize your resources, you can assign your own metadata to the // resources that you create. Each tag is a key-value pair. RecoveryPointTags map[string]*string `type:"map" sensitive:"true"` - // >An optional display name for a backup rule. + // An optional display name for a backup rule. // // RuleName is a required field RuleName *string `type:"string" required:"true"` @@ -10776,7 +11258,8 @@ type RuleInput struct { // A CRON expression specifying when AWS Backup initiates a backup job. ScheduleExpression *string `type:"string"` - // The amount of time in minutes before beginning a backup. + // A value in minutes after a backup is scheduled before a job will be canceled + // if it doesn't start successfully. This value is optional. StartWindowMinutes *int64 `type:"long"` // The name of a logical container where backups are stored. Backup vaults are @@ -10876,14 +11359,14 @@ func (s *RuleInput) SetTargetBackupVaultName(v string) *RuleInput { type Selection struct { _ struct{} `type:"structure"` - // The ARN of the IAM role that AWS Backup uses to authenticate when restoring - // the target resource; for example, arn:aws:iam::123456789012:role/S3Access. + // The ARN of the IAM role that AWS Backup uses to authenticate when backing + // up the target resource; for example, arn:aws:iam::123456789012:role/S3Access. // // IamRoleArn is a required field IamRoleArn *string `type:"string" required:"true"` // An array of conditions used to specify a set of resources to assign to a - // backup plan; for example, "STRINGEQUALS": {"ec2:ResourceTag/Department": + // backup plan; for example, "StringEquals": {"ec2:ResourceTag/Department": // "accounting". ListOfTags []*Condition `type:"list"` @@ -10971,7 +11454,7 @@ type SelectionsListMember struct { CreationDate *time.Time `type:"timestamp"` // A unique string that identifies the request and allows failed requests to - // be retried without the risk of executing the operation twice. + // be retried without the risk of running the operation twice. CreatorRequestId *string `type:"string"` // Specifies the IAM role Amazon Resource Name (ARN) to create the target recovery @@ -11033,8 +11516,8 @@ func (s *SelectionsListMember) SetSelectionName(v string) *SelectionsListMember // The request failed due to a temporary failure of the server. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -11057,17 +11540,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. 
-func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11075,27 +11558,34 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } type StartBackupJobInput struct { _ struct{} `type:"structure"` + // Specifies the backup option for a selected resource. This option is only + // available for Windows VSS backup jobs. + // + // Valid value: "WindowsVSS”:“enabled". If enabled, creates a VSS Windows + // backup; otherwise, creates a regular backup. + BackupOptions map[string]*string `type:"map"` + // The name of a logical container where backups are stored. Backup vaults are // identified by names that are unique to the account used to create them and // the AWS Region where they are created. They consist of lowercase letters, @@ -11104,8 +11594,8 @@ type StartBackupJobInput struct { // BackupVaultName is a required field BackupVaultName *string `type:"string" required:"true"` - // The amount of time AWS Backup attempts a backup before canceling the job - // and returning an error. + // A value in minutes after a backup job is successfully started before it must + // be completed or it will be canceled by AWS Backup. This value is optional. CompleteWindowMinutes *int64 `type:"long"` // Specifies the IAM role ARN used to create the target recovery point; for @@ -11139,7 +11629,8 @@ type StartBackupJobInput struct { // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` - // The amount of time in minutes before beginning a backup. + // A value in minutes after a backup is scheduled before a job will be canceled + // if it doesn't start successfully. This value is optional. StartWindowMinutes *int64 `type:"long"` } @@ -11172,6 +11663,12 @@ func (s *StartBackupJobInput) Validate() error { return nil } +// SetBackupOptions sets the BackupOptions field's value. +func (s *StartBackupJobInput) SetBackupOptions(v map[string]*string) *StartBackupJobInput { + s.BackupOptions = v + return s +} + // SetBackupVaultName sets the BackupVaultName field's value. func (s *StartBackupJobInput) SetBackupVaultName(v string) *StartBackupJobInput { s.BackupVaultName = &v @@ -11302,7 +11799,7 @@ type StartCopyJobInput struct { // The name of a logical source container where backups are stored. Backup vaults // are identified by names that are unique to the account used to create them // and the AWS Region where they are created. They consist of lowercase letters, - // numbers, and hyphens. > + // numbers, and hyphens. 
// // SourceBackupVaultName is a required field SourceBackupVaultName *string `type:"string" required:"true"` @@ -11379,13 +11876,13 @@ func (s *StartCopyJobInput) SetSourceBackupVaultName(v string) *StartCopyJobInpu type StartCopyJobOutput struct { _ struct{} `type:"structure"` - // Uniquely identifies a request to AWS Backup to copy a resource. + // Uniquely identifies a copy job. CopyJobId *string `type:"string"` - // The date and time that a backup job is started, in Unix format and Coordinated + // The date and time that a copy job is started, in Unix format and Coordinated // Universal Time (UTC). The value of CreationDate is accurate to milliseconds. // For example, the value 1516925490.087 represents Friday, January 26, 2018 - // 12:11:30.087 AM. > + // 12:11:30.087 AM. CreationDate *time.Time `type:"timestamp"` } @@ -11427,17 +11924,17 @@ type StartRestoreJobInput struct { // A set of metadata key-value pairs. Contains information, such as a resource // name, required to restore a recovery point. // - // You can get configuration metadata about a resource at the time it was backed-up - // by calling GetRecoveryPointRestoreMetadata. However, values in addition to - // those provided by GetRecoveryPointRestoreMetadata might be required to restore - // a resource. For example, you might need to provide a new resource name if - // the original already exists. + // You can get configuration metadata about a resource at the time it was backed + // up by calling GetRecoveryPointRestoreMetadata. However, values in addition + // to those provided by GetRecoveryPointRestoreMetadata might be required to + // restore a resource. For example, you might need to provide a new resource + // name if the original already exists. // // You need to specify specific metadata to restore an Amazon Elastic File System // (Amazon EFS) instance: // - // * file-system-id: ID of the Amazon EFS file system that is backed up by - // AWS Backup. Returned in GetRecoveryPointRestoreMetadata. + // * file-system-id: The ID of the Amazon EFS file system that is backed + // up by AWS Backup. Returned in GetRecoveryPointRestoreMetadata. // // * Encrypted: A Boolean value that, if true, specifies that the file system // is encrypted. If KmsKeyId is specified, Encrypted must be set to true. @@ -11463,15 +11960,17 @@ type StartRestoreJobInput struct { // Starts a job to restore a recovery point for one of the following resources: // + // * DynamoDB for Amazon DynamoDB + // // * EBS for Amazon Elastic Block Store // - // * Storage Gateway for AWS Storage Gateway + // * EC2 for Amazon Elastic Compute Cloud // - // * RDS for Amazon Relational Database Service + // * EFS for Amazon Elastic File System // - // * DDB for Amazon DynamoDB + // * RDS for Amazon Relational Database Service // - // * EFS for Amazon Elastic File System + // * Storage Gateway for AWS Storage Gateway ResourceType *string `type:"string"` } @@ -11817,6 +12316,9 @@ func (s *UpdateBackupPlanInput) SetBackupPlanId(v string) *UpdateBackupPlanInput type UpdateBackupPlanOutput struct { _ struct{} `type:"structure"` + // Contains a list of BackupOptions for each resource type. + AdvancedBackupSettings []*AdvancedBackupSetting `type:"list"` + // An Amazon Resource Name (ARN) that uniquely identifies a backup plan; for // example, arn:aws:backup:us-east-1:123456789012:plan:8F81F553-3A74-4A3F-B93D-B3360DC80C50. 
BackupPlanArn *string `type:"string"` @@ -11845,6 +12347,12 @@ func (s UpdateBackupPlanOutput) GoString() string { return s.String() } +// SetAdvancedBackupSettings sets the AdvancedBackupSettings field's value. +func (s *UpdateBackupPlanOutput) SetAdvancedBackupSettings(v []*AdvancedBackupSetting) *UpdateBackupPlanOutput { + s.AdvancedBackupSettings = v + return s +} + // SetBackupPlanArn sets the BackupPlanArn field's value. func (s *UpdateBackupPlanOutput) SetBackupPlanArn(v string) *UpdateBackupPlanOutput { s.BackupPlanArn = &v @@ -12008,6 +12516,43 @@ func (s *UpdateRecoveryPointLifecycleOutput) SetRecoveryPointArn(v string) *Upda return s } +type UpdateRegionSettingsInput struct { + _ struct{} `type:"structure"` + + // Updates the list of services along with the opt-in preferences for the Region. + ResourceTypeOptInPreference map[string]*bool `type:"map"` +} + +// String returns the string representation +func (s UpdateRegionSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRegionSettingsInput) GoString() string { + return s.String() +} + +// SetResourceTypeOptInPreference sets the ResourceTypeOptInPreference field's value. +func (s *UpdateRegionSettingsInput) SetResourceTypeOptInPreference(v map[string]*bool) *UpdateRegionSettingsInput { + s.ResourceTypeOptInPreference = v + return s +} + +type UpdateRegionSettingsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateRegionSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRegionSettingsOutput) GoString() string { + return s.String() +} + // Contains metadata about a backup vault. type VaultListMember struct { _ struct{} `type:"structure"` @@ -12029,7 +12574,7 @@ type VaultListMember struct { CreationDate *time.Time `type:"timestamp"` // A unique string that identifies the request and allows failed requests to - // be retried without the risk of executing the operation twice. + // be retried without the risk of running the operation twice. 
CreatorRequestId *string `type:"string"` // The server-side encryption key that is used to protect your backups; for @@ -12091,6 +12636,13 @@ const ( ConditionTypeStringequals = "STRINGEQUALS" ) +// ConditionType_Values returns all elements of the ConditionType enum +func ConditionType_Values() []string { + return []string{ + ConditionTypeStringequals, + } +} + const ( // CopyJobStateCreated is a CopyJobState enum value CopyJobStateCreated = "CREATED" @@ -12105,6 +12657,16 @@ const ( CopyJobStateFailed = "FAILED" ) +// CopyJobState_Values returns all elements of the CopyJobState enum +func CopyJobState_Values() []string { + return []string{ + CopyJobStateCreated, + CopyJobStateRunning, + CopyJobStateCompleted, + CopyJobStateFailed, + } +} + const ( // JobStateCreated is a JobState enum value JobStateCreated = "CREATED" @@ -12131,6 +12693,20 @@ const ( JobStateExpired = "EXPIRED" ) +// JobState_Values returns all elements of the JobState enum +func JobState_Values() []string { + return []string{ + JobStateCreated, + JobStatePending, + JobStateRunning, + JobStateAborting, + JobStateAborted, + JobStateCompleted, + JobStateFailed, + JobStateExpired, + } +} + const ( // RecoveryPointStatusCompleted is a RecoveryPointStatus enum value RecoveryPointStatusCompleted = "COMPLETED" @@ -12145,6 +12721,16 @@ const ( RecoveryPointStatusExpired = "EXPIRED" ) +// RecoveryPointStatus_Values returns all elements of the RecoveryPointStatus enum +func RecoveryPointStatus_Values() []string { + return []string{ + RecoveryPointStatusCompleted, + RecoveryPointStatusPartial, + RecoveryPointStatusDeleting, + RecoveryPointStatusExpired, + } +} + const ( // RestoreJobStatusPending is a RestoreJobStatus enum value RestoreJobStatusPending = "PENDING" @@ -12162,6 +12748,17 @@ const ( RestoreJobStatusFailed = "FAILED" ) +// RestoreJobStatus_Values returns all elements of the RestoreJobStatus enum +func RestoreJobStatus_Values() []string { + return []string{ + RestoreJobStatusPending, + RestoreJobStatusRunning, + RestoreJobStatusCompleted, + RestoreJobStatusAborted, + RestoreJobStatusFailed, + } +} + const ( // StorageClassWarm is a StorageClass enum value StorageClassWarm = "WARM" @@ -12173,6 +12770,15 @@ const ( StorageClassDeleted = "DELETED" ) +// StorageClass_Values returns all elements of the StorageClass enum +func StorageClass_Values() []string { + return []string{ + StorageClassWarm, + StorageClassCold, + StorageClassDeleted, + } +} + const ( // VaultEventBackupJobStarted is a VaultEvent enum value VaultEventBackupJobStarted = "BACKUP_JOB_STARTED" @@ -12219,3 +12825,24 @@ const ( // VaultEventBackupPlanModified is a VaultEvent enum value VaultEventBackupPlanModified = "BACKUP_PLAN_MODIFIED" ) + +// VaultEvent_Values returns all elements of the VaultEvent enum +func VaultEvent_Values() []string { + return []string{ + VaultEventBackupJobStarted, + VaultEventBackupJobCompleted, + VaultEventBackupJobSuccessful, + VaultEventBackupJobFailed, + VaultEventBackupJobExpired, + VaultEventRestoreJobStarted, + VaultEventRestoreJobCompleted, + VaultEventRestoreJobSuccessful, + VaultEventRestoreJobFailed, + VaultEventCopyJobStarted, + VaultEventCopyJobSuccessful, + VaultEventCopyJobFailed, + VaultEventRecoveryPointModified, + VaultEventBackupPlanCreated, + VaultEventBackupPlanModified, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/backup/service.go b/vendor/github.com/aws/aws-sdk-go/service/backup/service.go index 1158d2b82..56d44bfd0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/backup/service.go +++ 
b/vendor/github.com/aws/aws-sdk-go/service/backup/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/batch/api.go b/vendor/github.com/aws/aws-sdk-go/service/batch/api.go index c2fba8ec0..c99583957 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/batch/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/batch/api.go @@ -1249,6 +1249,92 @@ func (c *Batch) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInput, return p.Err() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/ListTagsForResource +func (c *Batch) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/v1/tags/{resourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS Batch. +// +// List the tags for an AWS Batch resource. AWS Batch resources that support +// tags are compute environments, jobs, job definitions, and job queues. ARNs +// for child jobs of array and multi-node parallel (MNP) jobs are not supported. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Batch's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * ServerException +// These errors are usually caused by a server issue. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/ListTagsForResource +func (c *Batch) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRegisterJobDefinition = "RegisterJobDefinition" // RegisterJobDefinitionRequest generates a "aws/request.Request" representing the @@ -1418,6 +1504,96 @@ func (c *Batch) SubmitJobWithContext(ctx aws.Context, input *SubmitJobInput, opt return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/TagResource +func (c *Batch) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/v1/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS Batch. +// +// Associates the specified tags to a resource with the specified resourceArn. +// If existing tags on a resource are not specified in the request parameters, +// they are not changed. When a resource is deleted, the tags associated with +// that resource are deleted as well. AWS Batch resources that support tags +// are compute environments, jobs, job definitions, and job queues. ARNs for +// child jobs of array and multi-node parallel (MNP) jobs are not supported. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Batch's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * ServerException +// These errors are usually caused by a server issue. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/TagResource +func (c *Batch) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTerminateJob = "TerminateJob" // TerminateJobRequest generates a "aws/request.Request" representing the @@ -1505,6 +1681,91 @@ func (c *Batch) TerminateJobWithContext(ctx aws.Context, input *TerminateJobInpu return out, req.Send() } +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/UntagResource +func (c *Batch) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/v1/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS Batch. +// +// Deletes specified tags from an AWS Batch resource. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Batch's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * ServerException +// These errors are usually caused by a server issue. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/UntagResource +func (c *Batch) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateComputeEnvironment = "UpdateComputeEnvironment" // UpdateComputeEnvironmentRequest generates a "aws/request.Request" representing the @@ -1857,18 +2118,17 @@ type AttemptDetail struct { // Details about the container in this job attempt. Container *AttemptContainerDetail `locationName:"container" type:"structure"` - // The Unix timestamp (in seconds and milliseconds) for when the attempt was - // started (when the attempt transitioned from the STARTING state to the RUNNING - // state). + // The Unix timestamp (in milliseconds) for when the attempt was started (when + // the attempt transitioned from the STARTING state to the RUNNING state). StartedAt *int64 `locationName:"startedAt" type:"long"` // A short, human-readable string to provide additional details about the current // status of the job attempt. StatusReason *string `locationName:"statusReason" type:"string"` - // The Unix timestamp (in seconds and milliseconds) for when the attempt was - // stopped (when the attempt transitioned from the RUNNING state to a terminal - // state, such as SUCCEEDED or FAILED). + // The Unix timestamp (in milliseconds) for when the attempt was stopped (when + // the attempt transitioned from the RUNNING state to a terminal state, such + // as SUCCEEDED or FAILED). StoppedAt *int64 `locationName:"stoppedAt" type:"long"` } @@ -1978,8 +2238,8 @@ func (s CancelJobOutput) GoString() string { // or resource on behalf of a user that doesn't have permissions to use the // action or resource, or specifying an identifier that is not valid. 
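The Batch exception types in the hunks below move to pointer receivers with an exported RespMetadata field, but the calling pattern stays the one the generated comments describe: assert the returned error to awserr.Error and switch on its code. A minimal sketch under those assumptions (the job name, queue, and job definition below are hypothetical, and default credential/region resolution is assumed):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/batch"
)

func main() {
	// Assumes credentials and region come from the shared config/environment.
	sess := session.Must(session.NewSession())
	svc := batch.New(sess)

	_, err := svc.SubmitJob(&batch.SubmitJobInput{
		JobDefinition: aws.String("example-job-definition"), // hypothetical
		JobName:       aws.String("example-job"),            // hypothetical
		JobQueue:      aws.String("example-queue"),          // hypothetical
	})
	if err != nil {
		// Per the generated doc comments, use a runtime type assertion on
		// awserr.Error; the error codes match the exception type names.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case batch.ErrCodeClientException:
				log.Fatalf("client-side problem: %s", aerr.Message())
			case batch.ErrCodeServerException:
				log.Fatalf("service-side problem: %s", aerr.Message())
			}
		}
		log.Fatal(err)
	}
	fmt.Println("job submitted")
}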
type ClientException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -1996,17 +2256,17 @@ func (s ClientException) GoString() string { func newErrorClientException(v protocol.ResponseMetadata) error { return &ClientException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClientException) Code() string { +func (s *ClientException) Code() string { return "ClientException" } // Message returns the exception's message. -func (s ClientException) Message() string { +func (s *ClientException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2014,22 +2274,22 @@ func (s ClientException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClientException) OrigErr() error { +func (s *ClientException) OrigErr() error { return nil } -func (s ClientException) Error() string { +func (s *ClientException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClientException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClientException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClientException) RequestID() string { - return s.respMetadata.RequestID +func (s *ClientException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing an AWS Batch compute environment. @@ -2080,6 +2340,9 @@ type ComputeEnvironmentDetail struct { // status of the compute environment. StatusReason *string `locationName:"statusReason" type:"string"` + // The tags applied to the compute environment. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` + // The type of the compute environment. Type *string `locationName:"type" type:"string" enum:"CEType"` } @@ -2142,6 +2405,12 @@ func (s *ComputeEnvironmentDetail) SetStatusReason(v string) *ComputeEnvironment return s } +// SetTags sets the Tags field's value. +func (s *ComputeEnvironmentDetail) SetTags(v map[string]*string) *ComputeEnvironmentDetail { + s.Tags = v + return s +} + // SetType sets the Type field's value. func (s *ComputeEnvironmentDetail) SetType(v string) *ComputeEnvironmentDetail { s.Type = &v @@ -2316,7 +2585,11 @@ type ComputeResource struct { // Key-value pair tags to be applied to resources that are launched in the compute // environment. For AWS Batch, these take the form of "String1": "String2", // where String1 is the tag key and String2 is the tag value—for example, - // { "Name": "AWS Batch Instance - C4OnDemand" }. + // { "Name": "AWS Batch Instance - C4OnDemand" }. These tags can not be updated + // or removed after the compute environment has been created; any changes require + // creating a new compute environment and removing the old compute environment. + // These tags are not seen when using the AWS Batch ListTagsForResource API + // operation. Tags map[string]*string `locationName:"tags" type:"map"` // The type of compute environment: EC2 or SPOT. @@ -2519,6 +2792,10 @@ type ContainerDetail struct { // is reserved for variables that are set by the AWS Batch service. 
Environment []*KeyValuePair `locationName:"environment" type:"list"` + // The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. + // For more information, see AWS Batch execution IAM role (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html). + ExecutionRoleArn *string `locationName:"executionRoleArn" type:"string"` + // The exit code to return upon completion. ExitCode *int64 `locationName:"exitCode" type:"integer"` @@ -2536,12 +2813,43 @@ type ContainerDetail struct { // for device mappings. LinuxParameters *LinuxParameters `locationName:"linuxParameters" type:"structure"` + // The log configuration specification for the container. + // + // This parameter maps to LogConfig in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) + // and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/). + // By default, containers use the same logging driver that the Docker daemon + // uses. However the container may use a different logging driver than the Docker + // daemon by specifying a log driver with this parameter in the container definition. + // To use a different logging driver for a container, the log system must be + // configured properly on the container instance (or on a different log server + // for remote logging options). For more information on the options for different + // supported log drivers, see Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/) + // in the Docker documentation. + // + // AWS Batch currently supports a subset of the logging drivers available to + // the Docker daemon (shown in the LogConfiguration data type). Additional log + // drivers may be available in future releases of the Amazon ECS container agent. + // + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log into your container instance and run the following + // command: sudo docker version | grep "Server API version" + // + // The Amazon ECS container agent running on a container instance must register + // the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS + // environment variable before containers placed on that instance can use these + // log configuration options. For more information, see Amazon ECS Container + // Agent Configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) + // in the Amazon Elastic Container Service Developer Guide. + LogConfiguration *LogConfiguration `locationName:"logConfiguration" type:"structure"` + // The name of the CloudWatch Logs log stream associated with the container. // The log group for AWS Batch jobs is /aws/batch/job. Each container attempt // receives a log stream name when they reach the RUNNING status. LogStreamName *string `locationName:"logStreamName" type:"string"` - // The number of MiB of memory reserved for the job. + // The number of MiB of memory reserved for the job. This is a required parameter. Memory *int64 `locationName:"memory" type:"integer"` // The mount points for data volumes in your container. @@ -2566,6 +2874,11 @@ type ContainerDetail struct { // only supported resource is GPU. 
ResourceRequirements []*ResourceRequirement `locationName:"resourceRequirements" type:"list"` + // The secrets to pass to the container. For more information, see Specifying + // Sensitive Data (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) + // in the Amazon Elastic Container Service Developer Guide. + Secrets []*Secret `locationName:"secrets" type:"list"` + // The Amazon Resource Name (ARN) of the Amazon ECS task that is associated // with the container job. Each container attempt receives a task ARN when they // reach the STARTING status. @@ -2577,7 +2890,7 @@ type ContainerDetail struct { // The user name to use inside the container. User *string `locationName:"user" type:"string"` - // The number of VCPUs allocated for the job. + // The number of VCPUs allocated for the job. This is a required parameter. Vcpus *int64 `locationName:"vcpus" type:"integer"` // A list of volumes associated with the job. @@ -2612,6 +2925,12 @@ func (s *ContainerDetail) SetEnvironment(v []*KeyValuePair) *ContainerDetail { return s } +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *ContainerDetail) SetExecutionRoleArn(v string) *ContainerDetail { + s.ExecutionRoleArn = &v + return s +} + // SetExitCode sets the ExitCode field's value. func (s *ContainerDetail) SetExitCode(v int64) *ContainerDetail { s.ExitCode = &v @@ -2642,6 +2961,12 @@ func (s *ContainerDetail) SetLinuxParameters(v *LinuxParameters) *ContainerDetai return s } +// SetLogConfiguration sets the LogConfiguration field's value. +func (s *ContainerDetail) SetLogConfiguration(v *LogConfiguration) *ContainerDetail { + s.LogConfiguration = v + return s +} + // SetLogStreamName sets the LogStreamName field's value. func (s *ContainerDetail) SetLogStreamName(v string) *ContainerDetail { s.LogStreamName = &v @@ -2690,6 +3015,12 @@ func (s *ContainerDetail) SetResourceRequirements(v []*ResourceRequirement) *Con return s } +// SetSecrets sets the Secrets field's value. +func (s *ContainerDetail) SetSecrets(v []*Secret) *ContainerDetail { + s.Secrets = v + return s +} + // SetTaskArn sets the TaskArn field's value. func (s *ContainerDetail) SetTaskArn(v string) *ContainerDetail { s.TaskArn = &v @@ -2845,6 +3176,10 @@ type ContainerProperties struct { // is reserved for variables that are set by the AWS Batch service. Environment []*KeyValuePair `locationName:"environment" type:"list"` + // The Amazon Resource Name (ARN) of the execution role that AWS Batch can assume. + // For more information, see AWS Batch execution IAM role (https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html). + ExecutionRoleArn *string `locationName:"executionRoleArn" type:"string"` + // The image used to start a container. This string is passed directly to the // Docker daemon. Images in the Docker Hub registry are available by default. // Other repositories are specified with repository-url/image:tag . Up to 255 @@ -2880,12 +3215,44 @@ type ContainerProperties struct { // for device mappings. LinuxParameters *LinuxParameters `locationName:"linuxParameters" type:"structure"` + // The log configuration specification for the container. + // + // This parameter maps to LogConfig in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) + // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) + // and the --log-driver option to docker run (https://docs.docker.com/engine/reference/run/). 
+ // By default, containers use the same logging driver that the Docker daemon + // uses. However the container may use a different logging driver than the Docker + // daemon by specifying a log driver with this parameter in the container definition. + // To use a different logging driver for a container, the log system must be + // configured properly on the container instance (or on a different log server + // for remote logging options). For more information on the options for different + // supported log drivers, see Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/) + // in the Docker documentation. + // + // AWS Batch currently supports a subset of the logging drivers available to + // the Docker daemon (shown in the LogConfiguration data type). + // + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log into your container instance and run the following + // command: sudo docker version | grep "Server API version" + // + // The Amazon ECS container agent running on a container instance must register + // the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS + // environment variable before containers placed on that instance can use these + // log configuration options. For more information, see Amazon ECS Container + // Agent Configuration (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) + // in the Amazon Elastic Container Service Developer Guide. + LogConfiguration *LogConfiguration `locationName:"logConfiguration" type:"structure"` + // The hard limit (in MiB) of memory to present to the container. If your container // attempts to exceed the memory specified here, the container is killed. This // parameter maps to Memory in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) // and the --memory option to docker run (https://docs.docker.com/engine/reference/run/). - // You must specify at least 4 MiB of memory for a job. + // You must specify at least 4 MiB of memory for a job. This is required but + // can be specified in several places for multi-node parallel (MNP) jobs; it + // must be specified for each node at least once. // // If you are trying to maximize your resource utilization by providing your // jobs as much memory as possible for a particular instance type, see Memory @@ -2917,6 +3284,11 @@ type ContainerProperties struct { // only supported resource is GPU. ResourceRequirements []*ResourceRequirement `locationName:"resourceRequirements" type:"list"` + // The secrets for the container. For more information, see Specifying Sensitive + // Data (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) + // in the Amazon Elastic Container Service Developer Guide. + Secrets []*Secret `locationName:"secrets" type:"list"` + // A list of ulimits to set in the container. This parameter maps to Ulimits // in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) @@ -2934,7 +3306,8 @@ type ContainerProperties struct { // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) // and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/). 
// Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one - // vCPU. + // vCPU. This is required but can be specified in several places for multi-node + // parallel (MNP) jobs; it must be specified for each node at least once. Vcpus *int64 `locationName:"vcpus" type:"integer"` // A list of data volumes used in a job. @@ -2959,6 +3332,11 @@ func (s *ContainerProperties) Validate() error { invalidParams.AddNested("LinuxParameters", err.(request.ErrInvalidParams)) } } + if s.LogConfiguration != nil { + if err := s.LogConfiguration.Validate(); err != nil { + invalidParams.AddNested("LogConfiguration", err.(request.ErrInvalidParams)) + } + } if s.ResourceRequirements != nil { for i, v := range s.ResourceRequirements { if v == nil { @@ -2969,6 +3347,16 @@ func (s *ContainerProperties) Validate() error { } } } + if s.Secrets != nil { + for i, v := range s.Secrets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Secrets", i), err.(request.ErrInvalidParams)) + } + } + } if s.Ulimits != nil { for i, v := range s.Ulimits { if v == nil { @@ -2998,6 +3386,12 @@ func (s *ContainerProperties) SetEnvironment(v []*KeyValuePair) *ContainerProper return s } +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *ContainerProperties) SetExecutionRoleArn(v string) *ContainerProperties { + s.ExecutionRoleArn = &v + return s +} + // SetImage sets the Image field's value. func (s *ContainerProperties) SetImage(v string) *ContainerProperties { s.Image = &v @@ -3022,6 +3416,12 @@ func (s *ContainerProperties) SetLinuxParameters(v *LinuxParameters) *ContainerP return s } +// SetLogConfiguration sets the LogConfiguration field's value. +func (s *ContainerProperties) SetLogConfiguration(v *LogConfiguration) *ContainerProperties { + s.LogConfiguration = v + return s +} + // SetMemory sets the Memory field's value. func (s *ContainerProperties) SetMemory(v int64) *ContainerProperties { s.Memory = &v @@ -3052,6 +3452,12 @@ func (s *ContainerProperties) SetResourceRequirements(v []*ResourceRequirement) return s } +// SetSecrets sets the Secrets field's value. +func (s *ContainerProperties) SetSecrets(v []*Secret) *ContainerProperties { + s.Secrets = v + return s +} + // SetUlimits sets the Ulimits field's value. func (s *ContainerProperties) SetUlimits(v []*Ulimit) *ContainerProperties { s.Ulimits = v @@ -3146,6 +3552,16 @@ type CreateComputeEnvironmentInput struct { // on queues. State *string `locationName:"state" type:"string" enum:"CEState"` + // The tags that you apply to the compute environment to help you categorize + // and organize your resources. Each tag consists of a key and an optional value. + // For more information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in AWS General Reference. + // + // These tags can be updated or removed using the TagResource (https://docs.aws.amazon.com/batch/latest/APIReference/API_TagResource.html) + // and UntagResource (https://docs.aws.amazon.com/batch/latest/APIReference/API_UntagResource.html) + // API operations. These tags do not propagate to the underlying compute resources. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` + // The type of the compute environment. For more information, see Compute Environments // (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html) // in the AWS Batch User Guide. 
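Because this revision of the Batch API adds TagResource, UntagResource, and ListTagsForResource alongside the Tags fields shown above, a short usage sketch may help. It is only an illustration: default credential/region resolution is assumed and the compute environment ARN is hypothetical.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/batch"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := batch.New(sess)
	// Hypothetical compute environment ARN used purely for illustration.
	arn := "arn:aws:batch:us-east-1:123456789012:compute-environment/example-ce"

	// Apply tags to an existing Batch resource.
	if _, err := svc.TagResource(&batch.TagResourceInput{
		ResourceArn: aws.String(arn),
		Tags:        aws.StringMap(map[string]string{"team": "data", "env": "dev"}),
	}); err != nil {
		log.Fatal(err)
	}

	// Read the tags back.
	out, err := svc.ListTagsForResource(&batch.ListTagsForResourceInput{
		ResourceArn: aws.String(arn),
	})
	if err != nil {
		log.Fatal(err)
	}
	for k, v := range out.Tags {
		fmt.Printf("%s=%s\n", k, aws.StringValue(v))
	}

	// Remove a single tag key.
	if _, err := svc.UntagResource(&batch.UntagResourceInput{
		ResourceArn: aws.String(arn),
		TagKeys:     aws.StringSlice([]string{"env"}),
	}); err != nil {
		log.Fatal(err)
	}
}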
@@ -3173,6 +3589,9 @@ func (s *CreateComputeEnvironmentInput) Validate() error { if s.ServiceRole == nil { invalidParams.Add(request.NewErrParamRequired("ServiceRole")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } @@ -3212,6 +3631,12 @@ func (s *CreateComputeEnvironmentInput) SetState(v string) *CreateComputeEnviron return s } +// SetTags sets the Tags field's value. +func (s *CreateComputeEnvironmentInput) SetTags(v map[string]*string) *CreateComputeEnvironmentInput { + s.Tags = v + return s +} + // SetType sets the Type field's value. func (s *CreateComputeEnvironmentInput) SetType(v string) *CreateComputeEnvironmentInput { s.Type = &v @@ -3277,8 +3702,15 @@ type CreateJobQueueInput struct { Priority *int64 `locationName:"priority" type:"integer" required:"true"` // The state of the job queue. If the job queue state is ENABLED, it is able - // to accept jobs. + // to accept jobs. If the job queue state is DISABLED, new jobs cannot be added + // to the queue, but jobs already in the queue can finish. State *string `locationName:"state" type:"string" enum:"JQState"` + + // The tags that you apply to the job queue to help you categorize and organize + // your resources. Each tag consists of a key and an optional value. For more + // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in AWS General Reference. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } // String returns the string representation @@ -3303,6 +3735,9 @@ func (s *CreateJobQueueInput) Validate() error { if s.Priority == nil { invalidParams.Add(request.NewErrParamRequired("Priority")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.ComputeEnvironmentOrder != nil { for i, v := range s.ComputeEnvironmentOrder { if v == nil { @@ -3344,6 +3779,12 @@ func (s *CreateJobQueueInput) SetState(v string) *CreateJobQueueInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateJobQueueInput) SetTags(v map[string]*string) *CreateJobQueueInput { + s.Tags = v + return s +} + type CreateJobQueueOutput struct { _ struct{} `type:"structure"` @@ -4019,6 +4460,9 @@ type JobDefinition struct { // The status of the job definition. Status *string `locationName:"status" type:"string"` + // The tags applied to the job definition. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` + // The timeout configuration for jobs that are submitted with this job definition. // You can specify a timeout duration after which AWS Batch terminates your // jobs if they have not finished. @@ -4088,6 +4532,12 @@ func (s *JobDefinition) SetStatus(v string) *JobDefinition { return s } +// SetTags sets the Tags field's value. +func (s *JobDefinition) SetTags(v map[string]*string) *JobDefinition { + s.Tags = v + return s +} + // SetTimeout sets the Timeout field's value. func (s *JobDefinition) SetTimeout(v *JobTimeout) *JobDefinition { s.Timeout = v @@ -4147,16 +4597,18 @@ type JobDetail struct { // the job. Container *ContainerDetail `locationName:"container" type:"structure"` - // The Unix timestamp (in seconds and milliseconds) for when the job was created. - // For non-array jobs and parent array jobs, this is when the job entered the - // SUBMITTED state (at the time SubmitJob was called). 
For array child jobs, - // this is when the child job was spawned by its parent and entered the PENDING - // state. + // The Unix timestamp (in milliseconds) for when the job was created. For non-array + // jobs and parent array jobs, this is when the job entered the SUBMITTED state + // (at the time SubmitJob was called). For array child jobs, this is when the + // child job was spawned by its parent and entered the PENDING state. CreatedAt *int64 `locationName:"createdAt" type:"long"` // A list of job IDs on which this job depends. DependsOn []*JobDependency `locationName:"dependsOn" type:"list"` + // The Amazon Resource Name (ARN) of the job. + JobArn *string `locationName:"jobArn" type:"string"` + // The job definition that is used by this job. // // JobDefinition is a required field @@ -4192,8 +4644,9 @@ type JobDetail struct { // The retry strategy to use for this job if an attempt fails. RetryStrategy *RetryStrategy `locationName:"retryStrategy" type:"structure"` - // The Unix timestamp (in seconds and milliseconds) for when the job was started - // (when the job transitioned from the STARTING state to the RUNNING state). + // The Unix timestamp (in milliseconds) for when the job was started (when the + // job transitioned from the STARTING state to the RUNNING state). This parameter + // is not provided for child jobs of array jobs or multi-node parallel jobs. // // StartedAt is a required field StartedAt *int64 `locationName:"startedAt" type:"long" required:"true"` @@ -4211,11 +4664,14 @@ type JobDetail struct { // status of the job. StatusReason *string `locationName:"statusReason" type:"string"` - // The Unix timestamp (in seconds and milliseconds) for when the job was stopped - // (when the job transitioned from the RUNNING state to a terminal state, such - // as SUCCEEDED or FAILED). + // The Unix timestamp (in milliseconds) for when the job was stopped (when the + // job transitioned from the RUNNING state to a terminal state, such as SUCCEEDED + // or FAILED). StoppedAt *int64 `locationName:"stoppedAt" type:"long"` + // The tags applied to the job. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` + // The timeout configuration for the job. Timeout *JobTimeout `locationName:"timeout" type:"structure"` } @@ -4260,6 +4716,12 @@ func (s *JobDetail) SetDependsOn(v []*JobDependency) *JobDetail { return s } +// SetJobArn sets the JobArn field's value. +func (s *JobDetail) SetJobArn(v string) *JobDetail { + s.JobArn = &v + return s +} + // SetJobDefinition sets the JobDefinition field's value. func (s *JobDetail) SetJobDefinition(v string) *JobDetail { s.JobDefinition = &v @@ -4332,6 +4794,12 @@ func (s *JobDetail) SetStoppedAt(v int64) *JobDetail { return s } +// SetTags sets the Tags field's value. +func (s *JobDetail) SetTags(v map[string]*string) *JobDetail { + s.Tags = v + return s +} + // SetTimeout sets the Timeout field's value. func (s *JobDetail) SetTimeout(v *JobTimeout) *JobDetail { s.Timeout = v @@ -4364,7 +4832,10 @@ type JobQueueDetail struct { // Priority is a required field Priority *int64 `locationName:"priority" type:"integer" required:"true"` - // Describes the ability of the queue to accept new jobs. + // Describes the ability of the queue to accept new jobs. If the job queue state + // is ENABLED, it is able to accept jobs. If the job queue state is DISABLED, + // new jobs cannot be added to the queue, but jobs already in the queue can + // finish. 
// // State is a required field State *string `locationName:"state" type:"string" required:"true" enum:"JQState"` @@ -4375,6 +4846,9 @@ type JobQueueDetail struct { // A short, human-readable string to provide additional details about the current // status of the job queue. StatusReason *string `locationName:"statusReason" type:"string"` + + // The tags applied to the job queue. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } // String returns the string representation @@ -4429,6 +4903,12 @@ func (s *JobQueueDetail) SetStatusReason(v string) *JobQueueDetail { return s } +// SetTags sets the Tags field's value. +func (s *JobQueueDetail) SetTags(v map[string]*string) *JobQueueDetail { + s.Tags = v + return s +} + // An object representing summary details of a job. type JobSummary struct { _ struct{} `type:"structure"` @@ -4446,6 +4926,9 @@ type JobSummary struct { // spawned by its parent and entered the PENDING state. CreatedAt *int64 `locationName:"createdAt" type:"long"` + // The Amazon Resource Name (ARN) of the job. + JobArn *string `locationName:"jobArn" type:"string"` + // The ID of the job. // // JobId is a required field @@ -4503,6 +4986,12 @@ func (s *JobSummary) SetCreatedAt(v int64) *JobSummary { return s } +// SetJobArn sets the JobArn field's value. +func (s *JobSummary) SetJobArn(v string) *JobSummary { + s.JobArn = &v + return s +} + // SetJobId sets the JobId field's value. func (s *JobSummary) SetJobId(v string) *JobSummary { s.JobId = &v @@ -4617,9 +5106,12 @@ type LaunchTemplateSpecification struct { // The name of the launch template. LaunchTemplateName *string `locationName:"launchTemplateName" type:"string"` - // The version number of the launch template. + // The version number of the launch template, $Latest, or $Default. + // + // If the value is $Latest, the latest version of the launch template is used. + // If the value is $Default, the default version of the launch template is used. // - // Default: The default version of the launch template. + // Default: $Default. Version *string `locationName:"version" type:"string"` } @@ -4661,6 +5153,43 @@ type LinuxParameters struct { // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) // and the --device option to docker run (https://docs.docker.com/engine/reference/run/). Devices []*Device `locationName:"devices" type:"list"` + + // If true, run an init process inside the container that forwards signals and + // reaps processes. This parameter maps to the --init option to docker run (https://docs.docker.com/engine/reference/run/). + // This parameter requires version 1.25 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log into your container instance and run the following + // command: sudo docker version | grep "Server API version" + InitProcessEnabled *bool `locationName:"initProcessEnabled" type:"boolean"` + + // The total amount of swap memory (in MiB) a container can use. This parameter + // will be translated to the --memory-swap option to docker run (https://docs.docker.com/engine/reference/run/) + // where the value would be the sum of the container memory plus the maxSwap + // value. + // + // If a maxSwap value of 0 is specified, the container will not use swap. Accepted + // values are 0 or any positive integer. If the maxSwap parameter is omitted, + // the container will use the swap configuration for the container instance + // it is running on. 
A maxSwap value must be set for the swappiness parameter + // to be used. + MaxSwap *int64 `locationName:"maxSwap" type:"integer"` + + // The value for the size (in MiB) of the /dev/shm volume. This parameter maps + // to the --shm-size option to docker run (https://docs.docker.com/engine/reference/run/). + SharedMemorySize *int64 `locationName:"sharedMemorySize" type:"integer"` + + // This allows you to tune a container's memory swappiness behavior. A swappiness + // value of 0 will cause swapping to not happen unless absolutely necessary. + // A swappiness value of 100 will cause pages to be swapped very aggressively. + // Accepted values are whole numbers between 0 and 100. If the swappiness parameter + // is not specified, a default value of 60 is used. If a value is not specified + // for maxSwap then this parameter is ignored. This parameter maps to the --memory-swappiness + // option to docker run (https://docs.docker.com/engine/reference/run/). + Swappiness *int64 `locationName:"swappiness" type:"integer"` + + // The container path, mount options, and size (in MiB) of the tmpfs mount. + // This parameter maps to the --tmpfs option to docker run (https://docs.docker.com/engine/reference/run/). + Tmpfs []*Tmpfs `locationName:"tmpfs" type:"list"` } // String returns the string representation @@ -4686,6 +5215,16 @@ func (s *LinuxParameters) Validate() error { } } } + if s.Tmpfs != nil { + for i, v := range s.Tmpfs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tmpfs", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4699,6 +5238,36 @@ func (s *LinuxParameters) SetDevices(v []*Device) *LinuxParameters { return s } +// SetInitProcessEnabled sets the InitProcessEnabled field's value. +func (s *LinuxParameters) SetInitProcessEnabled(v bool) *LinuxParameters { + s.InitProcessEnabled = &v + return s +} + +// SetMaxSwap sets the MaxSwap field's value. +func (s *LinuxParameters) SetMaxSwap(v int64) *LinuxParameters { + s.MaxSwap = &v + return s +} + +// SetSharedMemorySize sets the SharedMemorySize field's value. +func (s *LinuxParameters) SetSharedMemorySize(v int64) *LinuxParameters { + s.SharedMemorySize = &v + return s +} + +// SetSwappiness sets the Swappiness field's value. +func (s *LinuxParameters) SetSwappiness(v int64) *LinuxParameters { + s.Swappiness = &v + return s +} + +// SetTmpfs sets the Tmpfs field's value. +func (s *LinuxParameters) SetTmpfs(v []*Tmpfs) *LinuxParameters { + s.Tmpfs = v + return s +} + type ListJobsInput struct { _ struct{} `type:"structure"` @@ -4821,6 +5390,206 @@ func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput { return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the resource for which to + // list the tags. AWS Batch resources that support tags are compute environments, + // jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node + // parallel (MNP) jobs are not supported. 
+ // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags for the resource. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +// Log configuration options to send to a custom log driver for the container. +type LogConfiguration struct { + _ struct{} `type:"structure"` + + // The log driver to use for the container. The valid values listed for this + // parameter are log drivers that the Amazon ECS container agent can communicate + // with by default. + // + // The supported log drivers are awslogs, fluentd, gelf, json-file, journald, + // logentries, syslog, and splunk. + // + // awslogs + // + // Specifies the Amazon CloudWatch Logs logging driver. For more information, + // see Using the awslogs Log Driver (https://docs.aws.amazon.com/batch/latest/userguide/using_awslogs.html) + // in the AWS Batch User Guide and Amazon CloudWatch Logs logging driver (https://docs.docker.com/config/containers/logging/awslogs/) + // in the Docker documentation. + // + // fluentd + // + // Specifies the Fluentd logging driver. For more information, including usage + // and options, see Fluentd logging driver (https://docs.docker.com/config/containers/logging/fluentd/) + // in the Docker documentation. + // + // gelf + // + // Specifies the Graylog Extended Format (GELF) logging driver. For more information, + // including usage and options, see Graylog Extended Format logging driver (https://docs.docker.com/config/containers/logging/gelf/) + // in the Docker documentation. + // + // journald + // + // Specifies the journald logging driver. For more information, including usage + // and options, see Journald logging driver (https://docs.docker.com/config/containers/logging/journald/) + // in the Docker documentation. + // + // json-file + // + // Specifies the JSON file logging driver. 
For more information, including usage + // and options, see JSON File logging driver (https://docs.docker.com/config/containers/logging/json-file/) + // in the Docker documentation. + // + // splunk + // + // Specifies the Splunk logging driver. For more information, including usage + // and options, see Splunk logging driver (https://docs.docker.com/config/containers/logging/splunk/) + // in the Docker documentation. + // + // syslog + // + // Specifies the syslog logging driver. For more information, including usage + // and options, see Syslog logging driver (https://docs.docker.com/config/containers/logging/syslog/) + // in the Docker documentation. + // + // If you have a custom driver that is not listed earlier that you would like + // to work with the Amazon ECS container agent, you can fork the Amazon ECS + // container agent project that is available on GitHub (https://github.com/aws/amazon-ecs-agent) + // and customize it to work with that driver. We encourage you to submit pull + // requests for changes that you would like to have included. However, Amazon + // Web Services does not currently support running modified copies of this software. + // + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log into your container instance and run the following + // command: sudo docker version | grep "Server API version" + // + // LogDriver is a required field + LogDriver *string `locationName:"logDriver" type:"string" required:"true" enum:"LogDriver"` + + // The configuration options to send to the log driver. This parameter requires + // version 1.19 of the Docker Remote API or greater on your container instance. + // To check the Docker Remote API version on your container instance, log into + // your container instance and run the following command: sudo docker version + // | grep "Server API version" + Options map[string]*string `locationName:"options" type:"map"` + + // The secrets to pass to the log configuration. For more information, see Specifying + // Sensitive Data (https://docs.aws.amazon.com/batch/latest/userguide/specifying-sensitive-data.html) + // in the AWS Batch User Guide. + SecretOptions []*Secret `locationName:"secretOptions" type:"list"` +} + +// String returns the string representation +func (s LogConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LogConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LogConfiguration"} + if s.LogDriver == nil { + invalidParams.Add(request.NewErrParamRequired("LogDriver")) + } + if s.SecretOptions != nil { + for i, v := range s.SecretOptions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecretOptions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogDriver sets the LogDriver field's value. +func (s *LogConfiguration) SetLogDriver(v string) *LogConfiguration { + s.LogDriver = &v + return s +} + +// SetOptions sets the Options field's value. 
+func (s *LogConfiguration) SetOptions(v map[string]*string) *LogConfiguration { + s.Options = v + return s +} + +// SetSecretOptions sets the SecretOptions field's value. +func (s *LogConfiguration) SetSecretOptions(v []*Secret) *LogConfiguration { + s.SecretOptions = v + return s +} + // Details on a Docker volume mount point that is used in a job's container // properties. This parameter maps to Volumes in the Create a container (https://docs.docker.com/engine/reference/api/docker_remote_api_v1.19/#create-a-container) // section of the Docker Remote API and the --volume option to docker run. @@ -5285,6 +6054,12 @@ type RegisterJobDefinitionInput struct { // a timeout, it is not retried. RetryStrategy *RetryStrategy `locationName:"retryStrategy" type:"structure"` + // The tags that you apply to the job definition to help you categorize and + // organize your resources. Each tag consists of a key and an optional value. + // For more information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in AWS General Reference. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` + // The timeout configuration for jobs that are submitted with this job definition, // after which AWS Batch terminates your jobs if they have not finished. If // a job is terminated due to a timeout, it is not retried. The minimum value @@ -5316,6 +6091,9 @@ func (s *RegisterJobDefinitionInput) Validate() error { if s.JobDefinitionName == nil { invalidParams.Add(request.NewErrParamRequired("JobDefinitionName")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } @@ -5366,6 +6144,12 @@ func (s *RegisterJobDefinitionInput) SetRetryStrategy(v *RetryStrategy) *Registe return s } +// SetTags sets the Tags field's value. +func (s *RegisterJobDefinitionInput) SetTags(v map[string]*string) *RegisterJobDefinitionInput { + s.Tags = v + return s +} + // SetTimeout sets the Timeout field's value. func (s *RegisterJobDefinitionInput) SetTimeout(v *JobTimeout) *RegisterJobDefinitionInput { s.Timeout = v @@ -5508,10 +6292,80 @@ func (s *RetryStrategy) SetAttempts(v int64) *RetryStrategy { return s } +// An object representing the secret to expose to your container. Secrets can +// be exposed to a container in the following ways: +// +// * To inject sensitive data into your containers as environment variables, +// use the secrets container definition parameter. +// +// * To reference sensitive information in the log configuration of a container, +// use the secretOptions container definition parameter. +// +// For more information, see Specifying Sensitive Data (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data.html) +// in the Amazon Elastic Container Service Developer Guide. +type Secret struct { + _ struct{} `type:"structure"` + + // The name of the secret. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // The secret to expose to the container. The supported values are either the + // full ARN of the AWS Secrets Manager secret or the full ARN of the parameter + // in the AWS Systems Manager Parameter Store. + // + // If the AWS Systems Manager Parameter Store parameter exists in the same Region + // as the task you are launching, then you can use either the full ARN or name + // of the parameter. 
If the parameter exists in a different Region, then the + // full ARN must be specified. + // + // ValueFrom is a required field + ValueFrom *string `locationName:"valueFrom" type:"string" required:"true"` +} + +// String returns the string representation +func (s Secret) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Secret) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Secret) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Secret"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.ValueFrom == nil { + invalidParams.Add(request.NewErrParamRequired("ValueFrom")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *Secret) SetName(v string) *Secret { + s.Name = &v + return s +} + +// SetValueFrom sets the ValueFrom field's value. +func (s *Secret) SetValueFrom(v string) *Secret { + s.ValueFrom = &v + return s +} + // These errors are usually caused by a server issue. type ServerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5528,17 +6382,17 @@ func (s ServerException) GoString() string { func newErrorServerException(v protocol.ResponseMetadata) error { return &ServerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServerException) Code() string { +func (s *ServerException) Code() string { return "ServerException" } // Message returns the exception's message. -func (s ServerException) Message() string { +func (s *ServerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5546,22 +6400,22 @@ func (s ServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServerException) OrigErr() error { +func (s *ServerException) OrigErr() error { return nil } -func (s ServerException) Error() string { +func (s *ServerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServerException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServerException) RequestID() string { + return s.RespMetadata.RequestID } type SubmitJobInput struct { @@ -5627,6 +6481,12 @@ type SubmitJobInput struct { // defined in the job definition. RetryStrategy *RetryStrategy `locationName:"retryStrategy" type:"structure"` + // The tags that you apply to the job request to help you categorize and organize + // your resources. Each tag consists of a key and an optional value. For more + // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in AWS General Reference. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` + // The timeout configuration for this SubmitJob operation. You can specify a // timeout duration after which AWS Batch terminates your jobs if they have // not finished. 
If a job is terminated due to a timeout, it is not retried. @@ -5660,6 +6520,9 @@ func (s *SubmitJobInput) Validate() error { if s.JobQueue == nil { invalidParams.Add(request.NewErrParamRequired("JobQueue")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.ContainerOverrides != nil { if err := s.ContainerOverrides.Validate(); err != nil { invalidParams.AddNested("ContainerOverrides", err.(request.ErrInvalidParams)) @@ -5731,6 +6594,12 @@ func (s *SubmitJobInput) SetRetryStrategy(v *RetryStrategy) *SubmitJobInput { return s } +// SetTags sets the Tags field's value. +func (s *SubmitJobInput) SetTags(v map[string]*string) *SubmitJobInput { + s.Tags = v + return s +} + // SetTimeout sets the Timeout field's value. func (s *SubmitJobInput) SetTimeout(v *JobTimeout) *SubmitJobInput { s.Timeout = v @@ -5740,6 +6609,9 @@ func (s *SubmitJobInput) SetTimeout(v *JobTimeout) *SubmitJobInput { type SubmitJobOutput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) for the job. + JobArn *string `locationName:"jobArn" type:"string"` + // The unique identifier for the job. // // JobId is a required field @@ -5761,6 +6633,12 @@ func (s SubmitJobOutput) GoString() string { return s.String() } +// SetJobArn sets the JobArn field's value. +func (s *SubmitJobOutput) SetJobArn(v string) *SubmitJobOutput { + s.JobArn = &v + return s +} + // SetJobId sets the JobId field's value. func (s *SubmitJobOutput) SetJobId(v string) *SubmitJobOutput { s.JobId = &v @@ -5773,6 +6651,84 @@ func (s *SubmitJobOutput) SetJobName(v string) *SubmitJobOutput { return s } +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource to which to add tags. AWS + // Batch resources that support tags are compute environments, jobs, job definitions, + // and job queues. ARNs for child jobs of array and multi-node parallel (MNP) + // jobs are not supported. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // The tags that you apply to the resource to help you categorize and organize + // your resources. Each tag consists of a key and an optional value. For more + // information, see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in AWS General Reference. + // + // Tags is a required field + Tags map[string]*string `locationName:"tags" min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. 
+func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + type TerminateJobInput struct { _ struct{} `type:"structure"` @@ -5841,6 +6797,75 @@ func (s TerminateJobOutput) GoString() string { return s.String() } +// The container path, mount options, and size of the tmpfs mount. +type Tmpfs struct { + _ struct{} `type:"structure"` + + // The absolute file path in the container where the tmpfs volume is to be mounted. + // + // ContainerPath is a required field + ContainerPath *string `locationName:"containerPath" type:"string" required:"true"` + + // The list of tmpfs volume mount options. + // + // Valid values: "defaults" | "ro" | "rw" | "suid" | "nosuid" | "dev" | "nodev" + // | "exec" | "noexec" | "sync" | "async" | "dirsync" | "remount" | "mand" | + // "nomand" | "atime" | "noatime" | "diratime" | "nodiratime" | "bind" | "rbind" + // | "unbindable" | "runbindable" | "private" | "rprivate" | "shared" | "rshared" + // | "slave" | "rslave" | "relatime" | "norelatime" | "strictatime" | "nostrictatime" + // | "mode" | "uid" | "gid" | "nr_inodes" | "nr_blocks" | "mpol" + MountOptions []*string `locationName:"mountOptions" type:"list"` + + // The size (in MiB) of the tmpfs volume. + // + // Size is a required field + Size *int64 `locationName:"size" type:"integer" required:"true"` +} + +// String returns the string representation +func (s Tmpfs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tmpfs) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tmpfs) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tmpfs"} + if s.ContainerPath == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerPath")) + } + if s.Size == nil { + invalidParams.Add(request.NewErrParamRequired("Size")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerPath sets the ContainerPath field's value. +func (s *Tmpfs) SetContainerPath(v string) *Tmpfs { + s.ContainerPath = &v + return s +} + +// SetMountOptions sets the MountOptions field's value. +func (s *Tmpfs) SetMountOptions(v []*string) *Tmpfs { + s.MountOptions = v + return s +} + +// SetSize sets the Size field's value. +func (s *Tmpfs) SetSize(v int64) *Tmpfs { + s.Size = &v + return s +} + // The ulimit settings to pass to the container. type Ulimit struct { _ struct{} `type:"structure"` @@ -5908,6 +6933,81 @@ func (s *Ulimit) SetSoftLimit(v int64) *Ulimit { return s } +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource from which to delete tags. + // AWS Batch resources that support tags are compute environments, jobs, job + // definitions, and job queues. ARNs for child jobs of array and multi-node + // parallel (MNP) jobs are not supported. 
+ // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + + // The keys of the tags to be removed. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateComputeEnvironmentInput struct { _ struct{} `type:"structure"` @@ -6040,7 +7140,10 @@ type UpdateJobQueueInput struct { // preference over a job queue with a priority value of 1. Priority *int64 `locationName:"priority" type:"integer"` - // Describes the queue's ability to accept new jobs. + // Describes the queue's ability to accept new jobs. If the job queue state + // is ENABLED, it is able to accept jobs. If the job queue state is DISABLED, + // new jobs cannot be added to the queue, but jobs already in the queue can + // finish. 
State *string `locationName:"state" type:"string" enum:"JQState"` } @@ -6180,6 +7283,14 @@ const ( ArrayJobDependencySequential = "SEQUENTIAL" ) +// ArrayJobDependency_Values returns all elements of the ArrayJobDependency enum +func ArrayJobDependency_Values() []string { + return []string{ + ArrayJobDependencyNToN, + ArrayJobDependencySequential, + } +} + const ( // CEStateEnabled is a CEState enum value CEStateEnabled = "ENABLED" @@ -6188,6 +7299,14 @@ const ( CEStateDisabled = "DISABLED" ) +// CEState_Values returns all elements of the CEState enum +func CEState_Values() []string { + return []string{ + CEStateEnabled, + CEStateDisabled, + } +} + const ( // CEStatusCreating is a CEStatus enum value CEStatusCreating = "CREATING" @@ -6208,6 +7327,18 @@ const ( CEStatusInvalid = "INVALID" ) +// CEStatus_Values returns all elements of the CEStatus enum +func CEStatus_Values() []string { + return []string{ + CEStatusCreating, + CEStatusUpdating, + CEStatusDeleting, + CEStatusDeleted, + CEStatusValid, + CEStatusInvalid, + } +} + const ( // CETypeManaged is a CEType enum value CETypeManaged = "MANAGED" @@ -6216,6 +7347,14 @@ const ( CETypeUnmanaged = "UNMANAGED" ) +// CEType_Values returns all elements of the CEType enum +func CEType_Values() []string { + return []string{ + CETypeManaged, + CETypeUnmanaged, + } +} + const ( // CRAllocationStrategyBestFit is a CRAllocationStrategy enum value CRAllocationStrategyBestFit = "BEST_FIT" @@ -6227,6 +7366,15 @@ const ( CRAllocationStrategySpotCapacityOptimized = "SPOT_CAPACITY_OPTIMIZED" ) +// CRAllocationStrategy_Values returns all elements of the CRAllocationStrategy enum +func CRAllocationStrategy_Values() []string { + return []string{ + CRAllocationStrategyBestFit, + CRAllocationStrategyBestFitProgressive, + CRAllocationStrategySpotCapacityOptimized, + } +} + const ( // CRTypeEc2 is a CRType enum value CRTypeEc2 = "EC2" @@ -6235,6 +7383,14 @@ const ( CRTypeSpot = "SPOT" ) +// CRType_Values returns all elements of the CRType enum +func CRType_Values() []string { + return []string{ + CRTypeEc2, + CRTypeSpot, + } +} + const ( // DeviceCgroupPermissionRead is a DeviceCgroupPermission enum value DeviceCgroupPermissionRead = "READ" @@ -6246,6 +7402,15 @@ const ( DeviceCgroupPermissionMknod = "MKNOD" ) +// DeviceCgroupPermission_Values returns all elements of the DeviceCgroupPermission enum +func DeviceCgroupPermission_Values() []string { + return []string{ + DeviceCgroupPermissionRead, + DeviceCgroupPermissionWrite, + DeviceCgroupPermissionMknod, + } +} + const ( // JQStateEnabled is a JQState enum value JQStateEnabled = "ENABLED" @@ -6254,6 +7419,14 @@ const ( JQStateDisabled = "DISABLED" ) +// JQState_Values returns all elements of the JQState enum +func JQState_Values() []string { + return []string{ + JQStateEnabled, + JQStateDisabled, + } +} + const ( // JQStatusCreating is a JQStatus enum value JQStatusCreating = "CREATING" @@ -6274,6 +7447,18 @@ const ( JQStatusInvalid = "INVALID" ) +// JQStatus_Values returns all elements of the JQStatus enum +func JQStatus_Values() []string { + return []string{ + JQStatusCreating, + JQStatusUpdating, + JQStatusDeleting, + JQStatusDeleted, + JQStatusValid, + JQStatusInvalid, + } +} + const ( // JobDefinitionTypeContainer is a JobDefinitionType enum value JobDefinitionTypeContainer = "container" @@ -6282,6 +7467,14 @@ const ( JobDefinitionTypeMultinode = "multinode" ) +// JobDefinitionType_Values returns all elements of the JobDefinitionType enum +func JobDefinitionType_Values() []string { + return []string{ + 
JobDefinitionTypeContainer, + JobDefinitionTypeMultinode, + } +} + const ( // JobStatusSubmitted is a JobStatus enum value JobStatusSubmitted = "SUBMITTED" @@ -6305,7 +7498,63 @@ const ( JobStatusFailed = "FAILED" ) +// JobStatus_Values returns all elements of the JobStatus enum +func JobStatus_Values() []string { + return []string{ + JobStatusSubmitted, + JobStatusPending, + JobStatusRunnable, + JobStatusStarting, + JobStatusRunning, + JobStatusSucceeded, + JobStatusFailed, + } +} + +const ( + // LogDriverJsonFile is a LogDriver enum value + LogDriverJsonFile = "json-file" + + // LogDriverSyslog is a LogDriver enum value + LogDriverSyslog = "syslog" + + // LogDriverJournald is a LogDriver enum value + LogDriverJournald = "journald" + + // LogDriverGelf is a LogDriver enum value + LogDriverGelf = "gelf" + + // LogDriverFluentd is a LogDriver enum value + LogDriverFluentd = "fluentd" + + // LogDriverAwslogs is a LogDriver enum value + LogDriverAwslogs = "awslogs" + + // LogDriverSplunk is a LogDriver enum value + LogDriverSplunk = "splunk" +) + +// LogDriver_Values returns all elements of the LogDriver enum +func LogDriver_Values() []string { + return []string{ + LogDriverJsonFile, + LogDriverSyslog, + LogDriverJournald, + LogDriverGelf, + LogDriverFluentd, + LogDriverAwslogs, + LogDriverSplunk, + } +} + const ( // ResourceTypeGpu is a ResourceType enum value ResourceTypeGpu = "GPU" ) + +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeGpu, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/batch/service.go b/vendor/github.com/aws/aws-sdk-go/service/batch/service.go index c8114539b..184de6342 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/batch/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/batch/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/budgets/api.go b/vendor/github.com/aws/aws-sdk-go/service/budgets/api.go index d5a922714..878b6c880 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/budgets/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/budgets/api.go @@ -109,6 +109,99 @@ func (c *Budgets) CreateBudgetWithContext(ctx aws.Context, input *CreateBudgetIn return out, req.Send() } +const opCreateBudgetAction = "CreateBudgetAction" + +// CreateBudgetActionRequest generates a "aws/request.Request" representing the +// client's request for the CreateBudgetAction operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBudgetAction for more information on using the CreateBudgetAction +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBudgetActionRequest method. 
+// req, resp := client.CreateBudgetActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *Budgets) CreateBudgetActionRequest(input *CreateBudgetActionInput) (req *request.Request, output *CreateBudgetActionOutput) { + op := &request.Operation{ + Name: opCreateBudgetAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBudgetActionInput{} + } + + output = &CreateBudgetActionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBudgetAction API operation for AWS Budgets. +// +// Creates a budget action. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Budgets's +// API operation CreateBudgetAction for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// An error on the client occurred. Typically, the cause is an invalid input +// value. +// +// * InternalErrorException +// An error on the server occurred during the processing of your request. Try +// again later. +// +// * CreationLimitExceededException +// You've exceeded the notification or subscriber limit. +// +// * DuplicateRecordException +// The budget name already exists. Budget names must be unique within an account. +// +// * NotFoundException +// We can’t locate the resource that you specified. +// +// * AccessDeniedException +// You are not authorized to use this operation with the given parameters. +// +func (c *Budgets) CreateBudgetAction(input *CreateBudgetActionInput) (*CreateBudgetActionOutput, error) { + req, out := c.CreateBudgetActionRequest(input) + return out, req.Send() +} + +// CreateBudgetActionWithContext is the same as CreateBudgetAction with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBudgetAction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) CreateBudgetActionWithContext(ctx aws.Context, input *CreateBudgetActionInput, opts ...request.Option) (*CreateBudgetActionOutput, error) { + req, out := c.CreateBudgetActionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateNotification = "CreateNotification" // CreateNotificationRequest generates a "aws/request.Request" representing the @@ -390,6 +483,97 @@ func (c *Budgets) DeleteBudgetWithContext(ctx aws.Context, input *DeleteBudgetIn return out, req.Send() } +const opDeleteBudgetAction = "DeleteBudgetAction" + +// DeleteBudgetActionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBudgetAction operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBudgetAction for more information on using the DeleteBudgetAction +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBudgetActionRequest method. +// req, resp := client.DeleteBudgetActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *Budgets) DeleteBudgetActionRequest(input *DeleteBudgetActionInput) (req *request.Request, output *DeleteBudgetActionOutput) { + op := &request.Operation{ + Name: opDeleteBudgetAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBudgetActionInput{} + } + + output = &DeleteBudgetActionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteBudgetAction API operation for AWS Budgets. +// +// Deletes a budget action. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Budgets's +// API operation DeleteBudgetAction for usage and error information. +// +// Returned Error Types: +// * InternalErrorException +// An error on the server occurred during the processing of your request. Try +// again later. +// +// * InvalidParameterException +// An error on the client occurred. Typically, the cause is an invalid input +// value. +// +// * NotFoundException +// We can’t locate the resource that you specified. +// +// * AccessDeniedException +// You are not authorized to use this operation with the given parameters. +// +// * ResourceLockedException +// The request was received and recognized by the server, but the server rejected +// that particular method for the requested resource. +// +func (c *Budgets) DeleteBudgetAction(input *DeleteBudgetActionInput) (*DeleteBudgetActionOutput, error) { + req, out := c.DeleteBudgetActionRequest(input) + return out, req.Send() +} + +// DeleteBudgetActionWithContext is the same as DeleteBudgetAction with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBudgetAction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) DeleteBudgetActionWithContext(ctx aws.Context, input *DeleteBudgetActionInput, opts ...request.Option) (*DeleteBudgetActionOutput, error) { + req, out := c.DeleteBudgetActionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteNotification = "DeleteNotification" // DeleteNotificationRequest generates a "aws/request.Request" representing the @@ -662,57 +846,56 @@ func (c *Budgets) DescribeBudgetWithContext(ctx aws.Context, input *DescribeBudg return out, req.Send() } -const opDescribeBudgetPerformanceHistory = "DescribeBudgetPerformanceHistory" +const opDescribeBudgetAction = "DescribeBudgetAction" -// DescribeBudgetPerformanceHistoryRequest generates a "aws/request.Request" representing the -// client's request for the DescribeBudgetPerformanceHistory operation. The "output" return +// DescribeBudgetActionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBudgetAction operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeBudgetPerformanceHistory for more information on using the DescribeBudgetPerformanceHistory +// See DescribeBudgetAction for more information on using the DescribeBudgetAction // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeBudgetPerformanceHistoryRequest method. -// req, resp := client.DescribeBudgetPerformanceHistoryRequest(params) +// // Example sending a request using the DescribeBudgetActionRequest method. +// req, resp := client.DescribeBudgetActionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *Budgets) DescribeBudgetPerformanceHistoryRequest(input *DescribeBudgetPerformanceHistoryInput) (req *request.Request, output *DescribeBudgetPerformanceHistoryOutput) { +func (c *Budgets) DescribeBudgetActionRequest(input *DescribeBudgetActionInput) (req *request.Request, output *DescribeBudgetActionOutput) { op := &request.Operation{ - Name: opDescribeBudgetPerformanceHistory, + Name: opDescribeBudgetAction, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DescribeBudgetPerformanceHistoryInput{} + input = &DescribeBudgetActionInput{} } - output = &DescribeBudgetPerformanceHistoryOutput{} + output = &DescribeBudgetActionOutput{} req = c.newRequest(op, input, output) return } -// DescribeBudgetPerformanceHistory API operation for AWS Budgets. +// DescribeBudgetAction API operation for AWS Budgets. // -// Describes the history for DAILY, MONTHLY, and QUARTERLY budgets. Budget history -// isn't available for ANNUAL budgets. +// Describes a budget action detail. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Budgets's -// API operation DescribeBudgetPerformanceHistory for usage and error information. +// API operation DescribeBudgetAction for usage and error information. // // Returned Error Types: // * InternalErrorException @@ -726,90 +909,86 @@ func (c *Budgets) DescribeBudgetPerformanceHistoryRequest(input *DescribeBudgetP // * NotFoundException // We can’t locate the resource that you specified. // -// * InvalidNextTokenException -// The pagination token is invalid. -// -// * ExpiredNextTokenException -// The pagination token expired. -// // * AccessDeniedException // You are not authorized to use this operation with the given parameters. 
// -func (c *Budgets) DescribeBudgetPerformanceHistory(input *DescribeBudgetPerformanceHistoryInput) (*DescribeBudgetPerformanceHistoryOutput, error) { - req, out := c.DescribeBudgetPerformanceHistoryRequest(input) +func (c *Budgets) DescribeBudgetAction(input *DescribeBudgetActionInput) (*DescribeBudgetActionOutput, error) { + req, out := c.DescribeBudgetActionRequest(input) return out, req.Send() } -// DescribeBudgetPerformanceHistoryWithContext is the same as DescribeBudgetPerformanceHistory with the addition of +// DescribeBudgetActionWithContext is the same as DescribeBudgetAction with the addition of // the ability to pass a context and additional request options. // -// See DescribeBudgetPerformanceHistory for details on how to use this API operation. +// See DescribeBudgetAction for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Budgets) DescribeBudgetPerformanceHistoryWithContext(ctx aws.Context, input *DescribeBudgetPerformanceHistoryInput, opts ...request.Option) (*DescribeBudgetPerformanceHistoryOutput, error) { - req, out := c.DescribeBudgetPerformanceHistoryRequest(input) +func (c *Budgets) DescribeBudgetActionWithContext(ctx aws.Context, input *DescribeBudgetActionInput, opts ...request.Option) (*DescribeBudgetActionOutput, error) { + req, out := c.DescribeBudgetActionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeBudgets = "DescribeBudgets" +const opDescribeBudgetActionHistories = "DescribeBudgetActionHistories" -// DescribeBudgetsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeBudgets operation. The "output" return +// DescribeBudgetActionHistoriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBudgetActionHistories operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeBudgets for more information on using the DescribeBudgets +// See DescribeBudgetActionHistories for more information on using the DescribeBudgetActionHistories // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeBudgetsRequest method. -// req, resp := client.DescribeBudgetsRequest(params) +// // Example sending a request using the DescribeBudgetActionHistoriesRequest method. 
+// req, resp := client.DescribeBudgetActionHistoriesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *Budgets) DescribeBudgetsRequest(input *DescribeBudgetsInput) (req *request.Request, output *DescribeBudgetsOutput) { +func (c *Budgets) DescribeBudgetActionHistoriesRequest(input *DescribeBudgetActionHistoriesInput) (req *request.Request, output *DescribeBudgetActionHistoriesOutput) { op := &request.Operation{ - Name: opDescribeBudgets, + Name: opDescribeBudgetActionHistories, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &DescribeBudgetsInput{} + input = &DescribeBudgetActionHistoriesInput{} } - output = &DescribeBudgetsOutput{} + output = &DescribeBudgetActionHistoriesOutput{} req = c.newRequest(op, input, output) return } -// DescribeBudgets API operation for AWS Budgets. +// DescribeBudgetActionHistories API operation for AWS Budgets. // -// Lists the budgets that are associated with an account. -// -// The Request Syntax section shows the BudgetLimit syntax. For PlannedBudgetLimits, -// see the Examples (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_DescribeBudgets.html#API_DescribeBudgets_Examples) -// section. +// Describes a budget action history detail. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Budgets's -// API operation DescribeBudgets for usage and error information. +// API operation DescribeBudgetActionHistories for usage and error information. // // Returned Error Types: // * InternalErrorException @@ -823,86 +1002,141 @@ func (c *Budgets) DescribeBudgetsRequest(input *DescribeBudgetsInput) (req *requ // * NotFoundException // We can’t locate the resource that you specified. // -// * InvalidNextTokenException -// The pagination token is invalid. -// -// * ExpiredNextTokenException -// The pagination token expired. -// // * AccessDeniedException // You are not authorized to use this operation with the given parameters. // -func (c *Budgets) DescribeBudgets(input *DescribeBudgetsInput) (*DescribeBudgetsOutput, error) { - req, out := c.DescribeBudgetsRequest(input) +// * InvalidNextTokenException +// The pagination token is invalid. +// +func (c *Budgets) DescribeBudgetActionHistories(input *DescribeBudgetActionHistoriesInput) (*DescribeBudgetActionHistoriesOutput, error) { + req, out := c.DescribeBudgetActionHistoriesRequest(input) return out, req.Send() } -// DescribeBudgetsWithContext is the same as DescribeBudgets with the addition of +// DescribeBudgetActionHistoriesWithContext is the same as DescribeBudgetActionHistories with the addition of // the ability to pass a context and additional request options. // -// See DescribeBudgets for details on how to use this API operation. +// See DescribeBudgetActionHistories for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
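The Paginator block wired into the operation above is what enables the new Pages helpers. A minimal sketch of walking DescribeBudgetActionHistories page by page; the required identifiers mirror the assumptions in the previous sketch and are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/budgets"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := budgets.New(sess)

	input := &budgets.DescribeBudgetActionHistoriesInput{
		AccountId:  aws.String("111122223333"),                         // placeholder
		BudgetName: aws.String("example-budget"),                       // placeholder
		ActionId:   aws.String("11111111-2222-3333-4444-555555555555"), // placeholder
	}

	// The SDK follows NextToken automatically; the callback runs once per page
	// and paging stops when it returns false or there are no more pages.
	err := svc.DescribeBudgetActionHistoriesPages(input,
		func(page *budgets.DescribeBudgetActionHistoriesOutput, lastPage bool) bool {
			fmt.Println(page)
			return !lastPage
		})
	if err != nil {
		fmt.Println("pagination failed:", err)
	}
}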
-func (c *Budgets) DescribeBudgetsWithContext(ctx aws.Context, input *DescribeBudgetsInput, opts ...request.Option) (*DescribeBudgetsOutput, error) { - req, out := c.DescribeBudgetsRequest(input) +func (c *Budgets) DescribeBudgetActionHistoriesWithContext(ctx aws.Context, input *DescribeBudgetActionHistoriesInput, opts ...request.Option) (*DescribeBudgetActionHistoriesOutput, error) { + req, out := c.DescribeBudgetActionHistoriesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeNotificationsForBudget = "DescribeNotificationsForBudget" +// DescribeBudgetActionHistoriesPages iterates over the pages of a DescribeBudgetActionHistories operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeBudgetActionHistories method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeBudgetActionHistories operation. +// pageNum := 0 +// err := client.DescribeBudgetActionHistoriesPages(params, +// func(page *budgets.DescribeBudgetActionHistoriesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Budgets) DescribeBudgetActionHistoriesPages(input *DescribeBudgetActionHistoriesInput, fn func(*DescribeBudgetActionHistoriesOutput, bool) bool) error { + return c.DescribeBudgetActionHistoriesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// DescribeNotificationsForBudgetRequest generates a "aws/request.Request" representing the -// client's request for the DescribeNotificationsForBudget operation. The "output" return +// DescribeBudgetActionHistoriesPagesWithContext same as DescribeBudgetActionHistoriesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) DescribeBudgetActionHistoriesPagesWithContext(ctx aws.Context, input *DescribeBudgetActionHistoriesInput, fn func(*DescribeBudgetActionHistoriesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeBudgetActionHistoriesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeBudgetActionHistoriesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeBudgetActionHistoriesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeBudgetActionsForAccount = "DescribeBudgetActionsForAccount" + +// DescribeBudgetActionsForAccountRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBudgetActionsForAccount operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See DescribeNotificationsForBudget for more information on using the DescribeNotificationsForBudget +// See DescribeBudgetActionsForAccount for more information on using the DescribeBudgetActionsForAccount // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeNotificationsForBudgetRequest method. -// req, resp := client.DescribeNotificationsForBudgetRequest(params) +// // Example sending a request using the DescribeBudgetActionsForAccountRequest method. +// req, resp := client.DescribeBudgetActionsForAccountRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *Budgets) DescribeNotificationsForBudgetRequest(input *DescribeNotificationsForBudgetInput) (req *request.Request, output *DescribeNotificationsForBudgetOutput) { +func (c *Budgets) DescribeBudgetActionsForAccountRequest(input *DescribeBudgetActionsForAccountInput) (req *request.Request, output *DescribeBudgetActionsForAccountOutput) { op := &request.Operation{ - Name: opDescribeNotificationsForBudget, + Name: opDescribeBudgetActionsForAccount, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &DescribeNotificationsForBudgetInput{} + input = &DescribeBudgetActionsForAccountInput{} } - output = &DescribeNotificationsForBudgetOutput{} + output = &DescribeBudgetActionsForAccountOutput{} req = c.newRequest(op, input, output) return } -// DescribeNotificationsForBudget API operation for AWS Budgets. +// DescribeBudgetActionsForAccount API operation for AWS Budgets. // -// Lists the notifications that are associated with a budget. +// Describes all of the budget actions for an account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Budgets's -// API operation DescribeNotificationsForBudget for usage and error information. +// API operation DescribeBudgetActionsForAccount for usage and error information. // // Returned Error Types: // * InternalErrorException @@ -913,190 +1147,290 @@ func (c *Budgets) DescribeNotificationsForBudgetRequest(input *DescribeNotificat // An error on the client occurred. Typically, the cause is an invalid input // value. // -// * NotFoundException -// We can’t locate the resource that you specified. +// * AccessDeniedException +// You are not authorized to use this operation with the given parameters. // // * InvalidNextTokenException // The pagination token is invalid. // -// * ExpiredNextTokenException -// The pagination token expired. -// -// * AccessDeniedException -// You are not authorized to use this operation with the given parameters. 
-// -func (c *Budgets) DescribeNotificationsForBudget(input *DescribeNotificationsForBudgetInput) (*DescribeNotificationsForBudgetOutput, error) { - req, out := c.DescribeNotificationsForBudgetRequest(input) +func (c *Budgets) DescribeBudgetActionsForAccount(input *DescribeBudgetActionsForAccountInput) (*DescribeBudgetActionsForAccountOutput, error) { + req, out := c.DescribeBudgetActionsForAccountRequest(input) return out, req.Send() } -// DescribeNotificationsForBudgetWithContext is the same as DescribeNotificationsForBudget with the addition of +// DescribeBudgetActionsForAccountWithContext is the same as DescribeBudgetActionsForAccount with the addition of // the ability to pass a context and additional request options. // -// See DescribeNotificationsForBudget for details on how to use this API operation. +// See DescribeBudgetActionsForAccount for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Budgets) DescribeNotificationsForBudgetWithContext(ctx aws.Context, input *DescribeNotificationsForBudgetInput, opts ...request.Option) (*DescribeNotificationsForBudgetOutput, error) { - req, out := c.DescribeNotificationsForBudgetRequest(input) +func (c *Budgets) DescribeBudgetActionsForAccountWithContext(ctx aws.Context, input *DescribeBudgetActionsForAccountInput, opts ...request.Option) (*DescribeBudgetActionsForAccountOutput, error) { + req, out := c.DescribeBudgetActionsForAccountRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeSubscribersForNotification = "DescribeSubscribersForNotification" - -// DescribeSubscribersForNotificationRequest generates a "aws/request.Request" representing the -// client's request for the DescribeSubscribersForNotification operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. +// DescribeBudgetActionsForAccountPages iterates over the pages of a DescribeBudgetActionsForAccount operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// Use "Send" method on the returned Request to send the API call to the service. +// See DescribeBudgetActionsForAccount method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeBudgetActionsForAccount operation. +// pageNum := 0 +// err := client.DescribeBudgetActionsForAccountPages(params, +// func(page *budgets.DescribeBudgetActionsForAccountOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Budgets) DescribeBudgetActionsForAccountPages(input *DescribeBudgetActionsForAccountInput, fn func(*DescribeBudgetActionsForAccountOutput, bool) bool) error { + return c.DescribeBudgetActionsForAccountPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeBudgetActionsForAccountPagesWithContext same as DescribeBudgetActionsForAccountPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) DescribeBudgetActionsForAccountPagesWithContext(ctx aws.Context, input *DescribeBudgetActionsForAccountInput, fn func(*DescribeBudgetActionsForAccountOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeBudgetActionsForAccountInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeBudgetActionsForAccountRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeBudgetActionsForAccountOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeBudgetActionsForBudget = "DescribeBudgetActionsForBudget" + +// DescribeBudgetActionsForBudgetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBudgetActionsForBudget operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeSubscribersForNotification for more information on using the DescribeSubscribersForNotification +// See DescribeBudgetActionsForBudget for more information on using the DescribeBudgetActionsForBudget // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeSubscribersForNotificationRequest method. -// req, resp := client.DescribeSubscribersForNotificationRequest(params) +// // Example sending a request using the DescribeBudgetActionsForBudgetRequest method. +// req, resp := client.DescribeBudgetActionsForBudgetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *Budgets) DescribeSubscribersForNotificationRequest(input *DescribeSubscribersForNotificationInput) (req *request.Request, output *DescribeSubscribersForNotificationOutput) { +func (c *Budgets) DescribeBudgetActionsForBudgetRequest(input *DescribeBudgetActionsForBudgetInput) (req *request.Request, output *DescribeBudgetActionsForBudgetOutput) { op := &request.Operation{ - Name: opDescribeSubscribersForNotification, + Name: opDescribeBudgetActionsForBudget, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &DescribeSubscribersForNotificationInput{} + input = &DescribeBudgetActionsForBudgetInput{} } - output = &DescribeSubscribersForNotificationOutput{} + output = &DescribeBudgetActionsForBudgetOutput{} req = c.newRequest(op, input, output) return } -// DescribeSubscribersForNotification API operation for AWS Budgets. +// DescribeBudgetActionsForBudget API operation for AWS Budgets. // -// Lists the subscribers that are associated with a notification. +// Describes all of the budget actions for a budget. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Budgets's -// API operation DescribeSubscribersForNotification for usage and error information. +// API operation DescribeBudgetActionsForBudget for usage and error information. // // Returned Error Types: // * InternalErrorException // An error on the server occurred during the processing of your request. Try // again later. // -// * NotFoundException -// We can’t locate the resource that you specified. -// // * InvalidParameterException // An error on the client occurred. Typically, the cause is an invalid input // value. // -// * InvalidNextTokenException -// The pagination token is invalid. -// -// * ExpiredNextTokenException -// The pagination token expired. +// * NotFoundException +// We can’t locate the resource that you specified. // // * AccessDeniedException // You are not authorized to use this operation with the given parameters. // -func (c *Budgets) DescribeSubscribersForNotification(input *DescribeSubscribersForNotificationInput) (*DescribeSubscribersForNotificationOutput, error) { - req, out := c.DescribeSubscribersForNotificationRequest(input) +// * InvalidNextTokenException +// The pagination token is invalid. +// +func (c *Budgets) DescribeBudgetActionsForBudget(input *DescribeBudgetActionsForBudgetInput) (*DescribeBudgetActionsForBudgetOutput, error) { + req, out := c.DescribeBudgetActionsForBudgetRequest(input) return out, req.Send() } -// DescribeSubscribersForNotificationWithContext is the same as DescribeSubscribersForNotification with the addition of +// DescribeBudgetActionsForBudgetWithContext is the same as DescribeBudgetActionsForBudget with the addition of // the ability to pass a context and additional request options. // -// See DescribeSubscribersForNotification for details on how to use this API operation. +// See DescribeBudgetActionsForBudget for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Budgets) DescribeSubscribersForNotificationWithContext(ctx aws.Context, input *DescribeSubscribersForNotificationInput, opts ...request.Option) (*DescribeSubscribersForNotificationOutput, error) { - req, out := c.DescribeSubscribersForNotificationRequest(input) +func (c *Budgets) DescribeBudgetActionsForBudgetWithContext(ctx aws.Context, input *DescribeBudgetActionsForBudgetInput, opts ...request.Option) (*DescribeBudgetActionsForBudgetOutput, error) { + req, out := c.DescribeBudgetActionsForBudgetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateBudget = "UpdateBudget" +// DescribeBudgetActionsForBudgetPages iterates over the pages of a DescribeBudgetActionsForBudget operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeBudgetActionsForBudget method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeBudgetActionsForBudget operation. 
+// pageNum := 0 +// err := client.DescribeBudgetActionsForBudgetPages(params, +// func(page *budgets.DescribeBudgetActionsForBudgetOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Budgets) DescribeBudgetActionsForBudgetPages(input *DescribeBudgetActionsForBudgetInput, fn func(*DescribeBudgetActionsForBudgetOutput, bool) bool) error { + return c.DescribeBudgetActionsForBudgetPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UpdateBudgetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateBudget operation. The "output" return +// DescribeBudgetActionsForBudgetPagesWithContext same as DescribeBudgetActionsForBudgetPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) DescribeBudgetActionsForBudgetPagesWithContext(ctx aws.Context, input *DescribeBudgetActionsForBudgetInput, fn func(*DescribeBudgetActionsForBudgetOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeBudgetActionsForBudgetInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeBudgetActionsForBudgetRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeBudgetActionsForBudgetOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeBudgetPerformanceHistory = "DescribeBudgetPerformanceHistory" + +// DescribeBudgetPerformanceHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBudgetPerformanceHistory operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateBudget for more information on using the UpdateBudget +// See DescribeBudgetPerformanceHistory for more information on using the DescribeBudgetPerformanceHistory // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateBudgetRequest method. -// req, resp := client.UpdateBudgetRequest(params) +// // Example sending a request using the DescribeBudgetPerformanceHistoryRequest method. 
+// req, resp := client.DescribeBudgetPerformanceHistoryRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *Budgets) UpdateBudgetRequest(input *UpdateBudgetInput) (req *request.Request, output *UpdateBudgetOutput) { +func (c *Budgets) DescribeBudgetPerformanceHistoryRequest(input *DescribeBudgetPerformanceHistoryInput) (req *request.Request, output *DescribeBudgetPerformanceHistoryOutput) { op := &request.Operation{ - Name: opUpdateBudget, + Name: opDescribeBudgetPerformanceHistory, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UpdateBudgetInput{} + input = &DescribeBudgetPerformanceHistoryInput{} } - output = &UpdateBudgetOutput{} + output = &DescribeBudgetPerformanceHistoryOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateBudget API operation for AWS Budgets. -// -// Updates a budget. You can change every part of a budget except for the budgetName -// and the calculatedSpend. When you modify a budget, the calculatedSpend drops -// to zero until AWS has new usage data to use for forecasting. +// DescribeBudgetPerformanceHistory API operation for AWS Budgets. // -// Only one of BudgetLimit or PlannedBudgetLimits can be present in the syntax -// at one time. Use the syntax that matches your case. The Request Syntax section -// shows the BudgetLimit syntax. For PlannedBudgetLimits, see the Examples (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_UpdateBudget.html#API_UpdateBudget_Examples) -// section. +// Describes the history for DAILY, MONTHLY, and QUARTERLY budgets. Budget history +// isn't available for ANNUAL budgets. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Budgets's -// API operation UpdateBudget for usage and error information. +// API operation DescribeBudgetPerformanceHistory for usage and error information. // // Returned Error Types: // * InternalErrorException @@ -1110,81 +1444,148 @@ func (c *Budgets) UpdateBudgetRequest(input *UpdateBudgetInput) (req *request.Re // * NotFoundException // We can’t locate the resource that you specified. // +// * InvalidNextTokenException +// The pagination token is invalid. +// +// * ExpiredNextTokenException +// The pagination token expired. +// // * AccessDeniedException // You are not authorized to use this operation with the given parameters. // -func (c *Budgets) UpdateBudget(input *UpdateBudgetInput) (*UpdateBudgetOutput, error) { - req, out := c.UpdateBudgetRequest(input) +func (c *Budgets) DescribeBudgetPerformanceHistory(input *DescribeBudgetPerformanceHistoryInput) (*DescribeBudgetPerformanceHistoryOutput, error) { + req, out := c.DescribeBudgetPerformanceHistoryRequest(input) return out, req.Send() } -// UpdateBudgetWithContext is the same as UpdateBudget with the addition of +// DescribeBudgetPerformanceHistoryWithContext is the same as DescribeBudgetPerformanceHistory with the addition of // the ability to pass a context and additional request options. // -// See UpdateBudget for details on how to use this API operation. 
+// See DescribeBudgetPerformanceHistory for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Budgets) UpdateBudgetWithContext(ctx aws.Context, input *UpdateBudgetInput, opts ...request.Option) (*UpdateBudgetOutput, error) { - req, out := c.UpdateBudgetRequest(input) +func (c *Budgets) DescribeBudgetPerformanceHistoryWithContext(ctx aws.Context, input *DescribeBudgetPerformanceHistoryInput, opts ...request.Option) (*DescribeBudgetPerformanceHistoryOutput, error) { + req, out := c.DescribeBudgetPerformanceHistoryRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateNotification = "UpdateNotification" +// DescribeBudgetPerformanceHistoryPages iterates over the pages of a DescribeBudgetPerformanceHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeBudgetPerformanceHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeBudgetPerformanceHistory operation. +// pageNum := 0 +// err := client.DescribeBudgetPerformanceHistoryPages(params, +// func(page *budgets.DescribeBudgetPerformanceHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Budgets) DescribeBudgetPerformanceHistoryPages(input *DescribeBudgetPerformanceHistoryInput, fn func(*DescribeBudgetPerformanceHistoryOutput, bool) bool) error { + return c.DescribeBudgetPerformanceHistoryPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UpdateNotificationRequest generates a "aws/request.Request" representing the -// client's request for the UpdateNotification operation. The "output" return +// DescribeBudgetPerformanceHistoryPagesWithContext same as DescribeBudgetPerformanceHistoryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) DescribeBudgetPerformanceHistoryPagesWithContext(ctx aws.Context, input *DescribeBudgetPerformanceHistoryInput, fn func(*DescribeBudgetPerformanceHistoryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeBudgetPerformanceHistoryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeBudgetPerformanceHistoryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeBudgetPerformanceHistoryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeBudgets = "DescribeBudgets" + +// DescribeBudgetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBudgets operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateNotification for more information on using the UpdateNotification +// See DescribeBudgets for more information on using the DescribeBudgets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateNotificationRequest method. -// req, resp := client.UpdateNotificationRequest(params) +// // Example sending a request using the DescribeBudgetsRequest method. +// req, resp := client.DescribeBudgetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *Budgets) UpdateNotificationRequest(input *UpdateNotificationInput) (req *request.Request, output *UpdateNotificationOutput) { +func (c *Budgets) DescribeBudgetsRequest(input *DescribeBudgetsInput) (req *request.Request, output *DescribeBudgetsOutput) { op := &request.Operation{ - Name: opUpdateNotification, + Name: opDescribeBudgets, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UpdateNotificationInput{} + input = &DescribeBudgetsInput{} } - output = &UpdateNotificationOutput{} + output = &DescribeBudgetsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateNotification API operation for AWS Budgets. +// DescribeBudgets API operation for AWS Budgets. // -// Updates a notification. +// Lists the budgets that are associated with an account. +// +// The Request Syntax section shows the BudgetLimit syntax. For PlannedBudgetLimits, +// see the Examples (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_DescribeBudgets.html#API_DescribeBudgets_Examples) +// section. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Budgets's -// API operation UpdateNotification for usage and error information. +// API operation DescribeBudgets for usage and error information. // // Returned Error Types: // * InternalErrorException @@ -1198,84 +1599,144 @@ func (c *Budgets) UpdateNotificationRequest(input *UpdateNotificationInput) (req // * NotFoundException // We can’t locate the resource that you specified. // -// * DuplicateRecordException -// The budget name already exists. Budget names must be unique within an account. +// * InvalidNextTokenException +// The pagination token is invalid. +// +// * ExpiredNextTokenException +// The pagination token expired. // // * AccessDeniedException // You are not authorized to use this operation with the given parameters. 
// -func (c *Budgets) UpdateNotification(input *UpdateNotificationInput) (*UpdateNotificationOutput, error) { - req, out := c.UpdateNotificationRequest(input) +func (c *Budgets) DescribeBudgets(input *DescribeBudgetsInput) (*DescribeBudgetsOutput, error) { + req, out := c.DescribeBudgetsRequest(input) return out, req.Send() } -// UpdateNotificationWithContext is the same as UpdateNotification with the addition of +// DescribeBudgetsWithContext is the same as DescribeBudgets with the addition of // the ability to pass a context and additional request options. // -// See UpdateNotification for details on how to use this API operation. +// See DescribeBudgets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Budgets) UpdateNotificationWithContext(ctx aws.Context, input *UpdateNotificationInput, opts ...request.Option) (*UpdateNotificationOutput, error) { - req, out := c.UpdateNotificationRequest(input) +func (c *Budgets) DescribeBudgetsWithContext(ctx aws.Context, input *DescribeBudgetsInput, opts ...request.Option) (*DescribeBudgetsOutput, error) { + req, out := c.DescribeBudgetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateSubscriber = "UpdateSubscriber" +// DescribeBudgetsPages iterates over the pages of a DescribeBudgets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeBudgets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeBudgets operation. +// pageNum := 0 +// err := client.DescribeBudgetsPages(params, +// func(page *budgets.DescribeBudgetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Budgets) DescribeBudgetsPages(input *DescribeBudgetsInput, fn func(*DescribeBudgetsOutput, bool) bool) error { + return c.DescribeBudgetsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UpdateSubscriberRequest generates a "aws/request.Request" representing the -// client's request for the UpdateSubscriber operation. The "output" return +// DescribeBudgetsPagesWithContext same as DescribeBudgetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) DescribeBudgetsPagesWithContext(ctx aws.Context, input *DescribeBudgetsInput, fn func(*DescribeBudgetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeBudgetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeBudgetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeBudgetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeNotificationsForBudget = "DescribeNotificationsForBudget" + +// DescribeNotificationsForBudgetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNotificationsForBudget operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateSubscriber for more information on using the UpdateSubscriber +// See DescribeNotificationsForBudget for more information on using the DescribeNotificationsForBudget // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateSubscriberRequest method. -// req, resp := client.UpdateSubscriberRequest(params) +// // Example sending a request using the DescribeNotificationsForBudgetRequest method. +// req, resp := client.DescribeNotificationsForBudgetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *Budgets) UpdateSubscriberRequest(input *UpdateSubscriberInput) (req *request.Request, output *UpdateSubscriberOutput) { +func (c *Budgets) DescribeNotificationsForBudgetRequest(input *DescribeNotificationsForBudgetInput) (req *request.Request, output *DescribeNotificationsForBudgetOutput) { op := &request.Operation{ - Name: opUpdateSubscriber, + Name: opDescribeNotificationsForBudget, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UpdateSubscriberInput{} + input = &DescribeNotificationsForBudgetInput{} } - output = &UpdateSubscriberOutput{} + output = &DescribeNotificationsForBudgetOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateSubscriber API operation for AWS Budgets. +// DescribeNotificationsForBudget API operation for AWS Budgets. // -// Updates a subscriber. +// Lists the notifications that are associated with a budget. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Budgets's -// API operation UpdateSubscriber for usage and error information. +// API operation DescribeNotificationsForBudget for usage and error information. // // Returned Error Types: // * InternalErrorException @@ -1289,251 +1750,2105 @@ func (c *Budgets) UpdateSubscriberRequest(input *UpdateSubscriberInput) (req *re // * NotFoundException // We can’t locate the resource that you specified. // -// * DuplicateRecordException -// The budget name already exists. Budget names must be unique within an account. +// * InvalidNextTokenException +// The pagination token is invalid. +// +// * ExpiredNextTokenException +// The pagination token expired. 
// // * AccessDeniedException // You are not authorized to use this operation with the given parameters. // -func (c *Budgets) UpdateSubscriber(input *UpdateSubscriberInput) (*UpdateSubscriberOutput, error) { - req, out := c.UpdateSubscriberRequest(input) +func (c *Budgets) DescribeNotificationsForBudget(input *DescribeNotificationsForBudgetInput) (*DescribeNotificationsForBudgetOutput, error) { + req, out := c.DescribeNotificationsForBudgetRequest(input) return out, req.Send() } -// UpdateSubscriberWithContext is the same as UpdateSubscriber with the addition of +// DescribeNotificationsForBudgetWithContext is the same as DescribeNotificationsForBudget with the addition of // the ability to pass a context and additional request options. // -// See UpdateSubscriber for details on how to use this API operation. +// See DescribeNotificationsForBudget for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Budgets) UpdateSubscriberWithContext(ctx aws.Context, input *UpdateSubscriberInput, opts ...request.Option) (*UpdateSubscriberOutput, error) { - req, out := c.UpdateSubscriberRequest(input) +func (c *Budgets) DescribeNotificationsForBudgetWithContext(ctx aws.Context, input *DescribeNotificationsForBudgetInput, opts ...request.Option) (*DescribeNotificationsForBudgetOutput, error) { + req, out := c.DescribeNotificationsForBudgetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeNotificationsForBudgetPages iterates over the pages of a DescribeNotificationsForBudget operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeNotificationsForBudget method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeNotificationsForBudget operation. +// pageNum := 0 +// err := client.DescribeNotificationsForBudgetPages(params, +// func(page *budgets.DescribeNotificationsForBudgetOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Budgets) DescribeNotificationsForBudgetPages(input *DescribeNotificationsForBudgetInput, fn func(*DescribeNotificationsForBudgetOutput, bool) bool) error { + return c.DescribeNotificationsForBudgetPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeNotificationsForBudgetPagesWithContext same as DescribeNotificationsForBudgetPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
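The Pages/PagesWithContext helpers added above drive the NextToken/MaxResults paginator declared in the request constructors. Below is a short sketch of the context-bound variant, assuming a placeholder account ID; the same pattern applies to the notification and subscriber paginators in this hunk:

    package main

    import (
        "context"
        "fmt"
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/budgets"
    )

    func main() {
        svc := budgets.New(session.Must(session.NewSession()))

        // aws.Context is an alias for context.Context, so a timeout context
        // bounds the whole multi-request iteration.
        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        total := 0
        err := svc.DescribeBudgetsPagesWithContext(ctx,
            &budgets.DescribeBudgetsInput{
                AccountId:  aws.String("123456789012"), // placeholder
                MaxResults: aws.Int64(100),
            },
            func(page *budgets.DescribeBudgetsOutput, lastPage bool) bool {
                total += len(page.Budgets)
                return true // keep paging until the last page
            })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("budgets:", total)
    }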
+func (c *Budgets) DescribeNotificationsForBudgetPagesWithContext(ctx aws.Context, input *DescribeNotificationsForBudgetInput, fn func(*DescribeNotificationsForBudgetOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeNotificationsForBudgetInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeNotificationsForBudgetRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeNotificationsForBudgetOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeSubscribersForNotification = "DescribeSubscribersForNotification" + +// DescribeSubscribersForNotificationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSubscribersForNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSubscribersForNotification for more information on using the DescribeSubscribersForNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSubscribersForNotificationRequest method. +// req, resp := client.DescribeSubscribersForNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *Budgets) DescribeSubscribersForNotificationRequest(input *DescribeSubscribersForNotificationInput) (req *request.Request, output *DescribeSubscribersForNotificationOutput) { + op := &request.Operation{ + Name: opDescribeSubscribersForNotification, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeSubscribersForNotificationInput{} + } + + output = &DescribeSubscribersForNotificationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSubscribersForNotification API operation for AWS Budgets. +// +// Lists the subscribers that are associated with a notification. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Budgets's +// API operation DescribeSubscribersForNotification for usage and error information. +// +// Returned Error Types: +// * InternalErrorException +// An error on the server occurred during the processing of your request. Try +// again later. +// +// * NotFoundException +// We can’t locate the resource that you specified. +// +// * InvalidParameterException +// An error on the client occurred. Typically, the cause is an invalid input +// value. +// +// * InvalidNextTokenException +// The pagination token is invalid. +// +// * ExpiredNextTokenException +// The pagination token expired. +// +// * AccessDeniedException +// You are not authorized to use this operation with the given parameters. 
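DescribeNotificationsForBudget and DescribeSubscribersForNotification are naturally used together: list the notifications on a budget, then fan out to each notification's subscribers. The sketch below assumes the upstream input/output field names (AccountId, BudgetName, Notification, Subscribers), which are defined elsewhere in this file rather than in this hunk, and uses placeholder identifiers:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/budgets"
    )

    func main() {
        svc := budgets.New(session.Must(session.NewSession()))
        account, budget := aws.String("123456789012"), aws.String("monthly-cost") // placeholders

        notifs, err := svc.DescribeNotificationsForBudget(&budgets.DescribeNotificationsForBudgetInput{
            AccountId:  account,
            BudgetName: budget,
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, n := range notifs.Notifications {
            subs, err := svc.DescribeSubscribersForNotification(&budgets.DescribeSubscribersForNotificationInput{
                AccountId:    account,
                BudgetName:   budget,
                Notification: n,
            })
            if err != nil {
                log.Fatal(err)
            }
            for _, s := range subs.Subscribers {
                fmt.Printf("%s %.0f%% -> %s (%s)\n",
                    aws.StringValue(n.NotificationType),
                    aws.Float64Value(n.Threshold),
                    aws.StringValue(s.Address),
                    aws.StringValue(s.SubscriptionType))
            }
        }
    }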
+// +func (c *Budgets) DescribeSubscribersForNotification(input *DescribeSubscribersForNotificationInput) (*DescribeSubscribersForNotificationOutput, error) { + req, out := c.DescribeSubscribersForNotificationRequest(input) + return out, req.Send() +} + +// DescribeSubscribersForNotificationWithContext is the same as DescribeSubscribersForNotification with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSubscribersForNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) DescribeSubscribersForNotificationWithContext(ctx aws.Context, input *DescribeSubscribersForNotificationInput, opts ...request.Option) (*DescribeSubscribersForNotificationOutput, error) { + req, out := c.DescribeSubscribersForNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeSubscribersForNotificationPages iterates over the pages of a DescribeSubscribersForNotification operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSubscribersForNotification method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSubscribersForNotification operation. +// pageNum := 0 +// err := client.DescribeSubscribersForNotificationPages(params, +// func(page *budgets.DescribeSubscribersForNotificationOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Budgets) DescribeSubscribersForNotificationPages(input *DescribeSubscribersForNotificationInput, fn func(*DescribeSubscribersForNotificationOutput, bool) bool) error { + return c.DescribeSubscribersForNotificationPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSubscribersForNotificationPagesWithContext same as DescribeSubscribersForNotificationPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) DescribeSubscribersForNotificationPagesWithContext(ctx aws.Context, input *DescribeSubscribersForNotificationInput, fn func(*DescribeSubscribersForNotificationOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSubscribersForNotificationInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSubscribersForNotificationRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSubscribersForNotificationOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opExecuteBudgetAction = "ExecuteBudgetAction" + +// ExecuteBudgetActionRequest generates a "aws/request.Request" representing the +// client's request for the ExecuteBudgetAction operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ExecuteBudgetAction for more information on using the ExecuteBudgetAction +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ExecuteBudgetActionRequest method. +// req, resp := client.ExecuteBudgetActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *Budgets) ExecuteBudgetActionRequest(input *ExecuteBudgetActionInput) (req *request.Request, output *ExecuteBudgetActionOutput) { + op := &request.Operation{ + Name: opExecuteBudgetAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ExecuteBudgetActionInput{} + } + + output = &ExecuteBudgetActionOutput{} + req = c.newRequest(op, input, output) + return +} + +// ExecuteBudgetAction API operation for AWS Budgets. +// +// Executes a budget action. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Budgets's +// API operation ExecuteBudgetAction for usage and error information. +// +// Returned Error Types: +// * InternalErrorException +// An error on the server occurred during the processing of your request. Try +// again later. +// +// * InvalidParameterException +// An error on the client occurred. Typically, the cause is an invalid input +// value. +// +// * NotFoundException +// We can’t locate the resource that you specified. +// +// * AccessDeniedException +// You are not authorized to use this operation with the given parameters. +// +// * ResourceLockedException +// The request was received and recognized by the server, but the server rejected +// that particular method for the requested resource. +// +func (c *Budgets) ExecuteBudgetAction(input *ExecuteBudgetActionInput) (*ExecuteBudgetActionOutput, error) { + req, out := c.ExecuteBudgetActionRequest(input) + return out, req.Send() +} + +// ExecuteBudgetActionWithContext is the same as ExecuteBudgetAction with the addition of +// the ability to pass a context and additional request options. +// +// See ExecuteBudgetAction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
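ExecuteBudgetAction is the operation above that can fail with ResourceLockedException, so callers typically treat that case as retryable. The sketch below is an assumption-heavy illustration: the ExecuteBudgetActionInput field names and the execution-type string follow the upstream API and are not shown in this hunk, and the ResourceLockedException type is defined elsewhere in this file; every identifier is a placeholder.

    package main

    import (
        "errors"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/budgets"
    )

    func main() {
        svc := budgets.New(session.Must(session.NewSession()))

        // Field names and the execution-type value are assumptions taken from
        // the upstream ExecuteBudgetAction API; all values are placeholders.
        _, err := svc.ExecuteBudgetAction(&budgets.ExecuteBudgetActionInput{
            AccountId:     aws.String("123456789012"),
            BudgetName:    aws.String("monthly-cost"),
            ActionId:      aws.String("11111111-2222-3333-4444-555555555555"),
            ExecutionType: aws.String("APPROVE_BUDGET_ACTION"),
        })

        var locked *budgets.ResourceLockedException
        if errors.As(err, &locked) {
            // The action is busy; back off and retry later.
            log.Printf("action locked: %s", locked.Message())
            return
        }
        if err != nil {
            log.Fatal(err)
        }
    }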
+func (c *Budgets) ExecuteBudgetActionWithContext(ctx aws.Context, input *ExecuteBudgetActionInput, opts ...request.Option) (*ExecuteBudgetActionOutput, error) { + req, out := c.ExecuteBudgetActionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// You are not authorized to use this operation with the given parameters. -type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +const opUpdateBudget = "UpdateBudget" + +// UpdateBudgetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBudget operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateBudget for more information on using the UpdateBudget +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateBudgetRequest method. +// req, resp := client.UpdateBudgetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *Budgets) UpdateBudgetRequest(input *UpdateBudgetInput) (req *request.Request, output *UpdateBudgetOutput) { + op := &request.Operation{ + Name: opUpdateBudget, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateBudgetInput{} + } + + output = &UpdateBudgetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateBudget API operation for AWS Budgets. +// +// Updates a budget. You can change every part of a budget except for the budgetName +// and the calculatedSpend. When you modify a budget, the calculatedSpend drops +// to zero until AWS has new usage data to use for forecasting. +// +// Only one of BudgetLimit or PlannedBudgetLimits can be present in the syntax +// at one time. Use the syntax that matches your case. The Request Syntax section +// shows the BudgetLimit syntax. For PlannedBudgetLimits, see the Examples (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_budgets_UpdateBudget.html#API_UpdateBudget_Examples) +// section. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Budgets's +// API operation UpdateBudget for usage and error information. +// +// Returned Error Types: +// * InternalErrorException +// An error on the server occurred during the processing of your request. Try +// again later. +// +// * InvalidParameterException +// An error on the client occurred. Typically, the cause is an invalid input +// value. +// +// * NotFoundException +// We can’t locate the resource that you specified. +// +// * AccessDeniedException +// You are not authorized to use this operation with the given parameters. 
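Because UpdateBudget replaces the whole budget object (everything except budgetName and calculatedSpend may change), the usual pattern is read-modify-write. A hedged sketch, assuming the DescribeBudget operation and the UpdateBudgetInput.NewBudget field that are defined elsewhere in this file, with placeholder identifiers:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/budgets"
    )

    func main() {
        svc := budgets.New(session.Must(session.NewSession()))
        account, name := aws.String("123456789012"), aws.String("monthly-cost") // placeholders

        // DescribeBudget and UpdateBudgetInput.NewBudget are assumed here; they
        // are defined elsewhere in this file, not in this hunk.
        cur, err := svc.DescribeBudget(&budgets.DescribeBudgetInput{
            AccountId:  account,
            BudgetName: name,
        })
        if err != nil {
            log.Fatal(err)
        }

        // Raise the fixed limit. Per the doc comment above, calculatedSpend
        // drops to zero after the update until AWS has new usage data.
        b := cur.Budget
        b.BudgetLimit = &budgets.Spend{
            Amount: aws.String("250"),
            Unit:   aws.String("USD"),
        }

        if _, err := svc.UpdateBudget(&budgets.UpdateBudgetInput{
            AccountId: account,
            NewBudget: b,
        }); err != nil {
            log.Fatal(err)
        }
    }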
+// +func (c *Budgets) UpdateBudget(input *UpdateBudgetInput) (*UpdateBudgetOutput, error) { + req, out := c.UpdateBudgetRequest(input) + return out, req.Send() +} + +// UpdateBudgetWithContext is the same as UpdateBudget with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateBudget for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) UpdateBudgetWithContext(ctx aws.Context, input *UpdateBudgetInput, opts ...request.Option) (*UpdateBudgetOutput, error) { + req, out := c.UpdateBudgetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateBudgetAction = "UpdateBudgetAction" + +// UpdateBudgetActionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBudgetAction operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateBudgetAction for more information on using the UpdateBudgetAction +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateBudgetActionRequest method. +// req, resp := client.UpdateBudgetActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *Budgets) UpdateBudgetActionRequest(input *UpdateBudgetActionInput) (req *request.Request, output *UpdateBudgetActionOutput) { + op := &request.Operation{ + Name: opUpdateBudgetAction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateBudgetActionInput{} + } + + output = &UpdateBudgetActionOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateBudgetAction API operation for AWS Budgets. +// +// Updates a budget action. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Budgets's +// API operation UpdateBudgetAction for usage and error information. +// +// Returned Error Types: +// * InternalErrorException +// An error on the server occurred during the processing of your request. Try +// again later. +// +// * InvalidParameterException +// An error on the client occurred. Typically, the cause is an invalid input +// value. +// +// * NotFoundException +// We can’t locate the resource that you specified. +// +// * AccessDeniedException +// You are not authorized to use this operation with the given parameters. +// +// * ResourceLockedException +// The request was received and recognized by the server, but the server rejected +// that particular method for the requested resource. 
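UpdateBudgetAction follows the same shape as the Action structure documented later in this hunk. The sketch below, which bumps an action's trigger threshold, assumes the UpdateBudgetActionInput field names (they are not shown here) and uses placeholder identifiers:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/budgets"
    )

    func main() {
        svc := budgets.New(session.Must(session.NewSession()))

        // Input field names mirror the Action structure documented later in
        // this file and are assumptions; all identifiers are placeholders.
        out, err := svc.UpdateBudgetAction(&budgets.UpdateBudgetActionInput{
            AccountId:  aws.String("123456789012"),
            BudgetName: aws.String("monthly-cost"),
            ActionId:   aws.String("11111111-2222-3333-4444-555555555555"),
            ActionThreshold: &budgets.ActionThreshold{
                ActionThresholdType:  aws.String("PERCENTAGE"),
                ActionThresholdValue: aws.Float64(90),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out)
    }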
+// +func (c *Budgets) UpdateBudgetAction(input *UpdateBudgetActionInput) (*UpdateBudgetActionOutput, error) { + req, out := c.UpdateBudgetActionRequest(input) + return out, req.Send() +} + +// UpdateBudgetActionWithContext is the same as UpdateBudgetAction with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateBudgetAction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) UpdateBudgetActionWithContext(ctx aws.Context, input *UpdateBudgetActionInput, opts ...request.Option) (*UpdateBudgetActionOutput, error) { + req, out := c.UpdateBudgetActionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateNotification = "UpdateNotification" + +// UpdateNotificationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateNotification for more information on using the UpdateNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateNotificationRequest method. +// req, resp := client.UpdateNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *Budgets) UpdateNotificationRequest(input *UpdateNotificationInput) (req *request.Request, output *UpdateNotificationOutput) { + op := &request.Operation{ + Name: opUpdateNotification, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateNotificationInput{} + } + + output = &UpdateNotificationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateNotification API operation for AWS Budgets. +// +// Updates a notification. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Budgets's +// API operation UpdateNotification for usage and error information. +// +// Returned Error Types: +// * InternalErrorException +// An error on the server occurred during the processing of your request. Try +// again later. +// +// * InvalidParameterException +// An error on the client occurred. Typically, the cause is an invalid input +// value. +// +// * NotFoundException +// We can’t locate the resource that you specified. +// +// * DuplicateRecordException +// The budget name already exists. Budget names must be unique within an account. +// +// * AccessDeniedException +// You are not authorized to use this operation with the given parameters. 
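UpdateNotification identifies the notification to change by value, so the caller passes both the old and the new definition. The OldNotification/NewNotification field names and the Notification shape follow the upstream API and are assumptions here (they are not part of this hunk); all values are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/budgets"
    )

    func main() {
        svc := budgets.New(session.Must(session.NewSession()))

        // The old notification must match an existing one exactly; otherwise
        // the service returns NotFoundException, as documented above.
        old := &budgets.Notification{
            NotificationType:   aws.String("ACTUAL"),
            ComparisonOperator: aws.String("GREATER_THAN"),
            Threshold:          aws.Float64(80),
            ThresholdType:      aws.String("PERCENTAGE"),
        }
        updated := &budgets.Notification{
            NotificationType:   aws.String("ACTUAL"),
            ComparisonOperator: aws.String("GREATER_THAN"),
            Threshold:          aws.Float64(90),
            ThresholdType:      aws.String("PERCENTAGE"),
        }

        if _, err := svc.UpdateNotification(&budgets.UpdateNotificationInput{
            AccountId:       aws.String("123456789012"),
            BudgetName:      aws.String("monthly-cost"),
            OldNotification: old,
            NewNotification: updated,
        }); err != nil {
            log.Fatal(err)
        }
    }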
+// +func (c *Budgets) UpdateNotification(input *UpdateNotificationInput) (*UpdateNotificationOutput, error) { + req, out := c.UpdateNotificationRequest(input) + return out, req.Send() +} + +// UpdateNotificationWithContext is the same as UpdateNotification with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) UpdateNotificationWithContext(ctx aws.Context, input *UpdateNotificationInput, opts ...request.Option) (*UpdateNotificationOutput, error) { + req, out := c.UpdateNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateSubscriber = "UpdateSubscriber" + +// UpdateSubscriberRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSubscriber operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSubscriber for more information on using the UpdateSubscriber +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSubscriberRequest method. +// req, resp := client.UpdateSubscriberRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *Budgets) UpdateSubscriberRequest(input *UpdateSubscriberInput) (req *request.Request, output *UpdateSubscriberOutput) { + op := &request.Operation{ + Name: opUpdateSubscriber, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSubscriberInput{} + } + + output = &UpdateSubscriberOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateSubscriber API operation for AWS Budgets. +// +// Updates a subscriber. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Budgets's +// API operation UpdateSubscriber for usage and error information. +// +// Returned Error Types: +// * InternalErrorException +// An error on the server occurred during the processing of your request. Try +// again later. +// +// * InvalidParameterException +// An error on the client occurred. Typically, the cause is an invalid input +// value. +// +// * NotFoundException +// We can’t locate the resource that you specified. +// +// * DuplicateRecordException +// The budget name already exists. Budget names must be unique within an account. +// +// * AccessDeniedException +// You are not authorized to use this operation with the given parameters. 
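UpdateSubscriber works the same way: the existing subscriber is matched by value and replaced. The input field names follow the upstream API and are not shown in this hunk; the addresses and identifiers are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/budgets"
    )

    func main() {
        svc := budgets.New(session.Must(session.NewSession()))

        // The notification whose subscriber list is being edited.
        notification := &budgets.Notification{
            NotificationType:   aws.String("ACTUAL"),
            ComparisonOperator: aws.String("GREATER_THAN"),
            Threshold:          aws.Float64(90),
            ThresholdType:      aws.String("PERCENTAGE"),
        }

        // Swap the e-mail address on an existing subscriber.
        if _, err := svc.UpdateSubscriber(&budgets.UpdateSubscriberInput{
            AccountId:    aws.String("123456789012"),
            BudgetName:   aws.String("monthly-cost"),
            Notification: notification,
            OldSubscriber: &budgets.Subscriber{
                SubscriptionType: aws.String("EMAIL"),
                Address:          aws.String("old-owner@example.com"),
            },
            NewSubscriber: &budgets.Subscriber{
                SubscriptionType: aws.String("EMAIL"),
                Address:          aws.String("new-owner@example.com"),
            },
        }); err != nil {
            log.Fatal(err)
        }
    }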
+// +func (c *Budgets) UpdateSubscriber(input *UpdateSubscriberInput) (*UpdateSubscriberOutput, error) { + req, out := c.UpdateSubscriberRequest(input) + return out, req.Send() +} + +// UpdateSubscriberWithContext is the same as UpdateSubscriber with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSubscriber for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Budgets) UpdateSubscriberWithContext(ctx aws.Context, input *UpdateSubscriberInput, opts ...request.Option) (*UpdateSubscriberOutput, error) { + req, out := c.UpdateSubscriberRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// You are not authorized to use this operation with the given parameters. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message the exception carries. + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A budget action resource. +type Action struct { + _ struct{} `type:"structure"` + + // A system-generated universally unique identifier (UUID) for the action. + // + // ActionId is a required field + ActionId *string `min:"36" type:"string" required:"true"` + + // The trigger threshold of the action. + // + // ActionThreshold is a required field + ActionThreshold *ActionThreshold `type:"structure" required:"true"` + + // The type of action. This defines the type of tasks that can be carried out + // by this action. This field also determines the format for definition. + // + // ActionType is a required field + ActionType *string `type:"string" required:"true" enum:"ActionType"` + + // This specifies if the action needs manual or automatic approval. + // + // ApprovalModel is a required field + ApprovalModel *string `type:"string" required:"true" enum:"ApprovalModel"` + + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. 
+ // + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` + + // Where you specify all of the type-specific parameters. + // + // Definition is a required field + Definition *Definition `type:"structure" required:"true"` + + // The role passed for action execution and reversion. Roles and actions must + // be in the same account. + // + // ExecutionRoleArn is a required field + ExecutionRoleArn *string `min:"32" type:"string" required:"true"` + + // The type of a notification. It must be ACTUAL or FORECASTED. + // + // NotificationType is a required field + NotificationType *string `type:"string" required:"true" enum:"NotificationType"` + + // The status of action. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ActionStatus"` + + // A list of subscribers. + // + // Subscribers is a required field + Subscribers []*Subscriber `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s Action) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Action) GoString() string { + return s.String() +} + +// SetActionId sets the ActionId field's value. +func (s *Action) SetActionId(v string) *Action { + s.ActionId = &v + return s +} + +// SetActionThreshold sets the ActionThreshold field's value. +func (s *Action) SetActionThreshold(v *ActionThreshold) *Action { + s.ActionThreshold = v + return s +} + +// SetActionType sets the ActionType field's value. +func (s *Action) SetActionType(v string) *Action { + s.ActionType = &v + return s +} + +// SetApprovalModel sets the ApprovalModel field's value. +func (s *Action) SetApprovalModel(v string) *Action { + s.ApprovalModel = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *Action) SetBudgetName(v string) *Action { + s.BudgetName = &v + return s +} + +// SetDefinition sets the Definition field's value. +func (s *Action) SetDefinition(v *Definition) *Action { + s.Definition = v + return s +} + +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *Action) SetExecutionRoleArn(v string) *Action { + s.ExecutionRoleArn = &v + return s +} + +// SetNotificationType sets the NotificationType field's value. +func (s *Action) SetNotificationType(v string) *Action { + s.NotificationType = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Action) SetStatus(v string) *Action { + s.Status = &v + return s +} + +// SetSubscribers sets the Subscribers field's value. +func (s *Action) SetSubscribers(v []*Subscriber) *Action { + s.Subscribers = v + return s +} + +// The historical records for a budget action. +type ActionHistory struct { + _ struct{} `type:"structure"` + + // The description of details of the event. + // + // ActionHistoryDetails is a required field + ActionHistoryDetails *ActionHistoryDetails `type:"structure" required:"true"` + + // This distinguishes between whether the events are triggered by the user or + // generated by the system. + // + // EventType is a required field + EventType *string `type:"string" required:"true" enum:"EventType"` + + // The status of action at the time of the event. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ActionStatus"` + + // A generic time stamp. In Java, it is transformed to a Date object. 
+ // + // Timestamp is a required field + Timestamp *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s ActionHistory) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionHistory) GoString() string { + return s.String() +} + +// SetActionHistoryDetails sets the ActionHistoryDetails field's value. +func (s *ActionHistory) SetActionHistoryDetails(v *ActionHistoryDetails) *ActionHistory { + s.ActionHistoryDetails = v + return s +} + +// SetEventType sets the EventType field's value. +func (s *ActionHistory) SetEventType(v string) *ActionHistory { + s.EventType = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ActionHistory) SetStatus(v string) *ActionHistory { + s.Status = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *ActionHistory) SetTimestamp(v time.Time) *ActionHistory { + s.Timestamp = &v + return s +} + +// The description of details of the event. +type ActionHistoryDetails struct { + _ struct{} `type:"structure"` + + // The budget action resource. + // + // Action is a required field + Action *Action `type:"structure" required:"true"` + + // A generic string. + // + // Message is a required field + Message *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ActionHistoryDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionHistoryDetails) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *ActionHistoryDetails) SetAction(v *Action) *ActionHistoryDetails { + s.Action = v + return s +} + +// SetMessage sets the Message field's value. +func (s *ActionHistoryDetails) SetMessage(v string) *ActionHistoryDetails { + s.Message = &v + return s +} + +// The trigger threshold of the action. +type ActionThreshold struct { + _ struct{} `type:"structure"` + + // The type of threshold for a notification. + // + // ActionThresholdType is a required field + ActionThresholdType *string `type:"string" required:"true" enum:"ThresholdType"` + + // The threshold of a notification. + // + // ActionThresholdValue is a required field + ActionThresholdValue *float64 `type:"double" required:"true"` +} + +// String returns the string representation +func (s ActionThreshold) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActionThreshold) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ActionThreshold) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ActionThreshold"} + if s.ActionThresholdType == nil { + invalidParams.Add(request.NewErrParamRequired("ActionThresholdType")) + } + if s.ActionThresholdValue == nil { + invalidParams.Add(request.NewErrParamRequired("ActionThresholdValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionThresholdType sets the ActionThresholdType field's value. +func (s *ActionThreshold) SetActionThresholdType(v string) *ActionThreshold { + s.ActionThresholdType = &v + return s +} + +// SetActionThresholdValue sets the ActionThresholdValue field's value. +func (s *ActionThreshold) SetActionThresholdValue(v float64) *ActionThreshold { + s.ActionThresholdValue = &v + return s +} + +// Represents the output of the CreateBudget operation. 
The content consists +// of the detailed metadata and data file information, and the current status +// of the budget object. +// +// This is the ARN pattern for a budget: +// +// arn:aws:budgets::AccountId:budget/budgetName +type Budget struct { + _ struct{} `type:"structure"` + + // The total amount of cost, usage, RI utilization, RI coverage, Savings Plans + // utilization, or Savings Plans coverage that you want to track with your budget. + // + // BudgetLimit is required for cost or usage budgets, but optional for RI or + // Savings Plans utilization or coverage budgets. RI and Savings Plans utilization + // or coverage budgets default to 100, which is the only valid value for RI + // or Savings Plans utilization or coverage budgets. You can't use BudgetLimit + // with PlannedBudgetLimits for CreateBudget and UpdateBudget actions. + BudgetLimit *Spend `type:"structure"` + + // The name of a budget. The name must be unique within an account. The : and + // \ characters aren't allowed in BudgetName. + // + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` + + // Whether this budget tracks costs, usage, RI utilization, RI coverage, Savings + // Plans utilization, or Savings Plans coverage. + // + // BudgetType is a required field + BudgetType *string `type:"string" required:"true" enum:"BudgetType"` + + // The actual and forecasted cost or usage that the budget tracks. + CalculatedSpend *CalculatedSpend `type:"structure"` + + // The cost filters, such as service or tag, that are applied to a budget. + // + // AWS Budgets supports the following services as a filter for RI budgets: + // + // * Amazon Elastic Compute Cloud - Compute + // + // * Amazon Redshift + // + // * Amazon Relational Database Service + // + // * Amazon ElastiCache + // + // * Amazon Elasticsearch Service + CostFilters map[string][]*string `type:"map"` + + // The types of costs that are included in this COST budget. + // + // USAGE, RI_UTILIZATION, RI_COVERAGE, SAVINGS_PLANS_UTILIZATION, and SAVINGS_PLANS_COVERAGE + // budgets do not have CostTypes. + CostTypes *CostTypes `type:"structure"` + + // The last time that you updated this budget. + LastUpdatedTime *time.Time `type:"timestamp"` + + // A map containing multiple BudgetLimit, including current or future limits. + // + // PlannedBudgetLimits is available for cost or usage budget and supports monthly + // and quarterly TimeUnit. + // + // For monthly budgets, provide 12 months of PlannedBudgetLimits values. This + // must start from the current month and include the next 11 months. The key + // is the start of the month, UTC in epoch seconds. + // + // For quarterly budgets, provide 4 quarters of PlannedBudgetLimits value entries + // in standard calendar quarter increments. This must start from the current + // quarter and include the next 3 quarters. The key is the start of the quarter, + // UTC in epoch seconds. + // + // If the planned budget expires before 12 months for monthly or 4 quarters + // for quarterly, provide the PlannedBudgetLimits values only for the remaining + // periods. + // + // If the budget begins at a date in the future, provide PlannedBudgetLimits + // values from the start date of the budget. + // + // After all of the BudgetLimit values in PlannedBudgetLimits are used, the + // budget continues to use the last limit as the BudgetLimit. At that point, + // the planned budget provides the same experience as a fixed budget. 
+ // + // DescribeBudget and DescribeBudgets response along with PlannedBudgetLimits + // will also contain BudgetLimit representing the current month or quarter limit + // present in PlannedBudgetLimits. This only applies to budgets created with + // PlannedBudgetLimits. Budgets created without PlannedBudgetLimits will only + // contain BudgetLimit, and no PlannedBudgetLimits. + PlannedBudgetLimits map[string]*Spend `type:"map"` + + // The period of time that is covered by a budget. The period has a start date + // and an end date. The start date must come before the end date. The end date + // must come before 06/15/87 00:00 UTC. + // + // If you create your budget and don't specify a start date, AWS defaults to + // the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). + // For example, if you created your budget on January 24, 2018, chose DAILY, + // and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC. + // If you chose MONTHLY, AWS set your start date to 01/01/18 00:00 UTC. If you + // didn't specify an end date, AWS set your end date to 06/15/87 00:00 UTC. + // The defaults are the same for the AWS Billing and Cost Management console + // and the API. + // + // You can change either date with the UpdateBudget operation. + // + // After the end date, AWS deletes the budget and all associated notifications + // and subscribers. + TimePeriod *TimePeriod `type:"structure"` + + // The length of time until a budget resets the actual and forecasted spend. + // + // TimeUnit is a required field + TimeUnit *string `type:"string" required:"true" enum:"TimeUnit"` +} + +// String returns the string representation +func (s Budget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Budget) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Budget) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Budget"} + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.BudgetName != nil && len(*s.BudgetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BudgetName", 1)) + } + if s.BudgetType == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetType")) + } + if s.TimeUnit == nil { + invalidParams.Add(request.NewErrParamRequired("TimeUnit")) + } + if s.BudgetLimit != nil { + if err := s.BudgetLimit.Validate(); err != nil { + invalidParams.AddNested("BudgetLimit", err.(request.ErrInvalidParams)) + } + } + if s.CalculatedSpend != nil { + if err := s.CalculatedSpend.Validate(); err != nil { + invalidParams.AddNested("CalculatedSpend", err.(request.ErrInvalidParams)) + } + } + if s.PlannedBudgetLimits != nil { + for i, v := range s.PlannedBudgetLimits { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PlannedBudgetLimits", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBudgetLimit sets the BudgetLimit field's value. +func (s *Budget) SetBudgetLimit(v *Spend) *Budget { + s.BudgetLimit = v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *Budget) SetBudgetName(v string) *Budget { + s.BudgetName = &v + return s +} + +// SetBudgetType sets the BudgetType field's value. 
+func (s *Budget) SetBudgetType(v string) *Budget { + s.BudgetType = &v + return s +} + +// SetCalculatedSpend sets the CalculatedSpend field's value. +func (s *Budget) SetCalculatedSpend(v *CalculatedSpend) *Budget { + s.CalculatedSpend = v + return s +} + +// SetCostFilters sets the CostFilters field's value. +func (s *Budget) SetCostFilters(v map[string][]*string) *Budget { + s.CostFilters = v + return s +} + +// SetCostTypes sets the CostTypes field's value. +func (s *Budget) SetCostTypes(v *CostTypes) *Budget { + s.CostTypes = v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *Budget) SetLastUpdatedTime(v time.Time) *Budget { + s.LastUpdatedTime = &v + return s +} + +// SetPlannedBudgetLimits sets the PlannedBudgetLimits field's value. +func (s *Budget) SetPlannedBudgetLimits(v map[string]*Spend) *Budget { + s.PlannedBudgetLimits = v + return s +} + +// SetTimePeriod sets the TimePeriod field's value. +func (s *Budget) SetTimePeriod(v *TimePeriod) *Budget { + s.TimePeriod = v + return s +} + +// SetTimeUnit sets the TimeUnit field's value. +func (s *Budget) SetTimeUnit(v string) *Budget { + s.TimeUnit = &v + return s +} + +// A history of the state of a budget at the end of the budget's specified time +// period. +type BudgetPerformanceHistory struct { + _ struct{} `type:"structure"` + + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. + BudgetName *string `min:"1" type:"string"` + + // The type of a budget. It must be one of the following types: + // + // COST, USAGE, RI_UTILIZATION, RI_COVERAGE, SAVINGS_PLANS_UTILIZATION, or SAVINGS_PLANS_COVERAGE. + BudgetType *string `type:"string" enum:"BudgetType"` + + // A list of amounts of cost or usage that you created budgets for, compared + // to your actual costs or usage. + BudgetedAndActualAmountsList []*BudgetedAndActualAmounts `type:"list"` + + // The history of the cost filters for a budget during the specified time period. + CostFilters map[string][]*string `type:"map"` + + // The history of the cost types for a budget during the specified time period. + CostTypes *CostTypes `type:"structure"` + + // The time unit of the budget, such as MONTHLY or QUARTERLY. + TimeUnit *string `type:"string" enum:"TimeUnit"` +} + +// String returns the string representation +func (s BudgetPerformanceHistory) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BudgetPerformanceHistory) GoString() string { + return s.String() +} + +// SetBudgetName sets the BudgetName field's value. +func (s *BudgetPerformanceHistory) SetBudgetName(v string) *BudgetPerformanceHistory { + s.BudgetName = &v + return s +} + +// SetBudgetType sets the BudgetType field's value. +func (s *BudgetPerformanceHistory) SetBudgetType(v string) *BudgetPerformanceHistory { + s.BudgetType = &v + return s +} + +// SetBudgetedAndActualAmountsList sets the BudgetedAndActualAmountsList field's value. +func (s *BudgetPerformanceHistory) SetBudgetedAndActualAmountsList(v []*BudgetedAndActualAmounts) *BudgetPerformanceHistory { + s.BudgetedAndActualAmountsList = v + return s +} + +// SetCostFilters sets the CostFilters field's value. +func (s *BudgetPerformanceHistory) SetCostFilters(v map[string][]*string) *BudgetPerformanceHistory { + s.CostFilters = v + return s +} + +// SetCostTypes sets the CostTypes field's value. 
+func (s *BudgetPerformanceHistory) SetCostTypes(v *CostTypes) *BudgetPerformanceHistory { + s.CostTypes = v + return s +} + +// SetTimeUnit sets the TimeUnit field's value. +func (s *BudgetPerformanceHistory) SetTimeUnit(v string) *BudgetPerformanceHistory { + s.TimeUnit = &v + return s +} + +// The amount of cost or usage that you created the budget for, compared to +// your actual costs or usage. +type BudgetedAndActualAmounts struct { + _ struct{} `type:"structure"` + + // Your actual costs or usage for a budget period. + ActualAmount *Spend `type:"structure"` + + // The amount of cost or usage that you created the budget for. + BudgetedAmount *Spend `type:"structure"` + + // The time period covered by this budget comparison. + TimePeriod *TimePeriod `type:"structure"` +} + +// String returns the string representation +func (s BudgetedAndActualAmounts) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BudgetedAndActualAmounts) GoString() string { + return s.String() +} + +// SetActualAmount sets the ActualAmount field's value. +func (s *BudgetedAndActualAmounts) SetActualAmount(v *Spend) *BudgetedAndActualAmounts { + s.ActualAmount = v + return s +} + +// SetBudgetedAmount sets the BudgetedAmount field's value. +func (s *BudgetedAndActualAmounts) SetBudgetedAmount(v *Spend) *BudgetedAndActualAmounts { + s.BudgetedAmount = v + return s +} + +// SetTimePeriod sets the TimePeriod field's value. +func (s *BudgetedAndActualAmounts) SetTimePeriod(v *TimePeriod) *BudgetedAndActualAmounts { + s.TimePeriod = v + return s +} + +// The spend objects that are associated with this budget. The actualSpend tracks +// how much you've used, cost, usage, RI units, or Savings Plans units and the +// forecastedSpend tracks how much you are predicted to spend based on your +// historical usage profile. +// +// For example, if it is the 20th of the month and you have spent 50 dollars +// on Amazon EC2, your actualSpend is 50 USD, and your forecastedSpend is 75 +// USD. +type CalculatedSpend struct { + _ struct{} `type:"structure"` + + // The amount of cost, usage, RI units, or Savings Plans units that you have + // used. + // + // ActualSpend is a required field + ActualSpend *Spend `type:"structure" required:"true"` + + // The amount of cost, usage, RI units, or Savings Plans units that you are + // forecasted to use. + ForecastedSpend *Spend `type:"structure"` +} + +// String returns the string representation +func (s CalculatedSpend) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CalculatedSpend) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CalculatedSpend) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CalculatedSpend"} + if s.ActualSpend == nil { + invalidParams.Add(request.NewErrParamRequired("ActualSpend")) + } + if s.ActualSpend != nil { + if err := s.ActualSpend.Validate(); err != nil { + invalidParams.AddNested("ActualSpend", err.(request.ErrInvalidParams)) + } + } + if s.ForecastedSpend != nil { + if err := s.ForecastedSpend.Validate(); err != nil { + invalidParams.AddNested("ForecastedSpend", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActualSpend sets the ActualSpend field's value. 
+func (s *CalculatedSpend) SetActualSpend(v *Spend) *CalculatedSpend { + s.ActualSpend = v + return s +} + +// SetForecastedSpend sets the ForecastedSpend field's value. +func (s *CalculatedSpend) SetForecastedSpend(v *Spend) *CalculatedSpend { + s.ForecastedSpend = v + return s +} + +// The types of cost that are included in a COST budget, such as tax and subscriptions. +// +// USAGE, RI_UTILIZATION, RI_COVERAGE, SAVINGS_PLANS_UTILIZATION, and SAVINGS_PLANS_COVERAGE +// budgets do not have CostTypes. +type CostTypes struct { + _ struct{} `type:"structure"` + + // Specifies whether a budget includes credits. + // + // The default value is true. + IncludeCredit *bool `type:"boolean"` + + // Specifies whether a budget includes discounts. + // + // The default value is true. + IncludeDiscount *bool `type:"boolean"` + + // Specifies whether a budget includes non-RI subscription costs. + // + // The default value is true. + IncludeOtherSubscription *bool `type:"boolean"` + + // Specifies whether a budget includes recurring fees such as monthly RI fees. + // + // The default value is true. + IncludeRecurring *bool `type:"boolean"` + + // Specifies whether a budget includes refunds. + // + // The default value is true. + IncludeRefund *bool `type:"boolean"` + + // Specifies whether a budget includes subscriptions. + // + // The default value is true. + IncludeSubscription *bool `type:"boolean"` + + // Specifies whether a budget includes support subscription fees. + // + // The default value is true. + IncludeSupport *bool `type:"boolean"` + + // Specifies whether a budget includes taxes. + // + // The default value is true. + IncludeTax *bool `type:"boolean"` + + // Specifies whether a budget includes upfront RI costs. + // + // The default value is true. + IncludeUpfront *bool `type:"boolean"` + + // Specifies whether a budget uses the amortized rate. + // + // The default value is false. + UseAmortized *bool `type:"boolean"` + + // Specifies whether a budget uses a blended rate. + // + // The default value is false. + UseBlended *bool `type:"boolean"` +} + +// String returns the string representation +func (s CostTypes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CostTypes) GoString() string { + return s.String() +} + +// SetIncludeCredit sets the IncludeCredit field's value. +func (s *CostTypes) SetIncludeCredit(v bool) *CostTypes { + s.IncludeCredit = &v + return s +} + +// SetIncludeDiscount sets the IncludeDiscount field's value. +func (s *CostTypes) SetIncludeDiscount(v bool) *CostTypes { + s.IncludeDiscount = &v + return s +} + +// SetIncludeOtherSubscription sets the IncludeOtherSubscription field's value. +func (s *CostTypes) SetIncludeOtherSubscription(v bool) *CostTypes { + s.IncludeOtherSubscription = &v + return s +} + +// SetIncludeRecurring sets the IncludeRecurring field's value. +func (s *CostTypes) SetIncludeRecurring(v bool) *CostTypes { + s.IncludeRecurring = &v + return s +} + +// SetIncludeRefund sets the IncludeRefund field's value. +func (s *CostTypes) SetIncludeRefund(v bool) *CostTypes { + s.IncludeRefund = &v + return s +} + +// SetIncludeSubscription sets the IncludeSubscription field's value. +func (s *CostTypes) SetIncludeSubscription(v bool) *CostTypes { + s.IncludeSubscription = &v + return s +} + +// SetIncludeSupport sets the IncludeSupport field's value. 
+func (s *CostTypes) SetIncludeSupport(v bool) *CostTypes { + s.IncludeSupport = &v + return s +} + +// SetIncludeTax sets the IncludeTax field's value. +func (s *CostTypes) SetIncludeTax(v bool) *CostTypes { + s.IncludeTax = &v + return s +} + +// SetIncludeUpfront sets the IncludeUpfront field's value. +func (s *CostTypes) SetIncludeUpfront(v bool) *CostTypes { + s.IncludeUpfront = &v + return s +} + +// SetUseAmortized sets the UseAmortized field's value. +func (s *CostTypes) SetUseAmortized(v bool) *CostTypes { + s.UseAmortized = &v + return s +} + +// SetUseBlended sets the UseBlended field's value. +func (s *CostTypes) SetUseBlended(v bool) *CostTypes { + s.UseBlended = &v + return s +} + +type CreateBudgetActionInput struct { + _ struct{} `type:"structure"` + + // The account ID of the user. It should be a 12-digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // The trigger threshold of the action. + // + // ActionThreshold is a required field + ActionThreshold *ActionThreshold `type:"structure" required:"true"` + + // The type of action. This defines the type of tasks that can be carried out + // by this action. This field also determines the format for definition. + // + // ActionType is a required field + ActionType *string `type:"string" required:"true" enum:"ActionType"` + + // This specifies if the action needs manual or automatic approval. + // + // ApprovalModel is a required field + ApprovalModel *string `type:"string" required:"true" enum:"ApprovalModel"` + + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. + // + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` + + // Specifies all of the type-specific parameters. + // + // Definition is a required field + Definition *Definition `type:"structure" required:"true"` + + // The role passed for action execution and reversion. Roles and actions must + // be in the same account. + // + // ExecutionRoleArn is a required field + ExecutionRoleArn *string `min:"32" type:"string" required:"true"` + + // The type of a notification. It must be ACTUAL or FORECASTED. + // + // NotificationType is a required field + NotificationType *string `type:"string" required:"true" enum:"NotificationType"` + + // A list of subscribers. + // + // Subscribers is a required field + Subscribers []*Subscriber `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateBudgetActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBudgetActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateBudgetActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBudgetActionInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.ActionThreshold == nil { + invalidParams.Add(request.NewErrParamRequired("ActionThreshold")) + } + if s.ActionType == nil { + invalidParams.Add(request.NewErrParamRequired("ActionType")) + } + if s.ApprovalModel == nil { + invalidParams.Add(request.NewErrParamRequired("ApprovalModel")) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.BudgetName != nil && len(*s.BudgetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BudgetName", 1)) + } + if s.Definition == nil { + invalidParams.Add(request.NewErrParamRequired("Definition")) + } + if s.ExecutionRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("ExecutionRoleArn")) + } + if s.ExecutionRoleArn != nil && len(*s.ExecutionRoleArn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ExecutionRoleArn", 32)) + } + if s.NotificationType == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationType")) + } + if s.Subscribers == nil { + invalidParams.Add(request.NewErrParamRequired("Subscribers")) + } + if s.Subscribers != nil && len(s.Subscribers) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Subscribers", 1)) + } + if s.ActionThreshold != nil { + if err := s.ActionThreshold.Validate(); err != nil { + invalidParams.AddNested("ActionThreshold", err.(request.ErrInvalidParams)) + } + } + if s.Definition != nil { + if err := s.Definition.Validate(); err != nil { + invalidParams.AddNested("Definition", err.(request.ErrInvalidParams)) + } + } + if s.Subscribers != nil { + for i, v := range s.Subscribers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Subscribers", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *CreateBudgetActionInput) SetAccountId(v string) *CreateBudgetActionInput { + s.AccountId = &v + return s +} + +// SetActionThreshold sets the ActionThreshold field's value. +func (s *CreateBudgetActionInput) SetActionThreshold(v *ActionThreshold) *CreateBudgetActionInput { + s.ActionThreshold = v + return s +} + +// SetActionType sets the ActionType field's value. +func (s *CreateBudgetActionInput) SetActionType(v string) *CreateBudgetActionInput { + s.ActionType = &v + return s +} + +// SetApprovalModel sets the ApprovalModel field's value. +func (s *CreateBudgetActionInput) SetApprovalModel(v string) *CreateBudgetActionInput { + s.ApprovalModel = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *CreateBudgetActionInput) SetBudgetName(v string) *CreateBudgetActionInput { + s.BudgetName = &v + return s +} + +// SetDefinition sets the Definition field's value. +func (s *CreateBudgetActionInput) SetDefinition(v *Definition) *CreateBudgetActionInput { + s.Definition = v + return s +} + +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *CreateBudgetActionInput) SetExecutionRoleArn(v string) *CreateBudgetActionInput { + s.ExecutionRoleArn = &v + return s +} + +// SetNotificationType sets the NotificationType field's value. 
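+
+// exampleCreateBudgetActionValidation is an editor's illustrative sketch, not
+// part of the generated SDK. It shows how the Validate method above enforces
+// the CreateBudgetActionInput constraints client-side: the account ID must be
+// 12 characters, the execution role ARN at least 32, and ActionThreshold,
+// ActionType, ApprovalModel, Definition, NotificationType and Subscribers are
+// all required, so this deliberately incomplete input fails before any
+// request is sent. The field values are placeholders, not real resources.
+func exampleCreateBudgetActionValidation() error {
+	in := (&CreateBudgetActionInput{}).
+		SetAccountId("123"). // too short: the minimum length is 12
+		SetBudgetName("monthly-cost").
+		SetExecutionRoleArn("arn:short") // too short: the minimum length is 32
+	// Validate reports every violation at once as a request.ErrInvalidParams.
+	return in.Validate()
+}
+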
+func (s *CreateBudgetActionInput) SetNotificationType(v string) *CreateBudgetActionInput { + s.NotificationType = &v + return s +} + +// SetSubscribers sets the Subscribers field's value. +func (s *CreateBudgetActionInput) SetSubscribers(v []*Subscriber) *CreateBudgetActionInput { + s.Subscribers = v + return s +} + +type CreateBudgetActionOutput struct { + _ struct{} `type:"structure"` + + // The account ID of the user. It should be a 12-digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A system-generated universally unique identifier (UUID) for the action. + // + // ActionId is a required field + ActionId *string `min:"36" type:"string" required:"true"` + + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. + // + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateBudgetActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBudgetActionOutput) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *CreateBudgetActionOutput) SetAccountId(v string) *CreateBudgetActionOutput { + s.AccountId = &v + return s +} + +// SetActionId sets the ActionId field's value. +func (s *CreateBudgetActionOutput) SetActionId(v string) *CreateBudgetActionOutput { + s.ActionId = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *CreateBudgetActionOutput) SetBudgetName(v string) *CreateBudgetActionOutput { + s.BudgetName = &v + return s +} + +// Request of CreateBudget +type CreateBudgetInput struct { + _ struct{} `type:"structure"` + + // The accountId that is associated with the budget. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // The budget object that you want to create. + // + // Budget is a required field + Budget *Budget `type:"structure" required:"true"` + + // A notification that you want to associate with a budget. A budget can have + // up to five notifications, and each notification can have one SNS subscriber + // and up to 10 email subscribers. If you include notifications and subscribers + // in your CreateBudget call, AWS creates the notifications and subscribers + // for you. + NotificationsWithSubscribers []*NotificationWithSubscribers `type:"list"` +} + +// String returns the string representation +func (s CreateBudgetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBudgetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateBudgetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBudgetInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.Budget == nil { + invalidParams.Add(request.NewErrParamRequired("Budget")) + } + if s.Budget != nil { + if err := s.Budget.Validate(); err != nil { + invalidParams.AddNested("Budget", err.(request.ErrInvalidParams)) + } + } + if s.NotificationsWithSubscribers != nil { + for i, v := range s.NotificationsWithSubscribers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NotificationsWithSubscribers", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *CreateBudgetInput) SetAccountId(v string) *CreateBudgetInput { + s.AccountId = &v + return s +} + +// SetBudget sets the Budget field's value. +func (s *CreateBudgetInput) SetBudget(v *Budget) *CreateBudgetInput { + s.Budget = v + return s +} + +// SetNotificationsWithSubscribers sets the NotificationsWithSubscribers field's value. +func (s *CreateBudgetInput) SetNotificationsWithSubscribers(v []*NotificationWithSubscribers) *CreateBudgetInput { + s.NotificationsWithSubscribers = v + return s +} + +// Response of CreateBudget +type CreateBudgetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateBudgetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBudgetOutput) GoString() string { + return s.String() +} + +// Request of CreateNotification +type CreateNotificationInput struct { + _ struct{} `type:"structure"` + + // The accountId that is associated with the budget that you want to create + // a notification for. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` - // The error message the exception carries. - Message_ *string `locationName:"Message" type:"string"` + // The name of the budget that you want AWS to notify you about. Budget names + // must be unique within an account. + // + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` + + // The notification that you want to create. + // + // Notification is a required field + Notification *Notification `type:"structure" required:"true"` + + // A list of subscribers that you want to associate with the notification. Each + // notification can have one SNS subscriber and up to 10 email subscribers. + // + // Subscribers is a required field + Subscribers []*Subscriber `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s AccessDeniedException) String() string { +func (s CreateNotificationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AccessDeniedException) GoString() string { +func (s CreateNotificationInput) GoString() string { return s.String() } -func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { - return &AccessDeniedException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. 
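+
+// exampleCreateBudget is an editor's illustrative sketch, not part of the
+// generated SDK. It assembles the CreateBudgetInput described above: a COST
+// budget with a monthly limit plus one notification that emails a single
+// subscriber once actual spend crosses 80 percent. The Budget, Notification,
+// Subscriber and NotificationWithSubscribers shapes and the CreateBudget
+// client method are defined elsewhere in this package; the enum strings,
+// account ID and address below are placeholder assumptions, not values taken
+// from this changeset.
+func exampleCreateBudget(svc *Budgets) error {
+	budget := (&Budget{}).
+		SetBudgetName("monthly-cost").
+		SetBudgetType("COST").
+		SetTimeUnit("MONTHLY").
+		SetBudgetLimit((&Spend{}).SetAmount("100").SetUnit("USD"))
+
+	notification := (&NotificationWithSubscribers{}).
+		SetNotification((&Notification{}).
+			SetNotificationType("ACTUAL").
+			SetComparisonOperator("GREATER_THAN").
+			SetThreshold(80)).
+		SetSubscribers([]*Subscriber{
+			(&Subscriber{}).SetSubscriptionType("EMAIL").SetAddress("billing@example.com"),
+		})
+
+	in := (&CreateBudgetInput{}).
+		SetAccountId("123456789012").
+		SetBudget(budget).
+		SetNotificationsWithSubscribers([]*NotificationWithSubscribers{notification})
+	if err := in.Validate(); err != nil {
+		return err
+	}
+	_, err := svc.CreateBudget(in)
+	return err
+}
+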
+func (s *CreateNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNotificationInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.BudgetName != nil && len(*s.BudgetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BudgetName", 1)) + } + if s.Notification == nil { + invalidParams.Add(request.NewErrParamRequired("Notification")) + } + if s.Subscribers == nil { + invalidParams.Add(request.NewErrParamRequired("Subscribers")) + } + if s.Subscribers != nil && len(s.Subscribers) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Subscribers", 1)) + } + if s.Notification != nil { + if err := s.Notification.Validate(); err != nil { + invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) + } + } + if s.Subscribers != nil { + for i, v := range s.Subscribers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Subscribers", i), err.(request.ErrInvalidParams)) + } + } } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// Code returns the exception type name. -func (s AccessDeniedException) Code() string { - return "AccessDeniedException" +// SetAccountId sets the AccountId field's value. +func (s *CreateNotificationInput) SetAccountId(v string) *CreateNotificationInput { + s.AccountId = &v + return s } -// Message returns the exception's message. -func (s AccessDeniedException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetBudgetName sets the BudgetName field's value. +func (s *CreateNotificationInput) SetBudgetName(v string) *CreateNotificationInput { + s.BudgetName = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { - return nil +// SetNotification sets the Notification field's value. +func (s *CreateNotificationInput) SetNotification(v *Notification) *CreateNotificationInput { + s.Notification = v + return s } -func (s AccessDeniedException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetSubscribers sets the Subscribers field's value. +func (s *CreateNotificationInput) SetSubscribers(v []*Subscriber) *CreateNotificationInput { + s.Subscribers = v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +// Response of CreateNotification +type CreateNotificationOutput struct { + _ struct{} `type:"structure"` } -// RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +// String returns the string representation +func (s CreateNotificationOutput) String() string { + return awsutil.Prettify(s) } -// Represents the output of the CreateBudget operation. The content consists -// of the detailed metadata and data file information, and the current status -// of the budget object. 
-// -// This is the ARN pattern for a budget: -// -// arn:aws:budgetservice::AccountId:budget/budgetName -type Budget struct { +// GoString returns the string representation +func (s CreateNotificationOutput) GoString() string { + return s.String() +} + +// Request of CreateSubscriber +type CreateSubscriberInput struct { _ struct{} `type:"structure"` - // The total amount of cost, usage, RI utilization, RI coverage, Savings Plans - // utilization, or Savings Plans coverage that you want to track with your budget. + // The accountId that is associated with the budget that you want to create + // a subscriber for. // - // BudgetLimit is required for cost or usage budgets, but optional for RI or - // Savings Plans utilization or coverage budgets. RI and Savings Plans utilization - // or coverage budgets default to 100, which is the only valid value for RI - // or Savings Plans utilization or coverage budgets. You can't use BudgetLimit - // with PlannedBudgetLimits for CreateBudget and UpdateBudget actions. - BudgetLimit *Spend `type:"structure"` + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` - // The name of a budget. The name must be unique within an account. The : and - // \ characters aren't allowed in BudgetName. + // The name of the budget that you want to subscribe to. Budget names must be + // unique within an account. // // BudgetName is a required field BudgetName *string `min:"1" type:"string" required:"true"` - // Whether this budget tracks costs, usage, RI utilization, RI coverage, Savings - // Plans utilization, or Savings Plans coverage. - // - // BudgetType is a required field - BudgetType *string `type:"string" required:"true" enum:"BudgetType"` - - // The actual and forecasted cost or usage that the budget tracks. - CalculatedSpend *CalculatedSpend `type:"structure"` - - // The cost filters, such as service or tag, that are applied to a budget. - // - // AWS Budgets supports the following services as a filter for RI budgets: - // - // * Amazon Elastic Compute Cloud - Compute - // - // * Amazon Redshift - // - // * Amazon Relational Database Service - // - // * Amazon ElastiCache - // - // * Amazon Elasticsearch Service - CostFilters map[string][]*string `type:"map"` - - // The types of costs that are included in this COST budget. - // - // USAGE, RI_UTILIZATION, RI_COVERAGE, Savings_Plans_Utilization, and Savings_Plans_Coverage - // budgets do not have CostTypes. - CostTypes *CostTypes `type:"structure"` - - // The last time that you updated this budget. - LastUpdatedTime *time.Time `type:"timestamp"` - - // A map containing multiple BudgetLimit, including current or future limits. - // - // PlannedBudgetLimits is available for cost or usage budget and supports monthly - // and quarterly TimeUnit. - // - // For monthly budgets, provide 12 months of PlannedBudgetLimits values. This - // must start from the current month and include the next 11 months. The key - // is the start of the month, UTC in epoch seconds. - // - // For quarterly budgets, provide 4 quarters of PlannedBudgetLimits value entries - // in standard calendar quarter increments. This must start from the current - // quarter and include the next 3 quarters. The key is the start of the quarter, - // UTC in epoch seconds. - // - // If the planned budget expires before 12 months for monthly or 4 quarters - // for quarterly, provide the PlannedBudgetLimits values only for the remaining - // periods. 
- // - // If the budget begins at a date in the future, provide PlannedBudgetLimits - // values from the start date of the budget. - // - // After all of the BudgetLimit values in PlannedBudgetLimits are used, the - // budget continues to use the last limit as the BudgetLimit. At that point, - // the planned budget provides the same experience as a fixed budget. - // - // DescribeBudget and DescribeBudgets response along with PlannedBudgetLimits - // will also contain BudgetLimit representing the current month or quarter limit - // present in PlannedBudgetLimits. This only applies to budgets created with - // PlannedBudgetLimits. Budgets created without PlannedBudgetLimits will only - // contain BudgetLimit, and no PlannedBudgetLimits. - PlannedBudgetLimits map[string]*Spend `type:"map"` - - // The period of time that is covered by a budget. The period has a start date - // and an end date. The start date must come before the end date. The end date - // must come before 06/15/87 00:00 UTC. - // - // If you create your budget and don't specify a start date, AWS defaults to - // the start of your chosen time period (DAILY, MONTHLY, QUARTERLY, or ANNUALLY). - // For example, if you created your budget on January 24, 2018, chose DAILY, - // and didn't set a start date, AWS set your start date to 01/24/18 00:00 UTC. - // If you chose MONTHLY, AWS set your start date to 01/01/18 00:00 UTC. If you - // didn't specify an end date, AWS set your end date to 06/15/87 00:00 UTC. - // The defaults are the same for the AWS Billing and Cost Management console - // and the API. - // - // You can change either date with the UpdateBudget operation. + // The notification that you want to create a subscriber for. // - // After the end date, AWS deletes the budget and all associated notifications - // and subscribers. - TimePeriod *TimePeriod `type:"structure"` + // Notification is a required field + Notification *Notification `type:"structure" required:"true"` - // The length of time until a budget resets the actual and forecasted spend. - // DAILY is available only for RI_UTILIZATION, RI_COVERAGE, Savings_Plans_Utilization, - // and Savings_Plans_Coverage budgets. + // The subscriber that you want to associate with a budget notification. // - // TimeUnit is a required field - TimeUnit *string `type:"string" required:"true" enum:"TimeUnit"` + // Subscriber is a required field + Subscriber *Subscriber `type:"structure" required:"true"` } // String returns the string representation -func (s Budget) String() string { +func (s CreateSubscriberInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Budget) GoString() string { +func (s CreateSubscriberInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *Budget) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Budget"} +func (s *CreateSubscriberInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSubscriberInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } if s.BudgetName == nil { invalidParams.Add(request.NewErrParamRequired("BudgetName")) } if s.BudgetName != nil && len(*s.BudgetName) < 1 { invalidParams.Add(request.NewErrParamMinLen("BudgetName", 1)) } - if s.BudgetType == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetType")) - } - if s.TimeUnit == nil { - invalidParams.Add(request.NewErrParamRequired("TimeUnit")) - } - if s.BudgetLimit != nil { - if err := s.BudgetLimit.Validate(); err != nil { - invalidParams.AddNested("BudgetLimit", err.(request.ErrInvalidParams)) - } + if s.Notification == nil { + invalidParams.Add(request.NewErrParamRequired("Notification")) } - if s.CalculatedSpend != nil { - if err := s.CalculatedSpend.Validate(); err != nil { - invalidParams.AddNested("CalculatedSpend", err.(request.ErrInvalidParams)) + if s.Subscriber == nil { + invalidParams.Add(request.NewErrParamRequired("Subscriber")) + } + if s.Notification != nil { + if err := s.Notification.Validate(); err != nil { + invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) } } - if s.PlannedBudgetLimits != nil { - for i, v := range s.PlannedBudgetLimits { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PlannedBudgetLimits", i), err.(request.ErrInvalidParams)) - } + if s.Subscriber != nil { + if err := s.Subscriber.Validate(); err != nil { + invalidParams.AddNested("Subscriber", err.(request.ErrInvalidParams)) } } @@ -1543,443 +3858,332 @@ func (s *Budget) Validate() error { return nil } -// SetBudgetLimit sets the BudgetLimit field's value. -func (s *Budget) SetBudgetLimit(v *Spend) *Budget { - s.BudgetLimit = v +// SetAccountId sets the AccountId field's value. +func (s *CreateSubscriberInput) SetAccountId(v string) *CreateSubscriberInput { + s.AccountId = &v return s } // SetBudgetName sets the BudgetName field's value. -func (s *Budget) SetBudgetName(v string) *Budget { +func (s *CreateSubscriberInput) SetBudgetName(v string) *CreateSubscriberInput { s.BudgetName = &v return s } -// SetBudgetType sets the BudgetType field's value. -func (s *Budget) SetBudgetType(v string) *Budget { - s.BudgetType = &v - return s -} - -// SetCalculatedSpend sets the CalculatedSpend field's value. -func (s *Budget) SetCalculatedSpend(v *CalculatedSpend) *Budget { - s.CalculatedSpend = v - return s -} - -// SetCostFilters sets the CostFilters field's value. -func (s *Budget) SetCostFilters(v map[string][]*string) *Budget { - s.CostFilters = v - return s -} - -// SetCostTypes sets the CostTypes field's value. -func (s *Budget) SetCostTypes(v *CostTypes) *Budget { - s.CostTypes = v - return s -} - -// SetLastUpdatedTime sets the LastUpdatedTime field's value. -func (s *Budget) SetLastUpdatedTime(v time.Time) *Budget { - s.LastUpdatedTime = &v - return s -} - -// SetPlannedBudgetLimits sets the PlannedBudgetLimits field's value. -func (s *Budget) SetPlannedBudgetLimits(v map[string]*Spend) *Budget { - s.PlannedBudgetLimits = v - return s -} - -// SetTimePeriod sets the TimePeriod field's value. 
-func (s *Budget) SetTimePeriod(v *TimePeriod) *Budget { - s.TimePeriod = v +// SetNotification sets the Notification field's value. +func (s *CreateSubscriberInput) SetNotification(v *Notification) *CreateSubscriberInput { + s.Notification = v return s } -// SetTimeUnit sets the TimeUnit field's value. -func (s *Budget) SetTimeUnit(v string) *Budget { - s.TimeUnit = &v +// SetSubscriber sets the Subscriber field's value. +func (s *CreateSubscriberInput) SetSubscriber(v *Subscriber) *CreateSubscriberInput { + s.Subscriber = v return s } -// A history of the state of a budget at the end of the budget's specified time -// period. -type BudgetPerformanceHistory struct { +// Response of CreateSubscriber +type CreateSubscriberOutput struct { _ struct{} `type:"structure"` - - // A string that represents the budget name. The ":" and "\" characters aren't - // allowed. - BudgetName *string `min:"1" type:"string"` - - // The type of a budget. It must be one of the following types: - // - // COST, USAGE, RI_UTILIZATION, or RI_COVERAGE. - BudgetType *string `type:"string" enum:"BudgetType"` - - // A list of amounts of cost or usage that you created budgets for, compared - // to your actual costs or usage. - BudgetedAndActualAmountsList []*BudgetedAndActualAmounts `type:"list"` - - // The history of the cost filters for a budget during the specified time period. - CostFilters map[string][]*string `type:"map"` - - // The history of the cost types for a budget during the specified time period. - CostTypes *CostTypes `type:"structure"` - - // The time unit of the budget, such as MONTHLY or QUARTERLY. - TimeUnit *string `type:"string" enum:"TimeUnit"` } // String returns the string representation -func (s BudgetPerformanceHistory) String() string { +func (s CreateSubscriberOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BudgetPerformanceHistory) GoString() string { +func (s CreateSubscriberOutput) GoString() string { return s.String() } -// SetBudgetName sets the BudgetName field's value. -func (s *BudgetPerformanceHistory) SetBudgetName(v string) *BudgetPerformanceHistory { - s.BudgetName = &v - return s -} - -// SetBudgetType sets the BudgetType field's value. -func (s *BudgetPerformanceHistory) SetBudgetType(v string) *BudgetPerformanceHistory { - s.BudgetType = &v - return s -} +// You've exceeded the notification or subscriber limit. +type CreationLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` -// SetBudgetedAndActualAmountsList sets the BudgetedAndActualAmountsList field's value. -func (s *BudgetPerformanceHistory) SetBudgetedAndActualAmountsList(v []*BudgetedAndActualAmounts) *BudgetPerformanceHistory { - s.BudgetedAndActualAmountsList = v - return s + // The error message the exception carries. + Message_ *string `locationName:"Message" type:"string"` } -// SetCostFilters sets the CostFilters field's value. -func (s *BudgetPerformanceHistory) SetCostFilters(v map[string][]*string) *BudgetPerformanceHistory { - s.CostFilters = v - return s +// String returns the string representation +func (s CreationLimitExceededException) String() string { + return awsutil.Prettify(s) } -// SetCostTypes sets the CostTypes field's value. 
-func (s *BudgetPerformanceHistory) SetCostTypes(v *CostTypes) *BudgetPerformanceHistory { - s.CostTypes = v - return s +// GoString returns the string representation +func (s CreationLimitExceededException) GoString() string { + return s.String() } -// SetTimeUnit sets the TimeUnit field's value. -func (s *BudgetPerformanceHistory) SetTimeUnit(v string) *BudgetPerformanceHistory { - s.TimeUnit = &v - return s +func newErrorCreationLimitExceededException(v protocol.ResponseMetadata) error { + return &CreationLimitExceededException{ + RespMetadata: v, + } } -// The amount of cost or usage that you created the budget for, compared to -// your actual costs or usage. -type BudgetedAndActualAmounts struct { - _ struct{} `type:"structure"` - - // Your actual costs or usage for a budget period. - ActualAmount *Spend `type:"structure"` - - // The amount of cost or usage that you created the budget for. - BudgetedAmount *Spend `type:"structure"` - - // The time period covered by this budget comparison. - TimePeriod *TimePeriod `type:"structure"` +// Code returns the exception type name. +func (s *CreationLimitExceededException) Code() string { + return "CreationLimitExceededException" } -// String returns the string representation -func (s BudgetedAndActualAmounts) String() string { - return awsutil.Prettify(s) +// Message returns the exception's message. +func (s *CreationLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -// GoString returns the string representation -func (s BudgetedAndActualAmounts) GoString() string { - return s.String() +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *CreationLimitExceededException) OrigErr() error { + return nil } -// SetActualAmount sets the ActualAmount field's value. -func (s *BudgetedAndActualAmounts) SetActualAmount(v *Spend) *BudgetedAndActualAmounts { - s.ActualAmount = v - return s +func (s *CreationLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } -// SetBudgetedAmount sets the BudgetedAmount field's value. -func (s *BudgetedAndActualAmounts) SetBudgetedAmount(v *Spend) *BudgetedAndActualAmounts { - s.BudgetedAmount = v - return s +// Status code returns the HTTP status code for the request's response error. +func (s *CreationLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } -// SetTimePeriod sets the TimePeriod field's value. -func (s *BudgetedAndActualAmounts) SetTimePeriod(v *TimePeriod) *BudgetedAndActualAmounts { - s.TimePeriod = v - return s +// RequestID returns the service's response RequestID for request. +func (s *CreationLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } -// The spend objects that are associated with this budget. The actualSpend tracks -// how much you've used, cost, usage, or RI units, and the forecastedSpend tracks -// how much you are predicted to spend if your current usage remains steady. -// -// For example, if it is the 20th of the month and you have spent 50 dollars -// on Amazon EC2, your actualSpend is 50 USD, and your forecastedSpend is 75 -// USD. -type CalculatedSpend struct { +// Specifies all of the type-specific parameters. +type Definition struct { _ struct{} `type:"structure"` - // The amount of cost, usage, or RI units that you have used. - // - // ActualSpend is a required field - ActualSpend *Spend `type:"structure" required:"true"` + // The AWS Identity and Access Management (IAM) action definition details. 
+ IamActionDefinition *IamActionDefinition `type:"structure"` - // The amount of cost, usage, or RI units that you are forecasted to use. - ForecastedSpend *Spend `type:"structure"` + // The service control policies (SCPs) action definition details. + ScpActionDefinition *ScpActionDefinition `type:"structure"` + + // The AWS Systems Manager (SSM) action definition details. + SsmActionDefinition *SsmActionDefinition `type:"structure"` } // String returns the string representation -func (s CalculatedSpend) String() string { +func (s Definition) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CalculatedSpend) GoString() string { +func (s Definition) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CalculatedSpend) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CalculatedSpend"} - if s.ActualSpend == nil { - invalidParams.Add(request.NewErrParamRequired("ActualSpend")) - } - if s.ActualSpend != nil { - if err := s.ActualSpend.Validate(); err != nil { - invalidParams.AddNested("ActualSpend", err.(request.ErrInvalidParams)) +func (s *Definition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Definition"} + if s.IamActionDefinition != nil { + if err := s.IamActionDefinition.Validate(); err != nil { + invalidParams.AddNested("IamActionDefinition", err.(request.ErrInvalidParams)) } } - if s.ForecastedSpend != nil { - if err := s.ForecastedSpend.Validate(); err != nil { - invalidParams.AddNested("ForecastedSpend", err.(request.ErrInvalidParams)) + if s.ScpActionDefinition != nil { + if err := s.ScpActionDefinition.Validate(); err != nil { + invalidParams.AddNested("ScpActionDefinition", err.(request.ErrInvalidParams)) } } - - if invalidParams.Len() > 0 { - return invalidParams + if s.SsmActionDefinition != nil { + if err := s.SsmActionDefinition.Validate(); err != nil { + invalidParams.AddNested("SsmActionDefinition", err.(request.ErrInvalidParams)) + } } - return nil -} - -// SetActualSpend sets the ActualSpend field's value. -func (s *CalculatedSpend) SetActualSpend(v *Spend) *CalculatedSpend { - s.ActualSpend = v - return s -} - -// SetForecastedSpend sets the ForecastedSpend field's value. -func (s *CalculatedSpend) SetForecastedSpend(v *Spend) *CalculatedSpend { - s.ForecastedSpend = v - return s -} - -// The types of cost that are included in a COST budget, such as tax and subscriptions. -// -// USAGE, RI_UTILIZATION, and RI_COVERAGE budgets do not have CostTypes. -type CostTypes struct { - _ struct{} `type:"structure"` - - // Specifies whether a budget includes credits. - // - // The default value is true. - IncludeCredit *bool `type:"boolean"` - - // Specifies whether a budget includes discounts. - // - // The default value is true. - IncludeDiscount *bool `type:"boolean"` - - // Specifies whether a budget includes non-RI subscription costs. - // - // The default value is true. - IncludeOtherSubscription *bool `type:"boolean"` - // Specifies whether a budget includes recurring fees such as monthly RI fees. - // - // The default value is true. - IncludeRecurring *bool `type:"boolean"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Specifies whether a budget includes refunds. - // - // The default value is true. - IncludeRefund *bool `type:"boolean"` +// SetIamActionDefinition sets the IamActionDefinition field's value. 
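+
+// exampleDefinition is an editor's illustrative sketch, not part of the
+// generated SDK. A Definition carries the type-specific parameters for a
+// budget action, so only the sub-definition that matches the action's
+// ActionType is populated; here a caller-built IAM policy definition is
+// attached and the SCP and SSM definitions are left nil.
+func exampleDefinition(iamDef *IamActionDefinition) (*Definition, error) {
+	def := (&Definition{}).SetIamActionDefinition(iamDef)
+	// Validate only recurses into the sub-definitions that are present.
+	if err := def.Validate(); err != nil {
+		return nil, err
+	}
+	return def, nil
+}
+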
+func (s *Definition) SetIamActionDefinition(v *IamActionDefinition) *Definition { + s.IamActionDefinition = v + return s +} - // Specifies whether a budget includes subscriptions. - // - // The default value is true. - IncludeSubscription *bool `type:"boolean"` +// SetScpActionDefinition sets the ScpActionDefinition field's value. +func (s *Definition) SetScpActionDefinition(v *ScpActionDefinition) *Definition { + s.ScpActionDefinition = v + return s +} - // Specifies whether a budget includes support subscription fees. - // - // The default value is true. - IncludeSupport *bool `type:"boolean"` +// SetSsmActionDefinition sets the SsmActionDefinition field's value. +func (s *Definition) SetSsmActionDefinition(v *SsmActionDefinition) *Definition { + s.SsmActionDefinition = v + return s +} - // Specifies whether a budget includes taxes. - // - // The default value is true. - IncludeTax *bool `type:"boolean"` +type DeleteBudgetActionInput struct { + _ struct{} `type:"structure"` - // Specifies whether a budget includes upfront RI costs. + // The account ID of the user. It should be a 12-digit number. // - // The default value is true. - IncludeUpfront *bool `type:"boolean"` + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` - // Specifies whether a budget uses the amortized rate. + // A system-generated universally unique identifier (UUID) for the action. // - // The default value is false. - UseAmortized *bool `type:"boolean"` + // ActionId is a required field + ActionId *string `min:"36" type:"string" required:"true"` - // Specifies whether a budget uses a blended rate. + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. // - // The default value is false. - UseBlended *bool `type:"boolean"` + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CostTypes) String() string { +func (s DeleteBudgetActionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CostTypes) GoString() string { +func (s DeleteBudgetActionInput) GoString() string { return s.String() } -// SetIncludeCredit sets the IncludeCredit field's value. -func (s *CostTypes) SetIncludeCredit(v bool) *CostTypes { - s.IncludeCredit = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBudgetActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBudgetActionInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.ActionId == nil { + invalidParams.Add(request.NewErrParamRequired("ActionId")) + } + if s.ActionId != nil && len(*s.ActionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ActionId", 36)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.BudgetName != nil && len(*s.BudgetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BudgetName", 1)) + } -// SetIncludeDiscount sets the IncludeDiscount field's value. -func (s *CostTypes) SetIncludeDiscount(v bool) *CostTypes { - s.IncludeDiscount = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetIncludeOtherSubscription sets the IncludeOtherSubscription field's value. 
-func (s *CostTypes) SetIncludeOtherSubscription(v bool) *CostTypes { - s.IncludeOtherSubscription = &v +// SetAccountId sets the AccountId field's value. +func (s *DeleteBudgetActionInput) SetAccountId(v string) *DeleteBudgetActionInput { + s.AccountId = &v return s } -// SetIncludeRecurring sets the IncludeRecurring field's value. -func (s *CostTypes) SetIncludeRecurring(v bool) *CostTypes { - s.IncludeRecurring = &v +// SetActionId sets the ActionId field's value. +func (s *DeleteBudgetActionInput) SetActionId(v string) *DeleteBudgetActionInput { + s.ActionId = &v return s } -// SetIncludeRefund sets the IncludeRefund field's value. -func (s *CostTypes) SetIncludeRefund(v bool) *CostTypes { - s.IncludeRefund = &v +// SetBudgetName sets the BudgetName field's value. +func (s *DeleteBudgetActionInput) SetBudgetName(v string) *DeleteBudgetActionInput { + s.BudgetName = &v return s } -// SetIncludeSubscription sets the IncludeSubscription field's value. -func (s *CostTypes) SetIncludeSubscription(v bool) *CostTypes { - s.IncludeSubscription = &v - return s +type DeleteBudgetActionOutput struct { + _ struct{} `type:"structure"` + + // The account ID of the user. It should be a 12-digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A budget action resource. + // + // Action is a required field + Action *Action `type:"structure" required:"true"` + + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. + // + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` } -// SetIncludeSupport sets the IncludeSupport field's value. -func (s *CostTypes) SetIncludeSupport(v bool) *CostTypes { - s.IncludeSupport = &v - return s +// String returns the string representation +func (s DeleteBudgetActionOutput) String() string { + return awsutil.Prettify(s) } -// SetIncludeTax sets the IncludeTax field's value. -func (s *CostTypes) SetIncludeTax(v bool) *CostTypes { - s.IncludeTax = &v - return s +// GoString returns the string representation +func (s DeleteBudgetActionOutput) GoString() string { + return s.String() } -// SetIncludeUpfront sets the IncludeUpfront field's value. -func (s *CostTypes) SetIncludeUpfront(v bool) *CostTypes { - s.IncludeUpfront = &v +// SetAccountId sets the AccountId field's value. +func (s *DeleteBudgetActionOutput) SetAccountId(v string) *DeleteBudgetActionOutput { + s.AccountId = &v return s } -// SetUseAmortized sets the UseAmortized field's value. -func (s *CostTypes) SetUseAmortized(v bool) *CostTypes { - s.UseAmortized = &v +// SetAction sets the Action field's value. +func (s *DeleteBudgetActionOutput) SetAction(v *Action) *DeleteBudgetActionOutput { + s.Action = v return s } -// SetUseBlended sets the UseBlended field's value. -func (s *CostTypes) SetUseBlended(v bool) *CostTypes { - s.UseBlended = &v +// SetBudgetName sets the BudgetName field's value. +func (s *DeleteBudgetActionOutput) SetBudgetName(v string) *DeleteBudgetActionOutput { + s.BudgetName = &v return s } -// Request of CreateBudget -type CreateBudgetInput struct { +// Request of DeleteBudget +type DeleteBudgetInput struct { _ struct{} `type:"structure"` - // The accountId that is associated with the budget. + // The accountId that is associated with the budget that you want to delete. // // AccountId is a required field AccountId *string `min:"12" type:"string" required:"true"` - // The budget object that you want to create. 
+ // The name of the budget that you want to delete. // - // Budget is a required field - Budget *Budget `type:"structure" required:"true"` - - // A notification that you want to associate with a budget. A budget can have - // up to five notifications, and each notification can have one SNS subscriber - // and up to 10 email subscribers. If you include notifications and subscribers - // in your CreateBudget call, AWS creates the notifications and subscribers - // for you. - NotificationsWithSubscribers []*NotificationWithSubscribers `type:"list"` + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateBudgetInput) String() string { +func (s DeleteBudgetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateBudgetInput) GoString() string { +func (s DeleteBudgetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateBudgetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateBudgetInput"} +func (s *DeleteBudgetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBudgetInput"} if s.AccountId == nil { invalidParams.Add(request.NewErrParamRequired("AccountId")) } if s.AccountId != nil && len(*s.AccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) } - if s.Budget == nil { - invalidParams.Add(request.NewErrParamRequired("Budget")) - } - if s.Budget != nil { - if err := s.Budget.Validate(); err != nil { - invalidParams.AddNested("Budget", err.(request.ErrInvalidParams)) - } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) } - if s.NotificationsWithSubscribers != nil { - for i, v := range s.NotificationsWithSubscribers { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NotificationsWithSubscribers", i), err.(request.ErrInvalidParams)) - } - } + if s.BudgetName != nil && len(*s.BudgetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BudgetName", 1)) } if invalidParams.Len() > 0 { @@ -1989,79 +4193,66 @@ func (s *CreateBudgetInput) Validate() error { } // SetAccountId sets the AccountId field's value. -func (s *CreateBudgetInput) SetAccountId(v string) *CreateBudgetInput { +func (s *DeleteBudgetInput) SetAccountId(v string) *DeleteBudgetInput { s.AccountId = &v return s } -// SetBudget sets the Budget field's value. -func (s *CreateBudgetInput) SetBudget(v *Budget) *CreateBudgetInput { - s.Budget = v - return s -} - -// SetNotificationsWithSubscribers sets the NotificationsWithSubscribers field's value. -func (s *CreateBudgetInput) SetNotificationsWithSubscribers(v []*NotificationWithSubscribers) *CreateBudgetInput { - s.NotificationsWithSubscribers = v +// SetBudgetName sets the BudgetName field's value. 
+func (s *DeleteBudgetInput) SetBudgetName(v string) *DeleteBudgetInput { + s.BudgetName = &v return s } -// Response of CreateBudget -type CreateBudgetOutput struct { +// Response of DeleteBudget +type DeleteBudgetOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s CreateBudgetOutput) String() string { +func (s DeleteBudgetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateBudgetOutput) GoString() string { +func (s DeleteBudgetOutput) GoString() string { return s.String() } -// Request of CreateNotification -type CreateNotificationInput struct { +// Request of DeleteNotification +type DeleteNotificationInput struct { _ struct{} `type:"structure"` - // The accountId that is associated with the budget that you want to create - // a notification for. + // The accountId that is associated with the budget whose notification you want + // to delete. // // AccountId is a required field AccountId *string `min:"12" type:"string" required:"true"` - // The name of the budget that you want AWS to notify you about. Budget names - // must be unique within an account. + // The name of the budget whose notification you want to delete. // // BudgetName is a required field BudgetName *string `min:"1" type:"string" required:"true"` - // The notification that you want to create. + // The notification that you want to delete. // // Notification is a required field Notification *Notification `type:"structure" required:"true"` - - // A list of subscribers that you want to associate with the notification. Each - // notification can have one SNS subscriber and up to 10 email subscribers. - // - // Subscribers is a required field - Subscribers []*Subscriber `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s CreateNotificationInput) String() string { +func (s DeleteNotificationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateNotificationInput) GoString() string { +func (s DeleteNotificationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateNotificationInput"} +func (s *DeleteNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNotificationInput"} if s.AccountId == nil { invalidParams.Add(request.NewErrParamRequired("AccountId")) } @@ -2077,27 +4268,11 @@ func (s *CreateNotificationInput) Validate() error { if s.Notification == nil { invalidParams.Add(request.NewErrParamRequired("Notification")) } - if s.Subscribers == nil { - invalidParams.Add(request.NewErrParamRequired("Subscribers")) - } - if s.Subscribers != nil && len(s.Subscribers) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Subscribers", 1)) - } if s.Notification != nil { if err := s.Notification.Validate(); err != nil { invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) } } - if s.Subscribers != nil { - for i, v := range s.Subscribers { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Subscribers", i), err.(request.ErrInvalidParams)) - } - } - } if invalidParams.Len() > 0 { return invalidParams @@ -2106,84 +4281,77 @@ func (s *CreateNotificationInput) Validate() error { } // SetAccountId sets the AccountId field's value. -func (s *CreateNotificationInput) SetAccountId(v string) *CreateNotificationInput { +func (s *DeleteNotificationInput) SetAccountId(v string) *DeleteNotificationInput { s.AccountId = &v return s } // SetBudgetName sets the BudgetName field's value. -func (s *CreateNotificationInput) SetBudgetName(v string) *CreateNotificationInput { +func (s *DeleteNotificationInput) SetBudgetName(v string) *DeleteNotificationInput { s.BudgetName = &v return s } - -// SetNotification sets the Notification field's value. -func (s *CreateNotificationInput) SetNotification(v *Notification) *CreateNotificationInput { - s.Notification = v - return s -} - -// SetSubscribers sets the Subscribers field's value. -func (s *CreateNotificationInput) SetSubscribers(v []*Subscriber) *CreateNotificationInput { - s.Subscribers = v + +// SetNotification sets the Notification field's value. +func (s *DeleteNotificationInput) SetNotification(v *Notification) *DeleteNotificationInput { + s.Notification = v return s } -// Response of CreateNotification -type CreateNotificationOutput struct { +// Response of DeleteNotification +type DeleteNotificationOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s CreateNotificationOutput) String() string { +func (s DeleteNotificationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateNotificationOutput) GoString() string { +func (s DeleteNotificationOutput) GoString() string { return s.String() } -// Request of CreateSubscriber -type CreateSubscriberInput struct { +// Request of DeleteSubscriber +type DeleteSubscriberInput struct { _ struct{} `type:"structure"` - // The accountId that is associated with the budget that you want to create - // a subscriber for. + // The accountId that is associated with the budget whose subscriber you want + // to delete. // // AccountId is a required field AccountId *string `min:"12" type:"string" required:"true"` - // The name of the budget that you want to subscribe to. Budget names must be - // unique within an account. + // The name of the budget whose subscriber you want to delete. 
// // BudgetName is a required field BudgetName *string `min:"1" type:"string" required:"true"` - // The notification that you want to create a subscriber for. + // The notification whose subscriber you want to delete. // // Notification is a required field Notification *Notification `type:"structure" required:"true"` - // The subscriber that you want to associate with a budget notification. + // The subscriber that you want to delete. // // Subscriber is a required field Subscriber *Subscriber `type:"structure" required:"true"` } // String returns the string representation -func (s CreateSubscriberInput) String() string { +func (s DeleteSubscriberInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateSubscriberInput) GoString() string { +func (s DeleteSubscriberInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateSubscriberInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateSubscriberInput"} +func (s *DeleteSubscriberInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSubscriberInput"} if s.AccountId == nil { invalidParams.Add(request.NewErrParamRequired("AccountId")) } @@ -2220,135 +4388,232 @@ func (s *CreateSubscriberInput) Validate() error { } // SetAccountId sets the AccountId field's value. -func (s *CreateSubscriberInput) SetAccountId(v string) *CreateSubscriberInput { +func (s *DeleteSubscriberInput) SetAccountId(v string) *DeleteSubscriberInput { s.AccountId = &v return s } // SetBudgetName sets the BudgetName field's value. -func (s *CreateSubscriberInput) SetBudgetName(v string) *CreateSubscriberInput { +func (s *DeleteSubscriberInput) SetBudgetName(v string) *DeleteSubscriberInput { s.BudgetName = &v return s } // SetNotification sets the Notification field's value. -func (s *CreateSubscriberInput) SetNotification(v *Notification) *CreateSubscriberInput { +func (s *DeleteSubscriberInput) SetNotification(v *Notification) *DeleteSubscriberInput { s.Notification = v return s } // SetSubscriber sets the Subscriber field's value. -func (s *CreateSubscriberInput) SetSubscriber(v *Subscriber) *CreateSubscriberInput { +func (s *DeleteSubscriberInput) SetSubscriber(v *Subscriber) *DeleteSubscriberInput { s.Subscriber = v return s } -// Response of CreateSubscriber -type CreateSubscriberOutput struct { +// Response of DeleteSubscriber +type DeleteSubscriberOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s CreateSubscriberOutput) String() string { +func (s DeleteSubscriberOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateSubscriberOutput) GoString() string { +func (s DeleteSubscriberOutput) GoString() string { return s.String() } -// You've exceeded the notification or subscriber limit. -type CreationLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +type DescribeBudgetActionHistoriesInput struct { + _ struct{} `type:"structure"` - // The error message the exception carries. - Message_ *string `locationName:"Message" type:"string"` + // The account ID of the user. It should be a 12-digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A system-generated universally unique identifier (UUID) for the action. 
+ // + // ActionId is a required field + ActionId *string `min:"36" type:"string" required:"true"` + + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. + // + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` + + // An integer that represents how many entries a paginated response contains. + // The maximum is 100. + MaxResults *int64 `min:"1" type:"integer"` + + // A generic string. + NextToken *string `type:"string"` + + // The period of time that is covered by a budget. The period has a start date + // and an end date. The start date must come before the end date. There are + // no restrictions on the end date. + TimePeriod *TimePeriod `type:"structure"` } // String returns the string representation -func (s CreationLimitExceededException) String() string { +func (s DescribeBudgetActionHistoriesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreationLimitExceededException) GoString() string { +func (s DescribeBudgetActionHistoriesInput) GoString() string { return s.String() } -func newErrorCreationLimitExceededException(v protocol.ResponseMetadata) error { - return &CreationLimitExceededException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeBudgetActionHistoriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBudgetActionHistoriesInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.ActionId == nil { + invalidParams.Add(request.NewErrParamRequired("ActionId")) + } + if s.ActionId != nil && len(*s.ActionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ActionId", 36)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.BudgetName != nil && len(*s.BudgetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BudgetName", 1)) } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// Code returns the exception type name. -func (s CreationLimitExceededException) Code() string { - return "CreationLimitExceededException" +// SetAccountId sets the AccountId field's value. +func (s *DescribeBudgetActionHistoriesInput) SetAccountId(v string) *DescribeBudgetActionHistoriesInput { + s.AccountId = &v + return s } -// Message returns the exception's message. -func (s CreationLimitExceededException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetActionId sets the ActionId field's value. +func (s *DescribeBudgetActionHistoriesInput) SetActionId(v string) *DescribeBudgetActionHistoriesInput { + s.ActionId = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s CreationLimitExceededException) OrigErr() error { - return nil +// SetBudgetName sets the BudgetName field's value. +func (s *DescribeBudgetActionHistoriesInput) SetBudgetName(v string) *DescribeBudgetActionHistoriesInput { + s.BudgetName = &v + return s } -func (s CreationLimitExceededException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetMaxResults sets the MaxResults field's value. 
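+
+// exampleActionHistoryPaging is an editor's illustrative sketch, not part of
+// the generated SDK. DescribeBudgetActionHistories returns at most MaxResults
+// (up to 100) entries per call, so the loop keeps re-issuing the request with
+// the returned NextToken until none is left. The DescribeBudgetActionHistories
+// client method and the ActionHistory slice in the output are assumed from the
+// surrounding generated code; the identifiers are caller-supplied.
+func exampleActionHistoryPaging(svc *Budgets, accountID, budgetName, actionID string) ([]*ActionHistory, error) {
+	in := (&DescribeBudgetActionHistoriesInput{}).
+		SetAccountId(accountID).
+		SetBudgetName(budgetName).
+		SetActionId(actionID).
+		SetMaxResults(100)
+	var histories []*ActionHistory
+	for {
+		out, err := svc.DescribeBudgetActionHistories(in)
+		if err != nil {
+			return nil, err
+		}
+		histories = append(histories, out.ActionHistories...)
+		if out.NextToken == nil || *out.NextToken == "" {
+			return histories, nil
+		}
+		in.SetNextToken(*out.NextToken)
+	}
+}
+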
+func (s *DescribeBudgetActionHistoriesInput) SetMaxResults(v int64) *DescribeBudgetActionHistoriesInput { + s.MaxResults = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s CreationLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +// SetNextToken sets the NextToken field's value. +func (s *DescribeBudgetActionHistoriesInput) SetNextToken(v string) *DescribeBudgetActionHistoriesInput { + s.NextToken = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s CreationLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +// SetTimePeriod sets the TimePeriod field's value. +func (s *DescribeBudgetActionHistoriesInput) SetTimePeriod(v *TimePeriod) *DescribeBudgetActionHistoriesInput { + s.TimePeriod = v + return s } -// Request of DeleteBudget -type DeleteBudgetInput struct { +type DescribeBudgetActionHistoriesOutput struct { _ struct{} `type:"structure"` - // The accountId that is associated with the budget that you want to delete. + // The historical record of the budget action resource. + // + // ActionHistories is a required field + ActionHistories []*ActionHistory `type:"list" required:"true"` + + // A generic string. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeBudgetActionHistoriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBudgetActionHistoriesOutput) GoString() string { + return s.String() +} + +// SetActionHistories sets the ActionHistories field's value. +func (s *DescribeBudgetActionHistoriesOutput) SetActionHistories(v []*ActionHistory) *DescribeBudgetActionHistoriesOutput { + s.ActionHistories = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeBudgetActionHistoriesOutput) SetNextToken(v string) *DescribeBudgetActionHistoriesOutput { + s.NextToken = &v + return s +} + +type DescribeBudgetActionInput struct { + _ struct{} `type:"structure"` + + // The account ID of the user. It should be a 12-digit number. // // AccountId is a required field AccountId *string `min:"12" type:"string" required:"true"` - // The name of the budget that you want to delete. + // A system-generated universally unique identifier (UUID) for the action. + // + // ActionId is a required field + ActionId *string `min:"36" type:"string" required:"true"` + + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. // // BudgetName is a required field BudgetName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteBudgetInput) String() string { +func (s DescribeBudgetActionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBudgetInput) GoString() string { +func (s DescribeBudgetActionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteBudgetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBudgetInput"} +func (s *DescribeBudgetActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBudgetActionInput"} if s.AccountId == nil { invalidParams.Add(request.NewErrParamRequired("AccountId")) } if s.AccountId != nil && len(*s.AccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) } + if s.ActionId == nil { + invalidParams.Add(request.NewErrParamRequired("ActionId")) + } + if s.ActionId != nil && len(*s.ActionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ActionId", 36)) + } if s.BudgetName == nil { invalidParams.Add(request.NewErrParamRequired("BudgetName")) } @@ -2363,85 +4628,108 @@ func (s *DeleteBudgetInput) Validate() error { } // SetAccountId sets the AccountId field's value. -func (s *DeleteBudgetInput) SetAccountId(v string) *DeleteBudgetInput { +func (s *DescribeBudgetActionInput) SetAccountId(v string) *DescribeBudgetActionInput { s.AccountId = &v return s } +// SetActionId sets the ActionId field's value. +func (s *DescribeBudgetActionInput) SetActionId(v string) *DescribeBudgetActionInput { + s.ActionId = &v + return s +} + // SetBudgetName sets the BudgetName field's value. -func (s *DeleteBudgetInput) SetBudgetName(v string) *DeleteBudgetInput { +func (s *DescribeBudgetActionInput) SetBudgetName(v string) *DescribeBudgetActionInput { s.BudgetName = &v return s } -// Response of DeleteBudget -type DeleteBudgetOutput struct { +type DescribeBudgetActionOutput struct { _ struct{} `type:"structure"` + + // The account ID of the user. It should be a 12-digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A budget action resource. + // + // Action is a required field + Action *Action `type:"structure" required:"true"` + + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. + // + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteBudgetOutput) String() string { +func (s DescribeBudgetActionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBudgetOutput) GoString() string { +func (s DescribeBudgetActionOutput) GoString() string { return s.String() } -// Request of DeleteNotification -type DeleteNotificationInput struct { +// SetAccountId sets the AccountId field's value. +func (s *DescribeBudgetActionOutput) SetAccountId(v string) *DescribeBudgetActionOutput { + s.AccountId = &v + return s +} + +// SetAction sets the Action field's value. +func (s *DescribeBudgetActionOutput) SetAction(v *Action) *DescribeBudgetActionOutput { + s.Action = v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *DescribeBudgetActionOutput) SetBudgetName(v string) *DescribeBudgetActionOutput { + s.BudgetName = &v + return s +} + +type DescribeBudgetActionsForAccountInput struct { _ struct{} `type:"structure"` - // The accountId that is associated with the budget whose notification you want - // to delete. + // The account ID of the user. It should be a 12-digit number. // // AccountId is a required field AccountId *string `min:"12" type:"string" required:"true"` - // The name of the budget whose notification you want to delete. 
- // - // BudgetName is a required field - BudgetName *string `min:"1" type:"string" required:"true"` + // An integer that represents how many entries a paginated response contains. + // The maximum is 100. + MaxResults *int64 `min:"1" type:"integer"` - // The notification that you want to delete. - // - // Notification is a required field - Notification *Notification `type:"structure" required:"true"` + // A generic string. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteNotificationInput) String() string { +func (s DescribeBudgetActionsForAccountInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteNotificationInput) GoString() string { +func (s DescribeBudgetActionsForAccountInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteNotificationInput"} +func (s *DescribeBudgetActionsForAccountInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBudgetActionsForAccountInput"} if s.AccountId == nil { invalidParams.Add(request.NewErrParamRequired("AccountId")) } if s.AccountId != nil && len(*s.AccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) } - if s.BudgetName == nil { - invalidParams.Add(request.NewErrParamRequired("BudgetName")) - } - if s.BudgetName != nil && len(*s.BudgetName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("BudgetName", 1)) - } - if s.Notification == nil { - invalidParams.Add(request.NewErrParamRequired("Notification")) - } - if s.Notification != nil { - if err := s.Notification.Validate(); err != nil { - invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) - } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -2451,77 +4739,92 @@ func (s *DeleteNotificationInput) Validate() error { } // SetAccountId sets the AccountId field's value. -func (s *DeleteNotificationInput) SetAccountId(v string) *DeleteNotificationInput { +func (s *DescribeBudgetActionsForAccountInput) SetAccountId(v string) *DescribeBudgetActionsForAccountInput { s.AccountId = &v return s } -// SetBudgetName sets the BudgetName field's value. -func (s *DeleteNotificationInput) SetBudgetName(v string) *DeleteNotificationInput { - s.BudgetName = &v +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeBudgetActionsForAccountInput) SetMaxResults(v int64) *DescribeBudgetActionsForAccountInput { + s.MaxResults = &v return s } -// SetNotification sets the Notification field's value. -func (s *DeleteNotificationInput) SetNotification(v *Notification) *DeleteNotificationInput { - s.Notification = v +// SetNextToken sets the NextToken field's value. +func (s *DescribeBudgetActionsForAccountInput) SetNextToken(v string) *DescribeBudgetActionsForAccountInput { + s.NextToken = &v return s } -// Response of DeleteNotification -type DeleteNotificationOutput struct { +type DescribeBudgetActionsForAccountOutput struct { _ struct{} `type:"structure"` + + // A list of the budget action resources information. + // + // Actions is a required field + Actions []*Action `type:"list" required:"true"` + + // A generic string. 
+ NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteNotificationOutput) String() string { +func (s DescribeBudgetActionsForAccountOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteNotificationOutput) GoString() string { +func (s DescribeBudgetActionsForAccountOutput) GoString() string { return s.String() } -// Request of DeleteSubscriber -type DeleteSubscriberInput struct { +// SetActions sets the Actions field's value. +func (s *DescribeBudgetActionsForAccountOutput) SetActions(v []*Action) *DescribeBudgetActionsForAccountOutput { + s.Actions = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeBudgetActionsForAccountOutput) SetNextToken(v string) *DescribeBudgetActionsForAccountOutput { + s.NextToken = &v + return s +} + +type DescribeBudgetActionsForBudgetInput struct { _ struct{} `type:"structure"` - // The accountId that is associated with the budget whose subscriber you want - // to delete. + // The account ID of the user. It should be a 12-digit number. // // AccountId is a required field AccountId *string `min:"12" type:"string" required:"true"` - // The name of the budget whose subscriber you want to delete. + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. // // BudgetName is a required field BudgetName *string `min:"1" type:"string" required:"true"` - // The notification whose subscriber you want to delete. - // - // Notification is a required field - Notification *Notification `type:"structure" required:"true"` + // An integer that represents how many entries a paginated response contains. + // The maximum is 100. + MaxResults *int64 `min:"1" type:"integer"` - // The subscriber that you want to delete. - // - // Subscriber is a required field - Subscriber *Subscriber `type:"structure" required:"true"` + // A generic string. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteSubscriberInput) String() string { +func (s DescribeBudgetActionsForBudgetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSubscriberInput) GoString() string { +func (s DescribeBudgetActionsForBudgetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteSubscriberInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSubscriberInput"} +func (s *DescribeBudgetActionsForBudgetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBudgetActionsForBudgetInput"} if s.AccountId == nil { invalidParams.Add(request.NewErrParamRequired("AccountId")) } @@ -2534,21 +4837,8 @@ func (s *DeleteSubscriberInput) Validate() error { if s.BudgetName != nil && len(*s.BudgetName) < 1 { invalidParams.Add(request.NewErrParamMinLen("BudgetName", 1)) } - if s.Notification == nil { - invalidParams.Add(request.NewErrParamRequired("Notification")) - } - if s.Subscriber == nil { - invalidParams.Add(request.NewErrParamRequired("Subscriber")) - } - if s.Notification != nil { - if err := s.Notification.Validate(); err != nil { - invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) - } - } - if s.Subscriber != nil { - if err := s.Subscriber.Validate(); err != nil { - invalidParams.AddNested("Subscriber", err.(request.ErrInvalidParams)) - } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -2558,44 +4848,63 @@ func (s *DeleteSubscriberInput) Validate() error { } // SetAccountId sets the AccountId field's value. -func (s *DeleteSubscriberInput) SetAccountId(v string) *DeleteSubscriberInput { +func (s *DescribeBudgetActionsForBudgetInput) SetAccountId(v string) *DescribeBudgetActionsForBudgetInput { s.AccountId = &v return s } // SetBudgetName sets the BudgetName field's value. -func (s *DeleteSubscriberInput) SetBudgetName(v string) *DeleteSubscriberInput { +func (s *DescribeBudgetActionsForBudgetInput) SetBudgetName(v string) *DescribeBudgetActionsForBudgetInput { s.BudgetName = &v return s } -// SetNotification sets the Notification field's value. -func (s *DeleteSubscriberInput) SetNotification(v *Notification) *DeleteSubscriberInput { - s.Notification = v +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeBudgetActionsForBudgetInput) SetMaxResults(v int64) *DescribeBudgetActionsForBudgetInput { + s.MaxResults = &v return s } -// SetSubscriber sets the Subscriber field's value. -func (s *DeleteSubscriberInput) SetSubscriber(v *Subscriber) *DeleteSubscriberInput { - s.Subscriber = v +// SetNextToken sets the NextToken field's value. +func (s *DescribeBudgetActionsForBudgetInput) SetNextToken(v string) *DescribeBudgetActionsForBudgetInput { + s.NextToken = &v return s } -// Response of DeleteSubscriber -type DeleteSubscriberOutput struct { +type DescribeBudgetActionsForBudgetOutput struct { _ struct{} `type:"structure"` + + // A list of the budget action resources information. + // + // Actions is a required field + Actions []*Action `type:"list" required:"true"` + + // A generic string. + NextToken *string `type:"string"` } // String returns the string representation -func (s DeleteSubscriberOutput) String() string { +func (s DescribeBudgetActionsForBudgetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSubscriberOutput) GoString() string { +func (s DescribeBudgetActionsForBudgetOutput) GoString() string { return s.String() } +// SetActions sets the Actions field's value. +func (s *DescribeBudgetActionsForBudgetOutput) SetActions(v []*Action) *DescribeBudgetActionsForBudgetOutput { + s.Actions = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeBudgetActionsForBudgetOutput) SetNextToken(v string) *DescribeBudgetActionsForBudgetOutput { + s.NextToken = &v + return s +} + // Request of DescribeBudget type DescribeBudgetInput struct { _ struct{} `type:"structure"` @@ -3060,35 +5369,222 @@ func (s DescribeSubscribersForNotificationInput) String() string { } // GoString returns the string representation -func (s DescribeSubscribersForNotificationInput) GoString() string { +func (s DescribeSubscribersForNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSubscribersForNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSubscribersForNotificationInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.BudgetName != nil && len(*s.BudgetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BudgetName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Notification == nil { + invalidParams.Add(request.NewErrParamRequired("Notification")) + } + if s.Notification != nil { + if err := s.Notification.Validate(); err != nil { + invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DescribeSubscribersForNotificationInput) SetAccountId(v string) *DescribeSubscribersForNotificationInput { + s.AccountId = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *DescribeSubscribersForNotificationInput) SetBudgetName(v string) *DescribeSubscribersForNotificationInput { + s.BudgetName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeSubscribersForNotificationInput) SetMaxResults(v int64) *DescribeSubscribersForNotificationInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeSubscribersForNotificationInput) SetNextToken(v string) *DescribeSubscribersForNotificationInput { + s.NextToken = &v + return s +} + +// SetNotification sets the Notification field's value. +func (s *DescribeSubscribersForNotificationInput) SetNotification(v *Notification) *DescribeSubscribersForNotificationInput { + s.Notification = v + return s +} + +// Response of DescribeSubscribersForNotification +type DescribeSubscribersForNotificationOutput struct { + _ struct{} `type:"structure"` + + // The pagination token in the service response that indicates the next set + // of results that you can retrieve. + NextToken *string `type:"string"` + + // A list of subscribers that are associated with a notification. + Subscribers []*Subscriber `min:"1" type:"list"` +} + +// String returns the string representation +func (s DescribeSubscribersForNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSubscribersForNotificationOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeSubscribersForNotificationOutput) SetNextToken(v string) *DescribeSubscribersForNotificationOutput { + s.NextToken = &v + return s +} + +// SetSubscribers sets the Subscribers field's value. +func (s *DescribeSubscribersForNotificationOutput) SetSubscribers(v []*Subscriber) *DescribeSubscribersForNotificationOutput { + s.Subscribers = v + return s +} + +// The budget name already exists. Budget names must be unique within an account. +type DuplicateRecordException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message the exception carries. + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s DuplicateRecordException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DuplicateRecordException) GoString() string { + return s.String() +} + +func newErrorDuplicateRecordException(v protocol.ResponseMetadata) error { + return &DuplicateRecordException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DuplicateRecordException) Code() string { + return "DuplicateRecordException" +} + +// Message returns the exception's message. +func (s *DuplicateRecordException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DuplicateRecordException) OrigErr() error { + return nil +} + +func (s *DuplicateRecordException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DuplicateRecordException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DuplicateRecordException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ExecuteBudgetActionInput struct { + _ struct{} `type:"structure"` + + // The account ID of the user. It should be a 12-digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A system-generated universally unique identifier (UUID) for the action. + // + // ActionId is a required field + ActionId *string `min:"36" type:"string" required:"true"` + + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. + // + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` + + // The type of execution. + // + // ExecutionType is a required field + ExecutionType *string `type:"string" required:"true" enum:"ExecutionType"` +} + +// String returns the string representation +func (s ExecuteBudgetActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecuteBudgetActionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeSubscribersForNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeSubscribersForNotificationInput"} +func (s *ExecuteBudgetActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExecuteBudgetActionInput"} if s.AccountId == nil { invalidParams.Add(request.NewErrParamRequired("AccountId")) } if s.AccountId != nil && len(*s.AccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) } + if s.ActionId == nil { + invalidParams.Add(request.NewErrParamRequired("ActionId")) + } + if s.ActionId != nil && len(*s.ActionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ActionId", 36)) + } if s.BudgetName == nil { invalidParams.Add(request.NewErrParamRequired("BudgetName")) } if s.BudgetName != nil && len(*s.BudgetName) < 1 { invalidParams.Add(request.NewErrParamMinLen("BudgetName", 1)) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.Notification == nil { - invalidParams.Add(request.NewErrParamRequired("Notification")) - } - if s.Notification != nil { - if err := s.Notification.Validate(); err != nil { - invalidParams.AddNested("Notification", err.(request.ErrInvalidParams)) - } + if s.ExecutionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExecutionType")) } if invalidParams.Len() > 0 { @@ -3098,101 +5594,120 @@ func (s *DescribeSubscribersForNotificationInput) Validate() error { } // SetAccountId sets the AccountId field's value. -func (s *DescribeSubscribersForNotificationInput) SetAccountId(v string) *DescribeSubscribersForNotificationInput { +func (s *ExecuteBudgetActionInput) SetAccountId(v string) *ExecuteBudgetActionInput { s.AccountId = &v return s } -// SetBudgetName sets the BudgetName field's value. -func (s *DescribeSubscribersForNotificationInput) SetBudgetName(v string) *DescribeSubscribersForNotificationInput { - s.BudgetName = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeSubscribersForNotificationInput) SetMaxResults(v int64) *DescribeSubscribersForNotificationInput { - s.MaxResults = &v +// SetActionId sets the ActionId field's value. +func (s *ExecuteBudgetActionInput) SetActionId(v string) *ExecuteBudgetActionInput { + s.ActionId = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribeSubscribersForNotificationInput) SetNextToken(v string) *DescribeSubscribersForNotificationInput { - s.NextToken = &v +// SetBudgetName sets the BudgetName field's value. +func (s *ExecuteBudgetActionInput) SetBudgetName(v string) *ExecuteBudgetActionInput { + s.BudgetName = &v return s } -// SetNotification sets the Notification field's value. -func (s *DescribeSubscribersForNotificationInput) SetNotification(v *Notification) *DescribeSubscribersForNotificationInput { - s.Notification = v +// SetExecutionType sets the ExecutionType field's value. +func (s *ExecuteBudgetActionInput) SetExecutionType(v string) *ExecuteBudgetActionInput { + s.ExecutionType = &v return s } -// Response of DescribeSubscribersForNotification -type DescribeSubscribersForNotificationOutput struct { +type ExecuteBudgetActionOutput struct { _ struct{} `type:"structure"` - // The pagination token in the service response that indicates the next set - // of results that you can retrieve. - NextToken *string `type:"string"` + // The account ID of the user. It should be a 12-digit number. 
+ // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` - // A list of subscribers that are associated with a notification. - Subscribers []*Subscriber `min:"1" type:"list"` + // A system-generated universally unique identifier (UUID) for the action. + // + // ActionId is a required field + ActionId *string `min:"36" type:"string" required:"true"` + + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. + // + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` + + // The type of execution. + // + // ExecutionType is a required field + ExecutionType *string `type:"string" required:"true" enum:"ExecutionType"` } // String returns the string representation -func (s DescribeSubscribersForNotificationOutput) String() string { +func (s ExecuteBudgetActionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeSubscribersForNotificationOutput) GoString() string { +func (s ExecuteBudgetActionOutput) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *DescribeSubscribersForNotificationOutput) SetNextToken(v string) *DescribeSubscribersForNotificationOutput { - s.NextToken = &v +// SetAccountId sets the AccountId field's value. +func (s *ExecuteBudgetActionOutput) SetAccountId(v string) *ExecuteBudgetActionOutput { + s.AccountId = &v return s } -// SetSubscribers sets the Subscribers field's value. -func (s *DescribeSubscribersForNotificationOutput) SetSubscribers(v []*Subscriber) *DescribeSubscribersForNotificationOutput { - s.Subscribers = v +// SetActionId sets the ActionId field's value. +func (s *ExecuteBudgetActionOutput) SetActionId(v string) *ExecuteBudgetActionOutput { + s.ActionId = &v return s } -// The budget name already exists. Budget names must be unique within an account. -type DuplicateRecordException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// SetBudgetName sets the BudgetName field's value. +func (s *ExecuteBudgetActionOutput) SetBudgetName(v string) *ExecuteBudgetActionOutput { + s.BudgetName = &v + return s +} + +// SetExecutionType sets the ExecutionType field's value. +func (s *ExecuteBudgetActionOutput) SetExecutionType(v string) *ExecuteBudgetActionOutput { + s.ExecutionType = &v + return s +} + +// The pagination token expired. +type ExpiredNextTokenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message the exception carries. Message_ *string `locationName:"Message" type:"string"` } // String returns the string representation -func (s DuplicateRecordException) String() string { +func (s ExpiredNextTokenException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DuplicateRecordException) GoString() string { +func (s ExpiredNextTokenException) GoString() string { return s.String() } -func newErrorDuplicateRecordException(v protocol.ResponseMetadata) error { - return &DuplicateRecordException{ - respMetadata: v, +func newErrorExpiredNextTokenException(v protocol.ResponseMetadata) error { + return &ExpiredNextTokenException{ + RespMetadata: v, } } // Code returns the exception type name. 
-func (s DuplicateRecordException) Code() string { - return "DuplicateRecordException" +func (s *ExpiredNextTokenException) Code() string { + return "ExpiredNextTokenException" } // Message returns the exception's message. -func (s DuplicateRecordException) Message() string { +func (s *ExpiredNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3200,86 +5715,107 @@ func (s DuplicateRecordException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicateRecordException) OrigErr() error { +func (s *ExpiredNextTokenException) OrigErr() error { return nil } -func (s DuplicateRecordException) Error() string { +func (s *ExpiredNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicateRecordException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ExpiredNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DuplicateRecordException) RequestID() string { - return s.respMetadata.RequestID +func (s *ExpiredNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } -// The pagination token expired. -type ExpiredNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// The AWS Identity and Access Management (IAM) action definition details. +type IamActionDefinition struct { + _ struct{} `type:"structure"` - // The error message the exception carries. - Message_ *string `locationName:"Message" type:"string"` + // A list of groups to be attached. There must be at least one group. + Groups []*string `min:"1" type:"list"` + + // The Amazon Resource Name (ARN) of the policy to be attached. + // + // PolicyArn is a required field + PolicyArn *string `min:"25" type:"string" required:"true"` + + // A list of roles to be attached. There must be at least one role. + Roles []*string `min:"1" type:"list"` + + // A list of users to be attached. There must be at least one user. + Users []*string `min:"1" type:"list"` } // String returns the string representation -func (s ExpiredNextTokenException) String() string { +func (s IamActionDefinition) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExpiredNextTokenException) GoString() string { +func (s IamActionDefinition) GoString() string { return s.String() } -func newErrorExpiredNextTokenException(v protocol.ResponseMetadata) error { - return &ExpiredNextTokenException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *IamActionDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IamActionDefinition"} + if s.Groups != nil && len(s.Groups) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Groups", 1)) + } + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 25 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 25)) + } + if s.Roles != nil && len(s.Roles) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Roles", 1)) + } + if s.Users != nil && len(s.Users) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Users", 1)) } -} - -// Code returns the exception type name. 
-func (s ExpiredNextTokenException) Code() string { - return "ExpiredNextTokenException" -} -// Message returns the exception's message. -func (s ExpiredNextTokenException) Message() string { - if s.Message_ != nil { - return *s.Message_ + if invalidParams.Len() > 0 { + return invalidParams } - return "" + return nil } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s ExpiredNextTokenException) OrigErr() error { - return nil +// SetGroups sets the Groups field's value. +func (s *IamActionDefinition) SetGroups(v []*string) *IamActionDefinition { + s.Groups = v + return s } -func (s ExpiredNextTokenException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetPolicyArn sets the PolicyArn field's value. +func (s *IamActionDefinition) SetPolicyArn(v string) *IamActionDefinition { + s.PolicyArn = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s ExpiredNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +// SetRoles sets the Roles field's value. +func (s *IamActionDefinition) SetRoles(v []*string) *IamActionDefinition { + s.Roles = v + return s } -// RequestID returns the service's response RequestID for request. -func (s ExpiredNextTokenException) RequestID() string { - return s.respMetadata.RequestID +// SetUsers sets the Users field's value. +func (s *IamActionDefinition) SetUsers(v []*string) *IamActionDefinition { + s.Users = v + return s } // An error on the server occurred during the processing of your request. Try // again later. type InternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message the exception carries. Message_ *string `locationName:"Message" type:"string"` @@ -3297,17 +5833,17 @@ func (s InternalErrorException) GoString() string { func newErrorInternalErrorException(v protocol.ResponseMetadata) error { return &InternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalErrorException) Code() string { +func (s *InternalErrorException) Code() string { return "InternalErrorException" } // Message returns the exception's message. -func (s InternalErrorException) Message() string { +func (s *InternalErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3315,28 +5851,28 @@ func (s InternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalErrorException) OrigErr() error { +func (s *InternalErrorException) OrigErr() error { return nil } -func (s InternalErrorException) Error() string { +func (s *InternalErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalErrorException) RequestID() string { + return s.RespMetadata.RequestID } // The pagination token is invalid. 
type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message the exception carries. Message_ *string `locationName:"Message" type:"string"` @@ -3354,17 +5890,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3372,29 +5908,29 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // An error on the client occurred. Typically, the cause is an invalid input // value. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message the exception carries. Message_ *string `locationName:"Message" type:"string"` @@ -3412,17 +5948,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3430,28 +5966,28 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // We can’t locate the resource that you specified. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message the exception carries. Message_ *string `locationName:"Message" type:"string"` @@ -3469,17 +6005,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3487,26 +6023,26 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // A notification that is associated with a budget. A budget can have up to -// five notifications. +// ten notifications. // // Each notification must have at least one subscriber. A notification can have // one SNS subscriber and up to 10 email subscribers, for a total of 11 subscribers. @@ -3540,7 +6076,9 @@ type Notification struct { NotificationType *string `type:"string" required:"true" enum:"NotificationType"` // The threshold that is associated with a notification. Thresholds are always - // a percentage. + // a percentage, and many customers find value being alerted between 50% - 200% + // of the budgeted amount. The maximum limit for your threshold is 1,000,000% + // above the budgeted amount. // // Threshold is a required field Threshold *float64 `type:"double" required:"true"` @@ -3685,6 +6223,123 @@ func (s *NotificationWithSubscribers) SetSubscribers(v []*Subscriber) *Notificat return s } +// The request was received and recognized by the server, but the server rejected +// that particular method for the requested resource. +type ResourceLockedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error message the exception carries. 
+ Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s ResourceLockedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceLockedException) GoString() string { + return s.String() +} + +func newErrorResourceLockedException(v protocol.ResponseMetadata) error { + return &ResourceLockedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceLockedException) Code() string { + return "ResourceLockedException" +} + +// Message returns the exception's message. +func (s *ResourceLockedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceLockedException) OrigErr() error { + return nil +} + +func (s *ResourceLockedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceLockedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceLockedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The service control policies (SCP) action definition details. +type ScpActionDefinition struct { + _ struct{} `type:"structure"` + + // The policy ID attached. + // + // PolicyId is a required field + PolicyId *string `min:"10" type:"string" required:"true"` + + // A list of target IDs. + // + // TargetIds is a required field + TargetIds []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s ScpActionDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScpActionDefinition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ScpActionDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScpActionDefinition"} + if s.PolicyId == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyId")) + } + if s.PolicyId != nil && len(*s.PolicyId) < 10 { + invalidParams.Add(request.NewErrParamMinLen("PolicyId", 10)) + } + if s.TargetIds == nil { + invalidParams.Add(request.NewErrParamRequired("TargetIds")) + } + if s.TargetIds != nil && len(s.TargetIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyId sets the PolicyId field's value. +func (s *ScpActionDefinition) SetPolicyId(v string) *ScpActionDefinition { + s.PolicyId = &v + return s +} + +// SetTargetIds sets the TargetIds field's value. +func (s *ScpActionDefinition) SetTargetIds(v []*string) *ScpActionDefinition { + s.TargetIds = v + return s +} + // The amount of cost or usage that is measured for a budget. // // For example, a Spend for 3 GB of S3 usage would have the following parameters: @@ -3695,43 +6350,110 @@ func (s *NotificationWithSubscribers) SetSubscribers(v []*Subscriber) *Notificat type Spend struct { _ struct{} `type:"structure"` - // The cost or usage amount that is associated with a budget forecast, actual - // spend, or budget threshold. + // The cost or usage amount that is associated with a budget forecast, actual + // spend, or budget threshold. 
+ // + // Amount is a required field + Amount *string `min:"1" type:"string" required:"true"` + + // The unit of measurement that is used for the budget forecast, actual spend, + // or budget threshold, such as dollars or GB. + // + // Unit is a required field + Unit *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Spend) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Spend) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Spend) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Spend"} + if s.Amount == nil { + invalidParams.Add(request.NewErrParamRequired("Amount")) + } + if s.Amount != nil && len(*s.Amount) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Amount", 1)) + } + if s.Unit == nil { + invalidParams.Add(request.NewErrParamRequired("Unit")) + } + if s.Unit != nil && len(*s.Unit) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Unit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAmount sets the Amount field's value. +func (s *Spend) SetAmount(v string) *Spend { + s.Amount = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *Spend) SetUnit(v string) *Spend { + s.Unit = &v + return s +} + +// The AWS Systems Manager (SSM) action definition details. +type SsmActionDefinition struct { + _ struct{} `type:"structure"` + + // The action subType. + // + // ActionSubType is a required field + ActionSubType *string `type:"string" required:"true" enum:"ActionSubType"` + + // The EC2 and RDS instance IDs. // - // Amount is a required field - Amount *string `min:"1" type:"string" required:"true"` + // InstanceIds is a required field + InstanceIds []*string `min:"1" type:"list" required:"true"` - // The unit of measurement that is used for the budget forecast, actual spend, - // or budget threshold, such as dollars or GB. + // The Region to run the SSM document. // - // Unit is a required field - Unit *string `min:"1" type:"string" required:"true"` + // Region is a required field + Region *string `min:"9" type:"string" required:"true"` } // String returns the string representation -func (s Spend) String() string { +func (s SsmActionDefinition) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Spend) GoString() string { +func (s SsmActionDefinition) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *Spend) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Spend"} - if s.Amount == nil { - invalidParams.Add(request.NewErrParamRequired("Amount")) +func (s *SsmActionDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SsmActionDefinition"} + if s.ActionSubType == nil { + invalidParams.Add(request.NewErrParamRequired("ActionSubType")) } - if s.Amount != nil && len(*s.Amount) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Amount", 1)) + if s.InstanceIds == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceIds")) } - if s.Unit == nil { - invalidParams.Add(request.NewErrParamRequired("Unit")) + if s.InstanceIds != nil && len(s.InstanceIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceIds", 1)) } - if s.Unit != nil && len(*s.Unit) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Unit", 1)) + if s.Region == nil { + invalidParams.Add(request.NewErrParamRequired("Region")) + } + if s.Region != nil && len(*s.Region) < 9 { + invalidParams.Add(request.NewErrParamMinLen("Region", 9)) } if invalidParams.Len() > 0 { @@ -3740,15 +6462,21 @@ func (s *Spend) Validate() error { return nil } -// SetAmount sets the Amount field's value. -func (s *Spend) SetAmount(v string) *Spend { - s.Amount = &v +// SetActionSubType sets the ActionSubType field's value. +func (s *SsmActionDefinition) SetActionSubType(v string) *SsmActionDefinition { + s.ActionSubType = &v return s } -// SetUnit sets the Unit field's value. -func (s *Spend) SetUnit(v string) *Spend { - s.Unit = &v +// SetInstanceIds sets the InstanceIds field's value. +func (s *SsmActionDefinition) SetInstanceIds(v []*string) *SsmActionDefinition { + s.InstanceIds = v + return s +} + +// SetRegion sets the Region field's value. +func (s *SsmActionDefinition) SetRegion(v string) *SsmActionDefinition { + s.Region = &v return s } @@ -3866,6 +6594,222 @@ func (s *TimePeriod) SetStart(v time.Time) *TimePeriod { return s } +type UpdateBudgetActionInput struct { + _ struct{} `type:"structure"` + + // The account ID of the user. It should be a 12-digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A system-generated universally unique identifier (UUID) for the action. + // + // ActionId is a required field + ActionId *string `min:"36" type:"string" required:"true"` + + // The trigger threshold of the action. + ActionThreshold *ActionThreshold `type:"structure"` + + // This specifies if the action needs manual or automatic approval. + ApprovalModel *string `type:"string" enum:"ApprovalModel"` + + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. + // + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` + + // Specifies all of the type-specific parameters. + Definition *Definition `type:"structure"` + + // The role passed for action execution and reversion. Roles and actions must + // be in the same account. + ExecutionRoleArn *string `min:"32" type:"string"` + + // The type of a notification. It must be ACTUAL or FORECASTED. + NotificationType *string `type:"string" enum:"NotificationType"` + + // A list of subscribers. 
+ Subscribers []*Subscriber `min:"1" type:"list"` +} + +// String returns the string representation +func (s UpdateBudgetActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBudgetActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateBudgetActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateBudgetActionInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 12)) + } + if s.ActionId == nil { + invalidParams.Add(request.NewErrParamRequired("ActionId")) + } + if s.ActionId != nil && len(*s.ActionId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ActionId", 36)) + } + if s.BudgetName == nil { + invalidParams.Add(request.NewErrParamRequired("BudgetName")) + } + if s.BudgetName != nil && len(*s.BudgetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BudgetName", 1)) + } + if s.ExecutionRoleArn != nil && len(*s.ExecutionRoleArn) < 32 { + invalidParams.Add(request.NewErrParamMinLen("ExecutionRoleArn", 32)) + } + if s.Subscribers != nil && len(s.Subscribers) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Subscribers", 1)) + } + if s.ActionThreshold != nil { + if err := s.ActionThreshold.Validate(); err != nil { + invalidParams.AddNested("ActionThreshold", err.(request.ErrInvalidParams)) + } + } + if s.Definition != nil { + if err := s.Definition.Validate(); err != nil { + invalidParams.AddNested("Definition", err.(request.ErrInvalidParams)) + } + } + if s.Subscribers != nil { + for i, v := range s.Subscribers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Subscribers", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *UpdateBudgetActionInput) SetAccountId(v string) *UpdateBudgetActionInput { + s.AccountId = &v + return s +} + +// SetActionId sets the ActionId field's value. +func (s *UpdateBudgetActionInput) SetActionId(v string) *UpdateBudgetActionInput { + s.ActionId = &v + return s +} + +// SetActionThreshold sets the ActionThreshold field's value. +func (s *UpdateBudgetActionInput) SetActionThreshold(v *ActionThreshold) *UpdateBudgetActionInput { + s.ActionThreshold = v + return s +} + +// SetApprovalModel sets the ApprovalModel field's value. +func (s *UpdateBudgetActionInput) SetApprovalModel(v string) *UpdateBudgetActionInput { + s.ApprovalModel = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *UpdateBudgetActionInput) SetBudgetName(v string) *UpdateBudgetActionInput { + s.BudgetName = &v + return s +} + +// SetDefinition sets the Definition field's value. +func (s *UpdateBudgetActionInput) SetDefinition(v *Definition) *UpdateBudgetActionInput { + s.Definition = v + return s +} + +// SetExecutionRoleArn sets the ExecutionRoleArn field's value. +func (s *UpdateBudgetActionInput) SetExecutionRoleArn(v string) *UpdateBudgetActionInput { + s.ExecutionRoleArn = &v + return s +} + +// SetNotificationType sets the NotificationType field's value. 
+func (s *UpdateBudgetActionInput) SetNotificationType(v string) *UpdateBudgetActionInput { + s.NotificationType = &v + return s +} + +// SetSubscribers sets the Subscribers field's value. +func (s *UpdateBudgetActionInput) SetSubscribers(v []*Subscriber) *UpdateBudgetActionInput { + s.Subscribers = v + return s +} + +type UpdateBudgetActionOutput struct { + _ struct{} `type:"structure"` + + // The account ID of the user. It should be a 12-digit number. + // + // AccountId is a required field + AccountId *string `min:"12" type:"string" required:"true"` + + // A string that represents the budget name. The ":" and "\" characters aren't + // allowed. + // + // BudgetName is a required field + BudgetName *string `min:"1" type:"string" required:"true"` + + // The updated action resource information. + // + // NewAction is a required field + NewAction *Action `type:"structure" required:"true"` + + // The previous action resource information. + // + // OldAction is a required field + OldAction *Action `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateBudgetActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBudgetActionOutput) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *UpdateBudgetActionOutput) SetAccountId(v string) *UpdateBudgetActionOutput { + s.AccountId = &v + return s +} + +// SetBudgetName sets the BudgetName field's value. +func (s *UpdateBudgetActionOutput) SetBudgetName(v string) *UpdateBudgetActionOutput { + s.BudgetName = &v + return s +} + +// SetNewAction sets the NewAction field's value. +func (s *UpdateBudgetActionOutput) SetNewAction(v *Action) *UpdateBudgetActionOutput { + s.NewAction = v + return s +} + +// SetOldAction sets the OldAction field's value. 
+func (s *UpdateBudgetActionOutput) SetOldAction(v *Action) *UpdateBudgetActionOutput { + s.OldAction = v + return s +} + // Request of UpdateBudget type UpdateBudgetInput struct { _ struct{} `type:"structure"` @@ -4187,9 +7131,109 @@ func (s UpdateSubscriberOutput) GoString() string { return s.String() } +const ( + // ActionStatusStandby is a ActionStatus enum value + ActionStatusStandby = "STANDBY" + + // ActionStatusPending is a ActionStatus enum value + ActionStatusPending = "PENDING" + + // ActionStatusExecutionInProgress is a ActionStatus enum value + ActionStatusExecutionInProgress = "EXECUTION_IN_PROGRESS" + + // ActionStatusExecutionSuccess is a ActionStatus enum value + ActionStatusExecutionSuccess = "EXECUTION_SUCCESS" + + // ActionStatusExecutionFailure is a ActionStatus enum value + ActionStatusExecutionFailure = "EXECUTION_FAILURE" + + // ActionStatusReverseInProgress is a ActionStatus enum value + ActionStatusReverseInProgress = "REVERSE_IN_PROGRESS" + + // ActionStatusReverseSuccess is a ActionStatus enum value + ActionStatusReverseSuccess = "REVERSE_SUCCESS" + + // ActionStatusReverseFailure is a ActionStatus enum value + ActionStatusReverseFailure = "REVERSE_FAILURE" + + // ActionStatusResetInProgress is a ActionStatus enum value + ActionStatusResetInProgress = "RESET_IN_PROGRESS" + + // ActionStatusResetFailure is a ActionStatus enum value + ActionStatusResetFailure = "RESET_FAILURE" +) + +// ActionStatus_Values returns all elements of the ActionStatus enum +func ActionStatus_Values() []string { + return []string{ + ActionStatusStandby, + ActionStatusPending, + ActionStatusExecutionInProgress, + ActionStatusExecutionSuccess, + ActionStatusExecutionFailure, + ActionStatusReverseInProgress, + ActionStatusReverseSuccess, + ActionStatusReverseFailure, + ActionStatusResetInProgress, + ActionStatusResetFailure, + } +} + +const ( + // ActionSubTypeStopEc2Instances is a ActionSubType enum value + ActionSubTypeStopEc2Instances = "STOP_EC2_INSTANCES" + + // ActionSubTypeStopRdsInstances is a ActionSubType enum value + ActionSubTypeStopRdsInstances = "STOP_RDS_INSTANCES" +) + +// ActionSubType_Values returns all elements of the ActionSubType enum +func ActionSubType_Values() []string { + return []string{ + ActionSubTypeStopEc2Instances, + ActionSubTypeStopRdsInstances, + } +} + +const ( + // ActionTypeApplyIamPolicy is a ActionType enum value + ActionTypeApplyIamPolicy = "APPLY_IAM_POLICY" + + // ActionTypeApplyScpPolicy is a ActionType enum value + ActionTypeApplyScpPolicy = "APPLY_SCP_POLICY" + + // ActionTypeRunSsmDocuments is a ActionType enum value + ActionTypeRunSsmDocuments = "RUN_SSM_DOCUMENTS" +) + +// ActionType_Values returns all elements of the ActionType enum +func ActionType_Values() []string { + return []string{ + ActionTypeApplyIamPolicy, + ActionTypeApplyScpPolicy, + ActionTypeRunSsmDocuments, + } +} + +const ( + // ApprovalModelAutomatic is a ApprovalModel enum value + ApprovalModelAutomatic = "AUTOMATIC" + + // ApprovalModelManual is a ApprovalModel enum value + ApprovalModelManual = "MANUAL" +) + +// ApprovalModel_Values returns all elements of the ApprovalModel enum +func ApprovalModel_Values() []string { + return []string{ + ApprovalModelAutomatic, + ApprovalModelManual, + } +} + // The type of a budget. It must be one of the following types: // -// COST, USAGE, RI_UTILIZATION, or RI_COVERAGE. +// COST, USAGE, RI_UTILIZATION, RI_COVERAGE, SAVINGS_PLANS_UTILIZATION, or SAVINGS_PLANS_COVERAGE. 
const ( // BudgetTypeUsage is a BudgetType enum value BudgetTypeUsage = "USAGE" @@ -4210,6 +7254,18 @@ const ( BudgetTypeSavingsPlansCoverage = "SAVINGS_PLANS_COVERAGE" ) +// BudgetType_Values returns all elements of the BudgetType enum +func BudgetType_Values() []string { + return []string{ + BudgetTypeUsage, + BudgetTypeCost, + BudgetTypeRiUtilization, + BudgetTypeRiCoverage, + BudgetTypeSavingsPlansUtilization, + BudgetTypeSavingsPlansCoverage, + } +} + // The comparison operator of a notification. Currently the service supports // the following operators: // @@ -4225,6 +7281,67 @@ const ( ComparisonOperatorEqualTo = "EQUAL_TO" ) +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorGreaterThan, + ComparisonOperatorLessThan, + ComparisonOperatorEqualTo, + } +} + +const ( + // EventTypeSystem is a EventType enum value + EventTypeSystem = "SYSTEM" + + // EventTypeCreateAction is a EventType enum value + EventTypeCreateAction = "CREATE_ACTION" + + // EventTypeDeleteAction is a EventType enum value + EventTypeDeleteAction = "DELETE_ACTION" + + // EventTypeUpdateAction is a EventType enum value + EventTypeUpdateAction = "UPDATE_ACTION" + + // EventTypeExecuteAction is a EventType enum value + EventTypeExecuteAction = "EXECUTE_ACTION" +) + +// EventType_Values returns all elements of the EventType enum +func EventType_Values() []string { + return []string{ + EventTypeSystem, + EventTypeCreateAction, + EventTypeDeleteAction, + EventTypeUpdateAction, + EventTypeExecuteAction, + } +} + +const ( + // ExecutionTypeApproveBudgetAction is a ExecutionType enum value + ExecutionTypeApproveBudgetAction = "APPROVE_BUDGET_ACTION" + + // ExecutionTypeRetryBudgetAction is a ExecutionType enum value + ExecutionTypeRetryBudgetAction = "RETRY_BUDGET_ACTION" + + // ExecutionTypeReverseBudgetAction is a ExecutionType enum value + ExecutionTypeReverseBudgetAction = "REVERSE_BUDGET_ACTION" + + // ExecutionTypeResetBudgetAction is a ExecutionType enum value + ExecutionTypeResetBudgetAction = "RESET_BUDGET_ACTION" +) + +// ExecutionType_Values returns all elements of the ExecutionType enum +func ExecutionType_Values() []string { + return []string{ + ExecutionTypeApproveBudgetAction, + ExecutionTypeRetryBudgetAction, + ExecutionTypeReverseBudgetAction, + ExecutionTypeResetBudgetAction, + } +} + const ( // NotificationStateOk is a NotificationState enum value NotificationStateOk = "OK" @@ -4233,6 +7350,14 @@ const ( NotificationStateAlarm = "ALARM" ) +// NotificationState_Values returns all elements of the NotificationState enum +func NotificationState_Values() []string { + return []string{ + NotificationStateOk, + NotificationStateAlarm, + } +} + // The type of a notification. It must be ACTUAL or FORECASTED. const ( // NotificationTypeActual is a NotificationType enum value @@ -4242,6 +7367,14 @@ const ( NotificationTypeForecasted = "FORECASTED" ) +// NotificationType_Values returns all elements of the NotificationType enum +func NotificationType_Values() []string { + return []string{ + NotificationTypeActual, + NotificationTypeForecasted, + } +} + // The subscription type of the subscriber. It can be SMS or EMAIL. const ( // SubscriptionTypeSns is a SubscriptionType enum value @@ -4251,7 +7384,15 @@ const ( SubscriptionTypeEmail = "EMAIL" ) -// The type of threshold for a notification. It can be PERCENTAGE or ABSOLUTE_VALUE. 
+// SubscriptionType_Values returns all elements of the SubscriptionType enum +func SubscriptionType_Values() []string { + return []string{ + SubscriptionTypeSns, + SubscriptionTypeEmail, + } +} + +// The type of threshold for a notification. const ( // ThresholdTypePercentage is a ThresholdType enum value ThresholdTypePercentage = "PERCENTAGE" @@ -4260,6 +7401,14 @@ const ( ThresholdTypeAbsoluteValue = "ABSOLUTE_VALUE" ) +// ThresholdType_Values returns all elements of the ThresholdType enum +func ThresholdType_Values() []string { + return []string{ + ThresholdTypePercentage, + ThresholdTypeAbsoluteValue, + } +} + // The time unit of the budget, such as MONTHLY or QUARTERLY. const ( // TimeUnitDaily is a TimeUnit enum value @@ -4274,3 +7423,13 @@ const ( // TimeUnitAnnually is a TimeUnit enum value TimeUnitAnnually = "ANNUALLY" ) + +// TimeUnit_Values returns all elements of the TimeUnit enum +func TimeUnit_Values() []string { + return []string{ + TimeUnitDaily, + TimeUnitMonthly, + TimeUnitQuarterly, + TimeUnitAnnually, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go b/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go index 15cf0ff5d..93062b49c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/budgets/errors.go @@ -57,6 +57,13 @@ const ( // // We can’t locate the resource that you specified. ErrCodeNotFoundException = "NotFoundException" + + // ErrCodeResourceLockedException for service response error code + // "ResourceLockedException". + // + // The request was received and recognized by the server, but the server rejected + // that particular method for the requested resource. + ErrCodeResourceLockedException = "ResourceLockedException" ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ @@ -68,4 +75,5 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidNextTokenException": newErrorInvalidNextTokenException, "InvalidParameterException": newErrorInvalidParameterException, "NotFoundException": newErrorNotFoundException, + "ResourceLockedException": newErrorResourceLockedException, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go b/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go index 6dc8ae78b..5e6a10716 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go index fb8066d8a..df2c680ef 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go @@ -1063,6 +1063,9 @@ func (c *Cloud9) TagResourceRequest(input *TagResourceInput) (req *request.Reque // * BadRequestException // The target request is invalid. // +// * ConcurrentAccessException +// A concurrent access issue occurred. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/cloud9-2017-09-23/TagResource func (c *Cloud9) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { req, out := c.TagResourceRequest(input) @@ -1149,6 +1152,9 @@ func (c *Cloud9) UntagResourceRequest(input *UntagResourceInput) (req *request.R // * BadRequestException // The target request is invalid. // +// * ConcurrentAccessException +// A concurrent access issue occurred. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/cloud9-2017-09-23/UntagResource func (c *Cloud9) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { req, out := c.UntagResourceRequest(input) @@ -1369,8 +1375,8 @@ func (c *Cloud9) UpdateEnvironmentMembershipWithContext(ctx aws.Context, input * // The target request is invalid. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -1387,17 +1393,73 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BadRequestException) OrigErr() error { + return nil +} + +func (s *BadRequestException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A concurrent access issue occurred. +type ConcurrentAccessException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ConcurrentAccessException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConcurrentAccessException) GoString() string { + return s.String() +} + +func newErrorConcurrentAccessException(v protocol.ResponseMetadata) error { + return &ConcurrentAccessException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ConcurrentAccessException) Code() string { + return "ConcurrentAccessException" +} + +// Message returns the exception's message. +func (s *ConcurrentAccessException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1405,28 +1467,28 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s BadRequestException) OrigErr() error { +func (s *ConcurrentAccessException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *ConcurrentAccessException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentAccessException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentAccessException) RequestID() string { + return s.RespMetadata.RequestID } // A conflict occurred. type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -1443,17 +1505,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1461,22 +1523,22 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } type CreateEnvironmentEC2Input struct { @@ -1493,6 +1555,9 @@ type CreateEnvironmentEC2Input struct { // in the Amazon EC2 API Reference. ClientRequestToken *string `locationName:"clientRequestToken" type:"string"` + // The connection type used for connecting to an Amazon EC2 environment. + ConnectionType *string `locationName:"connectionType" type:"string" enum:"ConnectionType"` + // The description of the environment to create. Description *string `locationName:"description" type:"string" sensitive:"true"` @@ -1579,6 +1644,12 @@ func (s *CreateEnvironmentEC2Input) SetClientRequestToken(v string) *CreateEnvir return s } +// SetConnectionType sets the ConnectionType field's value. +func (s *CreateEnvironmentEC2Input) SetConnectionType(v string) *CreateEnvironmentEC2Input { + s.ConnectionType = &v + return s +} + // SetDescription sets the Description field's value. 
func (s *CreateEnvironmentEC2Input) SetDescription(v string) *CreateEnvironmentEC2Input { s.Description = &v @@ -2117,6 +2188,9 @@ type Environment struct { // The Amazon Resource Name (ARN) of the environment. Arn *string `locationName:"arn" type:"string"` + // The connection type used for connecting to an Amazon EC2 environment. + ConnectionType *string `locationName:"connectionType" type:"string" enum:"ConnectionType"` + // The description for the environment. Description *string `locationName:"description" type:"string" sensitive:"true"` @@ -2157,6 +2231,12 @@ func (s *Environment) SetArn(v string) *Environment { return s } +// SetConnectionType sets the ConnectionType field's value. +func (s *Environment) SetConnectionType(v string) *Environment { + s.ConnectionType = &v + return s +} + // SetDescription sets the Description field's value. func (s *Environment) SetDescription(v string) *Environment { s.Description = &v @@ -2318,8 +2398,8 @@ func (s *EnvironmentMember) SetUserId(v string) *EnvironmentMember { // An access permissions issue occurred. type ForbiddenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2336,17 +2416,17 @@ func (s ForbiddenException) GoString() string { func newErrorForbiddenException(v protocol.ResponseMetadata) error { return &ForbiddenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ForbiddenException) Code() string { +func (s *ForbiddenException) Code() string { return "ForbiddenException" } // Message returns the exception's message. -func (s ForbiddenException) Message() string { +func (s *ForbiddenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2354,28 +2434,28 @@ func (s ForbiddenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ForbiddenException) OrigErr() error { +func (s *ForbiddenException) OrigErr() error { return nil } -func (s ForbiddenException) Error() string { +func (s *ForbiddenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ForbiddenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ForbiddenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ForbiddenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ForbiddenException) RequestID() string { + return s.RespMetadata.RequestID } // An internal server error occurred. type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2392,17 +2472,17 @@ func (s InternalServerErrorException) GoString() string { func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { return &InternalServerErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerErrorException) Code() string { +func (s *InternalServerErrorException) Code() string { return "InternalServerErrorException" } // Message returns the exception's message. 
-func (s InternalServerErrorException) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2410,28 +2490,28 @@ func (s InternalServerErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerErrorException) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s InternalServerErrorException) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID } // A service limit was exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2448,17 +2528,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2466,22 +2546,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListEnvironmentsInput struct { @@ -2620,8 +2700,8 @@ func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput // The target resource cannot be found. 
type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2638,17 +2718,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2656,22 +2736,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Metadata that is associated with AWS resources. In particular, a name-value @@ -2813,8 +2893,8 @@ func (s TagResourceOutput) GoString() string { // Too many service requests were made over the given time period. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2831,17 +2911,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2849,22 +2929,22 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -3104,6 +3184,22 @@ func (s UpdateEnvironmentOutput) GoString() string { return s.String() } +const ( + // ConnectionTypeConnectSsh is a ConnectionType enum value + ConnectionTypeConnectSsh = "CONNECT_SSH" + + // ConnectionTypeConnectSsm is a ConnectionType enum value + ConnectionTypeConnectSsm = "CONNECT_SSM" +) + +// ConnectionType_Values returns all elements of the ConnectionType enum +func ConnectionType_Values() []string { + return []string{ + ConnectionTypeConnectSsh, + ConnectionTypeConnectSsm, + } +} + const ( // EnvironmentLifecycleStatusCreating is a EnvironmentLifecycleStatus enum value EnvironmentLifecycleStatusCreating = "CREATING" @@ -3121,6 +3217,17 @@ const ( EnvironmentLifecycleStatusDeleteFailed = "DELETE_FAILED" ) +// EnvironmentLifecycleStatus_Values returns all elements of the EnvironmentLifecycleStatus enum +func EnvironmentLifecycleStatus_Values() []string { + return []string{ + EnvironmentLifecycleStatusCreating, + EnvironmentLifecycleStatusCreated, + EnvironmentLifecycleStatusCreateFailed, + EnvironmentLifecycleStatusDeleting, + EnvironmentLifecycleStatusDeleteFailed, + } +} + const ( // EnvironmentStatusError is a EnvironmentStatus enum value EnvironmentStatusError = "error" @@ -3144,6 +3251,19 @@ const ( EnvironmentStatusDeleting = "deleting" ) +// EnvironmentStatus_Values returns all elements of the EnvironmentStatus enum +func EnvironmentStatus_Values() []string { + return []string{ + EnvironmentStatusError, + EnvironmentStatusCreating, + EnvironmentStatusConnecting, + EnvironmentStatusReady, + EnvironmentStatusStopping, + EnvironmentStatusStopped, + EnvironmentStatusDeleting, + } +} + const ( // EnvironmentTypeSsh is a EnvironmentType enum value EnvironmentTypeSsh = "ssh" @@ -3152,6 +3272,14 @@ const ( EnvironmentTypeEc2 = "ec2" ) +// EnvironmentType_Values returns all elements of the EnvironmentType enum +func EnvironmentType_Values() []string { + return []string{ + EnvironmentTypeSsh, + EnvironmentTypeEc2, + } +} + const ( // MemberPermissionsReadWrite is a MemberPermissions enum value MemberPermissionsReadWrite = "read-write" @@ -3160,6 +3288,14 @@ const ( MemberPermissionsReadOnly = "read-only" ) +// MemberPermissions_Values returns all elements of the MemberPermissions enum +func MemberPermissions_Values() []string { + return []string{ + MemberPermissionsReadWrite, + MemberPermissionsReadOnly, + } +} + const ( // PermissionsOwner is a Permissions enum value PermissionsOwner = "owner" @@ -3170,3 +3306,12 @@ const ( // PermissionsReadOnly is a Permissions enum value PermissionsReadOnly = "read-only" ) + +// Permissions_Values returns all elements of the Permissions enum +func Permissions_Values() []string { + return []string{ + PermissionsOwner, + PermissionsReadWrite, + PermissionsReadOnly, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloud9/errors.go b/vendor/github.com/aws/aws-sdk-go/service/cloud9/errors.go index 0318a99b3..308ab4f0b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloud9/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloud9/errors.go @@ -14,6 +14,12 @@ const ( // The target request is invalid. ErrCodeBadRequestException = "BadRequestException" + // ErrCodeConcurrentAccessException for service response error code + // "ConcurrentAccessException". + // + // A concurrent access issue occurred. 
+ ErrCodeConcurrentAccessException = "ConcurrentAccessException" + // ErrCodeConflictException for service response error code // "ConflictException". // @@ -53,6 +59,7 @@ const ( var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "BadRequestException": newErrorBadRequestException, + "ConcurrentAccessException": newErrorConcurrentAccessException, "ConflictException": newErrorConflictException, "ForbiddenException": newErrorForbiddenException, "InternalServerErrorException": newErrorInternalServerErrorException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go index 5f302027e..cfaec1d77 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go index d4fbccb7e..f8b384c0a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go @@ -437,7 +437,7 @@ func (c *CloudFormation) CreateStackInstancesRequest(input *CreateStackInstances // CreateStackInstances API operation for AWS CloudFormation. // // Creates stack instances for the specified accounts, within the specified -// regions. A stack instance refers to a stack in a specific account and region. +// Regions. A stack instance refers to a stack in a specific account and Region. // You must specify at least one value for either Accounts or DeploymentTargets, // and you must specify at least one value for Regions. // @@ -794,7 +794,7 @@ func (c *CloudFormation) DeleteStackInstancesRequest(input *DeleteStackInstances // DeleteStackInstances API operation for AWS CloudFormation. // -// Deletes stack instances for the specified accounts, in the specified regions. +// Deletes stack instances for the specified accounts, in the specified Regions. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1055,6 +1055,12 @@ func (c *CloudFormation) DescribeAccountLimitsRequest(input *DescribeAccountLimi Name: opDescribeAccountLimits, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, } if input == nil { @@ -1101,6 +1107,58 @@ func (c *CloudFormation) DescribeAccountLimitsWithContext(ctx aws.Context, input return out, req.Send() } +// DescribeAccountLimitsPages iterates over the pages of a DescribeAccountLimits operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAccountLimits method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAccountLimits operation. 
+// pageNum := 0 +// err := client.DescribeAccountLimitsPages(params, +// func(page *cloudformation.DescribeAccountLimitsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFormation) DescribeAccountLimitsPages(input *DescribeAccountLimitsInput, fn func(*DescribeAccountLimitsOutput, bool) bool) error { + return c.DescribeAccountLimitsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAccountLimitsPagesWithContext same as DescribeAccountLimitsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) DescribeAccountLimitsPagesWithContext(ctx aws.Context, input *DescribeAccountLimitsInput, fn func(*DescribeAccountLimitsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAccountLimitsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAccountLimitsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAccountLimitsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeChangeSet = "DescribeChangeSet" // DescribeChangeSetRequest generates a "aws/request.Request" representing the @@ -1452,7 +1510,7 @@ func (c *CloudFormation) DescribeStackInstanceRequest(input *DescribeStackInstan // DescribeStackInstance API operation for AWS CloudFormation. // // Returns the stack instance that's associated with the specified stack set, -// AWS account, and region. +// AWS account, and Region. // // For a list of stack instances that are associated with a specific stack set, // use ListStackInstances. @@ -3038,6 +3096,12 @@ func (c *CloudFormation) ListChangeSetsRequest(input *ListChangeSetsInput) (req Name: opListChangeSets, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, } if input == nil { @@ -3083,6 +3147,58 @@ func (c *CloudFormation) ListChangeSetsWithContext(ctx aws.Context, input *ListC return out, req.Send() } +// ListChangeSetsPages iterates over the pages of a ListChangeSets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListChangeSets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListChangeSets operation. +// pageNum := 0 +// err := client.ListChangeSetsPages(params, +// func(page *cloudformation.ListChangeSetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFormation) ListChangeSetsPages(input *ListChangeSetsInput, fn func(*ListChangeSetsOutput, bool) bool) error { + return c.ListChangeSetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListChangeSetsPagesWithContext same as ListChangeSetsPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) ListChangeSetsPagesWithContext(ctx aws.Context, input *ListChangeSetsInput, fn func(*ListChangeSetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListChangeSetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListChangeSetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListChangeSetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListExports = "ListExports" // ListExportsRequest generates a "aws/request.Request" representing the @@ -3133,7 +3249,7 @@ func (c *CloudFormation) ListExportsRequest(input *ListExportsInput) (req *reque // ListExports API operation for AWS CloudFormation. // -// Lists all exported output values in the account and region in which you call +// Lists all exported output values in the account and Region in which you call // this action. Use this action to see the exported output values that you can // import into other stacks. To import values, use the Fn::ImportValue (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-importvalue.html) // function. @@ -3389,6 +3505,12 @@ func (c *CloudFormation) ListStackInstancesRequest(input *ListStackInstancesInpu Name: opListStackInstances, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -3404,7 +3526,7 @@ func (c *CloudFormation) ListStackInstancesRequest(input *ListStackInstancesInpu // // Returns summary information about stack instances that are associated with // the specified stack set. You can filter for stack instances that are associated -// with a specific AWS account name or region. +// with a specific AWS account name or Region, or that have a specific status. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3439,6 +3561,58 @@ func (c *CloudFormation) ListStackInstancesWithContext(ctx aws.Context, input *L return out, req.Send() } +// ListStackInstancesPages iterates over the pages of a ListStackInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStackInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStackInstances operation. 
+// pageNum := 0 +// err := client.ListStackInstancesPages(params, +// func(page *cloudformation.ListStackInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFormation) ListStackInstancesPages(input *ListStackInstancesInput, fn func(*ListStackInstancesOutput, bool) bool) error { + return c.ListStackInstancesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListStackInstancesPagesWithContext same as ListStackInstancesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) ListStackInstancesPagesWithContext(ctx aws.Context, input *ListStackInstancesInput, fn func(*ListStackInstancesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListStackInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListStackInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListStackInstancesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListStackResources = "ListStackResources" // ListStackResourcesRequest generates a "aws/request.Request" representing the @@ -3605,6 +3779,12 @@ func (c *CloudFormation) ListStackSetOperationResultsRequest(input *ListStackSet Name: opListStackSetOperationResults, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -3656,6 +3836,58 @@ func (c *CloudFormation) ListStackSetOperationResultsWithContext(ctx aws.Context return out, req.Send() } +// ListStackSetOperationResultsPages iterates over the pages of a ListStackSetOperationResults operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStackSetOperationResults method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStackSetOperationResults operation. +// pageNum := 0 +// err := client.ListStackSetOperationResultsPages(params, +// func(page *cloudformation.ListStackSetOperationResultsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFormation) ListStackSetOperationResultsPages(input *ListStackSetOperationResultsInput, fn func(*ListStackSetOperationResultsOutput, bool) bool) error { + return c.ListStackSetOperationResultsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListStackSetOperationResultsPagesWithContext same as ListStackSetOperationResultsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudFormation) ListStackSetOperationResultsPagesWithContext(ctx aws.Context, input *ListStackSetOperationResultsInput, fn func(*ListStackSetOperationResultsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListStackSetOperationResultsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListStackSetOperationResultsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListStackSetOperationResultsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListStackSetOperations = "ListStackSetOperations" // ListStackSetOperationsRequest generates a "aws/request.Request" representing the @@ -3687,6 +3919,12 @@ func (c *CloudFormation) ListStackSetOperationsRequest(input *ListStackSetOperat Name: opListStackSetOperations, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -3735,6 +3973,58 @@ func (c *CloudFormation) ListStackSetOperationsWithContext(ctx aws.Context, inpu return out, req.Send() } +// ListStackSetOperationsPages iterates over the pages of a ListStackSetOperations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStackSetOperations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStackSetOperations operation. +// pageNum := 0 +// err := client.ListStackSetOperationsPages(params, +// func(page *cloudformation.ListStackSetOperationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFormation) ListStackSetOperationsPages(input *ListStackSetOperationsInput, fn func(*ListStackSetOperationsOutput, bool) bool) error { + return c.ListStackSetOperationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListStackSetOperationsPagesWithContext same as ListStackSetOperationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) ListStackSetOperationsPagesWithContext(ctx aws.Context, input *ListStackSetOperationsInput, fn func(*ListStackSetOperationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListStackSetOperationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListStackSetOperationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListStackSetOperationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListStackSets = "ListStackSets" // ListStackSetsRequest generates a "aws/request.Request" representing the @@ -3766,6 +4056,12 @@ func (c *CloudFormation) ListStackSetsRequest(input *ListStackSetsInput) (req *r Name: opListStackSets, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -3810,6 +4106,58 @@ func (c *CloudFormation) ListStackSetsWithContext(ctx aws.Context, input *ListSt return out, req.Send() } +// ListStackSetsPages iterates over the pages of a ListStackSets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStackSets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStackSets operation. +// pageNum := 0 +// err := client.ListStackSetsPages(params, +// func(page *cloudformation.ListStackSetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFormation) ListStackSetsPages(input *ListStackSetsInput, fn func(*ListStackSetsOutput, bool) bool) error { + return c.ListStackSetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListStackSetsPagesWithContext same as ListStackSetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) ListStackSetsPagesWithContext(ctx aws.Context, input *ListStackSetsInput, fn func(*ListStackSetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListStackSetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListStackSetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListStackSetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListStacks = "ListStacks" // ListStacksRequest generates a "aws/request.Request" representing the @@ -4503,6 +4851,10 @@ func (c *CloudFormation) RegisterTypeRequest(input *RegisterTypeInput) (req *req // see Creating Resource Providers (https://docs.aws.amazon.com/cloudformation-cli/latest/userguide/resource-types.html) // in the CloudFormation CLI User Guide. // +// You can have a maximum of 50 resource type versions registered at a time. +// This maximum is per account and per region. Use DeregisterType (AWSCloudFormation/latest/APIReference/API_DeregisterType.html) +// to deregister specific resource type versions if necessary. +// // Once you have initiated a registration request using RegisterType , you can // use DescribeTypeRegistration to monitor the progress of the registration // request. 
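
For context only (not part of the vendored files): the hunks above vendor in Paginator metadata and Pages/PagesWithContext helpers for several CloudFormation list operations. Below is a minimal sketch, assuming default-chain credentials and region, of how calling code typically consumes one of these helpers; the session setup, the ACTIVE status filter, and the printed field are illustrative choices, not something this patch changes in the provider.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

func main() {
	// Assumption: credentials and region come from the default provider chain.
	sess := session.Must(session.NewSession())
	conn := cloudformation.New(sess)

	// Illustrative filter: only ACTIVE stack sets.
	input := &cloudformation.ListStackSetsInput{
		Status: aws.String(cloudformation.StackSetStatusActive),
	}

	// ListStackSetsPages invokes the callback once per page; returning false
	// from the callback stops iteration early.
	err := conn.ListStackSetsPages(input, func(page *cloudformation.ListStackSetsOutput, lastPage bool) bool {
		for _, summary := range page.Summaries {
			fmt.Println(aws.StringValue(summary.StackSetName))
		}
		return !lastPage
	})
	if err != nil {
		fmt.Println("listing stack sets failed:", err)
	}
}

The same callback pattern applies to the other paginated helpers vendored in above (DescribeAccountLimitsPages, ListChangeSetsPages, ListStackInstancesPages, and the stack set operation variants).
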
@@ -5001,10 +5353,10 @@ func (c *CloudFormation) UpdateStackInstancesRequest(input *UpdateStackInstances // UpdateStackInstances API operation for AWS CloudFormation. // // Updates the parameter values for stack instances for the specified accounts, -// within the specified regions. A stack instance refers to a stack in a specific -// account and region. +// within the specified Regions. A stack instance refers to a stack in a specific +// account and Region. // -// You can only update stack instances in regions and accounts where they already +// You can only update stack instances in Regions and accounts where they already // exist; to create additional stack instances, use CreateStackInstances (https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_CreateStackInstances.html). // // During stack set updates, any parameters overridden for a stack instance @@ -5114,7 +5466,7 @@ func (c *CloudFormation) UpdateStackSetRequest(input *UpdateStackSetInput) (req // UpdateStackSet API operation for AWS CloudFormation. // // Updates the stack set, and associated stack instances in the specified accounts -// and regions. +// and Regions. // // Even if the stack set operation created by updating the stack set fails (completely // or partially, below or above a specified failure tolerance), the stack set @@ -5332,15 +5684,15 @@ func (c *CloudFormation) ValidateTemplateWithContext(ctx aws.Context, input *Val // Structure that contains the results of the account gate function which AWS // CloudFormation invokes, if present, before proceeding with a stack set operation -// in an account and region. +// in an account and Region. // -// For each account and region, AWS CloudFormation lets you specify a Lamdba +// For each account and Region, AWS CloudFormation lets you specify a Lamdba // function that encapsulates any requirements that must be met before CloudFormation -// can proceed with a stack set operation in that account and region. CloudFormation +// can proceed with a stack set operation in that account and Region. CloudFormation // invokes the function each time a stack set operation is requested for that -// account and region; if the function returns FAILED, CloudFormation cancels -// the operation in that account and region, and sets the stack set operation -// result status for that account and region to FAILED. +// account and Region; if the function returns FAILED, CloudFormation cancels +// the operation in that account and Region, and sets the stack set operation +// result status for that account and Region to FAILED. // // For more information, see Configuring a target account gate (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-account-gating.html). type AccountGateResult struct { @@ -5349,28 +5701,28 @@ type AccountGateResult struct { // The status of the account gate function. // // * SUCCEEDED: The account gate function has determined that the account - // and region passes any requirements for a stack set operation to occur. + // and Region passes any requirements for a stack set operation to occur. // AWS CloudFormation proceeds with the stack operation in that account and - // region. + // Region. // // * FAILED: The account gate function has determined that the account and - // region does not meet the requirements for a stack set operation to occur. + // Region does not meet the requirements for a stack set operation to occur. 
// AWS CloudFormation cancels the stack set operation in that account and - // region, and sets the stack set operation result status for that account - // and region to FAILED. + // Region, and sets the stack set operation result status for that account + // and Region to FAILED. // // * SKIPPED: AWS CloudFormation has skipped calling the account gate function - // for this account and region, for one of the following reasons: An account - // gate function has not been specified for the account and region. AWS CloudFormation - // proceeds with the stack set operation in this account and region. The + // for this account and Region, for one of the following reasons: An account + // gate function has not been specified for the account and Region. AWS CloudFormation + // proceeds with the stack set operation in this account and Region. The // AWSCloudFormationStackSetExecutionRole of the stack set adminstration // account lacks permissions to invoke the function. AWS CloudFormation proceeds - // with the stack set operation in this account and region. Either no action + // with the stack set operation in this account and Region. Either no action // is necessary, or no action is possible, on the stack. AWS CloudFormation - // skips the stack set operation in this account and region. + // skips the stack set operation in this account and Region. Status *string `type:"string" enum:"AccountGateStatus"` - // The reason for the account gate status assigned to this account and region + // The reason for the account gate status assigned to this account and Region // for the stack set operation. StatusReason *string `type:"string"` } @@ -6327,7 +6679,7 @@ type CreateStackInput struct { RollbackConfiguration *RollbackConfiguration `type:"structure"` // The name that is associated with the stack. The name must be unique in the - // region in which you are creating the stack. + // Region in which you are creating the stack. // // A stack name can contain only alphanumeric characters (case sensitive) and // hyphens. It must start with an alphabetic character and cannot be longer @@ -6343,7 +6695,7 @@ type CreateStackInput struct { StackPolicyBody *string `min:"1" type:"string"` // Location of a file containing the stack policy. The URL must point to a policy - // (maximum size: 16 KB) located in an S3 bucket in the same region as the stack. + // (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. // You can specify either the StackPolicyBody or the StackPolicyURL parameter, // but not both. StackPolicyURL *string `min:"1" type:"string"` @@ -6542,7 +6894,7 @@ type CreateStackInstancesInput struct { _ struct{} `type:"structure"` // [Self-managed permissions] The names of one or more AWS accounts that you - // want to create stack instances in the specified region(s) for. + // want to create stack instances in the specified Region(s) for. // // You can specify Accounts or DeploymentTargets, but not both. Accounts []*string `type:"list"` @@ -6573,7 +6925,7 @@ type CreateStackInstancesInput struct { // stack instances. // // Any overridden parameter values will be applied to all stack instances in - // the specified accounts and regions. When specifying parameters and their + // the specified accounts and Regions. When specifying parameters and their // values, be aware of how AWS CloudFormation sets parameter values during stack // instance operations: // @@ -6599,7 +6951,7 @@ type CreateStackInstancesInput struct { // to update the stack set template. 
ParameterOverrides []*Parameter `type:"list"` - // The names of one or more regions where you want to create stack instances + // The names of one or more Regions where you want to create stack instances // using the specified AWS account(s). // // Regions is a required field @@ -6751,8 +7103,6 @@ type CreateStackSetInput struct { // Describes whether StackSets automatically deploys to AWS Organizations accounts // that are added to the target organization or organizational unit (OU). Specify // only if PermissionModel is SERVICE_MANAGED. - // - // If you specify AutoDeployment, do not specify DeploymentTargets or Regions. AutoDeployment *AutoDeployment `type:"structure"` // In some cases, you must explicitly acknowledge that your stack set template @@ -6832,7 +7182,7 @@ type CreateStackSetInput struct { PermissionModel *string `type:"string" enum:"PermissionModels"` // The name to associate with the stack set. The name must be unique in the - // region where you create your stack set. + // Region where you create your stack set. // // A stack name can contain only alphanumeric characters (case-sensitive) and // hyphens. It must start with an alphabetic character and can't be longer than @@ -7215,7 +7565,7 @@ type DeleteStackInstancesInput struct { // Preferences for how AWS CloudFormation performs this stack set operation. OperationPreferences *StackSetOperationPreferences `type:"structure"` - // The regions where you want to delete stack set instances. + // The Regions where you want to delete stack set instances. // // Regions is a required field Regions []*string `type:"list" required:"true"` @@ -7406,7 +7756,9 @@ func (s DeleteStackSetOutput) GoString() string { } // [Service-managed permissions] The AWS Organizations accounts to which StackSets -// deploys. +// deploys. StackSets does not deploy stack instances to the organization master +// account, even if the master account is in your organization or in an OU in +// your organization. // // For update operations, you can specify either Accounts or OrganizationalUnitIds. // For create and delete operations, specify OrganizationalUnitIds. @@ -7417,7 +7769,7 @@ type DeploymentTargets struct { // set updates. Accounts []*string `type:"list"` - // The organization root ID or organizational unit (OUs) IDs to which StackSets + // The organization root ID or organizational unit (OU) IDs to which StackSets // deploys. OrganizationalUnitIds []*string `type:"list"` } @@ -8107,7 +8459,7 @@ type DescribeStackInstanceInput struct { // StackInstanceAccount is a required field StackInstanceAccount *string `type:"string" required:"true"` - // The name of a region that's associated with this stack instance. + // The name of a Region that's associated with this stack instance. // // StackInstanceRegion is a required field StackInstanceRegion *string `type:"string" required:"true"` @@ -8847,6 +9199,9 @@ type DescribeTypeOutput struct { // role to provide your resource type with the appropriate credentials. ExecutionRoleArn *string `min:"1" type:"string"` + // Whether the specified type version is set as the default version. + IsDefaultVersion *bool `type:"boolean"` + // When the specified type version was registered. LastUpdated *time.Time `type:"timestamp"` @@ -8948,6 +9303,12 @@ func (s *DescribeTypeOutput) SetExecutionRoleArn(v string) *DescribeTypeOutput { return s } +// SetIsDefaultVersion sets the IsDefaultVersion field's value. 
+func (s *DescribeTypeOutput) SetIsDefaultVersion(v bool) *DescribeTypeOutput { + s.IsDefaultVersion = &v + return s +} + // SetLastUpdated sets the LastUpdated field's value. func (s *DescribeTypeOutput) SetLastUpdated(v time.Time) *DescribeTypeOutput { s.LastUpdated = &v @@ -10216,6 +10577,9 @@ func (s *ListImportsOutput) SetNextToken(v string) *ListImportsOutput { type ListStackInstancesInput struct { _ struct{} `type:"structure"` + // The status that stack instances are filtered by. + Filters []*StackInstanceFilter `type:"list"` + // The maximum number of results to be returned with a single call. If the number // of available results exceeds this maximum, the response includes a NextToken // value that you can assign to the NextToken request parameter to get the next @@ -10232,7 +10596,7 @@ type ListStackInstancesInput struct { // The name of the AWS account that you want to list stack instances for. StackInstanceAccount *string `type:"string"` - // The name of the region where you want to list stack instances. + // The name of the Region where you want to list stack instances. StackInstanceRegion *string `type:"string"` // The name or unique ID of the stack set that you want to list stack instances @@ -10264,6 +10628,16 @@ func (s *ListStackInstancesInput) Validate() error { if s.StackSetName == nil { invalidParams.Add(request.NewErrParamRequired("StackSetName")) } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -10271,6 +10645,12 @@ func (s *ListStackInstancesInput) Validate() error { return nil } +// SetFilters sets the Filters field's value. +func (s *ListStackInstancesInput) SetFilters(v []*StackInstanceFilter) *ListStackInstancesInput { + s.Filters = v + return s +} + // SetMaxResults sets the MaxResults field's value. func (s *ListStackInstancesInput) SetMaxResults(v int64) *ListStackInstancesInput { s.MaxResults = &v @@ -10528,7 +10908,7 @@ type ListStackSetOperationResultsOutput struct { NextToken *string `min:"1" type:"string"` // A list of StackSetOperationResultSummary structures that contain information - // about the specified operation results, for accounts and regions that are + // about the specified operation results, for accounts and Regions that are // included in the operation. Summaries []*StackSetOperationResultSummary `type:"list"` } @@ -11794,12 +12174,15 @@ type RegisterTypeInput struct { // if the request is submitted multiple times. ClientRequestToken *string `min:"1" type:"string"` - // The Amazon Resource Name (ARN) of the IAM execution role to use to register - // the type. If your resource type calls AWS APIs in any of its handlers, you - // must create an IAM execution role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) + // The Amazon Resource Name (ARN) of the IAM role for CloudFormation to assume + // when invoking the resource provider. If your resource type calls AWS APIs + // in any of its handlers, you must create an IAM execution role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) // that includes the necessary permissions to call those AWS APIs, and provision - // that execution role in your account. CloudFormation then assumes that execution - // role to provide your resource type with the appropriate credentials. + // that execution role in your account. 
When CloudFormation needs to invoke + // the resource provider handler, CloudFormation assumes this execution role + // to create a temporary session token, which it then passes to the resource + // provider handler, thereby supplying your resource provider with the appropriate + // credentials. ExecutionRoleArn *string `min:"1" type:"string"` // Specifies logging configuration information for a type. @@ -12255,6 +12638,9 @@ type ResourceToImport struct { ResourceIdentifier map[string]*string `min:"1" type:"map" required:"true"` // The type of resource to import into your stack, such as AWS::S3::Bucket. + // For a list of supported resource types, see Resources that support import + // operations (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/resource-import-supported-resources.html) + // in the AWS CloudFormation User Guide. // // ResourceType is a required field ResourceType *string `min:"1" type:"string" required:"true"` @@ -12488,7 +12874,7 @@ type SetStackPolicyInput struct { StackPolicyBody *string `min:"1" type:"string"` // Location of a file containing the stack policy. The URL must point to a policy - // (maximum size: 16 KB) located in an S3 bucket in the same region as the stack. + // (maximum size: 16 KB) located in an S3 bucket in the same Region as the stack. // You can specify either the StackPolicyBody or the StackPolicyURL parameter, // but not both. StackPolicyURL *string `min:"1" type:"string"` @@ -13236,9 +13622,9 @@ func (s *StackEvent) SetTimestamp(v time.Time) *StackEvent { return s } -// An AWS CloudFormation stack, in a specific account and region, that's part +// An AWS CloudFormation stack, in a specific account and Region, that's part // of a stack set operation. A stack instance is a reference to an attempted -// or actual stack in a given account within a given region. A stack instance +// or actual stack in a given account within a given Region. A stack instance // can exist without a stack—for example, if the stack couldn't be created // for some reason. A stack instance is associated with only one stack set. // Each stack instance contains the ID of its associated stack set, as well @@ -13273,19 +13659,22 @@ type StackInstance struct { LastDriftCheckTimestamp *time.Time `type:"timestamp"` // [Service-managed permissions] The organization root ID or organizational - // unit (OU) ID that the stack instance is associated with. + // unit (OU) IDs that you specified for DeploymentTargets (https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DeploymentTargets.html). OrganizationalUnitId *string `type:"string"` // A list of parameters from the stack set template whose values have been overridden // in this stack instance. ParameterOverrides []*Parameter `type:"list"` - // The name of the AWS region that the stack instance is associated with. + // The name of the AWS Region that the stack instance is associated with. Region *string `type:"string"` // The ID of the stack instance. StackId *string `type:"string"` + // The detailed status of the stack instance. + StackInstanceStatus *StackInstanceComprehensiveStatus `type:"structure"` + // The name or unique ID of the stack set that the stack instance is associated // with. StackSetId *string `type:"string"` @@ -13364,6 +13753,12 @@ func (s *StackInstance) SetStackId(v string) *StackInstance { return s } +// SetStackInstanceStatus sets the StackInstanceStatus field's value. 
+func (s *StackInstance) SetStackInstanceStatus(v *StackInstanceComprehensiveStatus) *StackInstance { + s.StackInstanceStatus = v + return s +} + // SetStackSetId sets the StackSetId field's value. func (s *StackInstance) SetStackSetId(v string) *StackInstance { s.StackSetId = &v @@ -13382,6 +13777,97 @@ func (s *StackInstance) SetStatusReason(v string) *StackInstance { return s } +// The detailed status of the stack instance. +type StackInstanceComprehensiveStatus struct { + _ struct{} `type:"structure"` + + // * CANCELLED: The operation in the specified account and Region has been + // cancelled. This is either because a user has stopped the stack set operation, + // or because the failure tolerance of the stack set operation has been exceeded. + // + // * FAILED: The operation in the specified account and Region failed. If + // the stack set operation fails in enough accounts within a Region, the + // failure tolerance for the stack set operation as a whole might be exceeded. + // + // * INOPERABLE: A DeleteStackInstances operation has failed and left the + // stack in an unstable state. Stacks in this state are excluded from further + // UpdateStackSet operations. You might need to perform a DeleteStackInstances + // operation, with RetainStacks set to true, to delete the stack instance, + // and then delete the stack manually. + // + // * PENDING: The operation in the specified account and Region has yet to + // start. + // + // * RUNNING: The operation in the specified account and Region is currently + // in progress. + // + // * SUCCEEDED: The operation in the specified account and Region completed + // successfully. + DetailedStatus *string `type:"string" enum:"StackInstanceDetailedStatus"` +} + +// String returns the string representation +func (s StackInstanceComprehensiveStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackInstanceComprehensiveStatus) GoString() string { + return s.String() +} + +// SetDetailedStatus sets the DetailedStatus field's value. +func (s *StackInstanceComprehensiveStatus) SetDetailedStatus(v string) *StackInstanceComprehensiveStatus { + s.DetailedStatus = &v + return s +} + +// The status that stack instances are filtered by. +type StackInstanceFilter struct { + _ struct{} `type:"structure"` + + // The type of filter to apply. + Name *string `type:"string" enum:"StackInstanceFilterName"` + + // The status to filter by. + Values *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s StackInstanceFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StackInstanceFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StackInstanceFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StackInstanceFilter"} + if s.Values != nil && len(*s.Values) < 6 { + invalidParams.Add(request.NewErrParamMinLen("Values", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *StackInstanceFilter) SetName(v string) *StackInstanceFilter { + s.Name = &v + return s +} + +// SetValues sets the Values field's value. +func (s *StackInstanceFilter) SetValues(v string) *StackInstanceFilter { + s.Values = &v + return s +} + // The structure that contains summary information about a stack instance. 
type StackInstanceSummary struct { _ struct{} `type:"structure"` @@ -13413,15 +13899,18 @@ type StackInstanceSummary struct { LastDriftCheckTimestamp *time.Time `type:"timestamp"` // [Service-managed permissions] The organization root ID or organizational - // unit (OU) ID that the stack instance is associated with. + // unit (OU) IDs that you specified for DeploymentTargets (https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DeploymentTargets.html). OrganizationalUnitId *string `type:"string"` - // The name of the AWS region that the stack instance is associated with. + // The name of the AWS Region that the stack instance is associated with. Region *string `type:"string"` // The ID of the stack instance. StackId *string `type:"string"` + // The detailed status of the stack instance. + StackInstanceStatus *StackInstanceComprehensiveStatus `type:"structure"` + // The name or unique ID of the stack set that the stack instance is associated // with. StackSetId *string `type:"string"` @@ -13493,6 +13982,12 @@ func (s *StackInstanceSummary) SetStackId(v string) *StackInstanceSummary { return s } +// SetStackInstanceStatus sets the StackInstanceStatus field's value. +func (s *StackInstanceSummary) SetStackInstanceStatus(v *StackInstanceComprehensiveStatus) *StackInstanceSummary { + s.StackInstanceStatus = v + return s +} + // SetStackSetId sets the StackSetId field's value. func (s *StackInstanceSummary) SetStackSetId(v string) *StackInstanceSummary { s.StackSetId = &v @@ -14109,7 +14604,7 @@ func (s *StackResourceSummary) SetResourceType(v string) *StackResourceSummary { } // A structure that contains information about a stack set. A stack set enables -// you to provision stacks into AWS accounts and across regions by using a single +// you to provision stacks into AWS accounts and across Regions by using a single // CloudFormation template. In the stack set, you specify the template to use, // as well as any parameters and capabilities that the template requires. type StackSet struct { @@ -14147,7 +14642,7 @@ type StackSet struct { ExecutionRoleName *string `min:"1" type:"string"` // [Service-managed permissions] The organization root ID or organizational - // unit (OUs) IDs to which stacks in your stack set have been deployed. + // unit (OU) IDs that you specified for DeploymentTargets (https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DeploymentTargets.html). OrganizationalUnitIds []*string `type:"list"` // A list of input parameters for a stack set. @@ -14454,7 +14949,7 @@ type StackSetOperation struct { // The time at which the operation was initiated. Note that the creation times // for the stack set operation might differ from the creation time of the individual // stacks themselves. This is because AWS CloudFormation needs to perform preparatory - // work for the operation, such as dispatching the work to the requested regions, + // work for the operation, such as dispatching the work to the requested Regions, // before actually creating the first stacks. CreationTimestamp *time.Time `type:"timestamp"` @@ -14463,8 +14958,8 @@ type StackSetOperation struct { DeploymentTargets *DeploymentTargets `type:"structure"` // The time at which the stack set operation ended, across all accounts and - // regions specified. Note that this doesn't necessarily mean that the stack - // set operation was successful, or even attempted, in each account or region. + // Regions specified. 
Note that this doesn't necessarily mean that the stack + // set operation was successful, or even attempted, in each account or Region. EndTimestamp *time.Time `type:"timestamp"` // The name of the IAM execution role used to create or update the stack set. @@ -14503,14 +14998,14 @@ type StackSetOperation struct { // // * FAILED: The operation exceeded the specified failure tolerance. The // failure tolerance value that you've set for an operation is applied for - // each region during stack create and update operations. If the number of - // failed stacks within a region exceeds the failure tolerance, the status - // of the operation in the region is set to FAILED. This in turn sets the + // each Region during stack create and update operations. If the number of + // failed stacks within a Region exceeds the failure tolerance, the status + // of the operation in the Region is set to FAILED. This in turn sets the // status of the operation as a whole to FAILED, and AWS CloudFormation cancels - // the operation in any remaining regions. + // the operation in any remaining Regions. // // * QUEUED: [Service-managed permissions] For automatic deployments that - // require a sequence of operations. The operation is queued to be performed. + // require a sequence of operations, the operation is queued to be performed. // For more information, see the stack set operation status codes (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-concepts.html#stackset-status-codes) // in the AWS CloudFormation User Guide. // @@ -14615,19 +15110,19 @@ func (s *StackSetOperation) SetStatus(v string) *StackSetOperation { type StackSetOperationPreferences struct { _ struct{} `type:"structure"` - // The number of accounts, per region, for which this operation can fail before - // AWS CloudFormation stops the operation in that region. If the operation is - // stopped in a region, AWS CloudFormation doesn't attempt the operation in - // any subsequent regions. + // The number of accounts, per Region, for which this operation can fail before + // AWS CloudFormation stops the operation in that Region. If the operation is + // stopped in a Region, AWS CloudFormation doesn't attempt the operation in + // any subsequent Regions. // // Conditional: You must specify either FailureToleranceCount or FailureTolerancePercentage // (but not both). FailureToleranceCount *int64 `type:"integer"` - // The percentage of accounts, per region, for which this stack operation can - // fail before AWS CloudFormation stops the operation in that region. If the - // operation is stopped in a region, AWS CloudFormation doesn't attempt the - // operation in any subsequent regions. + // The percentage of accounts, per Region, for which this stack operation can + // fail before AWS CloudFormation stops the operation in that Region. If the + // operation is stopped in a Region, AWS CloudFormation doesn't attempt the + // operation in any subsequent Regions. // // When calculating the number of accounts based on the specified percentage, // AWS CloudFormation rounds down to the next whole number. @@ -14637,8 +15132,8 @@ type StackSetOperationPreferences struct { FailureTolerancePercentage *int64 `type:"integer"` // The maximum number of accounts in which to perform this operation at one - // time. This is dependent on the value of FailureToleranceCount—MaxConcurrentCount - // is at most one more than the FailureToleranceCount . + // time. This is dependent on the value of FailureToleranceCount. 
MaxConcurrentCount + // is at most one more than the FailureToleranceCount. // // Note that this setting lets you specify the maximum for operations. For large // deployments, under certain circumstances the actual number of accounts acted @@ -14664,7 +15159,7 @@ type StackSetOperationPreferences struct { // but not both. MaxConcurrentPercentage *int64 `min:"1" type:"integer"` - // The order of the regions in where you want to perform the stack operation. + // The order of the Regions in where you want to perform the stack operation. RegionOrder []*string `type:"list"` } @@ -14725,7 +15220,7 @@ func (s *StackSetOperationPreferences) SetRegionOrder(v []*string) *StackSetOper } // The structure that contains information about a specified operation's results -// for a given account in a given region. +// for a given account in a given Region. type StackSetOperationResultSummary struct { _ struct{} `type:"structure"` @@ -14738,30 +15233,30 @@ type StackSetOperationResultSummary struct { AccountGateResult *AccountGateResult `type:"structure"` // [Service-managed permissions] The organization root ID or organizational - // unit (OU) ID for this operation result. + // unit (OU) IDs that you specified for DeploymentTargets (https://docs.aws.amazon.com/AWSCloudFormation/latest/APIReference/API_DeploymentTargets.html). OrganizationalUnitId *string `type:"string"` - // The name of the AWS region for this operation result. + // The name of the AWS Region for this operation result. Region *string `type:"string"` // The result status of the stack set operation for the given account in the - // given region. + // given Region. // - // * CANCELLED: The operation in the specified account and region has been + // * CANCELLED: The operation in the specified account and Region has been // cancelled. This is either because a user has stopped the stack set operation, // or because the failure tolerance of the stack set operation has been exceeded. // - // * FAILED: The operation in the specified account and region failed. If - // the stack set operation fails in enough accounts within a region, the + // * FAILED: The operation in the specified account and Region failed. If + // the stack set operation fails in enough accounts within a Region, the // failure tolerance for the stack set operation as a whole might be exceeded. // - // * RUNNING: The operation in the specified account and region is currently + // * RUNNING: The operation in the specified account and Region is currently // in progress. // - // * PENDING: The operation in the specified account and region has yet to + // * PENDING: The operation in the specified account and Region has yet to // start. // - // * SUCCEEDED: The operation in the specified account and region completed + // * SUCCEEDED: The operation in the specified account and Region completed // successfully. Status *string `type:"string" enum:"StackSetOperationResultStatus"` @@ -14828,13 +15323,13 @@ type StackSetOperationSummary struct { // The time at which the operation was initiated. Note that the creation times // for the stack set operation might differ from the creation time of the individual // stacks themselves. This is because AWS CloudFormation needs to perform preparatory - // work for the operation, such as dispatching the work to the requested regions, + // work for the operation, such as dispatching the work to the requested Regions, // before actually creating the first stacks. 
CreationTimestamp *time.Time `type:"timestamp"` // The time at which the stack set operation ended, across all accounts and - // regions specified. Note that this doesn't necessarily mean that the stack - // set operation was successful, or even attempted, in each account or region. + // Regions specified. Note that this doesn't necessarily mean that the stack + // set operation was successful, or even attempted, in each account or Region. EndTimestamp *time.Time `type:"timestamp"` // The unique ID of the stack set operation. @@ -14844,14 +15339,14 @@ type StackSetOperationSummary struct { // // * FAILED: The operation exceeded the specified failure tolerance. The // failure tolerance value that you've set for an operation is applied for - // each region during stack create and update operations. If the number of - // failed stacks within a region exceeds the failure tolerance, the status - // of the operation in the region is set to FAILED. This in turn sets the + // each Region during stack create and update operations. If the number of + // failed stacks within a Region exceeds the failure tolerance, the status + // of the operation in the Region is set to FAILED. This in turn sets the // status of the operation as a whole to FAILED, and AWS CloudFormation cancels - // the operation in any remaining regions. + // the operation in any remaining Regions. // // * QUEUED: [Service-managed permissions] For automatic deployments that - // require a sequence of operations. The operation is queued to be performed. + // require a sequence of operations, the operation is queued to be performed. // For more information, see the stack set operation status codes (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-concepts.html#stackset-status-codes) // in the AWS CloudFormation User Guide. // @@ -15424,6 +15919,9 @@ type TypeVersionSummary struct { // The description of the type version. Description *string `min:"1" type:"string"` + // Whether the specified type version is set as the default version. + IsDefaultVersion *bool `type:"boolean"` + // When the version was registered. TimeCreated *time.Time `type:"timestamp"` @@ -15461,6 +15959,12 @@ func (s *TypeVersionSummary) SetDescription(v string) *TypeVersionSummary { return s } +// SetIsDefaultVersion sets the IsDefaultVersion field's value. +func (s *TypeVersionSummary) SetIsDefaultVersion(v bool) *TypeVersionSummary { + s.IsDefaultVersion = &v + return s +} + // SetTimeCreated sets the TimeCreated field's value. func (s *TypeVersionSummary) SetTimeCreated(v time.Time) *TypeVersionSummary { s.TimeCreated = &v @@ -15614,7 +16118,7 @@ type UpdateStackInput struct { // Location of a file containing the temporary overriding stack policy. The // URL must point to a policy (max size: 16KB) located in an S3 bucket in the - // same region as the stack. You can specify either the StackPolicyDuringUpdateBody + // same Region as the stack. You can specify either the StackPolicyDuringUpdateBody // or the StackPolicyDuringUpdateURL parameter, but not both. // // If you want to update protected resources, specify a temporary overriding @@ -15623,7 +16127,7 @@ type UpdateStackInput struct { StackPolicyDuringUpdateURL *string `min:"1" type:"string"` // Location of a file containing the updated stack policy. The URL must point - // to a policy (max size: 16KB) located in an S3 bucket in the same region as + // to a policy (max size: 16KB) located in an S3 bucket in the same Region as // the stack. 
You can specify either the StackPolicyBody or the StackPolicyURL // parameter, but not both. // @@ -15831,7 +16335,7 @@ type UpdateStackInstancesInput struct { // [Self-managed permissions] The names of one or more AWS accounts for which // you want to update parameter values for stack instances. The overridden parameter // values will be applied to all stack instances in the specified accounts and - // regions. + // Regions. // // You can specify Accounts or DeploymentTargets, but not both. Accounts []*string `type:"list"` @@ -15862,7 +16366,7 @@ type UpdateStackInstancesInput struct { // stack instances. // // Any overridden parameter values will be applied to all stack instances in - // the specified accounts and regions. When specifying parameters and their + // the specified accounts and Regions. When specifying parameters and their // values, be aware of how AWS CloudFormation sets parameter values during stack // instance update operations: // @@ -15893,9 +16397,9 @@ type UpdateStackInstancesInput struct { // new parameter, you can then override the parameter value using UpdateStackInstances. ParameterOverrides []*Parameter `type:"list"` - // The names of one or more regions in which you want to update parameter values + // The names of one or more Regions in which you want to update parameter values // for stack instances. The overridden parameter values will be applied to all - // stack instances in the specified accounts and regions. + // stack instances in the specified accounts and Regions. // // Regions is a required field Regions []*string `type:"list" required:"true"` @@ -16033,7 +16537,7 @@ type UpdateStackSetInput struct { _ struct{} `type:"structure"` // [Self-managed permissions] The accounts in which to update associated stack - // instances. If you specify accounts, you must also specify the regions in + // instances. If you specify accounts, you must also specify the Regions in // which to update stack set instances. // // To update all the stack instances associated with this stack set, do not @@ -16042,10 +16546,10 @@ type UpdateStackSetInput struct { // If the stack set update includes changes to the template (that is, if the // TemplateBody or TemplateURL properties are specified), or the Parameters // property, AWS CloudFormation marks all stack instances with a status of OUTDATED - // prior to updating the stack instances in the specified accounts and regions. + // prior to updating the stack instances in the specified accounts and Regions. // If the stack set update does not include changes to the template or parameters, // AWS CloudFormation updates the stack instances in the specified accounts - // and regions, while leaving all other stack instances with their existing + // and Regions, while leaving all other stack instances with their existing // stack instance status. Accounts []*string `type:"list"` @@ -16174,8 +16678,8 @@ type UpdateStackSetInput struct { // (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/stacksets-prereqs-service-managed.html). PermissionModel *string `type:"string" enum:"PermissionModels"` - // The regions in which to update associated stack instances. If you specify - // regions, you must also specify accounts in which to update stack set instances. + // The Regions in which to update associated stack instances. If you specify + // Regions, you must also specify accounts in which to update stack set instances. 
// // To update all the stack instances associated with this stack set, do not // specify the Accounts or Regions properties. @@ -16183,10 +16687,10 @@ type UpdateStackSetInput struct { // If the stack set update includes changes to the template (that is, if the // TemplateBody or TemplateURL properties are specified), or the Parameters // property, AWS CloudFormation marks all stack instances with a status of OUTDATED - // prior to updating the stack instances in the specified accounts and regions. + // prior to updating the stack instances in the specified accounts and Regions. // If the stack set update does not include changes to the template or parameters, // AWS CloudFormation updates the stack instances in the specified accounts - // and regions, while leaving all other stack instances with their existing + // and Regions, while leaving all other stack instances with their existing // stack instance status. Regions []*string `type:"list"` @@ -16650,6 +17154,15 @@ const ( AccountGateStatusSkipped = "SKIPPED" ) +// AccountGateStatus_Values returns all elements of the AccountGateStatus enum +func AccountGateStatus_Values() []string { + return []string{ + AccountGateStatusSucceeded, + AccountGateStatusFailed, + AccountGateStatusSkipped, + } +} + const ( // CapabilityCapabilityIam is a Capability enum value CapabilityCapabilityIam = "CAPABILITY_IAM" @@ -16661,6 +17174,15 @@ const ( CapabilityCapabilityAutoExpand = "CAPABILITY_AUTO_EXPAND" ) +// Capability_Values returns all elements of the Capability enum +func Capability_Values() []string { + return []string{ + CapabilityCapabilityIam, + CapabilityCapabilityNamedIam, + CapabilityCapabilityAutoExpand, + } +} + const ( // ChangeActionAdd is a ChangeAction enum value ChangeActionAdd = "Add" @@ -16675,6 +17197,16 @@ const ( ChangeActionImport = "Import" ) +// ChangeAction_Values returns all elements of the ChangeAction enum +func ChangeAction_Values() []string { + return []string{ + ChangeActionAdd, + ChangeActionModify, + ChangeActionRemove, + ChangeActionImport, + } +} + const ( // ChangeSetStatusCreatePending is a ChangeSetStatus enum value ChangeSetStatusCreatePending = "CREATE_PENDING" @@ -16692,6 +17224,17 @@ const ( ChangeSetStatusFailed = "FAILED" ) +// ChangeSetStatus_Values returns all elements of the ChangeSetStatus enum +func ChangeSetStatus_Values() []string { + return []string{ + ChangeSetStatusCreatePending, + ChangeSetStatusCreateInProgress, + ChangeSetStatusCreateComplete, + ChangeSetStatusDeleteComplete, + ChangeSetStatusFailed, + } +} + const ( // ChangeSetTypeCreate is a ChangeSetType enum value ChangeSetTypeCreate = "CREATE" @@ -16703,6 +17246,15 @@ const ( ChangeSetTypeImport = "IMPORT" ) +// ChangeSetType_Values returns all elements of the ChangeSetType enum +func ChangeSetType_Values() []string { + return []string{ + ChangeSetTypeCreate, + ChangeSetTypeUpdate, + ChangeSetTypeImport, + } +} + const ( // ChangeSourceResourceReference is a ChangeSource enum value ChangeSourceResourceReference = "ResourceReference" @@ -16720,11 +17272,29 @@ const ( ChangeSourceAutomatic = "Automatic" ) +// ChangeSource_Values returns all elements of the ChangeSource enum +func ChangeSource_Values() []string { + return []string{ + ChangeSourceResourceReference, + ChangeSourceParameterReference, + ChangeSourceResourceAttribute, + ChangeSourceDirectModification, + ChangeSourceAutomatic, + } +} + const ( // ChangeTypeResource is a ChangeType enum value ChangeTypeResource = "Resource" ) +// ChangeType_Values returns all elements of the 
ChangeType enum +func ChangeType_Values() []string { + return []string{ + ChangeTypeResource, + } +} + const ( // DeprecatedStatusLive is a DeprecatedStatus enum value DeprecatedStatusLive = "LIVE" @@ -16733,6 +17303,14 @@ const ( DeprecatedStatusDeprecated = "DEPRECATED" ) +// DeprecatedStatus_Values returns all elements of the DeprecatedStatus enum +func DeprecatedStatus_Values() []string { + return []string{ + DeprecatedStatusLive, + DeprecatedStatusDeprecated, + } +} + const ( // DifferenceTypeAdd is a DifferenceType enum value DifferenceTypeAdd = "ADD" @@ -16744,6 +17322,15 @@ const ( DifferenceTypeNotEqual = "NOT_EQUAL" ) +// DifferenceType_Values returns all elements of the DifferenceType enum +func DifferenceType_Values() []string { + return []string{ + DifferenceTypeAdd, + DifferenceTypeRemove, + DifferenceTypeNotEqual, + } +} + const ( // EvaluationTypeStatic is a EvaluationType enum value EvaluationTypeStatic = "Static" @@ -16752,6 +17339,14 @@ const ( EvaluationTypeDynamic = "Dynamic" ) +// EvaluationType_Values returns all elements of the EvaluationType enum +func EvaluationType_Values() []string { + return []string{ + EvaluationTypeStatic, + EvaluationTypeDynamic, + } +} + const ( // ExecutionStatusUnavailable is a ExecutionStatus enum value ExecutionStatusUnavailable = "UNAVAILABLE" @@ -16772,6 +17367,18 @@ const ( ExecutionStatusObsolete = "OBSOLETE" ) +// ExecutionStatus_Values returns all elements of the ExecutionStatus enum +func ExecutionStatus_Values() []string { + return []string{ + ExecutionStatusUnavailable, + ExecutionStatusAvailable, + ExecutionStatusExecuteInProgress, + ExecutionStatusExecuteComplete, + ExecutionStatusExecuteFailed, + ExecutionStatusObsolete, + } +} + const ( // HandlerErrorCodeNotUpdatable is a HandlerErrorCode enum value HandlerErrorCodeNotUpdatable = "NotUpdatable" @@ -16816,6 +17423,26 @@ const ( HandlerErrorCodeInternalFailure = "InternalFailure" ) +// HandlerErrorCode_Values returns all elements of the HandlerErrorCode enum +func HandlerErrorCode_Values() []string { + return []string{ + HandlerErrorCodeNotUpdatable, + HandlerErrorCodeInvalidRequest, + HandlerErrorCodeAccessDenied, + HandlerErrorCodeInvalidCredentials, + HandlerErrorCodeAlreadyExists, + HandlerErrorCodeNotFound, + HandlerErrorCodeResourceConflict, + HandlerErrorCodeThrottling, + HandlerErrorCodeServiceLimitExceeded, + HandlerErrorCodeNotStabilized, + HandlerErrorCodeGeneralServiceException, + HandlerErrorCodeServiceInternalError, + HandlerErrorCodeNetworkFailure, + HandlerErrorCodeInternalFailure, + } +} + const ( // OnFailureDoNothing is a OnFailure enum value OnFailureDoNothing = "DO_NOTHING" @@ -16827,6 +17454,15 @@ const ( OnFailureDelete = "DELETE" ) +// OnFailure_Values returns all elements of the OnFailure enum +func OnFailure_Values() []string { + return []string{ + OnFailureDoNothing, + OnFailureRollback, + OnFailureDelete, + } +} + const ( // OperationStatusPending is a OperationStatus enum value OperationStatusPending = "PENDING" @@ -16841,6 +17477,16 @@ const ( OperationStatusFailed = "FAILED" ) +// OperationStatus_Values returns all elements of the OperationStatus enum +func OperationStatus_Values() []string { + return []string{ + OperationStatusPending, + OperationStatusInProgress, + OperationStatusSuccess, + OperationStatusFailed, + } +} + const ( // PermissionModelsServiceManaged is a PermissionModels enum value PermissionModelsServiceManaged = "SERVICE_MANAGED" @@ -16849,6 +17495,14 @@ const ( PermissionModelsSelfManaged = "SELF_MANAGED" ) +// 
PermissionModels_Values returns all elements of the PermissionModels enum +func PermissionModels_Values() []string { + return []string{ + PermissionModelsServiceManaged, + PermissionModelsSelfManaged, + } +} + const ( // ProvisioningTypeNonProvisionable is a ProvisioningType enum value ProvisioningTypeNonProvisionable = "NON_PROVISIONABLE" @@ -16860,6 +17514,15 @@ const ( ProvisioningTypeFullyMutable = "FULLY_MUTABLE" ) +// ProvisioningType_Values returns all elements of the ProvisioningType enum +func ProvisioningType_Values() []string { + return []string{ + ProvisioningTypeNonProvisionable, + ProvisioningTypeImmutable, + ProvisioningTypeFullyMutable, + } +} + const ( // RegistrationStatusComplete is a RegistrationStatus enum value RegistrationStatusComplete = "COMPLETE" @@ -16871,11 +17534,27 @@ const ( RegistrationStatusFailed = "FAILED" ) +// RegistrationStatus_Values returns all elements of the RegistrationStatus enum +func RegistrationStatus_Values() []string { + return []string{ + RegistrationStatusComplete, + RegistrationStatusInProgress, + RegistrationStatusFailed, + } +} + const ( // RegistryTypeResource is a RegistryType enum value RegistryTypeResource = "RESOURCE" ) +// RegistryType_Values returns all elements of the RegistryType enum +func RegistryType_Values() []string { + return []string{ + RegistryTypeResource, + } +} + const ( // ReplacementTrue is a Replacement enum value ReplacementTrue = "True" @@ -16887,6 +17566,15 @@ const ( ReplacementConditional = "Conditional" ) +// Replacement_Values returns all elements of the Replacement enum +func Replacement_Values() []string { + return []string{ + ReplacementTrue, + ReplacementFalse, + ReplacementConditional, + } +} + const ( // RequiresRecreationNever is a RequiresRecreation enum value RequiresRecreationNever = "Never" @@ -16898,6 +17586,15 @@ const ( RequiresRecreationAlways = "Always" ) +// RequiresRecreation_Values returns all elements of the RequiresRecreation enum +func RequiresRecreation_Values() []string { + return []string{ + RequiresRecreationNever, + RequiresRecreationConditionally, + RequiresRecreationAlways, + } +} + const ( // ResourceAttributeProperties is a ResourceAttribute enum value ResourceAttributeProperties = "Properties" @@ -16918,6 +17615,18 @@ const ( ResourceAttributeTags = "Tags" ) +// ResourceAttribute_Values returns all elements of the ResourceAttribute enum +func ResourceAttribute_Values() []string { + return []string{ + ResourceAttributeProperties, + ResourceAttributeMetadata, + ResourceAttributeCreationPolicy, + ResourceAttributeUpdatePolicy, + ResourceAttributeDeletionPolicy, + ResourceAttributeTags, + } +} + const ( // ResourceSignalStatusSuccess is a ResourceSignalStatus enum value ResourceSignalStatusSuccess = "SUCCESS" @@ -16926,6 +17635,14 @@ const ( ResourceSignalStatusFailure = "FAILURE" ) +// ResourceSignalStatus_Values returns all elements of the ResourceSignalStatus enum +func ResourceSignalStatus_Values() []string { + return []string{ + ResourceSignalStatusSuccess, + ResourceSignalStatusFailure, + } +} + const ( // ResourceStatusCreateInProgress is a ResourceStatus enum value ResourceStatusCreateInProgress = "CREATE_IN_PROGRESS" @@ -16976,6 +17693,28 @@ const ( ResourceStatusImportRollbackComplete = "IMPORT_ROLLBACK_COMPLETE" ) +// ResourceStatus_Values returns all elements of the ResourceStatus enum +func ResourceStatus_Values() []string { + return []string{ + ResourceStatusCreateInProgress, + ResourceStatusCreateFailed, + ResourceStatusCreateComplete, + 
ResourceStatusDeleteInProgress, + ResourceStatusDeleteFailed, + ResourceStatusDeleteComplete, + ResourceStatusDeleteSkipped, + ResourceStatusUpdateInProgress, + ResourceStatusUpdateFailed, + ResourceStatusUpdateComplete, + ResourceStatusImportFailed, + ResourceStatusImportComplete, + ResourceStatusImportInProgress, + ResourceStatusImportRollbackInProgress, + ResourceStatusImportRollbackFailed, + ResourceStatusImportRollbackComplete, + } +} + const ( // StackDriftDetectionStatusDetectionInProgress is a StackDriftDetectionStatus enum value StackDriftDetectionStatusDetectionInProgress = "DETECTION_IN_PROGRESS" @@ -16987,6 +17726,15 @@ const ( StackDriftDetectionStatusDetectionComplete = "DETECTION_COMPLETE" ) +// StackDriftDetectionStatus_Values returns all elements of the StackDriftDetectionStatus enum +func StackDriftDetectionStatus_Values() []string { + return []string{ + StackDriftDetectionStatusDetectionInProgress, + StackDriftDetectionStatusDetectionFailed, + StackDriftDetectionStatusDetectionComplete, + } +} + const ( // StackDriftStatusDrifted is a StackDriftStatus enum value StackDriftStatusDrifted = "DRIFTED" @@ -17001,6 +17749,60 @@ const ( StackDriftStatusNotChecked = "NOT_CHECKED" ) +// StackDriftStatus_Values returns all elements of the StackDriftStatus enum +func StackDriftStatus_Values() []string { + return []string{ + StackDriftStatusDrifted, + StackDriftStatusInSync, + StackDriftStatusUnknown, + StackDriftStatusNotChecked, + } +} + +const ( + // StackInstanceDetailedStatusPending is a StackInstanceDetailedStatus enum value + StackInstanceDetailedStatusPending = "PENDING" + + // StackInstanceDetailedStatusRunning is a StackInstanceDetailedStatus enum value + StackInstanceDetailedStatusRunning = "RUNNING" + + // StackInstanceDetailedStatusSucceeded is a StackInstanceDetailedStatus enum value + StackInstanceDetailedStatusSucceeded = "SUCCEEDED" + + // StackInstanceDetailedStatusFailed is a StackInstanceDetailedStatus enum value + StackInstanceDetailedStatusFailed = "FAILED" + + // StackInstanceDetailedStatusCancelled is a StackInstanceDetailedStatus enum value + StackInstanceDetailedStatusCancelled = "CANCELLED" + + // StackInstanceDetailedStatusInoperable is a StackInstanceDetailedStatus enum value + StackInstanceDetailedStatusInoperable = "INOPERABLE" +) + +// StackInstanceDetailedStatus_Values returns all elements of the StackInstanceDetailedStatus enum +func StackInstanceDetailedStatus_Values() []string { + return []string{ + StackInstanceDetailedStatusPending, + StackInstanceDetailedStatusRunning, + StackInstanceDetailedStatusSucceeded, + StackInstanceDetailedStatusFailed, + StackInstanceDetailedStatusCancelled, + StackInstanceDetailedStatusInoperable, + } +} + +const ( + // StackInstanceFilterNameDetailedStatus is a StackInstanceFilterName enum value + StackInstanceFilterNameDetailedStatus = "DETAILED_STATUS" +) + +// StackInstanceFilterName_Values returns all elements of the StackInstanceFilterName enum +func StackInstanceFilterName_Values() []string { + return []string{ + StackInstanceFilterNameDetailedStatus, + } +} + const ( // StackInstanceStatusCurrent is a StackInstanceStatus enum value StackInstanceStatusCurrent = "CURRENT" @@ -17012,6 +17814,15 @@ const ( StackInstanceStatusInoperable = "INOPERABLE" ) +// StackInstanceStatus_Values returns all elements of the StackInstanceStatus enum +func StackInstanceStatus_Values() []string { + return []string{ + StackInstanceStatusCurrent, + StackInstanceStatusOutdated, + StackInstanceStatusInoperable, + } +} + const ( // 
StackResourceDriftStatusInSync is a StackResourceDriftStatus enum value StackResourceDriftStatusInSync = "IN_SYNC" @@ -17026,6 +17837,16 @@ const ( StackResourceDriftStatusNotChecked = "NOT_CHECKED" ) +// StackResourceDriftStatus_Values returns all elements of the StackResourceDriftStatus enum +func StackResourceDriftStatus_Values() []string { + return []string{ + StackResourceDriftStatusInSync, + StackResourceDriftStatusModified, + StackResourceDriftStatusDeleted, + StackResourceDriftStatusNotChecked, + } +} + const ( // StackSetDriftDetectionStatusCompleted is a StackSetDriftDetectionStatus enum value StackSetDriftDetectionStatusCompleted = "COMPLETED" @@ -17043,6 +17864,17 @@ const ( StackSetDriftDetectionStatusStopped = "STOPPED" ) +// StackSetDriftDetectionStatus_Values returns all elements of the StackSetDriftDetectionStatus enum +func StackSetDriftDetectionStatus_Values() []string { + return []string{ + StackSetDriftDetectionStatusCompleted, + StackSetDriftDetectionStatusFailed, + StackSetDriftDetectionStatusPartialSuccess, + StackSetDriftDetectionStatusInProgress, + StackSetDriftDetectionStatusStopped, + } +} + const ( // StackSetDriftStatusDrifted is a StackSetDriftStatus enum value StackSetDriftStatusDrifted = "DRIFTED" @@ -17054,6 +17886,15 @@ const ( StackSetDriftStatusNotChecked = "NOT_CHECKED" ) +// StackSetDriftStatus_Values returns all elements of the StackSetDriftStatus enum +func StackSetDriftStatus_Values() []string { + return []string{ + StackSetDriftStatusDrifted, + StackSetDriftStatusInSync, + StackSetDriftStatusNotChecked, + } +} + const ( // StackSetOperationActionCreate is a StackSetOperationAction enum value StackSetOperationActionCreate = "CREATE" @@ -17068,6 +17909,16 @@ const ( StackSetOperationActionDetectDrift = "DETECT_DRIFT" ) +// StackSetOperationAction_Values returns all elements of the StackSetOperationAction enum +func StackSetOperationAction_Values() []string { + return []string{ + StackSetOperationActionCreate, + StackSetOperationActionUpdate, + StackSetOperationActionDelete, + StackSetOperationActionDetectDrift, + } +} + const ( // StackSetOperationResultStatusPending is a StackSetOperationResultStatus enum value StackSetOperationResultStatusPending = "PENDING" @@ -17085,6 +17936,17 @@ const ( StackSetOperationResultStatusCancelled = "CANCELLED" ) +// StackSetOperationResultStatus_Values returns all elements of the StackSetOperationResultStatus enum +func StackSetOperationResultStatus_Values() []string { + return []string{ + StackSetOperationResultStatusPending, + StackSetOperationResultStatusRunning, + StackSetOperationResultStatusSucceeded, + StackSetOperationResultStatusFailed, + StackSetOperationResultStatusCancelled, + } +} + const ( // StackSetOperationStatusRunning is a StackSetOperationStatus enum value StackSetOperationStatusRunning = "RUNNING" @@ -17105,6 +17967,18 @@ const ( StackSetOperationStatusQueued = "QUEUED" ) +// StackSetOperationStatus_Values returns all elements of the StackSetOperationStatus enum +func StackSetOperationStatus_Values() []string { + return []string{ + StackSetOperationStatusRunning, + StackSetOperationStatusSucceeded, + StackSetOperationStatusFailed, + StackSetOperationStatusStopping, + StackSetOperationStatusStopped, + StackSetOperationStatusQueued, + } +} + const ( // StackSetStatusActive is a StackSetStatus enum value StackSetStatusActive = "ACTIVE" @@ -17113,6 +17987,14 @@ const ( StackSetStatusDeleted = "DELETED" ) +// StackSetStatus_Values returns all elements of the StackSetStatus enum +func 
StackSetStatus_Values() []string { + return []string{ + StackSetStatusActive, + StackSetStatusDeleted, + } +} + const ( // StackStatusCreateInProgress is a StackStatus enum value StackStatusCreateInProgress = "CREATE_IN_PROGRESS" @@ -17181,6 +18063,34 @@ const ( StackStatusImportRollbackComplete = "IMPORT_ROLLBACK_COMPLETE" ) +// StackStatus_Values returns all elements of the StackStatus enum +func StackStatus_Values() []string { + return []string{ + StackStatusCreateInProgress, + StackStatusCreateFailed, + StackStatusCreateComplete, + StackStatusRollbackInProgress, + StackStatusRollbackFailed, + StackStatusRollbackComplete, + StackStatusDeleteInProgress, + StackStatusDeleteFailed, + StackStatusDeleteComplete, + StackStatusUpdateInProgress, + StackStatusUpdateCompleteCleanupInProgress, + StackStatusUpdateComplete, + StackStatusUpdateRollbackInProgress, + StackStatusUpdateRollbackFailed, + StackStatusUpdateRollbackCompleteCleanupInProgress, + StackStatusUpdateRollbackComplete, + StackStatusReviewInProgress, + StackStatusImportInProgress, + StackStatusImportComplete, + StackStatusImportRollbackInProgress, + StackStatusImportRollbackFailed, + StackStatusImportRollbackComplete, + } +} + const ( // TemplateStageOriginal is a TemplateStage enum value TemplateStageOriginal = "Original" @@ -17189,6 +18099,14 @@ const ( TemplateStageProcessed = "Processed" ) +// TemplateStage_Values returns all elements of the TemplateStage enum +func TemplateStage_Values() []string { + return []string{ + TemplateStageOriginal, + TemplateStageProcessed, + } +} + const ( // VisibilityPublic is a Visibility enum value VisibilityPublic = "PUBLIC" @@ -17196,3 +18114,11 @@ const ( // VisibilityPrivate is a Visibility enum value VisibilityPrivate = "PRIVATE" ) + +// Visibility_Values returns all elements of the Visibility enum +func Visibility_Values() []string { + return []string{ + VisibilityPublic, + VisibilityPrivate, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go index de5611f9e..33748e5d2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go index 0dbdc6c23..183720d48 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/waiters.go @@ -349,6 +349,72 @@ func (c *CloudFormation) WaitUntilStackImportCompleteWithContext(ctx aws.Context return w.WaitWithContext(ctx) } +// WaitUntilStackRollbackComplete uses the AWS CloudFormation API operation +// DescribeStacks to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *CloudFormation) WaitUntilStackRollbackComplete(input *DescribeStacksInput) error { + return c.WaitUntilStackRollbackCompleteWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilStackRollbackCompleteWithContext is an extended version of WaitUntilStackRollbackComplete. 
+// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFormation) WaitUntilStackRollbackCompleteWithContext(ctx aws.Context, input *DescribeStacksInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilStackRollbackComplete", + MaxAttempts: 120, + Delay: request.ConstantWaiterDelay(30 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_COMPLETE", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "UPDATE_FAILED", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "UPDATE_ROLLBACK_FAILED", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathAnyWaiterMatch, Argument: "Stacks[].StackStatus", + Expected: "DELETE_FAILED", + }, + { + State: request.FailureWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ValidationError", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeStacksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeStacksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + // WaitUntilStackUpdateComplete uses the AWS CloudFormation API operation // DescribeStacks to wait for a condition to be met before returning. // If the condition is not met within the max attempt window, an error will diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go index 316965c88..699a3c813 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go @@ -13,7 +13,138 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restxml" ) -const opCreateCloudFrontOriginAccessIdentity = "CreateCloudFrontOriginAccessIdentity2019_03_26" +const opCreateCachePolicy = "CreateCachePolicy2020_05_31" + +// CreateCachePolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreateCachePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCachePolicy for more information on using the CreateCachePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateCachePolicyRequest method. 
+// req, resp := client.CreateCachePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateCachePolicy +func (c *CloudFront) CreateCachePolicyRequest(input *CreateCachePolicyInput) (req *request.Request, output *CreateCachePolicyOutput) { + op := &request.Operation{ + Name: opCreateCachePolicy, + HTTPMethod: "POST", + HTTPPath: "/2020-05-31/cache-policy", + } + + if input == nil { + input = &CreateCachePolicyInput{} + } + + output = &CreateCachePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCachePolicy API operation for Amazon CloudFront. +// +// Creates a cache policy. +// +// After you create a cache policy, you can attach it to one or more cache behaviors. +// When it’s attached to a cache behavior, the cache policy determines the +// following: +// +// * The values that CloudFront includes in the cache key. These values can +// include HTTP headers, cookies, and URL query strings. CloudFront uses +// the cache key to find an object in its cache that it can return to the +// viewer. +// +// * The default, minimum, and maximum time to live (TTL) values that you +// want objects to stay in the CloudFront cache. +// +// The headers, cookies, and query strings that are included in the cache key +// are automatically included in requests that CloudFront sends to the origin. +// CloudFront sends a request when it can’t find an object in its cache that +// matches the request’s cache key. If you want to send values to the origin +// but not include them in the cache key, use OriginRequestPolicy. +// +// For more information about cache policies, see Controlling the cache key +// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html) +// in the Amazon CloudFront Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation CreateCachePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// * ErrCodeInconsistentQuantities "InconsistentQuantities" +// The value of Quantity and the size of Items don't match. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeCachePolicyAlreadyExists "CachePolicyAlreadyExists" +// A cache policy with this name already exists. You must provide a unique name. +// To modify an existing cache policy, use UpdateCachePolicy. +// +// * ErrCodeTooManyCachePolicies "TooManyCachePolicies" +// You have reached the maximum number of cache policies for this AWS account. +// For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// * ErrCodeTooManyHeadersInCachePolicy "TooManyHeadersInCachePolicy" +// The number of headers in the cache policy exceeds the maximum. For more information, +// see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. 
+// +// * ErrCodeTooManyCookiesInCachePolicy "TooManyCookiesInCachePolicy" +// The number of cookies in the cache policy exceeds the maximum. For more information, +// see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// * ErrCodeTooManyQueryStringsInCachePolicy "TooManyQueryStringsInCachePolicy" +// The number of query strings in the cache policy exceeds the maximum. For +// more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateCachePolicy +func (c *CloudFront) CreateCachePolicy(input *CreateCachePolicyInput) (*CreateCachePolicyOutput, error) { + req, out := c.CreateCachePolicyRequest(input) + return out, req.Send() +} + +// CreateCachePolicyWithContext is the same as CreateCachePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCachePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) CreateCachePolicyWithContext(ctx aws.Context, input *CreateCachePolicyInput, opts ...request.Option) (*CreateCachePolicyOutput, error) { + req, out := c.CreateCachePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateCloudFrontOriginAccessIdentity = "CreateCloudFrontOriginAccessIdentity2020_05_31" // CreateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the // client's request for the CreateCloudFrontOriginAccessIdentity operation. The "output" return @@ -38,12 +169,12 @@ const opCreateCloudFrontOriginAccessIdentity = "CreateCloudFrontOriginAccessIden // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateCloudFrontOriginAccessIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateCloudFrontOriginAccessIdentity func (c *CloudFront) CreateCloudFrontOriginAccessIdentityRequest(input *CreateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *CreateCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opCreateCloudFrontOriginAccessIdentity, HTTPMethod: "POST", - HTTPPath: "/2019-03-26/origin-access-identity/cloudfront", + HTTPPath: "/2020-05-31/origin-access-identity/cloudfront", } if input == nil { @@ -87,12 +218,12 @@ func (c *CloudFront) CreateCloudFrontOriginAccessIdentityRequest(input *CreateCl // access identities allowed. // // * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// An argument is invalid. // // * ErrCodeInconsistentQuantities "InconsistentQuantities" // The value of Quantity and the size of Items don't match. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateCloudFrontOriginAccessIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateCloudFrontOriginAccessIdentity func (c *CloudFront) CreateCloudFrontOriginAccessIdentity(input *CreateCloudFrontOriginAccessIdentityInput) (*CreateCloudFrontOriginAccessIdentityOutput, error) { req, out := c.CreateCloudFrontOriginAccessIdentityRequest(input) return out, req.Send() @@ -114,7 +245,7 @@ func (c *CloudFront) CreateCloudFrontOriginAccessIdentityWithContext(ctx aws.Con return out, req.Send() } -const opCreateDistribution = "CreateDistribution2019_03_26" +const opCreateDistribution = "CreateDistribution2020_05_31" // CreateDistributionRequest generates a "aws/request.Request" representing the // client's request for the CreateDistribution operation. The "output" return @@ -139,12 +270,12 @@ const opCreateDistribution = "CreateDistribution2019_03_26" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateDistribution +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateDistribution func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) (req *request.Request, output *CreateDistributionOutput) { op := &request.Operation{ Name: opCreateDistribution, HTTPMethod: "POST", - HTTPPath: "/2019-03-26/distribution", + HTTPPath: "/2020-05-31/distribution", } if input == nil { @@ -234,7 +365,7 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // A response code is not valid. // // * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// An argument is invalid. // // * ErrCodeInvalidRequiredProtocol "InvalidRequiredProtocol" // This operation requires the HTTPS protocol. Ensure that you specify the HTTPS @@ -308,6 +439,10 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // Processing your request would cause the maximum number of distributions with // Lambda function associations per owner to be exceeded. // +// * ErrCodeTooManyDistributionsWithSingleFunctionARN "TooManyDistributionsWithSingleFunctionARN" +// The maximum number of distributions have been associated with the specified +// Lambda function. +// // * ErrCodeTooManyLambdaFunctionAssociations "TooManyLambdaFunctionAssociations" // Your request contains more Lambda function associations than are allowed // per distribution. @@ -332,7 +467,23 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( // The maximum number of distributions have been associated with the specified // configuration for field-level encryption. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateDistribution +// * ErrCodeNoSuchCachePolicy "NoSuchCachePolicy" +// The cache policy does not exist. +// +// * ErrCodeTooManyDistributionsAssociatedToCachePolicy "TooManyDistributionsAssociatedToCachePolicy" +// The maximum number of distributions have been associated with the specified +// cache policy. For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// * ErrCodeNoSuchOriginRequestPolicy "NoSuchOriginRequestPolicy" +// The origin request policy does not exist. 
+// +// * ErrCodeTooManyDistributionsAssociatedToOriginRequestPolicy "TooManyDistributionsAssociatedToOriginRequestPolicy" +// The maximum number of distributions have been associated with the specified +// origin request policy. For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateDistribution func (c *CloudFront) CreateDistribution(input *CreateDistributionInput) (*CreateDistributionOutput, error) { req, out := c.CreateDistributionRequest(input) return out, req.Send() @@ -354,7 +505,7 @@ func (c *CloudFront) CreateDistributionWithContext(ctx aws.Context, input *Creat return out, req.Send() } -const opCreateDistributionWithTags = "CreateDistributionWithTags2019_03_26" +const opCreateDistributionWithTags = "CreateDistributionWithTags2020_05_31" // CreateDistributionWithTagsRequest generates a "aws/request.Request" representing the // client's request for the CreateDistributionWithTags operation. The "output" return @@ -379,12 +530,12 @@ const opCreateDistributionWithTags = "CreateDistributionWithTags2019_03_26" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateDistributionWithTags +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateDistributionWithTags func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistributionWithTagsInput) (req *request.Request, output *CreateDistributionWithTagsOutput) { op := &request.Operation{ Name: opCreateDistributionWithTags, HTTPMethod: "POST", - HTTPPath: "/2019-03-26/distribution?WithTags", + HTTPPath: "/2020-05-31/distribution?WithTags", } if input == nil { @@ -462,7 +613,7 @@ func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistribution // A response code is not valid. // // * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// An argument is invalid. // // * ErrCodeInvalidRequiredProtocol "InvalidRequiredProtocol" // This operation requires the HTTPS protocol. Ensure that you specify the HTTPS @@ -539,6 +690,10 @@ func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistribution // Processing your request would cause the maximum number of distributions with // Lambda function associations per owner to be exceeded. // +// * ErrCodeTooManyDistributionsWithSingleFunctionARN "TooManyDistributionsWithSingleFunctionARN" +// The maximum number of distributions have been associated with the specified +// Lambda function. +// // * ErrCodeTooManyLambdaFunctionAssociations "TooManyLambdaFunctionAssociations" // Your request contains more Lambda function associations than are allowed // per distribution. @@ -563,7 +718,23 @@ func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistribution // The maximum number of distributions have been associated with the specified // configuration for field-level encryption. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateDistributionWithTags +// * ErrCodeNoSuchCachePolicy "NoSuchCachePolicy" +// The cache policy does not exist. +// +// * ErrCodeTooManyDistributionsAssociatedToCachePolicy "TooManyDistributionsAssociatedToCachePolicy" +// The maximum number of distributions have been associated with the specified +// cache policy. 
For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// * ErrCodeNoSuchOriginRequestPolicy "NoSuchOriginRequestPolicy" +// The origin request policy does not exist. +// +// * ErrCodeTooManyDistributionsAssociatedToOriginRequestPolicy "TooManyDistributionsAssociatedToOriginRequestPolicy" +// The maximum number of distributions have been associated with the specified +// origin request policy. For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateDistributionWithTags func (c *CloudFront) CreateDistributionWithTags(input *CreateDistributionWithTagsInput) (*CreateDistributionWithTagsOutput, error) { req, out := c.CreateDistributionWithTagsRequest(input) return out, req.Send() @@ -585,7 +756,7 @@ func (c *CloudFront) CreateDistributionWithTagsWithContext(ctx aws.Context, inpu return out, req.Send() } -const opCreateFieldLevelEncryptionConfig = "CreateFieldLevelEncryptionConfig2019_03_26" +const opCreateFieldLevelEncryptionConfig = "CreateFieldLevelEncryptionConfig2020_05_31" // CreateFieldLevelEncryptionConfigRequest generates a "aws/request.Request" representing the // client's request for the CreateFieldLevelEncryptionConfig operation. The "output" return @@ -610,12 +781,12 @@ const opCreateFieldLevelEncryptionConfig = "CreateFieldLevelEncryptionConfig2019 // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateFieldLevelEncryptionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateFieldLevelEncryptionConfig func (c *CloudFront) CreateFieldLevelEncryptionConfigRequest(input *CreateFieldLevelEncryptionConfigInput) (req *request.Request, output *CreateFieldLevelEncryptionConfigOutput) { op := &request.Operation{ Name: opCreateFieldLevelEncryptionConfig, HTTPMethod: "POST", - HTTPPath: "/2019-03-26/field-level-encryption", + HTTPPath: "/2020-05-31/field-level-encryption", } if input == nil { @@ -643,7 +814,7 @@ func (c *CloudFront) CreateFieldLevelEncryptionConfigRequest(input *CreateFieldL // The value of Quantity and the size of Items don't match. // // * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// An argument is invalid. // // * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" // The specified profile for field-level encryption doesn't exist. @@ -666,7 +837,7 @@ func (c *CloudFront) CreateFieldLevelEncryptionConfigRequest(input *CreateFieldL // * ErrCodeQueryArgProfileEmpty "QueryArgProfileEmpty" // No profile specified for the field-level encryption query argument. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateFieldLevelEncryptionConfig +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateFieldLevelEncryptionConfig func (c *CloudFront) CreateFieldLevelEncryptionConfig(input *CreateFieldLevelEncryptionConfigInput) (*CreateFieldLevelEncryptionConfigOutput, error) { req, out := c.CreateFieldLevelEncryptionConfigRequest(input) return out, req.Send() @@ -688,7 +859,7 @@ func (c *CloudFront) CreateFieldLevelEncryptionConfigWithContext(ctx aws.Context return out, req.Send() } -const opCreateFieldLevelEncryptionProfile = "CreateFieldLevelEncryptionProfile2019_03_26" +const opCreateFieldLevelEncryptionProfile = "CreateFieldLevelEncryptionProfile2020_05_31" // CreateFieldLevelEncryptionProfileRequest generates a "aws/request.Request" representing the // client's request for the CreateFieldLevelEncryptionProfile operation. The "output" return @@ -713,12 +884,12 @@ const opCreateFieldLevelEncryptionProfile = "CreateFieldLevelEncryptionProfile20 // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateFieldLevelEncryptionProfile +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateFieldLevelEncryptionProfile func (c *CloudFront) CreateFieldLevelEncryptionProfileRequest(input *CreateFieldLevelEncryptionProfileInput) (req *request.Request, output *CreateFieldLevelEncryptionProfileOutput) { op := &request.Operation{ Name: opCreateFieldLevelEncryptionProfile, HTTPMethod: "POST", - HTTPPath: "/2019-03-26/field-level-encryption-profile", + HTTPPath: "/2020-05-31/field-level-encryption-profile", } if input == nil { @@ -746,7 +917,7 @@ func (c *CloudFront) CreateFieldLevelEncryptionProfileRequest(input *CreateField // The value of Quantity and the size of Items don't match. // // * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// An argument is invalid. // // * ErrCodeNoSuchPublicKey "NoSuchPublicKey" // The specified public key doesn't exist. @@ -768,7 +939,7 @@ func (c *CloudFront) CreateFieldLevelEncryptionProfileRequest(input *CreateField // The maximum number of field patterns for field-level encryption have been // created. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateFieldLevelEncryptionProfile +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateFieldLevelEncryptionProfile func (c *CloudFront) CreateFieldLevelEncryptionProfile(input *CreateFieldLevelEncryptionProfileInput) (*CreateFieldLevelEncryptionProfileOutput, error) { req, out := c.CreateFieldLevelEncryptionProfileRequest(input) return out, req.Send() @@ -790,7 +961,7 @@ func (c *CloudFront) CreateFieldLevelEncryptionProfileWithContext(ctx aws.Contex return out, req.Send() } -const opCreateInvalidation = "CreateInvalidation2019_03_26" +const opCreateInvalidation = "CreateInvalidation2020_05_31" // CreateInvalidationRequest generates a "aws/request.Request" representing the // client's request for the CreateInvalidation operation. 
The "output" return @@ -815,12 +986,12 @@ const opCreateInvalidation = "CreateInvalidation2019_03_26" // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateInvalidation +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateInvalidation func (c *CloudFront) CreateInvalidationRequest(input *CreateInvalidationInput) (req *request.Request, output *CreateInvalidationOutput) { op := &request.Operation{ Name: opCreateInvalidation, HTTPMethod: "POST", - HTTPPath: "/2019-03-26/distribution/{DistributionId}/invalidation", + HTTPPath: "/2020-05-31/distribution/{DistributionId}/invalidation", } if input == nil { @@ -852,7 +1023,7 @@ func (c *CloudFront) CreateInvalidationRequest(input *CreateInvalidationInput) ( // header is set. // // * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// An argument is invalid. // // * ErrCodeNoSuchDistribution "NoSuchDistribution" // The specified distribution does not exist. @@ -867,7 +1038,7 @@ func (c *CloudFront) CreateInvalidationRequest(input *CreateInvalidationInput) ( // * ErrCodeInconsistentQuantities "InconsistentQuantities" // The value of Quantity and the size of Items don't match. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateInvalidation +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateInvalidation func (c *CloudFront) CreateInvalidation(input *CreateInvalidationInput) (*CreateInvalidationOutput, error) { req, out := c.CreateInvalidationRequest(input) return out, req.Send() @@ -889,4149 +1060,9156 @@ func (c *CloudFront) CreateInvalidationWithContext(ctx aws.Context, input *Creat return out, req.Send() } -const opCreatePublicKey = "CreatePublicKey2019_03_26" +const opCreateMonitoringSubscription = "CreateMonitoringSubscription2020_05_31" -// CreatePublicKeyRequest generates a "aws/request.Request" representing the -// client's request for the CreatePublicKey operation. The "output" return +// CreateMonitoringSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the CreateMonitoringSubscription operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreatePublicKey for more information on using the CreatePublicKey +// See CreateMonitoringSubscription for more information on using the CreateMonitoringSubscription // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreatePublicKeyRequest method. -// req, resp := client.CreatePublicKeyRequest(params) +// // Example sending a request using the CreateMonitoringSubscriptionRequest method. 
+// req, resp := client.CreateMonitoringSubscriptionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreatePublicKey -func (c *CloudFront) CreatePublicKeyRequest(input *CreatePublicKeyInput) (req *request.Request, output *CreatePublicKeyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateMonitoringSubscription +func (c *CloudFront) CreateMonitoringSubscriptionRequest(input *CreateMonitoringSubscriptionInput) (req *request.Request, output *CreateMonitoringSubscriptionOutput) { op := &request.Operation{ - Name: opCreatePublicKey, + Name: opCreateMonitoringSubscription, HTTPMethod: "POST", - HTTPPath: "/2019-03-26/public-key", + HTTPPath: "/2020-05-31/distributions/{DistributionId}/monitoring-subscription", } if input == nil { - input = &CreatePublicKeyInput{} + input = &CreateMonitoringSubscriptionInput{} } - output = &CreatePublicKeyOutput{} + output = &CreateMonitoringSubscriptionOutput{} req = c.newRequest(op, input, output) return } -// CreatePublicKey API operation for Amazon CloudFront. +// CreateMonitoringSubscription API operation for Amazon CloudFront. // -// Add a new public key to CloudFront to use, for example, for field-level encryption. -// You can add a maximum of 10 public keys with one AWS account. +// Enables additional CloudWatch metrics for the specified CloudFront distribution. +// The additional metrics incur an additional cost. +// +// For more information, see Viewing additional CloudFront distribution metrics +// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/viewing-cloudfront-metrics.html#monitoring-console.distributions-additional) +// in the Amazon CloudFront Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation CreatePublicKey for usage and error information. +// API operation CreateMonitoringSubscription for usage and error information. // // Returned Error Codes: -// * ErrCodePublicKeyAlreadyExists "PublicKeyAlreadyExists" -// The specified public key already exists. -// -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. // -// * ErrCodeTooManyPublicKeys "TooManyPublicKeys" -// The maximum number of public keys for field-level encryption have been created. -// To create a new public key, delete one of the existing keys. +// * ErrCodeNoSuchDistribution "NoSuchDistribution" +// The specified distribution does not exist. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreatePublicKey -func (c *CloudFront) CreatePublicKey(input *CreatePublicKeyInput) (*CreatePublicKeyOutput, error) { - req, out := c.CreatePublicKeyRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateMonitoringSubscription +func (c *CloudFront) CreateMonitoringSubscription(input *CreateMonitoringSubscriptionInput) (*CreateMonitoringSubscriptionOutput, error) { + req, out := c.CreateMonitoringSubscriptionRequest(input) return out, req.Send() } -// CreatePublicKeyWithContext is the same as CreatePublicKey with the addition of +// CreateMonitoringSubscriptionWithContext is the same as CreateMonitoringSubscription with the addition of // the ability to pass a context and additional request options. // -// See CreatePublicKey for details on how to use this API operation. +// See CreateMonitoringSubscription for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) CreatePublicKeyWithContext(ctx aws.Context, input *CreatePublicKeyInput, opts ...request.Option) (*CreatePublicKeyOutput, error) { - req, out := c.CreatePublicKeyRequest(input) +func (c *CloudFront) CreateMonitoringSubscriptionWithContext(ctx aws.Context, input *CreateMonitoringSubscriptionInput, opts ...request.Option) (*CreateMonitoringSubscriptionOutput, error) { + req, out := c.CreateMonitoringSubscriptionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opCreateStreamingDistribution = "CreateStreamingDistribution2019_03_26" +const opCreateOriginRequestPolicy = "CreateOriginRequestPolicy2020_05_31" -// CreateStreamingDistributionRequest generates a "aws/request.Request" representing the -// client's request for the CreateStreamingDistribution operation. The "output" return +// CreateOriginRequestPolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreateOriginRequestPolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateStreamingDistribution for more information on using the CreateStreamingDistribution +// See CreateOriginRequestPolicy for more information on using the CreateOriginRequestPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateStreamingDistributionRequest method. -// req, resp := client.CreateStreamingDistributionRequest(params) +// // Example sending a request using the CreateOriginRequestPolicyRequest method. 
+// req, resp := client.CreateOriginRequestPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateStreamingDistribution -func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDistributionInput) (req *request.Request, output *CreateStreamingDistributionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateOriginRequestPolicy +func (c *CloudFront) CreateOriginRequestPolicyRequest(input *CreateOriginRequestPolicyInput) (req *request.Request, output *CreateOriginRequestPolicyOutput) { op := &request.Operation{ - Name: opCreateStreamingDistribution, + Name: opCreateOriginRequestPolicy, HTTPMethod: "POST", - HTTPPath: "/2019-03-26/streaming-distribution", + HTTPPath: "/2020-05-31/origin-request-policy", } if input == nil { - input = &CreateStreamingDistributionInput{} + input = &CreateOriginRequestPolicyInput{} } - output = &CreateStreamingDistributionOutput{} + output = &CreateOriginRequestPolicyOutput{} req = c.newRequest(op, input, output) return } -// CreateStreamingDistribution API operation for Amazon CloudFront. +// CreateOriginRequestPolicy API operation for Amazon CloudFront. // -// Creates a new RTMP distribution. An RTMP distribution is similar to a web -// distribution, but an RTMP distribution streams media files using the Adobe -// Real-Time Messaging Protocol (RTMP) instead of serving files using HTTP. +// Creates an origin request policy. // -// To create a new distribution, submit a POST request to the CloudFront API -// version/distribution resource. The request body must include a document with -// a StreamingDistributionConfig element. The response echoes the StreamingDistributionConfig -// element and returns other information about the RTMP distribution. +// After you create an origin request policy, you can attach it to one or more +// cache behaviors. When it’s attached to a cache behavior, the origin request +// policy determines the values that CloudFront includes in requests that it +// sends to the origin. Each request that CloudFront sends to the origin includes +// the following: // -// To get the status of your request, use the GET StreamingDistribution API -// action. When the value of Enabled is true and the value of Status is Deployed, -// your distribution is ready. A distribution usually deploys in less than 15 -// minutes. +// * The request body and the URL path (without the domain name) from the +// viewer request. // -// For more information about web distributions, see Working with RTMP Distributions -// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-rtmp.html) -// in the Amazon CloudFront Developer Guide. +// * The headers that CloudFront automatically includes in every origin request, +// including Host, User-Agent, and X-Amz-Cf-Id. // -// Beginning with the 2012-05-05 version of the CloudFront API, we made substantial -// changes to the format of the XML document that you include in the request -// body when you create or update a web distribution or an RTMP distribution, -// and when you invalidate objects. With previous versions of the API, we discovered -// that it was too easy to accidentally delete one or more values for an element -// that accepts multiple values, for example, CNAMEs and trusted signers. 
Our -// changes for the 2012-05-05 release are intended to prevent these accidental -// deletions and to notify you when there's a mismatch between the number of -// values you say you're specifying in the Quantity element and the number of -// values specified. +// * All HTTP headers, cookies, and URL query strings that are specified +// in the cache policy or the origin request policy. These can include items +// from the viewer request and, in the case of headers, additional ones that +// are added by CloudFront. +// +// CloudFront sends a request when it can’t find a valid object in its cache +// that matches the request. If you want to send values to the origin and also +// include them in the cache key, use CachePolicy. +// +// For more information about origin request policies, see Controlling origin +// requests (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html) +// in the Amazon CloudFront Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation CreateStreamingDistribution for usage and error information. +// API operation CreateOriginRequestPolicy for usage and error information. // // Returned Error Codes: -// * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" -// The CNAME specified is already defined for CloudFront. -// -// * ErrCodeStreamingDistributionAlreadyExists "StreamingDistributionAlreadyExists" -// The caller reference you attempted to create the streaming distribution with -// is associated with another distribution -// -// * ErrCodeInvalidOrigin "InvalidOrigin" -// The Amazon S3 origin server specified does not refer to a valid Amazon S3 -// bucket. -// -// * ErrCodeInvalidOriginAccessIdentity "InvalidOriginAccessIdentity" -// The origin access identity is not valid or doesn't exist. -// // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeTooManyTrustedSigners "TooManyTrustedSigners" -// Your request contains more trusted signers than are allowed per distribution. -// -// * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" -// One or more of your trusted signers don't exist. -// -// * ErrCodeMissingBody "MissingBody" -// This operation requires a body. Ensure that the body is present and the Content-Type -// header is set. -// -// * ErrCodeTooManyStreamingDistributionCNAMEs "TooManyStreamingDistributionCNAMEs" -// Your request contains more CNAMEs than are allowed per distribution. -// -// * ErrCodeTooManyStreamingDistributions "TooManyStreamingDistributions" -// Processing your request would cause you to exceed the maximum number of streaming -// distributions allowed. -// -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. -// // * ErrCodeInconsistentQuantities "InconsistentQuantities" // The value of Quantity and the size of Items don't match. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateStreamingDistribution -func (c *CloudFront) CreateStreamingDistribution(input *CreateStreamingDistributionInput) (*CreateStreamingDistributionOutput, error) { - req, out := c.CreateStreamingDistributionRequest(input) +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. 
+// +// * ErrCodeOriginRequestPolicyAlreadyExists "OriginRequestPolicyAlreadyExists" +// An origin request policy with this name already exists. You must provide +// a unique name. To modify an existing origin request policy, use UpdateOriginRequestPolicy. +// +// * ErrCodeTooManyOriginRequestPolicies "TooManyOriginRequestPolicies" +// You have reached the maximum number of origin request policies for this AWS +// account. For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// * ErrCodeTooManyHeadersInOriginRequestPolicy "TooManyHeadersInOriginRequestPolicy" +// The number of headers in the origin request policy exceeds the maximum. For +// more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// * ErrCodeTooManyCookiesInOriginRequestPolicy "TooManyCookiesInOriginRequestPolicy" +// The number of cookies in the origin request policy exceeds the maximum. For +// more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// * ErrCodeTooManyQueryStringsInOriginRequestPolicy "TooManyQueryStringsInOriginRequestPolicy" +// The number of query strings in the origin request policy exceeds the maximum. +// For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateOriginRequestPolicy +func (c *CloudFront) CreateOriginRequestPolicy(input *CreateOriginRequestPolicyInput) (*CreateOriginRequestPolicyOutput, error) { + req, out := c.CreateOriginRequestPolicyRequest(input) return out, req.Send() } -// CreateStreamingDistributionWithContext is the same as CreateStreamingDistribution with the addition of +// CreateOriginRequestPolicyWithContext is the same as CreateOriginRequestPolicy with the addition of // the ability to pass a context and additional request options. // -// See CreateStreamingDistribution for details on how to use this API operation. +// See CreateOriginRequestPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) CreateStreamingDistributionWithContext(ctx aws.Context, input *CreateStreamingDistributionInput, opts ...request.Option) (*CreateStreamingDistributionOutput, error) { - req, out := c.CreateStreamingDistributionRequest(input) +func (c *CloudFront) CreateOriginRequestPolicyWithContext(ctx aws.Context, input *CreateOriginRequestPolicyInput, opts ...request.Option) (*CreateOriginRequestPolicyOutput, error) { + req, out := c.CreateOriginRequestPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opCreateStreamingDistributionWithTags = "CreateStreamingDistributionWithTags2019_03_26" +const opCreatePublicKey = "CreatePublicKey2020_05_31" -// CreateStreamingDistributionWithTagsRequest generates a "aws/request.Request" representing the -// client's request for the CreateStreamingDistributionWithTags operation. The "output" return +// CreatePublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the CreatePublicKey operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See CreateStreamingDistributionWithTags for more information on using the CreateStreamingDistributionWithTags +// See CreatePublicKey for more information on using the CreatePublicKey // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the CreateStreamingDistributionWithTagsRequest method. -// req, resp := client.CreateStreamingDistributionWithTagsRequest(params) +// // Example sending a request using the CreatePublicKeyRequest method. +// req, resp := client.CreatePublicKeyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateStreamingDistributionWithTags -func (c *CloudFront) CreateStreamingDistributionWithTagsRequest(input *CreateStreamingDistributionWithTagsInput) (req *request.Request, output *CreateStreamingDistributionWithTagsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreatePublicKey +func (c *CloudFront) CreatePublicKeyRequest(input *CreatePublicKeyInput) (req *request.Request, output *CreatePublicKeyOutput) { op := &request.Operation{ - Name: opCreateStreamingDistributionWithTags, + Name: opCreatePublicKey, HTTPMethod: "POST", - HTTPPath: "/2019-03-26/streaming-distribution?WithTags", + HTTPPath: "/2020-05-31/public-key", } if input == nil { - input = &CreateStreamingDistributionWithTagsInput{} + input = &CreatePublicKeyInput{} } - output = &CreateStreamingDistributionWithTagsOutput{} + output = &CreatePublicKeyOutput{} req = c.newRequest(op, input, output) return } -// CreateStreamingDistributionWithTags API operation for Amazon CloudFront. +// CreatePublicKey API operation for Amazon CloudFront. // -// Create a new streaming distribution with tags. +// Add a new public key to CloudFront to use, for example, for field-level encryption. +// You can add a maximum of 10 public keys with one AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation CreateStreamingDistributionWithTags for usage and error information. +// API operation CreatePublicKey for usage and error information. // // Returned Error Codes: -// * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" -// The CNAME specified is already defined for CloudFront. 
-// -// * ErrCodeStreamingDistributionAlreadyExists "StreamingDistributionAlreadyExists" -// The caller reference you attempted to create the streaming distribution with -// is associated with another distribution -// -// * ErrCodeInvalidOrigin "InvalidOrigin" -// The Amazon S3 origin server specified does not refer to a valid Amazon S3 -// bucket. -// -// * ErrCodeInvalidOriginAccessIdentity "InvalidOriginAccessIdentity" -// The origin access identity is not valid or doesn't exist. +// * ErrCodePublicKeyAlreadyExists "PublicKeyAlreadyExists" +// The specified public key already exists. // -// * ErrCodeAccessDenied "AccessDenied" -// Access denied. +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. // -// * ErrCodeTooManyTrustedSigners "TooManyTrustedSigners" -// Your request contains more trusted signers than are allowed per distribution. +// * ErrCodeTooManyPublicKeys "TooManyPublicKeys" +// The maximum number of public keys for field-level encryption have been created. +// To create a new public key, delete one of the existing keys. // -// * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" -// One or more of your trusted signers don't exist. +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreatePublicKey +func (c *CloudFront) CreatePublicKey(input *CreatePublicKeyInput) (*CreatePublicKeyOutput, error) { + req, out := c.CreatePublicKeyRequest(input) + return out, req.Send() +} + +// CreatePublicKeyWithContext is the same as CreatePublicKey with the addition of +// the ability to pass a context and additional request options. // -// * ErrCodeMissingBody "MissingBody" -// This operation requires a body. Ensure that the body is present and the Content-Type -// header is set. -// -// * ErrCodeTooManyStreamingDistributionCNAMEs "TooManyStreamingDistributionCNAMEs" -// Your request contains more CNAMEs than are allowed per distribution. -// -// * ErrCodeTooManyStreamingDistributions "TooManyStreamingDistributions" -// Processing your request would cause you to exceed the maximum number of streaming -// distributions allowed. -// -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. -// -// * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items don't match. -// -// * ErrCodeInvalidTagging "InvalidTagging" -// The tagging specified is not valid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/CreateStreamingDistributionWithTags -func (c *CloudFront) CreateStreamingDistributionWithTags(input *CreateStreamingDistributionWithTagsInput) (*CreateStreamingDistributionWithTagsOutput, error) { - req, out := c.CreateStreamingDistributionWithTagsRequest(input) - return out, req.Send() -} - -// CreateStreamingDistributionWithTagsWithContext is the same as CreateStreamingDistributionWithTags with the addition of -// the ability to pass a context and additional request options. -// -// See CreateStreamingDistributionWithTags for details on how to use this API operation. +// See CreatePublicKey for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *CloudFront) CreateStreamingDistributionWithTagsWithContext(ctx aws.Context, input *CreateStreamingDistributionWithTagsInput, opts ...request.Option) (*CreateStreamingDistributionWithTagsOutput, error) { - req, out := c.CreateStreamingDistributionWithTagsRequest(input) +func (c *CloudFront) CreatePublicKeyWithContext(ctx aws.Context, input *CreatePublicKeyInput, opts ...request.Option) (*CreatePublicKeyOutput, error) { + req, out := c.CreatePublicKeyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteCloudFrontOriginAccessIdentity = "DeleteCloudFrontOriginAccessIdentity2019_03_26" +const opCreateRealtimeLogConfig = "CreateRealtimeLogConfig2020_05_31" -// DeleteCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the -// client's request for the DeleteCloudFrontOriginAccessIdentity operation. The "output" return +// CreateRealtimeLogConfigRequest generates a "aws/request.Request" representing the +// client's request for the CreateRealtimeLogConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteCloudFrontOriginAccessIdentity for more information on using the DeleteCloudFrontOriginAccessIdentity +// See CreateRealtimeLogConfig for more information on using the CreateRealtimeLogConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteCloudFrontOriginAccessIdentityRequest method. -// req, resp := client.DeleteCloudFrontOriginAccessIdentityRequest(params) +// // Example sending a request using the CreateRealtimeLogConfigRequest method. +// req, resp := client.CreateRealtimeLogConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteCloudFrontOriginAccessIdentity -func (c *CloudFront) DeleteCloudFrontOriginAccessIdentityRequest(input *DeleteCloudFrontOriginAccessIdentityInput) (req *request.Request, output *DeleteCloudFrontOriginAccessIdentityOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateRealtimeLogConfig +func (c *CloudFront) CreateRealtimeLogConfigRequest(input *CreateRealtimeLogConfigInput) (req *request.Request, output *CreateRealtimeLogConfigOutput) { op := &request.Operation{ - Name: opDeleteCloudFrontOriginAccessIdentity, - HTTPMethod: "DELETE", - HTTPPath: "/2019-03-26/origin-access-identity/cloudfront/{Id}", + Name: opCreateRealtimeLogConfig, + HTTPMethod: "POST", + HTTPPath: "/2020-05-31/realtime-log-config", } if input == nil { - input = &DeleteCloudFrontOriginAccessIdentityInput{} + input = &CreateRealtimeLogConfigInput{} } - output = &DeleteCloudFrontOriginAccessIdentityOutput{} + output = &CreateRealtimeLogConfigOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteCloudFrontOriginAccessIdentity API operation for Amazon CloudFront. +// CreateRealtimeLogConfig API operation for Amazon CloudFront. // -// Delete an origin access identity. 
+// Creates a real-time log configuration. +// +// After you create a real-time log configuration, you can attach it to one +// or more cache behaviors to send real-time log data to the specified Amazon +// Kinesis data stream. +// +// For more information about real-time log configurations, see Real-time logs +// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/real-time-logs.html) +// in the Amazon CloudFront Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation DeleteCloudFrontOriginAccessIdentity for usage and error information. +// API operation CreateRealtimeLogConfig for usage and error information. // // Returned Error Codes: -// * ErrCodeAccessDenied "AccessDenied" -// Access denied. -// -// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" -// The If-Match version is missing or not valid for the distribution. -// -// * ErrCodeNoSuchCloudFrontOriginAccessIdentity "NoSuchCloudFrontOriginAccessIdentity" -// The specified origin access identity does not exist. +// * ErrCodeRealtimeLogConfigAlreadyExists "RealtimeLogConfigAlreadyExists" +// A real-time log configuration with this name already exists. You must provide +// a unique name. To modify an existing real-time log configuration, use UpdateRealtimeLogConfig. // -// * ErrCodePreconditionFailed "PreconditionFailed" -// The precondition given in one or more of the request-header fields evaluated -// to false. +// * ErrCodeTooManyRealtimeLogConfigs "TooManyRealtimeLogConfigs" +// You have reached the maximum number of real-time log configurations for this +// AWS account. For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. // -// * ErrCodeOriginAccessIdentityInUse "CloudFrontOriginAccessIdentityInUse" -// The Origin Access Identity specified is already in use. +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteCloudFrontOriginAccessIdentity -func (c *CloudFront) DeleteCloudFrontOriginAccessIdentity(input *DeleteCloudFrontOriginAccessIdentityInput) (*DeleteCloudFrontOriginAccessIdentityOutput, error) { - req, out := c.DeleteCloudFrontOriginAccessIdentityRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateRealtimeLogConfig +func (c *CloudFront) CreateRealtimeLogConfig(input *CreateRealtimeLogConfigInput) (*CreateRealtimeLogConfigOutput, error) { + req, out := c.CreateRealtimeLogConfigRequest(input) return out, req.Send() } -// DeleteCloudFrontOriginAccessIdentityWithContext is the same as DeleteCloudFrontOriginAccessIdentity with the addition of +// CreateRealtimeLogConfigWithContext is the same as CreateRealtimeLogConfig with the addition of // the ability to pass a context and additional request options. // -// See DeleteCloudFrontOriginAccessIdentity for details on how to use this API operation. +// See CreateRealtimeLogConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) DeleteCloudFrontOriginAccessIdentityWithContext(ctx aws.Context, input *DeleteCloudFrontOriginAccessIdentityInput, opts ...request.Option) (*DeleteCloudFrontOriginAccessIdentityOutput, error) { - req, out := c.DeleteCloudFrontOriginAccessIdentityRequest(input) +func (c *CloudFront) CreateRealtimeLogConfigWithContext(ctx aws.Context, input *CreateRealtimeLogConfigInput, opts ...request.Option) (*CreateRealtimeLogConfigOutput, error) { + req, out := c.CreateRealtimeLogConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteDistribution = "DeleteDistribution2019_03_26" +const opCreateStreamingDistribution = "CreateStreamingDistribution2020_05_31" -// DeleteDistributionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteDistribution operation. The "output" return +// CreateStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the CreateStreamingDistribution operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteDistribution for more information on using the DeleteDistribution +// See CreateStreamingDistribution for more information on using the CreateStreamingDistribution // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteDistributionRequest method. -// req, resp := client.DeleteDistributionRequest(params) +// // Example sending a request using the CreateStreamingDistributionRequest method. +// req, resp := client.CreateStreamingDistributionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteDistribution -func (c *CloudFront) DeleteDistributionRequest(input *DeleteDistributionInput) (req *request.Request, output *DeleteDistributionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateStreamingDistribution +func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDistributionInput) (req *request.Request, output *CreateStreamingDistributionOutput) { op := &request.Operation{ - Name: opDeleteDistribution, - HTTPMethod: "DELETE", - HTTPPath: "/2019-03-26/distribution/{Id}", + Name: opCreateStreamingDistribution, + HTTPMethod: "POST", + HTTPPath: "/2020-05-31/streaming-distribution", } if input == nil { - input = &DeleteDistributionInput{} + input = &CreateStreamingDistributionInput{} } - output = &DeleteDistributionOutput{} + output = &CreateStreamingDistributionOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteDistribution API operation for Amazon CloudFront. +// CreateStreamingDistribution API operation for Amazon CloudFront. // -// Delete a distribution. +// Creates a new RTMP distribution. 
An RTMP distribution is similar to a web +// distribution, but an RTMP distribution streams media files using the Adobe +// Real-Time Messaging Protocol (RTMP) instead of serving files using HTTP. +// +// To create a new distribution, submit a POST request to the CloudFront API +// version/distribution resource. The request body must include a document with +// a StreamingDistributionConfig element. The response echoes the StreamingDistributionConfig +// element and returns other information about the RTMP distribution. +// +// To get the status of your request, use the GET StreamingDistribution API +// action. When the value of Enabled is true and the value of Status is Deployed, +// your distribution is ready. A distribution usually deploys in less than 15 +// minutes. +// +// For more information about web distributions, see Working with RTMP Distributions +// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-rtmp.html) +// in the Amazon CloudFront Developer Guide. +// +// Beginning with the 2012-05-05 version of the CloudFront API, we made substantial +// changes to the format of the XML document that you include in the request +// body when you create or update a web distribution or an RTMP distribution, +// and when you invalidate objects. With previous versions of the API, we discovered +// that it was too easy to accidentally delete one or more values for an element +// that accepts multiple values, for example, CNAMEs and trusted signers. Our +// changes for the 2012-05-05 release are intended to prevent these accidental +// deletions and to notify you when there's a mismatch between the number of +// values you say you're specifying in the Quantity element and the number of +// values specified. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation DeleteDistribution for usage and error information. +// API operation CreateStreamingDistribution for usage and error information. // // Returned Error Codes: +// * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" +// The CNAME specified is already defined for CloudFront. +// +// * ErrCodeStreamingDistributionAlreadyExists "StreamingDistributionAlreadyExists" +// The caller reference you attempted to create the streaming distribution with +// is associated with another distribution +// +// * ErrCodeInvalidOrigin "InvalidOrigin" +// The Amazon S3 origin server specified does not refer to a valid Amazon S3 +// bucket. +// +// * ErrCodeInvalidOriginAccessIdentity "InvalidOriginAccessIdentity" +// The origin access identity is not valid or doesn't exist. +// // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeDistributionNotDisabled "DistributionNotDisabled" -// The specified CloudFront distribution is not disabled. You must disable the -// distribution before you can delete it. +// * ErrCodeTooManyTrustedSigners "TooManyTrustedSigners" +// Your request contains more trusted signers than are allowed per distribution. // -// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" -// The If-Match version is missing or not valid for the distribution. +// * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" +// One or more of your trusted signers don't exist. // -// * ErrCodeNoSuchDistribution "NoSuchDistribution" -// The specified distribution does not exist. 
+// * ErrCodeMissingBody "MissingBody" +// This operation requires a body. Ensure that the body is present and the Content-Type +// header is set. // -// * ErrCodePreconditionFailed "PreconditionFailed" -// The precondition given in one or more of the request-header fields evaluated -// to false. +// * ErrCodeTooManyStreamingDistributionCNAMEs "TooManyStreamingDistributionCNAMEs" +// Your request contains more CNAMEs than are allowed per distribution. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteDistribution -func (c *CloudFront) DeleteDistribution(input *DeleteDistributionInput) (*DeleteDistributionOutput, error) { - req, out := c.DeleteDistributionRequest(input) +// * ErrCodeTooManyStreamingDistributions "TooManyStreamingDistributions" +// Processing your request would cause you to exceed the maximum number of streaming +// distributions allowed. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInconsistentQuantities "InconsistentQuantities" +// The value of Quantity and the size of Items don't match. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateStreamingDistribution +func (c *CloudFront) CreateStreamingDistribution(input *CreateStreamingDistributionInput) (*CreateStreamingDistributionOutput, error) { + req, out := c.CreateStreamingDistributionRequest(input) return out, req.Send() } -// DeleteDistributionWithContext is the same as DeleteDistribution with the addition of +// CreateStreamingDistributionWithContext is the same as CreateStreamingDistribution with the addition of // the ability to pass a context and additional request options. // -// See DeleteDistribution for details on how to use this API operation. +// See CreateStreamingDistribution for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) DeleteDistributionWithContext(ctx aws.Context, input *DeleteDistributionInput, opts ...request.Option) (*DeleteDistributionOutput, error) { - req, out := c.DeleteDistributionRequest(input) +func (c *CloudFront) CreateStreamingDistributionWithContext(ctx aws.Context, input *CreateStreamingDistributionInput, opts ...request.Option) (*CreateStreamingDistributionOutput, error) { + req, out := c.CreateStreamingDistributionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteFieldLevelEncryptionConfig = "DeleteFieldLevelEncryptionConfig2019_03_26" +const opCreateStreamingDistributionWithTags = "CreateStreamingDistributionWithTags2020_05_31" -// DeleteFieldLevelEncryptionConfigRequest generates a "aws/request.Request" representing the -// client's request for the DeleteFieldLevelEncryptionConfig operation. The "output" return +// CreateStreamingDistributionWithTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateStreamingDistributionWithTags operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See DeleteFieldLevelEncryptionConfig for more information on using the DeleteFieldLevelEncryptionConfig +// See CreateStreamingDistributionWithTags for more information on using the CreateStreamingDistributionWithTags // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteFieldLevelEncryptionConfigRequest method. -// req, resp := client.DeleteFieldLevelEncryptionConfigRequest(params) +// // Example sending a request using the CreateStreamingDistributionWithTagsRequest method. +// req, resp := client.CreateStreamingDistributionWithTagsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteFieldLevelEncryptionConfig -func (c *CloudFront) DeleteFieldLevelEncryptionConfigRequest(input *DeleteFieldLevelEncryptionConfigInput) (req *request.Request, output *DeleteFieldLevelEncryptionConfigOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateStreamingDistributionWithTags +func (c *CloudFront) CreateStreamingDistributionWithTagsRequest(input *CreateStreamingDistributionWithTagsInput) (req *request.Request, output *CreateStreamingDistributionWithTagsOutput) { op := &request.Operation{ - Name: opDeleteFieldLevelEncryptionConfig, - HTTPMethod: "DELETE", - HTTPPath: "/2019-03-26/field-level-encryption/{Id}", + Name: opCreateStreamingDistributionWithTags, + HTTPMethod: "POST", + HTTPPath: "/2020-05-31/streaming-distribution?WithTags", } if input == nil { - input = &DeleteFieldLevelEncryptionConfigInput{} + input = &CreateStreamingDistributionWithTagsInput{} } - output = &DeleteFieldLevelEncryptionConfigOutput{} + output = &CreateStreamingDistributionWithTagsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteFieldLevelEncryptionConfig API operation for Amazon CloudFront. +// CreateStreamingDistributionWithTags API operation for Amazon CloudFront. // -// Remove a field-level encryption configuration. +// Create a new streaming distribution with tags. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation DeleteFieldLevelEncryptionConfig for usage and error information. +// API operation CreateStreamingDistributionWithTags for usage and error information. // // Returned Error Codes: +// * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" +// The CNAME specified is already defined for CloudFront. +// +// * ErrCodeStreamingDistributionAlreadyExists "StreamingDistributionAlreadyExists" +// The caller reference you attempted to create the streaming distribution with +// is associated with another distribution +// +// * ErrCodeInvalidOrigin "InvalidOrigin" +// The Amazon S3 origin server specified does not refer to a valid Amazon S3 +// bucket. +// +// * ErrCodeInvalidOriginAccessIdentity "InvalidOriginAccessIdentity" +// The origin access identity is not valid or doesn't exist. +// // * ErrCodeAccessDenied "AccessDenied" // Access denied. 
// -// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" -// The If-Match version is missing or not valid for the distribution. +// * ErrCodeTooManyTrustedSigners "TooManyTrustedSigners" +// Your request contains more trusted signers than are allowed per distribution. // -// * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" -// The specified configuration for field-level encryption doesn't exist. +// * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" +// One or more of your trusted signers don't exist. // -// * ErrCodePreconditionFailed "PreconditionFailed" -// The precondition given in one or more of the request-header fields evaluated -// to false. +// * ErrCodeMissingBody "MissingBody" +// This operation requires a body. Ensure that the body is present and the Content-Type +// header is set. // -// * ErrCodeFieldLevelEncryptionConfigInUse "FieldLevelEncryptionConfigInUse" -// The specified configuration for field-level encryption is in use. +// * ErrCodeTooManyStreamingDistributionCNAMEs "TooManyStreamingDistributionCNAMEs" +// Your request contains more CNAMEs than are allowed per distribution. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteFieldLevelEncryptionConfig -func (c *CloudFront) DeleteFieldLevelEncryptionConfig(input *DeleteFieldLevelEncryptionConfigInput) (*DeleteFieldLevelEncryptionConfigOutput, error) { - req, out := c.DeleteFieldLevelEncryptionConfigRequest(input) +// * ErrCodeTooManyStreamingDistributions "TooManyStreamingDistributions" +// Processing your request would cause you to exceed the maximum number of streaming +// distributions allowed. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInconsistentQuantities "InconsistentQuantities" +// The value of Quantity and the size of Items don't match. +// +// * ErrCodeInvalidTagging "InvalidTagging" +// The tagging specified is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/CreateStreamingDistributionWithTags +func (c *CloudFront) CreateStreamingDistributionWithTags(input *CreateStreamingDistributionWithTagsInput) (*CreateStreamingDistributionWithTagsOutput, error) { + req, out := c.CreateStreamingDistributionWithTagsRequest(input) return out, req.Send() } -// DeleteFieldLevelEncryptionConfigWithContext is the same as DeleteFieldLevelEncryptionConfig with the addition of +// CreateStreamingDistributionWithTagsWithContext is the same as CreateStreamingDistributionWithTags with the addition of // the ability to pass a context and additional request options. // -// See DeleteFieldLevelEncryptionConfig for details on how to use this API operation. +// See CreateStreamingDistributionWithTags for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
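For reference, a minimal sketch of driving the CreateStreamingDistribution operation documented above through this SDK: build a StreamingDistributionConfig, submit it, then poll GetStreamingDistribution until Status is Deployed, as the doc comment describes. The bucket domain, caller reference, and polling interval are illustrative placeholders, not values taken from this change.

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))

	// Create the RTMP distribution from a minimal StreamingDistributionConfig.
	out, err := svc.CreateStreamingDistribution(&cloudfront.CreateStreamingDistributionInput{
		StreamingDistributionConfig: &cloudfront.StreamingDistributionConfig{
			CallerReference: aws.String("example-caller-reference"), // placeholder
			Comment:         aws.String("example RTMP distribution"),
			Enabled:         aws.Bool(true),
			S3Origin: &cloudfront.S3Origin{
				DomainName:           aws.String("example-bucket.s3.amazonaws.com"), // placeholder
				OriginAccessIdentity: aws.String(""),
			},
			TrustedSigners: &cloudfront.TrustedSigners{
				Enabled:  aws.Bool(false),
				Quantity: aws.Int64(0),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Poll until the distribution reports Status "Deployed".
	for {
		got, err := svc.GetStreamingDistribution(&cloudfront.GetStreamingDistributionInput{
			Id: out.StreamingDistribution.Id,
		})
		if err != nil {
			log.Fatal(err)
		}
		if aws.StringValue(got.StreamingDistribution.Status) == "Deployed" {
			break
		}
		time.Sleep(30 * time.Second)
	}
}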
-func (c *CloudFront) DeleteFieldLevelEncryptionConfigWithContext(ctx aws.Context, input *DeleteFieldLevelEncryptionConfigInput, opts ...request.Option) (*DeleteFieldLevelEncryptionConfigOutput, error) { - req, out := c.DeleteFieldLevelEncryptionConfigRequest(input) +func (c *CloudFront) CreateStreamingDistributionWithTagsWithContext(ctx aws.Context, input *CreateStreamingDistributionWithTagsInput, opts ...request.Option) (*CreateStreamingDistributionWithTagsOutput, error) { + req, out := c.CreateStreamingDistributionWithTagsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteFieldLevelEncryptionProfile = "DeleteFieldLevelEncryptionProfile2019_03_26" +const opDeleteCachePolicy = "DeleteCachePolicy2020_05_31" -// DeleteFieldLevelEncryptionProfileRequest generates a "aws/request.Request" representing the -// client's request for the DeleteFieldLevelEncryptionProfile operation. The "output" return +// DeleteCachePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCachePolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteFieldLevelEncryptionProfile for more information on using the DeleteFieldLevelEncryptionProfile +// See DeleteCachePolicy for more information on using the DeleteCachePolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteFieldLevelEncryptionProfileRequest method. -// req, resp := client.DeleteFieldLevelEncryptionProfileRequest(params) +// // Example sending a request using the DeleteCachePolicyRequest method. +// req, resp := client.DeleteCachePolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteFieldLevelEncryptionProfile -func (c *CloudFront) DeleteFieldLevelEncryptionProfileRequest(input *DeleteFieldLevelEncryptionProfileInput) (req *request.Request, output *DeleteFieldLevelEncryptionProfileOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteCachePolicy +func (c *CloudFront) DeleteCachePolicyRequest(input *DeleteCachePolicyInput) (req *request.Request, output *DeleteCachePolicyOutput) { op := &request.Operation{ - Name: opDeleteFieldLevelEncryptionProfile, + Name: opDeleteCachePolicy, HTTPMethod: "DELETE", - HTTPPath: "/2019-03-26/field-level-encryption-profile/{Id}", + HTTPPath: "/2020-05-31/cache-policy/{Id}", } if input == nil { - input = &DeleteFieldLevelEncryptionProfileInput{} + input = &DeleteCachePolicyInput{} } - output = &DeleteFieldLevelEncryptionProfileOutput{} + output = &DeleteCachePolicyOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteFieldLevelEncryptionProfile API operation for Amazon CloudFront. +// DeleteCachePolicy API operation for Amazon CloudFront. // -// Remove a field-level encryption profile. +// Deletes a cache policy. +// +// You cannot delete a cache policy if it’s attached to a cache behavior. 
+// First update your distributions to remove the cache policy from all cache +// behaviors, then delete the cache policy. +// +// To delete a cache policy, you must provide the policy’s identifier and +// version. To get these values, you can use ListCachePolicies or GetCachePolicy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation DeleteFieldLevelEncryptionProfile for usage and error information. +// API operation DeleteCachePolicy for usage and error information. // // Returned Error Codes: // * ErrCodeAccessDenied "AccessDenied" // Access denied. // // * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" -// The If-Match version is missing or not valid for the distribution. +// The If-Match version is missing or not valid. // -// * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" -// The specified profile for field-level encryption doesn't exist. +// * ErrCodeNoSuchCachePolicy "NoSuchCachePolicy" +// The cache policy does not exist. // // * ErrCodePreconditionFailed "PreconditionFailed" -// The precondition given in one or more of the request-header fields evaluated +// The precondition given in one or more of the request header fields evaluated // to false. // -// * ErrCodeFieldLevelEncryptionProfileInUse "FieldLevelEncryptionProfileInUse" -// The specified profile for field-level encryption is in use. +// * ErrCodeIllegalDelete "IllegalDelete" +// You cannot delete a managed policy. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteFieldLevelEncryptionProfile -func (c *CloudFront) DeleteFieldLevelEncryptionProfile(input *DeleteFieldLevelEncryptionProfileInput) (*DeleteFieldLevelEncryptionProfileOutput, error) { - req, out := c.DeleteFieldLevelEncryptionProfileRequest(input) +// * ErrCodeCachePolicyInUse "CachePolicyInUse" +// Cannot delete the cache policy because it is attached to one or more cache +// behaviors. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteCachePolicy +func (c *CloudFront) DeleteCachePolicy(input *DeleteCachePolicyInput) (*DeleteCachePolicyOutput, error) { + req, out := c.DeleteCachePolicyRequest(input) return out, req.Send() } -// DeleteFieldLevelEncryptionProfileWithContext is the same as DeleteFieldLevelEncryptionProfile with the addition of +// DeleteCachePolicyWithContext is the same as DeleteCachePolicy with the addition of // the ability to pass a context and additional request options. // -// See DeleteFieldLevelEncryptionProfile for details on how to use this API operation. +// See DeleteCachePolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
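To make the DeleteCachePolicy requirements above concrete, here is a minimal sketch, assuming a hypothetical policy ID: fetch the policy to obtain its current ETag, then pass that ETag as IfMatch when deleting. The same get-then-delete-with-IfMatch pattern applies to the DeleteOriginRequestPolicy operation later in this file.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))
	policyID := aws.String("example-cache-policy-id") // placeholder, not a real ID

	// GetCachePolicy returns the policy along with its current ETag (the version).
	got, err := svc.GetCachePolicy(&cloudfront.GetCachePolicyInput{Id: policyID})
	if err != nil {
		log.Fatal(err)
	}

	// Delete the policy, supplying the ETag as the If-Match value.
	if _, err := svc.DeleteCachePolicy(&cloudfront.DeleteCachePolicyInput{
		Id:      policyID,
		IfMatch: got.ETag,
	}); err != nil {
		log.Fatal(err)
	}
}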
-func (c *CloudFront) DeleteFieldLevelEncryptionProfileWithContext(ctx aws.Context, input *DeleteFieldLevelEncryptionProfileInput, opts ...request.Option) (*DeleteFieldLevelEncryptionProfileOutput, error) { - req, out := c.DeleteFieldLevelEncryptionProfileRequest(input) +func (c *CloudFront) DeleteCachePolicyWithContext(ctx aws.Context, input *DeleteCachePolicyInput, opts ...request.Option) (*DeleteCachePolicyOutput, error) { + req, out := c.DeleteCachePolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeletePublicKey = "DeletePublicKey2019_03_26" +const opDeleteCloudFrontOriginAccessIdentity = "DeleteCloudFrontOriginAccessIdentity2020_05_31" -// DeletePublicKeyRequest generates a "aws/request.Request" representing the -// client's request for the DeletePublicKey operation. The "output" return +// DeleteCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCloudFrontOriginAccessIdentity operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeletePublicKey for more information on using the DeletePublicKey +// See DeleteCloudFrontOriginAccessIdentity for more information on using the DeleteCloudFrontOriginAccessIdentity // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeletePublicKeyRequest method. -// req, resp := client.DeletePublicKeyRequest(params) +// // Example sending a request using the DeleteCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.DeleteCloudFrontOriginAccessIdentityRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeletePublicKey -func (c *CloudFront) DeletePublicKeyRequest(input *DeletePublicKeyInput) (req *request.Request, output *DeletePublicKeyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteCloudFrontOriginAccessIdentity +func (c *CloudFront) DeleteCloudFrontOriginAccessIdentityRequest(input *DeleteCloudFrontOriginAccessIdentityInput) (req *request.Request, output *DeleteCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ - Name: opDeletePublicKey, + Name: opDeleteCloudFrontOriginAccessIdentity, HTTPMethod: "DELETE", - HTTPPath: "/2019-03-26/public-key/{Id}", + HTTPPath: "/2020-05-31/origin-access-identity/cloudfront/{Id}", } if input == nil { - input = &DeletePublicKeyInput{} + input = &DeleteCloudFrontOriginAccessIdentityInput{} } - output = &DeletePublicKeyOutput{} + output = &DeleteCloudFrontOriginAccessIdentityOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeletePublicKey API operation for Amazon CloudFront. +// DeleteCloudFrontOriginAccessIdentity API operation for Amazon CloudFront. // -// Remove a public key you previously added to CloudFront. +// Delete an origin access identity. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation DeletePublicKey for usage and error information. +// API operation DeleteCloudFrontOriginAccessIdentity for usage and error information. // // Returned Error Codes: // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodePublicKeyInUse "PublicKeyInUse" -// The specified public key is in use. -// // * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" -// The If-Match version is missing or not valid for the distribution. +// The If-Match version is missing or not valid. // -// * ErrCodeNoSuchPublicKey "NoSuchPublicKey" -// The specified public key doesn't exist. +// * ErrCodeNoSuchCloudFrontOriginAccessIdentity "NoSuchCloudFrontOriginAccessIdentity" +// The specified origin access identity does not exist. // // * ErrCodePreconditionFailed "PreconditionFailed" -// The precondition given in one or more of the request-header fields evaluated +// The precondition given in one or more of the request header fields evaluated // to false. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeletePublicKey -func (c *CloudFront) DeletePublicKey(input *DeletePublicKeyInput) (*DeletePublicKeyOutput, error) { - req, out := c.DeletePublicKeyRequest(input) +// * ErrCodeOriginAccessIdentityInUse "CloudFrontOriginAccessIdentityInUse" +// The Origin Access Identity specified is already in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteCloudFrontOriginAccessIdentity +func (c *CloudFront) DeleteCloudFrontOriginAccessIdentity(input *DeleteCloudFrontOriginAccessIdentityInput) (*DeleteCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.DeleteCloudFrontOriginAccessIdentityRequest(input) return out, req.Send() } -// DeletePublicKeyWithContext is the same as DeletePublicKey with the addition of +// DeleteCloudFrontOriginAccessIdentityWithContext is the same as DeleteCloudFrontOriginAccessIdentity with the addition of // the ability to pass a context and additional request options. // -// See DeletePublicKey for details on how to use this API operation. +// See DeleteCloudFrontOriginAccessIdentity for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) DeletePublicKeyWithContext(ctx aws.Context, input *DeletePublicKeyInput, opts ...request.Option) (*DeletePublicKeyOutput, error) { - req, out := c.DeletePublicKeyRequest(input) +func (c *CloudFront) DeleteCloudFrontOriginAccessIdentityWithContext(ctx aws.Context, input *DeleteCloudFrontOriginAccessIdentityInput, opts ...request.Option) (*DeleteCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.DeleteCloudFrontOriginAccessIdentityRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteStreamingDistribution = "DeleteStreamingDistribution2019_03_26" +const opDeleteDistribution = "DeleteDistribution2020_05_31" -// DeleteStreamingDistributionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteStreamingDistribution operation. 
The "output" return +// DeleteDistributionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDistribution operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteStreamingDistribution for more information on using the DeleteStreamingDistribution +// See DeleteDistribution for more information on using the DeleteDistribution // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteStreamingDistributionRequest method. -// req, resp := client.DeleteStreamingDistributionRequest(params) +// // Example sending a request using the DeleteDistributionRequest method. +// req, resp := client.DeleteDistributionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteStreamingDistribution -func (c *CloudFront) DeleteStreamingDistributionRequest(input *DeleteStreamingDistributionInput) (req *request.Request, output *DeleteStreamingDistributionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteDistribution +func (c *CloudFront) DeleteDistributionRequest(input *DeleteDistributionInput) (req *request.Request, output *DeleteDistributionOutput) { op := &request.Operation{ - Name: opDeleteStreamingDistribution, + Name: opDeleteDistribution, HTTPMethod: "DELETE", - HTTPPath: "/2019-03-26/streaming-distribution/{Id}", + HTTPPath: "/2020-05-31/distribution/{Id}", } if input == nil { - input = &DeleteStreamingDistributionInput{} + input = &DeleteDistributionInput{} } - output = &DeleteStreamingDistributionOutput{} + output = &DeleteDistributionOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteStreamingDistribution API operation for Amazon CloudFront. -// -// Delete a streaming distribution. To delete an RTMP distribution using the -// CloudFront API, perform the following steps. -// -// To delete an RTMP distribution using the CloudFront API: -// -// Disable the RTMP distribution. -// -// Submit a GET Streaming Distribution Config request to get the current configuration -// and the Etag header for the distribution. -// -// Update the XML document that was returned in the response to your GET Streaming -// Distribution Config request to change the value of Enabled to false. -// -// Submit a PUT Streaming Distribution Config request to update the configuration -// for your distribution. In the request body, include the XML document that -// you updated in Step 3. Then set the value of the HTTP If-Match header to -// the value of the ETag header that CloudFront returned when you submitted -// the GET Streaming Distribution Config request in Step 2. -// -// Review the response to the PUT Streaming Distribution Config request to confirm -// that the distribution was successfully disabled. -// -// Submit a GET Streaming Distribution Config request to confirm that your changes -// have propagated. 
When propagation is complete, the value of Status is Deployed. -// -// Submit a DELETE Streaming Distribution request. Set the value of the HTTP -// If-Match header to the value of the ETag header that CloudFront returned -// when you submitted the GET Streaming Distribution Config request in Step -// 2. -// -// Review the response to your DELETE Streaming Distribution request to confirm -// that the distribution was successfully deleted. +// DeleteDistribution API operation for Amazon CloudFront. // -// For information about deleting a distribution using the CloudFront console, -// see Deleting a Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html) -// in the Amazon CloudFront Developer Guide. +// Delete a distribution. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation DeleteStreamingDistribution for usage and error information. +// API operation DeleteDistribution for usage and error information. // // Returned Error Codes: // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeStreamingDistributionNotDisabled "StreamingDistributionNotDisabled" +// * ErrCodeDistributionNotDisabled "DistributionNotDisabled" // The specified CloudFront distribution is not disabled. You must disable the // distribution before you can delete it. // // * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" -// The If-Match version is missing or not valid for the distribution. +// The If-Match version is missing or not valid. // -// * ErrCodeNoSuchStreamingDistribution "NoSuchStreamingDistribution" -// The specified streaming distribution does not exist. +// * ErrCodeNoSuchDistribution "NoSuchDistribution" +// The specified distribution does not exist. // // * ErrCodePreconditionFailed "PreconditionFailed" -// The precondition given in one or more of the request-header fields evaluated +// The precondition given in one or more of the request header fields evaluated // to false. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/DeleteStreamingDistribution -func (c *CloudFront) DeleteStreamingDistribution(input *DeleteStreamingDistributionInput) (*DeleteStreamingDistributionOutput, error) { - req, out := c.DeleteStreamingDistributionRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteDistribution +func (c *CloudFront) DeleteDistribution(input *DeleteDistributionInput) (*DeleteDistributionOutput, error) { + req, out := c.DeleteDistributionRequest(input) return out, req.Send() } -// DeleteStreamingDistributionWithContext is the same as DeleteStreamingDistribution with the addition of +// DeleteDistributionWithContext is the same as DeleteDistribution with the addition of // the ability to pass a context and additional request options. // -// See DeleteStreamingDistribution for details on how to use this API operation. +// See DeleteDistribution for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
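The DeleteDistribution operation above returns DistributionNotDisabled unless the distribution is already disabled and deployed, and it expects the current ETag as If-Match. A minimal sketch, assuming a hypothetical distribution ID that has already been disabled:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))
	distID := aws.String("E1EXAMPLE123") // placeholder distribution ID

	// Fetch the distribution to obtain its current ETag.
	got, err := svc.GetDistribution(&cloudfront.GetDistributionInput{Id: distID})
	if err != nil {
		log.Fatal(err)
	}

	// Delete it, passing the ETag as If-Match. The distribution must already be
	// disabled, otherwise CloudFront returns DistributionNotDisabled.
	if _, err := svc.DeleteDistribution(&cloudfront.DeleteDistributionInput{
		Id:      distID,
		IfMatch: got.ETag,
	}); err != nil {
		log.Fatal(err)
	}
}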
-func (c *CloudFront) DeleteStreamingDistributionWithContext(ctx aws.Context, input *DeleteStreamingDistributionInput, opts ...request.Option) (*DeleteStreamingDistributionOutput, error) { - req, out := c.DeleteStreamingDistributionRequest(input) +func (c *CloudFront) DeleteDistributionWithContext(ctx aws.Context, input *DeleteDistributionInput, opts ...request.Option) (*DeleteDistributionOutput, error) { + req, out := c.DeleteDistributionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetCloudFrontOriginAccessIdentity = "GetCloudFrontOriginAccessIdentity2019_03_26" +const opDeleteFieldLevelEncryptionConfig = "DeleteFieldLevelEncryptionConfig2020_05_31" -// GetCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the -// client's request for the GetCloudFrontOriginAccessIdentity operation. The "output" return +// DeleteFieldLevelEncryptionConfigRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFieldLevelEncryptionConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetCloudFrontOriginAccessIdentity for more information on using the GetCloudFrontOriginAccessIdentity +// See DeleteFieldLevelEncryptionConfig for more information on using the DeleteFieldLevelEncryptionConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetCloudFrontOriginAccessIdentityRequest method. -// req, resp := client.GetCloudFrontOriginAccessIdentityRequest(params) +// // Example sending a request using the DeleteFieldLevelEncryptionConfigRequest method. +// req, resp := client.DeleteFieldLevelEncryptionConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetCloudFrontOriginAccessIdentity -func (c *CloudFront) GetCloudFrontOriginAccessIdentityRequest(input *GetCloudFrontOriginAccessIdentityInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteFieldLevelEncryptionConfig +func (c *CloudFront) DeleteFieldLevelEncryptionConfigRequest(input *DeleteFieldLevelEncryptionConfigInput) (req *request.Request, output *DeleteFieldLevelEncryptionConfigOutput) { op := &request.Operation{ - Name: opGetCloudFrontOriginAccessIdentity, - HTTPMethod: "GET", - HTTPPath: "/2019-03-26/origin-access-identity/cloudfront/{Id}", + Name: opDeleteFieldLevelEncryptionConfig, + HTTPMethod: "DELETE", + HTTPPath: "/2020-05-31/field-level-encryption/{Id}", } if input == nil { - input = &GetCloudFrontOriginAccessIdentityInput{} + input = &DeleteFieldLevelEncryptionConfigInput{} } - output = &GetCloudFrontOriginAccessIdentityOutput{} + output = &DeleteFieldLevelEncryptionConfigOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetCloudFrontOriginAccessIdentity API operation for Amazon CloudFront. 
+// DeleteFieldLevelEncryptionConfig API operation for Amazon CloudFront. // -// Get the information about an origin access identity. +// Remove a field-level encryption configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation GetCloudFrontOriginAccessIdentity for usage and error information. +// API operation DeleteFieldLevelEncryptionConfig for usage and error information. // // Returned Error Codes: -// * ErrCodeNoSuchCloudFrontOriginAccessIdentity "NoSuchCloudFrontOriginAccessIdentity" -// The specified origin access identity does not exist. -// // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetCloudFrontOriginAccessIdentity -func (c *CloudFront) GetCloudFrontOriginAccessIdentity(input *GetCloudFrontOriginAccessIdentityInput) (*GetCloudFrontOriginAccessIdentityOutput, error) { - req, out := c.GetCloudFrontOriginAccessIdentityRequest(input) +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" +// The specified configuration for field-level encryption doesn't exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// * ErrCodeFieldLevelEncryptionConfigInUse "FieldLevelEncryptionConfigInUse" +// The specified configuration for field-level encryption is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteFieldLevelEncryptionConfig +func (c *CloudFront) DeleteFieldLevelEncryptionConfig(input *DeleteFieldLevelEncryptionConfigInput) (*DeleteFieldLevelEncryptionConfigOutput, error) { + req, out := c.DeleteFieldLevelEncryptionConfigRequest(input) return out, req.Send() } -// GetCloudFrontOriginAccessIdentityWithContext is the same as GetCloudFrontOriginAccessIdentity with the addition of +// DeleteFieldLevelEncryptionConfigWithContext is the same as DeleteFieldLevelEncryptionConfig with the addition of // the ability to pass a context and additional request options. // -// See GetCloudFrontOriginAccessIdentity for details on how to use this API operation. +// See DeleteFieldLevelEncryptionConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetCloudFrontOriginAccessIdentityWithContext(ctx aws.Context, input *GetCloudFrontOriginAccessIdentityInput, opts ...request.Option) (*GetCloudFrontOriginAccessIdentityOutput, error) { - req, out := c.GetCloudFrontOriginAccessIdentityRequest(input) +func (c *CloudFront) DeleteFieldLevelEncryptionConfigWithContext(ctx aws.Context, input *DeleteFieldLevelEncryptionConfigInput, opts ...request.Option) (*DeleteFieldLevelEncryptionConfigOutput, error) { + req, out := c.DeleteFieldLevelEncryptionConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetCloudFrontOriginAccessIdentityConfig = "GetCloudFrontOriginAccessIdentityConfig2019_03_26" +const opDeleteFieldLevelEncryptionProfile = "DeleteFieldLevelEncryptionProfile2020_05_31" -// GetCloudFrontOriginAccessIdentityConfigRequest generates a "aws/request.Request" representing the -// client's request for the GetCloudFrontOriginAccessIdentityConfig operation. The "output" return +// DeleteFieldLevelEncryptionProfileRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFieldLevelEncryptionProfile operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetCloudFrontOriginAccessIdentityConfig for more information on using the GetCloudFrontOriginAccessIdentityConfig +// See DeleteFieldLevelEncryptionProfile for more information on using the DeleteFieldLevelEncryptionProfile // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetCloudFrontOriginAccessIdentityConfigRequest method. -// req, resp := client.GetCloudFrontOriginAccessIdentityConfigRequest(params) +// // Example sending a request using the DeleteFieldLevelEncryptionProfileRequest method. +// req, resp := client.DeleteFieldLevelEncryptionProfileRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetCloudFrontOriginAccessIdentityConfig -func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfigRequest(input *GetCloudFrontOriginAccessIdentityConfigInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityConfigOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteFieldLevelEncryptionProfile +func (c *CloudFront) DeleteFieldLevelEncryptionProfileRequest(input *DeleteFieldLevelEncryptionProfileInput) (req *request.Request, output *DeleteFieldLevelEncryptionProfileOutput) { op := &request.Operation{ - Name: opGetCloudFrontOriginAccessIdentityConfig, - HTTPMethod: "GET", - HTTPPath: "/2019-03-26/origin-access-identity/cloudfront/{Id}/config", + Name: opDeleteFieldLevelEncryptionProfile, + HTTPMethod: "DELETE", + HTTPPath: "/2020-05-31/field-level-encryption-profile/{Id}", } if input == nil { - input = &GetCloudFrontOriginAccessIdentityConfigInput{} + input = &DeleteFieldLevelEncryptionProfileInput{} } - output = &GetCloudFrontOriginAccessIdentityConfigOutput{} + output = &DeleteFieldLevelEncryptionProfileOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetCloudFrontOriginAccessIdentityConfig API operation for Amazon CloudFront. +// DeleteFieldLevelEncryptionProfile API operation for Amazon CloudFront. // -// Get the configuration information about an origin access identity. +// Remove a field-level encryption profile. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for Amazon CloudFront's -// API operation GetCloudFrontOriginAccessIdentityConfig for usage and error information. +// API operation DeleteFieldLevelEncryptionProfile for usage and error information. // // Returned Error Codes: -// * ErrCodeNoSuchCloudFrontOriginAccessIdentity "NoSuchCloudFrontOriginAccessIdentity" -// The specified origin access identity does not exist. -// // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetCloudFrontOriginAccessIdentityConfig -func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfig(input *GetCloudFrontOriginAccessIdentityConfigInput) (*GetCloudFrontOriginAccessIdentityConfigOutput, error) { - req, out := c.GetCloudFrontOriginAccessIdentityConfigRequest(input) +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" +// The specified profile for field-level encryption doesn't exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// * ErrCodeFieldLevelEncryptionProfileInUse "FieldLevelEncryptionProfileInUse" +// The specified profile for field-level encryption is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteFieldLevelEncryptionProfile +func (c *CloudFront) DeleteFieldLevelEncryptionProfile(input *DeleteFieldLevelEncryptionProfileInput) (*DeleteFieldLevelEncryptionProfileOutput, error) { + req, out := c.DeleteFieldLevelEncryptionProfileRequest(input) return out, req.Send() } -// GetCloudFrontOriginAccessIdentityConfigWithContext is the same as GetCloudFrontOriginAccessIdentityConfig with the addition of +// DeleteFieldLevelEncryptionProfileWithContext is the same as DeleteFieldLevelEncryptionProfile with the addition of // the ability to pass a context and additional request options. // -// See GetCloudFrontOriginAccessIdentityConfig for details on how to use this API operation. +// See DeleteFieldLevelEncryptionProfile for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfigWithContext(ctx aws.Context, input *GetCloudFrontOriginAccessIdentityConfigInput, opts ...request.Option) (*GetCloudFrontOriginAccessIdentityConfigOutput, error) { - req, out := c.GetCloudFrontOriginAccessIdentityConfigRequest(input) +func (c *CloudFront) DeleteFieldLevelEncryptionProfileWithContext(ctx aws.Context, input *DeleteFieldLevelEncryptionProfileInput, opts ...request.Option) (*DeleteFieldLevelEncryptionProfileOutput, error) { + req, out := c.DeleteFieldLevelEncryptionProfileRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetDistribution = "GetDistribution2019_03_26" +const opDeleteMonitoringSubscription = "DeleteMonitoringSubscription2020_05_31" -// GetDistributionRequest generates a "aws/request.Request" representing the -// client's request for the GetDistribution operation. 
The "output" return +// DeleteMonitoringSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMonitoringSubscription operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetDistribution for more information on using the GetDistribution +// See DeleteMonitoringSubscription for more information on using the DeleteMonitoringSubscription // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetDistributionRequest method. -// req, resp := client.GetDistributionRequest(params) +// // Example sending a request using the DeleteMonitoringSubscriptionRequest method. +// req, resp := client.DeleteMonitoringSubscriptionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetDistribution -func (c *CloudFront) GetDistributionRequest(input *GetDistributionInput) (req *request.Request, output *GetDistributionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteMonitoringSubscription +func (c *CloudFront) DeleteMonitoringSubscriptionRequest(input *DeleteMonitoringSubscriptionInput) (req *request.Request, output *DeleteMonitoringSubscriptionOutput) { op := &request.Operation{ - Name: opGetDistribution, - HTTPMethod: "GET", - HTTPPath: "/2019-03-26/distribution/{Id}", + Name: opDeleteMonitoringSubscription, + HTTPMethod: "DELETE", + HTTPPath: "/2020-05-31/distributions/{DistributionId}/monitoring-subscription", } if input == nil { - input = &GetDistributionInput{} + input = &DeleteMonitoringSubscriptionInput{} } - output = &GetDistributionOutput{} + output = &DeleteMonitoringSubscriptionOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetDistribution API operation for Amazon CloudFront. +// DeleteMonitoringSubscription API operation for Amazon CloudFront. // -// Get the information about a distribution. +// Disables additional CloudWatch metrics for the specified CloudFront distribution. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation GetDistribution for usage and error information. +// API operation DeleteMonitoringSubscription for usage and error information. // // Returned Error Codes: -// * ErrCodeNoSuchDistribution "NoSuchDistribution" -// The specified distribution does not exist. -// // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetDistribution -func (c *CloudFront) GetDistribution(input *GetDistributionInput) (*GetDistributionOutput, error) { - req, out := c.GetDistributionRequest(input) +// * ErrCodeNoSuchDistribution "NoSuchDistribution" +// The specified distribution does not exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteMonitoringSubscription +func (c *CloudFront) DeleteMonitoringSubscription(input *DeleteMonitoringSubscriptionInput) (*DeleteMonitoringSubscriptionOutput, error) { + req, out := c.DeleteMonitoringSubscriptionRequest(input) return out, req.Send() } -// GetDistributionWithContext is the same as GetDistribution with the addition of +// DeleteMonitoringSubscriptionWithContext is the same as DeleteMonitoringSubscription with the addition of // the ability to pass a context and additional request options. // -// See GetDistribution for details on how to use this API operation. +// See DeleteMonitoringSubscription for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetDistributionWithContext(ctx aws.Context, input *GetDistributionInput, opts ...request.Option) (*GetDistributionOutput, error) { - req, out := c.GetDistributionRequest(input) +func (c *CloudFront) DeleteMonitoringSubscriptionWithContext(ctx aws.Context, input *DeleteMonitoringSubscriptionInput, opts ...request.Option) (*DeleteMonitoringSubscriptionOutput, error) { + req, out := c.DeleteMonitoringSubscriptionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetDistributionConfig = "GetDistributionConfig2019_03_26" +const opDeleteOriginRequestPolicy = "DeleteOriginRequestPolicy2020_05_31" -// GetDistributionConfigRequest generates a "aws/request.Request" representing the -// client's request for the GetDistributionConfig operation. The "output" return +// DeleteOriginRequestPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteOriginRequestPolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetDistributionConfig for more information on using the GetDistributionConfig +// See DeleteOriginRequestPolicy for more information on using the DeleteOriginRequestPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetDistributionConfigRequest method. -// req, resp := client.GetDistributionConfigRequest(params) +// // Example sending a request using the DeleteOriginRequestPolicyRequest method. 
+// req, resp := client.DeleteOriginRequestPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetDistributionConfig -func (c *CloudFront) GetDistributionConfigRequest(input *GetDistributionConfigInput) (req *request.Request, output *GetDistributionConfigOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteOriginRequestPolicy +func (c *CloudFront) DeleteOriginRequestPolicyRequest(input *DeleteOriginRequestPolicyInput) (req *request.Request, output *DeleteOriginRequestPolicyOutput) { op := &request.Operation{ - Name: opGetDistributionConfig, - HTTPMethod: "GET", - HTTPPath: "/2019-03-26/distribution/{Id}/config", + Name: opDeleteOriginRequestPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/2020-05-31/origin-request-policy/{Id}", } if input == nil { - input = &GetDistributionConfigInput{} + input = &DeleteOriginRequestPolicyInput{} } - output = &GetDistributionConfigOutput{} + output = &DeleteOriginRequestPolicyOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetDistributionConfig API operation for Amazon CloudFront. +// DeleteOriginRequestPolicy API operation for Amazon CloudFront. // -// Get the configuration information about a distribution. +// Deletes an origin request policy. +// +// You cannot delete an origin request policy if it’s attached to any cache +// behaviors. First update your distributions to remove the origin request policy +// from all cache behaviors, then delete the origin request policy. +// +// To delete an origin request policy, you must provide the policy’s identifier +// and version. To get the identifier, you can use ListOriginRequestPolicies +// or GetOriginRequestPolicy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation GetDistributionConfig for usage and error information. +// API operation DeleteOriginRequestPolicy for usage and error information. // // Returned Error Codes: -// * ErrCodeNoSuchDistribution "NoSuchDistribution" -// The specified distribution does not exist. -// // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetDistributionConfig -func (c *CloudFront) GetDistributionConfig(input *GetDistributionConfigInput) (*GetDistributionConfigOutput, error) { - req, out := c.GetDistributionConfigRequest(input) +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeNoSuchOriginRequestPolicy "NoSuchOriginRequestPolicy" +// The origin request policy does not exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// * ErrCodeIllegalDelete "IllegalDelete" +// You cannot delete a managed policy. +// +// * ErrCodeOriginRequestPolicyInUse "OriginRequestPolicyInUse" +// Cannot delete the origin request policy because it is attached to one or +// more cache behaviors. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteOriginRequestPolicy +func (c *CloudFront) DeleteOriginRequestPolicy(input *DeleteOriginRequestPolicyInput) (*DeleteOriginRequestPolicyOutput, error) { + req, out := c.DeleteOriginRequestPolicyRequest(input) return out, req.Send() } -// GetDistributionConfigWithContext is the same as GetDistributionConfig with the addition of +// DeleteOriginRequestPolicyWithContext is the same as DeleteOriginRequestPolicy with the addition of // the ability to pass a context and additional request options. // -// See GetDistributionConfig for details on how to use this API operation. +// See DeleteOriginRequestPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetDistributionConfigWithContext(ctx aws.Context, input *GetDistributionConfigInput, opts ...request.Option) (*GetDistributionConfigOutput, error) { - req, out := c.GetDistributionConfigRequest(input) +func (c *CloudFront) DeleteOriginRequestPolicyWithContext(ctx aws.Context, input *DeleteOriginRequestPolicyInput, opts ...request.Option) (*DeleteOriginRequestPolicyOutput, error) { + req, out := c.DeleteOriginRequestPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetFieldLevelEncryption = "GetFieldLevelEncryption2019_03_26" +const opDeletePublicKey = "DeletePublicKey2020_05_31" -// GetFieldLevelEncryptionRequest generates a "aws/request.Request" representing the -// client's request for the GetFieldLevelEncryption operation. The "output" return +// DeletePublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the DeletePublicKey operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetFieldLevelEncryption for more information on using the GetFieldLevelEncryption +// See DeletePublicKey for more information on using the DeletePublicKey // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetFieldLevelEncryptionRequest method. -// req, resp := client.GetFieldLevelEncryptionRequest(params) +// // Example sending a request using the DeletePublicKeyRequest method. 
+// req, resp := client.DeletePublicKeyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryption -func (c *CloudFront) GetFieldLevelEncryptionRequest(input *GetFieldLevelEncryptionInput) (req *request.Request, output *GetFieldLevelEncryptionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeletePublicKey +func (c *CloudFront) DeletePublicKeyRequest(input *DeletePublicKeyInput) (req *request.Request, output *DeletePublicKeyOutput) { op := &request.Operation{ - Name: opGetFieldLevelEncryption, - HTTPMethod: "GET", - HTTPPath: "/2019-03-26/field-level-encryption/{Id}", + Name: opDeletePublicKey, + HTTPMethod: "DELETE", + HTTPPath: "/2020-05-31/public-key/{Id}", } if input == nil { - input = &GetFieldLevelEncryptionInput{} + input = &DeletePublicKeyInput{} } - output = &GetFieldLevelEncryptionOutput{} + output = &DeletePublicKeyOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetFieldLevelEncryption API operation for Amazon CloudFront. +// DeletePublicKey API operation for Amazon CloudFront. // -// Get the field-level encryption configuration information. +// Remove a public key you previously added to CloudFront. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation GetFieldLevelEncryption for usage and error information. +// API operation DeletePublicKey for usage and error information. // // Returned Error Codes: // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" -// The specified configuration for field-level encryption doesn't exist. +// * ErrCodePublicKeyInUse "PublicKeyInUse" +// The specified public key is in use. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryption -func (c *CloudFront) GetFieldLevelEncryption(input *GetFieldLevelEncryptionInput) (*GetFieldLevelEncryptionOutput, error) { - req, out := c.GetFieldLevelEncryptionRequest(input) +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeNoSuchPublicKey "NoSuchPublicKey" +// The specified public key doesn't exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeletePublicKey +func (c *CloudFront) DeletePublicKey(input *DeletePublicKeyInput) (*DeletePublicKeyOutput, error) { + req, out := c.DeletePublicKeyRequest(input) return out, req.Send() } -// GetFieldLevelEncryptionWithContext is the same as GetFieldLevelEncryption with the addition of +// DeletePublicKeyWithContext is the same as DeletePublicKey with the addition of // the ability to pass a context and additional request options. // -// See GetFieldLevelEncryption for details on how to use this API operation. +// See DeletePublicKey for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetFieldLevelEncryptionWithContext(ctx aws.Context, input *GetFieldLevelEncryptionInput, opts ...request.Option) (*GetFieldLevelEncryptionOutput, error) { - req, out := c.GetFieldLevelEncryptionRequest(input) +func (c *CloudFront) DeletePublicKeyWithContext(ctx aws.Context, input *DeletePublicKeyInput, opts ...request.Option) (*DeletePublicKeyOutput, error) { + req, out := c.DeletePublicKeyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetFieldLevelEncryptionConfig = "GetFieldLevelEncryptionConfig2019_03_26" +const opDeleteRealtimeLogConfig = "DeleteRealtimeLogConfig2020_05_31" -// GetFieldLevelEncryptionConfigRequest generates a "aws/request.Request" representing the -// client's request for the GetFieldLevelEncryptionConfig operation. The "output" return +// DeleteRealtimeLogConfigRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRealtimeLogConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetFieldLevelEncryptionConfig for more information on using the GetFieldLevelEncryptionConfig +// See DeleteRealtimeLogConfig for more information on using the DeleteRealtimeLogConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetFieldLevelEncryptionConfigRequest method. -// req, resp := client.GetFieldLevelEncryptionConfigRequest(params) +// // Example sending a request using the DeleteRealtimeLogConfigRequest method. +// req, resp := client.DeleteRealtimeLogConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryptionConfig -func (c *CloudFront) GetFieldLevelEncryptionConfigRequest(input *GetFieldLevelEncryptionConfigInput) (req *request.Request, output *GetFieldLevelEncryptionConfigOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteRealtimeLogConfig +func (c *CloudFront) DeleteRealtimeLogConfigRequest(input *DeleteRealtimeLogConfigInput) (req *request.Request, output *DeleteRealtimeLogConfigOutput) { op := &request.Operation{ - Name: opGetFieldLevelEncryptionConfig, - HTTPMethod: "GET", - HTTPPath: "/2019-03-26/field-level-encryption/{Id}/config", + Name: opDeleteRealtimeLogConfig, + HTTPMethod: "POST", + HTTPPath: "/2020-05-31/delete-realtime-log-config/", } if input == nil { - input = &GetFieldLevelEncryptionConfigInput{} + input = &DeleteRealtimeLogConfigInput{} } - output = &GetFieldLevelEncryptionConfigOutput{} + output = &DeleteRealtimeLogConfigOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetFieldLevelEncryptionConfig API operation for Amazon CloudFront. +// DeleteRealtimeLogConfig API operation for Amazon CloudFront. 
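Editorial aside, not part of the vendored SDK or this provider change: the DeletePublicKey documentation above implies the usual CloudFront ETag handling (InvalidIfMatchVersion, PreconditionFailed), so a minimal end-to-end sketch of the pattern may help. The key ID below is hypothetical, and the ETag is taken from a prior GetPublicKey call.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := cloudfront.New(sess)

	// Look up the key first so we have the ETag that CloudFront checks via If-Match.
	getOut, err := svc.GetPublicKey(&cloudfront.GetPublicKeyInput{
		Id: aws.String("K2EXAMPLEKEYID"), // hypothetical public key ID
	})
	if err != nil {
		log.Fatalf("GetPublicKey: %v", err)
	}

	_, err = svc.DeletePublicKey(&cloudfront.DeletePublicKeyInput{
		Id:      aws.String("K2EXAMPLEKEYID"),
		IfMatch: getOut.ETag,
	})
	if err != nil {
		log.Fatalf("DeletePublicKey: %v", err)
	}
	log.Println("public key deleted")
}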
// -// Get the field-level encryption configuration information. +// Deletes a real-time log configuration. +// +// You cannot delete a real-time log configuration if it’s attached to a cache +// behavior. First update your distributions to remove the real-time log configuration +// from all cache behaviors, then delete the real-time log configuration. +// +// To delete a real-time log configuration, you can provide the configuration’s +// name or its Amazon Resource Name (ARN). You must provide at least one. If +// you provide both, CloudFront uses the name to identify the real-time log +// configuration to delete. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation GetFieldLevelEncryptionConfig for usage and error information. +// API operation DeleteRealtimeLogConfig for usage and error information. // // Returned Error Codes: -// * ErrCodeAccessDenied "AccessDenied" -// Access denied. +// * ErrCodeNoSuchRealtimeLogConfig "NoSuchRealtimeLogConfig" +// The real-time log configuration does not exist. // -// * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" -// The specified configuration for field-level encryption doesn't exist. +// * ErrCodeRealtimeLogConfigInUse "RealtimeLogConfigInUse" +// Cannot delete the real-time log configuration because it is attached to one +// or more cache behaviors. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryptionConfig -func (c *CloudFront) GetFieldLevelEncryptionConfig(input *GetFieldLevelEncryptionConfigInput) (*GetFieldLevelEncryptionConfigOutput, error) { - req, out := c.GetFieldLevelEncryptionConfigRequest(input) +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteRealtimeLogConfig +func (c *CloudFront) DeleteRealtimeLogConfig(input *DeleteRealtimeLogConfigInput) (*DeleteRealtimeLogConfigOutput, error) { + req, out := c.DeleteRealtimeLogConfigRequest(input) return out, req.Send() } -// GetFieldLevelEncryptionConfigWithContext is the same as GetFieldLevelEncryptionConfig with the addition of +// DeleteRealtimeLogConfigWithContext is the same as DeleteRealtimeLogConfig with the addition of // the ability to pass a context and additional request options. // -// See GetFieldLevelEncryptionConfig for details on how to use this API operation. +// See DeleteRealtimeLogConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetFieldLevelEncryptionConfigWithContext(ctx aws.Context, input *GetFieldLevelEncryptionConfigInput, opts ...request.Option) (*GetFieldLevelEncryptionConfigOutput, error) { - req, out := c.GetFieldLevelEncryptionConfigRequest(input) +func (c *CloudFront) DeleteRealtimeLogConfigWithContext(ctx aws.Context, input *DeleteRealtimeLogConfigInput, opts ...request.Option) (*DeleteRealtimeLogConfigOutput, error) { + req, out := c.DeleteRealtimeLogConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetFieldLevelEncryptionProfile = "GetFieldLevelEncryptionProfile2019_03_26" +const opDeleteStreamingDistribution = "DeleteStreamingDistribution2020_05_31" -// GetFieldLevelEncryptionProfileRequest generates a "aws/request.Request" representing the -// client's request for the GetFieldLevelEncryptionProfile operation. The "output" return +// DeleteStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStreamingDistribution operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetFieldLevelEncryptionProfile for more information on using the GetFieldLevelEncryptionProfile +// See DeleteStreamingDistribution for more information on using the DeleteStreamingDistribution // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetFieldLevelEncryptionProfileRequest method. -// req, resp := client.GetFieldLevelEncryptionProfileRequest(params) +// // Example sending a request using the DeleteStreamingDistributionRequest method. +// req, resp := client.DeleteStreamingDistributionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryptionProfile -func (c *CloudFront) GetFieldLevelEncryptionProfileRequest(input *GetFieldLevelEncryptionProfileInput) (req *request.Request, output *GetFieldLevelEncryptionProfileOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteStreamingDistribution +func (c *CloudFront) DeleteStreamingDistributionRequest(input *DeleteStreamingDistributionInput) (req *request.Request, output *DeleteStreamingDistributionOutput) { op := &request.Operation{ - Name: opGetFieldLevelEncryptionProfile, - HTTPMethod: "GET", - HTTPPath: "/2019-03-26/field-level-encryption-profile/{Id}", + Name: opDeleteStreamingDistribution, + HTTPMethod: "DELETE", + HTTPPath: "/2020-05-31/streaming-distribution/{Id}", } if input == nil { - input = &GetFieldLevelEncryptionProfileInput{} + input = &DeleteStreamingDistributionInput{} } - output = &GetFieldLevelEncryptionProfileOutput{} + output = &DeleteStreamingDistributionOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// GetFieldLevelEncryptionProfile API operation for Amazon CloudFront. +// DeleteStreamingDistribution API operation for Amazon CloudFront. // -// Get the field-level encryption profile information. +// Delete a streaming distribution. To delete an RTMP distribution using the +// CloudFront API, perform the following steps. +// +// To delete an RTMP distribution using the CloudFront API: +// +// Disable the RTMP distribution. +// +// Submit a GET Streaming Distribution Config request to get the current configuration +// and the Etag header for the distribution. +// +// Update the XML document that was returned in the response to your GET Streaming +// Distribution Config request to change the value of Enabled to false. 
+// +// Submit a PUT Streaming Distribution Config request to update the configuration +// for your distribution. In the request body, include the XML document that +// you updated in Step 3. Then set the value of the HTTP If-Match header to +// the value of the ETag header that CloudFront returned when you submitted +// the GET Streaming Distribution Config request in Step 2. +// +// Review the response to the PUT Streaming Distribution Config request to confirm +// that the distribution was successfully disabled. +// +// Submit a GET Streaming Distribution Config request to confirm that your changes +// have propagated. When propagation is complete, the value of Status is Deployed. +// +// Submit a DELETE Streaming Distribution request. Set the value of the HTTP +// If-Match header to the value of the ETag header that CloudFront returned +// when you submitted the GET Streaming Distribution Config request in Step +// 2. +// +// Review the response to your DELETE Streaming Distribution request to confirm +// that the distribution was successfully deleted. +// +// For information about deleting a distribution using the CloudFront console, +// see Deleting a Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html) +// in the Amazon CloudFront Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation GetFieldLevelEncryptionProfile for usage and error information. +// API operation DeleteStreamingDistribution for usage and error information. // // Returned Error Codes: // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" -// The specified profile for field-level encryption doesn't exist. +// * ErrCodeStreamingDistributionNotDisabled "StreamingDistributionNotDisabled" +// The specified CloudFront distribution is not disabled. You must disable the +// distribution before you can delete it. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryptionProfile -func (c *CloudFront) GetFieldLevelEncryptionProfile(input *GetFieldLevelEncryptionProfileInput) (*GetFieldLevelEncryptionProfileOutput, error) { - req, out := c.GetFieldLevelEncryptionProfileRequest(input) +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeNoSuchStreamingDistribution "NoSuchStreamingDistribution" +// The specified streaming distribution does not exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/DeleteStreamingDistribution +func (c *CloudFront) DeleteStreamingDistribution(input *DeleteStreamingDistributionInput) (*DeleteStreamingDistributionOutput, error) { + req, out := c.DeleteStreamingDistributionRequest(input) return out, req.Send() } -// GetFieldLevelEncryptionProfileWithContext is the same as GetFieldLevelEncryptionProfile with the addition of +// DeleteStreamingDistributionWithContext is the same as DeleteStreamingDistribution with the addition of // the ability to pass a context and additional request options. 
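The deletion procedure listed above boils down to three SDK calls plus a wait for Status to reach Deployed. The following editorial sketch condenses that flow against the 2020-05-31 operations; svc is assumed to be an existing *cloudfront.CloudFront client, distID is a hypothetical RTMP distribution ID, and the polling step is elided.

func disableAndDeleteStreamingDistribution(svc *cloudfront.CloudFront, distID string) error {
	// Steps 1-2: fetch the current config and its ETag.
	cfgOut, err := svc.GetStreamingDistributionConfig(&cloudfront.GetStreamingDistributionConfigInput{
		Id: aws.String(distID),
	})
	if err != nil {
		return err
	}

	// Steps 3-4: flip Enabled to false and submit the update with If-Match.
	cfg := cfgOut.StreamingDistributionConfig
	cfg.Enabled = aws.Bool(false)
	updOut, err := svc.UpdateStreamingDistribution(&cloudfront.UpdateStreamingDistributionInput{
		Id:                          aws.String(distID),
		IfMatch:                     cfgOut.ETag,
		StreamingDistributionConfig: cfg,
	})
	if err != nil {
		return err
	}

	// Steps 5-6 (not shown): poll GetStreamingDistribution until Status is "Deployed".

	// Step 7: delete, passing the ETag returned by the update.
	_, err = svc.DeleteStreamingDistribution(&cloudfront.DeleteStreamingDistributionInput{
		Id:      aws.String(distID),
		IfMatch: updOut.ETag,
	})
	return err
}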
// -// See GetFieldLevelEncryptionProfile for details on how to use this API operation. +// See DeleteStreamingDistribution for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetFieldLevelEncryptionProfileWithContext(ctx aws.Context, input *GetFieldLevelEncryptionProfileInput, opts ...request.Option) (*GetFieldLevelEncryptionProfileOutput, error) { - req, out := c.GetFieldLevelEncryptionProfileRequest(input) +func (c *CloudFront) DeleteStreamingDistributionWithContext(ctx aws.Context, input *DeleteStreamingDistributionInput, opts ...request.Option) (*DeleteStreamingDistributionOutput, error) { + req, out := c.DeleteStreamingDistributionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetFieldLevelEncryptionProfileConfig = "GetFieldLevelEncryptionProfileConfig2019_03_26" +const opGetCachePolicy = "GetCachePolicy2020_05_31" -// GetFieldLevelEncryptionProfileConfigRequest generates a "aws/request.Request" representing the -// client's request for the GetFieldLevelEncryptionProfileConfig operation. The "output" return +// GetCachePolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetCachePolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetFieldLevelEncryptionProfileConfig for more information on using the GetFieldLevelEncryptionProfileConfig +// See GetCachePolicy for more information on using the GetCachePolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetFieldLevelEncryptionProfileConfigRequest method. -// req, resp := client.GetFieldLevelEncryptionProfileConfigRequest(params) +// // Example sending a request using the GetCachePolicyRequest method. 
+// req, resp := client.GetCachePolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryptionProfileConfig -func (c *CloudFront) GetFieldLevelEncryptionProfileConfigRequest(input *GetFieldLevelEncryptionProfileConfigInput) (req *request.Request, output *GetFieldLevelEncryptionProfileConfigOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetCachePolicy +func (c *CloudFront) GetCachePolicyRequest(input *GetCachePolicyInput) (req *request.Request, output *GetCachePolicyOutput) { op := &request.Operation{ - Name: opGetFieldLevelEncryptionProfileConfig, + Name: opGetCachePolicy, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/field-level-encryption-profile/{Id}/config", + HTTPPath: "/2020-05-31/cache-policy/{Id}", } if input == nil { - input = &GetFieldLevelEncryptionProfileConfigInput{} + input = &GetCachePolicyInput{} } - output = &GetFieldLevelEncryptionProfileConfigOutput{} + output = &GetCachePolicyOutput{} req = c.newRequest(op, input, output) return } -// GetFieldLevelEncryptionProfileConfig API operation for Amazon CloudFront. +// GetCachePolicy API operation for Amazon CloudFront. // -// Get the field-level encryption profile configuration information. +// Gets a cache policy, including the following metadata: +// +// * The policy’s identifier. +// +// * The date and time when the policy was last modified. +// +// To get a cache policy, you must provide the policy’s identifier. If the +// cache policy is attached to a distribution’s cache behavior, you can get +// the policy’s identifier using ListDistributions or GetDistribution. If +// the cache policy is not attached to a cache behavior, you can get the identifier +// using ListCachePolicies. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation GetFieldLevelEncryptionProfileConfig for usage and error information. +// API operation GetCachePolicy for usage and error information. // // Returned Error Codes: // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" -// The specified profile for field-level encryption doesn't exist. +// * ErrCodeNoSuchCachePolicy "NoSuchCachePolicy" +// The cache policy does not exist. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetFieldLevelEncryptionProfileConfig -func (c *CloudFront) GetFieldLevelEncryptionProfileConfig(input *GetFieldLevelEncryptionProfileConfigInput) (*GetFieldLevelEncryptionProfileConfigOutput, error) { - req, out := c.GetFieldLevelEncryptionProfileConfigRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetCachePolicy +func (c *CloudFront) GetCachePolicy(input *GetCachePolicyInput) (*GetCachePolicyOutput, error) { + req, out := c.GetCachePolicyRequest(input) return out, req.Send() } -// GetFieldLevelEncryptionProfileConfigWithContext is the same as GetFieldLevelEncryptionProfileConfig with the addition of +// GetCachePolicyWithContext is the same as GetCachePolicy with the addition of // the ability to pass a context and additional request options. 
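As the GetCachePolicy comment above notes, the call needs only the policy's identifier and returns the policy together with an ETag for later update or delete calls. A brief illustrative sketch; svc and the policy ID are assumptions for the example, not values from this patch.

out, err := svc.GetCachePolicy(&cloudfront.GetCachePolicyInput{
	Id: aws.String("2e54example-policy-id"), // hypothetical cache policy ID
})
if err != nil {
	log.Fatalf("GetCachePolicy: %v", err)
}
log.Printf("policy %s (%s), ETag %s",
	aws.StringValue(out.CachePolicy.Id),
	aws.StringValue(out.CachePolicy.CachePolicyConfig.Name),
	aws.StringValue(out.ETag))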
// -// See GetFieldLevelEncryptionProfileConfig for details on how to use this API operation. +// See GetCachePolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetFieldLevelEncryptionProfileConfigWithContext(ctx aws.Context, input *GetFieldLevelEncryptionProfileConfigInput, opts ...request.Option) (*GetFieldLevelEncryptionProfileConfigOutput, error) { - req, out := c.GetFieldLevelEncryptionProfileConfigRequest(input) +func (c *CloudFront) GetCachePolicyWithContext(ctx aws.Context, input *GetCachePolicyInput, opts ...request.Option) (*GetCachePolicyOutput, error) { + req, out := c.GetCachePolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetInvalidation = "GetInvalidation2019_03_26" +const opGetCachePolicyConfig = "GetCachePolicyConfig2020_05_31" -// GetInvalidationRequest generates a "aws/request.Request" representing the -// client's request for the GetInvalidation operation. The "output" return +// GetCachePolicyConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetCachePolicyConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetInvalidation for more information on using the GetInvalidation +// See GetCachePolicyConfig for more information on using the GetCachePolicyConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetInvalidationRequest method. -// req, resp := client.GetInvalidationRequest(params) +// // Example sending a request using the GetCachePolicyConfigRequest method. +// req, resp := client.GetCachePolicyConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetInvalidation -func (c *CloudFront) GetInvalidationRequest(input *GetInvalidationInput) (req *request.Request, output *GetInvalidationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetCachePolicyConfig +func (c *CloudFront) GetCachePolicyConfigRequest(input *GetCachePolicyConfigInput) (req *request.Request, output *GetCachePolicyConfigOutput) { op := &request.Operation{ - Name: opGetInvalidation, + Name: opGetCachePolicyConfig, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/distribution/{DistributionId}/invalidation/{Id}", + HTTPPath: "/2020-05-31/cache-policy/{Id}/config", } if input == nil { - input = &GetInvalidationInput{} + input = &GetCachePolicyConfigInput{} } - output = &GetInvalidationOutput{} + output = &GetCachePolicyConfigOutput{} req = c.newRequest(op, input, output) return } -// GetInvalidation API operation for Amazon CloudFront. +// GetCachePolicyConfig API operation for Amazon CloudFront. // -// Get the information about an invalidation. +// Gets a cache policy configuration. 
+// +// To get a cache policy configuration, you must provide the policy’s identifier. +// If the cache policy is attached to a distribution’s cache behavior, you +// can get the policy’s identifier using ListDistributions or GetDistribution. +// If the cache policy is not attached to a cache behavior, you can get the +// identifier using ListCachePolicies. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation GetInvalidation for usage and error information. +// API operation GetCachePolicyConfig for usage and error information. // // Returned Error Codes: -// * ErrCodeNoSuchInvalidation "NoSuchInvalidation" -// The specified invalidation does not exist. -// -// * ErrCodeNoSuchDistribution "NoSuchDistribution" -// The specified distribution does not exist. -// // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetInvalidation -func (c *CloudFront) GetInvalidation(input *GetInvalidationInput) (*GetInvalidationOutput, error) { - req, out := c.GetInvalidationRequest(input) +// * ErrCodeNoSuchCachePolicy "NoSuchCachePolicy" +// The cache policy does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetCachePolicyConfig +func (c *CloudFront) GetCachePolicyConfig(input *GetCachePolicyConfigInput) (*GetCachePolicyConfigOutput, error) { + req, out := c.GetCachePolicyConfigRequest(input) return out, req.Send() } -// GetInvalidationWithContext is the same as GetInvalidation with the addition of +// GetCachePolicyConfigWithContext is the same as GetCachePolicyConfig with the addition of // the ability to pass a context and additional request options. // -// See GetInvalidation for details on how to use this API operation. +// See GetCachePolicyConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetInvalidationWithContext(ctx aws.Context, input *GetInvalidationInput, opts ...request.Option) (*GetInvalidationOutput, error) { - req, out := c.GetInvalidationRequest(input) +func (c *CloudFront) GetCachePolicyConfigWithContext(ctx aws.Context, input *GetCachePolicyConfigInput, opts ...request.Option) (*GetCachePolicyConfigOutput, error) { + req, out := c.GetCachePolicyConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetPublicKey = "GetPublicKey2019_03_26" +const opGetCloudFrontOriginAccessIdentity = "GetCloudFrontOriginAccessIdentity2020_05_31" -// GetPublicKeyRequest generates a "aws/request.Request" representing the -// client's request for the GetPublicKey operation. The "output" return +// GetCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCloudFrontOriginAccessIdentity operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
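The GetCachePolicyConfig documentation above says the policy identifier can be read off a distribution's cache behavior when the policy is attached. A short editorial sketch of that lookup path follows; the distribution ID is hypothetical and svc is an assumed, already-constructed client.

distOut, err := svc.GetDistribution(&cloudfront.GetDistributionInput{
	Id: aws.String("E2EXAMPLEDISTID"), // hypothetical distribution ID
})
if err != nil {
	log.Fatalf("GetDistribution: %v", err)
}

// The default cache behavior carries the attached cache policy's identifier
// (nil if no cache policy is attached to that behavior).
policyID := distOut.Distribution.DistributionConfig.DefaultCacheBehavior.CachePolicyId

cfgOut, err := svc.GetCachePolicyConfig(&cloudfront.GetCachePolicyConfigInput{Id: policyID})
if err != nil {
	log.Fatalf("GetCachePolicyConfig: %v", err)
}
log.Printf("cache policy %q, MinTTL=%d",
	aws.StringValue(cfgOut.CachePolicyConfig.Name),
	aws.Int64Value(cfgOut.CachePolicyConfig.MinTTL))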
// -// See GetPublicKey for more information on using the GetPublicKey +// See GetCloudFrontOriginAccessIdentity for more information on using the GetCloudFrontOriginAccessIdentity // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetPublicKeyRequest method. -// req, resp := client.GetPublicKeyRequest(params) +// // Example sending a request using the GetCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.GetCloudFrontOriginAccessIdentityRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetPublicKey -func (c *CloudFront) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Request, output *GetPublicKeyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetCloudFrontOriginAccessIdentity +func (c *CloudFront) GetCloudFrontOriginAccessIdentityRequest(input *GetCloudFrontOriginAccessIdentityInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ - Name: opGetPublicKey, + Name: opGetCloudFrontOriginAccessIdentity, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/public-key/{Id}", + HTTPPath: "/2020-05-31/origin-access-identity/cloudfront/{Id}", } if input == nil { - input = &GetPublicKeyInput{} + input = &GetCloudFrontOriginAccessIdentityInput{} } - output = &GetPublicKeyOutput{} + output = &GetCloudFrontOriginAccessIdentityOutput{} req = c.newRequest(op, input, output) return } -// GetPublicKey API operation for Amazon CloudFront. +// GetCloudFrontOriginAccessIdentity API operation for Amazon CloudFront. // -// Get the public key information. +// Get the information about an origin access identity. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation GetPublicKey for usage and error information. +// API operation GetCloudFrontOriginAccessIdentity for usage and error information. // // Returned Error Codes: +// * ErrCodeNoSuchCloudFrontOriginAccessIdentity "NoSuchCloudFrontOriginAccessIdentity" +// The specified origin access identity does not exist. +// // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeNoSuchPublicKey "NoSuchPublicKey" -// The specified public key doesn't exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetPublicKey -func (c *CloudFront) GetPublicKey(input *GetPublicKeyInput) (*GetPublicKeyOutput, error) { - req, out := c.GetPublicKeyRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetCloudFrontOriginAccessIdentity +func (c *CloudFront) GetCloudFrontOriginAccessIdentity(input *GetCloudFrontOriginAccessIdentityInput) (*GetCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.GetCloudFrontOriginAccessIdentityRequest(input) return out, req.Send() } -// GetPublicKeyWithContext is the same as GetPublicKey with the addition of +// GetCloudFrontOriginAccessIdentityWithContext is the same as GetCloudFrontOriginAccessIdentity with the addition of // the ability to pass a context and additional request options. 
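A detail worth illustrating for GetCloudFrontOriginAccessIdentity is that callers usually want the S3 canonical user ID from the response, which S3 bucket policies reference. This is an editorial sketch with a hypothetical OAI ID and an assumed svc client, not part of the vendored code.

oaiOut, err := svc.GetCloudFrontOriginAccessIdentity(&cloudfront.GetCloudFrontOriginAccessIdentityInput{
	Id: aws.String("E1EXAMPLEOAIID"), // hypothetical origin access identity ID
})
if err != nil {
	log.Fatalf("GetCloudFrontOriginAccessIdentity: %v", err)
}
// The canonical user ID is what S3 bucket policies grant read access to.
log.Printf("OAI %s, S3 canonical user ID: %s",
	aws.StringValue(oaiOut.CloudFrontOriginAccessIdentity.Id),
	aws.StringValue(oaiOut.CloudFrontOriginAccessIdentity.S3CanonicalUserId))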
// -// See GetPublicKey for details on how to use this API operation. +// See GetCloudFrontOriginAccessIdentity for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetPublicKeyWithContext(ctx aws.Context, input *GetPublicKeyInput, opts ...request.Option) (*GetPublicKeyOutput, error) { - req, out := c.GetPublicKeyRequest(input) +func (c *CloudFront) GetCloudFrontOriginAccessIdentityWithContext(ctx aws.Context, input *GetCloudFrontOriginAccessIdentityInput, opts ...request.Option) (*GetCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.GetCloudFrontOriginAccessIdentityRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetPublicKeyConfig = "GetPublicKeyConfig2019_03_26" +const opGetCloudFrontOriginAccessIdentityConfig = "GetCloudFrontOriginAccessIdentityConfig2020_05_31" -// GetPublicKeyConfigRequest generates a "aws/request.Request" representing the -// client's request for the GetPublicKeyConfig operation. The "output" return +// GetCloudFrontOriginAccessIdentityConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetCloudFrontOriginAccessIdentityConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetPublicKeyConfig for more information on using the GetPublicKeyConfig +// See GetCloudFrontOriginAccessIdentityConfig for more information on using the GetCloudFrontOriginAccessIdentityConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetPublicKeyConfigRequest method. -// req, resp := client.GetPublicKeyConfigRequest(params) +// // Example sending a request using the GetCloudFrontOriginAccessIdentityConfigRequest method. 
+// req, resp := client.GetCloudFrontOriginAccessIdentityConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetPublicKeyConfig -func (c *CloudFront) GetPublicKeyConfigRequest(input *GetPublicKeyConfigInput) (req *request.Request, output *GetPublicKeyConfigOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetCloudFrontOriginAccessIdentityConfig +func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfigRequest(input *GetCloudFrontOriginAccessIdentityConfigInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityConfigOutput) { op := &request.Operation{ - Name: opGetPublicKeyConfig, + Name: opGetCloudFrontOriginAccessIdentityConfig, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/public-key/{Id}/config", + HTTPPath: "/2020-05-31/origin-access-identity/cloudfront/{Id}/config", } if input == nil { - input = &GetPublicKeyConfigInput{} + input = &GetCloudFrontOriginAccessIdentityConfigInput{} } - output = &GetPublicKeyConfigOutput{} + output = &GetCloudFrontOriginAccessIdentityConfigOutput{} req = c.newRequest(op, input, output) return } -// GetPublicKeyConfig API operation for Amazon CloudFront. +// GetCloudFrontOriginAccessIdentityConfig API operation for Amazon CloudFront. // -// Return public key configuration informaation +// Get the configuration information about an origin access identity. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation GetPublicKeyConfig for usage and error information. +// API operation GetCloudFrontOriginAccessIdentityConfig for usage and error information. // // Returned Error Codes: +// * ErrCodeNoSuchCloudFrontOriginAccessIdentity "NoSuchCloudFrontOriginAccessIdentity" +// The specified origin access identity does not exist. +// // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeNoSuchPublicKey "NoSuchPublicKey" -// The specified public key doesn't exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetPublicKeyConfig -func (c *CloudFront) GetPublicKeyConfig(input *GetPublicKeyConfigInput) (*GetPublicKeyConfigOutput, error) { - req, out := c.GetPublicKeyConfigRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetCloudFrontOriginAccessIdentityConfig +func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfig(input *GetCloudFrontOriginAccessIdentityConfigInput) (*GetCloudFrontOriginAccessIdentityConfigOutput, error) { + req, out := c.GetCloudFrontOriginAccessIdentityConfigRequest(input) return out, req.Send() } -// GetPublicKeyConfigWithContext is the same as GetPublicKeyConfig with the addition of +// GetCloudFrontOriginAccessIdentityConfigWithContext is the same as GetCloudFrontOriginAccessIdentityConfig with the addition of // the ability to pass a context and additional request options. // -// See GetPublicKeyConfig for details on how to use this API operation. +// See GetCloudFrontOriginAccessIdentityConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
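Every operation in this file gains a *WithContext variant with the contract described above (non-nil context, used for request cancellation). A small illustrative sketch of that pattern with a timeout, assuming the context, time, log, aws, and cloudfront imports plus an existing svc client; the OAI ID is hypothetical.

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()

cfg, err := svc.GetCloudFrontOriginAccessIdentityConfigWithContext(ctx,
	&cloudfront.GetCloudFrontOriginAccessIdentityConfigInput{
		Id: aws.String("E1EXAMPLEOAIID"), // hypothetical OAI ID
	})
if err != nil {
	// A cancelled or expired context surfaces here as a request error.
	log.Fatalf("GetCloudFrontOriginAccessIdentityConfig: %v", err)
}
log.Printf("OAI comment: %s", aws.StringValue(cfg.CloudFrontOriginAccessIdentityConfig.Comment))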
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetPublicKeyConfigWithContext(ctx aws.Context, input *GetPublicKeyConfigInput, opts ...request.Option) (*GetPublicKeyConfigOutput, error) { - req, out := c.GetPublicKeyConfigRequest(input) +func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfigWithContext(ctx aws.Context, input *GetCloudFrontOriginAccessIdentityConfigInput, opts ...request.Option) (*GetCloudFrontOriginAccessIdentityConfigOutput, error) { + req, out := c.GetCloudFrontOriginAccessIdentityConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetStreamingDistribution = "GetStreamingDistribution2019_03_26" +const opGetDistribution = "GetDistribution2020_05_31" -// GetStreamingDistributionRequest generates a "aws/request.Request" representing the -// client's request for the GetStreamingDistribution operation. The "output" return +// GetDistributionRequest generates a "aws/request.Request" representing the +// client's request for the GetDistribution operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetStreamingDistribution for more information on using the GetStreamingDistribution +// See GetDistribution for more information on using the GetDistribution // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetStreamingDistributionRequest method. -// req, resp := client.GetStreamingDistributionRequest(params) +// // Example sending a request using the GetDistributionRequest method. +// req, resp := client.GetDistributionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetStreamingDistribution -func (c *CloudFront) GetStreamingDistributionRequest(input *GetStreamingDistributionInput) (req *request.Request, output *GetStreamingDistributionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetDistribution +func (c *CloudFront) GetDistributionRequest(input *GetDistributionInput) (req *request.Request, output *GetDistributionOutput) { op := &request.Operation{ - Name: opGetStreamingDistribution, + Name: opGetDistribution, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/streaming-distribution/{Id}", + HTTPPath: "/2020-05-31/distribution/{Id}", } if input == nil { - input = &GetStreamingDistributionInput{} + input = &GetDistributionInput{} } - output = &GetStreamingDistributionOutput{} + output = &GetDistributionOutput{} req = c.newRequest(op, input, output) return } -// GetStreamingDistribution API operation for Amazon CloudFront. +// GetDistribution API operation for Amazon CloudFront. // -// Gets information about a specified RTMP distribution, including the distribution -// configuration. +// Get the information about a distribution. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
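The boilerplate above repeatedly mentions injecting custom logic, such as headers, into the request lifecycle via the *Request methods. As an editorial illustration only, here is one way that can look with GetDistributionRequest; the header name and distribution ID are made up for the example and svc is an assumed client.

req, resp := svc.GetDistributionRequest(&cloudfront.GetDistributionInput{
	Id: aws.String("E2EXAMPLEDISTID"), // hypothetical distribution ID
})
// Add a custom header to the outgoing HTTP request before Send runs the handler chain.
req.HTTPRequest.Header.Set("X-Example-Trace", "provider-debug")
if err := req.Send(); err != nil {
	log.Fatalf("GetDistribution: %v", err)
}
log.Printf("distribution status: %s", aws.StringValue(resp.Distribution.Status))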
// // See the AWS API reference guide for Amazon CloudFront's -// API operation GetStreamingDistribution for usage and error information. +// API operation GetDistribution for usage and error information. // // Returned Error Codes: -// * ErrCodeNoSuchStreamingDistribution "NoSuchStreamingDistribution" -// The specified streaming distribution does not exist. +// * ErrCodeNoSuchDistribution "NoSuchDistribution" +// The specified distribution does not exist. // // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetStreamingDistribution -func (c *CloudFront) GetStreamingDistribution(input *GetStreamingDistributionInput) (*GetStreamingDistributionOutput, error) { - req, out := c.GetStreamingDistributionRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetDistribution +func (c *CloudFront) GetDistribution(input *GetDistributionInput) (*GetDistributionOutput, error) { + req, out := c.GetDistributionRequest(input) return out, req.Send() } -// GetStreamingDistributionWithContext is the same as GetStreamingDistribution with the addition of +// GetDistributionWithContext is the same as GetDistribution with the addition of // the ability to pass a context and additional request options. // -// See GetStreamingDistribution for details on how to use this API operation. +// See GetDistribution for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetStreamingDistributionWithContext(ctx aws.Context, input *GetStreamingDistributionInput, opts ...request.Option) (*GetStreamingDistributionOutput, error) { - req, out := c.GetStreamingDistributionRequest(input) +func (c *CloudFront) GetDistributionWithContext(ctx aws.Context, input *GetDistributionInput, opts ...request.Option) (*GetDistributionOutput, error) { + req, out := c.GetDistributionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetStreamingDistributionConfig = "GetStreamingDistributionConfig2019_03_26" +const opGetDistributionConfig = "GetDistributionConfig2020_05_31" -// GetStreamingDistributionConfigRequest generates a "aws/request.Request" representing the -// client's request for the GetStreamingDistributionConfig operation. The "output" return +// GetDistributionConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetDistributionConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetStreamingDistributionConfig for more information on using the GetStreamingDistributionConfig +// See GetDistributionConfig for more information on using the GetDistributionConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetStreamingDistributionConfigRequest method. 
-// req, resp := client.GetStreamingDistributionConfigRequest(params) +// // Example sending a request using the GetDistributionConfigRequest method. +// req, resp := client.GetDistributionConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetStreamingDistributionConfig -func (c *CloudFront) GetStreamingDistributionConfigRequest(input *GetStreamingDistributionConfigInput) (req *request.Request, output *GetStreamingDistributionConfigOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetDistributionConfig +func (c *CloudFront) GetDistributionConfigRequest(input *GetDistributionConfigInput) (req *request.Request, output *GetDistributionConfigOutput) { op := &request.Operation{ - Name: opGetStreamingDistributionConfig, + Name: opGetDistributionConfig, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/streaming-distribution/{Id}/config", + HTTPPath: "/2020-05-31/distribution/{Id}/config", } if input == nil { - input = &GetStreamingDistributionConfigInput{} + input = &GetDistributionConfigInput{} } - output = &GetStreamingDistributionConfigOutput{} + output = &GetDistributionConfigOutput{} req = c.newRequest(op, input, output) return } -// GetStreamingDistributionConfig API operation for Amazon CloudFront. +// GetDistributionConfig API operation for Amazon CloudFront. // -// Get the configuration information about a streaming distribution. +// Get the configuration information about a distribution. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation GetStreamingDistributionConfig for usage and error information. +// API operation GetDistributionConfig for usage and error information. // // Returned Error Codes: -// * ErrCodeNoSuchStreamingDistribution "NoSuchStreamingDistribution" -// The specified streaming distribution does not exist. +// * ErrCodeNoSuchDistribution "NoSuchDistribution" +// The specified distribution does not exist. // // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/GetStreamingDistributionConfig -func (c *CloudFront) GetStreamingDistributionConfig(input *GetStreamingDistributionConfigInput) (*GetStreamingDistributionConfigOutput, error) { - req, out := c.GetStreamingDistributionConfigRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetDistributionConfig +func (c *CloudFront) GetDistributionConfig(input *GetDistributionConfigInput) (*GetDistributionConfigOutput, error) { + req, out := c.GetDistributionConfigRequest(input) return out, req.Send() } -// GetStreamingDistributionConfigWithContext is the same as GetStreamingDistributionConfig with the addition of +// GetDistributionConfigWithContext is the same as GetDistributionConfig with the addition of // the ability to pass a context and additional request options. // -// See GetStreamingDistributionConfig for details on how to use this API operation. +// See GetDistributionConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
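GetDistributionConfig is typically the first half of a read-modify-write cycle: the returned ETag must be echoed back as If-Match on a later update or delete. A minimal editorial sketch, with a hypothetical distribution ID and an assumed svc client.

cfgOut, err := svc.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{
	Id: aws.String("E2EXAMPLEDISTID"), // hypothetical distribution ID
})
if err != nil {
	log.Fatalf("GetDistributionConfig: %v", err)
}

// Keep the ETag: CloudFront rejects a later update or delete without it.
etag := cfgOut.ETag
cfg := cfgOut.DistributionConfig
log.Printf("distribution enabled=%t, caller reference=%s, etag=%s",
	aws.BoolValue(cfg.Enabled), aws.StringValue(cfg.CallerReference), aws.StringValue(etag))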
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) GetStreamingDistributionConfigWithContext(ctx aws.Context, input *GetStreamingDistributionConfigInput, opts ...request.Option) (*GetStreamingDistributionConfigOutput, error) { - req, out := c.GetStreamingDistributionConfigRequest(input) +func (c *CloudFront) GetDistributionConfigWithContext(ctx aws.Context, input *GetDistributionConfigInput, opts ...request.Option) (*GetDistributionConfigOutput, error) { + req, out := c.GetDistributionConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListCloudFrontOriginAccessIdentities = "ListCloudFrontOriginAccessIdentities2019_03_26" +const opGetFieldLevelEncryption = "GetFieldLevelEncryption2020_05_31" -// ListCloudFrontOriginAccessIdentitiesRequest generates a "aws/request.Request" representing the -// client's request for the ListCloudFrontOriginAccessIdentities operation. The "output" return +// GetFieldLevelEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the GetFieldLevelEncryption operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListCloudFrontOriginAccessIdentities for more information on using the ListCloudFrontOriginAccessIdentities +// See GetFieldLevelEncryption for more information on using the GetFieldLevelEncryption // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListCloudFrontOriginAccessIdentitiesRequest method. -// req, resp := client.ListCloudFrontOriginAccessIdentitiesRequest(params) +// // Example sending a request using the GetFieldLevelEncryptionRequest method. 
+// req, resp := client.GetFieldLevelEncryptionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListCloudFrontOriginAccessIdentities -func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesRequest(input *ListCloudFrontOriginAccessIdentitiesInput) (req *request.Request, output *ListCloudFrontOriginAccessIdentitiesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetFieldLevelEncryption +func (c *CloudFront) GetFieldLevelEncryptionRequest(input *GetFieldLevelEncryptionInput) (req *request.Request, output *GetFieldLevelEncryptionOutput) { op := &request.Operation{ - Name: opListCloudFrontOriginAccessIdentities, + Name: opGetFieldLevelEncryption, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/origin-access-identity/cloudfront", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"CloudFrontOriginAccessIdentityList.NextMarker"}, - LimitToken: "MaxItems", - TruncationToken: "CloudFrontOriginAccessIdentityList.IsTruncated", - }, + HTTPPath: "/2020-05-31/field-level-encryption/{Id}", } if input == nil { - input = &ListCloudFrontOriginAccessIdentitiesInput{} + input = &GetFieldLevelEncryptionInput{} } - output = &ListCloudFrontOriginAccessIdentitiesOutput{} + output = &GetFieldLevelEncryptionOutput{} req = c.newRequest(op, input, output) return } -// ListCloudFrontOriginAccessIdentities API operation for Amazon CloudFront. +// GetFieldLevelEncryption API operation for Amazon CloudFront. // -// Lists origin access identities. +// Get the field-level encryption configuration information. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation ListCloudFrontOriginAccessIdentities for usage and error information. +// API operation GetFieldLevelEncryption for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListCloudFrontOriginAccessIdentities -func (c *CloudFront) ListCloudFrontOriginAccessIdentities(input *ListCloudFrontOriginAccessIdentitiesInput) (*ListCloudFrontOriginAccessIdentitiesOutput, error) { - req, out := c.ListCloudFrontOriginAccessIdentitiesRequest(input) +// * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" +// The specified configuration for field-level encryption doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetFieldLevelEncryption +func (c *CloudFront) GetFieldLevelEncryption(input *GetFieldLevelEncryptionInput) (*GetFieldLevelEncryptionOutput, error) { + req, out := c.GetFieldLevelEncryptionRequest(input) return out, req.Send() } -// ListCloudFrontOriginAccessIdentitiesWithContext is the same as ListCloudFrontOriginAccessIdentities with the addition of +// GetFieldLevelEncryptionWithContext is the same as GetFieldLevelEncryption with the addition of // the ability to pass a context and additional request options. // -// See ListCloudFrontOriginAccessIdentities for details on how to use this API operation. 
+// See GetFieldLevelEncryption for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesWithContext(ctx aws.Context, input *ListCloudFrontOriginAccessIdentitiesInput, opts ...request.Option) (*ListCloudFrontOriginAccessIdentitiesOutput, error) { - req, out := c.ListCloudFrontOriginAccessIdentitiesRequest(input) +func (c *CloudFront) GetFieldLevelEncryptionWithContext(ctx aws.Context, input *GetFieldLevelEncryptionInput, opts ...request.Option) (*GetFieldLevelEncryptionOutput, error) { + req, out := c.GetFieldLevelEncryptionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListCloudFrontOriginAccessIdentitiesPages iterates over the pages of a ListCloudFrontOriginAccessIdentities operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +const opGetFieldLevelEncryptionConfig = "GetFieldLevelEncryptionConfig2020_05_31" + +// GetFieldLevelEncryptionConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetFieldLevelEncryptionConfig operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See ListCloudFrontOriginAccessIdentities method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: This operation can generate multiple requests to a service. +// See GetFieldLevelEncryptionConfig for more information on using the GetFieldLevelEncryptionConfig +// API call, and error handling. // -// // Example iterating over at most 3 pages of a ListCloudFrontOriginAccessIdentities operation. -// pageNum := 0 -// err := client.ListCloudFrontOriginAccessIdentitiesPages(params, -// func(page *cloudfront.ListCloudFrontOriginAccessIdentitiesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesPages(input *ListCloudFrontOriginAccessIdentitiesInput, fn func(*ListCloudFrontOriginAccessIdentitiesOutput, bool) bool) error { - return c.ListCloudFrontOriginAccessIdentitiesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListCloudFrontOriginAccessIdentitiesPagesWithContext same as ListCloudFrontOriginAccessIdentitiesPages except -// it takes a Context and allows setting request options on the pages. // -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesPagesWithContext(ctx aws.Context, input *ListCloudFrontOriginAccessIdentitiesInput, fn func(*ListCloudFrontOriginAccessIdentitiesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListCloudFrontOriginAccessIdentitiesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListCloudFrontOriginAccessIdentitiesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListCloudFrontOriginAccessIdentitiesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListDistributions = "ListDistributions2019_03_26" - -// ListDistributionsRequest generates a "aws/request.Request" representing the -// client's request for the ListDistributions operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListDistributions for more information on using the ListDistributions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the ListDistributionsRequest method. -// req, resp := client.ListDistributionsRequest(params) +// // Example sending a request using the GetFieldLevelEncryptionConfigRequest method. +// req, resp := client.GetFieldLevelEncryptionConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListDistributions -func (c *CloudFront) ListDistributionsRequest(input *ListDistributionsInput) (req *request.Request, output *ListDistributionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetFieldLevelEncryptionConfig +func (c *CloudFront) GetFieldLevelEncryptionConfigRequest(input *GetFieldLevelEncryptionConfigInput) (req *request.Request, output *GetFieldLevelEncryptionConfigOutput) { op := &request.Operation{ - Name: opListDistributions, + Name: opGetFieldLevelEncryptionConfig, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/distribution", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"DistributionList.NextMarker"}, - LimitToken: "MaxItems", - TruncationToken: "DistributionList.IsTruncated", - }, + HTTPPath: "/2020-05-31/field-level-encryption/{Id}/config", } if input == nil { - input = &ListDistributionsInput{} + input = &GetFieldLevelEncryptionConfigInput{} } - output = &ListDistributionsOutput{} + output = &GetFieldLevelEncryptionConfigOutput{} req = c.newRequest(op, input, output) return } -// ListDistributions API operation for Amazon CloudFront. +// GetFieldLevelEncryptionConfig API operation for Amazon CloudFront. // -// List CloudFront distributions. +// Get the field-level encryption configuration information. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
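The "runtime type assertion" the comment above refers to is the standard awserr pattern. A short editorial sketch of distinguishing NoSuchFieldLevelEncryptionConfig from other failures; the configuration ID is hypothetical, svc is an assumed client, and the awserr import ("github.com/aws/aws-sdk-go/aws/awserr") is assumed.

_, err := svc.GetFieldLevelEncryptionConfig(&cloudfront.GetFieldLevelEncryptionConfigInput{
	Id: aws.String("C3EXAMPLECONFIGID"), // hypothetical field-level encryption config ID
})
if err != nil {
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == cloudfront.ErrCodeNoSuchFieldLevelEncryptionConfig {
		log.Printf("configuration not found: %s", aerr.Message())
	} else {
		log.Fatalf("GetFieldLevelEncryptionConfig: %v", err)
	}
}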
// // See the AWS API reference guide for Amazon CloudFront's -// API operation ListDistributions for usage and error information. +// API operation GetFieldLevelEncryptionConfig for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListDistributions -func (c *CloudFront) ListDistributions(input *ListDistributionsInput) (*ListDistributionsOutput, error) { - req, out := c.ListDistributionsRequest(input) +// * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" +// The specified configuration for field-level encryption doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetFieldLevelEncryptionConfig +func (c *CloudFront) GetFieldLevelEncryptionConfig(input *GetFieldLevelEncryptionConfigInput) (*GetFieldLevelEncryptionConfigOutput, error) { + req, out := c.GetFieldLevelEncryptionConfigRequest(input) return out, req.Send() } -// ListDistributionsWithContext is the same as ListDistributions with the addition of +// GetFieldLevelEncryptionConfigWithContext is the same as GetFieldLevelEncryptionConfig with the addition of // the ability to pass a context and additional request options. // -// See ListDistributions for details on how to use this API operation. +// See GetFieldLevelEncryptionConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) ListDistributionsWithContext(ctx aws.Context, input *ListDistributionsInput, opts ...request.Option) (*ListDistributionsOutput, error) { - req, out := c.ListDistributionsRequest(input) +func (c *CloudFront) GetFieldLevelEncryptionConfigWithContext(ctx aws.Context, input *GetFieldLevelEncryptionConfigInput, opts ...request.Option) (*GetFieldLevelEncryptionConfigOutput, error) { + req, out := c.GetFieldLevelEncryptionConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListDistributionsPages iterates over the pages of a ListDistributions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListDistributions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListDistributions operation. -// pageNum := 0 -// err := client.ListDistributionsPages(params, -// func(page *cloudfront.ListDistributionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *CloudFront) ListDistributionsPages(input *ListDistributionsInput, fn func(*ListDistributionsOutput, bool) bool) error { - return c.ListDistributionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListDistributionsPagesWithContext same as ListDistributionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *CloudFront) ListDistributionsPagesWithContext(ctx aws.Context, input *ListDistributionsInput, fn func(*ListDistributionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListDistributionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListDistributionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListDistributionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListDistributionsByWebACLId = "ListDistributionsByWebACLId2019_03_26" +const opGetFieldLevelEncryptionProfile = "GetFieldLevelEncryptionProfile2020_05_31" -// ListDistributionsByWebACLIdRequest generates a "aws/request.Request" representing the -// client's request for the ListDistributionsByWebACLId operation. The "output" return +// GetFieldLevelEncryptionProfileRequest generates a "aws/request.Request" representing the +// client's request for the GetFieldLevelEncryptionProfile operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListDistributionsByWebACLId for more information on using the ListDistributionsByWebACLId +// See GetFieldLevelEncryptionProfile for more information on using the GetFieldLevelEncryptionProfile // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListDistributionsByWebACLIdRequest method. -// req, resp := client.ListDistributionsByWebACLIdRequest(params) +// // Example sending a request using the GetFieldLevelEncryptionProfileRequest method. +// req, resp := client.GetFieldLevelEncryptionProfileRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListDistributionsByWebACLId -func (c *CloudFront) ListDistributionsByWebACLIdRequest(input *ListDistributionsByWebACLIdInput) (req *request.Request, output *ListDistributionsByWebACLIdOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetFieldLevelEncryptionProfile +func (c *CloudFront) GetFieldLevelEncryptionProfileRequest(input *GetFieldLevelEncryptionProfileInput) (req *request.Request, output *GetFieldLevelEncryptionProfileOutput) { op := &request.Operation{ - Name: opListDistributionsByWebACLId, + Name: opGetFieldLevelEncryptionProfile, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/distributionsByWebACLId/{WebACLId}", + HTTPPath: "/2020-05-31/field-level-encryption-profile/{Id}", } if input == nil { - input = &ListDistributionsByWebACLIdInput{} + input = &GetFieldLevelEncryptionProfileInput{} } - output = &ListDistributionsByWebACLIdOutput{} + output = &GetFieldLevelEncryptionProfileOutput{} req = c.newRequest(op, input, output) return } -// ListDistributionsByWebACLId API operation for Amazon CloudFront. +// GetFieldLevelEncryptionProfile API operation for Amazon CloudFront. 
// -// List the distributions that are associated with a specified AWS WAF web ACL. +// Get the field-level encryption profile information. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation ListDistributionsByWebACLId for usage and error information. +// API operation GetFieldLevelEncryptionProfile for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. // -// * ErrCodeInvalidWebACLId "InvalidWebACLId" -// A web ACL ID specified is not valid. To specify a web ACL created using the -// latest version of AWS WAF, use the ACL ARN, for example arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a. -// To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example -// 473e64fd-f30b-4765-81a0-62ad96dd167a. +// * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" +// The specified profile for field-level encryption doesn't exist. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListDistributionsByWebACLId -func (c *CloudFront) ListDistributionsByWebACLId(input *ListDistributionsByWebACLIdInput) (*ListDistributionsByWebACLIdOutput, error) { - req, out := c.ListDistributionsByWebACLIdRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetFieldLevelEncryptionProfile +func (c *CloudFront) GetFieldLevelEncryptionProfile(input *GetFieldLevelEncryptionProfileInput) (*GetFieldLevelEncryptionProfileOutput, error) { + req, out := c.GetFieldLevelEncryptionProfileRequest(input) return out, req.Send() } -// ListDistributionsByWebACLIdWithContext is the same as ListDistributionsByWebACLId with the addition of +// GetFieldLevelEncryptionProfileWithContext is the same as GetFieldLevelEncryptionProfile with the addition of // the ability to pass a context and additional request options. // -// See ListDistributionsByWebACLId for details on how to use this API operation. +// See GetFieldLevelEncryptionProfile for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) ListDistributionsByWebACLIdWithContext(ctx aws.Context, input *ListDistributionsByWebACLIdInput, opts ...request.Option) (*ListDistributionsByWebACLIdOutput, error) { - req, out := c.ListDistributionsByWebACLIdRequest(input) +func (c *CloudFront) GetFieldLevelEncryptionProfileWithContext(ctx aws.Context, input *GetFieldLevelEncryptionProfileInput, opts ...request.Option) (*GetFieldLevelEncryptionProfileOutput, error) { + req, out := c.GetFieldLevelEncryptionProfileRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opListFieldLevelEncryptionConfigs = "ListFieldLevelEncryptionConfigs2019_03_26" +const opGetFieldLevelEncryptionProfileConfig = "GetFieldLevelEncryptionProfileConfig2020_05_31" -// ListFieldLevelEncryptionConfigsRequest generates a "aws/request.Request" representing the -// client's request for the ListFieldLevelEncryptionConfigs operation. The "output" return +// GetFieldLevelEncryptionProfileConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetFieldLevelEncryptionProfileConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListFieldLevelEncryptionConfigs for more information on using the ListFieldLevelEncryptionConfigs +// See GetFieldLevelEncryptionProfileConfig for more information on using the GetFieldLevelEncryptionProfileConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListFieldLevelEncryptionConfigsRequest method. -// req, resp := client.ListFieldLevelEncryptionConfigsRequest(params) +// // Example sending a request using the GetFieldLevelEncryptionProfileConfigRequest method. +// req, resp := client.GetFieldLevelEncryptionProfileConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListFieldLevelEncryptionConfigs -func (c *CloudFront) ListFieldLevelEncryptionConfigsRequest(input *ListFieldLevelEncryptionConfigsInput) (req *request.Request, output *ListFieldLevelEncryptionConfigsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetFieldLevelEncryptionProfileConfig +func (c *CloudFront) GetFieldLevelEncryptionProfileConfigRequest(input *GetFieldLevelEncryptionProfileConfigInput) (req *request.Request, output *GetFieldLevelEncryptionProfileConfigOutput) { op := &request.Operation{ - Name: opListFieldLevelEncryptionConfigs, + Name: opGetFieldLevelEncryptionProfileConfig, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/field-level-encryption", + HTTPPath: "/2020-05-31/field-level-encryption-profile/{Id}/config", } if input == nil { - input = &ListFieldLevelEncryptionConfigsInput{} + input = &GetFieldLevelEncryptionProfileConfigInput{} } - output = &ListFieldLevelEncryptionConfigsOutput{} + output = &GetFieldLevelEncryptionProfileConfigOutput{} req = c.newRequest(op, input, output) return } -// ListFieldLevelEncryptionConfigs API operation for Amazon CloudFront. +// GetFieldLevelEncryptionProfileConfig API operation for Amazon CloudFront. // -// List all field-level encryption configurations that have been created in -// CloudFront for this account. +// Get the field-level encryption profile configuration information. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation ListFieldLevelEncryptionConfigs for usage and error information. 
+// API operation GetFieldLevelEncryptionProfileConfig for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListFieldLevelEncryptionConfigs -func (c *CloudFront) ListFieldLevelEncryptionConfigs(input *ListFieldLevelEncryptionConfigsInput) (*ListFieldLevelEncryptionConfigsOutput, error) { - req, out := c.ListFieldLevelEncryptionConfigsRequest(input) +// * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" +// The specified profile for field-level encryption doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetFieldLevelEncryptionProfileConfig +func (c *CloudFront) GetFieldLevelEncryptionProfileConfig(input *GetFieldLevelEncryptionProfileConfigInput) (*GetFieldLevelEncryptionProfileConfigOutput, error) { + req, out := c.GetFieldLevelEncryptionProfileConfigRequest(input) return out, req.Send() } -// ListFieldLevelEncryptionConfigsWithContext is the same as ListFieldLevelEncryptionConfigs with the addition of +// GetFieldLevelEncryptionProfileConfigWithContext is the same as GetFieldLevelEncryptionProfileConfig with the addition of // the ability to pass a context and additional request options. // -// See ListFieldLevelEncryptionConfigs for details on how to use this API operation. +// See GetFieldLevelEncryptionProfileConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) ListFieldLevelEncryptionConfigsWithContext(ctx aws.Context, input *ListFieldLevelEncryptionConfigsInput, opts ...request.Option) (*ListFieldLevelEncryptionConfigsOutput, error) { - req, out := c.ListFieldLevelEncryptionConfigsRequest(input) +func (c *CloudFront) GetFieldLevelEncryptionProfileConfigWithContext(ctx aws.Context, input *GetFieldLevelEncryptionProfileConfigInput, opts ...request.Option) (*GetFieldLevelEncryptionProfileConfigOutput, error) { + req, out := c.GetFieldLevelEncryptionProfileConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListFieldLevelEncryptionProfiles = "ListFieldLevelEncryptionProfiles2019_03_26" +const opGetInvalidation = "GetInvalidation2020_05_31" -// ListFieldLevelEncryptionProfilesRequest generates a "aws/request.Request" representing the -// client's request for the ListFieldLevelEncryptionProfiles operation. The "output" return +// GetInvalidationRequest generates a "aws/request.Request" representing the +// client's request for the GetInvalidation operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListFieldLevelEncryptionProfiles for more information on using the ListFieldLevelEncryptionProfiles +// See GetInvalidation for more information on using the GetInvalidation // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListFieldLevelEncryptionProfilesRequest method. -// req, resp := client.ListFieldLevelEncryptionProfilesRequest(params) +// // Example sending a request using the GetInvalidationRequest method. +// req, resp := client.GetInvalidationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListFieldLevelEncryptionProfiles -func (c *CloudFront) ListFieldLevelEncryptionProfilesRequest(input *ListFieldLevelEncryptionProfilesInput) (req *request.Request, output *ListFieldLevelEncryptionProfilesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetInvalidation +func (c *CloudFront) GetInvalidationRequest(input *GetInvalidationInput) (req *request.Request, output *GetInvalidationOutput) { op := &request.Operation{ - Name: opListFieldLevelEncryptionProfiles, + Name: opGetInvalidation, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/field-level-encryption-profile", + HTTPPath: "/2020-05-31/distribution/{DistributionId}/invalidation/{Id}", } if input == nil { - input = &ListFieldLevelEncryptionProfilesInput{} + input = &GetInvalidationInput{} } - output = &ListFieldLevelEncryptionProfilesOutput{} + output = &GetInvalidationOutput{} req = c.newRequest(op, input, output) return } -// ListFieldLevelEncryptionProfiles API operation for Amazon CloudFront. +// GetInvalidation API operation for Amazon CloudFront. // -// Request a list of field-level encryption profiles that have been created -// in CloudFront for this account. +// Get the information about an invalidation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation ListFieldLevelEncryptionProfiles for usage and error information. +// API operation GetInvalidation for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// * ErrCodeNoSuchInvalidation "NoSuchInvalidation" +// The specified invalidation does not exist. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListFieldLevelEncryptionProfiles -func (c *CloudFront) ListFieldLevelEncryptionProfiles(input *ListFieldLevelEncryptionProfilesInput) (*ListFieldLevelEncryptionProfilesOutput, error) { - req, out := c.ListFieldLevelEncryptionProfilesRequest(input) +// * ErrCodeNoSuchDistribution "NoSuchDistribution" +// The specified distribution does not exist. +// +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetInvalidation +func (c *CloudFront) GetInvalidation(input *GetInvalidationInput) (*GetInvalidationOutput, error) { + req, out := c.GetInvalidationRequest(input) return out, req.Send() } -// ListFieldLevelEncryptionProfilesWithContext is the same as ListFieldLevelEncryptionProfiles with the addition of +// GetInvalidationWithContext is the same as GetInvalidation with the addition of // the ability to pass a context and additional request options. 
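//
// A minimal caller-side sketch (not part of the generated code), assuming the
// usual fmt, aws, session, and cloudfront imports; the distribution and
// invalidation IDs below are hypothetical:
//
//    svc := cloudfront.New(session.Must(session.NewSession()))
//    out, err := svc.GetInvalidationWithContext(aws.BackgroundContext(), &cloudfront.GetInvalidationInput{
//        DistributionId: aws.String("EDFDVBD6EXAMPLE"),     // hypothetical distribution ID
//        Id:             aws.String("IEXAMPLEINVALIDATION"), // hypothetical invalidation ID
//    })
//    if err == nil {
//        fmt.Println(out.Invalidation)
//    }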
// -// See ListFieldLevelEncryptionProfiles for details on how to use this API operation. +// See GetInvalidation for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) ListFieldLevelEncryptionProfilesWithContext(ctx aws.Context, input *ListFieldLevelEncryptionProfilesInput, opts ...request.Option) (*ListFieldLevelEncryptionProfilesOutput, error) { - req, out := c.ListFieldLevelEncryptionProfilesRequest(input) +func (c *CloudFront) GetInvalidationWithContext(ctx aws.Context, input *GetInvalidationInput, opts ...request.Option) (*GetInvalidationOutput, error) { + req, out := c.GetInvalidationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListInvalidations = "ListInvalidations2019_03_26" +const opGetMonitoringSubscription = "GetMonitoringSubscription2020_05_31" -// ListInvalidationsRequest generates a "aws/request.Request" representing the -// client's request for the ListInvalidations operation. The "output" return +// GetMonitoringSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the GetMonitoringSubscription operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListInvalidations for more information on using the ListInvalidations +// See GetMonitoringSubscription for more information on using the GetMonitoringSubscription // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListInvalidationsRequest method. -// req, resp := client.ListInvalidationsRequest(params) +// // Example sending a request using the GetMonitoringSubscriptionRequest method. 
+// req, resp := client.GetMonitoringSubscriptionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListInvalidations -func (c *CloudFront) ListInvalidationsRequest(input *ListInvalidationsInput) (req *request.Request, output *ListInvalidationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetMonitoringSubscription +func (c *CloudFront) GetMonitoringSubscriptionRequest(input *GetMonitoringSubscriptionInput) (req *request.Request, output *GetMonitoringSubscriptionOutput) { op := &request.Operation{ - Name: opListInvalidations, + Name: opGetMonitoringSubscription, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/distribution/{DistributionId}/invalidation", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"InvalidationList.NextMarker"}, - LimitToken: "MaxItems", - TruncationToken: "InvalidationList.IsTruncated", - }, + HTTPPath: "/2020-05-31/distributions/{DistributionId}/monitoring-subscription", } if input == nil { - input = &ListInvalidationsInput{} + input = &GetMonitoringSubscriptionInput{} } - output = &ListInvalidationsOutput{} + output = &GetMonitoringSubscriptionOutput{} req = c.newRequest(op, input, output) return } -// ListInvalidations API operation for Amazon CloudFront. +// GetMonitoringSubscription API operation for Amazon CloudFront. // -// Lists invalidation batches. +// Gets information about whether additional CloudWatch metrics are enabled +// for the specified CloudFront distribution. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation ListInvalidations for usage and error information. +// API operation GetMonitoringSubscription for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. // // * ErrCodeNoSuchDistribution "NoSuchDistribution" // The specified distribution does not exist. // -// * ErrCodeAccessDenied "AccessDenied" -// Access denied. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListInvalidations -func (c *CloudFront) ListInvalidations(input *ListInvalidationsInput) (*ListInvalidationsOutput, error) { - req, out := c.ListInvalidationsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetMonitoringSubscription +func (c *CloudFront) GetMonitoringSubscription(input *GetMonitoringSubscriptionInput) (*GetMonitoringSubscriptionOutput, error) { + req, out := c.GetMonitoringSubscriptionRequest(input) return out, req.Send() } -// ListInvalidationsWithContext is the same as ListInvalidations with the addition of +// GetMonitoringSubscriptionWithContext is the same as GetMonitoringSubscription with the addition of // the ability to pass a context and additional request options. // -// See ListInvalidations for details on how to use this API operation. +// See GetMonitoringSubscription for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
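//
// A minimal caller-side sketch (not part of the generated code), assuming the
// usual fmt, aws, session, and cloudfront imports; the distribution ID below
// is hypothetical:
//
//    svc := cloudfront.New(session.Must(session.NewSession()))
//    out, err := svc.GetMonitoringSubscription(&cloudfront.GetMonitoringSubscriptionInput{
//        DistributionId: aws.String("EDFDVBD6EXAMPLE"), // hypothetical distribution ID
//    })
//    if err == nil {
//        fmt.Println(out.MonitoringSubscription)
//    }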
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) ListInvalidationsWithContext(ctx aws.Context, input *ListInvalidationsInput, opts ...request.Option) (*ListInvalidationsOutput, error) { - req, out := c.ListInvalidationsRequest(input) +func (c *CloudFront) GetMonitoringSubscriptionWithContext(ctx aws.Context, input *GetMonitoringSubscriptionInput, opts ...request.Option) (*GetMonitoringSubscriptionOutput, error) { + req, out := c.GetMonitoringSubscriptionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListInvalidationsPages iterates over the pages of a ListInvalidations operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListInvalidations method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListInvalidations operation. -// pageNum := 0 -// err := client.ListInvalidationsPages(params, -// func(page *cloudfront.ListInvalidationsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *CloudFront) ListInvalidationsPages(input *ListInvalidationsInput, fn func(*ListInvalidationsOutput, bool) bool) error { - return c.ListInvalidationsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListInvalidationsPagesWithContext same as ListInvalidationsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *CloudFront) ListInvalidationsPagesWithContext(ctx aws.Context, input *ListInvalidationsInput, fn func(*ListInvalidationsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListInvalidationsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListInvalidationsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListInvalidationsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListPublicKeys = "ListPublicKeys2019_03_26" +const opGetOriginRequestPolicy = "GetOriginRequestPolicy2020_05_31" -// ListPublicKeysRequest generates a "aws/request.Request" representing the -// client's request for the ListPublicKeys operation. The "output" return +// GetOriginRequestPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetOriginRequestPolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListPublicKeys for more information on using the ListPublicKeys +// See GetOriginRequestPolicy for more information on using the GetOriginRequestPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // // -// // Example sending a request using the ListPublicKeysRequest method. -// req, resp := client.ListPublicKeysRequest(params) +// // Example sending a request using the GetOriginRequestPolicyRequest method. +// req, resp := client.GetOriginRequestPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListPublicKeys -func (c *CloudFront) ListPublicKeysRequest(input *ListPublicKeysInput) (req *request.Request, output *ListPublicKeysOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetOriginRequestPolicy +func (c *CloudFront) GetOriginRequestPolicyRequest(input *GetOriginRequestPolicyInput) (req *request.Request, output *GetOriginRequestPolicyOutput) { op := &request.Operation{ - Name: opListPublicKeys, + Name: opGetOriginRequestPolicy, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/public-key", + HTTPPath: "/2020-05-31/origin-request-policy/{Id}", } if input == nil { - input = &ListPublicKeysInput{} + input = &GetOriginRequestPolicyInput{} } - output = &ListPublicKeysOutput{} + output = &GetOriginRequestPolicyOutput{} req = c.newRequest(op, input, output) return } -// ListPublicKeys API operation for Amazon CloudFront. +// GetOriginRequestPolicy API operation for Amazon CloudFront. // -// List all public keys that have been added to CloudFront for this account. +// Gets an origin request policy, including the following metadata: +// +// * The policy’s identifier. +// +// * The date and time when the policy was last modified. +// +// To get an origin request policy, you must provide the policy’s identifier. +// If the origin request policy is attached to a distribution’s cache behavior, +// you can get the policy’s identifier using ListDistributions or GetDistribution. +// If the origin request policy is not attached to a cache behavior, you can +// get the identifier using ListOriginRequestPolicies. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation ListPublicKeys for usage and error information. +// API operation GetOriginRequestPolicy for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListPublicKeys -func (c *CloudFront) ListPublicKeys(input *ListPublicKeysInput) (*ListPublicKeysOutput, error) { - req, out := c.ListPublicKeysRequest(input) +// * ErrCodeNoSuchOriginRequestPolicy "NoSuchOriginRequestPolicy" +// The origin request policy does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetOriginRequestPolicy +func (c *CloudFront) GetOriginRequestPolicy(input *GetOriginRequestPolicyInput) (*GetOriginRequestPolicyOutput, error) { + req, out := c.GetOriginRequestPolicyRequest(input) return out, req.Send() } -// ListPublicKeysWithContext is the same as ListPublicKeys with the addition of +// GetOriginRequestPolicyWithContext is the same as GetOriginRequestPolicy with the addition of // the ability to pass a context and additional request options. 
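//
// A minimal caller-side sketch (not part of the generated code), assuming the
// usual fmt, aws, session, and cloudfront imports; the policy ID below is
// hypothetical and would normally come from ListDistributions, GetDistribution,
// or ListOriginRequestPolicies as described above:
//
//    svc := cloudfront.New(session.Must(session.NewSession()))
//    out, err := svc.GetOriginRequestPolicy(&cloudfront.GetOriginRequestPolicyInput{
//        Id: aws.String("EXAMPLE-ORIGIN-REQUEST-POLICY-ID"), // hypothetical ID
//    })
//    if err == nil {
//        fmt.Println(out.OriginRequestPolicy)
//    }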
// -// See ListPublicKeys for details on how to use this API operation. +// See GetOriginRequestPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) ListPublicKeysWithContext(ctx aws.Context, input *ListPublicKeysInput, opts ...request.Option) (*ListPublicKeysOutput, error) { - req, out := c.ListPublicKeysRequest(input) +func (c *CloudFront) GetOriginRequestPolicyWithContext(ctx aws.Context, input *GetOriginRequestPolicyInput, opts ...request.Option) (*GetOriginRequestPolicyOutput, error) { + req, out := c.GetOriginRequestPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListStreamingDistributions = "ListStreamingDistributions2019_03_26" +const opGetOriginRequestPolicyConfig = "GetOriginRequestPolicyConfig2020_05_31" -// ListStreamingDistributionsRequest generates a "aws/request.Request" representing the -// client's request for the ListStreamingDistributions operation. The "output" return +// GetOriginRequestPolicyConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetOriginRequestPolicyConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListStreamingDistributions for more information on using the ListStreamingDistributions +// See GetOriginRequestPolicyConfig for more information on using the GetOriginRequestPolicyConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListStreamingDistributionsRequest method. -// req, resp := client.ListStreamingDistributionsRequest(params) +// // Example sending a request using the GetOriginRequestPolicyConfigRequest method. 
+// req, resp := client.GetOriginRequestPolicyConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListStreamingDistributions -func (c *CloudFront) ListStreamingDistributionsRequest(input *ListStreamingDistributionsInput) (req *request.Request, output *ListStreamingDistributionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetOriginRequestPolicyConfig +func (c *CloudFront) GetOriginRequestPolicyConfigRequest(input *GetOriginRequestPolicyConfigInput) (req *request.Request, output *GetOriginRequestPolicyConfigOutput) { op := &request.Operation{ - Name: opListStreamingDistributions, + Name: opGetOriginRequestPolicyConfig, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/streaming-distribution", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"StreamingDistributionList.NextMarker"}, - LimitToken: "MaxItems", - TruncationToken: "StreamingDistributionList.IsTruncated", - }, + HTTPPath: "/2020-05-31/origin-request-policy/{Id}/config", } if input == nil { - input = &ListStreamingDistributionsInput{} + input = &GetOriginRequestPolicyConfigInput{} } - output = &ListStreamingDistributionsOutput{} + output = &GetOriginRequestPolicyConfigOutput{} req = c.newRequest(op, input, output) return } -// ListStreamingDistributions API operation for Amazon CloudFront. +// GetOriginRequestPolicyConfig API operation for Amazon CloudFront. // -// List streaming distributions. +// Gets an origin request policy configuration. +// +// To get an origin request policy configuration, you must provide the policy’s +// identifier. If the origin request policy is attached to a distribution’s +// cache behavior, you can get the policy’s identifier using ListDistributions +// or GetDistribution. If the origin request policy is not attached to a cache +// behavior, you can get the identifier using ListOriginRequestPolicies. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation ListStreamingDistributions for usage and error information. +// API operation GetOriginRequestPolicyConfig for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListStreamingDistributions -func (c *CloudFront) ListStreamingDistributions(input *ListStreamingDistributionsInput) (*ListStreamingDistributionsOutput, error) { - req, out := c.ListStreamingDistributionsRequest(input) +// * ErrCodeNoSuchOriginRequestPolicy "NoSuchOriginRequestPolicy" +// The origin request policy does not exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetOriginRequestPolicyConfig +func (c *CloudFront) GetOriginRequestPolicyConfig(input *GetOriginRequestPolicyConfigInput) (*GetOriginRequestPolicyConfigOutput, error) { + req, out := c.GetOriginRequestPolicyConfigRequest(input) return out, req.Send() } -// ListStreamingDistributionsWithContext is the same as ListStreamingDistributions with the addition of +// GetOriginRequestPolicyConfigWithContext is the same as GetOriginRequestPolicyConfig with the addition of // the ability to pass a context and additional request options. // -// See ListStreamingDistributions for details on how to use this API operation. +// See GetOriginRequestPolicyConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) ListStreamingDistributionsWithContext(ctx aws.Context, input *ListStreamingDistributionsInput, opts ...request.Option) (*ListStreamingDistributionsOutput, error) { - req, out := c.ListStreamingDistributionsRequest(input) +func (c *CloudFront) GetOriginRequestPolicyConfigWithContext(ctx aws.Context, input *GetOriginRequestPolicyConfigInput, opts ...request.Option) (*GetOriginRequestPolicyConfigOutput, error) { + req, out := c.GetOriginRequestPolicyConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListStreamingDistributionsPages iterates over the pages of a ListStreamingDistributions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListStreamingDistributions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListStreamingDistributions operation. -// pageNum := 0 -// err := client.ListStreamingDistributionsPages(params, -// func(page *cloudfront.ListStreamingDistributionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *CloudFront) ListStreamingDistributionsPages(input *ListStreamingDistributionsInput, fn func(*ListStreamingDistributionsOutput, bool) bool) error { - return c.ListStreamingDistributionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListStreamingDistributionsPagesWithContext same as ListStreamingDistributionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *CloudFront) ListStreamingDistributionsPagesWithContext(ctx aws.Context, input *ListStreamingDistributionsInput, fn func(*ListStreamingDistributionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListStreamingDistributionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListStreamingDistributionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListStreamingDistributionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListTagsForResource = "ListTagsForResource2019_03_26" +const opGetPublicKey = "GetPublicKey2020_05_31" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// GetPublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the GetPublicKey operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See GetPublicKey for more information on using the GetPublicKey // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the GetPublicKeyRequest method. +// req, resp := client.GetPublicKeyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListTagsForResource -func (c *CloudFront) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetPublicKey +func (c *CloudFront) GetPublicKeyRequest(input *GetPublicKeyInput) (req *request.Request, output *GetPublicKeyOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opGetPublicKey, HTTPMethod: "GET", - HTTPPath: "/2019-03-26/tagging", + HTTPPath: "/2020-05-31/public-key/{Id}", } if input == nil { - input = &ListTagsForResourceInput{} + input = &GetPublicKeyInput{} } - output = &ListTagsForResourceOutput{} + output = &GetPublicKeyOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for Amazon CloudFront. +// GetPublicKey API operation for Amazon CloudFront. // -// List tags for a CloudFront resource. +// Get the public key information. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation ListTagsForResource for usage and error information. +// API operation GetPublicKey for usage and error information. // // Returned Error Codes: // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. -// -// * ErrCodeInvalidTagging "InvalidTagging" -// The tagging specified is not valid. -// -// * ErrCodeNoSuchResource "NoSuchResource" -// A resource that was specified is not valid. +// * ErrCodeNoSuchPublicKey "NoSuchPublicKey" +// The specified public key doesn't exist. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/ListTagsForResource -func (c *CloudFront) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetPublicKey +func (c *CloudFront) GetPublicKey(input *GetPublicKeyInput) (*GetPublicKeyOutput, error) { + req, out := c.GetPublicKeyRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// GetPublicKeyWithContext is the same as GetPublicKey with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation. +// See GetPublicKey for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *CloudFront) GetPublicKeyWithContext(ctx aws.Context, input *GetPublicKeyInput, opts ...request.Option) (*GetPublicKeyOutput, error) { + req, out := c.GetPublicKeyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource2019_03_26" +const opGetPublicKeyConfig = "GetPublicKeyConfig2020_05_31" -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// GetPublicKeyConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetPublicKeyConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See GetPublicKeyConfig for more information on using the GetPublicKeyConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the GetPublicKeyConfigRequest method. 
+// req, resp := client.GetPublicKeyConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/TagResource -func (c *CloudFront) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetPublicKeyConfig +func (c *CloudFront) GetPublicKeyConfigRequest(input *GetPublicKeyConfigInput) (req *request.Request, output *GetPublicKeyConfigOutput) { op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/2019-03-26/tagging?Operation=Tag", + Name: opGetPublicKeyConfig, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/public-key/{Id}/config", } if input == nil { - input = &TagResourceInput{} + input = &GetPublicKeyConfigInput{} } - output = &TagResourceOutput{} + output = &GetPublicKeyConfigOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// TagResource API operation for Amazon CloudFront. +// GetPublicKeyConfig API operation for Amazon CloudFront. // -// Add tags to a CloudFront resource. +// Return public key configuration information. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation TagResource for usage and error information. +// API operation GetPublicKeyConfig for usage and error information. // // Returned Error Codes: // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. -// -// * ErrCodeInvalidTagging "InvalidTagging" -// The tagging specified is not valid. -// -// * ErrCodeNoSuchResource "NoSuchResource" -// A resource that was specified is not valid. +// * ErrCodeNoSuchPublicKey "NoSuchPublicKey" +// The specified public key doesn't exist. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/TagResource -func (c *CloudFront) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetPublicKeyConfig +func (c *CloudFront) GetPublicKeyConfig(input *GetPublicKeyConfigInput) (*GetPublicKeyConfigOutput, error) { + req, out := c.GetPublicKeyConfigRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// GetPublicKeyConfigWithContext is the same as GetPublicKeyConfig with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See GetPublicKeyConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts.
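//
// A minimal caller-side sketch (not part of the generated code), assuming the
// usual fmt, aws, session, and cloudfront imports; the public key ID below is
// hypothetical:
//
//    svc := cloudfront.New(session.Must(session.NewSession()))
//    out, err := svc.GetPublicKeyConfig(&cloudfront.GetPublicKeyConfigInput{
//        Id: aws.String("KEXAMPLEPUBLICKEYID"), // hypothetical public key ID
//    })
//    if err == nil {
//        fmt.Println(out.PublicKeyConfig)
//    }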
-func (c *CloudFront) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *CloudFront) GetPublicKeyConfigWithContext(ctx aws.Context, input *GetPublicKeyConfigInput, opts ...request.Option) (*GetPublicKeyConfigOutput, error) { + req, out := c.GetPublicKeyConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUntagResource = "UntagResource2019_03_26" +const opGetRealtimeLogConfig = "GetRealtimeLogConfig2020_05_31" -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// GetRealtimeLogConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetRealtimeLogConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See GetRealtimeLogConfig for more information on using the GetRealtimeLogConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the GetRealtimeLogConfigRequest method. +// req, resp := client.GetRealtimeLogConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UntagResource -func (c *CloudFront) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetRealtimeLogConfig +func (c *CloudFront) GetRealtimeLogConfigRequest(input *GetRealtimeLogConfigInput) (req *request.Request, output *GetRealtimeLogConfigOutput) { op := &request.Operation{ - Name: opUntagResource, + Name: opGetRealtimeLogConfig, HTTPMethod: "POST", - HTTPPath: "/2019-03-26/tagging?Operation=Untag", + HTTPPath: "/2020-05-31/get-realtime-log-config/", } if input == nil { - input = &UntagResourceInput{} + input = &GetRealtimeLogConfigInput{} } - output = &UntagResourceOutput{} + output = &GetRealtimeLogConfigOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UntagResource API operation for Amazon CloudFront. +// GetRealtimeLogConfig API operation for Amazon CloudFront. // -// Remove tags from a CloudFront resource. +// Gets a real-time log configuration. +// +// To get a real-time log configuration, you can provide the configuration’s +// name or its Amazon Resource Name (ARN). You must provide at least one. If +// you provide both, CloudFront uses the name to identify the real-time log +// configuration to get. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
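//
// A minimal caller-side sketch (not part of the generated code), assuming the
// usual fmt, aws, session, and cloudfront imports; the configuration name
// below is hypothetical, and the input accepts either a name or an ARN as
// described above:
//
//    svc := cloudfront.New(session.Must(session.NewSession()))
//    out, err := svc.GetRealtimeLogConfig(&cloudfront.GetRealtimeLogConfigInput{
//        Name: aws.String("example-realtime-log-config"), // hypothetical name
//    })
//    if err == nil {
//        fmt.Println(out.RealtimeLogConfig)
//    }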
// // See the AWS API reference guide for Amazon CloudFront's -// API operation UntagResource for usage and error information. +// API operation GetRealtimeLogConfig for usage and error information. // // Returned Error Codes: -// * ErrCodeAccessDenied "AccessDenied" -// Access denied. +// * ErrCodeNoSuchRealtimeLogConfig "NoSuchRealtimeLogConfig" +// The real-time log configuration does not exist. // // * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. -// -// * ErrCodeInvalidTagging "InvalidTagging" -// The tagging specified is not valid. -// -// * ErrCodeNoSuchResource "NoSuchResource" -// A resource that was specified is not valid. +// An argument is invalid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UntagResource -func (c *CloudFront) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetRealtimeLogConfig +func (c *CloudFront) GetRealtimeLogConfig(input *GetRealtimeLogConfigInput) (*GetRealtimeLogConfigOutput, error) { + req, out := c.GetRealtimeLogConfigRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// GetRealtimeLogConfigWithContext is the same as GetRealtimeLogConfig with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See GetRealtimeLogConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *CloudFront) GetRealtimeLogConfigWithContext(ctx aws.Context, input *GetRealtimeLogConfigInput, opts ...request.Option) (*GetRealtimeLogConfigOutput, error) { + req, out := c.GetRealtimeLogConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateCloudFrontOriginAccessIdentity = "UpdateCloudFrontOriginAccessIdentity2019_03_26" +const opGetStreamingDistribution = "GetStreamingDistribution2020_05_31" -// UpdateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the -// client's request for the UpdateCloudFrontOriginAccessIdentity operation. The "output" return +// GetStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the GetStreamingDistribution operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateCloudFrontOriginAccessIdentity for more information on using the UpdateCloudFrontOriginAccessIdentity +// See GetStreamingDistribution for more information on using the GetStreamingDistribution // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // -// // Example sending a request using the UpdateCloudFrontOriginAccessIdentityRequest method. -// req, resp := client.UpdateCloudFrontOriginAccessIdentityRequest(params) +// // Example sending a request using the GetStreamingDistributionRequest method. +// req, resp := client.GetStreamingDistributionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateCloudFrontOriginAccessIdentity -func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityRequest(input *UpdateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *UpdateCloudFrontOriginAccessIdentityOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetStreamingDistribution +func (c *CloudFront) GetStreamingDistributionRequest(input *GetStreamingDistributionInput) (req *request.Request, output *GetStreamingDistributionOutput) { op := &request.Operation{ - Name: opUpdateCloudFrontOriginAccessIdentity, - HTTPMethod: "PUT", - HTTPPath: "/2019-03-26/origin-access-identity/cloudfront/{Id}/config", + Name: opGetStreamingDistribution, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/streaming-distribution/{Id}", } if input == nil { - input = &UpdateCloudFrontOriginAccessIdentityInput{} + input = &GetStreamingDistributionInput{} } - output = &UpdateCloudFrontOriginAccessIdentityOutput{} + output = &GetStreamingDistributionOutput{} req = c.newRequest(op, input, output) return } -// UpdateCloudFrontOriginAccessIdentity API operation for Amazon CloudFront. +// GetStreamingDistribution API operation for Amazon CloudFront. // -// Update an origin access identity. +// Gets information about a specified RTMP distribution, including the distribution +// configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation UpdateCloudFrontOriginAccessIdentity for usage and error information. +// API operation GetStreamingDistribution for usage and error information. // // Returned Error Codes: +// * ErrCodeNoSuchStreamingDistribution "NoSuchStreamingDistribution" +// The specified streaming distribution does not exist. +// // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeIllegalUpdate "IllegalUpdate" -// Origin and CallerReference cannot be updated. -// -// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" -// The If-Match version is missing or not valid for the distribution. -// -// * ErrCodeMissingBody "MissingBody" -// This operation requires a body. Ensure that the body is present and the Content-Type -// header is set. -// -// * ErrCodeNoSuchCloudFrontOriginAccessIdentity "NoSuchCloudFrontOriginAccessIdentity" -// The specified origin access identity does not exist. -// -// * ErrCodePreconditionFailed "PreconditionFailed" -// The precondition given in one or more of the request-header fields evaluated -// to false. -// -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. -// -// * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items don't match. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateCloudFrontOriginAccessIdentity -func (c *CloudFront) UpdateCloudFrontOriginAccessIdentity(input *UpdateCloudFrontOriginAccessIdentityInput) (*UpdateCloudFrontOriginAccessIdentityOutput, error) { - req, out := c.UpdateCloudFrontOriginAccessIdentityRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetStreamingDistribution +func (c *CloudFront) GetStreamingDistribution(input *GetStreamingDistributionInput) (*GetStreamingDistributionOutput, error) { + req, out := c.GetStreamingDistributionRequest(input) return out, req.Send() } -// UpdateCloudFrontOriginAccessIdentityWithContext is the same as UpdateCloudFrontOriginAccessIdentity with the addition of +// GetStreamingDistributionWithContext is the same as GetStreamingDistribution with the addition of // the ability to pass a context and additional request options. // -// See UpdateCloudFrontOriginAccessIdentity for details on how to use this API operation. +// See GetStreamingDistribution for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityWithContext(ctx aws.Context, input *UpdateCloudFrontOriginAccessIdentityInput, opts ...request.Option) (*UpdateCloudFrontOriginAccessIdentityOutput, error) { - req, out := c.UpdateCloudFrontOriginAccessIdentityRequest(input) +func (c *CloudFront) GetStreamingDistributionWithContext(ctx aws.Context, input *GetStreamingDistributionInput, opts ...request.Option) (*GetStreamingDistributionOutput, error) { + req, out := c.GetStreamingDistributionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDistribution = "UpdateDistribution2019_03_26" +const opGetStreamingDistributionConfig = "GetStreamingDistributionConfig2020_05_31" -// UpdateDistributionRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDistribution operation. The "output" return +// GetStreamingDistributionConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetStreamingDistributionConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDistribution for more information on using the UpdateDistribution +// See GetStreamingDistributionConfig for more information on using the GetStreamingDistributionConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateDistributionRequest method. -// req, resp := client.UpdateDistributionRequest(params) +// // Example sending a request using the GetStreamingDistributionConfigRequest method. 
+// req, resp := client.GetStreamingDistributionConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateDistribution -func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) (req *request.Request, output *UpdateDistributionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetStreamingDistributionConfig +func (c *CloudFront) GetStreamingDistributionConfigRequest(input *GetStreamingDistributionConfigInput) (req *request.Request, output *GetStreamingDistributionConfigOutput) { op := &request.Operation{ - Name: opUpdateDistribution, - HTTPMethod: "PUT", - HTTPPath: "/2019-03-26/distribution/{Id}/config", + Name: opGetStreamingDistributionConfig, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/streaming-distribution/{Id}/config", } if input == nil { - input = &UpdateDistributionInput{} + input = &GetStreamingDistributionConfigInput{} } - output = &UpdateDistributionOutput{} + output = &GetStreamingDistributionConfigOutput{} req = c.newRequest(op, input, output) return } -// UpdateDistribution API operation for Amazon CloudFront. +// GetStreamingDistributionConfig API operation for Amazon CloudFront. // -// Updates the configuration for a web distribution. +// Get the configuration information about a streaming distribution. // -// When you update a distribution, there are more required fields than when -// you create a distribution. When you update your distribution by using this -// API action, follow the steps here to get the current configuration and then -// make your updates, to make sure that you include all of the required fields. -// To view a summary, see Required Fields for Create Distribution and Update -// Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-overview-required-fields.html) -// in the Amazon CloudFront Developer Guide. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// The update process includes getting the current distribution configuration, -// updating the XML document that is returned to make your changes, and then -// submitting an UpdateDistribution request to make the updates. +// See the AWS API reference guide for Amazon CloudFront's +// API operation GetStreamingDistributionConfig for usage and error information. // -// For information about updating a distribution using the CloudFront console -// instead, see Creating a Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-creating-console.html) -// in the Amazon CloudFront Developer Guide. +// Returned Error Codes: +// * ErrCodeNoSuchStreamingDistribution "NoSuchStreamingDistribution" +// The specified streaming distribution does not exist. // -// To update a web distribution using the CloudFront API +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. // -// Submit a GetDistributionConfig (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_GetDistributionConfig.html) -// request to get the current configuration and an Etag header for the distribution. 
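A hedged sketch of the two streaming-distribution read operations introduced in this hunk: GetStreamingDistribution for the full RTMP distribution details and GetStreamingDistributionConfig for the configuration plus the ETag that a later update would send as If-Match. The distribution ID is a placeholder and the output field names are assumptions about the vendored types.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	client := cloudfront.New(session.Must(session.NewSession()))
	id := aws.String("EXAMPLESTREAMINGID") // hypothetical streaming distribution ID

	// Full details of the RTMP distribution, including its configuration.
	dist, err := client.GetStreamingDistribution(&cloudfront.GetStreamingDistributionInput{Id: id})
	if err != nil {
		log.Fatalf("GetStreamingDistribution failed: %v", err)
	}
	fmt.Println(aws.StringValue(dist.StreamingDistribution.Status))

	// Configuration only; the returned ETag is what a subsequent update request
	// would carry in its If-Match header.
	cfg, err := client.GetStreamingDistributionConfig(&cloudfront.GetStreamingDistributionConfigInput{Id: id})
	if err != nil {
		log.Fatalf("GetStreamingDistributionConfig failed: %v", err)
	}
	fmt.Println(aws.StringValue(cfg.ETag))
}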
+// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/GetStreamingDistributionConfig +func (c *CloudFront) GetStreamingDistributionConfig(input *GetStreamingDistributionConfigInput) (*GetStreamingDistributionConfigOutput, error) { + req, out := c.GetStreamingDistributionConfigRequest(input) + return out, req.Send() +} + +// GetStreamingDistributionConfigWithContext is the same as GetStreamingDistributionConfig with the addition of +// the ability to pass a context and additional request options. // -// If you update the distribution again, you must get a new Etag header. +// See GetStreamingDistributionConfig for details on how to use this API operation. // -// Update the XML document that was returned in the response to your GetDistributionConfig -// request to include your changes. +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) GetStreamingDistributionConfigWithContext(ctx aws.Context, input *GetStreamingDistributionConfigInput, opts ...request.Option) (*GetStreamingDistributionConfigOutput, error) { + req, out := c.GetStreamingDistributionConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListCachePolicies = "ListCachePolicies2020_05_31" + +// ListCachePoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListCachePolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// When you edit the XML file, be aware of the following: +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// * You must strip out the ETag parameter that is returned. +// See ListCachePolicies for more information on using the ListCachePolicies +// API call, and error handling. // -// * Additional fields are required when you update a distribution. There -// may be fields included in the XML file for features that you haven't configured -// for your distribution. This is expected and required to successfully update -// the distribution. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// * You can't change the value of CallerReference. If you try to change -// this value, CloudFront returns an IllegalUpdate error. // -// * The new configuration replaces the existing configuration; the values -// that you specify in an UpdateDistribution request are not merged into -// your existing configuration. When you add, delete, or replace values in -// an element that allows multiple values (for example, CNAME), you must -// specify all of the values that you want to appear in the updated distribution. -// In addition, you must update the corresponding Quantity element. +// // Example sending a request using the ListCachePoliciesRequest method. 
+// req, resp := client.ListCachePoliciesRequest(params) // -// Submit an UpdateDistribution request to update the configuration for your -// distribution: +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// * In the request body, include the XML document that you updated in Step -// 2. The request body must include an XML document with a DistributionConfig -// element. +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListCachePolicies +func (c *CloudFront) ListCachePoliciesRequest(input *ListCachePoliciesInput) (req *request.Request, output *ListCachePoliciesOutput) { + op := &request.Operation{ + Name: opListCachePolicies, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/cache-policy", + } + + if input == nil { + input = &ListCachePoliciesInput{} + } + + output = &ListCachePoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListCachePolicies API operation for Amazon CloudFront. // -// * Set the value of the HTTP If-Match header to the value of the ETag header -// that CloudFront returned when you submitted the GetDistributionConfig -// request in Step 1. +// Gets a list of cache policies. // -// Review the response to the UpdateDistribution request to confirm that the -// configuration was successfully updated. +// You can optionally apply a filter to return only the managed policies created +// by AWS, or only the custom policies created in your AWS account. // -// Optional: Submit a GetDistribution (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_GetDistribution.html) -// request to confirm that your changes have propagated. When propagation is -// complete, the value of Status is Deployed. +// You can optionally specify the maximum number of items to receive in the +// response. If the total number of items in the list exceeds the maximum that +// you specify, or the default maximum, the response is paginated. To get the +// next page of items, send a subsequent request that specifies the NextMarker +// value from the current response as the Marker value in the subsequent request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation UpdateDistribution for usage and error information. +// API operation ListCachePolicies for usage and error information. // // Returned Error Codes: // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" -// The CNAME specified is already defined for CloudFront. -// -// * ErrCodeIllegalUpdate "IllegalUpdate" -// Origin and CallerReference cannot be updated. -// -// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" -// The If-Match version is missing or not valid for the distribution. -// -// * ErrCodeMissingBody "MissingBody" -// This operation requires a body. Ensure that the body is present and the Content-Type -// header is set. -// -// * ErrCodeNoSuchDistribution "NoSuchDistribution" -// The specified distribution does not exist. -// -// * ErrCodePreconditionFailed "PreconditionFailed" -// The precondition given in one or more of the request-header fields evaluated -// to false. -// -// * ErrCodeTooManyDistributionCNAMEs "TooManyDistributionCNAMEs" -// Your request contains more CNAMEs than are allowed per distribution. 
-// -// * ErrCodeInvalidDefaultRootObject "InvalidDefaultRootObject" -// The default root object file name is too big or contains an invalid character. -// -// * ErrCodeInvalidRelativePath "InvalidRelativePath" -// The relative path is too big, is not URL-encoded, or does not begin with -// a slash (/). -// -// * ErrCodeInvalidErrorCode "InvalidErrorCode" -// An invalid error code was specified. -// -// * ErrCodeInvalidResponseCode "InvalidResponseCode" -// A response code is not valid. +// * ErrCodeNoSuchCachePolicy "NoSuchCachePolicy" +// The cache policy does not exist. // // * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. -// -// * ErrCodeInvalidOriginAccessIdentity "InvalidOriginAccessIdentity" -// The origin access identity is not valid or doesn't exist. -// -// * ErrCodeTooManyTrustedSigners "TooManyTrustedSigners" -// Your request contains more trusted signers than are allowed per distribution. -// -// * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" -// One or more of your trusted signers don't exist. -// -// * ErrCodeInvalidViewerCertificate "InvalidViewerCertificate" -// A viewer certificate specified is not valid. -// -// * ErrCodeInvalidMinimumProtocolVersion "InvalidMinimumProtocolVersion" -// The minimum protocol version specified is not valid. -// -// * ErrCodeInvalidRequiredProtocol "InvalidRequiredProtocol" -// This operation requires the HTTPS protocol. Ensure that you specify the HTTPS -// protocol in your request, or omit the RequiredProtocols element from your -// distribution configuration. -// -// * ErrCodeNoSuchOrigin "NoSuchOrigin" -// No origin exists with the specified Origin Id. -// -// * ErrCodeTooManyOrigins "TooManyOrigins" -// You cannot create more origins for the distribution. -// -// * ErrCodeTooManyOriginGroupsPerDistribution "TooManyOriginGroupsPerDistribution" -// Processing your request would cause you to exceed the maximum number of origin -// groups allowed. -// -// * ErrCodeTooManyCacheBehaviors "TooManyCacheBehaviors" -// You cannot create more cache behaviors for the distribution. -// -// * ErrCodeTooManyCookieNamesInWhiteList "TooManyCookieNamesInWhiteList" -// Your request contains more cookie names in the whitelist than are allowed -// per cache behavior. -// -// * ErrCodeInvalidForwardCookies "InvalidForwardCookies" -// Your request contains forward cookies option which doesn't match with the -// expectation for the whitelisted list of cookie names. Either list of cookie -// names has been specified when not allowed or list of cookie names is missing -// when expected. -// -// * ErrCodeTooManyHeadersInForwardedValues "TooManyHeadersInForwardedValues" -// Your request contains too many headers in forwarded values. -// -// * ErrCodeInvalidHeadersForS3Origin "InvalidHeadersForS3Origin" -// The headers specified are not valid for an Amazon S3 origin. -// -// * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items don't match. -// -// * ErrCodeTooManyCertificates "TooManyCertificates" -// You cannot create anymore custom SSL/TLS certificates. -// -// * ErrCodeInvalidLocationCode "InvalidLocationCode" -// The location code specified is not valid. -// -// * ErrCodeInvalidGeoRestrictionParameter "InvalidGeoRestrictionParameter" -// The specified geo restriction parameter is not valid. -// -// * ErrCodeInvalidTTLOrder "InvalidTTLOrder" -// The TTL order specified is not valid. 
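The Marker/NextMarker pagination described for ListCachePolicies above can be driven by hand. A sketch under the assumption that the output wraps a CachePolicyList carrying Items and NextMarker, with the managed/custom filter left as a commented, unverified option.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	client := cloudfront.New(session.Must(session.NewSession()))

	input := &cloudfront.ListCachePoliciesInput{
		// Type: aws.String("custom"), // assumed optional filter (managed vs. custom policies)
	}
	total := 0
	for {
		out, err := client.ListCachePolicies(input)
		if err != nil {
			log.Fatalf("ListCachePolicies failed: %v", err)
		}
		total += len(out.CachePolicyList.Items)
		// A truncated response carries NextMarker; feed it back as Marker to request the next page.
		if out.CachePolicyList.NextMarker == nil {
			break
		}
		input.Marker = out.CachePolicyList.NextMarker
	}
	fmt.Printf("found %d cache policies\n", total)
}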
-// -// * ErrCodeInvalidWebACLId "InvalidWebACLId" -// A web ACL ID specified is not valid. To specify a web ACL created using the -// latest version of AWS WAF, use the ACL ARN, for example arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a. -// To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example -// 473e64fd-f30b-4765-81a0-62ad96dd167a. -// -// * ErrCodeTooManyOriginCustomHeaders "TooManyOriginCustomHeaders" -// Your request contains too many origin custom headers. -// -// * ErrCodeTooManyQueryStringParameters "TooManyQueryStringParameters" -// Your request contains too many query string parameters. -// -// * ErrCodeInvalidQueryStringParameters "InvalidQueryStringParameters" -// The query string parameters specified are not valid. -// -// * ErrCodeTooManyDistributionsWithLambdaAssociations "TooManyDistributionsWithLambdaAssociations" -// Processing your request would cause the maximum number of distributions with -// Lambda function associations per owner to be exceeded. -// -// * ErrCodeTooManyLambdaFunctionAssociations "TooManyLambdaFunctionAssociations" -// Your request contains more Lambda function associations than are allowed -// per distribution. -// -// * ErrCodeInvalidLambdaFunctionAssociation "InvalidLambdaFunctionAssociation" -// The specified Lambda function association is invalid. -// -// * ErrCodeInvalidOriginReadTimeout "InvalidOriginReadTimeout" -// The read timeout specified for the origin is not valid. -// -// * ErrCodeInvalidOriginKeepaliveTimeout "InvalidOriginKeepaliveTimeout" -// The keep alive timeout specified for the origin is not valid. -// -// * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" -// The specified configuration for field-level encryption doesn't exist. -// -// * ErrCodeIllegalFieldLevelEncryptionConfigAssociationWithCacheBehavior "IllegalFieldLevelEncryptionConfigAssociationWithCacheBehavior" -// The specified configuration for field-level encryption can't be associated -// with the specified cache behavior. -// -// * ErrCodeTooManyDistributionsAssociatedToFieldLevelEncryptionConfig "TooManyDistributionsAssociatedToFieldLevelEncryptionConfig" -// The maximum number of distributions have been associated with the specified -// configuration for field-level encryption. +// An argument is invalid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateDistribution -func (c *CloudFront) UpdateDistribution(input *UpdateDistributionInput) (*UpdateDistributionOutput, error) { - req, out := c.UpdateDistributionRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListCachePolicies +func (c *CloudFront) ListCachePolicies(input *ListCachePoliciesInput) (*ListCachePoliciesOutput, error) { + req, out := c.ListCachePoliciesRequest(input) return out, req.Send() } -// UpdateDistributionWithContext is the same as UpdateDistribution with the addition of +// ListCachePoliciesWithContext is the same as ListCachePolicies with the addition of // the ability to pass a context and additional request options. // -// See UpdateDistribution for details on how to use this API operation. +// See ListCachePolicies for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) UpdateDistributionWithContext(ctx aws.Context, input *UpdateDistributionInput, opts ...request.Option) (*UpdateDistributionOutput, error) { - req, out := c.UpdateDistributionRequest(input) +func (c *CloudFront) ListCachePoliciesWithContext(ctx aws.Context, input *ListCachePoliciesInput, opts ...request.Option) (*ListCachePoliciesOutput, error) { + req, out := c.ListCachePoliciesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateFieldLevelEncryptionConfig = "UpdateFieldLevelEncryptionConfig2019_03_26" +const opListCloudFrontOriginAccessIdentities = "ListCloudFrontOriginAccessIdentities2020_05_31" -// UpdateFieldLevelEncryptionConfigRequest generates a "aws/request.Request" representing the -// client's request for the UpdateFieldLevelEncryptionConfig operation. The "output" return +// ListCloudFrontOriginAccessIdentitiesRequest generates a "aws/request.Request" representing the +// client's request for the ListCloudFrontOriginAccessIdentities operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateFieldLevelEncryptionConfig for more information on using the UpdateFieldLevelEncryptionConfig +// See ListCloudFrontOriginAccessIdentities for more information on using the ListCloudFrontOriginAccessIdentities // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateFieldLevelEncryptionConfigRequest method. -// req, resp := client.UpdateFieldLevelEncryptionConfigRequest(params) +// // Example sending a request using the ListCloudFrontOriginAccessIdentitiesRequest method. 
+// req, resp := client.ListCloudFrontOriginAccessIdentitiesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateFieldLevelEncryptionConfig -func (c *CloudFront) UpdateFieldLevelEncryptionConfigRequest(input *UpdateFieldLevelEncryptionConfigInput) (req *request.Request, output *UpdateFieldLevelEncryptionConfigOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListCloudFrontOriginAccessIdentities +func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesRequest(input *ListCloudFrontOriginAccessIdentitiesInput) (req *request.Request, output *ListCloudFrontOriginAccessIdentitiesOutput) { op := &request.Operation{ - Name: opUpdateFieldLevelEncryptionConfig, - HTTPMethod: "PUT", - HTTPPath: "/2019-03-26/field-level-encryption/{Id}/config", + Name: opListCloudFrontOriginAccessIdentities, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/origin-access-identity/cloudfront", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"CloudFrontOriginAccessIdentityList.NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "CloudFrontOriginAccessIdentityList.IsTruncated", + }, } if input == nil { - input = &UpdateFieldLevelEncryptionConfigInput{} + input = &ListCloudFrontOriginAccessIdentitiesInput{} } - output = &UpdateFieldLevelEncryptionConfigOutput{} + output = &ListCloudFrontOriginAccessIdentitiesOutput{} req = c.newRequest(op, input, output) return } -// UpdateFieldLevelEncryptionConfig API operation for Amazon CloudFront. +// ListCloudFrontOriginAccessIdentities API operation for Amazon CloudFront. // -// Update a field-level encryption configuration. +// Lists origin access identities. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation UpdateFieldLevelEncryptionConfig for usage and error information. +// API operation ListCloudFrontOriginAccessIdentities for usage and error information. // // Returned Error Codes: -// * ErrCodeAccessDenied "AccessDenied" -// Access denied. -// -// * ErrCodeIllegalUpdate "IllegalUpdate" -// Origin and CallerReference cannot be updated. -// -// * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items don't match. -// // * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. -// -// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" -// The If-Match version is missing or not valid for the distribution. -// -// * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" -// The specified profile for field-level encryption doesn't exist. -// -// * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" -// The specified configuration for field-level encryption doesn't exist. -// -// * ErrCodePreconditionFailed "PreconditionFailed" -// The precondition given in one or more of the request-header fields evaluated -// to false. -// -// * ErrCodeTooManyFieldLevelEncryptionQueryArgProfiles "TooManyFieldLevelEncryptionQueryArgProfiles" -// The maximum number of query arg profiles for field-level encryption have -// been created. 
-// -// * ErrCodeTooManyFieldLevelEncryptionContentTypeProfiles "TooManyFieldLevelEncryptionContentTypeProfiles" -// The maximum number of content type profiles for field-level encryption have -// been created. -// -// * ErrCodeQueryArgProfileEmpty "QueryArgProfileEmpty" -// No profile specified for the field-level encryption query argument. +// An argument is invalid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateFieldLevelEncryptionConfig -func (c *CloudFront) UpdateFieldLevelEncryptionConfig(input *UpdateFieldLevelEncryptionConfigInput) (*UpdateFieldLevelEncryptionConfigOutput, error) { - req, out := c.UpdateFieldLevelEncryptionConfigRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListCloudFrontOriginAccessIdentities +func (c *CloudFront) ListCloudFrontOriginAccessIdentities(input *ListCloudFrontOriginAccessIdentitiesInput) (*ListCloudFrontOriginAccessIdentitiesOutput, error) { + req, out := c.ListCloudFrontOriginAccessIdentitiesRequest(input) return out, req.Send() } -// UpdateFieldLevelEncryptionConfigWithContext is the same as UpdateFieldLevelEncryptionConfig with the addition of +// ListCloudFrontOriginAccessIdentitiesWithContext is the same as ListCloudFrontOriginAccessIdentities with the addition of // the ability to pass a context and additional request options. // -// See UpdateFieldLevelEncryptionConfig for details on how to use this API operation. +// See ListCloudFrontOriginAccessIdentities for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) UpdateFieldLevelEncryptionConfigWithContext(ctx aws.Context, input *UpdateFieldLevelEncryptionConfigInput, opts ...request.Option) (*UpdateFieldLevelEncryptionConfigOutput, error) { - req, out := c.UpdateFieldLevelEncryptionConfigRequest(input) +func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesWithContext(ctx aws.Context, input *ListCloudFrontOriginAccessIdentitiesInput, opts ...request.Option) (*ListCloudFrontOriginAccessIdentitiesOutput, error) { + req, out := c.ListCloudFrontOriginAccessIdentitiesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateFieldLevelEncryptionProfile = "UpdateFieldLevelEncryptionProfile2019_03_26" - -// UpdateFieldLevelEncryptionProfileRequest generates a "aws/request.Request" representing the -// client's request for the UpdateFieldLevelEncryptionProfile operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateFieldLevelEncryptionProfile for more information on using the UpdateFieldLevelEncryptionProfile -// API call, and error handling. +// ListCloudFrontOriginAccessIdentitiesPages iterates over the pages of a ListCloudFrontOriginAccessIdentities operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// See ListCloudFrontOriginAccessIdentities method for more information on how to use this operation. // +// Note: This operation can generate multiple requests to a service. // -// // Example sending a request using the UpdateFieldLevelEncryptionProfileRequest method. -// req, resp := client.UpdateFieldLevelEncryptionProfileRequest(params) +// // Example iterating over at most 3 pages of a ListCloudFrontOriginAccessIdentities operation. +// pageNum := 0 +// err := client.ListCloudFrontOriginAccessIdentitiesPages(params, +// func(page *cloudfront.ListCloudFrontOriginAccessIdentitiesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesPages(input *ListCloudFrontOriginAccessIdentitiesInput, fn func(*ListCloudFrontOriginAccessIdentitiesOutput, bool) bool) error { + return c.ListCloudFrontOriginAccessIdentitiesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListCloudFrontOriginAccessIdentitiesPagesWithContext same as ListCloudFrontOriginAccessIdentitiesPages except +// it takes a Context and allows setting request options on the pages. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateFieldLevelEncryptionProfile -func (c *CloudFront) UpdateFieldLevelEncryptionProfileRequest(input *UpdateFieldLevelEncryptionProfileInput) (req *request.Request, output *UpdateFieldLevelEncryptionProfileOutput) { +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesPagesWithContext(ctx aws.Context, input *ListCloudFrontOriginAccessIdentitiesInput, fn func(*ListCloudFrontOriginAccessIdentitiesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListCloudFrontOriginAccessIdentitiesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListCloudFrontOriginAccessIdentitiesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListCloudFrontOriginAccessIdentitiesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDistributions = "ListDistributions2020_05_31" + +// ListDistributionsRequest generates a "aws/request.Request" representing the +// client's request for the ListDistributions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDistributions for more information on using the ListDistributions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDistributionsRequest method. 
+// req, resp := client.ListDistributionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListDistributions +func (c *CloudFront) ListDistributionsRequest(input *ListDistributionsInput) (req *request.Request, output *ListDistributionsOutput) { op := &request.Operation{ - Name: opUpdateFieldLevelEncryptionProfile, - HTTPMethod: "PUT", - HTTPPath: "/2019-03-26/field-level-encryption-profile/{Id}/config", + Name: opListDistributions, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/distribution", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"DistributionList.NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "DistributionList.IsTruncated", + }, } if input == nil { - input = &UpdateFieldLevelEncryptionProfileInput{} + input = &ListDistributionsInput{} } - output = &UpdateFieldLevelEncryptionProfileOutput{} + output = &ListDistributionsOutput{} req = c.newRequest(op, input, output) return } -// UpdateFieldLevelEncryptionProfile API operation for Amazon CloudFront. +// ListDistributions API operation for Amazon CloudFront. // -// Update a field-level encryption profile. +// List CloudFront distributions. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation UpdateFieldLevelEncryptionProfile for usage and error information. +// API operation ListDistributions for usage and error information. // // Returned Error Codes: -// * ErrCodeAccessDenied "AccessDenied" -// Access denied. -// -// * ErrCodeFieldLevelEncryptionProfileAlreadyExists "FieldLevelEncryptionProfileAlreadyExists" -// The specified profile for field-level encryption already exists. -// -// * ErrCodeIllegalUpdate "IllegalUpdate" -// Origin and CallerReference cannot be updated. -// -// * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items don't match. -// // * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. -// -// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" -// The If-Match version is missing or not valid for the distribution. -// -// * ErrCodeNoSuchPublicKey "NoSuchPublicKey" -// The specified public key doesn't exist. -// -// * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" -// The specified profile for field-level encryption doesn't exist. -// -// * ErrCodePreconditionFailed "PreconditionFailed" -// The precondition given in one or more of the request-header fields evaluated -// to false. -// -// * ErrCodeFieldLevelEncryptionProfileSizeExceeded "FieldLevelEncryptionProfileSizeExceeded" -// The maximum size of a profile for field-level encryption was exceeded. -// -// * ErrCodeTooManyFieldLevelEncryptionEncryptionEntities "TooManyFieldLevelEncryptionEncryptionEntities" -// The maximum number of encryption entities for field-level encryption have -// been created. -// -// * ErrCodeTooManyFieldLevelEncryptionFieldPatterns "TooManyFieldLevelEncryptionFieldPatterns" -// The maximum number of field patterns for field-level encryption have been -// created. +// An argument is invalid. 
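The generated Pages helpers shown above wrap the same Marker/NextMarker handling behind a per-page callback; returning false from the callback stops the iteration early. A small usage sketch for ListCloudFrontOriginAccessIdentitiesPages; the Id field read from each summary is an assumption about the vendored types.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	client := cloudfront.New(session.Must(session.NewSession()))

	// The callback is invoked once per page; lastPage is true on the final page.
	err := client.ListCloudFrontOriginAccessIdentitiesPages(
		&cloudfront.ListCloudFrontOriginAccessIdentitiesInput{},
		func(page *cloudfront.ListCloudFrontOriginAccessIdentitiesOutput, lastPage bool) bool {
			for _, oai := range page.CloudFrontOriginAccessIdentityList.Items {
				fmt.Println(aws.StringValue(oai.Id)) // assumed summary field
			}
			return true // keep paging until the last page
		})
	if err != nil {
		log.Fatalf("ListCloudFrontOriginAccessIdentitiesPages failed: %v", err)
	}
}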
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateFieldLevelEncryptionProfile -func (c *CloudFront) UpdateFieldLevelEncryptionProfile(input *UpdateFieldLevelEncryptionProfileInput) (*UpdateFieldLevelEncryptionProfileOutput, error) { - req, out := c.UpdateFieldLevelEncryptionProfileRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListDistributions +func (c *CloudFront) ListDistributions(input *ListDistributionsInput) (*ListDistributionsOutput, error) { + req, out := c.ListDistributionsRequest(input) return out, req.Send() } -// UpdateFieldLevelEncryptionProfileWithContext is the same as UpdateFieldLevelEncryptionProfile with the addition of +// ListDistributionsWithContext is the same as ListDistributions with the addition of // the ability to pass a context and additional request options. // -// See UpdateFieldLevelEncryptionProfile for details on how to use this API operation. +// See ListDistributions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) UpdateFieldLevelEncryptionProfileWithContext(ctx aws.Context, input *UpdateFieldLevelEncryptionProfileInput, opts ...request.Option) (*UpdateFieldLevelEncryptionProfileOutput, error) { - req, out := c.UpdateFieldLevelEncryptionProfileRequest(input) +func (c *CloudFront) ListDistributionsWithContext(ctx aws.Context, input *ListDistributionsInput, opts ...request.Option) (*ListDistributionsOutput, error) { + req, out := c.ListDistributionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdatePublicKey = "UpdatePublicKey2019_03_26" +// ListDistributionsPages iterates over the pages of a ListDistributions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDistributions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDistributions operation. +// pageNum := 0 +// err := client.ListDistributionsPages(params, +// func(page *cloudfront.ListDistributionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFront) ListDistributionsPages(input *ListDistributionsInput, fn func(*ListDistributionsOutput, bool) bool) error { + return c.ListDistributionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDistributionsPagesWithContext same as ListDistributionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CloudFront) ListDistributionsPagesWithContext(ctx aws.Context, input *ListDistributionsInput, fn func(*ListDistributionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDistributionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDistributionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDistributionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDistributionsByCachePolicyId = "ListDistributionsByCachePolicyId2020_05_31" -// UpdatePublicKeyRequest generates a "aws/request.Request" representing the -// client's request for the UpdatePublicKey operation. The "output" return +// ListDistributionsByCachePolicyIdRequest generates a "aws/request.Request" representing the +// client's request for the ListDistributionsByCachePolicyId operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdatePublicKey for more information on using the UpdatePublicKey +// See ListDistributionsByCachePolicyId for more information on using the ListDistributionsByCachePolicyId // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdatePublicKeyRequest method. -// req, resp := client.UpdatePublicKeyRequest(params) +// // Example sending a request using the ListDistributionsByCachePolicyIdRequest method. +// req, resp := client.ListDistributionsByCachePolicyIdRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdatePublicKey -func (c *CloudFront) UpdatePublicKeyRequest(input *UpdatePublicKeyInput) (req *request.Request, output *UpdatePublicKeyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListDistributionsByCachePolicyId +func (c *CloudFront) ListDistributionsByCachePolicyIdRequest(input *ListDistributionsByCachePolicyIdInput) (req *request.Request, output *ListDistributionsByCachePolicyIdOutput) { op := &request.Operation{ - Name: opUpdatePublicKey, - HTTPMethod: "PUT", - HTTPPath: "/2019-03-26/public-key/{Id}/config", + Name: opListDistributionsByCachePolicyId, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/distributionsByCachePolicyId/{CachePolicyId}", } if input == nil { - input = &UpdatePublicKeyInput{} + input = &ListDistributionsByCachePolicyIdInput{} } - output = &UpdatePublicKeyOutput{} + output = &ListDistributionsByCachePolicyIdOutput{} req = c.newRequest(op, input, output) return } -// UpdatePublicKey API operation for Amazon CloudFront. +// ListDistributionsByCachePolicyId API operation for Amazon CloudFront. // -// Update public key information. Note that the only value you can change is -// the comment. +// Gets a list of distribution IDs for distributions that have a cache behavior +// that’s associated with the specified cache policy. +// +// You can optionally specify the maximum number of items to receive in the +// response. 
If the total number of items in the list exceeds the maximum that +// you specify, or the default maximum, the response is paginated. To get the +// next page of items, send a subsequent request that specifies the NextMarker +// value from the current response as the Marker value in the subsequent request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation UpdatePublicKey for usage and error information. +// API operation ListDistributionsByCachePolicyId for usage and error information. // // Returned Error Codes: -// * ErrCodeAccessDenied "AccessDenied" -// Access denied. -// -// * ErrCodeCannotChangeImmutablePublicKeyFields "CannotChangeImmutablePublicKeyFields" -// You can't change the value of a public key. +// * ErrCodeNoSuchCachePolicy "NoSuchCachePolicy" +// The cache policy does not exist. // // * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. -// -// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" -// The If-Match version is missing or not valid for the distribution. -// -// * ErrCodeIllegalUpdate "IllegalUpdate" -// Origin and CallerReference cannot be updated. -// -// * ErrCodeNoSuchPublicKey "NoSuchPublicKey" -// The specified public key doesn't exist. +// An argument is invalid. // -// * ErrCodePreconditionFailed "PreconditionFailed" -// The precondition given in one or more of the request-header fields evaluated -// to false. +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdatePublicKey -func (c *CloudFront) UpdatePublicKey(input *UpdatePublicKeyInput) (*UpdatePublicKeyOutput, error) { - req, out := c.UpdatePublicKeyRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListDistributionsByCachePolicyId +func (c *CloudFront) ListDistributionsByCachePolicyId(input *ListDistributionsByCachePolicyIdInput) (*ListDistributionsByCachePolicyIdOutput, error) { + req, out := c.ListDistributionsByCachePolicyIdRequest(input) return out, req.Send() } -// UpdatePublicKeyWithContext is the same as UpdatePublicKey with the addition of +// ListDistributionsByCachePolicyIdWithContext is the same as ListDistributionsByCachePolicyId with the addition of // the ability to pass a context and additional request options. // -// See UpdatePublicKey for details on how to use this API operation. +// See ListDistributionsByCachePolicyId for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) UpdatePublicKeyWithContext(ctx aws.Context, input *UpdatePublicKeyInput, opts ...request.Option) (*UpdatePublicKeyOutput, error) { - req, out := c.UpdatePublicKeyRequest(input) +func (c *CloudFront) ListDistributionsByCachePolicyIdWithContext(ctx aws.Context, input *ListDistributionsByCachePolicyIdInput, opts ...request.Option) (*ListDistributionsByCachePolicyIdOutput, error) { + req, out := c.ListDistributionsByCachePolicyIdRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opUpdateStreamingDistribution = "UpdateStreamingDistribution2019_03_26" +const opListDistributionsByOriginRequestPolicyId = "ListDistributionsByOriginRequestPolicyId2020_05_31" -// UpdateStreamingDistributionRequest generates a "aws/request.Request" representing the -// client's request for the UpdateStreamingDistribution operation. The "output" return +// ListDistributionsByOriginRequestPolicyIdRequest generates a "aws/request.Request" representing the +// client's request for the ListDistributionsByOriginRequestPolicyId operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateStreamingDistribution for more information on using the UpdateStreamingDistribution +// See ListDistributionsByOriginRequestPolicyId for more information on using the ListDistributionsByOriginRequestPolicyId // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateStreamingDistributionRequest method. -// req, resp := client.UpdateStreamingDistributionRequest(params) +// // Example sending a request using the ListDistributionsByOriginRequestPolicyIdRequest method. +// req, resp := client.ListDistributionsByOriginRequestPolicyIdRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateStreamingDistribution -func (c *CloudFront) UpdateStreamingDistributionRequest(input *UpdateStreamingDistributionInput) (req *request.Request, output *UpdateStreamingDistributionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListDistributionsByOriginRequestPolicyId +func (c *CloudFront) ListDistributionsByOriginRequestPolicyIdRequest(input *ListDistributionsByOriginRequestPolicyIdInput) (req *request.Request, output *ListDistributionsByOriginRequestPolicyIdOutput) { op := &request.Operation{ - Name: opUpdateStreamingDistribution, - HTTPMethod: "PUT", - HTTPPath: "/2019-03-26/streaming-distribution/{Id}/config", + Name: opListDistributionsByOriginRequestPolicyId, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/distributionsByOriginRequestPolicyId/{OriginRequestPolicyId}", } if input == nil { - input = &UpdateStreamingDistributionInput{} + input = &ListDistributionsByOriginRequestPolicyIdInput{} } - output = &UpdateStreamingDistributionOutput{} + output = &ListDistributionsByOriginRequestPolicyIdOutput{} req = c.newRequest(op, input, output) return } -// UpdateStreamingDistribution API operation for Amazon CloudFront. +// ListDistributionsByOriginRequestPolicyId API operation for Amazon CloudFront. // -// Update a streaming distribution. +// Gets a list of distribution IDs for distributions that have a cache behavior +// that’s associated with the specified origin request policy. +// +// You can optionally specify the maximum number of items to receive in the +// response. If the total number of items in the list exceeds the maximum that +// you specify, or the default maximum, the response is paginated. 
To get the +// next page of items, send a subsequent request that specifies the NextMarker +// value from the current response as the Marker value in the subsequent request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon CloudFront's -// API operation UpdateStreamingDistribution for usage and error information. +// API operation ListDistributionsByOriginRequestPolicyId for usage and error information. // // Returned Error Codes: +// * ErrCodeNoSuchOriginRequestPolicy "NoSuchOriginRequestPolicy" +// The origin request policy does not exist. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// // * ErrCodeAccessDenied "AccessDenied" // Access denied. // -// * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" -// The CNAME specified is already defined for CloudFront. +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListDistributionsByOriginRequestPolicyId +func (c *CloudFront) ListDistributionsByOriginRequestPolicyId(input *ListDistributionsByOriginRequestPolicyIdInput) (*ListDistributionsByOriginRequestPolicyIdOutput, error) { + req, out := c.ListDistributionsByOriginRequestPolicyIdRequest(input) + return out, req.Send() +} + +// ListDistributionsByOriginRequestPolicyIdWithContext is the same as ListDistributionsByOriginRequestPolicyId with the addition of +// the ability to pass a context and additional request options. // -// * ErrCodeIllegalUpdate "IllegalUpdate" -// Origin and CallerReference cannot be updated. +// See ListDistributionsByOriginRequestPolicyId for details on how to use this API operation. // -// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" -// The If-Match version is missing or not valid for the distribution. +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) ListDistributionsByOriginRequestPolicyIdWithContext(ctx aws.Context, input *ListDistributionsByOriginRequestPolicyIdInput, opts ...request.Option) (*ListDistributionsByOriginRequestPolicyIdOutput, error) { + req, out := c.ListDistributionsByOriginRequestPolicyIdRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListDistributionsByRealtimeLogConfig = "ListDistributionsByRealtimeLogConfig2020_05_31" + +// ListDistributionsByRealtimeLogConfigRequest generates a "aws/request.Request" representing the +// client's request for the ListDistributionsByRealtimeLogConfig operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// * ErrCodeMissingBody "MissingBody" -// This operation requires a body. Ensure that the body is present and the Content-Type -// header is set. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// * ErrCodeNoSuchStreamingDistribution "NoSuchStreamingDistribution" -// The specified streaming distribution does not exist. +// See ListDistributionsByRealtimeLogConfig for more information on using the ListDistributionsByRealtimeLogConfig +// API call, and error handling. 
// -// * ErrCodePreconditionFailed "PreconditionFailed" -// The precondition given in one or more of the request-header fields evaluated -// to false. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// * ErrCodeTooManyStreamingDistributionCNAMEs "TooManyStreamingDistributionCNAMEs" -// Your request contains more CNAMEs than are allowed per distribution. // -// * ErrCodeInvalidArgument "InvalidArgument" -// The argument is invalid. +// // Example sending a request using the ListDistributionsByRealtimeLogConfigRequest method. +// req, resp := client.ListDistributionsByRealtimeLogConfigRequest(params) // -// * ErrCodeInvalidOriginAccessIdentity "InvalidOriginAccessIdentity" -// The origin access identity is not valid or doesn't exist. +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } // -// * ErrCodeTooManyTrustedSigners "TooManyTrustedSigners" -// Your request contains more trusted signers than are allowed per distribution. +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListDistributionsByRealtimeLogConfig +func (c *CloudFront) ListDistributionsByRealtimeLogConfigRequest(input *ListDistributionsByRealtimeLogConfigInput) (req *request.Request, output *ListDistributionsByRealtimeLogConfigOutput) { + op := &request.Operation{ + Name: opListDistributionsByRealtimeLogConfig, + HTTPMethod: "POST", + HTTPPath: "/2020-05-31/distributionsByRealtimeLogConfig/", + } + + if input == nil { + input = &ListDistributionsByRealtimeLogConfigInput{} + } + + output = &ListDistributionsByRealtimeLogConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDistributionsByRealtimeLogConfig API operation for Amazon CloudFront. // -// * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" -// One or more of your trusted signers don't exist. +// Gets a list of distributions that have a cache behavior that’s associated +// with the specified real-time log configuration. // -// * ErrCodeInconsistentQuantities "InconsistentQuantities" -// The value of Quantity and the size of Items don't match. +// You can specify the real-time log configuration by its name or its Amazon +// Resource Name (ARN). You must provide at least one. If you provide both, +// CloudFront uses the name to identify the real-time log configuration to list +// distributions for. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26/UpdateStreamingDistribution -func (c *CloudFront) UpdateStreamingDistribution(input *UpdateStreamingDistributionInput) (*UpdateStreamingDistributionOutput, error) { - req, out := c.UpdateStreamingDistributionRequest(input) +// You can optionally specify the maximum number of items to receive in the +// response. If the total number of items in the list exceeds the maximum that +// you specify, or the default maximum, the response is paginated. To get the +// next page of items, send a subsequent request that specifies the NextMarker +// value from the current response as the Marker value in the subsequent request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation ListDistributionsByRealtimeLogConfig for usage and error information. 
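A hedged sketch of calling the new ListDistributionsByRealtimeLogConfig operation by configuration name; per the doc text above, either the name or the ARN may be supplied, and the name wins if both are set. The exact input and output field names are assumptions about the vendored types.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	client := cloudfront.New(session.Must(session.NewSession()))

	// Identify the real-time log configuration by name; an ARN field is the alternative.
	out, err := client.ListDistributionsByRealtimeLogConfig(&cloudfront.ListDistributionsByRealtimeLogConfigInput{
		RealtimeLogConfigName: aws.String("example-realtime-log-config"), // assumed field name
	})
	if err != nil {
		log.Fatalf("ListDistributionsByRealtimeLogConfig failed: %v", err)
	}
	for _, d := range out.DistributionList.Items {
		fmt.Println(aws.StringValue(d.Id)) // distributions using the named configuration
	}
}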
+// +// Returned Error Codes: +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListDistributionsByRealtimeLogConfig +func (c *CloudFront) ListDistributionsByRealtimeLogConfig(input *ListDistributionsByRealtimeLogConfigInput) (*ListDistributionsByRealtimeLogConfigOutput, error) { + req, out := c.ListDistributionsByRealtimeLogConfigRequest(input) return out, req.Send() } -// UpdateStreamingDistributionWithContext is the same as UpdateStreamingDistribution with the addition of +// ListDistributionsByRealtimeLogConfigWithContext is the same as ListDistributionsByRealtimeLogConfig with the addition of // the ability to pass a context and additional request options. // -// See UpdateStreamingDistribution for details on how to use this API operation. +// See ListDistributionsByRealtimeLogConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CloudFront) UpdateStreamingDistributionWithContext(ctx aws.Context, input *UpdateStreamingDistributionInput, opts ...request.Option) (*UpdateStreamingDistributionOutput, error) { - req, out := c.UpdateStreamingDistributionRequest(input) +func (c *CloudFront) ListDistributionsByRealtimeLogConfigWithContext(ctx aws.Context, input *ListDistributionsByRealtimeLogConfigInput, opts ...request.Option) (*ListDistributionsByRealtimeLogConfigOutput, error) { + req, out := c.ListDistributionsByRealtimeLogConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// A complex type that lists the AWS accounts, if any, that you included in -// the TrustedSigners complex type for this distribution. These are the accounts -// that you want to allow to create signed URLs for private content. +const opListDistributionsByWebACLId = "ListDistributionsByWebACLId2020_05_31" + +// ListDistributionsByWebACLIdRequest generates a "aws/request.Request" representing the +// client's request for the ListDistributionsByWebACLId operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// The Signer complex type lists the AWS account number of the trusted signer -// or self if the signer is the AWS account that created the distribution. The -// Signer element also includes the IDs of any active CloudFront key pairs that -// are associated with the trusted signer's AWS account. If no KeyPairId element -// appears for a Signer, that signer can't create signed URLs. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// For more information, see Serving Private Content through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) -// in the Amazon CloudFront Developer Guide. -type ActiveTrustedSigners struct { - _ struct{} `type:"structure"` - - // Enabled is true if any of the AWS accounts listed in the TrustedSigners complex - // type for this distribution have active CloudFront key pairs. If not, Enabled - // is false. 
- // - // Enabled is a required field - Enabled *bool `type:"boolean" required:"true"` - - // A complex type that contains one Signer complex type for each trusted signer - // that is specified in the TrustedSigners complex type. - Items []*Signer `locationNameList:"Signer" type:"list"` - - // The number of trusted signers specified in the TrustedSigners complex type. - // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` -} +// See ListDistributionsByWebACLId for more information on using the ListDistributionsByWebACLId +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDistributionsByWebACLIdRequest method. +// req, resp := client.ListDistributionsByWebACLIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListDistributionsByWebACLId +func (c *CloudFront) ListDistributionsByWebACLIdRequest(input *ListDistributionsByWebACLIdInput) (req *request.Request, output *ListDistributionsByWebACLIdOutput) { + op := &request.Operation{ + Name: opListDistributionsByWebACLId, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/distributionsByWebACLId/{WebACLId}", + } -// String returns the string representation -func (s ActiveTrustedSigners) String() string { - return awsutil.Prettify(s) -} + if input == nil { + input = &ListDistributionsByWebACLIdInput{} + } -// GoString returns the string representation -func (s ActiveTrustedSigners) GoString() string { - return s.String() + output = &ListDistributionsByWebACLIdOutput{} + req = c.newRequest(op, input, output) + return } -// SetEnabled sets the Enabled field's value. -func (s *ActiveTrustedSigners) SetEnabled(v bool) *ActiveTrustedSigners { - s.Enabled = &v - return s +// ListDistributionsByWebACLId API operation for Amazon CloudFront. +// +// List the distributions that are associated with a specified AWS WAF web ACL. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation ListDistributionsByWebACLId for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInvalidWebACLId "InvalidWebACLId" +// A web ACL ID specified is not valid. To specify a web ACL created using the +// latest version of AWS WAF, use the ACL ARN, for example arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a. +// To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example +// 473e64fd-f30b-4765-81a0-62ad96dd167a. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListDistributionsByWebACLId +func (c *CloudFront) ListDistributionsByWebACLId(input *ListDistributionsByWebACLIdInput) (*ListDistributionsByWebACLIdOutput, error) { + req, out := c.ListDistributionsByWebACLIdRequest(input) + return out, req.Send() } -// SetItems sets the Items field's value. 
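+//
+// A minimal sketch of calling ListDistributionsByWebACLId (defined above) for a
+// web ACL created with the latest version of AWS WAF; "svc" and the ACL ARN are
+// hypothetical placeholders:
+//
+//    resp, err := svc.ListDistributionsByWebACLId(&cloudfront.ListDistributionsByWebACLIdInput{
+//        WebACLId: aws.String("arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a"),
+//    })
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(resp)
+//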
-func (s *ActiveTrustedSigners) SetItems(v []*Signer) *ActiveTrustedSigners { - s.Items = v - return s +// ListDistributionsByWebACLIdWithContext is the same as ListDistributionsByWebACLId with the addition of +// the ability to pass a context and additional request options. +// +// See ListDistributionsByWebACLId for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) ListDistributionsByWebACLIdWithContext(ctx aws.Context, input *ListDistributionsByWebACLIdInput, opts ...request.Option) (*ListDistributionsByWebACLIdOutput, error) { + req, out := c.ListDistributionsByWebACLIdRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -// SetQuantity sets the Quantity field's value. -func (s *ActiveTrustedSigners) SetQuantity(v int64) *ActiveTrustedSigners { - s.Quantity = &v - return s -} +const opListFieldLevelEncryptionConfigs = "ListFieldLevelEncryptionConfigs2020_05_31" -// AWS services in China customers must file for an Internet Content Provider -// (ICP) recordal if they want to serve content publicly on an alternate domain -// name, also known as a CNAME, that they've added to CloudFront. AliasICPRecordal -// provides the ICP recordal status for CNAMEs associated with distributions. -// The status is returned in the CloudFront response; you can't configure it -// yourself. +// ListFieldLevelEncryptionConfigsRequest generates a "aws/request.Request" representing the +// client's request for the ListFieldLevelEncryptionConfigs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// For more information about ICP recordals, see Signup, Accounts, and Credentials -// (https://docs.amazonaws.cn/en_us/aws/latest/userguide/accounts-and-credentials.html) -// in Getting Started with AWS services in China. -type AliasICPRecordal struct { - _ struct{} `type:"structure"` - - // A domain name associated with a distribution. - CNAME *string `type:"string"` +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListFieldLevelEncryptionConfigs for more information on using the ListFieldLevelEncryptionConfigs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListFieldLevelEncryptionConfigsRequest method. +// req, resp := client.ListFieldLevelEncryptionConfigsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListFieldLevelEncryptionConfigs +func (c *CloudFront) ListFieldLevelEncryptionConfigsRequest(input *ListFieldLevelEncryptionConfigsInput) (req *request.Request, output *ListFieldLevelEncryptionConfigsOutput) { + op := &request.Operation{ + Name: opListFieldLevelEncryptionConfigs, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/field-level-encryption", + } - // The Internet Content Provider (ICP) recordal status for a CNAME. 
The ICPRecordalStatus - // is set to APPROVED for all CNAMEs (aliases) in regions outside of China. - // - // The status values returned are the following: - // - // * APPROVED indicates that the associated CNAME has a valid ICP recordal - // number. Multiple CNAMEs can be associated with a distribution, and CNAMEs - // can correspond to different ICP recordals. To be marked as APPROVED, that - // is, valid to use with China region, a CNAME must have one ICP recordal - // number associated with it. - // - // * SUSPENDED indicates that the associated CNAME does not have a valid - // ICP recordal number. - // - // * PENDING indicates that CloudFront can't determine the ICP recordal status - // of the CNAME associated with the distribution because there was an error - // in trying to determine the status. You can try again to see if the error - // is resolved in which case CloudFront returns an APPROVED or SUSPENDED - // status. - ICPRecordalStatus *string `type:"string" enum:"ICPRecordalStatus"` -} + if input == nil { + input = &ListFieldLevelEncryptionConfigsInput{} + } -// String returns the string representation -func (s AliasICPRecordal) String() string { - return awsutil.Prettify(s) + output = &ListFieldLevelEncryptionConfigsOutput{} + req = c.newRequest(op, input, output) + return } -// GoString returns the string representation -func (s AliasICPRecordal) GoString() string { - return s.String() +// ListFieldLevelEncryptionConfigs API operation for Amazon CloudFront. +// +// List all field-level encryption configurations that have been created in +// CloudFront for this account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation ListFieldLevelEncryptionConfigs for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListFieldLevelEncryptionConfigs +func (c *CloudFront) ListFieldLevelEncryptionConfigs(input *ListFieldLevelEncryptionConfigsInput) (*ListFieldLevelEncryptionConfigsOutput, error) { + req, out := c.ListFieldLevelEncryptionConfigsRequest(input) + return out, req.Send() } -// SetCNAME sets the CNAME field's value. +// ListFieldLevelEncryptionConfigsWithContext is the same as ListFieldLevelEncryptionConfigs with the addition of +// the ability to pass a context and additional request options. +// +// See ListFieldLevelEncryptionConfigs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) ListFieldLevelEncryptionConfigsWithContext(ctx aws.Context, input *ListFieldLevelEncryptionConfigsInput, opts ...request.Option) (*ListFieldLevelEncryptionConfigsOutput, error) { + req, out := c.ListFieldLevelEncryptionConfigsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opListFieldLevelEncryptionProfiles = "ListFieldLevelEncryptionProfiles2020_05_31" + +// ListFieldLevelEncryptionProfilesRequest generates a "aws/request.Request" representing the +// client's request for the ListFieldLevelEncryptionProfiles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListFieldLevelEncryptionProfiles for more information on using the ListFieldLevelEncryptionProfiles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListFieldLevelEncryptionProfilesRequest method. +// req, resp := client.ListFieldLevelEncryptionProfilesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListFieldLevelEncryptionProfiles +func (c *CloudFront) ListFieldLevelEncryptionProfilesRequest(input *ListFieldLevelEncryptionProfilesInput) (req *request.Request, output *ListFieldLevelEncryptionProfilesOutput) { + op := &request.Operation{ + Name: opListFieldLevelEncryptionProfiles, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/field-level-encryption-profile", + } + + if input == nil { + input = &ListFieldLevelEncryptionProfilesInput{} + } + + output = &ListFieldLevelEncryptionProfilesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListFieldLevelEncryptionProfiles API operation for Amazon CloudFront. +// +// Request a list of field-level encryption profiles that have been created +// in CloudFront for this account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation ListFieldLevelEncryptionProfiles for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListFieldLevelEncryptionProfiles +func (c *CloudFront) ListFieldLevelEncryptionProfiles(input *ListFieldLevelEncryptionProfilesInput) (*ListFieldLevelEncryptionProfilesOutput, error) { + req, out := c.ListFieldLevelEncryptionProfilesRequest(input) + return out, req.Send() +} + +// ListFieldLevelEncryptionProfilesWithContext is the same as ListFieldLevelEncryptionProfiles with the addition of +// the ability to pass a context and additional request options. +// +// See ListFieldLevelEncryptionProfiles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
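+//
+// A minimal sketch of supplying a cancellable context, assuming a configured
+// *CloudFront client "svc" (hypothetical); the timeout value is arbitrary:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    resp, err := svc.ListFieldLevelEncryptionProfilesWithContext(ctx,
+//        &cloudfront.ListFieldLevelEncryptionProfilesInput{})
+//    if err != nil {
+//        // a deadline or cancellation surfaces here as a request error
+//    }
+//    fmt.Println(resp)
+//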
+func (c *CloudFront) ListFieldLevelEncryptionProfilesWithContext(ctx aws.Context, input *ListFieldLevelEncryptionProfilesInput, opts ...request.Option) (*ListFieldLevelEncryptionProfilesOutput, error) { + req, out := c.ListFieldLevelEncryptionProfilesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListInvalidations = "ListInvalidations2020_05_31" + +// ListInvalidationsRequest generates a "aws/request.Request" representing the +// client's request for the ListInvalidations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListInvalidations for more information on using the ListInvalidations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListInvalidationsRequest method. +// req, resp := client.ListInvalidationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListInvalidations +func (c *CloudFront) ListInvalidationsRequest(input *ListInvalidationsInput) (req *request.Request, output *ListInvalidationsOutput) { + op := &request.Operation{ + Name: opListInvalidations, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/distribution/{DistributionId}/invalidation", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"InvalidationList.NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "InvalidationList.IsTruncated", + }, + } + + if input == nil { + input = &ListInvalidationsInput{} + } + + output = &ListInvalidationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListInvalidations API operation for Amazon CloudFront. +// +// Lists invalidation batches. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation ListInvalidations for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeNoSuchDistribution "NoSuchDistribution" +// The specified distribution does not exist. +// +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListInvalidations +func (c *CloudFront) ListInvalidations(input *ListInvalidationsInput) (*ListInvalidationsOutput, error) { + req, out := c.ListInvalidationsRequest(input) + return out, req.Send() +} + +// ListInvalidationsWithContext is the same as ListInvalidations with the addition of +// the ability to pass a context and additional request options. +// +// See ListInvalidations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
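+//
+// A minimal sketch of paging through invalidation batches with the
+// ListInvalidationsPages helper defined below; "svc" and the distribution ID
+// are hypothetical placeholders:
+//
+//    err := svc.ListInvalidationsPages(
+//        &cloudfront.ListInvalidationsInput{
+//            DistributionId: aws.String("EDFDVBD6EXAMPLE"), // placeholder distribution ID
+//        },
+//        func(page *cloudfront.ListInvalidationsOutput, lastPage bool) bool {
+//            fmt.Println(page)
+//            return true // keep paging until the last page
+//        })
+//    if err != nil {
+//        // handle the error
+//    }
+//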
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) ListInvalidationsWithContext(ctx aws.Context, input *ListInvalidationsInput, opts ...request.Option) (*ListInvalidationsOutput, error) { + req, out := c.ListInvalidationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListInvalidationsPages iterates over the pages of a ListInvalidations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInvalidations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInvalidations operation. +// pageNum := 0 +// err := client.ListInvalidationsPages(params, +// func(page *cloudfront.ListInvalidationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFront) ListInvalidationsPages(input *ListInvalidationsInput, fn func(*ListInvalidationsOutput, bool) bool) error { + return c.ListInvalidationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListInvalidationsPagesWithContext same as ListInvalidationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) ListInvalidationsPagesWithContext(ctx aws.Context, input *ListInvalidationsInput, fn func(*ListInvalidationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListInvalidationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListInvalidationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListInvalidationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListOriginRequestPolicies = "ListOriginRequestPolicies2020_05_31" + +// ListOriginRequestPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListOriginRequestPolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListOriginRequestPolicies for more information on using the ListOriginRequestPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListOriginRequestPoliciesRequest method. 
+// req, resp := client.ListOriginRequestPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListOriginRequestPolicies +func (c *CloudFront) ListOriginRequestPoliciesRequest(input *ListOriginRequestPoliciesInput) (req *request.Request, output *ListOriginRequestPoliciesOutput) { + op := &request.Operation{ + Name: opListOriginRequestPolicies, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/origin-request-policy", + } + + if input == nil { + input = &ListOriginRequestPoliciesInput{} + } + + output = &ListOriginRequestPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListOriginRequestPolicies API operation for Amazon CloudFront. +// +// Gets a list of origin request policies. +// +// You can optionally apply a filter to return only the managed policies created +// by AWS, or only the custom policies created in your AWS account. +// +// You can optionally specify the maximum number of items to receive in the +// response. If the total number of items in the list exceeds the maximum that +// you specify, or the default maximum, the response is paginated. To get the +// next page of items, send a subsequent request that specifies the NextMarker +// value from the current response as the Marker value in the subsequent request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation ListOriginRequestPolicies for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// * ErrCodeNoSuchOriginRequestPolicy "NoSuchOriginRequestPolicy" +// The origin request policy does not exist. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListOriginRequestPolicies +func (c *CloudFront) ListOriginRequestPolicies(input *ListOriginRequestPoliciesInput) (*ListOriginRequestPoliciesOutput, error) { + req, out := c.ListOriginRequestPoliciesRequest(input) + return out, req.Send() +} + +// ListOriginRequestPoliciesWithContext is the same as ListOriginRequestPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See ListOriginRequestPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) ListOriginRequestPoliciesWithContext(ctx aws.Context, input *ListOriginRequestPoliciesInput, opts ...request.Option) (*ListOriginRequestPoliciesOutput, error) { + req, out := c.ListOriginRequestPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListPublicKeys = "ListPublicKeys2020_05_31" + +// ListPublicKeysRequest generates a "aws/request.Request" representing the +// client's request for the ListPublicKeys operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
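+//
+// A minimal sketch of the ListOriginRequestPolicies filter described above,
+// restricting results to the custom policies in the account; "svc" is a
+// hypothetical client value and the Type filter string "custom" is assumed:
+//
+//    resp, err := svc.ListOriginRequestPolicies(&cloudfront.ListOriginRequestPoliciesInput{
+//        Type: aws.String("custom"), // omit Type to list managed and custom policies together
+//    })
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(resp)
+//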
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListPublicKeys for more information on using the ListPublicKeys +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListPublicKeysRequest method. +// req, resp := client.ListPublicKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListPublicKeys +func (c *CloudFront) ListPublicKeysRequest(input *ListPublicKeysInput) (req *request.Request, output *ListPublicKeysOutput) { + op := &request.Operation{ + Name: opListPublicKeys, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/public-key", + } + + if input == nil { + input = &ListPublicKeysInput{} + } + + output = &ListPublicKeysOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListPublicKeys API operation for Amazon CloudFront. +// +// List all public keys that have been added to CloudFront for this account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation ListPublicKeys for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListPublicKeys +func (c *CloudFront) ListPublicKeys(input *ListPublicKeysInput) (*ListPublicKeysOutput, error) { + req, out := c.ListPublicKeysRequest(input) + return out, req.Send() +} + +// ListPublicKeysWithContext is the same as ListPublicKeys with the addition of +// the ability to pass a context and additional request options. +// +// See ListPublicKeys for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) ListPublicKeysWithContext(ctx aws.Context, input *ListPublicKeysInput, opts ...request.Option) (*ListPublicKeysOutput, error) { + req, out := c.ListPublicKeysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListRealtimeLogConfigs = "ListRealtimeLogConfigs2020_05_31" + +// ListRealtimeLogConfigsRequest generates a "aws/request.Request" representing the +// client's request for the ListRealtimeLogConfigs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListRealtimeLogConfigs for more information on using the ListRealtimeLogConfigs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListRealtimeLogConfigsRequest method. +// req, resp := client.ListRealtimeLogConfigsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListRealtimeLogConfigs +func (c *CloudFront) ListRealtimeLogConfigsRequest(input *ListRealtimeLogConfigsInput) (req *request.Request, output *ListRealtimeLogConfigsOutput) { + op := &request.Operation{ + Name: opListRealtimeLogConfigs, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/realtime-log-config", + } + + if input == nil { + input = &ListRealtimeLogConfigsInput{} + } + + output = &ListRealtimeLogConfigsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRealtimeLogConfigs API operation for Amazon CloudFront. +// +// Gets a list of real-time log configurations. +// +// You can optionally specify the maximum number of items to receive in the +// response. If the total number of items in the list exceeds the maximum that +// you specify, or the default maximum, the response is paginated. To get the +// next page of items, send a subsequent request that specifies the NextMarker +// value from the current response as the Marker value in the subsequent request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation ListRealtimeLogConfigs for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListRealtimeLogConfigs +func (c *CloudFront) ListRealtimeLogConfigs(input *ListRealtimeLogConfigsInput) (*ListRealtimeLogConfigsOutput, error) { + req, out := c.ListRealtimeLogConfigsRequest(input) + return out, req.Send() +} + +// ListRealtimeLogConfigsWithContext is the same as ListRealtimeLogConfigs with the addition of +// the ability to pass a context and additional request options. +// +// See ListRealtimeLogConfigs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) ListRealtimeLogConfigsWithContext(ctx aws.Context, input *ListRealtimeLogConfigsInput, opts ...request.Option) (*ListRealtimeLogConfigsOutput, error) { + req, out := c.ListRealtimeLogConfigsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListStreamingDistributions = "ListStreamingDistributions2020_05_31" + +// ListStreamingDistributionsRequest generates a "aws/request.Request" representing the +// client's request for the ListStreamingDistributions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListStreamingDistributions for more information on using the ListStreamingDistributions +// API call, and error handling. 
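+//
+// A minimal sketch of calling the ListRealtimeLogConfigs operation documented
+// above; "svc" is a hypothetical, already-configured *CloudFront client:
+//
+//    resp, err := svc.ListRealtimeLogConfigs(&cloudfront.ListRealtimeLogConfigsInput{})
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(resp)
+//    // If the response indicates more results, pass its NextMarker value back
+//    // as the Marker field of the next request, as described above.
+//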
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListStreamingDistributionsRequest method. +// req, resp := client.ListStreamingDistributionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListStreamingDistributions +func (c *CloudFront) ListStreamingDistributionsRequest(input *ListStreamingDistributionsInput) (req *request.Request, output *ListStreamingDistributionsOutput) { + op := &request.Operation{ + Name: opListStreamingDistributions, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/streaming-distribution", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"StreamingDistributionList.NextMarker"}, + LimitToken: "MaxItems", + TruncationToken: "StreamingDistributionList.IsTruncated", + }, + } + + if input == nil { + input = &ListStreamingDistributionsInput{} + } + + output = &ListStreamingDistributionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListStreamingDistributions API operation for Amazon CloudFront. +// +// List streaming distributions. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation ListStreamingDistributions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListStreamingDistributions +func (c *CloudFront) ListStreamingDistributions(input *ListStreamingDistributionsInput) (*ListStreamingDistributionsOutput, error) { + req, out := c.ListStreamingDistributionsRequest(input) + return out, req.Send() +} + +// ListStreamingDistributionsWithContext is the same as ListStreamingDistributions with the addition of +// the ability to pass a context and additional request options. +// +// See ListStreamingDistributions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) ListStreamingDistributionsWithContext(ctx aws.Context, input *ListStreamingDistributionsInput, opts ...request.Option) (*ListStreamingDistributionsOutput, error) { + req, out := c.ListStreamingDistributionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListStreamingDistributionsPages iterates over the pages of a ListStreamingDistributions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStreamingDistributions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStreamingDistributions operation. 
+// pageNum := 0 +// err := client.ListStreamingDistributionsPages(params, +// func(page *cloudfront.ListStreamingDistributionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CloudFront) ListStreamingDistributionsPages(input *ListStreamingDistributionsInput, fn func(*ListStreamingDistributionsOutput, bool) bool) error { + return c.ListStreamingDistributionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListStreamingDistributionsPagesWithContext same as ListStreamingDistributionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) ListStreamingDistributionsPagesWithContext(ctx aws.Context, input *ListStreamingDistributionsInput, fn func(*ListStreamingDistributionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListStreamingDistributionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListStreamingDistributionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListStreamingDistributionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource2020_05_31" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListTagsForResource +func (c *CloudFront) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/2020-05-31/tagging", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon CloudFront. +// +// List tags for a CloudFront resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation ListTagsForResource for usage and error information. 
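+//
+// A minimal sketch of tagging a distribution and then reading its tags back;
+// "svc", the account ID, and the distribution ID in the ARN are hypothetical
+// placeholders:
+//
+//    arn := aws.String("arn:aws:cloudfront::123456789012:distribution/EDFDVBD6EXAMPLE")
+//    _, err := svc.TagResource(&cloudfront.TagResourceInput{
+//        Resource: arn,
+//        Tags: &cloudfront.Tags{Items: []*cloudfront.Tag{
+//            {Key: aws.String("Environment"), Value: aws.String("test")},
+//        }},
+//    })
+//    if err != nil {
+//        // handle the error
+//    }
+//    resp, err := svc.ListTagsForResource(&cloudfront.ListTagsForResourceInput{Resource: arn})
+//    if err != nil {
+//        // handle the error
+//    }
+//    fmt.Println(resp)
+//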
+// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInvalidTagging "InvalidTagging" +// The tagging specified is not valid. +// +// * ErrCodeNoSuchResource "NoSuchResource" +// A resource that was specified is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/ListTagsForResource +func (c *CloudFront) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource2020_05_31" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/TagResource +func (c *CloudFront) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/2020-05-31/tagging?Operation=Tag", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon CloudFront. +// +// Add tags to a CloudFront resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation TagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. 
+// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInvalidTagging "InvalidTagging" +// The tagging specified is not valid. +// +// * ErrCodeNoSuchResource "NoSuchResource" +// A resource that was specified is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/TagResource +func (c *CloudFront) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource2020_05_31" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UntagResource +func (c *CloudFront) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/2020-05-31/tagging?Operation=Untag", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon CloudFront. +// +// Remove tags from a CloudFront resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInvalidTagging "InvalidTagging" +// The tagging specified is not valid. 
+// +// * ErrCodeNoSuchResource "NoSuchResource" +// A resource that was specified is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UntagResource +func (c *CloudFront) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateCachePolicy = "UpdateCachePolicy2020_05_31" + +// UpdateCachePolicyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCachePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateCachePolicy for more information on using the UpdateCachePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateCachePolicyRequest method. +// req, resp := client.UpdateCachePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateCachePolicy +func (c *CloudFront) UpdateCachePolicyRequest(input *UpdateCachePolicyInput) (req *request.Request, output *UpdateCachePolicyOutput) { + op := &request.Operation{ + Name: opUpdateCachePolicy, + HTTPMethod: "PUT", + HTTPPath: "/2020-05-31/cache-policy/{Id}", + } + + if input == nil { + input = &UpdateCachePolicyInput{} + } + + output = &UpdateCachePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateCachePolicy API operation for Amazon CloudFront. +// +// Updates a cache policy configuration. +// +// When you update a cache policy configuration, all the fields are updated +// with the values provided in the request. You cannot update some fields independent +// of others. To update a cache policy configuration: +// +// Use GetCachePolicyConfig to get the current configuration. +// +// Locally modify the fields in the cache policy configuration that you want +// to update. +// +// Call UpdateCachePolicy by providing the entire cache policy configuration, +// including the fields that you modified and those that you didn’t. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
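+//
+// A minimal sketch of the read-modify-write sequence described above; "svc" and
+// the cache policy ID are hypothetical placeholders, and only the Comment field
+// is modified here:
+//
+//    id := aws.String("2e54312d-136d-493c-8eb9-b001f22f67d2") // placeholder policy ID
+//    cur, err := svc.GetCachePolicyConfig(&cloudfront.GetCachePolicyConfigInput{Id: id})
+//    if err != nil {
+//        // handle the error
+//    }
+//    cfg := cur.CachePolicyConfig
+//    cfg.Comment = aws.String("updated comment") // local change to the fetched configuration
+//    _, err = svc.UpdateCachePolicy(&cloudfront.UpdateCachePolicyInput{
+//        Id:                id,
+//        IfMatch:           cur.ETag, // ETag returned by GetCachePolicyConfig
+//        CachePolicyConfig: cfg,      // send the entire configuration back
+//    })
+//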
+// +// See the AWS API reference guide for Amazon CloudFront's +// API operation UpdateCachePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// * ErrCodeIllegalUpdate "IllegalUpdate" +// The update contains modifications that are not allowed. +// +// * ErrCodeInconsistentQuantities "InconsistentQuantities" +// The value of Quantity and the size of Items don't match. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeNoSuchCachePolicy "NoSuchCachePolicy" +// The cache policy does not exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// * ErrCodeCachePolicyAlreadyExists "CachePolicyAlreadyExists" +// A cache policy with this name already exists. You must provide a unique name. +// To modify an existing cache policy, use UpdateCachePolicy. +// +// * ErrCodeTooManyHeadersInCachePolicy "TooManyHeadersInCachePolicy" +// The number of headers in the cache policy exceeds the maximum. For more information, +// see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// * ErrCodeTooManyCookiesInCachePolicy "TooManyCookiesInCachePolicy" +// The number of cookies in the cache policy exceeds the maximum. For more information, +// see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// * ErrCodeTooManyQueryStringsInCachePolicy "TooManyQueryStringsInCachePolicy" +// The number of query strings in the cache policy exceeds the maximum. For +// more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateCachePolicy +func (c *CloudFront) UpdateCachePolicy(input *UpdateCachePolicyInput) (*UpdateCachePolicyOutput, error) { + req, out := c.UpdateCachePolicyRequest(input) + return out, req.Send() +} + +// UpdateCachePolicyWithContext is the same as UpdateCachePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateCachePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) UpdateCachePolicyWithContext(ctx aws.Context, input *UpdateCachePolicyInput, opts ...request.Option) (*UpdateCachePolicyOutput, error) { + req, out := c.UpdateCachePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateCloudFrontOriginAccessIdentity = "UpdateCloudFrontOriginAccessIdentity2020_05_31" + +// UpdateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCloudFrontOriginAccessIdentity operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateCloudFrontOriginAccessIdentity for more information on using the UpdateCloudFrontOriginAccessIdentity +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.UpdateCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateCloudFrontOriginAccessIdentity +func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityRequest(input *UpdateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *UpdateCloudFrontOriginAccessIdentityOutput) { + op := &request.Operation{ + Name: opUpdateCloudFrontOriginAccessIdentity, + HTTPMethod: "PUT", + HTTPPath: "/2020-05-31/origin-access-identity/cloudfront/{Id}/config", + } + + if input == nil { + input = &UpdateCloudFrontOriginAccessIdentityInput{} + } + + output = &UpdateCloudFrontOriginAccessIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateCloudFrontOriginAccessIdentity API operation for Amazon CloudFront. +// +// Update an origin access identity. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation UpdateCloudFrontOriginAccessIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// * ErrCodeIllegalUpdate "IllegalUpdate" +// The update contains modifications that are not allowed. +// +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeMissingBody "MissingBody" +// This operation requires a body. Ensure that the body is present and the Content-Type +// header is set. +// +// * ErrCodeNoSuchCloudFrontOriginAccessIdentity "NoSuchCloudFrontOriginAccessIdentity" +// The specified origin access identity does not exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInconsistentQuantities "InconsistentQuantities" +// The value of Quantity and the size of Items don't match. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateCloudFrontOriginAccessIdentity +func (c *CloudFront) UpdateCloudFrontOriginAccessIdentity(input *UpdateCloudFrontOriginAccessIdentityInput) (*UpdateCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.UpdateCloudFrontOriginAccessIdentityRequest(input) + return out, req.Send() +} + +// UpdateCloudFrontOriginAccessIdentityWithContext is the same as UpdateCloudFrontOriginAccessIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateCloudFrontOriginAccessIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityWithContext(ctx aws.Context, input *UpdateCloudFrontOriginAccessIdentityInput, opts ...request.Option) (*UpdateCloudFrontOriginAccessIdentityOutput, error) { + req, out := c.UpdateCloudFrontOriginAccessIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDistribution = "UpdateDistribution2020_05_31" + +// UpdateDistributionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDistribution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDistribution for more information on using the UpdateDistribution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDistributionRequest method. +// req, resp := client.UpdateDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateDistribution +func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) (req *request.Request, output *UpdateDistributionOutput) { + op := &request.Operation{ + Name: opUpdateDistribution, + HTTPMethod: "PUT", + HTTPPath: "/2020-05-31/distribution/{Id}/config", + } + + if input == nil { + input = &UpdateDistributionInput{} + } + + output = &UpdateDistributionOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDistribution API operation for Amazon CloudFront. +// +// Updates the configuration for a web distribution. +// +// When you update a distribution, there are more required fields than when +// you create a distribution. When you update your distribution by using this +// API action, follow the steps here to get the current configuration and then +// make your updates, to make sure that you include all of the required fields. +// To view a summary, see Required Fields for Create Distribution and Update +// Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-overview-required-fields.html) +// in the Amazon CloudFront Developer Guide. 
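+//
+// A minimal caller-side sketch of the read-modify-write flow described below,
+// assuming "client" is an existing *cloudfront.CloudFront service client and
+// the distribution ID is hypothetical:
+//
+//    getOut, err := client.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{
+//        Id: aws.String("EDFDVBD6EXAMPLE"),
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    cfg := getOut.DistributionConfig
+//    cfg.Comment = aws.String("updated comment") // the local change to apply
+//    _, err = client.UpdateDistribution(&cloudfront.UpdateDistributionInput{
+//        Id:                 aws.String("EDFDVBD6EXAMPLE"),
+//        IfMatch:            getOut.ETag, // ETag returned by GetDistributionConfig
+//        DistributionConfig: cfg,
+//    })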
+// +// The update process includes getting the current distribution configuration, +// updating the XML document that is returned to make your changes, and then +// submitting an UpdateDistribution request to make the updates. +// +// For information about updating a distribution using the CloudFront console +// instead, see Creating a Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-creating-console.html) +// in the Amazon CloudFront Developer Guide. +// +// To update a web distribution using the CloudFront API +// +// Submit a GetDistributionConfig (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_GetDistributionConfig.html) +// request to get the current configuration and an Etag header for the distribution. +// +// If you update the distribution again, you must get a new Etag header. +// +// Update the XML document that was returned in the response to your GetDistributionConfig +// request to include your changes. +// +// When you edit the XML file, be aware of the following: +// +// * You must strip out the ETag parameter that is returned. +// +// * Additional fields are required when you update a distribution. There +// may be fields included in the XML file for features that you haven't configured +// for your distribution. This is expected and required to successfully update +// the distribution. +// +// * You can't change the value of CallerReference. If you try to change +// this value, CloudFront returns an IllegalUpdate error. +// +// * The new configuration replaces the existing configuration; the values +// that you specify in an UpdateDistribution request are not merged into +// your existing configuration. When you add, delete, or replace values in +// an element that allows multiple values (for example, CNAME), you must +// specify all of the values that you want to appear in the updated distribution. +// In addition, you must update the corresponding Quantity element. +// +// Submit an UpdateDistribution request to update the configuration for your +// distribution: +// +// * In the request body, include the XML document that you updated in Step +// 2. The request body must include an XML document with a DistributionConfig +// element. +// +// * Set the value of the HTTP If-Match header to the value of the ETag header +// that CloudFront returned when you submitted the GetDistributionConfig +// request in Step 1. +// +// Review the response to the UpdateDistribution request to confirm that the +// configuration was successfully updated. +// +// Optional: Submit a GetDistribution (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_GetDistribution.html) +// request to confirm that your changes have propagated. When propagation is +// complete, the value of Status is Deployed. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation UpdateDistribution for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" +// The CNAME specified is already defined for CloudFront. +// +// * ErrCodeIllegalUpdate "IllegalUpdate" +// The update contains modifications that are not allowed. 
+// +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeMissingBody "MissingBody" +// This operation requires a body. Ensure that the body is present and the Content-Type +// header is set. +// +// * ErrCodeNoSuchDistribution "NoSuchDistribution" +// The specified distribution does not exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// * ErrCodeTooManyDistributionCNAMEs "TooManyDistributionCNAMEs" +// Your request contains more CNAMEs than are allowed per distribution. +// +// * ErrCodeInvalidDefaultRootObject "InvalidDefaultRootObject" +// The default root object file name is too big or contains an invalid character. +// +// * ErrCodeInvalidRelativePath "InvalidRelativePath" +// The relative path is too big, is not URL-encoded, or does not begin with +// a slash (/). +// +// * ErrCodeInvalidErrorCode "InvalidErrorCode" +// An invalid error code was specified. +// +// * ErrCodeInvalidResponseCode "InvalidResponseCode" +// A response code is not valid. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInvalidOriginAccessIdentity "InvalidOriginAccessIdentity" +// The origin access identity is not valid or doesn't exist. +// +// * ErrCodeTooManyTrustedSigners "TooManyTrustedSigners" +// Your request contains more trusted signers than are allowed per distribution. +// +// * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" +// One or more of your trusted signers don't exist. +// +// * ErrCodeInvalidViewerCertificate "InvalidViewerCertificate" +// A viewer certificate specified is not valid. +// +// * ErrCodeInvalidMinimumProtocolVersion "InvalidMinimumProtocolVersion" +// The minimum protocol version specified is not valid. +// +// * ErrCodeInvalidRequiredProtocol "InvalidRequiredProtocol" +// This operation requires the HTTPS protocol. Ensure that you specify the HTTPS +// protocol in your request, or omit the RequiredProtocols element from your +// distribution configuration. +// +// * ErrCodeNoSuchOrigin "NoSuchOrigin" +// No origin exists with the specified Origin Id. +// +// * ErrCodeTooManyOrigins "TooManyOrigins" +// You cannot create more origins for the distribution. +// +// * ErrCodeTooManyOriginGroupsPerDistribution "TooManyOriginGroupsPerDistribution" +// Processing your request would cause you to exceed the maximum number of origin +// groups allowed. +// +// * ErrCodeTooManyCacheBehaviors "TooManyCacheBehaviors" +// You cannot create more cache behaviors for the distribution. +// +// * ErrCodeTooManyCookieNamesInWhiteList "TooManyCookieNamesInWhiteList" +// Your request contains more cookie names in the whitelist than are allowed +// per cache behavior. +// +// * ErrCodeInvalidForwardCookies "InvalidForwardCookies" +// Your request contains forward cookies option which doesn't match with the +// expectation for the whitelisted list of cookie names. Either list of cookie +// names has been specified when not allowed or list of cookie names is missing +// when expected. +// +// * ErrCodeTooManyHeadersInForwardedValues "TooManyHeadersInForwardedValues" +// Your request contains too many headers in forwarded values. +// +// * ErrCodeInvalidHeadersForS3Origin "InvalidHeadersForS3Origin" +// The headers specified are not valid for an Amazon S3 origin. 
+// +// * ErrCodeInconsistentQuantities "InconsistentQuantities" +// The value of Quantity and the size of Items don't match. +// +// * ErrCodeTooManyCertificates "TooManyCertificates" +// You cannot create anymore custom SSL/TLS certificates. +// +// * ErrCodeInvalidLocationCode "InvalidLocationCode" +// The location code specified is not valid. +// +// * ErrCodeInvalidGeoRestrictionParameter "InvalidGeoRestrictionParameter" +// The specified geo restriction parameter is not valid. +// +// * ErrCodeInvalidTTLOrder "InvalidTTLOrder" +// The TTL order specified is not valid. +// +// * ErrCodeInvalidWebACLId "InvalidWebACLId" +// A web ACL ID specified is not valid. To specify a web ACL created using the +// latest version of AWS WAF, use the ACL ARN, for example arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a. +// To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example +// 473e64fd-f30b-4765-81a0-62ad96dd167a. +// +// * ErrCodeTooManyOriginCustomHeaders "TooManyOriginCustomHeaders" +// Your request contains too many origin custom headers. +// +// * ErrCodeTooManyQueryStringParameters "TooManyQueryStringParameters" +// Your request contains too many query string parameters. +// +// * ErrCodeInvalidQueryStringParameters "InvalidQueryStringParameters" +// The query string parameters specified are not valid. +// +// * ErrCodeTooManyDistributionsWithLambdaAssociations "TooManyDistributionsWithLambdaAssociations" +// Processing your request would cause the maximum number of distributions with +// Lambda function associations per owner to be exceeded. +// +// * ErrCodeTooManyDistributionsWithSingleFunctionARN "TooManyDistributionsWithSingleFunctionARN" +// The maximum number of distributions have been associated with the specified +// Lambda function. +// +// * ErrCodeTooManyLambdaFunctionAssociations "TooManyLambdaFunctionAssociations" +// Your request contains more Lambda function associations than are allowed +// per distribution. +// +// * ErrCodeInvalidLambdaFunctionAssociation "InvalidLambdaFunctionAssociation" +// The specified Lambda function association is invalid. +// +// * ErrCodeInvalidOriginReadTimeout "InvalidOriginReadTimeout" +// The read timeout specified for the origin is not valid. +// +// * ErrCodeInvalidOriginKeepaliveTimeout "InvalidOriginKeepaliveTimeout" +// The keep alive timeout specified for the origin is not valid. +// +// * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" +// The specified configuration for field-level encryption doesn't exist. +// +// * ErrCodeIllegalFieldLevelEncryptionConfigAssociationWithCacheBehavior "IllegalFieldLevelEncryptionConfigAssociationWithCacheBehavior" +// The specified configuration for field-level encryption can't be associated +// with the specified cache behavior. +// +// * ErrCodeTooManyDistributionsAssociatedToFieldLevelEncryptionConfig "TooManyDistributionsAssociatedToFieldLevelEncryptionConfig" +// The maximum number of distributions have been associated with the specified +// configuration for field-level encryption. +// +// * ErrCodeNoSuchCachePolicy "NoSuchCachePolicy" +// The cache policy does not exist. +// +// * ErrCodeTooManyDistributionsAssociatedToCachePolicy "TooManyDistributionsAssociatedToCachePolicy" +// The maximum number of distributions have been associated with the specified +// cache policy. 
For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// * ErrCodeNoSuchOriginRequestPolicy "NoSuchOriginRequestPolicy" +// The origin request policy does not exist. +// +// * ErrCodeTooManyDistributionsAssociatedToOriginRequestPolicy "TooManyDistributionsAssociatedToOriginRequestPolicy" +// The maximum number of distributions have been associated with the specified +// origin request policy. For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateDistribution +func (c *CloudFront) UpdateDistribution(input *UpdateDistributionInput) (*UpdateDistributionOutput, error) { + req, out := c.UpdateDistributionRequest(input) + return out, req.Send() +} + +// UpdateDistributionWithContext is the same as UpdateDistribution with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDistribution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) UpdateDistributionWithContext(ctx aws.Context, input *UpdateDistributionInput, opts ...request.Option) (*UpdateDistributionOutput, error) { + req, out := c.UpdateDistributionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateFieldLevelEncryptionConfig = "UpdateFieldLevelEncryptionConfig2020_05_31" + +// UpdateFieldLevelEncryptionConfigRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFieldLevelEncryptionConfig operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateFieldLevelEncryptionConfig for more information on using the UpdateFieldLevelEncryptionConfig +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateFieldLevelEncryptionConfigRequest method. 
+// req, resp := client.UpdateFieldLevelEncryptionConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateFieldLevelEncryptionConfig +func (c *CloudFront) UpdateFieldLevelEncryptionConfigRequest(input *UpdateFieldLevelEncryptionConfigInput) (req *request.Request, output *UpdateFieldLevelEncryptionConfigOutput) { + op := &request.Operation{ + Name: opUpdateFieldLevelEncryptionConfig, + HTTPMethod: "PUT", + HTTPPath: "/2020-05-31/field-level-encryption/{Id}/config", + } + + if input == nil { + input = &UpdateFieldLevelEncryptionConfigInput{} + } + + output = &UpdateFieldLevelEncryptionConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateFieldLevelEncryptionConfig API operation for Amazon CloudFront. +// +// Update a field-level encryption configuration. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation UpdateFieldLevelEncryptionConfig for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// * ErrCodeIllegalUpdate "IllegalUpdate" +// The update contains modifications that are not allowed. +// +// * ErrCodeInconsistentQuantities "InconsistentQuantities" +// The value of Quantity and the size of Items don't match. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" +// The specified profile for field-level encryption doesn't exist. +// +// * ErrCodeNoSuchFieldLevelEncryptionConfig "NoSuchFieldLevelEncryptionConfig" +// The specified configuration for field-level encryption doesn't exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// * ErrCodeTooManyFieldLevelEncryptionQueryArgProfiles "TooManyFieldLevelEncryptionQueryArgProfiles" +// The maximum number of query arg profiles for field-level encryption have +// been created. +// +// * ErrCodeTooManyFieldLevelEncryptionContentTypeProfiles "TooManyFieldLevelEncryptionContentTypeProfiles" +// The maximum number of content type profiles for field-level encryption have +// been created. +// +// * ErrCodeQueryArgProfileEmpty "QueryArgProfileEmpty" +// No profile specified for the field-level encryption query argument. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateFieldLevelEncryptionConfig +func (c *CloudFront) UpdateFieldLevelEncryptionConfig(input *UpdateFieldLevelEncryptionConfigInput) (*UpdateFieldLevelEncryptionConfigOutput, error) { + req, out := c.UpdateFieldLevelEncryptionConfigRequest(input) + return out, req.Send() +} + +// UpdateFieldLevelEncryptionConfigWithContext is the same as UpdateFieldLevelEncryptionConfig with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateFieldLevelEncryptionConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) UpdateFieldLevelEncryptionConfigWithContext(ctx aws.Context, input *UpdateFieldLevelEncryptionConfigInput, opts ...request.Option) (*UpdateFieldLevelEncryptionConfigOutput, error) { + req, out := c.UpdateFieldLevelEncryptionConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateFieldLevelEncryptionProfile = "UpdateFieldLevelEncryptionProfile2020_05_31" + +// UpdateFieldLevelEncryptionProfileRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFieldLevelEncryptionProfile operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateFieldLevelEncryptionProfile for more information on using the UpdateFieldLevelEncryptionProfile +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateFieldLevelEncryptionProfileRequest method. +// req, resp := client.UpdateFieldLevelEncryptionProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateFieldLevelEncryptionProfile +func (c *CloudFront) UpdateFieldLevelEncryptionProfileRequest(input *UpdateFieldLevelEncryptionProfileInput) (req *request.Request, output *UpdateFieldLevelEncryptionProfileOutput) { + op := &request.Operation{ + Name: opUpdateFieldLevelEncryptionProfile, + HTTPMethod: "PUT", + HTTPPath: "/2020-05-31/field-level-encryption-profile/{Id}/config", + } + + if input == nil { + input = &UpdateFieldLevelEncryptionProfileInput{} + } + + output = &UpdateFieldLevelEncryptionProfileOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateFieldLevelEncryptionProfile API operation for Amazon CloudFront. +// +// Update a field-level encryption profile. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation UpdateFieldLevelEncryptionProfile for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// * ErrCodeFieldLevelEncryptionProfileAlreadyExists "FieldLevelEncryptionProfileAlreadyExists" +// The specified profile for field-level encryption already exists. +// +// * ErrCodeIllegalUpdate "IllegalUpdate" +// The update contains modifications that are not allowed. +// +// * ErrCodeInconsistentQuantities "InconsistentQuantities" +// The value of Quantity and the size of Items don't match. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeNoSuchPublicKey "NoSuchPublicKey" +// The specified public key doesn't exist. 
+// +// * ErrCodeNoSuchFieldLevelEncryptionProfile "NoSuchFieldLevelEncryptionProfile" +// The specified profile for field-level encryption doesn't exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// * ErrCodeFieldLevelEncryptionProfileSizeExceeded "FieldLevelEncryptionProfileSizeExceeded" +// The maximum size of a profile for field-level encryption was exceeded. +// +// * ErrCodeTooManyFieldLevelEncryptionEncryptionEntities "TooManyFieldLevelEncryptionEncryptionEntities" +// The maximum number of encryption entities for field-level encryption have +// been created. +// +// * ErrCodeTooManyFieldLevelEncryptionFieldPatterns "TooManyFieldLevelEncryptionFieldPatterns" +// The maximum number of field patterns for field-level encryption have been +// created. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateFieldLevelEncryptionProfile +func (c *CloudFront) UpdateFieldLevelEncryptionProfile(input *UpdateFieldLevelEncryptionProfileInput) (*UpdateFieldLevelEncryptionProfileOutput, error) { + req, out := c.UpdateFieldLevelEncryptionProfileRequest(input) + return out, req.Send() +} + +// UpdateFieldLevelEncryptionProfileWithContext is the same as UpdateFieldLevelEncryptionProfile with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateFieldLevelEncryptionProfile for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) UpdateFieldLevelEncryptionProfileWithContext(ctx aws.Context, input *UpdateFieldLevelEncryptionProfileInput, opts ...request.Option) (*UpdateFieldLevelEncryptionProfileOutput, error) { + req, out := c.UpdateFieldLevelEncryptionProfileRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateOriginRequestPolicy = "UpdateOriginRequestPolicy2020_05_31" + +// UpdateOriginRequestPolicyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateOriginRequestPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateOriginRequestPolicy for more information on using the UpdateOriginRequestPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateOriginRequestPolicyRequest method. 
+// req, resp := client.UpdateOriginRequestPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateOriginRequestPolicy +func (c *CloudFront) UpdateOriginRequestPolicyRequest(input *UpdateOriginRequestPolicyInput) (req *request.Request, output *UpdateOriginRequestPolicyOutput) { + op := &request.Operation{ + Name: opUpdateOriginRequestPolicy, + HTTPMethod: "PUT", + HTTPPath: "/2020-05-31/origin-request-policy/{Id}", + } + + if input == nil { + input = &UpdateOriginRequestPolicyInput{} + } + + output = &UpdateOriginRequestPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateOriginRequestPolicy API operation for Amazon CloudFront. +// +// Updates an origin request policy configuration. +// +// When you update an origin request policy configuration, all the fields are +// updated with the values provided in the request. You cannot update some fields +// independent of others. To update an origin request policy configuration: +// +// Use GetOriginRequestPolicyConfig to get the current configuration. +// +// Locally modify the fields in the origin request policy configuration that +// you want to update. +// +// Call UpdateOriginRequestPolicy by providing the entire origin request policy +// configuration, including the fields that you modified and those that you +// didn’t. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation UpdateOriginRequestPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// * ErrCodeIllegalUpdate "IllegalUpdate" +// The update contains modifications that are not allowed. +// +// * ErrCodeInconsistentQuantities "InconsistentQuantities" +// The value of Quantity and the size of Items don't match. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeNoSuchOriginRequestPolicy "NoSuchOriginRequestPolicy" +// The origin request policy does not exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// * ErrCodeOriginRequestPolicyAlreadyExists "OriginRequestPolicyAlreadyExists" +// An origin request policy with this name already exists. You must provide +// a unique name. To modify an existing origin request policy, use UpdateOriginRequestPolicy. +// +// * ErrCodeTooManyHeadersInOriginRequestPolicy "TooManyHeadersInOriginRequestPolicy" +// The number of headers in the origin request policy exceeds the maximum. For +// more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// * ErrCodeTooManyCookiesInOriginRequestPolicy "TooManyCookiesInOriginRequestPolicy" +// The number of cookies in the origin request policy exceeds the maximum. 
For +// more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// * ErrCodeTooManyQueryStringsInOriginRequestPolicy "TooManyQueryStringsInOriginRequestPolicy" +// The number of query strings in the origin request policy exceeds the maximum. +// For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// (formerly known as limits) in the Amazon CloudFront Developer Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateOriginRequestPolicy +func (c *CloudFront) UpdateOriginRequestPolicy(input *UpdateOriginRequestPolicyInput) (*UpdateOriginRequestPolicyOutput, error) { + req, out := c.UpdateOriginRequestPolicyRequest(input) + return out, req.Send() +} + +// UpdateOriginRequestPolicyWithContext is the same as UpdateOriginRequestPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateOriginRequestPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) UpdateOriginRequestPolicyWithContext(ctx aws.Context, input *UpdateOriginRequestPolicyInput, opts ...request.Option) (*UpdateOriginRequestPolicyOutput, error) { + req, out := c.UpdateOriginRequestPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdatePublicKey = "UpdatePublicKey2020_05_31" + +// UpdatePublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePublicKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdatePublicKey for more information on using the UpdatePublicKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdatePublicKeyRequest method. +// req, resp := client.UpdatePublicKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdatePublicKey +func (c *CloudFront) UpdatePublicKeyRequest(input *UpdatePublicKeyInput) (req *request.Request, output *UpdatePublicKeyOutput) { + op := &request.Operation{ + Name: opUpdatePublicKey, + HTTPMethod: "PUT", + HTTPPath: "/2020-05-31/public-key/{Id}/config", + } + + if input == nil { + input = &UpdatePublicKeyInput{} + } + + output = &UpdatePublicKeyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdatePublicKey API operation for Amazon CloudFront. +// +// Update public key information. Note that the only value you can change is +// the comment. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation UpdatePublicKey for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// * ErrCodeCannotChangeImmutablePublicKeyFields "CannotChangeImmutablePublicKeyFields" +// You can't change the value of a public key. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeIllegalUpdate "IllegalUpdate" +// The update contains modifications that are not allowed. +// +// * ErrCodeNoSuchPublicKey "NoSuchPublicKey" +// The specified public key doesn't exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdatePublicKey +func (c *CloudFront) UpdatePublicKey(input *UpdatePublicKeyInput) (*UpdatePublicKeyOutput, error) { + req, out := c.UpdatePublicKeyRequest(input) + return out, req.Send() +} + +// UpdatePublicKeyWithContext is the same as UpdatePublicKey with the addition of +// the ability to pass a context and additional request options. +// +// See UpdatePublicKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) UpdatePublicKeyWithContext(ctx aws.Context, input *UpdatePublicKeyInput, opts ...request.Option) (*UpdatePublicKeyOutput, error) { + req, out := c.UpdatePublicKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateRealtimeLogConfig = "UpdateRealtimeLogConfig2020_05_31" + +// UpdateRealtimeLogConfigRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRealtimeLogConfig operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateRealtimeLogConfig for more information on using the UpdateRealtimeLogConfig +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateRealtimeLogConfigRequest method. 
+// req, resp := client.UpdateRealtimeLogConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateRealtimeLogConfig +func (c *CloudFront) UpdateRealtimeLogConfigRequest(input *UpdateRealtimeLogConfigInput) (req *request.Request, output *UpdateRealtimeLogConfigOutput) { + op := &request.Operation{ + Name: opUpdateRealtimeLogConfig, + HTTPMethod: "PUT", + HTTPPath: "/2020-05-31/realtime-log-config/", + } + + if input == nil { + input = &UpdateRealtimeLogConfigInput{} + } + + output = &UpdateRealtimeLogConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateRealtimeLogConfig API operation for Amazon CloudFront. +// +// Updates a real-time log configuration. +// +// When you update a real-time log configuration, all the parameters are updated +// with the values provided in the request. You cannot update some parameters +// independent of others. To update a real-time log configuration: +// +// Call GetRealtimeLogConfig to get the current real-time log configuration. +// +// Locally modify the parameters in the real-time log configuration that you +// want to update. +// +// Call this API (UpdateRealtimeLogConfig) by providing the entire real-time +// log configuration, including the parameters that you modified and those that +// you didn’t. +// +// You cannot update a real-time log configuration’s Name or ARN. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation UpdateRealtimeLogConfig for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchRealtimeLogConfig "NoSuchRealtimeLogConfig" +// The real-time log configuration does not exist. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateRealtimeLogConfig +func (c *CloudFront) UpdateRealtimeLogConfig(input *UpdateRealtimeLogConfigInput) (*UpdateRealtimeLogConfigOutput, error) { + req, out := c.UpdateRealtimeLogConfigRequest(input) + return out, req.Send() +} + +// UpdateRealtimeLogConfigWithContext is the same as UpdateRealtimeLogConfig with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateRealtimeLogConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) UpdateRealtimeLogConfigWithContext(ctx aws.Context, input *UpdateRealtimeLogConfigInput, opts ...request.Option) (*UpdateRealtimeLogConfigOutput, error) { + req, out := c.UpdateRealtimeLogConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateStreamingDistribution = "UpdateStreamingDistribution2020_05_31" + +// UpdateStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStreamingDistribution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateStreamingDistribution for more information on using the UpdateStreamingDistribution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateStreamingDistributionRequest method. +// req, resp := client.UpdateStreamingDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateStreamingDistribution +func (c *CloudFront) UpdateStreamingDistributionRequest(input *UpdateStreamingDistributionInput) (req *request.Request, output *UpdateStreamingDistributionOutput) { + op := &request.Operation{ + Name: opUpdateStreamingDistribution, + HTTPMethod: "PUT", + HTTPPath: "/2020-05-31/streaming-distribution/{Id}/config", + } + + if input == nil { + input = &UpdateStreamingDistributionInput{} + } + + output = &UpdateStreamingDistributionOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateStreamingDistribution API operation for Amazon CloudFront. +// +// Update a streaming distribution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudFront's +// API operation UpdateStreamingDistribution for usage and error information. +// +// Returned Error Codes: +// * ErrCodeAccessDenied "AccessDenied" +// Access denied. +// +// * ErrCodeCNAMEAlreadyExists "CNAMEAlreadyExists" +// The CNAME specified is already defined for CloudFront. +// +// * ErrCodeIllegalUpdate "IllegalUpdate" +// The update contains modifications that are not allowed. +// +// * ErrCodeInvalidIfMatchVersion "InvalidIfMatchVersion" +// The If-Match version is missing or not valid. +// +// * ErrCodeMissingBody "MissingBody" +// This operation requires a body. Ensure that the body is present and the Content-Type +// header is set. +// +// * ErrCodeNoSuchStreamingDistribution "NoSuchStreamingDistribution" +// The specified streaming distribution does not exist. +// +// * ErrCodePreconditionFailed "PreconditionFailed" +// The precondition given in one or more of the request header fields evaluated +// to false. +// +// * ErrCodeTooManyStreamingDistributionCNAMEs "TooManyStreamingDistributionCNAMEs" +// Your request contains more CNAMEs than are allowed per distribution. +// +// * ErrCodeInvalidArgument "InvalidArgument" +// An argument is invalid. +// +// * ErrCodeInvalidOriginAccessIdentity "InvalidOriginAccessIdentity" +// The origin access identity is not valid or doesn't exist. +// +// * ErrCodeTooManyTrustedSigners "TooManyTrustedSigners" +// Your request contains more trusted signers than are allowed per distribution. +// +// * ErrCodeTrustedSignerDoesNotExist "TrustedSignerDoesNotExist" +// One or more of your trusted signers don't exist. +// +// * ErrCodeInconsistentQuantities "InconsistentQuantities" +// The value of Quantity and the size of Items don't match. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31/UpdateStreamingDistribution +func (c *CloudFront) UpdateStreamingDistribution(input *UpdateStreamingDistributionInput) (*UpdateStreamingDistributionOutput, error) { + req, out := c.UpdateStreamingDistributionRequest(input) + return out, req.Send() +} + +// UpdateStreamingDistributionWithContext is the same as UpdateStreamingDistribution with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateStreamingDistribution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudFront) UpdateStreamingDistributionWithContext(ctx aws.Context, input *UpdateStreamingDistributionInput, opts ...request.Option) (*UpdateStreamingDistributionOutput, error) { + req, out := c.UpdateStreamingDistributionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// A complex type that lists the AWS accounts, if any, that you included in +// the TrustedSigners complex type for this distribution. These are the accounts +// that you want to allow to create signed URLs for private content. +// +// The Signer complex type lists the AWS account number of the trusted signer +// or self if the signer is the AWS account that created the distribution. The +// Signer element also includes the IDs of any active CloudFront key pairs that +// are associated with the trusted signer's AWS account. If no KeyPairId element +// appears for a Signer, that signer can't create signed URLs. +// +// For more information, see Serving Private Content through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) +// in the Amazon CloudFront Developer Guide. +type ActiveTrustedSigners struct { + _ struct{} `type:"structure"` + + // Enabled is true if any of the AWS accounts listed in the TrustedSigners complex + // type for this distribution have active CloudFront key pairs. If not, Enabled + // is false. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` + + // A complex type that contains one Signer complex type for each trusted signer + // that is specified in the TrustedSigners complex type. + Items []*Signer `locationNameList:"Signer" type:"list"` + + // The number of trusted signers specified in the TrustedSigners complex type. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s ActiveTrustedSigners) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActiveTrustedSigners) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *ActiveTrustedSigners) SetEnabled(v bool) *ActiveTrustedSigners { + s.Enabled = &v + return s +} + +// SetItems sets the Items field's value. +func (s *ActiveTrustedSigners) SetItems(v []*Signer) *ActiveTrustedSigners { + s.Items = v + return s +} + +// SetQuantity sets the Quantity field's value. 
+func (s *ActiveTrustedSigners) SetQuantity(v int64) *ActiveTrustedSigners { + s.Quantity = &v + return s +} + +// AWS services in China customers must file for an Internet Content Provider +// (ICP) recordal if they want to serve content publicly on an alternate domain +// name, also known as a CNAME, that they've added to CloudFront. AliasICPRecordal +// provides the ICP recordal status for CNAMEs associated with distributions. +// The status is returned in the CloudFront response; you can't configure it +// yourself. +// +// For more information about ICP recordals, see Signup, Accounts, and Credentials +// (https://docs.amazonaws.cn/en_us/aws/latest/userguide/accounts-and-credentials.html) +// in Getting Started with AWS services in China. +type AliasICPRecordal struct { + _ struct{} `type:"structure"` + + // A domain name associated with a distribution. + CNAME *string `type:"string"` + + // The Internet Content Provider (ICP) recordal status for a CNAME. The ICPRecordalStatus + // is set to APPROVED for all CNAMEs (aliases) in regions outside of China. + // + // The status values returned are the following: + // + // * APPROVED indicates that the associated CNAME has a valid ICP recordal + // number. Multiple CNAMEs can be associated with a distribution, and CNAMEs + // can correspond to different ICP recordals. To be marked as APPROVED, that + // is, valid to use with China region, a CNAME must have one ICP recordal + // number associated with it. + // + // * SUSPENDED indicates that the associated CNAME does not have a valid + // ICP recordal number. + // + // * PENDING indicates that CloudFront can't determine the ICP recordal status + // of the CNAME associated with the distribution because there was an error + // in trying to determine the status. You can try again to see if the error + // is resolved in which case CloudFront returns an APPROVED or SUSPENDED + // status. + ICPRecordalStatus *string `type:"string" enum:"ICPRecordalStatus"` +} + +// String returns the string representation +func (s AliasICPRecordal) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AliasICPRecordal) GoString() string { + return s.String() +} + +// SetCNAME sets the CNAME field's value. func (s *AliasICPRecordal) SetCNAME(v string) *AliasICPRecordal { s.CNAME = &v return s } -// SetICPRecordalStatus sets the ICPRecordalStatus field's value. -func (s *AliasICPRecordal) SetICPRecordalStatus(v string) *AliasICPRecordal { - s.ICPRecordalStatus = &v +// SetICPRecordalStatus sets the ICPRecordalStatus field's value. +func (s *AliasICPRecordal) SetICPRecordalStatus(v string) *AliasICPRecordal { + s.ICPRecordalStatus = &v + return s +} + +// A complex type that contains information about CNAMEs (alternate domain names), +// if any, for this distribution. +type Aliases struct { + _ struct{} `type:"structure"` + + // A complex type that contains the CNAME aliases, if any, that you want to + // associate with this distribution. + Items []*string `locationNameList:"CNAME" type:"list"` + + // The number of CNAME aliases, if any, that you want to associate with this + // distribution. 
+ // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Aliases) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Aliases) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Aliases) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Aliases"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetItems sets the Items field's value. +func (s *Aliases) SetItems(v []*string) *Aliases { + s.Items = v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *Aliases) SetQuantity(v int64) *Aliases { + s.Quantity = &v + return s +} + +// A complex type that controls which HTTP methods CloudFront processes and +// forwards to your Amazon S3 bucket or your custom origin. There are three +// choices: +// +// * CloudFront forwards only GET and HEAD requests. +// +// * CloudFront forwards only GET, HEAD, and OPTIONS requests. +// +// * CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE +// requests. +// +// If you pick the third choice, you may need to restrict access to your Amazon +// S3 bucket or to your custom origin so users can't perform operations that +// you don't want them to. For example, you might not want users to have permissions +// to delete objects from your origin. +type AllowedMethods struct { + _ struct{} `type:"structure"` + + // A complex type that controls whether CloudFront caches the response to requests + // using the specified HTTP methods. There are two choices: + // + // * CloudFront caches responses to GET and HEAD requests. + // + // * CloudFront caches responses to GET, HEAD, and OPTIONS requests. + // + // If you pick the second choice for your Amazon S3 Origin, you may need to + // forward Access-Control-Request-Method, Access-Control-Request-Headers, and + // Origin headers for the responses to be cached correctly. + CachedMethods *CachedMethods `type:"structure"` + + // A complex type that contains the HTTP methods that you want CloudFront to + // process and forward to your origin. + // + // Items is a required field + Items []*string `locationNameList:"Method" type:"list" required:"true"` + + // The number of HTTP methods that you want CloudFront to forward to your origin. + // Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD, and OPTIONS + // requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests). + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s AllowedMethods) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllowedMethods) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AllowedMethods) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AllowedMethods"} + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.CachedMethods != nil { + if err := s.CachedMethods.Validate(); err != nil { + invalidParams.AddNested("CachedMethods", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCachedMethods sets the CachedMethods field's value. +func (s *AllowedMethods) SetCachedMethods(v *CachedMethods) *AllowedMethods { + s.CachedMethods = v + return s +} + +// SetItems sets the Items field's value. +func (s *AllowedMethods) SetItems(v []*string) *AllowedMethods { + s.Items = v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *AllowedMethods) SetQuantity(v int64) *AllowedMethods { + s.Quantity = &v + return s +} + +// A complex type that describes how CloudFront processes requests. +// +// You must create at least as many cache behaviors (including the default cache +// behavior) as you have origins if you want CloudFront to serve objects from +// all of the origins. Each cache behavior specifies the one origin from which +// you want CloudFront to get objects. If you have two origins and only the +// default cache behavior, the default cache behavior will cause CloudFront +// to get objects from one of the origins, but the other origin is never used. +// +// For the current quota (formerly known as limit) on the number of cache behaviors +// that you can add to a distribution, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) +// in the Amazon CloudFront Developer Guide. +// +// If you don’t want to specify any cache behaviors, include only an empty +// CacheBehaviors element. Don’t include an empty CacheBehavior element because +// this is invalid. +// +// To delete all cache behaviors in an existing distribution, update the distribution +// configuration and include only an empty CacheBehaviors element. +// +// To add, change, or remove one or more cache behaviors, update the distribution +// configuration and specify all of the cache behaviors that you want to include +// in the updated distribution. +// +// For more information about cache behaviors, see Cache Behavior Settings (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesCacheBehavior) +// in the Amazon CloudFront Developer Guide. +type CacheBehavior struct { + _ struct{} `type:"structure"` + + // A complex type that controls which HTTP methods CloudFront processes and + // forwards to your Amazon S3 bucket or your custom origin. There are three + // choices: + // + // * CloudFront forwards only GET and HEAD requests. + // + // * CloudFront forwards only GET, HEAD, and OPTIONS requests. + // + // * CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE + // requests. + // + // If you pick the third choice, you may need to restrict access to your Amazon + // S3 bucket or to your custom origin so users can't perform operations that + // you don't want them to. For example, you might not want users to have permissions + // to delete objects from your origin. + AllowedMethods *AllowedMethods `type:"structure"` + + // The unique identifier of the cache policy that is attached to this cache + // behavior. 
For more information, see Creating cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // or Using the managed cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) + // in the Amazon CloudFront Developer Guide. + CachePolicyId *string `type:"string"` + + // Whether you want CloudFront to automatically compress certain files for this + // cache behavior. If so, specify true; if not, specify false. For more information, + // see Serving Compressed Files (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/ServingCompressedFiles.html) + // in the Amazon CloudFront Developer Guide. + Compress *bool `type:"boolean"` + + // This field is deprecated. We recommend that you use the DefaultTTL field + // in a cache policy instead of this field. For more information, see Creating + // cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // or Using the managed cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) + // in the Amazon CloudFront Developer Guide. + // + // The default amount of time that you want objects to stay in CloudFront caches + // before CloudFront forwards another request to your origin to determine whether + // the object has been updated. The value that you specify applies only when + // your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control + // s-maxage, and Expires to objects. For more information, see Managing How + // Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // in the Amazon CloudFront Developer Guide. + // + // Deprecated: DefaultTTL has been deprecated + DefaultTTL *int64 `deprecated:"true" type:"long"` + + // The value of ID for the field-level encryption configuration that you want + // CloudFront to use for encrypting specific fields of data for this cache behavior. + FieldLevelEncryptionId *string `type:"string"` + + // This field is deprecated. We recommend that you use a cache policy or an + // origin request policy instead of this field. For more information, see Working + // with policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/working-with-policies.html) + // in the Amazon CloudFront Developer Guide. + // + // If you want to include values in the cache key, use a cache policy. For more + // information, see Creating cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // or Using the managed cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) + // in the Amazon CloudFront Developer Guide. + // + // If you want to send values to the origin but not include them in the cache + // key, use an origin request policy. For more information, see Creating origin + // request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) + // or Using the managed origin request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-origin-request-policies.html) + // in the Amazon CloudFront Developer Guide. 
+ // + // A complex type that specifies how CloudFront handles query strings, cookies, + // and HTTP headers. + // + // Deprecated: ForwardedValues has been deprecated + ForwardedValues *ForwardedValues `deprecated:"true" type:"structure"` + + // A complex type that contains zero or more Lambda function associations for + // a cache behavior. + LambdaFunctionAssociations *LambdaFunctionAssociations `type:"structure"` + + // This field is deprecated. We recommend that you use the MaxTTL field in a + // cache policy instead of this field. For more information, see Creating cache + // policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // or Using the managed cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) + // in the Amazon CloudFront Developer Guide. + // + // The maximum amount of time that you want objects to stay in CloudFront caches + // before CloudFront forwards another request to your origin to determine whether + // the object has been updated. The value that you specify applies only when + // your origin adds HTTP headers such as Cache-Control max-age, Cache-Control + // s-maxage, and Expires to objects. For more information, see Managing How + // Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // in the Amazon CloudFront Developer Guide. + // + // Deprecated: MaxTTL has been deprecated + MaxTTL *int64 `deprecated:"true" type:"long"` + + // This field is deprecated. We recommend that you use the MinTTL field in a + // cache policy instead of this field. For more information, see Creating cache + // policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // or Using the managed cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) + // in the Amazon CloudFront Developer Guide. + // + // The minimum amount of time that you want objects to stay in CloudFront caches + // before CloudFront forwards another request to your origin to determine whether + // the object has been updated. For more information, see Managing How Long + // Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // in the Amazon CloudFront Developer Guide. + // + // You must specify 0 for MinTTL if you configure CloudFront to forward all + // headers to your origin (under Headers, if you specify 1 for Quantity and + // * for Name). + // + // Deprecated: MinTTL has been deprecated + MinTTL *int64 `deprecated:"true" type:"long"` + + // The unique identifier of the origin request policy that is attached to this + // cache behavior. For more information, see Creating origin request policies + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) + // or Using the managed origin request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-origin-request-policies.html) + // in the Amazon CloudFront Developer Guide. + OriginRequestPolicyId *string `type:"string"` + + // The pattern (for example, images/*.jpg) that specifies which requests to + // apply the behavior to. 
+ // When CloudFront receives a viewer request, the requested
+ // path is compared with path patterns in the order in which cache behaviors
+ // are listed in the distribution.
+ //
+ // You can optionally include a slash (/) at the beginning of the path pattern.
+ // For example, /images/*.jpg. CloudFront behavior is the same with or without
+ // the leading /.
+ //
+ // The path pattern for the default cache behavior is * and cannot be changed.
+ // If the request for an object does not match the path pattern for any cache
+ // behaviors, CloudFront applies the behavior in the default cache behavior.
+ //
+ // For more information, see Path Pattern (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesPathPattern)
+ // in the Amazon CloudFront Developer Guide.
+ //
+ // PathPattern is a required field
+ PathPattern *string `type:"string" required:"true"`
+
+ // The Amazon Resource Name (ARN) of the real-time log configuration that is
+ // attached to this cache behavior. For more information, see Real-time logs
+ // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/real-time-logs.html)
+ // in the Amazon CloudFront Developer Guide.
+ RealtimeLogConfigArn *string `type:"string"`
+
+ // Indicates whether you want to distribute media files in the Microsoft Smooth
+ // Streaming format using the origin that is associated with this cache behavior.
+ // If so, specify true; if not, specify false. If you specify true for SmoothStreaming,
+ // you can still distribute other content using this cache behavior if the content
+ // matches the value of PathPattern.
+ SmoothStreaming *bool `type:"boolean"`
+
+ // The value of ID for the origin that you want CloudFront to route requests
+ // to when they match this cache behavior.
+ //
+ // TargetOriginId is a required field
+ TargetOriginId *string `type:"string" required:"true"`
+
+ // A complex type that specifies the AWS accounts, if any, that you want to
+ // allow to create signed URLs for private content.
+ //
+ // If you want to require signed URLs in requests for objects in the target
+ // origin that match the PathPattern for this cache behavior, specify true for
+ // Enabled, and specify the applicable values for Quantity and Items. For more
+ // information, see Serving Private Content with Signed URLs and Signed Cookies
+ // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html)
+ // in the Amazon CloudFront Developer Guide.
+ //
+ // If you don’t want to require signed URLs in requests for objects that match
+ // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items.
+ //
+ // To add, change, or remove one or more trusted signers, change Enabled to
+ // true (if it’s currently false), change Quantity as applicable, and specify
+ // all of the trusted signers that you want to include in the updated distribution.
+ //
+ // TrustedSigners is a required field
+ TrustedSigners *TrustedSigners `type:"structure" required:"true"`
+
+ // The protocol that viewers can use to access the files in the origin specified
+ // by TargetOriginId when a request matches the path pattern in PathPattern.
+ // You can specify the following options:
+ //
+ // * allow-all: Viewers can use HTTP or HTTPS.
+ //
+ // * redirect-to-https: If a viewer submits an HTTP request, CloudFront returns
+ // an HTTP status code of 301 (Moved Permanently) to the viewer along with
+ // the HTTPS URL.
+ // The viewer then resubmits the request using the new URL.
+ //
+ // * https-only: If a viewer sends an HTTP request, CloudFront returns an
+ // HTTP status code of 403 (Forbidden).
+ //
+ // For more information about requiring the HTTPS protocol, see Requiring HTTPS
+ // Between Viewers and CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-https-viewers-to-cloudfront.html)
+ // in the Amazon CloudFront Developer Guide.
+ //
+ // The only way to guarantee that viewers retrieve an object that was fetched
+ // from the origin using HTTPS is never to use any other protocol to fetch the
+ // object. If you have recently changed from HTTP to HTTPS, we recommend that
+ // you clear your objects’ cache because cached objects are protocol agnostic.
+ // That means that an edge location will return an object from the cache regardless
+ // of whether the current request protocol matches the protocol used previously.
+ // For more information, see Managing Cache Expiration (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html)
+ // in the Amazon CloudFront Developer Guide.
+ //
+ // ViewerProtocolPolicy is a required field
+ ViewerProtocolPolicy *string `type:"string" required:"true" enum:"ViewerProtocolPolicy"`
+}
+
+// String returns the string representation
+func (s CacheBehavior) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CacheBehavior) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CacheBehavior) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CacheBehavior"}
+ if s.PathPattern == nil {
+ invalidParams.Add(request.NewErrParamRequired("PathPattern"))
+ }
+ if s.TargetOriginId == nil {
+ invalidParams.Add(request.NewErrParamRequired("TargetOriginId"))
+ }
+ if s.TrustedSigners == nil {
+ invalidParams.Add(request.NewErrParamRequired("TrustedSigners"))
+ }
+ if s.ViewerProtocolPolicy == nil {
+ invalidParams.Add(request.NewErrParamRequired("ViewerProtocolPolicy"))
+ }
+ if s.AllowedMethods != nil {
+ if err := s.AllowedMethods.Validate(); err != nil {
+ invalidParams.AddNested("AllowedMethods", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.ForwardedValues != nil {
+ if err := s.ForwardedValues.Validate(); err != nil {
+ invalidParams.AddNested("ForwardedValues", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.LambdaFunctionAssociations != nil {
+ if err := s.LambdaFunctionAssociations.Validate(); err != nil {
+ invalidParams.AddNested("LambdaFunctionAssociations", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.TrustedSigners != nil {
+ if err := s.TrustedSigners.Validate(); err != nil {
+ invalidParams.AddNested("TrustedSigners", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAllowedMethods sets the AllowedMethods field's value.
+func (s *CacheBehavior) SetAllowedMethods(v *AllowedMethods) *CacheBehavior {
+ s.AllowedMethods = v
+ return s
+}
+
+// SetCachePolicyId sets the CachePolicyId field's value.
+func (s *CacheBehavior) SetCachePolicyId(v string) *CacheBehavior {
+ s.CachePolicyId = &v
+ return s
+}
+
+// SetCompress sets the Compress field's value.
+func (s *CacheBehavior) SetCompress(v bool) *CacheBehavior {
+ s.Compress = &v
+ return s
+}
+
+// SetDefaultTTL sets the DefaultTTL field's value.
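+// The generated Set* methods return the receiver, so they can be chained when
+// building a value. A minimal editorial sketch (not part of the generated code;
+// the field values are illustrative, and the required TrustedSigners field is
+// omitted for brevity):
+//
+//    cb := (&CacheBehavior{}).
+//        SetPathPattern("images/*.jpg").
+//        SetTargetOriginId("my-origin").
+//        SetViewerProtocolPolicy("redirect-to-https").
+//        SetDefaultTTL(86400)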
+func (s *CacheBehavior) SetDefaultTTL(v int64) *CacheBehavior { + s.DefaultTTL = &v + return s +} + +// SetFieldLevelEncryptionId sets the FieldLevelEncryptionId field's value. +func (s *CacheBehavior) SetFieldLevelEncryptionId(v string) *CacheBehavior { + s.FieldLevelEncryptionId = &v + return s +} + +// SetForwardedValues sets the ForwardedValues field's value. +func (s *CacheBehavior) SetForwardedValues(v *ForwardedValues) *CacheBehavior { + s.ForwardedValues = v + return s +} + +// SetLambdaFunctionAssociations sets the LambdaFunctionAssociations field's value. +func (s *CacheBehavior) SetLambdaFunctionAssociations(v *LambdaFunctionAssociations) *CacheBehavior { + s.LambdaFunctionAssociations = v + return s +} + +// SetMaxTTL sets the MaxTTL field's value. +func (s *CacheBehavior) SetMaxTTL(v int64) *CacheBehavior { + s.MaxTTL = &v + return s +} + +// SetMinTTL sets the MinTTL field's value. +func (s *CacheBehavior) SetMinTTL(v int64) *CacheBehavior { + s.MinTTL = &v + return s +} + +// SetOriginRequestPolicyId sets the OriginRequestPolicyId field's value. +func (s *CacheBehavior) SetOriginRequestPolicyId(v string) *CacheBehavior { + s.OriginRequestPolicyId = &v + return s +} + +// SetPathPattern sets the PathPattern field's value. +func (s *CacheBehavior) SetPathPattern(v string) *CacheBehavior { + s.PathPattern = &v + return s +} + +// SetRealtimeLogConfigArn sets the RealtimeLogConfigArn field's value. +func (s *CacheBehavior) SetRealtimeLogConfigArn(v string) *CacheBehavior { + s.RealtimeLogConfigArn = &v + return s +} + +// SetSmoothStreaming sets the SmoothStreaming field's value. +func (s *CacheBehavior) SetSmoothStreaming(v bool) *CacheBehavior { + s.SmoothStreaming = &v + return s +} + +// SetTargetOriginId sets the TargetOriginId field's value. +func (s *CacheBehavior) SetTargetOriginId(v string) *CacheBehavior { + s.TargetOriginId = &v + return s +} + +// SetTrustedSigners sets the TrustedSigners field's value. +func (s *CacheBehavior) SetTrustedSigners(v *TrustedSigners) *CacheBehavior { + s.TrustedSigners = v + return s +} + +// SetViewerProtocolPolicy sets the ViewerProtocolPolicy field's value. +func (s *CacheBehavior) SetViewerProtocolPolicy(v string) *CacheBehavior { + s.ViewerProtocolPolicy = &v + return s +} + +// A complex type that contains zero or more CacheBehavior elements. +type CacheBehaviors struct { + _ struct{} `type:"structure"` + + // Optional: A complex type that contains cache behaviors for this distribution. + // If Quantity is 0, you can omit Items. + Items []*CacheBehavior `locationNameList:"CacheBehavior" type:"list"` + + // The number of cache behaviors for this distribution. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CacheBehaviors) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheBehaviors) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
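+// An editorial sketch of what that means in practice (not part of the generated
+// code): an unset Quantity is reported, and each non-nil entry in Items is
+// validated recursively:
+//
+//    cbs := &CacheBehaviors{Items: []*CacheBehavior{{}}}
+//    err := cbs.Validate() // non-nil: Quantity is missing, and Items[0] is
+//                          // missing its own required fields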
+func (s *CacheBehaviors) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CacheBehaviors"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetItems sets the Items field's value. +func (s *CacheBehaviors) SetItems(v []*CacheBehavior) *CacheBehaviors { + s.Items = v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *CacheBehaviors) SetQuantity(v int64) *CacheBehaviors { + s.Quantity = &v + return s +} + +// A cache policy. +// +// When it’s attached to a cache behavior, the cache policy determines the +// following: +// +// * The values that CloudFront includes in the cache key. These values can +// include HTTP headers, cookies, and URL query strings. CloudFront uses +// the cache key to find an object in its cache that it can return to the +// viewer. +// +// * The default, minimum, and maximum time to live (TTL) values that you +// want objects to stay in the CloudFront cache. +// +// The headers, cookies, and query strings that are included in the cache key +// are automatically included in requests that CloudFront sends to the origin. +// CloudFront sends a request when it can’t find a valid object in its cache +// that matches the request’s cache key. If you want to send values to the +// origin but not include them in the cache key, use OriginRequestPolicy. +type CachePolicy struct { + _ struct{} `type:"structure"` + + // The cache policy configuration. + // + // CachePolicyConfig is a required field + CachePolicyConfig *CachePolicyConfig `type:"structure" required:"true"` + + // The unique identifier for the cache policy. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The date and time when the cache policy was last modified. + // + // LastModifiedTime is a required field + LastModifiedTime *time.Time `type:"timestamp" required:"true"` +} + +// String returns the string representation +func (s CachePolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CachePolicy) GoString() string { + return s.String() +} + +// SetCachePolicyConfig sets the CachePolicyConfig field's value. +func (s *CachePolicy) SetCachePolicyConfig(v *CachePolicyConfig) *CachePolicy { + s.CachePolicyConfig = v + return s +} + +// SetId sets the Id field's value. +func (s *CachePolicy) SetId(v string) *CachePolicy { + s.Id = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *CachePolicy) SetLastModifiedTime(v time.Time) *CachePolicy { + s.LastModifiedTime = &v + return s +} + +// A cache policy configuration. +// +// This configuration determines the following: +// +// * The values that CloudFront includes in the cache key. These values can +// include HTTP headers, cookies, and URL query strings. CloudFront uses +// the cache key to find an object in its cache that it can return to the +// viewer. +// +// * The default, minimum, and maximum time to live (TTL) values that you +// want objects to stay in the CloudFront cache. 
+// +// The headers, cookies, and query strings that are included in the cache key +// are automatically included in requests that CloudFront sends to the origin. +// CloudFront sends a request when it can’t find a valid object in its cache +// that matches the request’s cache key. If you want to send values to the +// origin but not include them in the cache key, use OriginRequestPolicy. +type CachePolicyConfig struct { + _ struct{} `type:"structure"` + + // A comment to describe the cache policy. + Comment *string `type:"string"` + + // The default amount of time, in seconds, that you want objects to stay in + // the CloudFront cache before CloudFront sends another request to the origin + // to see if the object has been updated. CloudFront uses this value as the + // object’s time to live (TTL) only when the origin does not send Cache-Control + // or Expires headers with the object. For more information, see Managing How + // Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // in the Amazon CloudFront Developer Guide. + // + // The default value for this field is 86400 seconds (one day). If the value + // of MinTTL is more than 86400 seconds, then the default value for this field + // is the same as the value of MinTTL. + DefaultTTL *int64 `type:"long"` + + // The maximum amount of time, in seconds, that objects stay in the CloudFront + // cache before CloudFront sends another request to the origin to see if the + // object has been updated. CloudFront uses this value only when the origin + // sends Cache-Control or Expires headers with the object. For more information, + // see Managing How Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // in the Amazon CloudFront Developer Guide. + // + // The default value for this field is 31536000 seconds (one year). If the value + // of MinTTL or DefaultTTL is more than 31536000 seconds, then the default value + // for this field is the same as the value of DefaultTTL. + MaxTTL *int64 `type:"long"` + + // The minimum amount of time, in seconds, that you want objects to stay in + // the CloudFront cache before CloudFront sends another request to the origin + // to see if the object has been updated. For more information, see Managing + // How Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // in the Amazon CloudFront Developer Guide. + // + // MinTTL is a required field + MinTTL *int64 `type:"long" required:"true"` + + // A unique name to identify the cache policy. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The HTTP headers, cookies, and URL query strings to include in the cache + // key. The values included in the cache key are automatically included in requests + // that CloudFront sends to the origin. + ParametersInCacheKeyAndForwardedToOrigin *ParametersInCacheKeyAndForwardedToOrigin `type:"structure"` +} + +// String returns the string representation +func (s CachePolicyConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CachePolicyConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
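+// An editorial sketch (not part of the generated code; it assumes the aws helper
+// package from this SDK for the pointer constructors): a configuration with both
+// required fields set passes validation:
+//
+//    cfg := &CachePolicyConfig{
+//        Name:   aws.String("example-policy"),
+//        MinTTL: aws.Int64(1),
+//    }
+//    err := cfg.Validate() // nil: Name and MinTTL are both present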
+func (s *CachePolicyConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CachePolicyConfig"} + if s.MinTTL == nil { + invalidParams.Add(request.NewErrParamRequired("MinTTL")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.ParametersInCacheKeyAndForwardedToOrigin != nil { + if err := s.ParametersInCacheKeyAndForwardedToOrigin.Validate(); err != nil { + invalidParams.AddNested("ParametersInCacheKeyAndForwardedToOrigin", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComment sets the Comment field's value. +func (s *CachePolicyConfig) SetComment(v string) *CachePolicyConfig { + s.Comment = &v + return s +} + +// SetDefaultTTL sets the DefaultTTL field's value. +func (s *CachePolicyConfig) SetDefaultTTL(v int64) *CachePolicyConfig { + s.DefaultTTL = &v + return s +} + +// SetMaxTTL sets the MaxTTL field's value. +func (s *CachePolicyConfig) SetMaxTTL(v int64) *CachePolicyConfig { + s.MaxTTL = &v + return s +} + +// SetMinTTL sets the MinTTL field's value. +func (s *CachePolicyConfig) SetMinTTL(v int64) *CachePolicyConfig { + s.MinTTL = &v + return s +} + +// SetName sets the Name field's value. +func (s *CachePolicyConfig) SetName(v string) *CachePolicyConfig { + s.Name = &v + return s +} + +// SetParametersInCacheKeyAndForwardedToOrigin sets the ParametersInCacheKeyAndForwardedToOrigin field's value. +func (s *CachePolicyConfig) SetParametersInCacheKeyAndForwardedToOrigin(v *ParametersInCacheKeyAndForwardedToOrigin) *CachePolicyConfig { + s.ParametersInCacheKeyAndForwardedToOrigin = v + return s +} + +// An object that determines whether any cookies in viewer requests (and if +// so, which cookies) are included in the cache key and automatically included +// in requests that CloudFront sends to the origin. +type CachePolicyCookiesConfig struct { + _ struct{} `type:"structure"` + + // Determines whether any cookies in viewer requests are included in the cache + // key and automatically included in requests that CloudFront sends to the origin. + // Valid values are: + // + // * none – Cookies in viewer requests are not included in the cache key + // and are not automatically included in requests that CloudFront sends to + // the origin. Even when this field is set to none, any cookies that are + // listed in an OriginRequestPolicy are included in origin requests. + // + // * whitelist – The cookies in viewer requests that are listed in the + // CookieNames type are included in the cache key and automatically included + // in requests that CloudFront sends to the origin. + // + // * allExcept – All cookies in viewer requests that are not listed in + // the CookieNames type are included in the cache key and automatically included + // in requests that CloudFront sends to the origin. + // + // * all – All cookies in viewer requests are included in the cache key + // and are automatically included in requests that CloudFront sends to the + // origin. + // + // CookieBehavior is a required field + CookieBehavior *string `type:"string" required:"true" enum:"CachePolicyCookieBehavior"` + + // Contains a list of cookie names. 
+ Cookies *CookieNames `type:"structure"` +} + +// String returns the string representation +func (s CachePolicyCookiesConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CachePolicyCookiesConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CachePolicyCookiesConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CachePolicyCookiesConfig"} + if s.CookieBehavior == nil { + invalidParams.Add(request.NewErrParamRequired("CookieBehavior")) + } + if s.Cookies != nil { + if err := s.Cookies.Validate(); err != nil { + invalidParams.AddNested("Cookies", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCookieBehavior sets the CookieBehavior field's value. +func (s *CachePolicyCookiesConfig) SetCookieBehavior(v string) *CachePolicyCookiesConfig { + s.CookieBehavior = &v + return s +} + +// SetCookies sets the Cookies field's value. +func (s *CachePolicyCookiesConfig) SetCookies(v *CookieNames) *CachePolicyCookiesConfig { + s.Cookies = v + return s +} + +// An object that determines whether any HTTP headers (and if so, which headers) +// are included in the cache key and automatically included in requests that +// CloudFront sends to the origin. +type CachePolicyHeadersConfig struct { + _ struct{} `type:"structure"` + + // Determines whether any HTTP headers are included in the cache key and automatically + // included in requests that CloudFront sends to the origin. Valid values are: + // + // * none – HTTP headers are not included in the cache key and are not + // automatically included in requests that CloudFront sends to the origin. + // Even when this field is set to none, any headers that are listed in an + // OriginRequestPolicy are included in origin requests. + // + // * whitelist – The HTTP headers that are listed in the Headers type are + // included in the cache key and are automatically included in requests that + // CloudFront sends to the origin. + // + // HeaderBehavior is a required field + HeaderBehavior *string `type:"string" required:"true" enum:"CachePolicyHeaderBehavior"` + + // Contains a list of HTTP header names. + Headers *Headers `type:"structure"` +} + +// String returns the string representation +func (s CachePolicyHeadersConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CachePolicyHeadersConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CachePolicyHeadersConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CachePolicyHeadersConfig"} + if s.HeaderBehavior == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderBehavior")) + } + if s.Headers != nil { + if err := s.Headers.Validate(); err != nil { + invalidParams.AddNested("Headers", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHeaderBehavior sets the HeaderBehavior field's value. +func (s *CachePolicyHeadersConfig) SetHeaderBehavior(v string) *CachePolicyHeadersConfig { + s.HeaderBehavior = &v + return s +} + +// SetHeaders sets the Headers field's value. +func (s *CachePolicyHeadersConfig) SetHeaders(v *Headers) *CachePolicyHeadersConfig { + s.Headers = v + return s +} + +// A list of cache policies. 
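+// An editorial sketch of pagination from a caller's perspective (not part of the
+// generated code; it assumes the ListCachePolicies operation defined elsewhere in
+// this file and an initialized *CloudFront client named svc):
+//
+//    out, err := svc.ListCachePolicies(&ListCachePoliciesInput{})
+//    if err == nil && out.CachePolicyList.NextMarker != nil {
+//        // pass the NextMarker value as the Marker of the next request
+//    }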
+type CachePolicyList struct { + _ struct{} `type:"structure"` + + // Contains the cache policies in the list. + Items []*CachePolicySummary `locationNameList:"CachePolicySummary" type:"list"` + + // The maximum number of cache policies requested. + // + // MaxItems is a required field + MaxItems *int64 `type:"integer" required:"true"` + + // If there are more items in the list than are in this response, this element + // is present. It contains the value that you should use in the Marker field + // of a subsequent request to continue listing cache policies where you left + // off. + NextMarker *string `type:"string"` + + // The total number of cache policies returned in the response. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CachePolicyList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CachePolicyList) GoString() string { + return s.String() +} + +// SetItems sets the Items field's value. +func (s *CachePolicyList) SetItems(v []*CachePolicySummary) *CachePolicyList { + s.Items = v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *CachePolicyList) SetMaxItems(v int64) *CachePolicyList { + s.MaxItems = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *CachePolicyList) SetNextMarker(v string) *CachePolicyList { + s.NextMarker = &v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *CachePolicyList) SetQuantity(v int64) *CachePolicyList { + s.Quantity = &v + return s +} + +// An object that determines whether any URL query strings in viewer requests +// (and if so, which query strings) are included in the cache key and automatically +// included in requests that CloudFront sends to the origin. +type CachePolicyQueryStringsConfig struct { + _ struct{} `type:"structure"` + + // Determines whether any URL query strings in viewer requests are included + // in the cache key and automatically included in requests that CloudFront sends + // to the origin. Valid values are: + // + // * none – Query strings in viewer requests are not included in the cache + // key and are not automatically included in requests that CloudFront sends + // to the origin. Even when this field is set to none, any query strings + // that are listed in an OriginRequestPolicy are included in origin requests. + // + // * whitelist – The query strings in viewer requests that are listed in + // the QueryStringNames type are included in the cache key and automatically + // included in requests that CloudFront sends to the origin. + // + // * allExcept – All query strings in viewer requests that are not listed + // in the QueryStringNames type are included in the cache key and automatically + // included in requests that CloudFront sends to the origin. + // + // * all – All query strings in viewer requests are included in the cache + // key and are automatically included in requests that CloudFront sends to + // the origin. + // + // QueryStringBehavior is a required field + QueryStringBehavior *string `type:"string" required:"true" enum:"CachePolicyQueryStringBehavior"` + + // Contains the specific query strings in viewer requests that either are or + // are not included in the cache key and automatically included in requests + // that CloudFront sends to the origin. 
+ // The behavior depends on whether the
+ // QueryStringBehavior field in the CachePolicyQueryStringsConfig type is set
+ // to whitelist (the listed query strings are included) or allExcept (the listed
+ // query strings are not included, but all other query strings are).
+ QueryStrings *QueryStringNames `type:"structure"`
+}
+
+// String returns the string representation
+func (s CachePolicyQueryStringsConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CachePolicyQueryStringsConfig) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CachePolicyQueryStringsConfig) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CachePolicyQueryStringsConfig"}
+ if s.QueryStringBehavior == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueryStringBehavior"))
+ }
+ if s.QueryStrings != nil {
+ if err := s.QueryStrings.Validate(); err != nil {
+ invalidParams.AddNested("QueryStrings", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetQueryStringBehavior sets the QueryStringBehavior field's value.
+func (s *CachePolicyQueryStringsConfig) SetQueryStringBehavior(v string) *CachePolicyQueryStringsConfig {
+ s.QueryStringBehavior = &v
+ return s
+}
+
+// SetQueryStrings sets the QueryStrings field's value.
+func (s *CachePolicyQueryStringsConfig) SetQueryStrings(v *QueryStringNames) *CachePolicyQueryStringsConfig {
+ s.QueryStrings = v
+ return s
+}
+
+// Contains a cache policy.
+type CachePolicySummary struct {
+ _ struct{} `type:"structure"`
+
+ // The cache policy.
+ //
+ // CachePolicy is a required field
+ CachePolicy *CachePolicy `type:"structure" required:"true"`
+
+ // The type of cache policy, either managed (created by AWS) or custom (created
+ // in this AWS account).
+ //
+ // Type is a required field
+ Type *string `type:"string" required:"true" enum:"CachePolicyType"`
+}
+
+// String returns the string representation
+func (s CachePolicySummary) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CachePolicySummary) GoString() string {
+ return s.String()
+}
+
+// SetCachePolicy sets the CachePolicy field's value.
+func (s *CachePolicySummary) SetCachePolicy(v *CachePolicy) *CachePolicySummary {
+ s.CachePolicy = v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *CachePolicySummary) SetType(v string) *CachePolicySummary {
+ s.Type = &v
+ return s
+}
+
+// A complex type that controls whether CloudFront caches the response to requests
+// using the specified HTTP methods. There are two choices:
+//
+// * CloudFront caches responses to GET and HEAD requests.
+//
+// * CloudFront caches responses to GET, HEAD, and OPTIONS requests.
+//
+// If you pick the second choice for your Amazon S3 Origin, you may need to
+// forward Access-Control-Request-Method, Access-Control-Request-Headers, and
+// Origin headers for the responses to be cached correctly.
+type CachedMethods struct {
+ _ struct{} `type:"structure"`
+
+ // A complex type that contains the HTTP methods that you want CloudFront to
+ // cache responses to.
+ //
+ // Items is a required field
+ Items []*string `locationNameList:"Method" type:"list" required:"true"`
+
+ // The number of HTTP methods for which you want CloudFront to cache responses.
+ // Valid values are 2 (for caching responses to GET and HEAD requests) and 3 + // (for caching responses to GET, HEAD, and OPTIONS requests). + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CachedMethods) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CachedMethods) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CachedMethods) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CachedMethods"} + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetItems sets the Items field's value. +func (s *CachedMethods) SetItems(v []*string) *CachedMethods { + s.Items = v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *CachedMethods) SetQuantity(v int64) *CachedMethods { + s.Quantity = &v + return s +} + +// A field-level encryption content type profile. +type ContentTypeProfile struct { + _ struct{} `type:"structure"` + + // The content type for a field-level encryption content type-profile mapping. + // + // ContentType is a required field + ContentType *string `type:"string" required:"true"` + + // The format for a field-level encryption content type-profile mapping. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"Format"` + + // The profile ID for a field-level encryption content type-profile mapping. + ProfileId *string `type:"string"` +} + +// String returns the string representation +func (s ContentTypeProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContentTypeProfile) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ContentTypeProfile) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ContentTypeProfile"} + if s.ContentType == nil { + invalidParams.Add(request.NewErrParamRequired("ContentType")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContentType sets the ContentType field's value. +func (s *ContentTypeProfile) SetContentType(v string) *ContentTypeProfile { + s.ContentType = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *ContentTypeProfile) SetFormat(v string) *ContentTypeProfile { + s.Format = &v + return s +} + +// SetProfileId sets the ProfileId field's value. +func (s *ContentTypeProfile) SetProfileId(v string) *ContentTypeProfile { + s.ProfileId = &v + return s +} + +// The configuration for a field-level encryption content type-profile mapping. +type ContentTypeProfileConfig struct { + _ struct{} `type:"structure"` + + // The configuration for a field-level encryption content type-profile. + ContentTypeProfiles *ContentTypeProfiles `type:"structure"` + + // The setting in a field-level encryption content type-profile mapping that + // specifies what to do when an unknown content type is provided for the profile. + // If true, content is forwarded without being encrypted when the content type + // is unknown. 
+ // If false (the default), an error is returned when the content
+ // type is unknown.
+ //
+ // ForwardWhenContentTypeIsUnknown is a required field
+ ForwardWhenContentTypeIsUnknown *bool `type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s ContentTypeProfileConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContentTypeProfileConfig) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ContentTypeProfileConfig) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ContentTypeProfileConfig"}
+ if s.ForwardWhenContentTypeIsUnknown == nil {
+ invalidParams.Add(request.NewErrParamRequired("ForwardWhenContentTypeIsUnknown"))
+ }
+ if s.ContentTypeProfiles != nil {
+ if err := s.ContentTypeProfiles.Validate(); err != nil {
+ invalidParams.AddNested("ContentTypeProfiles", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetContentTypeProfiles sets the ContentTypeProfiles field's value.
+func (s *ContentTypeProfileConfig) SetContentTypeProfiles(v *ContentTypeProfiles) *ContentTypeProfileConfig {
+ s.ContentTypeProfiles = v
+ return s
+}
+
+// SetForwardWhenContentTypeIsUnknown sets the ForwardWhenContentTypeIsUnknown field's value.
+func (s *ContentTypeProfileConfig) SetForwardWhenContentTypeIsUnknown(v bool) *ContentTypeProfileConfig {
+ s.ForwardWhenContentTypeIsUnknown = &v
+ return s
+}
+
+// Field-level encryption content type-profile.
+type ContentTypeProfiles struct {
+ _ struct{} `type:"structure"`
+
+ // Items in a field-level encryption content type-profile mapping.
+ Items []*ContentTypeProfile `locationNameList:"ContentTypeProfile" type:"list"`
+
+ // The number of field-level encryption content type-profile mappings.
+ //
+ // Quantity is a required field
+ Quantity *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s ContentTypeProfiles) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContentTypeProfiles) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ContentTypeProfiles) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ContentTypeProfiles"}
+ if s.Quantity == nil {
+ invalidParams.Add(request.NewErrParamRequired("Quantity"))
+ }
+ if s.Items != nil {
+ for i, v := range s.Items {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetItems sets the Items field's value.
+func (s *ContentTypeProfiles) SetItems(v []*ContentTypeProfile) *ContentTypeProfiles {
+ s.Items = v
+ return s
+}
+
+// SetQuantity sets the Quantity field's value.
+func (s *ContentTypeProfiles) SetQuantity(v int64) *ContentTypeProfiles {
+ s.Quantity = &v
+ return s
+}
+
+// Contains a list of cookie names.
+type CookieNames struct {
+ _ struct{} `type:"structure"`
+
+ // A list of cookie names.
+ Items []*string `locationNameList:"Name" type:"list"`
+
+ // The number of cookie names in the Items list.
+ // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CookieNames) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CookieNames) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CookieNames) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CookieNames"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetItems sets the Items field's value. +func (s *CookieNames) SetItems(v []*string) *CookieNames { + s.Items = v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *CookieNames) SetQuantity(v int64) *CookieNames { + s.Quantity = &v + return s +} + +// This field is deprecated. We recommend that you use a cache policy or an +// origin request policy instead of this field. +// +// If you want to include cookies in the cache key, use CookiesConfig in a cache +// policy. See CachePolicy. +// +// If you want to send cookies to the origin but not include them in the cache +// key, use CookiesConfig in an origin request policy. See OriginRequestPolicy. +// +// A complex type that specifies whether you want CloudFront to forward cookies +// to the origin and, if so, which ones. For more information about forwarding +// cookies to the origin, see Caching Content Based on Cookies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Cookies.html) +// in the Amazon CloudFront Developer Guide. +type CookiePreference struct { + _ struct{} `type:"structure"` + + // This field is deprecated. We recommend that you use a cache policy or an + // origin request policy instead of this field. + // + // If you want to include cookies in the cache key, use a cache policy. For + // more information, see Creating cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // in the Amazon CloudFront Developer Guide. + // + // If you want to send cookies to the origin but not include them in the cache + // key, use origin request policy. For more information, see Creating origin + // request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) + // in the Amazon CloudFront Developer Guide. + // + // Specifies which cookies to forward to the origin for this cache behavior: + // all, none, or the list of cookies specified in the WhitelistedNames complex + // type. + // + // Amazon S3 doesn't process cookies. When the cache behavior is forwarding + // requests to an Amazon S3 origin, specify none for the Forward element. + // + // Forward is a required field + Forward *string `type:"string" required:"true" enum:"ItemSelection"` + + // This field is deprecated. We recommend that you use a cache policy or an + // origin request policy instead of this field. + // + // If you want to include cookies in the cache key, use a cache policy. For + // more information, see Creating cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // in the Amazon CloudFront Developer Guide. 
+ // + // If you want to send cookies to the origin but not include them in the cache + // key, use an origin request policy. For more information, see Creating origin + // request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) + // in the Amazon CloudFront Developer Guide. + // + // Required if you specify whitelist for the value of Forward. A complex type + // that specifies how many different cookies you want CloudFront to forward + // to the origin for this cache behavior and, if you want to forward selected + // cookies, the names of those cookies. + // + // If you specify all or none for the value of Forward, omit WhitelistedNames. + // If you change the value of Forward from whitelist to all or none and you + // don't delete the WhitelistedNames element and its child elements, CloudFront + // deletes them automatically. + // + // For the current limit on the number of cookie names that you can whitelist + // for each cache behavior, see CloudFront Limits (https://docs.aws.amazon.com/general/latest/gr/xrefaws_service_limits.html#limits_cloudfront) + // in the AWS General Reference. + WhitelistedNames *CookieNames `type:"structure"` +} + +// String returns the string representation +func (s CookiePreference) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CookiePreference) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CookiePreference) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CookiePreference"} + if s.Forward == nil { + invalidParams.Add(request.NewErrParamRequired("Forward")) + } + if s.WhitelistedNames != nil { + if err := s.WhitelistedNames.Validate(); err != nil { + invalidParams.AddNested("WhitelistedNames", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForward sets the Forward field's value. +func (s *CookiePreference) SetForward(v string) *CookiePreference { + s.Forward = &v + return s +} + +// SetWhitelistedNames sets the WhitelistedNames field's value. +func (s *CookiePreference) SetWhitelistedNames(v *CookieNames) *CookiePreference { + s.WhitelistedNames = v + return s +} + +type CreateCachePolicyInput struct { + _ struct{} `locationName:"CreateCachePolicyRequest" type:"structure" payload:"CachePolicyConfig"` + + // A cache policy configuration. + // + // CachePolicyConfig is a required field + CachePolicyConfig *CachePolicyConfig `locationName:"CachePolicyConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s CreateCachePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCachePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
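+// An editorial sketch of the call this input feeds, from an importing package
+// (not part of the generated code; it assumes the CreateCachePolicy operation
+// defined elsewhere in this file and the aws and session helper packages from
+// this SDK):
+//
+//    svc := cloudfront.New(session.Must(session.NewSession()))
+//    out, err := svc.CreateCachePolicy(&cloudfront.CreateCachePolicyInput{
+//        CachePolicyConfig: &cloudfront.CachePolicyConfig{
+//            Name:   aws.String("example-policy"),
+//            MinTTL: aws.Int64(1),
+//        },
+//    })
+//    // on success, out.CachePolicy, out.ETag, and out.Location are populated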
+func (s *CreateCachePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCachePolicyInput"} + if s.CachePolicyConfig == nil { + invalidParams.Add(request.NewErrParamRequired("CachePolicyConfig")) + } + if s.CachePolicyConfig != nil { + if err := s.CachePolicyConfig.Validate(); err != nil { + invalidParams.AddNested("CachePolicyConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCachePolicyConfig sets the CachePolicyConfig field's value. +func (s *CreateCachePolicyInput) SetCachePolicyConfig(v *CachePolicyConfig) *CreateCachePolicyInput { + s.CachePolicyConfig = v + return s +} + +type CreateCachePolicyOutput struct { + _ struct{} `type:"structure" payload:"CachePolicy"` + + // A cache policy. + CachePolicy *CachePolicy `type:"structure"` + + // The current version of the cache policy. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the cache policy just created. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateCachePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCachePolicyOutput) GoString() string { + return s.String() +} + +// SetCachePolicy sets the CachePolicy field's value. +func (s *CreateCachePolicyOutput) SetCachePolicy(v *CachePolicy) *CreateCachePolicyOutput { + s.CachePolicy = v + return s +} + +// SetETag sets the ETag field's value. +func (s *CreateCachePolicyOutput) SetETag(v string) *CreateCachePolicyOutput { + s.ETag = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateCachePolicyOutput) SetLocation(v string) *CreateCachePolicyOutput { + s.Location = &v + return s +} + +// The request to create a new origin access identity (OAI). An origin access +// identity is a special CloudFront user that you can associate with Amazon +// S3 origins, so that you can secure all or just some of your Amazon S3 content. +// For more information, see Restricting Access to Amazon S3 Content by Using +// an Origin Access Identity (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html) +// in the Amazon CloudFront Developer Guide. +type CreateCloudFrontOriginAccessIdentityInput struct { + _ struct{} `locationName:"CreateCloudFrontOriginAccessIdentityRequest" type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` + + // The current configuration information for the identity. + // + // CloudFrontOriginAccessIdentityConfig is a required field + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `locationName:"CloudFrontOriginAccessIdentityConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s CreateCloudFrontOriginAccessIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCloudFrontOriginAccessIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
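+// An editorial sketch from a caller's perspective (not part of the generated
+// code; it assumes the CreateCloudFrontOriginAccessIdentity operation defined
+// elsewhere in this file, and that OriginAccessIdentityConfig takes the usual
+// CallerReference and Comment values):
+//
+//    out, err := svc.CreateCloudFrontOriginAccessIdentity(
+//        &CreateCloudFrontOriginAccessIdentityInput{
+//            CloudFrontOriginAccessIdentityConfig: &OriginAccessIdentityConfig{
+//                CallerReference: aws.String("example-oai-reference"),
+//                Comment:         aws.String("OAI for the example bucket"),
+//            },
+//        })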
+func (s *CreateCloudFrontOriginAccessIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCloudFrontOriginAccessIdentityInput"} + if s.CloudFrontOriginAccessIdentityConfig == nil { + invalidParams.Add(request.NewErrParamRequired("CloudFrontOriginAccessIdentityConfig")) + } + if s.CloudFrontOriginAccessIdentityConfig != nil { + if err := s.CloudFrontOriginAccessIdentityConfig.Validate(); err != nil { + invalidParams.AddNested("CloudFrontOriginAccessIdentityConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCloudFrontOriginAccessIdentityConfig sets the CloudFrontOriginAccessIdentityConfig field's value. +func (s *CreateCloudFrontOriginAccessIdentityInput) SetCloudFrontOriginAccessIdentityConfig(v *OriginAccessIdentityConfig) *CreateCloudFrontOriginAccessIdentityInput { + s.CloudFrontOriginAccessIdentityConfig = v + return s +} + +// The returned result of the corresponding request. +type CreateCloudFrontOriginAccessIdentityOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` + + // The origin access identity's information. + CloudFrontOriginAccessIdentity *OriginAccessIdentity `type:"structure"` + + // The current version of the origin access identity created. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new origin access identity just created. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateCloudFrontOriginAccessIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCloudFrontOriginAccessIdentityOutput) GoString() string { + return s.String() +} + +// SetCloudFrontOriginAccessIdentity sets the CloudFrontOriginAccessIdentity field's value. +func (s *CreateCloudFrontOriginAccessIdentityOutput) SetCloudFrontOriginAccessIdentity(v *OriginAccessIdentity) *CreateCloudFrontOriginAccessIdentityOutput { + s.CloudFrontOriginAccessIdentity = v + return s +} + +// SetETag sets the ETag field's value. +func (s *CreateCloudFrontOriginAccessIdentityOutput) SetETag(v string) *CreateCloudFrontOriginAccessIdentityOutput { + s.ETag = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateCloudFrontOriginAccessIdentityOutput) SetLocation(v string) *CreateCloudFrontOriginAccessIdentityOutput { + s.Location = &v + return s +} + +// The request to create a new distribution. +type CreateDistributionInput struct { + _ struct{} `locationName:"CreateDistributionRequest" type:"structure" payload:"DistributionConfig"` + + // The distribution's configuration information. + // + // DistributionConfig is a required field + DistributionConfig *DistributionConfig `locationName:"DistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s CreateDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDistributionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
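+// An editorial sketch (not part of the generated code): validation is recursive,
+// so an empty nested DistributionConfig surfaces its own missing required fields
+// rather than only the top-level ones:
+//
+//    in := &CreateDistributionInput{DistributionConfig: &DistributionConfig{}}
+//    err := in.Validate() // non-nil: the nested config's required fields are reported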
+func (s *CreateDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDistributionInput"} + if s.DistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionConfig")) + } + if s.DistributionConfig != nil { + if err := s.DistributionConfig.Validate(); err != nil { + invalidParams.AddNested("DistributionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDistributionConfig sets the DistributionConfig field's value. +func (s *CreateDistributionInput) SetDistributionConfig(v *DistributionConfig) *CreateDistributionInput { + s.DistributionConfig = v + return s +} + +// The returned result of the corresponding request. +type CreateDistributionOutput struct { + _ struct{} `type:"structure" payload:"Distribution"` + + // The distribution's information. + Distribution *Distribution `type:"structure"` + + // The current version of the distribution created. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new distribution resource just created. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDistributionOutput) GoString() string { + return s.String() +} + +// SetDistribution sets the Distribution field's value. +func (s *CreateDistributionOutput) SetDistribution(v *Distribution) *CreateDistributionOutput { + s.Distribution = v + return s +} + +// SetETag sets the ETag field's value. +func (s *CreateDistributionOutput) SetETag(v string) *CreateDistributionOutput { + s.ETag = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateDistributionOutput) SetLocation(v string) *CreateDistributionOutput { + s.Location = &v + return s +} + +// The request to create a new distribution with tags. +type CreateDistributionWithTagsInput struct { + _ struct{} `locationName:"CreateDistributionWithTagsRequest" type:"structure" payload:"DistributionConfigWithTags"` + + // The distribution's configuration information. + // + // DistributionConfigWithTags is a required field + DistributionConfigWithTags *DistributionConfigWithTags `locationName:"DistributionConfigWithTags" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s CreateDistributionWithTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDistributionWithTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDistributionWithTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDistributionWithTagsInput"} + if s.DistributionConfigWithTags == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionConfigWithTags")) + } + if s.DistributionConfigWithTags != nil { + if err := s.DistributionConfigWithTags.Validate(); err != nil { + invalidParams.AddNested("DistributionConfigWithTags", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDistributionConfigWithTags sets the DistributionConfigWithTags field's value. 
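+// An editorial sketch (not part of the generated code; it assumes the
+// DistributionConfigWithTags, Tags, and Tag types defined elsewhere in this
+// file, and the values are illustrative):
+//
+//    in := (&CreateDistributionWithTagsInput{}).
+//        SetDistributionConfigWithTags(&DistributionConfigWithTags{
+//            DistributionConfig: &DistributionConfig{ /* required fields omitted */ },
+//            Tags: &Tags{Items: []*Tag{{Key: aws.String("env"), Value: aws.String("dev")}}},
+//        })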
+func (s *CreateDistributionWithTagsInput) SetDistributionConfigWithTags(v *DistributionConfigWithTags) *CreateDistributionWithTagsInput { + s.DistributionConfigWithTags = v + return s +} + +// The returned result of the corresponding request. +type CreateDistributionWithTagsOutput struct { + _ struct{} `type:"structure" payload:"Distribution"` + + // The distribution's information. + Distribution *Distribution `type:"structure"` + + // The current version of the distribution created. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new distribution resource just created. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateDistributionWithTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDistributionWithTagsOutput) GoString() string { + return s.String() +} + +// SetDistribution sets the Distribution field's value. +func (s *CreateDistributionWithTagsOutput) SetDistribution(v *Distribution) *CreateDistributionWithTagsOutput { + s.Distribution = v + return s +} + +// SetETag sets the ETag field's value. +func (s *CreateDistributionWithTagsOutput) SetETag(v string) *CreateDistributionWithTagsOutput { + s.ETag = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateDistributionWithTagsOutput) SetLocation(v string) *CreateDistributionWithTagsOutput { + s.Location = &v + return s +} + +type CreateFieldLevelEncryptionConfigInput struct { + _ struct{} `locationName:"CreateFieldLevelEncryptionConfigRequest" type:"structure" payload:"FieldLevelEncryptionConfig"` + + // The request to create a new field-level encryption configuration. + // + // FieldLevelEncryptionConfig is a required field + FieldLevelEncryptionConfig *FieldLevelEncryptionConfig `locationName:"FieldLevelEncryptionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s CreateFieldLevelEncryptionConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFieldLevelEncryptionConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateFieldLevelEncryptionConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFieldLevelEncryptionConfigInput"} + if s.FieldLevelEncryptionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("FieldLevelEncryptionConfig")) + } + if s.FieldLevelEncryptionConfig != nil { + if err := s.FieldLevelEncryptionConfig.Validate(); err != nil { + invalidParams.AddNested("FieldLevelEncryptionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFieldLevelEncryptionConfig sets the FieldLevelEncryptionConfig field's value. +func (s *CreateFieldLevelEncryptionConfigInput) SetFieldLevelEncryptionConfig(v *FieldLevelEncryptionConfig) *CreateFieldLevelEncryptionConfigInput { + s.FieldLevelEncryptionConfig = v + return s +} + +type CreateFieldLevelEncryptionConfigOutput struct { + _ struct{} `type:"structure" payload:"FieldLevelEncryption"` + + // The current version of the field level encryption configuration. For example: + // E2QWRUHAPOMQZL. 
+ ETag *string `location:"header" locationName:"ETag" type:"string"` + + // Returned when you create a new field-level encryption configuration. + FieldLevelEncryption *FieldLevelEncryption `type:"structure"` + + // The fully qualified URI of the new configuration resource just created. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateFieldLevelEncryptionConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFieldLevelEncryptionConfigOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CreateFieldLevelEncryptionConfigOutput) SetETag(v string) *CreateFieldLevelEncryptionConfigOutput { + s.ETag = &v + return s +} + +// SetFieldLevelEncryption sets the FieldLevelEncryption field's value. +func (s *CreateFieldLevelEncryptionConfigOutput) SetFieldLevelEncryption(v *FieldLevelEncryption) *CreateFieldLevelEncryptionConfigOutput { + s.FieldLevelEncryption = v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateFieldLevelEncryptionConfigOutput) SetLocation(v string) *CreateFieldLevelEncryptionConfigOutput { + s.Location = &v + return s +} + +type CreateFieldLevelEncryptionProfileInput struct { + _ struct{} `locationName:"CreateFieldLevelEncryptionProfileRequest" type:"structure" payload:"FieldLevelEncryptionProfileConfig"` + + // The request to create a field-level encryption profile. + // + // FieldLevelEncryptionProfileConfig is a required field + FieldLevelEncryptionProfileConfig *FieldLevelEncryptionProfileConfig `locationName:"FieldLevelEncryptionProfileConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s CreateFieldLevelEncryptionProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFieldLevelEncryptionProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateFieldLevelEncryptionProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFieldLevelEncryptionProfileInput"} + if s.FieldLevelEncryptionProfileConfig == nil { + invalidParams.Add(request.NewErrParamRequired("FieldLevelEncryptionProfileConfig")) + } + if s.FieldLevelEncryptionProfileConfig != nil { + if err := s.FieldLevelEncryptionProfileConfig.Validate(); err != nil { + invalidParams.AddNested("FieldLevelEncryptionProfileConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFieldLevelEncryptionProfileConfig sets the FieldLevelEncryptionProfileConfig field's value. +func (s *CreateFieldLevelEncryptionProfileInput) SetFieldLevelEncryptionProfileConfig(v *FieldLevelEncryptionProfileConfig) *CreateFieldLevelEncryptionProfileInput { + s.FieldLevelEncryptionProfileConfig = v + return s +} + +type CreateFieldLevelEncryptionProfileOutput struct { + _ struct{} `type:"structure" payload:"FieldLevelEncryptionProfile"` + + // The current version of the field level encryption profile. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // Returned when you create a new field-level encryption profile. 
+ FieldLevelEncryptionProfile *FieldLevelEncryptionProfile `type:"structure"` + + // The fully qualified URI of the new profile resource just created. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateFieldLevelEncryptionProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFieldLevelEncryptionProfileOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CreateFieldLevelEncryptionProfileOutput) SetETag(v string) *CreateFieldLevelEncryptionProfileOutput { + s.ETag = &v + return s +} + +// SetFieldLevelEncryptionProfile sets the FieldLevelEncryptionProfile field's value. +func (s *CreateFieldLevelEncryptionProfileOutput) SetFieldLevelEncryptionProfile(v *FieldLevelEncryptionProfile) *CreateFieldLevelEncryptionProfileOutput { + s.FieldLevelEncryptionProfile = v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateFieldLevelEncryptionProfileOutput) SetLocation(v string) *CreateFieldLevelEncryptionProfileOutput { + s.Location = &v + return s +} + +// The request to create an invalidation. +type CreateInvalidationInput struct { + _ struct{} `locationName:"CreateInvalidationRequest" type:"structure" payload:"InvalidationBatch"` + + // The distribution's id. + // + // DistributionId is a required field + DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` + + // The batch information for the invalidation. + // + // InvalidationBatch is a required field + InvalidationBatch *InvalidationBatch `locationName:"InvalidationBatch" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s CreateInvalidationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInvalidationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateInvalidationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInvalidationInput"} + if s.DistributionId == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionId")) + } + if s.DistributionId != nil && len(*s.DistributionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DistributionId", 1)) + } + if s.InvalidationBatch == nil { + invalidParams.Add(request.NewErrParamRequired("InvalidationBatch")) + } + if s.InvalidationBatch != nil { + if err := s.InvalidationBatch.Validate(); err != nil { + invalidParams.AddNested("InvalidationBatch", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDistributionId sets the DistributionId field's value. +func (s *CreateInvalidationInput) SetDistributionId(v string) *CreateInvalidationInput { + s.DistributionId = &v + return s +} + +// SetInvalidationBatch sets the InvalidationBatch field's value. +func (s *CreateInvalidationInput) SetInvalidationBatch(v *InvalidationBatch) *CreateInvalidationInput { + s.InvalidationBatch = v + return s +} + +// The returned result of the corresponding request. +type CreateInvalidationOutput struct { + _ struct{} `type:"structure" payload:"Invalidation"` + + // The invalidation's information. 
+ Invalidation *Invalidation `type:"structure"` + + // The fully qualified URI of the distribution and invalidation batch request, + // including the Invalidation ID. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateInvalidationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInvalidationOutput) GoString() string { + return s.String() +} + +// SetInvalidation sets the Invalidation field's value. +func (s *CreateInvalidationOutput) SetInvalidation(v *Invalidation) *CreateInvalidationOutput { + s.Invalidation = v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateInvalidationOutput) SetLocation(v string) *CreateInvalidationOutput { + s.Location = &v + return s +} + +type CreateMonitoringSubscriptionInput struct { + _ struct{} `locationName:"CreateMonitoringSubscriptionRequest" type:"structure" payload:"MonitoringSubscription"` + + // The ID of the distribution that you are enabling metrics for. + // + // DistributionId is a required field + DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` + + // A monitoring subscription. This structure contains information about whether + // additional CloudWatch metrics are enabled for a given CloudFront distribution. + // + // MonitoringSubscription is a required field + MonitoringSubscription *MonitoringSubscription `locationName:"MonitoringSubscription" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s CreateMonitoringSubscriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMonitoringSubscriptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMonitoringSubscriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMonitoringSubscriptionInput"} + if s.DistributionId == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionId")) + } + if s.DistributionId != nil && len(*s.DistributionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DistributionId", 1)) + } + if s.MonitoringSubscription == nil { + invalidParams.Add(request.NewErrParamRequired("MonitoringSubscription")) + } + if s.MonitoringSubscription != nil { + if err := s.MonitoringSubscription.Validate(); err != nil { + invalidParams.AddNested("MonitoringSubscription", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDistributionId sets the DistributionId field's value. +func (s *CreateMonitoringSubscriptionInput) SetDistributionId(v string) *CreateMonitoringSubscriptionInput { + s.DistributionId = &v + return s +} + +// SetMonitoringSubscription sets the MonitoringSubscription field's value. +func (s *CreateMonitoringSubscriptionInput) SetMonitoringSubscription(v *MonitoringSubscription) *CreateMonitoringSubscriptionInput { + s.MonitoringSubscription = v + return s +} + +type CreateMonitoringSubscriptionOutput struct { + _ struct{} `type:"structure" payload:"MonitoringSubscription"` + + // A monitoring subscription. This structure contains information about whether + // additional CloudWatch metrics are enabled for a given CloudFront distribution. 
+ MonitoringSubscription *MonitoringSubscription `type:"structure"` +} + +// String returns the string representation +func (s CreateMonitoringSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMonitoringSubscriptionOutput) GoString() string { + return s.String() +} + +// SetMonitoringSubscription sets the MonitoringSubscription field's value. +func (s *CreateMonitoringSubscriptionOutput) SetMonitoringSubscription(v *MonitoringSubscription) *CreateMonitoringSubscriptionOutput { + s.MonitoringSubscription = v + return s +} + +type CreateOriginRequestPolicyInput struct { + _ struct{} `locationName:"CreateOriginRequestPolicyRequest" type:"structure" payload:"OriginRequestPolicyConfig"` + + // An origin request policy configuration. + // + // OriginRequestPolicyConfig is a required field + OriginRequestPolicyConfig *OriginRequestPolicyConfig `locationName:"OriginRequestPolicyConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s CreateOriginRequestPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOriginRequestPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateOriginRequestPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateOriginRequestPolicyInput"} + if s.OriginRequestPolicyConfig == nil { + invalidParams.Add(request.NewErrParamRequired("OriginRequestPolicyConfig")) + } + if s.OriginRequestPolicyConfig != nil { + if err := s.OriginRequestPolicyConfig.Validate(); err != nil { + invalidParams.AddNested("OriginRequestPolicyConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOriginRequestPolicyConfig sets the OriginRequestPolicyConfig field's value. +func (s *CreateOriginRequestPolicyInput) SetOriginRequestPolicyConfig(v *OriginRequestPolicyConfig) *CreateOriginRequestPolicyInput { + s.OriginRequestPolicyConfig = v + return s +} + +type CreateOriginRequestPolicyOutput struct { + _ struct{} `type:"structure" payload:"OriginRequestPolicy"` + + // The current version of the origin request policy. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the origin request policy just created. + Location *string `location:"header" locationName:"Location" type:"string"` + + // An origin request policy. + OriginRequestPolicy *OriginRequestPolicy `type:"structure"` +} + +// String returns the string representation +func (s CreateOriginRequestPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOriginRequestPolicyOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CreateOriginRequestPolicyOutput) SetETag(v string) *CreateOriginRequestPolicyOutput { + s.ETag = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateOriginRequestPolicyOutput) SetLocation(v string) *CreateOriginRequestPolicyOutput { + s.Location = &v + return s +} + +// SetOriginRequestPolicy sets the OriginRequestPolicy field's value. 
+func (s *CreateOriginRequestPolicyOutput) SetOriginRequestPolicy(v *OriginRequestPolicy) *CreateOriginRequestPolicyOutput { + s.OriginRequestPolicy = v + return s +} + +type CreatePublicKeyInput struct { + _ struct{} `locationName:"CreatePublicKeyRequest" type:"structure" payload:"PublicKeyConfig"` + + // The request to add a public key to CloudFront. + // + // PublicKeyConfig is a required field + PublicKeyConfig *PublicKeyConfig `locationName:"PublicKeyConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s CreatePublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePublicKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePublicKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePublicKeyInput"} + if s.PublicKeyConfig == nil { + invalidParams.Add(request.NewErrParamRequired("PublicKeyConfig")) + } + if s.PublicKeyConfig != nil { + if err := s.PublicKeyConfig.Validate(); err != nil { + invalidParams.AddNested("PublicKeyConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPublicKeyConfig sets the PublicKeyConfig field's value. +func (s *CreatePublicKeyInput) SetPublicKeyConfig(v *PublicKeyConfig) *CreatePublicKeyInput { + s.PublicKeyConfig = v + return s +} + +type CreatePublicKeyOutput struct { + _ struct{} `type:"structure" payload:"PublicKey"` + + // The current version of the public key. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new public key resource just created. + Location *string `location:"header" locationName:"Location" type:"string"` + + // Returned when you add a public key. + PublicKey *PublicKey `type:"structure"` +} + +// String returns the string representation +func (s CreatePublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePublicKeyOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CreatePublicKeyOutput) SetETag(v string) *CreatePublicKeyOutput { + s.ETag = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreatePublicKeyOutput) SetLocation(v string) *CreatePublicKeyOutput { + s.Location = &v + return s +} + +// SetPublicKey sets the PublicKey field's value. +func (s *CreatePublicKeyOutput) SetPublicKey(v *PublicKey) *CreatePublicKeyOutput { + s.PublicKey = v + return s +} + +type CreateRealtimeLogConfigInput struct { + _ struct{} `locationName:"CreateRealtimeLogConfigRequest" type:"structure" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` + + // Contains information about the Amazon Kinesis data stream where you are sending + // real-time log data. + // + // EndPoints is a required field + EndPoints []*EndPoint `type:"list" required:"true"` + + // A list of fields to include in each real-time log record. + // + // For more information about fields, see Real-time log configuration fields + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/real-time-logs.html#understand-real-time-log-config-fields) + // in the Amazon CloudFront Developer Guide. 
+ // + // Fields is a required field + Fields []*string `locationNameList:"Field" type:"list" required:"true"` + + // A unique name to identify this real-time log configuration. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The sampling rate for this real-time log configuration. The sampling rate + // determines the percentage of viewer requests that are represented in the + // real-time log data. You must provide an integer between 1 and 100, inclusive. + // + // SamplingRate is a required field + SamplingRate *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateRealtimeLogConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRealtimeLogConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRealtimeLogConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRealtimeLogConfigInput"} + if s.EndPoints == nil { + invalidParams.Add(request.NewErrParamRequired("EndPoints")) + } + if s.Fields == nil { + invalidParams.Add(request.NewErrParamRequired("Fields")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.SamplingRate == nil { + invalidParams.Add(request.NewErrParamRequired("SamplingRate")) + } + if s.EndPoints != nil { + for i, v := range s.EndPoints { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EndPoints", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndPoints sets the EndPoints field's value. +func (s *CreateRealtimeLogConfigInput) SetEndPoints(v []*EndPoint) *CreateRealtimeLogConfigInput { + s.EndPoints = v + return s +} + +// SetFields sets the Fields field's value. +func (s *CreateRealtimeLogConfigInput) SetFields(v []*string) *CreateRealtimeLogConfigInput { + s.Fields = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateRealtimeLogConfigInput) SetName(v string) *CreateRealtimeLogConfigInput { + s.Name = &v + return s +} + +// SetSamplingRate sets the SamplingRate field's value. +func (s *CreateRealtimeLogConfigInput) SetSamplingRate(v int64) *CreateRealtimeLogConfigInput { + s.SamplingRate = &v + return s +} + +type CreateRealtimeLogConfigOutput struct { + _ struct{} `type:"structure"` + + // A real-time log configuration. + RealtimeLogConfig *RealtimeLogConfig `type:"structure"` +} + +// String returns the string representation +func (s CreateRealtimeLogConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRealtimeLogConfigOutput) GoString() string { + return s.String() +} + +// SetRealtimeLogConfig sets the RealtimeLogConfig field's value. +func (s *CreateRealtimeLogConfigOutput) SetRealtimeLogConfig(v *RealtimeLogConfig) *CreateRealtimeLogConfigOutput { + s.RealtimeLogConfig = v + return s +} + +// The request to create a new streaming distribution. +type CreateStreamingDistributionInput struct { + _ struct{} `locationName:"CreateStreamingDistributionRequest" type:"structure" payload:"StreamingDistributionConfig"` + + // The streaming distribution's configuration information. 
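+// ---------------------------------------------------------------------------
+// Editor's note (illustrative sketch, not part of the generated SDK source):
+// a minimal example of building the CreateRealtimeLogConfigInput defined
+// above. EndPoints, Fields, Name, and SamplingRate are the required fields
+// documented in that struct; the EndPoint/KinesisStreamConfig field names are
+// assumptions based on the CloudFront API shape and are not shown in this diff.
+//
+//	svc := cloudfront.New(session.Must(session.NewSession()))
+//	out, err := svc.CreateRealtimeLogConfig(&cloudfront.CreateRealtimeLogConfigInput{
+//		Name:         aws.String("example-realtime-log-config"),
+//		SamplingRate: aws.Int64(100), // percentage of viewer requests, 1-100
+//		Fields:       []*string{aws.String("timestamp"), aws.String("c-ip")},
+//		EndPoints: []*cloudfront.EndPoint{{
+//			StreamType: aws.String("Kinesis"), // assumed field name
+//			KinesisStreamConfig: &cloudfront.KinesisStreamConfig{ // assumed type
+//				RoleARN:   aws.String("arn:aws:iam::123456789012:role/example"),
+//				StreamARN: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example"),
+//			},
+//		}},
+//	})
+// ---------------------------------------------------------------------------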
+ // + // StreamingDistributionConfig is a required field + StreamingDistributionConfig *StreamingDistributionConfig `locationName:"StreamingDistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s CreateStreamingDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamingDistributionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateStreamingDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStreamingDistributionInput"} + if s.StreamingDistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("StreamingDistributionConfig")) + } + if s.StreamingDistributionConfig != nil { + if err := s.StreamingDistributionConfig.Validate(); err != nil { + invalidParams.AddNested("StreamingDistributionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStreamingDistributionConfig sets the StreamingDistributionConfig field's value. +func (s *CreateStreamingDistributionInput) SetStreamingDistributionConfig(v *StreamingDistributionConfig) *CreateStreamingDistributionInput { + s.StreamingDistributionConfig = v + return s +} + +// The returned result of the corresponding request. +type CreateStreamingDistributionOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistribution"` + + // The current version of the streaming distribution created. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new streaming distribution resource just created. + Location *string `location:"header" locationName:"Location" type:"string"` + + // The streaming distribution's information. + StreamingDistribution *StreamingDistribution `type:"structure"` +} + +// String returns the string representation +func (s CreateStreamingDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamingDistributionOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CreateStreamingDistributionOutput) SetETag(v string) *CreateStreamingDistributionOutput { + s.ETag = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateStreamingDistributionOutput) SetLocation(v string) *CreateStreamingDistributionOutput { + s.Location = &v + return s +} + +// SetStreamingDistribution sets the StreamingDistribution field's value. +func (s *CreateStreamingDistributionOutput) SetStreamingDistribution(v *StreamingDistribution) *CreateStreamingDistributionOutput { + s.StreamingDistribution = v + return s +} + +// The request to create a new streaming distribution with tags. +type CreateStreamingDistributionWithTagsInput struct { + _ struct{} `locationName:"CreateStreamingDistributionWithTagsRequest" type:"structure" payload:"StreamingDistributionConfigWithTags"` + + // The streaming distribution's configuration information. 
+ // + // StreamingDistributionConfigWithTags is a required field + StreamingDistributionConfigWithTags *StreamingDistributionConfigWithTags `locationName:"StreamingDistributionConfigWithTags" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s CreateStreamingDistributionWithTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamingDistributionWithTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateStreamingDistributionWithTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStreamingDistributionWithTagsInput"} + if s.StreamingDistributionConfigWithTags == nil { + invalidParams.Add(request.NewErrParamRequired("StreamingDistributionConfigWithTags")) + } + if s.StreamingDistributionConfigWithTags != nil { + if err := s.StreamingDistributionConfigWithTags.Validate(); err != nil { + invalidParams.AddNested("StreamingDistributionConfigWithTags", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStreamingDistributionConfigWithTags sets the StreamingDistributionConfigWithTags field's value. +func (s *CreateStreamingDistributionWithTagsInput) SetStreamingDistributionConfigWithTags(v *StreamingDistributionConfigWithTags) *CreateStreamingDistributionWithTagsInput { + s.StreamingDistributionConfigWithTags = v + return s +} + +// The returned result of the corresponding request. +type CreateStreamingDistributionWithTagsOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistribution"` + + // The current version of the distribution created. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new streaming distribution resource just created. + Location *string `location:"header" locationName:"Location" type:"string"` + + // The streaming distribution's information. + StreamingDistribution *StreamingDistribution `type:"structure"` +} + +// String returns the string representation +func (s CreateStreamingDistributionWithTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamingDistributionWithTagsOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CreateStreamingDistributionWithTagsOutput) SetETag(v string) *CreateStreamingDistributionWithTagsOutput { + s.ETag = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateStreamingDistributionWithTagsOutput) SetLocation(v string) *CreateStreamingDistributionWithTagsOutput { + s.Location = &v + return s +} + +// SetStreamingDistribution sets the StreamingDistribution field's value. +func (s *CreateStreamingDistributionWithTagsOutput) SetStreamingDistribution(v *StreamingDistribution) *CreateStreamingDistributionWithTagsOutput { + s.StreamingDistribution = v + return s +} + +// A complex type that controls: +// +// * Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range +// with custom error messages before returning the response to the viewer. +// +// * How long CloudFront caches HTTP status codes in the 4xx and 5xx range. 
+// +// For more information about custom error pages, see Customizing Error Responses +// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) +// in the Amazon CloudFront Developer Guide. +type CustomErrorResponse struct { + _ struct{} `type:"structure"` + + // The minimum amount of time, in seconds, that you want CloudFront to cache + // the HTTP status code specified in ErrorCode. When this time period has elapsed, + // CloudFront queries your origin to see whether the problem that caused the + // error has been resolved and the requested object is now available. + // + // For more information, see Customizing Error Responses (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) + // in the Amazon CloudFront Developer Guide. + ErrorCachingMinTTL *int64 `type:"long"` + + // The HTTP status code for which you want to specify a custom error page and/or + // a caching duration. + // + // ErrorCode is a required field + ErrorCode *int64 `type:"integer" required:"true"` + + // The HTTP status code that you want CloudFront to return to the viewer along + // with the custom error page. There are a variety of reasons that you might + // want CloudFront to return a status code different from the status code that + // your origin returned to CloudFront, for example: + // + // * Some Internet devices (some firewalls and corporate proxies, for example) + // intercept HTTP 4xx and 5xx and prevent the response from being returned + // to the viewer. If you substitute 200, the response typically won't be + // intercepted. + // + // * If you don't care about distinguishing among different client errors + // or server errors, you can specify 400 or 500 as the ResponseCode for all + // 4xx or 5xx errors. + // + // * You might want to return a 200 status code (OK) and static website so + // your customers don't know that your website is down. + // + // If you specify a value for ResponseCode, you must also specify a value for + // ResponsePagePath. + ResponseCode *string `type:"string"` + + // The path to the custom error page that you want CloudFront to return to a + // viewer when your origin returns the HTTP status code specified by ErrorCode, + // for example, /4xx-errors/403-forbidden.html. If you want to store your objects + // and your custom error pages in different locations, your distribution must + // include a cache behavior for which the following is true: + // + // * The value of PathPattern matches the path to your custom error messages. + // For example, suppose you saved custom error pages for 4xx errors in an + // Amazon S3 bucket in a directory named /4xx-errors. Your distribution must + // include a cache behavior for which the path pattern routes requests for + // your custom error pages to that location, for example, /4xx-errors/*. + // + // * The value of TargetOriginId specifies the value of the ID element for + // the origin that contains your custom error pages. + // + // If you specify a value for ResponsePagePath, you must also specify a value + // for ResponseCode. + // + // We recommend that you store custom error pages in an Amazon S3 bucket. If + // you store custom error pages on an HTTP server and the server starts to return + // 5xx errors, CloudFront can't get the files that you want to return to viewers + // because the origin server is unavailable. 
+ ResponsePagePath *string `type:"string"` +} + +// String returns the string representation +func (s CustomErrorResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomErrorResponse) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomErrorResponse) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomErrorResponse"} + if s.ErrorCode == nil { + invalidParams.Add(request.NewErrParamRequired("ErrorCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetErrorCachingMinTTL sets the ErrorCachingMinTTL field's value. +func (s *CustomErrorResponse) SetErrorCachingMinTTL(v int64) *CustomErrorResponse { + s.ErrorCachingMinTTL = &v + return s +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *CustomErrorResponse) SetErrorCode(v int64) *CustomErrorResponse { + s.ErrorCode = &v + return s +} + +// SetResponseCode sets the ResponseCode field's value. +func (s *CustomErrorResponse) SetResponseCode(v string) *CustomErrorResponse { + s.ResponseCode = &v + return s +} + +// SetResponsePagePath sets the ResponsePagePath field's value. +func (s *CustomErrorResponse) SetResponsePagePath(v string) *CustomErrorResponse { + s.ResponsePagePath = &v + return s +} + +// A complex type that controls: +// +// * Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range +// with custom error messages before returning the response to the viewer. +// +// * How long CloudFront caches HTTP status codes in the 4xx and 5xx range. +// +// For more information about custom error pages, see Customizing Error Responses +// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) +// in the Amazon CloudFront Developer Guide. +type CustomErrorResponses struct { + _ struct{} `type:"structure"` + + // A complex type that contains a CustomErrorResponse element for each HTTP + // status code for which you want to specify a custom error page and/or a caching + // duration. + Items []*CustomErrorResponse `locationNameList:"CustomErrorResponse" type:"list"` + + // The number of HTTP status codes for which you want to specify a custom error + // page and/or a caching duration. If Quantity is 0, you can omit Items. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s CustomErrorResponses) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomErrorResponses) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomErrorResponses) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomErrorResponses"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetItems sets the Items field's value. +func (s *CustomErrorResponses) SetItems(v []*CustomErrorResponse) *CustomErrorResponses { + s.Items = v + return s +} + +// SetQuantity sets the Quantity field's value. 
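+// ---------------------------------------------------------------------------
+// Editor's note (illustrative sketch, not part of the generated SDK source):
+// constructing the CustomErrorResponses/CustomErrorResponse types documented
+// above for a custom 404 page. Per the field documentation, ResponseCode and
+// ResponsePagePath must be specified together, and Quantity must match the
+// number of Items.
+//
+//	errorResponses := &cloudfront.CustomErrorResponses{
+//		Quantity: aws.Int64(1),
+//		Items: []*cloudfront.CustomErrorResponse{{
+//			ErrorCode:          aws.Int64(404), // required
+//			ResponseCode:       aws.String("404"),
+//			ResponsePagePath:   aws.String("/4xx-errors/404-not-found.html"),
+//			ErrorCachingMinTTL: aws.Int64(300), // seconds
+//		}},
+//	}
+// ---------------------------------------------------------------------------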
+func (s *CustomErrorResponses) SetQuantity(v int64) *CustomErrorResponses { + s.Quantity = &v return s } -// A complex type that contains information about CNAMEs (alternate domain names), -// if any, for this distribution. -type Aliases struct { +// A complex type that contains the list of Custom Headers for each origin. +type CustomHeaders struct { _ struct{} `type:"structure"` - // A complex type that contains the CNAME aliases, if any, that you want to - // associate with this distribution. - Items []*string `locationNameList:"CNAME" type:"list"` + // Optional: A list that contains one OriginCustomHeader element for each custom + // header that you want CloudFront to forward to the origin. If Quantity is + // 0, omit Items. + Items []*OriginCustomHeader `locationNameList:"OriginCustomHeader" type:"list"` - // The number of CNAME aliases, if any, that you want to associate with this - // distribution. + // The number of custom headers, if any, for this distribution. // // Quantity is a required field Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s Aliases) String() string { +func (s CustomHeaders) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Aliases) GoString() string { +func (s CustomHeaders) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *Aliases) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Aliases"} +func (s *CustomHeaders) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomHeaders"} if s.Quantity == nil { invalidParams.Add(request.NewErrParamRequired("Quantity")) } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -5040,83 +10218,100 @@ func (s *Aliases) Validate() error { } // SetItems sets the Items field's value. -func (s *Aliases) SetItems(v []*string) *Aliases { +func (s *CustomHeaders) SetItems(v []*OriginCustomHeader) *CustomHeaders { s.Items = v return s } // SetQuantity sets the Quantity field's value. -func (s *Aliases) SetQuantity(v int64) *Aliases { +func (s *CustomHeaders) SetQuantity(v int64) *CustomHeaders { s.Quantity = &v return s } -// A complex type that controls which HTTP methods CloudFront processes and -// forwards to your Amazon S3 bucket or your custom origin. There are three -// choices: -// -// * CloudFront forwards only GET and HEAD requests. -// -// * CloudFront forwards only GET, HEAD, and OPTIONS requests. -// -// * CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE -// requests. -// -// If you pick the third choice, you may need to restrict access to your Amazon -// S3 bucket or to your custom origin so users can't perform operations that -// you don't want them to. For example, you might not want users to have permissions -// to delete objects from your origin. -type AllowedMethods struct { +// A custom origin. A custom origin is any origin that is not an Amazon S3 bucket, +// with one exception. An Amazon S3 bucket that is configured with static website +// hosting (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) +// is a custom origin. 
+type CustomOriginConfig struct { _ struct{} `type:"structure"` - // A complex type that controls whether CloudFront caches the response to requests - // using the specified HTTP methods. There are two choices: + // The HTTP port that CloudFront uses to connect to the origin. Specify the + // HTTP port that the origin listens on. // - // * CloudFront caches responses to GET and HEAD requests. + // HTTPPort is a required field + HTTPPort *int64 `type:"integer" required:"true"` + + // The HTTPS port that CloudFront uses to connect to the origin. Specify the + // HTTPS port that the origin listens on. // - // * CloudFront caches responses to GET, HEAD, and OPTIONS requests. + // HTTPSPort is a required field + HTTPSPort *int64 `type:"integer" required:"true"` + + // Specifies how long, in seconds, CloudFront persists its connection to the + // origin. The minimum timeout is 1 second, the maximum is 60 seconds, and the + // default (if you don’t specify otherwise) is 5 seconds. // - // If you pick the second choice for your Amazon S3 Origin, you may need to - // forward Access-Control-Request-Method, Access-Control-Request-Headers, and - // Origin headers for the responses to be cached correctly. - CachedMethods *CachedMethods `type:"structure"` + // For more information, see Origin Keep-alive Timeout (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginKeepaliveTimeout) + // in the Amazon CloudFront Developer Guide. + OriginKeepaliveTimeout *int64 `type:"integer"` - // A complex type that contains the HTTP methods that you want CloudFront to - // process and forward to your origin. + // Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to + // the origin. Valid values are: // - // Items is a required field - Items []*string `locationNameList:"Method" type:"list" required:"true"` + // * http-only – CloudFront always uses HTTP to connect to the origin. + // + // * match-viewer – CloudFront connects to the origin using the same protocol + // that the viewer used to connect to CloudFront. + // + // * https-only – CloudFront always uses HTTPS to connect to the origin. + // + // OriginProtocolPolicy is a required field + OriginProtocolPolicy *string `type:"string" required:"true" enum:"OriginProtocolPolicy"` - // The number of HTTP methods that you want CloudFront to forward to your origin. - // Valid values are 2 (for GET and HEAD requests), 3 (for GET, HEAD, and OPTIONS - // requests) and 7 (for GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE requests). + // Specifies how long, in seconds, CloudFront waits for a response from the + // origin. This is also known as the origin response timeout. The minimum timeout + // is 1 second, the maximum is 60 seconds, and the default (if you don’t specify + // otherwise) is 30 seconds. // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // For more information, see Origin Response Timeout (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginResponseTimeout) + // in the Amazon CloudFront Developer Guide. + OriginReadTimeout *int64 `type:"integer"` + + // Specifies the minimum SSL/TLS protocol that CloudFront uses when connecting + // to your origin over HTTPS. Valid values include SSLv3, TLSv1, TLSv1.1, and + // TLSv1.2. 
+ // + // For more information, see Minimum Origin SSL Protocol (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginSSLProtocols) + // in the Amazon CloudFront Developer Guide. + OriginSslProtocols *OriginSslProtocols `type:"structure"` } // String returns the string representation -func (s AllowedMethods) String() string { +func (s CustomOriginConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AllowedMethods) GoString() string { +func (s CustomOriginConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AllowedMethods) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AllowedMethods"} - if s.Items == nil { - invalidParams.Add(request.NewErrParamRequired("Items")) +func (s *CustomOriginConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomOriginConfig"} + if s.HTTPPort == nil { + invalidParams.Add(request.NewErrParamRequired("HTTPPort")) } - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) + if s.HTTPSPort == nil { + invalidParams.Add(request.NewErrParamRequired("HTTPSPort")) } - if s.CachedMethods != nil { - if err := s.CachedMethods.Validate(); err != nil { - invalidParams.AddNested("CachedMethods", err.(request.ErrInvalidParams)) + if s.OriginProtocolPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("OriginProtocolPolicy")) + } + if s.OriginSslProtocols != nil { + if err := s.OriginSslProtocols.Validate(); err != nil { + invalidParams.AddNested("OriginSslProtocols", err.(request.ErrInvalidParams)) } } @@ -5126,51 +10321,47 @@ func (s *AllowedMethods) Validate() error { return nil } -// SetCachedMethods sets the CachedMethods field's value. -func (s *AllowedMethods) SetCachedMethods(v *CachedMethods) *AllowedMethods { - s.CachedMethods = v +// SetHTTPPort sets the HTTPPort field's value. +func (s *CustomOriginConfig) SetHTTPPort(v int64) *CustomOriginConfig { + s.HTTPPort = &v return s } -// SetItems sets the Items field's value. -func (s *AllowedMethods) SetItems(v []*string) *AllowedMethods { - s.Items = v +// SetHTTPSPort sets the HTTPSPort field's value. +func (s *CustomOriginConfig) SetHTTPSPort(v int64) *CustomOriginConfig { + s.HTTPSPort = &v return s } -// SetQuantity sets the Quantity field's value. -func (s *AllowedMethods) SetQuantity(v int64) *AllowedMethods { - s.Quantity = &v +// SetOriginKeepaliveTimeout sets the OriginKeepaliveTimeout field's value. +func (s *CustomOriginConfig) SetOriginKeepaliveTimeout(v int64) *CustomOriginConfig { + s.OriginKeepaliveTimeout = &v return s } -// A complex type that describes how CloudFront processes requests. -// -// You must create at least as many cache behaviors (including the default cache -// behavior) as you have origins if you want CloudFront to distribute objects -// from all of the origins. Each cache behavior specifies the one origin from -// which you want CloudFront to get objects. If you have two origins and only -// the default cache behavior, the default cache behavior will cause CloudFront -// to get objects from one of the origins, but the other origin is never used. 
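+// ---------------------------------------------------------------------------
+// Editor's note (illustrative sketch, not part of the generated SDK source):
+// a minimal CustomOriginConfig using only the fields documented above.
+// HTTPPort, HTTPSPort, and OriginProtocolPolicy are the required fields
+// checked by Validate; the timeout values follow the documented defaults.
+//
+//	origin := &cloudfront.CustomOriginConfig{
+//		HTTPPort:               aws.Int64(80),
+//		HTTPSPort:              aws.Int64(443),
+//		OriginProtocolPolicy:   aws.String("https-only"),
+//		OriginKeepaliveTimeout: aws.Int64(5),  // seconds (documented default)
+//		OriginReadTimeout:      aws.Int64(30), // seconds (documented default)
+//	}
+// ---------------------------------------------------------------------------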
-// -// For the current limit on the number of cache behaviors that you can add to -// a distribution, see Amazon CloudFront Limits (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_cloudfront) -// in the AWS General Reference. -// -// If you don't want to specify any cache behaviors, include only an empty CacheBehaviors -// element. Don't include an empty CacheBehavior element, or CloudFront returns -// a MalformedXML error. -// -// To delete all cache behaviors in an existing distribution, update the distribution -// configuration and include only an empty CacheBehaviors element. -// -// To add, change, or remove one or more cache behaviors, update the distribution -// configuration and specify all of the cache behaviors that you want to include -// in the updated distribution. -// -// For more information about cache behaviors, see Cache Behaviors (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesCacheBehavior) -// in the Amazon CloudFront Developer Guide. -type CacheBehavior struct { +// SetOriginProtocolPolicy sets the OriginProtocolPolicy field's value. +func (s *CustomOriginConfig) SetOriginProtocolPolicy(v string) *CustomOriginConfig { + s.OriginProtocolPolicy = &v + return s +} + +// SetOriginReadTimeout sets the OriginReadTimeout field's value. +func (s *CustomOriginConfig) SetOriginReadTimeout(v int64) *CustomOriginConfig { + s.OriginReadTimeout = &v + return s +} + +// SetOriginSslProtocols sets the OriginSslProtocols field's value. +func (s *CustomOriginConfig) SetOriginSslProtocols(v *OriginSslProtocols) *CustomOriginConfig { + s.OriginSslProtocols = v + return s +} + +// A complex type that describes the default cache behavior if you don’t specify +// a CacheBehavior element or if request URLs don’t match any of the values +// of PathPattern in CacheBehavior elements. You must create exactly one default +// cache behavior. +type DefaultCacheBehavior struct { _ struct{} `type:"structure"` // A complex type that controls which HTTP methods CloudFront processes and @@ -5190,12 +10381,24 @@ type CacheBehavior struct { // to delete objects from your origin. AllowedMethods *AllowedMethods `type:"structure"` + // The unique identifier of the cache policy that is attached to the default + // cache behavior. For more information, see Creating cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // or Using the managed cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) + // in the Amazon CloudFront Developer Guide. + CachePolicyId *string `type:"string"` + // Whether you want CloudFront to automatically compress certain files for this // cache behavior. If so, specify true; if not, specify false. For more information, // see Serving Compressed Files (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/ServingCompressedFiles.html) // in the Amazon CloudFront Developer Guide. Compress *bool `type:"boolean"` + // This field is deprecated. We recommend that you use the DefaultTTL field + // in a cache policy instead of this field. 
For more information, see Creating + // cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // or Using the managed cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) + // in the Amazon CloudFront Developer Guide. + // // The default amount of time that you want objects to stay in CloudFront caches // before CloudFront forwards another request to your origin to determine whether // the object has been updated. The value that you specify applies only when @@ -5203,23 +10406,47 @@ type CacheBehavior struct { // s-maxage, and Expires to objects. For more information, see Managing How // Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) // in the Amazon CloudFront Developer Guide. - DefaultTTL *int64 `type:"long"` + // + // Deprecated: DefaultTTL has been deprecated + DefaultTTL *int64 `deprecated:"true" type:"long"` // The value of ID for the field-level encryption configuration that you want - // CloudFront to use for encrypting specific fields of data for a cache behavior - // or for the default cache behavior in your distribution. + // CloudFront to use for encrypting specific fields of data for the default + // cache behavior. FieldLevelEncryptionId *string `type:"string"` + // This field is deprecated. We recommend that you use a cache policy or an + // origin request policy instead of this field. For more information, see Working + // with policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/working-with-policies.html) + // in the Amazon CloudFront Developer Guide. + // + // If you want to include values in the cache key, use a cache policy. For more + // information, see Creating cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // or Using the managed cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) + // in the Amazon CloudFront Developer Guide. + // + // If you want to send values to the origin but not include them in the cache + // key, use an origin request policy. For more information, see Creating origin + // request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) + // or Using the managed origin request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-origin-request-policies.html) + // in the Amazon CloudFront Developer Guide. + // // A complex type that specifies how CloudFront handles query strings, cookies, // and HTTP headers. // - // ForwardedValues is a required field - ForwardedValues *ForwardedValues `type:"structure" required:"true"` + // Deprecated: ForwardedValues has been deprecated + ForwardedValues *ForwardedValues `deprecated:"true" type:"structure"` // A complex type that contains zero or more Lambda function associations for // a cache behavior. LambdaFunctionAssociations *LambdaFunctionAssociations `type:"structure"` + // This field is deprecated. We recommend that you use the MaxTTL field in a + // cache policy instead of this field. 
For more information, see Creating cache + // policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // or Using the managed cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) + // in the Amazon CloudFront Developer Guide. + // // The maximum amount of time that you want objects to stay in CloudFront caches // before CloudFront forwards another request to your origin to determine whether // the object has been updated. The value that you specify applies only when @@ -5227,39 +10454,41 @@ type CacheBehavior struct { // s-maxage, and Expires to objects. For more information, see Managing How // Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) // in the Amazon CloudFront Developer Guide. - MaxTTL *int64 `type:"long"` - - // The minimum amount of time that you want objects to stay in CloudFront caches - // before CloudFront forwards another request to your origin to determine whether - // the object has been updated. For more information, see Managing How Long - // Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) - // in the Amazon CloudFront Developer Guide. - // - // You must specify 0 for MinTTL if you configure CloudFront to forward all - // headers to your origin (under Headers, if you specify 1 for Quantity and - // * for Name). // - // MinTTL is a required field - MinTTL *int64 `type:"long" required:"true"` + // Deprecated: MaxTTL has been deprecated + MaxTTL *int64 `deprecated:"true" type:"long"` - // The pattern (for example, images/*.jpg) that specifies which requests to - // apply the behavior to. When CloudFront receives a viewer request, the requested - // path is compared with path patterns in the order in which cache behaviors - // are listed in the distribution. - // - // You can optionally include a slash (/) at the beginning of the path pattern. - // For example, /images/*.jpg. CloudFront behavior is the same with or without - // the leading /. - // - // The path pattern for the default cache behavior is * and cannot be changed. - // If the request for an object does not match the path pattern for any cache - // behaviors, CloudFront applies the behavior in the default cache behavior. + // This field is deprecated. We recommend that you use the MinTTL field in a + // cache policy instead of this field. For more information, see Creating cache + // policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // or Using the managed cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-cache-policies.html) + // in the Amazon CloudFront Developer Guide. // - // For more information, see Path Pattern (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesPathPattern) + // The minimum amount of time that you want objects to stay in CloudFront caches + // before CloudFront forwards another request to your origin to determine whether + // the object has been updated. 
For more information, see Managing How Long + // Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) // in the Amazon CloudFront Developer Guide. // - // PathPattern is a required field - PathPattern *string `type:"string" required:"true"` + // You must specify 0 for MinTTL if you configure CloudFront to forward all + // headers to your origin (under Headers, if you specify 1 for Quantity and + // * for Name). + // + // Deprecated: MinTTL has been deprecated + MinTTL *int64 `deprecated:"true" type:"long"` + + // The unique identifier of the origin request policy that is attached to the + // default cache behavior. For more information, see Creating origin request + // policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) + // or Using the managed origin request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-managed-origin-request-policies.html) + // in the Amazon CloudFront Developer Guide. + OriginRequestPolicyId *string `type:"string"` + + // The Amazon Resource Name (ARN) of the real-time log configuration that is + // attached to this cache behavior. For more information, see Real-time logs + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/real-time-logs.html) + // in the Amazon CloudFront Developer Guide. + RealtimeLogConfigArn *string `type:"string"` // Indicates whether you want to distribute media files in the Microsoft Smooth // Streaming format using the origin that is associated with this cache behavior. @@ -5269,8 +10498,7 @@ type CacheBehavior struct { SmoothStreaming *bool `type:"boolean"` // The value of ID for the origin that you want CloudFront to route requests - // to when a request matches the path pattern either for a cache behavior or - // for the default cache behavior in your distribution. + // to when they use the default cache behavior. // // TargetOriginId is a required field TargetOriginId *string `type:"string" required:"true"` @@ -5281,14 +10509,15 @@ type CacheBehavior struct { // If you want to require signed URLs in requests for objects in the target // origin that match the PathPattern for this cache behavior, specify true for // Enabled, and specify the applicable values for Quantity and Items. For more - // information, see Serving Private Content through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) + // information, see Serving Private Content with Signed URLs and Signed Cookies + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) // in the Amazon CloudFront Developer Guide. // - // If you don't want to require signed URLs in requests for objects that match + // If you don’t want to require signed URLs in requests for objects that match // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. // // To add, change, or remove one or more trusted signers, change Enabled to - // true (if it's currently false), change Quantity as applicable, and specify + // true (if it’s currently false), change Quantity as applicable, and specify // all of the trusted signers that you want to include in the updated distribution. 
// // TrustedSigners is a required field @@ -5307,18 +10536,17 @@ type CacheBehavior struct { // * https-only: If a viewer sends an HTTP request, CloudFront returns an // HTTP status code of 403 (Forbidden). // - // For more information about requiring the HTTPS protocol, see Using an HTTPS - // Connection to Access Your Objects (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html) + // For more information about requiring the HTTPS protocol, see Requiring HTTPS + // Between Viewers and CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/using-https-viewers-to-cloudfront.html) // in the Amazon CloudFront Developer Guide. // // The only way to guarantee that viewers retrieve an object that was fetched // from the origin using HTTPS is never to use any other protocol to fetch the // object. If you have recently changed from HTTP to HTTPS, we recommend that - // you clear your objects' cache because cached objects are protocol agnostic. + // you clear your objects’ cache because cached objects are protocol agnostic. // That means that an edge location will return an object from the cache regardless // of whether the current request protocol matches the protocol used previously. - // For more information, see Managing How Long Content Stays in an Edge Cache - // (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) + // For more information, see Managing Cache Expiration (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) // in the Amazon CloudFront Developer Guide. // // ViewerProtocolPolicy is a required field @@ -5326,27 +10554,18 @@ type CacheBehavior struct { } // String returns the string representation -func (s CacheBehavior) String() string { +func (s DefaultCacheBehavior) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CacheBehavior) GoString() string { +func (s DefaultCacheBehavior) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CacheBehavior) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CacheBehavior"} - if s.ForwardedValues == nil { - invalidParams.Add(request.NewErrParamRequired("ForwardedValues")) - } - if s.MinTTL == nil { - invalidParams.Add(request.NewErrParamRequired("MinTTL")) - } - if s.PathPattern == nil { - invalidParams.Add(request.NewErrParamRequired("PathPattern")) - } +func (s *DefaultCacheBehavior) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DefaultCacheBehavior"} if s.TargetOriginId == nil { invalidParams.Add(request.NewErrParamRequired("TargetOriginId")) } @@ -5384,122 +10603,194 @@ func (s *CacheBehavior) Validate() error { } // SetAllowedMethods sets the AllowedMethods field's value. -func (s *CacheBehavior) SetAllowedMethods(v *AllowedMethods) *CacheBehavior { +func (s *DefaultCacheBehavior) SetAllowedMethods(v *AllowedMethods) *DefaultCacheBehavior { s.AllowedMethods = v return s } +// SetCachePolicyId sets the CachePolicyId field's value. +func (s *DefaultCacheBehavior) SetCachePolicyId(v string) *DefaultCacheBehavior { + s.CachePolicyId = &v + return s +} + // SetCompress sets the Compress field's value. -func (s *CacheBehavior) SetCompress(v bool) *CacheBehavior { +func (s *DefaultCacheBehavior) SetCompress(v bool) *DefaultCacheBehavior { s.Compress = &v return s } // SetDefaultTTL sets the DefaultTTL field's value. 
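// A minimal caller-side sketch of building the DefaultCacheBehavior defined above with
// the fluent setters that follow, using a cache policy instead of the deprecated
// ForwardedValues/TTL fields. The origin ID and cache policy ID are hypothetical
// placeholders; a real cache policy ID would come from ListCachePolicies or CreateCachePolicy.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	behavior := (&cloudfront.DefaultCacheBehavior{}).
		SetTargetOriginId("my-s3-origin"). // must match an Origin in the same DistributionConfig
		SetViewerProtocolPolicy(cloudfront.ViewerProtocolPolicyRedirectToHttps).
		SetCachePolicyId("example-cache-policy-id"). // hypothetical; stands in for ForwardedValues and the TTL fields
		SetCompress(true).
		// No signed URLs: Enabled false and Quantity 0, as described in the TrustedSigners docs.
		SetTrustedSigners((&cloudfront.TrustedSigners{}).SetEnabled(false).SetQuantity(0))

	if err := behavior.Validate(); err != nil {
		fmt.Println("invalid default cache behavior:", err)
		return
	}
	fmt.Println(behavior.String())
}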
-func (s *CacheBehavior) SetDefaultTTL(v int64) *CacheBehavior { +func (s *DefaultCacheBehavior) SetDefaultTTL(v int64) *DefaultCacheBehavior { s.DefaultTTL = &v return s } // SetFieldLevelEncryptionId sets the FieldLevelEncryptionId field's value. -func (s *CacheBehavior) SetFieldLevelEncryptionId(v string) *CacheBehavior { +func (s *DefaultCacheBehavior) SetFieldLevelEncryptionId(v string) *DefaultCacheBehavior { s.FieldLevelEncryptionId = &v return s } // SetForwardedValues sets the ForwardedValues field's value. -func (s *CacheBehavior) SetForwardedValues(v *ForwardedValues) *CacheBehavior { +func (s *DefaultCacheBehavior) SetForwardedValues(v *ForwardedValues) *DefaultCacheBehavior { s.ForwardedValues = v return s } // SetLambdaFunctionAssociations sets the LambdaFunctionAssociations field's value. -func (s *CacheBehavior) SetLambdaFunctionAssociations(v *LambdaFunctionAssociations) *CacheBehavior { +func (s *DefaultCacheBehavior) SetLambdaFunctionAssociations(v *LambdaFunctionAssociations) *DefaultCacheBehavior { s.LambdaFunctionAssociations = v return s } // SetMaxTTL sets the MaxTTL field's value. -func (s *CacheBehavior) SetMaxTTL(v int64) *CacheBehavior { +func (s *DefaultCacheBehavior) SetMaxTTL(v int64) *DefaultCacheBehavior { s.MaxTTL = &v return s } // SetMinTTL sets the MinTTL field's value. -func (s *CacheBehavior) SetMinTTL(v int64) *CacheBehavior { +func (s *DefaultCacheBehavior) SetMinTTL(v int64) *DefaultCacheBehavior { s.MinTTL = &v return s } -// SetPathPattern sets the PathPattern field's value. -func (s *CacheBehavior) SetPathPattern(v string) *CacheBehavior { - s.PathPattern = &v +// SetOriginRequestPolicyId sets the OriginRequestPolicyId field's value. +func (s *DefaultCacheBehavior) SetOriginRequestPolicyId(v string) *DefaultCacheBehavior { + s.OriginRequestPolicyId = &v + return s +} + +// SetRealtimeLogConfigArn sets the RealtimeLogConfigArn field's value. +func (s *DefaultCacheBehavior) SetRealtimeLogConfigArn(v string) *DefaultCacheBehavior { + s.RealtimeLogConfigArn = &v return s } // SetSmoothStreaming sets the SmoothStreaming field's value. -func (s *CacheBehavior) SetSmoothStreaming(v bool) *CacheBehavior { +func (s *DefaultCacheBehavior) SetSmoothStreaming(v bool) *DefaultCacheBehavior { s.SmoothStreaming = &v return s } // SetTargetOriginId sets the TargetOriginId field's value. -func (s *CacheBehavior) SetTargetOriginId(v string) *CacheBehavior { +func (s *DefaultCacheBehavior) SetTargetOriginId(v string) *DefaultCacheBehavior { s.TargetOriginId = &v return s } // SetTrustedSigners sets the TrustedSigners field's value. -func (s *CacheBehavior) SetTrustedSigners(v *TrustedSigners) *CacheBehavior { +func (s *DefaultCacheBehavior) SetTrustedSigners(v *TrustedSigners) *DefaultCacheBehavior { s.TrustedSigners = v return s } // SetViewerProtocolPolicy sets the ViewerProtocolPolicy field's value. -func (s *CacheBehavior) SetViewerProtocolPolicy(v string) *CacheBehavior { +func (s *DefaultCacheBehavior) SetViewerProtocolPolicy(v string) *DefaultCacheBehavior { s.ViewerProtocolPolicy = &v return s } -// A complex type that contains zero or more CacheBehavior elements. -type CacheBehaviors struct { +type DeleteCachePolicyInput struct { + _ struct{} `locationName:"DeleteCachePolicyRequest" type:"structure"` + + // The unique identifier for the cache policy that you are deleting. To get + // the identifier, you can use ListCachePolicies. 
+ // + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The version of the cache policy that you are deleting. The version is the + // cache policy’s ETag value, which you can get using ListCachePolicies, GetCachePolicy, + // or GetCachePolicyConfig. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` +} + +// String returns the string representation +func (s DeleteCachePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCachePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCachePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCachePolicyInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *DeleteCachePolicyInput) SetId(v string) *DeleteCachePolicyInput { + s.Id = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *DeleteCachePolicyInput) SetIfMatch(v string) *DeleteCachePolicyInput { + s.IfMatch = &v + return s +} + +type DeleteCachePolicyOutput struct { _ struct{} `type:"structure"` +} - // Optional: A complex type that contains cache behaviors for this distribution. - // If Quantity is 0, you can omit Items. - Items []*CacheBehavior `locationNameList:"CacheBehavior" type:"list"` +// String returns the string representation +func (s DeleteCachePolicyOutput) String() string { + return awsutil.Prettify(s) +} - // The number of cache behaviors for this distribution. +// GoString returns the string representation +func (s DeleteCachePolicyOutput) GoString() string { + return s.String() +} + +// Deletes a origin access identity. +type DeleteCloudFrontOriginAccessIdentityInput struct { + _ struct{} `locationName:"DeleteCloudFrontOriginAccessIdentityRequest" type:"structure"` + + // The origin access identity's ID. // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header you received from a previous GET or PUT request. + // For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` } // String returns the string representation -func (s CacheBehaviors) String() string { +func (s DeleteCloudFrontOriginAccessIdentityInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CacheBehaviors) GoString() string { +func (s DeleteCloudFrontOriginAccessIdentityInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
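// A minimal caller-side sketch for the DeleteCachePolicy input/output types defined above.
// The delete call must carry the policy's current ETag in the If-Match header, so the
// sketch reads the policy back with GetCachePolicy first. The policy ID is a hypothetical
// placeholder and error handling is reduced to log.Fatal.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))
	id := aws.String("example-cache-policy-id") // hypothetical; normally obtained from ListCachePolicies

	// Fetch the policy's current ETag.
	got, err := svc.GetCachePolicy(&cloudfront.GetCachePolicyInput{Id: id})
	if err != nil {
		log.Fatal(err)
	}

	// Delete, passing the ETag as If-Match.
	if _, err := svc.DeleteCachePolicy(&cloudfront.DeleteCachePolicyInput{
		Id:      id,
		IfMatch: got.ETag,
	}); err != nil {
		log.Fatal(err)
	}
}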
-func (s *CacheBehaviors) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CacheBehaviors"} - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) +func (s *DeleteCloudFrontOriginAccessIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCloudFrontOriginAccessIdentityInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) } - if s.Items != nil { - for i, v := range s.Items { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) - } - } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -5508,63 +10799,163 @@ func (s *CacheBehaviors) Validate() error { return nil } -// SetItems sets the Items field's value. -func (s *CacheBehaviors) SetItems(v []*CacheBehavior) *CacheBehaviors { - s.Items = v +// SetId sets the Id field's value. +func (s *DeleteCloudFrontOriginAccessIdentityInput) SetId(v string) *DeleteCloudFrontOriginAccessIdentityInput { + s.Id = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *DeleteCloudFrontOriginAccessIdentityInput) SetIfMatch(v string) *DeleteCloudFrontOriginAccessIdentityInput { + s.IfMatch = &v + return s +} + +type DeleteCloudFrontOriginAccessIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteCloudFrontOriginAccessIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCloudFrontOriginAccessIdentityOutput) GoString() string { + return s.String() +} + +// This action deletes a web distribution. To delete a web distribution using +// the CloudFront API, perform the following steps. +// +// To delete a web distribution using the CloudFront API: +// +// Disable the web distribution +// +// Submit a GET Distribution Config request to get the current configuration +// and the Etag header for the distribution. +// +// Update the XML document that was returned in the response to your GET Distribution +// Config request to change the value of Enabled to false. +// +// Submit a PUT Distribution Config request to update the configuration for +// your distribution. In the request body, include the XML document that you +// updated in Step 3. Set the value of the HTTP If-Match header to the value +// of the ETag header that CloudFront returned when you submitted the GET Distribution +// Config request in Step 2. +// +// Review the response to the PUT Distribution Config request to confirm that +// the distribution was successfully disabled. +// +// Submit a GET Distribution request to confirm that your changes have propagated. +// When propagation is complete, the value of Status is Deployed. +// +// Submit a DELETE Distribution request. Set the value of the HTTP If-Match +// header to the value of the ETag header that CloudFront returned when you +// submitted the GET Distribution Config request in Step 6. +// +// Review the response to your DELETE Distribution request to confirm that the +// distribution was successfully deleted. +// +// For information about deleting a distribution using the CloudFront console, +// see Deleting a Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html) +// in the Amazon CloudFront Developer Guide. 
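// The delete-distribution steps documented above map onto the SDK calls roughly as
// sketched below: fetch the configuration and its ETag, disable the distribution, wait
// for the change to reach Deployed, then delete with the latest ETag. The distribution
// ID is a hypothetical placeholder, and the waiter stands in for polling Status by hand.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))
	id := aws.String("EDFDVBD632BHDS5") // hypothetical distribution ID

	// Get the current configuration and the ETag header.
	cfgOut, err := svc.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{Id: id})
	if err != nil {
		log.Fatal(err)
	}

	// Disable the distribution, sending the ETag as If-Match.
	cfg := cfgOut.DistributionConfig
	cfg.SetEnabled(false)
	updOut, err := svc.UpdateDistribution(&cloudfront.UpdateDistributionInput{
		Id:                 id,
		IfMatch:            cfgOut.ETag,
		DistributionConfig: cfg,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Wait until the change has propagated (Status is Deployed).
	if err := svc.WaitUntilDistributionDeployed(&cloudfront.GetDistributionInput{Id: id}); err != nil {
		log.Fatal(err)
	}

	// Delete, using the ETag returned when the distribution was disabled.
	if _, err := svc.DeleteDistribution(&cloudfront.DeleteDistributionInput{
		Id:      id,
		IfMatch: updOut.ETag,
	}); err != nil {
		log.Fatal(err)
	}
}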
+type DeleteDistributionInput struct { + _ struct{} `locationName:"DeleteDistributionRequest" type:"structure"` + + // The distribution ID. + // + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header that you received when you disabled the distribution. + // For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` +} + +// String returns the string representation +func (s DeleteDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDistributionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDistributionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *DeleteDistributionInput) SetId(v string) *DeleteDistributionInput { + s.Id = &v return s } -// SetQuantity sets the Quantity field's value. -func (s *CacheBehaviors) SetQuantity(v int64) *CacheBehaviors { - s.Quantity = &v +// SetIfMatch sets the IfMatch field's value. +func (s *DeleteDistributionInput) SetIfMatch(v string) *DeleteDistributionInput { + s.IfMatch = &v return s } -// A complex type that controls whether CloudFront caches the response to requests -// using the specified HTTP methods. There are two choices: -// -// * CloudFront caches responses to GET and HEAD requests. -// -// * CloudFront caches responses to GET, HEAD, and OPTIONS requests. -// -// If you pick the second choice for your Amazon S3 Origin, you may need to -// forward Access-Control-Request-Method, Access-Control-Request-Headers, and -// Origin headers for the responses to be cached correctly. -type CachedMethods struct { +type DeleteDistributionOutput struct { _ struct{} `type:"structure"` +} - // A complex type that contains the HTTP methods that you want CloudFront to - // cache responses to. - // - // Items is a required field - Items []*string `locationNameList:"Method" type:"list" required:"true"` +// String returns the string representation +func (s DeleteDistributionOutput) String() string { + return awsutil.Prettify(s) +} - // The number of HTTP methods for which you want CloudFront to cache responses. - // Valid values are 2 (for caching responses to GET and HEAD requests) and 3 - // (for caching responses to GET, HEAD, and OPTIONS requests). +// GoString returns the string representation +func (s DeleteDistributionOutput) GoString() string { + return s.String() +} + +type DeleteFieldLevelEncryptionConfigInput struct { + _ struct{} `locationName:"DeleteFieldLevelEncryptionConfigRequest" type:"structure"` + + // The ID of the configuration you want to delete from CloudFront. // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header that you received when retrieving the configuration + // identity to delete. For example: E2QWRUHAPOMQZL. 
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"` } // String returns the string representation -func (s CachedMethods) String() string { +func (s DeleteFieldLevelEncryptionConfigInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CachedMethods) GoString() string { +func (s DeleteFieldLevelEncryptionConfigInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CachedMethods) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CachedMethods"} - if s.Items == nil { - invalidParams.Add(request.NewErrParamRequired("Items")) +func (s *DeleteFieldLevelEncryptionConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFieldLevelEncryptionConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) } - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -5573,54 +10964,63 @@ func (s *CachedMethods) Validate() error { return nil } -// SetItems sets the Items field's value. -func (s *CachedMethods) SetItems(v []*string) *CachedMethods { - s.Items = v +// SetId sets the Id field's value. +func (s *DeleteFieldLevelEncryptionConfigInput) SetId(v string) *DeleteFieldLevelEncryptionConfigInput { + s.Id = &v return s } -// SetQuantity sets the Quantity field's value. -func (s *CachedMethods) SetQuantity(v int64) *CachedMethods { - s.Quantity = &v +// SetIfMatch sets the IfMatch field's value. +func (s *DeleteFieldLevelEncryptionConfigInput) SetIfMatch(v string) *DeleteFieldLevelEncryptionConfigInput { + s.IfMatch = &v return s } -// A field-level encryption content type profile. -type ContentTypeProfile struct { +type DeleteFieldLevelEncryptionConfigOutput struct { _ struct{} `type:"structure"` +} - // The content type for a field-level encryption content type-profile mapping. - // - // ContentType is a required field - ContentType *string `type:"string" required:"true"` +// String returns the string representation +func (s DeleteFieldLevelEncryptionConfigOutput) String() string { + return awsutil.Prettify(s) +} - // The format for a field-level encryption content type-profile mapping. +// GoString returns the string representation +func (s DeleteFieldLevelEncryptionConfigOutput) GoString() string { + return s.String() +} + +type DeleteFieldLevelEncryptionProfileInput struct { + _ struct{} `locationName:"DeleteFieldLevelEncryptionProfileRequest" type:"structure"` + + // Request the ID of the profile you want to delete from CloudFront. // - // Format is a required field - Format *string `type:"string" required:"true" enum:"Format"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` - // The profile ID for a field-level encryption content type-profile mapping. - ProfileId *string `type:"string"` + // The value of the ETag header that you received when retrieving the profile + // to delete. For example: E2QWRUHAPOMQZL. 
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"` } // String returns the string representation -func (s ContentTypeProfile) String() string { +func (s DeleteFieldLevelEncryptionProfileInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ContentTypeProfile) GoString() string { +func (s DeleteFieldLevelEncryptionProfileInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ContentTypeProfile) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ContentTypeProfile"} - if s.ContentType == nil { - invalidParams.Add(request.NewErrParamRequired("ContentType")) +func (s *DeleteFieldLevelEncryptionProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFieldLevelEncryptionProfileInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) } - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -5629,61 +11029,59 @@ func (s *ContentTypeProfile) Validate() error { return nil } -// SetContentType sets the ContentType field's value. -func (s *ContentTypeProfile) SetContentType(v string) *ContentTypeProfile { - s.ContentType = &v +// SetId sets the Id field's value. +func (s *DeleteFieldLevelEncryptionProfileInput) SetId(v string) *DeleteFieldLevelEncryptionProfileInput { + s.Id = &v return s } -// SetFormat sets the Format field's value. -func (s *ContentTypeProfile) SetFormat(v string) *ContentTypeProfile { - s.Format = &v +// SetIfMatch sets the IfMatch field's value. +func (s *DeleteFieldLevelEncryptionProfileInput) SetIfMatch(v string) *DeleteFieldLevelEncryptionProfileInput { + s.IfMatch = &v return s } -// SetProfileId sets the ProfileId field's value. -func (s *ContentTypeProfile) SetProfileId(v string) *ContentTypeProfile { - s.ProfileId = &v - return s +type DeleteFieldLevelEncryptionProfileOutput struct { + _ struct{} `type:"structure"` } -// The configuration for a field-level encryption content type-profile mapping. -type ContentTypeProfileConfig struct { - _ struct{} `type:"structure"` +// String returns the string representation +func (s DeleteFieldLevelEncryptionProfileOutput) String() string { + return awsutil.Prettify(s) +} - // The configuration for a field-level encryption content type-profile. - ContentTypeProfiles *ContentTypeProfiles `type:"structure"` +// GoString returns the string representation +func (s DeleteFieldLevelEncryptionProfileOutput) GoString() string { + return s.String() +} - // The setting in a field-level encryption content type-profile mapping that - // specifies what to do when an unknown content type is provided for the profile. - // If true, content is forwarded without being encrypted when the content type - // is unknown. If false (the default), an error is returned when the content - // type is unknown. +type DeleteMonitoringSubscriptionInput struct { + _ struct{} `locationName:"DeleteMonitoringSubscriptionRequest" type:"structure"` + + // The ID of the distribution that you are disabling metrics for. 
// - // ForwardWhenContentTypeIsUnknown is a required field - ForwardWhenContentTypeIsUnknown *bool `type:"boolean" required:"true"` + // DistributionId is a required field + DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` } // String returns the string representation -func (s ContentTypeProfileConfig) String() string { +func (s DeleteMonitoringSubscriptionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ContentTypeProfileConfig) GoString() string { +func (s DeleteMonitoringSubscriptionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ContentTypeProfileConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ContentTypeProfileConfig"} - if s.ForwardWhenContentTypeIsUnknown == nil { - invalidParams.Add(request.NewErrParamRequired("ForwardWhenContentTypeIsUnknown")) +func (s *DeleteMonitoringSubscriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMonitoringSubscriptionInput"} + if s.DistributionId == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionId")) } - if s.ContentTypeProfiles != nil { - if err := s.ContentTypeProfiles.Validate(); err != nil { - invalidParams.AddNested("ContentTypeProfiles", err.(request.ErrInvalidParams)) - } + if s.DistributionId != nil && len(*s.DistributionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DistributionId", 1)) } if invalidParams.Len() > 0 { @@ -5692,56 +11090,59 @@ func (s *ContentTypeProfileConfig) Validate() error { return nil } -// SetContentTypeProfiles sets the ContentTypeProfiles field's value. -func (s *ContentTypeProfileConfig) SetContentTypeProfiles(v *ContentTypeProfiles) *ContentTypeProfileConfig { - s.ContentTypeProfiles = v +// SetDistributionId sets the DistributionId field's value. +func (s *DeleteMonitoringSubscriptionInput) SetDistributionId(v string) *DeleteMonitoringSubscriptionInput { + s.DistributionId = &v return s } -// SetForwardWhenContentTypeIsUnknown sets the ForwardWhenContentTypeIsUnknown field's value. -func (s *ContentTypeProfileConfig) SetForwardWhenContentTypeIsUnknown(v bool) *ContentTypeProfileConfig { - s.ForwardWhenContentTypeIsUnknown = &v - return s +type DeleteMonitoringSubscriptionOutput struct { + _ struct{} `type:"structure"` } -// Field-level encryption content type-profile. -type ContentTypeProfiles struct { - _ struct{} `type:"structure"` +// String returns the string representation +func (s DeleteMonitoringSubscriptionOutput) String() string { + return awsutil.Prettify(s) +} - // Items in a field-level encryption content type-profile mapping. - Items []*ContentTypeProfile `locationNameList:"ContentTypeProfile" type:"list"` +// GoString returns the string representation +func (s DeleteMonitoringSubscriptionOutput) GoString() string { + return s.String() +} - // The number of field-level encryption content type-profile mappings. +type DeleteOriginRequestPolicyInput struct { + _ struct{} `locationName:"DeleteOriginRequestPolicyRequest" type:"structure"` + + // The unique identifier for the origin request policy that you are deleting. + // To get the identifier, you can use ListOriginRequestPolicies. 
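// A one-call sketch for the DeleteMonitoringSubscription types defined above, which take
// only a distribution ID and no If-Match header. The ID is a hypothetical placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))

	// Turn off additional metrics for a hypothetical distribution.
	_, err := svc.DeleteMonitoringSubscription(&cloudfront.DeleteMonitoringSubscriptionInput{
		DistributionId: aws.String("EDFDVBD632BHDS5"),
	})
	if err != nil {
		log.Fatal(err)
	}
}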
// - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The version of the origin request policy that you are deleting. The version + // is the origin request policy’s ETag value, which you can get using ListOriginRequestPolicies, + // GetOriginRequestPolicy, or GetOriginRequestPolicyConfig. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` } // String returns the string representation -func (s ContentTypeProfiles) String() string { +func (s DeleteOriginRequestPolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ContentTypeProfiles) GoString() string { +func (s DeleteOriginRequestPolicyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ContentTypeProfiles) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ContentTypeProfiles"} - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) +func (s *DeleteOriginRequestPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteOriginRequestPolicyInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) } - if s.Items != nil { - for i, v := range s.Items { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) - } - } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -5750,59 +11151,63 @@ func (s *ContentTypeProfiles) Validate() error { return nil } -// SetItems sets the Items field's value. -func (s *ContentTypeProfiles) SetItems(v []*ContentTypeProfile) *ContentTypeProfiles { - s.Items = v +// SetId sets the Id field's value. +func (s *DeleteOriginRequestPolicyInput) SetId(v string) *DeleteOriginRequestPolicyInput { + s.Id = &v return s } -// SetQuantity sets the Quantity field's value. -func (s *ContentTypeProfiles) SetQuantity(v int64) *ContentTypeProfiles { - s.Quantity = &v +// SetIfMatch sets the IfMatch field's value. +func (s *DeleteOriginRequestPolicyInput) SetIfMatch(v string) *DeleteOriginRequestPolicyInput { + s.IfMatch = &v return s } -// A complex type that specifies whether you want CloudFront to forward cookies -// to the origin and, if so, which ones. For more information about forwarding -// cookies to the origin, see Caching Content Based on Request Headers (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/header-caching.html) -// in the Amazon CloudFront Developer Guide. -type CookieNames struct { +type DeleteOriginRequestPolicyOutput struct { _ struct{} `type:"structure"` +} - // A complex type that contains one Name element for each cookie that you want - // CloudFront to forward to the origin for this cache behavior. It must contain - // the same number of items that is specified in the Quantity field. - // - // When you set Forward = whitelist (in the CookiePreferences object), this - // field must contain at least one item. 
- Items []*string `locationNameList:"Name" type:"list"` +// String returns the string representation +func (s DeleteOriginRequestPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOriginRequestPolicyOutput) GoString() string { + return s.String() +} + +type DeletePublicKeyInput struct { + _ struct{} `locationName:"DeletePublicKeyRequest" type:"structure"` - // The number of different cookies that you want CloudFront to forward to the - // origin for this cache behavior. The value must equal the number of items - // that are in the Items field. - // - // When you set Forward = whitelist (in the CookiePreferences object), this - // value must be 1 or higher. + // The ID of the public key you want to remove from CloudFront. // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header that you received when retrieving the public + // key identity to delete. For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` } // String returns the string representation -func (s CookieNames) String() string { +func (s DeletePublicKeyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CookieNames) GoString() string { +func (s DeletePublicKeyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CookieNames) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CookieNames"} - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) +func (s *DeletePublicKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePublicKeyInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -5811,126 +11216,110 @@ func (s *CookieNames) Validate() error { return nil } -// SetItems sets the Items field's value. -func (s *CookieNames) SetItems(v []*string) *CookieNames { - s.Items = v +// SetId sets the Id field's value. +func (s *DeletePublicKeyInput) SetId(v string) *DeletePublicKeyInput { + s.Id = &v return s } -// SetQuantity sets the Quantity field's value. -func (s *CookieNames) SetQuantity(v int64) *CookieNames { - s.Quantity = &v +// SetIfMatch sets the IfMatch field's value. +func (s *DeletePublicKeyInput) SetIfMatch(v string) *DeletePublicKeyInput { + s.IfMatch = &v return s } -// A complex type that specifies whether you want CloudFront to forward cookies -// to the origin and, if so, which ones. For more information about forwarding -// cookies to the origin, see Caching Content Based on Cookies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Cookies.html) -// in the Amazon CloudFront Developer Guide. -type CookiePreference struct { +type DeletePublicKeyOutput struct { _ struct{} `type:"structure"` - - // Specifies which cookies to forward to the origin for this cache behavior: - // all, none, or the list of cookies specified in the WhitelistedNames complex - // type. - // - // Amazon S3 doesn't process cookies. When the cache behavior is forwarding - // requests to an Amazon S3 origin, specify none for the Forward element. 
- // - // Forward is a required field - Forward *string `type:"string" required:"true" enum:"ItemSelection"` - - // Required if you specify whitelist for the value of Forward. A complex type - // that specifies how many different cookies you want CloudFront to forward - // to the origin for this cache behavior and, if you want to forward selected - // cookies, the names of those cookies. - // - // If you specify all or none for the value of Forward, omit WhitelistedNames. - // If you change the value of Forward from whitelist to all or none and you - // don't delete the WhitelistedNames element and its child elements, CloudFront - // deletes them automatically. - // - // For the current limit on the number of cookie names that you can whitelist - // for each cache behavior, see CloudFront Limits (https://docs.aws.amazon.com/general/latest/gr/xrefaws_service_limits.html#limits_cloudfront) - // in the AWS General Reference. - WhitelistedNames *CookieNames `type:"structure"` } // String returns the string representation -func (s CookiePreference) String() string { +func (s DeletePublicKeyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CookiePreference) GoString() string { +func (s DeletePublicKeyOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CookiePreference) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CookiePreference"} - if s.Forward == nil { - invalidParams.Add(request.NewErrParamRequired("Forward")) - } - if s.WhitelistedNames != nil { - if err := s.WhitelistedNames.Validate(); err != nil { - invalidParams.AddNested("WhitelistedNames", err.(request.ErrInvalidParams)) - } - } +type DeleteRealtimeLogConfigInput struct { + _ struct{} `locationName:"DeleteRealtimeLogConfigRequest" type:"structure" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` - if invalidParams.Len() > 0 { - return invalidParams - } - return nil + // The Amazon Resource Name (ARN) of the real-time log configuration to delete. + ARN *string `type:"string"` + + // The name of the real-time log configuration to delete. + Name *string `type:"string"` } -// SetForward sets the Forward field's value. -func (s *CookiePreference) SetForward(v string) *CookiePreference { - s.Forward = &v +// String returns the string representation +func (s DeleteRealtimeLogConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRealtimeLogConfigInput) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *DeleteRealtimeLogConfigInput) SetARN(v string) *DeleteRealtimeLogConfigInput { + s.ARN = &v return s } -// SetWhitelistedNames sets the WhitelistedNames field's value. -func (s *CookiePreference) SetWhitelistedNames(v *CookieNames) *CookiePreference { - s.WhitelistedNames = v +// SetName sets the Name field's value. +func (s *DeleteRealtimeLogConfigInput) SetName(v string) *DeleteRealtimeLogConfigInput { + s.Name = &v return s } -// The request to create a new origin access identity (OAI). An origin access -// identity is a special CloudFront user that you can associate with Amazon -// S3 origins, so that you can secure all or just some of your Amazon S3 content. 
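// A caller-side sketch for DeleteRealtimeLogConfigInput above, which identifies the
// configuration by ARN or by Name rather than by ID plus ETag. The name here is a
// hypothetical placeholder; typically only one of the two fields is supplied.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))

	// Delete the real-time log configuration by name.
	_, err := svc.DeleteRealtimeLogConfig(&cloudfront.DeleteRealtimeLogConfigInput{
		Name: aws.String("example-realtime-log-config"),
	})
	if err != nil {
		log.Fatal(err)
	}
}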
-// For more information, see Restricting Access to Amazon S3 Content by Using -// an Origin Access Identity (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html) -// in the Amazon CloudFront Developer Guide. -type CreateCloudFrontOriginAccessIdentityInput struct { - _ struct{} `locationName:"CreateCloudFrontOriginAccessIdentityRequest" type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` +type DeleteRealtimeLogConfigOutput struct { + _ struct{} `type:"structure"` +} - // The current configuration information for the identity. +// String returns the string representation +func (s DeleteRealtimeLogConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRealtimeLogConfigOutput) GoString() string { + return s.String() +} + +// The request to delete a streaming distribution. +type DeleteStreamingDistributionInput struct { + _ struct{} `locationName:"DeleteStreamingDistributionRequest" type:"structure"` + + // The distribution ID. // - // CloudFrontOriginAccessIdentityConfig is a required field - CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `locationName:"CloudFrontOriginAccessIdentityConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The value of the ETag header that you received when you disabled the streaming + // distribution. For example: E2QWRUHAPOMQZL. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` } // String returns the string representation -func (s CreateCloudFrontOriginAccessIdentityInput) String() string { +func (s DeleteStreamingDistributionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCloudFrontOriginAccessIdentityInput) GoString() string { +func (s DeleteStreamingDistributionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateCloudFrontOriginAccessIdentityInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCloudFrontOriginAccessIdentityInput"} - if s.CloudFrontOriginAccessIdentityConfig == nil { - invalidParams.Add(request.NewErrParamRequired("CloudFrontOriginAccessIdentityConfig")) +func (s *DeleteStreamingDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteStreamingDistributionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) } - if s.CloudFrontOriginAccessIdentityConfig != nil { - if err := s.CloudFrontOriginAccessIdentityConfig.Validate(); err != nil { - invalidParams.AddNested("CloudFrontOriginAccessIdentityConfig", err.(request.ErrInvalidParams)) - } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -5939,257 +11328,415 @@ func (s *CreateCloudFrontOriginAccessIdentityInput) Validate() error { return nil } -// SetCloudFrontOriginAccessIdentityConfig sets the CloudFrontOriginAccessIdentityConfig field's value. -func (s *CreateCloudFrontOriginAccessIdentityInput) SetCloudFrontOriginAccessIdentityConfig(v *OriginAccessIdentityConfig) *CreateCloudFrontOriginAccessIdentityInput { - s.CloudFrontOriginAccessIdentityConfig = v +// SetId sets the Id field's value. 
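// A caller-side sketch of creating the origin access identity described above from an
// OriginAccessIdentityConfig. The caller reference and comment are hypothetical
// placeholders; CallerReference only needs to be a value unique to this request.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))

	out, err := svc.CreateCloudFrontOriginAccessIdentity(&cloudfront.CreateCloudFrontOriginAccessIdentityInput{
		CloudFrontOriginAccessIdentityConfig: &cloudfront.OriginAccessIdentityConfig{
			CallerReference: aws.String("example-oai-2021-07-20"),
			Comment:         aws.String("OAI for restricting access to an S3 origin"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The returned identity ID is what an S3 origin and bucket policy reference.
	fmt.Println(aws.StringValue(out.CloudFrontOriginAccessIdentity.Id))
}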
+func (s *DeleteStreamingDistributionInput) SetId(v string) *DeleteStreamingDistributionInput { + s.Id = &v return s } -// The returned result of the corresponding request. -type CreateCloudFrontOriginAccessIdentityOutput struct { - _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` - - // The origin access identity's information. - CloudFrontOriginAccessIdentity *OriginAccessIdentity `type:"structure"` - - // The current version of the origin access identity created. - ETag *string `location:"header" locationName:"ETag" type:"string"` +// SetIfMatch sets the IfMatch field's value. +func (s *DeleteStreamingDistributionInput) SetIfMatch(v string) *DeleteStreamingDistributionInput { + s.IfMatch = &v + return s +} - // The fully qualified URI of the new origin access identity just created. For - // example: https://cloudfront.amazonaws.com/2010-11-01/origin-access-identity/cloudfront/E74FTE3AJFJ256A. - Location *string `location:"header" locationName:"Location" type:"string"` +type DeleteStreamingDistributionOutput struct { + _ struct{} `type:"structure"` } // String returns the string representation -func (s CreateCloudFrontOriginAccessIdentityOutput) String() string { +func (s DeleteStreamingDistributionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCloudFrontOriginAccessIdentityOutput) GoString() string { +func (s DeleteStreamingDistributionOutput) GoString() string { return s.String() } -// SetCloudFrontOriginAccessIdentity sets the CloudFrontOriginAccessIdentity field's value. -func (s *CreateCloudFrontOriginAccessIdentityOutput) SetCloudFrontOriginAccessIdentity(v *OriginAccessIdentity) *CreateCloudFrontOriginAccessIdentityOutput { - s.CloudFrontOriginAccessIdentity = v - return s -} +// A distribution tells CloudFront where you want content to be delivered from, +// and the details about how to track and manage content delivery. +type Distribution struct { + _ struct{} `type:"structure"` -// SetETag sets the ETag field's value. -func (s *CreateCloudFrontOriginAccessIdentityOutput) SetETag(v string) *CreateCloudFrontOriginAccessIdentityOutput { - s.ETag = &v - return s -} + // The ARN (Amazon Resource Name) for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, + // where 123456789012 is your AWS account ID. + // + // ARN is a required field + ARN *string `type:"string" required:"true"` -// SetLocation sets the Location field's value. -func (s *CreateCloudFrontOriginAccessIdentityOutput) SetLocation(v string) *CreateCloudFrontOriginAccessIdentityOutput { - s.Location = &v - return s -} + // CloudFront automatically adds this element to the response only if you've + // set up the distribution to serve private content with signed URLs. The element + // lists the key pair IDs that CloudFront is aware of for each trusted signer. + // The Signer child element lists the AWS account number of the trusted signer + // (or an empty Self element if the signer is you). The Signer element also + // includes the IDs of any active key pairs associated with the trusted signer's + // AWS account. If no KeyPairId element appears for a Signer, that signer can't + // create working signed URLs. + // + // ActiveTrustedSigners is a required field + ActiveTrustedSigners *ActiveTrustedSigners `type:"structure" required:"true"` -// The request to create a new distribution. 
-type CreateDistributionInput struct { - _ struct{} `locationName:"CreateDistributionRequest" type:"structure" payload:"DistributionConfig"` + // AWS services in China customers must file for an Internet Content Provider + // (ICP) recordal if they want to serve content publicly on an alternate domain + // name, also known as a CNAME, that they've added to CloudFront. AliasICPRecordal + // provides the ICP recordal status for CNAMEs associated with distributions. + // + // For more information about ICP recordals, see Signup, Accounts, and Credentials + // (https://docs.amazonaws.cn/en_us/aws/latest/userguide/accounts-and-credentials.html) + // in Getting Started with AWS services in China. + AliasICPRecordals []*AliasICPRecordal `locationNameList:"AliasICPRecordal" type:"list"` - // The distribution's configuration information. + // The current configuration information for the distribution. Send a GET request + // to the /CloudFront API version/distribution ID/config resource. + // + // DistributionConfig is a required field + DistributionConfig *DistributionConfig `type:"structure" required:"true"` + + // The domain name corresponding to the distribution, for example, d111111abcdef8.cloudfront.net. + // + // DomainName is a required field + DomainName *string `type:"string" required:"true"` + + // The identifier for the distribution. For example: EDFDVBD632BHDS5. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The number of invalidation batches currently in progress. + // + // InProgressInvalidationBatches is a required field + InProgressInvalidationBatches *int64 `type:"integer" required:"true"` + + // The date and time the distribution was last modified. + // + // LastModifiedTime is a required field + LastModifiedTime *time.Time `type:"timestamp" required:"true"` + + // This response element indicates the current status of the distribution. When + // the status is Deployed, the distribution's information is fully propagated + // to all CloudFront edge locations. // - // DistributionConfig is a required field - DistributionConfig *DistributionConfig `locationName:"DistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + // Status is a required field + Status *string `type:"string" required:"true"` } // String returns the string representation -func (s CreateDistributionInput) String() string { +func (s Distribution) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDistributionInput) GoString() string { +func (s Distribution) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateDistributionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDistributionInput"} - if s.DistributionConfig == nil { - invalidParams.Add(request.NewErrParamRequired("DistributionConfig")) - } - if s.DistributionConfig != nil { - if err := s.DistributionConfig.Validate(); err != nil { - invalidParams.AddNested("DistributionConfig", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetARN sets the ARN field's value. +func (s *Distribution) SetARN(v string) *Distribution { + s.ARN = &v + return s } -// SetDistributionConfig sets the DistributionConfig field's value. 
-func (s *CreateDistributionInput) SetDistributionConfig(v *DistributionConfig) *CreateDistributionInput { - s.DistributionConfig = v +// SetActiveTrustedSigners sets the ActiveTrustedSigners field's value. +func (s *Distribution) SetActiveTrustedSigners(v *ActiveTrustedSigners) *Distribution { + s.ActiveTrustedSigners = v return s } -// The returned result of the corresponding request. -type CreateDistributionOutput struct { - _ struct{} `type:"structure" payload:"Distribution"` - - // The distribution's information. - Distribution *Distribution `type:"structure"` - - // The current version of the distribution created. - ETag *string `location:"header" locationName:"ETag" type:"string"` +// SetAliasICPRecordals sets the AliasICPRecordals field's value. +func (s *Distribution) SetAliasICPRecordals(v []*AliasICPRecordal) *Distribution { + s.AliasICPRecordals = v + return s +} - // The fully qualified URI of the new distribution resource just created. For - // example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5. - Location *string `location:"header" locationName:"Location" type:"string"` +// SetDistributionConfig sets the DistributionConfig field's value. +func (s *Distribution) SetDistributionConfig(v *DistributionConfig) *Distribution { + s.DistributionConfig = v + return s } -// String returns the string representation -func (s CreateDistributionOutput) String() string { - return awsutil.Prettify(s) +// SetDomainName sets the DomainName field's value. +func (s *Distribution) SetDomainName(v string) *Distribution { + s.DomainName = &v + return s } -// GoString returns the string representation -func (s CreateDistributionOutput) GoString() string { - return s.String() +// SetId sets the Id field's value. +func (s *Distribution) SetId(v string) *Distribution { + s.Id = &v + return s } -// SetDistribution sets the Distribution field's value. -func (s *CreateDistributionOutput) SetDistribution(v *Distribution) *CreateDistributionOutput { - s.Distribution = v +// SetInProgressInvalidationBatches sets the InProgressInvalidationBatches field's value. +func (s *Distribution) SetInProgressInvalidationBatches(v int64) *Distribution { + s.InProgressInvalidationBatches = &v return s } -// SetETag sets the ETag field's value. -func (s *CreateDistributionOutput) SetETag(v string) *CreateDistributionOutput { - s.ETag = &v +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *Distribution) SetLastModifiedTime(v time.Time) *Distribution { + s.LastModifiedTime = &v return s } -// SetLocation sets the Location field's value. -func (s *CreateDistributionOutput) SetLocation(v string) *CreateDistributionOutput { - s.Location = &v +// SetStatus sets the Status field's value. +func (s *Distribution) SetStatus(v string) *Distribution { + s.Status = &v return s } -// The request to create a new distribution with tags. -type CreateDistributionWithTagsInput struct { - _ struct{} `locationName:"CreateDistributionWithTagsRequest" type:"structure" payload:"DistributionConfigWithTags"` +// A distribution configuration. +type DistributionConfig struct { + _ struct{} `type:"structure"` - // The distribution's configuration information. 
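// A caller-side sketch of reading back the Distribution type defined above. The ID is a
// hypothetical placeholder; Status is InProgress while changes propagate and Deployed once
// they have reached all edge locations, and DomainName is the *.cloudfront.net endpoint.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))
	id := aws.String("EDFDVBD632BHDS5") // hypothetical distribution ID

	out, err := svc.GetDistribution(&cloudfront.GetDistributionInput{Id: id})
	if err != nil {
		log.Fatal(err)
	}

	d := out.Distribution
	fmt.Println(aws.StringValue(d.Status), aws.StringValue(d.DomainName))
}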
- // - // DistributionConfigWithTags is a required field - DistributionConfigWithTags *DistributionConfigWithTags `locationName:"DistributionConfigWithTags" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` -} + // A complex type that contains information about CNAMEs (alternate domain names), + // if any, for this distribution. + Aliases *Aliases `type:"structure"` -// String returns the string representation -func (s CreateDistributionWithTagsInput) String() string { - return awsutil.Prettify(s) -} + // A complex type that contains zero or more CacheBehavior elements. + CacheBehaviors *CacheBehaviors `type:"structure"` -// GoString returns the string representation -func (s CreateDistributionWithTagsInput) GoString() string { - return s.String() -} + // A unique value (for example, a date-time stamp) that ensures that the request + // can't be replayed. + // + // If the value of CallerReference is new (regardless of the content of the + // DistributionConfig object), CloudFront creates a new distribution. + // + // If CallerReference is a value that you already sent in a previous request + // to create a distribution, CloudFront returns a DistributionAlreadyExists + // error. + // + // CallerReference is a required field + CallerReference *string `type:"string" required:"true"` -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateDistributionWithTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDistributionWithTagsInput"} - if s.DistributionConfigWithTags == nil { - invalidParams.Add(request.NewErrParamRequired("DistributionConfigWithTags")) - } - if s.DistributionConfigWithTags != nil { - if err := s.DistributionConfigWithTags.Validate(); err != nil { - invalidParams.AddNested("DistributionConfigWithTags", err.(request.ErrInvalidParams)) - } - } + // Any comments you want to include about the distribution. + // + // If you don't want to specify a comment, include an empty Comment element. + // + // To delete an existing comment, update the distribution configuration and + // include an empty Comment element. + // + // To add or change a comment, update the distribution configuration and specify + // the new comment. + // + // Comment is a required field + Comment *string `type:"string" required:"true" sensitive:"true"` - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // A complex type that controls the following: + // + // * Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range + // with custom error messages before returning the response to the viewer. + // + // * How long CloudFront caches HTTP status codes in the 4xx and 5xx range. + // + // For more information about custom error pages, see Customizing Error Responses + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) + // in the Amazon CloudFront Developer Guide. + CustomErrorResponses *CustomErrorResponses `type:"structure"` -// SetDistributionConfigWithTags sets the DistributionConfigWithTags field's value. -func (s *CreateDistributionWithTagsInput) SetDistributionConfigWithTags(v *DistributionConfigWithTags) *CreateDistributionWithTagsInput { - s.DistributionConfigWithTags = v - return s -} + // A complex type that describes the default cache behavior if you don't specify + // a CacheBehavior element or if files don't match any of the values of PathPattern + // in CacheBehavior elements. 
You must create exactly one default cache behavior. + // + // DefaultCacheBehavior is a required field + DefaultCacheBehavior *DefaultCacheBehavior `type:"structure" required:"true"` -// The returned result of the corresponding request. -type CreateDistributionWithTagsOutput struct { - _ struct{} `type:"structure" payload:"Distribution"` + // The object that you want CloudFront to request from your origin (for example, + // index.html) when a viewer requests the root URL for your distribution (http://www.example.com) + // instead of an object in your distribution (http://www.example.com/product-description.html). + // Specifying a default root object avoids exposing the contents of your distribution. + // + // Specify only the object name, for example, index.html. Don't add a / before + // the object name. + // + // If you don't want to specify a default root object when you create a distribution, + // include an empty DefaultRootObject element. + // + // To delete the default root object from an existing distribution, update the + // distribution configuration and include an empty DefaultRootObject element. + // + // To replace the default root object, update the distribution configuration + // and specify the new object. + // + // For more information about the default root object, see Creating a Default + // Root Object (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) + // in the Amazon CloudFront Developer Guide. + DefaultRootObject *string `type:"string"` - // The distribution's information. - Distribution *Distribution `type:"structure"` + // From this field, you can enable or disable the selected distribution. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` - // The current version of the distribution created. - ETag *string `location:"header" locationName:"ETag" type:"string"` + // (Optional) Specify the maximum HTTP version that you want viewers to use + // to communicate with CloudFront. The default value for new web distributions + // is http2. Viewers that don't support HTTP/2 automatically use an earlier + // HTTP version. + // + // For viewers and CloudFront to use HTTP/2, viewers must support TLS 1.2 or + // later, and must support Server Name Identification (SNI). + // + // In general, configuring CloudFront to communicate with viewers using HTTP/2 + // reduces latency. You can improve performance by optimizing for HTTP/2. For + // more information, do an Internet search for "http/2 optimization." + HttpVersion *string `type:"string" enum:"HttpVersion"` - // The fully qualified URI of the new distribution resource just created. For - // example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5. - Location *string `location:"header" locationName:"Location" type:"string"` -} + // If you want CloudFront to respond to IPv6 DNS requests with an IPv6 address + // for your distribution, specify true. If you specify false, CloudFront responds + // to IPv6 DNS requests with the DNS response code NOERROR and with no IP addresses. + // This allows viewers to submit a second request, for an IPv4 address for your + // distribution. + // + // In general, you should enable IPv6 if you have users on IPv6 networks who + // want to access your content. 
However, if you're using signed URLs or signed + // cookies to restrict access to your content, and if you're using a custom + // policy that includes the IpAddress parameter to restrict the IP addresses + // that can access your content, don't enable IPv6. If you want to restrict + // access to some content by IP address and not restrict access to other content + // (or restrict access but not by IP address), you can create two distributions. + // For more information, see Creating a Signed URL Using a Custom Policy (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html) + // in the Amazon CloudFront Developer Guide. + // + // If you're using an Amazon Route 53 alias resource record set to route traffic + // to your CloudFront distribution, you need to create a second alias resource + // record set when both of the following are true: + // + // * You enable IPv6 for the distribution + // + // * You're using alternate domain names in the URLs for your objects + // + // For more information, see Routing Traffic to an Amazon CloudFront Web Distribution + // by Using Your Domain Name (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-cloudfront-distribution.html) + // in the Amazon Route 53 Developer Guide. + // + // If you created a CNAME resource record set, either with Amazon Route 53 or + // with another DNS service, you don't need to make any changes. A CNAME record + // will route traffic to your distribution regardless of the IP address format + // of the viewer request. + IsIPV6Enabled *bool `type:"boolean"` -// String returns the string representation -func (s CreateDistributionWithTagsOutput) String() string { - return awsutil.Prettify(s) -} + // A complex type that controls whether access logs are written for the distribution. + // + // For more information about logging, see Access Logs (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html) + // in the Amazon CloudFront Developer Guide. + Logging *LoggingConfig `type:"structure"` -// GoString returns the string representation -func (s CreateDistributionWithTagsOutput) GoString() string { - return s.String() -} + // A complex type that contains information about origin groups for this distribution. + OriginGroups *OriginGroups `type:"structure"` -// SetDistribution sets the Distribution field's value. -func (s *CreateDistributionWithTagsOutput) SetDistribution(v *Distribution) *CreateDistributionWithTagsOutput { - s.Distribution = v - return s -} + // A complex type that contains information about origins for this distribution. + // + // Origins is a required field + Origins *Origins `type:"structure" required:"true"` -// SetETag sets the ETag field's value. -func (s *CreateDistributionWithTagsOutput) SetETag(v string) *CreateDistributionWithTagsOutput { - s.ETag = &v - return s -} + // The price class that corresponds with the maximum price that you want to + // pay for CloudFront service. If you specify PriceClass_All, CloudFront responds + // to requests for your objects from all CloudFront edge locations. + // + // If you specify a price class other than PriceClass_All, CloudFront serves + // your objects from the CloudFront edge location that has the lowest latency + // among the edge locations in your price class. Viewers who are in or near + // regions that are excluded from your specified price class may encounter slower + // performance. 
+ // + // For more information about price classes, see Choosing the Price Class for + // a CloudFront Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PriceClass.html) + // in the Amazon CloudFront Developer Guide. For information about CloudFront + // pricing, including how price classes (such as Price Class 100) map to CloudFront + // regions, see Amazon CloudFront Pricing (http://aws.amazon.com/cloudfront/pricing/). + PriceClass *string `type:"string" enum:"PriceClass"` -// SetLocation sets the Location field's value. -func (s *CreateDistributionWithTagsOutput) SetLocation(v string) *CreateDistributionWithTagsOutput { - s.Location = &v - return s -} + // A complex type that identifies ways in which you want to restrict distribution + // of your content. + Restrictions *Restrictions `type:"structure"` -type CreateFieldLevelEncryptionConfigInput struct { - _ struct{} `locationName:"CreateFieldLevelEncryptionConfigRequest" type:"structure" payload:"FieldLevelEncryptionConfig"` + // A complex type that determines the distribution’s SSL/TLS configuration + // for communicating with viewers. + ViewerCertificate *ViewerCertificate `type:"structure"` - // The request to create a new field-level encryption configuration. + // A unique identifier that specifies the AWS WAF web ACL, if any, to associate + // with this distribution. To specify a web ACL created using the latest version + // of AWS WAF, use the ACL ARN, for example arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a. + // To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example + // 473e64fd-f30b-4765-81a0-62ad96dd167a. // - // FieldLevelEncryptionConfig is a required field - FieldLevelEncryptionConfig *FieldLevelEncryptionConfig `locationName:"FieldLevelEncryptionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + // AWS WAF is a web application firewall that lets you monitor the HTTP and + // HTTPS requests that are forwarded to CloudFront, and lets you control access + // to your content. Based on conditions that you specify, such as the IP addresses + // that requests originate from or the values of query strings, CloudFront responds + // to requests either with the requested content or with an HTTP 403 status + // code (Forbidden). You can also configure CloudFront to return a custom error + // page when a request is blocked. For more information about AWS WAF, see the + // AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/what-is-aws-waf.html). + WebACLId *string `type:"string"` } // String returns the string representation -func (s CreateFieldLevelEncryptionConfigInput) String() string { +func (s DistributionConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateFieldLevelEncryptionConfigInput) GoString() string { +func (s DistributionConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateFieldLevelEncryptionConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateFieldLevelEncryptionConfigInput"} - if s.FieldLevelEncryptionConfig == nil { - invalidParams.Add(request.NewErrParamRequired("FieldLevelEncryptionConfig")) +func (s *DistributionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DistributionConfig"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) } - if s.FieldLevelEncryptionConfig != nil { - if err := s.FieldLevelEncryptionConfig.Validate(); err != nil { - invalidParams.AddNested("FieldLevelEncryptionConfig", err.(request.ErrInvalidParams)) + if s.Comment == nil { + invalidParams.Add(request.NewErrParamRequired("Comment")) + } + if s.DefaultCacheBehavior == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultCacheBehavior")) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Origins == nil { + invalidParams.Add(request.NewErrParamRequired("Origins")) + } + if s.Aliases != nil { + if err := s.Aliases.Validate(); err != nil { + invalidParams.AddNested("Aliases", err.(request.ErrInvalidParams)) + } + } + if s.CacheBehaviors != nil { + if err := s.CacheBehaviors.Validate(); err != nil { + invalidParams.AddNested("CacheBehaviors", err.(request.ErrInvalidParams)) + } + } + if s.CustomErrorResponses != nil { + if err := s.CustomErrorResponses.Validate(); err != nil { + invalidParams.AddNested("CustomErrorResponses", err.(request.ErrInvalidParams)) + } + } + if s.DefaultCacheBehavior != nil { + if err := s.DefaultCacheBehavior.Validate(); err != nil { + invalidParams.AddNested("DefaultCacheBehavior", err.(request.ErrInvalidParams)) + } + } + if s.Logging != nil { + if err := s.Logging.Validate(); err != nil { + invalidParams.AddNested("Logging", err.(request.ErrInvalidParams)) + } + } + if s.OriginGroups != nil { + if err := s.OriginGroups.Validate(); err != nil { + invalidParams.AddNested("OriginGroups", err.(request.ErrInvalidParams)) + } + } + if s.Origins != nil { + if err := s.Origins.Validate(); err != nil { + invalidParams.AddNested("Origins", err.(request.ErrInvalidParams)) + } + } + if s.Restrictions != nil { + if err := s.Restrictions.Validate(); err != nil { + invalidParams.AddNested("Restrictions", err.(request.ErrInvalidParams)) } } @@ -6199,180 +11746,151 @@ func (s *CreateFieldLevelEncryptionConfigInput) Validate() error { return nil } -// SetFieldLevelEncryptionConfig sets the FieldLevelEncryptionConfig field's value. -func (s *CreateFieldLevelEncryptionConfigInput) SetFieldLevelEncryptionConfig(v *FieldLevelEncryptionConfig) *CreateFieldLevelEncryptionConfigInput { - s.FieldLevelEncryptionConfig = v +// SetAliases sets the Aliases field's value. +func (s *DistributionConfig) SetAliases(v *Aliases) *DistributionConfig { + s.Aliases = v return s } -type CreateFieldLevelEncryptionConfigOutput struct { - _ struct{} `type:"structure" payload:"FieldLevelEncryption"` - - // The current version of the field level encryption configuration. For example: - // E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // Returned when you create a new field-level encryption configuration. - FieldLevelEncryption *FieldLevelEncryption `type:"structure"` - - // The fully qualified URI of the new configuration resource just created. For - // example: https://cloudfront.amazonaws.com/2010-11-01/field-level-encryption-config/EDFDVBD632BHDS5. 
- Location *string `location:"header" locationName:"Location" type:"string"` -} - -// String returns the string representation -func (s CreateFieldLevelEncryptionConfigOutput) String() string { - return awsutil.Prettify(s) +// SetCacheBehaviors sets the CacheBehaviors field's value. +func (s *DistributionConfig) SetCacheBehaviors(v *CacheBehaviors) *DistributionConfig { + s.CacheBehaviors = v + return s } -// GoString returns the string representation -func (s CreateFieldLevelEncryptionConfigOutput) GoString() string { - return s.String() +// SetCallerReference sets the CallerReference field's value. +func (s *DistributionConfig) SetCallerReference(v string) *DistributionConfig { + s.CallerReference = &v + return s } -// SetETag sets the ETag field's value. -func (s *CreateFieldLevelEncryptionConfigOutput) SetETag(v string) *CreateFieldLevelEncryptionConfigOutput { - s.ETag = &v +// SetComment sets the Comment field's value. +func (s *DistributionConfig) SetComment(v string) *DistributionConfig { + s.Comment = &v return s } -// SetFieldLevelEncryption sets the FieldLevelEncryption field's value. -func (s *CreateFieldLevelEncryptionConfigOutput) SetFieldLevelEncryption(v *FieldLevelEncryption) *CreateFieldLevelEncryptionConfigOutput { - s.FieldLevelEncryption = v +// SetCustomErrorResponses sets the CustomErrorResponses field's value. +func (s *DistributionConfig) SetCustomErrorResponses(v *CustomErrorResponses) *DistributionConfig { + s.CustomErrorResponses = v return s } -// SetLocation sets the Location field's value. -func (s *CreateFieldLevelEncryptionConfigOutput) SetLocation(v string) *CreateFieldLevelEncryptionConfigOutput { - s.Location = &v +// SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value. +func (s *DistributionConfig) SetDefaultCacheBehavior(v *DefaultCacheBehavior) *DistributionConfig { + s.DefaultCacheBehavior = v return s } -type CreateFieldLevelEncryptionProfileInput struct { - _ struct{} `locationName:"CreateFieldLevelEncryptionProfileRequest" type:"structure" payload:"FieldLevelEncryptionProfileConfig"` - - // The request to create a field-level encryption profile. - // - // FieldLevelEncryptionProfileConfig is a required field - FieldLevelEncryptionProfileConfig *FieldLevelEncryptionProfileConfig `locationName:"FieldLevelEncryptionProfileConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` +// SetDefaultRootObject sets the DefaultRootObject field's value. +func (s *DistributionConfig) SetDefaultRootObject(v string) *DistributionConfig { + s.DefaultRootObject = &v + return s } -// String returns the string representation -func (s CreateFieldLevelEncryptionProfileInput) String() string { - return awsutil.Prettify(s) +// SetEnabled sets the Enabled field's value. +func (s *DistributionConfig) SetEnabled(v bool) *DistributionConfig { + s.Enabled = &v + return s } -// GoString returns the string representation -func (s CreateFieldLevelEncryptionProfileInput) GoString() string { - return s.String() +// SetHttpVersion sets the HttpVersion field's value. +func (s *DistributionConfig) SetHttpVersion(v string) *DistributionConfig { + s.HttpVersion = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateFieldLevelEncryptionProfileInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateFieldLevelEncryptionProfileInput"} - if s.FieldLevelEncryptionProfileConfig == nil { - invalidParams.Add(request.NewErrParamRequired("FieldLevelEncryptionProfileConfig")) - } - if s.FieldLevelEncryptionProfileConfig != nil { - if err := s.FieldLevelEncryptionProfileConfig.Validate(); err != nil { - invalidParams.AddNested("FieldLevelEncryptionProfileConfig", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetIsIPV6Enabled sets the IsIPV6Enabled field's value. +func (s *DistributionConfig) SetIsIPV6Enabled(v bool) *DistributionConfig { + s.IsIPV6Enabled = &v + return s } -// SetFieldLevelEncryptionProfileConfig sets the FieldLevelEncryptionProfileConfig field's value. -func (s *CreateFieldLevelEncryptionProfileInput) SetFieldLevelEncryptionProfileConfig(v *FieldLevelEncryptionProfileConfig) *CreateFieldLevelEncryptionProfileInput { - s.FieldLevelEncryptionProfileConfig = v +// SetLogging sets the Logging field's value. +func (s *DistributionConfig) SetLogging(v *LoggingConfig) *DistributionConfig { + s.Logging = v return s } -type CreateFieldLevelEncryptionProfileOutput struct { - _ struct{} `type:"structure" payload:"FieldLevelEncryptionProfile"` - - // The current version of the field level encryption profile. For example: E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // Returned when you create a new field-level encryption profile. - FieldLevelEncryptionProfile *FieldLevelEncryptionProfile `type:"structure"` - - // The fully qualified URI of the new profile resource just created. For example: - // https://cloudfront.amazonaws.com/2010-11-01/field-level-encryption-profile/EDFDVBD632BHDS5. - Location *string `location:"header" locationName:"Location" type:"string"` +// SetOriginGroups sets the OriginGroups field's value. +func (s *DistributionConfig) SetOriginGroups(v *OriginGroups) *DistributionConfig { + s.OriginGroups = v + return s } -// String returns the string representation -func (s CreateFieldLevelEncryptionProfileOutput) String() string { - return awsutil.Prettify(s) +// SetOrigins sets the Origins field's value. +func (s *DistributionConfig) SetOrigins(v *Origins) *DistributionConfig { + s.Origins = v + return s } -// GoString returns the string representation -func (s CreateFieldLevelEncryptionProfileOutput) GoString() string { - return s.String() +// SetPriceClass sets the PriceClass field's value. +func (s *DistributionConfig) SetPriceClass(v string) *DistributionConfig { + s.PriceClass = &v + return s } -// SetETag sets the ETag field's value. -func (s *CreateFieldLevelEncryptionProfileOutput) SetETag(v string) *CreateFieldLevelEncryptionProfileOutput { - s.ETag = &v +// SetRestrictions sets the Restrictions field's value. +func (s *DistributionConfig) SetRestrictions(v *Restrictions) *DistributionConfig { + s.Restrictions = v return s } -// SetFieldLevelEncryptionProfile sets the FieldLevelEncryptionProfile field's value. -func (s *CreateFieldLevelEncryptionProfileOutput) SetFieldLevelEncryptionProfile(v *FieldLevelEncryptionProfile) *CreateFieldLevelEncryptionProfileOutput { - s.FieldLevelEncryptionProfile = v +// SetViewerCertificate sets the ViewerCertificate field's value. 
+func (s *DistributionConfig) SetViewerCertificate(v *ViewerCertificate) *DistributionConfig { + s.ViewerCertificate = v return s } -// SetLocation sets the Location field's value. -func (s *CreateFieldLevelEncryptionProfileOutput) SetLocation(v string) *CreateFieldLevelEncryptionProfileOutput { - s.Location = &v +// SetWebACLId sets the WebACLId field's value. +func (s *DistributionConfig) SetWebACLId(v string) *DistributionConfig { + s.WebACLId = &v return s } -// The request to create an invalidation. -type CreateInvalidationInput struct { - _ struct{} `locationName:"CreateInvalidationRequest" type:"structure" payload:"InvalidationBatch"` +// A distribution Configuration and a list of tags to be associated with the +// distribution. +type DistributionConfigWithTags struct { + _ struct{} `type:"structure"` - // The distribution's id. + // A distribution configuration. // - // DistributionId is a required field - DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` + // DistributionConfig is a required field + DistributionConfig *DistributionConfig `type:"structure" required:"true"` - // The batch information for the invalidation. + // A complex type that contains zero or more Tag elements. // - // InvalidationBatch is a required field - InvalidationBatch *InvalidationBatch `locationName:"InvalidationBatch" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + // Tags is a required field + Tags *Tags `type:"structure" required:"true"` } // String returns the string representation -func (s CreateInvalidationInput) String() string { +func (s DistributionConfigWithTags) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateInvalidationInput) GoString() string { +func (s DistributionConfigWithTags) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateInvalidationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateInvalidationInput"} - if s.DistributionId == nil { - invalidParams.Add(request.NewErrParamRequired("DistributionId")) +func (s *DistributionConfigWithTags) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DistributionConfigWithTags"} + if s.DistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionConfig")) } - if s.DistributionId != nil && len(*s.DistributionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DistributionId", 1)) + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) } - if s.InvalidationBatch == nil { - invalidParams.Add(request.NewErrParamRequired("InvalidationBatch")) + if s.DistributionConfig != nil { + if err := s.DistributionConfig.Validate(); err != nil { + invalidParams.AddNested("DistributionConfig", err.(request.ErrInvalidParams)) + } } - if s.InvalidationBatch != nil { - if err := s.InvalidationBatch.Validate(); err != nil { - invalidParams.AddNested("InvalidationBatch", err.(request.ErrInvalidParams)) + if s.Tags != nil { + if err := s.Tags.Validate(); err != nil { + invalidParams.AddNested("Tags", err.(request.ErrInvalidParams)) } } @@ -6382,470 +11900,459 @@ func (s *CreateInvalidationInput) Validate() error { return nil } -// SetDistributionId sets the DistributionId field's value. 
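// Illustrative sketch only (not part of the vendored SDK diff): one way the
// fluent Set* helpers on DistributionConfig regenerated above are typically
// chained. The CallerReference, comment, and the empty DefaultCacheBehavior
// and Origins values are placeholders; a real request must populate them fully.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	cfg := (&cloudfront.DistributionConfig{}).
		SetCallerReference("example-2021-07-20").                    // unique, non-replayable value
		SetComment("example distribution").                          // required; an empty string is allowed
		SetEnabled(true).                                            // required
		SetDefaultCacheBehavior(&cloudfront.DefaultCacheBehavior{}). // placeholder; must be fully populated
		SetOrigins(&cloudfront.Origins{})                            // placeholder; must be fully populated

	// Validate mirrors the DistributionConfig.Validate method in this diff and
	// reports any missing or invalid required fields.
	if err := cfg.Validate(); err != nil {
		fmt.Println("validation error:", err)
	}
}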
-func (s *CreateInvalidationInput) SetDistributionId(v string) *CreateInvalidationInput { - s.DistributionId = &v +// SetDistributionConfig sets the DistributionConfig field's value. +func (s *DistributionConfigWithTags) SetDistributionConfig(v *DistributionConfig) *DistributionConfigWithTags { + s.DistributionConfig = v return s } -// SetInvalidationBatch sets the InvalidationBatch field's value. -func (s *CreateInvalidationInput) SetInvalidationBatch(v *InvalidationBatch) *CreateInvalidationInput { - s.InvalidationBatch = v +// SetTags sets the Tags field's value. +func (s *DistributionConfigWithTags) SetTags(v *Tags) *DistributionConfigWithTags { + s.Tags = v return s } -// The returned result of the corresponding request. -type CreateInvalidationOutput struct { - _ struct{} `type:"structure" payload:"Invalidation"` +// A list of distribution IDs. +type DistributionIdList struct { + _ struct{} `type:"structure"` - // The invalidation's information. - Invalidation *Invalidation `type:"structure"` + // A flag that indicates whether more distribution IDs remain to be listed. + // If your results were truncated, you can make a subsequent request using the + // Marker request field to retrieve more distribution IDs in the list. + // + // IsTruncated is a required field + IsTruncated *bool `type:"boolean" required:"true"` - // The fully qualified URI of the distribution and invalidation batch request, - // including the Invalidation ID. - Location *string `location:"header" locationName:"Location" type:"string"` + // Contains the distribution IDs in the list. + Items []*string `locationNameList:"DistributionId" type:"list"` + + // The value provided in the Marker request field. + // + // Marker is a required field + Marker *string `type:"string" required:"true"` + + // The maximum number of distribution IDs requested. + // + // MaxItems is a required field + MaxItems *int64 `type:"integer" required:"true"` + + // Contains the value that you should use in the Marker field of a subsequent + // request to continue listing distribution IDs where you left off. + NextMarker *string `type:"string"` + + // The total number of distribution IDs returned in the response. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s CreateInvalidationOutput) String() string { +func (s DistributionIdList) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateInvalidationOutput) GoString() string { +func (s DistributionIdList) GoString() string { return s.String() } -// SetInvalidation sets the Invalidation field's value. -func (s *CreateInvalidationOutput) SetInvalidation(v *Invalidation) *CreateInvalidationOutput { - s.Invalidation = v +// SetIsTruncated sets the IsTruncated field's value. +func (s *DistributionIdList) SetIsTruncated(v bool) *DistributionIdList { + s.IsTruncated = &v return s } -// SetLocation sets the Location field's value. -func (s *CreateInvalidationOutput) SetLocation(v string) *CreateInvalidationOutput { - s.Location = &v +// SetItems sets the Items field's value. +func (s *DistributionIdList) SetItems(v []*string) *DistributionIdList { + s.Items = v return s } -type CreatePublicKeyInput struct { - _ struct{} `locationName:"CreatePublicKeyRequest" type:"structure" payload:"PublicKeyConfig"` +// SetMarker sets the Marker field's value. 
+func (s *DistributionIdList) SetMarker(v string) *DistributionIdList { + s.Marker = &v + return s +} - // The request to add a public key to CloudFront. +// SetMaxItems sets the MaxItems field's value. +func (s *DistributionIdList) SetMaxItems(v int64) *DistributionIdList { + s.MaxItems = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *DistributionIdList) SetNextMarker(v string) *DistributionIdList { + s.NextMarker = &v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *DistributionIdList) SetQuantity(v int64) *DistributionIdList { + s.Quantity = &v + return s +} + +// A distribution list. +type DistributionList struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether more distributions remain to be listed. If + // your results were truncated, you can make a follow-up pagination request + // using the Marker request parameter to retrieve more distributions in the + // list. // - // PublicKeyConfig is a required field - PublicKeyConfig *PublicKeyConfig `locationName:"PublicKeyConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + // IsTruncated is a required field + IsTruncated *bool `type:"boolean" required:"true"` + + // A complex type that contains one DistributionSummary element for each distribution + // that was created by the current AWS account. + Items []*DistributionSummary `locationNameList:"DistributionSummary" type:"list"` + + // The value you provided for the Marker request parameter. + // + // Marker is a required field + Marker *string `type:"string" required:"true"` + + // The value you provided for the MaxItems request parameter. + // + // MaxItems is a required field + MaxItems *int64 `type:"integer" required:"true"` + + // If IsTruncated is true, this element is present and contains the value you + // can use for the Marker request parameter to continue listing your distributions + // where they left off. + NextMarker *string `type:"string"` + + // The number of distributions that were created by the current AWS account. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s CreatePublicKeyInput) String() string { +func (s DistributionList) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreatePublicKeyInput) GoString() string { +func (s DistributionList) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreatePublicKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreatePublicKeyInput"} - if s.PublicKeyConfig == nil { - invalidParams.Add(request.NewErrParamRequired("PublicKeyConfig")) - } - if s.PublicKeyConfig != nil { - if err := s.PublicKeyConfig.Validate(); err != nil { - invalidParams.AddNested("PublicKeyConfig", err.(request.ErrInvalidParams)) - } - } +// SetIsTruncated sets the IsTruncated field's value. +func (s *DistributionList) SetIsTruncated(v bool) *DistributionList { + s.IsTruncated = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetItems sets the Items field's value. +func (s *DistributionList) SetItems(v []*DistributionSummary) *DistributionList { + s.Items = v + return s } -// SetPublicKeyConfig sets the PublicKeyConfig field's value. 
-func (s *CreatePublicKeyInput) SetPublicKeyConfig(v *PublicKeyConfig) *CreatePublicKeyInput { - s.PublicKeyConfig = v +// SetMarker sets the Marker field's value. +func (s *DistributionList) SetMarker(v string) *DistributionList { + s.Marker = &v return s } -type CreatePublicKeyOutput struct { - _ struct{} `type:"structure" payload:"PublicKey"` +// SetMaxItems sets the MaxItems field's value. +func (s *DistributionList) SetMaxItems(v int64) *DistributionList { + s.MaxItems = &v + return s +} - // The current version of the public key. For example: E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` +// SetNextMarker sets the NextMarker field's value. +func (s *DistributionList) SetNextMarker(v string) *DistributionList { + s.NextMarker = &v + return s +} - // The fully qualified URI of the new public key resource just created. For - // example: https://cloudfront.amazonaws.com/2010-11-01/cloudfront-public-key/EDFDVBD632BHDS5. - Location *string `location:"header" locationName:"Location" type:"string"` +// SetQuantity sets the Quantity field's value. +func (s *DistributionList) SetQuantity(v int64) *DistributionList { + s.Quantity = &v + return s +} + +// A summary of the information about a CloudFront distribution. +type DistributionSummary struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon Resource Name) for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, + // where 123456789012 is your AWS account ID. + // + // ARN is a required field + ARN *string `type:"string" required:"true"` + + // AWS services in China customers must file for an Internet Content Provider + // (ICP) recordal if they want to serve content publicly on an alternate domain + // name, also known as a CNAME, that they've added to CloudFront. AliasICPRecordal + // provides the ICP recordal status for CNAMEs associated with distributions. + // + // For more information about ICP recordals, see Signup, Accounts, and Credentials + // (https://docs.amazonaws.cn/en_us/aws/latest/userguide/accounts-and-credentials.html) + // in Getting Started with AWS services in China. + AliasICPRecordals []*AliasICPRecordal `locationNameList:"AliasICPRecordal" type:"list"` + + // A complex type that contains information about CNAMEs (alternate domain names), + // if any, for this distribution. + // + // Aliases is a required field + Aliases *Aliases `type:"structure" required:"true"` - // Returned when you add a public key. - PublicKey *PublicKey `type:"structure"` -} + // A complex type that contains zero or more CacheBehavior elements. + // + // CacheBehaviors is a required field + CacheBehaviors *CacheBehaviors `type:"structure" required:"true"` -// String returns the string representation -func (s CreatePublicKeyOutput) String() string { - return awsutil.Prettify(s) -} + // The comment originally specified when this distribution was created. + // + // Comment is a required field + Comment *string `type:"string" required:"true"` -// GoString returns the string representation -func (s CreatePublicKeyOutput) GoString() string { - return s.String() -} + // A complex type that contains zero or more CustomErrorResponses elements. + // + // CustomErrorResponses is a required field + CustomErrorResponses *CustomErrorResponses `type:"structure" required:"true"` -// SetETag sets the ETag field's value. 
-func (s *CreatePublicKeyOutput) SetETag(v string) *CreatePublicKeyOutput { - s.ETag = &v - return s -} + // A complex type that describes the default cache behavior if you don't specify + // a CacheBehavior element or if files don't match any of the values of PathPattern + // in CacheBehavior elements. You must create exactly one default cache behavior. + // + // DefaultCacheBehavior is a required field + DefaultCacheBehavior *DefaultCacheBehavior `type:"structure" required:"true"` -// SetLocation sets the Location field's value. -func (s *CreatePublicKeyOutput) SetLocation(v string) *CreatePublicKeyOutput { - s.Location = &v - return s -} + // The domain name that corresponds to the distribution, for example, d111111abcdef8.cloudfront.net. + // + // DomainName is a required field + DomainName *string `type:"string" required:"true"` -// SetPublicKey sets the PublicKey field's value. -func (s *CreatePublicKeyOutput) SetPublicKey(v *PublicKey) *CreatePublicKeyOutput { - s.PublicKey = v - return s -} + // Whether the distribution is enabled to accept user requests for content. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` -// The request to create a new streaming distribution. -type CreateStreamingDistributionInput struct { - _ struct{} `locationName:"CreateStreamingDistributionRequest" type:"structure" payload:"StreamingDistributionConfig"` + // Specify the maximum HTTP version that you want viewers to use to communicate + // with CloudFront. The default value for new web distributions is http2. Viewers + // that don't support HTTP/2 will automatically use an earlier version. + // + // HttpVersion is a required field + HttpVersion *string `type:"string" required:"true" enum:"HttpVersion"` - // The streaming distribution's configuration information. + // The identifier for the distribution. For example: EDFDVBD632BHDS5. // - // StreamingDistributionConfig is a required field - StreamingDistributionConfig *StreamingDistributionConfig `locationName:"StreamingDistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` -} + // Id is a required field + Id *string `type:"string" required:"true"` -// String returns the string representation -func (s CreateStreamingDistributionInput) String() string { - return awsutil.Prettify(s) -} + // Whether CloudFront responds to IPv6 DNS requests with an IPv6 address for + // your distribution. + // + // IsIPV6Enabled is a required field + IsIPV6Enabled *bool `type:"boolean" required:"true"` -// GoString returns the string representation -func (s CreateStreamingDistributionInput) GoString() string { - return s.String() -} + // The date and time the distribution was last modified. + // + // LastModifiedTime is a required field + LastModifiedTime *time.Time `type:"timestamp" required:"true"` -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateStreamingDistributionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateStreamingDistributionInput"} - if s.StreamingDistributionConfig == nil { - invalidParams.Add(request.NewErrParamRequired("StreamingDistributionConfig")) - } - if s.StreamingDistributionConfig != nil { - if err := s.StreamingDistributionConfig.Validate(); err != nil { - invalidParams.AddNested("StreamingDistributionConfig", err.(request.ErrInvalidParams)) - } - } + // A complex type that contains information about origin groups for this distribution. 
+ OriginGroups *OriginGroups `type:"structure"` - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} + // A complex type that contains information about origins for this distribution. + // + // Origins is a required field + Origins *Origins `type:"structure" required:"true"` -// SetStreamingDistributionConfig sets the StreamingDistributionConfig field's value. -func (s *CreateStreamingDistributionInput) SetStreamingDistributionConfig(v *StreamingDistributionConfig) *CreateStreamingDistributionInput { - s.StreamingDistributionConfig = v - return s -} + // A complex type that contains information about price class for this streaming + // distribution. + // + // PriceClass is a required field + PriceClass *string `type:"string" required:"true" enum:"PriceClass"` -// The returned result of the corresponding request. -type CreateStreamingDistributionOutput struct { - _ struct{} `type:"structure" payload:"StreamingDistribution"` + // A complex type that identifies ways in which you want to restrict distribution + // of your content. + // + // Restrictions is a required field + Restrictions *Restrictions `type:"structure" required:"true"` - // The current version of the streaming distribution created. - ETag *string `location:"header" locationName:"ETag" type:"string"` + // The current status of the distribution. When the status is Deployed, the + // distribution's information is propagated to all CloudFront edge locations. + // + // Status is a required field + Status *string `type:"string" required:"true"` - // The fully qualified URI of the new streaming distribution resource just created. - // For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8. - Location *string `location:"header" locationName:"Location" type:"string"` + // A complex type that determines the distribution’s SSL/TLS configuration + // for communicating with viewers. + // + // ViewerCertificate is a required field + ViewerCertificate *ViewerCertificate `type:"structure" required:"true"` - // The streaming distribution's information. - StreamingDistribution *StreamingDistribution `type:"structure"` + // The Web ACL Id (if any) associated with the distribution. + // + // WebACLId is a required field + WebACLId *string `type:"string" required:"true"` } // String returns the string representation -func (s CreateStreamingDistributionOutput) String() string { +func (s DistributionSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateStreamingDistributionOutput) GoString() string { +func (s DistributionSummary) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *CreateStreamingDistributionOutput) SetETag(v string) *CreateStreamingDistributionOutput { - s.ETag = &v +// SetARN sets the ARN field's value. +func (s *DistributionSummary) SetARN(v string) *DistributionSummary { + s.ARN = &v return s } -// SetLocation sets the Location field's value. -func (s *CreateStreamingDistributionOutput) SetLocation(v string) *CreateStreamingDistributionOutput { - s.Location = &v +// SetAliasICPRecordals sets the AliasICPRecordals field's value. +func (s *DistributionSummary) SetAliasICPRecordals(v []*AliasICPRecordal) *DistributionSummary { + s.AliasICPRecordals = v return s } -// SetStreamingDistribution sets the StreamingDistribution field's value. 
-func (s *CreateStreamingDistributionOutput) SetStreamingDistribution(v *StreamingDistribution) *CreateStreamingDistributionOutput { - s.StreamingDistribution = v +// SetAliases sets the Aliases field's value. +func (s *DistributionSummary) SetAliases(v *Aliases) *DistributionSummary { + s.Aliases = v return s } -// The request to create a new streaming distribution with tags. -type CreateStreamingDistributionWithTagsInput struct { - _ struct{} `locationName:"CreateStreamingDistributionWithTagsRequest" type:"structure" payload:"StreamingDistributionConfigWithTags"` - - // The streaming distribution's configuration information. - // - // StreamingDistributionConfigWithTags is a required field - StreamingDistributionConfigWithTags *StreamingDistributionConfigWithTags `locationName:"StreamingDistributionConfigWithTags" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` -} - -// String returns the string representation -func (s CreateStreamingDistributionWithTagsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateStreamingDistributionWithTagsInput) GoString() string { - return s.String() +// SetCacheBehaviors sets the CacheBehaviors field's value. +func (s *DistributionSummary) SetCacheBehaviors(v *CacheBehaviors) *DistributionSummary { + s.CacheBehaviors = v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateStreamingDistributionWithTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateStreamingDistributionWithTagsInput"} - if s.StreamingDistributionConfigWithTags == nil { - invalidParams.Add(request.NewErrParamRequired("StreamingDistributionConfigWithTags")) - } - if s.StreamingDistributionConfigWithTags != nil { - if err := s.StreamingDistributionConfigWithTags.Validate(); err != nil { - invalidParams.AddNested("StreamingDistributionConfigWithTags", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetComment sets the Comment field's value. +func (s *DistributionSummary) SetComment(v string) *DistributionSummary { + s.Comment = &v + return s } -// SetStreamingDistributionConfigWithTags sets the StreamingDistributionConfigWithTags field's value. -func (s *CreateStreamingDistributionWithTagsInput) SetStreamingDistributionConfigWithTags(v *StreamingDistributionConfigWithTags) *CreateStreamingDistributionWithTagsInput { - s.StreamingDistributionConfigWithTags = v +// SetCustomErrorResponses sets the CustomErrorResponses field's value. +func (s *DistributionSummary) SetCustomErrorResponses(v *CustomErrorResponses) *DistributionSummary { + s.CustomErrorResponses = v return s } -// The returned result of the corresponding request. -type CreateStreamingDistributionWithTagsOutput struct { - _ struct{} `type:"structure" payload:"StreamingDistribution"` - - // The current version of the distribution created. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // The fully qualified URI of the new streaming distribution resource just created. - // For example:https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8. - Location *string `location:"header" locationName:"Location" type:"string"` - - // The streaming distribution's information. - StreamingDistribution *StreamingDistribution `type:"structure"` +// SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value. 
+func (s *DistributionSummary) SetDefaultCacheBehavior(v *DefaultCacheBehavior) *DistributionSummary { + s.DefaultCacheBehavior = v + return s } -// String returns the string representation -func (s CreateStreamingDistributionWithTagsOutput) String() string { - return awsutil.Prettify(s) +// SetDomainName sets the DomainName field's value. +func (s *DistributionSummary) SetDomainName(v string) *DistributionSummary { + s.DomainName = &v + return s } -// GoString returns the string representation -func (s CreateStreamingDistributionWithTagsOutput) GoString() string { - return s.String() +// SetEnabled sets the Enabled field's value. +func (s *DistributionSummary) SetEnabled(v bool) *DistributionSummary { + s.Enabled = &v + return s } -// SetETag sets the ETag field's value. -func (s *CreateStreamingDistributionWithTagsOutput) SetETag(v string) *CreateStreamingDistributionWithTagsOutput { - s.ETag = &v +// SetHttpVersion sets the HttpVersion field's value. +func (s *DistributionSummary) SetHttpVersion(v string) *DistributionSummary { + s.HttpVersion = &v return s } -// SetLocation sets the Location field's value. -func (s *CreateStreamingDistributionWithTagsOutput) SetLocation(v string) *CreateStreamingDistributionWithTagsOutput { - s.Location = &v +// SetId sets the Id field's value. +func (s *DistributionSummary) SetId(v string) *DistributionSummary { + s.Id = &v return s } -// SetStreamingDistribution sets the StreamingDistribution field's value. -func (s *CreateStreamingDistributionWithTagsOutput) SetStreamingDistribution(v *StreamingDistribution) *CreateStreamingDistributionWithTagsOutput { - s.StreamingDistribution = v +// SetIsIPV6Enabled sets the IsIPV6Enabled field's value. +func (s *DistributionSummary) SetIsIPV6Enabled(v bool) *DistributionSummary { + s.IsIPV6Enabled = &v return s } -// A complex type that controls: -// -// * Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range -// with custom error messages before returning the response to the viewer. -// -// * How long CloudFront caches HTTP status codes in the 4xx and 5xx range. -// -// For more information about custom error pages, see Customizing Error Responses -// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) -// in the Amazon CloudFront Developer Guide. -type CustomErrorResponse struct { - _ struct{} `type:"structure"` - - // The minimum amount of time, in seconds, that you want CloudFront to cache - // the HTTP status code specified in ErrorCode. When this time period has elapsed, - // CloudFront queries your origin to see whether the problem that caused the - // error has been resolved and the requested object is now available. - // - // For more information, see Customizing Error Responses (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) - // in the Amazon CloudFront Developer Guide. - ErrorCachingMinTTL *int64 `type:"long"` - - // The HTTP status code for which you want to specify a custom error page and/or - // a caching duration. - // - // ErrorCode is a required field - ErrorCode *int64 `type:"integer" required:"true"` - - // The HTTP status code that you want CloudFront to return to the viewer along - // with the custom error page. 
There are a variety of reasons that you might - // want CloudFront to return a status code different from the status code that - // your origin returned to CloudFront, for example: - // - // * Some Internet devices (some firewalls and corporate proxies, for example) - // intercept HTTP 4xx and 5xx and prevent the response from being returned - // to the viewer. If you substitute 200, the response typically won't be - // intercepted. - // - // * If you don't care about distinguishing among different client errors - // or server errors, you can specify 400 or 500 as the ResponseCode for all - // 4xx or 5xx errors. - // - // * You might want to return a 200 status code (OK) and static website so - // your customers don't know that your website is down. - // - // If you specify a value for ResponseCode, you must also specify a value for - // ResponsePagePath. - ResponseCode *string `type:"string"` - - // The path to the custom error page that you want CloudFront to return to a - // viewer when your origin returns the HTTP status code specified by ErrorCode, - // for example, /4xx-errors/403-forbidden.html. If you want to store your objects - // and your custom error pages in different locations, your distribution must - // include a cache behavior for which the following is true: - // - // * The value of PathPattern matches the path to your custom error messages. - // For example, suppose you saved custom error pages for 4xx errors in an - // Amazon S3 bucket in a directory named /4xx-errors. Your distribution must - // include a cache behavior for which the path pattern routes requests for - // your custom error pages to that location, for example, /4xx-errors/*. - // - // * The value of TargetOriginId specifies the value of the ID element for - // the origin that contains your custom error pages. - // - // If you specify a value for ResponsePagePath, you must also specify a value - // for ResponseCode. - // - // We recommend that you store custom error pages in an Amazon S3 bucket. If - // you store custom error pages on an HTTP server and the server starts to return - // 5xx errors, CloudFront can't get the files that you want to return to viewers - // because the origin server is unavailable. - ResponsePagePath *string `type:"string"` +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *DistributionSummary) SetLastModifiedTime(v time.Time) *DistributionSummary { + s.LastModifiedTime = &v + return s } -// String returns the string representation -func (s CustomErrorResponse) String() string { - return awsutil.Prettify(s) +// SetOriginGroups sets the OriginGroups field's value. +func (s *DistributionSummary) SetOriginGroups(v *OriginGroups) *DistributionSummary { + s.OriginGroups = v + return s } -// GoString returns the string representation -func (s CustomErrorResponse) GoString() string { - return s.String() +// SetOrigins sets the Origins field's value. +func (s *DistributionSummary) SetOrigins(v *Origins) *DistributionSummary { + s.Origins = v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CustomErrorResponse) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CustomErrorResponse"} - if s.ErrorCode == nil { - invalidParams.Add(request.NewErrParamRequired("ErrorCode")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetPriceClass sets the PriceClass field's value. 
+func (s *DistributionSummary) SetPriceClass(v string) *DistributionSummary { + s.PriceClass = &v + return s } -// SetErrorCachingMinTTL sets the ErrorCachingMinTTL field's value. -func (s *CustomErrorResponse) SetErrorCachingMinTTL(v int64) *CustomErrorResponse { - s.ErrorCachingMinTTL = &v +// SetRestrictions sets the Restrictions field's value. +func (s *DistributionSummary) SetRestrictions(v *Restrictions) *DistributionSummary { + s.Restrictions = v return s } -// SetErrorCode sets the ErrorCode field's value. -func (s *CustomErrorResponse) SetErrorCode(v int64) *CustomErrorResponse { - s.ErrorCode = &v +// SetStatus sets the Status field's value. +func (s *DistributionSummary) SetStatus(v string) *DistributionSummary { + s.Status = &v return s } -// SetResponseCode sets the ResponseCode field's value. -func (s *CustomErrorResponse) SetResponseCode(v string) *CustomErrorResponse { - s.ResponseCode = &v +// SetViewerCertificate sets the ViewerCertificate field's value. +func (s *DistributionSummary) SetViewerCertificate(v *ViewerCertificate) *DistributionSummary { + s.ViewerCertificate = v return s } -// SetResponsePagePath sets the ResponsePagePath field's value. -func (s *CustomErrorResponse) SetResponsePagePath(v string) *CustomErrorResponse { - s.ResponsePagePath = &v +// SetWebACLId sets the WebACLId field's value. +func (s *DistributionSummary) SetWebACLId(v string) *DistributionSummary { + s.WebACLId = &v return s } -// A complex type that controls: -// -// * Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range -// with custom error messages before returning the response to the viewer. -// -// * How long CloudFront caches HTTP status codes in the 4xx and 5xx range. -// -// For more information about custom error pages, see Customizing Error Responses -// (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) -// in the Amazon CloudFront Developer Guide. -type CustomErrorResponses struct { +// Complex data type for field-level encryption profiles that includes all of +// the encryption entities. +type EncryptionEntities struct { _ struct{} `type:"structure"` - // A complex type that contains a CustomErrorResponse element for each HTTP - // status code for which you want to specify a custom error page and/or a caching - // duration. - Items []*CustomErrorResponse `locationNameList:"CustomErrorResponse" type:"list"` + // An array of field patterns in a field-level encryption content type-profile + // mapping. + Items []*EncryptionEntity `locationNameList:"EncryptionEntity" type:"list"` - // The number of HTTP status codes for which you want to specify a custom error - // page and/or a caching duration. If Quantity is 0, you can omit Items. + // Number of field pattern items in a field-level encryption content type-profile + // mapping. // // Quantity is a required field Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s CustomErrorResponses) String() string { +func (s EncryptionEntities) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CustomErrorResponses) GoString() string { +func (s EncryptionEntities) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
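// Illustrative sketch only (not part of the vendored SDK diff): reading the
// DistributionList / DistributionSummary fields documented above while paging
// with IsTruncated and NextMarker. Assumes the ListDistributions operation
// defined elsewhere in this package and default AWS credentials.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))

	input := &cloudfront.ListDistributionsInput{}
	for {
		out, err := svc.ListDistributions(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, d := range out.DistributionList.Items {
			// Each item is a DistributionSummary as defined in this hunk.
			fmt.Printf("%s %s %s\n", *d.Id, *d.DomainName, *d.Status)
		}
		if out.DistributionList.IsTruncated == nil || !*out.DistributionList.IsTruncated {
			break
		}
		// Resume the listing where the previous page left off.
		input.Marker = out.DistributionList.NextMarker
	}
}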
-func (s *CustomErrorResponses) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CustomErrorResponses"} +func (s *EncryptionEntities) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EncryptionEntities"} if s.Quantity == nil { invalidParams.Add(request.NewErrParamRequired("Quantity")) } @@ -6867,56 +12374,70 @@ func (s *CustomErrorResponses) Validate() error { } // SetItems sets the Items field's value. -func (s *CustomErrorResponses) SetItems(v []*CustomErrorResponse) *CustomErrorResponses { +func (s *EncryptionEntities) SetItems(v []*EncryptionEntity) *EncryptionEntities { s.Items = v return s } // SetQuantity sets the Quantity field's value. -func (s *CustomErrorResponses) SetQuantity(v int64) *CustomErrorResponses { +func (s *EncryptionEntities) SetQuantity(v int64) *EncryptionEntities { s.Quantity = &v return s } -// A complex type that contains the list of Custom Headers for each origin. -type CustomHeaders struct { +// Complex data type for field-level encryption profiles that includes the encryption +// key and field pattern specifications. +type EncryptionEntity struct { _ struct{} `type:"structure"` - // Optional: A list that contains one OriginCustomHeader element for each custom - // header that you want CloudFront to forward to the origin. If Quantity is - // 0, omit Items. - Items []*OriginCustomHeader `locationNameList:"OriginCustomHeader" type:"list"` + // Field patterns in a field-level encryption content type profile specify the + // fields that you want to be encrypted. You can provide the full field name, + // or any beginning characters followed by a wildcard (*). You can't overlap + // field patterns. For example, you can't have both ABC* and AB*. Note that + // field patterns are case-sensitive. + // + // FieldPatterns is a required field + FieldPatterns *FieldPatterns `type:"structure" required:"true"` - // The number of custom headers, if any, for this distribution. + // The provider associated with the public key being used for encryption. This + // value must also be provided with the private key for applications to be able + // to decrypt data. // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // ProviderId is a required field + ProviderId *string `type:"string" required:"true"` + + // The public key associated with a set of field-level encryption patterns, + // to be used when encrypting the fields that match the patterns. + // + // PublicKeyId is a required field + PublicKeyId *string `type:"string" required:"true"` } // String returns the string representation -func (s CustomHeaders) String() string { +func (s EncryptionEntity) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CustomHeaders) GoString() string { +func (s EncryptionEntity) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
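// Illustrative sketch only (not part of the vendored SDK diff): assembling an
// EncryptionEntity for a field-level encryption profile with the Set* helpers
// from this regenerated file. The key ID, provider ID, and field pattern are
// placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	entity := (&cloudfront.EncryptionEntity{}).
		SetPublicKeyId("KEXAMPLE12345").   // placeholder CloudFront public key ID
		SetProviderId("example-provider"). // placeholder provider ID
		SetFieldPatterns((&cloudfront.FieldPatterns{}).
			SetQuantity(1).
			SetItems([]*string{aws.String("CreditCard*")})) // encrypt fields matching this pattern

	// Validate enforces the required FieldPatterns, ProviderId, and PublicKeyId
	// fields, as in the EncryptionEntity.Validate method in this hunk.
	if err := entity.Validate(); err != nil {
		fmt.Println("validation error:", err)
	}
}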
-func (s *CustomHeaders) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CustomHeaders"} - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) +func (s *EncryptionEntity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EncryptionEntity"} + if s.FieldPatterns == nil { + invalidParams.Add(request.NewErrParamRequired("FieldPatterns")) } - if s.Items != nil { - for i, v := range s.Items { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) - } + if s.ProviderId == nil { + invalidParams.Add(request.NewErrParamRequired("ProviderId")) + } + if s.PublicKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("PublicKeyId")) + } + if s.FieldPatterns != nil { + if err := s.FieldPatterns.Validate(); err != nil { + invalidParams.AddNested("FieldPatterns", err.(request.ErrInvalidParams)) } } @@ -6926,85 +12447,59 @@ func (s *CustomHeaders) Validate() error { return nil } -// SetItems sets the Items field's value. -func (s *CustomHeaders) SetItems(v []*OriginCustomHeader) *CustomHeaders { - s.Items = v +// SetFieldPatterns sets the FieldPatterns field's value. +func (s *EncryptionEntity) SetFieldPatterns(v *FieldPatterns) *EncryptionEntity { + s.FieldPatterns = v return s } -// SetQuantity sets the Quantity field's value. -func (s *CustomHeaders) SetQuantity(v int64) *CustomHeaders { - s.Quantity = &v +// SetProviderId sets the ProviderId field's value. +func (s *EncryptionEntity) SetProviderId(v string) *EncryptionEntity { + s.ProviderId = &v return s } -// A custom origin or an Amazon S3 bucket configured as a website endpoint. -type CustomOriginConfig struct { - _ struct{} `type:"structure"` - - // The HTTP port the custom origin listens on. - // - // HTTPPort is a required field - HTTPPort *int64 `type:"integer" required:"true"` - - // The HTTPS port the custom origin listens on. - // - // HTTPSPort is a required field - HTTPSPort *int64 `type:"integer" required:"true"` - - // You can create a custom keep-alive timeout. All timeout units are in seconds. - // The default keep-alive timeout is 5 seconds, but you can configure custom - // timeout lengths using the CloudFront API. The minimum timeout length is 1 - // second; the maximum is 60 seconds. - // - // If you need to increase the maximum time limit, contact the AWS Support Center - // (https://console.aws.amazon.com/support/home#/). - OriginKeepaliveTimeout *int64 `type:"integer"` +// SetPublicKeyId sets the PublicKeyId field's value. +func (s *EncryptionEntity) SetPublicKeyId(v string) *EncryptionEntity { + s.PublicKeyId = &v + return s +} - // The origin protocol policy to apply to your origin. - // - // OriginProtocolPolicy is a required field - OriginProtocolPolicy *string `type:"string" required:"true" enum:"OriginProtocolPolicy"` +// Contains information about the Amazon Kinesis data stream where you are sending +// real-time log data in a real-time log configuration. +type EndPoint struct { + _ struct{} `type:"structure"` - // You can create a custom origin read timeout. All timeout units are in seconds. - // The default origin read timeout is 30 seconds, but you can configure custom - // timeout lengths using the CloudFront API. The minimum timeout length is 4 - // seconds; the maximum is 60 seconds. - // - // If you need to increase the maximum time limit, contact the AWS Support Center - // (https://console.aws.amazon.com/support/home#/). 
- OriginReadTimeout *int64 `type:"integer"` + // Contains information about the Amazon Kinesis data stream where you are sending + // real-time log data. + KinesisStreamConfig *KinesisStreamConfig `type:"structure"` - // The SSL/TLS protocols that you want CloudFront to use when communicating - // with your origin over HTTPS. - OriginSslProtocols *OriginSslProtocols `type:"structure"` + // The type of data stream where you are sending real-time log data. The only + // valid value is Kinesis. + // + // StreamType is a required field + StreamType *string `type:"string" required:"true"` } // String returns the string representation -func (s CustomOriginConfig) String() string { +func (s EndPoint) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CustomOriginConfig) GoString() string { +func (s EndPoint) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CustomOriginConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CustomOriginConfig"} - if s.HTTPPort == nil { - invalidParams.Add(request.NewErrParamRequired("HTTPPort")) - } - if s.HTTPSPort == nil { - invalidParams.Add(request.NewErrParamRequired("HTTPSPort")) - } - if s.OriginProtocolPolicy == nil { - invalidParams.Add(request.NewErrParamRequired("OriginProtocolPolicy")) - } - if s.OriginSslProtocols != nil { - if err := s.OriginSslProtocols.Validate(); err != nil { - invalidParams.AddNested("OriginSslProtocols", err.(request.ErrInvalidParams)) +func (s *EndPoint) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EndPoint"} + if s.StreamType == nil { + invalidParams.Add(request.NewErrParamRequired("StreamType")) + } + if s.KinesisStreamConfig != nil { + if err := s.KinesisStreamConfig.Validate(); err != nil { + invalidParams.AddNested("KinesisStreamConfig", err.(request.ErrInvalidParams)) } } @@ -7014,227 +12509,117 @@ func (s *CustomOriginConfig) Validate() error { return nil } -// SetHTTPPort sets the HTTPPort field's value. -func (s *CustomOriginConfig) SetHTTPPort(v int64) *CustomOriginConfig { - s.HTTPPort = &v +// SetKinesisStreamConfig sets the KinesisStreamConfig field's value. +func (s *EndPoint) SetKinesisStreamConfig(v *KinesisStreamConfig) *EndPoint { + s.KinesisStreamConfig = v return s } -// SetHTTPSPort sets the HTTPSPort field's value. -func (s *CustomOriginConfig) SetHTTPSPort(v int64) *CustomOriginConfig { - s.HTTPSPort = &v +// SetStreamType sets the StreamType field's value. +func (s *EndPoint) SetStreamType(v string) *EndPoint { + s.StreamType = &v return s } -// SetOriginKeepaliveTimeout sets the OriginKeepaliveTimeout field's value. -func (s *CustomOriginConfig) SetOriginKeepaliveTimeout(v int64) *CustomOriginConfig { - s.OriginKeepaliveTimeout = &v - return s +// A complex data type that includes the profile configurations and other options +// specified for field-level encryption. +type FieldLevelEncryption struct { + _ struct{} `type:"structure"` + + // A complex data type that includes the profile configurations specified for + // field-level encryption. + // + // FieldLevelEncryptionConfig is a required field + FieldLevelEncryptionConfig *FieldLevelEncryptionConfig `type:"structure" required:"true"` + + // The configuration ID for a field-level encryption configuration which includes + // a set of profiles that specify certain selected data fields to be encrypted + // by specific public keys. 
+ // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The last time the field-level encryption configuration was changed. + // + // LastModifiedTime is a required field + LastModifiedTime *time.Time `type:"timestamp" required:"true"` } -// SetOriginProtocolPolicy sets the OriginProtocolPolicy field's value. -func (s *CustomOriginConfig) SetOriginProtocolPolicy(v string) *CustomOriginConfig { - s.OriginProtocolPolicy = &v +// String returns the string representation +func (s FieldLevelEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FieldLevelEncryption) GoString() string { + return s.String() +} + +// SetFieldLevelEncryptionConfig sets the FieldLevelEncryptionConfig field's value. +func (s *FieldLevelEncryption) SetFieldLevelEncryptionConfig(v *FieldLevelEncryptionConfig) *FieldLevelEncryption { + s.FieldLevelEncryptionConfig = v return s } -// SetOriginReadTimeout sets the OriginReadTimeout field's value. -func (s *CustomOriginConfig) SetOriginReadTimeout(v int64) *CustomOriginConfig { - s.OriginReadTimeout = &v +// SetId sets the Id field's value. +func (s *FieldLevelEncryption) SetId(v string) *FieldLevelEncryption { + s.Id = &v return s } -// SetOriginSslProtocols sets the OriginSslProtocols field's value. -func (s *CustomOriginConfig) SetOriginSslProtocols(v *OriginSslProtocols) *CustomOriginConfig { - s.OriginSslProtocols = v +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *FieldLevelEncryption) SetLastModifiedTime(v time.Time) *FieldLevelEncryption { + s.LastModifiedTime = &v return s } -// A complex type that describes the default cache behavior if you don't specify -// a CacheBehavior element or if files don't match any of the values of PathPattern -// in CacheBehavior elements. You must create exactly one default cache behavior. -type DefaultCacheBehavior struct { +// A complex data type that includes the profile configurations specified for +// field-level encryption. +type FieldLevelEncryptionConfig struct { _ struct{} `type:"structure"` - // A complex type that controls which HTTP methods CloudFront processes and - // forwards to your Amazon S3 bucket or your custom origin. There are three - // choices: - // - // * CloudFront forwards only GET and HEAD requests. - // - // * CloudFront forwards only GET, HEAD, and OPTIONS requests. - // - // * CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE - // requests. - // - // If you pick the third choice, you may need to restrict access to your Amazon - // S3 bucket or to your custom origin so users can't perform operations that - // you don't want them to. For example, you might not want users to have permissions - // to delete objects from your origin. - AllowedMethods *AllowedMethods `type:"structure"` - - // Whether you want CloudFront to automatically compress certain files for this - // cache behavior. If so, specify true; if not, specify false. For more information, - // see Serving Compressed Files (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/ServingCompressedFiles.html) - // in the Amazon CloudFront Developer Guide. - Compress *bool `type:"boolean"` - - // The default amount of time that you want objects to stay in CloudFront caches - // before CloudFront forwards another request to your origin to determine whether - // the object has been updated. 
The value that you specify applies only when - // your origin does not add HTTP headers such as Cache-Control max-age, Cache-Control - // s-maxage, and Expires to objects. For more information, see Managing How - // Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) - // in the Amazon CloudFront Developer Guide. - DefaultTTL *int64 `type:"long"` - - // The value of ID for the field-level encryption configuration that you want - // CloudFront to use for encrypting specific fields of data for a cache behavior - // or for the default cache behavior in your distribution. - FieldLevelEncryptionId *string `type:"string"` - - // A complex type that specifies how CloudFront handles query strings, cookies, - // and HTTP headers. - // - // ForwardedValues is a required field - ForwardedValues *ForwardedValues `type:"structure" required:"true"` - - // A complex type that contains zero or more Lambda function associations for - // a cache behavior. - LambdaFunctionAssociations *LambdaFunctionAssociations `type:"structure"` - - // The maximum amount of time that you want objects to stay in CloudFront caches - // before CloudFront forwards another request to your origin to determine whether - // the object has been updated. The value that you specify applies only when - // your origin adds HTTP headers such as Cache-Control max-age, Cache-Control - // s-maxage, and Expires to objects. For more information, see Managing How - // Long Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) - // in the Amazon CloudFront Developer Guide. - MaxTTL *int64 `type:"long"` - - // The minimum amount of time that you want objects to stay in CloudFront caches - // before CloudFront forwards another request to your origin to determine whether - // the object has been updated. For more information, see Managing How Long - // Content Stays in an Edge Cache (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) - // in the Amazon CloudFront Developer Guide. - // - // You must specify 0 for MinTTL if you configure CloudFront to forward all - // headers to your origin (under Headers, if you specify 1 for Quantity and - // * for Name). - // - // MinTTL is a required field - MinTTL *int64 `type:"long" required:"true"` - - // Indicates whether you want to distribute media files in the Microsoft Smooth - // Streaming format using the origin that is associated with this cache behavior. - // If so, specify true; if not, specify false. If you specify true for SmoothStreaming, - // you can still distribute other content using this cache behavior if the content - // matches the value of PathPattern. - SmoothStreaming *bool `type:"boolean"` - - // The value of ID for the origin that you want CloudFront to route requests - // to when a request matches the path pattern either for a cache behavior or - // for the default cache behavior in your distribution. - // - // TargetOriginId is a required field - TargetOriginId *string `type:"string" required:"true"` - - // A complex type that specifies the AWS accounts, if any, that you want to - // allow to create signed URLs for private content. - // - // If you want to require signed URLs in requests for objects in the target - // origin that match the PathPattern for this cache behavior, specify true for - // Enabled, and specify the applicable values for Quantity and Items. 
For more - // information, see Serving Private Content through CloudFront (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) - // in the Amazon CloudFront Developer Guide. - // - // If you don't want to require signed URLs in requests for objects that match - // PathPattern, specify false for Enabled and 0 for Quantity. Omit Items. - // - // To add, change, or remove one or more trusted signers, change Enabled to - // true (if it's currently false), change Quantity as applicable, and specify - // all of the trusted signers that you want to include in the updated distribution. + // A unique number that ensures the request can't be replayed. // - // TrustedSigners is a required field - TrustedSigners *TrustedSigners `type:"structure" required:"true"` + // CallerReference is a required field + CallerReference *string `type:"string" required:"true"` - // The protocol that viewers can use to access the files in the origin specified - // by TargetOriginId when a request matches the path pattern in PathPattern. - // You can specify the following options: - // - // * allow-all: Viewers can use HTTP or HTTPS. - // - // * redirect-to-https: If a viewer submits an HTTP request, CloudFront returns - // an HTTP status code of 301 (Moved Permanently) to the viewer along with - // the HTTPS URL. The viewer then resubmits the request using the new URL. - // - // * https-only: If a viewer sends an HTTP request, CloudFront returns an - // HTTP status code of 403 (Forbidden). - // - // For more information about requiring the HTTPS protocol, see Using an HTTPS - // Connection to Access Your Objects (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/SecureConnections.html) - // in the Amazon CloudFront Developer Guide. - // - // The only way to guarantee that viewers retrieve an object that was fetched - // from the origin using HTTPS is never to use any other protocol to fetch the - // object. If you have recently changed from HTTP to HTTPS, we recommend that - // you clear your objects' cache because cached objects are protocol agnostic. - // That means that an edge location will return an object from the cache regardless - // of whether the current request protocol matches the protocol used previously. - // For more information, see Managing How Long Content Stays in an Edge Cache - // (Expiration) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Expiration.html) - // in the Amazon CloudFront Developer Guide. - // - // ViewerProtocolPolicy is a required field - ViewerProtocolPolicy *string `type:"string" required:"true" enum:"ViewerProtocolPolicy"` -} + // An optional comment about the configuration. + Comment *string `type:"string"` -// String returns the string representation -func (s DefaultCacheBehavior) String() string { - return awsutil.Prettify(s) -} + // A complex data type that specifies when to forward content if a content type + // isn't recognized and profiles to use as by default in a request if a query + // argument doesn't specify a profile to use. + ContentTypeProfileConfig *ContentTypeProfileConfig `type:"structure"` -// GoString returns the string representation -func (s DefaultCacheBehavior) GoString() string { - return s.String() + // A complex data type that specifies when to forward content if a profile isn't + // found and the profile that can be provided as a query argument in a request. 
+ QueryArgProfileConfig *QueryArgProfileConfig `type:"structure"` } - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DefaultCacheBehavior) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DefaultCacheBehavior"} - if s.ForwardedValues == nil { - invalidParams.Add(request.NewErrParamRequired("ForwardedValues")) - } - if s.MinTTL == nil { - invalidParams.Add(request.NewErrParamRequired("MinTTL")) - } - if s.TargetOriginId == nil { - invalidParams.Add(request.NewErrParamRequired("TargetOriginId")) - } - if s.TrustedSigners == nil { - invalidParams.Add(request.NewErrParamRequired("TrustedSigners")) - } - if s.ViewerProtocolPolicy == nil { - invalidParams.Add(request.NewErrParamRequired("ViewerProtocolPolicy")) - } - if s.AllowedMethods != nil { - if err := s.AllowedMethods.Validate(); err != nil { - invalidParams.AddNested("AllowedMethods", err.(request.ErrInvalidParams)) - } - } - if s.ForwardedValues != nil { - if err := s.ForwardedValues.Validate(); err != nil { - invalidParams.AddNested("ForwardedValues", err.(request.ErrInvalidParams)) - } + +// String returns the string representation +func (s FieldLevelEncryptionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FieldLevelEncryptionConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FieldLevelEncryptionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FieldLevelEncryptionConfig"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) } - if s.LambdaFunctionAssociations != nil { - if err := s.LambdaFunctionAssociations.Validate(); err != nil { - invalidParams.AddNested("LambdaFunctionAssociations", err.(request.ErrInvalidParams)) + if s.ContentTypeProfileConfig != nil { + if err := s.ContentTypeProfileConfig.Validate(); err != nil { + invalidParams.AddNested("ContentTypeProfileConfig", err.(request.ErrInvalidParams)) } } - if s.TrustedSigners != nil { - if err := s.TrustedSigners.Validate(); err != nil { - invalidParams.AddNested("TrustedSigners", err.(request.ErrInvalidParams)) + if s.QueryArgProfileConfig != nil { + if err := s.QueryArgProfileConfig.Validate(); err != nil { + invalidParams.AddNested("QueryArgProfileConfig", err.(request.ErrInvalidParams)) } } @@ -7244,210 +12629,189 @@ func (s *DefaultCacheBehavior) Validate() error { return nil } -// SetAllowedMethods sets the AllowedMethods field's value. -func (s *DefaultCacheBehavior) SetAllowedMethods(v *AllowedMethods) *DefaultCacheBehavior { - s.AllowedMethods = v +// SetCallerReference sets the CallerReference field's value. +func (s *FieldLevelEncryptionConfig) SetCallerReference(v string) *FieldLevelEncryptionConfig { + s.CallerReference = &v return s } -// SetCompress sets the Compress field's value. -func (s *DefaultCacheBehavior) SetCompress(v bool) *DefaultCacheBehavior { - s.Compress = &v +// SetComment sets the Comment field's value. +func (s *FieldLevelEncryptionConfig) SetComment(v string) *FieldLevelEncryptionConfig { + s.Comment = &v return s } -// SetDefaultTTL sets the DefaultTTL field's value. -func (s *DefaultCacheBehavior) SetDefaultTTL(v int64) *DefaultCacheBehavior { - s.DefaultTTL = &v +// SetContentTypeProfileConfig sets the ContentTypeProfileConfig field's value. 
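// Editorial illustration, not part of the vendored SDK source: a minimal sketch of
// building the FieldLevelEncryptionConfig described above, assuming the
// "github.com/aws/aws-sdk-go/aws" and "github.com/aws/aws-sdk-go/service/cloudfront"
// imports. The caller reference and comment are hypothetical placeholders.
//
//	cfg := &cloudfront.FieldLevelEncryptionConfig{
//		// A unique caller reference ensures the request can't be replayed.
//		CallerReference: aws.String("fle-config-2021-07-20-001"),
//		Comment:         aws.String("example field-level encryption configuration"),
//		// ContentTypeProfileConfig and QueryArgProfileConfig are optional and,
//		// when set, are validated recursively by the Validate method above.
//	}
//	if err := cfg.Validate(); err != nil {
//		// A missing CallerReference (or an invalid nested config) is reported here.
//	}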
+func (s *FieldLevelEncryptionConfig) SetContentTypeProfileConfig(v *ContentTypeProfileConfig) *FieldLevelEncryptionConfig { + s.ContentTypeProfileConfig = v return s } -// SetFieldLevelEncryptionId sets the FieldLevelEncryptionId field's value. -func (s *DefaultCacheBehavior) SetFieldLevelEncryptionId(v string) *DefaultCacheBehavior { - s.FieldLevelEncryptionId = &v +// SetQueryArgProfileConfig sets the QueryArgProfileConfig field's value. +func (s *FieldLevelEncryptionConfig) SetQueryArgProfileConfig(v *QueryArgProfileConfig) *FieldLevelEncryptionConfig { + s.QueryArgProfileConfig = v return s } -// SetForwardedValues sets the ForwardedValues field's value. -func (s *DefaultCacheBehavior) SetForwardedValues(v *ForwardedValues) *DefaultCacheBehavior { - s.ForwardedValues = v - return s -} +// List of field-level encrpytion configurations. +type FieldLevelEncryptionList struct { + _ struct{} `type:"structure"` -// SetLambdaFunctionAssociations sets the LambdaFunctionAssociations field's value. -func (s *DefaultCacheBehavior) SetLambdaFunctionAssociations(v *LambdaFunctionAssociations) *DefaultCacheBehavior { - s.LambdaFunctionAssociations = v - return s + // An array of field-level encryption items. + Items []*FieldLevelEncryptionSummary `locationNameList:"FieldLevelEncryptionSummary" type:"list"` + + // The maximum number of elements you want in the response body. + // + // MaxItems is a required field + MaxItems *int64 `type:"integer" required:"true"` + + // If there are more elements to be listed, this element is present and contains + // the value that you can use for the Marker request parameter to continue listing + // your configurations where you left off. + NextMarker *string `type:"string"` + + // The number of field-level encryption items. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` } -// SetMaxTTL sets the MaxTTL field's value. -func (s *DefaultCacheBehavior) SetMaxTTL(v int64) *DefaultCacheBehavior { - s.MaxTTL = &v - return s +// String returns the string representation +func (s FieldLevelEncryptionList) String() string { + return awsutil.Prettify(s) } -// SetMinTTL sets the MinTTL field's value. -func (s *DefaultCacheBehavior) SetMinTTL(v int64) *DefaultCacheBehavior { - s.MinTTL = &v - return s +// GoString returns the string representation +func (s FieldLevelEncryptionList) GoString() string { + return s.String() } -// SetSmoothStreaming sets the SmoothStreaming field's value. -func (s *DefaultCacheBehavior) SetSmoothStreaming(v bool) *DefaultCacheBehavior { - s.SmoothStreaming = &v +// SetItems sets the Items field's value. +func (s *FieldLevelEncryptionList) SetItems(v []*FieldLevelEncryptionSummary) *FieldLevelEncryptionList { + s.Items = v return s } -// SetTargetOriginId sets the TargetOriginId field's value. -func (s *DefaultCacheBehavior) SetTargetOriginId(v string) *DefaultCacheBehavior { - s.TargetOriginId = &v +// SetMaxItems sets the MaxItems field's value. +func (s *FieldLevelEncryptionList) SetMaxItems(v int64) *FieldLevelEncryptionList { + s.MaxItems = &v return s } -// SetTrustedSigners sets the TrustedSigners field's value. -func (s *DefaultCacheBehavior) SetTrustedSigners(v *TrustedSigners) *DefaultCacheBehavior { - s.TrustedSigners = v +// SetNextMarker sets the NextMarker field's value. +func (s *FieldLevelEncryptionList) SetNextMarker(v string) *FieldLevelEncryptionList { + s.NextMarker = &v return s } -// SetViewerProtocolPolicy sets the ViewerProtocolPolicy field's value. 
-func (s *DefaultCacheBehavior) SetViewerProtocolPolicy(v string) *DefaultCacheBehavior { - s.ViewerProtocolPolicy = &v +// SetQuantity sets the Quantity field's value. +func (s *FieldLevelEncryptionList) SetQuantity(v int64) *FieldLevelEncryptionList { + s.Quantity = &v return s } -// Deletes a origin access identity. -type DeleteCloudFrontOriginAccessIdentityInput struct { - _ struct{} `locationName:"DeleteCloudFrontOriginAccessIdentityRequest" type:"structure"` +// A complex data type for field-level encryption profiles. +type FieldLevelEncryptionProfile struct { + _ struct{} `type:"structure"` - // The origin access identity's ID. + // A complex data type that includes the profile name and the encryption entities + // for the field-level encryption profile. + // + // FieldLevelEncryptionProfileConfig is a required field + FieldLevelEncryptionProfileConfig *FieldLevelEncryptionProfileConfig `type:"structure" required:"true"` + + // The ID for a field-level encryption profile configuration which includes + // a set of profiles that specify certain selected data fields to be encrypted + // by specific public keys. // // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + Id *string `type:"string" required:"true"` - // The value of the ETag header you received from a previous GET or PUT request. - // For example: E2QWRUHAPOMQZL. - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + // The last time the field-level encryption profile was updated. + // + // LastModifiedTime is a required field + LastModifiedTime *time.Time `type:"timestamp" required:"true"` } // String returns the string representation -func (s DeleteCloudFrontOriginAccessIdentityInput) String() string { +func (s FieldLevelEncryptionProfile) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteCloudFrontOriginAccessIdentityInput) GoString() string { +func (s FieldLevelEncryptionProfile) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteCloudFrontOriginAccessIdentityInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteCloudFrontOriginAccessIdentityInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetFieldLevelEncryptionProfileConfig sets the FieldLevelEncryptionProfileConfig field's value. +func (s *FieldLevelEncryptionProfile) SetFieldLevelEncryptionProfileConfig(v *FieldLevelEncryptionProfileConfig) *FieldLevelEncryptionProfile { + s.FieldLevelEncryptionProfileConfig = v + return s } // SetId sets the Id field's value. -func (s *DeleteCloudFrontOriginAccessIdentityInput) SetId(v string) *DeleteCloudFrontOriginAccessIdentityInput { +func (s *FieldLevelEncryptionProfile) SetId(v string) *FieldLevelEncryptionProfile { s.Id = &v return s } -// SetIfMatch sets the IfMatch field's value. -func (s *DeleteCloudFrontOriginAccessIdentityInput) SetIfMatch(v string) *DeleteCloudFrontOriginAccessIdentityInput { - s.IfMatch = &v +// SetLastModifiedTime sets the LastModifiedTime field's value. 
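// Editorial illustration, not part of the vendored SDK source: the
// FieldLevelEncryptionList type above is paginated, and its NextMarker value feeds
// the Marker request parameter of the follow-up list call. The list operation
// itself is outside this excerpt, so the sketch below (assuming the
// "github.com/aws/aws-sdk-go/aws" and "github.com/aws/aws-sdk-go/service/cloudfront"
// imports, with a hypothetical helper name) only shows the marker handling.
//
//	// nextPageMarker returns the value to pass as the Marker request parameter
//	// for the next page of results, or "" when NextMarker is absent and listing
//	// is complete.
//	func nextPageMarker(list *cloudfront.FieldLevelEncryptionList) string {
//		if list == nil || list.NextMarker == nil {
//			return ""
//		}
//		return aws.StringValue(list.NextMarker)
//	}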
+func (s *FieldLevelEncryptionProfile) SetLastModifiedTime(v time.Time) *FieldLevelEncryptionProfile { + s.LastModifiedTime = &v return s } -type DeleteCloudFrontOriginAccessIdentityOutput struct { +// A complex data type of profiles for the field-level encryption. +type FieldLevelEncryptionProfileConfig struct { _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteCloudFrontOriginAccessIdentityOutput) String() string { - return awsutil.Prettify(s) -} -// GoString returns the string representation -func (s DeleteCloudFrontOriginAccessIdentityOutput) GoString() string { - return s.String() -} + // A unique number that ensures that the request can't be replayed. + // + // CallerReference is a required field + CallerReference *string `type:"string" required:"true"` -// This action deletes a web distribution. To delete a web distribution using -// the CloudFront API, perform the following steps. -// -// To delete a web distribution using the CloudFront API: -// -// Disable the web distribution -// -// Submit a GET Distribution Config request to get the current configuration -// and the Etag header for the distribution. -// -// Update the XML document that was returned in the response to your GET Distribution -// Config request to change the value of Enabled to false. -// -// Submit a PUT Distribution Config request to update the configuration for -// your distribution. In the request body, include the XML document that you -// updated in Step 3. Set the value of the HTTP If-Match header to the value -// of the ETag header that CloudFront returned when you submitted the GET Distribution -// Config request in Step 2. -// -// Review the response to the PUT Distribution Config request to confirm that -// the distribution was successfully disabled. -// -// Submit a GET Distribution request to confirm that your changes have propagated. -// When propagation is complete, the value of Status is Deployed. -// -// Submit a DELETE Distribution request. Set the value of the HTTP If-Match -// header to the value of the ETag header that CloudFront returned when you -// submitted the GET Distribution Config request in Step 6. -// -// Review the response to your DELETE Distribution request to confirm that the -// distribution was successfully deleted. -// -// For information about deleting a distribution using the CloudFront console, -// see Deleting a Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/HowToDeleteDistribution.html) -// in the Amazon CloudFront Developer Guide. -type DeleteDistributionInput struct { - _ struct{} `locationName:"DeleteDistributionRequest" type:"structure"` + // An optional comment for the field-level encryption profile. + Comment *string `type:"string"` - // The distribution ID. + // A complex data type of encryption entities for the field-level encryption + // profile that include the public key ID, provider, and field patterns for + // specifying which fields to encrypt with this key. // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + // EncryptionEntities is a required field + EncryptionEntities *EncryptionEntities `type:"structure" required:"true"` - // The value of the ETag header that you received when you disabled the distribution. - // For example: E2QWRUHAPOMQZL. - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + // Profile name for the field-level encryption profile. 
+ // + // Name is a required field + Name *string `type:"string" required:"true"` } // String returns the string representation -func (s DeleteDistributionInput) String() string { +func (s FieldLevelEncryptionProfileConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDistributionInput) GoString() string { +func (s FieldLevelEncryptionProfileConfig) GoString() string { return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDistributionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDistributionInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FieldLevelEncryptionProfileConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FieldLevelEncryptionProfileConfig"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + if s.EncryptionEntities == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionEntities")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.EncryptionEntities != nil { + if err := s.EncryptionEntities.Validate(); err != nil { + invalidParams.AddNested("EncryptionEntities", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -7456,259 +12820,251 @@ func (s *DeleteDistributionInput) Validate() error { return nil } -// SetId sets the Id field's value. -func (s *DeleteDistributionInput) SetId(v string) *DeleteDistributionInput { - s.Id = &v +// SetCallerReference sets the CallerReference field's value. +func (s *FieldLevelEncryptionProfileConfig) SetCallerReference(v string) *FieldLevelEncryptionProfileConfig { + s.CallerReference = &v return s } -// SetIfMatch sets the IfMatch field's value. -func (s *DeleteDistributionInput) SetIfMatch(v string) *DeleteDistributionInput { - s.IfMatch = &v +// SetComment sets the Comment field's value. +func (s *FieldLevelEncryptionProfileConfig) SetComment(v string) *FieldLevelEncryptionProfileConfig { + s.Comment = &v return s } -type DeleteDistributionOutput struct { - _ struct{} `type:"structure"` +// SetEncryptionEntities sets the EncryptionEntities field's value. +func (s *FieldLevelEncryptionProfileConfig) SetEncryptionEntities(v *EncryptionEntities) *FieldLevelEncryptionProfileConfig { + s.EncryptionEntities = v + return s } -// String returns the string representation -func (s DeleteDistributionOutput) String() string { - return awsutil.Prettify(s) +// SetName sets the Name field's value. +func (s *FieldLevelEncryptionProfileConfig) SetName(v string) *FieldLevelEncryptionProfileConfig { + s.Name = &v + return s } -// GoString returns the string representation -func (s DeleteDistributionOutput) GoString() string { - return s.String() -} +// List of field-level encryption profiles. +type FieldLevelEncryptionProfileList struct { + _ struct{} `type:"structure"` -type DeleteFieldLevelEncryptionConfigInput struct { - _ struct{} `locationName:"DeleteFieldLevelEncryptionConfigRequest" type:"structure"` + // The field-level encryption profile items. + Items []*FieldLevelEncryptionProfileSummary `locationNameList:"FieldLevelEncryptionProfileSummary" type:"list"` - // The ID of the configuration you want to delete from CloudFront. 
+ // The maximum number of field-level encryption profiles you want in the response + // body. // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + // MaxItems is a required field + MaxItems *int64 `type:"integer" required:"true"` - // The value of the ETag header that you received when retrieving the configuration - // identity to delete. For example: E2QWRUHAPOMQZL. - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + // If there are more elements to be listed, this element is present and contains + // the value that you can use for the Marker request parameter to continue listing + // your profiles where you left off. + NextMarker *string `type:"string"` + + // The number of field-level encryption profiles. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s DeleteFieldLevelEncryptionConfigInput) String() string { +func (s FieldLevelEncryptionProfileList) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteFieldLevelEncryptionConfigInput) GoString() string { +func (s FieldLevelEncryptionProfileList) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteFieldLevelEncryptionConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteFieldLevelEncryptionConfigInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetItems sets the Items field's value. +func (s *FieldLevelEncryptionProfileList) SetItems(v []*FieldLevelEncryptionProfileSummary) *FieldLevelEncryptionProfileList { + s.Items = v + return s } -// SetId sets the Id field's value. -func (s *DeleteFieldLevelEncryptionConfigInput) SetId(v string) *DeleteFieldLevelEncryptionConfigInput { - s.Id = &v +// SetMaxItems sets the MaxItems field's value. +func (s *FieldLevelEncryptionProfileList) SetMaxItems(v int64) *FieldLevelEncryptionProfileList { + s.MaxItems = &v return s } -// SetIfMatch sets the IfMatch field's value. -func (s *DeleteFieldLevelEncryptionConfigInput) SetIfMatch(v string) *DeleteFieldLevelEncryptionConfigInput { - s.IfMatch = &v +// SetNextMarker sets the NextMarker field's value. +func (s *FieldLevelEncryptionProfileList) SetNextMarker(v string) *FieldLevelEncryptionProfileList { + s.NextMarker = &v return s } -type DeleteFieldLevelEncryptionConfigOutput struct { - _ struct{} `type:"structure"` +// SetQuantity sets the Quantity field's value. +func (s *FieldLevelEncryptionProfileList) SetQuantity(v int64) *FieldLevelEncryptionProfileList { + s.Quantity = &v + return s } -// String returns the string representation -func (s DeleteFieldLevelEncryptionConfigOutput) String() string { - return awsutil.Prettify(s) -} +// The field-level encryption profile summary. +type FieldLevelEncryptionProfileSummary struct { + _ struct{} `type:"structure"` -// GoString returns the string representation -func (s DeleteFieldLevelEncryptionConfigOutput) GoString() string { - return s.String() -} + // An optional comment for the field-level encryption profile summary. 
+ Comment *string `type:"string"` -type DeleteFieldLevelEncryptionProfileInput struct { - _ struct{} `locationName:"DeleteFieldLevelEncryptionProfileRequest" type:"structure"` + // A complex data type of encryption entities for the field-level encryption + // profile that include the public key ID, provider, and field patterns for + // specifying which fields to encrypt with this key. + // + // EncryptionEntities is a required field + EncryptionEntities *EncryptionEntities `type:"structure" required:"true"` - // Request the ID of the profile you want to delete from CloudFront. + // ID for the field-level encryption profile summary. // // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + Id *string `type:"string" required:"true"` - // The value of the ETag header that you received when retrieving the profile - // to delete. For example: E2QWRUHAPOMQZL. - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + // The time when the the field-level encryption profile summary was last updated. + // + // LastModifiedTime is a required field + LastModifiedTime *time.Time `type:"timestamp" required:"true"` + + // Name for the field-level encryption profile summary. + // + // Name is a required field + Name *string `type:"string" required:"true"` } // String returns the string representation -func (s DeleteFieldLevelEncryptionProfileInput) String() string { +func (s FieldLevelEncryptionProfileSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteFieldLevelEncryptionProfileInput) GoString() string { +func (s FieldLevelEncryptionProfileSummary) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteFieldLevelEncryptionProfileInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteFieldLevelEncryptionProfileInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) - } +// SetComment sets the Comment field's value. +func (s *FieldLevelEncryptionProfileSummary) SetComment(v string) *FieldLevelEncryptionProfileSummary { + s.Comment = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetEncryptionEntities sets the EncryptionEntities field's value. +func (s *FieldLevelEncryptionProfileSummary) SetEncryptionEntities(v *EncryptionEntities) *FieldLevelEncryptionProfileSummary { + s.EncryptionEntities = v + return s } // SetId sets the Id field's value. -func (s *DeleteFieldLevelEncryptionProfileInput) SetId(v string) *DeleteFieldLevelEncryptionProfileInput { +func (s *FieldLevelEncryptionProfileSummary) SetId(v string) *FieldLevelEncryptionProfileSummary { s.Id = &v return s } -// SetIfMatch sets the IfMatch field's value. -func (s *DeleteFieldLevelEncryptionProfileInput) SetIfMatch(v string) *DeleteFieldLevelEncryptionProfileInput { - s.IfMatch = &v +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *FieldLevelEncryptionProfileSummary) SetLastModifiedTime(v time.Time) *FieldLevelEncryptionProfileSummary { + s.LastModifiedTime = &v return s } -type DeleteFieldLevelEncryptionProfileOutput struct { - _ struct{} `type:"structure"` +// SetName sets the Name field's value. 
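// Editorial illustration, not part of the vendored SDK source: a sketch of building
// the FieldLevelEncryptionProfileConfig defined above using its fluent Set* methods,
// which each return the receiver and so can be chained. It assumes the
// "github.com/aws/aws-sdk-go/service/cloudfront" import; "entities" refers to the
// hypothetical *cloudfront.EncryptionEntities value built in the earlier sketch, and
// the caller reference, name, and comment are placeholders.
//
//	profileCfg := (&cloudfront.FieldLevelEncryptionProfileConfig{}).
//		SetCallerReference("fle-profile-2021-07-20-001").
//		SetName("example-profile").
//		SetComment("encrypt sensitive form fields").
//		SetEncryptionEntities(entities)
//
//	if err := profileCfg.Validate(); err != nil {
//		// CallerReference, Name, and EncryptionEntities are all required, and the
//		// nested EncryptionEntities value is validated as well.
//	}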
+func (s *FieldLevelEncryptionProfileSummary) SetName(v string) *FieldLevelEncryptionProfileSummary { + s.Name = &v + return s } -// String returns the string representation -func (s DeleteFieldLevelEncryptionProfileOutput) String() string { - return awsutil.Prettify(s) -} +// A summary of a field-level encryption item. +type FieldLevelEncryptionSummary struct { + _ struct{} `type:"structure"` -// GoString returns the string representation -func (s DeleteFieldLevelEncryptionProfileOutput) GoString() string { - return s.String() -} + // An optional comment about the field-level encryption item. + Comment *string `type:"string"` -type DeletePublicKeyInput struct { - _ struct{} `locationName:"DeletePublicKeyRequest" type:"structure"` + // A summary of a content type-profile mapping. + ContentTypeProfileConfig *ContentTypeProfileConfig `type:"structure"` - // The ID of the public key you want to remove from CloudFront. + // The unique ID of a field-level encryption item. // // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + Id *string `type:"string" required:"true"` - // The value of the ETag header that you received when retrieving the public - // key identity to delete. For example: E2QWRUHAPOMQZL. - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + // The last time that the summary of field-level encryption items was modified. + // + // LastModifiedTime is a required field + LastModifiedTime *time.Time `type:"timestamp" required:"true"` + + // A summary of a query argument-profile mapping. + QueryArgProfileConfig *QueryArgProfileConfig `type:"structure"` } // String returns the string representation -func (s DeletePublicKeyInput) String() string { +func (s FieldLevelEncryptionSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeletePublicKeyInput) GoString() string { +func (s FieldLevelEncryptionSummary) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeletePublicKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeletePublicKeyInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetComment sets the Comment field's value. +func (s *FieldLevelEncryptionSummary) SetComment(v string) *FieldLevelEncryptionSummary { + s.Comment = &v + return s } -// SetId sets the Id field's value. -func (s *DeletePublicKeyInput) SetId(v string) *DeletePublicKeyInput { - s.Id = &v +// SetContentTypeProfileConfig sets the ContentTypeProfileConfig field's value. +func (s *FieldLevelEncryptionSummary) SetContentTypeProfileConfig(v *ContentTypeProfileConfig) *FieldLevelEncryptionSummary { + s.ContentTypeProfileConfig = v return s } -// SetIfMatch sets the IfMatch field's value. -func (s *DeletePublicKeyInput) SetIfMatch(v string) *DeletePublicKeyInput { - s.IfMatch = &v +// SetId sets the Id field's value. +func (s *FieldLevelEncryptionSummary) SetId(v string) *FieldLevelEncryptionSummary { + s.Id = &v return s } -type DeletePublicKeyOutput struct { - _ struct{} `type:"structure"` +// SetLastModifiedTime sets the LastModifiedTime field's value. 
+func (s *FieldLevelEncryptionSummary) SetLastModifiedTime(v time.Time) *FieldLevelEncryptionSummary { + s.LastModifiedTime = &v + return s } -// String returns the string representation -func (s DeletePublicKeyOutput) String() string { - return awsutil.Prettify(s) +// SetQueryArgProfileConfig sets the QueryArgProfileConfig field's value. +func (s *FieldLevelEncryptionSummary) SetQueryArgProfileConfig(v *QueryArgProfileConfig) *FieldLevelEncryptionSummary { + s.QueryArgProfileConfig = v + return s } -// GoString returns the string representation -func (s DeletePublicKeyOutput) GoString() string { - return s.String() -} +// A complex data type that includes the field patterns to match for field-level +// encryption. +type FieldPatterns struct { + _ struct{} `type:"structure"` -// The request to delete a streaming distribution. -type DeleteStreamingDistributionInput struct { - _ struct{} `locationName:"DeleteStreamingDistributionRequest" type:"structure"` + // An array of the field-level encryption field patterns. + Items []*string `locationNameList:"FieldPattern" type:"list"` - // The distribution ID. + // The number of field-level encryption field patterns. // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` - - // The value of the ETag header that you received when you disabled the streaming - // distribution. For example: E2QWRUHAPOMQZL. - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s DeleteStreamingDistributionInput) String() string { +func (s FieldPatterns) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteStreamingDistributionInput) GoString() string { +func (s FieldPatterns) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteStreamingDistributionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteStreamingDistributionInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) +func (s *FieldPatterns) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FieldPatterns"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) } if invalidParams.Len() > 0 { @@ -7717,418 +13073,315 @@ func (s *DeleteStreamingDistributionInput) Validate() error { return nil } -// SetId sets the Id field's value. -func (s *DeleteStreamingDistributionInput) SetId(v string) *DeleteStreamingDistributionInput { - s.Id = &v +// SetItems sets the Items field's value. +func (s *FieldPatterns) SetItems(v []*string) *FieldPatterns { + s.Items = v return s } -// SetIfMatch sets the IfMatch field's value. -func (s *DeleteStreamingDistributionInput) SetIfMatch(v string) *DeleteStreamingDistributionInput { - s.IfMatch = &v +// SetQuantity sets the Quantity field's value. 
+func (s *FieldPatterns) SetQuantity(v int64) *FieldPatterns { + s.Quantity = &v return s } -type DeleteStreamingDistributionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteStreamingDistributionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteStreamingDistributionOutput) GoString() string { - return s.String() -} - -// A distribution tells CloudFront where you want content to be delivered from, -// and the details about how to track and manage content delivery. -type Distribution struct { +// This field is deprecated. We recommend that you use a cache policy or an +// origin request policy instead of this field. +// +// If you want to include values in the cache key, use a cache policy. For more +// information, see Creating cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) +// in the Amazon CloudFront Developer Guide. +// +// If you want to send values to the origin but not include them in the cache +// key, use an origin request policy. For more information, see Creating origin +// request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) +// in the Amazon CloudFront Developer Guide. +// +// A complex type that specifies how CloudFront handles query strings, cookies, +// and HTTP headers. +type ForwardedValues struct { _ struct{} `type:"structure"` - // The ARN (Amazon Resource Name) for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, - // where 123456789012 is your AWS account ID. + // This field is deprecated. We recommend that you use a cache policy or an + // origin request policy instead of this field. // - // ARN is a required field - ARN *string `type:"string" required:"true"` - - // CloudFront automatically adds this element to the response only if you've - // set up the distribution to serve private content with signed URLs. The element - // lists the key pair IDs that CloudFront is aware of for each trusted signer. - // The Signer child element lists the AWS account number of the trusted signer - // (or an empty Self element if the signer is you). The Signer element also - // includes the IDs of any active key pairs associated with the trusted signer's - // AWS account. If no KeyPairId element appears for a Signer, that signer can't - // create working signed URLs. + // If you want to include cookies in the cache key, use a cache policy. For + // more information, see Creating cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // in the Amazon CloudFront Developer Guide. // - // ActiveTrustedSigners is a required field - ActiveTrustedSigners *ActiveTrustedSigners `type:"structure" required:"true"` - - // AWS services in China customers must file for an Internet Content Provider - // (ICP) recordal if they want to serve content publicly on an alternate domain - // name, also known as a CNAME, that they've added to CloudFront. AliasICPRecordal - // provides the ICP recordal status for CNAMEs associated with distributions. + // If you want to send cookies to the origin but not include them in the cache + // key, use an origin request policy. 
For more information, see Creating origin + // request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) + // in the Amazon CloudFront Developer Guide. // - // For more information about ICP recordals, see Signup, Accounts, and Credentials - // (https://docs.amazonaws.cn/en_us/aws/latest/userguide/accounts-and-credentials.html) - // in Getting Started with AWS services in China. - AliasICPRecordals []*AliasICPRecordal `locationNameList:"AliasICPRecordal" type:"list"` - - // The current configuration information for the distribution. Send a GET request - // to the /CloudFront API version/distribution ID/config resource. + // A complex type that specifies whether you want CloudFront to forward cookies + // to the origin and, if so, which ones. For more information about forwarding + // cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Cookies.html) + // in the Amazon CloudFront Developer Guide. // - // DistributionConfig is a required field - DistributionConfig *DistributionConfig `type:"structure" required:"true"` + // Cookies is a required field + Cookies *CookiePreference `type:"structure" required:"true"` - // The domain name corresponding to the distribution, for example, d111111abcdef8.cloudfront.net. + // This field is deprecated. We recommend that you use a cache policy or an + // origin request policy instead of this field. // - // DomainName is a required field - DomainName *string `type:"string" required:"true"` - - // The identifier for the distribution. For example: EDFDVBD632BHDS5. + // If you want to include headers in the cache key, use a cache policy. For + // more information, see Creating cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // in the Amazon CloudFront Developer Guide. // - // Id is a required field - Id *string `type:"string" required:"true"` - - // The number of invalidation batches currently in progress. + // If you want to send headers to the origin but not include them in the cache + // key, use an origin request policy. For more information, see Creating origin + // request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) + // in the Amazon CloudFront Developer Guide. // - // InProgressInvalidationBatches is a required field - InProgressInvalidationBatches *int64 `type:"integer" required:"true"` + // A complex type that specifies the Headers, if any, that you want CloudFront + // to forward to the origin for this cache behavior (whitelisted headers). For + // the headers that you specify, CloudFront also caches separate versions of + // a specified object that is based on the header values in viewer requests. + // + // For more information, see Caching Content Based on Request Headers (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/header-caching.html) + // in the Amazon CloudFront Developer Guide. + Headers *Headers `type:"structure"` - // The date and time the distribution was last modified. + // This field is deprecated. We recommend that you use a cache policy or an + // origin request policy instead of this field. 
// - // LastModifiedTime is a required field - LastModifiedTime *time.Time `type:"timestamp" required:"true"` + // If you want to include query strings in the cache key, use a cache policy. + // For more information, see Creating cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // in the Amazon CloudFront Developer Guide. + // + // If you want to send query strings to the origin but not include them in the + // cache key, use an origin request policy. For more information, see Creating + // origin request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) + // in the Amazon CloudFront Developer Guide. + // + // Indicates whether you want CloudFront to forward query strings to the origin + // that is associated with this cache behavior and cache based on the query + // string parameters. CloudFront behavior depends on the value of QueryString + // and on the values that you specify for QueryStringCacheKeys, if any: + // + // If you specify true for QueryString and you don't specify any values for + // QueryStringCacheKeys, CloudFront forwards all query string parameters to + // the origin and caches based on all query string parameters. Depending on + // how many query string parameters and values you have, this can adversely + // affect performance because CloudFront must forward more requests to the origin. + // + // If you specify true for QueryString and you specify one or more values for + // QueryStringCacheKeys, CloudFront forwards all query string parameters to + // the origin, but it only caches based on the query string parameters that + // you specify. + // + // If you specify false for QueryString, CloudFront doesn't forward any query + // string parameters to the origin, and doesn't cache based on query string + // parameters. + // + // For more information, see Configuring CloudFront to Cache Based on Query + // String Parameters (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/QueryStringParameters.html) + // in the Amazon CloudFront Developer Guide. + // + // QueryString is a required field + QueryString *bool `type:"boolean" required:"true"` - // This response element indicates the current status of the distribution. When - // the status is Deployed, the distribution's information is fully propagated - // to all CloudFront edge locations. + // This field is deprecated. We recommend that you use a cache policy or an + // origin request policy instead of this field. // - // Status is a required field - Status *string `type:"string" required:"true"` + // If you want to include query strings in the cache key, use a cache policy. + // For more information, see Creating cache policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) + // in the Amazon CloudFront Developer Guide. + // + // If you want to send query strings to the origin but not include them in the + // cache key, use an origin request policy. For more information, see Creating + // origin request policies (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-origin-requests.html#origin-request-create-origin-request-policy) + // in the Amazon CloudFront Developer Guide. 
+ // + // A complex type that contains information about the query string parameters + // that you want CloudFront to use for caching for this cache behavior. + QueryStringCacheKeys *QueryStringCacheKeys `type:"structure"` } // String returns the string representation -func (s Distribution) String() string { +func (s ForwardedValues) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Distribution) GoString() string { +func (s ForwardedValues) GoString() string { return s.String() } -// SetARN sets the ARN field's value. -func (s *Distribution) SetARN(v string) *Distribution { - s.ARN = &v - return s -} - -// SetActiveTrustedSigners sets the ActiveTrustedSigners field's value. -func (s *Distribution) SetActiveTrustedSigners(v *ActiveTrustedSigners) *Distribution { - s.ActiveTrustedSigners = v - return s -} - -// SetAliasICPRecordals sets the AliasICPRecordals field's value. -func (s *Distribution) SetAliasICPRecordals(v []*AliasICPRecordal) *Distribution { - s.AliasICPRecordals = v - return s -} - -// SetDistributionConfig sets the DistributionConfig field's value. -func (s *Distribution) SetDistributionConfig(v *DistributionConfig) *Distribution { - s.DistributionConfig = v - return s -} - -// SetDomainName sets the DomainName field's value. -func (s *Distribution) SetDomainName(v string) *Distribution { - s.DomainName = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *ForwardedValues) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ForwardedValues"} + if s.Cookies == nil { + invalidParams.Add(request.NewErrParamRequired("Cookies")) + } + if s.QueryString == nil { + invalidParams.Add(request.NewErrParamRequired("QueryString")) + } + if s.Cookies != nil { + if err := s.Cookies.Validate(); err != nil { + invalidParams.AddNested("Cookies", err.(request.ErrInvalidParams)) + } + } + if s.Headers != nil { + if err := s.Headers.Validate(); err != nil { + invalidParams.AddNested("Headers", err.(request.ErrInvalidParams)) + } + } + if s.QueryStringCacheKeys != nil { + if err := s.QueryStringCacheKeys.Validate(); err != nil { + invalidParams.AddNested("QueryStringCacheKeys", err.(request.ErrInvalidParams)) + } + } -// SetId sets the Id field's value. -func (s *Distribution) SetId(v string) *Distribution { - s.Id = &v + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCookies sets the Cookies field's value. +func (s *ForwardedValues) SetCookies(v *CookiePreference) *ForwardedValues { + s.Cookies = v return s } -// SetInProgressInvalidationBatches sets the InProgressInvalidationBatches field's value. -func (s *Distribution) SetInProgressInvalidationBatches(v int64) *Distribution { - s.InProgressInvalidationBatches = &v +// SetHeaders sets the Headers field's value. +func (s *ForwardedValues) SetHeaders(v *Headers) *ForwardedValues { + s.Headers = v return s } -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *Distribution) SetLastModifiedTime(v time.Time) *Distribution { - s.LastModifiedTime = &v +// SetQueryString sets the QueryString field's value. +func (s *ForwardedValues) SetQueryString(v bool) *ForwardedValues { + s.QueryString = &v return s } -// SetStatus sets the Status field's value. -func (s *Distribution) SetStatus(v string) *Distribution { - s.Status = &v +// SetQueryStringCacheKeys sets the QueryStringCacheKeys field's value. 
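// Editorial sketch, not part of the generated SDK source: the ForwardedValues
// documentation above spells out how QueryString and QueryStringCacheKeys
// interact (forward everything and cache on everything, cache only on the
// listed keys, or forward nothing). From a consumer's point of view, a value
// that forwards all query strings but caches only on a single illustrative
// "v" parameter could be built as below; the aws and cloudfront import aliases
// (github.com/aws/aws-sdk-go/aws, .../service/cloudfront) and the parameter
// name are assumptions, not anything defined in this file.
//
//	fv := &cloudfront.ForwardedValues{
//		// Required: don't forward cookies to the origin.
//		Cookies: &cloudfront.CookiePreference{Forward: aws.String("none")},
//		// Required: forward all query string parameters to the origin...
//		QueryString: aws.Bool(true),
//		// ...but vary the cache only on the "v" parameter.
//		QueryStringCacheKeys: &cloudfront.QueryStringCacheKeys{
//			Quantity: aws.Int64(1),
//			Items:    []*string{aws.String("v")},
//		},
//	}
//	if err := fv.Validate(); err != nil {
//		// Cookies and QueryString are required, so a half-built value fails here.
//	}
//
// Note that the comments above also mark this whole structure as deprecated in
// favor of cache policies and origin request policies; the sketch only
// illustrates the legacy field for behaviors that still use it.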
+func (s *ForwardedValues) SetQueryStringCacheKeys(v *QueryStringCacheKeys) *ForwardedValues { + s.QueryStringCacheKeys = v return s } -// A distribution configuration. -type DistributionConfig struct { +// A complex type that controls the countries in which your content is distributed. +// CloudFront determines the location of your users using MaxMind GeoIP databases. +type GeoRestriction struct { _ struct{} `type:"structure"` - // A complex type that contains information about CNAMEs (alternate domain names), - // if any, for this distribution. - Aliases *Aliases `type:"structure"` - - // A complex type that contains zero or more CacheBehavior elements. - CacheBehaviors *CacheBehaviors `type:"structure"` - - // A unique value (for example, a date-time stamp) that ensures that the request - // can't be replayed. - // - // If the value of CallerReference is new (regardless of the content of the - // DistributionConfig object), CloudFront creates a new distribution. - // - // If CallerReference is a value that you already sent in a previous request - // to create a distribution, CloudFront returns a DistributionAlreadyExists - // error. - // - // CallerReference is a required field - CallerReference *string `type:"string" required:"true"` - - // Any comments you want to include about the distribution. - // - // If you don't want to specify a comment, include an empty Comment element. - // - // To delete an existing comment, update the distribution configuration and - // include an empty Comment element. - // - // To add or change a comment, update the distribution configuration and specify - // the new comment. - // - // Comment is a required field - Comment *string `type:"string" required:"true" sensitive:"true"` - - // A complex type that controls the following: - // - // * Whether CloudFront replaces HTTP status codes in the 4xx and 5xx range - // with custom error messages before returning the response to the viewer. - // - // * How long CloudFront caches HTTP status codes in the 4xx and 5xx range. - // - // For more information about custom error pages, see Customizing Error Responses - // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages.html) - // in the Amazon CloudFront Developer Guide. - CustomErrorResponses *CustomErrorResponses `type:"structure"` - - // A complex type that describes the default cache behavior if you don't specify - // a CacheBehavior element or if files don't match any of the values of PathPattern - // in CacheBehavior elements. You must create exactly one default cache behavior. - // - // DefaultCacheBehavior is a required field - DefaultCacheBehavior *DefaultCacheBehavior `type:"structure" required:"true"` - - // The object that you want CloudFront to request from your origin (for example, - // index.html) when a viewer requests the root URL for your distribution (http://www.example.com) - // instead of an object in your distribution (http://www.example.com/product-description.html). - // Specifying a default root object avoids exposing the contents of your distribution. - // - // Specify only the object name, for example, index.html. Don't add a / before - // the object name. - // - // If you don't want to specify a default root object when you create a distribution, - // include an empty DefaultRootObject element. - // - // To delete the default root object from an existing distribution, update the - // distribution configuration and include an empty DefaultRootObject element. 
- // - // To replace the default root object, update the distribution configuration - // and specify the new object. + // A complex type that contains a Location element for each country in which + // you want CloudFront either to distribute your content (whitelist) or not + // distribute your content (blacklist). // - // For more information about the default root object, see Creating a Default - // Root Object (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DefaultRootObject.html) - // in the Amazon CloudFront Developer Guide. - DefaultRootObject *string `type:"string"` - - // From this field, you can enable or disable the selected distribution. + // The Location element is a two-letter, uppercase country code for a country + // that you want to include in your blacklist or whitelist. Include one Location + // element for each country. // - // Enabled is a required field - Enabled *bool `type:"boolean" required:"true"` + // CloudFront and MaxMind both use ISO 3166 country codes. For the current list + // of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on + // the International Organization for Standardization website. You can also + // refer to the country list on the CloudFront console, which includes both + // country names and codes. + Items []*string `locationNameList:"Location" type:"list"` - // (Optional) Specify the maximum HTTP version that you want viewers to use - // to communicate with CloudFront. The default value for new web distributions - // is http2. Viewers that don't support HTTP/2 automatically use an earlier - // HTTP version. - // - // For viewers and CloudFront to use HTTP/2, viewers must support TLS 1.2 or - // later, and must support Server Name Identification (SNI). + // When geo restriction is enabled, this is the number of countries in your + // whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, + // and you can omit Items. // - // In general, configuring CloudFront to communicate with viewers using HTTP/2 - // reduces latency. You can improve performance by optimizing for HTTP/2. For - // more information, do an Internet search for "http/2 optimization." - HttpVersion *string `type:"string" enum:"HttpVersion"` + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` - // If you want CloudFront to respond to IPv6 DNS requests with an IPv6 address - // for your distribution, specify true. If you specify false, CloudFront responds - // to IPv6 DNS requests with the DNS response code NOERROR and with no IP addresses. - // This allows viewers to submit a second request, for an IPv4 address for your - // distribution. - // - // In general, you should enable IPv6 if you have users on IPv6 networks who - // want to access your content. However, if you're using signed URLs or signed - // cookies to restrict access to your content, and if you're using a custom - // policy that includes the IpAddress parameter to restrict the IP addresses - // that can access your content, don't enable IPv6. If you want to restrict - // access to some content by IP address and not restrict access to other content - // (or restrict access but not by IP address), you can create two distributions. - // For more information, see Creating a Signed URL Using a Custom Policy (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html) - // in the Amazon CloudFront Developer Guide. 
- // - // If you're using an Amazon Route 53 alias resource record set to route traffic - // to your CloudFront distribution, you need to create a second alias resource - // record set when both of the following are true: + // The method that you want to use to restrict distribution of your content + // by country: // - // * You enable IPv6 for the distribution + // * none: No geo restriction is enabled, meaning access to content is not + // restricted by client geo location. // - // * You're using alternate domain names in the URLs for your objects + // * blacklist: The Location elements specify the countries in which you + // don't want CloudFront to distribute your content. // - // For more information, see Routing Traffic to an Amazon CloudFront Web Distribution - // by Using Your Domain Name (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-cloudfront-distribution.html) - // in the Amazon Route 53 Developer Guide. + // * whitelist: The Location elements specify the countries in which you + // want CloudFront to distribute your content. // - // If you created a CNAME resource record set, either with Amazon Route 53 or - // with another DNS service, you don't need to make any changes. A CNAME record - // will route traffic to your distribution regardless of the IP address format - // of the viewer request. - IsIPV6Enabled *bool `type:"boolean"` + // RestrictionType is a required field + RestrictionType *string `type:"string" required:"true" enum:"GeoRestrictionType"` +} - // A complex type that controls whether access logs are written for the distribution. - // - // For more information about logging, see Access Logs (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html) - // in the Amazon CloudFront Developer Guide. - Logging *LoggingConfig `type:"structure"` +// String returns the string representation +func (s GeoRestriction) String() string { + return awsutil.Prettify(s) +} - // A complex type that contains information about origin groups for this distribution. - OriginGroups *OriginGroups `type:"structure"` +// GoString returns the string representation +func (s GeoRestriction) GoString() string { + return s.String() +} - // A complex type that contains information about origins for this distribution. - // - // Origins is a required field - Origins *Origins `type:"structure" required:"true"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *GeoRestriction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GeoRestriction"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.RestrictionType == nil { + invalidParams.Add(request.NewErrParamRequired("RestrictionType")) + } - // The price class that corresponds with the maximum price that you want to - // pay for CloudFront service. If you specify PriceClass_All, CloudFront responds - // to requests for your objects from all CloudFront edge locations. - // - // If you specify a price class other than PriceClass_All, CloudFront serves - // your objects from the CloudFront edge location that has the lowest latency - // among the edge locations in your price class. Viewers who are in or near - // regions that are excluded from your specified price class may encounter slower - // performance. 
- // - // For more information about price classes, see Choosing the Price Class for - // a CloudFront Distribution (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PriceClass.html) - // in the Amazon CloudFront Developer Guide. For information about CloudFront - // pricing, including how price classes (such as Price Class 100) map to CloudFront - // regions, see Amazon CloudFront Pricing (http://aws.amazon.com/cloudfront/pricing/). - // For price class information, scroll down to see the table at the bottom of - // the page. - PriceClass *string `type:"string" enum:"PriceClass"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // A complex type that identifies ways in which you want to restrict distribution - // of your content. - Restrictions *Restrictions `type:"structure"` +// SetItems sets the Items field's value. +func (s *GeoRestriction) SetItems(v []*string) *GeoRestriction { + s.Items = v + return s +} - // A complex type that determines the distribution’s SSL/TLS configuration - // for communicating with viewers. - ViewerCertificate *ViewerCertificate `type:"structure"` +// SetQuantity sets the Quantity field's value. +func (s *GeoRestriction) SetQuantity(v int64) *GeoRestriction { + s.Quantity = &v + return s +} - // A unique identifier that specifies the AWS WAF web ACL, if any, to associate - // with this distribution. To specify a web ACL created using the latest version - // of AWS WAF, use the ACL ARN, for example arn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a. - // To specify a web ACL created using AWS WAF Classic, use the ACL ID, for example - // 473e64fd-f30b-4765-81a0-62ad96dd167a. - // - // AWS WAF is a web application firewall that lets you monitor the HTTP and - // HTTPS requests that are forwarded to CloudFront, and lets you control access - // to your content. Based on conditions that you specify, such as the IP addresses - // that requests originate from or the values of query strings, CloudFront responds - // to requests either with the requested content or with an HTTP 403 status - // code (Forbidden). You can also configure CloudFront to return a custom error - // page when a request is blocked. For more information about AWS WAF, see the - // AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/what-is-aws-waf.html). - WebACLId *string `type:"string"` +// SetRestrictionType sets the RestrictionType field's value. +func (s *GeoRestriction) SetRestrictionType(v string) *GeoRestriction { + s.RestrictionType = &v + return s +} + +type GetCachePolicyConfigInput struct { + _ struct{} `locationName:"GetCachePolicyConfigRequest" type:"structure"` + + // The unique identifier for the cache policy. If the cache policy is attached + // to a distribution’s cache behavior, you can get the policy’s identifier + // using ListDistributions or GetDistribution. If the cache policy is not attached + // to a cache behavior, you can get the identifier using ListCachePolicies. 
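// Editorial sketch, not part of the generated SDK source: GeoRestriction above
// restricts delivery by two-letter, uppercase ISO 3166-1 alpha-2 country codes,
// with Quantity mirroring the number of Items and RestrictionType choosing
// none, whitelist, or blacklist. A consumer-side value that serves content only
// to the US and Canada might look like this (the aws/cloudfront import aliases
// and the country choice are illustrative assumptions):
//
//	geo := &cloudfront.GeoRestriction{
//		RestrictionType: aws.String("whitelist"),
//		Quantity:        aws.Int64(2), // must match len(Items)
//		Items:           []*string{aws.String("US"), aws.String("CA")},
//	}
//	if err := geo.Validate(); err != nil {
//		// Quantity and RestrictionType are the required fields checked here.
//	}
//	// When attached to a distribution, GeoRestriction travels inside the
//	// Restrictions wrapper referenced by DistributionConfig.
//	restrictions := &cloudfront.Restrictions{GeoRestriction: geo}
//	_ = restrictions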
+ // + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } // String returns the string representation -func (s DistributionConfig) String() string { +func (s GetCachePolicyConfigInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DistributionConfig) GoString() string { +func (s GetCachePolicyConfigInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DistributionConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DistributionConfig"} - if s.CallerReference == nil { - invalidParams.Add(request.NewErrParamRequired("CallerReference")) - } - if s.Comment == nil { - invalidParams.Add(request.NewErrParamRequired("Comment")) - } - if s.DefaultCacheBehavior == nil { - invalidParams.Add(request.NewErrParamRequired("DefaultCacheBehavior")) - } - if s.Enabled == nil { - invalidParams.Add(request.NewErrParamRequired("Enabled")) - } - if s.Origins == nil { - invalidParams.Add(request.NewErrParamRequired("Origins")) - } - if s.Aliases != nil { - if err := s.Aliases.Validate(); err != nil { - invalidParams.AddNested("Aliases", err.(request.ErrInvalidParams)) - } - } - if s.CacheBehaviors != nil { - if err := s.CacheBehaviors.Validate(); err != nil { - invalidParams.AddNested("CacheBehaviors", err.(request.ErrInvalidParams)) - } - } - if s.CustomErrorResponses != nil { - if err := s.CustomErrorResponses.Validate(); err != nil { - invalidParams.AddNested("CustomErrorResponses", err.(request.ErrInvalidParams)) - } - } - if s.DefaultCacheBehavior != nil { - if err := s.DefaultCacheBehavior.Validate(); err != nil { - invalidParams.AddNested("DefaultCacheBehavior", err.(request.ErrInvalidParams)) - } - } - if s.Logging != nil { - if err := s.Logging.Validate(); err != nil { - invalidParams.AddNested("Logging", err.(request.ErrInvalidParams)) - } - } - if s.OriginGroups != nil { - if err := s.OriginGroups.Validate(); err != nil { - invalidParams.AddNested("OriginGroups", err.(request.ErrInvalidParams)) - } - } - if s.Origins != nil { - if err := s.Origins.Validate(); err != nil { - invalidParams.AddNested("Origins", err.(request.ErrInvalidParams)) - } +func (s *GetCachePolicyConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCachePolicyConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) } - if s.Restrictions != nil { - if err := s.Restrictions.Validate(); err != nil { - invalidParams.AddNested("Restrictions", err.(request.ErrInvalidParams)) - } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -8137,152 +13390,149 @@ func (s *DistributionConfig) Validate() error { return nil } -// SetAliases sets the Aliases field's value. -func (s *DistributionConfig) SetAliases(v *Aliases) *DistributionConfig { - s.Aliases = v +// SetId sets the Id field's value. +func (s *GetCachePolicyConfigInput) SetId(v string) *GetCachePolicyConfigInput { + s.Id = &v return s } -// SetCacheBehaviors sets the CacheBehaviors field's value. -func (s *DistributionConfig) SetCacheBehaviors(v *CacheBehaviors) *DistributionConfig { - s.CacheBehaviors = v - return s -} +type GetCachePolicyConfigOutput struct { + _ struct{} `type:"structure" payload:"CachePolicyConfig"` -// SetCallerReference sets the CallerReference field's value. 
-func (s *DistributionConfig) SetCallerReference(v string) *DistributionConfig { - s.CallerReference = &v - return s + // The cache policy configuration. + CachePolicyConfig *CachePolicyConfig `type:"structure"` + + // The current version of the cache policy. + ETag *string `location:"header" locationName:"ETag" type:"string"` } -// SetComment sets the Comment field's value. -func (s *DistributionConfig) SetComment(v string) *DistributionConfig { - s.Comment = &v - return s +// String returns the string representation +func (s GetCachePolicyConfigOutput) String() string { + return awsutil.Prettify(s) } -// SetCustomErrorResponses sets the CustomErrorResponses field's value. -func (s *DistributionConfig) SetCustomErrorResponses(v *CustomErrorResponses) *DistributionConfig { - s.CustomErrorResponses = v - return s +// GoString returns the string representation +func (s GetCachePolicyConfigOutput) GoString() string { + return s.String() } -// SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value. -func (s *DistributionConfig) SetDefaultCacheBehavior(v *DefaultCacheBehavior) *DistributionConfig { - s.DefaultCacheBehavior = v +// SetCachePolicyConfig sets the CachePolicyConfig field's value. +func (s *GetCachePolicyConfigOutput) SetCachePolicyConfig(v *CachePolicyConfig) *GetCachePolicyConfigOutput { + s.CachePolicyConfig = v return s } -// SetDefaultRootObject sets the DefaultRootObject field's value. -func (s *DistributionConfig) SetDefaultRootObject(v string) *DistributionConfig { - s.DefaultRootObject = &v +// SetETag sets the ETag field's value. +func (s *GetCachePolicyConfigOutput) SetETag(v string) *GetCachePolicyConfigOutput { + s.ETag = &v return s } -// SetEnabled sets the Enabled field's value. -func (s *DistributionConfig) SetEnabled(v bool) *DistributionConfig { - s.Enabled = &v - return s +type GetCachePolicyInput struct { + _ struct{} `locationName:"GetCachePolicyRequest" type:"structure"` + + // The unique identifier for the cache policy. If the cache policy is attached + // to a distribution’s cache behavior, you can get the policy’s identifier + // using ListDistributions or GetDistribution. If the cache policy is not attached + // to a cache behavior, you can get the identifier using ListCachePolicies. + // + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } -// SetHttpVersion sets the HttpVersion field's value. -func (s *DistributionConfig) SetHttpVersion(v string) *DistributionConfig { - s.HttpVersion = &v - return s +// String returns the string representation +func (s GetCachePolicyInput) String() string { + return awsutil.Prettify(s) } -// SetIsIPV6Enabled sets the IsIPV6Enabled field's value. -func (s *DistributionConfig) SetIsIPV6Enabled(v bool) *DistributionConfig { - s.IsIPV6Enabled = &v - return s +// GoString returns the string representation +func (s GetCachePolicyInput) GoString() string { + return s.String() } -// SetLogging sets the Logging field's value. -func (s *DistributionConfig) SetLogging(v *LoggingConfig) *DistributionConfig { - s.Logging = v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetCachePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCachePolicyInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetOriginGroups sets the OriginGroups field's value. -func (s *DistributionConfig) SetOriginGroups(v *OriginGroups) *DistributionConfig { - s.OriginGroups = v +// SetId sets the Id field's value. +func (s *GetCachePolicyInput) SetId(v string) *GetCachePolicyInput { + s.Id = &v return s } -// SetOrigins sets the Origins field's value. -func (s *DistributionConfig) SetOrigins(v *Origins) *DistributionConfig { - s.Origins = v - return s +type GetCachePolicyOutput struct { + _ struct{} `type:"structure" payload:"CachePolicy"` + + // The cache policy. + CachePolicy *CachePolicy `type:"structure"` + + // The current version of the cache policy. + ETag *string `location:"header" locationName:"ETag" type:"string"` } -// SetPriceClass sets the PriceClass field's value. -func (s *DistributionConfig) SetPriceClass(v string) *DistributionConfig { - s.PriceClass = &v - return s +// String returns the string representation +func (s GetCachePolicyOutput) String() string { + return awsutil.Prettify(s) } -// SetRestrictions sets the Restrictions field's value. -func (s *DistributionConfig) SetRestrictions(v *Restrictions) *DistributionConfig { - s.Restrictions = v - return s +// GoString returns the string representation +func (s GetCachePolicyOutput) GoString() string { + return s.String() } -// SetViewerCertificate sets the ViewerCertificate field's value. -func (s *DistributionConfig) SetViewerCertificate(v *ViewerCertificate) *DistributionConfig { - s.ViewerCertificate = v +// SetCachePolicy sets the CachePolicy field's value. +func (s *GetCachePolicyOutput) SetCachePolicy(v *CachePolicy) *GetCachePolicyOutput { + s.CachePolicy = v return s } -// SetWebACLId sets the WebACLId field's value. -func (s *DistributionConfig) SetWebACLId(v string) *DistributionConfig { - s.WebACLId = &v +// SetETag sets the ETag field's value. +func (s *GetCachePolicyOutput) SetETag(v string) *GetCachePolicyOutput { + s.ETag = &v return s } -// A distribution Configuration and a list of tags to be associated with the -// distribution. -type DistributionConfigWithTags struct { - _ struct{} `type:"structure"` - - // A distribution configuration. - // - // DistributionConfig is a required field - DistributionConfig *DistributionConfig `type:"structure" required:"true"` +// The origin access identity's configuration information. For more information, +// see CloudFrontOriginAccessIdentityConfig (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_CloudFrontOriginAccessIdentityConfig.html). +type GetCloudFrontOriginAccessIdentityConfigInput struct { + _ struct{} `locationName:"GetCloudFrontOriginAccessIdentityConfigRequest" type:"structure"` - // A complex type that contains zero or more Tag elements. + // The identity's ID. 
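// Editorial sketch, not part of the generated SDK source: GetCachePolicyConfig
// and GetCachePolicy above both take the policy's Id (obtainable from
// ListCachePolicies, or from ListDistributions/GetDistribution when the policy
// is attached to a cache behavior) and return the payload together with an
// ETag version string. A consumer-side lookup could look like this; the
// session/cloudfront/aws import aliases and the placeholder Id are assumptions:
//
//	svc := cloudfront.New(session.Must(session.NewSession()))
//	out, err := svc.GetCachePolicyConfig(&cloudfront.GetCachePolicyConfigInput{
//		Id: aws.String("example-cache-policy-id"), // placeholder
//	})
//	if err != nil {
//		// handle the error
//		return
//	}
//	cfg := out.CachePolicyConfig         // the policy configuration
//	version := aws.StringValue(out.ETag) // typically passed back as IfMatch on update/delete
//	_ = cfg
//	_ = version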
// - // Tags is a required field - Tags *Tags `type:"structure" required:"true"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } // String returns the string representation -func (s DistributionConfigWithTags) String() string { +func (s GetCloudFrontOriginAccessIdentityConfigInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DistributionConfigWithTags) GoString() string { +func (s GetCloudFrontOriginAccessIdentityConfigInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DistributionConfigWithTags) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DistributionConfigWithTags"} - if s.DistributionConfig == nil { - invalidParams.Add(request.NewErrParamRequired("DistributionConfig")) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) - } - if s.DistributionConfig != nil { - if err := s.DistributionConfig.Validate(); err != nil { - invalidParams.AddNested("DistributionConfig", err.(request.ErrInvalidParams)) - } +func (s *GetCloudFrontOriginAccessIdentityConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCloudFrontOriginAccessIdentityConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) } - if s.Tags != nil { - if err := s.Tags.Validate(); err != nil { - invalidParams.AddNested("Tags", err.(request.ErrInvalidParams)) - } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -8291,391 +13541,448 @@ func (s *DistributionConfigWithTags) Validate() error { return nil } -// SetDistributionConfig sets the DistributionConfig field's value. -func (s *DistributionConfigWithTags) SetDistributionConfig(v *DistributionConfig) *DistributionConfigWithTags { - s.DistributionConfig = v - return s -} - -// SetTags sets the Tags field's value. -func (s *DistributionConfigWithTags) SetTags(v *Tags) *DistributionConfigWithTags { - s.Tags = v +// SetId sets the Id field's value. +func (s *GetCloudFrontOriginAccessIdentityConfigInput) SetId(v string) *GetCloudFrontOriginAccessIdentityConfigInput { + s.Id = &v return s } -// A distribution list. -type DistributionList struct { - _ struct{} `type:"structure"` - - // A flag that indicates whether more distributions remain to be listed. If - // your results were truncated, you can make a follow-up pagination request - // using the Marker request parameter to retrieve more distributions in the - // list. - // - // IsTruncated is a required field - IsTruncated *bool `type:"boolean" required:"true"` - - // A complex type that contains one DistributionSummary element for each distribution - // that was created by the current AWS account. - Items []*DistributionSummary `locationNameList:"DistributionSummary" type:"list"` - - // The value you provided for the Marker request parameter. - // - // Marker is a required field - Marker *string `type:"string" required:"true"` - - // The value you provided for the MaxItems request parameter. - // - // MaxItems is a required field - MaxItems *int64 `type:"integer" required:"true"` +// The returned result of the corresponding request. 
+type GetCloudFrontOriginAccessIdentityConfigOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` - // If IsTruncated is true, this element is present and contains the value you - // can use for the Marker request parameter to continue listing your distributions - // where they left off. - NextMarker *string `type:"string"` + // The origin access identity's configuration information. + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `type:"structure"` - // The number of distributions that were created by the current AWS account. - // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` } // String returns the string representation -func (s DistributionList) String() string { +func (s GetCloudFrontOriginAccessIdentityConfigOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DistributionList) GoString() string { +func (s GetCloudFrontOriginAccessIdentityConfigOutput) GoString() string { return s.String() } -// SetIsTruncated sets the IsTruncated field's value. -func (s *DistributionList) SetIsTruncated(v bool) *DistributionList { - s.IsTruncated = &v +// SetCloudFrontOriginAccessIdentityConfig sets the CloudFrontOriginAccessIdentityConfig field's value. +func (s *GetCloudFrontOriginAccessIdentityConfigOutput) SetCloudFrontOriginAccessIdentityConfig(v *OriginAccessIdentityConfig) *GetCloudFrontOriginAccessIdentityConfigOutput { + s.CloudFrontOriginAccessIdentityConfig = v return s } -// SetItems sets the Items field's value. -func (s *DistributionList) SetItems(v []*DistributionSummary) *DistributionList { - s.Items = v +// SetETag sets the ETag field's value. +func (s *GetCloudFrontOriginAccessIdentityConfigOutput) SetETag(v string) *GetCloudFrontOriginAccessIdentityConfigOutput { + s.ETag = &v return s } -// SetMarker sets the Marker field's value. -func (s *DistributionList) SetMarker(v string) *DistributionList { - s.Marker = &v - return s +// The request to get an origin access identity's information. +type GetCloudFrontOriginAccessIdentityInput struct { + _ struct{} `locationName:"GetCloudFrontOriginAccessIdentityRequest" type:"structure"` + + // The identity's ID. + // + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } -// SetMaxItems sets the MaxItems field's value. -func (s *DistributionList) SetMaxItems(v int64) *DistributionList { - s.MaxItems = &v - return s +// String returns the string representation +func (s GetCloudFrontOriginAccessIdentityInput) String() string { + return awsutil.Prettify(s) } -// SetNextMarker sets the NextMarker field's value. -func (s *DistributionList) SetNextMarker(v string) *DistributionList { - s.NextMarker = &v - return s +// GoString returns the string representation +func (s GetCloudFrontOriginAccessIdentityInput) GoString() string { + return s.String() } -// SetQuantity sets the Quantity field's value. -func (s *DistributionList) SetQuantity(v int64) *DistributionList { - s.Quantity = &v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetCloudFrontOriginAccessIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCloudFrontOriginAccessIdentityInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *GetCloudFrontOriginAccessIdentityInput) SetId(v string) *GetCloudFrontOriginAccessIdentityInput { + s.Id = &v return s } -// A summary of the information about a CloudFront distribution. -type DistributionSummary struct { - _ struct{} `type:"structure"` +// The returned result of the corresponding request. +type GetCloudFrontOriginAccessIdentityOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` - // The ARN (Amazon Resource Name) for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, - // where 123456789012 is your AWS account ID. - // - // ARN is a required field - ARN *string `type:"string" required:"true"` + // The origin access identity's information. + CloudFrontOriginAccessIdentity *OriginAccessIdentity `type:"structure"` - // AWS services in China customers must file for an Internet Content Provider - // (ICP) recordal if they want to serve content publicly on an alternate domain - // name, also known as a CNAME, that they've added to CloudFront. AliasICPRecordal - // provides the ICP recordal status for CNAMEs associated with distributions. - // - // For more information about ICP recordals, see Signup, Accounts, and Credentials - // (https://docs.amazonaws.cn/en_us/aws/latest/userguide/accounts-and-credentials.html) - // in Getting Started with AWS services in China. - AliasICPRecordals []*AliasICPRecordal `locationNameList:"AliasICPRecordal" type:"list"` + // The current version of the origin access identity's information. For example: + // E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` +} - // A complex type that contains information about CNAMEs (alternate domain names), - // if any, for this distribution. - // - // Aliases is a required field - Aliases *Aliases `type:"structure" required:"true"` +// String returns the string representation +func (s GetCloudFrontOriginAccessIdentityOutput) String() string { + return awsutil.Prettify(s) +} - // A complex type that contains zero or more CacheBehavior elements. - // - // CacheBehaviors is a required field - CacheBehaviors *CacheBehaviors `type:"structure" required:"true"` +// GoString returns the string representation +func (s GetCloudFrontOriginAccessIdentityOutput) GoString() string { + return s.String() +} - // The comment originally specified when this distribution was created. - // - // Comment is a required field - Comment *string `type:"string" required:"true"` +// SetCloudFrontOriginAccessIdentity sets the CloudFrontOriginAccessIdentity field's value. +func (s *GetCloudFrontOriginAccessIdentityOutput) SetCloudFrontOriginAccessIdentity(v *OriginAccessIdentity) *GetCloudFrontOriginAccessIdentityOutput { + s.CloudFrontOriginAccessIdentity = v + return s +} - // A complex type that contains zero or more CustomErrorResponses elements. - // - // CustomErrorResponses is a required field - CustomErrorResponses *CustomErrorResponses `type:"structure" required:"true"` +// SetETag sets the ETag field's value. 
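// Editorial sketch, not part of the generated SDK source: the origin access
// identity getters above follow the same shape as the other Get* calls, an Id
// in and the identity (or just its configuration) plus an ETag out. A
// consumer-side call might look like this; the import aliases and the
// placeholder identity ID are assumptions:
//
//	svc := cloudfront.New(session.Must(session.NewSession()))
//	out, err := svc.GetCloudFrontOriginAccessIdentity(
//		&cloudfront.GetCloudFrontOriginAccessIdentityInput{
//			Id: aws.String("example-oai-id"), // placeholder
//		})
//	if err != nil {
//		// handle the error
//		return
//	}
//	oai := out.CloudFrontOriginAccessIdentity // the identity's information
//	_ = oai
//	_ = aws.StringValue(out.ETag) // current version, e.g. E2QWRUHAPOMQZL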
+func (s *GetCloudFrontOriginAccessIdentityOutput) SetETag(v string) *GetCloudFrontOriginAccessIdentityOutput { + s.ETag = &v + return s +} - // A complex type that describes the default cache behavior if you don't specify - // a CacheBehavior element or if files don't match any of the values of PathPattern - // in CacheBehavior elements. You must create exactly one default cache behavior. - // - // DefaultCacheBehavior is a required field - DefaultCacheBehavior *DefaultCacheBehavior `type:"structure" required:"true"` +// The request to get a distribution configuration. +type GetDistributionConfigInput struct { + _ struct{} `locationName:"GetDistributionConfigRequest" type:"structure"` - // The domain name that corresponds to the distribution, for example, d111111abcdef8.cloudfront.net. + // The distribution's ID. If the ID is empty, an empty distribution configuration + // is returned. // - // DomainName is a required field - DomainName *string `type:"string" required:"true"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` +} - // Whether the distribution is enabled to accept user requests for content. - // - // Enabled is a required field - Enabled *bool `type:"boolean" required:"true"` +// String returns the string representation +func (s GetDistributionConfigInput) String() string { + return awsutil.Prettify(s) +} - // Specify the maximum HTTP version that you want viewers to use to communicate - // with CloudFront. The default value for new web distributions is http2. Viewers - // that don't support HTTP/2 will automatically use an earlier version. - // - // HttpVersion is a required field - HttpVersion *string `type:"string" required:"true" enum:"HttpVersion"` +// GoString returns the string representation +func (s GetDistributionConfigInput) GoString() string { + return s.String() +} - // The identifier for the distribution. For example: EDFDVBD632BHDS5. - // - // Id is a required field - Id *string `type:"string" required:"true"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDistributionConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDistributionConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } - // Whether CloudFront responds to IPv6 DNS requests with an IPv6 address for - // your distribution. - // - // IsIPV6Enabled is a required field - IsIPV6Enabled *bool `type:"boolean" required:"true"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // The date and time the distribution was last modified. - // - // LastModifiedTime is a required field - LastModifiedTime *time.Time `type:"timestamp" required:"true"` +// SetId sets the Id field's value. +func (s *GetDistributionConfigInput) SetId(v string) *GetDistributionConfigInput { + s.Id = &v + return s +} - // A complex type that contains information about origin groups for this distribution. - OriginGroups *OriginGroups `type:"structure"` +// The returned result of the corresponding request. +type GetDistributionConfigOutput struct { + _ struct{} `type:"structure" payload:"DistributionConfig"` - // A complex type that contains information about origins for this distribution. - // - // Origins is a required field - Origins *Origins `type:"structure" required:"true"` + // The distribution's configuration information. 
+ DistributionConfig *DistributionConfig `type:"structure"` - // A complex type that contains information about price class for this streaming - // distribution. - // - // PriceClass is a required field - PriceClass *string `type:"string" required:"true" enum:"PriceClass"` + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` +} - // A complex type that identifies ways in which you want to restrict distribution - // of your content. - // - // Restrictions is a required field - Restrictions *Restrictions `type:"structure" required:"true"` +// String returns the string representation +func (s GetDistributionConfigOutput) String() string { + return awsutil.Prettify(s) +} - // The current status of the distribution. When the status is Deployed, the - // distribution's information is propagated to all CloudFront edge locations. - // - // Status is a required field - Status *string `type:"string" required:"true"` +// GoString returns the string representation +func (s GetDistributionConfigOutput) GoString() string { + return s.String() +} - // A complex type that determines the distribution’s SSL/TLS configuration - // for communicating with viewers. - // - // ViewerCertificate is a required field - ViewerCertificate *ViewerCertificate `type:"structure" required:"true"` +// SetDistributionConfig sets the DistributionConfig field's value. +func (s *GetDistributionConfigOutput) SetDistributionConfig(v *DistributionConfig) *GetDistributionConfigOutput { + s.DistributionConfig = v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetDistributionConfigOutput) SetETag(v string) *GetDistributionConfigOutput { + s.ETag = &v + return s +} - // The Web ACL Id (if any) associated with the distribution. +// The request to get a distribution's information. +type GetDistributionInput struct { + _ struct{} `locationName:"GetDistributionRequest" type:"structure"` + + // The distribution's ID. If the ID is empty, an empty distribution configuration + // is returned. // - // WebACLId is a required field - WebACLId *string `type:"string" required:"true"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } // String returns the string representation -func (s DistributionSummary) String() string { +func (s GetDistributionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DistributionSummary) GoString() string { +func (s GetDistributionInput) GoString() string { return s.String() } -// SetARN sets the ARN field's value. -func (s *DistributionSummary) SetARN(v string) *DistributionSummary { - s.ARN = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDistributionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetAliasICPRecordals sets the AliasICPRecordals field's value. -func (s *DistributionSummary) SetAliasICPRecordals(v []*AliasICPRecordal) *DistributionSummary { - s.AliasICPRecordals = v +// SetId sets the Id field's value. 
+func (s *GetDistributionInput) SetId(v string) *GetDistributionInput { + s.Id = &v return s } -// SetAliases sets the Aliases field's value. -func (s *DistributionSummary) SetAliases(v *Aliases) *DistributionSummary { - s.Aliases = v - return s +// The returned result of the corresponding request. +type GetDistributionOutput struct { + _ struct{} `type:"structure" payload:"Distribution"` + + // The distribution's information. + Distribution *Distribution `type:"structure"` + + // The current version of the distribution's information. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` } -// SetCacheBehaviors sets the CacheBehaviors field's value. -func (s *DistributionSummary) SetCacheBehaviors(v *CacheBehaviors) *DistributionSummary { - s.CacheBehaviors = v - return s +// String returns the string representation +func (s GetDistributionOutput) String() string { + return awsutil.Prettify(s) } -// SetComment sets the Comment field's value. -func (s *DistributionSummary) SetComment(v string) *DistributionSummary { - s.Comment = &v - return s +// GoString returns the string representation +func (s GetDistributionOutput) GoString() string { + return s.String() } -// SetCustomErrorResponses sets the CustomErrorResponses field's value. -func (s *DistributionSummary) SetCustomErrorResponses(v *CustomErrorResponses) *DistributionSummary { - s.CustomErrorResponses = v +// SetDistribution sets the Distribution field's value. +func (s *GetDistributionOutput) SetDistribution(v *Distribution) *GetDistributionOutput { + s.Distribution = v return s } -// SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value. -func (s *DistributionSummary) SetDefaultCacheBehavior(v *DefaultCacheBehavior) *DistributionSummary { - s.DefaultCacheBehavior = v +// SetETag sets the ETag field's value. +func (s *GetDistributionOutput) SetETag(v string) *GetDistributionOutput { + s.ETag = &v return s } -// SetDomainName sets the DomainName field's value. -func (s *DistributionSummary) SetDomainName(v string) *DistributionSummary { - s.DomainName = &v - return s +type GetFieldLevelEncryptionConfigInput struct { + _ struct{} `locationName:"GetFieldLevelEncryptionConfigRequest" type:"structure"` + + // Request the ID for the field-level encryption configuration information. + // + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } -// SetEnabled sets the Enabled field's value. -func (s *DistributionSummary) SetEnabled(v bool) *DistributionSummary { - s.Enabled = &v - return s +// String returns the string representation +func (s GetFieldLevelEncryptionConfigInput) String() string { + return awsutil.Prettify(s) } -// SetHttpVersion sets the HttpVersion field's value. -func (s *DistributionSummary) SetHttpVersion(v string) *DistributionSummary { - s.HttpVersion = &v - return s +// GoString returns the string representation +func (s GetFieldLevelEncryptionConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFieldLevelEncryptionConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFieldLevelEncryptionConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } // SetId sets the Id field's value. 
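// Editorial sketch, not part of the generated SDK source: GetDistributionConfig
// and GetDistribution above return the configuration (or the full distribution)
// along with an ETag, and the usual read-modify-write flow feeds that ETag back
// as IfMatch when updating. The UpdateDistribution step and the import aliases
// below are assumptions drawn from the wider CloudFront API rather than this
// hunk; the distribution ID reuses the EDFDVBD632BHDS5 example from the
// comments above.
//
//	svc := cloudfront.New(session.Must(session.NewSession()))
//	got, err := svc.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{
//		Id: aws.String("EDFDVBD632BHDS5"),
//	})
//	if err != nil {
//		// handle the error
//		return
//	}
//	cfg := got.DistributionConfig
//	cfg.Comment = aws.String("updated comment") // Comment is a required field
//	_, err = svc.UpdateDistribution(&cloudfront.UpdateDistributionInput{
//		Id:                 aws.String("EDFDVBD632BHDS5"),
//		IfMatch:            got.ETag, // optimistic-concurrency token from the GET
//		DistributionConfig: cfg,
//	})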
-func (s *DistributionSummary) SetId(v string) *DistributionSummary { +func (s *GetFieldLevelEncryptionConfigInput) SetId(v string) *GetFieldLevelEncryptionConfigInput { s.Id = &v return s } -// SetIsIPV6Enabled sets the IsIPV6Enabled field's value. -func (s *DistributionSummary) SetIsIPV6Enabled(v bool) *DistributionSummary { - s.IsIPV6Enabled = &v - return s +type GetFieldLevelEncryptionConfigOutput struct { + _ struct{} `type:"structure" payload:"FieldLevelEncryptionConfig"` + + // The current version of the field level encryption configuration. For example: + // E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // Return the field-level encryption configuration information. + FieldLevelEncryptionConfig *FieldLevelEncryptionConfig `type:"structure"` } -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *DistributionSummary) SetLastModifiedTime(v time.Time) *DistributionSummary { - s.LastModifiedTime = &v - return s +// String returns the string representation +func (s GetFieldLevelEncryptionConfigOutput) String() string { + return awsutil.Prettify(s) } -// SetOriginGroups sets the OriginGroups field's value. -func (s *DistributionSummary) SetOriginGroups(v *OriginGroups) *DistributionSummary { - s.OriginGroups = v - return s +// GoString returns the string representation +func (s GetFieldLevelEncryptionConfigOutput) GoString() string { + return s.String() } -// SetOrigins sets the Origins field's value. -func (s *DistributionSummary) SetOrigins(v *Origins) *DistributionSummary { - s.Origins = v +// SetETag sets the ETag field's value. +func (s *GetFieldLevelEncryptionConfigOutput) SetETag(v string) *GetFieldLevelEncryptionConfigOutput { + s.ETag = &v return s } -// SetPriceClass sets the PriceClass field's value. -func (s *DistributionSummary) SetPriceClass(v string) *DistributionSummary { - s.PriceClass = &v +// SetFieldLevelEncryptionConfig sets the FieldLevelEncryptionConfig field's value. +func (s *GetFieldLevelEncryptionConfigOutput) SetFieldLevelEncryptionConfig(v *FieldLevelEncryptionConfig) *GetFieldLevelEncryptionConfigOutput { + s.FieldLevelEncryptionConfig = v return s } -// SetRestrictions sets the Restrictions field's value. -func (s *DistributionSummary) SetRestrictions(v *Restrictions) *DistributionSummary { - s.Restrictions = v - return s +type GetFieldLevelEncryptionInput struct { + _ struct{} `locationName:"GetFieldLevelEncryptionRequest" type:"structure"` + + // Request the ID for the field-level encryption configuration information. + // + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } -// SetStatus sets the Status field's value. -func (s *DistributionSummary) SetStatus(v string) *DistributionSummary { - s.Status = &v - return s +// String returns the string representation +func (s GetFieldLevelEncryptionInput) String() string { + return awsutil.Prettify(s) } -// SetViewerCertificate sets the ViewerCertificate field's value. -func (s *DistributionSummary) SetViewerCertificate(v *ViewerCertificate) *DistributionSummary { - s.ViewerCertificate = v +// GoString returns the string representation +func (s GetFieldLevelEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetFieldLevelEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFieldLevelEncryptionInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *GetFieldLevelEncryptionInput) SetId(v string) *GetFieldLevelEncryptionInput { + s.Id = &v return s } -// SetWebACLId sets the WebACLId field's value. -func (s *DistributionSummary) SetWebACLId(v string) *DistributionSummary { - s.WebACLId = &v +type GetFieldLevelEncryptionOutput struct { + _ struct{} `type:"structure" payload:"FieldLevelEncryption"` + + // The current version of the field level encryption configuration. For example: + // E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // Return the field-level encryption configuration information. + FieldLevelEncryption *FieldLevelEncryption `type:"structure"` +} + +// String returns the string representation +func (s GetFieldLevelEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFieldLevelEncryptionOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *GetFieldLevelEncryptionOutput) SetETag(v string) *GetFieldLevelEncryptionOutput { + s.ETag = &v return s } -// Complex data type for field-level encryption profiles that includes all of -// the encryption entities. -type EncryptionEntities struct { - _ struct{} `type:"structure"` +// SetFieldLevelEncryption sets the FieldLevelEncryption field's value. +func (s *GetFieldLevelEncryptionOutput) SetFieldLevelEncryption(v *FieldLevelEncryption) *GetFieldLevelEncryptionOutput { + s.FieldLevelEncryption = v + return s +} - // An array of field patterns in a field-level encryption content type-profile - // mapping. - Items []*EncryptionEntity `locationNameList:"EncryptionEntity" type:"list"` +type GetFieldLevelEncryptionProfileConfigInput struct { + _ struct{} `locationName:"GetFieldLevelEncryptionProfileConfigRequest" type:"structure"` - // Number of field pattern items in a field-level encryption content type-profile - // mapping. + // Get the ID for the field-level encryption profile configuration information. // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } // String returns the string representation -func (s EncryptionEntities) String() string { +func (s GetFieldLevelEncryptionProfileConfigInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EncryptionEntities) GoString() string { +func (s GetFieldLevelEncryptionProfileConfigInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *EncryptionEntities) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EncryptionEntities"} - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) +func (s *GetFieldLevelEncryptionProfileConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFieldLevelEncryptionProfileConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) } - if s.Items != nil { - for i, v := range s.Items { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) - } - } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -8684,72 +13991,72 @@ func (s *EncryptionEntities) Validate() error { return nil } -// SetItems sets the Items field's value. -func (s *EncryptionEntities) SetItems(v []*EncryptionEntity) *EncryptionEntities { - s.Items = v +// SetId sets the Id field's value. +func (s *GetFieldLevelEncryptionProfileConfigInput) SetId(v string) *GetFieldLevelEncryptionProfileConfigInput { + s.Id = &v return s } -// SetQuantity sets the Quantity field's value. -func (s *EncryptionEntities) SetQuantity(v int64) *EncryptionEntities { - s.Quantity = &v - return s +type GetFieldLevelEncryptionProfileConfigOutput struct { + _ struct{} `type:"structure" payload:"FieldLevelEncryptionProfileConfig"` + + // The current version of the field-level encryption profile configuration result. + // For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // Return the field-level encryption profile configuration information. + FieldLevelEncryptionProfileConfig *FieldLevelEncryptionProfileConfig `type:"structure"` } -// Complex data type for field-level encryption profiles that includes the encryption -// key and field pattern specifications. -type EncryptionEntity struct { - _ struct{} `type:"structure"` +// String returns the string representation +func (s GetFieldLevelEncryptionProfileConfigOutput) String() string { + return awsutil.Prettify(s) +} - // Field patterns in a field-level encryption content type profile specify the - // fields that you want to be encrypted. You can provide the full field name, - // or any beginning characters followed by a wildcard (*). You can't overlap - // field patterns. For example, you can't have both ABC* and AB*. Note that - // field patterns are case-sensitive. - // - // FieldPatterns is a required field - FieldPatterns *FieldPatterns `type:"structure" required:"true"` +// GoString returns the string representation +func (s GetFieldLevelEncryptionProfileConfigOutput) GoString() string { + return s.String() +} - // The provider associated with the public key being used for encryption. This - // value must also be provided with the private key for applications to be able - // to decrypt data. - // - // ProviderId is a required field - ProviderId *string `type:"string" required:"true"` +// SetETag sets the ETag field's value. +func (s *GetFieldLevelEncryptionProfileConfigOutput) SetETag(v string) *GetFieldLevelEncryptionProfileConfigOutput { + s.ETag = &v + return s +} - // The public key associated with a set of field-level encryption patterns, - // to be used when encrypting the fields that match the patterns. +// SetFieldLevelEncryptionProfileConfig sets the FieldLevelEncryptionProfileConfig field's value. 
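// Editorial sketch, not part of the generated SDK source: the field-level
// encryption getters above (and the profile variants that follow) all share
// the Id-in / payload-plus-ETag-out shape. GetFieldLevelEncryptionConfig
// returns only the configuration, while GetFieldLevelEncryption wraps it with
// the Id and LastModifiedTime. A consumer-side call might look like this; the
// import aliases and the placeholder configuration ID are assumptions:
//
//	svc := cloudfront.New(session.Must(session.NewSession()))
//	out, err := svc.GetFieldLevelEncryptionConfig(
//		&cloudfront.GetFieldLevelEncryptionConfigInput{
//			Id: aws.String("example-fle-config-id"), // placeholder
//		})
//	if err != nil {
//		// handle the error
//		return
//	}
//	_ = out.FieldLevelEncryptionConfig // the configuration payload
//	_ = aws.StringValue(out.ETag)      // current version, e.g. E2QWRUHAPOMQZL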
+func (s *GetFieldLevelEncryptionProfileConfigOutput) SetFieldLevelEncryptionProfileConfig(v *FieldLevelEncryptionProfileConfig) *GetFieldLevelEncryptionProfileConfigOutput { + s.FieldLevelEncryptionProfileConfig = v + return s +} + +type GetFieldLevelEncryptionProfileInput struct { + _ struct{} `locationName:"GetFieldLevelEncryptionProfileRequest" type:"structure"` + + // Get the ID for the field-level encryption profile information. // - // PublicKeyId is a required field - PublicKeyId *string `type:"string" required:"true"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } // String returns the string representation -func (s EncryptionEntity) String() string { +func (s GetFieldLevelEncryptionProfileInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EncryptionEntity) GoString() string { +func (s GetFieldLevelEncryptionProfileInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *EncryptionEntity) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EncryptionEntity"} - if s.FieldPatterns == nil { - invalidParams.Add(request.NewErrParamRequired("FieldPatterns")) - } - if s.ProviderId == nil { - invalidParams.Add(request.NewErrParamRequired("ProviderId")) - } - if s.PublicKeyId == nil { - invalidParams.Add(request.NewErrParamRequired("PublicKeyId")) +func (s *GetFieldLevelEncryptionProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFieldLevelEncryptionProfileInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) } - if s.FieldPatterns != nil { - if err := s.FieldPatterns.Validate(); err != nil { - invalidParams.AddNested("FieldPatterns", err.(request.ErrInvalidParams)) - } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -8758,124 +14065,83 @@ func (s *EncryptionEntity) Validate() error { return nil } -// SetFieldPatterns sets the FieldPatterns field's value. -func (s *EncryptionEntity) SetFieldPatterns(v *FieldPatterns) *EncryptionEntity { - s.FieldPatterns = v - return s -} - -// SetProviderId sets the ProviderId field's value. -func (s *EncryptionEntity) SetProviderId(v string) *EncryptionEntity { - s.ProviderId = &v - return s -} - -// SetPublicKeyId sets the PublicKeyId field's value. -func (s *EncryptionEntity) SetPublicKeyId(v string) *EncryptionEntity { - s.PublicKeyId = &v +// SetId sets the Id field's value. +func (s *GetFieldLevelEncryptionProfileInput) SetId(v string) *GetFieldLevelEncryptionProfileInput { + s.Id = &v return s } -// A complex data type that includes the profile configurations and other options -// specified for field-level encryption. -type FieldLevelEncryption struct { - _ struct{} `type:"structure"` - - // A complex data type that includes the profile configurations specified for - // field-level encryption. - // - // FieldLevelEncryptionConfig is a required field - FieldLevelEncryptionConfig *FieldLevelEncryptionConfig `type:"structure" required:"true"` +type GetFieldLevelEncryptionProfileOutput struct { + _ struct{} `type:"structure" payload:"FieldLevelEncryptionProfile"` - // The configuration ID for a field-level encryption configuration which includes - // a set of profiles that specify certain selected data fields to be encrypted - // by specific public keys. 
- // - // Id is a required field - Id *string `type:"string" required:"true"` + // The current version of the field level encryption profile. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` - // The last time the field-level encryption configuration was changed. - // - // LastModifiedTime is a required field - LastModifiedTime *time.Time `type:"timestamp" required:"true"` + // Return the field-level encryption profile information. + FieldLevelEncryptionProfile *FieldLevelEncryptionProfile `type:"structure"` } // String returns the string representation -func (s FieldLevelEncryption) String() string { +func (s GetFieldLevelEncryptionProfileOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FieldLevelEncryption) GoString() string { +func (s GetFieldLevelEncryptionProfileOutput) GoString() string { return s.String() } -// SetFieldLevelEncryptionConfig sets the FieldLevelEncryptionConfig field's value. -func (s *FieldLevelEncryption) SetFieldLevelEncryptionConfig(v *FieldLevelEncryptionConfig) *FieldLevelEncryption { - s.FieldLevelEncryptionConfig = v - return s -} - -// SetId sets the Id field's value. -func (s *FieldLevelEncryption) SetId(v string) *FieldLevelEncryption { - s.Id = &v +// SetETag sets the ETag field's value. +func (s *GetFieldLevelEncryptionProfileOutput) SetETag(v string) *GetFieldLevelEncryptionProfileOutput { + s.ETag = &v return s } -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *FieldLevelEncryption) SetLastModifiedTime(v time.Time) *FieldLevelEncryption { - s.LastModifiedTime = &v +// SetFieldLevelEncryptionProfile sets the FieldLevelEncryptionProfile field's value. +func (s *GetFieldLevelEncryptionProfileOutput) SetFieldLevelEncryptionProfile(v *FieldLevelEncryptionProfile) *GetFieldLevelEncryptionProfileOutput { + s.FieldLevelEncryptionProfile = v return s } -// A complex data type that includes the profile configurations specified for -// field-level encryption. -type FieldLevelEncryptionConfig struct { - _ struct{} `type:"structure"` +// The request to get an invalidation's information. +type GetInvalidationInput struct { + _ struct{} `locationName:"GetInvalidationRequest" type:"structure"` - // A unique number that ensures the request can't be replayed. + // The distribution's ID. // - // CallerReference is a required field - CallerReference *string `type:"string" required:"true"` - - // An optional comment about the configuration. - Comment *string `type:"string"` - - // A complex data type that specifies when to forward content if a content type - // isn't recognized and profiles to use as by default in a request if a query - // argument doesn't specify a profile to use. - ContentTypeProfileConfig *ContentTypeProfileConfig `type:"structure"` + // DistributionId is a required field + DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` - // A complex data type that specifies when to forward content if a profile isn't - // found and the profile that can be provided as a query argument in a request. - QueryArgProfileConfig *QueryArgProfileConfig `type:"structure"` + // The identifier for the invalidation request, for example, IDFDVBD632BHDS5. 
+ // + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } // String returns the string representation -func (s FieldLevelEncryptionConfig) String() string { +func (s GetInvalidationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FieldLevelEncryptionConfig) GoString() string { +func (s GetInvalidationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *FieldLevelEncryptionConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "FieldLevelEncryptionConfig"} - if s.CallerReference == nil { - invalidParams.Add(request.NewErrParamRequired("CallerReference")) +func (s *GetInvalidationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetInvalidationInput"} + if s.DistributionId == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionId")) } - if s.ContentTypeProfileConfig != nil { - if err := s.ContentTypeProfileConfig.Validate(); err != nil { - invalidParams.AddNested("ContentTypeProfileConfig", err.(request.ErrInvalidParams)) - } + if s.DistributionId != nil && len(*s.DistributionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DistributionId", 1)) } - if s.QueryArgProfileConfig != nil { - if err := s.QueryArgProfileConfig.Validate(); err != nil { - invalidParams.AddNested("QueryArgProfileConfig", err.(request.ErrInvalidParams)) - } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -8884,189 +14150,139 @@ func (s *FieldLevelEncryptionConfig) Validate() error { return nil } -// SetCallerReference sets the CallerReference field's value. -func (s *FieldLevelEncryptionConfig) SetCallerReference(v string) *FieldLevelEncryptionConfig { - s.CallerReference = &v +// SetDistributionId sets the DistributionId field's value. +func (s *GetInvalidationInput) SetDistributionId(v string) *GetInvalidationInput { + s.DistributionId = &v return s } -// SetComment sets the Comment field's value. -func (s *FieldLevelEncryptionConfig) SetComment(v string) *FieldLevelEncryptionConfig { - s.Comment = &v +// SetId sets the Id field's value. +func (s *GetInvalidationInput) SetId(v string) *GetInvalidationInput { + s.Id = &v return s } -// SetContentTypeProfileConfig sets the ContentTypeProfileConfig field's value. -func (s *FieldLevelEncryptionConfig) SetContentTypeProfileConfig(v *ContentTypeProfileConfig) *FieldLevelEncryptionConfig { - s.ContentTypeProfileConfig = v - return s +// The returned result of the corresponding request. +type GetInvalidationOutput struct { + _ struct{} `type:"structure" payload:"Invalidation"` + + // The invalidation's information. For more information, see Invalidation Complex + // Type (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/InvalidationDatatype.html). + Invalidation *Invalidation `type:"structure"` +} + +// String returns the string representation +func (s GetInvalidationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInvalidationOutput) GoString() string { + return s.String() } -// SetQueryArgProfileConfig sets the QueryArgProfileConfig field's value. 
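// GetInvalidationInput above pairs a distribution ID with an invalidation ID, and both
// are validated before the call is made. A minimal caller-side sketch (application code,
// not part of this vendored file; both IDs, the function name, and the session setup are
// placeholder assumptions, with the usual aws, session, cloudfront, and fmt imports):
func exampleGetInvalidation() error {
	sess := session.Must(session.NewSession())
	svc := cloudfront.New(sess)

	out, err := svc.GetInvalidation(&cloudfront.GetInvalidationInput{
		DistributionId: aws.String("EDFDVBD6EXAMPLE"), // placeholder distribution ID
		Id:             aws.String("IDFDVBD632BHDS5"), // placeholder invalidation ID
	})
	if err != nil {
		return err
	}

	// Invalidation.Status reads "InProgress" until the batch finishes, then "Completed".
	fmt.Println(aws.StringValue(out.Invalidation.Status))
	return nil
}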
-func (s *FieldLevelEncryptionConfig) SetQueryArgProfileConfig(v *QueryArgProfileConfig) *FieldLevelEncryptionConfig { - s.QueryArgProfileConfig = v +// SetInvalidation sets the Invalidation field's value. +func (s *GetInvalidationOutput) SetInvalidation(v *Invalidation) *GetInvalidationOutput { + s.Invalidation = v return s } -// List of field-level encrpytion configurations. -type FieldLevelEncryptionList struct { - _ struct{} `type:"structure"` - - // An array of field-level encryption items. - Items []*FieldLevelEncryptionSummary `locationNameList:"FieldLevelEncryptionSummary" type:"list"` - - // The maximum number of elements you want in the response body. - // - // MaxItems is a required field - MaxItems *int64 `type:"integer" required:"true"` - - // If there are more elements to be listed, this element is present and contains - // the value that you can use for the Marker request parameter to continue listing - // your configurations where you left off. - NextMarker *string `type:"string"` +type GetMonitoringSubscriptionInput struct { + _ struct{} `locationName:"GetMonitoringSubscriptionRequest" type:"structure"` - // The number of field-level encryption items. + // The ID of the distribution that you are getting metrics information for. // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // DistributionId is a required field + DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` } // String returns the string representation -func (s FieldLevelEncryptionList) String() string { +func (s GetMonitoringSubscriptionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FieldLevelEncryptionList) GoString() string { +func (s GetMonitoringSubscriptionInput) GoString() string { return s.String() } -// SetItems sets the Items field's value. -func (s *FieldLevelEncryptionList) SetItems(v []*FieldLevelEncryptionSummary) *FieldLevelEncryptionList { - s.Items = v - return s -} - -// SetMaxItems sets the MaxItems field's value. -func (s *FieldLevelEncryptionList) SetMaxItems(v int64) *FieldLevelEncryptionList { - s.MaxItems = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMonitoringSubscriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMonitoringSubscriptionInput"} + if s.DistributionId == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionId")) + } + if s.DistributionId != nil && len(*s.DistributionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DistributionId", 1)) + } -// SetNextMarker sets the NextMarker field's value. -func (s *FieldLevelEncryptionList) SetNextMarker(v string) *FieldLevelEncryptionList { - s.NextMarker = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetQuantity sets the Quantity field's value. -func (s *FieldLevelEncryptionList) SetQuantity(v int64) *FieldLevelEncryptionList { - s.Quantity = &v +// SetDistributionId sets the DistributionId field's value. +func (s *GetMonitoringSubscriptionInput) SetDistributionId(v string) *GetMonitoringSubscriptionInput { + s.DistributionId = &v return s } -// A complex data type for field-level encryption profiles. -type FieldLevelEncryptionProfile struct { - _ struct{} `type:"structure"` - - // A complex data type that includes the profile name and the encryption entities - // for the field-level encryption profile. 
- // - // FieldLevelEncryptionProfileConfig is a required field - FieldLevelEncryptionProfileConfig *FieldLevelEncryptionProfileConfig `type:"structure" required:"true"` - - // The ID for a field-level encryption profile configuration which includes - // a set of profiles that specify certain selected data fields to be encrypted - // by specific public keys. - // - // Id is a required field - Id *string `type:"string" required:"true"` +type GetMonitoringSubscriptionOutput struct { + _ struct{} `type:"structure" payload:"MonitoringSubscription"` - // The last time the field-level encryption profile was updated. - // - // LastModifiedTime is a required field - LastModifiedTime *time.Time `type:"timestamp" required:"true"` + // A monitoring subscription. This structure contains information about whether + // additional CloudWatch metrics are enabled for a given CloudFront distribution. + MonitoringSubscription *MonitoringSubscription `type:"structure"` } // String returns the string representation -func (s FieldLevelEncryptionProfile) String() string { +func (s GetMonitoringSubscriptionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FieldLevelEncryptionProfile) GoString() string { +func (s GetMonitoringSubscriptionOutput) GoString() string { return s.String() } -// SetFieldLevelEncryptionProfileConfig sets the FieldLevelEncryptionProfileConfig field's value. -func (s *FieldLevelEncryptionProfile) SetFieldLevelEncryptionProfileConfig(v *FieldLevelEncryptionProfileConfig) *FieldLevelEncryptionProfile { - s.FieldLevelEncryptionProfileConfig = v - return s -} - -// SetId sets the Id field's value. -func (s *FieldLevelEncryptionProfile) SetId(v string) *FieldLevelEncryptionProfile { - s.Id = &v - return s -} - -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *FieldLevelEncryptionProfile) SetLastModifiedTime(v time.Time) *FieldLevelEncryptionProfile { - s.LastModifiedTime = &v +// SetMonitoringSubscription sets the MonitoringSubscription field's value. +func (s *GetMonitoringSubscriptionOutput) SetMonitoringSubscription(v *MonitoringSubscription) *GetMonitoringSubscriptionOutput { + s.MonitoringSubscription = v return s } -// A complex data type of profiles for the field-level encryption. -type FieldLevelEncryptionProfileConfig struct { - _ struct{} `type:"structure"` - - // A unique number that ensures that the request can't be replayed. - // - // CallerReference is a required field - CallerReference *string `type:"string" required:"true"` - - // An optional comment for the field-level encryption profile. - Comment *string `type:"string"` - - // A complex data type of encryption entities for the field-level encryption - // profile that include the public key ID, provider, and field patterns for - // specifying which fields to encrypt with this key. - // - // EncryptionEntities is a required field - EncryptionEntities *EncryptionEntities `type:"structure" required:"true"` +type GetOriginRequestPolicyConfigInput struct { + _ struct{} `locationName:"GetOriginRequestPolicyConfigRequest" type:"structure"` - // Profile name for the field-level encryption profile. + // The unique identifier for the origin request policy. If the origin request + // policy is attached to a distribution’s cache behavior, you can get the + // policy’s identifier using ListDistributions or GetDistribution. If the + // origin request policy is not attached to a cache behavior, you can get the + // identifier using ListOriginRequestPolicies. 
// - // Name is a required field - Name *string `type:"string" required:"true"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } // String returns the string representation -func (s FieldLevelEncryptionProfileConfig) String() string { +func (s GetOriginRequestPolicyConfigInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FieldLevelEncryptionProfileConfig) GoString() string { +func (s GetOriginRequestPolicyConfigInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *FieldLevelEncryptionProfileConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "FieldLevelEncryptionProfileConfig"} - if s.CallerReference == nil { - invalidParams.Add(request.NewErrParamRequired("CallerReference")) - } - if s.EncryptionEntities == nil { - invalidParams.Add(request.NewErrParamRequired("EncryptionEntities")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *GetOriginRequestPolicyConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOriginRequestPolicyConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) } - if s.EncryptionEntities != nil { - if err := s.EncryptionEntities.Validate(); err != nil { - invalidParams.AddNested("EncryptionEntities", err.(request.ErrInvalidParams)) - } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -9075,251 +14291,221 @@ func (s *FieldLevelEncryptionProfileConfig) Validate() error { return nil } -// SetCallerReference sets the CallerReference field's value. -func (s *FieldLevelEncryptionProfileConfig) SetCallerReference(v string) *FieldLevelEncryptionProfileConfig { - s.CallerReference = &v - return s -} - -// SetComment sets the Comment field's value. -func (s *FieldLevelEncryptionProfileConfig) SetComment(v string) *FieldLevelEncryptionProfileConfig { - s.Comment = &v - return s -} - -// SetEncryptionEntities sets the EncryptionEntities field's value. -func (s *FieldLevelEncryptionProfileConfig) SetEncryptionEntities(v *EncryptionEntities) *FieldLevelEncryptionProfileConfig { - s.EncryptionEntities = v - return s -} - -// SetName sets the Name field's value. -func (s *FieldLevelEncryptionProfileConfig) SetName(v string) *FieldLevelEncryptionProfileConfig { - s.Name = &v +// SetId sets the Id field's value. +func (s *GetOriginRequestPolicyConfigInput) SetId(v string) *GetOriginRequestPolicyConfigInput { + s.Id = &v return s } -// List of field-level encryption profiles. -type FieldLevelEncryptionProfileList struct { - _ struct{} `type:"structure"` - - // The field-level encryption profile items. - Items []*FieldLevelEncryptionProfileSummary `locationNameList:"FieldLevelEncryptionProfileSummary" type:"list"` - - // The maximum number of field-level encryption profiles you want in the response - // body. - // - // MaxItems is a required field - MaxItems *int64 `type:"integer" required:"true"` +type GetOriginRequestPolicyConfigOutput struct { + _ struct{} `type:"structure" payload:"OriginRequestPolicyConfig"` - // If there are more elements to be listed, this element is present and contains - // the value that you can use for the Marker request parameter to continue listing - // your profiles where you left off. 
- NextMarker *string `type:"string"` + // The current version of the origin request policy. + ETag *string `location:"header" locationName:"ETag" type:"string"` - // The number of field-level encryption profiles. - // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // The origin request policy configuration. + OriginRequestPolicyConfig *OriginRequestPolicyConfig `type:"structure"` } // String returns the string representation -func (s FieldLevelEncryptionProfileList) String() string { +func (s GetOriginRequestPolicyConfigOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FieldLevelEncryptionProfileList) GoString() string { +func (s GetOriginRequestPolicyConfigOutput) GoString() string { return s.String() } -// SetItems sets the Items field's value. -func (s *FieldLevelEncryptionProfileList) SetItems(v []*FieldLevelEncryptionProfileSummary) *FieldLevelEncryptionProfileList { - s.Items = v +// SetETag sets the ETag field's value. +func (s *GetOriginRequestPolicyConfigOutput) SetETag(v string) *GetOriginRequestPolicyConfigOutput { + s.ETag = &v return s } -// SetMaxItems sets the MaxItems field's value. -func (s *FieldLevelEncryptionProfileList) SetMaxItems(v int64) *FieldLevelEncryptionProfileList { - s.MaxItems = &v +// SetOriginRequestPolicyConfig sets the OriginRequestPolicyConfig field's value. +func (s *GetOriginRequestPolicyConfigOutput) SetOriginRequestPolicyConfig(v *OriginRequestPolicyConfig) *GetOriginRequestPolicyConfigOutput { + s.OriginRequestPolicyConfig = v return s } -// SetNextMarker sets the NextMarker field's value. -func (s *FieldLevelEncryptionProfileList) SetNextMarker(v string) *FieldLevelEncryptionProfileList { - s.NextMarker = &v - return s -} +type GetOriginRequestPolicyInput struct { + _ struct{} `locationName:"GetOriginRequestPolicyRequest" type:"structure"` -// SetQuantity sets the Quantity field's value. -func (s *FieldLevelEncryptionProfileList) SetQuantity(v int64) *FieldLevelEncryptionProfileList { - s.Quantity = &v - return s + // The unique identifier for the origin request policy. If the origin request + // policy is attached to a distribution’s cache behavior, you can get the + // policy’s identifier using ListDistributions or GetDistribution. If the + // origin request policy is not attached to a cache behavior, you can get the + // identifier using ListOriginRequestPolicies. + // + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } -// The field-level encryption profile summary. -type FieldLevelEncryptionProfileSummary struct { - _ struct{} `type:"structure"` +// String returns the string representation +func (s GetOriginRequestPolicyInput) String() string { + return awsutil.Prettify(s) +} - // An optional comment for the field-level encryption profile summary. - Comment *string `type:"string"` +// GoString returns the string representation +func (s GetOriginRequestPolicyInput) GoString() string { + return s.String() +} - // A complex data type of encryption entities for the field-level encryption - // profile that include the public key ID, provider, and field patterns for - // specifying which fields to encrypt with this key. - // - // EncryptionEntities is a required field - EncryptionEntities *EncryptionEntities `type:"structure" required:"true"` +// Validate inspects the fields of the type to determine if they are valid. 
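// As the comments above note, the origin request policy identifier comes from
// ListDistributions or GetDistribution when the policy is attached to a cache behavior,
// and from ListOriginRequestPolicies otherwise. A minimal caller-side sketch of fetching
// the configuration once the identifier is known (application code, not part of this
// vendored file; the policy ID, function name, and session setup are placeholders):
func exampleGetOriginRequestPolicyConfig() error {
	sess := session.Must(session.NewSession())
	svc := cloudfront.New(sess)

	out, err := svc.GetOriginRequestPolicyConfig(&cloudfront.GetOriginRequestPolicyConfigInput{
		Id: aws.String("ORP1EXAMPLEPOLICYID"), // placeholder policy ID
	})
	if err != nil {
		return err
	}

	// The returned ETag is the version handle that update and delete calls expect as IfMatch.
	fmt.Printf("%s %s\n",
		aws.StringValue(out.OriginRequestPolicyConfig.Name),
		aws.StringValue(out.ETag))
	return nil
}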
+func (s *GetOriginRequestPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOriginRequestPolicyInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } - // ID for the field-level encryption profile summary. - // - // Id is a required field - Id *string `type:"string" required:"true"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // The time when the the field-level encryption profile summary was last updated. - // - // LastModifiedTime is a required field - LastModifiedTime *time.Time `type:"timestamp" required:"true"` +// SetId sets the Id field's value. +func (s *GetOriginRequestPolicyInput) SetId(v string) *GetOriginRequestPolicyInput { + s.Id = &v + return s +} - // Name for the field-level encryption profile summary. - // - // Name is a required field - Name *string `type:"string" required:"true"` +type GetOriginRequestPolicyOutput struct { + _ struct{} `type:"structure" payload:"OriginRequestPolicy"` + + // The current version of the origin request policy. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The origin request policy. + OriginRequestPolicy *OriginRequestPolicy `type:"structure"` } // String returns the string representation -func (s FieldLevelEncryptionProfileSummary) String() string { +func (s GetOriginRequestPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FieldLevelEncryptionProfileSummary) GoString() string { +func (s GetOriginRequestPolicyOutput) GoString() string { return s.String() } -// SetComment sets the Comment field's value. -func (s *FieldLevelEncryptionProfileSummary) SetComment(v string) *FieldLevelEncryptionProfileSummary { - s.Comment = &v - return s -} - -// SetEncryptionEntities sets the EncryptionEntities field's value. -func (s *FieldLevelEncryptionProfileSummary) SetEncryptionEntities(v *EncryptionEntities) *FieldLevelEncryptionProfileSummary { - s.EncryptionEntities = v - return s -} - -// SetId sets the Id field's value. -func (s *FieldLevelEncryptionProfileSummary) SetId(v string) *FieldLevelEncryptionProfileSummary { - s.Id = &v - return s -} - -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *FieldLevelEncryptionProfileSummary) SetLastModifiedTime(v time.Time) *FieldLevelEncryptionProfileSummary { - s.LastModifiedTime = &v +// SetETag sets the ETag field's value. +func (s *GetOriginRequestPolicyOutput) SetETag(v string) *GetOriginRequestPolicyOutput { + s.ETag = &v return s } -// SetName sets the Name field's value. -func (s *FieldLevelEncryptionProfileSummary) SetName(v string) *FieldLevelEncryptionProfileSummary { - s.Name = &v +// SetOriginRequestPolicy sets the OriginRequestPolicy field's value. +func (s *GetOriginRequestPolicyOutput) SetOriginRequestPolicy(v *OriginRequestPolicy) *GetOriginRequestPolicyOutput { + s.OriginRequestPolicy = v return s } -// A summary of a field-level encryption item. -type FieldLevelEncryptionSummary struct { - _ struct{} `type:"structure"` - - // An optional comment about the field-level encryption item. - Comment *string `type:"string"` - - // A summary of a content type-profile mapping. 
- ContentTypeProfileConfig *ContentTypeProfileConfig `type:"structure"` +type GetPublicKeyConfigInput struct { + _ struct{} `locationName:"GetPublicKeyConfigRequest" type:"structure"` - // The unique ID of a field-level encryption item. + // Request the ID for the public key configuration. // // Id is a required field - Id *string `type:"string" required:"true"` - - // The last time that the summary of field-level encryption items was modified. - // - // LastModifiedTime is a required field - LastModifiedTime *time.Time `type:"timestamp" required:"true"` - - // A summary of a query argument-profile mapping. - QueryArgProfileConfig *QueryArgProfileConfig `type:"structure"` + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } // String returns the string representation -func (s FieldLevelEncryptionSummary) String() string { +func (s GetPublicKeyConfigInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FieldLevelEncryptionSummary) GoString() string { +func (s GetPublicKeyConfigInput) GoString() string { return s.String() } -// SetComment sets the Comment field's value. -func (s *FieldLevelEncryptionSummary) SetComment(v string) *FieldLevelEncryptionSummary { - s.Comment = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPublicKeyConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicKeyConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } -// SetContentTypeProfileConfig sets the ContentTypeProfileConfig field's value. -func (s *FieldLevelEncryptionSummary) SetContentTypeProfileConfig(v *ContentTypeProfileConfig) *FieldLevelEncryptionSummary { - s.ContentTypeProfileConfig = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } // SetId sets the Id field's value. -func (s *FieldLevelEncryptionSummary) SetId(v string) *FieldLevelEncryptionSummary { +func (s *GetPublicKeyConfigInput) SetId(v string) *GetPublicKeyConfigInput { s.Id = &v return s } -// SetLastModifiedTime sets the LastModifiedTime field's value. -func (s *FieldLevelEncryptionSummary) SetLastModifiedTime(v time.Time) *FieldLevelEncryptionSummary { - s.LastModifiedTime = &v - return s +type GetPublicKeyConfigOutput struct { + _ struct{} `type:"structure" payload:"PublicKeyConfig"` + + // The current version of the public key configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // Return the result for the public key configuration. + PublicKeyConfig *PublicKeyConfig `type:"structure"` } -// SetQueryArgProfileConfig sets the QueryArgProfileConfig field's value. -func (s *FieldLevelEncryptionSummary) SetQueryArgProfileConfig(v *QueryArgProfileConfig) *FieldLevelEncryptionSummary { - s.QueryArgProfileConfig = v +// String returns the string representation +func (s GetPublicKeyConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicKeyConfigOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *GetPublicKeyConfigOutput) SetETag(v string) *GetPublicKeyConfigOutput { + s.ETag = &v return s } -// A complex data type that includes the field patterns to match for field-level -// encryption. 
-type FieldPatterns struct { - _ struct{} `type:"structure"` +// SetPublicKeyConfig sets the PublicKeyConfig field's value. +func (s *GetPublicKeyConfigOutput) SetPublicKeyConfig(v *PublicKeyConfig) *GetPublicKeyConfigOutput { + s.PublicKeyConfig = v + return s +} - // An array of the field-level encryption field patterns. - Items []*string `locationNameList:"FieldPattern" type:"list"` +type GetPublicKeyInput struct { + _ struct{} `locationName:"GetPublicKeyRequest" type:"structure"` - // The number of field-level encryption field patterns. + // Request the ID for the public key. // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } // String returns the string representation -func (s FieldPatterns) String() string { +func (s GetPublicKeyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FieldPatterns) GoString() string { +func (s GetPublicKeyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *FieldPatterns) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "FieldPatterns"} - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) +func (s *GetPublicKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicKeyInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -9328,199 +14514,127 @@ func (s *FieldPatterns) Validate() error { return nil } -// SetItems sets the Items field's value. -func (s *FieldPatterns) SetItems(v []*string) *FieldPatterns { - s.Items = v - return s -} - -// SetQuantity sets the Quantity field's value. -func (s *FieldPatterns) SetQuantity(v int64) *FieldPatterns { - s.Quantity = &v +// SetId sets the Id field's value. +func (s *GetPublicKeyInput) SetId(v string) *GetPublicKeyInput { + s.Id = &v return s } -// A complex type that specifies how CloudFront handles query strings, cookies, -// and HTTP headers. -type ForwardedValues struct { - _ struct{} `type:"structure"` - - // A complex type that specifies whether you want CloudFront to forward cookies - // to the origin and, if so, which ones. For more information about forwarding - // cookies to the origin, see How CloudFront Forwards, Caches, and Logs Cookies - // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Cookies.html) - // in the Amazon CloudFront Developer Guide. - // - // Cookies is a required field - Cookies *CookiePreference `type:"structure" required:"true"` - - // A complex type that specifies the Headers, if any, that you want CloudFront - // to forward to the origin for this cache behavior (whitelisted headers). For - // the headers that you specify, CloudFront also caches separate versions of - // a specified object that is based on the header values in viewer requests. - // - // For more information, see Caching Content Based on Request Headers (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/header-caching.html) - // in the Amazon CloudFront Developer Guide. 
- Headers *Headers `type:"structure"` +type GetPublicKeyOutput struct { + _ struct{} `type:"structure" payload:"PublicKey"` - // Indicates whether you want CloudFront to forward query strings to the origin - // that is associated with this cache behavior and cache based on the query - // string parameters. CloudFront behavior depends on the value of QueryString - // and on the values that you specify for QueryStringCacheKeys, if any: - // - // If you specify true for QueryString and you don't specify any values for - // QueryStringCacheKeys, CloudFront forwards all query string parameters to - // the origin and caches based on all query string parameters. Depending on - // how many query string parameters and values you have, this can adversely - // affect performance because CloudFront must forward more requests to the origin. - // - // If you specify true for QueryString and you specify one or more values for - // QueryStringCacheKeys, CloudFront forwards all query string parameters to - // the origin, but it only caches based on the query string parameters that - // you specify. - // - // If you specify false for QueryString, CloudFront doesn't forward any query - // string parameters to the origin, and doesn't cache based on query string - // parameters. - // - // For more information, see Configuring CloudFront to Cache Based on Query - // String Parameters (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/QueryStringParameters.html) - // in the Amazon CloudFront Developer Guide. - // - // QueryString is a required field - QueryString *bool `type:"boolean" required:"true"` + // The current version of the public key. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` - // A complex type that contains information about the query string parameters - // that you want CloudFront to use for caching for this cache behavior. - QueryStringCacheKeys *QueryStringCacheKeys `type:"structure"` + // Return the public key. + PublicKey *PublicKey `type:"structure"` } // String returns the string representation -func (s ForwardedValues) String() string { +func (s GetPublicKeyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ForwardedValues) GoString() string { +func (s GetPublicKeyOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ForwardedValues) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ForwardedValues"} - if s.Cookies == nil { - invalidParams.Add(request.NewErrParamRequired("Cookies")) - } - if s.QueryString == nil { - invalidParams.Add(request.NewErrParamRequired("QueryString")) - } - if s.Cookies != nil { - if err := s.Cookies.Validate(); err != nil { - invalidParams.AddNested("Cookies", err.(request.ErrInvalidParams)) - } - } - if s.Headers != nil { - if err := s.Headers.Validate(); err != nil { - invalidParams.AddNested("Headers", err.(request.ErrInvalidParams)) - } - } - if s.QueryStringCacheKeys != nil { - if err := s.QueryStringCacheKeys.Validate(); err != nil { - invalidParams.AddNested("QueryStringCacheKeys", err.(request.ErrInvalidParams)) - } - } +// SetETag sets the ETag field's value. +func (s *GetPublicKeyOutput) SetETag(v string) *GetPublicKeyOutput { + s.ETag = &v + return s +} + +// SetPublicKey sets the PublicKey field's value. 
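// GetPublicKeyConfigInput and GetPublicKeyInput above both take only the key ID; the
// outputs differ in whether the payload is the configuration alone or the full PublicKey
// structure, each accompanied by an ETag. A minimal caller-side sketch (application code,
// not part of this vendored file; the key ID, function name, and session setup are
// placeholder assumptions):
func exampleGetPublicKey() error {
	sess := session.Must(session.NewSession())
	svc := cloudfront.New(sess)

	out, err := svc.GetPublicKey(&cloudfront.GetPublicKeyInput{
		Id: aws.String("K2EXAMPLEPUBKEYID"), // placeholder public key ID
	})
	if err != nil {
		return err
	}

	// The ETag is the version handle for subsequent UpdatePublicKey or DeletePublicKey calls.
	fmt.Println(aws.StringValue(out.ETag))
	return nil
}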
+func (s *GetPublicKeyOutput) SetPublicKey(v *PublicKey) *GetPublicKeyOutput { + s.PublicKey = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +type GetRealtimeLogConfigInput struct { + _ struct{} `locationName:"GetRealtimeLogConfigRequest" type:"structure" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` + + // The Amazon Resource Name (ARN) of the real-time log configuration to get. + ARN *string `type:"string"` + + // The name of the real-time log configuration to get. + Name *string `type:"string"` } -// SetCookies sets the Cookies field's value. -func (s *ForwardedValues) SetCookies(v *CookiePreference) *ForwardedValues { - s.Cookies = v - return s +// String returns the string representation +func (s GetRealtimeLogConfigInput) String() string { + return awsutil.Prettify(s) } -// SetHeaders sets the Headers field's value. -func (s *ForwardedValues) SetHeaders(v *Headers) *ForwardedValues { - s.Headers = v - return s +// GoString returns the string representation +func (s GetRealtimeLogConfigInput) GoString() string { + return s.String() } -// SetQueryString sets the QueryString field's value. -func (s *ForwardedValues) SetQueryString(v bool) *ForwardedValues { - s.QueryString = &v +// SetARN sets the ARN field's value. +func (s *GetRealtimeLogConfigInput) SetARN(v string) *GetRealtimeLogConfigInput { + s.ARN = &v return s } -// SetQueryStringCacheKeys sets the QueryStringCacheKeys field's value. -func (s *ForwardedValues) SetQueryStringCacheKeys(v *QueryStringCacheKeys) *ForwardedValues { - s.QueryStringCacheKeys = v +// SetName sets the Name field's value. +func (s *GetRealtimeLogConfigInput) SetName(v string) *GetRealtimeLogConfigInput { + s.Name = &v return s } -// A complex type that controls the countries in which your content is distributed. -// CloudFront determines the location of your users using MaxMind GeoIP databases. -type GeoRestriction struct { +type GetRealtimeLogConfigOutput struct { _ struct{} `type:"structure"` - // A complex type that contains a Location element for each country in which - // you want CloudFront either to distribute your content (whitelist) or not - // distribute your content (blacklist). - // - // The Location element is a two-letter, uppercase country code for a country - // that you want to include in your blacklist or whitelist. Include one Location - // element for each country. - // - // CloudFront and MaxMind both use ISO 3166 country codes. For the current list - // of countries and the corresponding codes, see ISO 3166-1-alpha-2 code on - // the International Organization for Standardization website. You can also - // refer to the country list on the CloudFront console, which includes both - // country names and codes. - Items []*string `locationNameList:"Location" type:"list"` + // A real-time log configuration. + RealtimeLogConfig *RealtimeLogConfig `type:"structure"` +} - // When geo restriction is enabled, this is the number of countries in your - // whitelist or blacklist. Otherwise, when it is not enabled, Quantity is 0, - // and you can omit Items. - // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` +// String returns the string representation +func (s GetRealtimeLogConfigOutput) String() string { + return awsutil.Prettify(s) +} - // The method that you want to use to restrict distribution of your content - // by country: - // - // * none: No geo restriction is enabled, meaning access to content is not - // restricted by client geo location. 
- // - // * blacklist: The Location elements specify the countries in which you - // don't want CloudFront to distribute your content. - // - // * whitelist: The Location elements specify the countries in which you - // want CloudFront to distribute your content. +// GoString returns the string representation +func (s GetRealtimeLogConfigOutput) GoString() string { + return s.String() +} + +// SetRealtimeLogConfig sets the RealtimeLogConfig field's value. +func (s *GetRealtimeLogConfigOutput) SetRealtimeLogConfig(v *RealtimeLogConfig) *GetRealtimeLogConfigOutput { + s.RealtimeLogConfig = v + return s +} + +// To request to get a streaming distribution configuration. +type GetStreamingDistributionConfigInput struct { + _ struct{} `locationName:"GetStreamingDistributionConfigRequest" type:"structure"` + + // The streaming distribution's ID. // - // RestrictionType is a required field - RestrictionType *string `type:"string" required:"true" enum:"GeoRestrictionType"` + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } // String returns the string representation -func (s GeoRestriction) String() string { +func (s GetStreamingDistributionConfigInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GeoRestriction) GoString() string { +func (s GetStreamingDistributionConfigInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GeoRestriction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GeoRestriction"} - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) +func (s *GetStreamingDistributionConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetStreamingDistributionConfigInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) } - if s.RestrictionType == nil { - invalidParams.Add(request.NewErrParamRequired("RestrictionType")) + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -9529,48 +14643,68 @@ func (s *GeoRestriction) Validate() error { return nil } -// SetItems sets the Items field's value. -func (s *GeoRestriction) SetItems(v []*string) *GeoRestriction { - s.Items = v +// SetId sets the Id field's value. +func (s *GetStreamingDistributionConfigInput) SetId(v string) *GetStreamingDistributionConfigInput { + s.Id = &v return s } -// SetQuantity sets the Quantity field's value. -func (s *GeoRestriction) SetQuantity(v int64) *GeoRestriction { - s.Quantity = &v +// The returned result of the corresponding request. +type GetStreamingDistributionConfigOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistributionConfig"` + + // The current version of the configuration. For example: E2QWRUHAPOMQZL. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The streaming distribution's configuration information. + StreamingDistributionConfig *StreamingDistributionConfig `type:"structure"` +} + +// String returns the string representation +func (s GetStreamingDistributionConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetStreamingDistributionConfigOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. 
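// GetStreamingDistributionConfigOutput above returns the streaming distribution's
// configuration together with its ETag, which later update and delete calls expect as
// IfMatch. A minimal caller-side sketch (application code, not part of this vendored
// file; the distribution ID, function name, and session setup are placeholders):
func exampleGetStreamingDistributionConfig() error {
	sess := session.Must(session.NewSession())
	svc := cloudfront.New(sess)

	out, err := svc.GetStreamingDistributionConfig(&cloudfront.GetStreamingDistributionConfigInput{
		Id: aws.String("EGTXBD79EXAMPLE"), // placeholder streaming distribution ID
	})
	if err != nil {
		return err
	}

	fmt.Printf("enabled=%t etag=%s\n",
		aws.BoolValue(out.StreamingDistributionConfig.Enabled),
		aws.StringValue(out.ETag))
	return nil
}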
+func (s *GetStreamingDistributionConfigOutput) SetETag(v string) *GetStreamingDistributionConfigOutput { + s.ETag = &v return s } -// SetRestrictionType sets the RestrictionType field's value. -func (s *GeoRestriction) SetRestrictionType(v string) *GeoRestriction { - s.RestrictionType = &v +// SetStreamingDistributionConfig sets the StreamingDistributionConfig field's value. +func (s *GetStreamingDistributionConfigOutput) SetStreamingDistributionConfig(v *StreamingDistributionConfig) *GetStreamingDistributionConfigOutput { + s.StreamingDistributionConfig = v return s } -// The origin access identity's configuration information. For more information, -// see CloudFrontOriginAccessIdentityConfig (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_CloudFrontOriginAccessIdentityConfig.html). -type GetCloudFrontOriginAccessIdentityConfigInput struct { - _ struct{} `locationName:"GetCloudFrontOriginAccessIdentityConfigRequest" type:"structure"` +// The request to get a streaming distribution's information. +type GetStreamingDistributionInput struct { + _ struct{} `locationName:"GetStreamingDistributionRequest" type:"structure"` - // The identity's ID. + // The streaming distribution's ID. // // Id is a required field Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` } // String returns the string representation -func (s GetCloudFrontOriginAccessIdentityConfigInput) String() string { +func (s GetStreamingDistributionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCloudFrontOriginAccessIdentityConfigInput) GoString() string { +func (s GetStreamingDistributionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetCloudFrontOriginAccessIdentityConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCloudFrontOriginAccessIdentityConfigInput"} +func (s *GetStreamingDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetStreamingDistributionInput"} if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -9585,149 +14719,208 @@ func (s *GetCloudFrontOriginAccessIdentityConfigInput) Validate() error { } // SetId sets the Id field's value. -func (s *GetCloudFrontOriginAccessIdentityConfigInput) SetId(v string) *GetCloudFrontOriginAccessIdentityConfigInput { +func (s *GetStreamingDistributionInput) SetId(v string) *GetStreamingDistributionInput { s.Id = &v return s } // The returned result of the corresponding request. -type GetCloudFrontOriginAccessIdentityConfigOutput struct { - _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` - - // The origin access identity's configuration information. - CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `type:"structure"` +type GetStreamingDistributionOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistribution"` - // The current version of the configuration. For example: E2QWRUHAPOMQZL. + // The current version of the streaming distribution's information. For example: + // E2QWRUHAPOMQZL. ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The streaming distribution's information. 
+ StreamingDistribution *StreamingDistribution `type:"structure"` } // String returns the string representation -func (s GetCloudFrontOriginAccessIdentityConfigOutput) String() string { +func (s GetStreamingDistributionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCloudFrontOriginAccessIdentityConfigOutput) GoString() string { +func (s GetStreamingDistributionOutput) GoString() string { return s.String() } -// SetCloudFrontOriginAccessIdentityConfig sets the CloudFrontOriginAccessIdentityConfig field's value. -func (s *GetCloudFrontOriginAccessIdentityConfigOutput) SetCloudFrontOriginAccessIdentityConfig(v *OriginAccessIdentityConfig) *GetCloudFrontOriginAccessIdentityConfigOutput { - s.CloudFrontOriginAccessIdentityConfig = v +// SetETag sets the ETag field's value. +func (s *GetStreamingDistributionOutput) SetETag(v string) *GetStreamingDistributionOutput { + s.ETag = &v return s } -// SetETag sets the ETag field's value. -func (s *GetCloudFrontOriginAccessIdentityConfigOutput) SetETag(v string) *GetCloudFrontOriginAccessIdentityConfigOutput { - s.ETag = &v +// SetStreamingDistribution sets the StreamingDistribution field's value. +func (s *GetStreamingDistributionOutput) SetStreamingDistribution(v *StreamingDistribution) *GetStreamingDistributionOutput { + s.StreamingDistribution = v return s } -// The request to get an origin access identity's information. -type GetCloudFrontOriginAccessIdentityInput struct { - _ struct{} `locationName:"GetCloudFrontOriginAccessIdentityRequest" type:"structure"` +// Contains a list of HTTP header names. +type Headers struct { + _ struct{} `type:"structure"` - // The identity's ID. + // A list of HTTP header names. + Items []*string `locationNameList:"Name" type:"list"` + + // The number of header names in the Items list. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s Headers) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Headers) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Headers) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Headers"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetItems sets the Items field's value. +func (s *Headers) SetItems(v []*string) *Headers { + s.Items = v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *Headers) SetQuantity(v int64) *Headers { + s.Quantity = &v + return s +} + +// An invalidation. +type Invalidation struct { + _ struct{} `type:"structure"` + + // The date and time the invalidation request was first made. + // + // CreateTime is a required field + CreateTime *time.Time `type:"timestamp" required:"true"` + + // The identifier for the invalidation request. For example: IDFDVBD632BHDS5. // // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + Id *string `type:"string" required:"true"` + + // The current invalidation information for the batch request. + // + // InvalidationBatch is a required field + InvalidationBatch *InvalidationBatch `type:"structure" required:"true"` + + // The status of the invalidation request. 
When the invalidation batch is finished, + // the status is Completed. + // + // Status is a required field + Status *string `type:"string" required:"true"` } // String returns the string representation -func (s GetCloudFrontOriginAccessIdentityInput) String() string { +func (s Invalidation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCloudFrontOriginAccessIdentityInput) GoString() string { +func (s Invalidation) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetCloudFrontOriginAccessIdentityInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetCloudFrontOriginAccessIdentityInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCreateTime sets the CreateTime field's value. +func (s *Invalidation) SetCreateTime(v time.Time) *Invalidation { + s.CreateTime = &v + return s } // SetId sets the Id field's value. -func (s *GetCloudFrontOriginAccessIdentityInput) SetId(v string) *GetCloudFrontOriginAccessIdentityInput { +func (s *Invalidation) SetId(v string) *Invalidation { s.Id = &v return s } -// The returned result of the corresponding request. -type GetCloudFrontOriginAccessIdentityOutput struct { - _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentity"` - - // The origin access identity's information. - CloudFrontOriginAccessIdentity *OriginAccessIdentity `type:"structure"` - - // The current version of the origin access identity's information. For example: - // E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` -} - -// String returns the string representation -func (s GetCloudFrontOriginAccessIdentityOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetCloudFrontOriginAccessIdentityOutput) GoString() string { - return s.String() -} - -// SetCloudFrontOriginAccessIdentity sets the CloudFrontOriginAccessIdentity field's value. -func (s *GetCloudFrontOriginAccessIdentityOutput) SetCloudFrontOriginAccessIdentity(v *OriginAccessIdentity) *GetCloudFrontOriginAccessIdentityOutput { - s.CloudFrontOriginAccessIdentity = v +// SetInvalidationBatch sets the InvalidationBatch field's value. +func (s *Invalidation) SetInvalidationBatch(v *InvalidationBatch) *Invalidation { + s.InvalidationBatch = v return s } -// SetETag sets the ETag field's value. -func (s *GetCloudFrontOriginAccessIdentityOutput) SetETag(v string) *GetCloudFrontOriginAccessIdentityOutput { - s.ETag = &v +// SetStatus sets the Status field's value. +func (s *Invalidation) SetStatus(v string) *Invalidation { + s.Status = &v return s } -// The request to get a distribution configuration. -type GetDistributionConfigInput struct { - _ struct{} `locationName:"GetDistributionConfigRequest" type:"structure"` +// An invalidation batch. +type InvalidationBatch struct { + _ struct{} `type:"structure"` - // The distribution's ID. If the ID is empty, an empty distribution configuration - // is returned. + // A value that you specify to uniquely identify an invalidation request. CloudFront + // uses the value to prevent you from accidentally resubmitting an identical + // request. 
Whenever you create a new invalidation request, you must specify + // a new value for CallerReference and change other values in the request as + // applicable. One way to ensure that the value of CallerReference is unique + // is to use a timestamp, for example, 20120301090000. // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + // If you make a second invalidation request with the same value for CallerReference, + // and if the rest of the request is the same, CloudFront doesn't create a new + // invalidation request. Instead, CloudFront returns information about the invalidation + // request that you previously created with the same CallerReference. + // + // If CallerReference is a value you already sent in a previous invalidation + // batch request but the content of any Path is different from the original + // request, CloudFront returns an InvalidationBatchAlreadyExists error. + // + // CallerReference is a required field + CallerReference *string `type:"string" required:"true"` + + // A complex type that contains information about the objects that you want + // to invalidate. For more information, see Specifying the Objects to Invalidate + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html#invalidation-specifying-objects) + // in the Amazon CloudFront Developer Guide. + // + // Paths is a required field + Paths *Paths `type:"structure" required:"true"` } // String returns the string representation -func (s GetDistributionConfigInput) String() string { +func (s InvalidationBatch) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDistributionConfigInput) GoString() string { +func (s InvalidationBatch) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetDistributionConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDistributionConfigInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) +func (s *InvalidationBatch) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InvalidationBatch"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + if s.Paths == nil { + invalidParams.Add(request.NewErrParamRequired("Paths")) + } + if s.Paths != nil { + if err := s.Paths.Validate(); err != nil { + invalidParams.AddNested("Paths", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -9736,148 +14929,236 @@ func (s *GetDistributionConfigInput) Validate() error { return nil } -// SetId sets the Id field's value. -func (s *GetDistributionConfigInput) SetId(v string) *GetDistributionConfigInput { - s.Id = &v +// SetCallerReference sets the CallerReference field's value. +func (s *InvalidationBatch) SetCallerReference(v string) *InvalidationBatch { + s.CallerReference = &v return s } -// The returned result of the corresponding request. -type GetDistributionConfigOutput struct { - _ struct{} `type:"structure" payload:"DistributionConfig"` +// SetPaths sets the Paths field's value. +func (s *InvalidationBatch) SetPaths(v *Paths) *InvalidationBatch { + s.Paths = v + return s +} - // The distribution's configuration information. 
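// The CallerReference guidance above (a unique value per request, for example a
// timestamp) is easiest to see at the call site. A minimal caller-side sketch of
// submitting an InvalidationBatch (application code, not part of this vendored file;
// the distribution ID, paths, function name, and session setup are placeholder
// assumptions, with the usual aws, session, cloudfront, fmt, and time imports):
func exampleCreateInvalidation() error {
	sess := session.Must(session.NewSession())
	svc := cloudfront.New(sess)

	paths := []*string{aws.String("/index.html"), aws.String("/images/*")}
	out, err := svc.CreateInvalidation(&cloudfront.CreateInvalidationInput{
		DistributionId: aws.String("EDFDVBD6EXAMPLE"), // placeholder distribution ID
		InvalidationBatch: &cloudfront.InvalidationBatch{
			// A timestamp keeps CallerReference unique, as the comments above suggest;
			// reusing a value with different Paths returns InvalidationBatchAlreadyExists.
			CallerReference: aws.String(time.Now().UTC().Format("20060102150405")),
			Paths: &cloudfront.Paths{
				Items:    paths,
				Quantity: aws.Int64(int64(len(paths))),
			},
		},
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.Invalidation.Id))
	return nil
}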
- DistributionConfig *DistributionConfig `type:"structure"` +// The InvalidationList complex type describes the list of invalidation objects. +// For more information about invalidation, see Invalidating Objects (Web Distributions +// Only) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html) +// in the Amazon CloudFront Developer Guide. +type InvalidationList struct { + _ struct{} `type:"structure"` - // The current version of the configuration. For example: E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` + // A flag that indicates whether more invalidation batch requests remain to + // be listed. If your results were truncated, you can make a follow-up pagination + // request using the Marker request parameter to retrieve more invalidation + // batches in the list. + // + // IsTruncated is a required field + IsTruncated *bool `type:"boolean" required:"true"` + + // A complex type that contains one InvalidationSummary element for each invalidation + // batch created by the current AWS account. + Items []*InvalidationSummary `locationNameList:"InvalidationSummary" type:"list"` + + // The value that you provided for the Marker request parameter. + // + // Marker is a required field + Marker *string `type:"string" required:"true"` + + // The value that you provided for the MaxItems request parameter. + // + // MaxItems is a required field + MaxItems *int64 `type:"integer" required:"true"` + + // If IsTruncated is true, this element is present and contains the value that + // you can use for the Marker request parameter to continue listing your invalidation + // batches where they left off. + NextMarker *string `type:"string"` + + // The number of invalidation batches that were created by the current AWS account. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s GetDistributionConfigOutput) String() string { +func (s InvalidationList) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDistributionConfigOutput) GoString() string { +func (s InvalidationList) GoString() string { return s.String() } -// SetDistributionConfig sets the DistributionConfig field's value. -func (s *GetDistributionConfigOutput) SetDistributionConfig(v *DistributionConfig) *GetDistributionConfigOutput { - s.DistributionConfig = v +// SetIsTruncated sets the IsTruncated field's value. +func (s *InvalidationList) SetIsTruncated(v bool) *InvalidationList { + s.IsTruncated = &v return s } -// SetETag sets the ETag field's value. -func (s *GetDistributionConfigOutput) SetETag(v string) *GetDistributionConfigOutput { - s.ETag = &v +// SetItems sets the Items field's value. +func (s *InvalidationList) SetItems(v []*InvalidationSummary) *InvalidationList { + s.Items = v return s } -// The request to get a distribution's information. -type GetDistributionInput struct { - _ struct{} `locationName:"GetDistributionRequest" type:"structure"` +// SetMarker sets the Marker field's value. +func (s *InvalidationList) SetMarker(v string) *InvalidationList { + s.Marker = &v + return s +} - // The distribution's ID. If the ID is empty, an empty distribution configuration - // is returned. +// SetMaxItems sets the MaxItems field's value. +func (s *InvalidationList) SetMaxItems(v int64) *InvalidationList { + s.MaxItems = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. 
+func (s *InvalidationList) SetNextMarker(v string) *InvalidationList { + s.NextMarker = &v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *InvalidationList) SetQuantity(v int64) *InvalidationList { + s.Quantity = &v + return s +} + +// A summary of an invalidation request. +type InvalidationSummary struct { + _ struct{} `type:"structure"` + + // The time that an invalidation request was created. + // + // CreateTime is a required field + CreateTime *time.Time `type:"timestamp" required:"true"` + + // The unique ID for an invalidation request. // // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + Id *string `type:"string" required:"true"` + + // The status of an invalidation request. + // + // Status is a required field + Status *string `type:"string" required:"true"` } // String returns the string representation -func (s GetDistributionInput) String() string { +func (s InvalidationSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDistributionInput) GoString() string { +func (s InvalidationSummary) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetDistributionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDistributionInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCreateTime sets the CreateTime field's value. +func (s *InvalidationSummary) SetCreateTime(v time.Time) *InvalidationSummary { + s.CreateTime = &v + return s } // SetId sets the Id field's value. -func (s *GetDistributionInput) SetId(v string) *GetDistributionInput { +func (s *InvalidationSummary) SetId(v string) *InvalidationSummary { s.Id = &v return s } -// The returned result of the corresponding request. -type GetDistributionOutput struct { - _ struct{} `type:"structure" payload:"Distribution"` +// SetStatus sets the Status field's value. +func (s *InvalidationSummary) SetStatus(v string) *InvalidationSummary { + s.Status = &v + return s +} + +// A complex type that lists the active CloudFront key pairs, if any, that are +// associated with AwsAccountNumber. +// +// For more information, see ActiveTrustedSigners (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ActiveTrustedSigners.html). +type KeyPairIds struct { + _ struct{} `type:"structure"` - // The distribution's information. - Distribution *Distribution `type:"structure"` + // A complex type that lists the active CloudFront key pairs, if any, that are + // associated with AwsAccountNumber. + // + // For more information, see ActiveTrustedSigners (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ActiveTrustedSigners.html). + Items []*string `locationNameList:"KeyPairId" type:"list"` - // The current version of the distribution's information. For example: E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` + // The number of active CloudFront key pairs for AwsAccountNumber. + // + // For more information, see ActiveTrustedSigners (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ActiveTrustedSigners.html). 
+ // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s GetDistributionOutput) String() string { +func (s KeyPairIds) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDistributionOutput) GoString() string { +func (s KeyPairIds) GoString() string { return s.String() } -// SetDistribution sets the Distribution field's value. -func (s *GetDistributionOutput) SetDistribution(v *Distribution) *GetDistributionOutput { - s.Distribution = v +// SetItems sets the Items field's value. +func (s *KeyPairIds) SetItems(v []*string) *KeyPairIds { + s.Items = v return s } -// SetETag sets the ETag field's value. -func (s *GetDistributionOutput) SetETag(v string) *GetDistributionOutput { - s.ETag = &v +// SetQuantity sets the Quantity field's value. +func (s *KeyPairIds) SetQuantity(v int64) *KeyPairIds { + s.Quantity = &v return s } -type GetFieldLevelEncryptionConfigInput struct { - _ struct{} `locationName:"GetFieldLevelEncryptionConfigRequest" type:"structure"` +// Contains information about the Amazon Kinesis data stream where you are sending +// real-time log data. +type KinesisStreamConfig struct { + _ struct{} `type:"structure"` - // Request the ID for the field-level encryption configuration information. + // The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) + // role that CloudFront can use to send real-time log data to your Kinesis data + // stream. // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + // For more information the IAM role, see Real-time log configuration IAM role + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/real-time-logs.html#understand-real-time-log-config-iam-role) + // in the Amazon CloudFront Developer Guide. + // + // RoleARN is a required field + RoleARN *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Kinesis data stream where you are sending + // real-time log data. + // + // StreamARN is a required field + StreamARN *string `type:"string" required:"true"` } // String returns the string representation -func (s GetFieldLevelEncryptionConfigInput) String() string { +func (s KinesisStreamConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetFieldLevelEncryptionConfigInput) GoString() string { +func (s KinesisStreamConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetFieldLevelEncryptionConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetFieldLevelEncryptionConfigInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) +func (s *KinesisStreamConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KinesisStreamConfig"} + if s.RoleARN == nil { + invalidParams.Add(request.NewErrParamRequired("RoleARN")) } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + if s.StreamARN == nil { + invalidParams.Add(request.NewErrParamRequired("StreamARN")) } if invalidParams.Len() > 0 { @@ -9886,72 +15167,76 @@ func (s *GetFieldLevelEncryptionConfigInput) Validate() error { return nil } -// SetId sets the Id field's value. 
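// A short sketch, independent of the generated code above: constructing a
// KinesisStreamConfig with the fluent setters and checking it with Validate.
// Both ARNs are placeholders; in practice the struct is embedded in an endpoint
// of a real-time log configuration rather than used on its own.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	cfg := (&cloudfront.KinesisStreamConfig{}).
		// IAM role that CloudFront assumes to write to the stream (placeholder ARN).
		SetRoleARN("arn:aws:iam::123456789012:role/CloudFrontRealtimeLogRole").
		// Destination Kinesis data stream (placeholder ARN).
		SetStreamARN("arn:aws:kinesis:us-east-1:123456789012:stream/cf-realtime-logs")

	// Validate reports missing required fields (RoleARN, StreamARN) before any API call.
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid config:", err)
		return
	}
	fmt.Println("kinesis stream config is valid")
}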
-func (s *GetFieldLevelEncryptionConfigInput) SetId(v string) *GetFieldLevelEncryptionConfigInput { - s.Id = &v +// SetRoleARN sets the RoleARN field's value. +func (s *KinesisStreamConfig) SetRoleARN(v string) *KinesisStreamConfig { + s.RoleARN = &v return s } -type GetFieldLevelEncryptionConfigOutput struct { - _ struct{} `type:"structure" payload:"FieldLevelEncryptionConfig"` - - // The current version of the field level encryption configuration. For example: - // E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // Return the field-level encryption configuration information. - FieldLevelEncryptionConfig *FieldLevelEncryptionConfig `type:"structure"` -} - -// String returns the string representation -func (s GetFieldLevelEncryptionConfigOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetFieldLevelEncryptionConfigOutput) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *GetFieldLevelEncryptionConfigOutput) SetETag(v string) *GetFieldLevelEncryptionConfigOutput { - s.ETag = &v +// SetStreamARN sets the StreamARN field's value. +func (s *KinesisStreamConfig) SetStreamARN(v string) *KinesisStreamConfig { + s.StreamARN = &v return s } -// SetFieldLevelEncryptionConfig sets the FieldLevelEncryptionConfig field's value. -func (s *GetFieldLevelEncryptionConfigOutput) SetFieldLevelEncryptionConfig(v *FieldLevelEncryptionConfig) *GetFieldLevelEncryptionConfigOutput { - s.FieldLevelEncryptionConfig = v - return s -} +// A complex type that contains a Lambda function association. +type LambdaFunctionAssociation struct { + _ struct{} `type:"structure"` -type GetFieldLevelEncryptionInput struct { - _ struct{} `locationName:"GetFieldLevelEncryptionRequest" type:"structure"` + // Specifies the event type that triggers a Lambda function invocation. You + // can specify the following values: + // + // * viewer-request: The function executes when CloudFront receives a request + // from a viewer and before it checks to see whether the requested object + // is in the edge cache. + // + // * origin-request: The function executes only when CloudFront sends a request + // to your origin. When the requested object is in the edge cache, the function + // doesn't execute. + // + // * origin-response: The function executes after CloudFront receives a response + // from the origin and before it caches the object in the response. When + // the requested object is in the edge cache, the function doesn't execute. + // + // * viewer-response: The function executes before CloudFront returns the + // requested object to the viewer. The function executes regardless of whether + // the object was already in the edge cache. If the origin returns an HTTP + // status code other than HTTP 200 (OK), the function doesn't execute. + // + // EventType is a required field + EventType *string `type:"string" required:"true" enum:"EventType"` - // Request the ID for the field-level encryption configuration information. + // A flag that allows a Lambda function to have read access to the body content. + // For more information, see Accessing the Request Body by Choosing the Include + // Body Option (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-include-body-access.html) + // in the Amazon CloudFront Developer Guide. + IncludeBody *bool `type:"boolean"` + + // The ARN of the Lambda function. 
You must specify the ARN of a function version; + // you can't specify a Lambda alias or $LATEST. // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + // LambdaFunctionARN is a required field + LambdaFunctionARN *string `type:"string" required:"true"` } // String returns the string representation -func (s GetFieldLevelEncryptionInput) String() string { +func (s LambdaFunctionAssociation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetFieldLevelEncryptionInput) GoString() string { +func (s LambdaFunctionAssociation) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetFieldLevelEncryptionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetFieldLevelEncryptionInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) +func (s *LambdaFunctionAssociation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionAssociation"} + if s.EventType == nil { + invalidParams.Add(request.NewErrParamRequired("EventType")) } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + if s.LambdaFunctionARN == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionARN")) } if invalidParams.Len() > 0 { @@ -9960,231 +15245,263 @@ func (s *GetFieldLevelEncryptionInput) Validate() error { return nil } -// SetId sets the Id field's value. -func (s *GetFieldLevelEncryptionInput) SetId(v string) *GetFieldLevelEncryptionInput { - s.Id = &v +// SetEventType sets the EventType field's value. +func (s *LambdaFunctionAssociation) SetEventType(v string) *LambdaFunctionAssociation { + s.EventType = &v return s } -type GetFieldLevelEncryptionOutput struct { - _ struct{} `type:"structure" payload:"FieldLevelEncryption"` +// SetIncludeBody sets the IncludeBody field's value. +func (s *LambdaFunctionAssociation) SetIncludeBody(v bool) *LambdaFunctionAssociation { + s.IncludeBody = &v + return s +} - // The current version of the field level encryption configuration. For example: - // E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` +// SetLambdaFunctionARN sets the LambdaFunctionARN field's value. +func (s *LambdaFunctionAssociation) SetLambdaFunctionARN(v string) *LambdaFunctionAssociation { + s.LambdaFunctionARN = &v + return s +} - // Return the field-level encryption configuration information. - FieldLevelEncryption *FieldLevelEncryption `type:"structure"` +// A complex type that specifies a list of Lambda functions associations for +// a cache behavior. +// +// If you want to invoke one or more Lambda functions triggered by requests +// that match the PathPattern of the cache behavior, specify the applicable +// values for Quantity and Items. Note that there can be up to 4 LambdaFunctionAssociation +// items in this list (one for each possible value of EventType) and each EventType +// can be associated with the Lambda function only once. +// +// If you don't want to invoke any Lambda functions for the requests that match +// PathPattern, specify 0 for Quantity and omit Items. +type LambdaFunctionAssociations struct { + _ struct{} `type:"structure"` + + // Optional: A complex type that contains LambdaFunctionAssociation items for + // this cache behavior. If Quantity is 0, you can omit Items. 
+ Items []*LambdaFunctionAssociation `locationNameList:"LambdaFunctionAssociation" type:"list"` + + // The number of Lambda function associations for this cache behavior. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s GetFieldLevelEncryptionOutput) String() string { +func (s LambdaFunctionAssociations) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetFieldLevelEncryptionOutput) GoString() string { +func (s LambdaFunctionAssociations) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *GetFieldLevelEncryptionOutput) SetETag(v string) *GetFieldLevelEncryptionOutput { - s.ETag = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaFunctionAssociations) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionAssociations"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetItems sets the Items field's value. +func (s *LambdaFunctionAssociations) SetItems(v []*LambdaFunctionAssociation) *LambdaFunctionAssociations { + s.Items = v return s } -// SetFieldLevelEncryption sets the FieldLevelEncryption field's value. -func (s *GetFieldLevelEncryptionOutput) SetFieldLevelEncryption(v *FieldLevelEncryption) *GetFieldLevelEncryptionOutput { - s.FieldLevelEncryption = v +// SetQuantity sets the Quantity field's value. +func (s *LambdaFunctionAssociations) SetQuantity(v int64) *LambdaFunctionAssociations { + s.Quantity = &v return s } -type GetFieldLevelEncryptionProfileConfigInput struct { - _ struct{} `locationName:"GetFieldLevelEncryptionProfileConfigRequest" type:"structure"` +type ListCachePoliciesInput struct { + _ struct{} `locationName:"ListCachePoliciesRequest" type:"structure"` - // Get the ID for the field-level encryption profile configuration information. + // Use this field when paginating results to indicate where to begin in your + // list of cache policies. The response includes cache policies in the list + // that occur after the marker. To get the next page of the list, set this field’s + // value to the value of NextMarker from the current page’s response. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of cache policies that you want in the response. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` + + // A filter to return only the specified kinds of cache policies. Valid values + // are: // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + // * managed – Returns only the managed policies created by AWS. + // + // * custom – Returns only the custom policies created in your AWS account. 
+ Type *string `location:"querystring" locationName:"Type" type:"string" enum:"CachePolicyType"` } // String returns the string representation -func (s GetFieldLevelEncryptionProfileConfigInput) String() string { +func (s ListCachePoliciesInput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation -func (s GetFieldLevelEncryptionProfileConfigInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetFieldLevelEncryptionProfileConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetFieldLevelEncryptionProfileConfigInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) - } +// GoString returns the string representation +func (s ListCachePoliciesInput) GoString() string { + return s.String() +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMarker sets the Marker field's value. +func (s *ListCachePoliciesInput) SetMarker(v string) *ListCachePoliciesInput { + s.Marker = &v + return s } -// SetId sets the Id field's value. -func (s *GetFieldLevelEncryptionProfileConfigInput) SetId(v string) *GetFieldLevelEncryptionProfileConfigInput { - s.Id = &v +// SetMaxItems sets the MaxItems field's value. +func (s *ListCachePoliciesInput) SetMaxItems(v int64) *ListCachePoliciesInput { + s.MaxItems = &v return s } -type GetFieldLevelEncryptionProfileConfigOutput struct { - _ struct{} `type:"structure" payload:"FieldLevelEncryptionProfileConfig"` +// SetType sets the Type field's value. +func (s *ListCachePoliciesInput) SetType(v string) *ListCachePoliciesInput { + s.Type = &v + return s +} - // The current version of the field-level encryption profile configuration result. - // For example: E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` +type ListCachePoliciesOutput struct { + _ struct{} `type:"structure" payload:"CachePolicyList"` - // Return the field-level encryption profile configuration information. - FieldLevelEncryptionProfileConfig *FieldLevelEncryptionProfileConfig `type:"structure"` + // A list of cache policies. + CachePolicyList *CachePolicyList `type:"structure"` } // String returns the string representation -func (s GetFieldLevelEncryptionProfileConfigOutput) String() string { +func (s ListCachePoliciesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetFieldLevelEncryptionProfileConfigOutput) GoString() string { +func (s ListCachePoliciesOutput) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *GetFieldLevelEncryptionProfileConfigOutput) SetETag(v string) *GetFieldLevelEncryptionProfileConfigOutput { - s.ETag = &v +// SetCachePolicyList sets the CachePolicyList field's value. +func (s *ListCachePoliciesOutput) SetCachePolicyList(v *CachePolicyList) *ListCachePoliciesOutput { + s.CachePolicyList = v return s } -// SetFieldLevelEncryptionProfileConfig sets the FieldLevelEncryptionProfileConfig field's value. -func (s *GetFieldLevelEncryptionProfileConfigOutput) SetFieldLevelEncryptionProfileConfig(v *FieldLevelEncryptionProfileConfig) *GetFieldLevelEncryptionProfileConfigOutput { - s.FieldLevelEncryptionProfileConfig = v - return s -} +// The request to list origin access identities. 
+type ListCloudFrontOriginAccessIdentitiesInput struct { + _ struct{} `locationName:"ListCloudFrontOriginAccessIdentitiesRequest" type:"structure"` -type GetFieldLevelEncryptionProfileInput struct { - _ struct{} `locationName:"GetFieldLevelEncryptionProfileRequest" type:"structure"` + // Use this when paginating results to indicate where to begin in your list + // of origin access identities. The results include identities in the list that + // occur after the marker. To get the next page of results, set the Marker to + // the value of the NextMarker from the current page's response (which is also + // the ID of the last identity on that page). + Marker *string `location:"querystring" locationName:"Marker" type:"string"` - // Get the ID for the field-level encryption profile information. - // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + // The maximum number of origin access identities you want in the response body. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` } // String returns the string representation -func (s GetFieldLevelEncryptionProfileInput) String() string { +func (s ListCloudFrontOriginAccessIdentitiesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetFieldLevelEncryptionProfileInput) GoString() string { +func (s ListCloudFrontOriginAccessIdentitiesInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetFieldLevelEncryptionProfileInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetFieldLevelEncryptionProfileInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMarker sets the Marker field's value. +func (s *ListCloudFrontOriginAccessIdentitiesInput) SetMarker(v string) *ListCloudFrontOriginAccessIdentitiesInput { + s.Marker = &v + return s } -// SetId sets the Id field's value. -func (s *GetFieldLevelEncryptionProfileInput) SetId(v string) *GetFieldLevelEncryptionProfileInput { - s.Id = &v +// SetMaxItems sets the MaxItems field's value. +func (s *ListCloudFrontOriginAccessIdentitiesInput) SetMaxItems(v int64) *ListCloudFrontOriginAccessIdentitiesInput { + s.MaxItems = &v return s } -type GetFieldLevelEncryptionProfileOutput struct { - _ struct{} `type:"structure" payload:"FieldLevelEncryptionProfile"` - - // The current version of the field level encryption profile. For example: E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` +// The returned result of the corresponding request. +type ListCloudFrontOriginAccessIdentitiesOutput struct { + _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityList"` - // Return the field-level encryption profile information. - FieldLevelEncryptionProfile *FieldLevelEncryptionProfile `type:"structure"` + // The CloudFrontOriginAccessIdentityList type. 
+ CloudFrontOriginAccessIdentityList *OriginAccessIdentityList `type:"structure"` } // String returns the string representation -func (s GetFieldLevelEncryptionProfileOutput) String() string { +func (s ListCloudFrontOriginAccessIdentitiesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetFieldLevelEncryptionProfileOutput) GoString() string { +func (s ListCloudFrontOriginAccessIdentitiesOutput) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *GetFieldLevelEncryptionProfileOutput) SetETag(v string) *GetFieldLevelEncryptionProfileOutput { - s.ETag = &v - return s -} - -// SetFieldLevelEncryptionProfile sets the FieldLevelEncryptionProfile field's value. -func (s *GetFieldLevelEncryptionProfileOutput) SetFieldLevelEncryptionProfile(v *FieldLevelEncryptionProfile) *GetFieldLevelEncryptionProfileOutput { - s.FieldLevelEncryptionProfile = v +// SetCloudFrontOriginAccessIdentityList sets the CloudFrontOriginAccessIdentityList field's value. +func (s *ListCloudFrontOriginAccessIdentitiesOutput) SetCloudFrontOriginAccessIdentityList(v *OriginAccessIdentityList) *ListCloudFrontOriginAccessIdentitiesOutput { + s.CloudFrontOriginAccessIdentityList = v return s } -// The request to get an invalidation's information. -type GetInvalidationInput struct { - _ struct{} `locationName:"GetInvalidationRequest" type:"structure"` +type ListDistributionsByCachePolicyIdInput struct { + _ struct{} `locationName:"ListDistributionsByCachePolicyIdRequest" type:"structure"` - // The distribution's ID. + // The ID of the cache policy whose associated distribution IDs you want to + // list. // - // DistributionId is a required field - DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` + // CachePolicyId is a required field + CachePolicyId *string `location:"uri" locationName:"CachePolicyId" type:"string" required:"true"` - // The identifier for the invalidation request, for example, IDFDVBD632BHDS5. - // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + // Use this field when paginating results to indicate where to begin in your + // list of distribution IDs. The response includes distribution IDs in the list + // that occur after the marker. To get the next page of the list, set this field’s + // value to the value of NextMarker from the current page’s response. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of distribution IDs that you want in the response. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` } // String returns the string representation -func (s GetInvalidationInput) String() string { +func (s ListDistributionsByCachePolicyIdInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetInvalidationInput) GoString() string { +func (s ListDistributionsByCachePolicyIdInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetInvalidationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetInvalidationInput"} - if s.DistributionId == nil { - invalidParams.Add(request.NewErrParamRequired("DistributionId")) - } - if s.DistributionId != nil && len(*s.DistributionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DistributionId", 1)) - } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) +func (s *ListDistributionsByCachePolicyIdInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDistributionsByCachePolicyIdInput"} + if s.CachePolicyId == nil { + invalidParams.Add(request.NewErrParamRequired("CachePolicyId")) } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + if s.CachePolicyId != nil && len(*s.CachePolicyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CachePolicyId", 1)) } if invalidParams.Len() > 0 { @@ -10193,70 +15510,84 @@ func (s *GetInvalidationInput) Validate() error { return nil } -// SetDistributionId sets the DistributionId field's value. -func (s *GetInvalidationInput) SetDistributionId(v string) *GetInvalidationInput { - s.DistributionId = &v +// SetCachePolicyId sets the CachePolicyId field's value. +func (s *ListDistributionsByCachePolicyIdInput) SetCachePolicyId(v string) *ListDistributionsByCachePolicyIdInput { + s.CachePolicyId = &v return s } -// SetId sets the Id field's value. -func (s *GetInvalidationInput) SetId(v string) *GetInvalidationInput { - s.Id = &v +// SetMarker sets the Marker field's value. +func (s *ListDistributionsByCachePolicyIdInput) SetMarker(v string) *ListDistributionsByCachePolicyIdInput { + s.Marker = &v return s } -// The returned result of the corresponding request. -type GetInvalidationOutput struct { - _ struct{} `type:"structure" payload:"Invalidation"` +// SetMaxItems sets the MaxItems field's value. +func (s *ListDistributionsByCachePolicyIdInput) SetMaxItems(v int64) *ListDistributionsByCachePolicyIdInput { + s.MaxItems = &v + return s +} - // The invalidation's information. For more information, see Invalidation Complex - // Type (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/InvalidationDatatype.html). - Invalidation *Invalidation `type:"structure"` +type ListDistributionsByCachePolicyIdOutput struct { + _ struct{} `type:"structure" payload:"DistributionIdList"` + + // A list of distribution IDs. + DistributionIdList *DistributionIdList `type:"structure"` } // String returns the string representation -func (s GetInvalidationOutput) String() string { +func (s ListDistributionsByCachePolicyIdOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetInvalidationOutput) GoString() string { +func (s ListDistributionsByCachePolicyIdOutput) GoString() string { return s.String() } -// SetInvalidation sets the Invalidation field's value. -func (s *GetInvalidationOutput) SetInvalidation(v *Invalidation) *GetInvalidationOutput { - s.Invalidation = v +// SetDistributionIdList sets the DistributionIdList field's value. 
+func (s *ListDistributionsByCachePolicyIdOutput) SetDistributionIdList(v *DistributionIdList) *ListDistributionsByCachePolicyIdOutput { + s.DistributionIdList = v return s } -type GetPublicKeyConfigInput struct { - _ struct{} `locationName:"GetPublicKeyConfigRequest" type:"structure"` +type ListDistributionsByOriginRequestPolicyIdInput struct { + _ struct{} `locationName:"ListDistributionsByOriginRequestPolicyIdRequest" type:"structure"` - // Request the ID for the public key configuration. + // Use this field when paginating results to indicate where to begin in your + // list of distribution IDs. The response includes distribution IDs in the list + // that occur after the marker. To get the next page of the list, set this field’s + // value to the value of NextMarker from the current page’s response. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of distribution IDs that you want in the response. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` + + // The ID of the origin request policy whose associated distribution IDs you + // want to list. // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + // OriginRequestPolicyId is a required field + OriginRequestPolicyId *string `location:"uri" locationName:"OriginRequestPolicyId" type:"string" required:"true"` } // String returns the string representation -func (s GetPublicKeyConfigInput) String() string { +func (s ListDistributionsByOriginRequestPolicyIdInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetPublicKeyConfigInput) GoString() string { +func (s ListDistributionsByOriginRequestPolicyIdInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetPublicKeyConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPublicKeyConfigInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) +func (s *ListDistributionsByOriginRequestPolicyIdInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDistributionsByOriginRequestPolicyIdInput"} + if s.OriginRequestPolicyId == nil { + invalidParams.Add(request.NewErrParamRequired("OriginRequestPolicyId")) } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + if s.OriginRequestPolicyId != nil && len(*s.OriginRequestPolicyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OriginRequestPolicyId", 1)) } if invalidParams.Len() > 0 { @@ -10265,145 +15596,167 @@ func (s *GetPublicKeyConfigInput) Validate() error { return nil } -// SetId sets the Id field's value. -func (s *GetPublicKeyConfigInput) SetId(v string) *GetPublicKeyConfigInput { - s.Id = &v +// SetMarker sets the Marker field's value. +func (s *ListDistributionsByOriginRequestPolicyIdInput) SetMarker(v string) *ListDistributionsByOriginRequestPolicyIdInput { + s.Marker = &v return s } -type GetPublicKeyConfigOutput struct { - _ struct{} `type:"structure" payload:"PublicKeyConfig"` +// SetMaxItems sets the MaxItems field's value. +func (s *ListDistributionsByOriginRequestPolicyIdInput) SetMaxItems(v int64) *ListDistributionsByOriginRequestPolicyIdInput { + s.MaxItems = &v + return s +} - // The current version of the public key configuration. For example: E2QWRUHAPOMQZL. 
- ETag *string `location:"header" locationName:"ETag" type:"string"` +// SetOriginRequestPolicyId sets the OriginRequestPolicyId field's value. +func (s *ListDistributionsByOriginRequestPolicyIdInput) SetOriginRequestPolicyId(v string) *ListDistributionsByOriginRequestPolicyIdInput { + s.OriginRequestPolicyId = &v + return s +} - // Return the result for the public key configuration. - PublicKeyConfig *PublicKeyConfig `type:"structure"` +type ListDistributionsByOriginRequestPolicyIdOutput struct { + _ struct{} `type:"structure" payload:"DistributionIdList"` + + // A list of distribution IDs. + DistributionIdList *DistributionIdList `type:"structure"` } // String returns the string representation -func (s GetPublicKeyConfigOutput) String() string { +func (s ListDistributionsByOriginRequestPolicyIdOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetPublicKeyConfigOutput) GoString() string { +func (s ListDistributionsByOriginRequestPolicyIdOutput) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *GetPublicKeyConfigOutput) SetETag(v string) *GetPublicKeyConfigOutput { - s.ETag = &v +// SetDistributionIdList sets the DistributionIdList field's value. +func (s *ListDistributionsByOriginRequestPolicyIdOutput) SetDistributionIdList(v *DistributionIdList) *ListDistributionsByOriginRequestPolicyIdOutput { + s.DistributionIdList = v return s } -// SetPublicKeyConfig sets the PublicKeyConfig field's value. -func (s *GetPublicKeyConfigOutput) SetPublicKeyConfig(v *PublicKeyConfig) *GetPublicKeyConfigOutput { - s.PublicKeyConfig = v - return s -} +type ListDistributionsByRealtimeLogConfigInput struct { + _ struct{} `locationName:"ListDistributionsByRealtimeLogConfigRequest" type:"structure" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` -type GetPublicKeyInput struct { - _ struct{} `locationName:"GetPublicKeyRequest" type:"structure"` + // Use this field when paginating results to indicate where to begin in your + // list of distributions. The response includes distributions in the list that + // occur after the marker. To get the next page of the list, set this field’s + // value to the value of NextMarker from the current page’s response. + Marker *string `type:"string"` - // Request the ID for the public key. - // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + // The maximum number of distributions that you want in the response. + MaxItems *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) of the real-time log configuration whose associated + // distributions you want to list. + RealtimeLogConfigArn *string `type:"string"` + + // The name of the real-time log configuration whose associated distributions + // you want to list. + RealtimeLogConfigName *string `type:"string"` } // String returns the string representation -func (s GetPublicKeyInput) String() string { +func (s ListDistributionsByRealtimeLogConfigInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetPublicKeyInput) GoString() string { +func (s ListDistributionsByRealtimeLogConfigInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetPublicKeyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPublicKeyInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) - } +// SetMarker sets the Marker field's value. +func (s *ListDistributionsByRealtimeLogConfigInput) SetMarker(v string) *ListDistributionsByRealtimeLogConfigInput { + s.Marker = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMaxItems sets the MaxItems field's value. +func (s *ListDistributionsByRealtimeLogConfigInput) SetMaxItems(v int64) *ListDistributionsByRealtimeLogConfigInput { + s.MaxItems = &v + return s } -// SetId sets the Id field's value. -func (s *GetPublicKeyInput) SetId(v string) *GetPublicKeyInput { - s.Id = &v +// SetRealtimeLogConfigArn sets the RealtimeLogConfigArn field's value. +func (s *ListDistributionsByRealtimeLogConfigInput) SetRealtimeLogConfigArn(v string) *ListDistributionsByRealtimeLogConfigInput { + s.RealtimeLogConfigArn = &v return s } -type GetPublicKeyOutput struct { - _ struct{} `type:"structure" payload:"PublicKey"` +// SetRealtimeLogConfigName sets the RealtimeLogConfigName field's value. +func (s *ListDistributionsByRealtimeLogConfigInput) SetRealtimeLogConfigName(v string) *ListDistributionsByRealtimeLogConfigInput { + s.RealtimeLogConfigName = &v + return s +} - // The current version of the public key. For example: E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` +type ListDistributionsByRealtimeLogConfigOutput struct { + _ struct{} `type:"structure" payload:"DistributionList"` - // Return the public key. - PublicKey *PublicKey `type:"structure"` + // A distribution list. + DistributionList *DistributionList `type:"structure"` } // String returns the string representation -func (s GetPublicKeyOutput) String() string { +func (s ListDistributionsByRealtimeLogConfigOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetPublicKeyOutput) GoString() string { - return s.String() -} - -// SetETag sets the ETag field's value. -func (s *GetPublicKeyOutput) SetETag(v string) *GetPublicKeyOutput { - s.ETag = &v - return s +func (s ListDistributionsByRealtimeLogConfigOutput) GoString() string { + return s.String() } -// SetPublicKey sets the PublicKey field's value. -func (s *GetPublicKeyOutput) SetPublicKey(v *PublicKey) *GetPublicKeyOutput { - s.PublicKey = v +// SetDistributionList sets the DistributionList field's value. +func (s *ListDistributionsByRealtimeLogConfigOutput) SetDistributionList(v *DistributionList) *ListDistributionsByRealtimeLogConfigOutput { + s.DistributionList = v return s } -// To request to get a streaming distribution configuration. -type GetStreamingDistributionConfigInput struct { - _ struct{} `locationName:"GetStreamingDistributionConfigRequest" type:"structure"` +// The request to list distributions that are associated with a specified AWS +// WAF web ACL. +type ListDistributionsByWebACLIdInput struct { + _ struct{} `locationName:"ListDistributionsByWebACLIdRequest" type:"structure"` - // The streaming distribution's ID. + // Use Marker and MaxItems to control pagination of results. If you have more + // than MaxItems distributions that satisfy the request, the response includes + // a NextMarker element. To get the next page of results, submit another request. 
+ // For the value of Marker, specify the value of NextMarker from the last response. + // (For the first request, omit Marker.) + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of distributions that you want CloudFront to return in + // the response body. The maximum and default values are both 100. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` + + // The ID of the AWS WAF web ACL that you want to list the associated distributions. + // If you specify "null" for the ID, the request returns a list of the distributions + // that aren't associated with a web ACL. // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + // WebACLId is a required field + WebACLId *string `location:"uri" locationName:"WebACLId" type:"string" required:"true"` } // String returns the string representation -func (s GetStreamingDistributionConfigInput) String() string { +func (s ListDistributionsByWebACLIdInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetStreamingDistributionConfigInput) GoString() string { +func (s ListDistributionsByWebACLIdInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetStreamingDistributionConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetStreamingDistributionConfigInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) +func (s *ListDistributionsByWebACLIdInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDistributionsByWebACLIdInput"} + if s.WebACLId == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLId")) } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + if s.WebACLId != nil && len(*s.WebACLId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) } if invalidParams.Len() > 0 { @@ -10412,320 +15765,273 @@ func (s *GetStreamingDistributionConfigInput) Validate() error { return nil } -// SetId sets the Id field's value. -func (s *GetStreamingDistributionConfigInput) SetId(v string) *GetStreamingDistributionConfigInput { - s.Id = &v +// SetMarker sets the Marker field's value. +func (s *ListDistributionsByWebACLIdInput) SetMarker(v string) *ListDistributionsByWebACLIdInput { + s.Marker = &v return s } -// The returned result of the corresponding request. -type GetStreamingDistributionConfigOutput struct { - _ struct{} `type:"structure" payload:"StreamingDistributionConfig"` +// SetMaxItems sets the MaxItems field's value. +func (s *ListDistributionsByWebACLIdInput) SetMaxItems(v int64) *ListDistributionsByWebACLIdInput { + s.MaxItems = &v + return s +} - // The current version of the configuration. For example: E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` +// SetWebACLId sets the WebACLId field's value. +func (s *ListDistributionsByWebACLIdInput) SetWebACLId(v string) *ListDistributionsByWebACLIdInput { + s.WebACLId = &v + return s +} - // The streaming distribution's configuration information. - StreamingDistributionConfig *StreamingDistributionConfig `type:"structure"` +// The response to a request to list the distributions that are associated with +// a specified AWS WAF web ACL. 
+type ListDistributionsByWebACLIdOutput struct { + _ struct{} `type:"structure" payload:"DistributionList"` + + // The DistributionList type. + DistributionList *DistributionList `type:"structure"` } // String returns the string representation -func (s GetStreamingDistributionConfigOutput) String() string { +func (s ListDistributionsByWebACLIdOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetStreamingDistributionConfigOutput) GoString() string { +func (s ListDistributionsByWebACLIdOutput) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *GetStreamingDistributionConfigOutput) SetETag(v string) *GetStreamingDistributionConfigOutput { - s.ETag = &v +// SetDistributionList sets the DistributionList field's value. +func (s *ListDistributionsByWebACLIdOutput) SetDistributionList(v *DistributionList) *ListDistributionsByWebACLIdOutput { + s.DistributionList = v return s } -// SetStreamingDistributionConfig sets the StreamingDistributionConfig field's value. -func (s *GetStreamingDistributionConfigOutput) SetStreamingDistributionConfig(v *StreamingDistributionConfig) *GetStreamingDistributionConfigOutput { - s.StreamingDistributionConfig = v - return s -} +// The request to list your distributions. +type ListDistributionsInput struct { + _ struct{} `locationName:"ListDistributionsRequest" type:"structure"` -// The request to get a streaming distribution's information. -type GetStreamingDistributionInput struct { - _ struct{} `locationName:"GetStreamingDistributionRequest" type:"structure"` + // Use this when paginating results to indicate where to begin in your list + // of distributions. The results include distributions in the list that occur + // after the marker. To get the next page of results, set the Marker to the + // value of the NextMarker from the current page's response (which is also the + // ID of the last distribution on that page). + Marker *string `location:"querystring" locationName:"Marker" type:"string"` - // The streaming distribution's ID. - // - // Id is a required field - Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + // The maximum number of distributions you want in the response body. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` } // String returns the string representation -func (s GetStreamingDistributionInput) String() string { +func (s ListDistributionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetStreamingDistributionInput) GoString() string { +func (s ListDistributionsInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetStreamingDistributionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetStreamingDistributionInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } - if s.Id != nil && len(*s.Id) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Id", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMarker sets the Marker field's value. +func (s *ListDistributionsInput) SetMarker(v string) *ListDistributionsInput { + s.Marker = &v + return s } -// SetId sets the Id field's value. -func (s *GetStreamingDistributionInput) SetId(v string) *GetStreamingDistributionInput { - s.Id = &v +// SetMaxItems sets the MaxItems field's value. 
+func (s *ListDistributionsInput) SetMaxItems(v int64) *ListDistributionsInput { + s.MaxItems = &v return s } // The returned result of the corresponding request. -type GetStreamingDistributionOutput struct { - _ struct{} `type:"structure" payload:"StreamingDistribution"` - - // The current version of the streaming distribution's information. For example: - // E2QWRUHAPOMQZL. - ETag *string `location:"header" locationName:"ETag" type:"string"` +type ListDistributionsOutput struct { + _ struct{} `type:"structure" payload:"DistributionList"` - // The streaming distribution's information. - StreamingDistribution *StreamingDistribution `type:"structure"` + // The DistributionList type. + DistributionList *DistributionList `type:"structure"` } // String returns the string representation -func (s GetStreamingDistributionOutput) String() string { +func (s ListDistributionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetStreamingDistributionOutput) GoString() string { +func (s ListDistributionsOutput) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *GetStreamingDistributionOutput) SetETag(v string) *GetStreamingDistributionOutput { - s.ETag = &v - return s -} - -// SetStreamingDistribution sets the StreamingDistribution field's value. -func (s *GetStreamingDistributionOutput) SetStreamingDistribution(v *StreamingDistribution) *GetStreamingDistributionOutput { - s.StreamingDistribution = v +// SetDistributionList sets the DistributionList field's value. +func (s *ListDistributionsOutput) SetDistributionList(v *DistributionList) *ListDistributionsOutput { + s.DistributionList = v return s } -// A complex type that specifies the request headers, if any, that you want -// CloudFront to base caching on for this cache behavior. -// -// For the headers that you specify, CloudFront caches separate versions of -// a specified object based on the header values in viewer requests. For example, -// suppose viewer requests for logo.jpg contain a custom product header that -// has a value of either acme or apex, and you configure CloudFront to cache -// your content based on values in the product header. CloudFront forwards the -// product header to the origin and caches the response from the origin once -// for each header value. For more information about caching based on header -// values, see How CloudFront Forwards and Caches Headers (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/header-caching.html) -// in the Amazon CloudFront Developer Guide. -type Headers struct { - _ struct{} `type:"structure"` +type ListFieldLevelEncryptionConfigsInput struct { + _ struct{} `locationName:"ListFieldLevelEncryptionConfigsRequest" type:"structure"` - // A list that contains one Name element for each header that you want CloudFront - // to use for caching in this cache behavior. If Quantity is 0, omit Items. - Items []*string `locationNameList:"Name" type:"list"` + // Use this when paginating results to indicate where to begin in your list + // of configurations. The results include configurations in the list that occur + // after the marker. To get the next page of results, set the Marker to the + // value of the NextMarker from the current page's response (which is also the + // ID of the last configuration on that page). 
+ Marker *string `location:"querystring" locationName:"Marker" type:"string"` - // The number of different headers that you want CloudFront to base caching - // on for this cache behavior. You can configure each cache behavior in a web - // distribution to do one of the following: - // - // * Forward all headers to your origin: Specify 1 for Quantity and * for - // Name. CloudFront doesn't cache the objects that are associated with this - // cache behavior. Instead, CloudFront sends every request to the origin. - // - // * Forward a whitelist of headers you specify: Specify the number of headers - // that you want CloudFront to base caching on. Then specify the header names - // in Name elements. CloudFront caches your objects based on the values in - // the specified headers. - // - // * Forward only the default headers: Specify 0 for Quantity and omit Items. - // In this configuration, CloudFront doesn't cache based on the values in - // the request headers. - // - // Regardless of which option you choose, CloudFront forwards headers to your - // origin based on whether the origin is an S3 bucket or a custom origin. See - // the following documentation: - // - // * S3 bucket: See HTTP Request Headers That CloudFront Removes or Updates - // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/RequestAndResponseBehaviorS3Origin.html#request-s3-removed-headers) - // - // * Custom origin: See HTTP Request Headers and CloudFront Behavior (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/RequestAndResponseBehaviorCustomOrigin.html#request-custom-headers-behavior) - // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // The maximum number of field-level encryption configurations you want in the + // response body. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` } // String returns the string representation -func (s Headers) String() string { +func (s ListFieldLevelEncryptionConfigsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Headers) GoString() string { +func (s ListFieldLevelEncryptionConfigsInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *Headers) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Headers"} - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMarker sets the Marker field's value. +func (s *ListFieldLevelEncryptionConfigsInput) SetMarker(v string) *ListFieldLevelEncryptionConfigsInput { + s.Marker = &v + return s } -// SetItems sets the Items field's value. -func (s *Headers) SetItems(v []*string) *Headers { - s.Items = v +// SetMaxItems sets the MaxItems field's value. +func (s *ListFieldLevelEncryptionConfigsInput) SetMaxItems(v int64) *ListFieldLevelEncryptionConfigsInput { + s.MaxItems = &v return s } -// SetQuantity sets the Quantity field's value. -func (s *Headers) SetQuantity(v int64) *Headers { - s.Quantity = &v - return s +type ListFieldLevelEncryptionConfigsOutput struct { + _ struct{} `type:"structure" payload:"FieldLevelEncryptionList"` + + // Returns a list of all field-level encryption configurations that have been + // created in CloudFront for this account. + FieldLevelEncryptionList *FieldLevelEncryptionList `type:"structure"` } -// An invalidation. 
-type Invalidation struct { - _ struct{} `type:"structure"` +// String returns the string representation +func (s ListFieldLevelEncryptionConfigsOutput) String() string { + return awsutil.Prettify(s) +} - // The date and time the invalidation request was first made. - // - // CreateTime is a required field - CreateTime *time.Time `type:"timestamp" required:"true"` +// GoString returns the string representation +func (s ListFieldLevelEncryptionConfigsOutput) GoString() string { + return s.String() +} - // The identifier for the invalidation request. For example: IDFDVBD632BHDS5. - // - // Id is a required field - Id *string `type:"string" required:"true"` +// SetFieldLevelEncryptionList sets the FieldLevelEncryptionList field's value. +func (s *ListFieldLevelEncryptionConfigsOutput) SetFieldLevelEncryptionList(v *FieldLevelEncryptionList) *ListFieldLevelEncryptionConfigsOutput { + s.FieldLevelEncryptionList = v + return s +} - // The current invalidation information for the batch request. - // - // InvalidationBatch is a required field - InvalidationBatch *InvalidationBatch `type:"structure" required:"true"` +type ListFieldLevelEncryptionProfilesInput struct { + _ struct{} `locationName:"ListFieldLevelEncryptionProfilesRequest" type:"structure"` - // The status of the invalidation request. When the invalidation batch is finished, - // the status is Completed. - // - // Status is a required field - Status *string `type:"string" required:"true"` + // Use this when paginating results to indicate where to begin in your list + // of profiles. The results include profiles in the list that occur after the + // marker. To get the next page of results, set the Marker to the value of the + // NextMarker from the current page's response (which is also the ID of the + // last profile on that page). + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of field-level encryption profiles you want in the response + // body. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` } // String returns the string representation -func (s Invalidation) String() string { +func (s ListFieldLevelEncryptionProfilesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Invalidation) GoString() string { +func (s ListFieldLevelEncryptionProfilesInput) GoString() string { return s.String() } -// SetCreateTime sets the CreateTime field's value. -func (s *Invalidation) SetCreateTime(v time.Time) *Invalidation { - s.CreateTime = &v +// SetMarker sets the Marker field's value. +func (s *ListFieldLevelEncryptionProfilesInput) SetMarker(v string) *ListFieldLevelEncryptionProfilesInput { + s.Marker = &v return s } -// SetId sets the Id field's value. -func (s *Invalidation) SetId(v string) *Invalidation { - s.Id = &v +// SetMaxItems sets the MaxItems field's value. +func (s *ListFieldLevelEncryptionProfilesInput) SetMaxItems(v int64) *ListFieldLevelEncryptionProfilesInput { + s.MaxItems = &v return s } -// SetInvalidationBatch sets the InvalidationBatch field's value. -func (s *Invalidation) SetInvalidationBatch(v *InvalidationBatch) *Invalidation { - s.InvalidationBatch = v - return s +type ListFieldLevelEncryptionProfilesOutput struct { + _ struct{} `type:"structure" payload:"FieldLevelEncryptionProfileList"` + + // Returns a list of the field-level encryption profiles that have been created + // in CloudFront for this account. 
+ FieldLevelEncryptionProfileList *FieldLevelEncryptionProfileList `type:"structure"` } -// SetStatus sets the Status field's value. -func (s *Invalidation) SetStatus(v string) *Invalidation { - s.Status = &v +// String returns the string representation +func (s ListFieldLevelEncryptionProfilesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFieldLevelEncryptionProfilesOutput) GoString() string { + return s.String() +} + +// SetFieldLevelEncryptionProfileList sets the FieldLevelEncryptionProfileList field's value. +func (s *ListFieldLevelEncryptionProfilesOutput) SetFieldLevelEncryptionProfileList(v *FieldLevelEncryptionProfileList) *ListFieldLevelEncryptionProfilesOutput { + s.FieldLevelEncryptionProfileList = v return s } -// An invalidation batch. -type InvalidationBatch struct { - _ struct{} `type:"structure"` +// The request to list invalidations. +type ListInvalidationsInput struct { + _ struct{} `locationName:"ListInvalidationsRequest" type:"structure"` - // A value that you specify to uniquely identify an invalidation request. CloudFront - // uses the value to prevent you from accidentally resubmitting an identical - // request. Whenever you create a new invalidation request, you must specify - // a new value for CallerReference and change other values in the request as - // applicable. One way to ensure that the value of CallerReference is unique - // is to use a timestamp, for example, 20120301090000. - // - // If you make a second invalidation request with the same value for CallerReference, - // and if the rest of the request is the same, CloudFront doesn't create a new - // invalidation request. Instead, CloudFront returns information about the invalidation - // request that you previously created with the same CallerReference. - // - // If CallerReference is a value you already sent in a previous invalidation - // batch request but the content of any Path is different from the original - // request, CloudFront returns an InvalidationBatchAlreadyExists error. + // The distribution's ID. // - // CallerReference is a required field - CallerReference *string `type:"string" required:"true"` + // DistributionId is a required field + DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` - // A complex type that contains information about the objects that you want - // to invalidate. For more information, see Specifying the Objects to Invalidate - // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html#invalidation-specifying-objects) - // in the Amazon CloudFront Developer Guide. - // - // Paths is a required field - Paths *Paths `type:"structure" required:"true"` + // Use this parameter when paginating results to indicate where to begin in + // your list of invalidation batches. Because the results are returned in decreasing + // order from most recent to oldest, the most recent results are on the first + // page, the second page will contain earlier results, and so on. To get the + // next page of results, set Marker to the value of the NextMarker from the + // current page's response. This value is the same as the ID of the last invalidation + // batch on that page. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of invalidation batches that you want in the response + // body. 
+ MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` } // String returns the string representation -func (s InvalidationBatch) String() string { +func (s ListInvalidationsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InvalidationBatch) GoString() string { +func (s ListInvalidationsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *InvalidationBatch) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InvalidationBatch"} - if s.CallerReference == nil { - invalidParams.Add(request.NewErrParamRequired("CallerReference")) - } - if s.Paths == nil { - invalidParams.Add(request.NewErrParamRequired("Paths")) +func (s *ListInvalidationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListInvalidationsInput"} + if s.DistributionId == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionId")) } - if s.Paths != nil { - if err := s.Paths.Validate(); err != nil { - invalidParams.AddNested("Paths", err.(request.ErrInvalidParams)) - } + if s.DistributionId != nil && len(*s.DistributionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DistributionId", 1)) } if invalidParams.Len() > 0 { @@ -10734,451 +16040,424 @@ func (s *InvalidationBatch) Validate() error { return nil } -// SetCallerReference sets the CallerReference field's value. -func (s *InvalidationBatch) SetCallerReference(v string) *InvalidationBatch { - s.CallerReference = &v +// SetDistributionId sets the DistributionId field's value. +func (s *ListInvalidationsInput) SetDistributionId(v string) *ListInvalidationsInput { + s.DistributionId = &v return s } -// SetPaths sets the Paths field's value. -func (s *InvalidationBatch) SetPaths(v *Paths) *InvalidationBatch { - s.Paths = v +// SetMarker sets the Marker field's value. +func (s *ListInvalidationsInput) SetMarker(v string) *ListInvalidationsInput { + s.Marker = &v return s } -// The InvalidationList complex type describes the list of invalidation objects. -// For more information about invalidation, see Invalidating Objects (Web Distributions -// Only) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html) -// in the Amazon CloudFront Developer Guide. -type InvalidationList struct { - _ struct{} `type:"structure"` - - // A flag that indicates whether more invalidation batch requests remain to - // be listed. If your results were truncated, you can make a follow-up pagination - // request using the Marker request parameter to retrieve more invalidation - // batches in the list. - // - // IsTruncated is a required field - IsTruncated *bool `type:"boolean" required:"true"` - - // A complex type that contains one InvalidationSummary element for each invalidation - // batch created by the current AWS account. - Items []*InvalidationSummary `locationNameList:"InvalidationSummary" type:"list"` - - // The value that you provided for the Marker request parameter. - // - // Marker is a required field - Marker *string `type:"string" required:"true"` - - // The value that you provided for the MaxItems request parameter. - // - // MaxItems is a required field - MaxItems *int64 `type:"integer" required:"true"` +// SetMaxItems sets the MaxItems field's value. 
+func (s *ListInvalidationsInput) SetMaxItems(v int64) *ListInvalidationsInput { + s.MaxItems = &v + return s +} - // If IsTruncated is true, this element is present and contains the value that - // you can use for the Marker request parameter to continue listing your invalidation - // batches where they left off. - NextMarker *string `type:"string"` +// The returned result of the corresponding request. +type ListInvalidationsOutput struct { + _ struct{} `type:"structure" payload:"InvalidationList"` - // The number of invalidation batches that were created by the current AWS account. - // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // Information about invalidation batches. + InvalidationList *InvalidationList `type:"structure"` } // String returns the string representation -func (s InvalidationList) String() string { +func (s ListInvalidationsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InvalidationList) GoString() string { +func (s ListInvalidationsOutput) GoString() string { return s.String() } -// SetIsTruncated sets the IsTruncated field's value. -func (s *InvalidationList) SetIsTruncated(v bool) *InvalidationList { - s.IsTruncated = &v +// SetInvalidationList sets the InvalidationList field's value. +func (s *ListInvalidationsOutput) SetInvalidationList(v *InvalidationList) *ListInvalidationsOutput { + s.InvalidationList = v return s } -// SetItems sets the Items field's value. -func (s *InvalidationList) SetItems(v []*InvalidationSummary) *InvalidationList { - s.Items = v - return s +type ListOriginRequestPoliciesInput struct { + _ struct{} `locationName:"ListOriginRequestPoliciesRequest" type:"structure"` + + // Use this field when paginating results to indicate where to begin in your + // list of origin request policies. The response includes origin request policies + // in the list that occur after the marker. To get the next page of the list, + // set this field’s value to the value of NextMarker from the current page’s + // response. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of origin request policies that you want in the response. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` + + // A filter to return only the specified kinds of origin request policies. Valid + // values are: + // + // * managed – Returns only the managed policies created by AWS. + // + // * custom – Returns only the custom policies created in your AWS account. + Type *string `location:"querystring" locationName:"Type" type:"string" enum:"OriginRequestPolicyType"` +} + +// String returns the string representation +func (s ListOriginRequestPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOriginRequestPoliciesInput) GoString() string { + return s.String() } // SetMarker sets the Marker field's value. -func (s *InvalidationList) SetMarker(v string) *InvalidationList { +func (s *ListOriginRequestPoliciesInput) SetMarker(v string) *ListOriginRequestPoliciesInput { s.Marker = &v return s } // SetMaxItems sets the MaxItems field's value. -func (s *InvalidationList) SetMaxItems(v int64) *InvalidationList { +func (s *ListOriginRequestPoliciesInput) SetMaxItems(v int64) *ListOriginRequestPoliciesInput { s.MaxItems = &v return s } -// SetNextMarker sets the NextMarker field's value. 
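The Marker, MaxItems, and NextMarker fields above drive CloudFront's page-token pagination: when a response's IsTruncated flag is true, its NextMarker becomes the Marker of the next request. The following is a minimal sketch of that loop against ListInvalidations, assuming an already-configured AWS session and a placeholder distribution ID:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

func main() {
	svc := cloudfront.New(session.Must(session.NewSession()))

	// "EDFDVBD6EXAMPLE" is a placeholder distribution ID.
	input := &cloudfront.ListInvalidationsInput{
		DistributionId: aws.String("EDFDVBD6EXAMPLE"),
		MaxItems:       aws.Int64(25),
	}
	for {
		out, err := svc.ListInvalidations(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, inv := range out.InvalidationList.Items {
			fmt.Println(aws.StringValue(inv.Id), aws.StringValue(inv.Status))
		}
		// Stop once the service reports there are no further pages.
		if !aws.BoolValue(out.InvalidationList.IsTruncated) {
			break
		}
		input.Marker = out.InvalidationList.NextMarker
	}
}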
-func (s *InvalidationList) SetNextMarker(v string) *InvalidationList { - s.NextMarker = &v +// SetType sets the Type field's value. +func (s *ListOriginRequestPoliciesInput) SetType(v string) *ListOriginRequestPoliciesInput { + s.Type = &v return s } -// SetQuantity sets the Quantity field's value. -func (s *InvalidationList) SetQuantity(v int64) *InvalidationList { - s.Quantity = &v - return s +type ListOriginRequestPoliciesOutput struct { + _ struct{} `type:"structure" payload:"OriginRequestPolicyList"` + + // A list of origin request policies. + OriginRequestPolicyList *OriginRequestPolicyList `type:"structure"` } -// A summary of an invalidation request. -type InvalidationSummary struct { - _ struct{} `type:"structure"` +// String returns the string representation +func (s ListOriginRequestPoliciesOutput) String() string { + return awsutil.Prettify(s) +} - // The time that an invalidation request was created. - // - // CreateTime is a required field - CreateTime *time.Time `type:"timestamp" required:"true"` +// GoString returns the string representation +func (s ListOriginRequestPoliciesOutput) GoString() string { + return s.String() +} - // The unique ID for an invalidation request. - // - // Id is a required field - Id *string `type:"string" required:"true"` +// SetOriginRequestPolicyList sets the OriginRequestPolicyList field's value. +func (s *ListOriginRequestPoliciesOutput) SetOriginRequestPolicyList(v *OriginRequestPolicyList) *ListOriginRequestPoliciesOutput { + s.OriginRequestPolicyList = v + return s +} - // The status of an invalidation request. - // - // Status is a required field - Status *string `type:"string" required:"true"` +type ListPublicKeysInput struct { + _ struct{} `locationName:"ListPublicKeysRequest" type:"structure"` + + // Use this when paginating results to indicate where to begin in your list + // of public keys. The results include public keys in the list that occur after + // the marker. To get the next page of results, set the Marker to the value + // of the NextMarker from the current page's response (which is also the ID + // of the last public key on that page). + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The maximum number of public keys you want in the response body. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` } // String returns the string representation -func (s InvalidationSummary) String() string { +func (s ListPublicKeysInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InvalidationSummary) GoString() string { +func (s ListPublicKeysInput) GoString() string { return s.String() } -// SetCreateTime sets the CreateTime field's value. -func (s *InvalidationSummary) SetCreateTime(v time.Time) *InvalidationSummary { - s.CreateTime = &v +// SetMarker sets the Marker field's value. +func (s *ListPublicKeysInput) SetMarker(v string) *ListPublicKeysInput { + s.Marker = &v return s } -// SetId sets the Id field's value. -func (s *InvalidationSummary) SetId(v string) *InvalidationSummary { - s.Id = &v +// SetMaxItems sets the MaxItems field's value. +func (s *ListPublicKeysInput) SetMaxItems(v int64) *ListPublicKeysInput { + s.MaxItems = &v return s } -// SetStatus sets the Status field's value. 
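ListOriginRequestPoliciesInput adds a Type filter on top of the same Marker/MaxItems pagination, restricting the listing to AWS-managed or account-created policies. A short sketch of a filtered call, reusing the client and imports from the sketch above; the helper name and the literal "custom" value (one of the two documented Type values) are illustrative:

// listCustomOriginRequestPolicies returns one page of up to 20 origin request
// policies that were created in this account ("custom"), skipping the managed ones.
func listCustomOriginRequestPolicies(svc *cloudfront.CloudFront) (*cloudfront.OriginRequestPolicyList, error) {
	out, err := svc.ListOriginRequestPolicies(&cloudfront.ListOriginRequestPoliciesInput{
		Type:     aws.String("custom"),
		MaxItems: aws.Int64(20),
	})
	if err != nil {
		return nil, err
	}
	return out.OriginRequestPolicyList, nil
}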
-func (s *InvalidationSummary) SetStatus(v string) *InvalidationSummary { - s.Status = &v +type ListPublicKeysOutput struct { + _ struct{} `type:"structure" payload:"PublicKeyList"` + + // Returns a list of all public keys that have been added to CloudFront for + // this account. + PublicKeyList *PublicKeyList `type:"structure"` +} + +// String returns the string representation +func (s ListPublicKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPublicKeysOutput) GoString() string { + return s.String() +} + +// SetPublicKeyList sets the PublicKeyList field's value. +func (s *ListPublicKeysOutput) SetPublicKeyList(v *PublicKeyList) *ListPublicKeysOutput { + s.PublicKeyList = v return s } -// A complex type that lists the active CloudFront key pairs, if any, that are -// associated with AwsAccountNumber. -// -// For more information, see ActiveTrustedSigners (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ActiveTrustedSigners.html). -type KeyPairIds struct { - _ struct{} `type:"structure"` +type ListRealtimeLogConfigsInput struct { + _ struct{} `locationName:"ListRealtimeLogConfigsRequest" type:"structure"` - // A complex type that lists the active CloudFront key pairs, if any, that are - // associated with AwsAccountNumber. - // - // For more information, see ActiveTrustedSigners (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ActiveTrustedSigners.html). - Items []*string `locationNameList:"KeyPairId" type:"list"` + // Use this field when paginating results to indicate where to begin in your + // list of real-time log configurations. The response includes real-time log + // configurations in the list that occur after the marker. To get the next page + // of the list, set this field’s value to the value of NextMarker from the + // current page’s response. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` - // The number of active CloudFront key pairs for AwsAccountNumber. - // - // For more information, see ActiveTrustedSigners (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_ActiveTrustedSigners.html). - // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // The maximum number of real-time log configurations that you want in the response. + MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` } // String returns the string representation -func (s KeyPairIds) String() string { +func (s ListRealtimeLogConfigsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s KeyPairIds) GoString() string { +func (s ListRealtimeLogConfigsInput) GoString() string { return s.String() } -// SetItems sets the Items field's value. -func (s *KeyPairIds) SetItems(v []*string) *KeyPairIds { - s.Items = v +// SetMarker sets the Marker field's value. +func (s *ListRealtimeLogConfigsInput) SetMarker(v string) *ListRealtimeLogConfigsInput { + s.Marker = &v return s } -// SetQuantity sets the Quantity field's value. -func (s *KeyPairIds) SetQuantity(v int64) *KeyPairIds { - s.Quantity = &v +// SetMaxItems sets the MaxItems field's value. +func (s *ListRealtimeLogConfigsInput) SetMaxItems(v int64) *ListRealtimeLogConfigsInput { + s.MaxItems = &v return s } -// A complex type that contains a Lambda function association. 
-type LambdaFunctionAssociation struct { - _ struct{} `type:"structure"` - - // Specifies the event type that triggers a Lambda function invocation. You - // can specify the following values: - // - // * viewer-request: The function executes when CloudFront receives a request - // from a viewer and before it checks to see whether the requested object - // is in the edge cache. - // - // * origin-request: The function executes only when CloudFront forwards - // a request to your origin. When the requested object is in the edge cache, - // the function doesn't execute. - // - // * origin-response: The function executes after CloudFront receives a response - // from the origin and before it caches the object in the response. When - // the requested object is in the edge cache, the function doesn't execute. - // - // * viewer-response: The function executes before CloudFront returns the - // requested object to the viewer. The function executes regardless of whether - // the object was already in the edge cache. If the origin returns an HTTP - // status code other than HTTP 200 (OK), the function doesn't execute. - // - // EventType is a required field - EventType *string `type:"string" required:"true" enum:"EventType"` - - // A flag that allows a Lambda function to have read access to the body content. - // For more information, see Accessing the Request Body by Choosing the Include - // Body Option (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-include-body-access.html) - // in the Amazon CloudFront Developer Guide. - IncludeBody *bool `type:"boolean"` +type ListRealtimeLogConfigsOutput struct { + _ struct{} `type:"structure" payload:"RealtimeLogConfigs"` - // The ARN of the Lambda function. You must specify the ARN of a function version; - // you can't specify a Lambda alias or $LATEST. - // - // LambdaFunctionARN is a required field - LambdaFunctionARN *string `type:"string" required:"true"` + // A list of real-time log configurations. + RealtimeLogConfigs *RealtimeLogConfigs `type:"structure"` } // String returns the string representation -func (s LambdaFunctionAssociation) String() string { +func (s ListRealtimeLogConfigsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LambdaFunctionAssociation) GoString() string { +func (s ListRealtimeLogConfigsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *LambdaFunctionAssociation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionAssociation"} - if s.EventType == nil { - invalidParams.Add(request.NewErrParamRequired("EventType")) - } - if s.LambdaFunctionARN == nil { - invalidParams.Add(request.NewErrParamRequired("LambdaFunctionARN")) - } +// SetRealtimeLogConfigs sets the RealtimeLogConfigs field's value. +func (s *ListRealtimeLogConfigsOutput) SetRealtimeLogConfigs(v *RealtimeLogConfigs) *ListRealtimeLogConfigsOutput { + s.RealtimeLogConfigs = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// The request to list your streaming distributions. +type ListStreamingDistributionsInput struct { + _ struct{} `locationName:"ListStreamingDistributionsRequest" type:"structure"` + + // The value that you provided for the Marker request parameter. + Marker *string `location:"querystring" locationName:"Marker" type:"string"` + + // The value that you provided for the MaxItems request parameter. 
+ MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` } -// SetEventType sets the EventType field's value. -func (s *LambdaFunctionAssociation) SetEventType(v string) *LambdaFunctionAssociation { - s.EventType = &v - return s +// String returns the string representation +func (s ListStreamingDistributionsInput) String() string { + return awsutil.Prettify(s) } -// SetIncludeBody sets the IncludeBody field's value. -func (s *LambdaFunctionAssociation) SetIncludeBody(v bool) *LambdaFunctionAssociation { - s.IncludeBody = &v - return s +// GoString returns the string representation +func (s ListStreamingDistributionsInput) GoString() string { + return s.String() } -// SetLambdaFunctionARN sets the LambdaFunctionARN field's value. -func (s *LambdaFunctionAssociation) SetLambdaFunctionARN(v string) *LambdaFunctionAssociation { - s.LambdaFunctionARN = &v +// SetMarker sets the Marker field's value. +func (s *ListStreamingDistributionsInput) SetMarker(v string) *ListStreamingDistributionsInput { + s.Marker = &v return s } -// A complex type that specifies a list of Lambda functions associations for -// a cache behavior. -// -// If you want to invoke one or more Lambda functions triggered by requests -// that match the PathPattern of the cache behavior, specify the applicable -// values for Quantity and Items. Note that there can be up to 4 LambdaFunctionAssociation -// items in this list (one for each possible value of EventType) and each EventType -// can be associated with the Lambda function only once. -// -// If you don't want to invoke any Lambda functions for the requests that match -// PathPattern, specify 0 for Quantity and omit Items. -type LambdaFunctionAssociations struct { - _ struct{} `type:"structure"` +// SetMaxItems sets the MaxItems field's value. +func (s *ListStreamingDistributionsInput) SetMaxItems(v int64) *ListStreamingDistributionsInput { + s.MaxItems = &v + return s +} - // Optional: A complex type that contains LambdaFunctionAssociation items for - // this cache behavior. If Quantity is 0, you can omit Items. - Items []*LambdaFunctionAssociation `locationNameList:"LambdaFunctionAssociation" type:"list"` +// The returned result of the corresponding request. +type ListStreamingDistributionsOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistributionList"` - // The number of Lambda function associations for this cache behavior. - // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // The StreamingDistributionList type. + StreamingDistributionList *StreamingDistributionList `type:"structure"` } // String returns the string representation -func (s LambdaFunctionAssociations) String() string { +func (s ListStreamingDistributionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LambdaFunctionAssociations) GoString() string { +func (s ListStreamingDistributionsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *LambdaFunctionAssociations) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionAssociations"} - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) - } - if s.Items != nil { - for i, v := range s.Items { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetItems sets the Items field's value. -func (s *LambdaFunctionAssociations) SetItems(v []*LambdaFunctionAssociation) *LambdaFunctionAssociations { - s.Items = v - return s -} - -// SetQuantity sets the Quantity field's value. -func (s *LambdaFunctionAssociations) SetQuantity(v int64) *LambdaFunctionAssociations { - s.Quantity = &v +// SetStreamingDistributionList sets the StreamingDistributionList field's value. +func (s *ListStreamingDistributionsOutput) SetStreamingDistributionList(v *StreamingDistributionList) *ListStreamingDistributionsOutput { + s.StreamingDistributionList = v return s } -// The request to list origin access identities. -type ListCloudFrontOriginAccessIdentitiesInput struct { - _ struct{} `locationName:"ListCloudFrontOriginAccessIdentitiesRequest" type:"structure"` - - // Use this when paginating results to indicate where to begin in your list - // of origin access identities. The results include identities in the list that - // occur after the marker. To get the next page of results, set the Marker to - // the value of the NextMarker from the current page's response (which is also - // the ID of the last identity on that page). - Marker *string `location:"querystring" locationName:"Marker" type:"string"` +// The request to list tags for a CloudFront resource. +type ListTagsForResourceInput struct { + _ struct{} `locationName:"ListTagsForResourceRequest" type:"structure"` - // The maximum number of origin access identities you want in the response body. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` + // An ARN of a CloudFront resource. + // + // Resource is a required field + Resource *string `location:"querystring" locationName:"Resource" type:"string" required:"true"` } // String returns the string representation -func (s ListCloudFrontOriginAccessIdentitiesInput) String() string { +func (s ListTagsForResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListCloudFrontOriginAccessIdentitiesInput) GoString() string { +func (s ListTagsForResourceInput) GoString() string { return s.String() } -// SetMarker sets the Marker field's value. -func (s *ListCloudFrontOriginAccessIdentitiesInput) SetMarker(v string) *ListCloudFrontOriginAccessIdentitiesInput { - s.Marker = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetMaxItems sets the MaxItems field's value. -func (s *ListCloudFrontOriginAccessIdentitiesInput) SetMaxItems(v int64) *ListCloudFrontOriginAccessIdentitiesInput { - s.MaxItems = &v +// SetResource sets the Resource field's value. 
+func (s *ListTagsForResourceInput) SetResource(v string) *ListTagsForResourceInput { + s.Resource = &v return s } // The returned result of the corresponding request. -type ListCloudFrontOriginAccessIdentitiesOutput struct { - _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityList"` +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure" payload:"Tags"` - // The CloudFrontOriginAccessIdentityList type. - CloudFrontOriginAccessIdentityList *OriginAccessIdentityList `type:"structure"` + // A complex type that contains zero or more Tag elements. + // + // Tags is a required field + Tags *Tags `type:"structure" required:"true"` } // String returns the string representation -func (s ListCloudFrontOriginAccessIdentitiesOutput) String() string { +func (s ListTagsForResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListCloudFrontOriginAccessIdentitiesOutput) GoString() string { +func (s ListTagsForResourceOutput) GoString() string { return s.String() } -// SetCloudFrontOriginAccessIdentityList sets the CloudFrontOriginAccessIdentityList field's value. -func (s *ListCloudFrontOriginAccessIdentitiesOutput) SetCloudFrontOriginAccessIdentityList(v *OriginAccessIdentityList) *ListCloudFrontOriginAccessIdentitiesOutput { - s.CloudFrontOriginAccessIdentityList = v +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v *Tags) *ListTagsForResourceOutput { + s.Tags = v return s } -// The request to list distributions that are associated with a specified AWS -// WAF web ACL. -type ListDistributionsByWebACLIdInput struct { - _ struct{} `locationName:"ListDistributionsByWebACLIdRequest" type:"structure"` +// A complex type that controls whether access logs are written for the distribution. +type LoggingConfig struct { + _ struct{} `type:"structure"` - // Use Marker and MaxItems to control pagination of results. If you have more - // than MaxItems distributions that satisfy the request, the response includes - // a NextMarker element. To get the next page of results, submit another request. - // For the value of Marker, specify the value of NextMarker from the last response. - // (For the first request, omit Marker.) - Marker *string `location:"querystring" locationName:"Marker" type:"string"` + // The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` - // The maximum number of distributions that you want CloudFront to return in - // the response body. The maximum and default values are both 100. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` + // Specifies whether you want CloudFront to save access logs to an Amazon S3 + // bucket. If you don't want to enable logging when you create a distribution + // or if you want to disable logging for an existing distribution, specify false + // for Enabled, and specify empty Bucket and Prefix elements. If you specify + // false for Enabled but you specify values for Bucket, prefix, and IncludeCookies, + // the values are automatically deleted. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` - // The ID of the AWS WAF web ACL that you want to list the associated distributions. - // If you specify "null" for the ID, the request returns a list of the distributions - // that aren't associated with a web ACL. 
+ // Specifies whether you want CloudFront to include cookies in access logs, + // specify true for IncludeCookies. If you choose to include cookies in logs, + // CloudFront logs all cookies regardless of how you configure the cache behaviors + // for this distribution. If you don't want to include cookies when you create + // a distribution or if you want to disable include cookies for an existing + // distribution, specify false for IncludeCookies. // - // WebACLId is a required field - WebACLId *string `location:"uri" locationName:"WebACLId" type:"string" required:"true"` + // IncludeCookies is a required field + IncludeCookies *bool `type:"boolean" required:"true"` + + // An optional string that you want CloudFront to prefix to the access log filenames + // for this distribution, for example, myprefix/. If you want to enable logging, + // but you don't want to specify a prefix, you still must include an empty Prefix + // element in the Logging element. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` } // String returns the string representation -func (s ListDistributionsByWebACLIdInput) String() string { +func (s LoggingConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDistributionsByWebACLIdInput) GoString() string { +func (s LoggingConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListDistributionsByWebACLIdInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDistributionsByWebACLIdInput"} - if s.WebACLId == nil { - invalidParams.Add(request.NewErrParamRequired("WebACLId")) +func (s *LoggingConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingConfig"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.WebACLId != nil && len(*s.WebACLId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.IncludeCookies == nil { + invalidParams.Add(request.NewErrParamRequired("IncludeCookies")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) } if invalidParams.Len() > 0 { @@ -11187,273 +16466,342 @@ func (s *ListDistributionsByWebACLIdInput) Validate() error { return nil } -// SetMarker sets the Marker field's value. -func (s *ListDistributionsByWebACLIdInput) SetMarker(v string) *ListDistributionsByWebACLIdInput { - s.Marker = &v +// SetBucket sets the Bucket field's value. +func (s *LoggingConfig) SetBucket(v string) *LoggingConfig { + s.Bucket = &v return s } -// SetMaxItems sets the MaxItems field's value. -func (s *ListDistributionsByWebACLIdInput) SetMaxItems(v int64) *ListDistributionsByWebACLIdInput { - s.MaxItems = &v +// SetEnabled sets the Enabled field's value. +func (s *LoggingConfig) SetEnabled(v bool) *LoggingConfig { + s.Enabled = &v return s } -// SetWebACLId sets the WebACLId field's value. -func (s *ListDistributionsByWebACLIdInput) SetWebACLId(v string) *ListDistributionsByWebACLIdInput { - s.WebACLId = &v +// SetIncludeCookies sets the IncludeCookies field's value. +func (s *LoggingConfig) SetIncludeCookies(v bool) *LoggingConfig { + s.IncludeCookies = &v return s } -// The response to a request to list the distributions that are associated with -// a specified AWS WAF web ACL. 
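All four LoggingConfig members are required even when logging is disabled (Enabled false with empty Bucket and Prefix), and Validate reports any that are missing. A minimal construction sketch, reusing the imports from the earlier example; the bucket and prefix values are placeholders:

logging := &cloudfront.LoggingConfig{
	Bucket:         aws.String("myawslogbucket.s3.amazonaws.com"), // placeholder log bucket
	Enabled:        aws.Bool(true),
	IncludeCookies: aws.Bool(false),
	Prefix:         aws.String("myprefix/"), // may be empty, but the element must be present
}
if err := logging.Validate(); err != nil {
	log.Fatal(err) // reports any missing required field, e.g. Prefix
}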
-type ListDistributionsByWebACLIdOutput struct { - _ struct{} `type:"structure" payload:"DistributionList"` +// SetPrefix sets the Prefix field's value. +func (s *LoggingConfig) SetPrefix(v string) *LoggingConfig { + s.Prefix = &v + return s +} - // The DistributionList type. - DistributionList *DistributionList `type:"structure"` +// A monitoring subscription. This structure contains information about whether +// additional CloudWatch metrics are enabled for a given CloudFront distribution. +type MonitoringSubscription struct { + _ struct{} `type:"structure"` + + // A subscription configuration for additional CloudWatch metrics. + RealtimeMetricsSubscriptionConfig *RealtimeMetricsSubscriptionConfig `type:"structure"` } // String returns the string representation -func (s ListDistributionsByWebACLIdOutput) String() string { +func (s MonitoringSubscription) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDistributionsByWebACLIdOutput) GoString() string { +func (s MonitoringSubscription) GoString() string { return s.String() } -// SetDistributionList sets the DistributionList field's value. -func (s *ListDistributionsByWebACLIdOutput) SetDistributionList(v *DistributionList) *ListDistributionsByWebACLIdOutput { - s.DistributionList = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *MonitoringSubscription) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MonitoringSubscription"} + if s.RealtimeMetricsSubscriptionConfig != nil { + if err := s.RealtimeMetricsSubscriptionConfig.Validate(); err != nil { + invalidParams.AddNested("RealtimeMetricsSubscriptionConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRealtimeMetricsSubscriptionConfig sets the RealtimeMetricsSubscriptionConfig field's value. +func (s *MonitoringSubscription) SetRealtimeMetricsSubscriptionConfig(v *RealtimeMetricsSubscriptionConfig) *MonitoringSubscription { + s.RealtimeMetricsSubscriptionConfig = v return s } -// The request to list your distributions. -type ListDistributionsInput struct { - _ struct{} `locationName:"ListDistributionsRequest" type:"structure"` +// An origin. +// +// An origin is the location where content is stored, and from which CloudFront +// gets content to serve to viewers. To specify an origin: +// +// * Use the S3OriginConfig type to specify an Amazon S3 bucket that is not +// configured with static website hosting. +// +// * Use the CustomOriginConfig type to specify various other kinds of content +// containers or HTTP servers, including: An Amazon S3 bucket that is configured +// with static website hosting An Elastic Load Balancing load balancer An +// AWS Elemental MediaPackage origin An AWS Elemental MediaStore container +// Any other HTTP server, running on an Amazon EC2 instance or any other +// kind of host +// +// For the current maximum number of origins that you can specify per distribution, +// see General Quotas on Web Distributions (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html#limits-web-distributions) +// in the Amazon CloudFront Developer Guide (quotas were formerly referred to +// as limits). +type Origin struct { + _ struct{} `type:"structure"` - // Use this when paginating results to indicate where to begin in your list - // of distributions. The results include distributions in the list that occur - // after the marker. 
To get the next page of results, set the Marker to the - // value of the NextMarker from the current page's response (which is also the - // ID of the last distribution on that page). - Marker *string `location:"querystring" locationName:"Marker" type:"string"` + // The number of times that CloudFront attempts to connect to the origin. The + // minimum number is 1, the maximum is 3, and the default (if you don’t specify + // otherwise) is 3. + // + // For a custom origin (including an Amazon S3 bucket that’s configured with + // static website hosting), this value also specifies the number of times that + // CloudFront attempts to get a response from the origin, in the case of an + // Origin Response Timeout (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginResponseTimeout). + // + // For more information, see Origin Connection Attempts (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#origin-connection-attempts) + // in the Amazon CloudFront Developer Guide. + ConnectionAttempts *int64 `type:"integer"` - // The maximum number of distributions you want in the response body. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` -} + // The number of seconds that CloudFront waits when trying to establish a connection + // to the origin. The minimum timeout is 1 second, the maximum is 10 seconds, + // and the default (if you don’t specify otherwise) is 10 seconds. + // + // For more information, see Origin Connection Timeout (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#origin-connection-timeout) + // in the Amazon CloudFront Developer Guide. + ConnectionTimeout *int64 `type:"integer"` -// String returns the string representation -func (s ListDistributionsInput) String() string { - return awsutil.Prettify(s) -} + // A list of HTTP header names and values that CloudFront adds to requests it + // sends to the origin. + // + // For more information, see Adding Custom Headers to Origin Requests (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/add-origin-custom-headers.html) + // in the Amazon CloudFront Developer Guide. + CustomHeaders *CustomHeaders `type:"structure"` -// GoString returns the string representation -func (s ListDistributionsInput) GoString() string { - return s.String() -} + // Use this type to specify an origin that is a content container or HTTP server, + // including an Amazon S3 bucket that is configured with static website hosting. + // To specify an Amazon S3 bucket that is not configured with static website + // hosting, use the S3OriginConfig type instead. + CustomOriginConfig *CustomOriginConfig `type:"structure"` -// SetMarker sets the Marker field's value. -func (s *ListDistributionsInput) SetMarker(v string) *ListDistributionsInput { - s.Marker = &v - return s -} + // The domain name for the origin. + // + // For more information, see Origin Domain Name (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesDomainName) + // in the Amazon CloudFront Developer Guide. + // + // DomainName is a required field + DomainName *string `type:"string" required:"true"` -// SetMaxItems sets the MaxItems field's value. -func (s *ListDistributionsInput) SetMaxItems(v int64) *ListDistributionsInput { - s.MaxItems = &v - return s -} + // A unique identifier for the origin. 
This value must be unique within the + // distribution. + // + // Use this value to specify the TargetOriginId in a CacheBehavior or DefaultCacheBehavior. + // + // Id is a required field + Id *string `type:"string" required:"true"` -// The returned result of the corresponding request. -type ListDistributionsOutput struct { - _ struct{} `type:"structure" payload:"DistributionList"` + // An optional path that CloudFront appends to the origin domain name when CloudFront + // requests content from the origin. + // + // For more information, see Origin Path (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesOriginPath) + // in the Amazon CloudFront Developer Guide. + OriginPath *string `type:"string"` - // The DistributionList type. - DistributionList *DistributionList `type:"structure"` + // Use this type to specify an origin that is an Amazon S3 bucket that is not + // configured with static website hosting. To specify any other type of origin, + // including an Amazon S3 bucket that is configured with static website hosting, + // use the CustomOriginConfig type instead. + S3OriginConfig *S3OriginConfig `type:"structure"` } // String returns the string representation -func (s ListDistributionsOutput) String() string { +func (s Origin) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDistributionsOutput) GoString() string { +func (s Origin) GoString() string { return s.String() } -// SetDistributionList sets the DistributionList field's value. -func (s *ListDistributionsOutput) SetDistributionList(v *DistributionList) *ListDistributionsOutput { - s.DistributionList = v - return s -} - -type ListFieldLevelEncryptionConfigsInput struct { - _ struct{} `locationName:"ListFieldLevelEncryptionConfigsRequest" type:"structure"` - - // Use this when paginating results to indicate where to begin in your list - // of configurations. The results include configurations in the list that occur - // after the marker. To get the next page of results, set the Marker to the - // value of the NextMarker from the current page's response (which is also the - // ID of the last configuration on that page). - Marker *string `location:"querystring" locationName:"Marker" type:"string"` - - // The maximum number of field-level encryption configurations you want in the - // response body. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` -} +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Origin) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Origin"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.CustomHeaders != nil { + if err := s.CustomHeaders.Validate(); err != nil { + invalidParams.AddNested("CustomHeaders", err.(request.ErrInvalidParams)) + } + } + if s.CustomOriginConfig != nil { + if err := s.CustomOriginConfig.Validate(); err != nil { + invalidParams.AddNested("CustomOriginConfig", err.(request.ErrInvalidParams)) + } + } + if s.S3OriginConfig != nil { + if err := s.S3OriginConfig.Validate(); err != nil { + invalidParams.AddNested("S3OriginConfig", err.(request.ErrInvalidParams)) + } + } -// String returns the string representation -func (s ListFieldLevelEncryptionConfigsInput) String() string { - return awsutil.Prettify(s) + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// GoString returns the string representation -func (s ListFieldLevelEncryptionConfigsInput) GoString() string { - return s.String() +// SetConnectionAttempts sets the ConnectionAttempts field's value. +func (s *Origin) SetConnectionAttempts(v int64) *Origin { + s.ConnectionAttempts = &v + return s } -// SetMarker sets the Marker field's value. -func (s *ListFieldLevelEncryptionConfigsInput) SetMarker(v string) *ListFieldLevelEncryptionConfigsInput { - s.Marker = &v +// SetConnectionTimeout sets the ConnectionTimeout field's value. +func (s *Origin) SetConnectionTimeout(v int64) *Origin { + s.ConnectionTimeout = &v return s } -// SetMaxItems sets the MaxItems field's value. -func (s *ListFieldLevelEncryptionConfigsInput) SetMaxItems(v int64) *ListFieldLevelEncryptionConfigsInput { - s.MaxItems = &v +// SetCustomHeaders sets the CustomHeaders field's value. +func (s *Origin) SetCustomHeaders(v *CustomHeaders) *Origin { + s.CustomHeaders = v return s } -type ListFieldLevelEncryptionConfigsOutput struct { - _ struct{} `type:"structure" payload:"FieldLevelEncryptionList"` +// SetCustomOriginConfig sets the CustomOriginConfig field's value. +func (s *Origin) SetCustomOriginConfig(v *CustomOriginConfig) *Origin { + s.CustomOriginConfig = v + return s +} - // Returns a list of all field-level encryption configurations that have been - // created in CloudFront for this account. - FieldLevelEncryptionList *FieldLevelEncryptionList `type:"structure"` +// SetDomainName sets the DomainName field's value. +func (s *Origin) SetDomainName(v string) *Origin { + s.DomainName = &v + return s } -// String returns the string representation -func (s ListFieldLevelEncryptionConfigsOutput) String() string { - return awsutil.Prettify(s) +// SetId sets the Id field's value. +func (s *Origin) SetId(v string) *Origin { + s.Id = &v + return s } -// GoString returns the string representation -func (s ListFieldLevelEncryptionConfigsOutput) GoString() string { - return s.String() +// SetOriginPath sets the OriginPath field's value. +func (s *Origin) SetOriginPath(v string) *Origin { + s.OriginPath = &v + return s } -// SetFieldLevelEncryptionList sets the FieldLevelEncryptionList field's value. -func (s *ListFieldLevelEncryptionConfigsOutput) SetFieldLevelEncryptionList(v *FieldLevelEncryptionList) *ListFieldLevelEncryptionConfigsOutput { - s.FieldLevelEncryptionList = v +// SetS3OriginConfig sets the S3OriginConfig field's value. 
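An Origin pairs the required Id and DomainName with exactly one origin type: S3OriginConfig for an S3 bucket without static website hosting, or CustomOriginConfig for everything else. Below is a sketch of the S3 case, assuming the S3OriginConfig type and its OriginAccessIdentity field as defined elsewhere in this file, with placeholder names:

origin := &cloudfront.Origin{
	Id:         aws.String("my-s3-origin"),              // placeholder; referenced by TargetOriginId
	DomainName: aws.String("mybucket.s3.amazonaws.com"), // placeholder bucket domain
	S3OriginConfig: &cloudfront.S3OriginConfig{
		// An empty string means the bucket is reached without an origin access identity.
		OriginAccessIdentity: aws.String(""),
	},
}
if err := origin.Validate(); err != nil {
	log.Fatal(err)
}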
+func (s *Origin) SetS3OriginConfig(v *S3OriginConfig) *Origin { + s.S3OriginConfig = v return s } -type ListFieldLevelEncryptionProfilesInput struct { - _ struct{} `locationName:"ListFieldLevelEncryptionProfilesRequest" type:"structure"` +// CloudFront origin access identity. +type OriginAccessIdentity struct { + _ struct{} `type:"structure"` - // Use this when paginating results to indicate where to begin in your list - // of profiles. The results include profiles in the list that occur after the - // marker. To get the next page of results, set the Marker to the value of the - // NextMarker from the current page's response (which is also the ID of the - // last profile on that page). - Marker *string `location:"querystring" locationName:"Marker" type:"string"` + // The current configuration information for the identity. + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `type:"structure"` - // The maximum number of field-level encryption profiles you want in the response - // body. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` + // The ID for the origin access identity, for example, E74FTE3AJFJ256A. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The Amazon S3 canonical user ID for the origin access identity, used when + // giving the origin access identity read permission to an object in Amazon + // S3. + // + // S3CanonicalUserId is a required field + S3CanonicalUserId *string `type:"string" required:"true"` } // String returns the string representation -func (s ListFieldLevelEncryptionProfilesInput) String() string { +func (s OriginAccessIdentity) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListFieldLevelEncryptionProfilesInput) GoString() string { +func (s OriginAccessIdentity) GoString() string { return s.String() } -// SetMarker sets the Marker field's value. -func (s *ListFieldLevelEncryptionProfilesInput) SetMarker(v string) *ListFieldLevelEncryptionProfilesInput { - s.Marker = &v +// SetCloudFrontOriginAccessIdentityConfig sets the CloudFrontOriginAccessIdentityConfig field's value. +func (s *OriginAccessIdentity) SetCloudFrontOriginAccessIdentityConfig(v *OriginAccessIdentityConfig) *OriginAccessIdentity { + s.CloudFrontOriginAccessIdentityConfig = v return s } -// SetMaxItems sets the MaxItems field's value. -func (s *ListFieldLevelEncryptionProfilesInput) SetMaxItems(v int64) *ListFieldLevelEncryptionProfilesInput { - s.MaxItems = &v +// SetId sets the Id field's value. +func (s *OriginAccessIdentity) SetId(v string) *OriginAccessIdentity { + s.Id = &v return s } -type ListFieldLevelEncryptionProfilesOutput struct { - _ struct{} `type:"structure" payload:"FieldLevelEncryptionProfileList"` - - // Returns a list of the field-level encryption profiles that have been created - // in CloudFront for this account. - FieldLevelEncryptionProfileList *FieldLevelEncryptionProfileList `type:"structure"` -} - -// String returns the string representation -func (s ListFieldLevelEncryptionProfilesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListFieldLevelEncryptionProfilesOutput) GoString() string { - return s.String() -} - -// SetFieldLevelEncryptionProfileList sets the FieldLevelEncryptionProfileList field's value. 
-func (s *ListFieldLevelEncryptionProfilesOutput) SetFieldLevelEncryptionProfileList(v *FieldLevelEncryptionProfileList) *ListFieldLevelEncryptionProfilesOutput { - s.FieldLevelEncryptionProfileList = v +// SetS3CanonicalUserId sets the S3CanonicalUserId field's value. +func (s *OriginAccessIdentity) SetS3CanonicalUserId(v string) *OriginAccessIdentity { + s.S3CanonicalUserId = &v return s } -// The request to list invalidations. -type ListInvalidationsInput struct { - _ struct{} `locationName:"ListInvalidationsRequest" type:"structure"` +// Origin access identity configuration. Send a GET request to the /CloudFront +// API version/CloudFront/identity ID/config resource. +type OriginAccessIdentityConfig struct { + _ struct{} `type:"structure"` - // The distribution's ID. + // A unique value (for example, a date-time stamp) that ensures that the request + // can't be replayed. // - // DistributionId is a required field - DistributionId *string `location:"uri" locationName:"DistributionId" type:"string" required:"true"` - - // Use this parameter when paginating results to indicate where to begin in - // your list of invalidation batches. Because the results are returned in decreasing - // order from most recent to oldest, the most recent results are on the first - // page, the second page will contain earlier results, and so on. To get the - // next page of results, set Marker to the value of the NextMarker from the - // current page's response. This value is the same as the ID of the last invalidation - // batch on that page. - Marker *string `location:"querystring" locationName:"Marker" type:"string"` + // If the value of CallerReference is new (regardless of the content of the + // CloudFrontOriginAccessIdentityConfig object), a new origin access identity + // is created. + // + // If the CallerReference is a value already sent in a previous identity request, + // and the content of the CloudFrontOriginAccessIdentityConfig is identical + // to the original request (ignoring white space), the response includes the + // same information returned to the original request. + // + // If the CallerReference is a value you already sent in a previous request + // to create an identity, but the content of the CloudFrontOriginAccessIdentityConfig + // is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists + // error. + // + // CallerReference is a required field + CallerReference *string `type:"string" required:"true"` - // The maximum number of invalidation batches that you want in the response - // body. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` + // Any comments you want to include about the origin access identity. + // + // Comment is a required field + Comment *string `type:"string" required:"true"` } // String returns the string representation -func (s ListInvalidationsInput) String() string { +func (s OriginAccessIdentityConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListInvalidationsInput) GoString() string { +func (s OriginAccessIdentityConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListInvalidationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListInvalidationsInput"} - if s.DistributionId == nil { - invalidParams.Add(request.NewErrParamRequired("DistributionId")) +func (s *OriginAccessIdentityConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginAccessIdentityConfig"} + if s.CallerReference == nil { + invalidParams.Add(request.NewErrParamRequired("CallerReference")) } - if s.DistributionId != nil && len(*s.DistributionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DistributionId", 1)) + if s.Comment == nil { + invalidParams.Add(request.NewErrParamRequired("Comment")) } if invalidParams.Len() > 0 { @@ -11462,449 +16810,274 @@ func (s *ListInvalidationsInput) Validate() error { return nil } -// SetDistributionId sets the DistributionId field's value. -func (s *ListInvalidationsInput) SetDistributionId(v string) *ListInvalidationsInput { - s.DistributionId = &v - return s -} - -// SetMarker sets the Marker field's value. -func (s *ListInvalidationsInput) SetMarker(v string) *ListInvalidationsInput { - s.Marker = &v - return s -} - -// SetMaxItems sets the MaxItems field's value. -func (s *ListInvalidationsInput) SetMaxItems(v int64) *ListInvalidationsInput { - s.MaxItems = &v +// SetCallerReference sets the CallerReference field's value. +func (s *OriginAccessIdentityConfig) SetCallerReference(v string) *OriginAccessIdentityConfig { + s.CallerReference = &v return s } -// The returned result of the corresponding request. -type ListInvalidationsOutput struct { - _ struct{} `type:"structure" payload:"InvalidationList"` - - // Information about invalidation batches. - InvalidationList *InvalidationList `type:"structure"` -} - -// String returns the string representation -func (s ListInvalidationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListInvalidationsOutput) GoString() string { - return s.String() -} - -// SetInvalidationList sets the InvalidationList field's value. -func (s *ListInvalidationsOutput) SetInvalidationList(v *InvalidationList) *ListInvalidationsOutput { - s.InvalidationList = v +// SetComment sets the Comment field's value. +func (s *OriginAccessIdentityConfig) SetComment(v string) *OriginAccessIdentityConfig { + s.Comment = &v return s } -type ListPublicKeysInput struct { - _ struct{} `locationName:"ListPublicKeysRequest" type:"structure"` - - // Use this when paginating results to indicate where to begin in your list - // of public keys. The results include public keys in the list that occur after - // the marker. To get the next page of results, set the Marker to the value - // of the NextMarker from the current page's response (which is also the ID - // of the last public key on that page). - Marker *string `location:"querystring" locationName:"Marker" type:"string"` - - // The maximum number of public keys you want in the response body. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` -} +// Lists the origin access identities for CloudFront.Send a GET request to the +// /CloudFront API version/origin-access-identity/cloudfront resource. The response +// includes a CloudFrontOriginAccessIdentityList element with zero or more CloudFrontOriginAccessIdentitySummary +// child elements. By default, your entire list of origin access identities +// is returned in one single page. If the list is long, you can paginate it +// using the MaxItems and Marker parameters. 
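+//
+// Illustrative sketch (not part of the generated API): walking every page of
+// origin access identities with this client. It assumes an initialized
+// CloudFront client named svc plus the SDK's aws helpers and fmt; the
+// operation name ListCloudFrontOriginAccessIdentities and its output field
+// CloudFrontOriginAccessIdentityList are assumed from this client's API.
+//
+//	input := &ListCloudFrontOriginAccessIdentitiesInput{}
+//	for {
+//		out, err := svc.ListCloudFrontOriginAccessIdentities(input)
+//		if err != nil {
+//			break // handle the error as appropriate
+//		}
+//		list := out.CloudFrontOriginAccessIdentityList
+//		for _, oai := range list.Items {
+//			fmt.Println(aws.StringValue(oai.Id), aws.StringValue(oai.S3CanonicalUserId))
+//		}
+//		if !aws.BoolValue(list.IsTruncated) {
+//			break
+//		}
+//		// NextMarker is the ID of the last identity on this page; send it as
+//		// the Marker of the next request to continue where this page left off.
+//		input.Marker = list.NextMarker
+//	}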
+type OriginAccessIdentityList struct { + _ struct{} `type:"structure"` -// String returns the string representation -func (s ListPublicKeysInput) String() string { - return awsutil.Prettify(s) -} + // A flag that indicates whether more origin access identities remain to be + // listed. If your results were truncated, you can make a follow-up pagination + // request using the Marker request parameter to retrieve more items in the + // list. + // + // IsTruncated is a required field + IsTruncated *bool `type:"boolean" required:"true"` -// GoString returns the string representation -func (s ListPublicKeysInput) GoString() string { - return s.String() -} + // A complex type that contains one CloudFrontOriginAccessIdentitySummary element + // for each origin access identity that was created by the current AWS account. + Items []*OriginAccessIdentitySummary `locationNameList:"CloudFrontOriginAccessIdentitySummary" type:"list"` -// SetMarker sets the Marker field's value. -func (s *ListPublicKeysInput) SetMarker(v string) *ListPublicKeysInput { - s.Marker = &v - return s -} + // Use this when paginating results to indicate where to begin in your list + // of origin access identities. The results include identities in the list that + // occur after the marker. To get the next page of results, set the Marker to + // the value of the NextMarker from the current page's response (which is also + // the ID of the last identity on that page). + // + // Marker is a required field + Marker *string `type:"string" required:"true"` -// SetMaxItems sets the MaxItems field's value. -func (s *ListPublicKeysInput) SetMaxItems(v int64) *ListPublicKeysInput { - s.MaxItems = &v - return s -} + // The maximum number of origin access identities you want in the response body. + // + // MaxItems is a required field + MaxItems *int64 `type:"integer" required:"true"` -type ListPublicKeysOutput struct { - _ struct{} `type:"structure" payload:"PublicKeyList"` + // If IsTruncated is true, this element is present and contains the value you + // can use for the Marker request parameter to continue listing your origin + // access identities where they left off. + NextMarker *string `type:"string"` - // Returns a list of all public keys that have been added to CloudFront for - // this account. - PublicKeyList *PublicKeyList `type:"structure"` + // The number of CloudFront origin access identities that were created by the + // current AWS account. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s ListPublicKeysOutput) String() string { +func (s OriginAccessIdentityList) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListPublicKeysOutput) GoString() string { +func (s OriginAccessIdentityList) GoString() string { return s.String() } -// SetPublicKeyList sets the PublicKeyList field's value. -func (s *ListPublicKeysOutput) SetPublicKeyList(v *PublicKeyList) *ListPublicKeysOutput { - s.PublicKeyList = v +// SetIsTruncated sets the IsTruncated field's value. +func (s *OriginAccessIdentityList) SetIsTruncated(v bool) *OriginAccessIdentityList { + s.IsTruncated = &v return s } -// The request to list your streaming distributions. -type ListStreamingDistributionsInput struct { - _ struct{} `locationName:"ListStreamingDistributionsRequest" type:"structure"` - - // The value that you provided for the Marker request parameter. 
- Marker *string `location:"querystring" locationName:"Marker" type:"string"` - - // The value that you provided for the MaxItems request parameter. - MaxItems *int64 `location:"querystring" locationName:"MaxItems" type:"integer"` -} - -// String returns the string representation -func (s ListStreamingDistributionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListStreamingDistributionsInput) GoString() string { - return s.String() +// SetItems sets the Items field's value. +func (s *OriginAccessIdentityList) SetItems(v []*OriginAccessIdentitySummary) *OriginAccessIdentityList { + s.Items = v + return s } // SetMarker sets the Marker field's value. -func (s *ListStreamingDistributionsInput) SetMarker(v string) *ListStreamingDistributionsInput { +func (s *OriginAccessIdentityList) SetMarker(v string) *OriginAccessIdentityList { s.Marker = &v return s } // SetMaxItems sets the MaxItems field's value. -func (s *ListStreamingDistributionsInput) SetMaxItems(v int64) *ListStreamingDistributionsInput { +func (s *OriginAccessIdentityList) SetMaxItems(v int64) *OriginAccessIdentityList { s.MaxItems = &v return s } -// The returned result of the corresponding request. -type ListStreamingDistributionsOutput struct { - _ struct{} `type:"structure" payload:"StreamingDistributionList"` - - // The StreamingDistributionList type. - StreamingDistributionList *StreamingDistributionList `type:"structure"` +// SetNextMarker sets the NextMarker field's value. +func (s *OriginAccessIdentityList) SetNextMarker(v string) *OriginAccessIdentityList { + s.NextMarker = &v + return s } -// String returns the string representation -func (s ListStreamingDistributionsOutput) String() string { - return awsutil.Prettify(s) +// SetQuantity sets the Quantity field's value. +func (s *OriginAccessIdentityList) SetQuantity(v int64) *OriginAccessIdentityList { + s.Quantity = &v + return s } -// GoString returns the string representation -func (s ListStreamingDistributionsOutput) GoString() string { - return s.String() -} +// Summary of the information about a CloudFront origin access identity. +type OriginAccessIdentitySummary struct { + _ struct{} `type:"structure"` -// SetStreamingDistributionList sets the StreamingDistributionList field's value. -func (s *ListStreamingDistributionsOutput) SetStreamingDistributionList(v *StreamingDistributionList) *ListStreamingDistributionsOutput { - s.StreamingDistributionList = v - return s -} + // The comment for this origin access identity, as originally specified when + // created. + // + // Comment is a required field + Comment *string `type:"string" required:"true"` -// The request to list tags for a CloudFront resource. -type ListTagsForResourceInput struct { - _ struct{} `locationName:"ListTagsForResourceRequest" type:"structure"` + // The ID for the origin access identity. For example: E74FTE3AJFJ256A. + // + // Id is a required field + Id *string `type:"string" required:"true"` - // An ARN of a CloudFront resource. + // The Amazon S3 canonical user ID for the origin access identity, which you + // use when giving the origin access identity read permission to an object in + // Amazon S3. 
// - // Resource is a required field - Resource *string `location:"querystring" locationName:"Resource" type:"string" required:"true"` + // S3CanonicalUserId is a required field + S3CanonicalUserId *string `type:"string" required:"true"` } // String returns the string representation -func (s ListTagsForResourceInput) String() string { +func (s OriginAccessIdentitySummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTagsForResourceInput) GoString() string { +func (s OriginAccessIdentitySummary) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListTagsForResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} - if s.Resource == nil { - invalidParams.Add(request.NewErrParamRequired("Resource")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResource sets the Resource field's value. -func (s *ListTagsForResourceInput) SetResource(v string) *ListTagsForResourceInput { - s.Resource = &v +// SetComment sets the Comment field's value. +func (s *OriginAccessIdentitySummary) SetComment(v string) *OriginAccessIdentitySummary { + s.Comment = &v return s } -// The returned result of the corresponding request. -type ListTagsForResourceOutput struct { - _ struct{} `type:"structure" payload:"Tags"` - - // A complex type that contains zero or more Tag elements. - // - // Tags is a required field - Tags *Tags `type:"structure" required:"true"` -} - -// String returns the string representation -func (s ListTagsForResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListTagsForResourceOutput) GoString() string { - return s.String() +// SetId sets the Id field's value. +func (s *OriginAccessIdentitySummary) SetId(v string) *OriginAccessIdentitySummary { + s.Id = &v + return s } -// SetTags sets the Tags field's value. -func (s *ListTagsForResourceOutput) SetTags(v *Tags) *ListTagsForResourceOutput { - s.Tags = v +// SetS3CanonicalUserId sets the S3CanonicalUserId field's value. +func (s *OriginAccessIdentitySummary) SetS3CanonicalUserId(v string) *OriginAccessIdentitySummary { + s.S3CanonicalUserId = &v return s } -// A complex type that controls whether access logs are written for the distribution. -type LoggingConfig struct { +// A complex type that contains HeaderName and HeaderValue elements, if any, +// for this distribution. +type OriginCustomHeader struct { _ struct{} `type:"structure"` - // The Amazon S3 bucket to store the access logs in, for example, myawslogbucket.s3.amazonaws.com. - // - // Bucket is a required field - Bucket *string `type:"string" required:"true"` - - // Specifies whether you want CloudFront to save access logs to an Amazon S3 - // bucket. If you don't want to enable logging when you create a distribution - // or if you want to disable logging for an existing distribution, specify false - // for Enabled, and specify empty Bucket and Prefix elements. If you specify - // false for Enabled but you specify values for Bucket, prefix, and IncludeCookies, - // the values are automatically deleted. - // - // Enabled is a required field - Enabled *bool `type:"boolean" required:"true"` - - // Specifies whether you want CloudFront to include cookies in access logs, - // specify true for IncludeCookies. 
If you choose to include cookies in logs, - // CloudFront logs all cookies regardless of how you configure the cache behaviors - // for this distribution. If you don't want to include cookies when you create - // a distribution or if you want to disable include cookies for an existing - // distribution, specify false for IncludeCookies. + // The name of a header that you want CloudFront to send to your origin. For + // more information, see Adding Custom Headers to Origin Requests (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/forward-custom-headers.html) + // in the Amazon CloudFront Developer Guide. // - // IncludeCookies is a required field - IncludeCookies *bool `type:"boolean" required:"true"` + // HeaderName is a required field + HeaderName *string `type:"string" required:"true"` - // An optional string that you want CloudFront to prefix to the access log filenames - // for this distribution, for example, myprefix/. If you want to enable logging, - // but you don't want to specify a prefix, you still must include an empty Prefix - // element in the Logging element. + // The value for the header that you specified in the HeaderName field. // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` + // HeaderValue is a required field + HeaderValue *string `type:"string" required:"true"` } // String returns the string representation -func (s LoggingConfig) String() string { +func (s OriginCustomHeader) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LoggingConfig) GoString() string { +func (s OriginCustomHeader) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *LoggingConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LoggingConfig"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Enabled == nil { - invalidParams.Add(request.NewErrParamRequired("Enabled")) - } - if s.IncludeCookies == nil { - invalidParams.Add(request.NewErrParamRequired("IncludeCookies")) +func (s *OriginCustomHeader) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginCustomHeader"} + if s.HeaderName == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderName")) } - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) + if s.HeaderValue == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderValue")) } if invalidParams.Len() > 0 { return invalidParams } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *LoggingConfig) SetBucket(v string) *LoggingConfig { - s.Bucket = &v - return s -} - -// SetEnabled sets the Enabled field's value. -func (s *LoggingConfig) SetEnabled(v bool) *LoggingConfig { - s.Enabled = &v - return s + return nil } -// SetIncludeCookies sets the IncludeCookies field's value. -func (s *LoggingConfig) SetIncludeCookies(v bool) *LoggingConfig { - s.IncludeCookies = &v +// SetHeaderName sets the HeaderName field's value. +func (s *OriginCustomHeader) SetHeaderName(v string) *OriginCustomHeader { + s.HeaderName = &v return s } -// SetPrefix sets the Prefix field's value. -func (s *LoggingConfig) SetPrefix(v string) *LoggingConfig { - s.Prefix = &v +// SetHeaderValue sets the HeaderValue field's value. 
+func (s *OriginCustomHeader) SetHeaderValue(v string) *OriginCustomHeader { + s.HeaderValue = &v return s } -// A complex type that describes the Amazon S3 bucket, HTTP server (for example, -// a web server), Amazon MediaStore, or other server from which CloudFront gets -// your files. This can also be an origin group, if you've created an origin -// group. You must specify at least one origin or origin group. -// -// For the current limit on the number of origins or origin groups that you -// can specify for a distribution, see Amazon CloudFront Limits (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_cloudfront) -// in the AWS General Reference. -type Origin struct { +// An origin group includes two origins (a primary origin and a second origin +// to failover to) and a failover criteria that you specify. You create an origin +// group to support origin failover in CloudFront. When you create or update +// a distribution, you can specifiy the origin group instead of a single origin, +// and CloudFront will failover from the primary origin to the second origin +// under the failover conditions that you've chosen. +type OriginGroup struct { _ struct{} `type:"structure"` - // A complex type that contains names and values for the custom headers that - // you want. - CustomHeaders *CustomHeaders `type:"structure"` - - // A complex type that contains information about a custom origin. If the origin - // is an Amazon S3 bucket, use the S3OriginConfig element instead. - CustomOriginConfig *CustomOriginConfig `type:"structure"` - - // Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want - // CloudFront to get objects for this origin, for example, myawsbucket.s3.amazonaws.com. - // If you set up your bucket to be configured as a website endpoint, enter the - // Amazon S3 static website hosting endpoint for the bucket. - // - // For more information about specifying this value for different types of origins, - // see Origin Domain Name (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesDomainName) - // in the Amazon CloudFront Developer Guide. - // - // Constraints for Amazon S3 origins: - // - // * If you configured Amazon S3 Transfer Acceleration for your bucket, don't - // specify the s3-accelerate endpoint for DomainName. - // - // * The bucket name must be between 3 and 63 characters long (inclusive). - // - // * The bucket name must contain only lowercase characters, numbers, periods, - // underscores, and dashes. - // - // * The bucket name must not contain adjacent periods. - // - // Custom Origins: The DNS domain name for the HTTP server from which you want - // CloudFront to get objects for this origin, for example, www.example.com. - // - // Constraints for custom origins: - // - // * DomainName must be a valid DNS name that contains only a-z, A-Z, 0-9, - // dot (.), hyphen (-), or underscore (_) characters. - // - // * The name cannot exceed 128 characters. + // A complex type that contains information about the failover criteria for + // an origin group. // - // DomainName is a required field - DomainName *string `type:"string" required:"true"` + // FailoverCriteria is a required field + FailoverCriteria *OriginGroupFailoverCriteria `type:"structure" required:"true"` - // A unique identifier for the origin or origin group. The value of Id must - // be unique within the distribution. 
- // - // When you specify the value of TargetOriginId for the default cache behavior - // or for another cache behavior, you indicate the origin to which you want - // the cache behavior to route requests by specifying the value of the Id element - // for that origin. When a request matches the path pattern for that cache behavior, - // CloudFront routes the request to the specified origin. For more information, - // see Cache Behavior Settings (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-web-values-specify.html#DownloadDistValuesCacheBehavior) - // in the Amazon CloudFront Developer Guide. + // The origin group's ID. // // Id is a required field Id *string `type:"string" required:"true"` - // An optional element that causes CloudFront to request your content from a - // directory in your Amazon S3 bucket or your custom origin. When you include - // the OriginPath element, specify the directory name, beginning with a /. CloudFront - // appends the directory name to the value of DomainName, for example, example.com/production. - // Do not include a / at the end of the directory name. - // - // For example, suppose you've specified the following values for your distribution: - // - // * DomainName: An Amazon S3 bucket named myawsbucket. - // - // * OriginPath: /production - // - // * CNAME: example.com - // - // When a user enters example.com/index.html in a browser, CloudFront sends - // a request to Amazon S3 for myawsbucket/production/index.html. + // A complex type that contains information about the origins in an origin group. // - // When a user enters example.com/acme/index.html in a browser, CloudFront sends - // a request to Amazon S3 for myawsbucket/production/acme/index.html. - OriginPath *string `type:"string"` - - // A complex type that contains information about the Amazon S3 origin. If the - // origin is a custom origin, use the CustomOriginConfig element instead. - S3OriginConfig *S3OriginConfig `type:"structure"` + // Members is a required field + Members *OriginGroupMembers `type:"structure" required:"true"` } // String returns the string representation -func (s Origin) String() string { +func (s OriginGroup) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Origin) GoString() string { +func (s OriginGroup) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *Origin) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Origin"} - if s.DomainName == nil { - invalidParams.Add(request.NewErrParamRequired("DomainName")) +func (s *OriginGroup) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginGroup"} + if s.FailoverCriteria == nil { + invalidParams.Add(request.NewErrParamRequired("FailoverCriteria")) } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } - if s.CustomHeaders != nil { - if err := s.CustomHeaders.Validate(); err != nil { - invalidParams.AddNested("CustomHeaders", err.(request.ErrInvalidParams)) - } + if s.Members == nil { + invalidParams.Add(request.NewErrParamRequired("Members")) } - if s.CustomOriginConfig != nil { - if err := s.CustomOriginConfig.Validate(); err != nil { - invalidParams.AddNested("CustomOriginConfig", err.(request.ErrInvalidParams)) + if s.FailoverCriteria != nil { + if err := s.FailoverCriteria.Validate(); err != nil { + invalidParams.AddNested("FailoverCriteria", err.(request.ErrInvalidParams)) } } - if s.S3OriginConfig != nil { - if err := s.S3OriginConfig.Validate(); err != nil { - invalidParams.AddNested("S3OriginConfig", err.(request.ErrInvalidParams)) + if s.Members != nil { + if err := s.Members.Validate(); err != nil { + invalidParams.AddNested("Members", err.(request.ErrInvalidParams)) } } @@ -11914,416 +17087,387 @@ func (s *Origin) Validate() error { return nil } -// SetCustomHeaders sets the CustomHeaders field's value. -func (s *Origin) SetCustomHeaders(v *CustomHeaders) *Origin { - s.CustomHeaders = v - return s -} - -// SetCustomOriginConfig sets the CustomOriginConfig field's value. -func (s *Origin) SetCustomOriginConfig(v *CustomOriginConfig) *Origin { - s.CustomOriginConfig = v - return s -} - -// SetDomainName sets the DomainName field's value. -func (s *Origin) SetDomainName(v string) *Origin { - s.DomainName = &v +// SetFailoverCriteria sets the FailoverCriteria field's value. +func (s *OriginGroup) SetFailoverCriteria(v *OriginGroupFailoverCriteria) *OriginGroup { + s.FailoverCriteria = v return s } // SetId sets the Id field's value. -func (s *Origin) SetId(v string) *Origin { +func (s *OriginGroup) SetId(v string) *OriginGroup { s.Id = &v return s } -// SetOriginPath sets the OriginPath field's value. -func (s *Origin) SetOriginPath(v string) *Origin { - s.OriginPath = &v - return s -} - -// SetS3OriginConfig sets the S3OriginConfig field's value. -func (s *Origin) SetS3OriginConfig(v *S3OriginConfig) *Origin { - s.S3OriginConfig = v +// SetMembers sets the Members field's value. +func (s *OriginGroup) SetMembers(v *OriginGroupMembers) *OriginGroup { + s.Members = v return s } -// CloudFront origin access identity. -type OriginAccessIdentity struct { +// A complex data type that includes information about the failover criteria +// for an origin group, including the status codes for which CloudFront will +// failover from the primary origin to the second origin. +type OriginGroupFailoverCriteria struct { _ struct{} `type:"structure"` - // The current configuration information for the identity. - CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `type:"structure"` - - // The ID for the origin access identity, for example, E74FTE3AJFJ256A. - // - // Id is a required field - Id *string `type:"string" required:"true"` - - // The Amazon S3 canonical user ID for the origin access identity, used when - // giving the origin access identity read permission to an object in Amazon - // S3. 
+ // The status codes that, when returned from the primary origin, will trigger + // CloudFront to failover to the second origin. // - // S3CanonicalUserId is a required field - S3CanonicalUserId *string `type:"string" required:"true"` + // StatusCodes is a required field + StatusCodes *StatusCodes `type:"structure" required:"true"` } // String returns the string representation -func (s OriginAccessIdentity) String() string { +func (s OriginGroupFailoverCriteria) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OriginAccessIdentity) GoString() string { +func (s OriginGroupFailoverCriteria) GoString() string { return s.String() } -// SetCloudFrontOriginAccessIdentityConfig sets the CloudFrontOriginAccessIdentityConfig field's value. -func (s *OriginAccessIdentity) SetCloudFrontOriginAccessIdentityConfig(v *OriginAccessIdentityConfig) *OriginAccessIdentity { - s.CloudFrontOriginAccessIdentityConfig = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *OriginGroupFailoverCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginGroupFailoverCriteria"} + if s.StatusCodes == nil { + invalidParams.Add(request.NewErrParamRequired("StatusCodes")) + } + if s.StatusCodes != nil { + if err := s.StatusCodes.Validate(); err != nil { + invalidParams.AddNested("StatusCodes", err.(request.ErrInvalidParams)) + } + } -// SetId sets the Id field's value. -func (s *OriginAccessIdentity) SetId(v string) *OriginAccessIdentity { - s.Id = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetS3CanonicalUserId sets the S3CanonicalUserId field's value. -func (s *OriginAccessIdentity) SetS3CanonicalUserId(v string) *OriginAccessIdentity { - s.S3CanonicalUserId = &v +// SetStatusCodes sets the StatusCodes field's value. +func (s *OriginGroupFailoverCriteria) SetStatusCodes(v *StatusCodes) *OriginGroupFailoverCriteria { + s.StatusCodes = v return s } -// Origin access identity configuration. Send a GET request to the /CloudFront -// API version/CloudFront/identity ID/config resource. -type OriginAccessIdentityConfig struct { +// An origin in an origin group. +type OriginGroupMember struct { _ struct{} `type:"structure"` - // A unique value (for example, a date-time stamp) that ensures that the request - // can't be replayed. - // - // If the value of CallerReference is new (regardless of the content of the - // CloudFrontOriginAccessIdentityConfig object), a new origin access identity - // is created. - // - // If the CallerReference is a value already sent in a previous identity request, - // and the content of the CloudFrontOriginAccessIdentityConfig is identical - // to the original request (ignoring white space), the response includes the - // same information returned to the original request. - // - // If the CallerReference is a value you already sent in a previous request - // to create an identity, but the content of the CloudFrontOriginAccessIdentityConfig - // is different from the original request, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists - // error. - // - // CallerReference is a required field - CallerReference *string `type:"string" required:"true"` - - // Any comments you want to include about the origin access identity. + // The ID for an origin in an origin group. 
// - // Comment is a required field - Comment *string `type:"string" required:"true"` + // OriginId is a required field + OriginId *string `type:"string" required:"true"` } // String returns the string representation -func (s OriginAccessIdentityConfig) String() string { +func (s OriginGroupMember) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OriginAccessIdentityConfig) GoString() string { +func (s OriginGroupMember) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *OriginAccessIdentityConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OriginAccessIdentityConfig"} - if s.CallerReference == nil { - invalidParams.Add(request.NewErrParamRequired("CallerReference")) - } - if s.Comment == nil { - invalidParams.Add(request.NewErrParamRequired("Comment")) +func (s *OriginGroupMember) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginGroupMember"} + if s.OriginId == nil { + invalidParams.Add(request.NewErrParamRequired("OriginId")) } if invalidParams.Len() > 0 { return invalidParams } return nil -} - -// SetCallerReference sets the CallerReference field's value. -func (s *OriginAccessIdentityConfig) SetCallerReference(v string) *OriginAccessIdentityConfig { - s.CallerReference = &v - return s -} - -// SetComment sets the Comment field's value. -func (s *OriginAccessIdentityConfig) SetComment(v string) *OriginAccessIdentityConfig { - s.Comment = &v - return s -} - -// Lists the origin access identities for CloudFront.Send a GET request to the -// /CloudFront API version/origin-access-identity/cloudfront resource. The response -// includes a CloudFrontOriginAccessIdentityList element with zero or more CloudFrontOriginAccessIdentitySummary -// child elements. By default, your entire list of origin access identities -// is returned in one single page. If the list is long, you can paginate it -// using the MaxItems and Marker parameters. -type OriginAccessIdentityList struct { - _ struct{} `type:"structure"` - - // A flag that indicates whether more origin access identities remain to be - // listed. If your results were truncated, you can make a follow-up pagination - // request using the Marker request parameter to retrieve more items in the - // list. - // - // IsTruncated is a required field - IsTruncated *bool `type:"boolean" required:"true"` - - // A complex type that contains one CloudFrontOriginAccessIdentitySummary element - // for each origin access identity that was created by the current AWS account. - Items []*OriginAccessIdentitySummary `locationNameList:"CloudFrontOriginAccessIdentitySummary" type:"list"` +} - // Use this when paginating results to indicate where to begin in your list - // of origin access identities. The results include identities in the list that - // occur after the marker. To get the next page of results, set the Marker to - // the value of the NextMarker from the current page's response (which is also - // the ID of the last identity on that page). - // - // Marker is a required field - Marker *string `type:"string" required:"true"` +// SetOriginId sets the OriginId field's value. +func (s *OriginGroupMember) SetOriginId(v string) *OriginGroupMember { + s.OriginId = &v + return s +} - // The maximum number of origin access identities you want in the response body. 
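+//
+// Illustrative sketch (not part of the generated API): assembling an origin
+// group from the member and failover types defined here. The origin IDs and
+// status codes are placeholders; aws.String and aws.Int64 are the SDK's
+// pointer helpers.
+//
+//	group := &OriginGroup{
+//		Id: aws.String("primary-with-failover"),
+//		FailoverCriteria: &OriginGroupFailoverCriteria{
+//			StatusCodes: &StatusCodes{
+//				Quantity: aws.Int64(2),
+//				Items:    []*int64{aws.Int64(502), aws.Int64(503)},
+//			},
+//		},
+//		Members: &OriginGroupMembers{
+//			Quantity: aws.Int64(2),
+//			Items: []*OriginGroupMember{
+//				{OriginId: aws.String("primary-origin")},
+//				{OriginId: aws.String("secondary-origin")},
+//			},
+//		},
+//	}
+//	if err := group.Validate(); err != nil {
+//		// a missing required field or fewer than two members lands here
+//	}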
- // - // MaxItems is a required field - MaxItems *int64 `type:"integer" required:"true"` +// A complex data type for the origins included in an origin group. +type OriginGroupMembers struct { + _ struct{} `type:"structure"` - // If IsTruncated is true, this element is present and contains the value you - // can use for the Marker request parameter to continue listing your origin - // access identities where they left off. - NextMarker *string `type:"string"` + // Items (origins) in an origin group. + // + // Items is a required field + Items []*OriginGroupMember `locationNameList:"OriginGroupMember" min:"2" type:"list" required:"true"` - // The number of CloudFront origin access identities that were created by the - // current AWS account. + // The number of origins in an origin group. // // Quantity is a required field Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s OriginAccessIdentityList) String() string { +func (s OriginGroupMembers) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OriginAccessIdentityList) GoString() string { +func (s OriginGroupMembers) GoString() string { return s.String() } -// SetIsTruncated sets the IsTruncated field's value. -func (s *OriginAccessIdentityList) SetIsTruncated(v bool) *OriginAccessIdentityList { - s.IsTruncated = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *OriginGroupMembers) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginGroupMembers"} + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Items != nil && len(s.Items) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Items", 2)) + } + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } // SetItems sets the Items field's value. -func (s *OriginAccessIdentityList) SetItems(v []*OriginAccessIdentitySummary) *OriginAccessIdentityList { +func (s *OriginGroupMembers) SetItems(v []*OriginGroupMember) *OriginGroupMembers { s.Items = v return s } -// SetMarker sets the Marker field's value. -func (s *OriginAccessIdentityList) SetMarker(v string) *OriginAccessIdentityList { - s.Marker = &v - return s -} - -// SetMaxItems sets the MaxItems field's value. -func (s *OriginAccessIdentityList) SetMaxItems(v int64) *OriginAccessIdentityList { - s.MaxItems = &v - return s -} - -// SetNextMarker sets the NextMarker field's value. -func (s *OriginAccessIdentityList) SetNextMarker(v string) *OriginAccessIdentityList { - s.NextMarker = &v - return s -} - // SetQuantity sets the Quantity field's value. -func (s *OriginAccessIdentityList) SetQuantity(v int64) *OriginAccessIdentityList { +func (s *OriginGroupMembers) SetQuantity(v int64) *OriginGroupMembers { s.Quantity = &v return s } -// Summary of the information about a CloudFront origin access identity. -type OriginAccessIdentitySummary struct { +// A complex data type for the origin groups specified for a distribution. +type OriginGroups struct { _ struct{} `type:"structure"` - // The comment for this origin access identity, as originally specified when - // created. 
- // - // Comment is a required field - Comment *string `type:"string" required:"true"` - - // The ID for the origin access identity. For example: E74FTE3AJFJ256A. - // - // Id is a required field - Id *string `type:"string" required:"true"` + // The items (origin groups) in a distribution. + Items []*OriginGroup `locationNameList:"OriginGroup" type:"list"` - // The Amazon S3 canonical user ID for the origin access identity, which you - // use when giving the origin access identity read permission to an object in - // Amazon S3. + // The number of origin groups. // - // S3CanonicalUserId is a required field - S3CanonicalUserId *string `type:"string" required:"true"` + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s OriginAccessIdentitySummary) String() string { +func (s OriginGroups) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OriginAccessIdentitySummary) GoString() string { +func (s OriginGroups) GoString() string { return s.String() } -// SetComment sets the Comment field's value. -func (s *OriginAccessIdentitySummary) SetComment(v string) *OriginAccessIdentitySummary { - s.Comment = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *OriginGroups) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginGroups"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetId sets the Id field's value. -func (s *OriginAccessIdentitySummary) SetId(v string) *OriginAccessIdentitySummary { - s.Id = &v +// SetItems sets the Items field's value. +func (s *OriginGroups) SetItems(v []*OriginGroup) *OriginGroups { + s.Items = v return s } -// SetS3CanonicalUserId sets the S3CanonicalUserId field's value. -func (s *OriginAccessIdentitySummary) SetS3CanonicalUserId(v string) *OriginAccessIdentitySummary { - s.S3CanonicalUserId = &v +// SetQuantity sets the Quantity field's value. +func (s *OriginGroups) SetQuantity(v int64) *OriginGroups { + s.Quantity = &v return s } -// A complex type that contains HeaderName and HeaderValue elements, if any, -// for this distribution. -type OriginCustomHeader struct { +// An origin request policy. +// +// When it’s attached to a cache behavior, the origin request policy determines +// the values that CloudFront includes in requests that it sends to the origin. +// Each request that CloudFront sends to the origin includes the following: +// +// * The request body and the URL path (without the domain name) from the +// viewer request. +// +// * The headers that CloudFront automatically includes in every origin request, +// including Host, User-Agent, and X-Amz-Cf-Id. +// +// * All HTTP headers, cookies, and URL query strings that are specified +// in the cache policy or the origin request policy. These can include items +// from the viewer request and, in the case of headers, additional ones that +// are added by CloudFront. +// +// CloudFront sends a request when it can’t find an object in its cache that +// matches the request. 
If you want to send values to the origin and also include +// them in the cache key, use CachePolicy. +type OriginRequestPolicy struct { _ struct{} `type:"structure"` - // The name of a header that you want CloudFront to forward to your origin. - // For more information, see Forwarding Custom Headers to Your Origin (Web Distributions - // Only) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/forward-custom-headers.html) - // in the Amazon CloudFront Developer Guide. + // The unique identifier for the origin request policy. // - // HeaderName is a required field - HeaderName *string `type:"string" required:"true"` + // Id is a required field + Id *string `type:"string" required:"true"` - // The value for the header that you specified in the HeaderName field. + // The date and time when the origin request policy was last modified. // - // HeaderValue is a required field - HeaderValue *string `type:"string" required:"true"` + // LastModifiedTime is a required field + LastModifiedTime *time.Time `type:"timestamp" required:"true"` + + // The origin request policy configuration. + // + // OriginRequestPolicyConfig is a required field + OriginRequestPolicyConfig *OriginRequestPolicyConfig `type:"structure" required:"true"` } // String returns the string representation -func (s OriginCustomHeader) String() string { +func (s OriginRequestPolicy) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OriginCustomHeader) GoString() string { +func (s OriginRequestPolicy) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *OriginCustomHeader) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OriginCustomHeader"} - if s.HeaderName == nil { - invalidParams.Add(request.NewErrParamRequired("HeaderName")) - } - if s.HeaderValue == nil { - invalidParams.Add(request.NewErrParamRequired("HeaderValue")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetId sets the Id field's value. +func (s *OriginRequestPolicy) SetId(v string) *OriginRequestPolicy { + s.Id = &v + return s } -// SetHeaderName sets the HeaderName field's value. -func (s *OriginCustomHeader) SetHeaderName(v string) *OriginCustomHeader { - s.HeaderName = &v +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *OriginRequestPolicy) SetLastModifiedTime(v time.Time) *OriginRequestPolicy { + s.LastModifiedTime = &v return s } -// SetHeaderValue sets the HeaderValue field's value. -func (s *OriginCustomHeader) SetHeaderValue(v string) *OriginCustomHeader { - s.HeaderValue = &v +// SetOriginRequestPolicyConfig sets the OriginRequestPolicyConfig field's value. +func (s *OriginRequestPolicy) SetOriginRequestPolicyConfig(v *OriginRequestPolicyConfig) *OriginRequestPolicy { + s.OriginRequestPolicyConfig = v return s } -// An origin group includes two origins (a primary origin and a second origin -// to failover to) and a failover criteria that you specify. You create an origin -// group to support origin failover in CloudFront. When you create or update -// a distribution, you can specifiy the origin group instead of a single origin, -// and CloudFront will failover from the primary origin to the second origin -// under the failover conditions that you've chosen. -type OriginGroup struct { +// An origin request policy configuration. 
+// +// This configuration determines the values that CloudFront includes in requests +// that it sends to the origin. Each request that CloudFront sends to the origin +// includes the following: +// +// * The request body and the URL path (without the domain name) from the +// viewer request. +// +// * The headers that CloudFront automatically includes in every origin request, +// including Host, User-Agent, and X-Amz-Cf-Id. +// +// * All HTTP headers, cookies, and URL query strings that are specified +// in the cache policy or the origin request policy. These can include items +// from the viewer request and, in the case of headers, additional ones that +// are added by CloudFront. +// +// CloudFront sends a request when it can’t find an object in its cache that +// matches the request. If you want to send values to the origin and also include +// them in the cache key, use CachePolicy. +type OriginRequestPolicyConfig struct { _ struct{} `type:"structure"` - // A complex type that contains information about the failover criteria for - // an origin group. + // A comment to describe the origin request policy. + Comment *string `type:"string"` + + // The cookies from viewer requests to include in origin requests. // - // FailoverCriteria is a required field - FailoverCriteria *OriginGroupFailoverCriteria `type:"structure" required:"true"` + // CookiesConfig is a required field + CookiesConfig *OriginRequestPolicyCookiesConfig `type:"structure" required:"true"` - // The origin group's ID. + // The HTTP headers to include in origin requests. These can include headers + // from viewer requests and additional headers added by CloudFront. // - // Id is a required field - Id *string `type:"string" required:"true"` + // HeadersConfig is a required field + HeadersConfig *OriginRequestPolicyHeadersConfig `type:"structure" required:"true"` - // A complex type that contains information about the origins in an origin group. + // A unique name to identify the origin request policy. // - // Members is a required field - Members *OriginGroupMembers `type:"structure" required:"true"` + // Name is a required field + Name *string `type:"string" required:"true"` + + // The URL query strings from viewer requests to include in origin requests. + // + // QueryStringsConfig is a required field + QueryStringsConfig *OriginRequestPolicyQueryStringsConfig `type:"structure" required:"true"` } // String returns the string representation -func (s OriginGroup) String() string { +func (s OriginRequestPolicyConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OriginGroup) GoString() string { +func (s OriginRequestPolicyConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *OriginGroup) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OriginGroup"} - if s.FailoverCriteria == nil { - invalidParams.Add(request.NewErrParamRequired("FailoverCriteria")) +func (s *OriginRequestPolicyConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginRequestPolicyConfig"} + if s.CookiesConfig == nil { + invalidParams.Add(request.NewErrParamRequired("CookiesConfig")) } - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) + if s.HeadersConfig == nil { + invalidParams.Add(request.NewErrParamRequired("HeadersConfig")) } - if s.Members == nil { - invalidParams.Add(request.NewErrParamRequired("Members")) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.FailoverCriteria != nil { - if err := s.FailoverCriteria.Validate(); err != nil { - invalidParams.AddNested("FailoverCriteria", err.(request.ErrInvalidParams)) + if s.QueryStringsConfig == nil { + invalidParams.Add(request.NewErrParamRequired("QueryStringsConfig")) + } + if s.CookiesConfig != nil { + if err := s.CookiesConfig.Validate(); err != nil { + invalidParams.AddNested("CookiesConfig", err.(request.ErrInvalidParams)) } } - if s.Members != nil { - if err := s.Members.Validate(); err != nil { - invalidParams.AddNested("Members", err.(request.ErrInvalidParams)) + if s.HeadersConfig != nil { + if err := s.HeadersConfig.Validate(); err != nil { + invalidParams.AddNested("HeadersConfig", err.(request.ErrInvalidParams)) + } + } + if s.QueryStringsConfig != nil { + if err := s.QueryStringsConfig.Validate(); err != nil { + invalidParams.AddNested("QueryStringsConfig", err.(request.ErrInvalidParams)) } } @@ -12333,56 +17477,82 @@ func (s *OriginGroup) Validate() error { return nil } -// SetFailoverCriteria sets the FailoverCriteria field's value. -func (s *OriginGroup) SetFailoverCriteria(v *OriginGroupFailoverCriteria) *OriginGroup { - s.FailoverCriteria = v +// SetComment sets the Comment field's value. +func (s *OriginRequestPolicyConfig) SetComment(v string) *OriginRequestPolicyConfig { + s.Comment = &v + return s +} + +// SetCookiesConfig sets the CookiesConfig field's value. +func (s *OriginRequestPolicyConfig) SetCookiesConfig(v *OriginRequestPolicyCookiesConfig) *OriginRequestPolicyConfig { + s.CookiesConfig = v + return s +} + +// SetHeadersConfig sets the HeadersConfig field's value. +func (s *OriginRequestPolicyConfig) SetHeadersConfig(v *OriginRequestPolicyHeadersConfig) *OriginRequestPolicyConfig { + s.HeadersConfig = v return s } -// SetId sets the Id field's value. -func (s *OriginGroup) SetId(v string) *OriginGroup { - s.Id = &v +// SetName sets the Name field's value. +func (s *OriginRequestPolicyConfig) SetName(v string) *OriginRequestPolicyConfig { + s.Name = &v return s } -// SetMembers sets the Members field's value. -func (s *OriginGroup) SetMembers(v *OriginGroupMembers) *OriginGroup { - s.Members = v +// SetQueryStringsConfig sets the QueryStringsConfig field's value. +func (s *OriginRequestPolicyConfig) SetQueryStringsConfig(v *OriginRequestPolicyQueryStringsConfig) *OriginRequestPolicyConfig { + s.QueryStringsConfig = v return s } -// A complex data type that includes information about the failover criteria -// for an origin group, including the status codes for which CloudFront will -// failover from the primary origin to the second origin. 
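+//
+// Illustrative sketch (not part of the generated API): a minimal
+// OriginRequestPolicyConfig that whitelists one header, forwards all cookies,
+// and forwards no query strings. The policy name, comment, and header are
+// placeholders; the behavior strings follow the enum values documented on the
+// config types below.
+//
+//	cfg := &OriginRequestPolicyConfig{
+//		Name:    aws.String("example-origin-request-policy"),
+//		Comment: aws.String("one header, all cookies, no query strings"),
+//		HeadersConfig: &OriginRequestPolicyHeadersConfig{
+//			HeaderBehavior: aws.String("whitelist"),
+//			Headers: &Headers{
+//				Quantity: aws.Int64(1),
+//				Items:    []*string{aws.String("CloudFront-Viewer-Country")},
+//			},
+//		},
+//		CookiesConfig: &OriginRequestPolicyCookiesConfig{
+//			CookieBehavior: aws.String("all"),
+//		},
+//		QueryStringsConfig: &OriginRequestPolicyQueryStringsConfig{
+//			QueryStringBehavior: aws.String("none"),
+//		},
+//	}
+//	if err := cfg.Validate(); err != nil {
+//		// a missing required config block lands here
+//	}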
-type OriginGroupFailoverCriteria struct { +// An object that determines whether any cookies in viewer requests (and if +// so, which cookies) are included in requests that CloudFront sends to the +// origin. +type OriginRequestPolicyCookiesConfig struct { _ struct{} `type:"structure"` - // The status codes that, when returned from the primary origin, will trigger - // CloudFront to failover to the second origin. + // Determines whether cookies in viewer requests are included in requests that + // CloudFront sends to the origin. Valid values are: // - // StatusCodes is a required field - StatusCodes *StatusCodes `type:"structure" required:"true"` + // * none – Cookies in viewer requests are not included in requests that + // CloudFront sends to the origin. Even when this field is set to none, any + // cookies that are listed in a CachePolicy are included in origin requests. + // + // * whitelist – The cookies in viewer requests that are listed in the + // CookieNames type are included in requests that CloudFront sends to the + // origin. + // + // * all – All cookies in viewer requests are included in requests that + // CloudFront sends to the origin. + // + // CookieBehavior is a required field + CookieBehavior *string `type:"string" required:"true" enum:"OriginRequestPolicyCookieBehavior"` + + // Contains a list of cookie names. + Cookies *CookieNames `type:"structure"` } // String returns the string representation -func (s OriginGroupFailoverCriteria) String() string { +func (s OriginRequestPolicyCookiesConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OriginGroupFailoverCriteria) GoString() string { +func (s OriginRequestPolicyCookiesConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *OriginGroupFailoverCriteria) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OriginGroupFailoverCriteria"} - if s.StatusCodes == nil { - invalidParams.Add(request.NewErrParamRequired("StatusCodes")) +func (s *OriginRequestPolicyCookiesConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginRequestPolicyCookiesConfig"} + if s.CookieBehavior == nil { + invalidParams.Add(request.NewErrParamRequired("CookieBehavior")) } - if s.StatusCodes != nil { - if err := s.StatusCodes.Validate(); err != nil { - invalidParams.AddNested("StatusCodes", err.(request.ErrInvalidParams)) + if s.Cookies != nil { + if err := s.Cookies.Validate(); err != nil { + invalidParams.AddNested("Cookies", err.(request.ErrInvalidParams)) } } @@ -12392,37 +17562,68 @@ func (s *OriginGroupFailoverCriteria) Validate() error { return nil } -// SetStatusCodes sets the StatusCodes field's value. -func (s *OriginGroupFailoverCriteria) SetStatusCodes(v *StatusCodes) *OriginGroupFailoverCriteria { - s.StatusCodes = v +// SetCookieBehavior sets the CookieBehavior field's value. +func (s *OriginRequestPolicyCookiesConfig) SetCookieBehavior(v string) *OriginRequestPolicyCookiesConfig { + s.CookieBehavior = &v return s } -// An origin in an origin group. -type OriginGroupMember struct { +// SetCookies sets the Cookies field's value. +func (s *OriginRequestPolicyCookiesConfig) SetCookies(v *CookieNames) *OriginRequestPolicyCookiesConfig { + s.Cookies = v + return s +} + +// An object that determines whether any HTTP headers (and if so, which headers) +// are included in requests that CloudFront sends to the origin. 
+type OriginRequestPolicyHeadersConfig struct { _ struct{} `type:"structure"` - // The ID for an origin in an origin group. + // Determines whether any HTTP headers are included in requests that CloudFront + // sends to the origin. Valid values are: // - // OriginId is a required field - OriginId *string `type:"string" required:"true"` + // * none – HTTP headers are not included in requests that CloudFront sends + // to the origin. Even when this field is set to none, any headers that are + // listed in a CachePolicy are included in origin requests. + // + // * whitelist – The HTTP headers that are listed in the Headers type are + // included in requests that CloudFront sends to the origin. + // + // * allViewer – All HTTP headers in viewer requests are included in requests + // that CloudFront sends to the origin. + // + // * allViewerAndWhitelistCloudFront – All HTTP headers in viewer requests + // and the additional CloudFront headers that are listed in the Headers type + // are included in requests that CloudFront sends to the origin. The additional + // headers are added by CloudFront. + // + // HeaderBehavior is a required field + HeaderBehavior *string `type:"string" required:"true" enum:"OriginRequestPolicyHeaderBehavior"` + + // Contains a list of HTTP header names. + Headers *Headers `type:"structure"` } // String returns the string representation -func (s OriginGroupMember) String() string { +func (s OriginRequestPolicyHeadersConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OriginGroupMember) GoString() string { +func (s OriginRequestPolicyHeadersConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *OriginGroupMember) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OriginGroupMember"} - if s.OriginId == nil { - invalidParams.Add(request.NewErrParamRequired("OriginId")) +func (s *OriginRequestPolicyHeadersConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginRequestPolicyHeadersConfig"} + if s.HeaderBehavior == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderBehavior")) + } + if s.Headers != nil { + if err := s.Headers.Validate(); err != nil { + invalidParams.AddNested("Headers", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -12431,115 +17632,124 @@ func (s *OriginGroupMember) Validate() error { return nil } -// SetOriginId sets the OriginId field's value. -func (s *OriginGroupMember) SetOriginId(v string) *OriginGroupMember { - s.OriginId = &v +// SetHeaderBehavior sets the HeaderBehavior field's value. +func (s *OriginRequestPolicyHeadersConfig) SetHeaderBehavior(v string) *OriginRequestPolicyHeadersConfig { + s.HeaderBehavior = &v return s } -// A complex data type for the origins included in an origin group. -type OriginGroupMembers struct { +// SetHeaders sets the Headers field's value. +func (s *OriginRequestPolicyHeadersConfig) SetHeaders(v *Headers) *OriginRequestPolicyHeadersConfig { + s.Headers = v + return s +} + +// A list of origin request policies. +type OriginRequestPolicyList struct { _ struct{} `type:"structure"` - // Items (origins) in an origin group. + // Contains the origin request policies in the list. + Items []*OriginRequestPolicySummary `locationNameList:"OriginRequestPolicySummary" type:"list"` + + // The maximum number of origin request policies requested. 
// - // Items is a required field - Items []*OriginGroupMember `locationNameList:"OriginGroupMember" min:"2" type:"list" required:"true"` + // MaxItems is a required field + MaxItems *int64 `type:"integer" required:"true"` - // The number of origins in an origin group. + // If there are more items in the list than are in this response, this element + // is present. It contains the value that you should use in the Marker field + // of a subsequent request to continue listing origin request policies where + // you left off. + NextMarker *string `type:"string"` + + // The total number of origin request policies returned in the response. // // Quantity is a required field Quantity *int64 `type:"integer" required:"true"` } // String returns the string representation -func (s OriginGroupMembers) String() string { +func (s OriginRequestPolicyList) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OriginGroupMembers) GoString() string { +func (s OriginRequestPolicyList) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *OriginGroupMembers) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OriginGroupMembers"} - if s.Items == nil { - invalidParams.Add(request.NewErrParamRequired("Items")) - } - if s.Items != nil && len(s.Items) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Items", 2)) - } - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) - } - if s.Items != nil { - for i, v := range s.Items { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) - } - } - } +// SetItems sets the Items field's value. +func (s *OriginRequestPolicyList) SetItems(v []*OriginRequestPolicySummary) *OriginRequestPolicyList { + s.Items = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMaxItems sets the MaxItems field's value. +func (s *OriginRequestPolicyList) SetMaxItems(v int64) *OriginRequestPolicyList { + s.MaxItems = &v + return s } -// SetItems sets the Items field's value. -func (s *OriginGroupMembers) SetItems(v []*OriginGroupMember) *OriginGroupMembers { - s.Items = v +// SetNextMarker sets the NextMarker field's value. +func (s *OriginRequestPolicyList) SetNextMarker(v string) *OriginRequestPolicyList { + s.NextMarker = &v return s } // SetQuantity sets the Quantity field's value. -func (s *OriginGroupMembers) SetQuantity(v int64) *OriginGroupMembers { +func (s *OriginRequestPolicyList) SetQuantity(v int64) *OriginRequestPolicyList { s.Quantity = &v return s } -// A complex data type for the origin groups specified for a distribution. -type OriginGroups struct { +// An object that determines whether any URL query strings in viewer requests +// (and if so, which query strings) are included in requests that CloudFront +// sends to the origin. +type OriginRequestPolicyQueryStringsConfig struct { _ struct{} `type:"structure"` - // The items (origin groups) in a distribution. - Items []*OriginGroup `locationNameList:"OriginGroup" type:"list"` - - // The number of origin groups. + // Determines whether any URL query strings in viewer requests are included + // in requests that CloudFront sends to the origin. 
Valid values are: // - // Quantity is a required field - Quantity *int64 `type:"integer" required:"true"` + // * none – Query strings in viewer requests are not included in requests + // that CloudFront sends to the origin. Even when this field is set to none, + // any query strings that are listed in a CachePolicy are included in origin + // requests. + // + // * whitelist – The query strings in viewer requests that are listed in + // the QueryStringNames type are included in requests that CloudFront sends + // to the origin. + // + // * all – All query strings in viewer requests are included in requests + // that CloudFront sends to the origin. + // + // QueryStringBehavior is a required field + QueryStringBehavior *string `type:"string" required:"true" enum:"OriginRequestPolicyQueryStringBehavior"` + + // Contains a list of the query strings in viewer requests that are included + // in requests that CloudFront sends to the origin. + QueryStrings *QueryStringNames `type:"structure"` } // String returns the string representation -func (s OriginGroups) String() string { +func (s OriginRequestPolicyQueryStringsConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OriginGroups) GoString() string { +func (s OriginRequestPolicyQueryStringsConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *OriginGroups) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "OriginGroups"} - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) - } - if s.Items != nil { - for i, v := range s.Items { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) - } +func (s *OriginRequestPolicyQueryStringsConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OriginRequestPolicyQueryStringsConfig"} + if s.QueryStringBehavior == nil { + invalidParams.Add(request.NewErrParamRequired("QueryStringBehavior")) + } + if s.QueryStrings != nil { + if err := s.QueryStrings.Validate(); err != nil { + invalidParams.AddNested("QueryStrings", err.(request.ErrInvalidParams)) } } @@ -12549,15 +17759,53 @@ func (s *OriginGroups) Validate() error { return nil } -// SetItems sets the Items field's value. -func (s *OriginGroups) SetItems(v []*OriginGroup) *OriginGroups { - s.Items = v +// SetQueryStringBehavior sets the QueryStringBehavior field's value. +func (s *OriginRequestPolicyQueryStringsConfig) SetQueryStringBehavior(v string) *OriginRequestPolicyQueryStringsConfig { + s.QueryStringBehavior = &v return s } -// SetQuantity sets the Quantity field's value. -func (s *OriginGroups) SetQuantity(v int64) *OriginGroups { - s.Quantity = &v +// SetQueryStrings sets the QueryStrings field's value. +func (s *OriginRequestPolicyQueryStringsConfig) SetQueryStrings(v *QueryStringNames) *OriginRequestPolicyQueryStringsConfig { + s.QueryStrings = v + return s +} + +// Contains an origin request policy. +type OriginRequestPolicySummary struct { + _ struct{} `type:"structure"` + + // The origin request policy. + // + // OriginRequestPolicy is a required field + OriginRequestPolicy *OriginRequestPolicy `type:"structure" required:"true"` + + // The type of origin request policy, either managed (created by AWS) or custom + // (created in this AWS account). 
+ // + // Type is a required field + Type *string `type:"string" required:"true" enum:"OriginRequestPolicyType"` +} + +// String returns the string representation +func (s OriginRequestPolicySummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OriginRequestPolicySummary) GoString() string { + return s.String() +} + +// SetOriginRequestPolicy sets the OriginRequestPolicy field's value. +func (s *OriginRequestPolicySummary) SetOriginRequestPolicy(v *OriginRequestPolicy) *OriginRequestPolicySummary { + s.OriginRequestPolicy = v + return s +} + +// SetType sets the Type field's value. +func (s *OriginRequestPolicySummary) SetType(v string) *OriginRequestPolicySummary { + s.Type = &v return s } @@ -12651,17 +17899,173 @@ func (s *Origins) Validate() error { if s.Items != nil && len(s.Items) < 1 { invalidParams.Add(request.NewErrParamMinLen("Items", 1)) } - if s.Quantity == nil { - invalidParams.Add(request.NewErrParamRequired("Quantity")) + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetItems sets the Items field's value. +func (s *Origins) SetItems(v []*Origin) *Origins { + s.Items = v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *Origins) SetQuantity(v int64) *Origins { + s.Quantity = &v + return s +} + +// This object determines the values that CloudFront includes in the cache key. +// These values can include HTTP headers, cookies, and URL query strings. CloudFront +// uses the cache key to find an object in its cache that it can return to the +// viewer. +// +// The headers, cookies, and query strings that are included in the cache key +// are automatically included in requests that CloudFront sends to the origin. +// CloudFront sends a request when it can’t find an object in its cache that +// matches the request’s cache key. If you want to send values to the origin +// but not include them in the cache key, use OriginRequestPolicy. +type ParametersInCacheKeyAndForwardedToOrigin struct { + _ struct{} `type:"structure"` + + // An object that determines whether any cookies in viewer requests (and if + // so, which cookies) are included in the cache key and automatically included + // in requests that CloudFront sends to the origin. + // + // CookiesConfig is a required field + CookiesConfig *CachePolicyCookiesConfig `type:"structure" required:"true"` + + // A flag that can affect whether the Accept-Encoding HTTP header is included + // in the cache key and included in requests that CloudFront sends to the origin. + // + // This field is related to the EnableAcceptEncodingGzip field. 
If one or both + // of these fields is true and the viewer request includes the Accept-Encoding + // header, then CloudFront does the following: + // + // * Normalizes the value of the viewer’s Accept-Encoding header + // + // * Includes the normalized header in the cache key + // + // * Includes the normalized header in the request to the origin, if a request + // is necessary + // + // For more information, see Cache compressed objects (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-policy-compressed-objects) + // in the Amazon CloudFront Developer Guide. + // + // If you set this value to true, and this cache behavior also has an origin + // request policy attached, do not include the Accept-Encoding header in the + // origin request policy. CloudFront always includes the Accept-Encoding header + // in origin requests when the value of this field is true, so including this + // header in an origin request policy has no effect. + // + // If both of these fields are false, then CloudFront treats the Accept-Encoding + // header the same as any other HTTP header in the viewer request. By default, + // it’s not included in the cache key and it’s not included in origin requests. + // In this case, you can manually add Accept-Encoding to the headers whitelist + // like any other HTTP header. + EnableAcceptEncodingBrotli *bool `type:"boolean"` + + // A flag that can affect whether the Accept-Encoding HTTP header is included + // in the cache key and included in requests that CloudFront sends to the origin. + // + // This field is related to the EnableAcceptEncodingBrotli field. If one or + // both of these fields is true and the viewer request includes the Accept-Encoding + // header, then CloudFront does the following: + // + // * Normalizes the value of the viewer’s Accept-Encoding header + // + // * Includes the normalized header in the cache key + // + // * Includes the normalized header in the request to the origin, if a request + // is necessary + // + // For more information, see Cache compressed objects (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-policy-compressed-objects) + // in the Amazon CloudFront Developer Guide. + // + // If you set this value to true, and this cache behavior also has an origin + // request policy attached, do not include the Accept-Encoding header in the + // origin request policy. CloudFront always includes the Accept-Encoding header + // in origin requests when the value of this field is true, so including this + // header in an origin request policy has no effect. + // + // If both of these fields are false, then CloudFront treats the Accept-Encoding + // header the same as any other HTTP header in the viewer request. By default, + // it’s not included in the cache key and it’s not included in origin requests. + // In this case, you can manually add Accept-Encoding to the headers whitelist + // like any other HTTP header. + // + // EnableAcceptEncodingGzip is a required field + EnableAcceptEncodingGzip *bool `type:"boolean" required:"true"` + + // An object that determines whether any HTTP headers (and if so, which headers) + // are included in the cache key and automatically included in requests that + // CloudFront sends to the origin. 
+ // + // HeadersConfig is a required field + HeadersConfig *CachePolicyHeadersConfig `type:"structure" required:"true"` + + // An object that determines whether any URL query strings in viewer requests + // (and if so, which query strings) are included in the cache key and automatically + // included in requests that CloudFront sends to the origin. + // + // QueryStringsConfig is a required field + QueryStringsConfig *CachePolicyQueryStringsConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ParametersInCacheKeyAndForwardedToOrigin) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParametersInCacheKeyAndForwardedToOrigin) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ParametersInCacheKeyAndForwardedToOrigin) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ParametersInCacheKeyAndForwardedToOrigin"} + if s.CookiesConfig == nil { + invalidParams.Add(request.NewErrParamRequired("CookiesConfig")) + } + if s.EnableAcceptEncodingGzip == nil { + invalidParams.Add(request.NewErrParamRequired("EnableAcceptEncodingGzip")) + } + if s.HeadersConfig == nil { + invalidParams.Add(request.NewErrParamRequired("HeadersConfig")) + } + if s.QueryStringsConfig == nil { + invalidParams.Add(request.NewErrParamRequired("QueryStringsConfig")) + } + if s.CookiesConfig != nil { + if err := s.CookiesConfig.Validate(); err != nil { + invalidParams.AddNested("CookiesConfig", err.(request.ErrInvalidParams)) + } } - if s.Items != nil { - for i, v := range s.Items { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) - } + if s.HeadersConfig != nil { + if err := s.HeadersConfig.Validate(); err != nil { + invalidParams.AddNested("HeadersConfig", err.(request.ErrInvalidParams)) + } + } + if s.QueryStringsConfig != nil { + if err := s.QueryStringsConfig.Validate(); err != nil { + invalidParams.AddNested("QueryStringsConfig", err.(request.ErrInvalidParams)) } } @@ -12671,15 +18075,33 @@ func (s *Origins) Validate() error { return nil } -// SetItems sets the Items field's value. -func (s *Origins) SetItems(v []*Origin) *Origins { - s.Items = v +// SetCookiesConfig sets the CookiesConfig field's value. +func (s *ParametersInCacheKeyAndForwardedToOrigin) SetCookiesConfig(v *CachePolicyCookiesConfig) *ParametersInCacheKeyAndForwardedToOrigin { + s.CookiesConfig = v return s } -// SetQuantity sets the Quantity field's value. -func (s *Origins) SetQuantity(v int64) *Origins { - s.Quantity = &v +// SetEnableAcceptEncodingBrotli sets the EnableAcceptEncodingBrotli field's value. +func (s *ParametersInCacheKeyAndForwardedToOrigin) SetEnableAcceptEncodingBrotli(v bool) *ParametersInCacheKeyAndForwardedToOrigin { + s.EnableAcceptEncodingBrotli = &v + return s +} + +// SetEnableAcceptEncodingGzip sets the EnableAcceptEncodingGzip field's value. +func (s *ParametersInCacheKeyAndForwardedToOrigin) SetEnableAcceptEncodingGzip(v bool) *ParametersInCacheKeyAndForwardedToOrigin { + s.EnableAcceptEncodingGzip = &v + return s +} + +// SetHeadersConfig sets the HeadersConfig field's value. 
+func (s *ParametersInCacheKeyAndForwardedToOrigin) SetHeadersConfig(v *CachePolicyHeadersConfig) *ParametersInCacheKeyAndForwardedToOrigin { + s.HeadersConfig = v + return s +} + +// SetQueryStringsConfig sets the QueryStringsConfig field's value. +func (s *ParametersInCacheKeyAndForwardedToOrigin) SetQueryStringsConfig(v *CachePolicyQueryStringsConfig) *ParametersInCacheKeyAndForwardedToOrigin { + s.QueryStringsConfig = v return s } @@ -13158,6 +18580,15 @@ func (s *QueryArgProfiles) SetQuantity(v int64) *QueryArgProfiles { return s } +// This field is deprecated. We recommend that you use a cache policy or an +// origin request policy instead of this field. +// +// If you want to include query strings in the cache key, use QueryStringsConfig +// in a cache policy. See CachePolicy. +// +// If you want to send query strings to the origin but not include them in the +// cache key, use QueryStringsConfig in an origin request policy. See OriginRequestPolicy. +// // A complex type that contains information about the query string parameters // that you want CloudFront to use for caching for a cache behavior. type QueryStringCacheKeys struct { @@ -13209,6 +18640,245 @@ func (s *QueryStringCacheKeys) SetQuantity(v int64) *QueryStringCacheKeys { return s } +// Contains a list of query string names. +type QueryStringNames struct { + _ struct{} `type:"structure"` + + // A list of query string names. + Items []*string `locationNameList:"Name" type:"list"` + + // The number of query string names in the Items list. + // + // Quantity is a required field + Quantity *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s QueryStringNames) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryStringNames) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *QueryStringNames) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueryStringNames"} + if s.Quantity == nil { + invalidParams.Add(request.NewErrParamRequired("Quantity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetItems sets the Items field's value. +func (s *QueryStringNames) SetItems(v []*string) *QueryStringNames { + s.Items = v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *QueryStringNames) SetQuantity(v int64) *QueryStringNames { + s.Quantity = &v + return s +} + +// A real-time log configuration. +type RealtimeLogConfig struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of this real-time log configuration. + // + // ARN is a required field + ARN *string `type:"string" required:"true"` + + // Contains information about the Amazon Kinesis data stream where you are sending + // real-time log data for this real-time log configuration. + // + // EndPoints is a required field + EndPoints []*EndPoint `type:"list" required:"true"` + + // A list of fields that are included in each real-time log record. In an API + // response, the fields are provided in the same order in which they are sent + // to the Amazon Kinesis data stream. + // + // For more information about fields, see Real-time log configuration fields + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/real-time-logs.html#understand-real-time-log-config-fields) + // in the Amazon CloudFront Developer Guide. 
+ // + // Fields is a required field + Fields []*string `locationNameList:"Field" type:"list" required:"true"` + + // The unique name of this real-time log configuration. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The sampling rate for this real-time log configuration. The sampling rate + // determines the percentage of viewer requests that are represented in the + // real-time log data. The sampling rate is an integer between 1 and 100, inclusive. + // + // SamplingRate is a required field + SamplingRate *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s RealtimeLogConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RealtimeLogConfig) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *RealtimeLogConfig) SetARN(v string) *RealtimeLogConfig { + s.ARN = &v + return s +} + +// SetEndPoints sets the EndPoints field's value. +func (s *RealtimeLogConfig) SetEndPoints(v []*EndPoint) *RealtimeLogConfig { + s.EndPoints = v + return s +} + +// SetFields sets the Fields field's value. +func (s *RealtimeLogConfig) SetFields(v []*string) *RealtimeLogConfig { + s.Fields = v + return s +} + +// SetName sets the Name field's value. +func (s *RealtimeLogConfig) SetName(v string) *RealtimeLogConfig { + s.Name = &v + return s +} + +// SetSamplingRate sets the SamplingRate field's value. +func (s *RealtimeLogConfig) SetSamplingRate(v int64) *RealtimeLogConfig { + s.SamplingRate = &v + return s +} + +// A list of real-time log configurations. +type RealtimeLogConfigs struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more real-time log configurations + // than are contained in this list. + // + // IsTruncated is a required field + IsTruncated *bool `type:"boolean" required:"true"` + + // Contains the list of real-time log configurations. + Items []*RealtimeLogConfig `type:"list"` + + // This parameter indicates where this list of real-time log configurations + // begins. This list includes real-time log configurations that occur after + // the marker. + // + // Marker is a required field + Marker *string `type:"string" required:"true"` + + // The maximum number of real-time log configurations requested. + // + // MaxItems is a required field + MaxItems *int64 `type:"integer" required:"true"` + + // If there are more items in the list than are in this response, this element + // is present. It contains the value that you should use in the Marker field + // of a subsequent request to continue listing real-time log configurations + // where you left off. + NextMarker *string `type:"string"` +} + +// String returns the string representation +func (s RealtimeLogConfigs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RealtimeLogConfigs) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *RealtimeLogConfigs) SetIsTruncated(v bool) *RealtimeLogConfigs { + s.IsTruncated = &v + return s +} + +// SetItems sets the Items field's value. +func (s *RealtimeLogConfigs) SetItems(v []*RealtimeLogConfig) *RealtimeLogConfigs { + s.Items = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *RealtimeLogConfigs) SetMarker(v string) *RealtimeLogConfigs { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. 
+func (s *RealtimeLogConfigs) SetMaxItems(v int64) *RealtimeLogConfigs { + s.MaxItems = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *RealtimeLogConfigs) SetNextMarker(v string) *RealtimeLogConfigs { + s.NextMarker = &v + return s +} + +// A subscription configuration for additional CloudWatch metrics. +type RealtimeMetricsSubscriptionConfig struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether additional CloudWatch metrics are enabled for + // a given CloudFront distribution. + // + // RealtimeMetricsSubscriptionStatus is a required field + RealtimeMetricsSubscriptionStatus *string `type:"string" required:"true" enum:"RealtimeMetricsSubscriptionStatus"` +} + +// String returns the string representation +func (s RealtimeMetricsSubscriptionConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RealtimeMetricsSubscriptionConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RealtimeMetricsSubscriptionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RealtimeMetricsSubscriptionConfig"} + if s.RealtimeMetricsSubscriptionStatus == nil { + invalidParams.Add(request.NewErrParamRequired("RealtimeMetricsSubscriptionStatus")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRealtimeMetricsSubscriptionStatus sets the RealtimeMetricsSubscriptionStatus field's value. +func (s *RealtimeMetricsSubscriptionConfig) SetRealtimeMetricsSubscriptionStatus(v string) *RealtimeMetricsSubscriptionConfig { + s.RealtimeMetricsSubscriptionStatus = &v + return s +} + // A complex type that identifies ways in which you want to restrict distribution // of your content. type Restrictions struct { @@ -13326,7 +18996,8 @@ func (s *S3Origin) SetOriginAccessIdentity(v string) *S3Origin { } // A complex type that contains information about the Amazon S3 origin. If the -// origin is a custom origin, use the CustomOriginConfig element instead. +// origin is a custom origin or an S3 bucket that is configured as a website +// endpoint, use the CustomOriginConfig element instead. type S3OriginConfig struct { _ struct{} `type:"structure"` @@ -14222,7 +19893,7 @@ type TagResourceInput struct { // A complex type that contains zero or more Tag elements. // // Tags is a required field - Tags *Tags `locationName:"Tags" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + Tags *Tags `locationName:"Tags" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` } // String returns the string representation @@ -14420,7 +20091,7 @@ type UntagResourceInput struct { // A complex type that contains zero or more Tag key elements. // // TagKeys is a required field - TagKeys *TagKeys `locationName:"TagKeys" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + TagKeys *TagKeys `locationName:"TagKeys" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` } // String returns the string representation @@ -14475,6 +20146,110 @@ func (s UntagResourceOutput) GoString() string { return s.String() } +type UpdateCachePolicyInput struct { + _ struct{} `locationName:"UpdateCachePolicyRequest" type:"structure" payload:"CachePolicyConfig"` + + // A cache policy configuration. 
+ // + // CachePolicyConfig is a required field + CachePolicyConfig *CachePolicyConfig `locationName:"CachePolicyConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` + + // The unique identifier for the cache policy that you are updating. The identifier + // is returned in a cache behavior’s CachePolicyId field in the response to + // GetDistributionConfig. + // + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The version of the cache policy that you are updating. The version is returned + // in the cache policy’s ETag field in the response to GetCachePolicyConfig. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` +} + +// String returns the string representation +func (s UpdateCachePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCachePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateCachePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateCachePolicyInput"} + if s.CachePolicyConfig == nil { + invalidParams.Add(request.NewErrParamRequired("CachePolicyConfig")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + if s.CachePolicyConfig != nil { + if err := s.CachePolicyConfig.Validate(); err != nil { + invalidParams.AddNested("CachePolicyConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCachePolicyConfig sets the CachePolicyConfig field's value. +func (s *UpdateCachePolicyInput) SetCachePolicyConfig(v *CachePolicyConfig) *UpdateCachePolicyInput { + s.CachePolicyConfig = v + return s +} + +// SetId sets the Id field's value. +func (s *UpdateCachePolicyInput) SetId(v string) *UpdateCachePolicyInput { + s.Id = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *UpdateCachePolicyInput) SetIfMatch(v string) *UpdateCachePolicyInput { + s.IfMatch = &v + return s +} + +type UpdateCachePolicyOutput struct { + _ struct{} `type:"structure" payload:"CachePolicy"` + + // A cache policy. + CachePolicy *CachePolicy `type:"structure"` + + // The current version of the cache policy. + ETag *string `location:"header" locationName:"ETag" type:"string"` +} + +// String returns the string representation +func (s UpdateCachePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCachePolicyOutput) GoString() string { + return s.String() +} + +// SetCachePolicy sets the CachePolicy field's value. +func (s *UpdateCachePolicyOutput) SetCachePolicy(v *CachePolicy) *UpdateCachePolicyOutput { + s.CachePolicy = v + return s +} + +// SetETag sets the ETag field's value. +func (s *UpdateCachePolicyOutput) SetETag(v string) *UpdateCachePolicyOutput { + s.ETag = &v + return s +} + // The request to update an origin access identity. type UpdateCloudFrontOriginAccessIdentityInput struct { _ struct{} `locationName:"UpdateCloudFrontOriginAccessIdentityRequest" type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` @@ -14482,7 +20257,7 @@ type UpdateCloudFrontOriginAccessIdentityInput struct { // The identity's configuration information. 
// // CloudFrontOriginAccessIdentityConfig is a required field - CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `locationName:"CloudFrontOriginAccessIdentityConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + CloudFrontOriginAccessIdentityConfig *OriginAccessIdentityConfig `locationName:"CloudFrontOriginAccessIdentityConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` // The identity's id. // @@ -14586,7 +20361,7 @@ type UpdateDistributionInput struct { // The distribution's configuration information. // // DistributionConfig is a required field - DistributionConfig *DistributionConfig `locationName:"DistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + DistributionConfig *DistributionConfig `locationName:"DistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` // The distribution's id. // @@ -14689,7 +20464,7 @@ type UpdateFieldLevelEncryptionConfigInput struct { // Request to update a field-level encryption configuration. // // FieldLevelEncryptionConfig is a required field - FieldLevelEncryptionConfig *FieldLevelEncryptionConfig `locationName:"FieldLevelEncryptionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + FieldLevelEncryptionConfig *FieldLevelEncryptionConfig `locationName:"FieldLevelEncryptionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` // The ID of the configuration you want to update. // @@ -14792,7 +20567,7 @@ type UpdateFieldLevelEncryptionProfileInput struct { // Request to update a field-level encryption profile. // // FieldLevelEncryptionProfileConfig is a required field - FieldLevelEncryptionProfileConfig *FieldLevelEncryptionProfileConfig `locationName:"FieldLevelEncryptionProfileConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + FieldLevelEncryptionProfileConfig *FieldLevelEncryptionProfileConfig `locationName:"FieldLevelEncryptionProfileConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` // The ID of the field-level encryption profile request. // @@ -14888,6 +20663,111 @@ func (s *UpdateFieldLevelEncryptionProfileOutput) SetFieldLevelEncryptionProfile return s } +type UpdateOriginRequestPolicyInput struct { + _ struct{} `locationName:"UpdateOriginRequestPolicyRequest" type:"structure" payload:"OriginRequestPolicyConfig"` + + // The unique identifier for the origin request policy that you are updating. + // The identifier is returned in a cache behavior’s OriginRequestPolicyId + // field in the response to GetDistributionConfig. + // + // Id is a required field + Id *string `location:"uri" locationName:"Id" type:"string" required:"true"` + + // The version of the origin request policy that you are updating. The version + // is returned in the origin request policy’s ETag field in the response to + // GetOriginRequestPolicyConfig. + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // An origin request policy configuration. 
+ // + // OriginRequestPolicyConfig is a required field + OriginRequestPolicyConfig *OriginRequestPolicyConfig `locationName:"OriginRequestPolicyConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` +} + +// String returns the string representation +func (s UpdateOriginRequestPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOriginRequestPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateOriginRequestPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateOriginRequestPolicyInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + if s.OriginRequestPolicyConfig == nil { + invalidParams.Add(request.NewErrParamRequired("OriginRequestPolicyConfig")) + } + if s.OriginRequestPolicyConfig != nil { + if err := s.OriginRequestPolicyConfig.Validate(); err != nil { + invalidParams.AddNested("OriginRequestPolicyConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *UpdateOriginRequestPolicyInput) SetId(v string) *UpdateOriginRequestPolicyInput { + s.Id = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *UpdateOriginRequestPolicyInput) SetIfMatch(v string) *UpdateOriginRequestPolicyInput { + s.IfMatch = &v + return s +} + +// SetOriginRequestPolicyConfig sets the OriginRequestPolicyConfig field's value. +func (s *UpdateOriginRequestPolicyInput) SetOriginRequestPolicyConfig(v *OriginRequestPolicyConfig) *UpdateOriginRequestPolicyInput { + s.OriginRequestPolicyConfig = v + return s +} + +type UpdateOriginRequestPolicyOutput struct { + _ struct{} `type:"structure" payload:"OriginRequestPolicy"` + + // The current version of the origin request policy. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // An origin request policy. + OriginRequestPolicy *OriginRequestPolicy `type:"structure"` +} + +// String returns the string representation +func (s UpdateOriginRequestPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOriginRequestPolicyOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *UpdateOriginRequestPolicyOutput) SetETag(v string) *UpdateOriginRequestPolicyOutput { + s.ETag = &v + return s +} + +// SetOriginRequestPolicy sets the OriginRequestPolicy field's value. +func (s *UpdateOriginRequestPolicyOutput) SetOriginRequestPolicy(v *OriginRequestPolicy) *UpdateOriginRequestPolicyOutput { + s.OriginRequestPolicy = v + return s +} + type UpdatePublicKeyInput struct { _ struct{} `locationName:"UpdatePublicKeyRequest" type:"structure" payload:"PublicKeyConfig"` @@ -14903,7 +20783,7 @@ type UpdatePublicKeyInput struct { // Request to update public key information. 
// // PublicKeyConfig is a required field - PublicKeyConfig *PublicKeyConfig `locationName:"PublicKeyConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + PublicKeyConfig *PublicKeyConfig `locationName:"PublicKeyConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` } // String returns the string representation @@ -14990,6 +20870,115 @@ func (s *UpdatePublicKeyOutput) SetPublicKey(v *PublicKey) *UpdatePublicKeyOutpu return s } +type UpdateRealtimeLogConfigInput struct { + _ struct{} `locationName:"UpdateRealtimeLogConfigRequest" type:"structure" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` + + // The Amazon Resource Name (ARN) for this real-time log configuration. + ARN *string `type:"string"` + + // Contains information about the Amazon Kinesis data stream where you are sending + // real-time log data. + EndPoints []*EndPoint `type:"list"` + + // A list of fields to include in each real-time log record. + // + // For more information about fields, see Real-time log configuration fields + // (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/real-time-logs.html#understand-real-time-log-config-fields) + // in the Amazon CloudFront Developer Guide. + Fields []*string `locationNameList:"Field" type:"list"` + + // The name for this real-time log configuration. + Name *string `type:"string"` + + // The sampling rate for this real-time log configuration. The sampling rate + // determines the percentage of viewer requests that are represented in the + // real-time log data. You must provide an integer between 1 and 100, inclusive. + SamplingRate *int64 `type:"long"` +} + +// String returns the string representation +func (s UpdateRealtimeLogConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRealtimeLogConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRealtimeLogConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRealtimeLogConfigInput"} + if s.EndPoints != nil { + for i, v := range s.EndPoints { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EndPoints", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetARN sets the ARN field's value. +func (s *UpdateRealtimeLogConfigInput) SetARN(v string) *UpdateRealtimeLogConfigInput { + s.ARN = &v + return s +} + +// SetEndPoints sets the EndPoints field's value. +func (s *UpdateRealtimeLogConfigInput) SetEndPoints(v []*EndPoint) *UpdateRealtimeLogConfigInput { + s.EndPoints = v + return s +} + +// SetFields sets the Fields field's value. +func (s *UpdateRealtimeLogConfigInput) SetFields(v []*string) *UpdateRealtimeLogConfigInput { + s.Fields = v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateRealtimeLogConfigInput) SetName(v string) *UpdateRealtimeLogConfigInput { + s.Name = &v + return s +} + +// SetSamplingRate sets the SamplingRate field's value. +func (s *UpdateRealtimeLogConfigInput) SetSamplingRate(v int64) *UpdateRealtimeLogConfigInput { + s.SamplingRate = &v + return s +} + +type UpdateRealtimeLogConfigOutput struct { + _ struct{} `type:"structure"` + + // A real-time log configuration. 
+ RealtimeLogConfig *RealtimeLogConfig `type:"structure"` +} + +// String returns the string representation +func (s UpdateRealtimeLogConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRealtimeLogConfigOutput) GoString() string { + return s.String() +} + +// SetRealtimeLogConfig sets the RealtimeLogConfig field's value. +func (s *UpdateRealtimeLogConfigOutput) SetRealtimeLogConfig(v *RealtimeLogConfig) *UpdateRealtimeLogConfigOutput { + s.RealtimeLogConfig = v + return s +} + // The request to update a streaming distribution. type UpdateStreamingDistributionInput struct { _ struct{} `locationName:"UpdateStreamingDistributionRequest" type:"structure" payload:"StreamingDistributionConfig"` @@ -15006,7 +20995,7 @@ type UpdateStreamingDistributionInput struct { // The streaming distribution's configuration information. // // StreamingDistributionConfig is a required field - StreamingDistributionConfig *StreamingDistributionConfig `locationName:"StreamingDistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2019-03-26/"` + StreamingDistributionConfig *StreamingDistributionConfig `locationName:"StreamingDistributionConfig" type:"structure" required:"true" xmlURI:"http://cloudfront.amazonaws.com/doc/2020-05-31/"` } // String returns the string representation @@ -15109,10 +21098,10 @@ func (s *UpdateStreamingDistributionOutput) SetStreamingDistribution(v *Streamin // viewers that support server name indication (SNI) (https://en.wikipedia.org/wiki/Server_Name_Indication) // (recommended), or all viewers including those that don’t support SNI. // To accept HTTPS connections from only viewers that support SNI, set SSLSupportMethod -// to sni-only. This is recommended. Most browsers and clients released after -// 2010 support SNI. To accept HTTPS connections from all viewers, including -// those that don’t support SNI, set SSLSupportMethod to vip. This is not -// recommended, and results in additional monthly charges from CloudFront. +// to sni-only. This is recommended. Most browsers and clients support SNI. +// To accept HTTPS connections from all viewers, including those that don’t +// support SNI, set SSLSupportMethod to vip. This is not recommended, and +// results in additional monthly charges from CloudFront. // // * The minimum SSL/TLS protocol version that the distribution can use to // communicate with viewers. To specify a minimum version, choose a value @@ -15207,9 +21196,6 @@ type ViewerCertificate struct { // // On the CloudFront console, this setting is called Security Policy. // - // We recommend that you specify TLSv1.2_2018 unless your viewers are using - // browsers or devices that don’t support TLSv1.2. - // // When you’re using SNI only (you set SSLSupportMethod to sni-only), you // must specify TLSv1 or higher. // @@ -15223,13 +21209,17 @@ type ViewerCertificate struct { // // * sni-only – The distribution accepts HTTPS connections from only viewers // that support server name indication (SNI) (https://en.wikipedia.org/wiki/Server_Name_Indication). - // This is recommended. Most browsers and clients released after 2010 support - // SNI. + // This is recommended. Most browsers and clients support SNI. // // * vip – The distribution accepts HTTPS connections from all viewers // including those that don’t support SNI. This is not recommended, and // results in additional monthly charges from CloudFront. 
// + // * static-ip - Do not specify this value unless your distribution has been + // enabled for this feature by the CloudFront team. If you have a use case + // that requires static IP addresses for a distribution, contact CloudFront + // through the AWS Support Center (https://console.aws.amazon.com/support/home). + // // If the distribution uses the CloudFront domain name such as d111111abcdef8.cloudfront.net, // don’t set a value for this field. SSLSupportMethod *string `type:"string" enum:"SSLSupportMethod"` @@ -15287,6 +21277,86 @@ func (s *ViewerCertificate) SetSSLSupportMethod(v string) *ViewerCertificate { return s } +const ( + // CachePolicyCookieBehaviorNone is a CachePolicyCookieBehavior enum value + CachePolicyCookieBehaviorNone = "none" + + // CachePolicyCookieBehaviorWhitelist is a CachePolicyCookieBehavior enum value + CachePolicyCookieBehaviorWhitelist = "whitelist" + + // CachePolicyCookieBehaviorAllExcept is a CachePolicyCookieBehavior enum value + CachePolicyCookieBehaviorAllExcept = "allExcept" + + // CachePolicyCookieBehaviorAll is a CachePolicyCookieBehavior enum value + CachePolicyCookieBehaviorAll = "all" +) + +// CachePolicyCookieBehavior_Values returns all elements of the CachePolicyCookieBehavior enum +func CachePolicyCookieBehavior_Values() []string { + return []string{ + CachePolicyCookieBehaviorNone, + CachePolicyCookieBehaviorWhitelist, + CachePolicyCookieBehaviorAllExcept, + CachePolicyCookieBehaviorAll, + } +} + +const ( + // CachePolicyHeaderBehaviorNone is a CachePolicyHeaderBehavior enum value + CachePolicyHeaderBehaviorNone = "none" + + // CachePolicyHeaderBehaviorWhitelist is a CachePolicyHeaderBehavior enum value + CachePolicyHeaderBehaviorWhitelist = "whitelist" +) + +// CachePolicyHeaderBehavior_Values returns all elements of the CachePolicyHeaderBehavior enum +func CachePolicyHeaderBehavior_Values() []string { + return []string{ + CachePolicyHeaderBehaviorNone, + CachePolicyHeaderBehaviorWhitelist, + } +} + +const ( + // CachePolicyQueryStringBehaviorNone is a CachePolicyQueryStringBehavior enum value + CachePolicyQueryStringBehaviorNone = "none" + + // CachePolicyQueryStringBehaviorWhitelist is a CachePolicyQueryStringBehavior enum value + CachePolicyQueryStringBehaviorWhitelist = "whitelist" + + // CachePolicyQueryStringBehaviorAllExcept is a CachePolicyQueryStringBehavior enum value + CachePolicyQueryStringBehaviorAllExcept = "allExcept" + + // CachePolicyQueryStringBehaviorAll is a CachePolicyQueryStringBehavior enum value + CachePolicyQueryStringBehaviorAll = "all" +) + +// CachePolicyQueryStringBehavior_Values returns all elements of the CachePolicyQueryStringBehavior enum +func CachePolicyQueryStringBehavior_Values() []string { + return []string{ + CachePolicyQueryStringBehaviorNone, + CachePolicyQueryStringBehaviorWhitelist, + CachePolicyQueryStringBehaviorAllExcept, + CachePolicyQueryStringBehaviorAll, + } +} + +const ( + // CachePolicyTypeManaged is a CachePolicyType enum value + CachePolicyTypeManaged = "managed" + + // CachePolicyTypeCustom is a CachePolicyType enum value + CachePolicyTypeCustom = "custom" +) + +// CachePolicyType_Values returns all elements of the CachePolicyType enum +func CachePolicyType_Values() []string { + return []string{ + CachePolicyTypeManaged, + CachePolicyTypeCustom, + } +} + const ( // CertificateSourceCloudfront is a CertificateSource enum value CertificateSourceCloudfront = "cloudfront" @@ -15298,6 +21368,15 @@ const ( CertificateSourceAcm = "acm" ) +// CertificateSource_Values returns all elements of 
the CertificateSource enum +func CertificateSource_Values() []string { + return []string{ + CertificateSourceCloudfront, + CertificateSourceIam, + CertificateSourceAcm, + } +} + const ( // EventTypeViewerRequest is a EventType enum value EventTypeViewerRequest = "viewer-request" @@ -15312,11 +21391,28 @@ const ( EventTypeOriginResponse = "origin-response" ) +// EventType_Values returns all elements of the EventType enum +func EventType_Values() []string { + return []string{ + EventTypeViewerRequest, + EventTypeViewerResponse, + EventTypeOriginRequest, + EventTypeOriginResponse, + } +} + const ( // FormatUrlencoded is a Format enum value FormatUrlencoded = "URLEncoded" ) +// Format_Values returns all elements of the Format enum +func Format_Values() []string { + return []string{ + FormatUrlencoded, + } +} + const ( // GeoRestrictionTypeBlacklist is a GeoRestrictionType enum value GeoRestrictionTypeBlacklist = "blacklist" @@ -15328,6 +21424,15 @@ const ( GeoRestrictionTypeNone = "none" ) +// GeoRestrictionType_Values returns all elements of the GeoRestrictionType enum +func GeoRestrictionType_Values() []string { + return []string{ + GeoRestrictionTypeBlacklist, + GeoRestrictionTypeWhitelist, + GeoRestrictionTypeNone, + } +} + const ( // HttpVersionHttp11 is a HttpVersion enum value HttpVersionHttp11 = "http1.1" @@ -15336,6 +21441,14 @@ const ( HttpVersionHttp2 = "http2" ) +// HttpVersion_Values returns all elements of the HttpVersion enum +func HttpVersion_Values() []string { + return []string{ + HttpVersionHttp11, + HttpVersionHttp2, + } +} + const ( // ICPRecordalStatusApproved is a ICPRecordalStatus enum value ICPRecordalStatusApproved = "APPROVED" @@ -15347,6 +21460,15 @@ const ( ICPRecordalStatusPending = "PENDING" ) +// ICPRecordalStatus_Values returns all elements of the ICPRecordalStatus enum +func ICPRecordalStatus_Values() []string { + return []string{ + ICPRecordalStatusApproved, + ICPRecordalStatusSuspended, + ICPRecordalStatusPending, + } +} + const ( // ItemSelectionNone is a ItemSelection enum value ItemSelectionNone = "none" @@ -15358,6 +21480,15 @@ const ( ItemSelectionAll = "all" ) +// ItemSelection_Values returns all elements of the ItemSelection enum +func ItemSelection_Values() []string { + return []string{ + ItemSelectionNone, + ItemSelectionWhitelist, + ItemSelectionAll, + } +} + const ( // MethodGet is a Method enum value MethodGet = "GET" @@ -15381,6 +21512,19 @@ const ( MethodDelete = "DELETE" ) +// Method_Values returns all elements of the Method enum +func Method_Values() []string { + return []string{ + MethodGet, + MethodHead, + MethodPost, + MethodPut, + MethodPatch, + MethodOptions, + MethodDelete, + } +} + const ( // MinimumProtocolVersionSslv3 is a MinimumProtocolVersion enum value MinimumProtocolVersionSslv3 = "SSLv3" @@ -15396,8 +21540,23 @@ const ( // MinimumProtocolVersionTlsv122018 is a MinimumProtocolVersion enum value MinimumProtocolVersionTlsv122018 = "TLSv1.2_2018" + + // MinimumProtocolVersionTlsv122019 is a MinimumProtocolVersion enum value + MinimumProtocolVersionTlsv122019 = "TLSv1.2_2019" ) +// MinimumProtocolVersion_Values returns all elements of the MinimumProtocolVersion enum +func MinimumProtocolVersion_Values() []string { + return []string{ + MinimumProtocolVersionSslv3, + MinimumProtocolVersionTlsv1, + MinimumProtocolVersionTlsv12016, + MinimumProtocolVersionTlsv112016, + MinimumProtocolVersionTlsv122018, + MinimumProtocolVersionTlsv122019, + } +} + const ( // OriginProtocolPolicyHttpOnly is a OriginProtocolPolicy enum value 
OriginProtocolPolicyHttpOnly = "http-only" @@ -15409,6 +21568,95 @@ const ( OriginProtocolPolicyHttpsOnly = "https-only" ) +// OriginProtocolPolicy_Values returns all elements of the OriginProtocolPolicy enum +func OriginProtocolPolicy_Values() []string { + return []string{ + OriginProtocolPolicyHttpOnly, + OriginProtocolPolicyMatchViewer, + OriginProtocolPolicyHttpsOnly, + } +} + +const ( + // OriginRequestPolicyCookieBehaviorNone is a OriginRequestPolicyCookieBehavior enum value + OriginRequestPolicyCookieBehaviorNone = "none" + + // OriginRequestPolicyCookieBehaviorWhitelist is a OriginRequestPolicyCookieBehavior enum value + OriginRequestPolicyCookieBehaviorWhitelist = "whitelist" + + // OriginRequestPolicyCookieBehaviorAll is a OriginRequestPolicyCookieBehavior enum value + OriginRequestPolicyCookieBehaviorAll = "all" +) + +// OriginRequestPolicyCookieBehavior_Values returns all elements of the OriginRequestPolicyCookieBehavior enum +func OriginRequestPolicyCookieBehavior_Values() []string { + return []string{ + OriginRequestPolicyCookieBehaviorNone, + OriginRequestPolicyCookieBehaviorWhitelist, + OriginRequestPolicyCookieBehaviorAll, + } +} + +const ( + // OriginRequestPolicyHeaderBehaviorNone is a OriginRequestPolicyHeaderBehavior enum value + OriginRequestPolicyHeaderBehaviorNone = "none" + + // OriginRequestPolicyHeaderBehaviorWhitelist is a OriginRequestPolicyHeaderBehavior enum value + OriginRequestPolicyHeaderBehaviorWhitelist = "whitelist" + + // OriginRequestPolicyHeaderBehaviorAllViewer is a OriginRequestPolicyHeaderBehavior enum value + OriginRequestPolicyHeaderBehaviorAllViewer = "allViewer" + + // OriginRequestPolicyHeaderBehaviorAllViewerAndWhitelistCloudFront is a OriginRequestPolicyHeaderBehavior enum value + OriginRequestPolicyHeaderBehaviorAllViewerAndWhitelistCloudFront = "allViewerAndWhitelistCloudFront" +) + +// OriginRequestPolicyHeaderBehavior_Values returns all elements of the OriginRequestPolicyHeaderBehavior enum +func OriginRequestPolicyHeaderBehavior_Values() []string { + return []string{ + OriginRequestPolicyHeaderBehaviorNone, + OriginRequestPolicyHeaderBehaviorWhitelist, + OriginRequestPolicyHeaderBehaviorAllViewer, + OriginRequestPolicyHeaderBehaviorAllViewerAndWhitelistCloudFront, + } +} + +const ( + // OriginRequestPolicyQueryStringBehaviorNone is a OriginRequestPolicyQueryStringBehavior enum value + OriginRequestPolicyQueryStringBehaviorNone = "none" + + // OriginRequestPolicyQueryStringBehaviorWhitelist is a OriginRequestPolicyQueryStringBehavior enum value + OriginRequestPolicyQueryStringBehaviorWhitelist = "whitelist" + + // OriginRequestPolicyQueryStringBehaviorAll is a OriginRequestPolicyQueryStringBehavior enum value + OriginRequestPolicyQueryStringBehaviorAll = "all" +) + +// OriginRequestPolicyQueryStringBehavior_Values returns all elements of the OriginRequestPolicyQueryStringBehavior enum +func OriginRequestPolicyQueryStringBehavior_Values() []string { + return []string{ + OriginRequestPolicyQueryStringBehaviorNone, + OriginRequestPolicyQueryStringBehaviorWhitelist, + OriginRequestPolicyQueryStringBehaviorAll, + } +} + +const ( + // OriginRequestPolicyTypeManaged is a OriginRequestPolicyType enum value + OriginRequestPolicyTypeManaged = "managed" + + // OriginRequestPolicyTypeCustom is a OriginRequestPolicyType enum value + OriginRequestPolicyTypeCustom = "custom" +) + +// OriginRequestPolicyType_Values returns all elements of the OriginRequestPolicyType enum +func OriginRequestPolicyType_Values() []string { + return []string{ + 
OriginRequestPolicyTypeManaged, + OriginRequestPolicyTypeCustom, + } +} + const ( // PriceClassPriceClass100 is a PriceClass enum value PriceClassPriceClass100 = "PriceClass_100" @@ -15420,14 +21668,51 @@ const ( PriceClassPriceClassAll = "PriceClass_All" ) +// PriceClass_Values returns all elements of the PriceClass enum +func PriceClass_Values() []string { + return []string{ + PriceClassPriceClass100, + PriceClassPriceClass200, + PriceClassPriceClassAll, + } +} + +const ( + // RealtimeMetricsSubscriptionStatusEnabled is a RealtimeMetricsSubscriptionStatus enum value + RealtimeMetricsSubscriptionStatusEnabled = "Enabled" + + // RealtimeMetricsSubscriptionStatusDisabled is a RealtimeMetricsSubscriptionStatus enum value + RealtimeMetricsSubscriptionStatusDisabled = "Disabled" +) + +// RealtimeMetricsSubscriptionStatus_Values returns all elements of the RealtimeMetricsSubscriptionStatus enum +func RealtimeMetricsSubscriptionStatus_Values() []string { + return []string{ + RealtimeMetricsSubscriptionStatusEnabled, + RealtimeMetricsSubscriptionStatusDisabled, + } +} + const ( // SSLSupportMethodSniOnly is a SSLSupportMethod enum value SSLSupportMethodSniOnly = "sni-only" // SSLSupportMethodVip is a SSLSupportMethod enum value SSLSupportMethodVip = "vip" + + // SSLSupportMethodStaticIp is a SSLSupportMethod enum value + SSLSupportMethodStaticIp = "static-ip" ) +// SSLSupportMethod_Values returns all elements of the SSLSupportMethod enum +func SSLSupportMethod_Values() []string { + return []string{ + SSLSupportMethodSniOnly, + SSLSupportMethodVip, + SSLSupportMethodStaticIp, + } +} + const ( // SslProtocolSslv3 is a SslProtocol enum value SslProtocolSslv3 = "SSLv3" @@ -15442,6 +21727,16 @@ const ( SslProtocolTlsv12 = "TLSv1.2" ) +// SslProtocol_Values returns all elements of the SslProtocol enum +func SslProtocol_Values() []string { + return []string{ + SslProtocolSslv3, + SslProtocolTlsv1, + SslProtocolTlsv11, + SslProtocolTlsv12, + } +} + const ( // ViewerProtocolPolicyAllowAll is a ViewerProtocolPolicy enum value ViewerProtocolPolicyAllowAll = "allow-all" @@ -15452,3 +21747,12 @@ const ( // ViewerProtocolPolicyRedirectToHttps is a ViewerProtocolPolicy enum value ViewerProtocolPolicyRedirectToHttps = "redirect-to-https" ) + +// ViewerProtocolPolicy_Values returns all elements of the ViewerProtocolPolicy enum +func ViewerProtocolPolicy_Values() []string { + return []string{ + ViewerProtocolPolicyAllowAll, + ViewerProtocolPolicyHttpsOnly, + ViewerProtocolPolicyRedirectToHttps, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/doc.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/doc.go index fb197f85b..c9f335dd4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/doc.go @@ -8,7 +8,7 @@ // errors. For detailed information about CloudFront features, see the Amazon // CloudFront Developer Guide. // -// See https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2019-03-26 for more information on this service. +// See https://docs.aws.amazon.com/goto/WebAPI/cloudfront-2020-05-31 for more information on this service. // // See cloudfront package documentation for more information. 
// https://docs.aws.amazon.com/sdk-for-go/api/service/cloudfront/ diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/errors.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/errors.go index 827ff5f7e..ced65fe86 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/errors.go @@ -22,6 +22,20 @@ const ( // The CNAME specified is already defined for CloudFront. ErrCodeCNAMEAlreadyExists = "CNAMEAlreadyExists" + // ErrCodeCachePolicyAlreadyExists for service response error code + // "CachePolicyAlreadyExists". + // + // A cache policy with this name already exists. You must provide a unique name. + // To modify an existing cache policy, use UpdateCachePolicy. + ErrCodeCachePolicyAlreadyExists = "CachePolicyAlreadyExists" + + // ErrCodeCachePolicyInUse for service response error code + // "CachePolicyInUse". + // + // Cannot delete the cache policy because it is attached to one or more cache + // behaviors. + ErrCodeCachePolicyInUse = "CachePolicyInUse" + // ErrCodeCannotChangeImmutablePublicKeyFields for service response error code // "CannotChangeImmutablePublicKeyFields". // @@ -72,6 +86,12 @@ const ( // The maximum size of a profile for field-level encryption was exceeded. ErrCodeFieldLevelEncryptionProfileSizeExceeded = "FieldLevelEncryptionProfileSizeExceeded" + // ErrCodeIllegalDelete for service response error code + // "IllegalDelete". + // + // You cannot delete a managed policy. + ErrCodeIllegalDelete = "IllegalDelete" + // ErrCodeIllegalFieldLevelEncryptionConfigAssociationWithCacheBehavior for service response error code // "IllegalFieldLevelEncryptionConfigAssociationWithCacheBehavior". // @@ -82,7 +102,7 @@ const ( // ErrCodeIllegalUpdate for service response error code // "IllegalUpdate". // - // Origin and CallerReference cannot be updated. + // The update contains modifications that are not allowed. ErrCodeIllegalUpdate = "IllegalUpdate" // ErrCodeInconsistentQuantities for service response error code @@ -94,7 +114,7 @@ const ( // ErrCodeInvalidArgument for service response error code // "InvalidArgument". // - // The argument is invalid. + // An argument is invalid. ErrCodeInvalidArgument = "InvalidArgument" // ErrCodeInvalidDefaultRootObject for service response error code @@ -133,7 +153,7 @@ const ( // ErrCodeInvalidIfMatchVersion for service response error code // "InvalidIfMatchVersion". // - // The If-Match version is missing or not valid for the distribution. + // The If-Match version is missing or not valid. ErrCodeInvalidIfMatchVersion = "InvalidIfMatchVersion" // ErrCodeInvalidLambdaFunctionAssociation for service response error code @@ -247,6 +267,12 @@ const ( // header is set. ErrCodeMissingBody = "MissingBody" + // ErrCodeNoSuchCachePolicy for service response error code + // "NoSuchCachePolicy". + // + // The cache policy does not exist. + ErrCodeNoSuchCachePolicy = "NoSuchCachePolicy" + // ErrCodeNoSuchCloudFrontOriginAccessIdentity for service response error code // "NoSuchCloudFrontOriginAccessIdentity". // @@ -283,12 +309,24 @@ const ( // No origin exists with the specified Origin Id. ErrCodeNoSuchOrigin = "NoSuchOrigin" + // ErrCodeNoSuchOriginRequestPolicy for service response error code + // "NoSuchOriginRequestPolicy". + // + // The origin request policy does not exist. + ErrCodeNoSuchOriginRequestPolicy = "NoSuchOriginRequestPolicy" + // ErrCodeNoSuchPublicKey for service response error code // "NoSuchPublicKey". 
// // The specified public key doesn't exist. ErrCodeNoSuchPublicKey = "NoSuchPublicKey" + // ErrCodeNoSuchRealtimeLogConfig for service response error code + // "NoSuchRealtimeLogConfig". + // + // The real-time log configuration does not exist. + ErrCodeNoSuchRealtimeLogConfig = "NoSuchRealtimeLogConfig" + // ErrCodeNoSuchResource for service response error code // "NoSuchResource". // @@ -316,10 +354,24 @@ const ( // The Origin Access Identity specified is already in use. ErrCodeOriginAccessIdentityInUse = "CloudFrontOriginAccessIdentityInUse" + // ErrCodeOriginRequestPolicyAlreadyExists for service response error code + // "OriginRequestPolicyAlreadyExists". + // + // An origin request policy with this name already exists. You must provide + // a unique name. To modify an existing origin request policy, use UpdateOriginRequestPolicy. + ErrCodeOriginRequestPolicyAlreadyExists = "OriginRequestPolicyAlreadyExists" + + // ErrCodeOriginRequestPolicyInUse for service response error code + // "OriginRequestPolicyInUse". + // + // Cannot delete the origin request policy because it is attached to one or + // more cache behaviors. + ErrCodeOriginRequestPolicyInUse = "OriginRequestPolicyInUse" + // ErrCodePreconditionFailed for service response error code // "PreconditionFailed". // - // The precondition given in one or more of the request-header fields evaluated + // The precondition given in one or more of the request header fields evaluated // to false. ErrCodePreconditionFailed = "PreconditionFailed" @@ -341,6 +393,20 @@ const ( // No profile specified for the field-level encryption query argument. ErrCodeQueryArgProfileEmpty = "QueryArgProfileEmpty" + // ErrCodeRealtimeLogConfigAlreadyExists for service response error code + // "RealtimeLogConfigAlreadyExists". + // + // A real-time log configuration with this name already exists. You must provide + // a unique name. To modify an existing real-time log configuration, use UpdateRealtimeLogConfig. + ErrCodeRealtimeLogConfigAlreadyExists = "RealtimeLogConfigAlreadyExists" + + // ErrCodeRealtimeLogConfigInUse for service response error code + // "RealtimeLogConfigInUse". + // + // Cannot delete the real-time log configuration because it is attached to one + // or more cache behaviors. + ErrCodeRealtimeLogConfigInUse = "RealtimeLogConfigInUse" + // ErrCodeStreamingDistributionAlreadyExists for service response error code // "StreamingDistributionAlreadyExists". // @@ -361,6 +427,14 @@ const ( // You cannot create more cache behaviors for the distribution. ErrCodeTooManyCacheBehaviors = "TooManyCacheBehaviors" + // ErrCodeTooManyCachePolicies for service response error code + // "TooManyCachePolicies". + // + // You have reached the maximum number of cache policies for this AWS account. + // For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) + // (formerly known as limits) in the Amazon CloudFront Developer Guide. + ErrCodeTooManyCachePolicies = "TooManyCachePolicies" + // ErrCodeTooManyCertificates for service response error code // "TooManyCertificates". // @@ -381,6 +455,22 @@ const ( // per cache behavior. ErrCodeTooManyCookieNamesInWhiteList = "TooManyCookieNamesInWhiteList" + // ErrCodeTooManyCookiesInCachePolicy for service response error code + // "TooManyCookiesInCachePolicy". + // + // The number of cookies in the cache policy exceeds the maximum. 
For more information, + // see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) + // (formerly known as limits) in the Amazon CloudFront Developer Guide. + ErrCodeTooManyCookiesInCachePolicy = "TooManyCookiesInCachePolicy" + + // ErrCodeTooManyCookiesInOriginRequestPolicy for service response error code + // "TooManyCookiesInOriginRequestPolicy". + // + // The number of cookies in the origin request policy exceeds the maximum. For + // more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) + // (formerly known as limits) in the Amazon CloudFront Developer Guide. + ErrCodeTooManyCookiesInOriginRequestPolicy = "TooManyCookiesInOriginRequestPolicy" + // ErrCodeTooManyDistributionCNAMEs for service response error code // "TooManyDistributionCNAMEs". // @@ -394,6 +484,14 @@ const ( // allowed. ErrCodeTooManyDistributions = "TooManyDistributions" + // ErrCodeTooManyDistributionsAssociatedToCachePolicy for service response error code + // "TooManyDistributionsAssociatedToCachePolicy". + // + // The maximum number of distributions have been associated with the specified + // cache policy. For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) + // (formerly known as limits) in the Amazon CloudFront Developer Guide. + ErrCodeTooManyDistributionsAssociatedToCachePolicy = "TooManyDistributionsAssociatedToCachePolicy" + // ErrCodeTooManyDistributionsAssociatedToFieldLevelEncryptionConfig for service response error code // "TooManyDistributionsAssociatedToFieldLevelEncryptionConfig". // @@ -401,6 +499,14 @@ const ( // configuration for field-level encryption. ErrCodeTooManyDistributionsAssociatedToFieldLevelEncryptionConfig = "TooManyDistributionsAssociatedToFieldLevelEncryptionConfig" + // ErrCodeTooManyDistributionsAssociatedToOriginRequestPolicy for service response error code + // "TooManyDistributionsAssociatedToOriginRequestPolicy". + // + // The maximum number of distributions have been associated with the specified + // origin request policy. For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) + // (formerly known as limits) in the Amazon CloudFront Developer Guide. + ErrCodeTooManyDistributionsAssociatedToOriginRequestPolicy = "TooManyDistributionsAssociatedToOriginRequestPolicy" + // ErrCodeTooManyDistributionsWithLambdaAssociations for service response error code // "TooManyDistributionsWithLambdaAssociations". // @@ -408,6 +514,13 @@ const ( // Lambda function associations per owner to be exceeded. ErrCodeTooManyDistributionsWithLambdaAssociations = "TooManyDistributionsWithLambdaAssociations" + // ErrCodeTooManyDistributionsWithSingleFunctionARN for service response error code + // "TooManyDistributionsWithSingleFunctionARN". + // + // The maximum number of distributions have been associated with the specified + // Lambda function. + ErrCodeTooManyDistributionsWithSingleFunctionARN = "TooManyDistributionsWithSingleFunctionARN" + // ErrCodeTooManyFieldLevelEncryptionConfigs for service response error code // "TooManyFieldLevelEncryptionConfigs". // @@ -449,12 +562,28 @@ const ( // been created. ErrCodeTooManyFieldLevelEncryptionQueryArgProfiles = "TooManyFieldLevelEncryptionQueryArgProfiles" + // ErrCodeTooManyHeadersInCachePolicy for service response error code + // "TooManyHeadersInCachePolicy". 
+ // + // The number of headers in the cache policy exceeds the maximum. For more information, + // see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) + // (formerly known as limits) in the Amazon CloudFront Developer Guide. + ErrCodeTooManyHeadersInCachePolicy = "TooManyHeadersInCachePolicy" + // ErrCodeTooManyHeadersInForwardedValues for service response error code // "TooManyHeadersInForwardedValues". // // Your request contains too many headers in forwarded values. ErrCodeTooManyHeadersInForwardedValues = "TooManyHeadersInForwardedValues" + // ErrCodeTooManyHeadersInOriginRequestPolicy for service response error code + // "TooManyHeadersInOriginRequestPolicy". + // + // The number of headers in the origin request policy exceeds the maximum. For + // more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) + // (formerly known as limits) in the Amazon CloudFront Developer Guide. + ErrCodeTooManyHeadersInOriginRequestPolicy = "TooManyHeadersInOriginRequestPolicy" + // ErrCodeTooManyInvalidationsInProgress for service response error code // "TooManyInvalidationsInProgress". // @@ -482,6 +611,14 @@ const ( // groups allowed. ErrCodeTooManyOriginGroupsPerDistribution = "TooManyOriginGroupsPerDistribution" + // ErrCodeTooManyOriginRequestPolicies for service response error code + // "TooManyOriginRequestPolicies". + // + // You have reached the maximum number of origin request policies for this AWS + // account. For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) + // (formerly known as limits) in the Amazon CloudFront Developer Guide. + ErrCodeTooManyOriginRequestPolicies = "TooManyOriginRequestPolicies" + // ErrCodeTooManyOrigins for service response error code // "TooManyOrigins". // @@ -501,6 +638,30 @@ const ( // Your request contains too many query string parameters. ErrCodeTooManyQueryStringParameters = "TooManyQueryStringParameters" + // ErrCodeTooManyQueryStringsInCachePolicy for service response error code + // "TooManyQueryStringsInCachePolicy". + // + // The number of query strings in the cache policy exceeds the maximum. For + // more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) + // (formerly known as limits) in the Amazon CloudFront Developer Guide. + ErrCodeTooManyQueryStringsInCachePolicy = "TooManyQueryStringsInCachePolicy" + + // ErrCodeTooManyQueryStringsInOriginRequestPolicy for service response error code + // "TooManyQueryStringsInOriginRequestPolicy". + // + // The number of query strings in the origin request policy exceeds the maximum. + // For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) + // (formerly known as limits) in the Amazon CloudFront Developer Guide. + ErrCodeTooManyQueryStringsInOriginRequestPolicy = "TooManyQueryStringsInOriginRequestPolicy" + + // ErrCodeTooManyRealtimeLogConfigs for service response error code + // "TooManyRealtimeLogConfigs". + // + // You have reached the maximum number of real-time log configurations for this + // AWS account. For more information, see Quotas (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/cloudfront-limits.html) + // (formerly known as limits) in the Amazon CloudFront Developer Guide. 
+ ErrCodeTooManyRealtimeLogConfigs = "TooManyRealtimeLogConfigs" + // ErrCodeTooManyStreamingDistributionCNAMEs for service response error code // "TooManyStreamingDistributionCNAMEs". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go index af2afbcfb..283385508 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restxml" ) @@ -63,7 +63,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, SigningRegion: signingRegion, PartitionID: partitionID, Endpoint: endpoint, - APIVersion: "2019-03-26", + APIVersion: "2020-05-31", }, handlers, ), diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/api.go index c5fb669db..7d3fdaac0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/api.go @@ -85,6 +85,8 @@ func (c *CloudHSMV2) CopyBackupToRegionRequest(input *CopyBackupToRegionInput) ( // The request was rejected because an error occurred. // // * CloudHsmTagException +// The request was rejected because of a tagging failure. Verify the tag conditions +// in all applicable policies, and then retry the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/cloudhsmv2-2017-04-28/CopyBackupToRegion func (c *CloudHSMV2) CopyBackupToRegion(input *CopyBackupToRegionInput) (*CopyBackupToRegionOutput, error) { @@ -180,6 +182,8 @@ func (c *CloudHSMV2) CreateClusterRequest(input *CreateClusterInput) (req *reque // The request was rejected because an error occurred. // // * CloudHsmTagException +// The request was rejected because of a tagging failure. Verify the tag conditions +// in all applicable policies, and then retry the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/cloudhsmv2-2017-04-28/CreateCluster func (c *CloudHSMV2) CreateCluster(input *CreateClusterInput) (*CreateClusterOutput, error) { @@ -466,6 +470,8 @@ func (c *CloudHSMV2) DeleteClusterRequest(input *DeleteClusterInput) (req *reque // The request was rejected because an error occurred. // // * CloudHsmTagException +// The request was rejected because of a tagging failure. Verify the tag conditions +// in all applicable policies, and then retry the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/cloudhsmv2-2017-04-28/DeleteCluster func (c *CloudHSMV2) DeleteCluster(input *DeleteClusterInput) (*DeleteClusterOutput, error) { @@ -670,6 +676,8 @@ func (c *CloudHSMV2) DescribeBackupsRequest(input *DescribeBackupsInput) (req *r // The request was rejected because an error occurred. // // * CloudHsmTagException +// The request was rejected because of a tagging failure. Verify the tag conditions +// in all applicable policies, and then retry the request. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/cloudhsmv2-2017-04-28/DescribeBackups func (c *CloudHSMV2) DescribeBackups(input *DescribeBackupsInput) (*DescribeBackupsOutput, error) { @@ -827,6 +835,8 @@ func (c *CloudHSMV2) DescribeClustersRequest(input *DescribeClustersInput) (req // The request was rejected because an error occurred. // // * CloudHsmTagException +// The request was rejected because of a tagging failure. Verify the tag conditions +// in all applicable policies, and then retry the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/cloudhsmv2-2017-04-28/DescribeClusters func (c *CloudHSMV2) DescribeClusters(input *DescribeClustersInput) (*DescribeClustersOutput, error) { @@ -1082,6 +1092,8 @@ func (c *CloudHSMV2) ListTagsRequest(input *ListTagsInput) (req *request.Request // The request was rejected because an error occurred. // // * CloudHsmTagException +// The request was rejected because of a tagging failure. Verify the tag conditions +// in all applicable policies, and then retry the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/cloudhsmv2-2017-04-28/ListTags func (c *CloudHSMV2) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { @@ -1324,6 +1336,8 @@ func (c *CloudHSMV2) TagResourceRequest(input *TagResourceInput) (req *request.R // The request was rejected because an error occurred. // // * CloudHsmTagException +// The request was rejected because of a tagging failure. Verify the tag conditions +// in all applicable policies, and then retry the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/cloudhsmv2-2017-04-28/TagResource func (c *CloudHSMV2) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { @@ -1420,6 +1434,8 @@ func (c *CloudHSMV2) UntagResourceRequest(input *UntagResourceInput) (req *reque // The request was rejected because an error occurred. // // * CloudHsmTagException +// The request was rejected because of a tagging failure. Verify the tag conditions +// in all applicable policies, and then retry the request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/cloudhsmv2-2017-04-28/UntagResource func (c *CloudHSMV2) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { @@ -1476,13 +1492,14 @@ type Backup struct { SourceBackup *string `type:"string"` // The identifier (ID) of the cluster containing the source backup from which - // the new backup was copied. . + // the new backup was copied. SourceCluster *string `type:"string"` - // The AWS region that contains the source backup from which the new backup + // The AWS Region that contains the source backup from which the new backup // was copied. SourceRegion *string `type:"string"` + // The list of tags for the backup. TagList []*Tag `min:"1" type:"list"` } @@ -1621,8 +1638,8 @@ func (s *Certificates) SetManufacturerHardwareCertificate(v string) *Certificate // The request was rejected because the requester does not have permission to // perform the requested operation. 
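The error codes documented in these hunks reach callers through the awserr interfaces, so they can be matched by code rather than by message text. A minimal sketch of branching on the CloudHSM tagging failure described above; the session setup and the cluster ID are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudhsmv2"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := cloudhsmv2.New(sess)

	_, err := svc.TagResource(&cloudhsmv2.TagResourceInput{
		ResourceId: aws.String("cluster-1234567890a"), // hypothetical cluster ID
		TagList: []*cloudhsmv2.Tag{
			{Key: aws.String("Name"), Value: aws.String("example")},
		},
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case cloudhsmv2.ErrCodeCloudHsmTagException:
			// Tagging failed; verify the tag conditions in applicable policies
			// and retry, as the documentation added above advises.
			fmt.Println("tag exception:", aerr.Message())
		default:
			fmt.Println("service error:", aerr.Code())
		}
	}
}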
type CloudHsmAccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1639,17 +1656,17 @@ func (s CloudHsmAccessDeniedException) GoString() string { func newErrorCloudHsmAccessDeniedException(v protocol.ResponseMetadata) error { return &CloudHsmAccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmAccessDeniedException) Code() string { +func (s *CloudHsmAccessDeniedException) Code() string { return "CloudHsmAccessDeniedException" } // Message returns the exception's message. -func (s CloudHsmAccessDeniedException) Message() string { +func (s *CloudHsmAccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1657,29 +1674,29 @@ func (s CloudHsmAccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmAccessDeniedException) OrigErr() error { +func (s *CloudHsmAccessDeniedException) OrigErr() error { return nil } -func (s CloudHsmAccessDeniedException) Error() string { +func (s *CloudHsmAccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmAccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmAccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmAccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmAccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because of an AWS CloudHSM internal failure. The // request can be retried. type CloudHsmInternalFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1696,17 +1713,17 @@ func (s CloudHsmInternalFailureException) GoString() string { func newErrorCloudHsmInternalFailureException(v protocol.ResponseMetadata) error { return &CloudHsmInternalFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmInternalFailureException) Code() string { +func (s *CloudHsmInternalFailureException) Code() string { return "CloudHsmInternalFailureException" } // Message returns the exception's message. -func (s CloudHsmInternalFailureException) Message() string { +func (s *CloudHsmInternalFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1714,28 +1731,28 @@ func (s CloudHsmInternalFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmInternalFailureException) OrigErr() error { +func (s *CloudHsmInternalFailureException) OrigErr() error { return nil } -func (s CloudHsmInternalFailureException) Error() string { +func (s *CloudHsmInternalFailureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s CloudHsmInternalFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmInternalFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmInternalFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmInternalFailureException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because it is not a valid request. type CloudHsmInvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1752,17 +1769,17 @@ func (s CloudHsmInvalidRequestException) GoString() string { func newErrorCloudHsmInvalidRequestException(v protocol.ResponseMetadata) error { return &CloudHsmInvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmInvalidRequestException) Code() string { +func (s *CloudHsmInvalidRequestException) Code() string { return "CloudHsmInvalidRequestException" } // Message returns the exception's message. -func (s CloudHsmInvalidRequestException) Message() string { +func (s *CloudHsmInvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1770,28 +1787,28 @@ func (s CloudHsmInvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmInvalidRequestException) OrigErr() error { +func (s *CloudHsmInvalidRequestException) OrigErr() error { return nil } -func (s CloudHsmInvalidRequestException) Error() string { +func (s *CloudHsmInvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmInvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmInvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmInvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmInvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because it refers to a resource that cannot be found. type CloudHsmResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1808,17 +1825,17 @@ func (s CloudHsmResourceNotFoundException) GoString() string { func newErrorCloudHsmResourceNotFoundException(v protocol.ResponseMetadata) error { return &CloudHsmResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmResourceNotFoundException) Code() string { +func (s *CloudHsmResourceNotFoundException) Code() string { return "CloudHsmResourceNotFoundException" } // Message returns the exception's message. 
-func (s CloudHsmResourceNotFoundException) Message() string { +func (s *CloudHsmResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1826,28 +1843,28 @@ func (s CloudHsmResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmResourceNotFoundException) OrigErr() error { +func (s *CloudHsmResourceNotFoundException) OrigErr() error { return nil } -func (s CloudHsmResourceNotFoundException) Error() string { +func (s *CloudHsmResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because an error occurred. type CloudHsmServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1864,17 +1881,17 @@ func (s CloudHsmServiceException) GoString() string { func newErrorCloudHsmServiceException(v protocol.ResponseMetadata) error { return &CloudHsmServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmServiceException) Code() string { +func (s *CloudHsmServiceException) Code() string { return "CloudHsmServiceException" } // Message returns the exception's message. -func (s CloudHsmServiceException) Message() string { +func (s *CloudHsmServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1882,27 +1899,29 @@ func (s CloudHsmServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmServiceException) OrigErr() error { +func (s *CloudHsmServiceException) OrigErr() error { return nil } -func (s CloudHsmServiceException) Error() string { +func (s *CloudHsmServiceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmServiceException) RequestID() string { + return s.RespMetadata.RequestID } +// The request was rejected because of a tagging failure. Verify the tag conditions +// in all applicable policies, and then retry the request. 
type CloudHsmTagException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1919,17 +1938,17 @@ func (s CloudHsmTagException) GoString() string { func newErrorCloudHsmTagException(v protocol.ResponseMetadata) error { return &CloudHsmTagException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmTagException) Code() string { +func (s *CloudHsmTagException) Code() string { return "CloudHsmTagException" } // Message returns the exception's message. -func (s CloudHsmTagException) Message() string { +func (s *CloudHsmTagException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1937,22 +1956,22 @@ func (s CloudHsmTagException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmTagException) OrigErr() error { +func (s *CloudHsmTagException) OrigErr() error { return nil } -func (s CloudHsmTagException) Error() string { +func (s *CloudHsmTagException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmTagException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmTagException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmTagException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmTagException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about an AWS CloudHSM cluster. @@ -1997,6 +2016,7 @@ type Cluster struct { // zone. SubnetMapping map[string]*string `type:"map"` + // The list of tags for the cluster. TagList []*Tag `min:"1" type:"list"` // The identifier (ID) of the virtual private cloud (VPC) that contains the @@ -2111,6 +2131,10 @@ type CopyBackupToRegionInput struct { // DestinationRegion is a required field DestinationRegion *string `type:"string" required:"true"` + // Tags to apply to the destination backup during creation. If you specify tags, + // only these tags will be applied to the destination backup. If you do not + // specify tags, the service copies tags from the source backup to the destination + // backup. TagList []*Tag `min:"1" type:"list"` } @@ -2226,6 +2250,7 @@ type CreateClusterInput struct { // SubnetIds is a required field SubnetIds []*string `min:"1" type:"list" required:"true"` + // Tags to apply to the CloudHSM cluster during creation. 
TagList []*Tag `min:"1" type:"list"` } @@ -3451,6 +3476,13 @@ const ( BackupPolicyDefault = "DEFAULT" ) +// BackupPolicy_Values returns all elements of the BackupPolicy enum +func BackupPolicy_Values() []string { + return []string{ + BackupPolicyDefault, + } +} + const ( // BackupStateCreateInProgress is a BackupState enum value BackupStateCreateInProgress = "CREATE_IN_PROGRESS" @@ -3465,6 +3497,16 @@ const ( BackupStatePendingDeletion = "PENDING_DELETION" ) +// BackupState_Values returns all elements of the BackupState enum +func BackupState_Values() []string { + return []string{ + BackupStateCreateInProgress, + BackupStateReady, + BackupStateDeleted, + BackupStatePendingDeletion, + } +} + const ( // ClusterStateCreateInProgress is a ClusterState enum value ClusterStateCreateInProgress = "CREATE_IN_PROGRESS" @@ -3494,6 +3536,21 @@ const ( ClusterStateDegraded = "DEGRADED" ) +// ClusterState_Values returns all elements of the ClusterState enum +func ClusterState_Values() []string { + return []string{ + ClusterStateCreateInProgress, + ClusterStateUninitialized, + ClusterStateInitializeInProgress, + ClusterStateInitialized, + ClusterStateActive, + ClusterStateUpdateInProgress, + ClusterStateDeleteInProgress, + ClusterStateDeleted, + ClusterStateDegraded, + } +} + const ( // HsmStateCreateInProgress is a HsmState enum value HsmStateCreateInProgress = "CREATE_IN_PROGRESS" @@ -3510,3 +3567,14 @@ const ( // HsmStateDeleted is a HsmState enum value HsmStateDeleted = "DELETED" ) + +// HsmState_Values returns all elements of the HsmState enum +func HsmState_Values() []string { + return []string{ + HsmStateCreateInProgress, + HsmStateActive, + HsmStateDegraded, + HsmStateDeleteInProgress, + HsmStateDeleted, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/errors.go b/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/errors.go index a99c421f3..9f72ea42e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/errors.go @@ -42,6 +42,9 @@ const ( // ErrCodeCloudHsmTagException for service response error code // "CloudHsmTagException". + // + // The request was rejected because of a tagging failure. Verify the tag conditions + // in all applicable policies, and then retry the request. 
ErrCodeCloudHsmTagException = "CloudHsmTagException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go index 26c4f41e0..6034df794 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/api.go index a39be361f..7cb304fd4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/api.go @@ -6625,6 +6625,16 @@ const ( AlgorithmicStemmingFull = "full" ) +// AlgorithmicStemming_Values returns all elements of the AlgorithmicStemming enum +func AlgorithmicStemming_Values() []string { + return []string{ + AlgorithmicStemmingNone, + AlgorithmicStemmingMinimal, + AlgorithmicStemmingLight, + AlgorithmicStemmingFull, + } +} + // An IETF RFC 4646 (http://tools.ietf.org/html/rfc4646) language code or mul // for multiple languages. const ( @@ -6734,6 +6744,47 @@ const ( AnalysisSchemeLanguageZhHant = "zh-Hant" ) +// AnalysisSchemeLanguage_Values returns all elements of the AnalysisSchemeLanguage enum +func AnalysisSchemeLanguage_Values() []string { + return []string{ + AnalysisSchemeLanguageAr, + AnalysisSchemeLanguageBg, + AnalysisSchemeLanguageCa, + AnalysisSchemeLanguageCs, + AnalysisSchemeLanguageDa, + AnalysisSchemeLanguageDe, + AnalysisSchemeLanguageEl, + AnalysisSchemeLanguageEn, + AnalysisSchemeLanguageEs, + AnalysisSchemeLanguageEu, + AnalysisSchemeLanguageFa, + AnalysisSchemeLanguageFi, + AnalysisSchemeLanguageFr, + AnalysisSchemeLanguageGa, + AnalysisSchemeLanguageGl, + AnalysisSchemeLanguageHe, + AnalysisSchemeLanguageHi, + AnalysisSchemeLanguageHu, + AnalysisSchemeLanguageHy, + AnalysisSchemeLanguageId, + AnalysisSchemeLanguageIt, + AnalysisSchemeLanguageJa, + AnalysisSchemeLanguageKo, + AnalysisSchemeLanguageLv, + AnalysisSchemeLanguageMul, + AnalysisSchemeLanguageNl, + AnalysisSchemeLanguageNo, + AnalysisSchemeLanguagePt, + AnalysisSchemeLanguageRo, + AnalysisSchemeLanguageRu, + AnalysisSchemeLanguageSv, + AnalysisSchemeLanguageTh, + AnalysisSchemeLanguageTr, + AnalysisSchemeLanguageZhHans, + AnalysisSchemeLanguageZhHant, + } +} + // The type of field. The valid options for a field depend on the field type. // For more information about the supported field types, see Configuring Index // Fields (http://docs.aws.amazon.com/cloudsearch/latest/developerguide/configuring-index-fields.html) @@ -6773,6 +6824,23 @@ const ( IndexFieldTypeDateArray = "date-array" ) +// IndexFieldType_Values returns all elements of the IndexFieldType enum +func IndexFieldType_Values() []string { + return []string{ + IndexFieldTypeInt, + IndexFieldTypeDouble, + IndexFieldTypeLiteral, + IndexFieldTypeText, + IndexFieldTypeDate, + IndexFieldTypeLatlon, + IndexFieldTypeIntArray, + IndexFieldTypeDoubleArray, + IndexFieldTypeLiteralArray, + IndexFieldTypeTextArray, + IndexFieldTypeDateArray, + } +} + // The state of processing a change to an option. 
One of: // // * RequiresIndexDocuments: The option's latest value will not be deployed @@ -6799,6 +6867,16 @@ const ( OptionStateFailedToValidate = "FailedToValidate" ) +// OptionState_Values returns all elements of the OptionState enum +func OptionState_Values() []string { + return []string{ + OptionStateRequiresIndexDocuments, + OptionStateProcessing, + OptionStateActive, + OptionStateFailedToValidate, + } +} + // The instance type (such as search.m1.small) on which an index partition is // hosted. const ( @@ -6827,6 +6905,20 @@ const ( PartitionInstanceTypeSearchM32xlarge = "search.m3.2xlarge" ) +// PartitionInstanceType_Values returns all elements of the PartitionInstanceType enum +func PartitionInstanceType_Values() []string { + return []string{ + PartitionInstanceTypeSearchM1Small, + PartitionInstanceTypeSearchM1Large, + PartitionInstanceTypeSearchM2Xlarge, + PartitionInstanceTypeSearchM22xlarge, + PartitionInstanceTypeSearchM3Medium, + PartitionInstanceTypeSearchM3Large, + PartitionInstanceTypeSearchM3Xlarge, + PartitionInstanceTypeSearchM32xlarge, + } +} + const ( // SuggesterFuzzyMatchingNone is a SuggesterFuzzyMatching enum value SuggesterFuzzyMatchingNone = "none" @@ -6838,6 +6930,15 @@ const ( SuggesterFuzzyMatchingHigh = "high" ) +// SuggesterFuzzyMatching_Values returns all elements of the SuggesterFuzzyMatching enum +func SuggesterFuzzyMatching_Values() []string { + return []string{ + SuggesterFuzzyMatchingNone, + SuggesterFuzzyMatchingLow, + SuggesterFuzzyMatchingHigh, + } +} + // The minimum required TLS version. const ( // TLSSecurityPolicyPolicyMinTls10201907 is a TLSSecurityPolicy enum value @@ -6846,3 +6947,11 @@ const ( // TLSSecurityPolicyPolicyMinTls12201907 is a TLSSecurityPolicy enum value TLSSecurityPolicyPolicyMinTls12201907 = "Policy-Min-TLS-1-2-2019-07" ) + +// TLSSecurityPolicy_Values returns all elements of the TLSSecurityPolicy enum +func TLSSecurityPolicy_Values() []string { + return []string{ + TLSSecurityPolicyPolicyMinTls10201907, + TLSSecurityPolicyPolicyMinTls12201907, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go index e0a62d1a2..e260fc33d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go index 8b227be26..5c1a5889e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go @@ -2524,8 +2524,8 @@ func (c *CloudTrail) UpdateTrailWithContext(ctx aws.Context, input *UpdateTrailI // // arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail type ARNInvalidException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2542,17 +2542,17 @@ func (s ARNInvalidException) GoString() string { func newErrorARNInvalidException(v protocol.ResponseMetadata) error { return &ARNInvalidException{ 
- respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ARNInvalidException) Code() string { +func (s *ARNInvalidException) Code() string { return "CloudTrailARNInvalidException" } // Message returns the exception's message. -func (s ARNInvalidException) Message() string { +func (s *ARNInvalidException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2560,22 +2560,22 @@ func (s ARNInvalidException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ARNInvalidException) OrigErr() error { +func (s *ARNInvalidException) OrigErr() error { return nil } -func (s ARNInvalidException) Error() string { +func (s *ARNInvalidException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ARNInvalidException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ARNInvalidException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ARNInvalidException) RequestID() string { - return s.respMetadata.RequestID +func (s *ARNInvalidException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when trusted access has not been enabled between @@ -2583,8 +2583,8 @@ func (s ARNInvalidException) RequestID() string { // Trusted Access with Other AWS Services (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html) // and Prepare For Creating a Trail For Your Organization (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/creating-an-organizational-trail-prepare.html). type AccessNotEnabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2601,17 +2601,17 @@ func (s AccessNotEnabledException) GoString() string { func newErrorAccessNotEnabledException(v protocol.ResponseMetadata) error { return &AccessNotEnabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessNotEnabledException) Code() string { +func (s *AccessNotEnabledException) Code() string { return "CloudTrailAccessNotEnabledException" } // Message returns the exception's message. -func (s AccessNotEnabledException) Message() string { +func (s *AccessNotEnabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2619,22 +2619,22 @@ func (s AccessNotEnabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessNotEnabledException) OrigErr() error { +func (s *AccessNotEnabledException) OrigErr() error { return nil } -func (s AccessNotEnabledException) Error() string { +func (s *AccessNotEnabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessNotEnabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessNotEnabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s AccessNotEnabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessNotEnabledException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the tags to add to a trail. @@ -2716,8 +2716,8 @@ func (s AddTagsOutput) GoString() string { // Cannot set a CloudWatch Logs delivery for this region. type CloudWatchLogsDeliveryUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2734,17 +2734,17 @@ func (s CloudWatchLogsDeliveryUnavailableException) GoString() string { func newErrorCloudWatchLogsDeliveryUnavailableException(v protocol.ResponseMetadata) error { return &CloudWatchLogsDeliveryUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudWatchLogsDeliveryUnavailableException) Code() string { +func (s *CloudWatchLogsDeliveryUnavailableException) Code() string { return "CloudWatchLogsDeliveryUnavailableException" } // Message returns the exception's message. -func (s CloudWatchLogsDeliveryUnavailableException) Message() string { +func (s *CloudWatchLogsDeliveryUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2752,22 +2752,22 @@ func (s CloudWatchLogsDeliveryUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudWatchLogsDeliveryUnavailableException) OrigErr() error { +func (s *CloudWatchLogsDeliveryUnavailableException) OrigErr() error { return nil } -func (s CloudWatchLogsDeliveryUnavailableException) Error() string { +func (s *CloudWatchLogsDeliveryUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudWatchLogsDeliveryUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudWatchLogsDeliveryUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudWatchLogsDeliveryUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudWatchLogsDeliveryUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the settings for each trail. @@ -4026,8 +4026,8 @@ func (s *GetTrailStatusOutput) SetTimeLoggingStopped(v string) *GetTrailStatusOu // If you run GetInsightSelectors on a trail that does not have Insights events // enabled, the operation throws the exception InsightNotEnabledException. type InsightNotEnabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4044,17 +4044,17 @@ func (s InsightNotEnabledException) GoString() string { func newErrorInsightNotEnabledException(v protocol.ResponseMetadata) error { return &InsightNotEnabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InsightNotEnabledException) Code() string { +func (s *InsightNotEnabledException) Code() string { return "InsightNotEnabledException" } // Message returns the exception's message. 
-func (s InsightNotEnabledException) Message() string { +func (s *InsightNotEnabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4062,22 +4062,22 @@ func (s InsightNotEnabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InsightNotEnabledException) OrigErr() error { +func (s *InsightNotEnabledException) OrigErr() error { return nil } -func (s InsightNotEnabledException) Error() string { +func (s *InsightNotEnabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InsightNotEnabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InsightNotEnabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InsightNotEnabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *InsightNotEnabledException) RequestID() string { + return s.RespMetadata.RequestID } // A JSON string that contains a list of insight types that are logged on a @@ -4111,8 +4111,8 @@ func (s *InsightSelector) SetInsightType(v string) *InsightSelector { // an organization trail in a required service. For more information, see Prepare // For Creating a Trail For Your Organization (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/creating-an-organizational-trail-prepare.html). type InsufficientDependencyServiceAccessPermissionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4129,17 +4129,17 @@ func (s InsufficientDependencyServiceAccessPermissionException) GoString() strin func newErrorInsufficientDependencyServiceAccessPermissionException(v protocol.ResponseMetadata) error { return &InsufficientDependencyServiceAccessPermissionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InsufficientDependencyServiceAccessPermissionException) Code() string { +func (s *InsufficientDependencyServiceAccessPermissionException) Code() string { return "InsufficientDependencyServiceAccessPermissionException" } // Message returns the exception's message. -func (s InsufficientDependencyServiceAccessPermissionException) Message() string { +func (s *InsufficientDependencyServiceAccessPermissionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4147,29 +4147,29 @@ func (s InsufficientDependencyServiceAccessPermissionException) Message() string } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InsufficientDependencyServiceAccessPermissionException) OrigErr() error { +func (s *InsufficientDependencyServiceAccessPermissionException) OrigErr() error { return nil } -func (s InsufficientDependencyServiceAccessPermissionException) Error() string { +func (s *InsufficientDependencyServiceAccessPermissionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InsufficientDependencyServiceAccessPermissionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InsufficientDependencyServiceAccessPermissionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InsufficientDependencyServiceAccessPermissionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InsufficientDependencyServiceAccessPermissionException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the policy on the S3 bucket or KMS key is not // sufficient. type InsufficientEncryptionPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4186,17 +4186,17 @@ func (s InsufficientEncryptionPolicyException) GoString() string { func newErrorInsufficientEncryptionPolicyException(v protocol.ResponseMetadata) error { return &InsufficientEncryptionPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InsufficientEncryptionPolicyException) Code() string { +func (s *InsufficientEncryptionPolicyException) Code() string { return "InsufficientEncryptionPolicyException" } // Message returns the exception's message. -func (s InsufficientEncryptionPolicyException) Message() string { +func (s *InsufficientEncryptionPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4204,28 +4204,28 @@ func (s InsufficientEncryptionPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InsufficientEncryptionPolicyException) OrigErr() error { +func (s *InsufficientEncryptionPolicyException) OrigErr() error { return nil } -func (s InsufficientEncryptionPolicyException) Error() string { +func (s *InsufficientEncryptionPolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InsufficientEncryptionPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InsufficientEncryptionPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InsufficientEncryptionPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *InsufficientEncryptionPolicyException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the policy on the S3 bucket is not sufficient. type InsufficientS3BucketPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4242,17 +4242,17 @@ func (s InsufficientS3BucketPolicyException) GoString() string { func newErrorInsufficientS3BucketPolicyException(v protocol.ResponseMetadata) error { return &InsufficientS3BucketPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InsufficientS3BucketPolicyException) Code() string { +func (s *InsufficientS3BucketPolicyException) Code() string { return "InsufficientS3BucketPolicyException" } // Message returns the exception's message. 
-func (s InsufficientS3BucketPolicyException) Message() string { +func (s *InsufficientS3BucketPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4260,28 +4260,28 @@ func (s InsufficientS3BucketPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InsufficientS3BucketPolicyException) OrigErr() error { +func (s *InsufficientS3BucketPolicyException) OrigErr() error { return nil } -func (s InsufficientS3BucketPolicyException) Error() string { +func (s *InsufficientS3BucketPolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InsufficientS3BucketPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InsufficientS3BucketPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InsufficientS3BucketPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *InsufficientS3BucketPolicyException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the policy on the SNS topic is not sufficient. type InsufficientSnsTopicPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4298,17 +4298,17 @@ func (s InsufficientSnsTopicPolicyException) GoString() string { func newErrorInsufficientSnsTopicPolicyException(v protocol.ResponseMetadata) error { return &InsufficientSnsTopicPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InsufficientSnsTopicPolicyException) Code() string { +func (s *InsufficientSnsTopicPolicyException) Code() string { return "InsufficientSnsTopicPolicyException" } // Message returns the exception's message. -func (s InsufficientSnsTopicPolicyException) Message() string { +func (s *InsufficientSnsTopicPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4316,28 +4316,28 @@ func (s InsufficientSnsTopicPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InsufficientSnsTopicPolicyException) OrigErr() error { +func (s *InsufficientSnsTopicPolicyException) OrigErr() error { return nil } -func (s InsufficientSnsTopicPolicyException) Error() string { +func (s *InsufficientSnsTopicPolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InsufficientSnsTopicPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InsufficientSnsTopicPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InsufficientSnsTopicPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *InsufficientSnsTopicPolicyException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the provided CloudWatch log group is not valid. 
type InvalidCloudWatchLogsLogGroupArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4354,17 +4354,17 @@ func (s InvalidCloudWatchLogsLogGroupArnException) GoString() string { func newErrorInvalidCloudWatchLogsLogGroupArnException(v protocol.ResponseMetadata) error { return &InvalidCloudWatchLogsLogGroupArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidCloudWatchLogsLogGroupArnException) Code() string { +func (s *InvalidCloudWatchLogsLogGroupArnException) Code() string { return "InvalidCloudWatchLogsLogGroupArnException" } // Message returns the exception's message. -func (s InvalidCloudWatchLogsLogGroupArnException) Message() string { +func (s *InvalidCloudWatchLogsLogGroupArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4372,28 +4372,28 @@ func (s InvalidCloudWatchLogsLogGroupArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidCloudWatchLogsLogGroupArnException) OrigErr() error { +func (s *InvalidCloudWatchLogsLogGroupArnException) OrigErr() error { return nil } -func (s InvalidCloudWatchLogsLogGroupArnException) Error() string { +func (s *InvalidCloudWatchLogsLogGroupArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidCloudWatchLogsLogGroupArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidCloudWatchLogsLogGroupArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidCloudWatchLogsLogGroupArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidCloudWatchLogsLogGroupArnException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the provided role is not valid. type InvalidCloudWatchLogsRoleArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4410,17 +4410,17 @@ func (s InvalidCloudWatchLogsRoleArnException) GoString() string { func newErrorInvalidCloudWatchLogsRoleArnException(v protocol.ResponseMetadata) error { return &InvalidCloudWatchLogsRoleArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidCloudWatchLogsRoleArnException) Code() string { +func (s *InvalidCloudWatchLogsRoleArnException) Code() string { return "InvalidCloudWatchLogsRoleArnException" } // Message returns the exception's message. -func (s InvalidCloudWatchLogsRoleArnException) Message() string { +func (s *InvalidCloudWatchLogsRoleArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4428,29 +4428,29 @@ func (s InvalidCloudWatchLogsRoleArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidCloudWatchLogsRoleArnException) OrigErr() error { +func (s *InvalidCloudWatchLogsRoleArnException) OrigErr() error { return nil } -func (s InvalidCloudWatchLogsRoleArnException) Error() string { +func (s *InvalidCloudWatchLogsRoleArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidCloudWatchLogsRoleArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidCloudWatchLogsRoleArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidCloudWatchLogsRoleArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidCloudWatchLogsRoleArnException) RequestID() string { + return s.RespMetadata.RequestID } // Occurs if an event category that is not valid is specified as a value of // EventCategory. type InvalidEventCategoryException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4467,17 +4467,17 @@ func (s InvalidEventCategoryException) GoString() string { func newErrorInvalidEventCategoryException(v protocol.ResponseMetadata) error { return &InvalidEventCategoryException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidEventCategoryException) Code() string { +func (s *InvalidEventCategoryException) Code() string { return "InvalidEventCategoryException" } // Message returns the exception's message. -func (s InvalidEventCategoryException) Message() string { +func (s *InvalidEventCategoryException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4485,22 +4485,22 @@ func (s InvalidEventCategoryException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidEventCategoryException) OrigErr() error { +func (s *InvalidEventCategoryException) OrigErr() error { return nil } -func (s InvalidEventCategoryException) Error() string { +func (s *InvalidEventCategoryException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidEventCategoryException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidEventCategoryException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidEventCategoryException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidEventCategoryException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the PutEventSelectors operation is called with @@ -4523,8 +4523,8 @@ func (s InvalidEventCategoryException) RequestID() string { // * Specify a valid value for a parameter. For example, specifying the ReadWriteType // parameter with a value of read-only is invalid. 
type InvalidEventSelectorsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4541,17 +4541,17 @@ func (s InvalidEventSelectorsException) GoString() string { func newErrorInvalidEventSelectorsException(v protocol.ResponseMetadata) error { return &InvalidEventSelectorsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidEventSelectorsException) Code() string { +func (s *InvalidEventSelectorsException) Code() string { return "InvalidEventSelectorsException" } // Message returns the exception's message. -func (s InvalidEventSelectorsException) Message() string { +func (s *InvalidEventSelectorsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4559,29 +4559,29 @@ func (s InvalidEventSelectorsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidEventSelectorsException) OrigErr() error { +func (s *InvalidEventSelectorsException) OrigErr() error { return nil } -func (s InvalidEventSelectorsException) Error() string { +func (s *InvalidEventSelectorsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidEventSelectorsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidEventSelectorsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidEventSelectorsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidEventSelectorsException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when an operation is called on a trail from a region // other than the region in which the trail was created. type InvalidHomeRegionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4598,17 +4598,17 @@ func (s InvalidHomeRegionException) GoString() string { func newErrorInvalidHomeRegionException(v protocol.ResponseMetadata) error { return &InvalidHomeRegionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidHomeRegionException) Code() string { +func (s *InvalidHomeRegionException) Code() string { return "InvalidHomeRegionException" } // Message returns the exception's message. -func (s InvalidHomeRegionException) Message() string { +func (s *InvalidHomeRegionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4616,30 +4616,30 @@ func (s InvalidHomeRegionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidHomeRegionException) OrigErr() error { +func (s *InvalidHomeRegionException) OrigErr() error { return nil } -func (s InvalidHomeRegionException) Error() string { +func (s *InvalidHomeRegionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidHomeRegionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidHomeRegionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidHomeRegionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidHomeRegionException) RequestID() string { + return s.RespMetadata.RequestID } // The formatting or syntax of the InsightSelectors JSON statement in your PutInsightSelectors // or GetInsightSelectors request is not valid, or the specified insight type // in the InsightSelectors statement is not a valid insight type. type InvalidInsightSelectorsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4656,17 +4656,17 @@ func (s InvalidInsightSelectorsException) GoString() string { func newErrorInvalidInsightSelectorsException(v protocol.ResponseMetadata) error { return &InvalidInsightSelectorsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInsightSelectorsException) Code() string { +func (s *InvalidInsightSelectorsException) Code() string { return "InvalidInsightSelectorsException" } // Message returns the exception's message. -func (s InvalidInsightSelectorsException) Message() string { +func (s *InvalidInsightSelectorsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4674,28 +4674,28 @@ func (s InvalidInsightSelectorsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInsightSelectorsException) OrigErr() error { +func (s *InvalidInsightSelectorsException) OrigErr() error { return nil } -func (s InvalidInsightSelectorsException) Error() string { +func (s *InvalidInsightSelectorsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInsightSelectorsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInsightSelectorsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInsightSelectorsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInsightSelectorsException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the KMS key ARN is invalid. type InvalidKmsKeyIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4712,17 +4712,17 @@ func (s InvalidKmsKeyIdException) GoString() string { func newErrorInvalidKmsKeyIdException(v protocol.ResponseMetadata) error { return &InvalidKmsKeyIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidKmsKeyIdException) Code() string { +func (s *InvalidKmsKeyIdException) Code() string { return "InvalidKmsKeyIdException" } // Message returns the exception's message. 
-func (s InvalidKmsKeyIdException) Message() string { +func (s *InvalidKmsKeyIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4730,28 +4730,28 @@ func (s InvalidKmsKeyIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidKmsKeyIdException) OrigErr() error { +func (s *InvalidKmsKeyIdException) OrigErr() error { return nil } -func (s InvalidKmsKeyIdException) Error() string { +func (s *InvalidKmsKeyIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidKmsKeyIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidKmsKeyIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidKmsKeyIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidKmsKeyIdException) RequestID() string { + return s.RespMetadata.RequestID } // Occurs when an invalid lookup attribute is specified. type InvalidLookupAttributesException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4768,17 +4768,17 @@ func (s InvalidLookupAttributesException) GoString() string { func newErrorInvalidLookupAttributesException(v protocol.ResponseMetadata) error { return &InvalidLookupAttributesException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidLookupAttributesException) Code() string { +func (s *InvalidLookupAttributesException) Code() string { return "InvalidLookupAttributesException" } // Message returns the exception's message. -func (s InvalidLookupAttributesException) Message() string { +func (s *InvalidLookupAttributesException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4786,28 +4786,28 @@ func (s InvalidLookupAttributesException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidLookupAttributesException) OrigErr() error { +func (s *InvalidLookupAttributesException) OrigErr() error { return nil } -func (s InvalidLookupAttributesException) Error() string { +func (s *InvalidLookupAttributesException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidLookupAttributesException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidLookupAttributesException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidLookupAttributesException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidLookupAttributesException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown if the limit specified is invalid. 
type InvalidMaxResultsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4824,17 +4824,17 @@ func (s InvalidMaxResultsException) GoString() string { func newErrorInvalidMaxResultsException(v protocol.ResponseMetadata) error { return &InvalidMaxResultsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidMaxResultsException) Code() string { +func (s *InvalidMaxResultsException) Code() string { return "InvalidMaxResultsException" } // Message returns the exception's message. -func (s InvalidMaxResultsException) Message() string { +func (s *InvalidMaxResultsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4842,29 +4842,29 @@ func (s InvalidMaxResultsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidMaxResultsException) OrigErr() error { +func (s *InvalidMaxResultsException) OrigErr() error { return nil } -func (s InvalidMaxResultsException) Error() string { +func (s *InvalidMaxResultsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidMaxResultsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidMaxResultsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidMaxResultsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidMaxResultsException) RequestID() string { + return s.RespMetadata.RequestID } // Invalid token or token that was previously used in a request with different // parameters. This exception is thrown if the token is invalid. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4881,17 +4881,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4899,29 +4899,29 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the combination of parameters provided is not // valid. type InvalidParameterCombinationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4938,17 +4938,17 @@ func (s InvalidParameterCombinationException) GoString() string { func newErrorInvalidParameterCombinationException(v protocol.ResponseMetadata) error { return &InvalidParameterCombinationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterCombinationException) Code() string { +func (s *InvalidParameterCombinationException) Code() string { return "InvalidParameterCombinationException" } // Message returns the exception's message. -func (s InvalidParameterCombinationException) Message() string { +func (s *InvalidParameterCombinationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4956,28 +4956,28 @@ func (s InvalidParameterCombinationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterCombinationException) OrigErr() error { +func (s *InvalidParameterCombinationException) OrigErr() error { return nil } -func (s InvalidParameterCombinationException) Error() string { +func (s *InvalidParameterCombinationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterCombinationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterCombinationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterCombinationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterCombinationException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the provided S3 bucket name is not valid. type InvalidS3BucketNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4994,17 +4994,17 @@ func (s InvalidS3BucketNameException) GoString() string { func newErrorInvalidS3BucketNameException(v protocol.ResponseMetadata) error { return &InvalidS3BucketNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidS3BucketNameException) Code() string { +func (s *InvalidS3BucketNameException) Code() string { return "InvalidS3BucketNameException" } // Message returns the exception's message. 
-func (s InvalidS3BucketNameException) Message() string { +func (s *InvalidS3BucketNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5012,28 +5012,28 @@ func (s InvalidS3BucketNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidS3BucketNameException) OrigErr() error { +func (s *InvalidS3BucketNameException) OrigErr() error { return nil } -func (s InvalidS3BucketNameException) Error() string { +func (s *InvalidS3BucketNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidS3BucketNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidS3BucketNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidS3BucketNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidS3BucketNameException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the provided S3 prefix is not valid. type InvalidS3PrefixException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5050,17 +5050,17 @@ func (s InvalidS3PrefixException) GoString() string { func newErrorInvalidS3PrefixException(v protocol.ResponseMetadata) error { return &InvalidS3PrefixException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidS3PrefixException) Code() string { +func (s *InvalidS3PrefixException) Code() string { return "InvalidS3PrefixException" } // Message returns the exception's message. -func (s InvalidS3PrefixException) Message() string { +func (s *InvalidS3PrefixException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5068,28 +5068,28 @@ func (s InvalidS3PrefixException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidS3PrefixException) OrigErr() error { +func (s *InvalidS3PrefixException) OrigErr() error { return nil } -func (s InvalidS3PrefixException) Error() string { +func (s *InvalidS3PrefixException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidS3PrefixException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidS3PrefixException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidS3PrefixException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidS3PrefixException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the provided SNS topic name is not valid. 
type InvalidSnsTopicNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5106,17 +5106,17 @@ func (s InvalidSnsTopicNameException) GoString() string { func newErrorInvalidSnsTopicNameException(v protocol.ResponseMetadata) error { return &InvalidSnsTopicNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSnsTopicNameException) Code() string { +func (s *InvalidSnsTopicNameException) Code() string { return "InvalidSnsTopicNameException" } // Message returns the exception's message. -func (s InvalidSnsTopicNameException) Message() string { +func (s *InvalidSnsTopicNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5124,29 +5124,29 @@ func (s InvalidSnsTopicNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidSnsTopicNameException) OrigErr() error { +func (s *InvalidSnsTopicNameException) OrigErr() error { return nil } -func (s InvalidSnsTopicNameException) Error() string { +func (s *InvalidSnsTopicNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSnsTopicNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSnsTopicNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSnsTopicNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSnsTopicNameException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the specified tag key or values are not valid. // It can also occur if there are duplicate tags or too many tags on the resource. type InvalidTagParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5163,17 +5163,17 @@ func (s InvalidTagParameterException) GoString() string { func newErrorInvalidTagParameterException(v protocol.ResponseMetadata) error { return &InvalidTagParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTagParameterException) Code() string { +func (s *InvalidTagParameterException) Code() string { return "InvalidTagParameterException" } // Message returns the exception's message. -func (s InvalidTagParameterException) Message() string { +func (s *InvalidTagParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5181,29 +5181,29 @@ func (s InvalidTagParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTagParameterException) OrigErr() error { +func (s *InvalidTagParameterException) OrigErr() error { return nil } -func (s InvalidTagParameterException) Error() string { +func (s *InvalidTagParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidTagParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTagParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTagParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTagParameterException) RequestID() string { + return s.RespMetadata.RequestID } // Occurs if the timestamp values are invalid. Either the start time occurs // after the end time or the time range is outside the range of possible values. type InvalidTimeRangeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5220,17 +5220,17 @@ func (s InvalidTimeRangeException) GoString() string { func newErrorInvalidTimeRangeException(v protocol.ResponseMetadata) error { return &InvalidTimeRangeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTimeRangeException) Code() string { +func (s *InvalidTimeRangeException) Code() string { return "InvalidTimeRangeException" } // Message returns the exception's message. -func (s InvalidTimeRangeException) Message() string { +func (s *InvalidTimeRangeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5238,28 +5238,28 @@ func (s InvalidTimeRangeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTimeRangeException) OrigErr() error { +func (s *InvalidTimeRangeException) OrigErr() error { return nil } -func (s InvalidTimeRangeException) Error() string { +func (s *InvalidTimeRangeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTimeRangeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTimeRangeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTimeRangeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTimeRangeException) RequestID() string { + return s.RespMetadata.RequestID } // Reserved for future use. type InvalidTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5276,17 +5276,17 @@ func (s InvalidTokenException) GoString() string { func newErrorInvalidTokenException(v protocol.ResponseMetadata) error { return &InvalidTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTokenException) Code() string { +func (s *InvalidTokenException) Code() string { return "InvalidTokenException" } // Message returns the exception's message. -func (s InvalidTokenException) Message() string { +func (s *InvalidTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5294,22 +5294,22 @@ func (s InvalidTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidTokenException) OrigErr() error { +func (s *InvalidTokenException) OrigErr() error { return nil } -func (s InvalidTokenException) Error() string { +func (s *InvalidTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTokenException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the provided trail name is not valid. Trail @@ -5327,8 +5327,8 @@ func (s InvalidTokenException) RequestID() string { // // * Not be in IP address format (for example, 192.168.5.4) type InvalidTrailNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5345,17 +5345,17 @@ func (s InvalidTrailNameException) GoString() string { func newErrorInvalidTrailNameException(v protocol.ResponseMetadata) error { return &InvalidTrailNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTrailNameException) Code() string { +func (s *InvalidTrailNameException) Code() string { return "InvalidTrailNameException" } // Message returns the exception's message. -func (s InvalidTrailNameException) Message() string { +func (s *InvalidTrailNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5363,29 +5363,29 @@ func (s InvalidTrailNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTrailNameException) OrigErr() error { +func (s *InvalidTrailNameException) OrigErr() error { return nil } -func (s InvalidTrailNameException) Error() string { +func (s *InvalidTrailNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTrailNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTrailNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTrailNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTrailNameException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when there is an issue with the specified KMS key // and the trail can’t be updated. type KmsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5402,17 +5402,17 @@ func (s KmsException) GoString() string { func newErrorKmsException(v protocol.ResponseMetadata) error { return &KmsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KmsException) Code() string { +func (s *KmsException) Code() string { return "KmsException" } // Message returns the exception's message. 
-func (s KmsException) Message() string { +func (s *KmsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5420,30 +5420,30 @@ func (s KmsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KmsException) OrigErr() error { +func (s *KmsException) OrigErr() error { return nil } -func (s KmsException) Error() string { +func (s *KmsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KmsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KmsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KmsException) RequestID() string { - return s.respMetadata.RequestID +func (s *KmsException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is no longer in use. // // Deprecated: KmsKeyDisabledException has been deprecated type KmsKeyDisabledException struct { - _ struct{} `deprecated:"true" type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `deprecated:"true" type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5460,17 +5460,17 @@ func (s KmsKeyDisabledException) GoString() string { func newErrorKmsKeyDisabledException(v protocol.ResponseMetadata) error { return &KmsKeyDisabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KmsKeyDisabledException) Code() string { +func (s *KmsKeyDisabledException) Code() string { return "KmsKeyDisabledException" } // Message returns the exception's message. -func (s KmsKeyDisabledException) Message() string { +func (s *KmsKeyDisabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5478,29 +5478,29 @@ func (s KmsKeyDisabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KmsKeyDisabledException) OrigErr() error { +func (s *KmsKeyDisabledException) OrigErr() error { return nil } -func (s KmsKeyDisabledException) Error() string { +func (s *KmsKeyDisabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KmsKeyDisabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KmsKeyDisabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KmsKeyDisabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *KmsKeyDisabledException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the KMS key does not exist, or when the S3 // bucket and the KMS key are not in the same region. type KmsKeyNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5517,17 +5517,17 @@ func (s KmsKeyNotFoundException) GoString() string { func newErrorKmsKeyNotFoundException(v protocol.ResponseMetadata) error { return &KmsKeyNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s KmsKeyNotFoundException) Code() string { +func (s *KmsKeyNotFoundException) Code() string { return "KmsKeyNotFoundException" } // Message returns the exception's message. -func (s KmsKeyNotFoundException) Message() string { +func (s *KmsKeyNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5535,22 +5535,22 @@ func (s KmsKeyNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KmsKeyNotFoundException) OrigErr() error { +func (s *KmsKeyNotFoundException) OrigErr() error { return nil } -func (s KmsKeyNotFoundException) Error() string { +func (s *KmsKeyNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KmsKeyNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KmsKeyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KmsKeyNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *KmsKeyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Requests the public keys for a specified time range. @@ -5981,8 +5981,8 @@ func (s *LookupEventsOutput) SetNextToken(v string) *LookupEventsOutput { // This exception is thrown when the maximum number of trails is reached. type MaximumNumberOfTrailsExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5999,17 +5999,17 @@ func (s MaximumNumberOfTrailsExceededException) GoString() string { func newErrorMaximumNumberOfTrailsExceededException(v protocol.ResponseMetadata) error { return &MaximumNumberOfTrailsExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaximumNumberOfTrailsExceededException) Code() string { +func (s *MaximumNumberOfTrailsExceededException) Code() string { return "MaximumNumberOfTrailsExceededException" } // Message returns the exception's message. -func (s MaximumNumberOfTrailsExceededException) Message() string { +func (s *MaximumNumberOfTrailsExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6017,22 +6017,22 @@ func (s MaximumNumberOfTrailsExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaximumNumberOfTrailsExceededException) OrigErr() error { +func (s *MaximumNumberOfTrailsExceededException) OrigErr() error { return nil } -func (s MaximumNumberOfTrailsExceededException) Error() string { +func (s *MaximumNumberOfTrailsExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaximumNumberOfTrailsExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaximumNumberOfTrailsExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s MaximumNumberOfTrailsExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaximumNumberOfTrailsExceededException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the AWS account making the request to create @@ -6040,8 +6040,8 @@ func (s MaximumNumberOfTrailsExceededException) RequestID() string { // in AWS Organizations. For more information, see Prepare For Creating a Trail // For Your Organization (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/creating-an-organizational-trail-prepare.html). type NotOrganizationMasterAccountException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6058,17 +6058,17 @@ func (s NotOrganizationMasterAccountException) GoString() string { func newErrorNotOrganizationMasterAccountException(v protocol.ResponseMetadata) error { return &NotOrganizationMasterAccountException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotOrganizationMasterAccountException) Code() string { +func (s *NotOrganizationMasterAccountException) Code() string { return "NotOrganizationMasterAccountException" } // Message returns the exception's message. -func (s NotOrganizationMasterAccountException) Message() string { +func (s *NotOrganizationMasterAccountException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6076,28 +6076,28 @@ func (s NotOrganizationMasterAccountException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotOrganizationMasterAccountException) OrigErr() error { +func (s *NotOrganizationMasterAccountException) OrigErr() error { return nil } -func (s NotOrganizationMasterAccountException) Error() string { +func (s *NotOrganizationMasterAccountException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotOrganizationMasterAccountException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotOrganizationMasterAccountException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotOrganizationMasterAccountException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotOrganizationMasterAccountException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the requested operation is not permitted. type OperationNotPermittedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6114,17 +6114,17 @@ func (s OperationNotPermittedException) GoString() string { func newErrorOperationNotPermittedException(v protocol.ResponseMetadata) error { return &OperationNotPermittedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationNotPermittedException) Code() string { +func (s *OperationNotPermittedException) Code() string { return "OperationNotPermittedException" } // Message returns the exception's message. 
-func (s OperationNotPermittedException) Message() string { +func (s *OperationNotPermittedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6132,22 +6132,22 @@ func (s OperationNotPermittedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OperationNotPermittedException) OrigErr() error { +func (s *OperationNotPermittedException) OrigErr() error { return nil } -func (s OperationNotPermittedException) Error() string { +func (s *OperationNotPermittedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OperationNotPermittedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OperationNotPermittedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationNotPermittedException) RequestID() string { - return s.respMetadata.RequestID +func (s *OperationNotPermittedException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when AWS Organizations is not configured to support @@ -6155,8 +6155,8 @@ func (s OperationNotPermittedException) RequestID() string { // creating an organization trail. For more information, see Prepare For Creating // a Trail For Your Organization (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/creating-an-organizational-trail-prepare.html). type OrganizationNotInAllFeaturesModeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6173,17 +6173,17 @@ func (s OrganizationNotInAllFeaturesModeException) GoString() string { func newErrorOrganizationNotInAllFeaturesModeException(v protocol.ResponseMetadata) error { return &OrganizationNotInAllFeaturesModeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OrganizationNotInAllFeaturesModeException) Code() string { +func (s *OrganizationNotInAllFeaturesModeException) Code() string { return "OrganizationNotInAllFeaturesModeException" } // Message returns the exception's message. -func (s OrganizationNotInAllFeaturesModeException) Message() string { +func (s *OrganizationNotInAllFeaturesModeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6191,30 +6191,30 @@ func (s OrganizationNotInAllFeaturesModeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OrganizationNotInAllFeaturesModeException) OrigErr() error { +func (s *OrganizationNotInAllFeaturesModeException) OrigErr() error { return nil } -func (s OrganizationNotInAllFeaturesModeException) Error() string { +func (s *OrganizationNotInAllFeaturesModeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OrganizationNotInAllFeaturesModeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OrganizationNotInAllFeaturesModeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s OrganizationNotInAllFeaturesModeException) RequestID() string { - return s.respMetadata.RequestID +func (s *OrganizationNotInAllFeaturesModeException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the request is made from an AWS account that // is not a member of an organization. To make this request, sign in using the // credentials of an account that belongs to an organization. type OrganizationsNotInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6231,17 +6231,17 @@ func (s OrganizationsNotInUseException) GoString() string { func newErrorOrganizationsNotInUseException(v protocol.ResponseMetadata) error { return &OrganizationsNotInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OrganizationsNotInUseException) Code() string { +func (s *OrganizationsNotInUseException) Code() string { return "OrganizationsNotInUseException" } // Message returns the exception's message. -func (s OrganizationsNotInUseException) Message() string { +func (s *OrganizationsNotInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6249,22 +6249,22 @@ func (s OrganizationsNotInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OrganizationsNotInUseException) OrigErr() error { +func (s *OrganizationsNotInUseException) OrigErr() error { return nil } -func (s OrganizationsNotInUseException) Error() string { +func (s *OrganizationsNotInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OrganizationsNotInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OrganizationsNotInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OrganizationsNotInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *OrganizationsNotInUseException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about a returned public key. @@ -6632,8 +6632,8 @@ func (s *Resource) SetResourceType(v string) *Resource { // This exception is thrown when the specified resource is not found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6650,17 +6650,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. 
-func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6668,22 +6668,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // A resource tag. @@ -6722,8 +6722,8 @@ func (s *ResourceTag) SetTagsList(v []*Tag) *ResourceTag { // This exception is thrown when the specified resource type is not supported // by CloudTrail. type ResourceTypeNotSupportedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6740,17 +6740,17 @@ func (s ResourceTypeNotSupportedException) GoString() string { func newErrorResourceTypeNotSupportedException(v protocol.ResponseMetadata) error { return &ResourceTypeNotSupportedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceTypeNotSupportedException) Code() string { +func (s *ResourceTypeNotSupportedException) Code() string { return "ResourceTypeNotSupportedException" } // Message returns the exception's message. -func (s ResourceTypeNotSupportedException) Message() string { +func (s *ResourceTypeNotSupportedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6758,28 +6758,28 @@ func (s ResourceTypeNotSupportedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceTypeNotSupportedException) OrigErr() error { +func (s *ResourceTypeNotSupportedException) OrigErr() error { return nil } -func (s ResourceTypeNotSupportedException) Error() string { +func (s *ResourceTypeNotSupportedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceTypeNotSupportedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceTypeNotSupportedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceTypeNotSupportedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceTypeNotSupportedException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the specified S3 bucket does not exist. 
type S3BucketDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6796,17 +6796,17 @@ func (s S3BucketDoesNotExistException) GoString() string { func newErrorS3BucketDoesNotExistException(v protocol.ResponseMetadata) error { return &S3BucketDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s S3BucketDoesNotExistException) Code() string { +func (s *S3BucketDoesNotExistException) Code() string { return "S3BucketDoesNotExistException" } // Message returns the exception's message. -func (s S3BucketDoesNotExistException) Message() string { +func (s *S3BucketDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6814,22 +6814,22 @@ func (s S3BucketDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s S3BucketDoesNotExistException) OrigErr() error { +func (s *S3BucketDoesNotExistException) OrigErr() error { return nil } -func (s S3BucketDoesNotExistException) Error() string { +func (s *S3BucketDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s S3BucketDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *S3BucketDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s S3BucketDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *S3BucketDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The request to CloudTrail to start logging AWS API calls for an account. @@ -7002,8 +7002,8 @@ func (s *Tag) SetValue(v string) *Tag { // The number of tags per trail has exceeded the permitted amount. Currently, // the limit is 50. type TagsLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7020,17 +7020,17 @@ func (s TagsLimitExceededException) GoString() string { func newErrorTagsLimitExceededException(v protocol.ResponseMetadata) error { return &TagsLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagsLimitExceededException) Code() string { +func (s *TagsLimitExceededException) Code() string { return "TagsLimitExceededException" } // Message returns the exception's message. -func (s TagsLimitExceededException) Message() string { +func (s *TagsLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7038,22 +7038,22 @@ func (s TagsLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagsLimitExceededException) OrigErr() error { +func (s *TagsLimitExceededException) OrigErr() error { return nil } -func (s TagsLimitExceededException) Error() string { +func (s *TagsLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s TagsLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagsLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagsLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagsLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The settings for a trail. @@ -7235,8 +7235,8 @@ func (s *Trail) SetTrailARN(v string) *Trail { // This exception is thrown when the specified trail already exists. type TrailAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7253,17 +7253,17 @@ func (s TrailAlreadyExistsException) GoString() string { func newErrorTrailAlreadyExistsException(v protocol.ResponseMetadata) error { return &TrailAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TrailAlreadyExistsException) Code() string { +func (s *TrailAlreadyExistsException) Code() string { return "TrailAlreadyExistsException" } // Message returns the exception's message. -func (s TrailAlreadyExistsException) Message() string { +func (s *TrailAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7271,22 +7271,22 @@ func (s TrailAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TrailAlreadyExistsException) OrigErr() error { +func (s *TrailAlreadyExistsException) OrigErr() error { return nil } -func (s TrailAlreadyExistsException) Error() string { +func (s *TrailAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TrailAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TrailAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TrailAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TrailAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a CloudTrail trail, including the trail's name, home region, @@ -7334,8 +7334,8 @@ func (s *TrailInfo) SetTrailARN(v string) *TrailInfo { // This exception is thrown when the trail with the given name is not found. type TrailNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7352,17 +7352,17 @@ func (s TrailNotFoundException) GoString() string { func newErrorTrailNotFoundException(v protocol.ResponseMetadata) error { return &TrailNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TrailNotFoundException) Code() string { +func (s *TrailNotFoundException) Code() string { return "TrailNotFoundException" } // Message returns the exception's message. 
-func (s TrailNotFoundException) Message() string { +func (s *TrailNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7370,28 +7370,28 @@ func (s TrailNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TrailNotFoundException) OrigErr() error { +func (s *TrailNotFoundException) OrigErr() error { return nil } -func (s TrailNotFoundException) Error() string { +func (s *TrailNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TrailNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TrailNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TrailNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *TrailNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is no longer in use. type TrailNotProvidedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7408,17 +7408,17 @@ func (s TrailNotProvidedException) GoString() string { func newErrorTrailNotProvidedException(v protocol.ResponseMetadata) error { return &TrailNotProvidedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TrailNotProvidedException) Code() string { +func (s *TrailNotProvidedException) Code() string { return "TrailNotProvidedException" } // Message returns the exception's message. -func (s TrailNotProvidedException) Message() string { +func (s *TrailNotProvidedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7426,28 +7426,28 @@ func (s TrailNotProvidedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TrailNotProvidedException) OrigErr() error { +func (s *TrailNotProvidedException) OrigErr() error { return nil } -func (s TrailNotProvidedException) Error() string { +func (s *TrailNotProvidedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TrailNotProvidedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TrailNotProvidedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TrailNotProvidedException) RequestID() string { - return s.respMetadata.RequestID +func (s *TrailNotProvidedException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the requested operation is not supported. type UnsupportedOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7464,17 +7464,17 @@ func (s UnsupportedOperationException) GoString() string { func newErrorUnsupportedOperationException(v protocol.ResponseMetadata) error { return &UnsupportedOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
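The hunks above switch CloudTrail's modeled exceptions (TrailNotFoundException and the rest) to pointer receivers and expose the response metadata as an exported RespMetadata field. A minimal sketch of how calling code might inspect one of these errors, assuming the vendored aws, session, and cloudtrail packages from this patch; the trail name is hypothetical:

package main

import (
    "errors"
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudtrail"
)

func describeTrailStatus() error {
    svc := cloudtrail.New(session.Must(session.NewSession()))

    _, err := svc.GetTrailStatus(&cloudtrail.GetTrailStatusInput{
        Name: aws.String("example-trail"), // hypothetical trail name
    })

    // With the pointer receivers above, the concrete *TrailNotFoundException
    // itself satisfies the error interface, so errors.As can target it.
    var nfe *cloudtrail.TrailNotFoundException
    if errors.As(err, &nfe) {
        fmt.Printf("trail missing: %s (request %s)\n", nfe.Message(), nfe.RequestID())
        return nil
    }
    return err
}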
-func (s UnsupportedOperationException) Code() string { +func (s *UnsupportedOperationException) Code() string { return "UnsupportedOperationException" } // Message returns the exception's message. -func (s UnsupportedOperationException) Message() string { +func (s *UnsupportedOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7482,22 +7482,22 @@ func (s UnsupportedOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedOperationException) OrigErr() error { +func (s *UnsupportedOperationException) OrigErr() error { return nil } -func (s UnsupportedOperationException) Error() string { +func (s *UnsupportedOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedOperationException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies settings to update for the trail. @@ -7845,11 +7845,25 @@ const ( EventCategoryInsight = "insight" ) +// EventCategory_Values returns all elements of the EventCategory enum +func EventCategory_Values() []string { + return []string{ + EventCategoryInsight, + } +} + const ( // InsightTypeApiCallRateInsight is a InsightType enum value InsightTypeApiCallRateInsight = "ApiCallRateInsight" ) +// InsightType_Values returns all elements of the InsightType enum +func InsightType_Values() []string { + return []string{ + InsightTypeApiCallRateInsight, + } +} + const ( // LookupAttributeKeyEventId is a LookupAttributeKey enum value LookupAttributeKeyEventId = "EventId" @@ -7876,6 +7890,20 @@ const ( LookupAttributeKeyAccessKeyId = "AccessKeyId" ) +// LookupAttributeKey_Values returns all elements of the LookupAttributeKey enum +func LookupAttributeKey_Values() []string { + return []string{ + LookupAttributeKeyEventId, + LookupAttributeKeyEventName, + LookupAttributeKeyReadOnly, + LookupAttributeKeyUsername, + LookupAttributeKeyResourceType, + LookupAttributeKeyResourceName, + LookupAttributeKeyEventSource, + LookupAttributeKeyAccessKeyId, + } +} + const ( // ReadWriteTypeReadOnly is a ReadWriteType enum value ReadWriteTypeReadOnly = "ReadOnly" @@ -7886,3 +7914,12 @@ const ( // ReadWriteTypeAll is a ReadWriteType enum value ReadWriteTypeAll = "All" ) + +// ReadWriteType_Values returns all elements of the ReadWriteType enum +func ReadWriteType_Values() []string { + return []string{ + ReadWriteTypeReadOnly, + ReadWriteTypeWriteOnly, + ReadWriteTypeAll, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go index 7671e6ca3..fece8a618 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" 
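The generated *_Values helpers added above expose a canonical list of each enum's members. One plausible use in a provider like this one, sketched here and not part of the patch, is plan-time validation of the CloudTrail read/write type; the schema attribute is hypothetical and the plugin SDK import paths assume the v2 module layout, which may not match this repository's vendored version:

package main

import (
    "github.com/aws/aws-sdk-go/service/cloudtrail"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// readWriteTypeSchema sketches a schema attribute whose accepted values are
// driven by the generated enum helper instead of a hand-maintained list.
func readWriteTypeSchema() *schema.Schema {
    return &schema.Schema{
        Type:         schema.TypeString,
        Optional:     true,
        Default:      cloudtrail.ReadWriteTypeAll,
        ValidateFunc: validation.StringInSlice(cloudtrail.ReadWriteType_Values(), false),
    }
}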
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go index dec4cd383..7a2d4b2d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go @@ -246,7 +246,7 @@ func (c *CloudWatch) DeleteDashboardsRequest(input *DeleteDashboardsInput) (req // DeleteDashboards API operation for Amazon CloudWatch. // -// Deletes all dashboards that you specify. You may specify up to 100 dashboards +// Deletes all dashboards that you specify. You can specify up to 100 dashboards // to delete. If there is an error during this call, no dashboards are deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -335,8 +335,7 @@ func (c *CloudWatch) DeleteInsightRulesRequest(input *DeleteInsightRulesInput) ( // Permanently deletes the specified Contributor Insights rules. // // If you create a rule, delete it, and then re-create it with the same name, -// historical data from the first time the rule was created may or may not be -// available. +// historical data from the first time the rule was created might not be available. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1420,7 +1419,7 @@ func (c *CloudWatch) GetInsightRuleReportRequest(input *GetInsightRuleReportInpu // point. // // * MaxContributorValue -- the value of the top contributor for each data -// point. The identity of the contributor may change for each data point +// point. The identity of the contributor might change for each data point // in the graph. If this rule aggregates by COUNT, the top contributor for // each data point is the contributor with the most occurrences in that period. // If the rule aggregates by SUM, the top contributor is the contributor @@ -1569,9 +1568,9 @@ func (c *CloudWatch) GetMetricDataRequest(input *GetMetricDataInput) (req *reque // If you omit Unit in your request, all data that was collected with any unit // is returned, along with the corresponding units that were specified when // the data was reported to CloudWatch. If you specify a unit, the operation -// returns only data data that was collected with that unit specified. If you -// specify a unit that does not match the data collected, the results of the -// operation are null. CloudWatch does not perform unit conversions. +// returns only data that was collected with that unit specified. If you specify +// a unit that does not match the data collected, the results of the operation +// are null. CloudWatch does not perform unit conversions. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2091,9 +2090,13 @@ func (c *CloudWatch) ListMetricsRequest(input *ListMetricsInput) (req *request.R // Up to 500 results are returned for any one call. To retrieve additional results, // use the returned token with subsequent calls. // -// After you create a metric, allow up to fifteen minutes before the metric -// appears. Statistics about the metric, however, are available sooner using -// GetMetricData (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) +// After you create a metric, allow up to 15 minutes before the metric appears. 
+// You can see statistics about the metric sooner by using GetMetricData (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) +// or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html). +// +// ListMetrics doesn't return information about metrics if those metrics haven't +// reported data in the past two weeks. To retrieve those metrics, use GetMetricData +// (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricData.html) // or GetMetricStatistics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_GetMetricStatistics.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2228,7 +2231,8 @@ func (c *CloudWatch) ListTagsForResourceRequest(input *ListTagsForResourceInput) // ListTagsForResource API operation for Amazon CloudWatch. // -// Displays the tags associated with a CloudWatch resource. Alarms support tagging. +// Displays the tags associated with a CloudWatch resource. Currently, alarms +// and Contributor Insights rules support tagging. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2629,8 +2633,7 @@ func (c *CloudWatch) PutInsightRuleRequest(input *PutInsightRuleInput) (req *req // Analyze High-Cardinality Data (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContributorInsights.html). // // If you create a rule, delete it, and then re-create it with the same name, -// historical data from the first time the rule was created may or may not be -// available. +// historical data from the first time the rule was created might not be available. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2981,8 +2984,8 @@ func (c *CloudWatch) SetAlarmStateRequest(input *SetAlarmStateInput) (req *reque // DescribeAlarmHistory (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarmHistory.html). // // If you use SetAlarmState on a composite alarm, the composite alarm is not -// guaranteed to return to its actual state. It will return to its actual state -// only once any of its children alarms change state. It is also re-evaluated +// guaranteed to return to its actual state. It returns to its actual state +// only once any of its children alarms change state. It is also reevaluated // if you update its configuration. // // If an alarm triggers EC2 Auto Scaling policies or application Auto Scaling @@ -3071,10 +3074,11 @@ func (c *CloudWatch) TagResourceRequest(input *TagResourceInput) (req *request.R // TagResource API operation for Amazon CloudWatch. // // Assigns one or more tags (key-value pairs) to the specified CloudWatch resource. -// Currently, the only CloudWatch resources that can be tagged are alarms. +// Currently, the only CloudWatch resources that can be tagged are alarms and +// Contributor Insights rules. // // Tags can help you organize and categorize your resources. You can also use -// them to scope user permissions, by granting a user permission to access or +// them to scope user permissions by granting a user permission to access or // change only resources with certain tag values. 
// // Tags don't have any semantic meaning to AWS and are interpreted strictly @@ -3086,7 +3090,7 @@ func (c *CloudWatch) TagResourceRequest(input *TagResourceInput) (req *request.R // associated with the alarm, the new tag value that you specify replaces the // previous value for that tag. // -// You can associate as many as 50 tags with a resource. +// You can associate as many as 50 tags with a CloudWatch resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4319,8 +4323,8 @@ type DescribeAlarmsInput struct { // is not returned. // // If you specify ChildrenOfAlarmName, you cannot specify any other parameters - // in the request except for MaxRecords and NextToken. If you do so, you will - // receive a validation error. + // in the request except for MaxRecords and NextToken. If you do so, you receive + // a validation error. // // Only the Alarm Name, ARN, StateValue (OK/ALARM/INSUFFICIENT_DATA), and StateUpdatedTimestamp // information are returned by this operation when you use this parameter. To @@ -4342,8 +4346,8 @@ type DescribeAlarmsInput struct { // alarm that you specify in ParentsOfAlarmName is not returned. // // If you specify ParentsOfAlarmName, you cannot specify any other parameters - // in the request except for MaxRecords and NextToken. If you do so, you will - // receive a validation error. + // in the request except for MaxRecords and NextToken. If you do so, you receive + // a validation error. // // Only the Alarm Name and ARN are returned by this operation when you use this // parameter. To get complete information about these alarms, perform another @@ -4622,7 +4626,7 @@ type DescribeInsightRulesInput struct { _ struct{} `type:"structure"` // This parameter is not currently used. Reserved for future use. If it is used - // in the future, the maximum value may be different. + // in the future, the maximum value might be different. MaxResults *int64 `min:"1" type:"integer"` // Reserved for future use. @@ -4696,16 +4700,20 @@ func (s *DescribeInsightRulesOutput) SetNextToken(v string) *DescribeInsightRule return s } -// Expands the identity of a metric. +// A dimension is a name/value pair that is part of the identity of a metric. +// You can assign up to 10 dimensions to a metric. Because dimensions are part +// of the unique identifier for a metric, whenever you add a unique name/value +// pair to one of your metrics, you are creating a new variation of that metric. type Dimension struct { _ struct{} `type:"structure"` - // The name of the dimension. + // The name of the dimension. Dimension names cannot contain blank spaces or + // non-ASCII characters. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // The value representing the dimension measurement. + // The value of the dimension. // // Value is a required field Value *string `min:"1" type:"string" required:"true"` @@ -5140,7 +5148,7 @@ type GetInsightRuleReportInput struct { // point. // // * MaxContributorValue -- the value of the top contributor for each data - // point. The identity of the contributor may change for each data point + // point. The identity of the contributor might change for each data point // in the graph. If this rule aggregates by COUNT, the top contributor for // each data point is the contributor with the most occurrences in that period. 
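The DescribeAlarmsInput wording above notes that ChildrenOfAlarmName cannot be combined with anything beyond MaxRecords and NextToken, and that only a subset of alarm fields comes back. A sketch of listing the children of a composite alarm; the alarm name is hypothetical:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatch"
)

func listChildAlarms() error {
    svc := cloudwatch.New(session.Must(session.NewSession()))

    // Only MaxRecords/NextToken may accompany ChildrenOfAlarmName; adding other
    // filters is documented above to produce a validation error.
    out, err := svc.DescribeAlarms(&cloudwatch.DescribeAlarmsInput{
        ChildrenOfAlarmName: aws.String("example-composite-alarm"), // hypothetical
        MaxRecords:          aws.Int64(100),
    })
    if err != nil {
        return err
    }
    for _, a := range out.MetricAlarms {
        fmt.Println(aws.StringValue(a.AlarmName), aws.StringValue(a.StateValue))
    }
    return nil
}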
// If the rule aggregates by SUM, the top contributor is the contributor @@ -5494,7 +5502,7 @@ type GetMetricDataOutput struct { _ struct{} `type:"structure"` // Contains a message about this GetMetricData operation, if the operation results - // in such a message. An example of a message that may be returned is Maximum + // in such a message. An example of a message that might be returned is Maximum // number of allowed metrics exceeded. If there is a message, as much of the // operation as possible is still executed. // @@ -5638,9 +5646,9 @@ type GetMetricStatisticsInput struct { // The unit for a given metric. If you omit Unit, all data that was collected // with any unit is returned, along with the corresponding units that were specified // when the data was reported to CloudWatch. If you specify a unit, the operation - // returns only data data that was collected with that unit specified. If you - // specify a unit that does not match the data collected, the results of the - // operation are null. CloudWatch does not perform unit conversions. + // returns only data that was collected with that unit specified. If you specify + // a unit that does not match the data collected, the results of the operation + // are null. CloudWatch does not perform unit conversions. Unit *string `type:"string" enum:"StandardUnit"` } @@ -5878,7 +5886,7 @@ func (s *GetMetricWidgetImageInput) SetOutputFormat(v string) *GetMetricWidgetIm type GetMetricWidgetImageOutput struct { _ struct{} `type:"structure"` - // The image of the graph, in the output format specified. + // The image of the graph, in the output format specified. The output is base64-encoded. // // MetricWidgetImage is automatically base64 encoded/decoded by the SDK. MetricWidgetImage []byte `type:"blob"` @@ -6261,6 +6269,15 @@ type ListMetricsInput struct { // The token returned by a previous call to indicate that there is more data // available. NextToken *string `type:"string"` + + // To filter the results to show only metrics that have had data points published + // in the past three hours, specify this parameter with a value of PT3H. This + // is the only valid value for this parameter. + // + // The results that are returned are an approximation of the value you specify. + // There is a low probability that the returned results include metrics with + // last published data as much as 40 minutes more than the specified time interval. + RecentlyActive *string `type:"string" enum:"RecentlyActive"` } // String returns the string representation @@ -6323,10 +6340,16 @@ func (s *ListMetricsInput) SetNextToken(v string) *ListMetricsInput { return s } +// SetRecentlyActive sets the RecentlyActive field's value. +func (s *ListMetricsInput) SetRecentlyActive(v string) *ListMetricsInput { + s.RecentlyActive = &v + return s +} + type ListMetricsOutput struct { _ struct{} `type:"structure"` - // The metrics. + // The metrics that match your request. Metrics []*Metric `type:"list"` // The token that marks the start of the next batch of returned results. @@ -6358,8 +6381,14 @@ func (s *ListMetricsOutput) SetNextToken(v string) *ListMetricsOutput { type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the CloudWatch resource that you want to view tags for. For more - // information on ARN format, see Example ARNs (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-cloudwatch) + // The ARN of the CloudWatch resource that you want to view tags for. 
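The RecentlyActive parameter added to ListMetricsInput above accepts only PT3H and narrows the listing to metrics with recently published data, with the roughly 40-minute slack noted in the doc. A sketch of using it together with pagination; the namespace is a placeholder:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatch"
)

func listActiveMetrics() error {
    svc := cloudwatch.New(session.Must(session.NewSession()))

    input := &cloudwatch.ListMetricsInput{
        Namespace: aws.String("AWS/EC2"), // placeholder namespace
    }
    // PT3H is the only accepted value for RecentlyActive.
    input.SetRecentlyActive("PT3H")

    return svc.ListMetricsPages(input, func(page *cloudwatch.ListMetricsOutput, lastPage bool) bool {
        for _, m := range page.Metrics {
            fmt.Println(aws.StringValue(m.MetricName))
        }
        return !lastPage
    })
}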
+ // + // The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name + // + // The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name + // + // For more information about ARN format, see Resource Types Defined by Amazon + // CloudWatch (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies) // in the Amazon Web Services General Reference. // // ResourceARN is a required field @@ -7243,9 +7272,9 @@ type MetricStat struct { // In a Get operation, if you omit Unit then all data that was collected with // any unit is returned, along with the corresponding units that were specified // when the data was reported to CloudWatch. If you specify a unit, the operation - // returns only data data that was collected with that unit specified. If you - // specify a unit that does not match the data collected, the results of the - // operation are null. CloudWatch does not perform unit conversions. + // returns only data that was collected with that unit specified. If you specify + // a unit that does not match the data collected, the results of the operation + // are null. CloudWatch does not perform unit conversions. Unit *string `type:"string" enum:"StandardUnit"` } @@ -7504,8 +7533,7 @@ type PutCompositeAlarmInput struct { // The description for the composite alarm. AlarmDescription *string `type:"string"` - // The name for the composite alarm. This name must be unique within your AWS - // account. + // The name for the composite alarm. This name must be unique within the Region. // // AlarmName is a required field AlarmName *string `min:"1" type:"string" required:"true"` @@ -7759,7 +7787,7 @@ type PutDashboardOutput struct { // // If this result includes only warning messages, then the input was valid enough // for the dashboard to be created or modified, but some elements of the dashboard - // may not render. + // might not render. // // If this result includes error messages, the input was not valid and the operation // failed. @@ -7798,6 +7826,21 @@ type PutInsightRuleInput struct { // The state of the rule. Valid values are ENABLED and DISABLED. RuleState *string `min:"1" type:"string"` + + // A list of key-value pairs to associate with the Contributor Insights rule. + // You can associate as many as 50 tags with a rule. + // + // Tags can help you organize and categorize your resources. You can also use + // them to scope user permissions, by granting a user permission to access or + // change only the resources that have certain tag values. + // + // To be able to associate tags with a rule, you must have the cloudwatch:TagResource + // permission in addition to the cloudwatch:PutInsightRule permission. + // + // If you are using this operation to update an existing Contributor Insights + // rule, any tags you specify in this parameter are ignored. To change the tags + // of an existing rule, use TagResource (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_TagResource.html). 
+ Tags []*Tag `type:"list"` } // String returns the string representation @@ -7828,6 +7871,16 @@ func (s *PutInsightRuleInput) Validate() error { if s.RuleState != nil && len(*s.RuleState) < 1 { invalidParams.Add(request.NewErrParamMinLen("RuleState", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -7853,6 +7906,12 @@ func (s *PutInsightRuleInput) SetRuleState(v string) *PutInsightRuleInput { return s } +// SetTags sets the Tags field's value. +func (s *PutInsightRuleInput) SetTags(v []*Tag) *PutInsightRuleInput { + s.Tags = v + return s +} + type PutInsightRuleOutput struct { _ struct{} `type:"structure"` } @@ -7889,7 +7948,7 @@ type PutMetricAlarmInput struct { // The description for the alarm. AlarmDescription *string `type:"string"` - // The name for the alarm. This name must be unique within your AWS account. + // The name for the alarm. This name must be unique within the Region. // // AlarmName is a required field AlarmName *string `min:"1" type:"string" required:"true"` @@ -8006,7 +8065,7 @@ type PutMetricAlarmInput struct { // a metric that does not have sub-minute resolution, the alarm still attempts // to gather data at the period rate that you specify. In this case, it does // not receive data for the attempts that do not correspond to a one-minute - // data resolution, and the alarm may often lapse into INSUFFICENT_DATA status. + // data resolution, and the alarm might often lapse into INSUFFICENT_DATA status. // Specifying 10 or 30 also sets this alarm as a high-resolution alarm, which // has a higher charge than other alarms. For more information about pricing, // see Amazon CloudWatch Pricing (https://aws.amazon.com/cloudwatch/pricing/). @@ -8025,7 +8084,7 @@ type PutMetricAlarmInput struct { // as many as 50 tags with an alarm. // // Tags can help you organize and categorize your resources. You can also use - // them to scope user permissions, by granting a user permission to access or + // them to scope user permissions by granting a user permission to access or // change only resources with certain tag values. Tags []*Tag `type:"list"` @@ -8059,12 +8118,12 @@ type PutMetricAlarmInput struct { // Percent, are aggregated separately. // // If you don't specify Unit, CloudWatch retrieves all unit types that have - // been published for the metric and attempts to evaluate the alarm. Usually - // metrics are published with only one unit, so the alarm will work as intended. + // been published for the metric and attempts to evaluate the alarm. Usually, + // metrics are published with only one unit, so the alarm works as intended. // // However, if the metric is published with multiple types of units and you - // don't specify a unit, the alarm's behavior is not defined and will behave - // un-predictably. + // don't specify a unit, the alarm's behavior is not defined and it behaves + // predictably. // // We recommend omitting Unit so that you don't inadvertently specify an incorrect // unit that is not published for this metric. Doing so causes the alarm to @@ -8446,8 +8505,7 @@ func (s *Range) SetStartTime(v time.Time) *Range { type SetAlarmStateInput struct { _ struct{} `type:"structure"` - // The name for the alarm. This name must be unique within the AWS account. - // The maximum length is 255 characters. + // The name of the alarm. 
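The Tags field added to PutInsightRuleInput above lets a Contributor Insights rule be tagged at creation, provided the caller holds cloudwatch:TagResource in addition to cloudwatch:PutInsightRule; tags passed on an update are ignored. A sketch with a hypothetical rule name, the rule definition supplied by the caller:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatch"
)

func createTaggedInsightRule(ruleDefinition string) error {
    svc := cloudwatch.New(session.Must(session.NewSession()))

    // Tags set here apply only when the rule is created; changing tags on an
    // existing rule requires TagResource instead.
    _, err := svc.PutInsightRule(&cloudwatch.PutInsightRuleInput{
        RuleName:       aws.String("example-insight-rule"), // hypothetical
        RuleState:      aws.String("ENABLED"),
        RuleDefinition: aws.String(ruleDefinition),
        Tags: []*cloudwatch.Tag{
            {Key: aws.String("Environment"), Value: aws.String("test")},
        },
    })
    return err
}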
// // AlarmName is a required field AlarmName *string `min:"1" type:"string" required:"true"` @@ -8681,8 +8739,15 @@ func (s *Tag) SetValue(v string) *Tag { type TagResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the CloudWatch alarm that you're adding tags to. The ARN format - // is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name + // The ARN of the CloudWatch resource that you're adding tags to. + // + // The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name + // + // The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name + // + // For more information about ARN format, see Resource Types Defined by Amazon + // CloudWatch (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies) + // in the Amazon Web Services General Reference. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -8761,8 +8826,14 @@ func (s TagResourceOutput) GoString() string { type UntagResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the CloudWatch resource that you're removing tags from. For more - // information on ARN format, see Example ARNs (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-cloudwatch) + // The ARN of the CloudWatch resource that you're removing tags from. + // + // The ARN format of an alarm is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name + // + // The ARN format of a Contributor Insights rule is arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name + // + // For more information about ARN format, see Resource Types Defined by Amazon + // CloudWatch (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatch.html#amazoncloudwatch-resources-for-iam-policies) // in the Amazon Web Services General Reference. 
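The ARN formats spelled out above apply to both TagResource and UntagResource: alarms use the alarm segment and Contributor Insights rules use insight-rule. A sketch of removing a tag from a rule; the region, account, rule name, and tag key are placeholders:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatch"
)

func untagInsightRule() error {
    svc := cloudwatch.New(session.Must(session.NewSession()))

    // arn:aws:cloudwatch:Region:account-id:insight-rule:insight-rule-name,
    // filled in here with placeholder values.
    ruleARN := "arn:aws:cloudwatch:us-east-1:123456789012:insight-rule:example-insight-rule"

    _, err := svc.UntagResource(&cloudwatch.UntagResourceInput{
        ResourceARN: aws.String(ruleARN),
        TagKeys:     []*string{aws.String("Environment")},
    })
    return err
}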
// // ResourceARN is a required field @@ -8837,6 +8908,14 @@ const ( AlarmTypeMetricAlarm = "MetricAlarm" ) +// AlarmType_Values returns all elements of the AlarmType enum +func AlarmType_Values() []string { + return []string{ + AlarmTypeCompositeAlarm, + AlarmTypeMetricAlarm, + } +} + const ( // AnomalyDetectorStateValuePendingTraining is a AnomalyDetectorStateValue enum value AnomalyDetectorStateValuePendingTraining = "PENDING_TRAINING" @@ -8848,6 +8927,15 @@ const ( AnomalyDetectorStateValueTrained = "TRAINED" ) +// AnomalyDetectorStateValue_Values returns all elements of the AnomalyDetectorStateValue enum +func AnomalyDetectorStateValue_Values() []string { + return []string{ + AnomalyDetectorStateValuePendingTraining, + AnomalyDetectorStateValueTrainedInsufficientData, + AnomalyDetectorStateValueTrained, + } +} + const ( // ComparisonOperatorGreaterThanOrEqualToThreshold is a ComparisonOperator enum value ComparisonOperatorGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" @@ -8871,6 +8959,19 @@ const ( ComparisonOperatorGreaterThanUpperThreshold = "GreaterThanUpperThreshold" ) +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorGreaterThanOrEqualToThreshold, + ComparisonOperatorGreaterThanThreshold, + ComparisonOperatorLessThanThreshold, + ComparisonOperatorLessThanOrEqualToThreshold, + ComparisonOperatorLessThanLowerOrGreaterThanUpperThreshold, + ComparisonOperatorLessThanLowerThreshold, + ComparisonOperatorGreaterThanUpperThreshold, + } +} + const ( // HistoryItemTypeConfigurationUpdate is a HistoryItemType enum value HistoryItemTypeConfigurationUpdate = "ConfigurationUpdate" @@ -8882,6 +8983,27 @@ const ( HistoryItemTypeAction = "Action" ) +// HistoryItemType_Values returns all elements of the HistoryItemType enum +func HistoryItemType_Values() []string { + return []string{ + HistoryItemTypeConfigurationUpdate, + HistoryItemTypeStateUpdate, + HistoryItemTypeAction, + } +} + +const ( + // RecentlyActivePt3h is a RecentlyActive enum value + RecentlyActivePt3h = "PT3H" +) + +// RecentlyActive_Values returns all elements of the RecentlyActive enum +func RecentlyActive_Values() []string { + return []string{ + RecentlyActivePt3h, + } +} + const ( // ScanByTimestampDescending is a ScanBy enum value ScanByTimestampDescending = "TimestampDescending" @@ -8890,6 +9012,14 @@ const ( ScanByTimestampAscending = "TimestampAscending" ) +// ScanBy_Values returns all elements of the ScanBy enum +func ScanBy_Values() []string { + return []string{ + ScanByTimestampDescending, + ScanByTimestampAscending, + } +} + const ( // StandardUnitSeconds is a StandardUnit enum value StandardUnitSeconds = "Seconds" @@ -8973,6 +9103,39 @@ const ( StandardUnitNone = "None" ) +// StandardUnit_Values returns all elements of the StandardUnit enum +func StandardUnit_Values() []string { + return []string{ + StandardUnitSeconds, + StandardUnitMicroseconds, + StandardUnitMilliseconds, + StandardUnitBytes, + StandardUnitKilobytes, + StandardUnitMegabytes, + StandardUnitGigabytes, + StandardUnitTerabytes, + StandardUnitBits, + StandardUnitKilobits, + StandardUnitMegabits, + StandardUnitGigabits, + StandardUnitTerabits, + StandardUnitPercent, + StandardUnitCount, + StandardUnitBytesSecond, + StandardUnitKilobytesSecond, + StandardUnitMegabytesSecond, + StandardUnitGigabytesSecond, + StandardUnitTerabytesSecond, + StandardUnitBitsSecond, + StandardUnitKilobitsSecond, + StandardUnitMegabitsSecond, + 
StandardUnitGigabitsSecond, + StandardUnitTerabitsSecond, + StandardUnitCountSecond, + StandardUnitNone, + } +} + const ( // StateValueOk is a StateValue enum value StateValueOk = "OK" @@ -8984,6 +9147,15 @@ const ( StateValueInsufficientData = "INSUFFICIENT_DATA" ) +// StateValue_Values returns all elements of the StateValue enum +func StateValue_Values() []string { + return []string{ + StateValueOk, + StateValueAlarm, + StateValueInsufficientData, + } +} + const ( // StatisticSampleCount is a Statistic enum value StatisticSampleCount = "SampleCount" @@ -9001,6 +9173,17 @@ const ( StatisticMaximum = "Maximum" ) +// Statistic_Values returns all elements of the Statistic enum +func Statistic_Values() []string { + return []string{ + StatisticSampleCount, + StatisticAverage, + StatisticSum, + StatisticMinimum, + StatisticMaximum, + } +} + const ( // StatusCodeComplete is a StatusCode enum value StatusCodeComplete = "Complete" @@ -9011,3 +9194,12 @@ const ( // StatusCodePartialData is a StatusCode enum value StatusCodePartialData = "PartialData" ) + +// StatusCode_Values returns all elements of the StatusCode enum +func StatusCode_Values() []string { + return []string{ + StatusCodeComplete, + StatusCodeInternalError, + StatusCodePartialData, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go index 3eccf59bf..c926b57c8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go index 4be852438..0081e438b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go @@ -81,6 +81,9 @@ func (c *CloudWatchEvents) ActivateEventSourceRequest(input *ActivateEventSource // * InternalException // This exception occurs due to unexpected causes. // +// * OperationDisabledException +// The operation you are attempting is not available in this region. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ActivateEventSource func (c *CloudWatchEvents) ActivateEventSource(input *ActivateEventSourceInput) (*ActivateEventSourceOutput, error) { req, out := c.ActivateEventSourceRequest(input) @@ -178,6 +181,9 @@ func (c *CloudWatchEvents) CreateEventBusRequest(input *CreateEventBusInput) (re // * LimitExceededException // You tried to create more rules or add more targets to a rule than is allowed. // +// * OperationDisabledException +// The operation you are attempting is not available in this region. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/CreateEventBus func (c *CloudWatchEvents) CreateEventBus(input *CreateEventBusInput) (*CreateEventBusOutput, error) { req, out := c.CreateEventBusRequest(input) @@ -290,6 +296,9 @@ func (c *CloudWatchEvents) CreatePartnerEventSourceRequest(input *CreatePartnerE // * LimitExceededException // You tried to create more rules or add more targets to a rule than is allowed. 
// +// * OperationDisabledException +// The operation you are attempting is not available in this region. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/CreatePartnerEventSource func (c *CloudWatchEvents) CreatePartnerEventSource(input *CreatePartnerEventSourceInput) (*CreatePartnerEventSourceOutput, error) { req, out := c.CreatePartnerEventSourceRequest(input) @@ -385,6 +394,9 @@ func (c *CloudWatchEvents) DeactivateEventSourceRequest(input *DeactivateEventSo // * InternalException // This exception occurs due to unexpected causes. // +// * OperationDisabledException +// The operation you are attempting is not available in this region. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DeactivateEventSource func (c *CloudWatchEvents) DeactivateEventSource(input *DeactivateEventSourceInput) (*DeactivateEventSourceOutput, error) { req, out := c.DeactivateEventSourceRequest(input) @@ -557,6 +569,9 @@ func (c *CloudWatchEvents) DeletePartnerEventSourceRequest(input *DeletePartnerE // * ConcurrentModificationException // There is concurrent modification on a rule or target. // +// * OperationDisabledException +// The operation you are attempting is not available in this region. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DeletePartnerEventSource func (c *CloudWatchEvents) DeletePartnerEventSource(input *DeletePartnerEventSourceInput) (*DeletePartnerEventSourceOutput, error) { req, out := c.DeletePartnerEventSourceRequest(input) @@ -835,6 +850,9 @@ func (c *CloudWatchEvents) DescribeEventSourceRequest(input *DescribeEventSource // * InternalException // This exception occurs due to unexpected causes. // +// * OperationDisabledException +// The operation you are attempting is not available in this region. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribeEventSource func (c *CloudWatchEvents) DescribeEventSource(input *DescribeEventSourceInput) (*DescribeEventSourceOutput, error) { req, out := c.DescribeEventSourceRequest(input) @@ -920,6 +938,9 @@ func (c *CloudWatchEvents) DescribePartnerEventSourceRequest(input *DescribePart // * InternalException // This exception occurs due to unexpected causes. // +// * OperationDisabledException +// The operation you are attempting is not available in this region. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/DescribePartnerEventSource func (c *CloudWatchEvents) DescribePartnerEventSource(input *DescribePartnerEventSourceInput) (*DescribePartnerEventSourceOutput, error) { req, out := c.DescribePartnerEventSourceRequest(input) @@ -1362,6 +1383,9 @@ func (c *CloudWatchEvents) ListEventSourcesRequest(input *ListEventSourcesInput) // * InternalException // This exception occurs due to unexpected causes. // +// * OperationDisabledException +// The operation you are attempting is not available in this region. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListEventSources func (c *CloudWatchEvents) ListEventSources(input *ListEventSourcesInput) (*ListEventSourcesOutput, error) { req, out := c.ListEventSourcesRequest(input) @@ -1446,6 +1470,9 @@ func (c *CloudWatchEvents) ListPartnerEventSourceAccountsRequest(input *ListPart // * InternalException // This exception occurs due to unexpected causes. // +// * OperationDisabledException +// The operation you are attempting is not available in this region. 
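Several of the EventBridge (CloudWatch Events) operations above gain an OperationDisabledException entry for regions where partner event source features are unavailable. A sketch of tolerating that case by matching the documented error code string; it assumes the vendored cloudwatchevents package from this patch and uses the code string from the doc entries above rather than a named constant:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func listEventSources() error {
    svc := cloudwatchevents.New(session.Must(session.NewSession()))

    out, err := svc.ListEventSources(&cloudwatchevents.ListEventSourcesInput{})
    if err != nil {
        // Treat the documented "not available in this region" case as an empty
        // result rather than a fatal error.
        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "OperationDisabledException" {
            return nil
        }
        return err
    }
    for _, src := range out.EventSources {
        fmt.Println(src)
    }
    return nil
}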
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListPartnerEventSourceAccounts func (c *CloudWatchEvents) ListPartnerEventSourceAccounts(input *ListPartnerEventSourceAccountsInput) (*ListPartnerEventSourceAccountsOutput, error) { req, out := c.ListPartnerEventSourceAccountsRequest(input) @@ -1526,6 +1553,9 @@ func (c *CloudWatchEvents) ListPartnerEventSourcesRequest(input *ListPartnerEven // * InternalException // This exception occurs due to unexpected causes. // +// * OperationDisabledException +// The operation you are attempting is not available in this region. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/ListPartnerEventSources func (c *CloudWatchEvents) ListPartnerEventSources(input *ListPartnerEventSourcesInput) (*ListPartnerEventSourcesOutput, error) { req, out := c.ListPartnerEventSourcesRequest(input) @@ -2020,6 +2050,9 @@ func (c *CloudWatchEvents) PutPartnerEventsRequest(input *PutPartnerEventsInput) // * InternalException // This exception occurs due to unexpected causes. // +// * OperationDisabledException +// The operation you are attempting is not available in this region. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/events-2015-10-07/PutPartnerEvents func (c *CloudWatchEvents) PutPartnerEvents(input *PutPartnerEventsInput) (*PutPartnerEventsOutput, error) { req, out := c.PutPartnerEventsRequest(input) @@ -2088,8 +2121,9 @@ func (c *CloudWatchEvents) PutPermissionRequest(input *PutPermissionInput) (req // PutPermission API operation for Amazon CloudWatch Events. // // Running PutPermission permits the specified AWS account or AWS organization -// to put events to the specified event bus. CloudWatch Events rules in your -// account are triggered by these events arriving to an event bus in your account. +// to put events to the specified event bus. Amazon EventBridge (CloudWatch +// Events) rules in your account are triggered by these events arriving to an +// event bus in your account. // // For another account to send events to your account, that external account // must have an EventBridge rule with your account's event bus as a target. @@ -2383,6 +2417,10 @@ func (c *CloudWatchEvents) PutTargetsRequest(input *PutTargetsInput) (req *reque // // * The default event bus of another AWS account // +// * Amazon API Gateway REST APIs +// +// * Redshift Clusters to invoke Data API ExecuteStatement on +// // Creating rules with built-in targets is supported only in the AWS Management // Console. The built-in targets are EC2 CreateSnapshot API call, EC2 RebootInstances // API call, EC2 StopInstances API call, and EC2 TerminateInstances API call. @@ -2393,12 +2431,13 @@ func (c *CloudWatchEvents) PutTargetsRequest(input *PutTargetsInput) (req *reque // on multiple EC2 instances with one rule, you can use the RunCommandParameters // field. // -// To be able to make API calls against the resources that you own, Amazon CloudWatch -// Events needs the appropriate permissions. For AWS Lambda and Amazon SNS resources, -// EventBridge relies on resource-based policies. For EC2 instances, Kinesis -// data streams, and AWS Step Functions state machines, EventBridge relies on -// IAM roles that you specify in the RoleARN argument in PutTargets. 
For more -// information, see Authentication and Access Control (https://docs.aws.amazon.com/eventbridge/latest/userguide/auth-and-access-control-eventbridge.html) +// To be able to make API calls against the resources that you own, Amazon EventBridge +// (CloudWatch Events) needs the appropriate permissions. For AWS Lambda and +// Amazon SNS resources, EventBridge relies on resource-based policies. For +// EC2 instances, Kinesis data streams, AWS Step Functions state machines and +// API Gateway REST APIs, EventBridge relies on IAM roles that you specify in +// the RoleARN argument in PutTargets. For more information, see Authentication +// and Access Control (https://docs.aws.amazon.com/eventbridge/latest/userguide/auth-and-access-control-eventbridge.html) // in the Amazon EventBridge User Guide. // // If another AWS account is in the same region and has granted you permission @@ -2408,7 +2447,8 @@ func (c *CloudWatchEvents) PutTargetsRequest(input *PutTargetsInput) (req *reque // you run PutTargets. If your account sends events to another account, your // account is charged for each sent event. Each event sent to another account // is charged as a custom event. The account receiving the event is not charged. -// For more information, see Amazon CloudWatch Pricing (https://aws.amazon.com/cloudwatch/pricing/). +// For more information, see Amazon EventBridge (CloudWatch Events) Pricing +// (https://aws.amazon.com/eventbridge/pricing/). // // Input, InputPath, and InputTransformer are not available with PutTarget if // the target is an event bus of a different AWS account. @@ -2936,8 +2976,8 @@ func (c *CloudWatchEvents) UntagResourceRequest(input *UntagResourceInput) (req // UntagResource API operation for Amazon CloudWatch Events. // -// Removes one or more tags from the specified EventBridge resource. In CloudWatch -// Events, rules and event buses can be tagged. +// Removes one or more tags from the specified EventBridge resource. In Amazon +// EventBridge (CloudWatch Events, rules and event buses can be tagged. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3242,8 +3282,8 @@ func (s *BatchRetryStrategy) SetAttempts(v int64) *BatchRetryStrategy { // There is concurrent modification on a rule or target. type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3260,17 +3300,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. -func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3278,22 +3318,22 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } // A JSON string which you can use to limit the event bus permissions you are @@ -3613,6 +3653,44 @@ func (s DeactivateEventSourceOutput) GoString() string { return s.String() } +// A DeadLetterConfig object that contains information about a dead-letter queue +// configuration. +type DeadLetterConfig struct { + _ struct{} `type:"structure"` + + // The ARN of the SQS queue specified as the target for the dead-letter queue. + Arn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeadLetterConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeadLetterConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeadLetterConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeadLetterConfig"} + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *DeadLetterConfig) SetArn(v string) *DeadLetterConfig { + s.Arn = &v + return s +} + type DeleteEventBusInput struct { _ struct{} `type:"structure"` @@ -4611,6 +4689,52 @@ func (s *EventSource) SetState(v string) *EventSource { return s } +// These are custom parameter to be used when the target is an API Gateway REST +// APIs. +type HttpParameters struct { + _ struct{} `type:"structure"` + + // The headers that need to be sent as part of request invoking the API Gateway + // REST API. + HeaderParameters map[string]*string `type:"map"` + + // The path parameter values to be used to populate API Gateway REST API path + // wildcards ("*"). + PathParameterValues []*string `type:"list"` + + // The query string keys/values that need to be sent as part of request invoking + // the API Gateway REST API. + QueryStringParameters map[string]*string `type:"map"` +} + +// String returns the string representation +func (s HttpParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HttpParameters) GoString() string { + return s.String() +} + +// SetHeaderParameters sets the HeaderParameters field's value. +func (s *HttpParameters) SetHeaderParameters(v map[string]*string) *HttpParameters { + s.HeaderParameters = v + return s +} + +// SetPathParameterValues sets the PathParameterValues field's value. +func (s *HttpParameters) SetPathParameterValues(v []*string) *HttpParameters { + s.PathParameterValues = v + return s +} + +// SetQueryStringParameters sets the QueryStringParameters field's value. 
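The new DeadLetterConfig and HttpParameters types above model a target's dead-letter queue and the extra request data for an API Gateway REST API target; presumably they are attached to a rule target when calling PutTargets, though that wiring is outside these hunks. A sketch that only constructs and validates them, with placeholder ARNs and parameter names:

package main

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func buildTargetExtras() (*cloudwatchevents.HttpParameters, *cloudwatchevents.DeadLetterConfig, error) {
    // Extra request data for an API Gateway REST API target; the header,
    // path, and query values here are placeholders.
    httpParams := &cloudwatchevents.HttpParameters{
        HeaderParameters:    map[string]*string{"X-Example-Header": aws.String("demo")},
        PathParameterValues: []*string{aws.String("v1")},
        QueryStringParameters: map[string]*string{
            "source": aws.String("eventbridge"),
        },
    }

    // Events that cannot be delivered go to this SQS queue (placeholder ARN).
    dlq := &cloudwatchevents.DeadLetterConfig{
        Arn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-dlq"),
    }
    if err := dlq.Validate(); err != nil {
        return nil, nil, err
    }
    return httpParams, dlq, nil
}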
+func (s *HttpParameters) SetQueryStringParameters(v map[string]*string) *HttpParameters { + s.QueryStringParameters = v + return s +} + // Contains the parameters needed for you to provide custom input to a target // based on one or more pieces of data extracted from the event. type InputTransformer struct { @@ -4708,8 +4832,8 @@ func (s *InputTransformer) SetInputTemplate(v string) *InputTransformer { // This exception occurs due to unexpected causes. type InternalException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4726,17 +4850,17 @@ func (s InternalException) GoString() string { func newErrorInternalException(v protocol.ResponseMetadata) error { return &InternalException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalException) Code() string { +func (s *InternalException) Code() string { return "InternalException" } // Message returns the exception's message. -func (s InternalException) Message() string { +func (s *InternalException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4744,28 +4868,28 @@ func (s InternalException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalException) OrigErr() error { +func (s *InternalException) OrigErr() error { return nil } -func (s InternalException) Error() string { +func (s *InternalException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalException) RequestID() string { + return s.RespMetadata.RequestID } // The event pattern is not valid. type InvalidEventPatternException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4782,17 +4906,17 @@ func (s InvalidEventPatternException) GoString() string { func newErrorInvalidEventPatternException(v protocol.ResponseMetadata) error { return &InvalidEventPatternException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidEventPatternException) Code() string { +func (s *InvalidEventPatternException) Code() string { return "InvalidEventPatternException" } // Message returns the exception's message. -func (s InvalidEventPatternException) Message() string { +func (s *InvalidEventPatternException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4800,28 +4924,28 @@ func (s InvalidEventPatternException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidEventPatternException) OrigErr() error { +func (s *InvalidEventPatternException) OrigErr() error { return nil } -func (s InvalidEventPatternException) Error() string { +func (s *InvalidEventPatternException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidEventPatternException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidEventPatternException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidEventPatternException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidEventPatternException) RequestID() string { + return s.RespMetadata.RequestID } // The specified state is not a valid state for an event source. type InvalidStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4838,17 +4962,17 @@ func (s InvalidStateException) GoString() string { func newErrorInvalidStateException(v protocol.ResponseMetadata) error { return &InvalidStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidStateException) Code() string { +func (s *InvalidStateException) Code() string { return "InvalidStateException" } // Message returns the exception's message. -func (s InvalidStateException) Message() string { +func (s *InvalidStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4856,22 +4980,22 @@ func (s InvalidStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidStateException) OrigErr() error { +func (s *InvalidStateException) OrigErr() error { return nil } -func (s InvalidStateException) Error() string { +func (s *InvalidStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidStateException) RequestID() string { + return s.RespMetadata.RequestID } // This object enables you to specify a JSON path to extract from the event @@ -4920,8 +5044,8 @@ func (s *KinesisParameters) SetPartitionKeyPath(v string) *KinesisParameters { // You tried to create more rules or add more targets to a rule than is allowed. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4938,17 +5062,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4956,22 +5080,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListEventBusesInput struct { @@ -5768,8 +5892,8 @@ func (s *ListTargetsByRuleOutput) SetTargets(v []*Target) *ListTargetsByRuleOutp // rules by using DisableRule, EnableRule, PutTargets, PutRule, TagResource, // or UntagResource. type ManagedRuleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5786,17 +5910,17 @@ func (s ManagedRuleException) GoString() string { func newErrorManagedRuleException(v protocol.ResponseMetadata) error { return &ManagedRuleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ManagedRuleException) Code() string { +func (s *ManagedRuleException) Code() string { return "ManagedRuleException" } // Message returns the exception's message. -func (s ManagedRuleException) Message() string { +func (s *ManagedRuleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5804,22 +5928,22 @@ func (s ManagedRuleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ManagedRuleException) OrigErr() error { +func (s *ManagedRuleException) OrigErr() error { return nil } -func (s ManagedRuleException) Error() string { +func (s *ManagedRuleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ManagedRuleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ManagedRuleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ManagedRuleException) RequestID() string { - return s.respMetadata.RequestID +func (s *ManagedRuleException) RequestID() string { + return s.RespMetadata.RequestID } // This structure specifies the network configuration for an ECS task. @@ -5863,6 +5987,62 @@ func (s *NetworkConfiguration) SetAwsvpcConfiguration(v *AwsVpcConfiguration) *N return s } +// The operation you are attempting is not available in this region. 
+type OperationDisabledException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s OperationDisabledException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OperationDisabledException) GoString() string { + return s.String() +} + +func newErrorOperationDisabledException(v protocol.ResponseMetadata) error { + return &OperationDisabledException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *OperationDisabledException) Code() string { + return "OperationDisabledException" +} + +// Message returns the exception's message. +func (s *OperationDisabledException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *OperationDisabledException) OrigErr() error { + return nil +} + +func (s *OperationDisabledException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *OperationDisabledException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *OperationDisabledException) RequestID() string { + return s.RespMetadata.RequestID +} + // A partner event source is created by an SaaS partner. If a customer creates // a partner event bus that matches this event source, that AWS account can // receive events from the partner's applications or services. @@ -5956,8 +6136,8 @@ func (s *PartnerEventSourceAccount) SetState(v string) *PartnerEventSourceAccoun // The event bus policy is too long. For more information, see the limits. type PolicyLengthExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5974,17 +6154,17 @@ func (s PolicyLengthExceededException) GoString() string { func newErrorPolicyLengthExceededException(v protocol.ResponseMetadata) error { return &PolicyLengthExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PolicyLengthExceededException) Code() string { +func (s *PolicyLengthExceededException) Code() string { return "PolicyLengthExceededException" } // Message returns the exception's message. -func (s PolicyLengthExceededException) Message() string { +func (s *PolicyLengthExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5992,22 +6172,22 @@ func (s PolicyLengthExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PolicyLengthExceededException) OrigErr() error { +func (s *PolicyLengthExceededException) OrigErr() error { return nil } -func (s PolicyLengthExceededException) Error() string { +func (s *PolicyLengthExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
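Because the modeled exceptions now expose Code, Message and StatusCode through pointer receivers, a caller can branch on the error code constants as sketched below. This is illustrative only; PutRule is used simply as a representative call and the rule settings are placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
    svc := cloudwatchevents.New(session.Must(session.NewSession()))

    _, err := svc.PutRule(&cloudwatchevents.PutRuleInput{
        Name:               aws.String("example-rule"), // placeholder
        ScheduleExpression: aws.String("rate(5 minutes)"),
    })
    if err != nil {
        // The modeled exceptions satisfy awserr.Error, so the error code can
        // be compared against the generated ErrCode constants.
        var code string
        if aerr, ok := err.(awserr.Error); ok {
            code = aerr.Code()
        }
        switch code {
        case cloudwatchevents.ErrCodeOperationDisabledException:
            log.Fatal("this EventBridge operation is not available in the selected region")
        case cloudwatchevents.ErrCodeLimitExceededException:
            log.Fatal("rule limit reached for this account")
        default:
            log.Fatal(err)
        }
    }
    fmt.Println("rule created")
}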
-func (s PolicyLengthExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PolicyLengthExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PolicyLengthExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *PolicyLengthExceededException) RequestID() string { + return s.RespMetadata.RequestID } type PutEventsInput struct { @@ -6866,6 +7046,114 @@ func (s *PutTargetsResultEntry) SetTargetId(v string) *PutTargetsResultEntry { return s } +// These are custom parameters to be used when the target is a Redshift cluster +// to invoke the Redshift Data API ExecuteStatement based on EventBridge events. +type RedshiftDataParameters struct { + _ struct{} `type:"structure"` + + // The name of the database. Required when authenticating using temporary credentials. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // The database user name. Required when authenticating using temporary credentials. + DbUser *string `min:"1" type:"string"` + + // The name or ARN of the secret that enables access to the database. Required + // when authenticating using AWS Secrets Manager. + SecretManagerArn *string `min:"1" type:"string"` + + // The SQL statement text to run. + // + // Sql is a required field + Sql *string `min:"1" type:"string" required:"true"` + + // The name of the SQL statement. You can name the SQL statement when you create + // it to identify the query. + StatementName *string `min:"1" type:"string"` + + // Indicates whether to send an event back to EventBridge after the SQL statement + // runs. + WithEvent *bool `type:"boolean"` +} + +// String returns the string representation +func (s RedshiftDataParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftDataParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedshiftDataParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedshiftDataParameters"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) + } + if s.DbUser != nil && len(*s.DbUser) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DbUser", 1)) + } + if s.SecretManagerArn != nil && len(*s.SecretManagerArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretManagerArn", 1)) + } + if s.Sql == nil { + invalidParams.Add(request.NewErrParamRequired("Sql")) + } + if s.Sql != nil && len(*s.Sql) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Sql", 1)) + } + if s.StatementName != nil && len(*s.StatementName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StatementName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database field's value. +func (s *RedshiftDataParameters) SetDatabase(v string) *RedshiftDataParameters { + s.Database = &v + return s +} + +// SetDbUser sets the DbUser field's value. +func (s *RedshiftDataParameters) SetDbUser(v string) *RedshiftDataParameters { + s.DbUser = &v + return s +} + +// SetSecretManagerArn sets the SecretManagerArn field's value. 
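A short sketch, not part of the generated file, of filling in the new RedshiftDataParameters block and running its client-side Validate; the database name, secret ARN, statement name and SQL text are placeholders.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
    // Database and Sql are the required fields; SecretManagerArn is used when
    // authenticating through AWS Secrets Manager.
    params := &cloudwatchevents.RedshiftDataParameters{
        Database:         aws.String("dev"),
        Sql:              aws.String("SELECT count(*) FROM events"),
        SecretManagerArn: aws.String("arn:aws:secretsmanager:us-east-1:111122223333:secret:redshift-creds-AbCdEf"),
        StatementName:    aws.String("event-driven-count"),
        WithEvent:        aws.Bool(true),
    }

    // Validate applies the same client-side checks PutTargets would run.
    if err := params.Validate(); err != nil {
        fmt.Println("invalid parameters:", err)
        return
    }
    fmt.Println("parameters pass client-side validation")
}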
+func (s *RedshiftDataParameters) SetSecretManagerArn(v string) *RedshiftDataParameters { + s.SecretManagerArn = &v + return s +} + +// SetSql sets the Sql field's value. +func (s *RedshiftDataParameters) SetSql(v string) *RedshiftDataParameters { + s.Sql = &v + return s +} + +// SetStatementName sets the StatementName field's value. +func (s *RedshiftDataParameters) SetStatementName(v string) *RedshiftDataParameters { + s.StatementName = &v + return s +} + +// SetWithEvent sets the WithEvent field's value. +func (s *RedshiftDataParameters) SetWithEvent(v bool) *RedshiftDataParameters { + s.WithEvent = &v + return s +} + type RemovePermissionInput struct { _ struct{} `type:"structure"` @@ -7096,8 +7384,8 @@ func (s *RemoveTargetsResultEntry) SetTargetId(v string) *RemoveTargetsResultEnt // The resource you are trying to create already exists. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7114,17 +7402,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7132,28 +7420,28 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // An entity that you specified does not exist. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7170,17 +7458,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. 
-func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7188,22 +7476,70 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A RetryPolicy object that includes information about the retry policy settings. +type RetryPolicy struct { + _ struct{} `type:"structure"` + + // The maximum amount of time, in seconds, to continue to make retry attempts. + MaximumEventAgeInSeconds *int64 `min:"60" type:"integer"` + + // The maximum number of retry attempts to make before the request fails. Retry + // attempts continue until either the maximum number of attempts is made or + // until the duration of the MaximumEventAgeInSeconds is met. + MaximumRetryAttempts *int64 `type:"integer"` +} + +// String returns the string representation +func (s RetryPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RetryPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RetryPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RetryPolicy"} + if s.MaximumEventAgeInSeconds != nil && *s.MaximumEventAgeInSeconds < 60 { + invalidParams.Add(request.NewErrParamMinValue("MaximumEventAgeInSeconds", 60)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaximumEventAgeInSeconds sets the MaximumEventAgeInSeconds field's value. +func (s *RetryPolicy) SetMaximumEventAgeInSeconds(v int64) *RetryPolicy { + s.MaximumEventAgeInSeconds = &v + return s +} + +// SetMaximumRetryAttempts sets the MaximumRetryAttempts field's value. +func (s *RetryPolicy) SetMaximumRetryAttempts(v int64) *RetryPolicy { + s.MaximumRetryAttempts = &v + return s } // Contains information about a rule in Amazon EventBridge. @@ -7605,12 +7941,24 @@ type Target struct { // in the AWS Batch User Guide. BatchParameters *BatchParameters `type:"structure"` + // The DeadLetterConfig that defines the target queue to send dead-letter queue + // events to. + DeadLetterConfig *DeadLetterConfig `type:"structure"` + // Contains the Amazon ECS task definition and task count to be used, if the // event target is an Amazon ECS task. For more information about Amazon ECS // tasks, see Task Definitions (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) // in the Amazon EC2 Container Service Developer Guide. EcsParameters *EcsParameters `type:"structure"` + // Contains the HTTP parameters to use when the target is a API Gateway REST + // endpoint. 
+ // + // If you specify an API Gateway REST API as a target, you can use this parameter + // to specify headers, path parameter, query string keys/values as part of your + // target invoking request. + HttpParameters *HttpParameters `type:"structure"` + // The ID of the target. // // Id is a required field @@ -7636,6 +7984,18 @@ type Target struct { // default is to use the eventId as the partition key. KinesisParameters *KinesisParameters `type:"structure"` + // Contains the Redshift Data API parameters to use when the target is a Redshift + // cluster. + // + // If you specify a Redshift Cluster as a Target, you can use this to specify + // parameters to invoke the Redshift Data API ExecuteStatement based on EventBridge + // events. + RedshiftDataParameters *RedshiftDataParameters `type:"structure"` + + // The RetryPolicy object that contains the retry policy configuration to use + // for the dead-letter queue. + RetryPolicy *RetryPolicy `type:"structure"` + // The Amazon Resource Name (ARN) of the IAM role to be used for this target // when the rule is triggered. If one rule triggers multiple targets, you can // use a different IAM role for each target. @@ -7684,6 +8044,11 @@ func (s *Target) Validate() error { invalidParams.AddNested("BatchParameters", err.(request.ErrInvalidParams)) } } + if s.DeadLetterConfig != nil { + if err := s.DeadLetterConfig.Validate(); err != nil { + invalidParams.AddNested("DeadLetterConfig", err.(request.ErrInvalidParams)) + } + } if s.EcsParameters != nil { if err := s.EcsParameters.Validate(); err != nil { invalidParams.AddNested("EcsParameters", err.(request.ErrInvalidParams)) @@ -7699,6 +8064,16 @@ func (s *Target) Validate() error { invalidParams.AddNested("KinesisParameters", err.(request.ErrInvalidParams)) } } + if s.RedshiftDataParameters != nil { + if err := s.RedshiftDataParameters.Validate(); err != nil { + invalidParams.AddNested("RedshiftDataParameters", err.(request.ErrInvalidParams)) + } + } + if s.RetryPolicy != nil { + if err := s.RetryPolicy.Validate(); err != nil { + invalidParams.AddNested("RetryPolicy", err.(request.ErrInvalidParams)) + } + } if s.RunCommandParameters != nil { if err := s.RunCommandParameters.Validate(); err != nil { invalidParams.AddNested("RunCommandParameters", err.(request.ErrInvalidParams)) @@ -7723,12 +8098,24 @@ func (s *Target) SetBatchParameters(v *BatchParameters) *Target { return s } +// SetDeadLetterConfig sets the DeadLetterConfig field's value. +func (s *Target) SetDeadLetterConfig(v *DeadLetterConfig) *Target { + s.DeadLetterConfig = v + return s +} + // SetEcsParameters sets the EcsParameters field's value. func (s *Target) SetEcsParameters(v *EcsParameters) *Target { s.EcsParameters = v return s } +// SetHttpParameters sets the HttpParameters field's value. +func (s *Target) SetHttpParameters(v *HttpParameters) *Target { + s.HttpParameters = v + return s +} + // SetId sets the Id field's value. func (s *Target) SetId(v string) *Target { s.Id = &v @@ -7759,6 +8146,18 @@ func (s *Target) SetKinesisParameters(v *KinesisParameters) *Target { return s } +// SetRedshiftDataParameters sets the RedshiftDataParameters field's value. +func (s *Target) SetRedshiftDataParameters(v *RedshiftDataParameters) *Target { + s.RedshiftDataParameters = v + return s +} + +// SetRetryPolicy sets the RetryPolicy field's value. +func (s *Target) SetRetryPolicy(v *RetryPolicy) *Target { + s.RetryPolicy = v + return s +} + // SetRoleArn sets the RoleArn field's value. 
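The following sketch (again not part of the generated file) wires the new DeadLetterConfig and RetryPolicy fields into a Target and runs the extended Target.Validate; the Lambda and SQS ARNs are placeholders.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
    // A target that sends undeliverable events to an SQS dead-letter queue and
    // retries for at most one hour or 10 attempts, whichever comes first.
    target := &cloudwatchevents.Target{
        Id:  aws.String("lambda-target"),
        Arn: aws.String("arn:aws:lambda:us-east-1:111122223333:function:example"),
        DeadLetterConfig: &cloudwatchevents.DeadLetterConfig{
            Arn: aws.String("arn:aws:sqs:us-east-1:111122223333:example-dlq"),
        },
        RetryPolicy: &cloudwatchevents.RetryPolicy{
            MaximumEventAgeInSeconds: aws.Int64(3600), // must be >= 60
            MaximumRetryAttempts:     aws.Int64(10),
        },
    }

    // Target.Validate now also walks the nested DeadLetterConfig, RetryPolicy
    // and RedshiftDataParameters structures.
    if err := target.Validate(); err != nil {
        log.Fatal(err)
    }
    fmt.Println(target)
}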
func (s *Target) SetRoleArn(v string) *Target { s.RoleArn = &v @@ -7930,6 +8329,14 @@ const ( AssignPublicIpDisabled = "DISABLED" ) +// AssignPublicIp_Values returns all elements of the AssignPublicIp enum +func AssignPublicIp_Values() []string { + return []string{ + AssignPublicIpEnabled, + AssignPublicIpDisabled, + } +} + const ( // EventSourceStatePending is a EventSourceState enum value EventSourceStatePending = "PENDING" @@ -7941,6 +8348,15 @@ const ( EventSourceStateDeleted = "DELETED" ) +// EventSourceState_Values returns all elements of the EventSourceState enum +func EventSourceState_Values() []string { + return []string{ + EventSourceStatePending, + EventSourceStateActive, + EventSourceStateDeleted, + } +} + const ( // LaunchTypeEc2 is a LaunchType enum value LaunchTypeEc2 = "EC2" @@ -7949,6 +8365,14 @@ const ( LaunchTypeFargate = "FARGATE" ) +// LaunchType_Values returns all elements of the LaunchType enum +func LaunchType_Values() []string { + return []string{ + LaunchTypeEc2, + LaunchTypeFargate, + } +} + const ( // RuleStateEnabled is a RuleState enum value RuleStateEnabled = "ENABLED" @@ -7956,3 +8380,11 @@ const ( // RuleStateDisabled is a RuleState enum value RuleStateDisabled = "DISABLED" ) + +// RuleState_Values returns all elements of the RuleState enum +func RuleState_Values() []string { + return []string{ + RuleStateEnabled, + RuleStateDisabled, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/errors.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/errors.go index 059007e40..9de2a8c22 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/errors.go @@ -49,6 +49,12 @@ const ( // or UntagResource. ErrCodeManagedRuleException = "ManagedRuleException" + // ErrCodeOperationDisabledException for service response error code + // "OperationDisabledException". + // + // The operation you are attempting is not available in this region. + ErrCodeOperationDisabledException = "OperationDisabledException" + // ErrCodePolicyLengthExceededException for service response error code // "PolicyLengthExceededException". 
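A small illustration of the newly generated *_Values helpers, using RuleState_Values to check user input instead of hard-coding the enum strings; the isValidRuleState function is hypothetical, not part of the SDK.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

// isValidRuleState reports whether s is one of the modeled RuleState values.
func isValidRuleState(s string) bool {
    for _, v := range cloudwatchevents.RuleState_Values() {
        if v == s {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(isValidRuleState("ENABLED")) // true
    fmt.Println(isValidRuleState("PAUSED"))  // false
}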
// @@ -75,6 +81,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidStateException": newErrorInvalidStateException, "LimitExceededException": newErrorLimitExceededException, "ManagedRuleException": newErrorManagedRuleException, + "OperationDisabledException": newErrorOperationDisabledException, "PolicyLengthExceededException": newErrorPolicyLengthExceededException, "ResourceAlreadyExistsException": newErrorResourceAlreadyExistsException, "ResourceNotFoundException": newErrorResourceNotFoundException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go index affde50d0..b52dda40f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go index 594ec846d..5deb41731 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go @@ -67,14 +67,14 @@ func (c *CloudWatchLogs) AssociateKmsKeyRequest(input *AssociateKmsKeyInput) (re // within Amazon CloudWatch Logs. This enables Amazon CloudWatch Logs to decrypt // this data whenever it is requested. // -// Important: CloudWatch Logs supports only symmetric CMKs. Do not use an associate -// an asymmetric CMK with your log group. For more information, see Using Symmetric +// CloudWatch Logs supports only symmetric CMKs. Do not use an associate an +// asymmetric CMK with your log group. For more information, see Using Symmetric // and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html). // -// Note that it can take up to 5 minutes for this operation to take effect. +// It can take up to 5 minutes for this operation to take effect. // // If you attempt to associate a CMK with a log group but the CMK does not exist -// or the CMK is disabled, you will receive an InvalidParameterException error. +// or the CMK is disabled, you receive an InvalidParameterException error. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -254,13 +254,16 @@ func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) ( // CreateExportTask API operation for Amazon CloudWatch Logs. // // Creates an export task, which allows you to efficiently export data from -// a log group to an Amazon S3 bucket. +// a log group to an Amazon S3 bucket. When you perform a CreateExportTask operation, +// you must use credentials that have permission to write to the S3 bucket that +// you specify as the destination. // // This is an asynchronous call. If all the required information is provided, // this operation initiates an export task and responds with the ID of the task. -// After the task has started, you can use DescribeExportTasks to get the status -// of the export task. 
Each account can only have one active (RUNNING or PENDING) -// export task at a time. To cancel an export task, use CancelExportTask. +// After the task has started, you can use DescribeExportTasks (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeExportTasks.html) +// to get the status of the export task. Each account can only have one active +// (RUNNING or PENDING) export task at a time. To cancel an export task, use +// CancelExportTask (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CancelExportTask.html). // // You can export logs from multiple log groups or multiple time ranges to the // same S3 bucket. To separate out log data for each export task, you can specify @@ -362,9 +365,8 @@ func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req // CreateLogGroup API operation for Amazon CloudWatch Logs. // -// Creates a log group with the specified name. -// -// You can create up to 20,000 log groups per account. +// Creates a log group with the specified name. You can create up to 20,000 +// log groups per account. // // You must use the following guidelines when naming a log group: // @@ -376,6 +378,10 @@ func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req // '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and // '#' (number sign) // +// When you create a log group, by default the log events in the log group never +// expire. To set a retention policy so that events expire and are deleted after +// a specified time, use PutRetentionPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutRetentionPolicy.html). +// // If you associate a AWS Key Management Service (AWS KMS) customer master key // (CMK) with the log group, ingested data is encrypted using the CMK. This // association is stored as long as the data encrypted with the CMK is still @@ -383,12 +389,11 @@ func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req // this data whenever it is requested. // // If you attempt to associate a CMK with the log group but the CMK does not -// exist or the CMK is disabled, you will receive an InvalidParameterException -// error. +// exist or the CMK is disabled, you receive an InvalidParameterException error. // -// Important: CloudWatch Logs supports only symmetric CMKs. Do not associate -// an asymmetric CMK with your log group. For more information, see Using Symmetric -// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html). +// CloudWatch Logs supports only symmetric CMKs. Do not associate an asymmetric +// CMK with your log group. For more information, see Using Symmetric and Asymmetric +// Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -480,7 +485,9 @@ func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (re // CreateLogStream API operation for Amazon CloudWatch Logs. // -// Creates a log stream for the specified log group. +// Creates a log stream for the specified log group. A log stream is a sequence +// of log events that originate from a single source, such as an application +// instance or a resource that is being monitored. // // There is no limit on the number of log streams that you can create for a // log group. 
There is a limit of 50 TPS on CreateLogStream operations, after @@ -896,6 +903,97 @@ func (c *CloudWatchLogs) DeleteMetricFilterWithContext(ctx aws.Context, input *D return out, req.Send() } +const opDeleteQueryDefinition = "DeleteQueryDefinition" + +// DeleteQueryDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteQueryDefinition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteQueryDefinition for more information on using the DeleteQueryDefinition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteQueryDefinitionRequest method. +// req, resp := client.DeleteQueryDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteQueryDefinition +func (c *CloudWatchLogs) DeleteQueryDefinitionRequest(input *DeleteQueryDefinitionInput) (req *request.Request, output *DeleteQueryDefinitionOutput) { + op := &request.Operation{ + Name: opDeleteQueryDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteQueryDefinitionInput{} + } + + output = &DeleteQueryDefinitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteQueryDefinition API operation for Amazon CloudWatch Logs. +// +// Deletes a saved CloudWatch Logs Insights query definition. A query definition +// contains details about a saved CloudWatch Logs Insights query. +// +// Each DeleteQueryDefinition operation can delete one query definition. +// +// You must have the logs:DeleteQueryDefinition permission to be able to perform +// this operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DeleteQueryDefinition for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DeleteQueryDefinition +func (c *CloudWatchLogs) DeleteQueryDefinition(input *DeleteQueryDefinitionInput) (*DeleteQueryDefinitionOutput, error) { + req, out := c.DeleteQueryDefinitionRequest(input) + return out, req.Send() +} + +// DeleteQueryDefinitionWithContext is the same as DeleteQueryDefinition with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteQueryDefinition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
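For orientation, a minimal sketch of calling the new DeleteQueryDefinition operation; the query definition ID is a placeholder, and the QueryDefinitionId field name is taken from the full SDK model rather than from the excerpt above.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
    svc := cloudwatchlogs.New(session.Must(session.NewSession()))

    // The ID below is a placeholder; it is returned by PutQueryDefinition and
    // listed by DescribeQueryDefinitions.
    _, err := svc.DeleteQueryDefinition(&cloudwatchlogs.DeleteQueryDefinitionInput{
        QueryDefinitionId: aws.String("123ab45c-678d-901e-f234-567890abcdef"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("query definition deleted")
}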
+func (c *CloudWatchLogs) DeleteQueryDefinitionWithContext(ctx aws.Context, input *DeleteQueryDefinitionInput, opts ...request.Option) (*DeleteQueryDefinitionOutput, error) { + req, out := c.DeleteQueryDefinitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteResourcePolicy = "DeleteResourcePolicy" // DeleteResourcePolicyRequest generates a "aws/request.Request" representing the @@ -1730,8 +1828,8 @@ func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFilte // DescribeMetricFilters API operation for Amazon CloudWatch Logs. // -// Lists the specified metric filters. You can list all the metric filters or -// filter the results by log name, prefix, metric name, or metric namespace. +// Lists the specified metric filters. You can list all of the metric filters +// or filter the results by log name, prefix, metric name, or metric namespace. // The results are ASCII-sorted by filter name. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1871,7 +1969,7 @@ func (c *CloudWatchLogs) DescribeQueriesRequest(input *DescribeQueriesInput) (re // DescribeQueries API operation for Amazon CloudWatch Logs. // // Returns a list of CloudWatch Logs Insights queries that are scheduled, executing, -// or have been executed recently in this account. You can request all queries, +// or have been executed recently in this account. You can request all queries // or limit it to queries of a specific log group or queries with a certain // status. // @@ -1914,6 +2012,92 @@ func (c *CloudWatchLogs) DescribeQueriesWithContext(ctx aws.Context, input *Desc return out, req.Send() } +const opDescribeQueryDefinitions = "DescribeQueryDefinitions" + +// DescribeQueryDefinitionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeQueryDefinitions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeQueryDefinitions for more information on using the DescribeQueryDefinitions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeQueryDefinitionsRequest method. +// req, resp := client.DescribeQueryDefinitionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeQueryDefinitions +func (c *CloudWatchLogs) DescribeQueryDefinitionsRequest(input *DescribeQueryDefinitionsInput) (req *request.Request, output *DescribeQueryDefinitionsOutput) { + op := &request.Operation{ + Name: opDescribeQueryDefinitions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeQueryDefinitionsInput{} + } + + output = &DescribeQueryDefinitionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeQueryDefinitions API operation for Amazon CloudWatch Logs. +// +// This operation returns a paginated list of your saved CloudWatch Logs Insights +// query definitions. 
+// +// You can use the queryDefinitionNamePrefix parameter to limit the results +// to only the query definitions that have names that start with a certain string. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation DescribeQueryDefinitions for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ServiceUnavailableException +// The service cannot complete the request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/DescribeQueryDefinitions +func (c *CloudWatchLogs) DescribeQueryDefinitions(input *DescribeQueryDefinitionsInput) (*DescribeQueryDefinitionsOutput, error) { + req, out := c.DescribeQueryDefinitionsRequest(input) + return out, req.Send() +} + +// DescribeQueryDefinitionsWithContext is the same as DescribeQueryDefinitions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeQueryDefinitions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) DescribeQueryDefinitionsWithContext(ctx aws.Context, input *DescribeQueryDefinitionsInput, opts ...request.Option) (*DescribeQueryDefinitionsOutput, error) { + req, out := c.DescribeQueryDefinitionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeResourcePolicies = "DescribeResourcePolicies" // DescribeResourcePoliciesRequest generates a "aws/request.Request" representing the @@ -2294,10 +2478,15 @@ func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (re // of the log stream. // // By default, this operation returns as many log events as can fit in 1 MB -// (up to 10,000 log events), or all the events found within the time range -// that you specify. If the results include a token, then there are more log -// events available, and you can get additional results by specifying the token -// in a subsequent call. +// (up to 10,000 log events) or all the events found within the time range that +// you specify. If the results include a token, then there are more log events +// available, and you can get additional results by specifying the token in +// a subsequent call. This operation can return empty results while there are +// more log events available through the token. +// +// The returned log events are sorted by event timestamp, the timestamp when +// the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents +// request. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2441,12 +2630,14 @@ func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *req // GetLogEvents API operation for Amazon CloudWatch Logs. // -// Lists log events from the specified log stream. You can list all the log +// Lists log events from the specified log stream. You can list all of the log // events or filter using a time range. 
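The token behavior described for FilterLogEvents (a page can be empty while more events remain) is usually easiest to handle with the generated paginator. The sketch below assumes the FilterLogEventsPages helper and input fields from the full SDK model; the log group name and filter pattern are placeholders.

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
    svc := cloudwatchlogs.New(session.Must(session.NewSession()))

    start := time.Now().Add(-1*time.Hour).UnixNano() / int64(time.Millisecond)
    input := &cloudwatchlogs.FilterLogEventsInput{
        LogGroupName:  aws.String("/example/application"), // placeholder log group
        FilterPattern: aws.String("ERROR"),
        StartTime:     aws.Int64(start),
    }

    // FilterLogEventsPages keeps following nextToken until the final page,
    // which also covers empty pages that still carry a token.
    err := svc.FilterLogEventsPages(input, func(page *cloudwatchlogs.FilterLogEventsOutput, lastPage bool) bool {
        for _, e := range page.Events {
            fmt.Println(aws.StringValue(e.Message))
        }
        return true // continue paging
    })
    if err != nil {
        log.Fatal(err)
    }
}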
// // By default, this operation returns as many log events as can fit in a response // size of 1MB (up to 10,000 log events). You can get additional log events -// by specifying one of the tokens in a subsequent call. +// by specifying one of the tokens in a subsequent call. This operation can +// return empty results while there are more log events available through the +// token. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2589,7 +2780,9 @@ func (c *CloudWatchLogs) GetLogGroupFieldsRequest(input *GetLogGroupFieldsInput) // The search is limited to a time period that you specify. // // In the results, fields that start with @ are fields generated by CloudWatch -// Logs. For example, @timestamp is the timestamp of each log event. +// Logs. For example, @timestamp is the timestamp of each log event. For more +// information about the fields that are generated by CloudWatch logs, see Supported +// Logs and Discovered Fields (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData-discoverable-fields.html). // // The response results are sorted by the frequency percentage, starting with // the highest percentage. @@ -2680,12 +2873,12 @@ func (c *CloudWatchLogs) GetLogRecordRequest(input *GetLogRecordInput) (req *req // GetLogRecord API operation for Amazon CloudWatch Logs. // -// Retrieves all the fields and values of a single log event. All fields are -// retrieved, even if the original query that produced the logRecordPointer +// Retrieves all of the fields and values of a single log event. All fields +// are retrieved, even if the original query that produced the logRecordPointer // retrieved only a subset of fields. Fields are returned as field name/field // value pairs. // -// Additionally, the entire unparsed log event is returned within @message. +// The full unparsed log event is returned within @message. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2775,11 +2968,13 @@ func (c *CloudWatchLogs) GetQueryResultsRequest(input *GetQueryResultsInput) (re // // Returns the results from the specified query. // -// Only the fields requested in the query are returned, along with a @ptr field +// Only the fields requested in the query are returned, along with a @ptr field, // which is the identifier for the log record. You can use the value of @ptr -// in a operation to get the full log record. +// in a GetLogRecord (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_GetLogRecord.html) +// operation to get the full log record. // -// GetQueryResults does not start a query execution. To run a query, use . +// GetQueryResults does not start a query execution. To run a query, use StartQuery +// (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_StartQuery.html). // // If the value of the Status field in the output is Running, this operation // returns only partial results. If you see a value of Scheduled or Running @@ -2955,14 +3150,18 @@ func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req // // A destination encapsulates a physical resource (such as an Amazon Kinesis // stream) and enables you to subscribe to a real-time stream of log events -// for a different account, ingested using PutLogEvents. 
+// for a different account, ingested using PutLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html). // // Through an access policy, a destination controls what is written to it. By // default, PutDestination does not set any access policy with the destination, -// which means a cross-account user cannot call PutSubscriptionFilter against -// this destination. To enable this, the destination owner must call PutDestinationPolicy +// which means a cross-account user cannot call PutSubscriptionFilter (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutSubscriptionFilter.html) +// against this destination. To enable this, the destination owner must call +// PutDestinationPolicy (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutDestinationPolicy.html) // after PutDestination. // +// To perform a PutDestination operation, you must also have the iam:PassRole +// permission. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3141,14 +3340,13 @@ func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *req // call. An upload in a newly created log stream does not require a sequence // token. You can also get the sequence token in the expectedSequenceToken field // from InvalidSequenceTokenException. If you call PutLogEvents twice within -// a narrow time period using the same value for sequenceToken, both calls may -// be successful, or one may be rejected. +// a narrow time period using the same value for sequenceToken, both calls might +// be successful or one might be rejected. // // The batch of events must satisfy the following constraints: // -// * The maximum batch size is 1,048,576 bytes, and this size is calculated -// as the sum of all event messages in UTF-8, plus 26 bytes for each log -// event. +// * The maximum batch size is 1,048,576 bytes. This size is calculated as +// the sum of all event messages in UTF-8, plus 26 bytes for each log event. // // * None of the log events in the batch can be more than 2 hours in the // future. @@ -3156,7 +3354,7 @@ func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *req // * None of the log events in the batch can be older than 14 days or older // than the retention period of the log group. // -// * The log events in the batch must be in chronological ordered by their +// * The log events in the batch must be in chronological order by their // timestamp. The timestamp is the time the event occurred, expressed as // the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In AWS Tools // for PowerShell and the AWS SDK for .NET, the timestamp is specified in @@ -3269,7 +3467,7 @@ func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (re // // Creates or updates a metric filter and associates it with the specified log // group. Metric filters allow you to configure rules to extract metric data -// from log events ingested through PutLogEvents. +// from log events ingested through PutLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html). // // The maximum number of metric filters that can be associated with a log group // is 100. 
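A hedged sketch of a PutLogEvents batch that satisfies the constraints listed above (millisecond timestamps, chronological order within the batch); the log group and stream names are placeholders, and the field names come from the full SDK model.

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
    svc := cloudwatchlogs.New(session.Must(session.NewSession()))

    // Timestamps are milliseconds since the Unix epoch.
    now := time.Now().UnixNano() / int64(time.Millisecond)
    out, err := svc.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
        LogGroupName:  aws.String("/example/application"), // placeholders
        LogStreamName: aws.String("instance-1"),
        LogEvents: []*cloudwatchlogs.InputLogEvent{
            {Message: aws.String("first event"), Timestamp: aws.Int64(now)},
            {Message: aws.String("second event"), Timestamp: aws.Int64(now + 1)},
        },
        // SequenceToken is omitted for a newly created stream; for an existing
        // stream, pass the token returned in NextSequenceToken.
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("next sequence token:", aws.StringValue(out.NextSequenceToken))
}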
@@ -3319,6 +3517,103 @@ func (c *CloudWatchLogs) PutMetricFilterWithContext(ctx aws.Context, input *PutM return out, req.Send() } +const opPutQueryDefinition = "PutQueryDefinition" + +// PutQueryDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the PutQueryDefinition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutQueryDefinition for more information on using the PutQueryDefinition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutQueryDefinitionRequest method. +// req, resp := client.PutQueryDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutQueryDefinition +func (c *CloudWatchLogs) PutQueryDefinitionRequest(input *PutQueryDefinitionInput) (req *request.Request, output *PutQueryDefinitionOutput) { + op := &request.Operation{ + Name: opPutQueryDefinition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutQueryDefinitionInput{} + } + + output = &PutQueryDefinitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutQueryDefinition API operation for Amazon CloudWatch Logs. +// +// Creates or updates a query definition for CloudWatch Logs Insights. For more +// information, see Analyzing Log Data with CloudWatch Logs Insights (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/AnalyzingLogData.html). +// +// To update a query definition, specify its queryDefinitionId in your request. +// The values of name, queryString, and logGroupNames are changed to the values +// that you specify in your update operation. No current values are retained +// from the current query definition. For example, if you update a current query +// definition that includes log groups, and you don't specify the logGroupNames +// parameter in your update operation, the query definition changes to contain +// no log groups. +// +// You must have the logs:PutQueryDefinition permission to be able to perform +// this operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon CloudWatch Logs's +// API operation PutQueryDefinition for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// A parameter is specified incorrectly. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ServiceUnavailableException +// The service cannot complete the request. 
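To illustrate the create-versus-update semantics described above, here is a sketch of PutQueryDefinition creating a new definition; the name, log group and query string are placeholders, and the output field name is taken from the full SDK model.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
    svc := cloudwatchlogs.New(session.Must(session.NewSession()))

    // Creating a new definition: QueryDefinitionId is omitted. To update an
    // existing definition, set QueryDefinitionId and remember that name,
    // queryString and logGroupNames are fully replaced, not merged.
    out, err := svc.PutQueryDefinition(&cloudwatchlogs.PutQueryDefinitionInput{
        Name:          aws.String("recent-errors"), // placeholder
        LogGroupNames: []*string{aws.String("/example/application")},
        QueryString:   aws.String("fields @timestamp, @message | filter @message like /ERROR/ | sort @timestamp desc | limit 20"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("query definition id:", aws.StringValue(out.QueryDefinitionId))
}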
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/logs-2014-03-28/PutQueryDefinition +func (c *CloudWatchLogs) PutQueryDefinition(input *PutQueryDefinitionInput) (*PutQueryDefinitionOutput, error) { + req, out := c.PutQueryDefinitionRequest(input) + return out, req.Send() +} + +// PutQueryDefinitionWithContext is the same as PutQueryDefinition with the addition of +// the ability to pass a context and additional request options. +// +// See PutQueryDefinition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CloudWatchLogs) PutQueryDefinitionWithContext(ctx aws.Context, input *PutQueryDefinitionInput, opts ...request.Option) (*PutQueryDefinitionOutput, error) { + req, out := c.PutQueryDefinitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutResourcePolicy = "PutResourcePolicy" // PutResourcePolicyRequest generates a "aws/request.Request" representing the @@ -3365,7 +3660,7 @@ func (c *CloudWatchLogs) PutResourcePolicyRequest(input *PutResourcePolicyInput) // // Creates or updates a resource policy allowing other AWS services to put log // events to this account, such as Amazon Route 53. An account can have up to -// 10 resource policies per region. +// 10 resource policies per AWS Region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3544,8 +3839,12 @@ func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilt // // Creates or updates a subscription filter and associates it with the specified // log group. Subscription filters allow you to subscribe to a real-time stream -// of log events ingested through PutLogEvents and have them delivered to a -// specific destination. Currently, the supported destinations are: +// of log events ingested through PutLogEvents (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html) +// and have them delivered to a specific destination. When log events are sent +// to the receiving service, they are Base64 encoded and compressed with the +// gzip format. +// +// The following destinations are supported for subscription filters: // // * An Amazon Kinesis stream belonging to the same account as the subscription // filter, for same-account delivery. @@ -3564,6 +3863,9 @@ func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilt // filterName. Otherwise, the call fails because you cannot associate a second // filter with a log group. // +// To perform a PutSubscriptionFilter operation, you must also have the iam:PassRole +// permission. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3654,12 +3956,12 @@ func (c *CloudWatchLogs) StartQueryRequest(input *StartQueryInput) (req *request // StartQuery API operation for Amazon CloudWatch Logs. // // Schedules a query of a log group using CloudWatch Logs Insights. You specify -// the log group and time range to query, and the query string to use. +// the log group and time range to query and the query string to use. 
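A minimal sketch of the PutSubscriptionFilter call described above, assuming a hypothetical Kinesis stream destination and IAM role; both ARNs and all names are placeholders, and the caller also needs the iam:PassRole permission for the role it passes.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(session.Must(session.NewSession()))

	// ARNs, names, and the filter pattern below are placeholders.
	_, err := svc.PutSubscriptionFilter(&cloudwatchlogs.PutSubscriptionFilterInput{
		LogGroupName:   aws.String("/example/app"),
		FilterName:     aws.String("example-errors-to-kinesis"),
		FilterPattern:  aws.String("ERROR"),
		DestinationArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream"),
		RoleArn:        aws.String("arn:aws:iam::123456789012:role/example-cwl-to-kinesis"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```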
// // For more information, see CloudWatch Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). // // Queries time out after 15 minutes of execution. If your queries are timing -// out, reduce the time range being searched, or partition your query into a +// out, reduce the time range being searched or partition your query into a // number of queries. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3672,7 +3974,7 @@ func (c *CloudWatchLogs) StartQueryRequest(input *StartQueryInput) (req *request // Returned Error Types: // * MalformedQueryException // The query string is not valid. Details about this error are displayed in -// a QueryCompileError object. For more information, see . +// a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html). // // For more information about valid query syntax, see CloudWatch Logs Insights // Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). @@ -3845,11 +4147,11 @@ func (c *CloudWatchLogs) TagLogGroupRequest(input *TagLogGroupInput) (req *reque // // Adds or updates the specified tags for the specified log group. // -// To list the tags for a log group, use ListTagsLogGroup. To remove tags, use -// UntagLogGroup. +// To list the tags for a log group, use ListTagsLogGroup (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsLogGroup.html). +// To remove tags, use UntagLogGroup (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_UntagLogGroup.html). // // For more information about tags, see Tag Log Groups in Amazon CloudWatch -// Logs (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/log-group-tagging.html) +// Logs (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html#log-group-tagging) // in the Amazon CloudWatch Logs User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4019,8 +4321,8 @@ func (c *CloudWatchLogs) UntagLogGroupRequest(input *UntagLogGroupInput) (req *r // // Removes the specified tags from the specified log group. // -// To list the tags for a log group, use ListTagsLogGroup. To add tags, use -// UntagLogGroup. +// To list the tags for a log group, use ListTagsLogGroup (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_ListTagsLogGroup.html). +// To add tags, use TagLogGroup (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_TagLogGroup.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4483,8 +4785,8 @@ func (s CreateLogStreamOutput) GoString() string { // The event was already logged. type DataAlreadyAcceptedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` ExpectedSequenceToken *string `locationName:"expectedSequenceToken" min:"1" type:"string"` @@ -4503,17 +4805,17 @@ func (s DataAlreadyAcceptedException) GoString() string { func newErrorDataAlreadyAcceptedException(v protocol.ResponseMetadata) error { return &DataAlreadyAcceptedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s DataAlreadyAcceptedException) Code() string { +func (s *DataAlreadyAcceptedException) Code() string { return "DataAlreadyAcceptedException" } // Message returns the exception's message. -func (s DataAlreadyAcceptedException) Message() string { +func (s *DataAlreadyAcceptedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4521,22 +4823,22 @@ func (s DataAlreadyAcceptedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DataAlreadyAcceptedException) OrigErr() error { +func (s *DataAlreadyAcceptedException) OrigErr() error { return nil } -func (s DataAlreadyAcceptedException) Error() string { +func (s *DataAlreadyAcceptedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s DataAlreadyAcceptedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DataAlreadyAcceptedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DataAlreadyAcceptedException) RequestID() string { - return s.respMetadata.RequestID +func (s *DataAlreadyAcceptedException) RequestID() string { + return s.RespMetadata.RequestID } type DeleteDestinationInput struct { @@ -4793,6 +5095,70 @@ func (s DeleteMetricFilterOutput) GoString() string { return s.String() } +type DeleteQueryDefinitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the query definition that you want to delete. You can use DescribeQueryDefinitions + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html) + // to retrieve the IDs of your saved query definitions. + // + // QueryDefinitionId is a required field + QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteQueryDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueryDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteQueryDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteQueryDefinitionInput"} + if s.QueryDefinitionId == nil { + invalidParams.Add(request.NewErrParamRequired("QueryDefinitionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQueryDefinitionId sets the QueryDefinitionId field's value. +func (s *DeleteQueryDefinitionInput) SetQueryDefinitionId(v string) *DeleteQueryDefinitionInput { + s.QueryDefinitionId = &v + return s +} + +type DeleteQueryDefinitionOutput struct { + _ struct{} `type:"structure"` + + // A value of TRUE indicates that the operation succeeded. FALSE indicates that + // the operation failed. + Success *bool `locationName:"success" type:"boolean"` +} + +// String returns the string representation +func (s DeleteQueryDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteQueryDefinitionOutput) GoString() string { + return s.String() +} + +// SetSuccess sets the Success field's value. 
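A sketch of how the new query-definition types above might be used together: list saved definitions with DescribeQueryDefinitions and delete matches by ID with DeleteQueryDefinition. The name prefix is a placeholder, and this is illustrative only.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(session.Must(session.NewSession()))

	// Find saved definitions whose names start with a (placeholder) prefix.
	defs, err := svc.DescribeQueryDefinitions(&cloudwatchlogs.DescribeQueryDefinitionsInput{
		QueryDefinitionNamePrefix: aws.String("example/"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Delete each matching definition by its ID.
	for _, d := range defs.QueryDefinitions {
		out, err := svc.DeleteQueryDefinition(&cloudwatchlogs.DeleteQueryDefinitionInput{
			QueryDefinitionId: d.QueryDefinitionId,
		})
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("deleted %s: success=%t", aws.StringValue(d.Name), aws.BoolValue(out.Success))
	}
}
```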
+func (s *DeleteQueryDefinitionOutput) SetSuccess(v bool) *DeleteQueryDefinitionOutput { + s.Success = &v + return s +} + type DeleteResourcePolicyInput struct { _ struct{} `type:"structure"` @@ -5224,6 +5590,9 @@ type DescribeLogGroupsOutput struct { _ struct{} `type:"structure"` // The log groups. + // + // If the retentionInDays value if not included for a log group, then that log + // group is set to have its events never expire. LogGroups []*LogGroup `locationName:"logGroups" type:"list"` // The token for the next set of items to return. The token expires after 24 @@ -5272,7 +5641,7 @@ type DescribeLogStreamsInput struct { // The prefix to match. // - // If orderBy is LastEventTime,you cannot specify this parameter. + // If orderBy is LastEventTime, you cannot specify this parameter. LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` // The token for the next set of items to return. (You received this token from @@ -5286,11 +5655,11 @@ type DescribeLogStreamsInput struct { // If you order the results by event time, you cannot specify the logStreamNamePrefix // parameter. // - // lastEventTimestamp represents the time of the most recent log event in the + // lastEventTimeStamp represents the time of the most recent log event in the // log stream in CloudWatch Logs. This number is expressed as the number of // milliseconds after Jan 1, 1970 00:00:00 UTC. lastEventTimeStamp updates on // an eventual consistency basis. It typically updates in less than an hour - // from ingestion, but may take longer in some rare situations. + // from ingestion, but in rare situations might take longer. OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"` } @@ -5401,7 +5770,8 @@ func (s *DescribeLogStreamsOutput) SetNextToken(v string) *DescribeLogStreamsOut type DescribeMetricFiltersInput struct { _ struct{} `type:"structure"` - // The prefix to match. + // The prefix to match. CloudWatch Logs uses the value you set here only if + // you also include the logGroupName parameter in your request. FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` // The maximum number of items returned. If you don't specify a value, the default @@ -5631,6 +6001,101 @@ func (s *DescribeQueriesOutput) SetQueries(v []*QueryInfo) *DescribeQueriesOutpu return s } +type DescribeQueryDefinitionsInput struct { + _ struct{} `type:"structure"` + + // Limits the number of returned query definitions to the specified number. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Use this parameter to filter your results to only the query definitions that + // have names that start with the prefix you specify. + QueryDefinitionNamePrefix *string `locationName:"queryDefinitionNamePrefix" min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeQueryDefinitionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeQueryDefinitionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeQueryDefinitionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeQueryDefinitionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.QueryDefinitionNamePrefix != nil && len(*s.QueryDefinitionNamePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryDefinitionNamePrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeQueryDefinitionsInput) SetMaxResults(v int64) *DescribeQueryDefinitionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeQueryDefinitionsInput) SetNextToken(v string) *DescribeQueryDefinitionsInput { + s.NextToken = &v + return s +} + +// SetQueryDefinitionNamePrefix sets the QueryDefinitionNamePrefix field's value. +func (s *DescribeQueryDefinitionsInput) SetQueryDefinitionNamePrefix(v string) *DescribeQueryDefinitionsInput { + s.QueryDefinitionNamePrefix = &v + return s +} + +type DescribeQueryDefinitionsOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of items to return. The token expires after 24 + // hours. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The list of query definitions that match your request. + QueryDefinitions []*QueryDefinition `locationName:"queryDefinitions" type:"list"` +} + +// String returns the string representation +func (s DescribeQueryDefinitionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeQueryDefinitionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeQueryDefinitionsOutput) SetNextToken(v string) *DescribeQueryDefinitionsOutput { + s.NextToken = &v + return s +} + +// SetQueryDefinitions sets the QueryDefinitions field's value. +func (s *DescribeQueryDefinitionsOutput) SetQueryDefinitions(v []*QueryDefinition) *DescribeQueryDefinitionsOutput { + s.QueryDefinitions = v + return s +} + type DescribeResourcePoliciesInput struct { _ struct{} `type:"structure"` @@ -5847,7 +6312,7 @@ type Destination struct { // A role for impersonation, used when delivering log events to the target. RoleArn *string `locationName:"roleArn" min:"1" type:"string"` - // The Amazon Resource Name (ARN) of the physical target to where the log events + // The Amazon Resource Name (ARN) of the physical target where the log events // are delivered (for example, a Kinesis stream). TargetArn *string `locationName:"targetArn" min:"1" type:"string"` } @@ -5957,13 +6422,13 @@ func (s DisassociateKmsKeyOutput) GoString() string { type ExportTask struct { _ struct{} `type:"structure"` - // The name of Amazon S3 bucket to which the log data was exported. + // The name of the S3 bucket to which the log data was exported. Destination *string `locationName:"destination" min:"1" type:"string"` // The prefix that was used as the start of Amazon S3 key for every object exported. DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` - // Execution info about the export task. + // Execution information about the export task. 
ExecutionInfo *ExportTaskExecutionInfo `locationName:"executionInfo" type:"structure"` // The start time, expressed as the number of milliseconds after Jan 1, 1970 @@ -6139,9 +6604,9 @@ type FilterLogEventsInput struct { // the first log stream are searched first, then those in the next log stream, // and so on. The default is false. // - // IMPORTANT: Starting on June 17, 2019, this parameter will be ignored and - // the value will be assumed to be true. The response from this operation will - // always interleave events from multiple log streams within a log group. + // Important: Starting on June 17, 2019, this parameter is ignored and the value + // is assumed to be true. The response from this operation always interleaves + // events from multiple log streams within a log group. // // Deprecated: Starting on June 17, 2019, this parameter will be ignored and the value will be assumed to be true. The response from this operation will always interleave events from multiple log streams within a log group. Interleaved *bool `locationName:"interleaved" deprecated:"true" type:"boolean"` @@ -6175,6 +6640,9 @@ type FilterLogEventsInput struct { // The start of the time range, expressed as the number of milliseconds after // Jan 1, 1970 00:00:00 UTC. Events with a timestamp before this time are not // returned. + // + // If you omit startTime and endTime the most recent log events are retrieved, + // to up 1 MB or 10,000 log events. StartTime *int64 `locationName:"startTime" type:"long"` } @@ -6280,6 +6748,9 @@ type FilterLogEventsOutput struct { // after 24 hours. NextToken *string `locationName:"nextToken" min:"1" type:"string"` + // IMPORTANT Starting on May 15, 2020, this parameter will be deprecated. This + // parameter will be an empty list after the deprecation occurs. + // // Indicates which log streams have been searched and whether each has been // searched completely. SearchedLogStreams []*SearchedLogStream `locationName:"searchedLogStreams" type:"list"` @@ -6505,13 +6976,13 @@ type GetLogEventsOutput struct { Events []*OutputLogEvent `locationName:"events" type:"list"` // The token for the next set of items in the backward direction. The token - // expires after 24 hours. This token will never be null. If you have reached - // the end of the stream, it will return the same token you passed in. + // expires after 24 hours. This token is never null. If you have reached the + // end of the stream, it returns the same token you passed in. NextBackwardToken *string `locationName:"nextBackwardToken" min:"1" type:"string"` // The token for the next set of items in the forward direction. The token expires - // after 24 hours. If you have reached the end of the stream, it will return - // the same token you passed in. + // after 24 hours. If you have reached the end of the stream, it returns the + // same token you passed in. NextForwardToken *string `locationName:"nextForwardToken" min:"1" type:"string"` } @@ -6738,14 +7209,15 @@ type GetQueryResultsOutput struct { // Includes the number of log events scanned by the query, the number of log // events that matched the query criteria, and the total number of bytes in - // the log events that were scanned. + // the log events that were scanned. These values reflect the full raw results + // of the query. Statistics *QueryStatistics `locationName:"statistics" type:"structure"` // The status of the most recent running of the query. Possible values are Cancelled, // Complete, Failed, Running, Scheduled, Timeout, and Unknown. 
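As a usage sketch for the query lifecycle documented here, the following starts a CloudWatch Logs Insights query and polls GetQueryResults until the status leaves Scheduled/Running. The log group and query string are placeholders, and StartQuery start/end times are epoch seconds.

```go
package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(session.Must(session.NewSession()))

	end := time.Now()
	start := end.Add(-1 * time.Hour)

	// Log group name and query string below are placeholders.
	started, err := svc.StartQuery(&cloudwatchlogs.StartQueryInput{
		LogGroupName: aws.String("/example/app"),
		StartTime:    aws.Int64(start.Unix()),
		EndTime:      aws.Int64(end.Unix()),
		QueryString:  aws.String("stats count(*) by bin(5m)"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Poll until the query leaves the Scheduled/Running states.
	for {
		results, err := svc.GetQueryResults(&cloudwatchlogs.GetQueryResultsInput{
			QueryId: started.QueryId,
		})
		if err != nil {
			log.Fatal(err)
		}
		status := aws.StringValue(results.Status)
		if status != cloudwatchlogs.QueryStatusScheduled && status != cloudwatchlogs.QueryStatusRunning {
			log.Printf("query finished with status %s and %d result rows", status, len(results.Results))
			return
		}
		time.Sleep(2 * time.Second)
	}
}
```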
// // Queries time out after 15 minutes of execution. To avoid having your queries - // time out, reduce the time range being searched, or partition your query into + // time out, reduce the time range being searched or partition your query into // a number of queries. Status *string `locationName:"status" type:"string" enum:"QueryStatus"` } @@ -6838,8 +7310,8 @@ func (s *InputLogEvent) SetTimestamp(v int64) *InputLogEvent { // The operation is not valid on the specified resource. type InvalidOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6856,17 +7328,17 @@ func (s InvalidOperationException) GoString() string { func newErrorInvalidOperationException(v protocol.ResponseMetadata) error { return &InvalidOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOperationException) Code() string { +func (s *InvalidOperationException) Code() string { return "InvalidOperationException" } // Message returns the exception's message. -func (s InvalidOperationException) Message() string { +func (s *InvalidOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6874,28 +7346,28 @@ func (s InvalidOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOperationException) OrigErr() error { +func (s *InvalidOperationException) OrigErr() error { return nil } -func (s InvalidOperationException) Error() string { +func (s *InvalidOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOperationException) RequestID() string { + return s.RespMetadata.RequestID } // A parameter is specified incorrectly. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6912,17 +7384,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6930,29 +7402,29 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The sequence token is not valid. You can get the correct sequence token in // the expectedSequenceToken field in the InvalidSequenceTokenException message. type InvalidSequenceTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` ExpectedSequenceToken *string `locationName:"expectedSequenceToken" min:"1" type:"string"` @@ -6971,17 +7443,17 @@ func (s InvalidSequenceTokenException) GoString() string { func newErrorInvalidSequenceTokenException(v protocol.ResponseMetadata) error { return &InvalidSequenceTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSequenceTokenException) Code() string { +func (s *InvalidSequenceTokenException) Code() string { return "InvalidSequenceTokenException" } // Message returns the exception's message. -func (s InvalidSequenceTokenException) Message() string { +func (s *InvalidSequenceTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6989,28 +7461,28 @@ func (s InvalidSequenceTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidSequenceTokenException) OrigErr() error { +func (s *InvalidSequenceTokenException) OrigErr() error { return nil } -func (s InvalidSequenceTokenException) Error() string { +func (s *InvalidSequenceTokenException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSequenceTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSequenceTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSequenceTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSequenceTokenException) RequestID() string { + return s.RespMetadata.RequestID } // You have reached the maximum number of resources that can be created. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7027,17 +7499,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7045,22 +7517,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListTagsLogGroupInput struct { @@ -7150,6 +7622,9 @@ type LogGroup struct { // The number of days to retain the log events in the specified log group. Possible // values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, // 1827, and 3653. + // + // If you omit retentionInDays in a PutRetentionPolicy operation, the events + // in the log group are always retained and never expire. RetentionInDays *int64 `locationName:"retentionInDays" type:"integer"` // The number of bytes stored. @@ -7261,8 +7736,8 @@ type LogStream struct { // The time of the most recent log event in the log stream in CloudWatch Logs. // This number is expressed as the number of milliseconds after Jan 1, 1970 // 00:00:00 UTC. The lastEventTime value updates on an eventual consistency - // basis. It typically updates in less than an hour from ingestion, but may - // take longer in some rare situations. + // basis. It typically updates in less than an hour from ingestion, but in rare + // situations might take longer. LastEventTimestamp *int64 `locationName:"lastEventTimestamp" type:"long"` // The ingestion time, expressed as the number of milliseconds after Jan 1, @@ -7274,7 +7749,7 @@ type LogStream struct { // The number of bytes stored. // - // IMPORTANT:On June 17, 2019, this parameter was deprecated for log streams, + // Important: On June 17, 2019, this parameter was deprecated for log streams, // and is always reported as zero. This change applies only to log streams. // The storedBytes parameter for log groups is not affected. // @@ -7344,13 +7819,13 @@ func (s *LogStream) SetUploadSequenceToken(v string) *LogStream { } // The query string is not valid. Details about this error are displayed in -// a QueryCompileError object. For more information, see . +// a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html). // // For more information about valid query syntax, see CloudWatch Logs Insights // Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). 
type MalformedQueryException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -7370,17 +7845,17 @@ func (s MalformedQueryException) GoString() string { func newErrorMalformedQueryException(v protocol.ResponseMetadata) error { return &MalformedQueryException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MalformedQueryException) Code() string { +func (s *MalformedQueryException) Code() string { return "MalformedQueryException" } // Message returns the exception's message. -func (s MalformedQueryException) Message() string { +func (s *MalformedQueryException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7388,22 +7863,22 @@ func (s MalformedQueryException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MalformedQueryException) OrigErr() error { +func (s *MalformedQueryException) OrigErr() error { return nil } -func (s MalformedQueryException) Error() string { +func (s *MalformedQueryException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s MalformedQueryException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MalformedQueryException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MalformedQueryException) RequestID() string { - return s.respMetadata.RequestID +func (s *MalformedQueryException) RequestID() string { + return s.RespMetadata.RequestID } // Metric filters express how CloudWatch Logs would extract metric observations @@ -7420,7 +7895,7 @@ type MetricFilter struct { FilterName *string `locationName:"filterName" min:"1" type:"string"` // A symbolic description of how CloudWatch Logs should interpret the data in - // each log event. For example, a log event may contain timestamps, IP addresses, + // each log event. For example, a log event can contain timestamps, IP addresses, // strings, and so on. You use the filter pattern to specify what to look for // in the log event message. FilterPattern *string `locationName:"filterPattern" type:"string"` @@ -7528,7 +8003,9 @@ type MetricTransformation struct { // MetricName is a required field MetricName *string `locationName:"metricName" type:"string" required:"true"` - // The namespace of the CloudWatch metric. + // A custom namespace to contain your metric in CloudWatch. Use namespaces to + // group together metrics that are similar. For more information, see Namespaces + // (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Namespace). // // MetricNamespace is a required field MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"` @@ -7595,8 +8072,8 @@ func (s *MetricTransformation) SetMetricValue(v string) *MetricTransformation { // Multiple requests to update the same resource were in conflict. 
type OperationAbortedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7613,17 +8090,17 @@ func (s OperationAbortedException) GoString() string { func newErrorOperationAbortedException(v protocol.ResponseMetadata) error { return &OperationAbortedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationAbortedException) Code() string { +func (s *OperationAbortedException) Code() string { return "OperationAbortedException" } // Message returns the exception's message. -func (s OperationAbortedException) Message() string { +func (s *OperationAbortedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7631,22 +8108,22 @@ func (s OperationAbortedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OperationAbortedException) OrigErr() error { +func (s *OperationAbortedException) OrigErr() error { return nil } -func (s OperationAbortedException) Error() string { +func (s *OperationAbortedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OperationAbortedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OperationAbortedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationAbortedException) RequestID() string { - return s.respMetadata.RequestID +func (s *OperationAbortedException) RequestID() string { + return s.RespMetadata.RequestID } // Represents a log event. @@ -7796,7 +8273,7 @@ type PutDestinationPolicyInput struct { _ struct{} `type:"structure"` // An IAM policy document that authorizes cross-account users to deliver their - // log events to the associated destination. + // log events to the associated destination. This can be up to 5120 bytes. // // AccessPolicy is a required field AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string" required:"true"` @@ -7885,9 +8362,9 @@ type PutLogEventsInput struct { // The sequence token obtained from the response of the previous PutLogEvents // call. An upload in a newly created log stream does not require a sequence - // token. You can also get the sequence token using DescribeLogStreams. If you - // call PutLogEvents twice within a narrow time period using the same value - // for sequenceToken, both calls may be successful, or one may be rejected. + // token. You can also get the sequence token using DescribeLogStreams (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeLogStreams.html). + // If you call PutLogEvents twice within a narrow time period using the same + // value for sequenceToken, both calls might be successful or one might be rejected. SequenceToken *string `locationName:"sequenceToken" min:"1" type:"string"` } @@ -8111,6 +8588,119 @@ func (s PutMetricFilterOutput) GoString() string { return s.String() } +type PutQueryDefinitionInput struct { + _ struct{} `type:"structure"` + + // Use this parameter to include specific log groups as part of your query definition. + // + // If you are updating a query definition and you omit this parameter, then + // the updated definition will contain no log groups. 
+ LogGroupNames []*string `locationName:"logGroupNames" type:"list"` + + // A name for the query definition. If you are saving a lot of query definitions, + // we recommend that you name them so that you can easily find the ones you + // want by using the first part of the name as a filter in the queryDefinitionNamePrefix + // parameter of DescribeQueryDefinitions (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html). + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` + + // If you are updating a query definition, use this parameter to specify the + // ID of the query definition that you want to update. You can use DescribeQueryDefinitions + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeQueryDefinitions.html) + // to retrieve the IDs of your saved query definitions. + // + // If you are creating a query definition, do not specify this parameter. CloudWatch + // generates a unique ID for the new query definition and include it in the + // response to this operation. + QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string"` + + // The query string to use for this definition. For more information, see CloudWatch + // Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). + // + // QueryString is a required field + QueryString *string `locationName:"queryString" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutQueryDefinitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutQueryDefinitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutQueryDefinitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutQueryDefinitionInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.QueryString == nil { + invalidParams.Add(request.NewErrParamRequired("QueryString")) + } + if s.QueryString != nil && len(*s.QueryString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("QueryString", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogGroupNames sets the LogGroupNames field's value. +func (s *PutQueryDefinitionInput) SetLogGroupNames(v []*string) *PutQueryDefinitionInput { + s.LogGroupNames = v + return s +} + +// SetName sets the Name field's value. +func (s *PutQueryDefinitionInput) SetName(v string) *PutQueryDefinitionInput { + s.Name = &v + return s +} + +// SetQueryDefinitionId sets the QueryDefinitionId field's value. +func (s *PutQueryDefinitionInput) SetQueryDefinitionId(v string) *PutQueryDefinitionInput { + s.QueryDefinitionId = &v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *PutQueryDefinitionInput) SetQueryString(v string) *PutQueryDefinitionInput { + s.QueryString = &v + return s +} + +type PutQueryDefinitionOutput struct { + _ struct{} `type:"structure"` + + // The ID of the query definition. 
+ QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string"` +} + +// String returns the string representation +func (s PutQueryDefinitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutQueryDefinitionOutput) GoString() string { + return s.String() +} + +// SetQueryDefinitionId sets the QueryDefinitionId field's value. +func (s *PutQueryDefinitionOutput) SetQueryDefinitionId(v string) *PutQueryDefinitionOutput { + s.QueryDefinitionId = &v + return s +} + type PutResourcePolicyInput struct { _ struct{} `type:"structure"` @@ -8201,6 +8791,9 @@ type PutRetentionPolicyInput struct { // values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, // 1827, and 3653. // + // If you omit retentionInDays in a PutRetentionPolicy operation, the events + // in the log group are always retained and never expire. + // // RetentionInDays is a required field RetentionInDays *int64 `locationName:"retentionInDays" type:"integer" required:"true"` } @@ -8281,7 +8874,7 @@ type PutSubscriptionFilterInput struct { // DestinationArn is a required field DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"` - // The method used to distribute log data to the destination. By default log + // The method used to distribute log data to the destination. By default, log // data is grouped by log stream, but the grouping can be set to random for // a more even distribution. This property is only applicable when the destination // is an Amazon Kinesis stream. @@ -8290,7 +8883,8 @@ type PutSubscriptionFilterInput struct { // A name for the subscription filter. If you are updating an existing filter, // you must specify the correct name in filterName. Otherwise, the call fails // because you cannot associate a second filter with a log group. To find the - // name of the filter currently associated with a log group, use DescribeSubscriptionFilters. + // name of the filter currently associated with a log group, use DescribeSubscriptionFilters + // (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeSubscriptionFilters.html). // // FilterName is a required field FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` @@ -8472,6 +9066,69 @@ func (s *QueryCompileErrorLocation) SetStartCharOffset(v int64) *QueryCompileErr return s } +// This structure contains details about a saved CloudWatch Logs Insights query +// definition. +type QueryDefinition struct { + _ struct{} `type:"structure"` + + // The date that the query definition was most recently modified. + LastModified *int64 `locationName:"lastModified" type:"long"` + + // If this query definition contains a list of log groups that it is limited + // to, that list appears here. + LogGroupNames []*string `locationName:"logGroupNames" type:"list"` + + // The name of the query definition. + Name *string `locationName:"name" min:"1" type:"string"` + + // The unique ID of the query definition. + QueryDefinitionId *string `locationName:"queryDefinitionId" type:"string"` + + // The query string to use for this definition. For more information, see CloudWatch + // Logs Insights Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). 
+ QueryString *string `locationName:"queryString" min:"1" type:"string"` +} + +// String returns the string representation +func (s QueryDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryDefinition) GoString() string { + return s.String() +} + +// SetLastModified sets the LastModified field's value. +func (s *QueryDefinition) SetLastModified(v int64) *QueryDefinition { + s.LastModified = &v + return s +} + +// SetLogGroupNames sets the LogGroupNames field's value. +func (s *QueryDefinition) SetLogGroupNames(v []*string) *QueryDefinition { + s.LogGroupNames = v + return s +} + +// SetName sets the Name field's value. +func (s *QueryDefinition) SetName(v string) *QueryDefinition { + s.Name = &v + return s +} + +// SetQueryDefinitionId sets the QueryDefinitionId field's value. +func (s *QueryDefinition) SetQueryDefinitionId(v string) *QueryDefinition { + s.QueryDefinitionId = &v + return s +} + +// SetQueryString sets the QueryString field's value. +func (s *QueryDefinition) SetQueryString(v string) *QueryDefinition { + s.QueryString = &v + return s +} + // Information about one CloudWatch Logs Insights query that matches the request // in a DescribeQueries operation. type QueryInfo struct { @@ -8622,8 +9279,8 @@ func (s *RejectedLogEventsInfo) SetTooOldLogEventEndIndex(v int64) *RejectedLogE // The specified resource already exists. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8640,17 +9297,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8658,28 +9315,28 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource does not exist. 
type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8696,17 +9353,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8714,22 +9371,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // A policy enabling one or more entities to put logs to a log group in this @@ -8778,6 +9435,9 @@ func (s *ResourcePolicy) SetPolicyName(v string) *ResourcePolicy { // Contains one field from one log event returned by a CloudWatch Logs Insights // query, along with the value of that field. +// +// For more information about the fields that are generated by CloudWatch logs, +// see Supported Logs and Discovered Fields (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_AnalyzeLogData-discoverable-fields.html). type ResultField struct { _ struct{} `type:"structure"` @@ -8845,8 +9505,8 @@ func (s *SearchedLogStream) SetSearchedCompletely(v bool) *SearchedLogStream { // The service cannot complete the request. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8863,17 +9523,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. 
-func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8881,22 +9541,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } type StartQueryInput struct { @@ -9037,8 +9697,7 @@ func (s *StartQueryOutput) SetQueryId(v string) *StartQueryOutput { type StopQueryInput struct { _ struct{} `type:"structure"` - // The ID number of the query to stop. If necessary, you can use DescribeQueries - // to find this ID number. + // The ID number of the query to stop. To find this ID number, use DescribeQueries. // // QueryId is a required field QueryId *string `locationName:"queryId" type:"string" required:"true"` @@ -9115,7 +9774,7 @@ type SubscriptionFilter struct { FilterName *string `locationName:"filterName" min:"1" type:"string"` // A symbolic description of how CloudWatch Logs should interpret the data in - // each log event. For example, a log event may contain timestamps, IP addresses, + // each log event. For example, a log event can contain timestamps, IP addresses, // strings, and so on. You use the filter pattern to specify what to look for // in the log event message. FilterPattern *string `locationName:"filterPattern" type:"string"` @@ -9254,7 +9913,7 @@ type TestMetricFilterInput struct { _ struct{} `type:"structure"` // A symbolic description of how CloudWatch Logs should interpret the data in - // each log event. For example, a log event may contain timestamps, IP addresses, + // each log event. For example, a log event can contain timestamps, IP addresses, // strings, and so on. You use the filter pattern to specify what to look for // in the log event message. // @@ -9333,8 +9992,8 @@ func (s *TestMetricFilterOutput) SetMatches(v []*MetricFilterMatchRecord) *TestM // The most likely cause is an invalid AWS access key ID or secret key. type UnrecognizedClientException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9351,17 +10010,17 @@ func (s UnrecognizedClientException) GoString() string { func newErrorUnrecognizedClientException(v protocol.ResponseMetadata) error { return &UnrecognizedClientException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnrecognizedClientException) Code() string { +func (s *UnrecognizedClientException) Code() string { return "UnrecognizedClientException" } // Message returns the exception's message. 
-func (s UnrecognizedClientException) Message() string { +func (s *UnrecognizedClientException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9369,22 +10028,22 @@ func (s UnrecognizedClientException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnrecognizedClientException) OrigErr() error { +func (s *UnrecognizedClientException) OrigErr() error { return nil } -func (s UnrecognizedClientException) Error() string { +func (s *UnrecognizedClientException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnrecognizedClientException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnrecognizedClientException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnrecognizedClientException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnrecognizedClientException) RequestID() string { + return s.RespMetadata.RequestID } type UntagLogGroupInput struct { @@ -9469,6 +10128,14 @@ const ( DistributionByLogStream = "ByLogStream" ) +// Distribution_Values returns all elements of the Distribution enum +func Distribution_Values() []string { + return []string{ + DistributionRandom, + DistributionByLogStream, + } +} + const ( // ExportTaskStatusCodeCancelled is a ExportTaskStatusCode enum value ExportTaskStatusCodeCancelled = "CANCELLED" @@ -9489,6 +10156,18 @@ const ( ExportTaskStatusCodeRunning = "RUNNING" ) +// ExportTaskStatusCode_Values returns all elements of the ExportTaskStatusCode enum +func ExportTaskStatusCode_Values() []string { + return []string{ + ExportTaskStatusCodeCancelled, + ExportTaskStatusCodeCompleted, + ExportTaskStatusCodeFailed, + ExportTaskStatusCodePending, + ExportTaskStatusCodePendingCancel, + ExportTaskStatusCodeRunning, + } +} + const ( // OrderByLogStreamName is a OrderBy enum value OrderByLogStreamName = "LogStreamName" @@ -9497,6 +10176,14 @@ const ( OrderByLastEventTime = "LastEventTime" ) +// OrderBy_Values returns all elements of the OrderBy enum +func OrderBy_Values() []string { + return []string{ + OrderByLogStreamName, + OrderByLastEventTime, + } +} + const ( // QueryStatusScheduled is a QueryStatus enum value QueryStatusScheduled = "Scheduled" @@ -9513,3 +10200,14 @@ const ( // QueryStatusCancelled is a QueryStatus enum value QueryStatusCancelled = "Cancelled" ) + +// QueryStatus_Values returns all elements of the QueryStatus enum +func QueryStatus_Values() []string { + return []string{ + QueryStatusScheduled, + QueryStatusRunning, + QueryStatusComplete, + QueryStatusFailed, + QueryStatusCancelled, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/doc.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/doc.go index a20147e7b..647d68301 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/doc.go @@ -4,8 +4,8 @@ // requests to Amazon CloudWatch Logs. // // You can use Amazon CloudWatch Logs to monitor, store, and access your log -// files from Amazon EC2 instances, AWS CloudTrail, or other sources. You can -// then retrieve the associated log data from CloudWatch Logs using the CloudWatch +// files from EC2 instances, AWS CloudTrail, or other sources. 
You can then +// retrieve the associated log data from CloudWatch Logs using the CloudWatch // console, CloudWatch Logs commands in the AWS CLI, CloudWatch Logs API, or // CloudWatch Logs SDK. // @@ -15,8 +15,8 @@ // Logs to monitor applications and systems using log data. For example, // CloudWatch Logs can track the number of errors that occur in your application // logs and send you a notification whenever the rate of errors exceeds a -// threshold that you specify. CloudWatch Logs uses your log data for monitoring; -// so, no code changes are required. For example, you can monitor application +// threshold that you specify. CloudWatch Logs uses your log data for monitoring +// so no code changes are required. For example, you can monitor application // logs for specific literal terms (such as "NullReferenceException") or // count the number of occurrences of a literal term at a particular position // in log data (such as "404" status codes in an Apache access log). When @@ -24,8 +24,8 @@ // to a CloudWatch metric that you specify. // // * Monitor AWS CloudTrail logged events: You can create alarms in CloudWatch -// and receive notifications of particular API activity as captured by CloudTrail -// and use the notification to perform troubleshooting. +// and receive notifications of particular API activity as captured by CloudTrail. +// You can use the notification to perform troubleshooting. // // * Archive log data: You can use CloudWatch Logs to store your log data // in highly durable storage. You can change the log retention setting so diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/errors.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/errors.go index c6e23336d..44e3bb576 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/errors.go @@ -43,7 +43,7 @@ const ( // "MalformedQueryException". // // The query string is not valid. Details about this error are displayed in - // a QueryCompileError object. For more information, see . + // a QueryCompileError object. For more information, see QueryCompileError (https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_QueryCompileError.html). // // For more information about valid query syntax, see CloudWatch Logs Insights // Query Syntax (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/CWL_QuerySyntax.html). 
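The CloudWatch Logs hunks above move the generated exception types to pointer receivers with an exported RespMetadata field and add the enum *_Values() helpers. The following is a minimal sketch of how calling code might lean on those changes; it assumes credentials and region come from the environment, and the log group prefix is a placeholder.

package main

import (
    "errors"
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := cloudwatchlogs.New(sess)

    _, err := svc.DescribeLogGroups(&cloudwatchlogs.DescribeLogGroupsInput{
        LogGroupNamePrefix: aws.String("/example/placeholder"), // hypothetical prefix
    })

    // Because the exception types now use pointer receivers, errors.As can
    // match the pointer type directly, and the exported RespMetadata is
    // reachable through StatusCode() and RequestID().
    var unrecognized *cloudwatchlogs.UnrecognizedClientException
    if errors.As(err, &unrecognized) {
        fmt.Println(unrecognized.Code(), unrecognized.StatusCode(), unrecognized.RequestID())
    }

    // The new *_Values helpers enumerate the valid enum strings, e.g. for
    // validating a query status before comparing it against API responses.
    for _, s := range cloudwatchlogs.QueryStatus_Values() {
        fmt.Println("query status:", s)
    }
}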
diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go index a40ff41cb..41520eda9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codebuild/api.go b/vendor/github.com/aws/aws-sdk-go/service/codebuild/api.go index c22775a00..766ad817b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codebuild/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codebuild/api.go @@ -92,6 +92,85 @@ func (c *CodeBuild) BatchDeleteBuildsWithContext(ctx aws.Context, input *BatchDe return out, req.Send() } +const opBatchGetBuildBatches = "BatchGetBuildBatches" + +// BatchGetBuildBatchesRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetBuildBatches operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchGetBuildBatches for more information on using the BatchGetBuildBatches +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchGetBuildBatchesRequest method. +// req, resp := client.BatchGetBuildBatchesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/BatchGetBuildBatches +func (c *CodeBuild) BatchGetBuildBatchesRequest(input *BatchGetBuildBatchesInput) (req *request.Request, output *BatchGetBuildBatchesOutput) { + op := &request.Operation{ + Name: opBatchGetBuildBatches, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchGetBuildBatchesInput{} + } + + output = &BatchGetBuildBatchesOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchGetBuildBatches API operation for AWS CodeBuild. +// +// Retrieves information about one or more batch builds. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeBuild's +// API operation BatchGetBuildBatches for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input value that was provided is not valid. 
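As a usage sketch for the new BatchGetBuildBatches operation and the request/Send lifecycle described in the comment above: the batch identifier below is a placeholder, and the Ids field name is an assumption mirroring the existing BatchGetBuilds input shape.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := codebuild.New(sess)

    // Build the request first, as in the generated example, then Send it.
    // "my-project:batch-id" is a placeholder batch build identifier.
    req, resp := svc.BatchGetBuildBatchesRequest(&codebuild.BatchGetBuildBatchesInput{
        Ids: aws.StringSlice([]string{"my-project:batch-id"}), // assumed field
    })
    if err := req.Send(); err != nil {
        fmt.Println("BatchGetBuildBatches failed:", err)
        return
    }
    fmt.Println(resp) // resp is only valid after Send returns without error
}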
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/BatchGetBuildBatches +func (c *CodeBuild) BatchGetBuildBatches(input *BatchGetBuildBatchesInput) (*BatchGetBuildBatchesOutput, error) { + req, out := c.BatchGetBuildBatchesRequest(input) + return out, req.Send() +} + +// BatchGetBuildBatchesWithContext is the same as BatchGetBuildBatches with the addition of +// the ability to pass a context and additional request options. +// +// See BatchGetBuildBatches for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) BatchGetBuildBatchesWithContext(ctx aws.Context, input *BatchGetBuildBatchesInput, opts ...request.Option) (*BatchGetBuildBatchesOutput, error) { + req, out := c.BatchGetBuildBatchesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opBatchGetBuilds = "BatchGetBuilds" // BatchGetBuildsRequest generates a "aws/request.Request" representing the @@ -680,6 +759,85 @@ func (c *CodeBuild) CreateWebhookWithContext(ctx aws.Context, input *CreateWebho return out, req.Send() } +const opDeleteBuildBatch = "DeleteBuildBatch" + +// DeleteBuildBatchRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBuildBatch operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBuildBatch for more information on using the DeleteBuildBatch +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBuildBatchRequest method. +// req, resp := client.DeleteBuildBatchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/DeleteBuildBatch +func (c *CodeBuild) DeleteBuildBatchRequest(input *DeleteBuildBatchInput) (req *request.Request, output *DeleteBuildBatchOutput) { + op := &request.Operation{ + Name: opDeleteBuildBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBuildBatchInput{} + } + + output = &DeleteBuildBatchOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteBuildBatch API operation for AWS CodeBuild. +// +// Deletes a batch build. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeBuild's +// API operation DeleteBuildBatch for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input value that was provided is not valid. 
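A hedged sketch of the DeleteBuildBatch operation introduced above, using the WithContext variant so the call can be cancelled; the batch identifier and the Id field name are assumptions.

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := codebuild.New(sess)

    // The context must be non-nil; here it also bounds the call to 30 seconds.
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    // "my-project:batch-id" is a placeholder; Id is assumed to name the batch
    // build to delete.
    out, err := svc.DeleteBuildBatchWithContext(ctx, &codebuild.DeleteBuildBatchInput{
        Id: aws.String("my-project:batch-id"),
    })
    if err != nil {
        fmt.Println("DeleteBuildBatch failed:", err)
        return
    }
    fmt.Println(out)
}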
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/DeleteBuildBatch +func (c *CodeBuild) DeleteBuildBatch(input *DeleteBuildBatchInput) (*DeleteBuildBatchOutput, error) { + req, out := c.DeleteBuildBatchRequest(input) + return out, req.Send() +} + +// DeleteBuildBatchWithContext is the same as DeleteBuildBatch with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBuildBatch for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) DeleteBuildBatchWithContext(ctx aws.Context, input *DeleteBuildBatchInput, opts ...request.Option) (*DeleteBuildBatchOutput, error) { + req, out := c.DeleteBuildBatchRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteProject = "DeleteProject" // DeleteProjectRequest generates a "aws/request.Request" representing the @@ -885,11 +1043,8 @@ func (c *CodeBuild) DeleteReportGroupRequest(input *DeleteReportGroupInput) (req // DeleteReportGroup API operation for AWS CodeBuild. // -// DeleteReportGroup: Deletes a report group. Before you delete a report group, -// you must delete its reports. Use ListReportsForReportGroup (https://docs.aws.amazon.com/codebuild/latest/APIReference/API_ListReportsForReportGroup.html) -// to get the reports in a report group. Use DeleteReport (https://docs.aws.amazon.com/codebuild/latest/APIReference/API_DeleteReport.html) -// to delete the reports. If you call DeleteReportGroup for a report group that -// contains one or more reports, an exception is thrown. +// Deletes a report group. Before you delete a report group, you must delete +// its reports. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1174,6 +1329,143 @@ func (c *CodeBuild) DeleteWebhookWithContext(ctx aws.Context, input *DeleteWebho return out, req.Send() } +const opDescribeCodeCoverages = "DescribeCodeCoverages" + +// DescribeCodeCoveragesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCodeCoverages operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeCodeCoverages for more information on using the DescribeCodeCoverages +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeCodeCoveragesRequest method. 
+// req, resp := client.DescribeCodeCoveragesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/DescribeCodeCoverages +func (c *CodeBuild) DescribeCodeCoveragesRequest(input *DescribeCodeCoveragesInput) (req *request.Request, output *DescribeCodeCoveragesOutput) { + op := &request.Operation{ + Name: opDescribeCodeCoverages, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCodeCoveragesInput{} + } + + output = &DescribeCodeCoveragesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCodeCoverages API operation for AWS CodeBuild. +// +// Retrieves one or more code coverage reports. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeBuild's +// API operation DescribeCodeCoverages for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input value that was provided is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/DescribeCodeCoverages +func (c *CodeBuild) DescribeCodeCoverages(input *DescribeCodeCoveragesInput) (*DescribeCodeCoveragesOutput, error) { + req, out := c.DescribeCodeCoveragesRequest(input) + return out, req.Send() +} + +// DescribeCodeCoveragesWithContext is the same as DescribeCodeCoverages with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCodeCoverages for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) DescribeCodeCoveragesWithContext(ctx aws.Context, input *DescribeCodeCoveragesInput, opts ...request.Option) (*DescribeCodeCoveragesOutput, error) { + req, out := c.DescribeCodeCoveragesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeCodeCoveragesPages iterates over the pages of a DescribeCodeCoverages operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCodeCoverages method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCodeCoverages operation. 
+// pageNum := 0 +// err := client.DescribeCodeCoveragesPages(params, +// func(page *codebuild.DescribeCodeCoveragesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeBuild) DescribeCodeCoveragesPages(input *DescribeCodeCoveragesInput, fn func(*DescribeCodeCoveragesOutput, bool) bool) error { + return c.DescribeCodeCoveragesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeCodeCoveragesPagesWithContext same as DescribeCodeCoveragesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) DescribeCodeCoveragesPagesWithContext(ctx aws.Context, input *DescribeCodeCoveragesInput, fn func(*DescribeCodeCoveragesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeCodeCoveragesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCodeCoveragesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeCodeCoveragesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeTestCases = "DescribeTestCases" // DescribeTestCasesRequest generates a "aws/request.Request" representing the @@ -1205,6 +1497,12 @@ func (c *CodeBuild) DescribeTestCasesRequest(input *DescribeTestCasesInput) (req Name: opDescribeTestCases, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -1256,6 +1554,58 @@ func (c *CodeBuild) DescribeTestCasesWithContext(ctx aws.Context, input *Describ return out, req.Send() } +// DescribeTestCasesPages iterates over the pages of a DescribeTestCases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTestCases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTestCases operation. +// pageNum := 0 +// err := client.DescribeTestCasesPages(params, +// func(page *codebuild.DescribeTestCasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeBuild) DescribeTestCasesPages(input *DescribeTestCasesInput, fn func(*DescribeTestCasesOutput, bool) bool) error { + return c.DescribeTestCasesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeTestCasesPagesWithContext same as DescribeTestCasesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
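The Paginator metadata added above is what lets the SDK generate the Pages helpers for DescribeCodeCoverages and DescribeTestCases. A minimal sketch of the callback-driven iteration, assuming the ReportArn field identifies the report and using a placeholder ARN.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := codebuild.New(sess)

    // Placeholder report ARN; ReportArn is assumed to scope the coverage results.
    input := &codebuild.DescribeCodeCoveragesInput{
        ReportArn: aws.String("arn:aws:codebuild:us-east-1:123456789012:report/example"),
    }

    // The paginator drives nextToken/maxResults; returning true requests the
    // next page, returning false stops the iteration early.
    err := svc.DescribeCodeCoveragesPages(input,
        func(page *codebuild.DescribeCodeCoveragesOutput, lastPage bool) bool {
            fmt.Println(page)
            return !lastPage
        })
    if err != nil {
        fmt.Println("DescribeCodeCoverages failed:", err)
    }
}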
+func (c *CodeBuild) DescribeTestCasesPagesWithContext(ctx aws.Context, input *DescribeTestCasesInput, fn func(*DescribeTestCasesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeTestCasesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTestCasesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeTestCasesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetResourcePolicy = "GetResourcePolicy" // GetResourcePolicyRequest generates a "aws/request.Request" representing the @@ -1508,96 +1858,431 @@ func (c *CodeBuild) InvalidateProjectCacheWithContext(ctx aws.Context, input *In return out, req.Send() } -const opListBuilds = "ListBuilds" +const opListBuildBatches = "ListBuildBatches" -// ListBuildsRequest generates a "aws/request.Request" representing the -// client's request for the ListBuilds operation. The "output" return +// ListBuildBatchesRequest generates a "aws/request.Request" representing the +// client's request for the ListBuildBatches operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListBuilds for more information on using the ListBuilds +// See ListBuildBatches for more information on using the ListBuildBatches // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListBuildsRequest method. -// req, resp := client.ListBuildsRequest(params) +// // Example sending a request using the ListBuildBatchesRequest method. +// req, resp := client.ListBuildBatchesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListBuilds -func (c *CodeBuild) ListBuildsRequest(input *ListBuildsInput) (req *request.Request, output *ListBuildsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListBuildBatches +func (c *CodeBuild) ListBuildBatchesRequest(input *ListBuildBatchesInput) (req *request.Request, output *ListBuildBatchesOutput) { op := &request.Operation{ - Name: opListBuilds, + Name: opListBuildBatches, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { - input = &ListBuildsInput{} + input = &ListBuildBatchesInput{} } - output = &ListBuildsOutput{} + output = &ListBuildBatchesOutput{} req = c.newRequest(op, input, output) return } -// ListBuilds API operation for AWS CodeBuild. +// ListBuildBatches API operation for AWS CodeBuild. // -// Gets a list of build IDs, with each build ID representing a single build. +// Retrieves the identifiers of your build batches in the current region. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for AWS CodeBuild's -// API operation ListBuilds for usage and error information. +// API operation ListBuildBatches for usage and error information. // // Returned Error Types: // * InvalidInputException // The input value that was provided is not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListBuilds -func (c *CodeBuild) ListBuilds(input *ListBuildsInput) (*ListBuildsOutput, error) { - req, out := c.ListBuildsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListBuildBatches +func (c *CodeBuild) ListBuildBatches(input *ListBuildBatchesInput) (*ListBuildBatchesOutput, error) { + req, out := c.ListBuildBatchesRequest(input) return out, req.Send() } -// ListBuildsWithContext is the same as ListBuilds with the addition of +// ListBuildBatchesWithContext is the same as ListBuildBatches with the addition of // the ability to pass a context and additional request options. // -// See ListBuilds for details on how to use this API operation. +// See ListBuildBatches for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeBuild) ListBuildsWithContext(ctx aws.Context, input *ListBuildsInput, opts ...request.Option) (*ListBuildsOutput, error) { - req, out := c.ListBuildsRequest(input) +func (c *CodeBuild) ListBuildBatchesWithContext(ctx aws.Context, input *ListBuildBatchesInput, opts ...request.Option) (*ListBuildBatchesOutput, error) { + req, out := c.ListBuildBatchesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListBuildsForProject = "ListBuildsForProject" +// ListBuildBatchesPages iterates over the pages of a ListBuildBatches operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBuildBatches method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBuildBatches operation. +// pageNum := 0 +// err := client.ListBuildBatchesPages(params, +// func(page *codebuild.ListBuildBatchesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeBuild) ListBuildBatchesPages(input *ListBuildBatchesInput, fn func(*ListBuildBatchesOutput, bool) bool) error { + return c.ListBuildBatchesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// ListBuildsForProjectRequest generates a "aws/request.Request" representing the -// client's request for the ListBuildsForProject operation. The "output" return +// ListBuildBatchesPagesWithContext same as ListBuildBatchesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *CodeBuild) ListBuildBatchesPagesWithContext(ctx aws.Context, input *ListBuildBatchesInput, fn func(*ListBuildBatchesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListBuildBatchesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListBuildBatchesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListBuildBatchesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListBuildBatchesForProject = "ListBuildBatchesForProject" + +// ListBuildBatchesForProjectRequest generates a "aws/request.Request" representing the +// client's request for the ListBuildBatchesForProject operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListBuildsForProject for more information on using the ListBuildsForProject +// See ListBuildBatchesForProject for more information on using the ListBuildBatchesForProject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBuildBatchesForProjectRequest method. +// req, resp := client.ListBuildBatchesForProjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListBuildBatchesForProject +func (c *CodeBuild) ListBuildBatchesForProjectRequest(input *ListBuildBatchesForProjectInput) (req *request.Request, output *ListBuildBatchesForProjectOutput) { + op := &request.Operation{ + Name: opListBuildBatchesForProject, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListBuildBatchesForProjectInput{} + } + + output = &ListBuildBatchesForProjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuildBatchesForProject API operation for AWS CodeBuild. +// +// Retrieves the identifiers of the build batches for a specific project. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeBuild's +// API operation ListBuildBatchesForProject for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input value that was provided is not valid. +// +// * ResourceNotFoundException +// The specified AWS resource cannot be found. 
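A sketch of the new ListBuildBatchesForProject paginator added alongside ListBuildBatches; the project name is a placeholder, and the ProjectName and Ids field names are assumptions based on the operation's naming.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := codebuild.New(sess)

    // "my-project" is a placeholder CodeBuild project name.
    input := &codebuild.ListBuildBatchesForProjectInput{
        ProjectName: aws.String("my-project"),
    }

    // Walk every page of batch build identifiers; returning true keeps paging
    // until the service reports no further nextToken.
    err := svc.ListBuildBatchesForProjectPages(input,
        func(page *codebuild.ListBuildBatchesForProjectOutput, lastPage bool) bool {
            for _, id := range page.Ids {
                fmt.Println(aws.StringValue(id))
            }
            return true
        })
    if err != nil {
        fmt.Println("ListBuildBatchesForProject failed:", err)
    }
}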
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListBuildBatchesForProject +func (c *CodeBuild) ListBuildBatchesForProject(input *ListBuildBatchesForProjectInput) (*ListBuildBatchesForProjectOutput, error) { + req, out := c.ListBuildBatchesForProjectRequest(input) + return out, req.Send() +} + +// ListBuildBatchesForProjectWithContext is the same as ListBuildBatchesForProject with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuildBatchesForProject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) ListBuildBatchesForProjectWithContext(ctx aws.Context, input *ListBuildBatchesForProjectInput, opts ...request.Option) (*ListBuildBatchesForProjectOutput, error) { + req, out := c.ListBuildBatchesForProjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListBuildBatchesForProjectPages iterates over the pages of a ListBuildBatchesForProject operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBuildBatchesForProject method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBuildBatchesForProject operation. +// pageNum := 0 +// err := client.ListBuildBatchesForProjectPages(params, +// func(page *codebuild.ListBuildBatchesForProjectOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeBuild) ListBuildBatchesForProjectPages(input *ListBuildBatchesForProjectInput, fn func(*ListBuildBatchesForProjectOutput, bool) bool) error { + return c.ListBuildBatchesForProjectPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListBuildBatchesForProjectPagesWithContext same as ListBuildBatchesForProjectPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) ListBuildBatchesForProjectPagesWithContext(ctx aws.Context, input *ListBuildBatchesForProjectInput, fn func(*ListBuildBatchesForProjectOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListBuildBatchesForProjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListBuildBatchesForProjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListBuildBatchesForProjectOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListBuilds = "ListBuilds" + +// ListBuildsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuilds operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBuilds for more information on using the ListBuilds +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBuildsRequest method. +// req, resp := client.ListBuildsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListBuilds +func (c *CodeBuild) ListBuildsRequest(input *ListBuildsInput) (req *request.Request, output *ListBuildsOutput) { + op := &request.Operation{ + Name: opListBuilds, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListBuildsInput{} + } + + output = &ListBuildsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuilds API operation for AWS CodeBuild. +// +// Gets a list of build IDs, with each build ID representing a single build. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeBuild's +// API operation ListBuilds for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input value that was provided is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListBuilds +func (c *CodeBuild) ListBuilds(input *ListBuildsInput) (*ListBuildsOutput, error) { + req, out := c.ListBuildsRequest(input) + return out, req.Send() +} + +// ListBuildsWithContext is the same as ListBuilds with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuilds for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) ListBuildsWithContext(ctx aws.Context, input *ListBuildsInput, opts ...request.Option) (*ListBuildsOutput, error) { + req, out := c.ListBuildsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListBuildsPages iterates over the pages of a ListBuilds operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBuilds method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBuilds operation. 
+// pageNum := 0 +// err := client.ListBuildsPages(params, +// func(page *codebuild.ListBuildsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeBuild) ListBuildsPages(input *ListBuildsInput, fn func(*ListBuildsOutput, bool) bool) error { + return c.ListBuildsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListBuildsPagesWithContext same as ListBuildsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) ListBuildsPagesWithContext(ctx aws.Context, input *ListBuildsInput, fn func(*ListBuildsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListBuildsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListBuildsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListBuildsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListBuildsForProject = "ListBuildsForProject" + +// ListBuildsForProjectRequest generates a "aws/request.Request" representing the +// client's request for the ListBuildsForProject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBuildsForProject for more information on using the ListBuildsForProject // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration @@ -1618,6 +2303,12 @@ func (c *CodeBuild) ListBuildsForProjectRequest(input *ListBuildsForProjectInput Name: opListBuildsForProject, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, } if input == nil { @@ -1670,6 +2361,58 @@ func (c *CodeBuild) ListBuildsForProjectWithContext(ctx aws.Context, input *List return out, req.Send() } +// ListBuildsForProjectPages iterates over the pages of a ListBuildsForProject operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBuildsForProject method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBuildsForProject operation. 
+// pageNum := 0 +// err := client.ListBuildsForProjectPages(params, +// func(page *codebuild.ListBuildsForProjectOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeBuild) ListBuildsForProjectPages(input *ListBuildsForProjectInput, fn func(*ListBuildsForProjectOutput, bool) bool) error { + return c.ListBuildsForProjectPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListBuildsForProjectPagesWithContext same as ListBuildsForProjectPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) ListBuildsForProjectPagesWithContext(ctx aws.Context, input *ListBuildsForProjectInput, fn func(*ListBuildsForProjectOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListBuildsForProjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListBuildsForProjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListBuildsForProjectOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListCuratedEnvironmentImages = "ListCuratedEnvironmentImages" // ListCuratedEnvironmentImagesRequest generates a "aws/request.Request" representing the @@ -1775,6 +2518,12 @@ func (c *CodeBuild) ListProjectsRequest(input *ListProjectsInput) (req *request. Name: opListProjects, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, } if input == nil { @@ -1824,6 +2573,58 @@ func (c *CodeBuild) ListProjectsWithContext(ctx aws.Context, input *ListProjects return out, req.Send() } +// ListProjectsPages iterates over the pages of a ListProjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListProjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListProjects operation. +// pageNum := 0 +// err := client.ListProjectsPages(params, +// func(page *codebuild.ListProjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeBuild) ListProjectsPages(input *ListProjectsInput, fn func(*ListProjectsOutput, bool) bool) error { + return c.ListProjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListProjectsPagesWithContext same as ListProjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
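ListBuilds and ListBuildsForProject also gain paginator metadata above, with no limit token configured, so each page uses the service's default size. A sketch of the context-aware variant, which bounds the entire iteration rather than a single request; the three-page cutoff mirrors the generated example.

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := codebuild.New(sess)

    // The context is shared by every page request made during the iteration.
    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
    defer cancel()

    pageNum := 0
    err := svc.ListBuildsPagesWithContext(ctx, &codebuild.ListBuildsInput{},
        func(page *codebuild.ListBuildsOutput, lastPage bool) bool {
            pageNum++
            for _, id := range page.Ids {
                fmt.Println(aws.StringValue(id))
            }
            return pageNum <= 3 // stop after a few pages, as in the generated example
        })
    if err != nil {
        fmt.Println("ListBuilds failed:", err)
    }
}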
+func (c *CodeBuild) ListProjectsPagesWithContext(ctx aws.Context, input *ListProjectsInput, fn func(*ListProjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListProjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListProjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListProjectsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListReportGroups = "ListReportGroups" // ListReportGroupsRequest generates a "aws/request.Request" representing the @@ -1855,6 +2656,12 @@ func (c *CodeBuild) ListReportGroupsRequest(input *ListReportGroupsInput) (req * Name: opListReportGroups, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -1903,6 +2710,58 @@ func (c *CodeBuild) ListReportGroupsWithContext(ctx aws.Context, input *ListRepo return out, req.Send() } +// ListReportGroupsPages iterates over the pages of a ListReportGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListReportGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListReportGroups operation. +// pageNum := 0 +// err := client.ListReportGroupsPages(params, +// func(page *codebuild.ListReportGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeBuild) ListReportGroupsPages(input *ListReportGroupsInput, fn func(*ListReportGroupsOutput, bool) bool) error { + return c.ListReportGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListReportGroupsPagesWithContext same as ListReportGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) ListReportGroupsPagesWithContext(ctx aws.Context, input *ListReportGroupsInput, fn func(*ListReportGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListReportGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListReportGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListReportGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListReports = "ListReports" // ListReportsRequest generates a "aws/request.Request" representing the @@ -1934,6 +2793,12 @@ func (c *CodeBuild) ListReportsRequest(input *ListReportsInput) (req *request.Re Name: opListReports, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -1982,25 +2847,77 @@ func (c *CodeBuild) ListReportsWithContext(ctx aws.Context, input *ListReportsIn return out, req.Send() } -const opListReportsForReportGroup = "ListReportsForReportGroup" - -// ListReportsForReportGroupRequest generates a "aws/request.Request" representing the -// client's request for the ListReportsForReportGroup operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. +// ListReportsPages iterates over the pages of a ListReports operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// See ListReportsForReportGroup for more information on using the ListReportsForReportGroup -// API call, and error handling. +// See ListReports method for more information on how to use this operation. // -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// Note: This operation can generate multiple requests to a service. // +// // Example iterating over at most 3 pages of a ListReports operation. +// pageNum := 0 +// err := client.ListReportsPages(params, +// func(page *codebuild.ListReportsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // -// // Example sending a request using the ListReportsForReportGroupRequest method. -// req, resp := client.ListReportsForReportGroupRequest(params) +func (c *CodeBuild) ListReportsPages(input *ListReportsInput, fn func(*ListReportsOutput, bool) bool) error { + return c.ListReportsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListReportsPagesWithContext same as ListReportsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) ListReportsPagesWithContext(ctx aws.Context, input *ListReportsInput, fn func(*ListReportsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListReportsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListReportsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListReportsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListReportsForReportGroup = "ListReportsForReportGroup" + +// ListReportsForReportGroupRequest generates a "aws/request.Request" representing the +// client's request for the ListReportsForReportGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListReportsForReportGroup for more information on using the ListReportsForReportGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListReportsForReportGroupRequest method. +// req, resp := client.ListReportsForReportGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled @@ -2013,6 +2930,12 @@ func (c *CodeBuild) ListReportsForReportGroupRequest(input *ListReportsForReport Name: opListReportsForReportGroup, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -2064,6 +2987,58 @@ func (c *CodeBuild) ListReportsForReportGroupWithContext(ctx aws.Context, input return out, req.Send() } +// ListReportsForReportGroupPages iterates over the pages of a ListReportsForReportGroup operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListReportsForReportGroup method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListReportsForReportGroup operation. +// pageNum := 0 +// err := client.ListReportsForReportGroupPages(params, +// func(page *codebuild.ListReportsForReportGroupOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeBuild) ListReportsForReportGroupPages(input *ListReportsForReportGroupInput, fn func(*ListReportsForReportGroupOutput, bool) bool) error { + return c.ListReportsForReportGroupPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListReportsForReportGroupPagesWithContext same as ListReportsForReportGroupPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) ListReportsForReportGroupPagesWithContext(ctx aws.Context, input *ListReportsForReportGroupInput, fn func(*ListReportsForReportGroupOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListReportsForReportGroupInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListReportsForReportGroupRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListReportsForReportGroupOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListSharedProjects = "ListSharedProjects" // ListSharedProjectsRequest generates a "aws/request.Request" representing the @@ -2095,6 +3070,12 @@ func (c *CodeBuild) ListSharedProjectsRequest(input *ListSharedProjectsInput) (r Name: opListSharedProjects, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -2143,6 +3124,58 @@ func (c *CodeBuild) ListSharedProjectsWithContext(ctx aws.Context, input *ListSh return out, req.Send() } +// ListSharedProjectsPages iterates over the pages of a ListSharedProjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSharedProjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSharedProjects operation. +// pageNum := 0 +// err := client.ListSharedProjectsPages(params, +// func(page *codebuild.ListSharedProjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeBuild) ListSharedProjectsPages(input *ListSharedProjectsInput, fn func(*ListSharedProjectsOutput, bool) bool) error { + return c.ListSharedProjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSharedProjectsPagesWithContext same as ListSharedProjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) ListSharedProjectsPagesWithContext(ctx aws.Context, input *ListSharedProjectsInput, fn func(*ListSharedProjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSharedProjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSharedProjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListSharedProjectsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListSharedReportGroups = "ListSharedReportGroups" // ListSharedReportGroupsRequest generates a "aws/request.Request" representing the @@ -2174,6 +3207,12 @@ func (c *CodeBuild) ListSharedReportGroupsRequest(input *ListSharedReportGroupsI Name: opListSharedReportGroups, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -2222,6 +3261,58 @@ func (c *CodeBuild) ListSharedReportGroupsWithContext(ctx aws.Context, input *Li return out, req.Send() } +// ListSharedReportGroupsPages iterates over the pages of a ListSharedReportGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
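Tying the report paginators added above to the earlier DeleteReportGroup note in this file (a report group must be emptied before it can be deleted), here is a hedged cleanup sketch; the ARN is a placeholder, and the ReportGroupArn, Reports, and Arn field names are assumptions taken from the API reference links in the doc comments.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := codebuild.New(sess)

    // Placeholder report group ARN.
    groupArn := "arn:aws:codebuild:us-east-1:123456789012:report-group/example"

    // Page through the group's reports and delete each one before removing
    // the group itself, since DeleteReportGroup requires an empty group.
    err := svc.ListReportsForReportGroupPages(
        &codebuild.ListReportsForReportGroupInput{ReportGroupArn: aws.String(groupArn)},
        func(page *codebuild.ListReportsForReportGroupOutput, lastPage bool) bool {
            for _, reportArn := range page.Reports {
                if _, derr := svc.DeleteReport(&codebuild.DeleteReportInput{Arn: reportArn}); derr != nil {
                    fmt.Println("DeleteReport failed:", derr)
                }
            }
            return true
        })
    if err != nil {
        fmt.Println("ListReportsForReportGroup failed:", err)
        return
    }

    if _, err := svc.DeleteReportGroup(&codebuild.DeleteReportGroupInput{Arn: aws.String(groupArn)}); err != nil {
        fmt.Println("DeleteReportGroup failed:", err)
    }
}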
+// +// See ListSharedReportGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSharedReportGroups operation. +// pageNum := 0 +// err := client.ListSharedReportGroupsPages(params, +// func(page *codebuild.ListSharedReportGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeBuild) ListSharedReportGroupsPages(input *ListSharedReportGroupsInput, fn func(*ListSharedReportGroupsOutput, bool) bool) error { + return c.ListSharedReportGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSharedReportGroupsPagesWithContext same as ListSharedReportGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) ListSharedReportGroupsPagesWithContext(ctx aws.Context, input *ListSharedReportGroupsInput, fn func(*ListSharedReportGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSharedReportGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSharedReportGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListSharedReportGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListSourceCredentials = "ListSourceCredentials" // ListSourceCredentialsRequest generates a "aws/request.Request" representing the @@ -2274,6 +3365,11 @@ func (c *CodeBuild) ListSourceCredentialsRequest(input *ListSourceCredentialsInp // // See the AWS API reference guide for AWS CodeBuild's // API operation ListSourceCredentials for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input value that was provided is not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/ListSourceCredentials func (c *CodeBuild) ListSourceCredentials(input *ListSourceCredentialsInput) (*ListSourceCredentialsOutput, error) { req, out := c.ListSourceCredentialsRequest(input) @@ -2378,58 +3474,58 @@ func (c *CodeBuild) PutResourcePolicyWithContext(ctx aws.Context, input *PutReso return out, req.Send() } -const opStartBuild = "StartBuild" +const opRetryBuild = "RetryBuild" -// StartBuildRequest generates a "aws/request.Request" representing the -// client's request for the StartBuild operation. The "output" return +// RetryBuildRequest generates a "aws/request.Request" representing the +// client's request for the RetryBuild operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartBuild for more information on using the StartBuild +// See RetryBuild for more information on using the RetryBuild // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // // -// // Example sending a request using the StartBuildRequest method. -// req, resp := client.StartBuildRequest(params) +// // Example sending a request using the RetryBuildRequest method. +// req, resp := client.RetryBuildRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StartBuild -func (c *CodeBuild) StartBuildRequest(input *StartBuildInput) (req *request.Request, output *StartBuildOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/RetryBuild +func (c *CodeBuild) RetryBuildRequest(input *RetryBuildInput) (req *request.Request, output *RetryBuildOutput) { op := &request.Operation{ - Name: opStartBuild, + Name: opRetryBuild, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StartBuildInput{} + input = &RetryBuildInput{} } - output = &StartBuildOutput{} + output = &RetryBuildOutput{} req = c.newRequest(op, input, output) return } -// StartBuild API operation for AWS CodeBuild. +// RetryBuild API operation for AWS CodeBuild. // -// Starts running a build. +// Restarts a build. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeBuild's -// API operation StartBuild for usage and error information. +// API operation RetryBuild for usage and error information. // // Returned Error Types: // * InvalidInputException @@ -2441,80 +3537,80 @@ func (c *CodeBuild) StartBuildRequest(input *StartBuildInput) (req *request.Requ // * AccountLimitExceededException // An AWS service limit was exceeded for the calling AWS account. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StartBuild -func (c *CodeBuild) StartBuild(input *StartBuildInput) (*StartBuildOutput, error) { - req, out := c.StartBuildRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/RetryBuild +func (c *CodeBuild) RetryBuild(input *RetryBuildInput) (*RetryBuildOutput, error) { + req, out := c.RetryBuildRequest(input) return out, req.Send() } -// StartBuildWithContext is the same as StartBuild with the addition of +// RetryBuildWithContext is the same as RetryBuild with the addition of // the ability to pass a context and additional request options. // -// See StartBuild for details on how to use this API operation. +// See RetryBuild for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeBuild) StartBuildWithContext(ctx aws.Context, input *StartBuildInput, opts ...request.Option) (*StartBuildOutput, error) { - req, out := c.StartBuildRequest(input) +func (c *CodeBuild) RetryBuildWithContext(ctx aws.Context, input *RetryBuildInput, opts ...request.Option) (*RetryBuildOutput, error) { + req, out := c.RetryBuildRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStopBuild = "StopBuild" +const opRetryBuildBatch = "RetryBuildBatch" -// StopBuildRequest generates a "aws/request.Request" representing the -// client's request for the StopBuild operation. 
The "output" return +// RetryBuildBatchRequest generates a "aws/request.Request" representing the +// client's request for the RetryBuildBatch operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopBuild for more information on using the StopBuild +// See RetryBuildBatch for more information on using the RetryBuildBatch // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StopBuildRequest method. -// req, resp := client.StopBuildRequest(params) +// // Example sending a request using the RetryBuildBatchRequest method. +// req, resp := client.RetryBuildBatchRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StopBuild -func (c *CodeBuild) StopBuildRequest(input *StopBuildInput) (req *request.Request, output *StopBuildOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/RetryBuildBatch +func (c *CodeBuild) RetryBuildBatchRequest(input *RetryBuildBatchInput) (req *request.Request, output *RetryBuildBatchOutput) { op := &request.Operation{ - Name: opStopBuild, + Name: opRetryBuildBatch, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StopBuildInput{} + input = &RetryBuildBatchInput{} } - output = &StopBuildOutput{} + output = &RetryBuildBatchOutput{} req = c.newRequest(op, input, output) return } -// StopBuild API operation for AWS CodeBuild. +// RetryBuildBatch API operation for AWS CodeBuild. // -// Attempts to stop running a build. +// Restarts a batch build. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeBuild's -// API operation StopBuild for usage and error information. +// API operation RetryBuildBatch for usage and error information. // // Returned Error Types: // * InvalidInputException @@ -2523,80 +3619,80 @@ func (c *CodeBuild) StopBuildRequest(input *StopBuildInput) (req *request.Reques // * ResourceNotFoundException // The specified AWS resource cannot be found. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StopBuild -func (c *CodeBuild) StopBuild(input *StopBuildInput) (*StopBuildOutput, error) { - req, out := c.StopBuildRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/RetryBuildBatch +func (c *CodeBuild) RetryBuildBatch(input *RetryBuildBatchInput) (*RetryBuildBatchOutput, error) { + req, out := c.RetryBuildBatchRequest(input) return out, req.Send() } -// StopBuildWithContext is the same as StopBuild with the addition of +// RetryBuildBatchWithContext is the same as RetryBuildBatch with the addition of // the ability to pass a context and additional request options. // -// See StopBuild for details on how to use this API operation. +// See RetryBuildBatch for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeBuild) StopBuildWithContext(ctx aws.Context, input *StopBuildInput, opts ...request.Option) (*StopBuildOutput, error) { - req, out := c.StopBuildRequest(input) +func (c *CodeBuild) RetryBuildBatchWithContext(ctx aws.Context, input *RetryBuildBatchInput, opts ...request.Option) (*RetryBuildBatchOutput, error) { + req, out := c.RetryBuildBatchRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateProject = "UpdateProject" +const opStartBuild = "StartBuild" -// UpdateProjectRequest generates a "aws/request.Request" representing the -// client's request for the UpdateProject operation. The "output" return +// StartBuildRequest generates a "aws/request.Request" representing the +// client's request for the StartBuild operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateProject for more information on using the UpdateProject +// See StartBuild for more information on using the StartBuild // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateProjectRequest method. -// req, resp := client.UpdateProjectRequest(params) +// // Example sending a request using the StartBuildRequest method. +// req, resp := client.StartBuildRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateProject -func (c *CodeBuild) UpdateProjectRequest(input *UpdateProjectInput) (req *request.Request, output *UpdateProjectOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StartBuild +func (c *CodeBuild) StartBuildRequest(input *StartBuildInput) (req *request.Request, output *StartBuildOutput) { op := &request.Operation{ - Name: opUpdateProject, + Name: opStartBuild, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateProjectInput{} + input = &StartBuildInput{} } - output = &UpdateProjectOutput{} + output = &StartBuildOutput{} req = c.newRequest(op, input, output) return } -// UpdateProject API operation for AWS CodeBuild. +// StartBuild API operation for AWS CodeBuild. // -// Changes the settings of a build project. +// Starts running a build. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeBuild's -// API operation UpdateProject for usage and error information. +// API operation StartBuild for usage and error information. // // Returned Error Types: // * InvalidInputException @@ -2605,80 +3701,83 @@ func (c *CodeBuild) UpdateProjectRequest(input *UpdateProjectInput) (req *reques // * ResourceNotFoundException // The specified AWS resource cannot be found. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateProject -func (c *CodeBuild) UpdateProject(input *UpdateProjectInput) (*UpdateProjectOutput, error) { - req, out := c.UpdateProjectRequest(input) +// * AccountLimitExceededException +// An AWS service limit was exceeded for the calling AWS account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StartBuild +func (c *CodeBuild) StartBuild(input *StartBuildInput) (*StartBuildOutput, error) { + req, out := c.StartBuildRequest(input) return out, req.Send() } -// UpdateProjectWithContext is the same as UpdateProject with the addition of +// StartBuildWithContext is the same as StartBuild with the addition of // the ability to pass a context and additional request options. // -// See UpdateProject for details on how to use this API operation. +// See StartBuild for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeBuild) UpdateProjectWithContext(ctx aws.Context, input *UpdateProjectInput, opts ...request.Option) (*UpdateProjectOutput, error) { - req, out := c.UpdateProjectRequest(input) +func (c *CodeBuild) StartBuildWithContext(ctx aws.Context, input *StartBuildInput, opts ...request.Option) (*StartBuildOutput, error) { + req, out := c.StartBuildRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateReportGroup = "UpdateReportGroup" +const opStartBuildBatch = "StartBuildBatch" -// UpdateReportGroupRequest generates a "aws/request.Request" representing the -// client's request for the UpdateReportGroup operation. The "output" return +// StartBuildBatchRequest generates a "aws/request.Request" representing the +// client's request for the StartBuildBatch operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateReportGroup for more information on using the UpdateReportGroup +// See StartBuildBatch for more information on using the StartBuildBatch // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateReportGroupRequest method. -// req, resp := client.UpdateReportGroupRequest(params) +// // Example sending a request using the StartBuildBatchRequest method. 
+// req, resp := client.StartBuildBatchRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateReportGroup -func (c *CodeBuild) UpdateReportGroupRequest(input *UpdateReportGroupInput) (req *request.Request, output *UpdateReportGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StartBuildBatch +func (c *CodeBuild) StartBuildBatchRequest(input *StartBuildBatchInput) (req *request.Request, output *StartBuildBatchOutput) { op := &request.Operation{ - Name: opUpdateReportGroup, + Name: opStartBuildBatch, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateReportGroupInput{} + input = &StartBuildBatchInput{} } - output = &UpdateReportGroupOutput{} + output = &StartBuildBatchOutput{} req = c.newRequest(op, input, output) return } -// UpdateReportGroup API operation for AWS CodeBuild. +// StartBuildBatch API operation for AWS CodeBuild. // -// Updates a report group. +// Starts a batch build for a project. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeBuild's -// API operation UpdateReportGroup for usage and error information. +// API operation StartBuildBatch for usage and error information. // // Returned Error Types: // * InvalidInputException @@ -2687,82 +3786,80 @@ func (c *CodeBuild) UpdateReportGroupRequest(input *UpdateReportGroupInput) (req // * ResourceNotFoundException // The specified AWS resource cannot be found. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateReportGroup -func (c *CodeBuild) UpdateReportGroup(input *UpdateReportGroupInput) (*UpdateReportGroupOutput, error) { - req, out := c.UpdateReportGroupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StartBuildBatch +func (c *CodeBuild) StartBuildBatch(input *StartBuildBatchInput) (*StartBuildBatchOutput, error) { + req, out := c.StartBuildBatchRequest(input) return out, req.Send() } -// UpdateReportGroupWithContext is the same as UpdateReportGroup with the addition of +// StartBuildBatchWithContext is the same as StartBuildBatch with the addition of // the ability to pass a context and additional request options. // -// See UpdateReportGroup for details on how to use this API operation. +// See StartBuildBatch for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeBuild) UpdateReportGroupWithContext(ctx aws.Context, input *UpdateReportGroupInput, opts ...request.Option) (*UpdateReportGroupOutput, error) { - req, out := c.UpdateReportGroupRequest(input) +func (c *CodeBuild) StartBuildBatchWithContext(ctx aws.Context, input *StartBuildBatchInput, opts ...request.Option) (*StartBuildBatchOutput, error) { + req, out := c.StartBuildBatchRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opUpdateWebhook = "UpdateWebhook" +const opStopBuild = "StopBuild" -// UpdateWebhookRequest generates a "aws/request.Request" representing the -// client's request for the UpdateWebhook operation. The "output" return +// StopBuildRequest generates a "aws/request.Request" representing the +// client's request for the StopBuild operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateWebhook for more information on using the UpdateWebhook +// See StopBuild for more information on using the StopBuild // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateWebhookRequest method. -// req, resp := client.UpdateWebhookRequest(params) +// // Example sending a request using the StopBuildRequest method. +// req, resp := client.StopBuildRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateWebhook -func (c *CodeBuild) UpdateWebhookRequest(input *UpdateWebhookInput) (req *request.Request, output *UpdateWebhookOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StopBuild +func (c *CodeBuild) StopBuildRequest(input *StopBuildInput) (req *request.Request, output *StopBuildOutput) { op := &request.Operation{ - Name: opUpdateWebhook, + Name: opStopBuild, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateWebhookInput{} + input = &StopBuildInput{} } - output = &UpdateWebhookOutput{} + output = &StopBuildOutput{} req = c.newRequest(op, input, output) return } -// UpdateWebhook API operation for AWS CodeBuild. -// -// Updates the webhook associated with an AWS CodeBuild build project. +// StopBuild API operation for AWS CodeBuild. // -// If you use Bitbucket for your repository, rotateSecret is ignored. +// Attempts to stop running a build. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS CodeBuild's -// API operation UpdateWebhook for usage and error information. +// API operation StopBuild for usage and error information. // // Returned Error Types: // * InvalidInputException @@ -2771,114 +3868,2677 @@ func (c *CodeBuild) UpdateWebhookRequest(input *UpdateWebhookInput) (req *reques // * ResourceNotFoundException // The specified AWS resource cannot be found. // -// * OAuthProviderException -// There was a problem with the underlying OAuth provider. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateWebhook -func (c *CodeBuild) UpdateWebhook(input *UpdateWebhookInput) (*UpdateWebhookOutput, error) { - req, out := c.UpdateWebhookRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StopBuild +func (c *CodeBuild) StopBuild(input *StopBuildInput) (*StopBuildOutput, error) { + req, out := c.StopBuildRequest(input) return out, req.Send() } -// UpdateWebhookWithContext is the same as UpdateWebhook with the addition of +// StopBuildWithContext is the same as StopBuild with the addition of // the ability to pass a context and additional request options. // -// See UpdateWebhook for details on how to use this API operation. +// See StopBuild for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *CodeBuild) UpdateWebhookWithContext(ctx aws.Context, input *UpdateWebhookInput, opts ...request.Option) (*UpdateWebhookOutput, error) { - req, out := c.UpdateWebhookRequest(input) +func (c *CodeBuild) StopBuildWithContext(ctx aws.Context, input *StopBuildInput, opts ...request.Option) (*StopBuildOutput, error) { + req, out := c.StopBuildRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// An AWS service limit was exceeded for the calling AWS account. -type AccountLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata - - Message_ *string `locationName:"message" type:"string"` -} +const opStopBuildBatch = "StopBuildBatch" -// String returns the string representation -func (s AccountLimitExceededException) String() string { +// StopBuildBatchRequest generates a "aws/request.Request" representing the +// client's request for the StopBuildBatch operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopBuildBatch for more information on using the StopBuildBatch +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopBuildBatchRequest method. +// req, resp := client.StopBuildBatchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StopBuildBatch +func (c *CodeBuild) StopBuildBatchRequest(input *StopBuildBatchInput) (req *request.Request, output *StopBuildBatchOutput) { + op := &request.Operation{ + Name: opStopBuildBatch, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopBuildBatchInput{} + } + + output = &StopBuildBatchOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopBuildBatch API operation for AWS CodeBuild. +// +// Stops a running batch build. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeBuild's +// API operation StopBuildBatch for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input value that was provided is not valid. +// +// * ResourceNotFoundException +// The specified AWS resource cannot be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/StopBuildBatch +func (c *CodeBuild) StopBuildBatch(input *StopBuildBatchInput) (*StopBuildBatchOutput, error) { + req, out := c.StopBuildBatchRequest(input) + return out, req.Send() +} + +// StopBuildBatchWithContext is the same as StopBuildBatch with the addition of +// the ability to pass a context and additional request options. +// +// See StopBuildBatch for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) StopBuildBatchWithContext(ctx aws.Context, input *StopBuildBatchInput, opts ...request.Option) (*StopBuildBatchOutput, error) { + req, out := c.StopBuildBatchRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateProject = "UpdateProject" + +// UpdateProjectRequest generates a "aws/request.Request" representing the +// client's request for the UpdateProject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateProject for more information on using the UpdateProject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateProjectRequest method. +// req, resp := client.UpdateProjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateProject +func (c *CodeBuild) UpdateProjectRequest(input *UpdateProjectInput) (req *request.Request, output *UpdateProjectOutput) { + op := &request.Operation{ + Name: opUpdateProject, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateProjectInput{} + } + + output = &UpdateProjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateProject API operation for AWS CodeBuild. +// +// Changes the settings of a build project. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeBuild's +// API operation UpdateProject for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input value that was provided is not valid. +// +// * ResourceNotFoundException +// The specified AWS resource cannot be found. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateProject +func (c *CodeBuild) UpdateProject(input *UpdateProjectInput) (*UpdateProjectOutput, error) { + req, out := c.UpdateProjectRequest(input) + return out, req.Send() +} + +// UpdateProjectWithContext is the same as UpdateProject with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateProject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) UpdateProjectWithContext(ctx aws.Context, input *UpdateProjectInput, opts ...request.Option) (*UpdateProjectOutput, error) { + req, out := c.UpdateProjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateReportGroup = "UpdateReportGroup" + +// UpdateReportGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateReportGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateReportGroup for more information on using the UpdateReportGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateReportGroupRequest method. +// req, resp := client.UpdateReportGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateReportGroup +func (c *CodeBuild) UpdateReportGroupRequest(input *UpdateReportGroupInput) (req *request.Request, output *UpdateReportGroupOutput) { + op := &request.Operation{ + Name: opUpdateReportGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateReportGroupInput{} + } + + output = &UpdateReportGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateReportGroup API operation for AWS CodeBuild. +// +// Updates a report group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeBuild's +// API operation UpdateReportGroup for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input value that was provided is not valid. +// +// * ResourceNotFoundException +// The specified AWS resource cannot be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateReportGroup +func (c *CodeBuild) UpdateReportGroup(input *UpdateReportGroupInput) (*UpdateReportGroupOutput, error) { + req, out := c.UpdateReportGroupRequest(input) + return out, req.Send() +} + +// UpdateReportGroupWithContext is the same as UpdateReportGroup with the addition of +// the ability to pass a context and additional request options. 
+// +// See UpdateReportGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) UpdateReportGroupWithContext(ctx aws.Context, input *UpdateReportGroupInput, opts ...request.Option) (*UpdateReportGroupOutput, error) { + req, out := c.UpdateReportGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateWebhook = "UpdateWebhook" + +// UpdateWebhookRequest generates a "aws/request.Request" representing the +// client's request for the UpdateWebhook operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateWebhook for more information on using the UpdateWebhook +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateWebhookRequest method. +// req, resp := client.UpdateWebhookRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateWebhook +func (c *CodeBuild) UpdateWebhookRequest(input *UpdateWebhookInput) (req *request.Request, output *UpdateWebhookOutput) { + op := &request.Operation{ + Name: opUpdateWebhook, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateWebhookInput{} + } + + output = &UpdateWebhookOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateWebhook API operation for AWS CodeBuild. +// +// Updates the webhook associated with an AWS CodeBuild build project. +// +// If you use Bitbucket for your repository, rotateSecret is ignored. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeBuild's +// API operation UpdateWebhook for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input value that was provided is not valid. +// +// * ResourceNotFoundException +// The specified AWS resource cannot be found. +// +// * OAuthProviderException +// There was a problem with the underlying OAuth provider. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codebuild-2016-10-06/UpdateWebhook +func (c *CodeBuild) UpdateWebhook(input *UpdateWebhookInput) (*UpdateWebhookOutput, error) { + req, out := c.UpdateWebhookRequest(input) + return out, req.Send() +} + +// UpdateWebhookWithContext is the same as UpdateWebhook with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateWebhook for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeBuild) UpdateWebhookWithContext(ctx aws.Context, input *UpdateWebhookInput, opts ...request.Option) (*UpdateWebhookOutput, error) { + req, out := c.UpdateWebhookRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// An AWS service limit was exceeded for the calling AWS account. +type AccountLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AccountLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountLimitExceededException) GoString() string { + return s.String() +} + +func newErrorAccountLimitExceededException(v protocol.ResponseMetadata) error { + return &AccountLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccountLimitExceededException) Code() string { + return "AccountLimitExceededException" +} + +// Message returns the exception's message. +func (s *AccountLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccountLimitExceededException) OrigErr() error { + return nil +} + +func (s *AccountLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccountLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccountLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +type BatchDeleteBuildsInput struct { + _ struct{} `type:"structure"` + + // The IDs of the builds to delete. + // + // Ids is a required field + Ids []*string `locationName:"ids" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchDeleteBuildsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteBuildsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchDeleteBuildsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchDeleteBuildsInput"} + if s.Ids == nil { + invalidParams.Add(request.NewErrParamRequired("Ids")) + } + if s.Ids != nil && len(s.Ids) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Ids", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIds sets the Ids field's value. +func (s *BatchDeleteBuildsInput) SetIds(v []*string) *BatchDeleteBuildsInput { + s.Ids = v + return s +} + +type BatchDeleteBuildsOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the builds that were successfully deleted. + BuildsDeleted []*string `locationName:"buildsDeleted" min:"1" type:"list"` + + // Information about any builds that could not be successfully deleted. 
+ BuildsNotDeleted []*BuildNotDeleted `locationName:"buildsNotDeleted" type:"list"` +} + +// String returns the string representation +func (s BatchDeleteBuildsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteBuildsOutput) GoString() string { + return s.String() +} + +// SetBuildsDeleted sets the BuildsDeleted field's value. +func (s *BatchDeleteBuildsOutput) SetBuildsDeleted(v []*string) *BatchDeleteBuildsOutput { + s.BuildsDeleted = v + return s +} + +// SetBuildsNotDeleted sets the BuildsNotDeleted field's value. +func (s *BatchDeleteBuildsOutput) SetBuildsNotDeleted(v []*BuildNotDeleted) *BatchDeleteBuildsOutput { + s.BuildsNotDeleted = v + return s +} + +type BatchGetBuildBatchesInput struct { + _ struct{} `type:"structure"` + + // An array that contains the batch build identifiers to retrieve. + // + // Ids is a required field + Ids []*string `locationName:"ids" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetBuildBatchesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetBuildBatchesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetBuildBatchesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetBuildBatchesInput"} + if s.Ids == nil { + invalidParams.Add(request.NewErrParamRequired("Ids")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIds sets the Ids field's value. +func (s *BatchGetBuildBatchesInput) SetIds(v []*string) *BatchGetBuildBatchesInput { + s.Ids = v + return s +} + +type BatchGetBuildBatchesOutput struct { + _ struct{} `type:"structure"` + + // An array of BuildBatch objects that represent the retrieved batch builds. + BuildBatches []*BuildBatch `locationName:"buildBatches" type:"list"` + + // An array that contains the identifiers of any batch builds that are not found. + BuildBatchesNotFound []*string `locationName:"buildBatchesNotFound" type:"list"` +} + +// String returns the string representation +func (s BatchGetBuildBatchesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetBuildBatchesOutput) GoString() string { + return s.String() +} + +// SetBuildBatches sets the BuildBatches field's value. +func (s *BatchGetBuildBatchesOutput) SetBuildBatches(v []*BuildBatch) *BatchGetBuildBatchesOutput { + s.BuildBatches = v + return s +} + +// SetBuildBatchesNotFound sets the BuildBatchesNotFound field's value. +func (s *BatchGetBuildBatchesOutput) SetBuildBatchesNotFound(v []*string) *BatchGetBuildBatchesOutput { + s.BuildBatchesNotFound = v + return s +} + +type BatchGetBuildsInput struct { + _ struct{} `type:"structure"` + + // The IDs of the builds. + // + // Ids is a required field + Ids []*string `locationName:"ids" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetBuildsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetBuildsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BatchGetBuildsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetBuildsInput"} + if s.Ids == nil { + invalidParams.Add(request.NewErrParamRequired("Ids")) + } + if s.Ids != nil && len(s.Ids) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Ids", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIds sets the Ids field's value. +func (s *BatchGetBuildsInput) SetIds(v []*string) *BatchGetBuildsInput { + s.Ids = v + return s +} + +type BatchGetBuildsOutput struct { + _ struct{} `type:"structure"` + + // Information about the requested builds. + Builds []*Build `locationName:"builds" type:"list"` + + // The IDs of builds for which information could not be found. + BuildsNotFound []*string `locationName:"buildsNotFound" min:"1" type:"list"` +} + +// String returns the string representation +func (s BatchGetBuildsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetBuildsOutput) GoString() string { + return s.String() +} + +// SetBuilds sets the Builds field's value. +func (s *BatchGetBuildsOutput) SetBuilds(v []*Build) *BatchGetBuildsOutput { + s.Builds = v + return s +} + +// SetBuildsNotFound sets the BuildsNotFound field's value. +func (s *BatchGetBuildsOutput) SetBuildsNotFound(v []*string) *BatchGetBuildsOutput { + s.BuildsNotFound = v + return s +} + +type BatchGetProjectsInput struct { + _ struct{} `type:"structure"` + + // The names or ARNs of the build projects. To get information about a project + // shared with your AWS account, its ARN must be specified. You cannot specify + // a shared project using its name. + // + // Names is a required field + Names []*string `locationName:"names" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetProjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetProjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetProjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetProjectsInput"} + if s.Names == nil { + invalidParams.Add(request.NewErrParamRequired("Names")) + } + if s.Names != nil && len(s.Names) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Names", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNames sets the Names field's value. +func (s *BatchGetProjectsInput) SetNames(v []*string) *BatchGetProjectsInput { + s.Names = v + return s +} + +type BatchGetProjectsOutput struct { + _ struct{} `type:"structure"` + + // Information about the requested build projects. + Projects []*Project `locationName:"projects" type:"list"` + + // The names of build projects for which information could not be found. + ProjectsNotFound []*string `locationName:"projectsNotFound" min:"1" type:"list"` +} + +// String returns the string representation +func (s BatchGetProjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetProjectsOutput) GoString() string { + return s.String() +} + +// SetProjects sets the Projects field's value. +func (s *BatchGetProjectsOutput) SetProjects(v []*Project) *BatchGetProjectsOutput { + s.Projects = v + return s +} + +// SetProjectsNotFound sets the ProjectsNotFound field's value. 
+func (s *BatchGetProjectsOutput) SetProjectsNotFound(v []*string) *BatchGetProjectsOutput { + s.ProjectsNotFound = v + return s +} + +type BatchGetReportGroupsInput struct { + _ struct{} `type:"structure"` + + // An array of report group ARNs that identify the report groups to return. + // + // ReportGroupArns is a required field + ReportGroupArns []*string `locationName:"reportGroupArns" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetReportGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetReportGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetReportGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetReportGroupsInput"} + if s.ReportGroupArns == nil { + invalidParams.Add(request.NewErrParamRequired("ReportGroupArns")) + } + if s.ReportGroupArns != nil && len(s.ReportGroupArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReportGroupArns", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReportGroupArns sets the ReportGroupArns field's value. +func (s *BatchGetReportGroupsInput) SetReportGroupArns(v []*string) *BatchGetReportGroupsInput { + s.ReportGroupArns = v + return s +} + +type BatchGetReportGroupsOutput struct { + _ struct{} `type:"structure"` + + // The array of report groups returned by BatchGetReportGroups. + ReportGroups []*ReportGroup `locationName:"reportGroups" min:"1" type:"list"` + + // An array of ARNs passed to BatchGetReportGroups that are not associated with + // a ReportGroup. + ReportGroupsNotFound []*string `locationName:"reportGroupsNotFound" min:"1" type:"list"` +} + +// String returns the string representation +func (s BatchGetReportGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetReportGroupsOutput) GoString() string { + return s.String() +} + +// SetReportGroups sets the ReportGroups field's value. +func (s *BatchGetReportGroupsOutput) SetReportGroups(v []*ReportGroup) *BatchGetReportGroupsOutput { + s.ReportGroups = v + return s +} + +// SetReportGroupsNotFound sets the ReportGroupsNotFound field's value. +func (s *BatchGetReportGroupsOutput) SetReportGroupsNotFound(v []*string) *BatchGetReportGroupsOutput { + s.ReportGroupsNotFound = v + return s +} + +type BatchGetReportsInput struct { + _ struct{} `type:"structure"` + + // An array of ARNs that identify the Report objects to return. + // + // ReportArns is a required field + ReportArns []*string `locationName:"reportArns" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchGetReportsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetReportsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *BatchGetReportsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetReportsInput"} + if s.ReportArns == nil { + invalidParams.Add(request.NewErrParamRequired("ReportArns")) + } + if s.ReportArns != nil && len(s.ReportArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReportArns", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReportArns sets the ReportArns field's value. +func (s *BatchGetReportsInput) SetReportArns(v []*string) *BatchGetReportsInput { + s.ReportArns = v + return s +} + +type BatchGetReportsOutput struct { + _ struct{} `type:"structure"` + + // The array of Report objects returned by BatchGetReports. + Reports []*Report `locationName:"reports" min:"1" type:"list"` + + // An array of ARNs passed to BatchGetReportGroups that are not associated with + // a Report. + ReportsNotFound []*string `locationName:"reportsNotFound" min:"1" type:"list"` +} + +// String returns the string representation +func (s BatchGetReportsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetReportsOutput) GoString() string { + return s.String() +} + +// SetReports sets the Reports field's value. +func (s *BatchGetReportsOutput) SetReports(v []*Report) *BatchGetReportsOutput { + s.Reports = v + return s +} + +// SetReportsNotFound sets the ReportsNotFound field's value. +func (s *BatchGetReportsOutput) SetReportsNotFound(v []*string) *BatchGetReportsOutput { + s.ReportsNotFound = v + return s +} + +// Specifies restrictions for the batch build. +type BatchRestrictions struct { + _ struct{} `type:"structure"` + + // An array of strings that specify the compute types that are allowed for the + // batch build. See Build environment compute types (https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) + // in the AWS CodeBuild User Guide for these values. + ComputeTypesAllowed []*string `locationName:"computeTypesAllowed" type:"list"` + + // Specifies the maximum number of builds allowed. + MaximumBuildsAllowed *int64 `locationName:"maximumBuildsAllowed" type:"integer"` +} + +// String returns the string representation +func (s BatchRestrictions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchRestrictions) GoString() string { + return s.String() +} + +// SetComputeTypesAllowed sets the ComputeTypesAllowed field's value. +func (s *BatchRestrictions) SetComputeTypesAllowed(v []*string) *BatchRestrictions { + s.ComputeTypesAllowed = v + return s +} + +// SetMaximumBuildsAllowed sets the MaximumBuildsAllowed field's value. +func (s *BatchRestrictions) SetMaximumBuildsAllowed(v int64) *BatchRestrictions { + s.MaximumBuildsAllowed = &v + return s +} + +// Information about a build. +type Build struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the build. + Arn *string `locationName:"arn" min:"1" type:"string"` + + // Information about the output artifacts for the build. + Artifacts *BuildArtifacts `locationName:"artifacts" type:"structure"` + + // The ARN of the batch build that this build is a member of, if applicable. + BuildBatchArn *string `locationName:"buildBatchArn" type:"string"` + + // Whether the build is complete. True if complete; otherwise, false. + BuildComplete *bool `locationName:"buildComplete" type:"boolean"` + + // The number of the build. 
For each project, the buildNumber of its first build + // is 1. The buildNumber of each subsequent build is incremented by 1. If a + // build is deleted, the buildNumber of other builds does not change. + BuildNumber *int64 `locationName:"buildNumber" type:"long"` + + // The current status of the build. Valid values include: + // + // * FAILED: The build failed. + // + // * FAULT: The build faulted. + // + // * IN_PROGRESS: The build is still in progress. + // + // * STOPPED: The build stopped. + // + // * SUCCEEDED: The build succeeded. + // + // * TIMED_OUT: The build timed out. + BuildStatus *string `locationName:"buildStatus" type:"string" enum:"StatusType"` + + // Information about the cache for the build. + Cache *ProjectCache `locationName:"cache" type:"structure"` + + // The current build phase. + CurrentPhase *string `locationName:"currentPhase" type:"string"` + + // Contains information about the debug session for this build. + DebugSession *DebugSession `locationName:"debugSession" type:"structure"` + + // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be + // used for encrypting the build output artifacts. + // + // You can use a cross-account KMS key to encrypt the build output artifacts + // if your service role has permission to that key. + // + // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, + // the CMK's alias (using the format alias/). + EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` + + // When the build process ended, expressed in Unix time format. + EndTime *time.Time `locationName:"endTime" type:"timestamp"` + + // Information about the build environment for this build. + Environment *ProjectEnvironment `locationName:"environment" type:"structure"` + + // A list of exported environment variables for this build. + ExportedEnvironmentVariables []*ExportedEnvironmentVariable `locationName:"exportedEnvironmentVariables" type:"list"` + + // An array of ProjectFileSystemLocation objects for a CodeBuild build project. + // A ProjectFileSystemLocation object specifies the identifier, location, mountOptions, + // mountPoint, and type of a file system created using Amazon Elastic File System. + FileSystemLocations []*ProjectFileSystemLocation `locationName:"fileSystemLocations" type:"list"` + + // The unique ID for the build. + Id *string `locationName:"id" min:"1" type:"string"` + + // The entity that started the build. Valid values include: + // + // * If AWS CodePipeline started the build, the pipeline's name (for example, + // codepipeline/my-demo-pipeline). + // + // * If an AWS Identity and Access Management (IAM) user started the build, + // the user's name (for example, MyUserName). + // + // * If the Jenkins plugin for AWS CodeBuild started the build, the string + // CodeBuild-Jenkins-Plugin. + Initiator *string `locationName:"initiator" type:"string"` + + // Information about the build's logs in Amazon CloudWatch Logs. + Logs *LogsLocation `locationName:"logs" type:"structure"` + + // Describes a network interface. + NetworkInterface *NetworkInterface `locationName:"networkInterface" type:"structure"` + + // Information about all previous build phases that are complete and information + // about any current build phase that is not yet complete. + Phases []*BuildPhase `locationName:"phases" type:"list"` + + // The name of the AWS CodeBuild project. 
+ ProjectName *string `locationName:"projectName" min:"1" type:"string"` + + // The number of minutes a build is allowed to be queued before it times out. + QueuedTimeoutInMinutes *int64 `locationName:"queuedTimeoutInMinutes" type:"integer"` + + // An array of the ARNs associated with this build's reports. + ReportArns []*string `locationName:"reportArns" type:"list"` + + // An identifier for the version of this build's source code. + // + // * For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit + // ID. + // + // * For AWS CodePipeline, the source revision provided by AWS CodePipeline. + // + // * For Amazon Simple Storage Service (Amazon S3), this does not apply. + ResolvedSourceVersion *string `locationName:"resolvedSourceVersion" min:"1" type:"string"` + + // An array of ProjectArtifacts objects. + SecondaryArtifacts []*BuildArtifacts `locationName:"secondaryArtifacts" type:"list"` + + // An array of ProjectSourceVersion objects. Each ProjectSourceVersion must + // be one of: + // + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. + // + // * For GitHub: the commit ID, pull request ID, branch name, or tag name + // that corresponds to the version of the source code you want to build. + // If a pull request ID is specified, it must use the format pr/pull-request-ID + // (for example, pr/25). If a branch name is specified, the branch's HEAD + // commit ID is used. If not specified, the default branch's HEAD commit + // ID is used. + // + // * For Bitbucket: the commit ID, branch name, or tag name that corresponds + // to the version of the source code you want to build. If a branch name + // is specified, the branch's HEAD commit ID is used. If not specified, the + // default branch's HEAD commit ID is used. + // + // * For Amazon Simple Storage Service (Amazon S3): the version ID of the + // object that represents the build input ZIP file to use. + SecondarySourceVersions []*ProjectSourceVersion `locationName:"secondarySourceVersions" type:"list"` + + // An array of ProjectSource objects. + SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` + + // The name of a service role used for this build. + ServiceRole *string `locationName:"serviceRole" min:"1" type:"string"` + + // Information about the source code to be built. + Source *ProjectSource `locationName:"source" type:"structure"` + + // Any version identifier for the version of the source code to be built. If + // sourceVersion is specified at the project level, then this sourceVersion + // (at the build level) takes precedence. + // + // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) + // in the AWS CodeBuild User Guide. + SourceVersion *string `locationName:"sourceVersion" min:"1" type:"string"` + + // When the build process started, expressed in Unix time format. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` + + // How long, in minutes, for AWS CodeBuild to wait before timing out this build + // if it does not get marked as completed. + TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" type:"integer"` + + // If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide + // this parameter that identifies the VPC ID and the list of security group + // IDs and subnet IDs. The security groups and subnets must belong to the same + // VPC. You must provide at least one security group and one subnet ID. 
+ VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` +} + +// String returns the string representation +func (s Build) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Build) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *Build) SetArn(v string) *Build { + s.Arn = &v + return s +} + +// SetArtifacts sets the Artifacts field's value. +func (s *Build) SetArtifacts(v *BuildArtifacts) *Build { + s.Artifacts = v + return s +} + +// SetBuildBatchArn sets the BuildBatchArn field's value. +func (s *Build) SetBuildBatchArn(v string) *Build { + s.BuildBatchArn = &v + return s +} + +// SetBuildComplete sets the BuildComplete field's value. +func (s *Build) SetBuildComplete(v bool) *Build { + s.BuildComplete = &v + return s +} + +// SetBuildNumber sets the BuildNumber field's value. +func (s *Build) SetBuildNumber(v int64) *Build { + s.BuildNumber = &v + return s +} + +// SetBuildStatus sets the BuildStatus field's value. +func (s *Build) SetBuildStatus(v string) *Build { + s.BuildStatus = &v + return s +} + +// SetCache sets the Cache field's value. +func (s *Build) SetCache(v *ProjectCache) *Build { + s.Cache = v + return s +} + +// SetCurrentPhase sets the CurrentPhase field's value. +func (s *Build) SetCurrentPhase(v string) *Build { + s.CurrentPhase = &v + return s +} + +// SetDebugSession sets the DebugSession field's value. +func (s *Build) SetDebugSession(v *DebugSession) *Build { + s.DebugSession = v + return s +} + +// SetEncryptionKey sets the EncryptionKey field's value. +func (s *Build) SetEncryptionKey(v string) *Build { + s.EncryptionKey = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *Build) SetEndTime(v time.Time) *Build { + s.EndTime = &v + return s +} + +// SetEnvironment sets the Environment field's value. +func (s *Build) SetEnvironment(v *ProjectEnvironment) *Build { + s.Environment = v + return s +} + +// SetExportedEnvironmentVariables sets the ExportedEnvironmentVariables field's value. +func (s *Build) SetExportedEnvironmentVariables(v []*ExportedEnvironmentVariable) *Build { + s.ExportedEnvironmentVariables = v + return s +} + +// SetFileSystemLocations sets the FileSystemLocations field's value. +func (s *Build) SetFileSystemLocations(v []*ProjectFileSystemLocation) *Build { + s.FileSystemLocations = v + return s +} + +// SetId sets the Id field's value. +func (s *Build) SetId(v string) *Build { + s.Id = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *Build) SetInitiator(v string) *Build { + s.Initiator = &v + return s +} + +// SetLogs sets the Logs field's value. +func (s *Build) SetLogs(v *LogsLocation) *Build { + s.Logs = v + return s +} + +// SetNetworkInterface sets the NetworkInterface field's value. +func (s *Build) SetNetworkInterface(v *NetworkInterface) *Build { + s.NetworkInterface = v + return s +} + +// SetPhases sets the Phases field's value. +func (s *Build) SetPhases(v []*BuildPhase) *Build { + s.Phases = v + return s +} + +// SetProjectName sets the ProjectName field's value. +func (s *Build) SetProjectName(v string) *Build { + s.ProjectName = &v + return s +} + +// SetQueuedTimeoutInMinutes sets the QueuedTimeoutInMinutes field's value. +func (s *Build) SetQueuedTimeoutInMinutes(v int64) *Build { + s.QueuedTimeoutInMinutes = &v + return s +} + +// SetReportArns sets the ReportArns field's value. 
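// Illustrative editorial sketch, not part of the vendored SDK diff: one way a
// caller might read the Build fields documented above after a BatchGetBuilds
// call. The region and build ID below are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := codebuild.New(sess)

	out, err := svc.BatchGetBuilds(&codebuild.BatchGetBuildsInput{
		Ids: aws.StringSlice([]string{"my-project:00000000-0000-0000-0000-000000000000"}),
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, b := range out.Builds {
		// BuildStatus is one of FAILED, FAULT, IN_PROGRESS, STOPPED, SUCCEEDED, TIMED_OUT.
		fmt.Printf("%s #%d: %s (initiator: %s)\n",
			aws.StringValue(b.ProjectName),
			aws.Int64Value(b.BuildNumber),
			aws.StringValue(b.BuildStatus),
			aws.StringValue(b.Initiator))

		// Phases covers completed phases plus any phase still in progress.
		for _, p := range b.Phases {
			fmt.Printf("  %-18s %-12s %ds\n",
				aws.StringValue(p.PhaseType),
				aws.StringValue(p.PhaseStatus),
				aws.Int64Value(p.DurationInSeconds))
		}
	}
}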
+func (s *Build) SetReportArns(v []*string) *Build { + s.ReportArns = v + return s +} + +// SetResolvedSourceVersion sets the ResolvedSourceVersion field's value. +func (s *Build) SetResolvedSourceVersion(v string) *Build { + s.ResolvedSourceVersion = &v + return s +} + +// SetSecondaryArtifacts sets the SecondaryArtifacts field's value. +func (s *Build) SetSecondaryArtifacts(v []*BuildArtifacts) *Build { + s.SecondaryArtifacts = v + return s +} + +// SetSecondarySourceVersions sets the SecondarySourceVersions field's value. +func (s *Build) SetSecondarySourceVersions(v []*ProjectSourceVersion) *Build { + s.SecondarySourceVersions = v + return s +} + +// SetSecondarySources sets the SecondarySources field's value. +func (s *Build) SetSecondarySources(v []*ProjectSource) *Build { + s.SecondarySources = v + return s +} + +// SetServiceRole sets the ServiceRole field's value. +func (s *Build) SetServiceRole(v string) *Build { + s.ServiceRole = &v + return s +} + +// SetSource sets the Source field's value. +func (s *Build) SetSource(v *ProjectSource) *Build { + s.Source = v + return s +} + +// SetSourceVersion sets the SourceVersion field's value. +func (s *Build) SetSourceVersion(v string) *Build { + s.SourceVersion = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *Build) SetStartTime(v time.Time) *Build { + s.StartTime = &v + return s +} + +// SetTimeoutInMinutes sets the TimeoutInMinutes field's value. +func (s *Build) SetTimeoutInMinutes(v int64) *Build { + s.TimeoutInMinutes = &v + return s +} + +// SetVpcConfig sets the VpcConfig field's value. +func (s *Build) SetVpcConfig(v *VpcConfig) *Build { + s.VpcConfig = v + return s +} + +// Information about build output artifacts. +type BuildArtifacts struct { + _ struct{} `type:"structure"` + + // An identifier for this artifact definition. + ArtifactIdentifier *string `locationName:"artifactIdentifier" type:"string"` + + // Information that tells you if encryption for build artifacts is disabled. + EncryptionDisabled *bool `locationName:"encryptionDisabled" type:"boolean"` + + // Information about the location of the build artifacts. + Location *string `locationName:"location" type:"string"` + + // The MD5 hash of the build artifact. + // + // You can use this hash along with a checksum tool to confirm file integrity + // and authenticity. + // + // This value is available only if the build project's packaging value is set + // to ZIP. + Md5sum *string `locationName:"md5sum" type:"string"` + + // If this flag is set, a name specified in the buildspec file overrides the + // artifact name. The name specified in a buildspec file is calculated at build + // time and uses the Shell Command Language. For example, you can append a date + // and time to your artifact name so that it is always unique. + OverrideArtifactName *bool `locationName:"overrideArtifactName" type:"boolean"` + + // The SHA-256 hash of the build artifact. + // + // You can use this hash along with a checksum tool to confirm file integrity + // and authenticity. + // + // This value is available only if the build project's packaging value is set + // to ZIP. + Sha256sum *string `locationName:"sha256sum" type:"string"` +} + +// String returns the string representation +func (s BuildArtifacts) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BuildArtifacts) GoString() string { + return s.String() +} + +// SetArtifactIdentifier sets the ArtifactIdentifier field's value. 
+func (s *BuildArtifacts) SetArtifactIdentifier(v string) *BuildArtifacts { + s.ArtifactIdentifier = &v + return s +} + +// SetEncryptionDisabled sets the EncryptionDisabled field's value. +func (s *BuildArtifacts) SetEncryptionDisabled(v bool) *BuildArtifacts { + s.EncryptionDisabled = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *BuildArtifacts) SetLocation(v string) *BuildArtifacts { + s.Location = &v + return s +} + +// SetMd5sum sets the Md5sum field's value. +func (s *BuildArtifacts) SetMd5sum(v string) *BuildArtifacts { + s.Md5sum = &v + return s +} + +// SetOverrideArtifactName sets the OverrideArtifactName field's value. +func (s *BuildArtifacts) SetOverrideArtifactName(v bool) *BuildArtifacts { + s.OverrideArtifactName = &v + return s +} + +// SetSha256sum sets the Sha256sum field's value. +func (s *BuildArtifacts) SetSha256sum(v string) *BuildArtifacts { + s.Sha256sum = &v + return s +} + +// Contains information about a batch build. +type BuildBatch struct { + _ struct{} `type:"structure"` + + // The ARN of the batch build. + Arn *string `locationName:"arn" min:"1" type:"string"` + + // A BuildArtifacts object the defines the build artifacts for this batch build. + Artifacts *BuildArtifacts `locationName:"artifacts" type:"structure"` + + // Contains configuration information about a batch build project. + BuildBatchConfig *ProjectBuildBatchConfig `locationName:"buildBatchConfig" type:"structure"` + + // The number of the batch build. For each project, the buildBatchNumber of + // its first batch build is 1. The buildBatchNumber of each subsequent batch + // build is incremented by 1. If a batch build is deleted, the buildBatchNumber + // of other batch builds does not change. + BuildBatchNumber *int64 `locationName:"buildBatchNumber" type:"long"` + + // The status of the batch build. + BuildBatchStatus *string `locationName:"buildBatchStatus" type:"string" enum:"StatusType"` + + // An array of BuildGroup objects that define the build groups for the batch + // build. + BuildGroups []*BuildGroup `locationName:"buildGroups" type:"list"` + + // Specifies the maximum amount of time, in minutes, that the build in a batch + // must be completed in. + BuildTimeoutInMinutes *int64 `locationName:"buildTimeoutInMinutes" type:"integer"` + + // Information about the cache for the build project. + Cache *ProjectCache `locationName:"cache" type:"structure"` + + // Indicates if the batch build is complete. + Complete *bool `locationName:"complete" type:"boolean"` + + // The current phase of the batch build. + CurrentPhase *string `locationName:"currentPhase" type:"string"` + + // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be + // used for encrypting the batch build output artifacts. + // + // You can use a cross-account KMS key to encrypt the build output artifacts + // if your service role has permission to that key. + // + // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, + // the CMK's alias (using the format alias/). + EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` + + // The date and time that the batch build ended. + EndTime *time.Time `locationName:"endTime" type:"timestamp"` + + // Information about the build environment of the build project. + Environment *ProjectEnvironment `locationName:"environment" type:"structure"` + + // An array of ProjectFileSystemLocation objects for the batch build project. 
+ // A ProjectFileSystemLocation object specifies the identifier, location, mountOptions, + // mountPoint, and type of a file system created using Amazon Elastic File System. + FileSystemLocations []*ProjectFileSystemLocation `locationName:"fileSystemLocations" type:"list"` + + // The identifier of the batch build. + Id *string `locationName:"id" min:"1" type:"string"` + + // The entity that started the batch build. Valid values include: + // + // * If AWS CodePipeline started the build, the pipeline's name (for example, + // codepipeline/my-demo-pipeline). + // + // * If an AWS Identity and Access Management (IAM) user started the build, + // the user's name. + // + // * If the Jenkins plugin for AWS CodeBuild started the build, the string + // CodeBuild-Jenkins-Plugin. + Initiator *string `locationName:"initiator" type:"string"` + + // Information about logs for a build project. These can be logs in Amazon CloudWatch + // Logs, built in a specified S3 bucket, or both. + LogConfig *LogsConfig `locationName:"logConfig" type:"structure"` + + // An array of BuildBatchPhase objects the specify the phases of the batch build. + Phases []*BuildBatchPhase `locationName:"phases" type:"list"` + + // The name of the batch build project. + ProjectName *string `locationName:"projectName" min:"1" type:"string"` + + // Specifies the amount of time, in minutes, that the batch build is allowed + // to be queued before it times out. + QueuedTimeoutInMinutes *int64 `locationName:"queuedTimeoutInMinutes" type:"integer"` + + // The identifier of the resolved version of this batch build's source code. + // + // * For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit + // ID. + // + // * For AWS CodePipeline, the source revision provided by AWS CodePipeline. + // + // * For Amazon Simple Storage Service (Amazon S3), this does not apply. + ResolvedSourceVersion *string `locationName:"resolvedSourceVersion" min:"1" type:"string"` + + // An array of BuildArtifacts objects the define the build artifacts for this + // batch build. + SecondaryArtifacts []*BuildArtifacts `locationName:"secondaryArtifacts" type:"list"` + + // An array of ProjectSourceVersion objects. Each ProjectSourceVersion must + // be one of: + // + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. + // + // * For GitHub: the commit ID, pull request ID, branch name, or tag name + // that corresponds to the version of the source code you want to build. + // If a pull request ID is specified, it must use the format pr/pull-request-ID + // (for example, pr/25). If a branch name is specified, the branch's HEAD + // commit ID is used. If not specified, the default branch's HEAD commit + // ID is used. + // + // * For Bitbucket: the commit ID, branch name, or tag name that corresponds + // to the version of the source code you want to build. If a branch name + // is specified, the branch's HEAD commit ID is used. If not specified, the + // default branch's HEAD commit ID is used. + // + // * For Amazon Simple Storage Service (Amazon S3): the version ID of the + // object that represents the build input ZIP file to use. + SecondarySourceVersions []*ProjectSourceVersion `locationName:"secondarySourceVersions" type:"list"` + + // An array of ProjectSource objects that define the sources for the batch build. + SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` + + // The name of a service role used for builds in the batch. 
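// Illustrative editorial sketch, not part of the vendored SDK diff: building the
// ProjectSourceVersion entries described above, e.g. pinning a GitHub secondary
// source to pull request 25 and an S3 secondary source to an object version.
// The identifiers and version strings are placeholders, and the
// SourceIdentifier/SourceVersion field names are taken from the SDK type rather
// than from this excerpt.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	versions := []*codebuild.ProjectSourceVersion{
		{
			SourceIdentifier: aws.String("github_source"),
			SourceVersion:    aws.String("pr/25"), // a branch name or commit ID also works
		},
		{
			SourceIdentifier: aws.String("s3_source"),
			SourceVersion:    aws.String("objectVersionIdExample"), // S3 object version ID
		},
	}
	for _, v := range versions {
		fmt.Printf("%s -> %s\n", aws.StringValue(v.SourceIdentifier), aws.StringValue(v.SourceVersion))
	}
}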
+ ServiceRole *string `locationName:"serviceRole" min:"1" type:"string"` + + // Information about the build input source code for the build project. + Source *ProjectSource `locationName:"source" type:"structure"` + + // The identifier of the version of the source code to be built. + SourceVersion *string `locationName:"sourceVersion" min:"1" type:"string"` + + // The date and time that the batch build started. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` + + // Information about the VPC configuration that AWS CodeBuild accesses. + VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` +} + +// String returns the string representation +func (s BuildBatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BuildBatch) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *BuildBatch) SetArn(v string) *BuildBatch { + s.Arn = &v + return s +} + +// SetArtifacts sets the Artifacts field's value. +func (s *BuildBatch) SetArtifacts(v *BuildArtifacts) *BuildBatch { + s.Artifacts = v + return s +} + +// SetBuildBatchConfig sets the BuildBatchConfig field's value. +func (s *BuildBatch) SetBuildBatchConfig(v *ProjectBuildBatchConfig) *BuildBatch { + s.BuildBatchConfig = v + return s +} + +// SetBuildBatchNumber sets the BuildBatchNumber field's value. +func (s *BuildBatch) SetBuildBatchNumber(v int64) *BuildBatch { + s.BuildBatchNumber = &v + return s +} + +// SetBuildBatchStatus sets the BuildBatchStatus field's value. +func (s *BuildBatch) SetBuildBatchStatus(v string) *BuildBatch { + s.BuildBatchStatus = &v + return s +} + +// SetBuildGroups sets the BuildGroups field's value. +func (s *BuildBatch) SetBuildGroups(v []*BuildGroup) *BuildBatch { + s.BuildGroups = v + return s +} + +// SetBuildTimeoutInMinutes sets the BuildTimeoutInMinutes field's value. +func (s *BuildBatch) SetBuildTimeoutInMinutes(v int64) *BuildBatch { + s.BuildTimeoutInMinutes = &v + return s +} + +// SetCache sets the Cache field's value. +func (s *BuildBatch) SetCache(v *ProjectCache) *BuildBatch { + s.Cache = v + return s +} + +// SetComplete sets the Complete field's value. +func (s *BuildBatch) SetComplete(v bool) *BuildBatch { + s.Complete = &v + return s +} + +// SetCurrentPhase sets the CurrentPhase field's value. +func (s *BuildBatch) SetCurrentPhase(v string) *BuildBatch { + s.CurrentPhase = &v + return s +} + +// SetEncryptionKey sets the EncryptionKey field's value. +func (s *BuildBatch) SetEncryptionKey(v string) *BuildBatch { + s.EncryptionKey = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *BuildBatch) SetEndTime(v time.Time) *BuildBatch { + s.EndTime = &v + return s +} + +// SetEnvironment sets the Environment field's value. +func (s *BuildBatch) SetEnvironment(v *ProjectEnvironment) *BuildBatch { + s.Environment = v + return s +} + +// SetFileSystemLocations sets the FileSystemLocations field's value. +func (s *BuildBatch) SetFileSystemLocations(v []*ProjectFileSystemLocation) *BuildBatch { + s.FileSystemLocations = v + return s +} + +// SetId sets the Id field's value. +func (s *BuildBatch) SetId(v string) *BuildBatch { + s.Id = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *BuildBatch) SetInitiator(v string) *BuildBatch { + s.Initiator = &v + return s +} + +// SetLogConfig sets the LogConfig field's value. 
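// Illustrative editorial sketch, not part of the vendored SDK diff: a small
// helper that reads the BuildBatch fields documented above. How the value is
// obtained (e.g. from a batch-build API response) is outside this sketch; the
// values set below are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// summarizeBatch prints the identity, status, and current phase of a batch build.
func summarizeBatch(bb *codebuild.BuildBatch) {
	fmt.Printf("batch %s (project %s): status=%s complete=%t phase=%s timeout=%dm\n",
		aws.StringValue(bb.Id),
		aws.StringValue(bb.ProjectName),
		aws.StringValue(bb.BuildBatchStatus),
		aws.BoolValue(bb.Complete),
		aws.StringValue(bb.CurrentPhase),
		aws.Int64Value(bb.BuildTimeoutInMinutes))
}

func main() {
	// The generated fluent setters defined in this file allow the same value to
	// be assembled in a chained style; plain struct literals work equally well.
	bb := (&codebuild.BuildBatch{}).
		SetId("my-project:batch-0000").
		SetProjectName("my-project").
		SetBuildBatchStatus("IN_PROGRESS").
		SetComplete(false).
		SetCurrentPhase("SUBMITTED").
		SetBuildTimeoutInMinutes(60)
	summarizeBatch(bb)
}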
+func (s *BuildBatch) SetLogConfig(v *LogsConfig) *BuildBatch { + s.LogConfig = v + return s +} + +// SetPhases sets the Phases field's value. +func (s *BuildBatch) SetPhases(v []*BuildBatchPhase) *BuildBatch { + s.Phases = v + return s +} + +// SetProjectName sets the ProjectName field's value. +func (s *BuildBatch) SetProjectName(v string) *BuildBatch { + s.ProjectName = &v + return s +} + +// SetQueuedTimeoutInMinutes sets the QueuedTimeoutInMinutes field's value. +func (s *BuildBatch) SetQueuedTimeoutInMinutes(v int64) *BuildBatch { + s.QueuedTimeoutInMinutes = &v + return s +} + +// SetResolvedSourceVersion sets the ResolvedSourceVersion field's value. +func (s *BuildBatch) SetResolvedSourceVersion(v string) *BuildBatch { + s.ResolvedSourceVersion = &v + return s +} + +// SetSecondaryArtifacts sets the SecondaryArtifacts field's value. +func (s *BuildBatch) SetSecondaryArtifacts(v []*BuildArtifacts) *BuildBatch { + s.SecondaryArtifacts = v + return s +} + +// SetSecondarySourceVersions sets the SecondarySourceVersions field's value. +func (s *BuildBatch) SetSecondarySourceVersions(v []*ProjectSourceVersion) *BuildBatch { + s.SecondarySourceVersions = v + return s +} + +// SetSecondarySources sets the SecondarySources field's value. +func (s *BuildBatch) SetSecondarySources(v []*ProjectSource) *BuildBatch { + s.SecondarySources = v + return s +} + +// SetServiceRole sets the ServiceRole field's value. +func (s *BuildBatch) SetServiceRole(v string) *BuildBatch { + s.ServiceRole = &v + return s +} + +// SetSource sets the Source field's value. +func (s *BuildBatch) SetSource(v *ProjectSource) *BuildBatch { + s.Source = v + return s +} + +// SetSourceVersion sets the SourceVersion field's value. +func (s *BuildBatch) SetSourceVersion(v string) *BuildBatch { + s.SourceVersion = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *BuildBatch) SetStartTime(v time.Time) *BuildBatch { + s.StartTime = &v + return s +} + +// SetVpcConfig sets the VpcConfig field's value. +func (s *BuildBatch) SetVpcConfig(v *VpcConfig) *BuildBatch { + s.VpcConfig = v + return s +} + +// Specifies filters when retrieving batch builds. +type BuildBatchFilter struct { + _ struct{} `type:"structure"` + + // The status of the batch builds to retrieve. Only batch builds that have this + // status will be retrieved. + Status *string `locationName:"status" type:"string" enum:"StatusType"` +} + +// String returns the string representation +func (s BuildBatchFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BuildBatchFilter) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *BuildBatchFilter) SetStatus(v string) *BuildBatchFilter { + s.Status = &v + return s +} + +// Contains information about a stage for a batch build. +type BuildBatchPhase struct { + _ struct{} `type:"structure"` + + // Additional information about the batch build phase. Especially to help troubleshoot + // a failed btach build. + Contexts []*PhaseContext `locationName:"contexts" type:"list"` + + // How long, in seconds, between the starting and ending times of the batch + // build's phase. + DurationInSeconds *int64 `locationName:"durationInSeconds" type:"long"` + + // When the batch build phase ended, expressed in Unix time format. + EndTime *time.Time `locationName:"endTime" type:"timestamp"` + + // The current status of the batch build phase. 
Valid values include: + // + // FAILED + // + // The build phase failed. + // + // FAULT + // + // The build phase faulted. + // + // IN_PROGRESS + // + // The build phase is still in progress. + // + // QUEUED + // + // The build has been submitted and is queued behind other submitted builds. + // + // STOPPED + // + // The build phase stopped. + // + // SUCCEEDED + // + // The build phase succeeded. + // + // TIMED_OUT + // + // The build phase timed out. + PhaseStatus *string `locationName:"phaseStatus" type:"string" enum:"StatusType"` + + // The name of the batch build phase. Valid values include: + // + // COMBINE_ARTIFACTS + // + // Build output artifacts are being combined and uploaded to the output location. + // + // DOWNLOAD_BATCHSPEC + // + // The batch build specification is being downloaded. + // + // FAILED + // + // One or more of the builds failed. + // + // IN_PROGRESS + // + // The batch build is in progress. + // + // STOPPED + // + // The batch build was stopped. + // + // SUBMITTED + // + // The btach build has been submitted. + // + // SUCCEEDED + // + // The batch build succeeded. + PhaseType *string `locationName:"phaseType" type:"string" enum:"BuildBatchPhaseType"` + + // When the batch build phase started, expressed in Unix time format. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` +} + +// String returns the string representation +func (s BuildBatchPhase) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BuildBatchPhase) GoString() string { + return s.String() +} + +// SetContexts sets the Contexts field's value. +func (s *BuildBatchPhase) SetContexts(v []*PhaseContext) *BuildBatchPhase { + s.Contexts = v + return s +} + +// SetDurationInSeconds sets the DurationInSeconds field's value. +func (s *BuildBatchPhase) SetDurationInSeconds(v int64) *BuildBatchPhase { + s.DurationInSeconds = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *BuildBatchPhase) SetEndTime(v time.Time) *BuildBatchPhase { + s.EndTime = &v + return s +} + +// SetPhaseStatus sets the PhaseStatus field's value. +func (s *BuildBatchPhase) SetPhaseStatus(v string) *BuildBatchPhase { + s.PhaseStatus = &v + return s +} + +// SetPhaseType sets the PhaseType field's value. +func (s *BuildBatchPhase) SetPhaseType(v string) *BuildBatchPhase { + s.PhaseType = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *BuildBatchPhase) SetStartTime(v time.Time) *BuildBatchPhase { + s.StartTime = &v + return s +} + +// Contains information about a batch build build group. Build groups are used +// to combine builds that can run in parallel, while still being able to set +// dependencies on other build groups. +type BuildGroup struct { + _ struct{} `type:"structure"` + + // A BuildSummary object that contains a summary of the current build group. + CurrentBuildSummary *BuildSummary `locationName:"currentBuildSummary" type:"structure"` + + // An array of strings that contain the identifiers of the build groups that + // this build group depends on. + DependsOn []*string `locationName:"dependsOn" type:"list"` + + // Contains the identifier of the build group. + Identifier *string `locationName:"identifier" type:"string"` + + // Specifies if failures in this build group can be ignored. + IgnoreFailure *bool `locationName:"ignoreFailure" type:"boolean"` + + // An array of BuildSummary objects that contain summaries of previous build + // groups. 
+ PriorBuildSummaryList []*BuildSummary `locationName:"priorBuildSummaryList" type:"list"` +} + +// String returns the string representation +func (s BuildGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BuildGroup) GoString() string { + return s.String() +} + +// SetCurrentBuildSummary sets the CurrentBuildSummary field's value. +func (s *BuildGroup) SetCurrentBuildSummary(v *BuildSummary) *BuildGroup { + s.CurrentBuildSummary = v + return s +} + +// SetDependsOn sets the DependsOn field's value. +func (s *BuildGroup) SetDependsOn(v []*string) *BuildGroup { + s.DependsOn = v + return s +} + +// SetIdentifier sets the Identifier field's value. +func (s *BuildGroup) SetIdentifier(v string) *BuildGroup { + s.Identifier = &v + return s +} + +// SetIgnoreFailure sets the IgnoreFailure field's value. +func (s *BuildGroup) SetIgnoreFailure(v bool) *BuildGroup { + s.IgnoreFailure = &v + return s +} + +// SetPriorBuildSummaryList sets the PriorBuildSummaryList field's value. +func (s *BuildGroup) SetPriorBuildSummaryList(v []*BuildSummary) *BuildGroup { + s.PriorBuildSummaryList = v + return s +} + +// Information about a build that could not be successfully deleted. +type BuildNotDeleted struct { + _ struct{} `type:"structure"` + + // The ID of the build that could not be successfully deleted. + Id *string `locationName:"id" min:"1" type:"string"` + + // Additional information about the build that could not be successfully deleted. + StatusCode *string `locationName:"statusCode" type:"string"` +} + +// String returns the string representation +func (s BuildNotDeleted) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BuildNotDeleted) GoString() string { + return s.String() +} + +// SetId sets the Id field's value. +func (s *BuildNotDeleted) SetId(v string) *BuildNotDeleted { + s.Id = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *BuildNotDeleted) SetStatusCode(v string) *BuildNotDeleted { + s.StatusCode = &v + return s +} + +// Information about a stage for a build. +type BuildPhase struct { + _ struct{} `type:"structure"` + + // Additional information about a build phase, especially to help troubleshoot + // a failed build. + Contexts []*PhaseContext `locationName:"contexts" type:"list"` + + // How long, in seconds, between the starting and ending times of the build's + // phase. + DurationInSeconds *int64 `locationName:"durationInSeconds" type:"long"` + + // When the build phase ended, expressed in Unix time format. + EndTime *time.Time `locationName:"endTime" type:"timestamp"` + + // The current status of the build phase. Valid values include: + // + // FAILED + // + // The build phase failed. + // + // FAULT + // + // The build phase faulted. + // + // IN_PROGRESS + // + // The build phase is still in progress. + // + // QUEUED + // + // The build has been submitted and is queued behind other submitted builds. + // + // STOPPED + // + // The build phase stopped. + // + // SUCCEEDED + // + // The build phase succeeded. + // + // TIMED_OUT + // + // The build phase timed out. + PhaseStatus *string `locationName:"phaseStatus" type:"string" enum:"StatusType"` + + // The name of the build phase. Valid values include: + // + // * BUILD: Core build activities typically occur in this build phase. + // + // * COMPLETED: The build has been completed. + // + // * DOWNLOAD_SOURCE: Source code is being downloaded in this build phase. 
+ // + // * FINALIZING: The build process is completing in this build phase. + // + // * INSTALL: Installation activities typically occur in this build phase. + // + // * POST_BUILD: Post-build activities typically occur in this build phase. + // + // * PRE_BUILD: Pre-build activities typically occur in this build phase. + // + // * PROVISIONING: The build environment is being set up. + // + // * QUEUED: The build has been submitted and is queued behind other submitted + // builds. + // + // * SUBMITTED: The build has been submitted. + // + // * UPLOAD_ARTIFACTS: Build output artifacts are being uploaded to the output + // location. + PhaseType *string `locationName:"phaseType" type:"string" enum:"BuildPhaseType"` + + // When the build phase started, expressed in Unix time format. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` +} + +// String returns the string representation +func (s BuildPhase) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BuildPhase) GoString() string { + return s.String() +} + +// SetContexts sets the Contexts field's value. +func (s *BuildPhase) SetContexts(v []*PhaseContext) *BuildPhase { + s.Contexts = v + return s +} + +// SetDurationInSeconds sets the DurationInSeconds field's value. +func (s *BuildPhase) SetDurationInSeconds(v int64) *BuildPhase { + s.DurationInSeconds = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *BuildPhase) SetEndTime(v time.Time) *BuildPhase { + s.EndTime = &v + return s +} + +// SetPhaseStatus sets the PhaseStatus field's value. +func (s *BuildPhase) SetPhaseStatus(v string) *BuildPhase { + s.PhaseStatus = &v + return s +} + +// SetPhaseType sets the PhaseType field's value. +func (s *BuildPhase) SetPhaseType(v string) *BuildPhase { + s.PhaseType = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *BuildPhase) SetStartTime(v time.Time) *BuildPhase { + s.StartTime = &v + return s +} + +// Contains information that defines how the AWS CodeBuild build project reports +// the build status to the source provider. +type BuildStatusConfig struct { + _ struct{} `type:"structure"` + + // Specifies the context of the build status CodeBuild sends to the source provider. + // The usage of this parameter depends on the source provider. + // + // Bitbucket + // + // This parameter is used for the name parameter in the Bitbucket commit status. + // For more information, see build (https://developer.atlassian.com/bitbucket/api/2/reference/resource/repositories/%7Bworkspace%7D/%7Brepo_slug%7D/commit/%7Bnode%7D/statuses/build) + // in the Bitbucket API documentation. + // + // GitHub/GitHub Enterprise Server + // + // This parameter is used for the context parameter in the GitHub commit status. + // For more information, see Create a commit status (https://developer.github.com/v3/repos/statuses/#create-a-commit-status) + // in the GitHub developer guide. + Context *string `locationName:"context" type:"string"` + + // Specifies the target url of the build status CodeBuild sends to the source + // provider. The usage of this parameter depends on the source provider. + // + // Bitbucket + // + // This parameter is used for the url parameter in the Bitbucket commit status. + // For more information, see build (https://developer.atlassian.com/bitbucket/api/2/reference/resource/repositories/%7Bworkspace%7D/%7Brepo_slug%7D/commit/%7Bnode%7D/statuses/build) + // in the Bitbucket API documentation. 
+ // + // GitHub/GitHub Enterprise Server + // + // This parameter is used for the target_url parameter in the GitHub commit + // status. For more information, see Create a commit status (https://developer.github.com/v3/repos/statuses/#create-a-commit-status) + // in the GitHub developer guide. + TargetUrl *string `locationName:"targetUrl" type:"string"` +} + +// String returns the string representation +func (s BuildStatusConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BuildStatusConfig) GoString() string { + return s.String() +} + +// SetContext sets the Context field's value. +func (s *BuildStatusConfig) SetContext(v string) *BuildStatusConfig { + s.Context = &v + return s +} + +// SetTargetUrl sets the TargetUrl field's value. +func (s *BuildStatusConfig) SetTargetUrl(v string) *BuildStatusConfig { + s.TargetUrl = &v + return s +} + +// Contains summary information about a batch build group. +type BuildSummary struct { + _ struct{} `type:"structure"` + + // The batch build ARN. + Arn *string `locationName:"arn" type:"string"` + + // The status of the build group. + // + // FAILED + // + // The build group failed. + // + // FAULT + // + // The build group faulted. + // + // IN_PROGRESS + // + // The build group is still in progress. + // + // STOPPED + // + // The build group stopped. + // + // SUCCEEDED + // + // The build group succeeded. + // + // TIMED_OUT + // + // The build group timed out. + BuildStatus *string `locationName:"buildStatus" type:"string" enum:"StatusType"` + + // A ResolvedArtifact object that represents the primary build artifacts for + // the build group. + PrimaryArtifact *ResolvedArtifact `locationName:"primaryArtifact" type:"structure"` + + // When the build was started, expressed in Unix time format. + RequestedOn *time.Time `locationName:"requestedOn" type:"timestamp"` + + // An array of ResolvedArtifact objects that represents the secondary build + // artifacts for the build group. + SecondaryArtifacts []*ResolvedArtifact `locationName:"secondaryArtifacts" type:"list"` +} + +// String returns the string representation +func (s BuildSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BuildSummary) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *BuildSummary) SetArn(v string) *BuildSummary { + s.Arn = &v + return s +} + +// SetBuildStatus sets the BuildStatus field's value. +func (s *BuildSummary) SetBuildStatus(v string) *BuildSummary { + s.BuildStatus = &v + return s +} + +// SetPrimaryArtifact sets the PrimaryArtifact field's value. +func (s *BuildSummary) SetPrimaryArtifact(v *ResolvedArtifact) *BuildSummary { + s.PrimaryArtifact = v + return s +} + +// SetRequestedOn sets the RequestedOn field's value. +func (s *BuildSummary) SetRequestedOn(v time.Time) *BuildSummary { + s.RequestedOn = &v + return s +} + +// SetSecondaryArtifacts sets the SecondaryArtifacts field's value. +func (s *BuildSummary) SetSecondaryArtifacts(v []*ResolvedArtifact) *BuildSummary { + s.SecondaryArtifacts = v + return s +} + +// Information about Amazon CloudWatch Logs for a build project. +type CloudWatchLogsConfig struct { + _ struct{} `type:"structure"` + + // The group name of the logs in Amazon CloudWatch Logs. For more information, + // see Working with Log Groups and Log Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html). 
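// Illustrative editorial sketch, not part of the vendored SDK diff: a
// BuildStatusConfig that reports commit status back to GitHub under a custom
// context with a link to an external dashboard. Both values are placeholders;
// attaching the config to a project's source settings is assumed to happen
// elsewhere and is not shown here.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	statusCfg := &codebuild.BuildStatusConfig{
		// Becomes the "context" field of the GitHub commit status
		// (or the "name" field of a Bitbucket commit status).
		Context: aws.String("ci/codebuild/unit-tests"),
		// Becomes the "target_url" field of the GitHub commit status
		// (or the "url" field of a Bitbucket commit status).
		TargetUrl: aws.String("https://ci.example.com/builds/latest"),
	}
	fmt.Println(aws.StringValue(statusCfg.Context), "->", aws.StringValue(statusCfg.TargetUrl))
}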
+ GroupName *string `locationName:"groupName" type:"string"` + + // The current status of the logs in Amazon CloudWatch Logs for a build project. + // Valid values are: + // + // * ENABLED: Amazon CloudWatch Logs are enabled for this build project. + // + // * DISABLED: Amazon CloudWatch Logs are not enabled for this build project. + // + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"LogsConfigStatusType"` + + // The prefix of the stream name of the Amazon CloudWatch Logs. For more information, + // see Working with Log Groups and Log Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html). + StreamName *string `locationName:"streamName" type:"string"` +} + +// String returns the string representation +func (s CloudWatchLogsConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudWatchLogsConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CloudWatchLogsConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloudWatchLogsConfig"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *CloudWatchLogsConfig) SetGroupName(v string) *CloudWatchLogsConfig { + s.GroupName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CloudWatchLogsConfig) SetStatus(v string) *CloudWatchLogsConfig { + s.Status = &v + return s +} + +// SetStreamName sets the StreamName field's value. +func (s *CloudWatchLogsConfig) SetStreamName(v string) *CloudWatchLogsConfig { + s.StreamName = &v + return s +} + +// Contains code coverage report information. +// +// Line coverage measures how many statements your tests cover. A statement +// is a single instruction, not including comments, conditionals, etc. +// +// Branch coverage determines if your tests cover every possible branch of a +// control structure, such as an if or case statement. +type CodeCoverage struct { + _ struct{} `type:"structure"` + + // The percentage of branches that are covered by your tests. + BranchCoveragePercentage *float64 `locationName:"branchCoveragePercentage" type:"double"` + + // The number of conditional branches that are covered by your tests. + BranchesCovered *int64 `locationName:"branchesCovered" type:"integer"` + + // The number of conditional branches that are not covered by your tests. + BranchesMissed *int64 `locationName:"branchesMissed" type:"integer"` + + // The date and time that the tests were run. + Expired *time.Time `locationName:"expired" type:"timestamp"` + + // The path of the test report file. + FilePath *string `locationName:"filePath" min:"1" type:"string"` + + // The identifier of the code coverage report. + Id *string `locationName:"id" min:"1" type:"string"` + + // The percentage of lines that are covered by your tests. + LineCoveragePercentage *float64 `locationName:"lineCoveragePercentage" type:"double"` + + // The number of lines that are covered by your tests. + LinesCovered *int64 `locationName:"linesCovered" type:"integer"` + + // The number of lines that are not covered by your tests. + LinesMissed *int64 `locationName:"linesMissed" type:"integer"` + + // The ARN of the report. 
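// Illustrative editorial sketch, not part of the vendored SDK diff: building a
// CloudWatchLogsConfig with the required Status field and using the generated
// Validate method to catch the case where Status is left unset. The group and
// stream names are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	cfg := &codebuild.CloudWatchLogsConfig{
		Status:     aws.String("ENABLED"), // ENABLED or DISABLED
		GroupName:  aws.String("/codebuild/my-project"),
		StreamName: aws.String("unit-tests"),
	}
	fmt.Println("configured:", cfg.Validate()) // <nil>

	// Omitting Status fails client-side validation before any request is sent.
	fmt.Println("missing status:", (&codebuild.CloudWatchLogsConfig{}).Validate())
}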
+ ReportARN *string `locationName:"reportARN" min:"1" type:"string"` +} + +// String returns the string representation +func (s CodeCoverage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CodeCoverage) GoString() string { + return s.String() +} + +// SetBranchCoveragePercentage sets the BranchCoveragePercentage field's value. +func (s *CodeCoverage) SetBranchCoveragePercentage(v float64) *CodeCoverage { + s.BranchCoveragePercentage = &v + return s +} + +// SetBranchesCovered sets the BranchesCovered field's value. +func (s *CodeCoverage) SetBranchesCovered(v int64) *CodeCoverage { + s.BranchesCovered = &v + return s +} + +// SetBranchesMissed sets the BranchesMissed field's value. +func (s *CodeCoverage) SetBranchesMissed(v int64) *CodeCoverage { + s.BranchesMissed = &v + return s +} + +// SetExpired sets the Expired field's value. +func (s *CodeCoverage) SetExpired(v time.Time) *CodeCoverage { + s.Expired = &v + return s +} + +// SetFilePath sets the FilePath field's value. +func (s *CodeCoverage) SetFilePath(v string) *CodeCoverage { + s.FilePath = &v + return s +} + +// SetId sets the Id field's value. +func (s *CodeCoverage) SetId(v string) *CodeCoverage { + s.Id = &v + return s +} + +// SetLineCoveragePercentage sets the LineCoveragePercentage field's value. +func (s *CodeCoverage) SetLineCoveragePercentage(v float64) *CodeCoverage { + s.LineCoveragePercentage = &v + return s +} + +// SetLinesCovered sets the LinesCovered field's value. +func (s *CodeCoverage) SetLinesCovered(v int64) *CodeCoverage { + s.LinesCovered = &v + return s +} + +// SetLinesMissed sets the LinesMissed field's value. +func (s *CodeCoverage) SetLinesMissed(v int64) *CodeCoverage { + s.LinesMissed = &v + return s +} + +// SetReportARN sets the ReportARN field's value. +func (s *CodeCoverage) SetReportARN(v string) *CodeCoverage { + s.ReportARN = &v + return s +} + +// Contains a summary of a code coverage report. +// +// Line coverage measures how many statements your tests cover. A statement +// is a single instruction, not including comments, conditionals, etc. +// +// Branch coverage determines if your tests cover every possible branch of a +// control structure, such as an if or case statement. +type CodeCoverageReportSummary struct { + _ struct{} `type:"structure"` + + // The percentage of branches that are covered by your tests. + BranchCoveragePercentage *float64 `locationName:"branchCoveragePercentage" type:"double"` + + // The number of conditional branches that are covered by your tests. + BranchesCovered *int64 `locationName:"branchesCovered" type:"integer"` + + // The number of conditional branches that are not covered by your tests. + BranchesMissed *int64 `locationName:"branchesMissed" type:"integer"` + + // The percentage of lines that are covered by your tests. + LineCoveragePercentage *float64 `locationName:"lineCoveragePercentage" type:"double"` + + // The number of lines that are covered by your tests. + LinesCovered *int64 `locationName:"linesCovered" type:"integer"` + + // The number of lines that are not covered by your tests. 
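// Illustrative editorial sketch, not part of the vendored SDK diff: reading a
// CodeCoverage entry and recomputing line and branch percentages from the
// covered/missed counters, assuming the usual covered / (covered + missed)
// reading of those fields (the service also returns the percentages directly
// in LineCoveragePercentage and BranchCoveragePercentage). The numbers below
// are made up.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// pct returns 100*covered/(covered+missed), or 0 when there is nothing to cover.
func pct(covered, missed int64) float64 {
	if covered+missed == 0 {
		return 0
	}
	return 100 * float64(covered) / float64(covered+missed)
}

func main() {
	cc := &codebuild.CodeCoverage{
		FilePath:        aws.String("pkg/widgets/coverage.xml"),
		LinesCovered:    aws.Int64(180),
		LinesMissed:     aws.Int64(20),
		BranchesCovered: aws.Int64(45),
		BranchesMissed:  aws.Int64(15),
	}
	linePct := pct(aws.Int64Value(cc.LinesCovered), aws.Int64Value(cc.LinesMissed))         // 90.0
	branchPct := pct(aws.Int64Value(cc.BranchesCovered), aws.Int64Value(cc.BranchesMissed)) // 75.0
	fmt.Printf("%s: line %.1f%%, branch %.1f%%\n", aws.StringValue(cc.FilePath), linePct, branchPct)
}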
+ LinesMissed *int64 `locationName:"linesMissed" type:"integer"` +} + +// String returns the string representation +func (s CodeCoverageReportSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AccountLimitExceededException) GoString() string { +func (s CodeCoverageReportSummary) GoString() string { return s.String() } -func newErrorAccountLimitExceededException(v protocol.ResponseMetadata) error { - return &AccountLimitExceededException{ - respMetadata: v, +// SetBranchCoveragePercentage sets the BranchCoveragePercentage field's value. +func (s *CodeCoverageReportSummary) SetBranchCoveragePercentage(v float64) *CodeCoverageReportSummary { + s.BranchCoveragePercentage = &v + return s +} + +// SetBranchesCovered sets the BranchesCovered field's value. +func (s *CodeCoverageReportSummary) SetBranchesCovered(v int64) *CodeCoverageReportSummary { + s.BranchesCovered = &v + return s +} + +// SetBranchesMissed sets the BranchesMissed field's value. +func (s *CodeCoverageReportSummary) SetBranchesMissed(v int64) *CodeCoverageReportSummary { + s.BranchesMissed = &v + return s +} + +// SetLineCoveragePercentage sets the LineCoveragePercentage field's value. +func (s *CodeCoverageReportSummary) SetLineCoveragePercentage(v float64) *CodeCoverageReportSummary { + s.LineCoveragePercentage = &v + return s +} + +// SetLinesCovered sets the LinesCovered field's value. +func (s *CodeCoverageReportSummary) SetLinesCovered(v int64) *CodeCoverageReportSummary { + s.LinesCovered = &v + return s +} + +// SetLinesMissed sets the LinesMissed field's value. +func (s *CodeCoverageReportSummary) SetLinesMissed(v int64) *CodeCoverageReportSummary { + s.LinesMissed = &v + return s +} + +type CreateProjectInput struct { + _ struct{} `type:"structure"` + + // Information about the build output artifacts for the build project. + // + // Artifacts is a required field + Artifacts *ProjectArtifacts `locationName:"artifacts" type:"structure" required:"true"` + + // Set this to true to generate a publicly accessible URL for your project's + // build badge. + BadgeEnabled *bool `locationName:"badgeEnabled" type:"boolean"` + + // A ProjectBuildBatchConfig object that defines the batch build options for + // the project. + BuildBatchConfig *ProjectBuildBatchConfig `locationName:"buildBatchConfig" type:"structure"` + + // Stores recently used information so that it can be quickly accessed at a + // later time. + Cache *ProjectCache `locationName:"cache" type:"structure"` + + // A description that makes the build project easy to identify. + Description *string `locationName:"description" type:"string"` + + // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be + // used for encrypting the build output artifacts. + // + // You can use a cross-account KMS key to encrypt the build output artifacts + // if your service role has permission to that key. + // + // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, + // the CMK's alias (using the format alias/). + EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` + + // Information about the build environment for the build project. + // + // Environment is a required field + Environment *ProjectEnvironment `locationName:"environment" type:"structure" required:"true"` + + // An array of ProjectFileSystemLocation objects for a CodeBuild build project. 
+ // A ProjectFileSystemLocation object specifies the identifier, location, mountOptions, + // mountPoint, and type of a file system created using Amazon Elastic File System. + FileSystemLocations []*ProjectFileSystemLocation `locationName:"fileSystemLocations" type:"list"` + + // Information about logs for the build project. These can be logs in Amazon + // CloudWatch Logs, logs uploaded to a specified S3 bucket, or both. + LogsConfig *LogsConfig `locationName:"logsConfig" type:"structure"` + + // The name of the build project. + // + // Name is a required field + Name *string `locationName:"name" min:"2" type:"string" required:"true"` + + // The number of minutes a build is allowed to be queued before it times out. + QueuedTimeoutInMinutes *int64 `locationName:"queuedTimeoutInMinutes" min:"5" type:"integer"` + + // An array of ProjectArtifacts objects. + SecondaryArtifacts []*ProjectArtifacts `locationName:"secondaryArtifacts" type:"list"` + + // An array of ProjectSourceVersion objects. If secondarySourceVersions is specified + // at the build level, then they take precedence over these secondarySourceVersions + // (at the project level). + SecondarySourceVersions []*ProjectSourceVersion `locationName:"secondarySourceVersions" type:"list"` + + // An array of ProjectSource objects. + SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` + + // The ARN of the AWS Identity and Access Management (IAM) role that enables + // AWS CodeBuild to interact with dependent AWS services on behalf of the AWS + // account. + // + // ServiceRole is a required field + ServiceRole *string `locationName:"serviceRole" min:"1" type:"string" required:"true"` + + // Information about the build input source code for the build project. + // + // Source is a required field + Source *ProjectSource `locationName:"source" type:"structure" required:"true"` + + // A version of the build input to be built for this project. If not specified, + // the latest version is used. If specified, it must be one of: + // + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. + // + // * For GitHub: the commit ID, pull request ID, branch name, or tag name + // that corresponds to the version of the source code you want to build. + // If a pull request ID is specified, it must use the format pr/pull-request-ID + // (for example pr/25). If a branch name is specified, the branch's HEAD + // commit ID is used. If not specified, the default branch's HEAD commit + // ID is used. + // + // * For Bitbucket: the commit ID, branch name, or tag name that corresponds + // to the version of the source code you want to build. If a branch name + // is specified, the branch's HEAD commit ID is used. If not specified, the + // default branch's HEAD commit ID is used. + // + // * For Amazon Simple Storage Service (Amazon S3): the version ID of the + // object that represents the build input ZIP file to use. + // + // If sourceVersion is specified at the build level, then that version takes + // precedence over this sourceVersion (at the project level). + // + // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) + // in the AWS CodeBuild User Guide. + SourceVersion *string `locationName:"sourceVersion" type:"string"` + + // A list of tag key and value pairs associated with this build project. + // + // These tags are available for use by AWS services that support AWS CodeBuild + // build project tags. 
+ Tags []*Tag `locationName:"tags" type:"list"` + + // How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait + // before it times out any build that has not been marked as completed. The + // default is 60 minutes. + TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" min:"5" type:"integer"` + + // VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC. + VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` +} + +// String returns the string representation +func (s CreateProjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateProjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateProjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateProjectInput"} + if s.Artifacts == nil { + invalidParams.Add(request.NewErrParamRequired("Artifacts")) + } + if s.EncryptionKey != nil && len(*s.EncryptionKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncryptionKey", 1)) + } + if s.Environment == nil { + invalidParams.Add(request.NewErrParamRequired("Environment")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.QueuedTimeoutInMinutes != nil && *s.QueuedTimeoutInMinutes < 5 { + invalidParams.Add(request.NewErrParamMinValue("QueuedTimeoutInMinutes", 5)) + } + if s.ServiceRole == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceRole")) + } + if s.ServiceRole != nil && len(*s.ServiceRole) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceRole", 1)) + } + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.TimeoutInMinutes != nil && *s.TimeoutInMinutes < 5 { + invalidParams.Add(request.NewErrParamMinValue("TimeoutInMinutes", 5)) + } + if s.Artifacts != nil { + if err := s.Artifacts.Validate(); err != nil { + invalidParams.AddNested("Artifacts", err.(request.ErrInvalidParams)) + } + } + if s.BuildBatchConfig != nil { + if err := s.BuildBatchConfig.Validate(); err != nil { + invalidParams.AddNested("BuildBatchConfig", err.(request.ErrInvalidParams)) + } + } + if s.Cache != nil { + if err := s.Cache.Validate(); err != nil { + invalidParams.AddNested("Cache", err.(request.ErrInvalidParams)) + } + } + if s.Environment != nil { + if err := s.Environment.Validate(); err != nil { + invalidParams.AddNested("Environment", err.(request.ErrInvalidParams)) + } + } + if s.LogsConfig != nil { + if err := s.LogsConfig.Validate(); err != nil { + invalidParams.AddNested("LogsConfig", err.(request.ErrInvalidParams)) + } + } + if s.SecondaryArtifacts != nil { + for i, v := range s.SecondaryArtifacts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecondaryArtifacts", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SecondarySourceVersions != nil { + for i, v := range s.SecondarySourceVersions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecondarySourceVersions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SecondarySources != nil { + for i, v := range s.SecondarySources { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", 
"SecondarySources", i), err.(request.ErrInvalidParams)) + } + } } -} - -// Code returns the exception type name. -func (s AccountLimitExceededException) Code() string { - return "AccountLimitExceededException" -} - -// Message returns the exception's message. -func (s AccountLimitExceededException) Message() string { - if s.Message_ != nil { - return *s.Message_ + if s.Source != nil { + if err := s.Source.Validate(); err != nil { + invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) + } } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccountLimitExceededException) OrigErr() error { - return nil -} - -func (s AccountLimitExceededException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s AccountLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s AccountLimitExceededException) RequestID() string { - return s.respMetadata.RequestID -} - -type BatchDeleteBuildsInput struct { - _ struct{} `type:"structure"` - - // The IDs of the builds to delete. - // - // Ids is a required field - Ids []*string `locationName:"ids" min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s BatchDeleteBuildsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchDeleteBuildsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchDeleteBuildsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchDeleteBuildsInput"} - if s.Ids == nil { - invalidParams.Add(request.NewErrParamRequired("Ids")) + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } } - if s.Ids != nil && len(s.Ids) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Ids", 1)) + if s.VpcConfig != nil { + if err := s.VpcConfig.Validate(); err != nil { + invalidParams.AddNested("VpcConfig", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -2887,219 +6547,214 @@ func (s *BatchDeleteBuildsInput) Validate() error { return nil } -// SetIds sets the Ids field's value. -func (s *BatchDeleteBuildsInput) SetIds(v []*string) *BatchDeleteBuildsInput { - s.Ids = v +// SetArtifacts sets the Artifacts field's value. +func (s *CreateProjectInput) SetArtifacts(v *ProjectArtifacts) *CreateProjectInput { + s.Artifacts = v return s } -type BatchDeleteBuildsOutput struct { - _ struct{} `type:"structure"` - - // The IDs of the builds that were successfully deleted. - BuildsDeleted []*string `locationName:"buildsDeleted" min:"1" type:"list"` - - // Information about any builds that could not be successfully deleted. - BuildsNotDeleted []*BuildNotDeleted `locationName:"buildsNotDeleted" type:"list"` -} - -// String returns the string representation -func (s BatchDeleteBuildsOutput) String() string { - return awsutil.Prettify(s) +// SetBadgeEnabled sets the BadgeEnabled field's value. 
+func (s *CreateProjectInput) SetBadgeEnabled(v bool) *CreateProjectInput { + s.BadgeEnabled = &v + return s } -// GoString returns the string representation -func (s BatchDeleteBuildsOutput) GoString() string { - return s.String() +// SetBuildBatchConfig sets the BuildBatchConfig field's value. +func (s *CreateProjectInput) SetBuildBatchConfig(v *ProjectBuildBatchConfig) *CreateProjectInput { + s.BuildBatchConfig = v + return s } -// SetBuildsDeleted sets the BuildsDeleted field's value. -func (s *BatchDeleteBuildsOutput) SetBuildsDeleted(v []*string) *BatchDeleteBuildsOutput { - s.BuildsDeleted = v +// SetCache sets the Cache field's value. +func (s *CreateProjectInput) SetCache(v *ProjectCache) *CreateProjectInput { + s.Cache = v return s } -// SetBuildsNotDeleted sets the BuildsNotDeleted field's value. -func (s *BatchDeleteBuildsOutput) SetBuildsNotDeleted(v []*BuildNotDeleted) *BatchDeleteBuildsOutput { - s.BuildsNotDeleted = v +// SetDescription sets the Description field's value. +func (s *CreateProjectInput) SetDescription(v string) *CreateProjectInput { + s.Description = &v return s } -type BatchGetBuildsInput struct { - _ struct{} `type:"structure"` - - // The IDs of the builds. - // - // Ids is a required field - Ids []*string `locationName:"ids" min:"1" type:"list" required:"true"` +// SetEncryptionKey sets the EncryptionKey field's value. +func (s *CreateProjectInput) SetEncryptionKey(v string) *CreateProjectInput { + s.EncryptionKey = &v + return s } -// String returns the string representation -func (s BatchGetBuildsInput) String() string { - return awsutil.Prettify(s) +// SetEnvironment sets the Environment field's value. +func (s *CreateProjectInput) SetEnvironment(v *ProjectEnvironment) *CreateProjectInput { + s.Environment = v + return s } -// GoString returns the string representation -func (s BatchGetBuildsInput) GoString() string { - return s.String() +// SetFileSystemLocations sets the FileSystemLocations field's value. +func (s *CreateProjectInput) SetFileSystemLocations(v []*ProjectFileSystemLocation) *CreateProjectInput { + s.FileSystemLocations = v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetBuildsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetBuildsInput"} - if s.Ids == nil { - invalidParams.Add(request.NewErrParamRequired("Ids")) - } - if s.Ids != nil && len(s.Ids) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Ids", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetLogsConfig sets the LogsConfig field's value. +func (s *CreateProjectInput) SetLogsConfig(v *LogsConfig) *CreateProjectInput { + s.LogsConfig = v + return s } -// SetIds sets the Ids field's value. -func (s *BatchGetBuildsInput) SetIds(v []*string) *BatchGetBuildsInput { - s.Ids = v +// SetName sets the Name field's value. +func (s *CreateProjectInput) SetName(v string) *CreateProjectInput { + s.Name = &v return s } -type BatchGetBuildsOutput struct { - _ struct{} `type:"structure"` - - // Information about the requested builds. - Builds []*Build `locationName:"builds" type:"list"` - - // The IDs of builds for which information could not be found. - BuildsNotFound []*string `locationName:"buildsNotFound" min:"1" type:"list"` +// SetQueuedTimeoutInMinutes sets the QueuedTimeoutInMinutes field's value. 
+func (s *CreateProjectInput) SetQueuedTimeoutInMinutes(v int64) *CreateProjectInput { + s.QueuedTimeoutInMinutes = &v + return s } -// String returns the string representation -func (s BatchGetBuildsOutput) String() string { - return awsutil.Prettify(s) +// SetSecondaryArtifacts sets the SecondaryArtifacts field's value. +func (s *CreateProjectInput) SetSecondaryArtifacts(v []*ProjectArtifacts) *CreateProjectInput { + s.SecondaryArtifacts = v + return s } -// GoString returns the string representation -func (s BatchGetBuildsOutput) GoString() string { - return s.String() +// SetSecondarySourceVersions sets the SecondarySourceVersions field's value. +func (s *CreateProjectInput) SetSecondarySourceVersions(v []*ProjectSourceVersion) *CreateProjectInput { + s.SecondarySourceVersions = v + return s } -// SetBuilds sets the Builds field's value. -func (s *BatchGetBuildsOutput) SetBuilds(v []*Build) *BatchGetBuildsOutput { - s.Builds = v +// SetSecondarySources sets the SecondarySources field's value. +func (s *CreateProjectInput) SetSecondarySources(v []*ProjectSource) *CreateProjectInput { + s.SecondarySources = v return s } -// SetBuildsNotFound sets the BuildsNotFound field's value. -func (s *BatchGetBuildsOutput) SetBuildsNotFound(v []*string) *BatchGetBuildsOutput { - s.BuildsNotFound = v +// SetServiceRole sets the ServiceRole field's value. +func (s *CreateProjectInput) SetServiceRole(v string) *CreateProjectInput { + s.ServiceRole = &v return s } -type BatchGetProjectsInput struct { - _ struct{} `type:"structure"` - - // The names or ARNs of the build projects. To get information about a project - // shared with your AWS account, its ARN must be specified. You cannot specify - // a shared project using its name. - // - // Names is a required field - Names []*string `locationName:"names" min:"1" type:"list" required:"true"` +// SetSource sets the Source field's value. +func (s *CreateProjectInput) SetSource(v *ProjectSource) *CreateProjectInput { + s.Source = v + return s } -// String returns the string representation -func (s BatchGetProjectsInput) String() string { - return awsutil.Prettify(s) +// SetSourceVersion sets the SourceVersion field's value. +func (s *CreateProjectInput) SetSourceVersion(v string) *CreateProjectInput { + s.SourceVersion = &v + return s } -// GoString returns the string representation -func (s BatchGetProjectsInput) GoString() string { - return s.String() +// SetTags sets the Tags field's value. +func (s *CreateProjectInput) SetTags(v []*Tag) *CreateProjectInput { + s.Tags = v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetProjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetProjectsInput"} - if s.Names == nil { - invalidParams.Add(request.NewErrParamRequired("Names")) - } - if s.Names != nil && len(s.Names) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Names", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetTimeoutInMinutes sets the TimeoutInMinutes field's value. +func (s *CreateProjectInput) SetTimeoutInMinutes(v int64) *CreateProjectInput { + s.TimeoutInMinutes = &v + return s } -// SetNames sets the Names field's value. -func (s *BatchGetProjectsInput) SetNames(v []*string) *BatchGetProjectsInput { - s.Names = v +// SetVpcConfig sets the VpcConfig field's value. 
+func (s *CreateProjectInput) SetVpcConfig(v *VpcConfig) *CreateProjectInput { + s.VpcConfig = v return s } -type BatchGetProjectsOutput struct { - _ struct{} `type:"structure"` - - // Information about the requested build projects. - Projects []*Project `locationName:"projects" type:"list"` +type CreateProjectOutput struct { + _ struct{} `type:"structure"` - // The names of build projects for which information could not be found. - ProjectsNotFound []*string `locationName:"projectsNotFound" min:"1" type:"list"` + // Information about the build project that was created. + Project *Project `locationName:"project" type:"structure"` } // String returns the string representation -func (s BatchGetProjectsOutput) String() string { +func (s CreateProjectOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchGetProjectsOutput) GoString() string { +func (s CreateProjectOutput) GoString() string { return s.String() } -// SetProjects sets the Projects field's value. -func (s *BatchGetProjectsOutput) SetProjects(v []*Project) *BatchGetProjectsOutput { - s.Projects = v - return s -} - -// SetProjectsNotFound sets the ProjectsNotFound field's value. -func (s *BatchGetProjectsOutput) SetProjectsNotFound(v []*string) *BatchGetProjectsOutput { - s.ProjectsNotFound = v +// SetProject sets the Project field's value. +func (s *CreateProjectOutput) SetProject(v *Project) *CreateProjectOutput { + s.Project = v return s } -type BatchGetReportGroupsInput struct { +type CreateReportGroupInput struct { _ struct{} `type:"structure"` - // An array of report group ARNs that identify the report groups to return. + // A ReportExportConfig object that contains information about where the report + // group test results are exported. // - // ReportGroupArns is a required field - ReportGroupArns []*string `locationName:"reportGroupArns" min:"1" type:"list" required:"true"` + // ExportConfig is a required field + ExportConfig *ReportExportConfig `locationName:"exportConfig" type:"structure" required:"true"` + + // The name of the report group. + // + // Name is a required field + Name *string `locationName:"name" min:"2" type:"string" required:"true"` + + // A list of tag key and value pairs associated with this report group. + // + // These tags are available for use by AWS services that support AWS CodeBuild + // report group tags. + Tags []*Tag `locationName:"tags" type:"list"` + + // The type of report group. + // + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"ReportType"` } // String returns the string representation -func (s BatchGetReportGroupsInput) String() string { +func (s CreateReportGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchGetReportGroupsInput) GoString() string { +func (s CreateReportGroupInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *BatchGetReportGroupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetReportGroupsInput"} - if s.ReportGroupArns == nil { - invalidParams.Add(request.NewErrParamRequired("ReportGroupArns")) +func (s *CreateReportGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReportGroupInput"} + if s.ExportConfig == nil { + invalidParams.Add(request.NewErrParamRequired("ExportConfig")) } - if s.ReportGroupArns != nil && len(s.ReportGroupArns) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ReportGroupArns", 1)) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.ExportConfig != nil { + if err := s.ExportConfig.Validate(); err != nil { + invalidParams.AddNested("ExportConfig", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -3108,72 +6763,99 @@ func (s *BatchGetReportGroupsInput) Validate() error { return nil } -// SetReportGroupArns sets the ReportGroupArns field's value. -func (s *BatchGetReportGroupsInput) SetReportGroupArns(v []*string) *BatchGetReportGroupsInput { - s.ReportGroupArns = v +// SetExportConfig sets the ExportConfig field's value. +func (s *CreateReportGroupInput) SetExportConfig(v *ReportExportConfig) *CreateReportGroupInput { + s.ExportConfig = v return s } -type BatchGetReportGroupsOutput struct { - _ struct{} `type:"structure"` +// SetName sets the Name field's value. +func (s *CreateReportGroupInput) SetName(v string) *CreateReportGroupInput { + s.Name = &v + return s +} - // The array of report groups returned by BatchGetReportGroups. - ReportGroups []*ReportGroup `locationName:"reportGroups" min:"1" type:"list"` +// SetTags sets the Tags field's value. +func (s *CreateReportGroupInput) SetTags(v []*Tag) *CreateReportGroupInput { + s.Tags = v + return s +} - // An array of ARNs passed to BatchGetReportGroups that are not associated with - // a ReportGroup. - ReportGroupsNotFound []*string `locationName:"reportGroupsNotFound" min:"1" type:"list"` +// SetType sets the Type field's value. +func (s *CreateReportGroupInput) SetType(v string) *CreateReportGroupInput { + s.Type = &v + return s +} + +type CreateReportGroupOutput struct { + _ struct{} `type:"structure"` + + // Information about the report group that was created. + ReportGroup *ReportGroup `locationName:"reportGroup" type:"structure"` } // String returns the string representation -func (s BatchGetReportGroupsOutput) String() string { +func (s CreateReportGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchGetReportGroupsOutput) GoString() string { +func (s CreateReportGroupOutput) GoString() string { return s.String() } -// SetReportGroups sets the ReportGroups field's value. -func (s *BatchGetReportGroupsOutput) SetReportGroups(v []*ReportGroup) *BatchGetReportGroupsOutput { - s.ReportGroups = v - return s -} - -// SetReportGroupsNotFound sets the ReportGroupsNotFound field's value. 
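// ---------------------------------------------------------------------------
// Editorial usage sketch, not part of the generated SDK: shows how the
// CreateProjectInput setters added earlier in this hunk compose into a minimal,
// client-side-valid request. The project/role names, the repository URL, and the
// ProjectArtifacts/ProjectEnvironment/ProjectSource field values come from the
// upstream SDK and are illustrative assumptions, not taken from this changeset.
// Assumes imports of "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/codebuild".
func exampleCreateProjectInput() (*codebuild.CreateProjectInput, error) {
	in := &codebuild.CreateProjectInput{}
	in.SetName("sample-project"). // required, minimum length 2
		SetServiceRole("arn:aws:iam::123456789012:role/sample-codebuild-role"). // required
		SetArtifacts(&codebuild.ProjectArtifacts{ // required
			Type: aws.String("NO_ARTIFACTS"), // assumed ArtifactsType value
		}).
		SetEnvironment(&codebuild.ProjectEnvironment{ // required
			ComputeType: aws.String("BUILD_GENERAL1_SMALL"),
			Image:       aws.String("aws/codebuild/standard:4.0"),
			Type:        aws.String("LINUX_CONTAINER"),
		}).
		SetSource(&codebuild.ProjectSource{ // required
			Type:     aws.String("GITHUB"),
			Location: aws.String("https://github.com/octocat/Hello-World.git"),
		})
	// Validate mirrors the client-side constraints encoded in this file
	// (required fields, minimum lengths, nested Validate calls).
	if err := in.Validate(); err != nil {
		return nil, err
	}
	return in, nil
}
// ---------------------------------------------------------------------------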
-func (s *BatchGetReportGroupsOutput) SetReportGroupsNotFound(v []*string) *BatchGetReportGroupsOutput { - s.ReportGroupsNotFound = v +// SetReportGroup sets the ReportGroup field's value. +func (s *CreateReportGroupOutput) SetReportGroup(v *ReportGroup) *CreateReportGroupOutput { + s.ReportGroup = v return s } -type BatchGetReportsInput struct { +type CreateWebhookInput struct { _ struct{} `type:"structure"` - // An array of ARNs that identify the Report objects to return. + // A regular expression used to determine which repository branches are built + // when a webhook is triggered. If the name of a branch matches the regular + // expression, then it is built. If branchFilter is empty, then all branches + // are built. // - // ReportArns is a required field - ReportArns []*string `locationName:"reportArns" min:"1" type:"list" required:"true"` + // It is recommended that you use filterGroups instead of branchFilter. + BranchFilter *string `locationName:"branchFilter" type:"string"` + + // Specifies the type of build this webhook will trigger. + BuildType *string `locationName:"buildType" type:"string" enum:"WebhookBuildType"` + + // An array of arrays of WebhookFilter objects used to determine which webhooks + // are triggered. At least one WebhookFilter in the array must specify EVENT + // as its type. + // + // For a build to be triggered, at least one filter group in the filterGroups + // array must pass. For a filter group to pass, each of its filters must pass. + FilterGroups [][]*WebhookFilter `locationName:"filterGroups" type:"list"` + + // The name of the AWS CodeBuild project. + // + // ProjectName is a required field + ProjectName *string `locationName:"projectName" min:"2" type:"string" required:"true"` } // String returns the string representation -func (s BatchGetReportsInput) String() string { +func (s CreateWebhookInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchGetReportsInput) GoString() string { +func (s CreateWebhookInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *BatchGetReportsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchGetReportsInput"} - if s.ReportArns == nil { - invalidParams.Add(request.NewErrParamRequired("ReportArns")) +func (s *CreateWebhookInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateWebhookInput"} + if s.ProjectName == nil { + invalidParams.Add(request.NewErrParamRequired("ProjectName")) } - if s.ReportArns != nil && len(s.ReportArns) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ReportArns", 1)) + if s.ProjectName != nil && len(*s.ProjectName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ProjectName", 2)) } if invalidParams.Len() > 0 { @@ -3182,662 +6864,587 @@ func (s *BatchGetReportsInput) Validate() error { return nil } -// SetReportArns sets the ReportArns field's value. -func (s *BatchGetReportsInput) SetReportArns(v []*string) *BatchGetReportsInput { - s.ReportArns = v +// SetBranchFilter sets the BranchFilter field's value. +func (s *CreateWebhookInput) SetBranchFilter(v string) *CreateWebhookInput { + s.BranchFilter = &v return s } -type BatchGetReportsOutput struct { - _ struct{} `type:"structure"` - - // The array of Report objects returned by BatchGetReports. 
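// Editorial usage sketch, not part of the generated SDK: builds a CreateWebhookInput
// for the webhook API described above, using one filter group that contains the
// required EVENT filter. The WebhookFilter field names (Type, Pattern) and the
// project name are assumptions from the upstream SDK, not shown in this hunk.
// Assumes the same aws/codebuild imports as the sketch above.
func exampleCreateWebhookInput() (*codebuild.CreateWebhookInput, error) {
	in := &codebuild.CreateWebhookInput{}
	in.SetProjectName("sample-project") // required, minimum length 2
	in.SetFilterGroups([][]*codebuild.WebhookFilter{
		{
			// Each filter group must contain at least one EVENT filter.
			{Type: aws.String("EVENT"), Pattern: aws.String("PUSH")},
		},
	})
	if err := in.Validate(); err != nil {
		return nil, err
	}
	return in, nil
}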
- Reports []*Report `locationName:"reports" min:"1" type:"list"` - - // An array of ARNs passed to BatchGetReportGroups that are not associated with - // a Report. - ReportsNotFound []*string `locationName:"reportsNotFound" min:"1" type:"list"` -} - -// String returns the string representation -func (s BatchGetReportsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BatchGetReportsOutput) GoString() string { - return s.String() +// SetBuildType sets the BuildType field's value. +func (s *CreateWebhookInput) SetBuildType(v string) *CreateWebhookInput { + s.BuildType = &v + return s } -// SetReports sets the Reports field's value. -func (s *BatchGetReportsOutput) SetReports(v []*Report) *BatchGetReportsOutput { - s.Reports = v +// SetFilterGroups sets the FilterGroups field's value. +func (s *CreateWebhookInput) SetFilterGroups(v [][]*WebhookFilter) *CreateWebhookInput { + s.FilterGroups = v return s } -// SetReportsNotFound sets the ReportsNotFound field's value. -func (s *BatchGetReportsOutput) SetReportsNotFound(v []*string) *BatchGetReportsOutput { - s.ReportsNotFound = v +// SetProjectName sets the ProjectName field's value. +func (s *CreateWebhookInput) SetProjectName(v string) *CreateWebhookInput { + s.ProjectName = &v return s } -// Information about a build. -type Build struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the build. - Arn *string `locationName:"arn" min:"1" type:"string"` - - // Information about the output artifacts for the build. - Artifacts *BuildArtifacts `locationName:"artifacts" type:"structure"` - - // Whether the build is complete. True if complete; otherwise, false. - BuildComplete *bool `locationName:"buildComplete" type:"boolean"` - - // The number of the build. For each project, the buildNumber of its first build - // is 1. The buildNumber of each subsequent build is incremented by 1. If a - // build is deleted, the buildNumber of other builds does not change. - BuildNumber *int64 `locationName:"buildNumber" type:"long"` - - // The current status of the build. Valid values include: - // - // * FAILED: The build failed. - // - // * FAULT: The build faulted. - // - // * IN_PROGRESS: The build is still in progress. - // - // * STOPPED: The build stopped. - // - // * SUCCEEDED: The build succeeded. - // - // * TIMED_OUT: The build timed out. - BuildStatus *string `locationName:"buildStatus" type:"string" enum:"StatusType"` - - // Information about the cache for the build. - Cache *ProjectCache `locationName:"cache" type:"structure"` - - // The current build phase. - CurrentPhase *string `locationName:"currentPhase" type:"string"` - - // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be - // used for encrypting the build output artifacts. - // - // You can use a cross-account KMS key to encrypt the build output artifacts - // if your service role has permission to that key. - // - // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, - // the CMK's alias (using the format alias/alias-name ). - EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` +type CreateWebhookOutput struct { + _ struct{} `type:"structure"` - // When the build process ended, expressed in Unix time format. - EndTime *time.Time `locationName:"endTime" type:"timestamp"` + // Information about a webhook that connects repository events to a build project + // in AWS CodeBuild. 
+ Webhook *Webhook `locationName:"webhook" type:"structure"` +} - // Information about the build environment for this build. - Environment *ProjectEnvironment `locationName:"environment" type:"structure"` +// String returns the string representation +func (s CreateWebhookOutput) String() string { + return awsutil.Prettify(s) +} - // A list of exported environment variables for this build. - ExportedEnvironmentVariables []*ExportedEnvironmentVariable `locationName:"exportedEnvironmentVariables" type:"list"` +// GoString returns the string representation +func (s CreateWebhookOutput) GoString() string { + return s.String() +} - // An array of ProjectFileSystemLocation objects for a CodeBuild build project. - // A ProjectFileSystemLocation object specifies the identifier, location, mountOptions, - // mountPoint, and type of a file system created using Amazon Elastic File System. - FileSystemLocations []*ProjectFileSystemLocation `locationName:"fileSystemLocations" type:"list"` +// SetWebhook sets the Webhook field's value. +func (s *CreateWebhookOutput) SetWebhook(v *Webhook) *CreateWebhookOutput { + s.Webhook = v + return s +} - // The unique ID for the build. - Id *string `locationName:"id" min:"1" type:"string"` +// Contains information about the debug session for a build. For more information, +// see Viewing a running build in Session Manager (https://docs.aws.amazon.com/codebuild/latest/userguide/session-manager.html). +type DebugSession struct { + _ struct{} `type:"structure"` - // The entity that started the build. Valid values include: - // - // * If AWS CodePipeline started the build, the pipeline's name (for example, - // codepipeline/my-demo-pipeline). - // - // * If an AWS Identity and Access Management (IAM) user started the build, - // the user's name (for example, MyUserName). - // - // * If the Jenkins plugin for AWS CodeBuild started the build, the string - // CodeBuild-Jenkins-Plugin. - Initiator *string `locationName:"initiator" type:"string"` + // Specifies if session debugging is enabled for this build. + SessionEnabled *bool `locationName:"sessionEnabled" type:"boolean"` - // Information about the build's logs in Amazon CloudWatch Logs. - Logs *LogsLocation `locationName:"logs" type:"structure"` + // Contains the identifier of the Session Manager session used for the build. + // To work with the paused build, you open this session to examine, control, + // and resume the build. + SessionTarget *string `locationName:"sessionTarget" min:"1" type:"string"` +} - // Describes a network interface. - NetworkInterface *NetworkInterface `locationName:"networkInterface" type:"structure"` +// String returns the string representation +func (s DebugSession) String() string { + return awsutil.Prettify(s) +} - // Information about all previous build phases that are complete and information - // about any current build phase that is not yet complete. - Phases []*BuildPhase `locationName:"phases" type:"list"` +// GoString returns the string representation +func (s DebugSession) GoString() string { + return s.String() +} - // The name of the AWS CodeBuild project. - ProjectName *string `locationName:"projectName" min:"1" type:"string"` +// SetSessionEnabled sets the SessionEnabled field's value. +func (s *DebugSession) SetSessionEnabled(v bool) *DebugSession { + s.SessionEnabled = &v + return s +} - // The number of minutes a build is allowed to be queued before it times out. 
- QueuedTimeoutInMinutes *int64 `locationName:"queuedTimeoutInMinutes" type:"integer"` +// SetSessionTarget sets the SessionTarget field's value. +func (s *DebugSession) SetSessionTarget(v string) *DebugSession { + s.SessionTarget = &v + return s +} - // An array of the ARNs associated with this build's reports. - ReportArns []*string `locationName:"reportArns" type:"list"` +type DeleteBuildBatchInput struct { + _ struct{} `type:"structure"` - // An identifier for the version of this build's source code. - // - // * For AWS CodeCommit, GitHub, GitHub Enterprise, and BitBucket, the commit - // ID. + // The identifier of the batch build to delete. // - // * For AWS CodePipeline, the source revision provided by AWS CodePipeline. - // - // * For Amazon Simple Storage Service (Amazon S3), this does not apply. - ResolvedSourceVersion *string `locationName:"resolvedSourceVersion" min:"1" type:"string"` + // Id is a required field + Id *string `locationName:"id" min:"1" type:"string" required:"true"` +} - // An array of ProjectArtifacts objects. - SecondaryArtifacts []*BuildArtifacts `locationName:"secondaryArtifacts" type:"list"` +// String returns the string representation +func (s DeleteBuildBatchInput) String() string { + return awsutil.Prettify(s) +} - // An array of ProjectSourceVersion objects. Each ProjectSourceVersion must - // be one of: - // - // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. - // - // * For GitHub: the commit ID, pull request ID, branch name, or tag name - // that corresponds to the version of the source code you want to build. - // If a pull request ID is specified, it must use the format pr/pull-request-ID - // (for example, pr/25). If a branch name is specified, the branch's HEAD - // commit ID is used. If not specified, the default branch's HEAD commit - // ID is used. - // - // * For Bitbucket: the commit ID, branch name, or tag name that corresponds - // to the version of the source code you want to build. If a branch name - // is specified, the branch's HEAD commit ID is used. If not specified, the - // default branch's HEAD commit ID is used. - // - // * For Amazon Simple Storage Service (Amazon S3): the version ID of the - // object that represents the build input ZIP file to use. - SecondarySourceVersions []*ProjectSourceVersion `locationName:"secondarySourceVersions" type:"list"` +// GoString returns the string representation +func (s DeleteBuildBatchInput) GoString() string { + return s.String() +} - // An array of ProjectSource objects. - SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBuildBatchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBuildBatchInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } - // The name of a service role used for this build. - ServiceRole *string `locationName:"serviceRole" min:"1" type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Information about the source code to be built. - Source *ProjectSource `locationName:"source" type:"structure"` +// SetId sets the Id field's value. +func (s *DeleteBuildBatchInput) SetId(v string) *DeleteBuildBatchInput { + s.Id = &v + return s +} - // Any version identifier for the version of the source code to be built. 
If - // sourceVersion is specified at the project level, then this sourceVersion - // (at the build level) takes precedence. - // - // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the AWS CodeBuild User Guide. - SourceVersion *string `locationName:"sourceVersion" min:"1" type:"string"` +type DeleteBuildBatchOutput struct { + _ struct{} `type:"structure"` - // When the build process started, expressed in Unix time format. - StartTime *time.Time `locationName:"startTime" type:"timestamp"` + // An array of strings that contain the identifiers of the builds that were + // deleted. + BuildsDeleted []*string `locationName:"buildsDeleted" min:"1" type:"list"` - // How long, in minutes, for AWS CodeBuild to wait before timing out this build - // if it does not get marked as completed. - TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" type:"integer"` + // An array of BuildNotDeleted objects that specify the builds that could not + // be deleted. + BuildsNotDeleted []*BuildNotDeleted `locationName:"buildsNotDeleted" type:"list"` - // If your AWS CodeBuild project accesses resources in an Amazon VPC, you provide - // this parameter that identifies the VPC ID and the list of security group - // IDs and subnet IDs. The security groups and subnets must belong to the same - // VPC. You must provide at least one security group and one subnet ID. - VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` + // The status code. + StatusCode *string `locationName:"statusCode" type:"string"` } // String returns the string representation -func (s Build) String() string { +func (s DeleteBuildBatchOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Build) GoString() string { +func (s DeleteBuildBatchOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *Build) SetArn(v string) *Build { - s.Arn = &v +// SetBuildsDeleted sets the BuildsDeleted field's value. +func (s *DeleteBuildBatchOutput) SetBuildsDeleted(v []*string) *DeleteBuildBatchOutput { + s.BuildsDeleted = v return s } -// SetArtifacts sets the Artifacts field's value. -func (s *Build) SetArtifacts(v *BuildArtifacts) *Build { - s.Artifacts = v +// SetBuildsNotDeleted sets the BuildsNotDeleted field's value. +func (s *DeleteBuildBatchOutput) SetBuildsNotDeleted(v []*BuildNotDeleted) *DeleteBuildBatchOutput { + s.BuildsNotDeleted = v return s } -// SetBuildComplete sets the BuildComplete field's value. -func (s *Build) SetBuildComplete(v bool) *Build { - s.BuildComplete = &v +// SetStatusCode sets the StatusCode field's value. +func (s *DeleteBuildBatchOutput) SetStatusCode(v string) *DeleteBuildBatchOutput { + s.StatusCode = &v return s } -// SetBuildNumber sets the BuildNumber field's value. -func (s *Build) SetBuildNumber(v int64) *Build { - s.BuildNumber = &v - return s +type DeleteProjectInput struct { + _ struct{} `type:"structure"` + + // The name of the build project. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` } -// SetBuildStatus sets the BuildStatus field's value. -func (s *Build) SetBuildStatus(v string) *Build { - s.BuildStatus = &v - return s +// String returns the string representation +func (s DeleteProjectInput) String() string { + return awsutil.Prettify(s) } -// SetCache sets the Cache field's value. 
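// Editorial usage sketch, not part of the generated SDK: validates a
// DeleteBuildBatchInput and reads the partial-failure information that
// DeleteBuildBatchOutput reports through BuildsNotDeleted. The svc client and the
// batch identifier are placeholders; the DeleteBuildBatch operation method itself
// is generated earlier in this file. Assumes the aws/codebuild imports noted above,
// plus "log".
func exampleDeleteBuildBatch(svc *codebuild.CodeBuild, batchID string) error {
	in := &codebuild.DeleteBuildBatchInput{}
	in.SetId(batchID) // required, minimum length 1
	if err := in.Validate(); err != nil {
		return err
	}
	out, err := svc.DeleteBuildBatch(in)
	if err != nil {
		return err
	}
	for _, nd := range out.BuildsNotDeleted {
		log.Printf("build %s not deleted: %s", aws.StringValue(nd.Id), aws.StringValue(nd.StatusCode))
	}
	return nil
}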
-func (s *Build) SetCache(v *ProjectCache) *Build { - s.Cache = v - return s +// GoString returns the string representation +func (s DeleteProjectInput) GoString() string { + return s.String() } -// SetCurrentPhase sets the CurrentPhase field's value. -func (s *Build) SetCurrentPhase(v string) *Build { - s.CurrentPhase = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteProjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteProjectInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteProjectInput) SetName(v string) *DeleteProjectInput { + s.Name = &v return s } -// SetEncryptionKey sets the EncryptionKey field's value. -func (s *Build) SetEncryptionKey(v string) *Build { - s.EncryptionKey = &v - return s +type DeleteProjectOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteProjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteProjectOutput) GoString() string { + return s.String() +} + +type DeleteReportGroupInput struct { + _ struct{} `type:"structure"` + + // The ARN of the report group to delete. + // + // Arn is a required field + Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` + + // If true, deletes any reports that belong to a report group before deleting + // the report group. + // + // If false, you must delete any reports in the report group. Use ListReportsForReportGroup + // (https://docs.aws.amazon.com/codebuild/latest/APIReference/API_ListReportsForReportGroup.html) + // to get the reports in a report group. Use DeleteReport (https://docs.aws.amazon.com/codebuild/latest/APIReference/API_DeleteReport.html) + // to delete the reports. If you call DeleteReportGroup for a report group that + // contains one or more reports, an exception is thrown. + DeleteReports *bool `locationName:"deleteReports" type:"boolean"` +} + +// String returns the string representation +func (s DeleteReportGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReportGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReportGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReportGroupInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetEndTime sets the EndTime field's value. -func (s *Build) SetEndTime(v time.Time) *Build { - s.EndTime = &v +// SetArn sets the Arn field's value. +func (s *DeleteReportGroupInput) SetArn(v string) *DeleteReportGroupInput { + s.Arn = &v return s } -// SetEnvironment sets the Environment field's value. -func (s *Build) SetEnvironment(v *ProjectEnvironment) *Build { - s.Environment = v +// SetDeleteReports sets the DeleteReports field's value. 
+func (s *DeleteReportGroupInput) SetDeleteReports(v bool) *DeleteReportGroupInput { + s.DeleteReports = &v return s } -// SetExportedEnvironmentVariables sets the ExportedEnvironmentVariables field's value. -func (s *Build) SetExportedEnvironmentVariables(v []*ExportedEnvironmentVariable) *Build { - s.ExportedEnvironmentVariables = v - return s +type DeleteReportGroupOutput struct { + _ struct{} `type:"structure"` } -// SetFileSystemLocations sets the FileSystemLocations field's value. -func (s *Build) SetFileSystemLocations(v []*ProjectFileSystemLocation) *Build { - s.FileSystemLocations = v - return s +// String returns the string representation +func (s DeleteReportGroupOutput) String() string { + return awsutil.Prettify(s) } -// SetId sets the Id field's value. -func (s *Build) SetId(v string) *Build { - s.Id = &v - return s +// GoString returns the string representation +func (s DeleteReportGroupOutput) GoString() string { + return s.String() } -// SetInitiator sets the Initiator field's value. -func (s *Build) SetInitiator(v string) *Build { - s.Initiator = &v - return s -} +type DeleteReportInput struct { + _ struct{} `type:"structure"` -// SetLogs sets the Logs field's value. -func (s *Build) SetLogs(v *LogsLocation) *Build { - s.Logs = v - return s + // The ARN of the report to delete. + // + // Arn is a required field + Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` } -// SetNetworkInterface sets the NetworkInterface field's value. -func (s *Build) SetNetworkInterface(v *NetworkInterface) *Build { - s.NetworkInterface = v - return s +// String returns the string representation +func (s DeleteReportInput) String() string { + return awsutil.Prettify(s) } -// SetPhases sets the Phases field's value. -func (s *Build) SetPhases(v []*BuildPhase) *Build { - s.Phases = v - return s +// GoString returns the string representation +func (s DeleteReportInput) GoString() string { + return s.String() } -// SetProjectName sets the ProjectName field's value. -func (s *Build) SetProjectName(v string) *Build { - s.ProjectName = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReportInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } -// SetQueuedTimeoutInMinutes sets the QueuedTimeoutInMinutes field's value. -func (s *Build) SetQueuedTimeoutInMinutes(v int64) *Build { - s.QueuedTimeoutInMinutes = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetReportArns sets the ReportArns field's value. -func (s *Build) SetReportArns(v []*string) *Build { - s.ReportArns = v +// SetArn sets the Arn field's value. +func (s *DeleteReportInput) SetArn(v string) *DeleteReportInput { + s.Arn = &v return s } -// SetResolvedSourceVersion sets the ResolvedSourceVersion field's value. -func (s *Build) SetResolvedSourceVersion(v string) *Build { - s.ResolvedSourceVersion = &v - return s +type DeleteReportOutput struct { + _ struct{} `type:"structure"` } -// SetSecondaryArtifacts sets the SecondaryArtifacts field's value. 
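// Editorial usage sketch, not part of the generated SDK: a DeleteReportGroupInput
// that opts in to deleting the group's reports in the same call, as described in
// the DeleteReports documentation above. The ARN is a placeholder value. Assumes
// the codebuild import used in the sketches above.
func exampleDeleteReportGroupInput() (*codebuild.DeleteReportGroupInput, error) {
	in := &codebuild.DeleteReportGroupInput{}
	in.SetArn("arn:aws:codebuild:us-east-1:123456789012:report-group/sample") // required, minimum length 1
	in.SetDeleteReports(true) // without this, deleting a non-empty report group raises an exception
	if err := in.Validate(); err != nil {
		return nil, err
	}
	return in, nil
}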
-func (s *Build) SetSecondaryArtifacts(v []*BuildArtifacts) *Build { - s.SecondaryArtifacts = v - return s +// String returns the string representation +func (s DeleteReportOutput) String() string { + return awsutil.Prettify(s) } -// SetSecondarySourceVersions sets the SecondarySourceVersions field's value. -func (s *Build) SetSecondarySourceVersions(v []*ProjectSourceVersion) *Build { - s.SecondarySourceVersions = v - return s +// GoString returns the string representation +func (s DeleteReportOutput) GoString() string { + return s.String() } -// SetSecondarySources sets the SecondarySources field's value. -func (s *Build) SetSecondarySources(v []*ProjectSource) *Build { - s.SecondarySources = v - return s -} +type DeleteResourcePolicyInput struct { + _ struct{} `type:"structure"` -// SetServiceRole sets the ServiceRole field's value. -func (s *Build) SetServiceRole(v string) *Build { - s.ServiceRole = &v - return s + // The ARN of the resource that is associated with the resource policy. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` } -// SetSource sets the Source field's value. -func (s *Build) SetSource(v *ProjectSource) *Build { - s.Source = v - return s +// String returns the string representation +func (s DeleteResourcePolicyInput) String() string { + return awsutil.Prettify(s) } -// SetSourceVersion sets the SourceVersion field's value. -func (s *Build) SetSourceVersion(v string) *Build { - s.SourceVersion = &v - return s +// GoString returns the string representation +func (s DeleteResourcePolicyInput) GoString() string { + return s.String() } -// SetStartTime sets the StartTime field's value. -func (s *Build) SetStartTime(v time.Time) *Build { - s.StartTime = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteResourcePolicyInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } -// SetTimeoutInMinutes sets the TimeoutInMinutes field's value. -func (s *Build) SetTimeoutInMinutes(v int64) *Build { - s.TimeoutInMinutes = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetVpcConfig sets the VpcConfig field's value. -func (s *Build) SetVpcConfig(v *VpcConfig) *Build { - s.VpcConfig = v +// SetResourceArn sets the ResourceArn field's value. +func (s *DeleteResourcePolicyInput) SetResourceArn(v string) *DeleteResourcePolicyInput { + s.ResourceArn = &v return s } -// Information about build output artifacts. -type BuildArtifacts struct { +type DeleteResourcePolicyOutput struct { _ struct{} `type:"structure"` - - // An identifier for this artifact definition. - ArtifactIdentifier *string `locationName:"artifactIdentifier" type:"string"` - - // Information that tells you if encryption for build artifacts is disabled. - EncryptionDisabled *bool `locationName:"encryptionDisabled" type:"boolean"` - - // Information about the location of the build artifacts. - Location *string `locationName:"location" type:"string"` - - // The MD5 hash of the build artifact. - // - // You can use this hash along with a checksum tool to confirm file integrity - // and authenticity. 
- // - // This value is available only if the build project's packaging value is set - // to ZIP. - Md5sum *string `locationName:"md5sum" type:"string"` - - // If this flag is set, a name specified in the buildspec file overrides the - // artifact name. The name specified in a buildspec file is calculated at build - // time and uses the Shell Command Language. For example, you can append a date - // and time to your artifact name so that it is always unique. - OverrideArtifactName *bool `locationName:"overrideArtifactName" type:"boolean"` - - // The SHA-256 hash of the build artifact. - // - // You can use this hash along with a checksum tool to confirm file integrity - // and authenticity. - // - // This value is available only if the build project's packaging value is set - // to ZIP. - Sha256sum *string `locationName:"sha256sum" type:"string"` } // String returns the string representation -func (s BuildArtifacts) String() string { +func (s DeleteResourcePolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BuildArtifacts) GoString() string { +func (s DeleteResourcePolicyOutput) GoString() string { return s.String() } -// SetArtifactIdentifier sets the ArtifactIdentifier field's value. -func (s *BuildArtifacts) SetArtifactIdentifier(v string) *BuildArtifacts { - s.ArtifactIdentifier = &v - return s -} +type DeleteSourceCredentialsInput struct { + _ struct{} `type:"structure"` -// SetEncryptionDisabled sets the EncryptionDisabled field's value. -func (s *BuildArtifacts) SetEncryptionDisabled(v bool) *BuildArtifacts { - s.EncryptionDisabled = &v - return s + // The Amazon Resource Name (ARN) of the token. + // + // Arn is a required field + Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` } -// SetLocation sets the Location field's value. -func (s *BuildArtifacts) SetLocation(v string) *BuildArtifacts { - s.Location = &v - return s +// String returns the string representation +func (s DeleteSourceCredentialsInput) String() string { + return awsutil.Prettify(s) } -// SetMd5sum sets the Md5sum field's value. -func (s *BuildArtifacts) SetMd5sum(v string) *BuildArtifacts { - s.Md5sum = &v - return s +// GoString returns the string representation +func (s DeleteSourceCredentialsInput) GoString() string { + return s.String() } -// SetOverrideArtifactName sets the OverrideArtifactName field's value. -func (s *BuildArtifacts) SetOverrideArtifactName(v bool) *BuildArtifacts { - s.OverrideArtifactName = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSourceCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSourceCredentialsInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSha256sum sets the Sha256sum field's value. -func (s *BuildArtifacts) SetSha256sum(v string) *BuildArtifacts { - s.Sha256sum = &v +// SetArn sets the Arn field's value. +func (s *DeleteSourceCredentialsInput) SetArn(v string) *DeleteSourceCredentialsInput { + s.Arn = &v return s } -// Information about a build that could not be successfully deleted. -type BuildNotDeleted struct { +type DeleteSourceCredentialsOutput struct { _ struct{} `type:"structure"` - // The ID of the build that could not be successfully deleted. 
- Id *string `locationName:"id" min:"1" type:"string"` - - // Additional information about the build that could not be successfully deleted. - StatusCode *string `locationName:"statusCode" type:"string"` + // The Amazon Resource Name (ARN) of the token. + Arn *string `locationName:"arn" min:"1" type:"string"` } // String returns the string representation -func (s BuildNotDeleted) String() string { +func (s DeleteSourceCredentialsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BuildNotDeleted) GoString() string { +func (s DeleteSourceCredentialsOutput) GoString() string { return s.String() } -// SetId sets the Id field's value. -func (s *BuildNotDeleted) SetId(v string) *BuildNotDeleted { - s.Id = &v - return s -} - -// SetStatusCode sets the StatusCode field's value. -func (s *BuildNotDeleted) SetStatusCode(v string) *BuildNotDeleted { - s.StatusCode = &v +// SetArn sets the Arn field's value. +func (s *DeleteSourceCredentialsOutput) SetArn(v string) *DeleteSourceCredentialsOutput { + s.Arn = &v return s } -// Information about a stage for a build. -type BuildPhase struct { +type DeleteWebhookInput struct { _ struct{} `type:"structure"` - // Additional information about a build phase, especially to help troubleshoot - // a failed build. - Contexts []*PhaseContext `locationName:"contexts" type:"list"` - - // How long, in seconds, between the starting and ending times of the build's - // phase. - DurationInSeconds *int64 `locationName:"durationInSeconds" type:"long"` - - // When the build phase ended, expressed in Unix time format. - EndTime *time.Time `locationName:"endTime" type:"timestamp"` - - // The current status of the build phase. Valid values include: - // - // * FAILED: The build phase failed. - // - // * FAULT: The build phase faulted. - // - // * IN_PROGRESS: The build phase is still in progress. - // - // * QUEUED: The build has been submitted and is queued behind other submitted - // builds. - // - // * STOPPED: The build phase stopped. - // - // * SUCCEEDED: The build phase succeeded. - // - // * TIMED_OUT: The build phase timed out. - PhaseStatus *string `locationName:"phaseStatus" type:"string" enum:"StatusType"` - - // The name of the build phase. Valid values include: - // - // * BUILD: Core build activities typically occur in this build phase. - // - // * COMPLETED: The build has been completed. - // - // * DOWNLOAD_SOURCE: Source code is being downloaded in this build phase. - // - // * FINALIZING: The build process is completing in this build phase. - // - // * INSTALL: Installation activities typically occur in this build phase. - // - // * POST_BUILD: Post-build activities typically occur in this build phase. - // - // * PRE_BUILD: Pre-build activities typically occur in this build phase. - // - // * PROVISIONING: The build environment is being set up. - // - // * QUEUED: The build has been submitted and is queued behind other submitted - // builds. - // - // * SUBMITTED: The build has been submitted. + // The name of the AWS CodeBuild project. // - // * UPLOAD_ARTIFACTS: Build output artifacts are being uploaded to the output - // location. - PhaseType *string `locationName:"phaseType" type:"string" enum:"BuildPhaseType"` - - // When the build phase started, expressed in Unix time format. 
- StartTime *time.Time `locationName:"startTime" type:"timestamp"` + // ProjectName is a required field + ProjectName *string `locationName:"projectName" min:"2" type:"string" required:"true"` } // String returns the string representation -func (s BuildPhase) String() string { +func (s DeleteWebhookInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BuildPhase) GoString() string { +func (s DeleteWebhookInput) GoString() string { return s.String() } -// SetContexts sets the Contexts field's value. -func (s *BuildPhase) SetContexts(v []*PhaseContext) *BuildPhase { - s.Contexts = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteWebhookInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteWebhookInput"} + if s.ProjectName == nil { + invalidParams.Add(request.NewErrParamRequired("ProjectName")) + } + if s.ProjectName != nil && len(*s.ProjectName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ProjectName", 2)) + } -// SetDurationInSeconds sets the DurationInSeconds field's value. -func (s *BuildPhase) SetDurationInSeconds(v int64) *BuildPhase { - s.DurationInSeconds = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetEndTime sets the EndTime field's value. -func (s *BuildPhase) SetEndTime(v time.Time) *BuildPhase { - s.EndTime = &v +// SetProjectName sets the ProjectName field's value. +func (s *DeleteWebhookInput) SetProjectName(v string) *DeleteWebhookInput { + s.ProjectName = &v return s } -// SetPhaseStatus sets the PhaseStatus field's value. -func (s *BuildPhase) SetPhaseStatus(v string) *BuildPhase { - s.PhaseStatus = &v - return s +type DeleteWebhookOutput struct { + _ struct{} `type:"structure"` } -// SetPhaseType sets the PhaseType field's value. -func (s *BuildPhase) SetPhaseType(v string) *BuildPhase { - s.PhaseType = &v - return s +// String returns the string representation +func (s DeleteWebhookOutput) String() string { + return awsutil.Prettify(s) } -// SetStartTime sets the StartTime field's value. -func (s *BuildPhase) SetStartTime(v time.Time) *BuildPhase { - s.StartTime = &v - return s +// GoString returns the string representation +func (s DeleteWebhookOutput) GoString() string { + return s.String() } -// Information about Amazon CloudWatch Logs for a build project. -type CloudWatchLogsConfig struct { +type DescribeCodeCoveragesInput struct { _ struct{} `type:"structure"` - // The group name of the logs in Amazon CloudWatch Logs. For more information, - // see Working with Log Groups and Log Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html). - GroupName *string `locationName:"groupName" type:"string"` + // The maximum line coverage percentage to report. + MaxLineCoveragePercentage *float64 `locationName:"maxLineCoveragePercentage" type:"double"` - // The current status of the logs in Amazon CloudWatch Logs for a build project. - // Valid values are: + // The maximum number of results to return. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The minimum line coverage percentage to report. + MinLineCoveragePercentage *float64 `locationName:"minLineCoveragePercentage" type:"double"` + + // The nextToken value returned from a previous call to DescribeCodeCoverages. + // This specifies the next item to return. To return the beginning of the list, + // exclude this parameter. 
+ NextToken *string `locationName:"nextToken" type:"string"` + + // The ARN of the report for which test cases are returned. // - // * ENABLED: Amazon CloudWatch Logs are enabled for this build project. + // ReportArn is a required field + ReportArn *string `locationName:"reportArn" min:"1" type:"string" required:"true"` + + // Specifies how the results are sorted. Possible values are: // - // * DISABLED: Amazon CloudWatch Logs are not enabled for this build project. + // FILE_PATH // - // Status is a required field - Status *string `locationName:"status" type:"string" required:"true" enum:"LogsConfigStatusType"` + // The results are sorted by file path. + // + // LINE_COVERAGE_PERCENTAGE + // + // The results are sorted by the percentage of lines that are covered. + SortBy *string `locationName:"sortBy" type:"string" enum:"ReportCodeCoverageSortByType"` - // The prefix of the stream name of the Amazon CloudWatch Logs. For more information, - // see Working with Log Groups and Log Streams (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/Working-with-log-groups-and-streams.html). - StreamName *string `locationName:"streamName" type:"string"` + // Specifies if the results are sorted in ascending or descending order. + SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` } // String returns the string representation -func (s CloudWatchLogsConfig) String() string { +func (s DescribeCodeCoveragesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CloudWatchLogsConfig) GoString() string { +func (s DescribeCodeCoveragesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CloudWatchLogsConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CloudWatchLogsConfig"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) +func (s *DescribeCodeCoveragesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCodeCoveragesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ReportArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReportArn")) + } + if s.ReportArn != nil && len(*s.ReportArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReportArn", 1)) } if invalidParams.Len() > 0 { @@ -3846,252 +7453,125 @@ func (s *CloudWatchLogsConfig) Validate() error { return nil } -// SetGroupName sets the GroupName field's value. -func (s *CloudWatchLogsConfig) SetGroupName(v string) *CloudWatchLogsConfig { - s.GroupName = &v +// SetMaxLineCoveragePercentage sets the MaxLineCoveragePercentage field's value. +func (s *DescribeCodeCoveragesInput) SetMaxLineCoveragePercentage(v float64) *DescribeCodeCoveragesInput { + s.MaxLineCoveragePercentage = &v return s } -// SetStatus sets the Status field's value. -func (s *CloudWatchLogsConfig) SetStatus(v string) *CloudWatchLogsConfig { - s.Status = &v +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeCodeCoveragesInput) SetMaxResults(v int64) *DescribeCodeCoveragesInput { + s.MaxResults = &v return s } -// SetStreamName sets the StreamName field's value. -func (s *CloudWatchLogsConfig) SetStreamName(v string) *CloudWatchLogsConfig { - s.StreamName = &v +// SetMinLineCoveragePercentage sets the MinLineCoveragePercentage field's value. 
+func (s *DescribeCodeCoveragesInput) SetMinLineCoveragePercentage(v float64) *DescribeCodeCoveragesInput { + s.MinLineCoveragePercentage = &v return s } -type CreateProjectInput struct { - _ struct{} `type:"structure"` - - // Information about the build output artifacts for the build project. - // - // Artifacts is a required field - Artifacts *ProjectArtifacts `locationName:"artifacts" type:"structure" required:"true"` - - // Set this to true to generate a publicly accessible URL for your project's - // build badge. - BadgeEnabled *bool `locationName:"badgeEnabled" type:"boolean"` - - // Stores recently used information so that it can be quickly accessed at a - // later time. - Cache *ProjectCache `locationName:"cache" type:"structure"` - - // A description that makes the build project easy to identify. - Description *string `locationName:"description" type:"string"` - - // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be - // used for encrypting the build output artifacts. - // - // You can use a cross-account KMS key to encrypt the build output artifacts - // if your service role has permission to that key. - // - // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, - // the CMK's alias (using the format alias/alias-name ). - EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` +// SetNextToken sets the NextToken field's value. +func (s *DescribeCodeCoveragesInput) SetNextToken(v string) *DescribeCodeCoveragesInput { + s.NextToken = &v + return s +} - // Information about the build environment for the build project. - // - // Environment is a required field - Environment *ProjectEnvironment `locationName:"environment" type:"structure" required:"true"` +// SetReportArn sets the ReportArn field's value. +func (s *DescribeCodeCoveragesInput) SetReportArn(v string) *DescribeCodeCoveragesInput { + s.ReportArn = &v + return s +} - // An array of ProjectFileSystemLocation objects for a CodeBuild build project. - // A ProjectFileSystemLocation object specifies the identifier, location, mountOptions, - // mountPoint, and type of a file system created using Amazon Elastic File System. - FileSystemLocations []*ProjectFileSystemLocation `locationName:"fileSystemLocations" type:"list"` +// SetSortBy sets the SortBy field's value. +func (s *DescribeCodeCoveragesInput) SetSortBy(v string) *DescribeCodeCoveragesInput { + s.SortBy = &v + return s +} - // Information about logs for the build project. These can be logs in Amazon - // CloudWatch Logs, logs uploaded to a specified S3 bucket, or both. - LogsConfig *LogsConfig `locationName:"logsConfig" type:"structure"` +// SetSortOrder sets the SortOrder field's value. +func (s *DescribeCodeCoveragesInput) SetSortOrder(v string) *DescribeCodeCoveragesInput { + s.SortOrder = &v + return s +} - // The name of the build project. - // - // Name is a required field - Name *string `locationName:"name" min:"2" type:"string" required:"true"` +type DescribeCodeCoveragesOutput struct { + _ struct{} `type:"structure"` - // The number of minutes a build is allowed to be queued before it times out. - QueuedTimeoutInMinutes *int64 `locationName:"queuedTimeoutInMinutes" min:"5" type:"integer"` + // An array of CodeCoverage objects that contain the results. + CodeCoverages []*CodeCoverage `locationName:"codeCoverages" type:"list"` - // An array of ProjectArtifacts objects. 
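// Editorial usage sketch, not part of the generated SDK: pages through
// DescribeCodeCoverages by hand using the NextToken/MaxResults fields documented
// above, sorted by line-coverage percentage. The svc client is a placeholder and
// the DescribeCodeCoverages operation method is generated earlier in this file;
// "DESCENDING" is an assumed SortOrderType value. Assumes the aws/codebuild
// imports used in the sketches above.
func exampleDescribeCodeCoverages(svc *codebuild.CodeBuild, reportArn string) ([]*codebuild.CodeCoverage, error) {
	in := &codebuild.DescribeCodeCoveragesInput{}
	in.SetReportArn(reportArn)               // required, minimum length 1
	in.SetSortBy("LINE_COVERAGE_PERCENTAGE") // or "FILE_PATH"
	in.SetSortOrder("DESCENDING")
	in.SetMaxResults(50)
	if err := in.Validate(); err != nil {
		return nil, err
	}

	var coverages []*codebuild.CodeCoverage
	for {
		out, err := svc.DescribeCodeCoverages(in)
		if err != nil {
			return nil, err
		}
		coverages = append(coverages, out.CodeCoverages...)
		if out.NextToken == nil {
			return coverages, nil
		}
		in.SetNextToken(aws.StringValue(out.NextToken))
	}
}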
- SecondaryArtifacts []*ProjectArtifacts `locationName:"secondaryArtifacts" type:"list"` + // If there are more items to return, this contains a token that is passed to + // a subsequent call to DescribeCodeCoverages to retrieve the next set of items. + NextToken *string `locationName:"nextToken" type:"string"` +} - // An array of ProjectSourceVersion objects. If secondarySourceVersions is specified - // at the build level, then they take precedence over these secondarySourceVersions - // (at the project level). - SecondarySourceVersions []*ProjectSourceVersion `locationName:"secondarySourceVersions" type:"list"` +// String returns the string representation +func (s DescribeCodeCoveragesOutput) String() string { + return awsutil.Prettify(s) +} - // An array of ProjectSource objects. - SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` +// GoString returns the string representation +func (s DescribeCodeCoveragesOutput) GoString() string { + return s.String() +} - // The ARN of the AWS Identity and Access Management (IAM) role that enables - // AWS CodeBuild to interact with dependent AWS services on behalf of the AWS - // account. - // - // ServiceRole is a required field - ServiceRole *string `locationName:"serviceRole" min:"1" type:"string" required:"true"` +// SetCodeCoverages sets the CodeCoverages field's value. +func (s *DescribeCodeCoveragesOutput) SetCodeCoverages(v []*CodeCoverage) *DescribeCodeCoveragesOutput { + s.CodeCoverages = v + return s +} - // Information about the build input source code for the build project. - // - // Source is a required field - Source *ProjectSource `locationName:"source" type:"structure" required:"true"` +// SetNextToken sets the NextToken field's value. +func (s *DescribeCodeCoveragesOutput) SetNextToken(v string) *DescribeCodeCoveragesOutput { + s.NextToken = &v + return s +} - // A version of the build input to be built for this project. If not specified, - // the latest version is used. If specified, it must be one of: - // - // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. - // - // * For GitHub: the commit ID, pull request ID, branch name, or tag name - // that corresponds to the version of the source code you want to build. - // If a pull request ID is specified, it must use the format pr/pull-request-ID - // (for example pr/25). If a branch name is specified, the branch's HEAD - // commit ID is used. If not specified, the default branch's HEAD commit - // ID is used. - // - // * For Bitbucket: the commit ID, branch name, or tag name that corresponds - // to the version of the source code you want to build. If a branch name - // is specified, the branch's HEAD commit ID is used. If not specified, the - // default branch's HEAD commit ID is used. - // - // * For Amazon Simple Storage Service (Amazon S3): the version ID of the - // object that represents the build input ZIP file to use. - // - // If sourceVersion is specified at the build level, then that version takes - // precedence over this sourceVersion (at the project level). - // - // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the AWS CodeBuild User Guide. - SourceVersion *string `locationName:"sourceVersion" type:"string"` +type DescribeTestCasesInput struct { + _ struct{} `type:"structure"` - // A set of tags for this build project. 
- // - // These tags are available for use by AWS services that support AWS CodeBuild - // build project tags. - Tags []*Tag `locationName:"tags" type:"list"` + // A TestCaseFilter object used to filter the returned reports. + Filter *TestCaseFilter `locationName:"filter" type:"structure"` - // How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait - // before it times out any build that has not been marked as completed. The - // default is 60 minutes. - TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" min:"5" type:"integer"` + // The maximum number of paginated test cases returned per response. Use nextToken + // to iterate pages in the list of returned TestCase objects. The default value + // is 100. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` - // VpcConfig enables AWS CodeBuild to access resources in an Amazon VPC. - VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` + // During a previous call, the maximum number of items that can be returned + // is the value specified in maxResults. If there more items in the list, then + // a unique string called a nextToken is returned. To get the next batch of + // items in the list, call this operation again, adding the next token to the + // call. To get all of the items in the list, keep calling this operation with + // each subsequent next token that is returned, until no more next tokens are + // returned. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ARN of the report for which test cases are returned. + // + // ReportArn is a required field + ReportArn *string `locationName:"reportArn" type:"string" required:"true"` } // String returns the string representation -func (s CreateProjectInput) String() string { +func (s DescribeTestCasesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateProjectInput) GoString() string { +func (s DescribeTestCasesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateProjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateProjectInput"} - if s.Artifacts == nil { - invalidParams.Add(request.NewErrParamRequired("Artifacts")) - } - if s.EncryptionKey != nil && len(*s.EncryptionKey) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EncryptionKey", 1)) - } - if s.Environment == nil { - invalidParams.Add(request.NewErrParamRequired("Environment")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - if s.QueuedTimeoutInMinutes != nil && *s.QueuedTimeoutInMinutes < 5 { - invalidParams.Add(request.NewErrParamMinValue("QueuedTimeoutInMinutes", 5)) - } - if s.ServiceRole == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceRole")) - } - if s.ServiceRole != nil && len(*s.ServiceRole) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ServiceRole", 1)) - } - if s.Source == nil { - invalidParams.Add(request.NewErrParamRequired("Source")) - } - if s.TimeoutInMinutes != nil && *s.TimeoutInMinutes < 5 { - invalidParams.Add(request.NewErrParamMinValue("TimeoutInMinutes", 5)) - } - if s.Artifacts != nil { - if err := s.Artifacts.Validate(); err != nil { - invalidParams.AddNested("Artifacts", err.(request.ErrInvalidParams)) - } - } - if s.Cache != nil { - if err := s.Cache.Validate(); err != nil { - invalidParams.AddNested("Cache", err.(request.ErrInvalidParams)) - } - } - if s.Environment != nil { - if err := s.Environment.Validate(); err != nil { - invalidParams.AddNested("Environment", err.(request.ErrInvalidParams)) - } - } - if s.LogsConfig != nil { - if err := s.LogsConfig.Validate(); err != nil { - invalidParams.AddNested("LogsConfig", err.(request.ErrInvalidParams)) - } - } - if s.SecondaryArtifacts != nil { - for i, v := range s.SecondaryArtifacts { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecondaryArtifacts", i), err.(request.ErrInvalidParams)) - } - } - } - if s.SecondarySourceVersions != nil { - for i, v := range s.SecondarySourceVersions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecondarySourceVersions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.SecondarySources != nil { - for i, v := range s.SecondarySources { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecondarySources", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Source != nil { - if err := s.Source.Validate(); err != nil { - invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) - } - } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } +func (s *DescribeTestCasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTestCasesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.VpcConfig != nil { - if err := s.VpcConfig.Validate(); err != nil { - invalidParams.AddNested("VpcConfig", err.(request.ErrInvalidParams)) - } + if s.ReportArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReportArn")) } if invalidParams.Len() > 0 { @@ -4100,192 +7580,236 @@ 
func (s *CreateProjectInput) Validate() error { return nil } -// SetArtifacts sets the Artifacts field's value. -func (s *CreateProjectInput) SetArtifacts(v *ProjectArtifacts) *CreateProjectInput { - s.Artifacts = v +// SetFilter sets the Filter field's value. +func (s *DescribeTestCasesInput) SetFilter(v *TestCaseFilter) *DescribeTestCasesInput { + s.Filter = v return s } -// SetBadgeEnabled sets the BadgeEnabled field's value. -func (s *CreateProjectInput) SetBadgeEnabled(v bool) *CreateProjectInput { - s.BadgeEnabled = &v +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeTestCasesInput) SetMaxResults(v int64) *DescribeTestCasesInput { + s.MaxResults = &v return s } -// SetCache sets the Cache field's value. -func (s *CreateProjectInput) SetCache(v *ProjectCache) *CreateProjectInput { - s.Cache = v +// SetNextToken sets the NextToken field's value. +func (s *DescribeTestCasesInput) SetNextToken(v string) *DescribeTestCasesInput { + s.NextToken = &v return s } -// SetDescription sets the Description field's value. -func (s *CreateProjectInput) SetDescription(v string) *CreateProjectInput { - s.Description = &v +// SetReportArn sets the ReportArn field's value. +func (s *DescribeTestCasesInput) SetReportArn(v string) *DescribeTestCasesInput { + s.ReportArn = &v return s } -// SetEncryptionKey sets the EncryptionKey field's value. -func (s *CreateProjectInput) SetEncryptionKey(v string) *CreateProjectInput { - s.EncryptionKey = &v - return s +type DescribeTestCasesOutput struct { + _ struct{} `type:"structure"` + + // During a previous call, the maximum number of items that can be returned + // is the value specified in maxResults. If there more items in the list, then + // a unique string called a nextToken is returned. To get the next batch of + // items in the list, call this operation again, adding the next token to the + // call. To get all of the items in the list, keep calling this operation with + // each subsequent next token that is returned, until no more next tokens are + // returned. + NextToken *string `locationName:"nextToken" type:"string"` + + // The returned list of test cases. + TestCases []*TestCase `locationName:"testCases" type:"list"` } -// SetEnvironment sets the Environment field's value. -func (s *CreateProjectInput) SetEnvironment(v *ProjectEnvironment) *CreateProjectInput { - s.Environment = v +// String returns the string representation +func (s DescribeTestCasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTestCasesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeTestCasesOutput) SetNextToken(v string) *DescribeTestCasesOutput { + s.NextToken = &v return s } -// SetFileSystemLocations sets the FileSystemLocations field's value. -func (s *CreateProjectInput) SetFileSystemLocations(v []*ProjectFileSystemLocation) *CreateProjectInput { - s.FileSystemLocations = v +// SetTestCases sets the TestCases field's value. +func (s *DescribeTestCasesOutput) SetTestCases(v []*TestCase) *DescribeTestCasesOutput { + s.TestCases = v return s } -// SetLogsConfig sets the LogsConfig field's value. -func (s *CreateProjectInput) SetLogsConfig(v *LogsConfig) *CreateProjectInput { - s.LogsConfig = v - return s -} +// Information about a Docker image that is managed by AWS CodeBuild. +type EnvironmentImage struct { + _ struct{} `type:"structure"` -// SetName sets the Name field's value. 
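DescribeTestCasesInput/Output above expose the same nextToken contract plus an optional TestCaseFilter. A small sketch of paging through failed test cases, assuming the standard v1 client, a hypothetical report ARN, and the TestCaseFilter.Status and TestCase.Name/Status fields from the service model (not shown in this hunk):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	svc := codebuild.New(session.Must(session.NewSession()))

	// Only failed test cases, up to 100 per page (the documented default).
	input := (&codebuild.DescribeTestCasesInput{}).
		SetReportArn("arn:aws:codebuild:us-east-1:123456789012:report/example-group:example-report"). // hypothetical ARN
		SetFilter(&codebuild.TestCaseFilter{Status: aws.String("FAILED")}).
		SetMaxResults(100)

	// Keep calling with each returned nextToken until none comes back.
	for {
		out, err := svc.DescribeTestCases(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, tc := range out.TestCases {
			fmt.Printf("%s => %s\n", aws.StringValue(tc.Name), aws.StringValue(tc.Status))
		}
		if out.NextToken == nil {
			break
		}
		input.SetNextToken(aws.StringValue(out.NextToken))
	}
}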
-func (s *CreateProjectInput) SetName(v string) *CreateProjectInput { - s.Name = &v - return s + // The description of the Docker image. + Description *string `locationName:"description" type:"string"` + + // The name of the Docker image. + Name *string `locationName:"name" type:"string"` + + // A list of environment image versions. + Versions []*string `locationName:"versions" type:"list"` } -// SetQueuedTimeoutInMinutes sets the QueuedTimeoutInMinutes field's value. -func (s *CreateProjectInput) SetQueuedTimeoutInMinutes(v int64) *CreateProjectInput { - s.QueuedTimeoutInMinutes = &v - return s +// String returns the string representation +func (s EnvironmentImage) String() string { + return awsutil.Prettify(s) } -// SetSecondaryArtifacts sets the SecondaryArtifacts field's value. -func (s *CreateProjectInput) SetSecondaryArtifacts(v []*ProjectArtifacts) *CreateProjectInput { - s.SecondaryArtifacts = v - return s +// GoString returns the string representation +func (s EnvironmentImage) GoString() string { + return s.String() } -// SetSecondarySourceVersions sets the SecondarySourceVersions field's value. -func (s *CreateProjectInput) SetSecondarySourceVersions(v []*ProjectSourceVersion) *CreateProjectInput { - s.SecondarySourceVersions = v +// SetDescription sets the Description field's value. +func (s *EnvironmentImage) SetDescription(v string) *EnvironmentImage { + s.Description = &v return s } -// SetSecondarySources sets the SecondarySources field's value. -func (s *CreateProjectInput) SetSecondarySources(v []*ProjectSource) *CreateProjectInput { - s.SecondarySources = v +// SetName sets the Name field's value. +func (s *EnvironmentImage) SetName(v string) *EnvironmentImage { + s.Name = &v return s } -// SetServiceRole sets the ServiceRole field's value. -func (s *CreateProjectInput) SetServiceRole(v string) *CreateProjectInput { - s.ServiceRole = &v +// SetVersions sets the Versions field's value. +func (s *EnvironmentImage) SetVersions(v []*string) *EnvironmentImage { + s.Versions = v return s } -// SetSource sets the Source field's value. -func (s *CreateProjectInput) SetSource(v *ProjectSource) *CreateProjectInput { - s.Source = v - return s +// A set of Docker images that are related by programming language and are managed +// by AWS CodeBuild. +type EnvironmentLanguage struct { + _ struct{} `type:"structure"` + + // The list of Docker images that are related by the specified programming language. + Images []*EnvironmentImage `locationName:"images" type:"list"` + + // The programming language for the Docker images. + Language *string `locationName:"language" type:"string" enum:"LanguageType"` } -// SetSourceVersion sets the SourceVersion field's value. -func (s *CreateProjectInput) SetSourceVersion(v string) *CreateProjectInput { - s.SourceVersion = &v - return s +// String returns the string representation +func (s EnvironmentLanguage) String() string { + return awsutil.Prettify(s) } -// SetTags sets the Tags field's value. -func (s *CreateProjectInput) SetTags(v []*Tag) *CreateProjectInput { - s.Tags = v - return s +// GoString returns the string representation +func (s EnvironmentLanguage) GoString() string { + return s.String() } -// SetTimeoutInMinutes sets the TimeoutInMinutes field's value. -func (s *CreateProjectInput) SetTimeoutInMinutes(v int64) *CreateProjectInput { - s.TimeoutInMinutes = &v +// SetImages sets the Images field's value. 
+func (s *EnvironmentLanguage) SetImages(v []*EnvironmentImage) *EnvironmentLanguage { + s.Images = v return s } -// SetVpcConfig sets the VpcConfig field's value. -func (s *CreateProjectInput) SetVpcConfig(v *VpcConfig) *CreateProjectInput { - s.VpcConfig = v +// SetLanguage sets the Language field's value. +func (s *EnvironmentLanguage) SetLanguage(v string) *EnvironmentLanguage { + s.Language = &v return s } -type CreateProjectOutput struct { +// A set of Docker images that are related by platform and are managed by AWS +// CodeBuild. +type EnvironmentPlatform struct { _ struct{} `type:"structure"` - // Information about the build project that was created. - Project *Project `locationName:"project" type:"structure"` + // The list of programming languages that are available for the specified platform. + Languages []*EnvironmentLanguage `locationName:"languages" type:"list"` + + // The platform's name. + Platform *string `locationName:"platform" type:"string" enum:"PlatformType"` } // String returns the string representation -func (s CreateProjectOutput) String() string { +func (s EnvironmentPlatform) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateProjectOutput) GoString() string { +func (s EnvironmentPlatform) GoString() string { return s.String() } -// SetProject sets the Project field's value. -func (s *CreateProjectOutput) SetProject(v *Project) *CreateProjectOutput { - s.Project = v +// SetLanguages sets the Languages field's value. +func (s *EnvironmentPlatform) SetLanguages(v []*EnvironmentLanguage) *EnvironmentPlatform { + s.Languages = v return s } -type CreateReportGroupInput struct { +// SetPlatform sets the Platform field's value. +func (s *EnvironmentPlatform) SetPlatform(v string) *EnvironmentPlatform { + s.Platform = &v + return s +} + +// Information about an environment variable for a build project or a build. +type EnvironmentVariable struct { _ struct{} `type:"structure"` - // A ReportExportConfig object that contains information about where the report - // group test results are exported. + // The name or key of the environment variable. // - // ExportConfig is a required field - ExportConfig *ReportExportConfig `locationName:"exportConfig" type:"structure" required:"true"` + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // The name of the report group. + // The type of environment variable. Valid values include: // - // Name is a required field - Name *string `locationName:"name" min:"2" type:"string" required:"true"` + // * PARAMETER_STORE: An environment variable stored in Amazon EC2 Systems + // Manager Parameter Store. To learn how to specify a parameter store environment + // variable, see env/parameter-store (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec.env.parameter-store) + // in the AWS CodeBuild User Guide. + // + // * PLAINTEXT: An environment variable in plain text format. This is the + // default value. + // + // * SECRETS_MANAGER: An environment variable stored in AWS Secrets Manager. + // To learn how to specify a secrets manager environment variable, see env/secrets-manager + // (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec.env.secrets-manager) + // in the AWS CodeBuild User Guide. + Type *string `locationName:"type" type:"string" enum:"EnvironmentVariableType"` - // The type of report group. + // The value of the environment variable. 
// - // Type is a required field - Type *string `locationName:"type" type:"string" required:"true" enum:"ReportType"` + // We strongly discourage the use of PLAINTEXT environment variables to store + // sensitive values, especially AWS secret key IDs and secret access keys. PLAINTEXT + // environment variables can be displayed in plain text using the AWS CodeBuild + // console and the AWS Command Line Interface (AWS CLI). For sensitive values, + // we recommend you use an environment variable of type PARAMETER_STORE or SECRETS_MANAGER. + // + // Value is a required field + Value *string `locationName:"value" type:"string" required:"true"` } // String returns the string representation -func (s CreateReportGroupInput) String() string { +func (s EnvironmentVariable) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateReportGroupInput) GoString() string { +func (s EnvironmentVariable) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateReportGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateReportGroupInput"} - if s.ExportConfig == nil { - invalidParams.Add(request.NewErrParamRequired("ExportConfig")) - } +func (s *EnvironmentVariable) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnvironmentVariable"} if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Name != nil && len(*s.Name) < 2 { - invalidParams.Add(request.NewErrParamMinLen("Name", 2)) - } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.ExportConfig != nil { - if err := s.ExportConfig.Validate(); err != nil { - invalidParams.AddNested("ExportConfig", err.(request.ErrInvalidParams)) - } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) } if invalidParams.Len() > 0 { @@ -4294,90 +7818,89 @@ func (s *CreateReportGroupInput) Validate() error { return nil } -// SetExportConfig sets the ExportConfig field's value. -func (s *CreateReportGroupInput) SetExportConfig(v *ReportExportConfig) *CreateReportGroupInput { - s.ExportConfig = v - return s -} - // SetName sets the Name field's value. -func (s *CreateReportGroupInput) SetName(v string) *CreateReportGroupInput { +func (s *EnvironmentVariable) SetName(v string) *EnvironmentVariable { s.Name = &v return s } // SetType sets the Type field's value. -func (s *CreateReportGroupInput) SetType(v string) *CreateReportGroupInput { +func (s *EnvironmentVariable) SetType(v string) *EnvironmentVariable { s.Type = &v return s } -type CreateReportGroupOutput struct { +// SetValue sets the Value field's value. +func (s *EnvironmentVariable) SetValue(v string) *EnvironmentVariable { + s.Value = &v + return s +} + +// Information about an exported environment variable. +type ExportedEnvironmentVariable struct { _ struct{} `type:"structure"` - // Information about the report group that was created. - ReportGroup *ReportGroup `locationName:"reportGroup" type:"structure"` + // The name of this exported environment variable. + Name *string `locationName:"name" min:"1" type:"string"` + + // The value assigned to this exported environment variable. + // + // During a build, the value of a variable is available starting with the install + // phase. 
It can be updated between the start of the install phase and the end + // of the post_build phase. After the post_build phase ends, the value of exported + // variables cannot change. + Value *string `locationName:"value" type:"string"` } // String returns the string representation -func (s CreateReportGroupOutput) String() string { +func (s ExportedEnvironmentVariable) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateReportGroupOutput) GoString() string { +func (s ExportedEnvironmentVariable) GoString() string { return s.String() } -// SetReportGroup sets the ReportGroup field's value. -func (s *CreateReportGroupOutput) SetReportGroup(v *ReportGroup) *CreateReportGroupOutput { - s.ReportGroup = v +// SetName sets the Name field's value. +func (s *ExportedEnvironmentVariable) SetName(v string) *ExportedEnvironmentVariable { + s.Name = &v return s } -type CreateWebhookInput struct { - _ struct{} `type:"structure"` - - // A regular expression used to determine which repository branches are built - // when a webhook is triggered. If the name of a branch matches the regular - // expression, then it is built. If branchFilter is empty, then all branches - // are built. - // - // It is recommended that you use filterGroups instead of branchFilter. - BranchFilter *string `locationName:"branchFilter" type:"string"` +// SetValue sets the Value field's value. +func (s *ExportedEnvironmentVariable) SetValue(v string) *ExportedEnvironmentVariable { + s.Value = &v + return s +} - // An array of arrays of WebhookFilter objects used to determine which webhooks - // are triggered. At least one WebhookFilter in the array must specify EVENT - // as its type. - // - // For a build to be triggered, at least one filter group in the filterGroups - // array must pass. For a filter group to pass, each of its filters must pass. - FilterGroups [][]*WebhookFilter `locationName:"filterGroups" type:"list"` +type GetResourcePolicyInput struct { + _ struct{} `type:"structure"` - // The name of the AWS CodeBuild project. + // The ARN of the resource that is associated with the resource policy. // - // ProjectName is a required field - ProjectName *string `locationName:"projectName" min:"2" type:"string" required:"true"` + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateWebhookInput) String() string { +func (s GetResourcePolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateWebhookInput) GoString() string { +func (s GetResourcePolicyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
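The EnvironmentVariable type above carries the PLAINTEXT/PARAMETER_STORE/SECRETS_MANAGER distinction and a client-side Validate method. A minimal sketch of building variables for a project environment and validating them locally, assuming the generated enum constant names (EnvironmentVariableTypePlaintext, EnvironmentVariableTypeSecretsManager) and a hypothetical secret name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	// Keep secrets out of PLAINTEXT: reference a Secrets Manager entry instead.
	vars := []*codebuild.EnvironmentVariable{
		{
			Name:  aws.String("STAGE"),
			Type:  aws.String(codebuild.EnvironmentVariableTypePlaintext),
			Value: aws.String("prod"),
		},
		{
			Name:  aws.String("DOCKERHUB_TOKEN"),
			Type:  aws.String(codebuild.EnvironmentVariableTypeSecretsManager),
			Value: aws.String("dockerhub/token"), // hypothetical Secrets Manager name
		},
	}

	// Mirrors the Validate method shown above: Name (min length 1) and Value are required.
	for _, ev := range vars {
		if err := ev.Validate(); err != nil {
			log.Fatalf("invalid environment variable: %v", err)
		}
		fmt.Printf("%s (%s)\n", aws.StringValue(ev.Name), aws.StringValue(ev.Type))
	}
}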
-func (s *CreateWebhookInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateWebhookInput"} - if s.ProjectName == nil { - invalidParams.Add(request.NewErrParamRequired("ProjectName")) +func (s *GetResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetResourcePolicyInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) } - if s.ProjectName != nil && len(*s.ProjectName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("ProjectName", 2)) + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) } if invalidParams.Len() > 0 { @@ -4386,75 +7909,61 @@ func (s *CreateWebhookInput) Validate() error { return nil } -// SetBranchFilter sets the BranchFilter field's value. -func (s *CreateWebhookInput) SetBranchFilter(v string) *CreateWebhookInput { - s.BranchFilter = &v - return s -} - -// SetFilterGroups sets the FilterGroups field's value. -func (s *CreateWebhookInput) SetFilterGroups(v [][]*WebhookFilter) *CreateWebhookInput { - s.FilterGroups = v - return s -} - -// SetProjectName sets the ProjectName field's value. -func (s *CreateWebhookInput) SetProjectName(v string) *CreateWebhookInput { - s.ProjectName = &v +// SetResourceArn sets the ResourceArn field's value. +func (s *GetResourcePolicyInput) SetResourceArn(v string) *GetResourcePolicyInput { + s.ResourceArn = &v return s } -type CreateWebhookOutput struct { +type GetResourcePolicyOutput struct { _ struct{} `type:"structure"` - // Information about a webhook that connects repository events to a build project - // in AWS CodeBuild. - Webhook *Webhook `locationName:"webhook" type:"structure"` + // The resource policy for the resource identified by the input ARN parameter. + Policy *string `locationName:"policy" min:"1" type:"string"` } // String returns the string representation -func (s CreateWebhookOutput) String() string { +func (s GetResourcePolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateWebhookOutput) GoString() string { +func (s GetResourcePolicyOutput) GoString() string { return s.String() } -// SetWebhook sets the Webhook field's value. -func (s *CreateWebhookOutput) SetWebhook(v *Webhook) *CreateWebhookOutput { - s.Webhook = v +// SetPolicy sets the Policy field's value. +func (s *GetResourcePolicyOutput) SetPolicy(v string) *GetResourcePolicyOutput { + s.Policy = &v return s } -type DeleteProjectInput struct { +// Information about the Git submodules configuration for an AWS CodeBuild build +// project. +type GitSubmodulesConfig struct { _ struct{} `type:"structure"` - // The name of the build project. + // Set to true to fetch Git submodules for your AWS CodeBuild build project. // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` + // FetchSubmodules is a required field + FetchSubmodules *bool `locationName:"fetchSubmodules" type:"boolean" required:"true"` } // String returns the string representation -func (s DeleteProjectInput) String() string { +func (s GitSubmodulesConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteProjectInput) GoString() string { +func (s GitSubmodulesConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
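GetResourcePolicy, defined above, is a single-call lookup of the sharing policy attached to a resource ARN. A short sketch, assuming the standard v1 client and a hypothetical report-group ARN:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	svc := codebuild.New(session.Must(session.NewSession()))

	// Fetch the resource policy attached to a shared report group.
	out, err := svc.GetResourcePolicy(&codebuild.GetResourcePolicyInput{
		ResourceArn: aws.String("arn:aws:codebuild:us-east-1:123456789012:report-group/example-group"), // hypothetical ARN
	})
	if err != nil {
		log.Fatal(err)
	}
	// Policy is returned as a JSON policy document string.
	fmt.Println(aws.StringValue(out.Policy))
}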
-func (s *DeleteProjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteProjectInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) +func (s *GitSubmodulesConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GitSubmodulesConfig"} + if s.FetchSubmodules == nil { + invalidParams.Add(request.NewErrParamRequired("FetchSubmodules")) } if invalidParams.Len() > 0 { @@ -4463,53 +7972,70 @@ func (s *DeleteProjectInput) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *DeleteProjectInput) SetName(v string) *DeleteProjectInput { - s.Name = &v +// SetFetchSubmodules sets the FetchSubmodules field's value. +func (s *GitSubmodulesConfig) SetFetchSubmodules(v bool) *GitSubmodulesConfig { + s.FetchSubmodules = &v return s } -type DeleteProjectOutput struct { +type ImportSourceCredentialsInput struct { _ struct{} `type:"structure"` -} -// String returns the string representation -func (s DeleteProjectOutput) String() string { - return awsutil.Prettify(s) -} + // The type of authentication used to connect to a GitHub, GitHub Enterprise, + // or Bitbucket repository. An OAUTH connection is not supported by the API + // and must be created using the AWS CodeBuild console. + // + // AuthType is a required field + AuthType *string `locationName:"authType" type:"string" required:"true" enum:"AuthType"` -// GoString returns the string representation -func (s DeleteProjectOutput) GoString() string { - return s.String() -} + // The source provider used for this project. + // + // ServerType is a required field + ServerType *string `locationName:"serverType" type:"string" required:"true" enum:"ServerType"` -type DeleteReportGroupInput struct { - _ struct{} `type:"structure"` + // Set to false to prevent overwriting the repository source credentials. Set + // to true to overwrite the repository source credentials. The default value + // is true. + ShouldOverwrite *bool `locationName:"shouldOverwrite" type:"boolean"` - // The ARN of the report group to delete. + // For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, + // this is the app password. // - // Arn is a required field - Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` + // Token is a required field + Token *string `locationName:"token" min:"1" type:"string" required:"true" sensitive:"true"` + + // The Bitbucket username when the authType is BASIC_AUTH. This parameter is + // not valid for other types of source providers or connections. + Username *string `locationName:"username" min:"1" type:"string"` } // String returns the string representation -func (s DeleteReportGroupInput) String() string { +func (s ImportSourceCredentialsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteReportGroupInput) GoString() string { +func (s ImportSourceCredentialsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteReportGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteReportGroupInput"} - if s.Arn == nil { - invalidParams.Add(request.NewErrParamRequired("Arn")) +func (s *ImportSourceCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportSourceCredentialsInput"} + if s.AuthType == nil { + invalidParams.Add(request.NewErrParamRequired("AuthType")) } - if s.Arn != nil && len(*s.Arn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + if s.ServerType == nil { + invalidParams.Add(request.NewErrParamRequired("ServerType")) + } + if s.Token == nil { + invalidParams.Add(request.NewErrParamRequired("Token")) + } + if s.Token != nil && len(*s.Token) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Token", 1)) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) } if invalidParams.Len() > 0 { @@ -4518,108 +8044,142 @@ func (s *DeleteReportGroupInput) Validate() error { return nil } -// SetArn sets the Arn field's value. -func (s *DeleteReportGroupInput) SetArn(v string) *DeleteReportGroupInput { - s.Arn = &v +// SetAuthType sets the AuthType field's value. +func (s *ImportSourceCredentialsInput) SetAuthType(v string) *ImportSourceCredentialsInput { + s.AuthType = &v return s } -type DeleteReportGroupOutput struct { +// SetServerType sets the ServerType field's value. +func (s *ImportSourceCredentialsInput) SetServerType(v string) *ImportSourceCredentialsInput { + s.ServerType = &v + return s +} + +// SetShouldOverwrite sets the ShouldOverwrite field's value. +func (s *ImportSourceCredentialsInput) SetShouldOverwrite(v bool) *ImportSourceCredentialsInput { + s.ShouldOverwrite = &v + return s +} + +// SetToken sets the Token field's value. +func (s *ImportSourceCredentialsInput) SetToken(v string) *ImportSourceCredentialsInput { + s.Token = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *ImportSourceCredentialsInput) SetUsername(v string) *ImportSourceCredentialsInput { + s.Username = &v + return s +} + +type ImportSourceCredentialsOutput struct { _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the token. + Arn *string `locationName:"arn" min:"1" type:"string"` } // String returns the string representation -func (s DeleteReportGroupOutput) String() string { +func (s ImportSourceCredentialsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteReportGroupOutput) GoString() string { +func (s ImportSourceCredentialsOutput) GoString() string { return s.String() } -type DeleteReportInput struct { - _ struct{} `type:"structure"` +// SetArn sets the Arn field's value. +func (s *ImportSourceCredentialsOutput) SetArn(v string) *ImportSourceCredentialsOutput { + s.Arn = &v + return s +} - // The ARN of the report to delete. - // - // Arn is a required field - Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` +// The input value that was provided is not valid. 
+type InvalidInputException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` } // String returns the string representation -func (s DeleteReportInput) String() string { +func (s InvalidInputException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteReportInput) GoString() string { +func (s InvalidInputException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteReportInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteReportInput"} - if s.Arn == nil { - invalidParams.Add(request.NewErrParamRequired("Arn")) - } - if s.Arn != nil && len(*s.Arn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) +func newErrorInvalidInputException(v protocol.ResponseMetadata) error { + return &InvalidInputException{ + RespMetadata: v, } +} - if invalidParams.Len() > 0 { - return invalidParams +// Code returns the exception type name. +func (s *InvalidInputException) Code() string { + return "InvalidInputException" +} + +// Message returns the exception's message. +func (s *InvalidInputException) Message() string { + if s.Message_ != nil { + return *s.Message_ } - return nil + return "" } -// SetArn sets the Arn field's value. -func (s *DeleteReportInput) SetArn(v string) *DeleteReportInput { - s.Arn = &v - return s +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidInputException) OrigErr() error { + return nil } -type DeleteReportOutput struct { - _ struct{} `type:"structure"` +func (s *InvalidInputException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } -// String returns the string representation -func (s DeleteReportOutput) String() string { - return awsutil.Prettify(s) +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidInputException) StatusCode() int { + return s.RespMetadata.StatusCode } -// GoString returns the string representation -func (s DeleteReportOutput) GoString() string { - return s.String() +// RequestID returns the service's response RequestID for request. +func (s *InvalidInputException) RequestID() string { + return s.RespMetadata.RequestID } -type DeleteResourcePolicyInput struct { +type InvalidateProjectCacheInput struct { _ struct{} `type:"structure"` - // The ARN of the resource that is associated with the resource policy. + // The name of the AWS CodeBuild build project that the cache is reset for. // - // ResourceArn is a required field - ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` + // ProjectName is a required field + ProjectName *string `locationName:"projectName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteResourcePolicyInput) String() string { +func (s InvalidateProjectCacheInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteResourcePolicyInput) GoString() string { +func (s InvalidateProjectCacheInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
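ImportSourceCredentials and the modeled InvalidInputException above fit the SDK's standard error pattern: modeled exceptions satisfy awserr.Error and can be matched by code. A sketch of storing a GitHub personal access token and handling a rejected input, assuming the generated constant names (AuthTypePersonalAccessToken, ServerTypeGithub, ErrCodeInvalidInputException) and a hypothetical token value:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	svc := codebuild.New(session.Must(session.NewSession()))

	out, err := svc.ImportSourceCredentials(&codebuild.ImportSourceCredentialsInput{
		AuthType:   aws.String(codebuild.AuthTypePersonalAccessToken),
		ServerType: aws.String(codebuild.ServerTypeGithub),
		Token:      aws.String("ghp_exampleToken"), // hypothetical personal access token
	})
	if err != nil {
		// Match the modeled exception by its error code.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == codebuild.ErrCodeInvalidInputException {
			log.Fatalf("rejected input: %s", aerr.Message())
		}
		log.Fatal(err)
	}
	fmt.Println("stored credentials:", aws.StringValue(out.Arn))
}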
-func (s *DeleteResourcePolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteResourcePolicyInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) +func (s *InvalidateProjectCacheInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InvalidateProjectCacheInput"} + if s.ProjectName == nil { + invalidParams.Add(request.NewErrParamRequired("ProjectName")) } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + if s.ProjectName != nil && len(*s.ProjectName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProjectName", 1)) } if invalidParams.Len() > 0 { @@ -4628,53 +8188,70 @@ func (s *DeleteResourcePolicyInput) Validate() error { return nil } -// SetResourceArn sets the ResourceArn field's value. -func (s *DeleteResourcePolicyInput) SetResourceArn(v string) *DeleteResourcePolicyInput { - s.ResourceArn = &v +// SetProjectName sets the ProjectName field's value. +func (s *InvalidateProjectCacheInput) SetProjectName(v string) *InvalidateProjectCacheInput { + s.ProjectName = &v return s } -type DeleteResourcePolicyOutput struct { +type InvalidateProjectCacheOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s DeleteResourcePolicyOutput) String() string { +func (s InvalidateProjectCacheOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteResourcePolicyOutput) GoString() string { +func (s InvalidateProjectCacheOutput) GoString() string { return s.String() } -type DeleteSourceCredentialsInput struct { +type ListBuildBatchesForProjectInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the token. + // A BuildBatchFilter object that specifies the filters for the search. + Filter *BuildBatchFilter `locationName:"filter" type:"structure"` + + // The maximum number of results to return. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous call to ListBuildBatchesForProject. + // This specifies the next item to return. To return the beginning of the list, + // exclude this parameter. + NextToken *string `locationName:"nextToken" type:"string"` + + // The name of the project. + ProjectName *string `locationName:"projectName" min:"1" type:"string"` + + // Specifies the sort order of the returned items. Valid values include: // - // Arn is a required field - Arn *string `locationName:"arn" min:"1" type:"string" required:"true"` + // * ASCENDING: List the batch build identifiers in ascending order by identifier. + // + // * DESCENDING: List the batch build identifiers in descending order by + // identifier. + SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` } // String returns the string representation -func (s DeleteSourceCredentialsInput) String() string { +func (s ListBuildBatchesForProjectInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSourceCredentialsInput) GoString() string { +func (s ListBuildBatchesForProjectInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
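InvalidateProjectCache, defined above, resets a project's build cache and returns an empty output. A one-call sketch, assuming the standard v1 client and a hypothetical project name:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	svc := codebuild.New(session.Must(session.NewSession()))

	// Reset the build cache for one project; the output struct has no fields.
	_, err := svc.InvalidateProjectCache(&codebuild.InvalidateProjectCacheInput{
		ProjectName: aws.String("example-project"), // hypothetical project name
	})
	if err != nil {
		log.Fatal(err)
	}
}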
-func (s *DeleteSourceCredentialsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSourceCredentialsInput"} - if s.Arn == nil { - invalidParams.Add(request.NewErrParamRequired("Arn")) +func (s *ListBuildBatchesForProjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBuildBatchesForProjectInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.Arn != nil && len(*s.Arn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + if s.ProjectName != nil && len(*s.ProjectName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProjectName", 1)) } if invalidParams.Len() > 0 { @@ -4683,62 +8260,108 @@ func (s *DeleteSourceCredentialsInput) Validate() error { return nil } -// SetArn sets the Arn field's value. -func (s *DeleteSourceCredentialsInput) SetArn(v string) *DeleteSourceCredentialsInput { - s.Arn = &v +// SetFilter sets the Filter field's value. +func (s *ListBuildBatchesForProjectInput) SetFilter(v *BuildBatchFilter) *ListBuildBatchesForProjectInput { + s.Filter = v return s } -type DeleteSourceCredentialsOutput struct { +// SetMaxResults sets the MaxResults field's value. +func (s *ListBuildBatchesForProjectInput) SetMaxResults(v int64) *ListBuildBatchesForProjectInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListBuildBatchesForProjectInput) SetNextToken(v string) *ListBuildBatchesForProjectInput { + s.NextToken = &v + return s +} + +// SetProjectName sets the ProjectName field's value. +func (s *ListBuildBatchesForProjectInput) SetProjectName(v string) *ListBuildBatchesForProjectInput { + s.ProjectName = &v + return s +} + +// SetSortOrder sets the SortOrder field's value. +func (s *ListBuildBatchesForProjectInput) SetSortOrder(v string) *ListBuildBatchesForProjectInput { + s.SortOrder = &v + return s +} + +type ListBuildBatchesForProjectOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the token. - Arn *string `locationName:"arn" min:"1" type:"string"` + // An array of strings that contains the batch build identifiers. + Ids []*string `locationName:"ids" type:"list"` + + // If there are more items to return, this contains a token that is passed to + // a subsequent call to ListBuildBatchesForProject to retrieve the next set + // of items. + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s DeleteSourceCredentialsOutput) String() string { +func (s ListBuildBatchesForProjectOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSourceCredentialsOutput) GoString() string { +func (s ListBuildBatchesForProjectOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *DeleteSourceCredentialsOutput) SetArn(v string) *DeleteSourceCredentialsOutput { - s.Arn = &v +// SetIds sets the Ids field's value. +func (s *ListBuildBatchesForProjectOutput) SetIds(v []*string) *ListBuildBatchesForProjectOutput { + s.Ids = v return s } -type DeleteWebhookInput struct { +// SetNextToken sets the NextToken field's value. +func (s *ListBuildBatchesForProjectOutput) SetNextToken(v string) *ListBuildBatchesForProjectOutput { + s.NextToken = &v + return s +} + +type ListBuildBatchesInput struct { _ struct{} `type:"structure"` - // The name of the AWS CodeBuild project. 
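ListBuildBatchesForProject, defined above, returns batch build identifiers with an optional BuildBatchFilter and sort order. A paging sketch, assuming the standard v1 client, a hypothetical project name, and the BuildBatchFilter.Status field and StatusTypeFailed/SortOrderTypeDescending constants from the service model (not shown in this hunk):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	svc := codebuild.New(session.Must(session.NewSession()))

	// Failed batch builds for one project, newest identifiers first.
	input := (&codebuild.ListBuildBatchesForProjectInput{}).
		SetProjectName("example-project"). // hypothetical project name
		SetFilter(&codebuild.BuildBatchFilter{Status: aws.String(codebuild.StatusTypeFailed)}).
		SetSortOrder(codebuild.SortOrderTypeDescending).
		SetMaxResults(50)

	for {
		out, err := svc.ListBuildBatchesForProject(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, id := range out.Ids {
			fmt.Println(aws.StringValue(id))
		}
		if out.NextToken == nil {
			break
		}
		input.SetNextToken(aws.StringValue(out.NextToken))
	}
}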
+ // A BuildBatchFilter object that specifies the filters for the search. + Filter *BuildBatchFilter `locationName:"filter" type:"structure"` + + // The maximum number of results to return. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous call to ListBuildBatches. This + // specifies the next item to return. To return the beginning of the list, exclude + // this parameter. + NextToken *string `locationName:"nextToken" type:"string"` + + // Specifies the sort order of the returned items. Valid values include: // - // ProjectName is a required field - ProjectName *string `locationName:"projectName" min:"2" type:"string" required:"true"` + // * ASCENDING: List the batch build identifiers in ascending order by identifier. + // + // * DESCENDING: List the batch build identifiers in descending order by + // identifier. + SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` } // String returns the string representation -func (s DeleteWebhookInput) String() string { +func (s ListBuildBatchesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteWebhookInput) GoString() string { +func (s ListBuildBatchesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteWebhookInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteWebhookInput"} - if s.ProjectName == nil { - invalidParams.Add(request.NewErrParamRequired("ProjectName")) - } - if s.ProjectName != nil && len(*s.ProjectName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("ProjectName", 2)) +func (s *ListBuildBatchesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBuildBatchesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -4747,70 +8370,105 @@ func (s *DeleteWebhookInput) Validate() error { return nil } -// SetProjectName sets the ProjectName field's value. -func (s *DeleteWebhookInput) SetProjectName(v string) *DeleteWebhookInput { - s.ProjectName = &v +// SetFilter sets the Filter field's value. +func (s *ListBuildBatchesInput) SetFilter(v *BuildBatchFilter) *ListBuildBatchesInput { + s.Filter = v return s } -type DeleteWebhookOutput struct { +// SetMaxResults sets the MaxResults field's value. +func (s *ListBuildBatchesInput) SetMaxResults(v int64) *ListBuildBatchesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListBuildBatchesInput) SetNextToken(v string) *ListBuildBatchesInput { + s.NextToken = &v + return s +} + +// SetSortOrder sets the SortOrder field's value. +func (s *ListBuildBatchesInput) SetSortOrder(v string) *ListBuildBatchesInput { + s.SortOrder = &v + return s +} + +type ListBuildBatchesOutput struct { _ struct{} `type:"structure"` + + // An array of strings that contains the batch build identifiers. + Ids []*string `locationName:"ids" type:"list"` + + // If there are more items to return, this contains a token that is passed to + // a subsequent call to ListBuildBatches to retrieve the next set of items. 
+ NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s DeleteWebhookOutput) String() string { +func (s ListBuildBatchesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteWebhookOutput) GoString() string { +func (s ListBuildBatchesOutput) GoString() string { return s.String() } -type DescribeTestCasesInput struct { - _ struct{} `type:"structure"` +// SetIds sets the Ids field's value. +func (s *ListBuildBatchesOutput) SetIds(v []*string) *ListBuildBatchesOutput { + s.Ids = v + return s +} - // A TestCaseFilter object used to filter the returned reports. - Filter *TestCaseFilter `locationName:"filter" type:"structure"` +// SetNextToken sets the NextToken field's value. +func (s *ListBuildBatchesOutput) SetNextToken(v string) *ListBuildBatchesOutput { + s.NextToken = &v + return s +} - // The maximum number of paginated test cases returned per response. Use nextToken - // to iterate pages in the list of returned TestCase objects. The default value - // is 100. - MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` +type ListBuildsForProjectInput struct { + _ struct{} `type:"structure"` - // During a previous call, the maximum number of items that can be returned - // is the value specified in maxResults. If there more items in the list, then - // a unique string called a nextToken is returned. To get the next batch of - // items in the list, call this operation again, adding the next token to the - // call. To get all of the items in the list, keep calling this operation with - // each subsequent next token that is returned, until no more next tokens are - // returned. + // During a previous call, if there are more than 100 items in the list, only + // the first 100 items are returned, along with a unique string called a nextToken. + // To get the next batch of items in the list, call this operation again, adding + // the next token to the call. To get all of the items in the list, keep calling + // this operation with each subsequent next token that is returned, until no + // more next tokens are returned. NextToken *string `locationName:"nextToken" type:"string"` - // The ARN of the report for which test cases are returned. + // The name of the AWS CodeBuild project. // - // ReportArn is a required field - ReportArn *string `locationName:"reportArn" type:"string" required:"true"` + // ProjectName is a required field + ProjectName *string `locationName:"projectName" min:"1" type:"string" required:"true"` + + // The order to list build IDs. Valid values include: + // + // * ASCENDING: List the build IDs in ascending order by build ID. + // + // * DESCENDING: List the build IDs in descending order by build ID. + SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` } // String returns the string representation -func (s DescribeTestCasesInput) String() string { +func (s ListBuildsForProjectInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTestCasesInput) GoString() string { +func (s ListBuildsForProjectInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeTestCasesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeTestCasesInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) +func (s *ListBuildsForProjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBuildsForProjectInput"} + if s.ProjectName == nil { + invalidParams.Add(request.NewErrParamRequired("ProjectName")) } - if s.ReportArn == nil { - invalidParams.Add(request.NewErrParamRequired("ReportArn")) + if s.ProjectName != nil && len(*s.ProjectName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProjectName", 1)) } if invalidParams.Len() > 0 { @@ -4819,231 +8477,223 @@ func (s *DescribeTestCasesInput) Validate() error { return nil } -// SetFilter sets the Filter field's value. -func (s *DescribeTestCasesInput) SetFilter(v *TestCaseFilter) *DescribeTestCasesInput { - s.Filter = v +// SetNextToken sets the NextToken field's value. +func (s *ListBuildsForProjectInput) SetNextToken(v string) *ListBuildsForProjectInput { + s.NextToken = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeTestCasesInput) SetMaxResults(v int64) *DescribeTestCasesInput { - s.MaxResults = &v +// SetProjectName sets the ProjectName field's value. +func (s *ListBuildsForProjectInput) SetProjectName(v string) *ListBuildsForProjectInput { + s.ProjectName = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *DescribeTestCasesInput) SetNextToken(v string) *DescribeTestCasesInput { - s.NextToken = &v +// SetSortOrder sets the SortOrder field's value. +func (s *ListBuildsForProjectInput) SetSortOrder(v string) *ListBuildsForProjectInput { + s.SortOrder = &v return s } -// SetReportArn sets the ReportArn field's value. -func (s *DescribeTestCasesInput) SetReportArn(v string) *DescribeTestCasesInput { - s.ReportArn = &v - return s +type ListBuildsForProjectOutput struct { + _ struct{} `type:"structure"` + + // A list of build IDs for the specified build project, with each build ID representing + // a single build. + Ids []*string `locationName:"ids" min:"1" type:"list"` + + // If there are more than 100 items in the list, only the first 100 items are + // returned, along with a unique string called a nextToken. To get the next + // batch of items in the list, call this operation again, adding the next token + // to the call. + NextToken *string `locationName:"nextToken" type:"string"` } -type DescribeTestCasesOutput struct { - _ struct{} `type:"structure"` +// String returns the string representation +func (s ListBuildsForProjectOutput) String() string { + return awsutil.Prettify(s) +} - // During a previous call, the maximum number of items that can be returned - // is the value specified in maxResults. If there more items in the list, then - // a unique string called a nextToken is returned. To get the next batch of - // items in the list, call this operation again, adding the next token to the - // call. To get all of the items in the list, keep calling this operation with - // each subsequent next token that is returned, until no more next tokens are - // returned. +// GoString returns the string representation +func (s ListBuildsForProjectOutput) GoString() string { + return s.String() +} + +// SetIds sets the Ids field's value. +func (s *ListBuildsForProjectOutput) SetIds(v []*string) *ListBuildsForProjectOutput { + s.Ids = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListBuildsForProjectOutput) SetNextToken(v string) *ListBuildsForProjectOutput { + s.NextToken = &v + return s +} + +type ListBuildsInput struct { + _ struct{} `type:"structure"` + + // During a previous call, if there are more than 100 items in the list, only + // the first 100 items are returned, along with a unique string called a nextToken. + // To get the next batch of items in the list, call this operation again, adding + // the next token to the call. To get all of the items in the list, keep calling + // this operation with each subsequent next token that is returned, until no + // more next tokens are returned. NextToken *string `locationName:"nextToken" type:"string"` - // The returned list of test cases. - TestCases []*TestCase `locationName:"testCases" type:"list"` + // The order to list build IDs. Valid values include: + // + // * ASCENDING: List the build IDs in ascending order by build ID. + // + // * DESCENDING: List the build IDs in descending order by build ID. + SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` } // String returns the string representation -func (s DescribeTestCasesOutput) String() string { +func (s ListBuildsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTestCasesOutput) GoString() string { +func (s ListBuildsInput) GoString() string { return s.String() } // SetNextToken sets the NextToken field's value. -func (s *DescribeTestCasesOutput) SetNextToken(v string) *DescribeTestCasesOutput { +func (s *ListBuildsInput) SetNextToken(v string) *ListBuildsInput { s.NextToken = &v return s } -// SetTestCases sets the TestCases field's value. -func (s *DescribeTestCasesOutput) SetTestCases(v []*TestCase) *DescribeTestCasesOutput { - s.TestCases = v +// SetSortOrder sets the SortOrder field's value. +func (s *ListBuildsInput) SetSortOrder(v string) *ListBuildsInput { + s.SortOrder = &v return s } -// Information about a Docker image that is managed by AWS CodeBuild. -type EnvironmentImage struct { +type ListBuildsOutput struct { _ struct{} `type:"structure"` - // The description of the Docker image. - Description *string `locationName:"description" type:"string"` - - // The name of the Docker image. - Name *string `locationName:"name" type:"string"` + // A list of build IDs, with each build ID representing a single build. + Ids []*string `locationName:"ids" min:"1" type:"list"` - // A list of environment image versions. - Versions []*string `locationName:"versions" type:"list"` + // If there are more than 100 items in the list, only the first 100 items are + // returned, along with a unique string called a nextToken. To get the next + // batch of items in the list, call this operation again, adding the next token + // to the call. + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s EnvironmentImage) String() string { +func (s ListBuildsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EnvironmentImage) GoString() string { +func (s ListBuildsOutput) GoString() string { return s.String() } -// SetDescription sets the Description field's value. -func (s *EnvironmentImage) SetDescription(v string) *EnvironmentImage { - s.Description = &v - return s -} - -// SetName sets the Name field's value. -func (s *EnvironmentImage) SetName(v string) *EnvironmentImage { - s.Name = &v +// SetIds sets the Ids field's value. 
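ListBuildsForProject, covered above, returns at most 100 build IDs per page along with a nextToken. A sketch that collects every ID for one project in descending order, assuming the standard v1 client and a hypothetical project name:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	svc := codebuild.New(session.Must(session.NewSession()))

	// Newest builds first; each page holds at most 100 IDs.
	input := (&codebuild.ListBuildsForProjectInput{}).
		SetProjectName("example-project"). // hypothetical project name
		SetSortOrder(codebuild.SortOrderTypeDescending)

	var ids []*string
	for {
		out, err := svc.ListBuildsForProject(input)
		if err != nil {
			log.Fatal(err)
		}
		ids = append(ids, out.Ids...)
		if out.NextToken == nil {
			break
		}
		input.SetNextToken(aws.StringValue(out.NextToken))
	}
	fmt.Printf("%d builds for example-project\n", len(ids))
}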
+func (s *ListBuildsOutput) SetIds(v []*string) *ListBuildsOutput { + s.Ids = v return s } -// SetVersions sets the Versions field's value. -func (s *EnvironmentImage) SetVersions(v []*string) *EnvironmentImage { - s.Versions = v +// SetNextToken sets the NextToken field's value. +func (s *ListBuildsOutput) SetNextToken(v string) *ListBuildsOutput { + s.NextToken = &v return s } -// A set of Docker images that are related by programming language and are managed -// by AWS CodeBuild. -type EnvironmentLanguage struct { +type ListCuratedEnvironmentImagesInput struct { _ struct{} `type:"structure"` - - // The list of Docker images that are related by the specified programming language. - Images []*EnvironmentImage `locationName:"images" type:"list"` - - // The programming language for the Docker images. - Language *string `locationName:"language" type:"string" enum:"LanguageType"` } // String returns the string representation -func (s EnvironmentLanguage) String() string { +func (s ListCuratedEnvironmentImagesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EnvironmentLanguage) GoString() string { +func (s ListCuratedEnvironmentImagesInput) GoString() string { return s.String() } -// SetImages sets the Images field's value. -func (s *EnvironmentLanguage) SetImages(v []*EnvironmentImage) *EnvironmentLanguage { - s.Images = v - return s -} - -// SetLanguage sets the Language field's value. -func (s *EnvironmentLanguage) SetLanguage(v string) *EnvironmentLanguage { - s.Language = &v - return s -} - -// A set of Docker images that are related by platform and are managed by AWS -// CodeBuild. -type EnvironmentPlatform struct { +type ListCuratedEnvironmentImagesOutput struct { _ struct{} `type:"structure"` - // The list of programming languages that are available for the specified platform. - Languages []*EnvironmentLanguage `locationName:"languages" type:"list"` - - // The platform's name. - Platform *string `locationName:"platform" type:"string" enum:"PlatformType"` + // Information about supported platforms for Docker images that are managed + // by AWS CodeBuild. + Platforms []*EnvironmentPlatform `locationName:"platforms" type:"list"` } // String returns the string representation -func (s EnvironmentPlatform) String() string { +func (s ListCuratedEnvironmentImagesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EnvironmentPlatform) GoString() string { +func (s ListCuratedEnvironmentImagesOutput) GoString() string { return s.String() } -// SetLanguages sets the Languages field's value. -func (s *EnvironmentPlatform) SetLanguages(v []*EnvironmentLanguage) *EnvironmentPlatform { - s.Languages = v - return s -} - -// SetPlatform sets the Platform field's value. -func (s *EnvironmentPlatform) SetPlatform(v string) *EnvironmentPlatform { - s.Platform = &v +// SetPlatforms sets the Platforms field's value. +func (s *ListCuratedEnvironmentImagesOutput) SetPlatforms(v []*EnvironmentPlatform) *ListCuratedEnvironmentImagesOutput { + s.Platforms = v return s } -// Information about an environment variable for a build project or a build. -type EnvironmentVariable struct { +type ListProjectsInput struct { _ struct{} `type:"structure"` - // The name or key of the environment variable. 
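ListCuratedEnvironmentImages, defined above, returns the curated-image catalog as a platform -> language -> image -> versions hierarchy (EnvironmentPlatform, EnvironmentLanguage, EnvironmentImage). A sketch that walks that hierarchy, assuming the standard v1 client:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	svc := codebuild.New(session.Must(session.NewSession()))

	// The input has no fields; the whole catalog comes back in one response.
	out, err := svc.ListCuratedEnvironmentImages(&codebuild.ListCuratedEnvironmentImagesInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range out.Platforms {
		for _, l := range p.Languages {
			for _, img := range l.Images {
				fmt.Printf("%s/%s: %s (%d versions)\n",
					aws.StringValue(p.Platform), aws.StringValue(l.Language),
					aws.StringValue(img.Name), len(img.Versions))
			}
		}
	}
}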
- // - // Name is a required field - Name *string `locationName:"name" min:"1" type:"string" required:"true"` + // During a previous call, if there are more than 100 items in the list, only + // the first 100 items are returned, along with a unique string called a nextToken. + // To get the next batch of items in the list, call this operation again, adding + // the next token to the call. To get all of the items in the list, keep calling + // this operation with each subsequent next token that is returned, until no + // more next tokens are returned. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` - // The type of environment variable. Valid values include: + // The criterion to be used to list build project names. Valid values include: // - // * PARAMETER_STORE: An environment variable stored in Amazon EC2 Systems - // Manager Parameter Store. + // * CREATED_TIME: List based on when each build project was created. // - // * PLAINTEXT: An environment variable in plain text format. This is the - // default value. + // * LAST_MODIFIED_TIME: List based on when information about each build + // project was last changed. // - // * SECRETS_MANAGER: An environment variable stored in AWS Secrets Manager. - Type *string `locationName:"type" type:"string" enum:"EnvironmentVariableType"` + // * NAME: List based on each build project's name. + // + // Use sortOrder to specify in what order to list the build project names based + // on the preceding criteria. + SortBy *string `locationName:"sortBy" type:"string" enum:"ProjectSortByType"` - // The value of the environment variable. + // The order in which to list build projects. Valid values include: // - // We strongly discourage the use of PLAINTEXT environment variables to store - // sensitive values, especially AWS secret key IDs and secret access keys. PLAINTEXT - // environment variables can be displayed in plain text using the AWS CodeBuild - // console and the AWS Command Line Interface (AWS CLI). For sensitive values, - // we recommend you use an environment variable of type PARAMETER_STORE or SECRETS_MANAGER. + // * ASCENDING: List in ascending order. // - // Value is a required field - Value *string `locationName:"value" type:"string" required:"true"` + // * DESCENDING: List in descending order. + // + // Use sortBy to specify the criterion to be used to list build project names. + SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` } // String returns the string representation -func (s EnvironmentVariable) String() string { +func (s ListProjectsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EnvironmentVariable) GoString() string { +func (s ListProjectsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
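A sketch of listing project names sorted by name in ascending order, using the generated setter helpers and the sortBy/sortOrder string values documented above; paging follows the same nextToken pattern as before, and the helper name is illustrative.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// listProjectNames returns every build project name, sorted by name.
func listProjectNames(svc *codebuild.CodeBuild) ([]string, error) {
	// "NAME" and "ASCENDING" are documented ProjectSortByType and
	// SortOrderType values.
	input := (&codebuild.ListProjectsInput{}).
		SetSortBy("NAME").
		SetSortOrder("ASCENDING")

	var names []string
	for {
		out, err := svc.ListProjects(input)
		if err != nil {
			return nil, err
		}
		for _, p := range out.Projects {
			names = append(names, aws.StringValue(p))
		}
		if out.NextToken == nil {
			return names, nil
		}
		input.SetNextToken(aws.StringValue(out.NextToken))
	}
}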
-func (s *EnvironmentVariable) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EnvironmentVariable"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) +func (s *ListProjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListProjectsInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } if invalidParams.Len() > 0 { @@ -5052,89 +8702,106 @@ func (s *EnvironmentVariable) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *EnvironmentVariable) SetName(v string) *EnvironmentVariable { - s.Name = &v +// SetNextToken sets the NextToken field's value. +func (s *ListProjectsInput) SetNextToken(v string) *ListProjectsInput { + s.NextToken = &v return s } -// SetType sets the Type field's value. -func (s *EnvironmentVariable) SetType(v string) *EnvironmentVariable { - s.Type = &v +// SetSortBy sets the SortBy field's value. +func (s *ListProjectsInput) SetSortBy(v string) *ListProjectsInput { + s.SortBy = &v return s } -// SetValue sets the Value field's value. -func (s *EnvironmentVariable) SetValue(v string) *EnvironmentVariable { - s.Value = &v +// SetSortOrder sets the SortOrder field's value. +func (s *ListProjectsInput) SetSortOrder(v string) *ListProjectsInput { + s.SortOrder = &v return s } -// Information about an exported environment variable. -type ExportedEnvironmentVariable struct { +type ListProjectsOutput struct { _ struct{} `type:"structure"` - // The name of this exported environment variable. - Name *string `locationName:"name" min:"1" type:"string"` + // If there are more than 100 items in the list, only the first 100 items are + // returned, along with a unique string called a nextToken. To get the next + // batch of items in the list, call this operation again, adding the next token + // to the call. + NextToken *string `locationName:"nextToken" type:"string"` - // The value assigned to this exported environment variable. - // - // During a build, the value of a variable is available starting with the install - // phase. It can be updated between the start of the install phase and the end - // of the post_build phase. After the post_build phase ends, the value of exported - // variables cannot change. - Value *string `locationName:"value" type:"string"` + // The list of build project names, with each build project name representing + // a single build project. + Projects []*string `locationName:"projects" min:"1" type:"list"` } // String returns the string representation -func (s ExportedEnvironmentVariable) String() string { +func (s ListProjectsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExportedEnvironmentVariable) GoString() string { +func (s ListProjectsOutput) GoString() string { return s.String() } -// SetName sets the Name field's value. -func (s *ExportedEnvironmentVariable) SetName(v string) *ExportedEnvironmentVariable { - s.Name = &v +// SetNextToken sets the NextToken field's value. +func (s *ListProjectsOutput) SetNextToken(v string) *ListProjectsOutput { + s.NextToken = &v return s } -// SetValue sets the Value field's value. 
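Validate can also be called directly, ahead of any request, to surface parameter problems locally; here it flags the one-character minimum that ListProjectsInput places on nextToken. A minimal sketch with a deliberately empty token.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/codebuild"
)

func main() {
	// An empty nextToken violates the min:"1" constraint on the field,
	// so Validate reports an invalid-parameter error without calling AWS.
	input := (&codebuild.ListProjectsInput{}).SetNextToken("")
	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
	}
}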
-func (s *ExportedEnvironmentVariable) SetValue(v string) *ExportedEnvironmentVariable { - s.Value = &v +// SetProjects sets the Projects field's value. +func (s *ListProjectsOutput) SetProjects(v []*string) *ListProjectsOutput { + s.Projects = v return s } -type GetResourcePolicyInput struct { +type ListReportGroupsInput struct { _ struct{} `type:"structure"` - // The ARN of the resource that is associated with the resource policy. + // The maximum number of paginated report groups returned per response. Use + // nextToken to iterate pages in the list of returned ReportGroup objects. The + // default value is 100. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // During a previous call, the maximum number of items that can be returned + // is the value specified in maxResults. If there more items in the list, then + // a unique string called a nextToken is returned. To get the next batch of + // items in the list, call this operation again, adding the next token to the + // call. To get all of the items in the list, keep calling this operation with + // each subsequent next token that is returned, until no more next tokens are + // returned. + NextToken *string `locationName:"nextToken" type:"string"` + + // The criterion to be used to list build report groups. Valid values include: // - // ResourceArn is a required field - ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` + // * CREATED_TIME: List based on when each report group was created. + // + // * LAST_MODIFIED_TIME: List based on when each report group was last changed. + // + // * NAME: List based on each report group's name. + SortBy *string `locationName:"sortBy" type:"string" enum:"ReportGroupSortByType"` + + // Used to specify the order to sort the list of returned report groups. Valid + // values are ASCENDING and DESCENDING. + SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` } // String returns the string representation -func (s GetResourcePolicyInput) String() string { +func (s ListReportGroupsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetResourcePolicyInput) GoString() string { +func (s ListReportGroupsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetResourcePolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetResourcePolicyInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) +func (s *ListReportGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListReportGroupsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -5143,133 +8810,116 @@ func (s *GetResourcePolicyInput) Validate() error { return nil } -// SetResourceArn sets the ResourceArn field's value. -func (s *GetResourcePolicyInput) SetResourceArn(v string) *GetResourcePolicyInput { - s.ResourceArn = &v +// SetMaxResults sets the MaxResults field's value. 
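The maxResults/nextToken contract described for ListReportGroups (a caller-chosen page size, defaulting to 100) looks like this in practice. A minimal sketch; the page size of 25 and the helper name are arbitrary choices for illustration.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// listReportGroupARNs pages through every report group ARN in the
// account, 25 at a time.
func listReportGroupARNs(svc *codebuild.CodeBuild) ([]string, error) {
	input := &codebuild.ListReportGroupsInput{MaxResults: aws.Int64(25)}

	var arns []string
	for {
		out, err := svc.ListReportGroups(input)
		if err != nil {
			return nil, err
		}
		for _, arn := range out.ReportGroups {
			arns = append(arns, aws.StringValue(arn))
		}
		if out.NextToken == nil {
			return arns, nil
		}
		input.NextToken = out.NextToken
	}
}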
+func (s *ListReportGroupsInput) SetMaxResults(v int64) *ListReportGroupsInput { + s.MaxResults = &v return s } -type GetResourcePolicyOutput struct { - _ struct{} `type:"structure"` - - // The resource policy for the resource identified by the input ARN parameter. - Policy *string `locationName:"policy" min:"1" type:"string"` -} - -// String returns the string representation -func (s GetResourcePolicyOutput) String() string { - return awsutil.Prettify(s) +// SetNextToken sets the NextToken field's value. +func (s *ListReportGroupsInput) SetNextToken(v string) *ListReportGroupsInput { + s.NextToken = &v + return s } -// GoString returns the string representation -func (s GetResourcePolicyOutput) GoString() string { - return s.String() +// SetSortBy sets the SortBy field's value. +func (s *ListReportGroupsInput) SetSortBy(v string) *ListReportGroupsInput { + s.SortBy = &v + return s } -// SetPolicy sets the Policy field's value. -func (s *GetResourcePolicyOutput) SetPolicy(v string) *GetResourcePolicyOutput { - s.Policy = &v +// SetSortOrder sets the SortOrder field's value. +func (s *ListReportGroupsInput) SetSortOrder(v string) *ListReportGroupsInput { + s.SortOrder = &v return s } -// Information about the Git submodules configuration for an AWS CodeBuild build -// project. -type GitSubmodulesConfig struct { +type ListReportGroupsOutput struct { _ struct{} `type:"structure"` - // Set to true to fetch Git submodules for your AWS CodeBuild build project. - // - // FetchSubmodules is a required field - FetchSubmodules *bool `locationName:"fetchSubmodules" type:"boolean" required:"true"` + // During a previous call, the maximum number of items that can be returned + // is the value specified in maxResults. If there more items in the list, then + // a unique string called a nextToken is returned. To get the next batch of + // items in the list, call this operation again, adding the next token to the + // call. To get all of the items in the list, keep calling this operation with + // each subsequent next token that is returned, until no more next tokens are + // returned. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of ARNs for the report groups in the current AWS account. + ReportGroups []*string `locationName:"reportGroups" min:"1" type:"list"` } // String returns the string representation -func (s GitSubmodulesConfig) String() string { +func (s ListReportGroupsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GitSubmodulesConfig) GoString() string { +func (s ListReportGroupsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GitSubmodulesConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GitSubmodulesConfig"} - if s.FetchSubmodules == nil { - invalidParams.Add(request.NewErrParamRequired("FetchSubmodules")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetNextToken sets the NextToken field's value. +func (s *ListReportGroupsOutput) SetNextToken(v string) *ListReportGroupsOutput { + s.NextToken = &v + return s } -// SetFetchSubmodules sets the FetchSubmodules field's value. -func (s *GitSubmodulesConfig) SetFetchSubmodules(v bool) *GitSubmodulesConfig { - s.FetchSubmodules = &v +// SetReportGroups sets the ReportGroups field's value. 
+func (s *ListReportGroupsOutput) SetReportGroups(v []*string) *ListReportGroupsOutput { + s.ReportGroups = v return s } -type ImportSourceCredentialsInput struct { +type ListReportsForReportGroupInput struct { _ struct{} `type:"structure"` - // The type of authentication used to connect to a GitHub, GitHub Enterprise, - // or Bitbucket repository. An OAUTH connection is not supported by the API - // and must be created using the AWS CodeBuild console. - // - // AuthType is a required field - AuthType *string `locationName:"authType" type:"string" required:"true" enum:"AuthType"` + // A ReportFilter object used to filter the returned reports. + Filter *ReportFilter `locationName:"filter" type:"structure"` - // The source provider used for this project. - // - // ServerType is a required field - ServerType *string `locationName:"serverType" type:"string" required:"true" enum:"ServerType"` + // The maximum number of paginated reports in this report group returned per + // response. Use nextToken to iterate pages in the list of returned Report objects. + // The default value is 100. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` - // Set to false to prevent overwriting the repository source credentials. Set - // to true to overwrite the repository source credentials. The default value - // is true. - ShouldOverwrite *bool `locationName:"shouldOverwrite" type:"boolean"` + // During a previous call, the maximum number of items that can be returned + // is the value specified in maxResults. If there more items in the list, then + // a unique string called a nextToken is returned. To get the next batch of + // items in the list, call this operation again, adding the next token to the + // call. To get all of the items in the list, keep calling this operation with + // each subsequent next token that is returned, until no more next tokens are + // returned. + NextToken *string `locationName:"nextToken" type:"string"` - // For GitHub or GitHub Enterprise, this is the personal access token. For Bitbucket, - // this is the app password. + // The ARN of the report group for which you want to return report ARNs. // - // Token is a required field - Token *string `locationName:"token" min:"1" type:"string" required:"true" sensitive:"true"` + // ReportGroupArn is a required field + ReportGroupArn *string `locationName:"reportGroupArn" type:"string" required:"true"` - // The Bitbucket username when the authType is BASIC_AUTH. This parameter is - // not valid for other types of source providers or connections. - Username *string `locationName:"username" min:"1" type:"string"` + // Use to specify whether the results are returned in ascending or descending + // order. + SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` } // String returns the string representation -func (s ImportSourceCredentialsInput) String() string { +func (s ListReportsForReportGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ImportSourceCredentialsInput) GoString() string { +func (s ListReportsForReportGroupInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
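ReportGroupArn is the only required field on ListReportsForReportGroupInput; the sketch below lists a group's report ARNs in descending order and leaves the optional ReportFilter unset. The ARN is supplied by the caller and the helper name is illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// listReportsInGroup prints the report ARNs for one report group,
// in descending order.
func listReportsInGroup(svc *codebuild.CodeBuild, reportGroupArn string) error {
	input := &codebuild.ListReportsForReportGroupInput{
		ReportGroupArn: aws.String(reportGroupArn),
		SortOrder:      aws.String("DESCENDING"),
	}
	for {
		out, err := svc.ListReportsForReportGroup(input)
		if err != nil {
			return err
		}
		for _, arn := range out.Reports {
			fmt.Println(aws.StringValue(arn))
		}
		if out.NextToken == nil {
			return nil
		}
		input.NextToken = out.NextToken
	}
}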
-func (s *ImportSourceCredentialsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ImportSourceCredentialsInput"} - if s.AuthType == nil { - invalidParams.Add(request.NewErrParamRequired("AuthType")) - } - if s.ServerType == nil { - invalidParams.Add(request.NewErrParamRequired("ServerType")) - } - if s.Token == nil { - invalidParams.Add(request.NewErrParamRequired("Token")) - } - if s.Token != nil && len(*s.Token) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Token", 1)) +func (s *ListReportsForReportGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListReportsForReportGroupInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.Username != nil && len(*s.Username) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + if s.ReportGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReportGroupArn")) } if invalidParams.Len() > 0 { @@ -5278,142 +8928,119 @@ func (s *ImportSourceCredentialsInput) Validate() error { return nil } -// SetAuthType sets the AuthType field's value. -func (s *ImportSourceCredentialsInput) SetAuthType(v string) *ImportSourceCredentialsInput { - s.AuthType = &v +// SetFilter sets the Filter field's value. +func (s *ListReportsForReportGroupInput) SetFilter(v *ReportFilter) *ListReportsForReportGroupInput { + s.Filter = v return s } -// SetServerType sets the ServerType field's value. -func (s *ImportSourceCredentialsInput) SetServerType(v string) *ImportSourceCredentialsInput { - s.ServerType = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListReportsForReportGroupInput) SetMaxResults(v int64) *ListReportsForReportGroupInput { + s.MaxResults = &v return s } -// SetShouldOverwrite sets the ShouldOverwrite field's value. -func (s *ImportSourceCredentialsInput) SetShouldOverwrite(v bool) *ImportSourceCredentialsInput { - s.ShouldOverwrite = &v +// SetNextToken sets the NextToken field's value. +func (s *ListReportsForReportGroupInput) SetNextToken(v string) *ListReportsForReportGroupInput { + s.NextToken = &v return s } -// SetToken sets the Token field's value. -func (s *ImportSourceCredentialsInput) SetToken(v string) *ImportSourceCredentialsInput { - s.Token = &v +// SetReportGroupArn sets the ReportGroupArn field's value. +func (s *ListReportsForReportGroupInput) SetReportGroupArn(v string) *ListReportsForReportGroupInput { + s.ReportGroupArn = &v return s } -// SetUsername sets the Username field's value. -func (s *ImportSourceCredentialsInput) SetUsername(v string) *ImportSourceCredentialsInput { - s.Username = &v +// SetSortOrder sets the SortOrder field's value. +func (s *ListReportsForReportGroupInput) SetSortOrder(v string) *ListReportsForReportGroupInput { + s.SortOrder = &v return s } -type ImportSourceCredentialsOutput struct { +type ListReportsForReportGroupOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the token. - Arn *string `locationName:"arn" min:"1" type:"string"` -} - -// String returns the string representation -func (s ImportSourceCredentialsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ImportSourceCredentialsOutput) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. 
-func (s *ImportSourceCredentialsOutput) SetArn(v string) *ImportSourceCredentialsOutput { - s.Arn = &v - return s -} - -// The input value that was provided is not valid. -type InvalidInputException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation -func (s InvalidInputException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InvalidInputException) GoString() string { - return s.String() -} - -func newErrorInvalidInputException(v protocol.ResponseMetadata) error { - return &InvalidInputException{ - respMetadata: v, - } -} - -// Code returns the exception type name. -func (s InvalidInputException) Code() string { - return "InvalidInputException" -} + // During a previous call, the maximum number of items that can be returned + // is the value specified in maxResults. If there more items in the list, then + // a unique string called a nextToken is returned. To get the next batch of + // items in the list, call this operation again, adding the next token to the + // call. To get all of the items in the list, keep calling this operation with + // each subsequent next token that is returned, until no more next tokens are + // returned. + NextToken *string `locationName:"nextToken" type:"string"` -// Message returns the exception's message. -func (s InvalidInputException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" + // The list of report ARNs. + Reports []*string `locationName:"reports" min:"1" type:"list"` } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInputException) OrigErr() error { - return nil +// String returns the string representation +func (s ListReportsForReportGroupOutput) String() string { + return awsutil.Prettify(s) } -func (s InvalidInputException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// GoString returns the string representation +func (s ListReportsForReportGroupOutput) GoString() string { + return s.String() } -// Status code returns the HTTP status code for the request's response error. -func (s InvalidInputException) StatusCode() int { - return s.respMetadata.StatusCode +// SetNextToken sets the NextToken field's value. +func (s *ListReportsForReportGroupOutput) SetNextToken(v string) *ListReportsForReportGroupOutput { + s.NextToken = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s InvalidInputException) RequestID() string { - return s.respMetadata.RequestID +// SetReports sets the Reports field's value. +func (s *ListReportsForReportGroupOutput) SetReports(v []*string) *ListReportsForReportGroupOutput { + s.Reports = v + return s } -type InvalidateProjectCacheInput struct { +type ListReportsInput struct { _ struct{} `type:"structure"` - // The name of the AWS CodeBuild build project that the cache is reset for. + // A ReportFilter object used to filter the returned reports. + Filter *ReportFilter `locationName:"filter" type:"structure"` + + // The maximum number of paginated reports returned per response. Use nextToken + // to iterate pages in the list of returned Report objects. The default value + // is 100. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // During a previous call, the maximum number of items that can be returned + // is the value specified in maxResults. 
If there more items in the list, then + // a unique string called a nextToken is returned. To get the next batch of + // items in the list, call this operation again, adding the next token to the + // call. To get all of the items in the list, keep calling this operation with + // each subsequent next token that is returned, until no more next tokens are + // returned. + NextToken *string `locationName:"nextToken" type:"string"` + + // Specifies the sort order for the list of returned reports. Valid values are: // - // ProjectName is a required field - ProjectName *string `locationName:"projectName" min:"1" type:"string" required:"true"` + // * ASCENDING: return reports in chronological order based on their creation + // date. + // + // * DESCENDING: return reports in the reverse chronological order based + // on their creation date. + SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` } // String returns the string representation -func (s InvalidateProjectCacheInput) String() string { +func (s ListReportsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InvalidateProjectCacheInput) GoString() string { +func (s ListReportsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *InvalidateProjectCacheInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InvalidateProjectCacheInput"} - if s.ProjectName == nil { - invalidParams.Add(request.NewErrParamRequired("ProjectName")) - } - if s.ProjectName != nil && len(*s.ProjectName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ProjectName", 1)) +func (s *ListReportsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListReportsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -5422,68 +9049,120 @@ func (s *InvalidateProjectCacheInput) Validate() error { return nil } -// SetProjectName sets the ProjectName field's value. -func (s *InvalidateProjectCacheInput) SetProjectName(v string) *InvalidateProjectCacheInput { - s.ProjectName = &v +// SetFilter sets the Filter field's value. +func (s *ListReportsInput) SetFilter(v *ReportFilter) *ListReportsInput { + s.Filter = v return s } -type InvalidateProjectCacheOutput struct { +// SetMaxResults sets the MaxResults field's value. +func (s *ListReportsInput) SetMaxResults(v int64) *ListReportsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListReportsInput) SetNextToken(v string) *ListReportsInput { + s.NextToken = &v + return s +} + +// SetSortOrder sets the SortOrder field's value. +func (s *ListReportsInput) SetSortOrder(v string) *ListReportsInput { + s.SortOrder = &v + return s +} + +type ListReportsOutput struct { _ struct{} `type:"structure"` + + // During a previous call, the maximum number of items that can be returned + // is the value specified in maxResults. If there more items in the list, then + // a unique string called a nextToken is returned. To get the next batch of + // items in the list, call this operation again, adding the next token to the + // call. To get all of the items in the list, keep calling this operation with + // each subsequent next token that is returned, until no more next tokens are + // returned. 
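Per the sortOrder documentation for ListReports just above, DESCENDING returns reports in reverse chronological order of creation. A small sketch that fetches a single page of up to 50 of the newest report ARNs; the page size and helper name are illustrative.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// firstPageOfReports fetches one page of up to 50 report ARNs,
// newest first per the DESCENDING sort order.
func firstPageOfReports(svc *codebuild.CodeBuild) ([]*string, error) {
	out, err := svc.ListReports(&codebuild.ListReportsInput{
		MaxResults: aws.Int64(50),
		SortOrder:  aws.String("DESCENDING"),
	})
	if err != nil {
		return nil, err
	}
	return out.Reports, nil
}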
+ NextToken *string `locationName:"nextToken" type:"string"` + + // The list of returned ARNs for the reports in the current AWS account. + Reports []*string `locationName:"reports" min:"1" type:"list"` } // String returns the string representation -func (s InvalidateProjectCacheOutput) String() string { +func (s ListReportsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InvalidateProjectCacheOutput) GoString() string { +func (s ListReportsOutput) GoString() string { return s.String() } -type ListBuildsForProjectInput struct { +// SetNextToken sets the NextToken field's value. +func (s *ListReportsOutput) SetNextToken(v string) *ListReportsOutput { + s.NextToken = &v + return s +} + +// SetReports sets the Reports field's value. +func (s *ListReportsOutput) SetReports(v []*string) *ListReportsOutput { + s.Reports = v + return s +} + +type ListSharedProjectsInput struct { _ struct{} `type:"structure"` - // During a previous call, if there are more than 100 items in the list, only - // the first 100 items are returned, along with a unique string called a nextToken. - // To get the next batch of items in the list, call this operation again, adding - // the next token to the call. To get all of the items in the list, keep calling - // this operation with each subsequent next token that is returned, until no - // more next tokens are returned. - NextToken *string `locationName:"nextToken" type:"string"` + // The maximum number of paginated shared build projects returned per response. + // Use nextToken to iterate pages in the list of returned Project objects. The + // default value is 100. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` - // The name of the AWS CodeBuild project. + // During a previous call, the maximum number of items that can be returned + // is the value specified in maxResults. If there more items in the list, then + // a unique string called a nextToken is returned. To get the next batch of + // items in the list, call this operation again, adding the next token to the + // call. To get all of the items in the list, keep calling this operation with + // each subsequent next token that is returned, until no more next tokens are + // returned. + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // The criterion to be used to list build projects shared with the current AWS + // account or user. Valid values include: // - // ProjectName is a required field - ProjectName *string `locationName:"projectName" min:"1" type:"string" required:"true"` + // * ARN: List based on the ARN. + // + // * MODIFIED_TIME: List based on when information about the shared project + // was last changed. + SortBy *string `locationName:"sortBy" type:"string" enum:"SharedResourceSortByType"` - // The order to list build IDs. Valid values include: + // The order in which to list shared build projects. Valid values include: // - // * ASCENDING: List the build IDs in ascending order by build ID. + // * ASCENDING: List in ascending order. // - // * DESCENDING: List the build IDs in descending order by build ID. + // * DESCENDING: List in descending order. 
SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` } // String returns the string representation -func (s ListBuildsForProjectInput) String() string { +func (s ListSharedProjectsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBuildsForProjectInput) GoString() string { +func (s ListSharedProjectsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListBuildsForProjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBuildsForProjectInput"} - if s.ProjectName == nil { - invalidParams.Add(request.NewErrParamRequired("ProjectName")) +func (s *ListSharedProjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSharedProjectsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.ProjectName != nil && len(*s.ProjectName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ProjectName", 1)) + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } if invalidParams.Len() > 0 { @@ -5492,223 +9171,264 @@ func (s *ListBuildsForProjectInput) Validate() error { return nil } +// SetMaxResults sets the MaxResults field's value. +func (s *ListSharedProjectsInput) SetMaxResults(v int64) *ListSharedProjectsInput { + s.MaxResults = &v + return s +} + // SetNextToken sets the NextToken field's value. -func (s *ListBuildsForProjectInput) SetNextToken(v string) *ListBuildsForProjectInput { +func (s *ListSharedProjectsInput) SetNextToken(v string) *ListSharedProjectsInput { s.NextToken = &v return s } -// SetProjectName sets the ProjectName field's value. -func (s *ListBuildsForProjectInput) SetProjectName(v string) *ListBuildsForProjectInput { - s.ProjectName = &v +// SetSortBy sets the SortBy field's value. +func (s *ListSharedProjectsInput) SetSortBy(v string) *ListSharedProjectsInput { + s.SortBy = &v return s } // SetSortOrder sets the SortOrder field's value. -func (s *ListBuildsForProjectInput) SetSortOrder(v string) *ListBuildsForProjectInput { +func (s *ListSharedProjectsInput) SetSortOrder(v string) *ListSharedProjectsInput { s.SortOrder = &v return s } -type ListBuildsForProjectOutput struct { +type ListSharedProjectsOutput struct { _ struct{} `type:"structure"` - // A list of build IDs for the specified build project, with each build ID representing - // a single build. - Ids []*string `locationName:"ids" min:"1" type:"list"` - - // If there are more than 100 items in the list, only the first 100 items are - // returned, along with a unique string called a nextToken. To get the next - // batch of items in the list, call this operation again, adding the next token - // to the call. + // During a previous call, the maximum number of items that can be returned + // is the value specified in maxResults. If there more items in the list, then + // a unique string called a nextToken is returned. To get the next batch of + // items in the list, call this operation again, adding the next token to the + // call. To get all of the items in the list, keep calling this operation with + // each subsequent next token that is returned, until no more next tokens are + // returned. NextToken *string `locationName:"nextToken" type:"string"` + + // The list of ARNs for the build projects shared with the current AWS account + // or user. 
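Shared projects follow the same token contract; the sketch below sorts by when the shared project was last modified, using the documented SharedResourceSortByType value "MODIFIED_TIME". The helper name is illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// printSharedProjects prints the ARNs of build projects shared with the
// current account, ordered by last modification time, most recent first.
func printSharedProjects(svc *codebuild.CodeBuild) error {
	input := &codebuild.ListSharedProjectsInput{
		SortBy:    aws.String("MODIFIED_TIME"),
		SortOrder: aws.String("DESCENDING"),
	}
	for {
		out, err := svc.ListSharedProjects(input)
		if err != nil {
			return err
		}
		for _, arn := range out.Projects {
			fmt.Println(aws.StringValue(arn))
		}
		if out.NextToken == nil {
			return nil
		}
		input.NextToken = out.NextToken
	}
}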
+ Projects []*string `locationName:"projects" min:"1" type:"list"` } // String returns the string representation -func (s ListBuildsForProjectOutput) String() string { +func (s ListSharedProjectsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBuildsForProjectOutput) GoString() string { +func (s ListSharedProjectsOutput) GoString() string { return s.String() } -// SetIds sets the Ids field's value. -func (s *ListBuildsForProjectOutput) SetIds(v []*string) *ListBuildsForProjectOutput { - s.Ids = v +// SetNextToken sets the NextToken field's value. +func (s *ListSharedProjectsOutput) SetNextToken(v string) *ListSharedProjectsOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListBuildsForProjectOutput) SetNextToken(v string) *ListBuildsForProjectOutput { - s.NextToken = &v +// SetProjects sets the Projects field's value. +func (s *ListSharedProjectsOutput) SetProjects(v []*string) *ListSharedProjectsOutput { + s.Projects = v return s } -type ListBuildsInput struct { +type ListSharedReportGroupsInput struct { _ struct{} `type:"structure"` - // During a previous call, if there are more than 100 items in the list, only - // the first 100 items are returned, along with a unique string called a nextToken. - // To get the next batch of items in the list, call this operation again, adding - // the next token to the call. To get all of the items in the list, keep calling - // this operation with each subsequent next token that is returned, until no - // more next tokens are returned. + // The maximum number of paginated shared report groups per response. Use nextToken + // to iterate pages in the list of returned ReportGroup objects. The default + // value is 100. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // During a previous call, the maximum number of items that can be returned + // is the value specified in maxResults. If there more items in the list, then + // a unique string called a nextToken is returned. To get the next batch of + // items in the list, call this operation again, adding the next token to the + // call. To get all of the items in the list, keep calling this operation with + // each subsequent next token that is returned, until no more next tokens are + // returned. NextToken *string `locationName:"nextToken" type:"string"` - // The order to list build IDs. Valid values include: + // The criterion to be used to list report groups shared with the current AWS + // account or user. Valid values include: + // + // * ARN: List based on the ARN. + // + // * MODIFIED_TIME: List based on when information about the shared report + // group was last changed. + SortBy *string `locationName:"sortBy" type:"string" enum:"SharedResourceSortByType"` + + // The order in which to list shared report groups. Valid values include: // - // * ASCENDING: List the build IDs in ascending order by build ID. + // * ASCENDING: List in ascending order. // - // * DESCENDING: List the build IDs in descending order by build ID. + // * DESCENDING: List in descending order. 
SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` } // String returns the string representation -func (s ListBuildsInput) String() string { +func (s ListSharedReportGroupsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBuildsInput) GoString() string { +func (s ListSharedReportGroupsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListSharedReportGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSharedReportGroupsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListSharedReportGroupsInput) SetMaxResults(v int64) *ListSharedReportGroupsInput { + s.MaxResults = &v + return s +} + // SetNextToken sets the NextToken field's value. -func (s *ListBuildsInput) SetNextToken(v string) *ListBuildsInput { +func (s *ListSharedReportGroupsInput) SetNextToken(v string) *ListSharedReportGroupsInput { s.NextToken = &v return s } +// SetSortBy sets the SortBy field's value. +func (s *ListSharedReportGroupsInput) SetSortBy(v string) *ListSharedReportGroupsInput { + s.SortBy = &v + return s +} + // SetSortOrder sets the SortOrder field's value. -func (s *ListBuildsInput) SetSortOrder(v string) *ListBuildsInput { +func (s *ListSharedReportGroupsInput) SetSortOrder(v string) *ListSharedReportGroupsInput { s.SortOrder = &v return s } -type ListBuildsOutput struct { +type ListSharedReportGroupsOutput struct { _ struct{} `type:"structure"` - // A list of build IDs, with each build ID representing a single build. - Ids []*string `locationName:"ids" min:"1" type:"list"` - - // If there are more than 100 items in the list, only the first 100 items are - // returned, along with a unique string called a nextToken. To get the next - // batch of items in the list, call this operation again, adding the next token - // to the call. + // During a previous call, the maximum number of items that can be returned + // is the value specified in maxResults. If there more items in the list, then + // a unique string called a nextToken is returned. To get the next batch of + // items in the list, call this operation again, adding the next token to the + // call. To get all of the items in the list, keep calling this operation with + // each subsequent next token that is returned, until no more next tokens are + // returned. NextToken *string `locationName:"nextToken" type:"string"` + + // The list of ARNs for the report groups shared with the current AWS account + // or user. + ReportGroups []*string `locationName:"reportGroups" min:"1" type:"list"` } // String returns the string representation -func (s ListBuildsOutput) String() string { +func (s ListSharedReportGroupsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBuildsOutput) GoString() string { +func (s ListSharedReportGroupsOutput) GoString() string { return s.String() } -// SetIds sets the Ids field's value. -func (s *ListBuildsOutput) SetIds(v []*string) *ListBuildsOutput { - s.Ids = v +// SetNextToken sets the NextToken field's value. 
+func (s *ListSharedReportGroupsOutput) SetNextToken(v string) *ListSharedReportGroupsOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListBuildsOutput) SetNextToken(v string) *ListBuildsOutput { - s.NextToken = &v +// SetReportGroups sets the ReportGroups field's value. +func (s *ListSharedReportGroupsOutput) SetReportGroups(v []*string) *ListSharedReportGroupsOutput { + s.ReportGroups = v return s } -type ListCuratedEnvironmentImagesInput struct { +type ListSourceCredentialsInput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s ListCuratedEnvironmentImagesInput) String() string { +func (s ListSourceCredentialsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListCuratedEnvironmentImagesInput) GoString() string { +func (s ListSourceCredentialsInput) GoString() string { return s.String() } -type ListCuratedEnvironmentImagesOutput struct { +type ListSourceCredentialsOutput struct { _ struct{} `type:"structure"` - // Information about supported platforms for Docker images that are managed - // by AWS CodeBuild. - Platforms []*EnvironmentPlatform `locationName:"platforms" type:"list"` + // A list of SourceCredentialsInfo objects. Each SourceCredentialsInfo object + // includes the authentication type, token ARN, and type of source provider + // for one set of credentials. + SourceCredentialsInfos []*SourceCredentialsInfo `locationName:"sourceCredentialsInfos" type:"list"` } // String returns the string representation -func (s ListCuratedEnvironmentImagesOutput) String() string { +func (s ListSourceCredentialsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListCuratedEnvironmentImagesOutput) GoString() string { +func (s ListSourceCredentialsOutput) GoString() string { return s.String() } -// SetPlatforms sets the Platforms field's value. -func (s *ListCuratedEnvironmentImagesOutput) SetPlatforms(v []*EnvironmentPlatform) *ListCuratedEnvironmentImagesOutput { - s.Platforms = v +// SetSourceCredentialsInfos sets the SourceCredentialsInfos field's value. +func (s *ListSourceCredentialsOutput) SetSourceCredentialsInfos(v []*SourceCredentialsInfo) *ListSourceCredentialsOutput { + s.SourceCredentialsInfos = v return s } -type ListProjectsInput struct { +// Information about logs for a build project. These can be logs in Amazon CloudWatch +// Logs, built in a specified S3 bucket, or both. +type LogsConfig struct { _ struct{} `type:"structure"` - // During a previous call, if there are more than 100 items in the list, only - // the first 100 items are returned, along with a unique string called a nextToken. - // To get the next batch of items in the list, call this operation again, adding - // the next token to the call. To get all of the items in the list, keep calling - // this operation with each subsequent next token that is returned, until no - // more next tokens are returned. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` - - // The criterion to be used to list build project names. Valid values include: - // - // * CREATED_TIME: List based on when each build project was created. - // - // * LAST_MODIFIED_TIME: List based on when information about each build - // project was last changed. - // - // * NAME: List based on each build project's name. 
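ListSourceCredentials takes no parameters and returns one SourceCredentialsInfo per stored credential. A minimal sketch that prints the token ARN, authentication type, and source provider the summary above describes; the Arn, AuthType, and ServerType field names are assumed from the SourceCredentialsInfo shape elsewhere in this file, and the helper name is illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// printSourceCredentials lists every stored source-provider credential:
// its ARN, auth type (for example OAUTH or BASIC_AUTH), and server type
// (for example GITHUB or BITBUCKET).
func printSourceCredentials(svc *codebuild.CodeBuild) error {
	out, err := svc.ListSourceCredentials(&codebuild.ListSourceCredentialsInput{})
	if err != nil {
		return err
	}
	for _, info := range out.SourceCredentialsInfos {
		fmt.Printf("%s\t%s\t%s\n",
			aws.StringValue(info.Arn),
			aws.StringValue(info.AuthType),
			aws.StringValue(info.ServerType))
	}
	return nil
}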
- // - // Use sortOrder to specify in what order to list the build project names based - // on the preceding criteria. - SortBy *string `locationName:"sortBy" type:"string" enum:"ProjectSortByType"` + // Information about Amazon CloudWatch Logs for a build project. Amazon CloudWatch + // Logs are enabled by default. + CloudWatchLogs *CloudWatchLogsConfig `locationName:"cloudWatchLogs" type:"structure"` - // The order in which to list build projects. Valid values include: - // - // * ASCENDING: List in ascending order. - // - // * DESCENDING: List in descending order. - // - // Use sortBy to specify the criterion to be used to list build project names. - SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` + // Information about logs built to an S3 bucket for a build project. S3 logs + // are not enabled by default. + S3Logs *S3LogsConfig `locationName:"s3Logs" type:"structure"` } // String returns the string representation -func (s ListProjectsInput) String() string { +func (s LogsConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListProjectsInput) GoString() string { +func (s LogsConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListProjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListProjectsInput"} - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) +func (s *LogsConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LogsConfig"} + if s.CloudWatchLogs != nil { + if err := s.CloudWatchLogs.Validate(); err != nil { + invalidParams.AddNested("CloudWatchLogs", err.(request.ErrInvalidParams)) + } + } + if s.S3Logs != nil { + if err := s.S3Logs.Validate(); err != nil { + invalidParams.AddNested("S3Logs", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -5717,587 +9437,645 @@ func (s *ListProjectsInput) Validate() error { return nil } -// SetNextToken sets the NextToken field's value. -func (s *ListProjectsInput) SetNextToken(v string) *ListProjectsInput { - s.NextToken = &v - return s -} - -// SetSortBy sets the SortBy field's value. -func (s *ListProjectsInput) SetSortBy(v string) *ListProjectsInput { - s.SortBy = &v +// SetCloudWatchLogs sets the CloudWatchLogs field's value. +func (s *LogsConfig) SetCloudWatchLogs(v *CloudWatchLogsConfig) *LogsConfig { + s.CloudWatchLogs = v return s } -// SetSortOrder sets the SortOrder field's value. -func (s *ListProjectsInput) SetSortOrder(v string) *ListProjectsInput { - s.SortOrder = &v +// SetS3Logs sets the S3Logs field's value. +func (s *LogsConfig) SetS3Logs(v *S3LogsConfig) *LogsConfig { + s.S3Logs = v return s } -type ListProjectsOutput struct { +// Information about build logs in Amazon CloudWatch Logs. +type LogsLocation struct { _ struct{} `type:"structure"` - // If there are more than 100 items in the list, only the first 100 items are - // returned, along with a unique string called a nextToken. To get the next - // batch of items in the list, call this operation again, adding the next token - // to the call. - NextToken *string `locationName:"nextToken" type:"string"` - - // The list of build project names, with each build project name representing - // a single build project. 
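LogsConfig groups the CloudWatch Logs and S3 settings, and its Validate delegates to whichever of the two is present. A sketch of building one, assuming the CloudWatchLogsConfig and S3LogsConfig shapes defined elsewhere in this file (a required Status plus optional group/stream or bucket location); the log group and stream names are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// buildLogsConfig enables CloudWatch Logs and leaves S3 logs disabled,
// mirroring the defaults described above.
func buildLogsConfig() *codebuild.LogsConfig {
	cfg := &codebuild.LogsConfig{
		CloudWatchLogs: &codebuild.CloudWatchLogsConfig{
			Status:     aws.String("ENABLED"),
			GroupName:  aws.String("my-codebuild-logs"), // placeholder log group
			StreamName: aws.String("release-builds"),    // placeholder stream prefix
		},
		S3Logs: &codebuild.S3LogsConfig{
			Status: aws.String("DISABLED"),
		},
	}
	// Validate recurses into the nested CloudWatchLogs and S3Logs configs.
	if err := cfg.Validate(); err != nil {
		log.Fatal(err)
	}
	return cfg
}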
- Projects []*string `locationName:"projects" min:"1" type:"list"` -} - -// String returns the string representation -func (s ListProjectsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListProjectsOutput) GoString() string { - return s.String() -} + // Information about Amazon CloudWatch Logs for a build project. + CloudWatchLogs *CloudWatchLogsConfig `locationName:"cloudWatchLogs" type:"structure"` -// SetNextToken sets the NextToken field's value. -func (s *ListProjectsOutput) SetNextToken(v string) *ListProjectsOutput { - s.NextToken = &v - return s -} + // The ARN of Amazon CloudWatch Logs for a build project. Its format is arn:${Partition}:logs:${Region}:${Account}:log-group:${LogGroupName}:log-stream:${LogStreamName}. + // For more information, see Resources Defined by Amazon CloudWatch Logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatchlogs.html#amazoncloudwatchlogs-resources-for-iam-policies). + CloudWatchLogsArn *string `locationName:"cloudWatchLogsArn" type:"string"` -// SetProjects sets the Projects field's value. -func (s *ListProjectsOutput) SetProjects(v []*string) *ListProjectsOutput { - s.Projects = v - return s -} + // The URL to an individual build log in Amazon CloudWatch Logs. + DeepLink *string `locationName:"deepLink" type:"string"` -type ListReportGroupsInput struct { - _ struct{} `type:"structure"` + // The name of the Amazon CloudWatch Logs group for the build logs. + GroupName *string `locationName:"groupName" type:"string"` - // The maximum number of paginated report groups returned per response. Use - // nextToken to iterate pages in the list of returned ReportGroup objects. The - // default value is 100. - MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + // The URL to a build log in an S3 bucket. + S3DeepLink *string `locationName:"s3DeepLink" type:"string"` - // During a previous call, the maximum number of items that can be returned - // is the value specified in maxResults. If there more items in the list, then - // a unique string called a nextToken is returned. To get the next batch of - // items in the list, call this operation again, adding the next token to the - // call. To get all of the items in the list, keep calling this operation with - // each subsequent next token that is returned, until no more next tokens are - // returned. - NextToken *string `locationName:"nextToken" type:"string"` + // Information about S3 logs for a build project. + S3Logs *S3LogsConfig `locationName:"s3Logs" type:"structure"` - // The criterion to be used to list build report groups. Valid values include: - // - // * CREATED_TIME: List based on when each report group was created. - // - // * LAST_MODIFIED_TIME: List based on when each report group was last changed. - // - // * NAME: List based on each report group's name. - SortBy *string `locationName:"sortBy" type:"string" enum:"ReportGroupSortByType"` + // The ARN of S3 logs for a build project. Its format is arn:${Partition}:s3:::${BucketName}/${ObjectName}. + // For more information, see Resources Defined by Amazon S3 (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html#amazons3-resources-for-iam-policies). + S3LogsArn *string `locationName:"s3LogsArn" type:"string"` - // Used to specify the order to sort the list of returned report groups. Valid - // values are ASCENDING and DESCENDING. 
- SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` + // The name of the Amazon CloudWatch Logs stream for the build logs. + StreamName *string `locationName:"streamName" type:"string"` } // String returns the string representation -func (s ListReportGroupsInput) String() string { +func (s LogsLocation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListReportGroupsInput) GoString() string { +func (s LogsLocation) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListReportGroupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListReportGroupsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } +// SetCloudWatchLogs sets the CloudWatchLogs field's value. +func (s *LogsLocation) SetCloudWatchLogs(v *CloudWatchLogsConfig) *LogsLocation { + s.CloudWatchLogs = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCloudWatchLogsArn sets the CloudWatchLogsArn field's value. +func (s *LogsLocation) SetCloudWatchLogsArn(v string) *LogsLocation { + s.CloudWatchLogsArn = &v + return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListReportGroupsInput) SetMaxResults(v int64) *ListReportGroupsInput { - s.MaxResults = &v +// SetDeepLink sets the DeepLink field's value. +func (s *LogsLocation) SetDeepLink(v string) *LogsLocation { + s.DeepLink = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListReportGroupsInput) SetNextToken(v string) *ListReportGroupsInput { - s.NextToken = &v +// SetGroupName sets the GroupName field's value. +func (s *LogsLocation) SetGroupName(v string) *LogsLocation { + s.GroupName = &v return s } -// SetSortBy sets the SortBy field's value. -func (s *ListReportGroupsInput) SetSortBy(v string) *ListReportGroupsInput { - s.SortBy = &v +// SetS3DeepLink sets the S3DeepLink field's value. +func (s *LogsLocation) SetS3DeepLink(v string) *LogsLocation { + s.S3DeepLink = &v return s } -// SetSortOrder sets the SortOrder field's value. -func (s *ListReportGroupsInput) SetSortOrder(v string) *ListReportGroupsInput { - s.SortOrder = &v +// SetS3Logs sets the S3Logs field's value. +func (s *LogsLocation) SetS3Logs(v *S3LogsConfig) *LogsLocation { + s.S3Logs = v return s } -type ListReportGroupsOutput struct { +// SetS3LogsArn sets the S3LogsArn field's value. +func (s *LogsLocation) SetS3LogsArn(v string) *LogsLocation { + s.S3LogsArn = &v + return s +} + +// SetStreamName sets the StreamName field's value. +func (s *LogsLocation) SetStreamName(v string) *LogsLocation { + s.StreamName = &v + return s +} + +// Describes a network interface. +type NetworkInterface struct { _ struct{} `type:"structure"` - // During a previous call, the maximum number of items that can be returned - // is the value specified in maxResults. If there more items in the list, then - // a unique string called a nextToken is returned. To get the next batch of - // items in the list, call this operation again, adding the next token to the - // call. To get all of the items in the list, keep calling this operation with - // each subsequent next token that is returned, until no more next tokens are - // returned. - NextToken *string `locationName:"nextToken" type:"string"` + // The ID of the network interface. 
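LogsLocation is read-only output, typically returned as part of a build's details. A small helper that prefers the precomputed console deep link and falls back to the group/stream pair, using only the fields defined above; the helper name is illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codebuild"
)

// describeLogLocation returns a human-readable pointer to a build's
// CloudWatch Logs, preferring the ready-made console deep link.
func describeLogLocation(loc *codebuild.LogsLocation) string {
	if loc == nil {
		return "no logs"
	}
	if link := aws.StringValue(loc.DeepLink); link != "" {
		return link
	}
	return fmt.Sprintf("log group %q, stream %q",
		aws.StringValue(loc.GroupName), aws.StringValue(loc.StreamName))
}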
+ NetworkInterfaceId *string `locationName:"networkInterfaceId" min:"1" type:"string"` - // The list of ARNs for the report groups in the current AWS account. - ReportGroups []*string `locationName:"reportGroups" min:"1" type:"list"` + // The ID of the subnet. + SubnetId *string `locationName:"subnetId" min:"1" type:"string"` } // String returns the string representation -func (s ListReportGroupsOutput) String() string { +func (s NetworkInterface) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListReportGroupsOutput) GoString() string { +func (s NetworkInterface) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListReportGroupsOutput) SetNextToken(v string) *ListReportGroupsOutput { - s.NextToken = &v +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *NetworkInterface) SetNetworkInterfaceId(v string) *NetworkInterface { + s.NetworkInterfaceId = &v return s } -// SetReportGroups sets the ReportGroups field's value. -func (s *ListReportGroupsOutput) SetReportGroups(v []*string) *ListReportGroupsOutput { - s.ReportGroups = v +// SetSubnetId sets the SubnetId field's value. +func (s *NetworkInterface) SetSubnetId(v string) *NetworkInterface { + s.SubnetId = &v return s } -type ListReportsForReportGroupInput struct { - _ struct{} `type:"structure"` - - // A ReportFilter object used to filter the returned reports. - Filter *ReportFilter `locationName:"filter" type:"structure"` - - // The maximum number of paginated reports in this report group returned per - // response. Use nextToken to iterate pages in the list of returned Report objects. - // The default value is 100. - MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` - - // During a previous call, the maximum number of items that can be returned - // is the value specified in maxResults. If there more items in the list, then - // a unique string called a nextToken is returned. To get the next batch of - // items in the list, call this operation again, adding the next token to the - // call. To get all of the items in the list, keep calling this operation with - // each subsequent next token that is returned, until no more next tokens are - // returned. - NextToken *string `locationName:"nextToken" type:"string"` - - // The ARN of the report group for which you want to return report ARNs. - // - // ReportGroupArn is a required field - ReportGroupArn *string `locationName:"reportGroupArn" type:"string" required:"true"` +// There was a problem with the underlying OAuth provider. +type OAuthProviderException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // Use to specify whether the results are returned in ascending or descending - // order. - SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` + Message_ *string `locationName:"message" type:"string"` } // String returns the string representation -func (s ListReportsForReportGroupInput) String() string { +func (s OAuthProviderException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListReportsForReportGroupInput) GoString() string { +func (s OAuthProviderException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListReportsForReportGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListReportsForReportGroupInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.ReportGroupArn == nil { - invalidParams.Add(request.NewErrParamRequired("ReportGroupArn")) +func newErrorOAuthProviderException(v protocol.ResponseMetadata) error { + return &OAuthProviderException{ + RespMetadata: v, } +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// Code returns the exception type name. +func (s *OAuthProviderException) Code() string { + return "OAuthProviderException" } -// SetFilter sets the Filter field's value. -func (s *ListReportsForReportGroupInput) SetFilter(v *ReportFilter) *ListReportsForReportGroupInput { - s.Filter = v - return s +// Message returns the exception's message. +func (s *OAuthProviderException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -// SetMaxResults sets the MaxResults field's value. -func (s *ListReportsForReportGroupInput) SetMaxResults(v int64) *ListReportsForReportGroupInput { - s.MaxResults = &v - return s +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *OAuthProviderException) OrigErr() error { + return nil } -// SetNextToken sets the NextToken field's value. -func (s *ListReportsForReportGroupInput) SetNextToken(v string) *ListReportsForReportGroupInput { - s.NextToken = &v - return s +func (s *OAuthProviderException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } -// SetReportGroupArn sets the ReportGroupArn field's value. -func (s *ListReportsForReportGroupInput) SetReportGroupArn(v string) *ListReportsForReportGroupInput { - s.ReportGroupArn = &v - return s +// Status code returns the HTTP status code for the request's response error. +func (s *OAuthProviderException) StatusCode() int { + return s.RespMetadata.StatusCode } -// SetSortOrder sets the SortOrder field's value. -func (s *ListReportsForReportGroupInput) SetSortOrder(v string) *ListReportsForReportGroupInput { - s.SortOrder = &v - return s +// RequestID returns the service's response RequestID for request. +func (s *OAuthProviderException) RequestID() string { + return s.RespMetadata.RequestID } -type ListReportsForReportGroupOutput struct { +// Additional information about a build phase that has an error. You can use +// this information for troubleshooting. +type PhaseContext struct { _ struct{} `type:"structure"` - // During a previous call, the maximum number of items that can be returned - // is the value specified in maxResults. If there more items in the list, then - // a unique string called a nextToken is returned. To get the next batch of - // items in the list, call this operation again, adding the next token to the - // call. To get all of the items in the list, keep calling this operation with - // each subsequent next token that is returned, until no more next tokens are - // returned. - NextToken *string `locationName:"nextToken" type:"string"` + // An explanation of the build phase's context. This might include a command + // ID and an exit code. + Message *string `locationName:"message" type:"string"` - // The list of returned report group ARNs. - Reports []*string `locationName:"reports" min:"1" type:"list"` + // The status code for the context of the build phase. 
+ StatusCode *string `locationName:"statusCode" type:"string"` } // String returns the string representation -func (s ListReportsForReportGroupOutput) String() string { +func (s PhaseContext) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation -func (s ListReportsForReportGroupOutput) GoString() string { - return s.String() -} +// GoString returns the string representation +func (s PhaseContext) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *PhaseContext) SetMessage(v string) *PhaseContext { + s.Message = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *PhaseContext) SetStatusCode(v string) *PhaseContext { + s.StatusCode = &v + return s +} + +// Information about a build project. +type Project struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the build project. + Arn *string `locationName:"arn" type:"string"` + + // Information about the build output artifacts for the build project. + Artifacts *ProjectArtifacts `locationName:"artifacts" type:"structure"` + + // Information about the build badge for the build project. + Badge *ProjectBadge `locationName:"badge" type:"structure"` + + // A ProjectBuildBatchConfig object that defines the batch build options for + // the project. + BuildBatchConfig *ProjectBuildBatchConfig `locationName:"buildBatchConfig" type:"structure"` + + // Information about the cache for the build project. + Cache *ProjectCache `locationName:"cache" type:"structure"` + + // When the build project was created, expressed in Unix time format. + Created *time.Time `locationName:"created" type:"timestamp"` + + // A description that makes the build project easy to identify. + Description *string `locationName:"description" type:"string"` + + // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be + // used for encrypting the build output artifacts. + // + // You can use a cross-account KMS key to encrypt the build output artifacts + // if your service role has permission to that key. + // + // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, + // the CMK's alias (using the format alias/). + EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` + + // Information about the build environment for this build project. + Environment *ProjectEnvironment `locationName:"environment" type:"structure"` + + // An array of ProjectFileSystemLocation objects for a CodeBuild build project. + // A ProjectFileSystemLocation object specifies the identifier, location, mountOptions, + // mountPoint, and type of a file system created using Amazon Elastic File System. + FileSystemLocations []*ProjectFileSystemLocation `locationName:"fileSystemLocations" type:"list"` + + // When the build project's settings were last modified, expressed in Unix time + // format. + LastModified *time.Time `locationName:"lastModified" type:"timestamp"` + + // Information about logs for the build project. A project can create logs in + // Amazon CloudWatch Logs, an S3 bucket, or both. + LogsConfig *LogsConfig `locationName:"logsConfig" type:"structure"` + + // The name of the build project. + Name *string `locationName:"name" min:"2" type:"string"` -// SetNextToken sets the NextToken field's value. 
-func (s *ListReportsForReportGroupOutput) SetNextToken(v string) *ListReportsForReportGroupOutput { - s.NextToken = &v - return s -} + // The number of minutes a build is allowed to be queued before it times out. + QueuedTimeoutInMinutes *int64 `locationName:"queuedTimeoutInMinutes" min:"5" type:"integer"` -// SetReports sets the Reports field's value. -func (s *ListReportsForReportGroupOutput) SetReports(v []*string) *ListReportsForReportGroupOutput { - s.Reports = v - return s -} + // An array of ProjectArtifacts objects. + SecondaryArtifacts []*ProjectArtifacts `locationName:"secondaryArtifacts" type:"list"` -type ListReportsInput struct { - _ struct{} `type:"structure"` + // An array of ProjectSourceVersion objects. If secondarySourceVersions is specified + // at the build level, then they take over these secondarySourceVersions (at + // the project level). + SecondarySourceVersions []*ProjectSourceVersion `locationName:"secondarySourceVersions" type:"list"` - // A ReportFilter object used to filter the returned reports. - Filter *ReportFilter `locationName:"filter" type:"structure"` + // An array of ProjectSource objects. + SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` - // The maximum number of paginated reports returned per response. Use nextToken - // to iterate pages in the list of returned Report objects. The default value - // is 100. - MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + // The ARN of the AWS Identity and Access Management (IAM) role that enables + // AWS CodeBuild to interact with dependent AWS services on behalf of the AWS + // account. + ServiceRole *string `locationName:"serviceRole" min:"1" type:"string"` - // During a previous call, the maximum number of items that can be returned - // is the value specified in maxResults. If there more items in the list, then - // a unique string called a nextToken is returned. To get the next batch of - // items in the list, call this operation again, adding the next token to the - // call. To get all of the items in the list, keep calling this operation with - // each subsequent next token that is returned, until no more next tokens are - // returned. - NextToken *string `locationName:"nextToken" type:"string"` + // Information about the build input source code for this build project. + Source *ProjectSource `locationName:"source" type:"structure"` - // Specifies the sort order for the list of returned reports. Valid values are: + // A version of the build input to be built for this project. If not specified, + // the latest version is used. If specified, it must be one of: // - // * ASCENDING: return reports in chronological order based on their creation - // date. + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. // - // * DESCENDING: return reports in the reverse chronological order based - // on their creation date. - SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` + // * For GitHub: the commit ID, pull request ID, branch name, or tag name + // that corresponds to the version of the source code you want to build. + // If a pull request ID is specified, it must use the format pr/pull-request-ID + // (for example pr/25). If a branch name is specified, the branch's HEAD + // commit ID is used. If not specified, the default branch's HEAD commit + // ID is used. + // + // * For Bitbucket: the commit ID, branch name, or tag name that corresponds + // to the version of the source code you want to build. 
If a branch name + // is specified, the branch's HEAD commit ID is used. If not specified, the + // default branch's HEAD commit ID is used. + // + // * For Amazon Simple Storage Service (Amazon S3): the version ID of the + // object that represents the build input ZIP file to use. + // + // If sourceVersion is specified at the build level, then that version takes + // precedence over this sourceVersion (at the project level). + // + // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) + // in the AWS CodeBuild User Guide. + SourceVersion *string `locationName:"sourceVersion" type:"string"` + + // A list of tag key and value pairs associated with this build project. + // + // These tags are available for use by AWS services that support AWS CodeBuild + // build project tags. + Tags []*Tag `locationName:"tags" type:"list"` + + // How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait + // before timing out any related build that did not get marked as completed. + // The default is 60 minutes. + TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" min:"5" type:"integer"` + + // Information about the VPC configuration that AWS CodeBuild accesses. + VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` + + // Information about a webhook that connects repository events to a build project + // in AWS CodeBuild. + Webhook *Webhook `locationName:"webhook" type:"structure"` } // String returns the string representation -func (s ListReportsInput) String() string { +func (s Project) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListReportsInput) GoString() string { +func (s Project) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListReportsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListReportsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetArn sets the Arn field's value. +func (s *Project) SetArn(v string) *Project { + s.Arn = &v + return s } -// SetFilter sets the Filter field's value. -func (s *ListReportsInput) SetFilter(v *ReportFilter) *ListReportsInput { - s.Filter = v +// SetArtifacts sets the Artifacts field's value. +func (s *Project) SetArtifacts(v *ProjectArtifacts) *Project { + s.Artifacts = v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListReportsInput) SetMaxResults(v int64) *ListReportsInput { - s.MaxResults = &v +// SetBadge sets the Badge field's value. +func (s *Project) SetBadge(v *ProjectBadge) *Project { + s.Badge = v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListReportsInput) SetNextToken(v string) *ListReportsInput { - s.NextToken = &v +// SetBuildBatchConfig sets the BuildBatchConfig field's value. +func (s *Project) SetBuildBatchConfig(v *ProjectBuildBatchConfig) *Project { + s.BuildBatchConfig = v return s } -// SetSortOrder sets the SortOrder field's value. -func (s *ListReportsInput) SetSortOrder(v string) *ListReportsInput { - s.SortOrder = &v +// SetCache sets the Cache field's value. 
+func (s *Project) SetCache(v *ProjectCache) *Project { + s.Cache = v return s } -type ListReportsOutput struct { - _ struct{} `type:"structure"` - - // During a previous call, the maximum number of items that can be returned - // is the value specified in maxResults. If there more items in the list, then - // a unique string called a nextToken is returned. To get the next batch of - // items in the list, call this operation again, adding the next token to the - // call. To get all of the items in the list, keep calling this operation with - // each subsequent next token that is returned, until no more next tokens are - // returned. - NextToken *string `locationName:"nextToken" type:"string"` - - // The list of returned ARNs for the reports in the current AWS account. - Reports []*string `locationName:"reports" min:"1" type:"list"` +// SetCreated sets the Created field's value. +func (s *Project) SetCreated(v time.Time) *Project { + s.Created = &v + return s } -// String returns the string representation -func (s ListReportsOutput) String() string { - return awsutil.Prettify(s) +// SetDescription sets the Description field's value. +func (s *Project) SetDescription(v string) *Project { + s.Description = &v + return s } -// GoString returns the string representation -func (s ListReportsOutput) GoString() string { - return s.String() +// SetEncryptionKey sets the EncryptionKey field's value. +func (s *Project) SetEncryptionKey(v string) *Project { + s.EncryptionKey = &v + return s } -// SetNextToken sets the NextToken field's value. -func (s *ListReportsOutput) SetNextToken(v string) *ListReportsOutput { - s.NextToken = &v +// SetEnvironment sets the Environment field's value. +func (s *Project) SetEnvironment(v *ProjectEnvironment) *Project { + s.Environment = v return s } -// SetReports sets the Reports field's value. -func (s *ListReportsOutput) SetReports(v []*string) *ListReportsOutput { - s.Reports = v +// SetFileSystemLocations sets the FileSystemLocations field's value. +func (s *Project) SetFileSystemLocations(v []*ProjectFileSystemLocation) *Project { + s.FileSystemLocations = v return s } -type ListSharedProjectsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of paginated shared build projects returned per response. - // Use nextToken to iterate pages in the list of returned Project objects. The - // default value is 100. - MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` - - // During a previous call, the maximum number of items that can be returned - // is the value specified in maxResults. If there more items in the list, then - // a unique string called a nextToken is returned. To get the next batch of - // items in the list, call this operation again, adding the next token to the - // call. To get all of the items in the list, keep calling this operation with - // each subsequent next token that is returned, until no more next tokens are - // returned. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` - - // The criterion to be used to list build projects shared with the current AWS - // account or user. Valid values include: - // - // * ARN: List based on the ARN. - // - // * MODIFIED_TIME: List based on when information about the shared project - // was last changed. - SortBy *string `locationName:"sortBy" type:"string" enum:"SharedResourceSortByType"` - - // The order in which to list shared build projects. Valid values include: - // - // * ASCENDING: List in ascending order. 
- // - // * DESCENDING: List in descending order. - SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"` +// SetLastModified sets the LastModified field's value. +func (s *Project) SetLastModified(v time.Time) *Project { + s.LastModified = &v + return s } -// String returns the string representation -func (s ListSharedProjectsInput) String() string { - return awsutil.Prettify(s) +// SetLogsConfig sets the LogsConfig field's value. +func (s *Project) SetLogsConfig(v *LogsConfig) *Project { + s.LogsConfig = v + return s } -// GoString returns the string representation -func (s ListSharedProjectsInput) GoString() string { - return s.String() +// SetName sets the Name field's value. +func (s *Project) SetName(v string) *Project { + s.Name = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListSharedProjectsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListSharedProjectsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } +// SetQueuedTimeoutInMinutes sets the QueuedTimeoutInMinutes field's value. +func (s *Project) SetQueuedTimeoutInMinutes(v int64) *Project { + s.QueuedTimeoutInMinutes = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSecondaryArtifacts sets the SecondaryArtifacts field's value. +func (s *Project) SetSecondaryArtifacts(v []*ProjectArtifacts) *Project { + s.SecondaryArtifacts = v + return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListSharedProjectsInput) SetMaxResults(v int64) *ListSharedProjectsInput { - s.MaxResults = &v +// SetSecondarySourceVersions sets the SecondarySourceVersions field's value. +func (s *Project) SetSecondarySourceVersions(v []*ProjectSourceVersion) *Project { + s.SecondarySourceVersions = v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListSharedProjectsInput) SetNextToken(v string) *ListSharedProjectsInput { - s.NextToken = &v +// SetSecondarySources sets the SecondarySources field's value. +func (s *Project) SetSecondarySources(v []*ProjectSource) *Project { + s.SecondarySources = v return s } -// SetSortBy sets the SortBy field's value. -func (s *ListSharedProjectsInput) SetSortBy(v string) *ListSharedProjectsInput { - s.SortBy = &v +// SetServiceRole sets the ServiceRole field's value. +func (s *Project) SetServiceRole(v string) *Project { + s.ServiceRole = &v return s } -// SetSortOrder sets the SortOrder field's value. -func (s *ListSharedProjectsInput) SetSortOrder(v string) *ListSharedProjectsInput { - s.SortOrder = &v +// SetSource sets the Source field's value. +func (s *Project) SetSource(v *ProjectSource) *Project { + s.Source = v return s } -type ListSharedProjectsOutput struct { - _ struct{} `type:"structure"` - - // During a previous call, the maximum number of items that can be returned - // is the value specified in maxResults. If there more items in the list, then - // a unique string called a nextToken is returned. To get the next batch of - // items in the list, call this operation again, adding the next token to the - // call. To get all of the items in the list, keep calling this operation with - // each subsequent next token that is returned, until no more next tokens are - // returned. 
- NextToken *string `locationName:"nextToken" type:"string"` - - // The list of ARNs for the build projects shared with the current AWS account - // or user. - Projects []*string `locationName:"projects" min:"1" type:"list"` +// SetSourceVersion sets the SourceVersion field's value. +func (s *Project) SetSourceVersion(v string) *Project { + s.SourceVersion = &v + return s } -// String returns the string representation -func (s ListSharedProjectsOutput) String() string { - return awsutil.Prettify(s) +// SetTags sets the Tags field's value. +func (s *Project) SetTags(v []*Tag) *Project { + s.Tags = v + return s } -// GoString returns the string representation -func (s ListSharedProjectsOutput) GoString() string { - return s.String() +// SetTimeoutInMinutes sets the TimeoutInMinutes field's value. +func (s *Project) SetTimeoutInMinutes(v int64) *Project { + s.TimeoutInMinutes = &v + return s } -// SetNextToken sets the NextToken field's value. -func (s *ListSharedProjectsOutput) SetNextToken(v string) *ListSharedProjectsOutput { - s.NextToken = &v +// SetVpcConfig sets the VpcConfig field's value. +func (s *Project) SetVpcConfig(v *VpcConfig) *Project { + s.VpcConfig = v return s } -// SetProjects sets the Projects field's value. -func (s *ListSharedProjectsOutput) SetProjects(v []*string) *ListSharedProjectsOutput { - s.Projects = v +// SetWebhook sets the Webhook field's value. +func (s *Project) SetWebhook(v *Webhook) *Project { + s.Webhook = v return s } -type ListSharedReportGroupsInput struct { +// Information about the build output artifacts for the build project. +type ProjectArtifacts struct { _ struct{} `type:"structure"` - // The maximum number of paginated shared report groups per response. Use nextToken - // to iterate pages in the list of returned ReportGroup objects. The default - // value is 100. - MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + // An identifier for this artifact definition. + ArtifactIdentifier *string `locationName:"artifactIdentifier" type:"string"` - // During a previous call, the maximum number of items that can be returned - // is the value specified in maxResults. If there more items in the list, then - // a unique string called a nextToken is returned. To get the next batch of - // items in the list, call this operation again, adding the next token to the - // call. To get all of the items in the list, keep calling this operation with - // each subsequent next token that is returned, until no more next tokens are - // returned. - NextToken *string `locationName:"nextToken" type:"string"` + // Set to true if you do not want your output artifacts encrypted. This option + // is valid only if your artifacts type is Amazon Simple Storage Service (Amazon + // S3). If this is set with another artifacts type, an invalidInputException + // is thrown. + EncryptionDisabled *bool `locationName:"encryptionDisabled" type:"boolean"` - // The criterion to be used to list report groups shared with the current AWS - // account or user. Valid values include: + // Information about the build output artifact location: // - // * ARN: List based on the ARN. + // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value + // if specified. This is because AWS CodePipeline manages its build output + // locations instead of AWS CodeBuild. // - // * MODIFIED_TIME: List based on when information about the shared report - // group was last changed. 
- SortBy *string `locationName:"sortBy" type:"string" enum:"SharedResourceSortByType"`
+ // * If type is set to NO_ARTIFACTS, this value is ignored if specified,
+ // because no build output is produced.
+ //
+ // * If type is set to S3, this is the name of the output bucket.
+ Location *string `locationName:"location" type:"string"`
- // The order in which to list shared report groups. Valid values include:
+ // Along with path and namespaceType, the pattern that AWS CodeBuild uses to
+ // name and store the output artifact:
//
- // * ASCENDING: List in ascending order.
+ // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value
+ // if specified. This is because AWS CodePipeline manages its build output
+ // names instead of AWS CodeBuild.
//
- // * DESCENDING: List in descending order.
- SortOrder *string `locationName:"sortOrder" type:"string" enum:"SortOrderType"`
+ // * If type is set to NO_ARTIFACTS, this value is ignored if specified,
+ // because no build output is produced.
+ //
+ // * If type is set to S3, this is the name of the output artifact object.
+ // If you set the name to be a forward slash ("/"), the artifact is stored
+ // in the root of the output bucket.
+ //
+ // For example:
+ //
+ // * If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and
+ // name is set to MyArtifact.zip, then the output artifact is stored in MyArtifacts/<build-ID>/MyArtifact.zip.
+ //
+ // * If path is empty, namespaceType is set to NONE, and name is set to "/",
+ // the output artifact is stored in the root of the output bucket.
+ //
+ // * If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and
+ // name is set to "/", the output artifact is stored in MyArtifacts/<build-ID>.
+ Name *string `locationName:"name" type:"string"`
+
+ // Along with path and name, the pattern that AWS CodeBuild uses to determine
+ // the name and location to store the output artifact:
+ //
+ // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value
+ // if specified. This is because AWS CodePipeline manages its build output
+ // names instead of AWS CodeBuild.
+ //
+ // * If type is set to NO_ARTIFACTS, this value is ignored if specified,
+ // because no build output is produced.
+ //
+ // * If type is set to S3, valid values include: BUILD_ID: Include the build
+ // ID in the location of the build output artifact. NONE: Do not include
+ // the build ID. This is the default if namespaceType is not specified.
+ //
+ // For example, if path is set to MyArtifacts, namespaceType is set to BUILD_ID,
+ // and name is set to MyArtifact.zip, the output artifact is stored in MyArtifacts/<build-ID>/MyArtifact.zip.
+ NamespaceType *string `locationName:"namespaceType" type:"string" enum:"ArtifactNamespace"`
+
+ // If this flag is set, a name specified in the buildspec file overrides the
+ // artifact name. The name specified in a buildspec file is calculated at build
+ // time and uses the Shell Command Language. For example, you can append a date
+ // and time to your artifact name so that it is always unique.
+ OverrideArtifactName *bool `locationName:"overrideArtifactName" type:"boolean"`
+
+ // The type of build output artifact to create:
+ //
+ // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value
+ // if specified. This is because AWS CodePipeline manages its build output
+ // artifacts instead of AWS CodeBuild.
+ //
+ // * If type is set to NO_ARTIFACTS, this value is ignored if specified,
+ // because no build output is produced.
+ // + // * If type is set to S3, valid values include: NONE: AWS CodeBuild creates + // in the output bucket a folder that contains the build output. This is + // the default if packaging is not specified. ZIP: AWS CodeBuild creates + // in the output bucket a ZIP file that contains the build output. + Packaging *string `locationName:"packaging" type:"string" enum:"ArtifactPackaging"` + + // Along with namespaceType and name, the pattern that AWS CodeBuild uses to + // name and store the output artifact: + // + // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value + // if specified. This is because AWS CodePipeline manages its build output + // names instead of AWS CodeBuild. + // + // * If type is set to NO_ARTIFACTS, this value is ignored if specified, + // because no build output is produced. + // + // * If type is set to S3, this is the path to the output artifact. If path + // is not specified, path is not used. + // + // For example, if path is set to MyArtifacts, namespaceType is set to NONE, + // and name is set to MyArtifact.zip, the output artifact is stored in the output + // bucket at MyArtifacts/MyArtifact.zip. + Path *string `locationName:"path" type:"string"` + + // The type of build output artifact. Valid values include: + // + // * CODEPIPELINE: The build project has build output generated through AWS + // CodePipeline. The CODEPIPELINE type is not supported for secondaryArtifacts. + // + // * NO_ARTIFACTS: The build project does not produce any build output. + // + // * S3: The build project stores build output in Amazon Simple Storage Service + // (Amazon S3). + // + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"ArtifactsType"` } // String returns the string representation -func (s ListSharedReportGroupsInput) String() string { +func (s ProjectArtifacts) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListSharedReportGroupsInput) GoString() string { +func (s ProjectArtifacts) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListSharedReportGroupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListSharedReportGroupsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) +func (s *ProjectArtifacts) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProjectArtifacts"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) } if invalidParams.Len() > 0 { @@ -6306,144 +10084,232 @@ func (s *ListSharedReportGroupsInput) Validate() error { return nil } -// SetMaxResults sets the MaxResults field's value. -func (s *ListSharedReportGroupsInput) SetMaxResults(v int64) *ListSharedReportGroupsInput { - s.MaxResults = &v +// SetArtifactIdentifier sets the ArtifactIdentifier field's value. +func (s *ProjectArtifacts) SetArtifactIdentifier(v string) *ProjectArtifacts { + s.ArtifactIdentifier = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListSharedReportGroupsInput) SetNextToken(v string) *ListSharedReportGroupsInput { - s.NextToken = &v +// SetEncryptionDisabled sets the EncryptionDisabled field's value. +func (s *ProjectArtifacts) SetEncryptionDisabled(v bool) *ProjectArtifacts { + s.EncryptionDisabled = &v return s } -// SetSortBy sets the SortBy field's value. 
-func (s *ListSharedReportGroupsInput) SetSortBy(v string) *ListSharedReportGroupsInput { - s.SortBy = &v +// SetLocation sets the Location field's value. +func (s *ProjectArtifacts) SetLocation(v string) *ProjectArtifacts { + s.Location = &v + return s +} + +// SetName sets the Name field's value. +func (s *ProjectArtifacts) SetName(v string) *ProjectArtifacts { + s.Name = &v + return s +} + +// SetNamespaceType sets the NamespaceType field's value. +func (s *ProjectArtifacts) SetNamespaceType(v string) *ProjectArtifacts { + s.NamespaceType = &v + return s +} + +// SetOverrideArtifactName sets the OverrideArtifactName field's value. +func (s *ProjectArtifacts) SetOverrideArtifactName(v bool) *ProjectArtifacts { + s.OverrideArtifactName = &v + return s +} + +// SetPackaging sets the Packaging field's value. +func (s *ProjectArtifacts) SetPackaging(v string) *ProjectArtifacts { + s.Packaging = &v + return s +} + +// SetPath sets the Path field's value. +func (s *ProjectArtifacts) SetPath(v string) *ProjectArtifacts { + s.Path = &v return s } -// SetSortOrder sets the SortOrder field's value. -func (s *ListSharedReportGroupsInput) SetSortOrder(v string) *ListSharedReportGroupsInput { - s.SortOrder = &v +// SetType sets the Type field's value. +func (s *ProjectArtifacts) SetType(v string) *ProjectArtifacts { + s.Type = &v return s } -type ListSharedReportGroupsOutput struct { +// Information about the build badge for the build project. +type ProjectBadge struct { _ struct{} `type:"structure"` - // During a previous call, the maximum number of items that can be returned - // is the value specified in maxResults. If there more items in the list, then - // a unique string called a nextToken is returned. To get the next batch of - // items in the list, call this operation again, adding the next token to the - // call. To get all of the items in the list, keep calling this operation with - // each subsequent next token that is returned, until no more next tokens are - // returned. - NextToken *string `locationName:"nextToken" type:"string"` + // Set this to true to generate a publicly accessible URL for your project's + // build badge. + BadgeEnabled *bool `locationName:"badgeEnabled" type:"boolean"` - // The list of ARNs for the report groups shared with the current AWS account - // or user. - ReportGroups []*string `locationName:"reportGroups" min:"1" type:"list"` + // The publicly-accessible URL through which you can access the build badge + // for your project. + // + // The publicly accessible URL through which you can access the build badge + // for your project. + BadgeRequestUrl *string `locationName:"badgeRequestUrl" type:"string"` } // String returns the string representation -func (s ListSharedReportGroupsOutput) String() string { +func (s ProjectBadge) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListSharedReportGroupsOutput) GoString() string { +func (s ProjectBadge) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListSharedReportGroupsOutput) SetNextToken(v string) *ListSharedReportGroupsOutput { - s.NextToken = &v +// SetBadgeEnabled sets the BadgeEnabled field's value. +func (s *ProjectBadge) SetBadgeEnabled(v bool) *ProjectBadge { + s.BadgeEnabled = &v return s } -// SetReportGroups sets the ReportGroups field's value. 
-func (s *ListSharedReportGroupsOutput) SetReportGroups(v []*string) *ListSharedReportGroupsOutput { - s.ReportGroups = v +// SetBadgeRequestUrl sets the BadgeRequestUrl field's value. +func (s *ProjectBadge) SetBadgeRequestUrl(v string) *ProjectBadge { + s.BadgeRequestUrl = &v return s } -type ListSourceCredentialsInput struct { +// Contains configuration information about a batch build project. +type ProjectBuildBatchConfig struct { _ struct{} `type:"structure"` + + // Specifies if the build artifacts for the batch build should be combined into + // a single artifact location. + CombineArtifacts *bool `locationName:"combineArtifacts" type:"boolean"` + + // A BatchRestrictions object that specifies the restrictions for the batch + // build. + Restrictions *BatchRestrictions `locationName:"restrictions" type:"structure"` + + // Specifies the service role ARN for the batch build project. + ServiceRole *string `locationName:"serviceRole" min:"1" type:"string"` + + // Specifies the maximum amount of time, in minutes, that the batch build must + // be completed in. + TimeoutInMins *int64 `locationName:"timeoutInMins" type:"integer"` } // String returns the string representation -func (s ListSourceCredentialsInput) String() string { +func (s ProjectBuildBatchConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListSourceCredentialsInput) GoString() string { +func (s ProjectBuildBatchConfig) GoString() string { return s.String() } -type ListSourceCredentialsOutput struct { - _ struct{} `type:"structure"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProjectBuildBatchConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProjectBuildBatchConfig"} + if s.ServiceRole != nil && len(*s.ServiceRole) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceRole", 1)) + } - // A list of SourceCredentialsInfo objects. Each SourceCredentialsInfo object - // includes the authentication type, token ARN, and type of source provider - // for one set of credentials. - SourceCredentialsInfos []*SourceCredentialsInfo `locationName:"sourceCredentialsInfos" type:"list"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// String returns the string representation -func (s ListSourceCredentialsOutput) String() string { - return awsutil.Prettify(s) +// SetCombineArtifacts sets the CombineArtifacts field's value. +func (s *ProjectBuildBatchConfig) SetCombineArtifacts(v bool) *ProjectBuildBatchConfig { + s.CombineArtifacts = &v + return s } -// GoString returns the string representation -func (s ListSourceCredentialsOutput) GoString() string { - return s.String() +// SetRestrictions sets the Restrictions field's value. +func (s *ProjectBuildBatchConfig) SetRestrictions(v *BatchRestrictions) *ProjectBuildBatchConfig { + s.Restrictions = v + return s } -// SetSourceCredentialsInfos sets the SourceCredentialsInfos field's value. -func (s *ListSourceCredentialsOutput) SetSourceCredentialsInfos(v []*SourceCredentialsInfo) *ListSourceCredentialsOutput { - s.SourceCredentialsInfos = v +// SetServiceRole sets the ServiceRole field's value. +func (s *ProjectBuildBatchConfig) SetServiceRole(v string) *ProjectBuildBatchConfig { + s.ServiceRole = &v return s } -// Information about logs for a build project. These can be logs in Amazon CloudWatch -// Logs, built in a specified S3 bucket, or both. -type LogsConfig struct { +// SetTimeoutInMins sets the TimeoutInMins field's value. 
+func (s *ProjectBuildBatchConfig) SetTimeoutInMins(v int64) *ProjectBuildBatchConfig { + s.TimeoutInMins = &v + return s +} + +// Information about the cache for the build project. +type ProjectCache struct { _ struct{} `type:"structure"` - // Information about Amazon CloudWatch Logs for a build project. Amazon CloudWatch - // Logs are enabled by default. - CloudWatchLogs *CloudWatchLogsConfig `locationName:"cloudWatchLogs" type:"structure"` + // Information about the cache location: + // + // * NO_CACHE or LOCAL: This value is ignored. + // + // * S3: This is the S3 bucket name/prefix. + Location *string `locationName:"location" type:"string"` - // Information about logs built to an S3 bucket for a build project. S3 logs - // are not enabled by default. - S3Logs *S3LogsConfig `locationName:"s3Logs" type:"structure"` + // If you use a LOCAL cache, the local cache mode. You can use one or more local + // cache modes at the same time. + // + // * LOCAL_SOURCE_CACHE mode caches Git metadata for primary and secondary + // sources. After the cache is created, subsequent builds pull only the change + // between commits. This mode is a good choice for projects with a clean + // working directory and a source that is a large Git repository. If you + // choose this option and your project does not use a Git repository (GitHub, + // GitHub Enterprise, or Bitbucket), the option is ignored. + // + // * LOCAL_DOCKER_LAYER_CACHE mode caches existing Docker layers. This mode + // is a good choice for projects that build or pull large Docker images. + // It can prevent the performance issues caused by pulling large Docker images + // down from the network. You can use a Docker layer cache in the Linux environment + // only. The privileged flag must be set so that your project has the required + // Docker permissions. You should consider the security implications before + // you use a Docker layer cache. + // + // * LOCAL_CUSTOM_CACHE mode caches directories you specify in the buildspec + // file. This mode is a good choice if your build scenario is not suited + // to one of the other three local cache modes. If you use a custom cache: + // Only directories can be specified for caching. You cannot specify individual + // files. Symlinks are used to reference cached directories. Cached directories + // are linked to your build before it downloads its project sources. Cached + // items are overridden if a source item has the same name. Directories are + // specified using cache paths in the buildspec file. + Modes []*string `locationName:"modes" type:"list"` + + // The type of cache used by the build project. Valid values include: + // + // * NO_CACHE: The build project does not use any cache. + // + // * S3: The build project reads and writes from and to S3. + // + // * LOCAL: The build project stores a cache locally on a build host that + // is only available to that build host. + // + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"CacheType"` } // String returns the string representation -func (s LogsConfig) String() string { +func (s ProjectCache) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LogsConfig) GoString() string { +func (s ProjectCache) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *LogsConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LogsConfig"} - if s.CloudWatchLogs != nil { - if err := s.CloudWatchLogs.Validate(); err != nil { - invalidParams.AddNested("CloudWatchLogs", err.(request.ErrInvalidParams)) - } - } - if s.S3Logs != nil { - if err := s.S3Logs.Validate(); err != nil { - invalidParams.AddNested("S3Logs", err.(request.ErrInvalidParams)) - } +func (s *ProjectCache) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProjectCache"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) } if invalidParams.Len() > 0 { @@ -6452,635 +10318,735 @@ func (s *LogsConfig) Validate() error { return nil } -// SetCloudWatchLogs sets the CloudWatchLogs field's value. -func (s *LogsConfig) SetCloudWatchLogs(v *CloudWatchLogsConfig) *LogsConfig { - s.CloudWatchLogs = v +// SetLocation sets the Location field's value. +func (s *ProjectCache) SetLocation(v string) *ProjectCache { + s.Location = &v return s } -// SetS3Logs sets the S3Logs field's value. -func (s *LogsConfig) SetS3Logs(v *S3LogsConfig) *LogsConfig { - s.S3Logs = v +// SetModes sets the Modes field's value. +func (s *ProjectCache) SetModes(v []*string) *ProjectCache { + s.Modes = v return s } -// Information about build logs in Amazon CloudWatch Logs. -type LogsLocation struct { +// SetType sets the Type field's value. +func (s *ProjectCache) SetType(v string) *ProjectCache { + s.Type = &v + return s +} + +// Information about the build environment of the build project. +type ProjectEnvironment struct { _ struct{} `type:"structure"` - // Information about Amazon CloudWatch Logs for a build project. - CloudWatchLogs *CloudWatchLogsConfig `locationName:"cloudWatchLogs" type:"structure"` + // The certificate to use with this build project. + Certificate *string `locationName:"certificate" type:"string"` - // The ARN of Amazon CloudWatch Logs for a build project. Its format is arn:${Partition}:logs:${Region}:${Account}:log-group:${LogGroupName}:log-stream:${LogStreamName}. - // For more information, see Resources Defined by Amazon CloudWatch Logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazoncloudwatchlogs.html#amazoncloudwatchlogs-resources-for-iam-policies). - CloudWatchLogsArn *string `locationName:"cloudWatchLogsArn" type:"string"` + // Information about the compute resources the build project uses. Available + // values include: + // + // * BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds. + // + // * BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds. + // + // * BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, + // depending on your environment type. + // + // * BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824 GB + // of SSD storage for builds. This compute type supports Docker images up + // to 100 GB uncompressed. + // + // If you use BUILD_GENERAL1_LARGE: + // + // * For environment type LINUX_CONTAINER, you can use up to 15 GB memory + // and 8 vCPUs for builds. + // + // * For environment type LINUX_GPU_CONTAINER, you can use up to 255 GB memory, + // 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds. + // + // * For environment type ARM_CONTAINER, you can use up to 16 GB memory and + // 8 vCPUs on ARM-based processors for builds. + // + // For more information, see Build Environment Compute Types (https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) + // in the AWS CodeBuild User Guide. 
+ //
+ // ComputeType is a required field
+ ComputeType *string `locationName:"computeType" type:"string" required:"true" enum:"ComputeType"`
- // The URL to an individual build log in Amazon CloudWatch Logs.
- DeepLink *string `locationName:"deepLink" type:"string"`
+ // A set of environment variables to make available to builds for this build
+ // project.
+ EnvironmentVariables []*EnvironmentVariable `locationName:"environmentVariables" type:"list"`
- // The name of the Amazon CloudWatch Logs group for the build logs.
- GroupName *string `locationName:"groupName" type:"string"`
+ // The image tag or image digest that identifies the Docker image to use for
+ // this build project. Use the following formats:
+ //
+ // * For an image tag: <registry>/<repository>:<tag>. For example, in the
+ // Docker repository that CodeBuild uses to manage its Docker images, this
+ // would be aws/codebuild/standard:4.0. To specify the latest version of
+ // this image, this would be aws/codebuild/standard:latest.
+ //
+ // * For an image digest: <registry>/<repository>@<digest>. For example,
+ // to specify an image with the digest "sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf,"
+ // use <registry>/<repository>@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf.
+ //
+ // Image is a required field
+ Image *string `locationName:"image" min:"1" type:"string" required:"true"`
- // The URL to a build log in an S3 bucket.
- S3DeepLink *string `locationName:"s3DeepLink" type:"string"`
+ // The type of credentials AWS CodeBuild uses to pull images in your build.
+ // There are two valid values:
+ //
+ // * CODEBUILD specifies that AWS CodeBuild uses its own credentials. This
+ // requires that you modify your ECR repository policy to trust AWS CodeBuild's
+ // service principal.
+ //
+ // * SERVICE_ROLE specifies that AWS CodeBuild uses your build project's
+ // service role.
+ //
+ // When you use a cross-account or private registry image, you must use SERVICE_ROLE
+ // credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD
+ // credentials.
+ ImagePullCredentialsType *string `locationName:"imagePullCredentialsType" type:"string" enum:"ImagePullCredentialsType"`
- // Information about S3 logs for a build project.
- S3Logs *S3LogsConfig `locationName:"s3Logs" type:"structure"`
+ // Enables running the Docker daemon inside a Docker container. Set to true
+ // only if the build project is used to build Docker images. Otherwise, a build
+ // that attempts to interact with the Docker daemon fails. The default setting
+ // is false.
+ //
+ // You can initialize the Docker daemon during the install phase of your build
+ // by adding one of the following sets of commands to the install phase of your
+ // buildspec file:
+ //
+ // If the operating system's base image is Ubuntu Linux:
+ //
+ // - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375
+ // --storage-driver=overlay&
+ //
+ // - timeout 15 sh -c "until docker info; do echo .; sleep 1; done"
+ //
+ // If the operating system's base image is Alpine Linux and the previous command
+ // does not work, add the -t argument to timeout:
+ //
+ // - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375
+ // --storage-driver=overlay&
+ //
+ // - timeout -t 15 sh -c "until docker info; do echo .; sleep 1; done"
+ PrivilegedMode *bool `locationName:"privilegedMode" type:"boolean"`
- // The ARN of S3 logs for a build project. Its format is arn:${Partition}:s3:::${BucketName}/${ObjectName}.
- // For more information, see Resources Defined by Amazon S3 (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html#amazons3-resources-for-iam-policies). - S3LogsArn *string `locationName:"s3LogsArn" type:"string"` + // The credentials for access to a private registry. + RegistryCredential *RegistryCredential `locationName:"registryCredential" type:"structure"` - // The name of the Amazon CloudWatch Logs stream for the build logs. - StreamName *string `locationName:"streamName" type:"string"` + // The type of build environment to use for related builds. + // + // * The environment type ARM_CONTAINER is available only in regions US East + // (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific + // (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and EU (Frankfurt). + // + // * The environment type LINUX_CONTAINER with compute type build.general1.2xlarge + // is available only in regions US East (N. Virginia), US East (Ohio), US + // West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), + // Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), + // Asia Pacific (Sydney), China (Beijing), and China (Ningxia). + // + // * The environment type LINUX_GPU_CONTAINER is available only in regions + // US East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central), + // EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), Asia + // Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney) , China + // (Beijing), and China (Ningxia). + // + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"EnvironmentType"` } // String returns the string representation -func (s LogsLocation) String() string { +func (s ProjectEnvironment) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LogsLocation) GoString() string { +func (s ProjectEnvironment) GoString() string { return s.String() } -// SetCloudWatchLogs sets the CloudWatchLogs field's value. -func (s *LogsLocation) SetCloudWatchLogs(v *CloudWatchLogsConfig) *LogsLocation { - s.CloudWatchLogs = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProjectEnvironment) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProjectEnvironment"} + if s.ComputeType == nil { + invalidParams.Add(request.NewErrParamRequired("ComputeType")) + } + if s.Image == nil { + invalidParams.Add(request.NewErrParamRequired("Image")) + } + if s.Image != nil && len(*s.Image) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Image", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.EnvironmentVariables != nil { + for i, v := range s.EnvironmentVariables { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EnvironmentVariables", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RegistryCredential != nil { + if err := s.RegistryCredential.Validate(); err != nil { + invalidParams.AddNested("RegistryCredential", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetCloudWatchLogsArn sets the CloudWatchLogsArn field's value. -func (s *LogsLocation) SetCloudWatchLogsArn(v string) *LogsLocation { - s.CloudWatchLogsArn = &v +// SetCertificate sets the Certificate field's value. 
+func (s *ProjectEnvironment) SetCertificate(v string) *ProjectEnvironment { + s.Certificate = &v return s } -// SetDeepLink sets the DeepLink field's value. -func (s *LogsLocation) SetDeepLink(v string) *LogsLocation { - s.DeepLink = &v +// SetComputeType sets the ComputeType field's value. +func (s *ProjectEnvironment) SetComputeType(v string) *ProjectEnvironment { + s.ComputeType = &v return s } -// SetGroupName sets the GroupName field's value. -func (s *LogsLocation) SetGroupName(v string) *LogsLocation { - s.GroupName = &v +// SetEnvironmentVariables sets the EnvironmentVariables field's value. +func (s *ProjectEnvironment) SetEnvironmentVariables(v []*EnvironmentVariable) *ProjectEnvironment { + s.EnvironmentVariables = v return s } -// SetS3DeepLink sets the S3DeepLink field's value. -func (s *LogsLocation) SetS3DeepLink(v string) *LogsLocation { - s.S3DeepLink = &v +// SetImage sets the Image field's value. +func (s *ProjectEnvironment) SetImage(v string) *ProjectEnvironment { + s.Image = &v return s } -// SetS3Logs sets the S3Logs field's value. -func (s *LogsLocation) SetS3Logs(v *S3LogsConfig) *LogsLocation { - s.S3Logs = v +// SetImagePullCredentialsType sets the ImagePullCredentialsType field's value. +func (s *ProjectEnvironment) SetImagePullCredentialsType(v string) *ProjectEnvironment { + s.ImagePullCredentialsType = &v return s } -// SetS3LogsArn sets the S3LogsArn field's value. -func (s *LogsLocation) SetS3LogsArn(v string) *LogsLocation { - s.S3LogsArn = &v +// SetPrivilegedMode sets the PrivilegedMode field's value. +func (s *ProjectEnvironment) SetPrivilegedMode(v bool) *ProjectEnvironment { + s.PrivilegedMode = &v return s } -// SetStreamName sets the StreamName field's value. -func (s *LogsLocation) SetStreamName(v string) *LogsLocation { - s.StreamName = &v +// SetRegistryCredential sets the RegistryCredential field's value. +func (s *ProjectEnvironment) SetRegistryCredential(v *RegistryCredential) *ProjectEnvironment { + s.RegistryCredential = v return s } -// Describes a network interface. -type NetworkInterface struct { - _ struct{} `type:"structure"` - - // The ID of the network interface. - NetworkInterfaceId *string `locationName:"networkInterfaceId" min:"1" type:"string"` - - // The ID of the subnet. - SubnetId *string `locationName:"subnetId" min:"1" type:"string"` +// SetType sets the Type field's value. +func (s *ProjectEnvironment) SetType(v string) *ProjectEnvironment { + s.Type = &v + return s } -// String returns the string representation -func (s NetworkInterface) String() string { - return awsutil.Prettify(s) -} +// Information about a file system created by Amazon Elastic File System (EFS). +// For more information, see What Is Amazon Elastic File System? (https://docs.aws.amazon.com/efs/latest/ug/whatisefs.html) +type ProjectFileSystemLocation struct { + _ struct{} `type:"structure"` -// GoString returns the string representation -func (s NetworkInterface) GoString() string { - return s.String() -} + // The name used to access a file system created by Amazon EFS. CodeBuild creates + // an environment variable by appending the identifier in all capital letters + // to CODEBUILD_. For example, if you specify my-efs for identifier, a new environment + // variable is create named CODEBUILD_MY-EFS. + // + // The identifier is used to mount your file system. + Identifier *string `locationName:"identifier" type:"string"` -// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. 
-func (s *NetworkInterface) SetNetworkInterfaceId(v string) *NetworkInterface { - s.NetworkInterfaceId = &v - return s -} + // A string that specifies the location of the file system created by Amazon + // EFS. Its format is efs-dns-name:/directory-path. You can find the DNS name + // of file system when you view it in the AWS EFS console. The directory path + // is a path to a directory in the file system that CodeBuild mounts. For example, + // if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com, + // and its mount directory is my-efs-mount-directory, then the location is fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory. + // + // The directory path in the format efs-dns-name:/directory-path is optional. + // If you do not specify a directory path, the location is only the DNS name + // and CodeBuild mounts the entire file system. + Location *string `locationName:"location" type:"string"` -// SetSubnetId sets the SubnetId field's value. -func (s *NetworkInterface) SetSubnetId(v string) *NetworkInterface { - s.SubnetId = &v - return s -} + // The mount options for a file system created by AWS EFS. The default mount + // options used by CodeBuild are nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2. + // For more information, see Recommended NFS Mount Options (https://docs.aws.amazon.com/efs/latest/ug/mounting-fs-nfs-mount-settings.html). + MountOptions *string `locationName:"mountOptions" type:"string"` -// There was a problem with the underlying OAuth provider. -type OAuthProviderException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + // The location in the container where you mount the file system. + MountPoint *string `locationName:"mountPoint" type:"string"` - Message_ *string `locationName:"message" type:"string"` + // The type of the file system. The one supported type is EFS. + Type *string `locationName:"type" type:"string" enum:"FileSystemType"` } // String returns the string representation -func (s OAuthProviderException) String() string { +func (s ProjectFileSystemLocation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OAuthProviderException) GoString() string { +func (s ProjectFileSystemLocation) GoString() string { return s.String() } -func newErrorOAuthProviderException(v protocol.ResponseMetadata) error { - return &OAuthProviderException{ - respMetadata: v, - } -} - -// Code returns the exception type name. -func (s OAuthProviderException) Code() string { - return "OAuthProviderException" -} - -// Message returns the exception's message. -func (s OAuthProviderException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s OAuthProviderException) OrigErr() error { - return nil -} - -func (s OAuthProviderException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s OAuthProviderException) StatusCode() int { - return s.respMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s OAuthProviderException) RequestID() string { - return s.respMetadata.RequestID -} - -// Additional information about a build phase that has an error. You can use -// this information for troubleshooting. 
-type PhaseContext struct { - _ struct{} `type:"structure"` - - // An explanation of the build phase's context. This might include a command - // ID and an exit code. - Message *string `locationName:"message" type:"string"` - - // The status code for the context of the build phase. - StatusCode *string `locationName:"statusCode" type:"string"` +// SetIdentifier sets the Identifier field's value. +func (s *ProjectFileSystemLocation) SetIdentifier(v string) *ProjectFileSystemLocation { + s.Identifier = &v + return s } -// String returns the string representation -func (s PhaseContext) String() string { - return awsutil.Prettify(s) +// SetLocation sets the Location field's value. +func (s *ProjectFileSystemLocation) SetLocation(v string) *ProjectFileSystemLocation { + s.Location = &v + return s } -// GoString returns the string representation -func (s PhaseContext) GoString() string { - return s.String() +// SetMountOptions sets the MountOptions field's value. +func (s *ProjectFileSystemLocation) SetMountOptions(v string) *ProjectFileSystemLocation { + s.MountOptions = &v + return s } -// SetMessage sets the Message field's value. -func (s *PhaseContext) SetMessage(v string) *PhaseContext { - s.Message = &v +// SetMountPoint sets the MountPoint field's value. +func (s *ProjectFileSystemLocation) SetMountPoint(v string) *ProjectFileSystemLocation { + s.MountPoint = &v return s } -// SetStatusCode sets the StatusCode field's value. -func (s *PhaseContext) SetStatusCode(v string) *PhaseContext { - s.StatusCode = &v +// SetType sets the Type field's value. +func (s *ProjectFileSystemLocation) SetType(v string) *ProjectFileSystemLocation { + s.Type = &v return s } -// Information about a build project. -type Project struct { +// Information about the build input source code for the build project. +type ProjectSource struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the build project. - Arn *string `locationName:"arn" type:"string"` + // Information about the authorization settings for AWS CodeBuild to access + // the source code to be built. + // + // This information is for the AWS CodeBuild console's use only. Your code should + // not get or set this information directly. + Auth *SourceAuth `locationName:"auth" type:"structure"` - // Information about the build output artifacts for the build project. - Artifacts *ProjectArtifacts `locationName:"artifacts" type:"structure"` + // Contains information that defines how the build project reports the build + // status to the source provider. This option is only used when the source provider + // is GITHUB, GITHUB_ENTERPRISE, or BITBUCKET. + BuildStatusConfig *BuildStatusConfig `locationName:"buildStatusConfig" type:"structure"` - // Information about the build badge for the build project. - Badge *ProjectBadge `locationName:"badge" type:"structure"` + // The buildspec file declaration to use for the builds in this build project. + // + // If this value is set, it can be either an inline buildspec definition, the + // path to an alternate buildspec file relative to the value of the built-in + // CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The + // bucket must be in the same AWS Region as the build project. Specify the buildspec + // file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). + // If this value is not provided or is set to an empty string, the source code + // must contain a buildspec file in its root directory. 
For more information, + // see Buildspec File Name and Storage Location (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec-ref-name-storage). + Buildspec *string `locationName:"buildspec" type:"string"` - // Information about the cache for the build project. - Cache *ProjectCache `locationName:"cache" type:"structure"` + // Information about the Git clone depth for the build project. + GitCloneDepth *int64 `locationName:"gitCloneDepth" type:"integer"` - // When the build project was created, expressed in Unix time format. - Created *time.Time `locationName:"created" type:"timestamp"` + // Information about the Git submodules configuration for the build project. + GitSubmodulesConfig *GitSubmodulesConfig `locationName:"gitSubmodulesConfig" type:"structure"` - // A description that makes the build project easy to identify. - Description *string `locationName:"description" type:"string"` + // Enable this flag to ignore SSL warnings while connecting to the project source + // code. + InsecureSsl *bool `locationName:"insecureSsl" type:"boolean"` - // The AWS Key Management Service (AWS KMS) customer master key (CMK) to be - // used for encrypting the build output artifacts. + // Information about the location of the source code to be built. Valid values + // include: // - // You can use a cross-account KMS key to encrypt the build output artifacts - // if your service role has permission to that key. + // * For source code settings that are specified in the source action of + // a pipeline in AWS CodePipeline, location should not be specified. If it + // is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline + // uses the settings in a pipeline's source action instead of this value. // - // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, - // the CMK's alias (using the format alias/alias-name ). - EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` - - // Information about the build environment for this build project. - Environment *ProjectEnvironment `locationName:"environment" type:"structure"` - - // An array of ProjectFileSystemLocation objects for a CodeBuild build project. - // A ProjectFileSystemLocation object specifies the identifier, location, mountOptions, - // mountPoint, and type of a file system created using Amazon Elastic File System. - FileSystemLocations []*ProjectFileSystemLocation `locationName:"fileSystemLocations" type:"list"` - - // When the build project's settings were last modified, expressed in Unix time - // format. - LastModified *time.Time `locationName:"lastModified" type:"timestamp"` - - // Information about logs for the build project. A project can create logs in - // Amazon CloudWatch Logs, an S3 bucket, or both. - LogsConfig *LogsConfig `locationName:"logsConfig" type:"structure"` - - // The name of the build project. - Name *string `locationName:"name" min:"2" type:"string"` - - // The number of minutes a build is allowed to be queued before it times out. - QueuedTimeoutInMinutes *int64 `locationName:"queuedTimeoutInMinutes" min:"5" type:"integer"` - - // An array of ProjectArtifacts objects. - SecondaryArtifacts []*ProjectArtifacts `locationName:"secondaryArtifacts" type:"list"` - - // An array of ProjectSourceVersion objects. If secondarySourceVersions is specified - // at the build level, then they take over these secondarySourceVersions (at - // the project level). 
- SecondarySourceVersions []*ProjectSourceVersion `locationName:"secondarySourceVersions" type:"list"` - - // An array of ProjectSource objects. - SecondarySources []*ProjectSource `locationName:"secondarySources" type:"list"` + // * For source code in an AWS CodeCommit repository, the HTTPS clone URL + // to the repository that contains the source code and the buildspec file + // (for example, https://git-codecommit..amazonaws.com/v1/repos/). + // + // * For source code in an Amazon Simple Storage Service (Amazon S3) input + // bucket, one of the following. The path to the ZIP file that contains the + // source code (for example, //.zip). The + // path to the folder that contains the source code (for example, ///). + // + // * For source code in a GitHub repository, the HTTPS clone URL to the repository + // that contains the source and the buildspec file. You must connect your + // AWS account to your GitHub account. Use the AWS CodeBuild console to start + // creating a build project. When you use the console to connect (or reconnect) + // with GitHub, on the GitHub Authorize application page, for Organization + // access, choose Request access next to each repository you want to allow + // AWS CodeBuild to have access to, and then choose Authorize application. + // (After you have connected to your GitHub account, you do not need to finish + // creating the build project. You can leave the AWS CodeBuild console.) + // To instruct AWS CodeBuild to use this connection, in the source object, + // set the auth object's type value to OAUTH. + // + // * For source code in a Bitbucket repository, the HTTPS clone URL to the + // repository that contains the source and the buildspec file. You must connect + // your AWS account to your Bitbucket account. Use the AWS CodeBuild console + // to start creating a build project. When you use the console to connect + // (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your + // account page, choose Grant access. (After you have connected to your Bitbucket + // account, you do not need to finish creating the build project. You can + // leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this + // connection, in the source object, set the auth object's type value to + // OAUTH. + Location *string `locationName:"location" type:"string"` - // The ARN of the AWS Identity and Access Management (IAM) role that enables - // AWS CodeBuild to interact with dependent AWS services on behalf of the AWS - // account. - ServiceRole *string `locationName:"serviceRole" min:"1" type:"string"` + // Set to true to report the status of a build's start and finish to your source + // provider. This option is valid only when your source provider is GitHub, + // GitHub Enterprise, or Bitbucket. If this is set and you use a different source + // provider, an invalidInputException is thrown. + // + // The status of a build triggered by a webhook is always reported to your source + // provider. + ReportBuildStatus *bool `locationName:"reportBuildStatus" type:"boolean"` - // Information about the build input source code for this build project. - Source *ProjectSource `locationName:"source" type:"structure"` + // An identifier for this project source. + SourceIdentifier *string `locationName:"sourceIdentifier" type:"string"` - // A version of the build input to be built for this project. If not specified, - // the latest version is used. If specified, it must be one of: + // The type of repository that contains the source code to be built. 
Valid values + // include: // - // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. + // * BITBUCKET: The source code is in a Bitbucket repository. // - // * For GitHub: the commit ID, pull request ID, branch name, or tag name - // that corresponds to the version of the source code you want to build. - // If a pull request ID is specified, it must use the format pr/pull-request-ID - // (for example pr/25). If a branch name is specified, the branch's HEAD - // commit ID is used. If not specified, the default branch's HEAD commit - // ID is used. + // * CODECOMMIT: The source code is in an AWS CodeCommit repository. // - // * For Bitbucket: the commit ID, branch name, or tag name that corresponds - // to the version of the source code you want to build. If a branch name - // is specified, the branch's HEAD commit ID is used. If not specified, the - // default branch's HEAD commit ID is used. + // * CODEPIPELINE: The source code settings are specified in the source action + // of a pipeline in AWS CodePipeline. // - // * For Amazon Simple Storage Service (Amazon S3): the version ID of the - // object that represents the build input ZIP file to use. + // * GITHUB: The source code is in a GitHub or GitHub Enterprise Cloud repository. // - // If sourceVersion is specified at the build level, then that version takes - // precedence over this sourceVersion (at the project level). + // * GITHUB_ENTERPRISE: The source code is in a GitHub Enterprise Server + // repository. // - // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the AWS CodeBuild User Guide. - SourceVersion *string `locationName:"sourceVersion" type:"string"` - - // The tags for this build project. + // * NO_SOURCE: The project does not have input source code. // - // These tags are available for use by AWS services that support AWS CodeBuild - // build project tags. - Tags []*Tag `locationName:"tags" type:"list"` - - // How long, in minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait - // before timing out any related build that did not get marked as completed. - // The default is 60 minutes. - TimeoutInMinutes *int64 `locationName:"timeoutInMinutes" min:"5" type:"integer"` - - // Information about the VPC configuration that AWS CodeBuild accesses. - VpcConfig *VpcConfig `locationName:"vpcConfig" type:"structure"` - - // Information about a webhook that connects repository events to a build project - // in AWS CodeBuild. - Webhook *Webhook `locationName:"webhook" type:"structure"` + // * S3: The source code is in an Amazon Simple Storage Service (Amazon S3) + // input bucket. + // + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"SourceType"` } // String returns the string representation -func (s Project) String() string { +func (s ProjectSource) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Project) GoString() string { +func (s ProjectSource) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *Project) SetArn(v string) *Project { - s.Arn = &v - return s -} - -// SetArtifacts sets the Artifacts field's value. -func (s *Project) SetArtifacts(v *ProjectArtifacts) *Project { - s.Artifacts = v - return s -} - -// SetBadge sets the Badge field's value. 
-func (s *Project) SetBadge(v *ProjectBadge) *Project { - s.Badge = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProjectSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProjectSource"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Auth != nil { + if err := s.Auth.Validate(); err != nil { + invalidParams.AddNested("Auth", err.(request.ErrInvalidParams)) + } + } + if s.GitSubmodulesConfig != nil { + if err := s.GitSubmodulesConfig.Validate(); err != nil { + invalidParams.AddNested("GitSubmodulesConfig", err.(request.ErrInvalidParams)) + } + } -// SetCache sets the Cache field's value. -func (s *Project) SetCache(v *ProjectCache) *Project { - s.Cache = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetCreated sets the Created field's value. -func (s *Project) SetCreated(v time.Time) *Project { - s.Created = &v +// SetAuth sets the Auth field's value. +func (s *ProjectSource) SetAuth(v *SourceAuth) *ProjectSource { + s.Auth = v return s } -// SetDescription sets the Description field's value. -func (s *Project) SetDescription(v string) *Project { - s.Description = &v +// SetBuildStatusConfig sets the BuildStatusConfig field's value. +func (s *ProjectSource) SetBuildStatusConfig(v *BuildStatusConfig) *ProjectSource { + s.BuildStatusConfig = v return s } -// SetEncryptionKey sets the EncryptionKey field's value. -func (s *Project) SetEncryptionKey(v string) *Project { - s.EncryptionKey = &v +// SetBuildspec sets the Buildspec field's value. +func (s *ProjectSource) SetBuildspec(v string) *ProjectSource { + s.Buildspec = &v return s } -// SetEnvironment sets the Environment field's value. -func (s *Project) SetEnvironment(v *ProjectEnvironment) *Project { - s.Environment = v +// SetGitCloneDepth sets the GitCloneDepth field's value. +func (s *ProjectSource) SetGitCloneDepth(v int64) *ProjectSource { + s.GitCloneDepth = &v return s } -// SetFileSystemLocations sets the FileSystemLocations field's value. -func (s *Project) SetFileSystemLocations(v []*ProjectFileSystemLocation) *Project { - s.FileSystemLocations = v +// SetGitSubmodulesConfig sets the GitSubmodulesConfig field's value. +func (s *ProjectSource) SetGitSubmodulesConfig(v *GitSubmodulesConfig) *ProjectSource { + s.GitSubmodulesConfig = v return s } -// SetLastModified sets the LastModified field's value. -func (s *Project) SetLastModified(v time.Time) *Project { - s.LastModified = &v +// SetInsecureSsl sets the InsecureSsl field's value. +func (s *ProjectSource) SetInsecureSsl(v bool) *ProjectSource { + s.InsecureSsl = &v return s } -// SetLogsConfig sets the LogsConfig field's value. -func (s *Project) SetLogsConfig(v *LogsConfig) *Project { - s.LogsConfig = v +// SetLocation sets the Location field's value. +func (s *ProjectSource) SetLocation(v string) *ProjectSource { + s.Location = &v return s } -// SetName sets the Name field's value. -func (s *Project) SetName(v string) *Project { - s.Name = &v +// SetReportBuildStatus sets the ReportBuildStatus field's value. +func (s *ProjectSource) SetReportBuildStatus(v bool) *ProjectSource { + s.ReportBuildStatus = &v return s } -// SetQueuedTimeoutInMinutes sets the QueuedTimeoutInMinutes field's value. -func (s *Project) SetQueuedTimeoutInMinutes(v int64) *Project { - s.QueuedTimeoutInMinutes = &v +// SetSourceIdentifier sets the SourceIdentifier field's value. 
+func (s *ProjectSource) SetSourceIdentifier(v string) *ProjectSource { + s.SourceIdentifier = &v return s } -// SetSecondaryArtifacts sets the SecondaryArtifacts field's value. -func (s *Project) SetSecondaryArtifacts(v []*ProjectArtifacts) *Project { - s.SecondaryArtifacts = v +// SetType sets the Type field's value. +func (s *ProjectSource) SetType(v string) *ProjectSource { + s.Type = &v return s } -// SetSecondarySourceVersions sets the SecondarySourceVersions field's value. -func (s *Project) SetSecondarySourceVersions(v []*ProjectSourceVersion) *Project { - s.SecondarySourceVersions = v - return s +// A source identifier and its corresponding version. +type ProjectSourceVersion struct { + _ struct{} `type:"structure"` + + // An identifier for a source in the build project. + // + // SourceIdentifier is a required field + SourceIdentifier *string `locationName:"sourceIdentifier" type:"string" required:"true"` + + // The source version for the corresponding source identifier. If specified, + // must be one of: + // + // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. + // + // * For GitHub: the commit ID, pull request ID, branch name, or tag name + // that corresponds to the version of the source code you want to build. + // If a pull request ID is specified, it must use the format pr/pull-request-ID + // (for example, pr/25). If a branch name is specified, the branch's HEAD + // commit ID is used. If not specified, the default branch's HEAD commit + // ID is used. + // + // * For Bitbucket: the commit ID, branch name, or tag name that corresponds + // to the version of the source code you want to build. If a branch name + // is specified, the branch's HEAD commit ID is used. If not specified, the + // default branch's HEAD commit ID is used. + // + // * For Amazon Simple Storage Service (Amazon S3): the version ID of the + // object that represents the build input ZIP file to use. + // + // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) + // in the AWS CodeBuild User Guide. + // + // SourceVersion is a required field + SourceVersion *string `locationName:"sourceVersion" type:"string" required:"true"` } -// SetSecondarySources sets the SecondarySources field's value. -func (s *Project) SetSecondarySources(v []*ProjectSource) *Project { - s.SecondarySources = v - return s +// String returns the string representation +func (s ProjectSourceVersion) String() string { + return awsutil.Prettify(s) } -// SetServiceRole sets the ServiceRole field's value. -func (s *Project) SetServiceRole(v string) *Project { - s.ServiceRole = &v - return s +// GoString returns the string representation +func (s ProjectSourceVersion) GoString() string { + return s.String() } -// SetSource sets the Source field's value. -func (s *Project) SetSource(v *ProjectSource) *Project { - s.Source = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProjectSourceVersion) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProjectSourceVersion"} + if s.SourceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceIdentifier")) + } + if s.SourceVersion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSourceIdentifier sets the SourceIdentifier field's value. 
+func (s *ProjectSourceVersion) SetSourceIdentifier(v string) *ProjectSourceVersion { + s.SourceIdentifier = &v return s } // SetSourceVersion sets the SourceVersion field's value. -func (s *Project) SetSourceVersion(v string) *Project { +func (s *ProjectSourceVersion) SetSourceVersion(v string) *ProjectSourceVersion { s.SourceVersion = &v return s } -// SetTags sets the Tags field's value. -func (s *Project) SetTags(v []*Tag) *Project { - s.Tags = v - return s +type PutResourcePolicyInput struct { + _ struct{} `type:"structure"` + + // A JSON-formatted resource policy. For more information, see Sharing a Project + // (https://docs.aws.amazon.com/codebuild/latest/userguide/project-sharing.html#project-sharing-share) + // and Sharing a Report Group (https://docs.aws.amazon.com/codebuild/latest/userguide/report-groups-sharing.html#report-groups-sharing-share) + // in the AWS CodeBuild User Guide. + // + // Policy is a required field + Policy *string `locationName:"policy" min:"1" type:"string" required:"true"` + + // The ARN of the Project or ReportGroup resource you want to associate with + // a resource policy. + // + // ResourceArn is a required field + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` } -// SetTimeoutInMinutes sets the TimeoutInMinutes field's value. -func (s *Project) SetTimeoutInMinutes(v int64) *Project { - s.TimeoutInMinutes = &v - return s +// String returns the string representation +func (s PutResourcePolicyInput) String() string { + return awsutil.Prettify(s) } -// SetVpcConfig sets the VpcConfig field's value. -func (s *Project) SetVpcConfig(v *VpcConfig) *Project { - s.VpcConfig = v +// GoString returns the string representation +func (s PutResourcePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutResourcePolicyInput"} + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicy sets the Policy field's value. +func (s *PutResourcePolicyInput) SetPolicy(v string) *PutResourcePolicyInput { + s.Policy = &v return s } -// SetWebhook sets the Webhook field's value. -func (s *Project) SetWebhook(v *Webhook) *Project { - s.Webhook = v +// SetResourceArn sets the ResourceArn field's value. +func (s *PutResourcePolicyInput) SetResourceArn(v string) *PutResourcePolicyInput { + s.ResourceArn = &v return s } -// Information about the build output artifacts for the build project. -type ProjectArtifacts struct { +type PutResourcePolicyOutput struct { _ struct{} `type:"structure"` - // An identifier for this artifact definition. - ArtifactIdentifier *string `locationName:"artifactIdentifier" type:"string"` - - // Set to true if you do not want your output artifacts encrypted. This option - // is valid only if your artifacts type is Amazon Simple Storage Service (Amazon - // S3). If this is set with another artifacts type, an invalidInputException - // is thrown. 
- EncryptionDisabled *bool `locationName:"encryptionDisabled" type:"boolean"` - - // Information about the build output artifact location: - // - // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value - // if specified. This is because AWS CodePipeline manages its build output - // locations instead of AWS CodeBuild. - // - // * If type is set to NO_ARTIFACTS, this value is ignored if specified, - // because no build output is produced. - // - // * If type is set to S3, this is the name of the output bucket. - Location *string `locationName:"location" type:"string"` + // The ARN of the Project or ReportGroup resource that is associated with a + // resource policy. + ResourceArn *string `locationName:"resourceArn" min:"1" type:"string"` +} - // Along with path and namespaceType, the pattern that AWS CodeBuild uses to - // name and store the output artifact: - // - // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value - // if specified. This is because AWS CodePipeline manages its build output - // names instead of AWS CodeBuild. - // - // * If type is set to NO_ARTIFACTS, this value is ignored if specified, - // because no build output is produced. - // - // * If type is set to S3, this is the name of the output artifact object. - // If you set the name to be a forward slash ("/"), the artifact is stored - // in the root of the output bucket. - // - // For example: - // - // * If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and - // name is set to MyArtifact.zip, then the output artifact is stored in MyArtifacts/build-ID/MyArtifact.zip. - // - // * If path is empty, namespaceType is set to NONE, and name is set to "/", - // the output artifact is stored in the root of the output bucket. - // - // * If path is set to MyArtifacts, namespaceType is set to BUILD_ID, and - // name is set to "/", the output artifact is stored in MyArtifacts/build-ID . - Name *string `locationName:"name" type:"string"` +// String returns the string representation +func (s PutResourcePolicyOutput) String() string { + return awsutil.Prettify(s) +} - // Along with path and name, the pattern that AWS CodeBuild uses to determine - // the name and location to store the output artifact: - // - // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value - // if specified. This is because AWS CodePipeline manages its build output - // names instead of AWS CodeBuild. - // - // * If type is set to NO_ARTIFACTS, this value is ignored if specified, - // because no build output is produced. - // - // * If type is set to S3, valid values include: BUILD_ID: Include the build - // ID in the location of the build output artifact. NONE: Do not include - // the build ID. This is the default if namespaceType is not specified. - // - // For example, if path is set to MyArtifacts, namespaceType is set to BUILD_ID, - // and name is set to MyArtifact.zip, the output artifact is stored in MyArtifacts/build-ID/MyArtifact.zip. - NamespaceType *string `locationName:"namespaceType" type:"string" enum:"ArtifactNamespace"` +// GoString returns the string representation +func (s PutResourcePolicyOutput) GoString() string { + return s.String() +} - // If this flag is set, a name specified in the buildspec file overrides the - // artifact name. The name specified in a buildspec file is calculated at build - // time and uses the Shell Command Language. For example, you can append a date - // and time to your artifact name so that it is always unique. 
- OverrideArtifactName *bool `locationName:"overrideArtifactName" type:"boolean"` +// SetResourceArn sets the ResourceArn field's value. +func (s *PutResourcePolicyOutput) SetResourceArn(v string) *PutResourcePolicyOutput { + s.ResourceArn = &v + return s +} - // The type of build output artifact to create: - // - // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value - // if specified. This is because AWS CodePipeline manages its build output - // artifacts instead of AWS CodeBuild. - // - // * If type is set to NO_ARTIFACTS, this value is ignored if specified, - // because no build output is produced. - // - // * If type is set to S3, valid values include: NONE: AWS CodeBuild creates - // in the output bucket a folder that contains the build output. This is - // the default if packaging is not specified. ZIP: AWS CodeBuild creates - // in the output bucket a ZIP file that contains the build output. - Packaging *string `locationName:"packaging" type:"string" enum:"ArtifactPackaging"` +// Information about credentials that provide access to a private Docker registry. +// When this is set: +// +// * imagePullCredentialsType must be set to SERVICE_ROLE. +// +// * images cannot be curated or an Amazon ECR image. +// +// For more information, see Private Registry with AWS Secrets Manager Sample +// for AWS CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-private-registry.html). +type RegistryCredential struct { + _ struct{} `type:"structure"` - // Along with namespaceType and name, the pattern that AWS CodeBuild uses to - // name and store the output artifact: - // - // * If type is set to CODEPIPELINE, AWS CodePipeline ignores this value - // if specified. This is because AWS CodePipeline manages its build output - // names instead of AWS CodeBuild. - // - // * If type is set to NO_ARTIFACTS, this value is ignored if specified, - // because no build output is produced. + // The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets + // Manager. // - // * If type is set to S3, this is the path to the output artifact. If path - // is not specified, path is not used. + // The credential can use the name of the credentials only if they exist in + // your current AWS Region. // - // For example, if path is set to MyArtifacts, namespaceType is set to NONE, - // and name is set to MyArtifact.zip, the output artifact is stored in the output - // bucket at MyArtifacts/MyArtifact.zip. - Path *string `locationName:"path" type:"string"` + // Credential is a required field + Credential *string `locationName:"credential" min:"1" type:"string" required:"true"` - // The type of build output artifact. Valid values include: - // - // * CODEPIPELINE: The build project has build output generated through AWS - // CodePipeline. The CODEPIPELINE type is not supported for secondaryArtifacts. - // - // * NO_ARTIFACTS: The build project does not produce any build output. - // - // * S3: The build project stores build output in Amazon Simple Storage Service - // (Amazon S3). + // The service that created the credentials to access a private Docker registry. + // The valid value, SECRETS_MANAGER, is for AWS Secrets Manager. 
// - // Type is a required field - Type *string `locationName:"type" type:"string" required:"true" enum:"ArtifactsType"` + // CredentialProvider is a required field + CredentialProvider *string `locationName:"credentialProvider" type:"string" required:"true" enum:"CredentialProviderType"` } // String returns the string representation -func (s ProjectArtifacts) String() string { +func (s RegistryCredential) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ProjectArtifacts) GoString() string { +func (s RegistryCredential) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ProjectArtifacts) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ProjectArtifacts"} - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) +func (s *RegistryCredential) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegistryCredential"} + if s.Credential == nil { + invalidParams.Add(request.NewErrParamRequired("Credential")) + } + if s.Credential != nil && len(*s.Credential) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Credential", 1)) + } + if s.CredentialProvider == nil { + invalidParams.Add(request.NewErrParamRequired("CredentialProvider")) } if invalidParams.Len() > 0 { @@ -7089,349 +11055,188 @@ func (s *ProjectArtifacts) Validate() error { return nil } -// SetArtifactIdentifier sets the ArtifactIdentifier field's value. -func (s *ProjectArtifacts) SetArtifactIdentifier(v string) *ProjectArtifacts { - s.ArtifactIdentifier = &v +// SetCredential sets the Credential field's value. +func (s *RegistryCredential) SetCredential(v string) *RegistryCredential { + s.Credential = &v return s } -// SetEncryptionDisabled sets the EncryptionDisabled field's value. -func (s *ProjectArtifacts) SetEncryptionDisabled(v bool) *ProjectArtifacts { - s.EncryptionDisabled = &v +// SetCredentialProvider sets the CredentialProvider field's value. +func (s *RegistryCredential) SetCredentialProvider(v string) *RegistryCredential { + s.CredentialProvider = &v return s } -// SetLocation sets the Location field's value. -func (s *ProjectArtifacts) SetLocation(v string) *ProjectArtifacts { - s.Location = &v - return s -} +// Information about the results from running a series of test cases during +// the run of a build project. The test cases are specified in the buildspec +// for the build project using one or more paths to the test case files. You +// can specify any type of tests you want, such as unit tests, integration tests, +// and functional tests. +type Report struct { + _ struct{} `type:"structure"` -// SetName sets the Name field's value. -func (s *ProjectArtifacts) SetName(v string) *ProjectArtifacts { - s.Name = &v - return s -} + // The ARN of the report run. + Arn *string `locationName:"arn" min:"1" type:"string"` -// SetNamespaceType sets the NamespaceType field's value. -func (s *ProjectArtifacts) SetNamespaceType(v string) *ProjectArtifacts { - s.NamespaceType = &v - return s -} + // A CodeCoverageReportSummary object that contains a code coverage summary + // for this report. + CodeCoverageSummary *CodeCoverageReportSummary `locationName:"codeCoverageSummary" type:"structure"` -// SetOverrideArtifactName sets the OverrideArtifactName field's value. -func (s *ProjectArtifacts) SetOverrideArtifactName(v bool) *ProjectArtifacts { - s.OverrideArtifactName = &v - return s -} + // The date and time this report run occurred. 
+ Created *time.Time `locationName:"created" type:"timestamp"` -// SetPackaging sets the Packaging field's value. -func (s *ProjectArtifacts) SetPackaging(v string) *ProjectArtifacts { - s.Packaging = &v - return s -} + // The ARN of the build run that generated this report. + ExecutionId *string `locationName:"executionId" type:"string"` -// SetPath sets the Path field's value. -func (s *ProjectArtifacts) SetPath(v string) *ProjectArtifacts { - s.Path = &v - return s -} + // The date and time a report expires. A report expires 30 days after it is + // created. An expired report is not available to view in CodeBuild. + Expired *time.Time `locationName:"expired" type:"timestamp"` -// SetType sets the Type field's value. -func (s *ProjectArtifacts) SetType(v string) *ProjectArtifacts { - s.Type = &v - return s -} + // Information about where the raw data used to generate this report was exported. + ExportConfig *ReportExportConfig `locationName:"exportConfig" type:"structure"` -// Information about the build badge for the build project. -type ProjectBadge struct { - _ struct{} `type:"structure"` + // The name of the report that was run. + Name *string `locationName:"name" type:"string"` - // Set this to true to generate a publicly accessible URL for your project's - // build badge. - BadgeEnabled *bool `locationName:"badgeEnabled" type:"boolean"` + // The ARN of the report group associated with this report. + ReportGroupArn *string `locationName:"reportGroupArn" min:"1" type:"string"` - // The publicly-accessible URL through which you can access the build badge - // for your project. + // The status of this report. + Status *string `locationName:"status" type:"string" enum:"ReportStatusType"` + + // A TestReportSummary object that contains information about this test report. + TestSummary *TestReportSummary `locationName:"testSummary" type:"structure"` + + // A boolean that specifies if this report run is truncated. The list of test + // cases is truncated after the maximum number of test cases is reached. + Truncated *bool `locationName:"truncated" type:"boolean"` + + // The type of the report that was run. // - // The publicly accessible URL through which you can access the build badge - // for your project. - BadgeRequestUrl *string `locationName:"badgeRequestUrl" type:"string"` + // CODE_COVERAGE + // + // A code coverage report. + // + // TEST + // + // A test report. + Type *string `locationName:"type" type:"string" enum:"ReportType"` } // String returns the string representation -func (s ProjectBadge) String() string { +func (s Report) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ProjectBadge) GoString() string { +func (s Report) GoString() string { return s.String() } -// SetBadgeEnabled sets the BadgeEnabled field's value. -func (s *ProjectBadge) SetBadgeEnabled(v bool) *ProjectBadge { - s.BadgeEnabled = &v +// SetArn sets the Arn field's value. +func (s *Report) SetArn(v string) *Report { + s.Arn = &v return s } -// SetBadgeRequestUrl sets the BadgeRequestUrl field's value. -func (s *ProjectBadge) SetBadgeRequestUrl(v string) *ProjectBadge { - s.BadgeRequestUrl = &v +// SetCodeCoverageSummary sets the CodeCoverageSummary field's value. +func (s *Report) SetCodeCoverageSummary(v *CodeCoverageReportSummary) *Report { + s.CodeCoverageSummary = v return s } -// Information about the cache for the build project. 
-type ProjectCache struct { - _ struct{} `type:"structure"` - - // Information about the cache location: - // - // * NO_CACHE or LOCAL: This value is ignored. - // - // * S3: This is the S3 bucket name/prefix. - Location *string `locationName:"location" type:"string"` +// SetCreated sets the Created field's value. +func (s *Report) SetCreated(v time.Time) *Report { + s.Created = &v + return s +} - // If you use a LOCAL cache, the local cache mode. You can use one or more local - // cache modes at the same time. - // - // * LOCAL_SOURCE_CACHE mode caches Git metadata for primary and secondary - // sources. After the cache is created, subsequent builds pull only the change - // between commits. This mode is a good choice for projects with a clean - // working directory and a source that is a large Git repository. If you - // choose this option and your project does not use a Git repository (GitHub, - // GitHub Enterprise, or Bitbucket), the option is ignored. - // - // * LOCAL_DOCKER_LAYER_CACHE mode caches existing Docker layers. This mode - // is a good choice for projects that build or pull large Docker images. - // It can prevent the performance issues caused by pulling large Docker images - // down from the network. You can use a Docker layer cache in the Linux environment - // only. The privileged flag must be set so that your project has the required - // Docker permissions. You should consider the security implications before - // you use a Docker layer cache. - // - // * LOCAL_CUSTOM_CACHE mode caches directories you specify in the buildspec - // file. This mode is a good choice if your build scenario is not suited - // to one of the other three local cache modes. If you use a custom cache: - // Only directories can be specified for caching. You cannot specify individual - // files. Symlinks are used to reference cached directories. Cached directories - // are linked to your build before it downloads its project sources. Cached - // items are overridden if a source item has the same name. Directories are - // specified using cache paths in the buildspec file. - Modes []*string `locationName:"modes" type:"list"` +// SetExecutionId sets the ExecutionId field's value. +func (s *Report) SetExecutionId(v string) *Report { + s.ExecutionId = &v + return s +} - // The type of cache used by the build project. Valid values include: - // - // * NO_CACHE: The build project does not use any cache. - // - // * S3: The build project reads and writes from and to S3. - // - // * LOCAL: The build project stores a cache locally on a build host that - // is only available to that build host. - // - // Type is a required field - Type *string `locationName:"type" type:"string" required:"true" enum:"CacheType"` +// SetExpired sets the Expired field's value. +func (s *Report) SetExpired(v time.Time) *Report { + s.Expired = &v + return s } -// String returns the string representation -func (s ProjectCache) String() string { - return awsutil.Prettify(s) +// SetExportConfig sets the ExportConfig field's value. +func (s *Report) SetExportConfig(v *ReportExportConfig) *Report { + s.ExportConfig = v + return s } -// GoString returns the string representation -func (s ProjectCache) GoString() string { - return s.String() +// SetName sets the Name field's value. +func (s *Report) SetName(v string) *Report { + s.Name = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ProjectCache) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ProjectCache"} - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } +// SetReportGroupArn sets the ReportGroupArn field's value. +func (s *Report) SetReportGroupArn(v string) *Report { + s.ReportGroupArn = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetStatus sets the Status field's value. +func (s *Report) SetStatus(v string) *Report { + s.Status = &v + return s } -// SetLocation sets the Location field's value. -func (s *ProjectCache) SetLocation(v string) *ProjectCache { - s.Location = &v +// SetTestSummary sets the TestSummary field's value. +func (s *Report) SetTestSummary(v *TestReportSummary) *Report { + s.TestSummary = v return s } -// SetModes sets the Modes field's value. -func (s *ProjectCache) SetModes(v []*string) *ProjectCache { - s.Modes = v +// SetTruncated sets the Truncated field's value. +func (s *Report) SetTruncated(v bool) *Report { + s.Truncated = &v return s } // SetType sets the Type field's value. -func (s *ProjectCache) SetType(v string) *ProjectCache { +func (s *Report) SetType(v string) *Report { s.Type = &v return s } -// Information about the build environment of the build project. -type ProjectEnvironment struct { +// Information about the location where the run of a report is exported. +type ReportExportConfig struct { _ struct{} `type:"structure"` - // The certificate to use with this build project. - Certificate *string `locationName:"certificate" type:"string"` - - // Information about the compute resources the build project uses. Available - // values include: - // - // * BUILD_GENERAL1_SMALL: Use up to 3 GB memory and 2 vCPUs for builds. - // - // * BUILD_GENERAL1_MEDIUM: Use up to 7 GB memory and 4 vCPUs for builds. - // - // * BUILD_GENERAL1_LARGE: Use up to 16 GB memory and 8 vCPUs for builds, - // depending on your environment type. - // - // * BUILD_GENERAL1_2XLARGE: Use up to 145 GB memory, 72 vCPUs, and 824 GB - // of SSD storage for builds. This compute type supports Docker images up - // to 100 GB uncompressed. - // - // If you use BUILD_GENERAL1_LARGE: - // - // * For environment type LINUX_CONTAINER, you can use up to 15 GB memory - // and 8 vCPUs for builds. - // - // * For environment type LINUX_GPU_CONTAINER, you can use up to 255 GB memory, - // 32 vCPUs, and 4 NVIDIA Tesla V100 GPUs for builds. - // - // * For environment type ARM_CONTAINER, you can use up to 16 GB memory and - // 8 vCPUs on ARM-based processors for builds. - // - // For more information, see Build Environment Compute Types (https://docs.aws.amazon.com/codebuild/latest/userguide/build-env-ref-compute-types.html) - // in the AWS CodeBuild User Guide. - // - // ComputeType is a required field - ComputeType *string `locationName:"computeType" type:"string" required:"true" enum:"ComputeType"` - - // A set of environment variables to make available to builds for this build - // project. - EnvironmentVariables []*EnvironmentVariable `locationName:"environmentVariables" type:"list"` - - // The image tag or image digest that identifies the Docker image to use for - // this build project. Use the following formats: - // - // * For an image tag: registry/repository:tag. For example, to specify an - // image with the tag "latest," use registry/repository:latest. - // - // * For an image digest: registry/repository@digest. 
For example, to specify - // an image with the digest "sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf," - // use registry/repository@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf. - // - // Image is a required field - Image *string `locationName:"image" min:"1" type:"string" required:"true"` - - // The type of credentials AWS CodeBuild uses to pull images in your build. - // There are two valid values: - // - // * CODEBUILD specifies that AWS CodeBuild uses its own credentials. This - // requires that you modify your ECR repository policy to trust AWS CodeBuild's - // service principal. - // - // * SERVICE_ROLE specifies that AWS CodeBuild uses your build project's - // service role. - // - // When you use a cross-account or private registry image, you must use SERVICE_ROLE - // credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD - // credentials. - ImagePullCredentialsType *string `locationName:"imagePullCredentialsType" type:"string" enum:"ImagePullCredentialsType"` - - // Enables running the Docker daemon inside a Docker container. Set to true - // only if the build project is used to build Docker images. Otherwise, a build - // that attempts to interact with the Docker daemon fails. The default setting - // is false. - // - // You can initialize the Docker daemon during the install phase of your build - // by adding one of the following sets of commands to the install phase of your - // buildspec file: - // - // If the operating system's base image is Ubuntu Linux: - // - // - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 - // --storage-driver=overlay& - // - // - timeout 15 sh -c "until docker info; do echo .; sleep 1; done" - // - // If the operating system's base image is Alpine Linux and the previous command - // does not work, add the -t argument to timeout: + // The export configuration type. Valid values are: // - // - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 - // --storage-driver=overlay& + // * S3: The report results are exported to an S3 bucket. // - // - timeout -t 15 sh -c "until docker info; do echo .; sleep 1; done" - PrivilegedMode *bool `locationName:"privilegedMode" type:"boolean"` - - // The credentials for access to a private registry. - RegistryCredential *RegistryCredential `locationName:"registryCredential" type:"structure"` + // * NO_EXPORT: The report results are not exported. + ExportConfigType *string `locationName:"exportConfigType" type:"string" enum:"ReportExportConfigType"` - // The type of build environment to use for related builds. - // - // * The environment type ARM_CONTAINER is available only in regions US East - // (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), Asia Pacific - // (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and EU (Frankfurt). - // - // * The environment type LINUX_CONTAINER with compute type build.general1.2xlarge - // is available only in regions US East (N. Virginia), US East (N. Virginia), - // US West (Oregon), Canada (Central), EU (Ireland), EU (London), EU (Frankfurt), - // Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), - // Asia Pacific (Sydney), China (Beijing), and China (Ningxia). - // - // * The environment type LINUX_GPU_CONTAINER is available only in regions - // US East (N. Virginia), US East (N. 
Virginia), US West (Oregon), Canada - // (Central), EU (Ireland), EU (London), EU (Frankfurt), Asia Pacific (Tokyo), - // Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney) - // , China (Beijing), and China (Ningxia). - // - // Type is a required field - Type *string `locationName:"type" type:"string" required:"true" enum:"EnvironmentType"` + // A S3ReportExportConfig object that contains information about the S3 bucket + // where the run of a report is exported. + S3Destination *S3ReportExportConfig `locationName:"s3Destination" type:"structure"` } // String returns the string representation -func (s ProjectEnvironment) String() string { +func (s ReportExportConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ProjectEnvironment) GoString() string { +func (s ReportExportConfig) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ProjectEnvironment) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ProjectEnvironment"} - if s.ComputeType == nil { - invalidParams.Add(request.NewErrParamRequired("ComputeType")) - } - if s.Image == nil { - invalidParams.Add(request.NewErrParamRequired("Image")) - } - if s.Image != nil && len(*s.Image) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Image", 1)) - } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - if s.EnvironmentVariables != nil { - for i, v := range s.EnvironmentVariables { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EnvironmentVariables", i), err.(request.ErrInvalidParams)) - } - } - } - if s.RegistryCredential != nil { - if err := s.RegistryCredential.Validate(); err != nil { - invalidParams.AddNested("RegistryCredential", err.(request.ErrInvalidParams)) +func (s *ReportExportConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReportExportConfig"} + if s.S3Destination != nil { + if err := s.S3Destination.Validate(); err != nil { + invalidParams.AddNested("S3Destination", err.(request.ErrInvalidParams)) } } @@ -7441,444 +11246,317 @@ func (s *ProjectEnvironment) Validate() error { return nil } -// SetCertificate sets the Certificate field's value. -func (s *ProjectEnvironment) SetCertificate(v string) *ProjectEnvironment { - s.Certificate = &v - return s +// SetExportConfigType sets the ExportConfigType field's value. +func (s *ReportExportConfig) SetExportConfigType(v string) *ReportExportConfig { + s.ExportConfigType = &v + return s +} + +// SetS3Destination sets the S3Destination field's value. +func (s *ReportExportConfig) SetS3Destination(v *S3ReportExportConfig) *ReportExportConfig { + s.S3Destination = v + return s +} + +// A filter used to return reports with the status specified by the input status +// parameter. +type ReportFilter struct { + _ struct{} `type:"structure"` + + // The status used to filter reports. You can filter using one status only. + Status *string `locationName:"status" type:"string" enum:"ReportStatusType"` +} + +// String returns the string representation +func (s ReportFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportFilter) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *ReportFilter) SetStatus(v string) *ReportFilter { + s.Status = &v + return s +} + +// A series of reports. 
Each report contains information about the results from +// running a series of test cases. You specify the test cases for a report group +// in the buildspec for a build project using one or more paths to the test +// case files. +type ReportGroup struct { + _ struct{} `type:"structure"` + + // The ARN of a ReportGroup. + Arn *string `locationName:"arn" min:"1" type:"string"` + + // The date and time this ReportGroup was created. + Created *time.Time `locationName:"created" type:"timestamp"` + + // Information about the destination where the raw data of this ReportGroup + // is exported. + ExportConfig *ReportExportConfig `locationName:"exportConfig" type:"structure"` + + // The date and time this ReportGroup was last modified. + LastModified *time.Time `locationName:"lastModified" type:"timestamp"` + + // The name of a ReportGroup. + Name *string `locationName:"name" min:"2" type:"string"` + + // A list of tag key and value pairs associated with this report group. + // + // These tags are available for use by AWS services that support AWS CodeBuild + // report group tags. + Tags []*Tag `locationName:"tags" type:"list"` + + // The type of the ReportGroup. The one valid value is TEST. + Type *string `locationName:"type" type:"string" enum:"ReportType"` +} + +// String returns the string representation +func (s ReportGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReportGroup) GoString() string { + return s.String() } -// SetComputeType sets the ComputeType field's value. -func (s *ProjectEnvironment) SetComputeType(v string) *ProjectEnvironment { - s.ComputeType = &v +// SetArn sets the Arn field's value. +func (s *ReportGroup) SetArn(v string) *ReportGroup { + s.Arn = &v return s } -// SetEnvironmentVariables sets the EnvironmentVariables field's value. -func (s *ProjectEnvironment) SetEnvironmentVariables(v []*EnvironmentVariable) *ProjectEnvironment { - s.EnvironmentVariables = v +// SetCreated sets the Created field's value. +func (s *ReportGroup) SetCreated(v time.Time) *ReportGroup { + s.Created = &v return s } -// SetImage sets the Image field's value. -func (s *ProjectEnvironment) SetImage(v string) *ProjectEnvironment { - s.Image = &v +// SetExportConfig sets the ExportConfig field's value. +func (s *ReportGroup) SetExportConfig(v *ReportExportConfig) *ReportGroup { + s.ExportConfig = v return s } -// SetImagePullCredentialsType sets the ImagePullCredentialsType field's value. -func (s *ProjectEnvironment) SetImagePullCredentialsType(v string) *ProjectEnvironment { - s.ImagePullCredentialsType = &v +// SetLastModified sets the LastModified field's value. +func (s *ReportGroup) SetLastModified(v time.Time) *ReportGroup { + s.LastModified = &v return s } -// SetPrivilegedMode sets the PrivilegedMode field's value. -func (s *ProjectEnvironment) SetPrivilegedMode(v bool) *ProjectEnvironment { - s.PrivilegedMode = &v +// SetName sets the Name field's value. +func (s *ReportGroup) SetName(v string) *ReportGroup { + s.Name = &v return s } -// SetRegistryCredential sets the RegistryCredential field's value. -func (s *ProjectEnvironment) SetRegistryCredential(v *RegistryCredential) *ProjectEnvironment { - s.RegistryCredential = v +// SetTags sets the Tags field's value. +func (s *ReportGroup) SetTags(v []*Tag) *ReportGroup { + s.Tags = v return s } // SetType sets the Type field's value. 
-func (s *ProjectEnvironment) SetType(v string) *ProjectEnvironment { +func (s *ReportGroup) SetType(v string) *ReportGroup { s.Type = &v return s } -// Information about a file system created by Amazon Elastic File System (EFS). -// For more information, see What Is Amazon Elastic File System? (https://docs.aws.amazon.com/efs/latest/ug/whatisefs.html) -type ProjectFileSystemLocation struct { +// Represents a resolved build artifact. A resolve artifact is an artifact that +// is built and deployed to the destination, such as Amazon Simple Storage Service +// (Amazon S3). +type ResolvedArtifact struct { _ struct{} `type:"structure"` - // The name used to access a file system created by Amazon EFS. CodeBuild creates - // an environment variable by appending the identifier in all capital letters - // to CODEBUILD_. For example, if you specify my-efs for identifier, a new environment - // variable is create named CODEBUILD_MY-EFS. - // - // The identifier is used to mount your file system. + // The identifier of the artifact. Identifier *string `locationName:"identifier" type:"string"` - // A string that specifies the location of the file system created by Amazon - // EFS. Its format is efs-dns-name:/directory-path. You can find the DNS name - // of file system when you view it in the AWS EFS console. The directory path - // is a path to a directory in the file system that CodeBuild mounts. For example, - // if the DNS name of a file system is fs-abcd1234.efs.us-west-2.amazonaws.com, - // and its mount directory is my-efs-mount-directory, then the location is fs-abcd1234.efs.us-west-2.amazonaws.com:/my-efs-mount-directory. - // - // The directory path in the format efs-dns-name:/directory-path is optional. - // If you do not specify a directory path, the location is only the DNS name - // and CodeBuild mounts the entire file system. + // The location of the artifact. Location *string `locationName:"location" type:"string"` - // The mount options for a file system created by AWS EFS. The default mount - // options used by CodeBuild are nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2. - // For more information, see Recommended NFS Mount Options (https://docs.aws.amazon.com/efs/latest/ug/mounting-fs-nfs-mount-settings.html). - MountOptions *string `locationName:"mountOptions" type:"string"` - - // The location in the container where you mount the file system. - MountPoint *string `locationName:"mountPoint" type:"string"` - - // The type of the file system. The one supported type is EFS. - Type *string `locationName:"type" type:"string" enum:"FileSystemType"` + // Specifies the type of artifact. + Type *string `locationName:"type" type:"string" enum:"ArtifactsType"` } // String returns the string representation -func (s ProjectFileSystemLocation) String() string { +func (s ResolvedArtifact) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ProjectFileSystemLocation) GoString() string { +func (s ResolvedArtifact) GoString() string { return s.String() } // SetIdentifier sets the Identifier field's value. -func (s *ProjectFileSystemLocation) SetIdentifier(v string) *ProjectFileSystemLocation { +func (s *ResolvedArtifact) SetIdentifier(v string) *ResolvedArtifact { s.Identifier = &v return s } // SetLocation sets the Location field's value. 
-func (s *ProjectFileSystemLocation) SetLocation(v string) *ProjectFileSystemLocation { +func (s *ResolvedArtifact) SetLocation(v string) *ResolvedArtifact { s.Location = &v return s } -// SetMountOptions sets the MountOptions field's value. -func (s *ProjectFileSystemLocation) SetMountOptions(v string) *ProjectFileSystemLocation { - s.MountOptions = &v - return s -} - -// SetMountPoint sets the MountPoint field's value. -func (s *ProjectFileSystemLocation) SetMountPoint(v string) *ProjectFileSystemLocation { - s.MountPoint = &v - return s -} - // SetType sets the Type field's value. -func (s *ProjectFileSystemLocation) SetType(v string) *ProjectFileSystemLocation { +func (s *ResolvedArtifact) SetType(v string) *ResolvedArtifact { s.Type = &v return s } -// Information about the build input source code for the build project. -type ProjectSource struct { - _ struct{} `type:"structure"` - - // Information about the authorization settings for AWS CodeBuild to access - // the source code to be built. - // - // This information is for the AWS CodeBuild console's use only. Your code should - // not get or set this information directly. - Auth *SourceAuth `locationName:"auth" type:"structure"` - - // The buildspec file declaration to use for the builds in this build project. - // - // If this value is set, it can be either an inline buildspec definition, the - // path to an alternate buildspec file relative to the value of the built-in - // CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The - // bucket must be in the same AWS Region as the build project. Specify the buildspec - // file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). - // If this value is not provided or is set to an empty string, the source code - // must contain a buildspec file in its root directory. For more information, - // see Buildspec File Name and Storage Location (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec-ref-name-storage). - Buildspec *string `locationName:"buildspec" type:"string"` - - // Information about the Git clone depth for the build project. - GitCloneDepth *int64 `locationName:"gitCloneDepth" type:"integer"` - - // Information about the Git submodules configuration for the build project. - GitSubmodulesConfig *GitSubmodulesConfig `locationName:"gitSubmodulesConfig" type:"structure"` - - // Enable this flag to ignore SSL warnings while connecting to the project source - // code. - InsecureSsl *bool `locationName:"insecureSsl" type:"boolean"` - - // Information about the location of the source code to be built. Valid values - // include: - // - // * For source code settings that are specified in the source action of - // a pipeline in AWS CodePipeline, location should not be specified. If it - // is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline - // uses the settings in a pipeline's source action instead of this value. - // - // * For source code in an AWS CodeCommit repository, the HTTPS clone URL - // to the repository that contains the source code and the buildspec file - // (for example, https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name - // ). - // - // * For source code in an Amazon Simple Storage Service (Amazon S3) input - // bucket, one of the following. The path to the ZIP file that contains the - // source code (for example, bucket-name/path/to/object-name.zip). 
The path - // to the folder that contains the source code (for example, bucket-name/path/to/source-code/folder/). - // - // * For source code in a GitHub repository, the HTTPS clone URL to the repository - // that contains the source and the buildspec file. You must connect your - // AWS account to your GitHub account. Use the AWS CodeBuild console to start - // creating a build project. When you use the console to connect (or reconnect) - // with GitHub, on the GitHub Authorize application page, for Organization - // access, choose Request access next to each repository you want to allow - // AWS CodeBuild to have access to, and then choose Authorize application. - // (After you have connected to your GitHub account, you do not need to finish - // creating the build project. You can leave the AWS CodeBuild console.) - // To instruct AWS CodeBuild to use this connection, in the source object, - // set the auth object's type value to OAUTH. - // - // * For source code in a Bitbucket repository, the HTTPS clone URL to the - // repository that contains the source and the buildspec file. You must connect - // your AWS account to your Bitbucket account. Use the AWS CodeBuild console - // to start creating a build project. When you use the console to connect - // (or reconnect) with Bitbucket, on the Bitbucket Confirm access to your - // account page, choose Grant access. (After you have connected to your Bitbucket - // account, you do not need to finish creating the build project. You can - // leave the AWS CodeBuild console.) To instruct AWS CodeBuild to use this - // connection, in the source object, set the auth object's type value to - // OAUTH. - Location *string `locationName:"location" type:"string"` - - // Set to true to report the status of a build's start and finish to your source - // provider. This option is valid only when your source provider is GitHub, - // GitHub Enterprise, or Bitbucket. If this is set and you use a different source - // provider, an invalidInputException is thrown. - // - // The status of a build triggered by a webhook is always reported to your source - // provider. - ReportBuildStatus *bool `locationName:"reportBuildStatus" type:"boolean"` - - // An identifier for this project source. - SourceIdentifier *string `locationName:"sourceIdentifier" type:"string"` +// The specified AWS resource cannot be created, because an AWS resource with +// the same settings already exists. +type ResourceAlreadyExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The type of repository that contains the source code to be built. Valid values - // include: - // - // * BITBUCKET: The source code is in a Bitbucket repository. - // - // * CODECOMMIT: The source code is in an AWS CodeCommit repository. - // - // * CODEPIPELINE: The source code settings are specified in the source action - // of a pipeline in AWS CodePipeline. - // - // * GITHUB: The source code is in a GitHub repository. - // - // * GITHUB_ENTERPRISE: The source code is in a GitHub Enterprise repository. - // - // * NO_SOURCE: The project does not have input source code. - // - // * S3: The source code is in an Amazon Simple Storage Service (Amazon S3) - // input bucket. 
- // - // Type is a required field - Type *string `locationName:"type" type:"string" required:"true" enum:"SourceType"` + Message_ *string `locationName:"message" type:"string"` } // String returns the string representation -func (s ProjectSource) String() string { +func (s ResourceAlreadyExistsException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ProjectSource) GoString() string { +func (s ResourceAlreadyExistsException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ProjectSource) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ProjectSource"} - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - if s.Auth != nil { - if err := s.Auth.Validate(); err != nil { - invalidParams.AddNested("Auth", err.(request.ErrInvalidParams)) - } - } - if s.GitSubmodulesConfig != nil { - if err := s.GitSubmodulesConfig.Validate(); err != nil { - invalidParams.AddNested("GitSubmodulesConfig", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams +func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { + return &ResourceAlreadyExistsException{ + RespMetadata: v, } - return nil -} - -// SetAuth sets the Auth field's value. -func (s *ProjectSource) SetAuth(v *SourceAuth) *ProjectSource { - s.Auth = v - return s -} - -// SetBuildspec sets the Buildspec field's value. -func (s *ProjectSource) SetBuildspec(v string) *ProjectSource { - s.Buildspec = &v - return s -} - -// SetGitCloneDepth sets the GitCloneDepth field's value. -func (s *ProjectSource) SetGitCloneDepth(v int64) *ProjectSource { - s.GitCloneDepth = &v - return s } -// SetGitSubmodulesConfig sets the GitSubmodulesConfig field's value. -func (s *ProjectSource) SetGitSubmodulesConfig(v *GitSubmodulesConfig) *ProjectSource { - s.GitSubmodulesConfig = v - return s +// Code returns the exception type name. +func (s *ResourceAlreadyExistsException) Code() string { + return "ResourceAlreadyExistsException" } -// SetInsecureSsl sets the InsecureSsl field's value. -func (s *ProjectSource) SetInsecureSsl(v bool) *ProjectSource { - s.InsecureSsl = &v - return s +// Message returns the exception's message. +func (s *ResourceAlreadyExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -// SetLocation sets the Location field's value. -func (s *ProjectSource) SetLocation(v string) *ProjectSource { - s.Location = &v - return s +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceAlreadyExistsException) OrigErr() error { + return nil } -// SetReportBuildStatus sets the ReportBuildStatus field's value. -func (s *ProjectSource) SetReportBuildStatus(v bool) *ProjectSource { - s.ReportBuildStatus = &v - return s +func (s *ResourceAlreadyExistsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } -// SetSourceIdentifier sets the SourceIdentifier field's value. -func (s *ProjectSource) SetSourceIdentifier(v string) *ProjectSource { - s.SourceIdentifier = &v - return s +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } -// SetType sets the Type field's value. 
-func (s *ProjectSource) SetType(v string) *ProjectSource { - s.Type = &v - return s +// RequestID returns the service's response RequestID for request. +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } -// A source identifier and its corresponding version. -type ProjectSourceVersion struct { - _ struct{} `type:"structure"` - - // An identifier for a source in the build project. - // - // SourceIdentifier is a required field - SourceIdentifier *string `locationName:"sourceIdentifier" type:"string" required:"true"` +// The specified AWS resource cannot be found. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The source version for the corresponding source identifier. If specified, - // must be one of: - // - // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. - // - // * For GitHub: the commit ID, pull request ID, branch name, or tag name - // that corresponds to the version of the source code you want to build. - // If a pull request ID is specified, it must use the format pr/pull-request-ID - // (for example, pr/25). If a branch name is specified, the branch's HEAD - // commit ID is used. If not specified, the default branch's HEAD commit - // ID is used. - // - // * For Bitbucket: the commit ID, branch name, or tag name that corresponds - // to the version of the source code you want to build. If a branch name - // is specified, the branch's HEAD commit ID is used. If not specified, the - // default branch's HEAD commit ID is used. - // - // * For Amazon Simple Storage Service (Amazon S3): the version ID of the - // object that represents the build input ZIP file to use. - // - // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) - // in the AWS CodeBuild User Guide. - // - // SourceVersion is a required field - SourceVersion *string `locationName:"sourceVersion" type:"string" required:"true"` + Message_ *string `locationName:"message" type:"string"` } // String returns the string representation -func (s ProjectSourceVersion) String() string { +func (s ResourceNotFoundException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ProjectSourceVersion) GoString() string { +func (s ResourceNotFoundException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ProjectSourceVersion) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ProjectSourceVersion"} - if s.SourceIdentifier == nil { - invalidParams.Add(request.NewErrParamRequired("SourceIdentifier")) - } - if s.SourceVersion == nil { - invalidParams.Add(request.NewErrParamRequired("SourceVersion")) +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, } +} - if invalidParams.Len() > 0 { - return invalidParams +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *ResourceNotFoundException) OrigErr() error { return nil } -// SetSourceIdentifier sets the SourceIdentifier field's value. -func (s *ProjectSourceVersion) SetSourceIdentifier(v string) *ProjectSourceVersion { - s.SourceIdentifier = &v - return s +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } -// SetSourceVersion sets the SourceVersion field's value. -func (s *ProjectSourceVersion) SetSourceVersion(v string) *ProjectSourceVersion { - s.SourceVersion = &v - return s +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } -type PutResourcePolicyInput struct { +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +type RetryBuildBatchInput struct { _ struct{} `type:"structure"` - // A JSON-formatted resource policy. For more information, see Sharing a Project - // (https://docs.aws.amazon.com/codebuild/latest/userguide/project-sharing.html#project-sharing-share) - // and Sharing a Report Group (https://docs.aws.amazon.com/codebuild/latest/userguide/report-groups-sharing.html#report-groups-sharing-share) - // in the AWS CodeBuild User Guide. - // - // Policy is a required field - Policy *string `locationName:"policy" min:"1" type:"string" required:"true"` + // Specifies the identifier of the batch build to restart. + Id *string `locationName:"id" min:"1" type:"string"` - // The ARN of the Project or ReportGroup resource you want to associate with - // a resource policy. - // - // ResourceArn is a required field - ResourceArn *string `locationName:"resourceArn" min:"1" type:"string" required:"true"` + // A unique, case sensitive identifier you provide to ensure the idempotency + // of the RetryBuildBatch request. The token is included in the RetryBuildBatch + // request and is valid for five minutes. If you repeat the RetryBuildBatch + // request with the same token, but change a parameter, AWS CodeBuild returns + // a parameter mismatch error. + IdempotencyToken *string `locationName:"idempotencyToken" type:"string"` + + // Specifies the type of retry to perform. + RetryType *string `locationName:"retryType" type:"string" enum:"RetryBuildBatchType"` } // String returns the string representation -func (s PutResourcePolicyInput) String() string { +func (s RetryBuildBatchInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutResourcePolicyInput) GoString() string { +func (s RetryBuildBatchInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutResourcePolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutResourcePolicyInput"} - if s.Policy == nil { - invalidParams.Add(request.NewErrParamRequired("Policy")) - } - if s.Policy != nil && len(*s.Policy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) - } - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) - } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) +func (s *RetryBuildBatchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RetryBuildBatchInput"} + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -7887,91 +11565,76 @@ func (s *PutResourcePolicyInput) Validate() error { return nil } -// SetPolicy sets the Policy field's value. -func (s *PutResourcePolicyInput) SetPolicy(v string) *PutResourcePolicyInput { - s.Policy = &v +// SetId sets the Id field's value. +func (s *RetryBuildBatchInput) SetId(v string) *RetryBuildBatchInput { + s.Id = &v return s } -// SetResourceArn sets the ResourceArn field's value. -func (s *PutResourcePolicyInput) SetResourceArn(v string) *PutResourcePolicyInput { - s.ResourceArn = &v +// SetIdempotencyToken sets the IdempotencyToken field's value. +func (s *RetryBuildBatchInput) SetIdempotencyToken(v string) *RetryBuildBatchInput { + s.IdempotencyToken = &v return s } -type PutResourcePolicyOutput struct { +// SetRetryType sets the RetryType field's value. +func (s *RetryBuildBatchInput) SetRetryType(v string) *RetryBuildBatchInput { + s.RetryType = &v + return s +} + +type RetryBuildBatchOutput struct { _ struct{} `type:"structure"` - // The ARN of the Project or ReportGroup resource that is associated with a - // resource policy. - ResourceArn *string `locationName:"resourceArn" min:"1" type:"string"` + // Contains information about a batch build. + BuildBatch *BuildBatch `locationName:"buildBatch" type:"structure"` } // String returns the string representation -func (s PutResourcePolicyOutput) String() string { +func (s RetryBuildBatchOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutResourcePolicyOutput) GoString() string { +func (s RetryBuildBatchOutput) GoString() string { return s.String() } -// SetResourceArn sets the ResourceArn field's value. -func (s *PutResourcePolicyOutput) SetResourceArn(v string) *PutResourcePolicyOutput { - s.ResourceArn = &v +// SetBuildBatch sets the BuildBatch field's value. +func (s *RetryBuildBatchOutput) SetBuildBatch(v *BuildBatch) *RetryBuildBatchOutput { + s.BuildBatch = v return s } -// Information about credentials that provide access to a private Docker registry. -// When this is set: -// -// * imagePullCredentialsType must be set to SERVICE_ROLE. -// -// * images cannot be curated or an Amazon ECR image. -// -// For more information, see Private Registry with AWS Secrets Manager Sample -// for AWS CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-private-registry.html). -type RegistryCredential struct { +type RetryBuildInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets - // Manager. - // - // The credential can use the name of the credentials only if they exist in - // your current AWS Region. 
- // - // Credential is a required field - Credential *string `locationName:"credential" min:"1" type:"string" required:"true"` + // Specifies the identifier of the build to restart. + Id *string `locationName:"id" min:"1" type:"string"` - // The service that created the credentials to access a private Docker registry. - // The valid value, SECRETS_MANAGER, is for AWS Secrets Manager. - // - // CredentialProvider is a required field - CredentialProvider *string `locationName:"credentialProvider" type:"string" required:"true" enum:"CredentialProviderType"` + // A unique, case sensitive identifier you provide to ensure the idempotency + // of the RetryBuild request. The token is included in the RetryBuild request + // and is valid for five minutes. If you repeat the RetryBuild request with + // the same token, but change a parameter, AWS CodeBuild returns a parameter + // mismatch error. + IdempotencyToken *string `locationName:"idempotencyToken" type:"string"` } // String returns the string representation -func (s RegistryCredential) String() string { +func (s RetryBuildInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RegistryCredential) GoString() string { +func (s RetryBuildInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RegistryCredential) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RegistryCredential"} - if s.Credential == nil { - invalidParams.Add(request.NewErrParamRequired("Credential")) - } - if s.Credential != nil && len(*s.Credential) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Credential", 1)) - } - if s.CredentialProvider == nil { - invalidParams.Add(request.NewErrParamRequired("CredentialProvider")) +func (s *RetryBuildInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RetryBuildInput"} + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } if invalidParams.Len() > 0 { @@ -7980,171 +11643,223 @@ func (s *RegistryCredential) Validate() error { return nil } -// SetCredential sets the Credential field's value. -func (s *RegistryCredential) SetCredential(v string) *RegistryCredential { - s.Credential = &v +// SetId sets the Id field's value. +func (s *RetryBuildInput) SetId(v string) *RetryBuildInput { + s.Id = &v return s } -// SetCredentialProvider sets the CredentialProvider field's value. -func (s *RegistryCredential) SetCredentialProvider(v string) *RegistryCredential { - s.CredentialProvider = &v +// SetIdempotencyToken sets the IdempotencyToken field's value. +func (s *RetryBuildInput) SetIdempotencyToken(v string) *RetryBuildInput { + s.IdempotencyToken = &v return s } -// Information about the results from running a series of test cases during -// the run of a build project. The test cases are specified in the buildspec -// for the build project using one or more paths to the test case files. You -// can specify any type of tests you want, such as unit tests, integration tests, -// and functional tests. -type Report struct { +type RetryBuildOutput struct { _ struct{} `type:"structure"` - // The ARN of the report run. - Arn *string `locationName:"arn" min:"1" type:"string"` - - // The date and time this report run occurred. - Created *time.Time `locationName:"created" type:"timestamp"` - - // The ARN of the build run that generated this report. 
- ExecutionId *string `locationName:"executionId" type:"string"` - - // The date and time a report expires. A report expires 30 days after it is - // created. An expired report is not available to view in CodeBuild. - Expired *time.Time `locationName:"expired" type:"timestamp"` - - // Information about where the raw data used to generate this report was exported. - ExportConfig *ReportExportConfig `locationName:"exportConfig" type:"structure"` + // Information about a build. + Build *Build `locationName:"build" type:"structure"` +} - // The name of the report that was run. - Name *string `locationName:"name" type:"string"` +// String returns the string representation +func (s RetryBuildOutput) String() string { + return awsutil.Prettify(s) +} - // The ARN of the report group associated with this report. - ReportGroupArn *string `locationName:"reportGroupArn" min:"1" type:"string"` +// GoString returns the string representation +func (s RetryBuildOutput) GoString() string { + return s.String() +} - // The status of this report. - Status *string `locationName:"status" type:"string" enum:"ReportStatusType"` +// SetBuild sets the Build field's value. +func (s *RetryBuildOutput) SetBuild(v *Build) *RetryBuildOutput { + s.Build = v + return s +} - // A TestReportSummary object that contains information about this test report. - TestSummary *TestReportSummary `locationName:"testSummary" type:"structure"` +// Information about S3 logs for a build project. +type S3LogsConfig struct { + _ struct{} `type:"structure"` - // A boolean that specifies if this report run is truncated. The list of test - // cases is truncated after the maximum number of test cases is reached. - Truncated *bool `locationName:"truncated" type:"boolean"` + // Set to true if you do not want your S3 build log output encrypted. By default + // S3 build logs are encrypted. + EncryptionDisabled *bool `locationName:"encryptionDisabled" type:"boolean"` - // The type of the report that was run. - Type *string `locationName:"type" type:"string" enum:"ReportType"` + // The ARN of an S3 bucket and the path prefix for S3 logs. If your Amazon S3 + // bucket name is my-bucket, and your path prefix is build-log, then acceptable + // formats are my-bucket/build-log or arn:aws:s3:::my-bucket/build-log. + Location *string `locationName:"location" type:"string"` + + // The current status of the S3 build logs. Valid values are: + // + // * ENABLED: S3 build logs are enabled for this build project. + // + // * DISABLED: S3 build logs are not enabled for this build project. + // + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"LogsConfigStatusType"` } // String returns the string representation -func (s Report) String() string { +func (s S3LogsConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Report) GoString() string { +func (s S3LogsConfig) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *Report) SetArn(v string) *Report { - s.Arn = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3LogsConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3LogsConfig"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetCreated sets the Created field's value. 
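+// Editor's note (illustrative sketch, not generated code): one way a caller
+// might enable S3 build logs when creating or updating a project; the bucket
+// prefix "my-bucket/build-log" is an assumption.
+//
+//	logsConfig := &codebuild.LogsConfig{
+//		S3Logs: &codebuild.S3LogsConfig{
+//			Status:   aws.String(codebuild.LogsConfigStatusTypeEnabled),
+//			Location: aws.String("my-bucket/build-log"),
+//		},
+//	}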
-func (s *Report) SetCreated(v time.Time) *Report { - s.Created = &v +// SetEncryptionDisabled sets the EncryptionDisabled field's value. +func (s *S3LogsConfig) SetEncryptionDisabled(v bool) *S3LogsConfig { + s.EncryptionDisabled = &v return s } -// SetExecutionId sets the ExecutionId field's value. -func (s *Report) SetExecutionId(v string) *Report { - s.ExecutionId = &v +// SetLocation sets the Location field's value. +func (s *S3LogsConfig) SetLocation(v string) *S3LogsConfig { + s.Location = &v return s } -// SetExpired sets the Expired field's value. -func (s *Report) SetExpired(v time.Time) *Report { - s.Expired = &v +// SetStatus sets the Status field's value. +func (s *S3LogsConfig) SetStatus(v string) *S3LogsConfig { + s.Status = &v return s } -// SetExportConfig sets the ExportConfig field's value. -func (s *Report) SetExportConfig(v *ReportExportConfig) *Report { - s.ExportConfig = v - return s +// Information about the S3 bucket where the raw data of a report are exported. +type S3ReportExportConfig struct { + _ struct{} `type:"structure"` + + // The name of the S3 bucket where the raw data of a report are exported. + Bucket *string `locationName:"bucket" min:"1" type:"string"` + + // A boolean value that specifies if the results of a report are encrypted. + EncryptionDisabled *bool `locationName:"encryptionDisabled" type:"boolean"` + + // The encryption key for the report's encrypted raw data. + EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` + + // The type of build output artifact to create. Valid values include: + // + // * NONE: AWS CodeBuild creates the raw data in the output bucket. This + // is the default if packaging is not specified. + // + // * ZIP: AWS CodeBuild creates a ZIP file with the raw data in the output + // bucket. + Packaging *string `locationName:"packaging" type:"string" enum:"ReportPackagingType"` + + // The path to the exported report's raw data results. + Path *string `locationName:"path" type:"string"` } -// SetName sets the Name field's value. -func (s *Report) SetName(v string) *Report { - s.Name = &v - return s +// String returns the string representation +func (s S3ReportExportConfig) String() string { + return awsutil.Prettify(s) } -// SetReportGroupArn sets the ReportGroupArn field's value. -func (s *Report) SetReportGroupArn(v string) *Report { - s.ReportGroupArn = &v +// GoString returns the string representation +func (s S3ReportExportConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3ReportExportConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3ReportExportConfig"} + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.EncryptionKey != nil && len(*s.EncryptionKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncryptionKey", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *S3ReportExportConfig) SetBucket(v string) *S3ReportExportConfig { + s.Bucket = &v return s } -// SetStatus sets the Status field's value. -func (s *Report) SetStatus(v string) *Report { - s.Status = &v +// SetEncryptionDisabled sets the EncryptionDisabled field's value. +func (s *S3ReportExportConfig) SetEncryptionDisabled(v bool) *S3ReportExportConfig { + s.EncryptionDisabled = &v return s } -// SetTestSummary sets the TestSummary field's value. 
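+// Editor's note (illustrative sketch, not generated code): a report group's
+// export configuration could be assembled like this; the bucket name is an
+// assumption.
+//
+//	exportConfig := &codebuild.ReportExportConfig{
+//		ExportConfigType: aws.String(codebuild.ReportExportConfigTypeS3),
+//		S3Destination: &codebuild.S3ReportExportConfig{
+//			Bucket:    aws.String("my-report-bucket"),
+//			Packaging: aws.String(codebuild.ReportPackagingTypeZip),
+//		},
+//	}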
-func (s *Report) SetTestSummary(v *TestReportSummary) *Report { - s.TestSummary = v +// SetEncryptionKey sets the EncryptionKey field's value. +func (s *S3ReportExportConfig) SetEncryptionKey(v string) *S3ReportExportConfig { + s.EncryptionKey = &v return s } -// SetTruncated sets the Truncated field's value. -func (s *Report) SetTruncated(v bool) *Report { - s.Truncated = &v +// SetPackaging sets the Packaging field's value. +func (s *S3ReportExportConfig) SetPackaging(v string) *S3ReportExportConfig { + s.Packaging = &v return s } -// SetType sets the Type field's value. -func (s *Report) SetType(v string) *Report { - s.Type = &v +// SetPath sets the Path field's value. +func (s *S3ReportExportConfig) SetPath(v string) *S3ReportExportConfig { + s.Path = &v return s } -// Information about the location where the run of a report is exported. -type ReportExportConfig struct { +// Information about the authorization settings for AWS CodeBuild to access +// the source code to be built. +// +// This information is for the AWS CodeBuild console's use only. Your code should +// not get or set this information directly. +type SourceAuth struct { _ struct{} `type:"structure"` - // The export configuration type. Valid values are: + // The resource value that applies to the specified authorization type. + Resource *string `locationName:"resource" type:"string"` + // - // * S3: The report results are exported to an S3 bucket. + // This data type is deprecated and is no longer accurate or used. // - // * NO_EXPORT: The report results are not exported. - ExportConfigType *string `locationName:"exportConfigType" type:"string" enum:"ReportExportConfigType"` - - // A S3ReportExportConfig object that contains information about the S3 bucket - // where the run of a report is exported. - S3Destination *S3ReportExportConfig `locationName:"s3Destination" type:"structure"` + // The authorization type to use. The only valid value is OAUTH, which represents + // the OAuth authorization type. + // + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"SourceAuthType"` } // String returns the string representation -func (s ReportExportConfig) String() string { +func (s SourceAuth) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ReportExportConfig) GoString() string { +func (s SourceAuth) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ReportExportConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReportExportConfig"} - if s.S3Destination != nil { - if err := s.S3Destination.Validate(); err != nil { - invalidParams.AddNested("S3Destination", err.(request.ErrInvalidParams)) - } +func (s *SourceAuth) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceAuth"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) } if invalidParams.Len() > 0 { @@ -8153,473 +11868,573 @@ func (s *ReportExportConfig) Validate() error { return nil } -// SetExportConfigType sets the ExportConfigType field's value. -func (s *ReportExportConfig) SetExportConfigType(v string) *ReportExportConfig { - s.ExportConfigType = &v +// SetResource sets the Resource field's value. +func (s *SourceAuth) SetResource(v string) *SourceAuth { + s.Resource = &v return s } -// SetS3Destination sets the S3Destination field's value. 
-func (s *ReportExportConfig) SetS3Destination(v *S3ReportExportConfig) *ReportExportConfig { - s.S3Destination = v +// SetType sets the Type field's value. +func (s *SourceAuth) SetType(v string) *SourceAuth { + s.Type = &v return s } -// A filter used to return reports with the status specified by the input status -// parameter. -type ReportFilter struct { +// Information about the credentials for a GitHub, GitHub Enterprise, or Bitbucket +// repository. +type SourceCredentialsInfo struct { _ struct{} `type:"structure"` - // The status used to filter reports. You can filter using one status only. - Status *string `locationName:"status" type:"string" enum:"ReportStatusType"` + // The Amazon Resource Name (ARN) of the token. + Arn *string `locationName:"arn" min:"1" type:"string"` + + // The type of authentication used by the credentials. Valid options are OAUTH, + // BASIC_AUTH, or PERSONAL_ACCESS_TOKEN. + AuthType *string `locationName:"authType" type:"string" enum:"AuthType"` + + // The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, + // or BITBUCKET. + ServerType *string `locationName:"serverType" type:"string" enum:"ServerType"` } // String returns the string representation -func (s ReportFilter) String() string { +func (s SourceCredentialsInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ReportFilter) GoString() string { +func (s SourceCredentialsInfo) GoString() string { return s.String() } -// SetStatus sets the Status field's value. -func (s *ReportFilter) SetStatus(v string) *ReportFilter { - s.Status = &v +// SetArn sets the Arn field's value. +func (s *SourceCredentialsInfo) SetArn(v string) *SourceCredentialsInfo { + s.Arn = &v return s } -// A series of reports. Each report contains information about the results from -// running a series of test cases. You specify the test cases for a report group -// in the buildspec for a build project using one or more paths to the test -// case files. -type ReportGroup struct { +// SetAuthType sets the AuthType field's value. +func (s *SourceCredentialsInfo) SetAuthType(v string) *SourceCredentialsInfo { + s.AuthType = &v + return s +} + +// SetServerType sets the ServerType field's value. +func (s *SourceCredentialsInfo) SetServerType(v string) *SourceCredentialsInfo { + s.ServerType = &v + return s +} + +type StartBuildBatchInput struct { _ struct{} `type:"structure"` - // The ARN of a ReportGroup. - Arn *string `locationName:"arn" min:"1" type:"string"` + // An array of ProjectArtifacts objects that contains information about the + // build output artifact overrides for the build project. + ArtifactsOverride *ProjectArtifacts `locationName:"artifactsOverride" type:"structure"` - // The date and time this ReportGroup was created. - Created *time.Time `locationName:"created" type:"timestamp"` + // A BuildBatchConfigOverride object that contains batch build configuration + // overrides. + BuildBatchConfigOverride *ProjectBuildBatchConfig `locationName:"buildBatchConfigOverride" type:"structure"` - // Information about the destination where the raw data of this ReportGroup - // is exported. - ExportConfig *ReportExportConfig `locationName:"exportConfig" type:"structure"` + // Overrides the build timeout specified in the batch build project. 
+ BuildTimeoutInMinutesOverride *int64 `locationName:"buildTimeoutInMinutesOverride" min:"5" type:"integer"` + + // A buildspec file declaration that overrides, for this build only, the latest + // one already defined in the build project. + // + // If this value is set, it can be either an inline buildspec definition, the + // path to an alternate buildspec file relative to the value of the built-in + // CODEBUILD_SRC_DIR environment variable, or the path to an S3 bucket. The + // bucket must be in the same AWS Region as the build project. Specify the buildspec + // file using its ARN (for example, arn:aws:s3:::my-codebuild-sample2/buildspec.yml). + // If this value is not provided or is set to an empty string, the source code + // must contain a buildspec file in its root directory. For more information, + // see Buildspec File Name and Storage Location (https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec-ref-name-storage). + BuildspecOverride *string `locationName:"buildspecOverride" type:"string"` + + // A ProjectCache object that specifies cache overrides. + CacheOverride *ProjectCache `locationName:"cacheOverride" type:"structure"` - // The date and time this ReportGroup was last modified. - LastModified *time.Time `locationName:"lastModified" type:"timestamp"` + // The name of a certificate for this batch build that overrides the one specified + // in the batch build project. + CertificateOverride *string `locationName:"certificateOverride" type:"string"` - // The name of a ReportGroup. - Name *string `locationName:"name" min:"2" type:"string"` + // The name of a compute type for this batch build that overrides the one specified + // in the batch build project. + ComputeTypeOverride *string `locationName:"computeTypeOverride" type:"string" enum:"ComputeType"` - // The type of the ReportGroup. The one valid value is TEST. - Type *string `locationName:"type" type:"string" enum:"ReportType"` -} + // The AWS Key Management Service (AWS KMS) customer master key (CMK) that overrides + // the one specified in the batch build project. The CMK key encrypts the build + // output artifacts. + // + // You can use a cross-account KMS key to encrypt the build output artifacts + // if your service role has permission to that key. + // + // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, + // the CMK's alias (using the format alias/). + EncryptionKeyOverride *string `locationName:"encryptionKeyOverride" min:"1" type:"string"` -// String returns the string representation -func (s ReportGroup) String() string { - return awsutil.Prettify(s) -} + // A container type for this batch build that overrides the one specified in + // the batch build project. + EnvironmentTypeOverride *string `locationName:"environmentTypeOverride" type:"string" enum:"EnvironmentType"` -// GoString returns the string representation -func (s ReportGroup) GoString() string { - return s.String() -} + // An array of EnvironmentVariable objects that override, or add to, the environment + // variables defined in the batch build project. + EnvironmentVariablesOverride []*EnvironmentVariable `locationName:"environmentVariablesOverride" type:"list"` -// SetArn sets the Arn field's value. -func (s *ReportGroup) SetArn(v string) *ReportGroup { - s.Arn = &v - return s -} + // The user-defined depth of history, with a minimum value of 0, that overrides, + // for this batch build only, any previous depth of history defined in the batch + // build project. 
+ GitCloneDepthOverride *int64 `locationName:"gitCloneDepthOverride" type:"integer"` -// SetCreated sets the Created field's value. -func (s *ReportGroup) SetCreated(v time.Time) *ReportGroup { - s.Created = &v - return s -} + // A GitSubmodulesConfig object that overrides the Git submodules configuration + // for this batch build. + GitSubmodulesConfigOverride *GitSubmodulesConfig `locationName:"gitSubmodulesConfigOverride" type:"structure"` -// SetExportConfig sets the ExportConfig field's value. -func (s *ReportGroup) SetExportConfig(v *ReportExportConfig) *ReportGroup { - s.ExportConfig = v - return s -} + // A unique, case sensitive identifier you provide to ensure the idempotency + // of the StartBuildBatch request. The token is included in the StartBuildBatch + // request and is valid for five minutes. If you repeat the StartBuildBatch + // request with the same token, but change a parameter, AWS CodeBuild returns + // a parameter mismatch error. + IdempotencyToken *string `locationName:"idempotencyToken" type:"string"` -// SetLastModified sets the LastModified field's value. -func (s *ReportGroup) SetLastModified(v time.Time) *ReportGroup { - s.LastModified = &v - return s -} + // The name of an image for this batch build that overrides the one specified + // in the batch build project. + ImageOverride *string `locationName:"imageOverride" min:"1" type:"string"` -// SetName sets the Name field's value. -func (s *ReportGroup) SetName(v string) *ReportGroup { - s.Name = &v - return s -} + // The type of credentials AWS CodeBuild uses to pull images in your batch build. + // There are two valid values: + // + // CODEBUILD + // + // Specifies that AWS CodeBuild uses its own credentials. This requires that + // you modify your ECR repository policy to trust AWS CodeBuild's service principal. + // + // SERVICE_ROLE + // + // Specifies that AWS CodeBuild uses your build project's service role. + // + // When using a cross-account or private registry image, you must use SERVICE_ROLE + // credentials. When using an AWS CodeBuild curated image, you must use CODEBUILD + // credentials. + ImagePullCredentialsTypeOverride *string `locationName:"imagePullCredentialsTypeOverride" type:"string" enum:"ImagePullCredentialsType"` -// SetType sets the Type field's value. -func (s *ReportGroup) SetType(v string) *ReportGroup { - s.Type = &v - return s -} + // Enable this flag to override the insecure SSL setting that is specified in + // the batch build project. The insecure SSL setting determines whether to ignore + // SSL warnings while connecting to the project source code. This override applies + // only if the build's source is GitHub Enterprise. + InsecureSslOverride *bool `locationName:"insecureSslOverride" type:"boolean"` -// The specified AWS resource cannot be created, because an AWS resource with -// the same settings already exists. -type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + // A LogsConfig object that override the log settings defined in the batch build + // project. + LogsConfigOverride *LogsConfig `locationName:"logsConfigOverride" type:"structure"` - Message_ *string `locationName:"message" type:"string"` -} + // Enable this flag to override privileged mode in the batch build project. 
+ PrivilegedModeOverride *bool `locationName:"privilegedModeOverride" type:"boolean"` -// String returns the string representation -func (s ResourceAlreadyExistsException) String() string { - return awsutil.Prettify(s) -} + // The name of the project. + // + // ProjectName is a required field + ProjectName *string `locationName:"projectName" min:"1" type:"string" required:"true"` -// GoString returns the string representation -func (s ResourceAlreadyExistsException) GoString() string { - return s.String() -} + // The number of minutes a batch build is allowed to be queued before it times + // out. + QueuedTimeoutInMinutesOverride *int64 `locationName:"queuedTimeoutInMinutesOverride" min:"5" type:"integer"` -func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { - return &ResourceAlreadyExistsException{ - respMetadata: v, - } -} + // A RegistryCredential object that overrides credentials for access to a private + // registry. + RegistryCredentialOverride *RegistryCredential `locationName:"registryCredentialOverride" type:"structure"` -// Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { - return "ResourceAlreadyExistsException" -} + // Set to true to report to your source provider the status of a batch build's + // start and completion. If you use this option with a source provider other + // than GitHub, GitHub Enterprise, or Bitbucket, an invalidInputException is + // thrown. + // + // The status of a build triggered by a webhook is always reported to your source + // provider. + ReportBuildBatchStatusOverride *bool `locationName:"reportBuildBatchStatusOverride" type:"boolean"` -// Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} + // An array of ProjectArtifacts objects that override the secondary artifacts + // defined in the batch build project. + SecondaryArtifactsOverride []*ProjectArtifacts `locationName:"secondaryArtifactsOverride" type:"list"` -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { - return nil -} + // An array of ProjectSource objects that override the secondary sources defined + // in the batch build project. + SecondarySourcesOverride []*ProjectSource `locationName:"secondarySourcesOverride" type:"list"` -func (s ResourceAlreadyExistsException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} + // An array of ProjectSourceVersion objects that override the secondary source + // versions in the batch build project. + SecondarySourcesVersionOverride []*ProjectSourceVersion `locationName:"secondarySourcesVersionOverride" type:"list"` -// Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode -} + // The name of a service role for this batch build that overrides the one specified + // in the batch build project. + ServiceRoleOverride *string `locationName:"serviceRoleOverride" min:"1" type:"string"` -// RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID -} + // A SourceAuth object that overrides the one defined in the batch build project. + // This override applies only if the build project's source is BitBucket or + // GitHub. 
+ SourceAuthOverride *SourceAuth `locationName:"sourceAuthOverride" type:"structure"` -// The specified AWS resource cannot be found. -type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + // A location that overrides, for this batch build, the source location defined + // in the batch build project. + SourceLocationOverride *string `locationName:"sourceLocationOverride" type:"string"` - Message_ *string `locationName:"message" type:"string"` + // The source input type that overrides the source input defined in the batch + // build project. + SourceTypeOverride *string `locationName:"sourceTypeOverride" type:"string" enum:"SourceType"` + + // The version of the batch build input to be built, for this build only. If + // not specified, the latest version is used. If specified, the contents depends + // on the source provider: + // + // AWS CodeCommit + // + // The commit ID, branch, or Git tag to use. + // + // GitHub + // + // The commit ID, pull request ID, branch name, or tag name that corresponds + // to the version of the source code you want to build. If a pull request ID + // is specified, it must use the format pr/pull-request-ID (for example pr/25). + // If a branch name is specified, the branch's HEAD commit ID is used. If not + // specified, the default branch's HEAD commit ID is used. + // + // Bitbucket + // + // The commit ID, branch name, or tag name that corresponds to the version of + // the source code you want to build. If a branch name is specified, the branch's + // HEAD commit ID is used. If not specified, the default branch's HEAD commit + // ID is used. + // + // Amazon Simple Storage Service (Amazon S3) + // + // The version ID of the object that represents the build input ZIP file to + // use. + // + // If sourceVersion is specified at the project level, then this sourceVersion + // (at the build level) takes precedence. + // + // For more information, see Source Version Sample with CodeBuild (https://docs.aws.amazon.com/codebuild/latest/userguide/sample-source-version.html) + // in the AWS CodeBuild User Guide. + SourceVersion *string `locationName:"sourceVersion" type:"string"` } // String returns the string representation -func (s ResourceNotFoundException) String() string { +func (s StartBuildBatchInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ResourceNotFoundException) GoString() string { +func (s StartBuildBatchInput) GoString() string { return s.String() } -func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { - return &ResourceNotFoundException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartBuildBatchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartBuildBatchInput"} + if s.BuildTimeoutInMinutesOverride != nil && *s.BuildTimeoutInMinutesOverride < 5 { + invalidParams.Add(request.NewErrParamMinValue("BuildTimeoutInMinutesOverride", 5)) } -} - -// Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { - return "ResourceNotFoundException" -} - -// Message returns the exception's message. 
-func (s ResourceNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ + if s.EncryptionKeyOverride != nil && len(*s.EncryptionKeyOverride) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncryptionKeyOverride", 1)) + } + if s.ImageOverride != nil && len(*s.ImageOverride) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageOverride", 1)) + } + if s.ProjectName == nil { + invalidParams.Add(request.NewErrParamRequired("ProjectName")) + } + if s.ProjectName != nil && len(*s.ProjectName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProjectName", 1)) + } + if s.QueuedTimeoutInMinutesOverride != nil && *s.QueuedTimeoutInMinutesOverride < 5 { + invalidParams.Add(request.NewErrParamMinValue("QueuedTimeoutInMinutesOverride", 5)) + } + if s.ServiceRoleOverride != nil && len(*s.ServiceRoleOverride) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceRoleOverride", 1)) + } + if s.ArtifactsOverride != nil { + if err := s.ArtifactsOverride.Validate(); err != nil { + invalidParams.AddNested("ArtifactsOverride", err.(request.ErrInvalidParams)) + } + } + if s.BuildBatchConfigOverride != nil { + if err := s.BuildBatchConfigOverride.Validate(); err != nil { + invalidParams.AddNested("BuildBatchConfigOverride", err.(request.ErrInvalidParams)) + } + } + if s.CacheOverride != nil { + if err := s.CacheOverride.Validate(); err != nil { + invalidParams.AddNested("CacheOverride", err.(request.ErrInvalidParams)) + } + } + if s.EnvironmentVariablesOverride != nil { + for i, v := range s.EnvironmentVariablesOverride { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EnvironmentVariablesOverride", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GitSubmodulesConfigOverride != nil { + if err := s.GitSubmodulesConfigOverride.Validate(); err != nil { + invalidParams.AddNested("GitSubmodulesConfigOverride", err.(request.ErrInvalidParams)) + } + } + if s.LogsConfigOverride != nil { + if err := s.LogsConfigOverride.Validate(); err != nil { + invalidParams.AddNested("LogsConfigOverride", err.(request.ErrInvalidParams)) + } + } + if s.RegistryCredentialOverride != nil { + if err := s.RegistryCredentialOverride.Validate(); err != nil { + invalidParams.AddNested("RegistryCredentialOverride", err.(request.ErrInvalidParams)) + } + } + if s.SecondaryArtifactsOverride != nil { + for i, v := range s.SecondaryArtifactsOverride { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecondaryArtifactsOverride", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SecondarySourcesOverride != nil { + for i, v := range s.SecondarySourcesOverride { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecondarySourcesOverride", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SecondarySourcesVersionOverride != nil { + for i, v := range s.SecondarySourcesVersionOverride { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "SecondarySourcesVersionOverride", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SourceAuthOverride != nil { + if err := s.SourceAuthOverride.Validate(); err != nil { + invalidParams.AddNested("SourceAuthOverride", err.(request.ErrInvalidParams)) + } } - return "" -} -// OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceNotFoundException) OrigErr() error { + if invalidParams.Len() > 0 { + return invalidParams + } return nil } -func (s ResourceNotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetArtifactsOverride sets the ArtifactsOverride field's value. +func (s *StartBuildBatchInput) SetArtifactsOverride(v *ProjectArtifacts) *StartBuildBatchInput { + s.ArtifactsOverride = v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +// SetBuildBatchConfigOverride sets the BuildBatchConfigOverride field's value. +func (s *StartBuildBatchInput) SetBuildBatchConfigOverride(v *ProjectBuildBatchConfig) *StartBuildBatchInput { + s.BuildBatchConfigOverride = v + return s } -// RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +// SetBuildTimeoutInMinutesOverride sets the BuildTimeoutInMinutesOverride field's value. +func (s *StartBuildBatchInput) SetBuildTimeoutInMinutesOverride(v int64) *StartBuildBatchInput { + s.BuildTimeoutInMinutesOverride = &v + return s } -// Information about S3 logs for a build project. -type S3LogsConfig struct { - _ struct{} `type:"structure"` - - // Set to true if you do not want your S3 build log output encrypted. By default - // S3 build logs are encrypted. - EncryptionDisabled *bool `locationName:"encryptionDisabled" type:"boolean"` - - // The ARN of an S3 bucket and the path prefix for S3 logs. If your Amazon S3 - // bucket name is my-bucket, and your path prefix is build-log, then acceptable - // formats are my-bucket/build-log or arn:aws:s3:::my-bucket/build-log. - Location *string `locationName:"location" type:"string"` - - // The current status of the S3 build logs. Valid values are: - // - // * ENABLED: S3 build logs are enabled for this build project. - // - // * DISABLED: S3 build logs are not enabled for this build project. - // - // Status is a required field - Status *string `locationName:"status" type:"string" required:"true" enum:"LogsConfigStatusType"` +// SetBuildspecOverride sets the BuildspecOverride field's value. +func (s *StartBuildBatchInput) SetBuildspecOverride(v string) *StartBuildBatchInput { + s.BuildspecOverride = &v + return s } -// String returns the string representation -func (s S3LogsConfig) String() string { - return awsutil.Prettify(s) +// SetCacheOverride sets the CacheOverride field's value. +func (s *StartBuildBatchInput) SetCacheOverride(v *ProjectCache) *StartBuildBatchInput { + s.CacheOverride = v + return s } -// GoString returns the string representation -func (s S3LogsConfig) GoString() string { - return s.String() +// SetCertificateOverride sets the CertificateOverride field's value. +func (s *StartBuildBatchInput) SetCertificateOverride(v string) *StartBuildBatchInput { + s.CertificateOverride = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *S3LogsConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "S3LogsConfig"} - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetComputeTypeOverride sets the ComputeTypeOverride field's value. 
+func (s *StartBuildBatchInput) SetComputeTypeOverride(v string) *StartBuildBatchInput { + s.ComputeTypeOverride = &v + return s } -// SetEncryptionDisabled sets the EncryptionDisabled field's value. -func (s *S3LogsConfig) SetEncryptionDisabled(v bool) *S3LogsConfig { - s.EncryptionDisabled = &v +// SetEncryptionKeyOverride sets the EncryptionKeyOverride field's value. +func (s *StartBuildBatchInput) SetEncryptionKeyOverride(v string) *StartBuildBatchInput { + s.EncryptionKeyOverride = &v return s } -// SetLocation sets the Location field's value. -func (s *S3LogsConfig) SetLocation(v string) *S3LogsConfig { - s.Location = &v +// SetEnvironmentTypeOverride sets the EnvironmentTypeOverride field's value. +func (s *StartBuildBatchInput) SetEnvironmentTypeOverride(v string) *StartBuildBatchInput { + s.EnvironmentTypeOverride = &v return s } -// SetStatus sets the Status field's value. -func (s *S3LogsConfig) SetStatus(v string) *S3LogsConfig { - s.Status = &v +// SetEnvironmentVariablesOverride sets the EnvironmentVariablesOverride field's value. +func (s *StartBuildBatchInput) SetEnvironmentVariablesOverride(v []*EnvironmentVariable) *StartBuildBatchInput { + s.EnvironmentVariablesOverride = v return s } -// Information about the S3 bucket where the raw data of a report are exported. -type S3ReportExportConfig struct { - _ struct{} `type:"structure"` - - // The name of the S3 bucket where the raw data of a report are exported. - Bucket *string `locationName:"bucket" min:"1" type:"string"` - - // A boolean value that specifies if the results of a report are encrypted. - EncryptionDisabled *bool `locationName:"encryptionDisabled" type:"boolean"` - - // The encryption key for the report's encrypted raw data. - EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` - - // The type of build output artifact to create. Valid values include: - // - // * NONE: AWS CodeBuild creates the raw data in the output bucket. This - // is the default if packaging is not specified. - // - // * ZIP: AWS CodeBuild creates a ZIP file with the raw data in the output - // bucket. - Packaging *string `locationName:"packaging" type:"string" enum:"ReportPackagingType"` +// SetGitCloneDepthOverride sets the GitCloneDepthOverride field's value. +func (s *StartBuildBatchInput) SetGitCloneDepthOverride(v int64) *StartBuildBatchInput { + s.GitCloneDepthOverride = &v + return s +} - // The path to the exported report's raw data results. - Path *string `locationName:"path" type:"string"` +// SetGitSubmodulesConfigOverride sets the GitSubmodulesConfigOverride field's value. +func (s *StartBuildBatchInput) SetGitSubmodulesConfigOverride(v *GitSubmodulesConfig) *StartBuildBatchInput { + s.GitSubmodulesConfigOverride = v + return s } -// String returns the string representation -func (s S3ReportExportConfig) String() string { - return awsutil.Prettify(s) +// SetIdempotencyToken sets the IdempotencyToken field's value. +func (s *StartBuildBatchInput) SetIdempotencyToken(v string) *StartBuildBatchInput { + s.IdempotencyToken = &v + return s } -// GoString returns the string representation -func (s S3ReportExportConfig) GoString() string { - return s.String() +// SetImageOverride sets the ImageOverride field's value. +func (s *StartBuildBatchInput) SetImageOverride(v string) *StartBuildBatchInput { + s.ImageOverride = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *S3ReportExportConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "S3ReportExportConfig"} - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.EncryptionKey != nil && len(*s.EncryptionKey) < 1 { - invalidParams.Add(request.NewErrParamMinLen("EncryptionKey", 1)) - } +// SetImagePullCredentialsTypeOverride sets the ImagePullCredentialsTypeOverride field's value. +func (s *StartBuildBatchInput) SetImagePullCredentialsTypeOverride(v string) *StartBuildBatchInput { + s.ImagePullCredentialsTypeOverride = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetInsecureSslOverride sets the InsecureSslOverride field's value. +func (s *StartBuildBatchInput) SetInsecureSslOverride(v bool) *StartBuildBatchInput { + s.InsecureSslOverride = &v + return s } -// SetBucket sets the Bucket field's value. -func (s *S3ReportExportConfig) SetBucket(v string) *S3ReportExportConfig { - s.Bucket = &v +// SetLogsConfigOverride sets the LogsConfigOverride field's value. +func (s *StartBuildBatchInput) SetLogsConfigOverride(v *LogsConfig) *StartBuildBatchInput { + s.LogsConfigOverride = v return s } -// SetEncryptionDisabled sets the EncryptionDisabled field's value. -func (s *S3ReportExportConfig) SetEncryptionDisabled(v bool) *S3ReportExportConfig { - s.EncryptionDisabled = &v +// SetPrivilegedModeOverride sets the PrivilegedModeOverride field's value. +func (s *StartBuildBatchInput) SetPrivilegedModeOverride(v bool) *StartBuildBatchInput { + s.PrivilegedModeOverride = &v return s } -// SetEncryptionKey sets the EncryptionKey field's value. -func (s *S3ReportExportConfig) SetEncryptionKey(v string) *S3ReportExportConfig { - s.EncryptionKey = &v +// SetProjectName sets the ProjectName field's value. +func (s *StartBuildBatchInput) SetProjectName(v string) *StartBuildBatchInput { + s.ProjectName = &v return s } -// SetPackaging sets the Packaging field's value. -func (s *S3ReportExportConfig) SetPackaging(v string) *S3ReportExportConfig { - s.Packaging = &v +// SetQueuedTimeoutInMinutesOverride sets the QueuedTimeoutInMinutesOverride field's value. +func (s *StartBuildBatchInput) SetQueuedTimeoutInMinutesOverride(v int64) *StartBuildBatchInput { + s.QueuedTimeoutInMinutesOverride = &v return s } -// SetPath sets the Path field's value. -func (s *S3ReportExportConfig) SetPath(v string) *S3ReportExportConfig { - s.Path = &v +// SetRegistryCredentialOverride sets the RegistryCredentialOverride field's value. +func (s *StartBuildBatchInput) SetRegistryCredentialOverride(v *RegistryCredential) *StartBuildBatchInput { + s.RegistryCredentialOverride = v return s } -// Information about the authorization settings for AWS CodeBuild to access -// the source code to be built. -// -// This information is for the AWS CodeBuild console's use only. Your code should -// not get or set this information directly. -type SourceAuth struct { - _ struct{} `type:"structure"` +// SetReportBuildBatchStatusOverride sets the ReportBuildBatchStatusOverride field's value. +func (s *StartBuildBatchInput) SetReportBuildBatchStatusOverride(v bool) *StartBuildBatchInput { + s.ReportBuildBatchStatusOverride = &v + return s +} - // The resource value that applies to the specified authorization type. - Resource *string `locationName:"resource" type:"string"` +// SetSecondaryArtifactsOverride sets the SecondaryArtifactsOverride field's value. 
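// A minimal, hypothetical usage sketch for the new StartBuildBatch operation added above
// (not part of the generated SDK): it assumes an already-configured *codebuild.CodeBuild
// client named svc plus the "github.com/aws/aws-sdk-go/aws" and "fmt" imports, and the
// project name and queued-timeout values are illustrative only.
//
//	input := &codebuild.StartBuildBatchInput{
//		ProjectName:                    aws.String("example-project"),
//		QueuedTimeoutInMinutesOverride: aws.Int64(10),
//	}
//	out, err := svc.StartBuildBatch(input)
//	if err != nil {
//		return err // handle the service or SDK error
//	}
//	fmt.Println(out.BuildBatch) // describes the batch build that was started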
+func (s *StartBuildBatchInput) SetSecondaryArtifactsOverride(v []*ProjectArtifacts) *StartBuildBatchInput { + s.SecondaryArtifactsOverride = v + return s +} - // - // This data type is deprecated and is no longer accurate or used. - // - // The authorization type to use. The only valid value is OAUTH, which represents - // the OAuth authorization type. - // - // Type is a required field - Type *string `locationName:"type" type:"string" required:"true" enum:"SourceAuthType"` +// SetSecondarySourcesOverride sets the SecondarySourcesOverride field's value. +func (s *StartBuildBatchInput) SetSecondarySourcesOverride(v []*ProjectSource) *StartBuildBatchInput { + s.SecondarySourcesOverride = v + return s } -// String returns the string representation -func (s SourceAuth) String() string { - return awsutil.Prettify(s) +// SetSecondarySourcesVersionOverride sets the SecondarySourcesVersionOverride field's value. +func (s *StartBuildBatchInput) SetSecondarySourcesVersionOverride(v []*ProjectSourceVersion) *StartBuildBatchInput { + s.SecondarySourcesVersionOverride = v + return s } -// GoString returns the string representation -func (s SourceAuth) GoString() string { - return s.String() +// SetServiceRoleOverride sets the ServiceRoleOverride field's value. +func (s *StartBuildBatchInput) SetServiceRoleOverride(v string) *StartBuildBatchInput { + s.ServiceRoleOverride = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *SourceAuth) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SourceAuth"} - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } +// SetSourceAuthOverride sets the SourceAuthOverride field's value. +func (s *StartBuildBatchInput) SetSourceAuthOverride(v *SourceAuth) *StartBuildBatchInput { + s.SourceAuthOverride = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSourceLocationOverride sets the SourceLocationOverride field's value. +func (s *StartBuildBatchInput) SetSourceLocationOverride(v string) *StartBuildBatchInput { + s.SourceLocationOverride = &v + return s } -// SetResource sets the Resource field's value. -func (s *SourceAuth) SetResource(v string) *SourceAuth { - s.Resource = &v +// SetSourceTypeOverride sets the SourceTypeOverride field's value. +func (s *StartBuildBatchInput) SetSourceTypeOverride(v string) *StartBuildBatchInput { + s.SourceTypeOverride = &v return s } -// SetType sets the Type field's value. -func (s *SourceAuth) SetType(v string) *SourceAuth { - s.Type = &v +// SetSourceVersion sets the SourceVersion field's value. +func (s *StartBuildBatchInput) SetSourceVersion(v string) *StartBuildBatchInput { + s.SourceVersion = &v return s } -// Information about the credentials for a GitHub, GitHub Enterprise, or Bitbucket -// repository. -type SourceCredentialsInfo struct { +type StartBuildBatchOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the token. - Arn *string `locationName:"arn" min:"1" type:"string"` - - // The type of authentication used by the credentials. Valid options are OAUTH, - // BASIC_AUTH, or PERSONAL_ACCESS_TOKEN. - AuthType *string `locationName:"authType" type:"string" enum:"AuthType"` - - // The type of source provider. The valid options are GITHUB, GITHUB_ENTERPRISE, - // or BITBUCKET. - ServerType *string `locationName:"serverType" type:"string" enum:"ServerType"` + // A BuildBatch object that contains information about the batch build. 
+ BuildBatch *BuildBatch `locationName:"buildBatch" type:"structure"` } // String returns the string representation -func (s SourceCredentialsInfo) String() string { +func (s StartBuildBatchOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SourceCredentialsInfo) GoString() string { +func (s StartBuildBatchOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *SourceCredentialsInfo) SetArn(v string) *SourceCredentialsInfo { - s.Arn = &v - return s -} - -// SetAuthType sets the AuthType field's value. -func (s *SourceCredentialsInfo) SetAuthType(v string) *SourceCredentialsInfo { - s.AuthType = &v - return s -} - -// SetServerType sets the ServerType field's value. -func (s *SourceCredentialsInfo) SetServerType(v string) *SourceCredentialsInfo { - s.ServerType = &v +// SetBuildBatch sets the BuildBatch field's value. +func (s *StartBuildBatchOutput) SetBuildBatch(v *BuildBatch) *StartBuildBatchOutput { + s.BuildBatch = v return s } @@ -8630,6 +12445,11 @@ type StartBuildInput struct { // ones already defined in the build project. ArtifactsOverride *ProjectArtifacts `locationName:"artifactsOverride" type:"structure"` + // Contains information that defines how the build project reports the build + // status to the source provider. This option is only used when the source provider + // is GITHUB, GITHUB_ENTERPRISE, or BITBUCKET. + BuildStatusConfigOverride *BuildStatusConfig `locationName:"buildStatusConfigOverride" type:"structure"` + // A buildspec file declaration that overrides, for this build only, the latest // one already defined in the build project. // @@ -8655,6 +12475,10 @@ type StartBuildInput struct { // in the build project. ComputeTypeOverride *string `locationName:"computeTypeOverride" type:"string" enum:"ComputeType"` + // Specifies if session debugging is enabled for this build. For more information, + // see Viewing a running build in Session Manager (https://docs.aws.amazon.com/codebuild/latest/userguide/session-manager.html). + DebugSessionEnabled *bool `locationName:"debugSessionEnabled" type:"boolean"` + // The AWS Key Management Service (AWS KMS) customer master key (CMK) that overrides // the one specified in the build project. The CMK key encrypts the build output // artifacts. @@ -8663,7 +12487,7 @@ type StartBuildInput struct { // if your service role has permission to that key. // // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, - // the CMK's alias (using the format alias/alias-name ). + // the CMK's alias (using the format alias/). EncryptionKeyOverride *string `locationName:"encryptionKeyOverride" min:"1" type:"string"` // A container type for this build that overrides the one specified in the build @@ -8684,7 +12508,7 @@ type StartBuildInput struct { // A unique, case sensitive identifier you provide to ensure the idempotency // of the StartBuild request. The token is included in the StartBuild request - // and is valid for 12 hours. If you repeat the StartBuild request with the + // and is valid for 5 minutes. If you repeat the StartBuild request with the // same token, but change a parameter, AWS CodeBuild returns a parameter mismatch // error. IdempotencyToken *string `locationName:"idempotencyToken" type:"string"` @@ -8696,12 +12520,14 @@ type StartBuildInput struct { // The type of credentials AWS CodeBuild uses to pull images in your build. 
// There are two valid values: // - // * CODEBUILD specifies that AWS CodeBuild uses its own credentials. This - // requires that you modify your ECR repository policy to trust AWS CodeBuild's - // service principal. + // CODEBUILD // - // * SERVICE_ROLE specifies that AWS CodeBuild uses your build project's - // service role. + // Specifies that AWS CodeBuild uses its own credentials. This requires that + // you modify your ECR repository policy to trust AWS CodeBuild's service principal. + // + // SERVICE_ROLE + // + // Specifies that AWS CodeBuild uses your build project's service role. // // When using a cross-account or private registry image, you must use SERVICE_ROLE // credentials. When using an AWS CodeBuild curated image, you must use CODEBUILD @@ -8767,25 +12593,33 @@ type StartBuildInput struct { // in the build project. SourceTypeOverride *string `locationName:"sourceTypeOverride" type:"string" enum:"SourceType"` - // A version of the build input to be built, for this build only. If not specified, - // the latest version is used. If specified, must be one of: + // The version of the build input to be built, for this build only. If not specified, + // the latest version is used. If specified, the contents depends on the source + // provider: // - // * For AWS CodeCommit: the commit ID, branch, or Git tag to use. + // AWS CodeCommit // - // * For GitHub: the commit ID, pull request ID, branch name, or tag name - // that corresponds to the version of the source code you want to build. - // If a pull request ID is specified, it must use the format pr/pull-request-ID - // (for example pr/25). If a branch name is specified, the branch's HEAD - // commit ID is used. If not specified, the default branch's HEAD commit - // ID is used. + // The commit ID, branch, or Git tag to use. // - // * For Bitbucket: the commit ID, branch name, or tag name that corresponds - // to the version of the source code you want to build. If a branch name - // is specified, the branch's HEAD commit ID is used. If not specified, the - // default branch's HEAD commit ID is used. + // GitHub // - // * For Amazon Simple Storage Service (Amazon S3): the version ID of the - // object that represents the build input ZIP file to use. + // The commit ID, pull request ID, branch name, or tag name that corresponds + // to the version of the source code you want to build. If a pull request ID + // is specified, it must use the format pr/pull-request-ID (for example pr/25). + // If a branch name is specified, the branch's HEAD commit ID is used. If not + // specified, the default branch's HEAD commit ID is used. + // + // Bitbucket + // + // The commit ID, branch name, or tag name that corresponds to the version of + // the source code you want to build. If a branch name is specified, the branch's + // HEAD commit ID is used. If not specified, the default branch's HEAD commit + // ID is used. + // + // Amazon Simple Storage Service (Amazon S3) + // + // The version ID of the object that represents the build input ZIP file to + // use. // // If sourceVersion is specified at the project level, then this sourceVersion // (at the build level) takes precedence. @@ -8916,6 +12750,12 @@ func (s *StartBuildInput) SetArtifactsOverride(v *ProjectArtifacts) *StartBuildI return s } +// SetBuildStatusConfigOverride sets the BuildStatusConfigOverride field's value. 
+func (s *StartBuildInput) SetBuildStatusConfigOverride(v *BuildStatusConfig) *StartBuildInput { + s.BuildStatusConfigOverride = v + return s +} + // SetBuildspecOverride sets the BuildspecOverride field's value. func (s *StartBuildInput) SetBuildspecOverride(v string) *StartBuildInput { s.BuildspecOverride = &v @@ -8940,6 +12780,12 @@ func (s *StartBuildInput) SetComputeTypeOverride(v string) *StartBuildInput { return s } +// SetDebugSessionEnabled sets the DebugSessionEnabled field's value. +func (s *StartBuildInput) SetDebugSessionEnabled(v bool) *StartBuildInput { + s.DebugSessionEnabled = &v + return s +} + // SetEncryptionKeyOverride sets the EncryptionKeyOverride field's value. func (s *StartBuildInput) SetEncryptionKeyOverride(v string) *StartBuildInput { s.EncryptionKeyOverride = &v @@ -9107,6 +12953,70 @@ func (s *StartBuildOutput) SetBuild(v *Build) *StartBuildOutput { return s } +type StopBuildBatchInput struct { + _ struct{} `type:"structure"` + + // The identifier of the batch build to stop. + // + // Id is a required field + Id *string `locationName:"id" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopBuildBatchInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopBuildBatchInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopBuildBatchInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopBuildBatchInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *StopBuildBatchInput) SetId(v string) *StopBuildBatchInput { + s.Id = &v + return s +} + +type StopBuildBatchOutput struct { + _ struct{} `type:"structure"` + + // Contains information about a batch build. + BuildBatch *BuildBatch `locationName:"buildBatch" type:"structure"` +} + +// String returns the string representation +func (s StopBuildBatchOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopBuildBatchOutput) GoString() string { + return s.String() +} + +// SetBuildBatch sets the BuildBatch field's value. +func (s *StopBuildBatchOutput) SetBuildBatch(v *BuildBatch) *StopBuildBatchOutput { + s.BuildBatch = v + return s +} + type StopBuildInput struct { _ struct{} `type:"structure"` @@ -9181,7 +13091,7 @@ type Tag struct { Key *string `locationName:"key" min:"1" type:"string"` // The tag's value. - Value *string `locationName:"value" min:"1" type:"string"` + Value *string `locationName:"value" type:"string"` } // String returns the string representation @@ -9200,9 +13110,6 @@ func (s *Tag) Validate() error { if s.Key != nil && len(*s.Key) < 1 { invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } - if s.Value != nil && len(*s.Value) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Value", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -9314,12 +13221,28 @@ func (s *TestCase) SetTestRawDataPath(v string) *TestCase { return s } -// A filter used to return specific types of test cases. +// A filter used to return specific types of test cases. In order to pass the +// filter, the report must meet all of the filter properties. 
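// A hypothetical sketch (not part of the generated SDK) of filtering report test cases
// with the new Keyword field alongside Status: it assumes a configured *codebuild.CodeBuild
// client named svc, the "github.com/aws/aws-sdk-go/aws" and "fmt" imports, and an
// illustrative report ARN.
//
//	filter := &codebuild.TestCaseFilter{}
//	filter.SetKeyword("LoginTest").SetStatus("FAILED") // failed cases whose name or prefix contains "LoginTest"
//	out, err := svc.DescribeTestCases(&codebuild.DescribeTestCasesInput{
//		ReportArn: aws.String("arn:aws:codebuild:us-east-1:123456789012:report/example-report:example-id"),
//		Filter:    filter,
//	})
//	if err != nil {
//		return err
//	}
//	fmt.Println(out.TestCases)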
type TestCaseFilter struct { _ struct{} `type:"structure"` - // The status used to filter test cases. Valid statuses are SUCCEEDED, FAILED, - // ERROR, SKIPPED, and UNKNOWN. A TestCaseFilter can have one status. + // A keyword that is used to filter on the name or the prefix of the test cases. + // Only test cases where the keyword is a substring of the name or the prefix + // will be returned. + Keyword *string `locationName:"keyword" type:"string"` + + // The status used to filter test cases. A TestCaseFilter can have one status. + // Valid values are: + // + // * SUCCEEDED + // + // * FAILED + // + // * ERROR + // + // * SKIPPED + // + // * UNKNOWN Status *string `locationName:"status" type:"string"` } @@ -9333,6 +13256,12 @@ func (s TestCaseFilter) GoString() string { return s.String() } +// SetKeyword sets the Keyword field's value. +func (s *TestCaseFilter) SetKeyword(v string) *TestCaseFilter { + s.Keyword = &v + return s +} + // SetStatus sets the Status field's value. func (s *TestCaseFilter) SetStatus(v string) *TestCaseFilter { s.Status = &v @@ -9400,6 +13329,9 @@ type UpdateProjectInput struct { // build badge. BadgeEnabled *bool `locationName:"badgeEnabled" type:"boolean"` + // Contains configuration information about a batch build project. + BuildBatchConfig *ProjectBuildBatchConfig `locationName:"buildBatchConfig" type:"structure"` + // Stores recently used information so that it can be quickly accessed at a // later time. Cache *ProjectCache `locationName:"cache" type:"structure"` @@ -9414,7 +13346,7 @@ type UpdateProjectInput struct { // if your service role has permission to that key. // // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, - // the CMK's alias (using the format alias/alias-name ). + // the CMK's alias (using the format alias/). EncryptionKey *string `locationName:"encryptionKey" min:"1" type:"string"` // Information to be changed about the build environment for the build project. @@ -9486,7 +13418,7 @@ type UpdateProjectInput struct { // in the AWS CodeBuild User Guide. SourceVersion *string `locationName:"sourceVersion" type:"string"` - // The replacement set of tags for this build project. + // An updated list of tag key and value pairs associated with this build project. // // These tags are available for use by AWS services that support AWS CodeBuild // build project tags. @@ -9536,6 +13468,11 @@ func (s *UpdateProjectInput) Validate() error { invalidParams.AddNested("Artifacts", err.(request.ErrInvalidParams)) } } + if s.BuildBatchConfig != nil { + if err := s.BuildBatchConfig.Validate(); err != nil { + invalidParams.AddNested("BuildBatchConfig", err.(request.ErrInvalidParams)) + } + } if s.Cache != nil { if err := s.Cache.Validate(); err != nil { invalidParams.AddNested("Cache", err.(request.ErrInvalidParams)) @@ -9620,6 +13557,12 @@ func (s *UpdateProjectInput) SetBadgeEnabled(v bool) *UpdateProjectInput { return s } +// SetBuildBatchConfig sets the BuildBatchConfig field's value. +func (s *UpdateProjectInput) SetBuildBatchConfig(v *ProjectBuildBatchConfig) *UpdateProjectInput { + s.BuildBatchConfig = v + return s +} + // SetCache sets the Cache field's value. func (s *UpdateProjectInput) SetCache(v *ProjectCache) *UpdateProjectInput { s.Cache = v @@ -9759,6 +13702,12 @@ type UpdateReportGroupInput struct { // // * NO_EXPORT: The report results are not exported. 
ExportConfig *ReportExportConfig `locationName:"exportConfig" type:"structure"` + + // An updated list of tag key and value pairs associated with this report group. + // + // These tags are available for use by AWS services that support AWS CodeBuild + // report group tags. + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation @@ -9785,6 +13734,16 @@ func (s *UpdateReportGroupInput) Validate() error { invalidParams.AddNested("ExportConfig", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -9804,6 +13763,12 @@ func (s *UpdateReportGroupInput) SetExportConfig(v *ReportExportConfig) *UpdateR return s } +// SetTags sets the Tags field's value. +func (s *UpdateReportGroupInput) SetTags(v []*Tag) *UpdateReportGroupInput { + s.Tags = v + return s +} + type UpdateReportGroupOutput struct { _ struct{} `type:"structure"` @@ -9838,6 +13803,9 @@ type UpdateWebhookInput struct { // It is recommended that you use filterGroups instead of branchFilter. BranchFilter *string `locationName:"branchFilter" type:"string"` + // Specifies the type of build this webhook will trigger. + BuildType *string `locationName:"buildType" type:"string" enum:"WebhookBuildType"` + // An array of arrays of WebhookFilter objects used to determine if a webhook // event can trigger a build. A filter group must contain at least one EVENT // WebhookFilter. @@ -9886,6 +13854,12 @@ func (s *UpdateWebhookInput) SetBranchFilter(v string) *UpdateWebhookInput { return s } +// SetBuildType sets the BuildType field's value. +func (s *UpdateWebhookInput) SetBuildType(v string) *UpdateWebhookInput { + s.BuildType = &v + return s +} + // SetFilterGroups sets the FilterGroups field's value. func (s *UpdateWebhookInput) SetFilterGroups(v [][]*WebhookFilter) *UpdateWebhookInput { s.FilterGroups = v @@ -9996,6 +13970,9 @@ type Webhook struct { // It is recommended that you use filterGroups instead of branchFilter. BranchFilter *string `locationName:"branchFilter" type:"string"` + // Specifies the type of build this webhook will trigger. + BuildType *string `locationName:"buildType" type:"string" enum:"WebhookBuildType"` + // An array of arrays of WebhookFilter objects used to determine which webhooks // are triggered. At least one WebhookFilter in the array must specify EVENT // as its type. @@ -10036,6 +14013,12 @@ func (s *Webhook) SetBranchFilter(v string) *Webhook { return s } +// SetBuildType sets the BuildType field's value. +func (s *Webhook) SetBuildType(v string) *Webhook { + s.BuildType = &v + return s +} + // SetFilterGroups sets the FilterGroups field's value. func (s *Webhook) SetFilterGroups(v [][]*WebhookFilter) *Webhook { s.FilterGroups = v @@ -10089,16 +14072,16 @@ type WebhookFilter struct { // Pattern is a required field Pattern *string `locationName:"pattern" type:"string" required:"true"` - // The type of webhook filter. There are five webhook filter types: EVENT, ACTOR_ACCOUNT_ID, - // HEAD_REF, BASE_REF, and FILE_PATH. + // The type of webhook filter. There are six webhook filter types: EVENT, ACTOR_ACCOUNT_ID, + // HEAD_REF, BASE_REF, FILE_PATH, and COMMIT_MESSAGE. 
// // EVENT // // A webhook event triggers a build when the provided pattern matches one of - // four event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, and PULL_REQUEST_REOPENED. - // The EVENT patterns are specified as a comma-separated string. For example, - // PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull request - // created, and pull request updated events. + // five event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, PULL_REQUEST_REOPENED, + // and PULL_REQUEST_MERGED. The EVENT patterns are specified as a comma-separated + // string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters + // all push, pull request created, and pull request updated events. // // The PULL_REQUEST_REOPENED works with GitHub and GitHub Enterprise only. // @@ -10127,7 +14110,18 @@ type WebhookFilter struct { // A webhook triggers a build when the path of a changed file matches the regular // expression pattern. // - // Works with GitHub and GitHub Enterprise push events only. + // Works with GitHub and Bitbucket events push and pull requests events. Also + // works with GitHub Enterprise push events, but does not work with GitHub Enterprise + // pull request events. + // + // COMMIT_MESSAGE + // + // A webhook triggers a build when the head commit message matches the regular + // expression pattern. + // + // Works with GitHub and Bitbucket events push and pull requests events. Also + // works with GitHub Enterprise push events, but does not work with GitHub Enterprise + // pull request events. // // Type is a required field Type *string `locationName:"type" type:"string" required:"true" enum:"WebhookFilterType"` @@ -10169,6 +14163,14 @@ const ( ArtifactNamespaceBuildId = "BUILD_ID" ) +// ArtifactNamespace_Values returns all elements of the ArtifactNamespace enum +func ArtifactNamespace_Values() []string { + return []string{ + ArtifactNamespaceNone, + ArtifactNamespaceBuildId, + } +} + const ( // ArtifactPackagingNone is a ArtifactPackaging enum value ArtifactPackagingNone = "NONE" @@ -10177,6 +14179,14 @@ const ( ArtifactPackagingZip = "ZIP" ) +// ArtifactPackaging_Values returns all elements of the ArtifactPackaging enum +func ArtifactPackaging_Values() []string { + return []string{ + ArtifactPackagingNone, + ArtifactPackagingZip, + } +} + const ( // ArtifactsTypeCodepipeline is a ArtifactsType enum value ArtifactsTypeCodepipeline = "CODEPIPELINE" @@ -10188,6 +14198,15 @@ const ( ArtifactsTypeNoArtifacts = "NO_ARTIFACTS" ) +// ArtifactsType_Values returns all elements of the ArtifactsType enum +func ArtifactsType_Values() []string { + return []string{ + ArtifactsTypeCodepipeline, + ArtifactsTypeS3, + ArtifactsTypeNoArtifacts, + } +} + const ( // AuthTypeOauth is a AuthType enum value AuthTypeOauth = "OAUTH" @@ -10199,6 +14218,51 @@ const ( AuthTypePersonalAccessToken = "PERSONAL_ACCESS_TOKEN" ) +// AuthType_Values returns all elements of the AuthType enum +func AuthType_Values() []string { + return []string{ + AuthTypeOauth, + AuthTypeBasicAuth, + AuthTypePersonalAccessToken, + } +} + +const ( + // BuildBatchPhaseTypeSubmitted is a BuildBatchPhaseType enum value + BuildBatchPhaseTypeSubmitted = "SUBMITTED" + + // BuildBatchPhaseTypeDownloadBatchspec is a BuildBatchPhaseType enum value + BuildBatchPhaseTypeDownloadBatchspec = "DOWNLOAD_BATCHSPEC" + + // BuildBatchPhaseTypeInProgress is a BuildBatchPhaseType enum value + BuildBatchPhaseTypeInProgress = "IN_PROGRESS" + + // BuildBatchPhaseTypeCombineArtifacts is a BuildBatchPhaseType enum 
value + BuildBatchPhaseTypeCombineArtifacts = "COMBINE_ARTIFACTS" + + // BuildBatchPhaseTypeSucceeded is a BuildBatchPhaseType enum value + BuildBatchPhaseTypeSucceeded = "SUCCEEDED" + + // BuildBatchPhaseTypeFailed is a BuildBatchPhaseType enum value + BuildBatchPhaseTypeFailed = "FAILED" + + // BuildBatchPhaseTypeStopped is a BuildBatchPhaseType enum value + BuildBatchPhaseTypeStopped = "STOPPED" +) + +// BuildBatchPhaseType_Values returns all elements of the BuildBatchPhaseType enum +func BuildBatchPhaseType_Values() []string { + return []string{ + BuildBatchPhaseTypeSubmitted, + BuildBatchPhaseTypeDownloadBatchspec, + BuildBatchPhaseTypeInProgress, + BuildBatchPhaseTypeCombineArtifacts, + BuildBatchPhaseTypeSucceeded, + BuildBatchPhaseTypeFailed, + BuildBatchPhaseTypeStopped, + } +} + const ( // BuildPhaseTypeSubmitted is a BuildPhaseType enum value BuildPhaseTypeSubmitted = "SUBMITTED" @@ -10234,6 +14298,23 @@ const ( BuildPhaseTypeCompleted = "COMPLETED" ) +// BuildPhaseType_Values returns all elements of the BuildPhaseType enum +func BuildPhaseType_Values() []string { + return []string{ + BuildPhaseTypeSubmitted, + BuildPhaseTypeQueued, + BuildPhaseTypeProvisioning, + BuildPhaseTypeDownloadSource, + BuildPhaseTypeInstall, + BuildPhaseTypePreBuild, + BuildPhaseTypeBuild, + BuildPhaseTypePostBuild, + BuildPhaseTypeUploadArtifacts, + BuildPhaseTypeFinalizing, + BuildPhaseTypeCompleted, + } +} + const ( // CacheModeLocalDockerLayerCache is a CacheMode enum value CacheModeLocalDockerLayerCache = "LOCAL_DOCKER_LAYER_CACHE" @@ -10245,6 +14326,15 @@ const ( CacheModeLocalCustomCache = "LOCAL_CUSTOM_CACHE" ) +// CacheMode_Values returns all elements of the CacheMode enum +func CacheMode_Values() []string { + return []string{ + CacheModeLocalDockerLayerCache, + CacheModeLocalSourceCache, + CacheModeLocalCustomCache, + } +} + const ( // CacheTypeNoCache is a CacheType enum value CacheTypeNoCache = "NO_CACHE" @@ -10256,6 +14346,15 @@ const ( CacheTypeLocal = "LOCAL" ) +// CacheType_Values returns all elements of the CacheType enum +func CacheType_Values() []string { + return []string{ + CacheTypeNoCache, + CacheTypeS3, + CacheTypeLocal, + } +} + const ( // ComputeTypeBuildGeneral1Small is a ComputeType enum value ComputeTypeBuildGeneral1Small = "BUILD_GENERAL1_SMALL" @@ -10270,11 +14369,28 @@ const ( ComputeTypeBuildGeneral12xlarge = "BUILD_GENERAL1_2XLARGE" ) +// ComputeType_Values returns all elements of the ComputeType enum +func ComputeType_Values() []string { + return []string{ + ComputeTypeBuildGeneral1Small, + ComputeTypeBuildGeneral1Medium, + ComputeTypeBuildGeneral1Large, + ComputeTypeBuildGeneral12xlarge, + } +} + const ( // CredentialProviderTypeSecretsManager is a CredentialProviderType enum value CredentialProviderTypeSecretsManager = "SECRETS_MANAGER" ) +// CredentialProviderType_Values returns all elements of the CredentialProviderType enum +func CredentialProviderType_Values() []string { + return []string{ + CredentialProviderTypeSecretsManager, + } +} + const ( // EnvironmentTypeWindowsContainer is a EnvironmentType enum value EnvironmentTypeWindowsContainer = "WINDOWS_CONTAINER" @@ -10287,8 +14403,22 @@ const ( // EnvironmentTypeArmContainer is a EnvironmentType enum value EnvironmentTypeArmContainer = "ARM_CONTAINER" + + // EnvironmentTypeWindowsServer2019Container is a EnvironmentType enum value + EnvironmentTypeWindowsServer2019Container = "WINDOWS_SERVER_2019_CONTAINER" ) +// EnvironmentType_Values returns all elements of the EnvironmentType enum +func 
EnvironmentType_Values() []string { + return []string{ + EnvironmentTypeWindowsContainer, + EnvironmentTypeLinuxContainer, + EnvironmentTypeLinuxGpuContainer, + EnvironmentTypeArmContainer, + EnvironmentTypeWindowsServer2019Container, + } +} + const ( // EnvironmentVariableTypePlaintext is a EnvironmentVariableType enum value EnvironmentVariableTypePlaintext = "PLAINTEXT" @@ -10300,11 +14430,27 @@ const ( EnvironmentVariableTypeSecretsManager = "SECRETS_MANAGER" ) +// EnvironmentVariableType_Values returns all elements of the EnvironmentVariableType enum +func EnvironmentVariableType_Values() []string { + return []string{ + EnvironmentVariableTypePlaintext, + EnvironmentVariableTypeParameterStore, + EnvironmentVariableTypeSecretsManager, + } +} + const ( // FileSystemTypeEfs is a FileSystemType enum value FileSystemTypeEfs = "EFS" ) +// FileSystemType_Values returns all elements of the FileSystemType enum +func FileSystemType_Values() []string { + return []string{ + FileSystemTypeEfs, + } +} + const ( // ImagePullCredentialsTypeCodebuild is a ImagePullCredentialsType enum value ImagePullCredentialsTypeCodebuild = "CODEBUILD" @@ -10313,6 +14459,14 @@ const ( ImagePullCredentialsTypeServiceRole = "SERVICE_ROLE" ) +// ImagePullCredentialsType_Values returns all elements of the ImagePullCredentialsType enum +func ImagePullCredentialsType_Values() []string { + return []string{ + ImagePullCredentialsTypeCodebuild, + ImagePullCredentialsTypeServiceRole, + } +} + const ( // LanguageTypeJava is a LanguageType enum value LanguageTypeJava = "JAVA" @@ -10345,6 +14499,22 @@ const ( LanguageTypePhp = "PHP" ) +// LanguageType_Values returns all elements of the LanguageType enum +func LanguageType_Values() []string { + return []string{ + LanguageTypeJava, + LanguageTypePython, + LanguageTypeNodeJs, + LanguageTypeRuby, + LanguageTypeGolang, + LanguageTypeDocker, + LanguageTypeAndroid, + LanguageTypeDotnet, + LanguageTypeBase, + LanguageTypePhp, + } +} + const ( // LogsConfigStatusTypeEnabled is a LogsConfigStatusType enum value LogsConfigStatusTypeEnabled = "ENABLED" @@ -10353,6 +14523,14 @@ const ( LogsConfigStatusTypeDisabled = "DISABLED" ) +// LogsConfigStatusType_Values returns all elements of the LogsConfigStatusType enum +func LogsConfigStatusType_Values() []string { + return []string{ + LogsConfigStatusTypeEnabled, + LogsConfigStatusTypeDisabled, + } +} + const ( // PlatformTypeDebian is a PlatformType enum value PlatformTypeDebian = "DEBIAN" @@ -10367,6 +14545,16 @@ const ( PlatformTypeWindowsServer = "WINDOWS_SERVER" ) +// PlatformType_Values returns all elements of the PlatformType enum +func PlatformType_Values() []string { + return []string{ + PlatformTypeDebian, + PlatformTypeAmazonLinux, + PlatformTypeUbuntu, + PlatformTypeWindowsServer, + } +} + const ( // ProjectSortByTypeName is a ProjectSortByType enum value ProjectSortByTypeName = "NAME" @@ -10378,6 +14566,31 @@ const ( ProjectSortByTypeLastModifiedTime = "LAST_MODIFIED_TIME" ) +// ProjectSortByType_Values returns all elements of the ProjectSortByType enum +func ProjectSortByType_Values() []string { + return []string{ + ProjectSortByTypeName, + ProjectSortByTypeCreatedTime, + ProjectSortByTypeLastModifiedTime, + } +} + +const ( + // ReportCodeCoverageSortByTypeLineCoveragePercentage is a ReportCodeCoverageSortByType enum value + ReportCodeCoverageSortByTypeLineCoveragePercentage = "LINE_COVERAGE_PERCENTAGE" + + // ReportCodeCoverageSortByTypeFilePath is a ReportCodeCoverageSortByType enum value + ReportCodeCoverageSortByTypeFilePath = 
"FILE_PATH" +) + +// ReportCodeCoverageSortByType_Values returns all elements of the ReportCodeCoverageSortByType enum +func ReportCodeCoverageSortByType_Values() []string { + return []string{ + ReportCodeCoverageSortByTypeLineCoveragePercentage, + ReportCodeCoverageSortByTypeFilePath, + } +} + const ( // ReportExportConfigTypeS3 is a ReportExportConfigType enum value ReportExportConfigTypeS3 = "S3" @@ -10386,6 +14599,14 @@ const ( ReportExportConfigTypeNoExport = "NO_EXPORT" ) +// ReportExportConfigType_Values returns all elements of the ReportExportConfigType enum +func ReportExportConfigType_Values() []string { + return []string{ + ReportExportConfigTypeS3, + ReportExportConfigTypeNoExport, + } +} + const ( // ReportGroupSortByTypeName is a ReportGroupSortByType enum value ReportGroupSortByTypeName = "NAME" @@ -10397,6 +14618,15 @@ const ( ReportGroupSortByTypeLastModifiedTime = "LAST_MODIFIED_TIME" ) +// ReportGroupSortByType_Values returns all elements of the ReportGroupSortByType enum +func ReportGroupSortByType_Values() []string { + return []string{ + ReportGroupSortByTypeName, + ReportGroupSortByTypeCreatedTime, + ReportGroupSortByTypeLastModifiedTime, + } +} + const ( // ReportPackagingTypeZip is a ReportPackagingType enum value ReportPackagingTypeZip = "ZIP" @@ -10405,6 +14635,14 @@ const ( ReportPackagingTypeNone = "NONE" ) +// ReportPackagingType_Values returns all elements of the ReportPackagingType enum +func ReportPackagingType_Values() []string { + return []string{ + ReportPackagingTypeZip, + ReportPackagingTypeNone, + } +} + const ( // ReportStatusTypeGenerating is a ReportStatusType enum value ReportStatusTypeGenerating = "GENERATING" @@ -10422,11 +14660,49 @@ const ( ReportStatusTypeDeleting = "DELETING" ) +// ReportStatusType_Values returns all elements of the ReportStatusType enum +func ReportStatusType_Values() []string { + return []string{ + ReportStatusTypeGenerating, + ReportStatusTypeSucceeded, + ReportStatusTypeFailed, + ReportStatusTypeIncomplete, + ReportStatusTypeDeleting, + } +} + const ( // ReportTypeTest is a ReportType enum value ReportTypeTest = "TEST" + + // ReportTypeCodeCoverage is a ReportType enum value + ReportTypeCodeCoverage = "CODE_COVERAGE" +) + +// ReportType_Values returns all elements of the ReportType enum +func ReportType_Values() []string { + return []string{ + ReportTypeTest, + ReportTypeCodeCoverage, + } +} + +const ( + // RetryBuildBatchTypeRetryAllBuilds is a RetryBuildBatchType enum value + RetryBuildBatchTypeRetryAllBuilds = "RETRY_ALL_BUILDS" + + // RetryBuildBatchTypeRetryFailedBuilds is a RetryBuildBatchType enum value + RetryBuildBatchTypeRetryFailedBuilds = "RETRY_FAILED_BUILDS" ) +// RetryBuildBatchType_Values returns all elements of the RetryBuildBatchType enum +func RetryBuildBatchType_Values() []string { + return []string{ + RetryBuildBatchTypeRetryAllBuilds, + RetryBuildBatchTypeRetryFailedBuilds, + } +} + const ( // ServerTypeGithub is a ServerType enum value ServerTypeGithub = "GITHUB" @@ -10438,6 +14714,15 @@ const ( ServerTypeGithubEnterprise = "GITHUB_ENTERPRISE" ) +// ServerType_Values returns all elements of the ServerType enum +func ServerType_Values() []string { + return []string{ + ServerTypeGithub, + ServerTypeBitbucket, + ServerTypeGithubEnterprise, + } +} + const ( // SharedResourceSortByTypeArn is a SharedResourceSortByType enum value SharedResourceSortByTypeArn = "ARN" @@ -10446,6 +14731,14 @@ const ( SharedResourceSortByTypeModifiedTime = "MODIFIED_TIME" ) +// SharedResourceSortByType_Values returns all 
elements of the SharedResourceSortByType enum +func SharedResourceSortByType_Values() []string { + return []string{ + SharedResourceSortByTypeArn, + SharedResourceSortByTypeModifiedTime, + } +} + const ( // SortOrderTypeAscending is a SortOrderType enum value SortOrderTypeAscending = "ASCENDING" @@ -10454,11 +14747,26 @@ const ( SortOrderTypeDescending = "DESCENDING" ) +// SortOrderType_Values returns all elements of the SortOrderType enum +func SortOrderType_Values() []string { + return []string{ + SortOrderTypeAscending, + SortOrderTypeDescending, + } +} + const ( // SourceAuthTypeOauth is a SourceAuthType enum value SourceAuthTypeOauth = "OAUTH" ) +// SourceAuthType_Values returns all elements of the SourceAuthType enum +func SourceAuthType_Values() []string { + return []string{ + SourceAuthTypeOauth, + } +} + const ( // SourceTypeCodecommit is a SourceType enum value SourceTypeCodecommit = "CODECOMMIT" @@ -10482,6 +14790,19 @@ const ( SourceTypeNoSource = "NO_SOURCE" ) +// SourceType_Values returns all elements of the SourceType enum +func SourceType_Values() []string { + return []string{ + SourceTypeCodecommit, + SourceTypeCodepipeline, + SourceTypeGithub, + SourceTypeS3, + SourceTypeBitbucket, + SourceTypeGithubEnterprise, + SourceTypeNoSource, + } +} + const ( // StatusTypeSucceeded is a StatusType enum value StatusTypeSucceeded = "SUCCEEDED" @@ -10502,6 +14823,34 @@ const ( StatusTypeStopped = "STOPPED" ) +// StatusType_Values returns all elements of the StatusType enum +func StatusType_Values() []string { + return []string{ + StatusTypeSucceeded, + StatusTypeFailed, + StatusTypeFault, + StatusTypeTimedOut, + StatusTypeInProgress, + StatusTypeStopped, + } +} + +const ( + // WebhookBuildTypeBuild is a WebhookBuildType enum value + WebhookBuildTypeBuild = "BUILD" + + // WebhookBuildTypeBuildBatch is a WebhookBuildType enum value + WebhookBuildTypeBuildBatch = "BUILD_BATCH" +) + +// WebhookBuildType_Values returns all elements of the WebhookBuildType enum +func WebhookBuildType_Values() []string { + return []string{ + WebhookBuildTypeBuild, + WebhookBuildTypeBuildBatch, + } +} + const ( // WebhookFilterTypeEvent is a WebhookFilterType enum value WebhookFilterTypeEvent = "EVENT" @@ -10517,4 +14866,19 @@ const ( // WebhookFilterTypeFilePath is a WebhookFilterType enum value WebhookFilterTypeFilePath = "FILE_PATH" + + // WebhookFilterTypeCommitMessage is a WebhookFilterType enum value + WebhookFilterTypeCommitMessage = "COMMIT_MESSAGE" ) + +// WebhookFilterType_Values returns all elements of the WebhookFilterType enum +func WebhookFilterType_Values() []string { + return []string{ + WebhookFilterTypeEvent, + WebhookFilterTypeBaseRef, + WebhookFilterTypeHeadRef, + WebhookFilterTypeActorAccountId, + WebhookFilterTypeFilePath, + WebhookFilterTypeCommitMessage, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go b/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go index 5505820c8..2dff5342c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go 
b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go index 8a444d564..a8d53f462 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go @@ -922,7 +922,8 @@ func (c *CodeCommit) CreateBranchRequest(input *CreateBranchInput) (req *request // A branch name is required, but was not specified. // // * BranchNameExistsException -// The specified branch name already exists. +// Cannot create the branch with the specified name because the commit conflicts +// with an existing branch with the same name. Branch names must be unique. // // * InvalidBranchNameException // The specified reference name is not valid. @@ -3531,6 +3532,10 @@ func (c *CodeCommit) GetCommentRequest(input *GetCommentInput) (req *request.Req // // Returns the content of a comment made on a change, file, or commit in a repository. // +// Reaction counts might include numbers from user identities who were deleted +// after the reaction was made. For a count of reactions from active identities, +// use GetCommentReactions. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3543,6 +3548,10 @@ func (c *CodeCommit) GetCommentRequest(input *GetCommentInput) (req *request.Req // No comment exists with the provided ID. Verify that you have used the correct // ID, and then try again. // +// * CommentDeletedException +// This comment has already been deleted. You cannot edit or delete a deleted +// comment. +// // * CommentIdRequiredException // The comment ID is missing or null. A comment ID is required. // @@ -3550,9 +3559,20 @@ func (c *CodeCommit) GetCommentRequest(input *GetCommentInput) (req *request.Req // The comment ID is not in a valid format. Make sure that you have provided // the full comment ID. // -// * CommentDeletedException -// This comment has already been deleted. You cannot edit or delete a deleted -// comment. +// * EncryptionIntegrityChecksFailedException +// An encryption integrity check failed. +// +// * EncryptionKeyAccessDeniedException +// An encryption key could not be accessed. +// +// * EncryptionKeyDisabledException +// The encryption key is disabled. +// +// * EncryptionKeyNotFoundException +// No encryption key was found. +// +// * EncryptionKeyUnavailableException +// The encryption key is not available. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetComment func (c *CodeCommit) GetComment(input *GetCommentInput) (*GetCommentOutput, error) { @@ -3576,6 +3596,165 @@ func (c *CodeCommit) GetCommentWithContext(ctx aws.Context, input *GetCommentInp return out, req.Send() } +const opGetCommentReactions = "GetCommentReactions" + +// GetCommentReactionsRequest generates a "aws/request.Request" representing the +// client's request for the GetCommentReactions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetCommentReactions for more information on using the GetCommentReactions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
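// A brief, hypothetical usage sketch for GetCommentReactions (not part of the generated
// SDK): it assumes a configured *codecommit.CodeCommit client named svc, the
// "github.com/aws/aws-sdk-go/aws" and "fmt" imports, and an illustrative comment ID.
//
//	out, err := svc.GetCommentReactions(&codecommit.GetCommentReactionsInput{
//		CommentId: aws.String("ff30b348EXAMPLE"),
//	})
//	if err != nil {
//		return err
//	}
//	fmt.Println(out) // reaction summaries for the comment, excluding deleted identities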
+// +// +// // Example sending a request using the GetCommentReactionsRequest method. +// req, resp := client.GetCommentReactionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetCommentReactions +func (c *CodeCommit) GetCommentReactionsRequest(input *GetCommentReactionsInput) (req *request.Request, output *GetCommentReactionsOutput) { + op := &request.Operation{ + Name: opGetCommentReactions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetCommentReactionsInput{} + } + + output = &GetCommentReactionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCommentReactions API operation for AWS CodeCommit. +// +// Returns information about reactions to a specified comment ID. Reactions +// from users who have been deleted will not be included in the count. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation GetCommentReactions for usage and error information. +// +// Returned Error Types: +// * CommentDoesNotExistException +// No comment exists with the provided ID. Verify that you have used the correct +// ID, and then try again. +// +// * CommentIdRequiredException +// The comment ID is missing or null. A comment ID is required. +// +// * InvalidCommentIdException +// The comment ID is not in a valid format. Make sure that you have provided +// the full comment ID. +// +// * InvalidReactionUserArnException +// The Amazon Resource Name (ARN) of the user or identity is not valid. +// +// * InvalidMaxResultsException +// The specified number of maximum results is not valid. +// +// * InvalidContinuationTokenException +// The specified continuation token is not valid. +// +// * CommentDeletedException +// This comment has already been deleted. You cannot edit or delete a deleted +// comment. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/GetCommentReactions +func (c *CodeCommit) GetCommentReactions(input *GetCommentReactionsInput) (*GetCommentReactionsOutput, error) { + req, out := c.GetCommentReactionsRequest(input) + return out, req.Send() +} + +// GetCommentReactionsWithContext is the same as GetCommentReactions with the addition of +// the ability to pass a context and additional request options. +// +// See GetCommentReactions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) GetCommentReactionsWithContext(ctx aws.Context, input *GetCommentReactionsInput, opts ...request.Option) (*GetCommentReactionsOutput, error) { + req, out := c.GetCommentReactionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetCommentReactionsPages iterates over the pages of a GetCommentReactions operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See GetCommentReactions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetCommentReactions operation. +// pageNum := 0 +// err := client.GetCommentReactionsPages(params, +// func(page *codecommit.GetCommentReactionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *CodeCommit) GetCommentReactionsPages(input *GetCommentReactionsInput, fn func(*GetCommentReactionsOutput, bool) bool) error { + return c.GetCommentReactionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetCommentReactionsPagesWithContext same as GetCommentReactionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) GetCommentReactionsPagesWithContext(ctx aws.Context, input *GetCommentReactionsInput, fn func(*GetCommentReactionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetCommentReactionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetCommentReactionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetCommentReactionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetCommentsForComparedCommit = "GetCommentsForComparedCommit" // GetCommentsForComparedCommitRequest generates a "aws/request.Request" representing the @@ -3628,6 +3807,10 @@ func (c *CodeCommit) GetCommentsForComparedCommitRequest(input *GetCommentsForCo // // Returns information about comments made on the comparison between two commits. // +// Reaction counts might include numbers from user identities who were deleted +// after the reaction was made. For a count of reactions from active identities, +// use GetCommentReactions. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3806,6 +3989,10 @@ func (c *CodeCommit) GetCommentsForPullRequestRequest(input *GetCommentsForPullR // // Returns comments made on a pull request. // +// Reaction counts might include numbers from user identities who were deleted +// after the reaction was made. For a count of reactions from active identities, +// use GetCommentReactions. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7986,6 +8173,10 @@ func (c *CodeCommit) PostCommentForComparedCommitRequest(input *PostCommentForCo // * InvalidCommitIdException // The specified commit ID is not valid. // +// * BeforeCommitIdAndAfterCommitIdAreSameException +// The before commit ID and the after commit ID are the same, which is not valid. +// The before commit ID and the after commit ID must be different commit IDs. +// // * EncryptionIntegrityChecksFailedException // An encryption integrity check failed. 
// @@ -8001,10 +8192,6 @@ func (c *CodeCommit) PostCommentForComparedCommitRequest(input *PostCommentForCo // * EncryptionKeyUnavailableException // The encryption key is not available. // -// * BeforeCommitIdAndAfterCommitIdAreSameException -// The before commit ID and the after commit ID are the same, which is not valid. -// The before commit ID and the after commit ID must be different commit IDs. -// // * CommitDoesNotExistException // The specified commit does not exist or no commit was specified, and the specified // repository has no default branch. @@ -8015,6 +8202,9 @@ func (c *CodeCommit) PostCommentForComparedCommitRequest(input *PostCommentForCo // * PathDoesNotExistException // The specified path does not exist. // +// * PathRequiredException +// The folderPath for a location cannot be null. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentForComparedCommit func (c *CodeCommit) PostCommentForComparedCommit(input *PostCommentForComparedCommitInput) (*PostCommentForComparedCommitOutput, error) { req, out := c.PostCommentForComparedCommitRequest(input) @@ -8163,6 +8353,10 @@ func (c *CodeCommit) PostCommentForPullRequestRequest(input *PostCommentForPullR // * InvalidCommitIdException // The specified commit ID is not valid. // +// * BeforeCommitIdAndAfterCommitIdAreSameException +// The before commit ID and the after commit ID are the same, which is not valid. +// The before commit ID and the after commit ID must be different commit IDs. +// // * EncryptionIntegrityChecksFailedException // An encryption integrity check failed. // @@ -8191,10 +8385,6 @@ func (c *CodeCommit) PostCommentForPullRequestRequest(input *PostCommentForPullR // * PathRequiredException // The folderPath for a location cannot be null. // -// * BeforeCommitIdAndAfterCommitIdAreSameException -// The before commit ID and the after commit ID are the same, which is not valid. -// The before commit ID and the after commit ID must be different commit IDs. -// // See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PostCommentForPullRequest func (c *CodeCommit) PostCommentForPullRequest(input *PostCommentForPullRequestInput) (*PostCommentForPullRequestOutput, error) { req, out := c.PostCommentForPullRequestRequest(input) @@ -8326,6 +8516,111 @@ func (c *CodeCommit) PostCommentReplyWithContext(ctx aws.Context, input *PostCom return out, req.Send() } +const opPutCommentReaction = "PutCommentReaction" + +// PutCommentReactionRequest generates a "aws/request.Request" representing the +// client's request for the PutCommentReaction operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutCommentReaction for more information on using the PutCommentReaction +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutCommentReactionRequest method. 
+// req, resp := client.PutCommentReactionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PutCommentReaction +func (c *CodeCommit) PutCommentReactionRequest(input *PutCommentReactionInput) (req *request.Request, output *PutCommentReactionOutput) { + op := &request.Operation{ + Name: opPutCommentReaction, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutCommentReactionInput{} + } + + output = &PutCommentReactionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutCommentReaction API operation for AWS CodeCommit. +// +// Adds or updates a reaction to a specified comment for the user whose identity +// is used to make the request. You can only add or update a reaction for yourself. +// You cannot add, modify, or delete a reaction for another user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeCommit's +// API operation PutCommentReaction for usage and error information. +// +// Returned Error Types: +// * CommentDoesNotExistException +// No comment exists with the provided ID. Verify that you have used the correct +// ID, and then try again. +// +// * CommentIdRequiredException +// The comment ID is missing or null. A comment ID is required. +// +// * InvalidCommentIdException +// The comment ID is not in a valid format. Make sure that you have provided +// the full comment ID. +// +// * InvalidReactionValueException +// The value of the reaction is not valid. For more information, see the AWS +// CodeCommit User Guide (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html). +// +// * ReactionValueRequiredException +// A reaction value is required. +// +// * ReactionLimitExceededException +// The number of reactions has been exceeded. Reactions are limited to one reaction +// per user for each individual comment ID. +// +// * CommentDeletedException +// This comment has already been deleted. You cannot edit or delete a deleted +// comment. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codecommit-2015-04-13/PutCommentReaction +func (c *CodeCommit) PutCommentReaction(input *PutCommentReactionInput) (*PutCommentReactionOutput, error) { + req, out := c.PutCommentReactionRequest(input) + return out, req.Send() +} + +// PutCommentReactionWithContext is the same as PutCommentReaction with the addition of +// the ability to pass a context and additional request options. +// +// See PutCommentReaction for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeCommit) PutCommentReactionWithContext(ctx aws.Context, input *PutCommentReactionInput, opts ...request.Option) (*PutCommentReactionOutput, error) { + req, out := c.PutCommentReactionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opPutFile = "PutFile" // PutFileRequest generates a "aws/request.Request" representing the @@ -10354,8 +10649,8 @@ func (c *CodeCommit) UpdateRepositoryNameWithContext(ctx aws.Context, input *Upd // The specified Amazon Resource Name (ARN) does not exist in the AWS account. type ActorDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10372,17 +10667,17 @@ func (s ActorDoesNotExistException) GoString() string { func newErrorActorDoesNotExistException(v protocol.ResponseMetadata) error { return &ActorDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ActorDoesNotExistException) Code() string { +func (s *ActorDoesNotExistException) Code() string { return "ActorDoesNotExistException" } // Message returns the exception's message. -func (s ActorDoesNotExistException) Message() string { +func (s *ActorDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10390,22 +10685,22 @@ func (s ActorDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ActorDoesNotExistException) OrigErr() error { +func (s *ActorDoesNotExistException) OrigErr() error { return nil } -func (s ActorDoesNotExistException) Error() string { +func (s *ActorDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ActorDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ActorDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ActorDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *ActorDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about a specific approval on a pull request. @@ -10532,8 +10827,8 @@ func (s *ApprovalRule) SetRuleContentSha256(v string) *ApprovalRule { // The content for the approval rule is empty. You must provide some content // for an approval rule. The content cannot be null. type ApprovalRuleContentRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10550,17 +10845,17 @@ func (s ApprovalRuleContentRequiredException) GoString() string { func newErrorApprovalRuleContentRequiredException(v protocol.ResponseMetadata) error { return &ApprovalRuleContentRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApprovalRuleContentRequiredException) Code() string { +func (s *ApprovalRuleContentRequiredException) Code() string { return "ApprovalRuleContentRequiredException" } // Message returns the exception's message. 
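// Illustrative sketch (not part of the generated SDK): adding an emoji reaction with the
// PutCommentReaction operation defined above. The commentID value and the ":thumbsup:"
// shortcode are assumptions for the example; see the AWS CodeCommit User Guide for the
// accepted reaction values.
//
//    _, err := client.PutCommentReaction(&codecommit.PutCommentReactionInput{
//        CommentId:     aws.String(commentID),
//        ReactionValue: aws.String(":thumbsup:"),
//    })
//    if err != nil {
//        // Modeled failures such as CommentDeletedException can be matched with
//        // errors.As, as sketched later in this file.
//        return err
//    }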
-func (s ApprovalRuleContentRequiredException) Message() string { +func (s *ApprovalRuleContentRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10568,28 +10863,28 @@ func (s ApprovalRuleContentRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApprovalRuleContentRequiredException) OrigErr() error { +func (s *ApprovalRuleContentRequiredException) OrigErr() error { return nil } -func (s ApprovalRuleContentRequiredException) Error() string { +func (s *ApprovalRuleContentRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApprovalRuleContentRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApprovalRuleContentRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApprovalRuleContentRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApprovalRuleContentRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The specified approval rule does not exist. type ApprovalRuleDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10606,17 +10901,17 @@ func (s ApprovalRuleDoesNotExistException) GoString() string { func newErrorApprovalRuleDoesNotExistException(v protocol.ResponseMetadata) error { return &ApprovalRuleDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApprovalRuleDoesNotExistException) Code() string { +func (s *ApprovalRuleDoesNotExistException) Code() string { return "ApprovalRuleDoesNotExistException" } // Message returns the exception's message. -func (s ApprovalRuleDoesNotExistException) Message() string { +func (s *ApprovalRuleDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10624,22 +10919,22 @@ func (s ApprovalRuleDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApprovalRuleDoesNotExistException) OrigErr() error { +func (s *ApprovalRuleDoesNotExistException) OrigErr() error { return nil } -func (s ApprovalRuleDoesNotExistException) Error() string { +func (s *ApprovalRuleDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApprovalRuleDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApprovalRuleDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApprovalRuleDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApprovalRuleDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about an event for an approval rule. @@ -10687,8 +10982,8 @@ func (s *ApprovalRuleEventMetadata) SetApprovalRuleName(v string) *ApprovalRuleE // An approval rule with that name already exists. Approval rule names must // be unique within the scope of a pull request. 
type ApprovalRuleNameAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10705,17 +11000,17 @@ func (s ApprovalRuleNameAlreadyExistsException) GoString() string { func newErrorApprovalRuleNameAlreadyExistsException(v protocol.ResponseMetadata) error { return &ApprovalRuleNameAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApprovalRuleNameAlreadyExistsException) Code() string { +func (s *ApprovalRuleNameAlreadyExistsException) Code() string { return "ApprovalRuleNameAlreadyExistsException" } // Message returns the exception's message. -func (s ApprovalRuleNameAlreadyExistsException) Message() string { +func (s *ApprovalRuleNameAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10723,28 +11018,28 @@ func (s ApprovalRuleNameAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApprovalRuleNameAlreadyExistsException) OrigErr() error { +func (s *ApprovalRuleNameAlreadyExistsException) OrigErr() error { return nil } -func (s ApprovalRuleNameAlreadyExistsException) Error() string { +func (s *ApprovalRuleNameAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApprovalRuleNameAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApprovalRuleNameAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApprovalRuleNameAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApprovalRuleNameAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // An approval rule name is required, but was not specified. type ApprovalRuleNameRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10761,17 +11056,17 @@ func (s ApprovalRuleNameRequiredException) GoString() string { func newErrorApprovalRuleNameRequiredException(v protocol.ResponseMetadata) error { return &ApprovalRuleNameRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApprovalRuleNameRequiredException) Code() string { +func (s *ApprovalRuleNameRequiredException) Code() string { return "ApprovalRuleNameRequiredException" } // Message returns the exception's message. -func (s ApprovalRuleNameRequiredException) Message() string { +func (s *ApprovalRuleNameRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10779,22 +11074,22 @@ func (s ApprovalRuleNameRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ApprovalRuleNameRequiredException) OrigErr() error { +func (s *ApprovalRuleNameRequiredException) OrigErr() error { return nil } -func (s ApprovalRuleNameRequiredException) Error() string { +func (s *ApprovalRuleNameRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApprovalRuleNameRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApprovalRuleNameRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApprovalRuleNameRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApprovalRuleNameRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about an override event for approval rules for a pull @@ -10923,8 +11218,8 @@ func (s *ApprovalRuleTemplate) SetRuleContentSha256(v string) *ApprovalRuleTempl // The content for the approval rule template is empty. You must provide some // content for an approval rule template. The content cannot be null. type ApprovalRuleTemplateContentRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10941,17 +11236,17 @@ func (s ApprovalRuleTemplateContentRequiredException) GoString() string { func newErrorApprovalRuleTemplateContentRequiredException(v protocol.ResponseMetadata) error { return &ApprovalRuleTemplateContentRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApprovalRuleTemplateContentRequiredException) Code() string { +func (s *ApprovalRuleTemplateContentRequiredException) Code() string { return "ApprovalRuleTemplateContentRequiredException" } // Message returns the exception's message. -func (s ApprovalRuleTemplateContentRequiredException) Message() string { +func (s *ApprovalRuleTemplateContentRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10959,30 +11254,30 @@ func (s ApprovalRuleTemplateContentRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApprovalRuleTemplateContentRequiredException) OrigErr() error { +func (s *ApprovalRuleTemplateContentRequiredException) OrigErr() error { return nil } -func (s ApprovalRuleTemplateContentRequiredException) Error() string { +func (s *ApprovalRuleTemplateContentRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApprovalRuleTemplateContentRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApprovalRuleTemplateContentRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApprovalRuleTemplateContentRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApprovalRuleTemplateContentRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The specified approval rule template does not exist. Verify that the name // is correct and that you are signed in to the AWS Region where the template // was created, and then try again. 
type ApprovalRuleTemplateDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10999,17 +11294,17 @@ func (s ApprovalRuleTemplateDoesNotExistException) GoString() string { func newErrorApprovalRuleTemplateDoesNotExistException(v protocol.ResponseMetadata) error { return &ApprovalRuleTemplateDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApprovalRuleTemplateDoesNotExistException) Code() string { +func (s *ApprovalRuleTemplateDoesNotExistException) Code() string { return "ApprovalRuleTemplateDoesNotExistException" } // Message returns the exception's message. -func (s ApprovalRuleTemplateDoesNotExistException) Message() string { +func (s *ApprovalRuleTemplateDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11017,30 +11312,30 @@ func (s ApprovalRuleTemplateDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApprovalRuleTemplateDoesNotExistException) OrigErr() error { +func (s *ApprovalRuleTemplateDoesNotExistException) OrigErr() error { return nil } -func (s ApprovalRuleTemplateDoesNotExistException) Error() string { +func (s *ApprovalRuleTemplateDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApprovalRuleTemplateDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApprovalRuleTemplateDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApprovalRuleTemplateDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApprovalRuleTemplateDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The approval rule template is associated with one or more repositories. You // cannot delete a template that is associated with a repository. Remove all // associations, and then try again. type ApprovalRuleTemplateInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11057,17 +11352,17 @@ func (s ApprovalRuleTemplateInUseException) GoString() string { func newErrorApprovalRuleTemplateInUseException(v protocol.ResponseMetadata) error { return &ApprovalRuleTemplateInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApprovalRuleTemplateInUseException) Code() string { +func (s *ApprovalRuleTemplateInUseException) Code() string { return "ApprovalRuleTemplateInUseException" } // Message returns the exception's message. -func (s ApprovalRuleTemplateInUseException) Message() string { +func (s *ApprovalRuleTemplateInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11075,30 +11370,30 @@ func (s ApprovalRuleTemplateInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ApprovalRuleTemplateInUseException) OrigErr() error { +func (s *ApprovalRuleTemplateInUseException) OrigErr() error { return nil } -func (s ApprovalRuleTemplateInUseException) Error() string { +func (s *ApprovalRuleTemplateInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApprovalRuleTemplateInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApprovalRuleTemplateInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApprovalRuleTemplateInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApprovalRuleTemplateInUseException) RequestID() string { + return s.RespMetadata.RequestID } // You cannot create an approval rule template with that name because a template // with that name already exists in this AWS Region for your AWS account. Approval // rule template names must be unique. type ApprovalRuleTemplateNameAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11115,17 +11410,17 @@ func (s ApprovalRuleTemplateNameAlreadyExistsException) GoString() string { func newErrorApprovalRuleTemplateNameAlreadyExistsException(v protocol.ResponseMetadata) error { return &ApprovalRuleTemplateNameAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApprovalRuleTemplateNameAlreadyExistsException) Code() string { +func (s *ApprovalRuleTemplateNameAlreadyExistsException) Code() string { return "ApprovalRuleTemplateNameAlreadyExistsException" } // Message returns the exception's message. -func (s ApprovalRuleTemplateNameAlreadyExistsException) Message() string { +func (s *ApprovalRuleTemplateNameAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11133,28 +11428,28 @@ func (s ApprovalRuleTemplateNameAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApprovalRuleTemplateNameAlreadyExistsException) OrigErr() error { +func (s *ApprovalRuleTemplateNameAlreadyExistsException) OrigErr() error { return nil } -func (s ApprovalRuleTemplateNameAlreadyExistsException) Error() string { +func (s *ApprovalRuleTemplateNameAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApprovalRuleTemplateNameAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApprovalRuleTemplateNameAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApprovalRuleTemplateNameAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApprovalRuleTemplateNameAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // An approval rule template name is required, but was not specified. 
type ApprovalRuleTemplateNameRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11171,17 +11466,17 @@ func (s ApprovalRuleTemplateNameRequiredException) GoString() string { func newErrorApprovalRuleTemplateNameRequiredException(v protocol.ResponseMetadata) error { return &ApprovalRuleTemplateNameRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApprovalRuleTemplateNameRequiredException) Code() string { +func (s *ApprovalRuleTemplateNameRequiredException) Code() string { return "ApprovalRuleTemplateNameRequiredException" } // Message returns the exception's message. -func (s ApprovalRuleTemplateNameRequiredException) Message() string { +func (s *ApprovalRuleTemplateNameRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11189,22 +11484,22 @@ func (s ApprovalRuleTemplateNameRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApprovalRuleTemplateNameRequiredException) OrigErr() error { +func (s *ApprovalRuleTemplateNameRequiredException) OrigErr() error { return nil } -func (s ApprovalRuleTemplateNameRequiredException) Error() string { +func (s *ApprovalRuleTemplateNameRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApprovalRuleTemplateNameRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApprovalRuleTemplateNameRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApprovalRuleTemplateNameRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApprovalRuleTemplateNameRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about a change in the approval state for a pull request. @@ -11242,8 +11537,8 @@ func (s *ApprovalStateChangedEventMetadata) SetRevisionId(v string) *ApprovalSta // An approval state is required, but was not specified. type ApprovalStateRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11260,17 +11555,17 @@ func (s ApprovalStateRequiredException) GoString() string { func newErrorApprovalStateRequiredException(v protocol.ResponseMetadata) error { return &ApprovalStateRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApprovalStateRequiredException) Code() string { +func (s *ApprovalStateRequiredException) Code() string { return "ApprovalStateRequiredException" } // Message returns the exception's message. -func (s ApprovalStateRequiredException) Message() string { +func (s *ApprovalStateRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11278,22 +11573,22 @@ func (s ApprovalStateRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ApprovalStateRequiredException) OrigErr() error { +func (s *ApprovalStateRequiredException) OrigErr() error { return nil } -func (s ApprovalStateRequiredException) Error() string { +func (s *ApprovalStateRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApprovalStateRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApprovalStateRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApprovalStateRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApprovalStateRequiredException) RequestID() string { + return s.RespMetadata.RequestID } type AssociateApprovalRuleTemplateWithRepositoryInput struct { @@ -11370,8 +11665,8 @@ func (s AssociateApprovalRuleTemplateWithRepositoryOutput) GoString() string { // The specified Amazon Resource Name (ARN) does not exist in the AWS account. type AuthorDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11388,17 +11683,17 @@ func (s AuthorDoesNotExistException) GoString() string { func newErrorAuthorDoesNotExistException(v protocol.ResponseMetadata) error { return &AuthorDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AuthorDoesNotExistException) Code() string { +func (s *AuthorDoesNotExistException) Code() string { return "AuthorDoesNotExistException" } // Message returns the exception's message. -func (s AuthorDoesNotExistException) Message() string { +func (s *AuthorDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11406,22 +11701,22 @@ func (s AuthorDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AuthorDoesNotExistException) OrigErr() error { +func (s *AuthorDoesNotExistException) OrigErr() error { return nil } -func (s AuthorDoesNotExistException) Error() string { +func (s *AuthorDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AuthorDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AuthorDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AuthorDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *AuthorDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about errors in a BatchAssociateApprovalRuleTemplateWithRepositories @@ -12195,8 +12490,8 @@ func (s *BatchGetRepositoriesOutput) SetRepositoriesNotFound(v []*string) *Batch // The before commit ID and the after commit ID are the same, which is not valid. // The before commit ID and the after commit ID must be different commit IDs. 
type BeforeCommitIdAndAfterCommitIdAreSameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12213,17 +12508,17 @@ func (s BeforeCommitIdAndAfterCommitIdAreSameException) GoString() string { func newErrorBeforeCommitIdAndAfterCommitIdAreSameException(v protocol.ResponseMetadata) error { return &BeforeCommitIdAndAfterCommitIdAreSameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BeforeCommitIdAndAfterCommitIdAreSameException) Code() string { +func (s *BeforeCommitIdAndAfterCommitIdAreSameException) Code() string { return "BeforeCommitIdAndAfterCommitIdAreSameException" } // Message returns the exception's message. -func (s BeforeCommitIdAndAfterCommitIdAreSameException) Message() string { +func (s *BeforeCommitIdAndAfterCommitIdAreSameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12231,28 +12526,28 @@ func (s BeforeCommitIdAndAfterCommitIdAreSameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BeforeCommitIdAndAfterCommitIdAreSameException) OrigErr() error { +func (s *BeforeCommitIdAndAfterCommitIdAreSameException) OrigErr() error { return nil } -func (s BeforeCommitIdAndAfterCommitIdAreSameException) Error() string { +func (s *BeforeCommitIdAndAfterCommitIdAreSameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BeforeCommitIdAndAfterCommitIdAreSameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BeforeCommitIdAndAfterCommitIdAreSameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BeforeCommitIdAndAfterCommitIdAreSameException) RequestID() string { - return s.respMetadata.RequestID +func (s *BeforeCommitIdAndAfterCommitIdAreSameException) RequestID() string { + return s.RespMetadata.RequestID } // The specified blob does not exist. type BlobIdDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12269,17 +12564,17 @@ func (s BlobIdDoesNotExistException) GoString() string { func newErrorBlobIdDoesNotExistException(v protocol.ResponseMetadata) error { return &BlobIdDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BlobIdDoesNotExistException) Code() string { +func (s *BlobIdDoesNotExistException) Code() string { return "BlobIdDoesNotExistException" } // Message returns the exception's message. -func (s BlobIdDoesNotExistException) Message() string { +func (s *BlobIdDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12287,28 +12582,28 @@ func (s BlobIdDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
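// Illustrative sketch (not part of the generated SDK): with the pointer receivers and the
// exported RespMetadata field introduced throughout this change, modeled exceptions can be
// matched with errors.As (Go 1.13+) instead of comparing awserr code strings. The err value
// stands in for the error returned by any CodeCommit call, for example
// PostCommentForComparedCommit.
//
//    var same *codecommit.BeforeCommitIdAndAfterCommitIdAreSameException
//    if errors.As(err, &same) {
//        fmt.Println(same.Code(), same.Message(), same.StatusCode(), same.RequestID())
//    }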
-func (s BlobIdDoesNotExistException) OrigErr() error { +func (s *BlobIdDoesNotExistException) OrigErr() error { return nil } -func (s BlobIdDoesNotExistException) Error() string { +func (s *BlobIdDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BlobIdDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BlobIdDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BlobIdDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *BlobIdDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // A blob ID is required, but was not specified. type BlobIdRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12325,17 +12620,17 @@ func (s BlobIdRequiredException) GoString() string { func newErrorBlobIdRequiredException(v protocol.ResponseMetadata) error { return &BlobIdRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BlobIdRequiredException) Code() string { +func (s *BlobIdRequiredException) Code() string { return "BlobIdRequiredException" } // Message returns the exception's message. -func (s BlobIdRequiredException) Message() string { +func (s *BlobIdRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12343,22 +12638,22 @@ func (s BlobIdRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BlobIdRequiredException) OrigErr() error { +func (s *BlobIdRequiredException) OrigErr() error { return nil } -func (s BlobIdRequiredException) Error() string { +func (s *BlobIdRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BlobIdRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BlobIdRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BlobIdRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *BlobIdRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about a specific Git blob object. @@ -12413,8 +12708,8 @@ func (s *BlobMetadata) SetPath(v string) *BlobMetadata { // The specified branch does not exist. type BranchDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12431,17 +12726,17 @@ func (s BranchDoesNotExistException) GoString() string { func newErrorBranchDoesNotExistException(v protocol.ResponseMetadata) error { return &BranchDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s BranchDoesNotExistException) Code() string { +func (s *BranchDoesNotExistException) Code() string { return "BranchDoesNotExistException" } // Message returns the exception's message. -func (s BranchDoesNotExistException) Message() string { +func (s *BranchDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12449,22 +12744,22 @@ func (s BranchDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BranchDoesNotExistException) OrigErr() error { +func (s *BranchDoesNotExistException) OrigErr() error { return nil } -func (s BranchDoesNotExistException) Error() string { +func (s *BranchDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BranchDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BranchDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BranchDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *BranchDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about a branch. @@ -12500,10 +12795,11 @@ func (s *BranchInfo) SetCommitId(v string) *BranchInfo { return s } -// The specified branch name already exists. +// Cannot create the branch with the specified name because the commit conflicts +// with an existing branch with the same name. Branch names must be unique. type BranchNameExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12520,17 +12816,17 @@ func (s BranchNameExistsException) GoString() string { func newErrorBranchNameExistsException(v protocol.ResponseMetadata) error { return &BranchNameExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BranchNameExistsException) Code() string { +func (s *BranchNameExistsException) Code() string { return "BranchNameExistsException" } // Message returns the exception's message. -func (s BranchNameExistsException) Message() string { +func (s *BranchNameExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12538,30 +12834,30 @@ func (s BranchNameExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BranchNameExistsException) OrigErr() error { +func (s *BranchNameExistsException) OrigErr() error { return nil } -func (s BranchNameExistsException) Error() string { +func (s *BranchNameExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BranchNameExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BranchNameExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BranchNameExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *BranchNameExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The specified branch name is not valid because it is a tag name. 
Enter the // name of a branch in the repository. For a list of valid branch names, use // ListBranches. type BranchNameIsTagNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12578,17 +12874,17 @@ func (s BranchNameIsTagNameException) GoString() string { func newErrorBranchNameIsTagNameException(v protocol.ResponseMetadata) error { return &BranchNameIsTagNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BranchNameIsTagNameException) Code() string { +func (s *BranchNameIsTagNameException) Code() string { return "BranchNameIsTagNameException" } // Message returns the exception's message. -func (s BranchNameIsTagNameException) Message() string { +func (s *BranchNameIsTagNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12596,28 +12892,28 @@ func (s BranchNameIsTagNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BranchNameIsTagNameException) OrigErr() error { +func (s *BranchNameIsTagNameException) OrigErr() error { return nil } -func (s BranchNameIsTagNameException) Error() string { +func (s *BranchNameIsTagNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BranchNameIsTagNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BranchNameIsTagNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BranchNameIsTagNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *BranchNameIsTagNameException) RequestID() string { + return s.RespMetadata.RequestID } // A branch name is required, but was not specified. type BranchNameRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12634,17 +12930,17 @@ func (s BranchNameRequiredException) GoString() string { func newErrorBranchNameRequiredException(v protocol.ResponseMetadata) error { return &BranchNameRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BranchNameRequiredException) Code() string { +func (s *BranchNameRequiredException) Code() string { return "BranchNameRequiredException" } // Message returns the exception's message. -func (s BranchNameRequiredException) Message() string { +func (s *BranchNameRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12652,29 +12948,29 @@ func (s BranchNameRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BranchNameRequiredException) OrigErr() error { +func (s *BranchNameRequiredException) OrigErr() error { return nil } -func (s BranchNameRequiredException) Error() string { +func (s *BranchNameRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s BranchNameRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BranchNameRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BranchNameRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *BranchNameRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The approval rule cannot be deleted from the pull request because it was // created by an approval rule template and applied to the pull request automatically. type CannotDeleteApprovalRuleFromTemplateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12691,17 +12987,17 @@ func (s CannotDeleteApprovalRuleFromTemplateException) GoString() string { func newErrorCannotDeleteApprovalRuleFromTemplateException(v protocol.ResponseMetadata) error { return &CannotDeleteApprovalRuleFromTemplateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CannotDeleteApprovalRuleFromTemplateException) Code() string { +func (s *CannotDeleteApprovalRuleFromTemplateException) Code() string { return "CannotDeleteApprovalRuleFromTemplateException" } // Message returns the exception's message. -func (s CannotDeleteApprovalRuleFromTemplateException) Message() string { +func (s *CannotDeleteApprovalRuleFromTemplateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12709,29 +13005,29 @@ func (s CannotDeleteApprovalRuleFromTemplateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CannotDeleteApprovalRuleFromTemplateException) OrigErr() error { +func (s *CannotDeleteApprovalRuleFromTemplateException) OrigErr() error { return nil } -func (s CannotDeleteApprovalRuleFromTemplateException) Error() string { +func (s *CannotDeleteApprovalRuleFromTemplateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CannotDeleteApprovalRuleFromTemplateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CannotDeleteApprovalRuleFromTemplateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CannotDeleteApprovalRuleFromTemplateException) RequestID() string { - return s.respMetadata.RequestID +func (s *CannotDeleteApprovalRuleFromTemplateException) RequestID() string { + return s.RespMetadata.RequestID } // The approval rule cannot be modified for the pull request because it was // created by an approval rule template and applied to the pull request automatically. 
type CannotModifyApprovalRuleFromTemplateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12748,17 +13044,17 @@ func (s CannotModifyApprovalRuleFromTemplateException) GoString() string { func newErrorCannotModifyApprovalRuleFromTemplateException(v protocol.ResponseMetadata) error { return &CannotModifyApprovalRuleFromTemplateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CannotModifyApprovalRuleFromTemplateException) Code() string { +func (s *CannotModifyApprovalRuleFromTemplateException) Code() string { return "CannotModifyApprovalRuleFromTemplateException" } // Message returns the exception's message. -func (s CannotModifyApprovalRuleFromTemplateException) Message() string { +func (s *CannotModifyApprovalRuleFromTemplateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12766,22 +13062,22 @@ func (s CannotModifyApprovalRuleFromTemplateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CannotModifyApprovalRuleFromTemplateException) OrigErr() error { +func (s *CannotModifyApprovalRuleFromTemplateException) OrigErr() error { return nil } -func (s CannotModifyApprovalRuleFromTemplateException) Error() string { +func (s *CannotModifyApprovalRuleFromTemplateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CannotModifyApprovalRuleFromTemplateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CannotModifyApprovalRuleFromTemplateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CannotModifyApprovalRuleFromTemplateException) RequestID() string { - return s.respMetadata.RequestID +func (s *CannotModifyApprovalRuleFromTemplateException) RequestID() string { + return s.RespMetadata.RequestID } // A client request token is required. A client request token is an unique, @@ -12790,8 +13086,8 @@ func (s CannotModifyApprovalRuleFromTemplateException) RequestID() string { // received with the same parameters and a token is included, the request returns // information about the initial request that used that token. type ClientRequestTokenRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12808,17 +13104,17 @@ func (s ClientRequestTokenRequiredException) GoString() string { func newErrorClientRequestTokenRequiredException(v protocol.ResponseMetadata) error { return &ClientRequestTokenRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClientRequestTokenRequiredException) Code() string { +func (s *ClientRequestTokenRequiredException) Code() string { return "ClientRequestTokenRequiredException" } // Message returns the exception's message. 
-func (s ClientRequestTokenRequiredException) Message() string { +func (s *ClientRequestTokenRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12826,22 +13122,22 @@ func (s ClientRequestTokenRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClientRequestTokenRequiredException) OrigErr() error { +func (s *ClientRequestTokenRequiredException) OrigErr() error { return nil } -func (s ClientRequestTokenRequiredException) Error() string { +func (s *ClientRequestTokenRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClientRequestTokenRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClientRequestTokenRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClientRequestTokenRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ClientRequestTokenRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about a specific comment. @@ -12851,6 +13147,10 @@ type Comment struct { // The Amazon Resource Name (ARN) of the person who posted the comment. AuthorArn *string `locationName:"authorArn" type:"string"` + // The emoji reactions to a comment, if any, submitted by the user whose credentials + // are associated with the call to the API. + CallerReactions []*string `locationName:"callerReactions" type:"list"` + // A unique, client-generated idempotency token that, when provided in a request, // ensures the request cannot be repeated with a changed parameter. If a request // is received with the same parameters and a token is included, the request @@ -12874,6 +13174,10 @@ type Comment struct { // The date and time the comment was most recently modified, in timestamp format. LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` + + // A string to integer map that represents the number of individual users who + // have responded to a comment with the specified reactions. + ReactionCounts map[string]*int64 `locationName:"reactionCounts" type:"map"` } // String returns the string representation @@ -12892,6 +13196,12 @@ func (s *Comment) SetAuthorArn(v string) *Comment { return s } +// SetCallerReactions sets the CallerReactions field's value. +func (s *Comment) SetCallerReactions(v []*string) *Comment { + s.CallerReactions = v + return s +} + // SetClientRequestToken sets the ClientRequestToken field's value. func (s *Comment) SetClientRequestToken(v string) *Comment { s.ClientRequestToken = &v @@ -12934,11 +13244,17 @@ func (s *Comment) SetLastModifiedDate(v time.Time) *Comment { return s } +// SetReactionCounts sets the ReactionCounts field's value. +func (s *Comment) SetReactionCounts(v map[string]*int64) *Comment { + s.ReactionCounts = v + return s +} + // The comment is empty. You must provide some content for a comment. The content // cannot be null. 
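// Illustrative sketch (not part of the generated SDK): reading the reaction fields that this
// change adds to the Comment type above. The comment value stands in for a *codecommit.Comment
// returned by GetComment or one of the GetCommentsFor* operations.
//
//    for reaction, count := range comment.ReactionCounts {
//        fmt.Printf("%s: %d users\n", reaction, aws.Int64Value(count))
//    }
//    fmt.Println("reactions from the caller:", aws.StringValueSlice(comment.CallerReactions))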
type CommentContentRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12955,17 +13271,17 @@ func (s CommentContentRequiredException) GoString() string { func newErrorCommentContentRequiredException(v protocol.ResponseMetadata) error { return &CommentContentRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CommentContentRequiredException) Code() string { +func (s *CommentContentRequiredException) Code() string { return "CommentContentRequiredException" } // Message returns the exception's message. -func (s CommentContentRequiredException) Message() string { +func (s *CommentContentRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12973,28 +13289,28 @@ func (s CommentContentRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CommentContentRequiredException) OrigErr() error { +func (s *CommentContentRequiredException) OrigErr() error { return nil } -func (s CommentContentRequiredException) Error() string { +func (s *CommentContentRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CommentContentRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommentContentRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommentContentRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommentContentRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The comment is too large. Comments are limited to 1,000 characters. type CommentContentSizeLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13011,17 +13327,17 @@ func (s CommentContentSizeLimitExceededException) GoString() string { func newErrorCommentContentSizeLimitExceededException(v protocol.ResponseMetadata) error { return &CommentContentSizeLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CommentContentSizeLimitExceededException) Code() string { +func (s *CommentContentSizeLimitExceededException) Code() string { return "CommentContentSizeLimitExceededException" } // Message returns the exception's message. -func (s CommentContentSizeLimitExceededException) Message() string { +func (s *CommentContentSizeLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13029,29 +13345,29 @@ func (s CommentContentSizeLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s CommentContentSizeLimitExceededException) OrigErr() error { +func (s *CommentContentSizeLimitExceededException) OrigErr() error { return nil } -func (s CommentContentSizeLimitExceededException) Error() string { +func (s *CommentContentSizeLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CommentContentSizeLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommentContentSizeLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommentContentSizeLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommentContentSizeLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // This comment has already been deleted. You cannot edit or delete a deleted // comment. type CommentDeletedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13068,17 +13384,17 @@ func (s CommentDeletedException) GoString() string { func newErrorCommentDeletedException(v protocol.ResponseMetadata) error { return &CommentDeletedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CommentDeletedException) Code() string { +func (s *CommentDeletedException) Code() string { return "CommentDeletedException" } // Message returns the exception's message. -func (s CommentDeletedException) Message() string { +func (s *CommentDeletedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13086,29 +13402,29 @@ func (s CommentDeletedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CommentDeletedException) OrigErr() error { +func (s *CommentDeletedException) OrigErr() error { return nil } -func (s CommentDeletedException) Error() string { +func (s *CommentDeletedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CommentDeletedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommentDeletedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommentDeletedException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommentDeletedException) RequestID() string { + return s.RespMetadata.RequestID } // No comment exists with the provided ID. Verify that you have used the correct // ID, and then try again. type CommentDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13125,17 +13441,17 @@ func (s CommentDoesNotExistException) GoString() string { func newErrorCommentDoesNotExistException(v protocol.ResponseMetadata) error { return &CommentDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s CommentDoesNotExistException) Code() string { +func (s *CommentDoesNotExistException) Code() string { return "CommentDoesNotExistException" } // Message returns the exception's message. -func (s CommentDoesNotExistException) Message() string { +func (s *CommentDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13143,28 +13459,28 @@ func (s CommentDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CommentDoesNotExistException) OrigErr() error { +func (s *CommentDoesNotExistException) OrigErr() error { return nil } -func (s CommentDoesNotExistException) Error() string { +func (s *CommentDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CommentDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommentDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommentDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommentDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The comment ID is missing or null. A comment ID is required. type CommentIdRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13181,17 +13497,17 @@ func (s CommentIdRequiredException) GoString() string { func newErrorCommentIdRequiredException(v protocol.ResponseMetadata) error { return &CommentIdRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CommentIdRequiredException) Code() string { +func (s *CommentIdRequiredException) Code() string { return "CommentIdRequiredException" } // Message returns the exception's message. -func (s CommentIdRequiredException) Message() string { +func (s *CommentIdRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13199,29 +13515,29 @@ func (s CommentIdRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CommentIdRequiredException) OrigErr() error { +func (s *CommentIdRequiredException) OrigErr() error { return nil } -func (s CommentIdRequiredException) Error() string { +func (s *CommentIdRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CommentIdRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommentIdRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommentIdRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommentIdRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // You cannot modify or delete this comment. Only comment authors can modify // or delete their comments. 
type CommentNotCreatedByCallerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13238,17 +13554,17 @@ func (s CommentNotCreatedByCallerException) GoString() string { func newErrorCommentNotCreatedByCallerException(v protocol.ResponseMetadata) error { return &CommentNotCreatedByCallerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CommentNotCreatedByCallerException) Code() string { +func (s *CommentNotCreatedByCallerException) Code() string { return "CommentNotCreatedByCallerException" } // Message returns the exception's message. -func (s CommentNotCreatedByCallerException) Message() string { +func (s *CommentNotCreatedByCallerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13256,22 +13572,22 @@ func (s CommentNotCreatedByCallerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CommentNotCreatedByCallerException) OrigErr() error { +func (s *CommentNotCreatedByCallerException) OrigErr() error { return nil } -func (s CommentNotCreatedByCallerException) Error() string { +func (s *CommentNotCreatedByCallerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CommentNotCreatedByCallerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommentNotCreatedByCallerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommentNotCreatedByCallerException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommentNotCreatedByCallerException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about comments on the comparison between two commits. @@ -13541,8 +13857,8 @@ func (s *Commit) SetTreeId(v string) *Commit { // The specified commit does not exist or no commit was specified, and the specified // repository has no default branch. type CommitDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13559,17 +13875,17 @@ func (s CommitDoesNotExistException) GoString() string { func newErrorCommitDoesNotExistException(v protocol.ResponseMetadata) error { return &CommitDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CommitDoesNotExistException) Code() string { +func (s *CommitDoesNotExistException) Code() string { return "CommitDoesNotExistException" } // Message returns the exception's message. -func (s CommitDoesNotExistException) Message() string { +func (s *CommitDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13577,28 +13893,28 @@ func (s CommitDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s CommitDoesNotExistException) OrigErr() error { +func (s *CommitDoesNotExistException) OrigErr() error { return nil } -func (s CommitDoesNotExistException) Error() string { +func (s *CommitDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CommitDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommitDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommitDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommitDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The specified commit ID does not exist. type CommitIdDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13615,17 +13931,17 @@ func (s CommitIdDoesNotExistException) GoString() string { func newErrorCommitIdDoesNotExistException(v protocol.ResponseMetadata) error { return &CommitIdDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CommitIdDoesNotExistException) Code() string { +func (s *CommitIdDoesNotExistException) Code() string { return "CommitIdDoesNotExistException" } // Message returns the exception's message. -func (s CommitIdDoesNotExistException) Message() string { +func (s *CommitIdDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13633,28 +13949,28 @@ func (s CommitIdDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CommitIdDoesNotExistException) OrigErr() error { +func (s *CommitIdDoesNotExistException) OrigErr() error { return nil } -func (s CommitIdDoesNotExistException) Error() string { +func (s *CommitIdDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CommitIdDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommitIdDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommitIdDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommitIdDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // A commit ID was not specified. type CommitIdRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13671,17 +13987,17 @@ func (s CommitIdRequiredException) GoString() string { func newErrorCommitIdRequiredException(v protocol.ResponseMetadata) error { return &CommitIdRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CommitIdRequiredException) Code() string { +func (s *CommitIdRequiredException) Code() string { return "CommitIdRequiredException" } // Message returns the exception's message. 
-func (s CommitIdRequiredException) Message() string { +func (s *CommitIdRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13689,30 +14005,30 @@ func (s CommitIdRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CommitIdRequiredException) OrigErr() error { +func (s *CommitIdRequiredException) OrigErr() error { return nil } -func (s CommitIdRequiredException) Error() string { +func (s *CommitIdRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CommitIdRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommitIdRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommitIdRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommitIdRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The maximum number of allowed commit IDs in a batch request is 100. Verify // that your batch requests contains no more than 100 commit IDs, and then try // again. type CommitIdsLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13729,17 +14045,17 @@ func (s CommitIdsLimitExceededException) GoString() string { func newErrorCommitIdsLimitExceededException(v protocol.ResponseMetadata) error { return &CommitIdsLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CommitIdsLimitExceededException) Code() string { +func (s *CommitIdsLimitExceededException) Code() string { return "CommitIdsLimitExceededException" } // Message returns the exception's message. -func (s CommitIdsLimitExceededException) Message() string { +func (s *CommitIdsLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13747,29 +14063,29 @@ func (s CommitIdsLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CommitIdsLimitExceededException) OrigErr() error { +func (s *CommitIdsLimitExceededException) OrigErr() error { return nil } -func (s CommitIdsLimitExceededException) Error() string { +func (s *CommitIdsLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CommitIdsLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommitIdsLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommitIdsLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommitIdsLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // A list of commit IDs is required, but was either not specified or the list // was empty. 
type CommitIdsListRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13786,17 +14102,17 @@ func (s CommitIdsListRequiredException) GoString() string { func newErrorCommitIdsListRequiredException(v protocol.ResponseMetadata) error { return &CommitIdsListRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CommitIdsListRequiredException) Code() string { +func (s *CommitIdsListRequiredException) Code() string { return "CommitIdsListRequiredException" } // Message returns the exception's message. -func (s CommitIdsListRequiredException) Message() string { +func (s *CommitIdsListRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13804,28 +14120,28 @@ func (s CommitIdsListRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CommitIdsListRequiredException) OrigErr() error { +func (s *CommitIdsListRequiredException) OrigErr() error { return nil } -func (s CommitIdsListRequiredException) Error() string { +func (s *CommitIdsListRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CommitIdsListRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommitIdsListRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommitIdsListRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommitIdsListRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The commit message is too long. Provide a shorter string. type CommitMessageLengthExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13842,17 +14158,17 @@ func (s CommitMessageLengthExceededException) GoString() string { func newErrorCommitMessageLengthExceededException(v protocol.ResponseMetadata) error { return &CommitMessageLengthExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CommitMessageLengthExceededException) Code() string { +func (s *CommitMessageLengthExceededException) Code() string { return "CommitMessageLengthExceededException" } // Message returns the exception's message. -func (s CommitMessageLengthExceededException) Message() string { +func (s *CommitMessageLengthExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13860,28 +14176,28 @@ func (s CommitMessageLengthExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CommitMessageLengthExceededException) OrigErr() error { +func (s *CommitMessageLengthExceededException) OrigErr() error { return nil } -func (s CommitMessageLengthExceededException) Error() string { +func (s *CommitMessageLengthExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s CommitMessageLengthExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommitMessageLengthExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommitMessageLengthExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommitMessageLengthExceededException) RequestID() string { + return s.RespMetadata.RequestID } // A commit was not specified. type CommitRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13898,17 +14214,17 @@ func (s CommitRequiredException) GoString() string { func newErrorCommitRequiredException(v protocol.ResponseMetadata) error { return &CommitRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CommitRequiredException) Code() string { +func (s *CommitRequiredException) Code() string { return "CommitRequiredException" } // Message returns the exception's message. -func (s CommitRequiredException) Message() string { +func (s *CommitRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13916,30 +14232,30 @@ func (s CommitRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CommitRequiredException) OrigErr() error { +func (s *CommitRequiredException) OrigErr() error { return nil } -func (s CommitRequiredException) Error() string { +func (s *CommitRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CommitRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CommitRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CommitRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *CommitRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The merge cannot be completed because the target branch has been modified. // Another user might have modified the target branch while the merge was in // progress. Wait a few minutes, and then try again. type ConcurrentReferenceUpdateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13956,17 +14272,17 @@ func (s ConcurrentReferenceUpdateException) GoString() string { func newErrorConcurrentReferenceUpdateException(v protocol.ResponseMetadata) error { return &ConcurrentReferenceUpdateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentReferenceUpdateException) Code() string { +func (s *ConcurrentReferenceUpdateException) Code() string { return "ConcurrentReferenceUpdateException" } // Message returns the exception's message. 
-func (s ConcurrentReferenceUpdateException) Message() string { +func (s *ConcurrentReferenceUpdateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13974,22 +14290,22 @@ func (s ConcurrentReferenceUpdateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentReferenceUpdateException) OrigErr() error { +func (s *ConcurrentReferenceUpdateException) OrigErr() error { return nil } -func (s ConcurrentReferenceUpdateException) Error() string { +func (s *ConcurrentReferenceUpdateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentReferenceUpdateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentReferenceUpdateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentReferenceUpdateException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentReferenceUpdateException) RequestID() string { + return s.RespMetadata.RequestID } // Information about conflicts in a merge operation. @@ -14246,7 +14562,7 @@ type CreateApprovalRuleTemplateInput struct { // Amazon Resource Name (ARN) of the IAM user or role. // // For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers - // (https://docs.aws.amazon.com/iam/latest/UserGuide/reference_identifiers.html) + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) // in the IAM User Guide. // // ApprovalRuleTemplateContent is a required field @@ -14678,7 +14994,7 @@ type CreatePullRequestApprovalRuleInput struct { // Amazon Resource Name (ARN) of the IAM user or role. // // For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers - // (https://docs.aws.amazon.com/iam/latest/UserGuide/reference_identifiers.html) + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) // in the IAM User Guide. // // ApprovalRuleContent is a required field @@ -15186,8 +15502,8 @@ func (s *CreateUnreferencedMergeCommitOutput) SetTreeId(v string) *CreateUnrefer // be deleted. To delete this branch, you must first set another branch as the // default branch. type DefaultBranchCannotBeDeletedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15204,17 +15520,17 @@ func (s DefaultBranchCannotBeDeletedException) GoString() string { func newErrorDefaultBranchCannotBeDeletedException(v protocol.ResponseMetadata) error { return &DefaultBranchCannotBeDeletedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DefaultBranchCannotBeDeletedException) Code() string { +func (s *DefaultBranchCannotBeDeletedException) Code() string { return "DefaultBranchCannotBeDeletedException" } // Message returns the exception's message. -func (s DefaultBranchCannotBeDeletedException) Message() string { +func (s *DefaultBranchCannotBeDeletedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15222,22 +15538,22 @@ func (s DefaultBranchCannotBeDeletedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s DefaultBranchCannotBeDeletedException) OrigErr() error { +func (s *DefaultBranchCannotBeDeletedException) OrigErr() error { return nil } -func (s DefaultBranchCannotBeDeletedException) Error() string { +func (s *DefaultBranchCannotBeDeletedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DefaultBranchCannotBeDeletedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DefaultBranchCannotBeDeletedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DefaultBranchCannotBeDeletedException) RequestID() string { - return s.respMetadata.RequestID +func (s *DefaultBranchCannotBeDeletedException) RequestID() string { + return s.RespMetadata.RequestID } type DeleteApprovalRuleTemplateInput struct { @@ -16223,8 +16539,8 @@ func (s *Difference) SetChangeType(v string) *Difference { // provide a different name for the file, or specify a different path for the // file. type DirectoryNameConflictsWithFileNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16241,17 +16557,17 @@ func (s DirectoryNameConflictsWithFileNameException) GoString() string { func newErrorDirectoryNameConflictsWithFileNameException(v protocol.ResponseMetadata) error { return &DirectoryNameConflictsWithFileNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DirectoryNameConflictsWithFileNameException) Code() string { +func (s *DirectoryNameConflictsWithFileNameException) Code() string { return "DirectoryNameConflictsWithFileNameException" } // Message returns the exception's message. -func (s DirectoryNameConflictsWithFileNameException) Message() string { +func (s *DirectoryNameConflictsWithFileNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16259,22 +16575,22 @@ func (s DirectoryNameConflictsWithFileNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DirectoryNameConflictsWithFileNameException) OrigErr() error { +func (s *DirectoryNameConflictsWithFileNameException) OrigErr() error { return nil } -func (s DirectoryNameConflictsWithFileNameException) Error() string { +func (s *DirectoryNameConflictsWithFileNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DirectoryNameConflictsWithFileNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DirectoryNameConflictsWithFileNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DirectoryNameConflictsWithFileNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *DirectoryNameConflictsWithFileNameException) RequestID() string { + return s.RespMetadata.RequestID } type DisassociateApprovalRuleTemplateFromRepositoryInput struct { @@ -16351,8 +16667,8 @@ func (s DisassociateApprovalRuleTemplateFromRepositoryOutput) GoString() string // An encryption integrity check failed. 
type EncryptionIntegrityChecksFailedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16369,17 +16685,17 @@ func (s EncryptionIntegrityChecksFailedException) GoString() string { func newErrorEncryptionIntegrityChecksFailedException(v protocol.ResponseMetadata) error { return &EncryptionIntegrityChecksFailedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EncryptionIntegrityChecksFailedException) Code() string { +func (s *EncryptionIntegrityChecksFailedException) Code() string { return "EncryptionIntegrityChecksFailedException" } // Message returns the exception's message. -func (s EncryptionIntegrityChecksFailedException) Message() string { +func (s *EncryptionIntegrityChecksFailedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16387,28 +16703,28 @@ func (s EncryptionIntegrityChecksFailedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EncryptionIntegrityChecksFailedException) OrigErr() error { +func (s *EncryptionIntegrityChecksFailedException) OrigErr() error { return nil } -func (s EncryptionIntegrityChecksFailedException) Error() string { +func (s *EncryptionIntegrityChecksFailedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EncryptionIntegrityChecksFailedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EncryptionIntegrityChecksFailedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EncryptionIntegrityChecksFailedException) RequestID() string { - return s.respMetadata.RequestID +func (s *EncryptionIntegrityChecksFailedException) RequestID() string { + return s.RespMetadata.RequestID } // An encryption key could not be accessed. type EncryptionKeyAccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16425,17 +16741,17 @@ func (s EncryptionKeyAccessDeniedException) GoString() string { func newErrorEncryptionKeyAccessDeniedException(v protocol.ResponseMetadata) error { return &EncryptionKeyAccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EncryptionKeyAccessDeniedException) Code() string { +func (s *EncryptionKeyAccessDeniedException) Code() string { return "EncryptionKeyAccessDeniedException" } // Message returns the exception's message. -func (s EncryptionKeyAccessDeniedException) Message() string { +func (s *EncryptionKeyAccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16443,28 +16759,28 @@ func (s EncryptionKeyAccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s EncryptionKeyAccessDeniedException) OrigErr() error { +func (s *EncryptionKeyAccessDeniedException) OrigErr() error { return nil } -func (s EncryptionKeyAccessDeniedException) Error() string { +func (s *EncryptionKeyAccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EncryptionKeyAccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EncryptionKeyAccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EncryptionKeyAccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *EncryptionKeyAccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // The encryption key is disabled. type EncryptionKeyDisabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16481,17 +16797,17 @@ func (s EncryptionKeyDisabledException) GoString() string { func newErrorEncryptionKeyDisabledException(v protocol.ResponseMetadata) error { return &EncryptionKeyDisabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EncryptionKeyDisabledException) Code() string { +func (s *EncryptionKeyDisabledException) Code() string { return "EncryptionKeyDisabledException" } // Message returns the exception's message. -func (s EncryptionKeyDisabledException) Message() string { +func (s *EncryptionKeyDisabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16499,28 +16815,28 @@ func (s EncryptionKeyDisabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EncryptionKeyDisabledException) OrigErr() error { +func (s *EncryptionKeyDisabledException) OrigErr() error { return nil } -func (s EncryptionKeyDisabledException) Error() string { +func (s *EncryptionKeyDisabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EncryptionKeyDisabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EncryptionKeyDisabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EncryptionKeyDisabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *EncryptionKeyDisabledException) RequestID() string { + return s.RespMetadata.RequestID } // No encryption key was found. type EncryptionKeyNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16537,17 +16853,17 @@ func (s EncryptionKeyNotFoundException) GoString() string { func newErrorEncryptionKeyNotFoundException(v protocol.ResponseMetadata) error { return &EncryptionKeyNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s EncryptionKeyNotFoundException) Code() string { +func (s *EncryptionKeyNotFoundException) Code() string { return "EncryptionKeyNotFoundException" } // Message returns the exception's message. -func (s EncryptionKeyNotFoundException) Message() string { +func (s *EncryptionKeyNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16555,28 +16871,28 @@ func (s EncryptionKeyNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EncryptionKeyNotFoundException) OrigErr() error { +func (s *EncryptionKeyNotFoundException) OrigErr() error { return nil } -func (s EncryptionKeyNotFoundException) Error() string { +func (s *EncryptionKeyNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EncryptionKeyNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EncryptionKeyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EncryptionKeyNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *EncryptionKeyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The encryption key is not available. type EncryptionKeyUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16593,17 +16909,17 @@ func (s EncryptionKeyUnavailableException) GoString() string { func newErrorEncryptionKeyUnavailableException(v protocol.ResponseMetadata) error { return &EncryptionKeyUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EncryptionKeyUnavailableException) Code() string { +func (s *EncryptionKeyUnavailableException) Code() string { return "EncryptionKeyUnavailableException" } // Message returns the exception's message. -func (s EncryptionKeyUnavailableException) Message() string { +func (s *EncryptionKeyUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16611,22 +16927,22 @@ func (s EncryptionKeyUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EncryptionKeyUnavailableException) OrigErr() error { +func (s *EncryptionKeyUnavailableException) OrigErr() error { return nil } -func (s EncryptionKeyUnavailableException) Error() string { +func (s *EncryptionKeyUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EncryptionKeyUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EncryptionKeyUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EncryptionKeyUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *EncryptionKeyUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } type EvaluatePullRequestApprovalRulesInput struct { @@ -16819,8 +17135,8 @@ func (s *File) SetRelativePath(v string) *File { // have been specified for the same file. You cannot provide both. 
Either specify // a source file or provide the file content directly. type FileContentAndSourceFileSpecifiedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16837,17 +17153,17 @@ func (s FileContentAndSourceFileSpecifiedException) GoString() string { func newErrorFileContentAndSourceFileSpecifiedException(v protocol.ResponseMetadata) error { return &FileContentAndSourceFileSpecifiedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FileContentAndSourceFileSpecifiedException) Code() string { +func (s *FileContentAndSourceFileSpecifiedException) Code() string { return "FileContentAndSourceFileSpecifiedException" } // Message returns the exception's message. -func (s FileContentAndSourceFileSpecifiedException) Message() string { +func (s *FileContentAndSourceFileSpecifiedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16855,29 +17171,29 @@ func (s FileContentAndSourceFileSpecifiedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FileContentAndSourceFileSpecifiedException) OrigErr() error { +func (s *FileContentAndSourceFileSpecifiedException) OrigErr() error { return nil } -func (s FileContentAndSourceFileSpecifiedException) Error() string { +func (s *FileContentAndSourceFileSpecifiedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FileContentAndSourceFileSpecifiedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileContentAndSourceFileSpecifiedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FileContentAndSourceFileSpecifiedException) RequestID() string { - return s.respMetadata.RequestID +func (s *FileContentAndSourceFileSpecifiedException) RequestID() string { + return s.RespMetadata.RequestID } // The file cannot be added because it is empty. Empty files cannot be added // to the repository with this API. type FileContentRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16894,17 +17210,17 @@ func (s FileContentRequiredException) GoString() string { func newErrorFileContentRequiredException(v protocol.ResponseMetadata) error { return &FileContentRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FileContentRequiredException) Code() string { +func (s *FileContentRequiredException) Code() string { return "FileContentRequiredException" } // Message returns the exception's message. -func (s FileContentRequiredException) Message() string { +func (s *FileContentRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16912,30 +17228,30 @@ func (s FileContentRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s FileContentRequiredException) OrigErr() error { +func (s *FileContentRequiredException) OrigErr() error { return nil } -func (s FileContentRequiredException) Error() string { +func (s *FileContentRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FileContentRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileContentRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FileContentRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *FileContentRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The file cannot be added because it is too large. The maximum file size is // 6 MB, and the combined file content change size is 7 MB. Consider making // these changes using a Git client. type FileContentSizeLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16952,17 +17268,17 @@ func (s FileContentSizeLimitExceededException) GoString() string { func newErrorFileContentSizeLimitExceededException(v protocol.ResponseMetadata) error { return &FileContentSizeLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FileContentSizeLimitExceededException) Code() string { +func (s *FileContentSizeLimitExceededException) Code() string { return "FileContentSizeLimitExceededException" } // Message returns the exception's message. -func (s FileContentSizeLimitExceededException) Message() string { +func (s *FileContentSizeLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16970,29 +17286,29 @@ func (s FileContentSizeLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FileContentSizeLimitExceededException) OrigErr() error { +func (s *FileContentSizeLimitExceededException) OrigErr() error { return nil } -func (s FileContentSizeLimitExceededException) Error() string { +func (s *FileContentSizeLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FileContentSizeLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileContentSizeLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FileContentSizeLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *FileContentSizeLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The specified file does not exist. Verify that you have used the correct // file name, full path, and extension. 
type FileDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17009,17 +17325,17 @@ func (s FileDoesNotExistException) GoString() string { func newErrorFileDoesNotExistException(v protocol.ResponseMetadata) error { return &FileDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FileDoesNotExistException) Code() string { +func (s *FileDoesNotExistException) Code() string { return "FileDoesNotExistException" } // Message returns the exception's message. -func (s FileDoesNotExistException) Message() string { +func (s *FileDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17027,29 +17343,29 @@ func (s FileDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FileDoesNotExistException) OrigErr() error { +func (s *FileDoesNotExistException) OrigErr() error { return nil } -func (s FileDoesNotExistException) Error() string { +func (s *FileDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FileDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FileDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *FileDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The commit cannot be created because no files have been specified as added, // updated, or changed (PutFile or DeleteFile) for the commit. type FileEntryRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17066,17 +17382,17 @@ func (s FileEntryRequiredException) GoString() string { func newErrorFileEntryRequiredException(v protocol.ResponseMetadata) error { return &FileEntryRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FileEntryRequiredException) Code() string { +func (s *FileEntryRequiredException) Code() string { return "FileEntryRequiredException" } // Message returns the exception's message. -func (s FileEntryRequiredException) Message() string { +func (s *FileEntryRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17084,22 +17400,22 @@ func (s FileEntryRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FileEntryRequiredException) OrigErr() error { +func (s *FileEntryRequiredException) OrigErr() error { return nil } -func (s FileEntryRequiredException) Error() string { +func (s *FileEntryRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s FileEntryRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileEntryRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FileEntryRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *FileEntryRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // A file to be added, updated, or deleted as part of a commit. @@ -17149,8 +17465,8 @@ func (s *FileMetadata) SetFileMode(v string) *FileMetadata { // The commit cannot be created because no file mode has been specified. A file // mode is required to update mode permissions for a file. type FileModeRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17167,17 +17483,17 @@ func (s FileModeRequiredException) GoString() string { func newErrorFileModeRequiredException(v protocol.ResponseMetadata) error { return &FileModeRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FileModeRequiredException) Code() string { +func (s *FileModeRequiredException) Code() string { return "FileModeRequiredException" } // Message returns the exception's message. -func (s FileModeRequiredException) Message() string { +func (s *FileModeRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17185,22 +17501,22 @@ func (s FileModeRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FileModeRequiredException) OrigErr() error { +func (s *FileModeRequiredException) OrigErr() error { return nil } -func (s FileModeRequiredException) Error() string { +func (s *FileModeRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FileModeRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileModeRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FileModeRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *FileModeRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Information about file modes in a merge or pull request. @@ -17250,8 +17566,8 @@ func (s *FileModes) SetSource(v string) *FileModes { // name for the file, or add the file in a directory that does not match the // file name. type FileNameConflictsWithDirectoryNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17268,17 +17584,17 @@ func (s FileNameConflictsWithDirectoryNameException) GoString() string { func newErrorFileNameConflictsWithDirectoryNameException(v protocol.ResponseMetadata) error { return &FileNameConflictsWithDirectoryNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s FileNameConflictsWithDirectoryNameException) Code() string { +func (s *FileNameConflictsWithDirectoryNameException) Code() string { return "FileNameConflictsWithDirectoryNameException" } // Message returns the exception's message. -func (s FileNameConflictsWithDirectoryNameException) Message() string { +func (s *FileNameConflictsWithDirectoryNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17286,30 +17602,30 @@ func (s FileNameConflictsWithDirectoryNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FileNameConflictsWithDirectoryNameException) OrigErr() error { +func (s *FileNameConflictsWithDirectoryNameException) OrigErr() error { return nil } -func (s FileNameConflictsWithDirectoryNameException) Error() string { +func (s *FileNameConflictsWithDirectoryNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FileNameConflictsWithDirectoryNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileNameConflictsWithDirectoryNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FileNameConflictsWithDirectoryNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *FileNameConflictsWithDirectoryNameException) RequestID() string { + return s.RespMetadata.RequestID } // The commit cannot be created because a specified file path points to a submodule. // Verify that the destination files have valid file paths that do not point // to a submodule. type FilePathConflictsWithSubmodulePathException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17326,17 +17642,17 @@ func (s FilePathConflictsWithSubmodulePathException) GoString() string { func newErrorFilePathConflictsWithSubmodulePathException(v protocol.ResponseMetadata) error { return &FilePathConflictsWithSubmodulePathException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FilePathConflictsWithSubmodulePathException) Code() string { +func (s *FilePathConflictsWithSubmodulePathException) Code() string { return "FilePathConflictsWithSubmodulePathException" } // Message returns the exception's message. -func (s FilePathConflictsWithSubmodulePathException) Message() string { +func (s *FilePathConflictsWithSubmodulePathException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17344,22 +17660,22 @@ func (s FilePathConflictsWithSubmodulePathException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FilePathConflictsWithSubmodulePathException) OrigErr() error { +func (s *FilePathConflictsWithSubmodulePathException) OrigErr() error { return nil } -func (s FilePathConflictsWithSubmodulePathException) Error() string { +func (s *FilePathConflictsWithSubmodulePathException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s FilePathConflictsWithSubmodulePathException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FilePathConflictsWithSubmodulePathException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FilePathConflictsWithSubmodulePathException) RequestID() string { - return s.respMetadata.RequestID +func (s *FilePathConflictsWithSubmodulePathException) RequestID() string { + return s.RespMetadata.RequestID } // Information about the size of files in a merge or pull request. @@ -17408,8 +17724,8 @@ func (s *FileSizes) SetSource(v int64) *FileSizes { // information about limits in AWS CodeCommit, see AWS CodeCommit User Guide // (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). type FileTooLargeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17426,17 +17742,17 @@ func (s FileTooLargeException) GoString() string { func newErrorFileTooLargeException(v protocol.ResponseMetadata) error { return &FileTooLargeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FileTooLargeException) Code() string { +func (s *FileTooLargeException) Code() string { return "FileTooLargeException" } // Message returns the exception's message. -func (s FileTooLargeException) Message() string { +func (s *FileTooLargeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17444,22 +17760,22 @@ func (s FileTooLargeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FileTooLargeException) OrigErr() error { +func (s *FileTooLargeException) OrigErr() error { return nil } -func (s FileTooLargeException) Error() string { +func (s *FileTooLargeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FileTooLargeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileTooLargeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FileTooLargeException) RequestID() string { - return s.respMetadata.RequestID +func (s *FileTooLargeException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about a folder in a repository. @@ -17511,8 +17827,8 @@ func (s *Folder) SetTreeId(v string) *Folder { // Either reduce the number and size of your changes, or split the changes across // multiple folders. type FolderContentSizeLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17529,17 +17845,17 @@ func (s FolderContentSizeLimitExceededException) GoString() string { func newErrorFolderContentSizeLimitExceededException(v protocol.ResponseMetadata) error { return &FolderContentSizeLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s FolderContentSizeLimitExceededException) Code() string { +func (s *FolderContentSizeLimitExceededException) Code() string { return "FolderContentSizeLimitExceededException" } // Message returns the exception's message. -func (s FolderContentSizeLimitExceededException) Message() string { +func (s *FolderContentSizeLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17547,29 +17863,29 @@ func (s FolderContentSizeLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FolderContentSizeLimitExceededException) OrigErr() error { +func (s *FolderContentSizeLimitExceededException) OrigErr() error { return nil } -func (s FolderContentSizeLimitExceededException) Error() string { +func (s *FolderContentSizeLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FolderContentSizeLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FolderContentSizeLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FolderContentSizeLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *FolderContentSizeLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The specified folder does not exist. Either the folder name is not correct, // or you did not enter the full path to the folder. type FolderDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17586,17 +17902,17 @@ func (s FolderDoesNotExistException) GoString() string { func newErrorFolderDoesNotExistException(v protocol.ResponseMetadata) error { return &FolderDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FolderDoesNotExistException) Code() string { +func (s *FolderDoesNotExistException) Code() string { return "FolderDoesNotExistException" } // Message returns the exception's message. -func (s FolderDoesNotExistException) Message() string { +func (s *FolderDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17604,22 +17920,22 @@ func (s FolderDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FolderDoesNotExistException) OrigErr() error { +func (s *FolderDoesNotExistException) OrigErr() error { return nil } -func (s FolderDoesNotExistException) Error() string { +func (s *FolderDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FolderDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FolderDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s FolderDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *FolderDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } type GetApprovalRuleTemplateInput struct { @@ -17908,16 +18224,119 @@ func (s *GetCommentOutput) SetComment(v *Comment) *GetCommentOutput { return s } -type GetCommentsForComparedCommitInput struct { +type GetCommentReactionsInput struct { _ struct{} `type:"structure"` - // To establish the directionality of the comparison, the full commit ID of - // the after commit. + // The ID of the comment for which you want to get reactions information. // - // AfterCommitId is a required field - AfterCommitId *string `locationName:"afterCommitId" type:"string" required:"true"` + // CommentId is a required field + CommentId *string `locationName:"commentId" type:"string" required:"true"` - // To establish the directionality of the comparison, the full commit ID of + // A non-zero, non-negative integer used to limit the number of returned results. + // The default is the same as the allowed maximum, 1,000. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // An enumeration token that, when provided in a request, returns the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // Optional. The Amazon Resource Name (ARN) of the user or identity for which + // you want to get reaction information. + ReactionUserArn *string `locationName:"reactionUserArn" type:"string"` +} + +// String returns the string representation +func (s GetCommentReactionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCommentReactionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetCommentReactionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetCommentReactionsInput"} + if s.CommentId == nil { + invalidParams.Add(request.NewErrParamRequired("CommentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCommentId sets the CommentId field's value. +func (s *GetCommentReactionsInput) SetCommentId(v string) *GetCommentReactionsInput { + s.CommentId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetCommentReactionsInput) SetMaxResults(v int64) *GetCommentReactionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetCommentReactionsInput) SetNextToken(v string) *GetCommentReactionsInput { + s.NextToken = &v + return s +} + +// SetReactionUserArn sets the ReactionUserArn field's value. +func (s *GetCommentReactionsInput) SetReactionUserArn(v string) *GetCommentReactionsInput { + s.ReactionUserArn = &v + return s +} + +type GetCommentReactionsOutput struct { + _ struct{} `type:"structure"` + + // An enumeration token that can be used in a request to return the next batch + // of the results. + NextToken *string `locationName:"nextToken" type:"string"` + + // An array of reactions to the specified comment. 
+ // + // ReactionsForComment is a required field + ReactionsForComment []*ReactionForComment `locationName:"reactionsForComment" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetCommentReactionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCommentReactionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetCommentReactionsOutput) SetNextToken(v string) *GetCommentReactionsOutput { + s.NextToken = &v + return s +} + +// SetReactionsForComment sets the ReactionsForComment field's value. +func (s *GetCommentReactionsOutput) SetReactionsForComment(v []*ReactionForComment) *GetCommentReactionsOutput { + s.ReactionsForComment = v + return s +} + +type GetCommentsForComparedCommitInput struct { + _ struct{} `type:"structure"` + + // To establish the directionality of the comparison, the full commit ID of + // the after commit. + // + // AfterCommitId is a required field + AfterCommitId *string `locationName:"afterCommitId" type:"string" required:"true"` + + // To establish the directionality of the comparison, the full commit ID of // the before commit. BeforeCommitId *string `locationName:"beforeCommitId" type:"string"` @@ -19571,8 +19990,8 @@ func (s *GetRepositoryTriggersOutput) SetTriggers(v []*RepositoryTrigger) *GetRe // The client request token is not valid. Either the token is not in a valid // format, or the token has been used in a previous request and cannot be reused. type IdempotencyParameterMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19589,17 +20008,17 @@ func (s IdempotencyParameterMismatchException) GoString() string { func newErrorIdempotencyParameterMismatchException(v protocol.ResponseMetadata) error { return &IdempotencyParameterMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IdempotencyParameterMismatchException) Code() string { +func (s *IdempotencyParameterMismatchException) Code() string { return "IdempotencyParameterMismatchException" } // Message returns the exception's message. -func (s IdempotencyParameterMismatchException) Message() string { +func (s *IdempotencyParameterMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19607,30 +20026,30 @@ func (s IdempotencyParameterMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IdempotencyParameterMismatchException) OrigErr() error { +func (s *IdempotencyParameterMismatchException) OrigErr() error { return nil } -func (s IdempotencyParameterMismatchException) Error() string { +func (s *IdempotencyParameterMismatchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IdempotencyParameterMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IdempotencyParameterMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s IdempotencyParameterMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *IdempotencyParameterMismatchException) RequestID() string { + return s.RespMetadata.RequestID } // The Amazon Resource Name (ARN) is not valid. Make sure that you have provided // the full ARN for the user who initiated the change for the pull request, // and then try again. type InvalidActorArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19647,17 +20066,17 @@ func (s InvalidActorArnException) GoString() string { func newErrorInvalidActorArnException(v protocol.ResponseMetadata) error { return &InvalidActorArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidActorArnException) Code() string { +func (s *InvalidActorArnException) Code() string { return "InvalidActorArnException" } // Message returns the exception's message. -func (s InvalidActorArnException) Message() string { +func (s *InvalidActorArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19665,28 +20084,28 @@ func (s InvalidActorArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidActorArnException) OrigErr() error { +func (s *InvalidActorArnException) OrigErr() error { return nil } -func (s InvalidActorArnException) Error() string { +func (s *InvalidActorArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidActorArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidActorArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidActorArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidActorArnException) RequestID() string { + return s.RespMetadata.RequestID } // The content for the approval rule is not valid. type InvalidApprovalRuleContentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19703,17 +20122,17 @@ func (s InvalidApprovalRuleContentException) GoString() string { func newErrorInvalidApprovalRuleContentException(v protocol.ResponseMetadata) error { return &InvalidApprovalRuleContentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidApprovalRuleContentException) Code() string { +func (s *InvalidApprovalRuleContentException) Code() string { return "InvalidApprovalRuleContentException" } // Message returns the exception's message. -func (s InvalidApprovalRuleContentException) Message() string { +func (s *InvalidApprovalRuleContentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19721,28 +20140,28 @@ func (s InvalidApprovalRuleContentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidApprovalRuleContentException) OrigErr() error { +func (s *InvalidApprovalRuleContentException) OrigErr() error { return nil } -func (s InvalidApprovalRuleContentException) Error() string { +func (s *InvalidApprovalRuleContentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidApprovalRuleContentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidApprovalRuleContentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidApprovalRuleContentException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidApprovalRuleContentException) RequestID() string { + return s.RespMetadata.RequestID } // The name for the approval rule is not valid. type InvalidApprovalRuleNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19759,17 +20178,17 @@ func (s InvalidApprovalRuleNameException) GoString() string { func newErrorInvalidApprovalRuleNameException(v protocol.ResponseMetadata) error { return &InvalidApprovalRuleNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidApprovalRuleNameException) Code() string { +func (s *InvalidApprovalRuleNameException) Code() string { return "InvalidApprovalRuleNameException" } // Message returns the exception's message. -func (s InvalidApprovalRuleNameException) Message() string { +func (s *InvalidApprovalRuleNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19777,28 +20196,28 @@ func (s InvalidApprovalRuleNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidApprovalRuleNameException) OrigErr() error { +func (s *InvalidApprovalRuleNameException) OrigErr() error { return nil } -func (s InvalidApprovalRuleNameException) Error() string { +func (s *InvalidApprovalRuleNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidApprovalRuleNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidApprovalRuleNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidApprovalRuleNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidApprovalRuleNameException) RequestID() string { + return s.RespMetadata.RequestID } // The content of the approval rule template is not valid. type InvalidApprovalRuleTemplateContentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19815,17 +20234,17 @@ func (s InvalidApprovalRuleTemplateContentException) GoString() string { func newErrorInvalidApprovalRuleTemplateContentException(v protocol.ResponseMetadata) error { return &InvalidApprovalRuleTemplateContentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidApprovalRuleTemplateContentException) Code() string { +func (s *InvalidApprovalRuleTemplateContentException) Code() string { return "InvalidApprovalRuleTemplateContentException" } // Message returns the exception's message. -func (s InvalidApprovalRuleTemplateContentException) Message() string { +func (s *InvalidApprovalRuleTemplateContentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19833,30 +20252,30 @@ func (s InvalidApprovalRuleTemplateContentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidApprovalRuleTemplateContentException) OrigErr() error { +func (s *InvalidApprovalRuleTemplateContentException) OrigErr() error { return nil } -func (s InvalidApprovalRuleTemplateContentException) Error() string { +func (s *InvalidApprovalRuleTemplateContentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidApprovalRuleTemplateContentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidApprovalRuleTemplateContentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidApprovalRuleTemplateContentException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidApprovalRuleTemplateContentException) RequestID() string { + return s.RespMetadata.RequestID } // The description for the approval rule template is not valid because it exceeds // the maximum characters allowed for a description. For more information about // limits in AWS CodeCommit, see AWS CodeCommit User Guide (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). type InvalidApprovalRuleTemplateDescriptionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19873,17 +20292,17 @@ func (s InvalidApprovalRuleTemplateDescriptionException) GoString() string { func newErrorInvalidApprovalRuleTemplateDescriptionException(v protocol.ResponseMetadata) error { return &InvalidApprovalRuleTemplateDescriptionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidApprovalRuleTemplateDescriptionException) Code() string { +func (s *InvalidApprovalRuleTemplateDescriptionException) Code() string { return "InvalidApprovalRuleTemplateDescriptionException" } // Message returns the exception's message. -func (s InvalidApprovalRuleTemplateDescriptionException) Message() string { +func (s *InvalidApprovalRuleTemplateDescriptionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19891,30 +20310,30 @@ func (s InvalidApprovalRuleTemplateDescriptionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidApprovalRuleTemplateDescriptionException) OrigErr() error { +func (s *InvalidApprovalRuleTemplateDescriptionException) OrigErr() error { return nil } -func (s InvalidApprovalRuleTemplateDescriptionException) Error() string { +func (s *InvalidApprovalRuleTemplateDescriptionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidApprovalRuleTemplateDescriptionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidApprovalRuleTemplateDescriptionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidApprovalRuleTemplateDescriptionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidApprovalRuleTemplateDescriptionException) RequestID() string { + return s.RespMetadata.RequestID } // The name of the approval rule template is not valid. Template names must // be between 1 and 100 valid characters in length. For more information about // limits in AWS CodeCommit, see AWS CodeCommit User Guide (https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html). type InvalidApprovalRuleTemplateNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19931,17 +20350,17 @@ func (s InvalidApprovalRuleTemplateNameException) GoString() string { func newErrorInvalidApprovalRuleTemplateNameException(v protocol.ResponseMetadata) error { return &InvalidApprovalRuleTemplateNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidApprovalRuleTemplateNameException) Code() string { +func (s *InvalidApprovalRuleTemplateNameException) Code() string { return "InvalidApprovalRuleTemplateNameException" } // Message returns the exception's message. -func (s InvalidApprovalRuleTemplateNameException) Message() string { +func (s *InvalidApprovalRuleTemplateNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19949,29 +20368,29 @@ func (s InvalidApprovalRuleTemplateNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidApprovalRuleTemplateNameException) OrigErr() error { +func (s *InvalidApprovalRuleTemplateNameException) OrigErr() error { return nil } -func (s InvalidApprovalRuleTemplateNameException) Error() string { +func (s *InvalidApprovalRuleTemplateNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidApprovalRuleTemplateNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidApprovalRuleTemplateNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidApprovalRuleTemplateNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidApprovalRuleTemplateNameException) RequestID() string { + return s.RespMetadata.RequestID } // The state for the approval is not valid. Valid values include APPROVE and // REVOKE. type InvalidApprovalStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19988,17 +20407,17 @@ func (s InvalidApprovalStateException) GoString() string { func newErrorInvalidApprovalStateException(v protocol.ResponseMetadata) error { return &InvalidApprovalStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidApprovalStateException) Code() string { +func (s *InvalidApprovalStateException) Code() string { return "InvalidApprovalStateException" } // Message returns the exception's message. -func (s InvalidApprovalStateException) Message() string { +func (s *InvalidApprovalStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20006,29 +20425,29 @@ func (s InvalidApprovalStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidApprovalStateException) OrigErr() error { +func (s *InvalidApprovalStateException) OrigErr() error { return nil } -func (s InvalidApprovalStateException) Error() string { +func (s *InvalidApprovalStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidApprovalStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidApprovalStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidApprovalStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidApprovalStateException) RequestID() string { + return s.RespMetadata.RequestID } // The Amazon Resource Name (ARN) is not valid. Make sure that you have provided // the full ARN for the author of the pull request, and then try again. type InvalidAuthorArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20045,17 +20464,17 @@ func (s InvalidAuthorArnException) GoString() string { func newErrorInvalidAuthorArnException(v protocol.ResponseMetadata) error { return &InvalidAuthorArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAuthorArnException) Code() string { +func (s *InvalidAuthorArnException) Code() string { return "InvalidAuthorArnException" } // Message returns the exception's message. -func (s InvalidAuthorArnException) Message() string { +func (s *InvalidAuthorArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20063,28 +20482,28 @@ func (s InvalidAuthorArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAuthorArnException) OrigErr() error { +func (s *InvalidAuthorArnException) OrigErr() error { return nil } -func (s InvalidAuthorArnException) Error() string { +func (s *InvalidAuthorArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAuthorArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAuthorArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAuthorArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAuthorArnException) RequestID() string { + return s.RespMetadata.RequestID } // The specified blob is not valid. 
type InvalidBlobIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20101,17 +20520,17 @@ func (s InvalidBlobIdException) GoString() string { func newErrorInvalidBlobIdException(v protocol.ResponseMetadata) error { return &InvalidBlobIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidBlobIdException) Code() string { +func (s *InvalidBlobIdException) Code() string { return "InvalidBlobIdException" } // Message returns the exception's message. -func (s InvalidBlobIdException) Message() string { +func (s *InvalidBlobIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20119,28 +20538,28 @@ func (s InvalidBlobIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidBlobIdException) OrigErr() error { +func (s *InvalidBlobIdException) OrigErr() error { return nil } -func (s InvalidBlobIdException) Error() string { +func (s *InvalidBlobIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidBlobIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidBlobIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidBlobIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidBlobIdException) RequestID() string { + return s.RespMetadata.RequestID } // The specified reference name is not valid. type InvalidBranchNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20157,17 +20576,17 @@ func (s InvalidBranchNameException) GoString() string { func newErrorInvalidBranchNameException(v protocol.ResponseMetadata) error { return &InvalidBranchNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidBranchNameException) Code() string { +func (s *InvalidBranchNameException) Code() string { return "InvalidBranchNameException" } // Message returns the exception's message. -func (s InvalidBranchNameException) Message() string { +func (s *InvalidBranchNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20175,28 +20594,28 @@ func (s InvalidBranchNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidBranchNameException) OrigErr() error { +func (s *InvalidBranchNameException) OrigErr() error { return nil } -func (s InvalidBranchNameException) Error() string { +func (s *InvalidBranchNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidBranchNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidBranchNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidBranchNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidBranchNameException) RequestID() string { + return s.RespMetadata.RequestID } // The client request token is not valid. type InvalidClientRequestTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20213,17 +20632,17 @@ func (s InvalidClientRequestTokenException) GoString() string { func newErrorInvalidClientRequestTokenException(v protocol.ResponseMetadata) error { return &InvalidClientRequestTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidClientRequestTokenException) Code() string { +func (s *InvalidClientRequestTokenException) Code() string { return "InvalidClientRequestTokenException" } // Message returns the exception's message. -func (s InvalidClientRequestTokenException) Message() string { +func (s *InvalidClientRequestTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20231,29 +20650,29 @@ func (s InvalidClientRequestTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidClientRequestTokenException) OrigErr() error { +func (s *InvalidClientRequestTokenException) OrigErr() error { return nil } -func (s InvalidClientRequestTokenException) Error() string { +func (s *InvalidClientRequestTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidClientRequestTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidClientRequestTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidClientRequestTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidClientRequestTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The comment ID is not in a valid format. Make sure that you have provided // the full comment ID. type InvalidCommentIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20270,17 +20689,17 @@ func (s InvalidCommentIdException) GoString() string { func newErrorInvalidCommentIdException(v protocol.ResponseMetadata) error { return &InvalidCommentIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidCommentIdException) Code() string { +func (s *InvalidCommentIdException) Code() string { return "InvalidCommentIdException" } // Message returns the exception's message. -func (s InvalidCommentIdException) Message() string { +func (s *InvalidCommentIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20288,28 +20707,28 @@ func (s InvalidCommentIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidCommentIdException) OrigErr() error { +func (s *InvalidCommentIdException) OrigErr() error { return nil } -func (s InvalidCommentIdException) Error() string { +func (s *InvalidCommentIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidCommentIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidCommentIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidCommentIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidCommentIdException) RequestID() string { + return s.RespMetadata.RequestID } // The specified commit is not valid. type InvalidCommitException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20326,17 +20745,17 @@ func (s InvalidCommitException) GoString() string { func newErrorInvalidCommitException(v protocol.ResponseMetadata) error { return &InvalidCommitException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidCommitException) Code() string { +func (s *InvalidCommitException) Code() string { return "InvalidCommitException" } // Message returns the exception's message. -func (s InvalidCommitException) Message() string { +func (s *InvalidCommitException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20344,28 +20763,28 @@ func (s InvalidCommitException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidCommitException) OrigErr() error { +func (s *InvalidCommitException) OrigErr() error { return nil } -func (s InvalidCommitException) Error() string { +func (s *InvalidCommitException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidCommitException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidCommitException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidCommitException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidCommitException) RequestID() string { + return s.RespMetadata.RequestID } // The specified commit ID is not valid. type InvalidCommitIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20382,17 +20801,17 @@ func (s InvalidCommitIdException) GoString() string { func newErrorInvalidCommitIdException(v protocol.ResponseMetadata) error { return &InvalidCommitIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidCommitIdException) Code() string { +func (s *InvalidCommitIdException) Code() string { return "InvalidCommitIdException" } // Message returns the exception's message. 
-func (s InvalidCommitIdException) Message() string { +func (s *InvalidCommitIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20400,28 +20819,28 @@ func (s InvalidCommitIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidCommitIdException) OrigErr() error { +func (s *InvalidCommitIdException) OrigErr() error { return nil } -func (s InvalidCommitIdException) Error() string { +func (s *InvalidCommitIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidCommitIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidCommitIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidCommitIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidCommitIdException) RequestID() string { + return s.RespMetadata.RequestID } // The specified conflict detail level is not valid. type InvalidConflictDetailLevelException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20438,17 +20857,17 @@ func (s InvalidConflictDetailLevelException) GoString() string { func newErrorInvalidConflictDetailLevelException(v protocol.ResponseMetadata) error { return &InvalidConflictDetailLevelException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidConflictDetailLevelException) Code() string { +func (s *InvalidConflictDetailLevelException) Code() string { return "InvalidConflictDetailLevelException" } // Message returns the exception's message. -func (s InvalidConflictDetailLevelException) Message() string { +func (s *InvalidConflictDetailLevelException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20456,28 +20875,28 @@ func (s InvalidConflictDetailLevelException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidConflictDetailLevelException) OrigErr() error { +func (s *InvalidConflictDetailLevelException) OrigErr() error { return nil } -func (s InvalidConflictDetailLevelException) Error() string { +func (s *InvalidConflictDetailLevelException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidConflictDetailLevelException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidConflictDetailLevelException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidConflictDetailLevelException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidConflictDetailLevelException) RequestID() string { + return s.RespMetadata.RequestID } // The specified conflict resolution list is not valid. 
type InvalidConflictResolutionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20494,17 +20913,17 @@ func (s InvalidConflictResolutionException) GoString() string { func newErrorInvalidConflictResolutionException(v protocol.ResponseMetadata) error { return &InvalidConflictResolutionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidConflictResolutionException) Code() string { +func (s *InvalidConflictResolutionException) Code() string { return "InvalidConflictResolutionException" } // Message returns the exception's message. -func (s InvalidConflictResolutionException) Message() string { +func (s *InvalidConflictResolutionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20512,28 +20931,28 @@ func (s InvalidConflictResolutionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidConflictResolutionException) OrigErr() error { +func (s *InvalidConflictResolutionException) OrigErr() error { return nil } -func (s InvalidConflictResolutionException) Error() string { +func (s *InvalidConflictResolutionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidConflictResolutionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidConflictResolutionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidConflictResolutionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidConflictResolutionException) RequestID() string { + return s.RespMetadata.RequestID } // The specified conflict resolution strategy is not valid. type InvalidConflictResolutionStrategyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20550,17 +20969,17 @@ func (s InvalidConflictResolutionStrategyException) GoString() string { func newErrorInvalidConflictResolutionStrategyException(v protocol.ResponseMetadata) error { return &InvalidConflictResolutionStrategyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidConflictResolutionStrategyException) Code() string { +func (s *InvalidConflictResolutionStrategyException) Code() string { return "InvalidConflictResolutionStrategyException" } // Message returns the exception's message. -func (s InvalidConflictResolutionStrategyException) Message() string { +func (s *InvalidConflictResolutionStrategyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20568,28 +20987,28 @@ func (s InvalidConflictResolutionStrategyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidConflictResolutionStrategyException) OrigErr() error { +func (s *InvalidConflictResolutionStrategyException) OrigErr() error { return nil } -func (s InvalidConflictResolutionStrategyException) Error() string { +func (s *InvalidConflictResolutionStrategyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidConflictResolutionStrategyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidConflictResolutionStrategyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidConflictResolutionStrategyException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidConflictResolutionStrategyException) RequestID() string { + return s.RespMetadata.RequestID } // The specified continuation token is not valid. type InvalidContinuationTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20606,17 +21025,17 @@ func (s InvalidContinuationTokenException) GoString() string { func newErrorInvalidContinuationTokenException(v protocol.ResponseMetadata) error { return &InvalidContinuationTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidContinuationTokenException) Code() string { +func (s *InvalidContinuationTokenException) Code() string { return "InvalidContinuationTokenException" } // Message returns the exception's message. -func (s InvalidContinuationTokenException) Message() string { +func (s *InvalidContinuationTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20624,28 +21043,28 @@ func (s InvalidContinuationTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidContinuationTokenException) OrigErr() error { +func (s *InvalidContinuationTokenException) OrigErr() error { return nil } -func (s InvalidContinuationTokenException) Error() string { +func (s *InvalidContinuationTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidContinuationTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidContinuationTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidContinuationTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidContinuationTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The specified deletion parameter is not valid. 
type InvalidDeletionParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20662,17 +21081,17 @@ func (s InvalidDeletionParameterException) GoString() string { func newErrorInvalidDeletionParameterException(v protocol.ResponseMetadata) error { return &InvalidDeletionParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeletionParameterException) Code() string { +func (s *InvalidDeletionParameterException) Code() string { return "InvalidDeletionParameterException" } // Message returns the exception's message. -func (s InvalidDeletionParameterException) Message() string { +func (s *InvalidDeletionParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20680,29 +21099,29 @@ func (s InvalidDeletionParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDeletionParameterException) OrigErr() error { +func (s *InvalidDeletionParameterException) OrigErr() error { return nil } -func (s InvalidDeletionParameterException) Error() string { +func (s *InvalidDeletionParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeletionParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeletionParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeletionParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeletionParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The pull request description is not valid. Descriptions cannot be more than // 1,000 characters. type InvalidDescriptionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20719,17 +21138,17 @@ func (s InvalidDescriptionException) GoString() string { func newErrorInvalidDescriptionException(v protocol.ResponseMetadata) error { return &InvalidDescriptionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDescriptionException) Code() string { +func (s *InvalidDescriptionException) Code() string { return "InvalidDescriptionException" } // Message returns the exception's message. -func (s InvalidDescriptionException) Message() string { +func (s *InvalidDescriptionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20737,29 +21156,29 @@ func (s InvalidDescriptionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDescriptionException) OrigErr() error { +func (s *InvalidDescriptionException) OrigErr() error { return nil } -func (s InvalidDescriptionException) Error() string { +func (s *InvalidDescriptionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidDescriptionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDescriptionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDescriptionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDescriptionException) RequestID() string { + return s.RespMetadata.RequestID } // The destination commit specifier is not valid. You must provide a valid branch // name, tag, or full commit ID. type InvalidDestinationCommitSpecifierException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20776,17 +21195,17 @@ func (s InvalidDestinationCommitSpecifierException) GoString() string { func newErrorInvalidDestinationCommitSpecifierException(v protocol.ResponseMetadata) error { return &InvalidDestinationCommitSpecifierException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDestinationCommitSpecifierException) Code() string { +func (s *InvalidDestinationCommitSpecifierException) Code() string { return "InvalidDestinationCommitSpecifierException" } // Message returns the exception's message. -func (s InvalidDestinationCommitSpecifierException) Message() string { +func (s *InvalidDestinationCommitSpecifierException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20794,30 +21213,30 @@ func (s InvalidDestinationCommitSpecifierException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDestinationCommitSpecifierException) OrigErr() error { +func (s *InvalidDestinationCommitSpecifierException) OrigErr() error { return nil } -func (s InvalidDestinationCommitSpecifierException) Error() string { +func (s *InvalidDestinationCommitSpecifierException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDestinationCommitSpecifierException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDestinationCommitSpecifierException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDestinationCommitSpecifierException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDestinationCommitSpecifierException) RequestID() string { + return s.RespMetadata.RequestID } // The specified email address either contains one or more characters that are // not allowed, or it exceeds the maximum number of characters allowed for an // email address. type InvalidEmailException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20834,17 +21253,17 @@ func (s InvalidEmailException) GoString() string { func newErrorInvalidEmailException(v protocol.ResponseMetadata) error { return &InvalidEmailException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidEmailException) Code() string { +func (s *InvalidEmailException) Code() string { return "InvalidEmailException" } // Message returns the exception's message. -func (s InvalidEmailException) Message() string { +func (s *InvalidEmailException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20852,29 +21271,29 @@ func (s InvalidEmailException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidEmailException) OrigErr() error { +func (s *InvalidEmailException) OrigErr() error { return nil } -func (s InvalidEmailException) Error() string { +func (s *InvalidEmailException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidEmailException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidEmailException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidEmailException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidEmailException) RequestID() string { + return s.RespMetadata.RequestID } // The location of the file is not valid. Make sure that you include the file // name and extension. type InvalidFileLocationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20891,17 +21310,17 @@ func (s InvalidFileLocationException) GoString() string { func newErrorInvalidFileLocationException(v protocol.ResponseMetadata) error { return &InvalidFileLocationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidFileLocationException) Code() string { +func (s *InvalidFileLocationException) Code() string { return "InvalidFileLocationException" } // Message returns the exception's message. -func (s InvalidFileLocationException) Message() string { +func (s *InvalidFileLocationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20909,29 +21328,29 @@ func (s InvalidFileLocationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidFileLocationException) OrigErr() error { +func (s *InvalidFileLocationException) OrigErr() error { return nil } -func (s InvalidFileLocationException) Error() string { +func (s *InvalidFileLocationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidFileLocationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidFileLocationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidFileLocationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidFileLocationException) RequestID() string { + return s.RespMetadata.RequestID } // The specified file mode permission is not valid. For a list of valid file // mode permissions, see PutFile. 
type InvalidFileModeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20948,17 +21367,17 @@ func (s InvalidFileModeException) GoString() string { func newErrorInvalidFileModeException(v protocol.ResponseMetadata) error { return &InvalidFileModeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidFileModeException) Code() string { +func (s *InvalidFileModeException) Code() string { return "InvalidFileModeException" } // Message returns the exception's message. -func (s InvalidFileModeException) Message() string { +func (s *InvalidFileModeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20966,29 +21385,29 @@ func (s InvalidFileModeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidFileModeException) OrigErr() error { +func (s *InvalidFileModeException) OrigErr() error { return nil } -func (s InvalidFileModeException) Error() string { +func (s *InvalidFileModeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidFileModeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidFileModeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidFileModeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidFileModeException) RequestID() string { + return s.RespMetadata.RequestID } // The position is not valid. Make sure that the line number exists in the version // of the file you want to comment on. type InvalidFilePositionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21005,17 +21424,17 @@ func (s InvalidFilePositionException) GoString() string { func newErrorInvalidFilePositionException(v protocol.ResponseMetadata) error { return &InvalidFilePositionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidFilePositionException) Code() string { +func (s *InvalidFilePositionException) Code() string { return "InvalidFilePositionException" } // Message returns the exception's message. -func (s InvalidFilePositionException) Message() string { +func (s *InvalidFilePositionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21023,28 +21442,28 @@ func (s InvalidFilePositionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidFilePositionException) OrigErr() error { +func (s *InvalidFilePositionException) OrigErr() error { return nil } -func (s InvalidFilePositionException) Error() string { +func (s *InvalidFilePositionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidFilePositionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidFilePositionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidFilePositionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidFilePositionException) RequestID() string { + return s.RespMetadata.RequestID } // The specified value for the number of conflict files to return is not valid. type InvalidMaxConflictFilesException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21061,17 +21480,17 @@ func (s InvalidMaxConflictFilesException) GoString() string { func newErrorInvalidMaxConflictFilesException(v protocol.ResponseMetadata) error { return &InvalidMaxConflictFilesException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidMaxConflictFilesException) Code() string { +func (s *InvalidMaxConflictFilesException) Code() string { return "InvalidMaxConflictFilesException" } // Message returns the exception's message. -func (s InvalidMaxConflictFilesException) Message() string { +func (s *InvalidMaxConflictFilesException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21079,28 +21498,28 @@ func (s InvalidMaxConflictFilesException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidMaxConflictFilesException) OrigErr() error { +func (s *InvalidMaxConflictFilesException) OrigErr() error { return nil } -func (s InvalidMaxConflictFilesException) Error() string { +func (s *InvalidMaxConflictFilesException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidMaxConflictFilesException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidMaxConflictFilesException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidMaxConflictFilesException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidMaxConflictFilesException) RequestID() string { + return s.RespMetadata.RequestID } // The specified value for the number of merge hunks to return is not valid. type InvalidMaxMergeHunksException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21117,17 +21536,17 @@ func (s InvalidMaxMergeHunksException) GoString() string { func newErrorInvalidMaxMergeHunksException(v protocol.ResponseMetadata) error { return &InvalidMaxMergeHunksException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidMaxMergeHunksException) Code() string { +func (s *InvalidMaxMergeHunksException) Code() string { return "InvalidMaxMergeHunksException" } // Message returns the exception's message. 
-func (s InvalidMaxMergeHunksException) Message() string { +func (s *InvalidMaxMergeHunksException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21135,28 +21554,28 @@ func (s InvalidMaxMergeHunksException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidMaxMergeHunksException) OrigErr() error { +func (s *InvalidMaxMergeHunksException) OrigErr() error { return nil } -func (s InvalidMaxMergeHunksException) Error() string { +func (s *InvalidMaxMergeHunksException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidMaxMergeHunksException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidMaxMergeHunksException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidMaxMergeHunksException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidMaxMergeHunksException) RequestID() string { + return s.RespMetadata.RequestID } // The specified number of maximum results is not valid. type InvalidMaxResultsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21173,17 +21592,17 @@ func (s InvalidMaxResultsException) GoString() string { func newErrorInvalidMaxResultsException(v protocol.ResponseMetadata) error { return &InvalidMaxResultsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidMaxResultsException) Code() string { +func (s *InvalidMaxResultsException) Code() string { return "InvalidMaxResultsException" } // Message returns the exception's message. -func (s InvalidMaxResultsException) Message() string { +func (s *InvalidMaxResultsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21191,29 +21610,29 @@ func (s InvalidMaxResultsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidMaxResultsException) OrigErr() error { +func (s *InvalidMaxResultsException) OrigErr() error { return nil } -func (s InvalidMaxResultsException) Error() string { +func (s *InvalidMaxResultsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidMaxResultsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidMaxResultsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidMaxResultsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidMaxResultsException) RequestID() string { + return s.RespMetadata.RequestID } // The specified merge option is not valid for this operation. Not all merge // strategies are supported for all operations. 
type InvalidMergeOptionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21230,17 +21649,17 @@ func (s InvalidMergeOptionException) GoString() string { func newErrorInvalidMergeOptionException(v protocol.ResponseMetadata) error { return &InvalidMergeOptionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidMergeOptionException) Code() string { +func (s *InvalidMergeOptionException) Code() string { return "InvalidMergeOptionException" } // Message returns the exception's message. -func (s InvalidMergeOptionException) Message() string { +func (s *InvalidMergeOptionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21248,28 +21667,28 @@ func (s InvalidMergeOptionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidMergeOptionException) OrigErr() error { +func (s *InvalidMergeOptionException) OrigErr() error { return nil } -func (s InvalidMergeOptionException) Error() string { +func (s *InvalidMergeOptionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidMergeOptionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidMergeOptionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidMergeOptionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidMergeOptionException) RequestID() string { + return s.RespMetadata.RequestID } // The specified sort order is not valid. type InvalidOrderException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21286,17 +21705,17 @@ func (s InvalidOrderException) GoString() string { func newErrorInvalidOrderException(v protocol.ResponseMetadata) error { return &InvalidOrderException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOrderException) Code() string { +func (s *InvalidOrderException) Code() string { return "InvalidOrderException" } // Message returns the exception's message. -func (s InvalidOrderException) Message() string { +func (s *InvalidOrderException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21304,28 +21723,28 @@ func (s InvalidOrderException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOrderException) OrigErr() error { +func (s *InvalidOrderException) OrigErr() error { return nil } -func (s InvalidOrderException) Error() string { +func (s *InvalidOrderException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOrderException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOrderException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidOrderException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOrderException) RequestID() string { + return s.RespMetadata.RequestID } // The override status is not valid. Valid statuses are OVERRIDE and REVOKE. type InvalidOverrideStatusException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21342,17 +21761,17 @@ func (s InvalidOverrideStatusException) GoString() string { func newErrorInvalidOverrideStatusException(v protocol.ResponseMetadata) error { return &InvalidOverrideStatusException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOverrideStatusException) Code() string { +func (s *InvalidOverrideStatusException) Code() string { return "InvalidOverrideStatusException" } // Message returns the exception's message. -func (s InvalidOverrideStatusException) Message() string { +func (s *InvalidOverrideStatusException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21360,30 +21779,30 @@ func (s InvalidOverrideStatusException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOverrideStatusException) OrigErr() error { +func (s *InvalidOverrideStatusException) OrigErr() error { return nil } -func (s InvalidOverrideStatusException) Error() string { +func (s *InvalidOverrideStatusException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOverrideStatusException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOverrideStatusException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidOverrideStatusException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOverrideStatusException) RequestID() string { + return s.RespMetadata.RequestID } // The parent commit ID is not valid. The commit ID cannot be empty, and must // match the head commit ID for the branch of the repository where you want // to add or update a file. type InvalidParentCommitIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21400,17 +21819,17 @@ func (s InvalidParentCommitIdException) GoString() string { func newErrorInvalidParentCommitIdException(v protocol.ResponseMetadata) error { return &InvalidParentCommitIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParentCommitIdException) Code() string { +func (s *InvalidParentCommitIdException) Code() string { return "InvalidParentCommitIdException" } // Message returns the exception's message. -func (s InvalidParentCommitIdException) Message() string { +func (s *InvalidParentCommitIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21418,28 +21837,28 @@ func (s InvalidParentCommitIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidParentCommitIdException) OrigErr() error { +func (s *InvalidParentCommitIdException) OrigErr() error { return nil } -func (s InvalidParentCommitIdException) Error() string { +func (s *InvalidParentCommitIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParentCommitIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParentCommitIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParentCommitIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParentCommitIdException) RequestID() string { + return s.RespMetadata.RequestID } // The specified path is not valid. type InvalidPathException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21456,17 +21875,17 @@ func (s InvalidPathException) GoString() string { func newErrorInvalidPathException(v protocol.ResponseMetadata) error { return &InvalidPathException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPathException) Code() string { +func (s *InvalidPathException) Code() string { return "InvalidPathException" } // Message returns the exception's message. -func (s InvalidPathException) Message() string { +func (s *InvalidPathException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21474,28 +21893,28 @@ func (s InvalidPathException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPathException) OrigErr() error { +func (s *InvalidPathException) OrigErr() error { return nil } -func (s InvalidPathException) Error() string { +func (s *InvalidPathException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPathException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPathException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPathException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPathException) RequestID() string { + return s.RespMetadata.RequestID } // The pull request event type is not valid. type InvalidPullRequestEventTypeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21512,17 +21931,17 @@ func (s InvalidPullRequestEventTypeException) GoString() string { func newErrorInvalidPullRequestEventTypeException(v protocol.ResponseMetadata) error { return &InvalidPullRequestEventTypeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPullRequestEventTypeException) Code() string { +func (s *InvalidPullRequestEventTypeException) Code() string { return "InvalidPullRequestEventTypeException" } // Message returns the exception's message. 
-func (s InvalidPullRequestEventTypeException) Message() string { +func (s *InvalidPullRequestEventTypeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21530,30 +21949,30 @@ func (s InvalidPullRequestEventTypeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPullRequestEventTypeException) OrigErr() error { +func (s *InvalidPullRequestEventTypeException) OrigErr() error { return nil } -func (s InvalidPullRequestEventTypeException) Error() string { +func (s *InvalidPullRequestEventTypeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPullRequestEventTypeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPullRequestEventTypeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPullRequestEventTypeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPullRequestEventTypeException) RequestID() string { + return s.RespMetadata.RequestID } // The pull request ID is not valid. Make sure that you have provided the full // ID and that the pull request is in the specified repository, and then try // again. type InvalidPullRequestIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21570,17 +21989,17 @@ func (s InvalidPullRequestIdException) GoString() string { func newErrorInvalidPullRequestIdException(v protocol.ResponseMetadata) error { return &InvalidPullRequestIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPullRequestIdException) Code() string { +func (s *InvalidPullRequestIdException) Code() string { return "InvalidPullRequestIdException" } // Message returns the exception's message. -func (s InvalidPullRequestIdException) Message() string { +func (s *InvalidPullRequestIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21588,29 +22007,29 @@ func (s InvalidPullRequestIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPullRequestIdException) OrigErr() error { +func (s *InvalidPullRequestIdException) OrigErr() error { return nil } -func (s InvalidPullRequestIdException) Error() string { +func (s *InvalidPullRequestIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPullRequestIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPullRequestIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPullRequestIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPullRequestIdException) RequestID() string { + return s.RespMetadata.RequestID } // The pull request status is not valid. The only valid values are OPEN and // CLOSED. 
type InvalidPullRequestStatusException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21627,17 +22046,17 @@ func (s InvalidPullRequestStatusException) GoString() string { func newErrorInvalidPullRequestStatusException(v protocol.ResponseMetadata) error { return &InvalidPullRequestStatusException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPullRequestStatusException) Code() string { +func (s *InvalidPullRequestStatusException) Code() string { return "InvalidPullRequestStatusException" } // Message returns the exception's message. -func (s InvalidPullRequestStatusException) Message() string { +func (s *InvalidPullRequestStatusException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21645,29 +22064,29 @@ func (s InvalidPullRequestStatusException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPullRequestStatusException) OrigErr() error { +func (s *InvalidPullRequestStatusException) OrigErr() error { return nil } -func (s InvalidPullRequestStatusException) Error() string { +func (s *InvalidPullRequestStatusException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPullRequestStatusException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPullRequestStatusException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPullRequestStatusException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPullRequestStatusException) RequestID() string { + return s.RespMetadata.RequestID } // The pull request status update is not valid. The only valid update is from // OPEN to CLOSED. type InvalidPullRequestStatusUpdateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21684,17 +22103,73 @@ func (s InvalidPullRequestStatusUpdateException) GoString() string { func newErrorInvalidPullRequestStatusUpdateException(v protocol.ResponseMetadata) error { return &InvalidPullRequestStatusUpdateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPullRequestStatusUpdateException) Code() string { +func (s *InvalidPullRequestStatusUpdateException) Code() string { return "InvalidPullRequestStatusUpdateException" } // Message returns the exception's message. -func (s InvalidPullRequestStatusUpdateException) Message() string { +func (s *InvalidPullRequestStatusUpdateException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidPullRequestStatusUpdateException) OrigErr() error { + return nil +} + +func (s *InvalidPullRequestStatusUpdateException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *InvalidPullRequestStatusUpdateException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidPullRequestStatusUpdateException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The Amazon Resource Name (ARN) of the user or identity is not valid. +type InvalidReactionUserArnException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidReactionUserArnException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidReactionUserArnException) GoString() string { + return s.String() +} + +func newErrorInvalidReactionUserArnException(v protocol.ResponseMetadata) error { + return &InvalidReactionUserArnException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidReactionUserArnException) Code() string { + return "InvalidReactionUserArnException" +} + +// Message returns the exception's message. +func (s *InvalidReactionUserArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21702,22 +22177,79 @@ func (s InvalidPullRequestStatusUpdateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPullRequestStatusUpdateException) OrigErr() error { +func (s *InvalidReactionUserArnException) OrigErr() error { return nil } -func (s InvalidPullRequestStatusUpdateException) Error() string { +func (s *InvalidReactionUserArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPullRequestStatusUpdateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidReactionUserArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPullRequestStatusUpdateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidReactionUserArnException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The value of the reaction is not valid. For more information, see the AWS +// CodeCommit User Guide (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html). +type InvalidReactionValueException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidReactionValueException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidReactionValueException) GoString() string { + return s.String() +} + +func newErrorInvalidReactionValueException(v protocol.ResponseMetadata) error { + return &InvalidReactionValueException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidReactionValueException) Code() string { + return "InvalidReactionValueException" +} + +// Message returns the exception's message. +func (s *InvalidReactionValueException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *InvalidReactionValueException) OrigErr() error { + return nil +} + +func (s *InvalidReactionValueException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidReactionValueException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidReactionValueException) RequestID() string { + return s.RespMetadata.RequestID } // The specified reference name format is not valid. Reference names must conform @@ -21725,8 +22257,8 @@ func (s InvalidPullRequestStatusUpdateException) RequestID() string { // see Git Internals - Git References (https://git-scm.com/book/en/v2/Git-Internals-Git-References) // or consult your Git documentation. type InvalidReferenceNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21743,17 +22275,17 @@ func (s InvalidReferenceNameException) GoString() string { func newErrorInvalidReferenceNameException(v protocol.ResponseMetadata) error { return &InvalidReferenceNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidReferenceNameException) Code() string { +func (s *InvalidReferenceNameException) Code() string { return "InvalidReferenceNameException" } // Message returns the exception's message. -func (s InvalidReferenceNameException) Message() string { +func (s *InvalidReferenceNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21761,29 +22293,29 @@ func (s InvalidReferenceNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidReferenceNameException) OrigErr() error { +func (s *InvalidReferenceNameException) OrigErr() error { return nil } -func (s InvalidReferenceNameException) Error() string { +func (s *InvalidReferenceNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidReferenceNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidReferenceNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidReferenceNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidReferenceNameException) RequestID() string { + return s.RespMetadata.RequestID } // Either the enum is not in a valid format, or the specified file version enum // is not valid in respect to the current file version. type InvalidRelativeFileVersionEnumException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21800,17 +22332,17 @@ func (s InvalidRelativeFileVersionEnumException) GoString() string { func newErrorInvalidRelativeFileVersionEnumException(v protocol.ResponseMetadata) error { return &InvalidRelativeFileVersionEnumException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidRelativeFileVersionEnumException) Code() string { +func (s *InvalidRelativeFileVersionEnumException) Code() string { return "InvalidRelativeFileVersionEnumException" } // Message returns the exception's message. -func (s InvalidRelativeFileVersionEnumException) Message() string { +func (s *InvalidRelativeFileVersionEnumException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21818,29 +22350,29 @@ func (s InvalidRelativeFileVersionEnumException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRelativeFileVersionEnumException) OrigErr() error { +func (s *InvalidRelativeFileVersionEnumException) OrigErr() error { return nil } -func (s InvalidRelativeFileVersionEnumException) Error() string { +func (s *InvalidRelativeFileVersionEnumException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRelativeFileVersionEnumException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRelativeFileVersionEnumException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRelativeFileVersionEnumException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRelativeFileVersionEnumException) RequestID() string { + return s.RespMetadata.RequestID } // Automerge was specified for resolving the conflict, but the replacement type // is not valid or content is missing. type InvalidReplacementContentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21857,17 +22389,17 @@ func (s InvalidReplacementContentException) GoString() string { func newErrorInvalidReplacementContentException(v protocol.ResponseMetadata) error { return &InvalidReplacementContentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidReplacementContentException) Code() string { +func (s *InvalidReplacementContentException) Code() string { return "InvalidReplacementContentException" } // Message returns the exception's message. -func (s InvalidReplacementContentException) Message() string { +func (s *InvalidReplacementContentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21875,29 +22407,29 @@ func (s InvalidReplacementContentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidReplacementContentException) OrigErr() error { +func (s *InvalidReplacementContentException) OrigErr() error { return nil } -func (s InvalidReplacementContentException) Error() string { +func (s *InvalidReplacementContentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidReplacementContentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidReplacementContentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidReplacementContentException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidReplacementContentException) RequestID() string { + return s.RespMetadata.RequestID } // Automerge was specified for resolving the conflict, but the specified replacement // type is not valid. type InvalidReplacementTypeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21914,17 +22446,17 @@ func (s InvalidReplacementTypeException) GoString() string { func newErrorInvalidReplacementTypeException(v protocol.ResponseMetadata) error { return &InvalidReplacementTypeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidReplacementTypeException) Code() string { +func (s *InvalidReplacementTypeException) Code() string { return "InvalidReplacementTypeException" } // Message returns the exception's message. -func (s InvalidReplacementTypeException) Message() string { +func (s *InvalidReplacementTypeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21932,28 +22464,28 @@ func (s InvalidReplacementTypeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidReplacementTypeException) OrigErr() error { +func (s *InvalidReplacementTypeException) OrigErr() error { return nil } -func (s InvalidReplacementTypeException) Error() string { +func (s *InvalidReplacementTypeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidReplacementTypeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidReplacementTypeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidReplacementTypeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidReplacementTypeException) RequestID() string { + return s.RespMetadata.RequestID } // The specified repository description is not valid. type InvalidRepositoryDescriptionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21970,17 +22502,17 @@ func (s InvalidRepositoryDescriptionException) GoString() string { func newErrorInvalidRepositoryDescriptionException(v protocol.ResponseMetadata) error { return &InvalidRepositoryDescriptionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRepositoryDescriptionException) Code() string { +func (s *InvalidRepositoryDescriptionException) Code() string { return "InvalidRepositoryDescriptionException" } // Message returns the exception's message. -func (s InvalidRepositoryDescriptionException) Message() string { +func (s *InvalidRepositoryDescriptionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21988,22 +22520,22 @@ func (s InvalidRepositoryDescriptionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidRepositoryDescriptionException) OrigErr() error { +func (s *InvalidRepositoryDescriptionException) OrigErr() error { return nil } -func (s InvalidRepositoryDescriptionException) Error() string { +func (s *InvalidRepositoryDescriptionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRepositoryDescriptionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRepositoryDescriptionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRepositoryDescriptionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRepositoryDescriptionException) RequestID() string { + return s.RespMetadata.RequestID } // A specified repository name is not valid. @@ -22012,8 +22544,8 @@ func (s InvalidRepositoryDescriptionException) RequestID() string { // Other exceptions occur when a required repository parameter is missing, or // when a specified repository does not exist. type InvalidRepositoryNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22030,17 +22562,17 @@ func (s InvalidRepositoryNameException) GoString() string { func newErrorInvalidRepositoryNameException(v protocol.ResponseMetadata) error { return &InvalidRepositoryNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRepositoryNameException) Code() string { +func (s *InvalidRepositoryNameException) Code() string { return "InvalidRepositoryNameException" } // Message returns the exception's message. -func (s InvalidRepositoryNameException) Message() string { +func (s *InvalidRepositoryNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22048,28 +22580,28 @@ func (s InvalidRepositoryNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRepositoryNameException) OrigErr() error { +func (s *InvalidRepositoryNameException) OrigErr() error { return nil } -func (s InvalidRepositoryNameException) Error() string { +func (s *InvalidRepositoryNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRepositoryNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRepositoryNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRepositoryNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRepositoryNameException) RequestID() string { + return s.RespMetadata.RequestID } // One or more branch names specified for the trigger is not valid. 
type InvalidRepositoryTriggerBranchNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22086,17 +22618,17 @@ func (s InvalidRepositoryTriggerBranchNameException) GoString() string { func newErrorInvalidRepositoryTriggerBranchNameException(v protocol.ResponseMetadata) error { return &InvalidRepositoryTriggerBranchNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRepositoryTriggerBranchNameException) Code() string { +func (s *InvalidRepositoryTriggerBranchNameException) Code() string { return "InvalidRepositoryTriggerBranchNameException" } // Message returns the exception's message. -func (s InvalidRepositoryTriggerBranchNameException) Message() string { +func (s *InvalidRepositoryTriggerBranchNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22104,28 +22636,28 @@ func (s InvalidRepositoryTriggerBranchNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRepositoryTriggerBranchNameException) OrigErr() error { +func (s *InvalidRepositoryTriggerBranchNameException) OrigErr() error { return nil } -func (s InvalidRepositoryTriggerBranchNameException) Error() string { +func (s *InvalidRepositoryTriggerBranchNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRepositoryTriggerBranchNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRepositoryTriggerBranchNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRepositoryTriggerBranchNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRepositoryTriggerBranchNameException) RequestID() string { + return s.RespMetadata.RequestID } // The custom data provided for the trigger is not valid. type InvalidRepositoryTriggerCustomDataException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22142,17 +22674,17 @@ func (s InvalidRepositoryTriggerCustomDataException) GoString() string { func newErrorInvalidRepositoryTriggerCustomDataException(v protocol.ResponseMetadata) error { return &InvalidRepositoryTriggerCustomDataException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRepositoryTriggerCustomDataException) Code() string { +func (s *InvalidRepositoryTriggerCustomDataException) Code() string { return "InvalidRepositoryTriggerCustomDataException" } // Message returns the exception's message. -func (s InvalidRepositoryTriggerCustomDataException) Message() string { +func (s *InvalidRepositoryTriggerCustomDataException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22160,30 +22692,30 @@ func (s InvalidRepositoryTriggerCustomDataException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidRepositoryTriggerCustomDataException) OrigErr() error { +func (s *InvalidRepositoryTriggerCustomDataException) OrigErr() error { return nil } -func (s InvalidRepositoryTriggerCustomDataException) Error() string { +func (s *InvalidRepositoryTriggerCustomDataException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRepositoryTriggerCustomDataException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRepositoryTriggerCustomDataException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRepositoryTriggerCustomDataException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRepositoryTriggerCustomDataException) RequestID() string { + return s.RespMetadata.RequestID } // The Amazon Resource Name (ARN) for the trigger is not valid for the specified // destination. The most common reason for this error is that the ARN does not // meet the requirements for the service type. type InvalidRepositoryTriggerDestinationArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22200,17 +22732,17 @@ func (s InvalidRepositoryTriggerDestinationArnException) GoString() string { func newErrorInvalidRepositoryTriggerDestinationArnException(v protocol.ResponseMetadata) error { return &InvalidRepositoryTriggerDestinationArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRepositoryTriggerDestinationArnException) Code() string { +func (s *InvalidRepositoryTriggerDestinationArnException) Code() string { return "InvalidRepositoryTriggerDestinationArnException" } // Message returns the exception's message. -func (s InvalidRepositoryTriggerDestinationArnException) Message() string { +func (s *InvalidRepositoryTriggerDestinationArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22218,29 +22750,29 @@ func (s InvalidRepositoryTriggerDestinationArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRepositoryTriggerDestinationArnException) OrigErr() error { +func (s *InvalidRepositoryTriggerDestinationArnException) OrigErr() error { return nil } -func (s InvalidRepositoryTriggerDestinationArnException) Error() string { +func (s *InvalidRepositoryTriggerDestinationArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRepositoryTriggerDestinationArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRepositoryTriggerDestinationArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRepositoryTriggerDestinationArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRepositoryTriggerDestinationArnException) RequestID() string { + return s.RespMetadata.RequestID } // One or more events specified for the trigger is not valid. Check to make // sure that all events specified match the requirements for allowed events. 
type InvalidRepositoryTriggerEventsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22257,17 +22789,17 @@ func (s InvalidRepositoryTriggerEventsException) GoString() string { func newErrorInvalidRepositoryTriggerEventsException(v protocol.ResponseMetadata) error { return &InvalidRepositoryTriggerEventsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRepositoryTriggerEventsException) Code() string { +func (s *InvalidRepositoryTriggerEventsException) Code() string { return "InvalidRepositoryTriggerEventsException" } // Message returns the exception's message. -func (s InvalidRepositoryTriggerEventsException) Message() string { +func (s *InvalidRepositoryTriggerEventsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22275,28 +22807,28 @@ func (s InvalidRepositoryTriggerEventsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRepositoryTriggerEventsException) OrigErr() error { +func (s *InvalidRepositoryTriggerEventsException) OrigErr() error { return nil } -func (s InvalidRepositoryTriggerEventsException) Error() string { +func (s *InvalidRepositoryTriggerEventsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRepositoryTriggerEventsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRepositoryTriggerEventsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRepositoryTriggerEventsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRepositoryTriggerEventsException) RequestID() string { + return s.RespMetadata.RequestID } // The name of the trigger is not valid. type InvalidRepositoryTriggerNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22313,17 +22845,17 @@ func (s InvalidRepositoryTriggerNameException) GoString() string { func newErrorInvalidRepositoryTriggerNameException(v protocol.ResponseMetadata) error { return &InvalidRepositoryTriggerNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRepositoryTriggerNameException) Code() string { +func (s *InvalidRepositoryTriggerNameException) Code() string { return "InvalidRepositoryTriggerNameException" } // Message returns the exception's message. -func (s InvalidRepositoryTriggerNameException) Message() string { +func (s *InvalidRepositoryTriggerNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22331,30 +22863,30 @@ func (s InvalidRepositoryTriggerNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidRepositoryTriggerNameException) OrigErr() error { +func (s *InvalidRepositoryTriggerNameException) OrigErr() error { return nil } -func (s InvalidRepositoryTriggerNameException) Error() string { +func (s *InvalidRepositoryTriggerNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRepositoryTriggerNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRepositoryTriggerNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRepositoryTriggerNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRepositoryTriggerNameException) RequestID() string { + return s.RespMetadata.RequestID } // The AWS Region for the trigger target does not match the AWS Region for the // repository. Triggers must be created in the same Region as the target for // the trigger. type InvalidRepositoryTriggerRegionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22371,17 +22903,17 @@ func (s InvalidRepositoryTriggerRegionException) GoString() string { func newErrorInvalidRepositoryTriggerRegionException(v protocol.ResponseMetadata) error { return &InvalidRepositoryTriggerRegionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRepositoryTriggerRegionException) Code() string { +func (s *InvalidRepositoryTriggerRegionException) Code() string { return "InvalidRepositoryTriggerRegionException" } // Message returns the exception's message. -func (s InvalidRepositoryTriggerRegionException) Message() string { +func (s *InvalidRepositoryTriggerRegionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22389,30 +22921,30 @@ func (s InvalidRepositoryTriggerRegionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRepositoryTriggerRegionException) OrigErr() error { +func (s *InvalidRepositoryTriggerRegionException) OrigErr() error { return nil } -func (s InvalidRepositoryTriggerRegionException) Error() string { +func (s *InvalidRepositoryTriggerRegionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRepositoryTriggerRegionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRepositoryTriggerRegionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRepositoryTriggerRegionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRepositoryTriggerRegionException) RequestID() string { + return s.RespMetadata.RequestID } // The value for the resource ARN is not valid. For more information about resources // in AWS CodeCommit, see CodeCommit Resources and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) // in the AWS CodeCommit User Guide. 
type InvalidResourceArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22429,17 +22961,17 @@ func (s InvalidResourceArnException) GoString() string { func newErrorInvalidResourceArnException(v protocol.ResponseMetadata) error { return &InvalidResourceArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidResourceArnException) Code() string { +func (s *InvalidResourceArnException) Code() string { return "InvalidResourceArnException" } // Message returns the exception's message. -func (s InvalidResourceArnException) Message() string { +func (s *InvalidResourceArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22447,28 +22979,28 @@ func (s InvalidResourceArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResourceArnException) OrigErr() error { +func (s *InvalidResourceArnException) OrigErr() error { return nil } -func (s InvalidResourceArnException) Error() string { +func (s *InvalidResourceArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidResourceArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResourceArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidResourceArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResourceArnException) RequestID() string { + return s.RespMetadata.RequestID } // The revision ID is not valid. Use GetPullRequest to determine the value. type InvalidRevisionIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22485,17 +23017,17 @@ func (s InvalidRevisionIdException) GoString() string { func newErrorInvalidRevisionIdException(v protocol.ResponseMetadata) error { return &InvalidRevisionIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRevisionIdException) Code() string { +func (s *InvalidRevisionIdException) Code() string { return "InvalidRevisionIdException" } // Message returns the exception's message. -func (s InvalidRevisionIdException) Message() string { +func (s *InvalidRevisionIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22503,28 +23035,28 @@ func (s InvalidRevisionIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRevisionIdException) OrigErr() error { +func (s *InvalidRevisionIdException) OrigErr() error { return nil } -func (s InvalidRevisionIdException) Error() string { +func (s *InvalidRevisionIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidRevisionIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRevisionIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRevisionIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRevisionIdException) RequestID() string { + return s.RespMetadata.RequestID } // The SHA-256 hash signature for the rule content is not valid. type InvalidRuleContentSha256Exception struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22541,17 +23073,17 @@ func (s InvalidRuleContentSha256Exception) GoString() string { func newErrorInvalidRuleContentSha256Exception(v protocol.ResponseMetadata) error { return &InvalidRuleContentSha256Exception{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRuleContentSha256Exception) Code() string { +func (s *InvalidRuleContentSha256Exception) Code() string { return "InvalidRuleContentSha256Exception" } // Message returns the exception's message. -func (s InvalidRuleContentSha256Exception) Message() string { +func (s *InvalidRuleContentSha256Exception) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22559,28 +23091,28 @@ func (s InvalidRuleContentSha256Exception) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRuleContentSha256Exception) OrigErr() error { +func (s *InvalidRuleContentSha256Exception) OrigErr() error { return nil } -func (s InvalidRuleContentSha256Exception) Error() string { +func (s *InvalidRuleContentSha256Exception) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRuleContentSha256Exception) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRuleContentSha256Exception) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRuleContentSha256Exception) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRuleContentSha256Exception) RequestID() string { + return s.RespMetadata.RequestID } // The specified sort by value is not valid. type InvalidSortByException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22597,17 +23129,17 @@ func (s InvalidSortByException) GoString() string { func newErrorInvalidSortByException(v protocol.ResponseMetadata) error { return &InvalidSortByException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSortByException) Code() string { +func (s *InvalidSortByException) Code() string { return "InvalidSortByException" } // Message returns the exception's message. -func (s InvalidSortByException) Message() string { +func (s *InvalidSortByException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22615,29 +23147,29 @@ func (s InvalidSortByException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidSortByException) OrigErr() error { +func (s *InvalidSortByException) OrigErr() error { return nil } -func (s InvalidSortByException) Error() string { +func (s *InvalidSortByException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSortByException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSortByException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSortByException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSortByException) RequestID() string { + return s.RespMetadata.RequestID } // The source commit specifier is not valid. You must provide a valid branch // name, tag, or full commit ID. type InvalidSourceCommitSpecifierException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22654,17 +23186,17 @@ func (s InvalidSourceCommitSpecifierException) GoString() string { func newErrorInvalidSourceCommitSpecifierException(v protocol.ResponseMetadata) error { return &InvalidSourceCommitSpecifierException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSourceCommitSpecifierException) Code() string { +func (s *InvalidSourceCommitSpecifierException) Code() string { return "InvalidSourceCommitSpecifierException" } // Message returns the exception's message. -func (s InvalidSourceCommitSpecifierException) Message() string { +func (s *InvalidSourceCommitSpecifierException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22672,28 +23204,28 @@ func (s InvalidSourceCommitSpecifierException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidSourceCommitSpecifierException) OrigErr() error { +func (s *InvalidSourceCommitSpecifierException) OrigErr() error { return nil } -func (s InvalidSourceCommitSpecifierException) Error() string { +func (s *InvalidSourceCommitSpecifierException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSourceCommitSpecifierException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSourceCommitSpecifierException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSourceCommitSpecifierException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSourceCommitSpecifierException) RequestID() string { + return s.RespMetadata.RequestID } // The specified tag is not valid. Key names cannot be prefixed with aws:. type InvalidSystemTagUsageException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22710,17 +23242,17 @@ func (s InvalidSystemTagUsageException) GoString() string { func newErrorInvalidSystemTagUsageException(v protocol.ResponseMetadata) error { return &InvalidSystemTagUsageException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidSystemTagUsageException) Code() string { +func (s *InvalidSystemTagUsageException) Code() string { return "InvalidSystemTagUsageException" } // Message returns the exception's message. -func (s InvalidSystemTagUsageException) Message() string { +func (s *InvalidSystemTagUsageException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22728,28 +23260,28 @@ func (s InvalidSystemTagUsageException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidSystemTagUsageException) OrigErr() error { +func (s *InvalidSystemTagUsageException) OrigErr() error { return nil } -func (s InvalidSystemTagUsageException) Error() string { +func (s *InvalidSystemTagUsageException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSystemTagUsageException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSystemTagUsageException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSystemTagUsageException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSystemTagUsageException) RequestID() string { + return s.RespMetadata.RequestID } // The list of tags is not valid. type InvalidTagKeysListException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22766,17 +23298,17 @@ func (s InvalidTagKeysListException) GoString() string { func newErrorInvalidTagKeysListException(v protocol.ResponseMetadata) error { return &InvalidTagKeysListException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTagKeysListException) Code() string { +func (s *InvalidTagKeysListException) Code() string { return "InvalidTagKeysListException" } // Message returns the exception's message. -func (s InvalidTagKeysListException) Message() string { +func (s *InvalidTagKeysListException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22784,28 +23316,28 @@ func (s InvalidTagKeysListException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTagKeysListException) OrigErr() error { +func (s *InvalidTagKeysListException) OrigErr() error { return nil } -func (s InvalidTagKeysListException) Error() string { +func (s *InvalidTagKeysListException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTagKeysListException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTagKeysListException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTagKeysListException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTagKeysListException) RequestID() string { + return s.RespMetadata.RequestID } // The map of tags is not valid. 
type InvalidTagsMapException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22822,17 +23354,17 @@ func (s InvalidTagsMapException) GoString() string { func newErrorInvalidTagsMapException(v protocol.ResponseMetadata) error { return &InvalidTagsMapException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTagsMapException) Code() string { +func (s *InvalidTagsMapException) Code() string { return "InvalidTagsMapException" } // Message returns the exception's message. -func (s InvalidTagsMapException) Message() string { +func (s *InvalidTagsMapException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22840,28 +23372,28 @@ func (s InvalidTagsMapException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTagsMapException) OrigErr() error { +func (s *InvalidTagsMapException) OrigErr() error { return nil } -func (s InvalidTagsMapException) Error() string { +func (s *InvalidTagsMapException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTagsMapException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTagsMapException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTagsMapException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTagsMapException) RequestID() string { + return s.RespMetadata.RequestID } // The specified target branch is not valid. type InvalidTargetBranchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22878,17 +23410,17 @@ func (s InvalidTargetBranchException) GoString() string { func newErrorInvalidTargetBranchException(v protocol.ResponseMetadata) error { return &InvalidTargetBranchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTargetBranchException) Code() string { +func (s *InvalidTargetBranchException) Code() string { return "InvalidTargetBranchException" } // Message returns the exception's message. -func (s InvalidTargetBranchException) Message() string { +func (s *InvalidTargetBranchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22896,30 +23428,30 @@ func (s InvalidTargetBranchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTargetBranchException) OrigErr() error { +func (s *InvalidTargetBranchException) OrigErr() error { return nil } -func (s InvalidTargetBranchException) Error() string { +func (s *InvalidTargetBranchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTargetBranchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTargetBranchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidTargetBranchException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTargetBranchException) RequestID() string { + return s.RespMetadata.RequestID } // The target for the pull request is not valid. A target must contain the full // values for the repository name, source branch, and destination branch for // the pull request. type InvalidTargetException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22936,17 +23468,17 @@ func (s InvalidTargetException) GoString() string { func newErrorInvalidTargetException(v protocol.ResponseMetadata) error { return &InvalidTargetException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTargetException) Code() string { +func (s *InvalidTargetException) Code() string { return "InvalidTargetException" } // Message returns the exception's message. -func (s InvalidTargetException) Message() string { +func (s *InvalidTargetException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22954,22 +23486,22 @@ func (s InvalidTargetException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTargetException) OrigErr() error { +func (s *InvalidTargetException) OrigErr() error { return nil } -func (s InvalidTargetException) Error() string { +func (s *InvalidTargetException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTargetException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTargetException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTargetException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTargetException) RequestID() string { + return s.RespMetadata.RequestID } // The targets for the pull request is not valid or not in a valid format. Targets @@ -22977,8 +23509,8 @@ func (s InvalidTargetException) RequestID() string { // for the repository name, source branch, and destination branch for a pull // request. type InvalidTargetsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -22995,17 +23527,17 @@ func (s InvalidTargetsException) GoString() string { func newErrorInvalidTargetsException(v protocol.ResponseMetadata) error { return &InvalidTargetsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTargetsException) Code() string { +func (s *InvalidTargetsException) Code() string { return "InvalidTargetsException" } // Message returns the exception's message. -func (s InvalidTargetsException) Message() string { +func (s *InvalidTargetsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23013,29 +23545,29 @@ func (s InvalidTargetsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidTargetsException) OrigErr() error { +func (s *InvalidTargetsException) OrigErr() error { return nil } -func (s InvalidTargetsException) Error() string { +func (s *InvalidTargetsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTargetsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTargetsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTargetsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTargetsException) RequestID() string { + return s.RespMetadata.RequestID } // The title of the pull request is not valid. Pull request titles cannot exceed // 100 characters in length. type InvalidTitleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -23052,17 +23584,17 @@ func (s InvalidTitleException) GoString() string { func newErrorInvalidTitleException(v protocol.ResponseMetadata) error { return &InvalidTitleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTitleException) Code() string { +func (s *InvalidTitleException) Code() string { return "InvalidTitleException" } // Message returns the exception's message. -func (s InvalidTitleException) Message() string { +func (s *InvalidTitleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23070,22 +23602,22 @@ func (s InvalidTitleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTitleException) OrigErr() error { +func (s *InvalidTitleException) OrigErr() error { return nil } -func (s InvalidTitleException) Error() string { +func (s *InvalidTitleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTitleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTitleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTitleException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTitleException) RequestID() string { + return s.RespMetadata.RequestID } // Information about whether a file is binary or textual in a merge or pull @@ -23801,8 +24333,8 @@ func (s *Location) SetRelativeFileVersion(v string) *Location { // The pull request cannot be merged automatically into the destination branch. // You must manually merge the branches and resolve any conflicts. type ManualMergeRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -23819,17 +24351,17 @@ func (s ManualMergeRequiredException) GoString() string { func newErrorManualMergeRequiredException(v protocol.ResponseMetadata) error { return &ManualMergeRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ManualMergeRequiredException) Code() string { +func (s *ManualMergeRequiredException) Code() string { return "ManualMergeRequiredException" } // Message returns the exception's message. -func (s ManualMergeRequiredException) Message() string { +func (s *ManualMergeRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23837,28 +24369,28 @@ func (s ManualMergeRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ManualMergeRequiredException) OrigErr() error { +func (s *ManualMergeRequiredException) OrigErr() error { return nil } -func (s ManualMergeRequiredException) Error() string { +func (s *ManualMergeRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ManualMergeRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ManualMergeRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ManualMergeRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ManualMergeRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The number of branches for the trigger was exceeded. type MaximumBranchesExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -23875,17 +24407,17 @@ func (s MaximumBranchesExceededException) GoString() string { func newErrorMaximumBranchesExceededException(v protocol.ResponseMetadata) error { return &MaximumBranchesExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaximumBranchesExceededException) Code() string { +func (s *MaximumBranchesExceededException) Code() string { return "MaximumBranchesExceededException" } // Message returns the exception's message. -func (s MaximumBranchesExceededException) Message() string { +func (s *MaximumBranchesExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23893,28 +24425,28 @@ func (s MaximumBranchesExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaximumBranchesExceededException) OrigErr() error { +func (s *MaximumBranchesExceededException) OrigErr() error { return nil } -func (s MaximumBranchesExceededException) Error() string { +func (s *MaximumBranchesExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaximumBranchesExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaximumBranchesExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaximumBranchesExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaximumBranchesExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The number of allowed conflict resolution entries was exceeded. 
type MaximumConflictResolutionEntriesExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -23931,17 +24463,17 @@ func (s MaximumConflictResolutionEntriesExceededException) GoString() string { func newErrorMaximumConflictResolutionEntriesExceededException(v protocol.ResponseMetadata) error { return &MaximumConflictResolutionEntriesExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaximumConflictResolutionEntriesExceededException) Code() string { +func (s *MaximumConflictResolutionEntriesExceededException) Code() string { return "MaximumConflictResolutionEntriesExceededException" } // Message returns the exception's message. -func (s MaximumConflictResolutionEntriesExceededException) Message() string { +func (s *MaximumConflictResolutionEntriesExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23949,28 +24481,28 @@ func (s MaximumConflictResolutionEntriesExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaximumConflictResolutionEntriesExceededException) OrigErr() error { +func (s *MaximumConflictResolutionEntriesExceededException) OrigErr() error { return nil } -func (s MaximumConflictResolutionEntriesExceededException) Error() string { +func (s *MaximumConflictResolutionEntriesExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaximumConflictResolutionEntriesExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaximumConflictResolutionEntriesExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaximumConflictResolutionEntriesExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaximumConflictResolutionEntriesExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The number of files to load exceeds the allowed limit. type MaximumFileContentToLoadExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -23987,17 +24519,17 @@ func (s MaximumFileContentToLoadExceededException) GoString() string { func newErrorMaximumFileContentToLoadExceededException(v protocol.ResponseMetadata) error { return &MaximumFileContentToLoadExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaximumFileContentToLoadExceededException) Code() string { +func (s *MaximumFileContentToLoadExceededException) Code() string { return "MaximumFileContentToLoadExceededException" } // Message returns the exception's message. -func (s MaximumFileContentToLoadExceededException) Message() string { +func (s *MaximumFileContentToLoadExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -24005,30 +24537,30 @@ func (s MaximumFileContentToLoadExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s MaximumFileContentToLoadExceededException) OrigErr() error { +func (s *MaximumFileContentToLoadExceededException) OrigErr() error { return nil } -func (s MaximumFileContentToLoadExceededException) Error() string { +func (s *MaximumFileContentToLoadExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaximumFileContentToLoadExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaximumFileContentToLoadExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaximumFileContentToLoadExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaximumFileContentToLoadExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The number of specified files to change as part of this commit exceeds the // maximum number of files that can be changed in a single commit. Consider // using a Git client for these changes. type MaximumFileEntriesExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -24045,17 +24577,17 @@ func (s MaximumFileEntriesExceededException) GoString() string { func newErrorMaximumFileEntriesExceededException(v protocol.ResponseMetadata) error { return &MaximumFileEntriesExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaximumFileEntriesExceededException) Code() string { +func (s *MaximumFileEntriesExceededException) Code() string { return "MaximumFileEntriesExceededException" } // Message returns the exception's message. -func (s MaximumFileEntriesExceededException) Message() string { +func (s *MaximumFileEntriesExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -24063,29 +24595,29 @@ func (s MaximumFileEntriesExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaximumFileEntriesExceededException) OrigErr() error { +func (s *MaximumFileEntriesExceededException) OrigErr() error { return nil } -func (s MaximumFileEntriesExceededException) Error() string { +func (s *MaximumFileEntriesExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaximumFileEntriesExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaximumFileEntriesExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaximumFileEntriesExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaximumFileEntriesExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The number of items to compare between the source or destination branches // and the merge base has exceeded the maximum allowed. 
type MaximumItemsToCompareExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -24102,17 +24634,17 @@ func (s MaximumItemsToCompareExceededException) GoString() string { func newErrorMaximumItemsToCompareExceededException(v protocol.ResponseMetadata) error { return &MaximumItemsToCompareExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaximumItemsToCompareExceededException) Code() string { +func (s *MaximumItemsToCompareExceededException) Code() string { return "MaximumItemsToCompareExceededException" } // Message returns the exception's message. -func (s MaximumItemsToCompareExceededException) Message() string { +func (s *MaximumItemsToCompareExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -24120,29 +24652,29 @@ func (s MaximumItemsToCompareExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaximumItemsToCompareExceededException) OrigErr() error { +func (s *MaximumItemsToCompareExceededException) OrigErr() error { return nil } -func (s MaximumItemsToCompareExceededException) Error() string { +func (s *MaximumItemsToCompareExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaximumItemsToCompareExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaximumItemsToCompareExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaximumItemsToCompareExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaximumItemsToCompareExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The number of approvals required for the approval rule exceeds the maximum // number allowed. type MaximumNumberOfApprovalsExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -24159,17 +24691,17 @@ func (s MaximumNumberOfApprovalsExceededException) GoString() string { func newErrorMaximumNumberOfApprovalsExceededException(v protocol.ResponseMetadata) error { return &MaximumNumberOfApprovalsExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaximumNumberOfApprovalsExceededException) Code() string { +func (s *MaximumNumberOfApprovalsExceededException) Code() string { return "MaximumNumberOfApprovalsExceededException" } // Message returns the exception's message. -func (s MaximumNumberOfApprovalsExceededException) Message() string { +func (s *MaximumNumberOfApprovalsExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -24177,30 +24709,30 @@ func (s MaximumNumberOfApprovalsExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s MaximumNumberOfApprovalsExceededException) OrigErr() error { +func (s *MaximumNumberOfApprovalsExceededException) OrigErr() error { return nil } -func (s MaximumNumberOfApprovalsExceededException) Error() string { +func (s *MaximumNumberOfApprovalsExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaximumNumberOfApprovalsExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaximumNumberOfApprovalsExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaximumNumberOfApprovalsExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaximumNumberOfApprovalsExceededException) RequestID() string { + return s.RespMetadata.RequestID } // You cannot create the pull request because the repository has too many open // pull requests. The maximum number of open pull requests for a repository // is 1,000. Close one or more open pull requests, and then try again. type MaximumOpenPullRequestsExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -24217,17 +24749,17 @@ func (s MaximumOpenPullRequestsExceededException) GoString() string { func newErrorMaximumOpenPullRequestsExceededException(v protocol.ResponseMetadata) error { return &MaximumOpenPullRequestsExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaximumOpenPullRequestsExceededException) Code() string { +func (s *MaximumOpenPullRequestsExceededException) Code() string { return "MaximumOpenPullRequestsExceededException" } // Message returns the exception's message. -func (s MaximumOpenPullRequestsExceededException) Message() string { +func (s *MaximumOpenPullRequestsExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -24235,29 +24767,29 @@ func (s MaximumOpenPullRequestsExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaximumOpenPullRequestsExceededException) OrigErr() error { +func (s *MaximumOpenPullRequestsExceededException) OrigErr() error { return nil } -func (s MaximumOpenPullRequestsExceededException) Error() string { +func (s *MaximumOpenPullRequestsExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaximumOpenPullRequestsExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaximumOpenPullRequestsExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaximumOpenPullRequestsExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaximumOpenPullRequestsExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The maximum number of allowed repository names was exceeded. Currently, this // number is 100. 
type MaximumRepositoryNamesExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -24274,17 +24806,17 @@ func (s MaximumRepositoryNamesExceededException) GoString() string { func newErrorMaximumRepositoryNamesExceededException(v protocol.ResponseMetadata) error { return &MaximumRepositoryNamesExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaximumRepositoryNamesExceededException) Code() string { +func (s *MaximumRepositoryNamesExceededException) Code() string { return "MaximumRepositoryNamesExceededException" } // Message returns the exception's message. -func (s MaximumRepositoryNamesExceededException) Message() string { +func (s *MaximumRepositoryNamesExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -24292,28 +24824,28 @@ func (s MaximumRepositoryNamesExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaximumRepositoryNamesExceededException) OrigErr() error { +func (s *MaximumRepositoryNamesExceededException) OrigErr() error { return nil } -func (s MaximumRepositoryNamesExceededException) Error() string { +func (s *MaximumRepositoryNamesExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaximumRepositoryNamesExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaximumRepositoryNamesExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaximumRepositoryNamesExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaximumRepositoryNamesExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The number of triggers allowed for the repository was exceeded. type MaximumRepositoryTriggersExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -24330,17 +24862,17 @@ func (s MaximumRepositoryTriggersExceededException) GoString() string { func newErrorMaximumRepositoryTriggersExceededException(v protocol.ResponseMetadata) error { return &MaximumRepositoryTriggersExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaximumRepositoryTriggersExceededException) Code() string { +func (s *MaximumRepositoryTriggersExceededException) Code() string { return "MaximumRepositoryTriggersExceededException" } // Message returns the exception's message. -func (s MaximumRepositoryTriggersExceededException) Message() string { +func (s *MaximumRepositoryTriggersExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -24348,29 +24880,29 @@ func (s MaximumRepositoryTriggersExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s MaximumRepositoryTriggersExceededException) OrigErr() error { +func (s *MaximumRepositoryTriggersExceededException) OrigErr() error { return nil } -func (s MaximumRepositoryTriggersExceededException) Error() string { +func (s *MaximumRepositoryTriggersExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaximumRepositoryTriggersExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaximumRepositoryTriggersExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaximumRepositoryTriggersExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaximumRepositoryTriggersExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The maximum number of approval rule templates for a repository has been exceeded. // You cannot associate more than 25 approval rule templates with a repository. type MaximumRuleTemplatesAssociatedWithRepositoryException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -24387,17 +24919,17 @@ func (s MaximumRuleTemplatesAssociatedWithRepositoryException) GoString() string func newErrorMaximumRuleTemplatesAssociatedWithRepositoryException(v protocol.ResponseMetadata) error { return &MaximumRuleTemplatesAssociatedWithRepositoryException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaximumRuleTemplatesAssociatedWithRepositoryException) Code() string { +func (s *MaximumRuleTemplatesAssociatedWithRepositoryException) Code() string { return "MaximumRuleTemplatesAssociatedWithRepositoryException" } // Message returns the exception's message. -func (s MaximumRuleTemplatesAssociatedWithRepositoryException) Message() string { +func (s *MaximumRuleTemplatesAssociatedWithRepositoryException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -24405,22 +24937,22 @@ func (s MaximumRuleTemplatesAssociatedWithRepositoryException) Message() string } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaximumRuleTemplatesAssociatedWithRepositoryException) OrigErr() error { +func (s *MaximumRuleTemplatesAssociatedWithRepositoryException) OrigErr() error { return nil } -func (s MaximumRuleTemplatesAssociatedWithRepositoryException) Error() string { +func (s *MaximumRuleTemplatesAssociatedWithRepositoryException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaximumRuleTemplatesAssociatedWithRepositoryException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaximumRuleTemplatesAssociatedWithRepositoryException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s MaximumRuleTemplatesAssociatedWithRepositoryException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaximumRuleTemplatesAssociatedWithRepositoryException) RequestID() string { + return s.RespMetadata.RequestID } type MergeBranchesByFastForwardInput struct { @@ -25114,8 +25646,8 @@ func (s *MergeOperations) SetSource(v string) *MergeOperations { // A merge option or strategy is required, and none was provided. type MergeOptionRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -25132,17 +25664,17 @@ func (s MergeOptionRequiredException) GoString() string { func newErrorMergeOptionRequiredException(v protocol.ResponseMetadata) error { return &MergeOptionRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MergeOptionRequiredException) Code() string { +func (s *MergeOptionRequiredException) Code() string { return "MergeOptionRequiredException" } // Message returns the exception's message. -func (s MergeOptionRequiredException) Message() string { +func (s *MergeOptionRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25150,22 +25682,22 @@ func (s MergeOptionRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MergeOptionRequiredException) OrigErr() error { +func (s *MergeOptionRequiredException) OrigErr() error { return nil } -func (s MergeOptionRequiredException) Error() string { +func (s *MergeOptionRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MergeOptionRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MergeOptionRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MergeOptionRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *MergeOptionRequiredException) RequestID() string { + return s.RespMetadata.RequestID } type MergePullRequestByFastForwardInput struct { @@ -25596,8 +26128,8 @@ func (s *MergePullRequestByThreeWayOutput) SetPullRequest(v *PullRequest) *Merge // More than one conflict resolution entry exists for the conflict. A conflict // can have only one conflict resolution entry. type MultipleConflictResolutionEntriesException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -25614,17 +26146,17 @@ func (s MultipleConflictResolutionEntriesException) GoString() string { func newErrorMultipleConflictResolutionEntriesException(v protocol.ResponseMetadata) error { return &MultipleConflictResolutionEntriesException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MultipleConflictResolutionEntriesException) Code() string { +func (s *MultipleConflictResolutionEntriesException) Code() string { return "MultipleConflictResolutionEntriesException" } // Message returns the exception's message.
-func (s MultipleConflictResolutionEntriesException) Message() string { +func (s *MultipleConflictResolutionEntriesException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25632,30 +26164,30 @@ func (s MultipleConflictResolutionEntriesException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MultipleConflictResolutionEntriesException) OrigErr() error { +func (s *MultipleConflictResolutionEntriesException) OrigErr() error { return nil } -func (s MultipleConflictResolutionEntriesException) Error() string { +func (s *MultipleConflictResolutionEntriesException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MultipleConflictResolutionEntriesException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MultipleConflictResolutionEntriesException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MultipleConflictResolutionEntriesException) RequestID() string { - return s.respMetadata.RequestID +func (s *MultipleConflictResolutionEntriesException) RequestID() string { + return s.RespMetadata.RequestID } // You cannot include more than one repository in a pull request. Make sure // you have specified only one repository name in your request, and then try // again. type MultipleRepositoriesInPullRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -25672,17 +26204,17 @@ func (s MultipleRepositoriesInPullRequestException) GoString() string { func newErrorMultipleRepositoriesInPullRequestException(v protocol.ResponseMetadata) error { return &MultipleRepositoriesInPullRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MultipleRepositoriesInPullRequestException) Code() string { +func (s *MultipleRepositoriesInPullRequestException) Code() string { return "MultipleRepositoriesInPullRequestException" } // Message returns the exception's message. -func (s MultipleRepositoriesInPullRequestException) Message() string { +func (s *MultipleRepositoriesInPullRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25690,29 +26222,29 @@ func (s MultipleRepositoriesInPullRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MultipleRepositoriesInPullRequestException) OrigErr() error { +func (s *MultipleRepositoriesInPullRequestException) OrigErr() error { return nil } -func (s MultipleRepositoriesInPullRequestException) Error() string { +func (s *MultipleRepositoriesInPullRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MultipleRepositoriesInPullRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MultipleRepositoriesInPullRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s MultipleRepositoriesInPullRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *MultipleRepositoriesInPullRequestException) RequestID() string { + return s.RespMetadata.RequestID } // The user name is not valid because it has exceeded the character limit for // author names. type NameLengthExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -25729,17 +26261,17 @@ func (s NameLengthExceededException) GoString() string { func newErrorNameLengthExceededException(v protocol.ResponseMetadata) error { return &NameLengthExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NameLengthExceededException) Code() string { +func (s *NameLengthExceededException) Code() string { return "NameLengthExceededException" } // Message returns the exception's message. -func (s NameLengthExceededException) Message() string { +func (s *NameLengthExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25747,29 +26279,29 @@ func (s NameLengthExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NameLengthExceededException) OrigErr() error { +func (s *NameLengthExceededException) OrigErr() error { return nil } -func (s NameLengthExceededException) Error() string { +func (s *NameLengthExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NameLengthExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NameLengthExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NameLengthExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *NameLengthExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The commit cannot be created because no changes will be made to the repository // as a result of this commit. A commit must contain at least one change. type NoChangeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -25786,17 +26318,17 @@ func (s NoChangeException) GoString() string { func newErrorNoChangeException(v protocol.ResponseMetadata) error { return &NoChangeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoChangeException) Code() string { +func (s *NoChangeException) Code() string { return "NoChangeException" } // Message returns the exception's message. -func (s NoChangeException) Message() string { +func (s *NoChangeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25804,29 +26336,29 @@ func (s NoChangeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s NoChangeException) OrigErr() error { +func (s *NoChangeException) OrigErr() error { return nil } -func (s NoChangeException) Error() string { +func (s *NoChangeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoChangeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoChangeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoChangeException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoChangeException) RequestID() string { + return s.RespMetadata.RequestID } // The maximum number of approval rule templates has been exceeded for this // AWS Region. type NumberOfRuleTemplatesExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -25843,17 +26375,17 @@ func (s NumberOfRuleTemplatesExceededException) GoString() string { func newErrorNumberOfRuleTemplatesExceededException(v protocol.ResponseMetadata) error { return &NumberOfRuleTemplatesExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NumberOfRuleTemplatesExceededException) Code() string { +func (s *NumberOfRuleTemplatesExceededException) Code() string { return "NumberOfRuleTemplatesExceededException" } // Message returns the exception's message. -func (s NumberOfRuleTemplatesExceededException) Message() string { +func (s *NumberOfRuleTemplatesExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25861,29 +26393,29 @@ func (s NumberOfRuleTemplatesExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NumberOfRuleTemplatesExceededException) OrigErr() error { +func (s *NumberOfRuleTemplatesExceededException) OrigErr() error { return nil } -func (s NumberOfRuleTemplatesExceededException) Error() string { +func (s *NumberOfRuleTemplatesExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NumberOfRuleTemplatesExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NumberOfRuleTemplatesExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NumberOfRuleTemplatesExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *NumberOfRuleTemplatesExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The approval rule cannot be added. The pull request has the maximum number // of approval rules associated with it. 
type NumberOfRulesExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -25900,17 +26432,17 @@ func (s NumberOfRulesExceededException) GoString() string { func newErrorNumberOfRulesExceededException(v protocol.ResponseMetadata) error { return &NumberOfRulesExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NumberOfRulesExceededException) Code() string { +func (s *NumberOfRulesExceededException) Code() string { return "NumberOfRulesExceededException" } // Message returns the exception's message. -func (s NumberOfRulesExceededException) Message() string { +func (s *NumberOfRulesExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25918,22 +26450,22 @@ func (s NumberOfRulesExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NumberOfRulesExceededException) OrigErr() error { +func (s *NumberOfRulesExceededException) OrigErr() error { return nil } -func (s NumberOfRulesExceededException) Error() string { +func (s *NumberOfRulesExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NumberOfRulesExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NumberOfRulesExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NumberOfRulesExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *NumberOfRulesExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Information about the type of an object in a merge operation. @@ -26014,8 +26546,8 @@ func (s *OriginApprovalRuleTemplate) SetApprovalRuleTemplateName(v string) *Orig // The pull request has already had its approval rules set to override. type OverrideAlreadySetException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -26032,17 +26564,17 @@ func (s OverrideAlreadySetException) GoString() string { func newErrorOverrideAlreadySetException(v protocol.ResponseMetadata) error { return &OverrideAlreadySetException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OverrideAlreadySetException) Code() string { +func (s *OverrideAlreadySetException) Code() string { return "OverrideAlreadySetException" } // Message returns the exception's message. -func (s OverrideAlreadySetException) Message() string { +func (s *OverrideAlreadySetException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26050,22 +26582,22 @@ func (s OverrideAlreadySetException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s OverrideAlreadySetException) OrigErr() error { +func (s *OverrideAlreadySetException) OrigErr() error { return nil } -func (s OverrideAlreadySetException) Error() string { +func (s *OverrideAlreadySetException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OverrideAlreadySetException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OverrideAlreadySetException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OverrideAlreadySetException) RequestID() string { - return s.respMetadata.RequestID +func (s *OverrideAlreadySetException) RequestID() string { + return s.RespMetadata.RequestID } type OverridePullRequestApprovalRulesInput struct { @@ -26156,8 +26688,8 @@ func (s OverridePullRequestApprovalRulesOutput) GoString() string { // An override status is required, but no value was provided. Valid values include // OVERRIDE and REVOKE. type OverrideStatusRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -26174,17 +26706,17 @@ func (s OverrideStatusRequiredException) GoString() string { func newErrorOverrideStatusRequiredException(v protocol.ResponseMetadata) error { return &OverrideStatusRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OverrideStatusRequiredException) Code() string { +func (s *OverrideStatusRequiredException) Code() string { return "OverrideStatusRequiredException" } // Message returns the exception's message. -func (s OverrideStatusRequiredException) Message() string { +func (s *OverrideStatusRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26192,29 +26724,29 @@ func (s OverrideStatusRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OverrideStatusRequiredException) OrigErr() error { +func (s *OverrideStatusRequiredException) OrigErr() error { return nil } -func (s OverrideStatusRequiredException) Error() string { +func (s *OverrideStatusRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OverrideStatusRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OverrideStatusRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OverrideStatusRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *OverrideStatusRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The parent commit ID is not valid because it does not exist. The specified // parent commit ID does not exist in the specified branch of the repository. 
type ParentCommitDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -26231,17 +26763,17 @@ func (s ParentCommitDoesNotExistException) GoString() string { func newErrorParentCommitDoesNotExistException(v protocol.ResponseMetadata) error { return &ParentCommitDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParentCommitDoesNotExistException) Code() string { +func (s *ParentCommitDoesNotExistException) Code() string { return "ParentCommitDoesNotExistException" } // Message returns the exception's message. -func (s ParentCommitDoesNotExistException) Message() string { +func (s *ParentCommitDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26249,30 +26781,30 @@ func (s ParentCommitDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ParentCommitDoesNotExistException) OrigErr() error { +func (s *ParentCommitDoesNotExistException) OrigErr() error { return nil } -func (s ParentCommitDoesNotExistException) Error() string { +func (s *ParentCommitDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ParentCommitDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParentCommitDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParentCommitDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *ParentCommitDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The file could not be added because the provided parent commit ID is not // the current tip of the specified branch. To view the full commit ID of the // current head of the branch, use GetBranch. type ParentCommitIdOutdatedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -26289,17 +26821,17 @@ func (s ParentCommitIdOutdatedException) GoString() string { func newErrorParentCommitIdOutdatedException(v protocol.ResponseMetadata) error { return &ParentCommitIdOutdatedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParentCommitIdOutdatedException) Code() string { +func (s *ParentCommitIdOutdatedException) Code() string { return "ParentCommitIdOutdatedException" } // Message returns the exception's message. -func (s ParentCommitIdOutdatedException) Message() string { +func (s *ParentCommitIdOutdatedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26307,30 +26839,30 @@ func (s ParentCommitIdOutdatedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ParentCommitIdOutdatedException) OrigErr() error { +func (s *ParentCommitIdOutdatedException) OrigErr() error { return nil } -func (s ParentCommitIdOutdatedException) Error() string { +func (s *ParentCommitIdOutdatedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ParentCommitIdOutdatedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParentCommitIdOutdatedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParentCommitIdOutdatedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ParentCommitIdOutdatedException) RequestID() string { + return s.RespMetadata.RequestID } // A parent commit ID is required. To view the full commit ID of a branch in // a repository, use GetBranch or a Git command (for example, git pull or git // log). type ParentCommitIdRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -26347,17 +26879,17 @@ func (s ParentCommitIdRequiredException) GoString() string { func newErrorParentCommitIdRequiredException(v protocol.ResponseMetadata) error { return &ParentCommitIdRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParentCommitIdRequiredException) Code() string { +func (s *ParentCommitIdRequiredException) Code() string { return "ParentCommitIdRequiredException" } // Message returns the exception's message. -func (s ParentCommitIdRequiredException) Message() string { +func (s *ParentCommitIdRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26365,28 +26897,28 @@ func (s ParentCommitIdRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ParentCommitIdRequiredException) OrigErr() error { +func (s *ParentCommitIdRequiredException) OrigErr() error { return nil } -func (s ParentCommitIdRequiredException) Error() string { +func (s *ParentCommitIdRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ParentCommitIdRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParentCommitIdRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParentCommitIdRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ParentCommitIdRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The specified path does not exist. type PathDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -26403,17 +26935,17 @@ func (s PathDoesNotExistException) GoString() string { func newErrorPathDoesNotExistException(v protocol.ResponseMetadata) error { return &PathDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s PathDoesNotExistException) Code() string { +func (s *PathDoesNotExistException) Code() string { return "PathDoesNotExistException" } // Message returns the exception's message. -func (s PathDoesNotExistException) Message() string { +func (s *PathDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26421,28 +26953,28 @@ func (s PathDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PathDoesNotExistException) OrigErr() error { +func (s *PathDoesNotExistException) OrigErr() error { return nil } -func (s PathDoesNotExistException) Error() string { +func (s *PathDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PathDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PathDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PathDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *PathDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The folderPath for a location cannot be null. type PathRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -26459,17 +26991,17 @@ func (s PathRequiredException) GoString() string { func newErrorPathRequiredException(v protocol.ResponseMetadata) error { return &PathRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PathRequiredException) Code() string { +func (s *PathRequiredException) Code() string { return "PathRequiredException" } // Message returns the exception's message. -func (s PathRequiredException) Message() string { +func (s *PathRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26477,22 +27009,22 @@ func (s PathRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PathRequiredException) OrigErr() error { +func (s *PathRequiredException) OrigErr() error { return nil } -func (s PathRequiredException) Error() string { +func (s *PathRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PathRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PathRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PathRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *PathRequiredException) RequestID() string { + return s.RespMetadata.RequestID } type PostCommentForComparedCommitInput struct { @@ -27099,8 +27631,8 @@ func (s *PullRequest) SetTitle(v string) *PullRequest { // The pull request status cannot be updated because it is already closed. 
type PullRequestAlreadyClosedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -27117,17 +27649,17 @@ func (s PullRequestAlreadyClosedException) GoString() string { func newErrorPullRequestAlreadyClosedException(v protocol.ResponseMetadata) error { return &PullRequestAlreadyClosedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PullRequestAlreadyClosedException) Code() string { +func (s *PullRequestAlreadyClosedException) Code() string { return "PullRequestAlreadyClosedException" } // Message returns the exception's message. -func (s PullRequestAlreadyClosedException) Message() string { +func (s *PullRequestAlreadyClosedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27135,29 +27667,29 @@ func (s PullRequestAlreadyClosedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PullRequestAlreadyClosedException) OrigErr() error { +func (s *PullRequestAlreadyClosedException) OrigErr() error { return nil } -func (s PullRequestAlreadyClosedException) Error() string { +func (s *PullRequestAlreadyClosedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PullRequestAlreadyClosedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PullRequestAlreadyClosedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PullRequestAlreadyClosedException) RequestID() string { - return s.respMetadata.RequestID +func (s *PullRequestAlreadyClosedException) RequestID() string { + return s.RespMetadata.RequestID } // The pull request cannot be merged because one or more approval rules applied // to the pull request have conditions that have not been met. type PullRequestApprovalRulesNotSatisfiedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -27174,17 +27706,17 @@ func (s PullRequestApprovalRulesNotSatisfiedException) GoString() string { func newErrorPullRequestApprovalRulesNotSatisfiedException(v protocol.ResponseMetadata) error { return &PullRequestApprovalRulesNotSatisfiedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PullRequestApprovalRulesNotSatisfiedException) Code() string { +func (s *PullRequestApprovalRulesNotSatisfiedException) Code() string { return "PullRequestApprovalRulesNotSatisfiedException" } // Message returns the exception's message. -func (s PullRequestApprovalRulesNotSatisfiedException) Message() string { +func (s *PullRequestApprovalRulesNotSatisfiedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27192,30 +27724,30 @@ func (s PullRequestApprovalRulesNotSatisfiedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s PullRequestApprovalRulesNotSatisfiedException) OrigErr() error { +func (s *PullRequestApprovalRulesNotSatisfiedException) OrigErr() error { return nil } -func (s PullRequestApprovalRulesNotSatisfiedException) Error() string { +func (s *PullRequestApprovalRulesNotSatisfiedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PullRequestApprovalRulesNotSatisfiedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PullRequestApprovalRulesNotSatisfiedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PullRequestApprovalRulesNotSatisfiedException) RequestID() string { - return s.respMetadata.RequestID +func (s *PullRequestApprovalRulesNotSatisfiedException) RequestID() string { + return s.RespMetadata.RequestID } // The approval cannot be applied because the user approving the pull request // matches the user who created the pull request. You cannot approve a pull // request that you created. type PullRequestCannotBeApprovedByAuthorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -27232,17 +27764,17 @@ func (s PullRequestCannotBeApprovedByAuthorException) GoString() string { func newErrorPullRequestCannotBeApprovedByAuthorException(v protocol.ResponseMetadata) error { return &PullRequestCannotBeApprovedByAuthorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PullRequestCannotBeApprovedByAuthorException) Code() string { +func (s *PullRequestCannotBeApprovedByAuthorException) Code() string { return "PullRequestCannotBeApprovedByAuthorException" } // Message returns the exception's message. -func (s PullRequestCannotBeApprovedByAuthorException) Message() string { +func (s *PullRequestCannotBeApprovedByAuthorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27250,22 +27782,22 @@ func (s PullRequestCannotBeApprovedByAuthorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PullRequestCannotBeApprovedByAuthorException) OrigErr() error { +func (s *PullRequestCannotBeApprovedByAuthorException) OrigErr() error { return nil } -func (s PullRequestCannotBeApprovedByAuthorException) Error() string { +func (s *PullRequestCannotBeApprovedByAuthorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PullRequestCannotBeApprovedByAuthorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PullRequestCannotBeApprovedByAuthorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PullRequestCannotBeApprovedByAuthorException) RequestID() string { - return s.respMetadata.RequestID +func (s *PullRequestCannotBeApprovedByAuthorException) RequestID() string { + return s.RespMetadata.RequestID } // Metadata about the pull request that is used when comparing the pull request @@ -27325,8 +27857,8 @@ func (s *PullRequestCreatedEventMetadata) SetSourceCommitId(v string) *PullReque // The pull request ID could not be found. 
Make sure that you have specified // the correct repository name and pull request ID, and then try again. type PullRequestDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -27343,17 +27875,17 @@ func (s PullRequestDoesNotExistException) GoString() string { func newErrorPullRequestDoesNotExistException(v protocol.ResponseMetadata) error { return &PullRequestDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PullRequestDoesNotExistException) Code() string { +func (s *PullRequestDoesNotExistException) Code() string { return "PullRequestDoesNotExistException" } // Message returns the exception's message. -func (s PullRequestDoesNotExistException) Message() string { +func (s *PullRequestDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27361,22 +27893,22 @@ func (s PullRequestDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PullRequestDoesNotExistException) OrigErr() error { +func (s *PullRequestDoesNotExistException) OrigErr() error { return nil } -func (s PullRequestDoesNotExistException) Error() string { +func (s *PullRequestDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PullRequestDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PullRequestDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PullRequestDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *PullRequestDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about a pull request event. @@ -27498,8 +28030,8 @@ func (s *PullRequestEvent) SetPullRequestStatusChangedEventMetadata(v *PullReque // A pull request ID is required, but none was provided. type PullRequestIdRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -27516,17 +28048,17 @@ func (s PullRequestIdRequiredException) GoString() string { func newErrorPullRequestIdRequiredException(v protocol.ResponseMetadata) error { return &PullRequestIdRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PullRequestIdRequiredException) Code() string { +func (s *PullRequestIdRequiredException) Code() string { return "PullRequestIdRequiredException" } // Message returns the exception's message. -func (s PullRequestIdRequiredException) Message() string { +func (s *PullRequestIdRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27534,22 +28066,22 @@ func (s PullRequestIdRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s PullRequestIdRequiredException) OrigErr() error { +func (s *PullRequestIdRequiredException) OrigErr() error { return nil } -func (s PullRequestIdRequiredException) Error() string { +func (s *PullRequestIdRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PullRequestIdRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PullRequestIdRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PullRequestIdRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *PullRequestIdRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about the change in the merge state for a pull request @@ -27675,8 +28207,8 @@ func (s *PullRequestStatusChangedEventMetadata) SetPullRequestStatus(v string) * // A pull request status is required, but none was provided. type PullRequestStatusRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -27693,17 +28225,17 @@ func (s PullRequestStatusRequiredException) GoString() string { func newErrorPullRequestStatusRequiredException(v protocol.ResponseMetadata) error { return &PullRequestStatusRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PullRequestStatusRequiredException) Code() string { +func (s *PullRequestStatusRequiredException) Code() string { return "PullRequestStatusRequiredException" } // Message returns the exception's message. -func (s PullRequestStatusRequiredException) Message() string { +func (s *PullRequestStatusRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27711,22 +28243,22 @@ func (s PullRequestStatusRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PullRequestStatusRequiredException) OrigErr() error { +func (s *PullRequestStatusRequiredException) OrigErr() error { return nil } -func (s PullRequestStatusRequiredException) Error() string { +func (s *PullRequestStatusRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PullRequestStatusRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PullRequestStatusRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PullRequestStatusRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *PullRequestStatusRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about a pull request target. @@ -27815,6 +28347,75 @@ func (s *PullRequestTarget) SetSourceReference(v string) *PullRequestTarget { return s } +type PutCommentReactionInput struct { + _ struct{} `type:"structure"` + + // The ID of the comment to which you want to add or update a reaction. + // + // CommentId is a required field + CommentId *string `locationName:"commentId" type:"string" required:"true"` + + // The emoji reaction you want to add or update. 
To remove a reaction, provide + // a value of blank or null. You can also provide the value of none. For information + // about emoji reaction values supported in AWS CodeCommit, see the AWS CodeCommit + // User Guide (https://docs.aws.amazon.com/codecommit/latest/userguide/how-to-commit-comment.html#emoji-reaction-table). + // + // ReactionValue is a required field + ReactionValue *string `locationName:"reactionValue" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutCommentReactionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutCommentReactionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutCommentReactionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutCommentReactionInput"} + if s.CommentId == nil { + invalidParams.Add(request.NewErrParamRequired("CommentId")) + } + if s.ReactionValue == nil { + invalidParams.Add(request.NewErrParamRequired("ReactionValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCommentId sets the CommentId field's value. +func (s *PutCommentReactionInput) SetCommentId(v string) *PutCommentReactionInput { + s.CommentId = &v + return s +} + +// SetReactionValue sets the ReactionValue field's value. +func (s *PutCommentReactionInput) SetReactionValue(v string) *PutCommentReactionInput { + s.ReactionValue = &v + return s +} + +type PutCommentReactionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutCommentReactionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutCommentReactionOutput) GoString() string { + return s.String() +} + // Information about a file added or updated as part of a commit. type PutFileEntry struct { _ struct{} `type:"structure"` @@ -27893,8 +28494,8 @@ func (s *PutFileEntry) SetSourceFile(v *SourceFileSpecifier) *PutFileEntry { // The commit cannot be created because one or more files specified in the commit // reference both a file and a folder. type PutFileEntryConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -27911,17 +28512,17 @@ func (s PutFileEntryConflictException) GoString() string { func newErrorPutFileEntryConflictException(v protocol.ResponseMetadata) error { return &PutFileEntryConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PutFileEntryConflictException) Code() string { +func (s *PutFileEntryConflictException) Code() string { return "PutFileEntryConflictException" } // Message returns the exception's message. -func (s PutFileEntryConflictException) Message() string { +func (s *PutFileEntryConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27929,22 +28530,22 @@ func (s PutFileEntryConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s PutFileEntryConflictException) OrigErr() error { +func (s *PutFileEntryConflictException) OrigErr() error { return nil } -func (s PutFileEntryConflictException) Error() string { +func (s *PutFileEntryConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PutFileEntryConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PutFileEntryConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PutFileEntryConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *PutFileEntryConflictException) RequestID() string { + return s.RespMetadata.RequestID } type PutFileInput struct { @@ -28231,10 +28832,213 @@ func (s *PutRepositoryTriggersOutput) SetConfigurationId(v string) *PutRepositor return s } +// Information about the reaction values provided by users on a comment. +type ReactionForComment struct { + _ struct{} `type:"structure"` + + // The reaction for a specified comment. + Reaction *ReactionValueFormats `locationName:"reaction" type:"structure"` + + // The Amazon Resource Names (ARNs) of users who have provided reactions to + // the comment. + ReactionUsers []*string `locationName:"reactionUsers" type:"list"` + + // A numerical count of users who reacted with the specified emoji whose identities + // have been subsequently deleted from IAM. While these IAM users or roles no + // longer exist, the reactions might still appear in total reaction counts. + ReactionsFromDeletedUsersCount *int64 `locationName:"reactionsFromDeletedUsersCount" type:"integer"` +} + +// String returns the string representation +func (s ReactionForComment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReactionForComment) GoString() string { + return s.String() +} + +// SetReaction sets the Reaction field's value. +func (s *ReactionForComment) SetReaction(v *ReactionValueFormats) *ReactionForComment { + s.Reaction = v + return s +} + +// SetReactionUsers sets the ReactionUsers field's value. +func (s *ReactionForComment) SetReactionUsers(v []*string) *ReactionForComment { + s.ReactionUsers = v + return s +} + +// SetReactionsFromDeletedUsersCount sets the ReactionsFromDeletedUsersCount field's value. +func (s *ReactionForComment) SetReactionsFromDeletedUsersCount(v int64) *ReactionForComment { + s.ReactionsFromDeletedUsersCount = &v + return s +} + +// The number of reactions has been exceeded. Reactions are limited to one reaction +// per user for each individual comment ID. +type ReactionLimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ReactionLimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReactionLimitExceededException) GoString() string { + return s.String() +} + +func newErrorReactionLimitExceededException(v protocol.ResponseMetadata) error { + return &ReactionLimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ReactionLimitExceededException) Code() string { + return "ReactionLimitExceededException" +} + +// Message returns the exception's message. 
+func (s *ReactionLimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ReactionLimitExceededException) OrigErr() error { + return nil +} + +func (s *ReactionLimitExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ReactionLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ReactionLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Information about the values for reactions to a comment. AWS CodeCommit supports +// a limited set of reactions. +type ReactionValueFormats struct { + _ struct{} `type:"structure"` + + // The Emoji Version 1.0 graphic of the reaction. These graphics are interpreted + // slightly differently on different operating systems. + Emoji *string `locationName:"emoji" type:"string"` + + // The emoji short code for the reaction. Short codes are interpreted slightly + // differently on different operating systems. + ShortCode *string `locationName:"shortCode" type:"string"` + + // The Unicode codepoint for the reaction. + Unicode *string `locationName:"unicode" type:"string"` +} + +// String returns the string representation +func (s ReactionValueFormats) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReactionValueFormats) GoString() string { + return s.String() +} + +// SetEmoji sets the Emoji field's value. +func (s *ReactionValueFormats) SetEmoji(v string) *ReactionValueFormats { + s.Emoji = &v + return s +} + +// SetShortCode sets the ShortCode field's value. +func (s *ReactionValueFormats) SetShortCode(v string) *ReactionValueFormats { + s.ShortCode = &v + return s +} + +// SetUnicode sets the Unicode field's value. +func (s *ReactionValueFormats) SetUnicode(v string) *ReactionValueFormats { + s.Unicode = &v + return s +} + +// A reaction value is required. +type ReactionValueRequiredException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ReactionValueRequiredException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReactionValueRequiredException) GoString() string { + return s.String() +} + +func newErrorReactionValueRequiredException(v protocol.ResponseMetadata) error { + return &ReactionValueRequiredException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ReactionValueRequiredException) Code() string { + return "ReactionValueRequiredException" +} + +// Message returns the exception's message. +func (s *ReactionValueRequiredException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ReactionValueRequiredException) OrigErr() error { + return nil +} + +func (s *ReactionValueRequiredException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *ReactionValueRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ReactionValueRequiredException) RequestID() string { + return s.RespMetadata.RequestID +} + // The specified reference does not exist. You must provide a full commit ID. type ReferenceDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -28251,17 +29055,17 @@ func (s ReferenceDoesNotExistException) GoString() string { func newErrorReferenceDoesNotExistException(v protocol.ResponseMetadata) error { return &ReferenceDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ReferenceDoesNotExistException) Code() string { +func (s *ReferenceDoesNotExistException) Code() string { return "ReferenceDoesNotExistException" } // Message returns the exception's message. -func (s ReferenceDoesNotExistException) Message() string { +func (s *ReferenceDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28269,28 +29073,28 @@ func (s ReferenceDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ReferenceDoesNotExistException) OrigErr() error { +func (s *ReferenceDoesNotExistException) OrigErr() error { return nil } -func (s ReferenceDoesNotExistException) Error() string { +func (s *ReferenceDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ReferenceDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ReferenceDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ReferenceDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *ReferenceDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // A reference name is required, but none was provided. type ReferenceNameRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -28307,17 +29111,17 @@ func (s ReferenceNameRequiredException) GoString() string { func newErrorReferenceNameRequiredException(v protocol.ResponseMetadata) error { return &ReferenceNameRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ReferenceNameRequiredException) Code() string { +func (s *ReferenceNameRequiredException) Code() string { return "ReferenceNameRequiredException" } // Message returns the exception's message. -func (s ReferenceNameRequiredException) Message() string { +func (s *ReferenceNameRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28325,28 +29129,28 @@ func (s ReferenceNameRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
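A small sketch of consuming the reaction metadata types added above, assuming a slice of ReactionForComment values such as the comment-reaction listing operation returns: prefer the ShortCode rendering, fall back to the Unicode codepoint, and fold the deleted-user count into the total. The sample data at the bottom is placeholder content only.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codecommit"
)

// summarizeReactions prints one line per reaction on a comment, using only
// the ReactionForComment and ReactionValueFormats fields defined above.
func summarizeReactions(reactions []*codecommit.ReactionForComment) {
	for _, r := range reactions {
		label := "(unknown reaction)"
		if r.Reaction != nil {
			// Prefer the short code; fall back to the Unicode codepoint.
			if r.Reaction.ShortCode != nil {
				label = aws.StringValue(r.Reaction.ShortCode)
			} else if r.Reaction.Unicode != nil {
				label = aws.StringValue(r.Reaction.Unicode)
			}
		}
		total := len(r.ReactionUsers) + int(aws.Int64Value(r.ReactionsFromDeletedUsersCount))
		fmt.Printf("%s: %d user(s)\n", label, total)
	}
}

func main() {
	// Placeholder data standing in for a real API response.
	sample := []*codecommit.ReactionForComment{
		(&codecommit.ReactionForComment{}).
			SetReaction((&codecommit.ReactionValueFormats{}).SetShortCode(":thumbsup:")).
			SetReactionUsers([]*string{aws.String("arn:aws:iam::123456789012:user/example")}),
	}
	summarizeReactions(sample)
}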
-func (s ReferenceNameRequiredException) OrigErr() error { +func (s *ReferenceNameRequiredException) OrigErr() error { return nil } -func (s ReferenceNameRequiredException) Error() string { +func (s *ReferenceNameRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ReferenceNameRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ReferenceNameRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ReferenceNameRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ReferenceNameRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The specified reference is not a supported type. type ReferenceTypeNotSupportedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -28363,17 +29167,17 @@ func (s ReferenceTypeNotSupportedException) GoString() string { func newErrorReferenceTypeNotSupportedException(v protocol.ResponseMetadata) error { return &ReferenceTypeNotSupportedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ReferenceTypeNotSupportedException) Code() string { +func (s *ReferenceTypeNotSupportedException) Code() string { return "ReferenceTypeNotSupportedException" } // Message returns the exception's message. -func (s ReferenceTypeNotSupportedException) Message() string { +func (s *ReferenceTypeNotSupportedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28381,22 +29185,22 @@ func (s ReferenceTypeNotSupportedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ReferenceTypeNotSupportedException) OrigErr() error { +func (s *ReferenceTypeNotSupportedException) OrigErr() error { return nil } -func (s ReferenceTypeNotSupportedException) Error() string { +func (s *ReferenceTypeNotSupportedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ReferenceTypeNotSupportedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ReferenceTypeNotSupportedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ReferenceTypeNotSupportedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ReferenceTypeNotSupportedException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a replacement content entry in the conflict of a merge @@ -28475,8 +29279,8 @@ func (s *ReplaceContentEntry) SetReplacementType(v string) *ReplaceContentEntry // USE_NEW_CONTENT was specified, but no replacement content has been provided. 
type ReplacementContentRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -28493,17 +29297,17 @@ func (s ReplacementContentRequiredException) GoString() string { func newErrorReplacementContentRequiredException(v protocol.ResponseMetadata) error { return &ReplacementContentRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ReplacementContentRequiredException) Code() string { +func (s *ReplacementContentRequiredException) Code() string { return "ReplacementContentRequiredException" } // Message returns the exception's message. -func (s ReplacementContentRequiredException) Message() string { +func (s *ReplacementContentRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28511,28 +29315,28 @@ func (s ReplacementContentRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ReplacementContentRequiredException) OrigErr() error { +func (s *ReplacementContentRequiredException) OrigErr() error { return nil } -func (s ReplacementContentRequiredException) Error() string { +func (s *ReplacementContentRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ReplacementContentRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ReplacementContentRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ReplacementContentRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ReplacementContentRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // A replacement type is required. type ReplacementTypeRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -28549,17 +29353,17 @@ func (s ReplacementTypeRequiredException) GoString() string { func newErrorReplacementTypeRequiredException(v protocol.ResponseMetadata) error { return &ReplacementTypeRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ReplacementTypeRequiredException) Code() string { +func (s *ReplacementTypeRequiredException) Code() string { return "ReplacementTypeRequiredException" } // Message returns the exception's message. -func (s ReplacementTypeRequiredException) Message() string { +func (s *ReplacementTypeRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28567,28 +29371,28 @@ func (s ReplacementTypeRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ReplacementTypeRequiredException) OrigErr() error { +func (s *ReplacementTypeRequiredException) OrigErr() error { return nil } -func (s ReplacementTypeRequiredException) Error() string { +func (s *ReplacementTypeRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ReplacementTypeRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ReplacementTypeRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ReplacementTypeRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ReplacementTypeRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The specified repository does not exist. type RepositoryDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -28605,17 +29409,17 @@ func (s RepositoryDoesNotExistException) GoString() string { func newErrorRepositoryDoesNotExistException(v protocol.ResponseMetadata) error { return &RepositoryDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryDoesNotExistException) Code() string { +func (s *RepositoryDoesNotExistException) Code() string { return "RepositoryDoesNotExistException" } // Message returns the exception's message. -func (s RepositoryDoesNotExistException) Message() string { +func (s *RepositoryDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28623,28 +29427,28 @@ func (s RepositoryDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryDoesNotExistException) OrigErr() error { +func (s *RepositoryDoesNotExistException) OrigErr() error { return nil } -func (s RepositoryDoesNotExistException) Error() string { +func (s *RepositoryDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // A repository resource limit was exceeded. type RepositoryLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -28661,17 +29465,17 @@ func (s RepositoryLimitExceededException) GoString() string { func newErrorRepositoryLimitExceededException(v protocol.ResponseMetadata) error { return &RepositoryLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryLimitExceededException) Code() string { +func (s *RepositoryLimitExceededException) Code() string { return "RepositoryLimitExceededException" } // Message returns the exception's message. 
-func (s RepositoryLimitExceededException) Message() string { +func (s *RepositoryLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28679,22 +29483,22 @@ func (s RepositoryLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryLimitExceededException) OrigErr() error { +func (s *RepositoryLimitExceededException) OrigErr() error { return nil } -func (s RepositoryLimitExceededException) Error() string { +func (s *RepositoryLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a repository. @@ -28804,8 +29608,8 @@ func (s *RepositoryMetadata) SetRepositoryName(v string) *RepositoryMetadata { // The specified repository name already exists. type RepositoryNameExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -28822,17 +29626,17 @@ func (s RepositoryNameExistsException) GoString() string { func newErrorRepositoryNameExistsException(v protocol.ResponseMetadata) error { return &RepositoryNameExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryNameExistsException) Code() string { +func (s *RepositoryNameExistsException) Code() string { return "RepositoryNameExistsException" } // Message returns the exception's message. -func (s RepositoryNameExistsException) Message() string { +func (s *RepositoryNameExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28840,22 +29644,22 @@ func (s RepositoryNameExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryNameExistsException) OrigErr() error { +func (s *RepositoryNameExistsException) OrigErr() error { return nil } -func (s RepositoryNameExistsException) Error() string { +func (s *RepositoryNameExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryNameExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryNameExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryNameExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryNameExistsException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a repository name and ID. @@ -28893,8 +29697,8 @@ func (s *RepositoryNameIdPair) SetRepositoryName(v string) *RepositoryNameIdPair // A repository name is required, but was not specified. 
type RepositoryNameRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -28911,17 +29715,17 @@ func (s RepositoryNameRequiredException) GoString() string { func newErrorRepositoryNameRequiredException(v protocol.ResponseMetadata) error { return &RepositoryNameRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryNameRequiredException) Code() string { +func (s *RepositoryNameRequiredException) Code() string { return "RepositoryNameRequiredException" } // Message returns the exception's message. -func (s RepositoryNameRequiredException) Message() string { +func (s *RepositoryNameRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28929,28 +29733,28 @@ func (s RepositoryNameRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryNameRequiredException) OrigErr() error { +func (s *RepositoryNameRequiredException) OrigErr() error { return nil } -func (s RepositoryNameRequiredException) Error() string { +func (s *RepositoryNameRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryNameRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryNameRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryNameRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryNameRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // At least one repository name object is required, but was not specified. type RepositoryNamesRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -28967,17 +29771,17 @@ func (s RepositoryNamesRequiredException) GoString() string { func newErrorRepositoryNamesRequiredException(v protocol.ResponseMetadata) error { return &RepositoryNamesRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryNamesRequiredException) Code() string { +func (s *RepositoryNamesRequiredException) Code() string { return "RepositoryNamesRequiredException" } // Message returns the exception's message. -func (s RepositoryNamesRequiredException) Message() string { +func (s *RepositoryNamesRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28985,30 +29789,30 @@ func (s RepositoryNamesRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryNamesRequiredException) OrigErr() error { +func (s *RepositoryNamesRequiredException) OrigErr() error { return nil } -func (s RepositoryNamesRequiredException) Error() string { +func (s *RepositoryNamesRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s RepositoryNamesRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryNamesRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryNamesRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryNamesRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The repository does not contain any pull requests with that pull request // ID. Use GetPullRequest to verify the correct repository name for the pull // request ID. type RepositoryNotAssociatedWithPullRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29025,17 +29829,17 @@ func (s RepositoryNotAssociatedWithPullRequestException) GoString() string { func newErrorRepositoryNotAssociatedWithPullRequestException(v protocol.ResponseMetadata) error { return &RepositoryNotAssociatedWithPullRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryNotAssociatedWithPullRequestException) Code() string { +func (s *RepositoryNotAssociatedWithPullRequestException) Code() string { return "RepositoryNotAssociatedWithPullRequestException" } // Message returns the exception's message. -func (s RepositoryNotAssociatedWithPullRequestException) Message() string { +func (s *RepositoryNotAssociatedWithPullRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29043,22 +29847,22 @@ func (s RepositoryNotAssociatedWithPullRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryNotAssociatedWithPullRequestException) OrigErr() error { +func (s *RepositoryNotAssociatedWithPullRequestException) OrigErr() error { return nil } -func (s RepositoryNotAssociatedWithPullRequestException) Error() string { +func (s *RepositoryNotAssociatedWithPullRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryNotAssociatedWithPullRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryNotAssociatedWithPullRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryNotAssociatedWithPullRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryNotAssociatedWithPullRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a trigger for a repository. @@ -29158,8 +29962,8 @@ func (s *RepositoryTrigger) SetName(v string) *RepositoryTrigger { // At least one branch name is required, but was not specified in the trigger // configuration. 
type RepositoryTriggerBranchNameListRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29176,17 +29980,17 @@ func (s RepositoryTriggerBranchNameListRequiredException) GoString() string { func newErrorRepositoryTriggerBranchNameListRequiredException(v protocol.ResponseMetadata) error { return &RepositoryTriggerBranchNameListRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryTriggerBranchNameListRequiredException) Code() string { +func (s *RepositoryTriggerBranchNameListRequiredException) Code() string { return "RepositoryTriggerBranchNameListRequiredException" } // Message returns the exception's message. -func (s RepositoryTriggerBranchNameListRequiredException) Message() string { +func (s *RepositoryTriggerBranchNameListRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29194,29 +29998,29 @@ func (s RepositoryTriggerBranchNameListRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryTriggerBranchNameListRequiredException) OrigErr() error { +func (s *RepositoryTriggerBranchNameListRequiredException) OrigErr() error { return nil } -func (s RepositoryTriggerBranchNameListRequiredException) Error() string { +func (s *RepositoryTriggerBranchNameListRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryTriggerBranchNameListRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryTriggerBranchNameListRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryTriggerBranchNameListRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryTriggerBranchNameListRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // A destination ARN for the target service for the trigger is required, but // was not specified. type RepositoryTriggerDestinationArnRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29233,17 +30037,17 @@ func (s RepositoryTriggerDestinationArnRequiredException) GoString() string { func newErrorRepositoryTriggerDestinationArnRequiredException(v protocol.ResponseMetadata) error { return &RepositoryTriggerDestinationArnRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryTriggerDestinationArnRequiredException) Code() string { +func (s *RepositoryTriggerDestinationArnRequiredException) Code() string { return "RepositoryTriggerDestinationArnRequiredException" } // Message returns the exception's message. 
-func (s RepositoryTriggerDestinationArnRequiredException) Message() string { +func (s *RepositoryTriggerDestinationArnRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29251,28 +30055,28 @@ func (s RepositoryTriggerDestinationArnRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryTriggerDestinationArnRequiredException) OrigErr() error { +func (s *RepositoryTriggerDestinationArnRequiredException) OrigErr() error { return nil } -func (s RepositoryTriggerDestinationArnRequiredException) Error() string { +func (s *RepositoryTriggerDestinationArnRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryTriggerDestinationArnRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryTriggerDestinationArnRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryTriggerDestinationArnRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryTriggerDestinationArnRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // At least one event for the trigger is required, but was not specified. type RepositoryTriggerEventsListRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29289,17 +30093,17 @@ func (s RepositoryTriggerEventsListRequiredException) GoString() string { func newErrorRepositoryTriggerEventsListRequiredException(v protocol.ResponseMetadata) error { return &RepositoryTriggerEventsListRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryTriggerEventsListRequiredException) Code() string { +func (s *RepositoryTriggerEventsListRequiredException) Code() string { return "RepositoryTriggerEventsListRequiredException" } // Message returns the exception's message. -func (s RepositoryTriggerEventsListRequiredException) Message() string { +func (s *RepositoryTriggerEventsListRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29307,22 +30111,22 @@ func (s RepositoryTriggerEventsListRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryTriggerEventsListRequiredException) OrigErr() error { +func (s *RepositoryTriggerEventsListRequiredException) OrigErr() error { return nil } -func (s RepositoryTriggerEventsListRequiredException) Error() string { +func (s *RepositoryTriggerEventsListRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryTriggerEventsListRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryTriggerEventsListRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s RepositoryTriggerEventsListRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryTriggerEventsListRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // A trigger failed to run. @@ -29360,8 +30164,8 @@ func (s *RepositoryTriggerExecutionFailure) SetTrigger(v string) *RepositoryTrig // A name for the trigger is required, but was not specified. type RepositoryTriggerNameRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29378,17 +30182,17 @@ func (s RepositoryTriggerNameRequiredException) GoString() string { func newErrorRepositoryTriggerNameRequiredException(v protocol.ResponseMetadata) error { return &RepositoryTriggerNameRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryTriggerNameRequiredException) Code() string { +func (s *RepositoryTriggerNameRequiredException) Code() string { return "RepositoryTriggerNameRequiredException" } // Message returns the exception's message. -func (s RepositoryTriggerNameRequiredException) Message() string { +func (s *RepositoryTriggerNameRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29396,28 +30200,28 @@ func (s RepositoryTriggerNameRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryTriggerNameRequiredException) OrigErr() error { +func (s *RepositoryTriggerNameRequiredException) OrigErr() error { return nil } -func (s RepositoryTriggerNameRequiredException) Error() string { +func (s *RepositoryTriggerNameRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryTriggerNameRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryTriggerNameRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryTriggerNameRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryTriggerNameRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The list of triggers for the repository is required, but was not specified. type RepositoryTriggersListRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29434,17 +30238,17 @@ func (s RepositoryTriggersListRequiredException) GoString() string { func newErrorRepositoryTriggersListRequiredException(v protocol.ResponseMetadata) error { return &RepositoryTriggersListRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryTriggersListRequiredException) Code() string { +func (s *RepositoryTriggersListRequiredException) Code() string { return "RepositoryTriggersListRequiredException" } // Message returns the exception's message. 
-func (s RepositoryTriggersListRequiredException) Message() string { +func (s *RepositoryTriggersListRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29452,22 +30256,22 @@ func (s RepositoryTriggersListRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryTriggersListRequiredException) OrigErr() error { +func (s *RepositoryTriggersListRequiredException) OrigErr() error { return nil } -func (s RepositoryTriggersListRequiredException) Error() string { +func (s *RepositoryTriggersListRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryTriggersListRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryTriggersListRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryTriggersListRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryTriggersListRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // A valid Amazon Resource Name (ARN) for an AWS CodeCommit resource is required. @@ -29475,8 +30279,8 @@ func (s RepositoryTriggersListRequiredException) RequestID() string { // and Operations (https://docs.aws.amazon.com/codecommit/latest/userguide/auth-and-access-control-iam-access-control-identity-based.html#arn-formats) // in the AWS CodeCommit User Guide. type ResourceArnRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29493,17 +30297,17 @@ func (s ResourceArnRequiredException) GoString() string { func newErrorResourceArnRequiredException(v protocol.ResponseMetadata) error { return &ResourceArnRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceArnRequiredException) Code() string { +func (s *ResourceArnRequiredException) Code() string { return "ResourceArnRequiredException" } // Message returns the exception's message. -func (s ResourceArnRequiredException) Message() string { +func (s *ResourceArnRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29511,29 +30315,29 @@ func (s ResourceArnRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceArnRequiredException) OrigErr() error { +func (s *ResourceArnRequiredException) OrigErr() error { return nil } -func (s ResourceArnRequiredException) Error() string { +func (s *ResourceArnRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceArnRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceArnRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ResourceArnRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceArnRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The commit cannot be created because one of the changes specifies copying // or moving a .gitkeep file. type RestrictedSourceFileException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29550,17 +30354,17 @@ func (s RestrictedSourceFileException) GoString() string { func newErrorRestrictedSourceFileException(v protocol.ResponseMetadata) error { return &RestrictedSourceFileException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RestrictedSourceFileException) Code() string { +func (s *RestrictedSourceFileException) Code() string { return "RestrictedSourceFileException" } // Message returns the exception's message. -func (s RestrictedSourceFileException) Message() string { +func (s *RestrictedSourceFileException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29568,28 +30372,28 @@ func (s RestrictedSourceFileException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RestrictedSourceFileException) OrigErr() error { +func (s *RestrictedSourceFileException) OrigErr() error { return nil } -func (s RestrictedSourceFileException) Error() string { +func (s *RestrictedSourceFileException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RestrictedSourceFileException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RestrictedSourceFileException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RestrictedSourceFileException) RequestID() string { - return s.respMetadata.RequestID +func (s *RestrictedSourceFileException) RequestID() string { + return s.RespMetadata.RequestID } // A revision ID is required, but was not provided. type RevisionIdRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29606,17 +30410,17 @@ func (s RevisionIdRequiredException) GoString() string { func newErrorRevisionIdRequiredException(v protocol.ResponseMetadata) error { return &RevisionIdRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RevisionIdRequiredException) Code() string { +func (s *RevisionIdRequiredException) Code() string { return "RevisionIdRequiredException" } // Message returns the exception's message. -func (s RevisionIdRequiredException) Message() string { +func (s *RevisionIdRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29624,29 +30428,29 @@ func (s RevisionIdRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s RevisionIdRequiredException) OrigErr() error { +func (s *RevisionIdRequiredException) OrigErr() error { return nil } -func (s RevisionIdRequiredException) Error() string { +func (s *RevisionIdRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RevisionIdRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RevisionIdRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RevisionIdRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *RevisionIdRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The revision ID provided in the request does not match the current revision // ID. Use GetPullRequest to retrieve the current revision ID. type RevisionNotCurrentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29663,17 +30467,17 @@ func (s RevisionNotCurrentException) GoString() string { func newErrorRevisionNotCurrentException(v protocol.ResponseMetadata) error { return &RevisionNotCurrentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RevisionNotCurrentException) Code() string { +func (s *RevisionNotCurrentException) Code() string { return "RevisionNotCurrentException" } // Message returns the exception's message. -func (s RevisionNotCurrentException) Message() string { +func (s *RevisionNotCurrentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29681,30 +30485,30 @@ func (s RevisionNotCurrentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RevisionNotCurrentException) OrigErr() error { +func (s *RevisionNotCurrentException) OrigErr() error { return nil } -func (s RevisionNotCurrentException) Error() string { +func (s *RevisionNotCurrentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RevisionNotCurrentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RevisionNotCurrentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RevisionNotCurrentException) RequestID() string { - return s.respMetadata.RequestID +func (s *RevisionNotCurrentException) RequestID() string { + return s.RespMetadata.RequestID } // The file was not added or updated because the content of the file is exactly // the same as the content of that file in the repository and branch that you // specified. type SameFileContentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29721,17 +30525,17 @@ func (s SameFileContentException) GoString() string { func newErrorSameFileContentException(v protocol.ResponseMetadata) error { return &SameFileContentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s SameFileContentException) Code() string { +func (s *SameFileContentException) Code() string { return "SameFileContentException" } // Message returns the exception's message. -func (s SameFileContentException) Message() string { +func (s *SameFileContentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29739,22 +30543,22 @@ func (s SameFileContentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SameFileContentException) OrigErr() error { +func (s *SameFileContentException) OrigErr() error { return nil } -func (s SameFileContentException) Error() string { +func (s *SameFileContentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SameFileContentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SameFileContentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SameFileContentException) RequestID() string { - return s.respMetadata.RequestID +func (s *SameFileContentException) RequestID() string { + return s.RespMetadata.RequestID } // The commit cannot be created because one or more changes in this commit duplicate @@ -29762,8 +30566,8 @@ func (s SameFileContentException) RequestID() string { // request to the same file in the same file path twice, or make a delete request // and a move request to the same file as part of the same commit. type SamePathRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29780,17 +30584,17 @@ func (s SamePathRequestException) GoString() string { func newErrorSamePathRequestException(v protocol.ResponseMetadata) error { return &SamePathRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SamePathRequestException) Code() string { +func (s *SamePathRequestException) Code() string { return "SamePathRequestException" } // Message returns the exception's message. -func (s SamePathRequestException) Message() string { +func (s *SamePathRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29798,22 +30602,22 @@ func (s SamePathRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SamePathRequestException) OrigErr() error { +func (s *SamePathRequestException) OrigErr() error { return nil } -func (s SamePathRequestException) Error() string { +func (s *SamePathRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SamePathRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SamePathRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SamePathRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *SamePathRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Information about the file mode changes. 
@@ -29872,8 +30676,8 @@ func (s *SetFileModeEntry) SetFilePath(v string) *SetFileModeEntry { // The source branch and destination branch for the pull request are the same. // You must specify different branches for the source and destination. type SourceAndDestinationAreSameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29890,17 +30694,17 @@ func (s SourceAndDestinationAreSameException) GoString() string { func newErrorSourceAndDestinationAreSameException(v protocol.ResponseMetadata) error { return &SourceAndDestinationAreSameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SourceAndDestinationAreSameException) Code() string { +func (s *SourceAndDestinationAreSameException) Code() string { return "SourceAndDestinationAreSameException" } // Message returns the exception's message. -func (s SourceAndDestinationAreSameException) Message() string { +func (s *SourceAndDestinationAreSameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29908,29 +30712,29 @@ func (s SourceAndDestinationAreSameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SourceAndDestinationAreSameException) OrigErr() error { +func (s *SourceAndDestinationAreSameException) OrigErr() error { return nil } -func (s SourceAndDestinationAreSameException) Error() string { +func (s *SourceAndDestinationAreSameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SourceAndDestinationAreSameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SourceAndDestinationAreSameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SourceAndDestinationAreSameException) RequestID() string { - return s.respMetadata.RequestID +func (s *SourceAndDestinationAreSameException) RequestID() string { + return s.RespMetadata.RequestID } // The commit cannot be created because no source files or file content have // been specified for the commit. type SourceFileOrContentRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29947,17 +30751,17 @@ func (s SourceFileOrContentRequiredException) GoString() string { func newErrorSourceFileOrContentRequiredException(v protocol.ResponseMetadata) error { return &SourceFileOrContentRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SourceFileOrContentRequiredException) Code() string { +func (s *SourceFileOrContentRequiredException) Code() string { return "SourceFileOrContentRequiredException" } // Message returns the exception's message. -func (s SourceFileOrContentRequiredException) Message() string { +func (s *SourceFileOrContentRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29965,22 +30769,22 @@ func (s SourceFileOrContentRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s SourceFileOrContentRequiredException) OrigErr() error { +func (s *SourceFileOrContentRequiredException) OrigErr() error { return nil } -func (s SourceFileOrContentRequiredException) Error() string { +func (s *SourceFileOrContentRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SourceFileOrContentRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SourceFileOrContentRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SourceFileOrContentRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *SourceFileOrContentRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a source file that is part of changes made in a commit. @@ -30128,8 +30932,8 @@ func (s *SymbolicLink) SetRelativePath(v string) *SymbolicLink { // A list of tag keys is required. The list cannot be empty or null. type TagKeysListRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30146,17 +30950,17 @@ func (s TagKeysListRequiredException) GoString() string { func newErrorTagKeysListRequiredException(v protocol.ResponseMetadata) error { return &TagKeysListRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagKeysListRequiredException) Code() string { +func (s *TagKeysListRequiredException) Code() string { return "TagKeysListRequiredException" } // Message returns the exception's message. -func (s TagKeysListRequiredException) Message() string { +func (s *TagKeysListRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30164,28 +30968,28 @@ func (s TagKeysListRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagKeysListRequiredException) OrigErr() error { +func (s *TagKeysListRequiredException) OrigErr() error { return nil } -func (s TagKeysListRequiredException) Error() string { +func (s *TagKeysListRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagKeysListRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagKeysListRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagKeysListRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagKeysListRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The tag policy is not valid. type TagPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30202,17 +31006,17 @@ func (s TagPolicyException) GoString() string { func newErrorTagPolicyException(v protocol.ResponseMetadata) error { return &TagPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s TagPolicyException) Code() string { +func (s *TagPolicyException) Code() string { return "TagPolicyException" } // Message returns the exception's message. -func (s TagPolicyException) Message() string { +func (s *TagPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30220,22 +31024,22 @@ func (s TagPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagPolicyException) OrigErr() error { +func (s *TagPolicyException) OrigErr() error { return nil } -func (s TagPolicyException) Error() string { +func (s *TagPolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagPolicyException) RequestID() string { + return s.RespMetadata.RequestID } type TagResourceInput struct { @@ -30307,8 +31111,8 @@ func (s TagResourceOutput) GoString() string { // A map of tags is required. type TagsMapRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30325,17 +31129,17 @@ func (s TagsMapRequiredException) GoString() string { func newErrorTagsMapRequiredException(v protocol.ResponseMetadata) error { return &TagsMapRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagsMapRequiredException) Code() string { +func (s *TagsMapRequiredException) Code() string { return "TagsMapRequiredException" } // Message returns the exception's message. -func (s TagsMapRequiredException) Message() string { +func (s *TagsMapRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30343,22 +31147,22 @@ func (s TagsMapRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagsMapRequiredException) OrigErr() error { +func (s *TagsMapRequiredException) OrigErr() error { return nil } -func (s TagsMapRequiredException) Error() string { +func (s *TagsMapRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagsMapRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagsMapRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagsMapRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagsMapRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about a target for a pull request. @@ -30432,8 +31236,8 @@ func (s *Target) SetSourceReference(v string) *Target { // target must contain the full values for the repository name, source branch, // and destination branch for the pull request. 
type TargetRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30450,17 +31254,17 @@ func (s TargetRequiredException) GoString() string { func newErrorTargetRequiredException(v protocol.ResponseMetadata) error { return &TargetRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TargetRequiredException) Code() string { +func (s *TargetRequiredException) Code() string { return "TargetRequiredException" } // Message returns the exception's message. -func (s TargetRequiredException) Message() string { +func (s *TargetRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30468,28 +31272,28 @@ func (s TargetRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TargetRequiredException) OrigErr() error { +func (s *TargetRequiredException) OrigErr() error { return nil } -func (s TargetRequiredException) Error() string { +func (s *TargetRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TargetRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TargetRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TargetRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *TargetRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // An array of target objects is required. It cannot be empty or null. type TargetsRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30506,17 +31310,17 @@ func (s TargetsRequiredException) GoString() string { func newErrorTargetsRequiredException(v protocol.ResponseMetadata) error { return &TargetsRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TargetsRequiredException) Code() string { +func (s *TargetsRequiredException) Code() string { return "TargetsRequiredException" } // Message returns the exception's message. -func (s TargetsRequiredException) Message() string { +func (s *TargetsRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30524,22 +31328,22 @@ func (s TargetsRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TargetsRequiredException) OrigErr() error { +func (s *TargetsRequiredException) OrigErr() error { return nil } -func (s TargetsRequiredException) Error() string { +func (s *TargetsRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TargetsRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TargetsRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s TargetsRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *TargetsRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the input of a test repository triggers operation. @@ -30647,8 +31451,8 @@ func (s *TestRepositoryTriggersOutput) SetSuccessfulExecutions(v []*string) *Tes // the tip of the source branch specified in your request. The pull request // might have been updated. Make sure that you have the latest changes. type TipOfSourceReferenceIsDifferentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30665,17 +31469,17 @@ func (s TipOfSourceReferenceIsDifferentException) GoString() string { func newErrorTipOfSourceReferenceIsDifferentException(v protocol.ResponseMetadata) error { return &TipOfSourceReferenceIsDifferentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TipOfSourceReferenceIsDifferentException) Code() string { +func (s *TipOfSourceReferenceIsDifferentException) Code() string { return "TipOfSourceReferenceIsDifferentException" } // Message returns the exception's message. -func (s TipOfSourceReferenceIsDifferentException) Message() string { +func (s *TipOfSourceReferenceIsDifferentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30683,30 +31487,30 @@ func (s TipOfSourceReferenceIsDifferentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TipOfSourceReferenceIsDifferentException) OrigErr() error { +func (s *TipOfSourceReferenceIsDifferentException) OrigErr() error { return nil } -func (s TipOfSourceReferenceIsDifferentException) Error() string { +func (s *TipOfSourceReferenceIsDifferentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TipOfSourceReferenceIsDifferentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TipOfSourceReferenceIsDifferentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TipOfSourceReferenceIsDifferentException) RequestID() string { - return s.respMetadata.RequestID +func (s *TipOfSourceReferenceIsDifferentException) RequestID() string { + return s.RespMetadata.RequestID } // The divergence between the tips of the provided commit specifiers is too // great to determine whether there might be any merge conflicts. Locally compare // the specifiers using git diff or a diff tool. type TipsDivergenceExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30723,17 +31527,17 @@ func (s TipsDivergenceExceededException) GoString() string { func newErrorTipsDivergenceExceededException(v protocol.ResponseMetadata) error { return &TipsDivergenceExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s TipsDivergenceExceededException) Code() string { +func (s *TipsDivergenceExceededException) Code() string { return "TipsDivergenceExceededException" } // Message returns the exception's message. -func (s TipsDivergenceExceededException) Message() string { +func (s *TipsDivergenceExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30741,28 +31545,28 @@ func (s TipsDivergenceExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TipsDivergenceExceededException) OrigErr() error { +func (s *TipsDivergenceExceededException) OrigErr() error { return nil } -func (s TipsDivergenceExceededException) Error() string { +func (s *TipsDivergenceExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TipsDivergenceExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TipsDivergenceExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TipsDivergenceExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *TipsDivergenceExceededException) RequestID() string { + return s.RespMetadata.RequestID } // A pull request title is required. It cannot be empty or null. type TitleRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30779,17 +31583,17 @@ func (s TitleRequiredException) GoString() string { func newErrorTitleRequiredException(v protocol.ResponseMetadata) error { return &TitleRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TitleRequiredException) Code() string { +func (s *TitleRequiredException) Code() string { return "TitleRequiredException" } // Message returns the exception's message. -func (s TitleRequiredException) Message() string { +func (s *TitleRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30797,28 +31601,28 @@ func (s TitleRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TitleRequiredException) OrigErr() error { +func (s *TitleRequiredException) OrigErr() error { return nil } -func (s TitleRequiredException) Error() string { +func (s *TitleRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TitleRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TitleRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TitleRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *TitleRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The maximum number of tags for an AWS CodeCommit resource has been exceeded. 
type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30835,17 +31639,17 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30853,22 +31657,22 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { +func (s *TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *TooManyTagsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -31378,7 +32182,7 @@ type UpdatePullRequestApprovalRuleContentInput struct { // Amazon Resource Name (ARN) of the IAM user or role. // // For more information about IAM ARNs, wildcards, and formats, see IAM Identifiers - // (https://docs.aws.amazon.com/iam/latest/UserGuide/reference_identifiers.html) + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) // in the IAM User Guide. 
// // NewRuleContent is a required field @@ -31977,6 +32781,14 @@ const ( ApprovalStateRevoke = "REVOKE" ) +// ApprovalState_Values returns all elements of the ApprovalState enum +func ApprovalState_Values() []string { + return []string{ + ApprovalStateApprove, + ApprovalStateRevoke, + } +} + const ( // ChangeTypeEnumA is a ChangeTypeEnum enum value ChangeTypeEnumA = "A" @@ -31988,6 +32800,15 @@ const ( ChangeTypeEnumD = "D" ) +// ChangeTypeEnum_Values returns all elements of the ChangeTypeEnum enum +func ChangeTypeEnum_Values() []string { + return []string{ + ChangeTypeEnumA, + ChangeTypeEnumM, + ChangeTypeEnumD, + } +} + const ( // ConflictDetailLevelTypeEnumFileLevel is a ConflictDetailLevelTypeEnum enum value ConflictDetailLevelTypeEnumFileLevel = "FILE_LEVEL" @@ -31996,6 +32817,14 @@ const ( ConflictDetailLevelTypeEnumLineLevel = "LINE_LEVEL" ) +// ConflictDetailLevelTypeEnum_Values returns all elements of the ConflictDetailLevelTypeEnum enum +func ConflictDetailLevelTypeEnum_Values() []string { + return []string{ + ConflictDetailLevelTypeEnumFileLevel, + ConflictDetailLevelTypeEnumLineLevel, + } +} + const ( // ConflictResolutionStrategyTypeEnumNone is a ConflictResolutionStrategyTypeEnum enum value ConflictResolutionStrategyTypeEnumNone = "NONE" @@ -32010,6 +32839,16 @@ const ( ConflictResolutionStrategyTypeEnumAutomerge = "AUTOMERGE" ) +// ConflictResolutionStrategyTypeEnum_Values returns all elements of the ConflictResolutionStrategyTypeEnum enum +func ConflictResolutionStrategyTypeEnum_Values() []string { + return []string{ + ConflictResolutionStrategyTypeEnumNone, + ConflictResolutionStrategyTypeEnumAcceptSource, + ConflictResolutionStrategyTypeEnumAcceptDestination, + ConflictResolutionStrategyTypeEnumAutomerge, + } +} + const ( // FileModeTypeEnumExecutable is a FileModeTypeEnum enum value FileModeTypeEnumExecutable = "EXECUTABLE" @@ -32021,6 +32860,15 @@ const ( FileModeTypeEnumSymlink = "SYMLINK" ) +// FileModeTypeEnum_Values returns all elements of the FileModeTypeEnum enum +func FileModeTypeEnum_Values() []string { + return []string{ + FileModeTypeEnumExecutable, + FileModeTypeEnumNormal, + FileModeTypeEnumSymlink, + } +} + const ( // MergeOptionTypeEnumFastForwardMerge is a MergeOptionTypeEnum enum value MergeOptionTypeEnumFastForwardMerge = "FAST_FORWARD_MERGE" @@ -32032,6 +32880,15 @@ const ( MergeOptionTypeEnumThreeWayMerge = "THREE_WAY_MERGE" ) +// MergeOptionTypeEnum_Values returns all elements of the MergeOptionTypeEnum enum +func MergeOptionTypeEnum_Values() []string { + return []string{ + MergeOptionTypeEnumFastForwardMerge, + MergeOptionTypeEnumSquashMerge, + MergeOptionTypeEnumThreeWayMerge, + } +} + const ( // ObjectTypeEnumFile is a ObjectTypeEnum enum value ObjectTypeEnumFile = "FILE" @@ -32046,6 +32903,16 @@ const ( ObjectTypeEnumSymbolicLink = "SYMBOLIC_LINK" ) +// ObjectTypeEnum_Values returns all elements of the ObjectTypeEnum enum +func ObjectTypeEnum_Values() []string { + return []string{ + ObjectTypeEnumFile, + ObjectTypeEnumDirectory, + ObjectTypeEnumGitLink, + ObjectTypeEnumSymbolicLink, + } +} + const ( // OrderEnumAscending is a OrderEnum enum value OrderEnumAscending = "ascending" @@ -32054,6 +32921,14 @@ const ( OrderEnumDescending = "descending" ) +// OrderEnum_Values returns all elements of the OrderEnum enum +func OrderEnum_Values() []string { + return []string{ + OrderEnumAscending, + OrderEnumDescending, + } +} + const ( // OverrideStatusOverride is a OverrideStatus enum value OverrideStatusOverride = "OVERRIDE" @@ -32062,6 +32937,14 @@ 
const ( OverrideStatusRevoke = "REVOKE" ) +// OverrideStatus_Values returns all elements of the OverrideStatus enum +func OverrideStatus_Values() []string { + return []string{ + OverrideStatusOverride, + OverrideStatusRevoke, + } +} + const ( // PullRequestEventTypePullRequestCreated is a PullRequestEventType enum value PullRequestEventTypePullRequestCreated = "PULL_REQUEST_CREATED" @@ -32091,6 +32974,21 @@ const ( PullRequestEventTypePullRequestApprovalStateChanged = "PULL_REQUEST_APPROVAL_STATE_CHANGED" ) +// PullRequestEventType_Values returns all elements of the PullRequestEventType enum +func PullRequestEventType_Values() []string { + return []string{ + PullRequestEventTypePullRequestCreated, + PullRequestEventTypePullRequestStatusChanged, + PullRequestEventTypePullRequestSourceReferenceUpdated, + PullRequestEventTypePullRequestMergeStateChanged, + PullRequestEventTypePullRequestApprovalRuleCreated, + PullRequestEventTypePullRequestApprovalRuleUpdated, + PullRequestEventTypePullRequestApprovalRuleDeleted, + PullRequestEventTypePullRequestApprovalRuleOverridden, + PullRequestEventTypePullRequestApprovalStateChanged, + } +} + const ( // PullRequestStatusEnumOpen is a PullRequestStatusEnum enum value PullRequestStatusEnumOpen = "OPEN" @@ -32099,6 +32997,14 @@ const ( PullRequestStatusEnumClosed = "CLOSED" ) +// PullRequestStatusEnum_Values returns all elements of the PullRequestStatusEnum enum +func PullRequestStatusEnum_Values() []string { + return []string{ + PullRequestStatusEnumOpen, + PullRequestStatusEnumClosed, + } +} + const ( // RelativeFileVersionEnumBefore is a RelativeFileVersionEnum enum value RelativeFileVersionEnumBefore = "BEFORE" @@ -32107,6 +33013,14 @@ const ( RelativeFileVersionEnumAfter = "AFTER" ) +// RelativeFileVersionEnum_Values returns all elements of the RelativeFileVersionEnum enum +func RelativeFileVersionEnum_Values() []string { + return []string{ + RelativeFileVersionEnumBefore, + RelativeFileVersionEnumAfter, + } +} + const ( // ReplacementTypeEnumKeepBase is a ReplacementTypeEnum enum value ReplacementTypeEnumKeepBase = "KEEP_BASE" @@ -32121,6 +33035,16 @@ const ( ReplacementTypeEnumUseNewContent = "USE_NEW_CONTENT" ) +// ReplacementTypeEnum_Values returns all elements of the ReplacementTypeEnum enum +func ReplacementTypeEnum_Values() []string { + return []string{ + ReplacementTypeEnumKeepBase, + ReplacementTypeEnumKeepSource, + ReplacementTypeEnumKeepDestination, + ReplacementTypeEnumUseNewContent, + } +} + const ( // RepositoryTriggerEventEnumAll is a RepositoryTriggerEventEnum enum value RepositoryTriggerEventEnumAll = "all" @@ -32135,6 +33059,16 @@ const ( RepositoryTriggerEventEnumDeleteReference = "deleteReference" ) +// RepositoryTriggerEventEnum_Values returns all elements of the RepositoryTriggerEventEnum enum +func RepositoryTriggerEventEnum_Values() []string { + return []string{ + RepositoryTriggerEventEnumAll, + RepositoryTriggerEventEnumUpdateReference, + RepositoryTriggerEventEnumCreateReference, + RepositoryTriggerEventEnumDeleteReference, + } +} + const ( // SortByEnumRepositoryName is a SortByEnum enum value SortByEnumRepositoryName = "repositoryName" @@ -32142,3 +33076,11 @@ const ( // SortByEnumLastModifiedDate is a SortByEnum enum value SortByEnumLastModifiedDate = "lastModifiedDate" ) + +// SortByEnum_Values returns all elements of the SortByEnum enum +func SortByEnum_Values() []string { + return []string{ + SortByEnumRepositoryName, + SortByEnumLastModifiedDate, + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go index 2025f901b..87186d5ed 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/doc.go @@ -224,6 +224,9 @@ // // * GetComment, which returns information about a comment on a commit. // +// * GetCommentReactions, which returns information about emoji reactions +// to comments. +// // * GetCommentsForComparedCommit, which returns information about comments // on the comparison between two commit specifiers in a repository. // @@ -232,6 +235,9 @@ // // * PostCommentReply, which creates a reply to a comment. // +// * PutCommentReaction, which creates or updates an emoji reaction to a +// comment. +// // * UpdateComment, which updates the content of a comment on a commit in // a repository. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go index 030dd1c47..44aa82763 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/errors.go @@ -117,7 +117,8 @@ const ( // ErrCodeBranchNameExistsException for service response error code // "BranchNameExistsException". // - // The specified branch name already exists. + // Cannot create the branch with the specified name because the commit conflicts + // with an existing branch with the same name. Branch names must be unique. ErrCodeBranchNameExistsException = "BranchNameExistsException" // ErrCodeBranchNameIsTagNameException for service response error code @@ -636,6 +637,19 @@ const ( // OPEN to CLOSED. ErrCodeInvalidPullRequestStatusUpdateException = "InvalidPullRequestStatusUpdateException" + // ErrCodeInvalidReactionUserArnException for service response error code + // "InvalidReactionUserArnException". + // + // The Amazon Resource Name (ARN) of the user or identity is not valid. + ErrCodeInvalidReactionUserArnException = "InvalidReactionUserArnException" + + // ErrCodeInvalidReactionValueException for service response error code + // "InvalidReactionValueException". + // + // The value of the reaction is not valid. For more information, see the AWS + // CodeCommit User Guide (https://docs.aws.amazon.com/codecommit/latest/userguide/welcome.html). + ErrCodeInvalidReactionValueException = "InvalidReactionValueException" + // ErrCodeInvalidReferenceNameException for service response error code // "InvalidReferenceNameException". // @@ -1023,6 +1037,19 @@ const ( // reference both a file and a folder. ErrCodePutFileEntryConflictException = "PutFileEntryConflictException" + // ErrCodeReactionLimitExceededException for service response error code + // "ReactionLimitExceededException". + // + // The number of reactions has been exceeded. Reactions are limited to one reaction + // per user for each individual comment ID. + ErrCodeReactionLimitExceededException = "ReactionLimitExceededException" + + // ErrCodeReactionValueRequiredException for service response error code + // "ReactionValueRequiredException". + // + // A reaction value is required. + ErrCodeReactionValueRequiredException = "ReactionValueRequiredException" + // ErrCodeReferenceDoesNotExistException for service response error code // "ReferenceDoesNotExistException". 
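The codecommit api.go hunk earlier in this patch generates a *_Values() helper for every enum. A minimal sketch of how such a helper might be consumed follows; the isApprovalState function and the main wrapper are illustrative and not part of the SDK or of this change.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/codecommit"
)

// isApprovalState reports whether v is one of the values returned by the
// generated ApprovalState_Values helper added in this vendor update.
func isApprovalState(v string) bool {
	for _, s := range codecommit.ApprovalState_Values() {
		if s == v {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isApprovalState(codecommit.ApprovalStateApprove)) // expected: true
	fmt.Println(isApprovalState("UNKNOWN"))                       // expected: false
}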
// @@ -1337,6 +1364,8 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidPullRequestIdException": newErrorInvalidPullRequestIdException, "InvalidPullRequestStatusException": newErrorInvalidPullRequestStatusException, "InvalidPullRequestStatusUpdateException": newErrorInvalidPullRequestStatusUpdateException, + "InvalidReactionUserArnException": newErrorInvalidReactionUserArnException, + "InvalidReactionValueException": newErrorInvalidReactionValueException, "InvalidReferenceNameException": newErrorInvalidReferenceNameException, "InvalidRelativeFileVersionEnumException": newErrorInvalidRelativeFileVersionEnumException, "InvalidReplacementContentException": newErrorInvalidReplacementContentException, @@ -1393,6 +1422,8 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "PullRequestIdRequiredException": newErrorPullRequestIdRequiredException, "PullRequestStatusRequiredException": newErrorPullRequestStatusRequiredException, "PutFileEntryConflictException": newErrorPutFileEntryConflictException, + "ReactionLimitExceededException": newErrorReactionLimitExceededException, + "ReactionValueRequiredException": newErrorReactionValueRequiredException, "ReferenceDoesNotExistException": newErrorReferenceDoesNotExistException, "ReferenceNameRequiredException": newErrorReferenceNameRequiredException, "ReferenceTypeNotSupportedException": newErrorReferenceTypeNotSupportedException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go index b7d90534a..2cb50a485 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go index 16e57401d..9cbd7b88d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go @@ -252,7 +252,7 @@ func (c *CodeDeploy) BatchGetApplicationsRequest(input *BatchGetApplicationsInpu // BatchGetApplications API operation for AWS CodeDeploy. // // Gets information about one or more applications. The maximum number of applications -// that can be returned is 25. +// that can be returned is 100. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -477,7 +477,8 @@ func (c *CodeDeploy) BatchGetDeploymentInstancesRequest(input *BatchGetDeploymen // The maximum number of names or IDs allowed for this request (100) was exceeded. // // * InvalidComputePlatformException -// The computePlatform is invalid. The computePlatform should be Lambda or Server. +// The computePlatform is invalid. The computePlatform should be Lambda, Server, +// or ECS. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/BatchGetDeploymentInstances // @@ -554,7 +555,8 @@ func (c *CodeDeploy) BatchGetDeploymentTargetsRequest(input *BatchGetDeploymentT // BatchGetDeploymentInstances. 
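The errors.go hunk above both declares the new reaction-related error codes and registers their constructors in exceptionFromCode. Below is a hedged sketch of handling them with the SDK's usual awserr type assertion; the PutCommentReaction call is taken from the operation list in the doc.go hunk, and the empty input is a placeholder only.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codecommit"
)

func main() {
	svc := codecommit.New(session.Must(session.NewSession()))

	// Placeholder call: a real request would set the comment ID and the
	// reaction value on the input before sending it.
	_, err := svc.PutCommentReaction(&codecommit.PutCommentReactionInput{})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case codecommit.ErrCodeReactionLimitExceededException:
			fmt.Println("only one reaction per user per comment is allowed")
		case codecommit.ErrCodeReactionValueRequiredException:
			fmt.Println("a reaction value is required")
		default:
			fmt.Println("unhandled error:", aerr.Message())
		}
	}
}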
The maximum number of targets that can be returned // is 25. // -// The type of targets returned depends on the deployment's compute platform: +// The type of targets returned depends on the deployment's compute platform +// or deployment method: // // * EC2/On-premises: Information about EC2 instance targets. // @@ -562,6 +564,9 @@ func (c *CodeDeploy) BatchGetDeploymentTargetsRequest(input *BatchGetDeploymentT // // * Amazon ECS: Information about Amazon ECS service targets. // +// * CloudFormation: Information about targets of blue/green deployments +// initiated by a CloudFormation stack update. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -597,6 +602,9 @@ func (c *CodeDeploy) BatchGetDeploymentTargetsRequest(input *BatchGetDeploymentT // must have exactly one item. This exception does not apply to EC2/On-premises // deployments. // +// * InstanceDoesNotExistException +// The specified instance does not exist in the deployment group. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/BatchGetDeploymentTargets func (c *CodeDeploy) BatchGetDeploymentTargets(input *BatchGetDeploymentTargetsInput) (*BatchGetDeploymentTargetsOutput, error) { req, out := c.BatchGetDeploymentTargetsRequest(input) @@ -965,7 +973,8 @@ func (c *CodeDeploy) CreateApplicationRequest(input *CreateApplicationInput) (re // More applications were attempted to be created than are allowed. // // * InvalidComputePlatformException -// The computePlatform is invalid. The computePlatform should be Lambda or Server. +// The computePlatform is invalid. The computePlatform should be Lambda, Server, +// or ECS. // // * InvalidTagsToAddException // The specified tags are not valid. @@ -1133,6 +1142,10 @@ func (c *CodeDeploy) CreateDeploymentRequest(input *CreateDeploymentInput) (req // * InvalidGitHubAccountTokenException // The GitHub token is not valid. // +// * InvalidTrafficRoutingConfigurationException +// The configuration that specifies how traffic is routed during a deployment +// is invalid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/CreateDeployment func (c *CodeDeploy) CreateDeployment(input *CreateDeploymentInput) (*CreateDeploymentOutput, error) { req, out := c.CreateDeploymentRequest(input) @@ -1217,7 +1230,7 @@ func (c *CodeDeploy) CreateDeploymentConfigRequest(input *CreateDeploymentConfig // // * DeploymentConfigAlreadyExistsException // A deployment configuration with the specified name with the IAM user or AWS -// account already exists . +// account already exists. // // * InvalidMinimumHealthyHostValueException // The minimum healthy instance value was specified in an invalid format. @@ -1226,7 +1239,8 @@ func (c *CodeDeploy) CreateDeploymentConfigRequest(input *CreateDeploymentConfig // The deployment configurations limit was exceeded. // // * InvalidComputePlatformException -// The computePlatform is invalid. The computePlatform should be Lambda or Server. +// The computePlatform is invalid. The computePlatform should be Lambda, Server, +// or ECS. // // * InvalidTrafficRoutingConfigurationException // The configuration that specifies how traffic is routed during a deployment @@ -1427,6 +1441,10 @@ func (c *CodeDeploy) CreateDeploymentGroupRequest(input *CreateDeploymentGroupIn // * InvalidTagsToAddException // The specified tags are not valid. 
// +// * InvalidTrafficRoutingConfigurationException +// The configuration that specifies how traffic is routed during a deployment +// is invalid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/CreateDeploymentGroup func (c *CodeDeploy) CreateDeploymentGroup(input *CreateDeploymentGroupInput) (*CreateDeploymentGroupOutput, error) { req, out := c.CreateDeploymentGroupRequest(input) @@ -1813,6 +1831,81 @@ func (c *CodeDeploy) DeleteGitHubAccountTokenWithContext(ctx aws.Context, input return out, req.Send() } +const opDeleteResourcesByExternalId = "DeleteResourcesByExternalId" + +// DeleteResourcesByExternalIdRequest generates a "aws/request.Request" representing the +// client's request for the DeleteResourcesByExternalId operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteResourcesByExternalId for more information on using the DeleteResourcesByExternalId +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteResourcesByExternalIdRequest method. +// req, resp := client.DeleteResourcesByExternalIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeleteResourcesByExternalId +func (c *CodeDeploy) DeleteResourcesByExternalIdRequest(input *DeleteResourcesByExternalIdInput) (req *request.Request, output *DeleteResourcesByExternalIdOutput) { + op := &request.Operation{ + Name: opDeleteResourcesByExternalId, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteResourcesByExternalIdInput{} + } + + output = &DeleteResourcesByExternalIdOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteResourcesByExternalId API operation for AWS CodeDeploy. +// +// Deletes resources linked to an external ID. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS CodeDeploy's +// API operation DeleteResourcesByExternalId for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/DeleteResourcesByExternalId +func (c *CodeDeploy) DeleteResourcesByExternalId(input *DeleteResourcesByExternalIdInput) (*DeleteResourcesByExternalIdOutput, error) { + req, out := c.DeleteResourcesByExternalIdRequest(input) + return out, req.Send() +} + +// DeleteResourcesByExternalIdWithContext is the same as DeleteResourcesByExternalId with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteResourcesByExternalId for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *CodeDeploy) DeleteResourcesByExternalIdWithContext(ctx aws.Context, input *DeleteResourcesByExternalIdInput, opts ...request.Option) (*DeleteResourcesByExternalIdOutput, error) { + req, out := c.DeleteResourcesByExternalIdRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeregisterOnPremisesInstance = "DeregisterOnPremisesInstance" // DeregisterOnPremisesInstanceRequest generates a "aws/request.Request" representing the @@ -2229,7 +2322,8 @@ func (c *CodeDeploy) GetDeploymentConfigRequest(input *GetDeploymentConfigInput) // The deployment configuration does not exist with the IAM user or AWS account. // // * InvalidComputePlatformException -// The computePlatform is invalid. The computePlatform should be Lambda or Server. +// The computePlatform is invalid. The computePlatform should be Lambda, Server, +// or ECS. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/GetDeploymentConfig func (c *CodeDeploy) GetDeploymentConfig(input *GetDeploymentConfigInput) (*GetDeploymentConfigOutput, error) { @@ -2428,7 +2522,8 @@ func (c *CodeDeploy) GetDeploymentInstanceRequest(input *GetDeploymentInstanceIn // The on-premises instance name was specified in an invalid format. // // * InvalidComputePlatformException -// The computePlatform is invalid. The computePlatform should be Lambda or Server. +// The computePlatform is invalid. The computePlatform should be Lambda, Server, +// or ECS. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/GetDeploymentInstance // @@ -3328,7 +3423,8 @@ func (c *CodeDeploy) ListDeploymentInstancesRequest(input *ListDeploymentInstanc // The target filter name is invalid. // // * InvalidComputePlatformException -// The computePlatform is invalid. The computePlatform should be Lambda or Server. +// The computePlatform is invalid. The computePlatform should be Lambda, Server, +// or ECS. // // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/ListDeploymentInstances // @@ -3603,6 +3699,12 @@ func (c *CodeDeploy) ListDeploymentsRequest(input *ListDeploymentsInput) (req *r // * InvalidNextTokenException // The next token was specified in an invalid format. // +// * InvalidExternalIdException +// The external ID was specified in an invalid format. +// +// * InvalidInputException +// The input was specified in an invalid format. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/ListDeployments func (c *CodeDeploy) ListDeployments(input *ListDeploymentsInput) (*ListDeploymentsOutput, error) { req, out := c.ListDeploymentsRequest(input) @@ -3895,8 +3997,9 @@ func (c *CodeDeploy) ListTagsForResourceRequest(input *ListTagsForResourceInput) // ListTagsForResource API operation for AWS CodeDeploy. // -// Returns a list of tags for the resource identified by a specified ARN. Tags -// are used to organize and categorize your CodeDeploy resources. +// Returns a list of tags for the resource identified by a specified Amazon +// Resource Name (ARN). Tags are used to organize and categorize your CodeDeploy +// resources. // // Returns awserr.Error for service API and SDK errors. 
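For the DeleteResourcesByExternalId operation added above, a minimal usage sketch under the assumption that credentials and region come from the default session chain; the external ID value is a placeholder.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

func main() {
	svc := codedeploy.New(session.Must(session.NewSession()))

	// Deletes CodeDeploy resources linked to the given external ID, for
	// example a CloudFormation stack ID. The value below is illustrative.
	_, err := svc.DeleteResourcesByExternalId(&codedeploy.DeleteResourcesByExternalIdInput{
		ExternalId: aws.String("example-external-id"),
	})
	if err != nil {
		fmt.Println("DeleteResourcesByExternalId failed:", err)
		return
	}
	fmt.Println("resources linked to the external ID were deleted")
}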
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3982,9 +4085,15 @@ func (c *CodeDeploy) PutLifecycleEventHookExecutionStatusRequest(input *PutLifec // PutLifecycleEventHookExecutionStatus API operation for AWS CodeDeploy. // -// Sets the result of a Lambda validation function. The function validates one -// or both lifecycle events (BeforeAllowTraffic and AfterAllowTraffic) and returns -// Succeeded or Failed. +// Sets the result of a Lambda validation function. The function validates lifecycle +// hooks during a deployment that uses the AWS Lambda or Amazon ECS compute +// platform. For AWS Lambda deployments, the available lifecycle hooks are BeforeAllowTraffic +// and AfterAllowTraffic. For Amazon ECS deployments, the available lifecycle +// hooks are BeforeInstall, AfterInstall, AfterAllowTestTraffic, BeforeAllowTraffic, +// and AfterAllowTraffic. Lambda validation functions return Succeeded or Failed. +// For more information, see AppSpec 'hooks' Section for an AWS Lambda Deployment +// (https://docs.aws.amazon.com/codedeploy/latest/userguide/reference-appspec-file-structure-hooks.html#appspec-hooks-lambda) +// and AppSpec 'hooks' Section for an Amazon ECS Deployment (https://docs.aws.amazon.com/codedeploy/latest/userguide/reference-appspec-file-structure-hooks.html#appspec-hooks-ecs). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4520,6 +4629,9 @@ func (c *CodeDeploy) StopDeploymentRequest(input *StopDeploymentInput) (req *req // * InvalidDeploymentIdException // At least one of the deployment IDs was specified in an invalid format. // +// * UnsupportedActionForDeploymentTypeException +// A call was submitted that is not supported for the specified deployment type. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/StopDeployment func (c *CodeDeploy) StopDeployment(input *StopDeploymentInput) (*StopDeploymentOutput, error) { req, out := c.StopDeploymentRequest(input) @@ -4691,7 +4803,7 @@ func (c *CodeDeploy) UntagResourceRequest(input *UntagResourceInput) (req *reque // UntagResource API operation for AWS CodeDeploy. // // Disassociates a resource from a list of tags. The resource is identified -// by the ResourceArn input parameter. The tags are identfied by the list of +// by the ResourceArn input parameter. The tags are identified by the list of // keys in the TagKeys input parameter. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5006,6 +5118,10 @@ func (c *CodeDeploy) UpdateDeploymentGroupRequest(input *UpdateDeploymentGroupIn // The Amazon ECS service is associated with more than one deployment groups. // An Amazon ECS service can be associated with only one deployment group. // +// * InvalidTrafficRoutingConfigurationException +// The configuration that specifies how traffic is routed during a deployment +// is invalid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/codedeploy-2014-10-06/UpdateDeploymentGroup func (c *CodeDeploy) UpdateDeploymentGroup(input *UpdateDeploymentGroupInput) (*UpdateDeploymentGroupOutput, error) { req, out := c.UpdateDeploymentGroupRequest(input) @@ -5176,8 +5292,8 @@ func (s *AlarmConfiguration) SetIgnorePollAlarmFailure(v bool) *AlarmConfigurati // The maximum number of alarms for a deployment group (10) was exceeded. 
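The expanded PutLifecycleEventHookExecutionStatus documentation above describes a validation function reporting Succeeded or Failed for a lifecycle hook. A sketch of that call is below; the input field names and the LifecycleEventStatusSucceeded constant come from the generated SDK and are not shown in this hunk, so treat them as assumptions.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

// reportHookResult marks a lifecycle event hook execution as Succeeded, the
// pattern described for Lambda validation functions in the doc update above.
// The input field names are assumed from the generated SDK type.
func reportHookResult(svc *codedeploy.CodeDeploy, deploymentID, hookExecutionID string) error {
	_, err := svc.PutLifecycleEventHookExecutionStatus(&codedeploy.PutLifecycleEventHookExecutionStatusInput{
		DeploymentId:                  aws.String(deploymentID),
		LifecycleEventHookExecutionId: aws.String(hookExecutionID),
		Status:                        aws.String(codedeploy.LifecycleEventStatusSucceeded),
	})
	return err
}

func main() {
	svc := codedeploy.New(session.Must(session.NewSession()))
	if err := reportHookResult(svc, "d-EXAMPLE", "hook-execution-id"); err != nil {
		fmt.Println("failed to report hook result:", err)
	}
}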
type AlarmsLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5194,17 +5310,17 @@ func (s AlarmsLimitExceededException) GoString() string { func newErrorAlarmsLimitExceededException(v protocol.ResponseMetadata) error { return &AlarmsLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AlarmsLimitExceededException) Code() string { +func (s *AlarmsLimitExceededException) Code() string { return "AlarmsLimitExceededException" } // Message returns the exception's message. -func (s AlarmsLimitExceededException) Message() string { +func (s *AlarmsLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5212,22 +5328,22 @@ func (s AlarmsLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AlarmsLimitExceededException) OrigErr() error { +func (s *AlarmsLimitExceededException) OrigErr() error { return nil } -func (s AlarmsLimitExceededException) Error() string { +func (s *AlarmsLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AlarmsLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AlarmsLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AlarmsLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *AlarmsLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // A revision for an AWS Lambda or Amazon ECS deployment that is a YAML-formatted @@ -5280,8 +5396,8 @@ func (s *AppSpecContent) SetSha256(v string) *AppSpecContent { // An application with the specified name with the IAM user or AWS account already // exists. type ApplicationAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5298,17 +5414,17 @@ func (s ApplicationAlreadyExistsException) GoString() string { func newErrorApplicationAlreadyExistsException(v protocol.ResponseMetadata) error { return &ApplicationAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApplicationAlreadyExistsException) Code() string { +func (s *ApplicationAlreadyExistsException) Code() string { return "ApplicationAlreadyExistsException" } // Message returns the exception's message. -func (s ApplicationAlreadyExistsException) Message() string { +func (s *ApplicationAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5316,28 +5432,28 @@ func (s ApplicationAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ApplicationAlreadyExistsException) OrigErr() error { +func (s *ApplicationAlreadyExistsException) OrigErr() error { return nil } -func (s ApplicationAlreadyExistsException) Error() string { +func (s *ApplicationAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApplicationAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApplicationAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApplicationAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApplicationAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The application does not exist with the IAM user or AWS account. type ApplicationDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5354,17 +5470,17 @@ func (s ApplicationDoesNotExistException) GoString() string { func newErrorApplicationDoesNotExistException(v protocol.ResponseMetadata) error { return &ApplicationDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApplicationDoesNotExistException) Code() string { +func (s *ApplicationDoesNotExistException) Code() string { return "ApplicationDoesNotExistException" } // Message returns the exception's message. -func (s ApplicationDoesNotExistException) Message() string { +func (s *ApplicationDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5372,22 +5488,22 @@ func (s ApplicationDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApplicationDoesNotExistException) OrigErr() error { +func (s *ApplicationDoesNotExistException) OrigErr() error { return nil } -func (s ApplicationDoesNotExistException) Error() string { +func (s *ApplicationDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApplicationDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApplicationDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApplicationDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApplicationDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // Information about an application. @@ -5463,8 +5579,8 @@ func (s *ApplicationInfo) SetLinkedToGitHub(v bool) *ApplicationInfo { // More applications were attempted to be created than are allowed. 
type ApplicationLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5481,17 +5597,17 @@ func (s ApplicationLimitExceededException) GoString() string { func newErrorApplicationLimitExceededException(v protocol.ResponseMetadata) error { return &ApplicationLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApplicationLimitExceededException) Code() string { +func (s *ApplicationLimitExceededException) Code() string { return "ApplicationLimitExceededException" } // Message returns the exception's message. -func (s ApplicationLimitExceededException) Message() string { +func (s *ApplicationLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5499,28 +5615,28 @@ func (s ApplicationLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApplicationLimitExceededException) OrigErr() error { +func (s *ApplicationLimitExceededException) OrigErr() error { return nil } -func (s ApplicationLimitExceededException) Error() string { +func (s *ApplicationLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApplicationLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApplicationLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApplicationLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApplicationLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The minimum number of required application names was not specified. type ApplicationNameRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5537,17 +5653,17 @@ func (s ApplicationNameRequiredException) GoString() string { func newErrorApplicationNameRequiredException(v protocol.ResponseMetadata) error { return &ApplicationNameRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApplicationNameRequiredException) Code() string { +func (s *ApplicationNameRequiredException) Code() string { return "ApplicationNameRequiredException" } // Message returns the exception's message. -func (s ApplicationNameRequiredException) Message() string { +func (s *ApplicationNameRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5555,29 +5671,29 @@ func (s ApplicationNameRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApplicationNameRequiredException) OrigErr() error { +func (s *ApplicationNameRequiredException) OrigErr() error { return nil } -func (s ApplicationNameRequiredException) Error() string { +func (s *ApplicationNameRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ApplicationNameRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApplicationNameRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApplicationNameRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApplicationNameRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The specified ARN is not supported. For example, it might be an ARN for a // resource that is not expected. type ArnNotSupportedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5594,17 +5710,17 @@ func (s ArnNotSupportedException) GoString() string { func newErrorArnNotSupportedException(v protocol.ResponseMetadata) error { return &ArnNotSupportedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ArnNotSupportedException) Code() string { +func (s *ArnNotSupportedException) Code() string { return "ArnNotSupportedException" } // Message returns the exception's message. -func (s ArnNotSupportedException) Message() string { +func (s *ArnNotSupportedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5612,22 +5728,22 @@ func (s ArnNotSupportedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ArnNotSupportedException) OrigErr() error { +func (s *ArnNotSupportedException) OrigErr() error { return nil } -func (s ArnNotSupportedException) Error() string { +func (s *ArnNotSupportedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ArnNotSupportedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ArnNotSupportedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ArnNotSupportedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ArnNotSupportedException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a configuration for automatically rolling back to a previous @@ -5803,7 +5919,7 @@ type BatchGetApplicationsInput struct { _ struct{} `type:"structure"` // A list of application names separated by spaces. The maximum number of application - // names you can specify is 25. + // names you can specify is 100. // // ApplicationNames is a required field ApplicationNames []*string `locationName:"applicationNames" type:"list" required:"true"` @@ -6058,6 +6174,9 @@ type BatchGetDeploymentTargetsInput struct { // * For deployments that use the Amazon ECS compute platform, the target // IDs are pairs of Amazon ECS clusters and services specified using the // format :. Their target type is ecsTarget. + // + // * For deployments that are deployed with AWS CloudFormation, the target + // IDs are CloudFormation stack IDs. Their target type is cloudFormationTarget. TargetIds []*string `locationName:"targetIds" type:"list"` } @@ -6096,6 +6215,9 @@ type BatchGetDeploymentTargetsOutput struct { // function. // // * Amazon ECS: The target object is an Amazon ECS service. 
+ // + // * CloudFormation: The target object is an AWS CloudFormation blue/green + // deployment. DeploymentTargets []*DeploymentTarget `locationName:"deploymentTargets" type:"list"` } @@ -6245,8 +6367,8 @@ func (s *BatchGetOnPremisesInstancesOutput) SetInstanceInfos(v []*InstanceInfo) // The maximum number of names or IDs allowed for this request (100) was exceeded. type BatchLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6263,17 +6385,17 @@ func (s BatchLimitExceededException) GoString() string { func newErrorBatchLimitExceededException(v protocol.ResponseMetadata) error { return &BatchLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BatchLimitExceededException) Code() string { +func (s *BatchLimitExceededException) Code() string { return "BatchLimitExceededException" } // Message returns the exception's message. -func (s BatchLimitExceededException) Message() string { +func (s *BatchLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6281,22 +6403,22 @@ func (s BatchLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BatchLimitExceededException) OrigErr() error { +func (s *BatchLimitExceededException) OrigErr() error { return nil } -func (s BatchLimitExceededException) Error() string { +func (s *BatchLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BatchLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BatchLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BatchLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *BatchLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Information about blue/green deployment options for a deployment group. @@ -6394,8 +6516,8 @@ func (s *BlueInstanceTerminationOption) SetTerminationWaitTimeInMinutes(v int64) // A bucket name is required, but was not provided. type BucketNameFilterRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6412,17 +6534,17 @@ func (s BucketNameFilterRequiredException) GoString() string { func newErrorBucketNameFilterRequiredException(v protocol.ResponseMetadata) error { return &BucketNameFilterRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BucketNameFilterRequiredException) Code() string { +func (s *BucketNameFilterRequiredException) Code() string { return "BucketNameFilterRequiredException" } // Message returns the exception's message. 
-func (s BucketNameFilterRequiredException) Message() string { +func (s *BucketNameFilterRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6430,22 +6552,105 @@ func (s BucketNameFilterRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BucketNameFilterRequiredException) OrigErr() error { +func (s *BucketNameFilterRequiredException) OrigErr() error { return nil } -func (s BucketNameFilterRequiredException) Error() string { +func (s *BucketNameFilterRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BucketNameFilterRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BucketNameFilterRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BucketNameFilterRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *BucketNameFilterRequiredException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Information about the target to be updated by an AWS CloudFormation blue/green +// deployment. This target type is used for all deployments initiated by a CloudFormation +// stack update. +type CloudFormationTarget struct { + _ struct{} `type:"structure"` + + // The unique ID of an AWS CloudFormation blue/green deployment. + DeploymentId *string `locationName:"deploymentId" type:"string"` + + // The date and time when the target application was updated by an AWS CloudFormation + // blue/green deployment. + LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp"` + + // The lifecycle events of the AWS CloudFormation blue/green deployment to this + // target application. + LifecycleEvents []*LifecycleEvent `locationName:"lifecycleEvents" type:"list"` + + // The resource type for the AWS CloudFormation blue/green deployment. + ResourceType *string `locationName:"resourceType" type:"string"` + + // The status of an AWS CloudFormation blue/green deployment's target application. + Status *string `locationName:"status" type:"string" enum:"TargetStatus"` + + // The unique ID of a deployment target that has a type of CloudFormationTarget. + TargetId *string `locationName:"targetId" type:"string"` + + // The percentage of production traffic that the target version of an AWS CloudFormation + // blue/green deployment receives. + TargetVersionWeight *float64 `locationName:"targetVersionWeight" type:"double"` +} + +// String returns the string representation +func (s CloudFormationTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFormationTarget) GoString() string { + return s.String() +} + +// SetDeploymentId sets the DeploymentId field's value. +func (s *CloudFormationTarget) SetDeploymentId(v string) *CloudFormationTarget { + s.DeploymentId = &v + return s +} + +// SetLastUpdatedAt sets the LastUpdatedAt field's value. +func (s *CloudFormationTarget) SetLastUpdatedAt(v time.Time) *CloudFormationTarget { + s.LastUpdatedAt = &v + return s +} + +// SetLifecycleEvents sets the LifecycleEvents field's value. +func (s *CloudFormationTarget) SetLifecycleEvents(v []*LifecycleEvent) *CloudFormationTarget { + s.LifecycleEvents = v + return s +} + +// SetResourceType sets the ResourceType field's value. 
+func (s *CloudFormationTarget) SetResourceType(v string) *CloudFormationTarget { + s.ResourceType = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CloudFormationTarget) SetStatus(v string) *CloudFormationTarget { + s.Status = &v + return s +} + +// SetTargetId sets the TargetId field's value. +func (s *CloudFormationTarget) SetTargetId(v string) *CloudFormationTarget { + s.TargetId = &v + return s +} + +// SetTargetVersionWeight sets the TargetVersionWeight field's value. +func (s *CloudFormationTarget) SetTargetVersionWeight(v float64) *CloudFormationTarget { + s.TargetVersionWeight = &v + return s } type ContinueDeploymentInput struct { @@ -6455,9 +6660,9 @@ type ContinueDeploymentInput struct { // traffic to the replacement environment. DeploymentId *string `locationName:"deploymentId" type:"string"` - // The status of the deployment's waiting period. READY_WAIT indicates the deployment - // is ready to start shifting traffic. TERMINATION_WAIT indicates the traffic - // is shifted, but the original target is not terminated. + // The status of the deployment's waiting period. READY_WAIT indicates that + // the deployment is ready to start shifting traffic. TERMINATION_WAIT indicates + // that the traffic is shifted, but the original target is not terminated. DeploymentWaitType *string `locationName:"deploymentWaitType" type:"string" enum:"DeploymentWaitType"` } @@ -6608,7 +6813,7 @@ type CreateDeploymentConfigInput struct { // * FLEET_PERCENT: The value parameter represents the minimum number of // healthy instances as a percentage of the total number of instances in // the deployment. If you specify FLEET_PERCENT, at the start of the deployment, - // AWS CodeDeploy converts the percentage to the equivalent number of instance + // AWS CodeDeploy converts the percentage to the equivalent number of instances // and rounds up fractional instances. // // The value parameter takes an integer. @@ -6729,7 +6934,7 @@ type CreateDeploymentGroupInput struct { // group. // // For more information about the predefined deployment configurations in AWS - // CodeDeploy, see Working with Deployment Groups in AWS CodeDeploy (https://docs.aws.amazon.com/codedeploy/latest/userguide/deployment-configurations.html) + // CodeDeploy, see Working with Deployment Configurations in CodeDeploy (https://docs.aws.amazon.com/codedeploy/latest/userguide/deployment-configurations.html) // in the AWS CodeDeploy User Guide. DeploymentConfigName *string `locationName:"deploymentConfigName" min:"1" type:"string"` @@ -6771,8 +6976,8 @@ type CreateDeploymentGroupInput struct { // Cannot be used in the same call as onPremisesInstanceTagFilters. OnPremisesTagSet *OnPremisesTagSet `locationName:"onPremisesTagSet" type:"structure"` - // A service role ARN that allows AWS CodeDeploy to act on the user's behalf - // when interacting with AWS services. + // A service role Amazon Resource Name (ARN) that allows AWS CodeDeploy to act + // on the user's behalf when interacting with AWS services. // // ServiceRoleArn is a required field ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string" required:"true"` @@ -7400,10 +7605,48 @@ func (s *DeleteGitHubAccountTokenOutput) SetTokenName(v string) *DeleteGitHubAcc return s } +type DeleteResourcesByExternalIdInput struct { + _ struct{} `type:"structure"` + + // The unique ID of an external resource (for example, a CloudFormation stack + // ID) that is linked to one or more CodeDeploy resources. 
+ ExternalId *string `locationName:"externalId" type:"string"` +} + +// String returns the string representation +func (s DeleteResourcesByExternalIdInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResourcesByExternalIdInput) GoString() string { + return s.String() +} + +// SetExternalId sets the ExternalId field's value. +func (s *DeleteResourcesByExternalIdInput) SetExternalId(v string) *DeleteResourcesByExternalIdInput { + s.ExternalId = &v + return s +} + +type DeleteResourcesByExternalIdOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteResourcesByExternalIdOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResourcesByExternalIdOutput) GoString() string { + return s.String() +} + // The deployment is already complete. type DeploymentAlreadyCompletedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7420,17 +7663,17 @@ func (s DeploymentAlreadyCompletedException) GoString() string { func newErrorDeploymentAlreadyCompletedException(v protocol.ResponseMetadata) error { return &DeploymentAlreadyCompletedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentAlreadyCompletedException) Code() string { +func (s *DeploymentAlreadyCompletedException) Code() string { return "DeploymentAlreadyCompletedException" } // Message returns the exception's message. -func (s DeploymentAlreadyCompletedException) Message() string { +func (s *DeploymentAlreadyCompletedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7438,29 +7681,29 @@ func (s DeploymentAlreadyCompletedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentAlreadyCompletedException) OrigErr() error { +func (s *DeploymentAlreadyCompletedException) OrigErr() error { return nil } -func (s DeploymentAlreadyCompletedException) Error() string { +func (s *DeploymentAlreadyCompletedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentAlreadyCompletedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentAlreadyCompletedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentAlreadyCompletedException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentAlreadyCompletedException) RequestID() string { + return s.RespMetadata.RequestID } // A deployment configuration with the specified name with the IAM user or AWS -// account already exists . +// account already exists. 
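For the new CloudFormationTarget type and its setters added above, a small illustrative sketch that builds a value and reads it back; all field values are placeholders, and the TargetStatusSucceeded constant is assumed from the SDK's TargetStatus enum rather than shown in this hunk.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/service/codedeploy"
)

func main() {
	// Build a CloudFormationTarget with the setters added in this hunk.
	// In practice these values come back from BatchGetDeploymentTargets.
	t := (&codedeploy.CloudFormationTarget{}).
		SetDeploymentId("d-EXAMPLE").
		SetTargetId("stack-id-EXAMPLE").
		SetResourceType("AWS::CloudFormation::Stack").
		SetStatus(codedeploy.TargetStatusSucceeded).
		SetTargetVersionWeight(100).
		SetLastUpdatedAt(time.Now())

	fmt.Printf("target %s is %s with weight %.0f\n",
		*t.TargetId, *t.Status, *t.TargetVersionWeight)
}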
type DeploymentConfigAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7477,17 +7720,17 @@ func (s DeploymentConfigAlreadyExistsException) GoString() string { func newErrorDeploymentConfigAlreadyExistsException(v protocol.ResponseMetadata) error { return &DeploymentConfigAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentConfigAlreadyExistsException) Code() string { +func (s *DeploymentConfigAlreadyExistsException) Code() string { return "DeploymentConfigAlreadyExistsException" } // Message returns the exception's message. -func (s DeploymentConfigAlreadyExistsException) Message() string { +func (s *DeploymentConfigAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7495,28 +7738,28 @@ func (s DeploymentConfigAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentConfigAlreadyExistsException) OrigErr() error { +func (s *DeploymentConfigAlreadyExistsException) OrigErr() error { return nil } -func (s DeploymentConfigAlreadyExistsException) Error() string { +func (s *DeploymentConfigAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentConfigAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentConfigAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentConfigAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentConfigAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The deployment configuration does not exist with the IAM user or AWS account. type DeploymentConfigDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7533,17 +7776,17 @@ func (s DeploymentConfigDoesNotExistException) GoString() string { func newErrorDeploymentConfigDoesNotExistException(v protocol.ResponseMetadata) error { return &DeploymentConfigDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentConfigDoesNotExistException) Code() string { +func (s *DeploymentConfigDoesNotExistException) Code() string { return "DeploymentConfigDoesNotExistException" } // Message returns the exception's message. -func (s DeploymentConfigDoesNotExistException) Message() string { +func (s *DeploymentConfigDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7551,28 +7794,28 @@ func (s DeploymentConfigDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s DeploymentConfigDoesNotExistException) OrigErr() error { +func (s *DeploymentConfigDoesNotExistException) OrigErr() error { return nil } -func (s DeploymentConfigDoesNotExistException) Error() string { +func (s *DeploymentConfigDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentConfigDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentConfigDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentConfigDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentConfigDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The deployment configuration is still in use. type DeploymentConfigInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7589,17 +7832,17 @@ func (s DeploymentConfigInUseException) GoString() string { func newErrorDeploymentConfigInUseException(v protocol.ResponseMetadata) error { return &DeploymentConfigInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentConfigInUseException) Code() string { +func (s *DeploymentConfigInUseException) Code() string { return "DeploymentConfigInUseException" } // Message returns the exception's message. -func (s DeploymentConfigInUseException) Message() string { +func (s *DeploymentConfigInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7607,22 +7850,22 @@ func (s DeploymentConfigInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentConfigInUseException) OrigErr() error { +func (s *DeploymentConfigInUseException) OrigErr() error { return nil } -func (s DeploymentConfigInUseException) Error() string { +func (s *DeploymentConfigInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentConfigInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentConfigInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentConfigInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentConfigInUseException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a deployment configuration. @@ -7644,8 +7887,8 @@ type DeploymentConfigInfo struct { // Information about the number or percentage of minimum healthy instance. MinimumHealthyHosts *MinimumHealthyHosts `locationName:"minimumHealthyHosts" type:"structure"` - // The configuration that specifies how the deployment traffic is routed. Only - // deployments with a Lambda compute platform can specify this. + // The configuration that specifies how the deployment traffic is routed. Used + // for deployments with a Lambda or ECS compute platform only. 
TrafficRoutingConfig *TrafficRoutingConfig `locationName:"trafficRoutingConfig" type:"structure"` } @@ -7697,8 +7940,8 @@ func (s *DeploymentConfigInfo) SetTrafficRoutingConfig(v *TrafficRoutingConfig) // The deployment configurations limit was exceeded. type DeploymentConfigLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7715,17 +7958,17 @@ func (s DeploymentConfigLimitExceededException) GoString() string { func newErrorDeploymentConfigLimitExceededException(v protocol.ResponseMetadata) error { return &DeploymentConfigLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentConfigLimitExceededException) Code() string { +func (s *DeploymentConfigLimitExceededException) Code() string { return "DeploymentConfigLimitExceededException" } // Message returns the exception's message. -func (s DeploymentConfigLimitExceededException) Message() string { +func (s *DeploymentConfigLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7733,28 +7976,28 @@ func (s DeploymentConfigLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentConfigLimitExceededException) OrigErr() error { +func (s *DeploymentConfigLimitExceededException) OrigErr() error { return nil } -func (s DeploymentConfigLimitExceededException) Error() string { +func (s *DeploymentConfigLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentConfigLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentConfigLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentConfigLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentConfigLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The deployment configuration name was not specified. type DeploymentConfigNameRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7771,17 +8014,17 @@ func (s DeploymentConfigNameRequiredException) GoString() string { func newErrorDeploymentConfigNameRequiredException(v protocol.ResponseMetadata) error { return &DeploymentConfigNameRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentConfigNameRequiredException) Code() string { +func (s *DeploymentConfigNameRequiredException) Code() string { return "DeploymentConfigNameRequiredException" } // Message returns the exception's message. -func (s DeploymentConfigNameRequiredException) Message() string { +func (s *DeploymentConfigNameRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7789,28 +8032,28 @@ func (s DeploymentConfigNameRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s DeploymentConfigNameRequiredException) OrigErr() error { +func (s *DeploymentConfigNameRequiredException) OrigErr() error { return nil } -func (s DeploymentConfigNameRequiredException) Error() string { +func (s *DeploymentConfigNameRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentConfigNameRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentConfigNameRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentConfigNameRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentConfigNameRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The deployment with the IAM user or AWS account does not exist. type DeploymentDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7827,17 +8070,17 @@ func (s DeploymentDoesNotExistException) GoString() string { func newErrorDeploymentDoesNotExistException(v protocol.ResponseMetadata) error { return &DeploymentDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentDoesNotExistException) Code() string { +func (s *DeploymentDoesNotExistException) Code() string { return "DeploymentDoesNotExistException" } // Message returns the exception's message. -func (s DeploymentDoesNotExistException) Message() string { +func (s *DeploymentDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7845,29 +8088,29 @@ func (s DeploymentDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentDoesNotExistException) OrigErr() error { +func (s *DeploymentDoesNotExistException) OrigErr() error { return nil } -func (s DeploymentDoesNotExistException) Error() string { +func (s *DeploymentDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // A deployment group with the specified name with the IAM user or AWS account // already exists. 
type DeploymentGroupAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7884,17 +8127,17 @@ func (s DeploymentGroupAlreadyExistsException) GoString() string { func newErrorDeploymentGroupAlreadyExistsException(v protocol.ResponseMetadata) error { return &DeploymentGroupAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentGroupAlreadyExistsException) Code() string { +func (s *DeploymentGroupAlreadyExistsException) Code() string { return "DeploymentGroupAlreadyExistsException" } // Message returns the exception's message. -func (s DeploymentGroupAlreadyExistsException) Message() string { +func (s *DeploymentGroupAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7902,28 +8145,28 @@ func (s DeploymentGroupAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentGroupAlreadyExistsException) OrigErr() error { +func (s *DeploymentGroupAlreadyExistsException) OrigErr() error { return nil } -func (s DeploymentGroupAlreadyExistsException) Error() string { +func (s *DeploymentGroupAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentGroupAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentGroupAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentGroupAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentGroupAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The named deployment group with the IAM user or AWS account does not exist. type DeploymentGroupDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7940,17 +8183,17 @@ func (s DeploymentGroupDoesNotExistException) GoString() string { func newErrorDeploymentGroupDoesNotExistException(v protocol.ResponseMetadata) error { return &DeploymentGroupDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentGroupDoesNotExistException) Code() string { +func (s *DeploymentGroupDoesNotExistException) Code() string { return "DeploymentGroupDoesNotExistException" } // Message returns the exception's message. -func (s DeploymentGroupDoesNotExistException) Message() string { +func (s *DeploymentGroupDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7958,22 +8201,22 @@ func (s DeploymentGroupDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s DeploymentGroupDoesNotExistException) OrigErr() error { +func (s *DeploymentGroupDoesNotExistException) OrigErr() error { return nil } -func (s DeploymentGroupDoesNotExistException) Error() string { +func (s *DeploymentGroupDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentGroupDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentGroupDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentGroupDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentGroupDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a deployment group. @@ -8199,8 +8442,8 @@ func (s *DeploymentGroupInfo) SetTriggerConfigurations(v []*TriggerConfig) *Depl // The deployment groups limit was exceeded. type DeploymentGroupLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8217,17 +8460,17 @@ func (s DeploymentGroupLimitExceededException) GoString() string { func newErrorDeploymentGroupLimitExceededException(v protocol.ResponseMetadata) error { return &DeploymentGroupLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentGroupLimitExceededException) Code() string { +func (s *DeploymentGroupLimitExceededException) Code() string { return "DeploymentGroupLimitExceededException" } // Message returns the exception's message. -func (s DeploymentGroupLimitExceededException) Message() string { +func (s *DeploymentGroupLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8235,28 +8478,28 @@ func (s DeploymentGroupLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentGroupLimitExceededException) OrigErr() error { +func (s *DeploymentGroupLimitExceededException) OrigErr() error { return nil } -func (s DeploymentGroupLimitExceededException) Error() string { +func (s *DeploymentGroupLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentGroupLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentGroupLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentGroupLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentGroupLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The deployment group name was not specified. 
type DeploymentGroupNameRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8273,17 +8516,17 @@ func (s DeploymentGroupNameRequiredException) GoString() string { func newErrorDeploymentGroupNameRequiredException(v protocol.ResponseMetadata) error { return &DeploymentGroupNameRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentGroupNameRequiredException) Code() string { +func (s *DeploymentGroupNameRequiredException) Code() string { return "DeploymentGroupNameRequiredException" } // Message returns the exception's message. -func (s DeploymentGroupNameRequiredException) Message() string { +func (s *DeploymentGroupNameRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8291,28 +8534,28 @@ func (s DeploymentGroupNameRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentGroupNameRequiredException) OrigErr() error { +func (s *DeploymentGroupNameRequiredException) OrigErr() error { return nil } -func (s DeploymentGroupNameRequiredException) Error() string { +func (s *DeploymentGroupNameRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentGroupNameRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentGroupNameRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentGroupNameRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentGroupNameRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // At least one deployment ID must be specified. type DeploymentIdRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8329,17 +8572,17 @@ func (s DeploymentIdRequiredException) GoString() string { func newErrorDeploymentIdRequiredException(v protocol.ResponseMetadata) error { return &DeploymentIdRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentIdRequiredException) Code() string { +func (s *DeploymentIdRequiredException) Code() string { return "DeploymentIdRequiredException" } // Message returns the exception's message. -func (s DeploymentIdRequiredException) Message() string { +func (s *DeploymentIdRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8347,22 +8590,22 @@ func (s DeploymentIdRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentIdRequiredException) OrigErr() error { +func (s *DeploymentIdRequiredException) OrigErr() error { return nil } -func (s DeploymentIdRequiredException) Error() string { +func (s *DeploymentIdRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s DeploymentIdRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentIdRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentIdRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentIdRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a deployment. @@ -8426,6 +8669,10 @@ type DeploymentInfo struct { // Information about any error associated with this deployment. ErrorInformation *ErrorInformation `locationName:"errorInformation" type:"structure"` + // The unique ID for an external resource (for example, a CloudFormation stack + // ID) that is linked to this deployment. + ExternalId *string `locationName:"externalId" type:"string"` + // Information about how AWS CodeDeploy handles files that already exist in // a deployment target location but weren't part of the previous successful // deployment. @@ -8611,6 +8858,12 @@ func (s *DeploymentInfo) SetErrorInformation(v *ErrorInformation) *DeploymentInf return s } +// SetExternalId sets the ExternalId field's value. +func (s *DeploymentInfo) SetExternalId(v string) *DeploymentInfo { + s.ExternalId = &v + return s +} + // SetFileExistsBehavior sets the FileExistsBehavior field's value. func (s *DeploymentInfo) SetFileExistsBehavior(v string) *DeploymentInfo { s.FileExistsBehavior = &v @@ -8679,8 +8932,8 @@ func (s *DeploymentInfo) SetUpdateOutdatedInstancesOnly(v bool) *DeploymentInfo // The deployment does not have a status of Ready and can't continue yet. type DeploymentIsNotInReadyStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8697,17 +8950,17 @@ func (s DeploymentIsNotInReadyStateException) GoString() string { func newErrorDeploymentIsNotInReadyStateException(v protocol.ResponseMetadata) error { return &DeploymentIsNotInReadyStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentIsNotInReadyStateException) Code() string { +func (s *DeploymentIsNotInReadyStateException) Code() string { return "DeploymentIsNotInReadyStateException" } // Message returns the exception's message. -func (s DeploymentIsNotInReadyStateException) Message() string { +func (s *DeploymentIsNotInReadyStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8715,28 +8968,28 @@ func (s DeploymentIsNotInReadyStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentIsNotInReadyStateException) OrigErr() error { +func (s *DeploymentIsNotInReadyStateException) OrigErr() error { return nil } -func (s DeploymentIsNotInReadyStateException) Error() string { +func (s *DeploymentIsNotInReadyStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentIsNotInReadyStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentIsNotInReadyStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s DeploymentIsNotInReadyStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentIsNotInReadyStateException) RequestID() string { + return s.RespMetadata.RequestID } // The number of allowed deployments was exceeded. type DeploymentLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8753,17 +9006,17 @@ func (s DeploymentLimitExceededException) GoString() string { func newErrorDeploymentLimitExceededException(v protocol.ResponseMetadata) error { return &DeploymentLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentLimitExceededException) Code() string { +func (s *DeploymentLimitExceededException) Code() string { return "DeploymentLimitExceededException" } // Message returns the exception's message. -func (s DeploymentLimitExceededException) Message() string { +func (s *DeploymentLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8771,28 +9024,28 @@ func (s DeploymentLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentLimitExceededException) OrigErr() error { +func (s *DeploymentLimitExceededException) OrigErr() error { return nil } -func (s DeploymentLimitExceededException) Error() string { +func (s *DeploymentLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The specified deployment has not started. type DeploymentNotStartedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8809,17 +9062,17 @@ func (s DeploymentNotStartedException) GoString() string { func newErrorDeploymentNotStartedException(v protocol.ResponseMetadata) error { return &DeploymentNotStartedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentNotStartedException) Code() string { +func (s *DeploymentNotStartedException) Code() string { return "DeploymentNotStartedException" } // Message returns the exception's message. -func (s DeploymentNotStartedException) Message() string { +func (s *DeploymentNotStartedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8827,22 +9080,22 @@ func (s DeploymentNotStartedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s DeploymentNotStartedException) OrigErr() error { +func (s *DeploymentNotStartedException) OrigErr() error { return nil } -func (s DeploymentNotStartedException) Error() string { +func (s *DeploymentNotStartedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentNotStartedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentNotStartedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentNotStartedException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentNotStartedException) RequestID() string { + return s.RespMetadata.RequestID } // Information about the deployment status of the instances in the deployment. @@ -8936,7 +9189,7 @@ type DeploymentReadyOption struct { // The number of minutes to wait before the status of a blue/green deployment // is changed to Stopped if rerouting is not started manually. Applies only - // to the STOP_DEPLOYMENT option for actionOnTimeout + // to the STOP_DEPLOYMENT option for actionOnTimeout. WaitTimeInMinutes *int64 `locationName:"waitTimeInMinutes" type:"integer"` } @@ -9000,7 +9253,13 @@ func (s *DeploymentStyle) SetDeploymentType(v string) *DeploymentStyle { type DeploymentTarget struct { _ struct{} `type:"structure"` - // The deployment type that is specific to the deployment's compute platform. + // Information about the target to be updated by an AWS CloudFormation blue/green + // deployment. This target type is used for all deployments initiated by a CloudFormation + // stack update. + CloudFormationTarget *CloudFormationTarget `locationName:"cloudFormationTarget" type:"structure"` + + // The deployment type that is specific to the deployment's compute platform + // or deployments initiated by a CloudFormation stack update. DeploymentTargetType *string `locationName:"deploymentTargetType" type:"string" enum:"DeploymentTargetType"` // Information about the target for a deployment that uses the Amazon ECS compute @@ -9026,6 +9285,12 @@ func (s DeploymentTarget) GoString() string { return s.String() } +// SetCloudFormationTarget sets the CloudFormationTarget field's value. +func (s *DeploymentTarget) SetCloudFormationTarget(v *CloudFormationTarget) *DeploymentTarget { + s.CloudFormationTarget = v + return s +} + // SetDeploymentTargetType sets the DeploymentTargetType field's value. func (s *DeploymentTarget) SetDeploymentTargetType(v string) *DeploymentTarget { s.DeploymentTargetType = &v @@ -9052,8 +9317,8 @@ func (s *DeploymentTarget) SetLambdaTarget(v *LambdaTarget) *DeploymentTarget { // The provided target ID does not belong to the attempted deployment. type DeploymentTargetDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9070,17 +9335,17 @@ func (s DeploymentTargetDoesNotExistException) GoString() string { func newErrorDeploymentTargetDoesNotExistException(v protocol.ResponseMetadata) error { return &DeploymentTargetDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s DeploymentTargetDoesNotExistException) Code() string { +func (s *DeploymentTargetDoesNotExistException) Code() string { return "DeploymentTargetDoesNotExistException" } // Message returns the exception's message. -func (s DeploymentTargetDoesNotExistException) Message() string { +func (s *DeploymentTargetDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9088,28 +9353,28 @@ func (s DeploymentTargetDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentTargetDoesNotExistException) OrigErr() error { +func (s *DeploymentTargetDoesNotExistException) OrigErr() error { return nil } -func (s DeploymentTargetDoesNotExistException) Error() string { +func (s *DeploymentTargetDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentTargetDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentTargetDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentTargetDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentTargetDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // A deployment target ID was not provided. type DeploymentTargetIdRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9126,17 +9391,17 @@ func (s DeploymentTargetIdRequiredException) GoString() string { func newErrorDeploymentTargetIdRequiredException(v protocol.ResponseMetadata) error { return &DeploymentTargetIdRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentTargetIdRequiredException) Code() string { +func (s *DeploymentTargetIdRequiredException) Code() string { return "DeploymentTargetIdRequiredException" } // Message returns the exception's message. -func (s DeploymentTargetIdRequiredException) Message() string { +func (s *DeploymentTargetIdRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9144,22 +9409,22 @@ func (s DeploymentTargetIdRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentTargetIdRequiredException) OrigErr() error { +func (s *DeploymentTargetIdRequiredException) OrigErr() error { return nil } -func (s DeploymentTargetIdRequiredException) Error() string { +func (s *DeploymentTargetIdRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentTargetIdRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentTargetIdRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s DeploymentTargetIdRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentTargetIdRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The maximum number of targets that can be associated with an Amazon ECS or @@ -9167,8 +9432,8 @@ func (s DeploymentTargetIdRequiredException) RequestID() string { // must have exactly one item. This exception does not apply to EC2/On-premises // deployments. type DeploymentTargetListSizeExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9185,17 +9450,17 @@ func (s DeploymentTargetListSizeExceededException) GoString() string { func newErrorDeploymentTargetListSizeExceededException(v protocol.ResponseMetadata) error { return &DeploymentTargetListSizeExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeploymentTargetListSizeExceededException) Code() string { +func (s *DeploymentTargetListSizeExceededException) Code() string { return "DeploymentTargetListSizeExceededException" } // Message returns the exception's message. -func (s DeploymentTargetListSizeExceededException) Message() string { +func (s *DeploymentTargetListSizeExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9203,22 +9468,22 @@ func (s DeploymentTargetListSizeExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeploymentTargetListSizeExceededException) OrigErr() error { +func (s *DeploymentTargetListSizeExceededException) OrigErr() error { return nil } -func (s DeploymentTargetListSizeExceededException) Error() string { +func (s *DeploymentTargetListSizeExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeploymentTargetListSizeExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeploymentTargetListSizeExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeploymentTargetListSizeExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeploymentTargetListSizeExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the input of a DeregisterOnPremisesInstance operation. @@ -9276,8 +9541,8 @@ func (s DeregisterOnPremisesInstanceOutput) GoString() string { // The description is too long. type DescriptionTooLongException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9294,17 +9559,17 @@ func (s DescriptionTooLongException) GoString() string { func newErrorDescriptionTooLongException(v protocol.ResponseMetadata) error { return &DescriptionTooLongException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DescriptionTooLongException) Code() string { +func (s *DescriptionTooLongException) Code() string { return "DescriptionTooLongException" } // Message returns the exception's message. 
-func (s DescriptionTooLongException) Message() string { +func (s *DescriptionTooLongException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9312,22 +9577,22 @@ func (s DescriptionTooLongException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DescriptionTooLongException) OrigErr() error { +func (s *DescriptionTooLongException) OrigErr() error { return nil } -func (s DescriptionTooLongException) Error() string { +func (s *DescriptionTooLongException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DescriptionTooLongException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DescriptionTooLongException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DescriptionTooLongException) RequestID() string { - return s.respMetadata.RequestID +func (s *DescriptionTooLongException) RequestID() string { + return s.RespMetadata.RequestID } // Diagnostic information about executable scripts that are part of a deployment. @@ -9509,8 +9774,8 @@ func (s *ECSService) SetServiceName(v string) *ECSService { // The Amazon ECS service is associated with more than one deployment groups. // An Amazon ECS service can be associated with only one deployment group. type ECSServiceMappingLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9527,17 +9792,17 @@ func (s ECSServiceMappingLimitExceededException) GoString() string { func newErrorECSServiceMappingLimitExceededException(v protocol.ResponseMetadata) error { return &ECSServiceMappingLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ECSServiceMappingLimitExceededException) Code() string { +func (s *ECSServiceMappingLimitExceededException) Code() string { return "ECSServiceMappingLimitExceededException" } // Message returns the exception's message. -func (s ECSServiceMappingLimitExceededException) Message() string { +func (s *ECSServiceMappingLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9545,22 +9810,22 @@ func (s ECSServiceMappingLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ECSServiceMappingLimitExceededException) OrigErr() error { +func (s *ECSServiceMappingLimitExceededException) OrigErr() error { return nil } -func (s ECSServiceMappingLimitExceededException) Error() string { +func (s *ECSServiceMappingLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ECSServiceMappingLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ECSServiceMappingLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ECSServiceMappingLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ECSServiceMappingLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Information about the target of an Amazon ECS deployment. @@ -9580,7 +9845,7 @@ type ECSTarget struct { // The status an Amazon ECS deployment's target ECS application. Status *string `locationName:"status" type:"string" enum:"TargetStatus"` - // The ARN of the target. + // The Amazon Resource Name (ARN) of the target. TargetArn *string `locationName:"targetArn" type:"string"` // The unique ID of a deployment target that has a type of ecsTarget. @@ -10404,7 +10669,7 @@ type GetDeploymentTargetOutput struct { _ struct{} `type:"structure"` // A deployment target that contains information about a deployment such as - // its status, lifecyle events, and when it was last updated. It also contains + // its status, lifecycle events, and when it was last updated. It also contains // metadata about the deployment target. The deployment target metadata depends // on the deployment target's type (instanceTarget, lambdaTarget, or ecsTarget). DeploymentTarget *DeploymentTarget `locationName:"deploymentTarget" type:"structure"` @@ -10491,8 +10756,8 @@ func (s *GetOnPremisesInstanceOutput) SetInstanceInfo(v *InstanceInfo) *GetOnPre // No GitHub account connection exists with the named specified in the call. type GitHubAccountTokenDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10509,17 +10774,17 @@ func (s GitHubAccountTokenDoesNotExistException) GoString() string { func newErrorGitHubAccountTokenDoesNotExistException(v protocol.ResponseMetadata) error { return &GitHubAccountTokenDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s GitHubAccountTokenDoesNotExistException) Code() string { +func (s *GitHubAccountTokenDoesNotExistException) Code() string { return "GitHubAccountTokenDoesNotExistException" } // Message returns the exception's message. -func (s GitHubAccountTokenDoesNotExistException) Message() string { +func (s *GitHubAccountTokenDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10527,28 +10792,28 @@ func (s GitHubAccountTokenDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s GitHubAccountTokenDoesNotExistException) OrigErr() error { +func (s *GitHubAccountTokenDoesNotExistException) OrigErr() error { return nil } -func (s GitHubAccountTokenDoesNotExistException) Error() string { +func (s *GitHubAccountTokenDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s GitHubAccountTokenDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *GitHubAccountTokenDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s GitHubAccountTokenDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *GitHubAccountTokenDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The call is missing a required GitHub account connection name. type GitHubAccountTokenNameRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10565,17 +10830,17 @@ func (s GitHubAccountTokenNameRequiredException) GoString() string { func newErrorGitHubAccountTokenNameRequiredException(v protocol.ResponseMetadata) error { return &GitHubAccountTokenNameRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s GitHubAccountTokenNameRequiredException) Code() string { +func (s *GitHubAccountTokenNameRequiredException) Code() string { return "GitHubAccountTokenNameRequiredException" } // Message returns the exception's message. -func (s GitHubAccountTokenNameRequiredException) Message() string { +func (s *GitHubAccountTokenNameRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10583,22 +10848,22 @@ func (s GitHubAccountTokenNameRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s GitHubAccountTokenNameRequiredException) OrigErr() error { +func (s *GitHubAccountTokenNameRequiredException) OrigErr() error { return nil } -func (s GitHubAccountTokenNameRequiredException) Error() string { +func (s *GitHubAccountTokenNameRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s GitHubAccountTokenNameRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *GitHubAccountTokenNameRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s GitHubAccountTokenNameRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *GitHubAccountTokenNameRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Information about the location of application artifacts stored in GitHub. @@ -10672,8 +10937,8 @@ func (s *GreenFleetProvisioningOption) SetAction(v string) *GreenFleetProvisioni // No IAM ARN was included in the request. You must use an IAM session ARN or // IAM user ARN in the request. type IamArnRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10690,17 +10955,17 @@ func (s IamArnRequiredException) GoString() string { func newErrorIamArnRequiredException(v protocol.ResponseMetadata) error { return &IamArnRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IamArnRequiredException) Code() string { +func (s *IamArnRequiredException) Code() string { return "IamArnRequiredException" } // Message returns the exception's message. 
-func (s IamArnRequiredException) Message() string { +func (s *IamArnRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10708,29 +10973,29 @@ func (s IamArnRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IamArnRequiredException) OrigErr() error { +func (s *IamArnRequiredException) OrigErr() error { return nil } -func (s IamArnRequiredException) Error() string { +func (s *IamArnRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IamArnRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IamArnRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IamArnRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *IamArnRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The request included an IAM session ARN that has already been used to register // a different instance. type IamSessionArnAlreadyRegisteredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10747,17 +11012,17 @@ func (s IamSessionArnAlreadyRegisteredException) GoString() string { func newErrorIamSessionArnAlreadyRegisteredException(v protocol.ResponseMetadata) error { return &IamSessionArnAlreadyRegisteredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IamSessionArnAlreadyRegisteredException) Code() string { +func (s *IamSessionArnAlreadyRegisteredException) Code() string { return "IamSessionArnAlreadyRegisteredException" } // Message returns the exception's message. -func (s IamSessionArnAlreadyRegisteredException) Message() string { +func (s *IamSessionArnAlreadyRegisteredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10765,28 +11030,28 @@ func (s IamSessionArnAlreadyRegisteredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IamSessionArnAlreadyRegisteredException) OrigErr() error { +func (s *IamSessionArnAlreadyRegisteredException) OrigErr() error { return nil } -func (s IamSessionArnAlreadyRegisteredException) Error() string { +func (s *IamSessionArnAlreadyRegisteredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IamSessionArnAlreadyRegisteredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IamSessionArnAlreadyRegisteredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IamSessionArnAlreadyRegisteredException) RequestID() string { - return s.respMetadata.RequestID +func (s *IamSessionArnAlreadyRegisteredException) RequestID() string { + return s.RespMetadata.RequestID } // The specified IAM user ARN is already registered with an on-premises instance. 
type IamUserArnAlreadyRegisteredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10803,17 +11068,17 @@ func (s IamUserArnAlreadyRegisteredException) GoString() string { func newErrorIamUserArnAlreadyRegisteredException(v protocol.ResponseMetadata) error { return &IamUserArnAlreadyRegisteredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IamUserArnAlreadyRegisteredException) Code() string { +func (s *IamUserArnAlreadyRegisteredException) Code() string { return "IamUserArnAlreadyRegisteredException" } // Message returns the exception's message. -func (s IamUserArnAlreadyRegisteredException) Message() string { +func (s *IamUserArnAlreadyRegisteredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10821,28 +11086,28 @@ func (s IamUserArnAlreadyRegisteredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IamUserArnAlreadyRegisteredException) OrigErr() error { +func (s *IamUserArnAlreadyRegisteredException) OrigErr() error { return nil } -func (s IamUserArnAlreadyRegisteredException) Error() string { +func (s *IamUserArnAlreadyRegisteredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IamUserArnAlreadyRegisteredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IamUserArnAlreadyRegisteredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IamUserArnAlreadyRegisteredException) RequestID() string { - return s.respMetadata.RequestID +func (s *IamUserArnAlreadyRegisteredException) RequestID() string { + return s.RespMetadata.RequestID } // An IAM user ARN was not specified. type IamUserArnRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10859,17 +11124,17 @@ func (s IamUserArnRequiredException) GoString() string { func newErrorIamUserArnRequiredException(v protocol.ResponseMetadata) error { return &IamUserArnRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IamUserArnRequiredException) Code() string { +func (s *IamUserArnRequiredException) Code() string { return "IamUserArnRequiredException" } // Message returns the exception's message. -func (s IamUserArnRequiredException) Message() string { +func (s *IamUserArnRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10877,30 +11142,30 @@ func (s IamUserArnRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IamUserArnRequiredException) OrigErr() error { +func (s *IamUserArnRequiredException) OrigErr() error { return nil } -func (s IamUserArnRequiredException) Error() string { +func (s *IamUserArnRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s IamUserArnRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IamUserArnRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IamUserArnRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *IamUserArnRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The specified instance does not exist in the deployment group. // // Deprecated: This exception is deprecated, use DeploymentTargetDoesNotExistException instead. type InstanceDoesNotExistException struct { - _ struct{} `deprecated:"true" type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `deprecated:"true" type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10917,17 +11182,17 @@ func (s InstanceDoesNotExistException) GoString() string { func newErrorInstanceDoesNotExistException(v protocol.ResponseMetadata) error { return &InstanceDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InstanceDoesNotExistException) Code() string { +func (s *InstanceDoesNotExistException) Code() string { return "InstanceDoesNotExistException" } // Message returns the exception's message. -func (s InstanceDoesNotExistException) Message() string { +func (s *InstanceDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10935,30 +11200,30 @@ func (s InstanceDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InstanceDoesNotExistException) OrigErr() error { +func (s *InstanceDoesNotExistException) OrigErr() error { return nil } -func (s InstanceDoesNotExistException) Error() string { +func (s *InstanceDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InstanceDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InstanceDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InstanceDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *InstanceDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The instance ID was not specified. // // Deprecated: This exception is deprecated, use DeploymentTargetIdRequiredException instead. type InstanceIdRequiredException struct { - _ struct{} `deprecated:"true" type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `deprecated:"true" type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10975,17 +11240,17 @@ func (s InstanceIdRequiredException) GoString() string { func newErrorInstanceIdRequiredException(v protocol.ResponseMetadata) error { return &InstanceIdRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InstanceIdRequiredException) Code() string { +func (s *InstanceIdRequiredException) Code() string { return "InstanceIdRequiredException" } // Message returns the exception's message. 
-func (s InstanceIdRequiredException) Message() string { +func (s *InstanceIdRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10993,22 +11258,22 @@ func (s InstanceIdRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InstanceIdRequiredException) OrigErr() error { +func (s *InstanceIdRequiredException) OrigErr() error { return nil } -func (s InstanceIdRequiredException) Error() string { +func (s *InstanceIdRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InstanceIdRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InstanceIdRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InstanceIdRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *InstanceIdRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Information about an on-premises instance. @@ -11093,8 +11358,8 @@ func (s *InstanceInfo) SetTags(v []*Tag) *InstanceInfo { // The maximum number of allowed on-premises instances in a single call was // exceeded. type InstanceLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11111,17 +11376,17 @@ func (s InstanceLimitExceededException) GoString() string { func newErrorInstanceLimitExceededException(v protocol.ResponseMetadata) error { return &InstanceLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InstanceLimitExceededException) Code() string { +func (s *InstanceLimitExceededException) Code() string { return "InstanceLimitExceededException" } // Message returns the exception's message. -func (s InstanceLimitExceededException) Message() string { +func (s *InstanceLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11129,28 +11394,28 @@ func (s InstanceLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InstanceLimitExceededException) OrigErr() error { +func (s *InstanceLimitExceededException) OrigErr() error { return nil } -func (s InstanceLimitExceededException) Error() string { +func (s *InstanceLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InstanceLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InstanceLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InstanceLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *InstanceLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The specified on-premises instance name is already registered. 
type InstanceNameAlreadyRegisteredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11167,17 +11432,17 @@ func (s InstanceNameAlreadyRegisteredException) GoString() string { func newErrorInstanceNameAlreadyRegisteredException(v protocol.ResponseMetadata) error { return &InstanceNameAlreadyRegisteredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InstanceNameAlreadyRegisteredException) Code() string { +func (s *InstanceNameAlreadyRegisteredException) Code() string { return "InstanceNameAlreadyRegisteredException" } // Message returns the exception's message. -func (s InstanceNameAlreadyRegisteredException) Message() string { +func (s *InstanceNameAlreadyRegisteredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11185,28 +11450,28 @@ func (s InstanceNameAlreadyRegisteredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InstanceNameAlreadyRegisteredException) OrigErr() error { +func (s *InstanceNameAlreadyRegisteredException) OrigErr() error { return nil } -func (s InstanceNameAlreadyRegisteredException) Error() string { +func (s *InstanceNameAlreadyRegisteredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InstanceNameAlreadyRegisteredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InstanceNameAlreadyRegisteredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InstanceNameAlreadyRegisteredException) RequestID() string { - return s.respMetadata.RequestID +func (s *InstanceNameAlreadyRegisteredException) RequestID() string { + return s.RespMetadata.RequestID } // An on-premises instance name was not specified. type InstanceNameRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11223,17 +11488,17 @@ func (s InstanceNameRequiredException) GoString() string { func newErrorInstanceNameRequiredException(v protocol.ResponseMetadata) error { return &InstanceNameRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InstanceNameRequiredException) Code() string { +func (s *InstanceNameRequiredException) Code() string { return "InstanceNameRequiredException" } // Message returns the exception's message. -func (s InstanceNameRequiredException) Message() string { +func (s *InstanceNameRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11241,28 +11506,28 @@ func (s InstanceNameRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InstanceNameRequiredException) OrigErr() error { +func (s *InstanceNameRequiredException) OrigErr() error { return nil } -func (s InstanceNameRequiredException) Error() string { +func (s *InstanceNameRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InstanceNameRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InstanceNameRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InstanceNameRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *InstanceNameRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The specified on-premises instance is not registered. type InstanceNotRegisteredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11279,17 +11544,17 @@ func (s InstanceNotRegisteredException) GoString() string { func newErrorInstanceNotRegisteredException(v protocol.ResponseMetadata) error { return &InstanceNotRegisteredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InstanceNotRegisteredException) Code() string { +func (s *InstanceNotRegisteredException) Code() string { return "InstanceNotRegisteredException" } // Message returns the exception's message. -func (s InstanceNotRegisteredException) Message() string { +func (s *InstanceNotRegisteredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11297,22 +11562,22 @@ func (s InstanceNotRegisteredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InstanceNotRegisteredException) OrigErr() error { +func (s *InstanceNotRegisteredException) OrigErr() error { return nil } -func (s InstanceNotRegisteredException) Error() string { +func (s *InstanceNotRegisteredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InstanceNotRegisteredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InstanceNotRegisteredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InstanceNotRegisteredException) RequestID() string { - return s.respMetadata.RequestID +func (s *InstanceNotRegisteredException) RequestID() string { + return s.RespMetadata.RequestID } // Information about an instance in a deployment. @@ -11335,7 +11600,7 @@ type InstanceSummary struct { // * GREEN: The instance is part of the replacement environment. InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` - // A timestamp that indicaties when the instance information was last updated. + // A timestamp that indicates when the instance information was last updated. LastUpdatedAt *time.Time `locationName:"lastUpdatedAt" type:"timestamp"` // A list of lifecycle events for this instance. @@ -11424,7 +11689,7 @@ type InstanceTarget struct { // The status an EC2/On-premises deployment's target instance. Status *string `locationName:"status" type:"string" enum:"TargetStatus"` - // The ARN of the target. + // The Amazon Resource Name (ARN) of the target. TargetArn *string `locationName:"targetArn" type:"string"` // The unique ID of a deployment target that has a type of instanceTarget. @@ -11495,8 +11760,8 @@ func (s *InstanceTarget) SetTargetId(v string) *InstanceTarget { // // * The alarm configuration is enabled, but the alarm list is empty. 
type InvalidAlarmConfigException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11513,17 +11778,17 @@ func (s InvalidAlarmConfigException) GoString() string { func newErrorInvalidAlarmConfigException(v protocol.ResponseMetadata) error { return &InvalidAlarmConfigException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAlarmConfigException) Code() string { +func (s *InvalidAlarmConfigException) Code() string { return "InvalidAlarmConfigException" } // Message returns the exception's message. -func (s InvalidAlarmConfigException) Message() string { +func (s *InvalidAlarmConfigException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11531,28 +11796,28 @@ func (s InvalidAlarmConfigException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAlarmConfigException) OrigErr() error { +func (s *InvalidAlarmConfigException) OrigErr() error { return nil } -func (s InvalidAlarmConfigException) Error() string { +func (s *InvalidAlarmConfigException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAlarmConfigException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAlarmConfigException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAlarmConfigException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAlarmConfigException) RequestID() string { + return s.RespMetadata.RequestID } // The application name was specified in an invalid format. type InvalidApplicationNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11569,17 +11834,17 @@ func (s InvalidApplicationNameException) GoString() string { func newErrorInvalidApplicationNameException(v protocol.ResponseMetadata) error { return &InvalidApplicationNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidApplicationNameException) Code() string { +func (s *InvalidApplicationNameException) Code() string { return "InvalidApplicationNameException" } // Message returns the exception's message. -func (s InvalidApplicationNameException) Message() string { +func (s *InvalidApplicationNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11587,28 +11852,28 @@ func (s InvalidApplicationNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidApplicationNameException) OrigErr() error { +func (s *InvalidApplicationNameException) OrigErr() error { return nil } -func (s InvalidApplicationNameException) Error() string { +func (s *InvalidApplicationNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidApplicationNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidApplicationNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidApplicationNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidApplicationNameException) RequestID() string { + return s.RespMetadata.RequestID } // The specified ARN is not in a valid format. type InvalidArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11625,17 +11890,17 @@ func (s InvalidArnException) GoString() string { func newErrorInvalidArnException(v protocol.ResponseMetadata) error { return &InvalidArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArnException) Code() string { +func (s *InvalidArnException) Code() string { return "InvalidArnException" } // Message returns the exception's message. -func (s InvalidArnException) Message() string { +func (s *InvalidArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11643,30 +11908,30 @@ func (s InvalidArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArnException) OrigErr() error { +func (s *InvalidArnException) OrigErr() error { return nil } -func (s InvalidArnException) Error() string { +func (s *InvalidArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArnException) RequestID() string { + return s.RespMetadata.RequestID } // The automatic rollback configuration was specified in an invalid format. // For example, automatic rollback is enabled, but an invalid triggering event // type or no event types were listed. type InvalidAutoRollbackConfigException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11683,17 +11948,17 @@ func (s InvalidAutoRollbackConfigException) GoString() string { func newErrorInvalidAutoRollbackConfigException(v protocol.ResponseMetadata) error { return &InvalidAutoRollbackConfigException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAutoRollbackConfigException) Code() string { +func (s *InvalidAutoRollbackConfigException) Code() string { return "InvalidAutoRollbackConfigException" } // Message returns the exception's message. -func (s InvalidAutoRollbackConfigException) Message() string { +func (s *InvalidAutoRollbackConfigException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11701,28 +11966,28 @@ func (s InvalidAutoRollbackConfigException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidAutoRollbackConfigException) OrigErr() error { +func (s *InvalidAutoRollbackConfigException) OrigErr() error { return nil } -func (s InvalidAutoRollbackConfigException) Error() string { +func (s *InvalidAutoRollbackConfigException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAutoRollbackConfigException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAutoRollbackConfigException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAutoRollbackConfigException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAutoRollbackConfigException) RequestID() string { + return s.RespMetadata.RequestID } // The Auto Scaling group was specified in an invalid format or does not exist. type InvalidAutoScalingGroupException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11739,17 +12004,17 @@ func (s InvalidAutoScalingGroupException) GoString() string { func newErrorInvalidAutoScalingGroupException(v protocol.ResponseMetadata) error { return &InvalidAutoScalingGroupException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAutoScalingGroupException) Code() string { +func (s *InvalidAutoScalingGroupException) Code() string { return "InvalidAutoScalingGroupException" } // Message returns the exception's message. -func (s InvalidAutoScalingGroupException) Message() string { +func (s *InvalidAutoScalingGroupException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11757,30 +12022,30 @@ func (s InvalidAutoScalingGroupException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAutoScalingGroupException) OrigErr() error { +func (s *InvalidAutoScalingGroupException) OrigErr() error { return nil } -func (s InvalidAutoScalingGroupException) Error() string { +func (s *InvalidAutoScalingGroupException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAutoScalingGroupException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAutoScalingGroupException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAutoScalingGroupException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAutoScalingGroupException) RequestID() string { + return s.RespMetadata.RequestID } // The configuration for the blue/green deployment group was provided in an // invalid format. For information about deployment configuration format, see // CreateDeploymentConfig. 
type InvalidBlueGreenDeploymentConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11797,17 +12062,17 @@ func (s InvalidBlueGreenDeploymentConfigurationException) GoString() string { func newErrorInvalidBlueGreenDeploymentConfigurationException(v protocol.ResponseMetadata) error { return &InvalidBlueGreenDeploymentConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidBlueGreenDeploymentConfigurationException) Code() string { +func (s *InvalidBlueGreenDeploymentConfigurationException) Code() string { return "InvalidBlueGreenDeploymentConfigurationException" } // Message returns the exception's message. -func (s InvalidBlueGreenDeploymentConfigurationException) Message() string { +func (s *InvalidBlueGreenDeploymentConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11815,28 +12080,28 @@ func (s InvalidBlueGreenDeploymentConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidBlueGreenDeploymentConfigurationException) OrigErr() error { +func (s *InvalidBlueGreenDeploymentConfigurationException) OrigErr() error { return nil } -func (s InvalidBlueGreenDeploymentConfigurationException) Error() string { +func (s *InvalidBlueGreenDeploymentConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidBlueGreenDeploymentConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidBlueGreenDeploymentConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidBlueGreenDeploymentConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidBlueGreenDeploymentConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // The bucket name either doesn't exist or was specified in an invalid format. type InvalidBucketNameFilterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11853,17 +12118,17 @@ func (s InvalidBucketNameFilterException) GoString() string { func newErrorInvalidBucketNameFilterException(v protocol.ResponseMetadata) error { return &InvalidBucketNameFilterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidBucketNameFilterException) Code() string { +func (s *InvalidBucketNameFilterException) Code() string { return "InvalidBucketNameFilterException" } // Message returns the exception's message. -func (s InvalidBucketNameFilterException) Message() string { +func (s *InvalidBucketNameFilterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11871,28 +12136,29 @@ func (s InvalidBucketNameFilterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidBucketNameFilterException) OrigErr() error { +func (s *InvalidBucketNameFilterException) OrigErr() error { return nil } -func (s InvalidBucketNameFilterException) Error() string { +func (s *InvalidBucketNameFilterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidBucketNameFilterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidBucketNameFilterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidBucketNameFilterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidBucketNameFilterException) RequestID() string { + return s.RespMetadata.RequestID } -// The computePlatform is invalid. The computePlatform should be Lambda or Server. +// The computePlatform is invalid. The computePlatform should be Lambda, Server, +// or ECS. type InvalidComputePlatformException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11909,17 +12175,17 @@ func (s InvalidComputePlatformException) GoString() string { func newErrorInvalidComputePlatformException(v protocol.ResponseMetadata) error { return &InvalidComputePlatformException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidComputePlatformException) Code() string { +func (s *InvalidComputePlatformException) Code() string { return "InvalidComputePlatformException" } // Message returns the exception's message. -func (s InvalidComputePlatformException) Message() string { +func (s *InvalidComputePlatformException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11927,28 +12193,28 @@ func (s InvalidComputePlatformException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidComputePlatformException) OrigErr() error { +func (s *InvalidComputePlatformException) OrigErr() error { return nil } -func (s InvalidComputePlatformException) Error() string { +func (s *InvalidComputePlatformException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidComputePlatformException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidComputePlatformException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidComputePlatformException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidComputePlatformException) RequestID() string { + return s.RespMetadata.RequestID } // The deployed state filter was specified in an invalid format. 
type InvalidDeployedStateFilterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11965,17 +12231,17 @@ func (s InvalidDeployedStateFilterException) GoString() string { func newErrorInvalidDeployedStateFilterException(v protocol.ResponseMetadata) error { return &InvalidDeployedStateFilterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeployedStateFilterException) Code() string { +func (s *InvalidDeployedStateFilterException) Code() string { return "InvalidDeployedStateFilterException" } // Message returns the exception's message. -func (s InvalidDeployedStateFilterException) Message() string { +func (s *InvalidDeployedStateFilterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11983,28 +12249,28 @@ func (s InvalidDeployedStateFilterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDeployedStateFilterException) OrigErr() error { +func (s *InvalidDeployedStateFilterException) OrigErr() error { return nil } -func (s InvalidDeployedStateFilterException) Error() string { +func (s *InvalidDeployedStateFilterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeployedStateFilterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeployedStateFilterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeployedStateFilterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeployedStateFilterException) RequestID() string { + return s.RespMetadata.RequestID } // The deployment configuration name was specified in an invalid format. type InvalidDeploymentConfigNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12021,17 +12287,17 @@ func (s InvalidDeploymentConfigNameException) GoString() string { func newErrorInvalidDeploymentConfigNameException(v protocol.ResponseMetadata) error { return &InvalidDeploymentConfigNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeploymentConfigNameException) Code() string { +func (s *InvalidDeploymentConfigNameException) Code() string { return "InvalidDeploymentConfigNameException" } // Message returns the exception's message. -func (s InvalidDeploymentConfigNameException) Message() string { +func (s *InvalidDeploymentConfigNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12039,28 +12305,28 @@ func (s InvalidDeploymentConfigNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidDeploymentConfigNameException) OrigErr() error { +func (s *InvalidDeploymentConfigNameException) OrigErr() error { return nil } -func (s InvalidDeploymentConfigNameException) Error() string { +func (s *InvalidDeploymentConfigNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeploymentConfigNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeploymentConfigNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeploymentConfigNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeploymentConfigNameException) RequestID() string { + return s.RespMetadata.RequestID } // The deployment group name was specified in an invalid format. type InvalidDeploymentGroupNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12077,17 +12343,17 @@ func (s InvalidDeploymentGroupNameException) GoString() string { func newErrorInvalidDeploymentGroupNameException(v protocol.ResponseMetadata) error { return &InvalidDeploymentGroupNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeploymentGroupNameException) Code() string { +func (s *InvalidDeploymentGroupNameException) Code() string { return "InvalidDeploymentGroupNameException" } // Message returns the exception's message. -func (s InvalidDeploymentGroupNameException) Message() string { +func (s *InvalidDeploymentGroupNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12095,28 +12361,28 @@ func (s InvalidDeploymentGroupNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDeploymentGroupNameException) OrigErr() error { +func (s *InvalidDeploymentGroupNameException) OrigErr() error { return nil } -func (s InvalidDeploymentGroupNameException) Error() string { +func (s *InvalidDeploymentGroupNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeploymentGroupNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeploymentGroupNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeploymentGroupNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeploymentGroupNameException) RequestID() string { + return s.RespMetadata.RequestID } // At least one of the deployment IDs was specified in an invalid format. 
type InvalidDeploymentIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12133,17 +12399,17 @@ func (s InvalidDeploymentIdException) GoString() string { func newErrorInvalidDeploymentIdException(v protocol.ResponseMetadata) error { return &InvalidDeploymentIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeploymentIdException) Code() string { +func (s *InvalidDeploymentIdException) Code() string { return "InvalidDeploymentIdException" } // Message returns the exception's message. -func (s InvalidDeploymentIdException) Message() string { +func (s *InvalidDeploymentIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12151,29 +12417,29 @@ func (s InvalidDeploymentIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDeploymentIdException) OrigErr() error { +func (s *InvalidDeploymentIdException) OrigErr() error { return nil } -func (s InvalidDeploymentIdException) Error() string { +func (s *InvalidDeploymentIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeploymentIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeploymentIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeploymentIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeploymentIdException) RequestID() string { + return s.RespMetadata.RequestID } // An instance type was specified for an in-place deployment. Instance types // are supported for blue/green deployments only. type InvalidDeploymentInstanceTypeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12190,17 +12456,17 @@ func (s InvalidDeploymentInstanceTypeException) GoString() string { func newErrorInvalidDeploymentInstanceTypeException(v protocol.ResponseMetadata) error { return &InvalidDeploymentInstanceTypeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeploymentInstanceTypeException) Code() string { +func (s *InvalidDeploymentInstanceTypeException) Code() string { return "InvalidDeploymentInstanceTypeException" } // Message returns the exception's message. -func (s InvalidDeploymentInstanceTypeException) Message() string { +func (s *InvalidDeploymentInstanceTypeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12208,28 +12474,28 @@ func (s InvalidDeploymentInstanceTypeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidDeploymentInstanceTypeException) OrigErr() error { +func (s *InvalidDeploymentInstanceTypeException) OrigErr() error { return nil } -func (s InvalidDeploymentInstanceTypeException) Error() string { +func (s *InvalidDeploymentInstanceTypeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeploymentInstanceTypeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeploymentInstanceTypeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeploymentInstanceTypeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeploymentInstanceTypeException) RequestID() string { + return s.RespMetadata.RequestID } // The specified deployment status doesn't exist or cannot be determined. type InvalidDeploymentStatusException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12246,17 +12512,17 @@ func (s InvalidDeploymentStatusException) GoString() string { func newErrorInvalidDeploymentStatusException(v protocol.ResponseMetadata) error { return &InvalidDeploymentStatusException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeploymentStatusException) Code() string { +func (s *InvalidDeploymentStatusException) Code() string { return "InvalidDeploymentStatusException" } // Message returns the exception's message. -func (s InvalidDeploymentStatusException) Message() string { +func (s *InvalidDeploymentStatusException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12264,30 +12530,30 @@ func (s InvalidDeploymentStatusException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDeploymentStatusException) OrigErr() error { +func (s *InvalidDeploymentStatusException) OrigErr() error { return nil } -func (s InvalidDeploymentStatusException) Error() string { +func (s *InvalidDeploymentStatusException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeploymentStatusException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeploymentStatusException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeploymentStatusException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeploymentStatusException) RequestID() string { + return s.RespMetadata.RequestID } // An invalid deployment style was specified. Valid deployment types include // "IN_PLACE" and "BLUE_GREEN." Valid deployment options include "WITH_TRAFFIC_CONTROL" // and "WITHOUT_TRAFFIC_CONTROL." 
type InvalidDeploymentStyleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12304,17 +12570,17 @@ func (s InvalidDeploymentStyleException) GoString() string { func newErrorInvalidDeploymentStyleException(v protocol.ResponseMetadata) error { return &InvalidDeploymentStyleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeploymentStyleException) Code() string { +func (s *InvalidDeploymentStyleException) Code() string { return "InvalidDeploymentStyleException" } // Message returns the exception's message. -func (s InvalidDeploymentStyleException) Message() string { +func (s *InvalidDeploymentStyleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12322,28 +12588,28 @@ func (s InvalidDeploymentStyleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDeploymentStyleException) OrigErr() error { +func (s *InvalidDeploymentStyleException) OrigErr() error { return nil } -func (s InvalidDeploymentStyleException) Error() string { +func (s *InvalidDeploymentStyleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeploymentStyleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeploymentStyleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeploymentStyleException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeploymentStyleException) RequestID() string { + return s.RespMetadata.RequestID } // The target ID provided was not valid. type InvalidDeploymentTargetIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12360,17 +12626,17 @@ func (s InvalidDeploymentTargetIdException) GoString() string { func newErrorInvalidDeploymentTargetIdException(v protocol.ResponseMetadata) error { return &InvalidDeploymentTargetIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeploymentTargetIdException) Code() string { +func (s *InvalidDeploymentTargetIdException) Code() string { return "InvalidDeploymentTargetIdException" } // Message returns the exception's message. -func (s InvalidDeploymentTargetIdException) Message() string { +func (s *InvalidDeploymentTargetIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12378,28 +12644,28 @@ func (s InvalidDeploymentTargetIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDeploymentTargetIdException) OrigErr() error { +func (s *InvalidDeploymentTargetIdException) OrigErr() error { return nil } -func (s InvalidDeploymentTargetIdException) Error() string { +func (s *InvalidDeploymentTargetIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidDeploymentTargetIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeploymentTargetIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeploymentTargetIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeploymentTargetIdException) RequestID() string { + return s.RespMetadata.RequestID } // The wait type is invalid. type InvalidDeploymentWaitTypeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12416,17 +12682,17 @@ func (s InvalidDeploymentWaitTypeException) GoString() string { func newErrorInvalidDeploymentWaitTypeException(v protocol.ResponseMetadata) error { return &InvalidDeploymentWaitTypeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeploymentWaitTypeException) Code() string { +func (s *InvalidDeploymentWaitTypeException) Code() string { return "InvalidDeploymentWaitTypeException" } // Message returns the exception's message. -func (s InvalidDeploymentWaitTypeException) Message() string { +func (s *InvalidDeploymentWaitTypeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12434,29 +12700,29 @@ func (s InvalidDeploymentWaitTypeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDeploymentWaitTypeException) OrigErr() error { +func (s *InvalidDeploymentWaitTypeException) OrigErr() error { return nil } -func (s InvalidDeploymentWaitTypeException) Error() string { +func (s *InvalidDeploymentWaitTypeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeploymentWaitTypeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeploymentWaitTypeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeploymentWaitTypeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeploymentWaitTypeException) RequestID() string { + return s.RespMetadata.RequestID } // A call was submitted that specified both Ec2TagFilters and Ec2TagSet, but // only one of these data types can be used in a single call. type InvalidEC2TagCombinationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12473,17 +12739,17 @@ func (s InvalidEC2TagCombinationException) GoString() string { func newErrorInvalidEC2TagCombinationException(v protocol.ResponseMetadata) error { return &InvalidEC2TagCombinationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidEC2TagCombinationException) Code() string { +func (s *InvalidEC2TagCombinationException) Code() string { return "InvalidEC2TagCombinationException" } // Message returns the exception's message. 
-func (s InvalidEC2TagCombinationException) Message() string { +func (s *InvalidEC2TagCombinationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12491,28 +12757,28 @@ func (s InvalidEC2TagCombinationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidEC2TagCombinationException) OrigErr() error { +func (s *InvalidEC2TagCombinationException) OrigErr() error { return nil } -func (s InvalidEC2TagCombinationException) Error() string { +func (s *InvalidEC2TagCombinationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidEC2TagCombinationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidEC2TagCombinationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidEC2TagCombinationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidEC2TagCombinationException) RequestID() string { + return s.RespMetadata.RequestID } // The tag was specified in an invalid format. type InvalidEC2TagException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12529,17 +12795,17 @@ func (s InvalidEC2TagException) GoString() string { func newErrorInvalidEC2TagException(v protocol.ResponseMetadata) error { return &InvalidEC2TagException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidEC2TagException) Code() string { +func (s *InvalidEC2TagException) Code() string { return "InvalidEC2TagException" } // Message returns the exception's message. -func (s InvalidEC2TagException) Message() string { +func (s *InvalidEC2TagException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12547,28 +12813,28 @@ func (s InvalidEC2TagException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidEC2TagException) OrigErr() error { +func (s *InvalidEC2TagException) OrigErr() error { return nil } -func (s InvalidEC2TagException) Error() string { +func (s *InvalidEC2TagException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidEC2TagException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidEC2TagException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidEC2TagException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidEC2TagException) RequestID() string { + return s.RespMetadata.RequestID } // The Amazon ECS service identifier is not valid. 
type InvalidECSServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12585,17 +12851,73 @@ func (s InvalidECSServiceException) GoString() string { func newErrorInvalidECSServiceException(v protocol.ResponseMetadata) error { return &InvalidECSServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidECSServiceException) Code() string { +func (s *InvalidECSServiceException) Code() string { return "InvalidECSServiceException" } // Message returns the exception's message. -func (s InvalidECSServiceException) Message() string { +func (s *InvalidECSServiceException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidECSServiceException) OrigErr() error { + return nil +} + +func (s *InvalidECSServiceException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidECSServiceException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidECSServiceException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The external ID was specified in an invalid format. +type InvalidExternalIdException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidExternalIdException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidExternalIdException) GoString() string { + return s.String() +} + +func newErrorInvalidExternalIdException(v protocol.ResponseMetadata) error { + return &InvalidExternalIdException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidExternalIdException) Code() string { + return "InvalidExternalIdException" +} + +// Message returns the exception's message. +func (s *InvalidExternalIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12603,22 +12925,22 @@ func (s InvalidECSServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidECSServiceException) OrigErr() error { +func (s *InvalidExternalIdException) OrigErr() error { return nil } -func (s InvalidECSServiceException) Error() string { +func (s *InvalidExternalIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidECSServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidExternalIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidECSServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidExternalIdException) RequestID() string { + return s.RespMetadata.RequestID } // An invalid fileExistsBehavior option was specified to determine how AWS CodeDeploy @@ -12626,8 +12948,8 @@ func (s InvalidECSServiceException) RequestID() string { // but weren't part of the previous successful deployment. Valid values include // "DISALLOW," "OVERWRITE," and "RETAIN." type InvalidFileExistsBehaviorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12644,17 +12966,17 @@ func (s InvalidFileExistsBehaviorException) GoString() string { func newErrorInvalidFileExistsBehaviorException(v protocol.ResponseMetadata) error { return &InvalidFileExistsBehaviorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidFileExistsBehaviorException) Code() string { +func (s *InvalidFileExistsBehaviorException) Code() string { return "InvalidFileExistsBehaviorException" } // Message returns the exception's message. -func (s InvalidFileExistsBehaviorException) Message() string { +func (s *InvalidFileExistsBehaviorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12662,28 +12984,28 @@ func (s InvalidFileExistsBehaviorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidFileExistsBehaviorException) OrigErr() error { +func (s *InvalidFileExistsBehaviorException) OrigErr() error { return nil } -func (s InvalidFileExistsBehaviorException) Error() string { +func (s *InvalidFileExistsBehaviorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidFileExistsBehaviorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidFileExistsBehaviorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidFileExistsBehaviorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidFileExistsBehaviorException) RequestID() string { + return s.RespMetadata.RequestID } // The GitHub token is not valid. type InvalidGitHubAccountTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12700,17 +13022,17 @@ func (s InvalidGitHubAccountTokenException) GoString() string { func newErrorInvalidGitHubAccountTokenException(v protocol.ResponseMetadata) error { return &InvalidGitHubAccountTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidGitHubAccountTokenException) Code() string { +func (s *InvalidGitHubAccountTokenException) Code() string { return "InvalidGitHubAccountTokenException" } // Message returns the exception's message. 
-func (s InvalidGitHubAccountTokenException) Message() string { +func (s *InvalidGitHubAccountTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12718,28 +13040,28 @@ func (s InvalidGitHubAccountTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidGitHubAccountTokenException) OrigErr() error { +func (s *InvalidGitHubAccountTokenException) OrigErr() error { return nil } -func (s InvalidGitHubAccountTokenException) Error() string { +func (s *InvalidGitHubAccountTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidGitHubAccountTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidGitHubAccountTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidGitHubAccountTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidGitHubAccountTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The format of the specified GitHub account connection name is invalid. type InvalidGitHubAccountTokenNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12756,17 +13078,17 @@ func (s InvalidGitHubAccountTokenNameException) GoString() string { func newErrorInvalidGitHubAccountTokenNameException(v protocol.ResponseMetadata) error { return &InvalidGitHubAccountTokenNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidGitHubAccountTokenNameException) Code() string { +func (s *InvalidGitHubAccountTokenNameException) Code() string { return "InvalidGitHubAccountTokenNameException" } // Message returns the exception's message. -func (s InvalidGitHubAccountTokenNameException) Message() string { +func (s *InvalidGitHubAccountTokenNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12774,28 +13096,28 @@ func (s InvalidGitHubAccountTokenNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidGitHubAccountTokenNameException) OrigErr() error { +func (s *InvalidGitHubAccountTokenNameException) OrigErr() error { return nil } -func (s InvalidGitHubAccountTokenNameException) Error() string { +func (s *InvalidGitHubAccountTokenNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidGitHubAccountTokenNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidGitHubAccountTokenNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidGitHubAccountTokenNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidGitHubAccountTokenNameException) RequestID() string { + return s.RespMetadata.RequestID } // The IAM session ARN was specified in an invalid format. 
type InvalidIamSessionArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12812,17 +13134,17 @@ func (s InvalidIamSessionArnException) GoString() string { func newErrorInvalidIamSessionArnException(v protocol.ResponseMetadata) error { return &InvalidIamSessionArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidIamSessionArnException) Code() string { +func (s *InvalidIamSessionArnException) Code() string { return "InvalidIamSessionArnException" } // Message returns the exception's message. -func (s InvalidIamSessionArnException) Message() string { +func (s *InvalidIamSessionArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12830,28 +13152,28 @@ func (s InvalidIamSessionArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidIamSessionArnException) OrigErr() error { +func (s *InvalidIamSessionArnException) OrigErr() error { return nil } -func (s InvalidIamSessionArnException) Error() string { +func (s *InvalidIamSessionArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidIamSessionArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidIamSessionArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidIamSessionArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidIamSessionArnException) RequestID() string { + return s.RespMetadata.RequestID } // The IAM user ARN was specified in an invalid format. type InvalidIamUserArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12868,17 +13190,17 @@ func (s InvalidIamUserArnException) GoString() string { func newErrorInvalidIamUserArnException(v protocol.ResponseMetadata) error { return &InvalidIamUserArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidIamUserArnException) Code() string { +func (s *InvalidIamUserArnException) Code() string { return "InvalidIamUserArnException" } // Message returns the exception's message. -func (s InvalidIamUserArnException) Message() string { +func (s *InvalidIamUserArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12886,29 +13208,29 @@ func (s InvalidIamUserArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidIamUserArnException) OrigErr() error { +func (s *InvalidIamUserArnException) OrigErr() error { return nil } -func (s InvalidIamUserArnException) Error() string { +func (s *InvalidIamUserArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidIamUserArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidIamUserArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidIamUserArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidIamUserArnException) RequestID() string { + return s.RespMetadata.RequestID } // The IgnoreApplicationStopFailures value is invalid. For AWS Lambda deployments, // false is expected. For EC2/On-premises deployments, true or false is expected. type InvalidIgnoreApplicationStopFailuresValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12925,17 +13247,17 @@ func (s InvalidIgnoreApplicationStopFailuresValueException) GoString() string { func newErrorInvalidIgnoreApplicationStopFailuresValueException(v protocol.ResponseMetadata) error { return &InvalidIgnoreApplicationStopFailuresValueException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidIgnoreApplicationStopFailuresValueException) Code() string { +func (s *InvalidIgnoreApplicationStopFailuresValueException) Code() string { return "InvalidIgnoreApplicationStopFailuresValueException" } // Message returns the exception's message. -func (s InvalidIgnoreApplicationStopFailuresValueException) Message() string { +func (s *InvalidIgnoreApplicationStopFailuresValueException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12943,28 +13265,28 @@ func (s InvalidIgnoreApplicationStopFailuresValueException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidIgnoreApplicationStopFailuresValueException) OrigErr() error { +func (s *InvalidIgnoreApplicationStopFailuresValueException) OrigErr() error { return nil } -func (s InvalidIgnoreApplicationStopFailuresValueException) Error() string { +func (s *InvalidIgnoreApplicationStopFailuresValueException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidIgnoreApplicationStopFailuresValueException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidIgnoreApplicationStopFailuresValueException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidIgnoreApplicationStopFailuresValueException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidIgnoreApplicationStopFailuresValueException) RequestID() string { + return s.RespMetadata.RequestID } // The input was specified in an invalid format. type InvalidInputException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12981,17 +13303,17 @@ func (s InvalidInputException) GoString() string { func newErrorInvalidInputException(v protocol.ResponseMetadata) error { return &InvalidInputException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidInputException) Code() string { +func (s *InvalidInputException) Code() string { return "InvalidInputException" } // Message returns the exception's message. -func (s InvalidInputException) Message() string { +func (s *InvalidInputException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12999,28 +13321,28 @@ func (s InvalidInputException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInputException) OrigErr() error { +func (s *InvalidInputException) OrigErr() error { return nil } -func (s InvalidInputException) Error() string { +func (s *InvalidInputException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInputException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInputException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInputException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInputException) RequestID() string { + return s.RespMetadata.RequestID } // The on-premises instance name was specified in an invalid format. type InvalidInstanceNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13037,17 +13359,17 @@ func (s InvalidInstanceNameException) GoString() string { func newErrorInvalidInstanceNameException(v protocol.ResponseMetadata) error { return &InvalidInstanceNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInstanceNameException) Code() string { +func (s *InvalidInstanceNameException) Code() string { return "InvalidInstanceNameException" } // Message returns the exception's message. -func (s InvalidInstanceNameException) Message() string { +func (s *InvalidInstanceNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13055,28 +13377,28 @@ func (s InvalidInstanceNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInstanceNameException) OrigErr() error { +func (s *InvalidInstanceNameException) OrigErr() error { return nil } -func (s InvalidInstanceNameException) Error() string { +func (s *InvalidInstanceNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInstanceNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInstanceNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInstanceNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInstanceNameException) RequestID() string { + return s.RespMetadata.RequestID } // The specified instance status does not exist. 
type InvalidInstanceStatusException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13093,17 +13415,17 @@ func (s InvalidInstanceStatusException) GoString() string { func newErrorInvalidInstanceStatusException(v protocol.ResponseMetadata) error { return &InvalidInstanceStatusException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInstanceStatusException) Code() string { +func (s *InvalidInstanceStatusException) Code() string { return "InvalidInstanceStatusException" } // Message returns the exception's message. -func (s InvalidInstanceStatusException) Message() string { +func (s *InvalidInstanceStatusException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13111,30 +13433,30 @@ func (s InvalidInstanceStatusException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInstanceStatusException) OrigErr() error { +func (s *InvalidInstanceStatusException) OrigErr() error { return nil } -func (s InvalidInstanceStatusException) Error() string { +func (s *InvalidInstanceStatusException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInstanceStatusException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInstanceStatusException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInstanceStatusException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInstanceStatusException) RequestID() string { + return s.RespMetadata.RequestID } // An invalid instance type was specified for instances in a blue/green deployment. // Valid values include "Blue" for an original environment and "Green" for a // replacement environment. type InvalidInstanceTypeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13151,17 +13473,17 @@ func (s InvalidInstanceTypeException) GoString() string { func newErrorInvalidInstanceTypeException(v protocol.ResponseMetadata) error { return &InvalidInstanceTypeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInstanceTypeException) Code() string { +func (s *InvalidInstanceTypeException) Code() string { return "InvalidInstanceTypeException" } // Message returns the exception's message. -func (s InvalidInstanceTypeException) Message() string { +func (s *InvalidInstanceTypeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13169,28 +13491,28 @@ func (s InvalidInstanceTypeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInstanceTypeException) OrigErr() error { +func (s *InvalidInstanceTypeException) OrigErr() error { return nil } -func (s InvalidInstanceTypeException) Error() string { +func (s *InvalidInstanceTypeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidInstanceTypeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInstanceTypeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInstanceTypeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInstanceTypeException) RequestID() string { + return s.RespMetadata.RequestID } // The specified key prefix filter was specified in an invalid format. type InvalidKeyPrefixFilterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13207,17 +13529,17 @@ func (s InvalidKeyPrefixFilterException) GoString() string { func newErrorInvalidKeyPrefixFilterException(v protocol.ResponseMetadata) error { return &InvalidKeyPrefixFilterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidKeyPrefixFilterException) Code() string { +func (s *InvalidKeyPrefixFilterException) Code() string { return "InvalidKeyPrefixFilterException" } // Message returns the exception's message. -func (s InvalidKeyPrefixFilterException) Message() string { +func (s *InvalidKeyPrefixFilterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13225,29 +13547,29 @@ func (s InvalidKeyPrefixFilterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidKeyPrefixFilterException) OrigErr() error { +func (s *InvalidKeyPrefixFilterException) OrigErr() error { return nil } -func (s InvalidKeyPrefixFilterException) Error() string { +func (s *InvalidKeyPrefixFilterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidKeyPrefixFilterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidKeyPrefixFilterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidKeyPrefixFilterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidKeyPrefixFilterException) RequestID() string { + return s.RespMetadata.RequestID } // A lifecycle event hook is invalid. Review the hooks section in your AppSpec // file to ensure the lifecycle events and hooks functions are valid. type InvalidLifecycleEventHookExecutionIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13264,17 +13586,17 @@ func (s InvalidLifecycleEventHookExecutionIdException) GoString() string { func newErrorInvalidLifecycleEventHookExecutionIdException(v protocol.ResponseMetadata) error { return &InvalidLifecycleEventHookExecutionIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidLifecycleEventHookExecutionIdException) Code() string { +func (s *InvalidLifecycleEventHookExecutionIdException) Code() string { return "InvalidLifecycleEventHookExecutionIdException" } // Message returns the exception's message. 
-func (s InvalidLifecycleEventHookExecutionIdException) Message() string { +func (s *InvalidLifecycleEventHookExecutionIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13282,29 +13604,29 @@ func (s InvalidLifecycleEventHookExecutionIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidLifecycleEventHookExecutionIdException) OrigErr() error { +func (s *InvalidLifecycleEventHookExecutionIdException) OrigErr() error { return nil } -func (s InvalidLifecycleEventHookExecutionIdException) Error() string { +func (s *InvalidLifecycleEventHookExecutionIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidLifecycleEventHookExecutionIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidLifecycleEventHookExecutionIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidLifecycleEventHookExecutionIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidLifecycleEventHookExecutionIdException) RequestID() string { + return s.RespMetadata.RequestID } // The result of a Lambda validation function that verifies a lifecycle event // is invalid. It should return Succeeded or Failed. type InvalidLifecycleEventHookExecutionStatusException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13321,17 +13643,17 @@ func (s InvalidLifecycleEventHookExecutionStatusException) GoString() string { func newErrorInvalidLifecycleEventHookExecutionStatusException(v protocol.ResponseMetadata) error { return &InvalidLifecycleEventHookExecutionStatusException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidLifecycleEventHookExecutionStatusException) Code() string { +func (s *InvalidLifecycleEventHookExecutionStatusException) Code() string { return "InvalidLifecycleEventHookExecutionStatusException" } // Message returns the exception's message. -func (s InvalidLifecycleEventHookExecutionStatusException) Message() string { +func (s *InvalidLifecycleEventHookExecutionStatusException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13339,28 +13661,28 @@ func (s InvalidLifecycleEventHookExecutionStatusException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidLifecycleEventHookExecutionStatusException) OrigErr() error { +func (s *InvalidLifecycleEventHookExecutionStatusException) OrigErr() error { return nil } -func (s InvalidLifecycleEventHookExecutionStatusException) Error() string { +func (s *InvalidLifecycleEventHookExecutionStatusException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidLifecycleEventHookExecutionStatusException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidLifecycleEventHookExecutionStatusException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidLifecycleEventHookExecutionStatusException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidLifecycleEventHookExecutionStatusException) RequestID() string { + return s.RespMetadata.RequestID } // An invalid load balancer name, or no load balancer name, was specified. type InvalidLoadBalancerInfoException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13377,17 +13699,17 @@ func (s InvalidLoadBalancerInfoException) GoString() string { func newErrorInvalidLoadBalancerInfoException(v protocol.ResponseMetadata) error { return &InvalidLoadBalancerInfoException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidLoadBalancerInfoException) Code() string { +func (s *InvalidLoadBalancerInfoException) Code() string { return "InvalidLoadBalancerInfoException" } // Message returns the exception's message. -func (s InvalidLoadBalancerInfoException) Message() string { +func (s *InvalidLoadBalancerInfoException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13395,28 +13717,28 @@ func (s InvalidLoadBalancerInfoException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidLoadBalancerInfoException) OrigErr() error { +func (s *InvalidLoadBalancerInfoException) OrigErr() error { return nil } -func (s InvalidLoadBalancerInfoException) Error() string { +func (s *InvalidLoadBalancerInfoException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidLoadBalancerInfoException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidLoadBalancerInfoException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidLoadBalancerInfoException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidLoadBalancerInfoException) RequestID() string { + return s.RespMetadata.RequestID } // The minimum healthy instance value was specified in an invalid format. type InvalidMinimumHealthyHostValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13433,17 +13755,17 @@ func (s InvalidMinimumHealthyHostValueException) GoString() string { func newErrorInvalidMinimumHealthyHostValueException(v protocol.ResponseMetadata) error { return &InvalidMinimumHealthyHostValueException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidMinimumHealthyHostValueException) Code() string { +func (s *InvalidMinimumHealthyHostValueException) Code() string { return "InvalidMinimumHealthyHostValueException" } // Message returns the exception's message. -func (s InvalidMinimumHealthyHostValueException) Message() string { +func (s *InvalidMinimumHealthyHostValueException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13451,28 +13773,28 @@ func (s InvalidMinimumHealthyHostValueException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
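// --- illustrative sketch (not part of the vendored diff) -------------------
// The hunks above migrate the generated CodeDeploy exception types from value
// receivers and an unexported respMetadata field to pointer receivers and an
// exported RespMetadata field. A minimal sketch of what that enables for a
// caller, assuming the usual github.com/aws/aws-sdk-go/service/codedeploy
// import path from this vendored SDK version; the NextToken value is
// hypothetical and only meant to trigger the typed error:
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

func main() {
	svc := codedeploy.New(session.Must(session.NewSession()))

	_, err := svc.ListDeployments(&codedeploy.ListDeploymentsInput{
		NextToken: aws.String("stale-token"), // hypothetical, likely invalid
	})
	if err != nil {
		// Because the generated types now satisfy the error interface on
		// pointer receivers, errors.As can target the concrete exception,
		// and the exported RespMetadata backs StatusCode/RequestID.
		var nte *codedeploy.InvalidNextTokenException
		if errors.As(err, &nte) {
			fmt.Println(nte.Code(), nte.Message(), nte.StatusCode(), nte.RequestID())
			return
		}
		fmt.Println("other error:", err)
	}
}
// ---------------------------------------------------------------------------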
-func (s InvalidMinimumHealthyHostValueException) OrigErr() error { +func (s *InvalidMinimumHealthyHostValueException) OrigErr() error { return nil } -func (s InvalidMinimumHealthyHostValueException) Error() string { +func (s *InvalidMinimumHealthyHostValueException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidMinimumHealthyHostValueException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidMinimumHealthyHostValueException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidMinimumHealthyHostValueException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidMinimumHealthyHostValueException) RequestID() string { + return s.RespMetadata.RequestID } // The next token was specified in an invalid format. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13489,17 +13811,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13507,29 +13829,29 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // A call was submitted that specified both OnPremisesTagFilters and OnPremisesTagSet, // but only one of these data types can be used in a single call. type InvalidOnPremisesTagCombinationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13546,17 +13868,17 @@ func (s InvalidOnPremisesTagCombinationException) GoString() string { func newErrorInvalidOnPremisesTagCombinationException(v protocol.ResponseMetadata) error { return &InvalidOnPremisesTagCombinationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidOnPremisesTagCombinationException) Code() string { +func (s *InvalidOnPremisesTagCombinationException) Code() string { return "InvalidOnPremisesTagCombinationException" } // Message returns the exception's message. -func (s InvalidOnPremisesTagCombinationException) Message() string { +func (s *InvalidOnPremisesTagCombinationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13564,28 +13886,28 @@ func (s InvalidOnPremisesTagCombinationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOnPremisesTagCombinationException) OrigErr() error { +func (s *InvalidOnPremisesTagCombinationException) OrigErr() error { return nil } -func (s InvalidOnPremisesTagCombinationException) Error() string { +func (s *InvalidOnPremisesTagCombinationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOnPremisesTagCombinationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOnPremisesTagCombinationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidOnPremisesTagCombinationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOnPremisesTagCombinationException) RequestID() string { + return s.RespMetadata.RequestID } // An invalid operation was detected. type InvalidOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13602,17 +13924,17 @@ func (s InvalidOperationException) GoString() string { func newErrorInvalidOperationException(v protocol.ResponseMetadata) error { return &InvalidOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOperationException) Code() string { +func (s *InvalidOperationException) Code() string { return "InvalidOperationException" } // Message returns the exception's message. -func (s InvalidOperationException) Message() string { +func (s *InvalidOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13620,28 +13942,28 @@ func (s InvalidOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOperationException) OrigErr() error { +func (s *InvalidOperationException) OrigErr() error { return nil } -func (s InvalidOperationException) Error() string { +func (s *InvalidOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOperationException) RequestID() string { + return s.RespMetadata.RequestID } // The registration status was specified in an invalid format. 
type InvalidRegistrationStatusException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13658,17 +13980,17 @@ func (s InvalidRegistrationStatusException) GoString() string { func newErrorInvalidRegistrationStatusException(v protocol.ResponseMetadata) error { return &InvalidRegistrationStatusException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRegistrationStatusException) Code() string { +func (s *InvalidRegistrationStatusException) Code() string { return "InvalidRegistrationStatusException" } // Message returns the exception's message. -func (s InvalidRegistrationStatusException) Message() string { +func (s *InvalidRegistrationStatusException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13676,28 +13998,28 @@ func (s InvalidRegistrationStatusException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRegistrationStatusException) OrigErr() error { +func (s *InvalidRegistrationStatusException) OrigErr() error { return nil } -func (s InvalidRegistrationStatusException) Error() string { +func (s *InvalidRegistrationStatusException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRegistrationStatusException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRegistrationStatusException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRegistrationStatusException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRegistrationStatusException) RequestID() string { + return s.RespMetadata.RequestID } // The revision was specified in an invalid format. type InvalidRevisionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13714,17 +14036,17 @@ func (s InvalidRevisionException) GoString() string { func newErrorInvalidRevisionException(v protocol.ResponseMetadata) error { return &InvalidRevisionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRevisionException) Code() string { +func (s *InvalidRevisionException) Code() string { return "InvalidRevisionException" } // Message returns the exception's message. -func (s InvalidRevisionException) Message() string { +func (s *InvalidRevisionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13732,30 +14054,30 @@ func (s InvalidRevisionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRevisionException) OrigErr() error { +func (s *InvalidRevisionException) OrigErr() error { return nil } -func (s InvalidRevisionException) Error() string { +func (s *InvalidRevisionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidRevisionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRevisionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRevisionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRevisionException) RequestID() string { + return s.RespMetadata.RequestID } // The service role ARN was specified in an invalid format. Or, if an Auto Scaling // group was specified, the specified service role does not grant the appropriate // permissions to Amazon EC2 Auto Scaling. type InvalidRoleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13772,17 +14094,17 @@ func (s InvalidRoleException) GoString() string { func newErrorInvalidRoleException(v protocol.ResponseMetadata) error { return &InvalidRoleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRoleException) Code() string { +func (s *InvalidRoleException) Code() string { return "InvalidRoleException" } // Message returns the exception's message. -func (s InvalidRoleException) Message() string { +func (s *InvalidRoleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13790,29 +14112,29 @@ func (s InvalidRoleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRoleException) OrigErr() error { +func (s *InvalidRoleException) OrigErr() error { return nil } -func (s InvalidRoleException) Error() string { +func (s *InvalidRoleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRoleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRoleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRoleException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRoleException) RequestID() string { + return s.RespMetadata.RequestID } // The column name to sort by is either not present or was specified in an invalid // format. type InvalidSortByException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13829,17 +14151,17 @@ func (s InvalidSortByException) GoString() string { func newErrorInvalidSortByException(v protocol.ResponseMetadata) error { return &InvalidSortByException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSortByException) Code() string { +func (s *InvalidSortByException) Code() string { return "InvalidSortByException" } // Message returns the exception's message. -func (s InvalidSortByException) Message() string { +func (s *InvalidSortByException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13847,28 +14169,28 @@ func (s InvalidSortByException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidSortByException) OrigErr() error { +func (s *InvalidSortByException) OrigErr() error { return nil } -func (s InvalidSortByException) Error() string { +func (s *InvalidSortByException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSortByException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSortByException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSortByException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSortByException) RequestID() string { + return s.RespMetadata.RequestID } // The sort order was specified in an invalid format. type InvalidSortOrderException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13885,17 +14207,17 @@ func (s InvalidSortOrderException) GoString() string { func newErrorInvalidSortOrderException(v protocol.ResponseMetadata) error { return &InvalidSortOrderException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSortOrderException) Code() string { +func (s *InvalidSortOrderException) Code() string { return "InvalidSortOrderException" } // Message returns the exception's message. -func (s InvalidSortOrderException) Message() string { +func (s *InvalidSortOrderException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13903,28 +14225,28 @@ func (s InvalidSortOrderException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidSortOrderException) OrigErr() error { +func (s *InvalidSortOrderException) OrigErr() error { return nil } -func (s InvalidSortOrderException) Error() string { +func (s *InvalidSortOrderException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSortOrderException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSortOrderException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSortOrderException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSortOrderException) RequestID() string { + return s.RespMetadata.RequestID } // The tag was specified in an invalid format. type InvalidTagException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13941,17 +14263,17 @@ func (s InvalidTagException) GoString() string { func newErrorInvalidTagException(v protocol.ResponseMetadata) error { return &InvalidTagException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTagException) Code() string { +func (s *InvalidTagException) Code() string { return "InvalidTagException" } // Message returns the exception's message. 
-func (s InvalidTagException) Message() string { +func (s *InvalidTagException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13959,28 +14281,28 @@ func (s InvalidTagException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTagException) OrigErr() error { +func (s *InvalidTagException) OrigErr() error { return nil } -func (s InvalidTagException) Error() string { +func (s *InvalidTagException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTagException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTagException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTagException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTagException) RequestID() string { + return s.RespMetadata.RequestID } // The tag filter was specified in an invalid format. type InvalidTagFilterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13997,17 +14319,17 @@ func (s InvalidTagFilterException) GoString() string { func newErrorInvalidTagFilterException(v protocol.ResponseMetadata) error { return &InvalidTagFilterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTagFilterException) Code() string { +func (s *InvalidTagFilterException) Code() string { return "InvalidTagFilterException" } // Message returns the exception's message. -func (s InvalidTagFilterException) Message() string { +func (s *InvalidTagFilterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14015,28 +14337,28 @@ func (s InvalidTagFilterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTagFilterException) OrigErr() error { +func (s *InvalidTagFilterException) OrigErr() error { return nil } -func (s InvalidTagFilterException) Error() string { +func (s *InvalidTagFilterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTagFilterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTagFilterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTagFilterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTagFilterException) RequestID() string { + return s.RespMetadata.RequestID } // The specified tags are not valid. type InvalidTagsToAddException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14053,17 +14375,17 @@ func (s InvalidTagsToAddException) GoString() string { func newErrorInvalidTagsToAddException(v protocol.ResponseMetadata) error { return &InvalidTagsToAddException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidTagsToAddException) Code() string { +func (s *InvalidTagsToAddException) Code() string { return "InvalidTagsToAddException" } // Message returns the exception's message. -func (s InvalidTagsToAddException) Message() string { +func (s *InvalidTagsToAddException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14071,28 +14393,28 @@ func (s InvalidTagsToAddException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTagsToAddException) OrigErr() error { +func (s *InvalidTagsToAddException) OrigErr() error { return nil } -func (s InvalidTagsToAddException) Error() string { +func (s *InvalidTagsToAddException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTagsToAddException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTagsToAddException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTagsToAddException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTagsToAddException) RequestID() string { + return s.RespMetadata.RequestID } // The target filter name is invalid. type InvalidTargetFilterNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14109,17 +14431,17 @@ func (s InvalidTargetFilterNameException) GoString() string { func newErrorInvalidTargetFilterNameException(v protocol.ResponseMetadata) error { return &InvalidTargetFilterNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTargetFilterNameException) Code() string { +func (s *InvalidTargetFilterNameException) Code() string { return "InvalidTargetFilterNameException" } // Message returns the exception's message. -func (s InvalidTargetFilterNameException) Message() string { +func (s *InvalidTargetFilterNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14127,28 +14449,28 @@ func (s InvalidTargetFilterNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTargetFilterNameException) OrigErr() error { +func (s *InvalidTargetFilterNameException) OrigErr() error { return nil } -func (s InvalidTargetFilterNameException) Error() string { +func (s *InvalidTargetFilterNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTargetFilterNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTargetFilterNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTargetFilterNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTargetFilterNameException) RequestID() string { + return s.RespMetadata.RequestID } // A target group pair associated with this deployment is not valid. 
type InvalidTargetGroupPairException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14165,17 +14487,17 @@ func (s InvalidTargetGroupPairException) GoString() string { func newErrorInvalidTargetGroupPairException(v protocol.ResponseMetadata) error { return &InvalidTargetGroupPairException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTargetGroupPairException) Code() string { +func (s *InvalidTargetGroupPairException) Code() string { return "InvalidTargetGroupPairException" } // Message returns the exception's message. -func (s InvalidTargetGroupPairException) Message() string { +func (s *InvalidTargetGroupPairException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14183,22 +14505,22 @@ func (s InvalidTargetGroupPairException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTargetGroupPairException) OrigErr() error { +func (s *InvalidTargetGroupPairException) OrigErr() error { return nil } -func (s InvalidTargetGroupPairException) Error() string { +func (s *InvalidTargetGroupPairException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTargetGroupPairException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTargetGroupPairException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTargetGroupPairException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTargetGroupPairException) RequestID() string { + return s.RespMetadata.RequestID } // The target instance configuration is invalid. Possible causes include: @@ -14212,8 +14534,8 @@ func (s InvalidTargetGroupPairException) RequestID() string { // // * A specified tag is not currently applied to any instances. type InvalidTargetInstancesException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14230,17 +14552,17 @@ func (s InvalidTargetInstancesException) GoString() string { func newErrorInvalidTargetInstancesException(v protocol.ResponseMetadata) error { return &InvalidTargetInstancesException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTargetInstancesException) Code() string { +func (s *InvalidTargetInstancesException) Code() string { return "InvalidTargetInstancesException" } // Message returns the exception's message. -func (s InvalidTargetInstancesException) Message() string { +func (s *InvalidTargetInstancesException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14248,28 +14570,28 @@ func (s InvalidTargetInstancesException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
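// --- illustrative sketch (not part of the vendored diff) -------------------
// Exporting RespMetadata (alongside the already-exported Message_ field) also
// makes these exception values constructible outside the generated package,
// e.g. when stubbing a CodeDeploy client in a unit test. A sketch under that
// assumption; the message text and request ID are made up, and note that the
// protocol package lives under aws-sdk-go's private/ tree (importable, but
// not a stable public API):
package example_test

import (
	"testing"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

func TestHandlesInvalidRole(t *testing.T) {
	// Fabricate the error a real call would return for a service role that
	// lacks the required Auto Scaling permissions.
	err := &codedeploy.InvalidRoleException{
		RespMetadata: protocol.ResponseMetadata{StatusCode: 400, RequestID: "req-123"},
		Message_:     aws.String("role does not grant Auto Scaling permissions"),
	}

	if err.StatusCode() != 400 || err.Code() != "InvalidRoleException" {
		t.Fatalf("unexpected error shape: %v", err)
	}
}
// ---------------------------------------------------------------------------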
-func (s InvalidTargetInstancesException) OrigErr() error { +func (s *InvalidTargetInstancesException) OrigErr() error { return nil } -func (s InvalidTargetInstancesException) Error() string { +func (s *InvalidTargetInstancesException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTargetInstancesException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTargetInstancesException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTargetInstancesException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTargetInstancesException) RequestID() string { + return s.RespMetadata.RequestID } // The specified time range was specified in an invalid format. type InvalidTimeRangeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14286,17 +14608,17 @@ func (s InvalidTimeRangeException) GoString() string { func newErrorInvalidTimeRangeException(v protocol.ResponseMetadata) error { return &InvalidTimeRangeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTimeRangeException) Code() string { +func (s *InvalidTimeRangeException) Code() string { return "InvalidTimeRangeException" } // Message returns the exception's message. -func (s InvalidTimeRangeException) Message() string { +func (s *InvalidTimeRangeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14304,29 +14626,29 @@ func (s InvalidTimeRangeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTimeRangeException) OrigErr() error { +func (s *InvalidTimeRangeException) OrigErr() error { return nil } -func (s InvalidTimeRangeException) Error() string { +func (s *InvalidTimeRangeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTimeRangeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTimeRangeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTimeRangeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTimeRangeException) RequestID() string { + return s.RespMetadata.RequestID } // The configuration that specifies how traffic is routed during a deployment // is invalid. type InvalidTrafficRoutingConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14343,17 +14665,17 @@ func (s InvalidTrafficRoutingConfigurationException) GoString() string { func newErrorInvalidTrafficRoutingConfigurationException(v protocol.ResponseMetadata) error { return &InvalidTrafficRoutingConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidTrafficRoutingConfigurationException) Code() string { +func (s *InvalidTrafficRoutingConfigurationException) Code() string { return "InvalidTrafficRoutingConfigurationException" } // Message returns the exception's message. -func (s InvalidTrafficRoutingConfigurationException) Message() string { +func (s *InvalidTrafficRoutingConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14361,28 +14683,28 @@ func (s InvalidTrafficRoutingConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTrafficRoutingConfigurationException) OrigErr() error { +func (s *InvalidTrafficRoutingConfigurationException) OrigErr() error { return nil } -func (s InvalidTrafficRoutingConfigurationException) Error() string { +func (s *InvalidTrafficRoutingConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTrafficRoutingConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTrafficRoutingConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTrafficRoutingConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTrafficRoutingConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // The trigger was specified in an invalid format. type InvalidTriggerConfigException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14399,17 +14721,17 @@ func (s InvalidTriggerConfigException) GoString() string { func newErrorInvalidTriggerConfigException(v protocol.ResponseMetadata) error { return &InvalidTriggerConfigException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTriggerConfigException) Code() string { +func (s *InvalidTriggerConfigException) Code() string { return "InvalidTriggerConfigException" } // Message returns the exception's message. -func (s InvalidTriggerConfigException) Message() string { +func (s *InvalidTriggerConfigException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14417,29 +14739,29 @@ func (s InvalidTriggerConfigException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTriggerConfigException) OrigErr() error { +func (s *InvalidTriggerConfigException) OrigErr() error { return nil } -func (s InvalidTriggerConfigException) Error() string { +func (s *InvalidTriggerConfigException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTriggerConfigException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTriggerConfigException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTriggerConfigException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTriggerConfigException) RequestID() string { + return s.RespMetadata.RequestID } // The UpdateOutdatedInstancesOnly value is invalid. 
For AWS Lambda deployments, // false is expected. For EC2/On-premises deployments, true or false is expected. type InvalidUpdateOutdatedInstancesOnlyValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14456,17 +14778,17 @@ func (s InvalidUpdateOutdatedInstancesOnlyValueException) GoString() string { func newErrorInvalidUpdateOutdatedInstancesOnlyValueException(v protocol.ResponseMetadata) error { return &InvalidUpdateOutdatedInstancesOnlyValueException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidUpdateOutdatedInstancesOnlyValueException) Code() string { +func (s *InvalidUpdateOutdatedInstancesOnlyValueException) Code() string { return "InvalidUpdateOutdatedInstancesOnlyValueException" } // Message returns the exception's message. -func (s InvalidUpdateOutdatedInstancesOnlyValueException) Message() string { +func (s *InvalidUpdateOutdatedInstancesOnlyValueException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14474,22 +14796,22 @@ func (s InvalidUpdateOutdatedInstancesOnlyValueException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidUpdateOutdatedInstancesOnlyValueException) OrigErr() error { +func (s *InvalidUpdateOutdatedInstancesOnlyValueException) OrigErr() error { return nil } -func (s InvalidUpdateOutdatedInstancesOnlyValueException) Error() string { +func (s *InvalidUpdateOutdatedInstancesOnlyValueException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidUpdateOutdatedInstancesOnlyValueException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidUpdateOutdatedInstancesOnlyValueException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidUpdateOutdatedInstancesOnlyValueException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidUpdateOutdatedInstancesOnlyValueException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a Lambda function specified in a deployment. @@ -14499,8 +14821,9 @@ type LambdaFunctionInfo struct { // The version of a Lambda function that production traffic points to. CurrentVersion *string `locationName:"currentVersion" type:"string"` - // The alias of a Lambda function. For more information, see Introduction to - // AWS Lambda Aliases (https://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html). + // The alias of a Lambda function. For more information, see AWS Lambda Function + // Aliases (https://docs.aws.amazon.com/lambda/latest/dg/aliases-intro.html) + // in the AWS Lambda Developer Guide. FunctionAlias *string `locationName:"functionAlias" type:"string"` // The name of a Lambda function. @@ -14574,7 +14897,7 @@ type LambdaTarget struct { // The status an AWS Lambda deployment's target Lambda function. Status *string `locationName:"status" type:"string" enum:"TargetStatus"` - // The ARN of the target. + // The Amazon Resource Name (ARN) of the target. TargetArn *string `locationName:"targetArn" type:"string"` // The unique ID of a deployment target that has a type of lambdaTarget. 
@@ -14762,8 +15085,8 @@ func (s *LifecycleEvent) SetStatus(v string) *LifecycleEvent { // An attempt to return the status of an already completed lifecycle event occurred. type LifecycleEventAlreadyCompletedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14780,17 +15103,17 @@ func (s LifecycleEventAlreadyCompletedException) GoString() string { func newErrorLifecycleEventAlreadyCompletedException(v protocol.ResponseMetadata) error { return &LifecycleEventAlreadyCompletedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LifecycleEventAlreadyCompletedException) Code() string { +func (s *LifecycleEventAlreadyCompletedException) Code() string { return "LifecycleEventAlreadyCompletedException" } // Message returns the exception's message. -func (s LifecycleEventAlreadyCompletedException) Message() string { +func (s *LifecycleEventAlreadyCompletedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14798,28 +15121,28 @@ func (s LifecycleEventAlreadyCompletedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LifecycleEventAlreadyCompletedException) OrigErr() error { +func (s *LifecycleEventAlreadyCompletedException) OrigErr() error { return nil } -func (s LifecycleEventAlreadyCompletedException) Error() string { +func (s *LifecycleEventAlreadyCompletedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LifecycleEventAlreadyCompletedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LifecycleEventAlreadyCompletedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LifecycleEventAlreadyCompletedException) RequestID() string { - return s.respMetadata.RequestID +func (s *LifecycleEventAlreadyCompletedException) RequestID() string { + return s.RespMetadata.RequestID } // The limit for lifecycle hooks was exceeded. type LifecycleHookLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14836,17 +15159,17 @@ func (s LifecycleHookLimitExceededException) GoString() string { func newErrorLifecycleHookLimitExceededException(v protocol.ResponseMetadata) error { return &LifecycleHookLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LifecycleHookLimitExceededException) Code() string { +func (s *LifecycleHookLimitExceededException) Code() string { return "LifecycleHookLimitExceededException" } // Message returns the exception's message. -func (s LifecycleHookLimitExceededException) Message() string { +func (s *LifecycleHookLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14854,22 +15177,22 @@ func (s LifecycleHookLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
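// --- illustrative sketch (not part of the vendored diff) -------------------
// Besides the error-type migration, this vendor bump picks up new API surface
// such as the ExternalId field on ListDeploymentsInput shown above. A sketch
// of filtering deployments by an external resource ID; the ID value is
// hypothetical:
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

func main() {
	svc := codedeploy.New(session.Must(session.NewSession()))

	input := &codedeploy.ListDeploymentsInput{
		ExternalId: aws.String("my-external-resource-id"), // hypothetical
	}
	// The generated SetExternalId builder added in this diff is equivalent:
	// input = (&codedeploy.ListDeploymentsInput{}).SetExternalId("my-external-resource-id")

	out, err := svc.ListDeployments(input)
	if err != nil {
		log.Fatal(err)
	}
	for _, id := range out.Deployments {
		fmt.Println(aws.StringValue(id))
	}
}
// ---------------------------------------------------------------------------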
-func (s LifecycleHookLimitExceededException) OrigErr() error { +func (s *LifecycleHookLimitExceededException) OrigErr() error { return nil } -func (s LifecycleHookLimitExceededException) Error() string { +func (s *LifecycleHookLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LifecycleHookLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LifecycleHookLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LifecycleHookLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LifecycleHookLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the input of a ListApplicationRevisions operation. @@ -14883,7 +15206,7 @@ type ListApplicationRevisionsInput struct { ApplicationName *string `locationName:"applicationName" min:"1" type:"string" required:"true"` // Whether to list revisions based on whether the revision is the target revision - // of an deployment group: + // of a deployment group: // // * include: List revisions that are target revisions of a deployment group. // @@ -15469,6 +15792,10 @@ type ListDeploymentsInput struct { // If it is not specified, then applicationName must not be specified. DeploymentGroupName *string `locationName:"deploymentGroupName" min:"1" type:"string"` + // The unique ID of an external resource for returning deployments linked to + // the external resource. + ExternalId *string `locationName:"externalId" type:"string"` + // A subset of deployments to list by status: // // * Created: Include created deployments in the resulting list. @@ -15533,6 +15860,12 @@ func (s *ListDeploymentsInput) SetDeploymentGroupName(v string) *ListDeployments return s } +// SetExternalId sets the ExternalId field's value. +func (s *ListDeploymentsInput) SetExternalId(v string) *ListDeploymentsInput { + s.ExternalId = &v + return s +} + // SetIncludeOnlyStatuses sets the IncludeOnlyStatuses field's value. func (s *ListDeploymentsInput) SetIncludeOnlyStatuses(v []*string) *ListDeploymentsInput { s.IncludeOnlyStatuses = v @@ -15870,16 +16203,16 @@ type MinimumHealthyHosts struct { // The minimum healthy instance type: // - // * HOST_COUNT: The minimum number of healthy instance as an absolute value. + // * HOST_COUNT: The minimum number of healthy instances as an absolute value. // - // * FLEET_PERCENT: The minimum number of healthy instance as a percentage - // of the total number of instance in the deployment. + // * FLEET_PERCENT: The minimum number of healthy instances as a percentage + // of the total number of instances in the deployment. // - // In an example of nine instance, if a HOST_COUNT of six is specified, deploy + // In an example of nine instances, if a HOST_COUNT of six is specified, deploy // to up to three instances at a time. The deployment is successful if six or // more instances are deployed to successfully. Otherwise, the deployment fails. - // If a FLEET_PERCENT of 40 is specified, deploy to up to five instance at a - // time. The deployment is successful if four or more instance are deployed + // If a FLEET_PERCENT of 40 is specified, deploy to up to five instances at + // a time. The deployment is successful if four or more instances are deployed // to successfully. Otherwise, the deployment fails. 
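// Editor's note (illustrative sketch, not part of the generated patch): the ExternalId
// field and SetExternalId setter added to ListDeploymentsInput above let ListDeployments
// return only deployments linked to an external resource (for example, a CloudFormation
// stack). The external ID passed in is caller-supplied; nothing here is code from this
// repository.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

func listDeploymentsForExternalResource(svc *codedeploy.CodeDeploy, externalID string) error {
	input := &codedeploy.ListDeploymentsInput{}
	input.SetExternalId(externalID)

	return svc.ListDeploymentsPages(input, func(page *codedeploy.ListDeploymentsOutput, lastPage bool) bool {
		for _, id := range page.Deployments {
			fmt.Println(aws.StringValue(id)) // deployment IDs linked to the external resource
		}
		return true
	})
}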
// // In a call to the GetDeploymentConfig, CodeDeployDefault.OneAtATime returns @@ -15925,8 +16258,8 @@ func (s *MinimumHealthyHosts) SetValue(v int64) *MinimumHealthyHosts { // Both an IAM user ARN and an IAM session ARN were included in the request. // Use only one ARN type. type MultipleIamArnsProvidedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15943,17 +16276,17 @@ func (s MultipleIamArnsProvidedException) GoString() string { func newErrorMultipleIamArnsProvidedException(v protocol.ResponseMetadata) error { return &MultipleIamArnsProvidedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MultipleIamArnsProvidedException) Code() string { +func (s *MultipleIamArnsProvidedException) Code() string { return "MultipleIamArnsProvidedException" } // Message returns the exception's message. -func (s MultipleIamArnsProvidedException) Message() string { +func (s *MultipleIamArnsProvidedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15961,22 +16294,22 @@ func (s MultipleIamArnsProvidedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MultipleIamArnsProvidedException) OrigErr() error { +func (s *MultipleIamArnsProvidedException) OrigErr() error { return nil } -func (s MultipleIamArnsProvidedException) Error() string { +func (s *MultipleIamArnsProvidedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MultipleIamArnsProvidedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MultipleIamArnsProvidedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MultipleIamArnsProvidedException) RequestID() string { - return s.respMetadata.RequestID +func (s *MultipleIamArnsProvidedException) RequestID() string { + return s.RespMetadata.RequestID } // Information about groups of on-premises instance tags. @@ -16007,8 +16340,8 @@ func (s *OnPremisesTagSet) SetOnPremisesTagSetList(v [][]*TagFilter) *OnPremises // The API used does not support the deployment. type OperationNotSupportedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16025,17 +16358,17 @@ func (s OperationNotSupportedException) GoString() string { func newErrorOperationNotSupportedException(v protocol.ResponseMetadata) error { return &OperationNotSupportedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationNotSupportedException) Code() string { +func (s *OperationNotSupportedException) Code() string { return "OperationNotSupportedException" } // Message returns the exception's message. -func (s OperationNotSupportedException) Message() string { +func (s *OperationNotSupportedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16043,22 +16376,22 @@ func (s OperationNotSupportedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
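// Editor's note (illustrative sketch, not part of the generated patch): the corrected
// MinimumHealthyHosts documentation above walks through a fleet of nine instances where
// FLEET_PERCENT 40 requires at least four healthy instances, so CodeDeploy deploys to at
// most five at a time. A server deployment configuration expressing that rule could be
// created as below; the config name is a hypothetical placeholder.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

func createFleetPercentConfig(svc *codedeploy.CodeDeploy) error {
	_, err := svc.CreateDeploymentConfig(&codedeploy.CreateDeploymentConfigInput{
		DeploymentConfigName: aws.String("Example.FleetPercent40"), // hypothetical name
		ComputePlatform:      aws.String(codedeploy.ComputePlatformServer),
		MinimumHealthyHosts: &codedeploy.MinimumHealthyHosts{
			Type:  aws.String(codedeploy.MinimumHealthyHostsTypeFleetPercent),
			Value: aws.Int64(40),
		},
	})
	return err
}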
-func (s OperationNotSupportedException) OrigErr() error { +func (s *OperationNotSupportedException) OrigErr() error { return nil } -func (s OperationNotSupportedException) Error() string { +func (s *OperationNotSupportedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OperationNotSupportedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OperationNotSupportedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationNotSupportedException) RequestID() string { - return s.respMetadata.RequestID +func (s *OperationNotSupportedException) RequestID() string { + return s.RespMetadata.RequestID } type PutLifecycleEventHookExecutionStatusInput struct { @@ -16389,8 +16722,8 @@ func (s RemoveTagsFromOnPremisesInstancesOutput) GoString() string { // The ARN of a resource is required, but was not found. type ResourceArnRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16407,17 +16740,17 @@ func (s ResourceArnRequiredException) GoString() string { func newErrorResourceArnRequiredException(v protocol.ResponseMetadata) error { return &ResourceArnRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceArnRequiredException) Code() string { +func (s *ResourceArnRequiredException) Code() string { return "ResourceArnRequiredException" } // Message returns the exception's message. -func (s ResourceArnRequiredException) Message() string { +func (s *ResourceArnRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16425,28 +16758,28 @@ func (s ResourceArnRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceArnRequiredException) OrigErr() error { +func (s *ResourceArnRequiredException) OrigErr() error { return nil } -func (s ResourceArnRequiredException) Error() string { +func (s *ResourceArnRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceArnRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceArnRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceArnRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceArnRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource could not be validated. type ResourceValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16463,17 +16796,17 @@ func (s ResourceValidationException) GoString() string { func newErrorResourceValidationException(v protocol.ResponseMetadata) error { return &ResourceValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ResourceValidationException) Code() string { +func (s *ResourceValidationException) Code() string { return "ResourceValidationException" } // Message returns the exception's message. -func (s ResourceValidationException) Message() string { +func (s *ResourceValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16481,28 +16814,28 @@ func (s ResourceValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceValidationException) OrigErr() error { +func (s *ResourceValidationException) OrigErr() error { return nil } -func (s ResourceValidationException) Error() string { +func (s *ResourceValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceValidationException) RequestID() string { + return s.RespMetadata.RequestID } // The named revision does not exist with the IAM user or AWS account. type RevisionDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16519,17 +16852,17 @@ func (s RevisionDoesNotExistException) GoString() string { func newErrorRevisionDoesNotExistException(v protocol.ResponseMetadata) error { return &RevisionDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RevisionDoesNotExistException) Code() string { +func (s *RevisionDoesNotExistException) Code() string { return "RevisionDoesNotExistException" } // Message returns the exception's message. -func (s RevisionDoesNotExistException) Message() string { +func (s *RevisionDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16537,22 +16870,22 @@ func (s RevisionDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RevisionDoesNotExistException) OrigErr() error { +func (s *RevisionDoesNotExistException) OrigErr() error { return nil } -func (s RevisionDoesNotExistException) Error() string { +func (s *RevisionDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RevisionDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RevisionDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RevisionDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *RevisionDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // Information about an application revision. @@ -16609,6 +16942,10 @@ type RevisionLocation struct { // // * String: A YAML-formatted or JSON-formatted string (AWS Lambda deployments // only). 
+ // + // * AppSpecContent: An AppSpecContent object that contains the contents + // of an AppSpec file for an AWS Lambda or Amazon ECS deployment. The content + // is formatted as JSON or YAML stored as a RawString. RevisionType *string `locationName:"revisionType" type:"string" enum:"RevisionLocationType"` // Information about the location of a revision stored in Amazon S3. @@ -16661,8 +16998,8 @@ func (s *RevisionLocation) SetString_(v *RawString) *RevisionLocation { // The revision ID was not specified. type RevisionRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16679,17 +17016,17 @@ func (s RevisionRequiredException) GoString() string { func newErrorRevisionRequiredException(v protocol.ResponseMetadata) error { return &RevisionRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RevisionRequiredException) Code() string { +func (s *RevisionRequiredException) Code() string { return "RevisionRequiredException" } // Message returns the exception's message. -func (s RevisionRequiredException) Message() string { +func (s *RevisionRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16697,28 +17034,28 @@ func (s RevisionRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RevisionRequiredException) OrigErr() error { +func (s *RevisionRequiredException) OrigErr() error { return nil } -func (s RevisionRequiredException) Error() string { +func (s *RevisionRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RevisionRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RevisionRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RevisionRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *RevisionRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The role ID was not specified. type RoleRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16735,17 +17072,17 @@ func (s RoleRequiredException) GoString() string { func newErrorRoleRequiredException(v protocol.ResponseMetadata) error { return &RoleRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RoleRequiredException) Code() string { +func (s *RoleRequiredException) Code() string { return "RoleRequiredException" } // Message returns the exception's message. -func (s RoleRequiredException) Message() string { +func (s *RoleRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16753,22 +17090,22 @@ func (s RoleRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s RoleRequiredException) OrigErr() error { +func (s *RoleRequiredException) OrigErr() error { return nil } -func (s RoleRequiredException) Error() string { +func (s *RoleRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RoleRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RoleRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RoleRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *RoleRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a deployment rollback. @@ -17099,8 +17436,8 @@ func (s *TagFilter) SetValue(v string) *TagFilter { // The maximum allowed number of tags was exceeded. type TagLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17117,17 +17454,17 @@ func (s TagLimitExceededException) GoString() string { func newErrorTagLimitExceededException(v protocol.ResponseMetadata) error { return &TagLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagLimitExceededException) Code() string { +func (s *TagLimitExceededException) Code() string { return "TagLimitExceededException" } // Message returns the exception's message. -func (s TagLimitExceededException) Message() string { +func (s *TagLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17135,28 +17472,28 @@ func (s TagLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagLimitExceededException) OrigErr() error { +func (s *TagLimitExceededException) OrigErr() error { return nil } -func (s TagLimitExceededException) Error() string { +func (s *TagLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // A tag was not specified. type TagRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17173,17 +17510,17 @@ func (s TagRequiredException) GoString() string { func newErrorTagRequiredException(v protocol.ResponseMetadata) error { return &TagRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagRequiredException) Code() string { +func (s *TagRequiredException) Code() string { return "TagRequiredException" } // Message returns the exception's message. 
-func (s TagRequiredException) Message() string { +func (s *TagRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17191,22 +17528,22 @@ func (s TagRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagRequiredException) OrigErr() error { +func (s *TagRequiredException) OrigErr() error { return nil } -func (s TagRequiredException) Error() string { +func (s *TagRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagRequiredException) RequestID() string { + return s.RespMetadata.RequestID } type TagResourceInput struct { @@ -17282,8 +17619,8 @@ func (s TagResourceOutput) GoString() string { // The number of tag groups included in the tag set list exceeded the maximum // allowed limit of 3. type TagSetListLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17300,17 +17637,17 @@ func (s TagSetListLimitExceededException) GoString() string { func newErrorTagSetListLimitExceededException(v protocol.ResponseMetadata) error { return &TagSetListLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagSetListLimitExceededException) Code() string { +func (s *TagSetListLimitExceededException) Code() string { return "TagSetListLimitExceededException" } // Message returns the exception's message. -func (s TagSetListLimitExceededException) Message() string { +func (s *TagSetListLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17318,22 +17655,22 @@ func (s TagSetListLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagSetListLimitExceededException) OrigErr() error { +func (s *TagSetListLimitExceededException) OrigErr() error { return nil } -func (s TagSetListLimitExceededException) Error() string { +func (s *TagSetListLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagSetListLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagSetListLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagSetListLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagSetListLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a target group in Elastic Load Balancing to use in a deployment. @@ -17465,8 +17802,8 @@ func (s *TargetInstances) SetTagFilters(v []*EC2TagFilter) *TargetInstances { // An API function was called too frequently. 
type ThrottlingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17483,17 +17820,17 @@ func (s ThrottlingException) GoString() string { func newErrorThrottlingException(v protocol.ResponseMetadata) error { return &ThrottlingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ThrottlingException) Code() string { +func (s *ThrottlingException) Code() string { return "ThrottlingException" } // Message returns the exception's message. -func (s ThrottlingException) Message() string { +func (s *ThrottlingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17501,27 +17838,28 @@ func (s ThrottlingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThrottlingException) OrigErr() error { +func (s *ThrottlingException) OrigErr() error { return nil } -func (s ThrottlingException) Error() string { +func (s *ThrottlingException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ThrottlingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ThrottlingException) RequestID() string { - return s.respMetadata.RequestID +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID } // A configuration that shifts traffic from one version of a Lambda function -// to another in two increments. The original and target Lambda function versions -// are specified in the deployment's AppSpec file. +// or ECS task set to another in two increments. The original and target Lambda +// function versions or ECS task sets are specified in the deployment's AppSpec +// file. type TimeBasedCanary struct { _ struct{} `type:"structure"` @@ -17557,9 +17895,9 @@ func (s *TimeBasedCanary) SetCanaryPercentage(v int64) *TimeBasedCanary { } // A configuration that shifts traffic from one version of a Lambda function -// to another in equal increments, with an equal number of minutes between each -// increment. The original and target Lambda function versions are specified -// in the deployment's AppSpec file. +// or ECS task set to another in equal increments, with an equal number of minutes +// between each increment. The original and target Lambda function versions +// or ECS task sets are specified in the deployment's AppSpec file. type TimeBasedLinear struct { _ struct{} `type:"structure"` @@ -17636,9 +17974,9 @@ func (s *TimeRange) SetStart(v time.Time) *TimeRange { type TrafficRoute struct { _ struct{} `type:"structure"` - // The ARN of one listener. The listener identifies the route between a target - // group and a load balancer. This is an array of strings with a maximum size - // of one. + // The Amazon Resource Name (ARN) of one listener. The listener identifies the + // route between a target group and a load balancer. This is an array of strings + // with a maximum size of one. 
ListenerArns []*string `locationName:"listenerArns" type:"list"` } @@ -17659,23 +17997,25 @@ func (s *TrafficRoute) SetListenerArns(v []*string) *TrafficRoute { } // The configuration that specifies how traffic is shifted from one version -// of a Lambda function to another version during an AWS Lambda deployment. +// of a Lambda function to another version during an AWS Lambda deployment, +// or from one Amazon ECS task set to another during an Amazon ECS deployment. type TrafficRoutingConfig struct { _ struct{} `type:"structure"` // A configuration that shifts traffic from one version of a Lambda function - // to another in two increments. The original and target Lambda function versions - // are specified in the deployment's AppSpec file. + // or ECS task set to another in two increments. The original and target Lambda + // function versions or ECS task sets are specified in the deployment's AppSpec + // file. TimeBasedCanary *TimeBasedCanary `locationName:"timeBasedCanary" type:"structure"` // A configuration that shifts traffic from one version of a Lambda function - // to another in equal increments, with an equal number of minutes between each - // increment. The original and target Lambda function versions are specified - // in the deployment's AppSpec file. + // or ECS task set to another in equal increments, with an equal number of minutes + // between each increment. The original and target Lambda function versions + // or ECS task sets are specified in the deployment's AppSpec file. TimeBasedLinear *TimeBasedLinear `locationName:"timeBasedLinear" type:"structure"` // The type of traffic shifting (TimeBasedCanary or TimeBasedLinear) used by - // a deployment configuration . + // a deployment configuration. Type *string `locationName:"type" type:"string" enum:"TrafficRoutingType"` } @@ -17717,8 +18057,9 @@ type TriggerConfig struct { // The name of the notification trigger. TriggerName *string `locationName:"triggerName" type:"string"` - // The ARN of the Amazon Simple Notification Service topic through which notifications - // about deployment or instance events are sent. + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // topic through which notifications about deployment or instance events are + // sent. TriggerTargetArn *string `locationName:"triggerTargetArn" type:"string"` } @@ -17752,8 +18093,8 @@ func (s *TriggerConfig) SetTriggerTargetArn(v string) *TriggerConfig { // The maximum allowed number of triggers was exceeded. type TriggerTargetsLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17770,17 +18111,17 @@ func (s TriggerTargetsLimitExceededException) GoString() string { func newErrorTriggerTargetsLimitExceededException(v protocol.ResponseMetadata) error { return &TriggerTargetsLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TriggerTargetsLimitExceededException) Code() string { +func (s *TriggerTargetsLimitExceededException) Code() string { return "TriggerTargetsLimitExceededException" } // Message returns the exception's message. 
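// Editor's note (illustrative sketch, not part of the generated patch): the updated
// documentation above extends TimeBasedCanary, TimeBasedLinear, and TrafficRoutingConfig
// to cover Amazon ECS task sets as well as Lambda function versions. A canary
// configuration that shifts 10 percent of traffic, waits five minutes, then shifts the
// remainder could be defined as below; the config name is a hypothetical placeholder.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/codedeploy"
)

func createCanaryConfig(svc *codedeploy.CodeDeploy) error {
	_, err := svc.CreateDeploymentConfig(&codedeploy.CreateDeploymentConfigInput{
		DeploymentConfigName: aws.String("Example.Canary10Percent5Minutes"), // hypothetical name
		ComputePlatform:      aws.String(codedeploy.ComputePlatformEcs),
		TrafficRoutingConfig: &codedeploy.TrafficRoutingConfig{
			Type: aws.String(codedeploy.TrafficRoutingTypeTimeBasedCanary),
			TimeBasedCanary: &codedeploy.TimeBasedCanary{
				CanaryPercentage: aws.Int64(10),
				CanaryInterval:   aws.Int64(5), // minutes before the remaining traffic shifts
			},
		},
	})
	return err
}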
-func (s TriggerTargetsLimitExceededException) Message() string { +func (s *TriggerTargetsLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17788,28 +18129,28 @@ func (s TriggerTargetsLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TriggerTargetsLimitExceededException) OrigErr() error { +func (s *TriggerTargetsLimitExceededException) OrigErr() error { return nil } -func (s TriggerTargetsLimitExceededException) Error() string { +func (s *TriggerTargetsLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TriggerTargetsLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TriggerTargetsLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TriggerTargetsLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *TriggerTargetsLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // A call was submitted that is not supported for the specified deployment type. type UnsupportedActionForDeploymentTypeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17826,17 +18167,17 @@ func (s UnsupportedActionForDeploymentTypeException) GoString() string { func newErrorUnsupportedActionForDeploymentTypeException(v protocol.ResponseMetadata) error { return &UnsupportedActionForDeploymentTypeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedActionForDeploymentTypeException) Code() string { +func (s *UnsupportedActionForDeploymentTypeException) Code() string { return "UnsupportedActionForDeploymentTypeException" } // Message returns the exception's message. -func (s UnsupportedActionForDeploymentTypeException) Message() string { +func (s *UnsupportedActionForDeploymentTypeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17844,29 +18185,29 @@ func (s UnsupportedActionForDeploymentTypeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedActionForDeploymentTypeException) OrigErr() error { +func (s *UnsupportedActionForDeploymentTypeException) OrigErr() error { return nil } -func (s UnsupportedActionForDeploymentTypeException) Error() string { +func (s *UnsupportedActionForDeploymentTypeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedActionForDeploymentTypeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedActionForDeploymentTypeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s UnsupportedActionForDeploymentTypeException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedActionForDeploymentTypeException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { _ struct{} `type:"structure"` - // The ARN that specifies from which resource to disassociate the tags with - // the keys in the TagKeys input paramter. + // The Amazon Resource Name (ARN) that specifies from which resource to disassociate + // the tags with the keys in the TagKeys input parameter. // // ResourceArn is a required field ResourceArn *string `min:"1" type:"string" required:"true"` @@ -18069,7 +18410,7 @@ type UpdateDeploymentGroupInput struct { ServiceRoleArn *string `locationName:"serviceRoleArn" type:"string"` // Information about triggers to change when the deployment group is updated. - // For examples, see Modify Triggers in an AWS CodeDeploy Deployment Group (https://docs.aws.amazon.com/codedeploy/latest/userguide/how-to-notify-edit.html) + // For examples, see Edit a Trigger in a CodeDeploy Deployment Group (https://docs.aws.amazon.com/codedeploy/latest/userguide/how-to-notify-edit.html) // in the AWS CodeDeploy User Guide. TriggerConfigurations []*TriggerConfig `locationName:"triggerConfigurations" type:"list"` } @@ -18253,6 +18594,15 @@ const ( ApplicationRevisionSortByLastUsedTime = "lastUsedTime" ) +// ApplicationRevisionSortBy_Values returns all elements of the ApplicationRevisionSortBy enum +func ApplicationRevisionSortBy_Values() []string { + return []string{ + ApplicationRevisionSortByRegisterTime, + ApplicationRevisionSortByFirstUsedTime, + ApplicationRevisionSortByLastUsedTime, + } +} + const ( // AutoRollbackEventDeploymentFailure is a AutoRollbackEvent enum value AutoRollbackEventDeploymentFailure = "DEPLOYMENT_FAILURE" @@ -18264,6 +18614,15 @@ const ( AutoRollbackEventDeploymentStopOnRequest = "DEPLOYMENT_STOP_ON_REQUEST" ) +// AutoRollbackEvent_Values returns all elements of the AutoRollbackEvent enum +func AutoRollbackEvent_Values() []string { + return []string{ + AutoRollbackEventDeploymentFailure, + AutoRollbackEventDeploymentStopOnAlarm, + AutoRollbackEventDeploymentStopOnRequest, + } +} + const ( // BundleTypeTar is a BundleType enum value BundleTypeTar = "tar" @@ -18281,6 +18640,17 @@ const ( BundleTypeJson = "JSON" ) +// BundleType_Values returns all elements of the BundleType enum +func BundleType_Values() []string { + return []string{ + BundleTypeTar, + BundleTypeTgz, + BundleTypeZip, + BundleTypeYaml, + BundleTypeJson, + } +} + const ( // ComputePlatformServer is a ComputePlatform enum value ComputePlatformServer = "Server" @@ -18292,6 +18662,15 @@ const ( ComputePlatformEcs = "ECS" ) +// ComputePlatform_Values returns all elements of the ComputePlatform enum +func ComputePlatform_Values() []string { + return []string{ + ComputePlatformServer, + ComputePlatformLambda, + ComputePlatformEcs, + } +} + const ( // DeploymentCreatorUser is a DeploymentCreator enum value DeploymentCreatorUser = "user" @@ -18301,8 +18680,29 @@ const ( // DeploymentCreatorCodeDeployRollback is a DeploymentCreator enum value DeploymentCreatorCodeDeployRollback = "codeDeployRollback" + + // DeploymentCreatorCodeDeploy is a DeploymentCreator enum value + DeploymentCreatorCodeDeploy = "CodeDeploy" + + // DeploymentCreatorCloudFormation is a DeploymentCreator enum value + DeploymentCreatorCloudFormation = "CloudFormation" + + // DeploymentCreatorCloudFormationRollback is a DeploymentCreator enum value + 
DeploymentCreatorCloudFormationRollback = "CloudFormationRollback" ) +// DeploymentCreator_Values returns all elements of the DeploymentCreator enum +func DeploymentCreator_Values() []string { + return []string{ + DeploymentCreatorUser, + DeploymentCreatorAutoscaling, + DeploymentCreatorCodeDeployRollback, + DeploymentCreatorCodeDeploy, + DeploymentCreatorCloudFormation, + DeploymentCreatorCloudFormationRollback, + } +} + const ( // DeploymentOptionWithTrafficControl is a DeploymentOption enum value DeploymentOptionWithTrafficControl = "WITH_TRAFFIC_CONTROL" @@ -18311,6 +18711,14 @@ const ( DeploymentOptionWithoutTrafficControl = "WITHOUT_TRAFFIC_CONTROL" ) +// DeploymentOption_Values returns all elements of the DeploymentOption enum +func DeploymentOption_Values() []string { + return []string{ + DeploymentOptionWithTrafficControl, + DeploymentOptionWithoutTrafficControl, + } +} + const ( // DeploymentReadyActionContinueDeployment is a DeploymentReadyAction enum value DeploymentReadyActionContinueDeployment = "CONTINUE_DEPLOYMENT" @@ -18319,6 +18727,14 @@ const ( DeploymentReadyActionStopDeployment = "STOP_DEPLOYMENT" ) +// DeploymentReadyAction_Values returns all elements of the DeploymentReadyAction enum +func DeploymentReadyAction_Values() []string { + return []string{ + DeploymentReadyActionContinueDeployment, + DeploymentReadyActionStopDeployment, + } +} + const ( // DeploymentStatusCreated is a DeploymentStatus enum value DeploymentStatusCreated = "Created" @@ -18329,6 +18745,9 @@ const ( // DeploymentStatusInProgress is a DeploymentStatus enum value DeploymentStatusInProgress = "InProgress" + // DeploymentStatusBaking is a DeploymentStatus enum value + DeploymentStatusBaking = "Baking" + // DeploymentStatusSucceeded is a DeploymentStatus enum value DeploymentStatusSucceeded = "Succeeded" @@ -18342,6 +18761,20 @@ const ( DeploymentStatusReady = "Ready" ) +// DeploymentStatus_Values returns all elements of the DeploymentStatus enum +func DeploymentStatus_Values() []string { + return []string{ + DeploymentStatusCreated, + DeploymentStatusQueued, + DeploymentStatusInProgress, + DeploymentStatusBaking, + DeploymentStatusSucceeded, + DeploymentStatusFailed, + DeploymentStatusStopped, + DeploymentStatusReady, + } +} + const ( // DeploymentTargetTypeInstanceTarget is a DeploymentTargetType enum value DeploymentTargetTypeInstanceTarget = "InstanceTarget" @@ -18351,8 +18784,21 @@ const ( // DeploymentTargetTypeEcstarget is a DeploymentTargetType enum value DeploymentTargetTypeEcstarget = "ECSTarget" + + // DeploymentTargetTypeCloudFormationTarget is a DeploymentTargetType enum value + DeploymentTargetTypeCloudFormationTarget = "CloudFormationTarget" ) +// DeploymentTargetType_Values returns all elements of the DeploymentTargetType enum +func DeploymentTargetType_Values() []string { + return []string{ + DeploymentTargetTypeInstanceTarget, + DeploymentTargetTypeLambdaTarget, + DeploymentTargetTypeEcstarget, + DeploymentTargetTypeCloudFormationTarget, + } +} + const ( // DeploymentTypeInPlace is a DeploymentType enum value DeploymentTypeInPlace = "IN_PLACE" @@ -18361,6 +18807,14 @@ const ( DeploymentTypeBlueGreen = "BLUE_GREEN" ) +// DeploymentType_Values returns all elements of the DeploymentType enum +func DeploymentType_Values() []string { + return []string{ + DeploymentTypeInPlace, + DeploymentTypeBlueGreen, + } +} + const ( // DeploymentWaitTypeReadyWait is a DeploymentWaitType enum value DeploymentWaitTypeReadyWait = "READY_WAIT" @@ -18369,6 +18823,14 @@ const ( 
DeploymentWaitTypeTerminationWait = "TERMINATION_WAIT" ) +// DeploymentWaitType_Values returns all elements of the DeploymentWaitType enum +func DeploymentWaitType_Values() []string { + return []string{ + DeploymentWaitTypeReadyWait, + DeploymentWaitTypeTerminationWait, + } +} + const ( // EC2TagFilterTypeKeyOnly is a EC2TagFilterType enum value EC2TagFilterTypeKeyOnly = "KEY_ONLY" @@ -18380,6 +18842,15 @@ const ( EC2TagFilterTypeKeyAndValue = "KEY_AND_VALUE" ) +// EC2TagFilterType_Values returns all elements of the EC2TagFilterType enum +func EC2TagFilterType_Values() []string { + return []string{ + EC2TagFilterTypeKeyOnly, + EC2TagFilterTypeValueOnly, + EC2TagFilterTypeKeyAndValue, + } +} + const ( // ErrorCodeAgentIssue is a ErrorCode enum value ErrorCodeAgentIssue = "AGENT_ISSUE" @@ -18479,8 +18950,51 @@ const ( // ErrorCodeTimeout is a ErrorCode enum value ErrorCodeTimeout = "TIMEOUT" + + // ErrorCodeCloudformationStackFailure is a ErrorCode enum value + ErrorCodeCloudformationStackFailure = "CLOUDFORMATION_STACK_FAILURE" ) +// ErrorCode_Values returns all elements of the ErrorCode enum +func ErrorCode_Values() []string { + return []string{ + ErrorCodeAgentIssue, + ErrorCodeAlarmActive, + ErrorCodeApplicationMissing, + ErrorCodeAutoscalingValidationError, + ErrorCodeAutoScalingConfiguration, + ErrorCodeAutoScalingIamRolePermissions, + ErrorCodeCodedeployResourceCannotBeFound, + ErrorCodeCustomerApplicationUnhealthy, + ErrorCodeDeploymentGroupMissing, + ErrorCodeEcsUpdateError, + ErrorCodeElasticLoadBalancingInvalid, + ErrorCodeElbInvalidInstance, + ErrorCodeHealthConstraints, + ErrorCodeHealthConstraintsInvalid, + ErrorCodeHookExecutionFailure, + ErrorCodeIamRoleMissing, + ErrorCodeIamRolePermissions, + ErrorCodeInternalError, + ErrorCodeInvalidEcsService, + ErrorCodeInvalidLambdaConfiguration, + ErrorCodeInvalidLambdaFunction, + ErrorCodeInvalidRevision, + ErrorCodeManualStop, + ErrorCodeMissingBlueGreenDeploymentConfiguration, + ErrorCodeMissingElbInformation, + ErrorCodeMissingGithubToken, + ErrorCodeNoEc2Subscription, + ErrorCodeNoInstances, + ErrorCodeOverMaxInstances, + ErrorCodeResourceLimitExceeded, + ErrorCodeRevisionMissing, + ErrorCodeThrottled, + ErrorCodeTimeout, + ErrorCodeCloudformationStackFailure, + } +} + const ( // FileExistsBehaviorDisallow is a FileExistsBehavior enum value FileExistsBehaviorDisallow = "DISALLOW" @@ -18492,6 +19006,15 @@ const ( FileExistsBehaviorRetain = "RETAIN" ) +// FileExistsBehavior_Values returns all elements of the FileExistsBehavior enum +func FileExistsBehavior_Values() []string { + return []string{ + FileExistsBehaviorDisallow, + FileExistsBehaviorOverwrite, + FileExistsBehaviorRetain, + } +} + const ( // GreenFleetProvisioningActionDiscoverExisting is a GreenFleetProvisioningAction enum value GreenFleetProvisioningActionDiscoverExisting = "DISCOVER_EXISTING" @@ -18500,6 +19023,14 @@ const ( GreenFleetProvisioningActionCopyAutoScalingGroup = "COPY_AUTO_SCALING_GROUP" ) +// GreenFleetProvisioningAction_Values returns all elements of the GreenFleetProvisioningAction enum +func GreenFleetProvisioningAction_Values() []string { + return []string{ + GreenFleetProvisioningActionDiscoverExisting, + GreenFleetProvisioningActionCopyAutoScalingGroup, + } +} + const ( // InstanceActionTerminate is a InstanceAction enum value InstanceActionTerminate = "TERMINATE" @@ -18508,6 +19039,14 @@ const ( InstanceActionKeepAlive = "KEEP_ALIVE" ) +// InstanceAction_Values returns all elements of the InstanceAction enum +func InstanceAction_Values() []string { 
+ return []string{ + InstanceActionTerminate, + InstanceActionKeepAlive, + } +} + const ( // InstanceStatusPending is a InstanceStatus enum value InstanceStatusPending = "Pending" @@ -18531,6 +19070,19 @@ const ( InstanceStatusReady = "Ready" ) +// InstanceStatus_Values returns all elements of the InstanceStatus enum +func InstanceStatus_Values() []string { + return []string{ + InstanceStatusPending, + InstanceStatusInProgress, + InstanceStatusSucceeded, + InstanceStatusFailed, + InstanceStatusSkipped, + InstanceStatusUnknown, + InstanceStatusReady, + } +} + const ( // InstanceTypeBlue is a InstanceType enum value InstanceTypeBlue = "Blue" @@ -18539,6 +19091,14 @@ const ( InstanceTypeGreen = "Green" ) +// InstanceType_Values returns all elements of the InstanceType enum +func InstanceType_Values() []string { + return []string{ + InstanceTypeBlue, + InstanceTypeGreen, + } +} + const ( // LifecycleErrorCodeSuccess is a LifecycleErrorCode enum value LifecycleErrorCodeSuccess = "Success" @@ -18559,6 +19119,18 @@ const ( LifecycleErrorCodeUnknownError = "UnknownError" ) +// LifecycleErrorCode_Values returns all elements of the LifecycleErrorCode enum +func LifecycleErrorCode_Values() []string { + return []string{ + LifecycleErrorCodeSuccess, + LifecycleErrorCodeScriptMissing, + LifecycleErrorCodeScriptNotExecutable, + LifecycleErrorCodeScriptTimedOut, + LifecycleErrorCodeScriptFailed, + LifecycleErrorCodeUnknownError, + } +} + const ( // LifecycleEventStatusPending is a LifecycleEventStatus enum value LifecycleEventStatusPending = "Pending" @@ -18579,6 +19151,18 @@ const ( LifecycleEventStatusUnknown = "Unknown" ) +// LifecycleEventStatus_Values returns all elements of the LifecycleEventStatus enum +func LifecycleEventStatus_Values() []string { + return []string{ + LifecycleEventStatusPending, + LifecycleEventStatusInProgress, + LifecycleEventStatusSucceeded, + LifecycleEventStatusFailed, + LifecycleEventStatusSkipped, + LifecycleEventStatusUnknown, + } +} + const ( // ListStateFilterActionInclude is a ListStateFilterAction enum value ListStateFilterActionInclude = "include" @@ -18590,6 +19174,15 @@ const ( ListStateFilterActionIgnore = "ignore" ) +// ListStateFilterAction_Values returns all elements of the ListStateFilterAction enum +func ListStateFilterAction_Values() []string { + return []string{ + ListStateFilterActionInclude, + ListStateFilterActionExclude, + ListStateFilterActionIgnore, + } +} + const ( // MinimumHealthyHostsTypeHostCount is a MinimumHealthyHostsType enum value MinimumHealthyHostsTypeHostCount = "HOST_COUNT" @@ -18598,6 +19191,14 @@ const ( MinimumHealthyHostsTypeFleetPercent = "FLEET_PERCENT" ) +// MinimumHealthyHostsType_Values returns all elements of the MinimumHealthyHostsType enum +func MinimumHealthyHostsType_Values() []string { + return []string{ + MinimumHealthyHostsTypeHostCount, + MinimumHealthyHostsTypeFleetPercent, + } +} + const ( // RegistrationStatusRegistered is a RegistrationStatus enum value RegistrationStatusRegistered = "Registered" @@ -18606,6 +19207,14 @@ const ( RegistrationStatusDeregistered = "Deregistered" ) +// RegistrationStatus_Values returns all elements of the RegistrationStatus enum +func RegistrationStatus_Values() []string { + return []string{ + RegistrationStatusRegistered, + RegistrationStatusDeregistered, + } +} + const ( // RevisionLocationTypeS3 is a RevisionLocationType enum value RevisionLocationTypeS3 = "S3" @@ -18620,6 +19229,16 @@ const ( RevisionLocationTypeAppSpecContent = "AppSpecContent" ) +// RevisionLocationType_Values 
returns all elements of the RevisionLocationType enum +func RevisionLocationType_Values() []string { + return []string{ + RevisionLocationTypeS3, + RevisionLocationTypeGitHub, + RevisionLocationTypeString, + RevisionLocationTypeAppSpecContent, + } +} + const ( // SortOrderAscending is a SortOrder enum value SortOrderAscending = "ascending" @@ -18628,6 +19247,14 @@ const ( SortOrderDescending = "descending" ) +// SortOrder_Values returns all elements of the SortOrder enum +func SortOrder_Values() []string { + return []string{ + SortOrderAscending, + SortOrderDescending, + } +} + const ( // StopStatusPending is a StopStatus enum value StopStatusPending = "Pending" @@ -18636,6 +19263,14 @@ const ( StopStatusSucceeded = "Succeeded" ) +// StopStatus_Values returns all elements of the StopStatus enum +func StopStatus_Values() []string { + return []string{ + StopStatusPending, + StopStatusSucceeded, + } +} + const ( // TagFilterTypeKeyOnly is a TagFilterType enum value TagFilterTypeKeyOnly = "KEY_ONLY" @@ -18647,6 +19282,15 @@ const ( TagFilterTypeKeyAndValue = "KEY_AND_VALUE" ) +// TagFilterType_Values returns all elements of the TagFilterType enum +func TagFilterType_Values() []string { + return []string{ + TagFilterTypeKeyOnly, + TagFilterTypeValueOnly, + TagFilterTypeKeyAndValue, + } +} + const ( // TargetFilterNameTargetStatus is a TargetFilterName enum value TargetFilterNameTargetStatus = "TargetStatus" @@ -18655,6 +19299,14 @@ const ( TargetFilterNameServerInstanceLabel = "ServerInstanceLabel" ) +// TargetFilterName_Values returns all elements of the TargetFilterName enum +func TargetFilterName_Values() []string { + return []string{ + TargetFilterNameTargetStatus, + TargetFilterNameServerInstanceLabel, + } +} + const ( // TargetLabelBlue is a TargetLabel enum value TargetLabelBlue = "Blue" @@ -18663,6 +19315,14 @@ const ( TargetLabelGreen = "Green" ) +// TargetLabel_Values returns all elements of the TargetLabel enum +func TargetLabel_Values() []string { + return []string{ + TargetLabelBlue, + TargetLabelGreen, + } +} + const ( // TargetStatusPending is a TargetStatus enum value TargetStatusPending = "Pending" @@ -18686,6 +19346,19 @@ const ( TargetStatusReady = "Ready" ) +// TargetStatus_Values returns all elements of the TargetStatus enum +func TargetStatus_Values() []string { + return []string{ + TargetStatusPending, + TargetStatusInProgress, + TargetStatusSucceeded, + TargetStatusFailed, + TargetStatusSkipped, + TargetStatusUnknown, + TargetStatusReady, + } +} + const ( // TrafficRoutingTypeTimeBasedCanary is a TrafficRoutingType enum value TrafficRoutingTypeTimeBasedCanary = "TimeBasedCanary" @@ -18697,6 +19370,15 @@ const ( TrafficRoutingTypeAllAtOnce = "AllAtOnce" ) +// TrafficRoutingType_Values returns all elements of the TrafficRoutingType enum +func TrafficRoutingType_Values() []string { + return []string{ + TrafficRoutingTypeTimeBasedCanary, + TrafficRoutingTypeTimeBasedLinear, + TrafficRoutingTypeAllAtOnce, + } +} + const ( // TriggerEventTypeDeploymentStart is a TriggerEventType enum value TriggerEventTypeDeploymentStart = "DeploymentStart" @@ -18728,3 +19410,19 @@ const ( // TriggerEventTypeInstanceReady is a TriggerEventType enum value TriggerEventTypeInstanceReady = "InstanceReady" ) + +// TriggerEventType_Values returns all elements of the TriggerEventType enum +func TriggerEventType_Values() []string { + return []string{ + TriggerEventTypeDeploymentStart, + TriggerEventTypeDeploymentSuccess, + TriggerEventTypeDeploymentFailure, + TriggerEventTypeDeploymentStop, + 
TriggerEventTypeDeploymentRollback, + TriggerEventTypeDeploymentReady, + TriggerEventTypeInstanceStart, + TriggerEventTypeInstanceSuccess, + TriggerEventTypeInstanceFailure, + TriggerEventTypeInstanceReady, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/errors.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/errors.go index 89a7dac68..11057d7b2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/errors.go @@ -68,7 +68,7 @@ const ( // "DeploymentConfigAlreadyExistsException". // // A deployment configuration with the specified name with the IAM user or AWS - // account already exists . + // account already exists. ErrCodeDeploymentConfigAlreadyExistsException = "DeploymentConfigAlreadyExistsException" // ErrCodeDeploymentConfigDoesNotExistException for service response error code @@ -318,7 +318,8 @@ const ( // ErrCodeInvalidComputePlatformException for service response error code // "InvalidComputePlatformException". // - // The computePlatform is invalid. The computePlatform should be Lambda or Server. + // The computePlatform is invalid. The computePlatform should be Lambda, Server, + // or ECS. ErrCodeInvalidComputePlatformException = "InvalidComputePlatformException" // ErrCodeInvalidDeployedStateFilterException for service response error code @@ -397,6 +398,12 @@ const ( // The Amazon ECS service identifier is not valid. ErrCodeInvalidECSServiceException = "InvalidECSServiceException" + // ErrCodeInvalidExternalIdException for service response error code + // "InvalidExternalIdException". + // + // The external ID was specified in an invalid format. + ErrCodeInvalidExternalIdException = "InvalidExternalIdException" + // ErrCodeInvalidFileExistsBehaviorException for service response error code // "InvalidFileExistsBehaviorException". 
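// Editor's note (illustrative sketch, not part of the generated patch): the generated
// <Enum>_Values() helpers added above return every value of an enum, so callers can
// validate input against the SDK's own list instead of maintaining hand-written string
// slices (in a Terraform provider such slices are typically fed to a StringInSlice-style
// schema validator). The helper below is a hypothetical example using
// DeploymentOption_Values.
package example

import "github.com/aws/aws-sdk-go/service/codedeploy"

func isValidDeploymentOption(v string) bool {
	for _, opt := range codedeploy.DeploymentOption_Values() {
		if v == opt {
			return true
		}
	}
	return false
}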
// @@ -772,6 +779,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidEC2TagCombinationException": newErrorInvalidEC2TagCombinationException, "InvalidEC2TagException": newErrorInvalidEC2TagException, "InvalidECSServiceException": newErrorInvalidECSServiceException, + "InvalidExternalIdException": newErrorInvalidExternalIdException, "InvalidFileExistsBehaviorException": newErrorInvalidFileExistsBehaviorException, "InvalidGitHubAccountTokenException": newErrorInvalidGitHubAccountTokenException, "InvalidGitHubAccountTokenNameException": newErrorInvalidGitHubAccountTokenNameException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go index 405ddb295..3e34be1be 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codepipeline/api.go b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/api.go index 49b9395f6..9d13004e4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codepipeline/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/api.go @@ -4684,8 +4684,8 @@ func (s *ActionExecutionResult) SetExternalExecutionUrl(v string) *ActionExecuti // The specified action cannot be found. type ActionNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4702,17 +4702,17 @@ func (s ActionNotFoundException) GoString() string { func newErrorActionNotFoundException(v protocol.ResponseMetadata) error { return &ActionNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ActionNotFoundException) Code() string { +func (s *ActionNotFoundException) Code() string { return "ActionNotFoundException" } // Message returns the exception's message. -func (s ActionNotFoundException) Message() string { +func (s *ActionNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4720,22 +4720,22 @@ func (s ActionNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ActionNotFoundException) OrigErr() error { +func (s *ActionNotFoundException) OrigErr() error { return nil } -func (s ActionNotFoundException) Error() string { +func (s *ActionNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ActionNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ActionNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ActionNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ActionNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Represents information about the version (or revision) of an action. @@ -5037,8 +5037,8 @@ func (s *ActionTypeId) SetVersion(v string) *ActionTypeId { // The specified action type cannot be found. type ActionTypeNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5055,17 +5055,17 @@ func (s ActionTypeNotFoundException) GoString() string { func newErrorActionTypeNotFoundException(v protocol.ResponseMetadata) error { return &ActionTypeNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ActionTypeNotFoundException) Code() string { +func (s *ActionTypeNotFoundException) Code() string { return "ActionTypeNotFoundException" } // Message returns the exception's message. -func (s ActionTypeNotFoundException) Message() string { +func (s *ActionTypeNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5073,22 +5073,22 @@ func (s ActionTypeNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ActionTypeNotFoundException) OrigErr() error { +func (s *ActionTypeNotFoundException) OrigErr() error { return nil } -func (s ActionTypeNotFoundException) Error() string { +func (s *ActionTypeNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ActionTypeNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ActionTypeNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ActionTypeNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ActionTypeNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about the settings for an action type. @@ -5176,8 +5176,8 @@ func (s *ActionTypeSettings) SetThirdPartyConfigurationUrl(v string) *ActionType // The approval action has already been approved or rejected. type ApprovalAlreadyCompletedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5194,17 +5194,17 @@ func (s ApprovalAlreadyCompletedException) GoString() string { func newErrorApprovalAlreadyCompletedException(v protocol.ResponseMetadata) error { return &ApprovalAlreadyCompletedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ApprovalAlreadyCompletedException) Code() string { +func (s *ApprovalAlreadyCompletedException) Code() string { return "ApprovalAlreadyCompletedException" } // Message returns the exception's message. 
-func (s ApprovalAlreadyCompletedException) Message() string { +func (s *ApprovalAlreadyCompletedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5212,22 +5212,22 @@ func (s ApprovalAlreadyCompletedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ApprovalAlreadyCompletedException) OrigErr() error { +func (s *ApprovalAlreadyCompletedException) OrigErr() error { return nil } -func (s ApprovalAlreadyCompletedException) Error() string { +func (s *ApprovalAlreadyCompletedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ApprovalAlreadyCompletedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ApprovalAlreadyCompletedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ApprovalAlreadyCompletedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ApprovalAlreadyCompletedException) RequestID() string { + return s.RespMetadata.RequestID } // Represents information about the result of an approval request. @@ -5661,8 +5661,8 @@ func (s *BlockerDeclaration) SetType(v string) *BlockerDeclaration { // Unable to modify the tag due to a simultaneous update request. type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" min:"1" type:"string"` } @@ -5679,17 +5679,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. -func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5697,22 +5697,22 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the input of a CreateCustomActionType operation. @@ -6465,8 +6465,8 @@ func (s DisableStageTransitionOutput) GoString() string { // out of sequence tasks. 
If you already chose to stop and abandon, you cannot // make that request again. type DuplicatedStopRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" min:"1" type:"string"` } @@ -6483,17 +6483,17 @@ func (s DuplicatedStopRequestException) GoString() string { func newErrorDuplicatedStopRequestException(v protocol.ResponseMetadata) error { return &DuplicatedStopRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicatedStopRequestException) Code() string { +func (s *DuplicatedStopRequestException) Code() string { return "DuplicatedStopRequestException" } // Message returns the exception's message. -func (s DuplicatedStopRequestException) Message() string { +func (s *DuplicatedStopRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6501,22 +6501,22 @@ func (s DuplicatedStopRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicatedStopRequestException) OrigErr() error { +func (s *DuplicatedStopRequestException) OrigErr() error { return nil } -func (s DuplicatedStopRequestException) Error() string { +func (s *DuplicatedStopRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicatedStopRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicatedStopRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DuplicatedStopRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicatedStopRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the input of an EnableStageTransition action. @@ -7347,8 +7347,8 @@ func (s *InputArtifact) SetName(v string) *InputArtifact { // The action declaration was specified in an invalid format. type InvalidActionDeclarationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7365,17 +7365,17 @@ func (s InvalidActionDeclarationException) GoString() string { func newErrorInvalidActionDeclarationException(v protocol.ResponseMetadata) error { return &InvalidActionDeclarationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidActionDeclarationException) Code() string { +func (s *InvalidActionDeclarationException) Code() string { return "InvalidActionDeclarationException" } // Message returns the exception's message. -func (s InvalidActionDeclarationException) Message() string { +func (s *InvalidActionDeclarationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7383,28 +7383,28 @@ func (s InvalidActionDeclarationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidActionDeclarationException) OrigErr() error { +func (s *InvalidActionDeclarationException) OrigErr() error { return nil } -func (s InvalidActionDeclarationException) Error() string { +func (s *InvalidActionDeclarationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidActionDeclarationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidActionDeclarationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidActionDeclarationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidActionDeclarationException) RequestID() string { + return s.RespMetadata.RequestID } // The approval request already received a response or has expired. type InvalidApprovalTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7421,17 +7421,17 @@ func (s InvalidApprovalTokenException) GoString() string { func newErrorInvalidApprovalTokenException(v protocol.ResponseMetadata) error { return &InvalidApprovalTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidApprovalTokenException) Code() string { +func (s *InvalidApprovalTokenException) Code() string { return "InvalidApprovalTokenException" } // Message returns the exception's message. -func (s InvalidApprovalTokenException) Message() string { +func (s *InvalidApprovalTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7439,28 +7439,28 @@ func (s InvalidApprovalTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidApprovalTokenException) OrigErr() error { +func (s *InvalidApprovalTokenException) OrigErr() error { return nil } -func (s InvalidApprovalTokenException) Error() string { +func (s *InvalidApprovalTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidApprovalTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidApprovalTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidApprovalTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidApprovalTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource ARN is invalid. type InvalidArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" min:"1" type:"string"` } @@ -7477,17 +7477,17 @@ func (s InvalidArnException) GoString() string { func newErrorInvalidArnException(v protocol.ResponseMetadata) error { return &InvalidArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArnException) Code() string { +func (s *InvalidArnException) Code() string { return "InvalidArnException" } // Message returns the exception's message. 
-func (s InvalidArnException) Message() string { +func (s *InvalidArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7495,28 +7495,28 @@ func (s InvalidArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArnException) OrigErr() error { +func (s *InvalidArnException) OrigErr() error { return nil } -func (s InvalidArnException) Error() string { +func (s *InvalidArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArnException) RequestID() string { + return s.RespMetadata.RequestID } // Reserved for future use. type InvalidBlockerDeclarationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7533,17 +7533,17 @@ func (s InvalidBlockerDeclarationException) GoString() string { func newErrorInvalidBlockerDeclarationException(v protocol.ResponseMetadata) error { return &InvalidBlockerDeclarationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidBlockerDeclarationException) Code() string { +func (s *InvalidBlockerDeclarationException) Code() string { return "InvalidBlockerDeclarationException" } // Message returns the exception's message. -func (s InvalidBlockerDeclarationException) Message() string { +func (s *InvalidBlockerDeclarationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7551,28 +7551,28 @@ func (s InvalidBlockerDeclarationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidBlockerDeclarationException) OrigErr() error { +func (s *InvalidBlockerDeclarationException) OrigErr() error { return nil } -func (s InvalidBlockerDeclarationException) Error() string { +func (s *InvalidBlockerDeclarationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidBlockerDeclarationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidBlockerDeclarationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidBlockerDeclarationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidBlockerDeclarationException) RequestID() string { + return s.RespMetadata.RequestID } // The client token was specified in an invalid format type InvalidClientTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7589,17 +7589,17 @@ func (s InvalidClientTokenException) GoString() string { func newErrorInvalidClientTokenException(v protocol.ResponseMetadata) error { return &InvalidClientTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidClientTokenException) Code() string { +func (s *InvalidClientTokenException) Code() string { return "InvalidClientTokenException" } // Message returns the exception's message. -func (s InvalidClientTokenException) Message() string { +func (s *InvalidClientTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7607,28 +7607,28 @@ func (s InvalidClientTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidClientTokenException) OrigErr() error { +func (s *InvalidClientTokenException) OrigErr() error { return nil } -func (s InvalidClientTokenException) Error() string { +func (s *InvalidClientTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidClientTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidClientTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidClientTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidClientTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The job was specified in an invalid format or cannot be found. type InvalidJobException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7645,17 +7645,17 @@ func (s InvalidJobException) GoString() string { func newErrorInvalidJobException(v protocol.ResponseMetadata) error { return &InvalidJobException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidJobException) Code() string { +func (s *InvalidJobException) Code() string { return "InvalidJobException" } // Message returns the exception's message. -func (s InvalidJobException) Message() string { +func (s *InvalidJobException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7663,28 +7663,28 @@ func (s InvalidJobException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidJobException) OrigErr() error { +func (s *InvalidJobException) OrigErr() error { return nil } -func (s InvalidJobException) Error() string { +func (s *InvalidJobException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidJobException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidJobException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidJobException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidJobException) RequestID() string { + return s.RespMetadata.RequestID } // The job state was specified in an invalid format. type InvalidJobStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7701,17 +7701,17 @@ func (s InvalidJobStateException) GoString() string { func newErrorInvalidJobStateException(v protocol.ResponseMetadata) error { return &InvalidJobStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidJobStateException) Code() string { +func (s *InvalidJobStateException) Code() string { return "InvalidJobStateException" } // Message returns the exception's message. -func (s InvalidJobStateException) Message() string { +func (s *InvalidJobStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7719,29 +7719,29 @@ func (s InvalidJobStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidJobStateException) OrigErr() error { +func (s *InvalidJobStateException) OrigErr() error { return nil } -func (s InvalidJobStateException) Error() string { +func (s *InvalidJobStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidJobStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidJobStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidJobStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidJobStateException) RequestID() string { + return s.RespMetadata.RequestID } // The next token was specified in an invalid format. Make sure that the next // token you provide is the token returned by a previous call. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7758,17 +7758,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7776,28 +7776,28 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The nonce was specified in an invalid format. type InvalidNonceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7814,17 +7814,17 @@ func (s InvalidNonceException) GoString() string { func newErrorInvalidNonceException(v protocol.ResponseMetadata) error { return &InvalidNonceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNonceException) Code() string { +func (s *InvalidNonceException) Code() string { return "InvalidNonceException" } // Message returns the exception's message. -func (s InvalidNonceException) Message() string { +func (s *InvalidNonceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7832,28 +7832,28 @@ func (s InvalidNonceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNonceException) OrigErr() error { +func (s *InvalidNonceException) OrigErr() error { return nil } -func (s InvalidNonceException) Error() string { +func (s *InvalidNonceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNonceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNonceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNonceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNonceException) RequestID() string { + return s.RespMetadata.RequestID } // The stage declaration was specified in an invalid format. type InvalidStageDeclarationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7870,17 +7870,17 @@ func (s InvalidStageDeclarationException) GoString() string { func newErrorInvalidStageDeclarationException(v protocol.ResponseMetadata) error { return &InvalidStageDeclarationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidStageDeclarationException) Code() string { +func (s *InvalidStageDeclarationException) Code() string { return "InvalidStageDeclarationException" } // Message returns the exception's message. 
-func (s InvalidStageDeclarationException) Message() string { +func (s *InvalidStageDeclarationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7888,28 +7888,28 @@ func (s InvalidStageDeclarationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidStageDeclarationException) OrigErr() error { +func (s *InvalidStageDeclarationException) OrigErr() error { return nil } -func (s InvalidStageDeclarationException) Error() string { +func (s *InvalidStageDeclarationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidStageDeclarationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidStageDeclarationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidStageDeclarationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidStageDeclarationException) RequestID() string { + return s.RespMetadata.RequestID } // The structure was specified in an invalid format. type InvalidStructureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7926,17 +7926,17 @@ func (s InvalidStructureException) GoString() string { func newErrorInvalidStructureException(v protocol.ResponseMetadata) error { return &InvalidStructureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidStructureException) Code() string { +func (s *InvalidStructureException) Code() string { return "InvalidStructureException" } // Message returns the exception's message. -func (s InvalidStructureException) Message() string { +func (s *InvalidStructureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7944,28 +7944,28 @@ func (s InvalidStructureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidStructureException) OrigErr() error { +func (s *InvalidStructureException) OrigErr() error { return nil } -func (s InvalidStructureException) Error() string { +func (s *InvalidStructureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidStructureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidStructureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidStructureException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidStructureException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource tags are invalid. 
type InvalidTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" min:"1" type:"string"` } @@ -7982,17 +7982,17 @@ func (s InvalidTagsException) GoString() string { func newErrorInvalidTagsException(v protocol.ResponseMetadata) error { return &InvalidTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTagsException) Code() string { +func (s *InvalidTagsException) Code() string { return "InvalidTagsException" } // Message returns the exception's message. -func (s InvalidTagsException) Message() string { +func (s *InvalidTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8000,28 +8000,28 @@ func (s InvalidTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTagsException) OrigErr() error { +func (s *InvalidTagsException) OrigErr() error { return nil } -func (s InvalidTagsException) Error() string { +func (s *InvalidTagsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTagsException) RequestID() string { + return s.RespMetadata.RequestID } // The specified authentication type is in an invalid format. type InvalidWebhookAuthenticationParametersException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8038,17 +8038,17 @@ func (s InvalidWebhookAuthenticationParametersException) GoString() string { func newErrorInvalidWebhookAuthenticationParametersException(v protocol.ResponseMetadata) error { return &InvalidWebhookAuthenticationParametersException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidWebhookAuthenticationParametersException) Code() string { +func (s *InvalidWebhookAuthenticationParametersException) Code() string { return "InvalidWebhookAuthenticationParametersException" } // Message returns the exception's message. -func (s InvalidWebhookAuthenticationParametersException) Message() string { +func (s *InvalidWebhookAuthenticationParametersException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8056,28 +8056,28 @@ func (s InvalidWebhookAuthenticationParametersException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidWebhookAuthenticationParametersException) OrigErr() error { +func (s *InvalidWebhookAuthenticationParametersException) OrigErr() error { return nil } -func (s InvalidWebhookAuthenticationParametersException) Error() string { +func (s *InvalidWebhookAuthenticationParametersException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidWebhookAuthenticationParametersException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidWebhookAuthenticationParametersException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidWebhookAuthenticationParametersException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidWebhookAuthenticationParametersException) RequestID() string { + return s.RespMetadata.RequestID } // The specified event filter rule is in an invalid format. type InvalidWebhookFilterPatternException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8094,17 +8094,17 @@ func (s InvalidWebhookFilterPatternException) GoString() string { func newErrorInvalidWebhookFilterPatternException(v protocol.ResponseMetadata) error { return &InvalidWebhookFilterPatternException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidWebhookFilterPatternException) Code() string { +func (s *InvalidWebhookFilterPatternException) Code() string { return "InvalidWebhookFilterPatternException" } // Message returns the exception's message. -func (s InvalidWebhookFilterPatternException) Message() string { +func (s *InvalidWebhookFilterPatternException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8112,22 +8112,22 @@ func (s InvalidWebhookFilterPatternException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidWebhookFilterPatternException) OrigErr() error { +func (s *InvalidWebhookFilterPatternException) OrigErr() error { return nil } -func (s InvalidWebhookFilterPatternException) Error() string { +func (s *InvalidWebhookFilterPatternException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidWebhookFilterPatternException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidWebhookFilterPatternException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidWebhookFilterPatternException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidWebhookFilterPatternException) RequestID() string { + return s.RespMetadata.RequestID } // Represents information about a job. @@ -8323,8 +8323,8 @@ func (s *JobDetails) SetId(v string) *JobDetails { // The job was specified in an invalid format or cannot be found. type JobNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8341,17 +8341,17 @@ func (s JobNotFoundException) GoString() string { func newErrorJobNotFoundException(v protocol.ResponseMetadata) error { return &JobNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s JobNotFoundException) Code() string { +func (s *JobNotFoundException) Code() string { return "JobNotFoundException" } // Message returns the exception's message. 
-func (s JobNotFoundException) Message() string { +func (s *JobNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8359,29 +8359,29 @@ func (s JobNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s JobNotFoundException) OrigErr() error { +func (s *JobNotFoundException) OrigErr() error { return nil } -func (s JobNotFoundException) Error() string { +func (s *JobNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s JobNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *JobNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s JobNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *JobNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The number of pipelines associated with the AWS account has exceeded the // limit allowed for the account. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8398,17 +8398,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8416,22 +8416,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListActionExecutionsInput struct { @@ -9086,8 +9086,8 @@ func (s *ListWebhooksOutput) SetWebhooks(v []*ListWebhookItem) *ListWebhooksOutp // The stage has failed in a later run of the pipeline and the pipelineExecutionId // associated with the request is out of date. 
type NotLatestPipelineExecutionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9104,17 +9104,17 @@ func (s NotLatestPipelineExecutionException) GoString() string { func newErrorNotLatestPipelineExecutionException(v protocol.ResponseMetadata) error { return &NotLatestPipelineExecutionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotLatestPipelineExecutionException) Code() string { +func (s *NotLatestPipelineExecutionException) Code() string { return "NotLatestPipelineExecutionException" } // Message returns the exception's message. -func (s NotLatestPipelineExecutionException) Message() string { +func (s *NotLatestPipelineExecutionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9122,22 +9122,22 @@ func (s NotLatestPipelineExecutionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotLatestPipelineExecutionException) OrigErr() error { +func (s *NotLatestPipelineExecutionException) OrigErr() error { return nil } -func (s NotLatestPipelineExecutionException) Error() string { +func (s *NotLatestPipelineExecutionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotLatestPipelineExecutionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotLatestPipelineExecutionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotLatestPipelineExecutionException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotLatestPipelineExecutionException) RequestID() string { + return s.RespMetadata.RequestID } // Represents information about the output of an action. @@ -9192,8 +9192,8 @@ func (s *OutputArtifact) SetName(v string) *OutputArtifact { // Exceeded the total size limit for all variables in the pipeline. type OutputVariablesSizeExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" min:"1" type:"string"` } @@ -9210,17 +9210,17 @@ func (s OutputVariablesSizeExceededException) GoString() string { func newErrorOutputVariablesSizeExceededException(v protocol.ResponseMetadata) error { return &OutputVariablesSizeExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OutputVariablesSizeExceededException) Code() string { +func (s *OutputVariablesSizeExceededException) Code() string { return "OutputVariablesSizeExceededException" } // Message returns the exception's message. -func (s OutputVariablesSizeExceededException) Message() string { +func (s *OutputVariablesSizeExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9228,22 +9228,22 @@ func (s OutputVariablesSizeExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s OutputVariablesSizeExceededException) OrigErr() error { +func (s *OutputVariablesSizeExceededException) OrigErr() error { return nil } -func (s OutputVariablesSizeExceededException) Error() string { +func (s *OutputVariablesSizeExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OutputVariablesSizeExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OutputVariablesSizeExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OutputVariablesSizeExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *OutputVariablesSizeExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Represents information about a pipeline to a job worker. @@ -9532,8 +9532,8 @@ func (s *PipelineExecution) SetStatus(v string) *PipelineExecution { // The pipeline execution was specified in an invalid format or cannot be found, // or an execution ID does not belong to the specified pipeline. type PipelineExecutionNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9550,17 +9550,17 @@ func (s PipelineExecutionNotFoundException) GoString() string { func newErrorPipelineExecutionNotFoundException(v protocol.ResponseMetadata) error { return &PipelineExecutionNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PipelineExecutionNotFoundException) Code() string { +func (s *PipelineExecutionNotFoundException) Code() string { return "PipelineExecutionNotFoundException" } // Message returns the exception's message. -func (s PipelineExecutionNotFoundException) Message() string { +func (s *PipelineExecutionNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9568,29 +9568,29 @@ func (s PipelineExecutionNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PipelineExecutionNotFoundException) OrigErr() error { +func (s *PipelineExecutionNotFoundException) OrigErr() error { return nil } -func (s PipelineExecutionNotFoundException) Error() string { +func (s *PipelineExecutionNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PipelineExecutionNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PipelineExecutionNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PipelineExecutionNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *PipelineExecutionNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Unable to stop the pipeline execution. The execution might already be in // a Stopped state, or it might no longer be in progress. 
type PipelineExecutionNotStoppableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" min:"1" type:"string"` } @@ -9607,17 +9607,17 @@ func (s PipelineExecutionNotStoppableException) GoString() string { func newErrorPipelineExecutionNotStoppableException(v protocol.ResponseMetadata) error { return &PipelineExecutionNotStoppableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PipelineExecutionNotStoppableException) Code() string { +func (s *PipelineExecutionNotStoppableException) Code() string { return "PipelineExecutionNotStoppableException" } // Message returns the exception's message. -func (s PipelineExecutionNotStoppableException) Message() string { +func (s *PipelineExecutionNotStoppableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9625,22 +9625,22 @@ func (s PipelineExecutionNotStoppableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PipelineExecutionNotStoppableException) OrigErr() error { +func (s *PipelineExecutionNotStoppableException) OrigErr() error { return nil } -func (s PipelineExecutionNotStoppableException) Error() string { +func (s *PipelineExecutionNotStoppableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PipelineExecutionNotStoppableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PipelineExecutionNotStoppableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PipelineExecutionNotStoppableException) RequestID() string { - return s.respMetadata.RequestID +func (s *PipelineExecutionNotStoppableException) RequestID() string { + return s.RespMetadata.RequestID } // Summary information about a pipeline execution. @@ -9786,8 +9786,8 @@ func (s *PipelineMetadata) SetUpdated(v time.Time) *PipelineMetadata { // The specified pipeline name is already in use. type PipelineNameInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9804,17 +9804,17 @@ func (s PipelineNameInUseException) GoString() string { func newErrorPipelineNameInUseException(v protocol.ResponseMetadata) error { return &PipelineNameInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PipelineNameInUseException) Code() string { +func (s *PipelineNameInUseException) Code() string { return "PipelineNameInUseException" } // Message returns the exception's message. -func (s PipelineNameInUseException) Message() string { +func (s *PipelineNameInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9822,28 +9822,28 @@ func (s PipelineNameInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s PipelineNameInUseException) OrigErr() error { +func (s *PipelineNameInUseException) OrigErr() error { return nil } -func (s PipelineNameInUseException) Error() string { +func (s *PipelineNameInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PipelineNameInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PipelineNameInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PipelineNameInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *PipelineNameInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The pipeline was specified in an invalid format or cannot be found. type PipelineNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9860,17 +9860,17 @@ func (s PipelineNotFoundException) GoString() string { func newErrorPipelineNotFoundException(v protocol.ResponseMetadata) error { return &PipelineNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PipelineNotFoundException) Code() string { +func (s *PipelineNotFoundException) Code() string { return "PipelineNotFoundException" } // Message returns the exception's message. -func (s PipelineNotFoundException) Message() string { +func (s *PipelineNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9878,22 +9878,22 @@ func (s PipelineNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PipelineNotFoundException) OrigErr() error { +func (s *PipelineNotFoundException) OrigErr() error { return nil } -func (s PipelineNotFoundException) Error() string { +func (s *PipelineNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PipelineNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PipelineNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PipelineNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *PipelineNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Returns a summary of a pipeline. @@ -9949,8 +9949,8 @@ func (s *PipelineSummary) SetVersion(v int64) *PipelineSummary { // The pipeline version was specified in an invalid format or cannot be found. type PipelineVersionNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9967,17 +9967,17 @@ func (s PipelineVersionNotFoundException) GoString() string { func newErrorPipelineVersionNotFoundException(v protocol.ResponseMetadata) error { return &PipelineVersionNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s PipelineVersionNotFoundException) Code() string { +func (s *PipelineVersionNotFoundException) Code() string { return "PipelineVersionNotFoundException" } // Message returns the exception's message. -func (s PipelineVersionNotFoundException) Message() string { +func (s *PipelineVersionNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9985,22 +9985,22 @@ func (s PipelineVersionNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PipelineVersionNotFoundException) OrigErr() error { +func (s *PipelineVersionNotFoundException) OrigErr() error { return nil } -func (s PipelineVersionNotFoundException) Error() string { +func (s *PipelineVersionNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PipelineVersionNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PipelineVersionNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PipelineVersionNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *PipelineVersionNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the input of a PollForJobs action. @@ -10980,8 +10980,8 @@ func (s RegisterWebhookWithThirdPartyOutput) GoString() string { // The resource was specified in an invalid format. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10998,17 +10998,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11016,22 +11016,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the input of a RetryStageExecution action. 
@@ -11427,8 +11427,8 @@ func (s *StageExecution) SetStatus(v string) *StageExecution { // The stage was specified in an invalid format or cannot be found. type StageNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11445,17 +11445,17 @@ func (s StageNotFoundException) GoString() string { func newErrorStageNotFoundException(v protocol.ResponseMetadata) error { return &StageNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s StageNotFoundException) Code() string { +func (s *StageNotFoundException) Code() string { return "StageNotFoundException" } // Message returns the exception's message. -func (s StageNotFoundException) Message() string { +func (s *StageNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11463,29 +11463,29 @@ func (s StageNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s StageNotFoundException) OrigErr() error { +func (s *StageNotFoundException) OrigErr() error { return nil } -func (s StageNotFoundException) Error() string { +func (s *StageNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s StageNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *StageNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s StageNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *StageNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Unable to retry. The pipeline structure or stage state might have changed // while actions awaited retry, or the stage contains no failed actions. type StageNotRetryableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11502,17 +11502,17 @@ func (s StageNotRetryableException) GoString() string { func newErrorStageNotRetryableException(v protocol.ResponseMetadata) error { return &StageNotRetryableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s StageNotRetryableException) Code() string { +func (s *StageNotRetryableException) Code() string { return "StageNotRetryableException" } // Message returns the exception's message. -func (s StageNotRetryableException) Message() string { +func (s *StageNotRetryableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11520,22 +11520,22 @@ func (s StageNotRetryableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s StageNotRetryableException) OrigErr() error { +func (s *StageNotRetryableException) OrigErr() error { return nil } -func (s StageNotRetryableException) Error() string { +func (s *StageNotRetryableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s StageNotRetryableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *StageNotRetryableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s StageNotRetryableException) RequestID() string { - return s.respMetadata.RequestID +func (s *StageNotRetryableException) RequestID() string { + return s.RespMetadata.RequestID } // Represents information about the state of the stage. @@ -12106,8 +12106,8 @@ func (s *ThirdPartyJobDetails) SetNonce(v string) *ThirdPartyJobDetails { // The tags limit for a resource has been exceeded. type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" min:"1" type:"string"` } @@ -12124,17 +12124,17 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12142,22 +12142,22 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { +func (s *TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *TooManyTagsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID } // Represents information about the state of transitions between one stage and @@ -12349,8 +12349,8 @@ func (s *UpdatePipelineOutput) SetPipeline(v *PipelineDeclaration) *UpdatePipeli // The validation was specified in an invalid format. type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12367,17 +12367,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. 
-func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12385,22 +12385,22 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } // The authentication applied to incoming webhook trigger requests. @@ -12673,8 +12673,8 @@ func (s *WebhookFilterRule) SetMatchEquals(v string) *WebhookFilterRule { // The specified webhook was entered in an invalid format or cannot be found. type WebhookNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12691,17 +12691,17 @@ func (s WebhookNotFoundException) GoString() string { func newErrorWebhookNotFoundException(v protocol.ResponseMetadata) error { return &WebhookNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WebhookNotFoundException) Code() string { +func (s *WebhookNotFoundException) Code() string { return "WebhookNotFoundException" } // Message returns the exception's message. -func (s WebhookNotFoundException) Message() string { +func (s *WebhookNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12709,22 +12709,22 @@ func (s WebhookNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WebhookNotFoundException) OrigErr() error { +func (s *WebhookNotFoundException) OrigErr() error { return nil } -func (s WebhookNotFoundException) Error() string { +func (s *WebhookNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WebhookNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WebhookNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s WebhookNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *WebhookNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } const ( @@ -12747,6 +12747,18 @@ const ( ActionCategoryApproval = "Approval" ) +// ActionCategory_Values returns all elements of the ActionCategory enum +func ActionCategory_Values() []string { + return []string{ + ActionCategorySource, + ActionCategoryBuild, + ActionCategoryDeploy, + ActionCategoryTest, + ActionCategoryInvoke, + ActionCategoryApproval, + } +} + const ( // ActionConfigurationPropertyTypeString is a ActionConfigurationPropertyType enum value ActionConfigurationPropertyTypeString = "String" @@ -12758,6 +12770,15 @@ const ( ActionConfigurationPropertyTypeBoolean = "Boolean" ) +// ActionConfigurationPropertyType_Values returns all elements of the ActionConfigurationPropertyType enum +func ActionConfigurationPropertyType_Values() []string { + return []string{ + ActionConfigurationPropertyTypeString, + ActionConfigurationPropertyTypeNumber, + ActionConfigurationPropertyTypeBoolean, + } +} + const ( // ActionExecutionStatusInProgress is a ActionExecutionStatus enum value ActionExecutionStatusInProgress = "InProgress" @@ -12772,6 +12793,16 @@ const ( ActionExecutionStatusFailed = "Failed" ) +// ActionExecutionStatus_Values returns all elements of the ActionExecutionStatus enum +func ActionExecutionStatus_Values() []string { + return []string{ + ActionExecutionStatusInProgress, + ActionExecutionStatusAbandoned, + ActionExecutionStatusSucceeded, + ActionExecutionStatusFailed, + } +} + const ( // ActionOwnerAws is a ActionOwner enum value ActionOwnerAws = "AWS" @@ -12783,6 +12814,15 @@ const ( ActionOwnerCustom = "Custom" ) +// ActionOwner_Values returns all elements of the ActionOwner enum +func ActionOwner_Values() []string { + return []string{ + ActionOwnerAws, + ActionOwnerThirdParty, + ActionOwnerCustom, + } +} + const ( // ApprovalStatusApproved is a ApprovalStatus enum value ApprovalStatusApproved = "Approved" @@ -12791,26 +12831,62 @@ const ( ApprovalStatusRejected = "Rejected" ) +// ApprovalStatus_Values returns all elements of the ApprovalStatus enum +func ApprovalStatus_Values() []string { + return []string{ + ApprovalStatusApproved, + ApprovalStatusRejected, + } +} + const ( // ArtifactLocationTypeS3 is a ArtifactLocationType enum value ArtifactLocationTypeS3 = "S3" ) +// ArtifactLocationType_Values returns all elements of the ArtifactLocationType enum +func ArtifactLocationType_Values() []string { + return []string{ + ArtifactLocationTypeS3, + } +} + const ( // ArtifactStoreTypeS3 is a ArtifactStoreType enum value ArtifactStoreTypeS3 = "S3" ) +// ArtifactStoreType_Values returns all elements of the ArtifactStoreType enum +func ArtifactStoreType_Values() []string { + return []string{ + ArtifactStoreTypeS3, + } +} + const ( // BlockerTypeSchedule is a BlockerType enum value BlockerTypeSchedule = "Schedule" ) +// BlockerType_Values returns all elements of the BlockerType enum +func BlockerType_Values() []string { + return []string{ + BlockerTypeSchedule, + } +} + const ( // EncryptionKeyTypeKms is a EncryptionKeyType enum value EncryptionKeyTypeKms = "KMS" ) +// EncryptionKeyType_Values returns all elements of the EncryptionKeyType enum +func EncryptionKeyType_Values() []string { + return []string{ + EncryptionKeyTypeKms, + } +} + const ( // FailureTypeJobFailed is a FailureType enum value FailureTypeJobFailed = "JobFailed" @@ -12831,6 +12907,18 @@ const ( FailureTypeSystemUnavailable = 
"SystemUnavailable" ) +// FailureType_Values returns all elements of the FailureType enum +func FailureType_Values() []string { + return []string{ + FailureTypeJobFailed, + FailureTypeConfigurationError, + FailureTypePermissionError, + FailureTypeRevisionOutOfSync, + FailureTypeRevisionUnavailable, + FailureTypeSystemUnavailable, + } +} + const ( // JobStatusCreated is a JobStatus enum value JobStatusCreated = "Created" @@ -12854,6 +12942,19 @@ const ( JobStatusFailed = "Failed" ) +// JobStatus_Values returns all elements of the JobStatus enum +func JobStatus_Values() []string { + return []string{ + JobStatusCreated, + JobStatusQueued, + JobStatusDispatched, + JobStatusInProgress, + JobStatusTimedOut, + JobStatusSucceeded, + JobStatusFailed, + } +} + const ( // PipelineExecutionStatusInProgress is a PipelineExecutionStatus enum value PipelineExecutionStatusInProgress = "InProgress" @@ -12874,6 +12975,18 @@ const ( PipelineExecutionStatusFailed = "Failed" ) +// PipelineExecutionStatus_Values returns all elements of the PipelineExecutionStatus enum +func PipelineExecutionStatus_Values() []string { + return []string{ + PipelineExecutionStatusInProgress, + PipelineExecutionStatusStopped, + PipelineExecutionStatusStopping, + PipelineExecutionStatusSucceeded, + PipelineExecutionStatusSuperseded, + PipelineExecutionStatusFailed, + } +} + const ( // StageExecutionStatusInProgress is a StageExecutionStatus enum value StageExecutionStatusInProgress = "InProgress" @@ -12891,11 +13004,29 @@ const ( StageExecutionStatusSucceeded = "Succeeded" ) +// StageExecutionStatus_Values returns all elements of the StageExecutionStatus enum +func StageExecutionStatus_Values() []string { + return []string{ + StageExecutionStatusInProgress, + StageExecutionStatusFailed, + StageExecutionStatusStopped, + StageExecutionStatusStopping, + StageExecutionStatusSucceeded, + } +} + const ( // StageRetryModeFailedActions is a StageRetryMode enum value StageRetryModeFailedActions = "FAILED_ACTIONS" ) +// StageRetryMode_Values returns all elements of the StageRetryMode enum +func StageRetryMode_Values() []string { + return []string{ + StageRetryModeFailedActions, + } +} + const ( // StageTransitionTypeInbound is a StageTransitionType enum value StageTransitionTypeInbound = "Inbound" @@ -12904,6 +13035,14 @@ const ( StageTransitionTypeOutbound = "Outbound" ) +// StageTransitionType_Values returns all elements of the StageTransitionType enum +func StageTransitionType_Values() []string { + return []string{ + StageTransitionTypeInbound, + StageTransitionTypeOutbound, + } +} + const ( // TriggerTypeCreatePipeline is a TriggerType enum value TriggerTypeCreatePipeline = "CreatePipeline" @@ -12924,6 +13063,18 @@ const ( TriggerTypePutActionRevision = "PutActionRevision" ) +// TriggerType_Values returns all elements of the TriggerType enum +func TriggerType_Values() []string { + return []string{ + TriggerTypeCreatePipeline, + TriggerTypeStartPipelineExecution, + TriggerTypePollForSourceChanges, + TriggerTypeWebhook, + TriggerTypeCloudWatchEvent, + TriggerTypePutActionRevision, + } +} + const ( // WebhookAuthenticationTypeGithubHmac is a WebhookAuthenticationType enum value WebhookAuthenticationTypeGithubHmac = "GITHUB_HMAC" @@ -12934,3 +13085,12 @@ const ( // WebhookAuthenticationTypeUnauthenticated is a WebhookAuthenticationType enum value WebhookAuthenticationTypeUnauthenticated = "UNAUTHENTICATED" ) + +// WebhookAuthenticationType_Values returns all elements of the WebhookAuthenticationType enum +func 
WebhookAuthenticationType_Values() []string { + return []string{ + WebhookAuthenticationTypeGithubHmac, + WebhookAuthenticationTypeIp, + WebhookAuthenticationTypeUnauthenticated, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go index 08e0a0637..d144c32f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codestarnotifications/api.go b/vendor/github.com/aws/aws-sdk-go/service/codestarnotifications/api.go index c69e76f54..8f4a33246 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codestarnotifications/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codestarnotifications/api.go @@ -1305,8 +1305,8 @@ func (c *CodeStarNotifications) UpdateNotificationRuleWithContext(ctx aws.Contex // AWS CodeStar Notifications can't create the notification rule because you // do not have sufficient permissions. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -1323,17 +1323,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1341,29 +1341,29 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // AWS CodeStar Notifications can't complete the request because the resource // is being modified by another process. Wait a few minutes and try again. 
type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -1380,17 +1380,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. -func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1398,28 +1398,28 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } // Some or all of the configuration is incomplete, missing, or not valid. type ConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -1436,17 +1436,17 @@ func (s ConfigurationException) GoString() string { func newErrorConfigurationException(v protocol.ResponseMetadata) error { return &ConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConfigurationException) Code() string { +func (s *ConfigurationException) Code() string { return "ConfigurationException" } // Message returns the exception's message. -func (s ConfigurationException) Message() string { +func (s *ConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1454,22 +1454,22 @@ func (s ConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConfigurationException) OrigErr() error { +func (s *ConfigurationException) OrigErr() error { return nil } -func (s ConfigurationException) Error() string { +func (s *ConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } type CreateNotificationRuleInput struct { @@ -1989,8 +1989,8 @@ func (s *EventTypeSummary) SetServiceName(v string) *EventTypeSummary { // The value for the enumeration token used in the request to return the next // batch of the results is not valid. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -2007,17 +2007,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2025,30 +2025,30 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // One of the AWS CodeStar Notifications limits has been exceeded. Limits apply // to accounts, notification rules, notifications, resources, and targets. For // more information, see Limits. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -2065,17 +2065,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. 
-func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2083,22 +2083,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a filter to apply to the list of returned event types. @@ -2677,8 +2677,8 @@ func (s *NotificationRuleSummary) SetId(v string) *NotificationRuleSummary { // A resource with the same name or ID already exists. Notification rule names // must be unique in your AWS account. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -2695,17 +2695,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2713,29 +2713,29 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // AWS CodeStar Notifications can't find a resource that matches the provided // ARN. 
type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -2752,17 +2752,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2770,22 +2770,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type SubscribeInput struct { @@ -3312,8 +3312,8 @@ func (s UpdateNotificationRuleOutput) GoString() string { // One or more parameter values are not valid. type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -3330,17 +3330,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. -func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3348,22 +3348,22 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } const ( @@ -3374,6 +3374,14 @@ const ( DetailTypeFull = "FULL" ) +// DetailType_Values returns all elements of the DetailType enum +func DetailType_Values() []string { + return []string{ + DetailTypeBasic, + DetailTypeFull, + } +} + const ( // ListEventTypesFilterNameResourceType is a ListEventTypesFilterName enum value ListEventTypesFilterNameResourceType = "RESOURCE_TYPE" @@ -3382,6 +3390,14 @@ const ( ListEventTypesFilterNameServiceName = "SERVICE_NAME" ) +// ListEventTypesFilterName_Values returns all elements of the ListEventTypesFilterName enum +func ListEventTypesFilterName_Values() []string { + return []string{ + ListEventTypesFilterNameResourceType, + ListEventTypesFilterNameServiceName, + } +} + const ( // ListNotificationRulesFilterNameEventTypeId is a ListNotificationRulesFilterName enum value ListNotificationRulesFilterNameEventTypeId = "EVENT_TYPE_ID" @@ -3396,6 +3412,16 @@ const ( ListNotificationRulesFilterNameTargetAddress = "TARGET_ADDRESS" ) +// ListNotificationRulesFilterName_Values returns all elements of the ListNotificationRulesFilterName enum +func ListNotificationRulesFilterName_Values() []string { + return []string{ + ListNotificationRulesFilterNameEventTypeId, + ListNotificationRulesFilterNameCreatedBy, + ListNotificationRulesFilterNameResource, + ListNotificationRulesFilterNameTargetAddress, + } +} + const ( // ListTargetsFilterNameTargetType is a ListTargetsFilterName enum value ListTargetsFilterNameTargetType = "TARGET_TYPE" @@ -3407,6 +3433,15 @@ const ( ListTargetsFilterNameTargetStatus = "TARGET_STATUS" ) +// ListTargetsFilterName_Values returns all elements of the ListTargetsFilterName enum +func ListTargetsFilterName_Values() []string { + return []string{ + ListTargetsFilterNameTargetType, + ListTargetsFilterNameTargetAddress, + ListTargetsFilterNameTargetStatus, + } +} + const ( // NotificationRuleStatusEnabled is a NotificationRuleStatus enum value NotificationRuleStatusEnabled = "ENABLED" @@ -3415,6 +3450,14 @@ const ( NotificationRuleStatusDisabled = "DISABLED" ) +// NotificationRuleStatus_Values returns all elements of the NotificationRuleStatus enum +func NotificationRuleStatus_Values() []string { + return []string{ + NotificationRuleStatusEnabled, + NotificationRuleStatusDisabled, + } +} + const ( // TargetStatusPending is a TargetStatus enum value TargetStatusPending = "PENDING" @@ -3431,3 +3474,14 @@ const ( // TargetStatusDeactivated is a TargetStatus enum value TargetStatusDeactivated = "DEACTIVATED" ) + +// TargetStatus_Values returns all elements of the TargetStatus enum +func TargetStatus_Values() []string { + return []string{ + TargetStatusPending, + TargetStatusActive, + TargetStatusUnreachable, + TargetStatusInactive, + TargetStatusDeactivated, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/codestarnotifications/service.go b/vendor/github.com/aws/aws-sdk-go/service/codestarnotifications/service.go index 5f5c775dd..c80a47fc1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codestarnotifications/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codestarnotifications/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" 
"github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go index 6b104b41f..07c698278 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/api.go @@ -2161,8 +2161,8 @@ func (c *CognitoIdentity) UpdateIdentityPoolWithContext(ctx aws.Context, input * // Thrown if there are parallel requests to modify a resource. type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned by a ConcurrentModificationException. Message_ *string `locationName:"message" type:"string"` @@ -2180,17 +2180,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. -func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2198,22 +2198,22 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } // Input to the CreateIdentityPool action. @@ -2620,8 +2620,8 @@ func (s *DescribeIdentityPoolInput) SetIdentityPoolId(v string) *DescribeIdentit // The provided developer user identifier is already registered with Cognito // under a different identity ID. type DeveloperUserAlreadyRegisteredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // This developer user identifier is already registered with Cognito. Message_ *string `locationName:"message" type:"string"` @@ -2639,17 +2639,17 @@ func (s DeveloperUserAlreadyRegisteredException) GoString() string { func newErrorDeveloperUserAlreadyRegisteredException(v protocol.ResponseMetadata) error { return &DeveloperUserAlreadyRegisteredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s DeveloperUserAlreadyRegisteredException) Code() string { +func (s *DeveloperUserAlreadyRegisteredException) Code() string { return "DeveloperUserAlreadyRegisteredException" } // Message returns the exception's message. -func (s DeveloperUserAlreadyRegisteredException) Message() string { +func (s *DeveloperUserAlreadyRegisteredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2657,29 +2657,29 @@ func (s DeveloperUserAlreadyRegisteredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeveloperUserAlreadyRegisteredException) OrigErr() error { +func (s *DeveloperUserAlreadyRegisteredException) OrigErr() error { return nil } -func (s DeveloperUserAlreadyRegisteredException) Error() string { +func (s *DeveloperUserAlreadyRegisteredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeveloperUserAlreadyRegisteredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeveloperUserAlreadyRegisteredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeveloperUserAlreadyRegisteredException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeveloperUserAlreadyRegisteredException) RequestID() string { + return s.RespMetadata.RequestID } // An exception thrown when a dependent service such as Facebook or Twitter // is not responding type ExternalServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned by an ExternalServiceException Message_ *string `locationName:"message" type:"string"` @@ -2697,17 +2697,17 @@ func (s ExternalServiceException) GoString() string { func newErrorExternalServiceException(v protocol.ResponseMetadata) error { return &ExternalServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ExternalServiceException) Code() string { +func (s *ExternalServiceException) Code() string { return "ExternalServiceException" } // Message returns the exception's message. -func (s ExternalServiceException) Message() string { +func (s *ExternalServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2715,22 +2715,22 @@ func (s ExternalServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ExternalServiceException) OrigErr() error { +func (s *ExternalServiceException) OrigErr() error { return nil } -func (s ExternalServiceException) Error() string { +func (s *ExternalServiceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ExternalServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ExternalServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ExternalServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *ExternalServiceException) RequestID() string { + return s.RespMetadata.RequestID } // Input to the GetCredentialsForIdentity action. 
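Because RespMetadata is now an exported field, code outside the SDK can construct fully populated typed service errors, which is handy in provider unit tests. A small sketch under that assumption; the package and helper names and the status, request ID, and message values are hypothetical:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/service/cognitoidentity"
)

// newTestConcurrentModificationError fabricates a typed service error for tests.
func newTestConcurrentModificationError() error {
	return &cognitoidentity.ConcurrentModificationException{
		RespMetadata: protocol.ResponseMetadata{
			StatusCode: 400,               // surfaced by StatusCode()
			RequestID:  "test-request-id", // surfaced by RequestID()
		},
		Message_: aws.String("simulated parallel modification"),
	}
}
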
@@ -3489,8 +3489,8 @@ func (s *IdentityPoolShortDescription) SetIdentityPoolName(v string) *IdentityPo // Thrown when the service encounters an error during processing the request. type InternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned by an InternalErrorException. Message_ *string `locationName:"message" type:"string"` @@ -3508,17 +3508,17 @@ func (s InternalErrorException) GoString() string { func newErrorInternalErrorException(v protocol.ResponseMetadata) error { return &InternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalErrorException) Code() string { +func (s *InternalErrorException) Code() string { return "InternalErrorException" } // Message returns the exception's message. -func (s InternalErrorException) Message() string { +func (s *InternalErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3526,29 +3526,29 @@ func (s InternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalErrorException) OrigErr() error { +func (s *InternalErrorException) OrigErr() error { return nil } -func (s InternalErrorException) Error() string { +func (s *InternalErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalErrorException) RequestID() string { + return s.RespMetadata.RequestID } // Thrown if the identity pool has no role associated for the given auth type // (auth/unauth) or if the AssumeRole fails. type InvalidIdentityPoolConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned for an InvalidIdentityPoolConfigurationException Message_ *string `locationName:"message" type:"string"` @@ -3566,17 +3566,17 @@ func (s InvalidIdentityPoolConfigurationException) GoString() string { func newErrorInvalidIdentityPoolConfigurationException(v protocol.ResponseMetadata) error { return &InvalidIdentityPoolConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidIdentityPoolConfigurationException) Code() string { +func (s *InvalidIdentityPoolConfigurationException) Code() string { return "InvalidIdentityPoolConfigurationException" } // Message returns the exception's message. -func (s InvalidIdentityPoolConfigurationException) Message() string { +func (s *InvalidIdentityPoolConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3584,28 +3584,28 @@ func (s InvalidIdentityPoolConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidIdentityPoolConfigurationException) OrigErr() error { +func (s *InvalidIdentityPoolConfigurationException) OrigErr() error { return nil } -func (s InvalidIdentityPoolConfigurationException) Error() string { +func (s *InvalidIdentityPoolConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidIdentityPoolConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidIdentityPoolConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidIdentityPoolConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidIdentityPoolConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // Thrown for missing or bad input parameter(s). type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned by an InvalidParameterException. Message_ *string `locationName:"message" type:"string"` @@ -3623,17 +3623,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3641,28 +3641,28 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // Thrown when the total number of user pools has exceeded a preset limit. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned by a LimitExceededException. Message_ *string `locationName:"message" type:"string"` @@ -3680,17 +3680,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3698,22 +3698,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Input to the ListIdentities action. @@ -4354,8 +4354,8 @@ func (s *MergeDeveloperIdentitiesOutput) SetIdentityId(v string) *MergeDeveloper // Thrown when a user is not authorized to access the requested resource. type NotAuthorizedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned by a NotAuthorizedException Message_ *string `locationName:"message" type:"string"` @@ -4373,17 +4373,17 @@ func (s NotAuthorizedException) GoString() string { func newErrorNotAuthorizedException(v protocol.ResponseMetadata) error { return &NotAuthorizedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotAuthorizedException) Code() string { +func (s *NotAuthorizedException) Code() string { return "NotAuthorizedException" } // Message returns the exception's message. -func (s NotAuthorizedException) Message() string { +func (s *NotAuthorizedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4391,22 +4391,22 @@ func (s NotAuthorizedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotAuthorizedException) OrigErr() error { +func (s *NotAuthorizedException) OrigErr() error { return nil } -func (s NotAuthorizedException) Error() string { +func (s *NotAuthorizedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotAuthorizedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotAuthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotAuthorizedException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotAuthorizedException) RequestID() string { + return s.RespMetadata.RequestID } // A provider representing an Amazon Cognito user pool and its client ID. @@ -4479,8 +4479,8 @@ func (s *Provider) SetServerSideTokenCheck(v bool) *Provider { // Thrown when a user tries to use a login which is already linked to another // account. 
type ResourceConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned by a ResourceConflictException. Message_ *string `locationName:"message" type:"string"` @@ -4498,17 +4498,17 @@ func (s ResourceConflictException) GoString() string { func newErrorResourceConflictException(v protocol.ResponseMetadata) error { return &ResourceConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceConflictException) Code() string { +func (s *ResourceConflictException) Code() string { return "ResourceConflictException" } // Message returns the exception's message. -func (s ResourceConflictException) Message() string { +func (s *ResourceConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4516,29 +4516,29 @@ func (s ResourceConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceConflictException) OrigErr() error { +func (s *ResourceConflictException) OrigErr() error { return nil } -func (s ResourceConflictException) Error() string { +func (s *ResourceConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceConflictException) RequestID() string { + return s.RespMetadata.RequestID } // Thrown when the requested resource (for example, a dataset or record) does // not exist. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned by a ResourceNotFoundException. Message_ *string `locationName:"message" type:"string"` @@ -4556,17 +4556,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4574,22 +4574,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // A role mapping. @@ -4882,8 +4882,8 @@ func (s TagResourceOutput) GoString() string { // Thrown when a request is throttled. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Message returned by a TooManyRequestsException Message_ *string `locationName:"message" type:"string"` @@ -4901,17 +4901,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4919,22 +4919,22 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } // Input to the UnlinkDeveloperIdentity action. 
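The *_Values helpers added throughout these vendored hunks enumerate every member of an enum, so the provider can validate enum-backed arguments against the generated list instead of a hand-maintained slice. A hedged sketch; the attribute and function names are illustrative, and the terraform-plugin-sdk/v2 import paths are an assumption about which plugin SDK version this module uses:

package aws

import (
	"github.com/aws/aws-sdk-go/service/codestarnotifications"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// detailTypeSchema validates an enum-backed argument against the generated
// DetailType_Values list rather than a hard-coded set of strings.
func detailTypeSchema() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Required:     true,
		ValidateFunc: validation.StringInSlice(codestarnotifications.DetailType_Values(), false),
	}
}
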
@@ -5240,6 +5240,14 @@ const ( AmbiguousRoleResolutionTypeDeny = "Deny" ) +// AmbiguousRoleResolutionType_Values returns all elements of the AmbiguousRoleResolutionType enum +func AmbiguousRoleResolutionType_Values() []string { + return []string{ + AmbiguousRoleResolutionTypeAuthenticatedRole, + AmbiguousRoleResolutionTypeDeny, + } +} + const ( // ErrorCodeAccessDenied is a ErrorCode enum value ErrorCodeAccessDenied = "AccessDenied" @@ -5248,6 +5256,14 @@ const ( ErrorCodeInternalServerError = "InternalServerError" ) +// ErrorCode_Values returns all elements of the ErrorCode enum +func ErrorCode_Values() []string { + return []string{ + ErrorCodeAccessDenied, + ErrorCodeInternalServerError, + } +} + const ( // MappingRuleMatchTypeEquals is a MappingRuleMatchType enum value MappingRuleMatchTypeEquals = "Equals" @@ -5262,6 +5278,16 @@ const ( MappingRuleMatchTypeNotEqual = "NotEqual" ) +// MappingRuleMatchType_Values returns all elements of the MappingRuleMatchType enum +func MappingRuleMatchType_Values() []string { + return []string{ + MappingRuleMatchTypeEquals, + MappingRuleMatchTypeContains, + MappingRuleMatchTypeStartsWith, + MappingRuleMatchTypeNotEqual, + } +} + const ( // RoleMappingTypeToken is a RoleMappingType enum value RoleMappingTypeToken = "Token" @@ -5269,3 +5295,11 @@ const ( // RoleMappingTypeRules is a RoleMappingType enum value RoleMappingTypeRules = "Rules" ) + +// RoleMappingType_Values returns all elements of the RoleMappingType enum +func RoleMappingType_Values() []string { + return []string{ + RoleMappingTypeToken, + RoleMappingTypeRules, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go index fc95025a8..54811ae4a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/api.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/api.go index 8fd262b54..86c0162ec 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/api.go @@ -383,9 +383,9 @@ func (c *CognitoIdentityProvider) AdminCreateUserRequest(input *AdminCreateUserI // If MessageAction is not set, the default is to send a welcome message via // email or phone (SMS). // -// This message is based on a template that you configured in your call to or -// . This template includes your custom sign-up instructions and placeholders -// for user name and temporary password. +// This message is based on a template that you configured in your call to create +// or update a user pool. This template includes your custom sign-up instructions +// and placeholders for user name and temporary password. // // Alternatively, you can call AdminCreateUser with “SUPPRESS” for the MessageAction // parameter, and Amazon Cognito will not send any email. @@ -737,7 +737,7 @@ func (c *CognitoIdentityProvider) AdminDisableProviderForUserRequest(input *Admi // sign-in. 
If the user to disable is a linked external IdP user, any link between // that user and an existing user is removed. The next time the external user // (no longer attached to the previously linked DestinationUser) signs in, they -// must create a new user account. See . +// must create a new user account. See AdminLinkProviderForUser (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminLinkProviderForUser.html). // // This action is enabled only for admin access and requires developer credentials. // @@ -756,10 +756,11 @@ func (c *CognitoIdentityProvider) AdminDisableProviderForUserRequest(input *Admi // For de-linking a SAML identity, there are two scenarios. If the linked identity // has not yet been used to sign-in, the ProviderAttributeName and ProviderAttributeValue // must be the same values that were used for the SourceUser when the identities -// were originally linked in the call. (If the linking was done with ProviderAttributeName -// set to Cognito_Subject, the same applies here). However, if the user has -// already signed in, the ProviderAttributeName must be Cognito_Subject and -// ProviderAttributeValue must be the subject of the SAML assertion. +// were originally linked using AdminLinkProviderForUser call. (If the linking +// was done with ProviderAttributeName set to Cognito_Subject, the same applies +// here). However, if the user has already signed in, the ProviderAttributeName +// must be Cognito_Subject and ProviderAttributeValue must be the subject of +// the SAML assertion. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1510,13 +1511,13 @@ func (c *CognitoIdentityProvider) AdminLinkProviderForUserRequest(input *AdminLi // API links that user to a federated user identity, so that when the federated // user identity is used, the user signs in as the existing user account. // +// The maximum number of federated identities linked to a user is 5. +// // Because this API allows a user with an external federated identity to sign // in as an existing user in the user pool, it is critical that it only be used // with external identity providers and provider attributes that have been trusted // by the application owner. // -// See also . -// // This action is enabled only for admin access and requires developer credentials. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1551,6 +1552,10 @@ func (c *CognitoIdentityProvider) AdminLinkProviderForUserRequest(input *AdminLi // account. This exception tells user that an account with this email or phone // already exists. // +// * LimitExceededException +// This exception is thrown when a user exceeds the limit for a requested AWS +// resource. +// // * InternalErrorException // This exception is thrown when Amazon Cognito encounters an internal error. // @@ -2652,7 +2657,8 @@ func (c *CognitoIdentityProvider) AdminSetUserSettingsRequest(input *AdminSetUse // // This action is no longer supported. You can use it to configure only SMS // MFA. You can't use it to configure TOTP software token MFA. To configure -// either type of MFA, use the AdminSetUserMFAPreference action instead. +// either type of MFA, use AdminSetUserMFAPreference (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminSetUserMFAPreference.html) +// instead. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3204,6 +3210,9 @@ func (c *CognitoIdentityProvider) AssociateSoftwareTokenRequest(input *Associate // API operation AssociateSoftwareToken for usage and error information. // // Returned Error Types: +// * ConcurrentModificationException +// This exception is thrown if two or more modifications are happening concurrently. +// // * InvalidParameterException // This exception is thrown when the Amazon Cognito service encounters an invalid // parameter. @@ -6058,11 +6067,13 @@ func (c *CognitoIdentityProvider) ForgotPasswordRequest(input *ForgotPasswordInp // // Calling this API causes a message to be sent to the end user with a confirmation // code that is required to change the user's password. For the Username parameter, -// you can use the username or user alias. If a verified phone number exists -// for the user, the confirmation code is sent to the phone number. Otherwise, -// if a verified email exists, the confirmation code is sent to the email. If -// neither a verified phone number nor a verified email exists, InvalidParameterException -// is thrown. To use the confirmation code for resetting the password, call . +// you can use the username or user alias. The method used to send the confirmation +// code is sent according to the specified AccountRecoverySetting. For more +// information, see Recovering User Accounts (https://docs.aws.amazon.com/cognito/latest/developerguide/how-to-recover-a-user-account.html) +// in the Amazon Cognito Developer Guide. If neither a verified phone number +// nor a verified email exists, an InvalidParameterException is thrown. To use +// the confirmation code for resetting the password, call ConfirmForgotPassword +// (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_ConfirmForgotPassword.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7203,6 +7214,7 @@ func (c *CognitoIdentityProvider) InitiateAuthRequest(input *InitiateAuthInput) output = &InitiateAuthOutput{} req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials return } @@ -8827,6 +8839,7 @@ func (c *CognitoIdentityProvider) RespondToAuthChallengeRequest(input *RespondTo output = &RespondToAuthChallengeOutput{} req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials return } @@ -8991,8 +9004,6 @@ func (c *CognitoIdentityProvider) SetRiskConfigurationRequest(input *SetRiskConf // To enable Amazon Cognito advanced security features, update the user pool // to include the UserPoolAddOns keyAdvancedSecurityMode. // -// See . -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -9413,7 +9424,8 @@ func (c *CognitoIdentityProvider) SetUserSettingsRequest(input *SetUserSettingsI // // This action is no longer supported. You can use it to configure only SMS // MFA. You can't use it to configure TOTP software token MFA. To configure -// either type of MFA, use the SetUserMFAPreference action instead. +// either type of MFA, use SetUserMFAPreference (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_SetUserMFAPreference.html) +// instead. 
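// Sketch (editorial, placeholder client ID and user): with credentials.AnonymousCredentials
// now set on InitiateAuthRequest and RespondToAuthChallengeRequest above, these calls are
// sent unsigned, so the user-facing auth flows can run from a client with no AWS
// credentials configured at all.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := cognitoidentityprovider.New(sess)

	out, err := svc.InitiateAuth(&cognitoidentityprovider.InitiateAuthInput{
		AuthFlow: aws.String(cognitoidentityprovider.AuthFlowTypeUserPasswordAuth),
		ClientId: aws.String("example-app-client-id"), // placeholder
		AuthParameters: map[string]*string{
			"USERNAME": aws.String("jdoe"),          // placeholder
			"PASSWORD": aws.String("correct-horse"), // placeholder
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.ChallengeName))
}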
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10698,7 +10710,7 @@ func (c *CognitoIdentityProvider) UpdateUserPoolRequest(input *UpdateUserPoolInp // UpdateUserPool API operation for Amazon Cognito Identity Provider. // // Updates the specified user pool with the specified attributes. You can get -// a list of the current user pool settings with . +// a list of the current user pool settings using DescribeUserPool (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPool.html). // // If you don't provide a value for an attribute, it will be set to the default // value. @@ -10820,7 +10832,8 @@ func (c *CognitoIdentityProvider) UpdateUserPoolClientRequest(input *UpdateUserP // UpdateUserPoolClient API operation for Amazon Cognito Identity Provider. // // Updates the specified user pool app client with the specified attributes. -// You can get a list of the current user pool app client settings with . +// You can get a list of the current user pool app client settings using DescribeUserPoolClient +// (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_DescribeUserPoolClient.html). // // If you don't provide a value for an attribute, it will be set to the default // value. @@ -11905,9 +11918,9 @@ type AdminCreateUserInput struct { // An array of name-value pairs that contain user attributes and attribute values // to be set for the user to be created. You can create a user without specifying // any attributes other than Username. However, any attributes that you specify - // as required (in or in the Attributes tab of the console) must be supplied - // either by you (in your call to AdminCreateUser) or by the user (when he or - // she signs up in response to your welcome message). + // as required (when creating a user pool or in the Attributes tab of the console) + // must be supplied either by you (in your call to AdminCreateUser) or by the + // user (when he or she signs up in response to your welcome message). // // For custom attributes, you must prepend the custom: prefix to the attribute // name. @@ -11919,7 +11932,7 @@ type AdminCreateUserInput struct { // // In your call to AdminCreateUser, you can set the email_verified attribute // to True, and you can set the phone_number_verified attribute to True. (You - // can also do this by calling .) + // can also do this by calling AdminUpdateUserAttributes (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminUpdateUserAttributes.html).) // // * email: The email address of the user to whom the message that contains // the code and username will be sent. Required if the email_verified attribute @@ -12741,7 +12754,7 @@ type AdminGetUserOutput struct { // This response parameter is no longer supported. It provides information only // about SMS MFA configurations. It doesn't provide information about TOTP software // token MFA configurations. To look up information about either type of MFA - // configuration, use the AdminGetUserResponse$UserMFASettingList response instead. + // configuration, use UserMFASettingList instead. MFAOptions []*MFAOptionType `type:"list"` // The user's preferred MFA setting. @@ -12901,17 +12914,19 @@ type AdminInitiateAuthInput struct { // that you are invoking. 
The required values depend on the value of AuthFlow: // // * For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH - // (required if the app client is configured with a client secret), DEVICE_KEY + // (required if the app client is configured with a client secret), DEVICE_KEY. // // * For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH - // (required if the app client is configured with a client secret), DEVICE_KEY + // (required if the app client is configured with a client secret), DEVICE_KEY. // // * For ADMIN_NO_SRP_AUTH: USERNAME (required), SECRET_HASH (if app client - // is configured with client secret), PASSWORD (required), DEVICE_KEY + // is configured with client secret), PASSWORD (required), DEVICE_KEY. // // * For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is - // configured with client secret), DEVICE_KEY - AuthParameters map[string]*string `type:"map"` + // configured with client secret), DEVICE_KEY. To start the authentication + // flow with password verification, include ChallengeName: SRP_A and SRP_A: + // (The SRP_A Value). + AuthParameters map[string]*string `type:"map" sensitive:"true"` // The app client ID. // @@ -13833,7 +13848,7 @@ type AdminRespondToAuthChallengeInput struct { // calls. AnalyticsMetadata *AnalyticsMetadataType `type:"structure"` - // The challenge name. For more information, see . + // The challenge name. For more information, see AdminInitiateAuth (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminInitiateAuth.html). // // ChallengeName is a required field ChallengeName *string `type:"string" required:"true" enum:"ChallengeNameType"` @@ -14014,17 +14029,16 @@ type AdminRespondToAuthChallengeOutput struct { // The result returned by the server in response to the authentication request. AuthenticationResult *AuthenticationResultType `type:"structure"` - // The name of the challenge. For more information, see . + // The name of the challenge. For more information, see AdminInitiateAuth (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminInitiateAuth.html). ChallengeName *string `type:"string" enum:"ChallengeNameType"` - // The challenge parameters. For more information, see . + // The challenge parameters. For more information, see AdminInitiateAuth (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminInitiateAuth.html). ChallengeParameters map[string]*string `type:"map"` // The session which should be passed both ways in challenge-response calls - // to the service. If the or API call determines that the caller needs to go - // through another challenge, they return a session with other challenge parameters. - // This session should be passed as it is to the next RespondToAuthChallenge - // API call. + // to the service. If the caller needs to go through another challenge, they + // return a session with other challenge parameters. This session should be + // passed as it is to the next RespondToAuthChallenge API call. Session *string `min:"20" type:"string"` } @@ -14772,8 +14786,8 @@ func (s AdminUserGlobalSignOutOutput) GoString() string { // account. This exception tells user that an account with this email or phone // already exists. type AliasExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message sent to the user when an alias exists. 
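// Sketch (editorial, placeholder pool/client IDs and secret): building the AuthParameters
// map (now tagged sensitive) for ADMIN_NO_SRP_AUTH, including the SECRET_HASH that is
// required when the app client has a secret. SECRET_HASH is the Base64 of an HMAC-SHA256
// over username + client id, keyed with the client secret.
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
)

func secretHash(username, clientID, clientSecret string) string {
	mac := hmac.New(sha256.New, []byte(clientSecret))
	mac.Write([]byte(username + clientID))
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := cognitoidentityprovider.New(sess)

	username, clientID, clientSecret := "jdoe", "example-client-id", "example-client-secret" // placeholders

	out, err := svc.AdminInitiateAuth(&cognitoidentityprovider.AdminInitiateAuthInput{
		AuthFlow:   aws.String(cognitoidentityprovider.AuthFlowTypeAdminNoSrpAuth),
		ClientId:   aws.String(clientID),
		UserPoolId: aws.String("us-east-1_EXAMPLE"), // placeholder
		AuthParameters: map[string]*string{
			"USERNAME":    aws.String(username),
			"PASSWORD":    aws.String("correct-horse"), // placeholder
			"SECRET_HASH": aws.String(secretHash(username, clientID, clientSecret)),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.ChallengeName))
}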
Message_ *string `locationName:"message" type:"string"` @@ -14791,17 +14805,17 @@ func (s AliasExistsException) GoString() string { func newErrorAliasExistsException(v protocol.ResponseMetadata) error { return &AliasExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AliasExistsException) Code() string { +func (s *AliasExistsException) Code() string { return "AliasExistsException" } // Message returns the exception's message. -func (s AliasExistsException) Message() string { +func (s *AliasExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14809,44 +14823,49 @@ func (s AliasExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AliasExistsException) OrigErr() error { +func (s *AliasExistsException) OrigErr() error { return nil } -func (s AliasExistsException) Error() string { +func (s *AliasExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AliasExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AliasExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AliasExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *AliasExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The Amazon Pinpoint analytics configuration for collecting metrics for a // user pool. +// +// In regions where Pinpoint is not available, Cognito User Pools only supports +// sending events to Amazon Pinpoint projects in us-east-1. In regions where +// Pinpoint is available, Cognito User Pools will support sending events to +// Amazon Pinpoint projects within that same region. type AnalyticsConfigurationType struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of an Amazon Pinpoint project. You can use + // the Amazon Pinpoint project for Pinpoint integration with the chosen User + // Pool Client. Amazon Cognito publishes events to the pinpoint project declared + // by the app ARN. + ApplicationArn *string `min:"20" type:"string"` + // The application ID for an Amazon Pinpoint application. - // - // ApplicationId is a required field - ApplicationId *string `type:"string" required:"true"` + ApplicationId *string `type:"string"` // The external ID. - // - // ExternalId is a required field - ExternalId *string `type:"string" required:"true"` + ExternalId *string `type:"string"` // The ARN of an IAM role that authorizes Amazon Cognito to publish events to // Amazon Pinpoint analytics. - // - // RoleArn is a required field - RoleArn *string `min:"20" type:"string" required:"true"` + RoleArn *string `min:"20" type:"string"` // If UserDataShared is true, Amazon Cognito will include user data in the events // it publishes to Amazon Pinpoint analytics. @@ -14866,14 +14885,8 @@ func (s AnalyticsConfigurationType) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *AnalyticsConfigurationType) Validate() error { invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfigurationType"} - if s.ApplicationId == nil { - invalidParams.Add(request.NewErrParamRequired("ApplicationId")) - } - if s.ExternalId == nil { - invalidParams.Add(request.NewErrParamRequired("ExternalId")) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) + if s.ApplicationArn != nil && len(*s.ApplicationArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ApplicationArn", 20)) } if s.RoleArn != nil && len(*s.RoleArn) < 20 { invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) @@ -14885,6 +14898,12 @@ func (s *AnalyticsConfigurationType) Validate() error { return nil } +// SetApplicationArn sets the ApplicationArn field's value. +func (s *AnalyticsConfigurationType) SetApplicationArn(v string) *AnalyticsConfigurationType { + s.ApplicationArn = &v + return s +} + // SetApplicationId sets the ApplicationId field's value. func (s *AnalyticsConfigurationType) SetApplicationId(v string) *AnalyticsConfigurationType { s.ApplicationId = &v @@ -14913,6 +14932,10 @@ func (s *AnalyticsConfigurationType) SetUserDataShared(v bool) *AnalyticsConfigu // // An endpoint uniquely identifies a mobile device, email address, or phone // number that can receive messages from Amazon Pinpoint analytics. +// +// Cognito User Pools only supports sending events to Amazon Pinpoint projects +// in the US East (N. Virginia) us-east-1 Region, regardless of the region in +// which the user pool resides. type AnalyticsMetadataType struct { _ struct{} `type:"structure"` @@ -15392,8 +15415,8 @@ func (s *CodeDeliveryDetailsType) SetDestination(v string) *CodeDeliveryDetailsT // This exception is thrown when a verification code fails to deliver successfully. type CodeDeliveryFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message sent when a verification code fails to deliver successfully. Message_ *string `locationName:"message" type:"string"` @@ -15411,17 +15434,17 @@ func (s CodeDeliveryFailureException) GoString() string { func newErrorCodeDeliveryFailureException(v protocol.ResponseMetadata) error { return &CodeDeliveryFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CodeDeliveryFailureException) Code() string { +func (s *CodeDeliveryFailureException) Code() string { return "CodeDeliveryFailureException" } // Message returns the exception's message. -func (s CodeDeliveryFailureException) Message() string { +func (s *CodeDeliveryFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15429,29 +15452,29 @@ func (s CodeDeliveryFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CodeDeliveryFailureException) OrigErr() error { +func (s *CodeDeliveryFailureException) OrigErr() error { return nil } -func (s CodeDeliveryFailureException) Error() string { +func (s *CodeDeliveryFailureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
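// Sketch (editorial, placeholder ARN): with ApplicationId, ExternalId and RoleArn no longer
// required, a Pinpoint analytics configuration can instead be expressed with just the
// project ARN added above.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
)

func main() {
	cfg := &cognitoidentityprovider.AnalyticsConfigurationType{
		ApplicationArn: aws.String("arn:aws:mobiletargeting:us-east-1:123456789012:apps/exampleprojectid"), // placeholder
		UserDataShared: aws.Bool(true),
	}
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid analytics configuration:", err)
		return
	}
	fmt.Println(cfg)
}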
-func (s CodeDeliveryFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CodeDeliveryFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CodeDeliveryFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *CodeDeliveryFailureException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown if the provided code does not match what the server // was expecting. type CodeMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message provided when the code mismatch exception is thrown. Message_ *string `locationName:"message" type:"string"` @@ -15469,17 +15492,17 @@ func (s CodeMismatchException) GoString() string { func newErrorCodeMismatchException(v protocol.ResponseMetadata) error { return &CodeMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CodeMismatchException) Code() string { +func (s *CodeMismatchException) Code() string { return "CodeMismatchException" } // Message returns the exception's message. -func (s CodeMismatchException) Message() string { +func (s *CodeMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15487,22 +15510,22 @@ func (s CodeMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CodeMismatchException) OrigErr() error { +func (s *CodeMismatchException) OrigErr() error { return nil } -func (s CodeMismatchException) Error() string { +func (s *CodeMismatchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CodeMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CodeMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CodeMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *CodeMismatchException) RequestID() string { + return s.RespMetadata.RequestID } // The compromised credentials actions type @@ -15600,8 +15623,8 @@ func (s *CompromisedCredentialsRiskConfigurationType) SetEventFilter(v []*string // This exception is thrown if two or more modifications are happening concurrently. type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message provided when the concurrent exception is thrown. Message_ *string `locationName:"message" type:"string"` @@ -15619,17 +15642,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. 
-func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15637,22 +15660,22 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } // Confirms the device request. @@ -15802,7 +15825,7 @@ type ConfirmForgotPasswordInput struct { ClientMetadata map[string]*string `type:"map"` // The confirmation code sent by a user's request to retrieve a forgotten password. - // For more information, see + // For more information, see ForgotPassword (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_ForgotPassword.html). // // ConfirmationCode is a required field ConfirmationCode *string `min:"1" type:"string" required:"true"` @@ -16344,8 +16367,9 @@ type CreateIdentityProviderInput struct { // The identity provider details. The following list describes the provider // detail keys for each identity provider type. // - // * For Google, Facebook and Login with Amazon: client_id client_secret - // authorize_scopes + // * For Google and Login with Amazon: client_id client_secret authorize_scopes + // + // * For Facebook: client_id client_secret authorize_scopes api_version // // * For Sign in with Apple: client_id team_id key_id private_key authorize_scopes // @@ -16354,9 +16378,9 @@ type CreateIdentityProviderInput struct { // URL specified by oidc_issuer key token_url if not available from discovery // URL specified by oidc_issuer key attributes_url if not available from // discovery URL specified by oidc_issuer key jwks_uri if not available from - // discovery URL specified by oidc_issuer key authorize_scopes + // discovery URL specified by oidc_issuer key // - // * For SAML providers: MetadataFile OR MetadataURL IDPSignOut optional + // * For SAML providers: MetadataFile OR MetadataURL IDPSignout optional // // ProviderDetails is a required field ProviderDetails map[string]*string `type:"map" required:"true"` @@ -16701,6 +16725,11 @@ func (s *CreateUserImportJobOutput) SetUserImportJob(v *UserImportJobType) *Crea type CreateUserPoolClientInput struct { _ struct{} `type:"structure"` + // The time limit, between 5 minutes and 1 day, after which the access token + // is no longer valid and cannot be used. This value will be overridden if you + // have entered a value in TokenValidityUnits. + AccessTokenValidity *int64 `min:"1" type:"integer"` + // The allowed OAuth flows. 
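// Sketch (editorial, all values placeholders): the Facebook provider detail keys called out
// in the CreateIdentityProviderInput docs above, including the new api_version key; the
// version string shown is only an example.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
)

func main() {
	input := &cognitoidentityprovider.CreateIdentityProviderInput{
		UserPoolId:   aws.String("us-east-1_EXAMPLE"), // placeholder
		ProviderName: aws.String("Facebook"),
		ProviderType: aws.String(cognitoidentityprovider.IdentityProviderTypeTypeFacebook),
		ProviderDetails: map[string]*string{
			"client_id":        aws.String("example-facebook-app-id"),     // placeholder
			"client_secret":    aws.String("example-facebook-app-secret"), // placeholder
			"authorize_scopes": aws.String("public_profile,email"),
			"api_version":      aws.String("v6.0"), // placeholder version
		},
		AttributeMapping: map[string]*string{"email": aws.String("email")},
	}
	fmt.Println(input)
}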
// // Set to code to initiate a code grant flow, which provides an authorization @@ -16726,6 +16755,11 @@ type CreateUserPoolClientInput struct { // The Amazon Pinpoint analytics configuration for collecting metrics for this // user pool. + // + // In regions where Pinpoint is not available, Cognito User Pools only supports + // sending events to Amazon Pinpoint projects in us-east-1. In regions where + // Pinpoint is available, Cognito User Pools will support sending events to + // Amazon Pinpoint projects within that same region. AnalyticsConfiguration *AnalyticsConfigurationType `type:"structure"` // A list of allowed redirect (callback) URLs for the identity providers. @@ -16797,6 +16831,11 @@ type CreateUserPoolClientInput struct { // client being created. GenerateSecret *bool `type:"boolean"` + // The time limit, between 5 minutes and 1 day, after which the ID token is + // no longer valid and cannot be used. This value will be overridden if you + // have entered a value in TokenValidityUnits. + IdTokenValidity *int64 `min:"1" type:"integer"` + // A list of allowed logout URLs for the identity providers. LogoutURLs []*string `type:"list"` @@ -16816,24 +16855,6 @@ type CreateUserPoolClientInput struct { // * LEGACY - This represents the old behavior of Cognito where user existence // related errors are not prevented. // - // This setting affects the behavior of following APIs: - // - // * AdminInitiateAuth - // - // * AdminRespondToAuthChallenge - // - // * InitiateAuth - // - // * RespondToAuthChallenge - // - // * ForgotPassword - // - // * ConfirmForgotPassword - // - // * ConfirmSignUp - // - // * ResendConfirmationCode - // // After February 15th 2020, the value of PreventUserExistenceErrors will default // to ENABLED for newly created user pool clients if no value is provided. PreventUserExistenceErrors *string `type:"string" enum:"PreventUserExistenceErrorTypes"` @@ -16849,6 +16870,10 @@ type CreateUserPoolClientInput struct { // this client. The following are supported: COGNITO, Facebook, Google and LoginWithAmazon. SupportedIdentityProviders []*string `type:"list"` + // The units in which the validity times are represented in. Default for RefreshToken + // is days, and default for ID and access tokens are hours. + TokenValidityUnits *TokenValidityUnitsType `type:"structure"` + // The user pool ID for the user pool where you want to create a user pool client. // // UserPoolId is a required field @@ -16879,6 +16904,9 @@ func (s CreateUserPoolClientInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *CreateUserPoolClientInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateUserPoolClientInput"} + if s.AccessTokenValidity != nil && *s.AccessTokenValidity < 1 { + invalidParams.Add(request.NewErrParamMinValue("AccessTokenValidity", 1)) + } if s.ClientName == nil { invalidParams.Add(request.NewErrParamRequired("ClientName")) } @@ -16888,6 +16916,9 @@ func (s *CreateUserPoolClientInput) Validate() error { if s.DefaultRedirectURI != nil && len(*s.DefaultRedirectURI) < 1 { invalidParams.Add(request.NewErrParamMinLen("DefaultRedirectURI", 1)) } + if s.IdTokenValidity != nil && *s.IdTokenValidity < 1 { + invalidParams.Add(request.NewErrParamMinValue("IdTokenValidity", 1)) + } if s.UserPoolId == nil { invalidParams.Add(request.NewErrParamRequired("UserPoolId")) } @@ -16906,6 +16937,12 @@ func (s *CreateUserPoolClientInput) Validate() error { return nil } +// SetAccessTokenValidity sets the AccessTokenValidity field's value. +func (s *CreateUserPoolClientInput) SetAccessTokenValidity(v int64) *CreateUserPoolClientInput { + s.AccessTokenValidity = &v + return s +} + // SetAllowedOAuthFlows sets the AllowedOAuthFlows field's value. func (s *CreateUserPoolClientInput) SetAllowedOAuthFlows(v []*string) *CreateUserPoolClientInput { s.AllowedOAuthFlows = v @@ -16960,6 +16997,12 @@ func (s *CreateUserPoolClientInput) SetGenerateSecret(v bool) *CreateUserPoolCli return s } +// SetIdTokenValidity sets the IdTokenValidity field's value. +func (s *CreateUserPoolClientInput) SetIdTokenValidity(v int64) *CreateUserPoolClientInput { + s.IdTokenValidity = &v + return s +} + // SetLogoutURLs sets the LogoutURLs field's value. func (s *CreateUserPoolClientInput) SetLogoutURLs(v []*string) *CreateUserPoolClientInput { s.LogoutURLs = v @@ -16990,6 +17033,12 @@ func (s *CreateUserPoolClientInput) SetSupportedIdentityProviders(v []*string) * return s } +// SetTokenValidityUnits sets the TokenValidityUnits field's value. +func (s *CreateUserPoolClientInput) SetTokenValidityUnits(v *TokenValidityUnitsType) *CreateUserPoolClientInput { + s.TokenValidityUnits = v + return s +} + // SetUserPoolId sets the UserPoolId field's value. func (s *CreateUserPoolClientInput) SetUserPoolId(v string) *CreateUserPoolClientInput { s.UserPoolId = &v @@ -17141,10 +17190,6 @@ type CreateUserPoolInput struct { // if the user also has SMS MFA enabled. In the absence of this setting, Cognito // uses the legacy behavior to determine the recovery method where SMS is preferred // over email. - // - // Starting February 1, 2020, the value of AccountRecoverySetting will default - // to verified_email first and verified_phone_number as the second option for - // newly created user pools if no value is provided. AccountRecoverySetting *AccountRecoverySettingType `type:"structure"` // The configuration for AdminCreateUser requests. @@ -17221,7 +17266,8 @@ type CreateUserPoolInput struct { // You can choose to set case sensitivity on the username input for the selected // sign-in option. For example, when this is set to False, users will be able // to sign in using either "username" or "Username". This configuration is immutable - // once it has been set. For more information, see . + // once it has been set. For more information, see UsernameConfigurationType + // (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html). 
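// Sketch (editorial, placeholder pool ID and values): the new AccessTokenValidity and
// IdTokenValidity fields combined with TokenValidityUnits added above, here requesting
// one-hour access/ID tokens and 30-day refresh tokens.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
)

func main() {
	input := &cognitoidentityprovider.CreateUserPoolClientInput{
		UserPoolId:           aws.String("us-east-1_EXAMPLE"), // placeholder
		ClientName:           aws.String("example-client"),
		AccessTokenValidity:  aws.Int64(1),
		IdTokenValidity:      aws.Int64(1),
		RefreshTokenValidity: aws.Int64(30),
		TokenValidityUnits: &cognitoidentityprovider.TokenValidityUnitsType{
			AccessToken:  aws.String(cognitoidentityprovider.TimeUnitsTypeHours),
			IdToken:      aws.String(cognitoidentityprovider.TimeUnitsTypeHours),
			RefreshToken: aws.String(cognitoidentityprovider.TimeUnitsTypeDays),
		},
	}
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(input)
}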
UsernameConfiguration *UsernameConfigurationType `type:"structure"` // The template for the verification message that the user sees when the app @@ -18821,8 +18867,8 @@ func (s *DomainDescriptionType) SetVersion(v string) *DomainDescriptionType { // This exception is thrown when the provider is already supported by the user // pool. type DuplicateProviderException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18839,17 +18885,17 @@ func (s DuplicateProviderException) GoString() string { func newErrorDuplicateProviderException(v protocol.ResponseMetadata) error { return &DuplicateProviderException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicateProviderException) Code() string { +func (s *DuplicateProviderException) Code() string { return "DuplicateProviderException" } // Message returns the exception's message. -func (s DuplicateProviderException) Message() string { +func (s *DuplicateProviderException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18857,22 +18903,22 @@ func (s DuplicateProviderException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicateProviderException) OrigErr() error { +func (s *DuplicateProviderException) OrigErr() error { return nil } -func (s DuplicateProviderException) Error() string { +func (s *DuplicateProviderException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicateProviderException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicateProviderException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DuplicateProviderException) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicateProviderException) RequestID() string { + return s.RespMetadata.RequestID } // The email configuration type. @@ -19017,8 +19063,8 @@ func (s *EmailConfigurationType) SetSourceArn(v string) *EmailConfigurationType // This exception is thrown when there is a code mismatch and the service fails // to configure the software token TOTP multi-factor authentication (MFA). type EnableSoftwareTokenMFAException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19035,17 +19081,17 @@ func (s EnableSoftwareTokenMFAException) GoString() string { func newErrorEnableSoftwareTokenMFAException(v protocol.ResponseMetadata) error { return &EnableSoftwareTokenMFAException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EnableSoftwareTokenMFAException) Code() string { +func (s *EnableSoftwareTokenMFAException) Code() string { return "EnableSoftwareTokenMFAException" } // Message returns the exception's message. -func (s EnableSoftwareTokenMFAException) Message() string { +func (s *EnableSoftwareTokenMFAException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19053,22 +19099,22 @@ func (s EnableSoftwareTokenMFAException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s EnableSoftwareTokenMFAException) OrigErr() error { +func (s *EnableSoftwareTokenMFAException) OrigErr() error { return nil } -func (s EnableSoftwareTokenMFAException) Error() string { +func (s *EnableSoftwareTokenMFAException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EnableSoftwareTokenMFAException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EnableSoftwareTokenMFAException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EnableSoftwareTokenMFAException) RequestID() string { - return s.respMetadata.RequestID +func (s *EnableSoftwareTokenMFAException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the user context data captured at the time of an event request. @@ -19181,6 +19227,10 @@ func (s *EventFeedbackType) SetProvider(v string) *EventFeedbackType { type EventRiskType struct { _ struct{} `type:"structure"` + // Indicates whether compromised credentials were detected during an authentication + // event. + CompromisedCredentialsDetected *bool `type:"boolean"` + // The risk decision. RiskDecision *string `type:"string" enum:"RiskDecisionType"` @@ -19198,6 +19248,12 @@ func (s EventRiskType) GoString() string { return s.String() } +// SetCompromisedCredentialsDetected sets the CompromisedCredentialsDetected field's value. +func (s *EventRiskType) SetCompromisedCredentialsDetected(v bool) *EventRiskType { + s.CompromisedCredentialsDetected = &v + return s +} + // SetRiskDecision sets the RiskDecision field's value. func (s *EventRiskType) SetRiskDecision(v string) *EventRiskType { s.RiskDecision = &v @@ -19212,8 +19268,8 @@ func (s *EventRiskType) SetRiskLevel(v string) *EventRiskType { // This exception is thrown if a code has expired. type ExpiredCodeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the expired code exception is thrown. Message_ *string `locationName:"message" type:"string"` @@ -19231,17 +19287,17 @@ func (s ExpiredCodeException) GoString() string { func newErrorExpiredCodeException(v protocol.ResponseMetadata) error { return &ExpiredCodeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ExpiredCodeException) Code() string { +func (s *ExpiredCodeException) Code() string { return "ExpiredCodeException" } // Message returns the exception's message. -func (s ExpiredCodeException) Message() string { +func (s *ExpiredCodeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19249,22 +19305,22 @@ func (s ExpiredCodeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ExpiredCodeException) OrigErr() error { +func (s *ExpiredCodeException) OrigErr() error { return nil } -func (s ExpiredCodeException) Error() string { +func (s *ExpiredCodeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ExpiredCodeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ExpiredCodeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ExpiredCodeException) RequestID() string { - return s.respMetadata.RequestID +func (s *ExpiredCodeException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the request to forget the device. @@ -20120,8 +20176,7 @@ type GetUserOutput struct { // This response parameter is no longer supported. It provides information only // about SMS MFA configurations. It doesn't provide information about TOTP software // token MFA configurations. To look up information about either type of MFA - // configuration, use the use the GetUserResponse$UserMFASettingList response - // instead. + // configuration, use UserMFASettingList instead. MFAOptions []*MFAOptionType `type:"list"` // The user's preferred MFA setting. @@ -20331,8 +20386,8 @@ func (s GlobalSignOutOutput) GoString() string { // This exception is thrown when Amazon Cognito encounters a group that already // exists in the user pool. type GroupExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20349,17 +20404,17 @@ func (s GroupExistsException) GoString() string { func newErrorGroupExistsException(v protocol.ResponseMetadata) error { return &GroupExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s GroupExistsException) Code() string { +func (s *GroupExistsException) Code() string { return "GroupExistsException" } // Message returns the exception's message. -func (s GroupExistsException) Message() string { +func (s *GroupExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20367,22 +20422,22 @@ func (s GroupExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s GroupExistsException) OrigErr() error { +func (s *GroupExistsException) OrigErr() error { return nil } -func (s GroupExistsException) Error() string { +func (s *GroupExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s GroupExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *GroupExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s GroupExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *GroupExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The group type. @@ -20529,8 +20584,9 @@ type IdentityProviderType struct { // The identity provider details. The following list describes the provider // detail keys for each identity provider type. // - // * For Google, Facebook and Login with Amazon: client_id client_secret - // authorize_scopes + // * For Google and Login with Amazon: client_id client_secret authorize_scopes + // + // * For Facebook: client_id client_secret authorize_scopes api_version // // * For Sign in with Apple: client_id team_id key_id private_key authorize_scopes // @@ -20661,14 +20717,16 @@ type InitiateAuthInput struct { // that you are invoking. 
The required values depend on the value of AuthFlow: // // * For USER_SRP_AUTH: USERNAME (required), SRP_A (required), SECRET_HASH - // (required if the app client is configured with a client secret), DEVICE_KEY + // (required if the app client is configured with a client secret), DEVICE_KEY. // // * For REFRESH_TOKEN_AUTH/REFRESH_TOKEN: REFRESH_TOKEN (required), SECRET_HASH - // (required if the app client is configured with a client secret), DEVICE_KEY + // (required if the app client is configured with a client secret), DEVICE_KEY. // // * For CUSTOM_AUTH: USERNAME (required), SECRET_HASH (if app client is - // configured with client secret), DEVICE_KEY - AuthParameters map[string]*string `type:"map"` + // configured with client secret), DEVICE_KEY. To start the authentication + // flow with password verification, include ChallengeName: SRP_A and SRP_A: + // (The SRP_A Value). + AuthParameters map[string]*string `type:"map" sensitive:"true"` // The app client ID. // @@ -20848,10 +20906,9 @@ type InitiateAuthOutput struct { ChallengeParameters map[string]*string `type:"map"` // The session which should be passed both ways in challenge-response calls - // to the service. If the or API call determines that the caller needs to go - // through another challenge, they return a session with other challenge parameters. - // This session should be passed as it is to the next RespondToAuthChallenge - // API call. + // to the service. If the caller needs to go through another challenge, they + // return a session with other challenge parameters. This session should be + // passed as it is to the next RespondToAuthChallenge API call. Session *string `min:"20" type:"string"` } @@ -20891,8 +20948,8 @@ func (s *InitiateAuthOutput) SetSession(v string) *InitiateAuthOutput { // This exception is thrown when Amazon Cognito encounters an internal error. type InternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when Amazon Cognito throws an internal error exception. Message_ *string `locationName:"message" type:"string"` @@ -20910,17 +20967,17 @@ func (s InternalErrorException) GoString() string { func newErrorInternalErrorException(v protocol.ResponseMetadata) error { return &InternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalErrorException) Code() string { +func (s *InternalErrorException) Code() string { return "InternalErrorException" } // Message returns the exception's message. -func (s InternalErrorException) Message() string { +func (s *InternalErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20928,29 +20985,29 @@ func (s InternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalErrorException) OrigErr() error { +func (s *InternalErrorException) OrigErr() error { return nil } -func (s InternalErrorException) Error() string { +func (s *InternalErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalErrorException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when Amazon Cognito is not allowed to use your email // identity. HTTP status code: 400. type InvalidEmailRoleAccessPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when you have an unverified email address or the identity // policy is not set on an email address that Amazon Cognito can access. @@ -20969,17 +21026,17 @@ func (s InvalidEmailRoleAccessPolicyException) GoString() string { func newErrorInvalidEmailRoleAccessPolicyException(v protocol.ResponseMetadata) error { return &InvalidEmailRoleAccessPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidEmailRoleAccessPolicyException) Code() string { +func (s *InvalidEmailRoleAccessPolicyException) Code() string { return "InvalidEmailRoleAccessPolicyException" } // Message returns the exception's message. -func (s InvalidEmailRoleAccessPolicyException) Message() string { +func (s *InvalidEmailRoleAccessPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20987,29 +21044,29 @@ func (s InvalidEmailRoleAccessPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidEmailRoleAccessPolicyException) OrigErr() error { +func (s *InvalidEmailRoleAccessPolicyException) OrigErr() error { return nil } -func (s InvalidEmailRoleAccessPolicyException) Error() string { +func (s *InvalidEmailRoleAccessPolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidEmailRoleAccessPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidEmailRoleAccessPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidEmailRoleAccessPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidEmailRoleAccessPolicyException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the Amazon Cognito service encounters an invalid // AWS Lambda response. type InvalidLambdaResponseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the Amazon Cognito service throws an invalid AWS // Lambda response exception. @@ -21028,17 +21085,17 @@ func (s InvalidLambdaResponseException) GoString() string { func newErrorInvalidLambdaResponseException(v protocol.ResponseMetadata) error { return &InvalidLambdaResponseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidLambdaResponseException) Code() string { +func (s *InvalidLambdaResponseException) Code() string { return "InvalidLambdaResponseException" } // Message returns the exception's message. 
-func (s InvalidLambdaResponseException) Message() string { +func (s *InvalidLambdaResponseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21046,28 +21103,28 @@ func (s InvalidLambdaResponseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidLambdaResponseException) OrigErr() error { +func (s *InvalidLambdaResponseException) OrigErr() error { return nil } -func (s InvalidLambdaResponseException) Error() string { +func (s *InvalidLambdaResponseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidLambdaResponseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidLambdaResponseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidLambdaResponseException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidLambdaResponseException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the specified OAuth flow is invalid. type InvalidOAuthFlowException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21084,17 +21141,17 @@ func (s InvalidOAuthFlowException) GoString() string { func newErrorInvalidOAuthFlowException(v protocol.ResponseMetadata) error { return &InvalidOAuthFlowException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOAuthFlowException) Code() string { +func (s *InvalidOAuthFlowException) Code() string { return "InvalidOAuthFlowException" } // Message returns the exception's message. -func (s InvalidOAuthFlowException) Message() string { +func (s *InvalidOAuthFlowException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21102,29 +21159,29 @@ func (s InvalidOAuthFlowException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOAuthFlowException) OrigErr() error { +func (s *InvalidOAuthFlowException) OrigErr() error { return nil } -func (s InvalidOAuthFlowException) Error() string { +func (s *InvalidOAuthFlowException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOAuthFlowException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOAuthFlowException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidOAuthFlowException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOAuthFlowException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the Amazon Cognito service encounters an invalid // parameter. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the Amazon Cognito service throws an invalid parameter // exception. 
@@ -21143,17 +21200,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21161,29 +21218,29 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the Amazon Cognito service encounters an invalid // password. type InvalidPasswordException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the Amazon Cognito service throws an invalid user // password exception. @@ -21202,17 +21259,17 @@ func (s InvalidPasswordException) GoString() string { func newErrorInvalidPasswordException(v protocol.ResponseMetadata) error { return &InvalidPasswordException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPasswordException) Code() string { +func (s *InvalidPasswordException) Code() string { return "InvalidPasswordException" } // Message returns the exception's message. -func (s InvalidPasswordException) Message() string { +func (s *InvalidPasswordException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21220,29 +21277,29 @@ func (s InvalidPasswordException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPasswordException) OrigErr() error { +func (s *InvalidPasswordException) OrigErr() error { return nil } -func (s InvalidPasswordException) Error() string { +func (s *InvalidPasswordException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPasswordException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPasswordException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidPasswordException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPasswordException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is returned when the role provided for SMS configuration does // not have permission to publish using Amazon SNS. type InvalidSmsRoleAccessPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message retuned when the invalid SMS role access policy exception is // thrown. @@ -21261,17 +21318,17 @@ func (s InvalidSmsRoleAccessPolicyException) GoString() string { func newErrorInvalidSmsRoleAccessPolicyException(v protocol.ResponseMetadata) error { return &InvalidSmsRoleAccessPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSmsRoleAccessPolicyException) Code() string { +func (s *InvalidSmsRoleAccessPolicyException) Code() string { return "InvalidSmsRoleAccessPolicyException" } // Message returns the exception's message. -func (s InvalidSmsRoleAccessPolicyException) Message() string { +func (s *InvalidSmsRoleAccessPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21279,22 +21336,22 @@ func (s InvalidSmsRoleAccessPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidSmsRoleAccessPolicyException) OrigErr() error { +func (s *InvalidSmsRoleAccessPolicyException) OrigErr() error { return nil } -func (s InvalidSmsRoleAccessPolicyException) Error() string { +func (s *InvalidSmsRoleAccessPolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSmsRoleAccessPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSmsRoleAccessPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSmsRoleAccessPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSmsRoleAccessPolicyException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the trust relationship is invalid for the role @@ -21302,8 +21359,8 @@ func (s InvalidSmsRoleAccessPolicyException) RequestID() string { // or the external ID provided in the role does not match what is provided in // the SMS configuration for the user pool. type InvalidSmsRoleTrustRelationshipException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the role trust relationship for the SMS message // is invalid. @@ -21322,17 +21379,17 @@ func (s InvalidSmsRoleTrustRelationshipException) GoString() string { func newErrorInvalidSmsRoleTrustRelationshipException(v protocol.ResponseMetadata) error { return &InvalidSmsRoleTrustRelationshipException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSmsRoleTrustRelationshipException) Code() string { +func (s *InvalidSmsRoleTrustRelationshipException) Code() string { return "InvalidSmsRoleTrustRelationshipException" } // Message returns the exception's message. 
-func (s InvalidSmsRoleTrustRelationshipException) Message() string { +func (s *InvalidSmsRoleTrustRelationshipException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21340,28 +21397,28 @@ func (s InvalidSmsRoleTrustRelationshipException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidSmsRoleTrustRelationshipException) OrigErr() error { +func (s *InvalidSmsRoleTrustRelationshipException) OrigErr() error { return nil } -func (s InvalidSmsRoleTrustRelationshipException) Error() string { +func (s *InvalidSmsRoleTrustRelationshipException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSmsRoleTrustRelationshipException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSmsRoleTrustRelationshipException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSmsRoleTrustRelationshipException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSmsRoleTrustRelationshipException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the user pool configuration is invalid. type InvalidUserPoolConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the user pool configuration is invalid. Message_ *string `locationName:"message" type:"string"` @@ -21379,17 +21436,17 @@ func (s InvalidUserPoolConfigurationException) GoString() string { func newErrorInvalidUserPoolConfigurationException(v protocol.ResponseMetadata) error { return &InvalidUserPoolConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidUserPoolConfigurationException) Code() string { +func (s *InvalidUserPoolConfigurationException) Code() string { return "InvalidUserPoolConfigurationException" } // Message returns the exception's message. -func (s InvalidUserPoolConfigurationException) Message() string { +func (s *InvalidUserPoolConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21397,22 +21454,22 @@ func (s InvalidUserPoolConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidUserPoolConfigurationException) OrigErr() error { +func (s *InvalidUserPoolConfigurationException) OrigErr() error { return nil } -func (s InvalidUserPoolConfigurationException) Error() string { +func (s *InvalidUserPoolConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidUserPoolConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidUserPoolConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidUserPoolConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidUserPoolConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the configuration for AWS Lambda triggers. 
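// Usage sketch (illustrative only, not part of the vendored SDK source; "err" and "ipe"
// are hypothetical names): the change running through the hunks above and below, pointer
// receivers plus the exported RespMetadata field, is what lets callers unwrap these typed
// Cognito errors with the standard errors package, assuming "errors", "log" and
// "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" are imported:
//
//	var ipe *cognitoidentityprovider.InvalidParameterException
//	if errors.As(err, &ipe) {
//		log.Printf("%s (request %s, HTTP %d): %s",
//			ipe.Code(), ipe.RequestID(), ipe.StatusCode(), ipe.Message())
//	}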
@@ -21563,8 +21620,8 @@ func (s *LambdaConfigType) SetVerifyAuthChallengeResponse(v string) *LambdaConfi // This exception is thrown when a user exceeds the limit for a requested AWS // resource. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when Amazon Cognito throws a limit exceeded exception. Message_ *string `locationName:"message" type:"string"` @@ -21582,17 +21639,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21600,22 +21657,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the request to list the devices. @@ -22641,8 +22698,8 @@ func (s *ListUsersOutput) SetUsers(v []*UserType) *ListUsersOutput { // This exception is thrown when Amazon Cognito cannot find a multi-factor authentication // (MFA) method. type MFAMethodNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when Amazon Cognito throws an MFA method not found exception. Message_ *string `locationName:"message" type:"string"` @@ -22660,17 +22717,17 @@ func (s MFAMethodNotFoundException) GoString() string { func newErrorMFAMethodNotFoundException(v protocol.ResponseMetadata) error { return &MFAMethodNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MFAMethodNotFoundException) Code() string { +func (s *MFAMethodNotFoundException) Code() string { return "MFAMethodNotFoundException" } // Message returns the exception's message. -func (s MFAMethodNotFoundException) Message() string { +func (s *MFAMethodNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22678,32 +22735,26 @@ func (s MFAMethodNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s MFAMethodNotFoundException) OrigErr() error { +func (s *MFAMethodNotFoundException) OrigErr() error { return nil } -func (s MFAMethodNotFoundException) Error() string { +func (s *MFAMethodNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MFAMethodNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MFAMethodNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MFAMethodNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *MFAMethodNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // This data type is no longer supported. You can use it only for SMS MFA configurations. // You can't use it for TOTP software token MFA configurations. -// -// To set either type of MFA configuration, use the AdminSetUserMFAPreference -// or SetUserMFAPreference actions. -// -// To look up information about either type of MFA configuration, use the AdminGetUserResponse$UserMFASettingList -// or GetUserResponse$UserMFASettingList responses. type MFAOptionType struct { _ struct{} `type:"structure"` @@ -22846,8 +22897,8 @@ func (s *NewDeviceMetadataType) SetDeviceKey(v string) *NewDeviceMetadataType { // This exception is thrown when a user is not authorized. type NotAuthorizedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the Amazon Cognito service returns a not authorized // exception. @@ -22866,17 +22917,17 @@ func (s NotAuthorizedException) GoString() string { func newErrorNotAuthorizedException(v protocol.ResponseMetadata) error { return &NotAuthorizedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotAuthorizedException) Code() string { +func (s *NotAuthorizedException) Code() string { return "NotAuthorizedException" } // Message returns the exception's message. -func (s NotAuthorizedException) Message() string { +func (s *NotAuthorizedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22884,22 +22935,22 @@ func (s NotAuthorizedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotAuthorizedException) OrigErr() error { +func (s *NotAuthorizedException) OrigErr() error { return nil } -func (s NotAuthorizedException) Error() string { +func (s *NotAuthorizedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotAuthorizedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotAuthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotAuthorizedException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotAuthorizedException) RequestID() string { + return s.RespMetadata.RequestID } // The notify configuration type. @@ -23204,8 +23255,8 @@ func (s *PasswordPolicyType) SetTemporaryPasswordValidityDays(v int64) *Password // This exception is thrown when a password reset is required. 
type PasswordResetRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when a password reset is required. Message_ *string `locationName:"message" type:"string"` @@ -23223,17 +23274,17 @@ func (s PasswordResetRequiredException) GoString() string { func newErrorPasswordResetRequiredException(v protocol.ResponseMetadata) error { return &PasswordResetRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PasswordResetRequiredException) Code() string { +func (s *PasswordResetRequiredException) Code() string { return "PasswordResetRequiredException" } // Message returns the exception's message. -func (s PasswordResetRequiredException) Message() string { +func (s *PasswordResetRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23241,28 +23292,28 @@ func (s PasswordResetRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PasswordResetRequiredException) OrigErr() error { +func (s *PasswordResetRequiredException) OrigErr() error { return nil } -func (s PasswordResetRequiredException) Error() string { +func (s *PasswordResetRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PasswordResetRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PasswordResetRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PasswordResetRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *PasswordResetRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when a precondition is not met. type PreconditionNotMetException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when a precondition is not met. Message_ *string `locationName:"message" type:"string"` @@ -23280,17 +23331,17 @@ func (s PreconditionNotMetException) GoString() string { func newErrorPreconditionNotMetException(v protocol.ResponseMetadata) error { return &PreconditionNotMetException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PreconditionNotMetException) Code() string { +func (s *PreconditionNotMetException) Code() string { return "PreconditionNotMetException" } // Message returns the exception's message. -func (s PreconditionNotMetException) Message() string { +func (s *PreconditionNotMetException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23298,22 +23349,22 @@ func (s PreconditionNotMetException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PreconditionNotMetException) OrigErr() error { +func (s *PreconditionNotMetException) OrigErr() error { return nil } -func (s PreconditionNotMetException) Error() string { +func (s *PreconditionNotMetException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s PreconditionNotMetException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PreconditionNotMetException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PreconditionNotMetException) RequestID() string { - return s.respMetadata.RequestID +func (s *PreconditionNotMetException) RequestID() string { + return s.RespMetadata.RequestID } // A container for identity provider details. @@ -23638,8 +23689,8 @@ func (s *ResendConfirmationCodeOutput) SetCodeDeliveryDetails(v *CodeDeliveryDet // This exception is thrown when the Amazon Cognito service cannot find the // requested resource. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the Amazon Cognito service returns a resource not // found exception. @@ -23658,17 +23709,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23676,22 +23727,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // A resource server scope. @@ -23812,7 +23863,7 @@ type RespondToAuthChallengeInput struct { // calls. AnalyticsMetadata *AnalyticsMetadataType `type:"structure"` - // The challenge name. For more information, see . + // The challenge name. For more information, see InitiateAuth (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_InitiateAuth.html). // // ADMIN_NO_SRP_AUTH is not a valid value. // @@ -23974,17 +24025,16 @@ type RespondToAuthChallengeOutput struct { // the authentication challenge. AuthenticationResult *AuthenticationResultType `type:"structure"` - // The challenge name. For more information, see . + // The challenge name. For more information, see InitiateAuth (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_InitiateAuth.html). ChallengeName *string `type:"string" enum:"ChallengeNameType"` - // The challenge parameters. For more information, see . + // The challenge parameters. 
For more information, see InitiateAuth (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_InitiateAuth.html). ChallengeParameters map[string]*string `type:"map"` // The session which should be passed both ways in challenge-response calls - // to the service. If the or API call determines that the caller needs to go - // through another challenge, they return a session with other challenge parameters. - // This session should be passed as it is to the next RespondToAuthChallenge - // API call. + // to the service. If the caller needs to go through another challenge, they + // return a session with other challenge parameters. This session should be + // passed as it is to the next RespondToAuthChallenge API call. Session *string `min:"20" type:"string"` } @@ -24177,7 +24227,7 @@ type SchemaAttributeType struct { // Specifies whether the attribute type is developer only. This attribute can // only be modified by an administrator. Users will not be able to modify this // attribute using their access token. For example, DeveloperOnlyAttribute can - // be modified using the API but cannot be updated using the API. + // be modified using AdminUpdateUserAttributes but cannot be updated using UpdateUserAttributes. DeveloperOnlyAttribute *bool `type:"boolean"` // Specifies whether the value of the attribute can be changed. @@ -24272,8 +24322,8 @@ func (s *SchemaAttributeType) SetStringAttributeConstraints(v *StringAttributeCo // This exception is thrown when the specified scope does not exist. type ScopeDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -24290,17 +24340,17 @@ func (s ScopeDoesNotExistException) GoString() string { func newErrorScopeDoesNotExistException(v protocol.ResponseMetadata) error { return &ScopeDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ScopeDoesNotExistException) Code() string { +func (s *ScopeDoesNotExistException) Code() string { return "ScopeDoesNotExistException" } // Message returns the exception's message. -func (s ScopeDoesNotExistException) Message() string { +func (s *ScopeDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -24308,22 +24358,22 @@ func (s ScopeDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ScopeDoesNotExistException) OrigErr() error { +func (s *ScopeDoesNotExistException) OrigErr() error { return nil } -func (s ScopeDoesNotExistException) Error() string { +func (s *ScopeDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ScopeDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ScopeDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ScopeDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *ScopeDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } type SetRiskConfigurationInput struct { @@ -25176,8 +25226,8 @@ func (s *SmsMfaConfigType) SetSmsConfiguration(v *SmsConfigurationType) *SmsMfaC // This exception is thrown when the software token TOTP multi-factor authentication // (MFA) is not enabled for the user pool. type SoftwareTokenMFANotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -25194,17 +25244,17 @@ func (s SoftwareTokenMFANotFoundException) GoString() string { func newErrorSoftwareTokenMFANotFoundException(v protocol.ResponseMetadata) error { return &SoftwareTokenMFANotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SoftwareTokenMFANotFoundException) Code() string { +func (s *SoftwareTokenMFANotFoundException) Code() string { return "SoftwareTokenMFANotFoundException" } // Message returns the exception's message. -func (s SoftwareTokenMFANotFoundException) Message() string { +func (s *SoftwareTokenMFANotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25212,22 +25262,22 @@ func (s SoftwareTokenMFANotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SoftwareTokenMFANotFoundException) OrigErr() error { +func (s *SoftwareTokenMFANotFoundException) OrigErr() error { return nil } -func (s SoftwareTokenMFANotFoundException) Error() string { +func (s *SoftwareTokenMFANotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SoftwareTokenMFANotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SoftwareTokenMFANotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SoftwareTokenMFANotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *SoftwareTokenMFANotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The type used for enabling software token MFA at the user pool level. @@ -25557,11 +25607,57 @@ func (s TagResourceOutput) GoString() string { return s.String() } +// The data type for TokenValidityUnits that specifics the time measurements +// for token validity. +type TokenValidityUnitsType struct { + _ struct{} `type:"structure"` + + // A time unit in “seconds”, “minutes”, “hours” or “days” for + // the value in AccessTokenValidity, defaults to hours. + AccessToken *string `type:"string" enum:"TimeUnitsType"` + + // A time unit in “seconds”, “minutes”, “hours” or “days” for + // the value in IdTokenValidity, defaults to hours. + IdToken *string `type:"string" enum:"TimeUnitsType"` + + // A time unit in “seconds”, “minutes”, “hours” or “days” for + // the value in RefreshTokenValidity, defaults to days. 
+ RefreshToken *string `type:"string" enum:"TimeUnitsType"` +} + +// String returns the string representation +func (s TokenValidityUnitsType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TokenValidityUnitsType) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *TokenValidityUnitsType) SetAccessToken(v string) *TokenValidityUnitsType { + s.AccessToken = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *TokenValidityUnitsType) SetIdToken(v string) *TokenValidityUnitsType { + s.IdToken = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *TokenValidityUnitsType) SetRefreshToken(v string) *TokenValidityUnitsType { + s.RefreshToken = &v + return s +} + // This exception is thrown when the user has made too many failed attempts // for a given action (e.g., sign in). type TooManyFailedAttemptsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the Amazon Cognito service returns a too many failed // attempts exception. @@ -25580,17 +25676,17 @@ func (s TooManyFailedAttemptsException) GoString() string { func newErrorTooManyFailedAttemptsException(v protocol.ResponseMetadata) error { return &TooManyFailedAttemptsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyFailedAttemptsException) Code() string { +func (s *TooManyFailedAttemptsException) Code() string { return "TooManyFailedAttemptsException" } // Message returns the exception's message. -func (s TooManyFailedAttemptsException) Message() string { +func (s *TooManyFailedAttemptsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25598,29 +25694,29 @@ func (s TooManyFailedAttemptsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyFailedAttemptsException) OrigErr() error { +func (s *TooManyFailedAttemptsException) OrigErr() error { return nil } -func (s TooManyFailedAttemptsException) Error() string { +func (s *TooManyFailedAttemptsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyFailedAttemptsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyFailedAttemptsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyFailedAttemptsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyFailedAttemptsException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the user has made too many requests for a given // operation. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the Amazon Cognito service returns a too many requests // exception. 
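// Usage sketch (illustrative only, not part of the vendored SDK source; the pool and client
// IDs are placeholders): the TokenValidityUnitsType added above pairs with the
// AccessTokenValidity, IdTokenValidity and TokenValidityUnits fields introduced further down
// in this diff, using the generated setters. The unit strings come from the TimeUnitsType
// enum ("seconds", "minutes", "hours", "days"):
//
//	units := &cognitoidentityprovider.TokenValidityUnitsType{}
//	units.SetAccessToken("hours")
//	units.SetIdToken("hours")
//	units.SetRefreshToken("days")
//
//	input := &cognitoidentityprovider.UpdateUserPoolClientInput{}
//	input.SetUserPoolId("us-east-1_EXAMPLE").
//		SetClientId("examplepoolclientid").
//		SetAccessTokenValidity(1).
//		SetIdTokenValidity(1).
//		SetTokenValidityUnits(units)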
@@ -25639,17 +25735,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25657,22 +25753,22 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } // A container for the UI customization information for a user pool's built-in @@ -25757,8 +25853,8 @@ func (s *UICustomizationType) SetUserPoolId(v string) *UICustomizationType { // This exception is thrown when the Amazon Cognito service encounters an unexpected // exception with the AWS Lambda service. type UnexpectedLambdaException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the Amazon Cognito service returns an unexpected // AWS Lambda exception. @@ -25777,17 +25873,17 @@ func (s UnexpectedLambdaException) GoString() string { func newErrorUnexpectedLambdaException(v protocol.ResponseMetadata) error { return &UnexpectedLambdaException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnexpectedLambdaException) Code() string { +func (s *UnexpectedLambdaException) Code() string { return "UnexpectedLambdaException" } // Message returns the exception's message. -func (s UnexpectedLambdaException) Message() string { +func (s *UnexpectedLambdaException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25795,28 +25891,28 @@ func (s UnexpectedLambdaException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnexpectedLambdaException) OrigErr() error { +func (s *UnexpectedLambdaException) OrigErr() error { return nil } -func (s UnexpectedLambdaException) Error() string { +func (s *UnexpectedLambdaException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnexpectedLambdaException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnexpectedLambdaException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s UnexpectedLambdaException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnexpectedLambdaException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the specified identifier is not supported. type UnsupportedIdentityProviderException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -25833,17 +25929,17 @@ func (s UnsupportedIdentityProviderException) GoString() string { func newErrorUnsupportedIdentityProviderException(v protocol.ResponseMetadata) error { return &UnsupportedIdentityProviderException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedIdentityProviderException) Code() string { +func (s *UnsupportedIdentityProviderException) Code() string { return "UnsupportedIdentityProviderException" } // Message returns the exception's message. -func (s UnsupportedIdentityProviderException) Message() string { +func (s *UnsupportedIdentityProviderException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25851,28 +25947,28 @@ func (s UnsupportedIdentityProviderException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedIdentityProviderException) OrigErr() error { +func (s *UnsupportedIdentityProviderException) OrigErr() error { return nil } -func (s UnsupportedIdentityProviderException) Error() string { +func (s *UnsupportedIdentityProviderException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedIdentityProviderException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedIdentityProviderException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedIdentityProviderException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedIdentityProviderException) RequestID() string { + return s.RespMetadata.RequestID } // The request failed because the user is in an unsupported state. type UnsupportedUserStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the user is in an unsupported state. Message_ *string `locationName:"message" type:"string"` @@ -25890,17 +25986,17 @@ func (s UnsupportedUserStateException) GoString() string { func newErrorUnsupportedUserStateException(v protocol.ResponseMetadata) error { return &UnsupportedUserStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedUserStateException) Code() string { +func (s *UnsupportedUserStateException) Code() string { return "UnsupportedUserStateException" } // Message returns the exception's message. -func (s UnsupportedUserStateException) Message() string { +func (s *UnsupportedUserStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25908,22 +26004,22 @@ func (s UnsupportedUserStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s UnsupportedUserStateException) OrigErr() error { +func (s *UnsupportedUserStateException) OrigErr() error { return nil } -func (s UnsupportedUserStateException) Error() string { +func (s *UnsupportedUserStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedUserStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedUserStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedUserStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedUserStateException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -26205,7 +26301,7 @@ type UpdateGroupInput struct { GroupName *string `min:"1" type:"string" required:"true"` // The new precedence value for the group. For more information about this parameter, - // see . + // see CreateGroup (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateGroup.html). Precedence *int64 `type:"integer"` // The new role ARN for the group. This is used for setting the cognito:roles @@ -26667,6 +26763,10 @@ func (s *UpdateUserAttributesOutput) SetCodeDeliveryDetailsList(v []*CodeDeliver type UpdateUserPoolClientInput struct { _ struct{} `type:"structure"` + // The time limit, after which the access token is no longer valid and cannot + // be used. + AccessTokenValidity *int64 `min:"1" type:"integer"` + // The allowed OAuth flows. // // Set to code to initiate a code grant flow, which provides an authorization @@ -26692,6 +26792,11 @@ type UpdateUserPoolClientInput struct { // The Amazon Pinpoint analytics configuration for collecting metrics for this // user pool. + // + // In regions where Pinpoint is not available, Cognito User Pools only supports + // sending events to Amazon Pinpoint projects in us-east-1. In regions where + // Pinpoint is available, Cognito User Pools will support sending events to + // Amazon Pinpoint projects within that same region. AnalyticsConfiguration *AnalyticsConfigurationType `type:"structure"` // A list of allowed redirect (callback) URLs for the identity providers. @@ -26762,6 +26867,10 @@ type UpdateUserPoolClientInput struct { // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. ExplicitAuthFlows []*string `type:"list"` + // The time limit, after which the ID token is no longer valid and cannot be + // used. + IdTokenValidity *int64 `min:"1" type:"integer"` + // A list of allowed logout URLs for the identity providers. LogoutURLs []*string `type:"list"` @@ -26781,24 +26890,6 @@ type UpdateUserPoolClientInput struct { // * LEGACY - This represents the old behavior of Cognito where user existence // related errors are not prevented. // - // This setting affects the behavior of following APIs: - // - // * AdminInitiateAuth - // - // * AdminRespondToAuthChallenge - // - // * InitiateAuth - // - // * RespondToAuthChallenge - // - // * ForgotPassword - // - // * ConfirmForgotPassword - // - // * ConfirmSignUp - // - // * ResendConfirmationCode - // // After February 15th 2020, the value of PreventUserExistenceErrors will default // to ENABLED for newly created user pool clients if no value is provided. 
PreventUserExistenceErrors *string `type:"string" enum:"PreventUserExistenceErrorTypes"` @@ -26814,6 +26905,10 @@ type UpdateUserPoolClientInput struct { // this client. SupportedIdentityProviders []*string `type:"list"` + // The units in which the validity times are represented in. Default for RefreshToken + // is days, and default for ID and access tokens are hours. + TokenValidityUnits *TokenValidityUnitsType `type:"structure"` + // The user pool ID for the user pool where you want to update the user pool // client. // @@ -26837,6 +26932,9 @@ func (s UpdateUserPoolClientInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateUserPoolClientInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateUserPoolClientInput"} + if s.AccessTokenValidity != nil && *s.AccessTokenValidity < 1 { + invalidParams.Add(request.NewErrParamMinValue("AccessTokenValidity", 1)) + } if s.ClientId == nil { invalidParams.Add(request.NewErrParamRequired("ClientId")) } @@ -26849,6 +26947,9 @@ func (s *UpdateUserPoolClientInput) Validate() error { if s.DefaultRedirectURI != nil && len(*s.DefaultRedirectURI) < 1 { invalidParams.Add(request.NewErrParamMinLen("DefaultRedirectURI", 1)) } + if s.IdTokenValidity != nil && *s.IdTokenValidity < 1 { + invalidParams.Add(request.NewErrParamMinValue("IdTokenValidity", 1)) + } if s.UserPoolId == nil { invalidParams.Add(request.NewErrParamRequired("UserPoolId")) } @@ -26867,6 +26968,12 @@ func (s *UpdateUserPoolClientInput) Validate() error { return nil } +// SetAccessTokenValidity sets the AccessTokenValidity field's value. +func (s *UpdateUserPoolClientInput) SetAccessTokenValidity(v int64) *UpdateUserPoolClientInput { + s.AccessTokenValidity = &v + return s +} + // SetAllowedOAuthFlows sets the AllowedOAuthFlows field's value. func (s *UpdateUserPoolClientInput) SetAllowedOAuthFlows(v []*string) *UpdateUserPoolClientInput { s.AllowedOAuthFlows = v @@ -26921,6 +27028,12 @@ func (s *UpdateUserPoolClientInput) SetExplicitAuthFlows(v []*string) *UpdateUse return s } +// SetIdTokenValidity sets the IdTokenValidity field's value. +func (s *UpdateUserPoolClientInput) SetIdTokenValidity(v int64) *UpdateUserPoolClientInput { + s.IdTokenValidity = &v + return s +} + // SetLogoutURLs sets the LogoutURLs field's value. func (s *UpdateUserPoolClientInput) SetLogoutURLs(v []*string) *UpdateUserPoolClientInput { s.LogoutURLs = v @@ -26951,6 +27064,12 @@ func (s *UpdateUserPoolClientInput) SetSupportedIdentityProviders(v []*string) * return s } +// SetTokenValidityUnits sets the TokenValidityUnits field's value. +func (s *UpdateUserPoolClientInput) SetTokenValidityUnits(v *TokenValidityUnitsType) *UpdateUserPoolClientInput { + s.TokenValidityUnits = v + return s +} + // SetUserPoolId sets the UserPoolId field's value. func (s *UpdateUserPoolClientInput) SetUserPoolId(v string) *UpdateUserPoolClientInput { s.UserPoolId = &v @@ -27404,8 +27523,8 @@ func (s *UserContextDataType) SetEncodedData(v string) *UserContextDataType { // This exception is thrown when you are trying to modify a user pool while // a user import job is in progress for that pool. type UserImportInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the user pool has an import job running. 
Message_ *string `locationName:"message" type:"string"` @@ -27423,17 +27542,17 @@ func (s UserImportInProgressException) GoString() string { func newErrorUserImportInProgressException(v protocol.ResponseMetadata) error { return &UserImportInProgressException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UserImportInProgressException) Code() string { +func (s *UserImportInProgressException) Code() string { return "UserImportInProgressException" } // Message returns the exception's message. -func (s UserImportInProgressException) Message() string { +func (s *UserImportInProgressException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27441,22 +27560,22 @@ func (s UserImportInProgressException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UserImportInProgressException) OrigErr() error { +func (s *UserImportInProgressException) OrigErr() error { return nil } -func (s UserImportInProgressException) Error() string { +func (s *UserImportInProgressException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UserImportInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UserImportInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UserImportInProgressException) RequestID() string { - return s.respMetadata.RequestID +func (s *UserImportInProgressException) RequestID() string { + return s.RespMetadata.RequestID } // The user import job type. @@ -27617,8 +27736,8 @@ func (s *UserImportJobType) SetUserPoolId(v string) *UserImportJobType { // This exception is thrown when the Amazon Cognito service encounters a user // validation exception with the AWS Lambda service. type UserLambdaValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when the Amazon Cognito service returns a user validation // exception with the AWS Lambda service. @@ -27637,17 +27756,17 @@ func (s UserLambdaValidationException) GoString() string { func newErrorUserLambdaValidationException(v protocol.ResponseMetadata) error { return &UserLambdaValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UserLambdaValidationException) Code() string { +func (s *UserLambdaValidationException) Code() string { return "UserLambdaValidationException" } // Message returns the exception's message. -func (s UserLambdaValidationException) Message() string { +func (s *UserLambdaValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27655,28 +27774,28 @@ func (s UserLambdaValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UserLambdaValidationException) OrigErr() error { +func (s *UserLambdaValidationException) OrigErr() error { return nil } -func (s UserLambdaValidationException) Error() string { +func (s *UserLambdaValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s UserLambdaValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UserLambdaValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UserLambdaValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *UserLambdaValidationException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when a user is not confirmed successfully. type UserNotConfirmedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when a user is not confirmed successfully. Message_ *string `locationName:"message" type:"string"` @@ -27694,17 +27813,17 @@ func (s UserNotConfirmedException) GoString() string { func newErrorUserNotConfirmedException(v protocol.ResponseMetadata) error { return &UserNotConfirmedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UserNotConfirmedException) Code() string { +func (s *UserNotConfirmedException) Code() string { return "UserNotConfirmedException" } // Message returns the exception's message. -func (s UserNotConfirmedException) Message() string { +func (s *UserNotConfirmedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27712,28 +27831,28 @@ func (s UserNotConfirmedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UserNotConfirmedException) OrigErr() error { +func (s *UserNotConfirmedException) OrigErr() error { return nil } -func (s UserNotConfirmedException) Error() string { +func (s *UserNotConfirmedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UserNotConfirmedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UserNotConfirmedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UserNotConfirmedException) RequestID() string { - return s.respMetadata.RequestID +func (s *UserNotConfirmedException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when a user is not found. type UserNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when a user is not found. Message_ *string `locationName:"message" type:"string"` @@ -27751,17 +27870,17 @@ func (s UserNotFoundException) GoString() string { func newErrorUserNotFoundException(v protocol.ResponseMetadata) error { return &UserNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UserNotFoundException) Code() string { +func (s *UserNotFoundException) Code() string { return "UserNotFoundException" } // Message returns the exception's message. -func (s UserNotFoundException) Message() string { +func (s *UserNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27769,28 +27888,28 @@ func (s UserNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s UserNotFoundException) OrigErr() error { +func (s *UserNotFoundException) OrigErr() error { return nil } -func (s UserNotFoundException) Error() string { +func (s *UserNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UserNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UserNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UserNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *UserNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when user pool add-ons are not enabled. type UserPoolAddOnNotEnabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -27807,17 +27926,17 @@ func (s UserPoolAddOnNotEnabledException) GoString() string { func newErrorUserPoolAddOnNotEnabledException(v protocol.ResponseMetadata) error { return &UserPoolAddOnNotEnabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UserPoolAddOnNotEnabledException) Code() string { +func (s *UserPoolAddOnNotEnabledException) Code() string { return "UserPoolAddOnNotEnabledException" } // Message returns the exception's message. -func (s UserPoolAddOnNotEnabledException) Message() string { +func (s *UserPoolAddOnNotEnabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27825,22 +27944,22 @@ func (s UserPoolAddOnNotEnabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UserPoolAddOnNotEnabledException) OrigErr() error { +func (s *UserPoolAddOnNotEnabledException) OrigErr() error { return nil } -func (s UserPoolAddOnNotEnabledException) Error() string { +func (s *UserPoolAddOnNotEnabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UserPoolAddOnNotEnabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UserPoolAddOnNotEnabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UserPoolAddOnNotEnabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *UserPoolAddOnNotEnabledException) RequestID() string { + return s.RespMetadata.RequestID } // The user pool add-ons type. @@ -27929,6 +28048,10 @@ func (s *UserPoolClientDescription) SetUserPoolId(v string) *UserPoolClientDescr type UserPoolClientType struct { _ struct{} `type:"structure"` + // The time limit, specified by tokenValidityUnits, defaulting to hours, after + // which the access token is no longer valid and cannot be used. + AccessTokenValidity *int64 `min:"1" type:"integer"` + // The allowed OAuth flows. // // Set to code to initiate a code grant flow, which provides an authorization @@ -27953,6 +28076,10 @@ type UserPoolClientType struct { AllowedOAuthScopes []*string `type:"list"` // The Amazon Pinpoint analytics configuration for the user pool client. 
+ // + // Cognito User Pools only supports sending events to Amazon Pinpoint projects + // in the US East (N. Virginia) us-east-1 Region, regardless of the region in + // which the user pool resides. AnalyticsConfiguration *AnalyticsConfigurationType `type:"structure"` // A list of allowed redirect (callback) URLs for the identity providers. @@ -28027,6 +28154,10 @@ type UserPoolClientType struct { // * ALLOW_REFRESH_TOKEN_AUTH: Enable authflow to refresh tokens. ExplicitAuthFlows []*string `type:"list"` + // The time limit, specified by tokenValidityUnits, defaulting to hours, after + // which the refresh token is no longer valid and cannot be used. + IdTokenValidity *int64 `min:"1" type:"integer"` + // The date the user pool client was last modified. LastModifiedDate *time.Time `type:"timestamp"` @@ -28049,24 +28180,6 @@ type UserPoolClientType struct { // * LEGACY - This represents the old behavior of Cognito where user existence // related errors are not prevented. // - // This setting affects the behavior of following APIs: - // - // * AdminInitiateAuth - // - // * AdminRespondToAuthChallenge - // - // * InitiateAuth - // - // * RespondToAuthChallenge - // - // * ForgotPassword - // - // * ConfirmForgotPassword - // - // * ConfirmSignUp - // - // * ResendConfirmationCode - // // After February 15th 2020, the value of PreventUserExistenceErrors will default // to ENABLED for newly created user pool clients if no value is provided. PreventUserExistenceErrors *string `type:"string" enum:"PreventUserExistenceErrorTypes"` @@ -28082,6 +28195,10 @@ type UserPoolClientType struct { // this client. SupportedIdentityProviders []*string `type:"list"` + // The time units used to specify the token validity times of their respective + // token. + TokenValidityUnits *TokenValidityUnitsType `type:"structure"` + // The user pool ID for the user pool client. UserPoolId *string `min:"1" type:"string"` @@ -28099,6 +28216,12 @@ func (s UserPoolClientType) GoString() string { return s.String() } +// SetAccessTokenValidity sets the AccessTokenValidity field's value. +func (s *UserPoolClientType) SetAccessTokenValidity(v int64) *UserPoolClientType { + s.AccessTokenValidity = &v + return s +} + // SetAllowedOAuthFlows sets the AllowedOAuthFlows field's value. func (s *UserPoolClientType) SetAllowedOAuthFlows(v []*string) *UserPoolClientType { s.AllowedOAuthFlows = v @@ -28165,6 +28288,12 @@ func (s *UserPoolClientType) SetExplicitAuthFlows(v []*string) *UserPoolClientTy return s } +// SetIdTokenValidity sets the IdTokenValidity field's value. +func (s *UserPoolClientType) SetIdTokenValidity(v int64) *UserPoolClientType { + s.IdTokenValidity = &v + return s +} + // SetLastModifiedDate sets the LastModifiedDate field's value. func (s *UserPoolClientType) SetLastModifiedDate(v time.Time) *UserPoolClientType { s.LastModifiedDate = &v @@ -28201,6 +28330,12 @@ func (s *UserPoolClientType) SetSupportedIdentityProviders(v []*string) *UserPoo return s } +// SetTokenValidityUnits sets the TokenValidityUnits field's value. +func (s *UserPoolClientType) SetTokenValidityUnits(v *TokenValidityUnitsType) *UserPoolClientType { + s.TokenValidityUnits = v + return s +} + // SetUserPoolId sets the UserPoolId field's value. func (s *UserPoolClientType) SetUserPoolId(v string) *UserPoolClientType { s.UserPoolId = &v @@ -28323,8 +28458,8 @@ func (s *UserPoolPolicyType) SetPasswordPolicy(v *PasswordPolicyType) *UserPoolP // This exception is thrown when a user pool tag cannot be set or updated. 
type UserPoolTaggingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -28341,17 +28476,17 @@ func (s UserPoolTaggingException) GoString() string { func newErrorUserPoolTaggingException(v protocol.ResponseMetadata) error { return &UserPoolTaggingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UserPoolTaggingException) Code() string { +func (s *UserPoolTaggingException) Code() string { return "UserPoolTaggingException" } // Message returns the exception's message. -func (s UserPoolTaggingException) Message() string { +func (s *UserPoolTaggingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28359,22 +28494,22 @@ func (s UserPoolTaggingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UserPoolTaggingException) OrigErr() error { +func (s *UserPoolTaggingException) OrigErr() error { return nil } -func (s UserPoolTaggingException) Error() string { +func (s *UserPoolTaggingException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UserPoolTaggingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UserPoolTaggingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UserPoolTaggingException) RequestID() string { - return s.respMetadata.RequestID +func (s *UserPoolTaggingException) RequestID() string { + return s.RespMetadata.RequestID } // A container for information about the user pool. @@ -28493,7 +28628,8 @@ type UserPoolType struct { // You can choose to enable case sensitivity on the username input for the selected // sign-in option. For example, when this is set to False, users will be able // to sign in using either "username" or "Username". This configuration is immutable - // once it has been set. For more information, see . + // once it has been set. For more information, see UsernameConfigurationType + // (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_UsernameConfigurationType.html). UsernameConfiguration *UsernameConfigurationType `type:"structure"` // The template for verification messages. @@ -28845,8 +28981,8 @@ func (s *UsernameConfigurationType) SetCaseSensitive(v bool) *UsernameConfigurat // This exception is thrown when Amazon Cognito encounters a user name that // already exists in the user pool. type UsernameExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message returned when Amazon Cognito throws a user name exists exception. Message_ *string `locationName:"message" type:"string"` @@ -28864,17 +29000,17 @@ func (s UsernameExistsException) GoString() string { func newErrorUsernameExistsException(v protocol.ResponseMetadata) error { return &UsernameExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UsernameExistsException) Code() string { +func (s *UsernameExistsException) Code() string { return "UsernameExistsException" } // Message returns the exception's message. 
-func (s UsernameExistsException) Message() string { +func (s *UsernameExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28882,22 +29018,22 @@ func (s UsernameExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UsernameExistsException) OrigErr() error { +func (s *UsernameExistsException) OrigErr() error { return nil } -func (s UsernameExistsException) Error() string { +func (s *UsernameExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UsernameExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UsernameExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UsernameExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *UsernameExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The template for verification messages. @@ -29008,7 +29144,8 @@ type VerifySoftwareTokenInput struct { // to the service. Session *string `min:"20" type:"string"` - // The one time password computed using the secret code returned by + // The one time password computed using the secret code returned by AssociateSoftwareToken" + // (https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AssociateSoftwareToken.html). // // UserCode is a required field UserCode *string `min:"6" type:"string" required:"true"` @@ -29203,6 +29340,16 @@ const ( AccountTakeoverEventActionTypeNoAction = "NO_ACTION" ) +// AccountTakeoverEventActionType_Values returns all elements of the AccountTakeoverEventActionType enum +func AccountTakeoverEventActionType_Values() []string { + return []string{ + AccountTakeoverEventActionTypeBlock, + AccountTakeoverEventActionTypeMfaIfConfigured, + AccountTakeoverEventActionTypeMfaRequired, + AccountTakeoverEventActionTypeNoAction, + } +} + const ( // AdvancedSecurityModeTypeOff is a AdvancedSecurityModeType enum value AdvancedSecurityModeTypeOff = "OFF" @@ -29214,6 +29361,15 @@ const ( AdvancedSecurityModeTypeEnforced = "ENFORCED" ) +// AdvancedSecurityModeType_Values returns all elements of the AdvancedSecurityModeType enum +func AdvancedSecurityModeType_Values() []string { + return []string{ + AdvancedSecurityModeTypeOff, + AdvancedSecurityModeTypeAudit, + AdvancedSecurityModeTypeEnforced, + } +} + const ( // AliasAttributeTypePhoneNumber is a AliasAttributeType enum value AliasAttributeTypePhoneNumber = "phone_number" @@ -29225,6 +29381,15 @@ const ( AliasAttributeTypePreferredUsername = "preferred_username" ) +// AliasAttributeType_Values returns all elements of the AliasAttributeType enum +func AliasAttributeType_Values() []string { + return []string{ + AliasAttributeTypePhoneNumber, + AliasAttributeTypeEmail, + AliasAttributeTypePreferredUsername, + } +} + const ( // AttributeDataTypeString is a AttributeDataType enum value AttributeDataTypeString = "String" @@ -29239,6 +29404,16 @@ const ( AttributeDataTypeBoolean = "Boolean" ) +// AttributeDataType_Values returns all elements of the AttributeDataType enum +func AttributeDataType_Values() []string { + return []string{ + AttributeDataTypeString, + AttributeDataTypeNumber, + AttributeDataTypeDateTime, + AttributeDataTypeBoolean, + } +} + const ( // AuthFlowTypeUserSrpAuth is a AuthFlowType enum value AuthFlowTypeUserSrpAuth = "USER_SRP_AUTH" 
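The *_Values() helpers added in the hunks above (for example AliasAttributeType_Values and AttributeDataType_Values) let callers enumerate an enum's allowed strings instead of hard-coding them. A minimal sketch, assuming the terraform-plugin-sdk validation helpers and an illustrative attribute wiring that is not part of this patch, of how a provider schema might consume one of these helpers:

// Sketch only: the plugin SDK import path and the schema shape are
// assumptions for illustration; only the cognitoidentityprovider enum
// helper comes from the vendored SDK in this patch.
package example

import (
	"github.com/aws/aws-sdk-go/service/cognitoidentityprovider"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// aliasAttributesSchema validates each element against the enum values
// exported by the SDK, so the allowed set stays in sync with the vendored
// aws-sdk-go version instead of a hand-maintained string slice.
func aliasAttributesSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeSet,
		Optional: true,
		Elem: &schema.Schema{
			Type:         schema.TypeString,
			ValidateFunc: validation.StringInSlice(cognitoidentityprovider.AliasAttributeType_Values(), false),
		},
	}
}

Keeping validation tied to the generated enum helpers is the main reason these functions are useful to the provider: bumping the vendored SDK automatically picks up newly supported enum values.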
@@ -29262,6 +29437,19 @@ const ( AuthFlowTypeAdminUserPasswordAuth = "ADMIN_USER_PASSWORD_AUTH" ) +// AuthFlowType_Values returns all elements of the AuthFlowType enum +func AuthFlowType_Values() []string { + return []string{ + AuthFlowTypeUserSrpAuth, + AuthFlowTypeRefreshTokenAuth, + AuthFlowTypeRefreshToken, + AuthFlowTypeCustomAuth, + AuthFlowTypeAdminNoSrpAuth, + AuthFlowTypeUserPasswordAuth, + AuthFlowTypeAdminUserPasswordAuth, + } +} + const ( // ChallengeNamePassword is a ChallengeName enum value ChallengeNamePassword = "Password" @@ -29270,6 +29458,14 @@ const ( ChallengeNameMfa = "Mfa" ) +// ChallengeName_Values returns all elements of the ChallengeName enum +func ChallengeName_Values() []string { + return []string{ + ChallengeNamePassword, + ChallengeNameMfa, + } +} + const ( // ChallengeNameTypeSmsMfa is a ChallengeNameType enum value ChallengeNameTypeSmsMfa = "SMS_MFA" @@ -29302,6 +29498,22 @@ const ( ChallengeNameTypeNewPasswordRequired = "NEW_PASSWORD_REQUIRED" ) +// ChallengeNameType_Values returns all elements of the ChallengeNameType enum +func ChallengeNameType_Values() []string { + return []string{ + ChallengeNameTypeSmsMfa, + ChallengeNameTypeSoftwareTokenMfa, + ChallengeNameTypeSelectMfaType, + ChallengeNameTypeMfaSetup, + ChallengeNameTypePasswordVerifier, + ChallengeNameTypeCustomChallenge, + ChallengeNameTypeDeviceSrpAuth, + ChallengeNameTypeDevicePasswordVerifier, + ChallengeNameTypeAdminNoSrpAuth, + ChallengeNameTypeNewPasswordRequired, + } +} + const ( // ChallengeResponseSuccess is a ChallengeResponse enum value ChallengeResponseSuccess = "Success" @@ -29310,6 +29522,14 @@ const ( ChallengeResponseFailure = "Failure" ) +// ChallengeResponse_Values returns all elements of the ChallengeResponse enum +func ChallengeResponse_Values() []string { + return []string{ + ChallengeResponseSuccess, + ChallengeResponseFailure, + } +} + const ( // CompromisedCredentialsEventActionTypeBlock is a CompromisedCredentialsEventActionType enum value CompromisedCredentialsEventActionTypeBlock = "BLOCK" @@ -29318,6 +29538,14 @@ const ( CompromisedCredentialsEventActionTypeNoAction = "NO_ACTION" ) +// CompromisedCredentialsEventActionType_Values returns all elements of the CompromisedCredentialsEventActionType enum +func CompromisedCredentialsEventActionType_Values() []string { + return []string{ + CompromisedCredentialsEventActionTypeBlock, + CompromisedCredentialsEventActionTypeNoAction, + } +} + const ( // DefaultEmailOptionTypeConfirmWithLink is a DefaultEmailOptionType enum value DefaultEmailOptionTypeConfirmWithLink = "CONFIRM_WITH_LINK" @@ -29326,6 +29554,14 @@ const ( DefaultEmailOptionTypeConfirmWithCode = "CONFIRM_WITH_CODE" ) +// DefaultEmailOptionType_Values returns all elements of the DefaultEmailOptionType enum +func DefaultEmailOptionType_Values() []string { + return []string{ + DefaultEmailOptionTypeConfirmWithLink, + DefaultEmailOptionTypeConfirmWithCode, + } +} + const ( // DeliveryMediumTypeSms is a DeliveryMediumType enum value DeliveryMediumTypeSms = "SMS" @@ -29334,6 +29570,14 @@ const ( DeliveryMediumTypeEmail = "EMAIL" ) +// DeliveryMediumType_Values returns all elements of the DeliveryMediumType enum +func DeliveryMediumType_Values() []string { + return []string{ + DeliveryMediumTypeSms, + DeliveryMediumTypeEmail, + } +} + const ( // DeviceRememberedStatusTypeRemembered is a DeviceRememberedStatusType enum value DeviceRememberedStatusTypeRemembered = "remembered" @@ -29342,6 +29586,14 @@ const ( DeviceRememberedStatusTypeNotRemembered = "not_remembered" ) +// 
DeviceRememberedStatusType_Values returns all elements of the DeviceRememberedStatusType enum +func DeviceRememberedStatusType_Values() []string { + return []string{ + DeviceRememberedStatusTypeRemembered, + DeviceRememberedStatusTypeNotRemembered, + } +} + const ( // DomainStatusTypeCreating is a DomainStatusType enum value DomainStatusTypeCreating = "CREATING" @@ -29359,6 +29611,17 @@ const ( DomainStatusTypeFailed = "FAILED" ) +// DomainStatusType_Values returns all elements of the DomainStatusType enum +func DomainStatusType_Values() []string { + return []string{ + DomainStatusTypeCreating, + DomainStatusTypeDeleting, + DomainStatusTypeUpdating, + DomainStatusTypeActive, + DomainStatusTypeFailed, + } +} + const ( // EmailSendingAccountTypeCognitoDefault is a EmailSendingAccountType enum value EmailSendingAccountTypeCognitoDefault = "COGNITO_DEFAULT" @@ -29367,6 +29630,14 @@ const ( EmailSendingAccountTypeDeveloper = "DEVELOPER" ) +// EmailSendingAccountType_Values returns all elements of the EmailSendingAccountType enum +func EmailSendingAccountType_Values() []string { + return []string{ + EmailSendingAccountTypeCognitoDefault, + EmailSendingAccountTypeDeveloper, + } +} + const ( // EventFilterTypeSignIn is a EventFilterType enum value EventFilterTypeSignIn = "SIGN_IN" @@ -29378,6 +29649,15 @@ const ( EventFilterTypeSignUp = "SIGN_UP" ) +// EventFilterType_Values returns all elements of the EventFilterType enum +func EventFilterType_Values() []string { + return []string{ + EventFilterTypeSignIn, + EventFilterTypePasswordChange, + EventFilterTypeSignUp, + } +} + const ( // EventResponseTypeSuccess is a EventResponseType enum value EventResponseTypeSuccess = "Success" @@ -29386,6 +29666,14 @@ const ( EventResponseTypeFailure = "Failure" ) +// EventResponseType_Values returns all elements of the EventResponseType enum +func EventResponseType_Values() []string { + return []string{ + EventResponseTypeSuccess, + EventResponseTypeFailure, + } +} + const ( // EventTypeSignIn is a EventType enum value EventTypeSignIn = "SignIn" @@ -29397,6 +29685,15 @@ const ( EventTypeForgotPassword = "ForgotPassword" ) +// EventType_Values returns all elements of the EventType enum +func EventType_Values() []string { + return []string{ + EventTypeSignIn, + EventTypeSignUp, + EventTypeForgotPassword, + } +} + const ( // ExplicitAuthFlowsTypeAdminNoSrpAuth is a ExplicitAuthFlowsType enum value ExplicitAuthFlowsTypeAdminNoSrpAuth = "ADMIN_NO_SRP_AUTH" @@ -29423,6 +29720,20 @@ const ( ExplicitAuthFlowsTypeAllowRefreshTokenAuth = "ALLOW_REFRESH_TOKEN_AUTH" ) +// ExplicitAuthFlowsType_Values returns all elements of the ExplicitAuthFlowsType enum +func ExplicitAuthFlowsType_Values() []string { + return []string{ + ExplicitAuthFlowsTypeAdminNoSrpAuth, + ExplicitAuthFlowsTypeCustomAuthFlowOnly, + ExplicitAuthFlowsTypeUserPasswordAuth, + ExplicitAuthFlowsTypeAllowAdminUserPasswordAuth, + ExplicitAuthFlowsTypeAllowCustomAuth, + ExplicitAuthFlowsTypeAllowUserPasswordAuth, + ExplicitAuthFlowsTypeAllowUserSrpAuth, + ExplicitAuthFlowsTypeAllowRefreshTokenAuth, + } +} + const ( // FeedbackValueTypeValid is a FeedbackValueType enum value FeedbackValueTypeValid = "Valid" @@ -29431,6 +29742,14 @@ const ( FeedbackValueTypeInvalid = "Invalid" ) +// FeedbackValueType_Values returns all elements of the FeedbackValueType enum +func FeedbackValueType_Values() []string { + return []string{ + FeedbackValueTypeValid, + FeedbackValueTypeInvalid, + } +} + const ( // IdentityProviderTypeTypeSaml is a IdentityProviderTypeType enum value 
IdentityProviderTypeTypeSaml = "SAML" @@ -29451,6 +29770,18 @@ const ( IdentityProviderTypeTypeOidc = "OIDC" ) +// IdentityProviderTypeType_Values returns all elements of the IdentityProviderTypeType enum +func IdentityProviderTypeType_Values() []string { + return []string{ + IdentityProviderTypeTypeSaml, + IdentityProviderTypeTypeFacebook, + IdentityProviderTypeTypeGoogle, + IdentityProviderTypeTypeLoginWithAmazon, + IdentityProviderTypeTypeSignInWithApple, + IdentityProviderTypeTypeOidc, + } +} + const ( // MessageActionTypeResend is a MessageActionType enum value MessageActionTypeResend = "RESEND" @@ -29459,6 +29790,14 @@ const ( MessageActionTypeSuppress = "SUPPRESS" ) +// MessageActionType_Values returns all elements of the MessageActionType enum +func MessageActionType_Values() []string { + return []string{ + MessageActionTypeResend, + MessageActionTypeSuppress, + } +} + const ( // OAuthFlowTypeCode is a OAuthFlowType enum value OAuthFlowTypeCode = "code" @@ -29470,6 +29809,15 @@ const ( OAuthFlowTypeClientCredentials = "client_credentials" ) +// OAuthFlowType_Values returns all elements of the OAuthFlowType enum +func OAuthFlowType_Values() []string { + return []string{ + OAuthFlowTypeCode, + OAuthFlowTypeImplicit, + OAuthFlowTypeClientCredentials, + } +} + const ( // PreventUserExistenceErrorTypesLegacy is a PreventUserExistenceErrorTypes enum value PreventUserExistenceErrorTypesLegacy = "LEGACY" @@ -29478,6 +29826,14 @@ const ( PreventUserExistenceErrorTypesEnabled = "ENABLED" ) +// PreventUserExistenceErrorTypes_Values returns all elements of the PreventUserExistenceErrorTypes enum +func PreventUserExistenceErrorTypes_Values() []string { + return []string{ + PreventUserExistenceErrorTypesLegacy, + PreventUserExistenceErrorTypesEnabled, + } +} + const ( // RecoveryOptionNameTypeVerifiedEmail is a RecoveryOptionNameType enum value RecoveryOptionNameTypeVerifiedEmail = "verified_email" @@ -29489,6 +29845,15 @@ const ( RecoveryOptionNameTypeAdminOnly = "admin_only" ) +// RecoveryOptionNameType_Values returns all elements of the RecoveryOptionNameType enum +func RecoveryOptionNameType_Values() []string { + return []string{ + RecoveryOptionNameTypeVerifiedEmail, + RecoveryOptionNameTypeVerifiedPhoneNumber, + RecoveryOptionNameTypeAdminOnly, + } +} + const ( // RiskDecisionTypeNoRisk is a RiskDecisionType enum value RiskDecisionTypeNoRisk = "NoRisk" @@ -29500,6 +29865,15 @@ const ( RiskDecisionTypeBlock = "Block" ) +// RiskDecisionType_Values returns all elements of the RiskDecisionType enum +func RiskDecisionType_Values() []string { + return []string{ + RiskDecisionTypeNoRisk, + RiskDecisionTypeAccountTakeover, + RiskDecisionTypeBlock, + } +} + const ( // RiskLevelTypeLow is a RiskLevelType enum value RiskLevelTypeLow = "Low" @@ -29511,6 +29885,15 @@ const ( RiskLevelTypeHigh = "High" ) +// RiskLevelType_Values returns all elements of the RiskLevelType enum +func RiskLevelType_Values() []string { + return []string{ + RiskLevelTypeLow, + RiskLevelTypeMedium, + RiskLevelTypeHigh, + } +} + const ( // StatusTypeEnabled is a StatusType enum value StatusTypeEnabled = "Enabled" @@ -29519,6 +29902,38 @@ const ( StatusTypeDisabled = "Disabled" ) +// StatusType_Values returns all elements of the StatusType enum +func StatusType_Values() []string { + return []string{ + StatusTypeEnabled, + StatusTypeDisabled, + } +} + +const ( + // TimeUnitsTypeSeconds is a TimeUnitsType enum value + TimeUnitsTypeSeconds = "seconds" + + // TimeUnitsTypeMinutes is a TimeUnitsType enum value + TimeUnitsTypeMinutes 
= "minutes" + + // TimeUnitsTypeHours is a TimeUnitsType enum value + TimeUnitsTypeHours = "hours" + + // TimeUnitsTypeDays is a TimeUnitsType enum value + TimeUnitsTypeDays = "days" +) + +// TimeUnitsType_Values returns all elements of the TimeUnitsType enum +func TimeUnitsType_Values() []string { + return []string{ + TimeUnitsTypeSeconds, + TimeUnitsTypeMinutes, + TimeUnitsTypeHours, + TimeUnitsTypeDays, + } +} + const ( // UserImportJobStatusTypeCreated is a UserImportJobStatusType enum value UserImportJobStatusTypeCreated = "Created" @@ -29545,6 +29960,20 @@ const ( UserImportJobStatusTypeSucceeded = "Succeeded" ) +// UserImportJobStatusType_Values returns all elements of the UserImportJobStatusType enum +func UserImportJobStatusType_Values() []string { + return []string{ + UserImportJobStatusTypeCreated, + UserImportJobStatusTypePending, + UserImportJobStatusTypeInProgress, + UserImportJobStatusTypeStopping, + UserImportJobStatusTypeExpired, + UserImportJobStatusTypeStopped, + UserImportJobStatusTypeFailed, + UserImportJobStatusTypeSucceeded, + } +} + const ( // UserPoolMfaTypeOff is a UserPoolMfaType enum value UserPoolMfaTypeOff = "OFF" @@ -29556,6 +29985,15 @@ const ( UserPoolMfaTypeOptional = "OPTIONAL" ) +// UserPoolMfaType_Values returns all elements of the UserPoolMfaType enum +func UserPoolMfaType_Values() []string { + return []string{ + UserPoolMfaTypeOff, + UserPoolMfaTypeOn, + UserPoolMfaTypeOptional, + } +} + const ( // UserStatusTypeUnconfirmed is a UserStatusType enum value UserStatusTypeUnconfirmed = "UNCONFIRMED" @@ -29579,6 +30017,19 @@ const ( UserStatusTypeForceChangePassword = "FORCE_CHANGE_PASSWORD" ) +// UserStatusType_Values returns all elements of the UserStatusType enum +func UserStatusType_Values() []string { + return []string{ + UserStatusTypeUnconfirmed, + UserStatusTypeConfirmed, + UserStatusTypeArchived, + UserStatusTypeCompromised, + UserStatusTypeUnknown, + UserStatusTypeResetRequired, + UserStatusTypeForceChangePassword, + } +} + const ( // UsernameAttributeTypePhoneNumber is a UsernameAttributeType enum value UsernameAttributeTypePhoneNumber = "phone_number" @@ -29587,6 +30038,14 @@ const ( UsernameAttributeTypeEmail = "email" ) +// UsernameAttributeType_Values returns all elements of the UsernameAttributeType enum +func UsernameAttributeType_Values() []string { + return []string{ + UsernameAttributeTypePhoneNumber, + UsernameAttributeTypeEmail, + } +} + const ( // VerifiedAttributeTypePhoneNumber is a VerifiedAttributeType enum value VerifiedAttributeTypePhoneNumber = "phone_number" @@ -29595,6 +30054,14 @@ const ( VerifiedAttributeTypeEmail = "email" ) +// VerifiedAttributeType_Values returns all elements of the VerifiedAttributeType enum +func VerifiedAttributeType_Values() []string { + return []string{ + VerifiedAttributeTypePhoneNumber, + VerifiedAttributeTypeEmail, + } +} + const ( // VerifySoftwareTokenResponseTypeSuccess is a VerifySoftwareTokenResponseType enum value VerifySoftwareTokenResponseTypeSuccess = "SUCCESS" @@ -29602,3 +30069,11 @@ const ( // VerifySoftwareTokenResponseTypeError is a VerifySoftwareTokenResponseType enum value VerifySoftwareTokenResponseTypeError = "ERROR" ) + +// VerifySoftwareTokenResponseType_Values returns all elements of the VerifySoftwareTokenResponseType enum +func VerifySoftwareTokenResponseType_Values() []string { + return []string{ + VerifySoftwareTokenResponseTypeSuccess, + VerifySoftwareTokenResponseTypeError, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go 
b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go index 59ecc0752..142666df4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go index 42c7282c8..8c05a4146 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/configservice/api.go @@ -915,8 +915,12 @@ func (c *ConfigService) DeleteOrganizationConfigRuleRequest(input *DeleteOrganiz // DeleteOrganizationConfigRule API operation for AWS Config. // // Deletes the specified organization config rule and all of its evaluation -// results from all member accounts in that organization. Only a master account -// can delete an organization config rule. +// results from all member accounts in that organization. +// +// Only a master account and a delegated administrator account can delete an +// organization config rule. When calling this API with a delegated administrator, +// you must ensure AWS Organizations ListDelegatedAdministrator permissions +// are added. // // AWS Config sets the state of a rule to DELETE_IN_PROGRESS until the deletion // is complete. You cannot update a rule while it is in this state. @@ -1036,7 +1040,11 @@ func (c *ConfigService) DeleteOrganizationConformancePackRequest(input *DeleteOr // // Deletes the specified organization conformance pack and all of the config // rules and remediation actions from all member accounts in that organization. -// Only a master account can delete an organization conformance pack. +// +// Only a master account or a delegated administrator account can delete an +// organization conformance pack. When calling this API with a delegated administrator, +// you must ensure AWS Organizations ListDelegatedAdministrator permissions +// are added. // // AWS Config sets the state of a conformance pack to DELETE_IN_PROGRESS until // the deletion is complete. You cannot update a conformance pack while it is @@ -1343,6 +1351,10 @@ func (c *ConfigService) DeleteRemediationExceptionsRequest(input *DeleteRemediat // // Deletes one or more remediation exceptions mentioned in the resource keys. // +// AWS Config generates a remediation exception when a problem occurs executing +// a remediation action to a specific resource. Remediation exceptions blocks +// auto-remediation until the exception is cleared. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2727,6 +2739,10 @@ func (c *ConfigService) DescribeConformancePackStatusRequest(input *DescribeConf // The specified next token is invalid. Specify the nextToken string that was // returned in the previous response to get the next page of results. // +// * InvalidParameterValueException +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeConformancePackStatus func (c *ConfigService) DescribeConformancePackStatus(input *DescribeConformancePackStatusInput) (*DescribeConformancePackStatusOutput, error) { req, out := c.DescribeConformancePackStatusRequest(input) @@ -2813,6 +2829,10 @@ func (c *ConfigService) DescribeConformancePacksRequest(input *DescribeConforman // The specified next token is invalid. Specify the nextToken string that was // returned in the previous response to get the next page of results. // +// * InvalidParameterValueException +// One or more of the specified parameters are invalid. Verify that your parameters +// are valid and try again. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/DescribeConformancePacks func (c *ConfigService) DescribeConformancePacks(input *DescribeConformancePacksInput) (*DescribeConformancePacksOutput, error) { req, out := c.DescribeConformancePacksRequest(input) @@ -3047,6 +3067,10 @@ func (c *ConfigService) DescribeOrganizationConfigRuleStatusesRequest(input *Des // // Provides organization config rule deployment status for an organization. // +// Only a master account and a delegated administrator account can call this +// API. When calling this API with a delegated administrator, you must ensure +// AWS Organizations ListDelegatedAdministrator permissions are added. +// // The status is not considered successful until organization config rule is // successfully deployed in all the member accounts with an exception of excluded // accounts. @@ -3056,8 +3080,6 @@ func (c *ConfigService) DescribeOrganizationConfigRuleStatusesRequest(input *Des // rule names. It is only applicable, when you request all the organization // config rules. // -// Only a master account can call this API. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3152,13 +3174,15 @@ func (c *ConfigService) DescribeOrganizationConfigRulesRequest(input *DescribeOr // // Returns a list of organization config rules. // +// Only a master account and a delegated administrator account can call this +// API. When calling this API with a delegated administrator, you must ensure +// AWS Organizations ListDelegatedAdministrator permissions are added. +// // When you specify the limit and the next token, you receive a paginated response. // Limit and next token are not applicable if you specify organization config // rule names. It is only applicable, when you request all the organization // config rules. // -// Only a master account can call this API. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3253,6 +3277,10 @@ func (c *ConfigService) DescribeOrganizationConformancePackStatusesRequest(input // // Provides organization conformance pack deployment status for an organization. // +// Only a master account and a delegated administrator account can call this +// API. When calling this API with a delegated administrator, you must ensure +// AWS Organizations ListDelegatedAdministrator permissions are added. +// // The status is not considered successful until organization conformance pack // is successfully deployed in all the member accounts with an exception of // excluded accounts. 
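The doc updates above extend DescribeOrganizationConformancePackStatuses (and the related organization config rule APIs) to delegated administrator accounts, provided the AWS Organizations ListDelegatedAdministrators permission is granted. A minimal sketch, assuming default credential and region setup, of paging through the statuses with this vendored SDK; it is illustrative only and not part of the patch:

// Sketch only: lists organization conformance pack deployment statuses,
// following NextToken until the paginated response is exhausted, as the
// doc comments above describe.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/configservice"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := configservice.New(sess)

	input := &configservice.DescribeOrganizationConformancePackStatusesInput{}
	for {
		out, err := svc.DescribeOrganizationConformancePackStatuses(input)
		if err != nil {
			log.Fatalf("describe organization conformance pack statuses: %v", err)
		}
		for _, status := range out.OrganizationConformancePackStatuses {
			fmt.Println(status) // per-pack deployment status, e.g. CREATE_SUCCESSFUL
		}
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}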
@@ -3262,8 +3290,6 @@ func (c *ConfigService) DescribeOrganizationConformancePackStatusesRequest(input // pack names. They are only applicable, when you request all the organization // conformance packs. // -// Only a master account can call this API. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3362,14 +3388,16 @@ func (c *ConfigService) DescribeOrganizationConformancePacksRequest(input *Descr // // Returns a list of organization conformance packs. // +// Only a master account and a delegated administrator account can call this +// API. When calling this API with a delegated administrator, you must ensure +// AWS Organizations ListDelegatedAdministrator permissions are added. +// // When you specify the limit and the next token, you receive a paginated response. // // Limit and next token are not applicable if you specify organization conformance // packs names. They are only applicable, when you request all the organization // conformance packs. // -// Only a master account can call this API. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3638,6 +3666,10 @@ func (c *ConfigService) DescribeRemediationExceptionsRequest(input *DescribeReme // of an exception and the time when the exception will be deleted. When you // specify the limit and the next token, you receive a paginated response. // +// AWS Config generates a remediation exception when a problem occurs executing +// a remediation action to a specific resource. Remediation exceptions blocks +// auto-remediation until the exception is cleared. +// // When you specify the limit and the next token, you receive a paginated response. // // Limit and next token are not applicable if you request resources in batch. @@ -5019,7 +5051,9 @@ func (c *ConfigService) GetOrganizationConfigRuleDetailedStatusRequest(input *Ge // Returns detailed status for each member account within an organization for // a given organization config rule. // -// Only a master account can call this API. +// Only a master account and a delegated administrator account can call this +// API. When calling this API with a delegated administrator, you must ensure +// AWS Organizations ListDelegatedAdministrator permissions are added. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5116,7 +5150,9 @@ func (c *ConfigService) GetOrganizationConformancePackDetailedStatusRequest(inpu // Returns detailed status for each member account within an organization for // a given organization conformance pack. // -// Only a master account can call this API. +// Only a master account and a delegated administrator account can call this +// API. When calling this API with a delegated administrator, you must ensure +// AWS Organizations ListDelegatedAdministrator permissions are added. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6143,8 +6179,7 @@ func (c *ConfigService) PutConformancePackRequest(input *PutConformancePackInput // // This API creates a service linked role AWSServiceRoleForConfigConforms in // your account. 
The service linked role is created only when the role does -// not exist in your account. AWS Config verifies the existence of role with -// GetRole action. +// not exist in your account. // // You must specify either the TemplateS3Uri or the TemplateBody parameter, // but not both. If you provide both AWS Config uses the TemplateS3Uri parameter @@ -6481,25 +6516,36 @@ func (c *ConfigService) PutOrganizationConfigRuleRequest(input *PutOrganizationC // PutOrganizationConfigRule API operation for AWS Config. // // Adds or updates organization config rule for your entire organization evaluating -// whether your AWS resources comply with your desired configurations. Only -// a master account can create or update an organization config rule. +// whether your AWS resources comply with your desired configurations. +// +// Only a master account and a delegated administrator can create or update +// an organization config rule. When calling this API with a delegated administrator, +// you must ensure AWS Organizations ListDelegatedAdministrator permissions +// are added. // // This API enables organization service access through the EnableAWSServiceAccess // action and creates a service linked role AWSServiceRoleForConfigMultiAccountSetup -// in the master account of your organization. The service linked role is created -// only when the role does not exist in the master account. AWS Config verifies -// the existence of role with GetRole action. +// in the master or delegated administrator account of your organization. The +// service linked role is created only when the role does not exist in the caller +// account. AWS Config verifies the existence of role with GetRole action. +// +// To use this API with delegated administrator, register a delegated administrator +// by calling AWS Organization register-delegated-administrator for config-multiaccountsetup.amazonaws.com. // // You can use this action to create both custom AWS Config rules and AWS managed // Config rules. If you are adding a new custom AWS Config rule, you must first -// create AWS Lambda function in the master account that the rule invokes to -// evaluate your resources. When you use the PutOrganizationConfigRule action -// to add the rule to AWS Config, you must specify the Amazon Resource Name -// (ARN) that AWS Lambda assigns to the function. If you are adding an AWS managed -// Config rule, specify the rule's identifier for the RuleIdentifier key. +// create AWS Lambda function in the master account or a delegated administrator +// that the rule invokes to evaluate your resources. When you use the PutOrganizationConfigRule +// action to add the rule to AWS Config, you must specify the Amazon Resource +// Name (ARN) that AWS Lambda assigns to the function. If you are adding an +// AWS managed Config rule, specify the rule's identifier for the RuleIdentifier +// key. // // The maximum number of organization config rules that AWS Config supports -// is 150. +// is 150 and 3 delegated administrator per organization. +// +// Prerequisite: Ensure you call EnableAllFeatures API to enable all features +// in an organization. // // Specify either OrganizationCustomRuleMetadata or OrganizationManagedRuleMetadata. // @@ -6649,21 +6695,31 @@ func (c *ConfigService) PutOrganizationConformancePackRequest(input *PutOrganiza // // Deploys conformance packs across member accounts in an AWS Organization. // +// Only a master account and a delegated administrator can call this API. 
When +// calling this API with a delegated administrator, you must ensure AWS Organizations +// ListDelegatedAdministrator permissions are added. +// // This API enables organization service access for config-multiaccountsetup.amazonaws.com // through the EnableAWSServiceAccess action and creates a service linked role -// AWSServiceRoleForConfigMultiAccountSetup in the master account of your organization. -// The service linked role is created only when the role does not exist in the -// master account. AWS Config verifies the existence of role with GetRole action. +// AWSServiceRoleForConfigMultiAccountSetup in the master or delegated administrator +// account of your organization. The service linked role is created only when +// the role does not exist in the caller account. To use this API with delegated +// administrator, register a delegated administrator by calling AWS Organization +// register-delegate-admin for config-multiaccountsetup.amazonaws.com. +// +// Prerequisite: Ensure you call EnableAllFeatures API to enable all features +// in an organization. // // You must specify either the TemplateS3Uri or the TemplateBody parameter, // but not both. If you provide both AWS Config uses the TemplateS3Uri parameter // and ignores the TemplateBody parameter. // // AWS Config sets the state of a conformance pack to CREATE_IN_PROGRESS and -// UPDATE_IN_PROGRESS until the confomance pack is created or updated. You cannot -// update a conformance pack while it is in this state. +// UPDATE_IN_PROGRESS until the conformance pack is created or updated. You +// cannot update a conformance pack while it is in this state. // -// You can create 6 conformance packs with 25 AWS Config rules in each pack. +// You can create 6 conformance packs with 25 AWS Config rules in each pack +// and 3 delegated administrator per organization. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6815,6 +6871,9 @@ func (c *ConfigService) PutRemediationConfigurationsRequest(input *PutRemediatio // you to add a remediation configuration. The target (SSM document) must exist // and have permissions to use the target. // +// If you make backward incompatible changes to the SSM document, you must call +// this again to ensure the remediations can run. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6914,6 +6973,10 @@ func (c *ConfigService) PutRemediationExceptionsRequest(input *PutRemediationExc // for auto-remediation. This API adds a new exception or updates an exisiting // exception for a specific resource with a specific AWS Config rule. // +// AWS Config generates a remediation exception when a problem occurs executing +// a remediation action to a specific resource. Remediation exceptions blocks +// auto-remediation until the exception is cleared. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6926,6 +6989,23 @@ func (c *ConfigService) PutRemediationExceptionsRequest(input *PutRemediationExc // One or more of the specified parameters are invalid. Verify that your parameters // are valid and try again. 
// +// * InsufficientPermissionsException +// Indicates one of the following errors: +// +// * For PutConfigRule, the rule cannot be created because the IAM role assigned +// to AWS Config lacks permissions to perform the config:Put* action. +// +// * For PutConfigRule, the AWS Lambda function cannot be invoked. Check +// the function ARN, and check the function's permissions. +// +// * For PutOrganizationConfigRule, organization config rule cannot be created +// because you do not have permissions to call IAM GetRole action or create +// a service linked role. +// +// * For PutConformancePack and PutOrganizationConformancePack, a conformance +// pack cannot be created because you do not have permissions: To call IAM +// GetRole action or create a service linked role. To read Amazon S3 bucket. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/config-2014-11-12/PutRemediationExceptions func (c *ConfigService) PutRemediationExceptions(input *PutRemediationExceptionsInput) (*PutRemediationExceptionsOutput, error) { req, out := c.PutRemediationExceptionsRequest(input) @@ -7006,6 +7086,9 @@ func (c *ConfigService) PutResourceConfigRequest(input *PutResourceConfigInput) // resource provided in the request. This API does not change or remediate the // configuration of the resource. // +// Write-only schema properites are not recorded as part of the published configuration +// item. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8489,7 +8572,23 @@ type BaseConfigurationItem struct { // The time when the configuration recording was initiated. ConfigurationItemCaptureTime *time.Time `locationName:"configurationItemCaptureTime" type:"timestamp"` - // The configuration item status. + // The configuration item status. The valid values are: + // + // * OK – The resource configuration has been updated + // + // * ResourceDiscovered – The resource was newly discovered + // + // * ResourceNotRecorded – The resource was discovered but its configuration + // was not recorded since the recorder excludes the recording of resources + // of this type + // + // * ResourceDeleted – The resource was deleted + // + // * ResourceDeletedNotRecorded – The resource was deleted but its configuration + // was not recorded since the recorder excludes the recording of resources + // of this type + // + // The CIs do not incur any cost. ConfigurationItemStatus *string `locationName:"configurationItemStatus" type:"string" enum:"ConfigurationItemStatus"` // An identifier that indicates the ordering of the configuration items of a @@ -9190,6 +9289,8 @@ type ConfigRule struct { // to constrain the resources that can trigger an evaluation for the rule. If // you do not specify a scope, evaluations are triggered when any resource in // the recording group changes. + // + // The scope can be empty. Scope *Scope `type:"structure"` // Provides the rule owner (AWS or customer), the rule identifier, and the notifications @@ -9455,6 +9556,7 @@ type ConfigRuleEvaluationStatus struct { // against the rule. FirstEvaluationStarted *bool `type:"boolean"` + // The time that you last turned off the AWS Config rule. LastDeactivatedTime *time.Time `type:"timestamp"` // The error code that AWS Config returned when the rule last failed. @@ -9691,6 +9793,9 @@ type ConfigurationAggregator struct { // The name of the aggregator. 
ConfigurationAggregatorName *string `min:"1" type:"string"` + // AWS service that created the configuration aggregator. + CreatedBy *string `min:"1" type:"string"` + // The time stamp when the configuration aggregator was created. CreationTime *time.Time `type:"timestamp"` @@ -9729,6 +9834,12 @@ func (s *ConfigurationAggregator) SetConfigurationAggregatorName(v string) *Conf return s } +// SetCreatedBy sets the CreatedBy field's value. +func (s *ConfigurationAggregator) SetCreatedBy(v string) *ConfigurationAggregator { + s.CreatedBy = &v + return s +} + // SetCreationTime sets the CreationTime field's value. func (s *ConfigurationAggregator) SetCreationTime(v time.Time) *ConfigurationAggregator { s.CreationTime = &v @@ -9775,7 +9886,23 @@ type ConfigurationItem struct { // that are associated with the same resource. ConfigurationItemMD5Hash *string `locationName:"configurationItemMD5Hash" type:"string"` - // The configuration item status. + // The configuration item status. The valid values are: + // + // * OK – The resource configuration has been updated + // + // * ResourceDiscovered – The resource was newly discovered + // + // * ResourceNotRecorded – The resource was discovered but its configuration + // was not recorded since the recorder excludes the recording of resources + // of this type + // + // * ResourceDeleted – The resource was deleted + // + // * ResourceDeletedNotRecorded – The resource was deleted but its configuration + // was not recorded since the recorder excludes the recording of resources + // of this type + // + // The CIs do not incur any cost. ConfigurationItemStatus *string `locationName:"configurationItemStatus" type:"string" enum:"ConfigurationItemStatus"` // An identifier that indicates the ordering of the configuration items of a @@ -10187,12 +10314,10 @@ type ConformancePackDetail struct { // Conformance pack template that is used to create a pack. The delivery bucket // name should start with awsconfigconforms. For example: "Resource": "arn:aws:s3:::your_bucket_name/*". - // - // DeliveryS3Bucket is a required field - DeliveryS3Bucket *string `min:"3" type:"string" required:"true"` + DeliveryS3Bucket *string `type:"string"` // The prefix for the Amazon S3 bucket. - DeliveryS3KeyPrefix *string `min:"1" type:"string"` + DeliveryS3KeyPrefix *string `type:"string"` // Last time when conformation pack update was requested. LastUpdateRequestedTime *time.Time `type:"timestamp"` @@ -10397,8 +10522,8 @@ func (s *ConformancePackEvaluationResult) SetResultRecordedTime(v time.Time) *Co } // Input parameters in the form of key-value pairs for the conformance pack, -// both of which you define. Keys can have a maximum character length of 128 -// characters, and values can have a maximum length of 256 characters. +// both of which you define. Keys can have a maximum character length of 255 +// characters, and values can have a maximum length of 4096 characters. type ConformancePackInputParameter struct { _ struct{} `type:"structure"` @@ -10602,8 +10727,8 @@ func (s *ConformancePackStatusDetail) SetStackArn(v string) *ConformancePackStat // You have specified a template that is not valid or supported. 
type ConformancePackTemplateValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10620,17 +10745,17 @@ func (s ConformancePackTemplateValidationException) GoString() string { func newErrorConformancePackTemplateValidationException(v protocol.ResponseMetadata) error { return &ConformancePackTemplateValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConformancePackTemplateValidationException) Code() string { +func (s *ConformancePackTemplateValidationException) Code() string { return "ConformancePackTemplateValidationException" } // Message returns the exception's message. -func (s ConformancePackTemplateValidationException) Message() string { +func (s *ConformancePackTemplateValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10638,22 +10763,22 @@ func (s ConformancePackTemplateValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConformancePackTemplateValidationException) OrigErr() error { +func (s *ConformancePackTemplateValidationException) OrigErr() error { return nil } -func (s ConformancePackTemplateValidationException) Error() string { +func (s *ConformancePackTemplateValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConformancePackTemplateValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConformancePackTemplateValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConformancePackTemplateValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConformancePackTemplateValidationException) RequestID() string { + return s.RespMetadata.RequestID } type DeleteAggregationAuthorizationInput struct { @@ -15624,8 +15749,8 @@ func (s *GroupedResourceCount) SetResourceCount(v int64) *GroupedResourceCount { // Your Amazon S3 bucket policy does not permit AWS Config to write to it. type InsufficientDeliveryPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15642,17 +15767,17 @@ func (s InsufficientDeliveryPolicyException) GoString() string { func newErrorInsufficientDeliveryPolicyException(v protocol.ResponseMetadata) error { return &InsufficientDeliveryPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InsufficientDeliveryPolicyException) Code() string { +func (s *InsufficientDeliveryPolicyException) Code() string { return "InsufficientDeliveryPolicyException" } // Message returns the exception's message. -func (s InsufficientDeliveryPolicyException) Message() string { +func (s *InsufficientDeliveryPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15660,22 +15785,22 @@ func (s InsufficientDeliveryPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InsufficientDeliveryPolicyException) OrigErr() error { +func (s *InsufficientDeliveryPolicyException) OrigErr() error { return nil } -func (s InsufficientDeliveryPolicyException) Error() string { +func (s *InsufficientDeliveryPolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InsufficientDeliveryPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InsufficientDeliveryPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InsufficientDeliveryPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *InsufficientDeliveryPolicyException) RequestID() string { + return s.RespMetadata.RequestID } // Indicates one of the following errors: @@ -15694,8 +15819,8 @@ func (s InsufficientDeliveryPolicyException) RequestID() string { // pack cannot be created because you do not have permissions: To call IAM // GetRole action or create a service linked role. To read Amazon S3 bucket. type InsufficientPermissionsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15712,17 +15837,17 @@ func (s InsufficientPermissionsException) GoString() string { func newErrorInsufficientPermissionsException(v protocol.ResponseMetadata) error { return &InsufficientPermissionsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InsufficientPermissionsException) Code() string { +func (s *InsufficientPermissionsException) Code() string { return "InsufficientPermissionsException" } // Message returns the exception's message. -func (s InsufficientPermissionsException) Message() string { +func (s *InsufficientPermissionsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15730,28 +15855,28 @@ func (s InsufficientPermissionsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InsufficientPermissionsException) OrigErr() error { +func (s *InsufficientPermissionsException) OrigErr() error { return nil } -func (s InsufficientPermissionsException) Error() string { +func (s *InsufficientPermissionsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InsufficientPermissionsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InsufficientPermissionsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InsufficientPermissionsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InsufficientPermissionsException) RequestID() string { + return s.RespMetadata.RequestID } // You have provided a configuration recorder name that is not valid. 
type InvalidConfigurationRecorderNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15768,17 +15893,17 @@ func (s InvalidConfigurationRecorderNameException) GoString() string { func newErrorInvalidConfigurationRecorderNameException(v protocol.ResponseMetadata) error { return &InvalidConfigurationRecorderNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidConfigurationRecorderNameException) Code() string { +func (s *InvalidConfigurationRecorderNameException) Code() string { return "InvalidConfigurationRecorderNameException" } // Message returns the exception's message. -func (s InvalidConfigurationRecorderNameException) Message() string { +func (s *InvalidConfigurationRecorderNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15786,28 +15911,28 @@ func (s InvalidConfigurationRecorderNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidConfigurationRecorderNameException) OrigErr() error { +func (s *InvalidConfigurationRecorderNameException) OrigErr() error { return nil } -func (s InvalidConfigurationRecorderNameException) Error() string { +func (s *InvalidConfigurationRecorderNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidConfigurationRecorderNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidConfigurationRecorderNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidConfigurationRecorderNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidConfigurationRecorderNameException) RequestID() string { + return s.RespMetadata.RequestID } // The specified delivery channel name is not valid. type InvalidDeliveryChannelNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15824,17 +15949,17 @@ func (s InvalidDeliveryChannelNameException) GoString() string { func newErrorInvalidDeliveryChannelNameException(v protocol.ResponseMetadata) error { return &InvalidDeliveryChannelNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeliveryChannelNameException) Code() string { +func (s *InvalidDeliveryChannelNameException) Code() string { return "InvalidDeliveryChannelNameException" } // Message returns the exception's message. -func (s InvalidDeliveryChannelNameException) Message() string { +func (s *InvalidDeliveryChannelNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15842,28 +15967,28 @@ func (s InvalidDeliveryChannelNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidDeliveryChannelNameException) OrigErr() error { +func (s *InvalidDeliveryChannelNameException) OrigErr() error { return nil } -func (s InvalidDeliveryChannelNameException) Error() string { +func (s *InvalidDeliveryChannelNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeliveryChannelNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeliveryChannelNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeliveryChannelNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeliveryChannelNameException) RequestID() string { + return s.RespMetadata.RequestID } // The syntax of the query is incorrect. type InvalidExpressionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15880,17 +16005,17 @@ func (s InvalidExpressionException) GoString() string { func newErrorInvalidExpressionException(v protocol.ResponseMetadata) error { return &InvalidExpressionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidExpressionException) Code() string { +func (s *InvalidExpressionException) Code() string { return "InvalidExpressionException" } // Message returns the exception's message. -func (s InvalidExpressionException) Message() string { +func (s *InvalidExpressionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15898,28 +16023,28 @@ func (s InvalidExpressionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidExpressionException) OrigErr() error { +func (s *InvalidExpressionException) OrigErr() error { return nil } -func (s InvalidExpressionException) Error() string { +func (s *InvalidExpressionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidExpressionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidExpressionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidExpressionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidExpressionException) RequestID() string { + return s.RespMetadata.RequestID } // The specified limit is outside the allowable range. type InvalidLimitException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15936,17 +16061,17 @@ func (s InvalidLimitException) GoString() string { func newErrorInvalidLimitException(v protocol.ResponseMetadata) error { return &InvalidLimitException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidLimitException) Code() string { +func (s *InvalidLimitException) Code() string { return "InvalidLimitException" } // Message returns the exception's message. 
-func (s InvalidLimitException) Message() string { +func (s *InvalidLimitException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15954,29 +16079,29 @@ func (s InvalidLimitException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidLimitException) OrigErr() error { +func (s *InvalidLimitException) OrigErr() error { return nil } -func (s InvalidLimitException) Error() string { +func (s *InvalidLimitException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidLimitException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidLimitException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidLimitException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidLimitException) RequestID() string { + return s.RespMetadata.RequestID } // The specified next token is invalid. Specify the nextToken string that was // returned in the previous response to get the next page of results. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15993,17 +16118,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16011,29 +16136,29 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // One or more of the specified parameters are invalid. Verify that your parameters // are valid and try again. 
type InvalidParameterValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16050,17 +16175,17 @@ func (s InvalidParameterValueException) GoString() string { func newErrorInvalidParameterValueException(v protocol.ResponseMetadata) error { return &InvalidParameterValueException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterValueException) Code() string { +func (s *InvalidParameterValueException) Code() string { return "InvalidParameterValueException" } // Message returns the exception's message. -func (s InvalidParameterValueException) Message() string { +func (s *InvalidParameterValueException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16068,29 +16193,29 @@ func (s InvalidParameterValueException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterValueException) OrigErr() error { +func (s *InvalidParameterValueException) OrigErr() error { return nil } -func (s InvalidParameterValueException) Error() string { +func (s *InvalidParameterValueException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterValueException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterValueException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterValueException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterValueException) RequestID() string { + return s.RespMetadata.RequestID } // AWS Config throws an exception if the recording group does not contain a // valid list of resource types. Invalid values might also be incorrectly formatted. type InvalidRecordingGroupException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16107,17 +16232,17 @@ func (s InvalidRecordingGroupException) GoString() string { func newErrorInvalidRecordingGroupException(v protocol.ResponseMetadata) error { return &InvalidRecordingGroupException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRecordingGroupException) Code() string { +func (s *InvalidRecordingGroupException) Code() string { return "InvalidRecordingGroupException" } // Message returns the exception's message. -func (s InvalidRecordingGroupException) Message() string { +func (s *InvalidRecordingGroupException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16125,28 +16250,28 @@ func (s InvalidRecordingGroupException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRecordingGroupException) OrigErr() error { +func (s *InvalidRecordingGroupException) OrigErr() error { return nil } -func (s InvalidRecordingGroupException) Error() string { +func (s *InvalidRecordingGroupException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidRecordingGroupException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRecordingGroupException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRecordingGroupException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRecordingGroupException) RequestID() string { + return s.RespMetadata.RequestID } // The specified ResultToken is invalid. type InvalidResultTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16163,17 +16288,17 @@ func (s InvalidResultTokenException) GoString() string { func newErrorInvalidResultTokenException(v protocol.ResponseMetadata) error { return &InvalidResultTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidResultTokenException) Code() string { +func (s *InvalidResultTokenException) Code() string { return "InvalidResultTokenException" } // Message returns the exception's message. -func (s InvalidResultTokenException) Message() string { +func (s *InvalidResultTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16181,28 +16306,28 @@ func (s InvalidResultTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResultTokenException) OrigErr() error { +func (s *InvalidResultTokenException) OrigErr() error { return nil } -func (s InvalidResultTokenException) Error() string { +func (s *InvalidResultTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidResultTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResultTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidResultTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResultTokenException) RequestID() string { + return s.RespMetadata.RequestID } // You have provided a null or empty role ARN. type InvalidRoleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16219,17 +16344,17 @@ func (s InvalidRoleException) GoString() string { func newErrorInvalidRoleException(v protocol.ResponseMetadata) error { return &InvalidRoleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRoleException) Code() string { +func (s *InvalidRoleException) Code() string { return "InvalidRoleException" } // Message returns the exception's message. -func (s InvalidRoleException) Message() string { +func (s *InvalidRoleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16237,28 +16362,28 @@ func (s InvalidRoleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidRoleException) OrigErr() error { +func (s *InvalidRoleException) OrigErr() error { return nil } -func (s InvalidRoleException) Error() string { +func (s *InvalidRoleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRoleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRoleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRoleException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRoleException) RequestID() string { + return s.RespMetadata.RequestID } // The specified Amazon S3 key prefix is not valid. type InvalidS3KeyPrefixException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16275,17 +16400,17 @@ func (s InvalidS3KeyPrefixException) GoString() string { func newErrorInvalidS3KeyPrefixException(v protocol.ResponseMetadata) error { return &InvalidS3KeyPrefixException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidS3KeyPrefixException) Code() string { +func (s *InvalidS3KeyPrefixException) Code() string { return "InvalidS3KeyPrefixException" } // Message returns the exception's message. -func (s InvalidS3KeyPrefixException) Message() string { +func (s *InvalidS3KeyPrefixException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16293,28 +16418,28 @@ func (s InvalidS3KeyPrefixException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidS3KeyPrefixException) OrigErr() error { +func (s *InvalidS3KeyPrefixException) OrigErr() error { return nil } -func (s InvalidS3KeyPrefixException) Error() string { +func (s *InvalidS3KeyPrefixException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidS3KeyPrefixException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidS3KeyPrefixException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidS3KeyPrefixException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidS3KeyPrefixException) RequestID() string { + return s.RespMetadata.RequestID } // The specified Amazon SNS topic does not exist. type InvalidSNSTopicARNException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16331,17 +16456,17 @@ func (s InvalidSNSTopicARNException) GoString() string { func newErrorInvalidSNSTopicARNException(v protocol.ResponseMetadata) error { return &InvalidSNSTopicARNException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSNSTopicARNException) Code() string { +func (s *InvalidSNSTopicARNException) Code() string { return "InvalidSNSTopicARNException" } // Message returns the exception's message. 
-func (s InvalidSNSTopicARNException) Message() string { +func (s *InvalidSNSTopicARNException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16349,29 +16474,29 @@ func (s InvalidSNSTopicARNException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidSNSTopicARNException) OrigErr() error { +func (s *InvalidSNSTopicARNException) OrigErr() error { return nil } -func (s InvalidSNSTopicARNException) Error() string { +func (s *InvalidSNSTopicARNException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSNSTopicARNException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSNSTopicARNException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSNSTopicARNException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSNSTopicARNException) RequestID() string { + return s.RespMetadata.RequestID } // The specified time range is not valid. The earlier time is not chronologically // before the later time. type InvalidTimeRangeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16388,17 +16513,17 @@ func (s InvalidTimeRangeException) GoString() string { func newErrorInvalidTimeRangeException(v protocol.ResponseMetadata) error { return &InvalidTimeRangeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTimeRangeException) Code() string { +func (s *InvalidTimeRangeException) Code() string { return "InvalidTimeRangeException" } // Message returns the exception's message. -func (s InvalidTimeRangeException) Message() string { +func (s *InvalidTimeRangeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16406,29 +16531,29 @@ func (s InvalidTimeRangeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTimeRangeException) OrigErr() error { +func (s *InvalidTimeRangeException) OrigErr() error { return nil } -func (s InvalidTimeRangeException) Error() string { +func (s *InvalidTimeRangeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTimeRangeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTimeRangeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTimeRangeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTimeRangeException) RequestID() string { + return s.RespMetadata.RequestID } // You cannot delete the delivery channel you specified because the configuration // recorder is running. 
type LastDeliveryChannelDeleteFailedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16445,17 +16570,17 @@ func (s LastDeliveryChannelDeleteFailedException) GoString() string { func newErrorLastDeliveryChannelDeleteFailedException(v protocol.ResponseMetadata) error { return &LastDeliveryChannelDeleteFailedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LastDeliveryChannelDeleteFailedException) Code() string { +func (s *LastDeliveryChannelDeleteFailedException) Code() string { return "LastDeliveryChannelDeleteFailedException" } // Message returns the exception's message. -func (s LastDeliveryChannelDeleteFailedException) Message() string { +func (s *LastDeliveryChannelDeleteFailedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16463,22 +16588,22 @@ func (s LastDeliveryChannelDeleteFailedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LastDeliveryChannelDeleteFailedException) OrigErr() error { +func (s *LastDeliveryChannelDeleteFailedException) OrigErr() error { return nil } -func (s LastDeliveryChannelDeleteFailedException) Error() string { +func (s *LastDeliveryChannelDeleteFailedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LastDeliveryChannelDeleteFailedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LastDeliveryChannelDeleteFailedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LastDeliveryChannelDeleteFailedException) RequestID() string { - return s.respMetadata.RequestID +func (s *LastDeliveryChannelDeleteFailedException) RequestID() string { + return s.RespMetadata.RequestID } // For StartConfigRulesEvaluation API, this exception is thrown if an evaluation @@ -16488,8 +16613,8 @@ func (s LastDeliveryChannelDeleteFailedException) RequestID() string { // For PutConfigurationAggregator API, this exception is thrown if the number // of accounts and aggregators exceeds the limit. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16506,17 +16631,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16524,22 +16649,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListAggregateDiscoveredResourcesInput struct { @@ -16890,8 +17015,8 @@ func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput // You have reached the limit (100,000) of active custom resource types in your // account. Delete unused resources using DeleteResourceConfig. type MaxActiveResourcesExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16908,17 +17033,17 @@ func (s MaxActiveResourcesExceededException) GoString() string { func newErrorMaxActiveResourcesExceededException(v protocol.ResponseMetadata) error { return &MaxActiveResourcesExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaxActiveResourcesExceededException) Code() string { +func (s *MaxActiveResourcesExceededException) Code() string { return "MaxActiveResourcesExceededException" } // Message returns the exception's message. -func (s MaxActiveResourcesExceededException) Message() string { +func (s *MaxActiveResourcesExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16926,30 +17051,30 @@ func (s MaxActiveResourcesExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaxActiveResourcesExceededException) OrigErr() error { +func (s *MaxActiveResourcesExceededException) OrigErr() error { return nil } -func (s MaxActiveResourcesExceededException) Error() string { +func (s *MaxActiveResourcesExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaxActiveResourcesExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaxActiveResourcesExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaxActiveResourcesExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaxActiveResourcesExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Failed to add the AWS Config rule because the account already contains the // maximum number of 150 rules. Consider deleting any deactivated rules before // you add new rules. 
type MaxNumberOfConfigRulesExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16966,17 +17091,17 @@ func (s MaxNumberOfConfigRulesExceededException) GoString() string { func newErrorMaxNumberOfConfigRulesExceededException(v protocol.ResponseMetadata) error { return &MaxNumberOfConfigRulesExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaxNumberOfConfigRulesExceededException) Code() string { +func (s *MaxNumberOfConfigRulesExceededException) Code() string { return "MaxNumberOfConfigRulesExceededException" } // Message returns the exception's message. -func (s MaxNumberOfConfigRulesExceededException) Message() string { +func (s *MaxNumberOfConfigRulesExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16984,28 +17109,28 @@ func (s MaxNumberOfConfigRulesExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaxNumberOfConfigRulesExceededException) OrigErr() error { +func (s *MaxNumberOfConfigRulesExceededException) OrigErr() error { return nil } -func (s MaxNumberOfConfigRulesExceededException) Error() string { +func (s *MaxNumberOfConfigRulesExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaxNumberOfConfigRulesExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaxNumberOfConfigRulesExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaxNumberOfConfigRulesExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaxNumberOfConfigRulesExceededException) RequestID() string { + return s.RespMetadata.RequestID } // You have reached the limit of the number of recorders you can create. type MaxNumberOfConfigurationRecordersExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17022,17 +17147,17 @@ func (s MaxNumberOfConfigurationRecordersExceededException) GoString() string { func newErrorMaxNumberOfConfigurationRecordersExceededException(v protocol.ResponseMetadata) error { return &MaxNumberOfConfigurationRecordersExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaxNumberOfConfigurationRecordersExceededException) Code() string { +func (s *MaxNumberOfConfigurationRecordersExceededException) Code() string { return "MaxNumberOfConfigurationRecordersExceededException" } // Message returns the exception's message. -func (s MaxNumberOfConfigurationRecordersExceededException) Message() string { +func (s *MaxNumberOfConfigurationRecordersExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17040,29 +17165,29 @@ func (s MaxNumberOfConfigurationRecordersExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s MaxNumberOfConfigurationRecordersExceededException) OrigErr() error { +func (s *MaxNumberOfConfigurationRecordersExceededException) OrigErr() error { return nil } -func (s MaxNumberOfConfigurationRecordersExceededException) Error() string { +func (s *MaxNumberOfConfigurationRecordersExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaxNumberOfConfigurationRecordersExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaxNumberOfConfigurationRecordersExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaxNumberOfConfigurationRecordersExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaxNumberOfConfigurationRecordersExceededException) RequestID() string { + return s.RespMetadata.RequestID } // You have reached the limit (6) of the number of conformance packs in an account // (6 conformance pack with 25 AWS Config rules per pack). type MaxNumberOfConformancePacksExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17079,17 +17204,17 @@ func (s MaxNumberOfConformancePacksExceededException) GoString() string { func newErrorMaxNumberOfConformancePacksExceededException(v protocol.ResponseMetadata) error { return &MaxNumberOfConformancePacksExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaxNumberOfConformancePacksExceededException) Code() string { +func (s *MaxNumberOfConformancePacksExceededException) Code() string { return "MaxNumberOfConformancePacksExceededException" } // Message returns the exception's message. -func (s MaxNumberOfConformancePacksExceededException) Message() string { +func (s *MaxNumberOfConformancePacksExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17097,28 +17222,28 @@ func (s MaxNumberOfConformancePacksExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaxNumberOfConformancePacksExceededException) OrigErr() error { +func (s *MaxNumberOfConformancePacksExceededException) OrigErr() error { return nil } -func (s MaxNumberOfConformancePacksExceededException) Error() string { +func (s *MaxNumberOfConformancePacksExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaxNumberOfConformancePacksExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaxNumberOfConformancePacksExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaxNumberOfConformancePacksExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaxNumberOfConformancePacksExceededException) RequestID() string { + return s.RespMetadata.RequestID } // You have reached the limit of the number of delivery channels you can create. 
type MaxNumberOfDeliveryChannelsExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17135,17 +17260,17 @@ func (s MaxNumberOfDeliveryChannelsExceededException) GoString() string { func newErrorMaxNumberOfDeliveryChannelsExceededException(v protocol.ResponseMetadata) error { return &MaxNumberOfDeliveryChannelsExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaxNumberOfDeliveryChannelsExceededException) Code() string { +func (s *MaxNumberOfDeliveryChannelsExceededException) Code() string { return "MaxNumberOfDeliveryChannelsExceededException" } // Message returns the exception's message. -func (s MaxNumberOfDeliveryChannelsExceededException) Message() string { +func (s *MaxNumberOfDeliveryChannelsExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17153,29 +17278,29 @@ func (s MaxNumberOfDeliveryChannelsExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaxNumberOfDeliveryChannelsExceededException) OrigErr() error { +func (s *MaxNumberOfDeliveryChannelsExceededException) OrigErr() error { return nil } -func (s MaxNumberOfDeliveryChannelsExceededException) Error() string { +func (s *MaxNumberOfDeliveryChannelsExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaxNumberOfDeliveryChannelsExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaxNumberOfDeliveryChannelsExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaxNumberOfDeliveryChannelsExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaxNumberOfDeliveryChannelsExceededException) RequestID() string { + return s.RespMetadata.RequestID } // You have reached the limit of the number of organization config rules you // can create. type MaxNumberOfOrganizationConfigRulesExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17192,17 +17317,17 @@ func (s MaxNumberOfOrganizationConfigRulesExceededException) GoString() string { func newErrorMaxNumberOfOrganizationConfigRulesExceededException(v protocol.ResponseMetadata) error { return &MaxNumberOfOrganizationConfigRulesExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaxNumberOfOrganizationConfigRulesExceededException) Code() string { +func (s *MaxNumberOfOrganizationConfigRulesExceededException) Code() string { return "MaxNumberOfOrganizationConfigRulesExceededException" } // Message returns the exception's message. -func (s MaxNumberOfOrganizationConfigRulesExceededException) Message() string { +func (s *MaxNumberOfOrganizationConfigRulesExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17210,30 +17335,30 @@ func (s MaxNumberOfOrganizationConfigRulesExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s MaxNumberOfOrganizationConfigRulesExceededException) OrigErr() error { +func (s *MaxNumberOfOrganizationConfigRulesExceededException) OrigErr() error { return nil } -func (s MaxNumberOfOrganizationConfigRulesExceededException) Error() string { +func (s *MaxNumberOfOrganizationConfigRulesExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaxNumberOfOrganizationConfigRulesExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaxNumberOfOrganizationConfigRulesExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaxNumberOfOrganizationConfigRulesExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaxNumberOfOrganizationConfigRulesExceededException) RequestID() string { + return s.RespMetadata.RequestID } // You have reached the limit (6) of the number of organization conformance // packs in an account (6 conformance pack with 25 AWS Config rules per pack // per account). type MaxNumberOfOrganizationConformancePacksExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17250,17 +17375,17 @@ func (s MaxNumberOfOrganizationConformancePacksExceededException) GoString() str func newErrorMaxNumberOfOrganizationConformancePacksExceededException(v protocol.ResponseMetadata) error { return &MaxNumberOfOrganizationConformancePacksExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaxNumberOfOrganizationConformancePacksExceededException) Code() string { +func (s *MaxNumberOfOrganizationConformancePacksExceededException) Code() string { return "MaxNumberOfOrganizationConformancePacksExceededException" } // Message returns the exception's message. -func (s MaxNumberOfOrganizationConformancePacksExceededException) Message() string { +func (s *MaxNumberOfOrganizationConformancePacksExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17268,29 +17393,29 @@ func (s MaxNumberOfOrganizationConformancePacksExceededException) Message() stri } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaxNumberOfOrganizationConformancePacksExceededException) OrigErr() error { +func (s *MaxNumberOfOrganizationConformancePacksExceededException) OrigErr() error { return nil } -func (s MaxNumberOfOrganizationConformancePacksExceededException) Error() string { +func (s *MaxNumberOfOrganizationConformancePacksExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaxNumberOfOrganizationConformancePacksExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaxNumberOfOrganizationConformancePacksExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s MaxNumberOfOrganizationConformancePacksExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaxNumberOfOrganizationConformancePacksExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Failed to add the retention configuration because a retention configuration // with that name already exists. type MaxNumberOfRetentionConfigurationsExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17307,17 +17432,17 @@ func (s MaxNumberOfRetentionConfigurationsExceededException) GoString() string { func newErrorMaxNumberOfRetentionConfigurationsExceededException(v protocol.ResponseMetadata) error { return &MaxNumberOfRetentionConfigurationsExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaxNumberOfRetentionConfigurationsExceededException) Code() string { +func (s *MaxNumberOfRetentionConfigurationsExceededException) Code() string { return "MaxNumberOfRetentionConfigurationsExceededException" } // Message returns the exception's message. -func (s MaxNumberOfRetentionConfigurationsExceededException) Message() string { +func (s *MaxNumberOfRetentionConfigurationsExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17325,22 +17450,22 @@ func (s MaxNumberOfRetentionConfigurationsExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaxNumberOfRetentionConfigurationsExceededException) OrigErr() error { +func (s *MaxNumberOfRetentionConfigurationsExceededException) OrigErr() error { return nil } -func (s MaxNumberOfRetentionConfigurationsExceededException) Error() string { +func (s *MaxNumberOfRetentionConfigurationsExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaxNumberOfRetentionConfigurationsExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaxNumberOfRetentionConfigurationsExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaxNumberOfRetentionConfigurationsExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *MaxNumberOfRetentionConfigurationsExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Organization config rule creation or deletion status in each member account. @@ -17450,8 +17575,8 @@ func (s *MemberAccountStatus) SetMemberAccountRuleStatus(v string) *MemberAccoun // There are no configuration recorders available to provide the role needed // to describe your resources. Create a configuration recorder. 
type NoAvailableConfigurationRecorderException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17468,17 +17593,17 @@ func (s NoAvailableConfigurationRecorderException) GoString() string { func newErrorNoAvailableConfigurationRecorderException(v protocol.ResponseMetadata) error { return &NoAvailableConfigurationRecorderException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoAvailableConfigurationRecorderException) Code() string { +func (s *NoAvailableConfigurationRecorderException) Code() string { return "NoAvailableConfigurationRecorderException" } // Message returns the exception's message. -func (s NoAvailableConfigurationRecorderException) Message() string { +func (s *NoAvailableConfigurationRecorderException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17486,28 +17611,28 @@ func (s NoAvailableConfigurationRecorderException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoAvailableConfigurationRecorderException) OrigErr() error { +func (s *NoAvailableConfigurationRecorderException) OrigErr() error { return nil } -func (s NoAvailableConfigurationRecorderException) Error() string { +func (s *NoAvailableConfigurationRecorderException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoAvailableConfigurationRecorderException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoAvailableConfigurationRecorderException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoAvailableConfigurationRecorderException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoAvailableConfigurationRecorderException) RequestID() string { + return s.RespMetadata.RequestID } // There is no delivery channel available to record configurations. type NoAvailableDeliveryChannelException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17524,17 +17649,17 @@ func (s NoAvailableDeliveryChannelException) GoString() string { func newErrorNoAvailableDeliveryChannelException(v protocol.ResponseMetadata) error { return &NoAvailableDeliveryChannelException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoAvailableDeliveryChannelException) Code() string { +func (s *NoAvailableDeliveryChannelException) Code() string { return "NoAvailableDeliveryChannelException" } // Message returns the exception's message. -func (s NoAvailableDeliveryChannelException) Message() string { +func (s *NoAvailableDeliveryChannelException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17542,28 +17667,28 @@ func (s NoAvailableDeliveryChannelException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s NoAvailableDeliveryChannelException) OrigErr() error { +func (s *NoAvailableDeliveryChannelException) OrigErr() error { return nil } -func (s NoAvailableDeliveryChannelException) Error() string { +func (s *NoAvailableDeliveryChannelException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoAvailableDeliveryChannelException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoAvailableDeliveryChannelException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoAvailableDeliveryChannelException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoAvailableDeliveryChannelException) RequestID() string { + return s.RespMetadata.RequestID } // Organization is no longer available. type NoAvailableOrganizationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17580,17 +17705,17 @@ func (s NoAvailableOrganizationException) GoString() string { func newErrorNoAvailableOrganizationException(v protocol.ResponseMetadata) error { return &NoAvailableOrganizationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoAvailableOrganizationException) Code() string { +func (s *NoAvailableOrganizationException) Code() string { return "NoAvailableOrganizationException" } // Message returns the exception's message. -func (s NoAvailableOrganizationException) Message() string { +func (s *NoAvailableOrganizationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17598,28 +17723,28 @@ func (s NoAvailableOrganizationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoAvailableOrganizationException) OrigErr() error { +func (s *NoAvailableOrganizationException) OrigErr() error { return nil } -func (s NoAvailableOrganizationException) Error() string { +func (s *NoAvailableOrganizationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoAvailableOrganizationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoAvailableOrganizationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoAvailableOrganizationException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoAvailableOrganizationException) RequestID() string { + return s.RespMetadata.RequestID } // There is no configuration recorder running. type NoRunningConfigurationRecorderException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17636,17 +17761,17 @@ func (s NoRunningConfigurationRecorderException) GoString() string { func newErrorNoRunningConfigurationRecorderException(v protocol.ResponseMetadata) error { return &NoRunningConfigurationRecorderException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s NoRunningConfigurationRecorderException) Code() string { +func (s *NoRunningConfigurationRecorderException) Code() string { return "NoRunningConfigurationRecorderException" } // Message returns the exception's message. -func (s NoRunningConfigurationRecorderException) Message() string { +func (s *NoRunningConfigurationRecorderException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17654,28 +17779,28 @@ func (s NoRunningConfigurationRecorderException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoRunningConfigurationRecorderException) OrigErr() error { +func (s *NoRunningConfigurationRecorderException) OrigErr() error { return nil } -func (s NoRunningConfigurationRecorderException) Error() string { +func (s *NoRunningConfigurationRecorderException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoRunningConfigurationRecorderException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoRunningConfigurationRecorderException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoRunningConfigurationRecorderException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoRunningConfigurationRecorderException) RequestID() string { + return s.RespMetadata.RequestID } // The specified Amazon S3 bucket does not exist. type NoSuchBucketException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17692,17 +17817,17 @@ func (s NoSuchBucketException) GoString() string { func newErrorNoSuchBucketException(v protocol.ResponseMetadata) error { return &NoSuchBucketException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchBucketException) Code() string { +func (s *NoSuchBucketException) Code() string { return "NoSuchBucketException" } // Message returns the exception's message. -func (s NoSuchBucketException) Message() string { +func (s *NoSuchBucketException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17710,29 +17835,29 @@ func (s NoSuchBucketException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchBucketException) OrigErr() error { +func (s *NoSuchBucketException) OrigErr() error { return nil } -func (s NoSuchBucketException) Error() string { +func (s *NoSuchBucketException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoSuchBucketException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchBucketException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchBucketException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchBucketException) RequestID() string { + return s.RespMetadata.RequestID } // One or more AWS Config rules in the request are invalid. Verify that the // rule names are correct and try again. 
type NoSuchConfigRuleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17749,17 +17874,17 @@ func (s NoSuchConfigRuleException) GoString() string { func newErrorNoSuchConfigRuleException(v protocol.ResponseMetadata) error { return &NoSuchConfigRuleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchConfigRuleException) Code() string { +func (s *NoSuchConfigRuleException) Code() string { return "NoSuchConfigRuleException" } // Message returns the exception's message. -func (s NoSuchConfigRuleException) Message() string { +func (s *NoSuchConfigRuleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17767,28 +17892,28 @@ func (s NoSuchConfigRuleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchConfigRuleException) OrigErr() error { +func (s *NoSuchConfigRuleException) OrigErr() error { return nil } -func (s NoSuchConfigRuleException) Error() string { +func (s *NoSuchConfigRuleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoSuchConfigRuleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchConfigRuleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchConfigRuleException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchConfigRuleException) RequestID() string { + return s.RespMetadata.RequestID } // AWS Config rule that you passed in the filter does not exist. type NoSuchConfigRuleInConformancePackException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17805,17 +17930,17 @@ func (s NoSuchConfigRuleInConformancePackException) GoString() string { func newErrorNoSuchConfigRuleInConformancePackException(v protocol.ResponseMetadata) error { return &NoSuchConfigRuleInConformancePackException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchConfigRuleInConformancePackException) Code() string { +func (s *NoSuchConfigRuleInConformancePackException) Code() string { return "NoSuchConfigRuleInConformancePackException" } // Message returns the exception's message. -func (s NoSuchConfigRuleInConformancePackException) Message() string { +func (s *NoSuchConfigRuleInConformancePackException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17823,28 +17948,28 @@ func (s NoSuchConfigRuleInConformancePackException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchConfigRuleInConformancePackException) OrigErr() error { +func (s *NoSuchConfigRuleInConformancePackException) OrigErr() error { return nil } -func (s NoSuchConfigRuleInConformancePackException) Error() string { +func (s *NoSuchConfigRuleInConformancePackException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s NoSuchConfigRuleInConformancePackException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchConfigRuleInConformancePackException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchConfigRuleInConformancePackException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchConfigRuleInConformancePackException) RequestID() string { + return s.RespMetadata.RequestID } // You have specified a configuration aggregator that does not exist. type NoSuchConfigurationAggregatorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17861,17 +17986,17 @@ func (s NoSuchConfigurationAggregatorException) GoString() string { func newErrorNoSuchConfigurationAggregatorException(v protocol.ResponseMetadata) error { return &NoSuchConfigurationAggregatorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchConfigurationAggregatorException) Code() string { +func (s *NoSuchConfigurationAggregatorException) Code() string { return "NoSuchConfigurationAggregatorException" } // Message returns the exception's message. -func (s NoSuchConfigurationAggregatorException) Message() string { +func (s *NoSuchConfigurationAggregatorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17879,28 +18004,28 @@ func (s NoSuchConfigurationAggregatorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchConfigurationAggregatorException) OrigErr() error { +func (s *NoSuchConfigurationAggregatorException) OrigErr() error { return nil } -func (s NoSuchConfigurationAggregatorException) Error() string { +func (s *NoSuchConfigurationAggregatorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoSuchConfigurationAggregatorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchConfigurationAggregatorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchConfigurationAggregatorException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchConfigurationAggregatorException) RequestID() string { + return s.RespMetadata.RequestID } // You have specified a configuration recorder that does not exist. type NoSuchConfigurationRecorderException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17917,17 +18042,17 @@ func (s NoSuchConfigurationRecorderException) GoString() string { func newErrorNoSuchConfigurationRecorderException(v protocol.ResponseMetadata) error { return &NoSuchConfigurationRecorderException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchConfigurationRecorderException) Code() string { +func (s *NoSuchConfigurationRecorderException) Code() string { return "NoSuchConfigurationRecorderException" } // Message returns the exception's message. 
-func (s NoSuchConfigurationRecorderException) Message() string { +func (s *NoSuchConfigurationRecorderException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17935,28 +18060,28 @@ func (s NoSuchConfigurationRecorderException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchConfigurationRecorderException) OrigErr() error { +func (s *NoSuchConfigurationRecorderException) OrigErr() error { return nil } -func (s NoSuchConfigurationRecorderException) Error() string { +func (s *NoSuchConfigurationRecorderException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoSuchConfigurationRecorderException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchConfigurationRecorderException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchConfigurationRecorderException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchConfigurationRecorderException) RequestID() string { + return s.RespMetadata.RequestID } // You specified one or more conformance packs that do not exist. type NoSuchConformancePackException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17973,17 +18098,17 @@ func (s NoSuchConformancePackException) GoString() string { func newErrorNoSuchConformancePackException(v protocol.ResponseMetadata) error { return &NoSuchConformancePackException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchConformancePackException) Code() string { +func (s *NoSuchConformancePackException) Code() string { return "NoSuchConformancePackException" } // Message returns the exception's message. -func (s NoSuchConformancePackException) Message() string { +func (s *NoSuchConformancePackException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17991,28 +18116,28 @@ func (s NoSuchConformancePackException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchConformancePackException) OrigErr() error { +func (s *NoSuchConformancePackException) OrigErr() error { return nil } -func (s NoSuchConformancePackException) Error() string { +func (s *NoSuchConformancePackException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoSuchConformancePackException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchConformancePackException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchConformancePackException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchConformancePackException) RequestID() string { + return s.RespMetadata.RequestID } // You have specified a delivery channel that does not exist. 
type NoSuchDeliveryChannelException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18029,17 +18154,17 @@ func (s NoSuchDeliveryChannelException) GoString() string { func newErrorNoSuchDeliveryChannelException(v protocol.ResponseMetadata) error { return &NoSuchDeliveryChannelException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchDeliveryChannelException) Code() string { +func (s *NoSuchDeliveryChannelException) Code() string { return "NoSuchDeliveryChannelException" } // Message returns the exception's message. -func (s NoSuchDeliveryChannelException) Message() string { +func (s *NoSuchDeliveryChannelException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18047,28 +18172,28 @@ func (s NoSuchDeliveryChannelException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchDeliveryChannelException) OrigErr() error { +func (s *NoSuchDeliveryChannelException) OrigErr() error { return nil } -func (s NoSuchDeliveryChannelException) Error() string { +func (s *NoSuchDeliveryChannelException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoSuchDeliveryChannelException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchDeliveryChannelException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchDeliveryChannelException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchDeliveryChannelException) RequestID() string { + return s.RespMetadata.RequestID } // You specified one or more organization config rules that do not exist. type NoSuchOrganizationConfigRuleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18085,17 +18210,17 @@ func (s NoSuchOrganizationConfigRuleException) GoString() string { func newErrorNoSuchOrganizationConfigRuleException(v protocol.ResponseMetadata) error { return &NoSuchOrganizationConfigRuleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchOrganizationConfigRuleException) Code() string { +func (s *NoSuchOrganizationConfigRuleException) Code() string { return "NoSuchOrganizationConfigRuleException" } // Message returns the exception's message. -func (s NoSuchOrganizationConfigRuleException) Message() string { +func (s *NoSuchOrganizationConfigRuleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18103,22 +18228,22 @@ func (s NoSuchOrganizationConfigRuleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchOrganizationConfigRuleException) OrigErr() error { +func (s *NoSuchOrganizationConfigRuleException) OrigErr() error { return nil } -func (s NoSuchOrganizationConfigRuleException) Error() string { +func (s *NoSuchOrganizationConfigRuleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s NoSuchOrganizationConfigRuleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchOrganizationConfigRuleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchOrganizationConfigRuleException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchOrganizationConfigRuleException) RequestID() string { + return s.RespMetadata.RequestID } // AWS Config organization conformance pack that you passed in the filter does @@ -18127,8 +18252,8 @@ func (s NoSuchOrganizationConfigRuleException) RequestID() string { // For DeleteOrganizationConformancePack, you tried to delete an organization // conformance pack that does not exist. type NoSuchOrganizationConformancePackException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18145,17 +18270,17 @@ func (s NoSuchOrganizationConformancePackException) GoString() string { func newErrorNoSuchOrganizationConformancePackException(v protocol.ResponseMetadata) error { return &NoSuchOrganizationConformancePackException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchOrganizationConformancePackException) Code() string { +func (s *NoSuchOrganizationConformancePackException) Code() string { return "NoSuchOrganizationConformancePackException" } // Message returns the exception's message. -func (s NoSuchOrganizationConformancePackException) Message() string { +func (s *NoSuchOrganizationConformancePackException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18163,28 +18288,28 @@ func (s NoSuchOrganizationConformancePackException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchOrganizationConformancePackException) OrigErr() error { +func (s *NoSuchOrganizationConformancePackException) OrigErr() error { return nil } -func (s NoSuchOrganizationConformancePackException) Error() string { +func (s *NoSuchOrganizationConformancePackException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoSuchOrganizationConformancePackException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchOrganizationConformancePackException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchOrganizationConformancePackException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchOrganizationConformancePackException) RequestID() string { + return s.RespMetadata.RequestID } // You specified an AWS Config rule without a remediation configuration. 
type NoSuchRemediationConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18201,17 +18326,17 @@ func (s NoSuchRemediationConfigurationException) GoString() string { func newErrorNoSuchRemediationConfigurationException(v protocol.ResponseMetadata) error { return &NoSuchRemediationConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchRemediationConfigurationException) Code() string { +func (s *NoSuchRemediationConfigurationException) Code() string { return "NoSuchRemediationConfigurationException" } // Message returns the exception's message. -func (s NoSuchRemediationConfigurationException) Message() string { +func (s *NoSuchRemediationConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18219,28 +18344,28 @@ func (s NoSuchRemediationConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchRemediationConfigurationException) OrigErr() error { +func (s *NoSuchRemediationConfigurationException) OrigErr() error { return nil } -func (s NoSuchRemediationConfigurationException) Error() string { +func (s *NoSuchRemediationConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoSuchRemediationConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchRemediationConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchRemediationConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchRemediationConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // You tried to delete a remediation exception that does not exist. type NoSuchRemediationExceptionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18257,17 +18382,17 @@ func (s NoSuchRemediationExceptionException) GoString() string { func newErrorNoSuchRemediationExceptionException(v protocol.ResponseMetadata) error { return &NoSuchRemediationExceptionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchRemediationExceptionException) Code() string { +func (s *NoSuchRemediationExceptionException) Code() string { return "NoSuchRemediationExceptionException" } // Message returns the exception's message. -func (s NoSuchRemediationExceptionException) Message() string { +func (s *NoSuchRemediationExceptionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18275,28 +18400,28 @@ func (s NoSuchRemediationExceptionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s NoSuchRemediationExceptionException) OrigErr() error { +func (s *NoSuchRemediationExceptionException) OrigErr() error { return nil } -func (s NoSuchRemediationExceptionException) Error() string { +func (s *NoSuchRemediationExceptionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoSuchRemediationExceptionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchRemediationExceptionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchRemediationExceptionException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchRemediationExceptionException) RequestID() string { + return s.RespMetadata.RequestID } // You have specified a retention configuration that does not exist. type NoSuchRetentionConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18313,17 +18438,17 @@ func (s NoSuchRetentionConfigurationException) GoString() string { func newErrorNoSuchRetentionConfigurationException(v protocol.ResponseMetadata) error { return &NoSuchRetentionConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchRetentionConfigurationException) Code() string { +func (s *NoSuchRetentionConfigurationException) Code() string { return "NoSuchRetentionConfigurationException" } // Message returns the exception's message. -func (s NoSuchRetentionConfigurationException) Message() string { +func (s *NoSuchRetentionConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18331,22 +18456,22 @@ func (s NoSuchRetentionConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchRetentionConfigurationException) OrigErr() error { +func (s *NoSuchRetentionConfigurationException) OrigErr() error { return nil } -func (s NoSuchRetentionConfigurationException) Error() string { +func (s *NoSuchRetentionConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoSuchRetentionConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchRetentionConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchRetentionConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchRetentionConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // For PutConfigAggregator API, no permission to call EnableAWSServiceAccess @@ -18356,8 +18481,8 @@ func (s NoSuchRetentionConfigurationException) RequestID() string { // Config throws an exception if APIs are called from member accounts. All APIs // must be called from organization master account. 
type OrganizationAccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18374,17 +18499,17 @@ func (s OrganizationAccessDeniedException) GoString() string { func newErrorOrganizationAccessDeniedException(v protocol.ResponseMetadata) error { return &OrganizationAccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OrganizationAccessDeniedException) Code() string { +func (s *OrganizationAccessDeniedException) Code() string { return "OrganizationAccessDeniedException" } // Message returns the exception's message. -func (s OrganizationAccessDeniedException) Message() string { +func (s *OrganizationAccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18392,22 +18517,22 @@ func (s OrganizationAccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OrganizationAccessDeniedException) OrigErr() error { +func (s *OrganizationAccessDeniedException) OrigErr() error { return nil } -func (s OrganizationAccessDeniedException) Error() string { +func (s *OrganizationAccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OrganizationAccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OrganizationAccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OrganizationAccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *OrganizationAccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // This object contains regions to set up the aggregator and an IAM role to @@ -18475,8 +18600,8 @@ func (s *OrganizationAggregationSource) SetRoleArn(v string) *OrganizationAggreg // AWS Config resource cannot be created because your organization does not // have all features enabled. type OrganizationAllFeaturesNotEnabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18493,17 +18618,17 @@ func (s OrganizationAllFeaturesNotEnabledException) GoString() string { func newErrorOrganizationAllFeaturesNotEnabledException(v protocol.ResponseMetadata) error { return &OrganizationAllFeaturesNotEnabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OrganizationAllFeaturesNotEnabledException) Code() string { +func (s *OrganizationAllFeaturesNotEnabledException) Code() string { return "OrganizationAllFeaturesNotEnabledException" } // Message returns the exception's message. -func (s OrganizationAllFeaturesNotEnabledException) Message() string { +func (s *OrganizationAllFeaturesNotEnabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18511,22 +18636,22 @@ func (s OrganizationAllFeaturesNotEnabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s OrganizationAllFeaturesNotEnabledException) OrigErr() error { +func (s *OrganizationAllFeaturesNotEnabledException) OrigErr() error { return nil } -func (s OrganizationAllFeaturesNotEnabledException) Error() string { +func (s *OrganizationAllFeaturesNotEnabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OrganizationAllFeaturesNotEnabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OrganizationAllFeaturesNotEnabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OrganizationAllFeaturesNotEnabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *OrganizationAllFeaturesNotEnabledException) RequestID() string { + return s.RespMetadata.RequestID } // An organization config rule that has information about config rules that @@ -18712,12 +18837,10 @@ type OrganizationConformancePack struct { // Location of an Amazon S3 bucket where AWS Config can deliver evaluation results // and conformance pack template that is used to create a pack. - // - // DeliveryS3Bucket is a required field - DeliveryS3Bucket *string `min:"3" type:"string" required:"true"` + DeliveryS3Bucket *string `type:"string"` // Any folder structure you want to add to an Amazon S3 bucket. - DeliveryS3KeyPrefix *string `min:"1" type:"string"` + DeliveryS3KeyPrefix *string `type:"string"` // A comma-separated list of accounts excluded from organization conformance // pack. @@ -19009,8 +19132,8 @@ func (s *OrganizationConformancePackStatus) SetStatus(v string) *OrganizationCon // You have specified a template that is not valid or supported. type OrganizationConformancePackTemplateValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19027,17 +19150,17 @@ func (s OrganizationConformancePackTemplateValidationException) GoString() strin func newErrorOrganizationConformancePackTemplateValidationException(v protocol.ResponseMetadata) error { return &OrganizationConformancePackTemplateValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OrganizationConformancePackTemplateValidationException) Code() string { +func (s *OrganizationConformancePackTemplateValidationException) Code() string { return "OrganizationConformancePackTemplateValidationException" } // Message returns the exception's message. -func (s OrganizationConformancePackTemplateValidationException) Message() string { +func (s *OrganizationConformancePackTemplateValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19045,22 +19168,22 @@ func (s OrganizationConformancePackTemplateValidationException) Message() string } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s OrganizationConformancePackTemplateValidationException) OrigErr() error { +func (s *OrganizationConformancePackTemplateValidationException) OrigErr() error { return nil } -func (s OrganizationConformancePackTemplateValidationException) Error() string { +func (s *OrganizationConformancePackTemplateValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OrganizationConformancePackTemplateValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OrganizationConformancePackTemplateValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OrganizationConformancePackTemplateValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *OrganizationConformancePackTemplateValidationException) RequestID() string { + return s.RespMetadata.RequestID } // An object that specifies organization custom rule metadata such as resource @@ -19420,8 +19543,8 @@ func (s *OrganizationResourceDetailedStatusFilters) SetStatus(v string) *Organiz // The configuration item size is outside the allowable range. type OversizedConfigurationItemException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19438,17 +19561,17 @@ func (s OversizedConfigurationItemException) GoString() string { func newErrorOversizedConfigurationItemException(v protocol.ResponseMetadata) error { return &OversizedConfigurationItemException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OversizedConfigurationItemException) Code() string { +func (s *OversizedConfigurationItemException) Code() string { return "OversizedConfigurationItemException" } // Message returns the exception's message. -func (s OversizedConfigurationItemException) Message() string { +func (s *OversizedConfigurationItemException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19456,22 +19579,22 @@ func (s OversizedConfigurationItemException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OversizedConfigurationItemException) OrigErr() error { +func (s *OversizedConfigurationItemException) OrigErr() error { return nil } -func (s OversizedConfigurationItemException) Error() string { +func (s *OversizedConfigurationItemException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OversizedConfigurationItemException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OversizedConfigurationItemException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s OversizedConfigurationItemException) RequestID() string { - return s.respMetadata.RequestID +func (s *OversizedConfigurationItemException) RequestID() string { + return s.RespMetadata.RequestID } // An object that represents the account ID and region of an aggregator account @@ -19868,12 +19991,10 @@ type PutConformancePackInput struct { ConformancePackName *string `min:"1" type:"string" required:"true"` // AWS Config stores intermediate files while processing conformance pack template. - // - // DeliveryS3Bucket is a required field - DeliveryS3Bucket *string `min:"3" type:"string" required:"true"` + DeliveryS3Bucket *string `type:"string"` // The prefix for the Amazon S3 bucket. - DeliveryS3KeyPrefix *string `min:"1" type:"string"` + DeliveryS3KeyPrefix *string `type:"string"` // A string containing full conformance pack template body. Structure containing // the template body with a minimum length of 1 byte and a maximum length of @@ -19910,15 +20031,6 @@ func (s *PutConformancePackInput) Validate() error { if s.ConformancePackName != nil && len(*s.ConformancePackName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ConformancePackName", 1)) } - if s.DeliveryS3Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("DeliveryS3Bucket")) - } - if s.DeliveryS3Bucket != nil && len(*s.DeliveryS3Bucket) < 3 { - invalidParams.Add(request.NewErrParamMinLen("DeliveryS3Bucket", 3)) - } - if s.DeliveryS3KeyPrefix != nil && len(*s.DeliveryS3KeyPrefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DeliveryS3KeyPrefix", 1)) - } if s.TemplateBody != nil && len(*s.TemplateBody) < 1 { invalidParams.Add(request.NewErrParamMinLen("TemplateBody", 1)) } @@ -20272,12 +20384,10 @@ type PutOrganizationConformancePackInput struct { // The delivery bucket name should start with awsconfigconforms. For example: // "Resource": "arn:aws:s3:::your_bucket_name/*". For more information, see // Permissions for cross account bucket access (https://docs.aws.amazon.com/config/latest/developerguide/conformance-pack-organization-apis.html). - // - // DeliveryS3Bucket is a required field - DeliveryS3Bucket *string `min:"3" type:"string" required:"true"` + DeliveryS3Bucket *string `type:"string"` // The prefix for the Amazon S3 bucket. - DeliveryS3KeyPrefix *string `min:"1" type:"string"` + DeliveryS3KeyPrefix *string `type:"string"` // A list of AWS accounts to be excluded from an organization conformance pack // while deploying a conformance pack. @@ -20313,15 +20423,6 @@ func (s PutOrganizationConformancePackInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *PutOrganizationConformancePackInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "PutOrganizationConformancePackInput"} - if s.DeliveryS3Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("DeliveryS3Bucket")) - } - if s.DeliveryS3Bucket != nil && len(*s.DeliveryS3Bucket) < 3 { - invalidParams.Add(request.NewErrParamMinLen("DeliveryS3Bucket", 3)) - } - if s.DeliveryS3KeyPrefix != nil && len(*s.DeliveryS3KeyPrefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DeliveryS3KeyPrefix", 1)) - } if s.OrganizationConformancePackName == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationConformancePackName")) } @@ -21000,8 +21101,8 @@ type RemediationConfiguration struct { // select a number, the default is 5. 
// // For example, if you specify MaximumAutomaticAttempts as 5 with RetryAttemptsSeconds - // as 50 seconds, AWS Config throws an exception after the 5th failed attempt - // within 50 seconds. + // as 50 seconds, AWS Config will put a RemediationException on your behalf + // for the failing resource after the 5th failed attempt within 50 seconds. MaximumAutomaticAttempts *int64 `min:"1" type:"integer"` // An object of the RemediationParameterValue. @@ -21029,6 +21130,9 @@ type RemediationConfiguration struct { TargetType *string `type:"string" required:"true" enum:"RemediationTargetType"` // Version of the target. For example, version of the SSM document. + // + // If you make backward incompatible changes to the SSM document, you must call + // PutRemediationConfiguration API again to ensure the remediations can run. TargetVersion *string `type:"string"` } @@ -21409,8 +21513,8 @@ func (s *RemediationExecutionStep) SetStopTime(v time.Time) *RemediationExecutio // Remediation action is in progress. You can either cancel execution in AWS // Systems Manager or wait and try again later. type RemediationInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21427,17 +21531,17 @@ func (s RemediationInProgressException) GoString() string { func newErrorRemediationInProgressException(v protocol.ResponseMetadata) error { return &RemediationInProgressException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RemediationInProgressException) Code() string { +func (s *RemediationInProgressException) Code() string { return "RemediationInProgressException" } // Message returns the exception's message. -func (s RemediationInProgressException) Message() string { +func (s *RemediationInProgressException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21445,22 +21549,22 @@ func (s RemediationInProgressException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RemediationInProgressException) OrigErr() error { +func (s *RemediationInProgressException) OrigErr() error { return nil } -func (s RemediationInProgressException) Error() string { +func (s *RemediationInProgressException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RemediationInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RemediationInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RemediationInProgressException) RequestID() string { - return s.respMetadata.RequestID +func (s *RemediationInProgressException) RequestID() string { + return s.RespMetadata.RequestID } // The value is either a dynamic (resource) value or a static value. You must @@ -21751,8 +21855,8 @@ func (s *ResourceIdentifier) SetResourceType(v string) *ResourceIdentifier { // * For DeleteConformancePack, a conformance pack creation, update, and // deletion is in progress. Try your request again later. 
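// Illustrative sketch (not generated SDK code) of the retry semantics described
// above: with MaximumAutomaticAttempts of 5 and RetryAttemptsSeconds of 50, AWS
// Config records a remediation exception after the 5th failed attempt within 50
// seconds. The rule name and SSM document are placeholders; assumes a configured
// *configservice.ConfigService client named conn and the aws/log imports.
//
//	_, err := conn.PutRemediationConfigurations(&configservice.PutRemediationConfigurationsInput{
//		RemediationConfigurations: []*configservice.RemediationConfiguration{{
//			ConfigRuleName:           aws.String("example-rule"),
//			TargetType:               aws.String(configservice.RemediationTargetTypeSsmDocument),
//			TargetId:                 aws.String("AWS-PublishSNSNotification"),
//			Automatic:                aws.Bool(true),
//			MaximumAutomaticAttempts: aws.Int64(5),
//			RetryAttemptsSeconds:     aws.Int64(50),
//		}},
//	})
//	if err != nil {
//		log.Fatal(err)
//	}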
type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21769,17 +21873,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21787,22 +21891,22 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The details that identify a resource within AWS Config, including the resource @@ -21864,8 +21968,8 @@ func (s *ResourceKey) SetResourceType(v string) *ResourceKey { // You have specified a resource that is either unknown or has not been discovered. type ResourceNotDiscoveredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21882,17 +21986,17 @@ func (s ResourceNotDiscoveredException) GoString() string { func newErrorResourceNotDiscoveredException(v protocol.ResponseMetadata) error { return &ResourceNotDiscoveredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotDiscoveredException) Code() string { +func (s *ResourceNotDiscoveredException) Code() string { return "ResourceNotDiscoveredException" } // Message returns the exception's message. -func (s ResourceNotDiscoveredException) Message() string { +func (s *ResourceNotDiscoveredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21900,28 +22004,28 @@ func (s ResourceNotDiscoveredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotDiscoveredException) OrigErr() error { +func (s *ResourceNotDiscoveredException) OrigErr() error { return nil } -func (s ResourceNotDiscoveredException) Error() string { +func (s *ResourceNotDiscoveredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceNotDiscoveredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotDiscoveredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotDiscoveredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotDiscoveredException) RequestID() string { + return s.RespMetadata.RequestID } // You have specified a resource that does not exist. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -21938,17 +22042,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21956,22 +22060,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The dynamic value of the resource. @@ -22152,6 +22256,8 @@ type SelectAggregateResourceConfigInput struct { // The maximum number of query results returned on each page. Limit *int64 `type:"integer"` + // The maximum number of query results returned on each page. AWS Config also + // allows the Limit request parameter. MaxResults *int64 `type:"integer"` // The nextToken string returned in a previous request that you use to request @@ -23074,8 +23180,8 @@ func (s TagResourceOutput) GoString() string { // You have reached the limit of the number of tags you can use. You have more // than 50 tags. type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -23092,17 +23198,17 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23110,22 +23216,22 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { +func (s *TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *TooManyTagsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -23204,8 +23310,8 @@ func (s UntagResourceOutput) GoString() string { // The requested action is not valid. type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -23222,17 +23328,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. -func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23240,22 +23346,22 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } const ( @@ -23269,6 +23375,15 @@ const ( AggregatedSourceStatusTypeOutdated = "OUTDATED" ) +// AggregatedSourceStatusType_Values returns all elements of the AggregatedSourceStatusType enum +func AggregatedSourceStatusType_Values() []string { + return []string{ + AggregatedSourceStatusTypeFailed, + AggregatedSourceStatusTypeSucceeded, + AggregatedSourceStatusTypeOutdated, + } +} + const ( // AggregatedSourceTypeAccount is a AggregatedSourceType enum value AggregatedSourceTypeAccount = "ACCOUNT" @@ -23277,6 +23392,14 @@ const ( AggregatedSourceTypeOrganization = "ORGANIZATION" ) +// AggregatedSourceType_Values returns all elements of the AggregatedSourceType enum +func AggregatedSourceType_Values() []string { + return []string{ + AggregatedSourceTypeAccount, + AggregatedSourceTypeOrganization, + } +} + const ( // ChronologicalOrderReverse is a ChronologicalOrder enum value ChronologicalOrderReverse = "Reverse" @@ -23285,6 +23408,14 @@ const ( ChronologicalOrderForward = "Forward" ) +// ChronologicalOrder_Values returns all elements of the ChronologicalOrder enum +func ChronologicalOrder_Values() []string { + return []string{ + ChronologicalOrderReverse, + ChronologicalOrderForward, + } +} + const ( // ComplianceTypeCompliant is a ComplianceType enum value ComplianceTypeCompliant = "COMPLIANT" @@ -23299,6 +23430,16 @@ const ( ComplianceTypeInsufficientData = "INSUFFICIENT_DATA" ) +// ComplianceType_Values returns all elements of the ComplianceType enum +func ComplianceType_Values() []string { + return []string{ + ComplianceTypeCompliant, + ComplianceTypeNonCompliant, + ComplianceTypeNotApplicable, + ComplianceTypeInsufficientData, + } +} + const ( // ConfigRuleComplianceSummaryGroupKeyAccountId is a ConfigRuleComplianceSummaryGroupKey enum value ConfigRuleComplianceSummaryGroupKeyAccountId = "ACCOUNT_ID" @@ -23307,6 +23448,14 @@ const ( ConfigRuleComplianceSummaryGroupKeyAwsRegion = "AWS_REGION" ) +// ConfigRuleComplianceSummaryGroupKey_Values returns all elements of the ConfigRuleComplianceSummaryGroupKey enum +func ConfigRuleComplianceSummaryGroupKey_Values() []string { + return []string{ + ConfigRuleComplianceSummaryGroupKeyAccountId, + ConfigRuleComplianceSummaryGroupKeyAwsRegion, + } +} + const ( // ConfigRuleStateActive is a ConfigRuleState enum value ConfigRuleStateActive = "ACTIVE" @@ -23321,6 +23470,16 @@ const ( ConfigRuleStateEvaluating = "EVALUATING" ) +// ConfigRuleState_Values returns all elements of the ConfigRuleState enum +func ConfigRuleState_Values() []string { + return []string{ + ConfigRuleStateActive, + ConfigRuleStateDeleting, + ConfigRuleStateDeletingResults, + ConfigRuleStateEvaluating, + } +} + const ( // ConfigurationItemStatusOk is a ConfigurationItemStatus enum value ConfigurationItemStatusOk = "OK" @@ -23338,6 +23497,17 @@ const ( ConfigurationItemStatusResourceDeletedNotRecorded = "ResourceDeletedNotRecorded" ) +// ConfigurationItemStatus_Values returns all elements of the ConfigurationItemStatus enum +func ConfigurationItemStatus_Values() []string { + return []string{ + ConfigurationItemStatusOk, + ConfigurationItemStatusResourceDiscovered, + ConfigurationItemStatusResourceNotRecorded, + ConfigurationItemStatusResourceDeleted, + ConfigurationItemStatusResourceDeletedNotRecorded, + } +} + const ( // ConformancePackComplianceTypeCompliant is a ConformancePackComplianceType enum value 
ConformancePackComplianceTypeCompliant = "COMPLIANT" @@ -23346,6 +23516,14 @@ const ( ConformancePackComplianceTypeNonCompliant = "NON_COMPLIANT" ) +// ConformancePackComplianceType_Values returns all elements of the ConformancePackComplianceType enum +func ConformancePackComplianceType_Values() []string { + return []string{ + ConformancePackComplianceTypeCompliant, + ConformancePackComplianceTypeNonCompliant, + } +} + const ( // ConformancePackStateCreateInProgress is a ConformancePackState enum value ConformancePackStateCreateInProgress = "CREATE_IN_PROGRESS" @@ -23363,6 +23541,17 @@ const ( ConformancePackStateDeleteFailed = "DELETE_FAILED" ) +// ConformancePackState_Values returns all elements of the ConformancePackState enum +func ConformancePackState_Values() []string { + return []string{ + ConformancePackStateCreateInProgress, + ConformancePackStateCreateComplete, + ConformancePackStateCreateFailed, + ConformancePackStateDeleteInProgress, + ConformancePackStateDeleteFailed, + } +} + const ( // DeliveryStatusSuccess is a DeliveryStatus enum value DeliveryStatusSuccess = "Success" @@ -23374,11 +23563,27 @@ const ( DeliveryStatusNotApplicable = "Not_Applicable" ) +// DeliveryStatus_Values returns all elements of the DeliveryStatus enum +func DeliveryStatus_Values() []string { + return []string{ + DeliveryStatusSuccess, + DeliveryStatusFailure, + DeliveryStatusNotApplicable, + } +} + const ( // EventSourceAwsConfig is a EventSource enum value EventSourceAwsConfig = "aws.config" ) +// EventSource_Values returns all elements of the EventSource enum +func EventSource_Values() []string { + return []string{ + EventSourceAwsConfig, + } +} + const ( // MaximumExecutionFrequencyOneHour is a MaximumExecutionFrequency enum value MaximumExecutionFrequencyOneHour = "One_Hour" @@ -23396,6 +23601,17 @@ const ( MaximumExecutionFrequencyTwentyFourHours = "TwentyFour_Hours" ) +// MaximumExecutionFrequency_Values returns all elements of the MaximumExecutionFrequency enum +func MaximumExecutionFrequency_Values() []string { + return []string{ + MaximumExecutionFrequencyOneHour, + MaximumExecutionFrequencyThreeHours, + MaximumExecutionFrequencySixHours, + MaximumExecutionFrequencyTwelveHours, + MaximumExecutionFrequencyTwentyFourHours, + } +} + const ( // MemberAccountRuleStatusCreateSuccessful is a MemberAccountRuleStatus enum value MemberAccountRuleStatusCreateSuccessful = "CREATE_SUCCESSFUL" @@ -23425,6 +23641,21 @@ const ( MemberAccountRuleStatusUpdateFailed = "UPDATE_FAILED" ) +// MemberAccountRuleStatus_Values returns all elements of the MemberAccountRuleStatus enum +func MemberAccountRuleStatus_Values() []string { + return []string{ + MemberAccountRuleStatusCreateSuccessful, + MemberAccountRuleStatusCreateInProgress, + MemberAccountRuleStatusCreateFailed, + MemberAccountRuleStatusDeleteSuccessful, + MemberAccountRuleStatusDeleteFailed, + MemberAccountRuleStatusDeleteInProgress, + MemberAccountRuleStatusUpdateSuccessful, + MemberAccountRuleStatusUpdateInProgress, + MemberAccountRuleStatusUpdateFailed, + } +} + const ( // MessageTypeConfigurationItemChangeNotification is a MessageType enum value MessageTypeConfigurationItemChangeNotification = "ConfigurationItemChangeNotification" @@ -23439,6 +23670,16 @@ const ( MessageTypeOversizedConfigurationItemChangeNotification = "OversizedConfigurationItemChangeNotification" ) +// MessageType_Values returns all elements of the MessageType enum +func MessageType_Values() []string { + return []string{ + MessageTypeConfigurationItemChangeNotification, + 
MessageTypeConfigurationSnapshotDeliveryCompleted, + MessageTypeScheduledNotification, + MessageTypeOversizedConfigurationItemChangeNotification, + } +} + const ( // OrganizationConfigRuleTriggerTypeConfigurationItemChangeNotification is a OrganizationConfigRuleTriggerType enum value OrganizationConfigRuleTriggerTypeConfigurationItemChangeNotification = "ConfigurationItemChangeNotification" @@ -23450,6 +23691,15 @@ const ( OrganizationConfigRuleTriggerTypeScheduledNotification = "ScheduledNotification" ) +// OrganizationConfigRuleTriggerType_Values returns all elements of the OrganizationConfigRuleTriggerType enum +func OrganizationConfigRuleTriggerType_Values() []string { + return []string{ + OrganizationConfigRuleTriggerTypeConfigurationItemChangeNotification, + OrganizationConfigRuleTriggerTypeOversizedConfigurationItemChangeNotification, + OrganizationConfigRuleTriggerTypeScheduledNotification, + } +} + const ( // OrganizationResourceDetailedStatusCreateSuccessful is a OrganizationResourceDetailedStatus enum value OrganizationResourceDetailedStatusCreateSuccessful = "CREATE_SUCCESSFUL" @@ -23479,6 +23729,21 @@ const ( OrganizationResourceDetailedStatusUpdateFailed = "UPDATE_FAILED" ) +// OrganizationResourceDetailedStatus_Values returns all elements of the OrganizationResourceDetailedStatus enum +func OrganizationResourceDetailedStatus_Values() []string { + return []string{ + OrganizationResourceDetailedStatusCreateSuccessful, + OrganizationResourceDetailedStatusCreateInProgress, + OrganizationResourceDetailedStatusCreateFailed, + OrganizationResourceDetailedStatusDeleteSuccessful, + OrganizationResourceDetailedStatusDeleteFailed, + OrganizationResourceDetailedStatusDeleteInProgress, + OrganizationResourceDetailedStatusUpdateSuccessful, + OrganizationResourceDetailedStatusUpdateInProgress, + OrganizationResourceDetailedStatusUpdateFailed, + } +} + const ( // OrganizationResourceStatusCreateSuccessful is a OrganizationResourceStatus enum value OrganizationResourceStatusCreateSuccessful = "CREATE_SUCCESSFUL" @@ -23508,6 +23773,21 @@ const ( OrganizationResourceStatusUpdateFailed = "UPDATE_FAILED" ) +// OrganizationResourceStatus_Values returns all elements of the OrganizationResourceStatus enum +func OrganizationResourceStatus_Values() []string { + return []string{ + OrganizationResourceStatusCreateSuccessful, + OrganizationResourceStatusCreateInProgress, + OrganizationResourceStatusCreateFailed, + OrganizationResourceStatusDeleteSuccessful, + OrganizationResourceStatusDeleteFailed, + OrganizationResourceStatusDeleteInProgress, + OrganizationResourceStatusUpdateSuccessful, + OrganizationResourceStatusUpdateInProgress, + OrganizationResourceStatusUpdateFailed, + } +} + const ( // OrganizationRuleStatusCreateSuccessful is a OrganizationRuleStatus enum value OrganizationRuleStatusCreateSuccessful = "CREATE_SUCCESSFUL" @@ -23537,6 +23817,21 @@ const ( OrganizationRuleStatusUpdateFailed = "UPDATE_FAILED" ) +// OrganizationRuleStatus_Values returns all elements of the OrganizationRuleStatus enum +func OrganizationRuleStatus_Values() []string { + return []string{ + OrganizationRuleStatusCreateSuccessful, + OrganizationRuleStatusCreateInProgress, + OrganizationRuleStatusCreateFailed, + OrganizationRuleStatusDeleteSuccessful, + OrganizationRuleStatusDeleteFailed, + OrganizationRuleStatusDeleteInProgress, + OrganizationRuleStatusUpdateSuccessful, + OrganizationRuleStatusUpdateInProgress, + OrganizationRuleStatusUpdateFailed, + } +} + const ( // OwnerCustomLambda is a Owner enum value 
OwnerCustomLambda = "CUSTOM_LAMBDA" @@ -23545,6 +23840,14 @@ const ( OwnerAws = "AWS" ) +// Owner_Values returns all elements of the Owner enum +func Owner_Values() []string { + return []string{ + OwnerCustomLambda, + OwnerAws, + } +} + const ( // RecorderStatusPending is a RecorderStatus enum value RecorderStatusPending = "Pending" @@ -23556,6 +23859,15 @@ const ( RecorderStatusFailure = "Failure" ) +// RecorderStatus_Values returns all elements of the RecorderStatus enum +func RecorderStatus_Values() []string { + return []string{ + RecorderStatusPending, + RecorderStatusSuccess, + RecorderStatusFailure, + } +} + const ( // RemediationExecutionStateQueued is a RemediationExecutionState enum value RemediationExecutionStateQueued = "QUEUED" @@ -23570,6 +23882,16 @@ const ( RemediationExecutionStateFailed = "FAILED" ) +// RemediationExecutionState_Values returns all elements of the RemediationExecutionState enum +func RemediationExecutionState_Values() []string { + return []string{ + RemediationExecutionStateQueued, + RemediationExecutionStateInProgress, + RemediationExecutionStateSucceeded, + RemediationExecutionStateFailed, + } +} + const ( // RemediationExecutionStepStateSucceeded is a RemediationExecutionStepState enum value RemediationExecutionStepStateSucceeded = "SUCCEEDED" @@ -23581,11 +23903,27 @@ const ( RemediationExecutionStepStateFailed = "FAILED" ) +// RemediationExecutionStepState_Values returns all elements of the RemediationExecutionStepState enum +func RemediationExecutionStepState_Values() []string { + return []string{ + RemediationExecutionStepStateSucceeded, + RemediationExecutionStepStatePending, + RemediationExecutionStepStateFailed, + } +} + const ( // RemediationTargetTypeSsmDocument is a RemediationTargetType enum value RemediationTargetTypeSsmDocument = "SSM_DOCUMENT" ) +// RemediationTargetType_Values returns all elements of the RemediationTargetType enum +func RemediationTargetType_Values() []string { + return []string{ + RemediationTargetTypeSsmDocument, + } +} + const ( // ResourceCountGroupKeyResourceType is a ResourceCountGroupKey enum value ResourceCountGroupKeyResourceType = "RESOURCE_TYPE" @@ -23597,6 +23935,15 @@ const ( ResourceCountGroupKeyAwsRegion = "AWS_REGION" ) +// ResourceCountGroupKey_Values returns all elements of the ResourceCountGroupKey enum +func ResourceCountGroupKey_Values() []string { + return []string{ + ResourceCountGroupKeyResourceType, + ResourceCountGroupKeyAccountId, + ResourceCountGroupKeyAwsRegion, + } +} + const ( // ResourceTypeAwsEc2CustomerGateway is a ResourceType enum value ResourceTypeAwsEc2CustomerGateway = "AWS::EC2::CustomerGateway" @@ -23867,9 +24214,124 @@ const ( // ResourceTypeAwsQldbLedger is a ResourceType enum value ResourceTypeAwsQldbLedger = "AWS::QLDB::Ledger" + + // ResourceTypeAwsSecretsManagerSecret is a ResourceType enum value + ResourceTypeAwsSecretsManagerSecret = "AWS::SecretsManager::Secret" + + // ResourceTypeAwsSnsTopic is a ResourceType enum value + ResourceTypeAwsSnsTopic = "AWS::SNS::Topic" + + // ResourceTypeAwsSsmFileData is a ResourceType enum value + ResourceTypeAwsSsmFileData = "AWS::SSM::FileData" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeAwsEc2CustomerGateway, + ResourceTypeAwsEc2Eip, + ResourceTypeAwsEc2Host, + ResourceTypeAwsEc2Instance, + ResourceTypeAwsEc2InternetGateway, + ResourceTypeAwsEc2NetworkAcl, + ResourceTypeAwsEc2NetworkInterface, + ResourceTypeAwsEc2RouteTable, + 
ResourceTypeAwsEc2SecurityGroup, + ResourceTypeAwsEc2Subnet, + ResourceTypeAwsCloudTrailTrail, + ResourceTypeAwsEc2Volume, + ResourceTypeAwsEc2Vpc, + ResourceTypeAwsEc2Vpnconnection, + ResourceTypeAwsEc2Vpngateway, + ResourceTypeAwsEc2RegisteredHainstance, + ResourceTypeAwsEc2NatGateway, + ResourceTypeAwsEc2EgressOnlyInternetGateway, + ResourceTypeAwsEc2Vpcendpoint, + ResourceTypeAwsEc2VpcendpointService, + ResourceTypeAwsEc2FlowLog, + ResourceTypeAwsEc2VpcpeeringConnection, + ResourceTypeAwsElasticsearchDomain, + ResourceTypeAwsIamGroup, + ResourceTypeAwsIamPolicy, + ResourceTypeAwsIamRole, + ResourceTypeAwsIamUser, + ResourceTypeAwsElasticLoadBalancingV2LoadBalancer, + ResourceTypeAwsAcmCertificate, + ResourceTypeAwsRdsDbinstance, + ResourceTypeAwsRdsDbsubnetGroup, + ResourceTypeAwsRdsDbsecurityGroup, + ResourceTypeAwsRdsDbsnapshot, + ResourceTypeAwsRdsDbcluster, + ResourceTypeAwsRdsDbclusterSnapshot, + ResourceTypeAwsRdsEventSubscription, + ResourceTypeAwsS3Bucket, + ResourceTypeAwsS3AccountPublicAccessBlock, + ResourceTypeAwsRedshiftCluster, + ResourceTypeAwsRedshiftClusterSnapshot, + ResourceTypeAwsRedshiftClusterParameterGroup, + ResourceTypeAwsRedshiftClusterSecurityGroup, + ResourceTypeAwsRedshiftClusterSubnetGroup, + ResourceTypeAwsRedshiftEventSubscription, + ResourceTypeAwsSsmManagedInstanceInventory, + ResourceTypeAwsCloudWatchAlarm, + ResourceTypeAwsCloudFormationStack, + ResourceTypeAwsElasticLoadBalancingLoadBalancer, + ResourceTypeAwsAutoScalingAutoScalingGroup, + ResourceTypeAwsAutoScalingLaunchConfiguration, + ResourceTypeAwsAutoScalingScalingPolicy, + ResourceTypeAwsAutoScalingScheduledAction, + ResourceTypeAwsDynamoDbTable, + ResourceTypeAwsCodeBuildProject, + ResourceTypeAwsWafRateBasedRule, + ResourceTypeAwsWafRule, + ResourceTypeAwsWafRuleGroup, + ResourceTypeAwsWafWebAcl, + ResourceTypeAwsWafregionalRateBasedRule, + ResourceTypeAwsWafregionalRule, + ResourceTypeAwsWafregionalRuleGroup, + ResourceTypeAwsWafregionalWebAcl, + ResourceTypeAwsCloudFrontDistribution, + ResourceTypeAwsCloudFrontStreamingDistribution, + ResourceTypeAwsLambdaFunction, + ResourceTypeAwsElasticBeanstalkApplication, + ResourceTypeAwsElasticBeanstalkApplicationVersion, + ResourceTypeAwsElasticBeanstalkEnvironment, + ResourceTypeAwsWafv2WebAcl, + ResourceTypeAwsWafv2RuleGroup, + ResourceTypeAwsWafv2Ipset, + ResourceTypeAwsWafv2RegexPatternSet, + ResourceTypeAwsWafv2ManagedRuleSet, + ResourceTypeAwsXrayEncryptionConfig, + ResourceTypeAwsSsmAssociationCompliance, + ResourceTypeAwsSsmPatchCompliance, + ResourceTypeAwsShieldProtection, + ResourceTypeAwsShieldRegionalProtection, + ResourceTypeAwsConfigResourceCompliance, + ResourceTypeAwsApiGatewayStage, + ResourceTypeAwsApiGatewayRestApi, + ResourceTypeAwsApiGatewayV2Stage, + ResourceTypeAwsApiGatewayV2Api, + ResourceTypeAwsCodePipelinePipeline, + ResourceTypeAwsServiceCatalogCloudFormationProvisionedProduct, + ResourceTypeAwsServiceCatalogCloudFormationProduct, + ResourceTypeAwsServiceCatalogPortfolio, + ResourceTypeAwsSqsQueue, + ResourceTypeAwsKmsKey, + ResourceTypeAwsQldbLedger, + ResourceTypeAwsSecretsManagerSecret, + ResourceTypeAwsSnsTopic, + ResourceTypeAwsSsmFileData, + } +} + const ( // ResourceValueTypeResourceId is a ResourceValueType enum value ResourceValueTypeResourceId = "RESOURCE_ID" ) + +// ResourceValueType_Values returns all elements of the ResourceValueType enum +func ResourceValueType_Values() []string { + return []string{ + ResourceValueTypeResourceId, + } +} diff --git 
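As an aside, the generated `*_Values()` helpers added above give callers a single source of truth for each enum. A minimal sketch of how such a helper is typically wired into provider-side schema validation, assuming the Terraform Plugin SDK v1 import path used by this fork (adjust to v2 if that is what is vendored); the attribute name "resource_type" is illustrative only and not part of this patch.

package example

import (
	"github.com/aws/aws-sdk-go/service/configservice"
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)

// resourceTypeSchema validates a hypothetical "resource_type" attribute
// against every ResourceType enum constant, including the three values
// (SecretsManager secret, SNS topic, SSM FileData) added by this vendor bump.
func resourceTypeSchema() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Required:     true,
		ValidateFunc: validation.StringInSlice(configservice.ResourceType_Values(), false),
	}
}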
a/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go index 9758b16a9..94347635d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/api.go index b29c73560..c9499e772 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/api.go @@ -413,7 +413,7 @@ func (c *CostandUsageReportService) PutReportDefinitionWithContext(ctx aws.Conte type DeleteReportDefinitionInput struct { _ struct{} `type:"structure"` - // The name of the report that you want to create. The name must be unique, + // The name of the report that you want to delete. The name must be unique, // is case sensitive, and can't include spaces. ReportName *string `type:"string"` } @@ -540,8 +540,8 @@ func (s *DescribeReportDefinitionsOutput) SetReportDefinitions(v []*ReportDefini // A report with the specified name already exists in the account. Specify a // different report name. type DuplicateReportNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message to show the detail of the exception. Message_ *string `locationName:"Message" type:"string"` @@ -559,17 +559,17 @@ func (s DuplicateReportNameException) GoString() string { func newErrorDuplicateReportNameException(v protocol.ResponseMetadata) error { return &DuplicateReportNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicateReportNameException) Code() string { +func (s *DuplicateReportNameException) Code() string { return "DuplicateReportNameException" } // Message returns the exception's message. -func (s DuplicateReportNameException) Message() string { +func (s *DuplicateReportNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -577,29 +577,29 @@ func (s DuplicateReportNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicateReportNameException) OrigErr() error { +func (s *DuplicateReportNameException) OrigErr() error { return nil } -func (s DuplicateReportNameException) Error() string { +func (s *DuplicateReportNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicateReportNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicateReportNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s DuplicateReportNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicateReportNameException) RequestID() string { + return s.RespMetadata.RequestID } // An error on the server occurred during the processing of your request. Try // again later. type InternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message to show the detail of the exception. Message_ *string `locationName:"Message" type:"string"` @@ -617,17 +617,17 @@ func (s InternalErrorException) GoString() string { func newErrorInternalErrorException(v protocol.ResponseMetadata) error { return &InternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalErrorException) Code() string { +func (s *InternalErrorException) Code() string { return "InternalErrorException" } // Message returns the exception's message. -func (s InternalErrorException) Message() string { +func (s *InternalErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -635,22 +635,22 @@ func (s InternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalErrorException) OrigErr() error { +func (s *InternalErrorException) OrigErr() error { return nil } -func (s InternalErrorException) Error() string { +func (s *InternalErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalErrorException) RequestID() string { + return s.RespMetadata.RequestID } type ModifyReportDefinitionInput struct { @@ -964,8 +964,8 @@ func (s *ReportDefinition) SetTimeUnit(v string) *ReportDefinition { // This account already has five reports defined. To define a new report, you // must delete an existing report. type ReportLimitReachedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message to show the detail of the exception. Message_ *string `locationName:"Message" type:"string"` @@ -983,17 +983,17 @@ func (s ReportLimitReachedException) GoString() string { func newErrorReportLimitReachedException(v protocol.ResponseMetadata) error { return &ReportLimitReachedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ReportLimitReachedException) Code() string { +func (s *ReportLimitReachedException) Code() string { return "ReportLimitReachedException" } // Message returns the exception's message. -func (s ReportLimitReachedException) Message() string { +func (s *ReportLimitReachedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1001,28 +1001,28 @@ func (s ReportLimitReachedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ReportLimitReachedException) OrigErr() error { +func (s *ReportLimitReachedException) OrigErr() error { return nil } -func (s ReportLimitReachedException) Error() string { +func (s *ReportLimitReachedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ReportLimitReachedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ReportLimitReachedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ReportLimitReachedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ReportLimitReachedException) RequestID() string { + return s.RespMetadata.RequestID } // The input fails to satisfy the constraints specified by an AWS service. type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message to show the detail of the exception. Message_ *string `locationName:"Message" type:"string"` @@ -1040,17 +1040,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. -func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1058,40 +1058,34 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } // The region of the S3 bucket that AWS delivers the report into. 
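Because the exception methods above now use pointer receivers and an exported RespMetadata field, callers match these errors against the pointer type. A minimal caller-side sketch, not part of this patch; the session wiring and report name are placeholders.

package example

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/costandusagereportservice"
)

func deleteReport(name string) error {
	svc := costandusagereportservice.New(session.Must(session.NewSession()))

	_, err := svc.DeleteReportDefinition(&costandusagereportservice.DeleteReportDefinitionInput{
		ReportName: aws.String(name), // placeholder value supplied by the caller
	})

	// Error, Code, Message, StatusCode and RequestID are now defined on the
	// pointer type, so errors.As must target *ValidationException.
	var ve *costandusagereportservice.ValidationException
	if errors.As(err, &ve) {
		return fmt.Errorf("report %q rejected (HTTP %d): %s", name, ve.StatusCode(), ve.Message())
	}
	return err
}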
const ( - // AWSRegionUsEast1 is a AWSRegion enum value - AWSRegionUsEast1 = "us-east-1" - - // AWSRegionUsWest1 is a AWSRegion enum value - AWSRegionUsWest1 = "us-west-1" - - // AWSRegionUsWest2 is a AWSRegion enum value - AWSRegionUsWest2 = "us-west-2" + // AWSRegionAfSouth1 is a AWSRegion enum value + AWSRegionAfSouth1 = "af-south-1" - // AWSRegionEuCentral1 is a AWSRegion enum value - AWSRegionEuCentral1 = "eu-central-1" + // AWSRegionApEast1 is a AWSRegion enum value + AWSRegionApEast1 = "ap-east-1" - // AWSRegionEuWest1 is a AWSRegion enum value - AWSRegionEuWest1 = "eu-west-1" + // AWSRegionApSouth1 is a AWSRegion enum value + AWSRegionApSouth1 = "ap-south-1" // AWSRegionApSoutheast1 is a AWSRegion enum value AWSRegionApSoutheast1 = "ap-southeast-1" @@ -1102,16 +1096,87 @@ const ( // AWSRegionApNortheast1 is a AWSRegion enum value AWSRegionApNortheast1 = "ap-northeast-1" - // AWSRegionEuNorth1 is a AWSRegion enum value - AWSRegionEuNorth1 = "eu-north-1" + // AWSRegionApNortheast2 is a AWSRegion enum value + AWSRegionApNortheast2 = "ap-northeast-2" // AWSRegionApNortheast3 is a AWSRegion enum value AWSRegionApNortheast3 = "ap-northeast-3" - // AWSRegionApEast1 is a AWSRegion enum value - AWSRegionApEast1 = "ap-east-1" + // AWSRegionCaCentral1 is a AWSRegion enum value + AWSRegionCaCentral1 = "ca-central-1" + + // AWSRegionEuCentral1 is a AWSRegion enum value + AWSRegionEuCentral1 = "eu-central-1" + + // AWSRegionEuWest1 is a AWSRegion enum value + AWSRegionEuWest1 = "eu-west-1" + + // AWSRegionEuWest2 is a AWSRegion enum value + AWSRegionEuWest2 = "eu-west-2" + + // AWSRegionEuWest3 is a AWSRegion enum value + AWSRegionEuWest3 = "eu-west-3" + + // AWSRegionEuNorth1 is a AWSRegion enum value + AWSRegionEuNorth1 = "eu-north-1" + + // AWSRegionEuSouth1 is a AWSRegion enum value + AWSRegionEuSouth1 = "eu-south-1" + + // AWSRegionMeSouth1 is a AWSRegion enum value + AWSRegionMeSouth1 = "me-south-1" + + // AWSRegionSaEast1 is a AWSRegion enum value + AWSRegionSaEast1 = "sa-east-1" + + // AWSRegionUsEast1 is a AWSRegion enum value + AWSRegionUsEast1 = "us-east-1" + + // AWSRegionUsEast2 is a AWSRegion enum value + AWSRegionUsEast2 = "us-east-2" + + // AWSRegionUsWest1 is a AWSRegion enum value + AWSRegionUsWest1 = "us-west-1" + + // AWSRegionUsWest2 is a AWSRegion enum value + AWSRegionUsWest2 = "us-west-2" + + // AWSRegionCnNorth1 is a AWSRegion enum value + AWSRegionCnNorth1 = "cn-north-1" + + // AWSRegionCnNorthwest1 is a AWSRegion enum value + AWSRegionCnNorthwest1 = "cn-northwest-1" ) +// AWSRegion_Values returns all elements of the AWSRegion enum +func AWSRegion_Values() []string { + return []string{ + AWSRegionAfSouth1, + AWSRegionApEast1, + AWSRegionApSouth1, + AWSRegionApSoutheast1, + AWSRegionApSoutheast2, + AWSRegionApNortheast1, + AWSRegionApNortheast2, + AWSRegionApNortheast3, + AWSRegionCaCentral1, + AWSRegionEuCentral1, + AWSRegionEuWest1, + AWSRegionEuWest2, + AWSRegionEuWest3, + AWSRegionEuNorth1, + AWSRegionEuSouth1, + AWSRegionMeSouth1, + AWSRegionSaEast1, + AWSRegionUsEast1, + AWSRegionUsEast2, + AWSRegionUsWest1, + AWSRegionUsWest2, + AWSRegionCnNorth1, + AWSRegionCnNorthwest1, + } +} + // The types of manifest that you want AWS to create for this report. 
const ( // AdditionalArtifactRedshift is a AdditionalArtifact enum value @@ -1124,6 +1189,15 @@ const ( AdditionalArtifactAthena = "ATHENA" ) +// AdditionalArtifact_Values returns all elements of the AdditionalArtifact enum +func AdditionalArtifact_Values() []string { + return []string{ + AdditionalArtifactRedshift, + AdditionalArtifactQuicksight, + AdditionalArtifactAthena, + } +} + // The compression format that AWS uses for the report. const ( // CompressionFormatZip is a CompressionFormat enum value @@ -1136,6 +1210,15 @@ const ( CompressionFormatParquet = "Parquet" ) +// CompressionFormat_Values returns all elements of the CompressionFormat enum +func CompressionFormat_Values() []string { + return []string{ + CompressionFormatZip, + CompressionFormatGzip, + CompressionFormatParquet, + } +} + // The format that AWS saves the report in. const ( // ReportFormatTextOrcsv is a ReportFormat enum value @@ -1145,6 +1228,14 @@ const ( ReportFormatParquet = "Parquet" ) +// ReportFormat_Values returns all elements of the ReportFormat enum +func ReportFormat_Values() []string { + return []string{ + ReportFormatTextOrcsv, + ReportFormatParquet, + } +} + const ( // ReportVersioningCreateNewReport is a ReportVersioning enum value ReportVersioningCreateNewReport = "CREATE_NEW_REPORT" @@ -1153,12 +1244,27 @@ const ( ReportVersioningOverwriteReport = "OVERWRITE_REPORT" ) +// ReportVersioning_Values returns all elements of the ReportVersioning enum +func ReportVersioning_Values() []string { + return []string{ + ReportVersioningCreateNewReport, + ReportVersioningOverwriteReport, + } +} + // Whether or not AWS includes resource IDs in the report. const ( // SchemaElementResources is a SchemaElement enum value SchemaElementResources = "RESOURCES" ) +// SchemaElement_Values returns all elements of the SchemaElement enum +func SchemaElement_Values() []string { + return []string{ + SchemaElementResources, + } +} + // The length of time covered by the report. 
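As a usage illustration (not part of this patch), the report enum constants above typically feed a PutReportDefinition call; the report name, bucket, and prefix below are placeholders.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/costandusagereportservice"
)

func putExampleReport() error {
	svc := costandusagereportservice.New(session.Must(session.NewSession()))

	_, err := svc.PutReportDefinition(&costandusagereportservice.PutReportDefinitionInput{
		ReportDefinition: &costandusagereportservice.ReportDefinition{
			ReportName:               aws.String("example-report"), // placeholder
			TimeUnit:                 aws.String(costandusagereportservice.TimeUnitHourly),
			Format:                   aws.String(costandusagereportservice.ReportFormatTextOrcsv),
			Compression:              aws.String(costandusagereportservice.CompressionFormatGzip),
			AdditionalSchemaElements: aws.StringSlice([]string{costandusagereportservice.SchemaElementResources}),
			S3Bucket:                 aws.String("example-bucket"), // placeholder
			S3Prefix:                 aws.String("cur"),            // placeholder
			S3Region:                 aws.String(costandusagereportservice.AWSRegionUsEast1),
			ReportVersioning:         aws.String(costandusagereportservice.ReportVersioningOverwriteReport),
		},
	})
	return err
}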
const ( // TimeUnitHourly is a TimeUnit enum value @@ -1166,4 +1272,16 @@ const ( // TimeUnitDaily is a TimeUnit enum value TimeUnitDaily = "DAILY" + + // TimeUnitMonthly is a TimeUnit enum value + TimeUnitMonthly = "MONTHLY" ) + +// TimeUnit_Values returns all elements of the TimeUnit enum +func TimeUnit_Values() []string { + return []string{ + TimeUnitHourly, + TimeUnitDaily, + TimeUnitMonthly, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go index 9a6288ce6..060ccd012 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go index 88ebd6c82..6bc40dc27 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/api.go @@ -61,7 +61,9 @@ func (c *DatabaseMigrationService) AddTagsToResourceRequest(input *AddTagsToReso // Adds metadata tags to an AWS DMS resource, including replication instance, // endpoint, security group, and migration task. These tags can also be used // with cost allocation reporting to track cost associated with DMS resources, -// or used in a Condition statement in an IAM policy for DMS. +// or used in a Condition statement in an IAM policy for DMS. For more information, +// see Tag (https://docs.aws.amazon.com/dms/latest/APIReference/API_Tag.html) +// data type description. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -176,6 +178,97 @@ func (c *DatabaseMigrationService) ApplyPendingMaintenanceActionWithContext(ctx return out, req.Send() } +const opCancelReplicationTaskAssessmentRun = "CancelReplicationTaskAssessmentRun" + +// CancelReplicationTaskAssessmentRunRequest generates a "aws/request.Request" representing the +// client's request for the CancelReplicationTaskAssessmentRun operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelReplicationTaskAssessmentRun for more information on using the CancelReplicationTaskAssessmentRun +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelReplicationTaskAssessmentRunRequest method. 
+// req, resp := client.CancelReplicationTaskAssessmentRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/CancelReplicationTaskAssessmentRun +func (c *DatabaseMigrationService) CancelReplicationTaskAssessmentRunRequest(input *CancelReplicationTaskAssessmentRunInput) (req *request.Request, output *CancelReplicationTaskAssessmentRunOutput) { + op := &request.Operation{ + Name: opCancelReplicationTaskAssessmentRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelReplicationTaskAssessmentRunInput{} + } + + output = &CancelReplicationTaskAssessmentRunOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelReplicationTaskAssessmentRun API operation for AWS Database Migration Service. +// +// Cancels a single premigration assessment run. +// +// This operation prevents any individual assessments from running if they haven't +// started running. It also attempts to cancel any individual assessments that +// are currently running. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation CancelReplicationTaskAssessmentRun for usage and error information. +// +// Returned Error Types: +// * AccessDeniedFault +// AWS DMS was denied access to the endpoint. Check that the role is correctly +// configured. +// +// * ResourceNotFoundFault +// The resource could not be found. +// +// * InvalidResourceStateFault +// The resource is in a state that prevents it from being used for database +// migration. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/CancelReplicationTaskAssessmentRun +func (c *DatabaseMigrationService) CancelReplicationTaskAssessmentRun(input *CancelReplicationTaskAssessmentRunInput) (*CancelReplicationTaskAssessmentRunOutput, error) { + req, out := c.CancelReplicationTaskAssessmentRunRequest(input) + return out, req.Send() +} + +// CancelReplicationTaskAssessmentRunWithContext is the same as CancelReplicationTaskAssessmentRun with the addition of +// the ability to pass a context and additional request options. +// +// See CancelReplicationTaskAssessmentRun for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) CancelReplicationTaskAssessmentRunWithContext(ctx aws.Context, input *CancelReplicationTaskAssessmentRunInput, opts ...request.Option) (*CancelReplicationTaskAssessmentRunOutput, error) { + req, out := c.CancelReplicationTaskAssessmentRunRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateEndpoint = "CreateEndpoint" // CreateEndpointRequest generates a "aws/request.Request" representing the @@ -444,9 +537,9 @@ func (c *DatabaseMigrationService) CreateReplicationInstanceRequest(input *Creat // AWS DMS requires that your account have certain roles with appropriate permissions // before you can create a replication instance. 
For information on the required // roles, see Creating the IAM Roles to Use With the AWS CLI and AWS DMS API -// (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.APIRole.html). +// (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#CHAP_Security.APIRole). // For information on the required permissions, see IAM Permissions Needed to -// Use AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.IAMPermissions.html). +// Use AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Security.html#CHAP_Security.IAMPermissions). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1295,6 +1388,97 @@ func (c *DatabaseMigrationService) DeleteReplicationTaskWithContext(ctx aws.Cont return out, req.Send() } +const opDeleteReplicationTaskAssessmentRun = "DeleteReplicationTaskAssessmentRun" + +// DeleteReplicationTaskAssessmentRunRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReplicationTaskAssessmentRun operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteReplicationTaskAssessmentRun for more information on using the DeleteReplicationTaskAssessmentRun +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteReplicationTaskAssessmentRunRequest method. +// req, resp := client.DeleteReplicationTaskAssessmentRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DeleteReplicationTaskAssessmentRun +func (c *DatabaseMigrationService) DeleteReplicationTaskAssessmentRunRequest(input *DeleteReplicationTaskAssessmentRunInput) (req *request.Request, output *DeleteReplicationTaskAssessmentRunOutput) { + op := &request.Operation{ + Name: opDeleteReplicationTaskAssessmentRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReplicationTaskAssessmentRunInput{} + } + + output = &DeleteReplicationTaskAssessmentRunOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteReplicationTaskAssessmentRun API operation for AWS Database Migration Service. +// +// Deletes the record of a single premigration assessment run. +// +// This operation removes all metadata that AWS DMS maintains about this assessment +// run. However, the operation leaves untouched all information about this assessment +// run that is stored in your Amazon S3 bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation DeleteReplicationTaskAssessmentRun for usage and error information. +// +// Returned Error Types: +// * AccessDeniedFault +// AWS DMS was denied access to the endpoint. Check that the role is correctly +// configured. 
+// +// * ResourceNotFoundFault +// The resource could not be found. +// +// * InvalidResourceStateFault +// The resource is in a state that prevents it from being used for database +// migration. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DeleteReplicationTaskAssessmentRun +func (c *DatabaseMigrationService) DeleteReplicationTaskAssessmentRun(input *DeleteReplicationTaskAssessmentRunInput) (*DeleteReplicationTaskAssessmentRunOutput, error) { + req, out := c.DeleteReplicationTaskAssessmentRunRequest(input) + return out, req.Send() +} + +// DeleteReplicationTaskAssessmentRunWithContext is the same as DeleteReplicationTaskAssessmentRun with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteReplicationTaskAssessmentRun for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) DeleteReplicationTaskAssessmentRunWithContext(ctx aws.Context, input *DeleteReplicationTaskAssessmentRunInput, opts ...request.Option) (*DeleteReplicationTaskAssessmentRunOutput, error) { + req, out := c.DeleteReplicationTaskAssessmentRunRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeAccountAttributes = "DescribeAccountAttributes" // DescribeAccountAttributesRequest generates a "aws/request.Request" representing the @@ -1377,6 +1561,171 @@ func (c *DatabaseMigrationService) DescribeAccountAttributesWithContext(ctx aws. return out, req.Send() } +const opDescribeApplicableIndividualAssessments = "DescribeApplicableIndividualAssessments" + +// DescribeApplicableIndividualAssessmentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeApplicableIndividualAssessments operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeApplicableIndividualAssessments for more information on using the DescribeApplicableIndividualAssessments +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeApplicableIndividualAssessmentsRequest method. 
+// req, resp := client.DescribeApplicableIndividualAssessmentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeApplicableIndividualAssessments +func (c *DatabaseMigrationService) DescribeApplicableIndividualAssessmentsRequest(input *DescribeApplicableIndividualAssessmentsInput) (req *request.Request, output *DescribeApplicableIndividualAssessmentsOutput) { + op := &request.Operation{ + Name: opDescribeApplicableIndividualAssessments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeApplicableIndividualAssessmentsInput{} + } + + output = &DescribeApplicableIndividualAssessmentsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeApplicableIndividualAssessments API operation for AWS Database Migration Service. +// +// Provides a list of individual assessments that you can specify for a new +// premigration assessment run, given one or more parameters. +// +// If you specify an existing migration task, this operation provides the default +// individual assessments you can specify for that task. Otherwise, the specified +// parameters model elements of a possible migration task on which to base a +// premigration assessment run. +// +// To use these migration task modeling parameters, you must specify an existing +// replication instance, a source database engine, a target database engine, +// and a migration type. This combination of parameters potentially limits the +// default individual assessments available for an assessment run created for +// a corresponding migration task. +// +// If you specify no parameters, this operation provides a list of all possible +// individual assessments that you can specify for an assessment run. If you +// specify any one of the task modeling parameters, you must specify all of +// them or the operation cannot provide a list of individual assessments. The +// only parameter that you can specify alone is for an existing migration task. +// The specified task definition then determines the default list of individual +// assessments that you can specify in an assessment run for the task. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation DescribeApplicableIndividualAssessments for usage and error information. +// +// Returned Error Types: +// * AccessDeniedFault +// AWS DMS was denied access to the endpoint. Check that the role is correctly +// configured. +// +// * ResourceNotFoundFault +// The resource could not be found. +// +// * InvalidResourceStateFault +// The resource is in a state that prevents it from being used for database +// migration. 
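A small sketch (not part of this patch) of the task-modeling call described above; the replication instance ARN and engine names are placeholders chosen for illustration.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func listDefaultAssessments() error {
	svc := databasemigrationservice.New(session.Must(session.NewSession()))

	// Per the doc comment above, all of the modeling parameters must be
	// supplied together when no existing task ARN is given.
	out, err := svc.DescribeApplicableIndividualAssessments(&databasemigrationservice.DescribeApplicableIndividualAssessmentsInput{
		ReplicationInstanceArn: aws.String("arn:aws:dms:us-east-1:123456789012:rep:EXAMPLE"), // placeholder
		SourceEngineName:       aws.String("oracle"),                                         // placeholder
		TargetEngineName:       aws.String("aurora-postgresql"),                              // placeholder
		MigrationType:          aws.String(databasemigrationservice.MigrationTypeValueFullLoadAndCdc),
	})
	if err != nil {
		return err
	}
	for _, name := range out.IndividualAssessmentNames {
		fmt.Println(aws.StringValue(name))
	}
	return nil
}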
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeApplicableIndividualAssessments +func (c *DatabaseMigrationService) DescribeApplicableIndividualAssessments(input *DescribeApplicableIndividualAssessmentsInput) (*DescribeApplicableIndividualAssessmentsOutput, error) { + req, out := c.DescribeApplicableIndividualAssessmentsRequest(input) + return out, req.Send() +} + +// DescribeApplicableIndividualAssessmentsWithContext is the same as DescribeApplicableIndividualAssessments with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeApplicableIndividualAssessments for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) DescribeApplicableIndividualAssessmentsWithContext(ctx aws.Context, input *DescribeApplicableIndividualAssessmentsInput, opts ...request.Option) (*DescribeApplicableIndividualAssessmentsOutput, error) { + req, out := c.DescribeApplicableIndividualAssessmentsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeApplicableIndividualAssessmentsPages iterates over the pages of a DescribeApplicableIndividualAssessments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeApplicableIndividualAssessments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeApplicableIndividualAssessments operation. +// pageNum := 0 +// err := client.DescribeApplicableIndividualAssessmentsPages(params, +// func(page *databasemigrationservice.DescribeApplicableIndividualAssessmentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DatabaseMigrationService) DescribeApplicableIndividualAssessmentsPages(input *DescribeApplicableIndividualAssessmentsInput, fn func(*DescribeApplicableIndividualAssessmentsOutput, bool) bool) error { + return c.DescribeApplicableIndividualAssessmentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeApplicableIndividualAssessmentsPagesWithContext same as DescribeApplicableIndividualAssessmentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) DescribeApplicableIndividualAssessmentsPagesWithContext(ctx aws.Context, input *DescribeApplicableIndividualAssessmentsInput, fn func(*DescribeApplicableIndividualAssessmentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeApplicableIndividualAssessmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeApplicableIndividualAssessmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeApplicableIndividualAssessmentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeCertificates = "DescribeCertificates" // DescribeCertificatesRequest generates a "aws/request.Request" representing the @@ -3182,32 +3531,316 @@ func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentResultsPages return p.Err() } -const opDescribeReplicationTasks = "DescribeReplicationTasks" +const opDescribeReplicationTaskAssessmentRuns = "DescribeReplicationTaskAssessmentRuns" -// DescribeReplicationTasksRequest generates a "aws/request.Request" representing the -// client's request for the DescribeReplicationTasks operation. The "output" return +// DescribeReplicationTaskAssessmentRunsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplicationTaskAssessmentRuns operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeReplicationTasks for more information on using the DescribeReplicationTasks +// See DescribeReplicationTaskAssessmentRuns for more information on using the DescribeReplicationTaskAssessmentRuns // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeReplicationTasksRequest method. -// req, resp := client.DescribeReplicationTasksRequest(params) +// // Example sending a request using the DescribeReplicationTaskAssessmentRunsRequest method. +// req, resp := client.DescribeReplicationTaskAssessmentRunsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeReplicationTasks +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeReplicationTaskAssessmentRuns +func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentRunsRequest(input *DescribeReplicationTaskAssessmentRunsInput) (req *request.Request, output *DescribeReplicationTaskAssessmentRunsOutput) { + op := &request.Operation{ + Name: opDescribeReplicationTaskAssessmentRuns, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReplicationTaskAssessmentRunsInput{} + } + + output = &DescribeReplicationTaskAssessmentRunsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeReplicationTaskAssessmentRuns API operation for AWS Database Migration Service. +// +// Returns a paginated list of premigration assessment runs based on filter +// settings. +// +// These filter settings can specify a combination of premigration assessment +// runs, migration tasks, replication instances, and assessment run status values. +// +// This operation doesn't return information about individual assessments. For +// this information, see the DescribeReplicationTaskIndividualAssessments operation. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation DescribeReplicationTaskAssessmentRuns for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundFault +// The resource could not be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeReplicationTaskAssessmentRuns +func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentRuns(input *DescribeReplicationTaskAssessmentRunsInput) (*DescribeReplicationTaskAssessmentRunsOutput, error) { + req, out := c.DescribeReplicationTaskAssessmentRunsRequest(input) + return out, req.Send() +} + +// DescribeReplicationTaskAssessmentRunsWithContext is the same as DescribeReplicationTaskAssessmentRuns with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReplicationTaskAssessmentRuns for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentRunsWithContext(ctx aws.Context, input *DescribeReplicationTaskAssessmentRunsInput, opts ...request.Option) (*DescribeReplicationTaskAssessmentRunsOutput, error) { + req, out := c.DescribeReplicationTaskAssessmentRunsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeReplicationTaskAssessmentRunsPages iterates over the pages of a DescribeReplicationTaskAssessmentRuns operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReplicationTaskAssessmentRuns method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReplicationTaskAssessmentRuns operation. +// pageNum := 0 +// err := client.DescribeReplicationTaskAssessmentRunsPages(params, +// func(page *databasemigrationservice.DescribeReplicationTaskAssessmentRunsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentRunsPages(input *DescribeReplicationTaskAssessmentRunsInput, fn func(*DescribeReplicationTaskAssessmentRunsOutput, bool) bool) error { + return c.DescribeReplicationTaskAssessmentRunsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeReplicationTaskAssessmentRunsPagesWithContext same as DescribeReplicationTaskAssessmentRunsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
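For completeness, a short caller-side sketch (not part of this patch) of the Pages iteration documented above; the task ARN is a placeholder, and the filter name should be checked against the DMS API reference.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func printAssessmentRuns(taskArn string) error {
	svc := databasemigrationservice.New(session.Must(session.NewSession()))

	input := &databasemigrationservice.DescribeReplicationTaskAssessmentRunsInput{
		Filters: []*databasemigrationservice.Filter{{
			Name:   aws.String("replication-task-arn"), // assumed filter name
			Values: aws.StringSlice([]string{taskArn}),
		}},
	}

	// The SDK follows the Marker/MaxRecords paginator defined above and calls
	// the closure once per page; returning false stops the iteration early.
	return svc.DescribeReplicationTaskAssessmentRunsPages(input,
		func(page *databasemigrationservice.DescribeReplicationTaskAssessmentRunsOutput, lastPage bool) bool {
			for _, run := range page.ReplicationTaskAssessmentRuns {
				fmt.Println(aws.StringValue(run.ReplicationTaskAssessmentRunArn), aws.StringValue(run.Status))
			}
			return true
		})
}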
+func (c *DatabaseMigrationService) DescribeReplicationTaskAssessmentRunsPagesWithContext(ctx aws.Context, input *DescribeReplicationTaskAssessmentRunsInput, fn func(*DescribeReplicationTaskAssessmentRunsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReplicationTaskAssessmentRunsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReplicationTaskAssessmentRunsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeReplicationTaskAssessmentRunsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeReplicationTaskIndividualAssessments = "DescribeReplicationTaskIndividualAssessments" + +// DescribeReplicationTaskIndividualAssessmentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplicationTaskIndividualAssessments operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeReplicationTaskIndividualAssessments for more information on using the DescribeReplicationTaskIndividualAssessments +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeReplicationTaskIndividualAssessmentsRequest method. +// req, resp := client.DescribeReplicationTaskIndividualAssessmentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeReplicationTaskIndividualAssessments +func (c *DatabaseMigrationService) DescribeReplicationTaskIndividualAssessmentsRequest(input *DescribeReplicationTaskIndividualAssessmentsInput) (req *request.Request, output *DescribeReplicationTaskIndividualAssessmentsOutput) { + op := &request.Operation{ + Name: opDescribeReplicationTaskIndividualAssessments, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeReplicationTaskIndividualAssessmentsInput{} + } + + output = &DescribeReplicationTaskIndividualAssessmentsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeReplicationTaskIndividualAssessments API operation for AWS Database Migration Service. +// +// Returns a paginated list of individual assessments based on filter settings. +// +// These filter settings can specify a combination of premigration assessment +// runs, migration tasks, and assessment status values. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation DescribeReplicationTaskIndividualAssessments for usage and error information. 
+// +// Returned Error Types: +// * ResourceNotFoundFault +// The resource could not be found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeReplicationTaskIndividualAssessments +func (c *DatabaseMigrationService) DescribeReplicationTaskIndividualAssessments(input *DescribeReplicationTaskIndividualAssessmentsInput) (*DescribeReplicationTaskIndividualAssessmentsOutput, error) { + req, out := c.DescribeReplicationTaskIndividualAssessmentsRequest(input) + return out, req.Send() +} + +// DescribeReplicationTaskIndividualAssessmentsWithContext is the same as DescribeReplicationTaskIndividualAssessments with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeReplicationTaskIndividualAssessments for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) DescribeReplicationTaskIndividualAssessmentsWithContext(ctx aws.Context, input *DescribeReplicationTaskIndividualAssessmentsInput, opts ...request.Option) (*DescribeReplicationTaskIndividualAssessmentsOutput, error) { + req, out := c.DescribeReplicationTaskIndividualAssessmentsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeReplicationTaskIndividualAssessmentsPages iterates over the pages of a DescribeReplicationTaskIndividualAssessments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReplicationTaskIndividualAssessments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReplicationTaskIndividualAssessments operation. +// pageNum := 0 +// err := client.DescribeReplicationTaskIndividualAssessmentsPages(params, +// func(page *databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DatabaseMigrationService) DescribeReplicationTaskIndividualAssessmentsPages(input *DescribeReplicationTaskIndividualAssessmentsInput, fn func(*DescribeReplicationTaskIndividualAssessmentsOutput, bool) bool) error { + return c.DescribeReplicationTaskIndividualAssessmentsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeReplicationTaskIndividualAssessmentsPagesWithContext same as DescribeReplicationTaskIndividualAssessmentsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
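Since the WithContext variants above note that the context drives request cancellation, here is a brief sketch (not part of this patch) of calling one with a deadline; the assessment-run ARN and the filter name are placeholders to verify against the DMS API reference.

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func describeIndividualAssessments(runArn string) (*databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsOutput, error) {
	svc := databasemigrationservice.New(session.Must(session.NewSession()))

	// The context must be non-nil; cancelling it aborts the in-flight request.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	return svc.DescribeReplicationTaskIndividualAssessmentsWithContext(ctx,
		&databasemigrationservice.DescribeReplicationTaskIndividualAssessmentsInput{
			Filters: []*databasemigrationservice.Filter{{
				Name:   aws.String("replication-task-assessment-run-arn"), // assumed filter name
				Values: aws.StringSlice([]string{runArn}),
			}},
		})
}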
+func (c *DatabaseMigrationService) DescribeReplicationTaskIndividualAssessmentsPagesWithContext(ctx aws.Context, input *DescribeReplicationTaskIndividualAssessmentsInput, fn func(*DescribeReplicationTaskIndividualAssessmentsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReplicationTaskIndividualAssessmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReplicationTaskIndividualAssessmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeReplicationTaskIndividualAssessmentsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeReplicationTasks = "DescribeReplicationTasks" + +// DescribeReplicationTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplicationTasks operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeReplicationTasks for more information on using the DescribeReplicationTasks +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeReplicationTasksRequest method. +// req, resp := client.DescribeReplicationTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/DescribeReplicationTasks func (c *DatabaseMigrationService) DescribeReplicationTasksRequest(input *DescribeReplicationTasksInput) (req *request.Request, output *DescribeReplicationTasksOutput) { op := &request.Operation{ Name: opDescribeReplicationTasks, @@ -3736,7 +4369,10 @@ func (c *DatabaseMigrationService) ListTagsForResourceRequest(input *ListTagsFor // ListTagsForResource API operation for AWS Database Migration Service. // -// Lists all tags for an AWS DMS resource. +// Lists all metadata tags attached to an AWS DMS resource, including replication +// instance, endpoint, security group, and migration task. For more information, +// see Tag (https://docs.aws.amazon.com/dms/latest/APIReference/API_Tag.html) +// data type description. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4566,7 +5202,10 @@ func (c *DatabaseMigrationService) RemoveTagsFromResourceRequest(input *RemoveTa // RemoveTagsFromResource API operation for AWS Database Migration Service. // -// Removes metadata tags from a DMS resource. +// Removes metadata tags from an AWS DMS resource, including replication instance, +// endpoint, security group, and migration task. For more information, see Tag +// (https://docs.aws.amazon.com/dms/latest/APIReference/API_Tag.html) data type +// description. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4776,60 +5415,72 @@ func (c *DatabaseMigrationService) StartReplicationTaskAssessmentWithContext(ctx return out, req.Send() } -const opStopReplicationTask = "StopReplicationTask" +const opStartReplicationTaskAssessmentRun = "StartReplicationTaskAssessmentRun" -// StopReplicationTaskRequest generates a "aws/request.Request" representing the -// client's request for the StopReplicationTask operation. The "output" return +// StartReplicationTaskAssessmentRunRequest generates a "aws/request.Request" representing the +// client's request for the StartReplicationTaskAssessmentRun operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopReplicationTask for more information on using the StopReplicationTask +// See StartReplicationTaskAssessmentRun for more information on using the StartReplicationTaskAssessmentRun // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StopReplicationTaskRequest method. -// req, resp := client.StopReplicationTaskRequest(params) +// // Example sending a request using the StartReplicationTaskAssessmentRunRequest method. +// req, resp := client.StartReplicationTaskAssessmentRunRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StopReplicationTask -func (c *DatabaseMigrationService) StopReplicationTaskRequest(input *StopReplicationTaskInput) (req *request.Request, output *StopReplicationTaskOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StartReplicationTaskAssessmentRun +func (c *DatabaseMigrationService) StartReplicationTaskAssessmentRunRequest(input *StartReplicationTaskAssessmentRunInput) (req *request.Request, output *StartReplicationTaskAssessmentRunOutput) { op := &request.Operation{ - Name: opStopReplicationTask, + Name: opStartReplicationTaskAssessmentRun, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StopReplicationTaskInput{} + input = &StartReplicationTaskAssessmentRunInput{} } - output = &StopReplicationTaskOutput{} + output = &StartReplicationTaskAssessmentRunOutput{} req = c.newRequest(op, input, output) return } -// StopReplicationTask API operation for AWS Database Migration Service. +// StartReplicationTaskAssessmentRun API operation for AWS Database Migration Service. // -// Stops the replication task. +// Starts a new premigration assessment run for one or more individual assessments +// of a migration task. +// +// The assessments that you can specify depend on the source and target database +// engine and the migration type defined for the given task. To run this operation, +// your migration task must already be created. After you run this operation, +// you can review the status of each individual assessment. You can also run +// the migration task manually after the assessment run and its individual assessments +// complete. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Database Migration Service's -// API operation StopReplicationTask for usage and error information. +// API operation StartReplicationTaskAssessmentRun for usage and error information. // // Returned Error Types: +// * AccessDeniedFault +// AWS DMS was denied access to the endpoint. Check that the role is correctly +// configured. +// // * ResourceNotFoundFault // The resource could not be found. // @@ -4837,22 +5488,134 @@ func (c *DatabaseMigrationService) StopReplicationTaskRequest(input *StopReplica // The resource is in a state that prevents it from being used for database // migration. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StopReplicationTask -func (c *DatabaseMigrationService) StopReplicationTask(input *StopReplicationTaskInput) (*StopReplicationTaskOutput, error) { - req, out := c.StopReplicationTaskRequest(input) - return out, req.Send() -} - -// StopReplicationTaskWithContext is the same as StopReplicationTask with the addition of -// the ability to pass a context and additional request options. +// * KMSAccessDeniedFault +// The ciphertext references a key that doesn't exist or that the DMS account +// doesn't have access to. // -// See StopReplicationTask for details on how to use this API operation. +// * KMSDisabledFault +// The specified master key (CMK) isn't enabled. // -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DatabaseMigrationService) StopReplicationTaskWithContext(ctx aws.Context, input *StopReplicationTaskInput, opts ...request.Option) (*StopReplicationTaskOutput, error) { +// * KMSFault +// An AWS Key Management Service (AWS KMS) error is preventing access to AWS +// KMS. +// +// * KMSInvalidStateFault +// The state of the specified AWS KMS resource isn't valid for this request. +// +// * KMSNotFoundFault +// The specified AWS KMS entity or resource can't be found. +// +// * KMSKeyNotAccessibleFault +// AWS DMS cannot access the AWS KMS key. +// +// * S3AccessDeniedFault +// Insufficient privileges are preventing access to an Amazon S3 object. +// +// * S3ResourceNotFoundFault +// A specified Amazon S3 bucket, bucket folder, or other object can't be found. +// +// * ResourceAlreadyExistsFault +// The resource you are attempting to create already exists. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StartReplicationTaskAssessmentRun +func (c *DatabaseMigrationService) StartReplicationTaskAssessmentRun(input *StartReplicationTaskAssessmentRunInput) (*StartReplicationTaskAssessmentRunOutput, error) { + req, out := c.StartReplicationTaskAssessmentRunRequest(input) + return out, req.Send() +} + +// StartReplicationTaskAssessmentRunWithContext is the same as StartReplicationTaskAssessmentRun with the addition of +// the ability to pass a context and additional request options. +// +// See StartReplicationTaskAssessmentRun for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DatabaseMigrationService) StartReplicationTaskAssessmentRunWithContext(ctx aws.Context, input *StartReplicationTaskAssessmentRunInput, opts ...request.Option) (*StartReplicationTaskAssessmentRunOutput, error) { + req, out := c.StartReplicationTaskAssessmentRunRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopReplicationTask = "StopReplicationTask" + +// StopReplicationTaskRequest generates a "aws/request.Request" representing the +// client's request for the StopReplicationTask operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopReplicationTask for more information on using the StopReplicationTask +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopReplicationTaskRequest method. +// req, resp := client.StopReplicationTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StopReplicationTask +func (c *DatabaseMigrationService) StopReplicationTaskRequest(input *StopReplicationTaskInput) (req *request.Request, output *StopReplicationTaskOutput) { + op := &request.Operation{ + Name: opStopReplicationTask, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopReplicationTaskInput{} + } + + output = &StopReplicationTaskOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopReplicationTask API operation for AWS Database Migration Service. +// +// Stops the replication task. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Database Migration Service's +// API operation StopReplicationTask for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundFault +// The resource could not be found. +// +// * InvalidResourceStateFault +// The resource is in a state that prevents it from being used for database +// migration. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dms-2016-01-01/StopReplicationTask +func (c *DatabaseMigrationService) StopReplicationTask(input *StopReplicationTaskInput) (*StopReplicationTaskOutput, error) { + req, out := c.StopReplicationTaskRequest(input) + return out, req.Send() +} + +// StopReplicationTaskWithContext is the same as StopReplicationTask with the addition of +// the ability to pass a context and additional request options. +// +// See StopReplicationTask for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
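+//
+// A hedged usage sketch of the context pattern described above; svc, the timeout,
+// and the task ARN are assumptions for illustration:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    out, err := svc.StopReplicationTaskWithContext(ctx, &StopReplicationTaskInput{
+//        ReplicationTaskArn: aws.String("arn:aws:dms:us-east-1:123456789012:task:EXAMPLE"),
+//    })
+//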
+func (c *DatabaseMigrationService) StopReplicationTaskWithContext(ctx aws.Context, input *StopReplicationTaskInput, opts ...request.Option) (*StopReplicationTaskOutput, error) { req, out := c.StopReplicationTaskRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) @@ -4951,8 +5714,8 @@ func (c *DatabaseMigrationService) TestConnectionWithContext(ctx aws.Context, in // AWS DMS was denied access to the endpoint. Check that the role is correctly // configured. type AccessDeniedFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4969,17 +5732,17 @@ func (s AccessDeniedFault) GoString() string { func newErrorAccessDeniedFault(v protocol.ResponseMetadata) error { return &AccessDeniedFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedFault) Code() string { +func (s *AccessDeniedFault) Code() string { return "AccessDeniedFault" } // Message returns the exception's message. -func (s AccessDeniedFault) Message() string { +func (s *AccessDeniedFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4987,22 +5750,22 @@ func (s AccessDeniedFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedFault) OrigErr() error { +func (s *AccessDeniedFault) OrigErr() error { return nil } -func (s AccessDeniedFault) Error() string { +func (s *AccessDeniedFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedFault) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedFault) RequestID() string { + return s.RespMetadata.RequestID } // Describes a quota for an AWS account, for example, the number of replication @@ -5220,7 +5983,11 @@ func (s *ApplyPendingMaintenanceActionOutput) SetResourcePendingMaintenanceActio return s } -// The name of the Availability Zone for use during database migration. +// The name of an Availability Zone for use during database migration. AvailabilityZone +// is an optional parameter to the CreateReplicationInstance (https://docs.aws.amazon.com/dms/latest/APIReference/API_CreateReplicationInstance.html) +// operation, and it’s value relates to the AWS Region of an endpoint. For +// example, the availability zone of an endpoint in the us-east-1 region might +// be us-east-1a, us-east-1b, us-east-1c, or us-east-1d. type AvailabilityZone struct { _ struct{} `type:"structure"` @@ -5244,6 +6011,67 @@ func (s *AvailabilityZone) SetName(v string) *AvailabilityZone { return s } +type CancelReplicationTaskAssessmentRunInput struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of the premigration assessment run to be canceled. 
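+	//
+	// A hedged usage sketch; svc and the ARN are placeholders for illustration:
+	//
+	//    _, err := svc.CancelReplicationTaskAssessmentRun(&CancelReplicationTaskAssessmentRunInput{
+	//        ReplicationTaskAssessmentRunArn: aws.String("arn:aws:dms:us-east-1:123456789012:assessment-run:EXAMPLE"),
+	//    })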
+ // + // ReplicationTaskAssessmentRunArn is a required field + ReplicationTaskAssessmentRunArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelReplicationTaskAssessmentRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReplicationTaskAssessmentRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelReplicationTaskAssessmentRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelReplicationTaskAssessmentRunInput"} + if s.ReplicationTaskAssessmentRunArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationTaskAssessmentRunArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReplicationTaskAssessmentRunArn sets the ReplicationTaskAssessmentRunArn field's value. +func (s *CancelReplicationTaskAssessmentRunInput) SetReplicationTaskAssessmentRunArn(v string) *CancelReplicationTaskAssessmentRunInput { + s.ReplicationTaskAssessmentRunArn = &v + return s +} + +type CancelReplicationTaskAssessmentRunOutput struct { + _ struct{} `type:"structure"` + + // The ReplicationTaskAssessmentRun object for the canceled assessment run. + ReplicationTaskAssessmentRun *ReplicationTaskAssessmentRun `type:"structure"` +} + +// String returns the string representation +func (s CancelReplicationTaskAssessmentRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelReplicationTaskAssessmentRunOutput) GoString() string { + return s.String() +} + +// SetReplicationTaskAssessmentRun sets the ReplicationTaskAssessmentRun field's value. +func (s *CancelReplicationTaskAssessmentRunOutput) SetReplicationTaskAssessmentRun(v *ReplicationTaskAssessmentRun) *CancelReplicationTaskAssessmentRunOutput { + s.ReplicationTaskAssessmentRun = v + return s +} + // The SSL certificate that can be used to encrypt connections between the endpoints // and the replication instance. type Certificate struct { @@ -5377,7 +6205,15 @@ type Connection struct { // string. ReplicationInstanceIdentifier *string `type:"string"` - // The connection status. + // The connection status. This parameter can return one of the following values: + // + // * "successful" + // + // * "testing" + // + // * "failed" + // + // * "deleting" Status *string `type:"string"` } @@ -5464,12 +6300,12 @@ type CreateEndpointInput struct { // Settings in JSON format for the target Elasticsearch endpoint. For more information // about the available settings, see Extra Connection Attributes When Using // Elasticsearch as a Target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Elasticsearch.html#CHAP_Target.Elasticsearch.Configuration) - // in the AWS Database Migration User Guide. + // in the AWS Database Migration Service User Guide. ElasticsearchSettings *ElasticsearchSettings `type:"structure"` // The database endpoint identifier. Identifiers must begin with a letter and // must contain only ASCII letters, digits, and hyphens. They can't end with - // a hyphen or contain two consecutive hyphens. + // a hyphen, or contain two consecutive hyphens. // // EndpointIdentifier is a required field EndpointIdentifier *string `type:"string" required:"true"` @@ -5482,7 +6318,7 @@ type CreateEndpointInput struct { // The type of engine for the endpoint. 
Valid values, depending on the EndpointType // value, include "mysql", "oracle", "postgres", "mariadb", "aurora", "aurora-postgresql", // "redshift", "s3", "db2", "azuredb", "sybase", "dynamodb", "mongodb", "kinesis", - // "kafka", "elasticsearch", "documentdb", and "sqlserver". + // "kafka", "elasticsearch", "docdb", "sqlserver", and "neptune". // // EngineName is a required field EngineName *string `type:"string" required:"true"` @@ -5498,16 +6334,22 @@ type CreateEndpointInput struct { // in the AWS Database Migration Service User Guide. ExtraConnectionAttributes *string `type:"string"` - // Settings in JSON format for the target Apache Kafka endpoint. For information - // about other available settings, see Using Object Mapping to Migrate Data - // to Apache Kafka (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html#CHAP_Target.Kafka.ObjectMapping) - // in the AWS Database Migration User Guide. + // Settings in JSON format for the source IBM Db2 LUW endpoint. For information + // about other available settings, see Extra connection attributes when using + // Db2 LUW as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DB2.html) + // in the AWS Database Migration Service User Guide. + IBMDb2Settings *IBMDb2Settings `type:"structure"` + + // Settings in JSON format for the target Apache Kafka endpoint. For more information + // about the available settings, see Using Apache Kafka as a Target for AWS + // Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html) + // in the AWS Database Migration Service User Guide. KafkaSettings *KafkaSettings `type:"structure"` // Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. - // For information about other available settings, see Using Object Mapping - // to Migrate Data to a Kinesis Data Stream (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html#CHAP_Target.Kinesis.ObjectMapping) - // in the AWS Database Migration User Guide. + // For more information about the available settings, see Using Amazon Kinesis + // Data Streams as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html) + // in the AWS Database Migration Service User Guide. KinesisSettings *KinesisSettings `type:"structure"` // An AWS KMS key identifier that is used to encrypt the connection parameters @@ -5520,21 +6362,69 @@ type CreateEndpointInput struct { // account has a different default encryption key for each AWS Region. KmsKeyId *string `type:"string"` + // Settings in JSON format for the source and target Microsoft SQL Server endpoint. + // For information about other available settings, see Extra connection attributes + // when using SQL Server as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SQLServer.html) + // and Extra connection attributes when using SQL Server as a target for AWS + // DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SQLServer.html) + // in the AWS Database Migration Service User Guide. + MicrosoftSQLServerSettings *MicrosoftSQLServerSettings `type:"structure"` + // Settings in JSON format for the source MongoDB endpoint. 
For more information - // about the available settings, see the configuration properties section in - // Using MongoDB as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html) + // about the available settings, see Using MongoDB as a Target for AWS Database + // Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html#CHAP_Source.MongoDB.Configuration) // in the AWS Database Migration Service User Guide. MongoDbSettings *MongoDbSettings `type:"structure"` + // Settings in JSON format for the source and target MySQL endpoint. For information + // about other available settings, see Extra connection attributes when using + // MySQL as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MySQL.html) + // and Extra connection attributes when using a MySQL-compatible database as + // a target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.MySQL.html) + // in the AWS Database Migration Service User Guide. + MySQLSettings *MySQLSettings `type:"structure"` + + // Settings in JSON format for the target Amazon Neptune endpoint. For more + // information about the available settings, see Specifying Endpoint Settings + // for Amazon Neptune as a Target (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings) + // in the AWS Database Migration Service User Guide. + NeptuneSettings *NeptuneSettings `type:"structure"` + + // Settings in JSON format for the source and target Oracle endpoint. For information + // about other available settings, see Extra connection attributes when using + // Oracle as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html) + // and Extra connection attributes when using Oracle as a target for AWS DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Oracle.html) + // in the AWS Database Migration Service User Guide. + OracleSettings *OracleSettings `type:"structure"` + // The password to be used to log in to the endpoint database. Password *string `type:"string" sensitive:"true"` // The port used by the endpoint database. Port *int64 `type:"integer"` + // Settings in JSON format for the source and target PostgreSQL endpoint. For + // information about other available settings, see Extra connection attributes + // when using PostgreSQL as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html) + // and Extra connection attributes when using PostgreSQL as a target for AWS + // DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.PostgreSQL.html) + // in the AWS Database Migration Service User Guide. + PostgreSQLSettings *PostgreSQLSettings `type:"structure"` + // Provides information that defines an Amazon Redshift endpoint. RedshiftSettings *RedshiftSettings `type:"structure"` + // A friendly name for the resource identifier at the end of the EndpointArn + // response parameter that is returned in the created Endpoint object. The value + // for this parameter can have up to 31 characters. It can contain only ASCII + // letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain + // two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. + // For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. 
+ // If you don't specify a ResourceIdentifier value, AWS DMS generates a default + // identifier value for the end of EndpointArn. + ResourceIdentifier *string `type:"string"` + // Settings in JSON format for the target Amazon S3 endpoint. For more information // about the available settings, see Extra Connection Attributes When Using // Amazon S3 as a Target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring) @@ -5552,6 +6442,14 @@ type CreateEndpointInput struct { // is none SslMode *string `type:"string" enum:"DmsSslModeValue"` + // Settings in JSON format for the source and target SAP ASE endpoint. For information + // about other available settings, see Extra connection attributes when using + // SAP ASE as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SAP.html) + // and Extra connection attributes when using SAP ASE as a target for AWS DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SAP.html) in + // the AWS Database Migration Service User Guide. + SybaseSettings *SybaseSettings `type:"structure"` + // One or more tags to be assigned to the endpoint. Tags []*Tag `type:"list"` @@ -5591,6 +6489,11 @@ func (s *CreateEndpointInput) Validate() error { invalidParams.AddNested("ElasticsearchSettings", err.(request.ErrInvalidParams)) } } + if s.NeptuneSettings != nil { + if err := s.NeptuneSettings.Validate(); err != nil { + invalidParams.AddNested("NeptuneSettings", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -5658,6 +6561,12 @@ func (s *CreateEndpointInput) SetExtraConnectionAttributes(v string) *CreateEndp return s } +// SetIBMDb2Settings sets the IBMDb2Settings field's value. +func (s *CreateEndpointInput) SetIBMDb2Settings(v *IBMDb2Settings) *CreateEndpointInput { + s.IBMDb2Settings = v + return s +} + // SetKafkaSettings sets the KafkaSettings field's value. func (s *CreateEndpointInput) SetKafkaSettings(v *KafkaSettings) *CreateEndpointInput { s.KafkaSettings = v @@ -5676,12 +6585,36 @@ func (s *CreateEndpointInput) SetKmsKeyId(v string) *CreateEndpointInput { return s } +// SetMicrosoftSQLServerSettings sets the MicrosoftSQLServerSettings field's value. +func (s *CreateEndpointInput) SetMicrosoftSQLServerSettings(v *MicrosoftSQLServerSettings) *CreateEndpointInput { + s.MicrosoftSQLServerSettings = v + return s +} + // SetMongoDbSettings sets the MongoDbSettings field's value. func (s *CreateEndpointInput) SetMongoDbSettings(v *MongoDbSettings) *CreateEndpointInput { s.MongoDbSettings = v return s } +// SetMySQLSettings sets the MySQLSettings field's value. +func (s *CreateEndpointInput) SetMySQLSettings(v *MySQLSettings) *CreateEndpointInput { + s.MySQLSettings = v + return s +} + +// SetNeptuneSettings sets the NeptuneSettings field's value. +func (s *CreateEndpointInput) SetNeptuneSettings(v *NeptuneSettings) *CreateEndpointInput { + s.NeptuneSettings = v + return s +} + +// SetOracleSettings sets the OracleSettings field's value. +func (s *CreateEndpointInput) SetOracleSettings(v *OracleSettings) *CreateEndpointInput { + s.OracleSettings = v + return s +} + // SetPassword sets the Password field's value. func (s *CreateEndpointInput) SetPassword(v string) *CreateEndpointInput { s.Password = &v @@ -5694,12 +6627,24 @@ func (s *CreateEndpointInput) SetPort(v int64) *CreateEndpointInput { return s } +// SetPostgreSQLSettings sets the PostgreSQLSettings field's value. 
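+//
+// A hedged sketch of wiring the new engine-specific settings into an endpoint via
+// the fluent setters; field values are placeholders and the PostgreSQLSettings
+// field names are assumed from this SDK version:
+//
+//    input := (&CreateEndpointInput{}).
+//        SetEndpointIdentifier("pg-source").
+//        SetEndpointType("source").
+//        SetEngineName("postgres").
+//        SetPostgreSQLSettings(&PostgreSQLSettings{
+//            ServerName:   aws.String("db.example.internal"),
+//            Port:         aws.Int64(5432),
+//            DatabaseName: aws.String("appdb"),
+//            Username:     aws.String("dms_user"),
+//            Password:     aws.String("example-password"),
+//        })
+//    out, err := svc.CreateEndpoint(input)
+//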
+func (s *CreateEndpointInput) SetPostgreSQLSettings(v *PostgreSQLSettings) *CreateEndpointInput { + s.PostgreSQLSettings = v + return s +} + // SetRedshiftSettings sets the RedshiftSettings field's value. func (s *CreateEndpointInput) SetRedshiftSettings(v *RedshiftSettings) *CreateEndpointInput { s.RedshiftSettings = v return s } +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *CreateEndpointInput) SetResourceIdentifier(v string) *CreateEndpointInput { + s.ResourceIdentifier = &v + return s +} + // SetS3Settings sets the S3Settings field's value. func (s *CreateEndpointInput) SetS3Settings(v *S3Settings) *CreateEndpointInput { s.S3Settings = v @@ -5724,6 +6669,12 @@ func (s *CreateEndpointInput) SetSslMode(v string) *CreateEndpointInput { return s } +// SetSybaseSettings sets the SybaseSettings field's value. +func (s *CreateEndpointInput) SetSybaseSettings(v *SybaseSettings) *CreateEndpointInput { + s.SybaseSettings = v + return s +} + // SetTags sets the Tags field's value. func (s *CreateEndpointInput) SetTags(v []*Tag) *CreateEndpointInput { s.Tags = v @@ -5915,10 +6866,17 @@ type CreateReplicationInstanceInput struct { // AWS Region, for example: us-east-1d AvailabilityZone *string `type:"string"` - // A list of DNS name servers supported for the replication instance. + // A list of custom DNS name servers supported for the replication instance + // to access your on-premise source or target database. This list overrides + // the default name servers supported by the replication instance. You can specify + // a comma-separated list of internet addresses for up to four on-premise DNS + // name servers. For example: "1.1.1.1,2.2.2.2,3.3.3.3,4.4.4.4" DnsNameServers *string `type:"string"` // The engine version number of the replication instance. + // + // If an engine version number is not specified when a replication instance + // is created, the default is the latest engine version available. EngineVersion *string `type:"string"` // An AWS KMS key identifier that is used to encrypt the data on the replication @@ -5954,11 +6912,13 @@ type CreateReplicationInstanceInput struct { // represents an instance with a private IP address. The default value is true. PubliclyAccessible *bool `type:"boolean"` - // The compute and memory capacity of the replication instance as specified - // by the replication instance class. + // The compute and memory capacity of the replication instance as defined for + // the specified replication instance class. For example to specify the instance + // class dms.c4.large, set this parameter to "dms.c4.large". // - // Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large - // | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge + // For more information on the settings and capacities for the available replication + // instance classes, see Selecting the right AWS DMS replication instance for + // your migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). // // ReplicationInstanceClass is a required field ReplicationInstanceClass *string `type:"string" required:"true"` @@ -5968,7 +6928,7 @@ type CreateReplicationInstanceInput struct { // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens. + // * Must contain 1-63 alphanumeric characters or hyphens. // // * First character must be a letter. 
// @@ -5982,6 +6942,16 @@ type CreateReplicationInstanceInput struct { // A subnet group to associate with the replication instance. ReplicationSubnetGroupIdentifier *string `type:"string"` + // A friendly name for the resource identifier at the end of the EndpointArn + // response parameter that is returned in the created Endpoint object. The value + // for this parameter can have up to 31 characters. It can contain only ASCII + // letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain + // two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. + // For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. + // If you don't specify a ResourceIdentifier value, AWS DMS generates a default + // identifier value for the end of EndpointArn. + ResourceIdentifier *string `type:"string"` + // One or more tags to be assigned to the replication instance. Tags []*Tag `type:"list"` @@ -6089,6 +7059,12 @@ func (s *CreateReplicationInstanceInput) SetReplicationSubnetGroupIdentifier(v s return s } +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *CreateReplicationInstanceInput) SetResourceIdentifier(v string) *CreateReplicationInstanceInput { + s.ResourceIdentifier = &v + return s +} + // SetTags sets the Tags field's value. func (s *CreateReplicationInstanceInput) SetTags(v []*Tag) *CreateReplicationInstanceInput { s.Tags = v @@ -6260,9 +7236,9 @@ type CreateReplicationTaskInput struct { // Indicates when you want a change data capture (CDC) operation to stop. The // value can be either server time or commit time. // - // Server time example: --cdc-stop-position “server_time:3018-02-09T12:12:12” + // Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” // - // Commit time example: --cdc-stop-position “commit_time: 3018-02-09T12:12:12 + // Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12 // “ CdcStopPosition *string `type:"string"` @@ -6280,7 +7256,7 @@ type CreateReplicationTaskInput struct { // // Constraints: // - // * Must contain from 1 to 255 alphanumeric characters or hyphens. + // * Must contain 1-255 alphanumeric characters or hyphens. // // * First character must be a letter. // @@ -6290,18 +7266,28 @@ type CreateReplicationTaskInput struct { ReplicationTaskIdentifier *string `type:"string" required:"true"` // Overall settings for the task, in JSON format. For more information, see - // Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html) + // Specifying Task Settings for AWS Database Migration Service Tasks (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html) // in the AWS Database Migration User Guide. ReplicationTaskSettings *string `type:"string"` + // A friendly name for the resource identifier at the end of the EndpointArn + // response parameter that is returned in the created Endpoint object. The value + // for this parameter can have up to 31 characters. It can contain only ASCII + // letters, digits, and hyphen ('-'). Also, it can't end with a hyphen or contain + // two consecutive hyphens, and can only begin with a letter, such as Example-App-ARN1. + // For example, this value might result in the EndpointArn value arn:aws:dms:eu-west-1:012345678901:rep:Example-App-ARN1. 
+ // If you don't specify a ResourceIdentifier value, AWS DMS generates a default + // identifier value for the end of EndpointArn. + ResourceIdentifier *string `type:"string"` + // An Amazon Resource Name (ARN) that uniquely identifies the source endpoint. // // SourceEndpointArn is a required field SourceEndpointArn *string `type:"string" required:"true"` // The table mappings for the task, in JSON format. For more information, see - // Table Mapping (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.html) - // in the AWS Database Migration User Guide. + // Using Table Mapping to Specify Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.html) + // in the AWS Database Migration Service User Guide. // // TableMappings is a required field TableMappings *string `type:"string" required:"true"` @@ -6313,6 +7299,12 @@ type CreateReplicationTaskInput struct { // // TargetEndpointArn is a required field TargetEndpointArn *string `type:"string" required:"true"` + + // Supplemental information that the task requires to migrate the data for certain + // source and target endpoints. For more information, see Specifying Supplemental + // Data for Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.TaskData.html) + // in the AWS Database Migration Service User Guide. + TaskData *string `type:"string"` } // String returns the string representation @@ -6395,6 +7387,12 @@ func (s *CreateReplicationTaskInput) SetReplicationTaskSettings(v string) *Creat return s } +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *CreateReplicationTaskInput) SetResourceIdentifier(v string) *CreateReplicationTaskInput { + s.ResourceIdentifier = &v + return s +} + // SetSourceEndpointArn sets the SourceEndpointArn field's value. func (s *CreateReplicationTaskInput) SetSourceEndpointArn(v string) *CreateReplicationTaskInput { s.SourceEndpointArn = &v @@ -6419,6 +7417,12 @@ func (s *CreateReplicationTaskInput) SetTargetEndpointArn(v string) *CreateRepli return s } +// SetTaskData sets the TaskData field's value. +func (s *CreateReplicationTaskInput) SetTaskData(v string) *CreateReplicationTaskInput { + s.TaskData = &v + return s +} + type CreateReplicationTaskOutput struct { _ struct{} `type:"structure"` @@ -6813,30 +7817,30 @@ func (s DeleteReplicationSubnetGroupOutput) GoString() string { return s.String() } -type DeleteReplicationTaskInput struct { +type DeleteReplicationTaskAssessmentRunInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the replication task to be deleted. + // Amazon Resource Name (ARN) of the premigration assessment run to be deleted. // - // ReplicationTaskArn is a required field - ReplicationTaskArn *string `type:"string" required:"true"` + // ReplicationTaskAssessmentRunArn is a required field + ReplicationTaskAssessmentRunArn *string `type:"string" required:"true"` } // String returns the string representation -func (s DeleteReplicationTaskInput) String() string { +func (s DeleteReplicationTaskAssessmentRunInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteReplicationTaskInput) GoString() string { +func (s DeleteReplicationTaskAssessmentRunInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteReplicationTaskInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationTaskInput"} - if s.ReplicationTaskArn == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationTaskArn")) +func (s *DeleteReplicationTaskAssessmentRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationTaskAssessmentRunInput"} + if s.ReplicationTaskAssessmentRunArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationTaskAssessmentRunArn")) } if invalidParams.Len() > 0 { @@ -6845,41 +7849,102 @@ func (s *DeleteReplicationTaskInput) Validate() error { return nil } -// SetReplicationTaskArn sets the ReplicationTaskArn field's value. -func (s *DeleteReplicationTaskInput) SetReplicationTaskArn(v string) *DeleteReplicationTaskInput { - s.ReplicationTaskArn = &v +// SetReplicationTaskAssessmentRunArn sets the ReplicationTaskAssessmentRunArn field's value. +func (s *DeleteReplicationTaskAssessmentRunInput) SetReplicationTaskAssessmentRunArn(v string) *DeleteReplicationTaskAssessmentRunInput { + s.ReplicationTaskAssessmentRunArn = &v return s } -type DeleteReplicationTaskOutput struct { +type DeleteReplicationTaskAssessmentRunOutput struct { _ struct{} `type:"structure"` - // The deleted replication task. - ReplicationTask *ReplicationTask `type:"structure"` + // The ReplicationTaskAssessmentRun object for the deleted assessment run. + ReplicationTaskAssessmentRun *ReplicationTaskAssessmentRun `type:"structure"` } // String returns the string representation -func (s DeleteReplicationTaskOutput) String() string { +func (s DeleteReplicationTaskAssessmentRunOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteReplicationTaskOutput) GoString() string { +func (s DeleteReplicationTaskAssessmentRunOutput) GoString() string { return s.String() } -// SetReplicationTask sets the ReplicationTask field's value. -func (s *DeleteReplicationTaskOutput) SetReplicationTask(v *ReplicationTask) *DeleteReplicationTaskOutput { - s.ReplicationTask = v +// SetReplicationTaskAssessmentRun sets the ReplicationTaskAssessmentRun field's value. +func (s *DeleteReplicationTaskAssessmentRunOutput) SetReplicationTaskAssessmentRun(v *ReplicationTaskAssessmentRun) *DeleteReplicationTaskAssessmentRunOutput { + s.ReplicationTaskAssessmentRun = v return s } -type DescribeAccountAttributesInput struct { +type DeleteReplicationTaskInput struct { _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the replication task to be deleted. + // + // ReplicationTaskArn is a required field + ReplicationTaskArn *string `type:"string" required:"true"` } // String returns the string representation -func (s DescribeAccountAttributesInput) String() string { +func (s DeleteReplicationTaskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationTaskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReplicationTaskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationTaskInput"} + if s.ReplicationTaskArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationTaskArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReplicationTaskArn sets the ReplicationTaskArn field's value. 
+func (s *DeleteReplicationTaskInput) SetReplicationTaskArn(v string) *DeleteReplicationTaskInput { + s.ReplicationTaskArn = &v + return s +} + +type DeleteReplicationTaskOutput struct { + _ struct{} `type:"structure"` + + // The deleted replication task. + ReplicationTask *ReplicationTask `type:"structure"` +} + +// String returns the string representation +func (s DeleteReplicationTaskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicationTaskOutput) GoString() string { + return s.String() +} + +// SetReplicationTask sets the ReplicationTask field's value. +func (s *DeleteReplicationTaskOutput) SetReplicationTask(v *ReplicationTask) *DeleteReplicationTaskOutput { + s.ReplicationTask = v + return s +} + +type DescribeAccountAttributesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeAccountAttributesInput) String() string { return awsutil.Prettify(s) } @@ -6929,10 +7994,136 @@ func (s *DescribeAccountAttributesOutput) SetUniqueAccountIdentifier(v string) * return s } +type DescribeApplicableIndividualAssessmentsInput struct { + _ struct{} `type:"structure"` + + // Optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // Maximum number of records to include in the response. If more records exist + // than the specified MaxRecords value, a pagination token called a marker is + // included in the response so that the remaining results can be retrieved. + MaxRecords *int64 `type:"integer"` + + // Name of the migration type that each provided individual assessment must + // support. + MigrationType *string `type:"string" enum:"MigrationTypeValue"` + + // ARN of a replication instance on which you want to base the default list + // of individual assessments. + ReplicationInstanceArn *string `type:"string"` + + // Amazon Resource Name (ARN) of a migration task on which you want to base + // the default list of individual assessments. + ReplicationTaskArn *string `type:"string"` + + // Name of a database engine that the specified replication instance supports + // as a source. + SourceEngineName *string `type:"string"` + + // Name of a database engine that the specified replication instance supports + // as a target. + TargetEngineName *string `type:"string"` +} + +// String returns the string representation +func (s DescribeApplicableIndividualAssessmentsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeApplicableIndividualAssessmentsInput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetMarker(v string) *DescribeApplicableIndividualAssessmentsInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetMaxRecords(v int64) *DescribeApplicableIndividualAssessmentsInput { + s.MaxRecords = &v + return s +} + +// SetMigrationType sets the MigrationType field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetMigrationType(v string) *DescribeApplicableIndividualAssessmentsInput { + s.MigrationType = &v + return s +} + +// SetReplicationInstanceArn sets the ReplicationInstanceArn field's value. 
+func (s *DescribeApplicableIndividualAssessmentsInput) SetReplicationInstanceArn(v string) *DescribeApplicableIndividualAssessmentsInput { + s.ReplicationInstanceArn = &v + return s +} + +// SetReplicationTaskArn sets the ReplicationTaskArn field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetReplicationTaskArn(v string) *DescribeApplicableIndividualAssessmentsInput { + s.ReplicationTaskArn = &v + return s +} + +// SetSourceEngineName sets the SourceEngineName field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetSourceEngineName(v string) *DescribeApplicableIndividualAssessmentsInput { + s.SourceEngineName = &v + return s +} + +// SetTargetEngineName sets the TargetEngineName field's value. +func (s *DescribeApplicableIndividualAssessmentsInput) SetTargetEngineName(v string) *DescribeApplicableIndividualAssessmentsInput { + s.TargetEngineName = &v + return s +} + +type DescribeApplicableIndividualAssessmentsOutput struct { + _ struct{} `type:"structure"` + + // List of names for the individual assessments supported by the premigration + // assessment run that you start based on the specified request parameters. + // For more information on the available individual assessments, including compatibility + // with different migration task configurations, see Working with premigration + // assessment runs (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.AssessmentReport.html) + // in the AWS Database Migration Service User Guide. + IndividualAssessmentNames []*string `type:"list"` + + // Pagination token returned for you to pass to a subsequent request. If you + // pass this token as the Marker value in a subsequent request, the response + // includes only records beyond the marker, up to the value specified in the + // request by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeApplicableIndividualAssessmentsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeApplicableIndividualAssessmentsOutput) GoString() string { + return s.String() +} + +// SetIndividualAssessmentNames sets the IndividualAssessmentNames field's value. +func (s *DescribeApplicableIndividualAssessmentsOutput) SetIndividualAssessmentNames(v []*string) *DescribeApplicableIndividualAssessmentsOutput { + s.IndividualAssessmentNames = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeApplicableIndividualAssessmentsOutput) SetMarker(v string) *DescribeApplicableIndividualAssessmentsOutput { + s.Marker = &v + return s +} + type DescribeCertificatesInput struct { _ struct{} `type:"structure"` - // Filters applied to the certificate described in the form of key-value pairs. + // Filters applied to the certificates described in the form of key-value pairs. Filters []*Filter `type:"list"` // An optional pagination token provided by a previous request. If this parameter @@ -7137,7 +8328,7 @@ func (s *DescribeConnectionsOutput) SetMarker(v string) *DescribeConnectionsOutp type DescribeEndpointTypesInput struct { _ struct{} `type:"structure"` - // Filters applied to the describe action. + // Filters applied to the endpoint types. 
// // Valid filter names: engine-name | endpoint-type Filters []*Filter `type:"list"` @@ -7242,7 +8433,7 @@ func (s *DescribeEndpointTypesOutput) SetSupportedEndpointTypes(v []*SupportedEn type DescribeEndpointsInput struct { _ struct{} `type:"structure"` - // Filters applied to the describe action. + // Filters applied to the endpoints. // // Valid filter names: endpoint-arn | endpoint-type | endpoint-id | engine-name Filters []*Filter `type:"list"` @@ -7347,7 +8538,7 @@ func (s *DescribeEndpointsOutput) SetMarker(v string) *DescribeEndpointsOutput { type DescribeEventCategoriesInput struct { _ struct{} `type:"structure"` - // Filters applied to the action. + // Filters applied to the event categories. Filters []*Filter `type:"list"` // The type of AWS DMS resource that generates events. @@ -7424,7 +8615,7 @@ func (s *DescribeEventCategoriesOutput) SetEventCategoryGroupList(v []*EventCate type DescribeEventSubscriptionsInput struct { _ struct{} `type:"structure"` - // Filters applied to the action. + // Filters applied to event subscriptions. Filters []*Filter `type:"list"` // An optional pagination token provided by a previous request. If this parameter @@ -7545,7 +8736,7 @@ type DescribeEventsInput struct { // A list of event categories for the source type that you've chosen. EventCategories []*string `type:"list"` - // Filters applied to the action. + // Filters applied to events. Filters []*Filter `type:"list"` // An optional pagination token provided by a previous request. If this parameter @@ -8049,7 +9240,7 @@ func (s *DescribeReplicationInstanceTaskLogsOutput) SetReplicationInstanceTaskLo type DescribeReplicationInstancesInput struct { _ struct{} `type:"structure"` - // Filters applied to the describe action. + // Filters applied to replication instances. // // Valid filter names: replication-instance-arn | replication-instance-id | // replication-instance-class | engine-version @@ -8155,7 +9346,9 @@ func (s *DescribeReplicationInstancesOutput) SetReplicationInstances(v []*Replic type DescribeReplicationSubnetGroupsInput struct { _ struct{} `type:"structure"` - // Filters applied to the describe action. + // Filters applied to replication subnet groups. + // + // Valid filter names: replication-subnet-group-id Filters []*Filter `type:"list"` // An optional pagination token provided by a previous request. If this parameter @@ -8349,13 +9542,14 @@ func (s *DescribeReplicationTaskAssessmentResultsOutput) SetReplicationTaskAsses return s } -type DescribeReplicationTasksInput struct { +type DescribeReplicationTaskAssessmentRunsInput struct { _ struct{} `type:"structure"` - // Filters applied to the describe action. + // Filters applied to the premigration assessment runs described in the form + // of key-value pairs. // - // Valid filter names: replication-task-arn | replication-task-id | migration-type - // | endpoint-arn | replication-instance-arn + // Valid filter names: replication-task-assessment-run-arn, replication-task-arn, + // replication-instance-arn, status Filters []*Filter `type:"list"` // An optional pagination token provided by a previous request. If this parameter @@ -8366,31 +9560,22 @@ type DescribeReplicationTasksInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker // is included in the response so that the remaining results can be retrieved. - // - // Default: 100 - // - // Constraints: Minimum 20, maximum 100. 
MaxRecords *int64 `type:"integer"` - - // An option to set to avoid returning information about settings. Use this - // to reduce overhead when setting information is too large. To use this option, - // choose true; otherwise, choose false (the default). - WithoutSettings *bool `type:"boolean"` } // String returns the string representation -func (s DescribeReplicationTasksInput) String() string { +func (s DescribeReplicationTaskAssessmentRunsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeReplicationTasksInput) GoString() string { +func (s DescribeReplicationTaskAssessmentRunsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeReplicationTasksInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeReplicationTasksInput"} +func (s *DescribeReplicationTaskAssessmentRunsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReplicationTaskAssessmentRunsInput"} if s.Filters != nil { for i, v := range s.Filters { if v == nil { @@ -8409,70 +9594,67 @@ func (s *DescribeReplicationTasksInput) Validate() error { } // SetFilters sets the Filters field's value. -func (s *DescribeReplicationTasksInput) SetFilters(v []*Filter) *DescribeReplicationTasksInput { +func (s *DescribeReplicationTaskAssessmentRunsInput) SetFilters(v []*Filter) *DescribeReplicationTaskAssessmentRunsInput { s.Filters = v return s } // SetMarker sets the Marker field's value. -func (s *DescribeReplicationTasksInput) SetMarker(v string) *DescribeReplicationTasksInput { +func (s *DescribeReplicationTaskAssessmentRunsInput) SetMarker(v string) *DescribeReplicationTaskAssessmentRunsInput { s.Marker = &v return s } // SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeReplicationTasksInput) SetMaxRecords(v int64) *DescribeReplicationTasksInput { +func (s *DescribeReplicationTaskAssessmentRunsInput) SetMaxRecords(v int64) *DescribeReplicationTaskAssessmentRunsInput { s.MaxRecords = &v return s } -// SetWithoutSettings sets the WithoutSettings field's value. -func (s *DescribeReplicationTasksInput) SetWithoutSettings(v bool) *DescribeReplicationTasksInput { - s.WithoutSettings = &v - return s -} - -type DescribeReplicationTasksOutput struct { +type DescribeReplicationTaskAssessmentRunsOutput struct { _ struct{} `type:"structure"` - // An optional pagination token provided by a previous request. If this parameter - // is specified, the response includes only records beyond the marker, up to - // the value specified by MaxRecords. + // A pagination token returned for you to pass to a subsequent request. If you + // pass this token as the Marker value in a subsequent request, the response + // includes only records beyond the marker, up to the value specified in the + // request by MaxRecords. Marker *string `type:"string"` - // A description of the replication tasks. - ReplicationTasks []*ReplicationTask `type:"list"` + // One or more premigration assessment runs as specified by Filters. 
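+	//
+	// A hedged usage sketch of retrieving these runs with one of the filter names
+	// listed above; svc and the task ARN are placeholders for illustration:
+	//
+	//    out, err := svc.DescribeReplicationTaskAssessmentRuns(&DescribeReplicationTaskAssessmentRunsInput{
+	//        Filters: []*Filter{{
+	//            Name:   aws.String("replication-task-arn"),
+	//            Values: []*string{aws.String("arn:aws:dms:us-east-1:123456789012:task:EXAMPLE")},
+	//        }},
+	//    })
+	//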
+ ReplicationTaskAssessmentRuns []*ReplicationTaskAssessmentRun `type:"list"` } // String returns the string representation -func (s DescribeReplicationTasksOutput) String() string { +func (s DescribeReplicationTaskAssessmentRunsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeReplicationTasksOutput) GoString() string { +func (s DescribeReplicationTaskAssessmentRunsOutput) GoString() string { return s.String() } // SetMarker sets the Marker field's value. -func (s *DescribeReplicationTasksOutput) SetMarker(v string) *DescribeReplicationTasksOutput { +func (s *DescribeReplicationTaskAssessmentRunsOutput) SetMarker(v string) *DescribeReplicationTaskAssessmentRunsOutput { s.Marker = &v return s } -// SetReplicationTasks sets the ReplicationTasks field's value. -func (s *DescribeReplicationTasksOutput) SetReplicationTasks(v []*ReplicationTask) *DescribeReplicationTasksOutput { - s.ReplicationTasks = v +// SetReplicationTaskAssessmentRuns sets the ReplicationTaskAssessmentRuns field's value. +func (s *DescribeReplicationTaskAssessmentRunsOutput) SetReplicationTaskAssessmentRuns(v []*ReplicationTaskAssessmentRun) *DescribeReplicationTaskAssessmentRunsOutput { + s.ReplicationTaskAssessmentRuns = v return s } -type DescribeSchemasInput struct { +type DescribeReplicationTaskIndividualAssessmentsInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. + // Filters applied to the individual assessments described in the form of key-value + // pairs. // - // EndpointArn is a required field - EndpointArn *string `type:"string" required:"true"` + // Valid filter names: replication-task-assessment-run-arn, replication-task-arn, + // status + Filters []*Filter `type:"list"` // An optional pagination token provided by a previous request. If this parameter // is specified, the response includes only records beyond the marker, up to @@ -8482,28 +9664,31 @@ type DescribeSchemasInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker // is included in the response so that the remaining results can be retrieved. - // - // Default: 100 - // - // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` } // String returns the string representation -func (s DescribeSchemasInput) String() string { +func (s DescribeReplicationTaskIndividualAssessmentsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeSchemasInput) GoString() string { +func (s DescribeReplicationTaskIndividualAssessmentsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeSchemasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeSchemasInput"} - if s.EndpointArn == nil { - invalidParams.Add(request.NewErrParamRequired("EndpointArn")) +func (s *DescribeReplicationTaskIndividualAssessmentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReplicationTaskIndividualAssessmentsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -8512,67 +9697,66 @@ func (s *DescribeSchemasInput) Validate() error { return nil } -// SetEndpointArn sets the EndpointArn field's value. -func (s *DescribeSchemasInput) SetEndpointArn(v string) *DescribeSchemasInput { - s.EndpointArn = &v +// SetFilters sets the Filters field's value. +func (s *DescribeReplicationTaskIndividualAssessmentsInput) SetFilters(v []*Filter) *DescribeReplicationTaskIndividualAssessmentsInput { + s.Filters = v return s } // SetMarker sets the Marker field's value. -func (s *DescribeSchemasInput) SetMarker(v string) *DescribeSchemasInput { +func (s *DescribeReplicationTaskIndividualAssessmentsInput) SetMarker(v string) *DescribeReplicationTaskIndividualAssessmentsInput { s.Marker = &v return s } // SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeSchemasInput) SetMaxRecords(v int64) *DescribeSchemasInput { +func (s *DescribeReplicationTaskIndividualAssessmentsInput) SetMaxRecords(v int64) *DescribeReplicationTaskIndividualAssessmentsInput { s.MaxRecords = &v return s } -type DescribeSchemasOutput struct { +type DescribeReplicationTaskIndividualAssessmentsOutput struct { _ struct{} `type:"structure"` - // An optional pagination token provided by a previous request. If this parameter - // is specified, the response includes only records beyond the marker, up to - // the value specified by MaxRecords. + // A pagination token returned for you to pass to a subsequent request. If you + // pass this token as the Marker value in a subsequent request, the response + // includes only records beyond the marker, up to the value specified in the + // request by MaxRecords. Marker *string `type:"string"` - // The described schema. - Schemas []*string `type:"list"` + // One or more individual assessments as specified by Filters. + ReplicationTaskIndividualAssessments []*ReplicationTaskIndividualAssessment `type:"list"` } // String returns the string representation -func (s DescribeSchemasOutput) String() string { +func (s DescribeReplicationTaskIndividualAssessmentsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeSchemasOutput) GoString() string { +func (s DescribeReplicationTaskIndividualAssessmentsOutput) GoString() string { return s.String() } // SetMarker sets the Marker field's value. -func (s *DescribeSchemasOutput) SetMarker(v string) *DescribeSchemasOutput { +func (s *DescribeReplicationTaskIndividualAssessmentsOutput) SetMarker(v string) *DescribeReplicationTaskIndividualAssessmentsOutput { s.Marker = &v return s } -// SetSchemas sets the Schemas field's value. -func (s *DescribeSchemasOutput) SetSchemas(v []*string) *DescribeSchemasOutput { - s.Schemas = v +// SetReplicationTaskIndividualAssessments sets the ReplicationTaskIndividualAssessments field's value. 
+func (s *DescribeReplicationTaskIndividualAssessmentsOutput) SetReplicationTaskIndividualAssessments(v []*ReplicationTaskIndividualAssessment) *DescribeReplicationTaskIndividualAssessmentsOutput { + s.ReplicationTaskIndividualAssessments = v return s } -type DescribeTableStatisticsInput struct { +type DescribeReplicationTasksInput struct { _ struct{} `type:"structure"` - // Filters applied to the describe table statistics action. - // - // Valid filter names: schema-name | table-name | table-state + // Filters applied to replication tasks. // - // A combination of filters creates an AND condition where each record matches - // all specified filters. + // Valid filter names: replication-task-arn | replication-task-id | migration-type + // | endpoint-arn | replication-instance-arn Filters []*Filter `type:"list"` // An optional pagination token provided by a previous request. If this parameter @@ -8586,31 +9770,28 @@ type DescribeTableStatisticsInput struct { // // Default: 100 // - // Constraints: Minimum 20, maximum 500. + // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` - // The Amazon Resource Name (ARN) of the replication task. - // - // ReplicationTaskArn is a required field - ReplicationTaskArn *string `type:"string" required:"true"` + // An option to set to avoid returning information about settings. Use this + // to reduce overhead when setting information is too large. To use this option, + // choose true; otherwise, choose false (the default). + WithoutSettings *bool `type:"boolean"` } // String returns the string representation -func (s DescribeTableStatisticsInput) String() string { +func (s DescribeReplicationTasksInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTableStatisticsInput) GoString() string { +func (s DescribeReplicationTasksInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeTableStatisticsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeTableStatisticsInput"} - if s.ReplicationTaskArn == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationTaskArn")) - } +func (s *DescribeReplicationTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReplicationTasksInput"} if s.Filters != nil { for i, v := range s.Filters { if v == nil { @@ -8629,28 +9810,248 @@ func (s *DescribeTableStatisticsInput) Validate() error { } // SetFilters sets the Filters field's value. -func (s *DescribeTableStatisticsInput) SetFilters(v []*Filter) *DescribeTableStatisticsInput { +func (s *DescribeReplicationTasksInput) SetFilters(v []*Filter) *DescribeReplicationTasksInput { s.Filters = v return s } // SetMarker sets the Marker field's value. -func (s *DescribeTableStatisticsInput) SetMarker(v string) *DescribeTableStatisticsInput { +func (s *DescribeReplicationTasksInput) SetMarker(v string) *DescribeReplicationTasksInput { s.Marker = &v return s } // SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeTableStatisticsInput) SetMaxRecords(v int64) *DescribeTableStatisticsInput { +func (s *DescribeReplicationTasksInput) SetMaxRecords(v int64) *DescribeReplicationTasksInput { s.MaxRecords = &v return s } -// SetReplicationTaskArn sets the ReplicationTaskArn field's value. 
-func (s *DescribeTableStatisticsInput) SetReplicationTaskArn(v string) *DescribeTableStatisticsInput { - s.ReplicationTaskArn = &v - return s -} +// SetWithoutSettings sets the WithoutSettings field's value. +func (s *DescribeReplicationTasksInput) SetWithoutSettings(v bool) *DescribeReplicationTasksInput { + s.WithoutSettings = &v + return s +} + +type DescribeReplicationTasksOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // A description of the replication tasks. + ReplicationTasks []*ReplicationTask `type:"list"` +} + +// String returns the string representation +func (s DescribeReplicationTasksOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReplicationTasksOutput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *DescribeReplicationTasksOutput) SetMarker(v string) *DescribeReplicationTasksOutput { + s.Marker = &v + return s +} + +// SetReplicationTasks sets the ReplicationTasks field's value. +func (s *DescribeReplicationTasksOutput) SetReplicationTasks(v []*ReplicationTask) *DescribeReplicationTasksOutput { + s.ReplicationTasks = v + return s +} + +type DescribeSchemasInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. + // + // EndpointArn is a required field + EndpointArn *string `type:"string" required:"true"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeSchemasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSchemasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSchemasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSchemasInput"} + if s.EndpointArn == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEndpointArn sets the EndpointArn field's value. +func (s *DescribeSchemasInput) SetEndpointArn(v string) *DescribeSchemasInput { + s.EndpointArn = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeSchemasInput) SetMarker(v string) *DescribeSchemasInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeSchemasInput) SetMaxRecords(v int64) *DescribeSchemasInput { + s.MaxRecords = &v + return s +} + +type DescribeSchemasOutput struct { + _ struct{} `type:"structure"` + + // An optional pagination token provided by a previous request. 
If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The described schema. + Schemas []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeSchemasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSchemasOutput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *DescribeSchemasOutput) SetMarker(v string) *DescribeSchemasOutput { + s.Marker = &v + return s +} + +// SetSchemas sets the Schemas field's value. +func (s *DescribeSchemasOutput) SetSchemas(v []*string) *DescribeSchemasOutput { + s.Schemas = v + return s +} + +type DescribeTableStatisticsInput struct { + _ struct{} `type:"structure"` + + // Filters applied to table statistics. + // + // Valid filter names: schema-name | table-name | table-state + // + // A combination of filters creates an AND condition where each record matches + // all specified filters. + Filters []*Filter `type:"list"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 500. + MaxRecords *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) of the replication task. + // + // ReplicationTaskArn is a required field + ReplicationTaskArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTableStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableStatisticsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTableStatisticsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTableStatisticsInput"} + if s.ReplicationTaskArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationTaskArn")) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeTableStatisticsInput) SetFilters(v []*Filter) *DescribeTableStatisticsInput { + s.Filters = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeTableStatisticsInput) SetMarker(v string) *DescribeTableStatisticsInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeTableStatisticsInput) SetMaxRecords(v int64) *DescribeTableStatisticsInput { + s.MaxRecords = &v + return s +} + +// SetReplicationTaskArn sets the ReplicationTaskArn field's value. 
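// Illustrative usage sketch: the Describe* inputs above share the
// Filters/Marker/MaxRecords pagination pattern, and this revision adds the
// WithoutSettings flag to DescribeReplicationTasksInput. The following
// hypothetical program drives that pagination with the generated
// DescribeReplicationTasksPages helper; the region, filter value, and output
// handling are placeholders, not part of the SDK.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := databasemigrationservice.New(sess)

	input := &databasemigrationservice.DescribeReplicationTasksInput{
		Filters: []*databasemigrationservice.Filter{{
			Name:   aws.String("migration-type"),
			Values: []*string{aws.String("full-load")},
		}},
		MaxRecords: aws.Int64(20),
		// Skip the (potentially large) task settings document in each record.
		WithoutSettings: aws.Bool(true),
	}

	// The Pages helper follows the Marker token until the last page.
	err := svc.DescribeReplicationTasksPages(input,
		func(page *databasemigrationservice.DescribeReplicationTasksOutput, lastPage bool) bool {
			for _, task := range page.ReplicationTasks {
				fmt.Println(aws.StringValue(task.ReplicationTaskIdentifier), aws.StringValue(task.Status))
			}
			return true
		})
	if err != nil {
		log.Fatal(err)
	}
}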
+func (s *DescribeTableStatisticsInput) SetReplicationTaskArn(v string) *DescribeTableStatisticsInput { + s.ReplicationTaskArn = &v + return s +} type DescribeTableStatisticsOutput struct { _ struct{} `type:"structure"` @@ -8772,7 +10173,8 @@ func (s *DynamoDbSettings) SetServiceAccessRoleArn(v string) *DynamoDbSettings { type ElasticsearchSettings struct { _ struct{} `type:"structure"` - // The endpoint for the Elasticsearch cluster. + // The endpoint for the Elasticsearch cluster. AWS DMS uses HTTPS if a transport + // protocol (http/https) is not specified. // // EndpointUri is a required field EndpointUri *string `type:"string" required:"true"` @@ -8783,6 +10185,11 @@ type ElasticsearchSettings struct { // The maximum percentage of records that can fail to be written before a full // load operation stops. + // + // To avoid early failure, this counter is only effective after 1000 records + // are transferred. Elasticsearch also has the concept of error monitoring during + // the last 10 minutes of an Observation Window. If transfer of all records + // fail in the last 10 minutes, the full load operation stops. FullLoadErrorPercentage *int64 `type:"integer"` // The Amazon Resource Name (ARN) used by service to access the IAM role. @@ -8879,7 +10286,7 @@ type Endpoint struct { // "BucketName": "string", "CompressionType": "none"|"gzip" } DmsTransferSettings *DmsTransferSettings `type:"structure"` - // The settings for the target DynamoDB database. For more information, see + // The settings for the DynamoDB target endpoint. For more information, see // the DynamoDBSettings structure. DynamoDbSettings *DynamoDbSettings `type:"structure"` @@ -8905,7 +10312,7 @@ type Endpoint struct { // The database engine name. Valid values, depending on the EndpointType, include // "mysql", "oracle", "postgres", "mariadb", "aurora", "aurora-postgresql", // "redshift", "s3", "db2", "azuredb", "sybase", "dynamodb", "mongodb", "kinesis", - // "kafka", "elasticsearch", "documentdb", and "sqlserver". + // "kafka", "elasticsearch", "documentdb", "sqlserver", and "neptune". EngineName *string `type:"string"` // Value returned by a call to CreateEndpoint that can be used for cross-account @@ -8919,6 +10326,10 @@ type Endpoint struct { // Additional connection attributes used to connect to the endpoint. ExtraConnectionAttributes *string `type:"string"` + // The settings for the IBM Db2 LUW source endpoint. For more information, see + // the IBMDb2Settings structure. + IBMDb2Settings *IBMDb2Settings `type:"structure"` + // The settings for the Apache Kafka target endpoint. For more information, // see the KafkaSettings structure. KafkaSettings *KafkaSettings `type:"structure"` @@ -8937,13 +10348,33 @@ type Endpoint struct { // account has a different default encryption key for each AWS Region. KmsKeyId *string `type:"string"` + // The settings for the Microsoft SQL Server source and target endpoint. For + // more information, see the MicrosoftSQLServerSettings structure. + MicrosoftSQLServerSettings *MicrosoftSQLServerSettings `type:"structure"` + // The settings for the MongoDB source endpoint. For more information, see the // MongoDbSettings structure. MongoDbSettings *MongoDbSettings `type:"structure"` + // The settings for the MySQL source and target endpoint. For more information, + // see the MySQLSettings structure. + MySQLSettings *MySQLSettings `type:"structure"` + + // The settings for the Amazon Neptune target endpoint. For more information, + // see the NeptuneSettings structure. 
+ NeptuneSettings *NeptuneSettings `type:"structure"` + + // The settings for the Oracle source and target endpoint. For more information, + // see the OracleSettings structure. + OracleSettings *OracleSettings `type:"structure"` + // The port value used to access the endpoint. Port *int64 `type:"integer"` + // The settings for the PostgreSQL source and target endpoint. For more information, + // see the PostgreSQLSettings structure. + PostgreSQLSettings *PostgreSQLSettings `type:"structure"` + // Settings for the Amazon Redshift endpoint. RedshiftSettings *RedshiftSettings `type:"structure"` @@ -8963,6 +10394,10 @@ type Endpoint struct { // The status of the endpoint. Status *string `type:"string"` + // The settings for the SAP ASE source and target endpoint. For more information, + // see the SybaseSettings structure. + SybaseSettings *SybaseSettings `type:"structure"` + // The user name used to connect to the endpoint. Username *string `type:"string"` } @@ -9055,6 +10490,12 @@ func (s *Endpoint) SetExtraConnectionAttributes(v string) *Endpoint { return s } +// SetIBMDb2Settings sets the IBMDb2Settings field's value. +func (s *Endpoint) SetIBMDb2Settings(v *IBMDb2Settings) *Endpoint { + s.IBMDb2Settings = v + return s +} + // SetKafkaSettings sets the KafkaSettings field's value. func (s *Endpoint) SetKafkaSettings(v *KafkaSettings) *Endpoint { s.KafkaSettings = v @@ -9073,18 +10514,48 @@ func (s *Endpoint) SetKmsKeyId(v string) *Endpoint { return s } +// SetMicrosoftSQLServerSettings sets the MicrosoftSQLServerSettings field's value. +func (s *Endpoint) SetMicrosoftSQLServerSettings(v *MicrosoftSQLServerSettings) *Endpoint { + s.MicrosoftSQLServerSettings = v + return s +} + // SetMongoDbSettings sets the MongoDbSettings field's value. func (s *Endpoint) SetMongoDbSettings(v *MongoDbSettings) *Endpoint { s.MongoDbSettings = v return s } +// SetMySQLSettings sets the MySQLSettings field's value. +func (s *Endpoint) SetMySQLSettings(v *MySQLSettings) *Endpoint { + s.MySQLSettings = v + return s +} + +// SetNeptuneSettings sets the NeptuneSettings field's value. +func (s *Endpoint) SetNeptuneSettings(v *NeptuneSettings) *Endpoint { + s.NeptuneSettings = v + return s +} + +// SetOracleSettings sets the OracleSettings field's value. +func (s *Endpoint) SetOracleSettings(v *OracleSettings) *Endpoint { + s.OracleSettings = v + return s +} + // SetPort sets the Port field's value. func (s *Endpoint) SetPort(v int64) *Endpoint { s.Port = &v return s } +// SetPostgreSQLSettings sets the PostgreSQLSettings field's value. +func (s *Endpoint) SetPostgreSQLSettings(v *PostgreSQLSettings) *Endpoint { + s.PostgreSQLSettings = v + return s +} + // SetRedshiftSettings sets the RedshiftSettings field's value. func (s *Endpoint) SetRedshiftSettings(v *RedshiftSettings) *Endpoint { s.RedshiftSettings = v @@ -9121,6 +10592,12 @@ func (s *Endpoint) SetStatus(v string) *Endpoint { return s } +// SetSybaseSettings sets the SybaseSettings field's value. +func (s *Endpoint) SetSybaseSettings(v *SybaseSettings) *Endpoint { + s.SybaseSettings = v + return s +} + // SetUsername sets the Username field's value. func (s *Endpoint) SetUsername(v string) *Endpoint { s.Username = &v @@ -9192,7 +10669,9 @@ func (s *Event) SetSourceType(v string) *Event { } // Lists categories of events subscribed to, and generated by, the applicable -// AWS DMS resource type. +// AWS DMS resource type. 
This data type appears in response to the DescribeEventCategories +// (https://docs.aws.amazon.com/dms/latest/APIReference/API_EventCategoryGroup.html) +// action. type EventCategoryGroup struct { _ struct{} `type:"structure"` @@ -9269,7 +10748,7 @@ type EventSubscription struct { // topic was deleted after the subscription was created. Status *string `type:"string"` - // The time the RDS event notification subscription was created. + // The time the AWS DMS event notification subscription was created. SubscriptionCreationTime *string `type:"string"` } @@ -9337,17 +10816,20 @@ func (s *EventSubscription) SetSubscriptionCreationTime(v string) *EventSubscrip return s } -// Identifies the name and value of a source filter object used to limit the -// number and type of records transferred from your source to your target. +// Identifies the name and value of a filter object. This filter is used to +// limit the number and type of AWS DMS objects that are returned for a particular +// Describe* call or similar operation. Filters are used as an optional parameter +// to the following APIs. type Filter struct { _ struct{} `type:"structure"` - // The name of the filter. + // The name of the filter as specified for a Describe* or similar operation. // // Name is a required field Name *string `type:"string" required:"true"` - // The filter value. + // The filter value, which can specify one or more values used to narrow the + // returned results. // // Values is a required field Values []*string `type:"list" required:"true"` @@ -9391,11 +10873,99 @@ func (s *Filter) SetValues(v []*string) *Filter { return s } -type ImportCertificateInput struct { +// Provides information that defines an IBM Db2 LUW endpoint. +type IBMDb2Settings struct { _ struct{} `type:"structure"` - // A customer-assigned name for the certificate. Identifiers must begin with - // a letter and must contain only ASCII letters, digits, and hyphens. They can't + // For ongoing replication (CDC), use CurrentLSN to specify a log sequence number + // (LSN) where you want the replication to start. + CurrentLsn *string `type:"string"` + + // Database name for the endpoint. + DatabaseName *string `type:"string"` + + // Maximum number of bytes per read, as a NUMBER value. The default is 64 KB. + MaxKBytesPerRead *int64 `type:"integer"` + + // Endpoint connection password. + Password *string `type:"string" sensitive:"true"` + + // Endpoint TCP port. + Port *int64 `type:"integer"` + + // Fully qualified domain name of the endpoint. + ServerName *string `type:"string"` + + // Enables ongoing replication (CDC) as a BOOLEAN value. The default is true. + SetDataCaptureChanges *bool `type:"boolean"` + + // Endpoint connection user name. + Username *string `type:"string"` +} + +// String returns the string representation +func (s IBMDb2Settings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IBMDb2Settings) GoString() string { + return s.String() +} + +// SetCurrentLsn sets the CurrentLsn field's value. +func (s *IBMDb2Settings) SetCurrentLsn(v string) *IBMDb2Settings { + s.CurrentLsn = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *IBMDb2Settings) SetDatabaseName(v string) *IBMDb2Settings { + s.DatabaseName = &v + return s +} + +// SetMaxKBytesPerRead sets the MaxKBytesPerRead field's value. 
+func (s *IBMDb2Settings) SetMaxKBytesPerRead(v int64) *IBMDb2Settings { + s.MaxKBytesPerRead = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *IBMDb2Settings) SetPassword(v string) *IBMDb2Settings { + s.Password = &v + return s +} + +// SetPort sets the Port field's value. +func (s *IBMDb2Settings) SetPort(v int64) *IBMDb2Settings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *IBMDb2Settings) SetServerName(v string) *IBMDb2Settings { + s.ServerName = &v + return s +} + +// SetSetDataCaptureChanges sets the SetDataCaptureChanges field's value. +func (s *IBMDb2Settings) SetSetDataCaptureChanges(v bool) *IBMDb2Settings { + s.SetDataCaptureChanges = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *IBMDb2Settings) SetUsername(v string) *IBMDb2Settings { + s.Username = &v + return s +} + +type ImportCertificateInput struct { + _ struct{} `type:"structure"` + + // A customer-assigned name for the certificate. Identifiers must begin with + // a letter and must contain only ASCII letters, digits, and hyphens. They can't // end with a hyphen or contain two consecutive hyphens. // // CertificateIdentifier is a required field @@ -9485,8 +11055,8 @@ func (s *ImportCertificateOutput) SetCertificate(v *Certificate) *ImportCertific // There are not enough resources allocated to the database migration. type InsufficientResourceCapacityFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9503,17 +11073,17 @@ func (s InsufficientResourceCapacityFault) GoString() string { func newErrorInsufficientResourceCapacityFault(v protocol.ResponseMetadata) error { return &InsufficientResourceCapacityFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InsufficientResourceCapacityFault) Code() string { +func (s *InsufficientResourceCapacityFault) Code() string { return "InsufficientResourceCapacityFault" } // Message returns the exception's message. -func (s InsufficientResourceCapacityFault) Message() string { +func (s *InsufficientResourceCapacityFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9521,28 +11091,28 @@ func (s InsufficientResourceCapacityFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InsufficientResourceCapacityFault) OrigErr() error { +func (s *InsufficientResourceCapacityFault) OrigErr() error { return nil } -func (s InsufficientResourceCapacityFault) Error() string { +func (s *InsufficientResourceCapacityFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InsufficientResourceCapacityFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InsufficientResourceCapacityFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InsufficientResourceCapacityFault) RequestID() string { - return s.respMetadata.RequestID +func (s *InsufficientResourceCapacityFault) RequestID() string { + return s.RespMetadata.RequestID } // The certificate was not valid. 
type InvalidCertificateFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9559,17 +11129,17 @@ func (s InvalidCertificateFault) GoString() string { func newErrorInvalidCertificateFault(v protocol.ResponseMetadata) error { return &InvalidCertificateFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidCertificateFault) Code() string { +func (s *InvalidCertificateFault) Code() string { return "InvalidCertificateFault" } // Message returns the exception's message. -func (s InvalidCertificateFault) Message() string { +func (s *InvalidCertificateFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9577,29 +11147,29 @@ func (s InvalidCertificateFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidCertificateFault) OrigErr() error { +func (s *InvalidCertificateFault) OrigErr() error { return nil } -func (s InvalidCertificateFault) Error() string { +func (s *InvalidCertificateFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidCertificateFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidCertificateFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidCertificateFault) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidCertificateFault) RequestID() string { + return s.RespMetadata.RequestID } // The resource is in a state that prevents it from being used for database // migration. type InvalidResourceStateFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9616,17 +11186,17 @@ func (s InvalidResourceStateFault) GoString() string { func newErrorInvalidResourceStateFault(v protocol.ResponseMetadata) error { return &InvalidResourceStateFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidResourceStateFault) Code() string { +func (s *InvalidResourceStateFault) Code() string { return "InvalidResourceStateFault" } // Message returns the exception's message. -func (s InvalidResourceStateFault) Message() string { +func (s *InvalidResourceStateFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9634,28 +11204,28 @@ func (s InvalidResourceStateFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResourceStateFault) OrigErr() error { +func (s *InvalidResourceStateFault) OrigErr() error { return nil } -func (s InvalidResourceStateFault) Error() string { +func (s *InvalidResourceStateFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidResourceStateFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResourceStateFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidResourceStateFault) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResourceStateFault) RequestID() string { + return s.RespMetadata.RequestID } // The subnet provided is invalid. type InvalidSubnet struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9672,17 +11242,17 @@ func (s InvalidSubnet) GoString() string { func newErrorInvalidSubnet(v protocol.ResponseMetadata) error { return &InvalidSubnet{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSubnet) Code() string { +func (s *InvalidSubnet) Code() string { return "InvalidSubnet" } // Message returns the exception's message. -func (s InvalidSubnet) Message() string { +func (s *InvalidSubnet) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9690,29 +11260,29 @@ func (s InvalidSubnet) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidSubnet) OrigErr() error { +func (s *InvalidSubnet) OrigErr() error { return nil } -func (s InvalidSubnet) Error() string { +func (s *InvalidSubnet) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSubnet) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSubnet) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSubnet) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSubnet) RequestID() string { + return s.RespMetadata.RequestID } // The ciphertext references a key that doesn't exist or that the DMS account // doesn't have access to. type KMSAccessDeniedFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9729,17 +11299,17 @@ func (s KMSAccessDeniedFault) GoString() string { func newErrorKMSAccessDeniedFault(v protocol.ResponseMetadata) error { return &KMSAccessDeniedFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSAccessDeniedFault) Code() string { +func (s *KMSAccessDeniedFault) Code() string { return "KMSAccessDeniedFault" } // Message returns the exception's message. -func (s KMSAccessDeniedFault) Message() string { +func (s *KMSAccessDeniedFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9747,28 +11317,28 @@ func (s KMSAccessDeniedFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSAccessDeniedFault) OrigErr() error { +func (s *KMSAccessDeniedFault) OrigErr() error { return nil } -func (s KMSAccessDeniedFault) Error() string { +func (s *KMSAccessDeniedFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSAccessDeniedFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSAccessDeniedFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s KMSAccessDeniedFault) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSAccessDeniedFault) RequestID() string { + return s.RespMetadata.RequestID } // The specified master key (CMK) isn't enabled. type KMSDisabledFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9785,17 +11355,74 @@ func (s KMSDisabledFault) GoString() string { func newErrorKMSDisabledFault(v protocol.ResponseMetadata) error { return &KMSDisabledFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSDisabledFault) Code() string { +func (s *KMSDisabledFault) Code() string { return "KMSDisabledFault" } // Message returns the exception's message. -func (s KMSDisabledFault) Message() string { +func (s *KMSDisabledFault) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *KMSDisabledFault) OrigErr() error { + return nil +} + +func (s *KMSDisabledFault) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *KMSDisabledFault) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *KMSDisabledFault) RequestID() string { + return s.RespMetadata.RequestID +} + +// An AWS Key Management Service (AWS KMS) error is preventing access to AWS +// KMS. +type KMSFault struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s KMSFault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KMSFault) GoString() string { + return s.String() +} + +func newErrorKMSFault(v protocol.ResponseMetadata) error { + return &KMSFault{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *KMSFault) Code() string { + return "KMSFault" +} + +// Message returns the exception's message. +func (s *KMSFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9803,28 +11430,28 @@ func (s KMSDisabledFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSDisabledFault) OrigErr() error { +func (s *KMSFault) OrigErr() error { return nil } -func (s KMSDisabledFault) Error() string { +func (s *KMSFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSDisabledFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSDisabledFault) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSFault) RequestID() string { + return s.RespMetadata.RequestID } // The state of the specified AWS KMS resource isn't valid for this request. 
type KMSInvalidStateFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9841,17 +11468,17 @@ func (s KMSInvalidStateFault) GoString() string { func newErrorKMSInvalidStateFault(v protocol.ResponseMetadata) error { return &KMSInvalidStateFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSInvalidStateFault) Code() string { +func (s *KMSInvalidStateFault) Code() string { return "KMSInvalidStateFault" } // Message returns the exception's message. -func (s KMSInvalidStateFault) Message() string { +func (s *KMSInvalidStateFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9859,28 +11486,28 @@ func (s KMSInvalidStateFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSInvalidStateFault) OrigErr() error { +func (s *KMSInvalidStateFault) OrigErr() error { return nil } -func (s KMSInvalidStateFault) Error() string { +func (s *KMSInvalidStateFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSInvalidStateFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSInvalidStateFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSInvalidStateFault) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSInvalidStateFault) RequestID() string { + return s.RespMetadata.RequestID } // AWS DMS cannot access the AWS KMS key. type KMSKeyNotAccessibleFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9897,17 +11524,17 @@ func (s KMSKeyNotAccessibleFault) GoString() string { func newErrorKMSKeyNotAccessibleFault(v protocol.ResponseMetadata) error { return &KMSKeyNotAccessibleFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSKeyNotAccessibleFault) Code() string { +func (s *KMSKeyNotAccessibleFault) Code() string { return "KMSKeyNotAccessibleFault" } // Message returns the exception's message. -func (s KMSKeyNotAccessibleFault) Message() string { +func (s *KMSKeyNotAccessibleFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9915,28 +11542,28 @@ func (s KMSKeyNotAccessibleFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSKeyNotAccessibleFault) OrigErr() error { +func (s *KMSKeyNotAccessibleFault) OrigErr() error { return nil } -func (s KMSKeyNotAccessibleFault) Error() string { +func (s *KMSKeyNotAccessibleFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSKeyNotAccessibleFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSKeyNotAccessibleFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s KMSKeyNotAccessibleFault) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSKeyNotAccessibleFault) RequestID() string { + return s.RespMetadata.RequestID } // The specified AWS KMS entity or resource can't be found. type KMSNotFoundFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9953,17 +11580,17 @@ func (s KMSNotFoundFault) GoString() string { func newErrorKMSNotFoundFault(v protocol.ResponseMetadata) error { return &KMSNotFoundFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSNotFoundFault) Code() string { +func (s *KMSNotFoundFault) Code() string { return "KMSNotFoundFault" } // Message returns the exception's message. -func (s KMSNotFoundFault) Message() string { +func (s *KMSNotFoundFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9971,28 +11598,28 @@ func (s KMSNotFoundFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSNotFoundFault) OrigErr() error { +func (s *KMSNotFoundFault) OrigErr() error { return nil } -func (s KMSNotFoundFault) Error() string { +func (s *KMSNotFoundFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSNotFoundFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSNotFoundFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSNotFoundFault) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSNotFoundFault) RequestID() string { + return s.RespMetadata.RequestID } // This request triggered AWS KMS request throttling. type KMSThrottlingFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10009,17 +11636,17 @@ func (s KMSThrottlingFault) GoString() string { func newErrorKMSThrottlingFault(v protocol.ResponseMetadata) error { return &KMSThrottlingFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSThrottlingFault) Code() string { +func (s *KMSThrottlingFault) Code() string { return "KMSThrottlingFault" } // Message returns the exception's message. -func (s KMSThrottlingFault) Message() string { +func (s *KMSThrottlingFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10027,22 +11654,22 @@ func (s KMSThrottlingFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSThrottlingFault) OrigErr() error { +func (s *KMSThrottlingFault) OrigErr() error { return nil } -func (s KMSThrottlingFault) Error() string { +func (s *KMSThrottlingFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSThrottlingFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSThrottlingFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
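// Illustrative usage sketch: the fault types in this file now use pointer
// receivers and an exported RespMetadata field, so callers can generally match
// them with errors.As and read the HTTP status code directly. Hypothetical
// example; the ARN is a placeholder and the error handling is simplified.
package main

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/databasemigrationservice"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := databasemigrationservice.New(sess)

	_, err := svc.DeleteReplicationTask(&databasemigrationservice.DeleteReplicationTaskInput{
		ReplicationTaskArn: aws.String("arn:aws:dms:us-east-1:123456789012:task:EXAMPLE"),
	})
	if err != nil {
		var stateFault *databasemigrationservice.InvalidResourceStateFault
		var kmsFault *databasemigrationservice.KMSKeyNotAccessibleFault
		switch {
		case errors.As(err, &stateFault):
			// The exported RespMetadata backs StatusCode() and RequestID().
			log.Printf("task not in a deletable state (HTTP %d): %s",
				stateFault.StatusCode(), stateFault.Message())
		case errors.As(err, &kmsFault):
			log.Printf("KMS key not accessible: %s", kmsFault.Message())
		default:
			log.Fatal(err)
		}
	}
}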
-func (s KMSThrottlingFault) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSThrottlingFault) RequestID() string { + return s.RespMetadata.RequestID } // Provides information that describes an Apache Kafka endpoint. This information @@ -10056,6 +11683,46 @@ type KafkaSettings struct { // "ec2-12-345-678-901.compute-1.amazonaws.com:2345". Broker *string `type:"string"` + // Shows detailed control information for table definition, column definition, + // and table and column changes in the Kafka message output. The default is + // false. + IncludeControlDetails *bool `type:"boolean"` + + // Include NULL and empty columns for records migrated to the endpoint. The + // default is false. + IncludeNullAndEmpty *bool `type:"boolean"` + + // Shows the partition value within the Kafka message output, unless the partition + // type is schema-table-type. The default is false. + IncludePartitionValue *bool `type:"boolean"` + + // Includes any data definition language (DDL) operations that change the table + // in the control data, such as rename-table, drop-table, add-column, drop-column, + // and rename-column. The default is false. + IncludeTableAlterOperations *bool `type:"boolean"` + + // Provides detailed transaction information from the source database. This + // information includes a commit timestamp, a log position, and values for transaction_id, + // previous transaction_id, and transaction_record_id (the record offset within + // a transaction). The default is false. + IncludeTransactionDetails *bool `type:"boolean"` + + // The output format for the records created on the endpoint. The message format + // is JSON (default) or JSON_UNFORMATTED (a single line with no tab). + MessageFormat *string `type:"string" enum:"MessageFormatValue"` + + // The maximum size in bytes for records created on the endpoint The default + // is 1,000,000. + MessageMaxBytes *int64 `type:"integer"` + + // Prefixes schema and table names to partition values, when the partition type + // is primary-key-type. Doing this increases data distribution among Kafka partitions. + // For example, suppose that a SysBench schema has thousands of tables and each + // table has only limited range for a primary key. In this case, the same primary + // key is sent from thousands of tables to the same partition, which causes + // throttling. The default is false. + PartitionIncludeSchemaTable *bool `type:"boolean"` + // The topic to which you migrate the data. If you don't specify a topic, AWS // DMS specifies "kafka-default-topic" as the migration topic. Topic *string `type:"string"` @@ -10077,6 +11744,54 @@ func (s *KafkaSettings) SetBroker(v string) *KafkaSettings { return s } +// SetIncludeControlDetails sets the IncludeControlDetails field's value. +func (s *KafkaSettings) SetIncludeControlDetails(v bool) *KafkaSettings { + s.IncludeControlDetails = &v + return s +} + +// SetIncludeNullAndEmpty sets the IncludeNullAndEmpty field's value. +func (s *KafkaSettings) SetIncludeNullAndEmpty(v bool) *KafkaSettings { + s.IncludeNullAndEmpty = &v + return s +} + +// SetIncludePartitionValue sets the IncludePartitionValue field's value. +func (s *KafkaSettings) SetIncludePartitionValue(v bool) *KafkaSettings { + s.IncludePartitionValue = &v + return s +} + +// SetIncludeTableAlterOperations sets the IncludeTableAlterOperations field's value. 
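// Illustrative usage sketch: the new KafkaSettings fields above mirror the
// existing KinesisSettings knobs for enriching message output. A hypothetical
// helper that combines the pre-existing Broker/Topic fields with the flags
// added in this revision; the broker and topic values are placeholders, and it
// assumes the aws and databasemigrationservice imports from the earlier sketch.
func newExampleKafkaSettings() *databasemigrationservice.KafkaSettings {
	return &databasemigrationservice.KafkaSettings{
		Broker:                      aws.String("b-1.example.kafka.us-east-1.amazonaws.com:9092"),
		Topic:                       aws.String("dms-example-topic"),
		MessageFormat:               aws.String(databasemigrationservice.MessageFormatValueJson),
		MessageMaxBytes:             aws.Int64(1000000),
		IncludeTransactionDetails:   aws.Bool(true),
		IncludeControlDetails:       aws.Bool(true),
		IncludePartitionValue:       aws.Bool(true),
		PartitionIncludeSchemaTable: aws.Bool(true),
	}
}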
+func (s *KafkaSettings) SetIncludeTableAlterOperations(v bool) *KafkaSettings { + s.IncludeTableAlterOperations = &v + return s +} + +// SetIncludeTransactionDetails sets the IncludeTransactionDetails field's value. +func (s *KafkaSettings) SetIncludeTransactionDetails(v bool) *KafkaSettings { + s.IncludeTransactionDetails = &v + return s +} + +// SetMessageFormat sets the MessageFormat field's value. +func (s *KafkaSettings) SetMessageFormat(v string) *KafkaSettings { + s.MessageFormat = &v + return s +} + +// SetMessageMaxBytes sets the MessageMaxBytes field's value. +func (s *KafkaSettings) SetMessageMaxBytes(v int64) *KafkaSettings { + s.MessageMaxBytes = &v + return s +} + +// SetPartitionIncludeSchemaTable sets the PartitionIncludeSchemaTable field's value. +func (s *KafkaSettings) SetPartitionIncludeSchemaTable(v bool) *KafkaSettings { + s.PartitionIncludeSchemaTable = &v + return s +} + // SetTopic sets the Topic field's value. func (s *KafkaSettings) SetTopic(v string) *KafkaSettings { s.Topic = &v @@ -10091,22 +11806,26 @@ type KinesisSettings struct { // Shows detailed control information for table definition, column definition, // and table and column changes in the Kinesis message output. The default is - // False. + // false. IncludeControlDetails *bool `type:"boolean"` + // Include NULL and empty columns for records migrated to the endpoint. The + // default is false. + IncludeNullAndEmpty *bool `type:"boolean"` + // Shows the partition value within the Kinesis message output, unless the partition - // type is schema-table-type. The default is False. + // type is schema-table-type. The default is false. IncludePartitionValue *bool `type:"boolean"` // Includes any data definition language (DDL) operations that change the table // in the control data, such as rename-table, drop-table, add-column, drop-column, - // and rename-column. The default is False. + // and rename-column. The default is false. IncludeTableAlterOperations *bool `type:"boolean"` // Provides detailed transaction information from the source database. This // information includes a commit timestamp, a log position, and values for transaction_id, // previous transaction_id, and transaction_record_id (the record offset within - // a transaction). The default is False. + // a transaction). The default is false. IncludeTransactionDetails *bool `type:"boolean"` // The output format for the records created on the endpoint. The message format @@ -10118,7 +11837,7 @@ type KinesisSettings struct { // shards. For example, suppose that a SysBench schema has thousands of tables // and each table has only limited range for a primary key. In this case, the // same primary key is sent from thousands of tables to the same shard, which - // causes throttling. The default is False. + // causes throttling. The default is false. PartitionIncludeSchemaTable *bool `type:"boolean"` // The Amazon Resource Name (ARN) for the AWS Identity and Access Management @@ -10145,6 +11864,12 @@ func (s *KinesisSettings) SetIncludeControlDetails(v bool) *KinesisSettings { return s } +// SetIncludeNullAndEmpty sets the IncludeNullAndEmpty field's value. +func (s *KinesisSettings) SetIncludeNullAndEmpty(v bool) *KinesisSettings { + s.IncludeNullAndEmpty = &v + return s +} + // SetIncludePartitionValue sets the IncludePartitionValue field's value. 
func (s *KinesisSettings) SetIncludePartitionValue(v bool) *KinesisSettings { s.IncludePartitionValue = &v @@ -10249,6 +11974,134 @@ func (s *ListTagsForResourceOutput) SetTagList(v []*Tag) *ListTagsForResourceOut return s } +// Provides information that defines a Microsoft SQL Server endpoint. +type MicrosoftSQLServerSettings struct { + _ struct{} `type:"structure"` + + // The maximum size of the packets (in bytes) used to transfer data using BCP. + BcpPacketSize *int64 `type:"integer"` + + // Specify a filegroup for the AWS DMS internal tables. When the replication + // task starts, all the internal AWS DMS control tables (awsdms_ apply_exception, + // awsdms_apply, awsdms_changes) are created on the specified filegroup. + ControlTablesFileGroup *string `type:"string"` + + // Database name for the endpoint. + DatabaseName *string `type:"string"` + + // Endpoint connection password. + Password *string `type:"string" sensitive:"true"` + + // Endpoint TCP port. + Port *int64 `type:"integer"` + + // When this attribute is set to Y, AWS DMS only reads changes from transaction + // log backups and doesn't read from the active transaction log file during + // ongoing replication. Setting this parameter to Y enables you to control active + // transaction log file growth during full load and ongoing replication tasks. + // However, it can add some source latency to ongoing replication. + ReadBackupOnly *bool `type:"boolean"` + + // Use this attribute to minimize the need to access the backup log and enable + // AWS DMS to prevent truncation using one of the following two methods. + // + // Start transactions in the database: This is the default method. When this + // method is used, AWS DMS prevents TLOG truncation by mimicking a transaction + // in the database. As long as such a transaction is open, changes that appear + // after the transaction started aren't truncated. If you need Microsoft Replication + // to be enabled in your database, then you must choose this method. + // + // Exclusively use sp_repldone within a single task: When this method is used, + // AWS DMS reads the changes and then uses sp_repldone to mark the TLOG transactions + // as ready for truncation. Although this method doesn't involve any transactional + // activities, it can only be used when Microsoft Replication isn't running. + // Also, when using this method, only one AWS DMS task can access the database + // at any given time. Therefore, if you need to run parallel AWS DMS tasks against + // the same database, use the default method. + SafeguardPolicy *string `type:"string" enum:"SafeguardPolicy"` + + // Fully qualified domain name of the endpoint. + ServerName *string `type:"string"` + + // Use this to attribute to transfer data for full-load operations using BCP. + // When the target table contains an identity column that does not exist in + // the source table, you must disable the use BCP for loading table option. + UseBcpFullLoad *bool `type:"boolean"` + + // Endpoint connection user name. + Username *string `type:"string"` +} + +// String returns the string representation +func (s MicrosoftSQLServerSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MicrosoftSQLServerSettings) GoString() string { + return s.String() +} + +// SetBcpPacketSize sets the BcpPacketSize field's value. 
+func (s *MicrosoftSQLServerSettings) SetBcpPacketSize(v int64) *MicrosoftSQLServerSettings { + s.BcpPacketSize = &v + return s +} + +// SetControlTablesFileGroup sets the ControlTablesFileGroup field's value. +func (s *MicrosoftSQLServerSettings) SetControlTablesFileGroup(v string) *MicrosoftSQLServerSettings { + s.ControlTablesFileGroup = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *MicrosoftSQLServerSettings) SetDatabaseName(v string) *MicrosoftSQLServerSettings { + s.DatabaseName = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *MicrosoftSQLServerSettings) SetPassword(v string) *MicrosoftSQLServerSettings { + s.Password = &v + return s +} + +// SetPort sets the Port field's value. +func (s *MicrosoftSQLServerSettings) SetPort(v int64) *MicrosoftSQLServerSettings { + s.Port = &v + return s +} + +// SetReadBackupOnly sets the ReadBackupOnly field's value. +func (s *MicrosoftSQLServerSettings) SetReadBackupOnly(v bool) *MicrosoftSQLServerSettings { + s.ReadBackupOnly = &v + return s +} + +// SetSafeguardPolicy sets the SafeguardPolicy field's value. +func (s *MicrosoftSQLServerSettings) SetSafeguardPolicy(v string) *MicrosoftSQLServerSettings { + s.SafeguardPolicy = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *MicrosoftSQLServerSettings) SetServerName(v string) *MicrosoftSQLServerSettings { + s.ServerName = &v + return s +} + +// SetUseBcpFullLoad sets the UseBcpFullLoad field's value. +func (s *MicrosoftSQLServerSettings) SetUseBcpFullLoad(v bool) *MicrosoftSQLServerSettings { + s.UseBcpFullLoad = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *MicrosoftSQLServerSettings) SetUsername(v string) *MicrosoftSQLServerSettings { + s.Username = &v + return s +} + type ModifyEndpointInput struct { _ struct{} `type:"structure"` @@ -10287,7 +12140,7 @@ type ModifyEndpointInput struct { // Settings in JSON format for the target Elasticsearch endpoint. For more information // about the available settings, see Extra Connection Attributes When Using // Elasticsearch as a Target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Elasticsearch.html#CHAP_Target.Elasticsearch.Configuration) - // in the AWS Database Migration User Guide. + // in the AWS Database Migration Service User Guide. ElasticsearchSettings *ElasticsearchSettings `type:"structure"` // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. @@ -10306,7 +12159,7 @@ type ModifyEndpointInput struct { // The type of engine for the endpoint. Valid values, depending on the EndpointType, // include "mysql", "oracle", "postgres", "mariadb", "aurora", "aurora-postgresql", // "redshift", "s3", "db2", "azuredb", "sybase", "dynamodb", "mongodb", "kinesis", - // "kafka", "elasticsearch", "documentdb", and "sqlserver". + // "kafka", "elasticsearch", "documentdb", "sqlserver", and "neptune". EngineName *string `type:"string"` // The external table definition. @@ -10316,30 +12169,74 @@ type ModifyEndpointInput struct { // pass the empty string ("") as an argument. ExtraConnectionAttributes *string `type:"string"` - // Settings in JSON format for the target Apache Kafka endpoint. For information - // about other available settings, see Using Object Mapping to Migrate Data - // to Apache Kafka (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html#CHAP_Target.Kafka.ObjectMapping) - // in the AWS Database Migration User Guide. 
+ // Settings in JSON format for the source IBM Db2 LUW endpoint. For information + // about other available settings, see Extra connection attributes when using + // Db2 LUW as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DB2.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + IBMDb2Settings *IBMDb2Settings `type:"structure"` + + // Settings in JSON format for the target Apache Kafka endpoint. For more information + // about the available settings, see Using Apache Kafka as a Target for AWS + // Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html) + // in the AWS Database Migration Service User Guide. KafkaSettings *KafkaSettings `type:"structure"` // Settings in JSON format for the target endpoint for Amazon Kinesis Data Streams. - // For information about other available settings, see Using Object Mapping - // to Migrate Data to a Kinesis Data Stream (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html#CHAP_Target.Kinesis.ObjectMapping) - // in the AWS Database Migration User Guide. + // For more information about the available settings, see Using Amazon Kinesis + // Data Streams as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html) + // in the AWS Database Migration Service User Guide. KinesisSettings *KinesisSettings `type:"structure"` + // Settings in JSON format for the source and target Microsoft SQL Server endpoint. + // For information about other available settings, see Extra connection attributes + // when using SQL Server as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SQLServer.ConnectionAttrib) + // and Extra connection attributes when using SQL Server as a target for AWS + // DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SQLServer.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + MicrosoftSQLServerSettings *MicrosoftSQLServerSettings `type:"structure"` + // Settings in JSON format for the source MongoDB endpoint. For more information // about the available settings, see the configuration properties section in // Using MongoDB as a Target for AWS Database Migration Service (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html) // in the AWS Database Migration Service User Guide. MongoDbSettings *MongoDbSettings `type:"structure"` + // Settings in JSON format for the source and target MySQL endpoint. For information + // about other available settings, see Extra connection attributes when using + // MySQL as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MySQL.ConnectionAttrib) + // and Extra connection attributes when using a MySQL-compatible database as + // a target for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.MySQL.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + MySQLSettings *MySQLSettings `type:"structure"` + + // Settings in JSON format for the target Amazon Neptune endpoint. For more + // information about the available settings, see Specifying Endpoint Settings + // for Amazon Neptune as a Target (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings) + // in the AWS Database Migration Service User Guide. 
+ NeptuneSettings *NeptuneSettings `type:"structure"` + + // Settings in JSON format for the source and target Oracle endpoint. For information + // about other available settings, see Extra connection attributes when using + // Oracle as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.ConnectionAttrib) + // and Extra connection attributes when using Oracle as a target for AWS DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Oracle.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + OracleSettings *OracleSettings `type:"structure"` + // The password to be used to login to the endpoint database. Password *string `type:"string" sensitive:"true"` // The port used by the endpoint database. Port *int64 `type:"integer"` + // Settings in JSON format for the source and target PostgreSQL endpoint. For + // information about other available settings, see Extra connection attributes + // when using PostgreSQL as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.ConnectionAttrib) + // and Extra connection attributes when using PostgreSQL as a target for AWS + // DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.PostgreSQL.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + PostgreSQLSettings *PostgreSQLSettings `type:"structure"` + // Provides information that defines an Amazon Redshift endpoint. RedshiftSettings *RedshiftSettings `type:"structure"` @@ -10359,6 +12256,14 @@ type ModifyEndpointInput struct { // The SSL mode used to connect to the endpoint. The default value is none. SslMode *string `type:"string" enum:"DmsSslModeValue"` + // Settings in JSON format for the source and target SAP ASE endpoint. For information + // about other available settings, see Extra connection attributes when using + // SAP ASE as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.SAP.ConnectionAttrib) + // and Extra connection attributes when using SAP ASE as a target for AWS DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.SAP.ConnectionAttrib) + // in the AWS Database Migration Service User Guide. + SybaseSettings *SybaseSettings `type:"structure"` + // The user name to be used to login to the endpoint database. Username *string `type:"string"` } @@ -10389,6 +12294,11 @@ func (s *ModifyEndpointInput) Validate() error { invalidParams.AddNested("ElasticsearchSettings", err.(request.ErrInvalidParams)) } } + if s.NeptuneSettings != nil { + if err := s.NeptuneSettings.Validate(); err != nil { + invalidParams.AddNested("NeptuneSettings", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -10462,6 +12372,12 @@ func (s *ModifyEndpointInput) SetExtraConnectionAttributes(v string) *ModifyEndp return s } +// SetIBMDb2Settings sets the IBMDb2Settings field's value. +func (s *ModifyEndpointInput) SetIBMDb2Settings(v *IBMDb2Settings) *ModifyEndpointInput { + s.IBMDb2Settings = v + return s +} + // SetKafkaSettings sets the KafkaSettings field's value. func (s *ModifyEndpointInput) SetKafkaSettings(v *KafkaSettings) *ModifyEndpointInput { s.KafkaSettings = v @@ -10474,13 +12390,37 @@ func (s *ModifyEndpointInput) SetKinesisSettings(v *KinesisSettings) *ModifyEndp return s } +// SetMicrosoftSQLServerSettings sets the MicrosoftSQLServerSettings field's value. 
+func (s *ModifyEndpointInput) SetMicrosoftSQLServerSettings(v *MicrosoftSQLServerSettings) *ModifyEndpointInput { + s.MicrosoftSQLServerSettings = v + return s +} + // SetMongoDbSettings sets the MongoDbSettings field's value. func (s *ModifyEndpointInput) SetMongoDbSettings(v *MongoDbSettings) *ModifyEndpointInput { s.MongoDbSettings = v return s } -// SetPassword sets the Password field's value. +// SetMySQLSettings sets the MySQLSettings field's value. +func (s *ModifyEndpointInput) SetMySQLSettings(v *MySQLSettings) *ModifyEndpointInput { + s.MySQLSettings = v + return s +} + +// SetNeptuneSettings sets the NeptuneSettings field's value. +func (s *ModifyEndpointInput) SetNeptuneSettings(v *NeptuneSettings) *ModifyEndpointInput { + s.NeptuneSettings = v + return s +} + +// SetOracleSettings sets the OracleSettings field's value. +func (s *ModifyEndpointInput) SetOracleSettings(v *OracleSettings) *ModifyEndpointInput { + s.OracleSettings = v + return s +} + +// SetPassword sets the Password field's value. func (s *ModifyEndpointInput) SetPassword(v string) *ModifyEndpointInput { s.Password = &v return s @@ -10492,6 +12432,12 @@ func (s *ModifyEndpointInput) SetPort(v int64) *ModifyEndpointInput { return s } +// SetPostgreSQLSettings sets the PostgreSQLSettings field's value. +func (s *ModifyEndpointInput) SetPostgreSQLSettings(v *PostgreSQLSettings) *ModifyEndpointInput { + s.PostgreSQLSettings = v + return s +} + // SetRedshiftSettings sets the RedshiftSettings field's value. func (s *ModifyEndpointInput) SetRedshiftSettings(v *RedshiftSettings) *ModifyEndpointInput { s.RedshiftSettings = v @@ -10522,6 +12468,12 @@ func (s *ModifyEndpointInput) SetSslMode(v string) *ModifyEndpointInput { return s } +// SetSybaseSettings sets the SybaseSettings field's value. +func (s *ModifyEndpointInput) SetSybaseSettings(v *SybaseSettings) *ModifyEndpointInput { + s.SybaseSettings = v + return s +} + // SetUsername sets the Username field's value. func (s *ModifyEndpointInput) SetUsername(v string) *ModifyEndpointInput { s.Username = &v @@ -10689,6 +12641,9 @@ type ModifyReplicationInstanceInput struct { AutoMinorVersionUpgrade *bool `type:"boolean"` // The engine version number of the replication instance. + // + // When modifying a major engine version of an instance, also set AllowMajorVersionUpgrade + // to true. EngineVersion *string `type:"string"` // Specifies whether the replication instance is a Multi-AZ deployment. You @@ -10717,10 +12672,13 @@ type ModifyReplicationInstanceInput struct { // ReplicationInstanceArn is a required field ReplicationInstanceArn *string `type:"string" required:"true"` - // The compute and memory capacity of the replication instance. + // The compute and memory capacity of the replication instance as defined for + // the specified replication instance class. For example to specify the instance + // class dms.c4.large, set this parameter to "dms.c4.large". // - // Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large - // | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge + // For more information on the settings and capacities for the available replication + // instance classes, see Selecting the right AWS DMS replication instance for + // your migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). ReplicationInstanceClass *string `type:"string"` // The replication instance identifier. 
This parameter is stored as a lowercase @@ -10961,9 +12919,9 @@ type ModifyReplicationTaskInput struct { // Indicates when you want a change data capture (CDC) operation to stop. The // value can be either server time or commit time. // - // Server time example: --cdc-stop-position “server_time:3018-02-09T12:12:12” + // Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” // - // Commit time example: --cdc-stop-position “commit_time: 3018-02-09T12:12:12 + // Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12 // “ CdcStopPosition *string `type:"string"` @@ -10979,14 +12937,14 @@ type ModifyReplicationTaskInput struct { // // Constraints: // - // * Must contain from 1 to 255 alphanumeric characters or hyphens. + // * Must contain 1-255 alphanumeric characters or hyphens. // // * First character must be a letter. // // * Cannot end with a hyphen or contain two consecutive hyphens. ReplicationTaskIdentifier *string `type:"string"` - // JSON file that contains settings for the task, such as target metadata settings. + // JSON file that contains settings for the task, such as task metadata settings. ReplicationTaskSettings *string `type:"string"` // When using the AWS CLI or boto3, provide the path of the JSON file that contains @@ -10994,6 +12952,12 @@ type ModifyReplicationTaskInput struct { // DMS API, provide the JSON as the parameter value, for example: --table-mappings // file://mappingfile.json TableMappings *string `type:"string"` + + // Supplemental information that the task requires to migrate the data for certain + // source and target endpoints. For more information, see Specifying Supplemental + // Data for Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.TaskData.html) + // in the AWS Database Migration Service User Guide. + TaskData *string `type:"string"` } // String returns the string representation @@ -11067,6 +13031,12 @@ func (s *ModifyReplicationTaskInput) SetTableMappings(v string) *ModifyReplicati return s } +// SetTaskData sets the TaskData field's value. +func (s *ModifyReplicationTaskInput) SetTaskData(v string) *ModifyReplicationTaskInput { + s.TaskData = &v + return s +} + type ModifyReplicationTaskOutput struct { _ struct{} `type:"structure"` @@ -11084,157 +13054,772 @@ func (s ModifyReplicationTaskOutput) GoString() string { return s.String() } -// SetReplicationTask sets the ReplicationTask field's value. -func (s *ModifyReplicationTaskOutput) SetReplicationTask(v *ReplicationTask) *ModifyReplicationTaskOutput { - s.ReplicationTask = v +// SetReplicationTask sets the ReplicationTask field's value. +func (s *ModifyReplicationTaskOutput) SetReplicationTask(v *ReplicationTask) *ModifyReplicationTaskOutput { + s.ReplicationTask = v + return s +} + +// Provides information that defines a MongoDB endpoint. +type MongoDbSettings struct { + _ struct{} `type:"structure"` + + // The authentication mechanism you use to access the MongoDB source endpoint. + // + // For the default value, in MongoDB version 2.x, "default" is "mongodb_cr". + // For MongoDB version 3.x or later, "default" is "scram_sha_1". This setting + // isn't used when AuthType is set to "no". + AuthMechanism *string `type:"string" enum:"AuthMechanismValue"` + + // The MongoDB database name. This setting isn't used when AuthType is set to + // "no". + // + // The default is "admin". + AuthSource *string `type:"string"` + + // The authentication type you use to access the MongoDB source endpoint. 
+ // + // When set to "no", user name and password parameters are not used and + // can be empty. + AuthType *string `type:"string" enum:"AuthTypeValue"` + + // The database name on the MongoDB source endpoint. + DatabaseName *string `type:"string"` + + // Indicates the number of documents to preview to determine the document organization. + // Use this setting when NestingLevel is set to "one". + // + // Must be a positive value greater than 0. Default value is 1000. + DocsToInvestigate *string `type:"string"` + + // Specifies the document ID. Use this setting when NestingLevel is set to "none". + // + // Default value is "false". + ExtractDocId *string `type:"string"` + + // The AWS KMS key identifier that is used to encrypt the content on the replication + // instance. If you don't specify a value for the KmsKeyId parameter, then AWS + // DMS uses your default encryption key. AWS KMS creates the default encryption + // key for your AWS account. Your AWS account has a different default encryption + // key for each AWS Region. + KmsKeyId *string `type:"string"` + + // Specifies either document or table mode. + // + // Default value is "none". Specify "none" to use document mode. Specify "one" + // to use table mode. + NestingLevel *string `type:"string" enum:"NestingLevelValue"` + + // The password for the user account you use to access the MongoDB source endpoint. + Password *string `type:"string" sensitive:"true"` + + // The port value for the MongoDB source endpoint. + Port *int64 `type:"integer"` + + // The name of the server on the MongoDB source endpoint. + ServerName *string `type:"string"` + + // The user name you use to access the MongoDB source endpoint. + Username *string `type:"string"` +} + +// String returns the string representation +func (s MongoDbSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MongoDbSettings) GoString() string { + return s.String() +} + +// SetAuthMechanism sets the AuthMechanism field's value. +func (s *MongoDbSettings) SetAuthMechanism(v string) *MongoDbSettings { + s.AuthMechanism = &v + return s +} + +// SetAuthSource sets the AuthSource field's value. +func (s *MongoDbSettings) SetAuthSource(v string) *MongoDbSettings { + s.AuthSource = &v + return s +} + +// SetAuthType sets the AuthType field's value. +func (s *MongoDbSettings) SetAuthType(v string) *MongoDbSettings { + s.AuthType = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *MongoDbSettings) SetDatabaseName(v string) *MongoDbSettings { + s.DatabaseName = &v + return s +} + +// SetDocsToInvestigate sets the DocsToInvestigate field's value. +func (s *MongoDbSettings) SetDocsToInvestigate(v string) *MongoDbSettings { + s.DocsToInvestigate = &v + return s +} + +// SetExtractDocId sets the ExtractDocId field's value. +func (s *MongoDbSettings) SetExtractDocId(v string) *MongoDbSettings { + s.ExtractDocId = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *MongoDbSettings) SetKmsKeyId(v string) *MongoDbSettings { + s.KmsKeyId = &v + return s +} + +// SetNestingLevel sets the NestingLevel field's value. +func (s *MongoDbSettings) SetNestingLevel(v string) *MongoDbSettings { + s.NestingLevel = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *MongoDbSettings) SetPassword(v string) *MongoDbSettings { + s.Password = &v + return s +} + +// SetPort sets the Port field's value.
+func (s *MongoDbSettings) SetPort(v int64) *MongoDbSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *MongoDbSettings) SetServerName(v string) *MongoDbSettings { + s.ServerName = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *MongoDbSettings) SetUsername(v string) *MongoDbSettings { + s.Username = &v + return s +} + +// Provides information that defines a MySQL endpoint. +type MySQLSettings struct { + _ struct{} `type:"structure"` + + // Specifies a script to run immediately after AWS DMS connects to the endpoint. + // The migration task continues running regardless of whether the SQL statement succeeds + // or fails. + AfterConnectScript *string `type:"string"` + + // Database name for the endpoint. + DatabaseName *string `type:"string"` + + // Specifies how often to check the binary log for new changes/events when the + // database is idle. + // + // Example: eventsPollInterval=5; + // + // In the example, AWS DMS checks for changes in the binary logs every five + // seconds. + EventsPollInterval *int64 `type:"integer"` + + // Specifies the maximum size (in KB) of any .csv file used to transfer data + // to a MySQL-compatible database. + // + // Example: maxFileSize=512 + MaxFileSize *int64 `type:"integer"` + + // Improves performance when loading data into the MySQL-compatible target database. + // Specifies how many threads to use to load the data into the MySQL-compatible + // target database. Setting a large number of threads can have an adverse effect + // on database performance, because a separate connection is required for each + // thread. + // + // Example: parallelLoadThreads=1 + ParallelLoadThreads *int64 `type:"integer"` + + // Endpoint connection password. + Password *string `type:"string" sensitive:"true"` + + // Endpoint TCP port. + Port *int64 `type:"integer"` + + // Fully qualified domain name of the endpoint. + ServerName *string `type:"string"` + + // Specifies the time zone for the source MySQL database. + // + // Example: serverTimezone=US/Pacific; + // + // Note: Do not enclose time zones in single quotes. + ServerTimezone *string `type:"string"` + + // Specifies where to migrate source tables on the target, either to a single + // database or multiple databases. + // + // Example: targetDbType=MULTIPLE_DATABASES + TargetDbType *string `type:"string" enum:"TargetDbType"` + + // Endpoint connection user name. + Username *string `type:"string"` +} + +// String returns the string representation +func (s MySQLSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MySQLSettings) GoString() string { + return s.String() +} + +// SetAfterConnectScript sets the AfterConnectScript field's value. +func (s *MySQLSettings) SetAfterConnectScript(v string) *MySQLSettings { + s.AfterConnectScript = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *MySQLSettings) SetDatabaseName(v string) *MySQLSettings { + s.DatabaseName = &v + return s +} + +// SetEventsPollInterval sets the EventsPollInterval field's value. +func (s *MySQLSettings) SetEventsPollInterval(v int64) *MySQLSettings { + s.EventsPollInterval = &v + return s +} + +// SetMaxFileSize sets the MaxFileSize field's value. +func (s *MySQLSettings) SetMaxFileSize(v int64) *MySQLSettings { + s.MaxFileSize = &v + return s +} + +// SetParallelLoadThreads sets the ParallelLoadThreads field's value.
+func (s *MySQLSettings) SetParallelLoadThreads(v int64) *MySQLSettings { + s.ParallelLoadThreads = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *MySQLSettings) SetPassword(v string) *MySQLSettings { + s.Password = &v + return s +} + +// SetPort sets the Port field's value. +func (s *MySQLSettings) SetPort(v int64) *MySQLSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *MySQLSettings) SetServerName(v string) *MySQLSettings { + s.ServerName = &v + return s +} + +// SetServerTimezone sets the ServerTimezone field's value. +func (s *MySQLSettings) SetServerTimezone(v string) *MySQLSettings { + s.ServerTimezone = &v + return s +} + +// SetTargetDbType sets the TargetDbType field's value. +func (s *MySQLSettings) SetTargetDbType(v string) *MySQLSettings { + s.TargetDbType = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *MySQLSettings) SetUsername(v string) *MySQLSettings { + s.Username = &v + return s +} + +// Provides information that defines an Amazon Neptune endpoint. +type NeptuneSettings struct { + _ struct{} `type:"structure"` + + // The number of milliseconds for AWS DMS to wait to retry a bulk-load of migrated + // graph data to the Neptune target database before raising an error. The default + // is 250. + ErrorRetryDuration *int64 `type:"integer"` + + // If you want AWS Identity and Access Management (IAM) authorization enabled + // for this endpoint, set this parameter to true. Then attach the appropriate + // IAM policy document to your service role specified by ServiceAccessRoleArn. + // The default is false. + IamAuthEnabled *bool `type:"boolean"` + + // The maximum size in kilobytes of migrated graph data stored in a .csv file + // before AWS DMS bulk-loads the data to the Neptune target database. The default + // is 1,048,576 KB. If the bulk load is successful, AWS DMS clears the bucket, + // ready to store the next batch of migrated graph data. + MaxFileSize *int64 `type:"integer"` + + // The number of times for AWS DMS to retry a bulk load of migrated graph data + // to the Neptune target database before raising an error. The default is 5. + MaxRetryCount *int64 `type:"integer"` + + // A folder path where you want AWS DMS to store migrated graph data in the + // S3 bucket specified by S3BucketName + // + // S3BucketFolder is a required field + S3BucketFolder *string `type:"string" required:"true"` + + // The name of the Amazon S3 bucket where AWS DMS can temporarily store migrated + // graph data in .csv files before bulk-loading it to the Neptune target database. + // AWS DMS maps the SQL source data to graph data before storing it in these + // .csv files. + // + // S3BucketName is a required field + S3BucketName *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the service role that you created for the + // Neptune target endpoint. For more information, see Creating an IAM Service + // Role for Accessing Amazon Neptune as a Target (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.ServiceRole) + // in the AWS Database Migration Service User Guide. 
+ ServiceAccessRoleArn *string `type:"string"` +} + +// String returns the string representation +func (s NeptuneSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NeptuneSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NeptuneSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NeptuneSettings"} + if s.S3BucketFolder == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketFolder")) + } + if s.S3BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetErrorRetryDuration sets the ErrorRetryDuration field's value. +func (s *NeptuneSettings) SetErrorRetryDuration(v int64) *NeptuneSettings { + s.ErrorRetryDuration = &v + return s +} + +// SetIamAuthEnabled sets the IamAuthEnabled field's value. +func (s *NeptuneSettings) SetIamAuthEnabled(v bool) *NeptuneSettings { + s.IamAuthEnabled = &v + return s +} + +// SetMaxFileSize sets the MaxFileSize field's value. +func (s *NeptuneSettings) SetMaxFileSize(v int64) *NeptuneSettings { + s.MaxFileSize = &v + return s +} + +// SetMaxRetryCount sets the MaxRetryCount field's value. +func (s *NeptuneSettings) SetMaxRetryCount(v int64) *NeptuneSettings { + s.MaxRetryCount = &v + return s +} + +// SetS3BucketFolder sets the S3BucketFolder field's value. +func (s *NeptuneSettings) SetS3BucketFolder(v string) *NeptuneSettings { + s.S3BucketFolder = &v + return s +} + +// SetS3BucketName sets the S3BucketName field's value. +func (s *NeptuneSettings) SetS3BucketName(v string) *NeptuneSettings { + s.S3BucketName = &v + return s +} + +// SetServiceAccessRoleArn sets the ServiceAccessRoleArn field's value. +func (s *NeptuneSettings) SetServiceAccessRoleArn(v string) *NeptuneSettings { + s.ServiceAccessRoleArn = &v return s } -// Provides information that defines a MongoDB endpoint. -type MongoDbSettings struct { +// Provides information that defines an Oracle endpoint. +type OracleSettings struct { _ struct{} `type:"structure"` - // The authentication mechanism you use to access the MongoDB source endpoint. - // - // Valid values: DEFAULT, MONGODB_CR, SCRAM_SHA_1 - // - // DEFAULT – For MongoDB version 2.x, use MONGODB_CR. For MongoDB version - // 3.x, use SCRAM_SHA_1. This setting isn't used when authType=No. - AuthMechanism *string `type:"string" enum:"AuthMechanismValue"` + // Set this attribute to false in order to use the Binary Reader to capture + // change data for an Amazon RDS for Oracle as the source. This tells the DMS + // instance to not access redo logs through any specified path prefix replacement + // using direct file access. + AccessAlternateDirectly *bool `type:"boolean"` + + // Set this attribute to set up table-level supplemental logging for the Oracle + // database. This attribute enables PRIMARY KEY supplemental logging on all + // tables selected for a migration task. + // + // If you use this option, you still need to enable database-level supplemental + // logging. + AddSupplementalLogging *bool `type:"boolean"` + + // Set this attribute with archivedLogDestId in a primary/ standby setup. This + // attribute is useful in the case of a switchover. In this case, AWS DMS needs + // to know which destination to get archive redo logs from to read changes. 
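// The NeptuneSettings type and its generated setters above are new in this patch. A
// minimal usage sketch follows, assuming this vendored package is imported as dms
// (github.com/aws/aws-sdk-go/service/databasemigrationservice); the bucket, folder, and
// role ARN values are placeholders and the helper function itself is hypothetical.
func exampleNeptuneTargetEndpoint() (*dms.ModifyEndpointInput, error) {
	neptune := (&dms.NeptuneSettings{}).
		SetS3BucketName("example-neptune-staging-bucket").
		SetS3BucketFolder("neptune-staging/").
		SetServiceAccessRoleArn("arn:aws:iam::123456789012:role/example-dms-neptune-role").
		SetIamAuthEnabled(true)
	// S3BucketName and S3BucketFolder are required fields, so Validate reports them if missing.
	if err := neptune.Validate(); err != nil {
		return nil, err
	}
	// Attach the settings through the SetNeptuneSettings setter added to ModifyEndpointInput above.
	return (&dms.ModifyEndpointInput{}).SetNeptuneSettings(neptune), nil
}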
+ // This need arises because the previous primary instance is now a standby instance + // after switchover. + AdditionalArchivedLogDestId *int64 `type:"integer"` + + // Set this attribute to true to enable replication of Oracle tables containing + // columns that are nested tables or defined types. + AllowSelectNestedTables *bool `type:"boolean"` + + // Specifies the destination of the archived redo logs. The value should be + // the same as the DEST_ID number in the v$archived_log table. When working + // with multiple log destinations (DEST_ID), we recommend that you to specify + // an archived redo logs location identifier. Doing this improves performance + // by ensuring that the correct logs are accessed from the outset. + ArchivedLogDestId *int64 `type:"integer"` + + // When this field is set to Y, AWS DMS only accesses the archived redo logs. + // If the archived redo logs are stored on Oracle ASM only, the AWS DMS user + // account needs to be granted ASM privileges. + ArchivedLogsOnly *bool `type:"boolean"` + + // For an Oracle source endpoint, your Oracle Automatic Storage Management (ASM) + // password. You can set this value from the asm_user_password value. You set + // this value as part of the comma-separated value that you set to the Password + // request parameter when you create the endpoint to access transaction logs + // using Binary Reader. For more information, see Configuration for change data + // capture (CDC) on an Oracle source database (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.CDC.Configuration). + AsmPassword *string `type:"string" sensitive:"true"` + + // For an Oracle source endpoint, your ASM server address. You can set this + // value from the asm_server value. You set asm_server as part of the extra + // connection attribute string to access an Oracle server with Binary Reader + // that uses ASM. For more information, see Configuration for change data capture + // (CDC) on an Oracle source database (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.CDC.Configuration). + AsmServer *string `type:"string"` + + // For an Oracle source endpoint, your ASM user name. You can set this value + // from the asm_user value. You set asm_user as part of the extra connection + // attribute string to access an Oracle server with Binary Reader that uses + // ASM. For more information, see Configuration for change data capture (CDC) + // on an Oracle source database (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.CDC.Configuration). + AsmUser *string `type:"string"` + + // Specifies whether the length of a character column is in bytes or in characters. + // To indicate that the character column length is in characters, set this attribute + // to CHAR. Otherwise, the character column length is in bytes. + // + // Example: charLengthSemantics=CHAR; + CharLengthSemantics *string `type:"string" enum:"CharLengthSemantics"` + + // Database name for the endpoint. + DatabaseName *string `type:"string"` - // The MongoDB database name. This setting isn't used when authType=NO. - // - // The default is admin. 
- AuthSource *string `type:"string"` + // When set to true, this attribute helps to increase the commit rate on the + // Oracle target database by writing directly to tables and not writing a trail + // to database logs. + DirectPathNoLog *bool `type:"boolean"` - // The authentication type you use to access the MongoDB source endpoint. - // - // Valid values: NO, PASSWORD - // - // When NO is selected, user name and password parameters are not used and can - // be empty. - AuthType *string `type:"string" enum:"AuthTypeValue"` + // When set to true, this attribute specifies a parallel load when useDirectPathFullLoad + // is set to Y. This attribute also only applies when you use the AWS DMS parallel + // load feature. Note that the target table cannot have any constraints or indexes. + DirectPathParallelLoad *bool `type:"boolean"` - // The database name on the MongoDB source endpoint. - DatabaseName *string `type:"string"` + // Set this attribute to enable homogenous tablespace replication and create + // existing tables or indexes under the same tablespace on the target. + EnableHomogenousTablespace *bool `type:"boolean"` - // Indicates the number of documents to preview to determine the document organization. - // Use this setting when NestingLevel is set to ONE. + // When set to true, this attribute causes a task to fail if the actual size + // of an LOB column is greater than the specified LobMaxSize. // - // Must be a positive value greater than 0. Default value is 1000. - DocsToInvestigate *string `type:"string"` + // If a task is set to limited LOB mode and this option is set to true, the + // task fails instead of truncating the LOB data. + FailTasksOnLobTruncation *bool `type:"boolean"` - // Specifies the document ID. Use this setting when NestingLevel is set to NONE. + // Specifies the number scale. You can select a scale up to 38, or you can select + // FLOAT. By default, the NUMBER data type is converted to precision 38, scale + // 10. // - // Default value is false. - ExtractDocId *string `type:"string"` + // Example: numberDataTypeScale=12 + NumberDatatypeScale *int64 `type:"integer"` - // The AWS KMS key identifier that is used to encrypt the content on the replication - // instance. If you don't specify a value for the KmsKeyId parameter, then AWS - // DMS uses your default encryption key. AWS KMS creates the default encryption - // key for your AWS account. Your AWS account has a different default encryption - // key for each AWS Region. - KmsKeyId *string `type:"string"` + // Set this string attribute to the required value in order to use the Binary + // Reader to capture change data for an Amazon RDS for Oracle as the source. + // This value specifies the default Oracle root used to access the redo logs. + OraclePathPrefix *string `type:"string"` - // Specifies either document or table mode. - // - // Valid values: NONE, ONE - // - // Default value is NONE. Specify NONE to use document mode. Specify ONE to - // use table mode. - NestingLevel *string `type:"string" enum:"NestingLevelValue"` + // Set this attribute to change the number of threads that DMS configures to + // perform a Change Data Capture (CDC) load using Oracle Automatic Storage Management + // (ASM). You can specify an integer value between 2 (the default) and 8 (the + // maximum). Use this attribute together with the readAheadBlocks attribute. + ParallelAsmReadThreads *int64 `type:"integer"` - // The password for the user account you use to access the MongoDB source endpoint. 
+ // Endpoint connection password. Password *string `type:"string" sensitive:"true"` - // The port value for the MongoDB source endpoint. + // Endpoint TCP port. Port *int64 `type:"integer"` - // The name of the server on the MongoDB source endpoint. + // Set this attribute to change the number of read-ahead blocks that DMS configures + // to perform a Change Data Capture (CDC) load using Oracle Automatic Storage + // Management (ASM). You can specify an integer value between 1000 (the default) + // and 200,000 (the maximum). + ReadAheadBlocks *int64 `type:"integer"` + + // When set to true, this attribute supports tablespace replication. + ReadTableSpaceName *bool `type:"boolean"` + + // Set this attribute to true in order to use the Binary Reader to capture change + // data for an Amazon RDS for Oracle as the source. This setting tells DMS instance + // to replace the default Oracle root with the specified usePathPrefix setting + // to access the redo logs. + ReplacePathPrefix *bool `type:"boolean"` + + // Specifies the number of seconds that the system waits before resending a + // query. + // + // Example: retryInterval=6; + RetryInterval *int64 `type:"integer"` + + // For an Oracle source endpoint, the transparent data encryption (TDE) password + // required by AWS DMS to access Oracle redo logs encrypted by TDE using Binary + // Reader. It is also the TDE_Password part of the comma-separated value you + // set to the Password request parameter when you create the endpoint. The SecurityDbEncryption + // setting is related to this SecurityDbEncryptionName setting. For more information, + // see Supported encryption methods for using Oracle as a source for AWS DMS + // (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.Encryption) + // in the AWS Database Migration Service User Guide. + SecurityDbEncryption *string `type:"string" sensitive:"true"` + + // For an Oracle source endpoint, the name of a key used for the transparent + // data encryption (TDE) of the columns and tablespaces in an Oracle source + // database that is encrypted using TDE. The key value is the value of the SecurityDbEncryption + // setting. For more information on setting the key name value of SecurityDbEncryptionName, + // see the information and example for setting the securityDbEncryptionName + // extra connection attribute in Supported encryption methods for using Oracle + // as a source for AWS DMS (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.Oracle.html#CHAP_Source.Oracle.Encryption) + // in the AWS Database Migration Service User Guide. + SecurityDbEncryptionName *string `type:"string"` + + // Fully qualified domain name of the endpoint. ServerName *string `type:"string"` - // The user name you use to access the MongoDB source endpoint. + // Set this attribute to true in order to use the Binary Reader to capture change + // data for an Amazon RDS for Oracle as the source. This tells the DMS instance + // to use any specified prefix replacement to access all online redo logs. + UseAlternateFolderForOnline *bool `type:"boolean"` + + // Set this string attribute to the required value in order to use the Binary + // Reader to capture change data for an Amazon RDS for Oracle as the source. + // This value specifies the path prefix used to replace the default Oracle root + // to access the redo logs. + UsePathPrefix *string `type:"string"` + + // Endpoint connection user name.
Username *string `type:"string"` } // String returns the string representation -func (s MongoDbSettings) String() string { +func (s OracleSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MongoDbSettings) GoString() string { +func (s OracleSettings) GoString() string { return s.String() } -// SetAuthMechanism sets the AuthMechanism field's value. -func (s *MongoDbSettings) SetAuthMechanism(v string) *MongoDbSettings { - s.AuthMechanism = &v +// SetAccessAlternateDirectly sets the AccessAlternateDirectly field's value. +func (s *OracleSettings) SetAccessAlternateDirectly(v bool) *OracleSettings { + s.AccessAlternateDirectly = &v return s } -// SetAuthSource sets the AuthSource field's value. -func (s *MongoDbSettings) SetAuthSource(v string) *MongoDbSettings { - s.AuthSource = &v +// SetAddSupplementalLogging sets the AddSupplementalLogging field's value. +func (s *OracleSettings) SetAddSupplementalLogging(v bool) *OracleSettings { + s.AddSupplementalLogging = &v return s } -// SetAuthType sets the AuthType field's value. -func (s *MongoDbSettings) SetAuthType(v string) *MongoDbSettings { - s.AuthType = &v +// SetAdditionalArchivedLogDestId sets the AdditionalArchivedLogDestId field's value. +func (s *OracleSettings) SetAdditionalArchivedLogDestId(v int64) *OracleSettings { + s.AdditionalArchivedLogDestId = &v + return s +} + +// SetAllowSelectNestedTables sets the AllowSelectNestedTables field's value. +func (s *OracleSettings) SetAllowSelectNestedTables(v bool) *OracleSettings { + s.AllowSelectNestedTables = &v + return s +} + +// SetArchivedLogDestId sets the ArchivedLogDestId field's value. +func (s *OracleSettings) SetArchivedLogDestId(v int64) *OracleSettings { + s.ArchivedLogDestId = &v + return s +} + +// SetArchivedLogsOnly sets the ArchivedLogsOnly field's value. +func (s *OracleSettings) SetArchivedLogsOnly(v bool) *OracleSettings { + s.ArchivedLogsOnly = &v + return s +} + +// SetAsmPassword sets the AsmPassword field's value. +func (s *OracleSettings) SetAsmPassword(v string) *OracleSettings { + s.AsmPassword = &v + return s +} + +// SetAsmServer sets the AsmServer field's value. +func (s *OracleSettings) SetAsmServer(v string) *OracleSettings { + s.AsmServer = &v + return s +} + +// SetAsmUser sets the AsmUser field's value. +func (s *OracleSettings) SetAsmUser(v string) *OracleSettings { + s.AsmUser = &v + return s +} + +// SetCharLengthSemantics sets the CharLengthSemantics field's value. +func (s *OracleSettings) SetCharLengthSemantics(v string) *OracleSettings { + s.CharLengthSemantics = &v return s } // SetDatabaseName sets the DatabaseName field's value. -func (s *MongoDbSettings) SetDatabaseName(v string) *MongoDbSettings { +func (s *OracleSettings) SetDatabaseName(v string) *OracleSettings { s.DatabaseName = &v return s } -// SetDocsToInvestigate sets the DocsToInvestigate field's value. -func (s *MongoDbSettings) SetDocsToInvestigate(v string) *MongoDbSettings { - s.DocsToInvestigate = &v +// SetDirectPathNoLog sets the DirectPathNoLog field's value. +func (s *OracleSettings) SetDirectPathNoLog(v bool) *OracleSettings { + s.DirectPathNoLog = &v return s } -// SetExtractDocId sets the ExtractDocId field's value. -func (s *MongoDbSettings) SetExtractDocId(v string) *MongoDbSettings { - s.ExtractDocId = &v +// SetDirectPathParallelLoad sets the DirectPathParallelLoad field's value. 
+func (s *OracleSettings) SetDirectPathParallelLoad(v bool) *OracleSettings { + s.DirectPathParallelLoad = &v return s } -// SetKmsKeyId sets the KmsKeyId field's value. -func (s *MongoDbSettings) SetKmsKeyId(v string) *MongoDbSettings { - s.KmsKeyId = &v +// SetEnableHomogenousTablespace sets the EnableHomogenousTablespace field's value. +func (s *OracleSettings) SetEnableHomogenousTablespace(v bool) *OracleSettings { + s.EnableHomogenousTablespace = &v return s } -// SetNestingLevel sets the NestingLevel field's value. -func (s *MongoDbSettings) SetNestingLevel(v string) *MongoDbSettings { - s.NestingLevel = &v +// SetFailTasksOnLobTruncation sets the FailTasksOnLobTruncation field's value. +func (s *OracleSettings) SetFailTasksOnLobTruncation(v bool) *OracleSettings { + s.FailTasksOnLobTruncation = &v + return s +} + +// SetNumberDatatypeScale sets the NumberDatatypeScale field's value. +func (s *OracleSettings) SetNumberDatatypeScale(v int64) *OracleSettings { + s.NumberDatatypeScale = &v + return s +} + +// SetOraclePathPrefix sets the OraclePathPrefix field's value. +func (s *OracleSettings) SetOraclePathPrefix(v string) *OracleSettings { + s.OraclePathPrefix = &v + return s +} + +// SetParallelAsmReadThreads sets the ParallelAsmReadThreads field's value. +func (s *OracleSettings) SetParallelAsmReadThreads(v int64) *OracleSettings { + s.ParallelAsmReadThreads = &v return s } // SetPassword sets the Password field's value. -func (s *MongoDbSettings) SetPassword(v string) *MongoDbSettings { +func (s *OracleSettings) SetPassword(v string) *OracleSettings { s.Password = &v return s } // SetPort sets the Port field's value. -func (s *MongoDbSettings) SetPort(v int64) *MongoDbSettings { +func (s *OracleSettings) SetPort(v int64) *OracleSettings { s.Port = &v return s } +// SetReadAheadBlocks sets the ReadAheadBlocks field's value. +func (s *OracleSettings) SetReadAheadBlocks(v int64) *OracleSettings { + s.ReadAheadBlocks = &v + return s +} + +// SetReadTableSpaceName sets the ReadTableSpaceName field's value. +func (s *OracleSettings) SetReadTableSpaceName(v bool) *OracleSettings { + s.ReadTableSpaceName = &v + return s +} + +// SetReplacePathPrefix sets the ReplacePathPrefix field's value. +func (s *OracleSettings) SetReplacePathPrefix(v bool) *OracleSettings { + s.ReplacePathPrefix = &v + return s +} + +// SetRetryInterval sets the RetryInterval field's value. +func (s *OracleSettings) SetRetryInterval(v int64) *OracleSettings { + s.RetryInterval = &v + return s +} + +// SetSecurityDbEncryption sets the SecurityDbEncryption field's value. +func (s *OracleSettings) SetSecurityDbEncryption(v string) *OracleSettings { + s.SecurityDbEncryption = &v + return s +} + +// SetSecurityDbEncryptionName sets the SecurityDbEncryptionName field's value. +func (s *OracleSettings) SetSecurityDbEncryptionName(v string) *OracleSettings { + s.SecurityDbEncryptionName = &v + return s +} + // SetServerName sets the ServerName field's value. -func (s *MongoDbSettings) SetServerName(v string) *MongoDbSettings { +func (s *OracleSettings) SetServerName(v string) *OracleSettings { s.ServerName = &v return s } +// SetUseAlternateFolderForOnline sets the UseAlternateFolderForOnline field's value. +func (s *OracleSettings) SetUseAlternateFolderForOnline(v bool) *OracleSettings { + s.UseAlternateFolderForOnline = &v + return s +} + +// SetUsePathPrefix sets the UsePathPrefix field's value. 
+func (s *OracleSettings) SetUsePathPrefix(v string) *OracleSettings { + s.UsePathPrefix = &v + return s +} + // SetUsername sets the Username field's value. -func (s *MongoDbSettings) SetUsername(v string) *MongoDbSettings { +func (s *OracleSettings) SetUsername(v string) *OracleSettings { s.Username = &v return s } @@ -11273,10 +13858,13 @@ type OrderableReplicationInstance struct { // AWS DMS supports the ReleaseStatus parameter in versions 3.1.4 and later. ReleaseStatus *string `type:"string" enum:"ReleaseStatusValues"` - // The compute and memory capacity of the replication instance. + // The compute and memory capacity of the replication instance as defined for + // the specified replication instance class. For example to specify the instance + // class dms.c4.large, set this parameter to "dms.c4.large". // - // Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large - // | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge + // For more information on the settings and capacities for the available replication + // instance classes, see Selecting the right AWS DMS replication instance for + // your migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). ReplicationInstanceClass *string `type:"string"` // The type of storage used by the replication instance. @@ -11429,6 +14017,152 @@ func (s *PendingMaintenanceAction) SetOptInStatus(v string) *PendingMaintenanceA return s } +// Provides information that defines a PostgreSQL endpoint. +type PostgreSQLSettings struct { + _ struct{} `type:"structure"` + + // For use with change data capture (CDC) only, this attribute has AWS DMS bypass + // foreign keys and user triggers to reduce the time it takes to bulk load data. + // + // Example: afterConnectScript=SET session_replication_role='replica' + AfterConnectScript *string `type:"string"` + + // To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL + // database when the task starts. You can later remove these artifacts. + // + // If this value is set to N, you don't have to create tables or triggers on + // the source database. + CaptureDdls *bool `type:"boolean"` + + // Database name for the endpoint. + DatabaseName *string `type:"string"` + + // The schema in which the operational DDL database artifacts are created. + // + // Example: ddlArtifactsSchema=xyzddlschema; + DdlArtifactsSchema *string `type:"string"` + + // Sets the client statement timeout for the PostgreSQL instance, in seconds. + // The default value is 60 seconds. + // + // Example: executeTimeout=100; + ExecuteTimeout *int64 `type:"integer"` + + // When set to true, this value causes a task to fail if the actual size of + // a LOB column is greater than the specified LobMaxSize. + // + // If task is set to Limited LOB mode and this option is set to true, the task + // fails instead of truncating the LOB data. + FailTasksOnLobTruncation *bool `type:"boolean"` + + // Specifies the maximum size (in KB) of any .csv file used to transfer data + // to PostgreSQL. + // + // Example: maxFileSize=512 + MaxFileSize *int64 `type:"integer"` + + // Endpoint connection password. + Password *string `type:"string" sensitive:"true"` + + // Endpoint TCP port. + Port *int64 `type:"integer"` + + // Fully qualified domain name of the endpoint. + ServerName *string `type:"string"` + + // Sets the name of a previously created logical replication slot for a CDC + // load of the PostgreSQL source instance. 
+ // + // When used with the AWS DMS API CdcStartPosition request parameter, this attribute + // also enables using native CDC start points. + SlotName *string `type:"string"` + + // Endpoint connection user name. + Username *string `type:"string"` +} + +// String returns the string representation +func (s PostgreSQLSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PostgreSQLSettings) GoString() string { + return s.String() +} + +// SetAfterConnectScript sets the AfterConnectScript field's value. +func (s *PostgreSQLSettings) SetAfterConnectScript(v string) *PostgreSQLSettings { + s.AfterConnectScript = &v + return s +} + +// SetCaptureDdls sets the CaptureDdls field's value. +func (s *PostgreSQLSettings) SetCaptureDdls(v bool) *PostgreSQLSettings { + s.CaptureDdls = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *PostgreSQLSettings) SetDatabaseName(v string) *PostgreSQLSettings { + s.DatabaseName = &v + return s +} + +// SetDdlArtifactsSchema sets the DdlArtifactsSchema field's value. +func (s *PostgreSQLSettings) SetDdlArtifactsSchema(v string) *PostgreSQLSettings { + s.DdlArtifactsSchema = &v + return s +} + +// SetExecuteTimeout sets the ExecuteTimeout field's value. +func (s *PostgreSQLSettings) SetExecuteTimeout(v int64) *PostgreSQLSettings { + s.ExecuteTimeout = &v + return s +} + +// SetFailTasksOnLobTruncation sets the FailTasksOnLobTruncation field's value. +func (s *PostgreSQLSettings) SetFailTasksOnLobTruncation(v bool) *PostgreSQLSettings { + s.FailTasksOnLobTruncation = &v + return s +} + +// SetMaxFileSize sets the MaxFileSize field's value. +func (s *PostgreSQLSettings) SetMaxFileSize(v int64) *PostgreSQLSettings { + s.MaxFileSize = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *PostgreSQLSettings) SetPassword(v string) *PostgreSQLSettings { + s.Password = &v + return s +} + +// SetPort sets the Port field's value. +func (s *PostgreSQLSettings) SetPort(v int64) *PostgreSQLSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *PostgreSQLSettings) SetServerName(v string) *PostgreSQLSettings { + s.ServerName = &v + return s +} + +// SetSlotName sets the SlotName field's value. +func (s *PostgreSQLSettings) SetSlotName(v string) *PostgreSQLSettings { + s.SlotName = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *PostgreSQLSettings) SetUsername(v string) *PostgreSQLSettings { + s.Username = &v + return s +} + type RebootReplicationInstanceInput struct { _ struct{} `type:"structure"` @@ -11518,13 +14252,34 @@ type RedshiftSettings struct { // not the name of a file containing the code. AfterConnectScript *string `type:"string"` - // The location where the comma-separated value (.csv) files are stored before - // being uploaded to the S3 bucket. + // An S3 folder where the comma-separated-value (.csv) files are stored before + // being uploaded to the target Redshift cluster. + // + // For full load mode, AWS DMS converts source records into .csv files and loads + // them to the BucketFolder/TableID path. AWS DMS uses the Redshift COPY command + // to upload the .csv files to the target table. The files are deleted once + // the COPY operation has finished. 
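// A minimal sketch of filling in the PostgreSQLSettings defined above through its
// generated setters, assuming the vendored package is imported as dms; the server,
// credentials, and slot name are placeholders and the helper is hypothetical. Each
// setter stores a pointer to its argument and returns the receiver, so calls chain.
func examplePostgreSQLSourceSettings() *dms.PostgreSQLSettings {
	return (&dms.PostgreSQLSettings{}).
		SetServerName("pg.example.internal").
		SetPort(5432).
		SetDatabaseName("appdb").
		SetUsername("dms_user").
		SetPassword("example-password").
		// Reuse a previously created logical replication slot for CDC loads.
		SetSlotName("example_dms_slot").
		// Fail the task rather than truncate LOB data larger than the configured maximum.
		SetFailTasksOnLobTruncation(true)
}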
For more information, see Amazon Redshift + // Database Developer Guide (https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html) + // + // For change-data-capture (CDC) mode, AWS DMS creates a NetChanges table, and + // loads the .csv files to this BucketFolder/NetChangesTableID path. BucketFolder *string `type:"string"` - // The name of the S3 bucket you want to use + // The name of the intermediate S3 bucket used to store .csv files before uploading + // data to Redshift. BucketName *string `type:"string"` + // If Amazon Redshift is configured to support case sensitive schema names, + // set CaseSensitiveNames to true. The default is false. + CaseSensitiveNames *bool `type:"boolean"` + + // If you set CompUpdate to true Amazon Redshift applies automatic compression + // if the table is empty. This applies even if the table columns already have + // encodings other than RAW. If you set CompUpdate to false, automatic compression + // is disabled and existing column encodings aren't changed. The default is + // true. + CompUpdate *bool `type:"boolean"` + // A value that sets the amount of time to wait (in milliseconds) before timing // out, beginning from when you initially establish a connection. ConnectionTimeout *int64 `type:"integer"` @@ -11551,22 +14306,40 @@ type RedshiftSettings struct { // The type of server-side encryption that you want to use for your data. This // encryption type is part of the endpoint settings or the extra connections // attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS. + // + // For the ModifyEndpoint operation, you can change the existing value of the + // EncryptionMode parameter from SSE_KMS to SSE_S3. But you can’t change the + // existing value from SSE_S3 to SSE_KMS. + // // To use SSE_S3, create an AWS Identity and Access Management (IAM) role with // a policy that allows "arn:aws:s3:::*" to use the following actions: "s3:PutObject", // "s3:ListBucket" EncryptionMode *string `type:"string" enum:"EncryptionModeValue"` + // This setting is only valid for a full-load migration task. Set ExplicitIds + // to true to have tables with IDENTITY columns override their auto-generated + // values with explicit values loaded from the source data files used to populate + // the tables. The default is false. + ExplicitIds *bool `type:"boolean"` + // The number of threads used to upload a single file. This parameter accepts // a value from 1 through 64. It defaults to 10. + // + // The number of parallel streams used to upload a single .csv file to an S3 + // bucket using S3 Multipart Upload. For more information, see Multipart upload + // overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). + // + // FileTransferUploadStreams accepts a value from 1 through 64. It defaults + // to 10. FileTransferUploadStreams *int64 `type:"integer"` - // The amount of time to wait (in milliseconds) before timing out, beginning - // from when you begin loading. + // The amount of time to wait (in milliseconds) before timing out of operations + // performed by AWS DMS on a Redshift cluster, such as Redshift COPY, INSERT, + // DELETE, and UPDATE. LoadTimeout *int64 `type:"integer"` - // The maximum size (in KB) of any .csv file used to transfer data to Amazon - // Redshift. This accepts a value from 1 through 1,048,576. It defaults to 32,768 - // KB (32 MB). + // The maximum size (in KB) of any .csv file used to load data on an S3 bucket + // and transfer data to Amazon Redshift. It defaults to 1048576KB (1 GB). 
MaxFileSize *int64 `type:"integer"` // The password for the user named in the username property. @@ -11623,8 +14396,9 @@ type RedshiftSettings struct { // An Amazon Redshift user name for a registered user. Username *string `type:"string"` - // The size of the write buffer to use in rows. Valid values range from 1 through - // 2,048. The default is 1,024. Use this setting to tune performance. + // The size (in KB) of the in-memory file write buffer used when generating + // .csv files on the local disk at the DMS replication instance. The default + // value is 1000 (buffer size is 1000KB). WriteBufferSize *int64 `type:"integer"` } @@ -11662,6 +14436,18 @@ func (s *RedshiftSettings) SetBucketName(v string) *RedshiftSettings { return s } +// SetCaseSensitiveNames sets the CaseSensitiveNames field's value. +func (s *RedshiftSettings) SetCaseSensitiveNames(v bool) *RedshiftSettings { + s.CaseSensitiveNames = &v + return s +} + +// SetCompUpdate sets the CompUpdate field's value. +func (s *RedshiftSettings) SetCompUpdate(v bool) *RedshiftSettings { + s.CompUpdate = &v + return s +} + // SetConnectionTimeout sets the ConnectionTimeout field's value. func (s *RedshiftSettings) SetConnectionTimeout(v int64) *RedshiftSettings { s.ConnectionTimeout = &v @@ -11692,6 +14478,12 @@ func (s *RedshiftSettings) SetEncryptionMode(v string) *RedshiftSettings { return s } +// SetExplicitIds sets the ExplicitIds field's value. +func (s *RedshiftSettings) SetExplicitIds(v bool) *RedshiftSettings { + s.ExplicitIds = &v + return s +} + // SetFileTransferUploadStreams sets the FileTransferUploadStreams field's value. func (s *RedshiftSettings) SetFileTransferUploadStreams(v int64) *RedshiftSettings { s.FileTransferUploadStreams = &v @@ -11966,6 +14758,16 @@ func (s *ReloadTablesInput) Validate() error { if s.TablesToReload == nil { invalidParams.Add(request.NewErrParamRequired("TablesToReload")) } + if s.TablesToReload != nil { + for i, v := range s.TablesToReload { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TablesToReload", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -12097,10 +14899,17 @@ type ReplicationInstance struct { // The Availability Zone for the instance. AvailabilityZone *string `type:"string"` - // The DNS name servers for the replication instance. + // The DNS name servers supported for the replication instance to access your + // on-premise source or target database. DnsNameServers *string `type:"string"` // The engine version number of the replication instance. + // + // If an engine version number is not specified when a replication instance + // is created, the default is the latest engine version available. + // + // When modifying a major engine version of an instance, also set AllowMajorVersionUpgrade + // to true. EngineVersion *string `type:"string"` // The expiration date of the free replication instance that is part of the @@ -12128,7 +14937,8 @@ type ReplicationInstance struct { // The pending modification values. PendingModifiedValues *ReplicationPendingModifiedValues `type:"structure"` - // The maintenance window times for the replication instance. + // The maintenance window times for the replication instance. Any pending upgrades + // to the replication instance are performed during this time. PreferredMaintenanceWindow *string `type:"string"` // Specifies the accessibility options for the replication instance. 
A value @@ -12139,18 +14949,21 @@ type ReplicationInstance struct { // The Amazon Resource Name (ARN) of the replication instance. ReplicationInstanceArn *string `type:"string"` - // The compute and memory capacity of the replication instance. + // The compute and memory capacity of the replication instance as defined for + // the specified replication instance class. It is a required parameter, although + // a default value is pre-selected in the DMS console. // - // Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large - // | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge + // For more information on the settings and capacities for the available replication + // instance classes, see Selecting the right AWS DMS replication instance for + // your migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). ReplicationInstanceClass *string `type:"string"` - // The replication instance identifier. This parameter is stored as a lowercase - // string. + // The replication instance identifier is a required parameter. This parameter + // is stored as a lowercase string. // // Constraints: // - // * Must contain from 1 to 63 alphanumeric characters or hyphens. + // * Must contain 1-63 alphanumeric characters or hyphens. // // * First character must be a letter. // @@ -12169,13 +14982,39 @@ type ReplicationInstance struct { // The public IP address of the replication instance. // - // Deprecated: ReplicationInstancePublicIpAddress has been deprecated - ReplicationInstancePublicIpAddress *string `deprecated:"true" type:"string"` - - // One or more public IP addresses for the replication instance. - ReplicationInstancePublicIpAddresses []*string `type:"list"` - - // The status of the replication instance. + // Deprecated: ReplicationInstancePublicIpAddress has been deprecated + ReplicationInstancePublicIpAddress *string `deprecated:"true" type:"string"` + + // One or more public IP addresses for the replication instance. + ReplicationInstancePublicIpAddresses []*string `type:"list"` + + // The status of the replication instance. The possible return values include: + // + // * "available" + // + // * "creating" + // + // * "deleted" + // + // * "deleting" + // + // * "failed" + // + // * "modifying" + // + // * "upgrading" + // + // * "rebooting" + // + // * "resetting-master-credentials" + // + // * "storage-full" + // + // * "incompatible-credentials" + // + // * "incompatible-network" + // + // * "maintenance" ReplicationInstanceStatus *string `type:"string"` // The subnet group for the replication instance. @@ -12379,8 +15218,8 @@ func (s *ReplicationInstanceTaskLog) SetReplicationTaskName(v string) *Replicati } // Provides information about the values of pending modifications to a replication -// instance. This data type is an object of the ReplicationInstance user-defined -// data type. +// instance. This data type is an object of the ReplicationInstance (https://docs.aws.amazon.com/dms/latest/APIReference/API_ReplicationInstance.html) +// user-defined data type. type ReplicationPendingModifiedValues struct { _ struct{} `type:"structure"` @@ -12396,10 +15235,12 @@ type ReplicationPendingModifiedValues struct { // to true. MultiAZ *bool `type:"boolean"` - // The compute and memory capacity of the replication instance. + // The compute and memory capacity of the replication instance as defined for + // the specified replication instance class.
// - // Valid Values: dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large - // | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge + // For more information on the settings and capacities for the available replication + // instance classes, see Selecting the right AWS DMS replication instance for + // your migration (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_ReplicationInstance.html#CHAP_ReplicationInstance.InDepth). ReplicationInstanceClass *string `type:"string"` } @@ -12437,7 +15278,7 @@ func (s *ReplicationPendingModifiedValues) SetReplicationInstanceClass(v string) return s } -// Describes a subnet group in response to a request by the DescribeReplicationSubnetGroup +// Describes a subnet group in response to a request by the DescribeReplicationSubnetGroups // operation. type ReplicationSubnetGroup struct { _ struct{} `type:"structure"` @@ -12501,8 +15342,8 @@ func (s *ReplicationSubnetGroup) SetVpcId(v string) *ReplicationSubnetGroup { // The replication subnet group does not cover enough Availability Zones (AZs). // Edit the replication subnet group and add more AZs. type ReplicationSubnetGroupDoesNotCoverEnoughAZs struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12519,17 +15360,17 @@ func (s ReplicationSubnetGroupDoesNotCoverEnoughAZs) GoString() string { func newErrorReplicationSubnetGroupDoesNotCoverEnoughAZs(v protocol.ResponseMetadata) error { return &ReplicationSubnetGroupDoesNotCoverEnoughAZs{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ReplicationSubnetGroupDoesNotCoverEnoughAZs) Code() string { +func (s *ReplicationSubnetGroupDoesNotCoverEnoughAZs) Code() string { return "ReplicationSubnetGroupDoesNotCoverEnoughAZs" } // Message returns the exception's message. -func (s ReplicationSubnetGroupDoesNotCoverEnoughAZs) Message() string { +func (s *ReplicationSubnetGroupDoesNotCoverEnoughAZs) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12537,22 +15378,22 @@ func (s ReplicationSubnetGroupDoesNotCoverEnoughAZs) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ReplicationSubnetGroupDoesNotCoverEnoughAZs) OrigErr() error { +func (s *ReplicationSubnetGroupDoesNotCoverEnoughAZs) OrigErr() error { return nil } -func (s ReplicationSubnetGroupDoesNotCoverEnoughAZs) Error() string { +func (s *ReplicationSubnetGroupDoesNotCoverEnoughAZs) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ReplicationSubnetGroupDoesNotCoverEnoughAZs) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ReplicationSubnetGroupDoesNotCoverEnoughAZs) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ReplicationSubnetGroupDoesNotCoverEnoughAZs) RequestID() string { - return s.respMetadata.RequestID +func (s *ReplicationSubnetGroupDoesNotCoverEnoughAZs) RequestID() string { + return s.RespMetadata.RequestID } // Provides information that describes a replication task created by the CreateReplicationTask @@ -12576,13 +15417,13 @@ type ReplicationTask struct { // Indicates when you want a change data capture (CDC) operation to stop. The // value can be either server time or commit time. 
// - // Server time example: --cdc-stop-position “server_time:3018-02-09T12:12:12” + // Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” // - // Commit time example: --cdc-stop-position “commit_time: 3018-02-09T12:12:12 + // Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12 // “ CdcStopPosition *string `type:"string"` - // The last error (failure) message generated for the replication instance. + // The last error (failure) message generated for the replication task. LastFailureMessage *string `type:"string"` // The type of migration. @@ -12606,7 +15447,7 @@ type ReplicationTask struct { // // Constraints: // - // * Must contain from 1 to 255 alphanumeric characters or hyphens. + // * Must contain 1-255 alphanumeric characters or hyphens. // // * First character must be a letter. // @@ -12629,7 +15470,20 @@ type ReplicationTask struct { // The status of the replication task. Status *string `type:"string"` - // The reason the replication task was stopped. + // The reason the replication task was stopped. This response parameter can + // return one of the following values: + // + // * "STOP_REASON_FULL_LOAD_COMPLETED" – Full-load migration completed. + // + // * "STOP_REASON_CACHED_CHANGES_APPLIED" – Change data capture (CDC) load + // completed. + // + // * "STOP_REASON_CACHED_CHANGES_NOT_APPLIED" – In a full-load and CDC + // migration, the full-load stopped as specified before starting the CDC + // migration. + // + // * "STOP_REASON_SERVER_TIME" – The migration stopped at the specified + // server time. StopReason *string `type:"string"` // Table mappings specified in the task. @@ -12637,6 +15491,12 @@ type ReplicationTask struct { // The Amazon Resource Name (ARN) string that uniquely identifies the endpoint. TargetEndpointArn *string `type:"string"` + + // Supplemental information that the task requires to migrate the data for certain + // source and target endpoints. For more information, see Specifying Supplemental + // Data for Task Settings (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.TaskData.html) + // in the AWS Database Migration Service User Guide. + TaskData *string `type:"string"` } // String returns the string representation @@ -12751,6 +15611,12 @@ func (s *ReplicationTask) SetTargetEndpointArn(v string) *ReplicationTask { return s } +// SetTaskData sets the TaskData field's value. +func (s *ReplicationTask) SetTaskData(v string) *ReplicationTask { + s.TaskData = &v + return s +} + // The task assessment report in JSON format. type ReplicationTaskAssessmentResult struct { _ struct{} `type:"structure"` @@ -12830,6 +15696,278 @@ func (s *ReplicationTaskAssessmentResult) SetS3ObjectUrl(v string) *ReplicationT return s } +// Provides information that describes a premigration assessment run that you +// have started using the StartReplicationTaskAssessmentRun operation. +// +// Some of the information appears based on other operations that can return +// the ReplicationTaskAssessmentRun object. +type ReplicationTaskAssessmentRun struct { + _ struct{} `type:"structure"` + + // Indication of the completion progress for the individual assessments specified + // to run. + AssessmentProgress *ReplicationTaskAssessmentRunProgress `type:"structure"` + + // Unique name of the assessment run. + AssessmentRunName *string `type:"string"` + + // Last message generated by an individual assessment failure. 
+ LastFailureMessage *string `type:"string"` + + // ARN of the migration task associated with this premigration assessment run. + ReplicationTaskArn *string `type:"string"` + + // Amazon Resource Name (ARN) of this assessment run. + ReplicationTaskAssessmentRunArn *string `type:"string"` + + // Date on which the assessment run was created using the StartReplicationTaskAssessmentRun + // operation. + ReplicationTaskAssessmentRunCreationDate *time.Time `type:"timestamp"` + + // Encryption mode used to encrypt the assessment run results. + ResultEncryptionMode *string `type:"string"` + + // ARN of the AWS KMS encryption key used to encrypt the assessment run results. + ResultKmsKeyArn *string `type:"string"` + + // Amazon S3 bucket where AWS DMS stores the results of this assessment run. + ResultLocationBucket *string `type:"string"` + + // Folder in an Amazon S3 bucket where AWS DMS stores the results of this assessment + // run. + ResultLocationFolder *string `type:"string"` + + // ARN of the service role used to start the assessment run using the StartReplicationTaskAssessmentRun + // operation. + ServiceAccessRoleArn *string `type:"string"` + + // Assessment run status. + // + // This status can have one of the following values: + // + // * "cancelling" – The assessment run was canceled by the CancelReplicationTaskAssessmentRun + // operation. + // + // * "deleting" – The assessment run was deleted by the DeleteReplicationTaskAssessmentRun + // operation. + // + // * "failed" – At least one individual assessment completed with a failed + // status. + // + // * "error-provisioning" – An internal error occurred while resources + // were provisioned (during provisioning status). + // + // * "error-executing" – An internal error occurred while individual assessments + // ran (during running status). + // + // * "invalid state" – The assessment run is in an unknown state. + // + // * "passed" – All individual assessments have completed, and none has + // a failed status. + // + // * "provisioning" – Resources required to run individual assessments + // are being provisioned. + // + // * "running" – Individual assessments are being run. + // + // * "starting" – The assessment run is starting, but resources are not + // yet being provisioned for individual assessments. + Status *string `type:"string"` +} + +// String returns the string representation +func (s ReplicationTaskAssessmentRun) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTaskAssessmentRun) GoString() string { + return s.String() +} + +// SetAssessmentProgress sets the AssessmentProgress field's value. +func (s *ReplicationTaskAssessmentRun) SetAssessmentProgress(v *ReplicationTaskAssessmentRunProgress) *ReplicationTaskAssessmentRun { + s.AssessmentProgress = v + return s +} + +// SetAssessmentRunName sets the AssessmentRunName field's value. +func (s *ReplicationTaskAssessmentRun) SetAssessmentRunName(v string) *ReplicationTaskAssessmentRun { + s.AssessmentRunName = &v + return s +} + +// SetLastFailureMessage sets the LastFailureMessage field's value. +func (s *ReplicationTaskAssessmentRun) SetLastFailureMessage(v string) *ReplicationTaskAssessmentRun { + s.LastFailureMessage = &v + return s +} + +// SetReplicationTaskArn sets the ReplicationTaskArn field's value. 
+func (s *ReplicationTaskAssessmentRun) SetReplicationTaskArn(v string) *ReplicationTaskAssessmentRun { + s.ReplicationTaskArn = &v + return s +} + +// SetReplicationTaskAssessmentRunArn sets the ReplicationTaskAssessmentRunArn field's value. +func (s *ReplicationTaskAssessmentRun) SetReplicationTaskAssessmentRunArn(v string) *ReplicationTaskAssessmentRun { + s.ReplicationTaskAssessmentRunArn = &v + return s +} + +// SetReplicationTaskAssessmentRunCreationDate sets the ReplicationTaskAssessmentRunCreationDate field's value. +func (s *ReplicationTaskAssessmentRun) SetReplicationTaskAssessmentRunCreationDate(v time.Time) *ReplicationTaskAssessmentRun { + s.ReplicationTaskAssessmentRunCreationDate = &v + return s +} + +// SetResultEncryptionMode sets the ResultEncryptionMode field's value. +func (s *ReplicationTaskAssessmentRun) SetResultEncryptionMode(v string) *ReplicationTaskAssessmentRun { + s.ResultEncryptionMode = &v + return s +} + +// SetResultKmsKeyArn sets the ResultKmsKeyArn field's value. +func (s *ReplicationTaskAssessmentRun) SetResultKmsKeyArn(v string) *ReplicationTaskAssessmentRun { + s.ResultKmsKeyArn = &v + return s +} + +// SetResultLocationBucket sets the ResultLocationBucket field's value. +func (s *ReplicationTaskAssessmentRun) SetResultLocationBucket(v string) *ReplicationTaskAssessmentRun { + s.ResultLocationBucket = &v + return s +} + +// SetResultLocationFolder sets the ResultLocationFolder field's value. +func (s *ReplicationTaskAssessmentRun) SetResultLocationFolder(v string) *ReplicationTaskAssessmentRun { + s.ResultLocationFolder = &v + return s +} + +// SetServiceAccessRoleArn sets the ServiceAccessRoleArn field's value. +func (s *ReplicationTaskAssessmentRun) SetServiceAccessRoleArn(v string) *ReplicationTaskAssessmentRun { + s.ServiceAccessRoleArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationTaskAssessmentRun) SetStatus(v string) *ReplicationTaskAssessmentRun { + s.Status = &v + return s +} + +// The progress values reported by the AssessmentProgress response element. +type ReplicationTaskAssessmentRunProgress struct { + _ struct{} `type:"structure"` + + // The number of individual assessments that have completed, successfully or + // not. + IndividualAssessmentCompletedCount *int64 `type:"integer"` + + // The number of individual assessments that are specified to run. + IndividualAssessmentCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s ReplicationTaskAssessmentRunProgress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTaskAssessmentRunProgress) GoString() string { + return s.String() +} + +// SetIndividualAssessmentCompletedCount sets the IndividualAssessmentCompletedCount field's value. +func (s *ReplicationTaskAssessmentRunProgress) SetIndividualAssessmentCompletedCount(v int64) *ReplicationTaskAssessmentRunProgress { + s.IndividualAssessmentCompletedCount = &v + return s +} + +// SetIndividualAssessmentCount sets the IndividualAssessmentCount field's value. +func (s *ReplicationTaskAssessmentRunProgress) SetIndividualAssessmentCount(v int64) *ReplicationTaskAssessmentRunProgress { + s.IndividualAssessmentCount = &v + return s +} + +// Provides information that describes an individual assessment from a premigration +// assessment run. +type ReplicationTaskIndividualAssessment struct { + _ struct{} `type:"structure"` + + // Name of this individual assessment. 
+ IndividualAssessmentName *string `type:"string"` + + // ARN of the premigration assessment run that is created to run this individual + // assessment. + ReplicationTaskAssessmentRunArn *string `type:"string"` + + // Amazon Resource Name (ARN) of this individual assessment. + ReplicationTaskIndividualAssessmentArn *string `type:"string"` + + // Date when this individual assessment was started as part of running the StartReplicationTaskAssessmentRun + // operation. + ReplicationTaskIndividualAssessmentStartDate *time.Time `type:"timestamp"` + + // Individual assessment status. + // + // This status can have one of the following values: + // + // * "cancelled" + // + // * "error" + // + // * "failed" + // + // * "passed" + // + // * "pending" + // + // * "running" + Status *string `type:"string"` +} + +// String returns the string representation +func (s ReplicationTaskIndividualAssessment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationTaskIndividualAssessment) GoString() string { + return s.String() +} + +// SetIndividualAssessmentName sets the IndividualAssessmentName field's value. +func (s *ReplicationTaskIndividualAssessment) SetIndividualAssessmentName(v string) *ReplicationTaskIndividualAssessment { + s.IndividualAssessmentName = &v + return s +} + +// SetReplicationTaskAssessmentRunArn sets the ReplicationTaskAssessmentRunArn field's value. +func (s *ReplicationTaskIndividualAssessment) SetReplicationTaskAssessmentRunArn(v string) *ReplicationTaskIndividualAssessment { + s.ReplicationTaskAssessmentRunArn = &v + return s +} + +// SetReplicationTaskIndividualAssessmentArn sets the ReplicationTaskIndividualAssessmentArn field's value. +func (s *ReplicationTaskIndividualAssessment) SetReplicationTaskIndividualAssessmentArn(v string) *ReplicationTaskIndividualAssessment { + s.ReplicationTaskIndividualAssessmentArn = &v + return s +} + +// SetReplicationTaskIndividualAssessmentStartDate sets the ReplicationTaskIndividualAssessmentStartDate field's value. +func (s *ReplicationTaskIndividualAssessment) SetReplicationTaskIndividualAssessmentStartDate(v time.Time) *ReplicationTaskIndividualAssessment { + s.ReplicationTaskIndividualAssessmentStartDate = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationTaskIndividualAssessment) SetStatus(v string) *ReplicationTaskIndividualAssessment { + s.Status = &v + return s +} + // In response to a request by the DescribeReplicationTasks operation, this // object provides a collection of statistics about a replication task. type ReplicationTaskStats struct { @@ -12949,8 +16087,8 @@ func (s *ReplicationTaskStats) SetTablesQueued(v int64) *ReplicationTaskStats { // The resource you are attempting to create already exists. type ResourceAlreadyExistsFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -12969,17 +16107,17 @@ func (s ResourceAlreadyExistsFault) GoString() string { func newErrorResourceAlreadyExistsFault(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsFault) Code() string { +func (s *ResourceAlreadyExistsFault) Code() string { return "ResourceAlreadyExistsFault" } // Message returns the exception's message. 
-func (s ResourceAlreadyExistsFault) Message() string { +func (s *ResourceAlreadyExistsFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12987,28 +16125,28 @@ func (s ResourceAlreadyExistsFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsFault) OrigErr() error { +func (s *ResourceAlreadyExistsFault) OrigErr() error { return nil } -func (s ResourceAlreadyExistsFault) Error() string { +func (s *ResourceAlreadyExistsFault) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsFault) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsFault) RequestID() string { + return s.RespMetadata.RequestID } // The resource could not be found. type ResourceNotFoundFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13025,17 +16163,17 @@ func (s ResourceNotFoundFault) GoString() string { func newErrorResourceNotFoundFault(v protocol.ResponseMetadata) error { return &ResourceNotFoundFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundFault) Code() string { +func (s *ResourceNotFoundFault) Code() string { return "ResourceNotFoundFault" } // Message returns the exception's message. -func (s ResourceNotFoundFault) Message() string { +func (s *ResourceNotFoundFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13043,22 +16181,22 @@ func (s ResourceNotFoundFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundFault) OrigErr() error { +func (s *ResourceNotFoundFault) OrigErr() error { return nil } -func (s ResourceNotFoundFault) Error() string { +func (s *ResourceNotFoundFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundFault) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundFault) RequestID() string { + return s.RespMetadata.RequestID } // Identifies an AWS DMS resource and any pending actions for it. @@ -13099,8 +16237,8 @@ func (s *ResourcePendingMaintenanceActions) SetResourceIdentifier(v string) *Res // The quota for this resource quota has been exceeded. 
type ResourceQuotaExceededFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13117,17 +16255,129 @@ func (s ResourceQuotaExceededFault) GoString() string { func newErrorResourceQuotaExceededFault(v protocol.ResponseMetadata) error { return &ResourceQuotaExceededFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceQuotaExceededFault) Code() string { +func (s *ResourceQuotaExceededFault) Code() string { return "ResourceQuotaExceededFault" } // Message returns the exception's message. -func (s ResourceQuotaExceededFault) Message() string { +func (s *ResourceQuotaExceededFault) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceQuotaExceededFault) OrigErr() error { + return nil +} + +func (s *ResourceQuotaExceededFault) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceQuotaExceededFault) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceQuotaExceededFault) RequestID() string { + return s.RespMetadata.RequestID +} + +// Insufficient privileges are preventing access to an Amazon S3 object. +type S3AccessDeniedFault struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s S3AccessDeniedFault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3AccessDeniedFault) GoString() string { + return s.String() +} + +func newErrorS3AccessDeniedFault(v protocol.ResponseMetadata) error { + return &S3AccessDeniedFault{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *S3AccessDeniedFault) Code() string { + return "S3AccessDeniedFault" +} + +// Message returns the exception's message. +func (s *S3AccessDeniedFault) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *S3AccessDeniedFault) OrigErr() error { + return nil +} + +func (s *S3AccessDeniedFault) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *S3AccessDeniedFault) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *S3AccessDeniedFault) RequestID() string { + return s.RespMetadata.RequestID +} + +// A specified Amazon S3 bucket, bucket folder, or other object can't be found. 
+type S3ResourceNotFoundFault struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s S3ResourceNotFoundFault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3ResourceNotFoundFault) GoString() string { + return s.String() +} + +func newErrorS3ResourceNotFoundFault(v protocol.ResponseMetadata) error { + return &S3ResourceNotFoundFault{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *S3ResourceNotFoundFault) Code() string { + return "S3ResourceNotFoundFault" +} + +// Message returns the exception's message. +func (s *S3ResourceNotFoundFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13135,22 +16385,22 @@ func (s ResourceQuotaExceededFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceQuotaExceededFault) OrigErr() error { +func (s *S3ResourceNotFoundFault) OrigErr() error { return nil } -func (s ResourceQuotaExceededFault) Error() string { +func (s *S3ResourceNotFoundFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceQuotaExceededFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *S3ResourceNotFoundFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceQuotaExceededFault) RequestID() string { - return s.respMetadata.RequestID +func (s *S3ResourceNotFoundFault) RequestID() string { + return s.RespMetadata.RequestID } // Settings for exporting data to Amazon S3. @@ -13167,9 +16417,9 @@ type S3Settings struct { // A value that enables a change data capture (CDC) load to write INSERT and // UPDATE operations to .csv or .parquet (columnar storage) output files. The - // default setting is false, but when CdcInsertsAndUpdates is set to trueor - // y, INSERTs and UPDATEs from the source database are migrated to the .csv - // or .parquet file. + // default setting is false, but when CdcInsertsAndUpdates is set to true or + // y, only INSERTs and UPDATEs from the source database are migrated to the + // .csv or .parquet file. // // For .csv file format only, how these INSERTs and UPDATEs are recorded depends // on the value of the IncludeOpForFullLoad parameter. If IncludeOpForFullLoad @@ -13220,12 +16470,12 @@ type S3Settings struct { // both .csv and .parquet file formats. CompressionType *string `type:"string" enum:"CompressionTypeValue"` - // The delimiter used to separate columns in the source files. The default is - // a comma. + // The delimiter used to separate columns in the .csv file for both source and + // target. The default is a comma. CsvDelimiter *string `type:"string"` - // The delimiter used to separate rows in the source files. The default is a - // carriage return (\n). + // The delimiter used to separate rows in the .csv file for both source and + // target. The default is a carriage return (\n). CsvRowDelimiter *string `type:"string"` // The format of the data that you want to use for output. You can choose one @@ -13241,6 +16491,21 @@ type S3Settings struct { // bytes (1 MiB). This number is used for .parquet file format only. 
DataPageSize *int64 `type:"integer"`

+ // Specifies a date separating delimiter to use during folder partitioning.
+ // The default value is SLASH. Use this parameter when DatePartitionedEnabled
+ // is set to true.
+ DatePartitionDelimiter *string `type:"string" enum:"DatePartitionDelimiterValue"`
+
+ // When set to true, this parameter partitions S3 bucket folders based on transaction
+ // commit dates. The default value is false. For more information about date-based
+ // folder partitioning, see Using date-based folder partitioning (https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html#CHAP_Source.PostgreSQL.ConnectionAttrib)
+ DatePartitionEnabled *bool `type:"boolean"`
+
+ // Identifies the sequence of the date format to use during folder partitioning.
+ // The default value is YYYYMMDD. Use this parameter when DatePartitionedEnabled
+ // is set to true.
+ DatePartitionSequence *string `type:"string" enum:"DatePartitionSequenceValue"`
+
// The maximum size of an encoded dictionary page of a column. If the dictionary
// page exceeds this, this column is stored using an encoding type of PLAIN.
// This parameter defaults to 1024 * 1024 bytes (1 MiB), the maximum size of
@@ -13269,6 +16534,11 @@ type S3Settings struct {
// The type of server-side encryption that you want to use for your data. This
// encryption type is part of the endpoint settings or the extra connections
// attributes for Amazon S3. You can choose either SSE_S3 (the default) or SSE_KMS.
+ //
+ // For the ModifyEndpoint operation, you can change the existing value of the
+ // EncryptionMode parameter from SSE_KMS to SSE_S3. But you can’t change the
+ // existing value from SSE_S3 to SSE_KMS.
+ //
// To use SSE_S3, you need an AWS Identity and Access Management (IAM) role
// with permission to allow "arn:aws:s3:::dms-*" to use the following actions:
//
@@ -13295,7 +16565,7 @@ type S3Settings struct {
// * s3:DeleteBucketPolicy
EncryptionMode *string `type:"string" enum:"EncryptionModeValue"`

- // The external table definition.
+ // Specifies how tables are defined in the S3 source files only.
ExternalTableDefinition *string `type:"string"`

// A value that enables a full load to write INSERT operations to the comma-separated
@@ -13363,7 +16633,9 @@ type S3Settings struct {
// --endpoint-type target --engine-name s3 --s3-settings ServiceAccessRoleArn=value,BucketFolder=value,BucketName=value,EncryptionMode=SSE_KMS,ServerSideEncryptionKmsKeyId=value
ServerSideEncryptionKmsKeyId *string `type:"string"`

- // The Amazon Resource Name (ARN) used by the service access IAM role.
+ // The Amazon Resource Name (ARN) used by the service access IAM role. It is
+ // a required parameter that enables DMS to write and read objects from an S3
+ // bucket.
ServiceAccessRoleArn *string `type:"string"`

// A value that when nonblank causes AWS DMS to add a column with timestamp
@@ -13455,6 +16727,24 @@ func (s *S3Settings) SetDataPageSize(v int64) *S3Settings {
return s
}

+// SetDatePartitionDelimiter sets the DatePartitionDelimiter field's value.
+func (s *S3Settings) SetDatePartitionDelimiter(v string) *S3Settings {
+ s.DatePartitionDelimiter = &v
+ return s
+}
+
+// SetDatePartitionEnabled sets the DatePartitionEnabled field's value.
+func (s *S3Settings) SetDatePartitionEnabled(v bool) *S3Settings {
+ s.DatePartitionEnabled = &v
+ return s
+}
+
+// SetDatePartitionSequence sets the DatePartitionSequence field's value.
+func (s *S3Settings) SetDatePartitionSequence(v string) *S3Settings { + s.DatePartitionSequence = &v + return s +} + // SetDictPageSizeLimit sets the DictPageSizeLimit field's value. func (s *S3Settings) SetDictPageSizeLimit(v int64) *S3Settings { s.DictPageSizeLimit = &v @@ -13529,8 +16819,8 @@ func (s *S3Settings) SetTimestampColumnName(v string) *S3Settings { // The SNS topic is invalid. type SNSInvalidTopicFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13547,17 +16837,17 @@ func (s SNSInvalidTopicFault) GoString() string { func newErrorSNSInvalidTopicFault(v protocol.ResponseMetadata) error { return &SNSInvalidTopicFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SNSInvalidTopicFault) Code() string { +func (s *SNSInvalidTopicFault) Code() string { return "SNSInvalidTopicFault" } // Message returns the exception's message. -func (s SNSInvalidTopicFault) Message() string { +func (s *SNSInvalidTopicFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13565,28 +16855,28 @@ func (s SNSInvalidTopicFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SNSInvalidTopicFault) OrigErr() error { +func (s *SNSInvalidTopicFault) OrigErr() error { return nil } -func (s SNSInvalidTopicFault) Error() string { +func (s *SNSInvalidTopicFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SNSInvalidTopicFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SNSInvalidTopicFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SNSInvalidTopicFault) RequestID() string { - return s.respMetadata.RequestID +func (s *SNSInvalidTopicFault) RequestID() string { + return s.RespMetadata.RequestID } // You are not authorized for the SNS subscription. type SNSNoAuthorizationFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13603,17 +16893,17 @@ func (s SNSNoAuthorizationFault) GoString() string { func newErrorSNSNoAuthorizationFault(v protocol.ResponseMetadata) error { return &SNSNoAuthorizationFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SNSNoAuthorizationFault) Code() string { +func (s *SNSNoAuthorizationFault) Code() string { return "SNSNoAuthorizationFault" } // Message returns the exception's message. -func (s SNSNoAuthorizationFault) Message() string { +func (s *SNSNoAuthorizationFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13621,22 +16911,22 @@ func (s SNSNoAuthorizationFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SNSNoAuthorizationFault) OrigErr() error { +func (s *SNSNoAuthorizationFault) OrigErr() error { return nil } -func (s SNSNoAuthorizationFault) Error() string { +func (s *SNSNoAuthorizationFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s SNSNoAuthorizationFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SNSNoAuthorizationFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SNSNoAuthorizationFault) RequestID() string { - return s.respMetadata.RequestID +func (s *SNSNoAuthorizationFault) RequestID() string { + return s.RespMetadata.RequestID } type StartReplicationTaskAssessmentInput struct { @@ -13700,6 +16990,187 @@ func (s *StartReplicationTaskAssessmentOutput) SetReplicationTask(v *Replication return s } +type StartReplicationTaskAssessmentRunInput struct { + _ struct{} `type:"structure"` + + // Unique name to identify the assessment run. + // + // AssessmentRunName is a required field + AssessmentRunName *string `type:"string" required:"true"` + + // Space-separated list of names for specific individual assessments that you + // want to exclude. These names come from the default list of individual assessments + // that AWS DMS supports for the associated migration task. This task is specified + // by ReplicationTaskArn. + // + // You can't set a value for Exclude if you also set a value for IncludeOnly + // in the API operation. + // + // To identify the names of the default individual assessments that AWS DMS + // supports for the associated migration task, run the DescribeApplicableIndividualAssessments + // operation using its own ReplicationTaskArn request parameter. + Exclude []*string `type:"list"` + + // Space-separated list of names for specific individual assessments that you + // want to include. These names come from the default list of individual assessments + // that AWS DMS supports for the associated migration task. This task is specified + // by ReplicationTaskArn. + // + // You can't set a value for IncludeOnly if you also set a value for Exclude + // in the API operation. + // + // To identify the names of the default individual assessments that AWS DMS + // supports for the associated migration task, run the DescribeApplicableIndividualAssessments + // operation using its own ReplicationTaskArn request parameter. + IncludeOnly []*string `type:"list"` + + // Amazon Resource Name (ARN) of the migration task associated with the premigration + // assessment run that you want to start. + // + // ReplicationTaskArn is a required field + ReplicationTaskArn *string `type:"string" required:"true"` + + // Encryption mode that you can specify to encrypt the results of this assessment + // run. If you don't specify this request parameter, AWS DMS stores the assessment + // run results without encryption. You can specify one of the options following: + // + // * "SSE_S3" – The server-side encryption provided as a default by Amazon + // S3. + // + // * "SSE_KMS" – AWS Key Management Service (AWS KMS) encryption. This + // encryption can use either a custom KMS encryption key that you specify + // or the default KMS encryption key that DMS provides. + ResultEncryptionMode *string `type:"string"` + + // ARN of a custom KMS encryption key that you specify when you set ResultEncryptionMode + // to "SSE_KMS". + ResultKmsKeyArn *string `type:"string"` + + // Amazon S3 bucket where you want AWS DMS to store the results of this assessment + // run. + // + // ResultLocationBucket is a required field + ResultLocationBucket *string `type:"string" required:"true"` + + // Folder within an Amazon S3 bucket where you want AWS DMS to store the results + // of this assessment run. 
+ ResultLocationFolder *string `type:"string"` + + // ARN of a service role needed to start the assessment run. + // + // ServiceAccessRoleArn is a required field + ServiceAccessRoleArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartReplicationTaskAssessmentRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartReplicationTaskAssessmentRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartReplicationTaskAssessmentRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartReplicationTaskAssessmentRunInput"} + if s.AssessmentRunName == nil { + invalidParams.Add(request.NewErrParamRequired("AssessmentRunName")) + } + if s.ReplicationTaskArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationTaskArn")) + } + if s.ResultLocationBucket == nil { + invalidParams.Add(request.NewErrParamRequired("ResultLocationBucket")) + } + if s.ServiceAccessRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceAccessRoleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssessmentRunName sets the AssessmentRunName field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetAssessmentRunName(v string) *StartReplicationTaskAssessmentRunInput { + s.AssessmentRunName = &v + return s +} + +// SetExclude sets the Exclude field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetExclude(v []*string) *StartReplicationTaskAssessmentRunInput { + s.Exclude = v + return s +} + +// SetIncludeOnly sets the IncludeOnly field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetIncludeOnly(v []*string) *StartReplicationTaskAssessmentRunInput { + s.IncludeOnly = v + return s +} + +// SetReplicationTaskArn sets the ReplicationTaskArn field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetReplicationTaskArn(v string) *StartReplicationTaskAssessmentRunInput { + s.ReplicationTaskArn = &v + return s +} + +// SetResultEncryptionMode sets the ResultEncryptionMode field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetResultEncryptionMode(v string) *StartReplicationTaskAssessmentRunInput { + s.ResultEncryptionMode = &v + return s +} + +// SetResultKmsKeyArn sets the ResultKmsKeyArn field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetResultKmsKeyArn(v string) *StartReplicationTaskAssessmentRunInput { + s.ResultKmsKeyArn = &v + return s +} + +// SetResultLocationBucket sets the ResultLocationBucket field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetResultLocationBucket(v string) *StartReplicationTaskAssessmentRunInput { + s.ResultLocationBucket = &v + return s +} + +// SetResultLocationFolder sets the ResultLocationFolder field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetResultLocationFolder(v string) *StartReplicationTaskAssessmentRunInput { + s.ResultLocationFolder = &v + return s +} + +// SetServiceAccessRoleArn sets the ServiceAccessRoleArn field's value. +func (s *StartReplicationTaskAssessmentRunInput) SetServiceAccessRoleArn(v string) *StartReplicationTaskAssessmentRunInput { + s.ServiceAccessRoleArn = &v + return s +} + +type StartReplicationTaskAssessmentRunOutput struct { + _ struct{} `type:"structure"` + + // The premigration assessment run that was started. 
+ ReplicationTaskAssessmentRun *ReplicationTaskAssessmentRun `type:"structure"` +} + +// String returns the string representation +func (s StartReplicationTaskAssessmentRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartReplicationTaskAssessmentRunOutput) GoString() string { + return s.String() +} + +// SetReplicationTaskAssessmentRun sets the ReplicationTaskAssessmentRun field's value. +func (s *StartReplicationTaskAssessmentRunOutput) SetReplicationTaskAssessmentRun(v *ReplicationTaskAssessmentRun) *StartReplicationTaskAssessmentRunOutput { + s.ReplicationTaskAssessmentRun = v + return s +} + type StartReplicationTaskInput struct { _ struct{} `type:"structure"` @@ -13732,9 +17203,9 @@ type StartReplicationTaskInput struct { // Indicates when you want a change data capture (CDC) operation to stop. The // value can be either server time or commit time. // - // Server time example: --cdc-stop-position “server_time:3018-02-09T12:12:12” + // Server time example: --cdc-stop-position “server_time:2018-02-09T12:12:12” // - // Commit time example: --cdc-stop-position “commit_time: 3018-02-09T12:12:12 + // Commit time example: --cdc-stop-position “commit_time: 2018-02-09T12:12:12 // “ CdcStopPosition *string `type:"string"` @@ -13743,7 +17214,7 @@ type StartReplicationTaskInput struct { // ReplicationTaskArn is a required field ReplicationTaskArn *string `type:"string" required:"true"` - // The type of replication task. + // A type of replication task. // // StartReplicationTaskType is a required field StartReplicationTaskType *string `type:"string" required:"true" enum:"StartReplicationTaskTypeValue"` @@ -13891,8 +17362,8 @@ func (s *StopReplicationTaskOutput) SetReplicationTask(v *ReplicationTask) *Stop // The storage quota has been exceeded. type StorageQuotaExceededFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13909,17 +17380,17 @@ func (s StorageQuotaExceededFault) GoString() string { func newErrorStorageQuotaExceededFault(v protocol.ResponseMetadata) error { return &StorageQuotaExceededFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s StorageQuotaExceededFault) Code() string { +func (s *StorageQuotaExceededFault) Code() string { return "StorageQuotaExceededFault" } // Message returns the exception's message. -func (s StorageQuotaExceededFault) Message() string { +func (s *StorageQuotaExceededFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13927,25 +17398,25 @@ func (s StorageQuotaExceededFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s StorageQuotaExceededFault) OrigErr() error { +func (s *StorageQuotaExceededFault) OrigErr() error { return nil } -func (s StorageQuotaExceededFault) Error() string { +func (s *StorageQuotaExceededFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s StorageQuotaExceededFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *StorageQuotaExceededFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s StorageQuotaExceededFault) RequestID() string { - return s.respMetadata.RequestID +func (s *StorageQuotaExceededFault) RequestID() string { + return s.RespMetadata.RequestID } -// In response to a request by the DescribeReplicationSubnetGroup operation, +// In response to a request by the DescribeReplicationSubnetGroups operation, // this object identifies a subnet by its given Availability Zone, subnet identifier, // and status. type Subnet struct { @@ -13991,8 +17462,8 @@ func (s *Subnet) SetSubnetStatus(v string) *Subnet { // The specified subnet is already in use. type SubnetAlreadyInUse struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14009,17 +17480,17 @@ func (s SubnetAlreadyInUse) GoString() string { func newErrorSubnetAlreadyInUse(v protocol.ResponseMetadata) error { return &SubnetAlreadyInUse{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SubnetAlreadyInUse) Code() string { +func (s *SubnetAlreadyInUse) Code() string { return "SubnetAlreadyInUse" } // Message returns the exception's message. -func (s SubnetAlreadyInUse) Message() string { +func (s *SubnetAlreadyInUse) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14027,22 +17498,22 @@ func (s SubnetAlreadyInUse) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SubnetAlreadyInUse) OrigErr() error { +func (s *SubnetAlreadyInUse) OrigErr() error { return nil } -func (s SubnetAlreadyInUse) Error() string { +func (s *SubnetAlreadyInUse) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SubnetAlreadyInUse) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SubnetAlreadyInUse) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SubnetAlreadyInUse) RequestID() string { - return s.respMetadata.RequestID +func (s *SubnetAlreadyInUse) RequestID() string { + return s.RespMetadata.RequestID } // Provides information about types of supported endpoints in response to a @@ -14062,9 +17533,14 @@ type SupportedEndpointType struct { // The database engine name. Valid values, depending on the EndpointType, include // "mysql", "oracle", "postgres", "mariadb", "aurora", "aurora-postgresql", // "redshift", "s3", "db2", "azuredb", "sybase", "dynamodb", "mongodb", "kinesis", - // "kafka", "elasticsearch", "documentdb", and "sqlserver". + // "kafka", "elasticsearch", "documentdb", "sqlserver", and "neptune". EngineName *string `type:"string"` + // The earliest AWS DMS engine version that supports this endpoint engine. Note + // that endpoint engines released with AWS DMS versions earlier than 3.1.1 do + // not return a value for this parameter. + ReplicationInstanceEngineMinimumVersion *string `type:"string"` + // Indicates if Change Data Capture (CDC) is supported. SupportsCDC *bool `type:"boolean"` } @@ -14097,12 +17573,78 @@ func (s *SupportedEndpointType) SetEngineName(v string) *SupportedEndpointType { return s } +// SetReplicationInstanceEngineMinimumVersion sets the ReplicationInstanceEngineMinimumVersion field's value. 
+func (s *SupportedEndpointType) SetReplicationInstanceEngineMinimumVersion(v string) *SupportedEndpointType { + s.ReplicationInstanceEngineMinimumVersion = &v + return s +} + // SetSupportsCDC sets the SupportsCDC field's value. func (s *SupportedEndpointType) SetSupportsCDC(v bool) *SupportedEndpointType { s.SupportsCDC = &v return s } +// Provides information that defines a SAP ASE endpoint. +type SybaseSettings struct { + _ struct{} `type:"structure"` + + // Database name for the endpoint. + DatabaseName *string `type:"string"` + + // Endpoint connection password. + Password *string `type:"string" sensitive:"true"` + + // Endpoint TCP port. + Port *int64 `type:"integer"` + + // Fully qualified domain name of the endpoint. + ServerName *string `type:"string"` + + // Endpoint connection user name. + Username *string `type:"string"` +} + +// String returns the string representation +func (s SybaseSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SybaseSettings) GoString() string { + return s.String() +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *SybaseSettings) SetDatabaseName(v string) *SybaseSettings { + s.DatabaseName = &v + return s +} + +// SetPassword sets the Password field's value. +func (s *SybaseSettings) SetPassword(v string) *SybaseSettings { + s.Password = &v + return s +} + +// SetPort sets the Port field's value. +func (s *SybaseSettings) SetPort(v int64) *SybaseSettings { + s.Port = &v + return s +} + +// SetServerName sets the ServerName field's value. +func (s *SybaseSettings) SetServerName(v string) *SybaseSettings { + s.ServerName = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *SybaseSettings) SetUsername(v string) *SybaseSettings { + s.Username = &v + return s +} + // Provides a collection of table statistics in response to a request by the // DescribeTableStatistics operation. type TableStatistics struct { @@ -14168,26 +17710,34 @@ type TableStatistics struct { // // This parameter can have the following values: // - // * Not enabled - Validation isn't enabled for the table in the migration + // * Not enabled – Validation isn't enabled for the table in the migration // task. // - // * Pending records - Some records in the table are waiting for validation. + // * Pending records – Some records in the table are waiting for validation. // - // * Mismatched records - Some records in the table don't match between the - // source and target. + // * Mismatched records – Some records in the table don't match between + // the source and target. // - // * Suspended records - Some records in the table couldn't be validated. + // * Suspended records – Some records in the table couldn't be validated. // - // * No primary key - The table couldn't be validated because it has no primary - // key. + // * No primary key –The table couldn't be validated because it has no + // primary key. // - // * Table error - The table wasn't validated because it's in an error state - // and some data wasn't migrated. + // * Table error – The table wasn't validated because it's in an error + // state and some data wasn't migrated. // - // * Validated - All rows in the table are validated. If the table is updated, + // * Validated – All rows in the table are validated. If the table is updated, // the status can change from Validated. // - // * Error - The table couldn't be validated because of an unexpected error. 
+ // * Error – The table couldn't be validated because of an unexpected error. + // + // * Pending validation – The table is waiting validation. + // + // * Preparing table – Preparing the table enabled in the migration task + // for validation. + // + // * Pending revalidation – All rows in the table are pending validation + // after the table was updated. ValidationState *string `type:"string"` // Additional details about the state of validation. @@ -14326,10 +17876,14 @@ type TableToReload struct { _ struct{} `type:"structure"` // The schema name of the table to be reloaded. - SchemaName *string `type:"string"` + // + // SchemaName is a required field + SchemaName *string `type:"string" required:"true"` // The table name of the table to be reloaded. - TableName *string `type:"string"` + // + // TableName is a required field + TableName *string `type:"string" required:"true"` } // String returns the string representation @@ -14342,6 +17896,22 @@ func (s TableToReload) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *TableToReload) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TableToReload"} + if s.SchemaName == nil { + invalidParams.Add(request.NewErrParamRequired("SchemaName")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetSchemaName sets the SchemaName field's value. func (s *TableToReload) SetSchemaName(v string) *TableToReload { s.SchemaName = &v @@ -14365,16 +17935,16 @@ func (s *TableToReload) SetTableName(v string) *TableToReload { type Tag struct { _ struct{} `type:"structure"` - // A key is the required name of the tag. The string value can be from 1 to - // 128 Unicode characters in length and can't be prefixed with "aws:" or "dms:". - // The string can only contain only the set of Unicode letters, digits, white-space, - // '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). + // A key is the required name of the tag. The string value can be 1-128 Unicode + // characters in length and can't be prefixed with "aws:" or "dms:". The string + // can only contain only the set of Unicode letters, digits, white-space, '_', + // '.', '/', '=', '+', '-' (Java regular expressions: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). Key *string `type:"string"` - // A value is the optional value of the tag. The string value can be from 1 - // to 256 Unicode characters in length and can't be prefixed with "aws:" or - // "dms:". The string can only contain only the set of Unicode letters, digits, - // white-space, '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). + // A value is the optional value of the tag. The string value can be 1-256 Unicode + // characters in length and can't be prefixed with "aws:" or "dms:". The string + // can only contain only the set of Unicode letters, digits, white-space, '_', + // '.', '/', '=', '+', '-' (Java regular expressions: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). Value *string `type:"string"` } @@ -14477,8 +18047,8 @@ func (s *TestConnectionOutput) SetConnection(v *Connection) *TestConnectionOutpu // An upgrade dependency is preventing the database migration. 
type UpgradeDependencyFailureFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14495,17 +18065,17 @@ func (s UpgradeDependencyFailureFault) GoString() string { func newErrorUpgradeDependencyFailureFault(v protocol.ResponseMetadata) error { return &UpgradeDependencyFailureFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UpgradeDependencyFailureFault) Code() string { +func (s *UpgradeDependencyFailureFault) Code() string { return "UpgradeDependencyFailureFault" } // Message returns the exception's message. -func (s UpgradeDependencyFailureFault) Message() string { +func (s *UpgradeDependencyFailureFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14513,33 +18083,33 @@ func (s UpgradeDependencyFailureFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UpgradeDependencyFailureFault) OrigErr() error { +func (s *UpgradeDependencyFailureFault) OrigErr() error { return nil } -func (s UpgradeDependencyFailureFault) Error() string { +func (s *UpgradeDependencyFailureFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UpgradeDependencyFailureFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UpgradeDependencyFailureFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UpgradeDependencyFailureFault) RequestID() string { - return s.respMetadata.RequestID +func (s *UpgradeDependencyFailureFault) RequestID() string { + return s.RespMetadata.RequestID } -// Describes status of a security group associated with the virtual private -// cloud hosting your replication and DB instances. +// Describes the status of a security group associated with the virtual private +// cloud (VPC) hosting your replication and DB instances. type VpcSecurityGroupMembership struct { _ struct{} `type:"structure"` // The status of the VPC security group. Status *string `type:"string"` - // The VPC security group Id. + // The VPC security group ID. 
VpcSecurityGroupId *string `type:"string"` } @@ -14576,6 +18146,15 @@ const ( AuthMechanismValueScramSha1 = "scram_sha_1" ) +// AuthMechanismValue_Values returns all elements of the AuthMechanismValue enum +func AuthMechanismValue_Values() []string { + return []string{ + AuthMechanismValueDefault, + AuthMechanismValueMongodbCr, + AuthMechanismValueScramSha1, + } +} + const ( // AuthTypeValueNo is a AuthTypeValue enum value AuthTypeValueNo = "no" @@ -14584,6 +18163,34 @@ const ( AuthTypeValuePassword = "password" ) +// AuthTypeValue_Values returns all elements of the AuthTypeValue enum +func AuthTypeValue_Values() []string { + return []string{ + AuthTypeValueNo, + AuthTypeValuePassword, + } +} + +const ( + // CharLengthSemanticsDefault is a CharLengthSemantics enum value + CharLengthSemanticsDefault = "default" + + // CharLengthSemanticsChar is a CharLengthSemantics enum value + CharLengthSemanticsChar = "char" + + // CharLengthSemanticsByte is a CharLengthSemantics enum value + CharLengthSemanticsByte = "byte" +) + +// CharLengthSemantics_Values returns all elements of the CharLengthSemantics enum +func CharLengthSemantics_Values() []string { + return []string{ + CharLengthSemanticsDefault, + CharLengthSemanticsChar, + CharLengthSemanticsByte, + } +} + const ( // CompressionTypeValueNone is a CompressionTypeValue enum value CompressionTypeValueNone = "none" @@ -14592,6 +18199,14 @@ const ( CompressionTypeValueGzip = "gzip" ) +// CompressionTypeValue_Values returns all elements of the CompressionTypeValue enum +func CompressionTypeValue_Values() []string { + return []string{ + CompressionTypeValueNone, + CompressionTypeValueGzip, + } +} + const ( // DataFormatValueCsv is a DataFormatValue enum value DataFormatValueCsv = "csv" @@ -14600,6 +18215,66 @@ const ( DataFormatValueParquet = "parquet" ) +// DataFormatValue_Values returns all elements of the DataFormatValue enum +func DataFormatValue_Values() []string { + return []string{ + DataFormatValueCsv, + DataFormatValueParquet, + } +} + +const ( + // DatePartitionDelimiterValueSlash is a DatePartitionDelimiterValue enum value + DatePartitionDelimiterValueSlash = "SLASH" + + // DatePartitionDelimiterValueUnderscore is a DatePartitionDelimiterValue enum value + DatePartitionDelimiterValueUnderscore = "UNDERSCORE" + + // DatePartitionDelimiterValueDash is a DatePartitionDelimiterValue enum value + DatePartitionDelimiterValueDash = "DASH" + + // DatePartitionDelimiterValueNone is a DatePartitionDelimiterValue enum value + DatePartitionDelimiterValueNone = "NONE" +) + +// DatePartitionDelimiterValue_Values returns all elements of the DatePartitionDelimiterValue enum +func DatePartitionDelimiterValue_Values() []string { + return []string{ + DatePartitionDelimiterValueSlash, + DatePartitionDelimiterValueUnderscore, + DatePartitionDelimiterValueDash, + DatePartitionDelimiterValueNone, + } +} + +const ( + // DatePartitionSequenceValueYyyymmdd is a DatePartitionSequenceValue enum value + DatePartitionSequenceValueYyyymmdd = "YYYYMMDD" + + // DatePartitionSequenceValueYyyymmddhh is a DatePartitionSequenceValue enum value + DatePartitionSequenceValueYyyymmddhh = "YYYYMMDDHH" + + // DatePartitionSequenceValueYyyymm is a DatePartitionSequenceValue enum value + DatePartitionSequenceValueYyyymm = "YYYYMM" + + // DatePartitionSequenceValueMmyyyydd is a DatePartitionSequenceValue enum value + DatePartitionSequenceValueMmyyyydd = "MMYYYYDD" + + // DatePartitionSequenceValueDdmmyyyy is a DatePartitionSequenceValue enum value + DatePartitionSequenceValueDdmmyyyy 
= "DDMMYYYY" +) + +// DatePartitionSequenceValue_Values returns all elements of the DatePartitionSequenceValue enum +func DatePartitionSequenceValue_Values() []string { + return []string{ + DatePartitionSequenceValueYyyymmdd, + DatePartitionSequenceValueYyyymmddhh, + DatePartitionSequenceValueYyyymm, + DatePartitionSequenceValueMmyyyydd, + DatePartitionSequenceValueDdmmyyyy, + } +} + const ( // DmsSslModeValueNone is a DmsSslModeValue enum value DmsSslModeValueNone = "none" @@ -14614,6 +18289,16 @@ const ( DmsSslModeValueVerifyFull = "verify-full" ) +// DmsSslModeValue_Values returns all elements of the DmsSslModeValue enum +func DmsSslModeValue_Values() []string { + return []string{ + DmsSslModeValueNone, + DmsSslModeValueRequire, + DmsSslModeValueVerifyCa, + DmsSslModeValueVerifyFull, + } +} + const ( // EncodingTypeValuePlain is a EncodingTypeValue enum value EncodingTypeValuePlain = "plain" @@ -14625,6 +18310,15 @@ const ( EncodingTypeValueRleDictionary = "rle-dictionary" ) +// EncodingTypeValue_Values returns all elements of the EncodingTypeValue enum +func EncodingTypeValue_Values() []string { + return []string{ + EncodingTypeValuePlain, + EncodingTypeValuePlainDictionary, + EncodingTypeValueRleDictionary, + } +} + const ( // EncryptionModeValueSseS3 is a EncryptionModeValue enum value EncryptionModeValueSseS3 = "sse-s3" @@ -14633,6 +18327,14 @@ const ( EncryptionModeValueSseKms = "sse-kms" ) +// EncryptionModeValue_Values returns all elements of the EncryptionModeValue enum +func EncryptionModeValue_Values() []string { + return []string{ + EncryptionModeValueSseS3, + EncryptionModeValueSseKms, + } +} + const ( // MessageFormatValueJson is a MessageFormatValue enum value MessageFormatValueJson = "json" @@ -14641,6 +18343,14 @@ const ( MessageFormatValueJsonUnformatted = "json-unformatted" ) +// MessageFormatValue_Values returns all elements of the MessageFormatValue enum +func MessageFormatValue_Values() []string { + return []string{ + MessageFormatValueJson, + MessageFormatValueJsonUnformatted, + } +} + const ( // MigrationTypeValueFullLoad is a MigrationTypeValue enum value MigrationTypeValueFullLoad = "full-load" @@ -14652,6 +18362,15 @@ const ( MigrationTypeValueFullLoadAndCdc = "full-load-and-cdc" ) +// MigrationTypeValue_Values returns all elements of the MigrationTypeValue enum +func MigrationTypeValue_Values() []string { + return []string{ + MigrationTypeValueFullLoad, + MigrationTypeValueCdc, + MigrationTypeValueFullLoadAndCdc, + } +} + const ( // NestingLevelValueNone is a NestingLevelValue enum value NestingLevelValueNone = "none" @@ -14660,6 +18379,14 @@ const ( NestingLevelValueOne = "one" ) +// NestingLevelValue_Values returns all elements of the NestingLevelValue enum +func NestingLevelValue_Values() []string { + return []string{ + NestingLevelValueNone, + NestingLevelValueOne, + } +} + const ( // ParquetVersionValueParquet10 is a ParquetVersionValue enum value ParquetVersionValueParquet10 = "parquet-1-0" @@ -14668,6 +18395,14 @@ const ( ParquetVersionValueParquet20 = "parquet-2-0" ) +// ParquetVersionValue_Values returns all elements of the ParquetVersionValue enum +func ParquetVersionValue_Values() []string { + return []string{ + ParquetVersionValueParquet10, + ParquetVersionValueParquet20, + } +} + const ( // RefreshSchemasStatusTypeValueSuccessful is a RefreshSchemasStatusTypeValue enum value RefreshSchemasStatusTypeValueSuccessful = "successful" @@ -14679,11 +18414,27 @@ const ( RefreshSchemasStatusTypeValueRefreshing = "refreshing" ) +// 
RefreshSchemasStatusTypeValue_Values returns all elements of the RefreshSchemasStatusTypeValue enum +func RefreshSchemasStatusTypeValue_Values() []string { + return []string{ + RefreshSchemasStatusTypeValueSuccessful, + RefreshSchemasStatusTypeValueFailed, + RefreshSchemasStatusTypeValueRefreshing, + } +} + const ( // ReleaseStatusValuesBeta is a ReleaseStatusValues enum value ReleaseStatusValuesBeta = "beta" ) +// ReleaseStatusValues_Values returns all elements of the ReleaseStatusValues enum +func ReleaseStatusValues_Values() []string { + return []string{ + ReleaseStatusValuesBeta, + } +} + const ( // ReloadOptionValueDataReload is a ReloadOptionValue enum value ReloadOptionValueDataReload = "data-reload" @@ -14692,6 +18443,14 @@ const ( ReloadOptionValueValidateOnly = "validate-only" ) +// ReloadOptionValue_Values returns all elements of the ReloadOptionValue enum +func ReloadOptionValue_Values() []string { + return []string{ + ReloadOptionValueDataReload, + ReloadOptionValueValidateOnly, + } +} + const ( // ReplicationEndpointTypeValueSource is a ReplicationEndpointTypeValue enum value ReplicationEndpointTypeValueSource = "source" @@ -14700,11 +18459,46 @@ const ( ReplicationEndpointTypeValueTarget = "target" ) +// ReplicationEndpointTypeValue_Values returns all elements of the ReplicationEndpointTypeValue enum +func ReplicationEndpointTypeValue_Values() []string { + return []string{ + ReplicationEndpointTypeValueSource, + ReplicationEndpointTypeValueTarget, + } +} + +const ( + // SafeguardPolicyRelyOnSqlServerReplicationAgent is a SafeguardPolicy enum value + SafeguardPolicyRelyOnSqlServerReplicationAgent = "rely-on-sql-server-replication-agent" + + // SafeguardPolicyExclusiveAutomaticTruncation is a SafeguardPolicy enum value + SafeguardPolicyExclusiveAutomaticTruncation = "exclusive-automatic-truncation" + + // SafeguardPolicySharedAutomaticTruncation is a SafeguardPolicy enum value + SafeguardPolicySharedAutomaticTruncation = "shared-automatic-truncation" +) + +// SafeguardPolicy_Values returns all elements of the SafeguardPolicy enum +func SafeguardPolicy_Values() []string { + return []string{ + SafeguardPolicyRelyOnSqlServerReplicationAgent, + SafeguardPolicyExclusiveAutomaticTruncation, + SafeguardPolicySharedAutomaticTruncation, + } +} + const ( // SourceTypeReplicationInstance is a SourceType enum value SourceTypeReplicationInstance = "replication-instance" ) +// SourceType_Values returns all elements of the SourceType enum +func SourceType_Values() []string { + return []string{ + SourceTypeReplicationInstance, + } +} + const ( // StartReplicationTaskTypeValueStartReplication is a StartReplicationTaskTypeValue enum value StartReplicationTaskTypeValueStartReplication = "start-replication" @@ -14715,3 +18509,28 @@ const ( // StartReplicationTaskTypeValueReloadTarget is a StartReplicationTaskTypeValue enum value StartReplicationTaskTypeValueReloadTarget = "reload-target" ) + +// StartReplicationTaskTypeValue_Values returns all elements of the StartReplicationTaskTypeValue enum +func StartReplicationTaskTypeValue_Values() []string { + return []string{ + StartReplicationTaskTypeValueStartReplication, + StartReplicationTaskTypeValueResumeProcessing, + StartReplicationTaskTypeValueReloadTarget, + } +} + +const ( + // TargetDbTypeSpecificDatabase is a TargetDbType enum value + TargetDbTypeSpecificDatabase = "specific-database" + + // TargetDbTypeMultipleDatabases is a TargetDbType enum value + TargetDbTypeMultipleDatabases = "multiple-databases" +) + +// TargetDbType_Values returns 
all elements of the TargetDbType enum +func TargetDbType_Values() []string { + return []string{ + TargetDbTypeSpecificDatabase, + TargetDbTypeMultipleDatabases, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/errors.go b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/errors.go index e2e32caf6..b02044ee1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/errors.go @@ -53,6 +53,13 @@ const ( // The specified master key (CMK) isn't enabled. ErrCodeKMSDisabledFault = "KMSDisabledFault" + // ErrCodeKMSFault for service response error code + // "KMSFault". + // + // An AWS Key Management Service (AWS KMS) error is preventing access to AWS + // KMS. + ErrCodeKMSFault = "KMSFault" + // ErrCodeKMSInvalidStateFault for service response error code // "KMSInvalidStateFault". // @@ -102,6 +109,18 @@ const ( // The quota for this resource quota has been exceeded. ErrCodeResourceQuotaExceededFault = "ResourceQuotaExceededFault" + // ErrCodeS3AccessDeniedFault for service response error code + // "S3AccessDeniedFault". + // + // Insufficient privileges are preventing access to an Amazon S3 object. + ErrCodeS3AccessDeniedFault = "S3AccessDeniedFault" + + // ErrCodeS3ResourceNotFoundFault for service response error code + // "S3ResourceNotFoundFault". + // + // A specified Amazon S3 bucket, bucket folder, or other object can't be found. + ErrCodeS3ResourceNotFoundFault = "S3ResourceNotFoundFault" + // ErrCodeSNSInvalidTopicFault for service response error code // "SNSInvalidTopicFault". // @@ -141,6 +160,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidSubnet": newErrorInvalidSubnet, "KMSAccessDeniedFault": newErrorKMSAccessDeniedFault, "KMSDisabledFault": newErrorKMSDisabledFault, + "KMSFault": newErrorKMSFault, "KMSInvalidStateFault": newErrorKMSInvalidStateFault, "KMSKeyNotAccessibleFault": newErrorKMSKeyNotAccessibleFault, "KMSNotFoundFault": newErrorKMSNotFoundFault, @@ -149,6 +169,8 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "ResourceAlreadyExistsFault": newErrorResourceAlreadyExistsFault, "ResourceNotFoundFault": newErrorResourceNotFoundFault, "ResourceQuotaExceededFault": newErrorResourceQuotaExceededFault, + "S3AccessDeniedFault": newErrorS3AccessDeniedFault, + "S3ResourceNotFoundFault": newErrorS3ResourceNotFoundFault, "SNSInvalidTopicFault": newErrorSNSInvalidTopicFault, "SNSNoAuthorizationFault": newErrorSNSNoAuthorizationFault, "StorageQuotaExceededFault": newErrorStorageQuotaExceededFault, diff --git a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go index 41409c62d..fbb9d0055 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dataexchange/api.go b/vendor/github.com/aws/aws-sdk-go/service/dataexchange/api.go index c95c96bec..9eb037715 100644 --- 
a/vendor/github.com/aws/aws-sdk-go/service/dataexchange/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dataexchange/api.go @@ -2211,8 +2211,8 @@ func (c *DataExchange) UpdateRevisionWithContext(ctx aws.Context, input *UpdateR // Access to the resource is denied. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Access to the resource is denied. Message_ *string `locationName:"Message" type:"string"` @@ -2230,17 +2230,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2248,22 +2248,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // The destination for the asset. @@ -2591,8 +2591,8 @@ func (s CancelJobOutput) GoString() string { // The request couldn't be completed because it conflicted with the current // state of the resource. type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The request couldn't be completed because it conflicted with the current // state of the resource. @@ -2617,17 +2617,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2635,22 +2635,22 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } // A request to create a data set that contains one or more revisions. @@ -3658,6 +3658,9 @@ type ExportAssetsToS3RequestDetails struct { // DataSetId is a required field DataSetId *string `type:"string" required:"true"` + // Encryption configuration for the export job. + Encryption *ExportServerSideEncryption `type:"structure"` + // The unique identifier for the revision associated with this export request. // // RevisionId is a required field @@ -3696,6 +3699,11 @@ func (s *ExportAssetsToS3RequestDetails) Validate() error { } } } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3715,6 +3723,12 @@ func (s *ExportAssetsToS3RequestDetails) SetDataSetId(v string) *ExportAssetsToS return s } +// SetEncryption sets the Encryption field's value. +func (s *ExportAssetsToS3RequestDetails) SetEncryption(v *ExportServerSideEncryption) *ExportAssetsToS3RequestDetails { + s.Encryption = v + return s +} + // SetRevisionId sets the RevisionId field's value. func (s *ExportAssetsToS3RequestDetails) SetRevisionId(v string) *ExportAssetsToS3RequestDetails { s.RevisionId = &v @@ -3735,6 +3749,9 @@ type ExportAssetsToS3ResponseDetails struct { // DataSetId is a required field DataSetId *string `type:"string" required:"true"` + // Encryption configuration of the export job. + Encryption *ExportServerSideEncryption `type:"structure"` + // The unique identifier for the revision associated with this export response. // // RevisionId is a required field @@ -3763,12 +3780,71 @@ func (s *ExportAssetsToS3ResponseDetails) SetDataSetId(v string) *ExportAssetsTo return s } +// SetEncryption sets the Encryption field's value. +func (s *ExportAssetsToS3ResponseDetails) SetEncryption(v *ExportServerSideEncryption) *ExportAssetsToS3ResponseDetails { + s.Encryption = v + return s +} + // SetRevisionId sets the RevisionId field's value. func (s *ExportAssetsToS3ResponseDetails) SetRevisionId(v string) *ExportAssetsToS3ResponseDetails { s.RevisionId = &v return s } +// Encryption configuration of the export job. Includes the encryption type +// as well as the AWS KMS key. The KMS key is only necessary if you chose the +// KMS encryption type. +type ExportServerSideEncryption struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the the AWS KMS key you want to use to + // encrypt the Amazon S3 objects. This parameter is required if you choose aws:kms + // as an encryption type. + KmsKeyArn *string `type:"string"` + + // The type of server side encryption used for encrypting the objects in Amazon + // S3. 
+ // + // Type is a required field + Type *string `type:"string" required:"true" enum:"ServerSideEncryptionTypes"` +} + +// String returns the string representation +func (s ExportServerSideEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExportServerSideEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportServerSideEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportServerSideEncryption"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *ExportServerSideEncryption) SetKmsKeyArn(v string) *ExportServerSideEncryption { + s.KmsKeyArn = &v + return s +} + +// SetType sets the Type field's value. +func (s *ExportServerSideEncryption) SetType(v string) *ExportServerSideEncryption { + s.Type = &v + return s +} + type GetAssetInput struct { _ struct{} `type:"structure"` @@ -4683,8 +4759,8 @@ func (s *ImportAssetsFromS3ResponseDetails) SetRevisionId(v string) *ImportAsset // An exception occurred with the service. type InternalServerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message identifying the service exception that occurred. Message_ *string `locationName:"Message" type:"string"` @@ -4702,17 +4778,17 @@ func (s InternalServerException) GoString() string { func newErrorInternalServerException(v protocol.ResponseMetadata) error { return &InternalServerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerException) Code() string { +func (s *InternalServerException) Code() string { return "InternalServerException" } // Message returns the exception's message. -func (s InternalServerException) Message() string { +func (s *InternalServerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4720,22 +4796,22 @@ func (s InternalServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerException) OrigErr() error { +func (s *InternalServerException) OrigErr() error { return nil } -func (s InternalServerException) Error() string { +func (s *InternalServerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID } // AWS Data Exchange Jobs are asynchronous import or export operations used @@ -4865,7 +4941,7 @@ type JobError struct { // Message is a required field Message *string `type:"string" required:"true"` - // The unqiue identifier for the resource related to the error. + // The unique identifier for the resource related to the error. ResourceId *string `type:"string"` // The type of resource related to the error. 
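The hunks above wire an optional server-side encryption block into Data Exchange S3 export jobs. As a rough sketch of how calling code might use the new surface once this vendored SDK lands, the snippet below builds an ExportServerSideEncryption and attaches it with SetEncryption; the identifiers and key ARN are assumptions, and the remaining required fields of the export request are deliberately left out, so Validate is expected to flag them.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dataexchange"
)

func main() {
	// Encryption settings for the exported S3 objects. The aws:kms type needs a
	// key ARN; AES256 does not.
	enc := &dataexchange.ExportServerSideEncryption{
		Type:      aws.String(dataexchange.ServerSideEncryptionTypesAwsKms),
		KmsKeyArn: aws.String("arn:aws:kms:us-east-1:111122223333:key/example"), // hypothetical ARN
	}

	details := &dataexchange.ExportAssetsToS3RequestDetails{
		DataSetId:  aws.String("example-data-set-id"),  // hypothetical identifier
		RevisionId: aws.String("example-revision-id"),  // hypothetical identifier
	}
	details.SetEncryption(enc)

	// Validate reports any required fields this sketch leaves unset, and would
	// also catch an Encryption block without a Type, as request.ErrInvalidParams.
	if err := details.Validate(); err != nil {
		fmt.Println(err)
	}
}

The response-side struct, ExportAssetsToS3ResponseDetails, gains the same Encryption field, so the chosen settings can be read back from the job details.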
@@ -5460,8 +5536,8 @@ func (s *RequestDetails) SetImportAssetsFromS3(v *ImportAssetsFromS3RequestDetai // The resource couldn't be found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The resource couldn't be found. Message_ *string `locationName:"Message" type:"string"` @@ -5485,17 +5561,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5503,22 +5579,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Details for the response. @@ -5707,8 +5783,8 @@ func (s *S3SnapshotAsset) SetSize(v float64) *S3SnapshotAsset { // The request has exceeded the quotas imposed by the service. type ServiceLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` LimitName *string `type:"string" enum:"LimitName"` @@ -5729,17 +5805,17 @@ func (s ServiceLimitExceededException) GoString() string { func newErrorServiceLimitExceededException(v protocol.ResponseMetadata) error { return &ServiceLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceLimitExceededException) Code() string { +func (s *ServiceLimitExceededException) Code() string { return "ServiceLimitExceededException" } // Message returns the exception's message. -func (s ServiceLimitExceededException) Message() string { +func (s *ServiceLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5747,22 +5823,22 @@ func (s ServiceLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ServiceLimitExceededException) OrigErr() error { +func (s *ServiceLimitExceededException) OrigErr() error { return nil } -func (s ServiceLimitExceededException) Error() string { +func (s *ServiceLimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type StartJobInput struct { @@ -5885,8 +5961,8 @@ func (s TagResourceOutput) GoString() string { // The limit on the number of requests per second was exceeded. type ThrottlingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The limit on the number of requests per second was exceeded. Message_ *string `locationName:"Message" type:"string"` @@ -5904,17 +5980,17 @@ func (s ThrottlingException) GoString() string { func newErrorThrottlingException(v protocol.ResponseMetadata) error { return &ThrottlingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ThrottlingException) Code() string { +func (s *ThrottlingException) Code() string { return "ThrottlingException" } // Message returns the exception's message. -func (s ThrottlingException) Message() string { +func (s *ThrottlingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5922,22 +5998,22 @@ func (s ThrottlingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThrottlingException) OrigErr() error { +func (s *ThrottlingException) OrigErr() error { return nil } -func (s ThrottlingException) Error() string { +func (s *ThrottlingException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ThrottlingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ThrottlingException) RequestID() string { - return s.respMetadata.RequestID +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -6523,8 +6599,8 @@ func (s *UpdateRevisionOutput) SetUpdatedAt(v time.Time) *UpdateRevisionOutput { // The request was invalid. type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message that informs you about what was invalid about the request. Message_ *string `locationName:"Message" type:"string"` @@ -6542,17 +6618,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. -func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6560,22 +6636,22 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } // The type of file your data is stored in. Currently, the supported asset type @@ -6585,6 +6661,13 @@ const ( AssetTypeS3Snapshot = "S3_SNAPSHOT" ) +// AssetType_Values returns all elements of the AssetType enum +func AssetType_Values() []string { + return []string{ + AssetTypeS3Snapshot, + } +} + const ( // CodeAccessDeniedException is a Code enum value CodeAccessDeniedException = "ACCESS_DENIED_EXCEPTION" @@ -6608,6 +6691,19 @@ const ( CodeMalwareScanEncryptedFile = "MALWARE_SCAN_ENCRYPTED_FILE" ) +// Code_Values returns all elements of the Code enum +func Code_Values() []string { + return []string{ + CodeAccessDeniedException, + CodeInternalServerException, + CodeMalwareDetected, + CodeResourceNotFoundException, + CodeServiceQuotaExceededException, + CodeValidationException, + CodeMalwareScanEncryptedFile, + } +} + // The name of the limit that was reached. const ( // JobErrorLimitNameAssetsperrevision is a JobErrorLimitName enum value @@ -6617,6 +6713,14 @@ const ( JobErrorLimitNameAssetsizeinGb = "Asset size in GB" ) +// JobErrorLimitName_Values returns all elements of the JobErrorLimitName enum +func JobErrorLimitName_Values() []string { + return []string{ + JobErrorLimitNameAssetsperrevision, + JobErrorLimitNameAssetsizeinGb, + } +} + // The types of resource which the job error can apply to. 
const ( // JobErrorResourceTypesRevision is a JobErrorResourceTypes enum value @@ -6626,6 +6730,14 @@ const ( JobErrorResourceTypesAsset = "ASSET" ) +// JobErrorResourceTypes_Values returns all elements of the JobErrorResourceTypes enum +func JobErrorResourceTypes_Values() []string { + return []string{ + JobErrorResourceTypesRevision, + JobErrorResourceTypesAsset, + } +} + const ( // LimitNameProductsperaccount is a LimitName enum value LimitNameProductsperaccount = "Products per account" @@ -6664,6 +6776,24 @@ const ( LimitNameConcurrentinprogressjobstoexportassetstoasignedUrl = "Concurrent in progress jobs to export assets to a signed URL" ) +// LimitName_Values returns all elements of the LimitName enum +func LimitName_Values() []string { + return []string{ + LimitNameProductsperaccount, + LimitNameDatasetsperaccount, + LimitNameDatasetsperproduct, + LimitNameRevisionsperdataset, + LimitNameAssetsperrevision, + LimitNameAssetsperimportjobfromAmazonS3, + LimitNameAssetperexportjobfromAmazonS3, + LimitNameAssetsizeinGb, + LimitNameConcurrentinprogressjobstoimportassetsfromAmazonS3, + LimitNameConcurrentinprogressjobstoimportassetsfromasignedUrl, + LimitNameConcurrentinprogressjobstoexportassetstoAmazonS3, + LimitNameConcurrentinprogressjobstoexportassetstoasignedUrl, + } +} + // A property that defines the data set as OWNED by the account (for providers) // or ENTITLED to the account (for subscribers). When an owned data set is published // in a product, AWS Data Exchange creates a copy of the data set. Subscribers @@ -6676,6 +6806,14 @@ const ( OriginEntitled = "ENTITLED" ) +// Origin_Values returns all elements of the Origin enum +func Origin_Values() []string { + return []string{ + OriginOwned, + OriginEntitled, + } +} + const ( // ResourceTypeDataSet is a ResourceType enum value ResourceTypeDataSet = "DATA_SET" @@ -6690,6 +6828,33 @@ const ( ResourceTypeJob = "JOB" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeDataSet, + ResourceTypeRevision, + ResourceTypeAsset, + ResourceTypeJob, + } +} + +// The types of encryption supported in export jobs to Amazon S3. 
+const ( + // ServerSideEncryptionTypesAwsKms is a ServerSideEncryptionTypes enum value + ServerSideEncryptionTypesAwsKms = "aws:kms" + + // ServerSideEncryptionTypesAes256 is a ServerSideEncryptionTypes enum value + ServerSideEncryptionTypesAes256 = "AES256" +) + +// ServerSideEncryptionTypes_Values returns all elements of the ServerSideEncryptionTypes enum +func ServerSideEncryptionTypes_Values() []string { + return []string{ + ServerSideEncryptionTypesAwsKms, + ServerSideEncryptionTypesAes256, + } +} + const ( // StateWaiting is a State enum value StateWaiting = "WAITING" @@ -6710,6 +6875,18 @@ const ( StateTimedOut = "TIMED_OUT" ) +// State_Values returns all elements of the State enum +func State_Values() []string { + return []string{ + StateWaiting, + StateInProgress, + StateError, + StateCompleted, + StateCancelled, + StateTimedOut, + } +} + const ( // TypeImportAssetsFromS3 is a Type enum value TypeImportAssetsFromS3 = "IMPORT_ASSETS_FROM_S3" @@ -6723,3 +6900,13 @@ const ( // TypeExportAssetToSignedUrl is a Type enum value TypeExportAssetToSignedUrl = "EXPORT_ASSET_TO_SIGNED_URL" ) + +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeImportAssetsFromS3, + TypeImportAssetFromSignedUrl, + TypeExportAssetsToS3, + TypeExportAssetToSignedUrl, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dataexchange/service.go b/vendor/github.com/aws/aws-sdk-go/service/dataexchange/service.go index d5a34f815..ccf045d61 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dataexchange/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dataexchange/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/datapipeline/api.go b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/api.go index 196fab8ab..66aef55f7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/datapipeline/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/api.go @@ -2899,8 +2899,8 @@ func (s *InstanceIdentity) SetSignature(v string) *InstanceIdentity { // An internal service error occurred. type InternalServiceError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Description of the error message. Message_ *string `locationName:"message" type:"string"` @@ -2918,17 +2918,17 @@ func (s InternalServiceError) GoString() string { func newErrorInternalServiceError(v protocol.ResponseMetadata) error { return &InternalServiceError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceError) Code() string { +func (s *InternalServiceError) Code() string { return "InternalServiceError" } // Message returns the exception's message. -func (s InternalServiceError) Message() string { +func (s *InternalServiceError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2936,30 +2936,30 @@ func (s InternalServiceError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InternalServiceError) OrigErr() error { +func (s *InternalServiceError) OrigErr() error { return nil } -func (s InternalServiceError) Error() string { +func (s *InternalServiceError) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceError) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceError) RequestID() string { + return s.RespMetadata.RequestID } // The request was not valid. Verify that your request was properly formatted, // that the signature was generated with the correct credentials, and that you // haven't exceeded any of the service limits for your account. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Description of the error message. Message_ *string `locationName:"message" type:"string"` @@ -2977,17 +2977,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2995,22 +2995,22 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Contains the parameters for ListPipelines. @@ -3344,8 +3344,8 @@ func (s *ParameterValue) SetStringValue(v string) *ParameterValue { // The specified pipeline has been deleted. type PipelineDeletedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Description of the error message. Message_ *string `locationName:"message" type:"string"` @@ -3363,17 +3363,17 @@ func (s PipelineDeletedException) GoString() string { func newErrorPipelineDeletedException(v protocol.ResponseMetadata) error { return &PipelineDeletedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s PipelineDeletedException) Code() string { +func (s *PipelineDeletedException) Code() string { return "PipelineDeletedException" } // Message returns the exception's message. -func (s PipelineDeletedException) Message() string { +func (s *PipelineDeletedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3381,22 +3381,22 @@ func (s PipelineDeletedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PipelineDeletedException) OrigErr() error { +func (s *PipelineDeletedException) OrigErr() error { return nil } -func (s PipelineDeletedException) Error() string { +func (s *PipelineDeletedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PipelineDeletedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PipelineDeletedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PipelineDeletedException) RequestID() string { - return s.respMetadata.RequestID +func (s *PipelineDeletedException) RequestID() string { + return s.RespMetadata.RequestID } // Contains pipeline metadata. @@ -3507,8 +3507,8 @@ func (s *PipelineIdName) SetName(v string) *PipelineIdName { // The specified pipeline was not found. Verify that you used the correct user // and account identifiers. type PipelineNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Description of the error message. Message_ *string `locationName:"message" type:"string"` @@ -3526,17 +3526,17 @@ func (s PipelineNotFoundException) GoString() string { func newErrorPipelineNotFoundException(v protocol.ResponseMetadata) error { return &PipelineNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PipelineNotFoundException) Code() string { +func (s *PipelineNotFoundException) Code() string { return "PipelineNotFoundException" } // Message returns the exception's message. -func (s PipelineNotFoundException) Message() string { +func (s *PipelineNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3544,22 +3544,22 @@ func (s PipelineNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PipelineNotFoundException) OrigErr() error { +func (s *PipelineNotFoundException) OrigErr() error { return nil } -func (s PipelineNotFoundException) Error() string { +func (s *PipelineNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PipelineNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PipelineNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PipelineNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *PipelineNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about a pipeline object. This can be a logical, physical, @@ -4612,8 +4612,8 @@ func (s *Tag) SetValue(v string) *Tag { // The specified task was not found. 
type TaskNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Description of the error message. Message_ *string `locationName:"message" type:"string"` @@ -4631,17 +4631,17 @@ func (s TaskNotFoundException) GoString() string { func newErrorTaskNotFoundException(v protocol.ResponseMetadata) error { return &TaskNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TaskNotFoundException) Code() string { +func (s *TaskNotFoundException) Code() string { return "TaskNotFoundException" } // Message returns the exception's message. -func (s TaskNotFoundException) Message() string { +func (s *TaskNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4649,22 +4649,22 @@ func (s TaskNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TaskNotFoundException) OrigErr() error { +func (s *TaskNotFoundException) OrigErr() error { return nil } -func (s TaskNotFoundException) Error() string { +func (s *TaskNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TaskNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TaskNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TaskNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *TaskNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about a pipeline task that is assigned to a task runner. 
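The datapipeline hunks above apply the same mechanical rewrite seen for the other services in this vendor bump: each modeled exception moves to pointer receivers and exports its RespMetadata field. One common way for consuming code to handle these typed errors is errors.As; the sketch below is illustrative only, with the helper name and the surrounding error plumbing assumed rather than taken from this patch.

package example

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/service/datapipeline"
)

// classifyPipelineError is a hypothetical helper: it matches the pointer-based
// exception types shown above and pulls request metadata off them.
func classifyPipelineError(err error) error {
	var notFound *datapipeline.PipelineNotFoundException
	if errors.As(err, &notFound) {
		// StatusCode and RequestID are backed by the exported RespMetadata field.
		return fmt.Errorf("pipeline not found (request %s, HTTP %d): %s",
			notFound.RequestID(), notFound.StatusCode(), notFound.Message())
	}

	var deleted *datapipeline.PipelineDeletedException
	if errors.As(err, &deleted) {
		return fmt.Errorf("pipeline was deleted: %s", deleted.Message())
	}

	return err
}

Matching on awserr.Error and its Code() string continues to work as before; the typed form simply keeps StatusCode and RequestID one method call away.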
@@ -4956,6 +4956,17 @@ const ( OperatorTypeBetween = "BETWEEN" ) +// OperatorType_Values returns all elements of the OperatorType enum +func OperatorType_Values() []string { + return []string{ + OperatorTypeEq, + OperatorTypeRefEq, + OperatorTypeLe, + OperatorTypeGe, + OperatorTypeBetween, + } +} + const ( // TaskStatusFinished is a TaskStatus enum value TaskStatusFinished = "FINISHED" @@ -4966,3 +4977,12 @@ const ( // TaskStatusFalse is a TaskStatus enum value TaskStatusFalse = "FALSE" ) + +// TaskStatus_Values returns all elements of the TaskStatus enum +func TaskStatus_Values() []string { + return []string{ + TaskStatusFinished, + TaskStatusFailed, + TaskStatusFalse, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go index 7d546a7cd..560d0e31e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/datasync/api.go b/vendor/github.com/aws/aws-sdk-go/service/datasync/api.go index affac0d7d..2a4bda27a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/datasync/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/datasync/api.go @@ -60,7 +60,7 @@ func (c *DataSync) CancelTaskExecutionRequest(input *CancelTaskExecutionInput) ( // // Cancels execution of a task. // -// When you cancel a task execution, the transfer of some files are abruptly +// When you cancel a task execution, the transfer of some files is abruptly // interrupted. The contents of files that are transferred to the destination // might be incomplete or inconsistent with the source files. However, if you // start a new task execution on the same task and you allow the task execution @@ -156,9 +156,9 @@ func (c *DataSync) CreateAgentRequest(input *CreateAgentInput) (req *request.Req // target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created // in this AWS Region. // -// You can activate the agent in a VPC (Virtual private Cloud) or provide the +// You can activate the agent in a VPC (virtual private cloud) or provide the // agent access to a VPC endpoint so you can run tasks without going over the -// public Internet. +// public internet. // // You can use an agent for more than one location. If a task uses multiple // agents, all of them need to have status AVAILABLE for the task to run. If @@ -413,7 +413,7 @@ func (c *DataSync) CreateLocationNfsRequest(input *CreateLocationNfsInput) (req // CreateLocationNfs API operation for AWS DataSync. // // Defines a file system on a Network File System (NFS) server that can be read -// from or written to +// from or written to. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -451,6 +451,89 @@ func (c *DataSync) CreateLocationNfsWithContext(ctx aws.Context, input *CreateLo return out, req.Send() } +const opCreateLocationObjectStorage = "CreateLocationObjectStorage" + +// CreateLocationObjectStorageRequest generates a "aws/request.Request" representing the +// client's request for the CreateLocationObjectStorage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateLocationObjectStorage for more information on using the CreateLocationObjectStorage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLocationObjectStorageRequest method. +// req, resp := client.CreateLocationObjectStorageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationObjectStorage +func (c *DataSync) CreateLocationObjectStorageRequest(input *CreateLocationObjectStorageInput) (req *request.Request, output *CreateLocationObjectStorageOutput) { + op := &request.Operation{ + Name: opCreateLocationObjectStorage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLocationObjectStorageInput{} + } + + output = &CreateLocationObjectStorageOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLocationObjectStorage API operation for AWS DataSync. +// +// Creates an endpoint for a self-managed object storage bucket. For more information +// about self-managed object storage locations, see create-object-location. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation CreateLocationObjectStorage for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// * InternalException +// This exception is thrown when an error occurs in the AWS DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationObjectStorage +func (c *DataSync) CreateLocationObjectStorage(input *CreateLocationObjectStorageInput) (*CreateLocationObjectStorageOutput, error) { + req, out := c.CreateLocationObjectStorageRequest(input) + return out, req.Send() +} + +// CreateLocationObjectStorageWithContext is the same as CreateLocationObjectStorage with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLocationObjectStorage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *DataSync) CreateLocationObjectStorageWithContext(ctx aws.Context, input *CreateLocationObjectStorageInput, opts ...request.Option) (*CreateLocationObjectStorageOutput, error) { + req, out := c.CreateLocationObjectStorageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateLocationS3 = "CreateLocationS3" // CreateLocationS3Request generates a "aws/request.Request" representing the @@ -497,13 +580,7 @@ func (c *DataSync) CreateLocationS3Request(input *CreateLocationS3Input) (req *r // // Creates an endpoint for an Amazon S3 bucket. // -// For AWS DataSync to access a destination S3 bucket, it needs an AWS Identity -// and Access Management (IAM) role that has the required permissions. You can -// set up the required permissions by creating an IAM policy that grants the -// required permissions and attaching the policy to the role. An example of -// such a policy is shown in the examples section. -// -// For more information, see https://docs.aws.amazon.com/datasync/latest/userguide/working-with-locations.html#create-s3-location +// For more information, see https://docs.aws.amazon.com/datasync/latest/userguide/create-locations-cli.html#create-location-s3-cli // in the AWS DataSync User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -586,7 +663,7 @@ func (c *DataSync) CreateLocationSmbRequest(input *CreateLocationSmbInput) (req // CreateLocationSmb API operation for AWS DataSync. // -// Defines a file system on an Server Message Block (SMB) server that can be +// Defines a file system on a Server Message Block (SMB) server that can be // read from or written to. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -685,7 +762,7 @@ func (c *DataSync) CreateTaskRequest(input *CreateTaskInput) (req *request.Reque // remains in the CREATING status for more than a few minutes, it means that // your agent might be having trouble mounting the source NFS file system. Check // the task's ErrorCode and ErrorDetail. Mount issues are often caused by either -// a misconfigured firewall or a mistyped NFS server host name. +// a misconfigured firewall or a mistyped NFS server hostname. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1269,7 +1346,7 @@ func (c *DataSync) DescribeLocationNfsRequest(input *DescribeLocationNfsInput) ( // DescribeLocationNfs API operation for AWS DataSync. // -// Returns metadata, such as the path information, about a NFS location. +// Returns metadata, such as the path information, about an NFS location. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1307,6 +1384,89 @@ func (c *DataSync) DescribeLocationNfsWithContext(ctx aws.Context, input *Descri return out, req.Send() } +const opDescribeLocationObjectStorage = "DescribeLocationObjectStorage" + +// DescribeLocationObjectStorageRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLocationObjectStorage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLocationObjectStorage for more information on using the DescribeLocationObjectStorage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLocationObjectStorageRequest method. +// req, resp := client.DescribeLocationObjectStorageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationObjectStorage +func (c *DataSync) DescribeLocationObjectStorageRequest(input *DescribeLocationObjectStorageInput) (req *request.Request, output *DescribeLocationObjectStorageOutput) { + op := &request.Operation{ + Name: opDescribeLocationObjectStorage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLocationObjectStorageInput{} + } + + output = &DescribeLocationObjectStorageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeLocationObjectStorage API operation for AWS DataSync. +// +// Returns metadata about a self-managed object storage server location. For +// more information about self-managed object storage locations, see create-object-location. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS DataSync's +// API operation DescribeLocationObjectStorage for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// * InternalException +// This exception is thrown when an error occurs in the AWS DataSync service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationObjectStorage +func (c *DataSync) DescribeLocationObjectStorage(input *DescribeLocationObjectStorageInput) (*DescribeLocationObjectStorageOutput, error) { + req, out := c.DescribeLocationObjectStorageRequest(input) + return out, req.Send() +} + +// DescribeLocationObjectStorageWithContext is the same as DescribeLocationObjectStorage with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLocationObjectStorage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DataSync) DescribeLocationObjectStorageWithContext(ctx aws.Context, input *DescribeLocationObjectStorageInput, opts ...request.Option) (*DescribeLocationObjectStorageOutput, error) { + req, out := c.DescribeLocationObjectStorageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeLocationS3 = "DescribeLocationS3" // DescribeLocationS3Request generates a "aws/request.Request" representing the @@ -1433,7 +1593,7 @@ func (c *DataSync) DescribeLocationSmbRequest(input *DescribeLocationSmbInput) ( // DescribeLocationSmb API operation for AWS DataSync. 
// -// Returns metadata, such as the path and user information about a SMB location. +// Returns metadata, such as the path and user information about an SMB location. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1835,7 +1995,7 @@ func (c *DataSync) ListLocationsRequest(input *ListLocationsInput) (req *request // ListLocations API operation for AWS DataSync. // -// Returns a lists of source and destination locations. +// Returns a list of source and destination locations. // // If you have more locations than are returned in a response (that is, the // response returns only a truncated list of your agents), the response contains @@ -1980,7 +2140,7 @@ func (c *DataSync) ListTagsForResourceRequest(input *ListTagsForResourceInput) ( // ListTagsForResource API operation for AWS DataSync. // -// Returns all the tags associated with a specified resources. +// Returns all the tags associated with a specified resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2912,11 +3072,11 @@ type CreateAgentInput struct { // in UTF-8 format, and the following special characters: + - = . _ : / @. Tags []*TagListEntry `type:"list"` - // The ID of the VPC (Virtual Private Cloud) endpoint that the agent has access + // The ID of the VPC (virtual private cloud) endpoint that the agent has access // to. This is the client-side VPC endpoint, also called a PrivateLink. If you // don't have a PrivateLink VPC endpoint, see Creating a VPC Endpoint Service // Configuration (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html#create-endpoint-service) - // in the AWS VPC User Guide. + // in the Amazon VPC User Guide. // // VPC endpoint ID looks like this: vpce-01234d5aff67890e1. VpcEndpointId *string `type:"string"` @@ -3061,7 +3221,7 @@ type CreateLocationEfsInput struct { // system is used to read data from the EFS source location or write data to // the EFS destination. By default, AWS DataSync uses the root directory. // - // Subdirectory must be specified with forward slashes. For example /path/to/folder. + // Subdirectory must be specified with forward slashes. For example, /path/to/folder. Subdirectory *string `type:"string"` // The key-value pair that represents a tag that you want to add to the resource. @@ -3323,6 +3483,10 @@ type CreateLocationNfsInput struct { // Contains a list of Amazon Resource Names (ARNs) of agents that are used to // connect to an NFS server. // + // If you are copying data to or from your AWS Snowcone device, see NFS Server + // on AWS Snowcone (https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#nfs-on-snowcone) + // for more information. + // // OnPremConfig is a required field OnPremConfig *OnPremConfig `type:"structure" required:"true"` @@ -3330,6 +3494,10 @@ type CreateLocationNfsInput struct { // (DNS) name of the NFS server. An agent that is installed on-premises uses // this host name to mount the NFS server in a network. // + // If you are copying data to or from your AWS Snowcone device, see NFS Server + // on AWS Snowcone (https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#nfs-on-snowcone) + // for more information. + // // This name must either be DNS-compliant or must be an IP version 4 (IPv4) // address. 
// @@ -3354,6 +3522,10 @@ type CreateLocationNfsInput struct { // enables the agent to read the files. For the agent to access directories, // you must additionally enable all execute access. // + // If you are copying data to or from your AWS Snowcone device, see NFS Server + // on AWS Snowcone (https://docs.aws.amazon.com/datasync/latest/userguide/create-nfs-location.html#nfs-on-snowcone) + // for more information. + // // For information about NFS export configuration, see 18.7. The /etc/exports // Configuration File in the Red Hat Enterprise Linux documentation. // @@ -3464,11 +3636,204 @@ func (s *CreateLocationNfsOutput) SetLocationArn(v string) *CreateLocationNfsOut return s } +// CreateLocationObjectStorageRequest +type CreateLocationObjectStorageInput struct { + _ struct{} `type:"structure"` + + // Optional. The access key is used if credentials are required to access the + // self-managed object storage server. If your object storage requires a user + // name and password to authenticate, use AccessKey and SecretKey to provide + // the user name and password, respectively. + AccessKey *string `min:"8" type:"string"` + + // The Amazon Resource Name (ARN) of the agents associated with the self-managed + // object storage server location. + // + // AgentArns is a required field + AgentArns []*string `min:"1" type:"list" required:"true"` + + // The bucket on the self-managed object storage server that is used to read + // data from. + // + // BucketName is a required field + BucketName *string `min:"3" type:"string" required:"true"` + + // Optional. The secret key is used if credentials are required to access the + // self-managed object storage server. If your object storage requires a user + // name and password to authenticate, use AccessKey and SecretKey to provide + // the user name and password, respectively. + SecretKey *string `min:"8" type:"string" sensitive:"true"` + + // The name of the self-managed object storage server. This value is the IP + // address or Domain Name Service (DNS) name of the object storage server. An + // agent uses this host name to mount the object storage server in a network. + // + // ServerHostname is a required field + ServerHostname *string `type:"string" required:"true"` + + // The port that your self-managed object storage server accepts inbound network + // traffic on. The server port is set by default to TCP 80 (HTTP) or TCP 443 + // (HTTPS). You can specify a custom port if your self-managed object storage + // server requires one. + ServerPort *int64 `min:"1" type:"integer"` + + // The protocol that the object storage server uses to communicate. Valid values + // are HTTP or HTTPS. + ServerProtocol *string `type:"string" enum:"ObjectStorageServerProtocol"` + + // The subdirectory in the self-managed object storage server that is used to + // read data from. + Subdirectory *string `type:"string"` + + // The key-value pair that represents the tag that you want to add to the location. + // The value can be an empty string. We recommend using tags to name your resources. + Tags []*TagListEntry `type:"list"` +} + +// String returns the string representation +func (s CreateLocationObjectStorageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLocationObjectStorageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateLocationObjectStorageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLocationObjectStorageInput"} + if s.AccessKey != nil && len(*s.AccessKey) < 8 { + invalidParams.Add(request.NewErrParamMinLen("AccessKey", 8)) + } + if s.AgentArns == nil { + invalidParams.Add(request.NewErrParamRequired("AgentArns")) + } + if s.AgentArns != nil && len(s.AgentArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AgentArns", 1)) + } + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + if s.SecretKey != nil && len(*s.SecretKey) < 8 { + invalidParams.Add(request.NewErrParamMinLen("SecretKey", 8)) + } + if s.ServerHostname == nil { + invalidParams.Add(request.NewErrParamRequired("ServerHostname")) + } + if s.ServerPort != nil && *s.ServerPort < 1 { + invalidParams.Add(request.NewErrParamMinValue("ServerPort", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKey sets the AccessKey field's value. +func (s *CreateLocationObjectStorageInput) SetAccessKey(v string) *CreateLocationObjectStorageInput { + s.AccessKey = &v + return s +} + +// SetAgentArns sets the AgentArns field's value. +func (s *CreateLocationObjectStorageInput) SetAgentArns(v []*string) *CreateLocationObjectStorageInput { + s.AgentArns = v + return s +} + +// SetBucketName sets the BucketName field's value. +func (s *CreateLocationObjectStorageInput) SetBucketName(v string) *CreateLocationObjectStorageInput { + s.BucketName = &v + return s +} + +// SetSecretKey sets the SecretKey field's value. +func (s *CreateLocationObjectStorageInput) SetSecretKey(v string) *CreateLocationObjectStorageInput { + s.SecretKey = &v + return s +} + +// SetServerHostname sets the ServerHostname field's value. +func (s *CreateLocationObjectStorageInput) SetServerHostname(v string) *CreateLocationObjectStorageInput { + s.ServerHostname = &v + return s +} + +// SetServerPort sets the ServerPort field's value. +func (s *CreateLocationObjectStorageInput) SetServerPort(v int64) *CreateLocationObjectStorageInput { + s.ServerPort = &v + return s +} + +// SetServerProtocol sets the ServerProtocol field's value. +func (s *CreateLocationObjectStorageInput) SetServerProtocol(v string) *CreateLocationObjectStorageInput { + s.ServerProtocol = &v + return s +} + +// SetSubdirectory sets the Subdirectory field's value. +func (s *CreateLocationObjectStorageInput) SetSubdirectory(v string) *CreateLocationObjectStorageInput { + s.Subdirectory = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateLocationObjectStorageInput) SetTags(v []*TagListEntry) *CreateLocationObjectStorageInput { + s.Tags = v + return s +} + +// CreateLocationObjectStorageResponse +type CreateLocationObjectStorageOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the agents associated with the self-managed + // object storage server location. 
+ LocationArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateLocationObjectStorageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLocationObjectStorageOutput) GoString() string { + return s.String() +} + +// SetLocationArn sets the LocationArn field's value. +func (s *CreateLocationObjectStorageOutput) SetLocationArn(v string) *CreateLocationObjectStorageOutput { + s.LocationArn = &v + return s +} + // CreateLocationS3Request type CreateLocationS3Input struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the Amazon S3 bucket. + // If you are using DataSync on an AWS Outpost, specify the Amazon Resource + // Names (ARNs) of the DataSync agents deployed on your AWS Outpost. For more + // information about launching a DataSync agent on an Amazon Outpost, see outposts-agent. + AgentArns []*string `min:"1" type:"list"` + + // The Amazon Resource Name (ARN) of the Amazon S3 bucket. If the bucket is + // on an AWS Outpost, this must be an access point ARN. // // S3BucketArn is a required field S3BucketArn *string `type:"string" required:"true"` @@ -3483,11 +3848,14 @@ type CreateLocationS3Input struct { S3Config *S3Config `type:"structure" required:"true"` // The Amazon S3 storage class that you want to store your files in when this - // location is used as a task destination. For more information about S3 storage - // classes, see Amazon S3 Storage Classes (https://aws.amazon.com/s3/storage-classes/) - // in the Amazon Simple Storage Service Developer Guide. Some storage classes - // have behaviors that can affect your S3 storage cost. For detailed information, - // see using-storage-classes. + // location is used as a task destination. For buckets in AWS Regions, the storage + // class defaults to Standard. For buckets on AWS Outposts, the storage class + // defaults to AWS S3 Outposts. + // + // For more information about S3 storage classes, see Amazon S3 Storage Classes + // (https://aws.amazon.com/s3/storage-classes/) in the Amazon Simple Storage + // Service Developer Guide. Some storage classes have behaviors that can affect + // your S3 storage cost. For detailed information, see using-storage-classes. S3StorageClass *string `type:"string" enum:"S3StorageClass"` // A subdirectory in the Amazon S3 bucket. This subdirectory in Amazon S3 is @@ -3512,6 +3880,9 @@ func (s CreateLocationS3Input) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateLocationS3Input) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateLocationS3Input"} + if s.AgentArns != nil && len(s.AgentArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AgentArns", 1)) + } if s.S3BucketArn == nil { invalidParams.Add(request.NewErrParamRequired("S3BucketArn")) } @@ -3540,6 +3911,12 @@ func (s *CreateLocationS3Input) Validate() error { return nil } +// SetAgentArns sets the AgentArns field's value. +func (s *CreateLocationS3Input) SetAgentArns(v []*string) *CreateLocationS3Input { + s.AgentArns = v + return s +} + // SetS3BucketArn sets the S3BucketArn field's value. func (s *CreateLocationS3Input) SetS3BucketArn(v string) *CreateLocationS3Input { s.S3BucketArn = &v @@ -3633,7 +4010,7 @@ type CreateLocationSmbInput struct { // The path should be such that it can be mounted by other SMB clients in your // network. // - // Subdirectory must be specified with forward slashes. For example /path/to/folder. 
+ // Subdirectory must be specified with forward slashes. For example, /path/to/folder. // // To transfer all the data in the folder you specified, DataSync needs to have // permissions to mount the SMB share, as well as to access all the data in @@ -3785,12 +4162,6 @@ type CreateTaskInput struct { // The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that is // used to monitor and log events in the task. - // - // For more information on these groups, see Working with Log Groups and Log - // Streams in the Amazon CloudWatch User Guide. - // - // For more information about how to use CloudWatch Logs with DataSync, see - // Monitoring Your Task in the AWS DataSync User Guide. CloudWatchLogGroupArn *string `type:"string"` // The Amazon Resource Name (ARN) of an AWS storage resource's location. @@ -3814,7 +4185,7 @@ type CreateTaskInput struct { // file permissions, data integrity verification, and so on. // // For each individual task execution, you can override these options by specifying - // the OverrideOptions before starting a the task execution. For more information, + // the OverrideOptions before starting the task execution. For more information, // see the operation. Options *Options `type:"structure"` @@ -4164,7 +4535,7 @@ type DescribeAgentOutput struct { CreationTime *time.Time `type:"timestamp"` // The type of endpoint that your agent is connected to. If the endpoint is - // a VPC endpoint, the agent is not accessible over the public Internet. + // a VPC endpoint, the agent is not accessible over the public internet. EndpointType *string `type:"string" enum:"EndpointType"` // The time that the agent last connected to DataSyc. @@ -4288,7 +4659,7 @@ type DescribeLocationEfsOutput struct { // with the security group on the mount target in the subnet specified. Ec2Config *Ec2Config `type:"structure"` - // The Amazon resource Name (ARN) of the EFS location that was described. + // The Amazon Resource Name (ARN) of the EFS location that was described. LocationArn *string `type:"string"` // The URL of the EFS location that was described. @@ -4376,14 +4747,14 @@ type DescribeLocationFsxWindowsOutput struct { // The name of the Windows domain that the FSx for Windows server belongs to. Domain *string `type:"string"` - // The Amazon resource Name (ARN) of the FSx for Windows location that was described. + // The Amazon Resource Name (ARN) of the FSx for Windows location that was described. LocationArn *string `type:"string"` // The URL of the FSx for Windows location that was described. LocationUri *string `type:"string"` // The Amazon Resource Names (ARNs) of the security groups that are configured - // for the for the FSx for Windows file system. + // for the FSx for Windows file system. SecurityGroupArns []*string `min:"1" type:"list"` // The user who has the permissions to access files and folders in the FSx for @@ -4441,7 +4812,7 @@ func (s *DescribeLocationFsxWindowsOutput) SetUser(v string) *DescribeLocationFs type DescribeLocationNfsInput struct { _ struct{} `type:"structure"` - // The Amazon resource Name (ARN) of the NFS location to describe. + // The Amazon Resource Name (ARN) of the NFS location to describe. // // LocationArn is a required field LocationArn *string `type:"string" required:"true"` @@ -4483,7 +4854,7 @@ type DescribeLocationNfsOutput struct { // The time that the NFS location was created. CreationTime *time.Time `type:"timestamp"` - // The Amazon resource Name (ARN) of the NFS location that was described. 
+ // The Amazon Resource Name (ARN) of the NFS location that was described. LocationArn *string `type:"string"` // The URL of the source NFS location that was described. @@ -4537,6 +4908,133 @@ func (s *DescribeLocationNfsOutput) SetOnPremConfig(v *OnPremConfig) *DescribeLo return s } +// DescribeLocationObjectStorageRequest +type DescribeLocationObjectStorageInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the self-managed object storage server + // location that was described. + // + // LocationArn is a required field + LocationArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeLocationObjectStorageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationObjectStorageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLocationObjectStorageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLocationObjectStorageInput"} + if s.LocationArn == nil { + invalidParams.Add(request.NewErrParamRequired("LocationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLocationArn sets the LocationArn field's value. +func (s *DescribeLocationObjectStorageInput) SetLocationArn(v string) *DescribeLocationObjectStorageInput { + s.LocationArn = &v + return s +} + +// DescribeLocationObjectStorageResponse +type DescribeLocationObjectStorageOutput struct { + _ struct{} `type:"structure"` + + // Optional. The access key is used if credentials are required to access the + // self-managed object storage server. If your object storage requires a user + // name and password to authenticate, use AccessKey and SecretKey to provide + // the user name and password, respectively. + AccessKey *string `min:"8" type:"string"` + + // The Amazon Resource Name (ARN) of the agents associated with the self-managed + // object storage server location. + AgentArns []*string `min:"1" type:"list"` + + // The time that the self-managed object storage server agent was created. + CreationTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of the self-managed object storage server + // location to describe. + LocationArn *string `type:"string"` + + // The URL of the source self-managed object storage server location that was + // described. + LocationUri *string `type:"string"` + + // The port that your self-managed object storage server accepts inbound network + // traffic on. The server port is set by default to TCP 80 (HTTP) or TCP 443 + // (HTTPS). + ServerPort *int64 `min:"1" type:"integer"` + + // The protocol that the object storage server uses to communicate. Valid values + // are HTTP or HTTPS. + ServerProtocol *string `type:"string" enum:"ObjectStorageServerProtocol"` +} + +// String returns the string representation +func (s DescribeLocationObjectStorageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLocationObjectStorageOutput) GoString() string { + return s.String() +} + +// SetAccessKey sets the AccessKey field's value. +func (s *DescribeLocationObjectStorageOutput) SetAccessKey(v string) *DescribeLocationObjectStorageOutput { + s.AccessKey = &v + return s +} + +// SetAgentArns sets the AgentArns field's value. 
+func (s *DescribeLocationObjectStorageOutput) SetAgentArns(v []*string) *DescribeLocationObjectStorageOutput { + s.AgentArns = v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *DescribeLocationObjectStorageOutput) SetCreationTime(v time.Time) *DescribeLocationObjectStorageOutput { + s.CreationTime = &v + return s +} + +// SetLocationArn sets the LocationArn field's value. +func (s *DescribeLocationObjectStorageOutput) SetLocationArn(v string) *DescribeLocationObjectStorageOutput { + s.LocationArn = &v + return s +} + +// SetLocationUri sets the LocationUri field's value. +func (s *DescribeLocationObjectStorageOutput) SetLocationUri(v string) *DescribeLocationObjectStorageOutput { + s.LocationUri = &v + return s +} + +// SetServerPort sets the ServerPort field's value. +func (s *DescribeLocationObjectStorageOutput) SetServerPort(v int64) *DescribeLocationObjectStorageOutput { + s.ServerPort = &v + return s +} + +// SetServerProtocol sets the ServerProtocol field's value. +func (s *DescribeLocationObjectStorageOutput) SetServerProtocol(v string) *DescribeLocationObjectStorageOutput { + s.ServerProtocol = &v + return s +} + // DescribeLocationS3Request type DescribeLocationS3Input struct { _ struct{} `type:"structure"` @@ -4580,10 +5078,15 @@ func (s *DescribeLocationS3Input) SetLocationArn(v string) *DescribeLocationS3In type DescribeLocationS3Output struct { _ struct{} `type:"structure"` + // If you are using DataSync on an Amazon Outpost, the Amazon Resource Name + // (ARNs) of the EC2 agents deployed on your AWS Outpost. For more information + // about launching a DataSync agent on an Amazon Outpost, see outposts-agent. + AgentArns []*string `min:"1" type:"list"` + // The time that the Amazon S3 bucket location was created. CreationTime *time.Time `type:"timestamp"` - // The Amazon Resource Name (ARN) of the Amazon S3 bucket location. + // The Amazon Resource Name (ARN) of the Amazon S3 bucket or access point. LocationArn *string `type:"string"` // The URL of the Amazon S3 location that was described. @@ -4615,6 +5118,12 @@ func (s DescribeLocationS3Output) GoString() string { return s.String() } +// SetAgentArns sets the AgentArns field's value. +func (s *DescribeLocationS3Output) SetAgentArns(v []*string) *DescribeLocationS3Output { + s.AgentArns = v + return s +} + // SetCreationTime sets the CreationTime field's value. func (s *DescribeLocationS3Output) SetCreationTime(v time.Time) *DescribeLocationS3Output { s.CreationTime = &v @@ -4649,7 +5158,7 @@ func (s *DescribeLocationS3Output) SetS3StorageClass(v string) *DescribeLocation type DescribeLocationSmbInput struct { _ struct{} `type:"structure"` - // The Amazon resource Name (ARN) of the SMB location to describe. + // The Amazon Resource Name (ARN) of the SMB location to describe. // // LocationArn is a required field LocationArn *string `type:"string" required:"true"` @@ -4698,7 +5207,7 @@ type DescribeLocationSmbOutput struct { // The name of the Windows domain that the SMB server belongs to. Domain *string `type:"string"` - // The Amazon resource Name (ARN) of the SMB location that was described. + // The Amazon Resource Name (ARN) of the SMB location that was described. LocationArn *string `type:"string"` // The URL of the source SBM location that was described. @@ -5272,8 +5781,8 @@ func (s *FilterRule) SetValue(v string) *FilterRule { // This exception is thrown when an error occurs in the AWS DataSync service. 
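//
// As the operation documentation above notes, callers can inspect failures
// through the awserr.Error interface. A minimal sketch, assuming a configured
// DataSync client and a previously built input (both placeholders here):
//
//    _, err := client.CreateLocationObjectStorage(input)
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case "InvalidRequestException":
//            // the request was malformed; fix the input and retry
//        case "InternalException":
//            // a service-side error occurred
//        }
//    }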
type InternalException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` ErrorCode *string `locationName:"errorCode" type:"string"` @@ -5292,17 +5801,17 @@ func (s InternalException) GoString() string { func newErrorInternalException(v protocol.ResponseMetadata) error { return &InternalException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalException) Code() string { +func (s *InternalException) Code() string { return "InternalException" } // Message returns the exception's message. -func (s InternalException) Message() string { +func (s *InternalException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5310,28 +5819,28 @@ func (s InternalException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalException) OrigErr() error { +func (s *InternalException) OrigErr() error { return nil } -func (s InternalException) Error() string { +func (s *InternalException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the client submits a malformed request. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` ErrorCode *string `locationName:"errorCode" type:"string"` @@ -5350,17 +5859,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5368,22 +5877,22 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // ListAgentsRequest @@ -5458,6 +5967,11 @@ func (s *ListAgentsOutput) SetNextToken(v string) *ListAgentsOutput { type ListLocationsInput struct { _ struct{} `type:"structure"` + // You can use API filters to narrow down the list of resources returned by + // ListLocations. For example, to retrieve all tasks on a specific source location, + // you can use ListLocations with filter name LocationType S3 and Operator Equals. + Filters []*LocationFilter `type:"list"` + // The maximum number of locations to return. MaxResults *int64 `type:"integer"` @@ -5476,6 +5990,32 @@ func (s ListLocationsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListLocationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListLocationsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *ListLocationsInput) SetFilters(v []*LocationFilter) *ListLocationsInput { + s.Filters = v + return s +} + // SetMaxResults sets the MaxResults field's value. func (s *ListLocationsInput) SetMaxResults(v int64) *ListLocationsInput { s.MaxResults = &v @@ -5695,6 +6235,12 @@ func (s *ListTaskExecutionsOutput) SetTaskExecutions(v []*TaskExecutionListEntry type ListTasksInput struct { _ struct{} `type:"structure"` + // You can use API filters to narrow down the list of resources returned by + // ListTasks. For example, to retrieve all tasks on a specific source location, + // you can use ListTasks with filter name LocationId and Operator Equals with + // the ARN for the location. + Filters []*TaskFilter `type:"list"` + // The maximum number of tasks to return. MaxResults *int64 `type:"integer"` @@ -5713,6 +6259,32 @@ func (s ListTasksInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTasksInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTasksInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *ListTasksInput) SetFilters(v []*TaskFilter) *ListTasksInput { + s.Filters = v + return s +} + // SetMaxResults sets the MaxResults field's value. func (s *ListTasksInput) SetMaxResults(v int64) *ListTasksInput { s.MaxResults = &v @@ -5759,6 +6331,78 @@ func (s *ListTasksOutput) SetTasks(v []*TaskListEntry) *ListTasksOutput { return s } +// You can use API filters to narrow down the list of resources returned by +// ListLocations. For example, to retrieve all your Amazon S3 locations, you +// can use ListLocations with filter name LocationType S3 and Operator Equals. +type LocationFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter being used. 
Each API call supports a list of filters + // that are available for it (for example, LocationType for ListLocations). + // + // Name is a required field + Name *string `type:"string" required:"true" enum:"LocationFilterName"` + + // The operator that is used to compare filter values (for example, Equals or + // Contains). For more about API filtering operators, see query-resources. + // + // Operator is a required field + Operator *string `type:"string" required:"true" enum:"Operator"` + + // The values that you want to filter for. For example, you might want to display + // only Amazon S3 locations. + // + // Values is a required field + Values []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s LocationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LocationFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LocationFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LocationFilter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Operator == nil { + invalidParams.Add(request.NewErrParamRequired("Operator")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *LocationFilter) SetName(v string) *LocationFilter { + s.Name = &v + return s +} + +// SetOperator sets the Operator field's value. +func (s *LocationFilter) SetOperator(v string) *LocationFilter { + s.Operator = &v + return s +} + +// SetValues sets the Values field's value. +func (s *LocationFilter) SetValues(v []*string) *LocationFilter { + s.Values = v + return s +} + // Represents a single entry in a list of locations. LocationListEntry returns // an array that contains a list of locations when the ListLocations operation // is called. @@ -5857,7 +6501,7 @@ func (s *NfsMountOptions) SetVersion(v string) *NfsMountOptions { type OnPremConfig struct { _ struct{} `type:"structure"` - // ARNs)of the agents to use for an NFS location. + // ARNs of the agents to use for an NFS location. // // AgentArns is a required field AgentArns []*string `min:"1" type:"list" required:"true"` @@ -5938,10 +6582,12 @@ type Options struct { // NONE: Ignore UID and GID. Gid *string `type:"string" enum:"Gid"` - // A value that determines the type of logs DataSync will deliver to your AWS - // CloudWatch Logs file. If set to OFF, no logs will be delivered. BASIC will - // deliver a few logs per transfer operation and TRANSFER will deliver a verbose - // log that contains logs for every file that is transferred. + // A value that determines the type of logs that DataSync publishes to a log + // stream in the Amazon CloudWatch log group that you provide. For more information + // about providing a log group for DataSync, see CloudWatchLogGroupArn (https://docs.aws.amazon.com/datasync/latest/userguide/API_CreateTask.html#DataSync-CreateTask-request-CloudWatchLogGroupArn). + // If set to OFF, no logs are published. BASIC publishes logs on errors for + // individual files transferred, and TRANSFER publishes logs for every file + // or object that is transferred and integrity checked. 
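+ //
+ // For example (a sketch with illustrative values only), a task that publishes
+ // per-file transfer logs, copies only changed data, and verifies only the
+ // files it transferred could set:
+ //
+ //    opts := &datasync.Options{
+ //        LogLevel:     aws.String(datasync.LogLevelTransfer),
+ //        TransferMode: aws.String(datasync.TransferModeChanged),
+ //        VerifyMode:   aws.String(datasync.VerifyModeOnlyFilesTransferred),
+ //    }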
LogLevel *string `type:"string" enum:"LogLevel"` // A value that indicates the last time that a file was modified (that is, a @@ -6012,10 +6658,22 @@ type Options struct { // A value that determines whether tasks should be queued before executing the // tasks. If set to ENABLED, the tasks will be queued. The default is ENABLED. // - // If you use the same agent to run multiple tasks you can enable the tasks - // to run in series. For more information see queue-task-execution. + // If you use the same agent to run multiple tasks, you can enable the tasks + // to run in series. For more information, see queue-task-execution. TaskQueueing *string `type:"string" enum:"TaskQueueing"` + // A value that determines whether DataSync transfers only the data and metadata + // that differ between the source and the destination location, or whether DataSync + // transfers all the content from the source, without comparing to the destination + // location. + // + // CHANGED: DataSync copies only data or metadata that is new or different content + // from the source location to the destination location. + // + // ALL: DataSync copies all source location content to the destination, without + // comparing to existing content on the destination. + TransferMode *string `type:"string" enum:"TransferMode"` + // The user ID (UID) of the file's owner. // // Default value: INT_VALUE. This preserves the integer value of the ID. @@ -6027,14 +6685,21 @@ type Options struct { // A value that determines whether a data integrity verification should be performed // at the end of a task execution after all data and metadata have been transferred. + // For more information, see create-task // // Default value: POINT_IN_TIME_CONSISTENT. // - // POINT_IN_TIME_CONSISTENT: Perform verification (recommended). + // ONLY_FILES_TRANSFERRED (recommended): Perform verification only on files + // that were transferred. // - // ONLY_FILES_TRANSFERRED: Perform verification on only files that were transferred. + // POINT_IN_TIME_CONSISTENT: Scan the entire source and entire destination at + // the end of the transfer to verify that source and destination are fully synchronized. + // This option isn't supported when transferring to S3 Glacier or S3 Glacier + // Deep Archive storage classes. // - // NONE: Skip verification. + // NONE: No additional verification is done at the end of the transfer, but + // all data transmissions are integrity-checked with checksum verification during + // the transfer. VerifyMode *string `type:"string" enum:"VerifyMode"` } @@ -6121,6 +6786,12 @@ func (s *Options) SetTaskQueueing(v string) *Options { return s } +// SetTransferMode sets the TransferMode field's value. +func (s *Options) SetTransferMode(v string) *Options { + s.TransferMode = &v + return s +} + // SetUid sets the Uid field's value. func (s *Options) SetUid(v string) *Options { s.Uid = &v @@ -6133,7 +6804,7 @@ func (s *Options) SetVerifyMode(v string) *Options { return s } -// The VPC endpoint, subnet and security group that an agent uses to access +// The VPC endpoint, subnet, and security group that an agent uses to access // IP addresses in a VPC (Virtual Private Cloud). type PrivateLinkConfig struct { _ struct{} `type:"structure"` @@ -6141,7 +6812,7 @@ type PrivateLinkConfig struct { // The private endpoint that is configured for an agent that has access to IP // addresses in a PrivateLink (https://docs.aws.amazon.com/vpc/latest/userguide/endpoint-service.html). 
// An agent that is configured with this endpoint will not be accessible over - // the public Internet. + // the public internet. PrivateLinkEndpoint *string `min:"7" type:"string"` // The Amazon Resource Names (ARNs) of the security groups that are configured @@ -6155,7 +6826,7 @@ type PrivateLinkConfig struct { // The ID of the VPC endpoint that is configured for an agent. An agent that // is configured with a VPC endpoint will not be accessible over the public - // Internet. + // internet. VpcEndpointId *string `type:"string"` } @@ -6632,6 +7303,79 @@ func (s *TaskExecutionResultDetail) SetVerifyStatus(v string) *TaskExecutionResu return s } +// You can use API filters to narrow down the list of resources returned by +// ListTasks. For example, to retrieve all tasks on a source location, you can +// use ListTasks with filter name LocationId and Operator Equals with the ARN +// for the location. +type TaskFilter struct { + _ struct{} `type:"structure"` + + // The name of the filter being used. Each API call supports a list of filters + // that are available for it. For example, LocationId for ListTasks. + // + // Name is a required field + Name *string `type:"string" required:"true" enum:"TaskFilterName"` + + // The operator that is used to compare filter values (for example, Equals or + // Contains). For more about API filtering operators, see query-resources. + // + // Operator is a required field + Operator *string `type:"string" required:"true" enum:"Operator"` + + // The values that you want to filter for. For example, you might want to display + // only tasks for a specific destination location. + // + // Values is a required field + Values []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s TaskFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TaskFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TaskFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TaskFilter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Operator == nil { + invalidParams.Add(request.NewErrParamRequired("Operator")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *TaskFilter) SetName(v string) *TaskFilter { + s.Name = &v + return s +} + +// SetOperator sets the Operator field's value. +func (s *TaskFilter) SetOperator(v string) *TaskFilter { + s.Operator = &v + return s +} + +// SetValues sets the Values field's value. +func (s *TaskFilter) SetValues(v []*string) *TaskFilter { + s.Values = v + return s +} + // Represents a single entry in a list of tasks. TaskListEntry returns an array // that contains a list of tasks when the ListTasks operation is called. 
A task // includes the source and destination file systems to sync and the options @@ -6987,6 +7731,14 @@ const ( AgentStatusOffline = "OFFLINE" ) +// AgentStatus_Values returns all elements of the AgentStatus enum +func AgentStatus_Values() []string { + return []string{ + AgentStatusOnline, + AgentStatusOffline, + } +} + const ( // AtimeNone is a Atime enum value AtimeNone = "NONE" @@ -6995,6 +7747,14 @@ const ( AtimeBestEffort = "BEST_EFFORT" ) +// Atime_Values returns all elements of the Atime enum +func Atime_Values() []string { + return []string{ + AtimeNone, + AtimeBestEffort, + } +} + const ( // EndpointTypePublic is a EndpointType enum value EndpointTypePublic = "PUBLIC" @@ -7006,11 +7766,27 @@ const ( EndpointTypeFips = "FIPS" ) +// EndpointType_Values returns all elements of the EndpointType enum +func EndpointType_Values() []string { + return []string{ + EndpointTypePublic, + EndpointTypePrivateLink, + EndpointTypeFips, + } +} + const ( // FilterTypeSimplePattern is a FilterType enum value FilterTypeSimplePattern = "SIMPLE_PATTERN" ) +// FilterType_Values returns all elements of the FilterType enum +func FilterType_Values() []string { + return []string{ + FilterTypeSimplePattern, + } +} + const ( // GidNone is a Gid enum value GidNone = "NONE" @@ -7025,6 +7801,36 @@ const ( GidBoth = "BOTH" ) +// Gid_Values returns all elements of the Gid enum +func Gid_Values() []string { + return []string{ + GidNone, + GidIntValue, + GidName, + GidBoth, + } +} + +const ( + // LocationFilterNameLocationUri is a LocationFilterName enum value + LocationFilterNameLocationUri = "LocationUri" + + // LocationFilterNameLocationType is a LocationFilterName enum value + LocationFilterNameLocationType = "LocationType" + + // LocationFilterNameCreationTime is a LocationFilterName enum value + LocationFilterNameCreationTime = "CreationTime" +) + +// LocationFilterName_Values returns all elements of the LocationFilterName enum +func LocationFilterName_Values() []string { + return []string{ + LocationFilterNameLocationUri, + LocationFilterNameLocationType, + LocationFilterNameCreationTime, + } +} + const ( // LogLevelOff is a LogLevel enum value LogLevelOff = "OFF" @@ -7036,6 +7842,15 @@ const ( LogLevelTransfer = "TRANSFER" ) +// LogLevel_Values returns all elements of the LogLevel enum +func LogLevel_Values() []string { + return []string{ + LogLevelOff, + LogLevelBasic, + LogLevelTransfer, + } +} + const ( // MtimeNone is a Mtime enum value MtimeNone = "NONE" @@ -7044,6 +7859,14 @@ const ( MtimePreserve = "PRESERVE" ) +// Mtime_Values returns all elements of the Mtime enum +func Mtime_Values() []string { + return []string{ + MtimeNone, + MtimePreserve, + } +} + const ( // NfsVersionAutomatic is a NfsVersion enum value NfsVersionAutomatic = "AUTOMATIC" @@ -7058,6 +7881,80 @@ const ( NfsVersionNfs41 = "NFS4_1" ) +// NfsVersion_Values returns all elements of the NfsVersion enum +func NfsVersion_Values() []string { + return []string{ + NfsVersionAutomatic, + NfsVersionNfs3, + NfsVersionNfs40, + NfsVersionNfs41, + } +} + +const ( + // ObjectStorageServerProtocolHttps is a ObjectStorageServerProtocol enum value + ObjectStorageServerProtocolHttps = "HTTPS" + + // ObjectStorageServerProtocolHttp is a ObjectStorageServerProtocol enum value + ObjectStorageServerProtocolHttp = "HTTP" +) + +// ObjectStorageServerProtocol_Values returns all elements of the ObjectStorageServerProtocol enum +func ObjectStorageServerProtocol_Values() []string { + return []string{ + ObjectStorageServerProtocolHttps, + 
ObjectStorageServerProtocolHttp, + } +} + +const ( + // OperatorEquals is a Operator enum value + OperatorEquals = "Equals" + + // OperatorNotEquals is a Operator enum value + OperatorNotEquals = "NotEquals" + + // OperatorIn is a Operator enum value + OperatorIn = "In" + + // OperatorLessThanOrEqual is a Operator enum value + OperatorLessThanOrEqual = "LessThanOrEqual" + + // OperatorLessThan is a Operator enum value + OperatorLessThan = "LessThan" + + // OperatorGreaterThanOrEqual is a Operator enum value + OperatorGreaterThanOrEqual = "GreaterThanOrEqual" + + // OperatorGreaterThan is a Operator enum value + OperatorGreaterThan = "GreaterThan" + + // OperatorContains is a Operator enum value + OperatorContains = "Contains" + + // OperatorNotContains is a Operator enum value + OperatorNotContains = "NotContains" + + // OperatorBeginsWith is a Operator enum value + OperatorBeginsWith = "BeginsWith" +) + +// Operator_Values returns all elements of the Operator enum +func Operator_Values() []string { + return []string{ + OperatorEquals, + OperatorNotEquals, + OperatorIn, + OperatorLessThanOrEqual, + OperatorLessThan, + OperatorGreaterThanOrEqual, + OperatorGreaterThan, + OperatorContains, + OperatorNotContains, + OperatorBeginsWith, + } +} + const ( // OverwriteModeAlways is a OverwriteMode enum value OverwriteModeAlways = "ALWAYS" @@ -7066,6 +7963,14 @@ const ( OverwriteModeNever = "NEVER" ) +// OverwriteMode_Values returns all elements of the OverwriteMode enum +func OverwriteMode_Values() []string { + return []string{ + OverwriteModeAlways, + OverwriteModeNever, + } +} + const ( // PhaseStatusPending is a PhaseStatus enum value PhaseStatusPending = "PENDING" @@ -7077,6 +7982,15 @@ const ( PhaseStatusError = "ERROR" ) +// PhaseStatus_Values returns all elements of the PhaseStatus enum +func PhaseStatus_Values() []string { + return []string{ + PhaseStatusPending, + PhaseStatusSuccess, + PhaseStatusError, + } +} + const ( // PosixPermissionsNone is a PosixPermissions enum value PosixPermissionsNone = "NONE" @@ -7085,6 +7999,14 @@ const ( PosixPermissionsPreserve = "PRESERVE" ) +// PosixPermissions_Values returns all elements of the PosixPermissions enum +func PosixPermissions_Values() []string { + return []string{ + PosixPermissionsNone, + PosixPermissionsPreserve, + } +} + const ( // PreserveDeletedFilesPreserve is a PreserveDeletedFiles enum value PreserveDeletedFilesPreserve = "PRESERVE" @@ -7093,6 +8015,14 @@ const ( PreserveDeletedFilesRemove = "REMOVE" ) +// PreserveDeletedFiles_Values returns all elements of the PreserveDeletedFiles enum +func PreserveDeletedFiles_Values() []string { + return []string{ + PreserveDeletedFilesPreserve, + PreserveDeletedFilesRemove, + } +} + const ( // PreserveDevicesNone is a PreserveDevices enum value PreserveDevicesNone = "NONE" @@ -7101,6 +8031,14 @@ const ( PreserveDevicesPreserve = "PRESERVE" ) +// PreserveDevices_Values returns all elements of the PreserveDevices enum +func PreserveDevices_Values() []string { + return []string{ + PreserveDevicesNone, + PreserveDevicesPreserve, + } +} + const ( // S3StorageClassStandard is a S3StorageClass enum value S3StorageClassStandard = "STANDARD" @@ -7119,8 +8057,24 @@ const ( // S3StorageClassDeepArchive is a S3StorageClass enum value S3StorageClassDeepArchive = "DEEP_ARCHIVE" + + // S3StorageClassOutposts is a S3StorageClass enum value + S3StorageClassOutposts = "OUTPOSTS" ) +// S3StorageClass_Values returns all elements of the S3StorageClass enum +func S3StorageClass_Values() []string { + return []string{ 
+ S3StorageClassStandard, + S3StorageClassStandardIa, + S3StorageClassOnezoneIa, + S3StorageClassIntelligentTiering, + S3StorageClassGlacier, + S3StorageClassDeepArchive, + S3StorageClassOutposts, + } +} + const ( // SmbVersionAutomatic is a SmbVersion enum value SmbVersionAutomatic = "AUTOMATIC" @@ -7132,6 +8086,15 @@ const ( SmbVersionSmb3 = "SMB3" ) +// SmbVersion_Values returns all elements of the SmbVersion enum +func SmbVersion_Values() []string { + return []string{ + SmbVersionAutomatic, + SmbVersionSmb2, + SmbVersionSmb3, + } +} + const ( // TaskExecutionStatusQueued is a TaskExecutionStatus enum value TaskExecutionStatusQueued = "QUEUED" @@ -7155,6 +8118,35 @@ const ( TaskExecutionStatusError = "ERROR" ) +// TaskExecutionStatus_Values returns all elements of the TaskExecutionStatus enum +func TaskExecutionStatus_Values() []string { + return []string{ + TaskExecutionStatusQueued, + TaskExecutionStatusLaunching, + TaskExecutionStatusPreparing, + TaskExecutionStatusTransferring, + TaskExecutionStatusVerifying, + TaskExecutionStatusSuccess, + TaskExecutionStatusError, + } +} + +const ( + // TaskFilterNameLocationId is a TaskFilterName enum value + TaskFilterNameLocationId = "LocationId" + + // TaskFilterNameCreationTime is a TaskFilterName enum value + TaskFilterNameCreationTime = "CreationTime" +) + +// TaskFilterName_Values returns all elements of the TaskFilterName enum +func TaskFilterName_Values() []string { + return []string{ + TaskFilterNameLocationId, + TaskFilterNameCreationTime, + } +} + const ( // TaskQueueingEnabled is a TaskQueueing enum value TaskQueueingEnabled = "ENABLED" @@ -7163,6 +8155,14 @@ const ( TaskQueueingDisabled = "DISABLED" ) +// TaskQueueing_Values returns all elements of the TaskQueueing enum +func TaskQueueing_Values() []string { + return []string{ + TaskQueueingEnabled, + TaskQueueingDisabled, + } +} + const ( // TaskStatusAvailable is a TaskStatus enum value TaskStatusAvailable = "AVAILABLE" @@ -7180,6 +8180,33 @@ const ( TaskStatusUnavailable = "UNAVAILABLE" ) +// TaskStatus_Values returns all elements of the TaskStatus enum +func TaskStatus_Values() []string { + return []string{ + TaskStatusAvailable, + TaskStatusCreating, + TaskStatusQueued, + TaskStatusRunning, + TaskStatusUnavailable, + } +} + +const ( + // TransferModeChanged is a TransferMode enum value + TransferModeChanged = "CHANGED" + + // TransferModeAll is a TransferMode enum value + TransferModeAll = "ALL" +) + +// TransferMode_Values returns all elements of the TransferMode enum +func TransferMode_Values() []string { + return []string{ + TransferModeChanged, + TransferModeAll, + } +} + const ( // UidNone is a Uid enum value UidNone = "NONE" @@ -7194,6 +8221,16 @@ const ( UidBoth = "BOTH" ) +// Uid_Values returns all elements of the Uid enum +func Uid_Values() []string { + return []string{ + UidNone, + UidIntValue, + UidName, + UidBoth, + } +} + const ( // VerifyModePointInTimeConsistent is a VerifyMode enum value VerifyModePointInTimeConsistent = "POINT_IN_TIME_CONSISTENT" @@ -7204,3 +8241,12 @@ const ( // VerifyModeNone is a VerifyMode enum value VerifyModeNone = "NONE" ) + +// VerifyMode_Values returns all elements of the VerifyMode enum +func VerifyMode_Values() []string { + return []string{ + VerifyModePointInTimeConsistent, + VerifyModeOnlyFilesTransferred, + VerifyModeNone, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go b/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go index 2e96c6fbb..aa64e77bc 100644 --- 
a/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dax/api.go b/vendor/github.com/aws/aws-sdk-go/service/dax/api.go index b9c534388..1d3195713 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dax/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dax/api.go @@ -2188,8 +2188,8 @@ func (s *Cluster) SetTotalNodes(v int64) *Cluster { // You already have a DAX cluster with the given identifier. type ClusterAlreadyExistsFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2206,17 +2206,17 @@ func (s ClusterAlreadyExistsFault) GoString() string { func newErrorClusterAlreadyExistsFault(v protocol.ResponseMetadata) error { return &ClusterAlreadyExistsFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClusterAlreadyExistsFault) Code() string { +func (s *ClusterAlreadyExistsFault) Code() string { return "ClusterAlreadyExistsFault" } // Message returns the exception's message. -func (s ClusterAlreadyExistsFault) Message() string { +func (s *ClusterAlreadyExistsFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2224,28 +2224,28 @@ func (s ClusterAlreadyExistsFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClusterAlreadyExistsFault) OrigErr() error { +func (s *ClusterAlreadyExistsFault) OrigErr() error { return nil } -func (s ClusterAlreadyExistsFault) Error() string { +func (s *ClusterAlreadyExistsFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClusterAlreadyExistsFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClusterAlreadyExistsFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClusterAlreadyExistsFault) RequestID() string { - return s.respMetadata.RequestID +func (s *ClusterAlreadyExistsFault) RequestID() string { + return s.RespMetadata.RequestID } // The requested cluster ID does not refer to an existing DAX cluster. type ClusterNotFoundFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2262,17 +2262,17 @@ func (s ClusterNotFoundFault) GoString() string { func newErrorClusterNotFoundFault(v protocol.ResponseMetadata) error { return &ClusterNotFoundFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClusterNotFoundFault) Code() string { +func (s *ClusterNotFoundFault) Code() string { return "ClusterNotFoundFault" } // Message returns the exception's message. 
-func (s ClusterNotFoundFault) Message() string { +func (s *ClusterNotFoundFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2280,29 +2280,29 @@ func (s ClusterNotFoundFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClusterNotFoundFault) OrigErr() error { +func (s *ClusterNotFoundFault) OrigErr() error { return nil } -func (s ClusterNotFoundFault) Error() string { +func (s *ClusterNotFoundFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClusterNotFoundFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClusterNotFoundFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClusterNotFoundFault) RequestID() string { - return s.respMetadata.RequestID +func (s *ClusterNotFoundFault) RequestID() string { + return s.RespMetadata.RequestID } // You have attempted to exceed the maximum number of DAX clusters for your // AWS account. type ClusterQuotaForCustomerExceededFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2319,17 +2319,17 @@ func (s ClusterQuotaForCustomerExceededFault) GoString() string { func newErrorClusterQuotaForCustomerExceededFault(v protocol.ResponseMetadata) error { return &ClusterQuotaForCustomerExceededFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClusterQuotaForCustomerExceededFault) Code() string { +func (s *ClusterQuotaForCustomerExceededFault) Code() string { return "ClusterQuotaForCustomerExceededFault" } // Message returns the exception's message. -func (s ClusterQuotaForCustomerExceededFault) Message() string { +func (s *ClusterQuotaForCustomerExceededFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2337,22 +2337,22 @@ func (s ClusterQuotaForCustomerExceededFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClusterQuotaForCustomerExceededFault) OrigErr() error { +func (s *ClusterQuotaForCustomerExceededFault) OrigErr() error { return nil } -func (s ClusterQuotaForCustomerExceededFault) Error() string { +func (s *ClusterQuotaForCustomerExceededFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClusterQuotaForCustomerExceededFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClusterQuotaForCustomerExceededFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClusterQuotaForCustomerExceededFault) RequestID() string { - return s.respMetadata.RequestID +func (s *ClusterQuotaForCustomerExceededFault) RequestID() string { + return s.RespMetadata.RequestID } type CreateClusterInput struct { @@ -3744,8 +3744,8 @@ func (s *IncreaseReplicationFactorOutput) SetCluster(v *Cluster) *IncreaseReplic // There are not enough system resources to create the cluster you requested // (or to resize an already-existing cluster). 
type InsufficientClusterCapacityFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3762,17 +3762,17 @@ func (s InsufficientClusterCapacityFault) GoString() string { func newErrorInsufficientClusterCapacityFault(v protocol.ResponseMetadata) error { return &InsufficientClusterCapacityFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InsufficientClusterCapacityFault) Code() string { +func (s *InsufficientClusterCapacityFault) Code() string { return "InsufficientClusterCapacityFault" } // Message returns the exception's message. -func (s InsufficientClusterCapacityFault) Message() string { +func (s *InsufficientClusterCapacityFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3780,28 +3780,28 @@ func (s InsufficientClusterCapacityFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InsufficientClusterCapacityFault) OrigErr() error { +func (s *InsufficientClusterCapacityFault) OrigErr() error { return nil } -func (s InsufficientClusterCapacityFault) Error() string { +func (s *InsufficientClusterCapacityFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InsufficientClusterCapacityFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InsufficientClusterCapacityFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InsufficientClusterCapacityFault) RequestID() string { - return s.respMetadata.RequestID +func (s *InsufficientClusterCapacityFault) RequestID() string { + return s.RespMetadata.RequestID } // The Amazon Resource Name (ARN) supplied in the request is not valid. type InvalidARNFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3818,17 +3818,17 @@ func (s InvalidARNFault) GoString() string { func newErrorInvalidARNFault(v protocol.ResponseMetadata) error { return &InvalidARNFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidARNFault) Code() string { +func (s *InvalidARNFault) Code() string { return "InvalidARNFault" } // Message returns the exception's message. -func (s InvalidARNFault) Message() string { +func (s *InvalidARNFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3836,28 +3836,28 @@ func (s InvalidARNFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidARNFault) OrigErr() error { +func (s *InvalidARNFault) OrigErr() error { return nil } -func (s InvalidARNFault) Error() string { +func (s *InvalidARNFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidARNFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidARNFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidARNFault) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidARNFault) RequestID() string { + return s.RespMetadata.RequestID } // The requested DAX cluster is not in the available state. type InvalidClusterStateFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3874,17 +3874,17 @@ func (s InvalidClusterStateFault) GoString() string { func newErrorInvalidClusterStateFault(v protocol.ResponseMetadata) error { return &InvalidClusterStateFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidClusterStateFault) Code() string { +func (s *InvalidClusterStateFault) Code() string { return "InvalidClusterStateFault" } // Message returns the exception's message. -func (s InvalidClusterStateFault) Message() string { +func (s *InvalidClusterStateFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3892,28 +3892,28 @@ func (s InvalidClusterStateFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidClusterStateFault) OrigErr() error { +func (s *InvalidClusterStateFault) OrigErr() error { return nil } -func (s InvalidClusterStateFault) Error() string { +func (s *InvalidClusterStateFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidClusterStateFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidClusterStateFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidClusterStateFault) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidClusterStateFault) RequestID() string { + return s.RespMetadata.RequestID } // Two or more incompatible parameters were specified. type InvalidParameterCombinationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3930,17 +3930,17 @@ func (s InvalidParameterCombinationException) GoString() string { func newErrorInvalidParameterCombinationException(v protocol.ResponseMetadata) error { return &InvalidParameterCombinationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterCombinationException) Code() string { +func (s *InvalidParameterCombinationException) Code() string { return "InvalidParameterCombinationException" } // Message returns the exception's message. -func (s InvalidParameterCombinationException) Message() string { +func (s *InvalidParameterCombinationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3948,28 +3948,28 @@ func (s InvalidParameterCombinationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidParameterCombinationException) OrigErr() error { +func (s *InvalidParameterCombinationException) OrigErr() error { return nil } -func (s InvalidParameterCombinationException) Error() string { +func (s *InvalidParameterCombinationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterCombinationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterCombinationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterCombinationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterCombinationException) RequestID() string { + return s.RespMetadata.RequestID } // One or more parameters in a parameter group are in an invalid state. type InvalidParameterGroupStateFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3986,17 +3986,17 @@ func (s InvalidParameterGroupStateFault) GoString() string { func newErrorInvalidParameterGroupStateFault(v protocol.ResponseMetadata) error { return &InvalidParameterGroupStateFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterGroupStateFault) Code() string { +func (s *InvalidParameterGroupStateFault) Code() string { return "InvalidParameterGroupStateFault" } // Message returns the exception's message. -func (s InvalidParameterGroupStateFault) Message() string { +func (s *InvalidParameterGroupStateFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4004,28 +4004,28 @@ func (s InvalidParameterGroupStateFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterGroupStateFault) OrigErr() error { +func (s *InvalidParameterGroupStateFault) OrigErr() error { return nil } -func (s InvalidParameterGroupStateFault) Error() string { +func (s *InvalidParameterGroupStateFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterGroupStateFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterGroupStateFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterGroupStateFault) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterGroupStateFault) RequestID() string { + return s.RespMetadata.RequestID } // The value for a parameter is invalid. type InvalidParameterValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4042,17 +4042,17 @@ func (s InvalidParameterValueException) GoString() string { func newErrorInvalidParameterValueException(v protocol.ResponseMetadata) error { return &InvalidParameterValueException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidParameterValueException) Code() string { +func (s *InvalidParameterValueException) Code() string { return "InvalidParameterValueException" } // Message returns the exception's message. -func (s InvalidParameterValueException) Message() string { +func (s *InvalidParameterValueException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4060,28 +4060,28 @@ func (s InvalidParameterValueException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterValueException) OrigErr() error { +func (s *InvalidParameterValueException) OrigErr() error { return nil } -func (s InvalidParameterValueException) Error() string { +func (s *InvalidParameterValueException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterValueException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterValueException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterValueException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterValueException) RequestID() string { + return s.RespMetadata.RequestID } // An invalid subnet identifier was specified. type InvalidSubnet struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4098,17 +4098,17 @@ func (s InvalidSubnet) GoString() string { func newErrorInvalidSubnet(v protocol.ResponseMetadata) error { return &InvalidSubnet{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSubnet) Code() string { +func (s *InvalidSubnet) Code() string { return "InvalidSubnet" } // Message returns the exception's message. -func (s InvalidSubnet) Message() string { +func (s *InvalidSubnet) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4116,28 +4116,28 @@ func (s InvalidSubnet) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidSubnet) OrigErr() error { +func (s *InvalidSubnet) OrigErr() error { return nil } -func (s InvalidSubnet) Error() string { +func (s *InvalidSubnet) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSubnet) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSubnet) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSubnet) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSubnet) RequestID() string { + return s.RespMetadata.RequestID } // The VPC network is in an invalid state. 
type InvalidVPCNetworkStateFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4154,17 +4154,17 @@ func (s InvalidVPCNetworkStateFault) GoString() string { func newErrorInvalidVPCNetworkStateFault(v protocol.ResponseMetadata) error { return &InvalidVPCNetworkStateFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidVPCNetworkStateFault) Code() string { +func (s *InvalidVPCNetworkStateFault) Code() string { return "InvalidVPCNetworkStateFault" } // Message returns the exception's message. -func (s InvalidVPCNetworkStateFault) Message() string { +func (s *InvalidVPCNetworkStateFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4172,22 +4172,22 @@ func (s InvalidVPCNetworkStateFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidVPCNetworkStateFault) OrigErr() error { +func (s *InvalidVPCNetworkStateFault) OrigErr() error { return nil } -func (s InvalidVPCNetworkStateFault) Error() string { +func (s *InvalidVPCNetworkStateFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidVPCNetworkStateFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidVPCNetworkStateFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidVPCNetworkStateFault) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidVPCNetworkStateFault) RequestID() string { + return s.RespMetadata.RequestID } type ListTagsInput struct { @@ -4347,8 +4347,8 @@ func (s *Node) SetParameterGroupStatus(v string) *Node { // None of the nodes in the cluster have the given node ID. type NodeNotFoundFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4365,17 +4365,17 @@ func (s NodeNotFoundFault) GoString() string { func newErrorNodeNotFoundFault(v protocol.ResponseMetadata) error { return &NodeNotFoundFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NodeNotFoundFault) Code() string { +func (s *NodeNotFoundFault) Code() string { return "NodeNotFoundFault" } // Message returns the exception's message. -func (s NodeNotFoundFault) Message() string { +func (s *NodeNotFoundFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4383,28 +4383,28 @@ func (s NodeNotFoundFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NodeNotFoundFault) OrigErr() error { +func (s *NodeNotFoundFault) OrigErr() error { return nil } -func (s NodeNotFoundFault) Error() string { +func (s *NodeNotFoundFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NodeNotFoundFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NodeNotFoundFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s NodeNotFoundFault) RequestID() string { - return s.respMetadata.RequestID +func (s *NodeNotFoundFault) RequestID() string { + return s.RespMetadata.RequestID } // You have attempted to exceed the maximum number of nodes for a DAX cluster. type NodeQuotaForClusterExceededFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4421,17 +4421,17 @@ func (s NodeQuotaForClusterExceededFault) GoString() string { func newErrorNodeQuotaForClusterExceededFault(v protocol.ResponseMetadata) error { return &NodeQuotaForClusterExceededFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NodeQuotaForClusterExceededFault) Code() string { +func (s *NodeQuotaForClusterExceededFault) Code() string { return "NodeQuotaForClusterExceededFault" } // Message returns the exception's message. -func (s NodeQuotaForClusterExceededFault) Message() string { +func (s *NodeQuotaForClusterExceededFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4439,28 +4439,28 @@ func (s NodeQuotaForClusterExceededFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NodeQuotaForClusterExceededFault) OrigErr() error { +func (s *NodeQuotaForClusterExceededFault) OrigErr() error { return nil } -func (s NodeQuotaForClusterExceededFault) Error() string { +func (s *NodeQuotaForClusterExceededFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NodeQuotaForClusterExceededFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NodeQuotaForClusterExceededFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NodeQuotaForClusterExceededFault) RequestID() string { - return s.respMetadata.RequestID +func (s *NodeQuotaForClusterExceededFault) RequestID() string { + return s.RespMetadata.RequestID } // You have attempted to exceed the maximum number of nodes for your AWS account. type NodeQuotaForCustomerExceededFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4477,17 +4477,17 @@ func (s NodeQuotaForCustomerExceededFault) GoString() string { func newErrorNodeQuotaForCustomerExceededFault(v protocol.ResponseMetadata) error { return &NodeQuotaForCustomerExceededFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NodeQuotaForCustomerExceededFault) Code() string { +func (s *NodeQuotaForCustomerExceededFault) Code() string { return "NodeQuotaForCustomerExceededFault" } // Message returns the exception's message. -func (s NodeQuotaForCustomerExceededFault) Message() string { +func (s *NodeQuotaForCustomerExceededFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4495,22 +4495,22 @@ func (s NodeQuotaForCustomerExceededFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s NodeQuotaForCustomerExceededFault) OrigErr() error { +func (s *NodeQuotaForCustomerExceededFault) OrigErr() error { return nil } -func (s NodeQuotaForCustomerExceededFault) Error() string { +func (s *NodeQuotaForCustomerExceededFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NodeQuotaForCustomerExceededFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NodeQuotaForCustomerExceededFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NodeQuotaForCustomerExceededFault) RequestID() string { - return s.respMetadata.RequestID +func (s *NodeQuotaForCustomerExceededFault) RequestID() string { + return s.RespMetadata.RequestID } // Represents a parameter value that is applicable to a particular node type. @@ -4725,8 +4725,8 @@ func (s *ParameterGroup) SetParameterGroupName(v string) *ParameterGroup { // The specified parameter group already exists. type ParameterGroupAlreadyExistsFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4743,17 +4743,17 @@ func (s ParameterGroupAlreadyExistsFault) GoString() string { func newErrorParameterGroupAlreadyExistsFault(v protocol.ResponseMetadata) error { return &ParameterGroupAlreadyExistsFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParameterGroupAlreadyExistsFault) Code() string { +func (s *ParameterGroupAlreadyExistsFault) Code() string { return "ParameterGroupAlreadyExistsFault" } // Message returns the exception's message. -func (s ParameterGroupAlreadyExistsFault) Message() string { +func (s *ParameterGroupAlreadyExistsFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4761,28 +4761,28 @@ func (s ParameterGroupAlreadyExistsFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ParameterGroupAlreadyExistsFault) OrigErr() error { +func (s *ParameterGroupAlreadyExistsFault) OrigErr() error { return nil } -func (s ParameterGroupAlreadyExistsFault) Error() string { +func (s *ParameterGroupAlreadyExistsFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ParameterGroupAlreadyExistsFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParameterGroupAlreadyExistsFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParameterGroupAlreadyExistsFault) RequestID() string { - return s.respMetadata.RequestID +func (s *ParameterGroupAlreadyExistsFault) RequestID() string { + return s.RespMetadata.RequestID } // The specified parameter group does not exist. 
type ParameterGroupNotFoundFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4799,17 +4799,17 @@ func (s ParameterGroupNotFoundFault) GoString() string { func newErrorParameterGroupNotFoundFault(v protocol.ResponseMetadata) error { return &ParameterGroupNotFoundFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParameterGroupNotFoundFault) Code() string { +func (s *ParameterGroupNotFoundFault) Code() string { return "ParameterGroupNotFoundFault" } // Message returns the exception's message. -func (s ParameterGroupNotFoundFault) Message() string { +func (s *ParameterGroupNotFoundFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4817,28 +4817,28 @@ func (s ParameterGroupNotFoundFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ParameterGroupNotFoundFault) OrigErr() error { +func (s *ParameterGroupNotFoundFault) OrigErr() error { return nil } -func (s ParameterGroupNotFoundFault) Error() string { +func (s *ParameterGroupNotFoundFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ParameterGroupNotFoundFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParameterGroupNotFoundFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParameterGroupNotFoundFault) RequestID() string { - return s.respMetadata.RequestID +func (s *ParameterGroupNotFoundFault) RequestID() string { + return s.RespMetadata.RequestID } // You have attempted to exceed the maximum number of parameter groups. type ParameterGroupQuotaExceededFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4855,17 +4855,17 @@ func (s ParameterGroupQuotaExceededFault) GoString() string { func newErrorParameterGroupQuotaExceededFault(v protocol.ResponseMetadata) error { return &ParameterGroupQuotaExceededFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParameterGroupQuotaExceededFault) Code() string { +func (s *ParameterGroupQuotaExceededFault) Code() string { return "ParameterGroupQuotaExceededFault" } // Message returns the exception's message. -func (s ParameterGroupQuotaExceededFault) Message() string { +func (s *ParameterGroupQuotaExceededFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4873,22 +4873,22 @@ func (s ParameterGroupQuotaExceededFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ParameterGroupQuotaExceededFault) OrigErr() error { +func (s *ParameterGroupQuotaExceededFault) OrigErr() error { return nil } -func (s ParameterGroupQuotaExceededFault) Error() string { +func (s *ParameterGroupQuotaExceededFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ParameterGroupQuotaExceededFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParameterGroupQuotaExceededFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParameterGroupQuotaExceededFault) RequestID() string { - return s.respMetadata.RequestID +func (s *ParameterGroupQuotaExceededFault) RequestID() string { + return s.RespMetadata.RequestID } // The status of a parameter group. @@ -5149,8 +5149,8 @@ func (s *SecurityGroupMembership) SetStatus(v string) *SecurityGroupMembership { // The specified service linked role (SLR) was not found. type ServiceLinkedRoleNotFoundFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5167,17 +5167,17 @@ func (s ServiceLinkedRoleNotFoundFault) GoString() string { func newErrorServiceLinkedRoleNotFoundFault(v protocol.ResponseMetadata) error { return &ServiceLinkedRoleNotFoundFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceLinkedRoleNotFoundFault) Code() string { +func (s *ServiceLinkedRoleNotFoundFault) Code() string { return "ServiceLinkedRoleNotFoundFault" } // Message returns the exception's message. -func (s ServiceLinkedRoleNotFoundFault) Message() string { +func (s *ServiceLinkedRoleNotFoundFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5185,22 +5185,22 @@ func (s ServiceLinkedRoleNotFoundFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceLinkedRoleNotFoundFault) OrigErr() error { +func (s *ServiceLinkedRoleNotFoundFault) OrigErr() error { return nil } -func (s ServiceLinkedRoleNotFoundFault) Error() string { +func (s *ServiceLinkedRoleNotFoundFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceLinkedRoleNotFoundFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceLinkedRoleNotFoundFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceLinkedRoleNotFoundFault) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceLinkedRoleNotFoundFault) RequestID() string { + return s.RespMetadata.RequestID } // Represents the subnet associated with a DAX cluster. This parameter refers @@ -5295,8 +5295,8 @@ func (s *SubnetGroup) SetVpcId(v string) *SubnetGroup { // The specified subnet group already exists. type SubnetGroupAlreadyExistsFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5313,17 +5313,17 @@ func (s SubnetGroupAlreadyExistsFault) GoString() string { func newErrorSubnetGroupAlreadyExistsFault(v protocol.ResponseMetadata) error { return &SubnetGroupAlreadyExistsFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SubnetGroupAlreadyExistsFault) Code() string { +func (s *SubnetGroupAlreadyExistsFault) Code() string { return "SubnetGroupAlreadyExistsFault" } // Message returns the exception's message. 
-func (s SubnetGroupAlreadyExistsFault) Message() string { +func (s *SubnetGroupAlreadyExistsFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5331,28 +5331,28 @@ func (s SubnetGroupAlreadyExistsFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SubnetGroupAlreadyExistsFault) OrigErr() error { +func (s *SubnetGroupAlreadyExistsFault) OrigErr() error { return nil } -func (s SubnetGroupAlreadyExistsFault) Error() string { +func (s *SubnetGroupAlreadyExistsFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SubnetGroupAlreadyExistsFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SubnetGroupAlreadyExistsFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SubnetGroupAlreadyExistsFault) RequestID() string { - return s.respMetadata.RequestID +func (s *SubnetGroupAlreadyExistsFault) RequestID() string { + return s.RespMetadata.RequestID } // The specified subnet group is currently in use. type SubnetGroupInUseFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5369,17 +5369,17 @@ func (s SubnetGroupInUseFault) GoString() string { func newErrorSubnetGroupInUseFault(v protocol.ResponseMetadata) error { return &SubnetGroupInUseFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SubnetGroupInUseFault) Code() string { +func (s *SubnetGroupInUseFault) Code() string { return "SubnetGroupInUseFault" } // Message returns the exception's message. -func (s SubnetGroupInUseFault) Message() string { +func (s *SubnetGroupInUseFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5387,28 +5387,28 @@ func (s SubnetGroupInUseFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SubnetGroupInUseFault) OrigErr() error { +func (s *SubnetGroupInUseFault) OrigErr() error { return nil } -func (s SubnetGroupInUseFault) Error() string { +func (s *SubnetGroupInUseFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SubnetGroupInUseFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SubnetGroupInUseFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SubnetGroupInUseFault) RequestID() string { - return s.respMetadata.RequestID +func (s *SubnetGroupInUseFault) RequestID() string { + return s.RespMetadata.RequestID } // The requested subnet group name does not refer to an existing subnet group. type SubnetGroupNotFoundFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5425,17 +5425,17 @@ func (s SubnetGroupNotFoundFault) GoString() string { func newErrorSubnetGroupNotFoundFault(v protocol.ResponseMetadata) error { return &SubnetGroupNotFoundFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s SubnetGroupNotFoundFault) Code() string { +func (s *SubnetGroupNotFoundFault) Code() string { return "SubnetGroupNotFoundFault" } // Message returns the exception's message. -func (s SubnetGroupNotFoundFault) Message() string { +func (s *SubnetGroupNotFoundFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5443,29 +5443,29 @@ func (s SubnetGroupNotFoundFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SubnetGroupNotFoundFault) OrigErr() error { +func (s *SubnetGroupNotFoundFault) OrigErr() error { return nil } -func (s SubnetGroupNotFoundFault) Error() string { +func (s *SubnetGroupNotFoundFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SubnetGroupNotFoundFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SubnetGroupNotFoundFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SubnetGroupNotFoundFault) RequestID() string { - return s.respMetadata.RequestID +func (s *SubnetGroupNotFoundFault) RequestID() string { + return s.RespMetadata.RequestID } // The request cannot be processed because it would exceed the allowed number // of subnets in a subnet group. type SubnetGroupQuotaExceededFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5482,17 +5482,17 @@ func (s SubnetGroupQuotaExceededFault) GoString() string { func newErrorSubnetGroupQuotaExceededFault(v protocol.ResponseMetadata) error { return &SubnetGroupQuotaExceededFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SubnetGroupQuotaExceededFault) Code() string { +func (s *SubnetGroupQuotaExceededFault) Code() string { return "SubnetGroupQuotaExceededFault" } // Message returns the exception's message. -func (s SubnetGroupQuotaExceededFault) Message() string { +func (s *SubnetGroupQuotaExceededFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5500,28 +5500,28 @@ func (s SubnetGroupQuotaExceededFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SubnetGroupQuotaExceededFault) OrigErr() error { +func (s *SubnetGroupQuotaExceededFault) OrigErr() error { return nil } -func (s SubnetGroupQuotaExceededFault) Error() string { +func (s *SubnetGroupQuotaExceededFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SubnetGroupQuotaExceededFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SubnetGroupQuotaExceededFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SubnetGroupQuotaExceededFault) RequestID() string { - return s.respMetadata.RequestID +func (s *SubnetGroupQuotaExceededFault) RequestID() string { + return s.RespMetadata.RequestID } // The requested subnet is being used by another subnet group. 
type SubnetInUse struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5538,17 +5538,17 @@ func (s SubnetInUse) GoString() string { func newErrorSubnetInUse(v protocol.ResponseMetadata) error { return &SubnetInUse{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SubnetInUse) Code() string { +func (s *SubnetInUse) Code() string { return "SubnetInUse" } // Message returns the exception's message. -func (s SubnetInUse) Message() string { +func (s *SubnetInUse) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5556,29 +5556,29 @@ func (s SubnetInUse) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SubnetInUse) OrigErr() error { +func (s *SubnetInUse) OrigErr() error { return nil } -func (s SubnetInUse) Error() string { +func (s *SubnetInUse) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SubnetInUse) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SubnetInUse) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SubnetInUse) RequestID() string { - return s.respMetadata.RequestID +func (s *SubnetInUse) RequestID() string { + return s.RespMetadata.RequestID } // The request cannot be processed because it would exceed the allowed number // of subnets in a subnet group. type SubnetQuotaExceededFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5595,17 +5595,17 @@ func (s SubnetQuotaExceededFault) GoString() string { func newErrorSubnetQuotaExceededFault(v protocol.ResponseMetadata) error { return &SubnetQuotaExceededFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SubnetQuotaExceededFault) Code() string { +func (s *SubnetQuotaExceededFault) Code() string { return "SubnetQuotaExceededFault" } // Message returns the exception's message. -func (s SubnetQuotaExceededFault) Message() string { +func (s *SubnetQuotaExceededFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5613,22 +5613,22 @@ func (s SubnetQuotaExceededFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SubnetQuotaExceededFault) OrigErr() error { +func (s *SubnetQuotaExceededFault) OrigErr() error { return nil } -func (s SubnetQuotaExceededFault) Error() string { +func (s *SubnetQuotaExceededFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SubnetQuotaExceededFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SubnetQuotaExceededFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SubnetQuotaExceededFault) RequestID() string { - return s.respMetadata.RequestID +func (s *SubnetQuotaExceededFault) RequestID() string { + return s.RespMetadata.RequestID } // A description of a tag. Every tag is a key-value pair. 
You can add up to @@ -5675,8 +5675,8 @@ func (s *Tag) SetValue(v string) *Tag { // The tag does not exist. type TagNotFoundFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5693,17 +5693,17 @@ func (s TagNotFoundFault) GoString() string { func newErrorTagNotFoundFault(v protocol.ResponseMetadata) error { return &TagNotFoundFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagNotFoundFault) Code() string { +func (s *TagNotFoundFault) Code() string { return "TagNotFoundFault" } // Message returns the exception's message. -func (s TagNotFoundFault) Message() string { +func (s *TagNotFoundFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5711,28 +5711,28 @@ func (s TagNotFoundFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagNotFoundFault) OrigErr() error { +func (s *TagNotFoundFault) OrigErr() error { return nil } -func (s TagNotFoundFault) Error() string { +func (s *TagNotFoundFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagNotFoundFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagNotFoundFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagNotFoundFault) RequestID() string { - return s.respMetadata.RequestID +func (s *TagNotFoundFault) RequestID() string { + return s.RespMetadata.RequestID } // You have exceeded the maximum number of tags for this DAX cluster. type TagQuotaPerResourceExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5749,17 +5749,17 @@ func (s TagQuotaPerResourceExceeded) GoString() string { func newErrorTagQuotaPerResourceExceeded(v protocol.ResponseMetadata) error { return &TagQuotaPerResourceExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagQuotaPerResourceExceeded) Code() string { +func (s *TagQuotaPerResourceExceeded) Code() string { return "TagQuotaPerResourceExceeded" } // Message returns the exception's message. -func (s TagQuotaPerResourceExceeded) Message() string { +func (s *TagQuotaPerResourceExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5767,22 +5767,22 @@ func (s TagQuotaPerResourceExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagQuotaPerResourceExceeded) OrigErr() error { +func (s *TagQuotaPerResourceExceeded) OrigErr() error { return nil } -func (s TagQuotaPerResourceExceeded) Error() string { +func (s *TagQuotaPerResourceExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagQuotaPerResourceExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagQuotaPerResourceExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s TagQuotaPerResourceExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *TagQuotaPerResourceExceeded) RequestID() string { + return s.RespMetadata.RequestID } type TagResourceInput struct { @@ -6218,6 +6218,14 @@ const ( ChangeTypeRequiresReboot = "REQUIRES_REBOOT" ) +// ChangeType_Values returns all elements of the ChangeType enum +func ChangeType_Values() []string { + return []string{ + ChangeTypeImmediate, + ChangeTypeRequiresReboot, + } +} + const ( // IsModifiableTrue is a IsModifiable enum value IsModifiableTrue = "TRUE" @@ -6229,6 +6237,15 @@ const ( IsModifiableConditional = "CONDITIONAL" ) +// IsModifiable_Values returns all elements of the IsModifiable enum +func IsModifiable_Values() []string { + return []string{ + IsModifiableTrue, + IsModifiableFalse, + IsModifiableConditional, + } +} + const ( // ParameterTypeDefault is a ParameterType enum value ParameterTypeDefault = "DEFAULT" @@ -6237,6 +6254,14 @@ const ( ParameterTypeNodeTypeSpecific = "NODE_TYPE_SPECIFIC" ) +// ParameterType_Values returns all elements of the ParameterType enum +func ParameterType_Values() []string { + return []string{ + ParameterTypeDefault, + ParameterTypeNodeTypeSpecific, + } +} + const ( // SSEStatusEnabling is a SSEStatus enum value SSEStatusEnabling = "ENABLING" @@ -6251,6 +6276,16 @@ const ( SSEStatusDisabled = "DISABLED" ) +// SSEStatus_Values returns all elements of the SSEStatus enum +func SSEStatus_Values() []string { + return []string{ + SSEStatusEnabling, + SSEStatusEnabled, + SSEStatusDisabling, + SSEStatusDisabled, + } +} + const ( // SourceTypeCluster is a SourceType enum value SourceTypeCluster = "CLUSTER" @@ -6261,3 +6296,12 @@ const ( // SourceTypeSubnetGroup is a SourceType enum value SourceTypeSubnetGroup = "SUBNET_GROUP" ) + +// SourceType_Values returns all elements of the SourceType enum +func SourceType_Values() []string { + return []string{ + SourceTypeCluster, + SourceTypeParameterGroup, + SourceTypeSubnetGroup, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dax/service.go b/vendor/github.com/aws/aws-sdk-go/service/dax/service.go index ac4fc3da3..36cad4f0c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dax/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dax/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go index a458895b2..81646ad64 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/api.go @@ -8007,8 +8007,8 @@ func (s *AccountSettings) SetUnmeteredRemoteAccessDevices(v map[string]*int64) * // An invalid argument was specified. type ArgumentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Any additional information about the exception. 
Message_ *string `locationName:"message" type:"string"` @@ -8026,17 +8026,17 @@ func (s ArgumentException) GoString() string { func newErrorArgumentException(v protocol.ResponseMetadata) error { return &ArgumentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ArgumentException) Code() string { +func (s *ArgumentException) Code() string { return "ArgumentException" } // Message returns the exception's message. -func (s ArgumentException) Message() string { +func (s *ArgumentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8044,22 +8044,22 @@ func (s ArgumentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ArgumentException) OrigErr() error { +func (s *ArgumentException) OrigErr() error { return nil } -func (s ArgumentException) Error() string { +func (s *ArgumentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ArgumentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ArgumentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ArgumentException) RequestID() string { - return s.respMetadata.RequestID +func (s *ArgumentException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the output of a test. Examples of artifacts include logs and screenshots. @@ -8228,8 +8228,8 @@ func (s *CPU) SetFrequency(v string) *CPU { // The requested object could not be deleted. type CannotDeleteException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8246,17 +8246,17 @@ func (s CannotDeleteException) GoString() string { func newErrorCannotDeleteException(v protocol.ResponseMetadata) error { return &CannotDeleteException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CannotDeleteException) Code() string { +func (s *CannotDeleteException) Code() string { return "CannotDeleteException" } // Message returns the exception's message. -func (s CannotDeleteException) Message() string { +func (s *CannotDeleteException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8264,22 +8264,22 @@ func (s CannotDeleteException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CannotDeleteException) OrigErr() error { +func (s *CannotDeleteException) OrigErr() error { return nil } -func (s CannotDeleteException) Error() string { +func (s *CannotDeleteException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CannotDeleteException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CannotDeleteException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CannotDeleteException) RequestID() string { - return s.respMetadata.RequestID +func (s *CannotDeleteException) RequestID() string { + return s.RespMetadata.RequestID } // Represents entity counters. 
@@ -12177,8 +12177,8 @@ func (s *GetVPCEConfigurationOutput) SetVpceConfiguration(v *VPCEConfiguration) // An entity with the same name already exists. type IdempotencyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Any additional information about the exception. Message_ *string `locationName:"message" type:"string"` @@ -12196,17 +12196,17 @@ func (s IdempotencyException) GoString() string { func newErrorIdempotencyException(v protocol.ResponseMetadata) error { return &IdempotencyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IdempotencyException) Code() string { +func (s *IdempotencyException) Code() string { return "IdempotencyException" } // Message returns the exception's message. -func (s IdempotencyException) Message() string { +func (s *IdempotencyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12214,22 +12214,22 @@ func (s IdempotencyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IdempotencyException) OrigErr() error { +func (s *IdempotencyException) OrigErr() error { return nil } -func (s IdempotencyException) Error() string { +func (s *IdempotencyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IdempotencyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IdempotencyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IdempotencyException) RequestID() string { - return s.respMetadata.RequestID +func (s *IdempotencyException) RequestID() string { + return s.RespMetadata.RequestID } // Represents information about incompatibility. @@ -12442,8 +12442,8 @@ func (s *InstanceProfile) SetRebootAfterUse(v bool) *InstanceProfile { // An internal exception was raised in the service. Contact aws-devicefarm-support@amazon.com // (mailto:aws-devicefarm-support@amazon.com) if you see this error. type InternalServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12460,17 +12460,17 @@ func (s InternalServiceException) GoString() string { func newErrorInternalServiceException(v protocol.ResponseMetadata) error { return &InternalServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceException) Code() string { +func (s *InternalServiceException) Code() string { return "InternalServiceException" } // Message returns the exception's message. -func (s InternalServiceException) Message() string { +func (s *InternalServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12478,29 +12478,29 @@ func (s InternalServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InternalServiceException) OrigErr() error { +func (s *InternalServiceException) OrigErr() error { return nil } -func (s InternalServiceException) Error() string { +func (s *InternalServiceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceException) RequestID() string { + return s.RespMetadata.RequestID } // There was an error with the update request, or you do not have sufficient // permissions to update this VPC endpoint configuration. type InvalidOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12517,17 +12517,17 @@ func (s InvalidOperationException) GoString() string { func newErrorInvalidOperationException(v protocol.ResponseMetadata) error { return &InvalidOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOperationException) Code() string { +func (s *InvalidOperationException) Code() string { return "InvalidOperationException" } // Message returns the exception's message. -func (s InvalidOperationException) Message() string { +func (s *InvalidOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12535,22 +12535,22 @@ func (s InvalidOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOperationException) OrigErr() error { +func (s *InvalidOperationException) OrigErr() error { return nil } -func (s InvalidOperationException) Error() string { +func (s *InvalidOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOperationException) RequestID() string { + return s.RespMetadata.RequestID } // Represents a device. @@ -12781,8 +12781,8 @@ func (s *Job) SetVideoEndpoint(v string) *Job { // A limit was exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Any additional information about the exception. Message_ *string `locationName:"message" type:"string"` @@ -12800,17 +12800,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12818,22 +12818,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Represents a request to the list artifacts operation. @@ -15349,8 +15349,8 @@ func (s *NetworkProfile) SetUplinkLossPercent(v int64) *NetworkProfile { // Exception gets thrown when a user is not eligible to perform the specified // transaction. type NotEligibleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The HTTP response code of a Not Eligible exception. Message_ *string `locationName:"message" type:"string"` @@ -15368,17 +15368,17 @@ func (s NotEligibleException) GoString() string { func newErrorNotEligibleException(v protocol.ResponseMetadata) error { return &NotEligibleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotEligibleException) Code() string { +func (s *NotEligibleException) Code() string { return "NotEligibleException" } // Message returns the exception's message. -func (s NotEligibleException) Message() string { +func (s *NotEligibleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15386,28 +15386,28 @@ func (s NotEligibleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotEligibleException) OrigErr() error { +func (s *NotEligibleException) OrigErr() error { return nil } -func (s NotEligibleException) Error() string { +func (s *NotEligibleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotEligibleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotEligibleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotEligibleException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotEligibleException) RequestID() string { + return s.RespMetadata.RequestID } // The specified entity was not found. 
type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Any additional information about the exception. Message_ *string `locationName:"message" type:"string"` @@ -15425,17 +15425,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15443,22 +15443,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the metadata of a device offering. @@ -17457,8 +17457,8 @@ func (s *ScheduleRunTest) SetType(v string) *ScheduleRunTest { // There was a problem with the service account. type ServiceAccountException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Any additional information about the exception. Message_ *string `locationName:"message" type:"string"` @@ -17476,17 +17476,17 @@ func (s ServiceAccountException) GoString() string { func newErrorServiceAccountException(v protocol.ResponseMetadata) error { return &ServiceAccountException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceAccountException) Code() string { +func (s *ServiceAccountException) Code() string { return "ServiceAccountException" } // Message returns the exception's message. -func (s ServiceAccountException) Message() string { +func (s *ServiceAccountException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17494,22 +17494,22 @@ func (s ServiceAccountException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceAccountException) OrigErr() error { +func (s *ServiceAccountException) OrigErr() error { return nil } -func (s ServiceAccountException) Error() string { +func (s *ServiceAccountException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ServiceAccountException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceAccountException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceAccountException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceAccountException) RequestID() string { + return s.RespMetadata.RequestID } type StopJobInput struct { @@ -17963,8 +17963,8 @@ func (s *Tag) SetValue(v string) *Tag { // The operation was not successful. Try again. type TagOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -17983,17 +17983,17 @@ func (s TagOperationException) GoString() string { func newErrorTagOperationException(v protocol.ResponseMetadata) error { return &TagOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagOperationException) Code() string { +func (s *TagOperationException) Code() string { return "TagOperationException" } // Message returns the exception's message. -func (s TagOperationException) Message() string { +func (s *TagOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18001,29 +18001,29 @@ func (s TagOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagOperationException) OrigErr() error { +func (s *TagOperationException) OrigErr() error { return nil } -func (s TagOperationException) Error() string { +func (s *TagOperationException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TagOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagOperationException) RequestID() string { + return s.RespMetadata.RequestID } // The request doesn't comply with the AWS Identity and Access Management (IAM) // tag policy. Correct your request and then retry it. type TagPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -18042,17 +18042,17 @@ func (s TagPolicyException) GoString() string { func newErrorTagPolicyException(v protocol.ResponseMetadata) error { return &TagPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagPolicyException) Code() string { +func (s *TagPolicyException) Code() string { return "TagPolicyException" } // Message returns the exception's message. -func (s TagPolicyException) Message() string { +func (s *TagPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18060,22 +18060,22 @@ func (s TagPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s TagPolicyException) OrigErr() error { +func (s *TagPolicyException) OrigErr() error { return nil } -func (s TagPolicyException) Error() string { +func (s *TagPolicyException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TagPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagPolicyException) RequestID() string { + return s.RespMetadata.RequestID } type TagResourceInput struct { @@ -18581,8 +18581,8 @@ func (s *TestGridSessionArtifact) SetUrl(v string) *TestGridSessionArtifact { // The list of tags on the repository is over the limit. The maximum number // of tags that can be applied to a repository is 50. type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -18601,17 +18601,17 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18619,22 +18619,22 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { +func (s *TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *TooManyTagsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID } // Represents information about free trial device minutes for an AWS account. 
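The generated operation docs in these files still describe the older, string-based handling ("use runtime type assertions with awserr.Error's Code and Message methods"), which keeps working alongside the typed exceptions updated above. A hedged sketch of that style using the service's generated ErrCode constants; the GetRun call and ARN are placeholders, not values taken from this patch:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/devicefarm"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := devicefarm.New(sess)

	// Placeholder ARN; GetRun is just an arbitrary operation for the sketch.
	_, err := svc.GetRun(&devicefarm.GetRunInput{
		Arn: aws.String("arn:aws:devicefarm:us-west-2:123456789012:run:placeholder"),
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case devicefarm.ErrCodeNotFoundException:
			fmt.Println("run not found:", aerr.Message())
		case devicefarm.ErrCodeLimitExceededException:
			fmt.Println("request throttled or over limit:", aerr.Message())
		default:
			log.Fatal(aerr)
		}
	}
}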
@@ -19903,6 +19903,15 @@ const ( ArtifactCategoryLog = "LOG" ) +// ArtifactCategory_Values returns all elements of the ArtifactCategory enum +func ArtifactCategory_Values() []string { + return []string{ + ArtifactCategoryScreenshot, + ArtifactCategoryFile, + ArtifactCategoryLog, + } +} + const ( // ArtifactTypeUnknown is a ArtifactType enum value ArtifactTypeUnknown = "UNKNOWN" @@ -19989,6 +19998,40 @@ const ( ArtifactTypeTestspecOutput = "TESTSPEC_OUTPUT" ) +// ArtifactType_Values returns all elements of the ArtifactType enum +func ArtifactType_Values() []string { + return []string{ + ArtifactTypeUnknown, + ArtifactTypeScreenshot, + ArtifactTypeDeviceLog, + ArtifactTypeMessageLog, + ArtifactTypeVideoLog, + ArtifactTypeResultLog, + ArtifactTypeServiceLog, + ArtifactTypeWebkitLog, + ArtifactTypeInstrumentationOutput, + ArtifactTypeExerciserMonkeyOutput, + ArtifactTypeCalabashJsonOutput, + ArtifactTypeCalabashPrettyOutput, + ArtifactTypeCalabashStandardOutput, + ArtifactTypeCalabashJavaXmlOutput, + ArtifactTypeAutomationOutput, + ArtifactTypeAppiumServerOutput, + ArtifactTypeAppiumJavaOutput, + ArtifactTypeAppiumJavaXmlOutput, + ArtifactTypeAppiumPythonOutput, + ArtifactTypeAppiumPythonXmlOutput, + ArtifactTypeExplorerEventLog, + ArtifactTypeExplorerSummaryLog, + ArtifactTypeApplicationCrashReport, + ArtifactTypeXctestLog, + ArtifactTypeVideo, + ArtifactTypeCustomerArtifact, + ArtifactTypeCustomerArtifactLog, + ArtifactTypeTestspecOutput, + } +} + const ( // BillingMethodMetered is a BillingMethod enum value BillingMethodMetered = "METERED" @@ -19997,11 +20040,26 @@ const ( BillingMethodUnmetered = "UNMETERED" ) +// BillingMethod_Values returns all elements of the BillingMethod enum +func BillingMethod_Values() []string { + return []string{ + BillingMethodMetered, + BillingMethodUnmetered, + } +} + const ( // CurrencyCodeUsd is a CurrencyCode enum value CurrencyCodeUsd = "USD" ) +// CurrencyCode_Values returns all elements of the CurrencyCode enum +func CurrencyCode_Values() []string { + return []string{ + CurrencyCodeUsd, + } +} + const ( // DeviceAttributeArn is a DeviceAttribute enum value DeviceAttributeArn = "ARN" @@ -20043,6 +20101,25 @@ const ( DeviceAttributeAvailability = "AVAILABILITY" ) +// DeviceAttribute_Values returns all elements of the DeviceAttribute enum +func DeviceAttribute_Values() []string { + return []string{ + DeviceAttributeArn, + DeviceAttributePlatform, + DeviceAttributeFormFactor, + DeviceAttributeManufacturer, + DeviceAttributeRemoteAccessEnabled, + DeviceAttributeRemoteDebugEnabled, + DeviceAttributeAppiumVersion, + DeviceAttributeInstanceArn, + DeviceAttributeInstanceLabels, + DeviceAttributeFleetType, + DeviceAttributeOsVersion, + DeviceAttributeModel, + DeviceAttributeAvailability, + } +} + const ( // DeviceAvailabilityTemporaryNotAvailable is a DeviceAvailability enum value DeviceAvailabilityTemporaryNotAvailable = "TEMPORARY_NOT_AVAILABLE" @@ -20057,6 +20134,16 @@ const ( DeviceAvailabilityHighlyAvailable = "HIGHLY_AVAILABLE" ) +// DeviceAvailability_Values returns all elements of the DeviceAvailability enum +func DeviceAvailability_Values() []string { + return []string{ + DeviceAvailabilityTemporaryNotAvailable, + DeviceAvailabilityBusy, + DeviceAvailabilityAvailable, + DeviceAvailabilityHighlyAvailable, + } +} + const ( // DeviceFilterAttributeArn is a DeviceFilterAttribute enum value DeviceFilterAttributeArn = "ARN" @@ -20095,6 +20182,24 @@ const ( DeviceFilterAttributeFleetType = "FLEET_TYPE" ) +// DeviceFilterAttribute_Values returns all elements of 
the DeviceFilterAttribute enum +func DeviceFilterAttribute_Values() []string { + return []string{ + DeviceFilterAttributeArn, + DeviceFilterAttributePlatform, + DeviceFilterAttributeOsVersion, + DeviceFilterAttributeModel, + DeviceFilterAttributeAvailability, + DeviceFilterAttributeFormFactor, + DeviceFilterAttributeManufacturer, + DeviceFilterAttributeRemoteAccessEnabled, + DeviceFilterAttributeRemoteDebugEnabled, + DeviceFilterAttributeInstanceArn, + DeviceFilterAttributeInstanceLabels, + DeviceFilterAttributeFleetType, + } +} + const ( // DeviceFormFactorPhone is a DeviceFormFactor enum value DeviceFormFactorPhone = "PHONE" @@ -20103,6 +20208,14 @@ const ( DeviceFormFactorTablet = "TABLET" ) +// DeviceFormFactor_Values returns all elements of the DeviceFormFactor enum +func DeviceFormFactor_Values() []string { + return []string{ + DeviceFormFactorPhone, + DeviceFormFactorTablet, + } +} + const ( // DevicePlatformAndroid is a DevicePlatform enum value DevicePlatformAndroid = "ANDROID" @@ -20111,6 +20224,14 @@ const ( DevicePlatformIos = "IOS" ) +// DevicePlatform_Values returns all elements of the DevicePlatform enum +func DevicePlatform_Values() []string { + return []string{ + DevicePlatformAndroid, + DevicePlatformIos, + } +} + const ( // DevicePoolTypeCurated is a DevicePoolType enum value DevicePoolTypeCurated = "CURATED" @@ -20119,6 +20240,14 @@ const ( DevicePoolTypePrivate = "PRIVATE" ) +// DevicePoolType_Values returns all elements of the DevicePoolType enum +func DevicePoolType_Values() []string { + return []string{ + DevicePoolTypeCurated, + DevicePoolTypePrivate, + } +} + const ( // ExecutionResultPending is a ExecutionResult enum value ExecutionResultPending = "PENDING" @@ -20142,6 +20271,19 @@ const ( ExecutionResultStopped = "STOPPED" ) +// ExecutionResult_Values returns all elements of the ExecutionResult enum +func ExecutionResult_Values() []string { + return []string{ + ExecutionResultPending, + ExecutionResultPassed, + ExecutionResultWarned, + ExecutionResultFailed, + ExecutionResultSkipped, + ExecutionResultErrored, + ExecutionResultStopped, + } +} + const ( // ExecutionResultCodeParsingFailed is a ExecutionResultCode enum value ExecutionResultCodeParsingFailed = "PARSING_FAILED" @@ -20150,6 +20292,14 @@ const ( ExecutionResultCodeVpcEndpointSetupFailed = "VPC_ENDPOINT_SETUP_FAILED" ) +// ExecutionResultCode_Values returns all elements of the ExecutionResultCode enum +func ExecutionResultCode_Values() []string { + return []string{ + ExecutionResultCodeParsingFailed, + ExecutionResultCodeVpcEndpointSetupFailed, + } +} + const ( // ExecutionStatusPending is a ExecutionStatus enum value ExecutionStatusPending = "PENDING" @@ -20179,6 +20329,21 @@ const ( ExecutionStatusStopping = "STOPPING" ) +// ExecutionStatus_Values returns all elements of the ExecutionStatus enum +func ExecutionStatus_Values() []string { + return []string{ + ExecutionStatusPending, + ExecutionStatusPendingConcurrency, + ExecutionStatusPendingDevice, + ExecutionStatusProcessing, + ExecutionStatusScheduling, + ExecutionStatusPreparing, + ExecutionStatusRunning, + ExecutionStatusCompleted, + ExecutionStatusStopping, + } +} + const ( // InstanceStatusInUse is a InstanceStatus enum value InstanceStatusInUse = "IN_USE" @@ -20193,6 +20358,16 @@ const ( InstanceStatusNotAvailable = "NOT_AVAILABLE" ) +// InstanceStatus_Values returns all elements of the InstanceStatus enum +func InstanceStatus_Values() []string { + return []string{ + InstanceStatusInUse, + InstanceStatusPreparing, + InstanceStatusAvailable, + 
InstanceStatusNotAvailable, + } +} + const ( // InteractionModeInteractive is a InteractionMode enum value InteractionModeInteractive = "INTERACTIVE" @@ -20204,6 +20379,15 @@ const ( InteractionModeVideoOnly = "VIDEO_ONLY" ) +// InteractionMode_Values returns all elements of the InteractionMode enum +func InteractionMode_Values() []string { + return []string{ + InteractionModeInteractive, + InteractionModeNoVideo, + InteractionModeVideoOnly, + } +} + const ( // NetworkProfileTypeCurated is a NetworkProfileType enum value NetworkProfileTypeCurated = "CURATED" @@ -20212,6 +20396,14 @@ const ( NetworkProfileTypePrivate = "PRIVATE" ) +// NetworkProfileType_Values returns all elements of the NetworkProfileType enum +func NetworkProfileType_Values() []string { + return []string{ + NetworkProfileTypeCurated, + NetworkProfileTypePrivate, + } +} + const ( // OfferingTransactionTypePurchase is a OfferingTransactionType enum value OfferingTransactionTypePurchase = "PURCHASE" @@ -20223,16 +20415,39 @@ const ( OfferingTransactionTypeSystem = "SYSTEM" ) +// OfferingTransactionType_Values returns all elements of the OfferingTransactionType enum +func OfferingTransactionType_Values() []string { + return []string{ + OfferingTransactionTypePurchase, + OfferingTransactionTypeRenew, + OfferingTransactionTypeSystem, + } +} + const ( // OfferingTypeRecurring is a OfferingType enum value OfferingTypeRecurring = "RECURRING" ) +// OfferingType_Values returns all elements of the OfferingType enum +func OfferingType_Values() []string { + return []string{ + OfferingTypeRecurring, + } +} + const ( // RecurringChargeFrequencyMonthly is a RecurringChargeFrequency enum value RecurringChargeFrequencyMonthly = "MONTHLY" ) +// RecurringChargeFrequency_Values returns all elements of the RecurringChargeFrequency enum +func RecurringChargeFrequency_Values() []string { + return []string{ + RecurringChargeFrequencyMonthly, + } +} + const ( // RuleOperatorEquals is a RuleOperator enum value RuleOperatorEquals = "EQUALS" @@ -20259,6 +20474,20 @@ const ( RuleOperatorContains = "CONTAINS" ) +// RuleOperator_Values returns all elements of the RuleOperator enum +func RuleOperator_Values() []string { + return []string{ + RuleOperatorEquals, + RuleOperatorLessThan, + RuleOperatorLessThanOrEquals, + RuleOperatorGreaterThan, + RuleOperatorGreaterThanOrEquals, + RuleOperatorIn, + RuleOperatorNotIn, + RuleOperatorContains, + } +} + const ( // SampleTypeCpu is a SampleType enum value SampleTypeCpu = "CPU" @@ -20312,6 +20541,29 @@ const ( SampleTypeOpenglMaxDrawtime = "OPENGL_MAX_DRAWTIME" ) +// SampleType_Values returns all elements of the SampleType enum +func SampleType_Values() []string { + return []string{ + SampleTypeCpu, + SampleTypeMemory, + SampleTypeThreads, + SampleTypeRxRate, + SampleTypeTxRate, + SampleTypeRx, + SampleTypeTx, + SampleTypeNativeFrames, + SampleTypeNativeFps, + SampleTypeNativeMinDrawtime, + SampleTypeNativeAvgDrawtime, + SampleTypeNativeMaxDrawtime, + SampleTypeOpenglFrames, + SampleTypeOpenglFps, + SampleTypeOpenglMinDrawtime, + SampleTypeOpenglAvgDrawtime, + SampleTypeOpenglMaxDrawtime, + } +} + const ( // TestGridSessionArtifactCategoryVideo is a TestGridSessionArtifactCategory enum value TestGridSessionArtifactCategoryVideo = "VIDEO" @@ -20320,6 +20572,14 @@ const ( TestGridSessionArtifactCategoryLog = "LOG" ) +// TestGridSessionArtifactCategory_Values returns all elements of the TestGridSessionArtifactCategory enum +func TestGridSessionArtifactCategory_Values() []string { + return []string{ + 
TestGridSessionArtifactCategoryVideo, + TestGridSessionArtifactCategoryLog, + } +} + const ( // TestGridSessionArtifactTypeUnknown is a TestGridSessionArtifactType enum value TestGridSessionArtifactTypeUnknown = "UNKNOWN" @@ -20331,6 +20591,15 @@ const ( TestGridSessionArtifactTypeSeleniumLog = "SELENIUM_LOG" ) +// TestGridSessionArtifactType_Values returns all elements of the TestGridSessionArtifactType enum +func TestGridSessionArtifactType_Values() []string { + return []string{ + TestGridSessionArtifactTypeUnknown, + TestGridSessionArtifactTypeVideo, + TestGridSessionArtifactTypeSeleniumLog, + } +} + const ( // TestGridSessionStatusActive is a TestGridSessionStatus enum value TestGridSessionStatusActive = "ACTIVE" @@ -20342,6 +20611,15 @@ const ( TestGridSessionStatusErrored = "ERRORED" ) +// TestGridSessionStatus_Values returns all elements of the TestGridSessionStatus enum +func TestGridSessionStatus_Values() []string { + return []string{ + TestGridSessionStatusActive, + TestGridSessionStatusClosed, + TestGridSessionStatusErrored, + } +} + const ( // TestTypeBuiltinFuzz is a TestType enum value TestTypeBuiltinFuzz = "BUILTIN_FUZZ" @@ -20407,6 +20685,33 @@ const ( TestTypeRemoteAccessReplay = "REMOTE_ACCESS_REPLAY" ) +// TestType_Values returns all elements of the TestType enum +func TestType_Values() []string { + return []string{ + TestTypeBuiltinFuzz, + TestTypeBuiltinExplorer, + TestTypeWebPerformanceProfile, + TestTypeAppiumJavaJunit, + TestTypeAppiumJavaTestng, + TestTypeAppiumPython, + TestTypeAppiumNode, + TestTypeAppiumRuby, + TestTypeAppiumWebJavaJunit, + TestTypeAppiumWebJavaTestng, + TestTypeAppiumWebPython, + TestTypeAppiumWebNode, + TestTypeAppiumWebRuby, + TestTypeCalabash, + TestTypeInstrumentation, + TestTypeUiautomation, + TestTypeUiautomator, + TestTypeXctest, + TestTypeXctestUi, + TestTypeRemoteAccessRecord, + TestTypeRemoteAccessReplay, + } +} + const ( // UploadCategoryCurated is a UploadCategory enum value UploadCategoryCurated = "CURATED" @@ -20415,6 +20720,14 @@ const ( UploadCategoryPrivate = "PRIVATE" ) +// UploadCategory_Values returns all elements of the UploadCategory enum +func UploadCategory_Values() []string { + return []string{ + UploadCategoryCurated, + UploadCategoryPrivate, + } +} + const ( // UploadStatusInitialized is a UploadStatus enum value UploadStatusInitialized = "INITIALIZED" @@ -20429,6 +20742,16 @@ const ( UploadStatusFailed = "FAILED" ) +// UploadStatus_Values returns all elements of the UploadStatus enum +func UploadStatus_Values() []string { + return []string{ + UploadStatusInitialized, + UploadStatusProcessing, + UploadStatusSucceeded, + UploadStatusFailed, + } +} + const ( // UploadTypeAndroidApp is a UploadType enum value UploadTypeAndroidApp = "ANDROID_APP" @@ -20526,3 +20849,41 @@ const ( // UploadTypeXctestUiTestSpec is a UploadType enum value UploadTypeXctestUiTestSpec = "XCTEST_UI_TEST_SPEC" ) + +// UploadType_Values returns all elements of the UploadType enum +func UploadType_Values() []string { + return []string{ + UploadTypeAndroidApp, + UploadTypeIosApp, + UploadTypeWebApp, + UploadTypeExternalData, + UploadTypeAppiumJavaJunitTestPackage, + UploadTypeAppiumJavaTestngTestPackage, + UploadTypeAppiumPythonTestPackage, + UploadTypeAppiumNodeTestPackage, + UploadTypeAppiumRubyTestPackage, + UploadTypeAppiumWebJavaJunitTestPackage, + UploadTypeAppiumWebJavaTestngTestPackage, + UploadTypeAppiumWebPythonTestPackage, + UploadTypeAppiumWebNodeTestPackage, + UploadTypeAppiumWebRubyTestPackage, + UploadTypeCalabashTestPackage, + 
UploadTypeInstrumentationTestPackage, + UploadTypeUiautomationTestPackage, + UploadTypeUiautomatorTestPackage, + UploadTypeXctestTestPackage, + UploadTypeXctestUiTestPackage, + UploadTypeAppiumJavaJunitTestSpec, + UploadTypeAppiumJavaTestngTestSpec, + UploadTypeAppiumPythonTestSpec, + UploadTypeAppiumNodeTestSpec, + UploadTypeAppiumRubyTestSpec, + UploadTypeAppiumWebJavaJunitTestSpec, + UploadTypeAppiumWebJavaTestngTestSpec, + UploadTypeAppiumWebPythonTestSpec, + UploadTypeAppiumWebNodeTestSpec, + UploadTypeAppiumWebRubyTestSpec, + UploadTypeInstrumentationTestSpec, + UploadTypeXctestUiTestSpec, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go index cf5b2da53..ded494a33 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go b/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go index 4cd291e14..5459f9a6b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directconnect/api.go @@ -1620,10 +1620,8 @@ func (c *DirectConnect) CreateDirectConnectGatewayAssociationProposalRequest(inp // Creates a proposal to associate the specified virtual private gateway or // transit gateway with the specified Direct Connect gateway. // -// You can only associate a Direct Connect gateway and virtual private gateway -// or transit gateway when the account that owns the Direct Connect gateway -// and the account that owns the virtual private gateway or transit gateway -// have the same AWS Payer ID. +// You can associate a Direct Connect gateway and virtual private gateway or +// transit gateway that is owned by any AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1816,24 +1814,24 @@ func (c *DirectConnect) CreateLagRequest(input *CreateLagInput) (req *request.Re // CreateLag API operation for AWS Direct Connect. // // Creates a link aggregation group (LAG) with the specified number of bundled -// physical connections between the customer network and a specific AWS Direct -// Connect location. A LAG is a logical interface that uses the Link Aggregation -// Control Protocol (LACP) to aggregate multiple interfaces, enabling you to -// treat them as a single interface. +// physical dedicated connections between the customer network and a specific +// AWS Direct Connect location. A LAG is a logical interface that uses the Link +// Aggregation Control Protocol (LACP) to aggregate multiple interfaces, enabling +// you to treat them as a single interface. // -// All connections in a LAG must use the same bandwidth and must terminate at -// the same AWS Direct Connect endpoint. +// All connections in a LAG must use the same bandwidth (either 1Gbps or 10Gbps) +// and must terminate at the same AWS Direct Connect endpoint. // -// You can have up to 10 connections per LAG. 
Regardless of this limit, if you -// request more connections for the LAG than AWS Direct Connect can allocate +// You can have up to 10 dedicated connections per LAG. Regardless of this limit, +// if you request more connections for the LAG than AWS Direct Connect can allocate // on a single endpoint, no LAG is created. // -// You can specify an existing physical connection or interconnect to include -// in the LAG (which counts towards the total number of connections). Doing -// so interrupts the current physical connection or hosted connections, and -// re-establishes them as a member of the LAG. The LAG will be created on the -// same AWS Direct Connect endpoint to which the connection terminates. Any -// virtual interfaces associated with the connection are automatically disassociated +// You can specify an existing physical dedicated connection or interconnect +// to include in the LAG (which counts towards the total number of connections). +// Doing so interrupts the current physical dedicated connection, and re-establishes +// them as a member of the LAG. The LAG will be created on the same AWS Direct +// Connect endpoint to which the dedicated connection terminates. Any virtual +// interfaces associated with the dedicated connection are automatically disassociated // and re-associated with the LAG. The connection ID does not change. // // If the AWS account used to create a LAG is a registered AWS Direct Connect @@ -1935,6 +1933,13 @@ func (c *DirectConnect) CreatePrivateVirtualInterfaceRequest(input *CreatePrivat // different AWS Regions. Connecting the private virtual interface to a VGW // only provides access to a single VPC within the same Region. // +// Setting the MTU of a virtual interface to 9001 (jumbo frames) can cause an +// update to the underlying physical connection if it wasn't updated to support +// jumbo frames. Updating the connection disrupts network connectivity for all +// virtual interfaces associated with the connection for up to 30 seconds. To +// check whether your connection supports jumbo frames, call DescribeConnections. +// To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2126,6 +2131,13 @@ func (c *DirectConnect) CreateTransitVirtualInterfaceRequest(input *CreateTransi // 64512 for both your the transit gateway and Direct Connect gateway, the association // request fails. // +// Setting the MTU of a virtual interface to 8500 (jumbo frames) can cause an +// update to the underlying physical connection if it wasn't updated to support +// jumbo frames. Updating the connection disrupts network connectivity for all +// virtual interfaces associated with the connection for up to 30 seconds. To +// check whether your connection supports jumbo frames, call DescribeConnections. +// To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
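The ..._Values() helpers added to the devicefarm package in the hunks above return the full set of valid strings for each enum, so downstream code (for example a Terraform schema validator) does not need to hand-maintain those lists. A minimal, self-contained sketch of consuming one of them; the contains helper and the hard-coded value are illustrative only:

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/service/devicefarm"
)

// contains reports whether want is one of the strings returned by a
// *_Values() helper such as devicefarm.UploadType_Values().
func contains(values []string, want string) bool {
	for _, v := range values {
		if v == want {
			return true
		}
	}
	return false
}

func main() {
	uploadType := "ANDROID_APP" // illustrative; would normally come from user configuration
	if !contains(devicefarm.UploadType_Values(), uploadType) {
		fmt.Fprintf(os.Stderr, "invalid upload type %q; valid values: %v\n",
			uploadType, devicefarm.UploadType_Values())
		os.Exit(1)
	}
	fmt.Println("upload type", uploadType, "is valid")
}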
@@ -4331,6 +4343,262 @@ func (c *DirectConnect) DisassociateConnectionFromLagWithContext(ctx aws.Context return out, req.Send() } +const opListVirtualInterfaceTestHistory = "ListVirtualInterfaceTestHistory" + +// ListVirtualInterfaceTestHistoryRequest generates a "aws/request.Request" representing the +// client's request for the ListVirtualInterfaceTestHistory operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListVirtualInterfaceTestHistory for more information on using the ListVirtualInterfaceTestHistory +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListVirtualInterfaceTestHistoryRequest method. +// req, resp := client.ListVirtualInterfaceTestHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ListVirtualInterfaceTestHistory +func (c *DirectConnect) ListVirtualInterfaceTestHistoryRequest(input *ListVirtualInterfaceTestHistoryInput) (req *request.Request, output *ListVirtualInterfaceTestHistoryOutput) { + op := &request.Operation{ + Name: opListVirtualInterfaceTestHistory, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListVirtualInterfaceTestHistoryInput{} + } + + output = &ListVirtualInterfaceTestHistoryOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListVirtualInterfaceTestHistory API operation for AWS Direct Connect. +// +// Lists the virtual interface failover test history. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Direct Connect's +// API operation ListVirtualInterfaceTestHistory for usage and error information. +// +// Returned Error Types: +// * ServerException +// A server-side error occurred. +// +// * ClientException +// One or more parameters are not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/ListVirtualInterfaceTestHistory +func (c *DirectConnect) ListVirtualInterfaceTestHistory(input *ListVirtualInterfaceTestHistoryInput) (*ListVirtualInterfaceTestHistoryOutput, error) { + req, out := c.ListVirtualInterfaceTestHistoryRequest(input) + return out, req.Send() +} + +// ListVirtualInterfaceTestHistoryWithContext is the same as ListVirtualInterfaceTestHistory with the addition of +// the ability to pass a context and additional request options. +// +// See ListVirtualInterfaceTestHistory for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *DirectConnect) ListVirtualInterfaceTestHistoryWithContext(ctx aws.Context, input *ListVirtualInterfaceTestHistoryInput, opts ...request.Option) (*ListVirtualInterfaceTestHistoryOutput, error) { + req, out := c.ListVirtualInterfaceTestHistoryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartBgpFailoverTest = "StartBgpFailoverTest" + +// StartBgpFailoverTestRequest generates a "aws/request.Request" representing the +// client's request for the StartBgpFailoverTest operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartBgpFailoverTest for more information on using the StartBgpFailoverTest +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartBgpFailoverTestRequest method. +// req, resp := client.StartBgpFailoverTestRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/StartBgpFailoverTest +func (c *DirectConnect) StartBgpFailoverTestRequest(input *StartBgpFailoverTestInput) (req *request.Request, output *StartBgpFailoverTestOutput) { + op := &request.Operation{ + Name: opStartBgpFailoverTest, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartBgpFailoverTestInput{} + } + + output = &StartBgpFailoverTestOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartBgpFailoverTest API operation for AWS Direct Connect. +// +// Starts the virtual interface failover test that verifies your configuration +// meets your resiliency requirements by placing the BGP peering session in +// the DOWN state. You can then send traffic to verify that there are no outages. +// +// You can run the test on public, private, transit, and hosted virtual interfaces. +// +// You can use ListVirtualInterfaceTestHistory (https://docs.aws.amazon.com/directconnect/latest/APIReference/API_ListVirtualInterfaceTestHistory.html) +// to view the virtual interface test history. +// +// If you need to stop the test before the test interval completes, use StopBgpFailoverTest +// (https://docs.aws.amazon.com/directconnect/latest/APIReference/API_StopBgpFailoverTest.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Direct Connect's +// API operation StartBgpFailoverTest for usage and error information. +// +// Returned Error Types: +// * ServerException +// A server-side error occurred. +// +// * ClientException +// One or more parameters are not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/StartBgpFailoverTest +func (c *DirectConnect) StartBgpFailoverTest(input *StartBgpFailoverTestInput) (*StartBgpFailoverTestOutput, error) { + req, out := c.StartBgpFailoverTestRequest(input) + return out, req.Send() +} + +// StartBgpFailoverTestWithContext is the same as StartBgpFailoverTest with the addition of +// the ability to pass a context and additional request options. +// +// See StartBgpFailoverTest for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DirectConnect) StartBgpFailoverTestWithContext(ctx aws.Context, input *StartBgpFailoverTestInput, opts ...request.Option) (*StartBgpFailoverTestOutput, error) { + req, out := c.StartBgpFailoverTestRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopBgpFailoverTest = "StopBgpFailoverTest" + +// StopBgpFailoverTestRequest generates a "aws/request.Request" representing the +// client's request for the StopBgpFailoverTest operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopBgpFailoverTest for more information on using the StopBgpFailoverTest +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopBgpFailoverTestRequest method. +// req, resp := client.StopBgpFailoverTestRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/StopBgpFailoverTest +func (c *DirectConnect) StopBgpFailoverTestRequest(input *StopBgpFailoverTestInput) (req *request.Request, output *StopBgpFailoverTestOutput) { + op := &request.Operation{ + Name: opStopBgpFailoverTest, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopBgpFailoverTestInput{} + } + + output = &StopBgpFailoverTestOutput{} + req = c.newRequest(op, input, output) + return +} + +// StopBgpFailoverTest API operation for AWS Direct Connect. +// +// Stops the virtual interface failover test. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Direct Connect's +// API operation StopBgpFailoverTest for usage and error information. +// +// Returned Error Types: +// * ServerException +// A server-side error occurred. +// +// * ClientException +// One or more parameters are not valid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/directconnect-2012-10-25/StopBgpFailoverTest +func (c *DirectConnect) StopBgpFailoverTest(input *StopBgpFailoverTestInput) (*StopBgpFailoverTestOutput, error) { + req, out := c.StopBgpFailoverTestRequest(input) + return out, req.Send() +} + +// StopBgpFailoverTestWithContext is the same as StopBgpFailoverTest with the addition of +// the ability to pass a context and additional request options. +// +// See StopBgpFailoverTest for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DirectConnect) StopBgpFailoverTestWithContext(ctx aws.Context, input *StopBgpFailoverTestInput, opts ...request.Option) (*StopBgpFailoverTestOutput, error) { + req, out := c.StopBgpFailoverTestRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTagResource = "TagResource" // TagResourceRequest generates a "aws/request.Request" representing the @@ -4738,7 +5006,7 @@ func (c *DirectConnect) UpdateVirtualInterfaceAttributesRequest(input *UpdateVir // jumbo frames. Updating the connection disrupts network connectivity for all // virtual interfaces associated with the connection for up to 30 seconds. To // check whether your connection supports jumbo frames, call DescribeConnections. -// To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces. +// To check whether your virtual interface supports jumbo frames, call DescribeVirtualInterfaces. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5660,8 +5928,8 @@ func (s *BGPPeer) SetCustomerAddress(v string) *BGPPeer { // One or more parameters are not valid. type ClientException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5678,17 +5946,17 @@ func (s ClientException) GoString() string { func newErrorClientException(v protocol.ResponseMetadata) error { return &ClientException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClientException) Code() string { +func (s *ClientException) Code() string { return "DirectConnectClientException" } // Message returns the exception's message. -func (s ClientException) Message() string { +func (s *ClientException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5696,22 +5964,22 @@ func (s ClientException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClientException) OrigErr() error { +func (s *ClientException) OrigErr() error { return nil } -func (s ClientException) Error() string { +func (s *ClientException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClientException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClientException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ClientException) RequestID() string { - return s.respMetadata.RequestID +func (s *ClientException) RequestID() string { + return s.RespMetadata.RequestID } type ConfirmConnectionInput struct { @@ -6861,12 +7129,11 @@ type CreateLagInput struct { // The tags to associate with the automtically created LAGs. ChildConnectionTags []*Tag `locationName:"childConnectionTags" min:"1" type:"list"` - // The ID of an existing connection to migrate to the LAG. + // The ID of an existing dedicated connection to migrate to the LAG. ConnectionId *string `locationName:"connectionId" type:"string"` - // The bandwidth of the individual physical connections bundled by the LAG. - // The possible values are 50Mbps, 100Mbps, 200Mbps, 300Mbps, 400Mbps, 500Mbps, - // 1Gbps, 2Gbps, 5Gbps, and 10Gbps. + // The bandwidth of the individual physical dedicated connections bundled by + // the LAG. The possible values are 1Gbps and 10Gbps. // // ConnectionsBandwidth is a required field ConnectionsBandwidth *string `locationName:"connectionsBandwidth" type:"string" required:"true"` @@ -6881,8 +7148,8 @@ type CreateLagInput struct { // Location is a required field Location *string `locationName:"location" type:"string" required:"true"` - // The number of physical connections initially provisioned and bundled by the - // LAG. + // The number of physical dedicated connections initially provisioned and bundled + // by the LAG. // // NumberOfConnections is a required field NumberOfConnections *int64 `locationName:"numberOfConnections" type:"integer" required:"true"` @@ -8714,8 +8981,8 @@ func (s *DisassociateConnectionFromLagInput) SetLagId(v string) *DisassociateCon // A tag key was specified more than once. type DuplicateTagKeysException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8732,17 +8999,17 @@ func (s DuplicateTagKeysException) GoString() string { func newErrorDuplicateTagKeysException(v protocol.ResponseMetadata) error { return &DuplicateTagKeysException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicateTagKeysException) Code() string { +func (s *DuplicateTagKeysException) Code() string { return "DuplicateTagKeysException" } // Message returns the exception's message. -func (s DuplicateTagKeysException) Message() string { +func (s *DuplicateTagKeysException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8750,22 +9017,22 @@ func (s DuplicateTagKeysException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicateTagKeysException) OrigErr() error { +func (s *DuplicateTagKeysException) OrigErr() error { return nil } -func (s DuplicateTagKeysException) Error() string { +func (s *DuplicateTagKeysException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicateTagKeysException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicateTagKeysException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s DuplicateTagKeysException) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicateTagKeysException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a Direct Connect gateway, which enables you to connect @@ -9356,12 +9623,12 @@ type Lag struct { // The location of the LAG. Location *string `locationName:"location" type:"string"` - // The minimum number of physical connections that must be operational for the - // LAG itself to be operational. + // The minimum number of physical dedicated connections that must be operational + // for the LAG itself to be operational. MinimumLinks *int64 `locationName:"minimumLinks" type:"integer"` - // The number of physical connections bundled by the LAG, up to a maximum of - // 10. + // The number of physical dedicated connections bundled by the LAG, up to a + // maximum of 10. NumberOfConnections *int64 `locationName:"numberOfConnections" type:"integer"` // The ID of the AWS account that owns the LAG. @@ -9489,6 +9756,111 @@ func (s *Lag) SetTags(v []*Tag) *Lag { return s } +type ListVirtualInterfaceTestHistoryInput struct { + _ struct{} `type:"structure"` + + // The BGP peers that were placed in the DOWN state during the virtual interface + // failover test. + BgpPeers []*string `locationName:"bgpPeers" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + // + // If MaxResults is given a value larger than 100, only 100 results are returned. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The token for the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // The status of the virtual interface failover test. + Status *string `locationName:"status" type:"string"` + + // The ID of the virtual interface failover test. + TestId *string `locationName:"testId" type:"string"` + + // The ID of the virtual interface that was tested. + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"` +} + +// String returns the string representation +func (s ListVirtualInterfaceTestHistoryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVirtualInterfaceTestHistoryInput) GoString() string { + return s.String() +} + +// SetBgpPeers sets the BgpPeers field's value. +func (s *ListVirtualInterfaceTestHistoryInput) SetBgpPeers(v []*string) *ListVirtualInterfaceTestHistoryInput { + s.BgpPeers = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListVirtualInterfaceTestHistoryInput) SetMaxResults(v int64) *ListVirtualInterfaceTestHistoryInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListVirtualInterfaceTestHistoryInput) SetNextToken(v string) *ListVirtualInterfaceTestHistoryInput { + s.NextToken = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ListVirtualInterfaceTestHistoryInput) SetStatus(v string) *ListVirtualInterfaceTestHistoryInput { + s.Status = &v + return s +} + +// SetTestId sets the TestId field's value. +func (s *ListVirtualInterfaceTestHistoryInput) SetTestId(v string) *ListVirtualInterfaceTestHistoryInput { + s.TestId = &v + return s +} + +// SetVirtualInterfaceId sets the VirtualInterfaceId field's value. 
+func (s *ListVirtualInterfaceTestHistoryInput) SetVirtualInterfaceId(v string) *ListVirtualInterfaceTestHistoryInput { + s.VirtualInterfaceId = &v + return s +} + +type ListVirtualInterfaceTestHistoryOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The ID of the tested virtual interface. + VirtualInterfaceTestHistory []*VirtualInterfaceTestHistory `locationName:"virtualInterfaceTestHistory" type:"list"` +} + +// String returns the string representation +func (s ListVirtualInterfaceTestHistoryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVirtualInterfaceTestHistoryOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListVirtualInterfaceTestHistoryOutput) SetNextToken(v string) *ListVirtualInterfaceTestHistoryOutput { + s.NextToken = &v + return s +} + +// SetVirtualInterfaceTestHistory sets the VirtualInterfaceTestHistory field's value. +func (s *ListVirtualInterfaceTestHistoryOutput) SetVirtualInterfaceTestHistory(v []*VirtualInterfaceTestHistory) *ListVirtualInterfaceTestHistoryOutput { + s.VirtualInterfaceTestHistory = v + return s +} + // Information about a Letter of Authorization - Connecting Facility Assignment // (LOA-CFA) for a connection. type Loa struct { @@ -9685,7 +10057,9 @@ type NewPrivateVirtualInterface struct { // The ID of the virtual private gateway. VirtualGatewayId *string `locationName:"virtualGatewayId" type:"string"` - // The name of the virtual interface assigned by the customer network. + // The name of the virtual interface assigned by the customer network. The name + // has a maximum of 100 characters. The following are valid characters: a-z, + // 0-9 and a hyphen (-). // // VirtualInterfaceName is a required field VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` @@ -9835,7 +10209,9 @@ type NewPrivateVirtualInterfaceAllocation struct { // The tags associated with the private virtual interface. Tags []*Tag `locationName:"tags" min:"1" type:"list"` - // The name of the virtual interface assigned by the customer network. + // The name of the virtual interface assigned by the customer network. The name + // has a maximum of 100 characters. The following are valid characters: a-z, + // 0-9 and a hyphen (-). // // VirtualInterfaceName is a required field VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` @@ -9973,7 +10349,9 @@ type NewPublicVirtualInterface struct { // The tags associated with the public virtual interface. Tags []*Tag `locationName:"tags" min:"1" type:"list"` - // The name of the virtual interface assigned by the customer network. + // The name of the virtual interface assigned by the customer network. The name + // has a maximum of 100 characters. The following are valid characters: a-z, + // 0-9 and a hyphen (-). // // VirtualInterfaceName is a required field VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` @@ -10111,7 +10489,9 @@ type NewPublicVirtualInterfaceAllocation struct { // The tags associated with the public virtual interface. Tags []*Tag `locationName:"tags" min:"1" type:"list"` - // The name of the virtual interface assigned by the customer network. 
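// Illustrative sketch (not part of the vendored SDK): the
// ListVirtualInterfaceTestHistory shapes above are paginated through
// NextToken/MaxResults, so a caller drains the history with a simple token
// loop using the operation wrapper generated alongside these shapes. The
// client construction and interface ID are placeholders.

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/directconnect"
)

func exampleListTestHistory(virtualInterfaceID string) error {
	svc := directconnect.New(session.Must(session.NewSession()))

	input := (&directconnect.ListVirtualInterfaceTestHistoryInput{}).
		SetVirtualInterfaceId(virtualInterfaceID).
		SetMaxResults(25) // values above 100 are capped at 100 per the field docs

	for {
		out, err := svc.ListVirtualInterfaceTestHistory(input)
		if err != nil {
			return err
		}
		for _, t := range out.VirtualInterfaceTestHistory {
			fmt.Printf("test %s: %s\n", aws.StringValue(t.TestId), aws.StringValue(t.Status))
		}
		if out.NextToken == nil {
			return nil // no more pages
		}
		input.SetNextToken(aws.StringValue(out.NextToken))
	}
}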
+ // The name of the virtual interface assigned by the customer network. The name + // has a maximum of 100 characters. The following are valid characters: a-z, + // 0-9 and a hyphen (-). // // VirtualInterfaceName is a required field VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string" required:"true"` @@ -10250,7 +10630,9 @@ type NewTransitVirtualInterface struct { // The tags associated with the transitive virtual interface. Tags []*Tag `locationName:"tags" min:"1" type:"list"` - // The name of the virtual interface assigned by the customer network. + // The name of the virtual interface assigned by the customer network. The name + // has a maximum of 100 characters. The following are valid characters: a-z, + // 0-9 and a hyphen (-). VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string"` // The ID of the VLAN. @@ -10379,7 +10761,9 @@ type NewTransitVirtualInterfaceAllocation struct { // The tags associated with the transitive virtual interface. Tags []*Tag `locationName:"tags" min:"1" type:"list"` - // The name of the virtual interface assigned by the customer network. + // The name of the virtual interface assigned by the customer network. The name + // has a maximum of 100 characters. The following are valid characters: a-z, + // 0-9 and a hyphen (-). VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string"` // The ID of the VLAN. @@ -10534,8 +10918,8 @@ func (s *RouteFilterPrefix) SetCidr(v string) *RouteFilterPrefix { // A server-side error occurred. type ServerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10552,17 +10936,17 @@ func (s ServerException) GoString() string { func newErrorServerException(v protocol.ResponseMetadata) error { return &ServerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServerException) Code() string { +func (s *ServerException) Code() string { return "DirectConnectServerException" } // Message returns the exception's message. -func (s ServerException) Message() string { +func (s *ServerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10570,22 +10954,166 @@ func (s ServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServerException) OrigErr() error { +func (s *ServerException) OrigErr() error { return nil } -func (s ServerException) Error() string { +func (s *ServerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServerException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StartBgpFailoverTestInput struct { + _ struct{} `type:"structure"` + + // The BGP peers to place in the DOWN state. + BgpPeers []*string `locationName:"bgpPeers" type:"list"` + + // The time in minutes that the virtual interface failover test will last. + // + // Maximum value: 180 minutes (3 hours). 
+ // + // Default: 180 minutes (3 hours). + TestDurationInMinutes *int64 `locationName:"testDurationInMinutes" type:"integer"` + + // The ID of the virtual interface you want to test. + // + // VirtualInterfaceId is a required field + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartBgpFailoverTestInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartBgpFailoverTestInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartBgpFailoverTestInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartBgpFailoverTestInput"} + if s.VirtualInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBgpPeers sets the BgpPeers field's value. +func (s *StartBgpFailoverTestInput) SetBgpPeers(v []*string) *StartBgpFailoverTestInput { + s.BgpPeers = v + return s +} + +// SetTestDurationInMinutes sets the TestDurationInMinutes field's value. +func (s *StartBgpFailoverTestInput) SetTestDurationInMinutes(v int64) *StartBgpFailoverTestInput { + s.TestDurationInMinutes = &v + return s +} + +// SetVirtualInterfaceId sets the VirtualInterfaceId field's value. +func (s *StartBgpFailoverTestInput) SetVirtualInterfaceId(v string) *StartBgpFailoverTestInput { + s.VirtualInterfaceId = &v + return s +} + +type StartBgpFailoverTestOutput struct { + _ struct{} `type:"structure"` + + // Information about the virtual interface failover test. + VirtualInterfaceTest *VirtualInterfaceTestHistory `locationName:"virtualInterfaceTest" type:"structure"` +} + +// String returns the string representation +func (s StartBgpFailoverTestOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartBgpFailoverTestOutput) GoString() string { + return s.String() +} + +// SetVirtualInterfaceTest sets the VirtualInterfaceTest field's value. +func (s *StartBgpFailoverTestOutput) SetVirtualInterfaceTest(v *VirtualInterfaceTestHistory) *StartBgpFailoverTestOutput { + s.VirtualInterfaceTest = v + return s +} + +type StopBgpFailoverTestInput struct { + _ struct{} `type:"structure"` + + // The ID of the virtual interface you no longer want to test. + // + // VirtualInterfaceId is a required field + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopBgpFailoverTestInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopBgpFailoverTestInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopBgpFailoverTestInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopBgpFailoverTestInput"} + if s.VirtualInterfaceId == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualInterfaceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetVirtualInterfaceId sets the VirtualInterfaceId field's value. 
+func (s *StopBgpFailoverTestInput) SetVirtualInterfaceId(v string) *StopBgpFailoverTestInput { + s.VirtualInterfaceId = &v + return s +} + +type StopBgpFailoverTestOutput struct { + _ struct{} `type:"structure"` + + // Information about the virtual interface failover test. + VirtualInterfaceTest *VirtualInterfaceTestHistory `locationName:"virtualInterfaceTest" type:"structure"` +} + +// String returns the string representation +func (s StopBgpFailoverTestOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopBgpFailoverTestOutput) GoString() string { + return s.String() +} + +// SetVirtualInterfaceTest sets the VirtualInterfaceTest field's value. +func (s *StopBgpFailoverTestOutput) SetVirtualInterfaceTest(v *VirtualInterfaceTestHistory) *StopBgpFailoverTestOutput { + s.VirtualInterfaceTest = v + return s } // Information about a tag. @@ -10720,8 +11248,8 @@ func (s TagResourceOutput) GoString() string { // You have reached the limit on the number of tags that can be assigned. type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10738,17 +11266,17 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10756,22 +11284,22 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { +func (s *TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *TooManyTagsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -11079,7 +11607,9 @@ type UpdateVirtualInterfaceAttributesOutput struct { // The ID of the virtual interface. VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"` - // The name of the virtual interface assigned by the customer network. + // The name of the virtual interface assigned by the customer network. The name + // has a maximum of 100 characters. The following are valid characters: a-z, + // 0-9 and a hyphen (-). VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string"` // The state of the virtual interface. The following are the possible values: @@ -11387,7 +11917,9 @@ type VirtualInterface struct { // The ID of the virtual interface. 
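// Illustrative sketch (not part of the vendored SDK): the
// StartBgpFailoverTest / StopBgpFailoverTest shapes above drive the new
// virtual interface failover test. VirtualInterfaceId is the only required
// field, and TestDurationInMinutes defaults to and is capped at 180 minutes
// per the field documentation. Client construction and IDs are placeholders.

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/directconnect"
)

func exampleBgpFailoverTest(virtualInterfaceID string) error {
	svc := directconnect.New(session.Must(session.NewSession()))

	start, err := svc.StartBgpFailoverTest((&directconnect.StartBgpFailoverTestInput{}).
		SetVirtualInterfaceId(virtualInterfaceID).
		SetTestDurationInMinutes(60))
	if err != nil {
		return err
	}
	if t := start.VirtualInterfaceTest; t != nil {
		fmt.Printf("started test %s (status %s)\n",
			aws.StringValue(t.TestId), aws.StringValue(t.Status))
	}

	// Cut the test short if needed; a VirtualInterfaceTestHistory record is
	// returned here as well.
	stop, err := svc.StopBgpFailoverTest((&directconnect.StopBgpFailoverTestInput{}).
		SetVirtualInterfaceId(virtualInterfaceID))
	if err != nil {
		return err
	}
	if t := stop.VirtualInterfaceTest; t != nil {
		fmt.Printf("stopped test %s (status %s)\n",
			aws.StringValue(t.TestId), aws.StringValue(t.Status))
	}
	return nil
}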
VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"` - // The name of the virtual interface assigned by the customer network. + // The name of the virtual interface assigned by the customer network. The name + // has a maximum of 100 characters. The following are valid characters: a-z, + // 0-9 and a hyphen (-). VirtualInterfaceName *string `locationName:"virtualInterfaceName" type:"string"` // The state of the virtual interface. The following are the possible values: @@ -11583,6 +12115,94 @@ func (s *VirtualInterface) SetVlan(v int64) *VirtualInterface { return s } +// Information about the virtual interface failover test. +type VirtualInterfaceTestHistory struct { + _ struct{} `type:"structure"` + + // The BGP peers that were put in the DOWN state as part of the virtual interface + // failover test. + BgpPeers []*string `locationName:"bgpPeers" type:"list"` + + // The time that the virtual interface moves out of the DOWN state. + EndTime *time.Time `locationName:"endTime" type:"timestamp"` + + // The owner ID of the tested virtual interface. + OwnerAccount *string `locationName:"ownerAccount" type:"string"` + + // The time that the virtual interface moves to the DOWN state. + StartTime *time.Time `locationName:"startTime" type:"timestamp"` + + // The status of the virtual interface failover test. + Status *string `locationName:"status" type:"string"` + + // The time that the virtual interface failover test ran in minutes. + TestDurationInMinutes *int64 `locationName:"testDurationInMinutes" type:"integer"` + + // The ID of the virtual interface failover test. + TestId *string `locationName:"testId" type:"string"` + + // The ID of the tested virtual interface. + VirtualInterfaceId *string `locationName:"virtualInterfaceId" type:"string"` +} + +// String returns the string representation +func (s VirtualInterfaceTestHistory) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VirtualInterfaceTestHistory) GoString() string { + return s.String() +} + +// SetBgpPeers sets the BgpPeers field's value. +func (s *VirtualInterfaceTestHistory) SetBgpPeers(v []*string) *VirtualInterfaceTestHistory { + s.BgpPeers = v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *VirtualInterfaceTestHistory) SetEndTime(v time.Time) *VirtualInterfaceTestHistory { + s.EndTime = &v + return s +} + +// SetOwnerAccount sets the OwnerAccount field's value. +func (s *VirtualInterfaceTestHistory) SetOwnerAccount(v string) *VirtualInterfaceTestHistory { + s.OwnerAccount = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *VirtualInterfaceTestHistory) SetStartTime(v time.Time) *VirtualInterfaceTestHistory { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *VirtualInterfaceTestHistory) SetStatus(v string) *VirtualInterfaceTestHistory { + s.Status = &v + return s +} + +// SetTestDurationInMinutes sets the TestDurationInMinutes field's value. +func (s *VirtualInterfaceTestHistory) SetTestDurationInMinutes(v int64) *VirtualInterfaceTestHistory { + s.TestDurationInMinutes = &v + return s +} + +// SetTestId sets the TestId field's value. +func (s *VirtualInterfaceTestHistory) SetTestId(v string) *VirtualInterfaceTestHistory { + s.TestId = &v + return s +} + +// SetVirtualInterfaceId sets the VirtualInterfaceId field's value. 
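// Illustrative sketch (not part of the vendored SDK): a client-side check
// that mirrors the VirtualInterfaceName constraint documented above (at most
// 100 characters drawn from a-z, 0-9 and the hyphen). Purely a convenience
// for callers; the service enforces the rule on its own.

import (
	"fmt"
	"regexp"
)

var virtualInterfaceNameRE = regexp.MustCompile(`^[a-z0-9-]{1,100}$`)

func validateVirtualInterfaceName(name string) error {
	if !virtualInterfaceNameRE.MatchString(name) {
		return fmt.Errorf("invalid virtual interface name %q: expected 1-100 characters from a-z, 0-9 and '-'", name)
	}
	return nil
}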
+func (s *VirtualInterfaceTestHistory) SetVirtualInterfaceId(v string) *VirtualInterfaceTestHistory { + s.VirtualInterfaceId = &v + return s +} + const ( // AddressFamilyIpv4 is a AddressFamily enum value AddressFamilyIpv4 = "ipv4" @@ -11591,6 +12211,14 @@ const ( AddressFamilyIpv6 = "ipv6" ) +// AddressFamily_Values returns all elements of the AddressFamily enum +func AddressFamily_Values() []string { + return []string{ + AddressFamilyIpv4, + AddressFamilyIpv6, + } +} + const ( // BGPPeerStateVerifying is a BGPPeerState enum value BGPPeerStateVerifying = "verifying" @@ -11608,6 +12236,17 @@ const ( BGPPeerStateDeleted = "deleted" ) +// BGPPeerState_Values returns all elements of the BGPPeerState enum +func BGPPeerState_Values() []string { + return []string{ + BGPPeerStateVerifying, + BGPPeerStatePending, + BGPPeerStateAvailable, + BGPPeerStateDeleting, + BGPPeerStateDeleted, + } +} + const ( // BGPStatusUp is a BGPStatus enum value BGPStatusUp = "up" @@ -11619,6 +12258,15 @@ const ( BGPStatusUnknown = "unknown" ) +// BGPStatus_Values returns all elements of the BGPStatus enum +func BGPStatus_Values() []string { + return []string{ + BGPStatusUp, + BGPStatusDown, + BGPStatusUnknown, + } +} + const ( // ConnectionStateOrdering is a ConnectionState enum value ConnectionStateOrdering = "ordering" @@ -11648,6 +12296,21 @@ const ( ConnectionStateUnknown = "unknown" ) +// ConnectionState_Values returns all elements of the ConnectionState enum +func ConnectionState_Values() []string { + return []string{ + ConnectionStateOrdering, + ConnectionStateRequested, + ConnectionStatePending, + ConnectionStateAvailable, + ConnectionStateDown, + ConnectionStateDeleting, + ConnectionStateDeleted, + ConnectionStateRejected, + ConnectionStateUnknown, + } +} + const ( // GatewayAssociationProposalStateRequested is a GatewayAssociationProposalState enum value GatewayAssociationProposalStateRequested = "requested" @@ -11659,6 +12322,15 @@ const ( GatewayAssociationProposalStateDeleted = "deleted" ) +// GatewayAssociationProposalState_Values returns all elements of the GatewayAssociationProposalState enum +func GatewayAssociationProposalState_Values() []string { + return []string{ + GatewayAssociationProposalStateRequested, + GatewayAssociationProposalStateAccepted, + GatewayAssociationProposalStateDeleted, + } +} + const ( // GatewayAssociationStateAssociating is a GatewayAssociationState enum value GatewayAssociationStateAssociating = "associating" @@ -11676,6 +12348,17 @@ const ( GatewayAssociationStateUpdating = "updating" ) +// GatewayAssociationState_Values returns all elements of the GatewayAssociationState enum +func GatewayAssociationState_Values() []string { + return []string{ + GatewayAssociationStateAssociating, + GatewayAssociationStateAssociated, + GatewayAssociationStateDisassociating, + GatewayAssociationStateDisassociated, + GatewayAssociationStateUpdating, + } +} + const ( // GatewayAttachmentStateAttaching is a GatewayAttachmentState enum value GatewayAttachmentStateAttaching = "attaching" @@ -11690,6 +12373,16 @@ const ( GatewayAttachmentStateDetached = "detached" ) +// GatewayAttachmentState_Values returns all elements of the GatewayAttachmentState enum +func GatewayAttachmentState_Values() []string { + return []string{ + GatewayAttachmentStateAttaching, + GatewayAttachmentStateAttached, + GatewayAttachmentStateDetaching, + GatewayAttachmentStateDetached, + } +} + const ( // GatewayAttachmentTypeTransitVirtualInterface is a GatewayAttachmentType enum value 
GatewayAttachmentTypeTransitVirtualInterface = "TransitVirtualInterface" @@ -11698,6 +12391,14 @@ const ( GatewayAttachmentTypePrivateVirtualInterface = "PrivateVirtualInterface" ) +// GatewayAttachmentType_Values returns all elements of the GatewayAttachmentType enum +func GatewayAttachmentType_Values() []string { + return []string{ + GatewayAttachmentTypeTransitVirtualInterface, + GatewayAttachmentTypePrivateVirtualInterface, + } +} + const ( // GatewayStatePending is a GatewayState enum value GatewayStatePending = "pending" @@ -11712,6 +12413,16 @@ const ( GatewayStateDeleted = "deleted" ) +// GatewayState_Values returns all elements of the GatewayState enum +func GatewayState_Values() []string { + return []string{ + GatewayStatePending, + GatewayStateAvailable, + GatewayStateDeleting, + GatewayStateDeleted, + } +} + const ( // GatewayTypeVirtualPrivateGateway is a GatewayType enum value GatewayTypeVirtualPrivateGateway = "virtualPrivateGateway" @@ -11720,6 +12431,14 @@ const ( GatewayTypeTransitGateway = "transitGateway" ) +// GatewayType_Values returns all elements of the GatewayType enum +func GatewayType_Values() []string { + return []string{ + GatewayTypeVirtualPrivateGateway, + GatewayTypeTransitGateway, + } +} + const ( // HasLogicalRedundancyUnknown is a HasLogicalRedundancy enum value HasLogicalRedundancyUnknown = "unknown" @@ -11731,6 +12450,15 @@ const ( HasLogicalRedundancyNo = "no" ) +// HasLogicalRedundancy_Values returns all elements of the HasLogicalRedundancy enum +func HasLogicalRedundancy_Values() []string { + return []string{ + HasLogicalRedundancyUnknown, + HasLogicalRedundancyYes, + HasLogicalRedundancyNo, + } +} + const ( // InterconnectStateRequested is a InterconnectState enum value InterconnectStateRequested = "requested" @@ -11754,6 +12482,19 @@ const ( InterconnectStateUnknown = "unknown" ) +// InterconnectState_Values returns all elements of the InterconnectState enum +func InterconnectState_Values() []string { + return []string{ + InterconnectStateRequested, + InterconnectStatePending, + InterconnectStateAvailable, + InterconnectStateDown, + InterconnectStateDeleting, + InterconnectStateDeleted, + InterconnectStateUnknown, + } +} + const ( // LagStateRequested is a LagState enum value LagStateRequested = "requested" @@ -11777,11 +12518,31 @@ const ( LagStateUnknown = "unknown" ) +// LagState_Values returns all elements of the LagState enum +func LagState_Values() []string { + return []string{ + LagStateRequested, + LagStatePending, + LagStateAvailable, + LagStateDown, + LagStateDeleting, + LagStateDeleted, + LagStateUnknown, + } +} + const ( // LoaContentTypeApplicationPdf is a LoaContentType enum value LoaContentTypeApplicationPdf = "application/pdf" ) +// LoaContentType_Values returns all elements of the LoaContentType enum +func LoaContentType_Values() []string { + return []string{ + LoaContentTypeApplicationPdf, + } +} + const ( // VirtualInterfaceStateConfirming is a VirtualInterfaceState enum value VirtualInterfaceStateConfirming = "confirming" @@ -11810,3 +12571,18 @@ const ( // VirtualInterfaceStateUnknown is a VirtualInterfaceState enum value VirtualInterfaceStateUnknown = "unknown" ) + +// VirtualInterfaceState_Values returns all elements of the VirtualInterfaceState enum +func VirtualInterfaceState_Values() []string { + return []string{ + VirtualInterfaceStateConfirming, + VirtualInterfaceStateVerifying, + VirtualInterfaceStatePending, + VirtualInterfaceStateAvailable, + VirtualInterfaceStateDown, + VirtualInterfaceStateDeleting, + 
VirtualInterfaceStateDeleted, + VirtualInterfaceStateRejected, + VirtualInterfaceStateUnknown, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go b/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go index a0efad6f6..1ed5cd0de 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go index ce66195e4..b53745a30 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go @@ -5569,8 +5569,8 @@ func (s *AcceptSharedDirectoryOutput) SetSharedDirectory(v *SharedDirectory) *Ac // You do not have sufficient access to perform this action. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -5591,17 +5591,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5609,22 +5609,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } type AddIpRoutesInput struct { @@ -5872,8 +5872,8 @@ func (s *Attribute) SetValue(v string) *Attribute { // An authentication error occurred. type AuthenticationFailedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The textual message for the exception. 
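// Illustrative sketch (not part of the vendored SDK): the newly generated
// *_Values() helpers above return every member of an enum, which is handy for
// validating user-supplied strings (for example as the slice handed to the
// Terraform plugin SDK's validation.StringInSlice). The helper below is a
// plain membership check to keep the sketch dependency-free.

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/directconnect"
)

func validateAddressFamily(v string) error {
	for _, valid := range directconnect.AddressFamily_Values() {
		if v == valid {
			return nil
		}
	}
	return fmt.Errorf("invalid address family %q: expected one of %v",
		v, directconnect.AddressFamily_Values())
}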
Message_ *string `locationName:"Message" type:"string"` @@ -5894,17 +5894,17 @@ func (s AuthenticationFailedException) GoString() string { func newErrorAuthenticationFailedException(v protocol.ResponseMetadata) error { return &AuthenticationFailedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AuthenticationFailedException) Code() string { +func (s *AuthenticationFailedException) Code() string { return "AuthenticationFailedException" } // Message returns the exception's message. -func (s AuthenticationFailedException) Message() string { +func (s *AuthenticationFailedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5912,22 +5912,22 @@ func (s AuthenticationFailedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AuthenticationFailedException) OrigErr() error { +func (s *AuthenticationFailedException) OrigErr() error { return nil } -func (s AuthenticationFailedException) Error() string { +func (s *AuthenticationFailedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s AuthenticationFailedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AuthenticationFailedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AuthenticationFailedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AuthenticationFailedException) RequestID() string { + return s.RespMetadata.RequestID } type CancelSchemaExtensionInput struct { @@ -6067,8 +6067,8 @@ func (s *Certificate) SetStateReason(v string) *Certificate { // The certificate has already been registered into the system. type CertificateAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -6089,17 +6089,17 @@ func (s CertificateAlreadyExistsException) GoString() string { func newErrorCertificateAlreadyExistsException(v protocol.ResponseMetadata) error { return &CertificateAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CertificateAlreadyExistsException) Code() string { +func (s *CertificateAlreadyExistsException) Code() string { return "CertificateAlreadyExistsException" } // Message returns the exception's message. -func (s CertificateAlreadyExistsException) Message() string { +func (s *CertificateAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6107,28 +6107,28 @@ func (s CertificateAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CertificateAlreadyExistsException) OrigErr() error { +func (s *CertificateAlreadyExistsException) OrigErr() error { return nil } -func (s CertificateAlreadyExistsException) Error() string { +func (s *CertificateAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s CertificateAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CertificateAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CertificateAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *CertificateAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The certificate is not present in the system for describe or deregister activities. type CertificateDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -6149,17 +6149,17 @@ func (s CertificateDoesNotExistException) GoString() string { func newErrorCertificateDoesNotExistException(v protocol.ResponseMetadata) error { return &CertificateDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CertificateDoesNotExistException) Code() string { +func (s *CertificateDoesNotExistException) Code() string { return "CertificateDoesNotExistException" } // Message returns the exception's message. -func (s CertificateDoesNotExistException) Message() string { +func (s *CertificateDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6167,29 +6167,29 @@ func (s CertificateDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CertificateDoesNotExistException) OrigErr() error { +func (s *CertificateDoesNotExistException) OrigErr() error { return nil } -func (s CertificateDoesNotExistException) Error() string { +func (s *CertificateDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s CertificateDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CertificateDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CertificateDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *CertificateDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The certificate is being used for the LDAP security connection and cannot // be removed without disabling LDAP security. type CertificateInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -6210,17 +6210,17 @@ func (s CertificateInUseException) GoString() string { func newErrorCertificateInUseException(v protocol.ResponseMetadata) error { return &CertificateInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CertificateInUseException) Code() string { +func (s *CertificateInUseException) Code() string { return "CertificateInUseException" } // Message returns the exception's message. 
-func (s CertificateInUseException) Message() string { +func (s *CertificateInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6228,22 +6228,22 @@ func (s CertificateInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CertificateInUseException) OrigErr() error { +func (s *CertificateInUseException) OrigErr() error { return nil } -func (s CertificateInUseException) Error() string { +func (s *CertificateInUseException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s CertificateInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CertificateInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CertificateInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *CertificateInUseException) RequestID() string { + return s.RespMetadata.RequestID } // Contains general information about a certificate. @@ -6300,8 +6300,8 @@ func (s *CertificateInfo) SetState(v string) *CertificateInfo { // The certificate could not be added because the certificate limit has been // reached. type CertificateLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -6322,17 +6322,17 @@ func (s CertificateLimitExceededException) GoString() string { func newErrorCertificateLimitExceededException(v protocol.ResponseMetadata) error { return &CertificateLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CertificateLimitExceededException) Code() string { +func (s *CertificateLimitExceededException) Code() string { return "CertificateLimitExceededException" } // Message returns the exception's message. -func (s CertificateLimitExceededException) Message() string { +func (s *CertificateLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6340,28 +6340,28 @@ func (s CertificateLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CertificateLimitExceededException) OrigErr() error { +func (s *CertificateLimitExceededException) OrigErr() error { return nil } -func (s CertificateLimitExceededException) Error() string { +func (s *CertificateLimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s CertificateLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CertificateLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CertificateLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *CertificateLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // A client exception has occurred. 
type ClientException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -6382,17 +6382,17 @@ func (s ClientException) GoString() string { func newErrorClientException(v protocol.ResponseMetadata) error { return &ClientException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClientException) Code() string { +func (s *ClientException) Code() string { return "ClientException" } // Message returns the exception's message. -func (s ClientException) Message() string { +func (s *ClientException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6400,22 +6400,22 @@ func (s ClientException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClientException) OrigErr() error { +func (s *ClientException) OrigErr() error { return nil } -func (s ClientException) Error() string { +func (s *ClientException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ClientException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClientException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClientException) RequestID() string { - return s.respMetadata.RequestID +func (s *ClientException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about a computer account in a directory. @@ -8813,8 +8813,8 @@ func (s *DescribeTrustsOutput) SetTrusts(v []*Trust) *DescribeTrustsOutput { // The specified directory has already been shared with this AWS account. type DirectoryAlreadySharedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -8835,17 +8835,17 @@ func (s DirectoryAlreadySharedException) GoString() string { func newErrorDirectoryAlreadySharedException(v protocol.ResponseMetadata) error { return &DirectoryAlreadySharedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DirectoryAlreadySharedException) Code() string { +func (s *DirectoryAlreadySharedException) Code() string { return "DirectoryAlreadySharedException" } // Message returns the exception's message. -func (s DirectoryAlreadySharedException) Message() string { +func (s *DirectoryAlreadySharedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8853,22 +8853,22 @@ func (s DirectoryAlreadySharedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DirectoryAlreadySharedException) OrigErr() error { +func (s *DirectoryAlreadySharedException) OrigErr() error { return nil } -func (s DirectoryAlreadySharedException) Error() string { +func (s *DirectoryAlreadySharedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s DirectoryAlreadySharedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DirectoryAlreadySharedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DirectoryAlreadySharedException) RequestID() string { - return s.respMetadata.RequestID +func (s *DirectoryAlreadySharedException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information for the ConnectDirectory operation when an AD Connector @@ -9284,8 +9284,8 @@ func (s *DirectoryDescription) SetVpcSettings(v *DirectoryVpcSettingsDescription // The specified directory does not exist in the system. type DirectoryDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -9306,17 +9306,17 @@ func (s DirectoryDoesNotExistException) GoString() string { func newErrorDirectoryDoesNotExistException(v protocol.ResponseMetadata) error { return &DirectoryDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DirectoryDoesNotExistException) Code() string { +func (s *DirectoryDoesNotExistException) Code() string { return "DirectoryDoesNotExistException" } // Message returns the exception's message. -func (s DirectoryDoesNotExistException) Message() string { +func (s *DirectoryDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9324,30 +9324,30 @@ func (s DirectoryDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DirectoryDoesNotExistException) OrigErr() error { +func (s *DirectoryDoesNotExistException) OrigErr() error { return nil } -func (s DirectoryDoesNotExistException) Error() string { +func (s *DirectoryDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s DirectoryDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DirectoryDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DirectoryDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *DirectoryDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The maximum number of directories in the region has been reached. You can // use the GetDirectoryLimits operation to determine your directory limits in // the region. type DirectoryLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -9368,17 +9368,17 @@ func (s DirectoryLimitExceededException) GoString() string { func newErrorDirectoryLimitExceededException(v protocol.ResponseMetadata) error { return &DirectoryLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s DirectoryLimitExceededException) Code() string { +func (s *DirectoryLimitExceededException) Code() string { return "DirectoryLimitExceededException" } // Message returns the exception's message. -func (s DirectoryLimitExceededException) Message() string { +func (s *DirectoryLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9386,22 +9386,22 @@ func (s DirectoryLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DirectoryLimitExceededException) OrigErr() error { +func (s *DirectoryLimitExceededException) OrigErr() error { return nil } -func (s DirectoryLimitExceededException) Error() string { +func (s *DirectoryLimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s DirectoryLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DirectoryLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DirectoryLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *DirectoryLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Contains directory limit information for a Region. @@ -9503,8 +9503,8 @@ func (s *DirectoryLimits) SetConnectedDirectoriesLimitReached(v bool) *Directory // The specified directory has not been shared with this AWS account. type DirectoryNotSharedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -9525,17 +9525,17 @@ func (s DirectoryNotSharedException) GoString() string { func newErrorDirectoryNotSharedException(v protocol.ResponseMetadata) error { return &DirectoryNotSharedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DirectoryNotSharedException) Code() string { +func (s *DirectoryNotSharedException) Code() string { return "DirectoryNotSharedException" } // Message returns the exception's message. -func (s DirectoryNotSharedException) Message() string { +func (s *DirectoryNotSharedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9543,28 +9543,28 @@ func (s DirectoryNotSharedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DirectoryNotSharedException) OrigErr() error { +func (s *DirectoryNotSharedException) OrigErr() error { return nil } -func (s DirectoryNotSharedException) Error() string { +func (s *DirectoryNotSharedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s DirectoryNotSharedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DirectoryNotSharedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
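// Illustrative sketch (not part of the vendored SDK): because each Directory
// Service exception above now implements its methods on a pointer receiver
// and is constructed as a pointer, generic handling through the
// awserr.RequestFailure interface (or errors.As against a concrete type)
// continues to work after this change. DescribeDirectories is just an
// illustrative operation; the client construction is a placeholder.

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/directoryservice"
)

func exampleDescribeDirectories() {
	svc := directoryservice.New(session.Must(session.NewSession()))

	_, err := svc.DescribeDirectories(&directoryservice.DescribeDirectoriesInput{})
	if err != nil {
		if rf, ok := err.(awserr.RequestFailure); ok {
			// StatusCode() and RequestID() read the exported RespMetadata
			// shown in the hunks above.
			log.Printf("%s (HTTP %d, request %s): %s",
				rf.Code(), rf.StatusCode(), rf.RequestID(), rf.Message())
			return
		}
		log.Printf("DescribeDirectories failed: %v", err)
	}
}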
-func (s DirectoryNotSharedException) RequestID() string { - return s.respMetadata.RequestID +func (s *DirectoryNotSharedException) RequestID() string { + return s.RespMetadata.RequestID } // The specified directory is unavailable or could not be found. type DirectoryUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -9585,17 +9585,17 @@ func (s DirectoryUnavailableException) GoString() string { func newErrorDirectoryUnavailableException(v protocol.ResponseMetadata) error { return &DirectoryUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DirectoryUnavailableException) Code() string { +func (s *DirectoryUnavailableException) Code() string { return "DirectoryUnavailableException" } // Message returns the exception's message. -func (s DirectoryUnavailableException) Message() string { +func (s *DirectoryUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9603,22 +9603,22 @@ func (s DirectoryUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DirectoryUnavailableException) OrigErr() error { +func (s *DirectoryUnavailableException) OrigErr() error { return nil } -func (s DirectoryUnavailableException) Error() string { +func (s *DirectoryUnavailableException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s DirectoryUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DirectoryUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DirectoryUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *DirectoryUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation. @@ -10043,8 +10043,8 @@ func (s *DomainController) SetVpcId(v string) *DomainController { // The maximum allowed number of domain controllers per directory was exceeded. // The default limit per directory is 20 domain controllers. type DomainControllerLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -10065,17 +10065,17 @@ func (s DomainControllerLimitExceededException) GoString() string { func newErrorDomainControllerLimitExceededException(v protocol.ResponseMetadata) error { return &DomainControllerLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DomainControllerLimitExceededException) Code() string { +func (s *DomainControllerLimitExceededException) Code() string { return "DomainControllerLimitExceededException" } // Message returns the exception's message. 
-func (s DomainControllerLimitExceededException) Message() string { +func (s *DomainControllerLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10083,22 +10083,22 @@ func (s DomainControllerLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DomainControllerLimitExceededException) OrigErr() error { +func (s *DomainControllerLimitExceededException) OrigErr() error { return nil } -func (s DomainControllerLimitExceededException) Error() string { +func (s *DomainControllerLimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s DomainControllerLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DomainControllerLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DomainControllerLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *DomainControllerLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type EnableLDAPSInput struct { @@ -10330,8 +10330,8 @@ func (s EnableSsoOutput) GoString() string { // The specified entity already exists. type EntityAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -10352,17 +10352,17 @@ func (s EntityAlreadyExistsException) GoString() string { func newErrorEntityAlreadyExistsException(v protocol.ResponseMetadata) error { return &EntityAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EntityAlreadyExistsException) Code() string { +func (s *EntityAlreadyExistsException) Code() string { return "EntityAlreadyExistsException" } // Message returns the exception's message. -func (s EntityAlreadyExistsException) Message() string { +func (s *EntityAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10370,28 +10370,28 @@ func (s EntityAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EntityAlreadyExistsException) OrigErr() error { +func (s *EntityAlreadyExistsException) OrigErr() error { return nil } -func (s EntityAlreadyExistsException) Error() string { +func (s *EntityAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s EntityAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EntityAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EntityAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *EntityAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The specified entity could not be found. 
type EntityDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -10412,17 +10412,17 @@ func (s EntityDoesNotExistException) GoString() string { func newErrorEntityDoesNotExistException(v protocol.ResponseMetadata) error { return &EntityDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EntityDoesNotExistException) Code() string { +func (s *EntityDoesNotExistException) Code() string { return "EntityDoesNotExistException" } // Message returns the exception's message. -func (s EntityDoesNotExistException) Message() string { +func (s *EntityDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10430,22 +10430,22 @@ func (s EntityDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EntityDoesNotExistException) OrigErr() error { +func (s *EntityDoesNotExistException) OrigErr() error { return nil } -func (s EntityDoesNotExistException) Error() string { +func (s *EntityDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s EntityDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EntityDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EntityDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *EntityDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // Information about SNS topic and AWS Directory Service directory associations. @@ -10615,8 +10615,8 @@ func (s *GetSnapshotLimitsOutput) SetSnapshotLimits(v *SnapshotLimits) *GetSnaps // The account does not have sufficient permission to perform the operation. type InsufficientPermissionsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -10637,17 +10637,17 @@ func (s InsufficientPermissionsException) GoString() string { func newErrorInsufficientPermissionsException(v protocol.ResponseMetadata) error { return &InsufficientPermissionsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InsufficientPermissionsException) Code() string { +func (s *InsufficientPermissionsException) Code() string { return "InsufficientPermissionsException" } // Message returns the exception's message. -func (s InsufficientPermissionsException) Message() string { +func (s *InsufficientPermissionsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10655,28 +10655,28 @@ func (s InsufficientPermissionsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InsufficientPermissionsException) OrigErr() error { +func (s *InsufficientPermissionsException) OrigErr() error { return nil } -func (s InsufficientPermissionsException) Error() string { +func (s *InsufficientPermissionsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InsufficientPermissionsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InsufficientPermissionsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InsufficientPermissionsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InsufficientPermissionsException) RequestID() string { + return s.RespMetadata.RequestID } // The certificate PEM that was provided has incorrect encoding. type InvalidCertificateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -10697,17 +10697,17 @@ func (s InvalidCertificateException) GoString() string { func newErrorInvalidCertificateException(v protocol.ResponseMetadata) error { return &InvalidCertificateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidCertificateException) Code() string { +func (s *InvalidCertificateException) Code() string { return "InvalidCertificateException" } // Message returns the exception's message. -func (s InvalidCertificateException) Message() string { +func (s *InvalidCertificateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10715,29 +10715,29 @@ func (s InvalidCertificateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidCertificateException) OrigErr() error { +func (s *InvalidCertificateException) OrigErr() error { return nil } -func (s InvalidCertificateException) Error() string { +func (s *InvalidCertificateException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidCertificateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidCertificateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidCertificateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidCertificateException) RequestID() string { + return s.RespMetadata.RequestID } // The LDAP activities could not be performed because they are limited by the // LDAPS status. type InvalidLDAPSStatusException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -10758,17 +10758,17 @@ func (s InvalidLDAPSStatusException) GoString() string { func newErrorInvalidLDAPSStatusException(v protocol.ResponseMetadata) error { return &InvalidLDAPSStatusException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidLDAPSStatusException) Code() string { +func (s *InvalidLDAPSStatusException) Code() string { return "InvalidLDAPSStatusException" } // Message returns the exception's message. -func (s InvalidLDAPSStatusException) Message() string { +func (s *InvalidLDAPSStatusException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10776,28 +10776,28 @@ func (s InvalidLDAPSStatusException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidLDAPSStatusException) OrigErr() error { +func (s *InvalidLDAPSStatusException) OrigErr() error { return nil } -func (s InvalidLDAPSStatusException) Error() string { +func (s *InvalidLDAPSStatusException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidLDAPSStatusException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidLDAPSStatusException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidLDAPSStatusException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidLDAPSStatusException) RequestID() string { + return s.RespMetadata.RequestID } // The NextToken value is not valid. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -10818,17 +10818,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10836,28 +10836,28 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // One or more parameters are not valid. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. 
Message_ *string `locationName:"Message" type:"string"` @@ -10878,17 +10878,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10896,29 +10896,29 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The new password provided by the user does not meet the password complexity // requirements defined in your directory. type InvalidPasswordException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -10939,17 +10939,17 @@ func (s InvalidPasswordException) GoString() string { func newErrorInvalidPasswordException(v protocol.ResponseMetadata) error { return &InvalidPasswordException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPasswordException) Code() string { +func (s *InvalidPasswordException) Code() string { return "InvalidPasswordException" } // Message returns the exception's message. -func (s InvalidPasswordException) Message() string { +func (s *InvalidPasswordException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10957,28 +10957,28 @@ func (s InvalidPasswordException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPasswordException) OrigErr() error { +func (s *InvalidPasswordException) OrigErr() error { return nil } -func (s InvalidPasswordException) Error() string { +func (s *InvalidPasswordException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPasswordException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPasswordException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidPasswordException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPasswordException) RequestID() string { + return s.RespMetadata.RequestID } // The specified shared target is not valid. type InvalidTargetException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -10999,17 +10999,17 @@ func (s InvalidTargetException) GoString() string { func newErrorInvalidTargetException(v protocol.ResponseMetadata) error { return &InvalidTargetException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTargetException) Code() string { +func (s *InvalidTargetException) Code() string { return "InvalidTargetException" } // Message returns the exception's message. -func (s InvalidTargetException) Message() string { +func (s *InvalidTargetException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11017,22 +11017,22 @@ func (s InvalidTargetException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTargetException) OrigErr() error { +func (s *InvalidTargetException) OrigErr() error { return nil } -func (s InvalidTargetException) Error() string { +func (s *InvalidTargetException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTargetException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTargetException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTargetException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTargetException) RequestID() string { + return s.RespMetadata.RequestID } // IP address block. This is often the address block of the DNS server used @@ -11143,8 +11143,8 @@ func (s *IpRouteInfo) SetIpRouteStatusReason(v string) *IpRouteInfo { // The maximum allowed number of IP addresses was exceeded. The default limit // is 100 IP address blocks. type IpRouteLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -11165,17 +11165,17 @@ func (s IpRouteLimitExceededException) GoString() string { func newErrorIpRouteLimitExceededException(v protocol.ResponseMetadata) error { return &IpRouteLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IpRouteLimitExceededException) Code() string { +func (s *IpRouteLimitExceededException) Code() string { return "IpRouteLimitExceededException" } // Message returns the exception's message. -func (s IpRouteLimitExceededException) Message() string { +func (s *IpRouteLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11183,22 +11183,22 @@ func (s IpRouteLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s IpRouteLimitExceededException) OrigErr() error { +func (s *IpRouteLimitExceededException) OrigErr() error { return nil } -func (s IpRouteLimitExceededException) Error() string { +func (s *IpRouteLimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s IpRouteLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IpRouteLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IpRouteLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *IpRouteLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Contains general information about the LDAPS settings. @@ -11734,8 +11734,8 @@ func (s *LogSubscription) SetSubscriptionCreatedDateTime(v time.Time) *LogSubscr // The LDAP activities could not be performed because at least one valid certificate // must be registered with the system. type NoAvailableCertificateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -11756,17 +11756,17 @@ func (s NoAvailableCertificateException) GoString() string { func newErrorNoAvailableCertificateException(v protocol.ResponseMetadata) error { return &NoAvailableCertificateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoAvailableCertificateException) Code() string { +func (s *NoAvailableCertificateException) Code() string { return "NoAvailableCertificateException" } // Message returns the exception's message. -func (s NoAvailableCertificateException) Message() string { +func (s *NoAvailableCertificateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11774,28 +11774,28 @@ func (s NoAvailableCertificateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoAvailableCertificateException) OrigErr() error { +func (s *NoAvailableCertificateException) OrigErr() error { return nil } -func (s NoAvailableCertificateException) Error() string { +func (s *NoAvailableCertificateException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s NoAvailableCertificateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoAvailableCertificateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoAvailableCertificateException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoAvailableCertificateException) RequestID() string { + return s.RespMetadata.RequestID } // Exception encountered while trying to access your AWS organization. type OrganizationsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. 
Message_ *string `locationName:"Message" type:"string"` @@ -11816,17 +11816,17 @@ func (s OrganizationsException) GoString() string { func newErrorOrganizationsException(v protocol.ResponseMetadata) error { return &OrganizationsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OrganizationsException) Code() string { +func (s *OrganizationsException) Code() string { return "OrganizationsException" } // Message returns the exception's message. -func (s OrganizationsException) Message() string { +func (s *OrganizationsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11834,22 +11834,22 @@ func (s OrganizationsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OrganizationsException) OrigErr() error { +func (s *OrganizationsException) OrigErr() error { return nil } -func (s OrganizationsException) Error() string { +func (s *OrganizationsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s OrganizationsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OrganizationsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OrganizationsException) RequestID() string { - return s.respMetadata.RequestID +func (s *OrganizationsException) RequestID() string { + return s.RespMetadata.RequestID } // Describes the directory owner account details that have been shared to the @@ -12603,8 +12603,8 @@ func (s *SchemaExtensionInfo) SetStartDateTime(v time.Time) *SchemaExtensionInfo // An exception has occurred in AWS Directory Service. type ServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -12625,17 +12625,17 @@ func (s ServiceException) GoString() string { func newErrorServiceException(v protocol.ResponseMetadata) error { return &ServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceException) Code() string { +func (s *ServiceException) Code() string { return "ServiceException" } // Message returns the exception's message. -func (s ServiceException) Message() string { +func (s *ServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12643,22 +12643,22 @@ func (s ServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceException) OrigErr() error { +func (s *ServiceException) OrigErr() error { return nil } -func (s ServiceException) Error() string { +func (s *ServiceException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceException) RequestID() string { + return s.RespMetadata.RequestID } type ShareDirectoryInput struct { @@ -12774,8 +12774,8 @@ func (s *ShareDirectoryOutput) SetSharedDirectoryId(v string) *ShareDirectoryOut // The maximum number of AWS accounts that you can share with this directory // has been reached. type ShareLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -12796,17 +12796,17 @@ func (s ShareLimitExceededException) GoString() string { func newErrorShareLimitExceededException(v protocol.ResponseMetadata) error { return &ShareLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ShareLimitExceededException) Code() string { +func (s *ShareLimitExceededException) Code() string { return "ShareLimitExceededException" } // Message returns the exception's message. -func (s ShareLimitExceededException) Message() string { +func (s *ShareLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12814,22 +12814,22 @@ func (s ShareLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ShareLimitExceededException) OrigErr() error { +func (s *ShareLimitExceededException) OrigErr() error { return nil } -func (s ShareLimitExceededException) Error() string { +func (s *ShareLimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ShareLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ShareLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ShareLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ShareLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Identifier that contains details about the directory consumer account. @@ -13065,8 +13065,8 @@ func (s *Snapshot) SetType(v string) *Snapshot { // You can use the GetSnapshotLimits operation to determine the snapshot limits // for a directory. type SnapshotLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -13087,17 +13087,17 @@ func (s SnapshotLimitExceededException) GoString() string { func newErrorSnapshotLimitExceededException(v protocol.ResponseMetadata) error { return &SnapshotLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SnapshotLimitExceededException) Code() string { +func (s *SnapshotLimitExceededException) Code() string { return "SnapshotLimitExceededException" } // Message returns the exception's message. 
-func (s SnapshotLimitExceededException) Message() string { +func (s *SnapshotLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13105,22 +13105,22 @@ func (s SnapshotLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SnapshotLimitExceededException) OrigErr() error { +func (s *SnapshotLimitExceededException) OrigErr() error { return nil } -func (s SnapshotLimitExceededException) Error() string { +func (s *SnapshotLimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s SnapshotLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SnapshotLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SnapshotLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *SnapshotLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Contains manual snapshot limit information for a directory. @@ -13337,8 +13337,8 @@ func (s *Tag) SetValue(v string) *Tag { // The maximum allowed number of tags was exceeded. type TagLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -13359,17 +13359,17 @@ func (s TagLimitExceededException) GoString() string { func newErrorTagLimitExceededException(v protocol.ResponseMetadata) error { return &TagLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagLimitExceededException) Code() string { +func (s *TagLimitExceededException) Code() string { return "TagLimitExceededException" } // Message returns the exception's message. -func (s TagLimitExceededException) Message() string { +func (s *TagLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13377,22 +13377,22 @@ func (s TagLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagLimitExceededException) OrigErr() error { +func (s *TagLimitExceededException) OrigErr() error { return nil } -func (s TagLimitExceededException) Error() string { +func (s *TagLimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TagLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Describes a trust relationship between an AWS Managed Microsoft AD directory @@ -13653,8 +13653,8 @@ func (s *UnshareTarget) SetType(v string) *UnshareTarget { // The operation is not supported. 
type UnsupportedOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -13675,17 +13675,17 @@ func (s UnsupportedOperationException) GoString() string { func newErrorUnsupportedOperationException(v protocol.ResponseMetadata) error { return &UnsupportedOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedOperationException) Code() string { +func (s *UnsupportedOperationException) Code() string { return "UnsupportedOperationException" } // Message returns the exception's message. -func (s UnsupportedOperationException) Message() string { +func (s *UnsupportedOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13693,22 +13693,22 @@ func (s UnsupportedOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedOperationException) OrigErr() error { +func (s *UnsupportedOperationException) OrigErr() error { return nil } -func (s UnsupportedOperationException) Error() string { +func (s *UnsupportedOperationException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedOperationException) RequestID() string { + return s.RespMetadata.RequestID } // Updates a conditional forwarder. @@ -14020,8 +14020,8 @@ func (s *UpdateTrustOutput) SetTrustId(v string) *UpdateTrustOutput { // The user provided a username that does not exist in your directory. type UserDoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The descriptive message for the exception. Message_ *string `locationName:"Message" type:"string"` @@ -14042,17 +14042,17 @@ func (s UserDoesNotExistException) GoString() string { func newErrorUserDoesNotExistException(v protocol.ResponseMetadata) error { return &UserDoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UserDoesNotExistException) Code() string { +func (s *UserDoesNotExistException) Code() string { return "UserDoesNotExistException" } // Message returns the exception's message. -func (s UserDoesNotExistException) Message() string { +func (s *UserDoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14060,22 +14060,22 @@ func (s UserDoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s UserDoesNotExistException) OrigErr() error { +func (s *UserDoesNotExistException) OrigErr() error { return nil } -func (s UserDoesNotExistException) Error() string { +func (s *UserDoesNotExistException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UserDoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UserDoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UserDoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *UserDoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // Initiates the verification of an existing trust relationship between an AWS @@ -14162,6 +14162,18 @@ const ( CertificateStateDeregisterFailed = "DeregisterFailed" ) +// CertificateState_Values returns all elements of the CertificateState enum +func CertificateState_Values() []string { + return []string{ + CertificateStateRegistering, + CertificateStateRegistered, + CertificateStateRegisterFailed, + CertificateStateDeregistering, + CertificateStateDeregistered, + CertificateStateDeregisterFailed, + } +} + const ( // DirectoryEditionEnterprise is a DirectoryEdition enum value DirectoryEditionEnterprise = "Enterprise" @@ -14170,6 +14182,14 @@ const ( DirectoryEditionStandard = "Standard" ) +// DirectoryEdition_Values returns all elements of the DirectoryEdition enum +func DirectoryEdition_Values() []string { + return []string{ + DirectoryEditionEnterprise, + DirectoryEditionStandard, + } +} + const ( // DirectorySizeSmall is a DirectorySize enum value DirectorySizeSmall = "Small" @@ -14178,6 +14198,14 @@ const ( DirectorySizeLarge = "Large" ) +// DirectorySize_Values returns all elements of the DirectorySize enum +func DirectorySize_Values() []string { + return []string{ + DirectorySizeSmall, + DirectorySizeLarge, + } +} + const ( // DirectoryStageRequested is a DirectoryStage enum value DirectoryStageRequested = "Requested" @@ -14213,6 +14241,23 @@ const ( DirectoryStageFailed = "Failed" ) +// DirectoryStage_Values returns all elements of the DirectoryStage enum +func DirectoryStage_Values() []string { + return []string{ + DirectoryStageRequested, + DirectoryStageCreating, + DirectoryStageCreated, + DirectoryStageActive, + DirectoryStageInoperable, + DirectoryStageImpaired, + DirectoryStageRestoring, + DirectoryStageRestoreFailed, + DirectoryStageDeleting, + DirectoryStageDeleted, + DirectoryStageFailed, + } +} + const ( // DirectoryTypeSimpleAd is a DirectoryType enum value DirectoryTypeSimpleAd = "SimpleAD" @@ -14227,6 +14272,16 @@ const ( DirectoryTypeSharedMicrosoftAd = "SharedMicrosoftAD" ) +// DirectoryType_Values returns all elements of the DirectoryType enum +func DirectoryType_Values() []string { + return []string{ + DirectoryTypeSimpleAd, + DirectoryTypeAdconnector, + DirectoryTypeMicrosoftAd, + DirectoryTypeSharedMicrosoftAd, + } +} + const ( // DomainControllerStatusCreating is a DomainControllerStatus enum value DomainControllerStatusCreating = "Creating" @@ -14250,6 +14305,19 @@ const ( DomainControllerStatusFailed = "Failed" ) +// DomainControllerStatus_Values returns all elements of the DomainControllerStatus enum +func DomainControllerStatus_Values() []string { + return []string{ + DomainControllerStatusCreating, + DomainControllerStatusActive, + 
DomainControllerStatusImpaired, + DomainControllerStatusRestoring, + DomainControllerStatusDeleting, + DomainControllerStatusDeleted, + DomainControllerStatusFailed, + } +} + const ( // IpRouteStatusMsgAdding is a IpRouteStatusMsg enum value IpRouteStatusMsgAdding = "Adding" @@ -14270,6 +14338,18 @@ const ( IpRouteStatusMsgRemoveFailed = "RemoveFailed" ) +// IpRouteStatusMsg_Values returns all elements of the IpRouteStatusMsg enum +func IpRouteStatusMsg_Values() []string { + return []string{ + IpRouteStatusMsgAdding, + IpRouteStatusMsgAdded, + IpRouteStatusMsgRemoving, + IpRouteStatusMsgRemoved, + IpRouteStatusMsgAddFailed, + IpRouteStatusMsgRemoveFailed, + } +} + const ( // LDAPSStatusEnabling is a LDAPSStatus enum value LDAPSStatusEnabling = "Enabling" @@ -14284,11 +14364,28 @@ const ( LDAPSStatusDisabled = "Disabled" ) +// LDAPSStatus_Values returns all elements of the LDAPSStatus enum +func LDAPSStatus_Values() []string { + return []string{ + LDAPSStatusEnabling, + LDAPSStatusEnabled, + LDAPSStatusEnableFailed, + LDAPSStatusDisabled, + } +} + const ( // LDAPSTypeClient is a LDAPSType enum value LDAPSTypeClient = "Client" ) +// LDAPSType_Values returns all elements of the LDAPSType enum +func LDAPSType_Values() []string { + return []string{ + LDAPSTypeClient, + } +} + const ( // RadiusAuthenticationProtocolPap is a RadiusAuthenticationProtocol enum value RadiusAuthenticationProtocolPap = "PAP" @@ -14303,6 +14400,16 @@ const ( RadiusAuthenticationProtocolMsChapv2 = "MS-CHAPv2" ) +// RadiusAuthenticationProtocol_Values returns all elements of the RadiusAuthenticationProtocol enum +func RadiusAuthenticationProtocol_Values() []string { + return []string{ + RadiusAuthenticationProtocolPap, + RadiusAuthenticationProtocolChap, + RadiusAuthenticationProtocolMsChapv1, + RadiusAuthenticationProtocolMsChapv2, + } +} + const ( // RadiusStatusCreating is a RadiusStatus enum value RadiusStatusCreating = "Creating" @@ -14314,11 +14421,27 @@ const ( RadiusStatusFailed = "Failed" ) +// RadiusStatus_Values returns all elements of the RadiusStatus enum +func RadiusStatus_Values() []string { + return []string{ + RadiusStatusCreating, + RadiusStatusCompleted, + RadiusStatusFailed, + } +} + const ( // ReplicationScopeDomain is a ReplicationScope enum value ReplicationScopeDomain = "Domain" ) +// ReplicationScope_Values returns all elements of the ReplicationScope enum +func ReplicationScope_Values() []string { + return []string{ + ReplicationScopeDomain, + } +} + const ( // SchemaExtensionStatusInitializing is a SchemaExtensionStatus enum value SchemaExtensionStatusInitializing = "Initializing" @@ -14348,6 +14471,21 @@ const ( SchemaExtensionStatusCompleted = "Completed" ) +// SchemaExtensionStatus_Values returns all elements of the SchemaExtensionStatus enum +func SchemaExtensionStatus_Values() []string { + return []string{ + SchemaExtensionStatusInitializing, + SchemaExtensionStatusCreatingSnapshot, + SchemaExtensionStatusUpdatingSchema, + SchemaExtensionStatusReplicating, + SchemaExtensionStatusCancelInProgress, + SchemaExtensionStatusRollbackInProgress, + SchemaExtensionStatusCancelled, + SchemaExtensionStatusFailed, + SchemaExtensionStatusCompleted, + } +} + const ( // SelectiveAuthEnabled is a SelectiveAuth enum value SelectiveAuthEnabled = "Enabled" @@ -14356,6 +14494,14 @@ const ( SelectiveAuthDisabled = "Disabled" ) +// SelectiveAuth_Values returns all elements of the SelectiveAuth enum +func SelectiveAuth_Values() []string { + return []string{ + SelectiveAuthEnabled, + SelectiveAuthDisabled, + } +} 
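The generated <Enum>_Values() helpers added above each return every member of an enum as a []string. A minimal sketch of consuming one of those slices, assuming the vendored directoryservice package from this change (the isValidSelectiveAuth helper is illustrative only, not part of the SDK):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/directoryservice"
)

// isValidSelectiveAuth reports whether v is one of the SelectiveAuth enum
// values exposed by the generated SelectiveAuth_Values helper.
func isValidSelectiveAuth(v string) bool {
	for _, valid := range directoryservice.SelectiveAuth_Values() {
		if v == valid {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isValidSelectiveAuth("Enabled")) // true
	fmt.Println(isValidSelectiveAuth("Maybe"))   // false
}

Slices like these are also the shape of input that schema validators such as Terraform's validation.StringInSlice typically consume, which is a common reason provider code pins an SDK version that exposes them.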
+ const ( // ShareMethodOrganizations is a ShareMethod enum value ShareMethodOrganizations = "ORGANIZATIONS" @@ -14364,6 +14510,14 @@ const ( ShareMethodHandshake = "HANDSHAKE" ) +// ShareMethod_Values returns all elements of the ShareMethod enum +func ShareMethod_Values() []string { + return []string{ + ShareMethodOrganizations, + ShareMethodHandshake, + } +} + const ( // ShareStatusShared is a ShareStatus enum value ShareStatusShared = "Shared" @@ -14393,6 +14547,21 @@ const ( ShareStatusDeleting = "Deleting" ) +// ShareStatus_Values returns all elements of the ShareStatus enum +func ShareStatus_Values() []string { + return []string{ + ShareStatusShared, + ShareStatusPendingAcceptance, + ShareStatusRejected, + ShareStatusRejecting, + ShareStatusRejectFailed, + ShareStatusSharing, + ShareStatusShareFailed, + ShareStatusDeleted, + ShareStatusDeleting, + } +} + const ( // SnapshotStatusCreating is a SnapshotStatus enum value SnapshotStatusCreating = "Creating" @@ -14404,6 +14573,15 @@ const ( SnapshotStatusFailed = "Failed" ) +// SnapshotStatus_Values returns all elements of the SnapshotStatus enum +func SnapshotStatus_Values() []string { + return []string{ + SnapshotStatusCreating, + SnapshotStatusCompleted, + SnapshotStatusFailed, + } +} + const ( // SnapshotTypeAuto is a SnapshotType enum value SnapshotTypeAuto = "Auto" @@ -14412,11 +14590,26 @@ const ( SnapshotTypeManual = "Manual" ) +// SnapshotType_Values returns all elements of the SnapshotType enum +func SnapshotType_Values() []string { + return []string{ + SnapshotTypeAuto, + SnapshotTypeManual, + } +} + const ( // TargetTypeAccount is a TargetType enum value TargetTypeAccount = "ACCOUNT" ) +// TargetType_Values returns all elements of the TargetType enum +func TargetType_Values() []string { + return []string{ + TargetTypeAccount, + } +} + const ( // TopicStatusRegistered is a TopicStatus enum value TopicStatusRegistered = "Registered" @@ -14431,6 +14624,16 @@ const ( TopicStatusDeleted = "Deleted" ) +// TopicStatus_Values returns all elements of the TopicStatus enum +func TopicStatus_Values() []string { + return []string{ + TopicStatusRegistered, + TopicStatusTopicnotfound, + TopicStatusFailed, + TopicStatusDeleted, + } +} + const ( // TrustDirectionOneWayOutgoing is a TrustDirection enum value TrustDirectionOneWayOutgoing = "One-Way: Outgoing" @@ -14442,6 +14645,15 @@ const ( TrustDirectionTwoWay = "Two-Way" ) +// TrustDirection_Values returns all elements of the TrustDirection enum +func TrustDirection_Values() []string { + return []string{ + TrustDirectionOneWayOutgoing, + TrustDirectionOneWayIncoming, + TrustDirectionTwoWay, + } +} + const ( // TrustStateCreating is a TrustState enum value TrustStateCreating = "Creating" @@ -14477,6 +14689,23 @@ const ( TrustStateFailed = "Failed" ) +// TrustState_Values returns all elements of the TrustState enum +func TrustState_Values() []string { + return []string{ + TrustStateCreating, + TrustStateCreated, + TrustStateVerifying, + TrustStateVerifyFailed, + TrustStateVerified, + TrustStateUpdating, + TrustStateUpdateFailed, + TrustStateUpdated, + TrustStateDeleting, + TrustStateDeleted, + TrustStateFailed, + } +} + const ( // TrustTypeForest is a TrustType enum value TrustTypeForest = "Forest" @@ -14484,3 +14713,11 @@ const ( // TrustTypeExternal is a TrustType enum value TrustTypeExternal = "External" ) + +// TrustType_Values returns all elements of the TrustType enum +func TrustType_Values() []string { + return []string{ + TrustTypeForest, + TrustTypeExternal, + } +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go index 68215df56..9d44741af 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dlm/api.go b/vendor/github.com/aws/aws-sdk-go/service/dlm/api.go index 6182bf173..9fd8d19e6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dlm/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dlm/api.go @@ -830,23 +830,29 @@ func (s *CreateLifecyclePolicyOutput) SetPolicyId(v string) *CreateLifecyclePoli } // Specifies when to create snapshots of EBS volumes. +// +// You must specify either a Cron expression or an interval, interval unit, +// and start time. You cannot specify both. type CreateRule struct { _ struct{} `type:"structure"` - // The interval between snapshots. The supported values are 2, 3, 4, 6, 8, 12, - // and 24. - // - // Interval is a required field - Interval *int64 `min:"1" type:"integer" required:"true"` + // The schedule, as a Cron expression. The schedule interval must be between + // 1 hour and 1 year. For more information, see Cron expressions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) + // in the Amazon CloudWatch User Guide. + CronExpression *string `min:"17" type:"string"` + + // The interval between snapshots. The supported values are 1, 2, 3, 4, 6, 8, + // 12, and 24. + Interval *int64 `min:"1" type:"integer"` // The interval unit. - // - // IntervalUnit is a required field - IntervalUnit *string `type:"string" required:"true" enum:"IntervalUnitValues"` + IntervalUnit *string `type:"string" enum:"IntervalUnitValues"` // The time, in UTC, to start the operation. The supported format is hh:mm. // // The operation occurs within a one-hour window following the specified time. + // If you do not specify a time, Amazon DLM selects a time within the next 24 + // hours. Times []*string `type:"list"` } @@ -863,15 +869,12 @@ func (s CreateRule) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateRule) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateRule"} - if s.Interval == nil { - invalidParams.Add(request.NewErrParamRequired("Interval")) + if s.CronExpression != nil && len(*s.CronExpression) < 17 { + invalidParams.Add(request.NewErrParamMinLen("CronExpression", 17)) } if s.Interval != nil && *s.Interval < 1 { invalidParams.Add(request.NewErrParamMinValue("Interval", 1)) } - if s.IntervalUnit == nil { - invalidParams.Add(request.NewErrParamRequired("IntervalUnit")) - } if invalidParams.Len() > 0 { return invalidParams @@ -879,6 +882,12 @@ func (s *CreateRule) Validate() error { return nil } +// SetCronExpression sets the CronExpression field's value. +func (s *CreateRule) SetCronExpression(v string) *CreateRule { + s.CronExpression = &v + return s +} + // SetInterval sets the Interval field's value. 
func (s *CreateRule) SetInterval(v int64) *CreateRule { s.Interval = &v @@ -1337,8 +1346,8 @@ func (s *GetLifecyclePolicyOutput) SetPolicy(v *LifecyclePolicy) *GetLifecyclePo // The service failed in an unexpected way. type InternalServerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -1357,17 +1366,17 @@ func (s InternalServerException) GoString() string { func newErrorInternalServerException(v protocol.ResponseMetadata) error { return &InternalServerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerException) Code() string { +func (s *InternalServerException) Code() string { return "InternalServerException" } // Message returns the exception's message. -func (s InternalServerException) Message() string { +func (s *InternalServerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1375,28 +1384,28 @@ func (s InternalServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerException) OrigErr() error { +func (s *InternalServerException) OrigErr() error { return nil } -func (s InternalServerException) Error() string { +func (s *InternalServerException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID } // Bad request. The request is missing required parameters or has invalid parameters. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -1421,17 +1430,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1439,22 +1448,22 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Detailed information about a lifecycle policy. @@ -1616,8 +1625,8 @@ func (s *LifecyclePolicySummary) SetTags(v map[string]*string) *LifecyclePolicyS // The request failed because a limit was exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -1639,17 +1648,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1657,22 +1666,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListTagsForResourceInput struct { @@ -1777,10 +1786,12 @@ type PolicyDetails struct { // is EBS_SNAPSHOT_MANAGEMENT. PolicyType *string `type:"string" enum:"PolicyTypeValues"` - // The resource type. + // The resource type. Use VOLUME to create snapshots of individual volumes or + // use INSTANCE to create multi-volume snapshots from the volumes for an instance. ResourceTypes []*string `min:"1" type:"list"` - // The schedule of policy-defined actions. + // The schedules of policy-defined actions. A policy can have up to four schedules + // - one mandatory schedule and up to three optional schedules. Schedules []*Schedule `min:"1" type:"list"` // The single tag that identifies targeted resources for this policy. @@ -1868,8 +1879,8 @@ func (s *PolicyDetails) SetTargetTags(v []*Tag) *PolicyDetails { // A requested resource was not found. 
type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -1894,17 +1905,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1912,22 +1923,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the retention rule for a lifecycle policy. 
You can retain snapshots @@ -2443,16 +2454,39 @@ const ( GettablePolicyStateValuesError = "ERROR" ) +// GettablePolicyStateValues_Values returns all elements of the GettablePolicyStateValues enum +func GettablePolicyStateValues_Values() []string { + return []string{ + GettablePolicyStateValuesEnabled, + GettablePolicyStateValuesDisabled, + GettablePolicyStateValuesError, + } +} + const ( // IntervalUnitValuesHours is a IntervalUnitValues enum value IntervalUnitValuesHours = "HOURS" ) +// IntervalUnitValues_Values returns all elements of the IntervalUnitValues enum +func IntervalUnitValues_Values() []string { + return []string{ + IntervalUnitValuesHours, + } +} + const ( // PolicyTypeValuesEbsSnapshotManagement is a PolicyTypeValues enum value PolicyTypeValuesEbsSnapshotManagement = "EBS_SNAPSHOT_MANAGEMENT" ) +// PolicyTypeValues_Values returns all elements of the PolicyTypeValues enum +func PolicyTypeValues_Values() []string { + return []string{ + PolicyTypeValuesEbsSnapshotManagement, + } +} + const ( // ResourceTypeValuesVolume is a ResourceTypeValues enum value ResourceTypeValuesVolume = "VOLUME" @@ -2461,6 +2495,14 @@ const ( ResourceTypeValuesInstance = "INSTANCE" ) +// ResourceTypeValues_Values returns all elements of the ResourceTypeValues enum +func ResourceTypeValues_Values() []string { + return []string{ + ResourceTypeValuesVolume, + ResourceTypeValuesInstance, + } +} + const ( // RetentionIntervalUnitValuesDays is a RetentionIntervalUnitValues enum value RetentionIntervalUnitValuesDays = "DAYS" @@ -2475,6 +2517,16 @@ const ( RetentionIntervalUnitValuesYears = "YEARS" ) +// RetentionIntervalUnitValues_Values returns all elements of the RetentionIntervalUnitValues enum +func RetentionIntervalUnitValues_Values() []string { + return []string{ + RetentionIntervalUnitValuesDays, + RetentionIntervalUnitValuesWeeks, + RetentionIntervalUnitValuesMonths, + RetentionIntervalUnitValuesYears, + } +} + const ( // SettablePolicyStateValuesEnabled is a SettablePolicyStateValues enum value SettablePolicyStateValuesEnabled = "ENABLED" @@ -2482,3 +2534,11 @@ const ( // SettablePolicyStateValuesDisabled is a SettablePolicyStateValues enum value SettablePolicyStateValuesDisabled = "DISABLED" ) + +// SettablePolicyStateValues_Values returns all elements of the SettablePolicyStateValues enum +func SettablePolicyStateValues_Values() []string { + return []string{ + SettablePolicyStateValuesEnabled, + SettablePolicyStateValuesDisabled, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go b/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go index b7827db77..31c39ef37 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/docdb/api.go b/vendor/github.com/aws/aws-sdk-go/service/docdb/api.go index c95a1db72..cb5afe2a5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/docdb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/docdb/api.go @@ -146,8 +146,8 @@ func (c *DocDB) ApplyPendingMaintenanceActionRequest(input *ApplyPendingMaintena // ApplyPendingMaintenanceAction API operation for Amazon DocumentDB with 
MongoDB compatibility. // -// Applies a pending maintenance action to a resource (for example, to a DB -// instance). +// Applies a pending maintenance action to a resource (for example, to an Amazon +// DocumentDB instance). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -320,10 +320,12 @@ func (c *DocDB) CopyDBClusterSnapshotRequest(input *CopyDBClusterSnapshotInput) // Copies a snapshot of a cluster. // // To copy a cluster snapshot from a shared manual cluster snapshot, SourceDBClusterSnapshotIdentifier -// must be the Amazon Resource Name (ARN) of the shared cluster snapshot. +// must be the Amazon Resource Name (ARN) of the shared cluster snapshot. You +// can only copy a shared DB cluster snapshot, whether encrypted or not, in +// the same AWS Region. // // To cancel the copy operation after it is in progress, delete the target cluster -// snapshot identified by TargetDBClusterSnapshotIdentifier while that DB cluster +// snapshot identified by TargetDBClusterSnapshotIdentifier while that cluster // snapshot is in the copying status. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -550,24 +552,19 @@ func (c *DocDB) CreateDBClusterParameterGroupRequest(input *CreateDBClusterParam // Creates a new cluster parameter group. // // Parameters in a cluster parameter group apply to all of the instances in -// a DB cluster. +// a cluster. // // A cluster parameter group is initially created with the default parameters -// for the database engine used by instances in the cluster. To provide custom -// values for any of the parameters, you must modify the group after you create -// it. After you create a DB cluster parameter group, you must associate it -// with your cluster. For the new DB cluster parameter group and associated -// settings to take effect, you must then reboot the instances in the cluster -// without failover. -// -// After you create a cluster parameter group, you should wait at least 5 minutes -// before creating your first cluster that uses that cluster parameter group -// as the default parameter group. This allows Amazon DocumentDB to fully complete -// the create action before the cluster parameter group is used as the default -// for a new cluster. This step is especially important for parameters that -// are critical when creating the default database for a cluster, such as the -// character set for the default database defined by the character_set_database -// parameter. +// for the database engine used by instances in the cluster. In Amazon DocumentDB, +// you cannot make modifications directly to the default.docdb3.6 cluster parameter +// group. If your Amazon DocumentDB cluster is using the default cluster parameter +// group and you want to modify a value in it, you must first create a new parameter +// group (https://docs.aws.amazon.com/documentdb/latest/developerguide/cluster_parameter_group-create.html) +// or copy an existing parameter group (https://docs.aws.amazon.com/documentdb/latest/developerguide/cluster_parameter_group-copy.html), +// modify it, and then apply the modified parameter group to your cluster. For +// the new cluster parameter group and associated settings to take effect, you +// must then reboot the instances in the cluster without failover. 
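The copy-modify-apply flow described in the doc comment above maps onto three SDK calls. A minimal sketch, not part of the vendored file, assuming a cluster named sample-cluster and a copied group named custom-docdb36 (both placeholders) and using the audit_logs parameter as an example:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/docdb"
)

func main() {
	svc := docdb.New(session.Must(session.NewSession()))

	// default.docdb3.6 cannot be modified directly, so copy it first.
	_, err := svc.CopyDBClusterParameterGroup(&docdb.CopyDBClusterParameterGroupInput{
		SourceDBClusterParameterGroupIdentifier:  aws.String("default.docdb3.6"),
		TargetDBClusterParameterGroupIdentifier:  aws.String("custom-docdb36"),
		TargetDBClusterParameterGroupDescription: aws.String("copy of default.docdb3.6"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Modify a parameter in the copy; pending-reboot defers the change until
	// the cluster's instances are rebooted, as the doc comment describes.
	_, err = svc.ModifyDBClusterParameterGroup(&docdb.ModifyDBClusterParameterGroupInput{
		DBClusterParameterGroupName: aws.String("custom-docdb36"),
		Parameters: []*docdb.Parameter{{
			ParameterName:  aws.String("audit_logs"),
			ParameterValue: aws.String("enabled"),
			ApplyMethod:    aws.String(docdb.ApplyMethodPendingReboot),
		}},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Apply the modified group to the cluster.
	out, err := svc.ModifyDBCluster(&docdb.ModifyDBClusterInput{
		DBClusterIdentifier:         aws.String("sample-cluster"),
		DBClusterParameterGroupName: aws.String("custom-docdb36"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DBCluster.Status))
}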
For more +// information, see Modifying Amazon DocumentDB Cluster Parameter Groups (https://docs.aws.amazon.com/documentdb/latest/developerguide/cluster_parameter_group-modify.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1395,6 +1392,12 @@ func (c *DocDB) DescribeCertificatesRequest(input *DescribeCertificatesInput) (r Name: opDescribeCertificates, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -1444,6 +1447,58 @@ func (c *DocDB) DescribeCertificatesWithContext(ctx aws.Context, input *Describe return out, req.Send() } +// DescribeCertificatesPages iterates over the pages of a DescribeCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCertificates operation. +// pageNum := 0 +// err := client.DescribeCertificatesPages(params, +// func(page *docdb.DescribeCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DocDB) DescribeCertificatesPages(input *DescribeCertificatesInput, fn func(*DescribeCertificatesOutput, bool) bool) error { + return c.DescribeCertificatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeCertificatesPagesWithContext same as DescribeCertificatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DocDB) DescribeCertificatesPagesWithContext(ctx aws.Context, input *DescribeCertificatesInput, fn func(*DescribeCertificatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeCertificatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCertificatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeCertificatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDBClusterParameterGroups = "DescribeDBClusterParameterGroups" // DescribeDBClusterParameterGroupsRequest generates a "aws/request.Request" representing the @@ -1475,6 +1530,12 @@ func (c *DocDB) DescribeDBClusterParameterGroupsRequest(input *DescribeDBCluster Name: opDescribeDBClusterParameterGroups, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -1525,6 +1586,58 @@ func (c *DocDB) DescribeDBClusterParameterGroupsWithContext(ctx aws.Context, inp return out, req.Send() } +// DescribeDBClusterParameterGroupsPages iterates over the pages of a DescribeDBClusterParameterGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBClusterParameterGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBClusterParameterGroups operation. +// pageNum := 0 +// err := client.DescribeDBClusterParameterGroupsPages(params, +// func(page *docdb.DescribeDBClusterParameterGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DocDB) DescribeDBClusterParameterGroupsPages(input *DescribeDBClusterParameterGroupsInput, fn func(*DescribeDBClusterParameterGroupsOutput, bool) bool) error { + return c.DescribeDBClusterParameterGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDBClusterParameterGroupsPagesWithContext same as DescribeDBClusterParameterGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DocDB) DescribeDBClusterParameterGroupsPagesWithContext(ctx aws.Context, input *DescribeDBClusterParameterGroupsInput, fn func(*DescribeDBClusterParameterGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDBClusterParameterGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBClusterParameterGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDBClusterParameterGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDBClusterParameters = "DescribeDBClusterParameters" // DescribeDBClusterParametersRequest generates a "aws/request.Request" representing the @@ -1556,6 +1669,12 @@ func (c *DocDB) DescribeDBClusterParametersRequest(input *DescribeDBClusterParam Name: opDescribeDBClusterParameters, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -1604,6 +1723,58 @@ func (c *DocDB) DescribeDBClusterParametersWithContext(ctx aws.Context, input *D return out, req.Send() } +// DescribeDBClusterParametersPages iterates over the pages of a DescribeDBClusterParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBClusterParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBClusterParameters operation. +// pageNum := 0 +// err := client.DescribeDBClusterParametersPages(params, +// func(page *docdb.DescribeDBClusterParametersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DocDB) DescribeDBClusterParametersPages(input *DescribeDBClusterParametersInput, fn func(*DescribeDBClusterParametersOutput, bool) bool) error { + return c.DescribeDBClusterParametersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDBClusterParametersPagesWithContext same as DescribeDBClusterParametersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DocDB) DescribeDBClusterParametersPagesWithContext(ctx aws.Context, input *DescribeDBClusterParametersInput, fn func(*DescribeDBClusterParametersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDBClusterParametersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBClusterParametersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDBClusterParametersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDBClusterSnapshotAttributes = "DescribeDBClusterSnapshotAttributes" // DescribeDBClusterSnapshotAttributesRequest generates a "aws/request.Request" representing the @@ -1721,6 +1892,12 @@ func (c *DocDB) DescribeDBClusterSnapshotsRequest(input *DescribeDBClusterSnapsh Name: opDescribeDBClusterSnapshots, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -1770,6 +1947,58 @@ func (c *DocDB) DescribeDBClusterSnapshotsWithContext(ctx aws.Context, input *De return out, req.Send() } +// DescribeDBClusterSnapshotsPages iterates over the pages of a DescribeDBClusterSnapshots operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBClusterSnapshots method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBClusterSnapshots operation. +// pageNum := 0 +// err := client.DescribeDBClusterSnapshotsPages(params, +// func(page *docdb.DescribeDBClusterSnapshotsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DocDB) DescribeDBClusterSnapshotsPages(input *DescribeDBClusterSnapshotsInput, fn func(*DescribeDBClusterSnapshotsOutput, bool) bool) error { + return c.DescribeDBClusterSnapshotsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDBClusterSnapshotsPagesWithContext same as DescribeDBClusterSnapshotsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DocDB) DescribeDBClusterSnapshotsPagesWithContext(ctx aws.Context, input *DescribeDBClusterSnapshotsInput, fn func(*DescribeDBClusterSnapshotsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDBClusterSnapshotsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBClusterSnapshotsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDBClusterSnapshotsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDBClusters = "DescribeDBClusters" // DescribeDBClustersRequest generates a "aws/request.Request" representing the @@ -2767,6 +2996,12 @@ func (c *DocDB) DescribePendingMaintenanceActionsRequest(input *DescribePendingM Name: opDescribePendingMaintenanceActions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -2816,6 +3051,58 @@ func (c *DocDB) DescribePendingMaintenanceActionsWithContext(ctx aws.Context, in return out, req.Send() } +// DescribePendingMaintenanceActionsPages iterates over the pages of a DescribePendingMaintenanceActions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribePendingMaintenanceActions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePendingMaintenanceActions operation. +// pageNum := 0 +// err := client.DescribePendingMaintenanceActionsPages(params, +// func(page *docdb.DescribePendingMaintenanceActionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DocDB) DescribePendingMaintenanceActionsPages(input *DescribePendingMaintenanceActionsInput, fn func(*DescribePendingMaintenanceActionsOutput, bool) bool) error { + return c.DescribePendingMaintenanceActionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePendingMaintenanceActionsPagesWithContext same as DescribePendingMaintenanceActionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DocDB) DescribePendingMaintenanceActionsPagesWithContext(ctx aws.Context, input *DescribePendingMaintenanceActionsInput, fn func(*DescribePendingMaintenanceActionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePendingMaintenanceActionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePendingMaintenanceActionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribePendingMaintenanceActionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opFailoverDBCluster = "FailoverDBCluster" // FailoverDBClusterRequest generates a "aws/request.Request" representing the @@ -4239,7 +4526,7 @@ type AddTagsToResourceInput struct { _ struct{} `type:"structure"` // The Amazon DocumentDB resource that the tags are added to. This value is - // an Amazon Resource Name (ARN). + // an Amazon Resource Name . // // ResourceName is a required field ResourceName *string `type:"string" required:"true"` @@ -4562,7 +4849,7 @@ type CopyDBClusterParameterGroupInput struct { // or a valid ARN. 
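The Paginator metadata and the new Pages/PagesWithContext helpers added above follow the same Marker/MaxRecords pattern for every DocDB Describe operation. A minimal sketch, not part of the vendored file, of driving one of the new helpers with a context and an early stop; the cluster identifier is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/docdb"
)

func main() {
	svc := docdb.New(session.Must(session.NewSession()))

	input := &docdb.DescribeDBClusterSnapshotsInput{
		DBClusterIdentifier: aws.String("sample-cluster"), // placeholder
		MaxRecords:          aws.Int64(20),                // page size, surfaced as the LimitToken above
	}

	pages := 0
	err := svc.DescribeDBClusterSnapshotsPagesWithContext(context.Background(), input,
		func(page *docdb.DescribeDBClusterSnapshotsOutput, lastPage bool) bool {
			pages++
			for _, snap := range page.DBClusterSnapshots {
				fmt.Println(aws.StringValue(snap.DBClusterSnapshotIdentifier))
			}
			// Returning false stops the iteration early; stop after three pages.
			return pages < 3
		})
	if err != nil {
		log.Fatal(err)
	}
}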
// // * If the source parameter group is in a different AWS Region than the - // copy, specify a valid cluster parameter group ARN; for example, arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1. + // copy, specify a valid cluster parameter group ARN; for example, arn:aws:rds:us-east-1:123456789012:sample-cluster:sample-parameter-group. // // SourceDBClusterParameterGroupIdentifier is a required field SourceDBClusterParameterGroupIdentifier *string `type:"string" required:"true"` @@ -4693,7 +4980,7 @@ type CopyDBClusterSnapshotInput struct { // to the AWS KMS key ID that you want to use to encrypt the copy of the cluster // snapshot in the destination Region. AWS KMS encryption keys are specific // to the AWS Region that they are created in, and you can't use encryption - // keys from one Region in another Region. + // keys from one AWS Region in another AWS Region. // // If you copy an unencrypted cluster snapshot and specify a value for the KmsKeyId // parameter, an error is returned. @@ -4701,40 +4988,39 @@ type CopyDBClusterSnapshotInput struct { // The URL that contains a Signature Version 4 signed request for the CopyDBClusterSnapshot // API action in the AWS Region that contains the source cluster snapshot to - // copy. You must use the PreSignedUrl parameter when copying an encrypted cluster - // snapshot from another AWS Region. - // - // The presigned URL must be a valid request for the CopyDBSClusterSnapshot - // API action that can be executed in the source AWS Region that contains the - // encrypted DB cluster snapshot to be copied. The presigned URL request must - // contain the following parameter values: - // - // * KmsKeyId - The AWS KMS key identifier for the key to use to encrypt - // the copy of the cluster snapshot in the destination AWS Region. This is - // the same identifier for both the CopyDBClusterSnapshot action that is - // called in the destination AWS Region, and the action contained in the - // presigned URL. - // - // * DestinationRegion - The name of the AWS Region that the DB cluster snapshot - // will be created in. - // - // * SourceDBClusterSnapshotIdentifier - The cluster snapshot identifier - // for the encrypted cluster snapshot to be copied. This identifier must - // be in the Amazon Resource Name (ARN) format for the source AWS Region. - // For example, if you are copying an encrypted cluster snapshot from the - // us-west-2 AWS Region, then your SourceDBClusterSnapshotIdentifier looks - // like the following example: arn:aws:rds:us-west-2:123456789012:cluster-snapshot:my-cluster-snapshot-20161115. + // copy. You must use the PreSignedUrl parameter when copying a cluster snapshot + // from another AWS Region. + // + // If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion + // (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. + // Specifying SourceRegion autogenerates a pre-signed URL that is a valid request + // for the operation that can be executed in the source AWS Region. + // + // The presigned URL must be a valid request for the CopyDBClusterSnapshot API + // action that can be executed in the source AWS Region that contains the cluster + // snapshot to be copied. The presigned URL request must contain the following + // parameter values: + // + // * SourceRegion - The ID of the region that contains the snapshot to be + // copied. + // + // * SourceDBClusterSnapshotIdentifier - The identifier for the the encrypted + // cluster snapshot to be copied. 
This identifier must be in the Amazon Resource + // Name (ARN) format for the source AWS Region. For example, if you are copying + // an encrypted cluster snapshot from the us-east-1 AWS Region, then your + // SourceDBClusterSnapshotIdentifier looks something like the following: + // arn:aws:rds:us-east-1:12345678012:sample-cluster:sample-cluster-snapshot. + // + // * TargetDBClusterSnapshotIdentifier - The identifier for the new cluster + // snapshot to be created. This parameter isn't case sensitive. PreSignedUrl *string `type:"string"` // The identifier of the cluster snapshot to copy. This parameter is not case // sensitive. // - // You can't copy an encrypted, shared cluster snapshot from one AWS Region - // to another. - // // Constraints: // - // * Must specify a valid system snapshot in the "available" state. + // * Must specify a valid system snapshot in the available state. // // * If the source snapshot is in the same AWS Region as the copy, specify // a valid snapshot identifier. @@ -4903,7 +5189,9 @@ type CreateDBClusterInput struct { DeletionProtection *bool `type:"boolean"` // A list of log types that need to be enabled for exporting to Amazon CloudWatch - // Logs. + // Logs. You can enable audit logs or profiler logs. For more information, see + // Auditing Amazon DocumentDB Events (https://docs.aws.amazon.com/documentdb/latest/developerguide/event-auditing.html) + // and Profiling Amazon DocumentDB Operations (https://docs.aws.amazon.com/documentdb/latest/developerguide/profiling.html). EnableCloudwatchLogsExports []*string `type:"list"` // The name of the database engine to be used for this cluster. @@ -4913,7 +5201,10 @@ type CreateDBClusterInput struct { // Engine is a required field Engine *string `type:"string" required:"true"` - // The version number of the database engine to use. + // The version number of the database engine to use. The --engine-version will + // default to the latest major engine version. For production workloads, we + // recommend explicitly declaring this parameter with the intended major engine + // version. EngineVersion *string `type:"string"` // The AWS KMS key identifier for an encrypted cluster. @@ -4966,6 +5257,9 @@ type CreateDBClusterInput struct { // The port number on which the instances in the cluster accept connections. Port *int64 `type:"integer"` + // Not currently supported. + PreSignedUrl *string `type:"string"` + // The daily time range during which automated backups are created if automated // backups are enabled using the BackupRetentionPeriod parameter. // @@ -5116,6 +5410,12 @@ func (s *CreateDBClusterInput) SetPort(v int64) *CreateDBClusterInput { return s } +// SetPreSignedUrl sets the PreSignedUrl field's value. +func (s *CreateDBClusterInput) SetPreSignedUrl(v string) *CreateDBClusterInput { + s.PreSignedUrl = &v + return s +} + // SetPreferredBackupWindow sets the PreferredBackupWindow field's value. func (s *CreateDBClusterInput) SetPreferredBackupWindow(v string) *CreateDBClusterInput { s.PreferredBackupWindow = &v @@ -5393,10 +5693,6 @@ type CreateDBInstanceInput struct { // Region. // // Example: us-east-1d - // - // Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ - // parameter is set to true. The specified Availability Zone must be in the - // same AWS Region as the current endpoint. AvailabilityZone *string `type:"string"` // The identifier of the cluster that the instance will belong to. 
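The rewritten PreSignedUrl documentation above says SDK callers can set SourceRegion and let the SDK generate the pre-signed URL. A minimal sketch of a cross-Region copy of an encrypted snapshot, assuming CopyDBClusterSnapshotInput exposes a SourceRegion field used only for presigning, as that text implies; the ARN, key alias, and identifiers are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/docdb"
)

func main() {
	// Client in the destination Region; the copy is created there.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := docdb.New(sess)

	out, err := svc.CopyDBClusterSnapshot(&docdb.CopyDBClusterSnapshotInput{
		// An encrypted snapshot in another Region is referenced by its ARN (placeholder).
		SourceDBClusterSnapshotIdentifier: aws.String("arn:aws:rds:us-east-1:123456789012:cluster-snapshot:sample-cluster-snapshot"),
		TargetDBClusterSnapshotIdentifier: aws.String("sample-cluster-snapshot-copy"),
		// KMS key in the destination Region for the encrypted copy (placeholder alias).
		KmsKeyId: aws.String("alias/sample-docdb-key"),
		// Assumed field: tells the SDK to autogenerate the pre-signed URL for the
		// source Region instead of requiring PreSignedUrl to be built by hand.
		SourceRegion: aws.String("us-east-1"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DBClusterSnapshot.Status))
}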
@@ -7991,7 +8287,7 @@ type DescribeDBEngineVersionsInput struct { // The database engine version to return. // - // Example: 5.1.49 + // Example: 3.6.0 EngineVersion *string `type:"string"` // This parameter is not currently supported. @@ -11628,6 +11924,14 @@ const ( ApplyMethodPendingReboot = "pending-reboot" ) +// ApplyMethod_Values returns all elements of the ApplyMethod enum +func ApplyMethod_Values() []string { + return []string{ + ApplyMethodImmediate, + ApplyMethodPendingReboot, + } +} + const ( // SourceTypeDbInstance is a SourceType enum value SourceTypeDbInstance = "db-instance" @@ -11647,3 +11951,15 @@ const ( // SourceTypeDbClusterSnapshot is a SourceType enum value SourceTypeDbClusterSnapshot = "db-cluster-snapshot" ) + +// SourceType_Values returns all elements of the SourceType enum +func SourceType_Values() []string { + return []string{ + SourceTypeDbInstance, + SourceTypeDbParameterGroup, + SourceTypeDbSecurityGroup, + SourceTypeDbSnapshot, + SourceTypeDbCluster, + SourceTypeDbClusterSnapshot, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go b/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go index e75c9cdd4..acd5ea475 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go index 995fa6d51..ebfb1dcb4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -61,9 +61,9 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R output = &BatchGetItemOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -162,9 +162,9 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R // might not be specified correctly, or its status might not be ACTIVE. // // * RequestLimitExceeded -// Throughput exceeds the current throughput limit for your account. Please +// Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a limit increase. +// a quota increase. // // * InternalServerError // An error occurred on the server side. @@ -282,9 +282,9 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque output = &BatchWriteItemOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -408,9 +408,9 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // that have one or more local secondary indexes. // // * RequestLimitExceeded -// Throughput exceeds the current throughput limit for your account. Please +// Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a limit increase. +// a quota increase. // // * InternalServerError // An error occurred on the server side. @@ -476,9 +476,9 @@ func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.R output = &CreateBackupOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -570,7 +570,7 @@ func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.R // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * InternalServerError // An error occurred on the server side. @@ -636,9 +636,9 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req output = &CreateGlobalTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -670,7 +670,7 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req // relationship between two or more DynamoDB tables with the same table name // in the provided Regions. // -// This method only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) // of global tables. // // If you want to add a new replica table to a global table, each of the following @@ -693,6 +693,14 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req // * The global secondary indexes must have the same hash key and sort key // (if present). // +// If local secondary indexes are specified, then the following conditions must +// also be met: +// +// * The local secondary indexes must have the same name. +// +// * The local secondary indexes must have the same hash key and sort key +// (if present). +// // Write capacity settings should be set consistently across your replica tables // and secondary indexes. 
DynamoDB strongly recommends enabling auto scaling // to manage the write capacity settings for all of your global tables replicas @@ -723,7 +731,7 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * InternalServerError // An error occurred on the server side. @@ -796,9 +804,9 @@ func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Req output = &CreateTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -867,7 +875,7 @@ func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Req // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * InternalServerError // An error occurred on the server side. @@ -933,9 +941,9 @@ func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.R output = &DeleteBackupOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -994,7 +1002,7 @@ func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.R // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * InternalServerError // An error occurred on the server side. @@ -1060,9 +1068,9 @@ func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Reque output = &DeleteItemOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -1136,9 +1144,9 @@ func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Reque // Operation was rejected because there is an ongoing transaction for the item. // // * RequestLimitExceeded -// Throughput exceeds the current throughput limit for your account. Please +// Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a limit increase. 
+// a quota increase. // // * InternalServerError // An error occurred on the server side. @@ -1204,9 +1212,9 @@ func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Req output = &DeleteTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -1282,7 +1290,7 @@ func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Req // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * InternalServerError // An error occurred on the server side. @@ -1348,9 +1356,9 @@ func (c *DynamoDB) DescribeBackupRequest(input *DescribeBackupInput) (req *reque output = &DescribeBackupOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -1457,9 +1465,9 @@ func (c *DynamoDB) DescribeContinuousBackupsRequest(input *DescribeContinuousBac output = &DescribeContinuousBackupsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -1807,9 +1815,9 @@ func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) ( output = &DescribeGlobalTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -1839,8 +1847,10 @@ func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) ( // // Returns information about the specified global table. // -// This method only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. +// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. 
If you are using global tables Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// you can use DescribeTable (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_DescribeTable.html) +// instead. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1917,9 +1927,9 @@ func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTable output = &DescribeGlobalTableSettingsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -1949,7 +1959,7 @@ func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTable // // Describes Region-specific settings for a global table. // -// This method only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) // of global tables. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2027,9 +2037,9 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque output = &DescribeLimitsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -2057,27 +2067,27 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque // DescribeLimits API operation for Amazon DynamoDB. // -// Returns the current provisioned-capacity limits for your AWS account in a +// Returns the current provisioned-capacity quotas for your AWS account in a // Region, both for the Region as a whole and for any one DynamoDB table that // you create there. // -// When you establish an AWS account, the account has initial limits on the +// When you establish an AWS account, the account has initial quotas on the // maximum read capacity units and write capacity units that you can provision // across all of your DynamoDB tables in a given Region. Also, there are per-table -// limits that apply when you create a table there. For more information, see -// Limits (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// quotas that apply when you create a table there. For more information, see +// Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // page in the Amazon DynamoDB Developer Guide. 
// -// Although you can increase these limits by filing a case at AWS Support Center +// Although you can increase these quotas by filing a case at AWS Support Center // (https://console.aws.amazon.com/support/home#/), obtaining the increase is // not instantaneous. The DescribeLimits action lets you write code to compare -// the capacity you are currently using to those limits imposed by your account -// so that you have enough time to apply for an increase before you hit a limit. +// the capacity you are currently using to those quotas imposed by your account +// so that you have enough time to apply for an increase before you hit a quota. // // For example, you could use one of the AWS SDKs to do the following: // // Call DescribeLimits for a particular Region to obtain your current account -// limits on provisioned capacity there. +// quotas on provisioned capacity there. // // Create a variable to hold the aggregate read capacity units provisioned for // all your tables in that Region, and one to hold the aggregate write capacity @@ -2096,20 +2106,20 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque // these GSIs and add their provisioned capacity values to your variables // as well. // -// Report the account limits for that Region returned by DescribeLimits, along +// Report the account quotas for that Region returned by DescribeLimits, along // with the total current provisioned capacity levels you have calculated. // // This will let you see whether you are getting close to your account-level -// limits. +// quotas. // -// The per-table limits apply only when you are creating a new table. They restrict +// The per-table quotas apply only when you are creating a new table. They restrict // the sum of the provisioned capacity of the new table itself and all its global // secondary indexes. // // For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned -// capacity extremely rapidly. But the only upper limit that applies is that -// the aggregate provisioned capacity over all your tables and GSIs cannot exceed -// either of the per-account limits. +// capacity extremely rapidly, but the only quota that applies is that the aggregate +// provisioned capacity over all your tables and GSIs cannot exceed either of +// the per-account quotas. // // DescribeLimits should only be called periodically. You can expect throttling // errors if you call it more than once in a minute. @@ -2188,9 +2198,9 @@ func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request output = &DescribeTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -2311,7 +2321,7 @@ func (c *DynamoDB) DescribeTableReplicaAutoScalingRequest(input *DescribeTableRe // // Describes auto scaling settings across replicas of the global table at once. 
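The DescribeLimits walkthrough above amounts to: fetch the account quotas, total the provisioned capacity you already use, and compare. A minimal sketch, not part of the vendored file, that sums base-table throughput only (GSIs omitted for brevity, although the walkthrough says to include them):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	limits, err := svc.DescribeLimits(&dynamodb.DescribeLimitsInput{})
	if err != nil {
		log.Fatal(err)
	}

	// Sum provisioned capacity across base tables in this Region.
	var readUsed, writeUsed int64
	err = svc.ListTablesPages(&dynamodb.ListTablesInput{},
		func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
			for _, name := range page.TableNames {
				desc, derr := svc.DescribeTable(&dynamodb.DescribeTableInput{TableName: name})
				if derr != nil {
					log.Fatal(derr)
				}
				if tp := desc.Table.ProvisionedThroughput; tp != nil {
					readUsed += aws.Int64Value(tp.ReadCapacityUnits)
					writeUsed += aws.Int64Value(tp.WriteCapacityUnits)
				}
			}
			return true
		})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("read: %d of %d account RCUs, write: %d of %d account WCUs\n",
		readUsed, aws.Int64Value(limits.AccountMaxReadCapacityUnits),
		writeUsed, aws.Int64Value(limits.AccountMaxWriteCapacityUnits))
}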
// -// This method only applies to Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// This operation only applies to Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) // of global tables. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2390,9 +2400,9 @@ func (c *DynamoDB) DescribeTimeToLiveRequest(input *DescribeTimeToLiveInput) (re output = &DescribeTimeToLiveOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -2498,9 +2508,9 @@ func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, ou output = &GetItemOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -2558,9 +2568,9 @@ func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, ou // might not be specified correctly, or its status might not be ACTIVE. // // * RequestLimitExceeded -// Throughput exceeds the current throughput limit for your account. Please +// Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a limit increase. +// a quota increase. // // * InternalServerError // An error occurred on the server side. @@ -2626,9 +2636,9 @@ func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Req output = &ListBackupsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -2658,11 +2668,11 @@ func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Req // // List backups associated with an AWS account. To list backups for a given // table, specify TableName. ListBackups returns a paginated list of results -// with at most 1 MB worth of items in a page. You can also specify a limit -// for the maximum number of entries to be returned in a page. +// with at most 1 MB worth of items in a page. You can also specify a maximum +// number of entries to be returned in a page. // // In the request, start time is inclusive, but end time is exclusive. Note -// that these limits are for the time at which the original backup was requested. +// that these boundaries are for the time at which the original backup was requested. 
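The reworded ListBackups documentation above covers the per-page maximum and the inclusive/exclusive time bounds. A minimal sketch, not part of the vendored file, listing the last week of backups for one table; the table name is a placeholder:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.ListBackups(&dynamodb.ListBackupsInput{
		TableName:           aws.String("Music"), // placeholder
		Limit:               aws.Int64(25),       // maximum entries returned per page
		TimeRangeLowerBound: aws.Time(time.Now().Add(-7 * 24 * time.Hour)), // inclusive
		TimeRangeUpperBound: aws.Time(time.Now()),                          // exclusive
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.BackupSummaries {
		fmt.Println(aws.StringValue(b.BackupName), aws.TimeValue(b.BackupCreationDateTime))
	}
	// A non-nil out.LastEvaluatedBackupArn is passed as ExclusiveStartBackupArn
	// on the next call to fetch the following page.
}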
// // You can call ListBackups a maximum of five times per second. // @@ -2880,9 +2890,9 @@ func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *r output = &ListGlobalTablesOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -2912,7 +2922,7 @@ func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *r // // Lists all global tables that have a replica in the specified Region. // -// This method only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) // of global tables. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2993,9 +3003,9 @@ func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Reque output = &ListTablesOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -3151,9 +3161,9 @@ func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) (re output = &ListTagsOfResourceOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -3263,9 +3273,9 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou output = &PutItemOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -3325,9 +3335,15 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou // * PutItem in the AWS SDK for Ruby V2 (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem) // // When you add an item, the primary key attributes are the only required attributes. -// Attribute values cannot be null. String and Binary type attributes must have -// lengths greater than zero. Set type attributes cannot be empty. Requests -// with empty values will be rejected with a ValidationException exception. 
+// Attribute values cannot be null. +// +// Empty String and Binary attribute values are allowed. Attribute values of +// type String and Binary must have a length greater than zero if the attribute +// is used as a key attribute for a table or index. Set type attributes cannot +// be empty. +// +// Invalid Requests with empty values will be rejected with a ValidationException +// exception. // // To prevent a new item from replacing an existing item, use a conditional // expression that contains the attribute_not_exists function with the name @@ -3369,9 +3385,9 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou // Operation was rejected because there is an ongoing transaction for the item. // // * RequestLimitExceeded -// Throughput exceeds the current throughput limit for your account. Please +// Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a limit increase. +// a quota increase. // // * InternalServerError // An error occurred on the server side. @@ -3443,9 +3459,9 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output output = &QueryOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -3543,9 +3559,9 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output // might not be specified correctly, or its status might not be ACTIVE. // // * RequestLimitExceeded -// Throughput exceeds the current throughput limit for your account. Please +// Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a limit increase. +// a quota increase. // // * InternalServerError // An error occurred on the server side. @@ -3663,9 +3679,9 @@ func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupIn output = &RestoreTableFromBackupOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -3745,7 +3761,7 @@ func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupIn // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * InternalServerError // An error occurred on the server side. 
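The expanded PutItem documentation above points to a conditional expression with attribute_not_exists on the partition key as the way to avoid replacing an existing item. A minimal sketch, not part of the vendored file, using a hypothetical Music table keyed on Artist:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.PutItem(&dynamodb.PutItemInput{
		TableName: aws.String("Music"), // placeholder table with partition key "Artist"
		Item: map[string]*dynamodb.AttributeValue{
			"Artist":    {S: aws.String("No One You Know")},
			"SongTitle": {S: aws.String("Call Me Today")},
		},
		// Fail instead of silently replacing an existing item with the same key.
		ConditionExpression: aws.String("attribute_not_exists(Artist)"),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == dynamodb.ErrCodeConditionalCheckFailedException {
			fmt.Println("item already exists, not overwritten")
			return
		}
		log.Fatal(err)
	}
	fmt.Println("item written")
}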
@@ -3811,9 +3827,9 @@ func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointIn output = &RestoreTableToPointInTimeOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -3908,7 +3924,7 @@ func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointIn // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * InvalidRestoreTimeException // An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime @@ -3987,9 +4003,9 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * output = &ScanOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -4067,9 +4083,9 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * // might not be specified correctly, or its status might not be ACTIVE. // // * RequestLimitExceeded -// Throughput exceeds the current throughput limit for your account. Please +// Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a limit increase. +// a quota increase. // // * InternalServerError // An error occurred on the server side. @@ -4188,9 +4204,9 @@ func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Req output = &TagResourceOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -4246,7 +4262,7 @@ func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Req // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * ResourceNotFoundException // The operation tried to access a nonexistent table or index. 
The resource @@ -4321,9 +4337,9 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r output = &TransactGetItemsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -4480,9 +4496,9 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r // in the Amazon DynamoDB Developer Guide. // // * RequestLimitExceeded -// Throughput exceeds the current throughput limit for your account. Please +// Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a limit increase. +// a quota increase. // // * InternalServerError // An error occurred on the server side. @@ -4548,9 +4564,9 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re output = &TransactWriteItemsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -4748,9 +4764,9 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // in the Amazon DynamoDB Developer Guide. // // * RequestLimitExceeded -// Throughput exceeds the current throughput limit for your account. Please +// Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a limit increase. +// a quota increase. // // * InternalServerError // An error occurred on the server side. @@ -4817,9 +4833,9 @@ func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request output = &UntagResourceOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -4873,7 +4889,7 @@ func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * ResourceNotFoundException // The operation tried to access a nonexistent table or index. 
The resource @@ -4948,9 +4964,9 @@ func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackups output = &UpdateContinuousBackupsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -5152,9 +5168,9 @@ func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req output = &UpdateGlobalTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -5288,9 +5304,9 @@ func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSett output = &UpdateGlobalTableSettingsOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -5349,7 +5365,7 @@ func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSett // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * ResourceInUseException // The operation conflicts with the resource's availability. For example, you @@ -5420,9 +5436,9 @@ func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Reque output = &UpdateItemOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -5490,9 +5506,9 @@ func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Reque // Operation was rejected because there is an ongoing transaction for the item. // // * RequestLimitExceeded -// Throughput exceeds the current throughput limit for your account. Please +// Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a limit increase. +// a quota increase. // // * InternalServerError // An error occurred on the server side. 
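The RequestLimitExceeded rewording above (limit → quota) and the exception refactor later in this patch (exported RespMetadata, pointer receivers) affect how callers inspect throttling errors. Below is a minimal sketch (not part of the patch) of an UpdateItem call that distinguishes the account-level quota error from per-table throttling; the table, key, and attribute names are assumptions for illustration only.

// Sketch only: error handling against the exception types as refactored in this patch.
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.UpdateItem(&dynamodb.UpdateItemInput{
		TableName: aws.String("example-table"), // assumed table name
		Key: map[string]*dynamodb.AttributeValue{
			"pk": {S: aws.String("item-1")}, // assumed key attribute
		},
		UpdateExpression:          aws.String("SET #c = :v"),
		ExpressionAttributeNames:  map[string]*string{"#c": aws.String("counter")},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{":v": {N: aws.String("1")}},
	})

	// With pointer receivers and the exported RespMetadata field, errors.As can
	// target the concrete exception pointer types directly.
	var rle *dynamodb.RequestLimitExceeded
	var pte *dynamodb.ProvisionedThroughputExceededException
	switch {
	case errors.As(err, &rle):
		fmt.Println("account throughput quota exceeded:", rle.StatusCode(), rle.RequestID())
	case errors.As(err, &pte):
		fmt.Println("table throughput exceeded, retry with backoff:", pte.Message())
	case err != nil:
		fmt.Println("update failed:", err)
	}
}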
@@ -5558,9 +5574,9 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req output = &UpdateTableOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -5636,7 +5652,7 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * InternalServerError // An error occurred on the server side. @@ -5709,7 +5725,7 @@ func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplic // // Updates auto scaling settings on your global tables at once. // -// This method only applies to Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// This operation only applies to Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) // of global tables. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5741,7 +5757,7 @@ func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplic // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * InternalServerError // An error occurred on the server side. @@ -5807,9 +5823,9 @@ func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *r output = &UpdateTimeToLiveOutput{} req = c.newRequest(op, input, output) - // if a custom endpoint is provided for the request, - // we skip endpoint discovery workflow - if req.Config.Endpoint == nil { + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { if aws.BoolValue(req.Config.EnableEndpointDiscovery) { de := discovererDescribeEndpoints{ Required: false, @@ -5895,7 +5911,7 @@ func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *r // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. // // * InternalServerError // An error occurred on the server side. @@ -6808,8 +6824,8 @@ func (s *BackupDetails) SetBackupType(v string) *BackupDetails { // There is another ongoing conflicting backup control plane operation on the // table. The backup is either being created, deleted or restored to a table. 
type BackupInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6826,17 +6842,17 @@ func (s BackupInUseException) GoString() string { func newErrorBackupInUseException(v protocol.ResponseMetadata) error { return &BackupInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BackupInUseException) Code() string { +func (s *BackupInUseException) Code() string { return "BackupInUseException" } // Message returns the exception's message. -func (s BackupInUseException) Message() string { +func (s *BackupInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6844,28 +6860,28 @@ func (s BackupInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BackupInUseException) OrigErr() error { +func (s *BackupInUseException) OrigErr() error { return nil } -func (s BackupInUseException) Error() string { +func (s *BackupInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BackupInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BackupInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BackupInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *BackupInUseException) RequestID() string { + return s.RespMetadata.RequestID } // Backup not found for the given BackupARN. type BackupNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6882,17 +6898,17 @@ func (s BackupNotFoundException) GoString() string { func newErrorBackupNotFoundException(v protocol.ResponseMetadata) error { return &BackupNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BackupNotFoundException) Code() string { +func (s *BackupNotFoundException) Code() string { return "BackupNotFoundException" } // Message returns the exception's message. -func (s BackupNotFoundException) Message() string { +func (s *BackupNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6900,22 +6916,22 @@ func (s BackupNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BackupNotFoundException) OrigErr() error { +func (s *BackupNotFoundException) OrigErr() error { return nil } -func (s BackupNotFoundException) Error() string { +func (s *BackupNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BackupNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BackupNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s BackupNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *BackupNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains details for the backup. @@ -7818,8 +7834,8 @@ func (s *ConditionCheck) SetTableName(v string) *ConditionCheck { // A condition specified in the operation could not be evaluated. type ConditionalCheckFailedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The conditional request failed. Message_ *string `locationName:"message" type:"string"` @@ -7837,17 +7853,17 @@ func (s ConditionalCheckFailedException) GoString() string { func newErrorConditionalCheckFailedException(v protocol.ResponseMetadata) error { return &ConditionalCheckFailedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConditionalCheckFailedException) Code() string { +func (s *ConditionalCheckFailedException) Code() string { return "ConditionalCheckFailedException" } // Message returns the exception's message. -func (s ConditionalCheckFailedException) Message() string { +func (s *ConditionalCheckFailedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7855,22 +7871,22 @@ func (s ConditionalCheckFailedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConditionalCheckFailedException) OrigErr() error { +func (s *ConditionalCheckFailedException) OrigErr() error { return nil } -func (s ConditionalCheckFailedException) Error() string { +func (s *ConditionalCheckFailedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConditionalCheckFailedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConditionalCheckFailedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConditionalCheckFailedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConditionalCheckFailedException) RequestID() string { + return s.RespMetadata.RequestID } // The capacity units consumed by an operation. The data returned includes the @@ -7994,8 +8010,8 @@ func (s *ContinuousBackupsDescription) SetPointInTimeRecoveryDescription(v *Poin // Backups have not yet been enabled for this table. type ContinuousBackupsUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8012,17 +8028,17 @@ func (s ContinuousBackupsUnavailableException) GoString() string { func newErrorContinuousBackupsUnavailableException(v protocol.ResponseMetadata) error { return &ContinuousBackupsUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ContinuousBackupsUnavailableException) Code() string { +func (s *ContinuousBackupsUnavailableException) Code() string { return "ContinuousBackupsUnavailableException" } // Message returns the exception's message. 
-func (s ContinuousBackupsUnavailableException) Message() string { +func (s *ContinuousBackupsUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8030,22 +8046,22 @@ func (s ContinuousBackupsUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ContinuousBackupsUnavailableException) OrigErr() error { +func (s *ContinuousBackupsUnavailableException) OrigErr() error { return nil } -func (s ContinuousBackupsUnavailableException) Error() string { +func (s *ContinuousBackupsUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ContinuousBackupsUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ContinuousBackupsUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ContinuousBackupsUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ContinuousBackupsUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // Represents a Contributor Insights summary entry.. @@ -8196,8 +8212,8 @@ type CreateGlobalSecondaryIndexAction struct { // Represents the provisioned throughput settings for the specified global secondary // index. // - // For current minimum and maximum provisioned throughput values, see Limits - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // in the Amazon DynamoDB Developer Guide. ProvisionedThroughput *ProvisionedThroughput `type:"structure"` } @@ -8599,8 +8615,8 @@ type CreateTableInput struct { // If you set BillingMode as PROVISIONED, you must specify this property. If // you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. // - // For current minimum and maximum provisioned throughput values, see Limits - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // in the Amazon DynamoDB Developer Guide. ProvisionedThroughput *ProvisionedThroughput `type:"structure"` @@ -10804,8 +10820,8 @@ type GlobalSecondaryIndex struct { // Represents the provisioned throughput settings for the specified global secondary // index. // - // For current minimum and maximum provisioned throughput values, see Limits - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // in the Amazon DynamoDB Developer Guide. ProvisionedThroughput *ProvisionedThroughput `type:"structure"` } @@ -11013,8 +11029,8 @@ type GlobalSecondaryIndexDescription struct { // Represents the provisioned throughput settings for the specified global secondary // index. 
// - // For current minimum and maximum provisioned throughput values, see Limits - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // in the Amazon DynamoDB Developer Guide. ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` } @@ -11273,8 +11289,8 @@ func (s *GlobalTable) SetReplicationGroup(v []*Replica) *GlobalTable { // The specified global table already exists. type GlobalTableAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11291,17 +11307,17 @@ func (s GlobalTableAlreadyExistsException) GoString() string { func newErrorGlobalTableAlreadyExistsException(v protocol.ResponseMetadata) error { return &GlobalTableAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s GlobalTableAlreadyExistsException) Code() string { +func (s *GlobalTableAlreadyExistsException) Code() string { return "GlobalTableAlreadyExistsException" } // Message returns the exception's message. -func (s GlobalTableAlreadyExistsException) Message() string { +func (s *GlobalTableAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11309,22 +11325,22 @@ func (s GlobalTableAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s GlobalTableAlreadyExistsException) OrigErr() error { +func (s *GlobalTableAlreadyExistsException) OrigErr() error { return nil } -func (s GlobalTableAlreadyExistsException) Error() string { +func (s *GlobalTableAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s GlobalTableAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *GlobalTableAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s GlobalTableAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *GlobalTableAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about the global table. @@ -11469,8 +11485,8 @@ func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapac // The specified global table does not exist. type GlobalTableNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11487,17 +11503,17 @@ func (s GlobalTableNotFoundException) GoString() string { func newErrorGlobalTableNotFoundException(v protocol.ResponseMetadata) error { return &GlobalTableNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s GlobalTableNotFoundException) Code() string { +func (s *GlobalTableNotFoundException) Code() string { return "GlobalTableNotFoundException" } // Message returns the exception's message. 
-func (s GlobalTableNotFoundException) Message() string { +func (s *GlobalTableNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11505,29 +11521,29 @@ func (s GlobalTableNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s GlobalTableNotFoundException) OrigErr() error { +func (s *GlobalTableNotFoundException) OrigErr() error { return nil } -func (s GlobalTableNotFoundException) Error() string { +func (s *GlobalTableNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s GlobalTableNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *GlobalTableNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s GlobalTableNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *GlobalTableNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // DynamoDB rejected the request because you retried a request with a different // payload but with an idempotent token that was already used. type IdempotentParameterMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -11544,17 +11560,17 @@ func (s IdempotentParameterMismatchException) GoString() string { func newErrorIdempotentParameterMismatchException(v protocol.ResponseMetadata) error { return &IdempotentParameterMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IdempotentParameterMismatchException) Code() string { +func (s *IdempotentParameterMismatchException) Code() string { return "IdempotentParameterMismatchException" } // Message returns the exception's message. -func (s IdempotentParameterMismatchException) Message() string { +func (s *IdempotentParameterMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11562,28 +11578,28 @@ func (s IdempotentParameterMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IdempotentParameterMismatchException) OrigErr() error { +func (s *IdempotentParameterMismatchException) OrigErr() error { return nil } -func (s IdempotentParameterMismatchException) Error() string { +func (s *IdempotentParameterMismatchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IdempotentParameterMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IdempotentParameterMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IdempotentParameterMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *IdempotentParameterMismatchException) RequestID() string { + return s.RespMetadata.RequestID } // The operation tried to access a nonexistent index. 
type IndexNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11600,17 +11616,17 @@ func (s IndexNotFoundException) GoString() string { func newErrorIndexNotFoundException(v protocol.ResponseMetadata) error { return &IndexNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IndexNotFoundException) Code() string { +func (s *IndexNotFoundException) Code() string { return "IndexNotFoundException" } // Message returns the exception's message. -func (s IndexNotFoundException) Message() string { +func (s *IndexNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11618,28 +11634,28 @@ func (s IndexNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IndexNotFoundException) OrigErr() error { +func (s *IndexNotFoundException) OrigErr() error { return nil } -func (s IndexNotFoundException) Error() string { +func (s *IndexNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IndexNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IndexNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IndexNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *IndexNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // An error occurred on the server side. type InternalServerError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The server encountered an internal error trying to fulfill the request. Message_ *string `locationName:"message" type:"string"` @@ -11657,17 +11673,17 @@ func (s InternalServerError) GoString() string { func newErrorInternalServerError(v protocol.ResponseMetadata) error { return &InternalServerError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerError) Code() string { +func (s *InternalServerError) Code() string { return "InternalServerError" } // Message returns the exception's message. -func (s InternalServerError) Message() string { +func (s *InternalServerError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11675,29 +11691,29 @@ func (s InternalServerError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerError) OrigErr() error { +func (s *InternalServerError) OrigErr() error { return nil } -func (s InternalServerError) Error() string { +func (s *InternalServerError) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InternalServerError) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerError) RequestID() string { + return s.RespMetadata.RequestID } // An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime // and LatestRestorableDateTime. type InvalidRestoreTimeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11714,17 +11730,17 @@ func (s InvalidRestoreTimeException) GoString() string { func newErrorInvalidRestoreTimeException(v protocol.ResponseMetadata) error { return &InvalidRestoreTimeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRestoreTimeException) Code() string { +func (s *InvalidRestoreTimeException) Code() string { return "InvalidRestoreTimeException" } // Message returns the exception's message. -func (s InvalidRestoreTimeException) Message() string { +func (s *InvalidRestoreTimeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11732,22 +11748,22 @@ func (s InvalidRestoreTimeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRestoreTimeException) OrigErr() error { +func (s *InvalidRestoreTimeException) OrigErr() error { return nil } -func (s InvalidRestoreTimeException) Error() string { +func (s *InvalidRestoreTimeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRestoreTimeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRestoreTimeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRestoreTimeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRestoreTimeException) RequestID() string { + return s.RespMetadata.RequestID } // Information about item collections, if any, that were affected by the operation. @@ -11798,8 +11814,8 @@ func (s *ItemCollectionMetrics) SetSizeEstimateRangeGB(v []*float64) *ItemCollec // An item collection is too large. This exception is only returned for tables // that have one or more local secondary indexes. type ItemCollectionSizeLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The total size of an item collection has exceeded the maximum limit of 10 // gigabytes. @@ -11818,17 +11834,17 @@ func (s ItemCollectionSizeLimitExceededException) GoString() string { func newErrorItemCollectionSizeLimitExceededException(v protocol.ResponseMetadata) error { return &ItemCollectionSizeLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ItemCollectionSizeLimitExceededException) Code() string { +func (s *ItemCollectionSizeLimitExceededException) Code() string { return "ItemCollectionSizeLimitExceededException" } // Message returns the exception's message. 
-func (s ItemCollectionSizeLimitExceededException) Message() string { +func (s *ItemCollectionSizeLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11836,22 +11852,22 @@ func (s ItemCollectionSizeLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ItemCollectionSizeLimitExceededException) OrigErr() error { +func (s *ItemCollectionSizeLimitExceededException) OrigErr() error { return nil } -func (s ItemCollectionSizeLimitExceededException) Error() string { +func (s *ItemCollectionSizeLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ItemCollectionSizeLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ItemCollectionSizeLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ItemCollectionSizeLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ItemCollectionSizeLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Details for the requested item. @@ -12102,10 +12118,10 @@ func (s *KeysAndAttributes) SetProjectionExpression(v string) *KeysAndAttributes // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // -// There is a soft account limit of 256 tables. +// There is a soft account quota of 256 tables. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Too many operations for a given subscriber. Message_ *string `locationName:"message" type:"string"` @@ -12123,17 +12139,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12141,22 +12157,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListBackupsInput struct { @@ -13006,8 +13022,8 @@ func (s *PointInTimeRecoverySpecification) SetPointInTimeRecoveryEnabled(v bool) // Point in time recovery has not yet been enabled for this source table. type PointInTimeRecoveryUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13024,17 +13040,17 @@ func (s PointInTimeRecoveryUnavailableException) GoString() string { func newErrorPointInTimeRecoveryUnavailableException(v protocol.ResponseMetadata) error { return &PointInTimeRecoveryUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PointInTimeRecoveryUnavailableException) Code() string { +func (s *PointInTimeRecoveryUnavailableException) Code() string { return "PointInTimeRecoveryUnavailableException" } // Message returns the exception's message. -func (s PointInTimeRecoveryUnavailableException) Message() string { +func (s *PointInTimeRecoveryUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13042,22 +13058,22 @@ func (s PointInTimeRecoveryUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PointInTimeRecoveryUnavailableException) OrigErr() error { +func (s *PointInTimeRecoveryUnavailableException) OrigErr() error { return nil } -func (s PointInTimeRecoveryUnavailableException) Error() string { +func (s *PointInTimeRecoveryUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PointInTimeRecoveryUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PointInTimeRecoveryUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PointInTimeRecoveryUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *PointInTimeRecoveryUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // Represents attributes that are copied (projected) from the table into an @@ -13078,8 +13094,8 @@ type Projection struct { // // * KEYS_ONLY - Only the index and primary keys are projected into the index. // - // * INCLUDE - Only the specified table attributes are projected into the - // index. The list of projected attributes is in NonKeyAttributes. + // * INCLUDE - In addition to the attributes described in KEYS_ONLY, the + // secondary index will include other non-key attributes that you specify. // // * ALL - All of the table attributes are projected into the index. ProjectionType *string `type:"string" enum:"ProjectionType"` @@ -13123,8 +13139,8 @@ func (s *Projection) SetProjectionType(v string) *Projection { // Represents the provisioned throughput settings for a specified table or index. // The settings can be modified using the UpdateTable operation. 
// -// For current minimum and maximum provisioned throughput values, see Limits -// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// For current minimum and maximum provisioned throughput values, see Service, +// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // in the Amazon DynamoDB Developer Guide. type ProvisionedThroughput struct { _ struct{} `type:"structure"` @@ -13207,7 +13223,7 @@ type ProvisionedThroughputDescription struct { // The number of provisioned throughput decreases for this table during this // UTC calendar day. For current maximums on provisioned throughput decreases, - // see Limits (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // in the Amazon DynamoDB Developer Guide. NumberOfDecreasesToday *int64 `min:"1" type:"long"` @@ -13269,8 +13285,8 @@ func (s *ProvisionedThroughputDescription) SetWriteCapacityUnits(v int64) *Provi // Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. type ProvisionedThroughputExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // You exceeded your maximum allowed provisioned throughput. Message_ *string `locationName:"message" type:"string"` @@ -13288,17 +13304,17 @@ func (s ProvisionedThroughputExceededException) GoString() string { func newErrorProvisionedThroughputExceededException(v protocol.ResponseMetadata) error { return &ProvisionedThroughputExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ProvisionedThroughputExceededException) Code() string { +func (s *ProvisionedThroughputExceededException) Code() string { return "ProvisionedThroughputExceededException" } // Message returns the exception's message. -func (s ProvisionedThroughputExceededException) Message() string { +func (s *ProvisionedThroughputExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13306,22 +13322,22 @@ func (s ProvisionedThroughputExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ProvisionedThroughputExceededException) OrigErr() error { +func (s *ProvisionedThroughputExceededException) OrigErr() error { return nil } -func (s ProvisionedThroughputExceededException) Error() string { +func (s *ProvisionedThroughputExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ProvisionedThroughputExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ProvisionedThroughputExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ProvisionedThroughputExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ProvisionedThroughputExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Replica-specific provisioned throughput settings. 
If not specified, uses @@ -13564,6 +13580,10 @@ type PutItemInput struct { // types for those attributes must match those of the schema in the table's // attribute definition. // + // Empty String and Binary attribute values are allowed. Attribute values of + // type String and Binary must have a length greater than zero if the attribute + // is used as a key attribute for a table or index. + // // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey) // in the Amazon DynamoDB Developer Guide. // @@ -14360,8 +14380,8 @@ func (s *Replica) SetRegionName(v string) *Replica { // The specified replica is already part of the global table. type ReplicaAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14378,17 +14398,17 @@ func (s ReplicaAlreadyExistsException) GoString() string { func newErrorReplicaAlreadyExistsException(v protocol.ResponseMetadata) error { return &ReplicaAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ReplicaAlreadyExistsException) Code() string { +func (s *ReplicaAlreadyExistsException) Code() string { return "ReplicaAlreadyExistsException" } // Message returns the exception's message. -func (s ReplicaAlreadyExistsException) Message() string { +func (s *ReplicaAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14396,22 +14416,22 @@ func (s ReplicaAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ReplicaAlreadyExistsException) OrigErr() error { +func (s *ReplicaAlreadyExistsException) OrigErr() error { return nil } -func (s ReplicaAlreadyExistsException) Error() string { +func (s *ReplicaAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ReplicaAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ReplicaAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ReplicaAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ReplicaAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the auto scaling settings of the replica. @@ -14576,6 +14596,10 @@ type ReplicaDescription struct { // The name of the Region. RegionName *string `type:"string"` + // The time at which the replica was first detected as inaccessible. To determine + // cause of inaccessibility check the ReplicaStatus property. + ReplicaInaccessibleDateTime *time.Time `type:"timestamp"` + // The current state of the replica: // // * CREATING - The replica is being created. @@ -14585,6 +14609,12 @@ type ReplicaDescription struct { // * DELETING - The replica is being deleted. // // * ACTIVE - The replica is ready for use. + // + // * REGION_DISABLED - The replica is inaccessible because the AWS Region + // has been disabled. If the AWS Region remains inaccessible for more than + // 20 hours, DynamoDB will remove this replica from the replication group. 
+ // The replica will not be deleted and replication will stop from and to + // this region. ReplicaStatus *string `type:"string" enum:"ReplicaStatus"` // Detailed information about the replica status. @@ -14629,6 +14659,12 @@ func (s *ReplicaDescription) SetRegionName(v string) *ReplicaDescription { return s } +// SetReplicaInaccessibleDateTime sets the ReplicaInaccessibleDateTime field's value. +func (s *ReplicaDescription) SetReplicaInaccessibleDateTime(v time.Time) *ReplicaDescription { + s.ReplicaInaccessibleDateTime = &v + return s +} + // SetReplicaStatus sets the ReplicaStatus field's value. func (s *ReplicaDescription) SetReplicaStatus(v string) *ReplicaDescription { s.ReplicaStatus = &v @@ -15010,8 +15046,8 @@ func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityUn // The specified replica is no longer part of the global table. type ReplicaNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15028,17 +15064,17 @@ func (s ReplicaNotFoundException) GoString() string { func newErrorReplicaNotFoundException(v protocol.ResponseMetadata) error { return &ReplicaNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ReplicaNotFoundException) Code() string { +func (s *ReplicaNotFoundException) Code() string { return "ReplicaNotFoundException" } // Message returns the exception's message. -func (s ReplicaNotFoundException) Message() string { +func (s *ReplicaNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15046,22 +15082,22 @@ func (s ReplicaNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ReplicaNotFoundException) OrigErr() error { +func (s *ReplicaNotFoundException) OrigErr() error { return nil } -func (s ReplicaNotFoundException) Error() string { +func (s *ReplicaNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ReplicaNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ReplicaNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ReplicaNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ReplicaNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the properties of a replica. @@ -15396,12 +15432,12 @@ func (s *ReplicationGroupUpdate) SetUpdate(v *UpdateReplicationGroupMemberAction return s } -// Throughput exceeds the current throughput limit for your account. Please +// Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a limit increase. +// a quota increase. 
type RequestLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15418,17 +15454,17 @@ func (s RequestLimitExceeded) GoString() string { func newErrorRequestLimitExceeded(v protocol.ResponseMetadata) error { return &RequestLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RequestLimitExceeded) Code() string { +func (s *RequestLimitExceeded) Code() string { return "RequestLimitExceeded" } // Message returns the exception's message. -func (s RequestLimitExceeded) Message() string { +func (s *RequestLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15436,30 +15472,30 @@ func (s RequestLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RequestLimitExceeded) OrigErr() error { +func (s *RequestLimitExceeded) OrigErr() error { return nil } -func (s RequestLimitExceeded) Error() string { +func (s *RequestLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RequestLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RequestLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RequestLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *RequestLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // The operation conflicts with the resource's availability. For example, you // attempted to recreate an existing table, or tried to delete a table currently // in the CREATING state. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The resource which is being attempted to be changed is in use. Message_ *string `locationName:"message" type:"string"` @@ -15477,17 +15513,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15495,29 +15531,29 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The operation tried to access a nonexistent table or index. The resource // might not be specified correctly, or its status might not be ACTIVE. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The resource which is being requested does not exist. Message_ *string `locationName:"message" type:"string"` @@ -15535,17 +15571,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15553,22 +15589,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains details for the restore. @@ -16792,8 +16828,8 @@ func (s *StreamSpecification) SetStreamViewType(v string) *StreamSpecification { // A target table with the specified name already exists. type TableAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16810,17 +16846,17 @@ func (s TableAlreadyExistsException) GoString() string { func newErrorTableAlreadyExistsException(v protocol.ResponseMetadata) error { return &TableAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TableAlreadyExistsException) Code() string { +func (s *TableAlreadyExistsException) Code() string { return "TableAlreadyExistsException" } // Message returns the exception's message. 
-func (s TableAlreadyExistsException) Message() string { +func (s *TableAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16828,22 +16864,22 @@ func (s TableAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TableAlreadyExistsException) OrigErr() error { +func (s *TableAlreadyExistsException) OrigErr() error { return nil } -func (s TableAlreadyExistsException) Error() string { +func (s *TableAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TableAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TableAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TableAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TableAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the auto scaling configuration for a global table. @@ -16957,14 +16993,14 @@ type TableDescription struct { // and index key attributes, which are automatically projected. Each attribute // specification is composed of: ProjectionType - One of the following: KEYS_ONLY // - Only the index and primary keys are projected into the index. INCLUDE - // - Only the specified table attributes are projected into the index. The - // list of projected attributes is in NonKeyAttributes. ALL - All of the - // table attributes are projected into the index. NonKeyAttributes - A list - // of one or more non-key attribute names that are projected into the secondary - // index. The total count of attributes provided in NonKeyAttributes, summed - // across all of the secondary indexes, must not exceed 20. If you project - // the same attribute into two different indexes, this counts as two distinct - // attributes when determining the total. + // - In addition to the attributes described in KEYS_ONLY, the secondary + // index will include other non-key attributes that you specify. ALL - All + // of the table attributes are projected into the index. NonKeyAttributes + // - A list of one or more non-key attribute names that are projected into + // the secondary index. The total count of attributes provided in NonKeyAttributes, + // summed across all of the secondary indexes, must not exceed 20. If you + // project the same attribute into two different indexes, this counts as + // two distinct attributes when determining the total. // // * ProvisionedThroughput - The provisioned throughput settings for the // global secondary index, consisting of read and write capacity units, along @@ -17246,8 +17282,8 @@ func (s *TableDescription) SetTableStatus(v string) *TableDescription { // A target table with the specified name is either being created or deleted. type TableInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17264,17 +17300,17 @@ func (s TableInUseException) GoString() string { func newErrorTableInUseException(v protocol.ResponseMetadata) error { return &TableInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s TableInUseException) Code() string { +func (s *TableInUseException) Code() string { return "TableInUseException" } // Message returns the exception's message. -func (s TableInUseException) Message() string { +func (s *TableInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17282,29 +17318,29 @@ func (s TableInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TableInUseException) OrigErr() error { +func (s *TableInUseException) OrigErr() error { return nil } -func (s TableInUseException) Error() string { +func (s *TableInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TableInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TableInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TableInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *TableInUseException) RequestID() string { + return s.RespMetadata.RequestID } // A source table with the name TableName does not currently exist within the // subscriber's account. type TableNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17321,17 +17357,17 @@ func (s TableNotFoundException) GoString() string { func newErrorTableNotFoundException(v protocol.ResponseMetadata) error { return &TableNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TableNotFoundException) Code() string { +func (s *TableNotFoundException) Code() string { return "TableNotFoundException" } // Message returns the exception's message. -func (s TableNotFoundException) Message() string { +func (s *TableNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17339,22 +17375,22 @@ func (s TableNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TableNotFoundException) OrigErr() error { +func (s *TableNotFoundException) OrigErr() error { return nil } -func (s TableNotFoundException) Error() string { +func (s *TableNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TableNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TableNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TableNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *TableNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Describes a tag. A tag is a key-value pair. You can add up to 50 tags to @@ -18070,8 +18106,8 @@ func (s *TransactWriteItemsOutput) SetItemCollectionMetrics(v map[string][]*Item // The provided expression refers to an attribute that does not exist in // the item. 
type TransactionCanceledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A list of cancellation reasons. CancellationReasons []*CancellationReason `min:"1" type:"list"` @@ -18091,17 +18127,17 @@ func (s TransactionCanceledException) GoString() string { func newErrorTransactionCanceledException(v protocol.ResponseMetadata) error { return &TransactionCanceledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TransactionCanceledException) Code() string { +func (s *TransactionCanceledException) Code() string { return "TransactionCanceledException" } // Message returns the exception's message. -func (s TransactionCanceledException) Message() string { +func (s *TransactionCanceledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18109,28 +18145,28 @@ func (s TransactionCanceledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TransactionCanceledException) OrigErr() error { +func (s *TransactionCanceledException) OrigErr() error { return nil } -func (s TransactionCanceledException) Error() string { +func (s *TransactionCanceledException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TransactionCanceledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TransactionCanceledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TransactionCanceledException) RequestID() string { - return s.respMetadata.RequestID +func (s *TransactionCanceledException) RequestID() string { + return s.RespMetadata.RequestID } // Operation was rejected because there is an ongoing transaction for the item. type TransactionConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18147,17 +18183,17 @@ func (s TransactionConflictException) GoString() string { func newErrorTransactionConflictException(v protocol.ResponseMetadata) error { return &TransactionConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TransactionConflictException) Code() string { +func (s *TransactionConflictException) Code() string { return "TransactionConflictException" } // Message returns the exception's message. -func (s TransactionConflictException) Message() string { +func (s *TransactionConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18165,28 +18201,28 @@ func (s TransactionConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TransactionConflictException) OrigErr() error { +func (s *TransactionConflictException) OrigErr() error { return nil } -func (s TransactionConflictException) Error() string { +func (s *TransactionConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s TransactionConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TransactionConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TransactionConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *TransactionConflictException) RequestID() string { + return s.RespMetadata.RequestID } // The transaction with the given request token is already in progress. type TransactionInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -18203,17 +18239,17 @@ func (s TransactionInProgressException) GoString() string { func newErrorTransactionInProgressException(v protocol.ResponseMetadata) error { return &TransactionInProgressException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TransactionInProgressException) Code() string { +func (s *TransactionInProgressException) Code() string { return "TransactionInProgressException" } // Message returns the exception's message. -func (s TransactionInProgressException) Message() string { +func (s *TransactionInProgressException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18221,22 +18257,22 @@ func (s TransactionInProgressException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TransactionInProgressException) OrigErr() error { +func (s *TransactionInProgressException) OrigErr() error { return nil } -func (s TransactionInProgressException) Error() string { +func (s *TransactionInProgressException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TransactionInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TransactionInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TransactionInProgressException) RequestID() string { - return s.respMetadata.RequestID +func (s *TransactionInProgressException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -18625,8 +18661,8 @@ type UpdateGlobalSecondaryIndexAction struct { // Represents the provisioned throughput settings for the specified global secondary // index. // - // For current minimum and maximum provisioned throughput values, see Limits - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // For current minimum and maximum provisioned throughput values, see Service, + // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) // in the Amazon DynamoDB Developer Guide. 
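The hunks above migrate the generated DynamoDB exception types from value receivers with an unexported respMetadata field to pointer receivers with an exported RespMetadata field, so each modeled exception now satisfies error and awserr.RequestFailure as a pointer. A minimal sketch of how downstream code can take advantage of that with errors.As, assuming credentials and region come from the environment; the table name and describeTable helper are illustrative only:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func describeTable(svc *dynamodb.DynamoDB, name string) {
	_, err := svc.DescribeTable(&dynamodb.DescribeTableInput{
		TableName: aws.String(name),
	})

	// With pointer receivers, the modeled exception can be matched directly
	// with errors.As instead of switching on awserr.Error code strings.
	var nfe *dynamodb.ResourceNotFoundException
	if errors.As(err, &nfe) {
		// StatusCode and RequestID now read from the exported RespMetadata.
		fmt.Printf("table %q not found (status %d, request %s)\n",
			name, nfe.StatusCode(), nfe.RequestID())
		return
	}
	if err != nil {
		fmt.Println("DescribeTable failed:", err)
	}
}

func main() {
	sess := session.Must(session.NewSession())
	describeTable(dynamodb.New(sess), "example-table")
}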
// // ProvisionedThroughput is a required field @@ -19865,6 +19901,15 @@ const ( AttributeActionDelete = "DELETE" ) +// AttributeAction_Values returns all elements of the AttributeAction enum +func AttributeAction_Values() []string { + return []string{ + AttributeActionAdd, + AttributeActionPut, + AttributeActionDelete, + } +} + const ( // BackupStatusCreating is a BackupStatus enum value BackupStatusCreating = "CREATING" @@ -19876,6 +19921,15 @@ const ( BackupStatusAvailable = "AVAILABLE" ) +// BackupStatus_Values returns all elements of the BackupStatus enum +func BackupStatus_Values() []string { + return []string{ + BackupStatusCreating, + BackupStatusDeleted, + BackupStatusAvailable, + } +} + const ( // BackupTypeUser is a BackupType enum value BackupTypeUser = "USER" @@ -19887,6 +19941,15 @@ const ( BackupTypeAwsBackup = "AWS_BACKUP" ) +// BackupType_Values returns all elements of the BackupType enum +func BackupType_Values() []string { + return []string{ + BackupTypeUser, + BackupTypeSystem, + BackupTypeAwsBackup, + } +} + const ( // BackupTypeFilterUser is a BackupTypeFilter enum value BackupTypeFilterUser = "USER" @@ -19901,6 +19964,16 @@ const ( BackupTypeFilterAll = "ALL" ) +// BackupTypeFilter_Values returns all elements of the BackupTypeFilter enum +func BackupTypeFilter_Values() []string { + return []string{ + BackupTypeFilterUser, + BackupTypeFilterSystem, + BackupTypeFilterAwsBackup, + BackupTypeFilterAll, + } +} + const ( // BillingModeProvisioned is a BillingMode enum value BillingModeProvisioned = "PROVISIONED" @@ -19909,6 +19982,14 @@ const ( BillingModePayPerRequest = "PAY_PER_REQUEST" ) +// BillingMode_Values returns all elements of the BillingMode enum +func BillingMode_Values() []string { + return []string{ + BillingModeProvisioned, + BillingModePayPerRequest, + } +} + const ( // ComparisonOperatorEq is a ComparisonOperator enum value ComparisonOperatorEq = "EQ" @@ -19950,6 +20031,25 @@ const ( ComparisonOperatorBeginsWith = "BEGINS_WITH" ) +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorEq, + ComparisonOperatorNe, + ComparisonOperatorIn, + ComparisonOperatorLe, + ComparisonOperatorLt, + ComparisonOperatorGe, + ComparisonOperatorGt, + ComparisonOperatorBetween, + ComparisonOperatorNotNull, + ComparisonOperatorNull, + ComparisonOperatorContains, + ComparisonOperatorNotContains, + ComparisonOperatorBeginsWith, + } +} + const ( // ConditionalOperatorAnd is a ConditionalOperator enum value ConditionalOperatorAnd = "AND" @@ -19958,6 +20058,14 @@ const ( ConditionalOperatorOr = "OR" ) +// ConditionalOperator_Values returns all elements of the ConditionalOperator enum +func ConditionalOperator_Values() []string { + return []string{ + ConditionalOperatorAnd, + ConditionalOperatorOr, + } +} + const ( // ContinuousBackupsStatusEnabled is a ContinuousBackupsStatus enum value ContinuousBackupsStatusEnabled = "ENABLED" @@ -19966,6 +20074,14 @@ const ( ContinuousBackupsStatusDisabled = "DISABLED" ) +// ContinuousBackupsStatus_Values returns all elements of the ContinuousBackupsStatus enum +func ContinuousBackupsStatus_Values() []string { + return []string{ + ContinuousBackupsStatusEnabled, + ContinuousBackupsStatusDisabled, + } +} + const ( // ContributorInsightsActionEnable is a ContributorInsightsAction enum value ContributorInsightsActionEnable = "ENABLE" @@ -19974,6 +20090,14 @@ const ( ContributorInsightsActionDisable = "DISABLE" ) +// 
ContributorInsightsAction_Values returns all elements of the ContributorInsightsAction enum +func ContributorInsightsAction_Values() []string { + return []string{ + ContributorInsightsActionEnable, + ContributorInsightsActionDisable, + } +} + const ( // ContributorInsightsStatusEnabling is a ContributorInsightsStatus enum value ContributorInsightsStatusEnabling = "ENABLING" @@ -19991,6 +20115,17 @@ const ( ContributorInsightsStatusFailed = "FAILED" ) +// ContributorInsightsStatus_Values returns all elements of the ContributorInsightsStatus enum +func ContributorInsightsStatus_Values() []string { + return []string{ + ContributorInsightsStatusEnabling, + ContributorInsightsStatusEnabled, + ContributorInsightsStatusDisabling, + ContributorInsightsStatusDisabled, + ContributorInsightsStatusFailed, + } +} + const ( // GlobalTableStatusCreating is a GlobalTableStatus enum value GlobalTableStatusCreating = "CREATING" @@ -20005,6 +20140,16 @@ const ( GlobalTableStatusUpdating = "UPDATING" ) +// GlobalTableStatus_Values returns all elements of the GlobalTableStatus enum +func GlobalTableStatus_Values() []string { + return []string{ + GlobalTableStatusCreating, + GlobalTableStatusActive, + GlobalTableStatusDeleting, + GlobalTableStatusUpdating, + } +} + const ( // IndexStatusCreating is a IndexStatus enum value IndexStatusCreating = "CREATING" @@ -20019,6 +20164,16 @@ const ( IndexStatusActive = "ACTIVE" ) +// IndexStatus_Values returns all elements of the IndexStatus enum +func IndexStatus_Values() []string { + return []string{ + IndexStatusCreating, + IndexStatusUpdating, + IndexStatusDeleting, + IndexStatusActive, + } +} + const ( // KeyTypeHash is a KeyType enum value KeyTypeHash = "HASH" @@ -20027,6 +20182,14 @@ const ( KeyTypeRange = "RANGE" ) +// KeyType_Values returns all elements of the KeyType enum +func KeyType_Values() []string { + return []string{ + KeyTypeHash, + KeyTypeRange, + } +} + const ( // PointInTimeRecoveryStatusEnabled is a PointInTimeRecoveryStatus enum value PointInTimeRecoveryStatusEnabled = "ENABLED" @@ -20035,6 +20198,14 @@ const ( PointInTimeRecoveryStatusDisabled = "DISABLED" ) +// PointInTimeRecoveryStatus_Values returns all elements of the PointInTimeRecoveryStatus enum +func PointInTimeRecoveryStatus_Values() []string { + return []string{ + PointInTimeRecoveryStatusEnabled, + PointInTimeRecoveryStatusDisabled, + } +} + const ( // ProjectionTypeAll is a ProjectionType enum value ProjectionTypeAll = "ALL" @@ -20046,6 +20217,15 @@ const ( ProjectionTypeInclude = "INCLUDE" ) +// ProjectionType_Values returns all elements of the ProjectionType enum +func ProjectionType_Values() []string { + return []string{ + ProjectionTypeAll, + ProjectionTypeKeysOnly, + ProjectionTypeInclude, + } +} + const ( // ReplicaStatusCreating is a ReplicaStatus enum value ReplicaStatusCreating = "CREATING" @@ -20061,8 +20241,23 @@ const ( // ReplicaStatusActive is a ReplicaStatus enum value ReplicaStatusActive = "ACTIVE" + + // ReplicaStatusRegionDisabled is a ReplicaStatus enum value + ReplicaStatusRegionDisabled = "REGION_DISABLED" ) +// ReplicaStatus_Values returns all elements of the ReplicaStatus enum +func ReplicaStatus_Values() []string { + return []string{ + ReplicaStatusCreating, + ReplicaStatusCreationFailed, + ReplicaStatusUpdating, + ReplicaStatusDeleting, + ReplicaStatusActive, + ReplicaStatusRegionDisabled, + } +} + // Determines the level of detail about provisioned throughput consumption that // is returned in the response: // @@ -20087,6 +20282,15 @@ const ( 
ReturnConsumedCapacityNone = "NONE" ) +// ReturnConsumedCapacity_Values returns all elements of the ReturnConsumedCapacity enum +func ReturnConsumedCapacity_Values() []string { + return []string{ + ReturnConsumedCapacityIndexes, + ReturnConsumedCapacityTotal, + ReturnConsumedCapacityNone, + } +} + const ( // ReturnItemCollectionMetricsSize is a ReturnItemCollectionMetrics enum value ReturnItemCollectionMetricsSize = "SIZE" @@ -20095,6 +20299,14 @@ const ( ReturnItemCollectionMetricsNone = "NONE" ) +// ReturnItemCollectionMetrics_Values returns all elements of the ReturnItemCollectionMetrics enum +func ReturnItemCollectionMetrics_Values() []string { + return []string{ + ReturnItemCollectionMetricsSize, + ReturnItemCollectionMetricsNone, + } +} + const ( // ReturnValueNone is a ReturnValue enum value ReturnValueNone = "NONE" @@ -20112,6 +20324,17 @@ const ( ReturnValueUpdatedNew = "UPDATED_NEW" ) +// ReturnValue_Values returns all elements of the ReturnValue enum +func ReturnValue_Values() []string { + return []string{ + ReturnValueNone, + ReturnValueAllOld, + ReturnValueUpdatedOld, + ReturnValueAllNew, + ReturnValueUpdatedNew, + } +} + const ( // ReturnValuesOnConditionCheckFailureAllOld is a ReturnValuesOnConditionCheckFailure enum value ReturnValuesOnConditionCheckFailureAllOld = "ALL_OLD" @@ -20120,6 +20343,14 @@ const ( ReturnValuesOnConditionCheckFailureNone = "NONE" ) +// ReturnValuesOnConditionCheckFailure_Values returns all elements of the ReturnValuesOnConditionCheckFailure enum +func ReturnValuesOnConditionCheckFailure_Values() []string { + return []string{ + ReturnValuesOnConditionCheckFailureAllOld, + ReturnValuesOnConditionCheckFailureNone, + } +} + const ( // SSEStatusEnabling is a SSEStatus enum value SSEStatusEnabling = "ENABLING" @@ -20137,6 +20368,17 @@ const ( SSEStatusUpdating = "UPDATING" ) +// SSEStatus_Values returns all elements of the SSEStatus enum +func SSEStatus_Values() []string { + return []string{ + SSEStatusEnabling, + SSEStatusEnabled, + SSEStatusDisabling, + SSEStatusDisabled, + SSEStatusUpdating, + } +} + const ( // SSETypeAes256 is a SSEType enum value SSETypeAes256 = "AES256" @@ -20145,6 +20387,14 @@ const ( SSETypeKms = "KMS" ) +// SSEType_Values returns all elements of the SSEType enum +func SSEType_Values() []string { + return []string{ + SSETypeAes256, + SSETypeKms, + } +} + const ( // ScalarAttributeTypeS is a ScalarAttributeType enum value ScalarAttributeTypeS = "S" @@ -20156,6 +20406,15 @@ const ( ScalarAttributeTypeB = "B" ) +// ScalarAttributeType_Values returns all elements of the ScalarAttributeType enum +func ScalarAttributeType_Values() []string { + return []string{ + ScalarAttributeTypeS, + ScalarAttributeTypeN, + ScalarAttributeTypeB, + } +} + const ( // SelectAllAttributes is a Select enum value SelectAllAttributes = "ALL_ATTRIBUTES" @@ -20170,6 +20429,16 @@ const ( SelectCount = "COUNT" ) +// Select_Values returns all elements of the Select enum +func Select_Values() []string { + return []string{ + SelectAllAttributes, + SelectAllProjectedAttributes, + SelectSpecificAttributes, + SelectCount, + } +} + const ( // StreamViewTypeNewImage is a StreamViewType enum value StreamViewTypeNewImage = "NEW_IMAGE" @@ -20184,6 +20453,16 @@ const ( StreamViewTypeKeysOnly = "KEYS_ONLY" ) +// StreamViewType_Values returns all elements of the StreamViewType enum +func StreamViewType_Values() []string { + return []string{ + StreamViewTypeNewImage, + StreamViewTypeOldImage, + StreamViewTypeNewAndOldImages, + StreamViewTypeKeysOnly, + } +} + const ( // 
TableStatusCreating is a TableStatus enum value TableStatusCreating = "CREATING" @@ -20207,6 +20486,19 @@ const ( TableStatusArchived = "ARCHIVED" ) +// TableStatus_Values returns all elements of the TableStatus enum +func TableStatus_Values() []string { + return []string{ + TableStatusCreating, + TableStatusUpdating, + TableStatusDeleting, + TableStatusActive, + TableStatusInaccessibleEncryptionCredentials, + TableStatusArchiving, + TableStatusArchived, + } +} + const ( // TimeToLiveStatusEnabling is a TimeToLiveStatus enum value TimeToLiveStatusEnabling = "ENABLING" @@ -20220,3 +20512,13 @@ const ( // TimeToLiveStatusDisabled is a TimeToLiveStatus enum value TimeToLiveStatusDisabled = "DISABLED" ) + +// TimeToLiveStatus_Values returns all elements of the TimeToLiveStatus enum +func TimeToLiveStatus_Values() []string { + return []string{ + TimeToLiveStatusEnabling, + TimeToLiveStatusDisabling, + TimeToLiveStatusEnabled, + TimeToLiveStatusDisabled, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go index 0cead1185..b7e2d40b2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go @@ -92,7 +92,7 @@ const ( // if the table or index specifications are complex, DynamoDB might temporarily // reduce the number of concurrent operations. // - // There is a soft account limit of 256 tables. + // There is a soft account quota of 256 tables. ErrCodeLimitExceededException = "LimitExceededException" // ErrCodePointInTimeRecoveryUnavailableException for service response error code @@ -127,9 +127,9 @@ const ( // ErrCodeRequestLimitExceeded for service response error code // "RequestLimitExceeded". // - // Throughput exceeds the current throughput limit for your account. Please + // Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request - // a limit increase. + // a quota increase. ErrCodeRequestLimitExceeded = "RequestLimitExceeded" // ErrCodeResourceInUseException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go index ae2c7b306..8bae9d236 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go @@ -8,7 +8,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/crr" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 297308edd..a0bb5c159 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -554,6 +554,10 @@ func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request. // For more information, see Elastic IP Addresses (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) // in the Amazon Elastic Compute Cloud User Guide. 
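The *_Values() enum helpers added above expose every allowed string for a generated enum, giving the provider a single source of truth when validating configuration. A minimal sketch of checking user input against dynamodb.BillingMode_Values(); validateBillingMode is an illustrative helper, not part of the patch, and in the provider the same slice would typically feed a schema ValidateFunc such as validation.StringInSlice:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// validateBillingMode rejects any value that is not one of the enum values
// the SDK reports for BillingMode.
func validateBillingMode(v string) error {
	for _, mode := range dynamodb.BillingMode_Values() {
		if v == mode {
			return nil
		}
	}
	return fmt.Errorf("invalid billing mode %q, expected one of %v",
		v, dynamodb.BillingMode_Values())
}

func main() {
	fmt.Println(validateBillingMode("PAY_PER_REQUEST")) // <nil>
	fmt.Println(validateBillingMode("ON_DEMAND"))       // invalid billing mode ...
}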
// +// You can allocate a carrier IP address which is a public IP address from a +// telecommunication carrier, to a network interface which resides in a subnet +// in a Wavelength Zone (for example an EC2 instance). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -955,7 +959,8 @@ func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *reques // AssociateAddress API operation for Amazon Elastic Compute Cloud. // -// Associates an Elastic IP address with an instance or a network interface. +// Associates an Elastic IP address, or carrier IP address (for instances that +// are in subnets in Wavelength Zones) with an instance or a network interface. // Before you can use an Elastic IP address, you must allocate it to your account. // // An Elastic IP address is for use in either the EC2-Classic platform or in @@ -976,6 +981,9 @@ func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *reques // an Elastic IP address with an instance or network interface that has an existing // Elastic IP address. // +// [Subnets in Wavelength Zones] You can associate an IP address from the telecommunication +// carrier to the instance or network interface. +// // You cannot associate an Elastic IP address with an interface in a different // network border group. // @@ -1943,7 +1951,7 @@ func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Reques // in the Amazon Elastic Compute Cloud User Guide. // // After you attach an EBS volume, you must make it available. For more information, -// see Making an EBS Volume Available For Use (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html). +// see Making an EBS volume available for use (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html). // // If a volume has an AWS Marketplace product code: // @@ -1957,7 +1965,7 @@ func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Reques // the product. For example, you can't detach a volume from a Windows instance // and attach it to a Linux instance. // -// For more information, see Attaching Amazon EBS Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) +// For more information, see Attaching Amazon EBS volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3325,7 +3333,7 @@ func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Reques // Snapshots created by copying another snapshot have an arbitrary volume ID // that should not be used for any purpose. // -// For more information, see Copying an Amazon EBS Snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) +// For more information, see Copying an Amazon EBS snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -3453,6 +3461,82 @@ func (c *EC2) CreateCapacityReservationWithContext(ctx aws.Context, input *Creat return out, req.Send() } +const opCreateCarrierGateway = "CreateCarrierGateway" + +// CreateCarrierGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateCarrierGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCarrierGateway for more information on using the CreateCarrierGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateCarrierGatewayRequest method. +// req, resp := client.CreateCarrierGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCarrierGateway +func (c *EC2) CreateCarrierGatewayRequest(input *CreateCarrierGatewayInput) (req *request.Request, output *CreateCarrierGatewayOutput) { + op := &request.Operation{ + Name: opCreateCarrierGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCarrierGatewayInput{} + } + + output = &CreateCarrierGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCarrierGateway API operation for Amazon Elastic Compute Cloud. +// +// Creates a carrier gateway. For more information about carrier gateways, see +// Carrier gateways (https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#wavelength-carrier-gateway) +// in the AWS Wavelength Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateCarrierGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateCarrierGateway +func (c *EC2) CreateCarrierGateway(input *CreateCarrierGatewayInput) (*CreateCarrierGatewayOutput, error) { + req, out := c.CreateCarrierGatewayRequest(input) + return out, req.Send() +} + +// CreateCarrierGatewayWithContext is the same as CreateCarrierGateway with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCarrierGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateCarrierGatewayWithContext(ctx aws.Context, input *CreateCarrierGatewayInput, opts ...request.Option) (*CreateCarrierGatewayOutput, error) { + req, out := c.CreateCarrierGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCreateClientVpnEndpoint = "CreateClientVpnEndpoint" // CreateClientVpnEndpointRequest generates a "aws/request.Request" representing the @@ -3653,7 +3737,7 @@ func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (r // Provides information to AWS about your VPN customer gateway device. The customer // gateway is the appliance at your end of the VPN connection. (The device on // the AWS side of the VPN connection is the virtual private gateway.) You must -// provide the Internet-routable IP address of the customer gateway's external +// provide the internet-routable IP address of the customer gateway's external // interface. The IP address must be static and can be behind a device performing // network address translation (NAT). // @@ -3662,9 +3746,16 @@ func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (r // ASN assigned to your network. If you don't have an ASN already, you can use // a private ASN (in the 64512 - 65534 range). // -// Amazon EC2 supports all 2-byte ASN numbers in the range of 1 - 65534, with -// the exception of 7224, which is reserved in the us-east-1 Region, and 9059, -// which is reserved in the eu-west-1 Region. +// Amazon EC2 supports all 4-byte ASN numbers in the range of 1 - 2147483647, +// with the exception of the following: +// +// * 7224 - reserved in the us-east-1 Region +// +// * 9059 - reserved in the eu-west-1 Region +// +// * 17943 - reserved in the ap-southeast-1 Region +// +// * 10124 - reserved in the ap-northeast-1 Region // // For more information, see AWS Site-to-Site VPN (https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) // in the AWS Site-to-Site VPN User Guide. @@ -3926,13 +4017,13 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // * domain-name - If you're using AmazonProvidedDNS in us-east-1, specify // ec2.internal. If you're using AmazonProvidedDNS in another Region, specify // region.compute.internal (for example, ap-northeast-1.compute.internal). -// Otherwise, specify a domain name (for example, MyCompany.com). This value -// is used to complete unqualified DNS hostnames. Important: Some Linux operating -// systems accept multiple domain names separated by spaces. However, Windows -// and other Linux operating systems treat the value as a single domain, -// which results in unexpected behavior. If your DHCP options set is associated -// with a VPC that has instances with multiple operating systems, specify -// only one domain name. +// Otherwise, specify a domain name (for example, ExampleCompany.com). This +// value is used to complete unqualified DNS hostnames. Important: Some Linux +// operating systems accept multiple domain names separated by spaces. However, +// Windows and other Linux operating systems treat the value as a single +// domain, which results in unexpected behavior. If your DHCP options set +// is associated with a VPC that has instances with multiple operating systems, +// specify only one domain name. // // * ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) // servers. @@ -4432,7 +4523,7 @@ func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInp // CreateInstanceExportTask API operation for Amazon Elastic Compute Cloud. // -// Exports a running or stopped instance to an S3 bucket. +// Exports a running or stopped instance to an Amazon S3 bucket. 
// // For information about the supported operating systems, image formats, and // known limitations for the types of instances you can export, see Exporting @@ -4678,6 +4769,8 @@ func (c *EC2) CreateLaunchTemplateRequest(input *CreateLaunchTemplateInput) (req // Creates a launch template. A launch template contains the parameters to launch // an instance. When you launch an instance using RunInstances, you can specify // a launch template instead of providing the launch parameters in the request. +// For more information, see Launching an instance from a launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html)in +// the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4757,6 +4850,9 @@ func (c *EC2) CreateLaunchTemplateVersionRequest(input *CreateLaunchTemplateVers // Launch template versions are numbered in the order in which they are created. // You cannot specify, change, or replace the numbering of launch template versions. // +// For more information, see Managing launch template versions (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html#manage-launch-template-versions)in +// the Amazon Elastic Compute Cloud User Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4933,6 +5029,84 @@ func (c *EC2) CreateLocalGatewayRouteTableVpcAssociationWithContext(ctx aws.Cont return out, req.Send() } +const opCreateManagedPrefixList = "CreateManagedPrefixList" + +// CreateManagedPrefixListRequest generates a "aws/request.Request" representing the +// client's request for the CreateManagedPrefixList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateManagedPrefixList for more information on using the CreateManagedPrefixList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateManagedPrefixListRequest method. +// req, resp := client.CreateManagedPrefixListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateManagedPrefixList +func (c *EC2) CreateManagedPrefixListRequest(input *CreateManagedPrefixListInput) (req *request.Request, output *CreateManagedPrefixListOutput) { + op := &request.Operation{ + Name: opCreateManagedPrefixList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateManagedPrefixListInput{} + } + + output = &CreateManagedPrefixListOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateManagedPrefixList API operation for Amazon Elastic Compute Cloud. +// +// Creates a managed prefix list. You can specify one or more entries for the +// prefix list. Each entry consists of a CIDR block and an optional description. 
+// +// You must specify the maximum number of entries for the prefix list. The maximum +// number of entries cannot be changed later. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateManagedPrefixList for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateManagedPrefixList +func (c *EC2) CreateManagedPrefixList(input *CreateManagedPrefixListInput) (*CreateManagedPrefixListOutput, error) { + req, out := c.CreateManagedPrefixListRequest(input) + return out, req.Send() +} + +// CreateManagedPrefixListWithContext is the same as CreateManagedPrefixList with the addition of +// the ability to pass a context and additional request options. +// +// See CreateManagedPrefixList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) CreateManagedPrefixListWithContext(ctx aws.Context, input *CreateManagedPrefixListInput, opts ...request.Option) (*CreateManagedPrefixListOutput, error) { + req, out := c.CreateManagedPrefixListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateNatGateway = "CreateNatGateway" // CreateNatGatewayRequest generates a "aws/request.Request" representing the @@ -5377,7 +5551,6 @@ func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req output = &CreatePlacementGroupOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -5393,7 +5566,7 @@ func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req // instances in one partition do not share the same hardware with instances // in another partition. // -// For more information, see Placement Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +// For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5860,7 +6033,7 @@ func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Re // protected. // // You can tag your snapshots during creation. For more information, see Tagging -// Your Amazon EC2 Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) // in the Amazon Elastic Compute Cloud User Guide. // // For more information, see Amazon Elastic Block Store (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AmazonEBS.html) @@ -6018,7 +6191,7 @@ func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSub // // Creates a data feed for Spot Instances, enabling you to view Spot Instance // usage logs. You can create one data feed per AWS account. 
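The new managed prefix list and carrier gateway operations added above follow the same generated client pattern as the rest of the EC2 API: a one-shot call, a *WithContext variant for cancellation, and a *Request form for custom handling. A minimal sketch of creating a prefix list with a deadline, assuming a configured session; the name, CIDR, and entry count are illustrative only:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// The generated *WithContext variants use the context for request
	// cancellation, so the call is bounded by this timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := svc.CreateManagedPrefixListWithContext(ctx, &ec2.CreateManagedPrefixListInput{
		PrefixListName: aws.String("example-internal-cidrs"), // illustrative
		AddressFamily:  aws.String("IPv4"),
		MaxEntries:     aws.Int64(10), // fixed at creation time, per the doc above
		Entries: []*ec2.AddPrefixListEntry{
			{
				Cidr:        aws.String("10.0.0.0/16"),
				Description: aws.String("example entry"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.PrefixList.PrefixListId))
}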
For more information, -// see Spot Instance Data Feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// see Spot Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) // in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6093,15 +6266,12 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques // CreateSubnet API operation for Amazon Elastic Compute Cloud. // -// Creates a subnet in an existing VPC. +// Creates a subnet in a specified VPC. // -// When you create each subnet, you provide the VPC ID and IPv4 CIDR block for -// the subnet. After you create a subnet, you can't change its CIDR block. The -// size of the subnet's IPv4 CIDR block can be the same as a VPC's IPv4 CIDR -// block, or a subset of a VPC's IPv4 CIDR block. If you create more than one -// subnet in a VPC, the subnets' CIDR blocks must not overlap. The smallest -// IPv4 subnet (and VPC) you can create uses a /28 netmask (16 IPv4 addresses), -// and the largest uses a /16 netmask (65,536 IPv4 addresses). +// You must specify an IPv4 CIDR block for the subnet. After you create a subnet, +// you can't change its CIDR block. The allowed block size is between a /16 +// netmask (65,536 IP addresses) and /28 netmask (16 IP addresses). The CIDR +// block must not overlap with the CIDR block of an existing subnet in the VPC. // // If you've associated an IPv6 CIDR block with your VPC, you can create a subnet // with an IPv6 CIDR block that uses a /64 prefix length. @@ -6112,9 +6282,7 @@ func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Reques // If you add more than one subnet to a VPC, they're set up in a star topology // with a logical router in the middle. // -// If you launch an instance in a VPC using an Amazon EBS-backed AMI, the IP -// address doesn't change if you stop and restart the instance (unlike a similar -// instance launched outside a VPC, which gets a new IP address when restarted). +// When you stop an instance in a subnet, it retains its private IPv4 address. // It's therefore possible to have a subnet with no running instances (they're // all stopped), but no remaining IP addresses available. // @@ -6194,9 +6362,10 @@ func (c *EC2) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, o // CreateTags API operation for Amazon Elastic Compute Cloud. // -// Adds or overwrites the specified tags for the specified Amazon EC2 resource -// or resources. Each resource can have a maximum of 50 tags. Each tag consists -// of a key and optional value. Tag keys must be unique per resource. +// Adds or overwrites only the specified tags for the specified Amazon EC2 resource +// or resources. When you specify an existing tag key, the value is overwritten +// with the new value. Each resource can have a maximum of 50 tags. Each tag +// consists of a key and optional value. Tag keys must be unique per resource. // // For more information about tags, see Tagging Your Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) // in the Amazon Elastic Compute Cloud User Guide. 
For more information about @@ -6813,6 +6982,81 @@ func (c *EC2) CreateTransitGatewayPeeringAttachmentWithContext(ctx aws.Context, return out, req.Send() } +const opCreateTransitGatewayPrefixListReference = "CreateTransitGatewayPrefixListReference" + +// CreateTransitGatewayPrefixListReferenceRequest generates a "aws/request.Request" representing the +// client's request for the CreateTransitGatewayPrefixListReference operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTransitGatewayPrefixListReference for more information on using the CreateTransitGatewayPrefixListReference +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTransitGatewayPrefixListReferenceRequest method. +// req, resp := client.CreateTransitGatewayPrefixListReferenceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayPrefixListReference +func (c *EC2) CreateTransitGatewayPrefixListReferenceRequest(input *CreateTransitGatewayPrefixListReferenceInput) (req *request.Request, output *CreateTransitGatewayPrefixListReferenceOutput) { + op := &request.Operation{ + Name: opCreateTransitGatewayPrefixListReference, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTransitGatewayPrefixListReferenceInput{} + } + + output = &CreateTransitGatewayPrefixListReferenceOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTransitGatewayPrefixListReference API operation for Amazon Elastic Compute Cloud. +// +// Creates a reference (route) to a prefix list in a specified transit gateway +// route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation CreateTransitGatewayPrefixListReference for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/CreateTransitGatewayPrefixListReference +func (c *EC2) CreateTransitGatewayPrefixListReference(input *CreateTransitGatewayPrefixListReferenceInput) (*CreateTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.CreateTransitGatewayPrefixListReferenceRequest(input) + return out, req.Send() +} + +// CreateTransitGatewayPrefixListReferenceWithContext is the same as CreateTransitGatewayPrefixListReference with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTransitGatewayPrefixListReference for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *EC2) CreateTransitGatewayPrefixListReferenceWithContext(ctx aws.Context, input *CreateTransitGatewayPrefixListReferenceInput, opts ...request.Option) (*CreateTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.CreateTransitGatewayPrefixListReferenceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateTransitGatewayRoute = "CreateTransitGatewayRoute" // CreateTransitGatewayRouteRequest generates a "aws/request.Request" representing the @@ -7101,10 +7345,10 @@ func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Reques // in the Amazon Elastic Compute Cloud User Guide. // // You can tag your volumes during creation. For more information, see Tagging -// Your Amazon EC2 Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) // in the Amazon Elastic Compute Cloud User Guide. // -// For more information, see Creating an Amazon EBS Volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html) +// For more information, see Creating an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-creating-volume.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -7617,15 +7861,15 @@ func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req * // CreateVpnConnection API operation for Amazon Elastic Compute Cloud. // -// Creates a VPN connection between an existing virtual private gateway and -// a VPN customer gateway. The supported connection type is ipsec.1. +// Creates a VPN connection between an existing virtual private gateway or transit +// gateway and a customer gateway. The supported connection type is ipsec.1. // // The response includes information that you need to give to your network administrator // to configure your customer gateway. // // We strongly recommend that you use HTTPS when calling this operation because // the response contains sensitive cryptographic information for configuring -// your customer gateway. +// your customer gateway device. // // If you decide to shut down your VPN connection for any reason and later create // a new VPN connection, you must reconfigure your customer gateway with the @@ -7825,6 +8069,84 @@ func (c *EC2) CreateVpnGatewayWithContext(ctx aws.Context, input *CreateVpnGatew return out, req.Send() } +const opDeleteCarrierGateway = "DeleteCarrierGateway" + +// DeleteCarrierGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCarrierGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteCarrierGateway for more information on using the DeleteCarrierGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteCarrierGatewayRequest method. 
+// req, resp := client.DeleteCarrierGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteCarrierGateway +func (c *EC2) DeleteCarrierGatewayRequest(input *DeleteCarrierGatewayInput) (req *request.Request, output *DeleteCarrierGatewayOutput) { + op := &request.Operation{ + Name: opDeleteCarrierGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCarrierGatewayInput{} + } + + output = &DeleteCarrierGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteCarrierGateway API operation for Amazon Elastic Compute Cloud. +// +// Deletes a carrier gateway. +// +// If you do not delete the route that contains the carrier gateway as the Target, +// the route is a blackhole route. For information about how to delete a route, +// see DeleteRoute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteRoute.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteCarrierGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteCarrierGateway +func (c *EC2) DeleteCarrierGateway(input *DeleteCarrierGatewayInput) (*DeleteCarrierGatewayOutput, error) { + req, out := c.DeleteCarrierGatewayRequest(input) + return out, req.Send() +} + +// DeleteCarrierGatewayWithContext is the same as DeleteCarrierGateway with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCarrierGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteCarrierGatewayWithContext(ctx aws.Context, input *DeleteCarrierGatewayInput, opts ...request.Option) (*DeleteCarrierGatewayOutput, error) { + req, out := c.DeleteCarrierGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteClientVpnEndpoint = "DeleteClientVpnEndpoint" // DeleteClientVpnEndpointRequest generates a "aws/request.Request" representing the @@ -8885,6 +9207,81 @@ func (c *EC2) DeleteLocalGatewayRouteTableVpcAssociationWithContext(ctx aws.Cont return out, req.Send() } +const opDeleteManagedPrefixList = "DeleteManagedPrefixList" + +// DeleteManagedPrefixListRequest generates a "aws/request.Request" representing the +// client's request for the DeleteManagedPrefixList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteManagedPrefixList for more information on using the DeleteManagedPrefixList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DeleteManagedPrefixListRequest method. +// req, resp := client.DeleteManagedPrefixListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteManagedPrefixList +func (c *EC2) DeleteManagedPrefixListRequest(input *DeleteManagedPrefixListInput) (req *request.Request, output *DeleteManagedPrefixListOutput) { + op := &request.Operation{ + Name: opDeleteManagedPrefixList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteManagedPrefixListInput{} + } + + output = &DeleteManagedPrefixListOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteManagedPrefixList API operation for Amazon Elastic Compute Cloud. +// +// Deletes the specified managed prefix list. You must first remove all references +// to the prefix list in your resources. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteManagedPrefixList for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteManagedPrefixList +func (c *EC2) DeleteManagedPrefixList(input *DeleteManagedPrefixListInput) (*DeleteManagedPrefixListOutput, error) { + req, out := c.DeleteManagedPrefixListRequest(input) + return out, req.Send() +} + +// DeleteManagedPrefixListWithContext is the same as DeleteManagedPrefixList with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteManagedPrefixList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteManagedPrefixListWithContext(ctx aws.Context, input *DeleteManagedPrefixListInput, opts ...request.Option) (*DeleteManagedPrefixListOutput, error) { + req, out := c.DeleteManagedPrefixListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteNatGateway = "DeleteNatGateway" // DeleteNatGatewayRequest generates a "aws/request.Request" representing the @@ -9313,7 +9710,7 @@ func (c *EC2) DeletePlacementGroupRequest(input *DeletePlacementGroupInput) (req // // Deletes the specified placement group. You must terminate all instances in // the placement group before you can delete the placement group. For more information, -// see Placement Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +// see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -9707,7 +10104,7 @@ func (c *EC2) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Re // a registered AMI. You must first de-register the AMI before you can delete // the snapshot. 
// -// For more information, see Deleting an Amazon EBS Snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html) +// For more information, see Deleting an Amazon EBS snapshot (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-snapshot.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -10492,6 +10889,81 @@ func (c *EC2) DeleteTransitGatewayPeeringAttachmentWithContext(ctx aws.Context, return out, req.Send() } +const opDeleteTransitGatewayPrefixListReference = "DeleteTransitGatewayPrefixListReference" + +// DeleteTransitGatewayPrefixListReferenceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTransitGatewayPrefixListReference operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTransitGatewayPrefixListReference for more information on using the DeleteTransitGatewayPrefixListReference +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTransitGatewayPrefixListReferenceRequest method. +// req, resp := client.DeleteTransitGatewayPrefixListReferenceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayPrefixListReference +func (c *EC2) DeleteTransitGatewayPrefixListReferenceRequest(input *DeleteTransitGatewayPrefixListReferenceInput) (req *request.Request, output *DeleteTransitGatewayPrefixListReferenceOutput) { + op := &request.Operation{ + Name: opDeleteTransitGatewayPrefixListReference, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTransitGatewayPrefixListReferenceInput{} + } + + output = &DeleteTransitGatewayPrefixListReferenceOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTransitGatewayPrefixListReference API operation for Amazon Elastic Compute Cloud. +// +// Deletes a reference (route) to a prefix list in a specified transit gateway +// route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeleteTransitGatewayPrefixListReference for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeleteTransitGatewayPrefixListReference +func (c *EC2) DeleteTransitGatewayPrefixListReference(input *DeleteTransitGatewayPrefixListReferenceInput) (*DeleteTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.DeleteTransitGatewayPrefixListReferenceRequest(input) + return out, req.Send() +} + +// DeleteTransitGatewayPrefixListReferenceWithContext is the same as DeleteTransitGatewayPrefixListReference with the addition of +// the ability to pass a context and additional request options. 
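+//
+// As a rough sketch of that pattern (the client setup, the timeout, the IDs, and
+// the input field names are assumptions here):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := svc.DeleteTransitGatewayPrefixListReferenceWithContext(ctx,
+//        &ec2.DeleteTransitGatewayPrefixListReferenceInput{
+//            PrefixListId:               aws.String("pl-0123456789abcdef0"),
+//            TransitGatewayRouteTableId: aws.String("tgw-rtb-0123456789abcdef0"),
+//        })
+//    if err != nil {
+//        log.Fatal(err)
+//    }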
+// +// See DeleteTransitGatewayPrefixListReference for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeleteTransitGatewayPrefixListReferenceWithContext(ctx aws.Context, input *DeleteTransitGatewayPrefixListReferenceInput, opts ...request.Option) (*DeleteTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.DeleteTransitGatewayPrefixListReferenceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteTransitGatewayRoute = "DeleteTransitGatewayRoute" // DeleteTransitGatewayRouteRequest generates a "aws/request.Request" representing the @@ -10766,7 +11238,7 @@ func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Reques // // The volume can remain in the deleting state for several minutes. // -// For more information, see Deleting an Amazon EBS Volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html) +// For more information, see Deleting an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-deleting-volume.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -11235,9 +11707,13 @@ func (c *EC2) DeleteVpnConnectionRequest(input *DeleteVpnConnectionInput) (req * // your VPN connection have been compromised, you can delete the VPN connection // and create a new one that has new keys, without needing to delete the VPC // or virtual private gateway. If you create a new VPN connection, you must -// reconfigure the customer gateway using the new configuration information +// reconfigure the customer gateway device using the new configuration information // returned with the new VPN connection ID. // +// For certificate-based authentication, delete all AWS Certificate Manager +// (ACM) private certificates used for the AWS-side tunnel endpoints for the +// VPN connection before deleting the VPN connection. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -11585,6 +12061,81 @@ func (c *EC2) DeregisterImageWithContext(ctx aws.Context, input *DeregisterImage return out, req.Send() } +const opDeregisterInstanceEventNotificationAttributes = "DeregisterInstanceEventNotificationAttributes" + +// DeregisterInstanceEventNotificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterInstanceEventNotificationAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterInstanceEventNotificationAttributes for more information on using the DeregisterInstanceEventNotificationAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the DeregisterInstanceEventNotificationAttributesRequest method. +// req, resp := client.DeregisterInstanceEventNotificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterInstanceEventNotificationAttributes +func (c *EC2) DeregisterInstanceEventNotificationAttributesRequest(input *DeregisterInstanceEventNotificationAttributesInput) (req *request.Request, output *DeregisterInstanceEventNotificationAttributesOutput) { + op := &request.Operation{ + Name: opDeregisterInstanceEventNotificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterInstanceEventNotificationAttributesInput{} + } + + output = &DeregisterInstanceEventNotificationAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeregisterInstanceEventNotificationAttributes API operation for Amazon Elastic Compute Cloud. +// +// Deregisters tag keys to prevent tags that have the specified tag keys from +// being included in scheduled event notifications for resources in the Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DeregisterInstanceEventNotificationAttributes for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DeregisterInstanceEventNotificationAttributes +func (c *EC2) DeregisterInstanceEventNotificationAttributes(input *DeregisterInstanceEventNotificationAttributesInput) (*DeregisterInstanceEventNotificationAttributesOutput, error) { + req, out := c.DeregisterInstanceEventNotificationAttributesRequest(input) + return out, req.Send() +} + +// DeregisterInstanceEventNotificationAttributesWithContext is the same as DeregisterInstanceEventNotificationAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterInstanceEventNotificationAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DeregisterInstanceEventNotificationAttributesWithContext(ctx aws.Context, input *DeregisterInstanceEventNotificationAttributesInput, opts ...request.Option) (*DeregisterInstanceEventNotificationAttributesOutput, error) { + req, out := c.DeregisterInstanceEventNotificationAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeregisterTransitGatewayMulticastGroupMembers = "DeregisterTransitGatewayMulticastGroupMembers" // DeregisterTransitGatewayMulticastGroupMembersRequest generates a "aws/request.Request" representing the @@ -12039,13 +12590,12 @@ func (c *EC2) DescribeAvailabilityZonesRequest(input *DescribeAvailabilityZonesI // DescribeAvailabilityZones API operation for Amazon Elastic Compute Cloud. // -// Describes the Availability Zones and Local Zones that are available to you. 
-// If there is an event impacting an Availability Zone or Local Zone, you can -// use this request to view the state and any provided messages for that Availability -// Zone or Local Zone. +// Describes the Availability Zones, Local Zones, and Wavelength Zones that +// are available to you. If there is an event impacting a zone, you can use +// this request to view the state and any provided messages for that zone. // -// For more information about Availability Zones and Local Zones, see Regions -// and Availability Zones (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) +// For more information about Availability Zones, Local Zones, and Wavelength +// Zones, see Regions, Zones and Outposts (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -12423,6 +12973,138 @@ func (c *EC2) DescribeCapacityReservationsPagesWithContext(ctx aws.Context, inpu return p.Err() } +const opDescribeCarrierGateways = "DescribeCarrierGateways" + +// DescribeCarrierGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCarrierGateways operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeCarrierGateways for more information on using the DescribeCarrierGateways +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeCarrierGatewaysRequest method. +// req, resp := client.DescribeCarrierGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCarrierGateways +func (c *EC2) DescribeCarrierGatewaysRequest(input *DescribeCarrierGatewaysInput) (req *request.Request, output *DescribeCarrierGatewaysOutput) { + op := &request.Operation{ + Name: opDescribeCarrierGateways, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCarrierGatewaysInput{} + } + + output = &DescribeCarrierGatewaysOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCarrierGateways API operation for Amazon Elastic Compute Cloud. +// +// Describes one or more of your carrier gateways. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeCarrierGateways for usage and error information. 
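+//
+// As a rough sketch (the client setup, the filter name, and the VPC ID are
+// assumptions here), limiting the results to a single VPC might look like:
+//
+//    out, err := svc.DescribeCarrierGateways(&ec2.DescribeCarrierGatewaysInput{
+//        Filters: []*ec2.Filter{{
+//            Name:   aws.String("vpc-id"),
+//            Values: []*string{aws.String("vpc-0123456789abcdef0")},
+//        }},
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }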
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeCarrierGateways +func (c *EC2) DescribeCarrierGateways(input *DescribeCarrierGatewaysInput) (*DescribeCarrierGatewaysOutput, error) { + req, out := c.DescribeCarrierGatewaysRequest(input) + return out, req.Send() +} + +// DescribeCarrierGatewaysWithContext is the same as DescribeCarrierGateways with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCarrierGateways for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeCarrierGatewaysWithContext(ctx aws.Context, input *DescribeCarrierGatewaysInput, opts ...request.Option) (*DescribeCarrierGatewaysOutput, error) { + req, out := c.DescribeCarrierGatewaysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeCarrierGatewaysPages iterates over the pages of a DescribeCarrierGateways operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCarrierGateways method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCarrierGateways operation. +// pageNum := 0 +// err := client.DescribeCarrierGatewaysPages(params, +// func(page *ec2.DescribeCarrierGatewaysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeCarrierGatewaysPages(input *DescribeCarrierGatewaysInput, fn func(*DescribeCarrierGatewaysOutput, bool) bool) error { + return c.DescribeCarrierGatewaysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeCarrierGatewaysPagesWithContext same as DescribeCarrierGatewaysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeCarrierGatewaysPagesWithContext(ctx aws.Context, input *DescribeCarrierGatewaysInput, fn func(*DescribeCarrierGatewaysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeCarrierGatewaysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCarrierGatewaysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeCarrierGatewaysOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeClassicLinkInstances = "DescribeClassicLinkInstances" // DescribeClassicLinkInstancesRequest generates a "aws/request.Request" representing the @@ -13900,7 +14582,7 @@ func (c *EC2) DescribeExportImageTasksRequest(input *DescribeExportImageTasksInp // DescribeExportImageTasks API operation for Amazon Elastic Compute Cloud. // -// Describes the specified export image tasks or all your export image tasks. 
+// Describes the specified export image tasks or all of your export image tasks. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -14026,7 +14708,7 @@ func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req * // DescribeExportTasks API operation for Amazon Elastic Compute Cloud. // -// Describes the specified export instance tasks or all your export instance +// Describes the specified export instance tasks or all of your export instance // tasks. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -16111,7 +16793,7 @@ func (c *EC2) DescribeInstanceCreditSpecificationsRequest(input *DescribeInstanc // all, the call fails. If you specify only instance IDs in an unaffected zone, // the call works normally. // -// For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -16194,6 +16876,81 @@ func (c *EC2) DescribeInstanceCreditSpecificationsPagesWithContext(ctx aws.Conte return p.Err() } +const opDescribeInstanceEventNotificationAttributes = "DescribeInstanceEventNotificationAttributes" + +// DescribeInstanceEventNotificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceEventNotificationAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstanceEventNotificationAttributes for more information on using the DescribeInstanceEventNotificationAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstanceEventNotificationAttributesRequest method. +// req, resp := client.DescribeInstanceEventNotificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceEventNotificationAttributes +func (c *EC2) DescribeInstanceEventNotificationAttributesRequest(input *DescribeInstanceEventNotificationAttributesInput) (req *request.Request, output *DescribeInstanceEventNotificationAttributesOutput) { + op := &request.Operation{ + Name: opDescribeInstanceEventNotificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeInstanceEventNotificationAttributesInput{} + } + + output = &DescribeInstanceEventNotificationAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstanceEventNotificationAttributes API operation for Amazon Elastic Compute Cloud. 
+// +// Describes the tag keys that are registered to appear in scheduled event notifications +// for resources in the current Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeInstanceEventNotificationAttributes for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeInstanceEventNotificationAttributes +func (c *EC2) DescribeInstanceEventNotificationAttributes(input *DescribeInstanceEventNotificationAttributesInput) (*DescribeInstanceEventNotificationAttributesOutput, error) { + req, out := c.DescribeInstanceEventNotificationAttributesRequest(input) + return out, req.Send() +} + +// DescribeInstanceEventNotificationAttributesWithContext is the same as DescribeInstanceEventNotificationAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstanceEventNotificationAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeInstanceEventNotificationAttributesWithContext(ctx aws.Context, input *DescribeInstanceEventNotificationAttributesInput, opts ...request.Option) (*DescribeInstanceEventNotificationAttributesOutput, error) { + req, out := c.DescribeInstanceEventNotificationAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeInstanceStatus = "DescribeInstanceStatus" // DescribeInstanceStatusRequest generates a "aws/request.Request" representing the @@ -16252,18 +17009,18 @@ func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) // // * Status checks - Amazon EC2 performs status checks on running EC2 instances // to identify hardware and software issues. For more information, see Status -// Checks for Your Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) -// and Troubleshooting Instances with Failed Status Checks (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) +// checks for your instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) +// and Troubleshooting instances with failed status checks (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) // in the Amazon Elastic Compute Cloud User Guide. // // * Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, // or terminate) for your instances related to hardware issues, software -// updates, or system maintenance. For more information, see Scheduled Events -// for Your Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) +// updates, or system maintenance. For more information, see Scheduled events +// for your instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) // in the Amazon Elastic Compute Cloud User Guide. 
// // * Instance state - You can manage your instances from the moment you launch -// them through their termination. For more information, see Instance Lifecycle +// them through their termination. For more information, see Instance lifecycle // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -16531,7 +17288,7 @@ func (c *EC2) DescribeInstanceTypesRequest(input *DescribeInstanceTypesInput) (r // DescribeInstanceTypes API operation for Amazon Elastic Compute Cloud. // -// Returns a list of all instance types offered in your current AWS Region. +// Describes the details of the instance types that are offered in a location. // The results can be filtered by the attributes of the instance types. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -16664,13 +17421,17 @@ func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *requ // DescribeInstances API operation for Amazon Elastic Compute Cloud. // -// Describes the specified instances or all of AWS account's instances. +// Describes the specified instances or all instances. // -// If you specify one or more instance IDs, Amazon EC2 returns information for -// those instances. If you do not specify instance IDs, Amazon EC2 returns information -// for all relevant instances. If you specify an instance ID that is not valid, -// an error is returned. If you specify an instance that you do not own, it -// is not included in the returned results. +// If you specify instance IDs, the output includes information for only the +// specified instances. If you specify filters, the output includes information +// for only those instances that meet the filter criteria. If you do not specify +// instance IDs or filters, the output includes information for all instances, +// which can affect performance. We recommend that you use pagination to ensure +// that the operation returns quickly and successfully. +// +// If you specify an instance ID that is not valid, an error is returned. If +// you specify an instance that you do not own, it is not included in the output. // // Recently terminated instances might appear in the returned results. This // interval is usually less than one hour. @@ -17153,7 +17914,9 @@ func (c *EC2) DescribeLaunchTemplateVersionsRequest(input *DescribeLaunchTemplat // DescribeLaunchTemplateVersions API operation for Amazon Elastic Compute Cloud. // // Describes one or more versions of a specified launch template. You can describe -// all versions, individual versions, or a range of versions. +// all versions, individual versions, or a range of versions. You can also describe +// all the latest versions or all the default versions of all the launch templates +// in your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -18163,6 +18926,140 @@ func (c *EC2) DescribeLocalGatewaysPagesWithContext(ctx aws.Context, input *Desc return p.Err() } +const opDescribeManagedPrefixLists = "DescribeManagedPrefixLists" + +// DescribeManagedPrefixListsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeManagedPrefixLists operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeManagedPrefixLists for more information on using the DescribeManagedPrefixLists +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeManagedPrefixListsRequest method. +// req, resp := client.DescribeManagedPrefixListsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeManagedPrefixLists +func (c *EC2) DescribeManagedPrefixListsRequest(input *DescribeManagedPrefixListsInput) (req *request.Request, output *DescribeManagedPrefixListsOutput) { + op := &request.Operation{ + Name: opDescribeManagedPrefixLists, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeManagedPrefixListsInput{} + } + + output = &DescribeManagedPrefixListsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeManagedPrefixLists API operation for Amazon Elastic Compute Cloud. +// +// Describes your managed prefix lists and any AWS-managed prefix lists. +// +// To view the entries for your prefix list, use GetManagedPrefixListEntries. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation DescribeManagedPrefixLists for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeManagedPrefixLists +func (c *EC2) DescribeManagedPrefixLists(input *DescribeManagedPrefixListsInput) (*DescribeManagedPrefixListsOutput, error) { + req, out := c.DescribeManagedPrefixListsRequest(input) + return out, req.Send() +} + +// DescribeManagedPrefixListsWithContext is the same as DescribeManagedPrefixLists with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeManagedPrefixLists for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeManagedPrefixListsWithContext(ctx aws.Context, input *DescribeManagedPrefixListsInput, opts ...request.Option) (*DescribeManagedPrefixListsOutput, error) { + req, out := c.DescribeManagedPrefixListsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeManagedPrefixListsPages iterates over the pages of a DescribeManagedPrefixLists operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeManagedPrefixLists method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeManagedPrefixLists operation. +// pageNum := 0 +// err := client.DescribeManagedPrefixListsPages(params, +// func(page *ec2.DescribeManagedPrefixListsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeManagedPrefixListsPages(input *DescribeManagedPrefixListsInput, fn func(*DescribeManagedPrefixListsOutput, bool) bool) error { + return c.DescribeManagedPrefixListsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeManagedPrefixListsPagesWithContext same as DescribeManagedPrefixListsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) DescribeManagedPrefixListsPagesWithContext(ctx aws.Context, input *DescribeManagedPrefixListsInput, fn func(*DescribeManagedPrefixListsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeManagedPrefixListsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeManagedPrefixListsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeManagedPrefixListsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMovingAddresses = "DescribeMovingAddresses" // DescribeMovingAddressesRequest generates a "aws/request.Request" representing the @@ -18948,7 +19845,7 @@ func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput // DescribePlacementGroups API operation for Amazon Elastic Compute Cloud. // // Describes the specified placement groups or all of your placement groups. -// For more information, see Placement Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) +// For more information, see Placement groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -19031,10 +19928,9 @@ func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req * // // Describes available AWS services in a prefix list format, which includes // the prefix list name and prefix list ID of the service and the IP address -// range for the service. A prefix list ID is required for creating an outbound -// security group rule that allows traffic from a VPC to access an AWS service -// through a gateway VPC endpoint. Currently, the services that support this -// action are Amazon S3 and Amazon DynamoDB. +// range for the service. +// +// We recommend that you use DescribeManagedPrefixLists instead. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -20721,12 +21617,12 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ // (if you own the snapshots), self for snapshots for which you own or have // explicit permissions, or all for public snapshots. // -// If you are describing a long list of snapshots, you can paginate the output -// to make the list more manageable. The MaxResults parameter sets the maximum -// number of results returned in a single page. If the list of results exceeds -// your MaxResults value, then that number of results is returned along with -// a NextToken value that can be passed to a subsequent DescribeSnapshots request -// to retrieve the remaining results. +// If you are describing a long list of snapshots, we recommend that you paginate +// the output to make the list more manageable. The MaxResults parameter sets +// the maximum number of results returned in a single page. If the list of results +// exceeds your MaxResults value, then that number of results is returned along +// with a NextToken value that can be passed to a subsequent DescribeSnapshots +// request to retrieve the remaining results. // // To get the state of fast snapshot restores for a snapshot, use DescribeFastSnapshotRestores. // @@ -20858,7 +21754,7 @@ func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafee // DescribeSpotDatafeedSubscription API operation for Amazon Elastic Compute Cloud. // // Describes the data feed for Spot Instances. For more information, see Spot -// Instance Data Feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) +// Instance data feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html) // in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -21232,8 +22128,8 @@ func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceReq // You can use DescribeSpotInstanceRequests to find a running Spot Instance // by examining the response. If the status of the Spot Instance is fulfilled, // the instance ID appears in the response and contains the identifier of the -// instance. Alternatively, you can use DescribeInstances with a filter to look -// for instances where the instance lifecycle is spot. +// instance. Alternatively, you can use DescribeInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances) +// with a filter to look for instances where the instance lifecycle is spot. // // We recommend that you set MaxResults to a value between 5 and 1000 to limit // the number of results returned. This paginates the output, which makes the @@ -21376,7 +22272,7 @@ func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInp // DescribeSpotPriceHistory API operation for Amazon Elastic Compute Cloud. // // Describes the Spot price history. For more information, see Spot Instance -// Pricing History (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) +// pricing history (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances-history.html) // in the Amazon EC2 User Guide for Linux Instances. // // When you specify a start and end time, this operation returns the prices @@ -23209,7 +24105,7 @@ func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req // status of the volume is ok. 
If the check fails, the overall status is impaired. // If the status is insufficient-data, then the checks may still be taking place // on your volume at the time. We recommend that you retry the request. For -// more information about volume status, see Monitoring the Status of Your Volumes +// more information about volume status, see Monitoring the status of your volumes // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -23361,12 +24257,12 @@ func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request. // // Describes the specified EBS volumes or all of your EBS volumes. // -// If you are describing a long list of volumes, you can paginate the output -// to make the list more manageable. The MaxResults parameter sets the maximum -// number of results returned in a single page. If the list of results exceeds -// your MaxResults value, then that number of results is returned along with -// a NextToken value that can be passed to a subsequent DescribeVolumes request -// to retrieve the remaining results. +// If you are describing a long list of volumes, we recommend that you paginate +// the output to make the list more manageable. The MaxResults parameter sets +// the maximum number of results returned in a single page. If the list of results +// exceeds your MaxResults value, then that number of results is returned along +// with a NextToken value that can be passed to a subsequent DescribeVolumes +// request to retrieve the remaining results. // // For more information about EBS volumes, see Amazon EBS Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -23501,19 +24397,17 @@ func (c *EC2) DescribeVolumesModificationsRequest(input *DescribeVolumesModifica // DescribeVolumesModifications API operation for Amazon Elastic Compute Cloud. // -// Reports the current modification status of EBS volumes. +// Describes the most recent volume modification request for the specified EBS +// volumes. // -// Current-generation EBS volumes support modification of attributes including -// type, size, and (for io1 volumes) IOPS provisioning while either attached -// to or detached from an instance. Following an action from the API or the -// console to modify a volume, the status of the modification may be modifying, -// optimizing, completed, or failed. If a volume has never been modified, then -// certain elements of the returned VolumeModification objects are null. +// If a volume has never been modified, some information in the output will +// be null. If a volume has been modified more than once, the output includes +// only the most recent modification request. // // You can also use CloudWatch Events to check the status of a modification // to an EBS volume. For information about CloudWatch Events, see the Amazon // CloudWatch Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/). -// For more information, see Monitoring Volume Modifications" (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods) +// For more information, see Monitoring volume modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -25322,7 +26216,7 @@ func (c *EC2) DetachVolumeRequest(input *DetachVolumeInput) (req *request.Reques // When a volume with an AWS Marketplace product code is detached from an instance, // the product code is no longer associated with the instance. // -// For more information, see Detaching an Amazon EBS Volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) +// For more information, see Detaching an Amazon EBS volume (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -25869,6 +26763,8 @@ func (c *EC2) DisableVpcClassicLinkDnsSupportRequest(input *DisableVpcClassicLin // ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) // in the Amazon Elastic Compute Cloud User Guide. // +// You must specify a VPC ID in the request. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -26185,7 +27081,7 @@ func (c *EC2) DisassociateRouteTableRequest(input *DisassociateRouteTableInput) // DisassociateRouteTable API operation for Amazon Elastic Compute Cloud. // -// Disassociates a subnet from a route table. +// Disassociates a subnet or gateway from a route table. // // After you perform this action, the subnet no longer uses the routes in the // route table. Instead, it uses the routes in the VPC's main route table. For @@ -26584,7 +27480,7 @@ func (c *EC2) EnableEbsEncryptionByDefaultRequest(input *EnableEbsEncryptionByDe // // After you enable encryption by default, you can no longer launch instances // using instance types that do not support encryption. For more information, -// see Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). +// see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -26665,7 +27561,7 @@ func (c *EC2) EnableFastSnapshotRestoresRequest(input *EnableFastSnapshotRestore // state. To get the current state of fast snapshot restores, use DescribeFastSnapshotRestores. // To disable fast snapshot restores, use DisableFastSnapshotRestores. // -// For more information, see Amazon EBS Fast Snapshot Restore (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-fast-snapshot-restore.html) +// For more information, see Amazon EBS fast snapshot restore (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-fast-snapshot-restore.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -27055,6 +27951,8 @@ func (c *EC2) EnableVpcClassicLinkDnsSupportRequest(input *EnableVpcClassicLinkD // see ClassicLink (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) // in the Amazon Elastic Compute Cloud User Guide. // +// You must specify a VPC ID in the request. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
@@ -27889,7 +28787,7 @@ func (c *EC2) GetDefaultCreditSpecificationRequest(input *GetDefaultCreditSpecif // Describes the default credit option for CPU usage of a burstable performance // instance family. // -// For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -28077,6 +28975,138 @@ func (c *EC2) GetEbsEncryptionByDefaultWithContext(ctx aws.Context, input *GetEb return out, req.Send() } +const opGetGroupsForCapacityReservation = "GetGroupsForCapacityReservation" + +// GetGroupsForCapacityReservationRequest generates a "aws/request.Request" representing the +// client's request for the GetGroupsForCapacityReservation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetGroupsForCapacityReservation for more information on using the GetGroupsForCapacityReservation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetGroupsForCapacityReservationRequest method. +// req, resp := client.GetGroupsForCapacityReservationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetGroupsForCapacityReservation +func (c *EC2) GetGroupsForCapacityReservationRequest(input *GetGroupsForCapacityReservationInput) (req *request.Request, output *GetGroupsForCapacityReservationOutput) { + op := &request.Operation{ + Name: opGetGroupsForCapacityReservation, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetGroupsForCapacityReservationInput{} + } + + output = &GetGroupsForCapacityReservationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetGroupsForCapacityReservation API operation for Amazon Elastic Compute Cloud. +// +// Lists the resource groups to which a Capacity Reservation has been added. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetGroupsForCapacityReservation for usage and error information. 
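+//
+// As a rough sketch (the client setup, the reservation ID, and the
+// CapacityReservationId field name are assumptions here), paging through the
+// groups might look like:
+//
+//    input := &ec2.GetGroupsForCapacityReservationInput{
+//        CapacityReservationId: aws.String("cr-0123456789abcdef0"),
+//    }
+//    err := svc.GetGroupsForCapacityReservationPages(input,
+//        func(page *ec2.GetGroupsForCapacityReservationOutput, lastPage bool) bool {
+//            fmt.Println(page)
+//            return true // keep iterating until the last page
+//        })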
+// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetGroupsForCapacityReservation +func (c *EC2) GetGroupsForCapacityReservation(input *GetGroupsForCapacityReservationInput) (*GetGroupsForCapacityReservationOutput, error) { + req, out := c.GetGroupsForCapacityReservationRequest(input) + return out, req.Send() +} + +// GetGroupsForCapacityReservationWithContext is the same as GetGroupsForCapacityReservation with the addition of +// the ability to pass a context and additional request options. +// +// See GetGroupsForCapacityReservation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetGroupsForCapacityReservationWithContext(ctx aws.Context, input *GetGroupsForCapacityReservationInput, opts ...request.Option) (*GetGroupsForCapacityReservationOutput, error) { + req, out := c.GetGroupsForCapacityReservationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetGroupsForCapacityReservationPages iterates over the pages of a GetGroupsForCapacityReservation operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetGroupsForCapacityReservation method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetGroupsForCapacityReservation operation. +// pageNum := 0 +// err := client.GetGroupsForCapacityReservationPages(params, +// func(page *ec2.GetGroupsForCapacityReservationOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetGroupsForCapacityReservationPages(input *GetGroupsForCapacityReservationInput, fn func(*GetGroupsForCapacityReservationOutput, bool) bool) error { + return c.GetGroupsForCapacityReservationPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetGroupsForCapacityReservationPagesWithContext same as GetGroupsForCapacityReservationPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetGroupsForCapacityReservationPagesWithContext(ctx aws.Context, input *GetGroupsForCapacityReservationInput, fn func(*GetGroupsForCapacityReservationOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetGroupsForCapacityReservationInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetGroupsForCapacityReservationRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetGroupsForCapacityReservationOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetHostReservationPurchasePreview = "GetHostReservationPurchasePreview" // GetHostReservationPurchasePreviewRequest generates a "aws/request.Request" representing the @@ -28203,6 +29233,12 @@ func (c *EC2) GetLaunchTemplateDataRequest(input *GetLaunchTemplateDataInput) (r // Retrieves the configuration data of the specified instance. You can use this // data to create a launch template. // +// This action calls on other describe actions to get instance information. +// Depending on your instance configuration, you may need to allow the following +// actions in your IAM policy: DescribeSpotInstanceRequests, DescribeInstanceCreditSpecifications, +// DescribeVolumes, DescribeInstanceAttribute, and DescribeElasticGpus. Or, +// you can allow describe* depending on your instance requirements. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -28231,6 +29267,271 @@ func (c *EC2) GetLaunchTemplateDataWithContext(ctx aws.Context, input *GetLaunch return out, req.Send() } +const opGetManagedPrefixListAssociations = "GetManagedPrefixListAssociations" + +// GetManagedPrefixListAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the GetManagedPrefixListAssociations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetManagedPrefixListAssociations for more information on using the GetManagedPrefixListAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetManagedPrefixListAssociationsRequest method. +// req, resp := client.GetManagedPrefixListAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListAssociations +func (c *EC2) GetManagedPrefixListAssociationsRequest(input *GetManagedPrefixListAssociationsInput) (req *request.Request, output *GetManagedPrefixListAssociationsOutput) { + op := &request.Operation{ + Name: opGetManagedPrefixListAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetManagedPrefixListAssociationsInput{} + } + + output = &GetManagedPrefixListAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetManagedPrefixListAssociations API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the resources that are associated with the specified +// managed prefix list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetManagedPrefixListAssociations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListAssociations +func (c *EC2) GetManagedPrefixListAssociations(input *GetManagedPrefixListAssociationsInput) (*GetManagedPrefixListAssociationsOutput, error) { + req, out := c.GetManagedPrefixListAssociationsRequest(input) + return out, req.Send() +} + +// GetManagedPrefixListAssociationsWithContext is the same as GetManagedPrefixListAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See GetManagedPrefixListAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListAssociationsWithContext(ctx aws.Context, input *GetManagedPrefixListAssociationsInput, opts ...request.Option) (*GetManagedPrefixListAssociationsOutput, error) { + req, out := c.GetManagedPrefixListAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetManagedPrefixListAssociationsPages iterates over the pages of a GetManagedPrefixListAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetManagedPrefixListAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetManagedPrefixListAssociations operation. +// pageNum := 0 +// err := client.GetManagedPrefixListAssociationsPages(params, +// func(page *ec2.GetManagedPrefixListAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetManagedPrefixListAssociationsPages(input *GetManagedPrefixListAssociationsInput, fn func(*GetManagedPrefixListAssociationsOutput, bool) bool) error { + return c.GetManagedPrefixListAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetManagedPrefixListAssociationsPagesWithContext same as GetManagedPrefixListAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListAssociationsPagesWithContext(ctx aws.Context, input *GetManagedPrefixListAssociationsInput, fn func(*GetManagedPrefixListAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetManagedPrefixListAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetManagedPrefixListAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetManagedPrefixListAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetManagedPrefixListEntries = "GetManagedPrefixListEntries" + +// GetManagedPrefixListEntriesRequest generates a "aws/request.Request" representing the +// client's request for the GetManagedPrefixListEntries operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetManagedPrefixListEntries for more information on using the GetManagedPrefixListEntries +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetManagedPrefixListEntriesRequest method. +// req, resp := client.GetManagedPrefixListEntriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListEntries +func (c *EC2) GetManagedPrefixListEntriesRequest(input *GetManagedPrefixListEntriesInput) (req *request.Request, output *GetManagedPrefixListEntriesOutput) { + op := &request.Operation{ + Name: opGetManagedPrefixListEntries, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetManagedPrefixListEntriesInput{} + } + + output = &GetManagedPrefixListEntriesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetManagedPrefixListEntries API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the entries for a specified managed prefix list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetManagedPrefixListEntries for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetManagedPrefixListEntries +func (c *EC2) GetManagedPrefixListEntries(input *GetManagedPrefixListEntriesInput) (*GetManagedPrefixListEntriesOutput, error) { + req, out := c.GetManagedPrefixListEntriesRequest(input) + return out, req.Send() +} + +// GetManagedPrefixListEntriesWithContext is the same as GetManagedPrefixListEntries with the addition of +// the ability to pass a context and additional request options. +// +// See GetManagedPrefixListEntries for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
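// Editorial sketch, not part of the generated SDK source: an end-to-end use of
// the paginated, context-aware GetManagedPrefixListEntries variants declared in
// this file. It assumes the usual aws, session, and ec2 imports from this
// module, and it assumes the GetManagedPrefixListEntriesInput.PrefixListId and
// GetManagedPrefixListEntriesOutput.Entries field names from the wider SDK
// model, which are not shown in this hunk.
//
//    import (
//        "context"
//        "fmt"
//        "time"
//
//        "github.com/aws/aws-sdk-go/aws"
//        "github.com/aws/aws-sdk-go/aws/session"
//        "github.com/aws/aws-sdk-go/service/ec2"
//    )
//
//    func listPrefixListCidrs(prefixListID string) error {
//        client := ec2.New(session.Must(session.NewSession()))
//
//        // Bound the whole paginated call; the SDK panics on a nil context.
//        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//        defer cancel()
//
//        input := &ec2.GetManagedPrefixListEntriesInput{
//            PrefixListId: aws.String(prefixListID),
//        }
//        return client.GetManagedPrefixListEntriesPagesWithContext(ctx, input,
//            func(page *ec2.GetManagedPrefixListEntriesOutput, lastPage bool) bool {
//                for _, entry := range page.Entries {
//                    fmt.Println(aws.StringValue(entry.Cidr))
//                }
//                return true // keep requesting pages until the service reports the last one
//            })
//    }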
+func (c *EC2) GetManagedPrefixListEntriesWithContext(ctx aws.Context, input *GetManagedPrefixListEntriesInput, opts ...request.Option) (*GetManagedPrefixListEntriesOutput, error) { + req, out := c.GetManagedPrefixListEntriesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetManagedPrefixListEntriesPages iterates over the pages of a GetManagedPrefixListEntries operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetManagedPrefixListEntries method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetManagedPrefixListEntries operation. +// pageNum := 0 +// err := client.GetManagedPrefixListEntriesPages(params, +// func(page *ec2.GetManagedPrefixListEntriesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetManagedPrefixListEntriesPages(input *GetManagedPrefixListEntriesInput, fn func(*GetManagedPrefixListEntriesOutput, bool) bool) error { + return c.GetManagedPrefixListEntriesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetManagedPrefixListEntriesPagesWithContext same as GetManagedPrefixListEntriesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetManagedPrefixListEntriesPagesWithContext(ctx aws.Context, input *GetManagedPrefixListEntriesInput, fn func(*GetManagedPrefixListEntriesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetManagedPrefixListEntriesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetManagedPrefixListEntriesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetManagedPrefixListEntriesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetPasswordData = "GetPasswordData" // GetPasswordDataRequest generates a "aws/request.Request" representing the @@ -28665,6 +29966,139 @@ func (c *EC2) GetTransitGatewayMulticastDomainAssociationsPagesWithContext(ctx a return p.Err() } +const opGetTransitGatewayPrefixListReferences = "GetTransitGatewayPrefixListReferences" + +// GetTransitGatewayPrefixListReferencesRequest generates a "aws/request.Request" representing the +// client's request for the GetTransitGatewayPrefixListReferences operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetTransitGatewayPrefixListReferences for more information on using the GetTransitGatewayPrefixListReferences +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the GetTransitGatewayPrefixListReferencesRequest method. +// req, resp := client.GetTransitGatewayPrefixListReferencesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayPrefixListReferences +func (c *EC2) GetTransitGatewayPrefixListReferencesRequest(input *GetTransitGatewayPrefixListReferencesInput) (req *request.Request, output *GetTransitGatewayPrefixListReferencesOutput) { + op := &request.Operation{ + Name: opGetTransitGatewayPrefixListReferences, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetTransitGatewayPrefixListReferencesInput{} + } + + output = &GetTransitGatewayPrefixListReferencesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetTransitGatewayPrefixListReferences API operation for Amazon Elastic Compute Cloud. +// +// Gets information about the prefix list references in a specified transit +// gateway route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation GetTransitGatewayPrefixListReferences for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetTransitGatewayPrefixListReferences +func (c *EC2) GetTransitGatewayPrefixListReferences(input *GetTransitGatewayPrefixListReferencesInput) (*GetTransitGatewayPrefixListReferencesOutput, error) { + req, out := c.GetTransitGatewayPrefixListReferencesRequest(input) + return out, req.Send() +} + +// GetTransitGatewayPrefixListReferencesWithContext is the same as GetTransitGatewayPrefixListReferences with the addition of +// the ability to pass a context and additional request options. +// +// See GetTransitGatewayPrefixListReferences for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayPrefixListReferencesWithContext(ctx aws.Context, input *GetTransitGatewayPrefixListReferencesInput, opts ...request.Option) (*GetTransitGatewayPrefixListReferencesOutput, error) { + req, out := c.GetTransitGatewayPrefixListReferencesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetTransitGatewayPrefixListReferencesPages iterates over the pages of a GetTransitGatewayPrefixListReferences operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetTransitGatewayPrefixListReferences method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetTransitGatewayPrefixListReferences operation. 
+// pageNum := 0 +// err := client.GetTransitGatewayPrefixListReferencesPages(params, +// func(page *ec2.GetTransitGatewayPrefixListReferencesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) GetTransitGatewayPrefixListReferencesPages(input *GetTransitGatewayPrefixListReferencesInput, fn func(*GetTransitGatewayPrefixListReferencesOutput, bool) bool) error { + return c.GetTransitGatewayPrefixListReferencesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetTransitGatewayPrefixListReferencesPagesWithContext same as GetTransitGatewayPrefixListReferencesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) GetTransitGatewayPrefixListReferencesPagesWithContext(ctx aws.Context, input *GetTransitGatewayPrefixListReferencesInput, fn func(*GetTransitGatewayPrefixListReferencesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetTransitGatewayPrefixListReferencesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetTransitGatewayPrefixListReferencesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetTransitGatewayPrefixListReferencesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetTransitGatewayRouteTableAssociations = "GetTransitGatewayRouteTableAssociations" // GetTransitGatewayRouteTableAssociationsRequest generates a "aws/request.Request" representing the @@ -29443,9 +30877,10 @@ func (c *EC2) ModifyAvailabilityZoneGroupRequest(input *ModifyAvailabilityZoneGr // ModifyAvailabilityZoneGroup API operation for Amazon Elastic Compute Cloud. // -// Enables or disables an Availability Zone group for your account. +// Changes the opt-in status of the Local Zone and Wavelength Zone group for +// your account. // -// Use describe-availability-zones (https://docs.aws.amazon.com/AWSEC2ApiDocReef/build/server-root/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) +// Use DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) // to view the value for GroupName. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -29687,7 +31122,7 @@ func (c *EC2) ModifyDefaultCreditSpecificationRequest(input *ModifyDefaultCredit // can call GetDefaultCreditSpecification and check DefaultCreditSpecification // for updates. // -// For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -30399,7 +31834,7 @@ func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput // we recommend that you use the ModifyNetworkInterfaceAttribute action. // // To modify some attributes, the instance must be stopped. 
For more information, -// see Modifying Attributes of a Stopped Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) +// see Modifying attributes of a stopped instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_ChangingAttributesWhileInstanceStopped.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -30554,7 +31989,7 @@ func (c *EC2) ModifyInstanceCreditSpecificationRequest(input *ModifyInstanceCred // Modifies the credit option for CPU usage on a running or stopped burstable // performance instance. The credit options are standard and unlimited. // -// For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) +// For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -30709,7 +32144,7 @@ func (c *EC2) ModifyInstanceMetadataOptionsRequest(input *ModifyInstanceMetadata // the API responds with a state of “pending”. After the parameter modifications // are successfully applied to the instance, the state of the modifications // changes from “pending” to “applied” in subsequent describe-instances -// API calls. For more information, see Instance Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). +// API calls. For more information, see Instance metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -30910,6 +32345,86 @@ func (c *EC2) ModifyLaunchTemplateWithContext(ctx aws.Context, input *ModifyLaun return out, req.Send() } +const opModifyManagedPrefixList = "ModifyManagedPrefixList" + +// ModifyManagedPrefixListRequest generates a "aws/request.Request" representing the +// client's request for the ModifyManagedPrefixList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyManagedPrefixList for more information on using the ModifyManagedPrefixList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyManagedPrefixListRequest method. 
+// req, resp := client.ModifyManagedPrefixListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyManagedPrefixList +func (c *EC2) ModifyManagedPrefixListRequest(input *ModifyManagedPrefixListInput) (req *request.Request, output *ModifyManagedPrefixListOutput) { + op := &request.Operation{ + Name: opModifyManagedPrefixList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyManagedPrefixListInput{} + } + + output = &ModifyManagedPrefixListOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyManagedPrefixList API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified managed prefix list. +// +// Adding or removing entries in a prefix list creates a new version of the +// prefix list. Changing the name of the prefix list does not affect the version. +// +// If you specify a current version number that does not match the true current +// version number, the request fails. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyManagedPrefixList for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyManagedPrefixList +func (c *EC2) ModifyManagedPrefixList(input *ModifyManagedPrefixListInput) (*ModifyManagedPrefixListOutput, error) { + req, out := c.ModifyManagedPrefixListRequest(input) + return out, req.Send() +} + +// ModifyManagedPrefixListWithContext is the same as ModifyManagedPrefixList with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyManagedPrefixList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyManagedPrefixListWithContext(ctx aws.Context, input *ModifyManagedPrefixListInput, opts ...request.Option) (*ModifyManagedPrefixListOutput, error) { + req, out := c.ModifyManagedPrefixListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute" // ModifyNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the @@ -31122,7 +32637,7 @@ func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput // be made public. Snapshots encrypted with your default CMK cannot be shared // with other accounts. // -// For more information about modifying snapshot permissions, see Sharing Snapshots +// For more information about modifying snapshot permissions, see Sharing snapshots // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) // in the Amazon Elastic Compute Cloud User Guide. 
// @@ -31567,6 +33082,157 @@ func (c *EC2) ModifyTrafficMirrorSessionWithContext(ctx aws.Context, input *Modi return out, req.Send() } +const opModifyTransitGateway = "ModifyTransitGateway" + +// ModifyTransitGatewayRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTransitGateway operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTransitGateway for more information on using the ModifyTransitGateway +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTransitGatewayRequest method. +// req, resp := client.ModifyTransitGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGateway +func (c *EC2) ModifyTransitGatewayRequest(input *ModifyTransitGatewayInput) (req *request.Request, output *ModifyTransitGatewayOutput) { + op := &request.Operation{ + Name: opModifyTransitGateway, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTransitGatewayInput{} + } + + output = &ModifyTransitGatewayOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTransitGateway API operation for Amazon Elastic Compute Cloud. +// +// Modifies the specified transit gateway. When you modify a transit gateway, +// the modified options are applied to new transit gateway attachments only. +// Your existing transit gateway attachments are not modified. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTransitGateway for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGateway +func (c *EC2) ModifyTransitGateway(input *ModifyTransitGatewayInput) (*ModifyTransitGatewayOutput, error) { + req, out := c.ModifyTransitGatewayRequest(input) + return out, req.Send() +} + +// ModifyTransitGatewayWithContext is the same as ModifyTransitGateway with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTransitGateway for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyTransitGatewayWithContext(ctx aws.Context, input *ModifyTransitGatewayInput, opts ...request.Option) (*ModifyTransitGatewayOutput, error) { + req, out := c.ModifyTransitGatewayRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opModifyTransitGatewayPrefixListReference = "ModifyTransitGatewayPrefixListReference" + +// ModifyTransitGatewayPrefixListReferenceRequest generates a "aws/request.Request" representing the +// client's request for the ModifyTransitGatewayPrefixListReference operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyTransitGatewayPrefixListReference for more information on using the ModifyTransitGatewayPrefixListReference +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyTransitGatewayPrefixListReferenceRequest method. +// req, resp := client.ModifyTransitGatewayPrefixListReferenceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGatewayPrefixListReference +func (c *EC2) ModifyTransitGatewayPrefixListReferenceRequest(input *ModifyTransitGatewayPrefixListReferenceInput) (req *request.Request, output *ModifyTransitGatewayPrefixListReferenceOutput) { + op := &request.Operation{ + Name: opModifyTransitGatewayPrefixListReference, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyTransitGatewayPrefixListReferenceInput{} + } + + output = &ModifyTransitGatewayPrefixListReferenceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyTransitGatewayPrefixListReference API operation for Amazon Elastic Compute Cloud. +// +// Modifies a reference (route) to a prefix list in a specified transit gateway +// route table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyTransitGatewayPrefixListReference for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyTransitGatewayPrefixListReference +func (c *EC2) ModifyTransitGatewayPrefixListReference(input *ModifyTransitGatewayPrefixListReferenceInput) (*ModifyTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.ModifyTransitGatewayPrefixListReferenceRequest(input) + return out, req.Send() +} + +// ModifyTransitGatewayPrefixListReferenceWithContext is the same as ModifyTransitGatewayPrefixListReference with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyTransitGatewayPrefixListReference for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
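// Editorial sketch, not part of the generated SDK source: repointing a transit
// gateway route table's prefix list reference at a different attachment, with
// request cancellation via the caller's context. The TransitGatewayRouteTableId,
// PrefixListId, and TransitGatewayAttachmentId field names are assumptions
// drawn from the wider SDK model rather than from this hunk; the aws and ec2
// imports are assumed.
//
//    func repointPrefixListReference(ctx aws.Context, client *ec2.EC2, routeTableID, prefixListID, attachmentID string) error {
//        _, err := client.ModifyTransitGatewayPrefixListReferenceWithContext(ctx,
//            &ec2.ModifyTransitGatewayPrefixListReferenceInput{
//                TransitGatewayRouteTableId: aws.String(routeTableID),
//                PrefixListId:               aws.String(prefixListID),
//                TransitGatewayAttachmentId: aws.String(attachmentID), // route traffic for the prefix list to this attachment
//            })
//        return err
//    }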
+func (c *EC2) ModifyTransitGatewayPrefixListReferenceWithContext(ctx aws.Context, input *ModifyTransitGatewayPrefixListReferenceInput, opts ...request.Option) (*ModifyTransitGatewayPrefixListReferenceOutput, error) { + req, out := c.ModifyTransitGatewayPrefixListReferenceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyTransitGatewayVpcAttachment = "ModifyTransitGatewayVpcAttachment" // ModifyTransitGatewayVpcAttachmentRequest generates a "aws/request.Request" representing the @@ -31689,30 +33355,30 @@ func (c *EC2) ModifyVolumeRequest(input *ModifyVolumeInput) (req *request.Reques // size, volume type, and IOPS capacity. If your EBS volume is attached to a // current-generation EC2 instance type, you may be able to apply these changes // without stopping the instance or detaching the volume from it. For more information -// about modifying an EBS volume running Linux, see Modifying the Size, IOPS, -// or Type of an EBS Volume on Linux (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html). +// about modifying an EBS volume running Linux, see Modifying the size, IOPS, +// or type of an EBS volume on Linux (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html). // For more information about modifying an EBS volume running Windows, see Modifying -// the Size, IOPS, or Type of an EBS Volume on Windows (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). +// the size, IOPS, or type of an EBS volume on Windows (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). // // When you complete a resize operation on your volume, you need to extend the // volume's file-system size to take advantage of the new storage capacity. // For information about extending a Linux file system, see Extending a Linux -// File System (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#recognize-expanded-volume-linux). +// file system (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#recognize-expanded-volume-linux). // For information about extending a Windows file system, see Extending a Windows -// File System (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html#recognize-expanded-volume-windows). +// file system (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html#recognize-expanded-volume-windows). // // You can use CloudWatch Events to check the status of a modification to an // EBS volume. For information about CloudWatch Events, see the Amazon CloudWatch // Events User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/). // You can also track the status of a modification using DescribeVolumesModifications. // For information about tracking status changes using either method, see Monitoring -// Volume Modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods). +// volume modifications (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html#monitoring_mods). // // With previous-generation instance types, resizing an EBS volume may require // detaching and reattaching the volume or stopping and restarting the instance. 
-// For more information, see Modifying the Size, IOPS, or Type of an EBS Volume +// For more information, see Modifying the size, IOPS, or type of an EBS volume // on Linux (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-expand-volume.html) -// and Modifying the Size, IOPS, or Type of an EBS Volume on Windows (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). +// and Modifying the size, IOPS, or type of an EBS volume on Windows (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ebs-expand-volume.html). // // If you reach the maximum volume modification rate per volume limit, you will // need to wait at least six hours before applying further modifications to @@ -32445,8 +34111,9 @@ func (c *EC2) ModifyVpnConnectionRequest(input *ModifyVpnConnectionInput) (req * // ModifyVpnConnection API operation for Amazon Elastic Compute Cloud. // -// Modifies the target gateway of an AWS Site-to-Site VPN connection. The following -// migration options are available: +// Modifies the customer gateway or the target gateway of an AWS Site-to-Site +// VPN connection. To modify the target gateway, the following migration options +// are available: // // * An existing virtual private gateway to a new virtual private gateway // @@ -32514,6 +34181,85 @@ func (c *EC2) ModifyVpnConnectionWithContext(ctx aws.Context, input *ModifyVpnCo return out, req.Send() } +const opModifyVpnConnectionOptions = "ModifyVpnConnectionOptions" + +// ModifyVpnConnectionOptionsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpnConnectionOptions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyVpnConnectionOptions for more information on using the ModifyVpnConnectionOptions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyVpnConnectionOptionsRequest method. +// req, resp := client.ModifyVpnConnectionOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnConnectionOptions +func (c *EC2) ModifyVpnConnectionOptionsRequest(input *ModifyVpnConnectionOptionsInput) (req *request.Request, output *ModifyVpnConnectionOptionsOutput) { + op := &request.Operation{ + Name: opModifyVpnConnectionOptions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyVpnConnectionOptionsInput{} + } + + output = &ModifyVpnConnectionOptionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyVpnConnectionOptions API operation for Amazon Elastic Compute Cloud. +// +// Modifies the connection options for your Site-to-Site VPN connection. +// +// When you modify the VPN connection options, the VPN endpoint IP addresses +// on the AWS side do not change, and the tunnel options do not change. Your +// VPN connection will be temporarily unavailable for a brief period while the +// VPN connection is updated. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation ModifyVpnConnectionOptions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/ModifyVpnConnectionOptions +func (c *EC2) ModifyVpnConnectionOptions(input *ModifyVpnConnectionOptionsInput) (*ModifyVpnConnectionOptionsOutput, error) { + req, out := c.ModifyVpnConnectionOptionsRequest(input) + return out, req.Send() +} + +// ModifyVpnConnectionOptionsWithContext is the same as ModifyVpnConnectionOptions with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyVpnConnectionOptions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) ModifyVpnConnectionOptionsWithContext(ctx aws.Context, input *ModifyVpnConnectionOptionsInput, opts ...request.Option) (*ModifyVpnConnectionOptionsOutput, error) { + req, out := c.ModifyVpnConnectionOptionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opModifyVpnTunnelCertificate = "ModifyVpnTunnelCertificate" // ModifyVpnTunnelCertificateRequest generates a "aws/request.Request" representing the @@ -32711,7 +34457,7 @@ func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *reques // MonitorInstances API operation for Amazon Elastic Compute Cloud. // // Enables detailed monitoring for a running instance. Otherwise, basic monitoring -// is enabled. For more information, see Monitoring Your Instances and Volumes +// is enabled. For more information, see Monitoring your instances and volumes // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -33215,8 +34961,8 @@ func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request. // If an instance does not cleanly shut down within four minutes, Amazon EC2 // performs a hard reboot. // -// For more information about troubleshooting, see Getting Console Output and -// Rebooting Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html) +// For more information about troubleshooting, see Getting console output and +// rebooting instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-console.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -33293,7 +35039,7 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // // Registers an AMI. When you're creating an AMI, this is the final step you // must complete before you can launch an instance from the AMI. For more information -// about creating AMIs, see Creating Your Own AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) +// about creating AMIs, see Creating your own AMIs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) // in the Amazon Elastic Compute Cloud User Guide. 
// // For Amazon EBS-backed instances, CreateImage creates and registers the AMI @@ -33301,12 +35047,12 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // // You can also use RegisterImage to create an Amazon EBS-backed Linux AMI from // a snapshot of a root device volume. You specify the snapshot using the block -// device mapping. For more information, see Launching a Linux Instance from -// a Backup (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-launch-snapshot.html) +// device mapping. For more information, see Launching a Linux instance from +// a backup (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-launch-snapshot.html) // in the Amazon Elastic Compute Cloud User Guide. // -// You can't register an image where a secondary (non-root) snapshot has AWS -// Marketplace product codes. +// If any snapshots have AWS Marketplace product codes, they are copied to the +// new AMI. // // Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) // and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code @@ -33327,7 +35073,7 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // a Reserved Instance without the matching billing product code, the Reserved // Instance will not be applied to the On-Demand Instance. For information about // how to obtain the platform details and billing information of an AMI, see -// Obtaining Billing Information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) +// Obtaining billing information (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ami-billing-info.html) // in the Amazon Elastic Compute Cloud User Guide. // // If needed, you can deregister an AMI at any time. Any modifications you make @@ -33363,6 +35109,83 @@ func (c *EC2) RegisterImageWithContext(ctx aws.Context, input *RegisterImageInpu return out, req.Send() } +const opRegisterInstanceEventNotificationAttributes = "RegisterInstanceEventNotificationAttributes" + +// RegisterInstanceEventNotificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the RegisterInstanceEventNotificationAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterInstanceEventNotificationAttributes for more information on using the RegisterInstanceEventNotificationAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterInstanceEventNotificationAttributesRequest method. 
+// req, resp := client.RegisterInstanceEventNotificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterInstanceEventNotificationAttributes +func (c *EC2) RegisterInstanceEventNotificationAttributesRequest(input *RegisterInstanceEventNotificationAttributesInput) (req *request.Request, output *RegisterInstanceEventNotificationAttributesOutput) { + op := &request.Operation{ + Name: opRegisterInstanceEventNotificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterInstanceEventNotificationAttributesInput{} + } + + output = &RegisterInstanceEventNotificationAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterInstanceEventNotificationAttributes API operation for Amazon Elastic Compute Cloud. +// +// Registers a set of tag keys to include in scheduled event notifications for +// your resources. +// +// To remove tags, use . +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RegisterInstanceEventNotificationAttributes for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RegisterInstanceEventNotificationAttributes +func (c *EC2) RegisterInstanceEventNotificationAttributes(input *RegisterInstanceEventNotificationAttributesInput) (*RegisterInstanceEventNotificationAttributesOutput, error) { + req, out := c.RegisterInstanceEventNotificationAttributesRequest(input) + return out, req.Send() +} + +// RegisterInstanceEventNotificationAttributesWithContext is the same as RegisterInstanceEventNotificationAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterInstanceEventNotificationAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RegisterInstanceEventNotificationAttributesWithContext(ctx aws.Context, input *RegisterInstanceEventNotificationAttributesInput, opts ...request.Option) (*RegisterInstanceEventNotificationAttributesOutput, error) { + req, out := c.RegisterInstanceEventNotificationAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRegisterTransitGatewayMulticastGroupMembers = "RegisterTransitGatewayMulticastGroupMembers" // RegisterTransitGatewayMulticastGroupMembersRequest generates a "aws/request.Request" representing the @@ -34628,7 +36451,7 @@ func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *reques // the fleet. You cannot tag other resource types in a Spot Fleet request because // only the spot-fleet-request and instance resource types are supported. 
// -// For more information, see Spot Fleet Requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) +// For more information, see Spot Fleet requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html) // in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -34705,7 +36528,7 @@ func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req // // Creates a Spot Instance request. // -// For more information, see Spot Instance Requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) +// For more information, see Spot Instance requests (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html) // in the Amazon EC2 User Guide for Linux Instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -35174,7 +36997,7 @@ func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput) // // Resets permission settings for the specified snapshot. // -// For more information about modifying snapshot permissions, see Sharing Snapshots +// For more information about modifying snapshot permissions, see Sharing snapshots // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) // in the Amazon Elastic Compute Cloud User Guide. // @@ -35283,6 +37106,81 @@ func (c *EC2) RestoreAddressToClassicWithContext(ctx aws.Context, input *Restore return out, req.Send() } +const opRestoreManagedPrefixListVersion = "RestoreManagedPrefixListVersion" + +// RestoreManagedPrefixListVersionRequest generates a "aws/request.Request" representing the +// client's request for the RestoreManagedPrefixListVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreManagedPrefixListVersion for more information on using the RestoreManagedPrefixListVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreManagedPrefixListVersionRequest method. +// req, resp := client.RestoreManagedPrefixListVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreManagedPrefixListVersion +func (c *EC2) RestoreManagedPrefixListVersionRequest(input *RestoreManagedPrefixListVersionInput) (req *request.Request, output *RestoreManagedPrefixListVersionOutput) { + op := &request.Operation{ + Name: opRestoreManagedPrefixListVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreManagedPrefixListVersionInput{} + } + + output = &RestoreManagedPrefixListVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreManagedPrefixListVersion API operation for Amazon Elastic Compute Cloud. +// +// Restores the entries from a previous version of a managed prefix list to +// a new version of the prefix list. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic Compute Cloud's +// API operation RestoreManagedPrefixListVersion for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/RestoreManagedPrefixListVersion +func (c *EC2) RestoreManagedPrefixListVersion(input *RestoreManagedPrefixListVersionInput) (*RestoreManagedPrefixListVersionOutput, error) { + req, out := c.RestoreManagedPrefixListVersionRequest(input) + return out, req.Send() +} + +// RestoreManagedPrefixListVersionWithContext is the same as RestoreManagedPrefixListVersion with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreManagedPrefixListVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EC2) RestoreManagedPrefixListVersionWithContext(ctx aws.Context, input *RestoreManagedPrefixListVersionInput, opts ...request.Option) (*RestoreManagedPrefixListVersionOutput, error) { + req, out := c.RestoreManagedPrefixListVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRevokeClientVpnIngress = "RevokeClientVpnIngress" // RevokeClientVpnIngressRequest generates a "aws/request.Request" representing the @@ -35396,16 +37294,22 @@ func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressI output = &RevokeSecurityGroupEgressOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // RevokeSecurityGroupEgress API operation for Amazon Elastic Compute Cloud. // // [VPC only] Removes the specified egress rules from a security group for EC2-VPC. -// This action doesn't apply to security groups for use in EC2-Classic. To remove -// a rule, the values that you specify (for example, ports) must match the existing -// rule's values exactly. +// This action does not apply to security groups for use in EC2-Classic. To +// remove a rule, the values that you specify (for example, ports) must match +// the existing rule's values exactly. +// +// [Default VPC] If the values you specify do not match the existing rule's +// values, no error is returned, and the output describes the security group +// rules that were not revoked. +// +// AWS recommends that you use DescribeSecurityGroups to verify that the rule +// has been removed. // // Each rule consists of the protocol and the IPv4 or IPv6 CIDR range or source // security group. For the TCP and UDP protocols, you must also specify the @@ -35483,7 +37387,6 @@ func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngres output = &RevokeSecurityGroupIngressOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(ec2query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -35493,9 +37396,12 @@ func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngres // the values that you specify (for example, ports) must match the existing // rule's values exactly. 
// -// [EC2-Classic only] If the values you specify do not match the existing rule's -// values, no error is returned. Use DescribeSecurityGroups to verify that the -// rule has been removed. +// [EC2-Classic , default VPC] If the values you specify do not match the existing +// rule's values, no error is returned, and the output describes the security +// group rules that were not revoked. +// +// AWS recommends that you use DescribeSecurityGroups to verify that the rule +// has been removed. // // Each rule consists of the protocol and the CIDR range or source security // group. For the TCP and UDP protocols, you must also specify the destination @@ -35593,17 +37499,17 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // // * Some instance types must be launched into a VPC. If you do not have // a default VPC, or if you do not specify a subnet ID, the request fails. -// For more information, see Instance Types Available Only in a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-vpc.html#vpc-only-instance-types). +// For more information, see Instance types available only in a VPC (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-vpc.html#vpc-only-instance-types). // // * [EC2-VPC] All instances have a network interface with a primary private // IPv4 address. If you don't specify this address, we choose one from the // IPv4 range of your subnet. // // * Not all instance types support IPv6 addresses. For more information, -// see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). +// see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). // // * If you don't specify a security group ID, we use the default security -// group. For more information, see Security Groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html). +// group. For more information, see Security groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html). // // * If any of the AMIs have a product code attached for which the user has // not subscribed, the request fails. @@ -35620,17 +37526,17 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // An instance is ready for you to use when it's in the running state. You can // check the state of your instance using DescribeInstances. You can tag instances // and EBS volumes during launch, after launch, or both. For more information, -// see CreateTags and Tagging Your Amazon EC2 Resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). +// see CreateTags and Tagging your Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). // // Linux instances have access to the public key of the key pair at boot. You // can use this key to provide secure access to the instance. Amazon EC2 public // images use this feature to provide secure access without passwords. For more -// information, see Key Pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) +// information, see Key pairs (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) // in the Amazon Elastic Compute Cloud User Guide. 
// -// For troubleshooting, see What To Do If An Instance Immediately Terminates +// For troubleshooting, see What to do if an instance immediately terminates // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_InstanceStraightToTerminated.html), -// and Troubleshooting Connecting to Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html) +// and Troubleshooting connecting to your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesConnecting.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -36143,8 +38049,8 @@ func (c *EC2) SendDiagnosticInterruptRequest(input *SendDiagnosticInterruptInput // system is configured to perform the required diagnostic tasks. // // For more information about configuring your operating system to generate -// a crash dump when a kernel panic or stop error occurs, see Send a Diagnostic -// Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/diagnostic-interrupt.html) +// a crash dump when a kernel panic or stop error occurs, see Send a diagnostic +// interrupt (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/diagnostic-interrupt.html) // (Linux instances) or Send a Diagnostic Interrupt (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/diagnostic-interrupt.html) // (Windows instances). // @@ -36241,7 +38147,7 @@ func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Re // Performing this operation on an instance that uses an instance store as its // root device returns an error. // -// For more information, see Stopping Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) +// For more information, see Stopping instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Stop_Start.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -36404,7 +38310,7 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // You can use the Stop action to hibernate an instance if the instance is enabled // for hibernation (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#enabling-hibernation) // and it meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). -// For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. // // We don't charge usage for a stopped instance, or data transfer fees; however, @@ -36420,7 +38326,7 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // You can't stop or hibernate instance store-backed instances. You can't use // the Stop action to hibernate Spot Instances, but you can specify that Amazon // EC2 should hibernate Spot Instances when they are interrupted. 
For more information, -// see Hibernating Interrupted Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html#hibernate-spot-instances) +// see Hibernating interrupted Spot Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-interruptions.html#hibernate-spot-instances) // in the Amazon Elastic Compute Cloud User Guide. // // When you stop or hibernate an instance, we shut it down. You can restart @@ -36436,13 +38342,13 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // an instance, the root device and any other devices attached during the instance // launch are automatically deleted. For more information about the differences // between rebooting, stopping, hibernating, and terminating instances, see -// Instance Lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// Instance lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) // in the Amazon Elastic Compute Cloud User Guide. // // When you stop an instance, we attempt to shut it down forcibly after a short // while. If your instance appears stuck in the stopping state after a period // of time, there may be an issue with the underlying host computer. For more -// information, see Troubleshooting Stopping Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) +// information, see Troubleshooting stopping your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -36612,11 +38518,11 @@ func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *re // an instance, any attached EBS volumes with the DeleteOnTermination block // device mapping parameter set to true are automatically deleted. For more // information about the differences between stopping and terminating instances, -// see Instance Lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) +// see Instance lifecycle (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) // in the Amazon Elastic Compute Cloud User Guide. // -// For more information about troubleshooting, see Troubleshooting Terminating -// Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html) +// For more information about troubleshooting, see Troubleshooting terminating +// your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesShuttingDown.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -36841,7 +38747,7 @@ func (c *EC2) UnmonitorInstancesRequest(input *UnmonitorInstancesInput) (req *re // UnmonitorInstances API operation for Amazon Elastic Compute Cloud. // // Disables detailed monitoring for a running instance. For more information, -// see Monitoring Your Instances and Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) +// see Monitoring your instances and volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-cloudwatch.html) // in the Amazon Elastic Compute Cloud User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -37610,7 +39516,57 @@ func (s *ActiveInstance) SetSpotInstanceRequestId(v string) *ActiveInstance { return s } -// Describes an Elastic IP address. +// An entry for a prefix list. +type AddPrefixListEntry struct { + _ struct{} `type:"structure"` + + // The CIDR block. + // + // Cidr is a required field + Cidr *string `type:"string" required:"true"` + + // A description for the entry. + // + // Constraints: Up to 255 characters in length. + Description *string `type:"string"` +} + +// String returns the string representation +func (s AddPrefixListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddPrefixListEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddPrefixListEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddPrefixListEntry"} + if s.Cidr == nil { + invalidParams.Add(request.NewErrParamRequired("Cidr")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidr sets the Cidr field's value. +func (s *AddPrefixListEntry) SetCidr(v string) *AddPrefixListEntry { + s.Cidr = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *AddPrefixListEntry) SetDescription(v string) *AddPrefixListEntry { + s.Description = &v + return s +} + +// Describes an Elastic IP address, or a carrier IP address. type Address struct { _ struct{} `type:"structure"` @@ -37621,6 +39577,11 @@ type Address struct { // VPC. AssociationId *string `locationName:"associationId" type:"string"` + // The carrier IP address associated. This option is only available for network + // interfaces which reside in a subnet in a Wavelength Zone (for example an + // EC2 instance). + CarrierIp *string `locationName:"carrierIp" type:"string"` + // The customer-owned IP address. CustomerOwnedIp *string `locationName:"customerOwnedIp" type:"string"` @@ -37634,7 +39595,8 @@ type Address struct { // The ID of the instance that the address is associated with (if any). InstanceId *string `locationName:"instanceId" type:"string"` - // The name of the location from which the IP address is advertised. + // The name of the unique set of Availability Zones, Local Zones, or Wavelength + // Zones from which AWS advertises IP addresses. NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` // The ID of the network interface. @@ -37678,6 +39640,12 @@ func (s *Address) SetAssociationId(v string) *Address { return s } +// SetCarrierIp sets the CarrierIp field's value. +func (s *Address) SetCarrierIp(v string) *Address { + s.CarrierIp = &v + return s +} + // SetCustomerOwnedIp sets the CustomerOwnedIp field's value. func (s *Address) SetCustomerOwnedIp(v string) *Address { s.CustomerOwnedIp = &v @@ -37830,9 +39798,11 @@ type AllocateAddressInput struct { // address from the address pool. CustomerOwnedIpv4Pool *string `type:"string"` - // Set to vpc to allocate the address for use with instances in a VPC. + // Indicates whether the Elastic IP address is for use with instances in a VPC + // or instances in EC2-Classic. // - // Default: The address is for use with instances in EC2-Classic. + // Default: If the Region supports EC2-Classic, the default is standard. Otherwise, + // the default is vpc. 
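The new AddPrefixListEntry type introduced at the top of this hunk carries a required Cidr, an optional Description, and a client-side Validate. A small sketch of how it is built with the generated setters follows; the CIDR and description values are illustrative only (the hunk itself continues into the Address and AllocateAddressInput changes below).

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Build an entry using the fluent setters generated above.
	entry := (&ec2.AddPrefixListEntry{}).
		SetCidr("10.0.0.0/16").
		SetDescription("example VPC range")

	// Validate enforces the client-side constraint that Cidr is required.
	if err := entry.Validate(); err != nil {
		fmt.Println("invalid entry:", err)
		return
	}
	fmt.Println(aws.StringValue(entry.Cidr), "ok")
}
```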
Domain *string `type:"string" enum:"DomainType"` // Checks whether you have the required permissions for the action, without @@ -37841,12 +39811,9 @@ type AllocateAddressInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // The location from which the IP address is advertised. Use this parameter - // to limit the address to this location. - // - // A network border group is a unique set of Availability Zones or Local Zones - // from where AWS advertises IP addresses and limits the addresses to the group. - // IP addresses cannot move between network border groups. + // A unique set of Availability Zones, Local Zones, or Wavelength Zones from + // which AWS advertises IP addresses. Use this parameter to limit the IP address + // to this location. IP addresses cannot move between network border groups. // // Use DescribeAvailabilityZones (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) // to view the network border groups. @@ -37915,17 +39882,22 @@ type AllocateAddressOutput struct { // IP address for use with instances in a VPC. AllocationId *string `locationName:"allocationId" type:"string"` + // The carrier IP address. This option is only available for network interfaces + // which reside in a subnet in a Wavelength Zone (for example an EC2 instance). + CarrierIp *string `locationName:"carrierIp" type:"string"` + // The customer-owned IP address. CustomerOwnedIp *string `locationName:"customerOwnedIp" type:"string"` // The ID of the customer-owned address pool. CustomerOwnedIpv4Pool *string `locationName:"customerOwnedIpv4Pool" type:"string"` - // Indicates whether this Elastic IP address is for use with instances in EC2-Classic - // (standard) or instances in a VPC (vpc). + // Indicates whether the Elastic IP address is for use with instances in a VPC + // (vpc) or instances in EC2-Classic (standard). Domain *string `locationName:"domain" type:"string" enum:"DomainType"` - // The location from which the IP address is advertised. + // The set of Availability Zones, Local Zones, or Wavelength Zones from which + // AWS advertises IP addresses. NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` // The Elastic IP address. @@ -37951,6 +39923,12 @@ func (s *AllocateAddressOutput) SetAllocationId(v string) *AllocateAddressOutput return s } +// SetCarrierIp sets the CarrierIp field's value. +func (s *AllocateAddressOutput) SetCarrierIp(v string) *AllocateAddressOutput { + s.CarrierIp = &v + return s +} + // SetCustomerOwnedIp sets the CustomerOwnedIp field's value. func (s *AllocateAddressOutput) SetCustomerOwnedIp(v string) *AllocateAddressOutput { s.CustomerOwnedIp = &v @@ -39243,7 +41221,7 @@ type AssociateVpcCidrBlockInput struct { Ipv6CidrBlock *string `type:"string"` // The name of the location from which we advertise the IPV6 CIDR block. Use - // this parameter to limit the CiDR block to this location. + // this parameter to limit the CIDR block to this location. // // You must set AmazonProvidedIpv6CidrBlock to true to use this parameter. // @@ -39999,11 +41977,14 @@ func (s *AuthorizationRule) SetStatus(v *ClientVpnAuthorizationRuleStatus) *Auth type AuthorizeClientVpnIngressInput struct { _ struct{} `type:"structure"` - // The ID of the Active Directory group to grant access. + // The ID of the group to grant access to, for example, the Active Directory + // group or identity provider (IdP) group. Required if AuthorizeAllGroups is + // false or not specified. 
AccessGroupId *string `type:"string"` - // Indicates whether to grant access to all clients. Use true to grant all clients - // who successfully establish a VPN connection access to the network. + // Indicates whether to grant access to all clients. Specify true to grant all + // clients who successfully establish a VPN connection access to the network. + // Must be set to true if AccessGroupId is not specified. AuthorizeAllGroups *bool `type:"boolean"` // Unique, case-sensitive identifier that you provide to ensure the idempotency @@ -40411,38 +42392,52 @@ func (s AuthorizeSecurityGroupIngressOutput) GoString() string { return s.String() } -// Describes an Availability Zone or Local Zone. +// Describes Availability Zones, Local Zones, and Wavelength Zones. type AvailabilityZone struct { _ struct{} `type:"structure"` // For Availability Zones, this parameter has the same value as the Region name. // // For Local Zones, the name of the associated group, for example us-west-2-lax-1. + // + // For Wavelength Zones, the name of the associated group, for example us-east-1-wl1-bos-wlz-1. GroupName *string `locationName:"groupName" type:"string"` - // Any messages about the Availability Zone or Local Zone. + // Any messages about the Availability Zone, Local Zone, or Wavelength Zone. Messages []*AvailabilityZoneMessage `locationName:"messageSet" locationNameList:"item" type:"list"` - // The name of the location from which the address is advertised. + // The name of the network border group. NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` // For Availability Zones, this parameter always has the value of opt-in-not-required. // - // For Local Zones, this parameter is the opt in status. The possible values - // are opted-in, and not-opted-in. + // For Local Zones and Wavelength Zones, this parameter is the opt-in status. + // The possible values are opted-in, and not-opted-in. OptInStatus *string `locationName:"optInStatus" type:"string" enum:"AvailabilityZoneOptInStatus"` + // The ID of the zone that handles some of the Local Zone or Wavelength Zone + // control plane operations, such as API calls. + ParentZoneId *string `locationName:"parentZoneId" type:"string"` + + // The name of the zone that handles some of the Local Zone or Wavelength Zone + // control plane operations, such as API calls. + ParentZoneName *string `locationName:"parentZoneName" type:"string"` + // The name of the Region. RegionName *string `locationName:"regionName" type:"string"` - // The state of the Availability Zone or Local Zone. + // The state of the Availability Zone, Local Zone, or Wavelength Zone. State *string `locationName:"zoneState" type:"string" enum:"AvailabilityZoneState"` - // The ID of the Availability Zone or Local Zone. + // The ID of the Availability Zone, Local Zone, or Wavelength Zone. ZoneId *string `locationName:"zoneId" type:"string"` - // The name of the Availability Zone or Local Zone. + // The name of the Availability Zone, Local Zone, or Wavelength Zone. ZoneName *string `locationName:"zoneName" type:"string"` + + // The type of zone. The valid values are availability-zone, local-zone, and + // wavelength-zone. + ZoneType *string `locationName:"zoneType" type:"string"` } // String returns the string representation @@ -40479,6 +42474,18 @@ func (s *AvailabilityZone) SetOptInStatus(v string) *AvailabilityZone { return s } +// SetParentZoneId sets the ParentZoneId field's value. 
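Stepping back to the Client VPN change earlier in this hunk: the reworded AccessGroupId / AuthorizeAllGroups doc comments say one of the two must be supplied. A hedged sketch of an AuthorizeClientVpnIngress call that grants access to all groups is shown below; the endpoint ID and CIDR are placeholders, and the ClientVpnEndpointId and TargetNetworkCidr fields are assumed from the wider API rather than shown in this excerpt.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Grant all connected clients access to the target network. Per the
	// doc comment above, AuthorizeAllGroups must be true when
	// AccessGroupId is not specified (and vice versa).
	_, err := svc.AuthorizeClientVpnIngress(&ec2.AuthorizeClientVpnIngressInput{
		ClientVpnEndpointId: aws.String("cvpn-endpoint-0123456789abcdef0"), // placeholder
		TargetNetworkCidr:   aws.String("10.0.0.0/16"),                     // placeholder
		AuthorizeAllGroups:  aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```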
+func (s *AvailabilityZone) SetParentZoneId(v string) *AvailabilityZone { + s.ParentZoneId = &v + return s +} + +// SetParentZoneName sets the ParentZoneName field's value. +func (s *AvailabilityZone) SetParentZoneName(v string) *AvailabilityZone { + s.ParentZoneName = &v + return s +} + // SetRegionName sets the RegionName field's value. func (s *AvailabilityZone) SetRegionName(v string) *AvailabilityZone { s.RegionName = &v @@ -40503,11 +42510,18 @@ func (s *AvailabilityZone) SetZoneName(v string) *AvailabilityZone { return s } -// Describes a message about an Availability Zone or Local Zone. +// SetZoneType sets the ZoneType field's value. +func (s *AvailabilityZone) SetZoneType(v string) *AvailabilityZone { + s.ZoneType = &v + return s +} + +// Describes a message about an Availability Zone, Local Zone, or Wavelength +// Zone. type AvailabilityZoneMessage struct { _ struct{} `type:"structure"` - // The message about the Availability Zone or Local Zone. + // The message about the Availability Zone, Local Zone, or Wavelength Zone. Message *string `locationName:"message" type:"string"` } @@ -41890,6 +43904,39 @@ func (s *CapacityReservation) SetTotalInstanceCount(v int64) *CapacityReservatio return s } +// Describes a resource group to which a Capacity Reservation has been added. +type CapacityReservationGroup struct { + _ struct{} `type:"structure"` + + // The ARN of the resource group. + GroupArn *string `locationName:"groupArn" type:"string"` + + // The ID of the AWS account that owns the resource group. + OwnerId *string `locationName:"ownerId" type:"string"` +} + +// String returns the string representation +func (s CapacityReservationGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CapacityReservationGroup) GoString() string { + return s.String() +} + +// SetGroupArn sets the GroupArn field's value. +func (s *CapacityReservationGroup) SetGroupArn(v string) *CapacityReservationGroup { + s.GroupArn = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *CapacityReservationGroup) SetOwnerId(v string) *CapacityReservationGroup { + s.OwnerId = &v + return s +} + // Describes the strategy for using unused Capacity Reservations for fulfilling // On-Demand capacity. // @@ -41988,7 +44035,7 @@ func (s *CapacityReservationOptionsRequest) SetUsageStrategy(v string) *Capacity // to run as an On-Demand Instance or to run in any open Capacity Reservation // that has matching attributes (instance type, platform, Availability Zone). // Use the CapacityReservationTarget parameter to explicitly target a specific -// Capacity Reservation. +// Capacity Reservation or a Capacity Reservation group. type CapacityReservationSpecification struct { _ struct{} `type:"structure"` @@ -42002,7 +44049,8 @@ type CapacityReservationSpecification struct { // one is available. The instance runs as an On-Demand Instance. CapacityReservationPreference *string `type:"string" enum:"CapacityReservationPreference"` - // Information about the target Capacity Reservation. + // Information about the target Capacity Reservation or Capacity Reservation + // group. CapacityReservationTarget *CapacityReservationTarget `type:"structure"` } @@ -42033,7 +44081,8 @@ func (s *CapacityReservationSpecification) SetCapacityReservationTarget(v *Capac // instance is configured to run in On-Demand capacity, or if it is configured // in run in any open Capacity Reservation that has matching attributes (instance // type, platform, Availability Zone). 
The action returns the capacityReservationTarget -// response element if the instance explicily targets a specific Capacity Reservation. +// response element if the instance explicily targets a specific Capacity Reservation +// or Capacity Reservation group. type CapacityReservationSpecificationResponse struct { _ struct{} `type:"structure"` @@ -42047,7 +44096,8 @@ type CapacityReservationSpecificationResponse struct { // one is available. The instance runs in On-Demand capacity. CapacityReservationPreference *string `locationName:"capacityReservationPreference" type:"string" enum:"CapacityReservationPreference"` - // Information about the targeted Capacity Reservation. + // Information about the targeted Capacity Reservation or Capacity Reservation + // group. CapacityReservationTarget *CapacityReservationTargetResponse `locationName:"capacityReservationTarget" type:"structure"` } @@ -42073,12 +44123,15 @@ func (s *CapacityReservationSpecificationResponse) SetCapacityReservationTarget( return s } -// Describes a target Capacity Reservation. +// Describes a target Capacity Reservation or Capacity Reservation group. type CapacityReservationTarget struct { _ struct{} `type:"structure"` - // The ID of the Capacity Reservation. + // The ID of the Capacity Reservation in which to run the instance. CapacityReservationId *string `type:"string"` + + // The ARN of the Capacity Reservation resource group in which to run the instance. + CapacityReservationResourceGroupArn *string `type:"string"` } // String returns the string representation @@ -42097,12 +44150,21 @@ func (s *CapacityReservationTarget) SetCapacityReservationId(v string) *Capacity return s } -// Describes a target Capacity Reservation. +// SetCapacityReservationResourceGroupArn sets the CapacityReservationResourceGroupArn field's value. +func (s *CapacityReservationTarget) SetCapacityReservationResourceGroupArn(v string) *CapacityReservationTarget { + s.CapacityReservationResourceGroupArn = &v + return s +} + +// Describes a target Capacity Reservation or Capacity Reservation group. type CapacityReservationTargetResponse struct { _ struct{} `type:"structure"` - // The ID of the Capacity Reservation. + // The ID of the targeted Capacity Reservation. CapacityReservationId *string `locationName:"capacityReservationId" type:"string"` + + // The ARN of the targeted Capacity Reservation group. + CapacityReservationResourceGroupArn *string `locationName:"capacityReservationResourceGroupArn" type:"string"` } // String returns the string representation @@ -42121,6 +44183,72 @@ func (s *CapacityReservationTargetResponse) SetCapacityReservationId(v string) * return s } +// SetCapacityReservationResourceGroupArn sets the CapacityReservationResourceGroupArn field's value. +func (s *CapacityReservationTargetResponse) SetCapacityReservationResourceGroupArn(v string) *CapacityReservationTargetResponse { + s.CapacityReservationResourceGroupArn = &v + return s +} + +// Describes a carrier gateway. +type CarrierGateway struct { + _ struct{} `type:"structure"` + + // The ID of the carrier gateway. + CarrierGatewayId *string `locationName:"carrierGatewayId" type:"string"` + + // The AWS account ID of the owner of the carrier gateway. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The state of the carrier gateway. + State *string `locationName:"state" type:"string" enum:"CarrierGatewayState"` + + // The tags assigned to the carrier gateway. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The ID of the VPC associated with the carrier gateway. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s CarrierGateway) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CarrierGateway) GoString() string { + return s.String() +} + +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *CarrierGateway) SetCarrierGatewayId(v string) *CarrierGateway { + s.CarrierGatewayId = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *CarrierGateway) SetOwnerId(v string) *CarrierGateway { + s.OwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *CarrierGateway) SetState(v string) *CarrierGateway { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CarrierGateway) SetTags(v []*Tag) *CarrierGateway { + s.Tags = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CarrierGateway) SetVpcId(v string) *CarrierGateway { + s.VpcId = &v + return s +} + // Information about the client certificate used for authentication. type CertificateAuthentication struct { _ struct{} `type:"structure"` @@ -42482,9 +44610,8 @@ func (s *ClientData) SetUploadStart(v time.Time) *ClientData { return s } -// Describes the authentication methods used by a Client VPN endpoint. Client -// VPN supports Active Directory and mutual authentication. For more information, -// see Authentication (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/authentication-authrization.html#client-authentication) +// Describes the authentication methods used by a Client VPN endpoint. For more +// information, see Authentication (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/authentication-authrization.html#client-authentication) // in the AWS Client VPN Administrator Guide. type ClientVpnAuthentication struct { _ struct{} `type:"structure"` @@ -42492,6 +44619,9 @@ type ClientVpnAuthentication struct { // Information about the Active Directory, if applicable. ActiveDirectory *DirectoryServiceAuthentication `locationName:"activeDirectory" type:"structure"` + // Information about the IAM SAML identity provider, if applicable. + FederatedAuthentication *FederatedAuthentication `locationName:"federatedAuthentication" type:"structure"` + // Information about the authentication certificates, if applicable. MutualAuthentication *CertificateAuthentication `locationName:"mutualAuthentication" type:"structure"` @@ -42515,6 +44645,12 @@ func (s *ClientVpnAuthentication) SetActiveDirectory(v *DirectoryServiceAuthenti return s } +// SetFederatedAuthentication sets the FederatedAuthentication field's value. +func (s *ClientVpnAuthentication) SetFederatedAuthentication(v *FederatedAuthentication) *ClientVpnAuthentication { + s.FederatedAuthentication = v + return s +} + // SetMutualAuthentication sets the MutualAuthentication field's value. func (s *ClientVpnAuthentication) SetMutualAuthentication(v *CertificateAuthentication) *ClientVpnAuthentication { s.MutualAuthentication = v @@ -42528,8 +44664,7 @@ func (s *ClientVpnAuthentication) SetType(v string) *ClientVpnAuthentication { } // Describes the authentication method to be used by a Client VPN endpoint. -// Client VPN supports Active Directory and mutual authentication. 
For more -// information, see Authentication (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/authentication-authrization.html#client-authentication) +// For more information, see Authentication (https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/authentication-authrization.html#client-authentication) // in the AWS Client VPN Administrator Guide. type ClientVpnAuthenticationRequest struct { _ struct{} `type:"structure"` @@ -42538,13 +44673,15 @@ type ClientVpnAuthenticationRequest struct { // provide this information if Type is directory-service-authentication. ActiveDirectory *DirectoryServiceAuthenticationRequest `type:"structure"` + // Information about the IAM SAML identity provider to be used, if applicable. + // You must provide this information if Type is federated-authentication. + FederatedAuthentication *FederatedAuthenticationRequest `type:"structure"` + // Information about the authentication certificates to be used, if applicable. // You must provide this information if Type is certificate-authentication. MutualAuthentication *CertificateAuthenticationRequest `type:"structure"` - // The type of client authentication to be used. Specify certificate-authentication - // to use certificate-based authentication, or directory-service-authentication - // to use Active Directory authentication. + // The type of client authentication to be used. Type *string `type:"string" enum:"ClientVpnAuthenticationType"` } @@ -42564,6 +44701,12 @@ func (s *ClientVpnAuthenticationRequest) SetActiveDirectory(v *DirectoryServiceA return s } +// SetFederatedAuthentication sets the FederatedAuthentication field's value. +func (s *ClientVpnAuthenticationRequest) SetFederatedAuthentication(v *FederatedAuthenticationRequest) *ClientVpnAuthenticationRequest { + s.FederatedAuthentication = v + return s +} + // SetMutualAuthentication sets the MutualAuthentication field's value. func (s *ClientVpnAuthenticationRequest) SetMutualAuthentication(v *CertificateAuthenticationRequest) *ClientVpnAuthenticationRequest { s.MutualAuthentication = v @@ -43189,6 +45332,9 @@ type CoipPool struct { // The ID of the local gateway route table. LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` + // The ARN of the address pool. + PoolArn *string `locationName:"poolArn" min:"1" type:"string"` + // The address ranges of the address pool. PoolCidrs []*string `locationName:"poolCidrSet" locationNameList:"item" type:"list"` @@ -43215,6 +45361,12 @@ func (s *CoipPool) SetLocalGatewayRouteTableId(v string) *CoipPool { return s } +// SetPoolArn sets the PoolArn field's value. +func (s *CoipPool) SetPoolArn(v string) *CoipPool { + s.PoolArn = &v + return s +} + // SetPoolCidrs sets the PoolCidrs field's value. func (s *CoipPool) SetPoolCidrs(v []*string) *CoipPool { s.PoolCidrs = v @@ -43335,7 +45487,8 @@ func (s *ConfirmProductInstanceOutput) SetReturn(v bool) *ConfirmProductInstance type ConnectionLogOptions struct { _ struct{} `type:"structure"` - // The name of the CloudWatch Logs log group. + // The name of the CloudWatch Logs log group. Required if connection logging + // is enabled. CloudwatchLogGroup *string `type:"string"` // The name of the CloudWatch Logs log stream to which the connection data is @@ -44136,8 +46289,6 @@ type CreateCapacityReservationInput struct { // Unique, case-sensitive identifier that you provide to ensure the idempotency // of the request. 
For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). - // - // Constraint: Maximum 64 ASCII characters. ClientToken *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -44365,6 +46516,98 @@ func (s *CreateCapacityReservationOutput) SetCapacityReservation(v *CapacityRese return s } +type CreateCarrierGatewayInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that you provide to ensure the idempotency + // of the request. For more information, see How to Ensure Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Run_Instance_Idempotency.html). + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The tags to associate with the carrier gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + + // The ID of the VPC to associate with the carrier gateway. + // + // VpcId is a required field + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateCarrierGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCarrierGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCarrierGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCarrierGatewayInput"} + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateCarrierGatewayInput) SetClientToken(v string) *CreateCarrierGatewayInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateCarrierGatewayInput) SetDryRun(v bool) *CreateCarrierGatewayInput { + s.DryRun = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateCarrierGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateCarrierGatewayInput { + s.TagSpecifications = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CreateCarrierGatewayInput) SetVpcId(v string) *CreateCarrierGatewayInput { + s.VpcId = &v + return s +} + +type CreateCarrierGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the carrier gateway. + CarrierGateway *CarrierGateway `locationName:"carrierGateway" type:"structure"` +} + +// String returns the string representation +func (s CreateCarrierGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCarrierGatewayOutput) GoString() string { + return s.String() +} + +// SetCarrierGateway sets the CarrierGateway field's value. 
+func (s *CreateCarrierGatewayOutput) SetCarrierGateway(v *CarrierGateway) *CreateCarrierGatewayOutput { + s.CarrierGateway = v + return s +} + type CreateClientVpnEndpointInput struct { _ struct{} `type:"structure"` @@ -44637,8 +46880,7 @@ type CreateClientVpnRouteInput struct { // * To add a route for an on-premises network, enter the AWS Site-to-Site // VPN connection's IPv4 CIDR range // - // Route address ranges cannot overlap with the CIDR range specified for client - // allocation. + // * To add a route for the local network, enter the client CIDR range // // DestinationCidrBlock is a required field DestinationCidrBlock *string `type:"string" required:"true"` @@ -44652,6 +46894,8 @@ type CreateClientVpnRouteInput struct { // The ID of the subnet through which you want to route traffic. The specified // subnet must be an existing target network of the Client VPN endpoint. // + // Alternatively, if you're adding a route for the local network, specify local. + // // TargetVpcSubnetId is a required field TargetVpcSubnetId *string `type:"string" required:"true"` } @@ -44773,6 +47017,9 @@ type CreateCustomerGatewayInput struct { // The address must be static. PublicIp *string `locationName:"IpAddress" type:"string"` + // The tags to apply to the customer gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The type of VPN connection that this customer gateway supports (ipsec.1). // // Type is a required field @@ -44835,6 +47082,12 @@ func (s *CreateCustomerGatewayInput) SetPublicIp(v string) *CreateCustomerGatewa return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateCustomerGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateCustomerGatewayInput { + s.TagSpecifications = v + return s +} + // SetType sets the Type field's value. func (s *CreateCustomerGatewayInput) SetType(v string) *CreateCustomerGatewayInput { s.Type = &v @@ -45000,6 +47253,9 @@ type CreateDhcpOptionsInput struct { // the required permissions, the error response is DryRunOperation. Otherwise, // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tags to assign to the DHCP option. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -45037,6 +47293,12 @@ func (s *CreateDhcpOptionsInput) SetDryRun(v bool) *CreateDhcpOptionsInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateDhcpOptionsInput) SetTagSpecifications(v []*TagSpecification) *CreateDhcpOptionsInput { + s.TagSpecifications = v + return s +} + type CreateDhcpOptionsOutput struct { _ struct{} `type:"structure"` @@ -45073,6 +47335,9 @@ type CreateEgressOnlyInternetGatewayInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The tags to assign to the egress-only internet gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC for which to create the egress-only internet gateway. // // VpcId is a required field @@ -45114,6 +47379,12 @@ func (s *CreateEgressOnlyInternetGatewayInput) SetDryRun(v bool) *CreateEgressOn return s } +// SetTagSpecifications sets the TagSpecifications field's value. 
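Returning to the carrier gateway types added above: CreateCarrierGatewayInput requires only a VpcId, supports tag-on-create via TagSpecifications, and carries an idempotency ClientToken the SDK can fill in. A minimal, hedged sketch follows; the VPC ID is a placeholder and the "carrier-gateway" resource-type string is an assumption about the tagging API, not something shown in this hunk.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	input := &ec2.CreateCarrierGatewayInput{
		VpcId: aws.String("vpc-0123456789abcdef0"), // placeholder VPC in a Wavelength-enabled Region
		TagSpecifications: []*ec2.TagSpecification{{
			ResourceType: aws.String("carrier-gateway"), // assumed resource-type value for tag-on-create
			Tags:         []*ec2.Tag{{Key: aws.String("Name"), Value: aws.String("example")}},
		}},
	}

	// Validate mirrors the generated check above: VpcId must be set.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}

	out, err := svc.CreateCarrierGateway(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.CarrierGateway.CarrierGatewayId))
}
```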
+func (s *CreateEgressOnlyInternetGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateEgressOnlyInternetGatewayInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateEgressOnlyInternetGatewayInput) SetVpcId(v string) *CreateEgressOnlyInternetGatewayInput { s.VpcId = &v @@ -45557,8 +47828,6 @@ type CreateFlowLogsInput struct { // // Specify the fields using the ${field-id} format, separated by spaces. For // the AWS CLI, use single quotation marks (' ') to surround the parameter value. - // - // Only applicable to flow logs that are published to an Amazon S3 bucket. LogFormat *string `type:"string"` // The name of a new or existing CloudWatch Logs log group where Amazon EC2 @@ -46002,7 +48271,7 @@ type CreateInstanceExportTaskInput struct { _ struct{} `type:"structure"` // A description for the conversion task or the resource being exported. The - // maximum length is 255 bytes. + // maximum length is 255 characters. Description *string `locationName:"description" type:"string"` // The format and location for an instance export task. @@ -46013,6 +48282,9 @@ type CreateInstanceExportTaskInput struct { // InstanceId is a required field InstanceId *string `locationName:"instanceId" type:"string" required:"true"` + // The tags to apply to the instance export task during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The target virtualization environment. TargetEnvironment *string `locationName:"targetEnvironment" type:"string" enum:"ExportEnvironment"` } @@ -46058,6 +48330,12 @@ func (s *CreateInstanceExportTaskInput) SetInstanceId(v string) *CreateInstanceE return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateInstanceExportTaskInput) SetTagSpecifications(v []*TagSpecification) *CreateInstanceExportTaskInput { + s.TagSpecifications = v + return s +} + // SetTargetEnvironment sets the TargetEnvironment field's value. func (s *CreateInstanceExportTaskInput) SetTargetEnvironment(v string) *CreateInstanceExportTaskInput { s.TargetEnvironment = &v @@ -46095,6 +48373,9 @@ type CreateInternetGatewayInput struct { // the required permissions, the error response is DryRunOperation. Otherwise, // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + + // The tags to assign to the internet gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -46113,6 +48394,12 @@ func (s *CreateInternetGatewayInput) SetDryRun(v bool) *CreateInternetGatewayInp return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateInternetGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateInternetGatewayInput { + s.TagSpecifications = v + return s +} + type CreateInternetGatewayOutput struct { _ struct{} `type:"structure"` @@ -46151,6 +48438,9 @@ type CreateKeyPairInput struct { // // KeyName is a required field KeyName *string `type:"string" required:"true"` + + // The tags to apply to the new key pair. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -46188,6 +48478,12 @@ func (s *CreateKeyPairInput) SetKeyName(v string) *CreateKeyPairInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *CreateKeyPairInput) SetTagSpecifications(v []*TagSpecification) *CreateKeyPairInput { + s.TagSpecifications = v + return s +} + // Describes a key pair. type CreateKeyPairOutput struct { _ struct{} `type:"structure"` @@ -46203,6 +48499,9 @@ type CreateKeyPairOutput struct { // The ID of the key pair. KeyPairId *string `locationName:"keyPairId" type:"string"` + + // Any tags applied to the key pair. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -46239,6 +48538,12 @@ func (s *CreateKeyPairOutput) SetKeyPairId(v string) *CreateKeyPairOutput { return s } +// SetTags sets the Tags field's value. +func (s *CreateKeyPairOutput) SetTags(v []*Tag) *CreateKeyPairOutput { + s.Tags = v + return s +} + type CreateLaunchTemplateInput struct { _ struct{} `type:"structure"` @@ -46346,6 +48651,11 @@ type CreateLaunchTemplateOutput struct { // Information about the launch template. LaunchTemplate *LaunchTemplate `locationName:"launchTemplate" type:"structure"` + + // If the launch template contains parameters or parameter combinations that + // are not valid, an error code and an error message are returned for each issue + // that's found. + Warning *ValidationWarning `locationName:"warning" type:"structure"` } // String returns the string representation @@ -46364,6 +48674,12 @@ func (s *CreateLaunchTemplateOutput) SetLaunchTemplate(v *LaunchTemplate) *Creat return s } +// SetWarning sets the Warning field's value. +func (s *CreateLaunchTemplateOutput) SetWarning(v *ValidationWarning) *CreateLaunchTemplateOutput { + s.Warning = v + return s +} + type CreateLaunchTemplateVersionInput struct { _ struct{} `type:"structure"` @@ -46481,6 +48797,11 @@ type CreateLaunchTemplateVersionOutput struct { // Information about the launch template version. LaunchTemplateVersion *LaunchTemplateVersion `locationName:"launchTemplateVersion" type:"structure"` + + // If the new version of the launch template contains parameters or parameter + // combinations that are not valid, an error code and an error message are returned + // for each issue that's found. + Warning *ValidationWarning `locationName:"warning" type:"structure"` } // String returns the string representation @@ -46499,6 +48820,12 @@ func (s *CreateLaunchTemplateVersionOutput) SetLaunchTemplateVersion(v *LaunchTe return s } +// SetWarning sets the Warning field's value. +func (s *CreateLaunchTemplateVersionOutput) SetWarning(v *ValidationWarning) *CreateLaunchTemplateVersionOutput { + s.Warning = v + return s +} + type CreateLocalGatewayRouteInput struct { _ struct{} `type:"structure"` @@ -46615,6 +48942,9 @@ type CreateLocalGatewayRouteTableVpcAssociationInput struct { // LocalGatewayRouteTableId is a required field LocalGatewayRouteTableId *string `type:"string" required:"true"` + // The tags to assign to the local gateway route table VPC association. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC. // // VpcId is a required field @@ -46659,6 +48989,12 @@ func (s *CreateLocalGatewayRouteTableVpcAssociationInput) SetLocalGatewayRouteTa return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateLocalGatewayRouteTableVpcAssociationInput) SetTagSpecifications(v []*TagSpecification) *CreateLocalGatewayRouteTableVpcAssociationInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. 
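The ValidationWarning field added to CreateLaunchTemplateOutput and CreateLaunchTemplateVersionOutput above reports parameter combinations the service accepts but flags as invalid. A hedged sketch of checking it after creating a template is shown here; the template name, AMI ID, and instance type are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.CreateLaunchTemplate(&ec2.CreateLaunchTemplateInput{
		LaunchTemplateName: aws.String("example-template"), // placeholder
		LaunchTemplateData: &ec2.RequestLaunchTemplateData{
			ImageId:      aws.String("ami-12345678"), // placeholder
			InstanceType: aws.String("t3.micro"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The new Warning field is non-nil when the template was created but
	// contains parameters or combinations the service flags as invalid.
	if out.Warning != nil {
		fmt.Println("launch template created with warnings:", out.Warning)
	}
}
```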
func (s *CreateLocalGatewayRouteTableVpcAssociationInput) SetVpcId(v string) *CreateLocalGatewayRouteTableVpcAssociationInput { s.VpcId = &v @@ -46688,6 +49024,151 @@ func (s *CreateLocalGatewayRouteTableVpcAssociationOutput) SetLocalGatewayRouteT return s } +type CreateManagedPrefixListInput struct { + _ struct{} `type:"structure"` + + // The IP address type. + // + // Valid Values: IPv4 | IPv6 + // + // AddressFamily is a required field + AddressFamily *string `type:"string" required:"true"` + + // Unique, case-sensitive identifier you provide to ensure the idempotency of + // the request. For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + // + // Constraints: Up to 255 UTF-8 characters in length. + ClientToken *string `type:"string" idempotencyToken:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more entries for the prefix list. + Entries []*AddPrefixListEntry `locationName:"Entry" type:"list"` + + // The maximum number of entries for the prefix list. + // + // MaxEntries is a required field + MaxEntries *int64 `type:"integer" required:"true"` + + // A name for the prefix list. + // + // Constraints: Up to 255 characters in length. The name cannot start with com.amazonaws. + // + // PrefixListName is a required field + PrefixListName *string `type:"string" required:"true"` + + // The tags to apply to the prefix list during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s CreateManagedPrefixListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateManagedPrefixListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateManagedPrefixListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateManagedPrefixListInput"} + if s.AddressFamily == nil { + invalidParams.Add(request.NewErrParamRequired("AddressFamily")) + } + if s.MaxEntries == nil { + invalidParams.Add(request.NewErrParamRequired("MaxEntries")) + } + if s.PrefixListName == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListName")) + } + if s.Entries != nil { + for i, v := range s.Entries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *CreateManagedPrefixListInput) SetAddressFamily(v string) *CreateManagedPrefixListInput { + s.AddressFamily = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateManagedPrefixListInput) SetClientToken(v string) *CreateManagedPrefixListInput { + s.ClientToken = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateManagedPrefixListInput) SetDryRun(v bool) *CreateManagedPrefixListInput { + s.DryRun = &v + return s +} + +// SetEntries sets the Entries field's value. 
+func (s *CreateManagedPrefixListInput) SetEntries(v []*AddPrefixListEntry) *CreateManagedPrefixListInput { + s.Entries = v + return s +} + +// SetMaxEntries sets the MaxEntries field's value. +func (s *CreateManagedPrefixListInput) SetMaxEntries(v int64) *CreateManagedPrefixListInput { + s.MaxEntries = &v + return s +} + +// SetPrefixListName sets the PrefixListName field's value. +func (s *CreateManagedPrefixListInput) SetPrefixListName(v string) *CreateManagedPrefixListInput { + s.PrefixListName = &v + return s +} + +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateManagedPrefixListInput) SetTagSpecifications(v []*TagSpecification) *CreateManagedPrefixListInput { + s.TagSpecifications = v + return s +} + +type CreateManagedPrefixListOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s CreateManagedPrefixListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateManagedPrefixListOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. +func (s *CreateManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *CreateManagedPrefixListOutput { + s.PrefixList = v + return s +} + type CreateNatGatewayInput struct { _ struct{} `type:"structure"` @@ -46812,6 +49293,8 @@ type CreateNetworkAclEntryInput struct { _ struct{} `type:"structure"` // The IPv4 network range to allow or deny, in CIDR notation (for example 172.16.0.0/24). + // We modify the specified CIDR block to its canonical form; for example, if + // you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. CidrBlock *string `locationName:"cidrBlock" type:"string"` // Checks whether you have the required permissions for the action, without @@ -46986,6 +49469,9 @@ type CreateNetworkAclInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // The tags to assign to the network ACL. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC. // // VpcId is a required field @@ -47021,6 +49507,12 @@ func (s *CreateNetworkAclInput) SetDryRun(v bool) *CreateNetworkAclInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateNetworkAclInput) SetTagSpecifications(v []*TagSpecification) *CreateNetworkAclInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateNetworkAclInput) SetVpcId(v string) *CreateNetworkAclInput { s.VpcId = &v @@ -47107,6 +49599,9 @@ type CreateNetworkInterfaceInput struct { // // SubnetId is a required field SubnetId *string `locationName:"subnetId" type:"string" required:"true"` + + // The tags to apply to the new network interface. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -47192,6 +49687,12 @@ func (s *CreateNetworkInterfaceInput) SetSubnetId(v string) *CreateNetworkInterf return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateNetworkInterfaceInput) SetTagSpecifications(v []*TagSpecification) *CreateNetworkInterfaceInput { + s.TagSpecifications = v + return s +} + // Contains the output of CreateNetworkInterface. 
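Pulling together the managed prefix list types earlier in this hunk, here is a minimal sketch of CreateManagedPrefixListInput as defined above: AddressFamily, MaxEntries, and PrefixListName are required, and Entries is built from AddPrefixListEntry values that Validate checks recursively. All identifiers and CIDR ranges are illustrative only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	input := &ec2.CreateManagedPrefixListInput{
		AddressFamily:  aws.String("IPv4"),
		MaxEntries:     aws.Int64(10),
		PrefixListName: aws.String("example-internal-ranges"), // must not start with com.amazonaws
		Entries: []*ec2.AddPrefixListEntry{
			{Cidr: aws.String("10.0.0.0/16"), Description: aws.String("example VPC")},
			{Cidr: aws.String("192.168.0.0/24"), Description: aws.String("example office range")},
		},
	}

	// Validate runs the client-side checks generated above, including the
	// nested validation of each AddPrefixListEntry.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}

	out, err := svc.CreateManagedPrefixList(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.PrefixList)
}
```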
type CreateNetworkInterfaceOutput struct { _ struct{} `type:"structure"` @@ -47343,6 +49844,9 @@ type CreatePlacementGroupInput struct { // The placement strategy. Strategy *string `locationName:"strategy" type:"string" enum:"PlacementStrategy"` + + // The tags to apply to the new placement group. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -47379,8 +49883,17 @@ func (s *CreatePlacementGroupInput) SetStrategy(v string) *CreatePlacementGroupI return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreatePlacementGroupInput) SetTagSpecifications(v []*TagSpecification) *CreatePlacementGroupInput { + s.TagSpecifications = v + return s +} + type CreatePlacementGroupOutput struct { _ struct{} `type:"structure"` + + // Describes a placement group. + PlacementGroup *PlacementGroup `locationName:"placementGroup" type:"structure"` } // String returns the string representation @@ -47393,6 +49906,12 @@ func (s CreatePlacementGroupOutput) GoString() string { return s.String() } +// SetPlacementGroup sets the PlacementGroup field's value. +func (s *CreatePlacementGroupOutput) SetPlacementGroup(v *PlacementGroup) *CreatePlacementGroupOutput { + s.PlacementGroup = v + return s +} + // Contains the parameters for CreateReservedInstancesListing. type CreateReservedInstancesListingInput struct { _ struct{} `type:"structure"` @@ -47507,14 +50026,25 @@ func (s *CreateReservedInstancesListingOutput) SetReservedInstancesListings(v [] type CreateRouteInput struct { _ struct{} `type:"structure"` + // The ID of the carrier gateway. + // + // You can only use this option when the VPC contains a subnet which is associated + // with a Wavelength Zone. + CarrierGatewayId *string `type:"string"` + // The IPv4 CIDR address block used for the destination match. Routing decisions - // are based on the most specific match. + // are based on the most specific match. We modify the specified CIDR block + // to its canonical form; for example, if you specify 100.68.0.18/18, we modify + // it to 100.68.0.0/18. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` // The IPv6 CIDR block used for the destination match. Routing decisions are // based on the most specific match. DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` + // The ID of a prefix list used for the destination match. + DestinationPrefixListId *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -47576,6 +50106,12 @@ func (s *CreateRouteInput) Validate() error { return nil } +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *CreateRouteInput) SetCarrierGatewayId(v string) *CreateRouteInput { + s.CarrierGatewayId = &v + return s +} + // SetDestinationCidrBlock sets the DestinationCidrBlock field's value. func (s *CreateRouteInput) SetDestinationCidrBlock(v string) *CreateRouteInput { s.DestinationCidrBlock = &v @@ -47588,6 +50124,12 @@ func (s *CreateRouteInput) SetDestinationIpv6CidrBlock(v string) *CreateRouteInp return s } +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. 
+func (s *CreateRouteInput) SetDestinationPrefixListId(v string) *CreateRouteInput { + s.DestinationPrefixListId = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *CreateRouteInput) SetDryRun(v bool) *CreateRouteInput { s.DryRun = &v @@ -47680,6 +50222,9 @@ type CreateRouteTableInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // The tags to assign to the route table. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC. // // VpcId is a required field @@ -47715,6 +50260,12 @@ func (s *CreateRouteTableInput) SetDryRun(v bool) *CreateRouteTableInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateRouteTableInput) SetTagSpecifications(v []*TagSpecification) *CreateRouteTableInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateRouteTableInput) SetVpcId(v string) *CreateRouteTableInput { s.VpcId = &v @@ -47775,6 +50326,9 @@ type CreateSecurityGroupInput struct { // GroupName is a required field GroupName *string `type:"string" required:"true"` + // The tags to assign to the security group. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // [EC2-VPC] The ID of the VPC. Required for EC2-VPC. VpcId *string `type:"string"` } @@ -47823,6 +50377,12 @@ func (s *CreateSecurityGroupInput) SetGroupName(v string) *CreateSecurityGroupIn return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateSecurityGroupInput) SetTagSpecifications(v []*TagSpecification) *CreateSecurityGroupInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateSecurityGroupInput) SetVpcId(v string) *CreateSecurityGroupInput { s.VpcId = &v @@ -47834,6 +50394,9 @@ type CreateSecurityGroupOutput struct { // The ID of the security group. GroupId *string `locationName:"groupId" type:"string"` + + // The tags assigned to the security group. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -47852,6 +50415,12 @@ func (s *CreateSecurityGroupOutput) SetGroupId(v string) *CreateSecurityGroupOut return s } +// SetTags sets the Tags field's value. +func (s *CreateSecurityGroupOutput) SetTags(v []*Tag) *CreateSecurityGroupOutput { + s.Tags = v + return s +} + type CreateSnapshotInput struct { _ struct{} `type:"structure"` @@ -48116,12 +50685,17 @@ type CreateSubnetInput struct { // for example us-west-2-lax-1a. For information about the Regions that support // Local Zones, see Available Regions (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) // in the Amazon Elastic Compute Cloud User Guide. + // + // To create a subnet in an Outpost, set this value to the Availability Zone + // for the Outpost and specify the Outpost ARN. AvailabilityZone *string `type:"string"` // The AZ ID or the Local Zone ID of the subnet. AvailabilityZoneId *string `type:"string"` // The IPv4 network range for the subnet, in CIDR notation. For example, 10.0.0.0/24. + // We modify the specified CIDR block to its canonical form; for example, if + // you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. 
// // CidrBlock is a required field CidrBlock *string `type:"string" required:"true"` @@ -48136,9 +50710,13 @@ type CreateSubnetInput struct { // must use a /64 prefix length. Ipv6CidrBlock *string `type:"string"` - // The Amazon Resource Name (ARN) of the Outpost. + // The Amazon Resource Name (ARN) of the Outpost. If you specify an Outpost + // ARN, you must also specify the Availability Zone of the Outpost subnet. OutpostArn *string `type:"string"` + // The tags to assign to the subnet. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the VPC. // // VpcId is a required field @@ -48207,6 +50785,12 @@ func (s *CreateSubnetInput) SetOutpostArn(v string) *CreateSubnetInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateSubnetInput) SetTagSpecifications(v []*TagSpecification) *CreateSubnetInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateSubnetInput) SetVpcId(v string) *CreateSubnetInput { s.VpcId = &v @@ -49183,6 +51767,111 @@ func (s *CreateTransitGatewayPeeringAttachmentOutput) SetTransitGatewayPeeringAt return s } +type CreateTransitGatewayPrefixListReferenceInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to drop traffic that matches this route. + Blackhole *bool `type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list that is used for destination matches. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The ID of the attachment to which traffic is routed. + TransitGatewayAttachmentId *string `type:"string"` + + // The ID of the transit gateway route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateTransitGatewayPrefixListReferenceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTransitGatewayPrefixListReferenceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTransitGatewayPrefixListReferenceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTransitGatewayPrefixListReferenceInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlackhole sets the Blackhole field's value. +func (s *CreateTransitGatewayPrefixListReferenceInput) SetBlackhole(v bool) *CreateTransitGatewayPrefixListReferenceInput { + s.Blackhole = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *CreateTransitGatewayPrefixListReferenceInput) SetDryRun(v bool) *CreateTransitGatewayPrefixListReferenceInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. 
+func (s *CreateTransitGatewayPrefixListReferenceInput) SetPrefixListId(v string) *CreateTransitGatewayPrefixListReferenceInput { + s.PrefixListId = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *CreateTransitGatewayPrefixListReferenceInput) SetTransitGatewayAttachmentId(v string) *CreateTransitGatewayPrefixListReferenceInput { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *CreateTransitGatewayPrefixListReferenceInput) SetTransitGatewayRouteTableId(v string) *CreateTransitGatewayPrefixListReferenceInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type CreateTransitGatewayPrefixListReferenceOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list reference. + TransitGatewayPrefixListReference *TransitGatewayPrefixListReference `locationName:"transitGatewayPrefixListReference" type:"structure"` +} + +// String returns the string representation +func (s CreateTransitGatewayPrefixListReferenceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTransitGatewayPrefixListReferenceOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayPrefixListReference sets the TransitGatewayPrefixListReference field's value. +func (s *CreateTransitGatewayPrefixListReferenceOutput) SetTransitGatewayPrefixListReference(v *TransitGatewayPrefixListReference) *CreateTransitGatewayPrefixListReferenceOutput { + s.TransitGatewayPrefixListReference = v + return s +} + type CreateTransitGatewayRouteInput struct { _ struct{} `type:"structure"` @@ -49543,22 +52232,22 @@ type CreateVolumeInput struct { // Specifies whether the volume should be encrypted. The effect of setting the // encryption state to true depends on the volume origin (new or from a snapshot), // starting encryption state, ownership, and whether encryption by default is - // enabled. For more information, see Encryption by Default (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) + // enabled. For more information, see Encryption by default (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default) // in the Amazon Elastic Compute Cloud User Guide. // // Encrypted Amazon EBS volumes must be attached to instances that support Amazon - // EBS encryption. For more information, see Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). + // EBS encryption. For more information, see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). Encrypted *bool `locationName:"encrypted" type:"boolean"` - // The number of I/O operations per second (IOPS) to provision for the volume, - // with a maximum ratio of 50 IOPS/GiB. Range is 100 to 64,000 IOPS for volumes - // in most Regions. Maximum IOPS of 64,000 is guaranteed only on Nitro-based - // instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // The number of I/O operations per second (IOPS) to provision for an io1 or + // io2 volume, with a maximum ratio of 50 IOPS/GiB for io1, and 500 IOPS/GiB + // for io2. Range is 100 to 64,000 IOPS for volumes in most Regions. 
Maximum + // IOPS of 64,000 is guaranteed only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). // Other instance families guarantee performance up to 32,000 IOPS. For more - // information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // - // This parameter is valid only for Provisioned IOPS SSD (io1) volumes. + // This parameter is valid only for Provisioned IOPS SSD (io1 and io2) volumes. Iops *int64 `type:"integer"` // The identifier of the AWS Key Management Service (AWS KMS) customer master @@ -49594,9 +52283,9 @@ type CreateVolumeInput struct { // The size of the volume, in GiBs. You must specify either a snapshot ID or // a volume size. // - // Constraints: 1-16,384 for gp2, 4-16,384 for io1, 500-16,384 for st1, 500-16,384 - // for sc1, and 1-1,024 for standard. If you specify a snapshot, the volume - // size must be equal to or larger than the snapshot size. + // Constraints: 1-16,384 for gp2, 4-16,384 for io1 and io2, 500-16,384 for st1, + // 500-16,384 for sc1, and 1-1,024 for standard. If you specify a snapshot, + // the volume size must be equal to or larger than the snapshot size. // // Default: If you're creating the volume from a snapshot and don't specify // a volume size, the default is the snapshot size. @@ -49609,9 +52298,9 @@ type CreateVolumeInput struct { // The tags to apply to the volume during creation. TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` - // The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned - // IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard - // for Magnetic volumes. + // The volume type. This can be gp2 for General Purpose SSD, io1 or io2 for + // Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, + // or standard for Magnetic volumes. // // Default: gp2 VolumeType *string `type:"string" enum:"VolumeType"` @@ -50221,6 +52910,8 @@ type CreateVpcInput struct { AmazonProvidedIpv6CidrBlock *bool `locationName:"amazonProvidedIpv6CidrBlock" type:"boolean"` // The IPv4 network range for the VPC, in CIDR notation. For example, 10.0.0.0/16. + // We modify the specified CIDR block to its canonical form; for example, if + // you specify 100.68.0.18/18, we modify it to 100.68.0.0/18. // // CidrBlock is a required field CidrBlock *string `type:"string" required:"true"` @@ -50257,6 +52948,9 @@ type CreateVpcInput struct { // The ID of an IPv6 address pool from which to allocate the IPv6 CIDR block. Ipv6Pool *string `type:"string"` + + // The tags to assign to the VPC. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -50324,6 +53018,12 @@ func (s *CreateVpcInput) SetIpv6Pool(v string) *CreateVpcInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpcInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcInput { + s.TagSpecifications = v + return s +} + type CreateVpcOutput struct { _ struct{} `type:"structure"` @@ -50371,6 +53071,9 @@ type CreateVpcPeeringConnectionInput struct { // You must specify this parameter in the request. 
PeerVpcId *string `locationName:"peerVpcId" type:"string"` + // The tags to assign to the peering connection. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the requester VPC. You must specify this parameter in the request. VpcId *string `locationName:"vpcId" type:"string"` } @@ -50409,6 +53112,12 @@ func (s *CreateVpcPeeringConnectionInput) SetPeerVpcId(v string) *CreateVpcPeeri return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpcPeeringConnectionInput) SetTagSpecifications(v []*TagSpecification) *CreateVpcPeeringConnectionInput { + s.TagSpecifications = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *CreateVpcPeeringConnectionInput) SetVpcId(v string) *CreateVpcPeeringConnectionInput { s.VpcId = &v @@ -50456,6 +53165,9 @@ type CreateVpnConnectionInput struct { // The options for the VPN connection. Options *VpnConnectionOptionsSpecification `locationName:"options" type:"structure"` + // The tags to apply to the VPN connection. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The ID of the transit gateway. If you specify a transit gateway, you cannot // specify a virtual private gateway. TransitGatewayId *string `type:"string"` @@ -50514,6 +53226,12 @@ func (s *CreateVpnConnectionInput) SetOptions(v *VpnConnectionOptionsSpecificati return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpnConnectionInput) SetTagSpecifications(v []*TagSpecification) *CreateVpnConnectionInput { + s.TagSpecifications = v + return s +} + // SetTransitGatewayId sets the TransitGatewayId field's value. func (s *CreateVpnConnectionInput) SetTransitGatewayId(v string) *CreateVpnConnectionInput { s.TransitGatewayId = &v @@ -50643,6 +53361,9 @@ type CreateVpnGatewayInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // The tags to apply to the virtual private gateway. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The type of VPN connection this virtual private gateway supports. // // Type is a required field @@ -50690,6 +53411,12 @@ func (s *CreateVpnGatewayInput) SetDryRun(v bool) *CreateVpnGatewayInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateVpnGatewayInput) SetTagSpecifications(v []*TagSpecification) *CreateVpnGatewayInput { + s.TagSpecifications = v + return s +} + // SetType sets the Type field's value. func (s *CreateVpnGatewayInput) SetType(v string) *CreateVpnGatewayInput { s.Type = &v @@ -50720,12 +53447,12 @@ func (s *CreateVpnGatewayOutput) SetVpnGateway(v *VpnGateway) *CreateVpnGatewayO return s } -// Describes the credit option for CPU usage of a T2 or T3 instance. +// Describes the credit option for CPU usage of a T2, T3, or T3a instance. type CreditSpecification struct { _ struct{} `type:"structure"` - // The credit option for CPU usage of a T2 or T3 instance. Valid values are - // standard and unlimited. + // The credit option for CPU usage of a T2, T3, or T3a instance. Valid values + // are standard and unlimited. CpuCredits *string `locationName:"cpuCredits" type:"string"` } @@ -50745,12 +53472,12 @@ func (s *CreditSpecification) SetCpuCredits(v string) *CreditSpecification { return s } -// The credit option for CPU usage of a T2 or T3 instance. 
+// The credit option for CPU usage of a T2, T3, or T3a instance. type CreditSpecificationRequest struct { _ struct{} `type:"structure"` - // The credit option for CPU usage of a T2 or T3 instance. Valid values are - // standard and unlimited. + // The credit option for CPU usage of a T2, T3, or T3a instance. Valid values + // are standard and unlimited. // // CpuCredits is a required field CpuCredits *string `type:"string" required:"true"` @@ -50874,6 +53601,79 @@ func (s *CustomerGateway) SetType(v string) *CustomerGateway { return s } +type DeleteCarrierGatewayInput struct { + _ struct{} `type:"structure"` + + // The ID of the carrier gateway. + // + // CarrierGatewayId is a required field + CarrierGatewayId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DeleteCarrierGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCarrierGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCarrierGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCarrierGatewayInput"} + if s.CarrierGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("CarrierGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *DeleteCarrierGatewayInput) SetCarrierGatewayId(v string) *DeleteCarrierGatewayInput { + s.CarrierGatewayId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteCarrierGatewayInput) SetDryRun(v bool) *DeleteCarrierGatewayInput { + s.DryRun = &v + return s +} + +type DeleteCarrierGatewayOutput struct { + _ struct{} `type:"structure"` + + // Information about the carrier gateway. + CarrierGateway *CarrierGateway `locationName:"carrierGateway" type:"structure"` +} + +// String returns the string representation +func (s DeleteCarrierGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCarrierGatewayOutput) GoString() string { + return s.String() +} + +// SetCarrierGateway sets the CarrierGateway field's value. +func (s *DeleteCarrierGatewayOutput) SetCarrierGateway(v *CarrierGateway) *DeleteCarrierGatewayOutput { + s.CarrierGateway = v + return s +} + type DeleteClientVpnEndpointInput struct { _ struct{} `type:"structure"` @@ -51672,9 +54472,10 @@ type DeleteKeyPairInput struct { DryRun *bool `locationName:"dryRun" type:"boolean"` // The name of the key pair. - // - // KeyName is a required field - KeyName *string `type:"string" required:"true"` + KeyName *string `type:"string"` + + // The ID of the key pair. + KeyPairId *string `type:"string"` } // String returns the string representation @@ -51687,19 +54488,6 @@ func (s DeleteKeyPairInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteKeyPairInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteKeyPairInput"} - if s.KeyName == nil { - invalidParams.Add(request.NewErrParamRequired("KeyName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - // SetDryRun sets the DryRun field's value. func (s *DeleteKeyPairInput) SetDryRun(v bool) *DeleteKeyPairInput { s.DryRun = &v @@ -51712,6 +54500,12 @@ func (s *DeleteKeyPairInput) SetKeyName(v string) *DeleteKeyPairInput { return s } +// SetKeyPairId sets the KeyPairId field's value. +func (s *DeleteKeyPairInput) SetKeyPairId(v string) *DeleteKeyPairInput { + s.KeyPairId = &v + return s +} + type DeleteKeyPairOutput struct { _ struct{} `type:"structure"` } @@ -52166,6 +54960,79 @@ func (s *DeleteLocalGatewayRouteTableVpcAssociationOutput) SetLocalGatewayRouteT return s } +type DeleteManagedPrefixListInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteManagedPrefixListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteManagedPrefixListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteManagedPrefixListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteManagedPrefixListInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteManagedPrefixListInput) SetDryRun(v bool) *DeleteManagedPrefixListInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *DeleteManagedPrefixListInput) SetPrefixListId(v string) *DeleteManagedPrefixListInput { + s.PrefixListId = &v + return s +} + +type DeleteManagedPrefixListOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s DeleteManagedPrefixListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteManagedPrefixListOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. +func (s *DeleteManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *DeleteManagedPrefixListOutput { + s.PrefixList = v + return s +} + type DeleteNatGatewayInput struct { _ struct{} `type:"structure"` @@ -52739,6 +55606,9 @@ type DeleteRouteInput struct { // for the route exactly. DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` + // The ID of the prefix list for the route. + DestinationPrefixListId *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. 
If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -52786,6 +55656,12 @@ func (s *DeleteRouteInput) SetDestinationIpv6CidrBlock(v string) *DeleteRouteInp return s } +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. +func (s *DeleteRouteInput) SetDestinationPrefixListId(v string) *DeleteRouteInput { + s.DestinationPrefixListId = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *DeleteRouteInput) SetDryRun(v bool) *DeleteRouteInput { s.DryRun = &v @@ -53698,6 +56574,93 @@ func (s *DeleteTransitGatewayPeeringAttachmentOutput) SetTransitGatewayPeeringAt return s } +type DeleteTransitGatewayPrefixListReferenceInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The ID of the route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTransitGatewayPrefixListReferenceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayPrefixListReferenceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTransitGatewayPrefixListReferenceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTransitGatewayPrefixListReferenceInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DeleteTransitGatewayPrefixListReferenceInput) SetDryRun(v bool) *DeleteTransitGatewayPrefixListReferenceInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *DeleteTransitGatewayPrefixListReferenceInput) SetPrefixListId(v string) *DeleteTransitGatewayPrefixListReferenceInput { + s.PrefixListId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *DeleteTransitGatewayPrefixListReferenceInput) SetTransitGatewayRouteTableId(v string) *DeleteTransitGatewayPrefixListReferenceInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type DeleteTransitGatewayPrefixListReferenceOutput struct { + _ struct{} `type:"structure"` + + // Information about the deleted prefix list reference. 
+ TransitGatewayPrefixListReference *TransitGatewayPrefixListReference `locationName:"transitGatewayPrefixListReference" type:"structure"` +} + +// String returns the string representation +func (s DeleteTransitGatewayPrefixListReferenceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTransitGatewayPrefixListReferenceOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayPrefixListReference sets the TransitGatewayPrefixListReference field's value. +func (s *DeleteTransitGatewayPrefixListReferenceOutput) SetTransitGatewayPrefixListReference(v *TransitGatewayPrefixListReference) *DeleteTransitGatewayPrefixListReferenceOutput { + s.TransitGatewayPrefixListReference = v + return s +} + type DeleteTransitGatewayRouteInput struct { _ struct{} `type:"structure"` @@ -54689,6 +57652,101 @@ func (s DeregisterImageOutput) GoString() string { return s.String() } +type DeregisterInstanceEventNotificationAttributesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Information about the tag keys to deregister. + InstanceTagAttribute *DeregisterInstanceTagAttributeRequest `type:"structure"` +} + +// String returns the string representation +func (s DeregisterInstanceEventNotificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstanceEventNotificationAttributesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DeregisterInstanceEventNotificationAttributesInput) SetDryRun(v bool) *DeregisterInstanceEventNotificationAttributesInput { + s.DryRun = &v + return s +} + +// SetInstanceTagAttribute sets the InstanceTagAttribute field's value. +func (s *DeregisterInstanceEventNotificationAttributesInput) SetInstanceTagAttribute(v *DeregisterInstanceTagAttributeRequest) *DeregisterInstanceEventNotificationAttributesInput { + s.InstanceTagAttribute = v + return s +} + +type DeregisterInstanceEventNotificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // The resulting set of tag keys. + InstanceTagAttribute *InstanceTagNotificationAttribute `locationName:"instanceTagAttribute" type:"structure"` +} + +// String returns the string representation +func (s DeregisterInstanceEventNotificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstanceEventNotificationAttributesOutput) GoString() string { + return s.String() +} + +// SetInstanceTagAttribute sets the InstanceTagAttribute field's value. +func (s *DeregisterInstanceEventNotificationAttributesOutput) SetInstanceTagAttribute(v *InstanceTagNotificationAttribute) *DeregisterInstanceEventNotificationAttributesOutput { + s.InstanceTagAttribute = v + return s +} + +// Information about the tag keys to deregister for the current Region. You +// can either specify individual tag keys or deregister all tag keys in the +// current Region. 
You must specify either IncludeAllTagsOfInstance or InstanceTagKeys +// in the request +type DeregisterInstanceTagAttributeRequest struct { + _ struct{} `type:"structure"` + + // Indicates whether to deregister all tag keys in the current Region. Specify + // false to deregister all tag keys. + IncludeAllTagsOfInstance *bool `type:"boolean"` + + // Information about the tag keys to deregister. + InstanceTagKeys []*string `locationName:"InstanceTagKey" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DeregisterInstanceTagAttributeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterInstanceTagAttributeRequest) GoString() string { + return s.String() +} + +// SetIncludeAllTagsOfInstance sets the IncludeAllTagsOfInstance field's value. +func (s *DeregisterInstanceTagAttributeRequest) SetIncludeAllTagsOfInstance(v bool) *DeregisterInstanceTagAttributeRequest { + s.IncludeAllTagsOfInstance = &v + return s +} + +// SetInstanceTagKeys sets the InstanceTagKeys field's value. +func (s *DeregisterInstanceTagAttributeRequest) SetInstanceTagKeys(v []*string) *DeregisterInstanceTagAttributeRequest { + s.InstanceTagKeys = v + return s +} + type DeregisterTransitGatewayMulticastGroupMembersInput struct { _ struct{} `type:"structure"` @@ -54923,7 +57981,8 @@ type DescribeAddressesInput struct { // * instance-id - The ID of the instance the address is associated with, // if any. // - // * network-border-group - The location from where the IP address is advertised. + // * network-border-group - A unique set of Availability Zones, Local Zones, + // or Wavelength Zones from where AWS advertises IP addresses. // // * network-interface-id - [EC2-VPC] The ID of the network interface that // the address is associated with, if any. @@ -54933,7 +57992,7 @@ type DescribeAddressesInput struct { // * private-ip-address - [EC2-VPC] The private IP address associated with // the Elastic IP address. // - // * public-ip - The Elastic IP address. + // * public-ip - The Elastic IP address, or the carrier IP address. // // * tag: - The key/value combination of a tag assigned to the resource. // Use the tag key in the filter name and the tag value as the filter value. @@ -55072,8 +58131,8 @@ func (s *DescribeAggregateIdFormatOutput) SetUseLongIdsAggregated(v bool) *Descr type DescribeAvailabilityZonesInput struct { _ struct{} `type:"structure"` - // Include all Availability Zones and Local Zones regardless of your opt in - // status. + // Include all Availability Zones, Local Zones, and Wavelength Zones regardless + // of your opt-in status. // // If you do not use this parameter, the results include only the zones for // the Regions where you have chosen the option to opt in. @@ -55089,29 +58148,41 @@ type DescribeAvailabilityZonesInput struct { // // * group-name - For Availability Zones, use the Region name. For Local // Zones, use the name of the group associated with the Local Zone (for example, - // us-west-2-lax-1). + // us-west-2-lax-1) For Wavelength Zones, use the name of the group associated + // with the Wavelength Zone (for example, us-east-1-wl1-bos-wlz-1). // - // * message - The Availability Zone or Local Zone message. + // * message - The Zone message. // - // * opt-in-status - The opt in status (opted-in, and not-opted-in | opt-in-not-required). + // * opt-in-status - The opt-in status (opted-in, and not-opted-in | opt-in-not-required). 
// - // * region-name - The name of the Region for the Availability Zone or Local - // Zone (for example, us-east-1). + // * parent-zoneID - The ID of the zone that handles some of the Local Zone + // and Wavelength Zone control plane operations, such as API calls. // - // * state - The state of the Availability Zone or Local Zone (available - // | information | impaired | unavailable). + // * parent-zoneName - The ID of the zone that handles some of the Local + // Zone and Wavelength Zone control plane operations, such as API calls. // - // * zone-id - The ID of the Availability Zone (for example, use1-az1) or - // the Local Zone (for example, use usw2-lax1-az1). + // * region-name - The name of the Region for the Zone (for example, us-east-1). // - // * zone-name - The name of the Availability Zone (for example, us-east-1a) - // or the Local Zone (for example, use us-west-2-lax-1a). + // * state - The state of the Availability Zone, the Local Zone, or the Wavelength + // Zone (available | information | impaired | unavailable). + // + // * zone-id - The ID of the Availability Zone (for example, use1-az1), the + // Local Zone (for example, usw2-lax1-az1), or the Wavelength Zone (for example, + // us-east-1-wl1-bos-wlz-1). + // + // * zone-type - The type of zone, for example, local-zone. + // + // * zone-name - The name of the Availability Zone (for example, us-east-1a), + // the Local Zone (for example, us-west-2-lax-1a), or the Wavelength Zone + // (for example, us-east-1-wl1-bos-wlz-1). + // + // * zone-type - The type of zone, for example, local-zone. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` - // The IDs of the Availability Zones and Local Zones. + // The IDs of the Availability Zones, Local Zones, and Wavelength Zones. ZoneIds []*string `locationName:"ZoneId" locationNameList:"ZoneId" type:"list"` - // The names of the Availability Zones and Local Zones. + // The names of the Availability Zones, Local Zones, and Wavelength Zones. ZoneNames []*string `locationName:"ZoneName" locationNameList:"ZoneName" type:"list"` } @@ -55158,7 +58229,7 @@ func (s *DescribeAvailabilityZonesInput) SetZoneNames(v []*string) *DescribeAvai type DescribeAvailabilityZonesOutput struct { _ struct{} `type:"structure"` - // Information about the Availability Zones and Local Zones. + // Information about the Availability Zones, Local Zones, and Wavelength Zones. AvailabilityZones []*AvailabilityZone `locationName:"availabilityZoneInfo" locationNameList:"item" type:"list"` } @@ -55433,10 +58504,11 @@ type DescribeCapacityReservationsInput struct { // The maximum number of results to return for the request in a single page. // The remaining results can be seen by sending another request with the returned - // nextToken value. + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. MaxResults *int64 `min:"1" type:"integer"` - // The token to retrieve the next page of results. + // The token to use to retrieve the next page of results. NextToken *string `type:"string"` } @@ -55526,6 +58598,134 @@ func (s *DescribeCapacityReservationsOutput) SetNextToken(v string) *DescribeCap return s } +type DescribeCarrierGatewaysInput struct { + _ struct{} `type:"structure"` + + // One or more carrier gateway IDs. 
+ CarrierGatewayIds []*string `locationName:"CarrierGatewayId" type:"list"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * carrier-gateway-id - The ID of the carrier gateway. + // + // * state - The state of the carrier gateway (pending | failed | available + // | deleting | deleted). + // + // * owner-id - The AWS account ID of the owner of the carrier gateway. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * vpc-id - The ID of the VPC associated with the carrier gateway. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCarrierGatewaysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCarrierGatewaysInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCarrierGatewaysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCarrierGatewaysInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCarrierGatewayIds sets the CarrierGatewayIds field's value. +func (s *DescribeCarrierGatewaysInput) SetCarrierGatewayIds(v []*string) *DescribeCarrierGatewaysInput { + s.CarrierGatewayIds = v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeCarrierGatewaysInput) SetDryRun(v bool) *DescribeCarrierGatewaysInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeCarrierGatewaysInput) SetFilters(v []*Filter) *DescribeCarrierGatewaysInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeCarrierGatewaysInput) SetMaxResults(v int64) *DescribeCarrierGatewaysInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeCarrierGatewaysInput) SetNextToken(v string) *DescribeCarrierGatewaysInput { + s.NextToken = &v + return s +} + +type DescribeCarrierGatewaysOutput struct { + _ struct{} `type:"structure"` + + // Information about the carrier gateway. + CarrierGateways []*CarrierGateway `locationName:"carrierGatewaySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. 
This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s DescribeCarrierGatewaysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCarrierGatewaysOutput) GoString() string { + return s.String() +} + +// SetCarrierGateways sets the CarrierGateways field's value. +func (s *DescribeCarrierGatewaysOutput) SetCarrierGateways(v []*CarrierGateway) *DescribeCarrierGatewaysOutput { + s.CarrierGateways = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeCarrierGatewaysOutput) SetNextToken(v string) *DescribeCarrierGatewaysOutput { + s.NextToken = &v + return s +} + type DescribeClassicLinkInstancesInput struct { _ struct{} `type:"structure"` @@ -57099,10 +60299,11 @@ type DescribeFastSnapshotRestoreSuccessItem struct { // The time at which fast snapshot restores entered the optimizing state. OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` - // The alias of the snapshot owner. + // The AWS owner alias that enabled fast snapshot restores on the snapshot. + // This is intended for future use. OwnerAlias *string `locationName:"ownerAlias" type:"string"` - // The ID of the AWS account that owns the snapshot. + // The ID of the AWS account that enabled fast snapshot restores on the snapshot. OwnerId *string `locationName:"ownerId" type:"string"` // The ID of the snapshot. @@ -57210,7 +60411,8 @@ type DescribeFastSnapshotRestoresInput struct { // // * availability-zone: The Availability Zone of the snapshot. // - // * owner-id: The ID of the AWS account that owns the snapshot. + // * owner-id: The ID of the AWS account that enabled fast snapshot restore + // on the snapshot. // // * snapshot-id: The ID of the snapshot. // @@ -58509,8 +61711,7 @@ type DescribeIamInstanceProfileAssociationsInput struct { // // * instance-id - The ID of the instance. // - // * state - The state of the association (associating | associated | disassociating - // | disassociated). + // * state - The state of the association (associating | associated | disassociating). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return in a single call. To retrieve the @@ -58917,7 +62118,7 @@ type DescribeImagesInput struct { // in GiB. // // * block-device-mapping.volume-type - The volume type of the EBS volume - // (gp2 | io1 | st1 | sc1 | standard). + // (gp2 | io1 | io2 | st1 | sc1 | standard). // // * block-device-mapping.encrypted - A Boolean that indicates whether the // EBS volume is encrypted. @@ -58941,11 +62142,13 @@ type DescribeImagesInput struct { // // * name - The name of the AMI (provided during image creation). // - // * owner-alias - String value from an Amazon-maintained list (amazon | - // aws-marketplace | microsoft) of snapshot owners. Not to be confused with - // the user-configured AWS account alias, which is set from the IAM console. + // * owner-alias - The owner alias, from an Amazon-maintained list (amazon + // | aws-marketplace). This is not the user-configured AWS account alias + // set using the IAM console. We recommend that you use the related parameter + // instead of this filter. // - // * owner-id - The AWS account ID of the image owner. + // * owner-id - The AWS account ID of the owner. We recommend that you use + // the related parameter instead of this filter. 
// // * platform - The platform. To only list Windows-based AMIs, use windows. // @@ -58987,10 +62190,10 @@ type DescribeImagesInput struct { // Default: Describes all images available to you. ImageIds []*string `locationName:"ImageId" locationNameList:"ImageId" type:"list"` - // Filters the images by the owner. Specify an AWS account ID, self (owner is - // the sender of the request), or an AWS owner alias (valid values are amazon - // | aws-marketplace | microsoft). Omitting this option returns all images for - // which you have launch permissions, regardless of ownership. + // Scopes the results to images with the specified owners. You can specify a + // combination of AWS account IDs, self, amazon, and aws-marketplace. If you + // omit this parameter, the results include all images for which you have launch + // permissions, regardless of ownership. Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` } @@ -59589,6 +62792,55 @@ func (s *DescribeInstanceCreditSpecificationsOutput) SetNextToken(v string) *Des return s } +type DescribeInstanceEventNotificationAttributesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` +} + +// String returns the string representation +func (s DescribeInstanceEventNotificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceEventNotificationAttributesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeInstanceEventNotificationAttributesInput) SetDryRun(v bool) *DescribeInstanceEventNotificationAttributesInput { + s.DryRun = &v + return s +} + +type DescribeInstanceEventNotificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // Information about the registered tag keys. + InstanceTagAttribute *InstanceTagNotificationAttribute `locationName:"instanceTagAttribute" type:"structure"` +} + +// String returns the string representation +func (s DescribeInstanceEventNotificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstanceEventNotificationAttributesOutput) GoString() string { + return s.String() +} + +// SetInstanceTagAttribute sets the InstanceTagAttribute field's value. +func (s *DescribeInstanceEventNotificationAttributesOutput) SetInstanceTagAttribute(v *InstanceTagNotificationAttribute) *DescribeInstanceEventNotificationAttributesOutput { + s.InstanceTagAttribute = v + return s +} + type DescribeInstanceStatusInput struct { _ struct{} `type:"structure"` @@ -59882,12 +63134,33 @@ type DescribeInstanceTypesInput struct { // * current-generation - Indicates whether this instance type is the latest // generation instance type of an instance family. (true | false) // + // * ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps - The baseline + // bandwidth performance for an EBS-optimized instance type, in Mbps. + // + // * ebs-info.ebs-optimized-info.baseline-throughput-in-mbps - The baseline + // throughput performance for an EBS-optimized instance type, in MBps. 
+ // + // * ebs-info.ebs-optimized-info.baseline-iops - The baseline input/output + // storage operations per second for an EBS-optimized instance type. + // + // * ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps - The maximum + // bandwidth performance for an EBS-optimized instance type, in Mbps. + // + // * ebs-info.ebs-optimized-info.maximum-throughput-in-mbps - The maximum + // throughput performance for an EBS-optimized instance type, in MBps. + // + // * ebs-info.ebs-optimized-info.maximum-iops - The maximum input/output + // storage operations per second for an EBS-optimized instance type. + // // * ebs-info.ebs-optimized-support - Indicates whether the instance type // is EBS-optimized. (supported | unsupported | default) // // * ebs-info.encryption-support - Indicates whether EBS encryption is supported. // (supported | unsupported) // + // * ebs-info.nvme-support - Indicates whether non-volatile memory express + // (NVMe) is supported or required. (required | supported | unsupported) + // // * free-tier-eligible - Indicates whether the instance type is eligible // to use in the free tier. (true | false) // @@ -59915,6 +63188,9 @@ type DescribeInstanceTypesInput struct { // * network-info.ena-support - Indicates whether Elastic Network Adapter // (ENA) is supported or required. (required | supported | unsupported) // + // * network-info.efa-supported - Indicates whether the instance type supports + // Elastic Fabric Adapter (EFA). (true | false) + // // * network-info.ipv4-addresses-per-interface - The maximum number of private // IPv4 addresses per network interface. // @@ -60648,9 +63924,21 @@ type DescribeKeyPairsInput struct { // The filters. // + // * key-pair-id - The ID of the key pair. + // // * fingerprint - The fingerprint of the key pair. // // * key-name - The name of the key pair. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources assigned a tag with a specific key, regardless of + // the tag value. + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The key pair names. @@ -60749,12 +64037,16 @@ type DescribeLaunchTemplateVersionsInput struct { // * ram-disk-id - The RAM disk ID. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` - // The ID of the launch template. You must specify either the launch template - // ID or launch template name in the request. + // The ID of the launch template. To describe one or more versions of a specified + // launch template, you must specify either the launch template ID or the launch + // template name in the request. To describe all the latest or default launch + // template versions in your account, you must omit this parameter. LaunchTemplateId *string `type:"string"` - // The name of the launch template. You must specify either the launch template - // ID or launch template name in the request. + // The name of the launch template. To describe one or more versions of a specified + // launch template, you must specify either the launch template ID or the launch + // template name in the request. 
To describe all the latest or default launch + // template versions in your account, you must omit this parameter. LaunchTemplateName *string `min:"3" type:"string"` // The maximum number of results to return in a single call. To retrieve the @@ -60771,7 +64063,18 @@ type DescribeLaunchTemplateVersionsInput struct { // The token to request the next page of results. NextToken *string `type:"string"` - // One or more versions of the launch template. + // One or more versions of the launch template. Valid values depend on whether + // you are describing a specified launch template (by ID or name) or all launch + // templates in your account. + // + // To describe one or more versions of a specified launch template, valid values + // are $Latest, $Default, and numbers. + // + // To describe all launch templates in your account that are defined as the + // latest version, the valid value is $Latest. To describe all launch templates + // in your account that are defined as the default version, the valid value + // is $Default. You can specify $Latest and $Default in the same call. You cannot + // specify numbers. Versions []*string `locationName:"LaunchTemplateVersion" locationNameList:"item" type:"list"` } @@ -61028,6 +64331,18 @@ type DescribeLocalGatewayRouteTableVirtualInterfaceGroupAssociationsInput struct DryRun *bool `type:"boolean"` // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-route-table-id - The ID of the local gateway route table. + // + // * local-gateway-route-table-virtual-interface-group-association-id - The + // ID of the association. + // + // * local-gateway-route-table-virtual-interface-group-id - The ID of the + // virtual interface group. + // + // * state - The state of the association. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The IDs of the associations. @@ -61137,6 +64452,16 @@ type DescribeLocalGatewayRouteTableVpcAssociationsInput struct { DryRun *bool `type:"boolean"` // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-route-table-id - The ID of the local gateway route table. + // + // * local-gateway-route-table-vpc-association-id - The ID of the association. + // + // * state - The state of the association. + // + // * vpc-id - The ID of the VPC. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The IDs of the associations. @@ -61246,6 +64571,14 @@ type DescribeLocalGatewayRouteTablesInput struct { DryRun *bool `type:"boolean"` // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-route-table-id - The ID of a local gateway route table. + // + // * outpost-arn - The Amazon Resource Name (ARN) of the Outpost. + // + // * state - The state of the local gateway route table. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The IDs of the local gateway route tables. @@ -61355,6 +64688,13 @@ type DescribeLocalGatewayVirtualInterfaceGroupsInput struct { DryRun *bool `type:"boolean"` // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-virtual-interface-id - The ID of the virtual interface. + // + // * local-gateway-virtual-interface-group-id - The ID of the virtual interface + // group. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The IDs of the virtual interface groups. 
@@ -61575,7 +64915,21 @@ type DescribeLocalGatewaysInput struct { // One or more filters. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` - // The IDs of the local gateways. + // One or more filters. + // + // * local-gateway-id - The ID of a local gateway. + // + // * local-gateway-route-table-id - The ID of the local gateway route table. + // + // * local-gateway-route-table-virtual-interface-group-association-id - The + // ID of the association. + // + // * local-gateway-route-table-virtual-interface-group-id - The ID of the + // virtual interface group. + // + // * outpost-arn - The Amazon Resource Name (ARN) of the Outpost. + // + // * state - The state of the association. LocalGatewayIds []*string `locationName:"LocalGatewayId" locationNameList:"item" type:"list"` // The maximum number of results to return with a single call. To retrieve the @@ -61672,6 +65026,121 @@ func (s *DescribeLocalGatewaysOutput) SetNextToken(v string) *DescribeLocalGatew return s } +type DescribeManagedPrefixListsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // * owner-id - The ID of the prefix list owner. + // + // * prefix-list-id - The ID of the prefix list. + // + // * prefix-list-name - The name of the prefix list. + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // One or more prefix list IDs. + PrefixListIds []*string `locationName:"PrefixListId" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeManagedPrefixListsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeManagedPrefixListsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeManagedPrefixListsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeManagedPrefixListsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *DescribeManagedPrefixListsInput) SetDryRun(v bool) *DescribeManagedPrefixListsInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeManagedPrefixListsInput) SetFilters(v []*Filter) *DescribeManagedPrefixListsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeManagedPrefixListsInput) SetMaxResults(v int64) *DescribeManagedPrefixListsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeManagedPrefixListsInput) SetNextToken(v string) *DescribeManagedPrefixListsInput { + s.NextToken = &v + return s +} + +// SetPrefixListIds sets the PrefixListIds field's value. +func (s *DescribeManagedPrefixListsInput) SetPrefixListIds(v []*string) *DescribeManagedPrefixListsInput { + s.PrefixListIds = v + return s +} + +type DescribeManagedPrefixListsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the prefix lists. + PrefixLists []*ManagedPrefixList `locationName:"prefixListSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeManagedPrefixListsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeManagedPrefixListsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeManagedPrefixListsOutput) SetNextToken(v string) *DescribeManagedPrefixListsOutput { + s.NextToken = &v + return s +} + +// SetPrefixLists sets the PrefixLists field's value. +func (s *DescribeManagedPrefixListsOutput) SetPrefixLists(v []*ManagedPrefixList) *DescribeManagedPrefixListsOutput { + s.PrefixLists = v + return s +} + type DescribeMovingAddressesInput struct { _ struct{} `type:"structure"` @@ -62362,9 +65831,6 @@ type DescribeNetworkInterfacesInput struct { // * attachment.instance-owner-id - The owner ID of the instance to which // the network interface is attached. // - // * attachment.nat-gateway-id - The ID of the NAT gateway to which the network - // interface is attached. - // // * attachment.status - The status of the attachment (attaching | attached // | detaching | detached). // @@ -62542,6 +66008,16 @@ type DescribePlacementGroupsInput struct { // | deleted). // // * strategy - The strategy of the placement group (cluster | spread | partition). + // + // * tag: - The key/value combination of a tag assigned to the resource. + // Use the tag key in the filter name and the tag value as the filter value. + // For example, to find all resources that have a tag with the key Owner + // and the value TeamA, specify tag:Owner for the filter name and TeamA for + // the filter value. + // + // * tag-key - The key of a tag assigned to the resource. Use this filter + // to find all resources that have a tag with a specific key, regardless + // of the tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The IDs of the placement groups. @@ -63644,8 +67120,6 @@ type DescribeRouteTablesInput struct { // to find all resources assigned a tag with a specific key, regardless of // the tag value. // - // * transit-gateway-id - The ID of a transit gateway. - // // * vpc-id - The ID of the VPC for the route table. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -64134,8 +67608,8 @@ type DescribeSecurityGroupsInput struct { // * egress.ip-permission.ipv6-cidr - An IPv6 CIDR block for an outbound // security group rule. // - // * egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service - // to which a security group rule allows outbound access. + // * egress.ip-permission.prefix-list-id - The ID of a prefix list to which + // a security group rule allows outbound access. 
// // * egress.ip-permission.protocol - The IP protocol for an outbound security // group rule (tcp | udp | icmp or a protocol number). @@ -64165,8 +67639,8 @@ type DescribeSecurityGroupsInput struct { // * ip-permission.ipv6-cidr - An IPv6 CIDR block for an inbound security // group rule. // - // * ip-permission.prefix-list-id - The ID (prefix) of the AWS service from - // which a security group rule allows inbound access. + // * ip-permission.prefix-list-id - The ID of a prefix list from which a + // security group rule allows inbound access. // // * ip-permission.protocol - The IP protocol for an inbound security group // rule (tcp | udp | icmp or a protocol number). @@ -64429,12 +67903,12 @@ type DescribeSnapshotsInput struct { // // * encrypted - Indicates whether the snapshot is encrypted (true | false) // - // * owner-alias - Value from an Amazon-maintained list (amazon | self | - // all | aws-marketplace | microsoft) of snapshot owners. Not to be confused - // with the user-configured AWS account alias, which is set from the IAM - // console. + // * owner-alias - The owner alias, from an Amazon-maintained list (amazon). + // This is not the user-configured AWS account alias set using the IAM console. + // We recommend that you use the related parameter instead of this filter. // - // * owner-id - The ID of the AWS account that owns the snapshot. + // * owner-id - The AWS account ID of the owner. We recommend that you use + // the related parameter instead of this filter. // // * progress - The progress of the snapshot, as a percentage (for example, // 80%). @@ -64478,7 +67952,8 @@ type DescribeSnapshotsInput struct { // to return. NextToken *string `type:"string"` - // Describes the snapshots owned by these owners. + // Scopes the results to snapshots with the specified owners. You can specify + // a combination of AWS account IDs, self, and amazon. OwnerIds []*string `locationName:"Owner" locationNameList:"Owner" type:"list"` // The IDs of the AWS accounts that can create volumes from the snapshot. @@ -65034,8 +68509,8 @@ type DescribeSpotInstanceRequestsInput struct { // in GiB. // // * launch.block-device-mapping.volume-type - The type of EBS volume: gp2 - // for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput - // Optimized HDD, sc1for Cold HDD, or standard for Magnetic. + // for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for + // Throughput Optimized HDD, sc1for Cold HDD, or standard for Magnetic. // // * launch.group-id - The ID of the security group for the instance. // @@ -65089,7 +68564,7 @@ type DescribeSpotInstanceRequestsInput struct { // * state - The state of the Spot Instance request (open | active | closed // | cancelled | failed). Spot request status information can help you track // your Amazon EC2 Spot Instance requests. For more information, see Spot - // Request Status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // request status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) // in the Amazon EC2 User Guide for Linux Instances. // // * status-code - The short code describing the most recent evaluation of @@ -65637,12 +69112,12 @@ type DescribeTagsInput struct { // * resource-id - The ID of the resource. 
// // * resource-type - The resource type (customer-gateway | dedicated-host - // | dhcp-options | elastic-ip | fleet | fpga-image | image | instance | - // host-reservation | internet-gateway | launch-template | natgateway | network-acl - // | network-interface | placement-group | reserved-instances | route-table - // | security-group | snapshot | spot-instances-request | subnet | volume - // | vpc | vpc-endpoint | vpc-endpoint-service | vpc-peering-connection | - // vpn-connection | vpn-gateway). + // | dhcp-options | elastic-ip | fleet | fpga-image | host-reservation | + // image | instance | internet-gateway | key-pair | launch-template | natgateway + // | network-acl | network-interface | placement-group | reserved-instances + // | route-table | security-group | snapshot | spot-instances-request | subnet + // | volume | vpc | vpc-endpoint | vpc-endpoint-service | vpc-peering-connection + // | vpn-connection | vpn-gateway). // // * tag: - The key/value combination of the tag. For example, specify // "tag:Owner" for the filter name and "TeamA" for the filter value to find @@ -66109,11 +69584,12 @@ type DescribeTransitGatewayAttachmentsInput struct { // // * resource-owner-id - The ID of the AWS account that owns the resource. // - // * resource-type - The resource type (vpc | vpn). + // * resource-type - The resource type. Valid values are vpc | vpn | direct-connect-gateway + // | peering. // - // * state - The state of the attachment (available | deleted | deleting - // | failed | modifying | pendingAcceptance | pending | rollingBack | rejected - // | rejecting). + // * state - The state of the attachment. Valid values are available | deleted + // | deleting | failed | failing | initiatingRequest | modifying | pendingAcceptance + // | pending | rollingBack | rejected | rejecting. // // * transit-gateway-attachment-id - The ID of the attachment. // @@ -66355,9 +69831,9 @@ type DescribeTransitGatewayPeeringAttachmentsInput struct { // * remote-owner-id - The ID of the AWS account in the remote Region that // owns the transit gateway. // - // * state - The state of the peering attachment (available | deleted | deleting - // | failed | modifying | pendingAcceptance | pending | rollingBack | rejected - // | rejecting). + // * state - The state of the peering attachment. Valid values are available + // | deleted | deleting | failed | failing | initiatingRequest | modifying + // | pendingAcceptance | pending | rollingBack | rejected | rejecting). // // * transit-gateway-id - The ID of the transit gateway. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -66476,9 +69952,8 @@ type DescribeTransitGatewayRouteTablesInput struct { // * default-propagation-route-table - Indicates whether this is the default // propagation route table for the transit gateway (true | false). // - // * state - The state of the attachment (available | deleted | deleting - // | failed | modifying | pendingAcceptance | pending | rollingBack | rejected - // | rejecting). + // * state - The state of the route table (available | deleting | deleted + // | pending). // // * transit-gateway-id - The ID of the transit gateway. // @@ -66594,9 +70069,9 @@ type DescribeTransitGatewayVpcAttachmentsInput struct { // One or more filters. The possible values are: // - // * state - The state of the attachment (available | deleted | deleting - // | failed | modifying | pendingAcceptance | pending | rollingBack | rejected - // | rejecting). + // * state - The state of the attachment. 
Valid values are available | deleted + // | deleting | failed | failing | initiatingRequest | modifying | pendingAcceptance + // | pending | rollingBack | rejected | rejecting. // // * transit-gateway-attachment-id - The ID of the attachment. // @@ -66741,9 +70216,8 @@ type DescribeTransitGatewaysInput struct { // // * owner-id - The ID of the AWS account that owns the transit gateway. // - // * state - The state of the attachment (available | deleted | deleting - // | failed | modifying | pendingAcceptance | pending | rollingBack | rejected - // | rejecting). + // * state - The state of the transit gateway (available | deleted | deleting + // | modifying | pending). // // * transit-gateway-id - The ID of the transit gateway. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -67127,7 +70601,7 @@ type DescribeVolumesInput struct { // // * snapshot-id - The snapshot from which the volume was created. // - // * status - The status of the volume (creating | available | in-use | deleting + // * status - The state of the volume (creating | available | in-use | deleting // | deleted | error). // // * tag: - The key/value combination of a tag assigned to the resource. @@ -67143,7 +70617,7 @@ type DescribeVolumesInput struct { // * volume-id - The volume ID. // // * volume-type - The Amazon EBS volume type. This can be gp2 for General - // Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized + // Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized // HDD, sc1 for Cold HDD, or standard for Magnetic volumes. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -67217,9 +70691,34 @@ type DescribeVolumesModificationsInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The filters. Supported filters: volume-id | modification-state | target-size - // | target-iops | target-volume-type | original-size | original-iops | original-volume-type - // | start-time | originalMultiAttachEnabled | targetMultiAttachEnabled. + // The filters. + // + // * modification-state - The current modification state (modifying | optimizing + // | completed | failed). + // + // * original-iops - The original IOPS rate of the volume. + // + // * original-size - The original size of the volume, in GiB. + // + // * original-volume-type - The original volume type of the volume (standard + // | io1 | io2 | gp2 | sc1 | st1). + // + // * originalMultiAttachEnabled - Indicates whether Multi-Attach support + // was enabled (true | false). + // + // * start-time - The modification start time. + // + // * target-iops - The target IOPS rate of the volume. + // + // * target-size - The target size of the volume, in GiB. + // + // * target-volume-type - The target volume type of the volume (standard + // | io1 | io2 | gp2 | sc1 | st1). + // + // * targetMultiAttachEnabled - Indicates whether Multi-Attach support is + // to be enabled (true | false). + // + // * volume-id - The ID of the volume. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results (up to a limit of 500) to be returned in a @@ -67229,7 +70728,7 @@ type DescribeVolumesModificationsInput struct { // The nextToken value returned by a previous paginated request. NextToken *string `type:"string"` - // The IDs of the volumes for which in-progress modifications will be described. + // The IDs of the volumes. 
VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` } @@ -69540,10 +73039,11 @@ type DisableFastSnapshotRestoreSuccessItem struct { // The time at which fast snapshot restores entered the optimizing state. OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` - // The alias of the snapshot owner. + // The AWS owner alias that enabled fast snapshot restores on the snapshot. + // This is intended for future use. OwnerAlias *string `locationName:"ownerAlias" type:"string"` - // The ID of the AWS account that owns the snapshot. + // The ID of the AWS account that enabled fast snapshot restores on the snapshot. OwnerId *string `locationName:"ownerId" type:"string"` // The ID of the snapshot. @@ -70240,7 +73740,7 @@ type DisassociateRouteTableInput struct { _ struct{} `type:"structure"` // The association ID representing the current association between the route - // table and subnet. + // table and subnet or gateway. // // AssociationId is a required field AssociationId *string `locationName:"associationId" type:"string" required:"true"` @@ -70956,7 +74456,7 @@ type EbsBlockDevice struct { _ struct{} `type:"structure"` // Indicates whether the EBS volume is deleted on instance termination. For - // more information, see Preserving Amazon EBS Volumes on Instance Termination + // more information, see Preserving Amazon EBS volumes on instance termination // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#preserving-volumes-on-termination) // in the Amazon Elastic Compute Cloud User Guide. DeleteOnTermination *bool `locationName:"deleteOnTermination" type:"boolean"` @@ -70971,27 +74471,28 @@ type EbsBlockDevice struct { // In no case can you remove encryption from an encrypted volume. // // Encrypted volumes can only be attached to instances that support Amazon EBS - // encryption. For more information, see Supported Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). + // encryption. For more information, see Supported instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#EBSEncryption_supported_instances). // // This parameter is not returned by . Encrypted *bool `locationName:"encrypted" type:"boolean"` // The number of I/O operations per second (IOPS) that the volume supports. - // For io1 volumes, this represents the number of IOPS that are provisioned + // For io1 and io2 volumes, this represents the number of IOPS that are provisioned // for the volume. For gp2 volumes, this represents the baseline performance // of the volume and the rate at which the volume accumulates I/O credits for - // bursting. For more information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // bursting. For more information, see Amazon EBS volume types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // - // Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000IOPS - // for io1 volumes in most Regions. Maximum io1 IOPS of 64,000 is guaranteed - // only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000 IOPS + // for io1 and io2 volumes in most Regions. 
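Editor's note: the DescribeVolumesModificationsInput documentation above now enumerates each supported filter. A small sketch of using two of them follows; the region and filter values are placeholders, and the VolumesModifications field read from the output comes from the SDK at large rather than from this hunk.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	// Ask only for finished modifications that targeted gp2, using two of
	// the filters listed in the documentation above.
	out, err := svc.DescribeVolumesModifications(&ec2.DescribeVolumesModificationsInput{
		Filters: []*ec2.Filter{
			{Name: aws.String("modification-state"), Values: []*string{aws.String("completed")}},
			{Name: aws.String("target-volume-type"), Values: []*string{aws.String("gp2")}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range out.VolumesModifications {
		fmt.Println(m) // generated String() method; field names not assumed
	}
}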
Maximum io1 and io2 IOPS of 64,000 + // is guaranteed only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). // Other instance families guarantee performance up to 32,000 IOPS. For more // information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // - // Condition: This parameter is required for requests to create io1 volumes; - // it is not used in requests to create gp2, st1, sc1, or standard volumes. + // Condition: This parameter is required for requests to create io1 and io2 + // volumes; it is not used in requests to create gp2, st1, sc1, or standard + // volumes. Iops *int64 `locationName:"iops" type:"integer"` // Identifier (key ID, key alias, ID ARN, or alias ARN) for a customer managed @@ -71012,15 +74513,15 @@ type EbsBlockDevice struct { // a volume size, the default is the snapshot size. // // Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned - // IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for - // Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify + // IOPS SSD (io1 and io2), 500-16384 for Throughput Optimized HDD (st1), 500-16384 + // for Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify // a snapshot, the volume size must be equal to or larger than the snapshot // size. VolumeSize *int64 `locationName:"volumeSize" type:"integer"` - // The volume type. If you set the type to io1, you must also specify the Iops - // parameter. If you set the type to gp2, st1, sc1, or standard, you must omit - // the Iops parameter. + // The volume type. If you set the type to io1 or io2, you must also specify + // the Iops parameter. If you set the type to gp2, st1, sc1, or standard, you + // must omit the Iops parameter. // // Default: gp2 VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` @@ -71082,6 +74583,9 @@ func (s *EbsBlockDevice) SetVolumeType(v string) *EbsBlockDevice { type EbsInfo struct { _ struct{} `type:"structure"` + // Describes the optimized EBS performance for the instance type. + EbsOptimizedInfo *EbsOptimizedInfo `locationName:"ebsOptimizedInfo" type:"structure"` + // Indicates that the instance type is Amazon EBS-optimized. For more information, // see Amazon EBS-Optimized Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html) // in Amazon EC2 User Guide for Linux Instances. @@ -71089,6 +74593,9 @@ type EbsInfo struct { // Indicates whether Amazon EBS encryption is supported. EncryptionSupport *string `locationName:"encryptionSupport" type:"string" enum:"EbsEncryptionSupport"` + + // Indicates whether non-volatile memory express (NVMe) is supported. + NvmeSupport *string `locationName:"nvmeSupport" type:"string" enum:"EbsNvmeSupport"` } // String returns the string representation @@ -71101,6 +74608,12 @@ func (s EbsInfo) GoString() string { return s.String() } +// SetEbsOptimizedInfo sets the EbsOptimizedInfo field's value. +func (s *EbsInfo) SetEbsOptimizedInfo(v *EbsOptimizedInfo) *EbsInfo { + s.EbsOptimizedInfo = v + return s +} + // SetEbsOptimizedSupport sets the EbsOptimizedSupport field's value. func (s *EbsInfo) SetEbsOptimizedSupport(v string) *EbsInfo { s.EbsOptimizedSupport = &v @@ -71113,6 +74626,12 @@ func (s *EbsInfo) SetEncryptionSupport(v string) *EbsInfo { return s } +// SetNvmeSupport sets the NvmeSupport field's value. 
+func (s *EbsInfo) SetNvmeSupport(v string) *EbsInfo { + s.NvmeSupport = &v + return s +} + // Describes a parameter used to set up an EBS volume in a block device mapping. type EbsInstanceBlockDevice struct { _ struct{} `type:"structure"` @@ -71198,6 +74717,81 @@ func (s *EbsInstanceBlockDeviceSpecification) SetVolumeId(v string) *EbsInstance return s } +// Describes the optimized EBS performance for supported instance types. +type EbsOptimizedInfo struct { + _ struct{} `type:"structure"` + + // The baseline bandwidth performance for an EBS-optimized instance type, in + // Mbps. + BaselineBandwidthInMbps *int64 `locationName:"baselineBandwidthInMbps" type:"integer"` + + // The baseline input/output storage operations per seconds for an EBS-optimized + // instance type. + BaselineIops *int64 `locationName:"baselineIops" type:"integer"` + + // The baseline throughput performance for an EBS-optimized instance type, in + // MBps. + BaselineThroughputInMBps *float64 `locationName:"baselineThroughputInMBps" type:"double"` + + // The maximum bandwidth performance for an EBS-optimized instance type, in + // Mbps. + MaximumBandwidthInMbps *int64 `locationName:"maximumBandwidthInMbps" type:"integer"` + + // The maximum input/output storage operations per second for an EBS-optimized + // instance type. + MaximumIops *int64 `locationName:"maximumIops" type:"integer"` + + // The maximum throughput performance for an EBS-optimized instance type, in + // MBps. + MaximumThroughputInMBps *float64 `locationName:"maximumThroughputInMBps" type:"double"` +} + +// String returns the string representation +func (s EbsOptimizedInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbsOptimizedInfo) GoString() string { + return s.String() +} + +// SetBaselineBandwidthInMbps sets the BaselineBandwidthInMbps field's value. +func (s *EbsOptimizedInfo) SetBaselineBandwidthInMbps(v int64) *EbsOptimizedInfo { + s.BaselineBandwidthInMbps = &v + return s +} + +// SetBaselineIops sets the BaselineIops field's value. +func (s *EbsOptimizedInfo) SetBaselineIops(v int64) *EbsOptimizedInfo { + s.BaselineIops = &v + return s +} + +// SetBaselineThroughputInMBps sets the BaselineThroughputInMBps field's value. +func (s *EbsOptimizedInfo) SetBaselineThroughputInMBps(v float64) *EbsOptimizedInfo { + s.BaselineThroughputInMBps = &v + return s +} + +// SetMaximumBandwidthInMbps sets the MaximumBandwidthInMbps field's value. +func (s *EbsOptimizedInfo) SetMaximumBandwidthInMbps(v int64) *EbsOptimizedInfo { + s.MaximumBandwidthInMbps = &v + return s +} + +// SetMaximumIops sets the MaximumIops field's value. +func (s *EbsOptimizedInfo) SetMaximumIops(v int64) *EbsOptimizedInfo { + s.MaximumIops = &v + return s +} + +// SetMaximumThroughputInMBps sets the MaximumThroughputInMBps field's value. +func (s *EbsOptimizedInfo) SetMaximumThroughputInMBps(v float64) *EbsOptimizedInfo { + s.MaximumThroughputInMBps = &v + return s +} + // Describes an egress-only internet gateway. type EgressOnlyInternetGateway struct { _ struct{} `type:"structure"` @@ -71470,7 +75064,7 @@ type ElasticInferenceAccelerator struct { Count *int64 `min:"1" type:"integer"` // The type of elastic inference accelerator. The possible values are eia1.medium, - // eia1.large, and eia1.xlarge. + // eia1.large, eia1.xlarge, eia2.medium, eia2.large, and eia2.xlarge. 
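Editor's note: EbsInfo gains EbsOptimizedInfo and NvmeSupport in the hunks above. A rough sketch of reading those fields through DescribeInstanceTypes follows; the instance type, region, and the DescribeInstanceTypes call itself sit outside this diff and are assumptions of the sketch.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	out, err := svc.DescribeInstanceTypes(&ec2.DescribeInstanceTypesInput{
		InstanceTypes: []*string{aws.String("m5.large")}, // placeholder instance type
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, it := range out.InstanceTypes {
		ebs := it.EbsInfo
		if ebs == nil {
			continue
		}
		// NvmeSupport and EbsOptimizedInfo are the fields added above.
		fmt.Printf("%s: nvme=%s\n", aws.StringValue(it.InstanceType), aws.StringValue(ebs.NvmeSupport))
		if opt := ebs.EbsOptimizedInfo; opt != nil {
			fmt.Printf("  baseline %d Mbps / %d IOPS / %.1f MBps, max %d Mbps / %d IOPS / %.1f MBps\n",
				aws.Int64Value(opt.BaselineBandwidthInMbps),
				aws.Int64Value(opt.BaselineIops),
				aws.Float64Value(opt.BaselineThroughputInMBps),
				aws.Int64Value(opt.MaximumBandwidthInMbps),
				aws.Int64Value(opt.MaximumIops),
				aws.Float64Value(opt.MaximumThroughputInMBps))
		}
	}
}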
// // Type is a required field Type *string `type:"string" required:"true"` @@ -71738,10 +75332,11 @@ type EnableFastSnapshotRestoreSuccessItem struct { // The time at which fast snapshot restores entered the optimizing state. OptimizingTime *time.Time `locationName:"optimizingTime" type:"timestamp"` - // The alias of the snapshot owner. + // The AWS owner alias that enabled fast snapshot restores on the snapshot. + // This is intended for future use. OwnerAlias *string `locationName:"ownerAlias" type:"string"` - // The ID of the AWS account that owns the snapshot. + // The ID of the AWS account that enabled fast snapshot restores on the snapshot. OwnerId *string `locationName:"ownerId" type:"string"` // The ID of the snapshot. @@ -72553,7 +76148,7 @@ type ExportImageInput struct { // Token to enable idempotency for export image requests. ClientToken *string `type:"string" idempotencyToken:"true"` - // A description of the image being exported. The maximum length is 255 bytes. + // A description of the image being exported. The maximum length is 255 characters. Description *string `type:"string"` // The disk image format. @@ -72573,15 +76168,18 @@ type ExportImageInput struct { ImageId *string `type:"string" required:"true"` // The name of the role that grants VM Import/Export permission to export images - // to your S3 bucket. If this parameter is not specified, the default role is - // named 'vmimport'. + // to your Amazon S3 bucket. If this parameter is not specified, the default + // role is named 'vmimport'. RoleName *string `type:"string"` - // Information about the destination S3 bucket. The bucket must exist and grant - // WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // Information about the destination Amazon S3 bucket. The bucket must exist + // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. // // S3ExportLocation is a required field S3ExportLocation *ExportTaskS3LocationRequest `type:"structure" required:"true"` + + // The tags to apply to the image being exported. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -72660,6 +76258,12 @@ func (s *ExportImageInput) SetS3ExportLocation(v *ExportTaskS3LocationRequest) * return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *ExportImageInput) SetTagSpecifications(v []*TagSpecification) *ExportImageInput { + s.TagSpecifications = v + return s +} + type ExportImageOutput struct { _ struct{} `type:"structure"` @@ -72679,10 +76283,10 @@ type ExportImageOutput struct { Progress *string `locationName:"progress" type:"string"` // The name of the role that grants VM Import/Export permission to export images - // to your S3 bucket. + // to your Amazon S3 bucket. RoleName *string `locationName:"roleName" type:"string"` - // Information about the destination S3 bucket. + // Information about the destination Amazon S3 bucket. S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` // The status of the export image task. The possible values are active, completed, @@ -72691,6 +76295,9 @@ type ExportImageOutput struct { // The status message for the export image task. StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the image being exported. 
+ Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -72757,6 +76364,12 @@ func (s *ExportImageOutput) SetStatusMessage(v string) *ExportImageOutput { return s } +// SetTags sets the Tags field's value. +func (s *ExportImageOutput) SetTags(v []*Tag) *ExportImageOutput { + s.Tags = v + return s +} + // Describes an export image task. type ExportImageTask struct { _ struct{} `type:"structure"` @@ -72773,7 +76386,7 @@ type ExportImageTask struct { // The percent complete of the export image task. Progress *string `locationName:"progress" type:"string"` - // Information about the destination S3 bucket. + // Information about the destination Amazon S3 bucket. S3ExportLocation *ExportTaskS3Location `locationName:"s3ExportLocation" type:"structure"` // The status of the export image task. The possible values are active, completed, @@ -72782,6 +76395,9 @@ type ExportImageTask struct { // The status message for the export image task. StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the image being exported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -72836,6 +76452,12 @@ func (s *ExportImageTask) SetStatusMessage(v string) *ExportImageTask { return s } +// SetTags sets the Tags field's value. +func (s *ExportImageTask) SetTags(v []*Tag) *ExportImageTask { + s.Tags = v + return s +} + // Describes an instance export task. type ExportTask struct { _ struct{} `type:"structure"` @@ -72918,7 +76540,7 @@ func (s *ExportTask) SetTags(v []*Tag) *ExportTask { type ExportTaskS3Location struct { _ struct{} `type:"structure"` - // The destination S3 bucket. + // The destination Amazon S3 bucket. S3Bucket *string `locationName:"s3Bucket" type:"string"` // The prefix (logical hierarchy) in the bucket. @@ -72951,7 +76573,7 @@ func (s *ExportTaskS3Location) SetS3Prefix(v string) *ExportTaskS3Location { type ExportTaskS3LocationRequest struct { _ struct{} `type:"structure"` - // The destination S3 bucket. + // The destination Amazon S3 bucket. // // S3Bucket is a required field S3Bucket *string `type:"string" required:"true"` @@ -73006,8 +76628,8 @@ type ExportToS3Task struct { // The format for the exported image. DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` - // The S3 bucket for the destination image. The destination bucket must exist - // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // The Amazon S3 bucket for the destination image. The destination bucket must + // exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. S3Bucket *string `locationName:"s3Bucket" type:"string"` // The encryption key for your S3 bucket. @@ -73059,12 +76681,12 @@ type ExportToS3TaskSpecification struct { // The format for the exported image. DiskImageFormat *string `locationName:"diskImageFormat" type:"string" enum:"DiskImageFormat"` - // The S3 bucket for the destination image. The destination bucket must exist - // and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. + // The Amazon S3 bucket for the destination image. The destination bucket must + // exist and grant WRITE and READ_ACP permissions to the AWS account vm-import-export@amazon.com. 
S3Bucket *string `locationName:"s3Bucket" type:"string"` - // The image is written to a single object in the S3 bucket at the S3 key s3prefix - // + exportTaskId + '.' + diskImageFormat. + // The image is written to a single object in the Amazon S3 bucket at the S3 + // key s3prefix + exportTaskId + '.' + diskImageFormat. S3Prefix *string `locationName:"s3Prefix" type:"string"` } @@ -73131,13 +76753,11 @@ type ExportTransitGatewayRoutesInput struct { // routes in your route table and you specify supernet-of-match as 10.0.1.0/30, // then the result returns 10.0.1.0/29. // - // * state - The state of the attachment (available | deleted | deleting - // | failed | modifying | pendingAcceptance | pending | rollingBack | rejected - // | rejecting). + // * state - The state of the route (active | blackhole). // // * transit-gateway-route-destination-cidr-block - The CIDR range. // - // * type - The type of route (active | blackhole). + // * type - The type of route (propagated | static). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The name of the S3 bucket. @@ -73257,6 +76877,54 @@ func (s *FailedQueuedPurchaseDeletion) SetReservedInstancesId(v string) *FailedQ return s } +// Describes the IAM SAML identity provider used for federated authentication. +type FederatedAuthentication struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM SAML identity provider. + SamlProviderArn *string `locationName:"samlProviderArn" type:"string"` +} + +// String returns the string representation +func (s FederatedAuthentication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedAuthentication) GoString() string { + return s.String() +} + +// SetSamlProviderArn sets the SamlProviderArn field's value. +func (s *FederatedAuthentication) SetSamlProviderArn(v string) *FederatedAuthentication { + s.SamlProviderArn = &v + return s +} + +// The IAM SAML identity provider used for federated authentication. +type FederatedAuthenticationRequest struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM SAML identity provider. + SAMLProviderArn *string `type:"string"` +} + +// String returns the string representation +func (s FederatedAuthenticationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedAuthenticationRequest) GoString() string { + return s.String() +} + +// SetSAMLProviderArn sets the SAMLProviderArn field's value. +func (s *FederatedAuthenticationRequest) SetSAMLProviderArn(v string) *FederatedAuthenticationRequest { + s.SAMLProviderArn = &v + return s +} + // A filter name and value pair that is used to return a more specific list // of results from a describe operation. Filters can be used to match a set // of resources by specific criteria, such as tags, attributes, or IDs. The @@ -73788,19 +77456,30 @@ func (s *FleetLaunchTemplateOverridesRequest) SetWeightedCapacity(v float64) *Fl return s } -// Describes a launch template. +// Describes the Amazon EC2 launch template and the launch template version +// that can be used by a Spot Fleet request to configure Amazon EC2 instances. +// For information about launch templates, see Launching an instance from a +// launch template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) +// in the Amazon EC2 User Guide for Linux Instances. 
type FleetLaunchTemplateSpecification struct { _ struct{} `type:"structure"` - // The ID of the launch template. You must specify either a template ID or a - // template name. + // The ID of the launch template. If you specify the template ID, you can't + // specify the template name. LaunchTemplateId *string `locationName:"launchTemplateId" type:"string"` - // The name of the launch template. You must specify either a template name - // or a template ID. + // The name of the launch template. If you specify the template name, you can't + // specify the template ID. LaunchTemplateName *string `locationName:"launchTemplateName" min:"3" type:"string"` - // The version number of the launch template. You must specify a version number. + // The launch template version number, $Latest, or $Default. You must specify + // a value, otherwise the request fails. + // + // If the value is $Latest, Amazon EC2 uses the latest version of the launch + // template. + // + // If the value is $Default, Amazon EC2 uses the default version of the launch + // template. Version *string `locationName:"version" type:"string"` } @@ -73845,19 +77524,30 @@ func (s *FleetLaunchTemplateSpecification) SetVersion(v string) *FleetLaunchTemp return s } -// The launch template to use. You must specify either the launch template ID -// or launch template name in the request. +// Describes the Amazon EC2 launch template and the launch template version +// that can be used by an EC2 Fleet to configure Amazon EC2 instances. For information +// about launch templates, see Launching an instance from a launch template +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) +// in the Amazon Elastic Compute Cloud User Guide. type FleetLaunchTemplateSpecificationRequest struct { _ struct{} `type:"structure"` - // The ID of the launch template. + // The ID of the launch template. If you specify the template ID, you can't + // specify the template name. LaunchTemplateId *string `type:"string"` - // The name of the launch template. + // The name of the launch template. If you specify the template name, you can't + // specify the template ID. LaunchTemplateName *string `min:"3" type:"string"` - // The version number of the launch template. Note: This is a required parameter - // and will be updated soon. + // The launch template version number, $Latest, or $Default. You must specify + // a value, otherwise the request fails. + // + // If the value is $Latest, Amazon EC2 uses the latest version of the launch + // template. + // + // If the value is $Default, Amazon EC2 uses the default version of the launch + // template. Version *string `type:"string"` } @@ -74542,12 +78232,13 @@ type GetCapacityReservationUsageInput struct { // The maximum number of results to return for the request in a single page. // The remaining results can be seen by sending another request with the returned - // nextToken value. + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. // // Valid range: Minimum value of 1. Maximum value of 1000. MaxResults *int64 `min:"1" type:"integer"` - // The token to retrieve the next page of results. + // The token to use to retrieve the next page of results. 
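Editor's note: the rewritten doc comments above spell out that Version must be a version number, $Latest, or $Default, and that template ID and name are mutually exclusive. A minimal sketch of building the request-side struct accordingly; the launch template ID is a placeholder and the surrounding CreateFleet request is deliberately omitted.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Specify the template by ID only (not by name as well) and pin the
	// version explicitly; "$Default" would track the template's default
	// version instead, per the documentation above.
	spec := &ec2.FleetLaunchTemplateSpecificationRequest{
		LaunchTemplateId: aws.String("lt-0123456789abcdef0"), // placeholder ID
		Version:          aws.String("$Latest"),
	}
	fmt.Println(spec)
}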
NextToken *string `type:"string"` } @@ -75194,6 +78885,114 @@ func (s *GetEbsEncryptionByDefaultOutput) SetEbsEncryptionByDefault(v bool) *Get return s } +type GetGroupsForCapacityReservationInput struct { + _ struct{} `type:"structure"` + + // The ID of the Capacity Reservation. + // + // CapacityReservationId is a required field + CapacityReservationId *string `type:"string" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return for the request in a single page. + // The remaining results can be seen by sending another request with the returned + // nextToken value. This value can be between 5 and 500. If maxResults is given + // a larger value than 500, you receive an error. + MaxResults *int64 `min:"1" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetGroupsForCapacityReservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupsForCapacityReservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetGroupsForCapacityReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetGroupsForCapacityReservationInput"} + if s.CapacityReservationId == nil { + invalidParams.Add(request.NewErrParamRequired("CapacityReservationId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapacityReservationId sets the CapacityReservationId field's value. +func (s *GetGroupsForCapacityReservationInput) SetCapacityReservationId(v string) *GetGroupsForCapacityReservationInput { + s.CapacityReservationId = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *GetGroupsForCapacityReservationInput) SetDryRun(v bool) *GetGroupsForCapacityReservationInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetGroupsForCapacityReservationInput) SetMaxResults(v int64) *GetGroupsForCapacityReservationInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetGroupsForCapacityReservationInput) SetNextToken(v string) *GetGroupsForCapacityReservationInput { + s.NextToken = &v + return s +} + +type GetGroupsForCapacityReservationOutput struct { + _ struct{} `type:"structure"` + + // Information about the resource groups to which the Capacity Reservation has + // been added. + CapacityReservationGroups []*CapacityReservationGroup `locationName:"capacityReservationGroupSet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetGroupsForCapacityReservationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupsForCapacityReservationOutput) GoString() string { + return s.String() +} + +// SetCapacityReservationGroups sets the CapacityReservationGroups field's value. +func (s *GetGroupsForCapacityReservationOutput) SetCapacityReservationGroups(v []*CapacityReservationGroup) *GetGroupsForCapacityReservationOutput { + s.CapacityReservationGroups = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetGroupsForCapacityReservationOutput) SetNextToken(v string) *GetGroupsForCapacityReservationOutput { + s.NextToken = &v + return s +} + type GetHostReservationPurchasePreviewInput struct { _ struct{} `type:"structure"` @@ -75371,6 +79170,226 @@ func (s *GetLaunchTemplateDataOutput) SetLaunchTemplateData(v *ResponseLaunchTem return s } +type GetManagedPrefixListAssociationsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetManagedPrefixListAssociationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListAssociationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetManagedPrefixListAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetManagedPrefixListAssociationsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetManagedPrefixListAssociationsInput) SetDryRun(v bool) *GetManagedPrefixListAssociationsInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetManagedPrefixListAssociationsInput) SetMaxResults(v int64) *GetManagedPrefixListAssociationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetManagedPrefixListAssociationsInput) SetNextToken(v string) *GetManagedPrefixListAssociationsInput { + s.NextToken = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. 
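Editor's note: GetGroupsForCapacityReservation and its paginated output are introduced above. The following sketch walks the pages using the NextToken contract described there; the reservation ID and region are placeholders, and groups are printed via the generated String() method because CapacityReservationGroup's fields are defined outside this section.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	input := &ec2.GetGroupsForCapacityReservationInput{
		CapacityReservationId: aws.String("cr-0123456789abcdef0"), // placeholder ID
		MaxResults:            aws.Int64(100),                     // must be 5-500 per the doc above
	}
	for {
		out, err := svc.GetGroupsForCapacityReservation(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, g := range out.CapacityReservationGroups {
			fmt.Println(g)
		}
		if out.NextToken == nil { // null when there are no more results
			break
		}
		input.NextToken = out.NextToken
	}
}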
+func (s *GetManagedPrefixListAssociationsInput) SetPrefixListId(v string) *GetManagedPrefixListAssociationsInput { + s.PrefixListId = &v + return s +} + +type GetManagedPrefixListAssociationsOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the associations. + PrefixListAssociations []*PrefixListAssociation `locationName:"prefixListAssociationSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s GetManagedPrefixListAssociationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListAssociationsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetManagedPrefixListAssociationsOutput) SetNextToken(v string) *GetManagedPrefixListAssociationsOutput { + s.NextToken = &v + return s +} + +// SetPrefixListAssociations sets the PrefixListAssociations field's value. +func (s *GetManagedPrefixListAssociationsOutput) SetPrefixListAssociations(v []*PrefixListAssociation) *GetManagedPrefixListAssociationsOutput { + s.PrefixListAssociations = v + return s +} + +type GetManagedPrefixListEntriesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The version of the prefix list for which to return the entries. The default + // is the current version. + TargetVersion *int64 `type:"long"` +} + +// String returns the string representation +func (s GetManagedPrefixListEntriesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListEntriesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetManagedPrefixListEntriesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetManagedPrefixListEntriesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetManagedPrefixListEntriesInput) SetDryRun(v bool) *GetManagedPrefixListEntriesInput { + s.DryRun = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetManagedPrefixListEntriesInput) SetMaxResults(v int64) *GetManagedPrefixListEntriesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *GetManagedPrefixListEntriesInput) SetNextToken(v string) *GetManagedPrefixListEntriesInput { + s.NextToken = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *GetManagedPrefixListEntriesInput) SetPrefixListId(v string) *GetManagedPrefixListEntriesInput { + s.PrefixListId = &v + return s +} + +// SetTargetVersion sets the TargetVersion field's value. +func (s *GetManagedPrefixListEntriesInput) SetTargetVersion(v int64) *GetManagedPrefixListEntriesInput { + s.TargetVersion = &v + return s +} + +type GetManagedPrefixListEntriesOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list entries. + Entries []*PrefixListEntry `locationName:"entrySet" locationNameList:"item" type:"list"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s GetManagedPrefixListEntriesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedPrefixListEntriesOutput) GoString() string { + return s.String() +} + +// SetEntries sets the Entries field's value. +func (s *GetManagedPrefixListEntriesOutput) SetEntries(v []*PrefixListEntry) *GetManagedPrefixListEntriesOutput { + s.Entries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetManagedPrefixListEntriesOutput) SetNextToken(v string) *GetManagedPrefixListEntriesOutput { + s.NextToken = &v + return s +} + type GetPasswordDataInput struct { _ struct{} `type:"structure"` @@ -75867,6 +79886,137 @@ func (s *GetTransitGatewayMulticastDomainAssociationsOutput) SetNextToken(v stri return s } +type GetTransitGatewayPrefixListReferencesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. The possible values are: + // + // * attachment.resource-id - The ID of the resource for the attachment. + // + // * attachment.resource-type - The type of resource for the attachment. + // Valid values are vpc | vpn | direct-connect-gateway | peering. + // + // * attachment.transit-gateway-attachment-id - The ID of the attachment. + // + // * is-blackhole - Whether traffic matching the route is blocked (true | + // false). + // + // * prefix-list-id - The ID of the prefix list. + // + // * prefix-list-owner-id - The ID of the owner of the prefix list. + // + // * state - The state of the prefix list reference (pending | available + // | modifying | deleting). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next page of results. + NextToken *string `type:"string"` + + // The ID of the transit gateway route table. 
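Editor's note: the GetManagedPrefixListEntries input/output pair above supports both versioned reads (TargetVersion) and token-based paging. A minimal sketch follows; the prefix list ID is a placeholder, and the Cidr/Description fields on PrefixListEntry come from the SDK rather than from this hunk.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	input := &ec2.GetManagedPrefixListEntriesInput{
		PrefixListId: aws.String("pl-0123456789abcdef0"), // placeholder ID
		// TargetVersion could be set here to read an older version;
		// the default is the current version, per the doc comment above.
	}
	for {
		out, err := svc.GetManagedPrefixListEntries(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, e := range out.Entries {
			fmt.Printf("%s  %s\n", aws.StringValue(e.Cidr), aws.StringValue(e.Description))
		}
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}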
+ // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetTransitGatewayPrefixListReferencesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayPrefixListReferencesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetTransitGatewayPrefixListReferencesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetTransitGatewayPrefixListReferencesInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetDryRun(v bool) *GetTransitGatewayPrefixListReferencesInput { + s.DryRun = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetFilters(v []*Filter) *GetTransitGatewayPrefixListReferencesInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetMaxResults(v int64) *GetTransitGatewayPrefixListReferencesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetNextToken(v string) *GetTransitGatewayPrefixListReferencesInput { + s.NextToken = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *GetTransitGatewayPrefixListReferencesInput) SetTransitGatewayRouteTableId(v string) *GetTransitGatewayPrefixListReferencesInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type GetTransitGatewayPrefixListReferencesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the prefix list references. + TransitGatewayPrefixListReferences []*TransitGatewayPrefixListReference `locationName:"transitGatewayPrefixListReferenceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s GetTransitGatewayPrefixListReferencesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetTransitGatewayPrefixListReferencesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetTransitGatewayPrefixListReferencesOutput) SetNextToken(v string) *GetTransitGatewayPrefixListReferencesOutput { + s.NextToken = &v + return s +} + +// SetTransitGatewayPrefixListReferences sets the TransitGatewayPrefixListReferences field's value. 
+func (s *GetTransitGatewayPrefixListReferencesOutput) SetTransitGatewayPrefixListReferences(v []*TransitGatewayPrefixListReference) *GetTransitGatewayPrefixListReferencesOutput { + s.TransitGatewayPrefixListReferences = v + return s +} + type GetTransitGatewayRouteTableAssociationsInput struct { _ struct{} `type:"structure"` @@ -75880,7 +80030,8 @@ type GetTransitGatewayRouteTableAssociationsInput struct { // // * resource-id - The ID of the resource. // - // * resource-type - The resource type (vpc | vpn). + // * resource-type - The resource type. Valid values are vpc | vpn | direct-connect-gateway + // | peering. // // * transit-gateway-attachment-id - The ID of the attachment. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -76000,7 +80151,8 @@ type GetTransitGatewayRouteTablePropagationsInput struct { // // * resource-id - The ID of the resource. // - // * resource-type - The resource type (vpc | vpn). + // * resource-type - The resource type. Valid values are vpc | vpn | direct-connect-gateway + // | peering. // // * transit-gateway-attachment-id - The ID of the attachment. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -76250,7 +80402,7 @@ func (s *GroupIdentifier) SetGroupName(v string) *GroupIdentifier { // Indicates whether your instance is configured for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). -// For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. type HibernationOptions struct { _ struct{} `type:"structure"` @@ -76278,7 +80430,7 @@ func (s *HibernationOptions) SetConfigured(v bool) *HibernationOptions { // Indicates whether your instance is configured for hibernation. This parameter // is valid only if the instance meets the hibernation prerequisites (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html#hibernating-prerequisites). -// For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) +// For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. type HibernationOptionsRequest struct { _ struct{} `type:"structure"` @@ -77428,7 +81580,7 @@ type ImageDiskContainer struct { // The format of the disk image being imported. // - // Valid values: VHD | VMDK | OVA + // Valid values: OVA | VHD | VHDX |VMDK Format *string `type:"string"` // The ID of the EBS snapshot to be used for importing the snapshot. @@ -77670,6 +81822,9 @@ type ImportImageInput struct { // The name of the role to use when not using the default role, 'vmimport'. RoleName *string `type:"string"` + + // The tags to apply to the image being imported. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -77760,6 +81915,12 @@ func (s *ImportImageInput) SetRoleName(v string) *ImportImageInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. 
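Editor's note: GetTransitGatewayPrefixListReferences above takes a required route table ID plus the filters enumerated in its doc comment. A rough sketch using one of those filters; the IDs are placeholders, and the references are printed via the generated String() method since TransitGatewayPrefixListReference is defined outside this section.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	out, err := svc.GetTransitGatewayPrefixListReferences(&ec2.GetTransitGatewayPrefixListReferencesInput{
		TransitGatewayRouteTableId: aws.String("tgw-rtb-0123456789abcdef0"), // placeholder ID
		Filters: []*ec2.Filter{
			// Only references in the available state, one of the filter
			// values listed in the documentation above.
			{Name: aws.String("state"), Values: []*string{aws.String("available")}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, ref := range out.TransitGatewayPrefixListReferences {
		fmt.Println(ref)
	}
}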
+func (s *ImportImageInput) SetTagSpecifications(v []*TagSpecification) *ImportImageInput { + s.TagSpecifications = v + return s +} + // The request information of license configurations. type ImportImageLicenseConfigurationRequest struct { _ struct{} `type:"structure"` @@ -77817,7 +81978,7 @@ type ImportImageOutput struct { // A description of the import task. Description *string `locationName:"description" type:"string"` - // Indicates whether the AMI is encypted. + // Indicates whether the AMI is encrypted. Encrypted *bool `locationName:"encrypted" type:"boolean"` // The target hypervisor of the import task. @@ -77853,6 +82014,9 @@ type ImportImageOutput struct { // A detailed status message of the import task. StatusMessage *string `locationName:"statusMessage" type:"string"` + + // Any tags assigned to the image being imported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -77949,6 +82113,12 @@ func (s *ImportImageOutput) SetStatusMessage(v string) *ImportImageOutput { return s } +// SetTags sets the Tags field's value. +func (s *ImportImageOutput) SetTags(v []*Tag) *ImportImageOutput { + s.Tags = v + return s +} + // Describes an import image task. type ImportImageTask struct { _ struct{} `type:"structure"` @@ -78482,6 +82652,9 @@ type ImportKeyPairInput struct { // // PublicKeyMaterial is a required field PublicKeyMaterial []byte `locationName:"publicKeyMaterial" type:"blob" required:"true"` + + // The tags to apply to the imported key pair. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -78528,6 +82701,12 @@ func (s *ImportKeyPairInput) SetPublicKeyMaterial(v []byte) *ImportKeyPairInput return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *ImportKeyPairInput) SetTagSpecifications(v []*TagSpecification) *ImportKeyPairInput { + s.TagSpecifications = v + return s +} + type ImportKeyPairOutput struct { _ struct{} `type:"structure"` @@ -78536,6 +82715,12 @@ type ImportKeyPairOutput struct { // The key pair name you provided. KeyName *string `locationName:"keyName" type:"string"` + + // The ID of the resulting key pair. + KeyPairId *string `locationName:"keyPairId" type:"string"` + + // The tags applied to the imported key pair. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -78560,6 +82745,18 @@ func (s *ImportKeyPairOutput) SetKeyName(v string) *ImportKeyPairOutput { return s } +// SetKeyPairId sets the KeyPairId field's value. +func (s *ImportKeyPairOutput) SetKeyPairId(v string) *ImportKeyPairOutput { + s.KeyPairId = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ImportKeyPairOutput) SetTags(v []*Tag) *ImportKeyPairOutput { + s.Tags = v + return s +} + type ImportSnapshotInput struct { _ struct{} `type:"structure"` @@ -78622,6 +82819,9 @@ type ImportSnapshotInput struct { // The name of the role to use when not using the default role, 'vmimport'. RoleName *string `type:"string"` + + // The tags to apply to the snapshot being imported. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -78682,6 +82882,12 @@ func (s *ImportSnapshotInput) SetRoleName(v string) *ImportSnapshotInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. 
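Editor's note: ImportKeyPairInput gains TagSpecifications and the output now returns KeyPairId and Tags, as shown above. A minimal sketch of tagging a key pair on import; the key name, tag values, and public key material are placeholders, and the "key-pair" resource type string comes from the EC2 API rather than from this hunk.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := ec2.New(sess)

	publicKey := []byte("ssh-rsa AAAA... user@example") // placeholder key material

	out, err := svc.ImportKeyPair(&ec2.ImportKeyPairInput{
		KeyName:           aws.String("example-key"), // placeholder name
		PublicKeyMaterial: publicKey,
		TagSpecifications: []*ec2.TagSpecification{{
			ResourceType: aws.String("key-pair"),
			Tags:         []*ec2.Tag{{Key: aws.String("Owner"), Value: aws.String("TeamA")}},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	// KeyPairId and Tags are the fields added to ImportKeyPairOutput above.
	fmt.Println(aws.StringValue(out.KeyPairId), out.Tags)
}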
+func (s *ImportSnapshotInput) SetTagSpecifications(v []*TagSpecification) *ImportSnapshotInput { + s.TagSpecifications = v + return s +} + type ImportSnapshotOutput struct { _ struct{} `type:"structure"` @@ -78693,6 +82899,9 @@ type ImportSnapshotOutput struct { // Information about the import snapshot task. SnapshotTaskDetail *SnapshotTaskDetail `locationName:"snapshotTaskDetail" type:"structure"` + + // Any tags assigned to the snapshot being imported. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -78723,6 +82932,12 @@ func (s *ImportSnapshotOutput) SetSnapshotTaskDetail(v *SnapshotTaskDetail) *Imp return s } +// SetTags sets the Tags field's value. +func (s *ImportSnapshotOutput) SetTags(v []*Tag) *ImportSnapshotOutput { + s.Tags = v + return s +} + // Describes an import snapshot task. type ImportSnapshotTask struct { _ struct{} `type:"structure"` @@ -79136,7 +83351,11 @@ type Instance struct { // name is only available if you've enabled DNS hostnames for your VPC. PublicDnsName *string `locationName:"dnsName" type:"string"` - // The public IPv4 address assigned to the instance, if applicable. + // The public IPv4 address, or the Carrier IP address assigned to the instance, + // if applicable. + // + // A Carrier IP address only applies to an instance launched in a subnet associated + // with a Wavelength Zone. PublicIpAddress *string `locationName:"ipAddress" type:"string"` // The RAM disk associated with this instance, if applicable. @@ -80201,6 +84420,9 @@ func (s *InstanceNetworkInterface) SetVpcId(v string) *InstanceNetworkInterface type InstanceNetworkInterfaceAssociation struct { _ struct{} `type:"structure"` + // The carrier IP address associated with the network interface. + CarrierIp *string `locationName:"carrierIp" type:"string"` + // The ID of the owner of the Elastic IP address. IpOwnerId *string `locationName:"ipOwnerId" type:"string"` @@ -80221,6 +84443,12 @@ func (s InstanceNetworkInterfaceAssociation) GoString() string { return s.String() } +// SetCarrierIp sets the CarrierIp field's value. +func (s *InstanceNetworkInterfaceAssociation) SetCarrierIp(v string) *InstanceNetworkInterfaceAssociation { + s.CarrierIp = &v + return s +} + // SetIpOwnerId sets the IpOwnerId field's value. func (s *InstanceNetworkInterfaceAssociation) SetIpOwnerId(v string) *InstanceNetworkInterfaceAssociation { s.IpOwnerId = &v @@ -80303,6 +84531,13 @@ func (s *InstanceNetworkInterfaceAttachment) SetStatus(v string) *InstanceNetwor type InstanceNetworkInterfaceSpecification struct { _ struct{} `type:"structure"` + // Indicates whether to assign a carrier IP address to the network interface. + // + // You can only assign a carrier IP address to a network interface that is in + // a subnet in a Wavelength Zone. For more information about carrier IP addresses, + // see Carrier IP addresses in the AWS Wavelength Developer Guide. + AssociateCarrierIpAddress *bool `type:"boolean"` + // Indicates whether to assign a public IPv4 address to an instance you launch // in a VPC. The public IP address can only be assigned to a network interface // for eth0, and can only be assigned to a new network interface, not an existing @@ -80393,6 +84628,12 @@ func (s InstanceNetworkInterfaceSpecification) GoString() string { return s.String() } +// SetAssociateCarrierIpAddress sets the AssociateCarrierIpAddress field's value. 
+func (s *InstanceNetworkInterfaceSpecification) SetAssociateCarrierIpAddress(v bool) *InstanceNetworkInterfaceSpecification { + s.AssociateCarrierIpAddress = &v + return s +} + // SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. func (s *InstanceNetworkInterfaceSpecification) SetAssociatePublicIpAddress(v bool) *InstanceNetworkInterfaceSpecification { s.AssociatePublicIpAddress = &v @@ -80920,6 +85161,41 @@ func (s *InstanceStorageInfo) SetTotalSizeInGB(v int64) *InstanceStorageInfo { return s } +// Describes the registered tag keys for the current Region. +type InstanceTagNotificationAttribute struct { + _ struct{} `type:"structure"` + + // Indicates wheter all tag keys in the current Region are registered to appear + // in scheduled event notifications. true indicates that all tag keys in the + // current Region are registered. + IncludeAllTagsOfInstance *bool `locationName:"includeAllTagsOfInstance" type:"boolean"` + + // The registered tag keys. + InstanceTagKeys []*string `locationName:"instanceTagKeySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s InstanceTagNotificationAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceTagNotificationAttribute) GoString() string { + return s.String() +} + +// SetIncludeAllTagsOfInstance sets the IncludeAllTagsOfInstance field's value. +func (s *InstanceTagNotificationAttribute) SetIncludeAllTagsOfInstance(v bool) *InstanceTagNotificationAttribute { + s.IncludeAllTagsOfInstance = &v + return s +} + +// SetInstanceTagKeys sets the InstanceTagKeys field's value. +func (s *InstanceTagNotificationAttribute) SetInstanceTagKeys(v []*string) *InstanceTagNotificationAttribute { + s.InstanceTagKeys = v + return s +} + // Describes the instance type. type InstanceTypeInfo struct { _ struct{} `type:"structure"` @@ -80988,6 +85264,9 @@ type InstanceTypeInfo struct { // Indicates whether the instance type is offered for spot or On-Demand. SupportedUsageClasses []*string `locationName:"supportedUsageClasses" locationNameList:"item" type:"list"` + // The supported virtualization types. + SupportedVirtualizationTypes []*string `locationName:"supportedVirtualizationTypes" locationNameList:"item" type:"list"` + // Describes the vCPU configurations for the instance type. VCpuInfo *VCpuInfo `locationName:"vCpuInfo" type:"structure"` } @@ -81128,6 +85407,12 @@ func (s *InstanceTypeInfo) SetSupportedUsageClasses(v []*string) *InstanceTypeIn return s } +// SetSupportedVirtualizationTypes sets the SupportedVirtualizationTypes field's value. +func (s *InstanceTypeInfo) SetSupportedVirtualizationTypes(v []*string) *InstanceTypeInfo { + s.SupportedVirtualizationTypes = v + return s +} + // SetVCpuInfo sets the VCpuInfo field's value. func (s *InstanceTypeInfo) SetVCpuInfo(v *VCpuInfo) *InstanceTypeInfo { s.VCpuInfo = v @@ -81324,9 +85609,7 @@ type IpPermission struct { // [VPC only] The IPv6 ranges. Ipv6Ranges []*Ipv6Range `locationName:"ipv6Ranges" locationNameList:"item" type:"list"` - // [VPC only] The prefix list IDs for an AWS service. With outbound rules, this - // is the AWS service to access through a VPC endpoint from instances associated - // with the security group. + // [VPC only] The prefix list IDs. PrefixListIds []*PrefixListId `locationName:"prefixListIds" locationNameList:"item" type:"list"` // The end of port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code. 
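The hunks above add tag-on-create support (TagSpecifications) to the EC2 import calls and expose the resulting KeyPairId and Tags on the outputs. A minimal aws-sdk-go usage sketch, illustrative only and not part of the vendored file; the key name, tag values, and key material below are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	// Placeholder public key material; in practice this is read from an
	// OpenSSH .pub file.
	publicKey := []byte("ssh-rsa AAAAB3NzaC1yc2E... example")

	out, err := svc.ImportKeyPair(&ec2.ImportKeyPairInput{
		KeyName:           aws.String("example-imported-key"),
		PublicKeyMaterial: publicKey,
		// TagSpecifications is the field added in this diff; "key-pair" is
		// the resource type EC2 expects when tagging key pairs on creation.
		TagSpecifications: []*ec2.TagSpecification{{
			ResourceType: aws.String("key-pair"),
			Tags:         []*ec2.Tag{{Key: aws.String("Team"), Value: aws.String("platform")}},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}

	// KeyPairId and Tags are the output fields added above.
	fmt.Println(aws.StringValue(out.KeyPairId), out.Tags)
}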
@@ -82113,7 +86396,7 @@ func (s *LaunchTemplateBlockDeviceMappingRequest) SetVirtualName(v string) *Laun // to configure the instance to run in On-Demand capacity or to run in any open // Capacity Reservation that has matching attributes (instance type, platform, // Availability Zone). Use the CapacityReservationTarget parameter to explicitly -// target a specific Capacity Reservation. +// target a specific Capacity Reservation or a Capacity Reservation group. type LaunchTemplateCapacityReservationSpecificationRequest struct { _ struct{} `type:"structure"` @@ -82127,7 +86410,8 @@ type LaunchTemplateCapacityReservationSpecificationRequest struct { // one is available. The instance runs in On-Demand capacity. CapacityReservationPreference *string `type:"string" enum:"CapacityReservationPreference"` - // Information about the target Capacity Reservation. + // Information about the target Capacity Reservation or Capacity Reservation + // group. CapacityReservationTarget *CapacityReservationTarget `type:"structure"` } @@ -82167,7 +86451,8 @@ type LaunchTemplateCapacityReservationSpecificationResponse struct { // one is available. The instance runs in On-Demand capacity. CapacityReservationPreference *string `locationName:"capacityReservationPreference" type:"string" enum:"CapacityReservationPreference"` - // Information about the target Capacity Reservation. + // Information about the target Capacity Reservation or Capacity Reservation + // group. CapacityReservationTarget *CapacityReservationTargetResponse `locationName:"capacityReservationTarget" type:"structure"` } @@ -82400,16 +86685,15 @@ type LaunchTemplateEbsBlockDeviceRequest struct { // a volume from a snapshot, you can't specify an encryption value. Encrypted *bool `type:"boolean"` - // The number of I/O operations per second (IOPS) that the volume supports. - // For io1, this represents the number of IOPS that are provisioned for the - // volume. For gp2, this represents the baseline performance of the volume and - // the rate at which the volume accumulates I/O credits for bursting. For more - // information about General Purpose SSD baseline performance, I/O credits, - // and bursting, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // The number of I/O operations per second (IOPS) to provision for an io1 or + // io2 volume, with a maximum ratio of 50 IOPS/GiB for io1, and 500 IOPS/GiB + // for io2. Range is 100 to 64,000 IOPS for volumes in most Regions. Maximum + // IOPS of 64,000 is guaranteed only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // Other instance families guarantee performance up to 32,000 IOPS. For more + // information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // - // Condition: This parameter is required for requests to create io1 volumes; - // it is not used in requests to create gp2, st1, sc1, or standard volumes. + // This parameter is valid only for Provisioned IOPS SSD (io1 and io2) volumes. 
Iops *int64 `type:"integer"` // The ARN of the symmetric AWS Key Management Service (AWS KMS) CMK used for @@ -82903,6 +87187,15 @@ func (s *LaunchTemplateInstanceMetadataOptionsRequest) SetHttpTokens(v string) * type LaunchTemplateInstanceNetworkInterfaceSpecification struct { _ struct{} `type:"structure"` + // Indicates whether to associate a Carrier IP address with eth0 for a new network + // interface. + // + // Use this option when you launch an instance in a Wavelength Zone and want + // to associate a Carrier IP address with the network interface. For more information + // about Carrier IP addresses, see Carrier IP addresses (https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#provider-owned-ip) + // in the AWS Wavelength Developer Guide. + AssociateCarrierIpAddress *bool `locationName:"associateCarrierIpAddress" type:"boolean"` + // Indicates whether to associate a public IPv4 address with eth0 for a new // network interface. AssociatePublicIpAddress *bool `locationName:"associatePublicIpAddress" type:"boolean"` @@ -82954,6 +87247,12 @@ func (s LaunchTemplateInstanceNetworkInterfaceSpecification) GoString() string { return s.String() } +// SetAssociateCarrierIpAddress sets the AssociateCarrierIpAddress field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetAssociateCarrierIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecification { + s.AssociateCarrierIpAddress = &v + return s +} + // SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetAssociatePublicIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecification { s.AssociatePublicIpAddress = &v @@ -83036,6 +87335,14 @@ func (s *LaunchTemplateInstanceNetworkInterfaceSpecification) SetSubnetId(v stri type LaunchTemplateInstanceNetworkInterfaceSpecificationRequest struct { _ struct{} `type:"structure"` + // Associates a Carrier IP address with eth0 for a new network interface. + // + // Use this option when you launch an instance in a Wavelength Zone and want + // to associate a Carrier IP address with the network interface. For more information + // about Carrier IP addresses, see Carrier IP addresses (https://docs.aws.amazon.com/wavelength/latest/developerguide/how-wavelengths-work.html#provider-owned-ip) + // in the AWS Wavelength Developer Guide. + AssociateCarrierIpAddress *bool `type:"boolean"` + // Associates a public IPv4 address with eth0 for a new network interface. AssociatePublicIpAddress *bool `type:"boolean"` @@ -83095,6 +87402,12 @@ func (s LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) GoString() s return s.String() } +// SetAssociateCarrierIpAddress sets the AssociateCarrierIpAddress field's value. +func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetAssociateCarrierIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { + s.AssociateCarrierIpAddress = &v + return s +} + // SetAssociatePublicIpAddress sets the AssociatePublicIpAddress field's value. func (s *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest) SetAssociatePublicIpAddress(v bool) *LaunchTemplateInstanceNetworkInterfaceSpecificationRequest { s.AssociatePublicIpAddress = &v @@ -84067,7 +88380,7 @@ type LocalGateway struct { // The Amazon Resource Name (ARN) of the Outpost. OutpostArn *string `locationName:"outpostArn" type:"string"` - // The ID of the AWS account ID that owns the local gateway. 
+ // The AWS account ID that owns the local gateway. OwnerId *string `locationName:"ownerId" type:"string"` // The state of the local gateway. @@ -84124,12 +88437,18 @@ type LocalGatewayRoute struct { // The CIDR block used for destination matches. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + // The Amazon Resource Name (ARN) of the local gateway route table. + LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"` + // The ID of the local gateway route table. LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` // The ID of the virtual interface group. LocalGatewayVirtualInterfaceGroupId *string `locationName:"localGatewayVirtualInterfaceGroupId" type:"string"` + // The AWS account ID that owns the local gateway route. + OwnerId *string `locationName:"ownerId" type:"string"` + // The state of the route. State *string `locationName:"state" type:"string" enum:"LocalGatewayRouteState"` @@ -84153,6 +88472,12 @@ func (s *LocalGatewayRoute) SetDestinationCidrBlock(v string) *LocalGatewayRoute return s } +// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value. +func (s *LocalGatewayRoute) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRoute { + s.LocalGatewayRouteTableArn = &v + return s +} + // SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. func (s *LocalGatewayRoute) SetLocalGatewayRouteTableId(v string) *LocalGatewayRoute { s.LocalGatewayRouteTableId = &v @@ -84165,6 +88490,12 @@ func (s *LocalGatewayRoute) SetLocalGatewayVirtualInterfaceGroupId(v string) *Lo return s } +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayRoute) SetOwnerId(v string) *LocalGatewayRoute { + s.OwnerId = &v + return s +} + // SetState sets the State field's value. func (s *LocalGatewayRoute) SetState(v string) *LocalGatewayRoute { s.State = &v @@ -84184,12 +88515,18 @@ type LocalGatewayRouteTable struct { // The ID of the local gateway. LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + // The Amazon Resource Name (ARN) of the local gateway route table. + LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"` + // The ID of the local gateway route table. LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` // The Amazon Resource Name (ARN) of the Outpost. OutpostArn *string `locationName:"outpostArn" type:"string"` + // The AWS account ID that owns the local gateway route table. + OwnerId *string `locationName:"ownerId" type:"string"` + // The state of the local gateway route table. State *string `locationName:"state" type:"string"` @@ -84213,6 +88550,12 @@ func (s *LocalGatewayRouteTable) SetLocalGatewayId(v string) *LocalGatewayRouteT return s } +// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value. +func (s *LocalGatewayRouteTable) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRouteTable { + s.LocalGatewayRouteTableArn = &v + return s +} + // SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. func (s *LocalGatewayRouteTable) SetLocalGatewayRouteTableId(v string) *LocalGatewayRouteTable { s.LocalGatewayRouteTableId = &v @@ -84225,6 +88568,12 @@ func (s *LocalGatewayRouteTable) SetOutpostArn(v string) *LocalGatewayRouteTable return s } +// SetOwnerId sets the OwnerId field's value. 
+func (s *LocalGatewayRouteTable) SetOwnerId(v string) *LocalGatewayRouteTable { + s.OwnerId = &v + return s +} + // SetState sets the State field's value. func (s *LocalGatewayRouteTable) SetState(v string) *LocalGatewayRouteTable { s.State = &v @@ -84245,6 +88594,10 @@ type LocalGatewayRouteTableVirtualInterfaceGroupAssociation struct { // The ID of the local gateway. LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + // The Amazon Resource Name (ARN) of the local gateway route table for the virtual + // interface group. + LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"` + // The ID of the local gateway route table. LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` @@ -84254,6 +88607,9 @@ type LocalGatewayRouteTableVirtualInterfaceGroupAssociation struct { // The ID of the virtual interface group. LocalGatewayVirtualInterfaceGroupId *string `locationName:"localGatewayVirtualInterfaceGroupId" type:"string"` + // The AWS account ID that owns the local gateway virtual interface group association. + OwnerId *string `locationName:"ownerId" type:"string"` + // The state of the association. State *string `locationName:"state" type:"string"` @@ -84277,6 +88633,12 @@ func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGateway return s } +// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value. +func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { + s.LocalGatewayRouteTableArn = &v + return s +} + // SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGatewayRouteTableId(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { s.LocalGatewayRouteTableId = &v @@ -84295,6 +88657,12 @@ func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetLocalGateway return s } +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetOwnerId(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { + s.OwnerId = &v + return s +} + // SetState sets the State field's value. func (s *LocalGatewayRouteTableVirtualInterfaceGroupAssociation) SetState(v string) *LocalGatewayRouteTableVirtualInterfaceGroupAssociation { s.State = &v @@ -84314,12 +88682,18 @@ type LocalGatewayRouteTableVpcAssociation struct { // The ID of the local gateway. LocalGatewayId *string `locationName:"localGatewayId" type:"string"` + // The Amazon Resource Name (ARN) of the local gateway route table for the association. + LocalGatewayRouteTableArn *string `locationName:"localGatewayRouteTableArn" min:"1" type:"string"` + // The ID of the local gateway route table. LocalGatewayRouteTableId *string `locationName:"localGatewayRouteTableId" type:"string"` // The ID of the association. LocalGatewayRouteTableVpcAssociationId *string `locationName:"localGatewayRouteTableVpcAssociationId" type:"string"` + // The AWS account ID that owns the local gateway route table for the association. + OwnerId *string `locationName:"ownerId" type:"string"` + // The state of the association. 
State *string `locationName:"state" type:"string"` @@ -84346,6 +88720,12 @@ func (s *LocalGatewayRouteTableVpcAssociation) SetLocalGatewayId(v string) *Loca return s } +// SetLocalGatewayRouteTableArn sets the LocalGatewayRouteTableArn field's value. +func (s *LocalGatewayRouteTableVpcAssociation) SetLocalGatewayRouteTableArn(v string) *LocalGatewayRouteTableVpcAssociation { + s.LocalGatewayRouteTableArn = &v + return s +} + // SetLocalGatewayRouteTableId sets the LocalGatewayRouteTableId field's value. func (s *LocalGatewayRouteTableVpcAssociation) SetLocalGatewayRouteTableId(v string) *LocalGatewayRouteTableVpcAssociation { s.LocalGatewayRouteTableId = &v @@ -84358,6 +88738,12 @@ func (s *LocalGatewayRouteTableVpcAssociation) SetLocalGatewayRouteTableVpcAssoc return s } +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayRouteTableVpcAssociation) SetOwnerId(v string) *LocalGatewayRouteTableVpcAssociation { + s.OwnerId = &v + return s +} + // SetState sets the State field's value. func (s *LocalGatewayRouteTableVpcAssociation) SetState(v string) *LocalGatewayRouteTableVpcAssociation { s.State = &v @@ -84393,6 +88779,9 @@ type LocalGatewayVirtualInterface struct { // The ID of the virtual interface. LocalGatewayVirtualInterfaceId *string `locationName:"localGatewayVirtualInterfaceId" type:"string"` + // The AWS account ID that owns the local gateway virtual interface. + OwnerId *string `locationName:"ownerId" type:"string"` + // The peer address. PeerAddress *string `locationName:"peerAddress" type:"string"` @@ -84440,6 +88829,12 @@ func (s *LocalGatewayVirtualInterface) SetLocalGatewayVirtualInterfaceId(v strin return s } +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayVirtualInterface) SetOwnerId(v string) *LocalGatewayVirtualInterface { + s.OwnerId = &v + return s +} + // SetPeerAddress sets the PeerAddress field's value. func (s *LocalGatewayVirtualInterface) SetPeerAddress(v string) *LocalGatewayVirtualInterface { s.PeerAddress = &v @@ -84477,6 +88872,9 @@ type LocalGatewayVirtualInterfaceGroup struct { // The IDs of the virtual interfaces. LocalGatewayVirtualInterfaceIds []*string `locationName:"localGatewayVirtualInterfaceIdSet" locationNameList:"item" type:"list"` + // The AWS account ID that owns the local gateway virtual interface group. + OwnerId *string `locationName:"ownerId" type:"string"` + // The tags assigned to the virtual interface group. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` } @@ -84509,12 +88907,123 @@ func (s *LocalGatewayVirtualInterfaceGroup) SetLocalGatewayVirtualInterfaceIds(v return s } +// SetOwnerId sets the OwnerId field's value. +func (s *LocalGatewayVirtualInterfaceGroup) SetOwnerId(v string) *LocalGatewayVirtualInterfaceGroup { + s.OwnerId = &v + return s +} + // SetTags sets the Tags field's value. func (s *LocalGatewayVirtualInterfaceGroup) SetTags(v []*Tag) *LocalGatewayVirtualInterfaceGroup { s.Tags = v return s } +// Describes a managed prefix list. +type ManagedPrefixList struct { + _ struct{} `type:"structure"` + + // The IP address version. + AddressFamily *string `locationName:"addressFamily" type:"string"` + + // The maximum number of entries for the prefix list. + MaxEntries *int64 `locationName:"maxEntries" type:"integer"` + + // The ID of the owner of the prefix list. + OwnerId *string `locationName:"ownerId" type:"string"` + + // The Amazon Resource Name (ARN) for the prefix list. 
+ PrefixListArn *string `locationName:"prefixListArn" min:"1" type:"string"` + + // The ID of the prefix list. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + // The name of the prefix list. + PrefixListName *string `locationName:"prefixListName" type:"string"` + + // The state of the prefix list. + State *string `locationName:"state" type:"string" enum:"PrefixListState"` + + // The state message. + StateMessage *string `locationName:"stateMessage" type:"string"` + + // The tags for the prefix list. + Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` + + // The version of the prefix list. + Version *int64 `locationName:"version" type:"long"` +} + +// String returns the string representation +func (s ManagedPrefixList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ManagedPrefixList) GoString() string { + return s.String() +} + +// SetAddressFamily sets the AddressFamily field's value. +func (s *ManagedPrefixList) SetAddressFamily(v string) *ManagedPrefixList { + s.AddressFamily = &v + return s +} + +// SetMaxEntries sets the MaxEntries field's value. +func (s *ManagedPrefixList) SetMaxEntries(v int64) *ManagedPrefixList { + s.MaxEntries = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *ManagedPrefixList) SetOwnerId(v string) *ManagedPrefixList { + s.OwnerId = &v + return s +} + +// SetPrefixListArn sets the PrefixListArn field's value. +func (s *ManagedPrefixList) SetPrefixListArn(v string) *ManagedPrefixList { + s.PrefixListArn = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *ManagedPrefixList) SetPrefixListId(v string) *ManagedPrefixList { + s.PrefixListId = &v + return s +} + +// SetPrefixListName sets the PrefixListName field's value. +func (s *ManagedPrefixList) SetPrefixListName(v string) *ManagedPrefixList { + s.PrefixListName = &v + return s +} + +// SetState sets the State field's value. +func (s *ManagedPrefixList) SetState(v string) *ManagedPrefixList { + s.State = &v + return s +} + +// SetStateMessage sets the StateMessage field's value. +func (s *ManagedPrefixList) SetStateMessage(v string) *ManagedPrefixList { + s.StateMessage = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ManagedPrefixList) SetTags(v []*Tag) *ManagedPrefixList { + s.Tags = v + return s +} + +// SetVersion sets the Version field's value. +func (s *ManagedPrefixList) SetVersion(v int64) *ManagedPrefixList { + s.Version = &v + return s +} + // Describes the memory for the instance type. type MemoryInfo struct { _ struct{} `type:"structure"` @@ -84548,14 +89057,16 @@ type ModifyAvailabilityZoneGroupInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // The name of the Availability Zone Group. + // The name of the Availability Zone group, Local Zone group, or Wavelength + // Zone group. // // GroupName is a required field GroupName *string `type:"string" required:"true"` - // Indicates whether to enable or disable membership. The valid values are opted-in. - // You must contact AWS Support (https://console.aws.amazon.com/support/home#/case/create%3FissueType=customer-service%26serviceCode=general-info%26getting-started%26categoryCode=using-aws%26services) - // to disable an Availability Zone group. + // Indicates whether you are opted in to the Local Zone group or Wavelength + // Zone group. The only valid value is opted-in. 
You must contact AWS Support + // (https://console.aws.amazon.com/support/home#/case/create%3FissueType=customer-service%26serviceCode=general-info%26getting-started%26categoryCode=using-aws%26services) + // to opt out of a Local Zone group, or Wavelength Zone group. // // OptInStatus is a required field OptInStatus *string `type:"string" required:"true" enum:"ModifyAvailabilityZoneOptInStatus"` @@ -85111,6 +89622,9 @@ type ModifyFleetInput struct { // FleetId is a required field FleetId *string `type:"string" required:"true"` + // The launch template and overrides. + LaunchTemplateConfigs []*FleetLaunchTemplateConfigRequest `locationName:"LaunchTemplateConfig" locationNameList:"item" type:"list"` + // The size of the EC2 Fleet. // // TargetCapacitySpecification is a required field @@ -85136,6 +89650,16 @@ func (s *ModifyFleetInput) Validate() error { if s.TargetCapacitySpecification == nil { invalidParams.Add(request.NewErrParamRequired("TargetCapacitySpecification")) } + if s.LaunchTemplateConfigs != nil { + for i, v := range s.LaunchTemplateConfigs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchTemplateConfigs", i), err.(request.ErrInvalidParams)) + } + } + } if s.TargetCapacitySpecification != nil { if err := s.TargetCapacitySpecification.Validate(); err != nil { invalidParams.AddNested("TargetCapacitySpecification", err.(request.ErrInvalidParams)) @@ -85166,6 +89690,12 @@ func (s *ModifyFleetInput) SetFleetId(v string) *ModifyFleetInput { return s } +// SetLaunchTemplateConfigs sets the LaunchTemplateConfigs field's value. +func (s *ModifyFleetInput) SetLaunchTemplateConfigs(v []*FleetLaunchTemplateConfigRequest) *ModifyFleetInput { + s.LaunchTemplateConfigs = v + return s +} + // SetTargetCapacitySpecification sets the TargetCapacitySpecification field's value. func (s *ModifyFleetInput) SetTargetCapacitySpecification(v *TargetCapacitySpecificationRequest) *ModifyFleetInput { s.TargetCapacitySpecification = v @@ -85786,7 +90316,7 @@ type ModifyInstanceAttributeInput struct { // // To add instance store volumes to an Amazon EBS-backed instance, you must // add them when you launch the instance. For more information, see Updating - // the Block Device Mapping when Launching an Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM) + // the block device mapping when launching an instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html#Using_OverridingAMIBDM) // in the Amazon Elastic Compute Cloud User Guide. BlockDeviceMappings []*InstanceBlockDeviceMappingSpecification `locationName:"blockDeviceMapping" locationNameList:"item" type:"list"` @@ -85829,7 +90359,7 @@ type ModifyInstanceAttributeInput struct { InstanceInitiatedShutdownBehavior *AttributeValue `locationName:"instanceInitiatedShutdownBehavior" type:"structure"` // Changes the instance type to the specified value. For more information, see - // Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + // Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). // If the instance type is not valid, the error returned is InvalidInstanceAttributeValue. 
InstanceType *AttributeValue `locationName:"instanceType" type:"structure"` @@ -86637,6 +91167,135 @@ func (s *ModifyLaunchTemplateOutput) SetLaunchTemplate(v *LaunchTemplate) *Modif return s } +type ModifyManagedPrefixListInput struct { + _ struct{} `type:"structure"` + + // One or more entries to add to the prefix list. + AddEntries []*AddPrefixListEntry `locationName:"AddEntry" type:"list"` + + // The current version of the prefix list. + CurrentVersion *int64 `type:"long"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // A name for the prefix list. + PrefixListName *string `type:"string"` + + // One or more entries to remove from the prefix list. + RemoveEntries []*RemovePrefixListEntry `locationName:"RemoveEntry" type:"list"` +} + +// String returns the string representation +func (s ModifyManagedPrefixListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyManagedPrefixListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyManagedPrefixListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyManagedPrefixListInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.AddEntries != nil { + for i, v := range s.AddEntries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddEntries", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RemoveEntries != nil { + for i, v := range s.RemoveEntries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RemoveEntries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddEntries sets the AddEntries field's value. +func (s *ModifyManagedPrefixListInput) SetAddEntries(v []*AddPrefixListEntry) *ModifyManagedPrefixListInput { + s.AddEntries = v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *ModifyManagedPrefixListInput) SetCurrentVersion(v int64) *ModifyManagedPrefixListInput { + s.CurrentVersion = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyManagedPrefixListInput) SetDryRun(v bool) *ModifyManagedPrefixListInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *ModifyManagedPrefixListInput) SetPrefixListId(v string) *ModifyManagedPrefixListInput { + s.PrefixListId = &v + return s +} + +// SetPrefixListName sets the PrefixListName field's value. +func (s *ModifyManagedPrefixListInput) SetPrefixListName(v string) *ModifyManagedPrefixListInput { + s.PrefixListName = &v + return s +} + +// SetRemoveEntries sets the RemoveEntries field's value. 
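ModifyManagedPrefixListInput above carries the entries to add or remove along with the prefix list's current version. A minimal sketch, illustrative only; the prefix list ID, version, and CIDRs are placeholders, and CurrentVersion generally must match the version reported by DescribeManagedPrefixLists when entries are changed:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.ModifyManagedPrefixList(&ec2.ModifyManagedPrefixListInput{
		PrefixListId:   aws.String("pl-0123456789abcdef0"), // placeholder ID
		CurrentVersion: aws.Int64(1),                       // placeholder version
		AddEntries: []*ec2.AddPrefixListEntry{
			{Cidr: aws.String("10.0.2.0/24"), Description: aws.String("new range")},
		},
		RemoveEntries: []*ec2.RemovePrefixListEntry{
			{Cidr: aws.String("10.0.1.0/24")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The returned ManagedPrefixList reports the new state and version.
	log.Println(aws.StringValue(out.PrefixList.State), aws.Int64Value(out.PrefixList.Version))
}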
+func (s *ModifyManagedPrefixListInput) SetRemoveEntries(v []*RemovePrefixListEntry) *ModifyManagedPrefixListInput { + s.RemoveEntries = v + return s +} + +type ModifyManagedPrefixListOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s ModifyManagedPrefixListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyManagedPrefixListOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. +func (s *ModifyManagedPrefixListOutput) SetPrefixList(v *ManagedPrefixList) *ModifyManagedPrefixListOutput { + s.PrefixList = v + return s +} + // Contains the parameters for ModifyNetworkInterfaceAttribute. type ModifyNetworkInterfaceAttributeInput struct { _ struct{} `type:"structure"` @@ -86952,6 +91611,12 @@ type ModifySpotFleetRequestInput struct { // the Spot Fleet. ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + // The launch template and overrides. You can only use this parameter if you + // specified a launch template (LaunchTemplateConfigs) in your Spot Fleet request. + // If you specified LaunchSpecifications in your Spot Fleet request, then omit + // this parameter. + LaunchTemplateConfigs []*LaunchTemplateConfig `locationName:"LaunchTemplateConfig" locationNameList:"item" type:"list"` + // The number of On-Demand Instances in the fleet. OnDemandTargetCapacity *int64 `type:"integer"` @@ -86980,6 +91645,16 @@ func (s *ModifySpotFleetRequestInput) Validate() error { if s.SpotFleetRequestId == nil { invalidParams.Add(request.NewErrParamRequired("SpotFleetRequestId")) } + if s.LaunchTemplateConfigs != nil { + for i, v := range s.LaunchTemplateConfigs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LaunchTemplateConfigs", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -86993,6 +91668,12 @@ func (s *ModifySpotFleetRequestInput) SetExcessCapacityTerminationPolicy(v strin return s } +// SetLaunchTemplateConfigs sets the LaunchTemplateConfigs field's value. +func (s *ModifySpotFleetRequestInput) SetLaunchTemplateConfigs(v []*LaunchTemplateConfig) *ModifySpotFleetRequestInput { + s.LaunchTemplateConfigs = v + return s +} + // SetOnDemandTargetCapacity sets the OnDemandTargetCapacity field's value. func (s *ModifySpotFleetRequestInput) SetOnDemandTargetCapacity(v int64) *ModifySpotFleetRequestInput { s.OnDemandTargetCapacity = &v @@ -87048,8 +91729,20 @@ type ModifySubnetAttributeInput struct { // or later of the Amazon EC2 API. AssignIpv6AddressOnCreation *AttributeBooleanValue `type:"structure"` - // Specify true to indicate that ENIs attached to instances created in the specified - // subnet should be assigned a public IPv4 address. + // The customer-owned IPv4 address pool associated with the subnet. + // + // You must set this value when you specify true for MapCustomerOwnedIpOnLaunch. + CustomerOwnedIpv4Pool *string `type:"string"` + + // Specify true to indicate that network interfaces attached to instances created + // in the specified subnet should be assigned a customer-owned IPv4 address. 
+ // + // When this value is true, you must specify the customer-owned IP pool using + // CustomerOwnedIpv4Pool. + MapCustomerOwnedIpOnLaunch *AttributeBooleanValue `type:"structure"` + + // Specify true to indicate that network interfaces attached to instances created + // in the specified subnet should be assigned a public IPv4 address. MapPublicIpOnLaunch *AttributeBooleanValue `type:"structure"` // The ID of the subnet. @@ -87087,6 +91780,18 @@ func (s *ModifySubnetAttributeInput) SetAssignIpv6AddressOnCreation(v *Attribute return s } +// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value. +func (s *ModifySubnetAttributeInput) SetCustomerOwnedIpv4Pool(v string) *ModifySubnetAttributeInput { + s.CustomerOwnedIpv4Pool = &v + return s +} + +// SetMapCustomerOwnedIpOnLaunch sets the MapCustomerOwnedIpOnLaunch field's value. +func (s *ModifySubnetAttributeInput) SetMapCustomerOwnedIpOnLaunch(v *AttributeBooleanValue) *ModifySubnetAttributeInput { + s.MapCustomerOwnedIpOnLaunch = v + return s +} + // SetMapPublicIpOnLaunch sets the MapPublicIpOnLaunch field's value. func (s *ModifySubnetAttributeInput) SetMapPublicIpOnLaunch(v *AttributeBooleanValue) *ModifySubnetAttributeInput { s.MapPublicIpOnLaunch = v @@ -87520,6 +92225,282 @@ func (s *ModifyTrafficMirrorSessionOutput) SetTrafficMirrorSession(v *TrafficMir return s } +type ModifyTransitGatewayInput struct { + _ struct{} `type:"structure"` + + // The description for the transit gateway. + Description *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The options to modify. + Options *ModifyTransitGatewayOptions `type:"structure"` + + // The ID of the transit gateway. + // + // TransitGatewayId is a required field + TransitGatewayId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyTransitGatewayInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyTransitGatewayInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTransitGatewayInput"} + if s.TransitGatewayId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *ModifyTransitGatewayInput) SetDescription(v string) *ModifyTransitGatewayInput { + s.Description = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyTransitGatewayInput) SetDryRun(v bool) *ModifyTransitGatewayInput { + s.DryRun = &v + return s +} + +// SetOptions sets the Options field's value. +func (s *ModifyTransitGatewayInput) SetOptions(v *ModifyTransitGatewayOptions) *ModifyTransitGatewayInput { + s.Options = v + return s +} + +// SetTransitGatewayId sets the TransitGatewayId field's value. +func (s *ModifyTransitGatewayInput) SetTransitGatewayId(v string) *ModifyTransitGatewayInput { + s.TransitGatewayId = &v + return s +} + +// The transit gateway options. 
+type ModifyTransitGatewayOptions struct { + _ struct{} `type:"structure"` + + // The ID of the default association route table. + AssociationDefaultRouteTableId *string `type:"string"` + + // Enable or disable automatic acceptance of attachment requests. + AutoAcceptSharedAttachments *string `type:"string" enum:"AutoAcceptSharedAttachmentsValue"` + + // Enable or disable automatic association with the default association route + // table. + DefaultRouteTableAssociation *string `type:"string" enum:"DefaultRouteTableAssociationValue"` + + // Enable or disable automatic propagation of routes to the default propagation + // route table. + DefaultRouteTablePropagation *string `type:"string" enum:"DefaultRouteTablePropagationValue"` + + // Enable or disable DNS support. + DnsSupport *string `type:"string" enum:"DnsSupportValue"` + + // The ID of the default propagation route table. + PropagationDefaultRouteTableId *string `type:"string"` + + // Enable or disable Equal Cost Multipath Protocol support. + VpnEcmpSupport *string `type:"string" enum:"VpnEcmpSupportValue"` +} + +// String returns the string representation +func (s ModifyTransitGatewayOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayOptions) GoString() string { + return s.String() +} + +// SetAssociationDefaultRouteTableId sets the AssociationDefaultRouteTableId field's value. +func (s *ModifyTransitGatewayOptions) SetAssociationDefaultRouteTableId(v string) *ModifyTransitGatewayOptions { + s.AssociationDefaultRouteTableId = &v + return s +} + +// SetAutoAcceptSharedAttachments sets the AutoAcceptSharedAttachments field's value. +func (s *ModifyTransitGatewayOptions) SetAutoAcceptSharedAttachments(v string) *ModifyTransitGatewayOptions { + s.AutoAcceptSharedAttachments = &v + return s +} + +// SetDefaultRouteTableAssociation sets the DefaultRouteTableAssociation field's value. +func (s *ModifyTransitGatewayOptions) SetDefaultRouteTableAssociation(v string) *ModifyTransitGatewayOptions { + s.DefaultRouteTableAssociation = &v + return s +} + +// SetDefaultRouteTablePropagation sets the DefaultRouteTablePropagation field's value. +func (s *ModifyTransitGatewayOptions) SetDefaultRouteTablePropagation(v string) *ModifyTransitGatewayOptions { + s.DefaultRouteTablePropagation = &v + return s +} + +// SetDnsSupport sets the DnsSupport field's value. +func (s *ModifyTransitGatewayOptions) SetDnsSupport(v string) *ModifyTransitGatewayOptions { + s.DnsSupport = &v + return s +} + +// SetPropagationDefaultRouteTableId sets the PropagationDefaultRouteTableId field's value. +func (s *ModifyTransitGatewayOptions) SetPropagationDefaultRouteTableId(v string) *ModifyTransitGatewayOptions { + s.PropagationDefaultRouteTableId = &v + return s +} + +// SetVpnEcmpSupport sets the VpnEcmpSupport field's value. +func (s *ModifyTransitGatewayOptions) SetVpnEcmpSupport(v string) *ModifyTransitGatewayOptions { + s.VpnEcmpSupport = &v + return s +} + +type ModifyTransitGatewayOutput struct { + _ struct{} `type:"structure"` + + // Describes a transit gateway. + TransitGateway *TransitGateway `locationName:"transitGateway" type:"structure"` +} + +// String returns the string representation +func (s ModifyTransitGatewayOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayOutput) GoString() string { + return s.String() +} + +// SetTransitGateway sets the TransitGateway field's value. 
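ModifyTransitGatewayInput and ModifyTransitGatewayOptions above allow a transit gateway's description and options to be changed in place. A minimal sketch, illustrative only; the gateway ID is a placeholder and the option values use the enable/disable enum strings:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.ModifyTransitGateway(&ec2.ModifyTransitGatewayInput{
		TransitGatewayId: aws.String("tgw-0123456789abcdef0"), // placeholder ID
		Description:      aws.String("shared services gateway"),
		Options: &ec2.ModifyTransitGatewayOptions{
			AutoAcceptSharedAttachments: aws.String("enable"),
			DnsSupport:                  aws.String("enable"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	log.Println(aws.StringValue(out.TransitGateway.State))
}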
+func (s *ModifyTransitGatewayOutput) SetTransitGateway(v *TransitGateway) *ModifyTransitGatewayOutput { + s.TransitGateway = v + return s +} + +type ModifyTransitGatewayPrefixListReferenceInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to drop traffic that matches this route. + Blackhole *bool `type:"boolean"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The ID of the attachment to which traffic is routed. + TransitGatewayAttachmentId *string `type:"string"` + + // The ID of the transit gateway route table. + // + // TransitGatewayRouteTableId is a required field + TransitGatewayRouteTableId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyTransitGatewayPrefixListReferenceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayPrefixListReferenceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyTransitGatewayPrefixListReferenceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyTransitGatewayPrefixListReferenceInput"} + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.TransitGatewayRouteTableId == nil { + invalidParams.Add(request.NewErrParamRequired("TransitGatewayRouteTableId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBlackhole sets the Blackhole field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetBlackhole(v bool) *ModifyTransitGatewayPrefixListReferenceInput { + s.Blackhole = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetDryRun(v bool) *ModifyTransitGatewayPrefixListReferenceInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetPrefixListId(v string) *ModifyTransitGatewayPrefixListReferenceInput { + s.PrefixListId = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetTransitGatewayAttachmentId(v string) *ModifyTransitGatewayPrefixListReferenceInput { + s.TransitGatewayAttachmentId = &v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *ModifyTransitGatewayPrefixListReferenceInput) SetTransitGatewayRouteTableId(v string) *ModifyTransitGatewayPrefixListReferenceInput { + s.TransitGatewayRouteTableId = &v + return s +} + +type ModifyTransitGatewayPrefixListReferenceOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list reference. 
+ TransitGatewayPrefixListReference *TransitGatewayPrefixListReference `locationName:"transitGatewayPrefixListReference" type:"structure"` +} + +// String returns the string representation +func (s ModifyTransitGatewayPrefixListReferenceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyTransitGatewayPrefixListReferenceOutput) GoString() string { + return s.String() +} + +// SetTransitGatewayPrefixListReference sets the TransitGatewayPrefixListReference field's value. +func (s *ModifyTransitGatewayPrefixListReferenceOutput) SetTransitGatewayPrefixListReference(v *TransitGatewayPrefixListReference) *ModifyTransitGatewayPrefixListReferenceOutput { + s.TransitGatewayPrefixListReference = v + return s +} + type ModifyTransitGatewayVpcAttachmentInput struct { _ struct{} `type:"structure"` @@ -87740,8 +92721,8 @@ type ModifyVolumeInput struct { // The target IOPS rate of the volume. // - // This is only valid for Provisioned IOPS SSD (io1) volumes. For more information, - // see Provisioned IOPS SSD (io1) Volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops). + // This is only valid for Provisioned IOPS SSD (io1 and io2) volumes. For moreinformation, + // see Provisioned IOPS SSD (io1 and io2) volumes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html#EBSVolumeTypes_piops). // // Default: If no IOPS value is specified, the existing value is retained. Iops *int64 `type:"integer"` @@ -88653,6 +93634,123 @@ func (s *ModifyVpnConnectionInput) SetVpnGatewayId(v string) *ModifyVpnConnectio return s } +type ModifyVpnConnectionOptionsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection. + // + // Default: 0.0.0.0/0 + LocalIpv4NetworkCidr *string `type:"string"` + + // The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection. + // + // Default: ::/0 + LocalIpv6NetworkCidr *string `type:"string"` + + // The IPv4 CIDR on the AWS side of the VPN connection. + // + // Default: 0.0.0.0/0 + RemoteIpv4NetworkCidr *string `type:"string"` + + // The IPv6 CIDR on the AWS side of the VPN connection. + // + // Default: ::/0 + RemoteIpv6NetworkCidr *string `type:"string"` + + // The ID of the Site-to-Site VPN connection. + // + // VpnConnectionId is a required field + VpnConnectionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyVpnConnectionOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnConnectionOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyVpnConnectionOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyVpnConnectionOptionsInput"} + if s.VpnConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("VpnConnectionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDryRun sets the DryRun field's value. 
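ModifyVpnConnectionOptionsInput above updates the local and remote network CIDRs of an existing Site-to-Site VPN connection. A minimal sketch, illustrative only; the connection ID and CIDRs are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.ModifyVpnConnectionOptions(&ec2.ModifyVpnConnectionOptionsInput{
		VpnConnectionId:       aws.String("vpn-0123456789abcdef0"), // placeholder ID
		LocalIpv4NetworkCidr:  aws.String("10.0.0.0/16"),
		RemoteIpv4NetworkCidr: aws.String("172.31.0.0/16"),
	})
	if err != nil {
		log.Fatal(err)
	}

	log.Println(aws.StringValue(out.VpnConnection.State))
}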
+func (s *ModifyVpnConnectionOptionsInput) SetDryRun(v bool) *ModifyVpnConnectionOptionsInput { + s.DryRun = &v + return s +} + +// SetLocalIpv4NetworkCidr sets the LocalIpv4NetworkCidr field's value. +func (s *ModifyVpnConnectionOptionsInput) SetLocalIpv4NetworkCidr(v string) *ModifyVpnConnectionOptionsInput { + s.LocalIpv4NetworkCidr = &v + return s +} + +// SetLocalIpv6NetworkCidr sets the LocalIpv6NetworkCidr field's value. +func (s *ModifyVpnConnectionOptionsInput) SetLocalIpv6NetworkCidr(v string) *ModifyVpnConnectionOptionsInput { + s.LocalIpv6NetworkCidr = &v + return s +} + +// SetRemoteIpv4NetworkCidr sets the RemoteIpv4NetworkCidr field's value. +func (s *ModifyVpnConnectionOptionsInput) SetRemoteIpv4NetworkCidr(v string) *ModifyVpnConnectionOptionsInput { + s.RemoteIpv4NetworkCidr = &v + return s +} + +// SetRemoteIpv6NetworkCidr sets the RemoteIpv6NetworkCidr field's value. +func (s *ModifyVpnConnectionOptionsInput) SetRemoteIpv6NetworkCidr(v string) *ModifyVpnConnectionOptionsInput { + s.RemoteIpv6NetworkCidr = &v + return s +} + +// SetVpnConnectionId sets the VpnConnectionId field's value. +func (s *ModifyVpnConnectionOptionsInput) SetVpnConnectionId(v string) *ModifyVpnConnectionOptionsInput { + s.VpnConnectionId = &v + return s +} + +type ModifyVpnConnectionOptionsOutput struct { + _ struct{} `type:"structure"` + + // Describes a VPN connection. + VpnConnection *VpnConnection `locationName:"vpnConnection" type:"structure"` +} + +// String returns the string representation +func (s ModifyVpnConnectionOptionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyVpnConnectionOptionsOutput) GoString() string { + return s.String() +} + +// SetVpnConnection sets the VpnConnection field's value. +func (s *ModifyVpnConnectionOptionsOutput) SetVpnConnection(v *VpnConnection) *ModifyVpnConnectionOptionsOutput { + s.VpnConnection = v + return s +} + type ModifyVpnConnectionOutput struct { _ struct{} `type:"structure"` @@ -88868,6 +93966,14 @@ func (s *ModifyVpnTunnelOptionsOutput) SetVpnConnection(v *VpnConnection) *Modif type ModifyVpnTunnelOptionsSpecification struct { _ struct{} `type:"structure"` + // The action to take after DPD timeout occurs. Specify restart to restart the + // IKE initiation. Specify clear to end the IKE session. + // + // Valid Values: clear | none | restart + // + // Default: clear + DPDTimeoutAction *string `type:"string"` + // The number of seconds after which a DPD timeout occurs. // // Constraints: A value between 0 and 30. @@ -88883,19 +93989,19 @@ type ModifyVpnTunnelOptionsSpecification struct { // One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel // for phase 1 IKE negotiations. // - // Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24 + // Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 Phase1DHGroupNumbers []*Phase1DHGroupNumbersRequestListValue `locationName:"Phase1DHGroupNumber" locationNameList:"item" type:"list"` // One or more encryption algorithms that are permitted for the VPN tunnel for // phase 1 IKE negotiations. // - // Valid values: AES128 | AES256 + // Valid values: AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16 Phase1EncryptionAlgorithms []*Phase1EncryptionAlgorithmsRequestListValue `locationName:"Phase1EncryptionAlgorithm" locationNameList:"item" type:"list"` // One or more integrity algorithms that are permitted for the VPN tunnel for // phase 1 IKE negotiations. 
// - // Valid values: SHA1 | SHA2-256 + // Valid values: SHA1 | SHA2-256 | SHA2-384 | SHA2-512 Phase1IntegrityAlgorithms []*Phase1IntegrityAlgorithmsRequestListValue `locationName:"Phase1IntegrityAlgorithm" locationNameList:"item" type:"list"` // The lifetime for phase 1 of the IKE negotiation, in seconds. @@ -88908,19 +94014,19 @@ type ModifyVpnTunnelOptionsSpecification struct { // One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel // for phase 2 IKE negotiations. // - // Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24 + // Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 Phase2DHGroupNumbers []*Phase2DHGroupNumbersRequestListValue `locationName:"Phase2DHGroupNumber" locationNameList:"item" type:"list"` // One or more encryption algorithms that are permitted for the VPN tunnel for // phase 2 IKE negotiations. // - // Valid values: AES128 | AES256 + // Valid values: AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16 Phase2EncryptionAlgorithms []*Phase2EncryptionAlgorithmsRequestListValue `locationName:"Phase2EncryptionAlgorithm" locationNameList:"item" type:"list"` // One or more integrity algorithms that are permitted for the VPN tunnel for // phase 2 IKE negotiations. // - // Valid values: SHA1 | SHA2-256 + // Valid values: SHA1 | SHA2-256 | SHA2-384 | SHA2-512 Phase2IntegrityAlgorithms []*Phase2IntegrityAlgorithmsRequestListValue `locationName:"Phase2IntegrityAlgorithm" locationNameList:"item" type:"list"` // The lifetime for phase 2 of the IKE negotiation, in seconds. @@ -88963,7 +94069,16 @@ type ModifyVpnTunnelOptionsSpecification struct { // Default: 1024 ReplayWindowSize *int64 `type:"integer"` - // The range of inside IP addresses for the tunnel. Any specified CIDR blocks + // The action to take when the establishing the tunnel for the VPN connection. + // By default, your customer gateway device must initiate the IKE negotiation + // and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation. + // + // Valid Values: add | start + // + // Default: add + StartupAction *string `type:"string"` + + // The range of inside IPv4 addresses for the tunnel. Any specified CIDR blocks // must be unique across all VPN connections that use the same virtual private // gateway. // @@ -88984,6 +94099,12 @@ type ModifyVpnTunnelOptionsSpecification struct { // // * 169.254.169.252/30 TunnelInsideCidr *string `type:"string"` + + // The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks + // must be unique across all VPN connections that use the same transit gateway. + // + // Constraints: A size /126 CIDR block from the local fd00::/8 range. + TunnelInsideIpv6Cidr *string `type:"string"` } // String returns the string representation @@ -88996,6 +94117,12 @@ func (s ModifyVpnTunnelOptionsSpecification) GoString() string { return s.String() } +// SetDPDTimeoutAction sets the DPDTimeoutAction field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetDPDTimeoutAction(v string) *ModifyVpnTunnelOptionsSpecification { + s.DPDTimeoutAction = &v + return s +} + // SetDPDTimeoutSeconds sets the DPDTimeoutSeconds field's value. func (s *ModifyVpnTunnelOptionsSpecification) SetDPDTimeoutSeconds(v int64) *ModifyVpnTunnelOptionsSpecification { s.DPDTimeoutSeconds = &v @@ -89080,12 +94207,24 @@ func (s *ModifyVpnTunnelOptionsSpecification) SetReplayWindowSize(v int64) *Modi return s } +// SetStartupAction sets the StartupAction field's value. 
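The new DPDTimeoutAction, StartupAction, and TunnelInsideIpv6Cidr fields above are applied through ModifyVpnTunnelOptions. A minimal sketch, illustrative only; it assumes ModifyVpnTunnelOptionsInput's VpnConnectionId, VpnTunnelOutsideIpAddress, and TunnelOptions fields (not shown in this hunk), and the IDs and addresses are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.ModifyVpnTunnelOptions(&ec2.ModifyVpnTunnelOptionsInput{
		VpnConnectionId:           aws.String("vpn-0123456789abcdef0"), // placeholder ID
		VpnTunnelOutsideIpAddress: aws.String("203.0.113.17"),          // placeholder tunnel address
		TunnelOptions: &ec2.ModifyVpnTunnelOptionsSpecification{
			// Restart IKE on DPD timeout and let AWS initiate the tunnel.
			DPDTimeoutAction: aws.String("restart"),
			StartupAction:    aws.String("start"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	log.Println(aws.StringValue(out.VpnConnection.State))
}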
+func (s *ModifyVpnTunnelOptionsSpecification) SetStartupAction(v string) *ModifyVpnTunnelOptionsSpecification { + s.StartupAction = &v + return s +} + // SetTunnelInsideCidr sets the TunnelInsideCidr field's value. func (s *ModifyVpnTunnelOptionsSpecification) SetTunnelInsideCidr(v string) *ModifyVpnTunnelOptionsSpecification { s.TunnelInsideCidr = &v return s } +// SetTunnelInsideIpv6Cidr sets the TunnelInsideIpv6Cidr field's value. +func (s *ModifyVpnTunnelOptionsSpecification) SetTunnelInsideIpv6Cidr(v string) *ModifyVpnTunnelOptionsSpecification { + s.TunnelInsideIpv6Cidr = &v + return s +} + type MonitorInstancesInput struct { _ struct{} `type:"structure"` @@ -89719,6 +94858,9 @@ func (s *NetworkAclEntry) SetRuleNumber(v int64) *NetworkAclEntry { type NetworkInfo struct { _ struct{} `type:"structure"` + // Indicates whether Elastic Fabric Adapter (EFA) is supported. + EfaSupported *bool `locationName:"efaSupported" type:"boolean"` + // Indicates whether Elastic Network Adapter (ENA) is supported. EnaSupport *string `locationName:"enaSupport" type:"string" enum:"EnaSupport"` @@ -89748,6 +94890,12 @@ func (s NetworkInfo) GoString() string { return s.String() } +// SetEfaSupported sets the EfaSupported field's value. +func (s *NetworkInfo) SetEfaSupported(v bool) *NetworkInfo { + s.EfaSupported = &v + return s +} + // SetEnaSupport sets the EnaSupport field's value. func (s *NetworkInfo) SetEnaSupport(v string) *NetworkInfo { s.EnaSupport = &v @@ -89990,7 +95138,9 @@ func (s *NetworkInterface) SetVpcId(v string) *NetworkInterface { return s } -// Describes association information for an Elastic IP address (IPv4 only). +// Describes association information for an Elastic IP address (IPv4 only), +// or a Carrier IP address (for a network interface which resides in a subnet +// in a Wavelength Zone). type NetworkInterfaceAssociation struct { _ struct{} `type:"structure"` @@ -90000,13 +95150,23 @@ type NetworkInterfaceAssociation struct { // The association ID. AssociationId *string `locationName:"associationId" type:"string"` + // The carrier IP address associated with the network interface. + // + // This option is only available when the network interface is in a subnet which + // is associated with a Wavelength Zone. + CarrierIp *string `locationName:"carrierIp" type:"string"` + + // The customer-owned IP address associated with the network interface. + CustomerOwnedIp *string `locationName:"customerOwnedIp" type:"string"` + // The ID of the Elastic IP address owner. IpOwnerId *string `locationName:"ipOwnerId" type:"string"` // The public DNS name. PublicDnsName *string `locationName:"publicDnsName" type:"string"` - // The address of the Elastic IP address bound to the network interface. + // The address of the Elastic IP address or Carrier IP address bound to the + // network interface. PublicIp *string `locationName:"publicIp" type:"string"` } @@ -90032,6 +95192,18 @@ func (s *NetworkInterfaceAssociation) SetAssociationId(v string) *NetworkInterfa return s } +// SetCarrierIp sets the CarrierIp field's value. +func (s *NetworkInterfaceAssociation) SetCarrierIp(v string) *NetworkInterfaceAssociation { + s.CarrierIp = &v + return s +} + +// SetCustomerOwnedIp sets the CustomerOwnedIp field's value. +func (s *NetworkInterfaceAssociation) SetCustomerOwnedIp(v string) *NetworkInterfaceAssociation { + s.CustomerOwnedIp = &v + return s +} + // SetIpOwnerId sets the IpOwnerId field's value. 
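The CarrierIp and CustomerOwnedIp fields added to NetworkInterfaceAssociation above can be read back through DescribeNetworkInterfaces. A minimal sketch, illustrative only; the network interface ID is a placeholder and Association is nil when no public, carrier, or customer-owned address is associated:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.Must(session.NewSession()))

	out, err := svc.DescribeNetworkInterfaces(&ec2.DescribeNetworkInterfacesInput{
		NetworkInterfaceIds: []*string{aws.String("eni-0123456789abcdef0")}, // placeholder ID
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, eni := range out.NetworkInterfaces {
		if eni.Association == nil {
			continue
		}
		// CarrierIp is set for interfaces in Wavelength Zone subnets;
		// CustomerOwnedIp for interfaces using a customer-owned IP pool.
		fmt.Println(aws.StringValue(eni.NetworkInterfaceId),
			aws.StringValue(eni.Association.CarrierIp),
			aws.StringValue(eni.Association.CustomerOwnedIp))
	}
}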
func (s *NetworkInterfaceAssociation) SetIpOwnerId(v string) *NetworkInterfaceAssociation { s.IpOwnerId = &v @@ -91040,9 +96212,10 @@ type Placement struct { _ struct{} `type:"structure"` // The affinity setting for the instance on the Dedicated Host. This parameter - // is not supported for the ImportInstance command. + // is not supported for the ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) + // command. // - // This parameter is not supported by . + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). Affinity *string `locationName:"affinity" type:"string"` // The Availability Zone of the instance. @@ -91050,41 +96223,43 @@ type Placement struct { // If not specified, an Availability Zone will be automatically chosen for you // based on the load balancing criteria for the Region. // - // This parameter is not supported by . + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). AvailabilityZone *string `locationName:"availabilityZone" type:"string"` // The name of the placement group the instance is in. GroupName *string `locationName:"groupName" type:"string"` // The ID of the Dedicated Host on which the instance resides. This parameter - // is not supported for the ImportInstance command. + // is not supported for the ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) + // command. // - // This parameter is not supported by . + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). HostId *string `locationName:"hostId" type:"string"` // The ARN of the host resource group in which to launch the instances. If you // specify a host resource group ARN, omit the Tenancy parameter or set it to // host. // - // This parameter is not supported by . + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). HostResourceGroupArn *string `locationName:"hostResourceGroupArn" type:"string"` // The number of the partition the instance is in. Valid only if the placement // group strategy is set to partition. // - // This parameter is not supported by . + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). PartitionNumber *int64 `locationName:"partitionNumber" type:"integer"` // Reserved for future use. // - // This parameter is not supported by . + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). SpreadDomain *string `locationName:"spreadDomain" type:"string"` // The tenancy of the instance (if the instance is running in a VPC). An instance // with a tenancy of dedicated runs on single-tenant hardware. The host tenancy - // is not supported for the ImportInstance command. + // is not supported for the ImportInstance (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ImportInstance.html) + // command. // - // This parameter is not supported by . + // This parameter is not supported by CreateFleet (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateFleet). 
Tenancy *string `locationName:"tenancy" type:"string" enum:"Tenancy"` } @@ -91362,6 +96537,72 @@ func (s *PrefixList) SetPrefixListName(v string) *PrefixList { return s } +// Describes the resource with which a prefix list is associated. +type PrefixListAssociation struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The owner of the resource. + ResourceOwner *string `locationName:"resourceOwner" type:"string"` +} + +// String returns the string representation +func (s PrefixListAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListAssociation) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *PrefixListAssociation) SetResourceId(v string) *PrefixListAssociation { + s.ResourceId = &v + return s +} + +// SetResourceOwner sets the ResourceOwner field's value. +func (s *PrefixListAssociation) SetResourceOwner(v string) *PrefixListAssociation { + s.ResourceOwner = &v + return s +} + +// Describes a prefix list entry. +type PrefixListEntry struct { + _ struct{} `type:"structure"` + + // The CIDR block. + Cidr *string `locationName:"cidr" type:"string"` + + // The description. + Description *string `locationName:"description" type:"string"` +} + +// String returns the string representation +func (s PrefixListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PrefixListEntry) GoString() string { + return s.String() +} + +// SetCidr sets the Cidr field's value. +func (s *PrefixListEntry) SetCidr(v string) *PrefixListEntry { + s.Cidr = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *PrefixListEntry) SetDescription(v string) *PrefixListEntry { + s.Description = &v + return s +} + // Describes a prefix list ID. type PrefixListId struct { _ struct{} `type:"structure"` @@ -91778,6 +97019,9 @@ type ProvisionByoipCidrInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` + // The tags to apply to the address pool. + PoolTagSpecifications []*TagSpecification `locationName:"PoolTagSpecification" locationNameList:"item" type:"list"` + // (IPv6 only) Indicate whether the address range will be publicly advertised // to the internet. // @@ -91837,6 +97081,12 @@ func (s *ProvisionByoipCidrInput) SetDryRun(v bool) *ProvisionByoipCidrInput { return s } +// SetPoolTagSpecifications sets the PoolTagSpecifications field's value. +func (s *ProvisionByoipCidrInput) SetPoolTagSpecifications(v []*TagSpecification) *ProvisionByoipCidrInput { + s.PoolTagSpecifications = v + return s +} + // SetPubliclyAdvertisable sets the PubliclyAdvertisable field's value. func (s *ProvisionByoipCidrInput) SetPubliclyAdvertisable(v bool) *ProvisionByoipCidrInput { s.PubliclyAdvertisable = &v @@ -91945,6 +97195,11 @@ type PublicIpv4Pool struct { // A description of the address pool. Description *string `locationName:"description" type:"string"` + // The name of the location from which the address pool is advertised. A network + // border group is a unique set of Availability Zones or Local Zones from where + // AWS advertises public IP addresses. + NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` + // The address ranges. 
PoolAddressRanges []*PublicIpv4PoolRange `locationName:"poolAddressRangeSet" locationNameList:"item" type:"list"` @@ -91977,6 +97232,12 @@ func (s *PublicIpv4Pool) SetDescription(v string) *PublicIpv4Pool { return s } +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. +func (s *PublicIpv4Pool) SetNetworkBorderGroup(v string) *PublicIpv4Pool { + s.NetworkBorderGroup = &v + return s +} + // SetPoolAddressRanges sets the PoolAddressRanges field's value. func (s *PublicIpv4Pool) SetPoolAddressRanges(v []*PublicIpv4PoolRange) *PublicIpv4Pool { s.PoolAddressRanges = v @@ -92175,6 +97436,9 @@ type PurchaseHostReservationInput struct { // // OfferingId is a required field OfferingId *string `type:"string" required:"true"` + + // The tags to apply to the Dedicated Host Reservation during purchase. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -92233,6 +97497,12 @@ func (s *PurchaseHostReservationInput) SetOfferingId(v string) *PurchaseHostRese return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *PurchaseHostReservationInput) SetTagSpecifications(v []*TagSpecification) *PurchaseHostReservationInput { + s.TagSpecifications = v + return s +} + type PurchaseHostReservationOutput struct { _ struct{} `type:"structure"` @@ -92894,6 +98164,101 @@ func (s *RegisterImageOutput) SetImageId(v string) *RegisterImageOutput { return s } +type RegisterInstanceEventNotificationAttributesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // Information about the tag keys to register. + InstanceTagAttribute *RegisterInstanceTagAttributeRequest `type:"structure"` +} + +// String returns the string representation +func (s RegisterInstanceEventNotificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstanceEventNotificationAttributesInput) GoString() string { + return s.String() +} + +// SetDryRun sets the DryRun field's value. +func (s *RegisterInstanceEventNotificationAttributesInput) SetDryRun(v bool) *RegisterInstanceEventNotificationAttributesInput { + s.DryRun = &v + return s +} + +// SetInstanceTagAttribute sets the InstanceTagAttribute field's value. +func (s *RegisterInstanceEventNotificationAttributesInput) SetInstanceTagAttribute(v *RegisterInstanceTagAttributeRequest) *RegisterInstanceEventNotificationAttributesInput { + s.InstanceTagAttribute = v + return s +} + +type RegisterInstanceEventNotificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // The resulting set of tag keys. + InstanceTagAttribute *InstanceTagNotificationAttribute `locationName:"instanceTagAttribute" type:"structure"` +} + +// String returns the string representation +func (s RegisterInstanceEventNotificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstanceEventNotificationAttributesOutput) GoString() string { + return s.String() +} + +// SetInstanceTagAttribute sets the InstanceTagAttribute field's value. 
+func (s *RegisterInstanceEventNotificationAttributesOutput) SetInstanceTagAttribute(v *InstanceTagNotificationAttribute) *RegisterInstanceEventNotificationAttributesOutput { + s.InstanceTagAttribute = v + return s +} + +// Information about the tag keys to register for the current Region. You can +// either specify individual tag keys or register all tag keys in the current +// Region. You must specify either IncludeAllTagsOfInstance or InstanceTagKeys +// in the request +type RegisterInstanceTagAttributeRequest struct { + _ struct{} `type:"structure"` + + // Indicates whether to register all tag keys in the current Region. Specify + // true to register all tag keys. + IncludeAllTagsOfInstance *bool `type:"boolean"` + + // The tag keys to register. + InstanceTagKeys []*string `locationName:"InstanceTagKey" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s RegisterInstanceTagAttributeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterInstanceTagAttributeRequest) GoString() string { + return s.String() +} + +// SetIncludeAllTagsOfInstance sets the IncludeAllTagsOfInstance field's value. +func (s *RegisterInstanceTagAttributeRequest) SetIncludeAllTagsOfInstance(v bool) *RegisterInstanceTagAttributeRequest { + s.IncludeAllTagsOfInstance = &v + return s +} + +// SetInstanceTagKeys sets the InstanceTagKeys field's value. +func (s *RegisterInstanceTagAttributeRequest) SetInstanceTagKeys(v []*string) *RegisterInstanceTagAttributeRequest { + s.InstanceTagKeys = v + return s +} + type RegisterTransitGatewayMulticastGroupMembersInput struct { _ struct{} `type:"structure"` @@ -93366,7 +98731,8 @@ type ReleaseAddressInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // The location that the IP address is released from. + // The set of Availability Zones, Local Zones, or Wavelength Zones from which + // AWS advertises IP addresses. // // If you provide an incorrect network border group, you will receive an InvalidAddress.NotFound // error. For more information, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html). @@ -93499,6 +98865,45 @@ func (s *ReleaseHostsOutput) SetUnsuccessful(v []*UnsuccessfulItem) *ReleaseHost return s } +// An entry for a prefix list. +type RemovePrefixListEntry struct { + _ struct{} `type:"structure"` + + // The CIDR block. + // + // Cidr is a required field + Cidr *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemovePrefixListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemovePrefixListEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemovePrefixListEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemovePrefixListEntry"} + if s.Cidr == nil { + invalidParams.Add(request.NewErrParamRequired("Cidr")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidr sets the Cidr field's value. 
+func (s *RemovePrefixListEntry) SetCidr(v string) *RemovePrefixListEntry { + s.Cidr = &v + return s +} + type ReplaceIamInstanceProfileAssociationInput struct { _ struct{} `type:"structure"` @@ -93831,6 +99236,9 @@ func (s ReplaceNetworkAclEntryOutput) GoString() string { type ReplaceRouteInput struct { _ struct{} `type:"structure"` + // [IPv4 traffic only] The ID of a carrier gateway. + CarrierGatewayId *string `type:"string"` + // The IPv4 CIDR address block used for the destination match. The value that // you provide must match the CIDR of an existing route in the table. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` @@ -93839,6 +99247,9 @@ type ReplaceRouteInput struct { // you provide must match the CIDR of an existing route in the table. DestinationIpv6CidrBlock *string `locationName:"destinationIpv6CidrBlock" type:"string"` + // The ID of the prefix list for the route. + DestinationPrefixListId *string `type:"string"` + // Checks whether you have the required permissions for the action, without // actually making the request, and provides an error response. If you have // the required permissions, the error response is DryRunOperation. Otherwise, @@ -93901,6 +99312,12 @@ func (s *ReplaceRouteInput) Validate() error { return nil } +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *ReplaceRouteInput) SetCarrierGatewayId(v string) *ReplaceRouteInput { + s.CarrierGatewayId = &v + return s +} + // SetDestinationCidrBlock sets the DestinationCidrBlock field's value. func (s *ReplaceRouteInput) SetDestinationCidrBlock(v string) *ReplaceRouteInput { s.DestinationCidrBlock = &v @@ -93913,6 +99330,12 @@ func (s *ReplaceRouteInput) SetDestinationIpv6CidrBlock(v string) *ReplaceRouteI return s } +// SetDestinationPrefixListId sets the DestinationPrefixListId field's value. +func (s *ReplaceRouteInput) SetDestinationPrefixListId(v string) *ReplaceRouteInput { + s.DestinationPrefixListId = &v + return s +} + // SetDryRun sets the DryRun field's value. func (s *ReplaceRouteInput) SetDryRun(v bool) *ReplaceRouteInput { s.DryRun = &v @@ -94353,8 +99776,8 @@ type RequestLaunchTemplateData struct { // in the Amazon Elastic Compute Cloud User Guide. CpuOptions *LaunchTemplateCpuOptionsRequest `type:"structure"` - // The credit option for CPU usage of the instance. Valid for T2 or T3 instances - // only. + // The credit option for CPU usage of the instance. Valid for T2, T3, or T3a + // instances only. CreditSpecification *CreditSpecificationRequest `type:"structure"` // If you set this parameter to true, you can't terminate the instance using @@ -94816,6 +100239,12 @@ type RequestSpotInstancesInput struct { // The default is the On-Demand price. SpotPrice *string `locationName:"spotPrice" type:"string"` + // The key-value pair for tagging the Spot Instance request on creation. The + // value for ResourceType must be spot-instances-request, otherwise the Spot + // Instance request fails. To tag the Spot Instance request after it has been + // created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` + // The Spot Instance request type. // // Default: one-time @@ -94919,6 +100348,12 @@ func (s *RequestSpotInstancesInput) SetSpotPrice(v string) *RequestSpotInstances return s } +// SetTagSpecifications sets the TagSpecifications field's value. 
+func (s *RequestSpotInstancesInput) SetTagSpecifications(v []*TagSpecification) *RequestSpotInstancesInput { + s.TagSpecifications = v + return s +} + // SetType sets the Type field's value. func (s *RequestSpotInstancesInput) SetType(v string) *RequestSpotInstancesInput { s.Type = &v @@ -96806,6 +102241,107 @@ func (s *RestoreAddressToClassicOutput) SetStatus(v string) *RestoreAddressToCla return s } +type RestoreManagedPrefixListVersionInput struct { + _ struct{} `type:"structure"` + + // The current version number for the prefix list. + // + // CurrentVersion is a required field + CurrentVersion *int64 `type:"long" required:"true"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the prefix list. + // + // PrefixListId is a required field + PrefixListId *string `type:"string" required:"true"` + + // The version to restore. + // + // PreviousVersion is a required field + PreviousVersion *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s RestoreManagedPrefixListVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreManagedPrefixListVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreManagedPrefixListVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreManagedPrefixListVersionInput"} + if s.CurrentVersion == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentVersion")) + } + if s.PrefixListId == nil { + invalidParams.Add(request.NewErrParamRequired("PrefixListId")) + } + if s.PreviousVersion == nil { + invalidParams.Add(request.NewErrParamRequired("PreviousVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *RestoreManagedPrefixListVersionInput) SetCurrentVersion(v int64) *RestoreManagedPrefixListVersionInput { + s.CurrentVersion = &v + return s +} + +// SetDryRun sets the DryRun field's value. +func (s *RestoreManagedPrefixListVersionInput) SetDryRun(v bool) *RestoreManagedPrefixListVersionInput { + s.DryRun = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *RestoreManagedPrefixListVersionInput) SetPrefixListId(v string) *RestoreManagedPrefixListVersionInput { + s.PrefixListId = &v + return s +} + +// SetPreviousVersion sets the PreviousVersion field's value. +func (s *RestoreManagedPrefixListVersionInput) SetPreviousVersion(v int64) *RestoreManagedPrefixListVersionInput { + s.PreviousVersion = &v + return s +} + +type RestoreManagedPrefixListVersionOutput struct { + _ struct{} `type:"structure"` + + // Information about the prefix list. + PrefixList *ManagedPrefixList `locationName:"prefixList" type:"structure"` +} + +// String returns the string representation +func (s RestoreManagedPrefixListVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreManagedPrefixListVersionOutput) GoString() string { + return s.String() +} + +// SetPrefixList sets the PrefixList field's value. 
+func (s *RestoreManagedPrefixListVersionOutput) SetPrefixList(v *ManagedPrefixList) *RestoreManagedPrefixListVersionOutput { + s.PrefixList = v + return s +} + type RevokeClientVpnIngressInput struct { _ struct{} `type:"structure"` @@ -97031,6 +102567,13 @@ func (s *RevokeSecurityGroupEgressInput) SetToPort(v int64) *RevokeSecurityGroup type RevokeSecurityGroupEgressOutput struct { _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + Return *bool `locationName:"return" type:"boolean"` + + // The outbound rules that were unknown to the service. In some cases, unknownIpPermissionSet + // might be in a different format from the request parameter. + UnknownIpPermissions []*IpPermission `locationName:"unknownIpPermissionSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -97043,6 +102586,18 @@ func (s RevokeSecurityGroupEgressOutput) GoString() string { return s.String() } +// SetReturn sets the Return field's value. +func (s *RevokeSecurityGroupEgressOutput) SetReturn(v bool) *RevokeSecurityGroupEgressOutput { + s.Return = &v + return s +} + +// SetUnknownIpPermissions sets the UnknownIpPermissions field's value. +func (s *RevokeSecurityGroupEgressOutput) SetUnknownIpPermissions(v []*IpPermission) *RevokeSecurityGroupEgressOutput { + s.UnknownIpPermissions = v + return s +} + type RevokeSecurityGroupIngressInput struct { _ struct{} `type:"structure"` @@ -97170,6 +102725,13 @@ func (s *RevokeSecurityGroupIngressInput) SetToPort(v int64) *RevokeSecurityGrou type RevokeSecurityGroupIngressOutput struct { _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, returns an error. + Return *bool `locationName:"return" type:"boolean"` + + // The inbound rules that were unknown to the service. In some cases, unknownIpPermissionSet + // might be in a different format from the request parameter. + UnknownIpPermissions []*IpPermission `locationName:"unknownIpPermissionSet" locationNameList:"item" type:"list"` } // String returns the string representation @@ -97182,10 +102744,25 @@ func (s RevokeSecurityGroupIngressOutput) GoString() string { return s.String() } +// SetReturn sets the Return field's value. +func (s *RevokeSecurityGroupIngressOutput) SetReturn(v bool) *RevokeSecurityGroupIngressOutput { + s.Return = &v + return s +} + +// SetUnknownIpPermissions sets the UnknownIpPermissions field's value. +func (s *RevokeSecurityGroupIngressOutput) SetUnknownIpPermissions(v []*IpPermission) *RevokeSecurityGroupIngressOutput { + s.UnknownIpPermissions = v + return s +} + // Describes a route in a route table. type Route struct { _ struct{} `type:"structure"` + // The ID of the carrier gateway. + CarrierGatewayId *string `locationName:"carrierGatewayId" type:"string"` + // The IPv4 CIDR block used for the destination match. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` @@ -97248,6 +102825,12 @@ func (s Route) GoString() string { return s.String() } +// SetCarrierGatewayId sets the CarrierGatewayId field's value. +func (s *Route) SetCarrierGatewayId(v string) *Route { + s.CarrierGatewayId = &v + return s +} + // SetDestinationCidrBlock sets the DestinationCidrBlock field's value. 
func (s *Route) SetDestinationCidrBlock(v string) *Route { s.DestinationCidrBlock = &v @@ -97535,17 +103118,17 @@ type RunInstancesInput struct { // For more information, see Ensuring Idempotency (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). // // Constraints: Maximum 64 ASCII characters - ClientToken *string `locationName:"clientToken" type:"string"` + ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` // The CPU options for the instance. For more information, see Optimizing CPU - // Options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) + // options (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) // in the Amazon Elastic Compute Cloud User Guide. CpuOptions *CpuOptionsRequest `type:"structure"` // The credit option for CPU usage of the burstable performance instance. Valid // values are standard and unlimited. To change this attribute after launch, // use ModifyInstanceCreditSpecification (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyInstanceCreditSpecification.html). - // For more information, see Burstable Performance Instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) + // For more information, see Burstable performance instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/burstable-performance-instances.html) // in the Amazon Elastic Compute Cloud User Guide. // // Default: standard (T2 instances) or unlimited (T3/T3a instances) @@ -97584,10 +103167,12 @@ type RunInstancesInput struct { // An elastic inference accelerator to associate with the instance. Elastic // inference accelerators are a resource you can attach to your Amazon EC2 instances // to accelerate your Deep Learning (DL) inference workloads. + // + // You cannot specify accelerators from different generations in the same request. ElasticInferenceAccelerators []*ElasticInferenceAccelerator `locationName:"ElasticInferenceAccelerator" locationNameList:"item" type:"list"` // Indicates whether an instance is enabled for hibernation. For more information, - // see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. HibernationOptions *HibernationOptionsRequest `type:"structure"` @@ -97610,7 +103195,7 @@ type RunInstancesInput struct { // InstanceInterruptionBehavior is set to either hibernate or stop. InstanceMarketOptions *InstanceMarketOptionsRequest `type:"structure"` - // The instance type. For more information, see Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // The instance type. For more information, see Instance types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) // in the Amazon Elastic Compute Cloud User Guide. // // Default: m1.small @@ -97670,7 +103255,7 @@ type RunInstancesInput struct { MaxCount *int64 `type:"integer" required:"true"` // The metadata options for the instance. For more information, see Instance - // Metadata and User Data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). + // metadata and user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html). 
MetadataOptions *InstanceMetadataOptionsRequest `type:"structure"` // The minimum number of instances to launch. If you specify a minimum that @@ -97747,7 +103332,7 @@ type RunInstancesInput struct { TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` // The user data to make available to the instance. For more information, see - // Running Commands on Your Linux Instance at Launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) + // Running commands on your Linux instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) // (Linux) and Adding User Data (https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) // (Windows). If you are using a command line tool, base64-encoding is performed // for you, and you can load the text from a file. Otherwise, you must provide @@ -98738,19 +104323,15 @@ type ScheduledInstancesEbs struct { // only to instances that support them. Encrypted *bool `type:"boolean"` - // The number of I/O operations per second (IOPS) that the volume supports. - // For io1 volumes, this represents the number of IOPS that are provisioned - // for the volume. For gp2 volumes, this represents the baseline performance - // of the volume and the rate at which the volume accumulates I/O credits for - // bursting. For more information about gp2 baseline performance, I/O credits, - // and bursting, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // The number of I/O operations per second (IOPS) to provision for an io1 or + // io2 volume, with a maximum ratio of 50 IOPS/GiB for io1, and 500 IOPS/GiB + // for io2. Range is 100 to 64,000 IOPS for volumes in most Regions. Maximum + // IOPS of 64,000 is guaranteed only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // Other instance families guarantee performance up to 32,000 IOPS. For more + // information, see Amazon EBS Volume Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // - // Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for - // gp2 volumes. - // - // Condition: This parameter is required for requests to create io1volumes; - // it is not used in requests to create gp2, st1, sc1, or standard volumes. + // This parameter is valid only for Provisioned IOPS SSD (io1 and io2) volumes. Iops *int64 `type:"integer"` // The ID of the snapshot. @@ -98762,8 +104343,9 @@ type ScheduledInstancesEbs struct { // a volume size, the default is the snapshot size. VolumeSize *int64 `type:"integer"` - // The volume type. gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, - // Throughput Optimized HDD for st1, Cold HDD for sc1, or standard for Magnetic. + // The volume type. gp2 for General Purpose SSD, io1 or io2 for Provisioned + // IOPS SSD, Throughput Optimized HDD for st1, Cold HDD for sc1, or standard + // for Magnetic. // // Default: gp2 VolumeType *string `type:"string"` @@ -99522,7 +105104,10 @@ type SearchTransitGatewayRoutesInput struct { // // * attachment.resource-id - The resource id of the transit gateway attachment. // - // * attachment.resource-type - The attachment resource type (vpc | vpn). + // * attachment.resource-type - The attachment resource type. Valid values + // are vpc | vpn | direct-connect-gateway | peering. 
+ // + // * prefix-list-id - The ID of the prefix list. // // * route-search.exact-match - The exact match of the specified filter. // @@ -100253,9 +105838,10 @@ type Snapshot struct { // key for the parent volume. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` - // Value from an Amazon-maintained list (amazon | self | all | aws-marketplace - // | microsoft) of snapshot owners. Not to be confused with the user-configured - // AWS account alias, which is set from the IAM console. + // The AWS owner alias, as maintained by Amazon. The possible values are: amazon + // | self | all | aws-marketplace | microsoft. This AWS owner alias is not to + // be confused with the user-configured AWS account alias, which is set from + // the IAM console. OwnerAlias *string `locationName:"ownerAlias" type:"string"` // The AWS account ID of the EBS snapshot owner. @@ -100418,7 +106004,7 @@ type SnapshotDetail struct { // The URL used to access the disk image. Url *string `locationName:"url" type:"string"` - // The S3 bucket for the disk image. + // The Amazon S3 bucket for the disk image. UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` } @@ -100508,7 +106094,7 @@ type SnapshotDiskContainer struct { // a https URL (https://..) or an Amazon S3 URL (s3://..). Url *string `type:"string"` - // The S3 bucket for the disk image. + // The Amazon S3 bucket for the disk image. UserBucket *UserBucket `type:"structure"` } @@ -100688,7 +106274,7 @@ type SnapshotTaskDetail struct { // The URL of the disk image from which the snapshot is created. Url *string `locationName:"url" type:"string"` - // The S3 bucket for the disk image. + // The Amazon S3 bucket for the disk image. UserBucket *UserBucketDetails `locationName:"userBucket" type:"structure"` } @@ -101168,12 +106754,12 @@ type SpotFleetRequestConfigData struct { // The Amazon Resource Name (ARN) of an AWS Identity and Access Management (IAM) // role that grants the Spot Fleet the permission to request, launch, terminate, - // and tag instances on your behalf. For more information, see Spot Fleet Prerequisites + // and tag instances on your behalf. For more information, see Spot Fleet prerequisites // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-fleet-requests.html#spot-fleet-prerequisites) // in the Amazon EC2 User Guide for Linux Instances. Spot Fleet can terminate // Spot Instances on your behalf when you cancel its Spot Fleet request using - // CancelSpotFleetRequests or when the Spot Fleet request expires, if you set - // TerminateInstancesWithExpiration. + // CancelSpotFleetRequests (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CancelSpotFleetRequests) + // or when the Spot Fleet request expires, if you set TerminateInstancesWithExpiration. // // IamFleetRole is a required field IamFleetRole *string `locationName:"iamFleetRole" type:"string" required:"true"` @@ -101560,7 +107146,7 @@ type SpotInstanceRequest struct { SpotPrice *string `locationName:"spotPrice" type:"string"` // The state of the Spot Instance request. Spot status information helps track - // your Spot Instance requests. For more information, see Spot Status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) + // your Spot Instance requests. For more information, see Spot status (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) // in the Amazon EC2 User Guide for Linux Instances. 
State *string `locationName:"state" type:"string" enum:"SpotInstanceState"` @@ -101746,7 +107332,7 @@ func (s *SpotInstanceStateFault) SetMessage(v string) *SpotInstanceStateFault { type SpotInstanceStatus struct { _ struct{} `type:"structure"` - // The status code. For a list of status codes, see Spot Status Codes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand) + // The status code. For a list of status codes, see Spot status codes (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#spot-instance-bid-status-understand) // in the Amazon EC2 User Guide for Linux Instances. Code *string `locationName:"code" type:"string"` @@ -101802,9 +107388,9 @@ type SpotMarketOptions struct { // default is the On-Demand price. MaxPrice *string `type:"string"` - // The Spot Instance request type. For RunInstances, persistent Spot Instance - // requests are only supported when InstanceInterruptionBehavior is set to either - // hibernate or stop. + // The Spot Instance request type. For RunInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances), + // persistent Spot Instance requests are only supported when InstanceInterruptionBehavior + // is set to either hibernate or stop. SpotInstanceType *string `type:"string" enum:"SpotInstanceType"` // The end date of the request. For a one-time request, the request remains @@ -102170,8 +107756,7 @@ type StaleIpPermission struct { // The IP ranges. Not applicable for stale security group rules. IpRanges []*string `locationName:"ipRanges" locationNameList:"item" type:"list"` - // The prefix list IDs for an AWS service. Not applicable for stale security - // group rules. + // The prefix list IDs. Not applicable for stale security group rules. PrefixListIds []*string `locationName:"prefixListIds" locationNameList:"item" type:"list"` // The end of the port range for the TCP and UDP protocols, or an ICMP type @@ -102543,7 +108128,7 @@ type StopInstancesInput struct { // Hibernates the instance if the instance was enabled for hibernation at launch. // If the instance cannot hibernate successfully, a normal shutdown occurs. - // For more information, see Hibernate Your Instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) + // For more information, see Hibernate your instance (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Hibernate.html) // in the Amazon Elastic Compute Cloud User Guide. // // Default: false @@ -102703,12 +108288,20 @@ type Subnet struct { // The IPv4 CIDR block assigned to the subnet. CidrBlock *string `locationName:"cidrBlock" type:"string"` + // The customer-owned IPv4 address pool associated with the subnet. + CustomerOwnedIpv4Pool *string `locationName:"customerOwnedIpv4Pool" type:"string"` + // Indicates whether this is the default subnet for the Availability Zone. DefaultForAz *bool `locationName:"defaultForAz" type:"boolean"` // Information about the IPv6 CIDR blocks associated with the subnet. Ipv6CidrBlockAssociationSet []*SubnetIpv6CidrBlockAssociation `locationName:"ipv6CidrBlockAssociationSet" locationNameList:"item" type:"list"` + // Indicates whether a network interface created in this subnet (including a + // network interface created by RunInstances) receives a customer-owned IPv4 + // address. + MapCustomerOwnedIpOnLaunch *bool `locationName:"mapCustomerOwnedIpOnLaunch" type:"boolean"` + // Indicates whether instances launched in this subnet receive a public IPv4 // address. 
MapPublicIpOnLaunch *bool `locationName:"mapPublicIpOnLaunch" type:"boolean"` @@ -102775,6 +108368,12 @@ func (s *Subnet) SetCidrBlock(v string) *Subnet { return s } +// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value. +func (s *Subnet) SetCustomerOwnedIpv4Pool(v string) *Subnet { + s.CustomerOwnedIpv4Pool = &v + return s +} + // SetDefaultForAz sets the DefaultForAz field's value. func (s *Subnet) SetDefaultForAz(v bool) *Subnet { s.DefaultForAz = &v @@ -102787,6 +108386,12 @@ func (s *Subnet) SetIpv6CidrBlockAssociationSet(v []*SubnetIpv6CidrBlockAssociat return s } +// SetMapCustomerOwnedIpOnLaunch sets the MapCustomerOwnedIpOnLaunch field's value. +func (s *Subnet) SetMapCustomerOwnedIpOnLaunch(v bool) *Subnet { + s.MapCustomerOwnedIpOnLaunch = &v + return s +} + // SetMapPublicIpOnLaunch sets the MapPublicIpOnLaunch field's value. func (s *Subnet) SetMapPublicIpOnLaunch(v bool) *Subnet { s.MapPublicIpOnLaunch = &v @@ -103087,12 +108692,17 @@ type TagSpecification struct { _ struct{} `type:"structure"` // The type of resource to tag. Currently, the resource types that support tagging - // on creation are: capacity-reservation | client-vpn-endpoint | dedicated-host - // | fleet | fpga-image | instance | key-pair | launch-template | | natgateway - // | spot-fleet-request | placement-group | snapshot | traffic-mirror-filter - // | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment - // | transit-gateway-route-table | vpc-endpoint (for interface VPC endpoints)| - // vpc-endpoint-service (for gateway VPC endpoints) | volume | vpc-flow-log. + // on creation are: capacity-reservation | client-vpn-endpoint | customer-gateway + // | dedicated-host | dhcp-options | export-image-task | export-instance-task + // | fleet | fpga-image | host-reservation | import-image-task | import-snapshot-task + // | instance | internet-gateway | ipv4pool-ec2 | ipv6pool-ec2 | key-pair | + // launch-template | placement-group | prefix-list | natgateway | network-acl + // | route-table | security-group | spot-fleet-request | spot-instances-request + // | snapshot | subnet | traffic-mirror-filter | traffic-mirror-session | traffic-mirror-target + // | transit-gateway | transit-gateway-attachment | transit-gateway-route-table + // | volume |vpc | vpc-peering-connection | vpc-endpoint (for interface and + // gateway endpoints) | vpc-endpoint-service (for AWS PrivateLink) | vpc-flow-log + // | vpn-connection | vpn-gateway. // // To tag a resource after it has been created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` @@ -103136,7 +108746,8 @@ func (s *TagSpecification) SetTags(v []*Tag) *TagSpecification { // reaches the maximum amount that you're willing to pay. When the maximum amount // you're willing to pay is reached, the fleet stops launching instances even // if it hasn’t met the target capacity. The MaxTotalPrice parameters are -// located in and +// located in OnDemandOptions (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_OnDemandOptions.html) +// and SpotOptions (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotOptions) type TargetCapacitySpecification struct { _ struct{} `type:"structure"` @@ -103202,7 +108813,8 @@ func (s *TargetCapacitySpecification) SetTotalTargetCapacity(v int64) *TargetCap // instances until it reaches the maximum amount that you're willing to pay. 
// When the maximum amount you're willing to pay is reached, the fleet stops // launching instances even if it hasn’t met the target capacity. The MaxTotalPrice -// parameters are located in and . +// parameters are located in OnDemandOptionsRequest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_OnDemandOptionsRequest) +// and SpotOptionsRequest (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotOptionsRequest). type TargetCapacitySpecificationRequest struct { _ struct{} `type:"structure"` @@ -104288,7 +109900,7 @@ type TransitGatewayAssociation struct { // The ID of the resource. ResourceId *string `locationName:"resourceId" type:"string"` - // The resource type. + // The resource type. Note that the tgw-peering resource type has been deprecated. ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` // The state of the association. @@ -104357,10 +109969,10 @@ type TransitGatewayAttachment struct { // The ID of the AWS account that owns the resource. ResourceOwnerId *string `locationName:"resourceOwnerId" type:"string"` - // The resource type. + // The resource type. Note that the tgw-peering resource type has been deprecated. ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` - // The attachment state. + // The attachment state. Note that the initiating state has been deprecated. State *string `locationName:"state" type:"string" enum:"TransitGatewayAttachmentState"` // The tags for the attachment. @@ -105070,7 +110682,8 @@ type TransitGatewayPeeringAttachment struct { // Information about the requester transit gateway. RequesterTgwInfo *PeeringTgwInfo `locationName:"requesterTgwInfo" type:"structure"` - // The state of the transit gateway peering attachment. + // The state of the transit gateway peering attachment. Note that the initiating + // state has been deprecated. State *string `locationName:"state" type:"string" enum:"TransitGatewayAttachmentState"` // The status of the transit gateway peering attachment. @@ -105135,6 +110748,117 @@ func (s *TransitGatewayPeeringAttachment) SetTransitGatewayAttachmentId(v string return s } +// Describes a transit gateway prefix list attachment. +type TransitGatewayPrefixListAttachment struct { + _ struct{} `type:"structure"` + + // The ID of the resource. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The resource type. Note that the tgw-peering resource type has been deprecated. + ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` + + // The ID of the attachment. + TransitGatewayAttachmentId *string `locationName:"transitGatewayAttachmentId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayPrefixListAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayPrefixListAttachment) GoString() string { + return s.String() +} + +// SetResourceId sets the ResourceId field's value. +func (s *TransitGatewayPrefixListAttachment) SetResourceId(v string) *TransitGatewayPrefixListAttachment { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *TransitGatewayPrefixListAttachment) SetResourceType(v string) *TransitGatewayPrefixListAttachment { + s.ResourceType = &v + return s +} + +// SetTransitGatewayAttachmentId sets the TransitGatewayAttachmentId field's value. 
+func (s *TransitGatewayPrefixListAttachment) SetTransitGatewayAttachmentId(v string) *TransitGatewayPrefixListAttachment { + s.TransitGatewayAttachmentId = &v + return s +} + +// Describes a prefix list reference. +type TransitGatewayPrefixListReference struct { + _ struct{} `type:"structure"` + + // Indicates whether traffic that matches this route is dropped. + Blackhole *bool `locationName:"blackhole" type:"boolean"` + + // The ID of the prefix list. + PrefixListId *string `locationName:"prefixListId" type:"string"` + + // The ID of the prefix list owner. + PrefixListOwnerId *string `locationName:"prefixListOwnerId" type:"string"` + + // The state of the prefix list reference. + State *string `locationName:"state" type:"string" enum:"TransitGatewayPrefixListReferenceState"` + + // Information about the transit gateway attachment. + TransitGatewayAttachment *TransitGatewayPrefixListAttachment `locationName:"transitGatewayAttachment" type:"structure"` + + // The ID of the transit gateway route table. + TransitGatewayRouteTableId *string `locationName:"transitGatewayRouteTableId" type:"string"` +} + +// String returns the string representation +func (s TransitGatewayPrefixListReference) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransitGatewayPrefixListReference) GoString() string { + return s.String() +} + +// SetBlackhole sets the Blackhole field's value. +func (s *TransitGatewayPrefixListReference) SetBlackhole(v bool) *TransitGatewayPrefixListReference { + s.Blackhole = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *TransitGatewayPrefixListReference) SetPrefixListId(v string) *TransitGatewayPrefixListReference { + s.PrefixListId = &v + return s +} + +// SetPrefixListOwnerId sets the PrefixListOwnerId field's value. +func (s *TransitGatewayPrefixListReference) SetPrefixListOwnerId(v string) *TransitGatewayPrefixListReference { + s.PrefixListOwnerId = &v + return s +} + +// SetState sets the State field's value. +func (s *TransitGatewayPrefixListReference) SetState(v string) *TransitGatewayPrefixListReference { + s.State = &v + return s +} + +// SetTransitGatewayAttachment sets the TransitGatewayAttachment field's value. +func (s *TransitGatewayPrefixListReference) SetTransitGatewayAttachment(v *TransitGatewayPrefixListAttachment) *TransitGatewayPrefixListReference { + s.TransitGatewayAttachment = v + return s +} + +// SetTransitGatewayRouteTableId sets the TransitGatewayRouteTableId field's value. +func (s *TransitGatewayPrefixListReference) SetTransitGatewayRouteTableId(v string) *TransitGatewayPrefixListReference { + s.TransitGatewayRouteTableId = &v + return s +} + // Describes route propagation. type TransitGatewayPropagation struct { _ struct{} `type:"structure"` @@ -105142,7 +110866,7 @@ type TransitGatewayPropagation struct { // The ID of the resource. ResourceId *string `locationName:"resourceId" type:"string"` - // The resource type. + // The resource type. Note that the tgw-peering resource type has been deprecated. ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` // The state. @@ -105201,28 +110925,28 @@ type TransitGatewayRequestOptions struct { // A private Autonomous System Number (ASN) for the Amazon side of a BGP session. // The range is 64512 to 65534 for 16-bit ASNs and 4200000000 to 4294967294 - // for 32-bit ASNs. + // for 32-bit ASNs. The default is 64512. 
AmazonSideAsn *int64 `type:"long"` - // Enable or disable automatic acceptance of attachment requests. The default - // is disable. + // Enable or disable automatic acceptance of attachment requests. Disabled by + // default. AutoAcceptSharedAttachments *string `type:"string" enum:"AutoAcceptSharedAttachmentsValue"` // Enable or disable automatic association with the default association route - // table. The default is enable. + // table. Enabled by default. DefaultRouteTableAssociation *string `type:"string" enum:"DefaultRouteTableAssociationValue"` // Enable or disable automatic propagation of routes to the default propagation - // route table. The default is enable. + // route table. Enabled by default. DefaultRouteTablePropagation *string `type:"string" enum:"DefaultRouteTablePropagationValue"` - // Enable or disable DNS support. + // Enable or disable DNS support. Enabled by default. DnsSupport *string `type:"string" enum:"DnsSupportValue"` // Indicates whether multicast is enabled on the transit gateway MulticastSupport *string `type:"string" enum:"MulticastSupportValue"` - // Enable or disable Equal Cost Multipath Protocol support. + // Enable or disable Equal Cost Multipath Protocol support. Enabled by default. VpnEcmpSupport *string `type:"string" enum:"VpnEcmpSupportValue"` } @@ -105285,6 +111009,9 @@ type TransitGatewayRoute struct { // The CIDR block used for destination matches. DestinationCidrBlock *string `locationName:"destinationCidrBlock" type:"string"` + // The ID of the prefix list used for destination matches. + PrefixListId *string `locationName:"prefixListId" type:"string"` + // The state of the route. State *string `locationName:"state" type:"string" enum:"TransitGatewayRouteState"` @@ -105311,6 +111038,12 @@ func (s *TransitGatewayRoute) SetDestinationCidrBlock(v string) *TransitGatewayR return s } +// SetPrefixListId sets the PrefixListId field's value. +func (s *TransitGatewayRoute) SetPrefixListId(v string) *TransitGatewayRoute { + s.PrefixListId = &v + return s +} + // SetState sets the State field's value. func (s *TransitGatewayRoute) SetState(v string) *TransitGatewayRoute { s.State = &v @@ -105336,7 +111069,7 @@ type TransitGatewayRouteAttachment struct { // The ID of the resource. ResourceId *string `locationName:"resourceId" type:"string"` - // The resource type. + // The resource type. Note that the tgw-peering resource type has been deprecated. ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` // The ID of the attachment. @@ -105458,7 +111191,7 @@ type TransitGatewayRouteTableAssociation struct { // The ID of the resource. ResourceId *string `locationName:"resourceId" type:"string"` - // The resource type. + // The resource type. Note that the tgw-peering resource type has been deprecated. ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` // The state of the association. @@ -105509,7 +111242,7 @@ type TransitGatewayRouteTablePropagation struct { // The ID of the resource. ResourceId *string `locationName:"resourceId" type:"string"` - // The type of resource. + // The type of resource. Note that the tgw-peering resource type has been deprecated. ResourceType *string `locationName:"resourceType" type:"string" enum:"TransitGatewayAttachmentResourceType"` // The state of the resource. @@ -105563,7 +111296,8 @@ type TransitGatewayVpcAttachment struct { // The VPC attachment options. 
Options *TransitGatewayVpcAttachmentOptions `locationName:"options" type:"structure"` - // The state of the VPC attachment. + // The state of the VPC attachment. Note that the initiating state has been + // deprecated. State *string `locationName:"state" type:"string" enum:"TransitGatewayAttachmentState"` // The IDs of the subnets. @@ -105686,6 +111420,9 @@ func (s *TransitGatewayVpcAttachmentOptions) SetIpv6Support(v string) *TransitGa type TunnelOption struct { _ struct{} `type:"structure"` + // The action to take after a DPD timeout occurs. + DpdTimeoutAction *string `locationName:"dpdTimeoutAction" type:"string"` + // The number of seconds after which a DPD timeout occurs. DpdTimeoutSeconds *int64 `locationName:"dpdTimeoutSeconds" type:"integer"` @@ -105736,8 +111473,14 @@ type TunnelOption struct { // The number of packets in an IKE replay window. ReplayWindowSize *int64 `locationName:"replayWindowSize" type:"integer"` - // The range of inside IP addresses for the tunnel. + // The action to take when the establishing the VPN tunnels for a VPN connection. + StartupAction *string `locationName:"startupAction" type:"string"` + + // The range of inside IPv4 addresses for the tunnel. TunnelInsideCidr *string `locationName:"tunnelInsideCidr" type:"string"` + + // The range of inside IPv6 addresses for the tunnel. + TunnelInsideIpv6Cidr *string `locationName:"tunnelInsideIpv6Cidr" type:"string"` } // String returns the string representation @@ -105750,6 +111493,12 @@ func (s TunnelOption) GoString() string { return s.String() } +// SetDpdTimeoutAction sets the DpdTimeoutAction field's value. +func (s *TunnelOption) SetDpdTimeoutAction(v string) *TunnelOption { + s.DpdTimeoutAction = &v + return s +} + // SetDpdTimeoutSeconds sets the DpdTimeoutSeconds field's value. func (s *TunnelOption) SetDpdTimeoutSeconds(v int64) *TunnelOption { s.DpdTimeoutSeconds = &v @@ -105840,12 +111589,24 @@ func (s *TunnelOption) SetReplayWindowSize(v int64) *TunnelOption { return s } +// SetStartupAction sets the StartupAction field's value. +func (s *TunnelOption) SetStartupAction(v string) *TunnelOption { + s.StartupAction = &v + return s +} + // SetTunnelInsideCidr sets the TunnelInsideCidr field's value. func (s *TunnelOption) SetTunnelInsideCidr(v string) *TunnelOption { s.TunnelInsideCidr = &v return s } +// SetTunnelInsideIpv6Cidr sets the TunnelInsideIpv6Cidr field's value. +func (s *TunnelOption) SetTunnelInsideIpv6Cidr(v string) *TunnelOption { + s.TunnelInsideIpv6Cidr = &v + return s +} + type UnassignIpv6AddressesInput struct { _ struct{} `type:"structure"` @@ -106395,11 +112156,11 @@ func (s *UpdateSecurityGroupRuleDescriptionsIngressOutput) SetReturn(v bool) *Up return s } -// Describes the S3 bucket for the disk image. +// Describes the Amazon S3 bucket for the disk image. type UserBucket struct { _ struct{} `type:"structure"` - // The name of the S3 bucket where the disk image is located. + // The name of the Amazon S3 bucket where the disk image is located. S3Bucket *string `type:"string"` // The file name of the disk image. @@ -106428,11 +112189,11 @@ func (s *UserBucket) SetS3Key(v string) *UserBucket { return s } -// Describes the S3 bucket for the disk image. +// Describes the Amazon S3 bucket for the disk image. type UserBucketDetails struct { _ struct{} `type:"structure"` - // The S3 bucket from which the disk image was created. + // The Amazon S3 bucket from which the disk image was created. S3Bucket *string `locationName:"s3Bucket" type:"string"` // The file name of the disk image. 
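// Illustrative sketch, not part of the vendored, generated SDK source: the hunks
// above add StartupAction, DPDTimeoutAction, and TunnelInsideIpv6Cidr (plus their
// generated Set* helpers) to the VPN tunnel option types. Assuming a caller wants
// to exercise those new fields, a minimal use of ModifyVpnTunnelOptions could look
// like the standalone file below; the package and function names, the VPN
// connection ID, the outside IP address, and the DPDTimeoutAction value are
// placeholders chosen for illustration, not values taken from this patch.

package vpnsketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func modifyTunnelOptionsSketch() error {
	// Build an EC2 client from the default credential/region chain.
	svc := ec2.New(session.Must(session.NewSession()))

	// Chain the setters introduced in this diff. StartupAction and
	// TunnelInsideIpv6Cidr values follow the constraints documented above
	// (add | start, and a /126 block from fd00::/8); the DPDTimeoutAction
	// value is an assumed example.
	opts := (&ec2.ModifyVpnTunnelOptionsSpecification{}).
		SetStartupAction("start").
		SetDPDTimeoutAction("restart").
		SetTunnelInsideIpv6Cidr("fd00:1234:5678::/126")

	out, err := svc.ModifyVpnTunnelOptions(&ec2.ModifyVpnTunnelOptionsInput{
		VpnConnectionId:           aws.String("vpn-0123456789abcdef0"), // placeholder ID
		VpnTunnelOutsideIpAddress: aws.String("203.0.113.10"),          // placeholder address
		TunnelOptions:             opts,
	})
	if err != nil {
		return err
	}
	fmt.Println(out.VpnConnection)
	return nil
}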
@@ -106643,6 +112404,70 @@ func (s *VCpuInfo) SetValidThreadsPerCore(v []*int64) *VCpuInfo { return s } +// The error code and error message that is returned for a parameter or parameter +// combination that is not valid when a new launch template or new version of +// a launch template is created. +type ValidationError struct { + _ struct{} `type:"structure"` + + // The error code that indicates why the parameter or parameter combination + // is not valid. For more information about error codes, see Error Codes (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html). + Code *string `locationName:"code" type:"string"` + + // The error message that describes why the parameter or parameter combination + // is not valid. For more information about error messages, see Error Codes + // (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html.html). + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ValidationError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationError) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *ValidationError) SetCode(v string) *ValidationError { + s.Code = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *ValidationError) SetMessage(v string) *ValidationError { + s.Message = &v + return s +} + +// The error codes and error messages that are returned for the parameters or +// parameter combinations that are not valid when a new launch template or new +// version of a launch template is created. +type ValidationWarning struct { + _ struct{} `type:"structure"` + + // The error codes and error messages. + Errors []*ValidationError `locationName:"errorSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s ValidationWarning) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationWarning) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. +func (s *ValidationWarning) SetErrors(v []*ValidationError) *ValidationWarning { + s.Errors = v + return s +} + // Describes telemetry for a VPN tunnel. type VgwTelemetry struct { _ struct{} `type:"structure"` @@ -106736,17 +112561,18 @@ type Volume struct { // For Provisioned IOPS SSD volumes, this represents the number of IOPS that // are provisioned for the volume. For General Purpose SSD volumes, this represents // the baseline performance of the volume and the rate at which the volume accumulates - // I/O credits for bursting. For more information, see Amazon EBS Volume Types + // I/O credits for bursting. For more information, see Amazon EBS volume types // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. // - // Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000IOPS - // for io1 volumes, in most Regions. The maximum IOPS for io1 of 64,000 is guaranteed - // only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). + // Constraints: Range is 100-16,000 IOPS for gp2 volumes and 100 to 64,000 IOPS + // for io1 and io2 volumes, in most Regions. 
The maximum IOPS for io1 and io2 + // of 64,000 is guaranteed only on Nitro-based instances (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#ec2-nitro-instances). // Other instance families guarantee performance up to 32,000 IOPS. // - // Condition: This parameter is required for requests to create io1 volumes; - // it is not used in requests to create gp2, st1, sc1, or standard volumes. + // Condition: This parameter is required for requests to create io1 and io2 + // volumes; it is not used in requests to create gp2, st1, sc1, or standard + // volumes. Iops *int64 `locationName:"iops" type:"integer"` // The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) @@ -106775,9 +112601,9 @@ type Volume struct { // The ID of the volume. VolumeId *string `locationName:"volumeId" type:"string"` - // The volume type. This can be gp2 for General Purpose SSD, io1 for Provisioned - // IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard - // for Magnetic volumes. + // The volume type. This can be gp2 for General Purpose SSD, io1 or io2 for + // Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, + // or standard for Magnetic volumes. VolumeType *string `locationName:"volumeType" type:"string" enum:"VolumeType"` } @@ -107005,7 +112831,7 @@ type VolumeModification struct { // The original IOPS rate of the volume. OriginalIops *int64 `locationName:"originalIops" type:"integer"` - // The original size of the volume. + // The original size of the volume, in GiB. OriginalSize *int64 `locationName:"originalSize" type:"integer"` // The original EBS volume type of the volume. @@ -107422,8 +113248,7 @@ type Vpc struct { // Information about the IPv4 CIDR blocks associated with the VPC. CidrBlockAssociationSet []*VpcCidrBlockAssociation `locationName:"cidrBlockAssociationSet" locationNameList:"item" type:"list"` - // The ID of the set of DHCP options you've associated with the VPC (or default - // if the default options are associated with the VPC). + // The ID of the set of DHCP options you've associated with the VPC. DhcpOptionsId *string `locationName:"dhcpOptionsId" type:"string"` // The allowed tenancy of instances launched into the VPC. @@ -107932,7 +113757,8 @@ type VpcIpv6CidrBlockAssociation struct { // The ID of the IPv6 address pool from which the IPv6 CIDR block is allocated. Ipv6Pool *string `locationName:"ipv6Pool" type:"string"` - // The name of the location from which we advertise the IPV6 CIDR block. + // The name of the unique set of Availability Zones, Local Zones, or Wavelength + // Zones from which AWS advertises IP addresses, for example, us-east-1-wl1-bos-wlz-1. NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` } @@ -108338,10 +114164,25 @@ type VpnConnectionOptions struct { // Indicates whether acceleration is enabled for the VPN connection. EnableAcceleration *bool `locationName:"enableAcceleration" type:"boolean"` + // The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection. + LocalIpv4NetworkCidr *string `locationName:"localIpv4NetworkCidr" type:"string"` + + // The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection. + LocalIpv6NetworkCidr *string `locationName:"localIpv6NetworkCidr" type:"string"` + + // The IPv4 CIDR on the AWS side of the VPN connection. + RemoteIpv4NetworkCidr *string `locationName:"remoteIpv4NetworkCidr" type:"string"` + + // The IPv6 CIDR on the AWS side of the VPN connection. 
+ RemoteIpv6NetworkCidr *string `locationName:"remoteIpv6NetworkCidr" type:"string"` + // Indicates whether the VPN connection uses static routes only. Static routes // must be used for devices that don't support BGP. StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` + // Indicates whether the VPN tunnels process IPv4 or IPv6 traffic. + TunnelInsideIpVersion *string `locationName:"tunnelInsideIpVersion" type:"string" enum:"TunnelInsideIpVersion"` + // Indicates the VPN tunnel options. TunnelOptions []*TunnelOption `locationName:"tunnelOptionSet" locationNameList:"item" type:"list"` } @@ -108362,12 +114203,42 @@ func (s *VpnConnectionOptions) SetEnableAcceleration(v bool) *VpnConnectionOptio return s } +// SetLocalIpv4NetworkCidr sets the LocalIpv4NetworkCidr field's value. +func (s *VpnConnectionOptions) SetLocalIpv4NetworkCidr(v string) *VpnConnectionOptions { + s.LocalIpv4NetworkCidr = &v + return s +} + +// SetLocalIpv6NetworkCidr sets the LocalIpv6NetworkCidr field's value. +func (s *VpnConnectionOptions) SetLocalIpv6NetworkCidr(v string) *VpnConnectionOptions { + s.LocalIpv6NetworkCidr = &v + return s +} + +// SetRemoteIpv4NetworkCidr sets the RemoteIpv4NetworkCidr field's value. +func (s *VpnConnectionOptions) SetRemoteIpv4NetworkCidr(v string) *VpnConnectionOptions { + s.RemoteIpv4NetworkCidr = &v + return s +} + +// SetRemoteIpv6NetworkCidr sets the RemoteIpv6NetworkCidr field's value. +func (s *VpnConnectionOptions) SetRemoteIpv6NetworkCidr(v string) *VpnConnectionOptions { + s.RemoteIpv6NetworkCidr = &v + return s +} + // SetStaticRoutesOnly sets the StaticRoutesOnly field's value. func (s *VpnConnectionOptions) SetStaticRoutesOnly(v bool) *VpnConnectionOptions { s.StaticRoutesOnly = &v return s } +// SetTunnelInsideIpVersion sets the TunnelInsideIpVersion field's value. +func (s *VpnConnectionOptions) SetTunnelInsideIpVersion(v string) *VpnConnectionOptions { + s.TunnelInsideIpVersion = &v + return s +} + // SetTunnelOptions sets the TunnelOptions field's value. func (s *VpnConnectionOptions) SetTunnelOptions(v []*TunnelOption) *VpnConnectionOptions { s.TunnelOptions = v @@ -108383,6 +114254,26 @@ type VpnConnectionOptionsSpecification struct { // Default: false EnableAcceleration *bool `type:"boolean"` + // The IPv4 CIDR on the customer gateway (on-premises) side of the VPN connection. + // + // Default: 0.0.0.0/0 + LocalIpv4NetworkCidr *string `type:"string"` + + // The IPv6 CIDR on the customer gateway (on-premises) side of the VPN connection. + // + // Default: ::/0 + LocalIpv6NetworkCidr *string `type:"string"` + + // The IPv4 CIDR on the AWS side of the VPN connection. + // + // Default: 0.0.0.0/0 + RemoteIpv4NetworkCidr *string `type:"string"` + + // The IPv6 CIDR on the AWS side of the VPN connection. + // + // Default: ::/0 + RemoteIpv6NetworkCidr *string `type:"string"` + // Indicate whether the VPN connection uses static routes only. If you are creating // a VPN connection for a device that does not support BGP, you must specify // true. Use CreateVpnConnectionRoute to create a static route. @@ -108390,6 +114281,11 @@ type VpnConnectionOptionsSpecification struct { // Default: false StaticRoutesOnly *bool `locationName:"staticRoutesOnly" type:"boolean"` + // Indicate whether the VPN tunnels process IPv4 or IPv6 traffic. + // + // Default: ipv4 + TunnelInsideIpVersion *string `type:"string" enum:"TunnelInsideIpVersion"` + // The tunnel options for the VPN connection. 
TunnelOptions []*VpnTunnelOptionsSpecification `type:"list"` } @@ -108410,12 +114306,42 @@ func (s *VpnConnectionOptionsSpecification) SetEnableAcceleration(v bool) *VpnCo return s } +// SetLocalIpv4NetworkCidr sets the LocalIpv4NetworkCidr field's value. +func (s *VpnConnectionOptionsSpecification) SetLocalIpv4NetworkCidr(v string) *VpnConnectionOptionsSpecification { + s.LocalIpv4NetworkCidr = &v + return s +} + +// SetLocalIpv6NetworkCidr sets the LocalIpv6NetworkCidr field's value. +func (s *VpnConnectionOptionsSpecification) SetLocalIpv6NetworkCidr(v string) *VpnConnectionOptionsSpecification { + s.LocalIpv6NetworkCidr = &v + return s +} + +// SetRemoteIpv4NetworkCidr sets the RemoteIpv4NetworkCidr field's value. +func (s *VpnConnectionOptionsSpecification) SetRemoteIpv4NetworkCidr(v string) *VpnConnectionOptionsSpecification { + s.RemoteIpv4NetworkCidr = &v + return s +} + +// SetRemoteIpv6NetworkCidr sets the RemoteIpv6NetworkCidr field's value. +func (s *VpnConnectionOptionsSpecification) SetRemoteIpv6NetworkCidr(v string) *VpnConnectionOptionsSpecification { + s.RemoteIpv6NetworkCidr = &v + return s +} + // SetStaticRoutesOnly sets the StaticRoutesOnly field's value. func (s *VpnConnectionOptionsSpecification) SetStaticRoutesOnly(v bool) *VpnConnectionOptionsSpecification { s.StaticRoutesOnly = &v return s } +// SetTunnelInsideIpVersion sets the TunnelInsideIpVersion field's value. +func (s *VpnConnectionOptionsSpecification) SetTunnelInsideIpVersion(v string) *VpnConnectionOptionsSpecification { + s.TunnelInsideIpVersion = &v + return s +} + // SetTunnelOptions sets the TunnelOptions field's value. func (s *VpnConnectionOptionsSpecification) SetTunnelOptions(v []*VpnTunnelOptionsSpecification) *VpnConnectionOptionsSpecification { s.TunnelOptions = v @@ -108547,6 +114473,14 @@ func (s *VpnStaticRoute) SetState(v string) *VpnStaticRoute { type VpnTunnelOptionsSpecification struct { _ struct{} `type:"structure"` + // The action to take after DPD timeout occurs. Specify restart to restart the + // IKE initiation. Specify clear to end the IKE session. + // + // Valid Values: clear | none | restart + // + // Default: clear + DPDTimeoutAction *string `type:"string"` + // The number of seconds after which a DPD timeout occurs. // // Constraints: A value between 0 and 30. @@ -108562,19 +114496,19 @@ type VpnTunnelOptionsSpecification struct { // One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel // for phase 1 IKE negotiations. // - // Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24 + // Valid values: 2 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 Phase1DHGroupNumbers []*Phase1DHGroupNumbersRequestListValue `locationName:"Phase1DHGroupNumber" locationNameList:"item" type:"list"` // One or more encryption algorithms that are permitted for the VPN tunnel for // phase 1 IKE negotiations. // - // Valid values: AES128 | AES256 + // Valid values: AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16 Phase1EncryptionAlgorithms []*Phase1EncryptionAlgorithmsRequestListValue `locationName:"Phase1EncryptionAlgorithm" locationNameList:"item" type:"list"` // One or more integrity algorithms that are permitted for the VPN tunnel for // phase 1 IKE negotiations. // - // Valid values: SHA1 | SHA2-256 + // Valid values: SHA1 | SHA2-256 | SHA2-384 | SHA2-512 Phase1IntegrityAlgorithms []*Phase1IntegrityAlgorithmsRequestListValue `locationName:"Phase1IntegrityAlgorithm" locationNameList:"item" type:"list"` // The lifetime for phase 1 of the IKE negotiation, in seconds. 
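The VpnConnectionOptionsSpecification fields added above (local/remote IPv4 and IPv6 network CIDRs plus TunnelInsideIpVersion) are plain optional strings with generated setters. The following is a minimal Go sketch, not part of the vendored patch, of how a caller might assemble these options before attaching them to a CreateVpnConnection request; the CIDRs and IP version shown are placeholders, not the documented defaults.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Assemble VPN connection options using the setters added in this patch.
	spec := (&ec2.VpnConnectionOptionsSpecification{}).
		SetStaticRoutesOnly(true).
		SetTunnelInsideIpVersion("ipv4").         // or "ipv6"; placeholder choice
		SetLocalIpv4NetworkCidr("10.0.0.0/16").   // placeholder on-premises CIDR
		SetRemoteIpv4NetworkCidr("172.16.0.0/16") // placeholder AWS-side CIDR

	// In a real caller this value would be attached to a CreateVpnConnection input.
	fmt.Println(aws.StringValue(spec.TunnelInsideIpVersion), aws.BoolValue(spec.StaticRoutesOnly))
}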
@@ -108587,19 +114521,19 @@ type VpnTunnelOptionsSpecification struct { // One or more Diffie-Hellman group numbers that are permitted for the VPN tunnel // for phase 2 IKE negotiations. // - // Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 22 | 23 | 24 + // Valid values: 2 | 5 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 Phase2DHGroupNumbers []*Phase2DHGroupNumbersRequestListValue `locationName:"Phase2DHGroupNumber" locationNameList:"item" type:"list"` // One or more encryption algorithms that are permitted for the VPN tunnel for // phase 2 IKE negotiations. // - // Valid values: AES128 | AES256 + // Valid values: AES128 | AES256 | AES128-GCM-16 | AES256-GCM-16 Phase2EncryptionAlgorithms []*Phase2EncryptionAlgorithmsRequestListValue `locationName:"Phase2EncryptionAlgorithm" locationNameList:"item" type:"list"` // One or more integrity algorithms that are permitted for the VPN tunnel for // phase 2 IKE negotiations. // - // Valid values: SHA1 | SHA2-256 + // Valid values: SHA1 | SHA2-256 | SHA2-384 | SHA2-512 Phase2IntegrityAlgorithms []*Phase2IntegrityAlgorithmsRequestListValue `locationName:"Phase2IntegrityAlgorithm" locationNameList:"item" type:"list"` // The lifetime for phase 2 of the IKE negotiation, in seconds. @@ -108642,7 +114576,16 @@ type VpnTunnelOptionsSpecification struct { // Default: 1024 ReplayWindowSize *int64 `type:"integer"` - // The range of inside IP addresses for the tunnel. Any specified CIDR blocks + // The action to take when the establishing the tunnel for the VPN connection. + // By default, your customer gateway device must initiate the IKE negotiation + // and bring up the tunnel. Specify start for AWS to initiate the IKE negotiation. + // + // Valid Values: add | start + // + // Default: add + StartupAction *string `type:"string"` + + // The range of inside IPv4 addresses for the tunnel. Any specified CIDR blocks // must be unique across all VPN connections that use the same virtual private // gateway. // @@ -108663,6 +114606,12 @@ type VpnTunnelOptionsSpecification struct { // // * 169.254.169.252/30 TunnelInsideCidr *string `type:"string"` + + // The range of inside IPv6 addresses for the tunnel. Any specified CIDR blocks + // must be unique across all VPN connections that use the same transit gateway. + // + // Constraints: A size /126 CIDR block from the local fd00::/8 range. + TunnelInsideIpv6Cidr *string `type:"string"` } // String returns the string representation @@ -108675,6 +114624,12 @@ func (s VpnTunnelOptionsSpecification) GoString() string { return s.String() } +// SetDPDTimeoutAction sets the DPDTimeoutAction field's value. +func (s *VpnTunnelOptionsSpecification) SetDPDTimeoutAction(v string) *VpnTunnelOptionsSpecification { + s.DPDTimeoutAction = &v + return s +} + // SetDPDTimeoutSeconds sets the DPDTimeoutSeconds field's value. func (s *VpnTunnelOptionsSpecification) SetDPDTimeoutSeconds(v int64) *VpnTunnelOptionsSpecification { s.DPDTimeoutSeconds = &v @@ -108759,12 +114714,24 @@ func (s *VpnTunnelOptionsSpecification) SetReplayWindowSize(v int64) *VpnTunnelO return s } +// SetStartupAction sets the StartupAction field's value. +func (s *VpnTunnelOptionsSpecification) SetStartupAction(v string) *VpnTunnelOptionsSpecification { + s.StartupAction = &v + return s +} + // SetTunnelInsideCidr sets the TunnelInsideCidr field's value. 
func (s *VpnTunnelOptionsSpecification) SetTunnelInsideCidr(v string) *VpnTunnelOptionsSpecification { s.TunnelInsideCidr = &v return s } +// SetTunnelInsideIpv6Cidr sets the TunnelInsideIpv6Cidr field's value. +func (s *VpnTunnelOptionsSpecification) SetTunnelInsideIpv6Cidr(v string) *VpnTunnelOptionsSpecification { + s.TunnelInsideIpv6Cidr = &v + return s +} + type WithdrawByoipCidrInput struct { _ struct{} `type:"structure"` @@ -108846,6 +114813,14 @@ const ( AccountAttributeNameDefaultVpc = "default-vpc" ) +// AccountAttributeName_Values returns all elements of the AccountAttributeName enum +func AccountAttributeName_Values() []string { + return []string{ + AccountAttributeNameSupportedPlatforms, + AccountAttributeNameDefaultVpc, + } +} + const ( // ActivityStatusError is a ActivityStatus enum value ActivityStatusError = "error" @@ -108860,6 +114835,16 @@ const ( ActivityStatusFulfilled = "fulfilled" ) +// ActivityStatus_Values returns all elements of the ActivityStatus enum +func ActivityStatus_Values() []string { + return []string{ + ActivityStatusError, + ActivityStatusPendingFulfillment, + ActivityStatusPendingTermination, + ActivityStatusFulfilled, + } +} + const ( // AffinityDefault is a Affinity enum value AffinityDefault = "default" @@ -108868,6 +114853,14 @@ const ( AffinityHost = "host" ) +// Affinity_Values returns all elements of the Affinity enum +func Affinity_Values() []string { + return []string{ + AffinityDefault, + AffinityHost, + } +} + const ( // AllocationStateAvailable is a AllocationState enum value AllocationStateAvailable = "available" @@ -108888,6 +114881,18 @@ const ( AllocationStatePending = "pending" ) +// AllocationState_Values returns all elements of the AllocationState enum +func AllocationState_Values() []string { + return []string{ + AllocationStateAvailable, + AllocationStateUnderAssessment, + AllocationStatePermanentFailure, + AllocationStateReleased, + AllocationStateReleasedPermanentFailure, + AllocationStatePending, + } +} + const ( // AllocationStrategyLowestPrice is a AllocationStrategy enum value AllocationStrategyLowestPrice = "lowestPrice" @@ -108899,6 +114904,15 @@ const ( AllocationStrategyCapacityOptimized = "capacityOptimized" ) +// AllocationStrategy_Values returns all elements of the AllocationStrategy enum +func AllocationStrategy_Values() []string { + return []string{ + AllocationStrategyLowestPrice, + AllocationStrategyDiversified, + AllocationStrategyCapacityOptimized, + } +} + const ( // AllowsMultipleInstanceTypesOn is a AllowsMultipleInstanceTypes enum value AllowsMultipleInstanceTypesOn = "on" @@ -108907,6 +114921,14 @@ const ( AllowsMultipleInstanceTypesOff = "off" ) +// AllowsMultipleInstanceTypes_Values returns all elements of the AllowsMultipleInstanceTypes enum +func AllowsMultipleInstanceTypes_Values() []string { + return []string{ + AllowsMultipleInstanceTypesOn, + AllowsMultipleInstanceTypesOff, + } +} + const ( // ArchitectureTypeI386 is a ArchitectureType enum value ArchitectureTypeI386 = "i386" @@ -108918,6 +114940,15 @@ const ( ArchitectureTypeArm64 = "arm64" ) +// ArchitectureType_Values returns all elements of the ArchitectureType enum +func ArchitectureType_Values() []string { + return []string{ + ArchitectureTypeI386, + ArchitectureTypeX8664, + ArchitectureTypeArm64, + } +} + const ( // ArchitectureValuesI386 is a ArchitectureValues enum value ArchitectureValuesI386 = "i386" @@ -108929,11 +114960,27 @@ const ( ArchitectureValuesArm64 = "arm64" ) +// ArchitectureValues_Values returns all elements of the 
ArchitectureValues enum +func ArchitectureValues_Values() []string { + return []string{ + ArchitectureValuesI386, + ArchitectureValuesX8664, + ArchitectureValuesArm64, + } +} + const ( // AssociatedNetworkTypeVpc is a AssociatedNetworkType enum value AssociatedNetworkTypeVpc = "vpc" ) +// AssociatedNetworkType_Values returns all elements of the AssociatedNetworkType enum +func AssociatedNetworkType_Values() []string { + return []string{ + AssociatedNetworkTypeVpc, + } +} + const ( // AssociationStatusCodeAssociating is a AssociationStatusCode enum value AssociationStatusCodeAssociating = "associating" @@ -108951,6 +114998,17 @@ const ( AssociationStatusCodeDisassociated = "disassociated" ) +// AssociationStatusCode_Values returns all elements of the AssociationStatusCode enum +func AssociationStatusCode_Values() []string { + return []string{ + AssociationStatusCodeAssociating, + AssociationStatusCodeAssociated, + AssociationStatusCodeAssociationFailed, + AssociationStatusCodeDisassociating, + AssociationStatusCodeDisassociated, + } +} + const ( // AttachmentStatusAttaching is a AttachmentStatus enum value AttachmentStatusAttaching = "attaching" @@ -108965,6 +115023,16 @@ const ( AttachmentStatusDetached = "detached" ) +// AttachmentStatus_Values returns all elements of the AttachmentStatus enum +func AttachmentStatus_Values() []string { + return []string{ + AttachmentStatusAttaching, + AttachmentStatusAttached, + AttachmentStatusDetaching, + AttachmentStatusDetached, + } +} + const ( // AutoAcceptSharedAttachmentsValueEnable is a AutoAcceptSharedAttachmentsValue enum value AutoAcceptSharedAttachmentsValueEnable = "enable" @@ -108973,6 +115041,14 @@ const ( AutoAcceptSharedAttachmentsValueDisable = "disable" ) +// AutoAcceptSharedAttachmentsValue_Values returns all elements of the AutoAcceptSharedAttachmentsValue enum +func AutoAcceptSharedAttachmentsValue_Values() []string { + return []string{ + AutoAcceptSharedAttachmentsValueEnable, + AutoAcceptSharedAttachmentsValueDisable, + } +} + const ( // AutoPlacementOn is a AutoPlacement enum value AutoPlacementOn = "on" @@ -108981,6 +115057,14 @@ const ( AutoPlacementOff = "off" ) +// AutoPlacement_Values returns all elements of the AutoPlacement enum +func AutoPlacement_Values() []string { + return []string{ + AutoPlacementOn, + AutoPlacementOff, + } +} + const ( // AvailabilityZoneOptInStatusOptInNotRequired is a AvailabilityZoneOptInStatus enum value AvailabilityZoneOptInStatusOptInNotRequired = "opt-in-not-required" @@ -108992,6 +115076,15 @@ const ( AvailabilityZoneOptInStatusNotOptedIn = "not-opted-in" ) +// AvailabilityZoneOptInStatus_Values returns all elements of the AvailabilityZoneOptInStatus enum +func AvailabilityZoneOptInStatus_Values() []string { + return []string{ + AvailabilityZoneOptInStatusOptInNotRequired, + AvailabilityZoneOptInStatusOptedIn, + AvailabilityZoneOptInStatusNotOptedIn, + } +} + const ( // AvailabilityZoneStateAvailable is a AvailabilityZoneState enum value AvailabilityZoneStateAvailable = "available" @@ -109006,6 +115099,16 @@ const ( AvailabilityZoneStateUnavailable = "unavailable" ) +// AvailabilityZoneState_Values returns all elements of the AvailabilityZoneState enum +func AvailabilityZoneState_Values() []string { + return []string{ + AvailabilityZoneStateAvailable, + AvailabilityZoneStateInformation, + AvailabilityZoneStateImpaired, + AvailabilityZoneStateUnavailable, + } +} + const ( // BatchStateSubmitted is a BatchState enum value BatchStateSubmitted = "submitted" @@ -109029,6 +115132,19 @@ const ( 
BatchStateModifying = "modifying" ) +// BatchState_Values returns all elements of the BatchState enum +func BatchState_Values() []string { + return []string{ + BatchStateSubmitted, + BatchStateActive, + BatchStateCancelled, + BatchStateFailed, + BatchStateCancelledRunning, + BatchStateCancelledTerminating, + BatchStateModifying, + } +} + const ( // BundleTaskStatePending is a BundleTaskState enum value BundleTaskStatePending = "pending" @@ -109052,6 +115168,19 @@ const ( BundleTaskStateFailed = "failed" ) +// BundleTaskState_Values returns all elements of the BundleTaskState enum +func BundleTaskState_Values() []string { + return []string{ + BundleTaskStatePending, + BundleTaskStateWaitingForShutdown, + BundleTaskStateBundling, + BundleTaskStateStoring, + BundleTaskStateCancelling, + BundleTaskStateComplete, + BundleTaskStateFailed, + } +} + const ( // ByoipCidrStateAdvertised is a ByoipCidrState enum value ByoipCidrStateAdvertised = "advertised" @@ -109078,6 +115207,20 @@ const ( ByoipCidrStateProvisionedNotPubliclyAdvertisable = "provisioned-not-publicly-advertisable" ) +// ByoipCidrState_Values returns all elements of the ByoipCidrState enum +func ByoipCidrState_Values() []string { + return []string{ + ByoipCidrStateAdvertised, + ByoipCidrStateDeprovisioned, + ByoipCidrStateFailedDeprovision, + ByoipCidrStateFailedProvision, + ByoipCidrStatePendingDeprovision, + ByoipCidrStatePendingProvision, + ByoipCidrStateProvisioned, + ByoipCidrStateProvisionedNotPubliclyAdvertisable, + } +} + const ( // CancelBatchErrorCodeFleetRequestIdDoesNotExist is a CancelBatchErrorCode enum value CancelBatchErrorCodeFleetRequestIdDoesNotExist = "fleetRequestIdDoesNotExist" @@ -109092,6 +115235,16 @@ const ( CancelBatchErrorCodeUnexpectedError = "unexpectedError" ) +// CancelBatchErrorCode_Values returns all elements of the CancelBatchErrorCode enum +func CancelBatchErrorCode_Values() []string { + return []string{ + CancelBatchErrorCodeFleetRequestIdDoesNotExist, + CancelBatchErrorCodeFleetRequestIdMalformed, + CancelBatchErrorCodeFleetRequestNotInCancellableState, + CancelBatchErrorCodeUnexpectedError, + } +} + const ( // CancelSpotInstanceRequestStateActive is a CancelSpotInstanceRequestState enum value CancelSpotInstanceRequestStateActive = "active" @@ -109109,6 +115262,17 @@ const ( CancelSpotInstanceRequestStateCompleted = "completed" ) +// CancelSpotInstanceRequestState_Values returns all elements of the CancelSpotInstanceRequestState enum +func CancelSpotInstanceRequestState_Values() []string { + return []string{ + CancelSpotInstanceRequestStateActive, + CancelSpotInstanceRequestStateOpen, + CancelSpotInstanceRequestStateClosed, + CancelSpotInstanceRequestStateCancelled, + CancelSpotInstanceRequestStateCompleted, + } +} + const ( // CapacityReservationInstancePlatformLinuxUnix is a CapacityReservationInstancePlatform enum value CapacityReservationInstancePlatformLinuxUnix = "Linux/UNIX" @@ -109144,6 +115308,23 @@ const ( CapacityReservationInstancePlatformLinuxwithSqlserverEnterprise = "Linux with SQL Server Enterprise" ) +// CapacityReservationInstancePlatform_Values returns all elements of the CapacityReservationInstancePlatform enum +func CapacityReservationInstancePlatform_Values() []string { + return []string{ + CapacityReservationInstancePlatformLinuxUnix, + CapacityReservationInstancePlatformRedHatEnterpriseLinux, + CapacityReservationInstancePlatformSuselinux, + CapacityReservationInstancePlatformWindows, + CapacityReservationInstancePlatformWindowswithSqlserver, + 
CapacityReservationInstancePlatformWindowswithSqlserverEnterprise, + CapacityReservationInstancePlatformWindowswithSqlserverStandard, + CapacityReservationInstancePlatformWindowswithSqlserverWeb, + CapacityReservationInstancePlatformLinuxwithSqlserverStandard, + CapacityReservationInstancePlatformLinuxwithSqlserverWeb, + CapacityReservationInstancePlatformLinuxwithSqlserverEnterprise, + } +} + const ( // CapacityReservationPreferenceOpen is a CapacityReservationPreference enum value CapacityReservationPreferenceOpen = "open" @@ -109152,6 +115333,14 @@ const ( CapacityReservationPreferenceNone = "none" ) +// CapacityReservationPreference_Values returns all elements of the CapacityReservationPreference enum +func CapacityReservationPreference_Values() []string { + return []string{ + CapacityReservationPreferenceOpen, + CapacityReservationPreferenceNone, + } +} + const ( // CapacityReservationStateActive is a CapacityReservationState enum value CapacityReservationStateActive = "active" @@ -109169,6 +115358,17 @@ const ( CapacityReservationStateFailed = "failed" ) +// CapacityReservationState_Values returns all elements of the CapacityReservationState enum +func CapacityReservationState_Values() []string { + return []string{ + CapacityReservationStateActive, + CapacityReservationStateExpired, + CapacityReservationStateCancelled, + CapacityReservationStatePending, + CapacityReservationStateFailed, + } +} + const ( // CapacityReservationTenancyDefault is a CapacityReservationTenancy enum value CapacityReservationTenancyDefault = "default" @@ -109177,6 +115377,38 @@ const ( CapacityReservationTenancyDedicated = "dedicated" ) +// CapacityReservationTenancy_Values returns all elements of the CapacityReservationTenancy enum +func CapacityReservationTenancy_Values() []string { + return []string{ + CapacityReservationTenancyDefault, + CapacityReservationTenancyDedicated, + } +} + +const ( + // CarrierGatewayStatePending is a CarrierGatewayState enum value + CarrierGatewayStatePending = "pending" + + // CarrierGatewayStateAvailable is a CarrierGatewayState enum value + CarrierGatewayStateAvailable = "available" + + // CarrierGatewayStateDeleting is a CarrierGatewayState enum value + CarrierGatewayStateDeleting = "deleting" + + // CarrierGatewayStateDeleted is a CarrierGatewayState enum value + CarrierGatewayStateDeleted = "deleted" +) + +// CarrierGatewayState_Values returns all elements of the CarrierGatewayState enum +func CarrierGatewayState_Values() []string { + return []string{ + CarrierGatewayStatePending, + CarrierGatewayStateAvailable, + CarrierGatewayStateDeleting, + CarrierGatewayStateDeleted, + } +} + const ( // ClientCertificateRevocationListStatusCodePending is a ClientCertificateRevocationListStatusCode enum value ClientCertificateRevocationListStatusCodePending = "pending" @@ -109185,14 +115417,34 @@ const ( ClientCertificateRevocationListStatusCodeActive = "active" ) +// ClientCertificateRevocationListStatusCode_Values returns all elements of the ClientCertificateRevocationListStatusCode enum +func ClientCertificateRevocationListStatusCode_Values() []string { + return []string{ + ClientCertificateRevocationListStatusCodePending, + ClientCertificateRevocationListStatusCodeActive, + } +} + const ( // ClientVpnAuthenticationTypeCertificateAuthentication is a ClientVpnAuthenticationType enum value ClientVpnAuthenticationTypeCertificateAuthentication = "certificate-authentication" // ClientVpnAuthenticationTypeDirectoryServiceAuthentication is a ClientVpnAuthenticationType enum value 
ClientVpnAuthenticationTypeDirectoryServiceAuthentication = "directory-service-authentication" + + // ClientVpnAuthenticationTypeFederatedAuthentication is a ClientVpnAuthenticationType enum value + ClientVpnAuthenticationTypeFederatedAuthentication = "federated-authentication" ) +// ClientVpnAuthenticationType_Values returns all elements of the ClientVpnAuthenticationType enum +func ClientVpnAuthenticationType_Values() []string { + return []string{ + ClientVpnAuthenticationTypeCertificateAuthentication, + ClientVpnAuthenticationTypeDirectoryServiceAuthentication, + ClientVpnAuthenticationTypeFederatedAuthentication, + } +} + const ( // ClientVpnAuthorizationRuleStatusCodeAuthorizing is a ClientVpnAuthorizationRuleStatusCode enum value ClientVpnAuthorizationRuleStatusCodeAuthorizing = "authorizing" @@ -109207,6 +115459,16 @@ const ( ClientVpnAuthorizationRuleStatusCodeRevoking = "revoking" ) +// ClientVpnAuthorizationRuleStatusCode_Values returns all elements of the ClientVpnAuthorizationRuleStatusCode enum +func ClientVpnAuthorizationRuleStatusCode_Values() []string { + return []string{ + ClientVpnAuthorizationRuleStatusCodeAuthorizing, + ClientVpnAuthorizationRuleStatusCodeActive, + ClientVpnAuthorizationRuleStatusCodeFailed, + ClientVpnAuthorizationRuleStatusCodeRevoking, + } +} + const ( // ClientVpnConnectionStatusCodeActive is a ClientVpnConnectionStatusCode enum value ClientVpnConnectionStatusCodeActive = "active" @@ -109221,6 +115483,16 @@ const ( ClientVpnConnectionStatusCodeTerminated = "terminated" ) +// ClientVpnConnectionStatusCode_Values returns all elements of the ClientVpnConnectionStatusCode enum +func ClientVpnConnectionStatusCode_Values() []string { + return []string{ + ClientVpnConnectionStatusCodeActive, + ClientVpnConnectionStatusCodeFailedToTerminate, + ClientVpnConnectionStatusCodeTerminating, + ClientVpnConnectionStatusCodeTerminated, + } +} + const ( // ClientVpnEndpointStatusCodePendingAssociate is a ClientVpnEndpointStatusCode enum value ClientVpnEndpointStatusCodePendingAssociate = "pending-associate" @@ -109235,6 +115507,16 @@ const ( ClientVpnEndpointStatusCodeDeleted = "deleted" ) +// ClientVpnEndpointStatusCode_Values returns all elements of the ClientVpnEndpointStatusCode enum +func ClientVpnEndpointStatusCode_Values() []string { + return []string{ + ClientVpnEndpointStatusCodePendingAssociate, + ClientVpnEndpointStatusCodeAvailable, + ClientVpnEndpointStatusCodeDeleting, + ClientVpnEndpointStatusCodeDeleted, + } +} + const ( // ClientVpnRouteStatusCodeCreating is a ClientVpnRouteStatusCode enum value ClientVpnRouteStatusCodeCreating = "creating" @@ -109249,6 +115531,16 @@ const ( ClientVpnRouteStatusCodeDeleting = "deleting" ) +// ClientVpnRouteStatusCode_Values returns all elements of the ClientVpnRouteStatusCode enum +func ClientVpnRouteStatusCode_Values() []string { + return []string{ + ClientVpnRouteStatusCodeCreating, + ClientVpnRouteStatusCodeActive, + ClientVpnRouteStatusCodeFailed, + ClientVpnRouteStatusCodeDeleting, + } +} + const ( // ConnectionNotificationStateEnabled is a ConnectionNotificationState enum value ConnectionNotificationStateEnabled = "Enabled" @@ -109257,16 +115549,38 @@ const ( ConnectionNotificationStateDisabled = "Disabled" ) +// ConnectionNotificationState_Values returns all elements of the ConnectionNotificationState enum +func ConnectionNotificationState_Values() []string { + return []string{ + ConnectionNotificationStateEnabled, + ConnectionNotificationStateDisabled, + } +} + const ( // ConnectionNotificationTypeTopic is a 
ConnectionNotificationType enum value ConnectionNotificationTypeTopic = "Topic" ) +// ConnectionNotificationType_Values returns all elements of the ConnectionNotificationType enum +func ConnectionNotificationType_Values() []string { + return []string{ + ConnectionNotificationTypeTopic, + } +} + const ( // ContainerFormatOva is a ContainerFormat enum value ContainerFormatOva = "ova" ) +// ContainerFormat_Values returns all elements of the ContainerFormat enum +func ContainerFormat_Values() []string { + return []string{ + ContainerFormatOva, + } +} + const ( // ConversionTaskStateActive is a ConversionTaskState enum value ConversionTaskStateActive = "active" @@ -109281,16 +115595,40 @@ const ( ConversionTaskStateCompleted = "completed" ) +// ConversionTaskState_Values returns all elements of the ConversionTaskState enum +func ConversionTaskState_Values() []string { + return []string{ + ConversionTaskStateActive, + ConversionTaskStateCancelling, + ConversionTaskStateCancelled, + ConversionTaskStateCompleted, + } +} + const ( // CopyTagsFromSourceVolume is a CopyTagsFromSource enum value CopyTagsFromSourceVolume = "volume" ) +// CopyTagsFromSource_Values returns all elements of the CopyTagsFromSource enum +func CopyTagsFromSource_Values() []string { + return []string{ + CopyTagsFromSourceVolume, + } +} + const ( // CurrencyCodeValuesUsd is a CurrencyCodeValues enum value CurrencyCodeValuesUsd = "USD" ) +// CurrencyCodeValues_Values returns all elements of the CurrencyCodeValues enum +func CurrencyCodeValues_Values() []string { + return []string{ + CurrencyCodeValuesUsd, + } +} + const ( // DatafeedSubscriptionStateActive is a DatafeedSubscriptionState enum value DatafeedSubscriptionStateActive = "Active" @@ -109299,6 +115637,14 @@ const ( DatafeedSubscriptionStateInactive = "Inactive" ) +// DatafeedSubscriptionState_Values returns all elements of the DatafeedSubscriptionState enum +func DatafeedSubscriptionState_Values() []string { + return []string{ + DatafeedSubscriptionStateActive, + DatafeedSubscriptionStateInactive, + } +} + const ( // DefaultRouteTableAssociationValueEnable is a DefaultRouteTableAssociationValue enum value DefaultRouteTableAssociationValueEnable = "enable" @@ -109307,6 +115653,14 @@ const ( DefaultRouteTableAssociationValueDisable = "disable" ) +// DefaultRouteTableAssociationValue_Values returns all elements of the DefaultRouteTableAssociationValue enum +func DefaultRouteTableAssociationValue_Values() []string { + return []string{ + DefaultRouteTableAssociationValueEnable, + DefaultRouteTableAssociationValueDisable, + } +} + const ( // DefaultRouteTablePropagationValueEnable is a DefaultRouteTablePropagationValue enum value DefaultRouteTablePropagationValueEnable = "enable" @@ -109315,6 +115669,14 @@ const ( DefaultRouteTablePropagationValueDisable = "disable" ) +// DefaultRouteTablePropagationValue_Values returns all elements of the DefaultRouteTablePropagationValue enum +func DefaultRouteTablePropagationValue_Values() []string { + return []string{ + DefaultRouteTablePropagationValueEnable, + DefaultRouteTablePropagationValueDisable, + } +} + const ( // DefaultTargetCapacityTypeSpot is a DefaultTargetCapacityType enum value DefaultTargetCapacityTypeSpot = "spot" @@ -109323,6 +115685,14 @@ const ( DefaultTargetCapacityTypeOnDemand = "on-demand" ) +// DefaultTargetCapacityType_Values returns all elements of the DefaultTargetCapacityType enum +func DefaultTargetCapacityType_Values() []string { + return []string{ + DefaultTargetCapacityTypeSpot, + 
DefaultTargetCapacityTypeOnDemand, + } +} + const ( // DeleteFleetErrorCodeFleetIdDoesNotExist is a DeleteFleetErrorCode enum value DeleteFleetErrorCodeFleetIdDoesNotExist = "fleetIdDoesNotExist" @@ -109337,6 +115707,16 @@ const ( DeleteFleetErrorCodeUnexpectedError = "unexpectedError" ) +// DeleteFleetErrorCode_Values returns all elements of the DeleteFleetErrorCode enum +func DeleteFleetErrorCode_Values() []string { + return []string{ + DeleteFleetErrorCodeFleetIdDoesNotExist, + DeleteFleetErrorCodeFleetIdMalformed, + DeleteFleetErrorCodeFleetNotInDeletableState, + DeleteFleetErrorCodeUnexpectedError, + } +} + const ( // DeleteQueuedReservedInstancesErrorCodeReservedInstancesIdInvalid is a DeleteQueuedReservedInstancesErrorCode enum value DeleteQueuedReservedInstancesErrorCodeReservedInstancesIdInvalid = "reserved-instances-id-invalid" @@ -109348,6 +115728,15 @@ const ( DeleteQueuedReservedInstancesErrorCodeUnexpectedError = "unexpected-error" ) +// DeleteQueuedReservedInstancesErrorCode_Values returns all elements of the DeleteQueuedReservedInstancesErrorCode enum +func DeleteQueuedReservedInstancesErrorCode_Values() []string { + return []string{ + DeleteQueuedReservedInstancesErrorCodeReservedInstancesIdInvalid, + DeleteQueuedReservedInstancesErrorCodeReservedInstancesNotInQueuedState, + DeleteQueuedReservedInstancesErrorCodeUnexpectedError, + } +} + const ( // DeviceTypeEbs is a DeviceType enum value DeviceTypeEbs = "ebs" @@ -109356,6 +115745,14 @@ const ( DeviceTypeInstanceStore = "instance-store" ) +// DeviceType_Values returns all elements of the DeviceType enum +func DeviceType_Values() []string { + return []string{ + DeviceTypeEbs, + DeviceTypeInstanceStore, + } +} + const ( // DiskImageFormatVmdk is a DiskImageFormat enum value DiskImageFormatVmdk = "VMDK" @@ -109367,6 +115764,15 @@ const ( DiskImageFormatVhd = "VHD" ) +// DiskImageFormat_Values returns all elements of the DiskImageFormat enum +func DiskImageFormat_Values() []string { + return []string{ + DiskImageFormatVmdk, + DiskImageFormatRaw, + DiskImageFormatVhd, + } +} + const ( // DiskTypeHdd is a DiskType enum value DiskTypeHdd = "hdd" @@ -109375,6 +115781,14 @@ const ( DiskTypeSsd = "ssd" ) +// DiskType_Values returns all elements of the DiskType enum +func DiskType_Values() []string { + return []string{ + DiskTypeHdd, + DiskTypeSsd, + } +} + const ( // DnsNameStatePendingVerification is a DnsNameState enum value DnsNameStatePendingVerification = "pendingVerification" @@ -109386,6 +115800,15 @@ const ( DnsNameStateFailed = "failed" ) +// DnsNameState_Values returns all elements of the DnsNameState enum +func DnsNameState_Values() []string { + return []string{ + DnsNameStatePendingVerification, + DnsNameStateVerified, + DnsNameStateFailed, + } +} + const ( // DnsSupportValueEnable is a DnsSupportValue enum value DnsSupportValueEnable = "enable" @@ -109394,6 +115817,14 @@ const ( DnsSupportValueDisable = "disable" ) +// DnsSupportValue_Values returns all elements of the DnsSupportValue enum +func DnsSupportValue_Values() []string { + return []string{ + DnsSupportValueEnable, + DnsSupportValueDisable, + } +} + const ( // DomainTypeVpc is a DomainType enum value DomainTypeVpc = "vpc" @@ -109402,6 +115833,14 @@ const ( DomainTypeStandard = "standard" ) +// DomainType_Values returns all elements of the DomainType enum +func DomainType_Values() []string { + return []string{ + DomainTypeVpc, + DomainTypeStandard, + } +} + const ( // EbsEncryptionSupportUnsupported is a EbsEncryptionSupport enum value 
EbsEncryptionSupportUnsupported = "unsupported" @@ -109410,6 +115849,34 @@ const ( EbsEncryptionSupportSupported = "supported" ) +// EbsEncryptionSupport_Values returns all elements of the EbsEncryptionSupport enum +func EbsEncryptionSupport_Values() []string { + return []string{ + EbsEncryptionSupportUnsupported, + EbsEncryptionSupportSupported, + } +} + +const ( + // EbsNvmeSupportUnsupported is a EbsNvmeSupport enum value + EbsNvmeSupportUnsupported = "unsupported" + + // EbsNvmeSupportSupported is a EbsNvmeSupport enum value + EbsNvmeSupportSupported = "supported" + + // EbsNvmeSupportRequired is a EbsNvmeSupport enum value + EbsNvmeSupportRequired = "required" +) + +// EbsNvmeSupport_Values returns all elements of the EbsNvmeSupport enum +func EbsNvmeSupport_Values() []string { + return []string{ + EbsNvmeSupportUnsupported, + EbsNvmeSupportSupported, + EbsNvmeSupportRequired, + } +} + const ( // EbsOptimizedSupportUnsupported is a EbsOptimizedSupport enum value EbsOptimizedSupportUnsupported = "unsupported" @@ -109421,11 +115888,27 @@ const ( EbsOptimizedSupportDefault = "default" ) +// EbsOptimizedSupport_Values returns all elements of the EbsOptimizedSupport enum +func EbsOptimizedSupport_Values() []string { + return []string{ + EbsOptimizedSupportUnsupported, + EbsOptimizedSupportSupported, + EbsOptimizedSupportDefault, + } +} + const ( // ElasticGpuStateAttached is a ElasticGpuState enum value ElasticGpuStateAttached = "ATTACHED" ) +// ElasticGpuState_Values returns all elements of the ElasticGpuState enum +func ElasticGpuState_Values() []string { + return []string{ + ElasticGpuStateAttached, + } +} + const ( // ElasticGpuStatusOk is a ElasticGpuStatus enum value ElasticGpuStatusOk = "OK" @@ -109434,6 +115917,14 @@ const ( ElasticGpuStatusImpaired = "IMPAIRED" ) +// ElasticGpuStatus_Values returns all elements of the ElasticGpuStatus enum +func ElasticGpuStatus_Values() []string { + return []string{ + ElasticGpuStatusOk, + ElasticGpuStatusImpaired, + } +} + const ( // EnaSupportUnsupported is a EnaSupport enum value EnaSupportUnsupported = "unsupported" @@ -109445,6 +115936,15 @@ const ( EnaSupportRequired = "required" ) +// EnaSupport_Values returns all elements of the EnaSupport enum +func EnaSupport_Values() []string { + return []string{ + EnaSupportUnsupported, + EnaSupportSupported, + EnaSupportRequired, + } +} + const ( // EndDateTypeUnlimited is a EndDateType enum value EndDateTypeUnlimited = "unlimited" @@ -109453,6 +115953,14 @@ const ( EndDateTypeLimited = "limited" ) +// EndDateType_Values returns all elements of the EndDateType enum +func EndDateType_Values() []string { + return []string{ + EndDateTypeUnlimited, + EndDateTypeLimited, + } +} + const ( // EventCodeInstanceReboot is a EventCode enum value EventCodeInstanceReboot = "instance-reboot" @@ -109470,6 +115978,17 @@ const ( EventCodeInstanceStop = "instance-stop" ) +// EventCode_Values returns all elements of the EventCode enum +func EventCode_Values() []string { + return []string{ + EventCodeInstanceReboot, + EventCodeSystemReboot, + EventCodeSystemMaintenance, + EventCodeInstanceRetirement, + EventCodeInstanceStop, + } +} + const ( // EventTypeInstanceChange is a EventType enum value EventTypeInstanceChange = "instanceChange" @@ -109484,6 +116003,16 @@ const ( EventTypeInformation = "information" ) +// EventType_Values returns all elements of the EventType enum +func EventType_Values() []string { + return []string{ + EventTypeInstanceChange, + EventTypeFleetRequestChange, + EventTypeError, + 
EventTypeInformation, + } +} + const ( // ExcessCapacityTerminationPolicyNoTermination is a ExcessCapacityTerminationPolicy enum value ExcessCapacityTerminationPolicyNoTermination = "noTermination" @@ -109492,6 +116021,14 @@ const ( ExcessCapacityTerminationPolicyDefault = "default" ) +// ExcessCapacityTerminationPolicy_Values returns all elements of the ExcessCapacityTerminationPolicy enum +func ExcessCapacityTerminationPolicy_Values() []string { + return []string{ + ExcessCapacityTerminationPolicyNoTermination, + ExcessCapacityTerminationPolicyDefault, + } +} + const ( // ExportEnvironmentCitrix is a ExportEnvironment enum value ExportEnvironmentCitrix = "citrix" @@ -109503,6 +116040,15 @@ const ( ExportEnvironmentMicrosoft = "microsoft" ) +// ExportEnvironment_Values returns all elements of the ExportEnvironment enum +func ExportEnvironment_Values() []string { + return []string{ + ExportEnvironmentCitrix, + ExportEnvironmentVmware, + ExportEnvironmentMicrosoft, + } +} + const ( // ExportTaskStateActive is a ExportTaskState enum value ExportTaskStateActive = "active" @@ -109517,6 +116063,16 @@ const ( ExportTaskStateCompleted = "completed" ) +// ExportTaskState_Values returns all elements of the ExportTaskState enum +func ExportTaskState_Values() []string { + return []string{ + ExportTaskStateActive, + ExportTaskStateCancelling, + ExportTaskStateCancelled, + ExportTaskStateCompleted, + } +} + const ( // FastSnapshotRestoreStateCodeEnabling is a FastSnapshotRestoreStateCode enum value FastSnapshotRestoreStateCodeEnabling = "enabling" @@ -109534,6 +116090,17 @@ const ( FastSnapshotRestoreStateCodeDisabled = "disabled" ) +// FastSnapshotRestoreStateCode_Values returns all elements of the FastSnapshotRestoreStateCode enum +func FastSnapshotRestoreStateCode_Values() []string { + return []string{ + FastSnapshotRestoreStateCodeEnabling, + FastSnapshotRestoreStateCodeOptimizing, + FastSnapshotRestoreStateCodeEnabled, + FastSnapshotRestoreStateCodeDisabling, + FastSnapshotRestoreStateCodeDisabled, + } +} + const ( // FleetActivityStatusError is a FleetActivityStatus enum value FleetActivityStatusError = "error" @@ -109548,11 +116115,28 @@ const ( FleetActivityStatusFulfilled = "fulfilled" ) +// FleetActivityStatus_Values returns all elements of the FleetActivityStatus enum +func FleetActivityStatus_Values() []string { + return []string{ + FleetActivityStatusError, + FleetActivityStatusPendingFulfillment, + FleetActivityStatusPendingTermination, + FleetActivityStatusFulfilled, + } +} + const ( // FleetCapacityReservationUsageStrategyUseCapacityReservationsFirst is a FleetCapacityReservationUsageStrategy enum value FleetCapacityReservationUsageStrategyUseCapacityReservationsFirst = "use-capacity-reservations-first" ) +// FleetCapacityReservationUsageStrategy_Values returns all elements of the FleetCapacityReservationUsageStrategy enum +func FleetCapacityReservationUsageStrategy_Values() []string { + return []string{ + FleetCapacityReservationUsageStrategyUseCapacityReservationsFirst, + } +} + const ( // FleetEventTypeInstanceChange is a FleetEventType enum value FleetEventTypeInstanceChange = "instance-change" @@ -109564,6 +116148,15 @@ const ( FleetEventTypeServiceError = "service-error" ) +// FleetEventType_Values returns all elements of the FleetEventType enum +func FleetEventType_Values() []string { + return []string{ + FleetEventTypeInstanceChange, + FleetEventTypeFleetChange, + FleetEventTypeServiceError, + } +} + const ( // FleetExcessCapacityTerminationPolicyNoTermination is a 
FleetExcessCapacityTerminationPolicy enum value FleetExcessCapacityTerminationPolicyNoTermination = "no-termination" @@ -109572,6 +116165,14 @@ const ( FleetExcessCapacityTerminationPolicyTermination = "termination" ) +// FleetExcessCapacityTerminationPolicy_Values returns all elements of the FleetExcessCapacityTerminationPolicy enum +func FleetExcessCapacityTerminationPolicy_Values() []string { + return []string{ + FleetExcessCapacityTerminationPolicyNoTermination, + FleetExcessCapacityTerminationPolicyTermination, + } +} + const ( // FleetOnDemandAllocationStrategyLowestPrice is a FleetOnDemandAllocationStrategy enum value FleetOnDemandAllocationStrategyLowestPrice = "lowest-price" @@ -109580,6 +116181,14 @@ const ( FleetOnDemandAllocationStrategyPrioritized = "prioritized" ) +// FleetOnDemandAllocationStrategy_Values returns all elements of the FleetOnDemandAllocationStrategy enum +func FleetOnDemandAllocationStrategy_Values() []string { + return []string{ + FleetOnDemandAllocationStrategyLowestPrice, + FleetOnDemandAllocationStrategyPrioritized, + } +} + const ( // FleetStateCodeSubmitted is a FleetStateCode enum value FleetStateCodeSubmitted = "submitted" @@ -109603,6 +116212,19 @@ const ( FleetStateCodeModifying = "modifying" ) +// FleetStateCode_Values returns all elements of the FleetStateCode enum +func FleetStateCode_Values() []string { + return []string{ + FleetStateCodeSubmitted, + FleetStateCodeActive, + FleetStateCodeDeleted, + FleetStateCodeFailed, + FleetStateCodeDeletedRunning, + FleetStateCodeDeletedTerminating, + FleetStateCodeModifying, + } +} + const ( // FleetTypeRequest is a FleetType enum value FleetTypeRequest = "request" @@ -109614,6 +116236,15 @@ const ( FleetTypeInstant = "instant" ) +// FleetType_Values returns all elements of the FleetType enum +func FleetType_Values() []string { + return []string{ + FleetTypeRequest, + FleetTypeMaintain, + FleetTypeInstant, + } +} + const ( // FlowLogsResourceTypeVpc is a FlowLogsResourceType enum value FlowLogsResourceTypeVpc = "VPC" @@ -109625,6 +116256,15 @@ const ( FlowLogsResourceTypeNetworkInterface = "NetworkInterface" ) +// FlowLogsResourceType_Values returns all elements of the FlowLogsResourceType enum +func FlowLogsResourceType_Values() []string { + return []string{ + FlowLogsResourceTypeVpc, + FlowLogsResourceTypeSubnet, + FlowLogsResourceTypeNetworkInterface, + } +} + const ( // FpgaImageAttributeNameDescription is a FpgaImageAttributeName enum value FpgaImageAttributeNameDescription = "description" @@ -109639,6 +116279,16 @@ const ( FpgaImageAttributeNameProductCodes = "productCodes" ) +// FpgaImageAttributeName_Values returns all elements of the FpgaImageAttributeName enum +func FpgaImageAttributeName_Values() []string { + return []string{ + FpgaImageAttributeNameDescription, + FpgaImageAttributeNameName, + FpgaImageAttributeNameLoadPermission, + FpgaImageAttributeNameProductCodes, + } +} + const ( // FpgaImageStateCodePending is a FpgaImageStateCode enum value FpgaImageStateCodePending = "pending" @@ -109653,11 +116303,28 @@ const ( FpgaImageStateCodeUnavailable = "unavailable" ) +// FpgaImageStateCode_Values returns all elements of the FpgaImageStateCode enum +func FpgaImageStateCode_Values() []string { + return []string{ + FpgaImageStateCodePending, + FpgaImageStateCodeFailed, + FpgaImageStateCodeAvailable, + FpgaImageStateCodeUnavailable, + } +} + const ( // GatewayTypeIpsec1 is a GatewayType enum value GatewayTypeIpsec1 = "ipsec.1" ) +// GatewayType_Values returns all elements of the GatewayType enum +func 
GatewayType_Values() []string { + return []string{ + GatewayTypeIpsec1, + } +} + const ( // HostRecoveryOn is a HostRecovery enum value HostRecoveryOn = "on" @@ -109666,6 +116333,14 @@ const ( HostRecoveryOff = "off" ) +// HostRecovery_Values returns all elements of the HostRecovery enum +func HostRecovery_Values() []string { + return []string{ + HostRecoveryOn, + HostRecoveryOff, + } +} + const ( // HostTenancyDedicated is a HostTenancy enum value HostTenancyDedicated = "dedicated" @@ -109674,6 +116349,14 @@ const ( HostTenancyHost = "host" ) +// HostTenancy_Values returns all elements of the HostTenancy enum +func HostTenancy_Values() []string { + return []string{ + HostTenancyDedicated, + HostTenancyHost, + } +} + const ( // HttpTokensStateOptional is a HttpTokensState enum value HttpTokensStateOptional = "optional" @@ -109682,6 +116365,14 @@ const ( HttpTokensStateRequired = "required" ) +// HttpTokensState_Values returns all elements of the HttpTokensState enum +func HttpTokensState_Values() []string { + return []string{ + HttpTokensStateOptional, + HttpTokensStateRequired, + } +} + const ( // HypervisorTypeOvm is a HypervisorType enum value HypervisorTypeOvm = "ovm" @@ -109690,6 +116381,14 @@ const ( HypervisorTypeXen = "xen" ) +// HypervisorType_Values returns all elements of the HypervisorType enum +func HypervisorType_Values() []string { + return []string{ + HypervisorTypeOvm, + HypervisorTypeXen, + } +} + const ( // IamInstanceProfileAssociationStateAssociating is a IamInstanceProfileAssociationState enum value IamInstanceProfileAssociationStateAssociating = "associating" @@ -109704,6 +116403,16 @@ const ( IamInstanceProfileAssociationStateDisassociated = "disassociated" ) +// IamInstanceProfileAssociationState_Values returns all elements of the IamInstanceProfileAssociationState enum +func IamInstanceProfileAssociationState_Values() []string { + return []string{ + IamInstanceProfileAssociationStateAssociating, + IamInstanceProfileAssociationStateAssociated, + IamInstanceProfileAssociationStateDisassociating, + IamInstanceProfileAssociationStateDisassociated, + } +} + const ( // ImageAttributeNameDescription is a ImageAttributeName enum value ImageAttributeNameDescription = "description" @@ -109727,6 +116436,19 @@ const ( ImageAttributeNameSriovNetSupport = "sriovNetSupport" ) +// ImageAttributeName_Values returns all elements of the ImageAttributeName enum +func ImageAttributeName_Values() []string { + return []string{ + ImageAttributeNameDescription, + ImageAttributeNameKernel, + ImageAttributeNameRamdisk, + ImageAttributeNameLaunchPermission, + ImageAttributeNameProductCodes, + ImageAttributeNameBlockDeviceMapping, + ImageAttributeNameSriovNetSupport, + } +} + const ( // ImageStatePending is a ImageState enum value ImageStatePending = "pending" @@ -109750,6 +116472,19 @@ const ( ImageStateError = "error" ) +// ImageState_Values returns all elements of the ImageState enum +func ImageState_Values() []string { + return []string{ + ImageStatePending, + ImageStateAvailable, + ImageStateInvalid, + ImageStateDeregistered, + ImageStateTransient, + ImageStateFailed, + ImageStateError, + } +} + const ( // ImageTypeValuesMachine is a ImageTypeValues enum value ImageTypeValuesMachine = "machine" @@ -109761,6 +116496,15 @@ const ( ImageTypeValuesRamdisk = "ramdisk" ) +// ImageTypeValues_Values returns all elements of the ImageTypeValues enum +func ImageTypeValues_Values() []string { + return []string{ + ImageTypeValuesMachine, + ImageTypeValuesKernel, + ImageTypeValuesRamdisk, + } +} + 
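Each of the generated <Name>_Values() helpers in this block simply returns the full list of string constants for one enum, which is useful for validating user-supplied values (for example in provider schema validation). A minimal Go sketch, separate from the vendored patch, using one of the helpers defined above; the contains function is a hypothetical local helper added only for illustration.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ec2"
)

// contains reports whether candidate appears in an enum's _Values() slice.
// It is a local illustration helper, not part of the SDK.
func contains(values []string, candidate string) bool {
	for _, v := range values {
		if v == candidate {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(contains(ec2.ImageState_Values(), "available")) // true: a defined ImageState value
	fmt.Println(contains(ec2.ImageState_Values(), "bogus"))     // false: not an ImageState value
}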
const ( // InstanceAttributeNameInstanceType is a InstanceAttributeName enum value InstanceAttributeNameInstanceType = "instanceType" @@ -109805,6 +116549,26 @@ const ( InstanceAttributeNameEnaSupport = "enaSupport" ) +// InstanceAttributeName_Values returns all elements of the InstanceAttributeName enum +func InstanceAttributeName_Values() []string { + return []string{ + InstanceAttributeNameInstanceType, + InstanceAttributeNameKernel, + InstanceAttributeNameRamdisk, + InstanceAttributeNameUserData, + InstanceAttributeNameDisableApiTermination, + InstanceAttributeNameInstanceInitiatedShutdownBehavior, + InstanceAttributeNameRootDeviceName, + InstanceAttributeNameBlockDeviceMapping, + InstanceAttributeNameProductCodes, + InstanceAttributeNameSourceDestCheck, + InstanceAttributeNameGroupSet, + InstanceAttributeNameEbsOptimized, + InstanceAttributeNameSriovNetSupport, + InstanceAttributeNameEnaSupport, + } +} + const ( // InstanceHealthStatusHealthy is a InstanceHealthStatus enum value InstanceHealthStatusHealthy = "healthy" @@ -109813,6 +116577,14 @@ const ( InstanceHealthStatusUnhealthy = "unhealthy" ) +// InstanceHealthStatus_Values returns all elements of the InstanceHealthStatus enum +func InstanceHealthStatus_Values() []string { + return []string{ + InstanceHealthStatusHealthy, + InstanceHealthStatusUnhealthy, + } +} + const ( // InstanceInterruptionBehaviorHibernate is a InstanceInterruptionBehavior enum value InstanceInterruptionBehaviorHibernate = "hibernate" @@ -109824,6 +116596,15 @@ const ( InstanceInterruptionBehaviorTerminate = "terminate" ) +// InstanceInterruptionBehavior_Values returns all elements of the InstanceInterruptionBehavior enum +func InstanceInterruptionBehavior_Values() []string { + return []string{ + InstanceInterruptionBehaviorHibernate, + InstanceInterruptionBehaviorStop, + InstanceInterruptionBehaviorTerminate, + } +} + const ( // InstanceLifecycleSpot is a InstanceLifecycle enum value InstanceLifecycleSpot = "spot" @@ -109832,6 +116613,14 @@ const ( InstanceLifecycleOnDemand = "on-demand" ) +// InstanceLifecycle_Values returns all elements of the InstanceLifecycle enum +func InstanceLifecycle_Values() []string { + return []string{ + InstanceLifecycleSpot, + InstanceLifecycleOnDemand, + } +} + const ( // InstanceLifecycleTypeSpot is a InstanceLifecycleType enum value InstanceLifecycleTypeSpot = "spot" @@ -109840,6 +116629,14 @@ const ( InstanceLifecycleTypeScheduled = "scheduled" ) +// InstanceLifecycleType_Values returns all elements of the InstanceLifecycleType enum +func InstanceLifecycleType_Values() []string { + return []string{ + InstanceLifecycleTypeSpot, + InstanceLifecycleTypeScheduled, + } +} + const ( // InstanceMatchCriteriaOpen is a InstanceMatchCriteria enum value InstanceMatchCriteriaOpen = "open" @@ -109848,6 +116645,14 @@ const ( InstanceMatchCriteriaTargeted = "targeted" ) +// InstanceMatchCriteria_Values returns all elements of the InstanceMatchCriteria enum +func InstanceMatchCriteria_Values() []string { + return []string{ + InstanceMatchCriteriaOpen, + InstanceMatchCriteriaTargeted, + } +} + const ( // InstanceMetadataEndpointStateDisabled is a InstanceMetadataEndpointState enum value InstanceMetadataEndpointStateDisabled = "disabled" @@ -109856,6 +116661,14 @@ const ( InstanceMetadataEndpointStateEnabled = "enabled" ) +// InstanceMetadataEndpointState_Values returns all elements of the InstanceMetadataEndpointState enum +func InstanceMetadataEndpointState_Values() []string { + return []string{ + InstanceMetadataEndpointStateDisabled, + 
InstanceMetadataEndpointStateEnabled, + } +} + const ( // InstanceMetadataOptionsStatePending is a InstanceMetadataOptionsState enum value InstanceMetadataOptionsStatePending = "pending" @@ -109864,6 +116677,14 @@ const ( InstanceMetadataOptionsStateApplied = "applied" ) +// InstanceMetadataOptionsState_Values returns all elements of the InstanceMetadataOptionsState enum +func InstanceMetadataOptionsState_Values() []string { + return []string{ + InstanceMetadataOptionsStatePending, + InstanceMetadataOptionsStateApplied, + } +} + const ( // InstanceStateNamePending is a InstanceStateName enum value InstanceStateNamePending = "pending" @@ -109884,6 +116705,18 @@ const ( InstanceStateNameStopped = "stopped" ) +// InstanceStateName_Values returns all elements of the InstanceStateName enum +func InstanceStateName_Values() []string { + return []string{ + InstanceStateNamePending, + InstanceStateNameRunning, + InstanceStateNameShuttingDown, + InstanceStateNameTerminated, + InstanceStateNameStopping, + InstanceStateNameStopped, + } +} + const ( // InstanceTypeT1Micro is a InstanceType enum value InstanceTypeT1Micro = "t1.micro" @@ -109951,6 +116784,27 @@ const ( // InstanceTypeT3a2xlarge is a InstanceType enum value InstanceTypeT3a2xlarge = "t3a.2xlarge" + // InstanceTypeT4gNano is a InstanceType enum value + InstanceTypeT4gNano = "t4g.nano" + + // InstanceTypeT4gMicro is a InstanceType enum value + InstanceTypeT4gMicro = "t4g.micro" + + // InstanceTypeT4gSmall is a InstanceType enum value + InstanceTypeT4gSmall = "t4g.small" + + // InstanceTypeT4gMedium is a InstanceType enum value + InstanceTypeT4gMedium = "t4g.medium" + + // InstanceTypeT4gLarge is a InstanceType enum value + InstanceTypeT4gLarge = "t4g.large" + + // InstanceTypeT4gXlarge is a InstanceType enum value + InstanceTypeT4gXlarge = "t4g.xlarge" + + // InstanceTypeT4g2xlarge is a InstanceType enum value + InstanceTypeT4g2xlarge = "t4g.2xlarge" + // InstanceTypeM1Small is a InstanceType enum value InstanceTypeM1Small = "m1.small" @@ -110140,6 +116994,60 @@ const ( // InstanceTypeR5ad24xlarge is a InstanceType enum value InstanceTypeR5ad24xlarge = "r5ad.24xlarge" + // InstanceTypeR6gMetal is a InstanceType enum value + InstanceTypeR6gMetal = "r6g.metal" + + // InstanceTypeR6gMedium is a InstanceType enum value + InstanceTypeR6gMedium = "r6g.medium" + + // InstanceTypeR6gLarge is a InstanceType enum value + InstanceTypeR6gLarge = "r6g.large" + + // InstanceTypeR6gXlarge is a InstanceType enum value + InstanceTypeR6gXlarge = "r6g.xlarge" + + // InstanceTypeR6g2xlarge is a InstanceType enum value + InstanceTypeR6g2xlarge = "r6g.2xlarge" + + // InstanceTypeR6g4xlarge is a InstanceType enum value + InstanceTypeR6g4xlarge = "r6g.4xlarge" + + // InstanceTypeR6g8xlarge is a InstanceType enum value + InstanceTypeR6g8xlarge = "r6g.8xlarge" + + // InstanceTypeR6g12xlarge is a InstanceType enum value + InstanceTypeR6g12xlarge = "r6g.12xlarge" + + // InstanceTypeR6g16xlarge is a InstanceType enum value + InstanceTypeR6g16xlarge = "r6g.16xlarge" + + // InstanceTypeR6gdMetal is a InstanceType enum value + InstanceTypeR6gdMetal = "r6gd.metal" + + // InstanceTypeR6gdMedium is a InstanceType enum value + InstanceTypeR6gdMedium = "r6gd.medium" + + // InstanceTypeR6gdLarge is a InstanceType enum value + InstanceTypeR6gdLarge = "r6gd.large" + + // InstanceTypeR6gdXlarge is a InstanceType enum value + InstanceTypeR6gdXlarge = "r6gd.xlarge" + + // InstanceTypeR6gd2xlarge is a InstanceType enum value + InstanceTypeR6gd2xlarge = "r6gd.2xlarge" + + // 
InstanceTypeR6gd4xlarge is a InstanceType enum value + InstanceTypeR6gd4xlarge = "r6gd.4xlarge" + + // InstanceTypeR6gd8xlarge is a InstanceType enum value + InstanceTypeR6gd8xlarge = "r6gd.8xlarge" + + // InstanceTypeR6gd12xlarge is a InstanceType enum value + InstanceTypeR6gd12xlarge = "r6gd.12xlarge" + + // InstanceTypeR6gd16xlarge is a InstanceType enum value + InstanceTypeR6gd16xlarge = "r6gd.16xlarge" + // InstanceTypeX116xlarge is a InstanceType enum value InstanceTypeX116xlarge = "x1.16xlarge" @@ -110290,6 +117198,54 @@ const ( // InstanceTypeC5Metal is a InstanceType enum value InstanceTypeC5Metal = "c5.metal" + // InstanceTypeC5aLarge is a InstanceType enum value + InstanceTypeC5aLarge = "c5a.large" + + // InstanceTypeC5aXlarge is a InstanceType enum value + InstanceTypeC5aXlarge = "c5a.xlarge" + + // InstanceTypeC5a2xlarge is a InstanceType enum value + InstanceTypeC5a2xlarge = "c5a.2xlarge" + + // InstanceTypeC5a4xlarge is a InstanceType enum value + InstanceTypeC5a4xlarge = "c5a.4xlarge" + + // InstanceTypeC5a8xlarge is a InstanceType enum value + InstanceTypeC5a8xlarge = "c5a.8xlarge" + + // InstanceTypeC5a12xlarge is a InstanceType enum value + InstanceTypeC5a12xlarge = "c5a.12xlarge" + + // InstanceTypeC5a16xlarge is a InstanceType enum value + InstanceTypeC5a16xlarge = "c5a.16xlarge" + + // InstanceTypeC5a24xlarge is a InstanceType enum value + InstanceTypeC5a24xlarge = "c5a.24xlarge" + + // InstanceTypeC5adLarge is a InstanceType enum value + InstanceTypeC5adLarge = "c5ad.large" + + // InstanceTypeC5adXlarge is a InstanceType enum value + InstanceTypeC5adXlarge = "c5ad.xlarge" + + // InstanceTypeC5ad2xlarge is a InstanceType enum value + InstanceTypeC5ad2xlarge = "c5ad.2xlarge" + + // InstanceTypeC5ad4xlarge is a InstanceType enum value + InstanceTypeC5ad4xlarge = "c5ad.4xlarge" + + // InstanceTypeC5ad8xlarge is a InstanceType enum value + InstanceTypeC5ad8xlarge = "c5ad.8xlarge" + + // InstanceTypeC5ad12xlarge is a InstanceType enum value + InstanceTypeC5ad12xlarge = "c5ad.12xlarge" + + // InstanceTypeC5ad16xlarge is a InstanceType enum value + InstanceTypeC5ad16xlarge = "c5ad.16xlarge" + + // InstanceTypeC5ad24xlarge is a InstanceType enum value + InstanceTypeC5ad24xlarge = "c5ad.24xlarge" + // InstanceTypeC5dLarge is a InstanceType enum value InstanceTypeC5dLarge = "c5d.large" @@ -110335,6 +117291,60 @@ const ( // InstanceTypeC5n18xlarge is a InstanceType enum value InstanceTypeC5n18xlarge = "c5n.18xlarge" + // InstanceTypeC6gMetal is a InstanceType enum value + InstanceTypeC6gMetal = "c6g.metal" + + // InstanceTypeC6gMedium is a InstanceType enum value + InstanceTypeC6gMedium = "c6g.medium" + + // InstanceTypeC6gLarge is a InstanceType enum value + InstanceTypeC6gLarge = "c6g.large" + + // InstanceTypeC6gXlarge is a InstanceType enum value + InstanceTypeC6gXlarge = "c6g.xlarge" + + // InstanceTypeC6g2xlarge is a InstanceType enum value + InstanceTypeC6g2xlarge = "c6g.2xlarge" + + // InstanceTypeC6g4xlarge is a InstanceType enum value + InstanceTypeC6g4xlarge = "c6g.4xlarge" + + // InstanceTypeC6g8xlarge is a InstanceType enum value + InstanceTypeC6g8xlarge = "c6g.8xlarge" + + // InstanceTypeC6g12xlarge is a InstanceType enum value + InstanceTypeC6g12xlarge = "c6g.12xlarge" + + // InstanceTypeC6g16xlarge is a InstanceType enum value + InstanceTypeC6g16xlarge = "c6g.16xlarge" + + // InstanceTypeC6gdMetal is a InstanceType enum value + InstanceTypeC6gdMetal = "c6gd.metal" + + // InstanceTypeC6gdMedium is a InstanceType enum value + InstanceTypeC6gdMedium = "c6gd.medium" 
+ + // InstanceTypeC6gdLarge is a InstanceType enum value + InstanceTypeC6gdLarge = "c6gd.large" + + // InstanceTypeC6gdXlarge is a InstanceType enum value + InstanceTypeC6gdXlarge = "c6gd.xlarge" + + // InstanceTypeC6gd2xlarge is a InstanceType enum value + InstanceTypeC6gd2xlarge = "c6gd.2xlarge" + + // InstanceTypeC6gd4xlarge is a InstanceType enum value + InstanceTypeC6gd4xlarge = "c6gd.4xlarge" + + // InstanceTypeC6gd8xlarge is a InstanceType enum value + InstanceTypeC6gd8xlarge = "c6gd.8xlarge" + + // InstanceTypeC6gd12xlarge is a InstanceType enum value + InstanceTypeC6gd12xlarge = "c6gd.12xlarge" + + // InstanceTypeC6gd16xlarge is a InstanceType enum value + InstanceTypeC6gd16xlarge = "c6gd.16xlarge" + // InstanceTypeCc14xlarge is a InstanceType enum value InstanceTypeCc14xlarge = "cc1.4xlarge" @@ -110377,6 +117387,9 @@ const ( // InstanceTypeG4dn16xlarge is a InstanceType enum value InstanceTypeG4dn16xlarge = "g4dn.16xlarge" + // InstanceTypeG4dnMetal is a InstanceType enum value + InstanceTypeG4dnMetal = "g4dn.metal" + // InstanceTypeCg14xlarge is a InstanceType enum value InstanceTypeCg14xlarge = "cg1.4xlarge" @@ -110697,8 +117710,417 @@ const ( // InstanceTypeInf124xlarge is a InstanceType enum value InstanceTypeInf124xlarge = "inf1.24xlarge" + + // InstanceTypeM6gMetal is a InstanceType enum value + InstanceTypeM6gMetal = "m6g.metal" + + // InstanceTypeM6gMedium is a InstanceType enum value + InstanceTypeM6gMedium = "m6g.medium" + + // InstanceTypeM6gLarge is a InstanceType enum value + InstanceTypeM6gLarge = "m6g.large" + + // InstanceTypeM6gXlarge is a InstanceType enum value + InstanceTypeM6gXlarge = "m6g.xlarge" + + // InstanceTypeM6g2xlarge is a InstanceType enum value + InstanceTypeM6g2xlarge = "m6g.2xlarge" + + // InstanceTypeM6g4xlarge is a InstanceType enum value + InstanceTypeM6g4xlarge = "m6g.4xlarge" + + // InstanceTypeM6g8xlarge is a InstanceType enum value + InstanceTypeM6g8xlarge = "m6g.8xlarge" + + // InstanceTypeM6g12xlarge is a InstanceType enum value + InstanceTypeM6g12xlarge = "m6g.12xlarge" + + // InstanceTypeM6g16xlarge is a InstanceType enum value + InstanceTypeM6g16xlarge = "m6g.16xlarge" + + // InstanceTypeM6gdMetal is a InstanceType enum value + InstanceTypeM6gdMetal = "m6gd.metal" + + // InstanceTypeM6gdMedium is a InstanceType enum value + InstanceTypeM6gdMedium = "m6gd.medium" + + // InstanceTypeM6gdLarge is a InstanceType enum value + InstanceTypeM6gdLarge = "m6gd.large" + + // InstanceTypeM6gdXlarge is a InstanceType enum value + InstanceTypeM6gdXlarge = "m6gd.xlarge" + + // InstanceTypeM6gd2xlarge is a InstanceType enum value + InstanceTypeM6gd2xlarge = "m6gd.2xlarge" + + // InstanceTypeM6gd4xlarge is a InstanceType enum value + InstanceTypeM6gd4xlarge = "m6gd.4xlarge" + + // InstanceTypeM6gd8xlarge is a InstanceType enum value + InstanceTypeM6gd8xlarge = "m6gd.8xlarge" + + // InstanceTypeM6gd12xlarge is a InstanceType enum value + InstanceTypeM6gd12xlarge = "m6gd.12xlarge" + + // InstanceTypeM6gd16xlarge is a InstanceType enum value + InstanceTypeM6gd16xlarge = "m6gd.16xlarge" ) +// InstanceType_Values returns all elements of the InstanceType enum +func InstanceType_Values() []string { + return []string{ + InstanceTypeT1Micro, + InstanceTypeT2Nano, + InstanceTypeT2Micro, + InstanceTypeT2Small, + InstanceTypeT2Medium, + InstanceTypeT2Large, + InstanceTypeT2Xlarge, + InstanceTypeT22xlarge, + InstanceTypeT3Nano, + InstanceTypeT3Micro, + InstanceTypeT3Small, + InstanceTypeT3Medium, + InstanceTypeT3Large, + InstanceTypeT3Xlarge, + 
InstanceTypeT32xlarge, + InstanceTypeT3aNano, + InstanceTypeT3aMicro, + InstanceTypeT3aSmall, + InstanceTypeT3aMedium, + InstanceTypeT3aLarge, + InstanceTypeT3aXlarge, + InstanceTypeT3a2xlarge, + InstanceTypeT4gNano, + InstanceTypeT4gMicro, + InstanceTypeT4gSmall, + InstanceTypeT4gMedium, + InstanceTypeT4gLarge, + InstanceTypeT4gXlarge, + InstanceTypeT4g2xlarge, + InstanceTypeM1Small, + InstanceTypeM1Medium, + InstanceTypeM1Large, + InstanceTypeM1Xlarge, + InstanceTypeM3Medium, + InstanceTypeM3Large, + InstanceTypeM3Xlarge, + InstanceTypeM32xlarge, + InstanceTypeM4Large, + InstanceTypeM4Xlarge, + InstanceTypeM42xlarge, + InstanceTypeM44xlarge, + InstanceTypeM410xlarge, + InstanceTypeM416xlarge, + InstanceTypeM2Xlarge, + InstanceTypeM22xlarge, + InstanceTypeM24xlarge, + InstanceTypeCr18xlarge, + InstanceTypeR3Large, + InstanceTypeR3Xlarge, + InstanceTypeR32xlarge, + InstanceTypeR34xlarge, + InstanceTypeR38xlarge, + InstanceTypeR4Large, + InstanceTypeR4Xlarge, + InstanceTypeR42xlarge, + InstanceTypeR44xlarge, + InstanceTypeR48xlarge, + InstanceTypeR416xlarge, + InstanceTypeR5Large, + InstanceTypeR5Xlarge, + InstanceTypeR52xlarge, + InstanceTypeR54xlarge, + InstanceTypeR58xlarge, + InstanceTypeR512xlarge, + InstanceTypeR516xlarge, + InstanceTypeR524xlarge, + InstanceTypeR5Metal, + InstanceTypeR5aLarge, + InstanceTypeR5aXlarge, + InstanceTypeR5a2xlarge, + InstanceTypeR5a4xlarge, + InstanceTypeR5a8xlarge, + InstanceTypeR5a12xlarge, + InstanceTypeR5a16xlarge, + InstanceTypeR5a24xlarge, + InstanceTypeR5dLarge, + InstanceTypeR5dXlarge, + InstanceTypeR5d2xlarge, + InstanceTypeR5d4xlarge, + InstanceTypeR5d8xlarge, + InstanceTypeR5d12xlarge, + InstanceTypeR5d16xlarge, + InstanceTypeR5d24xlarge, + InstanceTypeR5dMetal, + InstanceTypeR5adLarge, + InstanceTypeR5adXlarge, + InstanceTypeR5ad2xlarge, + InstanceTypeR5ad4xlarge, + InstanceTypeR5ad8xlarge, + InstanceTypeR5ad12xlarge, + InstanceTypeR5ad16xlarge, + InstanceTypeR5ad24xlarge, + InstanceTypeR6gMetal, + InstanceTypeR6gMedium, + InstanceTypeR6gLarge, + InstanceTypeR6gXlarge, + InstanceTypeR6g2xlarge, + InstanceTypeR6g4xlarge, + InstanceTypeR6g8xlarge, + InstanceTypeR6g12xlarge, + InstanceTypeR6g16xlarge, + InstanceTypeR6gdMetal, + InstanceTypeR6gdMedium, + InstanceTypeR6gdLarge, + InstanceTypeR6gdXlarge, + InstanceTypeR6gd2xlarge, + InstanceTypeR6gd4xlarge, + InstanceTypeR6gd8xlarge, + InstanceTypeR6gd12xlarge, + InstanceTypeR6gd16xlarge, + InstanceTypeX116xlarge, + InstanceTypeX132xlarge, + InstanceTypeX1eXlarge, + InstanceTypeX1e2xlarge, + InstanceTypeX1e4xlarge, + InstanceTypeX1e8xlarge, + InstanceTypeX1e16xlarge, + InstanceTypeX1e32xlarge, + InstanceTypeI2Xlarge, + InstanceTypeI22xlarge, + InstanceTypeI24xlarge, + InstanceTypeI28xlarge, + InstanceTypeI3Large, + InstanceTypeI3Xlarge, + InstanceTypeI32xlarge, + InstanceTypeI34xlarge, + InstanceTypeI38xlarge, + InstanceTypeI316xlarge, + InstanceTypeI3Metal, + InstanceTypeI3enLarge, + InstanceTypeI3enXlarge, + InstanceTypeI3en2xlarge, + InstanceTypeI3en3xlarge, + InstanceTypeI3en6xlarge, + InstanceTypeI3en12xlarge, + InstanceTypeI3en24xlarge, + InstanceTypeI3enMetal, + InstanceTypeHi14xlarge, + InstanceTypeHs18xlarge, + InstanceTypeC1Medium, + InstanceTypeC1Xlarge, + InstanceTypeC3Large, + InstanceTypeC3Xlarge, + InstanceTypeC32xlarge, + InstanceTypeC34xlarge, + InstanceTypeC38xlarge, + InstanceTypeC4Large, + InstanceTypeC4Xlarge, + InstanceTypeC42xlarge, + InstanceTypeC44xlarge, + InstanceTypeC48xlarge, + InstanceTypeC5Large, + InstanceTypeC5Xlarge, + InstanceTypeC52xlarge, + InstanceTypeC54xlarge, 
+ InstanceTypeC59xlarge, + InstanceTypeC512xlarge, + InstanceTypeC518xlarge, + InstanceTypeC524xlarge, + InstanceTypeC5Metal, + InstanceTypeC5aLarge, + InstanceTypeC5aXlarge, + InstanceTypeC5a2xlarge, + InstanceTypeC5a4xlarge, + InstanceTypeC5a8xlarge, + InstanceTypeC5a12xlarge, + InstanceTypeC5a16xlarge, + InstanceTypeC5a24xlarge, + InstanceTypeC5adLarge, + InstanceTypeC5adXlarge, + InstanceTypeC5ad2xlarge, + InstanceTypeC5ad4xlarge, + InstanceTypeC5ad8xlarge, + InstanceTypeC5ad12xlarge, + InstanceTypeC5ad16xlarge, + InstanceTypeC5ad24xlarge, + InstanceTypeC5dLarge, + InstanceTypeC5dXlarge, + InstanceTypeC5d2xlarge, + InstanceTypeC5d4xlarge, + InstanceTypeC5d9xlarge, + InstanceTypeC5d12xlarge, + InstanceTypeC5d18xlarge, + InstanceTypeC5d24xlarge, + InstanceTypeC5dMetal, + InstanceTypeC5nLarge, + InstanceTypeC5nXlarge, + InstanceTypeC5n2xlarge, + InstanceTypeC5n4xlarge, + InstanceTypeC5n9xlarge, + InstanceTypeC5n18xlarge, + InstanceTypeC6gMetal, + InstanceTypeC6gMedium, + InstanceTypeC6gLarge, + InstanceTypeC6gXlarge, + InstanceTypeC6g2xlarge, + InstanceTypeC6g4xlarge, + InstanceTypeC6g8xlarge, + InstanceTypeC6g12xlarge, + InstanceTypeC6g16xlarge, + InstanceTypeC6gdMetal, + InstanceTypeC6gdMedium, + InstanceTypeC6gdLarge, + InstanceTypeC6gdXlarge, + InstanceTypeC6gd2xlarge, + InstanceTypeC6gd4xlarge, + InstanceTypeC6gd8xlarge, + InstanceTypeC6gd12xlarge, + InstanceTypeC6gd16xlarge, + InstanceTypeCc14xlarge, + InstanceTypeCc28xlarge, + InstanceTypeG22xlarge, + InstanceTypeG28xlarge, + InstanceTypeG34xlarge, + InstanceTypeG38xlarge, + InstanceTypeG316xlarge, + InstanceTypeG3sXlarge, + InstanceTypeG4dnXlarge, + InstanceTypeG4dn2xlarge, + InstanceTypeG4dn4xlarge, + InstanceTypeG4dn8xlarge, + InstanceTypeG4dn12xlarge, + InstanceTypeG4dn16xlarge, + InstanceTypeG4dnMetal, + InstanceTypeCg14xlarge, + InstanceTypeP2Xlarge, + InstanceTypeP28xlarge, + InstanceTypeP216xlarge, + InstanceTypeP32xlarge, + InstanceTypeP38xlarge, + InstanceTypeP316xlarge, + InstanceTypeP3dn24xlarge, + InstanceTypeD2Xlarge, + InstanceTypeD22xlarge, + InstanceTypeD24xlarge, + InstanceTypeD28xlarge, + InstanceTypeF12xlarge, + InstanceTypeF14xlarge, + InstanceTypeF116xlarge, + InstanceTypeM5Large, + InstanceTypeM5Xlarge, + InstanceTypeM52xlarge, + InstanceTypeM54xlarge, + InstanceTypeM58xlarge, + InstanceTypeM512xlarge, + InstanceTypeM516xlarge, + InstanceTypeM524xlarge, + InstanceTypeM5Metal, + InstanceTypeM5aLarge, + InstanceTypeM5aXlarge, + InstanceTypeM5a2xlarge, + InstanceTypeM5a4xlarge, + InstanceTypeM5a8xlarge, + InstanceTypeM5a12xlarge, + InstanceTypeM5a16xlarge, + InstanceTypeM5a24xlarge, + InstanceTypeM5dLarge, + InstanceTypeM5dXlarge, + InstanceTypeM5d2xlarge, + InstanceTypeM5d4xlarge, + InstanceTypeM5d8xlarge, + InstanceTypeM5d12xlarge, + InstanceTypeM5d16xlarge, + InstanceTypeM5d24xlarge, + InstanceTypeM5dMetal, + InstanceTypeM5adLarge, + InstanceTypeM5adXlarge, + InstanceTypeM5ad2xlarge, + InstanceTypeM5ad4xlarge, + InstanceTypeM5ad8xlarge, + InstanceTypeM5ad12xlarge, + InstanceTypeM5ad16xlarge, + InstanceTypeM5ad24xlarge, + InstanceTypeH12xlarge, + InstanceTypeH14xlarge, + InstanceTypeH18xlarge, + InstanceTypeH116xlarge, + InstanceTypeZ1dLarge, + InstanceTypeZ1dXlarge, + InstanceTypeZ1d2xlarge, + InstanceTypeZ1d3xlarge, + InstanceTypeZ1d6xlarge, + InstanceTypeZ1d12xlarge, + InstanceTypeZ1dMetal, + InstanceTypeU6tb1Metal, + InstanceTypeU9tb1Metal, + InstanceTypeU12tb1Metal, + InstanceTypeU18tb1Metal, + InstanceTypeU24tb1Metal, + InstanceTypeA1Medium, + InstanceTypeA1Large, + InstanceTypeA1Xlarge, + 
InstanceTypeA12xlarge, + InstanceTypeA14xlarge, + InstanceTypeA1Metal, + InstanceTypeM5dnLarge, + InstanceTypeM5dnXlarge, + InstanceTypeM5dn2xlarge, + InstanceTypeM5dn4xlarge, + InstanceTypeM5dn8xlarge, + InstanceTypeM5dn12xlarge, + InstanceTypeM5dn16xlarge, + InstanceTypeM5dn24xlarge, + InstanceTypeM5nLarge, + InstanceTypeM5nXlarge, + InstanceTypeM5n2xlarge, + InstanceTypeM5n4xlarge, + InstanceTypeM5n8xlarge, + InstanceTypeM5n12xlarge, + InstanceTypeM5n16xlarge, + InstanceTypeM5n24xlarge, + InstanceTypeR5dnLarge, + InstanceTypeR5dnXlarge, + InstanceTypeR5dn2xlarge, + InstanceTypeR5dn4xlarge, + InstanceTypeR5dn8xlarge, + InstanceTypeR5dn12xlarge, + InstanceTypeR5dn16xlarge, + InstanceTypeR5dn24xlarge, + InstanceTypeR5nLarge, + InstanceTypeR5nXlarge, + InstanceTypeR5n2xlarge, + InstanceTypeR5n4xlarge, + InstanceTypeR5n8xlarge, + InstanceTypeR5n12xlarge, + InstanceTypeR5n16xlarge, + InstanceTypeR5n24xlarge, + InstanceTypeInf1Xlarge, + InstanceTypeInf12xlarge, + InstanceTypeInf16xlarge, + InstanceTypeInf124xlarge, + InstanceTypeM6gMetal, + InstanceTypeM6gMedium, + InstanceTypeM6gLarge, + InstanceTypeM6gXlarge, + InstanceTypeM6g2xlarge, + InstanceTypeM6g4xlarge, + InstanceTypeM6g8xlarge, + InstanceTypeM6g12xlarge, + InstanceTypeM6g16xlarge, + InstanceTypeM6gdMetal, + InstanceTypeM6gdMedium, + InstanceTypeM6gdLarge, + InstanceTypeM6gdXlarge, + InstanceTypeM6gd2xlarge, + InstanceTypeM6gd4xlarge, + InstanceTypeM6gd8xlarge, + InstanceTypeM6gd12xlarge, + InstanceTypeM6gd16xlarge, + } +} + const ( // InstanceTypeHypervisorNitro is a InstanceTypeHypervisor enum value InstanceTypeHypervisorNitro = "nitro" @@ -110707,6 +118129,14 @@ const ( InstanceTypeHypervisorXen = "xen" ) +// InstanceTypeHypervisor_Values returns all elements of the InstanceTypeHypervisor enum +func InstanceTypeHypervisor_Values() []string { + return []string{ + InstanceTypeHypervisorNitro, + InstanceTypeHypervisorXen, + } +} + const ( // InterfacePermissionTypeInstanceAttach is a InterfacePermissionType enum value InterfacePermissionTypeInstanceAttach = "INSTANCE-ATTACH" @@ -110715,6 +118145,14 @@ const ( InterfacePermissionTypeEipAssociate = "EIP-ASSOCIATE" ) +// InterfacePermissionType_Values returns all elements of the InterfacePermissionType enum +func InterfacePermissionType_Values() []string { + return []string{ + InterfacePermissionTypeInstanceAttach, + InterfacePermissionTypeEipAssociate, + } +} + const ( // Ipv6SupportValueEnable is a Ipv6SupportValue enum value Ipv6SupportValueEnable = "enable" @@ -110723,6 +118161,14 @@ const ( Ipv6SupportValueDisable = "disable" ) +// Ipv6SupportValue_Values returns all elements of the Ipv6SupportValue enum +func Ipv6SupportValue_Values() []string { + return []string{ + Ipv6SupportValueEnable, + Ipv6SupportValueDisable, + } +} + const ( // LaunchTemplateErrorCodeLaunchTemplateIdDoesNotExist is a LaunchTemplateErrorCode enum value LaunchTemplateErrorCodeLaunchTemplateIdDoesNotExist = "launchTemplateIdDoesNotExist" @@ -110743,6 +118189,18 @@ const ( LaunchTemplateErrorCodeUnexpectedError = "unexpectedError" ) +// LaunchTemplateErrorCode_Values returns all elements of the LaunchTemplateErrorCode enum +func LaunchTemplateErrorCode_Values() []string { + return []string{ + LaunchTemplateErrorCodeLaunchTemplateIdDoesNotExist, + LaunchTemplateErrorCodeLaunchTemplateIdMalformed, + LaunchTemplateErrorCodeLaunchTemplateNameDoesNotExist, + LaunchTemplateErrorCodeLaunchTemplateNameMalformed, + LaunchTemplateErrorCodeLaunchTemplateVersionDoesNotExist, + LaunchTemplateErrorCodeUnexpectedError, + } +} 
+ const ( // LaunchTemplateHttpTokensStateOptional is a LaunchTemplateHttpTokensState enum value LaunchTemplateHttpTokensStateOptional = "optional" @@ -110751,6 +118209,14 @@ const ( LaunchTemplateHttpTokensStateRequired = "required" ) +// LaunchTemplateHttpTokensState_Values returns all elements of the LaunchTemplateHttpTokensState enum +func LaunchTemplateHttpTokensState_Values() []string { + return []string{ + LaunchTemplateHttpTokensStateOptional, + LaunchTemplateHttpTokensStateRequired, + } +} + const ( // LaunchTemplateInstanceMetadataEndpointStateDisabled is a LaunchTemplateInstanceMetadataEndpointState enum value LaunchTemplateInstanceMetadataEndpointStateDisabled = "disabled" @@ -110759,6 +118225,14 @@ const ( LaunchTemplateInstanceMetadataEndpointStateEnabled = "enabled" ) +// LaunchTemplateInstanceMetadataEndpointState_Values returns all elements of the LaunchTemplateInstanceMetadataEndpointState enum +func LaunchTemplateInstanceMetadataEndpointState_Values() []string { + return []string{ + LaunchTemplateInstanceMetadataEndpointStateDisabled, + LaunchTemplateInstanceMetadataEndpointStateEnabled, + } +} + const ( // LaunchTemplateInstanceMetadataOptionsStatePending is a LaunchTemplateInstanceMetadataOptionsState enum value LaunchTemplateInstanceMetadataOptionsStatePending = "pending" @@ -110767,6 +118241,14 @@ const ( LaunchTemplateInstanceMetadataOptionsStateApplied = "applied" ) +// LaunchTemplateInstanceMetadataOptionsState_Values returns all elements of the LaunchTemplateInstanceMetadataOptionsState enum +func LaunchTemplateInstanceMetadataOptionsState_Values() []string { + return []string{ + LaunchTemplateInstanceMetadataOptionsStatePending, + LaunchTemplateInstanceMetadataOptionsStateApplied, + } +} + const ( // ListingStateAvailable is a ListingState enum value ListingStateAvailable = "available" @@ -110781,6 +118263,16 @@ const ( ListingStatePending = "pending" ) +// ListingState_Values returns all elements of the ListingState enum +func ListingState_Values() []string { + return []string{ + ListingStateAvailable, + ListingStateSold, + ListingStateCancelled, + ListingStatePending, + } +} + const ( // ListingStatusActive is a ListingStatus enum value ListingStatusActive = "active" @@ -110795,6 +118287,16 @@ const ( ListingStatusClosed = "closed" ) +// ListingStatus_Values returns all elements of the ListingStatus enum +func ListingStatus_Values() []string { + return []string{ + ListingStatusActive, + ListingStatusPending, + ListingStatusCancelled, + ListingStatusClosed, + } +} + const ( // LocalGatewayRouteStatePending is a LocalGatewayRouteState enum value LocalGatewayRouteStatePending = "pending" @@ -110812,6 +118314,17 @@ const ( LocalGatewayRouteStateDeleted = "deleted" ) +// LocalGatewayRouteState_Values returns all elements of the LocalGatewayRouteState enum +func LocalGatewayRouteState_Values() []string { + return []string{ + LocalGatewayRouteStatePending, + LocalGatewayRouteStateActive, + LocalGatewayRouteStateBlackhole, + LocalGatewayRouteStateDeleting, + LocalGatewayRouteStateDeleted, + } +} + const ( // LocalGatewayRouteTypeStatic is a LocalGatewayRouteType enum value LocalGatewayRouteTypeStatic = "static" @@ -110820,6 +118333,14 @@ const ( LocalGatewayRouteTypePropagated = "propagated" ) +// LocalGatewayRouteType_Values returns all elements of the LocalGatewayRouteType enum +func LocalGatewayRouteType_Values() []string { + return []string{ + LocalGatewayRouteTypeStatic, + LocalGatewayRouteTypePropagated, + } +} + const ( // LocationTypeRegion is a LocationType 
enum value LocationTypeRegion = "region" @@ -110831,6 +118352,15 @@ const ( LocationTypeAvailabilityZoneId = "availability-zone-id" ) +// LocationType_Values returns all elements of the LocationType enum +func LocationType_Values() []string { + return []string{ + LocationTypeRegion, + LocationTypeAvailabilityZone, + LocationTypeAvailabilityZoneId, + } +} + const ( // LogDestinationTypeCloudWatchLogs is a LogDestinationType enum value LogDestinationTypeCloudWatchLogs = "cloud-watch-logs" @@ -110839,11 +118369,26 @@ const ( LogDestinationTypeS3 = "s3" ) +// LogDestinationType_Values returns all elements of the LogDestinationType enum +func LogDestinationType_Values() []string { + return []string{ + LogDestinationTypeCloudWatchLogs, + LogDestinationTypeS3, + } +} + const ( // MarketTypeSpot is a MarketType enum value MarketTypeSpot = "spot" ) +// MarketType_Values returns all elements of the MarketType enum +func MarketType_Values() []string { + return []string{ + MarketTypeSpot, + } +} + const ( // MembershipTypeStatic is a MembershipType enum value MembershipTypeStatic = "static" @@ -110852,6 +118397,14 @@ const ( MembershipTypeIgmp = "igmp" ) +// MembershipType_Values returns all elements of the MembershipType enum +func MembershipType_Values() []string { + return []string{ + MembershipTypeStatic, + MembershipTypeIgmp, + } +} + const ( // ModifyAvailabilityZoneOptInStatusOptedIn is a ModifyAvailabilityZoneOptInStatus enum value ModifyAvailabilityZoneOptInStatusOptedIn = "opted-in" @@ -110860,6 +118413,14 @@ const ( ModifyAvailabilityZoneOptInStatusNotOptedIn = "not-opted-in" ) +// ModifyAvailabilityZoneOptInStatus_Values returns all elements of the ModifyAvailabilityZoneOptInStatus enum +func ModifyAvailabilityZoneOptInStatus_Values() []string { + return []string{ + ModifyAvailabilityZoneOptInStatusOptedIn, + ModifyAvailabilityZoneOptInStatusNotOptedIn, + } +} + const ( // MonitoringStateDisabled is a MonitoringState enum value MonitoringStateDisabled = "disabled" @@ -110874,6 +118435,16 @@ const ( MonitoringStatePending = "pending" ) +// MonitoringState_Values returns all elements of the MonitoringState enum +func MonitoringState_Values() []string { + return []string{ + MonitoringStateDisabled, + MonitoringStateDisabling, + MonitoringStateEnabled, + MonitoringStatePending, + } +} + const ( // MoveStatusMovingToVpc is a MoveStatus enum value MoveStatusMovingToVpc = "movingToVpc" @@ -110882,6 +118453,14 @@ const ( MoveStatusRestoringToClassic = "restoringToClassic" ) +// MoveStatus_Values returns all elements of the MoveStatus enum +func MoveStatus_Values() []string { + return []string{ + MoveStatusMovingToVpc, + MoveStatusRestoringToClassic, + } +} + const ( // MulticastSupportValueEnable is a MulticastSupportValue enum value MulticastSupportValueEnable = "enable" @@ -110890,6 +118469,14 @@ const ( MulticastSupportValueDisable = "disable" ) +// MulticastSupportValue_Values returns all elements of the MulticastSupportValue enum +func MulticastSupportValue_Values() []string { + return []string{ + MulticastSupportValueEnable, + MulticastSupportValueDisable, + } +} + const ( // NatGatewayStatePending is a NatGatewayState enum value NatGatewayStatePending = "pending" @@ -110907,6 +118494,17 @@ const ( NatGatewayStateDeleted = "deleted" ) +// NatGatewayState_Values returns all elements of the NatGatewayState enum +func NatGatewayState_Values() []string { + return []string{ + NatGatewayStatePending, + NatGatewayStateFailed, + NatGatewayStateAvailable, + NatGatewayStateDeleting, + 
NatGatewayStateDeleted, + } +} + const ( // NetworkInterfaceAttributeDescription is a NetworkInterfaceAttribute enum value NetworkInterfaceAttributeDescription = "description" @@ -110921,11 +118519,28 @@ const ( NetworkInterfaceAttributeAttachment = "attachment" ) +// NetworkInterfaceAttribute_Values returns all elements of the NetworkInterfaceAttribute enum +func NetworkInterfaceAttribute_Values() []string { + return []string{ + NetworkInterfaceAttributeDescription, + NetworkInterfaceAttributeGroupSet, + NetworkInterfaceAttributeSourceDestCheck, + NetworkInterfaceAttributeAttachment, + } +} + const ( // NetworkInterfaceCreationTypeEfa is a NetworkInterfaceCreationType enum value NetworkInterfaceCreationTypeEfa = "efa" ) +// NetworkInterfaceCreationType_Values returns all elements of the NetworkInterfaceCreationType enum +func NetworkInterfaceCreationType_Values() []string { + return []string{ + NetworkInterfaceCreationTypeEfa, + } +} + const ( // NetworkInterfacePermissionStateCodePending is a NetworkInterfacePermissionStateCode enum value NetworkInterfacePermissionStateCodePending = "pending" @@ -110940,6 +118555,16 @@ const ( NetworkInterfacePermissionStateCodeRevoked = "revoked" ) +// NetworkInterfacePermissionStateCode_Values returns all elements of the NetworkInterfacePermissionStateCode enum +func NetworkInterfacePermissionStateCode_Values() []string { + return []string{ + NetworkInterfacePermissionStateCodePending, + NetworkInterfacePermissionStateCodeGranted, + NetworkInterfacePermissionStateCodeRevoking, + NetworkInterfacePermissionStateCodeRevoked, + } +} + const ( // NetworkInterfaceStatusAvailable is a NetworkInterfaceStatus enum value NetworkInterfaceStatusAvailable = "available" @@ -110957,6 +118582,17 @@ const ( NetworkInterfaceStatusDetaching = "detaching" ) +// NetworkInterfaceStatus_Values returns all elements of the NetworkInterfaceStatus enum +func NetworkInterfaceStatus_Values() []string { + return []string{ + NetworkInterfaceStatusAvailable, + NetworkInterfaceStatusAssociated, + NetworkInterfaceStatusAttaching, + NetworkInterfaceStatusInUse, + NetworkInterfaceStatusDetaching, + } +} + const ( // NetworkInterfaceTypeInterface is a NetworkInterfaceType enum value NetworkInterfaceTypeInterface = "interface" @@ -110968,6 +118604,15 @@ const ( NetworkInterfaceTypeEfa = "efa" ) +// NetworkInterfaceType_Values returns all elements of the NetworkInterfaceType enum +func NetworkInterfaceType_Values() []string { + return []string{ + NetworkInterfaceTypeInterface, + NetworkInterfaceTypeNatGateway, + NetworkInterfaceTypeEfa, + } +} + const ( // OfferingClassTypeStandard is a OfferingClassType enum value OfferingClassTypeStandard = "standard" @@ -110976,6 +118621,14 @@ const ( OfferingClassTypeConvertible = "convertible" ) +// OfferingClassType_Values returns all elements of the OfferingClassType enum +func OfferingClassType_Values() []string { + return []string{ + OfferingClassTypeStandard, + OfferingClassTypeConvertible, + } +} + const ( // OfferingTypeValuesHeavyUtilization is a OfferingTypeValues enum value OfferingTypeValuesHeavyUtilization = "Heavy Utilization" @@ -110996,6 +118649,18 @@ const ( OfferingTypeValuesAllUpfront = "All Upfront" ) +// OfferingTypeValues_Values returns all elements of the OfferingTypeValues enum +func OfferingTypeValues_Values() []string { + return []string{ + OfferingTypeValuesHeavyUtilization, + OfferingTypeValuesMediumUtilization, + OfferingTypeValuesLightUtilization, + OfferingTypeValuesNoUpfront, + OfferingTypeValuesPartialUpfront, + 
OfferingTypeValuesAllUpfront, + } +} + const ( // OnDemandAllocationStrategyLowestPrice is a OnDemandAllocationStrategy enum value OnDemandAllocationStrategyLowestPrice = "lowestPrice" @@ -111004,6 +118669,14 @@ const ( OnDemandAllocationStrategyPrioritized = "prioritized" ) +// OnDemandAllocationStrategy_Values returns all elements of the OnDemandAllocationStrategy enum +func OnDemandAllocationStrategy_Values() []string { + return []string{ + OnDemandAllocationStrategyLowestPrice, + OnDemandAllocationStrategyPrioritized, + } +} + const ( // OperationTypeAdd is a OperationType enum value OperationTypeAdd = "add" @@ -111012,6 +118685,14 @@ const ( OperationTypeRemove = "remove" ) +// OperationType_Values returns all elements of the OperationType enum +func OperationType_Values() []string { + return []string{ + OperationTypeAdd, + OperationTypeRemove, + } +} + const ( // PaymentOptionAllUpfront is a PaymentOption enum value PaymentOptionAllUpfront = "AllUpfront" @@ -111023,11 +118704,27 @@ const ( PaymentOptionNoUpfront = "NoUpfront" ) +// PaymentOption_Values returns all elements of the PaymentOption enum +func PaymentOption_Values() []string { + return []string{ + PaymentOptionAllUpfront, + PaymentOptionPartialUpfront, + PaymentOptionNoUpfront, + } +} + const ( // PermissionGroupAll is a PermissionGroup enum value PermissionGroupAll = "all" ) +// PermissionGroup_Values returns all elements of the PermissionGroup enum +func PermissionGroup_Values() []string { + return []string{ + PermissionGroupAll, + } +} + const ( // PlacementGroupStatePending is a PlacementGroupState enum value PlacementGroupStatePending = "pending" @@ -111042,6 +118739,16 @@ const ( PlacementGroupStateDeleted = "deleted" ) +// PlacementGroupState_Values returns all elements of the PlacementGroupState enum +func PlacementGroupState_Values() []string { + return []string{ + PlacementGroupStatePending, + PlacementGroupStateAvailable, + PlacementGroupStateDeleting, + PlacementGroupStateDeleted, + } +} + const ( // PlacementGroupStrategyCluster is a PlacementGroupStrategy enum value PlacementGroupStrategyCluster = "cluster" @@ -111053,6 +118760,15 @@ const ( PlacementGroupStrategySpread = "spread" ) +// PlacementGroupStrategy_Values returns all elements of the PlacementGroupStrategy enum +func PlacementGroupStrategy_Values() []string { + return []string{ + PlacementGroupStrategyCluster, + PlacementGroupStrategyPartition, + PlacementGroupStrategySpread, + } +} + const ( // PlacementStrategyCluster is a PlacementStrategy enum value PlacementStrategyCluster = "cluster" @@ -111064,11 +118780,83 @@ const ( PlacementStrategyPartition = "partition" ) +// PlacementStrategy_Values returns all elements of the PlacementStrategy enum +func PlacementStrategy_Values() []string { + return []string{ + PlacementStrategyCluster, + PlacementStrategySpread, + PlacementStrategyPartition, + } +} + const ( // PlatformValuesWindows is a PlatformValues enum value PlatformValuesWindows = "Windows" ) +// PlatformValues_Values returns all elements of the PlatformValues enum +func PlatformValues_Values() []string { + return []string{ + PlatformValuesWindows, + } +} + +const ( + // PrefixListStateCreateInProgress is a PrefixListState enum value + PrefixListStateCreateInProgress = "create-in-progress" + + // PrefixListStateCreateComplete is a PrefixListState enum value + PrefixListStateCreateComplete = "create-complete" + + // PrefixListStateCreateFailed is a PrefixListState enum value + PrefixListStateCreateFailed = "create-failed" + + // 
PrefixListStateModifyInProgress is a PrefixListState enum value + PrefixListStateModifyInProgress = "modify-in-progress" + + // PrefixListStateModifyComplete is a PrefixListState enum value + PrefixListStateModifyComplete = "modify-complete" + + // PrefixListStateModifyFailed is a PrefixListState enum value + PrefixListStateModifyFailed = "modify-failed" + + // PrefixListStateRestoreInProgress is a PrefixListState enum value + PrefixListStateRestoreInProgress = "restore-in-progress" + + // PrefixListStateRestoreComplete is a PrefixListState enum value + PrefixListStateRestoreComplete = "restore-complete" + + // PrefixListStateRestoreFailed is a PrefixListState enum value + PrefixListStateRestoreFailed = "restore-failed" + + // PrefixListStateDeleteInProgress is a PrefixListState enum value + PrefixListStateDeleteInProgress = "delete-in-progress" + + // PrefixListStateDeleteComplete is a PrefixListState enum value + PrefixListStateDeleteComplete = "delete-complete" + + // PrefixListStateDeleteFailed is a PrefixListState enum value + PrefixListStateDeleteFailed = "delete-failed" +) + +// PrefixListState_Values returns all elements of the PrefixListState enum +func PrefixListState_Values() []string { + return []string{ + PrefixListStateCreateInProgress, + PrefixListStateCreateComplete, + PrefixListStateCreateFailed, + PrefixListStateModifyInProgress, + PrefixListStateModifyComplete, + PrefixListStateModifyFailed, + PrefixListStateRestoreInProgress, + PrefixListStateRestoreComplete, + PrefixListStateRestoreFailed, + PrefixListStateDeleteInProgress, + PrefixListStateDeleteComplete, + PrefixListStateDeleteFailed, + } +} + const ( // PrincipalTypeAll is a PrincipalType enum value PrincipalTypeAll = "All" @@ -111089,6 +118877,18 @@ const ( PrincipalTypeRole = "Role" ) +// PrincipalType_Values returns all elements of the PrincipalType enum +func PrincipalType_Values() []string { + return []string{ + PrincipalTypeAll, + PrincipalTypeService, + PrincipalTypeOrganizationUnit, + PrincipalTypeAccount, + PrincipalTypeUser, + PrincipalTypeRole, + } +} + const ( // ProductCodeValuesDevpay is a ProductCodeValues enum value ProductCodeValuesDevpay = "devpay" @@ -111097,6 +118897,14 @@ const ( ProductCodeValuesMarketplace = "marketplace" ) +// ProductCodeValues_Values returns all elements of the ProductCodeValues enum +func ProductCodeValues_Values() []string { + return []string{ + ProductCodeValuesDevpay, + ProductCodeValuesMarketplace, + } +} + const ( // RIProductDescriptionLinuxUnix is a RIProductDescription enum value RIProductDescriptionLinuxUnix = "Linux/UNIX" @@ -111111,11 +118919,28 @@ const ( RIProductDescriptionWindowsAmazonVpc = "Windows (Amazon VPC)" ) +// RIProductDescription_Values returns all elements of the RIProductDescription enum +func RIProductDescription_Values() []string { + return []string{ + RIProductDescriptionLinuxUnix, + RIProductDescriptionLinuxUnixamazonVpc, + RIProductDescriptionWindows, + RIProductDescriptionWindowsAmazonVpc, + } +} + const ( // RecurringChargeFrequencyHourly is a RecurringChargeFrequency enum value RecurringChargeFrequencyHourly = "Hourly" ) +// RecurringChargeFrequency_Values returns all elements of the RecurringChargeFrequency enum +func RecurringChargeFrequency_Values() []string { + return []string{ + RecurringChargeFrequencyHourly, + } +} + const ( // ReportInstanceReasonCodesInstanceStuckInState is a ReportInstanceReasonCodes enum value ReportInstanceReasonCodesInstanceStuckInState = "instance-stuck-in-state" @@ -111145,6 +118970,21 @@ const ( 
ReportInstanceReasonCodesOther = "other" ) +// ReportInstanceReasonCodes_Values returns all elements of the ReportInstanceReasonCodes enum +func ReportInstanceReasonCodes_Values() []string { + return []string{ + ReportInstanceReasonCodesInstanceStuckInState, + ReportInstanceReasonCodesUnresponsive, + ReportInstanceReasonCodesNotAcceptingCredentials, + ReportInstanceReasonCodesPasswordNotAvailable, + ReportInstanceReasonCodesPerformanceNetwork, + ReportInstanceReasonCodesPerformanceInstanceStore, + ReportInstanceReasonCodesPerformanceEbsVolume, + ReportInstanceReasonCodesPerformanceOther, + ReportInstanceReasonCodesOther, + } +} + const ( // ReportStatusTypeOk is a ReportStatusType enum value ReportStatusTypeOk = "ok" @@ -111153,6 +118993,14 @@ const ( ReportStatusTypeImpaired = "impaired" ) +// ReportStatusType_Values returns all elements of the ReportStatusType enum +func ReportStatusType_Values() []string { + return []string{ + ReportStatusTypeOk, + ReportStatusTypeImpaired, + } +} + const ( // ReservationStatePaymentPending is a ReservationState enum value ReservationStatePaymentPending = "payment-pending" @@ -111167,6 +119015,16 @@ const ( ReservationStateRetired = "retired" ) +// ReservationState_Values returns all elements of the ReservationState enum +func ReservationState_Values() []string { + return []string{ + ReservationStatePaymentPending, + ReservationStatePaymentFailed, + ReservationStateActive, + ReservationStateRetired, + } +} + const ( // ReservedInstanceStatePaymentPending is a ReservedInstanceState enum value ReservedInstanceStatePaymentPending = "payment-pending" @@ -111187,16 +119045,42 @@ const ( ReservedInstanceStateQueuedDeleted = "queued-deleted" ) +// ReservedInstanceState_Values returns all elements of the ReservedInstanceState enum +func ReservedInstanceState_Values() []string { + return []string{ + ReservedInstanceStatePaymentPending, + ReservedInstanceStateActive, + ReservedInstanceStatePaymentFailed, + ReservedInstanceStateRetired, + ReservedInstanceStateQueued, + ReservedInstanceStateQueuedDeleted, + } +} + const ( // ResetFpgaImageAttributeNameLoadPermission is a ResetFpgaImageAttributeName enum value ResetFpgaImageAttributeNameLoadPermission = "loadPermission" ) +// ResetFpgaImageAttributeName_Values returns all elements of the ResetFpgaImageAttributeName enum +func ResetFpgaImageAttributeName_Values() []string { + return []string{ + ResetFpgaImageAttributeNameLoadPermission, + } +} + const ( // ResetImageAttributeNameLaunchPermission is a ResetImageAttributeName enum value ResetImageAttributeNameLaunchPermission = "launchPermission" ) +// ResetImageAttributeName_Values returns all elements of the ResetImageAttributeName enum +func ResetImageAttributeName_Values() []string { + return []string{ + ResetImageAttributeNameLaunchPermission, + } +} + const ( // ResourceTypeClientVpnEndpoint is a ResourceType enum value ResourceTypeClientVpnEndpoint = "client-vpn-endpoint" @@ -111210,9 +119094,21 @@ const ( // ResourceTypeDhcpOptions is a ResourceType enum value ResourceTypeDhcpOptions = "dhcp-options" + // ResourceTypeEgressOnlyInternetGateway is a ResourceType enum value + ResourceTypeEgressOnlyInternetGateway = "egress-only-internet-gateway" + // ResourceTypeElasticIp is a ResourceType enum value ResourceTypeElasticIp = "elastic-ip" + // ResourceTypeElasticGpu is a ResourceType enum value + ResourceTypeElasticGpu = "elastic-gpu" + + // ResourceTypeExportImageTask is a ResourceType enum value + ResourceTypeExportImageTask = "export-image-task" + + // 
ResourceTypeExportInstanceTask is a ResourceType enum value + ResourceTypeExportInstanceTask = "export-instance-task" + // ResourceTypeFleet is a ResourceType enum value ResourceTypeFleet = "fleet" @@ -111225,6 +119121,12 @@ const ( // ResourceTypeImage is a ResourceType enum value ResourceTypeImage = "image" + // ResourceTypeImportImageTask is a ResourceType enum value + ResourceTypeImportImageTask = "import-image-task" + + // ResourceTypeImportSnapshotTask is a ResourceType enum value + ResourceTypeImportSnapshotTask = "import-snapshot-task" + // ResourceTypeInstance is a ResourceType enum value ResourceTypeInstance = "instance" @@ -111237,6 +119139,9 @@ const ( // ResourceTypeLaunchTemplate is a ResourceType enum value ResourceTypeLaunchTemplate = "launch-template" + // ResourceTypeLocalGatewayRouteTableVpcAssociation is a ResourceType enum value + ResourceTypeLocalGatewayRouteTableVpcAssociation = "local-gateway-route-table-vpc-association" + // ResourceTypeNatgateway is a ResourceType enum value ResourceTypeNatgateway = "natgateway" @@ -111310,6 +119215,56 @@ const ( ResourceTypeVpcFlowLog = "vpc-flow-log" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeClientVpnEndpoint, + ResourceTypeCustomerGateway, + ResourceTypeDedicatedHost, + ResourceTypeDhcpOptions, + ResourceTypeEgressOnlyInternetGateway, + ResourceTypeElasticIp, + ResourceTypeElasticGpu, + ResourceTypeExportImageTask, + ResourceTypeExportInstanceTask, + ResourceTypeFleet, + ResourceTypeFpgaImage, + ResourceTypeHostReservation, + ResourceTypeImage, + ResourceTypeImportImageTask, + ResourceTypeImportSnapshotTask, + ResourceTypeInstance, + ResourceTypeInternetGateway, + ResourceTypeKeyPair, + ResourceTypeLaunchTemplate, + ResourceTypeLocalGatewayRouteTableVpcAssociation, + ResourceTypeNatgateway, + ResourceTypeNetworkAcl, + ResourceTypeNetworkInterface, + ResourceTypePlacementGroup, + ResourceTypeReservedInstances, + ResourceTypeRouteTable, + ResourceTypeSecurityGroup, + ResourceTypeSnapshot, + ResourceTypeSpotFleetRequest, + ResourceTypeSpotInstancesRequest, + ResourceTypeSubnet, + ResourceTypeTrafficMirrorFilter, + ResourceTypeTrafficMirrorSession, + ResourceTypeTrafficMirrorTarget, + ResourceTypeTransitGateway, + ResourceTypeTransitGatewayAttachment, + ResourceTypeTransitGatewayMulticastDomain, + ResourceTypeTransitGatewayRouteTable, + ResourceTypeVolume, + ResourceTypeVpc, + ResourceTypeVpcPeeringConnection, + ResourceTypeVpnConnection, + ResourceTypeVpnGateway, + ResourceTypeVpcFlowLog, + } +} + const ( // RootDeviceTypeEbs is a RootDeviceType enum value RootDeviceTypeEbs = "ebs" @@ -111318,6 +119273,14 @@ const ( RootDeviceTypeInstanceStore = "instance-store" ) +// RootDeviceType_Values returns all elements of the RootDeviceType enum +func RootDeviceType_Values() []string { + return []string{ + RootDeviceTypeEbs, + RootDeviceTypeInstanceStore, + } +} + const ( // RouteOriginCreateRouteTable is a RouteOrigin enum value RouteOriginCreateRouteTable = "CreateRouteTable" @@ -111329,6 +119292,15 @@ const ( RouteOriginEnableVgwRoutePropagation = "EnableVgwRoutePropagation" ) +// RouteOrigin_Values returns all elements of the RouteOrigin enum +func RouteOrigin_Values() []string { + return []string{ + RouteOriginCreateRouteTable, + RouteOriginCreateRoute, + RouteOriginEnableVgwRoutePropagation, + } +} + const ( // RouteStateActive is a RouteState enum value RouteStateActive = "active" @@ -111337,6 +119309,14 @@ const ( 
RouteStateBlackhole = "blackhole" ) +// RouteState_Values returns all elements of the RouteState enum +func RouteState_Values() []string { + return []string{ + RouteStateActive, + RouteStateBlackhole, + } +} + const ( // RouteTableAssociationStateCodeAssociating is a RouteTableAssociationStateCode enum value RouteTableAssociationStateCodeAssociating = "associating" @@ -111354,6 +119334,17 @@ const ( RouteTableAssociationStateCodeFailed = "failed" ) +// RouteTableAssociationStateCode_Values returns all elements of the RouteTableAssociationStateCode enum +func RouteTableAssociationStateCode_Values() []string { + return []string{ + RouteTableAssociationStateCodeAssociating, + RouteTableAssociationStateCodeAssociated, + RouteTableAssociationStateCodeDisassociating, + RouteTableAssociationStateCodeDisassociated, + RouteTableAssociationStateCodeFailed, + } +} + const ( // RuleActionAllow is a RuleAction enum value RuleActionAllow = "allow" @@ -111362,6 +119353,14 @@ const ( RuleActionDeny = "deny" ) +// RuleAction_Values returns all elements of the RuleAction enum +func RuleAction_Values() []string { + return []string{ + RuleActionAllow, + RuleActionDeny, + } +} + const ( // ScopeAvailabilityZone is a Scope enum value ScopeAvailabilityZone = "Availability Zone" @@ -111370,6 +119369,14 @@ const ( ScopeRegion = "Region" ) +// Scope_Values returns all elements of the Scope enum +func Scope_Values() []string { + return []string{ + ScopeAvailabilityZone, + ScopeRegion, + } +} + const ( // ServiceStatePending is a ServiceState enum value ServiceStatePending = "Pending" @@ -111387,6 +119394,17 @@ const ( ServiceStateFailed = "Failed" ) +// ServiceState_Values returns all elements of the ServiceState enum +func ServiceState_Values() []string { + return []string{ + ServiceStatePending, + ServiceStateAvailable, + ServiceStateDeleting, + ServiceStateDeleted, + ServiceStateFailed, + } +} + const ( // ServiceTypeInterface is a ServiceType enum value ServiceTypeInterface = "Interface" @@ -111395,6 +119413,14 @@ const ( ServiceTypeGateway = "Gateway" ) +// ServiceType_Values returns all elements of the ServiceType enum +func ServiceType_Values() []string { + return []string{ + ServiceTypeInterface, + ServiceTypeGateway, + } +} + const ( // ShutdownBehaviorStop is a ShutdownBehavior enum value ShutdownBehaviorStop = "stop" @@ -111403,6 +119429,14 @@ const ( ShutdownBehaviorTerminate = "terminate" ) +// ShutdownBehavior_Values returns all elements of the ShutdownBehavior enum +func ShutdownBehavior_Values() []string { + return []string{ + ShutdownBehaviorStop, + ShutdownBehaviorTerminate, + } +} + const ( // SnapshotAttributeNameProductCodes is a SnapshotAttributeName enum value SnapshotAttributeNameProductCodes = "productCodes" @@ -111411,6 +119445,14 @@ const ( SnapshotAttributeNameCreateVolumePermission = "createVolumePermission" ) +// SnapshotAttributeName_Values returns all elements of the SnapshotAttributeName enum +func SnapshotAttributeName_Values() []string { + return []string{ + SnapshotAttributeNameProductCodes, + SnapshotAttributeNameCreateVolumePermission, + } +} + const ( // SnapshotStatePending is a SnapshotState enum value SnapshotStatePending = "pending" @@ -111422,6 +119464,15 @@ const ( SnapshotStateError = "error" ) +// SnapshotState_Values returns all elements of the SnapshotState enum +func SnapshotState_Values() []string { + return []string{ + SnapshotStatePending, + SnapshotStateCompleted, + SnapshotStateError, + } +} + const ( // SpotAllocationStrategyLowestPrice is a 
SpotAllocationStrategy enum value SpotAllocationStrategyLowestPrice = "lowest-price" @@ -111433,6 +119484,15 @@ const ( SpotAllocationStrategyCapacityOptimized = "capacity-optimized" ) +// SpotAllocationStrategy_Values returns all elements of the SpotAllocationStrategy enum +func SpotAllocationStrategy_Values() []string { + return []string{ + SpotAllocationStrategyLowestPrice, + SpotAllocationStrategyDiversified, + SpotAllocationStrategyCapacityOptimized, + } +} + const ( // SpotInstanceInterruptionBehaviorHibernate is a SpotInstanceInterruptionBehavior enum value SpotInstanceInterruptionBehaviorHibernate = "hibernate" @@ -111444,6 +119504,15 @@ const ( SpotInstanceInterruptionBehaviorTerminate = "terminate" ) +// SpotInstanceInterruptionBehavior_Values returns all elements of the SpotInstanceInterruptionBehavior enum +func SpotInstanceInterruptionBehavior_Values() []string { + return []string{ + SpotInstanceInterruptionBehaviorHibernate, + SpotInstanceInterruptionBehaviorStop, + SpotInstanceInterruptionBehaviorTerminate, + } +} + const ( // SpotInstanceStateOpen is a SpotInstanceState enum value SpotInstanceStateOpen = "open" @@ -111461,6 +119530,17 @@ const ( SpotInstanceStateFailed = "failed" ) +// SpotInstanceState_Values returns all elements of the SpotInstanceState enum +func SpotInstanceState_Values() []string { + return []string{ + SpotInstanceStateOpen, + SpotInstanceStateActive, + SpotInstanceStateClosed, + SpotInstanceStateCancelled, + SpotInstanceStateFailed, + } +} + const ( // SpotInstanceTypeOneTime is a SpotInstanceType enum value SpotInstanceTypeOneTime = "one-time" @@ -111469,6 +119549,14 @@ const ( SpotInstanceTypePersistent = "persistent" ) +// SpotInstanceType_Values returns all elements of the SpotInstanceType enum +func SpotInstanceType_Values() []string { + return []string{ + SpotInstanceTypeOneTime, + SpotInstanceTypePersistent, + } +} + const ( // StatePendingAcceptance is a State enum value StatePendingAcceptance = "PendingAcceptance" @@ -111495,6 +119583,20 @@ const ( StateExpired = "Expired" ) +// State_Values returns all elements of the State enum +func State_Values() []string { + return []string{ + StatePendingAcceptance, + StatePending, + StateAvailable, + StateDeleting, + StateDeleted, + StateRejected, + StateFailed, + StateExpired, + } +} + const ( // StatusMoveInProgress is a Status enum value StatusMoveInProgress = "MoveInProgress" @@ -111506,11 +119608,27 @@ const ( StatusInClassic = "InClassic" ) +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusMoveInProgress, + StatusInVpc, + StatusInClassic, + } +} + const ( // StatusNameReachability is a StatusName enum value StatusNameReachability = "reachability" ) +// StatusName_Values returns all elements of the StatusName enum +func StatusName_Values() []string { + return []string{ + StatusNameReachability, + } +} + const ( // StatusTypePassed is a StatusType enum value StatusTypePassed = "passed" @@ -111525,6 +119643,16 @@ const ( StatusTypeInitializing = "initializing" ) +// StatusType_Values returns all elements of the StatusType enum +func StatusType_Values() []string { + return []string{ + StatusTypePassed, + StatusTypeFailed, + StatusTypeInsufficientData, + StatusTypeInitializing, + } +} + const ( // SubnetCidrBlockStateCodeAssociating is a SubnetCidrBlockStateCode enum value SubnetCidrBlockStateCodeAssociating = "associating" @@ -111545,6 +119673,18 @@ const ( SubnetCidrBlockStateCodeFailed = "failed" ) +// 
SubnetCidrBlockStateCode_Values returns all elements of the SubnetCidrBlockStateCode enum +func SubnetCidrBlockStateCode_Values() []string { + return []string{ + SubnetCidrBlockStateCodeAssociating, + SubnetCidrBlockStateCodeAssociated, + SubnetCidrBlockStateCodeDisassociating, + SubnetCidrBlockStateCodeDisassociated, + SubnetCidrBlockStateCodeFailing, + SubnetCidrBlockStateCodeFailed, + } +} + const ( // SubnetStatePending is a SubnetState enum value SubnetStatePending = "pending" @@ -111553,6 +119693,14 @@ const ( SubnetStateAvailable = "available" ) +// SubnetState_Values returns all elements of the SubnetState enum +func SubnetState_Values() []string { + return []string{ + SubnetStatePending, + SubnetStateAvailable, + } +} + const ( // SummaryStatusOk is a SummaryStatus enum value SummaryStatusOk = "ok" @@ -111570,6 +119718,17 @@ const ( SummaryStatusInitializing = "initializing" ) +// SummaryStatus_Values returns all elements of the SummaryStatus enum +func SummaryStatus_Values() []string { + return []string{ + SummaryStatusOk, + SummaryStatusImpaired, + SummaryStatusInsufficientData, + SummaryStatusNotApplicable, + SummaryStatusInitializing, + } +} + const ( // TelemetryStatusUp is a TelemetryStatus enum value TelemetryStatusUp = "UP" @@ -111578,6 +119737,14 @@ const ( TelemetryStatusDown = "DOWN" ) +// TelemetryStatus_Values returns all elements of the TelemetryStatus enum +func TelemetryStatus_Values() []string { + return []string{ + TelemetryStatusUp, + TelemetryStatusDown, + } +} + const ( // TenancyDefault is a Tenancy enum value TenancyDefault = "default" @@ -111589,6 +119756,15 @@ const ( TenancyHost = "host" ) +// Tenancy_Values returns all elements of the Tenancy enum +func Tenancy_Values() []string { + return []string{ + TenancyDefault, + TenancyDedicated, + TenancyHost, + } +} + const ( // TrafficDirectionIngress is a TrafficDirection enum value TrafficDirectionIngress = "ingress" @@ -111597,6 +119773,14 @@ const ( TrafficDirectionEgress = "egress" ) +// TrafficDirection_Values returns all elements of the TrafficDirection enum +func TrafficDirection_Values() []string { + return []string{ + TrafficDirectionIngress, + TrafficDirectionEgress, + } +} + const ( // TrafficMirrorFilterRuleFieldDestinationPortRange is a TrafficMirrorFilterRuleField enum value TrafficMirrorFilterRuleFieldDestinationPortRange = "destination-port-range" @@ -111611,11 +119795,28 @@ const ( TrafficMirrorFilterRuleFieldDescription = "description" ) +// TrafficMirrorFilterRuleField_Values returns all elements of the TrafficMirrorFilterRuleField enum +func TrafficMirrorFilterRuleField_Values() []string { + return []string{ + TrafficMirrorFilterRuleFieldDestinationPortRange, + TrafficMirrorFilterRuleFieldSourcePortRange, + TrafficMirrorFilterRuleFieldProtocol, + TrafficMirrorFilterRuleFieldDescription, + } +} + const ( // TrafficMirrorNetworkServiceAmazonDns is a TrafficMirrorNetworkService enum value TrafficMirrorNetworkServiceAmazonDns = "amazon-dns" ) +// TrafficMirrorNetworkService_Values returns all elements of the TrafficMirrorNetworkService enum +func TrafficMirrorNetworkService_Values() []string { + return []string{ + TrafficMirrorNetworkServiceAmazonDns, + } +} + const ( // TrafficMirrorRuleActionAccept is a TrafficMirrorRuleAction enum value TrafficMirrorRuleActionAccept = "accept" @@ -111624,6 +119825,14 @@ const ( TrafficMirrorRuleActionReject = "reject" ) +// TrafficMirrorRuleAction_Values returns all elements of the TrafficMirrorRuleAction enum +func TrafficMirrorRuleAction_Values() []string 
{ + return []string{ + TrafficMirrorRuleActionAccept, + TrafficMirrorRuleActionReject, + } +} + const ( // TrafficMirrorSessionFieldPacketLength is a TrafficMirrorSessionField enum value TrafficMirrorSessionFieldPacketLength = "packet-length" @@ -111635,6 +119844,15 @@ const ( TrafficMirrorSessionFieldVirtualNetworkId = "virtual-network-id" ) +// TrafficMirrorSessionField_Values returns all elements of the TrafficMirrorSessionField enum +func TrafficMirrorSessionField_Values() []string { + return []string{ + TrafficMirrorSessionFieldPacketLength, + TrafficMirrorSessionFieldDescription, + TrafficMirrorSessionFieldVirtualNetworkId, + } +} + const ( // TrafficMirrorTargetTypeNetworkInterface is a TrafficMirrorTargetType enum value TrafficMirrorTargetTypeNetworkInterface = "network-interface" @@ -111643,6 +119861,14 @@ const ( TrafficMirrorTargetTypeNetworkLoadBalancer = "network-load-balancer" ) +// TrafficMirrorTargetType_Values returns all elements of the TrafficMirrorTargetType enum +func TrafficMirrorTargetType_Values() []string { + return []string{ + TrafficMirrorTargetTypeNetworkInterface, + TrafficMirrorTargetTypeNetworkLoadBalancer, + } +} + const ( // TrafficTypeAccept is a TrafficType enum value TrafficTypeAccept = "ACCEPT" @@ -111654,6 +119880,15 @@ const ( TrafficTypeAll = "ALL" ) +// TrafficType_Values returns all elements of the TrafficType enum +func TrafficType_Values() []string { + return []string{ + TrafficTypeAccept, + TrafficTypeReject, + TrafficTypeAll, + } +} + const ( // TransitGatewayAssociationStateAssociating is a TransitGatewayAssociationState enum value TransitGatewayAssociationStateAssociating = "associating" @@ -111668,6 +119903,16 @@ const ( TransitGatewayAssociationStateDisassociated = "disassociated" ) +// TransitGatewayAssociationState_Values returns all elements of the TransitGatewayAssociationState enum +func TransitGatewayAssociationState_Values() []string { + return []string{ + TransitGatewayAssociationStateAssociating, + TransitGatewayAssociationStateAssociated, + TransitGatewayAssociationStateDisassociating, + TransitGatewayAssociationStateDisassociated, + } +} + const ( // TransitGatewayAttachmentResourceTypeVpc is a TransitGatewayAttachmentResourceType enum value TransitGatewayAttachmentResourceTypeVpc = "vpc" @@ -111678,14 +119923,31 @@ const ( // TransitGatewayAttachmentResourceTypeDirectConnectGateway is a TransitGatewayAttachmentResourceType enum value TransitGatewayAttachmentResourceTypeDirectConnectGateway = "direct-connect-gateway" + // TransitGatewayAttachmentResourceTypePeering is a TransitGatewayAttachmentResourceType enum value + TransitGatewayAttachmentResourceTypePeering = "peering" + // TransitGatewayAttachmentResourceTypeTgwPeering is a TransitGatewayAttachmentResourceType enum value TransitGatewayAttachmentResourceTypeTgwPeering = "tgw-peering" ) +// TransitGatewayAttachmentResourceType_Values returns all elements of the TransitGatewayAttachmentResourceType enum +func TransitGatewayAttachmentResourceType_Values() []string { + return []string{ + TransitGatewayAttachmentResourceTypeVpc, + TransitGatewayAttachmentResourceTypeVpn, + TransitGatewayAttachmentResourceTypeDirectConnectGateway, + TransitGatewayAttachmentResourceTypePeering, + TransitGatewayAttachmentResourceTypeTgwPeering, + } +} + const ( // TransitGatewayAttachmentStateInitiating is a TransitGatewayAttachmentState enum value TransitGatewayAttachmentStateInitiating = "initiating" + // TransitGatewayAttachmentStateInitiatingRequest is a TransitGatewayAttachmentState enum value 
+ TransitGatewayAttachmentStateInitiatingRequest = "initiatingRequest" + // TransitGatewayAttachmentStatePendingAcceptance is a TransitGatewayAttachmentState enum value TransitGatewayAttachmentStatePendingAcceptance = "pendingAcceptance" @@ -111720,6 +119982,25 @@ const ( TransitGatewayAttachmentStateFailing = "failing" ) +// TransitGatewayAttachmentState_Values returns all elements of the TransitGatewayAttachmentState enum +func TransitGatewayAttachmentState_Values() []string { + return []string{ + TransitGatewayAttachmentStateInitiating, + TransitGatewayAttachmentStateInitiatingRequest, + TransitGatewayAttachmentStatePendingAcceptance, + TransitGatewayAttachmentStateRollingBack, + TransitGatewayAttachmentStatePending, + TransitGatewayAttachmentStateAvailable, + TransitGatewayAttachmentStateModifying, + TransitGatewayAttachmentStateDeleting, + TransitGatewayAttachmentStateDeleted, + TransitGatewayAttachmentStateFailed, + TransitGatewayAttachmentStateRejected, + TransitGatewayAttachmentStateRejecting, + TransitGatewayAttachmentStateFailing, + } +} + const ( // TransitGatewayMulitcastDomainAssociationStateAssociating is a TransitGatewayMulitcastDomainAssociationState enum value TransitGatewayMulitcastDomainAssociationStateAssociating = "associating" @@ -111734,6 +120015,16 @@ const ( TransitGatewayMulitcastDomainAssociationStateDisassociated = "disassociated" ) +// TransitGatewayMulitcastDomainAssociationState_Values returns all elements of the TransitGatewayMulitcastDomainAssociationState enum +func TransitGatewayMulitcastDomainAssociationState_Values() []string { + return []string{ + TransitGatewayMulitcastDomainAssociationStateAssociating, + TransitGatewayMulitcastDomainAssociationStateAssociated, + TransitGatewayMulitcastDomainAssociationStateDisassociating, + TransitGatewayMulitcastDomainAssociationStateDisassociated, + } +} + const ( // TransitGatewayMulticastDomainStatePending is a TransitGatewayMulticastDomainState enum value TransitGatewayMulticastDomainStatePending = "pending" @@ -111748,6 +120039,40 @@ const ( TransitGatewayMulticastDomainStateDeleted = "deleted" ) +// TransitGatewayMulticastDomainState_Values returns all elements of the TransitGatewayMulticastDomainState enum +func TransitGatewayMulticastDomainState_Values() []string { + return []string{ + TransitGatewayMulticastDomainStatePending, + TransitGatewayMulticastDomainStateAvailable, + TransitGatewayMulticastDomainStateDeleting, + TransitGatewayMulticastDomainStateDeleted, + } +} + +const ( + // TransitGatewayPrefixListReferenceStatePending is a TransitGatewayPrefixListReferenceState enum value + TransitGatewayPrefixListReferenceStatePending = "pending" + + // TransitGatewayPrefixListReferenceStateAvailable is a TransitGatewayPrefixListReferenceState enum value + TransitGatewayPrefixListReferenceStateAvailable = "available" + + // TransitGatewayPrefixListReferenceStateModifying is a TransitGatewayPrefixListReferenceState enum value + TransitGatewayPrefixListReferenceStateModifying = "modifying" + + // TransitGatewayPrefixListReferenceStateDeleting is a TransitGatewayPrefixListReferenceState enum value + TransitGatewayPrefixListReferenceStateDeleting = "deleting" +) + +// TransitGatewayPrefixListReferenceState_Values returns all elements of the TransitGatewayPrefixListReferenceState enum +func TransitGatewayPrefixListReferenceState_Values() []string { + return []string{ + TransitGatewayPrefixListReferenceStatePending, + TransitGatewayPrefixListReferenceStateAvailable, + TransitGatewayPrefixListReferenceStateModifying, 
+ TransitGatewayPrefixListReferenceStateDeleting, + } +} + const ( // TransitGatewayPropagationStateEnabling is a TransitGatewayPropagationState enum value TransitGatewayPropagationStateEnabling = "enabling" @@ -111762,6 +120087,16 @@ const ( TransitGatewayPropagationStateDisabled = "disabled" ) +// TransitGatewayPropagationState_Values returns all elements of the TransitGatewayPropagationState enum +func TransitGatewayPropagationState_Values() []string { + return []string{ + TransitGatewayPropagationStateEnabling, + TransitGatewayPropagationStateEnabled, + TransitGatewayPropagationStateDisabling, + TransitGatewayPropagationStateDisabled, + } +} + const ( // TransitGatewayRouteStatePending is a TransitGatewayRouteState enum value TransitGatewayRouteStatePending = "pending" @@ -111779,6 +120114,17 @@ const ( TransitGatewayRouteStateDeleted = "deleted" ) +// TransitGatewayRouteState_Values returns all elements of the TransitGatewayRouteState enum +func TransitGatewayRouteState_Values() []string { + return []string{ + TransitGatewayRouteStatePending, + TransitGatewayRouteStateActive, + TransitGatewayRouteStateBlackhole, + TransitGatewayRouteStateDeleting, + TransitGatewayRouteStateDeleted, + } +} + const ( // TransitGatewayRouteTableStatePending is a TransitGatewayRouteTableState enum value TransitGatewayRouteTableStatePending = "pending" @@ -111793,6 +120139,16 @@ const ( TransitGatewayRouteTableStateDeleted = "deleted" ) +// TransitGatewayRouteTableState_Values returns all elements of the TransitGatewayRouteTableState enum +func TransitGatewayRouteTableState_Values() []string { + return []string{ + TransitGatewayRouteTableStatePending, + TransitGatewayRouteTableStateAvailable, + TransitGatewayRouteTableStateDeleting, + TransitGatewayRouteTableStateDeleted, + } +} + const ( // TransitGatewayRouteTypeStatic is a TransitGatewayRouteType enum value TransitGatewayRouteTypeStatic = "static" @@ -111801,6 +120157,14 @@ const ( TransitGatewayRouteTypePropagated = "propagated" ) +// TransitGatewayRouteType_Values returns all elements of the TransitGatewayRouteType enum +func TransitGatewayRouteType_Values() []string { + return []string{ + TransitGatewayRouteTypeStatic, + TransitGatewayRouteTypePropagated, + } +} + const ( // TransitGatewayStatePending is a TransitGatewayState enum value TransitGatewayStatePending = "pending" @@ -111818,6 +120182,17 @@ const ( TransitGatewayStateDeleted = "deleted" ) +// TransitGatewayState_Values returns all elements of the TransitGatewayState enum +func TransitGatewayState_Values() []string { + return []string{ + TransitGatewayStatePending, + TransitGatewayStateAvailable, + TransitGatewayStateModifying, + TransitGatewayStateDeleting, + TransitGatewayStateDeleted, + } +} + const ( // TransportProtocolTcp is a TransportProtocol enum value TransportProtocolTcp = "tcp" @@ -111826,6 +120201,30 @@ const ( TransportProtocolUdp = "udp" ) +// TransportProtocol_Values returns all elements of the TransportProtocol enum +func TransportProtocol_Values() []string { + return []string{ + TransportProtocolTcp, + TransportProtocolUdp, + } +} + +const ( + // TunnelInsideIpVersionIpv4 is a TunnelInsideIpVersion enum value + TunnelInsideIpVersionIpv4 = "ipv4" + + // TunnelInsideIpVersionIpv6 is a TunnelInsideIpVersion enum value + TunnelInsideIpVersionIpv6 = "ipv6" +) + +// TunnelInsideIpVersion_Values returns all elements of the TunnelInsideIpVersion enum +func TunnelInsideIpVersion_Values() []string { + return []string{ + TunnelInsideIpVersionIpv4, + TunnelInsideIpVersionIpv6, + } +} 
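// Editorial aside (illustrative only, not part of the vendored AWS SDK diff):
// the generated *_Values() helpers added in this hunk let calling code
// enumerate an EC2 enum without hand-maintaining string lists. Below is a
// minimal sketch of one possible use, assuming only the vendored
// github.com/aws/aws-sdk-go/service/ec2 package from this patch; in
// terraform-provider-aws such slices are typically handed to a
// validation.StringInSlice schema validator rather than checked by hand.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ec2"
)

// isValidTunnelInsideIpVersion reports whether v is one of the values
// returned by the generated TunnelInsideIpVersion_Values helper above.
func isValidTunnelInsideIpVersion(v string) bool {
	for _, valid := range ec2.TunnelInsideIpVersion_Values() {
		if v == valid {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isValidTunnelInsideIpVersion("ipv4")) // true
	fmt.Println(isValidTunnelInsideIpVersion("ipv5")) // false
}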
+ const ( // UnlimitedSupportedInstanceFamilyT2 is a UnlimitedSupportedInstanceFamily enum value UnlimitedSupportedInstanceFamilyT2 = "t2" @@ -111835,8 +120234,21 @@ const ( // UnlimitedSupportedInstanceFamilyT3a is a UnlimitedSupportedInstanceFamily enum value UnlimitedSupportedInstanceFamilyT3a = "t3a" + + // UnlimitedSupportedInstanceFamilyT4g is a UnlimitedSupportedInstanceFamily enum value + UnlimitedSupportedInstanceFamilyT4g = "t4g" ) +// UnlimitedSupportedInstanceFamily_Values returns all elements of the UnlimitedSupportedInstanceFamily enum +func UnlimitedSupportedInstanceFamily_Values() []string { + return []string{ + UnlimitedSupportedInstanceFamilyT2, + UnlimitedSupportedInstanceFamilyT3, + UnlimitedSupportedInstanceFamilyT3a, + UnlimitedSupportedInstanceFamilyT4g, + } +} + const ( // UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdMalformed is a UnsuccessfulInstanceCreditSpecificationErrorCode enum value UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdMalformed = "InvalidInstanceID.Malformed" @@ -111851,6 +120263,16 @@ const ( UnsuccessfulInstanceCreditSpecificationErrorCodeInstanceCreditSpecificationNotSupported = "InstanceCreditSpecification.NotSupported" ) +// UnsuccessfulInstanceCreditSpecificationErrorCode_Values returns all elements of the UnsuccessfulInstanceCreditSpecificationErrorCode enum +func UnsuccessfulInstanceCreditSpecificationErrorCode_Values() []string { + return []string{ + UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdMalformed, + UnsuccessfulInstanceCreditSpecificationErrorCodeInvalidInstanceIdNotFound, + UnsuccessfulInstanceCreditSpecificationErrorCodeIncorrectInstanceState, + UnsuccessfulInstanceCreditSpecificationErrorCodeInstanceCreditSpecificationNotSupported, + } +} + const ( // UsageClassTypeSpot is a UsageClassType enum value UsageClassTypeSpot = "spot" @@ -111859,6 +120281,14 @@ const ( UsageClassTypeOnDemand = "on-demand" ) +// UsageClassType_Values returns all elements of the UsageClassType enum +func UsageClassType_Values() []string { + return []string{ + UsageClassTypeSpot, + UsageClassTypeOnDemand, + } +} + const ( // VirtualizationTypeHvm is a VirtualizationType enum value VirtualizationTypeHvm = "hvm" @@ -111867,6 +120297,14 @@ const ( VirtualizationTypeParavirtual = "paravirtual" ) +// VirtualizationType_Values returns all elements of the VirtualizationType enum +func VirtualizationType_Values() []string { + return []string{ + VirtualizationTypeHvm, + VirtualizationTypeParavirtual, + } +} + const ( // VolumeAttachmentStateAttaching is a VolumeAttachmentState enum value VolumeAttachmentStateAttaching = "attaching" @@ -111884,6 +120322,17 @@ const ( VolumeAttachmentStateBusy = "busy" ) +// VolumeAttachmentState_Values returns all elements of the VolumeAttachmentState enum +func VolumeAttachmentState_Values() []string { + return []string{ + VolumeAttachmentStateAttaching, + VolumeAttachmentStateAttached, + VolumeAttachmentStateDetaching, + VolumeAttachmentStateDetached, + VolumeAttachmentStateBusy, + } +} + const ( // VolumeAttributeNameAutoEnableIo is a VolumeAttributeName enum value VolumeAttributeNameAutoEnableIo = "autoEnableIO" @@ -111892,6 +120341,14 @@ const ( VolumeAttributeNameProductCodes = "productCodes" ) +// VolumeAttributeName_Values returns all elements of the VolumeAttributeName enum +func VolumeAttributeName_Values() []string { + return []string{ + VolumeAttributeNameAutoEnableIo, + VolumeAttributeNameProductCodes, + } +} + const ( // VolumeModificationStateModifying is a 
VolumeModificationState enum value VolumeModificationStateModifying = "modifying" @@ -111906,6 +120363,16 @@ const ( VolumeModificationStateFailed = "failed" ) +// VolumeModificationState_Values returns all elements of the VolumeModificationState enum +func VolumeModificationState_Values() []string { + return []string{ + VolumeModificationStateModifying, + VolumeModificationStateOptimizing, + VolumeModificationStateCompleted, + VolumeModificationStateFailed, + } +} + const ( // VolumeStateCreating is a VolumeState enum value VolumeStateCreating = "creating" @@ -111926,6 +120393,18 @@ const ( VolumeStateError = "error" ) +// VolumeState_Values returns all elements of the VolumeState enum +func VolumeState_Values() []string { + return []string{ + VolumeStateCreating, + VolumeStateAvailable, + VolumeStateInUse, + VolumeStateDeleting, + VolumeStateDeleted, + VolumeStateError, + } +} + const ( // VolumeStatusInfoStatusOk is a VolumeStatusInfoStatus enum value VolumeStatusInfoStatusOk = "ok" @@ -111937,6 +120416,15 @@ const ( VolumeStatusInfoStatusInsufficientData = "insufficient-data" ) +// VolumeStatusInfoStatus_Values returns all elements of the VolumeStatusInfoStatus enum +func VolumeStatusInfoStatus_Values() []string { + return []string{ + VolumeStatusInfoStatusOk, + VolumeStatusInfoStatusImpaired, + VolumeStatusInfoStatusInsufficientData, + } +} + const ( // VolumeStatusNameIoEnabled is a VolumeStatusName enum value VolumeStatusNameIoEnabled = "io-enabled" @@ -111945,6 +120433,14 @@ const ( VolumeStatusNameIoPerformance = "io-performance" ) +// VolumeStatusName_Values returns all elements of the VolumeStatusName enum +func VolumeStatusName_Values() []string { + return []string{ + VolumeStatusNameIoEnabled, + VolumeStatusNameIoPerformance, + } +} + const ( // VolumeTypeStandard is a VolumeType enum value VolumeTypeStandard = "standard" @@ -111952,6 +120448,9 @@ const ( // VolumeTypeIo1 is a VolumeType enum value VolumeTypeIo1 = "io1" + // VolumeTypeIo2 is a VolumeType enum value + VolumeTypeIo2 = "io2" + // VolumeTypeGp2 is a VolumeType enum value VolumeTypeGp2 = "gp2" @@ -111962,6 +120461,18 @@ const ( VolumeTypeSt1 = "st1" ) +// VolumeType_Values returns all elements of the VolumeType enum +func VolumeType_Values() []string { + return []string{ + VolumeTypeStandard, + VolumeTypeIo1, + VolumeTypeIo2, + VolumeTypeGp2, + VolumeTypeSc1, + VolumeTypeSt1, + } +} + const ( // VpcAttributeNameEnableDnsSupport is a VpcAttributeName enum value VpcAttributeNameEnableDnsSupport = "enableDnsSupport" @@ -111970,6 +120481,14 @@ const ( VpcAttributeNameEnableDnsHostnames = "enableDnsHostnames" ) +// VpcAttributeName_Values returns all elements of the VpcAttributeName enum +func VpcAttributeName_Values() []string { + return []string{ + VpcAttributeNameEnableDnsSupport, + VpcAttributeNameEnableDnsHostnames, + } +} + const ( // VpcCidrBlockStateCodeAssociating is a VpcCidrBlockStateCode enum value VpcCidrBlockStateCodeAssociating = "associating" @@ -111990,6 +120509,18 @@ const ( VpcCidrBlockStateCodeFailed = "failed" ) +// VpcCidrBlockStateCode_Values returns all elements of the VpcCidrBlockStateCode enum +func VpcCidrBlockStateCode_Values() []string { + return []string{ + VpcCidrBlockStateCodeAssociating, + VpcCidrBlockStateCodeAssociated, + VpcCidrBlockStateCodeDisassociating, + VpcCidrBlockStateCodeDisassociated, + VpcCidrBlockStateCodeFailing, + VpcCidrBlockStateCodeFailed, + } +} + const ( // VpcEndpointTypeInterface is a VpcEndpointType enum value VpcEndpointTypeInterface = "Interface" @@ -111998,6 
+120529,14 @@ const ( VpcEndpointTypeGateway = "Gateway" ) +// VpcEndpointType_Values returns all elements of the VpcEndpointType enum +func VpcEndpointType_Values() []string { + return []string{ + VpcEndpointTypeInterface, + VpcEndpointTypeGateway, + } +} + const ( // VpcPeeringConnectionStateReasonCodeInitiatingRequest is a VpcPeeringConnectionStateReasonCode enum value VpcPeeringConnectionStateReasonCodeInitiatingRequest = "initiating-request" @@ -112027,6 +120566,21 @@ const ( VpcPeeringConnectionStateReasonCodeDeleting = "deleting" ) +// VpcPeeringConnectionStateReasonCode_Values returns all elements of the VpcPeeringConnectionStateReasonCode enum +func VpcPeeringConnectionStateReasonCode_Values() []string { + return []string{ + VpcPeeringConnectionStateReasonCodeInitiatingRequest, + VpcPeeringConnectionStateReasonCodePendingAcceptance, + VpcPeeringConnectionStateReasonCodeActive, + VpcPeeringConnectionStateReasonCodeDeleted, + VpcPeeringConnectionStateReasonCodeRejected, + VpcPeeringConnectionStateReasonCodeFailed, + VpcPeeringConnectionStateReasonCodeExpired, + VpcPeeringConnectionStateReasonCodeProvisioning, + VpcPeeringConnectionStateReasonCodeDeleting, + } +} + const ( // VpcStatePending is a VpcState enum value VpcStatePending = "pending" @@ -112035,11 +120589,26 @@ const ( VpcStateAvailable = "available" ) +// VpcState_Values returns all elements of the VpcState enum +func VpcState_Values() []string { + return []string{ + VpcStatePending, + VpcStateAvailable, + } +} + const ( // VpcTenancyDefault is a VpcTenancy enum value VpcTenancyDefault = "default" ) +// VpcTenancy_Values returns all elements of the VpcTenancy enum +func VpcTenancy_Values() []string { + return []string{ + VpcTenancyDefault, + } +} + const ( // VpnEcmpSupportValueEnable is a VpnEcmpSupportValue enum value VpnEcmpSupportValueEnable = "enable" @@ -112048,11 +120617,26 @@ const ( VpnEcmpSupportValueDisable = "disable" ) +// VpnEcmpSupportValue_Values returns all elements of the VpnEcmpSupportValue enum +func VpnEcmpSupportValue_Values() []string { + return []string{ + VpnEcmpSupportValueEnable, + VpnEcmpSupportValueDisable, + } +} + const ( // VpnProtocolOpenvpn is a VpnProtocol enum value VpnProtocolOpenvpn = "openvpn" ) +// VpnProtocol_Values returns all elements of the VpnProtocol enum +func VpnProtocol_Values() []string { + return []string{ + VpnProtocolOpenvpn, + } +} + const ( // VpnStatePending is a VpnState enum value VpnStatePending = "pending" @@ -112067,7 +120651,24 @@ const ( VpnStateDeleted = "deleted" ) +// VpnState_Values returns all elements of the VpnState enum +func VpnState_Values() []string { + return []string{ + VpnStatePending, + VpnStateAvailable, + VpnStateDeleting, + VpnStateDeleted, + } +} + const ( // VpnStaticRouteSourceStatic is a VpnStaticRouteSource enum value VpnStaticRouteSourceStatic = "Static" ) + +// VpnStaticRouteSource_Values returns all elements of the VpnStaticRouteSource enum +func VpnStaticRouteSource_Values() []string { + return []string{ + VpnStaticRouteSourceStatic, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go index efec8d8a9..3ad305918 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go @@ -26,8 +26,12 @@ func init() { // only set the retryer on request if config doesn't have a retryer if r.Config.Retryer == nil && (r.Operation.Name == opModifyNetworkInterfaceAttribute || 
r.Operation.Name == opAssignPrivateIpAddresses) { + maxRetries := client.DefaultRetryerMaxNumRetries + if m := r.Config.MaxRetries; m != nil && *m != aws.UseServiceDefaultRetries { + maxRetries = *m + } r.Retryer = client.DefaultRetryer{ - NumMaxRetries: client.DefaultRetryerMaxNumRetries, + NumMaxRetries: maxRetries, MinRetryDelay: customRetryerMinRetryDelay, MinThrottleDelay: customRetryerMinRetryDelay, MaxRetryDelay: customRetryerMaxRetryDelay, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go index c80352a13..1bde2c2f5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/ec2query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go index 2a5e6e3cd..462b049cc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -60,14 +60,12 @@ func (c *ECR) BatchCheckLayerAvailabilityRequest(input *BatchCheckLayerAvailabil // Checks the availability of one or more image layers in a repository. // // When an image is pushed to a repository, each image layer is checked to verify -// if it has been uploaded before. If it is, then the image layer is skipped. +// if it has been uploaded before. If it has been uploaded, then the image layer +// is skipped. // -// When an image is pulled from a repository, each image layer is checked once -// to verify it is available to be pulled. -// -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -347,9 +345,9 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req * // When an image is pushed, the CompleteLayerUpload API is called once per each // new image layer to verify that the upload has completed. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -371,7 +369,7 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req * // repository and ensure that you are performing operations on the correct registry. 
// // * UploadNotFoundException -// The upload could not be found, or the specified upload id is not valid for +// The upload could not be found, or the specified upload ID is not valid for // this repository. // // * InvalidLayerException @@ -387,6 +385,9 @@ func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req * // * EmptyUploadException // The specified layer upload does not contain any layer parts. // +// * KmsException +// The operation failed due to a KMS exception. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CompleteLayerUpload func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLayerUploadOutput, error) { req, out := c.CompleteLayerUploadRequest(input) @@ -485,10 +486,12 @@ func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *reques // // * LimitExceededException // The operation did not succeed because it would have exceeded a service limit -// for your account. For more information, see Amazon ECR Default Service Limits -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) // in the Amazon Elastic Container Registry User Guide. // +// * KmsException +// The operation failed due to a KMS exception. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/CreateRepository func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryOutput, error) { req, out := c.CreateRepositoryRequest(input) @@ -672,6 +675,9 @@ func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *reques // The specified repository contains images. To delete a repository that contains // images, you must force the deletion with the force parameter. // +// * KmsException +// The operation failed due to a KMS exception. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DeleteRepository func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryOutput, error) { req, out := c.DeleteRepositoryRequest(input) @@ -1375,11 +1381,11 @@ func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput) // layer. You can only get URLs for image layers that are referenced in an image. // // When an image is pulled, the GetDownloadUrlForLayer API is called once per -// image layer. +// image layer that is not already cached. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1807,12 +1813,12 @@ func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req * // Notifies Amazon ECR that you intend to upload an image layer. // // When an image is pushed, the InitiateLayerUpload API is called once per image -// layer that has not already been uploaded. Whether an image layer has been -// uploaded before is determined by the BatchCheckLayerAvailability API action. 
+// layer that has not already been uploaded. Whether or not an image layer has +// been uploaded is determined by the BatchCheckLayerAvailability API action. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1833,6 +1839,9 @@ func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req * // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // +// * KmsException +// The operation failed due to a KMS exception. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/InitiateLayerUpload func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLayerUploadOutput, error) { req, out := c.InitiateLayerUploadRequest(input) @@ -2141,12 +2150,12 @@ func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, outpu // Creates or updates the image manifest and tags associated with an image. // // When an image is pushed and all new image layers have been uploaded, the -// PutImage API is called once to create or update the image manifest and tags -// associated with the image. +// PutImage API is called once to create or update the image manifest and the +// tags associated with the image. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2175,16 +2184,25 @@ func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, outpu // The specified layers could not be found, or the specified layer is not valid // for this repository. // +// * ReferencedImagesNotFoundException +// The manifest list is referencing an image that does not exist. +// // * LimitExceededException // The operation did not succeed because it would have exceeded a service limit -// for your account. For more information, see Amazon ECR Default Service Limits -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) // in the Amazon Elastic Container Registry User Guide. // // * ImageTagAlreadyExistsException // The specified image is tagged with a tag that already exists. The repository // is configured for tag immutability. // +// * ImageDigestDoesNotMatchException +// The specified image digest does not match the digest that Amazon ECR calculated +// for the image. 
+// +// * KmsException +// The operation failed due to a KMS exception. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImage func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) { req, out := c.PutImageRequest(input) @@ -2516,7 +2534,7 @@ func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req * // SetRepositoryPolicy API operation for Amazon EC2 Container Registry. // // Applies a repository policy to the specified repository to control access -// permissions. For more information, see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/RepositoryPolicies.html) +// permissions. For more information, see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policies.html) // in the Amazon Elastic Container Registry User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2624,6 +2642,14 @@ func (c *ECR) StartImageScanRequest(input *StartImageScanInput) (req *request.Re // The specified parameter is invalid. Review the available parameters for the // API request. // +// * UnsupportedImageTypeException +// The image is of a type that cannot be scanned. +// +// * LimitExceededException +// The operation did not succeed because it would have exceeded a service limit +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) +// in the Amazon Elastic Container Registry User Guide. +// // * RepositoryNotFoundException // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. @@ -2724,8 +2750,8 @@ func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPrev // The lifecycle policy could not be found, and no policy is set to the repository. // // * LifecyclePolicyPreviewInProgressException -// The previous lifecycle policy preview request has not completed. Please try -// again later. +// The previous lifecycle policy preview request has not completed. Wait and +// try again. // // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { @@ -2994,9 +3020,9 @@ func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request. // size of each image layer part can be 20971520 bytes (or about 20MB). The // UploadLayerPart API is called once per each new image layer part. // -// This operation is used by the Amazon ECR proxy, and it is not intended for -// general use by customers for pulling and pushing images. In most cases, you -// should use the docker CLI to pull, tag, and push images. +// This operation is used by the Amazon ECR proxy and is not generally used +// by customers for pulling and pushing images. In most cases, you should use +// the docker CLI to pull, tag, and push images. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3022,15 +3048,17 @@ func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request. // repository and ensure that you are performing operations on the correct registry. 
// // * UploadNotFoundException -// The upload could not be found, or the specified upload id is not valid for +// The upload could not be found, or the specified upload ID is not valid for // this repository. // // * LimitExceededException // The operation did not succeed because it would have exceeded a service limit -// for your account. For more information, see Amazon ECR Default Service Limits -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) // in the Amazon Elastic Container Registry User Guide. // +// * KmsException +// The operation failed due to a KMS exception. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/UploadLayerPart func (c *ECR) UploadLayerPart(input *UploadLayerPartInput) (*UploadLayerPartOutput, error) { req, out := c.UploadLayerPartRequest(input) @@ -3611,9 +3639,12 @@ func (s *CompleteLayerUploadOutput) SetUploadId(v string) *CompleteLayerUploadOu type CreateRepositoryInput struct { _ struct{} `type:"structure"` - // The image scanning configuration for the repository. This setting determines - // whether images are scanned for known vulnerabilities after being pushed to - // the repository. + // The encryption configuration for the repository. This determines how the + // contents of your repository are encrypted at rest. + EncryptionConfiguration *EncryptionConfiguration `locationName:"encryptionConfiguration" type:"structure"` + + // The image scanning configuration for the repository. This determines whether + // images are scanned for known vulnerabilities after being pushed to the repository. ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` // The tag mutability setting for the repository. If this parameter is omitted, @@ -3655,6 +3686,11 @@ func (s *CreateRepositoryInput) Validate() error { if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) } + if s.EncryptionConfiguration != nil { + if err := s.EncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("EncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3662,6 +3698,12 @@ func (s *CreateRepositoryInput) Validate() error { return nil } +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *CreateRepositoryInput) SetEncryptionConfiguration(v *EncryptionConfiguration) *CreateRepositoryInput { + s.EncryptionConfiguration = v + return s +} + // SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. func (s *CreateRepositoryInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *CreateRepositoryInput { s.ImageScanningConfiguration = v @@ -4457,8 +4499,8 @@ func (s *DescribeRepositoriesOutput) SetRepositories(v []*Repository) *DescribeR // The specified layer upload does not contain any layer parts. type EmptyUploadException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. 
Message_ *string `locationName:"message" type:"string"` @@ -4476,17 +4518,17 @@ func (s EmptyUploadException) GoString() string { func newErrorEmptyUploadException(v protocol.ResponseMetadata) error { return &EmptyUploadException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EmptyUploadException) Code() string { +func (s *EmptyUploadException) Code() string { return "EmptyUploadException" } // Message returns the exception's message. -func (s EmptyUploadException) Message() string { +func (s *EmptyUploadException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4494,22 +4536,103 @@ func (s EmptyUploadException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EmptyUploadException) OrigErr() error { +func (s *EmptyUploadException) OrigErr() error { return nil } -func (s EmptyUploadException) Error() string { +func (s *EmptyUploadException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EmptyUploadException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EmptyUploadException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EmptyUploadException) RequestID() string { - return s.respMetadata.RequestID +func (s *EmptyUploadException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The encryption configuration for the repository. This determines how the +// contents of your repository are encrypted at rest. +// +// By default, when no encryption configuration is set or the AES256 encryption +// type is used, Amazon ECR uses server-side encryption with Amazon S3-managed +// encryption keys which encrypts your data at rest using an AES-256 encryption +// algorithm. This does not require any action on your part. +// +// For more control over the encryption of the contents of your repository, +// you can use server-side encryption with customer master keys (CMKs) stored +// in AWS Key Management Service (AWS KMS) to encrypt your images. For more +// information, see Amazon ECR encryption at rest (https://docs.aws.amazon.com/AmazonECR/latest/userguide/encryption-at-rest.html) +// in the Amazon Elastic Container Registry User Guide. +type EncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // The encryption type to use. + // + // If you use the KMS encryption type, the contents of the repository will be + // encrypted using server-side encryption with customer master keys (CMKs) stored + // in AWS KMS. When you use AWS KMS to encrypt your data, you can either use + // the default AWS managed CMK for Amazon ECR, or specify your own CMK, which + // you already created. For more information, see Protecting Data Using Server-Side + // Encryption with CMKs Stored in AWS Key Management Service (SSE-KMS) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html) + // in the Amazon Simple Storage Service Console Developer Guide.. + // + // If you use the AES256 encryption type, Amazon ECR uses server-side encryption + // with Amazon S3-managed encryption keys which encrypts the images in the repository + // using an AES-256 encryption algorithm. 
For more information, see Protecting + // Data Using Server-Side Encryption with Amazon S3-Managed Encryption Keys + // (SSE-S3) (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) + // in the Amazon Simple Storage Service Console Developer Guide.. + // + // EncryptionType is a required field + EncryptionType *string `locationName:"encryptionType" type:"string" required:"true" enum:"EncryptionType"` + + // If you use the KMS encryption type, specify the CMK to use for encryption. + // The alias, key ID, or full ARN of the CMK can be specified. The key must + // exist in the same Region as the repository. If no key is specified, the default + // AWS managed CMK for Amazon ECR will be used. + KmsKey *string `locationName:"kmsKey" min:"1" type:"string"` +} + +// String returns the string representation +func (s EncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EncryptionConfiguration"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + if s.KmsKey != nil && len(*s.KmsKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KmsKey", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *EncryptionConfiguration) SetEncryptionType(v string) *EncryptionConfiguration { + s.EncryptionType = &v + return s +} + +// SetKmsKey sets the KmsKey field's value. +func (s *EncryptionConfiguration) SetKmsKey(v string) *EncryptionConfiguration { + s.KmsKey = &v + return s } type GetAuthorizationTokenInput struct { @@ -5070,6 +5193,9 @@ type Image struct { // The image manifest associated with the image. ImageManifest *string `locationName:"imageManifest" min:"1" type:"string"` + // The manifest media type of the image. + ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"` + // The AWS account ID associated with the registry containing the image. RegistryId *string `locationName:"registryId" type:"string"` @@ -5099,6 +5225,12 @@ func (s *Image) SetImageManifest(v string) *Image { return s } +// SetImageManifestMediaType sets the ImageManifestMediaType field's value. +func (s *Image) SetImageManifestMediaType(v string) *Image { + s.ImageManifestMediaType = &v + return s +} + // SetRegistryId sets the RegistryId field's value. func (s *Image) SetRegistryId(v string) *Image { s.RegistryId = &v @@ -5114,8 +5246,8 @@ func (s *Image) SetRepositoryName(v string) *Image { // The specified image has already been pushed, and there were no changes to // the manifest or image tag after the last push. type ImageAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. 
Message_ *string `locationName:"message" type:"string"` @@ -5133,17 +5265,17 @@ func (s ImageAlreadyExistsException) GoString() string { func newErrorImageAlreadyExistsException(v protocol.ResponseMetadata) error { return &ImageAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ImageAlreadyExistsException) Code() string { +func (s *ImageAlreadyExistsException) Code() string { return "ImageAlreadyExistsException" } // Message returns the exception's message. -func (s ImageAlreadyExistsException) Message() string { +func (s *ImageAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5151,31 +5283,37 @@ func (s ImageAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ImageAlreadyExistsException) OrigErr() error { +func (s *ImageAlreadyExistsException) OrigErr() error { return nil } -func (s ImageAlreadyExistsException) Error() string { +func (s *ImageAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ImageAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ImageAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ImageAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ImageAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // An object that describes an image returned by a DescribeImages operation. type ImageDetail struct { _ struct{} `type:"structure"` + // The artifact media type of the image. + ArtifactMediaType *string `locationName:"artifactMediaType" type:"string"` + // The sha256 digest of the image manifest. ImageDigest *string `locationName:"imageDigest" type:"string"` + // The media type of the image manifest. + ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"` + // The date and time, expressed in standard JavaScript date format, at which // the current image was pushed to the repository. ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"` @@ -5188,6 +5326,9 @@ type ImageDetail struct { // The size, in bytes, of the image in the repository. // + // If the image is a manifest list, this will be the max size of all manifests + // in the list. + // // Beginning with Docker version 1.9, the Docker client compresses image layers // before pushing them to a V2 Docker registry. The output of the docker images // command shows the uncompressed image size, so it may return a larger image @@ -5214,12 +5355,24 @@ func (s ImageDetail) GoString() string { return s.String() } +// SetArtifactMediaType sets the ArtifactMediaType field's value. +func (s *ImageDetail) SetArtifactMediaType(v string) *ImageDetail { + s.ArtifactMediaType = &v + return s +} + // SetImageDigest sets the ImageDigest field's value. func (s *ImageDetail) SetImageDigest(v string) *ImageDetail { s.ImageDigest = &v return s } +// SetImageManifestMediaType sets the ImageManifestMediaType field's value. +func (s *ImageDetail) SetImageManifestMediaType(v string) *ImageDetail { + s.ImageManifestMediaType = &v + return s +} + // SetImagePushedAt sets the ImagePushedAt field's value. 
func (s *ImageDetail) SetImagePushedAt(v time.Time) *ImageDetail { s.ImagePushedAt = &v @@ -5262,6 +5415,63 @@ func (s *ImageDetail) SetRepositoryName(v string) *ImageDetail { return s } +// The specified image digest does not match the digest that Amazon ECR calculated +// for the image. +type ImageDigestDoesNotMatchException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ImageDigestDoesNotMatchException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageDigestDoesNotMatchException) GoString() string { + return s.String() +} + +func newErrorImageDigestDoesNotMatchException(v protocol.ResponseMetadata) error { + return &ImageDigestDoesNotMatchException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ImageDigestDoesNotMatchException) Code() string { + return "ImageDigestDoesNotMatchException" +} + +// Message returns the exception's message. +func (s *ImageDigestDoesNotMatchException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ImageDigestDoesNotMatchException) OrigErr() error { + return nil +} + +func (s *ImageDigestDoesNotMatchException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ImageDigestDoesNotMatchException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ImageDigestDoesNotMatchException) RequestID() string { + return s.RespMetadata.RequestID +} + // An object representing an Amazon ECR image failure. type ImageFailure struct { _ struct{} `type:"structure"` @@ -5352,8 +5562,8 @@ func (s *ImageIdentifier) SetImageTag(v string) *ImageIdentifier { // The image requested does not exist in the specified repository. type ImageNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5370,17 +5580,17 @@ func (s ImageNotFoundException) GoString() string { func newErrorImageNotFoundException(v protocol.ResponseMetadata) error { return &ImageNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ImageNotFoundException) Code() string { +func (s *ImageNotFoundException) Code() string { return "ImageNotFoundException" } // Message returns the exception's message. -func (s ImageNotFoundException) Message() string { +func (s *ImageNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5388,22 +5598,22 @@ func (s ImageNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ImageNotFoundException) OrigErr() error { +func (s *ImageNotFoundException) OrigErr() error { return nil } -func (s ImageNotFoundException) Error() string { +func (s *ImageNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ImageNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ImageNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ImageNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ImageNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about an image scan finding. @@ -5623,8 +5833,8 @@ func (s *ImageScanningConfiguration) SetScanOnPush(v bool) *ImageScanningConfigu // The specified image is tagged with a tag that already exists. The repository // is configured for tag immutability. type ImageTagAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5641,17 +5851,17 @@ func (s ImageTagAlreadyExistsException) GoString() string { func newErrorImageTagAlreadyExistsException(v protocol.ResponseMetadata) error { return &ImageTagAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ImageTagAlreadyExistsException) Code() string { +func (s *ImageTagAlreadyExistsException) Code() string { return "ImageTagAlreadyExistsException" } // Message returns the exception's message. -func (s ImageTagAlreadyExistsException) Message() string { +func (s *ImageTagAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5659,22 +5869,22 @@ func (s ImageTagAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ImageTagAlreadyExistsException) OrigErr() error { +func (s *ImageTagAlreadyExistsException) OrigErr() error { return nil } -func (s ImageTagAlreadyExistsException) Error() string { +func (s *ImageTagAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ImageTagAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ImageTagAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ImageTagAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ImageTagAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } type InitiateLayerUploadInput struct { @@ -5765,8 +5975,8 @@ func (s *InitiateLayerUploadOutput) SetUploadId(v string) *InitiateLayerUploadOu // The layer digest calculation performed by Amazon ECR upon receipt of the // image layer does not match the digest specified. type InvalidLayerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -5784,17 +5994,17 @@ func (s InvalidLayerException) GoString() string { func newErrorInvalidLayerException(v protocol.ResponseMetadata) error { return &InvalidLayerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidLayerException) Code() string { +func (s *InvalidLayerException) Code() string { return "InvalidLayerException" } // Message returns the exception's message. -func (s InvalidLayerException) Message() string { +func (s *InvalidLayerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5802,29 +6012,29 @@ func (s InvalidLayerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidLayerException) OrigErr() error { +func (s *InvalidLayerException) OrigErr() error { return nil } -func (s InvalidLayerException) Error() string { +func (s *InvalidLayerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidLayerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidLayerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidLayerException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidLayerException) RequestID() string { + return s.RespMetadata.RequestID } // The layer part size is not valid, or the first byte specified is not consecutive // to the last byte of a previous layer part upload. type InvalidLayerPartException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The last valid byte received from the layer part upload that is associated // with the exception. @@ -5855,17 +6065,17 @@ func (s InvalidLayerPartException) GoString() string { func newErrorInvalidLayerPartException(v protocol.ResponseMetadata) error { return &InvalidLayerPartException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidLayerPartException) Code() string { +func (s *InvalidLayerPartException) Code() string { return "InvalidLayerPartException" } // Message returns the exception's message. -func (s InvalidLayerPartException) Message() string { +func (s *InvalidLayerPartException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5873,29 +6083,29 @@ func (s InvalidLayerPartException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidLayerPartException) OrigErr() error { +func (s *InvalidLayerPartException) OrigErr() error { return nil } -func (s InvalidLayerPartException) Error() string { +func (s *InvalidLayerPartException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidLayerPartException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidLayerPartException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidLayerPartException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidLayerPartException) RequestID() string { + return s.RespMetadata.RequestID } // The specified parameter is invalid. Review the available parameters for the // API request. 
type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -5913,17 +6123,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5931,30 +6141,30 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // An invalid parameter has been specified. Tag keys can have a maximum character // length of 128 characters, and tag values can have a maximum length of 256 // characters. type InvalidTagParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5971,17 +6181,17 @@ func (s InvalidTagParameterException) GoString() string { func newErrorInvalidTagParameterException(v protocol.ResponseMetadata) error { return &InvalidTagParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTagParameterException) Code() string { +func (s *InvalidTagParameterException) Code() string { return "InvalidTagParameterException" } // Message returns the exception's message. -func (s InvalidTagParameterException) Message() string { +func (s *InvalidTagParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5989,22 +6199,81 @@ func (s InvalidTagParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTagParameterException) OrigErr() error { +func (s *InvalidTagParameterException) OrigErr() error { return nil } -func (s InvalidTagParameterException) Error() string { +func (s *InvalidTagParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidTagParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTagParameterException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidTagParameterException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The operation failed due to a KMS exception. +type KmsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // The error code returned by AWS KMS. + KmsError *string `locationName:"kmsError" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s KmsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KmsException) GoString() string { + return s.String() +} + +func newErrorKmsException(v protocol.ResponseMetadata) error { + return &KmsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *KmsException) Code() string { + return "KmsException" +} + +// Message returns the exception's message. +func (s *KmsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *KmsException) OrigErr() error { + return nil +} + +func (s *KmsException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *KmsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTagParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *KmsException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing an Amazon ECR image layer. @@ -6061,8 +6330,8 @@ func (s *Layer) SetMediaType(v string) *Layer { // The image layer already exists in the associated repository. type LayerAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -6080,17 +6349,17 @@ func (s LayerAlreadyExistsException) GoString() string { func newErrorLayerAlreadyExistsException(v protocol.ResponseMetadata) error { return &LayerAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LayerAlreadyExistsException) Code() string { +func (s *LayerAlreadyExistsException) Code() string { return "LayerAlreadyExistsException" } // Message returns the exception's message. -func (s LayerAlreadyExistsException) Message() string { +func (s *LayerAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6098,22 +6367,22 @@ func (s LayerAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s LayerAlreadyExistsException) OrigErr() error { +func (s *LayerAlreadyExistsException) OrigErr() error { return nil } -func (s LayerAlreadyExistsException) Error() string { +func (s *LayerAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LayerAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LayerAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LayerAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *LayerAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing an Amazon ECR image layer failure. @@ -6161,8 +6430,8 @@ func (s *LayerFailure) SetLayerDigest(v string) *LayerFailure { // The specified layer is not available because it is not associated with an // image. Unassociated image layers may be cleaned up at any time. type LayerInaccessibleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -6180,17 +6449,17 @@ func (s LayerInaccessibleException) GoString() string { func newErrorLayerInaccessibleException(v protocol.ResponseMetadata) error { return &LayerInaccessibleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LayerInaccessibleException) Code() string { +func (s *LayerInaccessibleException) Code() string { return "LayerInaccessibleException" } // Message returns the exception's message. -func (s LayerInaccessibleException) Message() string { +func (s *LayerInaccessibleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6198,28 +6467,28 @@ func (s LayerInaccessibleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LayerInaccessibleException) OrigErr() error { +func (s *LayerInaccessibleException) OrigErr() error { return nil } -func (s LayerInaccessibleException) Error() string { +func (s *LayerInaccessibleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LayerInaccessibleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LayerInaccessibleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LayerInaccessibleException) RequestID() string { - return s.respMetadata.RequestID +func (s *LayerInaccessibleException) RequestID() string { + return s.RespMetadata.RequestID } // Layer parts must be at least 5 MiB in size. type LayerPartTooSmallException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. 
Message_ *string `locationName:"message" type:"string"` @@ -6237,17 +6506,17 @@ func (s LayerPartTooSmallException) GoString() string { func newErrorLayerPartTooSmallException(v protocol.ResponseMetadata) error { return &LayerPartTooSmallException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LayerPartTooSmallException) Code() string { +func (s *LayerPartTooSmallException) Code() string { return "LayerPartTooSmallException" } // Message returns the exception's message. -func (s LayerPartTooSmallException) Message() string { +func (s *LayerPartTooSmallException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6255,29 +6524,29 @@ func (s LayerPartTooSmallException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LayerPartTooSmallException) OrigErr() error { +func (s *LayerPartTooSmallException) OrigErr() error { return nil } -func (s LayerPartTooSmallException) Error() string { +func (s *LayerPartTooSmallException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LayerPartTooSmallException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LayerPartTooSmallException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LayerPartTooSmallException) RequestID() string { - return s.respMetadata.RequestID +func (s *LayerPartTooSmallException) RequestID() string { + return s.RespMetadata.RequestID } // The specified layers could not be found, or the specified layer is not valid // for this repository. type LayersNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -6295,17 +6564,17 @@ func (s LayersNotFoundException) GoString() string { func newErrorLayersNotFoundException(v protocol.ResponseMetadata) error { return &LayersNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LayersNotFoundException) Code() string { +func (s *LayersNotFoundException) Code() string { return "LayersNotFoundException" } // Message returns the exception's message. -func (s LayersNotFoundException) Message() string { +func (s *LayersNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6313,28 +6582,28 @@ func (s LayersNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LayersNotFoundException) OrigErr() error { +func (s *LayersNotFoundException) OrigErr() error { return nil } -func (s LayersNotFoundException) Error() string { +func (s *LayersNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LayersNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LayersNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s LayersNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *LayersNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The lifecycle policy could not be found, and no policy is set to the repository. type LifecyclePolicyNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6351,17 +6620,17 @@ func (s LifecyclePolicyNotFoundException) GoString() string { func newErrorLifecyclePolicyNotFoundException(v protocol.ResponseMetadata) error { return &LifecyclePolicyNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LifecyclePolicyNotFoundException) Code() string { +func (s *LifecyclePolicyNotFoundException) Code() string { return "LifecyclePolicyNotFoundException" } // Message returns the exception's message. -func (s LifecyclePolicyNotFoundException) Message() string { +func (s *LifecyclePolicyNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6369,22 +6638,22 @@ func (s LifecyclePolicyNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LifecyclePolicyNotFoundException) OrigErr() error { +func (s *LifecyclePolicyNotFoundException) OrigErr() error { return nil } -func (s LifecyclePolicyNotFoundException) Error() string { +func (s *LifecyclePolicyNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LifecyclePolicyNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LifecyclePolicyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LifecyclePolicyNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *LifecyclePolicyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The filter for the lifecycle policy preview. @@ -6411,11 +6680,11 @@ func (s *LifecyclePolicyPreviewFilter) SetTagStatus(v string) *LifecyclePolicyPr return s } -// The previous lifecycle policy preview request has not completed. Please try -// again later. +// The previous lifecycle policy preview request has not completed. Wait and +// try again. type LifecyclePolicyPreviewInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6432,17 +6701,17 @@ func (s LifecyclePolicyPreviewInProgressException) GoString() string { func newErrorLifecyclePolicyPreviewInProgressException(v protocol.ResponseMetadata) error { return &LifecyclePolicyPreviewInProgressException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LifecyclePolicyPreviewInProgressException) Code() string { +func (s *LifecyclePolicyPreviewInProgressException) Code() string { return "LifecyclePolicyPreviewInProgressException" } // Message returns the exception's message. 
-func (s LifecyclePolicyPreviewInProgressException) Message() string { +func (s *LifecyclePolicyPreviewInProgressException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6450,28 +6719,28 @@ func (s LifecyclePolicyPreviewInProgressException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LifecyclePolicyPreviewInProgressException) OrigErr() error { +func (s *LifecyclePolicyPreviewInProgressException) OrigErr() error { return nil } -func (s LifecyclePolicyPreviewInProgressException) Error() string { +func (s *LifecyclePolicyPreviewInProgressException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LifecyclePolicyPreviewInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LifecyclePolicyPreviewInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LifecyclePolicyPreviewInProgressException) RequestID() string { - return s.respMetadata.RequestID +func (s *LifecyclePolicyPreviewInProgressException) RequestID() string { + return s.RespMetadata.RequestID } // There is no dry run for this repository. type LifecyclePolicyPreviewNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6488,17 +6757,17 @@ func (s LifecyclePolicyPreviewNotFoundException) GoString() string { func newErrorLifecyclePolicyPreviewNotFoundException(v protocol.ResponseMetadata) error { return &LifecyclePolicyPreviewNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LifecyclePolicyPreviewNotFoundException) Code() string { +func (s *LifecyclePolicyPreviewNotFoundException) Code() string { return "LifecyclePolicyPreviewNotFoundException" } // Message returns the exception's message. -func (s LifecyclePolicyPreviewNotFoundException) Message() string { +func (s *LifecyclePolicyPreviewNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6506,22 +6775,22 @@ func (s LifecyclePolicyPreviewNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LifecyclePolicyPreviewNotFoundException) OrigErr() error { +func (s *LifecyclePolicyPreviewNotFoundException) OrigErr() error { return nil } -func (s LifecyclePolicyPreviewNotFoundException) Error() string { +func (s *LifecyclePolicyPreviewNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LifecyclePolicyPreviewNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LifecyclePolicyPreviewNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LifecyclePolicyPreviewNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *LifecyclePolicyPreviewNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The result of the lifecycle policy preview. 
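Not part of the vendored diff, an illustrative sketch only: per the reworded message, LifecyclePolicyPreviewInProgressException can be treated as a retryable condition when starting a preview. The repository name, retry count, and delay are hypothetical.

package main

import (
	"errors"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	for attempt := 0; attempt < 5; attempt++ {
		_, err := svc.StartLifecyclePolicyPreview(&ecr.StartLifecyclePolicyPreviewInput{
			RepositoryName: aws.String("example-repo"), // hypothetical
		})

		var inProgress *ecr.LifecyclePolicyPreviewInProgressException
		if errors.As(err, &inProgress) {
			time.Sleep(5 * time.Second) // wait and try again, as the message suggests
			continue
		}
		if err != nil {
			log.Fatal(err)
		}
		break
	}
}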
@@ -6634,12 +6903,11 @@ func (s *LifecyclePolicyRuleAction) SetType(v string) *LifecyclePolicyRuleAction } // The operation did not succeed because it would have exceeded a service limit -// for your account. For more information, see Amazon ECR Default Service Limits -// (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) +// for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) // in the Amazon Elastic Container Registry User Guide. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -6657,17 +6925,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6675,22 +6943,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing a filter on a ListImages operation. @@ -6912,13 +7180,22 @@ func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput type PutImageInput struct { _ struct{} `type:"structure"` + // The image digest of the image manifest corresponding to the image. + ImageDigest *string `locationName:"imageDigest" type:"string"` + // The image manifest corresponding to the image to be uploaded. // // ImageManifest is a required field ImageManifest *string `locationName:"imageManifest" min:"1" type:"string" required:"true"` + // The media type of the image manifest. If you push an image manifest that + // does not contain the mediaType field, you must specify the imageManifestMediaType + // in the request. + ImageManifestMediaType *string `locationName:"imageManifestMediaType" type:"string"` + // The tag to associate with the image. This parameter is required for images - // that use the Docker Image Manifest V2 Schema 2 or OCI formats. + // that use the Docker Image Manifest V2 Schema 2 or Open Container Initiative + // (OCI) formats. 
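Not part of the vendored diff, an illustrative sketch only: the new ImageDigest and ImageManifestMediaType fields shown above let a caller push a manifest whose media type is not embedded in the manifest body. The repository name, tag, and manifest contents are hypothetical placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	manifest := `{"schemaVersion": 2}` // hypothetical, abbreviated manifest body

	_, err := svc.PutImage(&ecr.PutImageInput{
		RepositoryName: aws.String("example-repo"), // hypothetical
		ImageManifest:  aws.String(manifest),
		// Required when the manifest body itself omits the mediaType field.
		ImageManifestMediaType: aws.String("application/vnd.oci.image.manifest.v1+json"),
		ImageTag:               aws.String("v1.0.0"), // hypothetical
		// ImageDigest can also be supplied (see SetImageDigest) to pin the digest
		// of the manifest being pushed.
	})
	if err != nil {
		log.Fatal(err)
	}
}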
ImageTag *string `locationName:"imageTag" min:"1" type:"string"` // The AWS account ID associated with the registry that contains the repository @@ -6967,12 +7244,24 @@ func (s *PutImageInput) Validate() error { return nil } +// SetImageDigest sets the ImageDigest field's value. +func (s *PutImageInput) SetImageDigest(v string) *PutImageInput { + s.ImageDigest = &v + return s +} + // SetImageManifest sets the ImageManifest field's value. func (s *PutImageInput) SetImageManifest(v string) *PutImageInput { s.ImageManifest = &v return s } +// SetImageManifestMediaType sets the ImageManifestMediaType field's value. +func (s *PutImageInput) SetImageManifestMediaType(v string) *PutImageInput { + s.ImageManifestMediaType = &v + return s +} + // SetImageTag sets the ImageTag field's value. func (s *PutImageInput) SetImageTag(v string) *PutImageInput { s.ImageTag = &v @@ -7342,6 +7631,62 @@ func (s *PutLifecyclePolicyOutput) SetRepositoryName(v string) *PutLifecyclePoli return s } +// The manifest list is referencing an image that does not exist. +type ReferencedImagesNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ReferencedImagesNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReferencedImagesNotFoundException) GoString() string { + return s.String() +} + +func newErrorReferencedImagesNotFoundException(v protocol.ResponseMetadata) error { + return &ReferencedImagesNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ReferencedImagesNotFoundException) Code() string { + return "ReferencedImagesNotFoundException" +} + +// Message returns the exception's message. +func (s *ReferencedImagesNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ReferencedImagesNotFoundException) OrigErr() error { + return nil +} + +func (s *ReferencedImagesNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ReferencedImagesNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ReferencedImagesNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + // An object representing a repository. type Repository struct { _ struct{} `type:"structure"` @@ -7349,6 +7694,10 @@ type Repository struct { // The date and time, in JavaScript date format, when the repository was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + // The encryption configuration for the repository. This determines how the + // contents of your repository are encrypted at rest. + EncryptionConfiguration *EncryptionConfiguration `locationName:"encryptionConfiguration" type:"structure"` + // The image scanning configuration for a repository. ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` @@ -7367,8 +7716,8 @@ type Repository struct { // The name of the repository. RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` - // The URI for the repository. 
You can use this URI for Docker push or pull - // operations. + // The URI for the repository. You can use this URI for container image push + // and pull operations. RepositoryUri *string `locationName:"repositoryUri" type:"string"` } @@ -7388,6 +7737,12 @@ func (s *Repository) SetCreatedAt(v time.Time) *Repository { return s } +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *Repository) SetEncryptionConfiguration(v *EncryptionConfiguration) *Repository { + s.EncryptionConfiguration = v + return s +} + // SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. func (s *Repository) SetImageScanningConfiguration(v *ImageScanningConfiguration) *Repository { s.ImageScanningConfiguration = v @@ -7426,8 +7781,8 @@ func (s *Repository) SetRepositoryUri(v string) *Repository { // The specified repository already exists in the specified registry. type RepositoryAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -7445,17 +7800,17 @@ func (s RepositoryAlreadyExistsException) GoString() string { func newErrorRepositoryAlreadyExistsException(v protocol.ResponseMetadata) error { return &RepositoryAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryAlreadyExistsException) Code() string { +func (s *RepositoryAlreadyExistsException) Code() string { return "RepositoryAlreadyExistsException" } // Message returns the exception's message. -func (s RepositoryAlreadyExistsException) Message() string { +func (s *RepositoryAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7463,29 +7818,29 @@ func (s RepositoryAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryAlreadyExistsException) OrigErr() error { +func (s *RepositoryAlreadyExistsException) OrigErr() error { return nil } -func (s RepositoryAlreadyExistsException) Error() string { +func (s *RepositoryAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The specified repository contains images. To delete a repository that contains // images, you must force the deletion with the force parameter. type RepositoryNotEmptyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. 
Message_ *string `locationName:"message" type:"string"` @@ -7503,17 +7858,17 @@ func (s RepositoryNotEmptyException) GoString() string { func newErrorRepositoryNotEmptyException(v protocol.ResponseMetadata) error { return &RepositoryNotEmptyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryNotEmptyException) Code() string { +func (s *RepositoryNotEmptyException) Code() string { return "RepositoryNotEmptyException" } // Message returns the exception's message. -func (s RepositoryNotEmptyException) Message() string { +func (s *RepositoryNotEmptyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7521,29 +7876,29 @@ func (s RepositoryNotEmptyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryNotEmptyException) OrigErr() error { +func (s *RepositoryNotEmptyException) OrigErr() error { return nil } -func (s RepositoryNotEmptyException) Error() string { +func (s *RepositoryNotEmptyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryNotEmptyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryNotEmptyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryNotEmptyException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryNotEmptyException) RequestID() string { + return s.RespMetadata.RequestID } // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. type RepositoryNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -7561,17 +7916,17 @@ func (s RepositoryNotFoundException) GoString() string { func newErrorRepositoryNotFoundException(v protocol.ResponseMetadata) error { return &RepositoryNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryNotFoundException) Code() string { +func (s *RepositoryNotFoundException) Code() string { return "RepositoryNotFoundException" } // Message returns the exception's message. -func (s RepositoryNotFoundException) Message() string { +func (s *RepositoryNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7579,29 +7934,29 @@ func (s RepositoryNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryNotFoundException) OrigErr() error { +func (s *RepositoryNotFoundException) OrigErr() error { return nil } -func (s RepositoryNotFoundException) Error() string { +func (s *RepositoryNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s RepositoryNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The specified repository and registry combination does not have an associated // repository policy. type RepositoryPolicyNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -7619,17 +7974,17 @@ func (s RepositoryPolicyNotFoundException) GoString() string { func newErrorRepositoryPolicyNotFoundException(v protocol.ResponseMetadata) error { return &RepositoryPolicyNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RepositoryPolicyNotFoundException) Code() string { +func (s *RepositoryPolicyNotFoundException) Code() string { return "RepositoryPolicyNotFoundException" } // Message returns the exception's message. -func (s RepositoryPolicyNotFoundException) Message() string { +func (s *RepositoryPolicyNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7637,29 +7992,29 @@ func (s RepositoryPolicyNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RepositoryPolicyNotFoundException) OrigErr() error { +func (s *RepositoryPolicyNotFoundException) OrigErr() error { return nil } -func (s RepositoryPolicyNotFoundException) Error() string { +func (s *RepositoryPolicyNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RepositoryPolicyNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RepositoryPolicyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RepositoryPolicyNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *RepositoryPolicyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The specified image scan could not be found. Ensure that image scanning is // enabled on the repository and try again. type ScanNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7676,17 +8031,17 @@ func (s ScanNotFoundException) GoString() string { func newErrorScanNotFoundException(v protocol.ResponseMetadata) error { return &ScanNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ScanNotFoundException) Code() string { +func (s *ScanNotFoundException) Code() string { return "ScanNotFoundException" } // Message returns the exception's message. -func (s ScanNotFoundException) Message() string { +func (s *ScanNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7694,28 +8049,28 @@ func (s ScanNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ScanNotFoundException) OrigErr() error { +func (s *ScanNotFoundException) OrigErr() error { return nil } -func (s ScanNotFoundException) Error() string { +func (s *ScanNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ScanNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ScanNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ScanNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ScanNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // These errors are usually caused by a server-side issue. type ServerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -7733,17 +8088,17 @@ func (s ServerException) GoString() string { func newErrorServerException(v protocol.ResponseMetadata) error { return &ServerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServerException) Code() string { +func (s *ServerException) Code() string { return "ServerException" } // Message returns the exception's message. -func (s ServerException) Message() string { +func (s *ServerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7751,22 +8106,22 @@ func (s ServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServerException) OrigErr() error { +func (s *ServerException) OrigErr() error { return nil } -func (s ServerException) Error() string { +func (s *ServerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServerException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServerException) RequestID() string { + return s.RespMetadata.RequestID } type SetRepositoryPolicyInput struct { @@ -7778,7 +8133,7 @@ type SetRepositoryPolicyInput struct { Force *bool `locationName:"force" type:"boolean"` // The JSON repository policy text to apply to the repository. For more information, - // see Amazon ECR Repository Policy Examples (https://docs.aws.amazon.com/AmazonECR/latest/userguide/RepositoryPolicyExamples.html) + // see Amazon ECR Repository Policies (https://docs.aws.amazon.com/AmazonECR/latest/userguide/repository-policy-examples.html) // in the Amazon Elastic Container Registry User Guide. // // PolicyText is a required field @@ -8233,8 +8588,8 @@ func (s TagResourceOutput) GoString() string { // The list of tags on the repository is over the limit. The maximum number // of tags that can be applied to a repository is 50. 
type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8251,17 +8606,73 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyTagsException) OrigErr() error { + return nil +} + +func (s *TooManyTagsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The image is of a type that cannot be scanned. +type UnsupportedImageTypeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s UnsupportedImageTypeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnsupportedImageTypeException) GoString() string { + return s.String() +} + +func newErrorUnsupportedImageTypeException(v protocol.ResponseMetadata) error { + return &UnsupportedImageTypeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnsupportedImageTypeException) Code() string { + return "UnsupportedImageTypeException" +} + +// Message returns the exception's message. +func (s *UnsupportedImageTypeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8269,22 +8680,22 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { +func (s *UnsupportedImageTypeException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *UnsupportedImageTypeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedImageTypeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
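Not part of the vendored diff, an illustrative sketch only: the new UnsupportedImageTypeException surfaces when an image scan is requested for an image type that cannot be scanned. StartImageScan and its input fields are assumed from the wider ECR API rather than shown in this excerpt; the repository and tag names are hypothetical.

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	_, err := svc.StartImageScan(&ecr.StartImageScanInput{
		RepositoryName: aws.String("example-repo"), // hypothetical
		ImageId: &ecr.ImageIdentifier{
			ImageTag: aws.String("v1.0.0"), // hypothetical
		},
	})

	var unsupported *ecr.UnsupportedImageTypeException
	if errors.As(err, &unsupported) {
		fmt.Println("image cannot be scanned:", unsupported.Message())
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}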
-func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedImageTypeException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -8364,12 +8775,14 @@ type UploadLayerPartInput struct { // LayerPartBlob is a required field LayerPartBlob []byte `locationName:"layerPartBlob" type:"blob" required:"true"` - // The integer value of the first byte of the layer part. + // The position of the first byte of the layer part witin the overall image + // layer. // // PartFirstByte is a required field PartFirstByte *int64 `locationName:"partFirstByte" type:"long" required:"true"` - // The integer value of the last byte of the layer part. + // The position of the last byte of the layer part within the overall image + // layer. // // PartLastByte is a required field PartLastByte *int64 `locationName:"partLastByte" type:"long" required:"true"` @@ -8514,11 +8927,11 @@ func (s *UploadLayerPartOutput) SetUploadId(v string) *UploadLayerPartOutput { return s } -// The upload could not be found, or the specified upload id is not valid for +// The upload could not be found, or the specified upload ID is not valid for // this repository. type UploadNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message associated with the exception. Message_ *string `locationName:"message" type:"string"` @@ -8536,17 +8949,17 @@ func (s UploadNotFoundException) GoString() string { func newErrorUploadNotFoundException(v protocol.ResponseMetadata) error { return &UploadNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UploadNotFoundException) Code() string { +func (s *UploadNotFoundException) Code() string { return "UploadNotFoundException" } // Message returns the exception's message. -func (s UploadNotFoundException) Message() string { +func (s *UploadNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8554,22 +8967,38 @@ func (s UploadNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UploadNotFoundException) OrigErr() error { +func (s *UploadNotFoundException) OrigErr() error { return nil } -func (s UploadNotFoundException) Error() string { +func (s *UploadNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UploadNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UploadNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
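Not part of the vendored diff, an illustrative sketch only: PartFirstByte and PartLastByte are byte positions within the overall layer, so a chunked upload advances them by the part size returned from InitiateLayerUpload (assumed from the wider ECR API, not shown in this excerpt). Every part except the last must be at least 5 MiB, per LayerPartTooSmallException. Names are hypothetical.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecr"
)

func main() {
	svc := ecr.New(session.Must(session.NewSession()))

	var layerBlob []byte // layer contents, e.g. read from disk (omitted here)

	initResp, err := svc.InitiateLayerUpload(&ecr.InitiateLayerUploadInput{
		RepositoryName: aws.String("example-repo"), // hypothetical
	})
	if err != nil {
		log.Fatal(err)
	}

	partSize := aws.Int64Value(initResp.PartSize)
	for offset := int64(0); offset < int64(len(layerBlob)); {
		end := offset + partSize
		if end > int64(len(layerBlob)) {
			end = int64(len(layerBlob))
		}
		_, err := svc.UploadLayerPart(&ecr.UploadLayerPartInput{
			RepositoryName: aws.String("example-repo"), // hypothetical
			UploadId:       initResp.UploadId,
			PartFirstByte:  aws.Int64(offset),
			PartLastByte:   aws.Int64(end - 1), // position of the last byte, inclusive
			LayerPartBlob:  layerBlob[offset:end],
		})
		if err != nil {
			log.Fatal(err)
		}
		offset = end
	}
}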
-func (s UploadNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *UploadNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +const ( + // EncryptionTypeAes256 is a EncryptionType enum value + EncryptionTypeAes256 = "AES256" + + // EncryptionTypeKms is a EncryptionType enum value + EncryptionTypeKms = "KMS" +) + +// EncryptionType_Values returns all elements of the EncryptionType enum +func EncryptionType_Values() []string { + return []string{ + EncryptionTypeAes256, + EncryptionTypeKms, + } } const ( @@ -8592,11 +9021,30 @@ const ( FindingSeverityUndefined = "UNDEFINED" ) +// FindingSeverity_Values returns all elements of the FindingSeverity enum +func FindingSeverity_Values() []string { + return []string{ + FindingSeverityInformational, + FindingSeverityLow, + FindingSeverityMedium, + FindingSeverityHigh, + FindingSeverityCritical, + FindingSeverityUndefined, + } +} + const ( // ImageActionTypeExpire is a ImageActionType enum value ImageActionTypeExpire = "EXPIRE" ) +// ImageActionType_Values returns all elements of the ImageActionType enum +func ImageActionType_Values() []string { + return []string{ + ImageActionTypeExpire, + } +} + const ( // ImageFailureCodeInvalidImageDigest is a ImageFailureCode enum value ImageFailureCodeInvalidImageDigest = "InvalidImageDigest" @@ -8612,8 +9060,27 @@ const ( // ImageFailureCodeMissingDigestAndTag is a ImageFailureCode enum value ImageFailureCodeMissingDigestAndTag = "MissingDigestAndTag" + + // ImageFailureCodeImageReferencedByManifestList is a ImageFailureCode enum value + ImageFailureCodeImageReferencedByManifestList = "ImageReferencedByManifestList" + + // ImageFailureCodeKmsError is a ImageFailureCode enum value + ImageFailureCodeKmsError = "KmsError" ) +// ImageFailureCode_Values returns all elements of the ImageFailureCode enum +func ImageFailureCode_Values() []string { + return []string{ + ImageFailureCodeInvalidImageDigest, + ImageFailureCodeInvalidImageTag, + ImageFailureCodeImageTagDoesNotMatchDigest, + ImageFailureCodeImageNotFound, + ImageFailureCodeMissingDigestAndTag, + ImageFailureCodeImageReferencedByManifestList, + ImageFailureCodeKmsError, + } +} + const ( // ImageTagMutabilityMutable is a ImageTagMutability enum value ImageTagMutabilityMutable = "MUTABLE" @@ -8622,6 +9089,14 @@ const ( ImageTagMutabilityImmutable = "IMMUTABLE" ) +// ImageTagMutability_Values returns all elements of the ImageTagMutability enum +func ImageTagMutability_Values() []string { + return []string{ + ImageTagMutabilityMutable, + ImageTagMutabilityImmutable, + } +} + const ( // LayerAvailabilityAvailable is a LayerAvailability enum value LayerAvailabilityAvailable = "AVAILABLE" @@ -8630,6 +9105,14 @@ const ( LayerAvailabilityUnavailable = "UNAVAILABLE" ) +// LayerAvailability_Values returns all elements of the LayerAvailability enum +func LayerAvailability_Values() []string { + return []string{ + LayerAvailabilityAvailable, + LayerAvailabilityUnavailable, + } +} + const ( // LayerFailureCodeInvalidLayerDigest is a LayerFailureCode enum value LayerFailureCodeInvalidLayerDigest = "InvalidLayerDigest" @@ -8638,6 +9121,14 @@ const ( LayerFailureCodeMissingLayerDigest = "MissingLayerDigest" ) +// LayerFailureCode_Values returns all elements of the LayerFailureCode enum +func LayerFailureCode_Values() []string { + return []string{ + LayerFailureCodeInvalidLayerDigest, + LayerFailureCodeMissingLayerDigest, + } +} + const ( // LifecyclePolicyPreviewStatusInProgress is a LifecyclePolicyPreviewStatus enum 
value LifecyclePolicyPreviewStatusInProgress = "IN_PROGRESS" @@ -8652,6 +9143,16 @@ const ( LifecyclePolicyPreviewStatusFailed = "FAILED" ) +// LifecyclePolicyPreviewStatus_Values returns all elements of the LifecyclePolicyPreviewStatus enum +func LifecyclePolicyPreviewStatus_Values() []string { + return []string{ + LifecyclePolicyPreviewStatusInProgress, + LifecyclePolicyPreviewStatusComplete, + LifecyclePolicyPreviewStatusExpired, + LifecyclePolicyPreviewStatusFailed, + } +} + const ( // ScanStatusInProgress is a ScanStatus enum value ScanStatusInProgress = "IN_PROGRESS" @@ -8663,6 +9164,15 @@ const ( ScanStatusFailed = "FAILED" ) +// ScanStatus_Values returns all elements of the ScanStatus enum +func ScanStatus_Values() []string { + return []string{ + ScanStatusInProgress, + ScanStatusComplete, + ScanStatusFailed, + } +} + const ( // TagStatusTagged is a TagStatus enum value TagStatusTagged = "TAGGED" @@ -8673,3 +9183,12 @@ const ( // TagStatusAny is a TagStatus enum value TagStatusAny = "ANY" ) + +// TagStatus_Values returns all elements of the TagStatus enum +func TagStatus_Values() []string { + return []string{ + TagStatusTagged, + TagStatusUntagged, + TagStatusAny, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go index d970974bc..3c3843ae3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/doc.go @@ -3,12 +3,13 @@ // Package ecr provides the client and types for making API // requests to Amazon EC2 Container Registry. // -// Amazon Elastic Container Registry (Amazon ECR) is a managed Docker registry -// service. Customers can use the familiar Docker CLI to push, pull, and manage -// images. Amazon ECR provides a secure, scalable, and reliable registry. Amazon -// ECR supports private Docker repositories with resource-based permissions +// Amazon Elastic Container Registry (Amazon ECR) is a managed container image +// registry service. Customers can use the familiar Docker CLI, or their preferred +// client, to push, pull, and manage images. Amazon ECR provides a secure, scalable, +// and reliable registry for your Docker or Open Container Initiative (OCI) +// images. Amazon ECR supports private repositories with resource-based permissions // using IAM so that specific users or Amazon EC2 instances can access repositories -// and images. Developers can use the Docker CLI to author and manage images. +// and images. // // See https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go index 732d865bf..819101326 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go @@ -21,6 +21,13 @@ const ( // the manifest or image tag after the last push. ErrCodeImageAlreadyExistsException = "ImageAlreadyExistsException" + // ErrCodeImageDigestDoesNotMatchException for service response error code + // "ImageDigestDoesNotMatchException". + // + // The specified image digest does not match the digest that Amazon ECR calculated + // for the image. + ErrCodeImageDigestDoesNotMatchException = "ImageDigestDoesNotMatchException" + // ErrCodeImageNotFoundException for service response error code // "ImageNotFoundException". // @@ -63,6 +70,12 @@ const ( // characters. 
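Not part of the vendored diff, an illustrative sketch only: the generated *_Values helpers added above enumerate the valid strings for each enum, which is convenient for validating user input (for example, in a provider schema) without hard-coding the values.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ecr"
)

// validEncryptionType reports whether v is one of the EncryptionType enum values.
func validEncryptionType(v string) bool {
	for _, allowed := range ecr.EncryptionType_Values() {
		if v == allowed {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(validEncryptionType("KMS"), validEncryptionType("SSE-S3")) // true false
}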
ErrCodeInvalidTagParameterException = "InvalidTagParameterException" + // ErrCodeKmsException for service response error code + // "KmsException". + // + // The operation failed due to a KMS exception. + ErrCodeKmsException = "KmsException" + // ErrCodeLayerAlreadyExistsException for service response error code // "LayerAlreadyExistsException". // @@ -98,8 +111,8 @@ const ( // ErrCodeLifecyclePolicyPreviewInProgressException for service response error code // "LifecyclePolicyPreviewInProgressException". // - // The previous lifecycle policy preview request has not completed. Please try - // again later. + // The previous lifecycle policy preview request has not completed. Wait and + // try again. ErrCodeLifecyclePolicyPreviewInProgressException = "LifecyclePolicyPreviewInProgressException" // ErrCodeLifecyclePolicyPreviewNotFoundException for service response error code @@ -112,11 +125,16 @@ const ( // "LimitExceededException". // // The operation did not succeed because it would have exceeded a service limit - // for your account. For more information, see Amazon ECR Default Service Limits - // (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service_limits.html) + // for your account. For more information, see Amazon ECR Service Quotas (https://docs.aws.amazon.com/AmazonECR/latest/userguide/service-quotas.html) // in the Amazon Elastic Container Registry User Guide. ErrCodeLimitExceededException = "LimitExceededException" + // ErrCodeReferencedImagesNotFoundException for service response error code + // "ReferencedImagesNotFoundException". + // + // The manifest list is referencing an image that does not exist. + ErrCodeReferencedImagesNotFoundException = "ReferencedImagesNotFoundException" + // ErrCodeRepositoryAlreadyExistsException for service response error code // "RepositoryAlreadyExistsException". // @@ -164,10 +182,16 @@ const ( // of tags that can be applied to a repository is 50. ErrCodeTooManyTagsException = "TooManyTagsException" + // ErrCodeUnsupportedImageTypeException for service response error code + // "UnsupportedImageTypeException". + // + // The image is of a type that cannot be scanned. + ErrCodeUnsupportedImageTypeException = "UnsupportedImageTypeException" + // ErrCodeUploadNotFoundException for service response error code // "UploadNotFoundException". // - // The upload could not be found, or the specified upload id is not valid for + // The upload could not be found, or the specified upload ID is not valid for // this repository. 
ErrCodeUploadNotFoundException = "UploadNotFoundException" ) @@ -175,12 +199,14 @@ const ( var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "EmptyUploadException": newErrorEmptyUploadException, "ImageAlreadyExistsException": newErrorImageAlreadyExistsException, + "ImageDigestDoesNotMatchException": newErrorImageDigestDoesNotMatchException, "ImageNotFoundException": newErrorImageNotFoundException, "ImageTagAlreadyExistsException": newErrorImageTagAlreadyExistsException, "InvalidLayerException": newErrorInvalidLayerException, "InvalidLayerPartException": newErrorInvalidLayerPartException, "InvalidParameterException": newErrorInvalidParameterException, "InvalidTagParameterException": newErrorInvalidTagParameterException, + "KmsException": newErrorKmsException, "LayerAlreadyExistsException": newErrorLayerAlreadyExistsException, "LayerInaccessibleException": newErrorLayerInaccessibleException, "LayerPartTooSmallException": newErrorLayerPartTooSmallException, @@ -189,6 +215,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "LifecyclePolicyPreviewInProgressException": newErrorLifecyclePolicyPreviewInProgressException, "LifecyclePolicyPreviewNotFoundException": newErrorLifecyclePolicyPreviewNotFoundException, "LimitExceededException": newErrorLimitExceededException, + "ReferencedImagesNotFoundException": newErrorReferencedImagesNotFoundException, "RepositoryAlreadyExistsException": newErrorRepositoryAlreadyExistsException, "RepositoryNotEmptyException": newErrorRepositoryNotEmptyException, "RepositoryNotFoundException": newErrorRepositoryNotFoundException, @@ -196,5 +223,6 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "ScanNotFoundException": newErrorScanNotFoundException, "ServerException": newErrorServerException, "TooManyTagsException": newErrorTooManyTagsException, + "UnsupportedImageTypeException": newErrorUnsupportedImageTypeException, "UploadNotFoundException": newErrorUploadNotFoundException, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go index 43855124d..c4392395c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go index 912efcd8d..d9f695785 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go @@ -89,6 +89,13 @@ func (c *ECS) CreateCapacityProviderRequest(input *CreateCapacityProviderInput) // * LimitExceededException // The limit for the resource has been exceeded. // +// * UpdateInProgressException +// There is already a current Amazon ECS container agent update in progress +// on the specified container instance. If the container agent becomes disconnected +// while it is in a transitional stage, such as PENDING or STAGING, the update +// process can get stuck in that state. However, when the agent reconnects, +// it resumes where it stopped previously. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/CreateCapacityProvider func (c *ECS) CreateCapacityProvider(input *CreateCapacityProviderInput) (*CreateCapacityProviderOutput, error) { req, out := c.CreateCapacityProviderRequest(input) @@ -256,7 +263,7 @@ func (c *ECS) CreateServiceRequest(input *CreateServiceInput) (req *request.Requ // Runs and maintains a desired number of tasks from a specified task definition. // If the number of tasks running in a service drops below the desiredCount, // Amazon ECS runs another copy of the task in the specified cluster. To update -// an existing service, see UpdateService. +// an existing service, see the UpdateService action. // // In addition to maintaining the desired count of tasks in your service, you // can optionally run your service behind one or more load balancers. The load @@ -280,7 +287,9 @@ func (c *ECS) CreateServiceRequest(input *CreateServiceInput) (req *request.Requ // // * DAEMON - The daemon scheduling strategy deploys exactly one task on // each active container instance that meets all of the task placement constraints -// that you specify in your cluster. When using this strategy, you don't +// that you specify in your cluster. The service scheduler also evaluates +// the task placement constraints for running tasks and will stop tasks that +// do not meet the placement constraints. When using this strategy, you don't // need to specify a desired number of tasks, a task placement strategy, // or use Service Auto Scaling policies. For more information, see Service // Scheduler Concepts (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) @@ -706,6 +715,108 @@ func (c *ECS) DeleteAttributesWithContext(ctx aws.Context, input *DeleteAttribut return out, req.Send() } +const opDeleteCapacityProvider = "DeleteCapacityProvider" + +// DeleteCapacityProviderRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCapacityProvider operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteCapacityProvider for more information on using the DeleteCapacityProvider +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteCapacityProviderRequest method. +// req, resp := client.DeleteCapacityProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/DeleteCapacityProvider +func (c *ECS) DeleteCapacityProviderRequest(input *DeleteCapacityProviderInput) (req *request.Request, output *DeleteCapacityProviderOutput) { + op := &request.Operation{ + Name: opDeleteCapacityProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCapacityProviderInput{} + } + + output = &DeleteCapacityProviderOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteCapacityProvider API operation for Amazon EC2 Container Service. +// +// Deletes the specified capacity provider. 
+// +// The FARGATE and FARGATE_SPOT capacity providers are reserved and cannot be +// deleted. You can disassociate them from a cluster using either the PutClusterCapacityProviders +// API or by deleting the cluster. +// +// Prior to a capacity provider being deleted, the capacity provider must be +// removed from the capacity provider strategy from all services. The UpdateService +// API can be used to remove a capacity provider from a service's capacity provider +// strategy. When updating a service, the forceNewDeployment option can be used +// to ensure that any tasks using the Amazon EC2 instance capacity provided +// by the capacity provider are transitioned to use the capacity from the remaining +// capacity providers. Only capacity providers that are not associated with +// a cluster can be deleted. To remove a capacity provider from a cluster, you +// can either use PutClusterCapacityProviders or delete the cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Service's +// API operation DeleteCapacityProvider for usage and error information. +// +// Returned Error Types: +// * ServerException +// These errors are usually caused by a server issue. +// +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that is not valid. +// +// * InvalidParameterException +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecs-2014-11-13/DeleteCapacityProvider +func (c *ECS) DeleteCapacityProvider(input *DeleteCapacityProviderInput) (*DeleteCapacityProviderOutput, error) { + req, out := c.DeleteCapacityProviderRequest(input) + return out, req.Send() +} + +// DeleteCapacityProviderWithContext is the same as DeleteCapacityProvider with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCapacityProvider for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECS) DeleteCapacityProviderWithContext(ctx aws.Context, input *DeleteCapacityProviderInput, opts ...request.Option) (*DeleteCapacityProviderOutput, error) { + req, out := c.DeleteCapacityProviderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteCluster = "DeleteCluster" // DeleteClusterRequest generates a "aws/request.Request" representing the @@ -5132,24 +5243,32 @@ func (c *ECS) UpdateServiceRequest(input *UpdateServiceInput) (req *request.Requ // UpdateService API operation for Amazon EC2 Container Service. // +// +// Updating the task placement strategies and constraints on an Amazon ECS service +// remains in preview and is a Beta Service as defined by and subject to the +// Beta Service Participation Service Terms located at https://aws.amazon.com/service-terms +// (https://aws.amazon.com/service-terms) ("Beta Terms"). These Beta Terms apply +// to your participation in this preview. 
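Not part of the vendored diff, an illustrative sketch only: calling the new ECS DeleteCapacityProvider operation after the provider has been removed from every capacity provider strategy, as the documentation above requires. The DeleteCapacityProviderInput field name is an assumption (the input struct is not shown in this excerpt), and the provider name is hypothetical.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	svc := ecs.New(session.Must(session.NewSession()))

	// Per the operation documentation, first remove the provider from every
	// service's capacity provider strategy (UpdateService with forceNewDeployment)
	// or disassociate it from the cluster with PutClusterCapacityProviders.
	_, err := svc.DeleteCapacityProvider(&ecs.DeleteCapacityProviderInput{
		CapacityProvider: aws.String("example-capacity-provider"), // assumed field name; hypothetical value
	})
	if err != nil {
		log.Fatal(err)
	}
}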
+// // Modifies the parameters of a service. // // For services using the rolling update (ECS) deployment controller, the desired -// count, deployment configuration, network configuration, or task definition -// used can be updated. +// count, deployment configuration, network configuration, task placement constraints +// and strategies, or task definition used can be updated. // // For services using the blue/green (CODE_DEPLOY) deployment controller, only -// the desired count, deployment configuration, and health check grace period -// can be updated using this API. If the network configuration, platform version, -// or task definition need to be updated, a new AWS CodeDeploy deployment should -// be created. For more information, see CreateDeployment (https://docs.aws.amazon.com/codedeploy/latest/APIReference/API_CreateDeployment.html) +// the desired count, deployment configuration, task placement constraints and +// strategies, and health check grace period can be updated using this API. +// If the network configuration, platform version, or task definition need to +// be updated, a new AWS CodeDeploy deployment should be created. For more information, +// see CreateDeployment (https://docs.aws.amazon.com/codedeploy/latest/APIReference/API_CreateDeployment.html) // in the AWS CodeDeploy API Reference. // // For services using an external deployment controller, you can update only -// the desired count and health check grace period using this API. If the launch -// type, load balancer, network configuration, platform version, or task definition -// need to be updated, you should create a new task set. For more information, -// see CreateTaskSet. +// the desired count, task placement constraints and strategies, and health +// check grace period using this API. If the launch type, load balancer, network +// configuration, platform version, or task definition need to be updated, you +// should create a new task set. For more information, see CreateTaskSet. // // You can add to or subtract from the number of instantiations of a task definition // in a service by specifying the cluster that the service is running in and @@ -5516,8 +5635,8 @@ func (c *ECS) UpdateTaskSetWithContext(ctx aws.Context, input *UpdateTaskSetInpu // You do not have authorization to perform the requested action. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5534,17 +5653,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5552,22 +5671,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
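A small sketch of the in-place updates the comment above allows for the rolling-update (ECS) deployment controller. It assumes UpdateServiceInput exposes a PlacementStrategy field in this SDK version, as the preview note implies; all names are hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	svc := ecs.New(session.Must(session.NewSession()))

	// Desired count and (per the preview note) placement strategies can be
	// changed without registering a new task definition or task set.
	out, err := svc.UpdateService(&ecs.UpdateServiceInput{
		Cluster:      aws.String("example-cluster"), // hypothetical
		Service:      aws.String("example-service"),
		DesiredCount: aws.Int64(4),
		PlacementStrategy: []*ecs.PlacementStrategy{
			{Type: aws.String("spread"), Field: aws.String("attribute:ecs.availability-zone")},
		},
	})
	if err != nil {
		fmt.Println("UpdateService failed:", err)
		return
	}
	fmt.Println("service status:", aws.StringValue(out.Service.Status))
}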
-func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing a container instance or task attachment. @@ -5756,8 +5875,8 @@ func (s *Attribute) SetValue(v string) *Attribute { // of a resource with ListAttributes. You can remove existing attributes on // a resource with DeleteAttributes. type AttributeLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5774,17 +5893,17 @@ func (s AttributeLimitExceededException) GoString() string { func newErrorAttributeLimitExceededException(v protocol.ResponseMetadata) error { return &AttributeLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AttributeLimitExceededException) Code() string { +func (s *AttributeLimitExceededException) Code() string { return "AttributeLimitExceededException" } // Message returns the exception's message. -func (s AttributeLimitExceededException) Message() string { +func (s *AttributeLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5792,22 +5911,22 @@ func (s AttributeLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AttributeLimitExceededException) OrigErr() error { +func (s *AttributeLimitExceededException) OrigErr() error { return nil } -func (s AttributeLimitExceededException) Error() string { +func (s *AttributeLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AttributeLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AttributeLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AttributeLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *AttributeLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The details of the Auto Scaling group for the capacity provider. @@ -5955,8 +6074,8 @@ func (s *AwsVpcConfiguration) SetSubnets(v []*string) *AwsVpcConfiguration { // Your AWS account has been blocked. For more information, contact AWS Support // (http://aws.amazon.com/contact-us/). 
type BlockedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5973,17 +6092,17 @@ func (s BlockedException) GoString() string { func newErrorBlockedException(v protocol.ResponseMetadata) error { return &BlockedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BlockedException) Code() string { +func (s *BlockedException) Code() string { return "BlockedException" } // Message returns the exception's message. -func (s BlockedException) Message() string { +func (s *BlockedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5991,22 +6110,22 @@ func (s BlockedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BlockedException) OrigErr() error { +func (s *BlockedException) OrigErr() error { return nil } -func (s BlockedException) Error() string { +func (s *BlockedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BlockedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BlockedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BlockedException) RequestID() string { - return s.respMetadata.RequestID +func (s *BlockedException) RequestID() string { + return s.RespMetadata.RequestID } // The details of a capacity provider. @@ -6023,7 +6142,8 @@ type CapacityProvider struct { Name *string `locationName:"name" type:"string"` // The current status of the capacity provider. Only capacity providers in an - // ACTIVE state can be used in a cluster. + // ACTIVE state can be used in a cluster. When a capacity provider is successfully + // deleted, it will have an INACTIVE status. Status *string `locationName:"status" type:"string" enum:"CapacityProviderStatus"` // The metadata that you apply to the capacity provider to help you categorize @@ -6053,6 +6173,28 @@ type CapacityProvider struct { // cannot edit or delete tag keys or values with this prefix. Tags with this // prefix do not count against your tags per resource limit. Tags []*Tag `locationName:"tags" type:"list"` + + // The update status of the capacity provider. The following are the possible + // states that will be returned. + // + // DELETE_IN_PROGRESS + // + // The capacity provider is in the process of being deleted. + // + // DELETE_COMPLETE + // + // The capacity provider has been successfully deleted and will have an INACTIVE + // status. + // + // DELETE_FAILED + // + // The capacity provider was unable to be deleted. The update status reason + // will provide further details about why the delete failed. + UpdateStatus *string `locationName:"updateStatus" type:"string" enum:"CapacityProviderUpdateStatus"` + + // The update status reason. This provides further details about the update + // status for the capacity provider. + UpdateStatusReason *string `locationName:"updateStatusReason" type:"string"` } // String returns the string representation @@ -6095,6 +6237,18 @@ func (s *CapacityProvider) SetTags(v []*Tag) *CapacityProvider { return s } +// SetUpdateStatus sets the UpdateStatus field's value. 
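The new UpdateStatus / UpdateStatusReason fields above are what you would poll after requesting a delete. A rough sketch, assuming DescribeCapacityProviders is available in this SDK version (it is not shown in this hunk) and a hypothetical provider name:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	svc := ecs.New(session.Must(session.NewSession()))

	out, err := svc.DescribeCapacityProviders(&ecs.DescribeCapacityProvidersInput{
		CapacityProviders: []*string{aws.String("old-cp")}, // hypothetical name
	})
	if err != nil {
		fmt.Println("DescribeCapacityProviders failed:", err)
		return
	}
	for _, cp := range out.CapacityProviders {
		// UpdateStatus is DELETE_IN_PROGRESS, DELETE_COMPLETE, or DELETE_FAILED;
		// a successfully deleted provider ends up with an INACTIVE status.
		fmt.Printf("%s: status=%s updateStatus=%s reason=%s\n",
			aws.StringValue(cp.Name),
			aws.StringValue(cp.Status),
			aws.StringValue(cp.UpdateStatus),
			aws.StringValue(cp.UpdateStatusReason))
	}
}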
+func (s *CapacityProvider) SetUpdateStatus(v string) *CapacityProvider { + s.UpdateStatus = &v + return s +} + +// SetUpdateStatusReason sets the UpdateStatusReason field's value. +func (s *CapacityProvider) SetUpdateStatusReason(v string) *CapacityProvider { + s.UpdateStatusReason = &v + return s +} + // The details of a capacity provider strategy. type CapacityProviderStrategyItem struct { _ struct{} `type:"structure"` @@ -6104,7 +6258,7 @@ type CapacityProviderStrategyItem struct { // can have a base defined. Base *int64 `locationName:"base" type:"integer"` - // The short name or full Amazon Resource Name (ARN) of the capacity provider. + // The short name of the capacity provider. // // CapacityProvider is a required field CapacityProvider *string `locationName:"capacityProvider" type:"string" required:"true"` @@ -6166,8 +6320,8 @@ func (s *CapacityProviderStrategyItem) SetWeight(v int64) *CapacityProviderStrat // or resource on behalf of a user that doesn't have permissions to use the // action or resource, or specifying an identifier that is not valid. type ClientException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6184,17 +6338,17 @@ func (s ClientException) GoString() string { func newErrorClientException(v protocol.ResponseMetadata) error { return &ClientException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClientException) Code() string { +func (s *ClientException) Code() string { return "ClientException" } // Message returns the exception's message. -func (s ClientException) Message() string { +func (s *ClientException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6202,22 +6356,22 @@ func (s ClientException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClientException) OrigErr() error { +func (s *ClientException) OrigErr() error { return nil } -func (s ClientException) Error() string { +func (s *ClientException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClientException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClientException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClientException) RequestID() string { - return s.respMetadata.RequestID +func (s *ClientException) RequestID() string { + return s.RespMetadata.RequestID } // A regional grouping of one or more container instances on which you can run @@ -6462,8 +6616,8 @@ func (s *Cluster) SetTags(v []*Tag) *Cluster { // deregister the container instances before you can delete the cluster. For // more information, see DeregisterContainerInstance. 
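To make the base/weight semantics of CapacityProviderStrategyItem concrete, here is a minimal helper (illustrative only, not part of the patch) that pins the first two tasks to FARGATE and splits the rest 1:4 toward FARGATE_SPOT. Only one item in a strategy may define a base.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// spotHeavyStrategy returns a capacity provider strategy: the base of 2 is
// always placed on FARGATE, and additional tasks are distributed by weight.
func spotHeavyStrategy() []*ecs.CapacityProviderStrategyItem {
	return []*ecs.CapacityProviderStrategyItem{
		{
			CapacityProvider: aws.String("FARGATE"),
			Base:             aws.Int64(2),
			Weight:           aws.Int64(1),
		},
		{
			CapacityProvider: aws.String("FARGATE_SPOT"),
			Weight:           aws.Int64(4),
		},
	}
}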
type ClusterContainsContainerInstancesException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6480,17 +6634,17 @@ func (s ClusterContainsContainerInstancesException) GoString() string { func newErrorClusterContainsContainerInstancesException(v protocol.ResponseMetadata) error { return &ClusterContainsContainerInstancesException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClusterContainsContainerInstancesException) Code() string { +func (s *ClusterContainsContainerInstancesException) Code() string { return "ClusterContainsContainerInstancesException" } // Message returns the exception's message. -func (s ClusterContainsContainerInstancesException) Message() string { +func (s *ClusterContainsContainerInstancesException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6498,30 +6652,30 @@ func (s ClusterContainsContainerInstancesException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClusterContainsContainerInstancesException) OrigErr() error { +func (s *ClusterContainsContainerInstancesException) OrigErr() error { return nil } -func (s ClusterContainsContainerInstancesException) Error() string { +func (s *ClusterContainsContainerInstancesException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClusterContainsContainerInstancesException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClusterContainsContainerInstancesException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClusterContainsContainerInstancesException) RequestID() string { - return s.respMetadata.RequestID +func (s *ClusterContainsContainerInstancesException) RequestID() string { + return s.RespMetadata.RequestID } // You cannot delete a cluster that contains services. First, update the service // to reduce its desired task count to 0 and then delete the service. For more // information, see UpdateService and DeleteService. type ClusterContainsServicesException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6538,17 +6692,17 @@ func (s ClusterContainsServicesException) GoString() string { func newErrorClusterContainsServicesException(v protocol.ResponseMetadata) error { return &ClusterContainsServicesException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClusterContainsServicesException) Code() string { +func (s *ClusterContainsServicesException) Code() string { return "ClusterContainsServicesException" } // Message returns the exception's message. -func (s ClusterContainsServicesException) Message() string { +func (s *ClusterContainsServicesException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6556,28 +6710,28 @@ func (s ClusterContainsServicesException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ClusterContainsServicesException) OrigErr() error { +func (s *ClusterContainsServicesException) OrigErr() error { return nil } -func (s ClusterContainsServicesException) Error() string { +func (s *ClusterContainsServicesException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClusterContainsServicesException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClusterContainsServicesException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClusterContainsServicesException) RequestID() string { - return s.respMetadata.RequestID +func (s *ClusterContainsServicesException) RequestID() string { + return s.RespMetadata.RequestID } // You cannot delete a cluster that has active tasks. type ClusterContainsTasksException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6594,17 +6748,17 @@ func (s ClusterContainsTasksException) GoString() string { func newErrorClusterContainsTasksException(v protocol.ResponseMetadata) error { return &ClusterContainsTasksException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClusterContainsTasksException) Code() string { +func (s *ClusterContainsTasksException) Code() string { return "ClusterContainsTasksException" } // Message returns the exception's message. -func (s ClusterContainsTasksException) Message() string { +func (s *ClusterContainsTasksException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6612,29 +6766,29 @@ func (s ClusterContainsTasksException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClusterContainsTasksException) OrigErr() error { +func (s *ClusterContainsTasksException) OrigErr() error { return nil } -func (s ClusterContainsTasksException) Error() string { +func (s *ClusterContainsTasksException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClusterContainsTasksException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClusterContainsTasksException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClusterContainsTasksException) RequestID() string { - return s.respMetadata.RequestID +func (s *ClusterContainsTasksException) RequestID() string { + return s.RespMetadata.RequestID } // The specified cluster could not be found. You can view your available clusters // with ListClusters. Amazon ECS clusters are Region-specific. type ClusterNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6651,17 +6805,17 @@ func (s ClusterNotFoundException) GoString() string { func newErrorClusterNotFoundException(v protocol.ResponseMetadata) error { return &ClusterNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
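The cluster-deletion exceptions above are typically handled by switching on awserr.Error codes, which the pointer-receiver refactor in this hunk does not change for callers. A short sketch with a hypothetical cluster name:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	svc := ecs.New(session.Must(session.NewSession()))

	_, err := svc.DeleteCluster(&ecs.DeleteClusterInput{
		Cluster: aws.String("example-cluster"), // hypothetical
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case ecs.ErrCodeClusterContainsServicesException:
			fmt.Println("scale services to 0 and delete them first:", aerr.Message())
		case ecs.ErrCodeClusterContainsTasksException:
			fmt.Println("stop the running tasks first:", aerr.Message())
		case ecs.ErrCodeClusterContainsContainerInstancesException:
			fmt.Println("deregister the container instances first:", aerr.Message())
		case ecs.ErrCodeClusterNotFoundException:
			fmt.Println("no such cluster in this region:", aerr.Message())
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
	}
}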
-func (s ClusterNotFoundException) Code() string { +func (s *ClusterNotFoundException) Code() string { return "ClusterNotFoundException" } // Message returns the exception's message. -func (s ClusterNotFoundException) Message() string { +func (s *ClusterNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6669,22 +6823,22 @@ func (s ClusterNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClusterNotFoundException) OrigErr() error { +func (s *ClusterNotFoundException) OrigErr() error { return nil } -func (s ClusterNotFoundException) Error() string { +func (s *ClusterNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClusterNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClusterNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClusterNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ClusterNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The settings to use when creating a cluster. This parameter is used to enable @@ -7050,6 +7204,27 @@ type ContainerDefinition struct { // such as credential data. Environment []*KeyValuePair `locationName:"environment" type:"list"` + // A list of files containing the environment variables to pass to a container. + // This parameter maps to the --env-file option to docker run (https://docs.docker.com/engine/reference/run/). + // + // You can specify up to ten environment files. The file must have a .env file + // extension. Each line in an environment file should contain an environment + // variable in VARIABLE=VALUE format. Lines beginning with # are treated as + // comments and are ignored. For more information on the environment variable + // file syntax, see Declare default environment variables in file (https://docs.docker.com/compose/env-file/). + // + // If there are environment variables specified using the environment parameter + // in a container definition, they take precedence over the variables contained + // within an environment file. If multiple environment files are specified that + // contain the same variable, they are processed from the top down. It is recommended + // to use unique variable names. For more information, see Specifying Environment + // Variables (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) + // in the Amazon Elastic Container Service Developer Guide. + // + // This field is not valid for containers in tasks using the Fargate launch + // type. + EnvironmentFiles []*EnvironmentFile `locationName:"environmentFiles" type:"list"` + // If the essential parameter of a container is marked as true, and that container // fails or stops for any reason, all other containers that are part of the // task are stopped. If the essential parameter of a container is marked as @@ -7080,10 +7255,11 @@ type ContainerDefinition struct { // in the Amazon Elastic Container Service Developer Guide. FirelensConfiguration *FirelensConfiguration `locationName:"firelensConfiguration" type:"structure"` - // The health check command and associated configuration parameters for the - // container. 
This parameter maps to HealthCheck in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) - // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) - // and the HEALTHCHECK parameter of docker run (https://docs.docker.com/engine/reference/run/). + // The container health check command and associated configuration parameters + // for the container. This parameter maps to HealthCheck in the Create a container + // (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) section + // of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) and + // the HEALTHCHECK parameter of docker run (https://docs.docker.com/engine/reference/run/). HealthCheck *HealthCheck `locationName:"healthCheck" type:"structure"` // The hostname to use for your container. This parameter maps to Hostname in @@ -7327,10 +7503,19 @@ type ContainerDefinition struct { // give up and not start. This results in the task transitioning to a STOPPED // state. // - // For tasks using the EC2 launch type, the container instances require at least - // version 1.26.0 of the container agent to enable a container start timeout - // value. However, we recommend using the latest container agent version. For - // information about checking your agent version and updating to the latest + // For tasks using the Fargate launch type, this parameter requires that the + // task or service uses platform version 1.3.0 or later. If this parameter is + // not specified, the default value of 3 minutes is used. + // + // For tasks using the EC2 launch type, if the startTimeout parameter is not + // specified, the value set for the Amazon ECS container agent configuration + // variable ECS_CONTAINER_START_TIMEOUT is used by default. If neither the startTimeout + // parameter or the ECS_CONTAINER_START_TIMEOUT agent configuration variable + // are set, then the default values of 3 minutes for Linux containers and 8 + // minutes on Windows containers are used. Your container instances require + // at least version 1.26.0 of the container agent to enable a container start + // timeout value. However, we recommend using the latest container agent version. + // For information about checking your agent version and updating to the latest // version, see Updating the Amazon ECS Container Agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) // in the Amazon Elastic Container Service Developer Guide. If you are using // an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 @@ -7339,24 +7524,25 @@ type ContainerDefinition struct { // agent and ecs-init. For more information, see Amazon ECS-optimized Linux // AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) // in the Amazon Elastic Container Service Developer Guide. - // - // For tasks using the Fargate launch type, the task or service requires platform - // version 1.3.0 or later. StartTimeout *int64 `locationName:"startTimeout" type:"integer"` // Time duration (in seconds) to wait before the container is forcefully killed // if it doesn't exit normally on its own. // - // For tasks using the Fargate launch type, the max stopTimeout value is 2 minutes - // and the task or service requires platform version 1.3.0 or later. - // - // For tasks using the EC2 launch type, the stop timeout value for the container - // takes precedence over the ECS_CONTAINER_STOP_TIMEOUT container agent configuration - // parameter, if used. 
Container instances require at least version 1.26.0 of - // the container agent to enable a container stop timeout value. However, we - // recommend using the latest container agent version. For information about - // checking your agent version and updating to the latest version, see Updating - // the Amazon ECS Container Agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) + // For tasks using the Fargate launch type, the task or service requires platform + // version 1.3.0 or later. The max stop timeout value is 120 seconds and if + // the parameter is not specified, the default value of 30 seconds is used. + // + // For tasks using the EC2 launch type, if the stopTimeout parameter is not + // specified, the value set for the Amazon ECS container agent configuration + // variable ECS_CONTAINER_STOP_TIMEOUT is used by default. If neither the stopTimeout + // parameter or the ECS_CONTAINER_STOP_TIMEOUT agent configuration variable + // are set, then the default values of 30 seconds for Linux containers and 30 + // seconds on Windows containers are used. Your container instances require + // at least version 1.26.0 of the container agent to enable a container stop + // timeout value. However, we recommend using the latest container agent version. + // For information about checking your agent version and updating to the latest + // version, see Updating the Amazon ECS Container Agent (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html) // in the Amazon Elastic Container Service Developer Guide. If you are using // an Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 // of the ecs-init package. If your container instances are launched from version @@ -7379,8 +7565,9 @@ type ContainerDefinition struct { // namespaced kernel parameters as well as the containers. SystemControls []*SystemControl `locationName:"systemControls" type:"list"` - // A list of ulimits to set in the container. This parameter maps to Ulimits - // in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) + // A list of ulimits to set in the container. If a ulimit value is specified + // in a task definition, it will override the default values set by Docker. + // This parameter maps to Ulimits in the Create a container (https://docs.docker.com/engine/api/v1.35/#operation/ContainerCreate) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) // and the --ulimit option to docker run (https://docs.docker.com/engine/reference/run/). // Valid naming values are displayed in the Ulimit data type. This parameter @@ -7451,6 +7638,16 @@ func (s *ContainerDefinition) Validate() error { } } } + if s.EnvironmentFiles != nil { + for i, v := range s.EnvironmentFiles { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EnvironmentFiles", i), err.(request.ErrInvalidParams)) + } + } + } if s.ExtraHosts != nil { for i, v := range s.ExtraHosts { if v == nil { @@ -7583,6 +7780,12 @@ func (s *ContainerDefinition) SetEnvironment(v []*KeyValuePair) *ContainerDefini return s } +// SetEnvironmentFiles sets the EnvironmentFiles field's value. +func (s *ContainerDefinition) SetEnvironmentFiles(v []*EnvironmentFile) *ContainerDefinition { + s.EnvironmentFiles = v + return s +} + // SetEssential sets the Essential field's value. 
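A minimal sketch of the new EnvironmentFiles field on ContainerDefinition described above, using the Validate checks added in this hunk. Names and the S3 object ARN are hypothetical.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// containerWithEnvFile loads environment variables from a .env object in S3;
// anything set directly in Environment takes precedence over the file.
func containerWithEnvFile() (*ecs.ContainerDefinition, error) {
	def := &ecs.ContainerDefinition{
		Name:  aws.String("app"), // hypothetical
		Image: aws.String("nginx:latest"),
		EnvironmentFiles: []*ecs.EnvironmentFile{
			{
				Type:  aws.String("s3"),
				Value: aws.String("arn:aws:s3:::example-bucket/app.env"), // hypothetical ARN
			},
		},
		Environment: []*ecs.KeyValuePair{
			{Name: aws.String("STAGE"), Value: aws.String("prod")},
		},
	}
	if err := def.Validate(); err != nil {
		return nil, err
	}
	return def, nil
}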
func (s *ContainerDefinition) SetEssential(v bool) *ContainerDefinition { s.Essential = &v @@ -8098,6 +8301,10 @@ type ContainerOverride struct { // You must also specify a container name. Environment []*KeyValuePair `locationName:"environment" type:"list"` + // A list of files containing the environment variables to pass to a container, + // instead of the value from the container definition. + EnvironmentFiles []*EnvironmentFile `locationName:"environmentFiles" type:"list"` + // The hard limit (in MiB) of memory to present to the container, instead of // the default value from the task definition. If your container attempts to // exceed the memory specified here, the container is killed. You must also @@ -8132,6 +8339,16 @@ func (s ContainerOverride) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ContainerOverride) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ContainerOverride"} + if s.EnvironmentFiles != nil { + for i, v := range s.EnvironmentFiles { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EnvironmentFiles", i), err.(request.ErrInvalidParams)) + } + } + } if s.ResourceRequirements != nil { for i, v := range s.ResourceRequirements { if v == nil { @@ -8167,6 +8384,12 @@ func (s *ContainerOverride) SetEnvironment(v []*KeyValuePair) *ContainerOverride return s } +// SetEnvironmentFiles sets the EnvironmentFiles field's value. +func (s *ContainerOverride) SetEnvironmentFiles(v []*EnvironmentFile) *ContainerOverride { + s.EnvironmentFiles = v + return s +} + // SetMemory sets the Memory field's value. func (s *ContainerOverride) SetMemory(v int64) *ContainerOverride { s.Memory = &v @@ -8399,8 +8622,7 @@ func (s *CreateCapacityProviderOutput) SetCapacityProvider(v *CapacityProvider) type CreateClusterInput struct { _ struct{} `type:"structure"` - // The short name or full Amazon Resource Name (ARN) of one or more capacity - // providers to associate with the cluster. + // The short name of one or more capacity providers to associate with the cluster. // // If specifying a capacity provider that uses an Auto Scaling group, the capacity // provider must already be created and not already associated with another @@ -8631,13 +8853,16 @@ type CreateServiceInput struct { // The period of time, in seconds, that the Amazon ECS service scheduler should // ignore unhealthy Elastic Load Balancing target health checks after a task - // has first started. This is only valid if your service is configured to use - // a load balancer. If your service's tasks take a while to start and respond - // to Elastic Load Balancing health checks, you can specify a health check grace - // period of up to 2,147,483,647 seconds. During that time, the ECS service - // scheduler ignores health check status. This grace period can prevent the - // ECS service scheduler from marking tasks as unhealthy and stopping them before - // they have time to come up. + // has first started. This is only used when your service is configured to use + // a load balancer. If your service has a load balancer defined and you don't + // specify a health check grace period value, the default value of 0 is used. + // + // If your service's tasks take a while to start and respond to Elastic Load + // Balancing health checks, you can specify a health check grace period of up + // to 2,147,483,647 seconds. 
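ContainerOverride.EnvironmentFiles (added above) lets a one-off run swap in a different env file without touching the registered task definition. An illustrative sketch with hypothetical names:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	svc := ecs.New(session.Must(session.NewSession()))

	out, err := svc.RunTask(&ecs.RunTaskInput{
		Cluster:        aws.String("example-cluster"),  // hypothetical
		TaskDefinition: aws.String("example-taskdef:3"),
		Overrides: &ecs.TaskOverride{
			ContainerOverrides: []*ecs.ContainerOverride{
				{
					Name: aws.String("app"),
					// Override the env file for this run only.
					EnvironmentFiles: []*ecs.EnvironmentFile{
						{Type: aws.String("s3"), Value: aws.String("arn:aws:s3:::example-bucket/oneoff.env")},
					},
				},
			},
		},
	})
	if err != nil {
		fmt.Println("RunTask failed:", err)
		return
	}
	for _, t := range out.Tasks {
		fmt.Println("started task:", aws.StringValue(t.TaskArn))
	}
}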
During that time, the Amazon ECS service scheduler + // ignores health check status. This grace period can prevent the service scheduler + // from marking tasks as unhealthy and stopping them before they have time to + // come up. HealthCheckGracePeriodSeconds *int64 `locationName:"healthCheckGracePeriodSeconds" type:"integer"` // The launch type on which to run your service. For more information, see Amazon @@ -8764,11 +8989,13 @@ type CreateServiceInput struct { // // * DAEMON-The daemon scheduling strategy deploys exactly one task on each // active container instance that meets all of the task placement constraints - // that you specify in your cluster. When you're using this strategy, you - // don't need to specify a desired number of tasks, a task placement strategy, - // or use Service Auto Scaling policies. Tasks using the Fargate launch type - // or the CODE_DEPLOY or EXTERNAL deployment controller types don't support - // the DAEMON scheduling strategy. + // that you specify in your cluster. The service scheduler also evaluates + // the task placement constraints for running tasks and will stop tasks that + // do not meet the placement constraints. When you're using this strategy, + // you don't need to specify a desired number of tasks, a task placement + // strategy, or use Service Auto Scaling policies. Tasks using the Fargate + // launch type or the CODE_DEPLOY or EXTERNAL deployment controller types + // don't support the DAEMON scheduling strategy. SchedulingStrategy *string `locationName:"schedulingStrategy" type:"string" enum:"SchedulingStrategy"` // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, @@ -9467,6 +9694,68 @@ func (s *DeleteAttributesOutput) SetAttributes(v []*Attribute) *DeleteAttributes return s } +type DeleteCapacityProviderInput struct { + _ struct{} `type:"structure"` + + // The short name or full Amazon Resource Name (ARN) of the capacity provider + // to delete. + // + // CapacityProvider is a required field + CapacityProvider *string `locationName:"capacityProvider" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCapacityProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCapacityProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCapacityProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCapacityProviderInput"} + if s.CapacityProvider == nil { + invalidParams.Add(request.NewErrParamRequired("CapacityProvider")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCapacityProvider sets the CapacityProvider field's value. +func (s *DeleteCapacityProviderInput) SetCapacityProvider(v string) *DeleteCapacityProviderInput { + s.CapacityProvider = &v + return s +} + +type DeleteCapacityProviderOutput struct { + _ struct{} `type:"structure"` + + // The details of a capacity provider. + CapacityProvider *CapacityProvider `locationName:"capacityProvider" type:"structure"` +} + +// String returns the string representation +func (s DeleteCapacityProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCapacityProviderOutput) GoString() string { + return s.String() +} + +// SetCapacityProvider sets the CapacityProvider field's value. 
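Since the grace-period doc above notes that an unset value defaults to 0 when a load balancer is attached, a brief sketch of setting it explicitly on CreateService; all names and the target group ARN are hypothetical.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	svc := ecs.New(session.Must(session.NewSession()))

	// Give slow-starting tasks two minutes before ELB health checks count.
	out, err := svc.CreateService(&ecs.CreateServiceInput{
		Cluster:        aws.String("example-cluster"), // hypothetical
		ServiceName:    aws.String("web"),
		TaskDefinition: aws.String("web:1"),
		DesiredCount:   aws.Int64(2),
		LoadBalancers: []*ecs.LoadBalancer{
			{
				TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/web/abc123"), // hypothetical
				ContainerName:  aws.String("web"),
				ContainerPort:  aws.Int64(8080),
			},
		},
		HealthCheckGracePeriodSeconds: aws.Int64(120),
	})
	if err != nil {
		fmt.Println("CreateService failed:", err)
		return
	}
	fmt.Println("created:", aws.StringValue(out.Service.ServiceArn))
}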
+func (s *DeleteCapacityProviderOutput) SetCapacityProvider(v *CapacityProvider) *DeleteCapacityProviderOutput { + s.CapacityProvider = v + return s +} + type DeleteClusterInput struct { _ struct{} `type:"structure"` @@ -11061,26 +11350,84 @@ func (s *DockerVolumeConfiguration) SetScope(v string) *DockerVolumeConfiguratio return s } +// The authorization configuration details for the Amazon EFS file system. +type EFSAuthorizationConfig struct { + _ struct{} `type:"structure"` + + // The Amazon EFS access point ID to use. If an access point is specified, the + // root directory value specified in the EFSVolumeConfiguration will be relative + // to the directory set for the access point. If an access point is used, transit + // encryption must be enabled in the EFSVolumeConfiguration. For more information, + // see Working with Amazon EFS Access Points (https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html) + // in the Amazon Elastic File System User Guide. + AccessPointId *string `locationName:"accessPointId" type:"string"` + + // Whether or not to use the Amazon ECS task IAM role defined in a task definition + // when mounting the Amazon EFS file system. If enabled, transit encryption + // must be enabled in the EFSVolumeConfiguration. If this parameter is omitted, + // the default value of DISABLED is used. For more information, see Using Amazon + // EFS Access Points (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/efs-volumes.html#efs-volume-accesspoints) + // in the Amazon Elastic Container Service Developer Guide. + Iam *string `locationName:"iam" type:"string" enum:"EFSAuthorizationConfigIAM"` +} + +// String returns the string representation +func (s EFSAuthorizationConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EFSAuthorizationConfig) GoString() string { + return s.String() +} + +// SetAccessPointId sets the AccessPointId field's value. +func (s *EFSAuthorizationConfig) SetAccessPointId(v string) *EFSAuthorizationConfig { + s.AccessPointId = &v + return s +} + +// SetIam sets the Iam field's value. +func (s *EFSAuthorizationConfig) SetIam(v string) *EFSAuthorizationConfig { + s.Iam = &v + return s +} + // This parameter is specified when you are using an Amazon Elastic File System -// (Amazon EFS) file storage. Amazon EFS file systems are only supported when -// you are using the EC2 launch type. -// -// EFSVolumeConfiguration remains in preview and is a Beta Service as defined -// by and subject to the Beta Service Participation Service Terms located at -// https://aws.amazon.com/service-terms (https://aws.amazon.com/service-terms) -// ("Beta Terms"). These Beta Terms apply to your participation in this preview -// of EFSVolumeConfiguration. +// file system for task storage. For more information, see Amazon EFS Volumes +// (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/efs-volumes.html) +// in the Amazon Elastic Container Service Developer Guide. type EFSVolumeConfiguration struct { _ struct{} `type:"structure"` + // The authorization configuration details for the Amazon EFS file system. + AuthorizationConfig *EFSAuthorizationConfig `locationName:"authorizationConfig" type:"structure"` + // The Amazon EFS file system ID to use. // // FileSystemId is a required field FileSystemId *string `locationName:"fileSystemId" type:"string" required:"true"` // The directory within the Amazon EFS file system to mount as the root directory - // inside the host. + // inside the host. 
If this parameter is omitted, the root of the Amazon EFS + // volume will be used. Specifying / will have the same effect as omitting this + // parameter. RootDirectory *string `locationName:"rootDirectory" type:"string"` + + // Whether or not to enable encryption for Amazon EFS data in transit between + // the Amazon ECS host and the Amazon EFS server. Transit encryption must be + // enabled if Amazon EFS IAM authorization is used. If this parameter is omitted, + // the default value of DISABLED is used. For more information, see Encrypting + // Data in Transit (https://docs.aws.amazon.com/efs/latest/ug/encryption-in-transit.html) + // in the Amazon Elastic File System User Guide. + TransitEncryption *string `locationName:"transitEncryption" type:"string" enum:"EFSTransitEncryption"` + + // The port to use when sending encrypted data between the Amazon ECS host and + // the Amazon EFS server. If you do not specify a transit encryption port, it + // will use the port selection strategy that the Amazon EFS mount helper uses. + // For more information, see EFS Mount Helper (https://docs.aws.amazon.com/efs/latest/ug/efs-mount-helper.html) + // in the Amazon Elastic File System User Guide. + TransitEncryptionPort *int64 `locationName:"transitEncryptionPort" type:"integer"` } // String returns the string representation @@ -11106,6 +11453,12 @@ func (s *EFSVolumeConfiguration) Validate() error { return nil } +// SetAuthorizationConfig sets the AuthorizationConfig field's value. +func (s *EFSVolumeConfiguration) SetAuthorizationConfig(v *EFSAuthorizationConfig) *EFSVolumeConfiguration { + s.AuthorizationConfig = v + return s +} + // SetFileSystemId sets the FileSystemId field's value. func (s *EFSVolumeConfiguration) SetFileSystemId(v string) *EFSVolumeConfiguration { s.FileSystemId = &v @@ -11118,6 +11471,88 @@ func (s *EFSVolumeConfiguration) SetRootDirectory(v string) *EFSVolumeConfigurat return s } +// SetTransitEncryption sets the TransitEncryption field's value. +func (s *EFSVolumeConfiguration) SetTransitEncryption(v string) *EFSVolumeConfiguration { + s.TransitEncryption = &v + return s +} + +// SetTransitEncryptionPort sets the TransitEncryptionPort field's value. +func (s *EFSVolumeConfiguration) SetTransitEncryptionPort(v int64) *EFSVolumeConfiguration { + s.TransitEncryptionPort = &v + return s +} + +// A list of files containing the environment variables to pass to a container. +// You can specify up to ten environment files. The file must have a .env file +// extension. Each line in an environment file should contain an environment +// variable in VARIABLE=VALUE format. Lines beginning with # are treated as +// comments and are ignored. For more information on the environment variable +// file syntax, see Declare default environment variables in file (https://docs.docker.com/compose/env-file/). +// +// If there are environment variables specified using the environment parameter +// in a container definition, they take precedence over the variables contained +// within an environment file. If multiple environment files are specified that +// contain the same variable, they are processed from the top down. It is recommended +// to use unique variable names. For more information, see Specifying Environment +// Variables (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html) +// in the Amazon Elastic Container Service Developer Guide. +// +// This field is not valid for containers in tasks using the Fargate launch +// type. 
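A small sketch of the new EFS fields using the setters added in this hunk: mounting through an access point with the task IAM role, which per the docs above requires transit encryption. File system and access point IDs are hypothetical.

package example

import (
	"github.com/aws/aws-sdk-go/service/ecs"
)

// efsVolume builds an EFS volume configuration for a task volume.
func efsVolume() *ecs.EFSVolumeConfiguration {
	auth := &ecs.EFSAuthorizationConfig{}
	auth.SetAccessPointId("fsap-0123456789abcdef0") // hypothetical IDs
	auth.SetIam("ENABLED")

	cfg := &ecs.EFSVolumeConfiguration{}
	cfg.SetFileSystemId("fs-0123456789abcdef0")
	cfg.SetRootDirectory("/") // same effect as omitting it
	cfg.SetTransitEncryption("ENABLED")
	cfg.SetAuthorizationConfig(auth)
	return cfg
}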
+type EnvironmentFile struct { + _ struct{} `type:"structure"` + + // The file type to use. The only supported value is s3. + // + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"EnvironmentFileType"` + + // The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment + // variable file. + // + // Value is a required field + Value *string `locationName:"value" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnvironmentFile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnvironmentFile) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnvironmentFile) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnvironmentFile"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetType sets the Type field's value. +func (s *EnvironmentFile) SetType(v string) *EnvironmentFile { + s.Type = &v + return s +} + +// SetValue sets the Value field's value. +func (s *EnvironmentFile) SetValue(v string) *EnvironmentFile { + s.Value = &v + return s +} + // A failed resource. type Failure struct { _ struct{} `type:"structure"` @@ -11222,6 +11657,36 @@ func (s *FirelensConfiguration) SetType(v string) *FirelensConfiguration { // that exist in the container image (such as those specified in a parent image // or from the image's Dockerfile). // +// You can view the health status of both individual containers and a task with +// the DescribeTasks API operation or when viewing the task details in the console. +// +// The following describes the possible healthStatus values for a container: +// +// * HEALTHY-The container health check has passed successfully. +// +// * UNHEALTHY-The container health check has failed. +// +// * UNKNOWN-The container health check is being evaluated or there is no +// container health check defined. +// +// The following describes the possible healthStatus values for a task. The +// container health check status of nonessential containers do not have an effect +// on the health status of a task. +// +// * HEALTHY-All essential containers within the task have passed their health +// checks. +// +// * UNHEALTHY-One or more essential containers have failed their health +// check. +// +// * UNKNOWN-The essential containers within the task are still having their +// health checks evaluated or there are no container health checks defined. +// +// If a task is run manually, and not as part of a service, the task will continue +// its lifecycle regardless of its health status. For tasks that are part of +// a service, if the task reports as unhealthy then the task will be stopped +// and the service scheduler will replace it. +// // The following are notes about container health check support: // // * Container health checks require version 1.17.0 or greater of the Amazon @@ -11514,8 +11979,8 @@ func (s *InferenceAcceleratorOverride) SetDeviceType(v string) *InferenceAcceler // The specified parameter is invalid. Review the available parameters for the // API request. 
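To tie the healthStatus discussion above to the request shape, a minimal sketch that attaches a Docker-style health check to a container definition; the command and thresholds are illustrative values.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// withHealthCheck adds a container health check; an essential container that
// reports UNHEALTHY makes the task UNHEALTHY, and unhealthy service tasks are
// stopped and replaced by the scheduler.
func withHealthCheck(def *ecs.ContainerDefinition) *ecs.ContainerDefinition {
	def.HealthCheck = &ecs.HealthCheck{
		// CMD-SHELL runs the command with the container's default shell.
		Command:     []*string{aws.String("CMD-SHELL"), aws.String("curl -f http://localhost/ || exit 1")},
		Interval:    aws.Int64(30), // seconds between checks
		Timeout:     aws.Int64(5),
		Retries:     aws.Int64(3),
		StartPeriod: aws.Int64(60), // grace period before failures count
	}
	return def
}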
type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11532,17 +11997,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11550,22 +12015,22 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The Linux capabilities for the container that are added to or dropped from @@ -11584,8 +12049,9 @@ type KernelCapabilities struct { // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.35/) // and the --cap-add option to docker run (https://docs.docker.com/engine/reference/run/). // - // If you are using tasks that use the Fargate launch type, the add parameter - // is not supported. + // The SYS_PTRACE capability is supported for tasks that use the Fargate launch + // type if they are also using platform version 1.4.0. The other capabilities + // are not supported for any platform versions. // // Valid values: "ALL" | "AUDIT_CONTROL" | "AUDIT_WRITE" | "BLOCK_SUSPEND" | // "CHOWN" | "DAC_OVERRIDE" | "DAC_READ_SEARCH" | "FOWNER" | "FSETID" | "IPC_LOCK" @@ -11673,8 +12139,8 @@ func (s *KeyValuePair) SetValue(v string) *KeyValuePair { // The limit for the resource has been exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11691,17 +12157,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. 
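The KernelCapabilities change above narrows Fargate support to adding SYS_PTRACE on platform version 1.4.0. A short illustrative helper (not part of the patch):

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// fargateDebugCaps adds SYS_PTRACE, the only capability the updated docs list
// as addable for Fargate tasks, and only on platform version 1.4.0 or later.
func fargateDebugCaps(def *ecs.ContainerDefinition) *ecs.ContainerDefinition {
	def.LinuxParameters = &ecs.LinuxParameters{
		Capabilities: &ecs.KernelCapabilities{
			Add: []*string{aws.String("SYS_PTRACE")},
		},
	}
	return def
}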
-func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11709,22 +12175,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Linux-specific options that are applied to the container, such as Linux KernelCapabilities. @@ -11734,8 +12200,9 @@ type LinuxParameters struct { // The Linux capabilities for the container that are added to or dropped from // the default configuration provided by Docker. // - // If you are using tasks that use the Fargate launch type, capabilities is - // supported but the add parameter is not supported. + // For tasks that use the Fargate launch type, capabilities is supported for + // all platform versions but the add parameter is only supported if using platform + // version 1.4.0 or later. Capabilities *KernelCapabilities `locationName:"capabilities" type:"structure"` // Any host devices to expose to the container. This parameter maps to Devices @@ -11897,7 +12364,7 @@ type ListAccountSettingsInput struct { // returns up to 10 results and a nextToken value if applicable. MaxResults *int64 `locationName:"maxResults" type:"integer"` - // The resource name you want to list the account settings for. + // The name of the account setting you want to list the settings for. Name *string `locationName:"name" type:"string" enum:"SettingName"` // The nextToken value returned from a ListAccountSettings request indicating @@ -12884,8 +13351,10 @@ func (s *ListTasksOutput) SetTaskArns(v []*string) *ListTasksOutput { return s } -// Details on the load balancer or load balancers to use with a service or task -// set. +// The load balancer configuration to use with a service or task set. +// +// For specific notes and restrictions regarding the use of load balancers with +// services and task sets, see the CreateService and CreateTaskSet actions. type LoadBalancer struct { _ struct{} `type:"structure"` @@ -12905,15 +13374,15 @@ type LoadBalancer struct { // // A load balancer name is only specified when using a Classic Load Balancer. // If you are using an Application Load Balancer or a Network Load Balancer - // this should be omitted. + // the load balancer name parameter should be omitted. LoadBalancerName *string `locationName:"loadBalancerName" type:"string"` // The full Amazon Resource Name (ARN) of the Elastic Load Balancing target // group or groups associated with a service or task set. // // A target group ARN is only specified when using an Application Load Balancer - // or Network Load Balancer. If you are using a Classic Load Balancer this should - // be omitted. + // or Network Load Balancer. 
If you are using a Classic Load Balancer the target + // group ARN should be omitted. // // For services using the ECS deployment controller, you can specify one or // multiple target groups. For more information, see Registering Multiple Target @@ -13191,8 +13660,8 @@ func (s *ManagedScaling) SetTargetCapacity(v int64) *ManagedScaling { // with an update. This could be because the agent running on the container // instance is an older or custom version that does not use our version information. type MissingVersionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13209,17 +13678,17 @@ func (s MissingVersionException) GoString() string { func newErrorMissingVersionException(v protocol.ResponseMetadata) error { return &MissingVersionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MissingVersionException) Code() string { +func (s *MissingVersionException) Code() string { return "MissingVersionException" } // Message returns the exception's message. -func (s MissingVersionException) Message() string { +func (s *MissingVersionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13227,22 +13696,22 @@ func (s MissingVersionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MissingVersionException) OrigErr() error { +func (s *MissingVersionException) OrigErr() error { return nil } -func (s MissingVersionException) Error() string { +func (s *MissingVersionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MissingVersionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MissingVersionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MissingVersionException) RequestID() string { - return s.respMetadata.RequestID +func (s *MissingVersionException) RequestID() string { + return s.RespMetadata.RequestID } // Details on a volume mount point that is used in a container definition. @@ -13432,8 +13901,8 @@ func (s *NetworkInterface) SetPrivateIpv4Address(v string) *NetworkInterface { // be because the agent is already running the latest version, or it is so old // that there is no update path to the current version. type NoUpdateAvailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13450,17 +13919,17 @@ func (s NoUpdateAvailableException) GoString() string { func newErrorNoUpdateAvailableException(v protocol.ResponseMetadata) error { return &NoUpdateAvailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoUpdateAvailableException) Code() string { +func (s *NoUpdateAvailableException) Code() string { return "NoUpdateAvailableException" } // Message returns the exception's message. 
-func (s NoUpdateAvailableException) Message() string { +func (s *NoUpdateAvailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13468,22 +13937,22 @@ func (s NoUpdateAvailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoUpdateAvailableException) OrigErr() error { +func (s *NoUpdateAvailableException) OrigErr() error { return nil } -func (s NoUpdateAvailableException) Error() string { +func (s *NoUpdateAvailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoUpdateAvailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoUpdateAvailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoUpdateAvailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoUpdateAvailableException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing a constraint on task placement. For more information, @@ -13635,8 +14104,8 @@ func (s *PlatformDevice) SetType(v string) *PlatformDevice { // The specified platform version does not satisfy the task definition's required // capabilities. type PlatformTaskDefinitionIncompatibilityException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13653,17 +14122,17 @@ func (s PlatformTaskDefinitionIncompatibilityException) GoString() string { func newErrorPlatformTaskDefinitionIncompatibilityException(v protocol.ResponseMetadata) error { return &PlatformTaskDefinitionIncompatibilityException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PlatformTaskDefinitionIncompatibilityException) Code() string { +func (s *PlatformTaskDefinitionIncompatibilityException) Code() string { return "PlatformTaskDefinitionIncompatibilityException" } // Message returns the exception's message. -func (s PlatformTaskDefinitionIncompatibilityException) Message() string { +func (s *PlatformTaskDefinitionIncompatibilityException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13671,28 +14140,28 @@ func (s PlatformTaskDefinitionIncompatibilityException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PlatformTaskDefinitionIncompatibilityException) OrigErr() error { +func (s *PlatformTaskDefinitionIncompatibilityException) OrigErr() error { return nil } -func (s PlatformTaskDefinitionIncompatibilityException) Error() string { +func (s *PlatformTaskDefinitionIncompatibilityException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PlatformTaskDefinitionIncompatibilityException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PlatformTaskDefinitionIncompatibilityException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s PlatformTaskDefinitionIncompatibilityException) RequestID() string { - return s.respMetadata.RequestID +func (s *PlatformTaskDefinitionIncompatibilityException) RequestID() string { + return s.RespMetadata.RequestID } // The specified platform version does not exist. type PlatformUnknownException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13709,17 +14178,17 @@ func (s PlatformUnknownException) GoString() string { func newErrorPlatformUnknownException(v protocol.ResponseMetadata) error { return &PlatformUnknownException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PlatformUnknownException) Code() string { +func (s *PlatformUnknownException) Code() string { return "PlatformUnknownException" } // Message returns the exception's message. -func (s PlatformUnknownException) Message() string { +func (s *PlatformUnknownException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13727,22 +14196,22 @@ func (s PlatformUnknownException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PlatformUnknownException) OrigErr() error { +func (s *PlatformUnknownException) OrigErr() error { return nil } -func (s PlatformUnknownException) Error() string { +func (s *PlatformUnknownException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PlatformUnknownException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PlatformUnknownException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PlatformUnknownException) RequestID() string { - return s.respMetadata.RequestID +func (s *PlatformUnknownException) RequestID() string { + return s.RespMetadata.RequestID } // Port mappings allow containers to access ports on the host container instance @@ -14559,8 +15028,11 @@ type RegisterTaskDefinitionInput struct { // (30 GB) in increments of 1024 (1 GB) Cpu *string `locationName:"cpu" type:"string"` - // The Amazon Resource Name (ARN) of the task execution role that the Amazon - // ECS container agent and the Docker daemon can assume. + // The Amazon Resource Name (ARN) of the task execution role that grants the + // Amazon ECS container agent permission to make AWS API calls on your behalf. + // The task execution IAM role is required depending on the requirements of + // your task. For more information, see Amazon ECS task execution IAM role (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html) + // in the Amazon Elastic Container Service Developer Guide. ExecutionRoleArn *string `locationName:"executionRoleArn" type:"string"` // You must specify a family for a task definition, which allows you to track @@ -15065,8 +15537,8 @@ func (s *Resource) SetType(v string) *Resource { // The specified resource is in-use and cannot be removed. 
type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15083,17 +15555,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15101,28 +15573,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource could not be found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15139,17 +15611,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15157,22 +15629,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The type and amount of a resource to assign to a container. The supported @@ -15676,8 +16148,8 @@ func (s *Secret) SetValueFrom(v string) *Secret { // These errors are usually caused by a server issue. type ServerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15694,17 +16166,17 @@ func (s ServerException) GoString() string { func newErrorServerException(v protocol.ResponseMetadata) error { return &ServerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServerException) Code() string { +func (s *ServerException) Code() string { return "ServerException" } // Message returns the exception's message. -func (s ServerException) Message() string { +func (s *ServerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15712,22 +16184,22 @@ func (s ServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServerException) OrigErr() error { +func (s *ServerException) OrigErr() error { return nil } -func (s ServerException) Error() string { +func (s *ServerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServerException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServerException) RequestID() string { + return s.RespMetadata.RequestID } // Details on a service within a cluster @@ -15831,9 +16303,11 @@ type Service struct { // and constraints to customize task placement decisions. // // * DAEMON-The daemon scheduling strategy deploys exactly one task on each - // container instance in your cluster. When you are using this strategy, - // do not specify a desired number of tasks or any task placement strategies. - // Fargate tasks do not support the DAEMON scheduling strategy. + // active container instance that meets all of the task placement constraints + // that you specify in your cluster. The service scheduler also evaluates + // the task placement constraints for running tasks and will stop tasks that + // do not meet the placement constraints. Fargate tasks do not support the + // DAEMON scheduling strategy. SchedulingStrategy *string `locationName:"schedulingStrategy" type:"string" enum:"SchedulingStrategy"` // The ARN that identifies the service. The ARN contains the arn:aws:ecs namespace, @@ -16123,8 +16597,8 @@ func (s *ServiceEvent) SetMessage(v string) *ServiceEvent { // The specified service is not active. You can't update a service that is inactive. // If you have previously deleted a service, you can re-create it with CreateService. 
type ServiceNotActiveException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16141,17 +16615,17 @@ func (s ServiceNotActiveException) GoString() string { func newErrorServiceNotActiveException(v protocol.ResponseMetadata) error { return &ServiceNotActiveException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceNotActiveException) Code() string { +func (s *ServiceNotActiveException) Code() string { return "ServiceNotActiveException" } // Message returns the exception's message. -func (s ServiceNotActiveException) Message() string { +func (s *ServiceNotActiveException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16159,29 +16633,29 @@ func (s ServiceNotActiveException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceNotActiveException) OrigErr() error { +func (s *ServiceNotActiveException) OrigErr() error { return nil } -func (s ServiceNotActiveException) Error() string { +func (s *ServiceNotActiveException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceNotActiveException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceNotActiveException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceNotActiveException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceNotActiveException) RequestID() string { + return s.RespMetadata.RequestID } // The specified service could not be found. You can view your available services // with ListServices. Amazon ECS services are cluster-specific and Region-specific. type ServiceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16198,17 +16672,17 @@ func (s ServiceNotFoundException) GoString() string { func newErrorServiceNotFoundException(v protocol.ResponseMetadata) error { return &ServiceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceNotFoundException) Code() string { +func (s *ServiceNotFoundException) Code() string { return "ServiceNotFoundException" } // Message returns the exception's message. -func (s ServiceNotFoundException) Message() string { +func (s *ServiceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16216,22 +16690,22 @@ func (s ServiceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceNotFoundException) OrigErr() error { +func (s *ServiceNotFoundException) OrigErr() error { return nil } -func (s ServiceNotFoundException) Error() string { +func (s *ServiceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ServiceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Details of the service registry. @@ -17223,8 +17697,8 @@ func (s TagResourceOutput) GoString() string { // instances with ListContainerInstances. Amazon ECS container instances are // cluster-specific and Region-specific. type TargetNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17241,17 +17715,17 @@ func (s TargetNotFoundException) GoString() string { func newErrorTargetNotFoundException(v protocol.ResponseMetadata) error { return &TargetNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TargetNotFoundException) Code() string { +func (s *TargetNotFoundException) Code() string { return "TargetNotFoundException" } // Message returns the exception's message. -func (s TargetNotFoundException) Message() string { +func (s *TargetNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17259,22 +17733,22 @@ func (s TargetNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TargetNotFoundException) OrigErr() error { +func (s *TargetNotFoundException) OrigErr() error { return nil } -func (s TargetNotFoundException) Error() string { +func (s *TargetNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TargetNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TargetNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TargetNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *TargetNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Details on a task in a cluster. @@ -17737,9 +18211,11 @@ type TaskDefinition struct { // (30 GB) in increments of 1024 (1 GB) Cpu *string `locationName:"cpu" type:"string"` - // The Amazon Resource Name (ARN) of the task execution role that containers - // in this task can assume. All containers in this task are granted the permissions - // that are specified in this role. + // The Amazon Resource Name (ARN) of the task execution role that grants the + // Amazon ECS container agent permission to make AWS API calls on your behalf. + // The task execution IAM role is required depending on the requirements of + // your task. For more information, see Amazon ECS task execution IAM role (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html) + // in the Amazon Elastic Container Service Developer Guide. ExecutionRoleArn *string `locationName:"executionRoleArn" type:"string"` // The name of a family that this task definition is registered to. 
Up to 255 @@ -17906,7 +18382,7 @@ type TaskDefinition struct { // The short name or full Amazon Resource Name (ARN) of the AWS Identity and // Access Management (IAM) role that grants containers in the task permission // to call AWS APIs on your behalf. For more information, see Amazon ECS Task - // Role (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_IAM_role.html) + // Role (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) // in the Amazon Elastic Container Service Developer Guide. // // IAM roles for tasks on Windows require that the -EnableTaskIAMRole option @@ -18102,8 +18578,8 @@ type TaskOverride struct { // The cpu override for the task. Cpu *string `locationName:"cpu" type:"string"` - // The Amazon Resource Name (ARN) of the task execution role that the Amazon - // ECS container agent and the Docker daemon can assume. + // The Amazon Resource Name (ARN) of the task execution IAM role override for + // the task. ExecutionRoleArn *string `locationName:"executionRoleArn" type:"string"` // The Elastic Inference accelerator override for the task. @@ -18490,8 +18966,8 @@ func (s *TaskSet) SetUpdatedAt(v time.Time) *TaskSet { // sets with DescribeTaskSets. Task sets are specific to each cluster, service // and Region. type TaskSetNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18508,17 +18984,17 @@ func (s TaskSetNotFoundException) GoString() string { func newErrorTaskSetNotFoundException(v protocol.ResponseMetadata) error { return &TaskSetNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TaskSetNotFoundException) Code() string { +func (s *TaskSetNotFoundException) Code() string { return "TaskSetNotFoundException" } // Message returns the exception's message. -func (s TaskSetNotFoundException) Message() string { +func (s *TaskSetNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18526,22 +19002,22 @@ func (s TaskSetNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TaskSetNotFoundException) OrigErr() error { +func (s *TaskSetNotFoundException) OrigErr() error { return nil } -func (s TaskSetNotFoundException) Error() string { +func (s *TaskSetNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TaskSetNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TaskSetNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TaskSetNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *TaskSetNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The container path, mount options, and size of the tmpfs mount. @@ -18682,8 +19158,8 @@ func (s *Ulimit) SetSoftLimit(v int64) *Ulimit { // The specified task is not supported in this Region. 
type UnsupportedFeatureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18700,17 +19176,17 @@ func (s UnsupportedFeatureException) GoString() string { func newErrorUnsupportedFeatureException(v protocol.ResponseMetadata) error { return &UnsupportedFeatureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedFeatureException) Code() string { +func (s *UnsupportedFeatureException) Code() string { return "UnsupportedFeatureException" } // Message returns the exception's message. -func (s UnsupportedFeatureException) Message() string { +func (s *UnsupportedFeatureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18718,22 +19194,22 @@ func (s UnsupportedFeatureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedFeatureException) OrigErr() error { +func (s *UnsupportedFeatureException) OrigErr() error { return nil } -func (s UnsupportedFeatureException) Error() string { +func (s *UnsupportedFeatureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedFeatureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedFeatureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedFeatureException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedFeatureException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -19064,8 +19540,8 @@ func (s *UpdateContainerInstancesStateOutput) SetFailures(v []*Failure) *UpdateC // process can get stuck in that state. However, when the agent reconnects, // it resumes where it stopped previously. type UpdateInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19082,17 +19558,17 @@ func (s UpdateInProgressException) GoString() string { func newErrorUpdateInProgressException(v protocol.ResponseMetadata) error { return &UpdateInProgressException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UpdateInProgressException) Code() string { +func (s *UpdateInProgressException) Code() string { return "UpdateInProgressException" } // Message returns the exception's message. -func (s UpdateInProgressException) Message() string { +func (s *UpdateInProgressException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19100,22 +19576,22 @@ func (s UpdateInProgressException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UpdateInProgressException) OrigErr() error { +func (s *UpdateInProgressException) OrigErr() error { return nil } -func (s UpdateInProgressException) Error() string { +func (s *UpdateInProgressException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s UpdateInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UpdateInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UpdateInProgressException) RequestID() string { - return s.respMetadata.RequestID +func (s *UpdateInProgressException) RequestID() string { + return s.RespMetadata.RequestID } type UpdateServiceInput struct { @@ -19124,9 +19600,28 @@ type UpdateServiceInput struct { // The capacity provider strategy to update the service to use. // // If the service is using the default capacity provider strategy for the cluster, - // the service can be updated to use one or more capacity providers. However, - // when a service is using a non-default capacity provider strategy, the service - // cannot be updated to use the cluster's default capacity provider strategy. + // the service can be updated to use one or more capacity providers as opposed + // to the default capacity provider strategy. However, when a service is using + // a capacity provider strategy that is not the default capacity provider strategy, + // the service cannot be updated to use the cluster's default capacity provider + // strategy. + // + // A capacity provider strategy consists of one or more capacity providers along + // with the base and weight to assign to them. A capacity provider must be associated + // with the cluster to be used in a capacity provider strategy. The PutClusterCapacityProviders + // API is used to associate a capacity provider with a cluster. Only capacity + // providers with an ACTIVE or UPDATING status can be used. + // + // If specifying a capacity provider that uses an Auto Scaling group, the capacity + // provider must already be created. New capacity providers can be created with + // the CreateCapacityProvider API operation. + // + // To use a AWS Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT + // capacity providers. The AWS Fargate capacity providers are available to all + // accounts and only need to be associated with a cluster to be used. + // + // The PutClusterCapacityProviders API operation is used to update the list + // of available capacity providers for a cluster after the cluster is created. CapacityProviderStrategy []*CapacityProviderStrategyItem `locationName:"capacityProviderStrategy" type:"list"` // The short name or full Amazon Resource Name (ARN) of the cluster that your @@ -19163,6 +19658,25 @@ type UpdateServiceInput struct { // An object representing the network configuration for a task or service. NetworkConfiguration *NetworkConfiguration `locationName:"networkConfiguration" type:"structure"` + // An array of task placement constraint objects to update the service to use. + // If no value is specified, the existing placement constraints for the service + // will remain unchanged. If this value is specified, it will override any existing + // placement constraints defined for the service. To remove all existing placement + // constraints, specify an empty array. + // + // You can specify a maximum of 10 constraints per task (this limit includes + // constraints in the task definition and those specified at runtime). + PlacementConstraints []*PlacementConstraint `locationName:"placementConstraints" type:"list"` + + // The task placement strategy objects to update the service to use. If no value + // is specified, the existing placement strategy for the service will remain + // unchanged. 
If this value is specified, it will override the existing placement + // strategy defined for the service. To remove an existing placement strategy, + // specify an empty object. + // + // You can specify a maximum of five strategy rules per service. + PlacementStrategy []*PlacementStrategy `locationName:"placementStrategy" type:"list"` + // The platform version on which your tasks in the service are running. A platform // version is only specified for tasks using the Fargate launch type. If a platform // version is not specified, the LATEST platform version is used by default. @@ -19263,6 +19777,18 @@ func (s *UpdateServiceInput) SetNetworkConfiguration(v *NetworkConfiguration) *U return s } +// SetPlacementConstraints sets the PlacementConstraints field's value. +func (s *UpdateServiceInput) SetPlacementConstraints(v []*PlacementConstraint) *UpdateServiceInput { + s.PlacementConstraints = v + return s +} + +// SetPlacementStrategy sets the PlacementStrategy field's value. +func (s *UpdateServiceInput) SetPlacementStrategy(v []*PlacementStrategy) *UpdateServiceInput { + s.PlacementStrategy = v + return s +} + // SetPlatformVersion sets the PlatformVersion field's value. func (s *UpdateServiceInput) SetPlatformVersion(v string) *UpdateServiceInput { s.PlatformVersion = &v @@ -19552,10 +20078,11 @@ func (s *VersionInfo) SetDockerVersion(v string) *VersionInfo { return s } -// A data volume used in a task definition. For tasks that use a Docker volume, -// specify a DockerVolumeConfiguration. For tasks that use a bind mount host -// volume, specify a host and optional sourcePath. For more information, see -// Using Data Volumes in Tasks (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html). +// A data volume used in a task definition. For tasks that use Amazon Elastic +// File System (Amazon EFS) file storage, specify an efsVolumeConfiguration. +// For tasks that use a Docker volume, specify a DockerVolumeConfiguration. +// For tasks that use a bind mount host volume, specify a host and optional +// sourcePath. For more information, see Using Data Volumes in Tasks (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_data_volumes.html). type Volume struct { _ struct{} `type:"structure"` @@ -19566,23 +20093,15 @@ type Volume struct { DockerVolumeConfiguration *DockerVolumeConfiguration `locationName:"dockerVolumeConfiguration" type:"structure"` // This parameter is specified when you are using an Amazon Elastic File System - // (Amazon EFS) file storage. Amazon EFS file systems are only supported when - // you are using the EC2 launch type. - // - // EFSVolumeConfiguration remains in preview and is a Beta Service as defined - // by and subject to the Beta Service Participation Service Terms located at - // https://aws.amazon.com/service-terms (https://aws.amazon.com/service-terms) - // ("Beta Terms"). These Beta Terms apply to your participation in this preview - // of EFSVolumeConfiguration. + // file system for task storage. EfsVolumeConfiguration *EFSVolumeConfiguration `locationName:"efsVolumeConfiguration" type:"structure"` - // This parameter is specified when you are using bind mount host volumes. Bind - // mount host volumes are supported when you are using either the EC2 or Fargate - // launch types. The contents of the host parameter determine whether your bind - // mount host volume persists on the host container instance and where it is - // stored. 
If the host parameter is empty, then the Docker daemon assigns a - // host path for your data volume. However, the data is not guaranteed to persist - // after the containers associated with it stop running. + // This parameter is specified when you are using bind mount host volumes. The + // contents of the host parameter determine whether your bind mount host volume + // persists on the host container instance and where it is stored. If the host + // parameter is empty, then the Docker daemon assigns a host path for your data + // volume. However, the data is not guaranteed to persist after the containers + // associated with it stop running. // // Windows containers can mount whole directories on the same drive as $env:ProgramData. // Windows containers cannot mount directories on a different drive, and mount @@ -19701,6 +20220,18 @@ const ( AgentUpdateStatusFailed = "FAILED" ) +// AgentUpdateStatus_Values returns all elements of the AgentUpdateStatus enum +func AgentUpdateStatus_Values() []string { + return []string{ + AgentUpdateStatusPending, + AgentUpdateStatusStaging, + AgentUpdateStatusStaged, + AgentUpdateStatusUpdating, + AgentUpdateStatusUpdated, + AgentUpdateStatusFailed, + } +} + const ( // AssignPublicIpEnabled is a AssignPublicIp enum value AssignPublicIpEnabled = "ENABLED" @@ -19709,16 +20240,62 @@ const ( AssignPublicIpDisabled = "DISABLED" ) +// AssignPublicIp_Values returns all elements of the AssignPublicIp enum +func AssignPublicIp_Values() []string { + return []string{ + AssignPublicIpEnabled, + AssignPublicIpDisabled, + } +} + const ( // CapacityProviderFieldTags is a CapacityProviderField enum value CapacityProviderFieldTags = "TAGS" ) +// CapacityProviderField_Values returns all elements of the CapacityProviderField enum +func CapacityProviderField_Values() []string { + return []string{ + CapacityProviderFieldTags, + } +} + const ( // CapacityProviderStatusActive is a CapacityProviderStatus enum value CapacityProviderStatusActive = "ACTIVE" + + // CapacityProviderStatusInactive is a CapacityProviderStatus enum value + CapacityProviderStatusInactive = "INACTIVE" +) + +// CapacityProviderStatus_Values returns all elements of the CapacityProviderStatus enum +func CapacityProviderStatus_Values() []string { + return []string{ + CapacityProviderStatusActive, + CapacityProviderStatusInactive, + } +} + +const ( + // CapacityProviderUpdateStatusDeleteInProgress is a CapacityProviderUpdateStatus enum value + CapacityProviderUpdateStatusDeleteInProgress = "DELETE_IN_PROGRESS" + + // CapacityProviderUpdateStatusDeleteComplete is a CapacityProviderUpdateStatus enum value + CapacityProviderUpdateStatusDeleteComplete = "DELETE_COMPLETE" + + // CapacityProviderUpdateStatusDeleteFailed is a CapacityProviderUpdateStatus enum value + CapacityProviderUpdateStatusDeleteFailed = "DELETE_FAILED" ) +// CapacityProviderUpdateStatus_Values returns all elements of the CapacityProviderUpdateStatus enum +func CapacityProviderUpdateStatus_Values() []string { + return []string{ + CapacityProviderUpdateStatusDeleteInProgress, + CapacityProviderUpdateStatusDeleteComplete, + CapacityProviderUpdateStatusDeleteFailed, + } +} + const ( // ClusterFieldAttachments is a ClusterField enum value ClusterFieldAttachments = "ATTACHMENTS" @@ -19733,11 +20310,28 @@ const ( ClusterFieldTags = "TAGS" ) +// ClusterField_Values returns all elements of the ClusterField enum +func ClusterField_Values() []string { + return []string{ + ClusterFieldAttachments, + ClusterFieldSettings, + ClusterFieldStatistics, + 
ClusterFieldTags, + } +} + const ( // ClusterSettingNameContainerInsights is a ClusterSettingName enum value ClusterSettingNameContainerInsights = "containerInsights" ) +// ClusterSettingName_Values returns all elements of the ClusterSettingName enum +func ClusterSettingName_Values() []string { + return []string{ + ClusterSettingNameContainerInsights, + } +} + const ( // CompatibilityEc2 is a Compatibility enum value CompatibilityEc2 = "EC2" @@ -19746,6 +20340,14 @@ const ( CompatibilityFargate = "FARGATE" ) +// Compatibility_Values returns all elements of the Compatibility enum +func Compatibility_Values() []string { + return []string{ + CompatibilityEc2, + CompatibilityFargate, + } +} + const ( // ConnectivityConnected is a Connectivity enum value ConnectivityConnected = "CONNECTED" @@ -19754,6 +20356,14 @@ const ( ConnectivityDisconnected = "DISCONNECTED" ) +// Connectivity_Values returns all elements of the Connectivity enum +func Connectivity_Values() []string { + return []string{ + ConnectivityConnected, + ConnectivityDisconnected, + } +} + const ( // ContainerConditionStart is a ContainerCondition enum value ContainerConditionStart = "START" @@ -19768,11 +20378,28 @@ const ( ContainerConditionHealthy = "HEALTHY" ) +// ContainerCondition_Values returns all elements of the ContainerCondition enum +func ContainerCondition_Values() []string { + return []string{ + ContainerConditionStart, + ContainerConditionComplete, + ContainerConditionSuccess, + ContainerConditionHealthy, + } +} + const ( // ContainerInstanceFieldTags is a ContainerInstanceField enum value ContainerInstanceFieldTags = "TAGS" ) +// ContainerInstanceField_Values returns all elements of the ContainerInstanceField enum +func ContainerInstanceField_Values() []string { + return []string{ + ContainerInstanceFieldTags, + } +} + const ( // ContainerInstanceStatusActive is a ContainerInstanceStatus enum value ContainerInstanceStatusActive = "ACTIVE" @@ -19790,6 +20417,17 @@ const ( ContainerInstanceStatusRegistrationFailed = "REGISTRATION_FAILED" ) +// ContainerInstanceStatus_Values returns all elements of the ContainerInstanceStatus enum +func ContainerInstanceStatus_Values() []string { + return []string{ + ContainerInstanceStatusActive, + ContainerInstanceStatusDraining, + ContainerInstanceStatusRegistering, + ContainerInstanceStatusDeregistering, + ContainerInstanceStatusRegistrationFailed, + } +} + const ( // DeploymentControllerTypeEcs is a DeploymentControllerType enum value DeploymentControllerTypeEcs = "ECS" @@ -19801,6 +20439,15 @@ const ( DeploymentControllerTypeExternal = "EXTERNAL" ) +// DeploymentControllerType_Values returns all elements of the DeploymentControllerType enum +func DeploymentControllerType_Values() []string { + return []string{ + DeploymentControllerTypeEcs, + DeploymentControllerTypeCodeDeploy, + DeploymentControllerTypeExternal, + } +} + const ( // DesiredStatusRunning is a DesiredStatus enum value DesiredStatusRunning = "RUNNING" @@ -19812,6 +20459,15 @@ const ( DesiredStatusStopped = "STOPPED" ) +// DesiredStatus_Values returns all elements of the DesiredStatus enum +func DesiredStatus_Values() []string { + return []string{ + DesiredStatusRunning, + DesiredStatusPending, + DesiredStatusStopped, + } +} + const ( // DeviceCgroupPermissionRead is a DeviceCgroupPermission enum value DeviceCgroupPermissionRead = "read" @@ -19823,6 +20479,59 @@ const ( DeviceCgroupPermissionMknod = "mknod" ) +// DeviceCgroupPermission_Values returns all elements of the DeviceCgroupPermission enum +func 
DeviceCgroupPermission_Values() []string { + return []string{ + DeviceCgroupPermissionRead, + DeviceCgroupPermissionWrite, + DeviceCgroupPermissionMknod, + } +} + +const ( + // EFSAuthorizationConfigIAMEnabled is a EFSAuthorizationConfigIAM enum value + EFSAuthorizationConfigIAMEnabled = "ENABLED" + + // EFSAuthorizationConfigIAMDisabled is a EFSAuthorizationConfigIAM enum value + EFSAuthorizationConfigIAMDisabled = "DISABLED" +) + +// EFSAuthorizationConfigIAM_Values returns all elements of the EFSAuthorizationConfigIAM enum +func EFSAuthorizationConfigIAM_Values() []string { + return []string{ + EFSAuthorizationConfigIAMEnabled, + EFSAuthorizationConfigIAMDisabled, + } +} + +const ( + // EFSTransitEncryptionEnabled is a EFSTransitEncryption enum value + EFSTransitEncryptionEnabled = "ENABLED" + + // EFSTransitEncryptionDisabled is a EFSTransitEncryption enum value + EFSTransitEncryptionDisabled = "DISABLED" +) + +// EFSTransitEncryption_Values returns all elements of the EFSTransitEncryption enum +func EFSTransitEncryption_Values() []string { + return []string{ + EFSTransitEncryptionEnabled, + EFSTransitEncryptionDisabled, + } +} + +const ( + // EnvironmentFileTypeS3 is a EnvironmentFileType enum value + EnvironmentFileTypeS3 = "s3" +) + +// EnvironmentFileType_Values returns all elements of the EnvironmentFileType enum +func EnvironmentFileType_Values() []string { + return []string{ + EnvironmentFileTypeS3, + } +} + const ( // FirelensConfigurationTypeFluentd is a FirelensConfigurationType enum value FirelensConfigurationTypeFluentd = "fluentd" @@ -19831,6 +20540,14 @@ const ( FirelensConfigurationTypeFluentbit = "fluentbit" ) +// FirelensConfigurationType_Values returns all elements of the FirelensConfigurationType enum +func FirelensConfigurationType_Values() []string { + return []string{ + FirelensConfigurationTypeFluentd, + FirelensConfigurationTypeFluentbit, + } +} + const ( // HealthStatusHealthy is a HealthStatus enum value HealthStatusHealthy = "HEALTHY" @@ -19842,6 +20559,15 @@ const ( HealthStatusUnknown = "UNKNOWN" ) +// HealthStatus_Values returns all elements of the HealthStatus enum +func HealthStatus_Values() []string { + return []string{ + HealthStatusHealthy, + HealthStatusUnhealthy, + HealthStatusUnknown, + } +} + const ( // IpcModeHost is a IpcMode enum value IpcModeHost = "host" @@ -19853,6 +20579,15 @@ const ( IpcModeNone = "none" ) +// IpcMode_Values returns all elements of the IpcMode enum +func IpcMode_Values() []string { + return []string{ + IpcModeHost, + IpcModeTask, + IpcModeNone, + } +} + const ( // LaunchTypeEc2 is a LaunchType enum value LaunchTypeEc2 = "EC2" @@ -19861,6 +20596,14 @@ const ( LaunchTypeFargate = "FARGATE" ) +// LaunchType_Values returns all elements of the LaunchType enum +func LaunchType_Values() []string { + return []string{ + LaunchTypeEc2, + LaunchTypeFargate, + } +} + const ( // LogDriverJsonFile is a LogDriver enum value LogDriverJsonFile = "json-file" @@ -19887,6 +20630,20 @@ const ( LogDriverAwsfirelens = "awsfirelens" ) +// LogDriver_Values returns all elements of the LogDriver enum +func LogDriver_Values() []string { + return []string{ + LogDriverJsonFile, + LogDriverSyslog, + LogDriverJournald, + LogDriverGelf, + LogDriverFluentd, + LogDriverAwslogs, + LogDriverSplunk, + LogDriverAwsfirelens, + } +} + const ( // ManagedScalingStatusEnabled is a ManagedScalingStatus enum value ManagedScalingStatusEnabled = "ENABLED" @@ -19895,6 +20652,14 @@ const ( ManagedScalingStatusDisabled = "DISABLED" ) +// ManagedScalingStatus_Values returns 
all elements of the ManagedScalingStatus enum +func ManagedScalingStatus_Values() []string { + return []string{ + ManagedScalingStatusEnabled, + ManagedScalingStatusDisabled, + } +} + const ( // ManagedTerminationProtectionEnabled is a ManagedTerminationProtection enum value ManagedTerminationProtectionEnabled = "ENABLED" @@ -19903,6 +20668,14 @@ const ( ManagedTerminationProtectionDisabled = "DISABLED" ) +// ManagedTerminationProtection_Values returns all elements of the ManagedTerminationProtection enum +func ManagedTerminationProtection_Values() []string { + return []string{ + ManagedTerminationProtectionEnabled, + ManagedTerminationProtectionDisabled, + } +} + const ( // NetworkModeBridge is a NetworkMode enum value NetworkModeBridge = "bridge" @@ -19917,6 +20690,16 @@ const ( NetworkModeNone = "none" ) +// NetworkMode_Values returns all elements of the NetworkMode enum +func NetworkMode_Values() []string { + return []string{ + NetworkModeBridge, + NetworkModeHost, + NetworkModeAwsvpc, + NetworkModeNone, + } +} + const ( // PidModeHost is a PidMode enum value PidModeHost = "host" @@ -19925,6 +20708,14 @@ const ( PidModeTask = "task" ) +// PidMode_Values returns all elements of the PidMode enum +func PidMode_Values() []string { + return []string{ + PidModeHost, + PidModeTask, + } +} + const ( // PlacementConstraintTypeDistinctInstance is a PlacementConstraintType enum value PlacementConstraintTypeDistinctInstance = "distinctInstance" @@ -19933,6 +20724,14 @@ const ( PlacementConstraintTypeMemberOf = "memberOf" ) +// PlacementConstraintType_Values returns all elements of the PlacementConstraintType enum +func PlacementConstraintType_Values() []string { + return []string{ + PlacementConstraintTypeDistinctInstance, + PlacementConstraintTypeMemberOf, + } +} + const ( // PlacementStrategyTypeRandom is a PlacementStrategyType enum value PlacementStrategyTypeRandom = "random" @@ -19944,11 +20743,27 @@ const ( PlacementStrategyTypeBinpack = "binpack" ) +// PlacementStrategyType_Values returns all elements of the PlacementStrategyType enum +func PlacementStrategyType_Values() []string { + return []string{ + PlacementStrategyTypeRandom, + PlacementStrategyTypeSpread, + PlacementStrategyTypeBinpack, + } +} + const ( // PlatformDeviceTypeGpu is a PlatformDeviceType enum value PlatformDeviceTypeGpu = "GPU" ) +// PlatformDeviceType_Values returns all elements of the PlatformDeviceType enum +func PlatformDeviceType_Values() []string { + return []string{ + PlatformDeviceTypeGpu, + } +} + const ( // PropagateTagsTaskDefinition is a PropagateTags enum value PropagateTagsTaskDefinition = "TASK_DEFINITION" @@ -19957,11 +20772,26 @@ const ( PropagateTagsService = "SERVICE" ) +// PropagateTags_Values returns all elements of the PropagateTags enum +func PropagateTags_Values() []string { + return []string{ + PropagateTagsTaskDefinition, + PropagateTagsService, + } +} + const ( // ProxyConfigurationTypeAppmesh is a ProxyConfigurationType enum value ProxyConfigurationTypeAppmesh = "APPMESH" ) +// ProxyConfigurationType_Values returns all elements of the ProxyConfigurationType enum +func ProxyConfigurationType_Values() []string { + return []string{ + ProxyConfigurationTypeAppmesh, + } +} + const ( // ResourceTypeGpu is a ResourceType enum value ResourceTypeGpu = "GPU" @@ -19970,11 +20800,26 @@ const ( ResourceTypeInferenceAccelerator = "InferenceAccelerator" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeGpu, + 
ResourceTypeInferenceAccelerator, + } +} + const ( // ScaleUnitPercent is a ScaleUnit enum value ScaleUnitPercent = "PERCENT" ) +// ScaleUnit_Values returns all elements of the ScaleUnit enum +func ScaleUnit_Values() []string { + return []string{ + ScaleUnitPercent, + } +} + const ( // SchedulingStrategyReplica is a SchedulingStrategy enum value SchedulingStrategyReplica = "REPLICA" @@ -19983,6 +20828,14 @@ const ( SchedulingStrategyDaemon = "DAEMON" ) +// SchedulingStrategy_Values returns all elements of the SchedulingStrategy enum +func SchedulingStrategy_Values() []string { + return []string{ + SchedulingStrategyReplica, + SchedulingStrategyDaemon, + } +} + const ( // ScopeTask is a Scope enum value ScopeTask = "task" @@ -19991,11 +20844,26 @@ const ( ScopeShared = "shared" ) +// Scope_Values returns all elements of the Scope enum +func Scope_Values() []string { + return []string{ + ScopeTask, + ScopeShared, + } +} + const ( // ServiceFieldTags is a ServiceField enum value ServiceFieldTags = "TAGS" ) +// ServiceField_Values returns all elements of the ServiceField enum +func ServiceField_Values() []string { + return []string{ + ServiceFieldTags, + } +} + const ( // SettingNameServiceLongArnFormat is a SettingName enum value SettingNameServiceLongArnFormat = "serviceLongArnFormat" @@ -20013,6 +20881,17 @@ const ( SettingNameContainerInsights = "containerInsights" ) +// SettingName_Values returns all elements of the SettingName enum +func SettingName_Values() []string { + return []string{ + SettingNameServiceLongArnFormat, + SettingNameTaskLongArnFormat, + SettingNameContainerInstanceLongArnFormat, + SettingNameAwsvpcTrunking, + SettingNameContainerInsights, + } +} + const ( // SortOrderAsc is a SortOrder enum value SortOrderAsc = "ASC" @@ -20021,6 +20900,14 @@ const ( SortOrderDesc = "DESC" ) +// SortOrder_Values returns all elements of the SortOrder enum +func SortOrder_Values() []string { + return []string{ + SortOrderAsc, + SortOrderDesc, + } +} + const ( // StabilityStatusSteadyState is a StabilityStatus enum value StabilityStatusSteadyState = "STEADY_STATE" @@ -20029,11 +20916,26 @@ const ( StabilityStatusStabilizing = "STABILIZING" ) +// StabilityStatus_Values returns all elements of the StabilityStatus enum +func StabilityStatus_Values() []string { + return []string{ + StabilityStatusSteadyState, + StabilityStatusStabilizing, + } +} + const ( // TargetTypeContainerInstance is a TargetType enum value TargetTypeContainerInstance = "container-instance" ) +// TargetType_Values returns all elements of the TargetType enum +func TargetType_Values() []string { + return []string{ + TargetTypeContainerInstance, + } +} + const ( // TaskDefinitionFamilyStatusActive is a TaskDefinitionFamilyStatus enum value TaskDefinitionFamilyStatusActive = "ACTIVE" @@ -20045,16 +20947,39 @@ const ( TaskDefinitionFamilyStatusAll = "ALL" ) +// TaskDefinitionFamilyStatus_Values returns all elements of the TaskDefinitionFamilyStatus enum +func TaskDefinitionFamilyStatus_Values() []string { + return []string{ + TaskDefinitionFamilyStatusActive, + TaskDefinitionFamilyStatusInactive, + TaskDefinitionFamilyStatusAll, + } +} + const ( // TaskDefinitionFieldTags is a TaskDefinitionField enum value TaskDefinitionFieldTags = "TAGS" ) +// TaskDefinitionField_Values returns all elements of the TaskDefinitionField enum +func TaskDefinitionField_Values() []string { + return []string{ + TaskDefinitionFieldTags, + } +} + const ( // TaskDefinitionPlacementConstraintTypeMemberOf is a TaskDefinitionPlacementConstraintType enum 
value TaskDefinitionPlacementConstraintTypeMemberOf = "memberOf" ) +// TaskDefinitionPlacementConstraintType_Values returns all elements of the TaskDefinitionPlacementConstraintType enum +func TaskDefinitionPlacementConstraintType_Values() []string { + return []string{ + TaskDefinitionPlacementConstraintTypeMemberOf, + } +} + const ( // TaskDefinitionStatusActive is a TaskDefinitionStatus enum value TaskDefinitionStatusActive = "ACTIVE" @@ -20063,16 +20988,38 @@ const ( TaskDefinitionStatusInactive = "INACTIVE" ) +// TaskDefinitionStatus_Values returns all elements of the TaskDefinitionStatus enum +func TaskDefinitionStatus_Values() []string { + return []string{ + TaskDefinitionStatusActive, + TaskDefinitionStatusInactive, + } +} + const ( // TaskFieldTags is a TaskField enum value TaskFieldTags = "TAGS" ) +// TaskField_Values returns all elements of the TaskField enum +func TaskField_Values() []string { + return []string{ + TaskFieldTags, + } +} + const ( // TaskSetFieldTags is a TaskSetField enum value TaskSetFieldTags = "TAGS" ) +// TaskSetField_Values returns all elements of the TaskSetField enum +func TaskSetField_Values() []string { + return []string{ + TaskSetFieldTags, + } +} + const ( // TaskStopCodeTaskFailedToStart is a TaskStopCode enum value TaskStopCodeTaskFailedToStart = "TaskFailedToStart" @@ -20084,6 +21031,15 @@ const ( TaskStopCodeUserInitiated = "UserInitiated" ) +// TaskStopCode_Values returns all elements of the TaskStopCode enum +func TaskStopCode_Values() []string { + return []string{ + TaskStopCodeTaskFailedToStart, + TaskStopCodeEssentialContainerExited, + TaskStopCodeUserInitiated, + } +} + const ( // TransportProtocolTcp is a TransportProtocol enum value TransportProtocolTcp = "tcp" @@ -20092,6 +21048,14 @@ const ( TransportProtocolUdp = "udp" ) +// TransportProtocol_Values returns all elements of the TransportProtocol enum +func TransportProtocol_Values() []string { + return []string{ + TransportProtocolTcp, + TransportProtocolUdp, + } +} + const ( // UlimitNameCore is a UlimitName enum value UlimitNameCore = "core" @@ -20138,3 +21102,24 @@ const ( // UlimitNameStack is a UlimitName enum value UlimitNameStack = "stack" ) + +// UlimitName_Values returns all elements of the UlimitName enum +func UlimitName_Values() []string { + return []string{ + UlimitNameCore, + UlimitNameCpu, + UlimitNameData, + UlimitNameFsize, + UlimitNameLocks, + UlimitNameMemlock, + UlimitNameMsgqueue, + UlimitNameNice, + UlimitNameNofile, + UlimitNameNproc, + UlimitNameRss, + UlimitNameRtprio, + UlimitNameRttime, + UlimitNameSigpending, + UlimitNameStack, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go index 323754cd9..af14f7607 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/api.go b/vendor/github.com/aws/aws-sdk-go/service/efs/api.go index e541b5f3a..32d25cdd2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/efs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/efs/api.go @@ -85,6 +85,9 @@ func (c *EFS) 
CreateAccessPointRequest(input *CreateAccessPointInput) (req *requ // Returned if the access point you are trying to create already exists, with // the creation token you provided in the request. // +// * IncorrectFileSystemLifeCycleState +// Returned if the file system's lifecycle state is not "available". +// // * InternalServerError // Returned if an error occurred on the server side. // @@ -1248,6 +1251,101 @@ func (c *EFS) DescribeAccessPointsPagesWithContext(ctx aws.Context, input *Descr return p.Err() } +const opDescribeBackupPolicy = "DescribeBackupPolicy" + +// DescribeBackupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBackupPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeBackupPolicy for more information on using the DescribeBackupPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeBackupPolicyRequest method. +// req, resp := client.DescribeBackupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/DescribeBackupPolicy +func (c *EFS) DescribeBackupPolicyRequest(input *DescribeBackupPolicyInput) (req *request.Request, output *DescribeBackupPolicyOutput) { + op := &request.Operation{ + Name: opDescribeBackupPolicy, + HTTPMethod: "GET", + HTTPPath: "/2015-02-01/file-systems/{FileSystemId}/backup-policy", + } + + if input == nil { + input = &DescribeBackupPolicyInput{} + } + + output = &DescribeBackupPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeBackupPolicy API operation for Amazon Elastic File System. +// +// Returns the backup policy for the specified EFS file system. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic File System's +// API operation DescribeBackupPolicy for usage and error information. +// +// Returned Error Types: +// * BadRequest +// Returned if the request is malformed or contains an error such as an invalid +// parameter value or a missing required parameter. +// +// * FileSystemNotFound +// Returned if the specified FileSystemId value doesn't exist in the requester's +// AWS account. +// +// * InternalServerError +// Returned if an error occurred on the server side. +// +// * PolicyNotFound +// Returned if the default file system policy is in effect for the EFS file +// system specified. +// +// * ValidationException +// Returned if the AWS Backup service is not available in the region that the +// request was made. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/DescribeBackupPolicy +func (c *EFS) DescribeBackupPolicy(input *DescribeBackupPolicyInput) (*DescribeBackupPolicyOutput, error) { + req, out := c.DescribeBackupPolicyRequest(input) + return out, req.Send() +} + +// DescribeBackupPolicyWithContext is the same as DescribeBackupPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeBackupPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EFS) DescribeBackupPolicyWithContext(ctx aws.Context, input *DescribeBackupPolicyInput, opts ...request.Option) (*DescribeBackupPolicyOutput, error) { + req, out := c.DescribeBackupPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeFileSystemPolicy = "DescribeFileSystemPolicy" // DescribeFileSystemPolicyRequest generates a "aws/request.Request" representing the @@ -2231,6 +2329,101 @@ func (c *EFS) ModifyMountTargetSecurityGroupsWithContext(ctx aws.Context, input return out, req.Send() } +const opPutBackupPolicy = "PutBackupPolicy" + +// PutBackupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBackupPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBackupPolicy for more information on using the PutBackupPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBackupPolicyRequest method. +// req, resp := client.PutBackupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/PutBackupPolicy +func (c *EFS) PutBackupPolicyRequest(input *PutBackupPolicyInput) (req *request.Request, output *PutBackupPolicyOutput) { + op := &request.Operation{ + Name: opPutBackupPolicy, + HTTPMethod: "PUT", + HTTPPath: "/2015-02-01/file-systems/{FileSystemId}/backup-policy", + } + + if input == nil { + input = &PutBackupPolicyInput{} + } + + output = &PutBackupPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutBackupPolicy API operation for Amazon Elastic File System. +// +// Updates the file system's backup policy. Use this action to start or stop +// automatic backups of the file system. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic File System's +// API operation PutBackupPolicy for usage and error information. 
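A companion sketch for the new PutBackupPolicy operation, turning automatic backups on; as above, the client wiring and the file system ID are assumptions for illustration, and "ENABLED" is one of the Status values described on the BackupPolicy type added later in this file:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/efs"
)

func main() {
	svc := efs.New(session.Must(session.NewSession()))

	// Enable automatic backups by putting a policy whose Status is "ENABLED".
	out, err := svc.PutBackupPolicy(&efs.PutBackupPolicyInput{
		FileSystemId: aws.String("fs-01234567"), // hypothetical file system ID
		BackupPolicy: &efs.BackupPolicy{Status: aws.String("ENABLED")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.BackupPolicy.Status))
}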
+// +// Returned Error Types: +// * BadRequest +// Returned if the request is malformed or contains an error such as an invalid +// parameter value or a missing required parameter. +// +// * FileSystemNotFound +// Returned if the specified FileSystemId value doesn't exist in the requester's +// AWS account. +// +// * IncorrectFileSystemLifeCycleState +// Returned if the file system's lifecycle state is not "available". +// +// * InternalServerError +// Returned if an error occurred on the server side. +// +// * ValidationException +// Returned if the AWS Backup service is not available in the region that the +// request was made. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticfilesystem-2015-02-01/PutBackupPolicy +func (c *EFS) PutBackupPolicy(input *PutBackupPolicyInput) (*PutBackupPolicyOutput, error) { + req, out := c.PutBackupPolicyRequest(input) + return out, req.Send() +} + +// PutBackupPolicyWithContext is the same as PutBackupPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutBackupPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EFS) PutBackupPolicyWithContext(ctx aws.Context, input *PutBackupPolicyInput, opts ...request.Option) (*PutBackupPolicyOutput, error) { + req, out := c.PutBackupPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutFileSystemPolicy = "PutFileSystemPolicy" // PutFileSystemPolicyRequest generates a "aws/request.Request" representing the @@ -2749,8 +2942,8 @@ func (c *EFS) UpdateFileSystemWithContext(ctx aws.Context, input *UpdateFileSyst // Returned if the access point you are trying to create already exists, with // the creation token you provided in the request. type AccessPointAlreadyExists struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // AccessPointId is a required field AccessPointId *string `type:"string" required:"true"` @@ -2773,17 +2966,17 @@ func (s AccessPointAlreadyExists) GoString() string { func newErrorAccessPointAlreadyExists(v protocol.ResponseMetadata) error { return &AccessPointAlreadyExists{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessPointAlreadyExists) Code() string { +func (s *AccessPointAlreadyExists) Code() string { return "AccessPointAlreadyExists" } // Message returns the exception's message. -func (s AccessPointAlreadyExists) Message() string { +func (s *AccessPointAlreadyExists) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2791,22 +2984,22 @@ func (s AccessPointAlreadyExists) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessPointAlreadyExists) OrigErr() error { +func (s *AccessPointAlreadyExists) OrigErr() error { return nil } -func (s AccessPointAlreadyExists) Error() string { +func (s *AccessPointAlreadyExists) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s AccessPointAlreadyExists) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessPointAlreadyExists) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessPointAlreadyExists) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessPointAlreadyExists) RequestID() string { + return s.RespMetadata.RequestID } // Provides a description of an EFS file system access point. @@ -2920,8 +3113,8 @@ func (s *AccessPointDescription) SetTags(v []*Tag) *AccessPointDescription { // Returned if the AWS account has already created the maximum number of access // points allowed per file system. type AccessPointLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -2941,17 +3134,17 @@ func (s AccessPointLimitExceeded) GoString() string { func newErrorAccessPointLimitExceeded(v protocol.ResponseMetadata) error { return &AccessPointLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessPointLimitExceeded) Code() string { +func (s *AccessPointLimitExceeded) Code() string { return "AccessPointLimitExceeded" } // Message returns the exception's message. -func (s AccessPointLimitExceeded) Message() string { +func (s *AccessPointLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2959,29 +3152,29 @@ func (s AccessPointLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessPointLimitExceeded) OrigErr() error { +func (s *AccessPointLimitExceeded) OrigErr() error { return nil } -func (s AccessPointLimitExceeded) Error() string { +func (s *AccessPointLimitExceeded) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessPointLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessPointLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessPointLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessPointLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Returned if the specified AccessPointId value doesn't exist in the requester's // AWS account. type AccessPointNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -3001,17 +3194,17 @@ func (s AccessPointNotFound) GoString() string { func newErrorAccessPointNotFound(v protocol.ResponseMetadata) error { return &AccessPointNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessPointNotFound) Code() string { +func (s *AccessPointNotFound) Code() string { return "AccessPointNotFound" } // Message returns the exception's message. 
-func (s AccessPointNotFound) Message() string { +func (s *AccessPointNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3019,29 +3212,77 @@ func (s AccessPointNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessPointNotFound) OrigErr() error { +func (s *AccessPointNotFound) OrigErr() error { return nil } -func (s AccessPointNotFound) Error() string { +func (s *AccessPointNotFound) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessPointNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessPointNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessPointNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessPointNotFound) RequestID() string { + return s.RespMetadata.RequestID +} + +// The backup policy for the file system, showing the current status. If ENABLED, +// the file system is being backed up. +type BackupPolicy struct { + _ struct{} `type:"structure"` + + // Describes the status of the file system's backup policy. + // + // * ENABLED - EFS is automatically backing up the file system. + // + // * ENABLING - EFS is turning on automatic backups for the file system. + // + // * DISABLED - automatic backups are turned off for the file system. + // + // * DISABLING - EFS is turning off automatic backups for the file system. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"Status"` +} + +// String returns the string representation +func (s BackupPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BackupPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BackupPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BackupPolicy"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *BackupPolicy) SetStatus(v string) *BackupPolicy { + s.Status = &v + return s } // Returned if the request is malformed or contains an error such as an invalid // parameter value or a missing required parameter. type BadRequest struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -3061,17 +3302,17 @@ func (s BadRequest) GoString() string { func newErrorBadRequest(v protocol.ResponseMetadata) error { return &BadRequest{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequest) Code() string { +func (s *BadRequest) Code() string { return "BadRequest" } // Message returns the exception's message. -func (s BadRequest) Message() string { +func (s *BadRequest) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3079,22 +3320,22 @@ func (s BadRequest) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s BadRequest) OrigErr() error { +func (s *BadRequest) OrigErr() error { return nil } -func (s BadRequest) Error() string { +func (s *BadRequest) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequest) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequest) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequest) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequest) RequestID() string { + return s.RespMetadata.RequestID } type CreateAccessPointInput struct { @@ -3343,7 +3584,7 @@ type CreateFileSystemInput struct { // // EFS accepts only symmetric CMKs. You cannot use asymmetric CMKs with EFS // file systems. - KmsKeyId *string `min:"1" type:"string"` + KmsKeyId *string `type:"string"` // The performance mode of the file system. We recommend generalPurpose performance // mode for most file systems. File systems using the maxIO performance mode @@ -3392,9 +3633,6 @@ func (s *CreateFileSystemInput) Validate() error { if s.CreationToken != nil && len(*s.CreationToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("CreationToken", 1)) } - if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1)) - } if s.ProvisionedThroughputInMibps != nil && *s.ProvisionedThroughputInMibps < 1 { invalidParams.Add(request.NewErrParamMinValue("ProvisionedThroughputInMibps", 1)) } @@ -3466,7 +3704,7 @@ type CreateMountTargetInput struct { FileSystemId *string `type:"string" required:"true"` // Valid IPv4 address within the address range of the specified subnet. - IpAddress *string `type:"string"` + IpAddress *string `min:"7" type:"string"` // Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be // for the same VPC as subnet specified. @@ -3475,7 +3713,7 @@ type CreateMountTargetInput struct { // The ID of the subnet to add the mount target in. // // SubnetId is a required field - SubnetId *string `type:"string" required:"true"` + SubnetId *string `min:"15" type:"string" required:"true"` } // String returns the string representation @@ -3494,9 +3732,15 @@ func (s *CreateMountTargetInput) Validate() error { if s.FileSystemId == nil { invalidParams.Add(request.NewErrParamRequired("FileSystemId")) } + if s.IpAddress != nil && len(*s.IpAddress) < 7 { + invalidParams.Add(request.NewErrParamMinLen("IpAddress", 7)) + } if s.SubnetId == nil { invalidParams.Add(request.NewErrParamRequired("SubnetId")) } + if s.SubnetId != nil && len(*s.SubnetId) < 15 { + invalidParams.Add(request.NewErrParamMinLen("SubnetId", 15)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3856,7 +4100,7 @@ type DeleteMountTargetInput struct { // The ID of the mount target to delete (String). 
// // MountTargetId is a required field - MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"` + MountTargetId *string `location:"uri" locationName:"MountTargetId" min:"13" type:"string" required:"true"` } // String returns the string representation @@ -3875,8 +4119,8 @@ func (s *DeleteMountTargetInput) Validate() error { if s.MountTargetId == nil { invalidParams.Add(request.NewErrParamRequired("MountTargetId")) } - if s.MountTargetId != nil && len(*s.MountTargetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MountTargetId", 1)) + if s.MountTargetId != nil && len(*s.MountTargetId) < 13 { + invalidParams.Add(request.NewErrParamMinLen("MountTargetId", 13)) } if invalidParams.Len() > 0 { @@ -3980,8 +4224,8 @@ func (s DeleteTagsOutput) GoString() string { // The service timed out trying to fulfill the request, and the client should // try the call again. type DependencyTimeout struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -4001,17 +4245,17 @@ func (s DependencyTimeout) GoString() string { func newErrorDependencyTimeout(v protocol.ResponseMetadata) error { return &DependencyTimeout{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DependencyTimeout) Code() string { +func (s *DependencyTimeout) Code() string { return "DependencyTimeout" } // Message returns the exception's message. -func (s DependencyTimeout) Message() string { +func (s *DependencyTimeout) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4019,22 +4263,22 @@ func (s DependencyTimeout) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DependencyTimeout) OrigErr() error { +func (s *DependencyTimeout) OrigErr() error { return nil } -func (s DependencyTimeout) Error() string { +func (s *DependencyTimeout) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s DependencyTimeout) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DependencyTimeout) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DependencyTimeout) RequestID() string { - return s.respMetadata.RequestID +func (s *DependencyTimeout) RequestID() string { + return s.RespMetadata.RequestID } type DescribeAccessPointsInput struct { @@ -4139,6 +4383,71 @@ func (s *DescribeAccessPointsOutput) SetNextToken(v string) *DescribeAccessPoint return s } +type DescribeBackupPolicyInput struct { + _ struct{} `type:"structure"` + + // Specifies which EFS file system to retrieve the BackupPolicy for. + // + // FileSystemId is a required field + FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeBackupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBackupPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeBackupPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBackupPolicyInput"} + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *DescribeBackupPolicyInput) SetFileSystemId(v string) *DescribeBackupPolicyInput { + s.FileSystemId = &v + return s +} + +type DescribeBackupPolicyOutput struct { + _ struct{} `type:"structure"` + + // Describes the file system's backup policy, indicating whether automatic backups + // are turned on or off.. + BackupPolicy *BackupPolicy `type:"structure"` +} + +// String returns the string representation +func (s DescribeBackupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBackupPolicyOutput) GoString() string { + return s.String() +} + +// SetBackupPolicy sets the BackupPolicy field's value. +func (s *DescribeBackupPolicyOutput) SetBackupPolicy(v *BackupPolicy) *DescribeBackupPolicyOutput { + s.BackupPolicy = v + return s +} + type DescribeFileSystemPolicyInput struct { _ struct{} `type:"structure"` @@ -4226,7 +4535,7 @@ type DescribeFileSystemsInput struct { // (Optional) Opaque pagination token returned from a previous DescribeFileSystems // operation (String). If present, specifies to continue the list from where // the returning call had left off. - Marker *string `location:"querystring" locationName:"Marker" type:"string"` + Marker *string `location:"querystring" locationName:"Marker" min:"1" type:"string"` // (Optional) Specifies the maximum number of file systems to return in the // response (integer). This number is automatically set to 100. The response @@ -4250,6 +4559,9 @@ func (s *DescribeFileSystemsInput) Validate() error { if s.CreationToken != nil && len(*s.CreationToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("CreationToken", 1)) } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } if s.MaxItems != nil && *s.MaxItems < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) } @@ -4291,11 +4603,11 @@ type DescribeFileSystemsOutput struct { FileSystems []*FileSystemDescription `type:"list"` // Present if provided by caller in the request (String). - Marker *string `type:"string"` + Marker *string `min:"1" type:"string"` // Present if there are more file systems than returned in the response (String). // You can use the NextMarker in the subsequent request to fetch the descriptions. - NextMarker *string `type:"string"` + NextMarker *string `min:"1" type:"string"` } // String returns the string representation @@ -4398,7 +4710,7 @@ type DescribeMountTargetSecurityGroupsInput struct { // The ID of the mount target whose security groups you want to retrieve. 
// // MountTargetId is a required field - MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"` + MountTargetId *string `location:"uri" locationName:"MountTargetId" min:"13" type:"string" required:"true"` } // String returns the string representation @@ -4417,8 +4729,8 @@ func (s *DescribeMountTargetSecurityGroupsInput) Validate() error { if s.MountTargetId == nil { invalidParams.Add(request.NewErrParamRequired("MountTargetId")) } - if s.MountTargetId != nil && len(*s.MountTargetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MountTargetId", 1)) + if s.MountTargetId != nil && len(*s.MountTargetId) < 13 { + invalidParams.Add(request.NewErrParamMinLen("MountTargetId", 13)) } if invalidParams.Len() > 0 { @@ -4475,7 +4787,7 @@ type DescribeMountTargetsInput struct { // (Optional) Opaque pagination token returned from a previous DescribeMountTargets // operation (String). If present, it specifies to continue the list from where // the previous returning call left off. - Marker *string `location:"querystring" locationName:"Marker" type:"string"` + Marker *string `location:"querystring" locationName:"Marker" min:"1" type:"string"` // (Optional) Maximum number of mount targets to return in the response. Currently, // this number is automatically set to 10, and other values are ignored. The @@ -4485,7 +4797,7 @@ type DescribeMountTargetsInput struct { // (Optional) ID of the mount target that you want to have described (String). // It must be included in your request if FileSystemId is not included. Accepts // either a mount target ID or ARN as input. - MountTargetId *string `location:"querystring" locationName:"MountTargetId" type:"string"` + MountTargetId *string `location:"querystring" locationName:"MountTargetId" min:"13" type:"string"` } // String returns the string representation @@ -4501,9 +4813,15 @@ func (s DescribeMountTargetsInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeMountTargetsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeMountTargetsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } if s.MaxItems != nil && *s.MaxItems < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) } + if s.MountTargetId != nil && len(*s.MountTargetId) < 13 { + invalidParams.Add(request.NewErrParamMinLen("MountTargetId", 13)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4546,7 +4864,7 @@ type DescribeMountTargetsOutput struct { // If the request included the Marker, the response returns that value in this // field. - Marker *string `type:"string"` + Marker *string `min:"1" type:"string"` // Returns the file system's mount targets as an array of MountTargetDescription // objects. @@ -4555,7 +4873,7 @@ type DescribeMountTargetsOutput struct { // If a value is present, there are more mount targets to return. In a subsequent // request, you can provide Marker in your request with this value to retrieve // the next set of mount targets. - NextMarker *string `type:"string"` + NextMarker *string `min:"1" type:"string"` } // String returns the string representation @@ -4597,7 +4915,7 @@ type DescribeTagsInput struct { // (Optional) An opaque pagination token returned from a previous DescribeTags // operation (String). If present, it specifies to continue the list from where // the previous call left off. 
- Marker *string `location:"querystring" locationName:"Marker" type:"string"` + Marker *string `location:"querystring" locationName:"Marker" min:"1" type:"string"` // (Optional) The maximum number of file system tags to return in the response. // Currently, this number is automatically set to 100, and other values are @@ -4625,6 +4943,9 @@ func (s *DescribeTagsInput) Validate() error { if s.FileSystemId != nil && len(*s.FileSystemId) < 1 { invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 1)) } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } if s.MaxItems != nil && *s.MaxItems < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) } @@ -4658,12 +4979,12 @@ type DescribeTagsOutput struct { // If the request included a Marker, the response returns that value in this // field. - Marker *string `type:"string"` + Marker *string `min:"1" type:"string"` // If a value is present, there are more tags to return. In a subsequent request, // you can provide the value of NextMarker as the value of the Marker parameter // in your next request to retrieve the next set of tags. - NextMarker *string `type:"string"` + NextMarker *string `min:"1" type:"string"` // Returns tags associated with the file system as an array of Tag objects. // @@ -4702,8 +5023,8 @@ func (s *DescribeTagsOutput) SetTags(v []*Tag) *DescribeTagsOutput { // Returned if the file system you are trying to create already exists, with // the creation token you provided. type FileSystemAlreadyExists struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -4726,17 +5047,17 @@ func (s FileSystemAlreadyExists) GoString() string { func newErrorFileSystemAlreadyExists(v protocol.ResponseMetadata) error { return &FileSystemAlreadyExists{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FileSystemAlreadyExists) Code() string { +func (s *FileSystemAlreadyExists) Code() string { return "FileSystemAlreadyExists" } // Message returns the exception's message. -func (s FileSystemAlreadyExists) Message() string { +func (s *FileSystemAlreadyExists) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4744,22 +5065,22 @@ func (s FileSystemAlreadyExists) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FileSystemAlreadyExists) OrigErr() error { +func (s *FileSystemAlreadyExists) OrigErr() error { return nil } -func (s FileSystemAlreadyExists) Error() string { +func (s *FileSystemAlreadyExists) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s FileSystemAlreadyExists) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileSystemAlreadyExists) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FileSystemAlreadyExists) RequestID() string { - return s.respMetadata.RequestID +func (s *FileSystemAlreadyExists) RequestID() string { + return s.RespMetadata.RequestID } // A description of the file system. @@ -4779,6 +5100,10 @@ type FileSystemDescription struct { // A Boolean value that, if true, indicates that the file system is encrypted. 
Encrypted *bool `type:"boolean"` + // The Amazon Resource Name (ARN) for the EFS file system, in the format arn:aws:elasticfilesystem:region:account-id:file-system/file-system-id + // . Example with sample data: arn:aws:elasticfilesystem:us-west-2:1111333322228888:file-system/fs-01234567 + FileSystemArn *string `type:"string"` + // The ID of the file system, assigned by Amazon EFS. // // FileSystemId is a required field @@ -4786,7 +5111,7 @@ type FileSystemDescription struct { // The ID of an AWS Key Management Service (AWS KMS) customer master key (CMK) // that was used to protect the encrypted file system. - KmsKeyId *string `min:"1" type:"string"` + KmsKeyId *string `type:"string"` // The lifecycle phase of the file system. // @@ -4878,6 +5203,12 @@ func (s *FileSystemDescription) SetEncrypted(v bool) *FileSystemDescription { return s } +// SetFileSystemArn sets the FileSystemArn field's value. +func (s *FileSystemDescription) SetFileSystemArn(v string) *FileSystemDescription { + s.FileSystemArn = &v + return s +} + // SetFileSystemId sets the FileSystemId field's value. func (s *FileSystemDescription) SetFileSystemId(v string) *FileSystemDescription { s.FileSystemId = &v @@ -4946,8 +5277,8 @@ func (s *FileSystemDescription) SetThroughputMode(v string) *FileSystemDescripti // Returned if a file system has mount targets. type FileSystemInUse struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -4967,17 +5298,17 @@ func (s FileSystemInUse) GoString() string { func newErrorFileSystemInUse(v protocol.ResponseMetadata) error { return &FileSystemInUse{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FileSystemInUse) Code() string { +func (s *FileSystemInUse) Code() string { return "FileSystemInUse" } // Message returns the exception's message. -func (s FileSystemInUse) Message() string { +func (s *FileSystemInUse) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4985,29 +5316,29 @@ func (s FileSystemInUse) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FileSystemInUse) OrigErr() error { +func (s *FileSystemInUse) OrigErr() error { return nil } -func (s FileSystemInUse) Error() string { +func (s *FileSystemInUse) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s FileSystemInUse) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileSystemInUse) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FileSystemInUse) RequestID() string { - return s.respMetadata.RequestID +func (s *FileSystemInUse) RequestID() string { + return s.RespMetadata.RequestID } // Returned if the AWS account has already created the maximum number of file // systems allowed per account. 
type FileSystemLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -5027,17 +5358,17 @@ func (s FileSystemLimitExceeded) GoString() string { func newErrorFileSystemLimitExceeded(v protocol.ResponseMetadata) error { return &FileSystemLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FileSystemLimitExceeded) Code() string { +func (s *FileSystemLimitExceeded) Code() string { return "FileSystemLimitExceeded" } // Message returns the exception's message. -func (s FileSystemLimitExceeded) Message() string { +func (s *FileSystemLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5045,29 +5376,29 @@ func (s FileSystemLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FileSystemLimitExceeded) OrigErr() error { +func (s *FileSystemLimitExceeded) OrigErr() error { return nil } -func (s FileSystemLimitExceeded) Error() string { +func (s *FileSystemLimitExceeded) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s FileSystemLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileSystemLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FileSystemLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *FileSystemLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Returned if the specified FileSystemId value doesn't exist in the requester's // AWS account. type FileSystemNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -5087,17 +5418,17 @@ func (s FileSystemNotFound) GoString() string { func newErrorFileSystemNotFound(v protocol.ResponseMetadata) error { return &FileSystemNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FileSystemNotFound) Code() string { +func (s *FileSystemNotFound) Code() string { return "FileSystemNotFound" } // Message returns the exception's message. -func (s FileSystemNotFound) Message() string { +func (s *FileSystemNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5105,22 +5436,22 @@ func (s FileSystemNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FileSystemNotFound) OrigErr() error { +func (s *FileSystemNotFound) OrigErr() error { return nil } -func (s FileSystemNotFound) Error() string { +func (s *FileSystemNotFound) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s FileSystemNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileSystemNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s FileSystemNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *FileSystemNotFound) RequestID() string { + return s.RespMetadata.RequestID } // The latest known metered size (in bytes) of data stored in the file system, @@ -5188,8 +5519,8 @@ func (s *FileSystemSize) SetValueInStandard(v int64) *FileSystemSize { // Returned if the file system's lifecycle state is not "available". type IncorrectFileSystemLifeCycleState struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -5209,17 +5540,17 @@ func (s IncorrectFileSystemLifeCycleState) GoString() string { func newErrorIncorrectFileSystemLifeCycleState(v protocol.ResponseMetadata) error { return &IncorrectFileSystemLifeCycleState{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncorrectFileSystemLifeCycleState) Code() string { +func (s *IncorrectFileSystemLifeCycleState) Code() string { return "IncorrectFileSystemLifeCycleState" } // Message returns the exception's message. -func (s IncorrectFileSystemLifeCycleState) Message() string { +func (s *IncorrectFileSystemLifeCycleState) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5227,28 +5558,28 @@ func (s IncorrectFileSystemLifeCycleState) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IncorrectFileSystemLifeCycleState) OrigErr() error { +func (s *IncorrectFileSystemLifeCycleState) OrigErr() error { return nil } -func (s IncorrectFileSystemLifeCycleState) Error() string { +func (s *IncorrectFileSystemLifeCycleState) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s IncorrectFileSystemLifeCycleState) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncorrectFileSystemLifeCycleState) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IncorrectFileSystemLifeCycleState) RequestID() string { - return s.respMetadata.RequestID +func (s *IncorrectFileSystemLifeCycleState) RequestID() string { + return s.RespMetadata.RequestID } // Returned if the mount target is not in the correct state for the operation. type IncorrectMountTargetState struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -5268,17 +5599,17 @@ func (s IncorrectMountTargetState) GoString() string { func newErrorIncorrectMountTargetState(v protocol.ResponseMetadata) error { return &IncorrectMountTargetState{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncorrectMountTargetState) Code() string { +func (s *IncorrectMountTargetState) Code() string { return "IncorrectMountTargetState" } // Message returns the exception's message. 
-func (s IncorrectMountTargetState) Message() string { +func (s *IncorrectMountTargetState) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5286,22 +5617,22 @@ func (s IncorrectMountTargetState) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IncorrectMountTargetState) OrigErr() error { +func (s *IncorrectMountTargetState) OrigErr() error { return nil } -func (s IncorrectMountTargetState) Error() string { +func (s *IncorrectMountTargetState) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s IncorrectMountTargetState) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncorrectMountTargetState) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IncorrectMountTargetState) RequestID() string { - return s.respMetadata.RequestID +func (s *IncorrectMountTargetState) RequestID() string { + return s.RespMetadata.RequestID } // Returned if there's not enough capacity to provision additional throughput. @@ -5310,8 +5641,8 @@ func (s IncorrectMountTargetState) RequestID() string { // of an existing file system, or when you attempt to change an existing file // system from bursting to provisioned throughput mode. type InsufficientThroughputCapacity struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -5331,17 +5662,17 @@ func (s InsufficientThroughputCapacity) GoString() string { func newErrorInsufficientThroughputCapacity(v protocol.ResponseMetadata) error { return &InsufficientThroughputCapacity{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InsufficientThroughputCapacity) Code() string { +func (s *InsufficientThroughputCapacity) Code() string { return "InsufficientThroughputCapacity" } // Message returns the exception's message. -func (s InsufficientThroughputCapacity) Message() string { +func (s *InsufficientThroughputCapacity) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5349,28 +5680,28 @@ func (s InsufficientThroughputCapacity) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InsufficientThroughputCapacity) OrigErr() error { +func (s *InsufficientThroughputCapacity) OrigErr() error { return nil } -func (s InsufficientThroughputCapacity) Error() string { +func (s *InsufficientThroughputCapacity) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InsufficientThroughputCapacity) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InsufficientThroughputCapacity) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InsufficientThroughputCapacity) RequestID() string { - return s.respMetadata.RequestID +func (s *InsufficientThroughputCapacity) RequestID() string { + return s.RespMetadata.RequestID } // Returned if an error occurred on the server side. 
type InternalServerError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -5390,17 +5721,17 @@ func (s InternalServerError) GoString() string { func newErrorInternalServerError(v protocol.ResponseMetadata) error { return &InternalServerError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerError) Code() string { +func (s *InternalServerError) Code() string { return "InternalServerError" } // Message returns the exception's message. -func (s InternalServerError) Message() string { +func (s *InternalServerError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5408,30 +5739,30 @@ func (s InternalServerError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerError) OrigErr() error { +func (s *InternalServerError) OrigErr() error { return nil } -func (s InternalServerError) Error() string { +func (s *InternalServerError) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerError) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerError) RequestID() string { + return s.RespMetadata.RequestID } // Returned if the FileSystemPolicy is is malformed or contains an error such // as an invalid parameter value or a missing required parameter. Returned in // the case of a policy lockout safety check error. type InvalidPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` ErrorCode *string `min:"1" type:"string"` @@ -5450,17 +5781,17 @@ func (s InvalidPolicyException) GoString() string { func newErrorInvalidPolicyException(v protocol.ResponseMetadata) error { return &InvalidPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPolicyException) Code() string { +func (s *InvalidPolicyException) Code() string { return "InvalidPolicyException" } // Message returns the exception's message. -func (s InvalidPolicyException) Message() string { +func (s *InvalidPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5468,29 +5799,29 @@ func (s InvalidPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPolicyException) OrigErr() error { +func (s *InvalidPolicyException) OrigErr() error { return nil } -func (s InvalidPolicyException) Error() string { +func (s *InvalidPolicyException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPolicyException) RequestID() string { + return s.RespMetadata.RequestID } // Returned if the request specified an IpAddress that is already in use in // the subnet. type IpAddressInUse struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -5510,17 +5841,17 @@ func (s IpAddressInUse) GoString() string { func newErrorIpAddressInUse(v protocol.ResponseMetadata) error { return &IpAddressInUse{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IpAddressInUse) Code() string { +func (s *IpAddressInUse) Code() string { return "IpAddressInUse" } // Message returns the exception's message. -func (s IpAddressInUse) Message() string { +func (s *IpAddressInUse) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5528,22 +5859,22 @@ func (s IpAddressInUse) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IpAddressInUse) OrigErr() error { +func (s *IpAddressInUse) OrigErr() error { return nil } -func (s IpAddressInUse) Error() string { +func (s *IpAddressInUse) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s IpAddressInUse) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IpAddressInUse) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IpAddressInUse) RequestID() string { - return s.respMetadata.RequestID +func (s *IpAddressInUse) RequestID() string { + return s.RespMetadata.RequestID } // Describes a policy used by EFS lifecycle management to transition files to @@ -5677,7 +6008,7 @@ type ModifyMountTargetSecurityGroupsInput struct { // The ID of the mount target whose security groups you want to modify. // // MountTargetId is a required field - MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"` + MountTargetId *string `location:"uri" locationName:"MountTargetId" min:"13" type:"string" required:"true"` // An array of up to five VPC security group IDs. SecurityGroups []*string `type:"list"` @@ -5699,8 +6030,8 @@ func (s *ModifyMountTargetSecurityGroupsInput) Validate() error { if s.MountTargetId == nil { invalidParams.Add(request.NewErrParamRequired("MountTargetId")) } - if s.MountTargetId != nil && len(*s.MountTargetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MountTargetId", 1)) + if s.MountTargetId != nil && len(*s.MountTargetId) < 13 { + invalidParams.Add(request.NewErrParamMinLen("MountTargetId", 13)) } if invalidParams.Len() > 0 { @@ -5738,8 +6069,8 @@ func (s ModifyMountTargetSecurityGroupsOutput) GoString() string { // Returned if the mount target would violate one of the specified restrictions // based on the file system's existing mount targets. 
type MountTargetConflict struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -5759,17 +6090,17 @@ func (s MountTargetConflict) GoString() string { func newErrorMountTargetConflict(v protocol.ResponseMetadata) error { return &MountTargetConflict{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MountTargetConflict) Code() string { +func (s *MountTargetConflict) Code() string { return "MountTargetConflict" } // Message returns the exception's message. -func (s MountTargetConflict) Message() string { +func (s *MountTargetConflict) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5777,22 +6108,22 @@ func (s MountTargetConflict) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MountTargetConflict) OrigErr() error { +func (s *MountTargetConflict) OrigErr() error { return nil } -func (s MountTargetConflict) Error() string { +func (s *MountTargetConflict) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s MountTargetConflict) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MountTargetConflict) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MountTargetConflict) RequestID() string { - return s.respMetadata.RequestID +func (s *MountTargetConflict) RequestID() string { + return s.RespMetadata.RequestID } // Provides a description of a mount target. @@ -5816,7 +6147,7 @@ type MountTargetDescription struct { FileSystemId *string `type:"string" required:"true"` // Address at which the file system can be mounted by using the mount target. - IpAddress *string `type:"string"` + IpAddress *string `min:"7" type:"string"` // Lifecycle state of the mount target. // @@ -5826,7 +6157,7 @@ type MountTargetDescription struct { // System-assigned mount target ID. // // MountTargetId is a required field - MountTargetId *string `type:"string" required:"true"` + MountTargetId *string `min:"13" type:"string" required:"true"` // The ID of the network interface that Amazon EFS created when it created the // mount target. @@ -5838,7 +6169,10 @@ type MountTargetDescription struct { // The ID of the mount target's subnet. // // SubnetId is a required field - SubnetId *string `type:"string" required:"true"` + SubnetId *string `min:"15" type:"string" required:"true"` + + // The Virtual Private Cloud (VPC) ID that the mount target is configured in. + VpcId *string `type:"string"` } // String returns the string representation @@ -5905,11 +6239,17 @@ func (s *MountTargetDescription) SetSubnetId(v string) *MountTargetDescription { return s } +// SetVpcId sets the VpcId field's value. +func (s *MountTargetDescription) SetVpcId(v string) *MountTargetDescription { + s.VpcId = &v + return s +} + // Returned if there is no mount target with the specified ID found in the caller's // account. 
type MountTargetNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -5929,17 +6269,17 @@ func (s MountTargetNotFound) GoString() string { func newErrorMountTargetNotFound(v protocol.ResponseMetadata) error { return &MountTargetNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MountTargetNotFound) Code() string { +func (s *MountTargetNotFound) Code() string { return "MountTargetNotFound" } // Message returns the exception's message. -func (s MountTargetNotFound) Message() string { +func (s *MountTargetNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5947,22 +6287,22 @@ func (s MountTargetNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MountTargetNotFound) OrigErr() error { +func (s *MountTargetNotFound) OrigErr() error { return nil } -func (s MountTargetNotFound) Error() string { +func (s *MountTargetNotFound) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s MountTargetNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MountTargetNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MountTargetNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *MountTargetNotFound) RequestID() string { + return s.RespMetadata.RequestID } // The calling account has reached the limit for elastic network interfaces @@ -5972,8 +6312,8 @@ func (s MountTargetNotFound) RequestID() string { // in the Amazon VPC User Guide (see the Network interfaces per VPC entry in // the table). type NetworkInterfaceLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -5993,17 +6333,17 @@ func (s NetworkInterfaceLimitExceeded) GoString() string { func newErrorNetworkInterfaceLimitExceeded(v protocol.ResponseMetadata) error { return &NetworkInterfaceLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NetworkInterfaceLimitExceeded) Code() string { +func (s *NetworkInterfaceLimitExceeded) Code() string { return "NetworkInterfaceLimitExceeded" } // Message returns the exception's message. -func (s NetworkInterfaceLimitExceeded) Message() string { +func (s *NetworkInterfaceLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6011,29 +6351,29 @@ func (s NetworkInterfaceLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NetworkInterfaceLimitExceeded) OrigErr() error { +func (s *NetworkInterfaceLimitExceeded) OrigErr() error { return nil } -func (s NetworkInterfaceLimitExceeded) Error() string { +func (s *NetworkInterfaceLimitExceeded) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s NetworkInterfaceLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NetworkInterfaceLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NetworkInterfaceLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *NetworkInterfaceLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Returned if IpAddress was not specified in the request and there are no free // IP addresses in the subnet. type NoFreeAddressesInSubnet struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -6053,17 +6393,17 @@ func (s NoFreeAddressesInSubnet) GoString() string { func newErrorNoFreeAddressesInSubnet(v protocol.ResponseMetadata) error { return &NoFreeAddressesInSubnet{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoFreeAddressesInSubnet) Code() string { +func (s *NoFreeAddressesInSubnet) Code() string { return "NoFreeAddressesInSubnet" } // Message returns the exception's message. -func (s NoFreeAddressesInSubnet) Message() string { +func (s *NoFreeAddressesInSubnet) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6071,29 +6411,29 @@ func (s NoFreeAddressesInSubnet) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoFreeAddressesInSubnet) OrigErr() error { +func (s *NoFreeAddressesInSubnet) OrigErr() error { return nil } -func (s NoFreeAddressesInSubnet) Error() string { +func (s *NoFreeAddressesInSubnet) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s NoFreeAddressesInSubnet) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoFreeAddressesInSubnet) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoFreeAddressesInSubnet) RequestID() string { - return s.respMetadata.RequestID +func (s *NoFreeAddressesInSubnet) RequestID() string { + return s.RespMetadata.RequestID } // Returned if the default file system policy is in effect for the EFS file // system specified. type PolicyNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` ErrorCode *string `min:"1" type:"string"` @@ -6112,17 +6452,17 @@ func (s PolicyNotFound) GoString() string { func newErrorPolicyNotFound(v protocol.ResponseMetadata) error { return &PolicyNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PolicyNotFound) Code() string { +func (s *PolicyNotFound) Code() string { return "PolicyNotFound" } // Message returns the exception's message. -func (s PolicyNotFound) Message() string { +func (s *PolicyNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6130,22 +6470,22 @@ func (s PolicyNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s PolicyNotFound) OrigErr() error { +func (s *PolicyNotFound) OrigErr() error { return nil } -func (s PolicyNotFound) Error() string { +func (s *PolicyNotFound) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s PolicyNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PolicyNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PolicyNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *PolicyNotFound) RequestID() string { + return s.RespMetadata.RequestID } // The full POSIX identity, including the user ID, group ID, and any secondary @@ -6214,6 +6554,90 @@ func (s *PosixUser) SetUid(v int64) *PosixUser { return s } +type PutBackupPolicyInput struct { + _ struct{} `type:"structure"` + + // The backup policy included in the PutBackupPolicy request. + // + // BackupPolicy is a required field + BackupPolicy *BackupPolicy `type:"structure" required:"true"` + + // Specifies which EFS file system to update the backup policy for. + // + // FileSystemId is a required field + FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBackupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBackupPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBackupPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBackupPolicyInput"} + if s.BackupPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("BackupPolicy")) + } + if s.FileSystemId == nil { + invalidParams.Add(request.NewErrParamRequired("FileSystemId")) + } + if s.FileSystemId != nil && len(*s.FileSystemId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 1)) + } + if s.BackupPolicy != nil { + if err := s.BackupPolicy.Validate(); err != nil { + invalidParams.AddNested("BackupPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupPolicy sets the BackupPolicy field's value. +func (s *PutBackupPolicyInput) SetBackupPolicy(v *BackupPolicy) *PutBackupPolicyInput { + s.BackupPolicy = v + return s +} + +// SetFileSystemId sets the FileSystemId field's value. +func (s *PutBackupPolicyInput) SetFileSystemId(v string) *PutBackupPolicyInput { + s.FileSystemId = &v + return s +} + +type PutBackupPolicyOutput struct { + _ struct{} `type:"structure"` + + // Describes the file system's backup policy, indicating whether automatic backups + // are turned on or off.. + BackupPolicy *BackupPolicy `type:"structure"` +} + +// String returns the string representation +func (s PutBackupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBackupPolicyOutput) GoString() string { + return s.String() +} + +// SetBackupPolicy sets the BackupPolicy field's value. 
+func (s *PutBackupPolicyOutput) SetBackupPolicy(v *BackupPolicy) *PutBackupPolicyOutput { + s.BackupPolicy = v + return s +} + type PutFileSystemPolicyInput struct { _ struct{} `type:"structure"` @@ -6471,8 +6895,8 @@ func (s *RootDirectory) SetPath(v string) *RootDirectory { // Returned if the size of SecurityGroups specified in the request is greater // than five. type SecurityGroupLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -6492,17 +6916,17 @@ func (s SecurityGroupLimitExceeded) GoString() string { func newErrorSecurityGroupLimitExceeded(v protocol.ResponseMetadata) error { return &SecurityGroupLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SecurityGroupLimitExceeded) Code() string { +func (s *SecurityGroupLimitExceeded) Code() string { return "SecurityGroupLimitExceeded" } // Message returns the exception's message. -func (s SecurityGroupLimitExceeded) Message() string { +func (s *SecurityGroupLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6510,29 +6934,29 @@ func (s SecurityGroupLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SecurityGroupLimitExceeded) OrigErr() error { +func (s *SecurityGroupLimitExceeded) OrigErr() error { return nil } -func (s SecurityGroupLimitExceeded) Error() string { +func (s *SecurityGroupLimitExceeded) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s SecurityGroupLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SecurityGroupLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SecurityGroupLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *SecurityGroupLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Returned if one of the specified security groups doesn't exist in the subnet's // VPC. type SecurityGroupNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -6552,17 +6976,17 @@ func (s SecurityGroupNotFound) GoString() string { func newErrorSecurityGroupNotFound(v protocol.ResponseMetadata) error { return &SecurityGroupNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SecurityGroupNotFound) Code() string { +func (s *SecurityGroupNotFound) Code() string { return "SecurityGroupNotFound" } // Message returns the exception's message. -func (s SecurityGroupNotFound) Message() string { +func (s *SecurityGroupNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6570,28 +6994,28 @@ func (s SecurityGroupNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s SecurityGroupNotFound) OrigErr() error { +func (s *SecurityGroupNotFound) OrigErr() error { return nil } -func (s SecurityGroupNotFound) Error() string { +func (s *SecurityGroupNotFound) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s SecurityGroupNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SecurityGroupNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SecurityGroupNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *SecurityGroupNotFound) RequestID() string { + return s.RespMetadata.RequestID } // Returned if there is no subnet with ID SubnetId provided in the request. type SubnetNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -6611,17 +7035,17 @@ func (s SubnetNotFound) GoString() string { func newErrorSubnetNotFound(v protocol.ResponseMetadata) error { return &SubnetNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SubnetNotFound) Code() string { +func (s *SubnetNotFound) Code() string { return "SubnetNotFound" } // Message returns the exception's message. -func (s SubnetNotFound) Message() string { +func (s *SubnetNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6629,22 +7053,22 @@ func (s SubnetNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SubnetNotFound) OrigErr() error { +func (s *SubnetNotFound) OrigErr() error { return nil } -func (s SubnetNotFound) Error() string { +func (s *SubnetNotFound) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s SubnetNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SubnetNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SubnetNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *SubnetNotFound) RequestID() string { + return s.RespMetadata.RequestID } // A tag is a key-value pair. Allowed characters are letters, white space, and @@ -6785,8 +7209,8 @@ func (s TagResourceOutput) GoString() string { // Returned if the throughput mode or amount of provisioned throughput can't // be changed because the throughput limit of 1024 MiB/s has been reached. type ThroughputLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -6806,17 +7230,17 @@ func (s ThroughputLimitExceeded) GoString() string { func newErrorThroughputLimitExceeded(v protocol.ResponseMetadata) error { return &ThroughputLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ThroughputLimitExceeded) Code() string { +func (s *ThroughputLimitExceeded) Code() string { return "ThroughputLimitExceeded" } // Message returns the exception's message. -func (s ThroughputLimitExceeded) Message() string { +func (s *ThroughputLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6824,29 +7248,29 @@ func (s ThroughputLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThroughputLimitExceeded) OrigErr() error { +func (s *ThroughputLimitExceeded) OrigErr() error { return nil } -func (s ThroughputLimitExceeded) Error() string { +func (s *ThroughputLimitExceeded) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ThroughputLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThroughputLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ThroughputLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *ThroughputLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Returned if you don’t wait at least 24 hours before changing the throughput // mode, or decreasing the Provisioned Throughput value. type TooManyRequests struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -6866,17 +7290,17 @@ func (s TooManyRequests) GoString() string { func newErrorTooManyRequests(v protocol.ResponseMetadata) error { return &TooManyRequests{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequests) Code() string { +func (s *TooManyRequests) Code() string { return "TooManyRequests" } // Message returns the exception's message. -func (s TooManyRequests) Message() string { +func (s *TooManyRequests) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6884,27 +7308,27 @@ func (s TooManyRequests) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequests) OrigErr() error { +func (s *TooManyRequests) OrigErr() error { return nil } -func (s TooManyRequests) Error() string { +func (s *TooManyRequests) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequests) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequests) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s TooManyRequests) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequests) RequestID() string { + return s.RespMetadata.RequestID } type UnsupportedAvailabilityZone struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // ErrorCode is a required field ErrorCode *string `min:"1" type:"string" required:"true"` @@ -6924,17 +7348,17 @@ func (s UnsupportedAvailabilityZone) GoString() string { func newErrorUnsupportedAvailabilityZone(v protocol.ResponseMetadata) error { return &UnsupportedAvailabilityZone{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedAvailabilityZone) Code() string { +func (s *UnsupportedAvailabilityZone) Code() string { return "UnsupportedAvailabilityZone" } // Message returns the exception's message. -func (s UnsupportedAvailabilityZone) Message() string { +func (s *UnsupportedAvailabilityZone) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6942,22 +7366,22 @@ func (s UnsupportedAvailabilityZone) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedAvailabilityZone) OrigErr() error { +func (s *UnsupportedAvailabilityZone) OrigErr() error { return nil } -func (s UnsupportedAvailabilityZone) Error() string { +func (s *UnsupportedAvailabilityZone) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedAvailabilityZone) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedAvailabilityZone) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedAvailabilityZone) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedAvailabilityZone) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -6970,7 +7394,9 @@ type UntagResourceInput struct { // The keys of the key:value tag pairs that you want to remove from the specified // EFS resource. - TagKeys []*string `min:"1" type:"list"` + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` } // String returns the string representation @@ -6992,6 +7418,9 @@ func (s *UntagResourceInput) Validate() error { if s.ResourceId != nil && len(*s.ResourceId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } if s.TagKeys != nil && len(s.TagKeys) < 1 { invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) } @@ -7114,6 +7543,10 @@ type UpdateFileSystemOutput struct { // A Boolean value that, if true, indicates that the file system is encrypted. Encrypted *bool `type:"boolean"` + // The Amazon Resource Name (ARN) for the EFS file system, in the format arn:aws:elasticfilesystem:region:account-id:file-system/file-system-id + // . Example with sample data: arn:aws:elasticfilesystem:us-west-2:1111333322228888:file-system/fs-01234567 + FileSystemArn *string `type:"string"` + // The ID of the file system, assigned by Amazon EFS. 
// // FileSystemId is a required field @@ -7121,7 +7554,7 @@ type UpdateFileSystemOutput struct { // The ID of an AWS Key Management Service (AWS KMS) customer master key (CMK) // that was used to protect the encrypted file system. - KmsKeyId *string `min:"1" type:"string"` + KmsKeyId *string `type:"string"` // The lifecycle phase of the file system. // @@ -7213,6 +7646,12 @@ func (s *UpdateFileSystemOutput) SetEncrypted(v bool) *UpdateFileSystemOutput { return s } +// SetFileSystemArn sets the FileSystemArn field's value. +func (s *UpdateFileSystemOutput) SetFileSystemArn(v string) *UpdateFileSystemOutput { + s.FileSystemArn = &v + return s +} + // SetFileSystemId sets the FileSystemId field's value. func (s *UpdateFileSystemOutput) SetFileSystemId(v string) *UpdateFileSystemOutput { s.FileSystemId = &v @@ -7279,6 +7718,66 @@ func (s *UpdateFileSystemOutput) SetThroughputMode(v string) *UpdateFileSystemOu return s } +// Returned if the AWS Backup service is not available in the region that the +// request was made. +type ValidationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // ErrorCode is a required field + ErrorCode *string `min:"1" type:"string" required:"true"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s ValidationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationException) GoString() string { + return s.String() +} + +func newErrorValidationException(v protocol.ResponseMetadata) error { + return &ValidationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ValidationException) Code() string { + return "ValidationException" +} + +// Message returns the exception's message. +func (s *ValidationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ValidationException) OrigErr() error { + return nil +} + +func (s *ValidationException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID +} + const ( // LifeCycleStateCreating is a LifeCycleState enum value LifeCycleStateCreating = "creating" @@ -7296,6 +7795,17 @@ const ( LifeCycleStateDeleted = "deleted" ) +// LifeCycleState_Values returns all elements of the LifeCycleState enum +func LifeCycleState_Values() []string { + return []string{ + LifeCycleStateCreating, + LifeCycleStateAvailable, + LifeCycleStateUpdating, + LifeCycleStateDeleting, + LifeCycleStateDeleted, + } +} + const ( // PerformanceModeGeneralPurpose is a PerformanceMode enum value PerformanceModeGeneralPurpose = "generalPurpose" @@ -7304,6 +7814,38 @@ const ( PerformanceModeMaxIo = "maxIO" ) +// PerformanceMode_Values returns all elements of the PerformanceMode enum +func PerformanceMode_Values() []string { + return []string{ + PerformanceModeGeneralPurpose, + PerformanceModeMaxIo, + } +} + +const ( + // StatusEnabled is a Status enum value + StatusEnabled = "ENABLED" + + // StatusEnabling is a Status enum value + StatusEnabling = "ENABLING" + + // StatusDisabled is a Status enum value + StatusDisabled = "DISABLED" + + // StatusDisabling is a Status enum value + StatusDisabling = "DISABLING" +) + +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusEnabled, + StatusEnabling, + StatusDisabled, + StatusDisabling, + } +} + const ( // ThroughputModeBursting is a ThroughputMode enum value ThroughputModeBursting = "bursting" @@ -7312,6 +7854,14 @@ const ( ThroughputModeProvisioned = "provisioned" ) +// ThroughputMode_Values returns all elements of the ThroughputMode enum +func ThroughputMode_Values() []string { + return []string{ + ThroughputModeBursting, + ThroughputModeProvisioned, + } +} + const ( // TransitionToIARulesAfter7Days is a TransitionToIARules enum value TransitionToIARulesAfter7Days = "AFTER_7_DAYS" @@ -7328,3 +7878,14 @@ const ( // TransitionToIARulesAfter90Days is a TransitionToIARules enum value TransitionToIARulesAfter90Days = "AFTER_90_DAYS" ) + +// TransitionToIARules_Values returns all elements of the TransitionToIARules enum +func TransitionToIARules_Values() []string { + return []string{ + TransitionToIARulesAfter7Days, + TransitionToIARulesAfter14Days, + TransitionToIARulesAfter30Days, + TransitionToIARulesAfter60Days, + TransitionToIARulesAfter90Days, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/errors.go b/vendor/github.com/aws/aws-sdk-go/service/efs/errors.go index bccb1f84f..7e2f36abb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/efs/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/efs/errors.go @@ -189,6 +189,13 @@ const ( // ErrCodeUnsupportedAvailabilityZone for service response error code // "UnsupportedAvailabilityZone". ErrCodeUnsupportedAvailabilityZone = "UnsupportedAvailabilityZone" + + // ErrCodeValidationException for service response error code + // "ValidationException". + // + // Returned if the AWS Backup service is not available in the region that the + // request was made. 
+ ErrCodeValidationException = "ValidationException" ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ @@ -218,4 +225,5 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "ThroughputLimitExceeded": newErrorThroughputLimitExceeded, "TooManyRequests": newErrorTooManyRequests, "UnsupportedAvailabilityZone": newErrorUnsupportedAvailabilityZone, + "ValidationException": newErrorValidationException, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/service.go b/vendor/github.com/aws/aws-sdk-go/service/efs/service.go index f44a7242e..ac105c46e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/efs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/efs/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/eks/api.go b/vendor/github.com/aws/aws-sdk-go/service/eks/api.go index b37ef9e7d..90672748a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/eks/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/eks/api.go @@ -335,7 +335,9 @@ func (c *EKS) CreateNodegroupRequest(input *CreateNodegroupInput) (req *request. // Creates a managed worker node group for an Amazon EKS cluster. You can only // create a node group for your cluster that is equal to the current Kubernetes // version for the cluster. All node groups are created with the latest AMI -// release version for the respective minor Kubernetes version of the cluster. +// release version for the respective minor Kubernetes version of the cluster, +// unless you deploy a custom AMI using a launch template. For more information +// about using launch templates, see Launch template support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html). // // An Amazon EKS managed node group is an Amazon EC2 Auto Scaling group and // associated Amazon EC2 instances that are managed by AWS for an Amazon EKS @@ -452,7 +454,8 @@ func (c *EKS) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Requ // in the Amazon EKS User Guide. // // If you have managed node groups or Fargate profiles attached to the cluster, -// you must delete them first. For more information, see DeleteNodegroup andDeleteFargateProfile. +// you must delete them first. For more information, see DeleteNodegroup and +// DeleteFargateProfile. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1443,8 +1446,9 @@ func (c *EKS) ListNodegroupsRequest(input *ListNodegroupsInput) (req *request.Re // ListNodegroups API operation for Amazon Elastic Kubernetes Service. // -// Lists the Amazon EKS node groups associated with the specified cluster in -// your AWS account in the specified Region. +// Lists the Amazon EKS managed node groups associated with the specified cluster +// in your AWS account in the specified Region. Self-managed node groups are +// not listed. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2350,11 +2354,18 @@ func (c *EKS) UpdateNodegroupVersionRequest(input *UpdateNodegroupVersionInput) // Updates the Kubernetes version or AMI version of an Amazon EKS managed node // group. // -// You can update to the latest available AMI version of a node group's current -// Kubernetes version by not specifying a Kubernetes version in the request. -// You can update to the latest AMI version of your cluster's current Kubernetes -// version by specifying your cluster's Kubernetes version in the request. For -// more information, see Amazon EKS-Optimized Linux AMI Versions (https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html) +// You can update a node group using a launch template only if the node group +// was originally deployed with a launch template. If you need to update a custom +// AMI in a node group that was deployed with a launch template, then update +// your custom AMI, specify the new ID in a new version of the launch template, +// and then update the node group to the new version of the launch template. +// +// If you update without a launch template, then you can update to the latest +// available AMI version of a node group's current Kubernetes version by not +// specifying a Kubernetes version in the request. You can update to the latest +// AMI version of your cluster's current Kubernetes version by specifying your +// cluster's Kubernetes version in the request. For more information, see Amazon +// EKS-Optimized Linux AMI Versions (https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html) // in the Amazon EKS User Guide. // // You cannot roll back a node group to an earlier Kubernetes version or AMI @@ -2449,8 +2460,8 @@ func (s *AutoScalingGroup) SetName(v string) *AutoScalingGroup { // This exception is thrown if the request contains a semantic error. The precise // meaning will depend on the API, and will be documented in the error message. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2467,17 +2478,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2485,22 +2496,22 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing the certificate-authority-data for your cluster. @@ -2533,8 +2544,8 @@ func (s *Certificate) SetData(v string) *Certificate { // an action or resource on behalf of a user that doesn't have permissions to // use the action or resource or specifying an identifier that is not valid. type ClientException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The Amazon EKS cluster associated with the exception. ClusterName *string `locationName:"clusterName" type:"string"` @@ -2557,17 +2568,17 @@ func (s ClientException) GoString() string { func newErrorClientException(v protocol.ResponseMetadata) error { return &ClientException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClientException) Code() string { +func (s *ClientException) Code() string { return "ClientException" } // Message returns the exception's message. -func (s ClientException) Message() string { +func (s *ClientException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2575,22 +2586,22 @@ func (s ClientException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClientException) OrigErr() error { +func (s *ClientException) OrigErr() error { return nil } -func (s ClientException) Error() string { +func (s *ClientException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ClientException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClientException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClientException) RequestID() string { - return s.respMetadata.RequestID +func (s *ClientException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing an Amazon EKS cluster. @@ -2619,6 +2630,9 @@ type Cluster struct { // The identity provider information for the cluster. Identity *Identity `locationName:"identity" type:"structure"` + // Network configuration settings for your cluster. + KubernetesNetworkConfig *KubernetesNetworkConfigResponse `locationName:"kubernetesNetworkConfig" type:"structure"` + // The logging configuration for your cluster. Logging *Logging `locationName:"logging" type:"structure"` @@ -2707,6 +2721,12 @@ func (s *Cluster) SetIdentity(v *Identity) *Cluster { return s } +// SetKubernetesNetworkConfig sets the KubernetesNetworkConfig field's value. +func (s *Cluster) SetKubernetesNetworkConfig(v *KubernetesNetworkConfigResponse) *Cluster { + s.KubernetesNetworkConfig = v + return s +} + // SetLogging sets the Logging field's value. func (s *Cluster) SetLogging(v *Logging) *Cluster { s.Logging = v @@ -2765,6 +2785,9 @@ type CreateClusterInput struct { // The encryption configuration for the cluster. 
EncryptionConfig []*EncryptionConfig `locationName:"encryptionConfig" type:"list"` + // The Kubernetes network configuration for the cluster. + KubernetesNetworkConfig *KubernetesNetworkConfigRequest `locationName:"kubernetesNetworkConfig" type:"structure"` + // Enable or disable exporting the Kubernetes control plane logs for your cluster // to CloudWatch Logs. By default, cluster control plane logs aren't exported // to CloudWatch Logs. For more information, see Amazon EKS Cluster Control @@ -2793,8 +2816,8 @@ type CreateClusterInput struct { ResourcesVpcConfig *VpcConfigRequest `locationName:"resourcesVpcConfig" type:"structure" required:"true"` // The Amazon Resource Name (ARN) of the IAM role that provides permissions - // for Amazon EKS to make calls to other AWS API operations on your behalf. - // For more information, see Amazon EKS Service IAM Role (https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) + // for the Kubernetes control plane to make calls to AWS API operations on your + // behalf. For more information, see Amazon EKS Service IAM Role (https://docs.aws.amazon.com/eks/latest/userguide/service_IAM_role.html) // in the Amazon EKS User Guide . // // RoleArn is a required field @@ -2856,6 +2879,12 @@ func (s *CreateClusterInput) SetEncryptionConfig(v []*EncryptionConfig) *CreateC return s } +// SetKubernetesNetworkConfig sets the KubernetesNetworkConfig field's value. +func (s *CreateClusterInput) SetKubernetesNetworkConfig(v *KubernetesNetworkConfigRequest) *CreateClusterInput { + s.KubernetesNetworkConfig = v + return s +} + // SetLogging sets the Logging field's value. func (s *CreateClusterInput) SetLogging(v *Logging) *CreateClusterInput { s.Logging = v @@ -3064,9 +3093,13 @@ type CreateNodegroupInput struct { _ struct{} `type:"structure"` // The AMI type for your node group. GPU instance types should use the AL2_x86_64_GPU - // AMI type, which uses the Amazon EKS-optimized Linux AMI with GPU support. - // Non-GPU instances should use the AL2_x86_64 AMI type, which uses the Amazon - // EKS-optimized Linux AMI. + // AMI type. Non-GPU instances should use the AL2_x86_64 AMI type. Arm instances + // should use the AL2_ARM_64 AMI type. All types use the Amazon EKS-optimized + // Amazon Linux 2 AMI. If you specify launchTemplate, and your launch template + // uses a custom AMI, then don't specify amiType, or the node group deployment + // will fail. For more information about using launch templates with Amazon + // EKS, see Launch template support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) + // in the Amazon EKS User Guide. AmiType *string `locationName:"amiType" type:"string" enum:"AMITypes"` // Unique, case-sensitive identifier that you provide to ensure the idempotency @@ -3079,26 +3112,44 @@ type CreateNodegroupInput struct { ClusterName *string `location:"uri" locationName:"name" type:"string" required:"true"` // The root device disk size (in GiB) for your node group instances. The default - // disk size is 20 GiB. + // disk size is 20 GiB. If you specify launchTemplate, then don't specify diskSize, + // or the node group deployment will fail. For more information about using + // launch templates with Amazon EKS, see Launch template support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) + // in the Amazon EKS User Guide. DiskSize *int64 `locationName:"diskSize" type:"integer"` - // The instance type to use for your node group. 
Currently, you can specify - // a single instance type for a node group. The default value for this parameter - // is t3.medium. If you choose a GPU instance type, be sure to specify the AL2_x86_64_GPU - // with the amiType parameter. + // The instance type to use for your node group. You can specify a single instance + // type for a node group. The default value for instanceTypes is t3.medium. + // If you choose a GPU instance type, be sure to specify AL2_x86_64_GPU with + // the amiType parameter. If you specify launchTemplate, then don't specify + // instanceTypes, or the node group deployment will fail. For more information + // about using launch templates with Amazon EKS, see Launch template support + // (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) + // in the Amazon EKS User Guide. InstanceTypes []*string `locationName:"instanceTypes" type:"list"` // The Kubernetes labels to be applied to the nodes in the node group when they // are created. Labels map[string]*string `locationName:"labels" type:"map"` - // The IAM role associated with your node group. The Amazon EKS worker node - // kubelet daemon makes calls to AWS APIs on your behalf. Worker nodes receive - // permissions for these API calls through an IAM instance profile and associated - // policies. Before you can launch worker nodes and register them into a cluster, - // you must create an IAM role for those worker nodes to use when they are launched. - // For more information, see Amazon EKS Worker Node IAM Role (https://docs.aws.amazon.com/eks/latest/userguide/worker_node_IAM_role.html) - // in the Amazon EKS User Guide . + // An object representing a node group's launch template specification. If specified, + // then do not specify instanceTypes, diskSize, or remoteAccess and make sure + // that the launch template meets the requirements in launchTemplateSpecification. + LaunchTemplate *LaunchTemplateSpecification `locationName:"launchTemplate" type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM role to associate with your node + // group. The Amazon EKS worker node kubelet daemon makes calls to AWS APIs + // on your behalf. Worker nodes receive permissions for these API calls through + // an IAM instance profile and associated policies. Before you can launch worker + // nodes and register them into a cluster, you must create an IAM role for those + // worker nodes to use when they are launched. For more information, see Amazon + // EKS Worker Node IAM Role (https://docs.aws.amazon.com/eks/latest/userguide/worker_node_IAM_role.html) + // in the Amazon EKS User Guide . If you specify launchTemplate, then don't + // specify IamInstanceProfile (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html) + // in your launch template, or the node group deployment will fail. For more + // information about using launch templates with Amazon EKS, see Launch template + // support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) + // in the Amazon EKS User Guide. // // NodeRole is a required field NodeRole *string `locationName:"nodeRole" type:"string" required:"true"` @@ -3112,10 +3163,18 @@ type CreateNodegroupInput struct { // By default, the latest available AMI version for the node group's current // Kubernetes version is used. For more information, see Amazon EKS-Optimized // Linux AMI Versions (https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html) + // in the Amazon EKS User Guide. 
If you specify launchTemplate, and your launch + // template uses a custom AMI, then don't specify releaseVersion, or the node + // group deployment will fail. For more information about using launch templates + // with Amazon EKS, see Launch template support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) // in the Amazon EKS User Guide. ReleaseVersion *string `locationName:"releaseVersion" type:"string"` - // The remote access (SSH) configuration to use with your node group. + // The remote access (SSH) configuration to use with your node group. If you + // specify launchTemplate, then don't specify remoteAccess, or the node group + // deployment will fail. For more information about using launch templates with + // Amazon EKS, see Launch template support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) + // in the Amazon EKS User Guide. RemoteAccess *RemoteAccessConfig `locationName:"remoteAccess" type:"structure"` // The scaling configuration details for the Auto Scaling group that is created @@ -3125,7 +3184,11 @@ type CreateNodegroupInput struct { // The subnets to use for the Auto Scaling group that is created for your node // group. These subnets must have the tag key kubernetes.io/cluster/CLUSTER_NAME // with a value of shared, where CLUSTER_NAME is replaced with the name of your - // cluster. + // cluster. If you specify launchTemplate, then don't specify SubnetId (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html) + // in your launch template, or the node group deployment will fail. For more + // information about using launch templates with Amazon EKS, see Launch template + // support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) + // in the Amazon EKS User Guide. // // Subnets is a required field Subnets []*string `locationName:"subnets" type:"list" required:"true"` @@ -3138,6 +3201,11 @@ type CreateNodegroupInput struct { // The Kubernetes version to use for your managed nodes. By default, the Kubernetes // version of the cluster is used, and this is the only accepted specified value. + // If you specify launchTemplate, and your launch template uses a custom AMI, + // then don't specify version, or the node group deployment will fail. For more + // information about using launch templates with Amazon EKS, see Launch template + // support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) + // in the Amazon EKS User Guide. Version *string `locationName:"version" type:"string"` } @@ -3220,6 +3288,12 @@ func (s *CreateNodegroupInput) SetLabels(v map[string]*string) *CreateNodegroupI return s } +// SetLaunchTemplate sets the LaunchTemplate field's value. +func (s *CreateNodegroupInput) SetLaunchTemplate(v *LaunchTemplateSpecification) *CreateNodegroupInput { + s.LaunchTemplate = v + return s +} + // SetNodeRole sets the NodeRole field's value. func (s *CreateNodegroupInput) SetNodeRole(v string) *CreateNodegroupInput { s.NodeRole = &v @@ -4095,8 +4169,8 @@ func (s *Identity) SetOidc(v *OIDC) *Identity { // The specified parameter is invalid. Review the available parameters for the // API request. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The Amazon EKS cluster associated with the exception. 
ClusterName *string `locationName:"clusterName" type:"string"` @@ -4122,17 +4196,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4140,29 +4214,29 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The request is invalid given the state of the cluster. Check the state of // the cluster and the associated operations. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The Amazon EKS cluster associated with the exception. ClusterName *string `locationName:"clusterName" type:"string"` @@ -4185,17 +4259,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4203,22 +4277,22 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing an issue with an Amazon EKS resource. @@ -4247,6 +4321,14 @@ type Issue struct { // created. You may be able to revert to the version that Amazon EKS created // to recover. // + // * Ec2SubnetInvalidConfiguration: One or more Amazon EC2 subnets specified + // for a node group do not automatically assign public IP addresses to instances + // launched into it. If you want your instances to be assigned a public IP + // address, then you need to enable the auto-assign public IP address setting + // for the subnet. See Modifying the public IPv4 addressing attribute for + // your subnet (https://docs.aws.amazon.com/vpc/latest/userguide/vpc-ip-addressing.html#subnet-public-ip) + // in the Amazon VPC User Guide. + // // * IamInstanceProfileNotFound: We couldn't find the IAM instance profile // for your managed node group. You may be able to recreate an instance profile // with the same settings to recover. @@ -4313,6 +4395,127 @@ func (s *Issue) SetResourceIds(v []*string) *Issue { return s } +// The Kubernetes network configuration for the cluster. +type KubernetesNetworkConfigRequest struct { + _ struct{} `type:"structure"` + + // The CIDR block to assign Kubernetes service IP addresses from. If you don't + // specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 + // or 172.20.0.0/16 CIDR blocks. We recommend that you specify a block that + // does not overlap with resources in other networks that are peered or connected + // to your VPC. The block must meet the following requirements: + // + // * Within one of the following private IP address blocks: 10.0.0.0/8, 172.16.0.0.0/12, + // or 192.168.0.0/16. + // + // * Doesn't overlap with any CIDR block assigned to the VPC that you selected + // for VPC. + // + // * Between /24 and /12. + // + // You can only specify a custom CIDR block when you create a cluster and can't + // change this value once the cluster is created. + ServiceIpv4Cidr *string `locationName:"serviceIpv4Cidr" type:"string"` +} + +// String returns the string representation +func (s KubernetesNetworkConfigRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KubernetesNetworkConfigRequest) GoString() string { + return s.String() +} + +// SetServiceIpv4Cidr sets the ServiceIpv4Cidr field's value. +func (s *KubernetesNetworkConfigRequest) SetServiceIpv4Cidr(v string) *KubernetesNetworkConfigRequest { + s.ServiceIpv4Cidr = &v + return s +} + +// The Kubernetes network configuration for the cluster. +type KubernetesNetworkConfigResponse struct { + _ struct{} `type:"structure"` + + // The CIDR block that Kubernetes service IP addresses are assigned from. If + // you didn't specify a CIDR block, then Kubernetes assigns addresses from either + // the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. If this was specified, then + // it was specified when the cluster was created and it cannot be changed. 
+ ServiceIpv4Cidr *string `locationName:"serviceIpv4Cidr" type:"string"` +} + +// String returns the string representation +func (s KubernetesNetworkConfigResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KubernetesNetworkConfigResponse) GoString() string { + return s.String() +} + +// SetServiceIpv4Cidr sets the ServiceIpv4Cidr field's value. +func (s *KubernetesNetworkConfigResponse) SetServiceIpv4Cidr(v string) *KubernetesNetworkConfigResponse { + s.ServiceIpv4Cidr = &v + return s +} + +// An object representing a node group launch template specification. The launch +// template cannot include SubnetId (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateNetworkInterface.html), +// IamInstanceProfile (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IamInstanceProfile.html), +// RequestSpotInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RequestSpotInstances.html), +// HibernationOptions (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_HibernationOptionsRequest.html), +// or TerminateInstances (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TerminateInstances.html), +// or the node group deployment or update will fail. For more information about +// launch templates, see CreateLaunchTemplate (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateLaunchTemplate.html) +// in the Amazon EC2 API Reference. For more information about using launch +// templates with Amazon EKS, see Launch template support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) +// in the Amazon EKS User Guide. +// +// Specify either name or id, but not both. +type LaunchTemplateSpecification struct { + _ struct{} `type:"structure"` + + // The ID of the launch template. + Id *string `locationName:"id" type:"string"` + + // The name of the launch template. + Name *string `locationName:"name" type:"string"` + + // The version of the launch template to use. If no version is specified, then + // the template's default version is used. + Version *string `locationName:"version" type:"string"` +} + +// String returns the string representation +func (s LaunchTemplateSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateSpecification) GoString() string { + return s.String() +} + +// SetId sets the Id field's value. +func (s *LaunchTemplateSpecification) SetId(v string) *LaunchTemplateSpecification { + s.Id = &v + return s +} + +// SetName sets the Name field's value. +func (s *LaunchTemplateSpecification) SetName(v string) *LaunchTemplateSpecification { + s.Name = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *LaunchTemplateSpecification) SetVersion(v string) *LaunchTemplateSpecification { + s.Version = &v + return s +} + type ListClustersInput struct { _ struct{} `type:"structure"` @@ -4865,10 +5068,9 @@ func (s *Logging) SetClusterLogging(v []*LogSetup) *Logging { type Nodegroup struct { _ struct{} `type:"structure"` - // The AMI type associated with your node group. GPU instance types should use - // the AL2_x86_64_GPU AMI type, which uses the Amazon EKS-optimized Linux AMI - // with GPU support. Non-GPU instances should use the AL2_x86_64 AMI type, which - // uses the Amazon EKS-optimized Linux AMI. + // If the node group was deployed using a launch template with a custom AMI, + // then this is CUSTOM. 
For node groups that weren't deployed using a launch + // template, this is the AMI type that was specified in the node group configuration. AmiType *string `locationName:"amiType" type:"string" enum:"AMITypes"` // The name of the cluster that the managed node group resides in. @@ -4877,15 +5079,18 @@ type Nodegroup struct { // The Unix epoch timestamp in seconds for when the managed node group was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` - // The root device disk size (in GiB) for your node group instances. The default - // disk size is 20 GiB. + // If the node group wasn't deployed with a launch template, then this is the + // disk size in the node group configuration. If the node group was deployed + // with a launch template, then this is null. DiskSize *int64 `locationName:"diskSize" type:"integer"` // The health status of the node group. If there are issues with your node group's // health, they are listed here. Health *NodegroupHealth `locationName:"health" type:"structure"` - // The instance types associated with your node group. + // If the node group wasn't deployed with a launch template, then this is the + // instance type that is associated with the node group. If the node group was + // deployed with a launch template, then this is null. InstanceTypes []*string `locationName:"instanceTypes" type:"list"` // The Kubernetes labels applied to the nodes in the node group. @@ -4894,6 +5099,10 @@ type Nodegroup struct { // may be other Kubernetes labels applied to the nodes in this group. Labels map[string]*string `locationName:"labels" type:"map"` + // If a launch template was used to create the node group, then this is the + // launch template that was used. + LaunchTemplate *LaunchTemplateSpecification `locationName:"launchTemplate" type:"structure"` + // The Unix epoch timestamp in seconds for when the managed node group was last // modified. ModifiedAt *time.Time `locationName:"modifiedAt" type:"timestamp"` @@ -4901,10 +5110,7 @@ type Nodegroup struct { // The IAM role associated with your node group. The Amazon EKS worker node // kubelet daemon makes calls to AWS APIs on your behalf. Worker nodes receive // permissions for these API calls through an IAM instance profile and associated - // policies. Before you can launch worker nodes and register them into a cluster, - // you must create an IAM role for those worker nodes to use when they are launched. - // For more information, see Amazon EKS Worker Node IAM Role (https://docs.aws.amazon.com/eks/latest/userguide/worker_node_IAM_role.html) - // in the Amazon EKS User Guide . + // policies. NodeRole *string `locationName:"nodeRole" type:"string"` // The Amazon Resource Name (ARN) associated with the managed node group. @@ -4913,12 +5119,15 @@ type Nodegroup struct { // The name associated with an Amazon EKS managed node group. NodegroupName *string `locationName:"nodegroupName" type:"string"` - // The AMI version of the managed node group. For more information, see Amazon - // EKS-Optimized Linux AMI Versions (https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html) - // in the Amazon EKS User Guide. + // If the node group was deployed using a launch template with a custom AMI, + // then this is the AMI ID that was specified in the launch template. For node + // groups that weren't deployed using a launch template, this is the version + // of the Amazon EKS-optimized AMI that the node group was deployed with. 
ReleaseVersion *string `locationName:"releaseVersion" type:"string"` - // The remote access (SSH) configuration that is associated with the node group. + // If the node group wasn't deployed with a launch template, then this is the + // remote access configuration that is associated with the node group. If the + // node group was deployed with a launch template, then this is null. RemoteAccess *RemoteAccessConfig `locationName:"remoteAccess" type:"structure"` // The resources associated with the node group, such as Auto Scaling groups @@ -4932,9 +5141,8 @@ type Nodegroup struct { // The current status of the managed node group. Status *string `locationName:"status" type:"string" enum:"NodegroupStatus"` - // The subnets allowed for the Auto Scaling group that is associated with your - // node group. These subnets must have the following tag: kubernetes.io/cluster/CLUSTER_NAME, - // where CLUSTER_NAME is replaced with the name of your cluster. + // The subnets that were specified for the Auto Scaling group that is associated + // with your node group. Subnets []*string `locationName:"subnets" type:"list"` // The metadata applied to the node group to assist with categorization and @@ -4999,6 +5207,12 @@ func (s *Nodegroup) SetLabels(v map[string]*string) *Nodegroup { return s } +// SetLaunchTemplate sets the LaunchTemplate field's value. +func (s *Nodegroup) SetLaunchTemplate(v *LaunchTemplateSpecification) *Nodegroup { + s.LaunchTemplate = v + return s +} + // SetModifiedAt sets the ModifiedAt field's value. func (s *Nodegroup) SetModifiedAt(v time.Time) *Nodegroup { s.ModifiedAt = &v @@ -5131,7 +5345,8 @@ func (s *NodegroupResources) SetRemoteAccessSecurityGroup(v string) *NodegroupRe } // An object representing the scaling configuration details for the Auto Scaling -// group that is associated with your node group. +// group that is associated with your node group. If you specify a value for +// any property, then you must specify values for all of the properties. type NodegroupScalingConfig struct { _ struct{} `type:"structure"` @@ -5197,8 +5412,8 @@ func (s *NodegroupScalingConfig) SetMinSize(v int64) *NodegroupScalingConfig { // A service resource associated with the request could not be found. Clients // should not retry such requests. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5215,17 +5430,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5233,22 +5448,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // An object representing the OpenID Connect (https://openid.net/connect/) identity @@ -5350,8 +5565,8 @@ func (s *RemoteAccessConfig) SetSourceSecurityGroups(v []*string) *RemoteAccessC // The specified resource is in use. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The Amazon EKS cluster associated with the exception. ClusterName *string `locationName:"clusterName" type:"string"` @@ -5374,17 +5589,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5392,28 +5607,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // You have encountered a service limit on the specified resource. type ResourceLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The Amazon EKS cluster associated with the exception. ClusterName *string `locationName:"clusterName" type:"string"` @@ -5436,17 +5651,17 @@ func (s ResourceLimitExceededException) GoString() string { func newErrorResourceLimitExceededException(v protocol.ResponseMetadata) error { return &ResourceLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceLimitExceededException) Code() string { +func (s *ResourceLimitExceededException) Code() string { return "ResourceLimitExceededException" } // Message returns the exception's message. 
-func (s ResourceLimitExceededException) Message() string { +func (s *ResourceLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5454,30 +5669,30 @@ func (s ResourceLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceLimitExceededException) OrigErr() error { +func (s *ResourceLimitExceededException) OrigErr() error { return nil } -func (s ResourceLimitExceededException) Error() string { +func (s *ResourceLimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource could not be found. You can view your available clusters // with ListClusters. You can view your available managed node groups with ListNodegroups. // Amazon EKS clusters and node groups are Region-specific. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The Amazon EKS cluster associated with the exception. ClusterName *string `locationName:"clusterName" type:"string"` @@ -5503,17 +5718,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5521,28 +5736,28 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // These errors are usually caused by a server-side issue. 
type ServerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The Amazon EKS cluster associated with the exception. ClusterName *string `locationName:"clusterName" type:"string"` @@ -5565,17 +5780,17 @@ func (s ServerException) GoString() string { func newErrorServerException(v protocol.ResponseMetadata) error { return &ServerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServerException) Code() string { +func (s *ServerException) Code() string { return "ServerException" } // Message returns the exception's message. -func (s ServerException) Message() string { +func (s *ServerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5583,28 +5798,28 @@ func (s ServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServerException) OrigErr() error { +func (s *ServerException) OrigErr() error { return nil } -func (s ServerException) Error() string { +func (s *ServerException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServerException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServerException) RequestID() string { + return s.RespMetadata.RequestID } // The service is unavailable. Back off and retry the operation. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5621,17 +5836,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5639,22 +5854,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } type TagResourceInput struct { @@ -5735,8 +5950,8 @@ func (s TagResourceOutput) GoString() string { // Availability Zones for your account, from which you can choose subnets for // your cluster. type UnsupportedAvailabilityZoneException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The Amazon EKS cluster associated with the exception. ClusterName *string `locationName:"clusterName" type:"string"` @@ -5763,17 +5978,17 @@ func (s UnsupportedAvailabilityZoneException) GoString() string { func newErrorUnsupportedAvailabilityZoneException(v protocol.ResponseMetadata) error { return &UnsupportedAvailabilityZoneException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedAvailabilityZoneException) Code() string { +func (s *UnsupportedAvailabilityZoneException) Code() string { return "UnsupportedAvailabilityZoneException" } // Message returns the exception's message. -func (s UnsupportedAvailabilityZoneException) Message() string { +func (s *UnsupportedAvailabilityZoneException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5781,22 +5996,22 @@ func (s UnsupportedAvailabilityZoneException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedAvailabilityZoneException) OrigErr() error { +func (s *UnsupportedAvailabilityZoneException) OrigErr() error { return nil } -func (s UnsupportedAvailabilityZoneException) Error() string { +func (s *UnsupportedAvailabilityZoneException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedAvailabilityZoneException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedAvailabilityZoneException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedAvailabilityZoneException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedAvailabilityZoneException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -6297,6 +6512,11 @@ type UpdateNodegroupVersionInput struct { // old node whether or not any pods are running on the node. Force *bool `locationName:"force" type:"boolean"` + // An object representing a node group's launch template specification. You + // can only update a node group using a launch template if the node group was + // originally deployed with a launch template. + LaunchTemplate *LaunchTemplateSpecification `locationName:"launchTemplate" type:"structure"` + // The name of the managed node group to update. // // NodegroupName is a required field @@ -6306,13 +6526,21 @@ type UpdateNodegroupVersionInput struct { // default, the latest available AMI version for the node group's Kubernetes // version is used. For more information, see Amazon EKS-Optimized Linux AMI // Versions (https://docs.aws.amazon.com/eks/latest/userguide/eks-linux-ami-versions.html) + // in the Amazon EKS User Guide. 
If you specify launchTemplate, and your launch + // template uses a custom AMI, then don't specify releaseVersion, or the node + // group update will fail. For more information about using launch templates + // with Amazon EKS, see Launch template support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) // in the Amazon EKS User Guide. ReleaseVersion *string `locationName:"releaseVersion" type:"string"` // The Kubernetes version to update to. If no version is specified, then the // Kubernetes version of the node group does not change. You can specify the // Kubernetes version of the cluster to update the node group to the latest - // AMI version of the cluster's Kubernetes version. + // AMI version of the cluster's Kubernetes version. If you specify launchTemplate, + // and your launch template uses a custom AMI, then don't specify version, or + // the node group update will fail. For more information about using launch + // templates with Amazon EKS, see Launch template support (https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) + // in the Amazon EKS User Guide. Version *string `locationName:"version" type:"string"` } @@ -6366,6 +6594,12 @@ func (s *UpdateNodegroupVersionInput) SetForce(v bool) *UpdateNodegroupVersionIn return s } +// SetLaunchTemplate sets the LaunchTemplate field's value. +func (s *UpdateNodegroupVersionInput) SetLaunchTemplate(v *LaunchTemplateSpecification) *UpdateNodegroupVersionInput { + s.LaunchTemplate = v + return s +} + // SetNodegroupName sets the NodegroupName field's value. func (s *UpdateNodegroupVersionInput) SetNodegroupName(v string) *UpdateNodegroupVersionInput { s.NodegroupName = &v @@ -6632,8 +6866,20 @@ const ( // AMITypesAl2X8664Gpu is a AMITypes enum value AMITypesAl2X8664Gpu = "AL2_x86_64_GPU" + + // AMITypesAl2Arm64 is a AMITypes enum value + AMITypesAl2Arm64 = "AL2_ARM_64" ) +// AMITypes_Values returns all elements of the AMITypes enum +func AMITypes_Values() []string { + return []string{ + AMITypesAl2X8664, + AMITypesAl2X8664Gpu, + AMITypesAl2Arm64, + } +} + const ( // ClusterStatusCreating is a ClusterStatus enum value ClusterStatusCreating = "CREATING" @@ -6651,6 +6897,17 @@ const ( ClusterStatusUpdating = "UPDATING" ) +// ClusterStatus_Values returns all elements of the ClusterStatus enum +func ClusterStatus_Values() []string { + return []string{ + ClusterStatusCreating, + ClusterStatusActive, + ClusterStatusDeleting, + ClusterStatusFailed, + ClusterStatusUpdating, + } +} + const ( // ErrorCodeSubnetNotFound is a ErrorCode enum value ErrorCodeSubnetNotFound = "SubnetNotFound" @@ -6684,8 +6941,29 @@ const ( // ErrorCodeInsufficientFreeAddresses is a ErrorCode enum value ErrorCodeInsufficientFreeAddresses = "InsufficientFreeAddresses" + + // ErrorCodeClusterUnreachable is a ErrorCode enum value + ErrorCodeClusterUnreachable = "ClusterUnreachable" ) +// ErrorCode_Values returns all elements of the ErrorCode enum +func ErrorCode_Values() []string { + return []string{ + ErrorCodeSubnetNotFound, + ErrorCodeSecurityGroupNotFound, + ErrorCodeEniLimitReached, + ErrorCodeIpNotAvailable, + ErrorCodeAccessDenied, + ErrorCodeOperationNotPermitted, + ErrorCodeVpcIdNotFound, + ErrorCodeUnknown, + ErrorCodeNodeCreationFailure, + ErrorCodePodEvictionFailure, + ErrorCodeInsufficientFreeAddresses, + ErrorCodeClusterUnreachable, + } +} + const ( // FargateProfileStatusCreating is a FargateProfileStatus enum value FargateProfileStatusCreating = "CREATING" @@ -6703,6 +6981,17 @@ const ( FargateProfileStatusDeleteFailed 
= "DELETE_FAILED" ) +// FargateProfileStatus_Values returns all elements of the FargateProfileStatus enum +func FargateProfileStatus_Values() []string { + return []string{ + FargateProfileStatusCreating, + FargateProfileStatusActive, + FargateProfileStatusDeleting, + FargateProfileStatusCreateFailed, + FargateProfileStatusDeleteFailed, + } +} + const ( // LogTypeApi is a LogType enum value LogTypeApi = "api" @@ -6720,6 +7009,17 @@ const ( LogTypeScheduler = "scheduler" ) +// LogType_Values returns all elements of the LogType enum +func LogType_Values() []string { + return []string{ + LogTypeApi, + LogTypeAudit, + LogTypeAuthenticator, + LogTypeControllerManager, + LogTypeScheduler, + } +} + const ( // NodegroupIssueCodeAutoScalingGroupNotFound is a NodegroupIssueCode enum value NodegroupIssueCodeAutoScalingGroupNotFound = "AutoScalingGroupNotFound" @@ -6742,12 +7042,21 @@ const ( // NodegroupIssueCodeEc2subnetNotFound is a NodegroupIssueCode enum value NodegroupIssueCodeEc2subnetNotFound = "Ec2SubnetNotFound" + // NodegroupIssueCodeEc2subnetInvalidConfiguration is a NodegroupIssueCode enum value + NodegroupIssueCodeEc2subnetInvalidConfiguration = "Ec2SubnetInvalidConfiguration" + // NodegroupIssueCodeIamInstanceProfileNotFound is a NodegroupIssueCode enum value NodegroupIssueCodeIamInstanceProfileNotFound = "IamInstanceProfileNotFound" + // NodegroupIssueCodeIamLimitExceeded is a NodegroupIssueCode enum value + NodegroupIssueCodeIamLimitExceeded = "IamLimitExceeded" + // NodegroupIssueCodeIamNodeRoleNotFound is a NodegroupIssueCode enum value NodegroupIssueCodeIamNodeRoleNotFound = "IamNodeRoleNotFound" + // NodegroupIssueCodeNodeCreationFailure is a NodegroupIssueCode enum value + NodegroupIssueCodeNodeCreationFailure = "NodeCreationFailure" + // NodegroupIssueCodeAsgInstanceLaunchFailures is a NodegroupIssueCode enum value NodegroupIssueCodeAsgInstanceLaunchFailures = "AsgInstanceLaunchFailures" @@ -6762,8 +7071,35 @@ const ( // NodegroupIssueCodeInternalFailure is a NodegroupIssueCode enum value NodegroupIssueCodeInternalFailure = "InternalFailure" + + // NodegroupIssueCodeClusterUnreachable is a NodegroupIssueCode enum value + NodegroupIssueCodeClusterUnreachable = "ClusterUnreachable" ) +// NodegroupIssueCode_Values returns all elements of the NodegroupIssueCode enum +func NodegroupIssueCode_Values() []string { + return []string{ + NodegroupIssueCodeAutoScalingGroupNotFound, + NodegroupIssueCodeAutoScalingGroupInvalidConfiguration, + NodegroupIssueCodeEc2securityGroupNotFound, + NodegroupIssueCodeEc2securityGroupDeletionFailure, + NodegroupIssueCodeEc2launchTemplateNotFound, + NodegroupIssueCodeEc2launchTemplateVersionMismatch, + NodegroupIssueCodeEc2subnetNotFound, + NodegroupIssueCodeEc2subnetInvalidConfiguration, + NodegroupIssueCodeIamInstanceProfileNotFound, + NodegroupIssueCodeIamLimitExceeded, + NodegroupIssueCodeIamNodeRoleNotFound, + NodegroupIssueCodeNodeCreationFailure, + NodegroupIssueCodeAsgInstanceLaunchFailures, + NodegroupIssueCodeInstanceLimitExceeded, + NodegroupIssueCodeInsufficientFreeAddresses, + NodegroupIssueCodeAccessDenied, + NodegroupIssueCodeInternalFailure, + NodegroupIssueCodeClusterUnreachable, + } +} + const ( // NodegroupStatusCreating is a NodegroupStatus enum value NodegroupStatusCreating = "CREATING" @@ -6787,6 +7123,19 @@ const ( NodegroupStatusDegraded = "DEGRADED" ) +// NodegroupStatus_Values returns all elements of the NodegroupStatus enum +func NodegroupStatus_Values() []string { + return []string{ + NodegroupStatusCreating, + 
NodegroupStatusActive, + NodegroupStatusUpdating, + NodegroupStatusDeleting, + NodegroupStatusCreateFailed, + NodegroupStatusDeleteFailed, + NodegroupStatusDegraded, + } +} + const ( // UpdateParamTypeVersion is a UpdateParamType enum value UpdateParamTypeVersion = "Version" @@ -6825,6 +7174,24 @@ const ( UpdateParamTypePublicAccessCidrs = "PublicAccessCidrs" ) +// UpdateParamType_Values returns all elements of the UpdateParamType enum +func UpdateParamType_Values() []string { + return []string{ + UpdateParamTypeVersion, + UpdateParamTypePlatformVersion, + UpdateParamTypeEndpointPrivateAccess, + UpdateParamTypeEndpointPublicAccess, + UpdateParamTypeClusterLogging, + UpdateParamTypeDesiredSize, + UpdateParamTypeLabelsToAdd, + UpdateParamTypeLabelsToRemove, + UpdateParamTypeMaxSize, + UpdateParamTypeMinSize, + UpdateParamTypeReleaseVersion, + UpdateParamTypePublicAccessCidrs, + } +} + const ( // UpdateStatusInProgress is a UpdateStatus enum value UpdateStatusInProgress = "InProgress" @@ -6839,6 +7206,16 @@ const ( UpdateStatusSuccessful = "Successful" ) +// UpdateStatus_Values returns all elements of the UpdateStatus enum +func UpdateStatus_Values() []string { + return []string{ + UpdateStatusInProgress, + UpdateStatusFailed, + UpdateStatusCancelled, + UpdateStatusSuccessful, + } +} + const ( // UpdateTypeVersionUpdate is a UpdateType enum value UpdateTypeVersionUpdate = "VersionUpdate" @@ -6852,3 +7229,13 @@ const ( // UpdateTypeConfigUpdate is a UpdateType enum value UpdateTypeConfigUpdate = "ConfigUpdate" ) + +// UpdateType_Values returns all elements of the UpdateType enum +func UpdateType_Values() []string { + return []string{ + UpdateTypeVersionUpdate, + UpdateTypeEndpointAccessUpdate, + UpdateTypeLoggingUpdate, + UpdateTypeConfigUpdate, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/eks/service.go b/vendor/github.com/aws/aws-sdk-go/service/eks/service.go index 20b72ac3b..3fc217545 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/eks/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/eks/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go index d72e03e5e..bb9e651e4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go @@ -1017,6 +1017,12 @@ func (c *ElastiCache) CreateCacheSubnetGroupRequest(input *CreateCacheSubnetGrou // * ErrCodeInvalidSubnet "InvalidSubnet" // An invalid subnet identifier was specified. // +// * ErrCodeSubnetNotAllowedFault "SubnetNotAllowedFault" +// At least one subnet ID does not match the other subnet IDs. This mismatch +// typically occurs when a user sets one subnet ID to a regional Availability +// Zone and a different one to an outpost. Or when a user sets the subnet ID +// to an Outpost when not subscribed on this service. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheSubnetGroup func (c *ElastiCache) CreateCacheSubnetGroup(input *CreateCacheSubnetGroupInput) (*CreateCacheSubnetGroupOutput, error) { req, out := c.CreateCacheSubnetGroupRequest(input) @@ -1039,6 +1045,106 @@ func (c *ElastiCache) CreateCacheSubnetGroupWithContext(ctx aws.Context, input * return out, req.Send() } +const opCreateGlobalReplicationGroup = "CreateGlobalReplicationGroup" + +// CreateGlobalReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateGlobalReplicationGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateGlobalReplicationGroup for more information on using the CreateGlobalReplicationGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateGlobalReplicationGroupRequest method. +// req, resp := client.CreateGlobalReplicationGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateGlobalReplicationGroup +func (c *ElastiCache) CreateGlobalReplicationGroupRequest(input *CreateGlobalReplicationGroupInput) (req *request.Request, output *CreateGlobalReplicationGroupOutput) { + op := &request.Operation{ + Name: opCreateGlobalReplicationGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGlobalReplicationGroupInput{} + } + + output = &CreateGlobalReplicationGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateGlobalReplicationGroup API operation for Amazon ElastiCache. +// +// Global Datastore for Redis offers fully managed, fast, reliable and secure +// cross-region replication. Using Global Datastore for Redis, you can create +// cross-region read replica clusters for ElastiCache for Redis to enable low-latency +// reads and disaster recovery across regions. For more information, see Replication +// Across Regions Using Global Datastore (/AmazonElastiCache/latest/red-ug/Redis-Global-Clusters.html). +// +// * The GlobalReplicationGroupIdSuffix is the name of the Global Datastore. +// +// * The PrimaryReplicationGroupId represents the name of the primary cluster +// that accepts writes and will replicate updates to the secondary cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation CreateGlobalReplicationGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" +// The specified replication group does not exist. +// +// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" +// The requested replication group is not in the available state. 
+// +// * ErrCodeGlobalReplicationGroupAlreadyExistsFault "GlobalReplicationGroupAlreadyExistsFault" +// The Global Datastore name already exists. +// +// * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateGlobalReplicationGroup +func (c *ElastiCache) CreateGlobalReplicationGroup(input *CreateGlobalReplicationGroupInput) (*CreateGlobalReplicationGroupOutput, error) { + req, out := c.CreateGlobalReplicationGroupRequest(input) + return out, req.Send() +} + +// CreateGlobalReplicationGroupWithContext is the same as CreateGlobalReplicationGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateGlobalReplicationGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) CreateGlobalReplicationGroupWithContext(ctx aws.Context, input *CreateGlobalReplicationGroupInput, opts ...request.Option) (*CreateGlobalReplicationGroupOutput, error) { + req, out := c.CreateGlobalReplicationGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateReplicationGroup = "CreateReplicationGroup" // CreateReplicationGroupRequest generates a "aws/request.Request" representing the @@ -1086,6 +1192,9 @@ func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGrou // Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) // replication group. // +// This API can be used to create a standalone regional replication group or +// a secondary replication group associated with a Global Datastore. +// // A Redis (cluster mode disabled) replication group is a collection of clusters, // where one of the clusters is a read/write primary and the others are read-only // replicas. Writes to the primary are asynchronously propagated to the replicas. @@ -1098,11 +1207,9 @@ func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGrou // // When a Redis (cluster mode disabled) replication group has been successfully // created, you can add one or more read replicas to it, up to a total of 5 -// read replicas. You cannot alter a Redis (cluster mode enabled) replication -// group after it has been created. However, if you need to increase or decrease -// the number of node groups (console: shards), you can avail yourself of ElastiCache -// for Redis' enhanced backup and restore. For more information, see Restoring -// From a Backup with Cluster Resizing (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-restoring.html) +// read replicas. If you need to increase or decrease the number of node groups +// (console: shards), you can avail yourself of ElastiCache for Redis' scaling. +// For more information, see Scaling ElastiCache for Redis Clusters (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Scaling.html) // in the ElastiCache User Guide. // // This operation is valid for Redis only. 
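The hunks above add the new CreateGlobalReplicationGroup operation to the vendored ElastiCache client. As a rough usage sketch (illustrative only, not part of the vendored SDK source or this patch), a caller might wire it up as follows, assuming a configured AWS session and credentials; the identifiers are placeholders, and the GlobalReplicationGroupIdSuffix and PrimaryReplicationGroupId field names come from the operation documentation above.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := elasticache.New(sess)

	// The suffix becomes part of the Global Datastore name; the primary
	// replication group accepts writes and replicates to the secondaries.
	out, err := svc.CreateGlobalReplicationGroup(&elasticache.CreateGlobalReplicationGroupInput{
		GlobalReplicationGroupIdSuffix: aws.String("example-global-ds"), // placeholder
		PrimaryReplicationGroupId:      aws.String("example-primary"),   // placeholder: an existing regional replication group
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
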
@@ -1124,6 +1231,12 @@ func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGrou // * ErrCodeReplicationGroupAlreadyExistsFault "ReplicationGroupAlreadyExists" // The specified replication group already exists. // +// * ErrCodeInvalidUserGroupStateFault "InvalidUserGroupState" +// The user group is not in an active state. +// +// * ErrCodeUserGroupNotFoundFault "UserGroupNotFound" +// The user group was not found or does not exist +// // * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" // The requested cache node type is not available in the specified Availability // Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) @@ -1166,6 +1279,12 @@ func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGrou // number of node groups (shards) in a single replication group. The default // maximum is 90 // +// * ErrCodeGlobalReplicationGroupNotFoundFault "GlobalReplicationGroupNotFoundFault" +// The Global Datastore does not exist +// +// * ErrCodeInvalidGlobalReplicationGroupStateFault "InvalidGlobalReplicationGroupState" +// The Global Datastore is not available or in primary-only state. +// // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // @@ -1309,98 +1428,69 @@ func (c *ElastiCache) CreateSnapshotWithContext(ctx aws.Context, input *CreateSn return out, req.Send() } -const opDecreaseReplicaCount = "DecreaseReplicaCount" +const opCreateUser = "CreateUser" -// DecreaseReplicaCountRequest generates a "aws/request.Request" representing the -// client's request for the DecreaseReplicaCount operation. The "output" return +// CreateUserRequest generates a "aws/request.Request" representing the +// client's request for the CreateUser operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DecreaseReplicaCount for more information on using the DecreaseReplicaCount +// See CreateUser for more information on using the CreateUser // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DecreaseReplicaCountRequest method. -// req, resp := client.DecreaseReplicaCountRequest(params) +// // Example sending a request using the CreateUserRequest method. 
+// req, resp := client.CreateUserRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DecreaseReplicaCount -func (c *ElastiCache) DecreaseReplicaCountRequest(input *DecreaseReplicaCountInput) (req *request.Request, output *DecreaseReplicaCountOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateUser +func (c *ElastiCache) CreateUserRequest(input *CreateUserInput) (req *request.Request, output *CreateUserOutput) { op := &request.Operation{ - Name: opDecreaseReplicaCount, + Name: opCreateUser, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DecreaseReplicaCountInput{} + input = &CreateUserInput{} } - output = &DecreaseReplicaCountOutput{} + output = &CreateUserOutput{} req = c.newRequest(op, input, output) return } -// DecreaseReplicaCount API operation for Amazon ElastiCache. +// CreateUser API operation for Amazon ElastiCache. // -// Dynamically decreases the number of replics in a Redis (cluster mode disabled) -// replication group or the number of replica nodes in one or more node groups -// (shards) of a Redis (cluster mode enabled) replication group. This operation -// is performed with no cluster down time. +// For Redis engine version 6.04 onwards: Creates a Redis user. For more information, +// see Using Role Based Access Control (RBAC) (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DecreaseReplicaCount for usage and error information. +// API operation CreateUser for usage and error information. // // Returned Error Codes: -// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" -// The specified replication group does not exist. -// -// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" -// The requested replication group is not in the available state. -// -// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" -// The requested cluster is not in the available state. -// -// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" -// The VPC network is in an invalid state. -// -// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" -// The requested cache node type is not available in the specified Availability -// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) -// in the ElastiCache User Guide. -// -// * ErrCodeClusterQuotaForCustomerExceededFault "ClusterQuotaForCustomerExceeded" -// The request cannot be processed because it would exceed the allowed number -// of clusters per customer. +// * ErrCodeUserAlreadyExistsFault "UserAlreadyExists" +// A user with this ID already exists. // -// * ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault "NodeGroupsPerReplicationGroupQuotaExceeded" -// The request cannot be processed because it would exceed the maximum allowed -// number of node groups (shards) in a single replication group. 
The default -// maximum is 90 -// -// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded" -// The request cannot be processed because it would exceed the allowed number -// of cache nodes per customer. +// * ErrCodeUserQuotaExceededFault "UserQuotaExceeded" +// The quota of users has been exceeded. // -// * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" -// The specified service linked role (SLR) was not found. -// -// * ErrCodeNoOperationFault "NoOperationFault" -// The operation was not performed because no changes were required. +// * ErrCodeDuplicateUserNameFault "DuplicateUserName" +// A user with this username already exists. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -1408,212 +1498,182 @@ func (c *ElastiCache) DecreaseReplicaCountRequest(input *DecreaseReplicaCountInp // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DecreaseReplicaCount -func (c *ElastiCache) DecreaseReplicaCount(input *DecreaseReplicaCountInput) (*DecreaseReplicaCountOutput, error) { - req, out := c.DecreaseReplicaCountRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateUser +func (c *ElastiCache) CreateUser(input *CreateUserInput) (*CreateUserOutput, error) { + req, out := c.CreateUserRequest(input) return out, req.Send() } -// DecreaseReplicaCountWithContext is the same as DecreaseReplicaCount with the addition of +// CreateUserWithContext is the same as CreateUser with the addition of // the ability to pass a context and additional request options. // -// See DecreaseReplicaCount for details on how to use this API operation. +// See CreateUser for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DecreaseReplicaCountWithContext(ctx aws.Context, input *DecreaseReplicaCountInput, opts ...request.Option) (*DecreaseReplicaCountOutput, error) { - req, out := c.DecreaseReplicaCountRequest(input) +func (c *ElastiCache) CreateUserWithContext(ctx aws.Context, input *CreateUserInput, opts ...request.Option) (*CreateUserOutput, error) { + req, out := c.CreateUserRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteCacheCluster = "DeleteCacheCluster" +const opCreateUserGroup = "CreateUserGroup" -// DeleteCacheClusterRequest generates a "aws/request.Request" representing the -// client's request for the DeleteCacheCluster operation. The "output" return +// CreateUserGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateUserGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteCacheCluster for more information on using the DeleteCacheCluster +// See CreateUserGroup for more information on using the CreateUserGroup // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteCacheClusterRequest method. -// req, resp := client.DeleteCacheClusterRequest(params) +// // Example sending a request using the CreateUserGroupRequest method. +// req, resp := client.CreateUserGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheCluster -func (c *ElastiCache) DeleteCacheClusterRequest(input *DeleteCacheClusterInput) (req *request.Request, output *DeleteCacheClusterOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateUserGroup +func (c *ElastiCache) CreateUserGroupRequest(input *CreateUserGroupInput) (req *request.Request, output *CreateUserGroupOutput) { op := &request.Operation{ - Name: opDeleteCacheCluster, + Name: opCreateUserGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteCacheClusterInput{} + input = &CreateUserGroupInput{} } - output = &DeleteCacheClusterOutput{} + output = &CreateUserGroupOutput{} req = c.newRequest(op, input, output) return } -// DeleteCacheCluster API operation for Amazon ElastiCache. +// CreateUserGroup API operation for Amazon ElastiCache. // -// Deletes a previously provisioned cluster. DeleteCacheCluster deletes all -// associated cache nodes, node endpoints and the cluster itself. When you receive -// a successful response from this operation, Amazon ElastiCache immediately -// begins deleting the cluster; you cannot cancel or revert this operation. -// -// This operation is not valid for: -// -// * Redis (cluster mode enabled) clusters -// -// * A cluster that is the last read replica of a replication group -// -// * A node group (shard) that has Multi-AZ mode enabled -// -// * A cluster from a Redis (cluster mode enabled) replication group -// -// * A cluster that is not in the available state +// For Redis engine version 6.04 onwards: Creates a Redis user group. For more +// information, see Using Role Based Access Control (RBAC) (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DeleteCacheCluster for usage and error information. +// API operation CreateUserGroup for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" -// The requested cluster ID does not refer to an existing cluster. -// -// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" -// The requested cluster is not in the available state. -// -// * ErrCodeSnapshotAlreadyExistsFault "SnapshotAlreadyExistsFault" -// You already have a snapshot with the given name. -// -// * ErrCodeSnapshotFeatureNotSupportedFault "SnapshotFeatureNotSupportedFault" -// You attempted one of the following operations: +// * ErrCodeUserNotFoundFault "UserNotFound" +// The user does not exist or could not be found. // -// * Creating a snapshot of a Redis cluster running on a cache.t1.micro cache -// node. +// * ErrCodeDuplicateUserNameFault "DuplicateUserName" +// A user with this username already exists. 
// -// * Creating a snapshot of a cluster that is running Memcached rather than -// Redis. +// * ErrCodeUserGroupAlreadyExistsFault "UserGroupAlreadyExists" +// The user group with this ID already exists. // -// Neither of these are supported by ElastiCache. +// * ErrCodeDefaultUserRequired "DefaultUserRequired" +// You must add default user to a user group. // -// * ErrCodeSnapshotQuotaExceededFault "SnapshotQuotaExceededFault" -// The request cannot be processed because it would exceed the maximum number -// of snapshots. +// * ErrCodeUserGroupQuotaExceededFault "UserGroupQuotaExceeded" +// The number of users exceeds the user group limit. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" -// Two or more incompatible parameters were specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheCluster -func (c *ElastiCache) DeleteCacheCluster(input *DeleteCacheClusterInput) (*DeleteCacheClusterOutput, error) { - req, out := c.DeleteCacheClusterRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateUserGroup +func (c *ElastiCache) CreateUserGroup(input *CreateUserGroupInput) (*CreateUserGroupOutput, error) { + req, out := c.CreateUserGroupRequest(input) return out, req.Send() } -// DeleteCacheClusterWithContext is the same as DeleteCacheCluster with the addition of +// CreateUserGroupWithContext is the same as CreateUserGroup with the addition of // the ability to pass a context and additional request options. // -// See DeleteCacheCluster for details on how to use this API operation. +// See CreateUserGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DeleteCacheClusterWithContext(ctx aws.Context, input *DeleteCacheClusterInput, opts ...request.Option) (*DeleteCacheClusterOutput, error) { - req, out := c.DeleteCacheClusterRequest(input) +func (c *ElastiCache) CreateUserGroupWithContext(ctx aws.Context, input *CreateUserGroupInput, opts ...request.Option) (*CreateUserGroupOutput, error) { + req, out := c.CreateUserGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteCacheParameterGroup = "DeleteCacheParameterGroup" +const opDecreaseNodeGroupsInGlobalReplicationGroup = "DecreaseNodeGroupsInGlobalReplicationGroup" -// DeleteCacheParameterGroupRequest generates a "aws/request.Request" representing the -// client's request for the DeleteCacheParameterGroup operation. The "output" return +// DecreaseNodeGroupsInGlobalReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the DecreaseNodeGroupsInGlobalReplicationGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See DeleteCacheParameterGroup for more information on using the DeleteCacheParameterGroup +// See DecreaseNodeGroupsInGlobalReplicationGroup for more information on using the DecreaseNodeGroupsInGlobalReplicationGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteCacheParameterGroupRequest method. -// req, resp := client.DeleteCacheParameterGroupRequest(params) +// // Example sending a request using the DecreaseNodeGroupsInGlobalReplicationGroupRequest method. +// req, resp := client.DecreaseNodeGroupsInGlobalReplicationGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheParameterGroup -func (c *ElastiCache) DeleteCacheParameterGroupRequest(input *DeleteCacheParameterGroupInput) (req *request.Request, output *DeleteCacheParameterGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DecreaseNodeGroupsInGlobalReplicationGroup +func (c *ElastiCache) DecreaseNodeGroupsInGlobalReplicationGroupRequest(input *DecreaseNodeGroupsInGlobalReplicationGroupInput) (req *request.Request, output *DecreaseNodeGroupsInGlobalReplicationGroupOutput) { op := &request.Operation{ - Name: opDeleteCacheParameterGroup, + Name: opDecreaseNodeGroupsInGlobalReplicationGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteCacheParameterGroupInput{} + input = &DecreaseNodeGroupsInGlobalReplicationGroupInput{} } - output = &DeleteCacheParameterGroupOutput{} + output = &DecreaseNodeGroupsInGlobalReplicationGroupOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteCacheParameterGroup API operation for Amazon ElastiCache. +// DecreaseNodeGroupsInGlobalReplicationGroup API operation for Amazon ElastiCache. // -// Deletes the specified cache parameter group. You cannot delete a cache parameter -// group if it is associated with any cache clusters. +// Decreases the number of node groups in a Global Datastore // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DeleteCacheParameterGroup for usage and error information. +// API operation DecreaseNodeGroupsInGlobalReplicationGroup for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidCacheParameterGroupStateFault "InvalidCacheParameterGroupState" -// The current state of the cache parameter group does not allow the requested -// operation to occur. +// * ErrCodeGlobalReplicationGroupNotFoundFault "GlobalReplicationGroupNotFoundFault" +// The Global Datastore does not exist // -// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" -// The requested cache parameter group name does not refer to an existing cache -// parameter group. +// * ErrCodeInvalidGlobalReplicationGroupStateFault "InvalidGlobalReplicationGroupState" +// The Global Datastore is not available or in primary-only state. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. 
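The hunks above also introduce the Redis RBAC operations CreateUser and CreateUserGroup alongside DecreaseNodeGroupsInGlobalReplicationGroup. A rough usage sketch for the RBAC calls follows (illustrative only, not part of the vendored SDK source or this patch), assuming a configured AWS session; the user ID, user name, access string, password, and group ID are placeholders, and the input field names (UserId, UserName, Engine, AccessString, Passwords, UserGroupId, UserIds) are assumed from the ElastiCache RBAC API rather than shown in these hunks. Note that the default user must be included in a user group, per the DefaultUserRequired error documented above.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := elasticache.New(sess)

	// Create a read-only RBAC user (all values are placeholders).
	user, err := svc.CreateUser(&elasticache.CreateUserInput{
		UserId:       aws.String("app-reader"),
		UserName:     aws.String("app-reader"),
		Engine:       aws.String("redis"),
		AccessString: aws.String("on ~* +@read"),
		Passwords:    []*string{aws.String("replace-with-a-16-plus-char-secret")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(user)

	// Group the new user together with the default user, as the API requires.
	group, err := svc.CreateUserGroup(&elasticache.CreateUserGroupInput{
		UserGroupId: aws.String("app-readers"),
		Engine:      aws.String("redis"),
		UserIds:     []*string{aws.String("default"), aws.String("app-reader")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(group)
}
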
@@ -1621,91 +1681,120 @@ func (c *ElastiCache) DeleteCacheParameterGroupRequest(input *DeleteCacheParamet // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheParameterGroup -func (c *ElastiCache) DeleteCacheParameterGroup(input *DeleteCacheParameterGroupInput) (*DeleteCacheParameterGroupOutput, error) { - req, out := c.DeleteCacheParameterGroupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DecreaseNodeGroupsInGlobalReplicationGroup +func (c *ElastiCache) DecreaseNodeGroupsInGlobalReplicationGroup(input *DecreaseNodeGroupsInGlobalReplicationGroupInput) (*DecreaseNodeGroupsInGlobalReplicationGroupOutput, error) { + req, out := c.DecreaseNodeGroupsInGlobalReplicationGroupRequest(input) return out, req.Send() } -// DeleteCacheParameterGroupWithContext is the same as DeleteCacheParameterGroup with the addition of +// DecreaseNodeGroupsInGlobalReplicationGroupWithContext is the same as DecreaseNodeGroupsInGlobalReplicationGroup with the addition of // the ability to pass a context and additional request options. // -// See DeleteCacheParameterGroup for details on how to use this API operation. +// See DecreaseNodeGroupsInGlobalReplicationGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DeleteCacheParameterGroupWithContext(ctx aws.Context, input *DeleteCacheParameterGroupInput, opts ...request.Option) (*DeleteCacheParameterGroupOutput, error) { - req, out := c.DeleteCacheParameterGroupRequest(input) +func (c *ElastiCache) DecreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx aws.Context, input *DecreaseNodeGroupsInGlobalReplicationGroupInput, opts ...request.Option) (*DecreaseNodeGroupsInGlobalReplicationGroupOutput, error) { + req, out := c.DecreaseNodeGroupsInGlobalReplicationGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteCacheSecurityGroup = "DeleteCacheSecurityGroup" +const opDecreaseReplicaCount = "DecreaseReplicaCount" -// DeleteCacheSecurityGroupRequest generates a "aws/request.Request" representing the -// client's request for the DeleteCacheSecurityGroup operation. The "output" return +// DecreaseReplicaCountRequest generates a "aws/request.Request" representing the +// client's request for the DecreaseReplicaCount operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteCacheSecurityGroup for more information on using the DeleteCacheSecurityGroup +// See DecreaseReplicaCount for more information on using the DecreaseReplicaCount // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteCacheSecurityGroupRequest method. 
-// req, resp := client.DeleteCacheSecurityGroupRequest(params) +// // Example sending a request using the DecreaseReplicaCountRequest method. +// req, resp := client.DecreaseReplicaCountRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSecurityGroup -func (c *ElastiCache) DeleteCacheSecurityGroupRequest(input *DeleteCacheSecurityGroupInput) (req *request.Request, output *DeleteCacheSecurityGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DecreaseReplicaCount +func (c *ElastiCache) DecreaseReplicaCountRequest(input *DecreaseReplicaCountInput) (req *request.Request, output *DecreaseReplicaCountOutput) { op := &request.Operation{ - Name: opDeleteCacheSecurityGroup, + Name: opDecreaseReplicaCount, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteCacheSecurityGroupInput{} + input = &DecreaseReplicaCountInput{} } - output = &DeleteCacheSecurityGroupOutput{} + output = &DecreaseReplicaCountOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteCacheSecurityGroup API operation for Amazon ElastiCache. -// -// Deletes a cache security group. +// DecreaseReplicaCount API operation for Amazon ElastiCache. // -// You cannot delete a cache security group if it is associated with any clusters. +// Dynamically decreases the number of replicas in a Redis (cluster mode disabled) +// replication group or the number of replica nodes in one or more node groups +// (shards) of a Redis (cluster mode enabled) replication group. This operation +// is performed with no cluster down time. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DeleteCacheSecurityGroup for usage and error information. +// API operation DecreaseReplicaCount for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState" -// The current state of the cache security group does not allow deletion. +// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" +// The specified replication group does not exist. // -// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound" -// The requested cache security group name does not refer to an existing cache -// security group. +// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" +// The requested replication group is not in the available state. +// +// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" +// The requested cluster is not in the available state. +// +// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" +// The VPC network is in an invalid state. +// +// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" +// The requested cache node type is not available in the specified Availability +// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) +// in the ElastiCache User Guide. 
+// +// * ErrCodeClusterQuotaForCustomerExceededFault "ClusterQuotaForCustomerExceeded" +// The request cannot be processed because it would exceed the allowed number +// of clusters per customer. +// +// * ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault "NodeGroupsPerReplicationGroupQuotaExceeded" +// The request cannot be processed because it would exceed the maximum allowed +// number of node groups (shards) in a single replication group. The default +// maximum is 90 +// +// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded" +// The request cannot be processed because it would exceed the allowed number +// of cache nodes per customer. +// +// * ErrCodeServiceLinkedRoleNotFoundFault "ServiceLinkedRoleNotFoundFault" +// The specified service linked role (SLR) was not found. +// +// * ErrCodeNoOperationFault "NoOperationFault" +// The operation was not performed because no changes were required. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -1713,201 +1802,212 @@ func (c *ElastiCache) DeleteCacheSecurityGroupRequest(input *DeleteCacheSecurity // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSecurityGroup -func (c *ElastiCache) DeleteCacheSecurityGroup(input *DeleteCacheSecurityGroupInput) (*DeleteCacheSecurityGroupOutput, error) { - req, out := c.DeleteCacheSecurityGroupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DecreaseReplicaCount +func (c *ElastiCache) DecreaseReplicaCount(input *DecreaseReplicaCountInput) (*DecreaseReplicaCountOutput, error) { + req, out := c.DecreaseReplicaCountRequest(input) return out, req.Send() } -// DeleteCacheSecurityGroupWithContext is the same as DeleteCacheSecurityGroup with the addition of +// DecreaseReplicaCountWithContext is the same as DecreaseReplicaCount with the addition of // the ability to pass a context and additional request options. // -// See DeleteCacheSecurityGroup for details on how to use this API operation. +// See DecreaseReplicaCount for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DeleteCacheSecurityGroupWithContext(ctx aws.Context, input *DeleteCacheSecurityGroupInput, opts ...request.Option) (*DeleteCacheSecurityGroupOutput, error) { - req, out := c.DeleteCacheSecurityGroupRequest(input) +func (c *ElastiCache) DecreaseReplicaCountWithContext(ctx aws.Context, input *DecreaseReplicaCountInput, opts ...request.Option) (*DecreaseReplicaCountOutput, error) { + req, out := c.DecreaseReplicaCountRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteCacheSubnetGroup = "DeleteCacheSubnetGroup" +const opDeleteCacheCluster = "DeleteCacheCluster" -// DeleteCacheSubnetGroupRequest generates a "aws/request.Request" representing the -// client's request for the DeleteCacheSubnetGroup operation. The "output" return +// DeleteCacheClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCacheCluster operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteCacheSubnetGroup for more information on using the DeleteCacheSubnetGroup +// See DeleteCacheCluster for more information on using the DeleteCacheCluster // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteCacheSubnetGroupRequest method. -// req, resp := client.DeleteCacheSubnetGroupRequest(params) +// // Example sending a request using the DeleteCacheClusterRequest method. +// req, resp := client.DeleteCacheClusterRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSubnetGroup -func (c *ElastiCache) DeleteCacheSubnetGroupRequest(input *DeleteCacheSubnetGroupInput) (req *request.Request, output *DeleteCacheSubnetGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheCluster +func (c *ElastiCache) DeleteCacheClusterRequest(input *DeleteCacheClusterInput) (req *request.Request, output *DeleteCacheClusterOutput) { op := &request.Operation{ - Name: opDeleteCacheSubnetGroup, + Name: opDeleteCacheCluster, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteCacheSubnetGroupInput{} + input = &DeleteCacheClusterInput{} } - output = &DeleteCacheSubnetGroupOutput{} + output = &DeleteCacheClusterOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteCacheSubnetGroup API operation for Amazon ElastiCache. +// DeleteCacheCluster API operation for Amazon ElastiCache. // -// Deletes a cache subnet group. +// Deletes a previously provisioned cluster. DeleteCacheCluster deletes all +// associated cache nodes, node endpoints and the cluster itself. When you receive +// a successful response from this operation, Amazon ElastiCache immediately +// begins deleting the cluster; you cannot cancel or revert this operation. // -// You cannot delete a cache subnet group if it is associated with any clusters. +// This operation is not valid for: +// +// * Redis (cluster mode enabled) clusters +// +// * A cluster that is the last read replica of a replication group +// +// * A node group (shard) that has Multi-AZ mode enabled +// +// * A cluster from a Redis (cluster mode enabled) replication group +// +// * A cluster that is not in the available state // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DeleteCacheSubnetGroup for usage and error information. +// API operation DeleteCacheCluster for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheSubnetGroupInUse "CacheSubnetGroupInUse" -// The requested cache subnet group is currently in use. +// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" +// The requested cluster ID does not refer to an existing cluster. 
// -// * ErrCodeCacheSubnetGroupNotFoundFault "CacheSubnetGroupNotFoundFault" -// The requested cache subnet group name does not refer to an existing cache -// subnet group. +// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" +// The requested cluster is not in the available state. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSubnetGroup -func (c *ElastiCache) DeleteCacheSubnetGroup(input *DeleteCacheSubnetGroupInput) (*DeleteCacheSubnetGroupOutput, error) { - req, out := c.DeleteCacheSubnetGroupRequest(input) +// * ErrCodeSnapshotAlreadyExistsFault "SnapshotAlreadyExistsFault" +// You already have a snapshot with the given name. +// +// * ErrCodeSnapshotFeatureNotSupportedFault "SnapshotFeatureNotSupportedFault" +// You attempted one of the following operations: +// +// * Creating a snapshot of a Redis cluster running on a cache.t1.micro cache +// node. +// +// * Creating a snapshot of a cluster that is running Memcached rather than +// Redis. +// +// Neither of these are supported by ElastiCache. +// +// * ErrCodeSnapshotQuotaExceededFault "SnapshotQuotaExceededFault" +// The request cannot be processed because it would exceed the maximum number +// of snapshots. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheCluster +func (c *ElastiCache) DeleteCacheCluster(input *DeleteCacheClusterInput) (*DeleteCacheClusterOutput, error) { + req, out := c.DeleteCacheClusterRequest(input) return out, req.Send() } -// DeleteCacheSubnetGroupWithContext is the same as DeleteCacheSubnetGroup with the addition of +// DeleteCacheClusterWithContext is the same as DeleteCacheCluster with the addition of // the ability to pass a context and additional request options. // -// See DeleteCacheSubnetGroup for details on how to use this API operation. +// See DeleteCacheCluster for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DeleteCacheSubnetGroupWithContext(ctx aws.Context, input *DeleteCacheSubnetGroupInput, opts ...request.Option) (*DeleteCacheSubnetGroupOutput, error) { - req, out := c.DeleteCacheSubnetGroupRequest(input) +func (c *ElastiCache) DeleteCacheClusterWithContext(ctx aws.Context, input *DeleteCacheClusterInput, opts ...request.Option) (*DeleteCacheClusterOutput, error) { + req, out := c.DeleteCacheClusterRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteReplicationGroup = "DeleteReplicationGroup" +const opDeleteCacheParameterGroup = "DeleteCacheParameterGroup" -// DeleteReplicationGroupRequest generates a "aws/request.Request" representing the -// client's request for the DeleteReplicationGroup operation. The "output" return +// DeleteCacheParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCacheParameterGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteReplicationGroup for more information on using the DeleteReplicationGroup +// See DeleteCacheParameterGroup for more information on using the DeleteCacheParameterGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteReplicationGroupRequest method. -// req, resp := client.DeleteReplicationGroupRequest(params) +// // Example sending a request using the DeleteCacheParameterGroupRequest method. +// req, resp := client.DeleteCacheParameterGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteReplicationGroup -func (c *ElastiCache) DeleteReplicationGroupRequest(input *DeleteReplicationGroupInput) (req *request.Request, output *DeleteReplicationGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheParameterGroup +func (c *ElastiCache) DeleteCacheParameterGroupRequest(input *DeleteCacheParameterGroupInput) (req *request.Request, output *DeleteCacheParameterGroupOutput) { op := &request.Operation{ - Name: opDeleteReplicationGroup, + Name: opDeleteCacheParameterGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteReplicationGroupInput{} + input = &DeleteCacheParameterGroupInput{} } - output = &DeleteReplicationGroupOutput{} + output = &DeleteCacheParameterGroupOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteReplicationGroup API operation for Amazon ElastiCache. -// -// Deletes an existing replication group. By default, this operation deletes -// the entire replication group, including the primary/primaries and all of -// the read replicas. If the replication group has only one primary, you can -// optionally delete only the read replicas, while retaining the primary by -// setting RetainPrimaryCluster=true. -// -// When you receive a successful response from this operation, Amazon ElastiCache -// immediately begins deleting the selected resources; you cannot cancel or -// revert this operation. +// DeleteCacheParameterGroup API operation for Amazon ElastiCache. // -// This operation is valid for Redis only. +// Deletes the specified cache parameter group. You cannot delete a cache parameter +// group if it is associated with any cache clusters. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DeleteReplicationGroup for usage and error information. +// API operation DeleteCacheParameterGroup for usage and error information. // // Returned Error Codes: -// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" -// The specified replication group does not exist. -// -// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" -// The requested replication group is not in the available state. 
-// -// * ErrCodeSnapshotAlreadyExistsFault "SnapshotAlreadyExistsFault" -// You already have a snapshot with the given name. -// -// * ErrCodeSnapshotFeatureNotSupportedFault "SnapshotFeatureNotSupportedFault" -// You attempted one of the following operations: -// -// * Creating a snapshot of a Redis cluster running on a cache.t1.micro cache -// node. -// -// * Creating a snapshot of a cluster that is running Memcached rather than -// Redis. -// -// Neither of these are supported by ElastiCache. +// * ErrCodeInvalidCacheParameterGroupStateFault "InvalidCacheParameterGroupState" +// The current state of the cache parameter group does not allow the requested +// operation to occur. // -// * ErrCodeSnapshotQuotaExceededFault "SnapshotQuotaExceededFault" -// The request cannot be processed because it would exceed the maximum number -// of snapshots. +// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" +// The requested cache parameter group name does not refer to an existing cache +// parameter group. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -1915,92 +2015,91 @@ func (c *ElastiCache) DeleteReplicationGroupRequest(input *DeleteReplicationGrou // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteReplicationGroup -func (c *ElastiCache) DeleteReplicationGroup(input *DeleteReplicationGroupInput) (*DeleteReplicationGroupOutput, error) { - req, out := c.DeleteReplicationGroupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheParameterGroup +func (c *ElastiCache) DeleteCacheParameterGroup(input *DeleteCacheParameterGroupInput) (*DeleteCacheParameterGroupOutput, error) { + req, out := c.DeleteCacheParameterGroupRequest(input) return out, req.Send() } -// DeleteReplicationGroupWithContext is the same as DeleteReplicationGroup with the addition of +// DeleteCacheParameterGroupWithContext is the same as DeleteCacheParameterGroup with the addition of // the ability to pass a context and additional request options. // -// See DeleteReplicationGroup for details on how to use this API operation. +// See DeleteCacheParameterGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DeleteReplicationGroupWithContext(ctx aws.Context, input *DeleteReplicationGroupInput, opts ...request.Option) (*DeleteReplicationGroupOutput, error) { - req, out := c.DeleteReplicationGroupRequest(input) +func (c *ElastiCache) DeleteCacheParameterGroupWithContext(ctx aws.Context, input *DeleteCacheParameterGroupInput, opts ...request.Option) (*DeleteCacheParameterGroupOutput, error) { + req, out := c.DeleteCacheParameterGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteSnapshot = "DeleteSnapshot" +const opDeleteCacheSecurityGroup = "DeleteCacheSecurityGroup" -// DeleteSnapshotRequest generates a "aws/request.Request" representing the -// client's request for the DeleteSnapshot operation. 
The "output" return +// DeleteCacheSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCacheSecurityGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteSnapshot for more information on using the DeleteSnapshot +// See DeleteCacheSecurityGroup for more information on using the DeleteCacheSecurityGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteSnapshotRequest method. -// req, resp := client.DeleteSnapshotRequest(params) +// // Example sending a request using the DeleteCacheSecurityGroupRequest method. +// req, resp := client.DeleteCacheSecurityGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteSnapshot -func (c *ElastiCache) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSecurityGroup +func (c *ElastiCache) DeleteCacheSecurityGroupRequest(input *DeleteCacheSecurityGroupInput) (req *request.Request, output *DeleteCacheSecurityGroupOutput) { op := &request.Operation{ - Name: opDeleteSnapshot, + Name: opDeleteCacheSecurityGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteSnapshotInput{} + input = &DeleteCacheSecurityGroupInput{} } - output = &DeleteSnapshotOutput{} + output = &DeleteCacheSecurityGroupOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteSnapshot API operation for Amazon ElastiCache. +// DeleteCacheSecurityGroup API operation for Amazon ElastiCache. // -// Deletes an existing snapshot. When you receive a successful response from -// this operation, ElastiCache immediately begins deleting the snapshot; you -// cannot cancel or revert this operation. +// Deletes a cache security group. // -// This operation is valid for Redis only. +// You cannot delete a cache security group if it is associated with any clusters. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DeleteSnapshot for usage and error information. +// API operation DeleteCacheSecurityGroup for usage and error information. // // Returned Error Codes: -// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault" -// The requested snapshot name does not refer to an existing snapshot. +// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState" +// The current state of the cache security group does not allow deletion. // -// * ErrCodeInvalidSnapshotStateFault "InvalidSnapshotState" -// The current state of the snapshot does not allow the requested operation -// to occur. 
+// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound" +// The requested cache security group name does not refer to an existing cache +// security group. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -2008,389 +2107,300 @@ func (c *ElastiCache) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *re // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteSnapshot -func (c *ElastiCache) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { - req, out := c.DeleteSnapshotRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSecurityGroup +func (c *ElastiCache) DeleteCacheSecurityGroup(input *DeleteCacheSecurityGroupInput) (*DeleteCacheSecurityGroupOutput, error) { + req, out := c.DeleteCacheSecurityGroupRequest(input) return out, req.Send() } -// DeleteSnapshotWithContext is the same as DeleteSnapshot with the addition of +// DeleteCacheSecurityGroupWithContext is the same as DeleteCacheSecurityGroup with the addition of // the ability to pass a context and additional request options. // -// See DeleteSnapshot for details on how to use this API operation. +// See DeleteCacheSecurityGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DeleteSnapshotWithContext(ctx aws.Context, input *DeleteSnapshotInput, opts ...request.Option) (*DeleteSnapshotOutput, error) { - req, out := c.DeleteSnapshotRequest(input) +func (c *ElastiCache) DeleteCacheSecurityGroupWithContext(ctx aws.Context, input *DeleteCacheSecurityGroupInput, opts ...request.Option) (*DeleteCacheSecurityGroupOutput, error) { + req, out := c.DeleteCacheSecurityGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeCacheClusters = "DescribeCacheClusters" +const opDeleteCacheSubnetGroup = "DeleteCacheSubnetGroup" -// DescribeCacheClustersRequest generates a "aws/request.Request" representing the -// client's request for the DescribeCacheClusters operation. The "output" return +// DeleteCacheSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCacheSubnetGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeCacheClusters for more information on using the DescribeCacheClusters +// See DeleteCacheSubnetGroup for more information on using the DeleteCacheSubnetGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeCacheClustersRequest method. 
-// req, resp := client.DescribeCacheClustersRequest(params) +// // Example sending a request using the DeleteCacheSubnetGroupRequest method. +// req, resp := client.DeleteCacheSubnetGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheClusters -func (c *ElastiCache) DescribeCacheClustersRequest(input *DescribeCacheClustersInput) (req *request.Request, output *DescribeCacheClustersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSubnetGroup +func (c *ElastiCache) DeleteCacheSubnetGroupRequest(input *DeleteCacheSubnetGroupInput) (req *request.Request, output *DeleteCacheSubnetGroupOutput) { op := &request.Operation{ - Name: opDescribeCacheClusters, + Name: opDeleteCacheSubnetGroup, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"Marker"}, - LimitToken: "MaxRecords", - TruncationToken: "", - }, } if input == nil { - input = &DescribeCacheClustersInput{} + input = &DeleteCacheSubnetGroupInput{} } - output = &DescribeCacheClustersOutput{} + output = &DeleteCacheSubnetGroupOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DescribeCacheClusters API operation for Amazon ElastiCache. -// -// Returns information about all provisioned clusters if no cluster identifier -// is specified, or about a specific cache cluster if a cluster identifier is -// supplied. -// -// By default, abbreviated information about the clusters is returned. You can -// use the optional ShowCacheNodeInfo flag to retrieve detailed information -// about the cache nodes associated with the clusters. These details include -// the DNS address and port for the cache node endpoint. -// -// If the cluster is in the creating state, only cluster-level information is -// displayed until all of the nodes are successfully provisioned. -// -// If the cluster is in the deleting state, only cluster-level information is -// displayed. +// DeleteCacheSubnetGroup API operation for Amazon ElastiCache. // -// If cache nodes are currently being added to the cluster, node endpoint information -// and creation time for the additional nodes are not displayed until they are -// completely provisioned. When the cluster state is available, the cluster -// is ready for use. +// Deletes a cache subnet group. // -// If cache nodes are currently being removed from the cluster, no endpoint -// information for the removed nodes is displayed. +// You cannot delete a cache subnet group if it is associated with any clusters. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeCacheClusters for usage and error information. +// API operation DeleteCacheSubnetGroup for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" -// The requested cluster ID does not refer to an existing cluster. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValue" -// The value for a parameter is invalid. 
+// * ErrCodeCacheSubnetGroupInUse "CacheSubnetGroupInUse" +// The requested cache subnet group is currently in use. // -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" -// Two or more incompatible parameters were specified. +// * ErrCodeCacheSubnetGroupNotFoundFault "CacheSubnetGroupNotFoundFault" +// The requested cache subnet group name does not refer to an existing cache +// subnet group. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheClusters -func (c *ElastiCache) DescribeCacheClusters(input *DescribeCacheClustersInput) (*DescribeCacheClustersOutput, error) { - req, out := c.DescribeCacheClustersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSubnetGroup +func (c *ElastiCache) DeleteCacheSubnetGroup(input *DeleteCacheSubnetGroupInput) (*DeleteCacheSubnetGroupOutput, error) { + req, out := c.DeleteCacheSubnetGroupRequest(input) return out, req.Send() } -// DescribeCacheClustersWithContext is the same as DescribeCacheClusters with the addition of +// DeleteCacheSubnetGroupWithContext is the same as DeleteCacheSubnetGroup with the addition of // the ability to pass a context and additional request options. // -// See DescribeCacheClusters for details on how to use this API operation. +// See DeleteCacheSubnetGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeCacheClustersWithContext(ctx aws.Context, input *DescribeCacheClustersInput, opts ...request.Option) (*DescribeCacheClustersOutput, error) { - req, out := c.DescribeCacheClustersRequest(input) +func (c *ElastiCache) DeleteCacheSubnetGroupWithContext(ctx aws.Context, input *DeleteCacheSubnetGroupInput, opts ...request.Option) (*DeleteCacheSubnetGroupOutput, error) { + req, out := c.DeleteCacheSubnetGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeCacheClustersPages iterates over the pages of a DescribeCacheClusters operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeCacheClusters method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeCacheClusters operation. -// pageNum := 0 -// err := client.DescribeCacheClustersPages(params, -// func(page *elasticache.DescribeCacheClustersOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *ElastiCache) DescribeCacheClustersPages(input *DescribeCacheClustersInput, fn func(*DescribeCacheClustersOutput, bool) bool) error { - return c.DescribeCacheClustersPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeCacheClustersPagesWithContext same as DescribeCacheClustersPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ElastiCache) DescribeCacheClustersPagesWithContext(ctx aws.Context, input *DescribeCacheClustersInput, fn func(*DescribeCacheClustersOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeCacheClustersInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeCacheClustersRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*DescribeCacheClustersOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opDescribeCacheEngineVersions = "DescribeCacheEngineVersions" +const opDeleteGlobalReplicationGroup = "DeleteGlobalReplicationGroup" -// DescribeCacheEngineVersionsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeCacheEngineVersions operation. The "output" return +// DeleteGlobalReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGlobalReplicationGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeCacheEngineVersions for more information on using the DescribeCacheEngineVersions +// See DeleteGlobalReplicationGroup for more information on using the DeleteGlobalReplicationGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeCacheEngineVersionsRequest method. -// req, resp := client.DescribeCacheEngineVersionsRequest(params) +// // Example sending a request using the DeleteGlobalReplicationGroupRequest method. +// req, resp := client.DeleteGlobalReplicationGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheEngineVersions -func (c *ElastiCache) DescribeCacheEngineVersionsRequest(input *DescribeCacheEngineVersionsInput) (req *request.Request, output *DescribeCacheEngineVersionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteGlobalReplicationGroup +func (c *ElastiCache) DeleteGlobalReplicationGroupRequest(input *DeleteGlobalReplicationGroupInput) (req *request.Request, output *DeleteGlobalReplicationGroupOutput) { op := &request.Operation{ - Name: opDescribeCacheEngineVersions, + Name: opDeleteGlobalReplicationGroup, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"Marker"}, - LimitToken: "MaxRecords", - TruncationToken: "", - }, } if input == nil { - input = &DescribeCacheEngineVersionsInput{} + input = &DeleteGlobalReplicationGroupInput{} } - output = &DescribeCacheEngineVersionsOutput{} + output = &DeleteGlobalReplicationGroupOutput{} req = c.newRequest(op, input, output) return } -// DescribeCacheEngineVersions API operation for Amazon ElastiCache. +// DeleteGlobalReplicationGroup API operation for Amazon ElastiCache. 
// -// Returns a list of the available cache engines and their versions. +// Deleting a Global Datastore is a two-step process: +// +// * First, you must DisassociateGlobalReplicationGroup to remove the secondary +// clusters in the Global Datastore. +// +// * Once the Global Datastore contains only the primary cluster, you can +// use DeleteGlobalReplicationGroup API to delete the Global Datastore while +// retaining the primary cluster using Retain…= true. +// +// Since the Global Datastore has only a primary cluster, you can delete the +// Global Datastore while retaining the primary by setting RetainPrimaryCluster=true. +// +// When you receive a successful response from this operation, Amazon ElastiCache +// immediately begins deleting the selected resources; you cannot cancel or +// revert this operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeCacheEngineVersions for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheEngineVersions -func (c *ElastiCache) DescribeCacheEngineVersions(input *DescribeCacheEngineVersionsInput) (*DescribeCacheEngineVersionsOutput, error) { - req, out := c.DescribeCacheEngineVersionsRequest(input) +// API operation DeleteGlobalReplicationGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeGlobalReplicationGroupNotFoundFault "GlobalReplicationGroupNotFoundFault" +// The Global Datastore does not exist +// +// * ErrCodeInvalidGlobalReplicationGroupStateFault "InvalidGlobalReplicationGroupState" +// The Global Datastore is not available or in primary-only state. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteGlobalReplicationGroup +func (c *ElastiCache) DeleteGlobalReplicationGroup(input *DeleteGlobalReplicationGroupInput) (*DeleteGlobalReplicationGroupOutput, error) { + req, out := c.DeleteGlobalReplicationGroupRequest(input) return out, req.Send() } -// DescribeCacheEngineVersionsWithContext is the same as DescribeCacheEngineVersions with the addition of +// DeleteGlobalReplicationGroupWithContext is the same as DeleteGlobalReplicationGroup with the addition of // the ability to pass a context and additional request options. // -// See DescribeCacheEngineVersions for details on how to use this API operation. +// See DeleteGlobalReplicationGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts.
-func (c *ElastiCache) DescribeCacheEngineVersionsWithContext(ctx aws.Context, input *DescribeCacheEngineVersionsInput, opts ...request.Option) (*DescribeCacheEngineVersionsOutput, error) { - req, out := c.DescribeCacheEngineVersionsRequest(input) +func (c *ElastiCache) DeleteGlobalReplicationGroupWithContext(ctx aws.Context, input *DeleteGlobalReplicationGroupInput, opts ...request.Option) (*DeleteGlobalReplicationGroupOutput, error) { + req, out := c.DeleteGlobalReplicationGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeCacheEngineVersionsPages iterates over the pages of a DescribeCacheEngineVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeCacheEngineVersions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeCacheEngineVersions operation. -// pageNum := 0 -// err := client.DescribeCacheEngineVersionsPages(params, -// func(page *elasticache.DescribeCacheEngineVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *ElastiCache) DescribeCacheEngineVersionsPages(input *DescribeCacheEngineVersionsInput, fn func(*DescribeCacheEngineVersionsOutput, bool) bool) error { - return c.DescribeCacheEngineVersionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeCacheEngineVersionsPagesWithContext same as DescribeCacheEngineVersionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ElastiCache) DescribeCacheEngineVersionsPagesWithContext(ctx aws.Context, input *DescribeCacheEngineVersionsInput, fn func(*DescribeCacheEngineVersionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeCacheEngineVersionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeCacheEngineVersionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*DescribeCacheEngineVersionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opDescribeCacheParameterGroups = "DescribeCacheParameterGroups" +const opDeleteReplicationGroup = "DeleteReplicationGroup" -// DescribeCacheParameterGroupsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeCacheParameterGroups operation. The "output" return +// DeleteReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReplicationGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See DescribeCacheParameterGroups for more information on using the DescribeCacheParameterGroups +// See DeleteReplicationGroup for more information on using the DeleteReplicationGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeCacheParameterGroupsRequest method. -// req, resp := client.DescribeCacheParameterGroupsRequest(params) +// // Example sending a request using the DeleteReplicationGroupRequest method. +// req, resp := client.DeleteReplicationGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameterGroups -func (c *ElastiCache) DescribeCacheParameterGroupsRequest(input *DescribeCacheParameterGroupsInput) (req *request.Request, output *DescribeCacheParameterGroupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteReplicationGroup +func (c *ElastiCache) DeleteReplicationGroupRequest(input *DeleteReplicationGroupInput) (req *request.Request, output *DeleteReplicationGroupOutput) { op := &request.Operation{ - Name: opDescribeCacheParameterGroups, + Name: opDeleteReplicationGroup, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"Marker"}, - LimitToken: "MaxRecords", - TruncationToken: "", - }, } if input == nil { - input = &DescribeCacheParameterGroupsInput{} + input = &DeleteReplicationGroupInput{} } - output = &DescribeCacheParameterGroupsOutput{} + output = &DeleteReplicationGroupOutput{} req = c.newRequest(op, input, output) return } -// DescribeCacheParameterGroups API operation for Amazon ElastiCache. +// DeleteReplicationGroup API operation for Amazon ElastiCache. // -// Returns a list of cache parameter group descriptions. If a cache parameter -// group name is specified, the list contains only the descriptions for that -// group. +// Deletes an existing replication group. By default, this operation deletes +// the entire replication group, including the primary/primaries and all of +// the read replicas. If the replication group has only one primary, you can +// optionally delete only the read replicas, while retaining the primary by +// setting RetainPrimaryCluster=true. +// +// When you receive a successful response from this operation, Amazon ElastiCache +// immediately begins deleting the selected resources; you cannot cancel or +// revert this operation. +// +// This operation is valid for Redis only. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeCacheParameterGroups for usage and error information. +// API operation DeleteReplicationGroup for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" -// The requested cache parameter group name does not refer to an existing cache -// parameter group. +// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" +// The specified replication group does not exist. 
+// +// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" +// The requested replication group is not in the available state. +// +// * ErrCodeSnapshotAlreadyExistsFault "SnapshotAlreadyExistsFault" +// You already have a snapshot with the given name. +// +// * ErrCodeSnapshotFeatureNotSupportedFault "SnapshotFeatureNotSupportedFault" +// You attempted one of the following operations: +// +// * Creating a snapshot of a Redis cluster running on a cache.t1.micro cache +// node. +// +// * Creating a snapshot of a cluster that is running Memcached rather than +// Redis. +// +// Neither of these are supported by ElastiCache. +// +// * ErrCodeSnapshotQuotaExceededFault "SnapshotQuotaExceededFault" +// The request cannot be processed because it would exceed the maximum number +// of snapshots. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -2398,143 +2408,92 @@ func (c *ElastiCache) DescribeCacheParameterGroupsRequest(input *DescribeCachePa // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameterGroups -func (c *ElastiCache) DescribeCacheParameterGroups(input *DescribeCacheParameterGroupsInput) (*DescribeCacheParameterGroupsOutput, error) { - req, out := c.DescribeCacheParameterGroupsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteReplicationGroup +func (c *ElastiCache) DeleteReplicationGroup(input *DeleteReplicationGroupInput) (*DeleteReplicationGroupOutput, error) { + req, out := c.DeleteReplicationGroupRequest(input) return out, req.Send() } -// DescribeCacheParameterGroupsWithContext is the same as DescribeCacheParameterGroups with the addition of +// DeleteReplicationGroupWithContext is the same as DeleteReplicationGroup with the addition of // the ability to pass a context and additional request options. // -// See DescribeCacheParameterGroups for details on how to use this API operation. +// See DeleteReplicationGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeCacheParameterGroupsWithContext(ctx aws.Context, input *DescribeCacheParameterGroupsInput, opts ...request.Option) (*DescribeCacheParameterGroupsOutput, error) { - req, out := c.DescribeCacheParameterGroupsRequest(input) +func (c *ElastiCache) DeleteReplicationGroupWithContext(ctx aws.Context, input *DeleteReplicationGroupInput, opts ...request.Option) (*DeleteReplicationGroupOutput, error) { + req, out := c.DeleteReplicationGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeCacheParameterGroupsPages iterates over the pages of a DescribeCacheParameterGroups operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeCacheParameterGroups method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeCacheParameterGroups operation. 
-// pageNum := 0 -// err := client.DescribeCacheParameterGroupsPages(params, -// func(page *elasticache.DescribeCacheParameterGroupsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *ElastiCache) DescribeCacheParameterGroupsPages(input *DescribeCacheParameterGroupsInput, fn func(*DescribeCacheParameterGroupsOutput, bool) bool) error { - return c.DescribeCacheParameterGroupsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeCacheParameterGroupsPagesWithContext same as DescribeCacheParameterGroupsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ElastiCache) DescribeCacheParameterGroupsPagesWithContext(ctx aws.Context, input *DescribeCacheParameterGroupsInput, fn func(*DescribeCacheParameterGroupsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeCacheParameterGroupsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeCacheParameterGroupsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*DescribeCacheParameterGroupsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opDescribeCacheParameters = "DescribeCacheParameters" +const opDeleteSnapshot = "DeleteSnapshot" -// DescribeCacheParametersRequest generates a "aws/request.Request" representing the -// client's request for the DescribeCacheParameters operation. The "output" return +// DeleteSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSnapshot operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeCacheParameters for more information on using the DescribeCacheParameters +// See DeleteSnapshot for more information on using the DeleteSnapshot // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeCacheParametersRequest method. -// req, resp := client.DescribeCacheParametersRequest(params) +// // Example sending a request using the DeleteSnapshotRequest method. 
+// req, resp := client.DeleteSnapshotRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameters -func (c *ElastiCache) DescribeCacheParametersRequest(input *DescribeCacheParametersInput) (req *request.Request, output *DescribeCacheParametersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteSnapshot +func (c *ElastiCache) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { op := &request.Operation{ - Name: opDescribeCacheParameters, + Name: opDeleteSnapshot, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"Marker"}, - LimitToken: "MaxRecords", - TruncationToken: "", - }, } if input == nil { - input = &DescribeCacheParametersInput{} + input = &DeleteSnapshotInput{} } - output = &DescribeCacheParametersOutput{} + output = &DeleteSnapshotOutput{} req = c.newRequest(op, input, output) return } -// DescribeCacheParameters API operation for Amazon ElastiCache. +// DeleteSnapshot API operation for Amazon ElastiCache. // -// Returns the detailed parameter list for a particular cache parameter group. +// Deletes an existing snapshot. When you receive a successful response from +// this operation, ElastiCache immediately begins deleting the snapshot; you +// cannot cancel or revert this operation. +// +// This operation is valid for Redis only. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeCacheParameters for usage and error information. +// API operation DeleteSnapshot for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" -// The requested cache parameter group name does not refer to an existing cache -// parameter group. +// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault" +// The requested snapshot name does not refer to an existing snapshot. +// +// * ErrCodeInvalidSnapshotStateFault "InvalidSnapshotState" +// The current state of the snapshot does not allow the requested operation +// to occur. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -2542,255 +2501,233 @@ func (c *ElastiCache) DescribeCacheParametersRequest(input *DescribeCacheParamet // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameters -func (c *ElastiCache) DescribeCacheParameters(input *DescribeCacheParametersInput) (*DescribeCacheParametersOutput, error) { - req, out := c.DescribeCacheParametersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteSnapshot +func (c *ElastiCache) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) return out, req.Send() } -// DescribeCacheParametersWithContext is the same as DescribeCacheParameters with the addition of +// DeleteSnapshotWithContext is the same as DeleteSnapshot with the addition of // the ability to pass a context and additional request options. // -// See DescribeCacheParameters for details on how to use this API operation. +// See DeleteSnapshot for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeCacheParametersWithContext(ctx aws.Context, input *DescribeCacheParametersInput, opts ...request.Option) (*DescribeCacheParametersOutput, error) { - req, out := c.DescribeCacheParametersRequest(input) +func (c *ElastiCache) DeleteSnapshotWithContext(ctx aws.Context, input *DeleteSnapshotInput, opts ...request.Option) (*DeleteSnapshotOutput, error) { + req, out := c.DeleteSnapshotRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeCacheParametersPages iterates over the pages of a DescribeCacheParameters operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeCacheParameters method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeCacheParameters operation. -// pageNum := 0 -// err := client.DescribeCacheParametersPages(params, -// func(page *elasticache.DescribeCacheParametersOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *ElastiCache) DescribeCacheParametersPages(input *DescribeCacheParametersInput, fn func(*DescribeCacheParametersOutput, bool) bool) error { - return c.DescribeCacheParametersPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeCacheParametersPagesWithContext same as DescribeCacheParametersPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *ElastiCache) DescribeCacheParametersPagesWithContext(ctx aws.Context, input *DescribeCacheParametersInput, fn func(*DescribeCacheParametersOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeCacheParametersInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeCacheParametersRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*DescribeCacheParametersOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opDescribeCacheSecurityGroups = "DescribeCacheSecurityGroups" +const opDeleteUser = "DeleteUser" -// DescribeCacheSecurityGroupsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeCacheSecurityGroups operation. The "output" return +// DeleteUserRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUser operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeCacheSecurityGroups for more information on using the DescribeCacheSecurityGroups +// See DeleteUser for more information on using the DeleteUser // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeCacheSecurityGroupsRequest method. -// req, resp := client.DescribeCacheSecurityGroupsRequest(params) +// // Example sending a request using the DeleteUserRequest method. +// req, resp := client.DeleteUserRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSecurityGroups -func (c *ElastiCache) DescribeCacheSecurityGroupsRequest(input *DescribeCacheSecurityGroupsInput) (req *request.Request, output *DescribeCacheSecurityGroupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteUser +func (c *ElastiCache) DeleteUserRequest(input *DeleteUserInput) (req *request.Request, output *DeleteUserOutput) { op := &request.Operation{ - Name: opDescribeCacheSecurityGroups, + Name: opDeleteUser, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"Marker"}, - LimitToken: "MaxRecords", - TruncationToken: "", - }, } if input == nil { - input = &DescribeCacheSecurityGroupsInput{} + input = &DeleteUserInput{} } - output = &DescribeCacheSecurityGroupsOutput{} + output = &DeleteUserOutput{} req = c.newRequest(op, input, output) return } -// DescribeCacheSecurityGroups API operation for Amazon ElastiCache. +// DeleteUser API operation for Amazon ElastiCache. // -// Returns a list of cache security group descriptions. If a cache security -// group name is specified, the list contains only the description of that group. -// This applicable only when you have ElastiCache in Classic setup +// For Redis engine version 6.04 onwards: Deletes a user. The user will be removed +// from all user groups and in turn removed from all replication groups. 
For +// more information, see Using Role Based Access Control (RBAC) (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeCacheSecurityGroups for usage and error information. +// API operation DeleteUser for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound" -// The requested cache security group name does not refer to an existing cache -// security group. +// * ErrCodeInvalidUserStateFault "InvalidUserState" +// The user is not in active state. +// +// * ErrCodeUserNotFoundFault "UserNotFound" +// The user does not exist or could not be found. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" -// Two or more incompatible parameters were specified. +// * ErrCodeDefaultUserAssociatedToUserGroupFault "DefaultUserAssociatedToUserGroup" // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSecurityGroups -func (c *ElastiCache) DescribeCacheSecurityGroups(input *DescribeCacheSecurityGroupsInput) (*DescribeCacheSecurityGroupsOutput, error) { - req, out := c.DescribeCacheSecurityGroupsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteUser +func (c *ElastiCache) DeleteUser(input *DeleteUserInput) (*DeleteUserOutput, error) { + req, out := c.DeleteUserRequest(input) return out, req.Send() } -// DescribeCacheSecurityGroupsWithContext is the same as DescribeCacheSecurityGroups with the addition of +// DeleteUserWithContext is the same as DeleteUser with the addition of // the ability to pass a context and additional request options. // -// See DescribeCacheSecurityGroups for details on how to use this API operation. +// See DeleteUser for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeCacheSecurityGroupsWithContext(ctx aws.Context, input *DescribeCacheSecurityGroupsInput, opts ...request.Option) (*DescribeCacheSecurityGroupsOutput, error) { - req, out := c.DescribeCacheSecurityGroupsRequest(input) +func (c *ElastiCache) DeleteUserWithContext(ctx aws.Context, input *DeleteUserInput, opts ...request.Option) (*DeleteUserOutput, error) { + req, out := c.DeleteUserRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeCacheSecurityGroupsPages iterates over the pages of a DescribeCacheSecurityGroups operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +const opDeleteUserGroup = "DeleteUserGroup" + +// DeleteUserGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUserGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
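The sketch below is a hedged example of the new WithContext variant, deleting an RBAC user under a timeout; the user ID is a made-up placeholder and the default credential chain is assumed.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := elasticache.New(sess)

	// A non-nil context is required; passing a nil context panics inside the SDK.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// "app-user-1" is a placeholder RBAC user ID.
	if _, err := svc.DeleteUserWithContext(ctx, &elasticache.DeleteUserInput{
		UserId: aws.String("app-user-1"),
	}); err != nil {
		log.Fatalf("DeleteUser failed: %v", err)
	}
	log.Println("user deletion requested")
}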
// -// See DescribeCacheSecurityGroups method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: This operation can generate multiple requests to a service. +// See DeleteUserGroup for more information on using the DeleteUserGroup +// API call, and error handling. // -// // Example iterating over at most 3 pages of a DescribeCacheSecurityGroups operation. -// pageNum := 0 -// err := client.DescribeCacheSecurityGroupsPages(params, -// func(page *elasticache.DescribeCacheSecurityGroupsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -func (c *ElastiCache) DescribeCacheSecurityGroupsPages(input *DescribeCacheSecurityGroupsInput, fn func(*DescribeCacheSecurityGroupsOutput, bool) bool) error { - return c.DescribeCacheSecurityGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +// +// // Example sending a request using the DeleteUserGroupRequest method. +// req, resp := client.DeleteUserGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteUserGroup +func (c *ElastiCache) DeleteUserGroupRequest(input *DeleteUserGroupInput) (req *request.Request, output *DeleteUserGroupOutput) { + op := &request.Operation{ + Name: opDeleteUserGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserGroupInput{} + } + + output = &DeleteUserGroupOutput{} + req = c.newRequest(op, input, output) + return } -// DescribeCacheSecurityGroupsPagesWithContext same as DescribeCacheSecurityGroupsPages except -// it takes a Context and allows setting request options on the pages. +// DeleteUserGroup API operation for Amazon ElastiCache. +// +// For Redis engine version 6.04 onwards: Deletes a user group. The user group +// must first be disassociated from the replication group before it can be deleted. +// For more information, see Using Role Based Access Control (RBAC) (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.RBAC.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation DeleteUserGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUserGroupNotFoundFault "UserGroupNotFound" +// The user group was not found or does not exist. +// +// * ErrCodeInvalidUserGroupStateFault "InvalidUserGroupState" +// The user group is not in an active state. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteUserGroup +func (c *ElastiCache) DeleteUserGroup(input *DeleteUserGroupInput) (*DeleteUserGroupOutput, error) { + req, out := c.DeleteUserGroupRequest(input) + return out, req.Send() } + +// DeleteUserGroupWithContext is the same as DeleteUserGroup with the addition of +// the ability to pass a context and additional request options.
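To make the two-step Request/Send lifecycle mentioned in these doc comments concrete, here is a speculative sketch that builds a DeleteUserGroupRequest, injects a custom header, and then sends it; the group ID and header name are illustrative assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := elasticache.New(sess)

	// Build the request without sending it yet.
	req, out := svc.DeleteUserGroupRequest(&elasticache.DeleteUserGroupInput{
		UserGroupId: aws.String("app-user-group"), // placeholder group ID
	})

	// Example of injecting custom logic into the request lifecycle.
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo-123")

	// The output value is only valid after Send returns without error.
	if err := req.Send(); err != nil {
		log.Fatalf("DeleteUserGroup failed: %v", err)
	}
	fmt.Println(out)
}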
+// +// See DeleteUserGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeCacheSecurityGroupsPagesWithContext(ctx aws.Context, input *DescribeCacheSecurityGroupsInput, fn func(*DescribeCacheSecurityGroupsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeCacheSecurityGroupsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeCacheSecurityGroupsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*DescribeCacheSecurityGroupsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() +func (c *ElastiCache) DeleteUserGroupWithContext(ctx aws.Context, input *DeleteUserGroupInput, opts ...request.Option) (*DeleteUserGroupOutput, error) { + req, out := c.DeleteUserGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -const opDescribeCacheSubnetGroups = "DescribeCacheSubnetGroups" +const opDescribeCacheClusters = "DescribeCacheClusters" -// DescribeCacheSubnetGroupsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeCacheSubnetGroups operation. The "output" return +// DescribeCacheClustersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheClusters operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeCacheSubnetGroups for more information on using the DescribeCacheSubnetGroups +// See DescribeCacheClusters for more information on using the DescribeCacheClusters // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeCacheSubnetGroupsRequest method. -// req, resp := client.DescribeCacheSubnetGroupsRequest(params) +// // Example sending a request using the DescribeCacheClustersRequest method. 
+// req, resp := client.DescribeCacheClustersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSubnetGroups -func (c *ElastiCache) DescribeCacheSubnetGroupsRequest(input *DescribeCacheSubnetGroupsInput) (req *request.Request, output *DescribeCacheSubnetGroupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheClusters +func (c *ElastiCache) DescribeCacheClustersRequest(input *DescribeCacheClustersInput) (req *request.Request, output *DescribeCacheClustersOutput) { op := &request.Operation{ - Name: opDescribeCacheSubnetGroups, + Name: opDescribeCacheClusters, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -2802,92 +2739,115 @@ func (c *ElastiCache) DescribeCacheSubnetGroupsRequest(input *DescribeCacheSubne } if input == nil { - input = &DescribeCacheSubnetGroupsInput{} + input = &DescribeCacheClustersInput{} } - output = &DescribeCacheSubnetGroupsOutput{} + output = &DescribeCacheClustersOutput{} req = c.newRequest(op, input, output) return } -// DescribeCacheSubnetGroups API operation for Amazon ElastiCache. +// DescribeCacheClusters API operation for Amazon ElastiCache. // -// Returns a list of cache subnet group descriptions. If a subnet group name -// is specified, the list contains only the description of that group. This -// is applicable only when you have ElastiCache in VPC setup. All ElastiCache -// clusters now launch in VPC by default. +// Returns information about all provisioned clusters if no cluster identifier +// is specified, or about a specific cache cluster if a cluster identifier is +// supplied. +// +// By default, abbreviated information about the clusters is returned. You can +// use the optional ShowCacheNodeInfo flag to retrieve detailed information +// about the cache nodes associated with the clusters. These details include +// the DNS address and port for the cache node endpoint. +// +// If the cluster is in the creating state, only cluster-level information is +// displayed until all of the nodes are successfully provisioned. +// +// If the cluster is in the deleting state, only cluster-level information is +// displayed. +// +// If cache nodes are currently being added to the cluster, node endpoint information +// and creation time for the additional nodes are not displayed until they are +// completely provisioned. When the cluster state is available, the cluster +// is ready for use. +// +// If cache nodes are currently being removed from the cluster, no endpoint +// information for the removed nodes is displayed. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeCacheSubnetGroups for usage and error information. +// API operation DescribeCacheClusters for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheSubnetGroupNotFoundFault "CacheSubnetGroupNotFoundFault" -// The requested cache subnet group name does not refer to an existing cache -// subnet group. +// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" +// The requested cluster ID does not refer to an existing cluster. 
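As a hedged companion to the ShowCacheNodeInfo behaviour described above, this sketch requests node-level detail for a single cluster and prints each node endpoint; the cluster ID is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := elasticache.New(sess)

	out, err := svc.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
		CacheClusterId:    aws.String("my-cluster"), // placeholder cluster ID
		ShowCacheNodeInfo: aws.Bool(true),           // include per-node endpoints
	})
	if err != nil {
		log.Fatalf("DescribeCacheClusters failed: %v", err)
	}

	for _, cc := range out.CacheClusters {
		fmt.Printf("%s (%s)\n", aws.StringValue(cc.CacheClusterId), aws.StringValue(cc.CacheClusterStatus))
		// Node endpoints are only present once the nodes are fully provisioned.
		for _, node := range cc.CacheNodes {
			if node.Endpoint != nil {
				fmt.Printf("  node %s -> %s:%d\n",
					aws.StringValue(node.CacheNodeId),
					aws.StringValue(node.Endpoint.Address),
					aws.Int64Value(node.Endpoint.Port))
			}
		}
	}
}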
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSubnetGroups -func (c *ElastiCache) DescribeCacheSubnetGroups(input *DescribeCacheSubnetGroupsInput) (*DescribeCacheSubnetGroupsOutput, error) { - req, out := c.DescribeCacheSubnetGroupsRequest(input) +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheClusters +func (c *ElastiCache) DescribeCacheClusters(input *DescribeCacheClustersInput) (*DescribeCacheClustersOutput, error) { + req, out := c.DescribeCacheClustersRequest(input) return out, req.Send() } -// DescribeCacheSubnetGroupsWithContext is the same as DescribeCacheSubnetGroups with the addition of +// DescribeCacheClustersWithContext is the same as DescribeCacheClusters with the addition of // the ability to pass a context and additional request options. // -// See DescribeCacheSubnetGroups for details on how to use this API operation. +// See DescribeCacheClusters for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeCacheSubnetGroupsWithContext(ctx aws.Context, input *DescribeCacheSubnetGroupsInput, opts ...request.Option) (*DescribeCacheSubnetGroupsOutput, error) { - req, out := c.DescribeCacheSubnetGroupsRequest(input) +func (c *ElastiCache) DescribeCacheClustersWithContext(ctx aws.Context, input *DescribeCacheClustersInput, opts ...request.Option) (*DescribeCacheClustersOutput, error) { + req, out := c.DescribeCacheClustersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeCacheSubnetGroupsPages iterates over the pages of a DescribeCacheSubnetGroups operation, +// DescribeCacheClustersPages iterates over the pages of a DescribeCacheClusters operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeCacheSubnetGroups method for more information on how to use this operation. +// See DescribeCacheClusters method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeCacheSubnetGroups operation. +// // Example iterating over at most 3 pages of a DescribeCacheClusters operation. 
// pageNum := 0 -// err := client.DescribeCacheSubnetGroupsPages(params, -// func(page *elasticache.DescribeCacheSubnetGroupsOutput, lastPage bool) bool { +// err := client.DescribeCacheClustersPages(params, +// func(page *elasticache.DescribeCacheClustersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *ElastiCache) DescribeCacheSubnetGroupsPages(input *DescribeCacheSubnetGroupsInput, fn func(*DescribeCacheSubnetGroupsOutput, bool) bool) error { - return c.DescribeCacheSubnetGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *ElastiCache) DescribeCacheClustersPages(input *DescribeCacheClustersInput, fn func(*DescribeCacheClustersOutput, bool) bool) error { + return c.DescribeCacheClustersPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeCacheSubnetGroupsPagesWithContext same as DescribeCacheSubnetGroupsPages except +// DescribeCacheClustersPagesWithContext same as DescribeCacheClustersPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeCacheSubnetGroupsPagesWithContext(ctx aws.Context, input *DescribeCacheSubnetGroupsInput, fn func(*DescribeCacheSubnetGroupsOutput, bool) bool, opts ...request.Option) error { +func (c *ElastiCache) DescribeCacheClustersPagesWithContext(ctx aws.Context, input *DescribeCacheClustersInput, fn func(*DescribeCacheClustersOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeCacheSubnetGroupsInput + var inCpy *DescribeCacheClustersInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeCacheSubnetGroupsRequest(inCpy) + req, _ := c.DescribeCacheClustersRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -2895,7 +2855,7 @@ func (c *ElastiCache) DescribeCacheSubnetGroupsPagesWithContext(ctx aws.Context, } for p.Next() { - if !fn(p.Page().(*DescribeCacheSubnetGroupsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*DescribeCacheClustersOutput), !p.HasNextPage()) { break } } @@ -2903,132 +2863,123 @@ func (c *ElastiCache) DescribeCacheSubnetGroupsPagesWithContext(ctx aws.Context, return p.Err() } -const opDescribeEngineDefaultParameters = "DescribeEngineDefaultParameters" +const opDescribeCacheEngineVersions = "DescribeCacheEngineVersions" -// DescribeEngineDefaultParametersRequest generates a "aws/request.Request" representing the -// client's request for the DescribeEngineDefaultParameters operation. The "output" return +// DescribeCacheEngineVersionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheEngineVersions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeEngineDefaultParameters for more information on using the DescribeEngineDefaultParameters +// See DescribeCacheEngineVersions for more information on using the DescribeCacheEngineVersions // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeEngineDefaultParametersRequest method. -// req, resp := client.DescribeEngineDefaultParametersRequest(params) +// // Example sending a request using the DescribeCacheEngineVersionsRequest method. +// req, resp := client.DescribeCacheEngineVersionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEngineDefaultParameters -func (c *ElastiCache) DescribeEngineDefaultParametersRequest(input *DescribeEngineDefaultParametersInput) (req *request.Request, output *DescribeEngineDefaultParametersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheEngineVersions +func (c *ElastiCache) DescribeCacheEngineVersionsRequest(input *DescribeCacheEngineVersionsInput) (req *request.Request, output *DescribeCacheEngineVersionsOutput) { op := &request.Operation{ - Name: opDescribeEngineDefaultParameters, + Name: opDescribeCacheEngineVersions, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, - OutputTokens: []string{"EngineDefaults.Marker"}, + OutputTokens: []string{"Marker"}, LimitToken: "MaxRecords", TruncationToken: "", }, } if input == nil { - input = &DescribeEngineDefaultParametersInput{} + input = &DescribeCacheEngineVersionsInput{} } - output = &DescribeEngineDefaultParametersOutput{} + output = &DescribeCacheEngineVersionsOutput{} req = c.newRequest(op, input, output) return } -// DescribeEngineDefaultParameters API operation for Amazon ElastiCache. +// DescribeCacheEngineVersions API operation for Amazon ElastiCache. // -// Returns the default engine and system parameter information for the specified -// cache engine. +// Returns a list of the available cache engines and their versions. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeEngineDefaultParameters for usage and error information. -// -// Returned Error Codes: -// * ErrCodeInvalidParameterValueException "InvalidParameterValue" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" -// Two or more incompatible parameters were specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEngineDefaultParameters -func (c *ElastiCache) DescribeEngineDefaultParameters(input *DescribeEngineDefaultParametersInput) (*DescribeEngineDefaultParametersOutput, error) { - req, out := c.DescribeEngineDefaultParametersRequest(input) +// API operation DescribeCacheEngineVersions for usage and error information. 
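A minimal sketch, assuming the default credential chain, of listing available engine versions with the new DescribeCacheEngineVersions call; restricting the listing to the "redis" engine is an illustrative choice, not something required by the API.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := elasticache.New(sess)

	// Optionally narrow the listing to a single engine.
	out, err := svc.DescribeCacheEngineVersions(&elasticache.DescribeCacheEngineVersionsInput{
		Engine: aws.String("redis"),
	})
	if err != nil {
		log.Fatalf("DescribeCacheEngineVersions failed: %v", err)
	}
	for _, v := range out.CacheEngineVersions {
		fmt.Printf("%s %s (family %s)\n",
			aws.StringValue(v.Engine),
			aws.StringValue(v.EngineVersion),
			aws.StringValue(v.CacheParameterGroupFamily))
	}
}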
+// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheEngineVersions +func (c *ElastiCache) DescribeCacheEngineVersions(input *DescribeCacheEngineVersionsInput) (*DescribeCacheEngineVersionsOutput, error) { + req, out := c.DescribeCacheEngineVersionsRequest(input) return out, req.Send() } -// DescribeEngineDefaultParametersWithContext is the same as DescribeEngineDefaultParameters with the addition of +// DescribeCacheEngineVersionsWithContext is the same as DescribeCacheEngineVersions with the addition of // the ability to pass a context and additional request options. // -// See DescribeEngineDefaultParameters for details on how to use this API operation. +// See DescribeCacheEngineVersions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeEngineDefaultParametersWithContext(ctx aws.Context, input *DescribeEngineDefaultParametersInput, opts ...request.Option) (*DescribeEngineDefaultParametersOutput, error) { - req, out := c.DescribeEngineDefaultParametersRequest(input) +func (c *ElastiCache) DescribeCacheEngineVersionsWithContext(ctx aws.Context, input *DescribeCacheEngineVersionsInput, opts ...request.Option) (*DescribeCacheEngineVersionsOutput, error) { + req, out := c.DescribeCacheEngineVersionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeEngineDefaultParametersPages iterates over the pages of a DescribeEngineDefaultParameters operation, +// DescribeCacheEngineVersionsPages iterates over the pages of a DescribeCacheEngineVersions operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeEngineDefaultParameters method for more information on how to use this operation. +// See DescribeCacheEngineVersions method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeEngineDefaultParameters operation. +// // Example iterating over at most 3 pages of a DescribeCacheEngineVersions operation. 
// pageNum := 0 -// err := client.DescribeEngineDefaultParametersPages(params, -// func(page *elasticache.DescribeEngineDefaultParametersOutput, lastPage bool) bool { +// err := client.DescribeCacheEngineVersionsPages(params, +// func(page *elasticache.DescribeCacheEngineVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *ElastiCache) DescribeEngineDefaultParametersPages(input *DescribeEngineDefaultParametersInput, fn func(*DescribeEngineDefaultParametersOutput, bool) bool) error { - return c.DescribeEngineDefaultParametersPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *ElastiCache) DescribeCacheEngineVersionsPages(input *DescribeCacheEngineVersionsInput, fn func(*DescribeCacheEngineVersionsOutput, bool) bool) error { + return c.DescribeCacheEngineVersionsPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeEngineDefaultParametersPagesWithContext same as DescribeEngineDefaultParametersPages except +// DescribeCacheEngineVersionsPagesWithContext same as DescribeCacheEngineVersionsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeEngineDefaultParametersPagesWithContext(ctx aws.Context, input *DescribeEngineDefaultParametersInput, fn func(*DescribeEngineDefaultParametersOutput, bool) bool, opts ...request.Option) error { +func (c *ElastiCache) DescribeCacheEngineVersionsPagesWithContext(ctx aws.Context, input *DescribeCacheEngineVersionsInput, fn func(*DescribeCacheEngineVersionsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeEngineDefaultParametersInput + var inCpy *DescribeCacheEngineVersionsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeEngineDefaultParametersRequest(inCpy) + req, _ := c.DescribeCacheEngineVersionsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -3036,7 +2987,7 @@ func (c *ElastiCache) DescribeEngineDefaultParametersPagesWithContext(ctx aws.Co } for p.Next() { - if !fn(p.Page().(*DescribeEngineDefaultParametersOutput), !p.HasNextPage()) { + if !fn(p.Page().(*DescribeCacheEngineVersionsOutput), !p.HasNextPage()) { break } } @@ -3044,35 +2995,35 @@ func (c *ElastiCache) DescribeEngineDefaultParametersPagesWithContext(ctx aws.Co return p.Err() } -const opDescribeEvents = "DescribeEvents" +const opDescribeCacheParameterGroups = "DescribeCacheParameterGroups" -// DescribeEventsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeEvents operation. The "output" return +// DescribeCacheParameterGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheParameterGroups operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See DescribeEvents for more information on using the DescribeEvents +// See DescribeCacheParameterGroups for more information on using the DescribeCacheParameterGroups // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeEventsRequest method. -// req, resp := client.DescribeEventsRequest(params) +// // Example sending a request using the DescribeCacheParameterGroupsRequest method. +// req, resp := client.DescribeCacheParameterGroupsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEvents -func (c *ElastiCache) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameterGroups +func (c *ElastiCache) DescribeCacheParameterGroupsRequest(input *DescribeCacheParameterGroupsInput) (req *request.Request, output *DescribeCacheParameterGroupsOutput) { op := &request.Operation{ - Name: opDescribeEvents, + Name: opDescribeCacheParameterGroups, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -3084,96 +3035,97 @@ func (c *ElastiCache) DescribeEventsRequest(input *DescribeEventsInput) (req *re } if input == nil { - input = &DescribeEventsInput{} + input = &DescribeCacheParameterGroupsInput{} } - output = &DescribeEventsOutput{} + output = &DescribeCacheParameterGroupsOutput{} req = c.newRequest(op, input, output) return } -// DescribeEvents API operation for Amazon ElastiCache. -// -// Returns events related to clusters, cache security groups, and cache parameter -// groups. You can obtain events specific to a particular cluster, cache security -// group, or cache parameter group by providing the name as a parameter. +// DescribeCacheParameterGroups API operation for Amazon ElastiCache. // -// By default, only the events occurring within the last hour are returned; -// however, you can retrieve up to 14 days' worth of events if necessary. +// Returns a list of cache parameter group descriptions. If a cache parameter +// group name is specified, the list contains only the descriptions for that +// group. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeEvents for usage and error information. +// API operation DescribeCacheParameterGroups for usage and error information. // // Returned Error Codes: +// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" +// The requested cache parameter group name does not refer to an existing cache +// parameter group. +// // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. 
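The pagination helpers added here follow the usual SDK pattern of invoking a callback per page until it returns false; the sketch below collects every cache parameter group name across pages and is illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := elasticache.New(sess)

	var names []string
	err := svc.DescribeCacheParameterGroupsPages(&elasticache.DescribeCacheParameterGroupsInput{},
		func(page *elasticache.DescribeCacheParameterGroupsOutput, lastPage bool) bool {
			for _, g := range page.CacheParameterGroups {
				names = append(names, aws.StringValue(g.CacheParameterGroupName))
			}
			return true // keep paging until the last page
		})
	if err != nil {
		log.Fatalf("DescribeCacheParameterGroups failed: %v", err)
	}
	fmt.Println("parameter groups:", names)
}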
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEvents -func (c *ElastiCache) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) { - req, out := c.DescribeEventsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameterGroups +func (c *ElastiCache) DescribeCacheParameterGroups(input *DescribeCacheParameterGroupsInput) (*DescribeCacheParameterGroupsOutput, error) { + req, out := c.DescribeCacheParameterGroupsRequest(input) return out, req.Send() } -// DescribeEventsWithContext is the same as DescribeEvents with the addition of +// DescribeCacheParameterGroupsWithContext is the same as DescribeCacheParameterGroups with the addition of // the ability to pass a context and additional request options. // -// See DescribeEvents for details on how to use this API operation. +// See DescribeCacheParameterGroups for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeEventsWithContext(ctx aws.Context, input *DescribeEventsInput, opts ...request.Option) (*DescribeEventsOutput, error) { - req, out := c.DescribeEventsRequest(input) +func (c *ElastiCache) DescribeCacheParameterGroupsWithContext(ctx aws.Context, input *DescribeCacheParameterGroupsInput, opts ...request.Option) (*DescribeCacheParameterGroupsOutput, error) { + req, out := c.DescribeCacheParameterGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeEventsPages iterates over the pages of a DescribeEvents operation, +// DescribeCacheParameterGroupsPages iterates over the pages of a DescribeCacheParameterGroups operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeEvents method for more information on how to use this operation. +// See DescribeCacheParameterGroups method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeEvents operation. +// // Example iterating over at most 3 pages of a DescribeCacheParameterGroups operation. // pageNum := 0 -// err := client.DescribeEventsPages(params, -// func(page *elasticache.DescribeEventsOutput, lastPage bool) bool { +// err := client.DescribeCacheParameterGroupsPages(params, +// func(page *elasticache.DescribeCacheParameterGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *ElastiCache) DescribeEventsPages(input *DescribeEventsInput, fn func(*DescribeEventsOutput, bool) bool) error { - return c.DescribeEventsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *ElastiCache) DescribeCacheParameterGroupsPages(input *DescribeCacheParameterGroupsInput, fn func(*DescribeCacheParameterGroupsOutput, bool) bool) error { + return c.DescribeCacheParameterGroupsPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeEventsPagesWithContext same as DescribeEventsPages except +// DescribeCacheParameterGroupsPagesWithContext same as DescribeCacheParameterGroupsPages except // it takes a Context and allows setting request options on the pages. 
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeEventsPagesWithContext(ctx aws.Context, input *DescribeEventsInput, fn func(*DescribeEventsOutput, bool) bool, opts ...request.Option) error { +func (c *ElastiCache) DescribeCacheParameterGroupsPagesWithContext(ctx aws.Context, input *DescribeCacheParameterGroupsInput, fn func(*DescribeCacheParameterGroupsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeEventsInput + var inCpy *DescribeCacheParameterGroupsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeEventsRequest(inCpy) + req, _ := c.DescribeCacheParameterGroupsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -3181,7 +3133,7 @@ func (c *ElastiCache) DescribeEventsPagesWithContext(ctx aws.Context, input *Des } for p.Next() { - if !fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*DescribeCacheParameterGroupsOutput), !p.HasNextPage()) { break } } @@ -3189,35 +3141,35 @@ func (c *ElastiCache) DescribeEventsPagesWithContext(ctx aws.Context, input *Des return p.Err() } -const opDescribeReplicationGroups = "DescribeReplicationGroups" +const opDescribeCacheParameters = "DescribeCacheParameters" -// DescribeReplicationGroupsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeReplicationGroups operation. The "output" return +// DescribeCacheParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheParameters operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeReplicationGroups for more information on using the DescribeReplicationGroups +// See DescribeCacheParameters for more information on using the DescribeCacheParameters // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeReplicationGroupsRequest method. -// req, resp := client.DescribeReplicationGroupsRequest(params) +// // Example sending a request using the DescribeCacheParametersRequest method. 
+// req, resp := client.DescribeCacheParametersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReplicationGroups -func (c *ElastiCache) DescribeReplicationGroupsRequest(input *DescribeReplicationGroupsInput) (req *request.Request, output *DescribeReplicationGroupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameters +func (c *ElastiCache) DescribeCacheParametersRequest(input *DescribeCacheParametersInput) (req *request.Request, output *DescribeCacheParametersOutput) { op := &request.Operation{ - Name: opDescribeReplicationGroups, + Name: opDescribeCacheParameters, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -3229,32 +3181,29 @@ func (c *ElastiCache) DescribeReplicationGroupsRequest(input *DescribeReplicatio } if input == nil { - input = &DescribeReplicationGroupsInput{} + input = &DescribeCacheParametersInput{} } - output = &DescribeReplicationGroupsOutput{} + output = &DescribeCacheParametersOutput{} req = c.newRequest(op, input, output) return } -// DescribeReplicationGroups API operation for Amazon ElastiCache. -// -// Returns information about a particular replication group. If no identifier -// is specified, DescribeReplicationGroups returns information about all replication -// groups. +// DescribeCacheParameters API operation for Amazon ElastiCache. // -// This operation is valid for Redis only. +// Returns the detailed parameter list for a particular cache parameter group. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeReplicationGroups for usage and error information. +// API operation DescribeCacheParameters for usage and error information. // // Returned Error Codes: -// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" -// The specified replication group does not exist. +// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" +// The requested cache parameter group name does not refer to an existing cache +// parameter group. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -3262,65 +3211,65 @@ func (c *ElastiCache) DescribeReplicationGroupsRequest(input *DescribeReplicatio // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. 
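As a hedged example of DescribeCacheParameters, the sketch below lists only user-modified parameters for one group; the group name and the "user" source filter are assumptions for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := elasticache.New(sess)

	// "my-param-group" is a placeholder; Source "user" limits the listing to
	// parameters that were explicitly modified.
	out, err := svc.DescribeCacheParameters(&elasticache.DescribeCacheParametersInput{
		CacheParameterGroupName: aws.String("my-param-group"),
		Source:                  aws.String("user"),
	})
	if err != nil {
		log.Fatalf("DescribeCacheParameters failed: %v", err)
	}
	for _, p := range out.Parameters {
		fmt.Printf("%s = %s\n", aws.StringValue(p.ParameterName), aws.StringValue(p.ParameterValue))
	}
}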
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReplicationGroups -func (c *ElastiCache) DescribeReplicationGroups(input *DescribeReplicationGroupsInput) (*DescribeReplicationGroupsOutput, error) { - req, out := c.DescribeReplicationGroupsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameters +func (c *ElastiCache) DescribeCacheParameters(input *DescribeCacheParametersInput) (*DescribeCacheParametersOutput, error) { + req, out := c.DescribeCacheParametersRequest(input) return out, req.Send() } -// DescribeReplicationGroupsWithContext is the same as DescribeReplicationGroups with the addition of +// DescribeCacheParametersWithContext is the same as DescribeCacheParameters with the addition of // the ability to pass a context and additional request options. // -// See DescribeReplicationGroups for details on how to use this API operation. +// See DescribeCacheParameters for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeReplicationGroupsWithContext(ctx aws.Context, input *DescribeReplicationGroupsInput, opts ...request.Option) (*DescribeReplicationGroupsOutput, error) { - req, out := c.DescribeReplicationGroupsRequest(input) +func (c *ElastiCache) DescribeCacheParametersWithContext(ctx aws.Context, input *DescribeCacheParametersInput, opts ...request.Option) (*DescribeCacheParametersOutput, error) { + req, out := c.DescribeCacheParametersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeReplicationGroupsPages iterates over the pages of a DescribeReplicationGroups operation, +// DescribeCacheParametersPages iterates over the pages of a DescribeCacheParameters operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeReplicationGroups method for more information on how to use this operation. +// See DescribeCacheParameters method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeReplicationGroups operation. +// // Example iterating over at most 3 pages of a DescribeCacheParameters operation. 
// pageNum := 0 -// err := client.DescribeReplicationGroupsPages(params, -// func(page *elasticache.DescribeReplicationGroupsOutput, lastPage bool) bool { +// err := client.DescribeCacheParametersPages(params, +// func(page *elasticache.DescribeCacheParametersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *ElastiCache) DescribeReplicationGroupsPages(input *DescribeReplicationGroupsInput, fn func(*DescribeReplicationGroupsOutput, bool) bool) error { - return c.DescribeReplicationGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *ElastiCache) DescribeCacheParametersPages(input *DescribeCacheParametersInput, fn func(*DescribeCacheParametersOutput, bool) bool) error { + return c.DescribeCacheParametersPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeReplicationGroupsPagesWithContext same as DescribeReplicationGroupsPages except +// DescribeCacheParametersPagesWithContext same as DescribeCacheParametersPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeReplicationGroupsPagesWithContext(ctx aws.Context, input *DescribeReplicationGroupsInput, fn func(*DescribeReplicationGroupsOutput, bool) bool, opts ...request.Option) error { +func (c *ElastiCache) DescribeCacheParametersPagesWithContext(ctx aws.Context, input *DescribeCacheParametersInput, fn func(*DescribeCacheParametersOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeReplicationGroupsInput + var inCpy *DescribeCacheParametersInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeReplicationGroupsRequest(inCpy) + req, _ := c.DescribeCacheParametersRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -3328,7 +3277,7 @@ func (c *ElastiCache) DescribeReplicationGroupsPagesWithContext(ctx aws.Context, } for p.Next() { - if !fn(p.Page().(*DescribeReplicationGroupsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*DescribeCacheParametersOutput), !p.HasNextPage()) { break } } @@ -3336,35 +3285,35 @@ func (c *ElastiCache) DescribeReplicationGroupsPagesWithContext(ctx aws.Context, return p.Err() } -const opDescribeReservedCacheNodes = "DescribeReservedCacheNodes" +const opDescribeCacheSecurityGroups = "DescribeCacheSecurityGroups" -// DescribeReservedCacheNodesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeReservedCacheNodes operation. The "output" return +// DescribeCacheSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheSecurityGroups operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeReservedCacheNodes for more information on using the DescribeReservedCacheNodes +// See DescribeCacheSecurityGroups for more information on using the DescribeCacheSecurityGroups // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeReservedCacheNodesRequest method. -// req, resp := client.DescribeReservedCacheNodesRequest(params) +// // Example sending a request using the DescribeCacheSecurityGroupsRequest method. +// req, resp := client.DescribeCacheSecurityGroupsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodes -func (c *ElastiCache) DescribeReservedCacheNodesRequest(input *DescribeReservedCacheNodesInput) (req *request.Request, output *DescribeReservedCacheNodesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSecurityGroups +func (c *ElastiCache) DescribeCacheSecurityGroupsRequest(input *DescribeCacheSecurityGroupsInput) (req *request.Request, output *DescribeCacheSecurityGroupsOutput) { op := &request.Operation{ - Name: opDescribeReservedCacheNodes, + Name: opDescribeCacheSecurityGroups, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -3376,29 +3325,31 @@ func (c *ElastiCache) DescribeReservedCacheNodesRequest(input *DescribeReservedC } if input == nil { - input = &DescribeReservedCacheNodesInput{} + input = &DescribeCacheSecurityGroupsInput{} } - output = &DescribeReservedCacheNodesOutput{} + output = &DescribeCacheSecurityGroupsOutput{} req = c.newRequest(op, input, output) return } -// DescribeReservedCacheNodes API operation for Amazon ElastiCache. +// DescribeCacheSecurityGroups API operation for Amazon ElastiCache. // -// Returns information about reserved cache nodes for this account, or about -// a specified reserved cache node. +// Returns a list of cache security group descriptions. If a cache security +// group name is specified, the list contains only the description of that group. +// This is applicable only when you have ElastiCache in Classic setup. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeReservedCacheNodes for usage and error information. +// API operation DescribeCacheSecurityGroups for usage and error information. // // Returned Error Codes: -// * ErrCodeReservedCacheNodeNotFoundFault "ReservedCacheNodeNotFound" -// The requested reserved cache node was not found. +// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound" +// The requested cache security group name does not refer to an existing cache +// security group. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -3406,65 +3357,65 @@ func (c *ElastiCache) DescribeReservedCacheNodesRequest(input *DescribeReservedC // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified.
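Shown below is a speculative use of DescribeCacheSecurityGroups that treats the CacheSecurityGroupNotFound code as a non-fatal outcome, which is common since this API only applies to EC2-Classic style setups; the group name is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := elasticache.New(sess)

	// "legacy-sg" is a placeholder cache security group name.
	out, err := svc.DescribeCacheSecurityGroups(&elasticache.DescribeCacheSecurityGroupsInput{
		CacheSecurityGroupName: aws.String("legacy-sg"),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == elasticache.ErrCodeCacheSecurityGroupNotFoundFault {
			log.Printf("no such cache security group: %s", aerr.Message())
			return
		}
		log.Fatalf("DescribeCacheSecurityGroups failed: %v", err)
	}
	for _, sg := range out.CacheSecurityGroups {
		fmt.Println(aws.StringValue(sg.CacheSecurityGroupName))
	}
}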
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodes -func (c *ElastiCache) DescribeReservedCacheNodes(input *DescribeReservedCacheNodesInput) (*DescribeReservedCacheNodesOutput, error) { - req, out := c.DescribeReservedCacheNodesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSecurityGroups +func (c *ElastiCache) DescribeCacheSecurityGroups(input *DescribeCacheSecurityGroupsInput) (*DescribeCacheSecurityGroupsOutput, error) { + req, out := c.DescribeCacheSecurityGroupsRequest(input) return out, req.Send() } -// DescribeReservedCacheNodesWithContext is the same as DescribeReservedCacheNodes with the addition of +// DescribeCacheSecurityGroupsWithContext is the same as DescribeCacheSecurityGroups with the addition of // the ability to pass a context and additional request options. // -// See DescribeReservedCacheNodes for details on how to use this API operation. +// See DescribeCacheSecurityGroups for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeReservedCacheNodesWithContext(ctx aws.Context, input *DescribeReservedCacheNodesInput, opts ...request.Option) (*DescribeReservedCacheNodesOutput, error) { - req, out := c.DescribeReservedCacheNodesRequest(input) +func (c *ElastiCache) DescribeCacheSecurityGroupsWithContext(ctx aws.Context, input *DescribeCacheSecurityGroupsInput, opts ...request.Option) (*DescribeCacheSecurityGroupsOutput, error) { + req, out := c.DescribeCacheSecurityGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeReservedCacheNodesPages iterates over the pages of a DescribeReservedCacheNodes operation, +// DescribeCacheSecurityGroupsPages iterates over the pages of a DescribeCacheSecurityGroups operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeReservedCacheNodes method for more information on how to use this operation. +// See DescribeCacheSecurityGroups method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeReservedCacheNodes operation. +// // Example iterating over at most 3 pages of a DescribeCacheSecurityGroups operation. 
// pageNum := 0 -// err := client.DescribeReservedCacheNodesPages(params, -// func(page *elasticache.DescribeReservedCacheNodesOutput, lastPage bool) bool { +// err := client.DescribeCacheSecurityGroupsPages(params, +// func(page *elasticache.DescribeCacheSecurityGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *ElastiCache) DescribeReservedCacheNodesPages(input *DescribeReservedCacheNodesInput, fn func(*DescribeReservedCacheNodesOutput, bool) bool) error { - return c.DescribeReservedCacheNodesPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *ElastiCache) DescribeCacheSecurityGroupsPages(input *DescribeCacheSecurityGroupsInput, fn func(*DescribeCacheSecurityGroupsOutput, bool) bool) error { + return c.DescribeCacheSecurityGroupsPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeReservedCacheNodesPagesWithContext same as DescribeReservedCacheNodesPages except +// DescribeCacheSecurityGroupsPagesWithContext same as DescribeCacheSecurityGroupsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeReservedCacheNodesPagesWithContext(ctx aws.Context, input *DescribeReservedCacheNodesInput, fn func(*DescribeReservedCacheNodesOutput, bool) bool, opts ...request.Option) error { +func (c *ElastiCache) DescribeCacheSecurityGroupsPagesWithContext(ctx aws.Context, input *DescribeCacheSecurityGroupsInput, fn func(*DescribeCacheSecurityGroupsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeReservedCacheNodesInput + var inCpy *DescribeCacheSecurityGroupsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeReservedCacheNodesRequest(inCpy) + req, _ := c.DescribeCacheSecurityGroupsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -3472,7 +3423,7 @@ func (c *ElastiCache) DescribeReservedCacheNodesPagesWithContext(ctx aws.Context } for p.Next() { - if !fn(p.Page().(*DescribeReservedCacheNodesOutput), !p.HasNextPage()) { + if !fn(p.Page().(*DescribeCacheSecurityGroupsOutput), !p.HasNextPage()) { break } } @@ -3480,35 +3431,35 @@ func (c *ElastiCache) DescribeReservedCacheNodesPagesWithContext(ctx aws.Context return p.Err() } -const opDescribeReservedCacheNodesOfferings = "DescribeReservedCacheNodesOfferings" +const opDescribeCacheSubnetGroups = "DescribeCacheSubnetGroups" -// DescribeReservedCacheNodesOfferingsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeReservedCacheNodesOfferings operation. The "output" return +// DescribeCacheSubnetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheSubnetGroups operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See DescribeReservedCacheNodesOfferings for more information on using the DescribeReservedCacheNodesOfferings +// See DescribeCacheSubnetGroups for more information on using the DescribeCacheSubnetGroups // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeReservedCacheNodesOfferingsRequest method. -// req, resp := client.DescribeReservedCacheNodesOfferingsRequest(params) +// // Example sending a request using the DescribeCacheSubnetGroupsRequest method. +// req, resp := client.DescribeCacheSubnetGroupsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodesOfferings -func (c *ElastiCache) DescribeReservedCacheNodesOfferingsRequest(input *DescribeReservedCacheNodesOfferingsInput) (req *request.Request, output *DescribeReservedCacheNodesOfferingsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSubnetGroups +func (c *ElastiCache) DescribeCacheSubnetGroupsRequest(input *DescribeCacheSubnetGroupsInput) (req *request.Request, output *DescribeCacheSubnetGroupsOutput) { op := &request.Operation{ - Name: opDescribeReservedCacheNodesOfferings, + Name: opDescribeCacheSubnetGroups, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -3520,94 +3471,92 @@ func (c *ElastiCache) DescribeReservedCacheNodesOfferingsRequest(input *Describe } if input == nil { - input = &DescribeReservedCacheNodesOfferingsInput{} + input = &DescribeCacheSubnetGroupsInput{} } - output = &DescribeReservedCacheNodesOfferingsOutput{} + output = &DescribeCacheSubnetGroupsOutput{} req = c.newRequest(op, input, output) return } -// DescribeReservedCacheNodesOfferings API operation for Amazon ElastiCache. +// DescribeCacheSubnetGroups API operation for Amazon ElastiCache. // -// Lists available reserved cache node offerings. +// Returns a list of cache subnet group descriptions. If a subnet group name +// is specified, the list contains only the description of that group. This +// is applicable only when you have ElastiCache in VPC setup. All ElastiCache +// clusters now launch in VPC by default. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeReservedCacheNodesOfferings for usage and error information. +// API operation DescribeCacheSubnetGroups for usage and error information. // // Returned Error Codes: -// * ErrCodeReservedCacheNodesOfferingNotFoundFault "ReservedCacheNodesOfferingNotFound" -// The requested cache node offering does not exist. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValue" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" -// Two or more incompatible parameters were specified. +// * ErrCodeCacheSubnetGroupNotFoundFault "CacheSubnetGroupNotFoundFault" +// The requested cache subnet group name does not refer to an existing cache +// subnet group. 
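The comments above repeatedly point at the awserr type-assertion pattern for distinguishing service errors. The sketch below shows one plausible way to apply it to DescribeCacheSubnetGroups; the subnet group name is hypothetical, and CacheSubnetGroupNotFoundFault is the error code documented above for a missing group.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	// "example-subnet-group" is only an illustrative name.
	_, err := svc.DescribeCacheSubnetGroups(&elasticache.DescribeCacheSubnetGroupsInput{
		CacheSubnetGroupName: aws.String("example-subnet-group"),
	})
	if aerr, ok := err.(awserr.Error); ok {
		// Service and SDK errors expose Code and Message for inspection.
		switch aerr.Code() {
		case elasticache.ErrCodeCacheSubnetGroupNotFoundFault:
			log.Printf("subnet group not found: %s", aerr.Message())
		default:
			log.Printf("API error %s: %s", aerr.Code(), aerr.Message())
		}
	} else if err != nil {
		log.Printf("non-API error: %v", err)
	}
}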
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodesOfferings -func (c *ElastiCache) DescribeReservedCacheNodesOfferings(input *DescribeReservedCacheNodesOfferingsInput) (*DescribeReservedCacheNodesOfferingsOutput, error) { - req, out := c.DescribeReservedCacheNodesOfferingsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSubnetGroups +func (c *ElastiCache) DescribeCacheSubnetGroups(input *DescribeCacheSubnetGroupsInput) (*DescribeCacheSubnetGroupsOutput, error) { + req, out := c.DescribeCacheSubnetGroupsRequest(input) return out, req.Send() } -// DescribeReservedCacheNodesOfferingsWithContext is the same as DescribeReservedCacheNodesOfferings with the addition of +// DescribeCacheSubnetGroupsWithContext is the same as DescribeCacheSubnetGroups with the addition of // the ability to pass a context and additional request options. // -// See DescribeReservedCacheNodesOfferings for details on how to use this API operation. +// See DescribeCacheSubnetGroups for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeReservedCacheNodesOfferingsWithContext(ctx aws.Context, input *DescribeReservedCacheNodesOfferingsInput, opts ...request.Option) (*DescribeReservedCacheNodesOfferingsOutput, error) { - req, out := c.DescribeReservedCacheNodesOfferingsRequest(input) +func (c *ElastiCache) DescribeCacheSubnetGroupsWithContext(ctx aws.Context, input *DescribeCacheSubnetGroupsInput, opts ...request.Option) (*DescribeCacheSubnetGroupsOutput, error) { + req, out := c.DescribeCacheSubnetGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeReservedCacheNodesOfferingsPages iterates over the pages of a DescribeReservedCacheNodesOfferings operation, +// DescribeCacheSubnetGroupsPages iterates over the pages of a DescribeCacheSubnetGroups operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeReservedCacheNodesOfferings method for more information on how to use this operation. +// See DescribeCacheSubnetGroups method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeReservedCacheNodesOfferings operation. +// // Example iterating over at most 3 pages of a DescribeCacheSubnetGroups operation. 
// pageNum := 0 -// err := client.DescribeReservedCacheNodesOfferingsPages(params, -// func(page *elasticache.DescribeReservedCacheNodesOfferingsOutput, lastPage bool) bool { +// err := client.DescribeCacheSubnetGroupsPages(params, +// func(page *elasticache.DescribeCacheSubnetGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPages(input *DescribeReservedCacheNodesOfferingsInput, fn func(*DescribeReservedCacheNodesOfferingsOutput, bool) bool) error { - return c.DescribeReservedCacheNodesOfferingsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *ElastiCache) DescribeCacheSubnetGroupsPages(input *DescribeCacheSubnetGroupsInput, fn func(*DescribeCacheSubnetGroupsOutput, bool) bool) error { + return c.DescribeCacheSubnetGroupsPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeReservedCacheNodesOfferingsPagesWithContext same as DescribeReservedCacheNodesOfferingsPages except +// DescribeCacheSubnetGroupsPagesWithContext same as DescribeCacheSubnetGroupsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPagesWithContext(ctx aws.Context, input *DescribeReservedCacheNodesOfferingsInput, fn func(*DescribeReservedCacheNodesOfferingsOutput, bool) bool, opts ...request.Option) error { +func (c *ElastiCache) DescribeCacheSubnetGroupsPagesWithContext(ctx aws.Context, input *DescribeCacheSubnetGroupsInput, fn func(*DescribeCacheSubnetGroupsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeReservedCacheNodesOfferingsInput + var inCpy *DescribeCacheSubnetGroupsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeReservedCacheNodesOfferingsRequest(inCpy) + req, _ := c.DescribeCacheSubnetGroupsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -3615,7 +3564,7 @@ func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPagesWithContext(ctx aw } for p.Next() { - if !fn(p.Page().(*DescribeReservedCacheNodesOfferingsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*DescribeCacheSubnetGroupsOutput), !p.HasNextPage()) { break } } @@ -3623,134 +3572,132 @@ func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPagesWithContext(ctx aw return p.Err() } -const opDescribeServiceUpdates = "DescribeServiceUpdates" +const opDescribeEngineDefaultParameters = "DescribeEngineDefaultParameters" -// DescribeServiceUpdatesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeServiceUpdates operation. The "output" return +// DescribeEngineDefaultParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEngineDefaultParameters operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See DescribeServiceUpdates for more information on using the DescribeServiceUpdates +// See DescribeEngineDefaultParameters for more information on using the DescribeEngineDefaultParameters // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeServiceUpdatesRequest method. -// req, resp := client.DescribeServiceUpdatesRequest(params) +// // Example sending a request using the DescribeEngineDefaultParametersRequest method. +// req, resp := client.DescribeEngineDefaultParametersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeServiceUpdates -func (c *ElastiCache) DescribeServiceUpdatesRequest(input *DescribeServiceUpdatesInput) (req *request.Request, output *DescribeServiceUpdatesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEngineDefaultParameters +func (c *ElastiCache) DescribeEngineDefaultParametersRequest(input *DescribeEngineDefaultParametersInput) (req *request.Request, output *DescribeEngineDefaultParametersOutput) { op := &request.Operation{ - Name: opDescribeServiceUpdates, + Name: opDescribeEngineDefaultParameters, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, - OutputTokens: []string{"Marker"}, + OutputTokens: []string{"EngineDefaults.Marker"}, LimitToken: "MaxRecords", TruncationToken: "", }, } if input == nil { - input = &DescribeServiceUpdatesInput{} + input = &DescribeEngineDefaultParametersInput{} } - output = &DescribeServiceUpdatesOutput{} + output = &DescribeEngineDefaultParametersOutput{} req = c.newRequest(op, input, output) return } -// DescribeServiceUpdates API operation for Amazon ElastiCache. +// DescribeEngineDefaultParameters API operation for Amazon ElastiCache. // -// Returns details of the service updates +// Returns the default engine and system parameter information for the specified +// cache engine. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeServiceUpdates for usage and error information. +// API operation DescribeEngineDefaultParameters for usage and error information. // // Returned Error Codes: -// * ErrCodeServiceUpdateNotFoundFault "ServiceUpdateNotFoundFault" -// The service update doesn't exist -// // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. 
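Note that this operation's paginator reads its continuation token from EngineDefaults.Marker rather than the top-level Marker. A minimal sketch of the WithContext variant follows; it assumes default session resolution, and the "redis6.x" parameter group family is only an example value for the required CacheParameterGroupFamily field.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	// aws.Context is satisfied by a standard context.Context, so the usual
	// timeout and cancellation patterns apply to the WithContext methods.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := svc.DescribeEngineDefaultParametersWithContext(ctx, &elasticache.DescribeEngineDefaultParametersInput{
		CacheParameterGroupFamily: aws.String("redis6.x"), // example value
	})
	if err != nil {
		log.Fatal(err)
	}
	// Pagination state for this operation lives under EngineDefaults.Marker,
	// matching the paginator definition above.
	fmt.Println(len(out.EngineDefaults.Parameters), "default parameters returned")
}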
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeServiceUpdates -func (c *ElastiCache) DescribeServiceUpdates(input *DescribeServiceUpdatesInput) (*DescribeServiceUpdatesOutput, error) { - req, out := c.DescribeServiceUpdatesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEngineDefaultParameters +func (c *ElastiCache) DescribeEngineDefaultParameters(input *DescribeEngineDefaultParametersInput) (*DescribeEngineDefaultParametersOutput, error) { + req, out := c.DescribeEngineDefaultParametersRequest(input) return out, req.Send() } -// DescribeServiceUpdatesWithContext is the same as DescribeServiceUpdates with the addition of +// DescribeEngineDefaultParametersWithContext is the same as DescribeEngineDefaultParameters with the addition of // the ability to pass a context and additional request options. // -// See DescribeServiceUpdates for details on how to use this API operation. +// See DescribeEngineDefaultParameters for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeServiceUpdatesWithContext(ctx aws.Context, input *DescribeServiceUpdatesInput, opts ...request.Option) (*DescribeServiceUpdatesOutput, error) { - req, out := c.DescribeServiceUpdatesRequest(input) +func (c *ElastiCache) DescribeEngineDefaultParametersWithContext(ctx aws.Context, input *DescribeEngineDefaultParametersInput, opts ...request.Option) (*DescribeEngineDefaultParametersOutput, error) { + req, out := c.DescribeEngineDefaultParametersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeServiceUpdatesPages iterates over the pages of a DescribeServiceUpdates operation, +// DescribeEngineDefaultParametersPages iterates over the pages of a DescribeEngineDefaultParameters operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeServiceUpdates method for more information on how to use this operation. +// See DescribeEngineDefaultParameters method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeServiceUpdates operation. +// // Example iterating over at most 3 pages of a DescribeEngineDefaultParameters operation. 
// pageNum := 0 -// err := client.DescribeServiceUpdatesPages(params, -// func(page *elasticache.DescribeServiceUpdatesOutput, lastPage bool) bool { +// err := client.DescribeEngineDefaultParametersPages(params, +// func(page *elasticache.DescribeEngineDefaultParametersOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *ElastiCache) DescribeServiceUpdatesPages(input *DescribeServiceUpdatesInput, fn func(*DescribeServiceUpdatesOutput, bool) bool) error { - return c.DescribeServiceUpdatesPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *ElastiCache) DescribeEngineDefaultParametersPages(input *DescribeEngineDefaultParametersInput, fn func(*DescribeEngineDefaultParametersOutput, bool) bool) error { + return c.DescribeEngineDefaultParametersPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeServiceUpdatesPagesWithContext same as DescribeServiceUpdatesPages except +// DescribeEngineDefaultParametersPagesWithContext same as DescribeEngineDefaultParametersPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeServiceUpdatesPagesWithContext(ctx aws.Context, input *DescribeServiceUpdatesInput, fn func(*DescribeServiceUpdatesOutput, bool) bool, opts ...request.Option) error { +func (c *ElastiCache) DescribeEngineDefaultParametersPagesWithContext(ctx aws.Context, input *DescribeEngineDefaultParametersInput, fn func(*DescribeEngineDefaultParametersOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeServiceUpdatesInput + var inCpy *DescribeEngineDefaultParametersInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeServiceUpdatesRequest(inCpy) + req, _ := c.DescribeEngineDefaultParametersRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -3758,7 +3705,7 @@ func (c *ElastiCache) DescribeServiceUpdatesPagesWithContext(ctx aws.Context, in } for p.Next() { - if !fn(p.Page().(*DescribeServiceUpdatesOutput), !p.HasNextPage()) { + if !fn(p.Page().(*DescribeEngineDefaultParametersOutput), !p.HasNextPage()) { break } } @@ -3766,35 +3713,35 @@ func (c *ElastiCache) DescribeServiceUpdatesPagesWithContext(ctx aws.Context, in return p.Err() } -const opDescribeSnapshots = "DescribeSnapshots" +const opDescribeEvents = "DescribeEvents" -// DescribeSnapshotsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeSnapshots operation. The "output" return +// DescribeEventsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEvents operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeSnapshots for more information on using the DescribeSnapshots +// See DescribeEvents for more information on using the DescribeEvents // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeSnapshotsRequest method. -// req, resp := client.DescribeSnapshotsRequest(params) +// // Example sending a request using the DescribeEventsRequest method. +// req, resp := client.DescribeEventsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeSnapshots -func (c *ElastiCache) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEvents +func (c *ElastiCache) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { op := &request.Operation{ - Name: opDescribeSnapshots, + Name: opDescribeEvents, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -3806,102 +3753,96 @@ func (c *ElastiCache) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (r } if input == nil { - input = &DescribeSnapshotsInput{} + input = &DescribeEventsInput{} } - output = &DescribeSnapshotsOutput{} + output = &DescribeEventsOutput{} req = c.newRequest(op, input, output) return } -// DescribeSnapshots API operation for Amazon ElastiCache. +// DescribeEvents API operation for Amazon ElastiCache. // -// Returns information about cluster or replication group snapshots. By default, -// DescribeSnapshots lists all of your snapshots; it can optionally describe -// a single snapshot, or just the snapshots associated with a particular cache -// cluster. +// Returns events related to clusters, cache security groups, and cache parameter +// groups. You can obtain events specific to a particular cluster, cache security +// group, or cache parameter group by providing the name as a parameter. // -// This operation is valid for Redis only. +// By default, only the events occurring within the last hour are returned; +// however, you can retrieve up to 14 days' worth of events if necessary. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeSnapshots for usage and error information. +// API operation DescribeEvents for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" -// The requested cluster ID does not refer to an existing cluster. -// -// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault" -// The requested snapshot name does not refer to an existing snapshot. -// // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. 
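To make the Pages helper described above concrete, here is a brief sketch of walking every page of DescribeEvents under default session assumptions. With an empty input, only events from roughly the last hour are returned, per the operation description; returning true from the callback keeps the paginator following the Marker until it is exhausted.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	// Accumulate events across all pages; the final page is signalled by
	// lastPage, which this sketch does not need to inspect.
	var events []*elasticache.Event
	err := svc.DescribeEventsPages(&elasticache.DescribeEventsInput{},
		func(page *elasticache.DescribeEventsOutput, lastPage bool) bool {
			events = append(events, page.Events...)
			return true
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("collected %d events\n", len(events))
}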
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeSnapshots -func (c *ElastiCache) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { - req, out := c.DescribeSnapshotsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEvents +func (c *ElastiCache) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) { + req, out := c.DescribeEventsRequest(input) return out, req.Send() } -// DescribeSnapshotsWithContext is the same as DescribeSnapshots with the addition of +// DescribeEventsWithContext is the same as DescribeEvents with the addition of // the ability to pass a context and additional request options. // -// See DescribeSnapshots for details on how to use this API operation. +// See DescribeEvents for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeSnapshotsWithContext(ctx aws.Context, input *DescribeSnapshotsInput, opts ...request.Option) (*DescribeSnapshotsOutput, error) { - req, out := c.DescribeSnapshotsRequest(input) +func (c *ElastiCache) DescribeEventsWithContext(ctx aws.Context, input *DescribeEventsInput, opts ...request.Option) (*DescribeEventsOutput, error) { + req, out := c.DescribeEventsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeSnapshotsPages iterates over the pages of a DescribeSnapshots operation, +// DescribeEventsPages iterates over the pages of a DescribeEvents operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeSnapshots method for more information on how to use this operation. +// See DescribeEvents method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeSnapshots operation. +// // Example iterating over at most 3 pages of a DescribeEvents operation. // pageNum := 0 -// err := client.DescribeSnapshotsPages(params, -// func(page *elasticache.DescribeSnapshotsOutput, lastPage bool) bool { +// err := client.DescribeEventsPages(params, +// func(page *elasticache.DescribeEventsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *ElastiCache) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool) error { - return c.DescribeSnapshotsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *ElastiCache) DescribeEventsPages(input *DescribeEventsInput, fn func(*DescribeEventsOutput, bool) bool) error { + return c.DescribeEventsPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeSnapshotsPagesWithContext same as DescribeSnapshotsPages except +// DescribeEventsPagesWithContext same as DescribeEventsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *ElastiCache) DescribeSnapshotsPagesWithContext(ctx aws.Context, input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool, opts ...request.Option) error { +func (c *ElastiCache) DescribeEventsPagesWithContext(ctx aws.Context, input *DescribeEventsInput, fn func(*DescribeEventsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeSnapshotsInput + var inCpy *DescribeEventsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeSnapshotsRequest(inCpy) + req, _ := c.DescribeEventsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -3909,7 +3850,7 @@ func (c *ElastiCache) DescribeSnapshotsPagesWithContext(ctx aws.Context, input * } for p.Next() { - if !fn(p.Page().(*DescribeSnapshotsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage()) { break } } @@ -3917,35 +3858,35 @@ func (c *ElastiCache) DescribeSnapshotsPagesWithContext(ctx aws.Context, input * return p.Err() } -const opDescribeUpdateActions = "DescribeUpdateActions" +const opDescribeGlobalReplicationGroups = "DescribeGlobalReplicationGroups" -// DescribeUpdateActionsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeUpdateActions operation. The "output" return +// DescribeGlobalReplicationGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGlobalReplicationGroups operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeUpdateActions for more information on using the DescribeUpdateActions +// See DescribeGlobalReplicationGroups for more information on using the DescribeGlobalReplicationGroups // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeUpdateActionsRequest method. -// req, resp := client.DescribeUpdateActionsRequest(params) +// // Example sending a request using the DescribeGlobalReplicationGroupsRequest method. 
+// req, resp := client.DescribeGlobalReplicationGroupsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeUpdateActions -func (c *ElastiCache) DescribeUpdateActionsRequest(input *DescribeUpdateActionsInput) (req *request.Request, output *DescribeUpdateActionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeGlobalReplicationGroups +func (c *ElastiCache) DescribeGlobalReplicationGroupsRequest(input *DescribeGlobalReplicationGroupsInput) (req *request.Request, output *DescribeGlobalReplicationGroupsOutput) { op := &request.Operation{ - Name: opDescribeUpdateActions, + Name: opDescribeGlobalReplicationGroups, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -3957,91 +3898,95 @@ func (c *ElastiCache) DescribeUpdateActionsRequest(input *DescribeUpdateActionsI } if input == nil { - input = &DescribeUpdateActionsInput{} + input = &DescribeGlobalReplicationGroupsInput{} } - output = &DescribeUpdateActionsOutput{} + output = &DescribeGlobalReplicationGroupsOutput{} req = c.newRequest(op, input, output) return } -// DescribeUpdateActions API operation for Amazon ElastiCache. +// DescribeGlobalReplicationGroups API operation for Amazon ElastiCache. // -// Returns details of the update actions +// Returns information about a particular global replication group. If no identifier +// is specified, returns information about all Global Datastores. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation DescribeUpdateActions for usage and error information. +// API operation DescribeGlobalReplicationGroups for usage and error information. // // Returned Error Codes: +// * ErrCodeGlobalReplicationGroupNotFoundFault "GlobalReplicationGroupNotFoundFault" +// The Global Datastore does not exist +// // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeUpdateActions -func (c *ElastiCache) DescribeUpdateActions(input *DescribeUpdateActionsInput) (*DescribeUpdateActionsOutput, error) { - req, out := c.DescribeUpdateActionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeGlobalReplicationGroups +func (c *ElastiCache) DescribeGlobalReplicationGroups(input *DescribeGlobalReplicationGroupsInput) (*DescribeGlobalReplicationGroupsOutput, error) { + req, out := c.DescribeGlobalReplicationGroupsRequest(input) return out, req.Send() } -// DescribeUpdateActionsWithContext is the same as DescribeUpdateActions with the addition of +// DescribeGlobalReplicationGroupsWithContext is the same as DescribeGlobalReplicationGroups with the addition of // the ability to pass a context and additional request options. // -// See DescribeUpdateActions for details on how to use this API operation. +// See DescribeGlobalReplicationGroups for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) DescribeUpdateActionsWithContext(ctx aws.Context, input *DescribeUpdateActionsInput, opts ...request.Option) (*DescribeUpdateActionsOutput, error) { - req, out := c.DescribeUpdateActionsRequest(input) +func (c *ElastiCache) DescribeGlobalReplicationGroupsWithContext(ctx aws.Context, input *DescribeGlobalReplicationGroupsInput, opts ...request.Option) (*DescribeGlobalReplicationGroupsOutput, error) { + req, out := c.DescribeGlobalReplicationGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeUpdateActionsPages iterates over the pages of a DescribeUpdateActions operation, +// DescribeGlobalReplicationGroupsPages iterates over the pages of a DescribeGlobalReplicationGroups operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See DescribeUpdateActions method for more information on how to use this operation. +// See DescribeGlobalReplicationGroups method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a DescribeUpdateActions operation. +// // Example iterating over at most 3 pages of a DescribeGlobalReplicationGroups operation. // pageNum := 0 -// err := client.DescribeUpdateActionsPages(params, -// func(page *elasticache.DescribeUpdateActionsOutput, lastPage bool) bool { +// err := client.DescribeGlobalReplicationGroupsPages(params, +// func(page *elasticache.DescribeGlobalReplicationGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *ElastiCache) DescribeUpdateActionsPages(input *DescribeUpdateActionsInput, fn func(*DescribeUpdateActionsOutput, bool) bool) error { - return c.DescribeUpdateActionsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *ElastiCache) DescribeGlobalReplicationGroupsPages(input *DescribeGlobalReplicationGroupsInput, fn func(*DescribeGlobalReplicationGroupsOutput, bool) bool) error { + return c.DescribeGlobalReplicationGroupsPagesWithContext(aws.BackgroundContext(), input, fn) } -// DescribeUpdateActionsPagesWithContext same as DescribeUpdateActionsPages except +// DescribeGlobalReplicationGroupsPagesWithContext same as DescribeGlobalReplicationGroupsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *ElastiCache) DescribeUpdateActionsPagesWithContext(ctx aws.Context, input *DescribeUpdateActionsInput, fn func(*DescribeUpdateActionsOutput, bool) bool, opts ...request.Option) error { +func (c *ElastiCache) DescribeGlobalReplicationGroupsPagesWithContext(ctx aws.Context, input *DescribeGlobalReplicationGroupsInput, fn func(*DescribeGlobalReplicationGroupsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *DescribeUpdateActionsInput + var inCpy *DescribeGlobalReplicationGroupsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.DescribeUpdateActionsRequest(inCpy) + req, _ := c.DescribeGlobalReplicationGroupsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -4049,7 +3994,7 @@ func (c *ElastiCache) DescribeUpdateActionsPagesWithContext(ctx aws.Context, inp } for p.Next() { - if !fn(p.Page().(*DescribeUpdateActionsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*DescribeGlobalReplicationGroupsOutput), !p.HasNextPage()) { break } } @@ -4057,402 +4002,359 @@ func (c *ElastiCache) DescribeUpdateActionsPagesWithContext(ctx aws.Context, inp return p.Err() } -const opIncreaseReplicaCount = "IncreaseReplicaCount" +const opDescribeReplicationGroups = "DescribeReplicationGroups" -// IncreaseReplicaCountRequest generates a "aws/request.Request" representing the -// client's request for the IncreaseReplicaCount operation. The "output" return +// DescribeReplicationGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplicationGroups operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See IncreaseReplicaCount for more information on using the IncreaseReplicaCount +// See DescribeReplicationGroups for more information on using the DescribeReplicationGroups // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the IncreaseReplicaCountRequest method. -// req, resp := client.IncreaseReplicaCountRequest(params) +// // Example sending a request using the DescribeReplicationGroupsRequest method. 
+// req, resp := client.DescribeReplicationGroupsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/IncreaseReplicaCount -func (c *ElastiCache) IncreaseReplicaCountRequest(input *IncreaseReplicaCountInput) (req *request.Request, output *IncreaseReplicaCountOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReplicationGroups +func (c *ElastiCache) DescribeReplicationGroupsRequest(input *DescribeReplicationGroupsInput) (req *request.Request, output *DescribeReplicationGroupsOutput) { op := &request.Operation{ - Name: opIncreaseReplicaCount, + Name: opDescribeReplicationGroups, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { - input = &IncreaseReplicaCountInput{} + input = &DescribeReplicationGroupsInput{} } - output = &IncreaseReplicaCountOutput{} + output = &DescribeReplicationGroupsOutput{} req = c.newRequest(op, input, output) return } -// IncreaseReplicaCount API operation for Amazon ElastiCache. +// DescribeReplicationGroups API operation for Amazon ElastiCache. // -// Dynamically increases the number of replics in a Redis (cluster mode disabled) -// replication group or the number of replica nodes in one or more node groups -// (shards) of a Redis (cluster mode enabled) replication group. This operation -// is performed with no cluster down time. +// Returns information about a particular replication group. If no identifier +// is specified, DescribeReplicationGroups returns information about all replication +// groups. +// +// This operation is valid for Redis only. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation IncreaseReplicaCount for usage and error information. +// API operation DescribeReplicationGroups for usage and error information. // // Returned Error Codes: // * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" // The specified replication group does not exist. // -// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" -// The requested replication group is not in the available state. -// -// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" -// The requested cluster is not in the available state. -// -// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" -// The VPC network is in an invalid state. -// -// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" -// The requested cache node type is not available in the specified Availability -// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) -// in the ElastiCache User Guide. -// -// * ErrCodeClusterQuotaForCustomerExceededFault "ClusterQuotaForCustomerExceeded" -// The request cannot be processed because it would exceed the allowed number -// of clusters per customer. 
-// -// * ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault "NodeGroupsPerReplicationGroupQuotaExceeded" -// The request cannot be processed because it would exceed the maximum allowed -// number of node groups (shards) in a single replication group. The default -// maximum is 90 -// -// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded" -// The request cannot be processed because it would exceed the allowed number -// of cache nodes per customer. -// -// * ErrCodeNoOperationFault "NoOperationFault" -// The operation was not performed because no changes were required. -// -// * ErrCodeInvalidKMSKeyFault "InvalidKMSKeyFault" -// The KMS key supplied is not valid. -// // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/IncreaseReplicaCount -func (c *ElastiCache) IncreaseReplicaCount(input *IncreaseReplicaCountInput) (*IncreaseReplicaCountOutput, error) { - req, out := c.IncreaseReplicaCountRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReplicationGroups +func (c *ElastiCache) DescribeReplicationGroups(input *DescribeReplicationGroupsInput) (*DescribeReplicationGroupsOutput, error) { + req, out := c.DescribeReplicationGroupsRequest(input) return out, req.Send() } -// IncreaseReplicaCountWithContext is the same as IncreaseReplicaCount with the addition of +// DescribeReplicationGroupsWithContext is the same as DescribeReplicationGroups with the addition of // the ability to pass a context and additional request options. // -// See IncreaseReplicaCount for details on how to use this API operation. +// See DescribeReplicationGroups for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) IncreaseReplicaCountWithContext(ctx aws.Context, input *IncreaseReplicaCountInput, opts ...request.Option) (*IncreaseReplicaCountOutput, error) { - req, out := c.IncreaseReplicaCountRequest(input) +func (c *ElastiCache) DescribeReplicationGroupsWithContext(ctx aws.Context, input *DescribeReplicationGroupsInput, opts ...request.Option) (*DescribeReplicationGroupsOutput, error) { + req, out := c.DescribeReplicationGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListAllowedNodeTypeModifications = "ListAllowedNodeTypeModifications" +// DescribeReplicationGroupsPages iterates over the pages of a DescribeReplicationGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReplicationGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReplicationGroups operation. 
+// pageNum := 0 +// err := client.DescribeReplicationGroupsPages(params, +// func(page *elasticache.DescribeReplicationGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeReplicationGroupsPages(input *DescribeReplicationGroupsInput, fn func(*DescribeReplicationGroupsOutput, bool) bool) error { + return c.DescribeReplicationGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// ListAllowedNodeTypeModificationsRequest generates a "aws/request.Request" representing the -// client's request for the ListAllowedNodeTypeModifications operation. The "output" return +// DescribeReplicationGroupsPagesWithContext same as DescribeReplicationGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) DescribeReplicationGroupsPagesWithContext(ctx aws.Context, input *DescribeReplicationGroupsInput, fn func(*DescribeReplicationGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReplicationGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReplicationGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeReplicationGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeReservedCacheNodes = "DescribeReservedCacheNodes" + +// DescribeReservedCacheNodesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedCacheNodes operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListAllowedNodeTypeModifications for more information on using the ListAllowedNodeTypeModifications +// See DescribeReservedCacheNodes for more information on using the DescribeReservedCacheNodes // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListAllowedNodeTypeModificationsRequest method. -// req, resp := client.ListAllowedNodeTypeModificationsRequest(params) +// // Example sending a request using the DescribeReservedCacheNodesRequest method. 
+// req, resp := client.DescribeReservedCacheNodesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListAllowedNodeTypeModifications -func (c *ElastiCache) ListAllowedNodeTypeModificationsRequest(input *ListAllowedNodeTypeModificationsInput) (req *request.Request, output *ListAllowedNodeTypeModificationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodes +func (c *ElastiCache) DescribeReservedCacheNodesRequest(input *DescribeReservedCacheNodesInput) (req *request.Request, output *DescribeReservedCacheNodesOutput) { op := &request.Operation{ - Name: opListAllowedNodeTypeModifications, + Name: opDescribeReservedCacheNodes, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { - input = &ListAllowedNodeTypeModificationsInput{} + input = &DescribeReservedCacheNodesInput{} } - output = &ListAllowedNodeTypeModificationsOutput{} + output = &DescribeReservedCacheNodesOutput{} req = c.newRequest(op, input, output) return } -// ListAllowedNodeTypeModifications API operation for Amazon ElastiCache. -// -// Lists all available node types that you can scale your Redis cluster's or -// replication group's current node type. +// DescribeReservedCacheNodes API operation for Amazon ElastiCache. // -// When you use the ModifyCacheCluster or ModifyReplicationGroup operations -// to scale your cluster or replication group, the value of the CacheNodeType -// parameter must be one of the node types returned by this operation. +// Returns information about reserved cache nodes for this account, or about +// a specified reserved cache node. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation ListAllowedNodeTypeModifications for usage and error information. +// API operation DescribeReservedCacheNodes for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" -// The requested cluster ID does not refer to an existing cluster. +// * ErrCodeReservedCacheNodeNotFoundFault "ReservedCacheNodeNotFound" +// The requested reserved cache node was not found. // -// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" -// The specified replication group does not exist. +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. // // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// * ErrCodeInvalidParameterValueException "InvalidParameterValue" -// The value for a parameter is invalid. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListAllowedNodeTypeModifications -func (c *ElastiCache) ListAllowedNodeTypeModifications(input *ListAllowedNodeTypeModificationsInput) (*ListAllowedNodeTypeModificationsOutput, error) { - req, out := c.ListAllowedNodeTypeModificationsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodes +func (c *ElastiCache) DescribeReservedCacheNodes(input *DescribeReservedCacheNodesInput) (*DescribeReservedCacheNodesOutput, error) { + req, out := c.DescribeReservedCacheNodesRequest(input) return out, req.Send() } -// ListAllowedNodeTypeModificationsWithContext is the same as ListAllowedNodeTypeModifications with the addition of +// DescribeReservedCacheNodesWithContext is the same as DescribeReservedCacheNodes with the addition of // the ability to pass a context and additional request options. // -// See ListAllowedNodeTypeModifications for details on how to use this API operation. +// See DescribeReservedCacheNodes for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) ListAllowedNodeTypeModificationsWithContext(ctx aws.Context, input *ListAllowedNodeTypeModificationsInput, opts ...request.Option) (*ListAllowedNodeTypeModificationsOutput, error) { - req, out := c.ListAllowedNodeTypeModificationsRequest(input) +func (c *ElastiCache) DescribeReservedCacheNodesWithContext(ctx aws.Context, input *DescribeReservedCacheNodesInput, opts ...request.Option) (*DescribeReservedCacheNodesOutput, error) { + req, out := c.DescribeReservedCacheNodesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListTagsForResource = "ListTagsForResource" - -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListTagsForResource for more information on using the ListTagsForResource -// API call, and error handling. +// DescribeReservedCacheNodesPages iterates over the pages of a DescribeReservedCacheNodes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// See DescribeReservedCacheNodes method for more information on how to use this operation. // +// Note: This operation can generate multiple requests to a service. // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example iterating over at most 3 pages of a DescribeReservedCacheNodes operation. 
+// pageNum := 0 +// err := client.DescribeReservedCacheNodesPages(params, +// func(page *elasticache.DescribeReservedCacheNodesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListTagsForResource -func (c *ElastiCache) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *TagListMessage) { - op := &request.Operation{ - Name: opListTagsForResource, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &ListTagsForResourceInput{} - } - - output = &TagListMessage{} - req = c.newRequest(op, input, output) - return -} - -// ListTagsForResource API operation for Amazon ElastiCache. -// -// Lists all cost allocation tags currently on the named resource. A cost allocation -// tag is a key-value pair where the key is case-sensitive and the value is -// optional. You can use cost allocation tags to categorize and track your AWS -// costs. -// -// If the cluster is not in the available state, ListTagsForResource returns -// an error. -// -// You can have a maximum of 50 cost allocation tags on an ElastiCache resource. -// For more information, see Monitoring Costs with Tags (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Tagging.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Amazon ElastiCache's -// API operation ListTagsForResource for usage and error information. -// -// Returned Error Codes: -// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" -// The requested cluster ID does not refer to an existing cluster. -// -// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault" -// The requested snapshot name does not refer to an existing snapshot. -// -// * ErrCodeInvalidARNFault "InvalidARN" -// The requested Amazon Resource Name (ARN) does not refer to an existing resource. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListTagsForResource -func (c *ElastiCache) ListTagsForResource(input *ListTagsForResourceInput) (*TagListMessage, error) { - req, out := c.ListTagsForResourceRequest(input) - return out, req.Send() -} - -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of -// the ability to pass a context and additional request options. -// -// See ListTagsForResource for details on how to use this API operation. +func (c *ElastiCache) DescribeReservedCacheNodesPages(input *DescribeReservedCacheNodesInput, fn func(*DescribeReservedCacheNodesOutput, bool) bool) error { + return c.DescribeReservedCacheNodesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeReservedCacheNodesPagesWithContext same as DescribeReservedCacheNodesPages except +// it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *ElastiCache) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*TagListMessage, error) { - req, out := c.ListTagsForResourceRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() +func (c *ElastiCache) DescribeReservedCacheNodesPagesWithContext(ctx aws.Context, input *DescribeReservedCacheNodesInput, fn func(*DescribeReservedCacheNodesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReservedCacheNodesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReservedCacheNodesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeReservedCacheNodesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() } -const opModifyCacheCluster = "ModifyCacheCluster" +const opDescribeReservedCacheNodesOfferings = "DescribeReservedCacheNodesOfferings" -// ModifyCacheClusterRequest generates a "aws/request.Request" representing the -// client's request for the ModifyCacheCluster operation. The "output" return +// DescribeReservedCacheNodesOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedCacheNodesOfferings operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ModifyCacheCluster for more information on using the ModifyCacheCluster +// See DescribeReservedCacheNodesOfferings for more information on using the DescribeReservedCacheNodesOfferings // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ModifyCacheClusterRequest method. -// req, resp := client.ModifyCacheClusterRequest(params) +// // Example sending a request using the DescribeReservedCacheNodesOfferingsRequest method. 
+// req, resp := client.DescribeReservedCacheNodesOfferingsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheCluster -func (c *ElastiCache) ModifyCacheClusterRequest(input *ModifyCacheClusterInput) (req *request.Request, output *ModifyCacheClusterOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodesOfferings +func (c *ElastiCache) DescribeReservedCacheNodesOfferingsRequest(input *DescribeReservedCacheNodesOfferingsInput) (req *request.Request, output *DescribeReservedCacheNodesOfferingsOutput) { op := &request.Operation{ - Name: opModifyCacheCluster, + Name: opDescribeReservedCacheNodesOfferings, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { - input = &ModifyCacheClusterInput{} + input = &DescribeReservedCacheNodesOfferingsInput{} } - output = &ModifyCacheClusterOutput{} + output = &DescribeReservedCacheNodesOfferingsOutput{} req = c.newRequest(op, input, output) return } -// ModifyCacheCluster API operation for Amazon ElastiCache. +// DescribeReservedCacheNodesOfferings API operation for Amazon ElastiCache. // -// Modifies the settings for a cluster. You can use this operation to change -// one or more cluster configuration parameters by specifying the parameters -// and the new values. +// Lists available reserved cache node offerings. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation ModifyCacheCluster for usage and error information. +// API operation DescribeReservedCacheNodesOfferings for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" -// The requested cluster is not in the available state. -// -// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState" -// The current state of the cache security group does not allow deletion. -// -// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" -// The requested cache node type is not available in the specified Availability -// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) -// in the ElastiCache User Guide. -// -// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" -// The requested cluster ID does not refer to an existing cluster. -// -// * ErrCodeNodeQuotaForClusterExceededFault "NodeQuotaForClusterExceeded" -// The request cannot be processed because it would exceed the allowed number -// of cache nodes in a single cluster. -// -// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded" -// The request cannot be processed because it would exceed the allowed number -// of cache nodes per customer. -// -// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound" -// The requested cache security group name does not refer to an existing cache -// security group. 
-// -// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" -// The requested cache parameter group name does not refer to an existing cache -// parameter group. -// -// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" -// The VPC network is in an invalid state. +// * ErrCodeReservedCacheNodesOfferingNotFoundFault "ReservedCacheNodesOfferingNotFound" +// The requested cache node offering does not exist. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -4460,91 +4362,142 @@ func (c *ElastiCache) ModifyCacheClusterRequest(input *ModifyCacheClusterInput) // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheCluster -func (c *ElastiCache) ModifyCacheCluster(input *ModifyCacheClusterInput) (*ModifyCacheClusterOutput, error) { - req, out := c.ModifyCacheClusterRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodesOfferings +func (c *ElastiCache) DescribeReservedCacheNodesOfferings(input *DescribeReservedCacheNodesOfferingsInput) (*DescribeReservedCacheNodesOfferingsOutput, error) { + req, out := c.DescribeReservedCacheNodesOfferingsRequest(input) return out, req.Send() } -// ModifyCacheClusterWithContext is the same as ModifyCacheCluster with the addition of +// DescribeReservedCacheNodesOfferingsWithContext is the same as DescribeReservedCacheNodesOfferings with the addition of // the ability to pass a context and additional request options. // -// See ModifyCacheCluster for details on how to use this API operation. +// See DescribeReservedCacheNodesOfferings for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) ModifyCacheClusterWithContext(ctx aws.Context, input *ModifyCacheClusterInput, opts ...request.Option) (*ModifyCacheClusterOutput, error) { - req, out := c.ModifyCacheClusterRequest(input) +func (c *ElastiCache) DescribeReservedCacheNodesOfferingsWithContext(ctx aws.Context, input *DescribeReservedCacheNodesOfferingsInput, opts ...request.Option) (*DescribeReservedCacheNodesOfferingsOutput, error) { + req, out := c.DescribeReservedCacheNodesOfferingsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opModifyCacheParameterGroup = "ModifyCacheParameterGroup" +// DescribeReservedCacheNodesOfferingsPages iterates over the pages of a DescribeReservedCacheNodesOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedCacheNodesOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedCacheNodesOfferings operation. 
+// pageNum := 0 +// err := client.DescribeReservedCacheNodesOfferingsPages(params, +// func(page *elasticache.DescribeReservedCacheNodesOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPages(input *DescribeReservedCacheNodesOfferingsInput, fn func(*DescribeReservedCacheNodesOfferingsOutput, bool) bool) error { + return c.DescribeReservedCacheNodesOfferingsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// ModifyCacheParameterGroupRequest generates a "aws/request.Request" representing the -// client's request for the ModifyCacheParameterGroup operation. The "output" return +// DescribeReservedCacheNodesOfferingsPagesWithContext same as DescribeReservedCacheNodesOfferingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPagesWithContext(ctx aws.Context, input *DescribeReservedCacheNodesOfferingsInput, fn func(*DescribeReservedCacheNodesOfferingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReservedCacheNodesOfferingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReservedCacheNodesOfferingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeReservedCacheNodesOfferingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeServiceUpdates = "DescribeServiceUpdates" + +// DescribeServiceUpdatesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeServiceUpdates operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ModifyCacheParameterGroup for more information on using the ModifyCacheParameterGroup +// See DescribeServiceUpdates for more information on using the DescribeServiceUpdates // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ModifyCacheParameterGroupRequest method. -// req, resp := client.ModifyCacheParameterGroupRequest(params) +// // Example sending a request using the DescribeServiceUpdatesRequest method. 
+// req, resp := client.DescribeServiceUpdatesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheParameterGroup -func (c *ElastiCache) ModifyCacheParameterGroupRequest(input *ModifyCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeServiceUpdates +func (c *ElastiCache) DescribeServiceUpdatesRequest(input *DescribeServiceUpdatesInput) (req *request.Request, output *DescribeServiceUpdatesOutput) { op := &request.Operation{ - Name: opModifyCacheParameterGroup, + Name: opDescribeServiceUpdates, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { - input = &ModifyCacheParameterGroupInput{} + input = &DescribeServiceUpdatesInput{} } - output = &CacheParameterGroupNameMessage{} + output = &DescribeServiceUpdatesOutput{} req = c.newRequest(op, input, output) return } -// ModifyCacheParameterGroup API operation for Amazon ElastiCache. +// DescribeServiceUpdates API operation for Amazon ElastiCache. // -// Modifies the parameters of a cache parameter group. You can modify up to -// 20 parameters in a single request by submitting a list parameter name and -// value pairs. +// Returns details of the service updates // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation ModifyCacheParameterGroup for usage and error information. +// API operation DescribeServiceUpdates for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" -// The requested cache parameter group name does not refer to an existing cache -// parameter group. -// -// * ErrCodeInvalidCacheParameterGroupStateFault "InvalidCacheParameterGroupState" -// The current state of the cache parameter group does not allow the requested -// operation to occur. +// * ErrCodeServiceUpdateNotFoundFault "ServiceUpdateNotFoundFault" +// The service update doesn't exist // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -4552,802 +4505,1008 @@ func (c *ElastiCache) ModifyCacheParameterGroupRequest(input *ModifyCacheParamet // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheParameterGroup -func (c *ElastiCache) ModifyCacheParameterGroup(input *ModifyCacheParameterGroupInput) (*CacheParameterGroupNameMessage, error) { - req, out := c.ModifyCacheParameterGroupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeServiceUpdates +func (c *ElastiCache) DescribeServiceUpdates(input *DescribeServiceUpdatesInput) (*DescribeServiceUpdatesOutput, error) { + req, out := c.DescribeServiceUpdatesRequest(input) return out, req.Send() } -// ModifyCacheParameterGroupWithContext is the same as ModifyCacheParameterGroup with the addition of +// DescribeServiceUpdatesWithContext is the same as DescribeServiceUpdates with the addition of // the ability to pass a context and additional request options. // -// See ModifyCacheParameterGroup for details on how to use this API operation. +// See DescribeServiceUpdates for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) ModifyCacheParameterGroupWithContext(ctx aws.Context, input *ModifyCacheParameterGroupInput, opts ...request.Option) (*CacheParameterGroupNameMessage, error) { - req, out := c.ModifyCacheParameterGroupRequest(input) +func (c *ElastiCache) DescribeServiceUpdatesWithContext(ctx aws.Context, input *DescribeServiceUpdatesInput, opts ...request.Option) (*DescribeServiceUpdatesOutput, error) { + req, out := c.DescribeServiceUpdatesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opModifyCacheSubnetGroup = "ModifyCacheSubnetGroup" +// DescribeServiceUpdatesPages iterates over the pages of a DescribeServiceUpdates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeServiceUpdates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeServiceUpdates operation. +// pageNum := 0 +// err := client.DescribeServiceUpdatesPages(params, +// func(page *elasticache.DescribeServiceUpdatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeServiceUpdatesPages(input *DescribeServiceUpdatesInput, fn func(*DescribeServiceUpdatesOutput, bool) bool) error { + return c.DescribeServiceUpdatesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// ModifyCacheSubnetGroupRequest generates a "aws/request.Request" representing the -// client's request for the ModifyCacheSubnetGroup operation. The "output" return +// DescribeServiceUpdatesPagesWithContext same as DescribeServiceUpdatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
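A sketch of how the PagesWithContext variant declared just below might be driven with a deadline: the context is inherited by every per-page request the paginator builds, so cancellation stops further paging. The ServiceUpdates, ServiceUpdateName, and ServiceUpdateStatus fields are assumptions from the service model.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	client := elasticache.New(sess)

	// Bound the whole page loop, not just a single HTTP call.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	err := client.DescribeServiceUpdatesPagesWithContext(ctx,
		&elasticache.DescribeServiceUpdatesInput{},
		func(page *elasticache.DescribeServiceUpdatesOutput, lastPage bool) bool {
			for _, su := range page.ServiceUpdates {
				fmt.Println(aws.StringValue(su.ServiceUpdateName), aws.StringValue(su.ServiceUpdateStatus))
			}
			// Returning false on the final page behaves the same as returning true.
			return !lastPage
		})
	if err != nil {
		log.Fatal(err)
	}
}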
+func (c *ElastiCache) DescribeServiceUpdatesPagesWithContext(ctx aws.Context, input *DescribeServiceUpdatesInput, fn func(*DescribeServiceUpdatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeServiceUpdatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeServiceUpdatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeServiceUpdatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeSnapshots = "DescribeSnapshots" + +// DescribeSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshots operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ModifyCacheSubnetGroup for more information on using the ModifyCacheSubnetGroup +// See DescribeSnapshots for more information on using the DescribeSnapshots // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ModifyCacheSubnetGroupRequest method. -// req, resp := client.ModifyCacheSubnetGroupRequest(params) +// // Example sending a request using the DescribeSnapshotsRequest method. +// req, resp := client.DescribeSnapshotsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheSubnetGroup -func (c *ElastiCache) ModifyCacheSubnetGroupRequest(input *ModifyCacheSubnetGroupInput) (req *request.Request, output *ModifyCacheSubnetGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeSnapshots +func (c *ElastiCache) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { op := &request.Operation{ - Name: opModifyCacheSubnetGroup, + Name: opDescribeSnapshots, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { - input = &ModifyCacheSubnetGroupInput{} + input = &DescribeSnapshotsInput{} } - output = &ModifyCacheSubnetGroupOutput{} + output = &DescribeSnapshotsOutput{} req = c.newRequest(op, input, output) return } -// ModifyCacheSubnetGroup API operation for Amazon ElastiCache. +// DescribeSnapshots API operation for Amazon ElastiCache. // -// Modifies an existing cache subnet group. +// Returns information about cluster or replication group snapshots. By default, +// DescribeSnapshots lists all of your snapshots; it can optionally describe +// a single snapshot, or just the snapshots associated with a particular cache +// cluster. +// +// This operation is valid for Redis only. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for Amazon ElastiCache's -// API operation ModifyCacheSubnetGroup for usage and error information. +// API operation DescribeSnapshots for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheSubnetGroupNotFoundFault "CacheSubnetGroupNotFoundFault" -// The requested cache subnet group name does not refer to an existing cache -// subnet group. +// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" +// The requested cluster ID does not refer to an existing cluster. // -// * ErrCodeCacheSubnetQuotaExceededFault "CacheSubnetQuotaExceededFault" -// The request cannot be processed because it would exceed the allowed number -// of subnets in a cache subnet group. +// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault" +// The requested snapshot name does not refer to an existing snapshot. // -// * ErrCodeSubnetInUse "SubnetInUse" -// The requested subnet is being used by another cache subnet group. +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. // -// * ErrCodeInvalidSubnet "InvalidSubnet" -// An invalid subnet identifier was specified. +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheSubnetGroup -func (c *ElastiCache) ModifyCacheSubnetGroup(input *ModifyCacheSubnetGroupInput) (*ModifyCacheSubnetGroupOutput, error) { - req, out := c.ModifyCacheSubnetGroupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeSnapshots +func (c *ElastiCache) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) return out, req.Send() } -// ModifyCacheSubnetGroupWithContext is the same as ModifyCacheSubnetGroup with the addition of +// DescribeSnapshotsWithContext is the same as DescribeSnapshots with the addition of // the ability to pass a context and additional request options. // -// See ModifyCacheSubnetGroup for details on how to use this API operation. +// See DescribeSnapshots for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) ModifyCacheSubnetGroupWithContext(ctx aws.Context, input *ModifyCacheSubnetGroupInput, opts ...request.Option) (*ModifyCacheSubnetGroupOutput, error) { - req, out := c.ModifyCacheSubnetGroupRequest(input) +func (c *ElastiCache) DescribeSnapshotsWithContext(ctx aws.Context, input *DescribeSnapshotsInput, opts ...request.Option) (*DescribeSnapshotsOutput, error) { + req, out := c.DescribeSnapshotsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opModifyReplicationGroup = "ModifyReplicationGroup" +// DescribeSnapshotsPages iterates over the pages of a DescribeSnapshots operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSnapshots method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
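The generated comments above point callers at runtime type assertions on awserr.Error; below is a hedged sketch of that pattern for DescribeSnapshots, branching on the error codes listed in this hunk. The CacheClusterId input field and the Snapshots output field are assumed from the model, and the cluster ID is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	client := elasticache.New(sess)

	out, err := client.DescribeSnapshots(&elasticache.DescribeSnapshotsInput{
		CacheClusterId: aws.String("my-redis-cluster"), // hypothetical cluster ID
	})
	if err != nil {
		// Assert to awserr.Error, then switch on the service error code.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case elasticache.ErrCodeCacheClusterNotFoundFault:
				log.Fatalf("cluster does not exist: %s", aerr.Message())
			case elasticache.ErrCodeSnapshotNotFoundFault:
				log.Fatalf("no such snapshot: %s", aerr.Message())
			default:
				log.Fatal(aerr)
			}
		}
		log.Fatal(err)
	}
	fmt.Printf("found %d snapshots\n", len(out.Snapshots))
}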
+// +// // Example iterating over at most 3 pages of a DescribeSnapshots operation. +// pageNum := 0 +// err := client.DescribeSnapshotsPages(params, +// func(page *elasticache.DescribeSnapshotsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool) error { + return c.DescribeSnapshotsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// ModifyReplicationGroupRequest generates a "aws/request.Request" representing the -// client's request for the ModifyReplicationGroup operation. The "output" return +// DescribeSnapshotsPagesWithContext same as DescribeSnapshotsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) DescribeSnapshotsPagesWithContext(ctx aws.Context, input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSnapshotsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSnapshotsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSnapshotsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeUpdateActions = "DescribeUpdateActions" + +// DescribeUpdateActionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUpdateActions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ModifyReplicationGroup for more information on using the ModifyReplicationGroup +// See DescribeUpdateActions for more information on using the DescribeUpdateActions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ModifyReplicationGroupRequest method. -// req, resp := client.ModifyReplicationGroupRequest(params) +// // Example sending a request using the DescribeUpdateActionsRequest method. 
+// req, resp := client.DescribeUpdateActionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyReplicationGroup -func (c *ElastiCache) ModifyReplicationGroupRequest(input *ModifyReplicationGroupInput) (req *request.Request, output *ModifyReplicationGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeUpdateActions +func (c *ElastiCache) DescribeUpdateActionsRequest(input *DescribeUpdateActionsInput) (req *request.Request, output *DescribeUpdateActionsOutput) { op := &request.Operation{ - Name: opModifyReplicationGroup, + Name: opDescribeUpdateActions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { - input = &ModifyReplicationGroupInput{} + input = &DescribeUpdateActionsInput{} } - output = &ModifyReplicationGroupOutput{} + output = &DescribeUpdateActionsOutput{} req = c.newRequest(op, input, output) return } -// ModifyReplicationGroup API operation for Amazon ElastiCache. -// -// Modifies the settings for a replication group. -// -// For Redis (cluster mode enabled) clusters, this operation cannot be used -// to change a cluster's node type or engine version. For more information, -// see: -// -// * Scaling for Amazon ElastiCache for Redis (cluster mode enabled) (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/scaling-redis-cluster-mode-enabled.html) -// in the ElastiCache User Guide -// -// * ModifyReplicationGroupShardConfiguration (https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyReplicationGroupShardConfiguration.html) -// in the ElastiCache API Reference +// DescribeUpdateActions API operation for Amazon ElastiCache. // -// This operation is valid for Redis only. +// Returns details of the update actions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation ModifyReplicationGroup for usage and error information. +// API operation DescribeUpdateActions for usage and error information. // // Returned Error Codes: -// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" -// The specified replication group does not exist. -// -// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" -// The requested replication group is not in the available state. -// -// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" -// The requested cluster is not in the available state. +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. // -// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState" -// The current state of the cache security group does not allow deletion. +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. // -// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" -// The requested cache node type is not available in the specified Availability -// Zone. 
For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) -// in the ElastiCache User Guide. -// -// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" -// The requested cluster ID does not refer to an existing cluster. -// -// * ErrCodeNodeQuotaForClusterExceededFault "NodeQuotaForClusterExceeded" -// The request cannot be processed because it would exceed the allowed number -// of cache nodes in a single cluster. -// -// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded" -// The request cannot be processed because it would exceed the allowed number -// of cache nodes per customer. -// -// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound" -// The requested cache security group name does not refer to an existing cache -// security group. -// -// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" -// The requested cache parameter group name does not refer to an existing cache -// parameter group. -// -// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" -// The VPC network is in an invalid state. -// -// * ErrCodeInvalidKMSKeyFault "InvalidKMSKeyFault" -// The KMS key supplied is not valid. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValue" -// The value for a parameter is invalid. -// -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" -// Two or more incompatible parameters were specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyReplicationGroup -func (c *ElastiCache) ModifyReplicationGroup(input *ModifyReplicationGroupInput) (*ModifyReplicationGroupOutput, error) { - req, out := c.ModifyReplicationGroupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeUpdateActions +func (c *ElastiCache) DescribeUpdateActions(input *DescribeUpdateActionsInput) (*DescribeUpdateActionsOutput, error) { + req, out := c.DescribeUpdateActionsRequest(input) return out, req.Send() } -// ModifyReplicationGroupWithContext is the same as ModifyReplicationGroup with the addition of +// DescribeUpdateActionsWithContext is the same as DescribeUpdateActions with the addition of // the ability to pass a context and additional request options. // -// See ModifyReplicationGroup for details on how to use this API operation. +// See DescribeUpdateActions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) ModifyReplicationGroupWithContext(ctx aws.Context, input *ModifyReplicationGroupInput, opts ...request.Option) (*ModifyReplicationGroupOutput, error) { - req, out := c.ModifyReplicationGroupRequest(input) +func (c *ElastiCache) DescribeUpdateActionsWithContext(ctx aws.Context, input *DescribeUpdateActionsInput, opts ...request.Option) (*DescribeUpdateActionsOutput, error) { + req, out := c.DescribeUpdateActionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opModifyReplicationGroupShardConfiguration = "ModifyReplicationGroupShardConfiguration" +// DescribeUpdateActionsPages iterates over the pages of a DescribeUpdateActions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeUpdateActions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeUpdateActions operation. +// pageNum := 0 +// err := client.DescribeUpdateActionsPages(params, +// func(page *elasticache.DescribeUpdateActionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeUpdateActionsPages(input *DescribeUpdateActionsInput, fn func(*DescribeUpdateActionsOutput, bool) bool) error { + return c.DescribeUpdateActionsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// ModifyReplicationGroupShardConfigurationRequest generates a "aws/request.Request" representing the -// client's request for the ModifyReplicationGroupShardConfiguration operation. The "output" return +// DescribeUpdateActionsPagesWithContext same as DescribeUpdateActionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) DescribeUpdateActionsPagesWithContext(ctx aws.Context, input *DescribeUpdateActionsInput, fn func(*DescribeUpdateActionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeUpdateActionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeUpdateActionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeUpdateActionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeUserGroups = "DescribeUserGroups" + +// DescribeUserGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUserGroups operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ModifyReplicationGroupShardConfiguration for more information on using the ModifyReplicationGroupShardConfiguration +// See DescribeUserGroups for more information on using the DescribeUserGroups // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ModifyReplicationGroupShardConfigurationRequest method. -// req, resp := client.ModifyReplicationGroupShardConfigurationRequest(params) +// // Example sending a request using the DescribeUserGroupsRequest method. 
+// req, resp := client.DescribeUserGroupsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyReplicationGroupShardConfiguration -func (c *ElastiCache) ModifyReplicationGroupShardConfigurationRequest(input *ModifyReplicationGroupShardConfigurationInput) (req *request.Request, output *ModifyReplicationGroupShardConfigurationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeUserGroups +func (c *ElastiCache) DescribeUserGroupsRequest(input *DescribeUserGroupsInput) (req *request.Request, output *DescribeUserGroupsOutput) { op := &request.Operation{ - Name: opModifyReplicationGroupShardConfiguration, + Name: opDescribeUserGroups, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { - input = &ModifyReplicationGroupShardConfigurationInput{} + input = &DescribeUserGroupsInput{} } - output = &ModifyReplicationGroupShardConfigurationOutput{} + output = &DescribeUserGroupsOutput{} req = c.newRequest(op, input, output) return } -// ModifyReplicationGroupShardConfiguration API operation for Amazon ElastiCache. +// DescribeUserGroups API operation for Amazon ElastiCache. // -// Modifies a replication group's shards (node groups) by allowing you to add -// shards, remove shards, or rebalance the keyspaces among exisiting shards. +// Returns a list of user groups. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation ModifyReplicationGroupShardConfiguration for usage and error information. +// API operation DescribeUserGroups for usage and error information. // // Returned Error Codes: -// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" -// The specified replication group does not exist. -// -// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" -// The requested replication group is not in the available state. -// -// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" -// The requested cluster is not in the available state. -// -// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" -// The VPC network is in an invalid state. -// -// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" -// The requested cache node type is not available in the specified Availability -// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) -// in the ElastiCache User Guide. -// -// * ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault "NodeGroupsPerReplicationGroupQuotaExceeded" -// The request cannot be processed because it would exceed the maximum allowed -// number of node groups (shards) in a single replication group. The default -// maximum is 90 -// -// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded" -// The request cannot be processed because it would exceed the allowed number -// of cache nodes per customer. 
-// -// * ErrCodeInvalidKMSKeyFault "InvalidKMSKeyFault" -// The KMS key supplied is not valid. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValue" -// The value for a parameter is invalid. +// * ErrCodeUserGroupNotFoundFault "UserGroupNotFound" +// The user group was not found or does not exist // // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyReplicationGroupShardConfiguration -func (c *ElastiCache) ModifyReplicationGroupShardConfiguration(input *ModifyReplicationGroupShardConfigurationInput) (*ModifyReplicationGroupShardConfigurationOutput, error) { - req, out := c.ModifyReplicationGroupShardConfigurationRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeUserGroups +func (c *ElastiCache) DescribeUserGroups(input *DescribeUserGroupsInput) (*DescribeUserGroupsOutput, error) { + req, out := c.DescribeUserGroupsRequest(input) return out, req.Send() } -// ModifyReplicationGroupShardConfigurationWithContext is the same as ModifyReplicationGroupShardConfiguration with the addition of +// DescribeUserGroupsWithContext is the same as DescribeUserGroups with the addition of // the ability to pass a context and additional request options. // -// See ModifyReplicationGroupShardConfiguration for details on how to use this API operation. +// See DescribeUserGroups for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) ModifyReplicationGroupShardConfigurationWithContext(ctx aws.Context, input *ModifyReplicationGroupShardConfigurationInput, opts ...request.Option) (*ModifyReplicationGroupShardConfigurationOutput, error) { - req, out := c.ModifyReplicationGroupShardConfigurationRequest(input) +func (c *ElastiCache) DescribeUserGroupsWithContext(ctx aws.Context, input *DescribeUserGroupsInput, opts ...request.Option) (*DescribeUserGroupsOutput, error) { + req, out := c.DescribeUserGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPurchaseReservedCacheNodesOffering = "PurchaseReservedCacheNodesOffering" +// DescribeUserGroupsPages iterates over the pages of a DescribeUserGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeUserGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeUserGroups operation. 
+// pageNum := 0 +// err := client.DescribeUserGroupsPages(params, +// func(page *elasticache.DescribeUserGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElastiCache) DescribeUserGroupsPages(input *DescribeUserGroupsInput, fn func(*DescribeUserGroupsOutput, bool) bool) error { + return c.DescribeUserGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// PurchaseReservedCacheNodesOfferingRequest generates a "aws/request.Request" representing the -// client's request for the PurchaseReservedCacheNodesOffering operation. The "output" return +// DescribeUserGroupsPagesWithContext same as DescribeUserGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) DescribeUserGroupsPagesWithContext(ctx aws.Context, input *DescribeUserGroupsInput, fn func(*DescribeUserGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeUserGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeUserGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeUserGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeUsers = "DescribeUsers" + +// DescribeUsersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUsers operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PurchaseReservedCacheNodesOffering for more information on using the PurchaseReservedCacheNodesOffering +// See DescribeUsers for more information on using the DescribeUsers // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PurchaseReservedCacheNodesOfferingRequest method. -// req, resp := client.PurchaseReservedCacheNodesOfferingRequest(params) +// // Example sending a request using the DescribeUsersRequest method. 
+// req, resp := client.DescribeUsersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/PurchaseReservedCacheNodesOffering -func (c *ElastiCache) PurchaseReservedCacheNodesOfferingRequest(input *PurchaseReservedCacheNodesOfferingInput) (req *request.Request, output *PurchaseReservedCacheNodesOfferingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeUsers +func (c *ElastiCache) DescribeUsersRequest(input *DescribeUsersInput) (req *request.Request, output *DescribeUsersOutput) { op := &request.Operation{ - Name: opPurchaseReservedCacheNodesOffering, + Name: opDescribeUsers, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { - input = &PurchaseReservedCacheNodesOfferingInput{} + input = &DescribeUsersInput{} } - output = &PurchaseReservedCacheNodesOfferingOutput{} + output = &DescribeUsersOutput{} req = c.newRequest(op, input, output) return } -// PurchaseReservedCacheNodesOffering API operation for Amazon ElastiCache. +// DescribeUsers API operation for Amazon ElastiCache. // -// Allows you to purchase a reserved cache node offering. +// Returns a list of users. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation PurchaseReservedCacheNodesOffering for usage and error information. +// API operation DescribeUsers for usage and error information. // // Returned Error Codes: -// * ErrCodeReservedCacheNodesOfferingNotFoundFault "ReservedCacheNodesOfferingNotFound" -// The requested cache node offering does not exist. -// -// * ErrCodeReservedCacheNodeAlreadyExistsFault "ReservedCacheNodeAlreadyExists" -// You already have a reservation with the given identifier. -// -// * ErrCodeReservedCacheNodeQuotaExceededFault "ReservedCacheNodeQuotaExceeded" -// The request cannot be processed because it would exceed the user's cache -// node quota. -// -// * ErrCodeInvalidParameterValueException "InvalidParameterValue" -// The value for a parameter is invalid. +// * ErrCodeUserNotFoundFault "UserNotFound" +// The user does not exist or could not be found. // // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/PurchaseReservedCacheNodesOffering -func (c *ElastiCache) PurchaseReservedCacheNodesOffering(input *PurchaseReservedCacheNodesOfferingInput) (*PurchaseReservedCacheNodesOfferingOutput, error) { - req, out := c.PurchaseReservedCacheNodesOfferingRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeUsers +func (c *ElastiCache) DescribeUsers(input *DescribeUsersInput) (*DescribeUsersOutput, error) { + req, out := c.DescribeUsersRequest(input) return out, req.Send() } -// PurchaseReservedCacheNodesOfferingWithContext is the same as PurchaseReservedCacheNodesOffering with the addition of +// DescribeUsersWithContext is the same as DescribeUsers with the addition of // the ability to pass a context and additional request options. 
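A possible one-shot use of the new DescribeUsersWithContext helper. The Engine filter and the Users/UserName/Status output fields are assumptions from the service model; for accounts with many RBAC users, the Marker-driven DescribeUsersPages helpers in this hunk are the better fit.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	client := elasticache.New(sess)

	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()

	// Single call; results beyond the first page would require following Marker.
	out, err := client.DescribeUsersWithContext(ctx, &elasticache.DescribeUsersInput{
		Engine: aws.String("redis"), // hypothetical filter value
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range out.Users {
		fmt.Println(aws.StringValue(u.UserName), aws.StringValue(u.Status))
	}
}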
// -// See PurchaseReservedCacheNodesOffering for details on how to use this API operation. +// See DescribeUsers for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) PurchaseReservedCacheNodesOfferingWithContext(ctx aws.Context, input *PurchaseReservedCacheNodesOfferingInput, opts ...request.Option) (*PurchaseReservedCacheNodesOfferingOutput, error) { - req, out := c.PurchaseReservedCacheNodesOfferingRequest(input) +func (c *ElastiCache) DescribeUsersWithContext(ctx aws.Context, input *DescribeUsersInput, opts ...request.Option) (*DescribeUsersOutput, error) { + req, out := c.DescribeUsersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRebootCacheCluster = "RebootCacheCluster" - -// RebootCacheClusterRequest generates a "aws/request.Request" representing the -// client's request for the RebootCacheCluster operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. +// DescribeUsersPages iterates over the pages of a DescribeUsers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// See RebootCacheCluster for more information on using the RebootCacheCluster -// API call, and error handling. +// See DescribeUsers method for more information on how to use this operation. // -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// Note: This operation can generate multiple requests to a service. // +// // Example iterating over at most 3 pages of a DescribeUsers operation. +// pageNum := 0 +// err := client.DescribeUsersPages(params, +// func(page *elasticache.DescribeUsersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // -// // Example sending a request using the RebootCacheClusterRequest method. -// req, resp := client.RebootCacheClusterRequest(params) +func (c *ElastiCache) DescribeUsersPages(input *DescribeUsersInput, fn func(*DescribeUsersOutput, bool) bool) error { + return c.DescribeUsersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeUsersPagesWithContext same as DescribeUsersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) DescribeUsersPagesWithContext(ctx aws.Context, input *DescribeUsersInput, fn func(*DescribeUsersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeUsersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeUsersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeUsersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDisassociateGlobalReplicationGroup = "DisassociateGlobalReplicationGroup" + +// DisassociateGlobalReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateGlobalReplicationGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateGlobalReplicationGroup for more information on using the DisassociateGlobalReplicationGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateGlobalReplicationGroupRequest method. +// req, resp := client.DisassociateGlobalReplicationGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RebootCacheCluster -func (c *ElastiCache) RebootCacheClusterRequest(input *RebootCacheClusterInput) (req *request.Request, output *RebootCacheClusterOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DisassociateGlobalReplicationGroup +func (c *ElastiCache) DisassociateGlobalReplicationGroupRequest(input *DisassociateGlobalReplicationGroupInput) (req *request.Request, output *DisassociateGlobalReplicationGroupOutput) { op := &request.Operation{ - Name: opRebootCacheCluster, + Name: opDisassociateGlobalReplicationGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &RebootCacheClusterInput{} + input = &DisassociateGlobalReplicationGroupInput{} } - output = &RebootCacheClusterOutput{} + output = &DisassociateGlobalReplicationGroupOutput{} req = c.newRequest(op, input, output) return } -// RebootCacheCluster API operation for Amazon ElastiCache. -// -// Reboots some, or all, of the cache nodes within a provisioned cluster. This -// operation applies any modified cache parameter groups to the cluster. The -// reboot operation takes place as soon as possible, and results in a momentary -// outage to the cluster. During the reboot, the cluster status is set to REBOOTING. -// -// The reboot causes the contents of the cache (for each cache node being rebooted) -// to be lost. -// -// When the reboot is complete, a cluster event is created. +// DisassociateGlobalReplicationGroup API operation for Amazon ElastiCache. // -// Rebooting a cluster is currently supported on Memcached and Redis (cluster -// mode disabled) clusters. Rebooting is not supported on Redis (cluster mode -// enabled) clusters. -// -// If you make changes to parameters that require a Redis (cluster mode enabled) -// cluster reboot for the changes to be applied, see Rebooting a Cluster (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Rebooting.html) -// for an alternate process. +// Remove a secondary cluster from the Global Datastore using the Global Datastore +// name. The secondary cluster will no longer receive updates from the primary +// cluster, but will remain as a standalone cluster in that AWS region. 
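An illustrative call to DisassociateGlobalReplicationGroup, detaching one secondary replication group from a Global Datastore as the description above explains. The input fields (GlobalReplicationGroupId, ReplicationGroupId, ReplicationGroupRegion) and the GlobalReplicationGroup output member are assumed from the Global Datastore API; all identifiers are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	sess := session.Must(session.NewSession())
	client := elasticache.New(sess)

	// The detached secondary keeps running as a standalone cluster in its region.
	out, err := client.DisassociateGlobalReplicationGroup(&elasticache.DisassociateGlobalReplicationGroupInput{
		GlobalReplicationGroupId: aws.String("my-global-datastore"), // hypothetical IDs
		ReplicationGroupId:       aws.String("my-secondary-rg"),
		ReplicationGroupRegion:   aws.String("us-west-2"),
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.GlobalReplicationGroup != nil {
		fmt.Println(aws.StringValue(out.GlobalReplicationGroup.Status))
	}
}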
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation RebootCacheCluster for usage and error information. +// API operation DisassociateGlobalReplicationGroup for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" -// The requested cluster is not in the available state. +// * ErrCodeGlobalReplicationGroupNotFoundFault "GlobalReplicationGroupNotFoundFault" +// The Global Datastore does not exist // -// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" -// The requested cluster ID does not refer to an existing cluster. +// * ErrCodeInvalidGlobalReplicationGroupStateFault "InvalidGlobalReplicationGroupState" +// The Global Datastore is not available or in primary-only state. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RebootCacheCluster -func (c *ElastiCache) RebootCacheCluster(input *RebootCacheClusterInput) (*RebootCacheClusterOutput, error) { - req, out := c.RebootCacheClusterRequest(input) +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DisassociateGlobalReplicationGroup +func (c *ElastiCache) DisassociateGlobalReplicationGroup(input *DisassociateGlobalReplicationGroupInput) (*DisassociateGlobalReplicationGroupOutput, error) { + req, out := c.DisassociateGlobalReplicationGroupRequest(input) return out, req.Send() } -// RebootCacheClusterWithContext is the same as RebootCacheCluster with the addition of +// DisassociateGlobalReplicationGroupWithContext is the same as DisassociateGlobalReplicationGroup with the addition of // the ability to pass a context and additional request options. // -// See RebootCacheCluster for details on how to use this API operation. +// See DisassociateGlobalReplicationGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) RebootCacheClusterWithContext(ctx aws.Context, input *RebootCacheClusterInput, opts ...request.Option) (*RebootCacheClusterOutput, error) { - req, out := c.RebootCacheClusterRequest(input) +func (c *ElastiCache) DisassociateGlobalReplicationGroupWithContext(ctx aws.Context, input *DisassociateGlobalReplicationGroupInput, opts ...request.Option) (*DisassociateGlobalReplicationGroupOutput, error) { + req, out := c.DisassociateGlobalReplicationGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRemoveTagsFromResource = "RemoveTagsFromResource" +const opFailoverGlobalReplicationGroup = "FailoverGlobalReplicationGroup" -// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the -// client's request for the RemoveTagsFromResource operation. 
The "output" return +// FailoverGlobalReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the FailoverGlobalReplicationGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RemoveTagsFromResource for more information on using the RemoveTagsFromResource +// See FailoverGlobalReplicationGroup for more information on using the FailoverGlobalReplicationGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RemoveTagsFromResourceRequest method. -// req, resp := client.RemoveTagsFromResourceRequest(params) +// // Example sending a request using the FailoverGlobalReplicationGroupRequest method. +// req, resp := client.FailoverGlobalReplicationGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RemoveTagsFromResource -func (c *ElastiCache) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *TagListMessage) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/FailoverGlobalReplicationGroup +func (c *ElastiCache) FailoverGlobalReplicationGroupRequest(input *FailoverGlobalReplicationGroupInput) (req *request.Request, output *FailoverGlobalReplicationGroupOutput) { op := &request.Operation{ - Name: opRemoveTagsFromResource, + Name: opFailoverGlobalReplicationGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &RemoveTagsFromResourceInput{} + input = &FailoverGlobalReplicationGroupInput{} } - output = &TagListMessage{} + output = &FailoverGlobalReplicationGroupOutput{} req = c.newRequest(op, input, output) return } -// RemoveTagsFromResource API operation for Amazon ElastiCache. +// FailoverGlobalReplicationGroup API operation for Amazon ElastiCache. // -// Removes the tags identified by the TagKeys list from the named resource. +// Used to failover the primary region to a selected secondary region. The selected +// secondary region will become primary, and all other clusters will become +// secondary. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation RemoveTagsFromResource for usage and error information. +// API operation FailoverGlobalReplicationGroup for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" -// The requested cluster ID does not refer to an existing cluster. +// * ErrCodeGlobalReplicationGroupNotFoundFault "GlobalReplicationGroupNotFoundFault" +// The Global Datastore does not exist // -// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault" -// The requested snapshot name does not refer to an existing snapshot. +// * ErrCodeInvalidGlobalReplicationGroupStateFault "InvalidGlobalReplicationGroupState" +// The Global Datastore is not available or in primary-only state. 
// -// * ErrCodeInvalidARNFault "InvalidARN" -// The requested Amazon Resource Name (ARN) does not refer to an existing resource. +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. // -// * ErrCodeTagNotFoundFault "TagNotFound" -// The requested tag was not found on this resource. +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RemoveTagsFromResource -func (c *ElastiCache) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*TagListMessage, error) { - req, out := c.RemoveTagsFromResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/FailoverGlobalReplicationGroup +func (c *ElastiCache) FailoverGlobalReplicationGroup(input *FailoverGlobalReplicationGroupInput) (*FailoverGlobalReplicationGroupOutput, error) { + req, out := c.FailoverGlobalReplicationGroupRequest(input) return out, req.Send() } -// RemoveTagsFromResourceWithContext is the same as RemoveTagsFromResource with the addition of +// FailoverGlobalReplicationGroupWithContext is the same as FailoverGlobalReplicationGroup with the addition of // the ability to pass a context and additional request options. // -// See RemoveTagsFromResource for details on how to use this API operation. +// See FailoverGlobalReplicationGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) RemoveTagsFromResourceWithContext(ctx aws.Context, input *RemoveTagsFromResourceInput, opts ...request.Option) (*TagListMessage, error) { - req, out := c.RemoveTagsFromResourceRequest(input) +func (c *ElastiCache) FailoverGlobalReplicationGroupWithContext(ctx aws.Context, input *FailoverGlobalReplicationGroupInput, opts ...request.Option) (*FailoverGlobalReplicationGroupOutput, error) { + req, out := c.FailoverGlobalReplicationGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opResetCacheParameterGroup = "ResetCacheParameterGroup" +const opIncreaseNodeGroupsInGlobalReplicationGroup = "IncreaseNodeGroupsInGlobalReplicationGroup" -// ResetCacheParameterGroupRequest generates a "aws/request.Request" representing the -// client's request for the ResetCacheParameterGroup operation. The "output" return +// IncreaseNodeGroupsInGlobalReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the IncreaseNodeGroupsInGlobalReplicationGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ResetCacheParameterGroup for more information on using the ResetCacheParameterGroup +// See IncreaseNodeGroupsInGlobalReplicationGroup for more information on using the IncreaseNodeGroupsInGlobalReplicationGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // // -// // Example sending a request using the ResetCacheParameterGroupRequest method. -// req, resp := client.ResetCacheParameterGroupRequest(params) +// // Example sending a request using the IncreaseNodeGroupsInGlobalReplicationGroupRequest method. +// req, resp := client.IncreaseNodeGroupsInGlobalReplicationGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ResetCacheParameterGroup -func (c *ElastiCache) ResetCacheParameterGroupRequest(input *ResetCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/IncreaseNodeGroupsInGlobalReplicationGroup +func (c *ElastiCache) IncreaseNodeGroupsInGlobalReplicationGroupRequest(input *IncreaseNodeGroupsInGlobalReplicationGroupInput) (req *request.Request, output *IncreaseNodeGroupsInGlobalReplicationGroupOutput) { op := &request.Operation{ - Name: opResetCacheParameterGroup, + Name: opIncreaseNodeGroupsInGlobalReplicationGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ResetCacheParameterGroupInput{} + input = &IncreaseNodeGroupsInGlobalReplicationGroupInput{} } - output = &CacheParameterGroupNameMessage{} + output = &IncreaseNodeGroupsInGlobalReplicationGroupOutput{} req = c.newRequest(op, input, output) return } -// ResetCacheParameterGroup API operation for Amazon ElastiCache. +// IncreaseNodeGroupsInGlobalReplicationGroup API operation for Amazon ElastiCache. // -// Modifies the parameters of a cache parameter group to the engine or system -// default value. You can reset specific parameters by submitting a list of -// parameter names. To reset the entire cache parameter group, specify the ResetAllParameters -// and CacheParameterGroupName parameters. +// Increase the number of node groups in the Global Datastore // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation ResetCacheParameterGroup for usage and error information. +// API operation IncreaseNodeGroupsInGlobalReplicationGroup for usage and error information. // // Returned Error Codes: -// * ErrCodeInvalidCacheParameterGroupStateFault "InvalidCacheParameterGroupState" -// The current state of the cache parameter group does not allow the requested -// operation to occur. +// * ErrCodeGlobalReplicationGroupNotFoundFault "GlobalReplicationGroupNotFoundFault" +// The Global Datastore does not exist // -// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" -// The requested cache parameter group name does not refer to an existing cache -// parameter group. +// * ErrCodeInvalidGlobalReplicationGroupStateFault "InvalidGlobalReplicationGroupState" +// The Global Datastore is not available or in primary-only state. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // -// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" -// Two or more incompatible parameters were specified. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ResetCacheParameterGroup -func (c *ElastiCache) ResetCacheParameterGroup(input *ResetCacheParameterGroupInput) (*CacheParameterGroupNameMessage, error) { - req, out := c.ResetCacheParameterGroupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/IncreaseNodeGroupsInGlobalReplicationGroup +func (c *ElastiCache) IncreaseNodeGroupsInGlobalReplicationGroup(input *IncreaseNodeGroupsInGlobalReplicationGroupInput) (*IncreaseNodeGroupsInGlobalReplicationGroupOutput, error) { + req, out := c.IncreaseNodeGroupsInGlobalReplicationGroupRequest(input) return out, req.Send() } -// ResetCacheParameterGroupWithContext is the same as ResetCacheParameterGroup with the addition of +// IncreaseNodeGroupsInGlobalReplicationGroupWithContext is the same as IncreaseNodeGroupsInGlobalReplicationGroup with the addition of // the ability to pass a context and additional request options. // -// See ResetCacheParameterGroup for details on how to use this API operation. +// See IncreaseNodeGroupsInGlobalReplicationGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) ResetCacheParameterGroupWithContext(ctx aws.Context, input *ResetCacheParameterGroupInput, opts ...request.Option) (*CacheParameterGroupNameMessage, error) { - req, out := c.ResetCacheParameterGroupRequest(input) +func (c *ElastiCache) IncreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx aws.Context, input *IncreaseNodeGroupsInGlobalReplicationGroupInput, opts ...request.Option) (*IncreaseNodeGroupsInGlobalReplicationGroupOutput, error) { + req, out := c.IncreaseNodeGroupsInGlobalReplicationGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRevokeCacheSecurityGroupIngress = "RevokeCacheSecurityGroupIngress" +const opIncreaseReplicaCount = "IncreaseReplicaCount" -// RevokeCacheSecurityGroupIngressRequest generates a "aws/request.Request" representing the -// client's request for the RevokeCacheSecurityGroupIngress operation. The "output" return +// IncreaseReplicaCountRequest generates a "aws/request.Request" representing the +// client's request for the IncreaseReplicaCount operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RevokeCacheSecurityGroupIngress for more information on using the RevokeCacheSecurityGroupIngress +// See IncreaseReplicaCount for more information on using the IncreaseReplicaCount // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RevokeCacheSecurityGroupIngressRequest method. -// req, resp := client.RevokeCacheSecurityGroupIngressRequest(params) +// // Example sending a request using the IncreaseReplicaCountRequest method. 
+// req, resp := client.IncreaseReplicaCountRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RevokeCacheSecurityGroupIngress -func (c *ElastiCache) RevokeCacheSecurityGroupIngressRequest(input *RevokeCacheSecurityGroupIngressInput) (req *request.Request, output *RevokeCacheSecurityGroupIngressOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/IncreaseReplicaCount +func (c *ElastiCache) IncreaseReplicaCountRequest(input *IncreaseReplicaCountInput) (req *request.Request, output *IncreaseReplicaCountOutput) { op := &request.Operation{ - Name: opRevokeCacheSecurityGroupIngress, + Name: opIncreaseReplicaCount, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &RevokeCacheSecurityGroupIngressInput{} + input = &IncreaseReplicaCountInput{} } - output = &RevokeCacheSecurityGroupIngressOutput{} + output = &IncreaseReplicaCountOutput{} req = c.newRequest(op, input, output) return } -// RevokeCacheSecurityGroupIngress API operation for Amazon ElastiCache. +// IncreaseReplicaCount API operation for Amazon ElastiCache. // -// Revokes ingress from a cache security group. Use this operation to disallow -// access from an Amazon EC2 security group that had been previously authorized. +// Dynamically increases the number of replicas in a Redis (cluster mode disabled) +// replication group or the number of replica nodes in one or more node groups +// (shards) of a Redis (cluster mode enabled) replication group. This operation +// is performed with no cluster down time. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation RevokeCacheSecurityGroupIngress for usage and error information. +// API operation IncreaseReplicaCount for usage and error information. // // Returned Error Codes: -// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound" -// The requested cache security group name does not refer to an existing cache -// security group. +// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" +// The specified replication group does not exist. // -// * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified Amazon EC2 security group is not authorized for the specified -// cache security group. +// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" +// The requested replication group is not in the available state. // -// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState" -// The current state of the cache security group does not allow deletion. +// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" +// The requested cluster is not in the available state. +// +// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" +// The VPC network is in an invalid state. +// +// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" +// The requested cache node type is not available in the specified Availability +// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) +// in the ElastiCache User Guide.
+// +// * ErrCodeClusterQuotaForCustomerExceededFault "ClusterQuotaForCustomerExceeded" +// The request cannot be processed because it would exceed the allowed number +// of clusters per customer. +// +// * ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault "NodeGroupsPerReplicationGroupQuotaExceeded" +// The request cannot be processed because it would exceed the maximum allowed +// number of node groups (shards) in a single replication group. The default +// maximum is 90 +// +// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded" +// The request cannot be processed because it would exceed the allowed number +// of cache nodes per customer. +// +// * ErrCodeNoOperationFault "NoOperationFault" +// The operation was not performed because no changes were required. +// +// * ErrCodeInvalidKMSKeyFault "InvalidKMSKeyFault" +// The KMS key supplied is not valid. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -5355,223 +5514,303 @@ func (c *ElastiCache) RevokeCacheSecurityGroupIngressRequest(input *RevokeCacheS // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RevokeCacheSecurityGroupIngress -func (c *ElastiCache) RevokeCacheSecurityGroupIngress(input *RevokeCacheSecurityGroupIngressInput) (*RevokeCacheSecurityGroupIngressOutput, error) { - req, out := c.RevokeCacheSecurityGroupIngressRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/IncreaseReplicaCount +func (c *ElastiCache) IncreaseReplicaCount(input *IncreaseReplicaCountInput) (*IncreaseReplicaCountOutput, error) { + req, out := c.IncreaseReplicaCountRequest(input) return out, req.Send() } -// RevokeCacheSecurityGroupIngressWithContext is the same as RevokeCacheSecurityGroupIngress with the addition of +// IncreaseReplicaCountWithContext is the same as IncreaseReplicaCount with the addition of // the ability to pass a context and additional request options. // -// See RevokeCacheSecurityGroupIngress for details on how to use this API operation. +// See IncreaseReplicaCount for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) RevokeCacheSecurityGroupIngressWithContext(ctx aws.Context, input *RevokeCacheSecurityGroupIngressInput, opts ...request.Option) (*RevokeCacheSecurityGroupIngressOutput, error) { - req, out := c.RevokeCacheSecurityGroupIngressRequest(input) +func (c *ElastiCache) IncreaseReplicaCountWithContext(ctx aws.Context, input *IncreaseReplicaCountInput, opts ...request.Option) (*IncreaseReplicaCountOutput, error) { + req, out := c.IncreaseReplicaCountRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStartMigration = "StartMigration" +const opListAllowedNodeTypeModifications = "ListAllowedNodeTypeModifications" -// StartMigrationRequest generates a "aws/request.Request" representing the -// client's request for the StartMigration operation. 
The "output" return +// ListAllowedNodeTypeModificationsRequest generates a "aws/request.Request" representing the +// client's request for the ListAllowedNodeTypeModifications operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartMigration for more information on using the StartMigration +// See ListAllowedNodeTypeModifications for more information on using the ListAllowedNodeTypeModifications // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartMigrationRequest method. -// req, resp := client.StartMigrationRequest(params) +// // Example sending a request using the ListAllowedNodeTypeModificationsRequest method. +// req, resp := client.ListAllowedNodeTypeModificationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/StartMigration -func (c *ElastiCache) StartMigrationRequest(input *StartMigrationInput) (req *request.Request, output *StartMigrationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListAllowedNodeTypeModifications +func (c *ElastiCache) ListAllowedNodeTypeModificationsRequest(input *ListAllowedNodeTypeModificationsInput) (req *request.Request, output *ListAllowedNodeTypeModificationsOutput) { op := &request.Operation{ - Name: opStartMigration, + Name: opListAllowedNodeTypeModifications, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StartMigrationInput{} + input = &ListAllowedNodeTypeModificationsInput{} } - output = &StartMigrationOutput{} + output = &ListAllowedNodeTypeModificationsOutput{} req = c.newRequest(op, input, output) return } -// StartMigration API operation for Amazon ElastiCache. +// ListAllowedNodeTypeModifications API operation for Amazon ElastiCache. // -// Start the migration of data. +// Lists all available node types that you can scale your Redis cluster's or +// replication group's current node type up to. +// +// When you use the ModifyCacheCluster or ModifyReplicationGroup operations +// to scale your cluster or replication group, the value of the CacheNodeType +// parameter must be one of the node types returned by this operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation StartMigration for usage and error information. +// API operation ListAllowedNodeTypeModifications for usage and error information. // // Returned Error Codes: +// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" +// The requested cluster ID does not refer to an existing cluster. +// // * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" // The specified replication group does not exist. // -// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" -// The requested replication group is not in the available state.
-// -// * ErrCodeReplicationGroupAlreadyUnderMigrationFault "ReplicationGroupAlreadyUnderMigrationFault" -// The targeted replication group is not available. +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/StartMigration -func (c *ElastiCache) StartMigration(input *StartMigrationInput) (*StartMigrationOutput, error) { - req, out := c.StartMigrationRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListAllowedNodeTypeModifications +func (c *ElastiCache) ListAllowedNodeTypeModifications(input *ListAllowedNodeTypeModificationsInput) (*ListAllowedNodeTypeModificationsOutput, error) { + req, out := c.ListAllowedNodeTypeModificationsRequest(input) return out, req.Send() } -// StartMigrationWithContext is the same as StartMigration with the addition of +// ListAllowedNodeTypeModificationsWithContext is the same as ListAllowedNodeTypeModifications with the addition of // the ability to pass a context and additional request options. // -// See StartMigration for details on how to use this API operation. +// See ListAllowedNodeTypeModifications for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) StartMigrationWithContext(ctx aws.Context, input *StartMigrationInput, opts ...request.Option) (*StartMigrationOutput, error) { - req, out := c.StartMigrationRequest(input) +func (c *ElastiCache) ListAllowedNodeTypeModificationsWithContext(ctx aws.Context, input *ListAllowedNodeTypeModificationsInput, opts ...request.Option) (*ListAllowedNodeTypeModificationsOutput, error) { + req, out := c.ListAllowedNodeTypeModificationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTestFailover = "TestFailover" +const opListTagsForResource = "ListTagsForResource" -// TestFailoverRequest generates a "aws/request.Request" representing the -// client's request for the TestFailover operation. The "output" return +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TestFailover for more information on using the TestFailover +// See ListTagsForResource for more information on using the ListTagsForResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TestFailoverRequest method. -// req, resp := client.TestFailoverRequest(params) +// // Example sending a request using the ListTagsForResourceRequest method. 
+// req, resp := client.ListTagsForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TestFailover -func (c *ElastiCache) TestFailoverRequest(input *TestFailoverInput) (req *request.Request, output *TestFailoverOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListTagsForResource +func (c *ElastiCache) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *TagListMessage) { op := &request.Operation{ - Name: opTestFailover, + Name: opListTagsForResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &TestFailoverInput{} + input = &ListTagsForResourceInput{} } - output = &TestFailoverOutput{} + output = &TagListMessage{} req = c.newRequest(op, input, output) return } -// TestFailover API operation for Amazon ElastiCache. +// ListTagsForResource API operation for Amazon ElastiCache. // -// Represents the input of a TestFailover operation which test automatic failover -// on a specified node group (called shard in the console) in a replication -// group (called cluster in the console). +// Lists all cost allocation tags currently on the named resource. A cost allocation +// tag is a key-value pair where the key is case-sensitive and the value is +// optional. You can use cost allocation tags to categorize and track your AWS +// costs. // -// Note the following +// If the cluster is not in the available state, ListTagsForResource returns +// an error. // -// * A customer can use this operation to test automatic failover on up to -// 5 shards (called node groups in the ElastiCache API and AWS CLI) in any -// rolling 24-hour period. +// You can have a maximum of 50 cost allocation tags on an ElastiCache resource. +// For more information, see Monitoring Costs with Tags (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Tagging.html). // -// * If calling this operation on shards in different clusters (called replication -// groups in the API and CLI), the calls can be made concurrently. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// * If calling this operation multiple times on different shards in the -// same Redis (cluster mode enabled) replication group, the first node replacement -// must complete before a subsequent call can be made. +// See the AWS API reference guide for Amazon ElastiCache's +// API operation ListTagsForResource for usage and error information. // -// * To determine whether the node replacement is complete you can check -// Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache -// API. 
Look for the following automatic failover related events, listed -// here in order of occurrance: Replication group message: Test Failover -// API called for node group Cache cluster message: Failover -// from master node to replica node completed -// Replication group message: Failover from master node -// to replica node completed Cache cluster message: Recovering -// cache nodes Cache cluster message: Finished recovery for cache -// nodes For more information see: Viewing ElastiCache Events (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ECEvents.Viewing.html) -// in the ElastiCache User Guide DescribeEvents (https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_DescribeEvents.html) -// in the ElastiCache API Reference +// Returned Error Codes: +// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" +// The requested cluster ID does not refer to an existing cluster. // -// Also see, Testing Multi-AZ with Automatic Failover (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html#auto-failover-test) -// in the ElastiCache User Guide. +// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault" +// The requested snapshot name does not refer to an existing snapshot. +// +// * ErrCodeInvalidARNFault "InvalidARN" +// The requested Amazon Resource Name (ARN) does not refer to an existing resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListTagsForResource +func (c *ElastiCache) ListTagsForResource(input *ListTagsForResourceInput) (*TagListMessage, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*TagListMessage, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyCacheCluster = "ModifyCacheCluster" + +// ModifyCacheClusterRequest generates a "aws/request.Request" representing the +// client's request for the ModifyCacheCluster operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyCacheCluster for more information on using the ModifyCacheCluster +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyCacheClusterRequest method. 
+// req, resp := client.ModifyCacheClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheCluster +func (c *ElastiCache) ModifyCacheClusterRequest(input *ModifyCacheClusterInput) (req *request.Request, output *ModifyCacheClusterOutput) { + op := &request.Operation{ + Name: opModifyCacheCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyCacheClusterInput{} + } + + output = &ModifyCacheClusterOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyCacheCluster API operation for Amazon ElastiCache. +// +// Modifies the settings for a cluster. You can use this operation to change +// one or more cluster configuration parameters by specifying the parameters +// and the new values. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon ElastiCache's -// API operation TestFailover for usage and error information. +// API operation ModifyCacheCluster for usage and error information. // // Returned Error Codes: -// * ErrCodeAPICallRateForCustomerExceededFault "APICallRateForCustomerExceeded" -// The customer has exceeded the allowed rate of API calls. -// // * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" // The requested cluster is not in the available state. // -// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" -// The requested replication group is not in the available state. +// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState" +// The current state of the cache security group does not allow deletion. // -// * ErrCodeNodeGroupNotFoundFault "NodeGroupNotFoundFault" -// The node group specified by the NodeGroupId parameter could not be found. -// Please verify that the node group exists and that you spelled the NodeGroupId -// value correctly. +// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" +// The requested cache node type is not available in the specified Availability +// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) +// in the ElastiCache User Guide. // -// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" -// The specified replication group does not exist. +// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" +// The requested cluster ID does not refer to an existing cluster. // -// * ErrCodeTestFailoverNotAvailableFault "TestFailoverNotAvailableFault" -// The TestFailover action is not available. +// * ErrCodeNodeQuotaForClusterExceededFault "NodeQuotaForClusterExceeded" +// The request cannot be processed because it would exceed the allowed number +// of cache nodes in a single cluster. // -// * ErrCodeInvalidKMSKeyFault "InvalidKMSKeyFault" -// The KMS key supplied is not valid. +// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded" +// The request cannot be processed because it would exceed the allowed number +// of cache nodes per customer. 
+// +// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound" +// The requested cache security group name does not refer to an existing cache +// security group. +// +// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" +// The requested cache parameter group name does not refer to an existing cache +// parameter group. +// +// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" +// The VPC network is in an invalid state. // // * ErrCodeInvalidParameterValueException "InvalidParameterValue" // The value for a parameter is invalid. @@ -5579,241 +5818,3542 @@ func (c *ElastiCache) TestFailoverRequest(input *TestFailoverInput) (req *reques // * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" // Two or more incompatible parameters were specified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TestFailover -func (c *ElastiCache) TestFailover(input *TestFailoverInput) (*TestFailoverOutput, error) { - req, out := c.TestFailoverRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheCluster +func (c *ElastiCache) ModifyCacheCluster(input *ModifyCacheClusterInput) (*ModifyCacheClusterOutput, error) { + req, out := c.ModifyCacheClusterRequest(input) return out, req.Send() } -// TestFailoverWithContext is the same as TestFailover with the addition of +// ModifyCacheClusterWithContext is the same as ModifyCacheCluster with the addition of // the ability to pass a context and additional request options. // -// See TestFailover for details on how to use this API operation. +// See ModifyCacheCluster for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElastiCache) TestFailoverWithContext(ctx aws.Context, input *TestFailoverInput, opts ...request.Option) (*TestFailoverOutput, error) { - req, out := c.TestFailoverRequest(input) +func (c *ElastiCache) ModifyCacheClusterWithContext(ctx aws.Context, input *ModifyCacheClusterInput, opts ...request.Option) (*ModifyCacheClusterOutput, error) { + req, out := c.ModifyCacheClusterRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// Represents the input of an AddTagsToResource operation. -type AddTagsToResourceInput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the resource to which the tags are to be - // added, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster - // or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot. ElastiCache - // resources are cluster and snapshot. - // - // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS - // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // - // ResourceName is a required field - ResourceName *string `type:"string" required:"true"` +const opModifyCacheParameterGroup = "ModifyCacheParameterGroup" - // A list of cost allocation tags to be added to this resource. A tag is a key-value - // pair. A tag key must be accompanied by a tag value. 
- // - // Tags is a required field - Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"` -} - -// String returns the string representation -func (s AddTagsToResourceInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AddTagsToResourceInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *AddTagsToResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"} - if s.ResourceName == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceName")) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) +// ModifyCacheParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyCacheParameterGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyCacheParameterGroup for more information on using the ModifyCacheParameterGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyCacheParameterGroupRequest method. +// req, resp := client.ModifyCacheParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheParameterGroup +func (c *ElastiCache) ModifyCacheParameterGroupRequest(input *ModifyCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) { + op := &request.Operation{ + Name: opModifyCacheParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", } - if invalidParams.Len() > 0 { - return invalidParams + if input == nil { + input = &ModifyCacheParameterGroupInput{} } - return nil -} - -// SetResourceName sets the ResourceName field's value. -func (s *AddTagsToResourceInput) SetResourceName(v string) *AddTagsToResourceInput { - s.ResourceName = &v - return s -} -// SetTags sets the Tags field's value. -func (s *AddTagsToResourceInput) SetTags(v []*Tag) *AddTagsToResourceInput { - s.Tags = v - return s + output = &CacheParameterGroupNameMessage{} + req = c.newRequest(op, input, output) + return } -// Represents the input of an AuthorizeCacheSecurityGroupIngress operation. -type AuthorizeCacheSecurityGroupIngressInput struct { - _ struct{} `type:"structure"` - - // The cache security group that allows network ingress. - // - // CacheSecurityGroupName is a required field - CacheSecurityGroupName *string `type:"string" required:"true"` - - // The Amazon EC2 security group to be authorized for ingress to the cache security - // group. - // - // EC2SecurityGroupName is a required field - EC2SecurityGroupName *string `type:"string" required:"true"` - - // The AWS account number of the Amazon EC2 security group owner. Note that - // this is not the same thing as an AWS access key ID - you must provide a valid - // AWS account number for this parameter. 
- // - // EC2SecurityGroupOwnerId is a required field - EC2SecurityGroupOwnerId *string `type:"string" required:"true"` +// ModifyCacheParameterGroup API operation for Amazon ElastiCache. +// +// Modifies the parameters of a cache parameter group. You can modify up to +// 20 parameters in a single request by submitting a list parameter name and +// value pairs. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation ModifyCacheParameterGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" +// The requested cache parameter group name does not refer to an existing cache +// parameter group. +// +// * ErrCodeInvalidCacheParameterGroupStateFault "InvalidCacheParameterGroupState" +// The current state of the cache parameter group does not allow the requested +// operation to occur. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. +// +// * ErrCodeInvalidGlobalReplicationGroupStateFault "InvalidGlobalReplicationGroupState" +// The Global Datastore is not available or in primary-only state. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheParameterGroup +func (c *ElastiCache) ModifyCacheParameterGroup(input *ModifyCacheParameterGroupInput) (*CacheParameterGroupNameMessage, error) { + req, out := c.ModifyCacheParameterGroupRequest(input) + return out, req.Send() } -// String returns the string representation -func (s AuthorizeCacheSecurityGroupIngressInput) String() string { - return awsutil.Prettify(s) +// ModifyCacheParameterGroupWithContext is the same as ModifyCacheParameterGroup with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyCacheParameterGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) ModifyCacheParameterGroupWithContext(ctx aws.Context, input *ModifyCacheParameterGroupInput, opts ...request.Option) (*CacheParameterGroupNameMessage, error) { + req, out := c.ModifyCacheParameterGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -// GoString returns the string representation -func (s AuthorizeCacheSecurityGroupIngressInput) GoString() string { - return s.String() -} +const opModifyCacheSubnetGroup = "ModifyCacheSubnetGroup" -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *AuthorizeCacheSecurityGroupIngressInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AuthorizeCacheSecurityGroupIngressInput"} - if s.CacheSecurityGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName")) - } - if s.EC2SecurityGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("EC2SecurityGroupName")) - } - if s.EC2SecurityGroupOwnerId == nil { - invalidParams.Add(request.NewErrParamRequired("EC2SecurityGroupOwnerId")) +// ModifyCacheSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyCacheSubnetGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyCacheSubnetGroup for more information on using the ModifyCacheSubnetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyCacheSubnetGroupRequest method. +// req, resp := client.ModifyCacheSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheSubnetGroup +func (c *ElastiCache) ModifyCacheSubnetGroupRequest(input *ModifyCacheSubnetGroupInput) (req *request.Request, output *ModifyCacheSubnetGroupOutput) { + op := &request.Operation{ + Name: opModifyCacheSubnetGroup, + HTTPMethod: "POST", + HTTPPath: "/", } - if invalidParams.Len() > 0 { - return invalidParams + if input == nil { + input = &ModifyCacheSubnetGroupInput{} } - return nil -} -// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value. -func (s *AuthorizeCacheSecurityGroupIngressInput) SetCacheSecurityGroupName(v string) *AuthorizeCacheSecurityGroupIngressInput { - s.CacheSecurityGroupName = &v - return s + output = &ModifyCacheSubnetGroupOutput{} + req = c.newRequest(op, input, output) + return } -// SetEC2SecurityGroupName sets the EC2SecurityGroupName field's value. -func (s *AuthorizeCacheSecurityGroupIngressInput) SetEC2SecurityGroupName(v string) *AuthorizeCacheSecurityGroupIngressInput { - s.EC2SecurityGroupName = &v - return s +// ModifyCacheSubnetGroup API operation for Amazon ElastiCache. +// +// Modifies an existing cache subnet group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation ModifyCacheSubnetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCacheSubnetGroupNotFoundFault "CacheSubnetGroupNotFoundFault" +// The requested cache subnet group name does not refer to an existing cache +// subnet group. +// +// * ErrCodeCacheSubnetQuotaExceededFault "CacheSubnetQuotaExceededFault" +// The request cannot be processed because it would exceed the allowed number +// of subnets in a cache subnet group. +// +// * ErrCodeSubnetInUse "SubnetInUse" +// The requested subnet is being used by another cache subnet group. 
+// +// * ErrCodeInvalidSubnet "InvalidSubnet" +// An invalid subnet identifier was specified. +// +// * ErrCodeSubnetNotAllowedFault "SubnetNotAllowedFault" +// At least one subnet ID does not match the other subnet IDs. This mismatch +// typically occurs when a user sets one subnet ID to a regional Availability +// Zone and a different one to an outpost. Or when a user sets the subnet ID +// to an Outpost when not subscribed on this service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheSubnetGroup +func (c *ElastiCache) ModifyCacheSubnetGroup(input *ModifyCacheSubnetGroupInput) (*ModifyCacheSubnetGroupOutput, error) { + req, out := c.ModifyCacheSubnetGroupRequest(input) + return out, req.Send() } -// SetEC2SecurityGroupOwnerId sets the EC2SecurityGroupOwnerId field's value. -func (s *AuthorizeCacheSecurityGroupIngressInput) SetEC2SecurityGroupOwnerId(v string) *AuthorizeCacheSecurityGroupIngressInput { - s.EC2SecurityGroupOwnerId = &v - return s +// ModifyCacheSubnetGroupWithContext is the same as ModifyCacheSubnetGroup with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyCacheSubnetGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) ModifyCacheSubnetGroupWithContext(ctx aws.Context, input *ModifyCacheSubnetGroupInput, opts ...request.Option) (*ModifyCacheSubnetGroupOutput, error) { + req, out := c.ModifyCacheSubnetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -type AuthorizeCacheSecurityGroupIngressOutput struct { - _ struct{} `type:"structure"` +const opModifyGlobalReplicationGroup = "ModifyGlobalReplicationGroup" - // Represents the output of one of the following operations: +// ModifyGlobalReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyGlobalReplicationGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyGlobalReplicationGroup for more information on using the ModifyGlobalReplicationGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyGlobalReplicationGroupRequest method. 
+// req, resp := client.ModifyGlobalReplicationGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyGlobalReplicationGroup +func (c *ElastiCache) ModifyGlobalReplicationGroupRequest(input *ModifyGlobalReplicationGroupInput) (req *request.Request, output *ModifyGlobalReplicationGroupOutput) { + op := &request.Operation{ + Name: opModifyGlobalReplicationGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyGlobalReplicationGroupInput{} + } + + output = &ModifyGlobalReplicationGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyGlobalReplicationGroup API operation for Amazon ElastiCache. +// +// Modifies the settings for a Global Datastore. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation ModifyGlobalReplicationGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeGlobalReplicationGroupNotFoundFault "GlobalReplicationGroupNotFoundFault" +// The Global Datastore does not exist +// +// * ErrCodeInvalidGlobalReplicationGroupStateFault "InvalidGlobalReplicationGroupState" +// The Global Datastore is not available or in primary-only state. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyGlobalReplicationGroup +func (c *ElastiCache) ModifyGlobalReplicationGroup(input *ModifyGlobalReplicationGroupInput) (*ModifyGlobalReplicationGroupOutput, error) { + req, out := c.ModifyGlobalReplicationGroupRequest(input) + return out, req.Send() +} + +// ModifyGlobalReplicationGroupWithContext is the same as ModifyGlobalReplicationGroup with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyGlobalReplicationGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) ModifyGlobalReplicationGroupWithContext(ctx aws.Context, input *ModifyGlobalReplicationGroupInput, opts ...request.Option) (*ModifyGlobalReplicationGroupOutput, error) { + req, out := c.ModifyGlobalReplicationGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyReplicationGroup = "ModifyReplicationGroup" + +// ModifyReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyReplicationGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyReplicationGroup for more information on using the ModifyReplicationGroup +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyReplicationGroupRequest method. +// req, resp := client.ModifyReplicationGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyReplicationGroup +func (c *ElastiCache) ModifyReplicationGroupRequest(input *ModifyReplicationGroupInput) (req *request.Request, output *ModifyReplicationGroupOutput) { + op := &request.Operation{ + Name: opModifyReplicationGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyReplicationGroupInput{} + } + + output = &ModifyReplicationGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyReplicationGroup API operation for Amazon ElastiCache. +// +// Modifies the settings for a replication group. +// +// * Scaling for Amazon ElastiCache for Redis (cluster mode enabled) (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/scaling-redis-cluster-mode-enabled.html) +// in the ElastiCache User Guide +// +// * ModifyReplicationGroupShardConfiguration (https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyReplicationGroupShardConfiguration.html) +// in the ElastiCache API Reference +// +// This operation is valid for Redis only. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation ModifyReplicationGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" +// The specified replication group does not exist. +// +// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" +// The requested replication group is not in the available state. +// +// * ErrCodeInvalidUserGroupStateFault "InvalidUserGroupState" +// The user group is not in an active state. +// +// * ErrCodeUserGroupNotFoundFault "UserGroupNotFound" +// The user group was not found or does not exist +// +// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" +// The requested cluster is not in the available state. +// +// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState" +// The current state of the cache security group does not allow deletion. +// +// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity" +// The requested cache node type is not available in the specified Availability +// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY) +// in the ElastiCache User Guide. +// +// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" +// The requested cluster ID does not refer to an existing cluster. +// +// * ErrCodeNodeQuotaForClusterExceededFault "NodeQuotaForClusterExceeded" +// The request cannot be processed because it would exceed the allowed number +// of cache nodes in a single cluster. 
+// +// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded" +// The request cannot be processed because it would exceed the allowed number +// of cache nodes per customer. +// +// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound" +// The requested cache security group name does not refer to an existing cache +// security group. +// +// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" +// The requested cache parameter group name does not refer to an existing cache +// parameter group. +// +// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault" +// The VPC network is in an invalid state. +// +// * ErrCodeInvalidKMSKeyFault "InvalidKMSKeyFault" +// The KMS key supplied is not valid. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyReplicationGroup +func (c *ElastiCache) ModifyReplicationGroup(input *ModifyReplicationGroupInput) (*ModifyReplicationGroupOutput, error) { + req, out := c.ModifyReplicationGroupRequest(input) + return out, req.Send() +} + +// ModifyReplicationGroupWithContext is the same as ModifyReplicationGroup with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyReplicationGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) ModifyReplicationGroupWithContext(ctx aws.Context, input *ModifyReplicationGroupInput, opts ...request.Option) (*ModifyReplicationGroupOutput, error) { + req, out := c.ModifyReplicationGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyReplicationGroupShardConfiguration = "ModifyReplicationGroupShardConfiguration" + +// ModifyReplicationGroupShardConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the ModifyReplicationGroupShardConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyReplicationGroupShardConfiguration for more information on using the ModifyReplicationGroupShardConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyReplicationGroupShardConfigurationRequest method. 
+// req, resp := client.ModifyReplicationGroupShardConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyReplicationGroupShardConfiguration
+func (c *ElastiCache) ModifyReplicationGroupShardConfigurationRequest(input *ModifyReplicationGroupShardConfigurationInput) (req *request.Request, output *ModifyReplicationGroupShardConfigurationOutput) {
+ op := &request.Operation{
+ Name: opModifyReplicationGroupShardConfiguration,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ModifyReplicationGroupShardConfigurationInput{}
+ }
+
+ output = &ModifyReplicationGroupShardConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// ModifyReplicationGroupShardConfiguration API operation for Amazon ElastiCache.
+//
+// Modifies a replication group's shards (node groups) by allowing you to add
+// shards, remove shards, or rebalance the keyspaces among existing shards.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon ElastiCache's
+// API operation ModifyReplicationGroupShardConfiguration for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault"
+// The specified replication group does not exist.
+//
+// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState"
+// The requested replication group is not in the available state.
+//
+// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState"
+// The requested cluster is not in the available state.
+//
+// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault"
+// The VPC network is in an invalid state.
+//
+// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity"
+// The requested cache node type is not available in the specified Availability
+// Zone. For more information, see InsufficientCacheClusterCapacity (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ErrorMessages.html#ErrorMessages.INSUFFICIENT_CACHE_CLUSTER_CAPACITY)
+// in the ElastiCache User Guide.
+//
+// * ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault "NodeGroupsPerReplicationGroupQuotaExceeded"
+// The request cannot be processed because it would exceed the maximum allowed
+// number of node groups (shards) in a single replication group. The default
+// maximum is 90.
+//
+// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded"
+// The request cannot be processed because it would exceed the allowed number
+// of cache nodes per customer.
+//
+// * ErrCodeInvalidKMSKeyFault "InvalidKMSKeyFault"
+// The KMS key supplied is not valid.
+//
+// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
+// The value for a parameter is invalid.
+//
+// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
+// Two or more incompatible parameters were specified.
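+//
+// Illustrative sketch only (not part of the generated documentation): the
+// client variable and values below are placeholders, and the field names are
+// assumed from this operation's request shape.
+//
+//    // Scale out to four node groups and let ElastiCache rebalance keyspaces.
+//    input := &ModifyReplicationGroupShardConfigurationInput{
+//        ReplicationGroupId: aws.String("example-replication-group"),
+//        NodeGroupCount:     aws.Int64(4),
+//        ApplyImmediately:   aws.Bool(true),
+//    }
+//    resp, err := client.ModifyReplicationGroupShardConfiguration(input)
+//    if err != nil {
+//        fmt.Println(err)
+//    } else {
+//        fmt.Println(resp)
+//    }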
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyReplicationGroupShardConfiguration +func (c *ElastiCache) ModifyReplicationGroupShardConfiguration(input *ModifyReplicationGroupShardConfigurationInput) (*ModifyReplicationGroupShardConfigurationOutput, error) { + req, out := c.ModifyReplicationGroupShardConfigurationRequest(input) + return out, req.Send() +} + +// ModifyReplicationGroupShardConfigurationWithContext is the same as ModifyReplicationGroupShardConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyReplicationGroupShardConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) ModifyReplicationGroupShardConfigurationWithContext(ctx aws.Context, input *ModifyReplicationGroupShardConfigurationInput, opts ...request.Option) (*ModifyReplicationGroupShardConfigurationOutput, error) { + req, out := c.ModifyReplicationGroupShardConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyUser = "ModifyUser" + +// ModifyUserRequest generates a "aws/request.Request" representing the +// client's request for the ModifyUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyUser for more information on using the ModifyUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyUserRequest method. +// req, resp := client.ModifyUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyUser +func (c *ElastiCache) ModifyUserRequest(input *ModifyUserInput) (req *request.Request, output *ModifyUserOutput) { + op := &request.Operation{ + Name: opModifyUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyUserInput{} + } + + output = &ModifyUserOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyUser API operation for Amazon ElastiCache. +// +// Changes user password(s) and/or access string. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation ModifyUser for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUserNotFoundFault "UserNotFound" +// The user does not exist or could not be found. +// +// * ErrCodeInvalidUserStateFault "InvalidUserState" +// The user is not in active state. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. 
+// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyUser +func (c *ElastiCache) ModifyUser(input *ModifyUserInput) (*ModifyUserOutput, error) { + req, out := c.ModifyUserRequest(input) + return out, req.Send() +} + +// ModifyUserWithContext is the same as ModifyUser with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) ModifyUserWithContext(ctx aws.Context, input *ModifyUserInput, opts ...request.Option) (*ModifyUserOutput, error) { + req, out := c.ModifyUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opModifyUserGroup = "ModifyUserGroup" + +// ModifyUserGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyUserGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ModifyUserGroup for more information on using the ModifyUserGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ModifyUserGroupRequest method. +// req, resp := client.ModifyUserGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyUserGroup +func (c *ElastiCache) ModifyUserGroupRequest(input *ModifyUserGroupInput) (req *request.Request, output *ModifyUserGroupOutput) { + op := &request.Operation{ + Name: opModifyUserGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyUserGroupInput{} + } + + output = &ModifyUserGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// ModifyUserGroup API operation for Amazon ElastiCache. +// +// Changes the list of users that belong to the user group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation ModifyUserGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUserGroupNotFoundFault "UserGroupNotFound" +// The user group was not found or does not exist +// +// * ErrCodeUserNotFoundFault "UserNotFound" +// The user does not exist or could not be found. +// +// * ErrCodeDuplicateUserNameFault "DuplicateUserName" +// A user with this username already exists. +// +// * ErrCodeDefaultUserRequired "DefaultUserRequired" +// You must add default user to a user group. 
+// +// * ErrCodeInvalidUserGroupStateFault "InvalidUserGroupState" +// The user group is not in an active state. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyUserGroup +func (c *ElastiCache) ModifyUserGroup(input *ModifyUserGroupInput) (*ModifyUserGroupOutput, error) { + req, out := c.ModifyUserGroupRequest(input) + return out, req.Send() +} + +// ModifyUserGroupWithContext is the same as ModifyUserGroup with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyUserGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) ModifyUserGroupWithContext(ctx aws.Context, input *ModifyUserGroupInput, opts ...request.Option) (*ModifyUserGroupOutput, error) { + req, out := c.ModifyUserGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPurchaseReservedCacheNodesOffering = "PurchaseReservedCacheNodesOffering" + +// PurchaseReservedCacheNodesOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseReservedCacheNodesOffering operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PurchaseReservedCacheNodesOffering for more information on using the PurchaseReservedCacheNodesOffering +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PurchaseReservedCacheNodesOfferingRequest method. +// req, resp := client.PurchaseReservedCacheNodesOfferingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/PurchaseReservedCacheNodesOffering +func (c *ElastiCache) PurchaseReservedCacheNodesOfferingRequest(input *PurchaseReservedCacheNodesOfferingInput) (req *request.Request, output *PurchaseReservedCacheNodesOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedCacheNodesOffering, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseReservedCacheNodesOfferingInput{} + } + + output = &PurchaseReservedCacheNodesOfferingOutput{} + req = c.newRequest(op, input, output) + return +} + +// PurchaseReservedCacheNodesOffering API operation for Amazon ElastiCache. +// +// Allows you to purchase a reserved cache node offering. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
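+//
+// Illustrative sketch only: one way to branch on the error codes listed below
+// using a runtime type assertion (the input value is a placeholder).
+//
+//    resp, err := client.PurchaseReservedCacheNodesOffering(input)
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            switch aerr.Code() {
+//            case ErrCodeReservedCacheNodeQuotaExceededFault:
+//                // The reservation quota has been reached; handle separately.
+//            default:
+//                fmt.Println(aerr.Code(), aerr.Message())
+//            }
+//        }
+//    } else {
+//        fmt.Println(resp)
+//    }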
+// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation PurchaseReservedCacheNodesOffering for usage and error information. +// +// Returned Error Codes: +// * ErrCodeReservedCacheNodesOfferingNotFoundFault "ReservedCacheNodesOfferingNotFound" +// The requested cache node offering does not exist. +// +// * ErrCodeReservedCacheNodeAlreadyExistsFault "ReservedCacheNodeAlreadyExists" +// You already have a reservation with the given identifier. +// +// * ErrCodeReservedCacheNodeQuotaExceededFault "ReservedCacheNodeQuotaExceeded" +// The request cannot be processed because it would exceed the user's cache +// node quota. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/PurchaseReservedCacheNodesOffering +func (c *ElastiCache) PurchaseReservedCacheNodesOffering(input *PurchaseReservedCacheNodesOfferingInput) (*PurchaseReservedCacheNodesOfferingOutput, error) { + req, out := c.PurchaseReservedCacheNodesOfferingRequest(input) + return out, req.Send() +} + +// PurchaseReservedCacheNodesOfferingWithContext is the same as PurchaseReservedCacheNodesOffering with the addition of +// the ability to pass a context and additional request options. +// +// See PurchaseReservedCacheNodesOffering for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) PurchaseReservedCacheNodesOfferingWithContext(ctx aws.Context, input *PurchaseReservedCacheNodesOfferingInput, opts ...request.Option) (*PurchaseReservedCacheNodesOfferingOutput, error) { + req, out := c.PurchaseReservedCacheNodesOfferingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRebalanceSlotsInGlobalReplicationGroup = "RebalanceSlotsInGlobalReplicationGroup" + +// RebalanceSlotsInGlobalReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the RebalanceSlotsInGlobalReplicationGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RebalanceSlotsInGlobalReplicationGroup for more information on using the RebalanceSlotsInGlobalReplicationGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RebalanceSlotsInGlobalReplicationGroupRequest method. 
+// req, resp := client.RebalanceSlotsInGlobalReplicationGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RebalanceSlotsInGlobalReplicationGroup +func (c *ElastiCache) RebalanceSlotsInGlobalReplicationGroupRequest(input *RebalanceSlotsInGlobalReplicationGroupInput) (req *request.Request, output *RebalanceSlotsInGlobalReplicationGroupOutput) { + op := &request.Operation{ + Name: opRebalanceSlotsInGlobalReplicationGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebalanceSlotsInGlobalReplicationGroupInput{} + } + + output = &RebalanceSlotsInGlobalReplicationGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// RebalanceSlotsInGlobalReplicationGroup API operation for Amazon ElastiCache. +// +// Redistribute slots to ensure uniform distribution across existing shards +// in the cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation RebalanceSlotsInGlobalReplicationGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeGlobalReplicationGroupNotFoundFault "GlobalReplicationGroupNotFoundFault" +// The Global Datastore does not exist +// +// * ErrCodeInvalidGlobalReplicationGroupStateFault "InvalidGlobalReplicationGroupState" +// The Global Datastore is not available or in primary-only state. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RebalanceSlotsInGlobalReplicationGroup +func (c *ElastiCache) RebalanceSlotsInGlobalReplicationGroup(input *RebalanceSlotsInGlobalReplicationGroupInput) (*RebalanceSlotsInGlobalReplicationGroupOutput, error) { + req, out := c.RebalanceSlotsInGlobalReplicationGroupRequest(input) + return out, req.Send() +} + +// RebalanceSlotsInGlobalReplicationGroupWithContext is the same as RebalanceSlotsInGlobalReplicationGroup with the addition of +// the ability to pass a context and additional request options. +// +// See RebalanceSlotsInGlobalReplicationGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) RebalanceSlotsInGlobalReplicationGroupWithContext(ctx aws.Context, input *RebalanceSlotsInGlobalReplicationGroupInput, opts ...request.Option) (*RebalanceSlotsInGlobalReplicationGroupOutput, error) { + req, out := c.RebalanceSlotsInGlobalReplicationGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRebootCacheCluster = "RebootCacheCluster" + +// RebootCacheClusterRequest generates a "aws/request.Request" representing the +// client's request for the RebootCacheCluster operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See RebootCacheCluster for more information on using the RebootCacheCluster +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RebootCacheClusterRequest method. +// req, resp := client.RebootCacheClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RebootCacheCluster +func (c *ElastiCache) RebootCacheClusterRequest(input *RebootCacheClusterInput) (req *request.Request, output *RebootCacheClusterOutput) { + op := &request.Operation{ + Name: opRebootCacheCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RebootCacheClusterInput{} + } + + output = &RebootCacheClusterOutput{} + req = c.newRequest(op, input, output) + return +} + +// RebootCacheCluster API operation for Amazon ElastiCache. +// +// Reboots some, or all, of the cache nodes within a provisioned cluster. This +// operation applies any modified cache parameter groups to the cluster. The +// reboot operation takes place as soon as possible, and results in a momentary +// outage to the cluster. During the reboot, the cluster status is set to REBOOTING. +// +// The reboot causes the contents of the cache (for each cache node being rebooted) +// to be lost. +// +// When the reboot is complete, a cluster event is created. +// +// Rebooting a cluster is currently supported on Memcached and Redis (cluster +// mode disabled) clusters. Rebooting is not supported on Redis (cluster mode +// enabled) clusters. +// +// If you make changes to parameters that require a Redis (cluster mode enabled) +// cluster reboot for the changes to be applied, see Rebooting a Cluster (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Rebooting.html) +// for an alternate process. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation RebootCacheCluster for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState" +// The requested cluster is not in the available state. +// +// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" +// The requested cluster ID does not refer to an existing cluster. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RebootCacheCluster +func (c *ElastiCache) RebootCacheCluster(input *RebootCacheClusterInput) (*RebootCacheClusterOutput, error) { + req, out := c.RebootCacheClusterRequest(input) + return out, req.Send() +} + +// RebootCacheClusterWithContext is the same as RebootCacheCluster with the addition of +// the ability to pass a context and additional request options. +// +// See RebootCacheCluster for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
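+//
+// Illustrative sketch only: passing a standard library context with a
+// deadline (the timeout value and input are placeholders).
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    resp, err := client.RebootCacheClusterWithContext(ctx, input)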
+func (c *ElastiCache) RebootCacheClusterWithContext(ctx aws.Context, input *RebootCacheClusterInput, opts ...request.Option) (*RebootCacheClusterOutput, error) { + req, out := c.RebootCacheClusterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RemoveTagsFromResource for more information on using the RemoveTagsFromResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RemoveTagsFromResourceRequest method. +// req, resp := client.RemoveTagsFromResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RemoveTagsFromResource +func (c *ElastiCache) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *TagListMessage) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + output = &TagListMessage{} + req = c.newRequest(op, input, output) + return +} + +// RemoveTagsFromResource API operation for Amazon ElastiCache. +// +// Removes the tags identified by the TagKeys list from the named resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation RemoveTagsFromResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound" +// The requested cluster ID does not refer to an existing cluster. +// +// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault" +// The requested snapshot name does not refer to an existing snapshot. +// +// * ErrCodeInvalidARNFault "InvalidARN" +// The requested Amazon Resource Name (ARN) does not refer to an existing resource. +// +// * ErrCodeTagNotFoundFault "TagNotFound" +// The requested tag was not found on this resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RemoveTagsFromResource +func (c *ElastiCache) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*TagListMessage, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + return out, req.Send() +} + +// RemoveTagsFromResourceWithContext is the same as RemoveTagsFromResource with the addition of +// the ability to pass a context and additional request options. +// +// See RemoveTagsFromResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) RemoveTagsFromResourceWithContext(ctx aws.Context, input *RemoveTagsFromResourceInput, opts ...request.Option) (*TagListMessage, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opResetCacheParameterGroup = "ResetCacheParameterGroup" + +// ResetCacheParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ResetCacheParameterGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetCacheParameterGroup for more information on using the ResetCacheParameterGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResetCacheParameterGroupRequest method. +// req, resp := client.ResetCacheParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ResetCacheParameterGroup +func (c *ElastiCache) ResetCacheParameterGroupRequest(input *ResetCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) { + op := &request.Operation{ + Name: opResetCacheParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetCacheParameterGroupInput{} + } + + output = &CacheParameterGroupNameMessage{} + req = c.newRequest(op, input, output) + return +} + +// ResetCacheParameterGroup API operation for Amazon ElastiCache. +// +// Modifies the parameters of a cache parameter group to the engine or system +// default value. You can reset specific parameters by submitting a list of +// parameter names. To reset the entire cache parameter group, specify the ResetAllParameters +// and CacheParameterGroupName parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation ResetCacheParameterGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidCacheParameterGroupStateFault "InvalidCacheParameterGroupState" +// The current state of the cache parameter group does not allow the requested +// operation to occur. +// +// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound" +// The requested cache parameter group name does not refer to an existing cache +// parameter group. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. +// +// * ErrCodeInvalidGlobalReplicationGroupStateFault "InvalidGlobalReplicationGroupState" +// The Global Datastore is not available or in primary-only state. 
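+//
+// Illustrative sketch only: resetting every parameter in a group to its
+// default value (the group name is a placeholder, and the field names are
+// assumed from this operation's request shape).
+//
+//    input := &ResetCacheParameterGroupInput{
+//        CacheParameterGroupName: aws.String("example-param-group"),
+//        ResetAllParameters:      aws.Bool(true),
+//    }
+//    resp, err := client.ResetCacheParameterGroup(input)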
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ResetCacheParameterGroup +func (c *ElastiCache) ResetCacheParameterGroup(input *ResetCacheParameterGroupInput) (*CacheParameterGroupNameMessage, error) { + req, out := c.ResetCacheParameterGroupRequest(input) + return out, req.Send() +} + +// ResetCacheParameterGroupWithContext is the same as ResetCacheParameterGroup with the addition of +// the ability to pass a context and additional request options. +// +// See ResetCacheParameterGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) ResetCacheParameterGroupWithContext(ctx aws.Context, input *ResetCacheParameterGroupInput, opts ...request.Option) (*CacheParameterGroupNameMessage, error) { + req, out := c.ResetCacheParameterGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRevokeCacheSecurityGroupIngress = "RevokeCacheSecurityGroupIngress" + +// RevokeCacheSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeCacheSecurityGroupIngress operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RevokeCacheSecurityGroupIngress for more information on using the RevokeCacheSecurityGroupIngress +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RevokeCacheSecurityGroupIngressRequest method. +// req, resp := client.RevokeCacheSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RevokeCacheSecurityGroupIngress +func (c *ElastiCache) RevokeCacheSecurityGroupIngressRequest(input *RevokeCacheSecurityGroupIngressInput) (req *request.Request, output *RevokeCacheSecurityGroupIngressOutput) { + op := &request.Operation{ + Name: opRevokeCacheSecurityGroupIngress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RevokeCacheSecurityGroupIngressInput{} + } + + output = &RevokeCacheSecurityGroupIngressOutput{} + req = c.newRequest(op, input, output) + return +} + +// RevokeCacheSecurityGroupIngress API operation for Amazon ElastiCache. +// +// Revokes ingress from a cache security group. Use this operation to disallow +// access from an Amazon EC2 security group that had been previously authorized. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation RevokeCacheSecurityGroupIngress for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound" +// The requested cache security group name does not refer to an existing cache +// security group. +// +// * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" +// The specified Amazon EC2 security group is not authorized for the specified +// cache security group. +// +// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState" +// The current state of the cache security group does not allow deletion. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination" +// Two or more incompatible parameters were specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RevokeCacheSecurityGroupIngress +func (c *ElastiCache) RevokeCacheSecurityGroupIngress(input *RevokeCacheSecurityGroupIngressInput) (*RevokeCacheSecurityGroupIngressOutput, error) { + req, out := c.RevokeCacheSecurityGroupIngressRequest(input) + return out, req.Send() +} + +// RevokeCacheSecurityGroupIngressWithContext is the same as RevokeCacheSecurityGroupIngress with the addition of +// the ability to pass a context and additional request options. +// +// See RevokeCacheSecurityGroupIngress for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) RevokeCacheSecurityGroupIngressWithContext(ctx aws.Context, input *RevokeCacheSecurityGroupIngressInput, opts ...request.Option) (*RevokeCacheSecurityGroupIngressOutput, error) { + req, out := c.RevokeCacheSecurityGroupIngressRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartMigration = "StartMigration" + +// StartMigrationRequest generates a "aws/request.Request" representing the +// client's request for the StartMigration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartMigration for more information on using the StartMigration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartMigrationRequest method. +// req, resp := client.StartMigrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/StartMigration +func (c *ElastiCache) StartMigrationRequest(input *StartMigrationInput) (req *request.Request, output *StartMigrationOutput) { + op := &request.Operation{ + Name: opStartMigration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartMigrationInput{} + } + + output = &StartMigrationOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartMigration API operation for Amazon ElastiCache. 
+// +// Start the migration of data. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation StartMigration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" +// The specified replication group does not exist. +// +// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" +// The requested replication group is not in the available state. +// +// * ErrCodeReplicationGroupAlreadyUnderMigrationFault "ReplicationGroupAlreadyUnderMigrationFault" +// The targeted replication group is not available. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/StartMigration +func (c *ElastiCache) StartMigration(input *StartMigrationInput) (*StartMigrationOutput, error) { + req, out := c.StartMigrationRequest(input) + return out, req.Send() +} + +// StartMigrationWithContext is the same as StartMigration with the addition of +// the ability to pass a context and additional request options. +// +// See StartMigration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) StartMigrationWithContext(ctx aws.Context, input *StartMigrationInput, opts ...request.Option) (*StartMigrationOutput, error) { + req, out := c.StartMigrationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTestFailover = "TestFailover" + +// TestFailoverRequest generates a "aws/request.Request" representing the +// client's request for the TestFailover operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TestFailover for more information on using the TestFailover +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TestFailoverRequest method. +// req, resp := client.TestFailoverRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TestFailover +func (c *ElastiCache) TestFailoverRequest(input *TestFailoverInput) (req *request.Request, output *TestFailoverOutput) { + op := &request.Operation{ + Name: opTestFailover, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TestFailoverInput{} + } + + output = &TestFailoverOutput{} + req = c.newRequest(op, input, output) + return +} + +// TestFailover API operation for Amazon ElastiCache. 
+//
+// Represents the input of a TestFailover operation, which tests automatic failover
+// on a specified node group (called shard in the console) in a replication
+// group (called cluster in the console).
+//
+// Note the following:
+//
+// * A customer can use this operation to test automatic failover on up to
+// 5 shards (called node groups in the ElastiCache API and AWS CLI) in any
+// rolling 24-hour period.
+//
+// * If calling this operation on shards in different clusters (called replication
+// groups in the API and CLI), the calls can be made concurrently.
+//
+// * If calling this operation multiple times on different shards in the
+// same Redis (cluster mode enabled) replication group, the first node replacement
+// must complete before a subsequent call can be made.
+//
+// * To determine whether the node replacement is complete you can check
+// Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache
+// API. Look for the following automatic failover related events, listed
+// here in order of occurrence: Replication group message: Test Failover
+// API called for node group <node-group-id> Cache cluster message: Failover
+// from master node <primary-node-id> to replica node <node-id> completed
+// Replication group message: Failover from master node <primary-node-id>
+// to replica node <node-id> completed Cache cluster message: Recovering
+// cache nodes <node-id> Cache cluster message: Finished recovery for cache
+// nodes <node-id> For more information see: Viewing ElastiCache Events (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/ECEvents.Viewing.html)
+// in the ElastiCache User Guide DescribeEvents (https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_DescribeEvents.html)
+// in the ElastiCache API Reference
+//
+// Also see, Testing Multi-AZ (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html#auto-failover-test)
+// in the ElastiCache User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon ElastiCache's
+// API operation TestFailover for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeAPICallRateForCustomerExceededFault "APICallRateForCustomerExceeded"
+// The customer has exceeded the allowed rate of API calls.
+//
+// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState"
+// The requested cluster is not in the available state.
+//
+// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState"
+// The requested replication group is not in the available state.
+//
+// * ErrCodeNodeGroupNotFoundFault "NodeGroupNotFoundFault"
+// The node group specified by the NodeGroupId parameter could not be found.
+// Please verify that the node group exists and that you spelled the NodeGroupId
+// value correctly.
+//
+// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault"
+// The specified replication group does not exist.
+//
+// * ErrCodeTestFailoverNotAvailableFault "TestFailoverNotAvailableFault"
+// The TestFailover action is not available.
+//
+// * ErrCodeInvalidKMSKeyFault "InvalidKMSKeyFault"
+// The KMS key supplied is not valid.
+//
+// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
+// The value for a parameter is invalid.
+//
+// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
+// Two or more incompatible parameters were specified.
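+//
+// Illustrative sketch only: triggering a test failover on a single shard (the
+// replication group ID and node group ID are placeholders, and the field
+// names are assumed from this operation's request shape).
+//
+//    input := &TestFailoverInput{
+//        ReplicationGroupId: aws.String("example-replication-group"),
+//        NodeGroupId:        aws.String("0001"),
+//    }
+//    resp, err := client.TestFailover(input)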
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TestFailover +func (c *ElastiCache) TestFailover(input *TestFailoverInput) (*TestFailoverOutput, error) { + req, out := c.TestFailoverRequest(input) + return out, req.Send() +} + +// TestFailoverWithContext is the same as TestFailover with the addition of +// the ability to pass a context and additional request options. +// +// See TestFailover for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) TestFailoverWithContext(ctx aws.Context, input *TestFailoverInput, opts ...request.Option) (*TestFailoverOutput, error) { + req, out := c.TestFailoverRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Represents the input of an AddTagsToResource operation. +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource to which the tags are to be + // added, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster + // or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot. ElastiCache + // resources are cluster and snapshot. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). + // + // ResourceName is a required field + ResourceName *string `type:"string" required:"true"` + + // A list of cost allocation tags to be added to this resource. A tag is a key-value + // pair. A tag key must be accompanied by a tag value. + // + // Tags is a required field + Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsToResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"} + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceName sets the ResourceName field's value. +func (s *AddTagsToResourceInput) SetResourceName(v string) *AddTagsToResourceInput { + s.ResourceName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AddTagsToResourceInput) SetTags(v []*Tag) *AddTagsToResourceInput { + s.Tags = v + return s +} + +// Indicates whether the user requires a password to authenticate. +type Authentication struct { + _ struct{} `type:"structure"` + + // The number of passwords belonging to the user. The maximum is two. + PasswordCount *int64 `type:"integer"` + + // Indicates whether the user requires a password to authenticate. 
+ Type *string `type:"string" enum:"AuthenticationType"` +} + +// String returns the string representation +func (s Authentication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Authentication) GoString() string { + return s.String() +} + +// SetPasswordCount sets the PasswordCount field's value. +func (s *Authentication) SetPasswordCount(v int64) *Authentication { + s.PasswordCount = &v + return s +} + +// SetType sets the Type field's value. +func (s *Authentication) SetType(v string) *Authentication { + s.Type = &v + return s +} + +// Represents the input of an AuthorizeCacheSecurityGroupIngress operation. +type AuthorizeCacheSecurityGroupIngressInput struct { + _ struct{} `type:"structure"` + + // The cache security group that allows network ingress. + // + // CacheSecurityGroupName is a required field + CacheSecurityGroupName *string `type:"string" required:"true"` + + // The Amazon EC2 security group to be authorized for ingress to the cache security + // group. + // + // EC2SecurityGroupName is a required field + EC2SecurityGroupName *string `type:"string" required:"true"` + + // The AWS account number of the Amazon EC2 security group owner. Note that + // this is not the same thing as an AWS access key ID - you must provide a valid + // AWS account number for this parameter. + // + // EC2SecurityGroupOwnerId is a required field + EC2SecurityGroupOwnerId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AuthorizeCacheSecurityGroupIngressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeCacheSecurityGroupIngressInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AuthorizeCacheSecurityGroupIngressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AuthorizeCacheSecurityGroupIngressInput"} + if s.CacheSecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName")) + } + if s.EC2SecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("EC2SecurityGroupName")) + } + if s.EC2SecurityGroupOwnerId == nil { + invalidParams.Add(request.NewErrParamRequired("EC2SecurityGroupOwnerId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value. +func (s *AuthorizeCacheSecurityGroupIngressInput) SetCacheSecurityGroupName(v string) *AuthorizeCacheSecurityGroupIngressInput { + s.CacheSecurityGroupName = &v + return s +} + +// SetEC2SecurityGroupName sets the EC2SecurityGroupName field's value. +func (s *AuthorizeCacheSecurityGroupIngressInput) SetEC2SecurityGroupName(v string) *AuthorizeCacheSecurityGroupIngressInput { + s.EC2SecurityGroupName = &v + return s +} + +// SetEC2SecurityGroupOwnerId sets the EC2SecurityGroupOwnerId field's value. 
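+
+// Example (illustrative sketch only): building the AddTagsToResourceInput
+// defined above with its fluent setters and sending the request. The ARN and
+// tag values are hypothetical; an *ElastiCache client named svc is assumed.
+//
+//	input := (&elasticache.AddTagsToResourceInput{}).
+//		SetResourceName("arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster").
+//		SetTags([]*elasticache.Tag{
+//			{Key: aws.String("Team"), Value: aws.String("Data")},
+//		})
+//	if _, err := svc.AddTagsToResource(input); err != nil {
+//		// handle the error
+//	}
+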
+func (s *AuthorizeCacheSecurityGroupIngressInput) SetEC2SecurityGroupOwnerId(v string) *AuthorizeCacheSecurityGroupIngressInput { + s.EC2SecurityGroupOwnerId = &v + return s +} + +type AuthorizeCacheSecurityGroupIngressOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following operations: + // + // * AuthorizeCacheSecurityGroupIngress + // + // * CreateCacheSecurityGroup + // + // * RevokeCacheSecurityGroupIngress + CacheSecurityGroup *CacheSecurityGroup `type:"structure"` +} + +// String returns the string representation +func (s AuthorizeCacheSecurityGroupIngressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuthorizeCacheSecurityGroupIngressOutput) GoString() string { + return s.String() +} + +// SetCacheSecurityGroup sets the CacheSecurityGroup field's value. +func (s *AuthorizeCacheSecurityGroupIngressOutput) SetCacheSecurityGroup(v *CacheSecurityGroup) *AuthorizeCacheSecurityGroupIngressOutput { + s.CacheSecurityGroup = v + return s +} + +// Describes an Availability Zone in which the cluster is launched. +type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // The name of the Availability Zone. + Name *string `type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *AvailabilityZone) SetName(v string) *AvailabilityZone { + s.Name = &v + return s +} + +type BatchApplyUpdateActionInput struct { + _ struct{} `type:"structure"` + + // The cache cluster IDs + CacheClusterIds []*string `type:"list"` + + // The replication group IDs + ReplicationGroupIds []*string `type:"list"` + + // The unique ID of the service update + // + // ServiceUpdateName is a required field + ServiceUpdateName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchApplyUpdateActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchApplyUpdateActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchApplyUpdateActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchApplyUpdateActionInput"} + if s.ServiceUpdateName == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceUpdateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCacheClusterIds sets the CacheClusterIds field's value. +func (s *BatchApplyUpdateActionInput) SetCacheClusterIds(v []*string) *BatchApplyUpdateActionInput { + s.CacheClusterIds = v + return s +} + +// SetReplicationGroupIds sets the ReplicationGroupIds field's value. +func (s *BatchApplyUpdateActionInput) SetReplicationGroupIds(v []*string) *BatchApplyUpdateActionInput { + s.ReplicationGroupIds = v + return s +} + +// SetServiceUpdateName sets the ServiceUpdateName field's value. 
+func (s *BatchApplyUpdateActionInput) SetServiceUpdateName(v string) *BatchApplyUpdateActionInput { + s.ServiceUpdateName = &v + return s +} + +type BatchApplyUpdateActionOutput struct { + _ struct{} `type:"structure"` + + // Update actions that have been processed successfully + ProcessedUpdateActions []*ProcessedUpdateAction `locationNameList:"ProcessedUpdateAction" type:"list"` + + // Update actions that haven't been processed successfully + UnprocessedUpdateActions []*UnprocessedUpdateAction `locationNameList:"UnprocessedUpdateAction" type:"list"` +} + +// String returns the string representation +func (s BatchApplyUpdateActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchApplyUpdateActionOutput) GoString() string { + return s.String() +} + +// SetProcessedUpdateActions sets the ProcessedUpdateActions field's value. +func (s *BatchApplyUpdateActionOutput) SetProcessedUpdateActions(v []*ProcessedUpdateAction) *BatchApplyUpdateActionOutput { + s.ProcessedUpdateActions = v + return s +} + +// SetUnprocessedUpdateActions sets the UnprocessedUpdateActions field's value. +func (s *BatchApplyUpdateActionOutput) SetUnprocessedUpdateActions(v []*UnprocessedUpdateAction) *BatchApplyUpdateActionOutput { + s.UnprocessedUpdateActions = v + return s +} + +type BatchStopUpdateActionInput struct { + _ struct{} `type:"structure"` + + // The cache cluster IDs + CacheClusterIds []*string `type:"list"` + + // The replication group IDs + ReplicationGroupIds []*string `type:"list"` + + // The unique ID of the service update + // + // ServiceUpdateName is a required field + ServiceUpdateName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchStopUpdateActionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchStopUpdateActionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchStopUpdateActionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchStopUpdateActionInput"} + if s.ServiceUpdateName == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceUpdateName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCacheClusterIds sets the CacheClusterIds field's value. +func (s *BatchStopUpdateActionInput) SetCacheClusterIds(v []*string) *BatchStopUpdateActionInput { + s.CacheClusterIds = v + return s +} + +// SetReplicationGroupIds sets the ReplicationGroupIds field's value. +func (s *BatchStopUpdateActionInput) SetReplicationGroupIds(v []*string) *BatchStopUpdateActionInput { + s.ReplicationGroupIds = v + return s +} + +// SetServiceUpdateName sets the ServiceUpdateName field's value. 
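+
+// Example (illustrative sketch only): applying a service update to two
+// clusters with BatchApplyUpdateAction. The cluster IDs and the service
+// update name are hypothetical; an *ElastiCache client named svc is assumed.
+// BatchStopUpdateActionInput below is used in the same way to stop an update.
+//
+//	out, err := svc.BatchApplyUpdateAction(&elasticache.BatchApplyUpdateActionInput{
+//		CacheClusterIds:   aws.StringSlice([]string{"cluster-a", "cluster-b"}),
+//		ServiceUpdateName: aws.String("elc-20210715-example-update"),
+//	})
+//	if err == nil && len(out.UnprocessedUpdateActions) > 0 {
+//		// some actions could not be queued; inspect the returned entries
+//	}
+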
+func (s *BatchStopUpdateActionInput) SetServiceUpdateName(v string) *BatchStopUpdateActionInput { + s.ServiceUpdateName = &v + return s +} + +type BatchStopUpdateActionOutput struct { + _ struct{} `type:"structure"` + + // Update actions that have been processed successfully + ProcessedUpdateActions []*ProcessedUpdateAction `locationNameList:"ProcessedUpdateAction" type:"list"` + + // Update actions that haven't been processed successfully + UnprocessedUpdateActions []*UnprocessedUpdateAction `locationNameList:"UnprocessedUpdateAction" type:"list"` +} + +// String returns the string representation +func (s BatchStopUpdateActionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchStopUpdateActionOutput) GoString() string { + return s.String() +} + +// SetProcessedUpdateActions sets the ProcessedUpdateActions field's value. +func (s *BatchStopUpdateActionOutput) SetProcessedUpdateActions(v []*ProcessedUpdateAction) *BatchStopUpdateActionOutput { + s.ProcessedUpdateActions = v + return s +} + +// SetUnprocessedUpdateActions sets the UnprocessedUpdateActions field's value. +func (s *BatchStopUpdateActionOutput) SetUnprocessedUpdateActions(v []*UnprocessedUpdateAction) *BatchStopUpdateActionOutput { + s.UnprocessedUpdateActions = v + return s +} + +// Contains all of the attributes of a specific cluster. +type CacheCluster struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon Resource Name) of the cache cluster. + ARN *string `type:"string"` + + // A flag that enables encryption at-rest when set to true. + // + // You cannot modify the value of AtRestEncryptionEnabled after the cluster + // is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled + // to true when you create a cluster. + // + // Required: Only available when creating a replication group in an Amazon VPC + // using redis version 3.2.6, 4.x or later. + // + // Default: false + AtRestEncryptionEnabled *bool `type:"boolean"` + + // A flag that enables using an AuthToken (password) when issuing Redis commands. + // + // Default: false + AuthTokenEnabled *bool `type:"boolean"` + + // The date the auth token was last modified + AuthTokenLastModifiedDate *time.Time `type:"timestamp"` + + // This parameter is currently disabled. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The date and time when the cluster was created. + CacheClusterCreateTime *time.Time `type:"timestamp"` + + // The user-supplied identifier of the cluster. This identifier is a unique + // key that identifies a cluster. + CacheClusterId *string `type:"string"` + + // The current state of this cluster, one of the following values: available, + // creating, deleted, deleting, incompatible-network, modifying, rebooting cluster + // nodes, restore-failed, or snapshotting. + CacheClusterStatus *string `type:"string"` + + // The name of the compute and memory capacity node type for the cluster. + // + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. 
+ // + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, + // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium + // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 + // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge + // + // Additional node type info + // + // * All current generation instance types are created in Amazon VPC by default. + // + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. + // + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. + // + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. + CacheNodeType *string `type:"string"` + + // A list of cache nodes that are members of the cluster. + CacheNodes []*CacheNode `locationNameList:"CacheNode" type:"list"` + + // Status of the cache parameter group. + CacheParameterGroup *CacheParameterGroupStatus `type:"structure"` + + // A list of cache security group elements, composed of name and status sub-elements. + CacheSecurityGroups []*CacheSecurityGroupMembership `locationNameList:"CacheSecurityGroup" type:"list"` + + // The name of the cache subnet group associated with the cluster. + CacheSubnetGroupName *string `type:"string"` + + // The URL of the web page where you can download the latest ElastiCache client + // library. + ClientDownloadLandingPage *string `type:"string"` + + // Represents a Memcached cluster endpoint which, if Automatic Discovery is + // enabled on the cluster, can be used by an application to connect to any node + // in the cluster. The configuration endpoint will always have .cfg in it. + // + // Example: mem-3.9dvc4r.cfg.usw2.cache.amazonaws.com:11211 + ConfigurationEndpoint *Endpoint `type:"structure"` + + // The name of the cache engine (memcached or redis) to be used for this cluster. + Engine *string `type:"string"` + + // The version of the cache engine that is used in this cluster. + EngineVersion *string `type:"string"` + + // Describes a notification topic and its status. Notification topics are used + // for publishing ElastiCache events to subscribers using Amazon Simple Notification + // Service (SNS). + NotificationConfiguration *NotificationConfiguration `type:"structure"` + + // The number of cache nodes in the cluster. + // + // For clusters running Redis, this value must be 1. For clusters running Memcached, + // this value must be between 1 and 20. 
+ NumCacheNodes *int64 `type:"integer"` + + // A group of settings that are applied to the cluster in the future, or that + // are currently being applied. + PendingModifiedValues *PendingModifiedValues `type:"structure"` + + // The name of the Availability Zone in which the cluster is located or "Multiple" + // if the cache nodes are located in different Availability Zones. + PreferredAvailabilityZone *string `type:"string"` + + // Specifies the weekly time range during which maintenance on the cluster is + // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. + // + // Valid values for ddd are: + // + // * sun + // + // * mon + // + // * tue + // + // * wed + // + // * thu + // + // * fri + // + // * sat + // + // Example: sun:23:00-mon:01:30 + PreferredMaintenanceWindow *string `type:"string"` + + // The outpost ARN in which the cache cluster is created. + PreferredOutpostArn *string `type:"string"` + + // The replication group to which this cluster belongs. If this field is empty, + // the cluster is not associated with any replication group. + ReplicationGroupId *string `type:"string"` + + // A list of VPC Security Groups associated with the cluster. + SecurityGroups []*SecurityGroupMembership `type:"list"` + + // The number of days for which ElastiCache retains automatic cluster snapshots + // before deleting them. For example, if you set SnapshotRetentionLimit to 5, + // a snapshot that was taken today is retained for 5 days before being deleted. + // + // If the value of SnapshotRetentionLimit is set to zero (0), backups are turned + // off. + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache begins taking a daily + // snapshot of your cluster. + // + // Example: 05:00-09:00 + SnapshotWindow *string `type:"string"` + + // A flag that enables in-transit encryption when set to true. + // + // You cannot modify the value of TransitEncryptionEnabled after the cluster + // is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled + // to true when you create a cluster. + // + // Required: Only available when creating a replication group in an Amazon VPC + // using redis version 3.2.6, 4.x or later. + // + // Default: false + TransitEncryptionEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s CacheCluster) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheCluster) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *CacheCluster) SetARN(v string) *CacheCluster { + s.ARN = &v + return s +} + +// SetAtRestEncryptionEnabled sets the AtRestEncryptionEnabled field's value. +func (s *CacheCluster) SetAtRestEncryptionEnabled(v bool) *CacheCluster { + s.AtRestEncryptionEnabled = &v + return s +} + +// SetAuthTokenEnabled sets the AuthTokenEnabled field's value. +func (s *CacheCluster) SetAuthTokenEnabled(v bool) *CacheCluster { + s.AuthTokenEnabled = &v + return s +} + +// SetAuthTokenLastModifiedDate sets the AuthTokenLastModifiedDate field's value. +func (s *CacheCluster) SetAuthTokenLastModifiedDate(v time.Time) *CacheCluster { + s.AuthTokenLastModifiedDate = &v + return s +} + +// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. 
+func (s *CacheCluster) SetAutoMinorVersionUpgrade(v bool) *CacheCluster { + s.AutoMinorVersionUpgrade = &v + return s +} + +// SetCacheClusterCreateTime sets the CacheClusterCreateTime field's value. +func (s *CacheCluster) SetCacheClusterCreateTime(v time.Time) *CacheCluster { + s.CacheClusterCreateTime = &v + return s +} + +// SetCacheClusterId sets the CacheClusterId field's value. +func (s *CacheCluster) SetCacheClusterId(v string) *CacheCluster { + s.CacheClusterId = &v + return s +} + +// SetCacheClusterStatus sets the CacheClusterStatus field's value. +func (s *CacheCluster) SetCacheClusterStatus(v string) *CacheCluster { + s.CacheClusterStatus = &v + return s +} + +// SetCacheNodeType sets the CacheNodeType field's value. +func (s *CacheCluster) SetCacheNodeType(v string) *CacheCluster { + s.CacheNodeType = &v + return s +} + +// SetCacheNodes sets the CacheNodes field's value. +func (s *CacheCluster) SetCacheNodes(v []*CacheNode) *CacheCluster { + s.CacheNodes = v + return s +} + +// SetCacheParameterGroup sets the CacheParameterGroup field's value. +func (s *CacheCluster) SetCacheParameterGroup(v *CacheParameterGroupStatus) *CacheCluster { + s.CacheParameterGroup = v + return s +} + +// SetCacheSecurityGroups sets the CacheSecurityGroups field's value. +func (s *CacheCluster) SetCacheSecurityGroups(v []*CacheSecurityGroupMembership) *CacheCluster { + s.CacheSecurityGroups = v + return s +} + +// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. +func (s *CacheCluster) SetCacheSubnetGroupName(v string) *CacheCluster { + s.CacheSubnetGroupName = &v + return s +} + +// SetClientDownloadLandingPage sets the ClientDownloadLandingPage field's value. +func (s *CacheCluster) SetClientDownloadLandingPage(v string) *CacheCluster { + s.ClientDownloadLandingPage = &v + return s +} + +// SetConfigurationEndpoint sets the ConfigurationEndpoint field's value. +func (s *CacheCluster) SetConfigurationEndpoint(v *Endpoint) *CacheCluster { + s.ConfigurationEndpoint = v + return s +} + +// SetEngine sets the Engine field's value. +func (s *CacheCluster) SetEngine(v string) *CacheCluster { + s.Engine = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *CacheCluster) SetEngineVersion(v string) *CacheCluster { + s.EngineVersion = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *CacheCluster) SetNotificationConfiguration(v *NotificationConfiguration) *CacheCluster { + s.NotificationConfiguration = v + return s +} + +// SetNumCacheNodes sets the NumCacheNodes field's value. +func (s *CacheCluster) SetNumCacheNodes(v int64) *CacheCluster { + s.NumCacheNodes = &v + return s +} + +// SetPendingModifiedValues sets the PendingModifiedValues field's value. +func (s *CacheCluster) SetPendingModifiedValues(v *PendingModifiedValues) *CacheCluster { + s.PendingModifiedValues = v + return s +} + +// SetPreferredAvailabilityZone sets the PreferredAvailabilityZone field's value. +func (s *CacheCluster) SetPreferredAvailabilityZone(v string) *CacheCluster { + s.PreferredAvailabilityZone = &v + return s +} + +// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. +func (s *CacheCluster) SetPreferredMaintenanceWindow(v string) *CacheCluster { + s.PreferredMaintenanceWindow = &v + return s +} + +// SetPreferredOutpostArn sets the PreferredOutpostArn field's value. 
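+
+// Example (illustrative sketch only): reading a few CacheCluster fields from
+// a DescribeCacheClusters response. DescribeCacheClusters is defined elsewhere
+// in this file; the cluster ID is hypothetical and an *ElastiCache client
+// named svc is assumed.
+//
+//	out, err := svc.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
+//		CacheClusterId:    aws.String("my-memcached"),
+//		ShowCacheNodeInfo: aws.Bool(true),
+//	})
+//	if err == nil {
+//		for _, cc := range out.CacheClusters {
+//			status := aws.StringValue(cc.CacheClusterStatus) // e.g. "available"
+//			engine := aws.StringValue(cc.Engine)             // "memcached" or "redis"
+//			_, _ = status, engine
+//		}
+//	}
+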
+func (s *CacheCluster) SetPreferredOutpostArn(v string) *CacheCluster { + s.PreferredOutpostArn = &v + return s +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *CacheCluster) SetReplicationGroupId(v string) *CacheCluster { + s.ReplicationGroupId = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *CacheCluster) SetSecurityGroups(v []*SecurityGroupMembership) *CacheCluster { + s.SecurityGroups = v + return s +} + +// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value. +func (s *CacheCluster) SetSnapshotRetentionLimit(v int64) *CacheCluster { + s.SnapshotRetentionLimit = &v + return s +} + +// SetSnapshotWindow sets the SnapshotWindow field's value. +func (s *CacheCluster) SetSnapshotWindow(v string) *CacheCluster { + s.SnapshotWindow = &v + return s +} + +// SetTransitEncryptionEnabled sets the TransitEncryptionEnabled field's value. +func (s *CacheCluster) SetTransitEncryptionEnabled(v bool) *CacheCluster { + s.TransitEncryptionEnabled = &v + return s +} + +// Provides all of the details about a particular cache engine version. +type CacheEngineVersion struct { + _ struct{} `type:"structure"` + + // The description of the cache engine. + CacheEngineDescription *string `type:"string"` + + // The description of the cache engine version. + CacheEngineVersionDescription *string `type:"string"` + + // The name of the cache parameter group family associated with this cache engine. + // + // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 + // | redis4.0 | redis5.0 | + CacheParameterGroupFamily *string `type:"string"` + + // The name of the cache engine. + Engine *string `type:"string"` + + // The version number of the cache engine. + EngineVersion *string `type:"string"` +} + +// String returns the string representation +func (s CacheEngineVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheEngineVersion) GoString() string { + return s.String() +} + +// SetCacheEngineDescription sets the CacheEngineDescription field's value. +func (s *CacheEngineVersion) SetCacheEngineDescription(v string) *CacheEngineVersion { + s.CacheEngineDescription = &v + return s +} + +// SetCacheEngineVersionDescription sets the CacheEngineVersionDescription field's value. +func (s *CacheEngineVersion) SetCacheEngineVersionDescription(v string) *CacheEngineVersion { + s.CacheEngineVersionDescription = &v + return s +} + +// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. +func (s *CacheEngineVersion) SetCacheParameterGroupFamily(v string) *CacheEngineVersion { + s.CacheParameterGroupFamily = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *CacheEngineVersion) SetEngine(v string) *CacheEngineVersion { + s.Engine = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *CacheEngineVersion) SetEngineVersion(v string) *CacheEngineVersion { + s.EngineVersion = &v + return s +} + +// Represents an individual cache node within a cluster. Each cache node runs +// its own instance of the cluster's protocol-compliant caching software - either +// Memcached or Redis. +// +// The following node types are supported by ElastiCache. Generally speaking, +// the current generation types provide more memory and computational power +// at lower cost when compared to their equivalent previous generation counterparts. 
+// +// * General purpose: Current generation: M5 node types: cache.m5.large, +// cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, +// cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, +// cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, +// cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium +// Previous generation: (not recommended) T1 node types: cache.t1.micro M1 +// node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge +// M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge +// +// * Compute optimized: Previous generation: (not recommended) C1 node types: +// cache.c1.xlarge +// +// * Memory optimized: Current generation: R5 node types: cache.r5.large, +// cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, +// cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, +// cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: +// (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge +// R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, +// cache.r3.8xlarge +// +// Additional node type info +// +// * All current generation instance types are created in Amazon VPC by default. +// +// * Redis append-only files (AOF) are not supported for T1 or T2 instances. +// +// * Redis Multi-AZ with automatic failover is not supported on T1 instances. +// +// * Redis configuration variables appendonly and appendfsync are not supported +// on Redis version 2.8.22 and later. +type CacheNode struct { + _ struct{} `type:"structure"` + + // The date and time when the cache node was created. + CacheNodeCreateTime *time.Time `type:"timestamp"` + + // The cache node identifier. A node ID is a numeric identifier (0001, 0002, + // etc.). The combination of cluster ID and node ID uniquely identifies every + // cache node used in a customer's AWS account. + CacheNodeId *string `type:"string"` + + // The current state of this cache node, one of the following values: available, + // creating, rebooting, or deleting. + CacheNodeStatus *string `type:"string"` + + // The Availability Zone where this node was created and now resides. + CustomerAvailabilityZone *string `type:"string"` + + // The customer outpost ARN of the cache node. + CustomerOutpostArn *string `type:"string"` + + // The hostname for connecting to this cache node. + Endpoint *Endpoint `type:"structure"` + + // The status of the parameter group applied to this cache node. + ParameterGroupStatus *string `type:"string"` + + // The ID of the primary node to which this read replica node is synchronized. + // If this field is empty, this node is not associated with a primary cluster. + SourceCacheNodeId *string `type:"string"` +} + +// String returns the string representation +func (s CacheNode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheNode) GoString() string { + return s.String() +} + +// SetCacheNodeCreateTime sets the CacheNodeCreateTime field's value. +func (s *CacheNode) SetCacheNodeCreateTime(v time.Time) *CacheNode { + s.CacheNodeCreateTime = &v + return s +} + +// SetCacheNodeId sets the CacheNodeId field's value. +func (s *CacheNode) SetCacheNodeId(v string) *CacheNode { + s.CacheNodeId = &v + return s +} + +// SetCacheNodeStatus sets the CacheNodeStatus field's value. 
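+
+// Example (illustrative sketch only): walking the CacheNodes of a *CacheCluster
+// (for instance one returned by DescribeCacheClusters with ShowCacheNodeInfo
+// set) and collecting each node's endpoint. The variable cc is assumed to be
+// a non-nil *CacheCluster, and fmt is assumed to be imported.
+//
+//	for _, n := range cc.CacheNodes {
+//		if n.Endpoint != nil {
+//			addr := aws.StringValue(n.Endpoint.Address)
+//			port := aws.Int64Value(n.Endpoint.Port)
+//			_ = fmt.Sprintf("%s:%d", addr, port) // node address, e.g. for a client config
+//		}
+//	}
+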
+func (s *CacheNode) SetCacheNodeStatus(v string) *CacheNode { + s.CacheNodeStatus = &v + return s +} + +// SetCustomerAvailabilityZone sets the CustomerAvailabilityZone field's value. +func (s *CacheNode) SetCustomerAvailabilityZone(v string) *CacheNode { + s.CustomerAvailabilityZone = &v + return s +} + +// SetCustomerOutpostArn sets the CustomerOutpostArn field's value. +func (s *CacheNode) SetCustomerOutpostArn(v string) *CacheNode { + s.CustomerOutpostArn = &v + return s +} + +// SetEndpoint sets the Endpoint field's value. +func (s *CacheNode) SetEndpoint(v *Endpoint) *CacheNode { + s.Endpoint = v + return s +} + +// SetParameterGroupStatus sets the ParameterGroupStatus field's value. +func (s *CacheNode) SetParameterGroupStatus(v string) *CacheNode { + s.ParameterGroupStatus = &v + return s +} + +// SetSourceCacheNodeId sets the SourceCacheNodeId field's value. +func (s *CacheNode) SetSourceCacheNodeId(v string) *CacheNode { + s.SourceCacheNodeId = &v + return s +} + +// A parameter that has a different value for each cache node type it is applied +// to. For example, in a Redis cluster, a cache.m1.large cache node type would +// have a larger maxmemory value than a cache.m1.small type. +type CacheNodeTypeSpecificParameter struct { + _ struct{} `type:"structure"` + + // The valid range of values for the parameter. + AllowedValues *string `type:"string"` + + // A list of cache node types and their corresponding values for this parameter. + CacheNodeTypeSpecificValues []*CacheNodeTypeSpecificValue `locationNameList:"CacheNodeTypeSpecificValue" type:"list"` + + // Indicates whether a change to the parameter is applied immediately or requires + // a reboot for the change to be applied. You can force a reboot or wait until + // the next maintenance window's reboot. For more information, see Rebooting + // a Cluster (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Rebooting.html). + ChangeType *string `type:"string" enum:"ChangeType"` + + // The valid data type for the parameter. + DataType *string `type:"string"` + + // A description of the parameter. + Description *string `type:"string"` + + // Indicates whether (true) or not (false) the parameter can be modified. Some + // parameters have security or operational implications that prevent them from + // being changed. + IsModifiable *bool `type:"boolean"` + + // The earliest cache engine version to which the parameter can apply. + MinimumEngineVersion *string `type:"string"` + + // The name of the parameter. + ParameterName *string `type:"string"` + + // The source of the parameter value. + Source *string `type:"string"` +} + +// String returns the string representation +func (s CacheNodeTypeSpecificParameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheNodeTypeSpecificParameter) GoString() string { + return s.String() +} + +// SetAllowedValues sets the AllowedValues field's value. +func (s *CacheNodeTypeSpecificParameter) SetAllowedValues(v string) *CacheNodeTypeSpecificParameter { + s.AllowedValues = &v + return s +} + +// SetCacheNodeTypeSpecificValues sets the CacheNodeTypeSpecificValues field's value. +func (s *CacheNodeTypeSpecificParameter) SetCacheNodeTypeSpecificValues(v []*CacheNodeTypeSpecificValue) *CacheNodeTypeSpecificParameter { + s.CacheNodeTypeSpecificValues = v + return s +} + +// SetChangeType sets the ChangeType field's value. 
+func (s *CacheNodeTypeSpecificParameter) SetChangeType(v string) *CacheNodeTypeSpecificParameter { + s.ChangeType = &v + return s +} + +// SetDataType sets the DataType field's value. +func (s *CacheNodeTypeSpecificParameter) SetDataType(v string) *CacheNodeTypeSpecificParameter { + s.DataType = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CacheNodeTypeSpecificParameter) SetDescription(v string) *CacheNodeTypeSpecificParameter { + s.Description = &v + return s +} + +// SetIsModifiable sets the IsModifiable field's value. +func (s *CacheNodeTypeSpecificParameter) SetIsModifiable(v bool) *CacheNodeTypeSpecificParameter { + s.IsModifiable = &v + return s +} + +// SetMinimumEngineVersion sets the MinimumEngineVersion field's value. +func (s *CacheNodeTypeSpecificParameter) SetMinimumEngineVersion(v string) *CacheNodeTypeSpecificParameter { + s.MinimumEngineVersion = &v + return s +} + +// SetParameterName sets the ParameterName field's value. +func (s *CacheNodeTypeSpecificParameter) SetParameterName(v string) *CacheNodeTypeSpecificParameter { + s.ParameterName = &v + return s +} + +// SetSource sets the Source field's value. +func (s *CacheNodeTypeSpecificParameter) SetSource(v string) *CacheNodeTypeSpecificParameter { + s.Source = &v + return s +} + +// A value that applies only to a certain cache node type. +type CacheNodeTypeSpecificValue struct { + _ struct{} `type:"structure"` + + // The cache node type for which this value applies. + CacheNodeType *string `type:"string"` + + // The value for the cache node type. + Value *string `type:"string"` +} + +// String returns the string representation +func (s CacheNodeTypeSpecificValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheNodeTypeSpecificValue) GoString() string { + return s.String() +} + +// SetCacheNodeType sets the CacheNodeType field's value. +func (s *CacheNodeTypeSpecificValue) SetCacheNodeType(v string) *CacheNodeTypeSpecificValue { + s.CacheNodeType = &v + return s +} + +// SetValue sets the Value field's value. +func (s *CacheNodeTypeSpecificValue) SetValue(v string) *CacheNodeTypeSpecificValue { + s.Value = &v + return s +} + +// The status of the service update on the cache node +type CacheNodeUpdateStatus struct { + _ struct{} `type:"structure"` + + // The node ID of the cache cluster + CacheNodeId *string `type:"string"` + + // The deletion date of the node + NodeDeletionDate *time.Time `type:"timestamp"` + + // The end date of the update for a node + NodeUpdateEndDate *time.Time `type:"timestamp"` + + // Reflects whether the update was initiated by the customer or automatically + // applied + NodeUpdateInitiatedBy *string `type:"string" enum:"NodeUpdateInitiatedBy"` + + // The date when the update is triggered + NodeUpdateInitiatedDate *time.Time `type:"timestamp"` + + // The start date of the update for a node + NodeUpdateStartDate *time.Time `type:"timestamp"` + + // The update status of the node + NodeUpdateStatus *string `type:"string" enum:"NodeUpdateStatus"` + + // The date when the NodeUpdateStatus was last modified> + NodeUpdateStatusModifiedDate *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s CacheNodeUpdateStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheNodeUpdateStatus) GoString() string { + return s.String() +} + +// SetCacheNodeId sets the CacheNodeId field's value. 
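+
+// Example (illustrative sketch only): listing the per-node-type values carried
+// by CacheNodeTypeSpecificParameter, here assumed to come from a
+// DescribeCacheParameters response. The parameter group name is hypothetical
+// and an *ElastiCache client named svc is assumed.
+//
+//	out, err := svc.DescribeCacheParameters(&elasticache.DescribeCacheParametersInput{
+//		CacheParameterGroupName: aws.String("default.redis5.0"),
+//	})
+//	if err == nil {
+//		for _, p := range out.CacheNodeTypeSpecificParameters {
+//			for _, v := range p.CacheNodeTypeSpecificValues {
+//				// v.CacheNodeType maps to v.Value, e.g. maxmemory per node type
+//				_ = aws.StringValue(v.Value)
+//			}
+//		}
+//	}
+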
+func (s *CacheNodeUpdateStatus) SetCacheNodeId(v string) *CacheNodeUpdateStatus { + s.CacheNodeId = &v + return s +} + +// SetNodeDeletionDate sets the NodeDeletionDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeDeletionDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeDeletionDate = &v + return s +} + +// SetNodeUpdateEndDate sets the NodeUpdateEndDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateEndDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeUpdateEndDate = &v + return s +} + +// SetNodeUpdateInitiatedBy sets the NodeUpdateInitiatedBy field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateInitiatedBy(v string) *CacheNodeUpdateStatus { + s.NodeUpdateInitiatedBy = &v + return s +} + +// SetNodeUpdateInitiatedDate sets the NodeUpdateInitiatedDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateInitiatedDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeUpdateInitiatedDate = &v + return s +} + +// SetNodeUpdateStartDate sets the NodeUpdateStartDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateStartDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeUpdateStartDate = &v + return s +} + +// SetNodeUpdateStatus sets the NodeUpdateStatus field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateStatus(v string) *CacheNodeUpdateStatus { + s.NodeUpdateStatus = &v + return s +} + +// SetNodeUpdateStatusModifiedDate sets the NodeUpdateStatusModifiedDate field's value. +func (s *CacheNodeUpdateStatus) SetNodeUpdateStatusModifiedDate(v time.Time) *CacheNodeUpdateStatus { + s.NodeUpdateStatusModifiedDate = &v + return s +} + +// Represents the output of a CreateCacheParameterGroup operation. +type CacheParameterGroup struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon Resource Name) of the cache parameter group. + ARN *string `type:"string"` + + // The name of the cache parameter group family that this cache parameter group + // is compatible with. + // + // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 + // | redis4.0 | redis5.0 | + CacheParameterGroupFamily *string `type:"string"` + + // The name of the cache parameter group. + CacheParameterGroupName *string `type:"string"` + + // The description for this cache parameter group. + Description *string `type:"string"` + + // Indicates whether the parameter group is associated with a Global Datastore + IsGlobal *bool `type:"boolean"` +} + +// String returns the string representation +func (s CacheParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheParameterGroup) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *CacheParameterGroup) SetARN(v string) *CacheParameterGroup { + s.ARN = &v + return s +} + +// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. +func (s *CacheParameterGroup) SetCacheParameterGroupFamily(v string) *CacheParameterGroup { + s.CacheParameterGroupFamily = &v + return s +} + +// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. +func (s *CacheParameterGroup) SetCacheParameterGroupName(v string) *CacheParameterGroup { + s.CacheParameterGroupName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CacheParameterGroup) SetDescription(v string) *CacheParameterGroup { + s.Description = &v + return s +} + +// SetIsGlobal sets the IsGlobal field's value. 
+func (s *CacheParameterGroup) SetIsGlobal(v bool) *CacheParameterGroup { + s.IsGlobal = &v + return s +} + +// Represents the output of one of the following operations: +// +// * ModifyCacheParameterGroup +// +// * ResetCacheParameterGroup +type CacheParameterGroupNameMessage struct { + _ struct{} `type:"structure"` + + // The name of the cache parameter group. + CacheParameterGroupName *string `type:"string"` +} + +// String returns the string representation +func (s CacheParameterGroupNameMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheParameterGroupNameMessage) GoString() string { + return s.String() +} + +// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. +func (s *CacheParameterGroupNameMessage) SetCacheParameterGroupName(v string) *CacheParameterGroupNameMessage { + s.CacheParameterGroupName = &v + return s +} + +// Status of the cache parameter group. +type CacheParameterGroupStatus struct { + _ struct{} `type:"structure"` + + // A list of the cache node IDs which need to be rebooted for parameter changes + // to be applied. A node ID is a numeric identifier (0001, 0002, etc.). + CacheNodeIdsToReboot []*string `locationNameList:"CacheNodeId" type:"list"` + + // The name of the cache parameter group. + CacheParameterGroupName *string `type:"string"` + + // The status of parameter updates. + ParameterApplyStatus *string `type:"string"` +} + +// String returns the string representation +func (s CacheParameterGroupStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheParameterGroupStatus) GoString() string { + return s.String() +} + +// SetCacheNodeIdsToReboot sets the CacheNodeIdsToReboot field's value. +func (s *CacheParameterGroupStatus) SetCacheNodeIdsToReboot(v []*string) *CacheParameterGroupStatus { + s.CacheNodeIdsToReboot = v + return s +} + +// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. +func (s *CacheParameterGroupStatus) SetCacheParameterGroupName(v string) *CacheParameterGroupStatus { + s.CacheParameterGroupName = &v + return s +} + +// SetParameterApplyStatus sets the ParameterApplyStatus field's value. +func (s *CacheParameterGroupStatus) SetParameterApplyStatus(v string) *CacheParameterGroupStatus { + s.ParameterApplyStatus = &v + return s +} + +// Represents the output of one of the following operations: +// +// * AuthorizeCacheSecurityGroupIngress +// +// * CreateCacheSecurityGroup +// +// * RevokeCacheSecurityGroupIngress +type CacheSecurityGroup struct { + _ struct{} `type:"structure"` + + // The ARN of the cache security group, + ARN *string `type:"string"` + + // The name of the cache security group. + CacheSecurityGroupName *string `type:"string"` + + // The description of the cache security group. + Description *string `type:"string"` + + // A list of Amazon EC2 security groups that are associated with this cache + // security group. + EC2SecurityGroups []*EC2SecurityGroup `locationNameList:"EC2SecurityGroup" type:"list"` + + // The AWS account ID of the cache security group owner. + OwnerId *string `type:"string"` +} + +// String returns the string representation +func (s CacheSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheSecurityGroup) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. 
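+
+// Example (illustrative sketch only): checking whether parameter changes on a
+// cluster are still waiting for a reboot, using the CacheParameterGroupStatus
+// attached to a *CacheCluster (variable cc, assumed non-nil).
+//
+//	if pg := cc.CacheParameterGroup; pg != nil {
+//		pending := aws.StringValueSlice(pg.CacheNodeIdsToReboot)
+//		if len(pending) > 0 {
+//			// these node IDs must be rebooted (see RebootCacheCluster) before
+//			// the new parameter values take effect
+//		}
+//		_ = aws.StringValue(pg.ParameterApplyStatus)
+//	}
+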
+func (s *CacheSecurityGroup) SetARN(v string) *CacheSecurityGroup { + s.ARN = &v + return s +} + +// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value. +func (s *CacheSecurityGroup) SetCacheSecurityGroupName(v string) *CacheSecurityGroup { + s.CacheSecurityGroupName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CacheSecurityGroup) SetDescription(v string) *CacheSecurityGroup { + s.Description = &v + return s +} + +// SetEC2SecurityGroups sets the EC2SecurityGroups field's value. +func (s *CacheSecurityGroup) SetEC2SecurityGroups(v []*EC2SecurityGroup) *CacheSecurityGroup { + s.EC2SecurityGroups = v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *CacheSecurityGroup) SetOwnerId(v string) *CacheSecurityGroup { + s.OwnerId = &v + return s +} + +// Represents a cluster's status within a particular cache security group. +type CacheSecurityGroupMembership struct { + _ struct{} `type:"structure"` + + // The name of the cache security group. + CacheSecurityGroupName *string `type:"string"` + + // The membership status in the cache security group. The status changes when + // a cache security group is modified, or when the cache security groups assigned + // to a cluster are modified. + Status *string `type:"string"` +} + +// String returns the string representation +func (s CacheSecurityGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheSecurityGroupMembership) GoString() string { + return s.String() +} + +// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value. +func (s *CacheSecurityGroupMembership) SetCacheSecurityGroupName(v string) *CacheSecurityGroupMembership { + s.CacheSecurityGroupName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CacheSecurityGroupMembership) SetStatus(v string) *CacheSecurityGroupMembership { + s.Status = &v + return s +} + +// Represents the output of one of the following operations: +// +// * CreateCacheSubnetGroup +// +// * ModifyCacheSubnetGroup +type CacheSubnetGroup struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon Resource Name) of the cache subnet group. + ARN *string `type:"string"` + + // The description of the cache subnet group. + CacheSubnetGroupDescription *string `type:"string"` + + // The name of the cache subnet group. + CacheSubnetGroupName *string `type:"string"` + + // A list of subnets associated with the cache subnet group. + Subnets []*Subnet `locationNameList:"Subnet" type:"list"` + + // The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet + // group. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s CacheSubnetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheSubnetGroup) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *CacheSubnetGroup) SetARN(v string) *CacheSubnetGroup { + s.ARN = &v + return s +} + +// SetCacheSubnetGroupDescription sets the CacheSubnetGroupDescription field's value. +func (s *CacheSubnetGroup) SetCacheSubnetGroupDescription(v string) *CacheSubnetGroup { + s.CacheSubnetGroupDescription = &v + return s +} + +// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. 
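+
+// Example (illustrative sketch only): listing the subnet IDs of a cache subnet
+// group via DescribeCacheSubnetGroups. The group name is hypothetical and an
+// *ElastiCache client named svc is assumed.
+//
+//	out, err := svc.DescribeCacheSubnetGroups(&elasticache.DescribeCacheSubnetGroupsInput{
+//		CacheSubnetGroupName: aws.String("my-cache-subnets"),
+//	})
+//	if err == nil {
+//		for _, g := range out.CacheSubnetGroups {
+//			for _, sn := range g.Subnets {
+//				_ = aws.StringValue(sn.SubnetIdentifier) // e.g. the subnet-... ID
+//			}
+//		}
+//	}
+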
+func (s *CacheSubnetGroup) SetCacheSubnetGroupName(v string) *CacheSubnetGroup { + s.CacheSubnetGroupName = &v + return s +} + +// SetSubnets sets the Subnets field's value. +func (s *CacheSubnetGroup) SetSubnets(v []*Subnet) *CacheSubnetGroup { + s.Subnets = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *CacheSubnetGroup) SetVpcId(v string) *CacheSubnetGroup { + s.VpcId = &v + return s +} + +type CompleteMigrationInput struct { + _ struct{} `type:"structure"` + + // Forces the migration to stop without ensuring that data is in sync. It is + // recommended to use this option only to abort the migration and not recommended + // when application wants to continue migration to ElastiCache. + Force *bool `type:"boolean"` + + // The ID of the replication group to which data is being migrated. + // + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMigrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMigrationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMigrationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMigrationInput"} + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForce sets the Force field's value. +func (s *CompleteMigrationInput) SetForce(v bool) *CompleteMigrationInput { + s.Force = &v + return s +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *CompleteMigrationInput) SetReplicationGroupId(v string) *CompleteMigrationInput { + s.ReplicationGroupId = &v + return s +} + +type CompleteMigrationOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific Redis replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s CompleteMigrationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMigrationOutput) GoString() string { + return s.String() +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *CompleteMigrationOutput) SetReplicationGroup(v *ReplicationGroup) *CompleteMigrationOutput { + s.ReplicationGroup = v + return s +} + +// Node group (shard) configuration options when adding or removing replicas. +// Each node group (shard) configuration has the following members: NodeGroupId, +// NewReplicaCount, and PreferredAvailabilityZones. +type ConfigureShard struct { + _ struct{} `type:"structure"` + + // The number of replicas you want in this node group at the end of this operation. + // The maximum value for NewReplicaCount is 5. The minimum value depends upon + // the type of Redis replication group you are working with. 
+	//
+	// The minimum number of replicas in a shard or replication group is:
+	//
+	//    * Redis (cluster mode disabled): 1 if Multi-AZ is enabled; 0 if it is
+	//    not
+	//
+	//    * Redis (cluster mode enabled): 0 (though you will not be able to fail
+	//    over to a replica if your primary node fails)
+	//
+	// NewReplicaCount is a required field
+	NewReplicaCount *int64 `type:"integer" required:"true"`
+
+	// The 4-digit id for the node group you are configuring. For Redis (cluster
+	// mode disabled) replication groups, the node group id is always 0001. To find
+	// the id of a node group (shard) in a Redis (cluster mode enabled) replication
+	// group, see Finding a Shard's Id (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/shard-find-id.html).
+	//
+	// NodeGroupId is a required field
+	NodeGroupId *string `min:"1" type:"string" required:"true"`
+
+	// A list of PreferredAvailabilityZone strings that specify which availability
+	// zones the replication group's nodes are to be in. The number of PreferredAvailabilityZone
+	// values must equal the value of NewReplicaCount plus 1 to account for the
+	// primary node. If this member of ReplicaConfiguration is omitted, ElastiCache
+	// for Redis selects the availability zone for each of the replicas.
+	PreferredAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"`
+
+	// The outpost ARNs in which the cache cluster is created.
+	PreferredOutpostArns []*string `locationNameList:"PreferredOutpostArn" type:"list"`
+}
+
+// String returns the string representation
+func (s ConfigureShard) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConfigureShard) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ConfigureShard) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ConfigureShard"}
+	if s.NewReplicaCount == nil {
+		invalidParams.Add(request.NewErrParamRequired("NewReplicaCount"))
+	}
+	if s.NodeGroupId == nil {
+		invalidParams.Add(request.NewErrParamRequired("NodeGroupId"))
+	}
+	if s.NodeGroupId != nil && len(*s.NodeGroupId) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NodeGroupId", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetNewReplicaCount sets the NewReplicaCount field's value.
+func (s *ConfigureShard) SetNewReplicaCount(v int64) *ConfigureShard {
+	s.NewReplicaCount = &v
+	return s
+}
+
+// SetNodeGroupId sets the NodeGroupId field's value.
+func (s *ConfigureShard) SetNodeGroupId(v string) *ConfigureShard {
+	s.NodeGroupId = &v
+	return s
+}
+
+// SetPreferredAvailabilityZones sets the PreferredAvailabilityZones field's value.
+func (s *ConfigureShard) SetPreferredAvailabilityZones(v []*string) *ConfigureShard {
+	s.PreferredAvailabilityZones = v
+	return s
+}
+
+// SetPreferredOutpostArns sets the PreferredOutpostArns field's value.
+func (s *ConfigureShard) SetPreferredOutpostArns(v []*string) *ConfigureShard {
+	s.PreferredOutpostArns = v
+	return s
+}
+
+// Represents the input of a CopySnapshot operation.
+type CopySnapshotInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the KMS key used to encrypt the target snapshot.
+	KmsKeyId *string `type:"string"`
+
+	// The name of an existing snapshot from which to make a copy.
// - // * AuthorizeCacheSecurityGroupIngress + // SourceSnapshotName is a required field + SourceSnapshotName *string `type:"string" required:"true"` + + // The Amazon S3 bucket to which the snapshot is exported. This parameter is + // used only when exporting a snapshot for external access. // - // * CreateCacheSecurityGroup + // When using this parameter to export a snapshot, be sure Amazon ElastiCache + // has the needed permissions to this S3 bucket. For more information, see Step + // 2: Grant ElastiCache Access to Your Amazon S3 Bucket (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access) + // in the Amazon ElastiCache User Guide. // - // * RevokeCacheSecurityGroupIngress - CacheSecurityGroup *CacheSecurityGroup `type:"structure"` + // For more information, see Exporting a Snapshot (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html) + // in the Amazon ElastiCache User Guide. + TargetBucket *string `type:"string"` + + // A name for the snapshot copy. ElastiCache does not permit overwriting a snapshot, + // therefore this name must be unique within its context - ElastiCache or an + // Amazon S3 bucket if exporting. + // + // TargetSnapshotName is a required field + TargetSnapshotName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopySnapshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopySnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopySnapshotInput"} + if s.SourceSnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("SourceSnapshotName")) + } + if s.TargetSnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("TargetSnapshotName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *CopySnapshotInput) SetKmsKeyId(v string) *CopySnapshotInput { + s.KmsKeyId = &v + return s +} + +// SetSourceSnapshotName sets the SourceSnapshotName field's value. +func (s *CopySnapshotInput) SetSourceSnapshotName(v string) *CopySnapshotInput { + s.SourceSnapshotName = &v + return s +} + +// SetTargetBucket sets the TargetBucket field's value. +func (s *CopySnapshotInput) SetTargetBucket(v string) *CopySnapshotInput { + s.TargetBucket = &v + return s +} + +// SetTargetSnapshotName sets the TargetSnapshotName field's value. +func (s *CopySnapshotInput) SetTargetSnapshotName(v string) *CopySnapshotInput { + s.TargetSnapshotName = &v + return s +} + +type CopySnapshotOutput struct { + _ struct{} `type:"structure"` + + // Represents a copy of an entire Redis cluster as of the time when the snapshot + // was taken. + Snapshot *Snapshot `type:"structure"` +} + +// String returns the string representation +func (s CopySnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopySnapshotOutput) GoString() string { + return s.String() +} + +// SetSnapshot sets the Snapshot field's value. 
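+
+// Example (illustrative sketch only): copying a snapshot and exporting the
+// copy to an S3 bucket using the CopySnapshotInput defined above. The snapshot
+// names and bucket are hypothetical; the bucket must already have the
+// ElastiCache access grants described in the TargetBucket documentation, and
+// an *ElastiCache client named svc is assumed.
+//
+//	out, err := svc.CopySnapshot(&elasticache.CopySnapshotInput{
+//		SourceSnapshotName: aws.String("automatic.my-redis-2021-07-19-03-00"),
+//		TargetSnapshotName: aws.String("my-redis-manual-copy"),
+//		TargetBucket:       aws.String("my-elasticache-exports"),
+//	})
+//	if err == nil && out.Snapshot != nil {
+//		// out.Snapshot describes the newly created copy
+//	}
+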
+func (s *CopySnapshotOutput) SetSnapshot(v *Snapshot) *CopySnapshotOutput { + s.Snapshot = v + return s } -// String returns the string representation -func (s AuthorizeCacheSecurityGroupIngressOutput) String() string { - return awsutil.Prettify(s) -} +// Represents the input of a CreateCacheCluster operation. +type CreateCacheClusterInput struct { + _ struct{} `type:"structure"` + + // Specifies whether the nodes in this Memcached cluster are created in a single + // Availability Zone or created across multiple Availability Zones in the cluster's + // region. + // + // This parameter is only supported for Memcached clusters. + // + // If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache + // assumes single-az mode. + AZMode *string `type:"string" enum:"AZMode"` + + // Reserved parameter. The password used to access a password protected server. + // + // Password constraints: + // + // * Must be only printable ASCII characters. + // + // * Must be at least 16 characters and no more than 128 characters in length. + // + // * The only permitted printable special characters are !, &, #, $, ^, <, + // >, and -. Other printable special characters cannot be used in the AUTH + // token. + // + // For more information, see AUTH password (http://redis.io/commands/AUTH) at + // http://redis.io/commands/AUTH. + AuthToken *string `type:"string"` + + // This parameter is currently disabled. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The node group (shard) identifier. This parameter is stored as a lowercase + // string. + // + // Constraints: + // + // * A name must contain from 1 to 50 alphanumeric characters or hyphens. + // + // * The first character must be a letter. + // + // * A name cannot end with a hyphen or contain two consecutive hyphens. + // + // CacheClusterId is a required field + CacheClusterId *string `type:"string" required:"true"` + + // The compute and memory capacity of the nodes in the node group (shard). + // + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. 
+ // + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, + // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium + // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 + // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge + // + // Additional node type info + // + // * All current generation instance types are created in Amazon VPC by default. + // + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. + // + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. + // + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. + CacheNodeType *string `type:"string"` + + // The name of the parameter group to associate with this cluster. If this argument + // is omitted, the default parameter group for the specified engine is used. + // You cannot use any parameter group which has cluster-enabled='yes' when creating + // a cluster. + CacheParameterGroupName *string `type:"string"` + + // A list of security group names to associate with this cluster. + // + // Use this parameter only when you are creating a cluster outside of an Amazon + // Virtual Private Cloud (Amazon VPC). + CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"` + + // The name of the subnet group to be used for the cluster. + // + // Use this parameter only when you are creating a cluster in an Amazon Virtual + // Private Cloud (Amazon VPC). + // + // If you're going to launch your cluster in an Amazon VPC, you need to create + // a subnet group before you start creating a cluster. For more information, + // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). + CacheSubnetGroupName *string `type:"string"` + + // The name of the cache engine to be used for this cluster. + // + // Valid values for this parameter are: memcached | redis + Engine *string `type:"string"` + + // The version number of the cache engine to be used for this cluster. To view + // the supported cache engine versions, use the DescribeCacheEngineVersions + // operation. + // + // Important: You can upgrade to a newer engine version (see Selecting a Cache + // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), + // but you cannot downgrade to an earlier engine version. 
If you want to use + // an earlier engine version, you must delete the existing cluster or replication + // group and create it anew with the earlier engine version. + EngineVersion *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic to which notifications are sent. + // + // The Amazon SNS topic owner must be the same as the cluster owner. + NotificationTopicArn *string `type:"string"` + + // The initial number of cache nodes that the cluster has. + // + // For clusters running Redis, this value must be 1. For clusters running Memcached, + // this value must be between 1 and 20. + // + // If you need more than 20 nodes for your Memcached cluster, please fill out + // the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ + // (http://aws.amazon.com/contact-us/elasticache-node-limit-request/). + NumCacheNodes *int64 `type:"integer"` + + // Specifies whether the nodes in the cluster are created in a single outpost + // or across multiple outposts. + OutpostMode *string `type:"string" enum:"OutpostMode"` -// GoString returns the string representation -func (s AuthorizeCacheSecurityGroupIngressOutput) GoString() string { - return s.String() -} + // The port number on which each of the cache nodes accepts connections. + Port *int64 `type:"integer"` -// SetCacheSecurityGroup sets the CacheSecurityGroup field's value. -func (s *AuthorizeCacheSecurityGroupIngressOutput) SetCacheSecurityGroup(v *CacheSecurityGroup) *AuthorizeCacheSecurityGroupIngressOutput { - s.CacheSecurityGroup = v - return s -} + // The EC2 Availability Zone in which the cluster is created. + // + // All nodes belonging to this cluster are placed in the preferred Availability + // Zone. If you want to create your nodes across multiple Availability Zones, + // use PreferredAvailabilityZones. + // + // Default: System chosen Availability Zone. + PreferredAvailabilityZone *string `type:"string"` -// Describes an Availability Zone in which the cluster is launched. -type AvailabilityZone struct { - _ struct{} `type:"structure"` + // A list of the Availability Zones in which cache nodes are created. The order + // of the zones in the list is not important. + // + // This option is only supported on Memcached. + // + // If you are creating your cluster in an Amazon VPC (recommended) you can only + // locate nodes in Availability Zones that are associated with the subnets in + // the selected subnet group. + // + // The number of Availability Zones listed must equal the value of NumCacheNodes. + // + // If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone + // instead, or repeat the Availability Zone multiple times in the list. + // + // Default: System chosen Availability Zones. + PreferredAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"` - // The name of the Availability Zone. - Name *string `type:"string"` -} + // Specifies the weekly time range during which maintenance on the cluster is + // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid + // values for ddd are: + // + // Specifies the weekly time range during which maintenance on the cluster is + // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi + // (24H Clock UTC). The minimum maintenance window is a 60 minute period. 
+ // + // Valid values for ddd are: + // + // * sun + // + // * mon + // + // * tue + // + // * wed + // + // * thu + // + // * fri + // + // * sat + // + // Example: sun:23:00-mon:01:30 + PreferredMaintenanceWindow *string `type:"string"` -// String returns the string representation -func (s AvailabilityZone) String() string { - return awsutil.Prettify(s) -} + // The outpost ARN in which the cache cluster is created. + PreferredOutpostArn *string `type:"string"` -// GoString returns the string representation -func (s AvailabilityZone) GoString() string { - return s.String() -} + // The outpost ARNs in which the cache cluster is created. + PreferredOutpostArns []*string `locationNameList:"PreferredOutpostArn" type:"list"` -// SetName sets the Name field's value. -func (s *AvailabilityZone) SetName(v string) *AvailabilityZone { - s.Name = &v - return s -} + // The ID of the replication group to which this cluster should belong. If this + // parameter is specified, the cluster is added to the specified replication + // group as a read replica; otherwise, the cluster is a standalone primary that + // is not part of any replication group. + // + // If the specified replication group is Multi-AZ enabled and the Availability + // Zone is not specified, the cluster is created in Availability Zones that + // provide the best spread of read replicas across Availability Zones. + // + // This parameter is only valid if the Engine parameter is redis. + ReplicationGroupId *string `type:"string"` -type BatchApplyUpdateActionInput struct { - _ struct{} `type:"structure"` + // One or more VPC security groups associated with the cluster. + // + // Use this parameter only when you are creating a cluster in an Amazon Virtual + // Private Cloud (Amazon VPC). + SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` - // The cache cluster IDs - CacheClusterIds []*string `type:"list"` + // A single-element string list containing an Amazon Resource Name (ARN) that + // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot + // file is used to populate the node group (shard). The Amazon S3 object name + // in the ARN cannot contain any commas. + // + // This parameter is only valid if the Engine parameter is redis. + // + // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` - // The replication group IDs - ReplicationGroupIds []*string `type:"list"` + // The name of a Redis snapshot from which to restore data into the new node + // group (shard). The snapshot status changes to restoring while the new node + // group (shard) is being created. + // + // This parameter is only valid if the Engine parameter is redis. + SnapshotName *string `type:"string"` - // The unique ID of the service update + // The number of days for which ElastiCache retains automatic snapshots before + // deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot + // taken today is retained for 5 days before being deleted. // - // ServiceUpdateName is a required field - ServiceUpdateName *string `type:"string" required:"true"` + // This parameter is only valid if the Engine parameter is redis. + // + // Default: 0 (i.e., automatic backups are disabled for this cache cluster). + SnapshotRetentionLimit *int64 `type:"integer"` + + // The daily time range (in UTC) during which ElastiCache begins taking a daily + // snapshot of your node group (shard). 
+ // + // Example: 05:00-09:00 + // + // If you do not specify this parameter, ElastiCache automatically chooses an + // appropriate time range. + // + // This parameter is only valid if the Engine parameter is redis. + SnapshotWindow *string `type:"string"` + + // A list of cost allocation tags to be added to this resource. + Tags []*Tag `locationNameList:"Tag" type:"list"` } // String returns the string representation -func (s BatchApplyUpdateActionInput) String() string { +func (s CreateCacheClusterInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchApplyUpdateActionInput) GoString() string { +func (s CreateCacheClusterInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *BatchApplyUpdateActionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchApplyUpdateActionInput"} - if s.ServiceUpdateName == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceUpdateName")) +func (s *CreateCacheClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCacheClusterInput"} + if s.CacheClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("CacheClusterId")) } if invalidParams.Len() > 0 { @@ -5822,1293 +9362,1440 @@ func (s *BatchApplyUpdateActionInput) Validate() error { return nil } -// SetCacheClusterIds sets the CacheClusterIds field's value. -func (s *BatchApplyUpdateActionInput) SetCacheClusterIds(v []*string) *BatchApplyUpdateActionInput { - s.CacheClusterIds = v +// SetAZMode sets the AZMode field's value. +func (s *CreateCacheClusterInput) SetAZMode(v string) *CreateCacheClusterInput { + s.AZMode = &v return s } -// SetReplicationGroupIds sets the ReplicationGroupIds field's value. -func (s *BatchApplyUpdateActionInput) SetReplicationGroupIds(v []*string) *BatchApplyUpdateActionInput { - s.ReplicationGroupIds = v +// SetAuthToken sets the AuthToken field's value. +func (s *CreateCacheClusterInput) SetAuthToken(v string) *CreateCacheClusterInput { + s.AuthToken = &v return s } -// SetServiceUpdateName sets the ServiceUpdateName field's value. -func (s *BatchApplyUpdateActionInput) SetServiceUpdateName(v string) *BatchApplyUpdateActionInput { - s.ServiceUpdateName = &v +// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. +func (s *CreateCacheClusterInput) SetAutoMinorVersionUpgrade(v bool) *CreateCacheClusterInput { + s.AutoMinorVersionUpgrade = &v return s } -type BatchApplyUpdateActionOutput struct { - _ struct{} `type:"structure"` +// SetCacheClusterId sets the CacheClusterId field's value. +func (s *CreateCacheClusterInput) SetCacheClusterId(v string) *CreateCacheClusterInput { + s.CacheClusterId = &v + return s +} - // Update actions that have been processed successfully - ProcessedUpdateActions []*ProcessedUpdateAction `locationNameList:"ProcessedUpdateAction" type:"list"` +// SetCacheNodeType sets the CacheNodeType field's value. +func (s *CreateCacheClusterInput) SetCacheNodeType(v string) *CreateCacheClusterInput { + s.CacheNodeType = &v + return s +} - // Update actions that haven't been processed successfully - UnprocessedUpdateActions []*UnprocessedUpdateAction `locationNameList:"UnprocessedUpdateAction" type:"list"` +// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. 
+func (s *CreateCacheClusterInput) SetCacheParameterGroupName(v string) *CreateCacheClusterInput { + s.CacheParameterGroupName = &v + return s } -// String returns the string representation -func (s BatchApplyUpdateActionOutput) String() string { - return awsutil.Prettify(s) +// SetCacheSecurityGroupNames sets the CacheSecurityGroupNames field's value. +func (s *CreateCacheClusterInput) SetCacheSecurityGroupNames(v []*string) *CreateCacheClusterInput { + s.CacheSecurityGroupNames = v + return s } -// GoString returns the string representation -func (s BatchApplyUpdateActionOutput) GoString() string { - return s.String() +// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. +func (s *CreateCacheClusterInput) SetCacheSubnetGroupName(v string) *CreateCacheClusterInput { + s.CacheSubnetGroupName = &v + return s } -// SetProcessedUpdateActions sets the ProcessedUpdateActions field's value. -func (s *BatchApplyUpdateActionOutput) SetProcessedUpdateActions(v []*ProcessedUpdateAction) *BatchApplyUpdateActionOutput { - s.ProcessedUpdateActions = v +// SetEngine sets the Engine field's value. +func (s *CreateCacheClusterInput) SetEngine(v string) *CreateCacheClusterInput { + s.Engine = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *CreateCacheClusterInput) SetEngineVersion(v string) *CreateCacheClusterInput { + s.EngineVersion = &v + return s +} + +// SetNotificationTopicArn sets the NotificationTopicArn field's value. +func (s *CreateCacheClusterInput) SetNotificationTopicArn(v string) *CreateCacheClusterInput { + s.NotificationTopicArn = &v + return s +} + +// SetNumCacheNodes sets the NumCacheNodes field's value. +func (s *CreateCacheClusterInput) SetNumCacheNodes(v int64) *CreateCacheClusterInput { + s.NumCacheNodes = &v + return s +} + +// SetOutpostMode sets the OutpostMode field's value. +func (s *CreateCacheClusterInput) SetOutpostMode(v string) *CreateCacheClusterInput { + s.OutpostMode = &v + return s +} + +// SetPort sets the Port field's value. +func (s *CreateCacheClusterInput) SetPort(v int64) *CreateCacheClusterInput { + s.Port = &v + return s +} + +// SetPreferredAvailabilityZone sets the PreferredAvailabilityZone field's value. +func (s *CreateCacheClusterInput) SetPreferredAvailabilityZone(v string) *CreateCacheClusterInput { + s.PreferredAvailabilityZone = &v + return s +} + +// SetPreferredAvailabilityZones sets the PreferredAvailabilityZones field's value. +func (s *CreateCacheClusterInput) SetPreferredAvailabilityZones(v []*string) *CreateCacheClusterInput { + s.PreferredAvailabilityZones = v return s } -// SetUnprocessedUpdateActions sets the UnprocessedUpdateActions field's value. -func (s *BatchApplyUpdateActionOutput) SetUnprocessedUpdateActions(v []*UnprocessedUpdateAction) *BatchApplyUpdateActionOutput { - s.UnprocessedUpdateActions = v +// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. +func (s *CreateCacheClusterInput) SetPreferredMaintenanceWindow(v string) *CreateCacheClusterInput { + s.PreferredMaintenanceWindow = &v return s } -type BatchStopUpdateActionInput struct { - _ struct{} `type:"structure"` - - // The cache cluster IDs - CacheClusterIds []*string `type:"list"` - - // The replication group IDs - ReplicationGroupIds []*string `type:"list"` +// SetPreferredOutpostArn sets the PreferredOutpostArn field's value. 
+func (s *CreateCacheClusterInput) SetPreferredOutpostArn(v string) *CreateCacheClusterInput { + s.PreferredOutpostArn = &v + return s +} - // The unique ID of the service update - // - // ServiceUpdateName is a required field - ServiceUpdateName *string `type:"string" required:"true"` +// SetPreferredOutpostArns sets the PreferredOutpostArns field's value. +func (s *CreateCacheClusterInput) SetPreferredOutpostArns(v []*string) *CreateCacheClusterInput { + s.PreferredOutpostArns = v + return s } -// String returns the string representation -func (s BatchStopUpdateActionInput) String() string { - return awsutil.Prettify(s) +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *CreateCacheClusterInput) SetReplicationGroupId(v string) *CreateCacheClusterInput { + s.ReplicationGroupId = &v + return s } -// GoString returns the string representation -func (s BatchStopUpdateActionInput) GoString() string { - return s.String() +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *CreateCacheClusterInput) SetSecurityGroupIds(v []*string) *CreateCacheClusterInput { + s.SecurityGroupIds = v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *BatchStopUpdateActionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "BatchStopUpdateActionInput"} - if s.ServiceUpdateName == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceUpdateName")) - } +// SetSnapshotArns sets the SnapshotArns field's value. +func (s *CreateCacheClusterInput) SetSnapshotArns(v []*string) *CreateCacheClusterInput { + s.SnapshotArns = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSnapshotName sets the SnapshotName field's value. +func (s *CreateCacheClusterInput) SetSnapshotName(v string) *CreateCacheClusterInput { + s.SnapshotName = &v + return s } -// SetCacheClusterIds sets the CacheClusterIds field's value. -func (s *BatchStopUpdateActionInput) SetCacheClusterIds(v []*string) *BatchStopUpdateActionInput { - s.CacheClusterIds = v +// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value. +func (s *CreateCacheClusterInput) SetSnapshotRetentionLimit(v int64) *CreateCacheClusterInput { + s.SnapshotRetentionLimit = &v return s } -// SetReplicationGroupIds sets the ReplicationGroupIds field's value. -func (s *BatchStopUpdateActionInput) SetReplicationGroupIds(v []*string) *BatchStopUpdateActionInput { - s.ReplicationGroupIds = v +// SetSnapshotWindow sets the SnapshotWindow field's value. +func (s *CreateCacheClusterInput) SetSnapshotWindow(v string) *CreateCacheClusterInput { + s.SnapshotWindow = &v return s } -// SetServiceUpdateName sets the ServiceUpdateName field's value. -func (s *BatchStopUpdateActionInput) SetServiceUpdateName(v string) *BatchStopUpdateActionInput { - s.ServiceUpdateName = &v +// SetTags sets the Tags field's value. +func (s *CreateCacheClusterInput) SetTags(v []*Tag) *CreateCacheClusterInput { + s.Tags = v return s } -type BatchStopUpdateActionOutput struct { +type CreateCacheClusterOutput struct { _ struct{} `type:"structure"` - // Update actions that have been processed successfully - ProcessedUpdateActions []*ProcessedUpdateAction `locationNameList:"ProcessedUpdateAction" type:"list"` - - // Update actions that haven't been processed successfully - UnprocessedUpdateActions []*UnprocessedUpdateAction `locationNameList:"UnprocessedUpdateAction" type:"list"` + // Contains all of the attributes of a specific cluster. 
+ CacheCluster *CacheCluster `type:"structure"` } // String returns the string representation -func (s BatchStopUpdateActionOutput) String() string { +func (s CreateCacheClusterOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BatchStopUpdateActionOutput) GoString() string { +func (s CreateCacheClusterOutput) GoString() string { return s.String() } -// SetProcessedUpdateActions sets the ProcessedUpdateActions field's value. -func (s *BatchStopUpdateActionOutput) SetProcessedUpdateActions(v []*ProcessedUpdateAction) *BatchStopUpdateActionOutput { - s.ProcessedUpdateActions = v - return s -} - -// SetUnprocessedUpdateActions sets the UnprocessedUpdateActions field's value. -func (s *BatchStopUpdateActionOutput) SetUnprocessedUpdateActions(v []*UnprocessedUpdateAction) *BatchStopUpdateActionOutput { - s.UnprocessedUpdateActions = v +// SetCacheCluster sets the CacheCluster field's value. +func (s *CreateCacheClusterOutput) SetCacheCluster(v *CacheCluster) *CreateCacheClusterOutput { + s.CacheCluster = v return s } -// Contains all of the attributes of a specific cluster. -type CacheCluster struct { - _ struct{} `type:"structure"` - - // A flag that enables encryption at-rest when set to true. - // - // You cannot modify the value of AtRestEncryptionEnabled after the cluster - // is created. To enable at-rest encryption on a cluster you must set AtRestEncryptionEnabled - // to true when you create a cluster. - // - // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6, 4.x or later. - // - // Default: false - AtRestEncryptionEnabled *bool `type:"boolean"` - - // A flag that enables using an AuthToken (password) when issuing Redis commands. - // - // Default: false - AuthTokenEnabled *bool `type:"boolean"` - - // The date the auth token was last modified - AuthTokenLastModifiedDate *time.Time `type:"timestamp"` - - // This parameter is currently disabled. - AutoMinorVersionUpgrade *bool `type:"boolean"` - - // The date and time when the cluster was created. - CacheClusterCreateTime *time.Time `type:"timestamp"` - - // The user-supplied identifier of the cluster. This identifier is a unique - // key that identifies a cluster. - CacheClusterId *string `type:"string"` - - // The current state of this cluster, one of the following values: available, - // creating, deleted, deleting, incompatible-network, modifying, rebooting cluster - // nodes, restore-failed, or snapshotting. - CacheClusterStatus *string `type:"string"` - - // The name of the compute and memory capacity node type for the cluster. - // - // The following node types are supported by ElastiCache. Generally speaking, - // the current generation types provide more memory and computational power - // at lower cost when compared to their equivalent previous generation counterparts. 
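As a usage sketch for the CreateCacheClusterInput described above, the call below creates a small cross-AZ Memcached cluster; the cluster id, node type, and node count are assumptions chosen to satisfy the documented constraints (1-20 nodes for Memcached, AZMode is Memcached-only).

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// createMemcachedCluster sketches a CreateCacheCluster call; svc is an
// *elasticache.ElastiCache client such as the one built in the earlier sketch.
func createMemcachedCluster(svc *elasticache.ElastiCache) (*elasticache.CreateCacheClusterOutput, error) {
	return svc.CreateCacheCluster(&elasticache.CreateCacheClusterInput{
		CacheClusterId: aws.String("example-memcached"), // 1-50 alphanumeric characters or hyphens
		Engine:         aws.String("memcached"),
		CacheNodeType:  aws.String("cache.t3.micro"),
		NumCacheNodes:  aws.Int64(2),           // Memcached permits 1-20 nodes
		AZMode:         aws.String("cross-az"), // spread the two nodes across Availability Zones
	})
}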
- // - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, - // cache.t2.medium Previous generation: (not recommended) T1 node types: - // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, - // cache.m3.2xlarge - // - // * Compute optimized: Previous generation: (not recommended) C1 node types: - // cache.c1.xlarge - // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge - // - // Additional node type info - // - // * All current generation instance types are created in Amazon VPC by default. - // - // * Redis append-only files (AOF) are not supported for T1 or T2 instances. - // - // * Redis Multi-AZ with automatic failover is not supported on T1 instances. - // - // * Redis configuration variables appendonly and appendfsync are not supported - // on Redis version 2.8.22 and later. - CacheNodeType *string `type:"string"` - - // A list of cache nodes that are members of the cluster. - CacheNodes []*CacheNode `locationNameList:"CacheNode" type:"list"` - - // Status of the cache parameter group. - CacheParameterGroup *CacheParameterGroupStatus `type:"structure"` - - // A list of cache security group elements, composed of name and status sub-elements. - CacheSecurityGroups []*CacheSecurityGroupMembership `locationNameList:"CacheSecurityGroup" type:"list"` - - // The name of the cache subnet group associated with the cluster. - CacheSubnetGroupName *string `type:"string"` - - // The URL of the web page where you can download the latest ElastiCache client - // library. - ClientDownloadLandingPage *string `type:"string"` - - // Represents a Memcached cluster endpoint which, if Automatic Discovery is - // enabled on the cluster, can be used by an application to connect to any node - // in the cluster. The configuration endpoint will always have .cfg in it. - // - // Example: mem-3.9dvc4r.cfg.usw2.cache.amazonaws.com:11211 - ConfigurationEndpoint *Endpoint `type:"structure"` - - // The name of the cache engine (memcached or redis) to be used for this cluster. - Engine *string `type:"string"` - - // The version of the cache engine that is used in this cluster. - EngineVersion *string `type:"string"` - - // Describes a notification topic and its status. Notification topics are used - // for publishing ElastiCache events to subscribers using Amazon Simple Notification - // Service (SNS). - NotificationConfiguration *NotificationConfiguration `type:"structure"` - - // The number of cache nodes in the cluster. - // - // For clusters running Redis, this value must be 1. For clusters running Memcached, - // this value must be between 1 and 20. 
- NumCacheNodes *int64 `type:"integer"` - - // A group of settings that are applied to the cluster in the future, or that - // are currently being applied. - PendingModifiedValues *PendingModifiedValues `type:"structure"` - - // The name of the Availability Zone in which the cluster is located or "Multiple" - // if the cache nodes are located in different Availability Zones. - PreferredAvailabilityZone *string `type:"string"` - - // Specifies the weekly time range during which maintenance on the cluster is - // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi - // (24H Clock UTC). The minimum maintenance window is a 60 minute period. - // - // Valid values for ddd are: - // - // * sun - // - // * mon - // - // * tue - // - // * wed - // - // * thu - // - // * fri - // - // * sat - // - // Example: sun:23:00-mon:01:30 - PreferredMaintenanceWindow *string `type:"string"` - - // The replication group to which this cluster belongs. If this field is empty, - // the cluster is not associated with any replication group. - ReplicationGroupId *string `type:"string"` - - // A list of VPC Security Groups associated with the cluster. - SecurityGroups []*SecurityGroupMembership `type:"list"` - - // The number of days for which ElastiCache retains automatic cluster snapshots - // before deleting them. For example, if you set SnapshotRetentionLimit to 5, - // a snapshot that was taken today is retained for 5 days before being deleted. - // - // If the value of SnapshotRetentionLimit is set to zero (0), backups are turned - // off. - SnapshotRetentionLimit *int64 `type:"integer"` +// Represents the input of a CreateCacheParameterGroup operation. +type CreateCacheParameterGroupInput struct { + _ struct{} `type:"structure"` - // The daily time range (in UTC) during which ElastiCache begins taking a daily - // snapshot of your cluster. + // The name of the cache parameter group family that the cache parameter group + // can be used with. // - // Example: 05:00-09:00 - SnapshotWindow *string `type:"string"` - - // A flag that enables in-transit encryption when set to true. + // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 + // | redis4.0 | redis5.0 | // - // You cannot modify the value of TransitEncryptionEnabled after the cluster - // is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled - // to true when you create a cluster. + // CacheParameterGroupFamily is a required field + CacheParameterGroupFamily *string `type:"string" required:"true"` + + // A user-specified name for the cache parameter group. // - // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6, 4.x or later. + // CacheParameterGroupName is a required field + CacheParameterGroupName *string `type:"string" required:"true"` + + // A user-specified description for the cache parameter group. // - // Default: false - TransitEncryptionEnabled *bool `type:"boolean"` + // Description is a required field + Description *string `type:"string" required:"true"` } // String returns the string representation -func (s CacheCluster) String() string { +func (s CreateCacheParameterGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CacheCluster) GoString() string { +func (s CreateCacheParameterGroupInput) GoString() string { return s.String() } -// SetAtRestEncryptionEnabled sets the AtRestEncryptionEnabled field's value. 
-func (s *CacheCluster) SetAtRestEncryptionEnabled(v bool) *CacheCluster { - s.AtRestEncryptionEnabled = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCacheParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCacheParameterGroupInput"} + if s.CacheParameterGroupFamily == nil { + invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupFamily")) + } + if s.CacheParameterGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName")) + } + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } -// SetAuthTokenEnabled sets the AuthTokenEnabled field's value. -func (s *CacheCluster) SetAuthTokenEnabled(v bool) *CacheCluster { - s.AuthTokenEnabled = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetAuthTokenLastModifiedDate sets the AuthTokenLastModifiedDate field's value. -func (s *CacheCluster) SetAuthTokenLastModifiedDate(v time.Time) *CacheCluster { - s.AuthTokenLastModifiedDate = &v +// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. +func (s *CreateCacheParameterGroupInput) SetCacheParameterGroupFamily(v string) *CreateCacheParameterGroupInput { + s.CacheParameterGroupFamily = &v return s } -// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. -func (s *CacheCluster) SetAutoMinorVersionUpgrade(v bool) *CacheCluster { - s.AutoMinorVersionUpgrade = &v +// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. +func (s *CreateCacheParameterGroupInput) SetCacheParameterGroupName(v string) *CreateCacheParameterGroupInput { + s.CacheParameterGroupName = &v return s } -// SetCacheClusterCreateTime sets the CacheClusterCreateTime field's value. -func (s *CacheCluster) SetCacheClusterCreateTime(v time.Time) *CacheCluster { - s.CacheClusterCreateTime = &v +// SetDescription sets the Description field's value. +func (s *CreateCacheParameterGroupInput) SetDescription(v string) *CreateCacheParameterGroupInput { + s.Description = &v return s } -// SetCacheClusterId sets the CacheClusterId field's value. -func (s *CacheCluster) SetCacheClusterId(v string) *CacheCluster { - s.CacheClusterId = &v - return s -} +type CreateCacheParameterGroupOutput struct { + _ struct{} `type:"structure"` -// SetCacheClusterStatus sets the CacheClusterStatus field's value. -func (s *CacheCluster) SetCacheClusterStatus(v string) *CacheCluster { - s.CacheClusterStatus = &v - return s + // Represents the output of a CreateCacheParameterGroup operation. + CacheParameterGroup *CacheParameterGroup `type:"structure"` } -// SetCacheNodeType sets the CacheNodeType field's value. -func (s *CacheCluster) SetCacheNodeType(v string) *CacheCluster { - s.CacheNodeType = &v - return s +// String returns the string representation +func (s CreateCacheParameterGroupOutput) String() string { + return awsutil.Prettify(s) } -// SetCacheNodes sets the CacheNodes field's value. -func (s *CacheCluster) SetCacheNodes(v []*CacheNode) *CacheCluster { - s.CacheNodes = v - return s +// GoString returns the string representation +func (s CreateCacheParameterGroupOutput) GoString() string { + return s.String() } // SetCacheParameterGroup sets the CacheParameterGroup field's value. 
-func (s *CacheCluster) SetCacheParameterGroup(v *CacheParameterGroupStatus) *CacheCluster { +func (s *CreateCacheParameterGroupOutput) SetCacheParameterGroup(v *CacheParameterGroup) *CreateCacheParameterGroupOutput { s.CacheParameterGroup = v return s } -// SetCacheSecurityGroups sets the CacheSecurityGroups field's value. -func (s *CacheCluster) SetCacheSecurityGroups(v []*CacheSecurityGroupMembership) *CacheCluster { - s.CacheSecurityGroups = v - return s +// Represents the input of a CreateCacheSecurityGroup operation. +type CreateCacheSecurityGroupInput struct { + _ struct{} `type:"structure"` + + // A name for the cache security group. This value is stored as a lowercase + // string. + // + // Constraints: Must contain no more than 255 alphanumeric characters. Cannot + // be the word "Default". + // + // Example: mysecuritygroup + // + // CacheSecurityGroupName is a required field + CacheSecurityGroupName *string `type:"string" required:"true"` + + // A description for the cache security group. + // + // Description is a required field + Description *string `type:"string" required:"true"` } -// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. -func (s *CacheCluster) SetCacheSubnetGroupName(v string) *CacheCluster { - s.CacheSubnetGroupName = &v - return s +// String returns the string representation +func (s CreateCacheSecurityGroupInput) String() string { + return awsutil.Prettify(s) } -// SetClientDownloadLandingPage sets the ClientDownloadLandingPage field's value. -func (s *CacheCluster) SetClientDownloadLandingPage(v string) *CacheCluster { - s.ClientDownloadLandingPage = &v - return s +// GoString returns the string representation +func (s CreateCacheSecurityGroupInput) GoString() string { + return s.String() } -// SetConfigurationEndpoint sets the ConfigurationEndpoint field's value. -func (s *CacheCluster) SetConfigurationEndpoint(v *Endpoint) *CacheCluster { - s.ConfigurationEndpoint = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCacheSecurityGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCacheSecurityGroupInput"} + if s.CacheSecurityGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName")) + } + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetEngine sets the Engine field's value. -func (s *CacheCluster) SetEngine(v string) *CacheCluster { - s.Engine = &v +// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value. +func (s *CreateCacheSecurityGroupInput) SetCacheSecurityGroupName(v string) *CreateCacheSecurityGroupInput { + s.CacheSecurityGroupName = &v return s } -// SetEngineVersion sets the EngineVersion field's value. -func (s *CacheCluster) SetEngineVersion(v string) *CacheCluster { - s.EngineVersion = &v +// SetDescription sets the Description field's value. +func (s *CreateCacheSecurityGroupInput) SetDescription(v string) *CreateCacheSecurityGroupInput { + s.Description = &v return s } -// SetNotificationConfiguration sets the NotificationConfiguration field's value. 
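Following the CreateCacheParameterGroupInput definition above, a minimal sketch of creating a parameter group; the group name and description are placeholders, and the family value is one of the documented family strings.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// createParameterGroup sketches a CreateCacheParameterGroup call with the
// three required fields: family, name, and description.
func createParameterGroup(svc *elasticache.ElastiCache) (*elasticache.CreateCacheParameterGroupOutput, error) {
	return svc.CreateCacheParameterGroup(&elasticache.CreateCacheParameterGroupInput{
		CacheParameterGroupFamily: aws.String("redis5.0"),
		CacheParameterGroupName:   aws.String("example-redis5-params"),
		Description:               aws.String("example parameter group"),
	})
}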
-func (s *CacheCluster) SetNotificationConfiguration(v *NotificationConfiguration) *CacheCluster { - s.NotificationConfiguration = v - return s +type CreateCacheSecurityGroupOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following operations: + // + // * AuthorizeCacheSecurityGroupIngress + // + // * CreateCacheSecurityGroup + // + // * RevokeCacheSecurityGroupIngress + CacheSecurityGroup *CacheSecurityGroup `type:"structure"` } -// SetNumCacheNodes sets the NumCacheNodes field's value. -func (s *CacheCluster) SetNumCacheNodes(v int64) *CacheCluster { - s.NumCacheNodes = &v +// String returns the string representation +func (s CreateCacheSecurityGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheSecurityGroupOutput) GoString() string { + return s.String() +} + +// SetCacheSecurityGroup sets the CacheSecurityGroup field's value. +func (s *CreateCacheSecurityGroupOutput) SetCacheSecurityGroup(v *CacheSecurityGroup) *CreateCacheSecurityGroupOutput { + s.CacheSecurityGroup = v return s } -// SetPendingModifiedValues sets the PendingModifiedValues field's value. -func (s *CacheCluster) SetPendingModifiedValues(v *PendingModifiedValues) *CacheCluster { - s.PendingModifiedValues = v +// Represents the input of a CreateCacheSubnetGroup operation. +type CreateCacheSubnetGroupInput struct { + _ struct{} `type:"structure"` + + // A description for the cache subnet group. + // + // CacheSubnetGroupDescription is a required field + CacheSubnetGroupDescription *string `type:"string" required:"true"` + + // A name for the cache subnet group. This value is stored as a lowercase string. + // + // Constraints: Must contain no more than 255 alphanumeric characters or hyphens. + // + // Example: mysubnetgroup + // + // CacheSubnetGroupName is a required field + CacheSubnetGroupName *string `type:"string" required:"true"` + + // A list of VPC subnet IDs for the cache subnet group. + // + // SubnetIds is a required field + SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateCacheSubnetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCacheSubnetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCacheSubnetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCacheSubnetGroupInput"} + if s.CacheSubnetGroupDescription == nil { + invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupDescription")) + } + if s.CacheSubnetGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupName")) + } + if s.SubnetIds == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCacheSubnetGroupDescription sets the CacheSubnetGroupDescription field's value. +func (s *CreateCacheSubnetGroupInput) SetCacheSubnetGroupDescription(v string) *CreateCacheSubnetGroupInput { + s.CacheSubnetGroupDescription = &v return s } -// SetPreferredAvailabilityZone sets the PreferredAvailabilityZone field's value. 
-func (s *CacheCluster) SetPreferredAvailabilityZone(v string) *CacheCluster { - s.PreferredAvailabilityZone = &v +// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. +func (s *CreateCacheSubnetGroupInput) SetCacheSubnetGroupName(v string) *CreateCacheSubnetGroupInput { + s.CacheSubnetGroupName = &v return s } -// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. -func (s *CacheCluster) SetPreferredMaintenanceWindow(v string) *CacheCluster { - s.PreferredMaintenanceWindow = &v +// SetSubnetIds sets the SubnetIds field's value. +func (s *CreateCacheSubnetGroupInput) SetSubnetIds(v []*string) *CreateCacheSubnetGroupInput { + s.SubnetIds = v return s } -// SetReplicationGroupId sets the ReplicationGroupId field's value. -func (s *CacheCluster) SetReplicationGroupId(v string) *CacheCluster { - s.ReplicationGroupId = &v - return s -} +type CreateCacheSubnetGroupOutput struct { + _ struct{} `type:"structure"` -// SetSecurityGroups sets the SecurityGroups field's value. -func (s *CacheCluster) SetSecurityGroups(v []*SecurityGroupMembership) *CacheCluster { - s.SecurityGroups = v - return s + // Represents the output of one of the following operations: + // + // * CreateCacheSubnetGroup + // + // * ModifyCacheSubnetGroup + CacheSubnetGroup *CacheSubnetGroup `type:"structure"` } -// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value. -func (s *CacheCluster) SetSnapshotRetentionLimit(v int64) *CacheCluster { - s.SnapshotRetentionLimit = &v - return s +// String returns the string representation +func (s CreateCacheSubnetGroupOutput) String() string { + return awsutil.Prettify(s) } -// SetSnapshotWindow sets the SnapshotWindow field's value. -func (s *CacheCluster) SetSnapshotWindow(v string) *CacheCluster { - s.SnapshotWindow = &v - return s +// GoString returns the string representation +func (s CreateCacheSubnetGroupOutput) GoString() string { + return s.String() } -// SetTransitEncryptionEnabled sets the TransitEncryptionEnabled field's value. -func (s *CacheCluster) SetTransitEncryptionEnabled(v bool) *CacheCluster { - s.TransitEncryptionEnabled = &v +// SetCacheSubnetGroup sets the CacheSubnetGroup field's value. +func (s *CreateCacheSubnetGroupOutput) SetCacheSubnetGroup(v *CacheSubnetGroup) *CreateCacheSubnetGroupOutput { + s.CacheSubnetGroup = v return s } -// Provides all of the details about a particular cache engine version. -type CacheEngineVersion struct { +type CreateGlobalReplicationGroupInput struct { _ struct{} `type:"structure"` - // The description of the cache engine. - CacheEngineDescription *string `type:"string"` - - // The description of the cache engine version. - CacheEngineVersionDescription *string `type:"string"` + // Provides details of the Global Datastore + GlobalReplicationGroupDescription *string `type:"string"` - // The name of the cache parameter group family associated with this cache engine. + // The suffix name of a Global Datastore. Amazon ElastiCache automatically applies + // a prefix to the Global Datastore ID when it is created. Each AWS Region has + // its own prefix. For instance, a Global Datastore ID created in the US-West-1 + // region will begin with "dsdfu" along with the suffix name you provide. The + // suffix, combined with the auto-generated prefix, guarantees uniqueness of + // the Global Datastore name across multiple regions. 
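A corresponding sketch for CreateCacheSubnetGroupInput, which takes a name, a description, and the VPC subnet IDs; the subnet ID here is a placeholder.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// createSubnetGroup sketches a CreateCacheSubnetGroup call; all three fields
// are required by the Validate method shown above.
func createSubnetGroup(svc *elasticache.ElastiCache) (*elasticache.CreateCacheSubnetGroupOutput, error) {
	return svc.CreateCacheSubnetGroup(&elasticache.CreateCacheSubnetGroupInput{
		CacheSubnetGroupName:        aws.String("example-subnet-group"),
		CacheSubnetGroupDescription: aws.String("subnets for the example cluster"),
		SubnetIds:                   aws.StringSlice([]string{"subnet-0123456789abcdef0"}),
	})
}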
// - // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 - // | redis4.0 | redis5.0 | - CacheParameterGroupFamily *string `type:"string"` - - // The name of the cache engine. - Engine *string `type:"string"` + // For a full list of AWS Regions and their respective Global Datastore iD prefixes, + // see Using the AWS CLI with Global Datastores (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Redis-Global-Clusters-CLI.html). + // + // GlobalReplicationGroupIdSuffix is a required field + GlobalReplicationGroupIdSuffix *string `type:"string" required:"true"` - // The version number of the cache engine. - EngineVersion *string `type:"string"` + // The name of the primary cluster that accepts writes and will replicate updates + // to the secondary cluster. + // + // PrimaryReplicationGroupId is a required field + PrimaryReplicationGroupId *string `type:"string" required:"true"` } // String returns the string representation -func (s CacheEngineVersion) String() string { +func (s CreateGlobalReplicationGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CacheEngineVersion) GoString() string { +func (s CreateGlobalReplicationGroupInput) GoString() string { return s.String() } -// SetCacheEngineDescription sets the CacheEngineDescription field's value. -func (s *CacheEngineVersion) SetCacheEngineDescription(v string) *CacheEngineVersion { - s.CacheEngineDescription = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateGlobalReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGlobalReplicationGroupInput"} + if s.GlobalReplicationGroupIdSuffix == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalReplicationGroupIdSuffix")) + } + if s.PrimaryReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("PrimaryReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetCacheEngineVersionDescription sets the CacheEngineVersionDescription field's value. -func (s *CacheEngineVersion) SetCacheEngineVersionDescription(v string) *CacheEngineVersion { - s.CacheEngineVersionDescription = &v +// SetGlobalReplicationGroupDescription sets the GlobalReplicationGroupDescription field's value. +func (s *CreateGlobalReplicationGroupInput) SetGlobalReplicationGroupDescription(v string) *CreateGlobalReplicationGroupInput { + s.GlobalReplicationGroupDescription = &v return s } -// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. -func (s *CacheEngineVersion) SetCacheParameterGroupFamily(v string) *CacheEngineVersion { - s.CacheParameterGroupFamily = &v +// SetGlobalReplicationGroupIdSuffix sets the GlobalReplicationGroupIdSuffix field's value. +func (s *CreateGlobalReplicationGroupInput) SetGlobalReplicationGroupIdSuffix(v string) *CreateGlobalReplicationGroupInput { + s.GlobalReplicationGroupIdSuffix = &v return s } -// SetEngine sets the Engine field's value. -func (s *CacheEngineVersion) SetEngine(v string) *CacheEngineVersion { - s.Engine = &v +// SetPrimaryReplicationGroupId sets the PrimaryReplicationGroupId field's value. +func (s *CreateGlobalReplicationGroupInput) SetPrimaryReplicationGroupId(v string) *CreateGlobalReplicationGroupInput { + s.PrimaryReplicationGroupId = &v return s } -// SetEngineVersion sets the EngineVersion field's value. 
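To show how the Global Datastore fields above fit together, a sketch of CreateGlobalReplicationGroup; the suffix and primary replication group id are placeholders, and ElastiCache prepends the region-specific prefix to the suffix as described above.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// createGlobalDatastore sketches a CreateGlobalReplicationGroup call. The
// primary replication group must already exist.
func createGlobalDatastore(svc *elasticache.ElastiCache) (*elasticache.CreateGlobalReplicationGroupOutput, error) {
	return svc.CreateGlobalReplicationGroup(&elasticache.CreateGlobalReplicationGroupInput{
		GlobalReplicationGroupIdSuffix:    aws.String("example-global"),
		GlobalReplicationGroupDescription: aws.String("example Global Datastore"),
		PrimaryReplicationGroupId:         aws.String("example-primary-rg"),
	})
}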
-func (s *CacheEngineVersion) SetEngineVersion(v string) *CacheEngineVersion { - s.EngineVersion = &v +type CreateGlobalReplicationGroupOutput struct { + _ struct{} `type:"structure"` + + // Consists of a primary cluster that accepts writes and an associated secondary + // cluster that resides in a different AWS region. The secondary cluster accepts + // only reads. The primary cluster automatically replicates updates to the secondary + // cluster. + // + // * The GlobalReplicationGroupIdSuffix represents the name of the Global + // Datastore, which is what you use to associate a secondary cluster. + GlobalReplicationGroup *GlobalReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateGlobalReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGlobalReplicationGroupOutput) GoString() string { + return s.String() +} + +// SetGlobalReplicationGroup sets the GlobalReplicationGroup field's value. +func (s *CreateGlobalReplicationGroupOutput) SetGlobalReplicationGroup(v *GlobalReplicationGroup) *CreateGlobalReplicationGroupOutput { + s.GlobalReplicationGroup = v return s } -// Represents an individual cache node within a cluster. Each cache node runs -// its own instance of the cluster's protocol-compliant caching software - either -// Memcached or Redis. -// -// The following node types are supported by ElastiCache. Generally speaking, -// the current generation types provide more memory and computational power -// at lower cost when compared to their equivalent previous generation counterparts. -// -// * General purpose: Current generation: M5 node types: cache.m5.large, -// cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, -// cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, -// cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, -// cache.t2.medium Previous generation: (not recommended) T1 node types: -// cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, -// cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, -// cache.m3.2xlarge -// -// * Compute optimized: Previous generation: (not recommended) C1 node types: -// cache.c1.xlarge -// -// * Memory optimized: Current generation: R5 node types: cache.r5.large, -// cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, -// cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, -// cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: -// (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge -// R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, -// cache.r3.8xlarge -// -// Additional node type info -// -// * All current generation instance types are created in Amazon VPC by default. -// -// * Redis append-only files (AOF) are not supported for T1 or T2 instances. -// -// * Redis Multi-AZ with automatic failover is not supported on T1 instances. -// -// * Redis configuration variables appendonly and appendfsync are not supported -// on Redis version 2.8.22 and later. -type CacheNode struct { +// Represents the input of a CreateReplicationGroup operation. +type CreateReplicationGroupInput struct { _ struct{} `type:"structure"` - // The date and time when the cache node was created. 
- CacheNodeCreateTime *time.Time `type:"timestamp"` + // A flag that enables encryption at rest when set to true. + // + // You cannot modify the value of AtRestEncryptionEnabled after the replication + // group is created. To enable encryption at rest on a replication group you + // must set AtRestEncryptionEnabled to true when you create the replication + // group. + // + // Required: Only available when creating a replication group in an Amazon VPC + // using redis version 3.2.6, 4.x or later. + // + // Default: false + AtRestEncryptionEnabled *bool `type:"boolean"` - // The cache node identifier. A node ID is a numeric identifier (0001, 0002, - // etc.). The combination of cluster ID and node ID uniquely identifies every - // cache node used in a customer's AWS account. - CacheNodeId *string `type:"string"` + // Reserved parameter. The password used to access a password protected server. + // + // AuthToken can be specified only on replication groups where TransitEncryptionEnabled + // is true. + // + // For HIPAA compliance, you must specify TransitEncryptionEnabled as true, + // an AuthToken, and a CacheSubnetGroup. + // + // Password constraints: + // + // * Must be only printable ASCII characters. + // + // * Must be at least 16 characters and no more than 128 characters in length. + // + // * The only permitted printable special characters are !, &, #, $, ^, <, + // >, and -. Other printable special characters cannot be used in the AUTH + // token. + // + // For more information, see AUTH password (http://redis.io/commands/AUTH) at + // http://redis.io/commands/AUTH. + AuthToken *string `type:"string"` + + // This parameter is currently disabled. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // Specifies whether a read-only replica is automatically promoted to read/write + // primary if the existing primary fails. + // + // AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) + // replication groups. + // + // Default: false + AutomaticFailoverEnabled *bool `type:"boolean"` + + // The compute and memory capacity of the nodes in the node group (shard). + // + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. 
+ // + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, + // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium + // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 + // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge + // + // Additional node type info + // + // * All current generation instance types are created in Amazon VPC by default. + // + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. + // + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. + // + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. + CacheNodeType *string `type:"string"` + + // The name of the parameter group to associate with this replication group. + // If this argument is omitted, the default cache parameter group for the specified + // engine is used. + // + // If you are restoring to an engine version that is different than the original, + // you must specify the default version of that version. For example, CacheParameterGroupName=default.redis4.0. + // + // If you are running Redis version 3.2.4 or later, only one node group (shard), + // and want to use a default parameter group, we recommend that you specify + // the parameter group by name. + // + // * To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. + // + // * To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. + CacheParameterGroupName *string `type:"string"` + + // A list of cache security group names to associate with this replication group. + CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"` + + // The name of the cache subnet group to be used for the replication group. + // + // If you're going to launch your cluster in an Amazon VPC, you need to create + // a subnet group before you start creating a cluster. For more information, + // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). + CacheSubnetGroupName *string `type:"string"` - // The current state of this cache node. - CacheNodeStatus *string `type:"string"` + // The name of the cache engine to be used for the clusters in this replication + // group. + Engine *string `type:"string"` - // The Availability Zone where this node was created and now resides. 
- CustomerAvailabilityZone *string `type:"string"` + // The version number of the cache engine to be used for the clusters in this + // replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions + // operation. + // + // Important: You can upgrade to a newer engine version (see Selecting a Cache + // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)) + // in the ElastiCache User Guide, but you cannot downgrade to an earlier engine + // version. If you want to use an earlier engine version, you must delete the + // existing cluster or replication group and create it anew with the earlier + // engine version. + EngineVersion *string `type:"string"` - // The hostname for connecting to this cache node. - Endpoint *Endpoint `type:"structure"` + // The name of the Global Datastore + GlobalReplicationGroupId *string `type:"string"` - // The status of the parameter group applied to this cache node. - ParameterGroupStatus *string `type:"string"` + // The ID of the KMS key used to encrypt the disk in the cluster. + KmsKeyId *string `type:"string"` - // The ID of the primary node to which this read replica node is synchronized. - // If this field is empty, this node is not associated with a primary cluster. - SourceCacheNodeId *string `type:"string"` -} + // A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. + // For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html). + MultiAZEnabled *bool `type:"boolean"` -// String returns the string representation -func (s CacheNode) String() string { - return awsutil.Prettify(s) -} + // A list of node group (shard) configuration options. Each node group (shard) + // configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, + // ReplicaCount, and Slots. + // + // If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode + // enabled) replication group, you can use this parameter to individually configure + // each node group (shard), or you can omit this parameter. However, it is required + // when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file. You + // must configure each node group (shard) using this parameter because you must + // specify the slots for each node group. + NodeGroupConfiguration []*NodeGroupConfiguration `locationNameList:"NodeGroupConfiguration" type:"list"` -// GoString returns the string representation -func (s CacheNode) GoString() string { - return s.String() -} + // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service + // (SNS) topic to which notifications are sent. + // + // The Amazon SNS topic owner must be the same as the cluster owner. + NotificationTopicArn *string `type:"string"` -// SetCacheNodeCreateTime sets the CacheNodeCreateTime field's value. -func (s *CacheNode) SetCacheNodeCreateTime(v time.Time) *CacheNode { - s.CacheNodeCreateTime = &v - return s -} + // The number of clusters this replication group initially has. + // + // This parameter is not used if there is more than one node group (shard). + // You should use ReplicasPerNodeGroup instead. + // + // If AutomaticFailoverEnabled is true, the value of this parameter must be + // at least 2. If AutomaticFailoverEnabled is false you can omit this parameter + // (it will default to 1), or you can explicitly set it to a value between 2 + // and 6. 
+ //
+ // The maximum permitted value for NumCacheClusters is 6 (1 primary plus 5 replicas).
+ NumCacheClusters *int64 `type:"integer"`

-// SetCacheNodeId sets the CacheNodeId field's value.
-func (s *CacheNode) SetCacheNodeId(v string) *CacheNode {
- s.CacheNodeId = &v
- return s
-}
+ // An optional parameter that specifies the number of node groups (shards) for
+ // this Redis (cluster mode enabled) replication group. For Redis (cluster mode
+ // disabled) either omit this parameter or set it to 1.
+ //
+ // Default: 1
+ NumNodeGroups *int64 `type:"integer"`

-// SetCacheNodeStatus sets the CacheNodeStatus field's value.
-func (s *CacheNode) SetCacheNodeStatus(v string) *CacheNode {
- s.CacheNodeStatus = &v
- return s
-}
+ // The port number on which each member of the replication group accepts connections.
+ Port *int64 `type:"integer"`

-// SetCustomerAvailabilityZone sets the CustomerAvailabilityZone field's value.
-func (s *CacheNode) SetCustomerAvailabilityZone(v string) *CacheNode {
- s.CustomerAvailabilityZone = &v
- return s
-}
+ // A list of EC2 Availability Zones in which the replication group's clusters
+ // are created. The order of the Availability Zones in the list is the order
+ // in which clusters are allocated. The primary cluster is created in the first
+ // AZ in the list.
+ //
+ // This parameter is not used if there is more than one node group (shard).
+ // You should use NodeGroupConfiguration instead.
+ //
+ // If you are creating your replication group in an Amazon VPC (recommended),
+ // you can only locate clusters in Availability Zones associated with the subnets
+ // in the selected subnet group.
+ //
+ // The number of Availability Zones listed must equal the value of NumCacheClusters.
+ //
+ // Default: system chosen Availability Zones.
+ PreferredCacheClusterAZs []*string `locationNameList:"AvailabilityZone" type:"list"`

-// SetEndpoint sets the Endpoint field's value.
-func (s *CacheNode) SetEndpoint(v *Endpoint) *CacheNode {
- s.Endpoint = v
- return s
-}
+ // Specifies the weekly time range during which maintenance on the cluster is
+ // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
+ // (24H Clock UTC). The minimum maintenance window is a 60 minute period.
+ //
+ // Valid values for ddd are:
+ //
+ // * sun
+ //
+ // * mon
+ //
+ // * tue
+ //
+ // * wed
+ //
+ // * thu
+ //
+ // * fri
+ //
+ // * sat
+ //
+ // Example: sun:23:00-mon:01:30
+ PreferredMaintenanceWindow *string `type:"string"`

-// SetParameterGroupStatus sets the ParameterGroupStatus field's value.
-func (s *CacheNode) SetParameterGroupStatus(v string) *CacheNode {
- s.ParameterGroupStatus = &v
- return s
-}
+ // The identifier of the cluster that serves as the primary for this replication
+ // group. This cluster must already exist and have a status of available.
+ //
+ // This parameter is not required if NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup
+ // is specified.
+ PrimaryClusterId *string `type:"string"`

-// SetSourceCacheNodeId sets the SourceCacheNodeId field's value.
-func (s *CacheNode) SetSourceCacheNodeId(v string) *CacheNode { - s.SourceCacheNodeId = &v - return s -} + // An optional parameter that specifies the number of replica nodes in each + // node group (shard). Valid values are 0 to 5. + ReplicasPerNodeGroup *int64 `type:"integer"` -// A parameter that has a different value for each cache node type it is applied -// to. For example, in a Redis cluster, a cache.m1.large cache node type would -// have a larger maxmemory value than a cache.m1.small type. -type CacheNodeTypeSpecificParameter struct { - _ struct{} `type:"structure"` + // A user-created description for the replication group. + // + // ReplicationGroupDescription is a required field + ReplicationGroupDescription *string `type:"string" required:"true"` - // The valid range of values for the parameter. - AllowedValues *string `type:"string"` + // The replication group identifier. This parameter is stored as a lowercase + // string. + // + // Constraints: + // + // * A name must contain from 1 to 40 alphanumeric characters or hyphens. + // + // * The first character must be a letter. + // + // * A name cannot end with a hyphen or contain two consecutive hyphens. + // + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` - // A list of cache node types and their corresponding values for this parameter. - CacheNodeTypeSpecificValues []*CacheNodeTypeSpecificValue `locationNameList:"CacheNodeTypeSpecificValue" type:"list"` + // One or more Amazon VPC security groups associated with this replication group. + // + // Use this parameter only when you are creating a replication group in an Amazon + // Virtual Private Cloud (Amazon VPC). + SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` - // Indicates whether a change to the parameter is applied immediately or requires - // a reboot for the change to be applied. You can force a reboot or wait until - // the next maintenance window's reboot. For more information, see Rebooting - // a Cluster (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Clusters.Rebooting.html). - ChangeType *string `type:"string" enum:"ChangeType"` + // A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB + // snapshot files stored in Amazon S3. The snapshot files are used to populate + // the new replication group. The Amazon S3 object name in the ARN cannot contain + // any commas. The new replication group will have the number of node groups + // (console: shards) specified by the parameter NumNodeGroups or the number + // of node groups configured by NodeGroupConfiguration regardless of the number + // of ARNs specified here. + // + // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` - // The valid data type for the parameter. - DataType *string `type:"string"` + // The name of a snapshot from which to restore data into the new replication + // group. The snapshot status changes to restoring while the new replication + // group is being created. + SnapshotName *string `type:"string"` - // A description of the parameter. - Description *string `type:"string"` + // The number of days for which ElastiCache retains automatic snapshots before + // deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot + // that was taken today is retained for 5 days before being deleted. + // + // Default: 0 (i.e., automatic backups are disabled for this cluster). 
+ SnapshotRetentionLimit *int64 `type:"integer"` - // Indicates whether (true) or not (false) the parameter can be modified. Some - // parameters have security or operational implications that prevent them from - // being changed. - IsModifiable *bool `type:"boolean"` + // The daily time range (in UTC) during which ElastiCache begins taking a daily + // snapshot of your node group (shard). + // + // Example: 05:00-09:00 + // + // If you do not specify this parameter, ElastiCache automatically chooses an + // appropriate time range. + SnapshotWindow *string `type:"string"` - // The earliest cache engine version to which the parameter can apply. - MinimumEngineVersion *string `type:"string"` + // A list of cost allocation tags to be added to this resource. Tags are comma-separated + // key,value pairs (e.g. Key=myKey, Value=myKeyValue. You can include multiple + // tags as shown following: Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue. + Tags []*Tag `locationNameList:"Tag" type:"list"` - // The name of the parameter. - ParameterName *string `type:"string"` + // A flag that enables in-transit encryption when set to true. + // + // You cannot modify the value of TransitEncryptionEnabled after the cluster + // is created. To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled + // to true when you create a cluster. + // + // This parameter is valid only if the Engine parameter is redis, the EngineVersion + // parameter is 3.2.6, 4.x or later, and the cluster is being created in an + // Amazon VPC. + // + // If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. + // + // Required: Only available when creating a replication group in an Amazon VPC + // using redis version 3.2.6, 4.x or later. + // + // Default: false + // + // For HIPAA compliance, you must specify TransitEncryptionEnabled as true, + // an AuthToken, and a CacheSubnetGroup. + TransitEncryptionEnabled *bool `type:"boolean"` - // The source of the parameter value. - Source *string `type:"string"` + // The list of user groups to associate with the replication group. + UserGroupIds []*string `min:"1" type:"list"` } // String returns the string representation -func (s CacheNodeTypeSpecificParameter) String() string { +func (s CreateReplicationGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CacheNodeTypeSpecificParameter) GoString() string { +func (s CreateReplicationGroupInput) GoString() string { return s.String() } -// SetAllowedValues sets the AllowedValues field's value. -func (s *CacheNodeTypeSpecificParameter) SetAllowedValues(v string) *CacheNodeTypeSpecificParameter { - s.AllowedValues = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReplicationGroupInput"} + if s.ReplicationGroupDescription == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupDescription")) + } + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + if s.UserGroupIds != nil && len(s.UserGroupIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserGroupIds", 1)) + } + if s.NodeGroupConfiguration != nil { + for i, v := range s.NodeGroupConfiguration { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NodeGroupConfiguration", i), err.(request.ErrInvalidParams)) + } + } + } -// SetCacheNodeTypeSpecificValues sets the CacheNodeTypeSpecificValues field's value. -func (s *CacheNodeTypeSpecificParameter) SetCacheNodeTypeSpecificValues(v []*CacheNodeTypeSpecificValue) *CacheNodeTypeSpecificParameter { - s.CacheNodeTypeSpecificValues = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetChangeType sets the ChangeType field's value. -func (s *CacheNodeTypeSpecificParameter) SetChangeType(v string) *CacheNodeTypeSpecificParameter { - s.ChangeType = &v +// SetAtRestEncryptionEnabled sets the AtRestEncryptionEnabled field's value. +func (s *CreateReplicationGroupInput) SetAtRestEncryptionEnabled(v bool) *CreateReplicationGroupInput { + s.AtRestEncryptionEnabled = &v return s } -// SetDataType sets the DataType field's value. -func (s *CacheNodeTypeSpecificParameter) SetDataType(v string) *CacheNodeTypeSpecificParameter { - s.DataType = &v +// SetAuthToken sets the AuthToken field's value. +func (s *CreateReplicationGroupInput) SetAuthToken(v string) *CreateReplicationGroupInput { + s.AuthToken = &v return s } -// SetDescription sets the Description field's value. -func (s *CacheNodeTypeSpecificParameter) SetDescription(v string) *CacheNodeTypeSpecificParameter { - s.Description = &v +// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. +func (s *CreateReplicationGroupInput) SetAutoMinorVersionUpgrade(v bool) *CreateReplicationGroupInput { + s.AutoMinorVersionUpgrade = &v return s } -// SetIsModifiable sets the IsModifiable field's value. -func (s *CacheNodeTypeSpecificParameter) SetIsModifiable(v bool) *CacheNodeTypeSpecificParameter { - s.IsModifiable = &v +// SetAutomaticFailoverEnabled sets the AutomaticFailoverEnabled field's value. +func (s *CreateReplicationGroupInput) SetAutomaticFailoverEnabled(v bool) *CreateReplicationGroupInput { + s.AutomaticFailoverEnabled = &v return s } -// SetMinimumEngineVersion sets the MinimumEngineVersion field's value. -func (s *CacheNodeTypeSpecificParameter) SetMinimumEngineVersion(v string) *CacheNodeTypeSpecificParameter { - s.MinimumEngineVersion = &v +// SetCacheNodeType sets the CacheNodeType field's value. +func (s *CreateReplicationGroupInput) SetCacheNodeType(v string) *CreateReplicationGroupInput { + s.CacheNodeType = &v return s } -// SetParameterName sets the ParameterName field's value. -func (s *CacheNodeTypeSpecificParameter) SetParameterName(v string) *CacheNodeTypeSpecificParameter { - s.ParameterName = &v +// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. +func (s *CreateReplicationGroupInput) SetCacheParameterGroupName(v string) *CreateReplicationGroupInput { + s.CacheParameterGroupName = &v return s } -// SetSource sets the Source field's value. 
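
// A minimal usage sketch for the CreateReplicationGroupInput type above: build the request
// with the generated fluent setters and check it locally with Validate before sending it
// through an ElastiCache client. The replication group ID, node type, and subnet group
// name below are illustrative assumptions, not values taken from this patch.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	// Redis (cluster mode disabled): a single node group with one primary and two replicas.
	input := (&elasticache.CreateReplicationGroupInput{}).
		SetReplicationGroupId("example-redis").
		SetReplicationGroupDescription("example replication group").
		SetEngine("redis").
		SetCacheNodeType("cache.t3.small").
		SetNumCacheClusters(3).
		SetAutomaticFailoverEnabled(true).
		SetAtRestEncryptionEnabled(true).
		SetTransitEncryptionEnabled(true).
		SetCacheSubnetGroupName("example-subnet-group")

	// Validate only checks client-side constraints (required fields, minimum lengths).
	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	fmt.Println(input)
}
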
-func (s *CacheNodeTypeSpecificParameter) SetSource(v string) *CacheNodeTypeSpecificParameter { - s.Source = &v +// SetCacheSecurityGroupNames sets the CacheSecurityGroupNames field's value. +func (s *CreateReplicationGroupInput) SetCacheSecurityGroupNames(v []*string) *CreateReplicationGroupInput { + s.CacheSecurityGroupNames = v return s } -// A value that applies only to a certain cache node type. -type CacheNodeTypeSpecificValue struct { - _ struct{} `type:"structure"` - - // The cache node type for which this value applies. - CacheNodeType *string `type:"string"` - - // The value for the cache node type. - Value *string `type:"string"` -} - -// String returns the string representation -func (s CacheNodeTypeSpecificValue) String() string { - return awsutil.Prettify(s) +// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. +func (s *CreateReplicationGroupInput) SetCacheSubnetGroupName(v string) *CreateReplicationGroupInput { + s.CacheSubnetGroupName = &v + return s } -// GoString returns the string representation -func (s CacheNodeTypeSpecificValue) GoString() string { - return s.String() +// SetEngine sets the Engine field's value. +func (s *CreateReplicationGroupInput) SetEngine(v string) *CreateReplicationGroupInput { + s.Engine = &v + return s } -// SetCacheNodeType sets the CacheNodeType field's value. -func (s *CacheNodeTypeSpecificValue) SetCacheNodeType(v string) *CacheNodeTypeSpecificValue { - s.CacheNodeType = &v +// SetEngineVersion sets the EngineVersion field's value. +func (s *CreateReplicationGroupInput) SetEngineVersion(v string) *CreateReplicationGroupInput { + s.EngineVersion = &v return s } -// SetValue sets the Value field's value. -func (s *CacheNodeTypeSpecificValue) SetValue(v string) *CacheNodeTypeSpecificValue { - s.Value = &v +// SetGlobalReplicationGroupId sets the GlobalReplicationGroupId field's value. +func (s *CreateReplicationGroupInput) SetGlobalReplicationGroupId(v string) *CreateReplicationGroupInput { + s.GlobalReplicationGroupId = &v return s } -// The status of the service update on the cache node -type CacheNodeUpdateStatus struct { - _ struct{} `type:"structure"` - - // The node ID of the cache cluster - CacheNodeId *string `type:"string"` - - // The deletion date of the node - NodeDeletionDate *time.Time `type:"timestamp"` - - // The end date of the update for a node - NodeUpdateEndDate *time.Time `type:"timestamp"` - - // Reflects whether the update was initiated by the customer or automatically - // applied - NodeUpdateInitiatedBy *string `type:"string" enum:"NodeUpdateInitiatedBy"` - - // The date when the update is triggered - NodeUpdateInitiatedDate *time.Time `type:"timestamp"` - - // The start date of the update for a node - NodeUpdateStartDate *time.Time `type:"timestamp"` - - // The update status of the node - NodeUpdateStatus *string `type:"string" enum:"NodeUpdateStatus"` - - // The date when the NodeUpdateStatus was last modified> - NodeUpdateStatusModifiedDate *time.Time `type:"timestamp"` +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *CreateReplicationGroupInput) SetKmsKeyId(v string) *CreateReplicationGroupInput { + s.KmsKeyId = &v + return s } -// String returns the string representation -func (s CacheNodeUpdateStatus) String() string { - return awsutil.Prettify(s) +// SetMultiAZEnabled sets the MultiAZEnabled field's value. 
+func (s *CreateReplicationGroupInput) SetMultiAZEnabled(v bool) *CreateReplicationGroupInput { + s.MultiAZEnabled = &v + return s } -// GoString returns the string representation -func (s CacheNodeUpdateStatus) GoString() string { - return s.String() +// SetNodeGroupConfiguration sets the NodeGroupConfiguration field's value. +func (s *CreateReplicationGroupInput) SetNodeGroupConfiguration(v []*NodeGroupConfiguration) *CreateReplicationGroupInput { + s.NodeGroupConfiguration = v + return s } -// SetCacheNodeId sets the CacheNodeId field's value. -func (s *CacheNodeUpdateStatus) SetCacheNodeId(v string) *CacheNodeUpdateStatus { - s.CacheNodeId = &v +// SetNotificationTopicArn sets the NotificationTopicArn field's value. +func (s *CreateReplicationGroupInput) SetNotificationTopicArn(v string) *CreateReplicationGroupInput { + s.NotificationTopicArn = &v return s } -// SetNodeDeletionDate sets the NodeDeletionDate field's value. -func (s *CacheNodeUpdateStatus) SetNodeDeletionDate(v time.Time) *CacheNodeUpdateStatus { - s.NodeDeletionDate = &v +// SetNumCacheClusters sets the NumCacheClusters field's value. +func (s *CreateReplicationGroupInput) SetNumCacheClusters(v int64) *CreateReplicationGroupInput { + s.NumCacheClusters = &v return s } -// SetNodeUpdateEndDate sets the NodeUpdateEndDate field's value. -func (s *CacheNodeUpdateStatus) SetNodeUpdateEndDate(v time.Time) *CacheNodeUpdateStatus { - s.NodeUpdateEndDate = &v +// SetNumNodeGroups sets the NumNodeGroups field's value. +func (s *CreateReplicationGroupInput) SetNumNodeGroups(v int64) *CreateReplicationGroupInput { + s.NumNodeGroups = &v return s } -// SetNodeUpdateInitiatedBy sets the NodeUpdateInitiatedBy field's value. -func (s *CacheNodeUpdateStatus) SetNodeUpdateInitiatedBy(v string) *CacheNodeUpdateStatus { - s.NodeUpdateInitiatedBy = &v +// SetPort sets the Port field's value. +func (s *CreateReplicationGroupInput) SetPort(v int64) *CreateReplicationGroupInput { + s.Port = &v return s } -// SetNodeUpdateInitiatedDate sets the NodeUpdateInitiatedDate field's value. -func (s *CacheNodeUpdateStatus) SetNodeUpdateInitiatedDate(v time.Time) *CacheNodeUpdateStatus { - s.NodeUpdateInitiatedDate = &v +// SetPreferredCacheClusterAZs sets the PreferredCacheClusterAZs field's value. +func (s *CreateReplicationGroupInput) SetPreferredCacheClusterAZs(v []*string) *CreateReplicationGroupInput { + s.PreferredCacheClusterAZs = v return s } -// SetNodeUpdateStartDate sets the NodeUpdateStartDate field's value. -func (s *CacheNodeUpdateStatus) SetNodeUpdateStartDate(v time.Time) *CacheNodeUpdateStatus { - s.NodeUpdateStartDate = &v +// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. +func (s *CreateReplicationGroupInput) SetPreferredMaintenanceWindow(v string) *CreateReplicationGroupInput { + s.PreferredMaintenanceWindow = &v return s } -// SetNodeUpdateStatus sets the NodeUpdateStatus field's value. -func (s *CacheNodeUpdateStatus) SetNodeUpdateStatus(v string) *CacheNodeUpdateStatus { - s.NodeUpdateStatus = &v +// SetPrimaryClusterId sets the PrimaryClusterId field's value. +func (s *CreateReplicationGroupInput) SetPrimaryClusterId(v string) *CreateReplicationGroupInput { + s.PrimaryClusterId = &v return s } -// SetNodeUpdateStatusModifiedDate sets the NodeUpdateStatusModifiedDate field's value. -func (s *CacheNodeUpdateStatus) SetNodeUpdateStatusModifiedDate(v time.Time) *CacheNodeUpdateStatus { - s.NodeUpdateStatusModifiedDate = &v +// SetReplicasPerNodeGroup sets the ReplicasPerNodeGroup field's value. 
+func (s *CreateReplicationGroupInput) SetReplicasPerNodeGroup(v int64) *CreateReplicationGroupInput { + s.ReplicasPerNodeGroup = &v return s } -// Represents the output of a CreateCacheParameterGroup operation. -type CacheParameterGroup struct { - _ struct{} `type:"structure"` - - // The name of the cache parameter group family that this cache parameter group - // is compatible with. - // - // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 - // | redis4.0 | redis5.0 | - CacheParameterGroupFamily *string `type:"string"` - - // The name of the cache parameter group. - CacheParameterGroupName *string `type:"string"` - - // The description for this cache parameter group. - Description *string `type:"string"` +// SetReplicationGroupDescription sets the ReplicationGroupDescription field's value. +func (s *CreateReplicationGroupInput) SetReplicationGroupDescription(v string) *CreateReplicationGroupInput { + s.ReplicationGroupDescription = &v + return s } -// String returns the string representation -func (s CacheParameterGroup) String() string { - return awsutil.Prettify(s) +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *CreateReplicationGroupInput) SetReplicationGroupId(v string) *CreateReplicationGroupInput { + s.ReplicationGroupId = &v + return s } -// GoString returns the string representation -func (s CacheParameterGroup) GoString() string { - return s.String() +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *CreateReplicationGroupInput) SetSecurityGroupIds(v []*string) *CreateReplicationGroupInput { + s.SecurityGroupIds = v + return s } -// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. -func (s *CacheParameterGroup) SetCacheParameterGroupFamily(v string) *CacheParameterGroup { - s.CacheParameterGroupFamily = &v +// SetSnapshotArns sets the SnapshotArns field's value. +func (s *CreateReplicationGroupInput) SetSnapshotArns(v []*string) *CreateReplicationGroupInput { + s.SnapshotArns = v return s } -// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. -func (s *CacheParameterGroup) SetCacheParameterGroupName(v string) *CacheParameterGroup { - s.CacheParameterGroupName = &v +// SetSnapshotName sets the SnapshotName field's value. +func (s *CreateReplicationGroupInput) SetSnapshotName(v string) *CreateReplicationGroupInput { + s.SnapshotName = &v return s } -// SetDescription sets the Description field's value. -func (s *CacheParameterGroup) SetDescription(v string) *CacheParameterGroup { - s.Description = &v +// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value. +func (s *CreateReplicationGroupInput) SetSnapshotRetentionLimit(v int64) *CreateReplicationGroupInput { + s.SnapshotRetentionLimit = &v return s } -// Represents the output of one of the following operations: -// -// * ModifyCacheParameterGroup -// -// * ResetCacheParameterGroup -type CacheParameterGroupNameMessage struct { - _ struct{} `type:"structure"` - - // The name of the cache parameter group. - CacheParameterGroupName *string `type:"string"` +// SetSnapshotWindow sets the SnapshotWindow field's value. +func (s *CreateReplicationGroupInput) SetSnapshotWindow(v string) *CreateReplicationGroupInput { + s.SnapshotWindow = &v + return s } -// String returns the string representation -func (s CacheParameterGroupNameMessage) String() string { - return awsutil.Prettify(s) +// SetTags sets the Tags field's value. 
+func (s *CreateReplicationGroupInput) SetTags(v []*Tag) *CreateReplicationGroupInput { + s.Tags = v + return s } -// GoString returns the string representation -func (s CacheParameterGroupNameMessage) GoString() string { - return s.String() +// SetTransitEncryptionEnabled sets the TransitEncryptionEnabled field's value. +func (s *CreateReplicationGroupInput) SetTransitEncryptionEnabled(v bool) *CreateReplicationGroupInput { + s.TransitEncryptionEnabled = &v + return s } -// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. -func (s *CacheParameterGroupNameMessage) SetCacheParameterGroupName(v string) *CacheParameterGroupNameMessage { - s.CacheParameterGroupName = &v +// SetUserGroupIds sets the UserGroupIds field's value. +func (s *CreateReplicationGroupInput) SetUserGroupIds(v []*string) *CreateReplicationGroupInput { + s.UserGroupIds = v return s } -// Status of the cache parameter group. -type CacheParameterGroupStatus struct { +type CreateReplicationGroupOutput struct { _ struct{} `type:"structure"` - // A list of the cache node IDs which need to be rebooted for parameter changes - // to be applied. A node ID is a numeric identifier (0001, 0002, etc.). - CacheNodeIdsToReboot []*string `locationNameList:"CacheNodeId" type:"list"` - - // The name of the cache parameter group. - CacheParameterGroupName *string `type:"string"` - - // The status of parameter updates. - ParameterApplyStatus *string `type:"string"` + // Contains all of the attributes of a specific Redis replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` } // String returns the string representation -func (s CacheParameterGroupStatus) String() string { +func (s CreateReplicationGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CacheParameterGroupStatus) GoString() string { +func (s CreateReplicationGroupOutput) GoString() string { return s.String() } -// SetCacheNodeIdsToReboot sets the CacheNodeIdsToReboot field's value. -func (s *CacheParameterGroupStatus) SetCacheNodeIdsToReboot(v []*string) *CacheParameterGroupStatus { - s.CacheNodeIdsToReboot = v - return s -} - -// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. -func (s *CacheParameterGroupStatus) SetCacheParameterGroupName(v string) *CacheParameterGroupStatus { - s.CacheParameterGroupName = &v - return s -} - -// SetParameterApplyStatus sets the ParameterApplyStatus field's value. -func (s *CacheParameterGroupStatus) SetParameterApplyStatus(v string) *CacheParameterGroupStatus { - s.ParameterApplyStatus = &v +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *CreateReplicationGroupOutput) SetReplicationGroup(v *ReplicationGroup) *CreateReplicationGroupOutput { + s.ReplicationGroup = v return s } -// Represents the output of one of the following operations: -// -// * AuthorizeCacheSecurityGroupIngress -// -// * CreateCacheSecurityGroup -// -// * RevokeCacheSecurityGroupIngress -type CacheSecurityGroup struct { +// Represents the input of a CreateSnapshot operation. +type CreateSnapshotInput struct { _ struct{} `type:"structure"` - // The name of the cache security group. - CacheSecurityGroupName *string `type:"string"` + // The identifier of an existing cluster. The snapshot is created from this + // cluster. + CacheClusterId *string `type:"string"` - // The description of the cache security group. - Description *string `type:"string"` + // The ID of the KMS key used to encrypt the snapshot. 
+ KmsKeyId *string `type:"string"` - // A list of Amazon EC2 security groups that are associated with this cache - // security group. - EC2SecurityGroups []*EC2SecurityGroup `locationNameList:"EC2SecurityGroup" type:"list"` + // The identifier of an existing replication group. The snapshot is created + // from this replication group. + ReplicationGroupId *string `type:"string"` - // The AWS account ID of the cache security group owner. - OwnerId *string `type:"string"` + // A name for the snapshot being created. + // + // SnapshotName is a required field + SnapshotName *string `type:"string" required:"true"` } // String returns the string representation -func (s CacheSecurityGroup) String() string { +func (s CreateSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CacheSecurityGroup) GoString() string { +func (s CreateSnapshotInput) GoString() string { return s.String() } -// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value. -func (s *CacheSecurityGroup) SetCacheSecurityGroupName(v string) *CacheSecurityGroup { - s.CacheSecurityGroupName = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotInput"} + if s.SnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCacheClusterId sets the CacheClusterId field's value. +func (s *CreateSnapshotInput) SetCacheClusterId(v string) *CreateSnapshotInput { + s.CacheClusterId = &v return s } -// SetDescription sets the Description field's value. -func (s *CacheSecurityGroup) SetDescription(v string) *CacheSecurityGroup { - s.Description = &v +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *CreateSnapshotInput) SetKmsKeyId(v string) *CreateSnapshotInput { + s.KmsKeyId = &v return s } -// SetEC2SecurityGroups sets the EC2SecurityGroups field's value. -func (s *CacheSecurityGroup) SetEC2SecurityGroups(v []*EC2SecurityGroup) *CacheSecurityGroup { - s.EC2SecurityGroups = v +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *CreateSnapshotInput) SetReplicationGroupId(v string) *CreateSnapshotInput { + s.ReplicationGroupId = &v return s } -// SetOwnerId sets the OwnerId field's value. -func (s *CacheSecurityGroup) SetOwnerId(v string) *CacheSecurityGroup { - s.OwnerId = &v +// SetSnapshotName sets the SnapshotName field's value. +func (s *CreateSnapshotInput) SetSnapshotName(v string) *CreateSnapshotInput { + s.SnapshotName = &v return s } -// Represents a cluster's status within a particular cache security group. -type CacheSecurityGroupMembership struct { +type CreateSnapshotOutput struct { _ struct{} `type:"structure"` - // The name of the cache security group. - CacheSecurityGroupName *string `type:"string"` - - // The membership status in the cache security group. The status changes when - // a cache security group is modified, or when the cache security groups assigned - // to a cluster are modified. - Status *string `type:"string"` + // Represents a copy of an entire Redis cluster as of the time when the snapshot + // was taken. 
+ Snapshot *Snapshot `type:"structure"` } // String returns the string representation -func (s CacheSecurityGroupMembership) String() string { +func (s CreateSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CacheSecurityGroupMembership) GoString() string { +func (s CreateSnapshotOutput) GoString() string { return s.String() } -// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value. -func (s *CacheSecurityGroupMembership) SetCacheSecurityGroupName(v string) *CacheSecurityGroupMembership { - s.CacheSecurityGroupName = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *CacheSecurityGroupMembership) SetStatus(v string) *CacheSecurityGroupMembership { - s.Status = &v +// SetSnapshot sets the Snapshot field's value. +func (s *CreateSnapshotOutput) SetSnapshot(v *Snapshot) *CreateSnapshotOutput { + s.Snapshot = v return s } -// Represents the output of one of the following operations: -// -// * CreateCacheSubnetGroup -// -// * ModifyCacheSubnetGroup -type CacheSubnetGroup struct { +type CreateUserGroupInput struct { _ struct{} `type:"structure"` - // The description of the cache subnet group. - CacheSubnetGroupDescription *string `type:"string"` - - // The name of the cache subnet group. - CacheSubnetGroupName *string `type:"string"` + // Must be Redis. + // + // Engine is a required field + Engine *string `type:"string" required:"true"` - // A list of subnets associated with the cache subnet group. - Subnets []*Subnet `locationNameList:"Subnet" type:"list"` + // The ID of the user group. + // + // UserGroupId is a required field + UserGroupId *string `type:"string" required:"true"` - // The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet - // group. - VpcId *string `type:"string"` + // The list of user IDs that belong to the user group. + UserIds []*string `min:"1" type:"list"` } // String returns the string representation -func (s CacheSubnetGroup) String() string { +func (s CreateUserGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CacheSubnetGroup) GoString() string { +func (s CreateUserGroupInput) GoString() string { return s.String() } -// SetCacheSubnetGroupDescription sets the CacheSubnetGroupDescription field's value. -func (s *CacheSubnetGroup) SetCacheSubnetGroupDescription(v string) *CacheSubnetGroup { - s.CacheSubnetGroupDescription = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateUserGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUserGroupInput"} + if s.Engine == nil { + invalidParams.Add(request.NewErrParamRequired("Engine")) + } + if s.UserGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("UserGroupId")) + } + if s.UserIds != nil && len(s.UserIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. -func (s *CacheSubnetGroup) SetCacheSubnetGroupName(v string) *CacheSubnetGroup { - s.CacheSubnetGroupName = &v +// SetEngine sets the Engine field's value. +func (s *CreateUserGroupInput) SetEngine(v string) *CreateUserGroupInput { + s.Engine = &v return s } -// SetSubnets sets the Subnets field's value. 
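
// A usage sketch for the CreateSnapshotInput/CreateSnapshotOutput pair above: take a
// manual snapshot of a single cluster through the ElastiCache client. The session setup
// and the cluster/snapshot names are illustrative assumptions; a replication group
// snapshot would set ReplicationGroupId instead of CacheClusterId.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	input := (&elasticache.CreateSnapshotInput{}).
		SetCacheClusterId("example-cluster").
		SetSnapshotName("example-manual-snapshot")

	out, err := svc.CreateSnapshot(input)
	if err != nil {
		fmt.Println("CreateSnapshot failed:", err)
		return
	}
	// The returned Snapshot describes the snapshot while it is still being created.
	fmt.Println(out.Snapshot)
}
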
-func (s *CacheSubnetGroup) SetSubnets(v []*Subnet) *CacheSubnetGroup { - s.Subnets = v +// SetUserGroupId sets the UserGroupId field's value. +func (s *CreateUserGroupInput) SetUserGroupId(v string) *CreateUserGroupInput { + s.UserGroupId = &v return s } -// SetVpcId sets the VpcId field's value. -func (s *CacheSubnetGroup) SetVpcId(v string) *CacheSubnetGroup { - s.VpcId = &v +// SetUserIds sets the UserIds field's value. +func (s *CreateUserGroupInput) SetUserIds(v []*string) *CreateUserGroupInput { + s.UserIds = v return s } -type CompleteMigrationInput struct { +type CreateUserGroupOutput struct { _ struct{} `type:"structure"` - // Forces the migration to stop without ensuring that data is in sync. It is - // recommended to use this option only to abort the migration and not recommended - // when application wants to continue migration to ElastiCache. - Force *bool `type:"boolean"` + // The Amazon Resource Name (ARN) of the user group. + ARN *string `type:"string"` - // The ID of the replication group to which data is being migrated. - // - // ReplicationGroupId is a required field - ReplicationGroupId *string `type:"string" required:"true"` + // Must be Redis. + Engine *string `type:"string"` + + // A list of updates being applied to the user groups. + PendingChanges *UserGroupPendingChanges `type:"structure"` + + // A list of replication groups that the user group can access. + ReplicationGroups []*string `type:"list"` + + // Indicates user group status. Can be "creating", "active", "modifying", "deleting". + Status *string `type:"string"` + + // The ID of the user group. + UserGroupId *string `type:"string"` + + // The list of user IDs that belong to the user group. + UserIds []*string `type:"list"` } // String returns the string representation -func (s CompleteMigrationInput) String() string { +func (s CreateUserGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CompleteMigrationInput) GoString() string { +func (s CreateUserGroupOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CompleteMigrationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CompleteMigrationInput"} - if s.ReplicationGroupId == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetARN sets the ARN field's value. +func (s *CreateUserGroupOutput) SetARN(v string) *CreateUserGroupOutput { + s.ARN = &v + return s } -// SetForce sets the Force field's value. -func (s *CompleteMigrationInput) SetForce(v bool) *CompleteMigrationInput { - s.Force = &v +// SetEngine sets the Engine field's value. +func (s *CreateUserGroupOutput) SetEngine(v string) *CreateUserGroupOutput { + s.Engine = &v return s } -// SetReplicationGroupId sets the ReplicationGroupId field's value. -func (s *CompleteMigrationInput) SetReplicationGroupId(v string) *CompleteMigrationInput { - s.ReplicationGroupId = &v +// SetPendingChanges sets the PendingChanges field's value. +func (s *CreateUserGroupOutput) SetPendingChanges(v *UserGroupPendingChanges) *CreateUserGroupOutput { + s.PendingChanges = v return s } -type CompleteMigrationOutput struct { - _ struct{} `type:"structure"` - - // Contains all of the attributes of a specific Redis replication group. 
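
// A usage sketch for the CreateUserGroupInput type above (Redis RBAC). The user group ID
// and user IDs are illustrative assumptions; the referenced users must already exist in
// the account, and the Engine value follows the "Must be Redis" note in the field doc.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	input := (&elasticache.CreateUserGroupInput{}).
		SetEngine("REDIS").
		SetUserGroupId("example-user-group").
		SetUserIds([]*string{
			aws.String("example-default-user"), // assumed existing user IDs
			aws.String("example-app-user"),
		})

	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	out, err := svc.CreateUserGroup(input)
	if err != nil {
		fmt.Println("CreateUserGroup failed:", err)
		return
	}
	fmt.Println("user group status:", aws.StringValue(out.Status))
}
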
- ReplicationGroup *ReplicationGroup `type:"structure"` +// SetReplicationGroups sets the ReplicationGroups field's value. +func (s *CreateUserGroupOutput) SetReplicationGroups(v []*string) *CreateUserGroupOutput { + s.ReplicationGroups = v + return s } -// String returns the string representation -func (s CompleteMigrationOutput) String() string { - return awsutil.Prettify(s) +// SetStatus sets the Status field's value. +func (s *CreateUserGroupOutput) SetStatus(v string) *CreateUserGroupOutput { + s.Status = &v + return s } -// GoString returns the string representation -func (s CompleteMigrationOutput) GoString() string { - return s.String() +// SetUserGroupId sets the UserGroupId field's value. +func (s *CreateUserGroupOutput) SetUserGroupId(v string) *CreateUserGroupOutput { + s.UserGroupId = &v + return s } -// SetReplicationGroup sets the ReplicationGroup field's value. -func (s *CompleteMigrationOutput) SetReplicationGroup(v *ReplicationGroup) *CompleteMigrationOutput { - s.ReplicationGroup = v +// SetUserIds sets the UserIds field's value. +func (s *CreateUserGroupOutput) SetUserIds(v []*string) *CreateUserGroupOutput { + s.UserIds = v return s } -// Node group (shard) configuration options when adding or removing replicas. -// Each node group (shard) configuration has the following members: NodeGroupId, -// NewReplicaCount, and PreferredAvailabilityZones. -type ConfigureShard struct { +type CreateUserInput struct { _ struct{} `type:"structure"` - // The number of replicas you want in this node group at the end of this operation. - // The maximum value for NewReplicaCount is 5. The minimum value depends upon - // the type of Redis replication group you are working with. - // - // The minimum number of replicas in a shard or replication group is: - // - // * Redis (cluster mode disabled) If Multi-AZ with Automatic Failover is - // enabled: 1 If Multi-AZ with Automatic Failover is not enable: 0 + // Access permissions string used for this user account. // - // * Redis (cluster mode enabled): 0 (though you will not be able to failover - // to a replica if your primary node fails) + // AccessString is a required field + AccessString *string `type:"string" required:"true"` + + // Must be Redis. // - // NewReplicaCount is a required field - NewReplicaCount *int64 `type:"integer" required:"true"` + // Engine is a required field + Engine *string `type:"string" required:"true"` - // The 4-digit id for the node group you are configuring. For Redis (cluster - // mode disabled) replication groups, the node group id is always 0001. To find - // a Redis (cluster mode enabled)'s node group's (shard's) id, see Finding a - // Shard's Id (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/shard-find-id.html). + // Indicates a password is not required for this user account. + NoPasswordRequired *bool `type:"boolean"` + + // Passwords used for this user account. You can create up to two passwords + // for each user. + Passwords []*string `min:"1" type:"list"` + + // The ID of the user. // - // NodeGroupId is a required field - NodeGroupId *string `min:"1" type:"string" required:"true"` + // UserId is a required field + UserId *string `min:"1" type:"string" required:"true"` - // A list of PreferredAvailabilityZone strings that specify which availability - // zones the replication group's nodes are to be in. The nummber of PreferredAvailabilityZone - // values must equal the value of NewReplicaCount plus 1 to account for the - // primary node. 
If this member of ReplicaConfiguration is omitted, ElastiCache - // for Redis selects the availability zone for each of the replicas. - PreferredAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"` + // The username of the user. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ConfigureShard) String() string { +func (s CreateUserInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConfigureShard) GoString() string { +func (s CreateUserInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ConfigureShard) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ConfigureShard"} - if s.NewReplicaCount == nil { - invalidParams.Add(request.NewErrParamRequired("NewReplicaCount")) +func (s *CreateUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUserInput"} + if s.AccessString == nil { + invalidParams.Add(request.NewErrParamRequired("AccessString")) } - if s.NodeGroupId == nil { - invalidParams.Add(request.NewErrParamRequired("NodeGroupId")) + if s.Engine == nil { + invalidParams.Add(request.NewErrParamRequired("Engine")) } - if s.NodeGroupId != nil && len(*s.NodeGroupId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NodeGroupId", 1)) + if s.Passwords != nil && len(s.Passwords) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Passwords", 1)) + } + if s.UserId == nil { + invalidParams.Add(request.NewErrParamRequired("UserId")) + } + if s.UserId != nil && len(*s.UserId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserId", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) } if invalidParams.Len() > 0 { @@ -7117,400 +10804,351 @@ func (s *ConfigureShard) Validate() error { return nil } -// SetNewReplicaCount sets the NewReplicaCount field's value. -func (s *ConfigureShard) SetNewReplicaCount(v int64) *ConfigureShard { - s.NewReplicaCount = &v +// SetAccessString sets the AccessString field's value. +func (s *CreateUserInput) SetAccessString(v string) *CreateUserInput { + s.AccessString = &v return s } -// SetNodeGroupId sets the NodeGroupId field's value. -func (s *ConfigureShard) SetNodeGroupId(v string) *ConfigureShard { - s.NodeGroupId = &v +// SetEngine sets the Engine field's value. +func (s *CreateUserInput) SetEngine(v string) *CreateUserInput { + s.Engine = &v return s } -// SetPreferredAvailabilityZones sets the PreferredAvailabilityZones field's value. -func (s *ConfigureShard) SetPreferredAvailabilityZones(v []*string) *ConfigureShard { - s.PreferredAvailabilityZones = v +// SetNoPasswordRequired sets the NoPasswordRequired field's value. +func (s *CreateUserInput) SetNoPasswordRequired(v bool) *CreateUserInput { + s.NoPasswordRequired = &v return s } -// Represents the input of a CopySnapshotMessage operation. -type CopySnapshotInput struct { +// SetPasswords sets the Passwords field's value. +func (s *CreateUserInput) SetPasswords(v []*string) *CreateUserInput { + s.Passwords = v + return s +} + +// SetUserId sets the UserId field's value. +func (s *CreateUserInput) SetUserId(v string) *CreateUserInput { + s.UserId = &v + return s +} + +// SetUserName sets the UserName field's value. 
+func (s *CreateUserInput) SetUserName(v string) *CreateUserInput { + s.UserName = &v + return s +} + +type CreateUserOutput struct { _ struct{} `type:"structure"` - // The ID of the KMS key used to encrypt the target snapshot. - KmsKeyId *string `type:"string"` + // The Amazon Resource Name (ARN) of the user account. + ARN *string `type:"string"` - // The name of an existing snapshot from which to make a copy. - // - // SourceSnapshotName is a required field - SourceSnapshotName *string `type:"string" required:"true"` + // Access permissions string used for this user account. + AccessString *string `type:"string"` - // The Amazon S3 bucket to which the snapshot is exported. This parameter is - // used only when exporting a snapshot for external access. - // - // When using this parameter to export a snapshot, be sure Amazon ElastiCache - // has the needed permissions to this S3 bucket. For more information, see Step - // 2: Grant ElastiCache Access to Your Amazon S3 Bucket (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/backups-exporting.html#backups-exporting-grant-access) - // in the Amazon ElastiCache User Guide. - // - // For more information, see Exporting a Snapshot (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/Snapshots.Exporting.html) - // in the Amazon ElastiCache User Guide. - TargetBucket *string `type:"string"` + // Denotes whether the user requires a password to authenticate. + Authentication *Authentication `type:"structure"` - // A name for the snapshot copy. ElastiCache does not permit overwriting a snapshot, - // therefore this name must be unique within its context - ElastiCache or an - // Amazon S3 bucket if exporting. - // - // TargetSnapshotName is a required field - TargetSnapshotName *string `type:"string" required:"true"` + // Must be Redis. + Engine *string `type:"string"` + + // Indicates the user status. Can be "active", "modifying" or "deleting". + Status *string `type:"string"` + + // Returns a list of the user group IDs the user belongs to. + UserGroupIds []*string `type:"list"` + + // The ID of the user. + UserId *string `type:"string"` + + // The username of the user. + UserName *string `type:"string"` } // String returns the string representation -func (s CopySnapshotInput) String() string { +func (s CreateUserOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CopySnapshotInput) GoString() string { +func (s CreateUserOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CopySnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CopySnapshotInput"} - if s.SourceSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("SourceSnapshotName")) - } - if s.TargetSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("TargetSnapshotName")) - } +// SetARN sets the ARN field's value. +func (s *CreateUserOutput) SetARN(v string) *CreateUserOutput { + s.ARN = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetAccessString sets the AccessString field's value. +func (s *CreateUserOutput) SetAccessString(v string) *CreateUserOutput { + s.AccessString = &v + return s } -// SetKmsKeyId sets the KmsKeyId field's value. -func (s *CopySnapshotInput) SetKmsKeyId(v string) *CopySnapshotInput { - s.KmsKeyId = &v +// SetAuthentication sets the Authentication field's value. 
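
// A usage sketch for the CreateUserInput type above: create a Redis RBAC user with a
// password and an access string. The user ID, user name, password, and access string
// are illustrative assumptions (the access string follows Redis ACL syntax); setting
// NoPasswordRequired would be the alternative to Passwords.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	input := (&elasticache.CreateUserInput{}).
		SetEngine("REDIS").
		SetUserId("example-app-user").
		SetUserName("app-user").
		// Redis ACL-style access string and a password of at least 16 characters;
		// both are placeholder values.
		SetAccessString("on ~app:* +@read").
		SetPasswords([]*string{aws.String("example-password-0123456789")})

	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	out, err := svc.CreateUser(input)
	if err != nil {
		fmt.Println("CreateUser failed:", err)
		return
	}
	fmt.Println("user status:", aws.StringValue(out.Status))
}
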
+func (s *CreateUserOutput) SetAuthentication(v *Authentication) *CreateUserOutput { + s.Authentication = v return s } -// SetSourceSnapshotName sets the SourceSnapshotName field's value. -func (s *CopySnapshotInput) SetSourceSnapshotName(v string) *CopySnapshotInput { - s.SourceSnapshotName = &v +// SetEngine sets the Engine field's value. +func (s *CreateUserOutput) SetEngine(v string) *CreateUserOutput { + s.Engine = &v return s } -// SetTargetBucket sets the TargetBucket field's value. -func (s *CopySnapshotInput) SetTargetBucket(v string) *CopySnapshotInput { - s.TargetBucket = &v +// SetStatus sets the Status field's value. +func (s *CreateUserOutput) SetStatus(v string) *CreateUserOutput { + s.Status = &v return s } -// SetTargetSnapshotName sets the TargetSnapshotName field's value. -func (s *CopySnapshotInput) SetTargetSnapshotName(v string) *CopySnapshotInput { - s.TargetSnapshotName = &v +// SetUserGroupIds sets the UserGroupIds field's value. +func (s *CreateUserOutput) SetUserGroupIds(v []*string) *CreateUserOutput { + s.UserGroupIds = v return s } -type CopySnapshotOutput struct { +// SetUserId sets the UserId field's value. +func (s *CreateUserOutput) SetUserId(v string) *CreateUserOutput { + s.UserId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *CreateUserOutput) SetUserName(v string) *CreateUserOutput { + s.UserName = &v + return s +} + +// The endpoint from which data should be migrated. +type CustomerNodeEndpoint struct { _ struct{} `type:"structure"` - // Represents a copy of an entire Redis cluster as of the time when the snapshot - // was taken. - Snapshot *Snapshot `type:"structure"` + // The address of the node endpoint + Address *string `type:"string"` + + // The port of the node endpoint + Port *int64 `type:"integer"` } // String returns the string representation -func (s CopySnapshotOutput) String() string { +func (s CustomerNodeEndpoint) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CopySnapshotOutput) GoString() string { +func (s CustomerNodeEndpoint) GoString() string { return s.String() } -// SetSnapshot sets the Snapshot field's value. -func (s *CopySnapshotOutput) SetSnapshot(v *Snapshot) *CopySnapshotOutput { - s.Snapshot = v +// SetAddress sets the Address field's value. +func (s *CustomerNodeEndpoint) SetAddress(v string) *CustomerNodeEndpoint { + s.Address = &v return s } -// Represents the input of a CreateCacheCluster operation. -type CreateCacheClusterInput struct { - _ struct{} `type:"structure"` - - // Specifies whether the nodes in this Memcached cluster are created in a single - // Availability Zone or created across multiple Availability Zones in the cluster's - // region. - // - // This parameter is only supported for Memcached clusters. - // - // If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache - // assumes single-az mode. - AZMode *string `type:"string" enum:"AZMode"` - - // Reserved parameter. The password used to access a password protected server. - // - // Password constraints: - // - // * Must be only printable ASCII characters. - // - // * Must be at least 16 characters and no more than 128 characters in length. - // - // * The only permitted printable special characters are !, &, #, $, ^, <, - // >, and -. Other printable special characters cannot be used in the AUTH - // token. - // - // For more information, see AUTH password (http://redis.io/commands/AUTH) at - // http://redis.io/commands/AUTH. 
- AuthToken *string `type:"string"` - - // This parameter is currently disabled. - AutoMinorVersionUpgrade *bool `type:"boolean"` - - // The node group (shard) identifier. This parameter is stored as a lowercase - // string. - // - // Constraints: - // - // * A name must contain from 1 to 50 alphanumeric characters or hyphens. - // - // * The first character must be a letter. - // - // * A name cannot end with a hyphen or contain two consecutive hyphens. - // - // CacheClusterId is a required field - CacheClusterId *string `type:"string" required:"true"` - - // The compute and memory capacity of the nodes in the node group (shard). - // - // The following node types are supported by ElastiCache. Generally speaking, - // the current generation types provide more memory and computational power - // at lower cost when compared to their equivalent previous generation counterparts. - // - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, - // cache.t2.medium Previous generation: (not recommended) T1 node types: - // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, - // cache.m3.2xlarge - // - // * Compute optimized: Previous generation: (not recommended) C1 node types: - // cache.c1.xlarge - // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge - // - // Additional node type info - // - // * All current generation instance types are created in Amazon VPC by default. - // - // * Redis append-only files (AOF) are not supported for T1 or T2 instances. - // - // * Redis Multi-AZ with automatic failover is not supported on T1 instances. - // - // * Redis configuration variables appendonly and appendfsync are not supported - // on Redis version 2.8.22 and later. - CacheNodeType *string `type:"string"` +// SetPort sets the Port field's value. +func (s *CustomerNodeEndpoint) SetPort(v int64) *CustomerNodeEndpoint { + s.Port = &v + return s +} - // The name of the parameter group to associate with this cluster. If this argument - // is omitted, the default parameter group for the specified engine is used. - // You cannot use any parameter group which has cluster-enabled='yes' when creating - // a cluster. - CacheParameterGroupName *string `type:"string"` +type DecreaseNodeGroupsInGlobalReplicationGroupInput struct { + _ struct{} `type:"structure"` - // A list of security group names to associate with this cluster. + // Indicates that the shard reconfiguration process begins immediately. At present, + // the only permitted value for this parameter is true. // - // Use this parameter only when you are creating a cluster outside of an Amazon - // Virtual Private Cloud (Amazon VPC). 
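
// A small sketch of the CustomerNodeEndpoint type above, which describes the source
// endpoint of a self-managed Redis instance whose data is being migrated into
// ElastiCache. The address and port are illustrative assumptions; such an endpoint is
// typically supplied to the StartMigration operation (not shown in this hunk) as part
// of its endpoint list.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	// Self-managed Redis endpoint that data will be migrated from.
	endpoint := (&elasticache.CustomerNodeEndpoint{}).
		SetAddress("10.0.0.42").
		SetPort(6379)

	fmt.Println(endpoint)
}
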
- CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"` + // ApplyImmediately is a required field + ApplyImmediately *bool `type:"boolean" required:"true"` - // The name of the subnet group to be used for the cluster. - // - // Use this parameter only when you are creating a cluster in an Amazon Virtual - // Private Cloud (Amazon VPC). - // - // If you're going to launch your cluster in an Amazon VPC, you need to create - // a subnet group before you start creating a cluster. For more information, - // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). - CacheSubnetGroupName *string `type:"string"` + // If the value of NodeGroupCount is less than the current number of node groups + // (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. + // NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. + // ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove + // from the cluster. + GlobalNodeGroupsToRemove []*string `locationNameList:"GlobalNodeGroupId" type:"list"` - // The name of the cache engine to be used for this cluster. - // - // Valid values for this parameter are: memcached | redis - Engine *string `type:"string"` + // If the value of NodeGroupCount is less than the current number of node groups + // (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. + // NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. + // ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove + // from the cluster. + GlobalNodeGroupsToRetain []*string `locationNameList:"GlobalNodeGroupId" type:"list"` - // The version number of the cache engine to be used for this cluster. To view - // the supported cache engine versions, use the DescribeCacheEngineVersions - // operation. + // The name of the Global Datastore // - // Important: You can upgrade to a newer engine version (see Selecting a Cache - // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)), - // but you cannot downgrade to an earlier engine version. If you want to use - // an earlier engine version, you must delete the existing cluster or replication - // group and create it anew with the earlier engine version. - EngineVersion *string `type:"string"` + // GlobalReplicationGroupId is a required field + GlobalReplicationGroupId *string `type:"string" required:"true"` - // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service - // (SNS) topic to which notifications are sent. + // The number of node groups (shards) that results from the modification of + // the shard configuration // - // The Amazon SNS topic owner must be the same as the cluster owner. - NotificationTopicArn *string `type:"string"` + // NodeGroupCount is a required field + NodeGroupCount *int64 `type:"integer" required:"true"` +} - // The initial number of cache nodes that the cluster has. - // - // For clusters running Redis, this value must be 1. For clusters running Memcached, - // this value must be between 1 and 20. - // - // If you need more than 20 nodes for your Memcached cluster, please fill out - // the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/ - // (http://aws.amazon.com/contact-us/elasticache-node-limit-request/). 
- NumCacheNodes *int64 `type:"integer"` +// String returns the string representation +func (s DecreaseNodeGroupsInGlobalReplicationGroupInput) String() string { + return awsutil.Prettify(s) +} - // The port number on which each of the cache nodes accepts connections. - Port *int64 `type:"integer"` +// GoString returns the string representation +func (s DecreaseNodeGroupsInGlobalReplicationGroupInput) GoString() string { + return s.String() +} - // The EC2 Availability Zone in which the cluster is created. - // - // All nodes belonging to this Memcached cluster are placed in the preferred - // Availability Zone. If you want to create your nodes across multiple Availability - // Zones, use PreferredAvailabilityZones. - // - // Default: System chosen Availability Zone. - PreferredAvailabilityZone *string `type:"string"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecreaseNodeGroupsInGlobalReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecreaseNodeGroupsInGlobalReplicationGroupInput"} + if s.ApplyImmediately == nil { + invalidParams.Add(request.NewErrParamRequired("ApplyImmediately")) + } + if s.GlobalReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalReplicationGroupId")) + } + if s.NodeGroupCount == nil { + invalidParams.Add(request.NewErrParamRequired("NodeGroupCount")) + } - // A list of the Availability Zones in which cache nodes are created. The order - // of the zones in the list is not important. - // - // This option is only supported on Memcached. - // - // If you are creating your cluster in an Amazon VPC (recommended) you can only - // locate nodes in Availability Zones that are associated with the subnets in - // the selected subnet group. - // - // The number of Availability Zones listed must equal the value of NumCacheNodes. - // - // If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone - // instead, or repeat the Availability Zone multiple times in the list. - // - // Default: System chosen Availability Zones. - PreferredAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // Specifies the weekly time range during which maintenance on the cluster is - // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi - // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid - // values for ddd are: - // - // Specifies the weekly time range during which maintenance on the cluster is - // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi - // (24H Clock UTC). The minimum maintenance window is a 60 minute period. - // - // Valid values for ddd are: - // - // * sun - // - // * mon - // - // * tue - // - // * wed - // - // * thu - // - // * fri - // - // * sat - // - // Example: sun:23:00-mon:01:30 - PreferredMaintenanceWindow *string `type:"string"` +// SetApplyImmediately sets the ApplyImmediately field's value. +func (s *DecreaseNodeGroupsInGlobalReplicationGroupInput) SetApplyImmediately(v bool) *DecreaseNodeGroupsInGlobalReplicationGroupInput { + s.ApplyImmediately = &v + return s +} - // The ID of the replication group to which this cluster should belong. 
If this - // parameter is specified, the cluster is added to the specified replication - // group as a read replica; otherwise, the cluster is a standalone primary that - // is not part of any replication group. - // - // If the specified replication group is Multi-AZ enabled and the Availability - // Zone is not specified, the cluster is created in Availability Zones that - // provide the best spread of read replicas across Availability Zones. - // - // This parameter is only valid if the Engine parameter is redis. - ReplicationGroupId *string `type:"string"` +// SetGlobalNodeGroupsToRemove sets the GlobalNodeGroupsToRemove field's value. +func (s *DecreaseNodeGroupsInGlobalReplicationGroupInput) SetGlobalNodeGroupsToRemove(v []*string) *DecreaseNodeGroupsInGlobalReplicationGroupInput { + s.GlobalNodeGroupsToRemove = v + return s +} - // One or more VPC security groups associated with the cluster. - // - // Use this parameter only when you are creating a cluster in an Amazon Virtual - // Private Cloud (Amazon VPC). - SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` +// SetGlobalNodeGroupsToRetain sets the GlobalNodeGroupsToRetain field's value. +func (s *DecreaseNodeGroupsInGlobalReplicationGroupInput) SetGlobalNodeGroupsToRetain(v []*string) *DecreaseNodeGroupsInGlobalReplicationGroupInput { + s.GlobalNodeGroupsToRetain = v + return s +} - // A single-element string list containing an Amazon Resource Name (ARN) that - // uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot - // file is used to populate the node group (shard). The Amazon S3 object name - // in the ARN cannot contain any commas. - // - // This parameter is only valid if the Engine parameter is redis. - // - // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb - SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` +// SetGlobalReplicationGroupId sets the GlobalReplicationGroupId field's value. +func (s *DecreaseNodeGroupsInGlobalReplicationGroupInput) SetGlobalReplicationGroupId(v string) *DecreaseNodeGroupsInGlobalReplicationGroupInput { + s.GlobalReplicationGroupId = &v + return s +} - // The name of a Redis snapshot from which to restore data into the new node - // group (shard). The snapshot status changes to restoring while the new node - // group (shard) is being created. - // - // This parameter is only valid if the Engine parameter is redis. - SnapshotName *string `type:"string"` +// SetNodeGroupCount sets the NodeGroupCount field's value. +func (s *DecreaseNodeGroupsInGlobalReplicationGroupInput) SetNodeGroupCount(v int64) *DecreaseNodeGroupsInGlobalReplicationGroupInput { + s.NodeGroupCount = &v + return s +} - // The number of days for which ElastiCache retains automatic snapshots before - // deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot - // taken today is retained for 5 days before being deleted. +type DecreaseNodeGroupsInGlobalReplicationGroupOutput struct { + _ struct{} `type:"structure"` + + // Consists of a primary cluster that accepts writes and an associated secondary + // cluster that resides in a different AWS region. The secondary cluster accepts + // only reads. The primary cluster automatically replicates updates to the secondary + // cluster. // - // This parameter is only valid if the Engine parameter is redis. + // * The GlobalReplicationGroupIdSuffix represents the name of the Global + // Datastore, which is what you use to associate a secondary cluster. 
+ GlobalReplicationGroup *GlobalReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s DecreaseNodeGroupsInGlobalReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecreaseNodeGroupsInGlobalReplicationGroupOutput) GoString() string { + return s.String() +} + +// SetGlobalReplicationGroup sets the GlobalReplicationGroup field's value. +func (s *DecreaseNodeGroupsInGlobalReplicationGroupOutput) SetGlobalReplicationGroup(v *GlobalReplicationGroup) *DecreaseNodeGroupsInGlobalReplicationGroupOutput { + s.GlobalReplicationGroup = v + return s +} + +type DecreaseReplicaCountInput struct { + _ struct{} `type:"structure"` + + // If True, the number of replica nodes is decreased immediately. ApplyImmediately=False + // is not currently supported. // - // Default: 0 (i.e., automatic backups are disabled for this cache cluster). - SnapshotRetentionLimit *int64 `type:"integer"` + // ApplyImmediately is a required field + ApplyImmediately *bool `type:"boolean" required:"true"` - // The daily time range (in UTC) during which ElastiCache begins taking a daily - // snapshot of your node group (shard). + // The number of read replica nodes you want at the completion of this operation. + // For Redis (cluster mode disabled) replication groups, this is the number + // of replica nodes in the replication group. For Redis (cluster mode enabled) + // replication groups, this is the number of replica nodes in each of the replication + // group's node groups. // - // Example: 05:00-09:00 + // The minimum number of replicas in a shard or replication group is: // - // If you do not specify this parameter, ElastiCache automatically chooses an - // appropriate time range. + // * Redis (cluster mode disabled) If Multi-AZ is enabled: 1 If Multi-AZ + // is not enabled: 0 // - // This parameter is only valid if the Engine parameter is redis. - SnapshotWindow *string `type:"string"` + // * Redis (cluster mode enabled): 0 (though you will not be able to failover + // to a replica if your primary node fails) + NewReplicaCount *int64 `type:"integer"` - // A list of cost allocation tags to be added to this resource. - Tags []*Tag `locationNameList:"Tag" type:"list"` + // A list of ConfigureShard objects that can be used to configure each shard + // in a Redis (cluster mode enabled) replication group. The ConfigureShard has + // three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. + ReplicaConfiguration []*ConfigureShard `locationNameList:"ConfigureShard" type:"list"` + + // A list of the node ids to remove from the replication group or node group + // (shard). + ReplicasToRemove []*string `type:"list"` + + // The id of the replication group from which you want to remove replica nodes. + // + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` } // String returns the string representation -func (s CreateCacheClusterInput) String() string { +func (s DecreaseReplicaCountInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCacheClusterInput) GoString() string { +func (s DecreaseReplicaCountInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
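(Editorial aside, not part of the vendored file.) The hunks above vendor in the Global Datastore shapes, including DecreaseNodeGroupsInGlobalReplicationGroupInput and its setters. A rough sketch of how application code might drive the corresponding generated client operation; the region, Global Datastore ID, and shard ID are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	// Placeholder region; real code would take this from configuration.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := elasticache.New(sess)

	// Shrink a Global Datastore to two shards, naming the shard to drop.
	// ApplyImmediately is required and, per the field docs above, must be true.
	out, err := svc.DecreaseNodeGroupsInGlobalReplicationGroup(&elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput{
		ApplyImmediately:         aws.Bool(true),
		GlobalReplicationGroupId: aws.String("example-global-datastore"),                     // placeholder ID
		NodeGroupCount:           aws.Int64(2),
		GlobalNodeGroupsToRemove: aws.StringSlice([]string{"example-global-datastore-0003"}), // placeholder shard ID
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```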
-func (s *CreateCacheClusterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCacheClusterInput"} - if s.CacheClusterId == nil { - invalidParams.Add(request.NewErrParamRequired("CacheClusterId")) +func (s *DecreaseReplicaCountInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecreaseReplicaCountInput"} + if s.ApplyImmediately == nil { + invalidParams.Add(request.NewErrParamRequired("ApplyImmediately")) + } + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + if s.ReplicaConfiguration != nil { + for i, v := range s.ReplicaConfiguration { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaConfiguration", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -7519,145 +11157,111 @@ func (s *CreateCacheClusterInput) Validate() error { return nil } -// SetAZMode sets the AZMode field's value. -func (s *CreateCacheClusterInput) SetAZMode(v string) *CreateCacheClusterInput { - s.AZMode = &v - return s -} - -// SetAuthToken sets the AuthToken field's value. -func (s *CreateCacheClusterInput) SetAuthToken(v string) *CreateCacheClusterInput { - s.AuthToken = &v - return s -} - -// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. -func (s *CreateCacheClusterInput) SetAutoMinorVersionUpgrade(v bool) *CreateCacheClusterInput { - s.AutoMinorVersionUpgrade = &v - return s -} - -// SetCacheClusterId sets the CacheClusterId field's value. -func (s *CreateCacheClusterInput) SetCacheClusterId(v string) *CreateCacheClusterInput { - s.CacheClusterId = &v - return s -} - -// SetCacheNodeType sets the CacheNodeType field's value. -func (s *CreateCacheClusterInput) SetCacheNodeType(v string) *CreateCacheClusterInput { - s.CacheNodeType = &v +// SetApplyImmediately sets the ApplyImmediately field's value. +func (s *DecreaseReplicaCountInput) SetApplyImmediately(v bool) *DecreaseReplicaCountInput { + s.ApplyImmediately = &v return s } -// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. -func (s *CreateCacheClusterInput) SetCacheParameterGroupName(v string) *CreateCacheClusterInput { - s.CacheParameterGroupName = &v +// SetNewReplicaCount sets the NewReplicaCount field's value. +func (s *DecreaseReplicaCountInput) SetNewReplicaCount(v int64) *DecreaseReplicaCountInput { + s.NewReplicaCount = &v return s } -// SetCacheSecurityGroupNames sets the CacheSecurityGroupNames field's value. -func (s *CreateCacheClusterInput) SetCacheSecurityGroupNames(v []*string) *CreateCacheClusterInput { - s.CacheSecurityGroupNames = v +// SetReplicaConfiguration sets the ReplicaConfiguration field's value. +func (s *DecreaseReplicaCountInput) SetReplicaConfiguration(v []*ConfigureShard) *DecreaseReplicaCountInput { + s.ReplicaConfiguration = v return s } -// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. -func (s *CreateCacheClusterInput) SetCacheSubnetGroupName(v string) *CreateCacheClusterInput { - s.CacheSubnetGroupName = &v +// SetReplicasToRemove sets the ReplicasToRemove field's value. +func (s *DecreaseReplicaCountInput) SetReplicasToRemove(v []*string) *DecreaseReplicaCountInput { + s.ReplicasToRemove = v return s } -// SetEngine sets the Engine field's value. -func (s *CreateCacheClusterInput) SetEngine(v string) *CreateCacheClusterInput { - s.Engine = &v +// SetReplicationGroupId sets the ReplicationGroupId field's value. 
+func (s *DecreaseReplicaCountInput) SetReplicationGroupId(v string) *DecreaseReplicaCountInput { + s.ReplicationGroupId = &v return s } -// SetEngineVersion sets the EngineVersion field's value. -func (s *CreateCacheClusterInput) SetEngineVersion(v string) *CreateCacheClusterInput { - s.EngineVersion = &v - return s -} +type DecreaseReplicaCountOutput struct { + _ struct{} `type:"structure"` -// SetNotificationTopicArn sets the NotificationTopicArn field's value. -func (s *CreateCacheClusterInput) SetNotificationTopicArn(v string) *CreateCacheClusterInput { - s.NotificationTopicArn = &v - return s + // Contains all of the attributes of a specific Redis replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` } -// SetNumCacheNodes sets the NumCacheNodes field's value. -func (s *CreateCacheClusterInput) SetNumCacheNodes(v int64) *CreateCacheClusterInput { - s.NumCacheNodes = &v - return s +// String returns the string representation +func (s DecreaseReplicaCountOutput) String() string { + return awsutil.Prettify(s) } -// SetPort sets the Port field's value. -func (s *CreateCacheClusterInput) SetPort(v int64) *CreateCacheClusterInput { - s.Port = &v - return s +// GoString returns the string representation +func (s DecreaseReplicaCountOutput) GoString() string { + return s.String() } -// SetPreferredAvailabilityZone sets the PreferredAvailabilityZone field's value. -func (s *CreateCacheClusterInput) SetPreferredAvailabilityZone(v string) *CreateCacheClusterInput { - s.PreferredAvailabilityZone = &v +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *DecreaseReplicaCountOutput) SetReplicationGroup(v *ReplicationGroup) *DecreaseReplicaCountOutput { + s.ReplicationGroup = v return s } -// SetPreferredAvailabilityZones sets the PreferredAvailabilityZones field's value. -func (s *CreateCacheClusterInput) SetPreferredAvailabilityZones(v []*string) *CreateCacheClusterInput { - s.PreferredAvailabilityZones = v - return s -} +// Represents the input of a DeleteCacheCluster operation. +type DeleteCacheClusterInput struct { + _ struct{} `type:"structure"` -// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. -func (s *CreateCacheClusterInput) SetPreferredMaintenanceWindow(v string) *CreateCacheClusterInput { - s.PreferredMaintenanceWindow = &v - return s -} + // The cluster identifier for the cluster to be deleted. This parameter is not + // case sensitive. + // + // CacheClusterId is a required field + CacheClusterId *string `type:"string" required:"true"` -// SetReplicationGroupId sets the ReplicationGroupId field's value. -func (s *CreateCacheClusterInput) SetReplicationGroupId(v string) *CreateCacheClusterInput { - s.ReplicationGroupId = &v - return s + // The user-supplied name of a final cluster snapshot. This is the unique name + // that identifies the snapshot. ElastiCache creates the snapshot, and then + // deletes the cluster immediately afterward. + FinalSnapshotIdentifier *string `type:"string"` } -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *CreateCacheClusterInput) SetSecurityGroupIds(v []*string) *CreateCacheClusterInput { - s.SecurityGroupIds = v - return s +// String returns the string representation +func (s DeleteCacheClusterInput) String() string { + return awsutil.Prettify(s) } -// SetSnapshotArns sets the SnapshotArns field's value. 
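(Editorial aside, not part of the vendored file.) With DecreaseReplicaCountInput and DecreaseReplicaCountOutput vendored above, a minimal sketch of the call, assuming a client built as in the previous sketch; the package name, helper name, and replication group ID are placeholders:

```go
package example // hypothetical package name for this sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// decreaseReplicas drops a Redis (cluster mode disabled) replication group to
// one replica; svc is an *elasticache.ElastiCache client built as in the
// earlier sketch.
func decreaseReplicas(svc *elasticache.ElastiCache) (*elasticache.DecreaseReplicaCountOutput, error) {
	return svc.DecreaseReplicaCount(&elasticache.DecreaseReplicaCountInput{
		ApplyImmediately:   aws.Bool(true), // false is not currently supported, per the field docs above
		NewReplicaCount:    aws.Int64(1),
		ReplicationGroupId: aws.String("example-replication-group"), // placeholder ID
	})
}
```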
-func (s *CreateCacheClusterInput) SetSnapshotArns(v []*string) *CreateCacheClusterInput { - s.SnapshotArns = v - return s +// GoString returns the string representation +func (s DeleteCacheClusterInput) GoString() string { + return s.String() } -// SetSnapshotName sets the SnapshotName field's value. -func (s *CreateCacheClusterInput) SetSnapshotName(v string) *CreateCacheClusterInput { - s.SnapshotName = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCacheClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCacheClusterInput"} + if s.CacheClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("CacheClusterId")) + } -// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value. -func (s *CreateCacheClusterInput) SetSnapshotRetentionLimit(v int64) *CreateCacheClusterInput { - s.SnapshotRetentionLimit = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSnapshotWindow sets the SnapshotWindow field's value. -func (s *CreateCacheClusterInput) SetSnapshotWindow(v string) *CreateCacheClusterInput { - s.SnapshotWindow = &v +// SetCacheClusterId sets the CacheClusterId field's value. +func (s *DeleteCacheClusterInput) SetCacheClusterId(v string) *DeleteCacheClusterInput { + s.CacheClusterId = &v return s } -// SetTags sets the Tags field's value. -func (s *CreateCacheClusterInput) SetTags(v []*Tag) *CreateCacheClusterInput { - s.Tags = v +// SetFinalSnapshotIdentifier sets the FinalSnapshotIdentifier field's value. +func (s *DeleteCacheClusterInput) SetFinalSnapshotIdentifier(v string) *DeleteCacheClusterInput { + s.FinalSnapshotIdentifier = &v return s } -type CreateCacheClusterOutput struct { +type DeleteCacheClusterOutput struct { _ struct{} `type:"structure"` // Contains all of the attributes of a specific cluster. @@ -7665,67 +11269,49 @@ type CreateCacheClusterOutput struct { } // String returns the string representation -func (s CreateCacheClusterOutput) String() string { +func (s DeleteCacheClusterOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCacheClusterOutput) GoString() string { +func (s DeleteCacheClusterOutput) GoString() string { return s.String() } // SetCacheCluster sets the CacheCluster field's value. -func (s *CreateCacheClusterOutput) SetCacheCluster(v *CacheCluster) *CreateCacheClusterOutput { +func (s *DeleteCacheClusterOutput) SetCacheCluster(v *CacheCluster) *DeleteCacheClusterOutput { s.CacheCluster = v return s } -// Represents the input of a CreateCacheParameterGroup operation. -type CreateCacheParameterGroupInput struct { +// Represents the input of a DeleteCacheParameterGroup operation. +type DeleteCacheParameterGroupInput struct { _ struct{} `type:"structure"` - // The name of the cache parameter group family that the cache parameter group - // can be used with. - // - // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 - // | redis4.0 | redis5.0 | + // The name of the cache parameter group to delete. // - // CacheParameterGroupFamily is a required field - CacheParameterGroupFamily *string `type:"string" required:"true"` - - // A user-specified name for the cache parameter group. + // The specified cache security group must not be associated with any clusters. 
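(Editorial aside, not part of the vendored file.) The DeleteCacheClusterInput/Output shapes above support cluster teardown; a sketch under the same assumptions (hypothetical package and helper names, placeholder identifiers), asking ElastiCache for a final snapshot before deletion:

```go
package example // hypothetical package name for this sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// deleteCluster removes a cache cluster, asking ElastiCache to take a final
// snapshot first (Redis only); both identifiers are placeholders.
func deleteCluster(svc *elasticache.ElastiCache) (*elasticache.DeleteCacheClusterOutput, error) {
	return svc.DeleteCacheCluster(&elasticache.DeleteCacheClusterInput{
		CacheClusterId:          aws.String("example-cluster"),
		FinalSnapshotIdentifier: aws.String("example-final-snapshot"),
	})
}
```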
// // CacheParameterGroupName is a required field CacheParameterGroupName *string `type:"string" required:"true"` - - // A user-specified description for the cache parameter group. - // - // Description is a required field - Description *string `type:"string" required:"true"` } // String returns the string representation -func (s CreateCacheParameterGroupInput) String() string { +func (s DeleteCacheParameterGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCacheParameterGroupInput) GoString() string { +func (s DeleteCacheParameterGroupInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateCacheParameterGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCacheParameterGroupInput"} - if s.CacheParameterGroupFamily == nil { - invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupFamily")) - } +func (s *DeleteCacheParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCacheParameterGroupInput"} if s.CacheParameterGroupName == nil { invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName")) } - if s.Description == nil { - invalidParams.Add(request.NewErrParamRequired("Description")) - } if invalidParams.Len() > 0 { return invalidParams @@ -7733,87 +11319,54 @@ func (s *CreateCacheParameterGroupInput) Validate() error { return nil } -// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. -func (s *CreateCacheParameterGroupInput) SetCacheParameterGroupFamily(v string) *CreateCacheParameterGroupInput { - s.CacheParameterGroupFamily = &v - return s -} - // SetCacheParameterGroupName sets the CacheParameterGroupName field's value. -func (s *CreateCacheParameterGroupInput) SetCacheParameterGroupName(v string) *CreateCacheParameterGroupInput { +func (s *DeleteCacheParameterGroupInput) SetCacheParameterGroupName(v string) *DeleteCacheParameterGroupInput { s.CacheParameterGroupName = &v return s } -// SetDescription sets the Description field's value. -func (s *CreateCacheParameterGroupInput) SetDescription(v string) *CreateCacheParameterGroupInput { - s.Description = &v - return s -} - -type CreateCacheParameterGroupOutput struct { +type DeleteCacheParameterGroupOutput struct { _ struct{} `type:"structure"` - - // Represents the output of a CreateCacheParameterGroup operation. - CacheParameterGroup *CacheParameterGroup `type:"structure"` } // String returns the string representation -func (s CreateCacheParameterGroupOutput) String() string { +func (s DeleteCacheParameterGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCacheParameterGroupOutput) GoString() string { +func (s DeleteCacheParameterGroupOutput) GoString() string { return s.String() } -// SetCacheParameterGroup sets the CacheParameterGroup field's value. -func (s *CreateCacheParameterGroupOutput) SetCacheParameterGroup(v *CacheParameterGroup) *CreateCacheParameterGroupOutput { - s.CacheParameterGroup = v - return s -} - -// Represents the input of a CreateCacheSecurityGroup operation. -type CreateCacheSecurityGroupInput struct { +// Represents the input of a DeleteCacheSecurityGroup operation. +type DeleteCacheSecurityGroupInput struct { _ struct{} `type:"structure"` - // A name for the cache security group. This value is stored as a lowercase - // string. 
- // - // Constraints: Must contain no more than 255 alphanumeric characters. Cannot - // be the word "Default". + // The name of the cache security group to delete. // - // Example: mysecuritygroup + // You cannot delete the default security group. // // CacheSecurityGroupName is a required field CacheSecurityGroupName *string `type:"string" required:"true"` - - // A description for the cache security group. - // - // Description is a required field - Description *string `type:"string" required:"true"` } // String returns the string representation -func (s CreateCacheSecurityGroupInput) String() string { +func (s DeleteCacheSecurityGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCacheSecurityGroupInput) GoString() string { +func (s DeleteCacheSecurityGroupInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateCacheSecurityGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCacheSecurityGroupInput"} +func (s *DeleteCacheSecurityGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCacheSecurityGroupInput"} if s.CacheSecurityGroupName == nil { invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName")) } - if s.Description == nil { - invalidParams.Add(request.NewErrParamRequired("Description")) - } if invalidParams.Len() > 0 { return invalidParams @@ -7822,92 +11375,53 @@ func (s *CreateCacheSecurityGroupInput) Validate() error { } // SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value. -func (s *CreateCacheSecurityGroupInput) SetCacheSecurityGroupName(v string) *CreateCacheSecurityGroupInput { +func (s *DeleteCacheSecurityGroupInput) SetCacheSecurityGroupName(v string) *DeleteCacheSecurityGroupInput { s.CacheSecurityGroupName = &v return s } -// SetDescription sets the Description field's value. -func (s *CreateCacheSecurityGroupInput) SetDescription(v string) *CreateCacheSecurityGroupInput { - s.Description = &v - return s -} - -type CreateCacheSecurityGroupOutput struct { +type DeleteCacheSecurityGroupOutput struct { _ struct{} `type:"structure"` - - // Represents the output of one of the following operations: - // - // * AuthorizeCacheSecurityGroupIngress - // - // * CreateCacheSecurityGroup - // - // * RevokeCacheSecurityGroupIngress - CacheSecurityGroup *CacheSecurityGroup `type:"structure"` } // String returns the string representation -func (s CreateCacheSecurityGroupOutput) String() string { +func (s DeleteCacheSecurityGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCacheSecurityGroupOutput) GoString() string { +func (s DeleteCacheSecurityGroupOutput) GoString() string { return s.String() } -// SetCacheSecurityGroup sets the CacheSecurityGroup field's value. -func (s *CreateCacheSecurityGroupOutput) SetCacheSecurityGroup(v *CacheSecurityGroup) *CreateCacheSecurityGroupOutput { - s.CacheSecurityGroup = v - return s -} - -// Represents the input of a CreateCacheSubnetGroup operation. -type CreateCacheSubnetGroupInput struct { +// Represents the input of a DeleteCacheSubnetGroup operation. +type DeleteCacheSubnetGroupInput struct { _ struct{} `type:"structure"` - // A description for the cache subnet group. 
- // - // CacheSubnetGroupDescription is a required field - CacheSubnetGroupDescription *string `type:"string" required:"true"` - - // A name for the cache subnet group. This value is stored as a lowercase string. + // The name of the cache subnet group to delete. // // Constraints: Must contain no more than 255 alphanumeric characters or hyphens. // - // Example: mysubnetgroup - // // CacheSubnetGroupName is a required field CacheSubnetGroupName *string `type:"string" required:"true"` - - // A list of VPC subnet IDs for the cache subnet group. - // - // SubnetIds is a required field - SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"` } // String returns the string representation -func (s CreateCacheSubnetGroupInput) String() string { +func (s DeleteCacheSubnetGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCacheSubnetGroupInput) GoString() string { +func (s DeleteCacheSubnetGroupInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateCacheSubnetGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCacheSubnetGroupInput"} - if s.CacheSubnetGroupDescription == nil { - invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupDescription")) - } +func (s *DeleteCacheSubnetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCacheSubnetGroupInput"} if s.CacheSubnetGroupName == nil { invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupName")) } - if s.SubnetIds == nil { - invalidParams.Add(request.NewErrParamRequired("SubnetIds")) - } if invalidParams.Len() > 0 { return invalidParams @@ -7915,406 +11429,279 @@ func (s *CreateCacheSubnetGroupInput) Validate() error { return nil } -// SetCacheSubnetGroupDescription sets the CacheSubnetGroupDescription field's value. -func (s *CreateCacheSubnetGroupInput) SetCacheSubnetGroupDescription(v string) *CreateCacheSubnetGroupInput { - s.CacheSubnetGroupDescription = &v - return s -} - // SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. -func (s *CreateCacheSubnetGroupInput) SetCacheSubnetGroupName(v string) *CreateCacheSubnetGroupInput { +func (s *DeleteCacheSubnetGroupInput) SetCacheSubnetGroupName(v string) *DeleteCacheSubnetGroupInput { s.CacheSubnetGroupName = &v return s } -// SetSubnetIds sets the SubnetIds field's value. -func (s *CreateCacheSubnetGroupInput) SetSubnetIds(v []*string) *CreateCacheSubnetGroupInput { - s.SubnetIds = v - return s +type DeleteCacheSubnetGroupOutput struct { + _ struct{} `type:"structure"` } -type CreateCacheSubnetGroupOutput struct { +// String returns the string representation +func (s DeleteCacheSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCacheSubnetGroupOutput) GoString() string { + return s.String() +} + +type DeleteGlobalReplicationGroupInput struct { _ struct{} `type:"structure"` - // Represents the output of one of the following operations: + // The name of the Global Datastore // - // * CreateCacheSubnetGroup + // GlobalReplicationGroupId is a required field + GlobalReplicationGroupId *string `type:"string" required:"true"` + + // The primary replication group is retained as a standalone replication group. 
// - // * ModifyCacheSubnetGroup - CacheSubnetGroup *CacheSubnetGroup `type:"structure"` + // RetainPrimaryReplicationGroup is a required field + RetainPrimaryReplicationGroup *bool `type:"boolean" required:"true"` } // String returns the string representation -func (s CreateCacheSubnetGroupOutput) String() string { +func (s DeleteGlobalReplicationGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCacheSubnetGroupOutput) GoString() string { +func (s DeleteGlobalReplicationGroupInput) GoString() string { return s.String() } -// SetCacheSubnetGroup sets the CacheSubnetGroup field's value. -func (s *CreateCacheSubnetGroupOutput) SetCacheSubnetGroup(v *CacheSubnetGroup) *CreateCacheSubnetGroupOutput { - s.CacheSubnetGroup = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteGlobalReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGlobalReplicationGroupInput"} + if s.GlobalReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalReplicationGroupId")) + } + if s.RetainPrimaryReplicationGroup == nil { + invalidParams.Add(request.NewErrParamRequired("RetainPrimaryReplicationGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalReplicationGroupId sets the GlobalReplicationGroupId field's value. +func (s *DeleteGlobalReplicationGroupInput) SetGlobalReplicationGroupId(v string) *DeleteGlobalReplicationGroupInput { + s.GlobalReplicationGroupId = &v return s } -// Represents the input of a CreateReplicationGroup operation. -type CreateReplicationGroupInput struct { +// SetRetainPrimaryReplicationGroup sets the RetainPrimaryReplicationGroup field's value. +func (s *DeleteGlobalReplicationGroupInput) SetRetainPrimaryReplicationGroup(v bool) *DeleteGlobalReplicationGroupInput { + s.RetainPrimaryReplicationGroup = &v + return s +} + +type DeleteGlobalReplicationGroupOutput struct { _ struct{} `type:"structure"` - // A flag that enables encryption at rest when set to true. - // - // You cannot modify the value of AtRestEncryptionEnabled after the replication - // group is created. To enable encryption at rest on a replication group you - // must set AtRestEncryptionEnabled to true when you create the replication - // group. - // - // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6, 4.x or later. + // Consists of a primary cluster that accepts writes and an associated secondary + // cluster that resides in a different AWS region. The secondary cluster accepts + // only reads. The primary cluster automatically replicates updates to the secondary + // cluster. // - // Default: false - AtRestEncryptionEnabled *bool `type:"boolean"` + // * The GlobalReplicationGroupIdSuffix represents the name of the Global + // Datastore, which is what you use to associate a secondary cluster. + GlobalReplicationGroup *GlobalReplicationGroup `type:"structure"` +} - // Reserved parameter. The password used to access a password protected server. - // - // AuthToken can be specified only on replication groups where TransitEncryptionEnabled - // is true. - // - // For HIPAA compliance, you must specify TransitEncryptionEnabled as true, - // an AuthToken, and a CacheSubnetGroup. - // - // Password constraints: - // - // * Must be only printable ASCII characters. 
- // - // * Must be at least 16 characters and no more than 128 characters in length. - // - // * The only permitted printable special characters are !, &, #, $, ^, <, - // >, and -. Other printable special characters cannot be used in the AUTH - // token. - // - // For more information, see AUTH password (http://redis.io/commands/AUTH) at - // http://redis.io/commands/AUTH. - AuthToken *string `type:"string"` +// String returns the string representation +func (s DeleteGlobalReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} - // This parameter is currently disabled. - AutoMinorVersionUpgrade *bool `type:"boolean"` +// GoString returns the string representation +func (s DeleteGlobalReplicationGroupOutput) GoString() string { + return s.String() +} - // Specifies whether a read-only replica is automatically promoted to read/write - // primary if the existing primary fails. - // - // If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ - // is disabled for this replication group. - // - // AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled) - // replication groups. - // - // Default: false - // - // Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover - // on: - // - // * Redis versions earlier than 2.8.6. - // - // * Redis (cluster mode disabled): T1 node types. - // - // * Redis (cluster mode enabled): T1 node types. - AutomaticFailoverEnabled *bool `type:"boolean"` +// SetGlobalReplicationGroup sets the GlobalReplicationGroup field's value. +func (s *DeleteGlobalReplicationGroupOutput) SetGlobalReplicationGroup(v *GlobalReplicationGroup) *DeleteGlobalReplicationGroupOutput { + s.GlobalReplicationGroup = v + return s +} - // The compute and memory capacity of the nodes in the node group (shard). - // - // The following node types are supported by ElastiCache. Generally speaking, - // the current generation types provide more memory and computational power - // at lower cost when compared to their equivalent previous generation counterparts. - // - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, - // cache.t2.medium Previous generation: (not recommended) T1 node types: - // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, - // cache.m3.2xlarge - // - // * Compute optimized: Previous generation: (not recommended) C1 node types: - // cache.c1.xlarge - // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge - // - // Additional node type info - // - // * All current generation instance types are created in Amazon VPC by default. - // - // * Redis append-only files (AOF) are not supported for T1 or T2 instances. 
- // - // * Redis Multi-AZ with automatic failover is not supported on T1 instances. - // - // * Redis configuration variables appendonly and appendfsync are not supported - // on Redis version 2.8.22 and later. - CacheNodeType *string `type:"string"` +// Represents the input of a DeleteReplicationGroup operation. +type DeleteReplicationGroupInput struct { + _ struct{} `type:"structure"` - // The name of the parameter group to associate with this replication group. - // If this argument is omitted, the default cache parameter group for the specified - // engine is used. - // - // If you are restoring to an engine version that is different than the original, - // you must specify the default version of that version. For example, CacheParameterGroupName=default.redis4.0. - // - // If you are running Redis version 3.2.4 or later, only one node group (shard), - // and want to use a default parameter group, we recommend that you specify - // the parameter group by name. - // - // * To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2. + // The name of a final node group (shard) snapshot. ElastiCache creates the + // snapshot from the primary node in the cluster, rather than one of the replicas; + // this is to ensure that it captures the freshest data. After the final snapshot + // is taken, the replication group is immediately deleted. + FinalSnapshotIdentifier *string `type:"string"` + + // The identifier for the cluster to be deleted. This parameter is not case + // sensitive. // - // * To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. - CacheParameterGroupName *string `type:"string"` + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` - // A list of cache security group names to associate with this replication group. - CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"` + // If set to true, all of the read replicas are deleted, but the primary node + // is retained. + RetainPrimaryCluster *bool `type:"boolean"` +} - // The name of the cache subnet group to be used for the replication group. - // - // If you're going to launch your cluster in an Amazon VPC, you need to create - // a subnet group before you start creating a cluster. For more information, - // see Subnets and Subnet Groups (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SubnetGroups.html). - CacheSubnetGroupName *string `type:"string"` +// String returns the string representation +func (s DeleteReplicationGroupInput) String() string { + return awsutil.Prettify(s) +} - // The name of the cache engine to be used for the clusters in this replication - // group. - Engine *string `type:"string"` +// GoString returns the string representation +func (s DeleteReplicationGroupInput) GoString() string { + return s.String() +} - // The version number of the cache engine to be used for the clusters in this - // replication group. To view the supported cache engine versions, use the DescribeCacheEngineVersions - // operation. - // - // Important: You can upgrade to a newer engine version (see Selecting a Cache - // Engine and Version (https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/SelectEngine.html#VersionManagement)) - // in the ElastiCache User Guide, but you cannot downgrade to an earlier engine - // version. 
If you want to use an earlier engine version, you must delete the - // existing cluster or replication group and create it anew with the earlier - // engine version. - EngineVersion *string `type:"string"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationGroupInput"} + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } - // The ID of the KMS key used to encrypt the disk on the cluster. - KmsKeyId *string `type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // A list of node group (shard) configuration options. Each node group (shard) - // configuration has the following members: PrimaryAvailabilityZone, ReplicaAvailabilityZones, - // ReplicaCount, and Slots. - // - // If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode - // enabled) replication group, you can use this parameter to individually configure - // each node group (shard), or you can omit this parameter. However, when seeding - // a Redis (cluster mode enabled) cluster from a S3 rdb file, you must configure - // each node group (shard) using this parameter because you must specify the - // slots for each node group. - NodeGroupConfiguration []*NodeGroupConfiguration `locationNameList:"NodeGroupConfiguration" type:"list"` +// SetFinalSnapshotIdentifier sets the FinalSnapshotIdentifier field's value. +func (s *DeleteReplicationGroupInput) SetFinalSnapshotIdentifier(v string) *DeleteReplicationGroupInput { + s.FinalSnapshotIdentifier = &v + return s +} - // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service - // (SNS) topic to which notifications are sent. - // - // The Amazon SNS topic owner must be the same as the cluster owner. - NotificationTopicArn *string `type:"string"` +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *DeleteReplicationGroupInput) SetReplicationGroupId(v string) *DeleteReplicationGroupInput { + s.ReplicationGroupId = &v + return s +} - // The number of clusters this replication group initially has. - // - // This parameter is not used if there is more than one node group (shard). - // You should use ReplicasPerNodeGroup instead. - // - // If AutomaticFailoverEnabled is true, the value of this parameter must be - // at least 2. If AutomaticFailoverEnabled is false you can omit this parameter - // (it will default to 1), or you can explicitly set it to a value between 2 - // and 6. - // - // The maximum permitted value for NumCacheClusters is 6 (1 primary plus 5 replicas). - NumCacheClusters *int64 `type:"integer"` +// SetRetainPrimaryCluster sets the RetainPrimaryCluster field's value. +func (s *DeleteReplicationGroupInput) SetRetainPrimaryCluster(v bool) *DeleteReplicationGroupInput { + s.RetainPrimaryCluster = &v + return s +} - // An optional parameter that specifies the number of node groups (shards) for - // this Redis (cluster mode enabled) replication group. For Redis (cluster mode - // disabled) either omit this parameter or set it to 1. - // - // Default: 1 - NumNodeGroups *int64 `type:"integer"` +type DeleteReplicationGroupOutput struct { + _ struct{} `type:"structure"` - // The port number on which each member of the replication group accepts connections. - Port *int64 `type:"integer"` + // Contains all of the attributes of a specific Redis replication group. 
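(Editorial aside, not part of the vendored file.) The deletion shapes above cover both Global Datastores and plain replication groups. A sketch that detaches and deletes a Global Datastore while retaining its primary, then deletes the now-standalone replication group; names and IDs are placeholders, and in practice one would likely need to wait for the first deletion to finish before issuing the second call:

```go
package example // hypothetical package name for this sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// tearDownGlobalDatastore deletes a Global Datastore while retaining its
// primary replication group, then deletes that replication group with a
// final snapshot. No waiting between the two calls is shown here.
func tearDownGlobalDatastore(svc *elasticache.ElastiCache) error {
	if _, err := svc.DeleteGlobalReplicationGroup(&elasticache.DeleteGlobalReplicationGroupInput{
		GlobalReplicationGroupId:      aws.String("example-global-datastore"),
		RetainPrimaryReplicationGroup: aws.Bool(true),
	}); err != nil {
		return err
	}

	_, err := svc.DeleteReplicationGroup(&elasticache.DeleteReplicationGroupInput{
		ReplicationGroupId:      aws.String("example-replication-group"),
		FinalSnapshotIdentifier: aws.String("example-final-snapshot"),
		RetainPrimaryCluster:    aws.Bool(false),
	})
	return err
}
```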
+ ReplicationGroup *ReplicationGroup `type:"structure"` +} - // A list of EC2 Availability Zones in which the replication group's clusters - // are created. The order of the Availability Zones in the list is the order - // in which clusters are allocated. The primary cluster is created in the first - // AZ in the list. - // - // This parameter is not used if there is more than one node group (shard). - // You should use NodeGroupConfiguration instead. - // - // If you are creating your replication group in an Amazon VPC (recommended), - // you can only locate clusters in Availability Zones associated with the subnets - // in the selected subnet group. - // - // The number of Availability Zones listed must equal the value of NumCacheClusters. - // - // Default: system chosen Availability Zones. - PreferredCacheClusterAZs []*string `locationNameList:"AvailabilityZone" type:"list"` +// String returns the string representation +func (s DeleteReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} - // Specifies the weekly time range during which maintenance on the cluster is - // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi - // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid - // values for ddd are: - // - // Specifies the weekly time range during which maintenance on the cluster is - // performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi - // (24H Clock UTC). The minimum maintenance window is a 60 minute period. - // - // Valid values for ddd are: - // - // * sun - // - // * mon - // - // * tue - // - // * wed - // - // * thu - // - // * fri - // - // * sat - // - // Example: sun:23:00-mon:01:30 - PreferredMaintenanceWindow *string `type:"string"` +// GoString returns the string representation +func (s DeleteReplicationGroupOutput) GoString() string { + return s.String() +} - // The identifier of the cluster that serves as the primary for this replication - // group. This cluster must already exist and have a status of available. - // - // This parameter is not required if NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup - // is specified. - PrimaryClusterId *string `type:"string"` +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *DeleteReplicationGroupOutput) SetReplicationGroup(v *ReplicationGroup) *DeleteReplicationGroupOutput { + s.ReplicationGroup = v + return s +} - // An optional parameter that specifies the number of replica nodes in each - // node group (shard). Valid values are 0 to 5. - ReplicasPerNodeGroup *int64 `type:"integer"` +// Represents the input of a DeleteSnapshot operation. +type DeleteSnapshotInput struct { + _ struct{} `type:"structure"` - // A user-created description for the replication group. + // The name of the snapshot to be deleted. // - // ReplicationGroupDescription is a required field - ReplicationGroupDescription *string `type:"string" required:"true"` + // SnapshotName is a required field + SnapshotName *string `type:"string" required:"true"` +} - // The replication group identifier. This parameter is stored as a lowercase - // string. - // - // Constraints: - // - // * A name must contain from 1 to 40 alphanumeric characters or hyphens. - // - // * The first character must be a letter. - // - // * A name cannot end with a hyphen or contain two consecutive hyphens. 
- // - // ReplicationGroupId is a required field - ReplicationGroupId *string `type:"string" required:"true"` +// String returns the string representation +func (s DeleteSnapshotInput) String() string { + return awsutil.Prettify(s) +} - // One or more Amazon VPC security groups associated with this replication group. - // - // Use this parameter only when you are creating a replication group in an Amazon - // Virtual Private Cloud (Amazon VPC). - SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"` +// GoString returns the string representation +func (s DeleteSnapshotInput) GoString() string { + return s.String() +} - // A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB - // snapshot files stored in Amazon S3. The snapshot files are used to populate - // the new replication group. The Amazon S3 object name in the ARN cannot contain - // any commas. The new replication group will have the number of node groups - // (console: shards) specified by the parameter NumNodeGroups or the number - // of node groups configured by NodeGroupConfiguration regardless of the number - // of ARNs specified here. - // - // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb - SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSnapshotInput"} + if s.SnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("SnapshotName")) + } - // The name of a snapshot from which to restore data into the new replication - // group. The snapshot status changes to restoring while the new replication - // group is being created. - SnapshotName *string `type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // The number of days for which ElastiCache retains automatic snapshots before - // deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot - // that was taken today is retained for 5 days before being deleted. - // - // Default: 0 (i.e., automatic backups are disabled for this cluster). - SnapshotRetentionLimit *int64 `type:"integer"` +// SetSnapshotName sets the SnapshotName field's value. +func (s *DeleteSnapshotInput) SetSnapshotName(v string) *DeleteSnapshotInput { + s.SnapshotName = &v + return s +} - // The daily time range (in UTC) during which ElastiCache begins taking a daily - // snapshot of your node group (shard). - // - // Example: 05:00-09:00 - // - // If you do not specify this parameter, ElastiCache automatically chooses an - // appropriate time range. - SnapshotWindow *string `type:"string"` +type DeleteSnapshotOutput struct { + _ struct{} `type:"structure"` - // A list of cost allocation tags to be added to this resource. Tags are comma-separated - // key,value pairs (e.g. Key=myKey, Value=myKeyValue. You can include multiple - // tags as shown following: Key=myKey, Value=myKeyValue Key=mySecondKey, Value=mySecondKeyValue. - Tags []*Tag `locationNameList:"Tag" type:"list"` + // Represents a copy of an entire Redis cluster as of the time when the snapshot + // was taken. + Snapshot *Snapshot `type:"structure"` +} - // A flag that enables in-transit encryption when set to true. - // - // You cannot modify the value of TransitEncryptionEnabled after the cluster - // is created. 
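(Editorial aside, not part of the vendored file.) DeleteSnapshotInput/Output above round out the snapshot lifecycle; a sketch removing the final snapshot named in the earlier teardown examples, under the same placeholder assumptions:

```go
package example // hypothetical package name for this sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

// deleteSnapshot removes a manual or final snapshot by name; the name is a
// placeholder matching the one used in the earlier deletion sketches.
func deleteSnapshot(svc *elasticache.ElastiCache) (*elasticache.DeleteSnapshotOutput, error) {
	return svc.DeleteSnapshot(&elasticache.DeleteSnapshotInput{
		SnapshotName: aws.String("example-final-snapshot"),
	})
}
```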
To enable in-transit encryption on a cluster you must set TransitEncryptionEnabled - // to true when you create a cluster. - // - // This parameter is valid only if the Engine parameter is redis, the EngineVersion - // parameter is 3.2.6, 4.x or later, and the cluster is being created in an - // Amazon VPC. - // - // If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. - // - // Required: Only available when creating a replication group in an Amazon VPC - // using redis version 3.2.6, 4.x or later. - // - // Default: false +// String returns the string representation +func (s DeleteSnapshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSnapshotOutput) GoString() string { + return s.String() +} + +// SetSnapshot sets the Snapshot field's value. +func (s *DeleteSnapshotOutput) SetSnapshot(v *Snapshot) *DeleteSnapshotOutput { + s.Snapshot = v + return s +} + +type DeleteUserGroupInput struct { + _ struct{} `type:"structure"` + + // The ID of the user group. // - // For HIPAA compliance, you must specify TransitEncryptionEnabled as true, - // an AuthToken, and a CacheSubnetGroup. - TransitEncryptionEnabled *bool `type:"boolean"` + // UserGroupId is a required field + UserGroupId *string `type:"string" required:"true"` } // String returns the string representation -func (s CreateReplicationGroupInput) String() string { +func (s DeleteUserGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateReplicationGroupInput) GoString() string { +func (s DeleteUserGroupInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateReplicationGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateReplicationGroupInput"} - if s.ReplicationGroupDescription == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationGroupDescription")) - } - if s.ReplicationGroupId == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) - } - if s.NodeGroupConfiguration != nil { - for i, v := range s.NodeGroupConfiguration { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "NodeGroupConfiguration", i), err.(request.ErrInvalidParams)) - } - } +func (s *DeleteUserGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserGroupInput"} + if s.UserGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("UserGroupId")) } if invalidParams.Len() > 0 { @@ -8323,555 +11710,574 @@ func (s *CreateReplicationGroupInput) Validate() error { return nil } -// SetAtRestEncryptionEnabled sets the AtRestEncryptionEnabled field's value. -func (s *CreateReplicationGroupInput) SetAtRestEncryptionEnabled(v bool) *CreateReplicationGroupInput { - s.AtRestEncryptionEnabled = &v +// SetUserGroupId sets the UserGroupId field's value. +func (s *DeleteUserGroupInput) SetUserGroupId(v string) *DeleteUserGroupInput { + s.UserGroupId = &v return s } -// SetAuthToken sets the AuthToken field's value. -func (s *CreateReplicationGroupInput) SetAuthToken(v string) *CreateReplicationGroupInput { - s.AuthToken = &v - return s -} +type DeleteUserGroupOutput struct { + _ struct{} `type:"structure"` -// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. 
-func (s *CreateReplicationGroupInput) SetAutoMinorVersionUpgrade(v bool) *CreateReplicationGroupInput { - s.AutoMinorVersionUpgrade = &v - return s -} + // The Amazon Resource Name (ARN) of the user group. + ARN *string `type:"string"` -// SetAutomaticFailoverEnabled sets the AutomaticFailoverEnabled field's value. -func (s *CreateReplicationGroupInput) SetAutomaticFailoverEnabled(v bool) *CreateReplicationGroupInput { - s.AutomaticFailoverEnabled = &v - return s -} + // Must be Redis. + Engine *string `type:"string"` -// SetCacheNodeType sets the CacheNodeType field's value. -func (s *CreateReplicationGroupInput) SetCacheNodeType(v string) *CreateReplicationGroupInput { - s.CacheNodeType = &v - return s + // A list of updates being applied to the user groups. + PendingChanges *UserGroupPendingChanges `type:"structure"` + + // A list of replication groups that the user group can access. + ReplicationGroups []*string `type:"list"` + + // Indicates user group status. Can be "creating", "active", "modifying", "deleting". + Status *string `type:"string"` + + // The ID of the user group. + UserGroupId *string `type:"string"` + + // The list of user IDs that belong to the user group. + UserIds []*string `type:"list"` } -// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. -func (s *CreateReplicationGroupInput) SetCacheParameterGroupName(v string) *CreateReplicationGroupInput { - s.CacheParameterGroupName = &v - return s +// String returns the string representation +func (s DeleteUserGroupOutput) String() string { + return awsutil.Prettify(s) } -// SetCacheSecurityGroupNames sets the CacheSecurityGroupNames field's value. -func (s *CreateReplicationGroupInput) SetCacheSecurityGroupNames(v []*string) *CreateReplicationGroupInput { - s.CacheSecurityGroupNames = v - return s +// GoString returns the string representation +func (s DeleteUserGroupOutput) GoString() string { + return s.String() } -// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. -func (s *CreateReplicationGroupInput) SetCacheSubnetGroupName(v string) *CreateReplicationGroupInput { - s.CacheSubnetGroupName = &v +// SetARN sets the ARN field's value. +func (s *DeleteUserGroupOutput) SetARN(v string) *DeleteUserGroupOutput { + s.ARN = &v return s } // SetEngine sets the Engine field's value. -func (s *CreateReplicationGroupInput) SetEngine(v string) *CreateReplicationGroupInput { +func (s *DeleteUserGroupOutput) SetEngine(v string) *DeleteUserGroupOutput { s.Engine = &v return s } -// SetEngineVersion sets the EngineVersion field's value. -func (s *CreateReplicationGroupInput) SetEngineVersion(v string) *CreateReplicationGroupInput { - s.EngineVersion = &v +// SetPendingChanges sets the PendingChanges field's value. +func (s *DeleteUserGroupOutput) SetPendingChanges(v *UserGroupPendingChanges) *DeleteUserGroupOutput { + s.PendingChanges = v return s } -// SetKmsKeyId sets the KmsKeyId field's value. -func (s *CreateReplicationGroupInput) SetKmsKeyId(v string) *CreateReplicationGroupInput { - s.KmsKeyId = &v +// SetReplicationGroups sets the ReplicationGroups field's value. +func (s *DeleteUserGroupOutput) SetReplicationGroups(v []*string) *DeleteUserGroupOutput { + s.ReplicationGroups = v return s } -// SetNodeGroupConfiguration sets the NodeGroupConfiguration field's value. -func (s *CreateReplicationGroupInput) SetNodeGroupConfiguration(v []*NodeGroupConfiguration) *CreateReplicationGroupInput { - s.NodeGroupConfiguration = v +// SetStatus sets the Status field's value. 
+func (s *DeleteUserGroupOutput) SetStatus(v string) *DeleteUserGroupOutput { + s.Status = &v return s } -// SetNotificationTopicArn sets the NotificationTopicArn field's value. -func (s *CreateReplicationGroupInput) SetNotificationTopicArn(v string) *CreateReplicationGroupInput { - s.NotificationTopicArn = &v +// SetUserGroupId sets the UserGroupId field's value. +func (s *DeleteUserGroupOutput) SetUserGroupId(v string) *DeleteUserGroupOutput { + s.UserGroupId = &v return s } -// SetNumCacheClusters sets the NumCacheClusters field's value. -func (s *CreateReplicationGroupInput) SetNumCacheClusters(v int64) *CreateReplicationGroupInput { - s.NumCacheClusters = &v +// SetUserIds sets the UserIds field's value. +func (s *DeleteUserGroupOutput) SetUserIds(v []*string) *DeleteUserGroupOutput { + s.UserIds = v return s } -// SetNumNodeGroups sets the NumNodeGroups field's value. -func (s *CreateReplicationGroupInput) SetNumNodeGroups(v int64) *CreateReplicationGroupInput { - s.NumNodeGroups = &v - return s -} +type DeleteUserInput struct { + _ struct{} `type:"structure"` -// SetPort sets the Port field's value. -func (s *CreateReplicationGroupInput) SetPort(v int64) *CreateReplicationGroupInput { - s.Port = &v - return s + // The ID of the user. + // + // UserId is a required field + UserId *string `min:"1" type:"string" required:"true"` } -// SetPreferredCacheClusterAZs sets the PreferredCacheClusterAZs field's value. -func (s *CreateReplicationGroupInput) SetPreferredCacheClusterAZs(v []*string) *CreateReplicationGroupInput { - s.PreferredCacheClusterAZs = v - return s +// String returns the string representation +func (s DeleteUserInput) String() string { + return awsutil.Prettify(s) } -// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. -func (s *CreateReplicationGroupInput) SetPreferredMaintenanceWindow(v string) *CreateReplicationGroupInput { - s.PreferredMaintenanceWindow = &v - return s +// GoString returns the string representation +func (s DeleteUserInput) GoString() string { + return s.String() } -// SetPrimaryClusterId sets the PrimaryClusterId field's value. -func (s *CreateReplicationGroupInput) SetPrimaryClusterId(v string) *CreateReplicationGroupInput { - s.PrimaryClusterId = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserInput"} + if s.UserId == nil { + invalidParams.Add(request.NewErrParamRequired("UserId")) + } + if s.UserId != nil && len(*s.UserId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserId", 1)) + } -// SetReplicasPerNodeGroup sets the ReplicasPerNodeGroup field's value. -func (s *CreateReplicationGroupInput) SetReplicasPerNodeGroup(v int64) *CreateReplicationGroupInput { - s.ReplicasPerNodeGroup = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetReplicationGroupDescription sets the ReplicationGroupDescription field's value. -func (s *CreateReplicationGroupInput) SetReplicationGroupDescription(v string) *CreateReplicationGroupInput { - s.ReplicationGroupDescription = &v +// SetUserId sets the UserId field's value. +func (s *DeleteUserInput) SetUserId(v string) *DeleteUserInput { + s.UserId = &v return s } -// SetReplicationGroupId sets the ReplicationGroupId field's value. 
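Illustrative only, not part of the vendored diff: the DeleteUserGroupInput and DeleteUserInput shapes added above follow the generated pattern of chainable setters plus a Validate method that enforces required fields (and the minimum length on UserId) before any request is sent. A sketch of calling the corresponding DeleteUserGroup and DeleteUser operations, assuming a configured default session and placeholder IDs.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	// Chainable setter builds the input; Validate reports a missing UserGroupId locally.
	groupInput := (&elasticache.DeleteUserGroupInput{}).SetUserGroupId("example-user-group")
	if err := groupInput.Validate(); err != nil {
		log.Fatal(err)
	}
	groupOut, err := svc.DeleteUserGroup(groupInput)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("user group status:", aws.StringValue(groupOut.Status))

	// DeleteUserInput additionally enforces a minimum length of 1 on UserId.
	if _, err := svc.DeleteUser(&elasticache.DeleteUserInput{
		UserId: aws.String("example-user"),
	}); err != nil {
		log.Println("delete user:", err)
	}
}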
-func (s *CreateReplicationGroupInput) SetReplicationGroupId(v string) *CreateReplicationGroupInput { - s.ReplicationGroupId = &v - return s -} +type DeleteUserOutput struct { + _ struct{} `type:"structure"` -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *CreateReplicationGroupInput) SetSecurityGroupIds(v []*string) *CreateReplicationGroupInput { - s.SecurityGroupIds = v - return s + // The Amazon Resource Name (ARN) of the user account. + ARN *string `type:"string"` + + // Access permissions string used for this user account. + AccessString *string `type:"string"` + + // Denotes whether the user requires a password to authenticate. + Authentication *Authentication `type:"structure"` + + // Must be Redis. + Engine *string `type:"string"` + + // Indicates the user status. Can be "active", "modifying" or "deleting". + Status *string `type:"string"` + + // Returns a list of the user group IDs the user belongs to. + UserGroupIds []*string `type:"list"` + + // The ID of the user. + UserId *string `type:"string"` + + // The username of the user. + UserName *string `type:"string"` } -// SetSnapshotArns sets the SnapshotArns field's value. -func (s *CreateReplicationGroupInput) SetSnapshotArns(v []*string) *CreateReplicationGroupInput { - s.SnapshotArns = v - return s +// String returns the string representation +func (s DeleteUserOutput) String() string { + return awsutil.Prettify(s) } -// SetSnapshotName sets the SnapshotName field's value. -func (s *CreateReplicationGroupInput) SetSnapshotName(v string) *CreateReplicationGroupInput { - s.SnapshotName = &v - return s +// GoString returns the string representation +func (s DeleteUserOutput) GoString() string { + return s.String() } -// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value. -func (s *CreateReplicationGroupInput) SetSnapshotRetentionLimit(v int64) *CreateReplicationGroupInput { - s.SnapshotRetentionLimit = &v +// SetARN sets the ARN field's value. +func (s *DeleteUserOutput) SetARN(v string) *DeleteUserOutput { + s.ARN = &v return s } -// SetSnapshotWindow sets the SnapshotWindow field's value. -func (s *CreateReplicationGroupInput) SetSnapshotWindow(v string) *CreateReplicationGroupInput { - s.SnapshotWindow = &v +// SetAccessString sets the AccessString field's value. +func (s *DeleteUserOutput) SetAccessString(v string) *DeleteUserOutput { + s.AccessString = &v return s } -// SetTags sets the Tags field's value. -func (s *CreateReplicationGroupInput) SetTags(v []*Tag) *CreateReplicationGroupInput { - s.Tags = v +// SetAuthentication sets the Authentication field's value. +func (s *DeleteUserOutput) SetAuthentication(v *Authentication) *DeleteUserOutput { + s.Authentication = v return s } -// SetTransitEncryptionEnabled sets the TransitEncryptionEnabled field's value. -func (s *CreateReplicationGroupInput) SetTransitEncryptionEnabled(v bool) *CreateReplicationGroupInput { - s.TransitEncryptionEnabled = &v +// SetEngine sets the Engine field's value. +func (s *DeleteUserOutput) SetEngine(v string) *DeleteUserOutput { + s.Engine = &v return s } -type CreateReplicationGroupOutput struct { - _ struct{} `type:"structure"` - - // Contains all of the attributes of a specific Redis replication group. - ReplicationGroup *ReplicationGroup `type:"structure"` +// SetStatus sets the Status field's value. 
+func (s *DeleteUserOutput) SetStatus(v string) *DeleteUserOutput { + s.Status = &v + return s } -// String returns the string representation -func (s CreateReplicationGroupOutput) String() string { - return awsutil.Prettify(s) +// SetUserGroupIds sets the UserGroupIds field's value. +func (s *DeleteUserOutput) SetUserGroupIds(v []*string) *DeleteUserOutput { + s.UserGroupIds = v + return s } -// GoString returns the string representation -func (s CreateReplicationGroupOutput) GoString() string { - return s.String() +// SetUserId sets the UserId field's value. +func (s *DeleteUserOutput) SetUserId(v string) *DeleteUserOutput { + s.UserId = &v + return s } -// SetReplicationGroup sets the ReplicationGroup field's value. -func (s *CreateReplicationGroupOutput) SetReplicationGroup(v *ReplicationGroup) *CreateReplicationGroupOutput { - s.ReplicationGroup = v +// SetUserName sets the UserName field's value. +func (s *DeleteUserOutput) SetUserName(v string) *DeleteUserOutput { + s.UserName = &v return s } -// Represents the input of a CreateSnapshot operation. -type CreateSnapshotInput struct { +// Represents the input of a DescribeCacheClusters operation. +type DescribeCacheClustersInput struct { _ struct{} `type:"structure"` - // The identifier of an existing cluster. The snapshot is created from this - // cluster. + // The user-supplied cluster identifier. If this parameter is specified, only + // information about that specific cluster is returned. This parameter isn't + // case sensitive. CacheClusterId *string `type:"string"` - // The ID of the KMS key used to encrypt the snapshot. - KmsKeyId *string `type:"string"` - - // The identifier of an existing replication group. The snapshot is created - // from this replication group. - ReplicationGroupId *string `type:"string"` + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` - // A name for the snapshot being created. + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. // - // SnapshotName is a required field - SnapshotName *string `type:"string" required:"true"` -} + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` -// String returns the string representation -func (s CreateSnapshotInput) String() string { - return awsutil.Prettify(s) -} + // An optional flag that can be included in the DescribeCacheCluster request + // to show only nodes (API/CLI: clusters) that are not members of a replication + // group. In practice, this mean Memcached and single node Redis clusters. + ShowCacheClustersNotInReplicationGroups *bool `type:"boolean"` -// GoString returns the string representation -func (s CreateSnapshotInput) GoString() string { - return s.String() + // An optional flag that can be included in the DescribeCacheCluster request + // to retrieve information about the individual cache nodes. + ShowCacheNodeInfo *bool `type:"boolean"` } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotInput"} - if s.SnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("SnapshotName")) - } +// String returns the string representation +func (s DescribeCacheClustersInput) String() string { + return awsutil.Prettify(s) +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// GoString returns the string representation +func (s DescribeCacheClustersInput) GoString() string { + return s.String() } // SetCacheClusterId sets the CacheClusterId field's value. -func (s *CreateSnapshotInput) SetCacheClusterId(v string) *CreateSnapshotInput { +func (s *DescribeCacheClustersInput) SetCacheClusterId(v string) *DescribeCacheClustersInput { s.CacheClusterId = &v return s } -// SetKmsKeyId sets the KmsKeyId field's value. -func (s *CreateSnapshotInput) SetKmsKeyId(v string) *CreateSnapshotInput { - s.KmsKeyId = &v +// SetMarker sets the Marker field's value. +func (s *DescribeCacheClustersInput) SetMarker(v string) *DescribeCacheClustersInput { + s.Marker = &v return s } -// SetReplicationGroupId sets the ReplicationGroupId field's value. -func (s *CreateSnapshotInput) SetReplicationGroupId(v string) *CreateSnapshotInput { - s.ReplicationGroupId = &v +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeCacheClustersInput) SetMaxRecords(v int64) *DescribeCacheClustersInput { + s.MaxRecords = &v return s } -// SetSnapshotName sets the SnapshotName field's value. -func (s *CreateSnapshotInput) SetSnapshotName(v string) *CreateSnapshotInput { - s.SnapshotName = &v +// SetShowCacheClustersNotInReplicationGroups sets the ShowCacheClustersNotInReplicationGroups field's value. +func (s *DescribeCacheClustersInput) SetShowCacheClustersNotInReplicationGroups(v bool) *DescribeCacheClustersInput { + s.ShowCacheClustersNotInReplicationGroups = &v return s } -type CreateSnapshotOutput struct { - _ struct{} `type:"structure"` - - // Represents a copy of an entire Redis cluster as of the time when the snapshot - // was taken. - Snapshot *Snapshot `type:"structure"` -} - -// String returns the string representation -func (s CreateSnapshotOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateSnapshotOutput) GoString() string { - return s.String() -} - -// SetSnapshot sets the Snapshot field's value. -func (s *CreateSnapshotOutput) SetSnapshot(v *Snapshot) *CreateSnapshotOutput { - s.Snapshot = v +// SetShowCacheNodeInfo sets the ShowCacheNodeInfo field's value. +func (s *DescribeCacheClustersInput) SetShowCacheNodeInfo(v bool) *DescribeCacheClustersInput { + s.ShowCacheNodeInfo = &v return s } -// The endpoint from which data should be migrated. -type CustomerNodeEndpoint struct { +// Represents the output of a DescribeCacheClusters operation. +type DescribeCacheClustersOutput struct { _ struct{} `type:"structure"` - // The address of the node endpoint - Address *string `type:"string"` + // A list of clusters. Each item in the list contains detailed information about + // one cluster. + CacheClusters []*CacheCluster `locationNameList:"CacheCluster" type:"list"` - // The port of the node endpoint - Port *int64 `type:"integer"` + // Provides an identifier to allow retrieval of paginated results. 
+ Marker *string `type:"string"` } // String returns the string representation -func (s CustomerNodeEndpoint) String() string { +func (s DescribeCacheClustersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CustomerNodeEndpoint) GoString() string { +func (s DescribeCacheClustersOutput) GoString() string { return s.String() } -// SetAddress sets the Address field's value. -func (s *CustomerNodeEndpoint) SetAddress(v string) *CustomerNodeEndpoint { - s.Address = &v +// SetCacheClusters sets the CacheClusters field's value. +func (s *DescribeCacheClustersOutput) SetCacheClusters(v []*CacheCluster) *DescribeCacheClustersOutput { + s.CacheClusters = v return s } -// SetPort sets the Port field's value. -func (s *CustomerNodeEndpoint) SetPort(v int64) *CustomerNodeEndpoint { - s.Port = &v +// SetMarker sets the Marker field's value. +func (s *DescribeCacheClustersOutput) SetMarker(v string) *DescribeCacheClustersOutput { + s.Marker = &v return s } -type DecreaseReplicaCountInput struct { +// Represents the input of a DescribeCacheEngineVersions operation. +type DescribeCacheEngineVersionsInput struct { _ struct{} `type:"structure"` - // If True, the number of replica nodes is decreased immediately. ApplyImmediately=False - // is not currently supported. + // The name of a specific cache parameter group family to return details for. // - // ApplyImmediately is a required field - ApplyImmediately *bool `type:"boolean" required:"true"` - - // The number of read replica nodes you want at the completion of this operation. - // For Redis (cluster mode disabled) replication groups, this is the number - // of replica nodes in the replication group. For Redis (cluster mode enabled) - // replication groups, this is the number of replica nodes in each of the replication - // group's node groups. + // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 + // | redis4.0 | redis5.0 | // - // The minimum number of replicas in a shard or replication group is: + // Constraints: // - // * Redis (cluster mode disabled) If Multi-AZ with Automatic Failover is - // enabled: 1 If Multi-AZ with Automatic Failover is not enabled: 0 + // * Must be 1 to 255 alphanumeric characters // - // * Redis (cluster mode enabled): 0 (though you will not be able to failover - // to a replica if your primary node fails) - NewReplicaCount *int64 `type:"integer"` + // * First character must be a letter + // + // * Cannot end with a hyphen or contain two consecutive hyphens + CacheParameterGroupFamily *string `type:"string"` - // A list of ConfigureShard objects that can be used to configure each shard - // in a Redis (cluster mode enabled) replication group. The ConfigureShard has - // three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. - ReplicaConfiguration []*ConfigureShard `locationNameList:"ConfigureShard" type:"list"` + // If true, specifies that only the default version of the specified engine + // or engine and major version combination is to be returned. + DefaultOnly *bool `type:"boolean"` - // A list of the node ids to remove from the replication group or node group - // (shard). - ReplicasToRemove []*string `type:"list"` + // The cache engine to return. Valid values: memcached | redis + Engine *string `type:"string"` - // The id of the replication group from which you want to remove replica nodes. + // The cache engine version to return. 
// - // ReplicationGroupId is a required field - ReplicationGroupId *string `type:"string" required:"true"` + // Example: 1.4.14 + EngineVersion *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` } // String returns the string representation -func (s DecreaseReplicaCountInput) String() string { +func (s DescribeCacheEngineVersionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DecreaseReplicaCountInput) GoString() string { +func (s DescribeCacheEngineVersionsInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DecreaseReplicaCountInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DecreaseReplicaCountInput"} - if s.ApplyImmediately == nil { - invalidParams.Add(request.NewErrParamRequired("ApplyImmediately")) - } - if s.ReplicationGroupId == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) - } - if s.ReplicaConfiguration != nil { - for i, v := range s.ReplicaConfiguration { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaConfiguration", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. +func (s *DescribeCacheEngineVersionsInput) SetCacheParameterGroupFamily(v string) *DescribeCacheEngineVersionsInput { + s.CacheParameterGroupFamily = &v + return s } -// SetApplyImmediately sets the ApplyImmediately field's value. -func (s *DecreaseReplicaCountInput) SetApplyImmediately(v bool) *DecreaseReplicaCountInput { - s.ApplyImmediately = &v +// SetDefaultOnly sets the DefaultOnly field's value. +func (s *DescribeCacheEngineVersionsInput) SetDefaultOnly(v bool) *DescribeCacheEngineVersionsInput { + s.DefaultOnly = &v return s } -// SetNewReplicaCount sets the NewReplicaCount field's value. -func (s *DecreaseReplicaCountInput) SetNewReplicaCount(v int64) *DecreaseReplicaCountInput { - s.NewReplicaCount = &v +// SetEngine sets the Engine field's value. +func (s *DescribeCacheEngineVersionsInput) SetEngine(v string) *DescribeCacheEngineVersionsInput { + s.Engine = &v return s } -// SetReplicaConfiguration sets the ReplicaConfiguration field's value. -func (s *DecreaseReplicaCountInput) SetReplicaConfiguration(v []*ConfigureShard) *DecreaseReplicaCountInput { - s.ReplicaConfiguration = v +// SetEngineVersion sets the EngineVersion field's value. +func (s *DescribeCacheEngineVersionsInput) SetEngineVersion(v string) *DescribeCacheEngineVersionsInput { + s.EngineVersion = &v return s } -// SetReplicasToRemove sets the ReplicasToRemove field's value. 
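Illustrative only, not part of the vendored diff: the Describe* inputs in this hunk share the Marker/MaxRecords pagination contract described in their field comments, where the Marker returned by one response is passed back to fetch the next page. A sketch of that loop against DescribeCacheClusters, assuming a configured default session.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	input := &elasticache.DescribeCacheClustersInput{
		MaxRecords:        aws.Int64(20), // documented minimum page size
		ShowCacheNodeInfo: aws.Bool(true),
	}
	for {
		page, err := svc.DescribeCacheClusters(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, cc := range page.CacheClusters {
			fmt.Println(aws.StringValue(cc.CacheClusterId), aws.StringValue(cc.CacheClusterStatus))
		}
		// An absent Marker in the output means there are no further pages.
		if page.Marker == nil {
			break
		}
		input.Marker = page.Marker
	}
}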
-func (s *DecreaseReplicaCountInput) SetReplicasToRemove(v []*string) *DecreaseReplicaCountInput { - s.ReplicasToRemove = v +// SetMarker sets the Marker field's value. +func (s *DescribeCacheEngineVersionsInput) SetMarker(v string) *DescribeCacheEngineVersionsInput { + s.Marker = &v return s } -// SetReplicationGroupId sets the ReplicationGroupId field's value. -func (s *DecreaseReplicaCountInput) SetReplicationGroupId(v string) *DecreaseReplicaCountInput { - s.ReplicationGroupId = &v +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeCacheEngineVersionsInput) SetMaxRecords(v int64) *DescribeCacheEngineVersionsInput { + s.MaxRecords = &v return s } -type DecreaseReplicaCountOutput struct { +// Represents the output of a DescribeCacheEngineVersions operation. +type DescribeCacheEngineVersionsOutput struct { _ struct{} `type:"structure"` - // Contains all of the attributes of a specific Redis replication group. - ReplicationGroup *ReplicationGroup `type:"structure"` + // A list of cache engine version details. Each element in the list contains + // detailed information about one cache engine version. + CacheEngineVersions []*CacheEngineVersion `locationNameList:"CacheEngineVersion" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` } // String returns the string representation -func (s DecreaseReplicaCountOutput) String() string { +func (s DescribeCacheEngineVersionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DecreaseReplicaCountOutput) GoString() string { +func (s DescribeCacheEngineVersionsOutput) GoString() string { return s.String() } -// SetReplicationGroup sets the ReplicationGroup field's value. -func (s *DecreaseReplicaCountOutput) SetReplicationGroup(v *ReplicationGroup) *DecreaseReplicaCountOutput { - s.ReplicationGroup = v +// SetCacheEngineVersions sets the CacheEngineVersions field's value. +func (s *DescribeCacheEngineVersionsOutput) SetCacheEngineVersions(v []*CacheEngineVersion) *DescribeCacheEngineVersionsOutput { + s.CacheEngineVersions = v return s } -// Represents the input of a DeleteCacheCluster operation. -type DeleteCacheClusterInput struct { +// SetMarker sets the Marker field's value. +func (s *DescribeCacheEngineVersionsOutput) SetMarker(v string) *DescribeCacheEngineVersionsOutput { + s.Marker = &v + return s +} + +// Represents the input of a DescribeCacheParameterGroups operation. +type DescribeCacheParameterGroupsInput struct { _ struct{} `type:"structure"` - // The cluster identifier for the cluster to be deleted. This parameter is not - // case sensitive. - // - // CacheClusterId is a required field - CacheClusterId *string `type:"string" required:"true"` + // The name of a specific cache parameter group to return details for. + CacheParameterGroupName *string `type:"string"` - // The user-supplied name of a final cluster snapshot. This is the unique name - // that identifies the snapshot. ElastiCache creates the snapshot, and then - // deletes the cluster immediately afterward. - FinalSnapshotIdentifier *string `type:"string"` + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. 
If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` } // String returns the string representation -func (s DeleteCacheClusterInput) String() string { +func (s DescribeCacheParameterGroupsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteCacheClusterInput) GoString() string { +func (s DescribeCacheParameterGroupsInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteCacheClusterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteCacheClusterInput"} - if s.CacheClusterId == nil { - invalidParams.Add(request.NewErrParamRequired("CacheClusterId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. +func (s *DescribeCacheParameterGroupsInput) SetCacheParameterGroupName(v string) *DescribeCacheParameterGroupsInput { + s.CacheParameterGroupName = &v + return s } -// SetCacheClusterId sets the CacheClusterId field's value. -func (s *DeleteCacheClusterInput) SetCacheClusterId(v string) *DeleteCacheClusterInput { - s.CacheClusterId = &v +// SetMarker sets the Marker field's value. +func (s *DescribeCacheParameterGroupsInput) SetMarker(v string) *DescribeCacheParameterGroupsInput { + s.Marker = &v return s } -// SetFinalSnapshotIdentifier sets the FinalSnapshotIdentifier field's value. -func (s *DeleteCacheClusterInput) SetFinalSnapshotIdentifier(v string) *DeleteCacheClusterInput { - s.FinalSnapshotIdentifier = &v +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeCacheParameterGroupsInput) SetMaxRecords(v int64) *DescribeCacheParameterGroupsInput { + s.MaxRecords = &v return s } -type DeleteCacheClusterOutput struct { +// Represents the output of a DescribeCacheParameterGroups operation. +type DescribeCacheParameterGroupsOutput struct { _ struct{} `type:"structure"` - // Contains all of the attributes of a specific cluster. - CacheCluster *CacheCluster `type:"structure"` + // A list of cache parameter groups. Each element in the list contains detailed + // information about one cache parameter group. + CacheParameterGroups []*CacheParameterGroup `locationNameList:"CacheParameterGroup" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` } // String returns the string representation -func (s DeleteCacheClusterOutput) String() string { +func (s DescribeCacheParameterGroupsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteCacheClusterOutput) GoString() string { +func (s DescribeCacheParameterGroupsOutput) GoString() string { return s.String() } -// SetCacheCluster sets the CacheCluster field's value. -func (s *DeleteCacheClusterOutput) SetCacheCluster(v *CacheCluster) *DeleteCacheClusterOutput { - s.CacheCluster = v +// SetCacheParameterGroups sets the CacheParameterGroups field's value. +func (s *DescribeCacheParameterGroupsOutput) SetCacheParameterGroups(v []*CacheParameterGroup) *DescribeCacheParameterGroupsOutput { + s.CacheParameterGroups = v return s } -// Represents the input of a DeleteCacheParameterGroup operation. 
-type DeleteCacheParameterGroupInput struct { +// SetMarker sets the Marker field's value. +func (s *DescribeCacheParameterGroupsOutput) SetMarker(v string) *DescribeCacheParameterGroupsOutput { + s.Marker = &v + return s +} + +// Represents the input of a DescribeCacheParameters operation. +type DescribeCacheParametersInput struct { _ struct{} `type:"structure"` - // The name of the cache parameter group to delete. - // - // The specified cache security group must not be associated with any clusters. + // The name of a specific cache parameter group to return details for. // // CacheParameterGroupName is a required field CacheParameterGroupName *string `type:"string" required:"true"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` + + // The parameter types to return. + // + // Valid values: user | system | engine-default + Source *string `type:"string"` } // String returns the string representation -func (s DeleteCacheParameterGroupInput) String() string { +func (s DescribeCacheParametersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteCacheParameterGroupInput) GoString() string { +func (s DescribeCacheParametersInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteCacheParameterGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteCacheParameterGroupInput"} +func (s *DescribeCacheParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCacheParametersInput"} if s.CacheParameterGroupName == nil { invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName")) } @@ -8883,245 +12289,282 @@ func (s *DeleteCacheParameterGroupInput) Validate() error { } // SetCacheParameterGroupName sets the CacheParameterGroupName field's value. -func (s *DeleteCacheParameterGroupInput) SetCacheParameterGroupName(v string) *DeleteCacheParameterGroupInput { +func (s *DescribeCacheParametersInput) SetCacheParameterGroupName(v string) *DescribeCacheParametersInput { s.CacheParameterGroupName = &v return s } -type DeleteCacheParameterGroupOutput struct { - _ struct{} `type:"structure"` +// SetMarker sets the Marker field's value. +func (s *DescribeCacheParametersInput) SetMarker(v string) *DescribeCacheParametersInput { + s.Marker = &v + return s } -// String returns the string representation -func (s DeleteCacheParameterGroupOutput) String() string { - return awsutil.Prettify(s) +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeCacheParametersInput) SetMaxRecords(v int64) *DescribeCacheParametersInput { + s.MaxRecords = &v + return s } -// GoString returns the string representation -func (s DeleteCacheParameterGroupOutput) GoString() string { - return s.String() +// SetSource sets the Source field's value. 
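Illustrative only, not part of the vendored diff: DescribeCacheParametersInput requires CacheParameterGroupName (enforced by its Validate method above), and the optional Source field narrows the listing to user, system, or engine-default parameters. A sketch with a placeholder parameter group name, assuming a configured default session.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	out, err := svc.DescribeCacheParameters(&elasticache.DescribeCacheParametersInput{
		CacheParameterGroupName: aws.String("example-redis-params"), // required; placeholder name
		Source:                  aws.String("user"),                 // only parameters overridden by the user
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range out.Parameters {
		fmt.Printf("%s = %s\n", aws.StringValue(p.ParameterName), aws.StringValue(p.ParameterValue))
	}
}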
+func (s *DescribeCacheParametersInput) SetSource(v string) *DescribeCacheParametersInput { + s.Source = &v + return s } -// Represents the input of a DeleteCacheSecurityGroup operation. -type DeleteCacheSecurityGroupInput struct { +// Represents the output of a DescribeCacheParameters operation. +type DescribeCacheParametersOutput struct { _ struct{} `type:"structure"` - // The name of the cache security group to delete. - // - // You cannot delete the default security group. - // - // CacheSecurityGroupName is a required field - CacheSecurityGroupName *string `type:"string" required:"true"` + // A list of parameters specific to a particular cache node type. Each element + // in the list contains detailed information about one parameter. + CacheNodeTypeSpecificParameters []*CacheNodeTypeSpecificParameter `locationNameList:"CacheNodeTypeSpecificParameter" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` + + // A list of Parameter instances. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` } // String returns the string representation -func (s DeleteCacheSecurityGroupInput) String() string { +func (s DescribeCacheParametersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteCacheSecurityGroupInput) GoString() string { +func (s DescribeCacheParametersOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteCacheSecurityGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteCacheSecurityGroupInput"} - if s.CacheSecurityGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCacheNodeTypeSpecificParameters sets the CacheNodeTypeSpecificParameters field's value. +func (s *DescribeCacheParametersOutput) SetCacheNodeTypeSpecificParameters(v []*CacheNodeTypeSpecificParameter) *DescribeCacheParametersOutput { + s.CacheNodeTypeSpecificParameters = v + return s } -// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value. -func (s *DeleteCacheSecurityGroupInput) SetCacheSecurityGroupName(v string) *DeleteCacheSecurityGroupInput { - s.CacheSecurityGroupName = &v +// SetMarker sets the Marker field's value. +func (s *DescribeCacheParametersOutput) SetMarker(v string) *DescribeCacheParametersOutput { + s.Marker = &v return s } -type DeleteCacheSecurityGroupOutput struct { - _ struct{} `type:"structure"` +// SetParameters sets the Parameters field's value. +func (s *DescribeCacheParametersOutput) SetParameters(v []*Parameter) *DescribeCacheParametersOutput { + s.Parameters = v + return s } -// String returns the string representation -func (s DeleteCacheSecurityGroupOutput) String() string { - return awsutil.Prettify(s) -} +// Represents the input of a DescribeCacheSecurityGroups operation. +type DescribeCacheSecurityGroupsInput struct { + _ struct{} `type:"structure"` -// GoString returns the string representation -func (s DeleteCacheSecurityGroupOutput) GoString() string { - return s.String() -} + // The name of the cache security group to return details for. + CacheSecurityGroupName *string `type:"string"` -// Represents the input of a DeleteCacheSubnetGroup operation. -type DeleteCacheSubnetGroupInput struct { - _ struct{} `type:"structure"` + // An optional marker returned from a prior request. 
Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` - // The name of the cache subnet group to delete. + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. // - // Constraints: Must contain no more than 255 alphanumeric characters or hyphens. + // Default: 100 // - // CacheSubnetGroupName is a required field - CacheSubnetGroupName *string `type:"string" required:"true"` + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` } // String returns the string representation -func (s DeleteCacheSubnetGroupInput) String() string { +func (s DescribeCacheSecurityGroupsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteCacheSubnetGroupInput) GoString() string { +func (s DescribeCacheSecurityGroupsInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteCacheSubnetGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteCacheSubnetGroupInput"} - if s.CacheSubnetGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupName")) - } +// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value. +func (s *DescribeCacheSecurityGroupsInput) SetCacheSecurityGroupName(v string) *DescribeCacheSecurityGroupsInput { + s.CacheSecurityGroupName = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetMarker sets the Marker field's value. +func (s *DescribeCacheSecurityGroupsInput) SetMarker(v string) *DescribeCacheSecurityGroupsInput { + s.Marker = &v + return s } -// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. -func (s *DeleteCacheSubnetGroupInput) SetCacheSubnetGroupName(v string) *DeleteCacheSubnetGroupInput { - s.CacheSubnetGroupName = &v +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeCacheSecurityGroupsInput) SetMaxRecords(v int64) *DescribeCacheSecurityGroupsInput { + s.MaxRecords = &v return s } -type DeleteCacheSubnetGroupOutput struct { +// Represents the output of a DescribeCacheSecurityGroups operation. +type DescribeCacheSecurityGroupsOutput struct { _ struct{} `type:"structure"` + + // A list of cache security groups. Each element in the list contains detailed + // information about one group. + CacheSecurityGroups []*CacheSecurityGroup `locationNameList:"CacheSecurityGroup" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` } // String returns the string representation -func (s DeleteCacheSubnetGroupOutput) String() string { +func (s DescribeCacheSecurityGroupsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteCacheSubnetGroupOutput) GoString() string { +func (s DescribeCacheSecurityGroupsOutput) GoString() string { return s.String() } -// Represents the input of a DeleteReplicationGroup operation. -type DeleteReplicationGroupInput struct { +// SetCacheSecurityGroups sets the CacheSecurityGroups field's value. 
+func (s *DescribeCacheSecurityGroupsOutput) SetCacheSecurityGroups(v []*CacheSecurityGroup) *DescribeCacheSecurityGroupsOutput { + s.CacheSecurityGroups = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeCacheSecurityGroupsOutput) SetMarker(v string) *DescribeCacheSecurityGroupsOutput { + s.Marker = &v + return s +} + +// Represents the input of a DescribeCacheSubnetGroups operation. +type DescribeCacheSubnetGroupsInput struct { _ struct{} `type:"structure"` - // The name of a final node group (shard) snapshot. ElastiCache creates the - // snapshot from the primary node in the cluster, rather than one of the replicas; - // this is to ensure that it captures the freshest data. After the final snapshot - // is taken, the replication group is immediately deleted. - FinalSnapshotIdentifier *string `type:"string"` + // The name of the cache subnet group to return details for. + CacheSubnetGroupName *string `type:"string"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` - // The identifier for the cluster to be deleted. This parameter is not case - // sensitive. + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. // - // ReplicationGroupId is a required field - ReplicationGroupId *string `type:"string" required:"true"` - - // If set to true, all of the read replicas are deleted, but the primary node - // is retained. - RetainPrimaryCluster *bool `type:"boolean"` + // Default: 100 + // + // Constraints: minimum 20; maximum 100. + MaxRecords *int64 `type:"integer"` } // String returns the string representation -func (s DeleteReplicationGroupInput) String() string { +func (s DescribeCacheSubnetGroupsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteReplicationGroupInput) GoString() string { +func (s DescribeCacheSubnetGroupsInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteReplicationGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationGroupInput"} - if s.ReplicationGroupId == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFinalSnapshotIdentifier sets the FinalSnapshotIdentifier field's value. -func (s *DeleteReplicationGroupInput) SetFinalSnapshotIdentifier(v string) *DeleteReplicationGroupInput { - s.FinalSnapshotIdentifier = &v +// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. +func (s *DescribeCacheSubnetGroupsInput) SetCacheSubnetGroupName(v string) *DescribeCacheSubnetGroupsInput { + s.CacheSubnetGroupName = &v return s } -// SetReplicationGroupId sets the ReplicationGroupId field's value. -func (s *DeleteReplicationGroupInput) SetReplicationGroupId(v string) *DeleteReplicationGroupInput { - s.ReplicationGroupId = &v +// SetMarker sets the Marker field's value. 
+func (s *DescribeCacheSubnetGroupsInput) SetMarker(v string) *DescribeCacheSubnetGroupsInput { + s.Marker = &v return s } -// SetRetainPrimaryCluster sets the RetainPrimaryCluster field's value. -func (s *DeleteReplicationGroupInput) SetRetainPrimaryCluster(v bool) *DeleteReplicationGroupInput { - s.RetainPrimaryCluster = &v +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeCacheSubnetGroupsInput) SetMaxRecords(v int64) *DescribeCacheSubnetGroupsInput { + s.MaxRecords = &v return s } -type DeleteReplicationGroupOutput struct { +// Represents the output of a DescribeCacheSubnetGroups operation. +type DescribeCacheSubnetGroupsOutput struct { _ struct{} `type:"structure"` - // Contains all of the attributes of a specific Redis replication group. - ReplicationGroup *ReplicationGroup `type:"structure"` + // A list of cache subnet groups. Each element in the list contains detailed + // information about one group. + CacheSubnetGroups []*CacheSubnetGroup `locationNameList:"CacheSubnetGroup" type:"list"` + + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` } // String returns the string representation -func (s DeleteReplicationGroupOutput) String() string { +func (s DescribeCacheSubnetGroupsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteReplicationGroupOutput) GoString() string { +func (s DescribeCacheSubnetGroupsOutput) GoString() string { return s.String() } -// SetReplicationGroup sets the ReplicationGroup field's value. -func (s *DeleteReplicationGroupOutput) SetReplicationGroup(v *ReplicationGroup) *DeleteReplicationGroupOutput { - s.ReplicationGroup = v +// SetCacheSubnetGroups sets the CacheSubnetGroups field's value. +func (s *DescribeCacheSubnetGroupsOutput) SetCacheSubnetGroups(v []*CacheSubnetGroup) *DescribeCacheSubnetGroupsOutput { + s.CacheSubnetGroups = v return s } -// Represents the input of a DeleteSnapshot operation. -type DeleteSnapshotInput struct { +// SetMarker sets the Marker field's value. +func (s *DescribeCacheSubnetGroupsOutput) SetMarker(v string) *DescribeCacheSubnetGroupsOutput { + s.Marker = &v + return s +} + +// Represents the input of a DescribeEngineDefaultParameters operation. +type DescribeEngineDefaultParametersInput struct { _ struct{} `type:"structure"` - // The name of the snapshot to be deleted. + // The name of the cache parameter group family. // - // SnapshotName is a required field - SnapshotName *string `type:"string" required:"true"` + // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 + // | redis4.0 | redis5.0 | + // + // CacheParameterGroupFamily is a required field + CacheParameterGroupFamily *string `type:"string" required:"true"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a marker is included in the response + // so that the remaining results can be retrieved. + // + // Default: 100 + // + // Constraints: minimum 20; maximum 100. 
+ MaxRecords *int64 `type:"integer"` } // String returns the string representation -func (s DeleteSnapshotInput) String() string { +func (s DescribeEngineDefaultParametersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSnapshotInput) GoString() string { +func (s DescribeEngineDefaultParametersInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSnapshotInput"} - if s.SnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("SnapshotName")) +func (s *DescribeEngineDefaultParametersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeEngineDefaultParametersInput"} + if s.CacheParameterGroupFamily == nil { + invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupFamily")) } if invalidParams.Len() > 0 { @@ -9130,44 +12573,59 @@ func (s *DeleteSnapshotInput) Validate() error { return nil } -// SetSnapshotName sets the SnapshotName field's value. -func (s *DeleteSnapshotInput) SetSnapshotName(v string) *DeleteSnapshotInput { - s.SnapshotName = &v +// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. +func (s *DescribeEngineDefaultParametersInput) SetCacheParameterGroupFamily(v string) *DescribeEngineDefaultParametersInput { + s.CacheParameterGroupFamily = &v return s } -type DeleteSnapshotOutput struct { +// SetMarker sets the Marker field's value. +func (s *DescribeEngineDefaultParametersInput) SetMarker(v string) *DescribeEngineDefaultParametersInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeEngineDefaultParametersInput) SetMaxRecords(v int64) *DescribeEngineDefaultParametersInput { + s.MaxRecords = &v + return s +} + +type DescribeEngineDefaultParametersOutput struct { _ struct{} `type:"structure"` - // Represents a copy of an entire Redis cluster as of the time when the snapshot - // was taken. - Snapshot *Snapshot `type:"structure"` + // Represents the output of a DescribeEngineDefaultParameters operation. + EngineDefaults *EngineDefaults `type:"structure"` } // String returns the string representation -func (s DeleteSnapshotOutput) String() string { +func (s DescribeEngineDefaultParametersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSnapshotOutput) GoString() string { +func (s DescribeEngineDefaultParametersOutput) GoString() string { return s.String() } -// SetSnapshot sets the Snapshot field's value. -func (s *DeleteSnapshotOutput) SetSnapshot(v *Snapshot) *DeleteSnapshotOutput { - s.Snapshot = v +// SetEngineDefaults sets the EngineDefaults field's value. +func (s *DescribeEngineDefaultParametersOutput) SetEngineDefaults(v *EngineDefaults) *DescribeEngineDefaultParametersOutput { + s.EngineDefaults = v return s } -// Represents the input of a DescribeCacheClusters operation. -type DescribeCacheClustersInput struct { +// Represents the input of a DescribeEvents operation. +type DescribeEventsInput struct { _ struct{} `type:"structure"` - // The user-supplied cluster identifier. If this parameter is specified, only - // information about that specific cluster is returned. This parameter isn't - // case sensitive. - CacheClusterId *string `type:"string"` + // The number of minutes worth of events to retrieve. 
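Illustrative only, not part of the vendored diff: DescribeEngineDefaultParameters keys on the required CacheParameterGroupFamily and returns its results nested under the EngineDefaults shape rather than at the top level of the output. A minimal sketch using one of the documented family values, assuming a configured default session.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	out, err := svc.DescribeEngineDefaultParameters(&elasticache.DescribeEngineDefaultParametersInput{
		CacheParameterGroupFamily: aws.String("redis5.0"), // required field
	})
	if err != nil {
		log.Fatal(err)
	}
	// The defaults sit one level down, on the EngineDefaults shape.
	if out.EngineDefaults != nil {
		for _, p := range out.EngineDefaults.Parameters {
			fmt.Println(aws.StringValue(p.ParameterName), aws.StringValue(p.ParameterValue))
		}
	}
}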
+ Duration *int64 `type:"integer"` + + // The end of the time interval for which to retrieve events, specified in ISO + // 8601 format. + // + // Example: 2017-03-30T07:03:49.555Z + EndTime *time.Time `type:"timestamp"` // An optional marker returned from a prior request. Use this marker for pagination // of results from this operation. If this parameter is specified, the response @@ -9183,119 +12641,112 @@ type DescribeCacheClustersInput struct { // Constraints: minimum 20; maximum 100. MaxRecords *int64 `type:"integer"` - // An optional flag that can be included in the DescribeCacheCluster request - // to show only nodes (API/CLI: clusters) that are not members of a replication - // group. In practice, this mean Memcached and single node Redis clusters. - ShowCacheClustersNotInReplicationGroups *bool `type:"boolean"` + // The identifier of the event source for which events are returned. If not + // specified, all sources are included in the response. + SourceIdentifier *string `type:"string"` - // An optional flag that can be included in the DescribeCacheCluster request - // to retrieve information about the individual cache nodes. - ShowCacheNodeInfo *bool `type:"boolean"` + // The event source to retrieve events for. If no value is specified, all events + // are returned. + SourceType *string `type:"string" enum:"SourceType"` + + // The beginning of the time interval to retrieve events for, specified in ISO + // 8601 format. + // + // Example: 2017-03-30T07:03:49.555Z + StartTime *time.Time `type:"timestamp"` } // String returns the string representation -func (s DescribeCacheClustersInput) String() string { +func (s DescribeEventsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeCacheClustersInput) GoString() string { +func (s DescribeEventsInput) GoString() string { return s.String() } -// SetCacheClusterId sets the CacheClusterId field's value. -func (s *DescribeCacheClustersInput) SetCacheClusterId(v string) *DescribeCacheClustersInput { - s.CacheClusterId = &v +// SetDuration sets the Duration field's value. +func (s *DescribeEventsInput) SetDuration(v int64) *DescribeEventsInput { + s.Duration = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *DescribeEventsInput) SetEndTime(v time.Time) *DescribeEventsInput { + s.EndTime = &v return s } // SetMarker sets the Marker field's value. -func (s *DescribeCacheClustersInput) SetMarker(v string) *DescribeCacheClustersInput { +func (s *DescribeEventsInput) SetMarker(v string) *DescribeEventsInput { s.Marker = &v return s } // SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeCacheClustersInput) SetMaxRecords(v int64) *DescribeCacheClustersInput { +func (s *DescribeEventsInput) SetMaxRecords(v int64) *DescribeEventsInput { s.MaxRecords = &v return s } -// SetShowCacheClustersNotInReplicationGroups sets the ShowCacheClustersNotInReplicationGroups field's value. -func (s *DescribeCacheClustersInput) SetShowCacheClustersNotInReplicationGroups(v bool) *DescribeCacheClustersInput { - s.ShowCacheClustersNotInReplicationGroups = &v +// SetSourceIdentifier sets the SourceIdentifier field's value. +func (s *DescribeEventsInput) SetSourceIdentifier(v string) *DescribeEventsInput { + s.SourceIdentifier = &v return s } -// SetShowCacheNodeInfo sets the ShowCacheNodeInfo field's value. 
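Illustrative only, not part of the vendored diff: the DescribeEvents shapes above accept either a Duration (minutes of history) or an explicit StartTime/EndTime window, optionally filtered by SourceType and SourceIdentifier. A caller-side sketch, assuming a configured default session; "replication-group" is one of the SourceType enum values generated elsewhere in this file.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession()))

	out, err := svc.DescribeEvents(&elasticache.DescribeEventsInput{
		Duration:   aws.Int64(60),                    // the last hour of events
		SourceType: aws.String("replication-group"),  // restrict to replication group events
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range out.Events {
		fmt.Println(aws.TimeValue(e.Date), aws.StringValue(e.SourceIdentifier), aws.StringValue(e.Message))
	}
}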
-func (s *DescribeCacheClustersInput) SetShowCacheNodeInfo(v bool) *DescribeCacheClustersInput { - s.ShowCacheNodeInfo = &v +// SetSourceType sets the SourceType field's value. +func (s *DescribeEventsInput) SetSourceType(v string) *DescribeEventsInput { + s.SourceType = &v return s } -// Represents the output of a DescribeCacheClusters operation. -type DescribeCacheClustersOutput struct { +// SetStartTime sets the StartTime field's value. +func (s *DescribeEventsInput) SetStartTime(v time.Time) *DescribeEventsInput { + s.StartTime = &v + return s +} + +// Represents the output of a DescribeEvents operation. +type DescribeEventsOutput struct { _ struct{} `type:"structure"` - // A list of clusters. Each item in the list contains detailed information about - // one cluster. - CacheClusters []*CacheCluster `locationNameList:"CacheCluster" type:"list"` + // A list of events. Each element in the list contains detailed information + // about one event. + Events []*Event `locationNameList:"Event" type:"list"` // Provides an identifier to allow retrieval of paginated results. Marker *string `type:"string"` } // String returns the string representation -func (s DescribeCacheClustersOutput) String() string { +func (s DescribeEventsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeCacheClustersOutput) GoString() string { +func (s DescribeEventsOutput) GoString() string { return s.String() } -// SetCacheClusters sets the CacheClusters field's value. -func (s *DescribeCacheClustersOutput) SetCacheClusters(v []*CacheCluster) *DescribeCacheClustersOutput { - s.CacheClusters = v +// SetEvents sets the Events field's value. +func (s *DescribeEventsOutput) SetEvents(v []*Event) *DescribeEventsOutput { + s.Events = v return s } // SetMarker sets the Marker field's value. -func (s *DescribeCacheClustersOutput) SetMarker(v string) *DescribeCacheClustersOutput { +func (s *DescribeEventsOutput) SetMarker(v string) *DescribeEventsOutput { s.Marker = &v return s } -// Represents the input of a DescribeCacheEngineVersions operation. -type DescribeCacheEngineVersionsInput struct { +type DescribeGlobalReplicationGroupsInput struct { _ struct{} `type:"structure"` - // The name of a specific cache parameter group family to return details for. - // - // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 - // | redis4.0 | redis5.0 | - // - // Constraints: - // - // * Must be 1 to 255 alphanumeric characters - // - // * First character must be a letter - // - // * Cannot end with a hyphen or contain two consecutive hyphens - CacheParameterGroupFamily *string `type:"string"` - - // If true, specifies that only the default version of the specified engine - // or engine and major version combination is to be returned. - DefaultOnly *bool `type:"boolean"` - - // The cache engine to return. Valid values: memcached | redis - Engine *string `type:"string"` - - // The cache engine version to return. - // - // Example: 1.4.14 - EngineVersion *string `type:"string"` + // The name of the Global Datastore + GlobalReplicationGroupId *string `type:"string"` // An optional marker returned from a prior request. Use this marker for pagination // of results from this operation. If this parameter is specified, the response @@ -9305,100 +12756,85 @@ type DescribeCacheEngineVersionsInput struct { // The maximum number of records to include in the response. 
If more records // exist than the specified MaxRecords value, a marker is included in the response // so that the remaining results can be retrieved. - // - // Default: 100 - // - // Constraints: minimum 20; maximum 100. MaxRecords *int64 `type:"integer"` + + // Returns the list of members that comprise the Global Datastore. + ShowMemberInfo *bool `type:"boolean"` } // String returns the string representation -func (s DescribeCacheEngineVersionsInput) String() string { +func (s DescribeGlobalReplicationGroupsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeCacheEngineVersionsInput) GoString() string { +func (s DescribeGlobalReplicationGroupsInput) GoString() string { return s.String() } -// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. -func (s *DescribeCacheEngineVersionsInput) SetCacheParameterGroupFamily(v string) *DescribeCacheEngineVersionsInput { - s.CacheParameterGroupFamily = &v - return s -} - -// SetDefaultOnly sets the DefaultOnly field's value. -func (s *DescribeCacheEngineVersionsInput) SetDefaultOnly(v bool) *DescribeCacheEngineVersionsInput { - s.DefaultOnly = &v - return s -} - -// SetEngine sets the Engine field's value. -func (s *DescribeCacheEngineVersionsInput) SetEngine(v string) *DescribeCacheEngineVersionsInput { - s.Engine = &v - return s -} - -// SetEngineVersion sets the EngineVersion field's value. -func (s *DescribeCacheEngineVersionsInput) SetEngineVersion(v string) *DescribeCacheEngineVersionsInput { - s.EngineVersion = &v +// SetGlobalReplicationGroupId sets the GlobalReplicationGroupId field's value. +func (s *DescribeGlobalReplicationGroupsInput) SetGlobalReplicationGroupId(v string) *DescribeGlobalReplicationGroupsInput { + s.GlobalReplicationGroupId = &v return s } // SetMarker sets the Marker field's value. -func (s *DescribeCacheEngineVersionsInput) SetMarker(v string) *DescribeCacheEngineVersionsInput { +func (s *DescribeGlobalReplicationGroupsInput) SetMarker(v string) *DescribeGlobalReplicationGroupsInput { s.Marker = &v return s } // SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeCacheEngineVersionsInput) SetMaxRecords(v int64) *DescribeCacheEngineVersionsInput { +func (s *DescribeGlobalReplicationGroupsInput) SetMaxRecords(v int64) *DescribeGlobalReplicationGroupsInput { s.MaxRecords = &v return s } -// Represents the output of a DescribeCacheEngineVersions operation. -type DescribeCacheEngineVersionsOutput struct { +// SetShowMemberInfo sets the ShowMemberInfo field's value. +func (s *DescribeGlobalReplicationGroupsInput) SetShowMemberInfo(v bool) *DescribeGlobalReplicationGroupsInput { + s.ShowMemberInfo = &v + return s +} + +type DescribeGlobalReplicationGroupsOutput struct { _ struct{} `type:"structure"` - // A list of cache engine version details. Each element in the list contains - // detailed information about one cache engine version. - CacheEngineVersions []*CacheEngineVersion `locationNameList:"CacheEngineVersion" type:"list"` + // Indicates the slot configuration and global identifier for each slice group. + GlobalReplicationGroups []*GlobalReplicationGroup `locationNameList:"GlobalReplicationGroup" type:"list"` - // Provides an identifier to allow retrieval of paginated results. + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. 
If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + // > Marker *string `type:"string"` } // String returns the string representation -func (s DescribeCacheEngineVersionsOutput) String() string { +func (s DescribeGlobalReplicationGroupsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeCacheEngineVersionsOutput) GoString() string { +func (s DescribeGlobalReplicationGroupsOutput) GoString() string { return s.String() } -// SetCacheEngineVersions sets the CacheEngineVersions field's value. -func (s *DescribeCacheEngineVersionsOutput) SetCacheEngineVersions(v []*CacheEngineVersion) *DescribeCacheEngineVersionsOutput { - s.CacheEngineVersions = v +// SetGlobalReplicationGroups sets the GlobalReplicationGroups field's value. +func (s *DescribeGlobalReplicationGroupsOutput) SetGlobalReplicationGroups(v []*GlobalReplicationGroup) *DescribeGlobalReplicationGroupsOutput { + s.GlobalReplicationGroups = v return s } // SetMarker sets the Marker field's value. -func (s *DescribeCacheEngineVersionsOutput) SetMarker(v string) *DescribeCacheEngineVersionsOutput { +func (s *DescribeGlobalReplicationGroupsOutput) SetMarker(v string) *DescribeGlobalReplicationGroupsOutput { s.Marker = &v return s } -// Represents the input of a DescribeCacheParameterGroups operation. -type DescribeCacheParameterGroupsInput struct { +// Represents the input of a DescribeReplicationGroups operation. +type DescribeReplicationGroupsInput struct { _ struct{} `type:"structure"` - // The name of a specific cache parameter group to return details for. - CacheParameterGroupName *string `type:"string"` - // An optional marker returned from a prior request. Use this marker for pagination // of results from this operation. If this parameter is specified, the response // includes only records beyond the marker, up to the value specified by MaxRecords. @@ -9412,78 +12848,125 @@ type DescribeCacheParameterGroupsInput struct { // // Constraints: minimum 20; maximum 100. MaxRecords *int64 `type:"integer"` + + // The identifier for the replication group to be described. This parameter + // is not case sensitive. + // + // If you do not specify this parameter, information about all replication groups + // is returned. + ReplicationGroupId *string `type:"string"` } // String returns the string representation -func (s DescribeCacheParameterGroupsInput) String() string { +func (s DescribeReplicationGroupsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeCacheParameterGroupsInput) GoString() string { +func (s DescribeReplicationGroupsInput) GoString() string { return s.String() } -// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. -func (s *DescribeCacheParameterGroupsInput) SetCacheParameterGroupName(v string) *DescribeCacheParameterGroupsInput { - s.CacheParameterGroupName = &v - return s -} - // SetMarker sets the Marker field's value. -func (s *DescribeCacheParameterGroupsInput) SetMarker(v string) *DescribeCacheParameterGroupsInput { +func (s *DescribeReplicationGroupsInput) SetMarker(v string) *DescribeReplicationGroupsInput { s.Marker = &v return s } // SetMaxRecords sets the MaxRecords field's value. 
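The Global Datastore lookup added above can be driven through the generated fluent setters. A short sketch under the same assumptions as before; the Global Datastore identifier is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	// The Set* helpers shown in this hunk chain, so the input can be built fluently.
	input := (&elasticache.DescribeGlobalReplicationGroupsInput{}).
		SetGlobalReplicationGroupId("my-global-datastore"). // placeholder ID
		SetShowMemberInfo(true)                             // include the member list

	out, err := svc.DescribeGlobalReplicationGroups(input)
	if err != nil {
		fmt.Println("DescribeGlobalReplicationGroups failed:", err)
		return
	}
	for _, g := range out.GlobalReplicationGroups {
		fmt.Println(aws.StringValue(g.GlobalReplicationGroupId), aws.StringValue(g.Status))
	}
}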
-func (s *DescribeCacheParameterGroupsInput) SetMaxRecords(v int64) *DescribeCacheParameterGroupsInput { +func (s *DescribeReplicationGroupsInput) SetMaxRecords(v int64) *DescribeReplicationGroupsInput { s.MaxRecords = &v return s } -// Represents the output of a DescribeCacheParameterGroups operation. -type DescribeCacheParameterGroupsOutput struct { - _ struct{} `type:"structure"` +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *DescribeReplicationGroupsInput) SetReplicationGroupId(v string) *DescribeReplicationGroupsInput { + s.ReplicationGroupId = &v + return s +} - // A list of cache parameter groups. Each element in the list contains detailed - // information about one cache parameter group. - CacheParameterGroups []*CacheParameterGroup `locationNameList:"CacheParameterGroup" type:"list"` +// Represents the output of a DescribeReplicationGroups operation. +type DescribeReplicationGroupsOutput struct { + _ struct{} `type:"structure"` // Provides an identifier to allow retrieval of paginated results. Marker *string `type:"string"` + + // A list of replication groups. Each item in the list contains detailed information + // about one replication group. + ReplicationGroups []*ReplicationGroup `locationNameList:"ReplicationGroup" type:"list"` } // String returns the string representation -func (s DescribeCacheParameterGroupsOutput) String() string { +func (s DescribeReplicationGroupsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeCacheParameterGroupsOutput) GoString() string { +func (s DescribeReplicationGroupsOutput) GoString() string { return s.String() } -// SetCacheParameterGroups sets the CacheParameterGroups field's value. -func (s *DescribeCacheParameterGroupsOutput) SetCacheParameterGroups(v []*CacheParameterGroup) *DescribeCacheParameterGroupsOutput { - s.CacheParameterGroups = v +// SetMarker sets the Marker field's value. +func (s *DescribeReplicationGroupsOutput) SetMarker(v string) *DescribeReplicationGroupsOutput { + s.Marker = &v return s } -// SetMarker sets the Marker field's value. -func (s *DescribeCacheParameterGroupsOutput) SetMarker(v string) *DescribeCacheParameterGroupsOutput { - s.Marker = &v +// SetReplicationGroups sets the ReplicationGroups field's value. +func (s *DescribeReplicationGroupsOutput) SetReplicationGroups(v []*ReplicationGroup) *DescribeReplicationGroupsOutput { + s.ReplicationGroups = v return s } -// Represents the input of a DescribeCacheParameters operation. -type DescribeCacheParametersInput struct { +// Represents the input of a DescribeReservedCacheNodes operation. +type DescribeReservedCacheNodesInput struct { _ struct{} `type:"structure"` - // The name of a specific cache parameter group to return details for. + // The cache node type filter value. Use this parameter to show only those reservations + // matching the specified cache node type. // - // CacheParameterGroupName is a required field - CacheParameterGroupName *string `type:"string" required:"true"` + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. 
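Omitting ReplicationGroupId on the input above lists every replication group, which is where the Marker/MaxRecords pagination matters most. A minimal sketch, with region and page size chosen arbitrarily within the documented 20–100 range:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	input := &elasticache.DescribeReplicationGroupsInput{
		MaxRecords: aws.Int64(20), // smallest allowed page size
	}
	for {
		out, err := svc.DescribeReplicationGroups(input)
		if err != nil {
			fmt.Println("DescribeReplicationGroups failed:", err)
			return
		}
		for _, rg := range out.ReplicationGroups {
			fmt.Println(aws.StringValue(rg.ReplicationGroupId), aws.StringValue(rg.Status))
		}
		if out.Marker == nil {
			break
		}
		input.Marker = out.Marker
	}
}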
+ // + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, + // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium + // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 + // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge + // + // Additional node type info + // + // * All current generation instance types are created in Amazon VPC by default. + // + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. + // + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. + // + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. + CacheNodeType *string `type:"string"` + + // The duration filter value, specified in years or seconds. Use this parameter + // to show only reservations for this duration. + // + // Valid Values: 1 | 3 | 31536000 | 94608000 + Duration *string `type:"string"` // An optional marker returned from a prior request. Use this marker for pagination // of results from this operation. If this parameter is specified, the response @@ -9499,108 +12982,131 @@ type DescribeCacheParametersInput struct { // Constraints: minimum 20; maximum 100. MaxRecords *int64 `type:"integer"` - // The parameter types to return. + // The offering type filter value. Use this parameter to show only the available + // offerings matching the specified offering type. // - // Valid values: user | system | engine-default - Source *string `type:"string"` + // Valid values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization" + OfferingType *string `type:"string"` + + // The product description filter value. Use this parameter to show only those + // reservations matching the specified product description. + ProductDescription *string `type:"string"` + + // The reserved cache node identifier filter value. Use this parameter to show + // only the reservation that matches the specified reservation ID. + ReservedCacheNodeId *string `type:"string"` + + // The offering identifier filter value. Use this parameter to show only purchased + // reservations matching the specified offering identifier. 
+ ReservedCacheNodesOfferingId *string `type:"string"` } // String returns the string representation -func (s DescribeCacheParametersInput) String() string { +func (s DescribeReservedCacheNodesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeCacheParametersInput) GoString() string { +func (s DescribeReservedCacheNodesInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeCacheParametersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeCacheParametersInput"} - if s.CacheParameterGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCacheNodeType sets the CacheNodeType field's value. +func (s *DescribeReservedCacheNodesInput) SetCacheNodeType(v string) *DescribeReservedCacheNodesInput { + s.CacheNodeType = &v + return s } -// SetCacheParameterGroupName sets the CacheParameterGroupName field's value. -func (s *DescribeCacheParametersInput) SetCacheParameterGroupName(v string) *DescribeCacheParametersInput { - s.CacheParameterGroupName = &v +// SetDuration sets the Duration field's value. +func (s *DescribeReservedCacheNodesInput) SetDuration(v string) *DescribeReservedCacheNodesInput { + s.Duration = &v return s } // SetMarker sets the Marker field's value. -func (s *DescribeCacheParametersInput) SetMarker(v string) *DescribeCacheParametersInput { +func (s *DescribeReservedCacheNodesInput) SetMarker(v string) *DescribeReservedCacheNodesInput { s.Marker = &v - return s -} - -// SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeCacheParametersInput) SetMaxRecords(v int64) *DescribeCacheParametersInput { - s.MaxRecords = &v - return s -} - -// SetSource sets the Source field's value. -func (s *DescribeCacheParametersInput) SetSource(v string) *DescribeCacheParametersInput { - s.Source = &v - return s -} - -// Represents the output of a DescribeCacheParameters operation. -type DescribeCacheParametersOutput struct { - _ struct{} `type:"structure"` - - // A list of parameters specific to a particular cache node type. Each element - // in the list contains detailed information about one parameter. - CacheNodeTypeSpecificParameters []*CacheNodeTypeSpecificParameter `locationNameList:"CacheNodeTypeSpecificParameter" type:"list"` - - // Provides an identifier to allow retrieval of paginated results. - Marker *string `type:"string"` - - // A list of Parameter instances. - Parameters []*Parameter `locationNameList:"Parameter" type:"list"` + return s } -// String returns the string representation -func (s DescribeCacheParametersOutput) String() string { - return awsutil.Prettify(s) +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeReservedCacheNodesInput) SetMaxRecords(v int64) *DescribeReservedCacheNodesInput { + s.MaxRecords = &v + return s } -// GoString returns the string representation -func (s DescribeCacheParametersOutput) GoString() string { - return s.String() +// SetOfferingType sets the OfferingType field's value. +func (s *DescribeReservedCacheNodesInput) SetOfferingType(v string) *DescribeReservedCacheNodesInput { + s.OfferingType = &v + return s } -// SetCacheNodeTypeSpecificParameters sets the CacheNodeTypeSpecificParameters field's value. 
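The reservation filters documented above (node type, duration in years or seconds) translate directly into a request; the corresponding output shape appears later in this hunk. A sketch, with the node type and one-year duration taken from the field documentation and the region a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	// Restrict the listing to one node type and a one-year term.
	input := (&elasticache.DescribeReservedCacheNodesInput{}).
		SetCacheNodeType("cache.r5.large").
		SetDuration("31536000") // one year, expressed in seconds

	out, err := svc.DescribeReservedCacheNodes(input)
	if err != nil {
		fmt.Println("DescribeReservedCacheNodes failed:", err)
		return
	}
	for _, n := range out.ReservedCacheNodes {
		fmt.Println(aws.StringValue(n.ReservedCacheNodeId),
			aws.StringValue(n.CacheNodeType), aws.StringValue(n.State))
	}
}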
-func (s *DescribeCacheParametersOutput) SetCacheNodeTypeSpecificParameters(v []*CacheNodeTypeSpecificParameter) *DescribeCacheParametersOutput { - s.CacheNodeTypeSpecificParameters = v +// SetProductDescription sets the ProductDescription field's value. +func (s *DescribeReservedCacheNodesInput) SetProductDescription(v string) *DescribeReservedCacheNodesInput { + s.ProductDescription = &v return s } -// SetMarker sets the Marker field's value. -func (s *DescribeCacheParametersOutput) SetMarker(v string) *DescribeCacheParametersOutput { - s.Marker = &v +// SetReservedCacheNodeId sets the ReservedCacheNodeId field's value. +func (s *DescribeReservedCacheNodesInput) SetReservedCacheNodeId(v string) *DescribeReservedCacheNodesInput { + s.ReservedCacheNodeId = &v return s } -// SetParameters sets the Parameters field's value. -func (s *DescribeCacheParametersOutput) SetParameters(v []*Parameter) *DescribeCacheParametersOutput { - s.Parameters = v +// SetReservedCacheNodesOfferingId sets the ReservedCacheNodesOfferingId field's value. +func (s *DescribeReservedCacheNodesInput) SetReservedCacheNodesOfferingId(v string) *DescribeReservedCacheNodesInput { + s.ReservedCacheNodesOfferingId = &v return s } -// Represents the input of a DescribeCacheSecurityGroups operation. -type DescribeCacheSecurityGroupsInput struct { +// Represents the input of a DescribeReservedCacheNodesOfferings operation. +type DescribeReservedCacheNodesOfferingsInput struct { _ struct{} `type:"structure"` - // The name of the cache security group to return details for. - CacheSecurityGroupName *string `type:"string"` + // The cache node type filter value. Use this parameter to show only the available + // offerings matching the specified cache node type. + // + // The following node types are supported by ElastiCache. Generally speaking, + // the current generation types provide more memory and computational power + // at lower cost when compared to their equivalent previous generation counterparts. + // + // * General purpose: Current generation: M5 node types: cache.m5.large, + // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, + // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, + // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, + // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium + // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 + // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // * Compute optimized: Previous generation: (not recommended) C1 node types: + // cache.c1.xlarge + // + // * Memory optimized: Current generation: R5 node types: cache.r5.large, + // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, + // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, + // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: + // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, + // cache.r3.8xlarge + // + // Additional node type info + // + // * All current generation instance types are created in Amazon VPC by default. + // + // * Redis append-only files (AOF) are not supported for T1 or T2 instances. 
+ // + // * Redis Multi-AZ with automatic failover is not supported on T1 instances. + // + // * Redis configuration variables appendonly and appendfsync are not supported + // on Redis version 2.8.22 and later. + CacheNodeType *string `type:"string"` + + // Duration filter value, specified in years or seconds. Use this parameter + // to show only reservations for a given duration. + // + // Valid Values: 1 | 3 | 31536000 | 94608000 + Duration *string `type:"string"` // An optional marker returned from a prior request. Use this marker for pagination // of results from this operation. If this parameter is specified, the response @@ -9615,257 +13121,237 @@ type DescribeCacheSecurityGroupsInput struct { // // Constraints: minimum 20; maximum 100. MaxRecords *int64 `type:"integer"` + + // The offering type filter value. Use this parameter to show only the available + // offerings matching the specified offering type. + // + // Valid Values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization" + OfferingType *string `type:"string"` + + // The product description filter value. Use this parameter to show only the + // available offerings matching the specified product description. + ProductDescription *string `type:"string"` + + // The offering identifier filter value. Use this parameter to show only the + // available offering that matches the specified reservation identifier. + // + // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 + ReservedCacheNodesOfferingId *string `type:"string"` } // String returns the string representation -func (s DescribeCacheSecurityGroupsInput) String() string { +func (s DescribeReservedCacheNodesOfferingsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeCacheSecurityGroupsInput) GoString() string { +func (s DescribeReservedCacheNodesOfferingsInput) GoString() string { return s.String() } -// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value. -func (s *DescribeCacheSecurityGroupsInput) SetCacheSecurityGroupName(v string) *DescribeCacheSecurityGroupsInput { - s.CacheSecurityGroupName = &v +// SetCacheNodeType sets the CacheNodeType field's value. +func (s *DescribeReservedCacheNodesOfferingsInput) SetCacheNodeType(v string) *DescribeReservedCacheNodesOfferingsInput { + s.CacheNodeType = &v + return s +} + +// SetDuration sets the Duration field's value. +func (s *DescribeReservedCacheNodesOfferingsInput) SetDuration(v string) *DescribeReservedCacheNodesOfferingsInput { + s.Duration = &v return s } // SetMarker sets the Marker field's value. -func (s *DescribeCacheSecurityGroupsInput) SetMarker(v string) *DescribeCacheSecurityGroupsInput { +func (s *DescribeReservedCacheNodesOfferingsInput) SetMarker(v string) *DescribeReservedCacheNodesOfferingsInput { s.Marker = &v return s } // SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeCacheSecurityGroupsInput) SetMaxRecords(v int64) *DescribeCacheSecurityGroupsInput { +func (s *DescribeReservedCacheNodesOfferingsInput) SetMaxRecords(v int64) *DescribeReservedCacheNodesOfferingsInput { s.MaxRecords = &v return s } -// Represents the output of a DescribeCacheSecurityGroups operation. -type DescribeCacheSecurityGroupsOutput struct { - _ struct{} `type:"structure"` - - // A list of cache security groups. Each element in the list contains detailed - // information about one group. 
- CacheSecurityGroups []*CacheSecurityGroup `locationNameList:"CacheSecurityGroup" type:"list"` - - // Provides an identifier to allow retrieval of paginated results. - Marker *string `type:"string"` -} - -// String returns the string representation -func (s DescribeCacheSecurityGroupsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeCacheSecurityGroupsOutput) GoString() string { - return s.String() +// SetOfferingType sets the OfferingType field's value. +func (s *DescribeReservedCacheNodesOfferingsInput) SetOfferingType(v string) *DescribeReservedCacheNodesOfferingsInput { + s.OfferingType = &v + return s } -// SetCacheSecurityGroups sets the CacheSecurityGroups field's value. -func (s *DescribeCacheSecurityGroupsOutput) SetCacheSecurityGroups(v []*CacheSecurityGroup) *DescribeCacheSecurityGroupsOutput { - s.CacheSecurityGroups = v +// SetProductDescription sets the ProductDescription field's value. +func (s *DescribeReservedCacheNodesOfferingsInput) SetProductDescription(v string) *DescribeReservedCacheNodesOfferingsInput { + s.ProductDescription = &v return s } -// SetMarker sets the Marker field's value. -func (s *DescribeCacheSecurityGroupsOutput) SetMarker(v string) *DescribeCacheSecurityGroupsOutput { - s.Marker = &v +// SetReservedCacheNodesOfferingId sets the ReservedCacheNodesOfferingId field's value. +func (s *DescribeReservedCacheNodesOfferingsInput) SetReservedCacheNodesOfferingId(v string) *DescribeReservedCacheNodesOfferingsInput { + s.ReservedCacheNodesOfferingId = &v return s } -// Represents the input of a DescribeCacheSubnetGroups operation. -type DescribeCacheSubnetGroupsInput struct { +// Represents the output of a DescribeReservedCacheNodesOfferings operation. +type DescribeReservedCacheNodesOfferingsOutput struct { _ struct{} `type:"structure"` - // The name of the cache subnet group to return details for. - CacheSubnetGroupName *string `type:"string"` - - // An optional marker returned from a prior request. Use this marker for pagination - // of results from this operation. If this parameter is specified, the response - // includes only records beyond the marker, up to the value specified by MaxRecords. + // Provides an identifier to allow retrieval of paginated results. Marker *string `type:"string"` - // The maximum number of records to include in the response. If more records - // exist than the specified MaxRecords value, a marker is included in the response - // so that the remaining results can be retrieved. - // - // Default: 100 - // - // Constraints: minimum 20; maximum 100. - MaxRecords *int64 `type:"integer"` + // A list of reserved cache node offerings. Each element in the list contains + // detailed information about one offering. + ReservedCacheNodesOfferings []*ReservedCacheNodesOffering `locationNameList:"ReservedCacheNodesOffering" type:"list"` } // String returns the string representation -func (s DescribeCacheSubnetGroupsInput) String() string { +func (s DescribeReservedCacheNodesOfferingsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeCacheSubnetGroupsInput) GoString() string { +func (s DescribeReservedCacheNodesOfferingsOutput) GoString() string { return s.String() } -// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. 
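Before purchasing, the offerings variant of the call is typically used to compare prices across offering types. A sketch under the same assumptions; the node type and "redis" product description are illustrative filter values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	input := &elasticache.DescribeReservedCacheNodesOfferingsInput{
		CacheNodeType:      aws.String("cache.m5.large"),
		Duration:           aws.String("1"), // one-year offerings
		ProductDescription: aws.String("redis"),
	}
	for {
		out, err := svc.DescribeReservedCacheNodesOfferings(input)
		if err != nil {
			fmt.Println("DescribeReservedCacheNodesOfferings failed:", err)
			return
		}
		for _, o := range out.ReservedCacheNodesOfferings {
			fmt.Printf("%s %s fixed=%.2f\n",
				aws.StringValue(o.ReservedCacheNodesOfferingId),
				aws.StringValue(o.OfferingType),
				aws.Float64Value(o.FixedPrice))
		}
		if out.Marker == nil {
			break
		}
		input.Marker = out.Marker
	}
}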
-func (s *DescribeCacheSubnetGroupsInput) SetCacheSubnetGroupName(v string) *DescribeCacheSubnetGroupsInput { - s.CacheSubnetGroupName = &v - return s -} - // SetMarker sets the Marker field's value. -func (s *DescribeCacheSubnetGroupsInput) SetMarker(v string) *DescribeCacheSubnetGroupsInput { +func (s *DescribeReservedCacheNodesOfferingsOutput) SetMarker(v string) *DescribeReservedCacheNodesOfferingsOutput { s.Marker = &v return s } -// SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeCacheSubnetGroupsInput) SetMaxRecords(v int64) *DescribeCacheSubnetGroupsInput { - s.MaxRecords = &v +// SetReservedCacheNodesOfferings sets the ReservedCacheNodesOfferings field's value. +func (s *DescribeReservedCacheNodesOfferingsOutput) SetReservedCacheNodesOfferings(v []*ReservedCacheNodesOffering) *DescribeReservedCacheNodesOfferingsOutput { + s.ReservedCacheNodesOfferings = v return s } -// Represents the output of a DescribeCacheSubnetGroups operation. -type DescribeCacheSubnetGroupsOutput struct { +// Represents the output of a DescribeReservedCacheNodes operation. +type DescribeReservedCacheNodesOutput struct { _ struct{} `type:"structure"` - // A list of cache subnet groups. Each element in the list contains detailed - // information about one group. - CacheSubnetGroups []*CacheSubnetGroup `locationNameList:"CacheSubnetGroup" type:"list"` - // Provides an identifier to allow retrieval of paginated results. Marker *string `type:"string"` + + // A list of reserved cache nodes. Each element in the list contains detailed + // information about one node. + ReservedCacheNodes []*ReservedCacheNode `locationNameList:"ReservedCacheNode" type:"list"` } // String returns the string representation -func (s DescribeCacheSubnetGroupsOutput) String() string { +func (s DescribeReservedCacheNodesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeCacheSubnetGroupsOutput) GoString() string { +func (s DescribeReservedCacheNodesOutput) GoString() string { return s.String() } -// SetCacheSubnetGroups sets the CacheSubnetGroups field's value. -func (s *DescribeCacheSubnetGroupsOutput) SetCacheSubnetGroups(v []*CacheSubnetGroup) *DescribeCacheSubnetGroupsOutput { - s.CacheSubnetGroups = v +// SetMarker sets the Marker field's value. +func (s *DescribeReservedCacheNodesOutput) SetMarker(v string) *DescribeReservedCacheNodesOutput { + s.Marker = &v return s } -// SetMarker sets the Marker field's value. -func (s *DescribeCacheSubnetGroupsOutput) SetMarker(v string) *DescribeCacheSubnetGroupsOutput { - s.Marker = &v +// SetReservedCacheNodes sets the ReservedCacheNodes field's value. +func (s *DescribeReservedCacheNodesOutput) SetReservedCacheNodes(v []*ReservedCacheNode) *DescribeReservedCacheNodesOutput { + s.ReservedCacheNodes = v return s } -// Represents the input of a DescribeEngineDefaultParameters operation. -type DescribeEngineDefaultParametersInput struct { +type DescribeServiceUpdatesInput struct { _ struct{} `type:"structure"` - // The name of the cache parameter group family. - // - // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 - // | redis4.0 | redis5.0 | - // - // CacheParameterGroupFamily is a required field - CacheParameterGroupFamily *string `type:"string" required:"true"` - // An optional marker returned from a prior request. Use this marker for pagination // of results from this operation. 
If this parameter is specified, the response // includes only records beyond the marker, up to the value specified by MaxRecords. Marker *string `type:"string"` - // The maximum number of records to include in the response. If more records - // exist than the specified MaxRecords value, a marker is included in the response - // so that the remaining results can be retrieved. - // - // Default: 100 - // - // Constraints: minimum 20; maximum 100. + // The maximum number of records to include in the response MaxRecords *int64 `type:"integer"` -} - -// String returns the string representation -func (s DescribeEngineDefaultParametersInput) String() string { - return awsutil.Prettify(s) -} -// GoString returns the string representation -func (s DescribeEngineDefaultParametersInput) GoString() string { - return s.String() -} + // The unique ID of the service update + ServiceUpdateName *string `type:"string"` -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeEngineDefaultParametersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeEngineDefaultParametersInput"} - if s.CacheParameterGroupFamily == nil { - invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupFamily")) - } + // The status of the service update + ServiceUpdateStatus []*string `type:"list"` +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// String returns the string representation +func (s DescribeServiceUpdatesInput) String() string { + return awsutil.Prettify(s) } -// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. -func (s *DescribeEngineDefaultParametersInput) SetCacheParameterGroupFamily(v string) *DescribeEngineDefaultParametersInput { - s.CacheParameterGroupFamily = &v - return s +// GoString returns the string representation +func (s DescribeServiceUpdatesInput) GoString() string { + return s.String() } // SetMarker sets the Marker field's value. -func (s *DescribeEngineDefaultParametersInput) SetMarker(v string) *DescribeEngineDefaultParametersInput { +func (s *DescribeServiceUpdatesInput) SetMarker(v string) *DescribeServiceUpdatesInput { s.Marker = &v return s } // SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeEngineDefaultParametersInput) SetMaxRecords(v int64) *DescribeEngineDefaultParametersInput { +func (s *DescribeServiceUpdatesInput) SetMaxRecords(v int64) *DescribeServiceUpdatesInput { s.MaxRecords = &v return s } -type DescribeEngineDefaultParametersOutput struct { +// SetServiceUpdateName sets the ServiceUpdateName field's value. +func (s *DescribeServiceUpdatesInput) SetServiceUpdateName(v string) *DescribeServiceUpdatesInput { + s.ServiceUpdateName = &v + return s +} + +// SetServiceUpdateStatus sets the ServiceUpdateStatus field's value. +func (s *DescribeServiceUpdatesInput) SetServiceUpdateStatus(v []*string) *DescribeServiceUpdatesInput { + s.ServiceUpdateStatus = v + return s +} + +type DescribeServiceUpdatesOutput struct { _ struct{} `type:"structure"` - // Represents the output of a DescribeEngineDefaultParameters operation. - EngineDefaults *EngineDefaults `type:"structure"` + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. 
+ Marker *string `type:"string"` + + // A list of service updates + ServiceUpdates []*ServiceUpdate `locationNameList:"ServiceUpdate" type:"list"` } // String returns the string representation -func (s DescribeEngineDefaultParametersOutput) String() string { +func (s DescribeServiceUpdatesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeEngineDefaultParametersOutput) GoString() string { +func (s DescribeServiceUpdatesOutput) GoString() string { return s.String() } -// SetEngineDefaults sets the EngineDefaults field's value. -func (s *DescribeEngineDefaultParametersOutput) SetEngineDefaults(v *EngineDefaults) *DescribeEngineDefaultParametersOutput { - s.EngineDefaults = v +// SetMarker sets the Marker field's value. +func (s *DescribeServiceUpdatesOutput) SetMarker(v string) *DescribeServiceUpdatesOutput { + s.Marker = &v return s } -// Represents the input of a DescribeEvents operation. -type DescribeEventsInput struct { - _ struct{} `type:"structure"` +// SetServiceUpdates sets the ServiceUpdates field's value. +func (s *DescribeServiceUpdatesOutput) SetServiceUpdates(v []*ServiceUpdate) *DescribeServiceUpdatesOutput { + s.ServiceUpdates = v + return s +} - // The number of minutes worth of events to retrieve. - Duration *int64 `type:"integer"` +// Represents the input of a DescribeSnapshotsMessage operation. +type DescribeSnapshotsInput struct { + _ struct{} `type:"structure"` - // The end of the time interval for which to retrieve events, specified in ISO - // 8601 format. - // - // Example: 2017-03-30T07:03:49.555Z - EndTime *time.Time `type:"timestamp"` + // A user-supplied cluster identifier. If this parameter is specified, only + // snapshots associated with that specific cluster are described. + CacheClusterId *string `type:"string"` // An optional marker returned from a prior request. Use this marker for pagination // of results from this operation. If this parameter is specified, the response @@ -9876,1080 +13362,1289 @@ type DescribeEventsInput struct { // exist than the specified MaxRecords value, a marker is included in the response // so that the remaining results can be retrieved. // - // Default: 100 + // Default: 50 // - // Constraints: minimum 20; maximum 100. + // Constraints: minimum 20; maximum 50. MaxRecords *int64 `type:"integer"` - // The identifier of the event source for which events are returned. If not - // specified, all sources are included in the response. - SourceIdentifier *string `type:"string"` + // A user-supplied replication group identifier. If this parameter is specified, + // only snapshots associated with that specific replication group are described. + ReplicationGroupId *string `type:"string"` - // The event source to retrieve events for. If no value is specified, all events - // are returned. - SourceType *string `type:"string" enum:"SourceType"` + // A Boolean value which if true, the node group (shard) configuration is included + // in the snapshot description. + ShowNodeGroupConfig *bool `type:"boolean"` - // The beginning of the time interval to retrieve events for, specified in ISO - // 8601 format. - // - // Example: 2017-03-30T07:03:49.555Z - StartTime *time.Time `type:"timestamp"` + // A user-supplied name of the snapshot. If this parameter is specified, only + // this snapshot are described. + SnapshotName *string `type:"string"` + + // If set to system, the output shows snapshots that were automatically created + // by ElastiCache. 
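The service-update structures just above can be filtered by status via the plain []*string field. A sketch; the "available" status string is an assumed value, not something this hunk spells out:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	// Only service updates that are still open for application.
	input := &elasticache.DescribeServiceUpdatesInput{
		ServiceUpdateStatus: []*string{aws.String("available")}, // assumed status value
	}
	out, err := svc.DescribeServiceUpdates(input)
	if err != nil {
		fmt.Println("DescribeServiceUpdates failed:", err)
		return
	}
	for _, su := range out.ServiceUpdates {
		fmt.Println(aws.StringValue(su.ServiceUpdateName), aws.StringValue(su.ServiceUpdateStatus))
	}
}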
If set to user the output shows snapshots that were manually + // created. If omitted, the output shows both automatically and manually created + // snapshots. + SnapshotSource *string `type:"string"` } // String returns the string representation -func (s DescribeEventsInput) String() string { +func (s DescribeSnapshotsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeEventsInput) GoString() string { +func (s DescribeSnapshotsInput) GoString() string { return s.String() } -// SetDuration sets the Duration field's value. -func (s *DescribeEventsInput) SetDuration(v int64) *DescribeEventsInput { - s.Duration = &v - return s -} - -// SetEndTime sets the EndTime field's value. -func (s *DescribeEventsInput) SetEndTime(v time.Time) *DescribeEventsInput { - s.EndTime = &v +// SetCacheClusterId sets the CacheClusterId field's value. +func (s *DescribeSnapshotsInput) SetCacheClusterId(v string) *DescribeSnapshotsInput { + s.CacheClusterId = &v return s } // SetMarker sets the Marker field's value. -func (s *DescribeEventsInput) SetMarker(v string) *DescribeEventsInput { +func (s *DescribeSnapshotsInput) SetMarker(v string) *DescribeSnapshotsInput { s.Marker = &v return s } // SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeEventsInput) SetMaxRecords(v int64) *DescribeEventsInput { +func (s *DescribeSnapshotsInput) SetMaxRecords(v int64) *DescribeSnapshotsInput { s.MaxRecords = &v return s } -// SetSourceIdentifier sets the SourceIdentifier field's value. -func (s *DescribeEventsInput) SetSourceIdentifier(v string) *DescribeEventsInput { - s.SourceIdentifier = &v +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *DescribeSnapshotsInput) SetReplicationGroupId(v string) *DescribeSnapshotsInput { + s.ReplicationGroupId = &v return s } -// SetSourceType sets the SourceType field's value. -func (s *DescribeEventsInput) SetSourceType(v string) *DescribeEventsInput { - s.SourceType = &v +// SetShowNodeGroupConfig sets the ShowNodeGroupConfig field's value. +func (s *DescribeSnapshotsInput) SetShowNodeGroupConfig(v bool) *DescribeSnapshotsInput { + s.ShowNodeGroupConfig = &v return s } -// SetStartTime sets the StartTime field's value. -func (s *DescribeEventsInput) SetStartTime(v time.Time) *DescribeEventsInput { - s.StartTime = &v +// SetSnapshotName sets the SnapshotName field's value. +func (s *DescribeSnapshotsInput) SetSnapshotName(v string) *DescribeSnapshotsInput { + s.SnapshotName = &v return s } -// Represents the output of a DescribeEvents operation. -type DescribeEventsOutput struct { - _ struct{} `type:"structure"` +// SetSnapshotSource sets the SnapshotSource field's value. +func (s *DescribeSnapshotsInput) SetSnapshotSource(v string) *DescribeSnapshotsInput { + s.SnapshotSource = &v + return s +} - // A list of events. Each element in the list contains detailed information - // about one event. - Events []*Event `locationNameList:"Event" type:"list"` +// Represents the output of a DescribeSnapshots operation. +type DescribeSnapshotsOutput struct { + _ struct{} `type:"structure"` - // Provides an identifier to allow retrieval of paginated results. + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. Marker *string `type:"string"` + + // A list of snapshots. 
Each item in the list contains detailed information + // about one snapshot. + Snapshots []*Snapshot `locationNameList:"Snapshot" type:"list"` } // String returns the string representation -func (s DescribeEventsOutput) String() string { +func (s DescribeSnapshotsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeEventsOutput) GoString() string { +func (s DescribeSnapshotsOutput) GoString() string { return s.String() } -// SetEvents sets the Events field's value. -func (s *DescribeEventsOutput) SetEvents(v []*Event) *DescribeEventsOutput { - s.Events = v +// SetMarker sets the Marker field's value. +func (s *DescribeSnapshotsOutput) SetMarker(v string) *DescribeSnapshotsOutput { + s.Marker = &v return s } -// SetMarker sets the Marker field's value. -func (s *DescribeEventsOutput) SetMarker(v string) *DescribeEventsOutput { - s.Marker = &v +// SetSnapshots sets the Snapshots field's value. +func (s *DescribeSnapshotsOutput) SetSnapshots(v []*Snapshot) *DescribeSnapshotsOutput { + s.Snapshots = v return s } -// Represents the input of a DescribeReplicationGroups operation. -type DescribeReplicationGroupsInput struct { +type DescribeUpdateActionsInput struct { _ struct{} `type:"structure"` + // The cache cluster IDs + CacheClusterIds []*string `type:"list"` + + // The Elasticache engine to which the update applies. Either Redis or Memcached + Engine *string `type:"string"` + // An optional marker returned from a prior request. Use this marker for pagination // of results from this operation. If this parameter is specified, the response // includes only records beyond the marker, up to the value specified by MaxRecords. Marker *string `type:"string"` - // The maximum number of records to include in the response. If more records - // exist than the specified MaxRecords value, a marker is included in the response - // so that the remaining results can be retrieved. - // - // Default: 100 - // - // Constraints: minimum 20; maximum 100. + // The maximum number of records to include in the response MaxRecords *int64 `type:"integer"` - // The identifier for the replication group to be described. This parameter - // is not case sensitive. - // - // If you do not specify this parameter, information about all replication groups - // is returned. - ReplicationGroupId *string `type:"string"` + // The replication group IDs + ReplicationGroupIds []*string `type:"list"` + + // The unique ID of the service update + ServiceUpdateName *string `type:"string"` + + // The status of the service update + ServiceUpdateStatus []*string `type:"list"` + + // The range of time specified to search for service updates that are in available + // status + ServiceUpdateTimeRange *TimeRangeFilter `type:"structure"` + + // Dictates whether to include node level update status in the response + ShowNodeLevelUpdateStatus *bool `type:"boolean"` + + // The status of the update action. + UpdateActionStatus []*string `type:"list"` +} + +// String returns the string representation +func (s DescribeUpdateActionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUpdateActionsInput) GoString() string { + return s.String() +} + +// SetCacheClusterIds sets the CacheClusterIds field's value. +func (s *DescribeUpdateActionsInput) SetCacheClusterIds(v []*string) *DescribeUpdateActionsInput { + s.CacheClusterIds = v + return s +} + +// SetEngine sets the Engine field's value. 
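A snapshot listing restricted to one replication group, using the SnapshotSource and ShowNodeGroupConfig switches documented above; note this operation's smaller default/maximum page size of 50. The replication group ID and region below are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	// Manually created snapshots for one replication group, with shard configuration.
	input := &elasticache.DescribeSnapshotsInput{
		ReplicationGroupId:  aws.String("my-redis-group"), // placeholder
		SnapshotSource:      aws.String("user"),
		ShowNodeGroupConfig: aws.Bool(true),
		MaxRecords:          aws.Int64(50), // this operation caps pages at 50
	}
	for {
		out, err := svc.DescribeSnapshots(input)
		if err != nil {
			fmt.Println("DescribeSnapshots failed:", err)
			return
		}
		for _, snap := range out.Snapshots {
			fmt.Println(aws.StringValue(snap.SnapshotName), aws.StringValue(snap.SnapshotStatus))
		}
		if out.Marker == nil {
			break
		}
		input.Marker = out.Marker
	}
}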
+func (s *DescribeUpdateActionsInput) SetEngine(v string) *DescribeUpdateActionsInput { + s.Engine = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeUpdateActionsInput) SetMarker(v string) *DescribeUpdateActionsInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeUpdateActionsInput) SetMaxRecords(v int64) *DescribeUpdateActionsInput { + s.MaxRecords = &v + return s +} + +// SetReplicationGroupIds sets the ReplicationGroupIds field's value. +func (s *DescribeUpdateActionsInput) SetReplicationGroupIds(v []*string) *DescribeUpdateActionsInput { + s.ReplicationGroupIds = v + return s } -// String returns the string representation -func (s DescribeReplicationGroupsInput) String() string { - return awsutil.Prettify(s) +// SetServiceUpdateName sets the ServiceUpdateName field's value. +func (s *DescribeUpdateActionsInput) SetServiceUpdateName(v string) *DescribeUpdateActionsInput { + s.ServiceUpdateName = &v + return s } -// GoString returns the string representation -func (s DescribeReplicationGroupsInput) GoString() string { - return s.String() +// SetServiceUpdateStatus sets the ServiceUpdateStatus field's value. +func (s *DescribeUpdateActionsInput) SetServiceUpdateStatus(v []*string) *DescribeUpdateActionsInput { + s.ServiceUpdateStatus = v + return s } -// SetMarker sets the Marker field's value. -func (s *DescribeReplicationGroupsInput) SetMarker(v string) *DescribeReplicationGroupsInput { - s.Marker = &v +// SetServiceUpdateTimeRange sets the ServiceUpdateTimeRange field's value. +func (s *DescribeUpdateActionsInput) SetServiceUpdateTimeRange(v *TimeRangeFilter) *DescribeUpdateActionsInput { + s.ServiceUpdateTimeRange = v return s } -// SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeReplicationGroupsInput) SetMaxRecords(v int64) *DescribeReplicationGroupsInput { - s.MaxRecords = &v +// SetShowNodeLevelUpdateStatus sets the ShowNodeLevelUpdateStatus field's value. +func (s *DescribeUpdateActionsInput) SetShowNodeLevelUpdateStatus(v bool) *DescribeUpdateActionsInput { + s.ShowNodeLevelUpdateStatus = &v return s } -// SetReplicationGroupId sets the ReplicationGroupId field's value. -func (s *DescribeReplicationGroupsInput) SetReplicationGroupId(v string) *DescribeReplicationGroupsInput { - s.ReplicationGroupId = &v +// SetUpdateActionStatus sets the UpdateActionStatus field's value. +func (s *DescribeUpdateActionsInput) SetUpdateActionStatus(v []*string) *DescribeUpdateActionsInput { + s.UpdateActionStatus = v return s } -// Represents the output of a DescribeReplicationGroups operation. -type DescribeReplicationGroupsOutput struct { +type DescribeUpdateActionsOutput struct { _ struct{} `type:"structure"` - // Provides an identifier to allow retrieval of paginated results. + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. Marker *string `type:"string"` - // A list of replication groups. Each item in the list contains detailed information - // about one replication group. 
- ReplicationGroups []*ReplicationGroup `locationNameList:"ReplicationGroup" type:"list"` + // Returns a list of update actions + UpdateActions []*UpdateAction `locationNameList:"UpdateAction" type:"list"` } // String returns the string representation -func (s DescribeReplicationGroupsOutput) String() string { +func (s DescribeUpdateActionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeReplicationGroupsOutput) GoString() string { +func (s DescribeUpdateActionsOutput) GoString() string { return s.String() } // SetMarker sets the Marker field's value. -func (s *DescribeReplicationGroupsOutput) SetMarker(v string) *DescribeReplicationGroupsOutput { +func (s *DescribeUpdateActionsOutput) SetMarker(v string) *DescribeUpdateActionsOutput { s.Marker = &v return s } -// SetReplicationGroups sets the ReplicationGroups field's value. -func (s *DescribeReplicationGroupsOutput) SetReplicationGroups(v []*ReplicationGroup) *DescribeReplicationGroupsOutput { - s.ReplicationGroups = v +// SetUpdateActions sets the UpdateActions field's value. +func (s *DescribeUpdateActionsOutput) SetUpdateActions(v []*UpdateAction) *DescribeUpdateActionsOutput { + s.UpdateActions = v return s } -// Represents the input of a DescribeReservedCacheNodes operation. -type DescribeReservedCacheNodesInput struct { +type DescribeUserGroupsInput struct { _ struct{} `type:"structure"` - // The cache node type filter value. Use this parameter to show only those reservations - // matching the specified cache node type. - // - // The following node types are supported by ElastiCache. Generally speaking, - // the current generation types provide more memory and computational power - // at lower cost when compared to their equivalent previous generation counterparts. - // - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, - // cache.t2.medium Previous generation: (not recommended) T1 node types: - // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, - // cache.m3.2xlarge - // - // * Compute optimized: Previous generation: (not recommended) C1 node types: - // cache.c1.xlarge - // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge - // - // Additional node type info - // - // * All current generation instance types are created in Amazon VPC by default. - // - // * Redis append-only files (AOF) are not supported for T1 or T2 instances. - // - // * Redis Multi-AZ with automatic failover is not supported on T1 instances. - // - // * Redis configuration variables appendonly and appendfsync are not supported - // on Redis version 2.8.22 and later. 
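DescribeUpdateActions combines several of the filters above, including the TimeRangeFilter for the service-update release window. A sketch; the engine, the two replication group IDs, and the 30-day window are placeholders:

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	// Update actions for two replication groups, limited to service updates
	// from the last 30 days, with per-node status included.
	input := &elasticache.DescribeUpdateActionsInput{
		Engine:              aws.String("redis"),
		ReplicationGroupIds: []*string{aws.String("group-a"), aws.String("group-b")},
		ServiceUpdateTimeRange: &elasticache.TimeRangeFilter{
			StartTime: aws.Time(time.Now().AddDate(0, 0, -30)),
			EndTime:   aws.Time(time.Now()),
		},
		ShowNodeLevelUpdateStatus: aws.Bool(true),
	}
	out, err := svc.DescribeUpdateActions(input)
	if err != nil {
		fmt.Println("DescribeUpdateActions failed:", err)
		return
	}
	for _, ua := range out.UpdateActions {
		fmt.Println(aws.StringValue(ua.ReplicationGroupId),
			aws.StringValue(ua.ServiceUpdateName), aws.StringValue(ua.UpdateActionStatus))
	}
}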
- CacheNodeType *string `type:"string"` - - // The duration filter value, specified in years or seconds. Use this parameter - // to show only reservations for this duration. - // - // Valid Values: 1 | 3 | 31536000 | 94608000 - Duration *string `type:"string"` - // An optional marker returned from a prior request. Use this marker for pagination // of results from this operation. If this parameter is specified, the response // includes only records beyond the marker, up to the value specified by MaxRecords. + // > Marker *string `type:"string"` // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a marker is included in the response // so that the remaining results can be retrieved. - // - // Default: 100 - // - // Constraints: minimum 20; maximum 100. MaxRecords *int64 `type:"integer"` - // The offering type filter value. Use this parameter to show only the available - // offerings matching the specified offering type. - // - // Valid values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization" - OfferingType *string `type:"string"` - - // The product description filter value. Use this parameter to show only those - // reservations matching the specified product description. - ProductDescription *string `type:"string"` - - // The reserved cache node identifier filter value. Use this parameter to show - // only the reservation that matches the specified reservation ID. - ReservedCacheNodeId *string `type:"string"` - - // The offering identifier filter value. Use this parameter to show only purchased - // reservations matching the specified offering identifier. - ReservedCacheNodesOfferingId *string `type:"string"` + // The ID of the user group. + UserGroupId *string `type:"string"` } // String returns the string representation -func (s DescribeReservedCacheNodesInput) String() string { +func (s DescribeUserGroupsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeReservedCacheNodesInput) GoString() string { +func (s DescribeUserGroupsInput) GoString() string { return s.String() } -// SetCacheNodeType sets the CacheNodeType field's value. -func (s *DescribeReservedCacheNodesInput) SetCacheNodeType(v string) *DescribeReservedCacheNodesInput { - s.CacheNodeType = &v - return s -} - -// SetDuration sets the Duration field's value. -func (s *DescribeReservedCacheNodesInput) SetDuration(v string) *DescribeReservedCacheNodesInput { - s.Duration = &v - return s -} - // SetMarker sets the Marker field's value. -func (s *DescribeReservedCacheNodesInput) SetMarker(v string) *DescribeReservedCacheNodesInput { +func (s *DescribeUserGroupsInput) SetMarker(v string) *DescribeUserGroupsInput { s.Marker = &v return s } // SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeReservedCacheNodesInput) SetMaxRecords(v int64) *DescribeReservedCacheNodesInput { +func (s *DescribeUserGroupsInput) SetMaxRecords(v int64) *DescribeUserGroupsInput { s.MaxRecords = &v return s } -// SetOfferingType sets the OfferingType field's value. -func (s *DescribeReservedCacheNodesInput) SetOfferingType(v string) *DescribeReservedCacheNodesInput { - s.OfferingType = &v +// SetUserGroupId sets the UserGroupId field's value. +func (s *DescribeUserGroupsInput) SetUserGroupId(v string) *DescribeUserGroupsInput { + s.UserGroupId = &v return s } -// SetProductDescription sets the ProductDescription field's value. 
-func (s *DescribeReservedCacheNodesInput) SetProductDescription(v string) *DescribeReservedCacheNodesInput { - s.ProductDescription = &v - return s +type DescribeUserGroupsOutput struct { + _ struct{} `type:"structure"` + + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + // > + Marker *string `type:"string"` + + // Returns a list of user groups. + UserGroups []*UserGroup `type:"list"` } -// SetReservedCacheNodeId sets the ReservedCacheNodeId field's value. -func (s *DescribeReservedCacheNodesInput) SetReservedCacheNodeId(v string) *DescribeReservedCacheNodesInput { - s.ReservedCacheNodeId = &v +// String returns the string representation +func (s DescribeUserGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeUserGroupsOutput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *DescribeUserGroupsOutput) SetMarker(v string) *DescribeUserGroupsOutput { + s.Marker = &v return s } -// SetReservedCacheNodesOfferingId sets the ReservedCacheNodesOfferingId field's value. -func (s *DescribeReservedCacheNodesInput) SetReservedCacheNodesOfferingId(v string) *DescribeReservedCacheNodesInput { - s.ReservedCacheNodesOfferingId = &v +// SetUserGroups sets the UserGroups field's value. +func (s *DescribeUserGroupsOutput) SetUserGroups(v []*UserGroup) *DescribeUserGroupsOutput { + s.UserGroups = v return s } -// Represents the input of a DescribeReservedCacheNodesOfferings operation. -type DescribeReservedCacheNodesOfferingsInput struct { +type DescribeUsersInput struct { _ struct{} `type:"structure"` - // The cache node type filter value. Use this parameter to show only the available - // offerings matching the specified cache node type. - // - // The following node types are supported by ElastiCache. Generally speaking, - // the current generation types provide more memory and computational power - // at lower cost when compared to their equivalent previous generation counterparts. 
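Per the documentation above, omitting UserGroupId lists every RBAC user group, while setting it narrows the result to one. A short sketch; the group ID is a placeholder:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	svc := elasticache.New(session.Must(session.NewSession(
		&aws.Config{Region: aws.String("us-east-1")})))

	// Describe a single user group by ID (placeholder value).
	input := (&elasticache.DescribeUserGroupsInput{}).SetUserGroupId("app-user-group")

	out, err := svc.DescribeUserGroups(input)
	if err != nil {
		fmt.Println("DescribeUserGroups failed:", err)
		return
	}
	for _, ug := range out.UserGroups {
		fmt.Println(aws.StringValue(ug.UserGroupId), aws.StringValue(ug.Status))
	}
}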
- // - // * General purpose: Current generation: M5 node types: cache.m5.large, - // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, - // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, - // cache.t2.medium Previous generation: (not recommended) T1 node types: - // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, - // cache.m3.2xlarge - // - // * Compute optimized: Previous generation: (not recommended) C1 node types: - // cache.c1.xlarge - // - // * Memory optimized: Current generation: R5 node types: cache.r5.large, - // cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, - // cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, - // cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge Previous generation: - // (not recommended) M2 node types: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge - // R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge - // - // Additional node type info - // - // * All current generation instance types are created in Amazon VPC by default. - // - // * Redis append-only files (AOF) are not supported for T1 or T2 instances. - // - // * Redis Multi-AZ with automatic failover is not supported on T1 instances. - // - // * Redis configuration variables appendonly and appendfsync are not supported - // on Redis version 2.8.22 and later. - CacheNodeType *string `type:"string"` + // The Redis engine. + Engine *string `type:"string"` - // Duration filter value, specified in years or seconds. Use this parameter - // to show only reservations for a given duration. - // - // Valid Values: 1 | 3 | 31536000 | 94608000 - Duration *string `type:"string"` + // Filter to determine the list of User IDs to return. + Filters []*Filter `type:"list"` // An optional marker returned from a prior request. Use this marker for pagination // of results from this operation. If this parameter is specified, the response // includes only records beyond the marker, up to the value specified by MaxRecords. + // > Marker *string `type:"string"` // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a marker is included in the response // so that the remaining results can be retrieved. - // - // Default: 100 - // - // Constraints: minimum 20; maximum 100. MaxRecords *int64 `type:"integer"` - // The offering type filter value. Use this parameter to show only the available - // offerings matching the specified offering type. - // - // Valid Values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization" - OfferingType *string `type:"string"` - - // The product description filter value. Use this parameter to show only the - // available offerings matching the specified product description. - ProductDescription *string `type:"string"` - - // The offering identifier filter value. Use this parameter to show only the - // available offering that matches the specified reservation identifier. - // - // Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706 - ReservedCacheNodesOfferingId *string `type:"string"` + // The ID of the user. 
+ UserId *string `min:"1" type:"string"` } // String returns the string representation -func (s DescribeReservedCacheNodesOfferingsInput) String() string { +func (s DescribeUsersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeReservedCacheNodesOfferingsInput) GoString() string { +func (s DescribeUsersInput) GoString() string { return s.String() } -// SetCacheNodeType sets the CacheNodeType field's value. -func (s *DescribeReservedCacheNodesOfferingsInput) SetCacheNodeType(v string) *DescribeReservedCacheNodesOfferingsInput { - s.CacheNodeType = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeUsersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeUsersInput"} + if s.UserId != nil && len(*s.UserId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserId", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEngine sets the Engine field's value. +func (s *DescribeUsersInput) SetEngine(v string) *DescribeUsersInput { + s.Engine = &v return s } -// SetDuration sets the Duration field's value. -func (s *DescribeReservedCacheNodesOfferingsInput) SetDuration(v string) *DescribeReservedCacheNodesOfferingsInput { - s.Duration = &v +// SetFilters sets the Filters field's value. +func (s *DescribeUsersInput) SetFilters(v []*Filter) *DescribeUsersInput { + s.Filters = v return s } // SetMarker sets the Marker field's value. -func (s *DescribeReservedCacheNodesOfferingsInput) SetMarker(v string) *DescribeReservedCacheNodesOfferingsInput { +func (s *DescribeUsersInput) SetMarker(v string) *DescribeUsersInput { s.Marker = &v return s } // SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeReservedCacheNodesOfferingsInput) SetMaxRecords(v int64) *DescribeReservedCacheNodesOfferingsInput { +func (s *DescribeUsersInput) SetMaxRecords(v int64) *DescribeUsersInput { s.MaxRecords = &v return s } -// SetOfferingType sets the OfferingType field's value. -func (s *DescribeReservedCacheNodesOfferingsInput) SetOfferingType(v string) *DescribeReservedCacheNodesOfferingsInput { - s.OfferingType = &v - return s -} - -// SetProductDescription sets the ProductDescription field's value. -func (s *DescribeReservedCacheNodesOfferingsInput) SetProductDescription(v string) *DescribeReservedCacheNodesOfferingsInput { - s.ProductDescription = &v - return s -} - -// SetReservedCacheNodesOfferingId sets the ReservedCacheNodesOfferingId field's value. -func (s *DescribeReservedCacheNodesOfferingsInput) SetReservedCacheNodesOfferingId(v string) *DescribeReservedCacheNodesOfferingsInput { - s.ReservedCacheNodesOfferingId = &v +// SetUserId sets the UserId field's value. +func (s *DescribeUsersInput) SetUserId(v string) *DescribeUsersInput { + s.UserId = &v return s } -// Represents the output of a DescribeReservedCacheNodesOfferings operation. -type DescribeReservedCacheNodesOfferingsOutput struct { +type DescribeUsersOutput struct { _ struct{} `type:"structure"` - // Provides an identifier to allow retrieval of paginated results. + // An optional marker returned from a prior request. Use this marker for pagination + // of results from this operation. 
If this parameter is specified, the response + // includes only records beyond the marker, up to the value specified by MaxRecords. + // > Marker *string `type:"string"` - // A list of reserved cache node offerings. Each element in the list contains - // detailed information about one offering. - ReservedCacheNodesOfferings []*ReservedCacheNodesOffering `locationNameList:"ReservedCacheNodesOffering" type:"list"` + // A list of users. + Users []*User `type:"list"` } // String returns the string representation -func (s DescribeReservedCacheNodesOfferingsOutput) String() string { +func (s DescribeUsersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeReservedCacheNodesOfferingsOutput) GoString() string { +func (s DescribeUsersOutput) GoString() string { return s.String() } // SetMarker sets the Marker field's value. -func (s *DescribeReservedCacheNodesOfferingsOutput) SetMarker(v string) *DescribeReservedCacheNodesOfferingsOutput { +func (s *DescribeUsersOutput) SetMarker(v string) *DescribeUsersOutput { s.Marker = &v return s } -// SetReservedCacheNodesOfferings sets the ReservedCacheNodesOfferings field's value. -func (s *DescribeReservedCacheNodesOfferingsOutput) SetReservedCacheNodesOfferings(v []*ReservedCacheNodesOffering) *DescribeReservedCacheNodesOfferingsOutput { - s.ReservedCacheNodesOfferings = v +// SetUsers sets the Users field's value. +func (s *DescribeUsersOutput) SetUsers(v []*User) *DescribeUsersOutput { + s.Users = v return s } -// Represents the output of a DescribeReservedCacheNodes operation. -type DescribeReservedCacheNodesOutput struct { +type DisassociateGlobalReplicationGroupInput struct { _ struct{} `type:"structure"` - // Provides an identifier to allow retrieval of paginated results. - Marker *string `type:"string"` + // The name of the Global Datastore + // + // GlobalReplicationGroupId is a required field + GlobalReplicationGroupId *string `type:"string" required:"true"` - // A list of reserved cache nodes. Each element in the list contains detailed - // information about one node. - ReservedCacheNodes []*ReservedCacheNode `locationNameList:"ReservedCacheNode" type:"list"` + // The name of the secondary cluster you wish to remove from the Global Datastore + // + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` + + // The AWS region of secondary cluster you wish to remove from the Global Datastore + // + // ReplicationGroupRegion is a required field + ReplicationGroupRegion *string `type:"string" required:"true"` } // String returns the string representation -func (s DescribeReservedCacheNodesOutput) String() string { +func (s DisassociateGlobalReplicationGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeReservedCacheNodesOutput) GoString() string { +func (s DisassociateGlobalReplicationGroupInput) GoString() string { return s.String() } -// SetMarker sets the Marker field's value. -func (s *DescribeReservedCacheNodesOutput) SetMarker(v string) *DescribeReservedCacheNodesOutput { - s.Marker = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DisassociateGlobalReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateGlobalReplicationGroupInput"} + if s.GlobalReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalReplicationGroupId")) + } + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + if s.ReplicationGroupRegion == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupRegion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetReservedCacheNodes sets the ReservedCacheNodes field's value. -func (s *DescribeReservedCacheNodesOutput) SetReservedCacheNodes(v []*ReservedCacheNode) *DescribeReservedCacheNodesOutput { - s.ReservedCacheNodes = v +// SetGlobalReplicationGroupId sets the GlobalReplicationGroupId field's value. +func (s *DisassociateGlobalReplicationGroupInput) SetGlobalReplicationGroupId(v string) *DisassociateGlobalReplicationGroupInput { + s.GlobalReplicationGroupId = &v return s } -type DescribeServiceUpdatesInput struct { - _ struct{} `type:"structure"` - - // An optional marker returned from a prior request. Use this marker for pagination - // of results from this operation. If this parameter is specified, the response - // includes only records beyond the marker, up to the value specified by MaxRecords. - Marker *string `type:"string"` +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *DisassociateGlobalReplicationGroupInput) SetReplicationGroupId(v string) *DisassociateGlobalReplicationGroupInput { + s.ReplicationGroupId = &v + return s +} - // The maximum number of records to include in the response - MaxRecords *int64 `type:"integer"` +// SetReplicationGroupRegion sets the ReplicationGroupRegion field's value. +func (s *DisassociateGlobalReplicationGroupInput) SetReplicationGroupRegion(v string) *DisassociateGlobalReplicationGroupInput { + s.ReplicationGroupRegion = &v + return s +} - // The unique ID of the service update - ServiceUpdateName *string `type:"string"` +type DisassociateGlobalReplicationGroupOutput struct { + _ struct{} `type:"structure"` - // The status of the service update - ServiceUpdateStatus []*string `type:"list"` + // Consists of a primary cluster that accepts writes and an associated secondary + // cluster that resides in a different AWS region. The secondary cluster accepts + // only reads. The primary cluster automatically replicates updates to the secondary + // cluster. + // + // * The GlobalReplicationGroupIdSuffix represents the name of the Global + // Datastore, which is what you use to associate a secondary cluster. + GlobalReplicationGroup *GlobalReplicationGroup `type:"structure"` } // String returns the string representation -func (s DescribeServiceUpdatesInput) String() string { +func (s DisassociateGlobalReplicationGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeServiceUpdatesInput) GoString() string { +func (s DisassociateGlobalReplicationGroupOutput) GoString() string { return s.String() } -// SetMarker sets the Marker field's value. -func (s *DescribeServiceUpdatesInput) SetMarker(v string) *DescribeServiceUpdatesInput { - s.Marker = &v +// SetGlobalReplicationGroup sets the GlobalReplicationGroup field's value. 
+func (s *DisassociateGlobalReplicationGroupOutput) SetGlobalReplicationGroup(v *GlobalReplicationGroup) *DisassociateGlobalReplicationGroupOutput { + s.GlobalReplicationGroup = v return s } -// SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeServiceUpdatesInput) SetMaxRecords(v int64) *DescribeServiceUpdatesInput { - s.MaxRecords = &v +// Provides ownership and status information for an Amazon EC2 security group. +type EC2SecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the Amazon EC2 security group. + EC2SecurityGroupName *string `type:"string"` + + // The AWS account ID of the Amazon EC2 security group owner. + EC2SecurityGroupOwnerId *string `type:"string"` + + // The status of the Amazon EC2 security group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s EC2SecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EC2SecurityGroup) GoString() string { + return s.String() +} + +// SetEC2SecurityGroupName sets the EC2SecurityGroupName field's value. +func (s *EC2SecurityGroup) SetEC2SecurityGroupName(v string) *EC2SecurityGroup { + s.EC2SecurityGroupName = &v return s } -// SetServiceUpdateName sets the ServiceUpdateName field's value. -func (s *DescribeServiceUpdatesInput) SetServiceUpdateName(v string) *DescribeServiceUpdatesInput { - s.ServiceUpdateName = &v +// SetEC2SecurityGroupOwnerId sets the EC2SecurityGroupOwnerId field's value. +func (s *EC2SecurityGroup) SetEC2SecurityGroupOwnerId(v string) *EC2SecurityGroup { + s.EC2SecurityGroupOwnerId = &v return s } -// SetServiceUpdateStatus sets the ServiceUpdateStatus field's value. -func (s *DescribeServiceUpdatesInput) SetServiceUpdateStatus(v []*string) *DescribeServiceUpdatesInput { - s.ServiceUpdateStatus = v +// SetStatus sets the Status field's value. +func (s *EC2SecurityGroup) SetStatus(v string) *EC2SecurityGroup { + s.Status = &v return s } -type DescribeServiceUpdatesOutput struct { +// Represents the information required for client programs to connect to a cache +// node. +type Endpoint struct { _ struct{} `type:"structure"` - // An optional marker returned from a prior request. Use this marker for pagination - // of results from this operation. If this parameter is specified, the response - // includes only records beyond the marker, up to the value specified by MaxRecords. - Marker *string `type:"string"` + // The DNS hostname of the cache node. + Address *string `type:"string"` - // A list of service updates - ServiceUpdates []*ServiceUpdate `locationNameList:"ServiceUpdate" type:"list"` + // The port number that the cache engine is listening on. + Port *int64 `type:"integer"` } // String returns the string representation -func (s DescribeServiceUpdatesOutput) String() string { +func (s Endpoint) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeServiceUpdatesOutput) GoString() string { +func (s Endpoint) GoString() string { return s.String() } -// SetMarker sets the Marker field's value. -func (s *DescribeServiceUpdatesOutput) SetMarker(v string) *DescribeServiceUpdatesOutput { - s.Marker = &v +// SetAddress sets the Address field's value. +func (s *Endpoint) SetAddress(v string) *Endpoint { + s.Address = &v return s } -// SetServiceUpdates sets the ServiceUpdates field's value. 
-func (s *DescribeServiceUpdatesOutput) SetServiceUpdates(v []*ServiceUpdate) *DescribeServiceUpdatesOutput { - s.ServiceUpdates = v +// SetPort sets the Port field's value. +func (s *Endpoint) SetPort(v int64) *Endpoint { + s.Port = &v return s } -// Represents the input of a DescribeSnapshotsMessage operation. -type DescribeSnapshotsInput struct { +// Represents the output of a DescribeEngineDefaultParameters operation. +type EngineDefaults struct { _ struct{} `type:"structure"` - // A user-supplied cluster identifier. If this parameter is specified, only - // snapshots associated with that specific cluster are described. - CacheClusterId *string `type:"string"` - - // An optional marker returned from a prior request. Use this marker for pagination - // of results from this operation. If this parameter is specified, the response - // includes only records beyond the marker, up to the value specified by MaxRecords. - Marker *string `type:"string"` + // A list of parameters specific to a particular cache node type. Each element + // in the list contains detailed information about one parameter. + CacheNodeTypeSpecificParameters []*CacheNodeTypeSpecificParameter `locationNameList:"CacheNodeTypeSpecificParameter" type:"list"` - // The maximum number of records to include in the response. If more records - // exist than the specified MaxRecords value, a marker is included in the response - // so that the remaining results can be retrieved. - // - // Default: 50 + // Specifies the name of the cache parameter group family to which the engine + // default parameters apply. // - // Constraints: minimum 20; maximum 50. - MaxRecords *int64 `type:"integer"` - - // A user-supplied replication group identifier. If this parameter is specified, - // only snapshots associated with that specific replication group are described. - ReplicationGroupId *string `type:"string"` - - // A Boolean value which if true, the node group (shard) configuration is included - // in the snapshot description. - ShowNodeGroupConfig *bool `type:"boolean"` + // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 + // | redis4.0 | redis5.0 | + CacheParameterGroupFamily *string `type:"string"` - // A user-supplied name of the snapshot. If this parameter is specified, only - // this snapshot are described. - SnapshotName *string `type:"string"` + // Provides an identifier to allow retrieval of paginated results. + Marker *string `type:"string"` - // If set to system, the output shows snapshots that were automatically created - // by ElastiCache. If set to user the output shows snapshots that were manually - // created. If omitted, the output shows both automatically and manually created - // snapshots. - SnapshotSource *string `type:"string"` + // Contains a list of engine default parameters. + Parameters []*Parameter `locationNameList:"Parameter" type:"list"` } // String returns the string representation -func (s DescribeSnapshotsInput) String() string { +func (s EngineDefaults) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeSnapshotsInput) GoString() string { +func (s EngineDefaults) GoString() string { return s.String() } -// SetCacheClusterId sets the CacheClusterId field's value. -func (s *DescribeSnapshotsInput) SetCacheClusterId(v string) *DescribeSnapshotsInput { - s.CacheClusterId = &v +// SetCacheNodeTypeSpecificParameters sets the CacheNodeTypeSpecificParameters field's value. 
+func (s *EngineDefaults) SetCacheNodeTypeSpecificParameters(v []*CacheNodeTypeSpecificParameter) *EngineDefaults { + s.CacheNodeTypeSpecificParameters = v + return s +} + +// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. +func (s *EngineDefaults) SetCacheParameterGroupFamily(v string) *EngineDefaults { + s.CacheParameterGroupFamily = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *EngineDefaults) SetMarker(v string) *EngineDefaults { + s.Marker = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *EngineDefaults) SetParameters(v []*Parameter) *EngineDefaults { + s.Parameters = v return s } -// SetMarker sets the Marker field's value. -func (s *DescribeSnapshotsInput) SetMarker(v string) *DescribeSnapshotsInput { - s.Marker = &v - return s +// Represents a single occurrence of something interesting within the system. +// Some examples of events are creating a cluster, adding or removing a cache +// node, or rebooting a node. +type Event struct { + _ struct{} `type:"structure"` + + // The date and time when the event occurred. + Date *time.Time `type:"timestamp"` + + // The text of the event. + Message *string `type:"string"` + + // The identifier for the source of the event. For example, if the event occurred + // at the cluster level, the identifier would be the name of the cluster. + SourceIdentifier *string `type:"string"` + + // Specifies the origin of this event - a cluster, a parameter group, a security + // group, etc. + SourceType *string `type:"string" enum:"SourceType"` +} + +// String returns the string representation +func (s Event) String() string { + return awsutil.Prettify(s) } -// SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeSnapshotsInput) SetMaxRecords(v int64) *DescribeSnapshotsInput { - s.MaxRecords = &v - return s +// GoString returns the string representation +func (s Event) GoString() string { + return s.String() } -// SetReplicationGroupId sets the ReplicationGroupId field's value. -func (s *DescribeSnapshotsInput) SetReplicationGroupId(v string) *DescribeSnapshotsInput { - s.ReplicationGroupId = &v +// SetDate sets the Date field's value. +func (s *Event) SetDate(v time.Time) *Event { + s.Date = &v return s } -// SetShowNodeGroupConfig sets the ShowNodeGroupConfig field's value. -func (s *DescribeSnapshotsInput) SetShowNodeGroupConfig(v bool) *DescribeSnapshotsInput { - s.ShowNodeGroupConfig = &v +// SetMessage sets the Message field's value. +func (s *Event) SetMessage(v string) *Event { + s.Message = &v return s } -// SetSnapshotName sets the SnapshotName field's value. -func (s *DescribeSnapshotsInput) SetSnapshotName(v string) *DescribeSnapshotsInput { - s.SnapshotName = &v +// SetSourceIdentifier sets the SourceIdentifier field's value. +func (s *Event) SetSourceIdentifier(v string) *Event { + s.SourceIdentifier = &v return s } -// SetSnapshotSource sets the SnapshotSource field's value. -func (s *DescribeSnapshotsInput) SetSnapshotSource(v string) *DescribeSnapshotsInput { - s.SnapshotSource = &v +// SetSourceType sets the SourceType field's value. +func (s *Event) SetSourceType(v string) *Event { + s.SourceType = &v return s } -// Represents the output of a DescribeSnapshots operation. -type DescribeSnapshotsOutput struct { +type FailoverGlobalReplicationGroupInput struct { _ struct{} `type:"structure"` - // An optional marker returned from a prior request. Use this marker for pagination - // of results from this operation. 
If this parameter is specified, the response - // includes only records beyond the marker, up to the value specified by MaxRecords. - Marker *string `type:"string"` + // The name of the Global Datastore + // + // GlobalReplicationGroupId is a required field + GlobalReplicationGroupId *string `type:"string" required:"true"` - // A list of snapshots. Each item in the list contains detailed information - // about one snapshot. - Snapshots []*Snapshot `locationNameList:"Snapshot" type:"list"` + // The AWS region of the primary cluster of the Global Datastore + // + // PrimaryRegion is a required field + PrimaryRegion *string `type:"string" required:"true"` + + // The name of the primary replication group + // + // PrimaryReplicationGroupId is a required field + PrimaryReplicationGroupId *string `type:"string" required:"true"` } // String returns the string representation -func (s DescribeSnapshotsOutput) String() string { +func (s FailoverGlobalReplicationGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeSnapshotsOutput) GoString() string { +func (s FailoverGlobalReplicationGroupInput) GoString() string { return s.String() } -// SetMarker sets the Marker field's value. -func (s *DescribeSnapshotsOutput) SetMarker(v string) *DescribeSnapshotsOutput { - s.Marker = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *FailoverGlobalReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FailoverGlobalReplicationGroupInput"} + if s.GlobalReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalReplicationGroupId")) + } + if s.PrimaryRegion == nil { + invalidParams.Add(request.NewErrParamRequired("PrimaryRegion")) + } + if s.PrimaryReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("PrimaryReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSnapshots sets the Snapshots field's value. -func (s *DescribeSnapshotsOutput) SetSnapshots(v []*Snapshot) *DescribeSnapshotsOutput { - s.Snapshots = v +// SetGlobalReplicationGroupId sets the GlobalReplicationGroupId field's value. +func (s *FailoverGlobalReplicationGroupInput) SetGlobalReplicationGroupId(v string) *FailoverGlobalReplicationGroupInput { + s.GlobalReplicationGroupId = &v return s } -type DescribeUpdateActionsInput struct { - _ struct{} `type:"structure"` - - // The cache cluster IDs - CacheClusterIds []*string `type:"list"` +// SetPrimaryRegion sets the PrimaryRegion field's value. +func (s *FailoverGlobalReplicationGroupInput) SetPrimaryRegion(v string) *FailoverGlobalReplicationGroupInput { + s.PrimaryRegion = &v + return s +} - // The Elasticache engine to which the update applies. Either Redis or Memcached - Engine *string `type:"string"` +// SetPrimaryReplicationGroupId sets the PrimaryReplicationGroupId field's value. +func (s *FailoverGlobalReplicationGroupInput) SetPrimaryReplicationGroupId(v string) *FailoverGlobalReplicationGroupInput { + s.PrimaryReplicationGroupId = &v + return s +} - // An optional marker returned from a prior request. Use this marker for pagination - // of results from this operation. If this parameter is specified, the response - // includes only records beyond the marker, up to the value specified by MaxRecords. 
- Marker *string `type:"string"` +type FailoverGlobalReplicationGroupOutput struct { + _ struct{} `type:"structure"` - // The maximum number of records to include in the response - MaxRecords *int64 `type:"integer"` + // Consists of a primary cluster that accepts writes and an associated secondary + // cluster that resides in a different AWS region. The secondary cluster accepts + // only reads. The primary cluster automatically replicates updates to the secondary + // cluster. + // + // * The GlobalReplicationGroupIdSuffix represents the name of the Global + // Datastore, which is what you use to associate a secondary cluster. + GlobalReplicationGroup *GlobalReplicationGroup `type:"structure"` +} - // The replication group IDs - ReplicationGroupIds []*string `type:"list"` +// String returns the string representation +func (s FailoverGlobalReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} - // The unique ID of the service update - ServiceUpdateName *string `type:"string"` +// GoString returns the string representation +func (s FailoverGlobalReplicationGroupOutput) GoString() string { + return s.String() +} - // The status of the service update - ServiceUpdateStatus []*string `type:"list"` +// SetGlobalReplicationGroup sets the GlobalReplicationGroup field's value. +func (s *FailoverGlobalReplicationGroupOutput) SetGlobalReplicationGroup(v *GlobalReplicationGroup) *FailoverGlobalReplicationGroupOutput { + s.GlobalReplicationGroup = v + return s +} - // The range of time specified to search for service updates that are in available - // status - ServiceUpdateTimeRange *TimeRangeFilter `type:"structure"` +// Used to streamline results of a search based on the property being filtered. +type Filter struct { + _ struct{} `type:"structure"` - // Dictates whether to include node level update status in the response - ShowNodeLevelUpdateStatus *bool `type:"boolean"` + // The property being filtered. For example, UserId. + // + // Name is a required field + Name *string `type:"string" required:"true"` - // The status of the update action. - UpdateActionStatus []*string `type:"list"` + // The property values to filter on. For example, "user-123". + // + // Values is a required field + Values []*string `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s DescribeUpdateActionsInput) String() string { +func (s Filter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeUpdateActionsInput) GoString() string { +func (s Filter) GoString() string { return s.String() } -// SetCacheClusterIds sets the CacheClusterIds field's value. -func (s *DescribeUpdateActionsInput) SetCacheClusterIds(v []*string) *DescribeUpdateActionsInput { - s.CacheClusterIds = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *Filter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Filter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } -// SetEngine sets the Engine field's value. -func (s *DescribeUpdateActionsInput) SetEngine(v string) *DescribeUpdateActionsInput { - s.Engine = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetMarker sets the Marker field's value. 
-func (s *DescribeUpdateActionsInput) SetMarker(v string) *DescribeUpdateActionsInput { - s.Marker = &v +// SetName sets the Name field's value. +func (s *Filter) SetName(v string) *Filter { + s.Name = &v return s } -// SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeUpdateActionsInput) SetMaxRecords(v int64) *DescribeUpdateActionsInput { - s.MaxRecords = &v +// SetValues sets the Values field's value. +func (s *Filter) SetValues(v []*string) *Filter { + s.Values = v return s } -// SetReplicationGroupIds sets the ReplicationGroupIds field's value. -func (s *DescribeUpdateActionsInput) SetReplicationGroupIds(v []*string) *DescribeUpdateActionsInput { - s.ReplicationGroupIds = v - return s -} +// Indicates the slot configuration and global identifier for a slice group. +type GlobalNodeGroup struct { + _ struct{} `type:"structure"` -// SetServiceUpdateName sets the ServiceUpdateName field's value. -func (s *DescribeUpdateActionsInput) SetServiceUpdateName(v string) *DescribeUpdateActionsInput { - s.ServiceUpdateName = &v - return s + // The name of the global node group + GlobalNodeGroupId *string `type:"string"` + + // The keyspace for this node group + Slots *string `type:"string"` } -// SetServiceUpdateStatus sets the ServiceUpdateStatus field's value. -func (s *DescribeUpdateActionsInput) SetServiceUpdateStatus(v []*string) *DescribeUpdateActionsInput { - s.ServiceUpdateStatus = v - return s +// String returns the string representation +func (s GlobalNodeGroup) String() string { + return awsutil.Prettify(s) } -// SetServiceUpdateTimeRange sets the ServiceUpdateTimeRange field's value. -func (s *DescribeUpdateActionsInput) SetServiceUpdateTimeRange(v *TimeRangeFilter) *DescribeUpdateActionsInput { - s.ServiceUpdateTimeRange = v - return s +// GoString returns the string representation +func (s GlobalNodeGroup) GoString() string { + return s.String() } -// SetShowNodeLevelUpdateStatus sets the ShowNodeLevelUpdateStatus field's value. -func (s *DescribeUpdateActionsInput) SetShowNodeLevelUpdateStatus(v bool) *DescribeUpdateActionsInput { - s.ShowNodeLevelUpdateStatus = &v +// SetGlobalNodeGroupId sets the GlobalNodeGroupId field's value. +func (s *GlobalNodeGroup) SetGlobalNodeGroupId(v string) *GlobalNodeGroup { + s.GlobalNodeGroupId = &v return s } -// SetUpdateActionStatus sets the UpdateActionStatus field's value. -func (s *DescribeUpdateActionsInput) SetUpdateActionStatus(v []*string) *DescribeUpdateActionsInput { - s.UpdateActionStatus = v +// SetSlots sets the Slots field's value. +func (s *GlobalNodeGroup) SetSlots(v string) *GlobalNodeGroup { + s.Slots = &v return s } -type DescribeUpdateActionsOutput struct { +// Consists of a primary cluster that accepts writes and an associated secondary +// cluster that resides in a different AWS region. The secondary cluster accepts +// only reads. The primary cluster automatically replicates updates to the secondary +// cluster. +// +// * The GlobalReplicationGroupIdSuffix represents the name of the Global +// Datastore, which is what you use to associate a secondary cluster. +type GlobalReplicationGroup struct { _ struct{} `type:"structure"` - // An optional marker returned from a prior request. Use this marker for pagination - // of results from this operation. If this parameter is specified, the response - // includes only records beyond the marker, up to the value specified by MaxRecords. - Marker *string `type:"string"` + // The ARN (Amazon Resource Name) of the global replication group. 
+ ARN *string `type:"string"` - // Returns a list of update actions - UpdateActions []*UpdateAction `locationNameList:"UpdateAction" type:"list"` + // A flag that enables encryption at rest when set to true. + // + // You cannot modify the value of AtRestEncryptionEnabled after the replication + // group is created. To enable encryption at rest on a replication group you + // must set AtRestEncryptionEnabled to true when you create the replication + // group. + // + // Required: Only available when creating a replication group in an Amazon VPC + // using redis version 3.2.6, 4.x or later. + AtRestEncryptionEnabled *bool `type:"boolean"` + + // A flag that enables using an AuthToken (password) when issuing Redis commands. + // + // Default: false + AuthTokenEnabled *bool `type:"boolean"` + + // The cache node type of the Global Datastore + CacheNodeType *string `type:"string"` + + // A flag that indicates whether the Global Datastore is cluster enabled. + ClusterEnabled *bool `type:"boolean"` + + // The Elasticache engine. For Redis only. + Engine *string `type:"string"` + + // The Elasticache Redis engine version. For preview, it is Redis version 5.0.5 + // only. + EngineVersion *string `type:"string"` + + // Indicates the slot configuration and global identifier for each slice group. + GlobalNodeGroups []*GlobalNodeGroup `locationNameList:"GlobalNodeGroup" type:"list"` + + // The optional description of the Global Datastore + GlobalReplicationGroupDescription *string `type:"string"` + + // The name of the Global Datastore + GlobalReplicationGroupId *string `type:"string"` + + // The replication groups that comprise the Global Datastore. + Members []*GlobalReplicationGroupMember `locationNameList:"GlobalReplicationGroupMember" type:"list"` + + // The status of the Global Datastore + Status *string `type:"string"` + + // A flag that enables in-transit encryption when set to true. You cannot modify + // the value of TransitEncryptionEnabled after the cluster is created. To enable + // in-transit encryption on a cluster you must set TransitEncryptionEnabled + // to true when you create a cluster. + TransitEncryptionEnabled *bool `type:"boolean"` } // String returns the string representation -func (s DescribeUpdateActionsOutput) String() string { +func (s GlobalReplicationGroup) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeUpdateActionsOutput) GoString() string { +func (s GlobalReplicationGroup) GoString() string { return s.String() } -// SetMarker sets the Marker field's value. -func (s *DescribeUpdateActionsOutput) SetMarker(v string) *DescribeUpdateActionsOutput { - s.Marker = &v +// SetARN sets the ARN field's value. +func (s *GlobalReplicationGroup) SetARN(v string) *GlobalReplicationGroup { + s.ARN = &v return s } -// SetUpdateActions sets the UpdateActions field's value. -func (s *DescribeUpdateActionsOutput) SetUpdateActions(v []*UpdateAction) *DescribeUpdateActionsOutput { - s.UpdateActions = v +// SetAtRestEncryptionEnabled sets the AtRestEncryptionEnabled field's value. +func (s *GlobalReplicationGroup) SetAtRestEncryptionEnabled(v bool) *GlobalReplicationGroup { + s.AtRestEncryptionEnabled = &v return s } -// Provides ownership and status information for an Amazon EC2 security group. -type EC2SecurityGroup struct { - _ struct{} `type:"structure"` +// SetAuthTokenEnabled sets the AuthTokenEnabled field's value. 
+func (s *GlobalReplicationGroup) SetAuthTokenEnabled(v bool) *GlobalReplicationGroup { + s.AuthTokenEnabled = &v + return s +} - // The name of the Amazon EC2 security group. - EC2SecurityGroupName *string `type:"string"` +// SetCacheNodeType sets the CacheNodeType field's value. +func (s *GlobalReplicationGroup) SetCacheNodeType(v string) *GlobalReplicationGroup { + s.CacheNodeType = &v + return s +} - // The AWS account ID of the Amazon EC2 security group owner. - EC2SecurityGroupOwnerId *string `type:"string"` +// SetClusterEnabled sets the ClusterEnabled field's value. +func (s *GlobalReplicationGroup) SetClusterEnabled(v bool) *GlobalReplicationGroup { + s.ClusterEnabled = &v + return s +} - // The status of the Amazon EC2 security group. - Status *string `type:"string"` +// SetEngine sets the Engine field's value. +func (s *GlobalReplicationGroup) SetEngine(v string) *GlobalReplicationGroup { + s.Engine = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *GlobalReplicationGroup) SetEngineVersion(v string) *GlobalReplicationGroup { + s.EngineVersion = &v + return s } -// String returns the string representation -func (s EC2SecurityGroup) String() string { - return awsutil.Prettify(s) +// SetGlobalNodeGroups sets the GlobalNodeGroups field's value. +func (s *GlobalReplicationGroup) SetGlobalNodeGroups(v []*GlobalNodeGroup) *GlobalReplicationGroup { + s.GlobalNodeGroups = v + return s } -// GoString returns the string representation -func (s EC2SecurityGroup) GoString() string { - return s.String() +// SetGlobalReplicationGroupDescription sets the GlobalReplicationGroupDescription field's value. +func (s *GlobalReplicationGroup) SetGlobalReplicationGroupDescription(v string) *GlobalReplicationGroup { + s.GlobalReplicationGroupDescription = &v + return s } -// SetEC2SecurityGroupName sets the EC2SecurityGroupName field's value. -func (s *EC2SecurityGroup) SetEC2SecurityGroupName(v string) *EC2SecurityGroup { - s.EC2SecurityGroupName = &v +// SetGlobalReplicationGroupId sets the GlobalReplicationGroupId field's value. +func (s *GlobalReplicationGroup) SetGlobalReplicationGroupId(v string) *GlobalReplicationGroup { + s.GlobalReplicationGroupId = &v return s } -// SetEC2SecurityGroupOwnerId sets the EC2SecurityGroupOwnerId field's value. -func (s *EC2SecurityGroup) SetEC2SecurityGroupOwnerId(v string) *EC2SecurityGroup { - s.EC2SecurityGroupOwnerId = &v +// SetMembers sets the Members field's value. +func (s *GlobalReplicationGroup) SetMembers(v []*GlobalReplicationGroupMember) *GlobalReplicationGroup { + s.Members = v return s } // SetStatus sets the Status field's value. -func (s *EC2SecurityGroup) SetStatus(v string) *EC2SecurityGroup { +func (s *GlobalReplicationGroup) SetStatus(v string) *GlobalReplicationGroup { s.Status = &v return s } -// Represents the information required for client programs to connect to a cache -// node. -type Endpoint struct { +// SetTransitEncryptionEnabled sets the TransitEncryptionEnabled field's value. +func (s *GlobalReplicationGroup) SetTransitEncryptionEnabled(v bool) *GlobalReplicationGroup { + s.TransitEncryptionEnabled = &v + return s +} + +// The name of the Global Datastore and role of this replication group in the +// Global Datastore. +type GlobalReplicationGroupInfo struct { _ struct{} `type:"structure"` - // The DNS hostname of the cache node. 
- Address *string `type:"string"` + // The name of the Global Datastore + GlobalReplicationGroupId *string `type:"string"` - // The port number that the cache engine is listening on. - Port *int64 `type:"integer"` + // The role of the replication group in a Global Datastore. Can be primary or + // secondary. + GlobalReplicationGroupMemberRole *string `type:"string"` } // String returns the string representation -func (s Endpoint) String() string { +func (s GlobalReplicationGroupInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Endpoint) GoString() string { +func (s GlobalReplicationGroupInfo) GoString() string { return s.String() } -// SetAddress sets the Address field's value. -func (s *Endpoint) SetAddress(v string) *Endpoint { - s.Address = &v +// SetGlobalReplicationGroupId sets the GlobalReplicationGroupId field's value. +func (s *GlobalReplicationGroupInfo) SetGlobalReplicationGroupId(v string) *GlobalReplicationGroupInfo { + s.GlobalReplicationGroupId = &v return s } -// SetPort sets the Port field's value. -func (s *Endpoint) SetPort(v int64) *Endpoint { - s.Port = &v +// SetGlobalReplicationGroupMemberRole sets the GlobalReplicationGroupMemberRole field's value. +func (s *GlobalReplicationGroupInfo) SetGlobalReplicationGroupMemberRole(v string) *GlobalReplicationGroupInfo { + s.GlobalReplicationGroupMemberRole = &v return s } -// Represents the output of a DescribeEngineDefaultParameters operation. -type EngineDefaults struct { +// A member of a Global Datastore. It contains the Replication Group Id, the +// AWS region and the role of the replication group. +type GlobalReplicationGroupMember struct { _ struct{} `type:"structure"` - // A list of parameters specific to a particular cache node type. Each element - // in the list contains detailed information about one parameter. - CacheNodeTypeSpecificParameters []*CacheNodeTypeSpecificParameter `locationNameList:"CacheNodeTypeSpecificParameter" type:"list"` + // Indicates whether automatic failover is enabled for the replication group. + AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"` - // Specifies the name of the cache parameter group family to which the engine - // default parameters apply. - // - // Valid values are: memcached1.4 | memcached1.5 | redis2.6 | redis2.8 | redis3.2 - // | redis4.0 | redis5.0 | - CacheParameterGroupFamily *string `type:"string"` + // The replication group id of the Global Datastore member. + ReplicationGroupId *string `type:"string"` - // Provides an identifier to allow retrieval of paginated results. - Marker *string `type:"string"` + // The AWS region of the Global Datastore member. + ReplicationGroupRegion *string `type:"string"` - // Contains a list of engine default parameters. - Parameters []*Parameter `locationNameList:"Parameter" type:"list"` + // Indicates the role of the replication group, primary or secondary. + Role *string `type:"string"` + + // The status of the membership of the replication group. + Status *string `type:"string"` } // String returns the string representation -func (s EngineDefaults) String() string { +func (s GlobalReplicationGroupMember) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EngineDefaults) GoString() string { +func (s GlobalReplicationGroupMember) GoString() string { return s.String() } -// SetCacheNodeTypeSpecificParameters sets the CacheNodeTypeSpecificParameters field's value. 
-func (s *EngineDefaults) SetCacheNodeTypeSpecificParameters(v []*CacheNodeTypeSpecificParameter) *EngineDefaults { - s.CacheNodeTypeSpecificParameters = v +// SetAutomaticFailover sets the AutomaticFailover field's value. +func (s *GlobalReplicationGroupMember) SetAutomaticFailover(v string) *GlobalReplicationGroupMember { + s.AutomaticFailover = &v return s } -// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value. -func (s *EngineDefaults) SetCacheParameterGroupFamily(v string) *EngineDefaults { - s.CacheParameterGroupFamily = &v +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *GlobalReplicationGroupMember) SetReplicationGroupId(v string) *GlobalReplicationGroupMember { + s.ReplicationGroupId = &v return s } -// SetMarker sets the Marker field's value. -func (s *EngineDefaults) SetMarker(v string) *EngineDefaults { - s.Marker = &v +// SetReplicationGroupRegion sets the ReplicationGroupRegion field's value. +func (s *GlobalReplicationGroupMember) SetReplicationGroupRegion(v string) *GlobalReplicationGroupMember { + s.ReplicationGroupRegion = &v return s } -// SetParameters sets the Parameters field's value. -func (s *EngineDefaults) SetParameters(v []*Parameter) *EngineDefaults { - s.Parameters = v +// SetRole sets the Role field's value. +func (s *GlobalReplicationGroupMember) SetRole(v string) *GlobalReplicationGroupMember { + s.Role = &v return s } -// Represents a single occurrence of something interesting within the system. -// Some examples of events are creating a cluster, adding or removing a cache -// node, or rebooting a node. -type Event struct { +// SetStatus sets the Status field's value. +func (s *GlobalReplicationGroupMember) SetStatus(v string) *GlobalReplicationGroupMember { + s.Status = &v + return s +} + +type IncreaseNodeGroupsInGlobalReplicationGroupInput struct { _ struct{} `type:"structure"` - // The date and time when the event occurred. - Date *time.Time `type:"timestamp"` + // Indicates that the process begins immediately. At present, the only permitted + // value for this parameter is true. + // + // ApplyImmediately is a required field + ApplyImmediately *bool `type:"boolean" required:"true"` - // The text of the event. - Message *string `type:"string"` + // The name of the Global Datastore + // + // GlobalReplicationGroupId is a required field + GlobalReplicationGroupId *string `type:"string" required:"true"` - // The identifier for the source of the event. For example, if the event occurred - // at the cluster level, the identifier would be the name of the cluster. - SourceIdentifier *string `type:"string"` + // The number of node groups you wish to add + // + // NodeGroupCount is a required field + NodeGroupCount *int64 `type:"integer" required:"true"` - // Specifies the origin of this event - a cluster, a parameter group, a security - // group, etc. 
- SourceType *string `type:"string" enum:"SourceType"` + // Describes the replication group IDs, the AWS regions where they are stored + // and the shard configuration for each that comprise the Global Datastore + RegionalConfigurations []*RegionalConfiguration `locationNameList:"RegionalConfiguration" type:"list"` } // String returns the string representation -func (s Event) String() string { +func (s IncreaseNodeGroupsInGlobalReplicationGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Event) GoString() string { +func (s IncreaseNodeGroupsInGlobalReplicationGroupInput) GoString() string { return s.String() } -// SetDate sets the Date field's value. -func (s *Event) SetDate(v time.Time) *Event { - s.Date = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *IncreaseNodeGroupsInGlobalReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IncreaseNodeGroupsInGlobalReplicationGroupInput"} + if s.ApplyImmediately == nil { + invalidParams.Add(request.NewErrParamRequired("ApplyImmediately")) + } + if s.GlobalReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalReplicationGroupId")) + } + if s.NodeGroupCount == nil { + invalidParams.Add(request.NewErrParamRequired("NodeGroupCount")) + } + if s.RegionalConfigurations != nil { + for i, v := range s.RegionalConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RegionalConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyImmediately sets the ApplyImmediately field's value. +func (s *IncreaseNodeGroupsInGlobalReplicationGroupInput) SetApplyImmediately(v bool) *IncreaseNodeGroupsInGlobalReplicationGroupInput { + s.ApplyImmediately = &v return s } -// SetMessage sets the Message field's value. -func (s *Event) SetMessage(v string) *Event { - s.Message = &v +// SetGlobalReplicationGroupId sets the GlobalReplicationGroupId field's value. +func (s *IncreaseNodeGroupsInGlobalReplicationGroupInput) SetGlobalReplicationGroupId(v string) *IncreaseNodeGroupsInGlobalReplicationGroupInput { + s.GlobalReplicationGroupId = &v return s } -// SetSourceIdentifier sets the SourceIdentifier field's value. -func (s *Event) SetSourceIdentifier(v string) *Event { - s.SourceIdentifier = &v +// SetNodeGroupCount sets the NodeGroupCount field's value. +func (s *IncreaseNodeGroupsInGlobalReplicationGroupInput) SetNodeGroupCount(v int64) *IncreaseNodeGroupsInGlobalReplicationGroupInput { + s.NodeGroupCount = &v return s } -// SetSourceType sets the SourceType field's value. -func (s *Event) SetSourceType(v string) *Event { - s.SourceType = &v +// SetRegionalConfigurations sets the RegionalConfigurations field's value. +func (s *IncreaseNodeGroupsInGlobalReplicationGroupInput) SetRegionalConfigurations(v []*RegionalConfiguration) *IncreaseNodeGroupsInGlobalReplicationGroupInput { + s.RegionalConfigurations = v + return s +} + +type IncreaseNodeGroupsInGlobalReplicationGroupOutput struct { + _ struct{} `type:"structure"` + + // Consists of a primary cluster that accepts writes and an associated secondary + // cluster that resides in a different AWS region. The secondary cluster accepts + // only reads. The primary cluster automatically replicates updates to the secondary + // cluster. 
+ // + // * The GlobalReplicationGroupIdSuffix represents the name of the Global + // Datastore, which is what you use to associate a secondary cluster. + GlobalReplicationGroup *GlobalReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s IncreaseNodeGroupsInGlobalReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IncreaseNodeGroupsInGlobalReplicationGroupOutput) GoString() string { + return s.String() +} + +// SetGlobalReplicationGroup sets the GlobalReplicationGroup field's value. +func (s *IncreaseNodeGroupsInGlobalReplicationGroupOutput) SetGlobalReplicationGroup(v *GlobalReplicationGroup) *IncreaseNodeGroupsInGlobalReplicationGroupOutput { + s.GlobalReplicationGroup = v return s } @@ -11111,11 +14806,9 @@ type ListAllowedNodeTypeModificationsOutput struct { _ struct{} `type:"structure"` // A string list, each element of which specifies a cache node type which you - // can use to scale your cluster or replication group. - // - // When scaling down on a Redis cluster or replication group using ModifyCacheCluster - // or ModifyReplicationGroup, use a value from this list for the CacheNodeType - // parameter. + // can use to scale your cluster or replication group. When scaling down a Redis + // cluster or replication group using ModifyCacheCluster or ModifyReplicationGroup, + // use a value from this list for the CacheNodeType parameter. ScaleDownModifications []*string `type:"list"` // A string list, each element of which specifies a cache node type which you @@ -11698,42 +15391,163 @@ func (s *ModifyCacheSubnetGroupInput) SetCacheSubnetGroupDescription(v string) * return s } -// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. -func (s *ModifyCacheSubnetGroupInput) SetCacheSubnetGroupName(v string) *ModifyCacheSubnetGroupInput { - s.CacheSubnetGroupName = &v +// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value. +func (s *ModifyCacheSubnetGroupInput) SetCacheSubnetGroupName(v string) *ModifyCacheSubnetGroupInput { + s.CacheSubnetGroupName = &v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *ModifyCacheSubnetGroupInput) SetSubnetIds(v []*string) *ModifyCacheSubnetGroupInput { + s.SubnetIds = v + return s +} + +type ModifyCacheSubnetGroupOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of one of the following operations: + // + // * CreateCacheSubnetGroup + // + // * ModifyCacheSubnetGroup + CacheSubnetGroup *CacheSubnetGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyCacheSubnetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyCacheSubnetGroupOutput) GoString() string { + return s.String() +} + +// SetCacheSubnetGroup sets the CacheSubnetGroup field's value. +func (s *ModifyCacheSubnetGroupOutput) SetCacheSubnetGroup(v *CacheSubnetGroup) *ModifyCacheSubnetGroupOutput { + s.CacheSubnetGroup = v + return s +} + +type ModifyGlobalReplicationGroupInput struct { + _ struct{} `type:"structure"` + + // This parameter causes the modifications in this request and any pending modifications + // to be applied, asynchronously and as soon as possible. Modifications to Global + // Replication Groups cannot be requested to be applied in PreferredMaintenceWindow. 
+ // + // ApplyImmediately is a required field + ApplyImmediately *bool `type:"boolean" required:"true"` + + // Determines whether a read replica is automatically promoted to read/write + // primary if the existing primary encounters a failure. + AutomaticFailoverEnabled *bool `type:"boolean"` + + // A valid cache node type that you want to scale this Global Datastore to. + CacheNodeType *string `type:"string"` + + // The upgraded version of the cache engine to be run on the clusters in the + // Global Datastore. + EngineVersion *string `type:"string"` + + // A description of the Global Datastore + GlobalReplicationGroupDescription *string `type:"string"` + + // The name of the Global Datastore + // + // GlobalReplicationGroupId is a required field + GlobalReplicationGroupId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyGlobalReplicationGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyGlobalReplicationGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyGlobalReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyGlobalReplicationGroupInput"} + if s.ApplyImmediately == nil { + invalidParams.Add(request.NewErrParamRequired("ApplyImmediately")) + } + if s.GlobalReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyImmediately sets the ApplyImmediately field's value. +func (s *ModifyGlobalReplicationGroupInput) SetApplyImmediately(v bool) *ModifyGlobalReplicationGroupInput { + s.ApplyImmediately = &v + return s +} + +// SetAutomaticFailoverEnabled sets the AutomaticFailoverEnabled field's value. +func (s *ModifyGlobalReplicationGroupInput) SetAutomaticFailoverEnabled(v bool) *ModifyGlobalReplicationGroupInput { + s.AutomaticFailoverEnabled = &v + return s +} + +// SetCacheNodeType sets the CacheNodeType field's value. +func (s *ModifyGlobalReplicationGroupInput) SetCacheNodeType(v string) *ModifyGlobalReplicationGroupInput { + s.CacheNodeType = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *ModifyGlobalReplicationGroupInput) SetEngineVersion(v string) *ModifyGlobalReplicationGroupInput { + s.EngineVersion = &v + return s +} + +// SetGlobalReplicationGroupDescription sets the GlobalReplicationGroupDescription field's value. +func (s *ModifyGlobalReplicationGroupInput) SetGlobalReplicationGroupDescription(v string) *ModifyGlobalReplicationGroupInput { + s.GlobalReplicationGroupDescription = &v return s } -// SetSubnetIds sets the SubnetIds field's value. -func (s *ModifyCacheSubnetGroupInput) SetSubnetIds(v []*string) *ModifyCacheSubnetGroupInput { - s.SubnetIds = v +// SetGlobalReplicationGroupId sets the GlobalReplicationGroupId field's value. 
+func (s *ModifyGlobalReplicationGroupInput) SetGlobalReplicationGroupId(v string) *ModifyGlobalReplicationGroupInput { + s.GlobalReplicationGroupId = &v return s } -type ModifyCacheSubnetGroupOutput struct { +type ModifyGlobalReplicationGroupOutput struct { _ struct{} `type:"structure"` - // Represents the output of one of the following operations: - // - // * CreateCacheSubnetGroup + // Consists of a primary cluster that accepts writes and an associated secondary + // cluster that resides in a different AWS region. The secondary cluster accepts + // only reads. The primary cluster automatically replicates updates to the secondary + // cluster. // - // * ModifyCacheSubnetGroup - CacheSubnetGroup *CacheSubnetGroup `type:"structure"` + // * The GlobalReplicationGroupIdSuffix represents the name of the Global + // Datastore, which is what you use to associate a secondary cluster. + GlobalReplicationGroup *GlobalReplicationGroup `type:"structure"` } // String returns the string representation -func (s ModifyCacheSubnetGroupOutput) String() string { +func (s ModifyGlobalReplicationGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ModifyCacheSubnetGroupOutput) GoString() string { +func (s ModifyGlobalReplicationGroupOutput) GoString() string { return s.String() } -// SetCacheSubnetGroup sets the CacheSubnetGroup field's value. -func (s *ModifyCacheSubnetGroupOutput) SetCacheSubnetGroup(v *CacheSubnetGroup) *ModifyCacheSubnetGroupOutput { - s.CacheSubnetGroup = v +// SetGlobalReplicationGroup sets the GlobalReplicationGroup field's value. +func (s *ModifyGlobalReplicationGroupOutput) SetGlobalReplicationGroup(v *GlobalReplicationGroup) *ModifyGlobalReplicationGroupOutput { + s.GlobalReplicationGroup = v return s } @@ -11784,15 +15598,6 @@ type ModifyReplicationGroupInput struct { // primary if the existing primary encounters a failure. // // Valid values: true | false - // - // Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover - // on: - // - // * Redis versions earlier than 2.8.6. - // - // * Redis (cluster mode disabled): T1 node types. - // - // * Redis (cluster mode enabled): T1 node types. AutomaticFailoverEnabled *bool `type:"boolean"` // A valid cache node type that you want to scale this replication group to. @@ -11824,6 +15629,10 @@ type ModifyReplicationGroupInput struct { // and create it anew with the earlier engine version. EngineVersion *string `type:"string"` + // A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. + // For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html). + MultiAZEnabled *bool `type:"boolean"` + // Deprecated. This parameter is not used. // // Deprecated: NodeGroupId has been deprecated @@ -11870,6 +15679,9 @@ type ModifyReplicationGroupInput struct { // are read replicas. PrimaryClusterId *string `type:"string"` + // Removes the user groups that can access this replication group. + RemoveUserGroups *bool `type:"boolean"` + // A description for the replication group. Maximum length is 255 characters. ReplicationGroupDescription *string `type:"string"` @@ -11907,6 +15719,13 @@ type ModifyReplicationGroupInput struct { // group. This parameter cannot be set for Redis (cluster mode enabled) replication // groups. SnapshottingClusterId *string `type:"string"` + + // A list of user group IDs. 
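+	// These are the user group IDs to associate with the replication group; together
+	// with MultiAZEnabled and UserGroupIdsToRemove they can be supplied on a single
+	// ModifyReplicationGroup call. An illustrative sketch (svc is an assumed,
+	// already-configured ElastiCache client and the IDs are placeholders):
+	//
+	//	out, err := svc.ModifyReplicationGroup(&ModifyReplicationGroupInput{
+	//		ReplicationGroupId: aws.String("my-replication-group"),
+	//		ApplyImmediately:   aws.Bool(true),
+	//		MultiAZEnabled:     aws.Bool(true),
+	//		UserGroupIdsToAdd:  aws.StringSlice([]string{"my-user-group"}),
+	//	})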
+ UserGroupIdsToAdd []*string `type:"list"` + + // A list of users groups to remove, meaning the users in the group no longer + // can access thereplication group. + UserGroupIdsToRemove []*string `type:"list"` } // String returns the string representation @@ -11986,6 +15805,12 @@ func (s *ModifyReplicationGroupInput) SetEngineVersion(v string) *ModifyReplicat return s } +// SetMultiAZEnabled sets the MultiAZEnabled field's value. +func (s *ModifyReplicationGroupInput) SetMultiAZEnabled(v bool) *ModifyReplicationGroupInput { + s.MultiAZEnabled = &v + return s +} + // SetNodeGroupId sets the NodeGroupId field's value. func (s *ModifyReplicationGroupInput) SetNodeGroupId(v string) *ModifyReplicationGroupInput { s.NodeGroupId = &v @@ -12016,6 +15841,12 @@ func (s *ModifyReplicationGroupInput) SetPrimaryClusterId(v string) *ModifyRepli return s } +// SetRemoveUserGroups sets the RemoveUserGroups field's value. +func (s *ModifyReplicationGroupInput) SetRemoveUserGroups(v bool) *ModifyReplicationGroupInput { + s.RemoveUserGroups = &v + return s +} + // SetReplicationGroupDescription sets the ReplicationGroupDescription field's value. func (s *ModifyReplicationGroupInput) SetReplicationGroupDescription(v string) *ModifyReplicationGroupInput { s.ReplicationGroupDescription = &v @@ -12052,6 +15883,18 @@ func (s *ModifyReplicationGroupInput) SetSnapshottingClusterId(v string) *Modify return s } +// SetUserGroupIdsToAdd sets the UserGroupIdsToAdd field's value. +func (s *ModifyReplicationGroupInput) SetUserGroupIdsToAdd(v []*string) *ModifyReplicationGroupInput { + s.UserGroupIdsToAdd = v + return s +} + +// SetUserGroupIdsToRemove sets the UserGroupIdsToRemove field's value. +func (s *ModifyReplicationGroupInput) SetUserGroupIdsToRemove(v []*string) *ModifyReplicationGroupInput { + s.UserGroupIdsToRemove = v + return s +} + type ModifyReplicationGroupOutput struct { _ struct{} `type:"structure"` @@ -12087,76 +15930,324 @@ type ModifyReplicationGroupShardConfigurationInput struct { // ApplyImmediately is a required field ApplyImmediately *bool `type:"boolean" required:"true"` - // The number of node groups (shards) that results from the modification of - // the shard configuration. - // - // NodeGroupCount is a required field - NodeGroupCount *int64 `type:"integer" required:"true"` + // The number of node groups (shards) that results from the modification of + // the shard configuration. + // + // NodeGroupCount is a required field + NodeGroupCount *int64 `type:"integer" required:"true"` + + // If the value of NodeGroupCount is less than the current number of node groups + // (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. + // NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. + // + // ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove + // from the cluster. + NodeGroupsToRemove []*string `locationNameList:"NodeGroupToRemove" type:"list"` + + // If the value of NodeGroupCount is less than the current number of node groups + // (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. + // NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. + // + // ElastiCache for Redis will attempt to remove all node groups except those + // listed by NodeGroupsToRetain from the cluster. 
+ NodeGroupsToRetain []*string `locationNameList:"NodeGroupToRetain" type:"list"` + + // The name of the Redis (cluster mode enabled) cluster (replication group) + // on which the shards are to be configured. + // + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` + + // Specifies the preferred availability zones for each node group in the cluster. + // If the value of NodeGroupCount is greater than the current number of node + // groups (shards), you can use this parameter to specify the preferred availability + // zones of the cluster's shards. If you omit this parameter ElastiCache selects + // availability zones for you. + // + // You can specify this parameter only if the value of NodeGroupCount is greater + // than the current number of node groups (shards). + ReshardingConfiguration []*ReshardingConfiguration `locationNameList:"ReshardingConfiguration" type:"list"` +} + +// String returns the string representation +func (s ModifyReplicationGroupShardConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReplicationGroupShardConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyReplicationGroupShardConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyReplicationGroupShardConfigurationInput"} + if s.ApplyImmediately == nil { + invalidParams.Add(request.NewErrParamRequired("ApplyImmediately")) + } + if s.NodeGroupCount == nil { + invalidParams.Add(request.NewErrParamRequired("NodeGroupCount")) + } + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + if s.ReshardingConfiguration != nil { + for i, v := range s.ReshardingConfiguration { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReshardingConfiguration", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyImmediately sets the ApplyImmediately field's value. +func (s *ModifyReplicationGroupShardConfigurationInput) SetApplyImmediately(v bool) *ModifyReplicationGroupShardConfigurationInput { + s.ApplyImmediately = &v + return s +} + +// SetNodeGroupCount sets the NodeGroupCount field's value. +func (s *ModifyReplicationGroupShardConfigurationInput) SetNodeGroupCount(v int64) *ModifyReplicationGroupShardConfigurationInput { + s.NodeGroupCount = &v + return s +} + +// SetNodeGroupsToRemove sets the NodeGroupsToRemove field's value. +func (s *ModifyReplicationGroupShardConfigurationInput) SetNodeGroupsToRemove(v []*string) *ModifyReplicationGroupShardConfigurationInput { + s.NodeGroupsToRemove = v + return s +} + +// SetNodeGroupsToRetain sets the NodeGroupsToRetain field's value. +func (s *ModifyReplicationGroupShardConfigurationInput) SetNodeGroupsToRetain(v []*string) *ModifyReplicationGroupShardConfigurationInput { + s.NodeGroupsToRetain = v + return s +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *ModifyReplicationGroupShardConfigurationInput) SetReplicationGroupId(v string) *ModifyReplicationGroupShardConfigurationInput { + s.ReplicationGroupId = &v + return s +} + +// SetReshardingConfiguration sets the ReshardingConfiguration field's value. 
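+//
+// As documented on the fields above, when NodeGroupCount is lower than the current
+// number of node groups either NodeGroupsToRemove or NodeGroupsToRetain must be
+// supplied. A minimal scale-in sketch (svc is an assumed, already-configured
+// ElastiCache client and the IDs are placeholders):
+//
+//	out, err := svc.ModifyReplicationGroupShardConfiguration(&ModifyReplicationGroupShardConfigurationInput{
+//		ApplyImmediately:   aws.Bool(true),
+//		ReplicationGroupId: aws.String("my-cluster-mode-enabled-group"),
+//		NodeGroupCount:     aws.Int64(2),
+//		NodeGroupsToRemove: aws.StringSlice([]string{"0003"}),
+//	})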
+func (s *ModifyReplicationGroupShardConfigurationInput) SetReshardingConfiguration(v []*ReshardingConfiguration) *ModifyReplicationGroupShardConfigurationInput { + s.ReshardingConfiguration = v + return s +} + +type ModifyReplicationGroupShardConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific Redis replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s ModifyReplicationGroupShardConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyReplicationGroupShardConfigurationOutput) GoString() string { + return s.String() +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *ModifyReplicationGroupShardConfigurationOutput) SetReplicationGroup(v *ReplicationGroup) *ModifyReplicationGroupShardConfigurationOutput { + s.ReplicationGroup = v + return s +} + +type ModifyUserGroupInput struct { + _ struct{} `type:"structure"` + + // The ID of the user group. + // + // UserGroupId is a required field + UserGroupId *string `type:"string" required:"true"` + + // The list of user IDs to add to the user group. + UserIdsToAdd []*string `min:"1" type:"list"` + + // The list of user IDs to remove from the user group. + UserIdsToRemove []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s ModifyUserGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyUserGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyUserGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyUserGroupInput"} + if s.UserGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("UserGroupId")) + } + if s.UserIdsToAdd != nil && len(s.UserIdsToAdd) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserIdsToAdd", 1)) + } + if s.UserIdsToRemove != nil && len(s.UserIdsToRemove) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserIdsToRemove", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetUserGroupId sets the UserGroupId field's value. +func (s *ModifyUserGroupInput) SetUserGroupId(v string) *ModifyUserGroupInput { + s.UserGroupId = &v + return s +} + +// SetUserIdsToAdd sets the UserIdsToAdd field's value. +func (s *ModifyUserGroupInput) SetUserIdsToAdd(v []*string) *ModifyUserGroupInput { + s.UserIdsToAdd = v + return s +} + +// SetUserIdsToRemove sets the UserIdsToRemove field's value. +func (s *ModifyUserGroupInput) SetUserIdsToRemove(v []*string) *ModifyUserGroupInput { + s.UserIdsToRemove = v + return s +} + +type ModifyUserGroupOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the user group. + ARN *string `type:"string"` + + // Must be Redis. + Engine *string `type:"string"` + + // A list of updates being applied to the user groups. + PendingChanges *UserGroupPendingChanges `type:"structure"` + + // A list of replication groups that the user group can access. + ReplicationGroups []*string `type:"list"` + + // Indicates user group status. Can be "creating", "active", "modifying", "deleting". + Status *string `type:"string"` + + // The ID of the user group. + UserGroupId *string `type:"string"` + + // The list of user IDs that belong to the user group. 
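+	// For example, adding and removing users in one ModifyUserGroup call might look
+	// like this (illustrative; svc is an assumed, already-configured ElastiCache
+	// client and the IDs are placeholders):
+	//
+	//	out, err := svc.ModifyUserGroup(&ModifyUserGroupInput{
+	//		UserGroupId:     aws.String("my-user-group"),
+	//		UserIdsToAdd:    aws.StringSlice([]string{"app-user-1"}),
+	//		UserIdsToRemove: aws.StringSlice([]string{"app-user-0"}),
+	//	})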
+ UserIds []*string `type:"list"` +} + +// String returns the string representation +func (s ModifyUserGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyUserGroupOutput) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *ModifyUserGroupOutput) SetARN(v string) *ModifyUserGroupOutput { + s.ARN = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *ModifyUserGroupOutput) SetEngine(v string) *ModifyUserGroupOutput { + s.Engine = &v + return s +} + +// SetPendingChanges sets the PendingChanges field's value. +func (s *ModifyUserGroupOutput) SetPendingChanges(v *UserGroupPendingChanges) *ModifyUserGroupOutput { + s.PendingChanges = v + return s +} + +// SetReplicationGroups sets the ReplicationGroups field's value. +func (s *ModifyUserGroupOutput) SetReplicationGroups(v []*string) *ModifyUserGroupOutput { + s.ReplicationGroups = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ModifyUserGroupOutput) SetStatus(v string) *ModifyUserGroupOutput { + s.Status = &v + return s +} + +// SetUserGroupId sets the UserGroupId field's value. +func (s *ModifyUserGroupOutput) SetUserGroupId(v string) *ModifyUserGroupOutput { + s.UserGroupId = &v + return s +} + +// SetUserIds sets the UserIds field's value. +func (s *ModifyUserGroupOutput) SetUserIds(v []*string) *ModifyUserGroupOutput { + s.UserIds = v + return s +} - // If the value of NodeGroupCount is less than the current number of node groups - // (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. - // NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. - // - // ElastiCache for Redis will attempt to remove all node groups listed by NodeGroupsToRemove - // from the cluster. - NodeGroupsToRemove []*string `locationNameList:"NodeGroupToRemove" type:"list"` +type ModifyUserInput struct { + _ struct{} `type:"structure"` - // If the value of NodeGroupCount is less than the current number of node groups - // (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. - // NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. - // - // ElastiCache for Redis will attempt to remove all node groups except those - // listed by NodeGroupsToRetain from the cluster. - NodeGroupsToRetain []*string `locationNameList:"NodeGroupToRetain" type:"list"` + // Access permissions string used for this user account. + AccessString *string `type:"string"` - // The name of the Redis (cluster mode enabled) cluster (replication group) - // on which the shards are to be configured. - // - // ReplicationGroupId is a required field - ReplicationGroupId *string `type:"string" required:"true"` + // Adds additional user permissions to the access string. + AppendAccessString *string `type:"string"` - // Specifies the preferred availability zones for each node group in the cluster. - // If the value of NodeGroupCount is greater than the current number of node - // groups (shards), you can use this parameter to specify the preferred availability - // zones of the cluster's shards. If you omit this parameter ElastiCache selects - // availability zones for you. + // Indicates no password is required for the user account. + NoPasswordRequired *bool `type:"boolean"` + + // The passwords belonging to the user account. You are allowed up to two. + Passwords []*string `min:"1" type:"list"` + + // The ID of the user. 
// - // You can specify this parameter only if the value of NodeGroupCount is greater - // than the current number of node groups (shards). - ReshardingConfiguration []*ReshardingConfiguration `locationNameList:"ReshardingConfiguration" type:"list"` + // UserId is a required field + UserId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ModifyReplicationGroupShardConfigurationInput) String() string { +func (s ModifyUserInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ModifyReplicationGroupShardConfigurationInput) GoString() string { +func (s ModifyUserInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ModifyReplicationGroupShardConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ModifyReplicationGroupShardConfigurationInput"} - if s.ApplyImmediately == nil { - invalidParams.Add(request.NewErrParamRequired("ApplyImmediately")) - } - if s.NodeGroupCount == nil { - invalidParams.Add(request.NewErrParamRequired("NodeGroupCount")) +func (s *ModifyUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyUserInput"} + if s.Passwords != nil && len(s.Passwords) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Passwords", 1)) } - if s.ReplicationGroupId == nil { - invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + if s.UserId == nil { + invalidParams.Add(request.NewErrParamRequired("UserId")) } - if s.ReshardingConfiguration != nil { - for i, v := range s.ReshardingConfiguration { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReshardingConfiguration", i), err.(request.ErrInvalidParams)) - } - } + if s.UserId != nil && len(*s.UserId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserId", 1)) } if invalidParams.Len() > 0 { @@ -12165,62 +16256,119 @@ func (s *ModifyReplicationGroupShardConfigurationInput) Validate() error { return nil } -// SetApplyImmediately sets the ApplyImmediately field's value. -func (s *ModifyReplicationGroupShardConfigurationInput) SetApplyImmediately(v bool) *ModifyReplicationGroupShardConfigurationInput { - s.ApplyImmediately = &v - return s -} - -// SetNodeGroupCount sets the NodeGroupCount field's value. -func (s *ModifyReplicationGroupShardConfigurationInput) SetNodeGroupCount(v int64) *ModifyReplicationGroupShardConfigurationInput { - s.NodeGroupCount = &v +// SetAccessString sets the AccessString field's value. +func (s *ModifyUserInput) SetAccessString(v string) *ModifyUserInput { + s.AccessString = &v return s } -// SetNodeGroupsToRemove sets the NodeGroupsToRemove field's value. -func (s *ModifyReplicationGroupShardConfigurationInput) SetNodeGroupsToRemove(v []*string) *ModifyReplicationGroupShardConfigurationInput { - s.NodeGroupsToRemove = v +// SetAppendAccessString sets the AppendAccessString field's value. +func (s *ModifyUserInput) SetAppendAccessString(v string) *ModifyUserInput { + s.AppendAccessString = &v return s } -// SetNodeGroupsToRetain sets the NodeGroupsToRetain field's value. -func (s *ModifyReplicationGroupShardConfigurationInput) SetNodeGroupsToRetain(v []*string) *ModifyReplicationGroupShardConfigurationInput { - s.NodeGroupsToRetain = v +// SetNoPasswordRequired sets the NoPasswordRequired field's value. 
+func (s *ModifyUserInput) SetNoPasswordRequired(v bool) *ModifyUserInput { + s.NoPasswordRequired = &v return s } -// SetReplicationGroupId sets the ReplicationGroupId field's value. -func (s *ModifyReplicationGroupShardConfigurationInput) SetReplicationGroupId(v string) *ModifyReplicationGroupShardConfigurationInput { - s.ReplicationGroupId = &v +// SetPasswords sets the Passwords field's value. +func (s *ModifyUserInput) SetPasswords(v []*string) *ModifyUserInput { + s.Passwords = v return s } -// SetReshardingConfiguration sets the ReshardingConfiguration field's value. -func (s *ModifyReplicationGroupShardConfigurationInput) SetReshardingConfiguration(v []*ReshardingConfiguration) *ModifyReplicationGroupShardConfigurationInput { - s.ReshardingConfiguration = v +// SetUserId sets the UserId field's value. +func (s *ModifyUserInput) SetUserId(v string) *ModifyUserInput { + s.UserId = &v return s } -type ModifyReplicationGroupShardConfigurationOutput struct { +type ModifyUserOutput struct { _ struct{} `type:"structure"` - // Contains all of the attributes of a specific Redis replication group. - ReplicationGroup *ReplicationGroup `type:"structure"` + // The Amazon Resource Name (ARN) of the user account. + ARN *string `type:"string"` + + // Access permissions string used for this user account. + AccessString *string `type:"string"` + + // Denotes whether the user requires a password to authenticate. + Authentication *Authentication `type:"structure"` + + // Must be Redis. + Engine *string `type:"string"` + + // Indicates the user status. Can be "active", "modifying" or "deleting". + Status *string `type:"string"` + + // Returns a list of the user group IDs the user belongs to. + UserGroupIds []*string `type:"list"` + + // The ID of the user. + UserId *string `type:"string"` + + // The username of the user. + UserName *string `type:"string"` } // String returns the string representation -func (s ModifyReplicationGroupShardConfigurationOutput) String() string { +func (s ModifyUserOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ModifyReplicationGroupShardConfigurationOutput) GoString() string { +func (s ModifyUserOutput) GoString() string { return s.String() } -// SetReplicationGroup sets the ReplicationGroup field's value. -func (s *ModifyReplicationGroupShardConfigurationOutput) SetReplicationGroup(v *ReplicationGroup) *ModifyReplicationGroupShardConfigurationOutput { - s.ReplicationGroup = v +// SetARN sets the ARN field's value. +func (s *ModifyUserOutput) SetARN(v string) *ModifyUserOutput { + s.ARN = &v + return s +} + +// SetAccessString sets the AccessString field's value. +func (s *ModifyUserOutput) SetAccessString(v string) *ModifyUserOutput { + s.AccessString = &v + return s +} + +// SetAuthentication sets the Authentication field's value. +func (s *ModifyUserOutput) SetAuthentication(v *Authentication) *ModifyUserOutput { + s.Authentication = v + return s +} + +// SetEngine sets the Engine field's value. +func (s *ModifyUserOutput) SetEngine(v string) *ModifyUserOutput { + s.Engine = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ModifyUserOutput) SetStatus(v string) *ModifyUserOutput { + s.Status = &v + return s +} + +// SetUserGroupIds sets the UserGroupIds field's value. +func (s *ModifyUserOutput) SetUserGroupIds(v []*string) *ModifyUserOutput { + s.UserGroupIds = v + return s +} + +// SetUserId sets the UserId field's value. 
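+//
+// A ModifyUser call typically rotates a user's access string or passwords. An
+// illustrative sketch (svc is an assumed, already-configured ElastiCache client;
+// the user ID, access string and password are placeholders):
+//
+//	out, err := svc.ModifyUser(&ModifyUserInput{
+//		UserId:       aws.String("app-user-1"),
+//		AccessString: aws.String("on ~app:* +@read"),
+//		Passwords:    aws.StringSlice([]string{"a-long-placeholder-password"}),
+//	})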
+func (s *ModifyUserOutput) SetUserId(v string) *ModifyUserOutput { + s.UserId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *ModifyUserOutput) SetUserName(v string) *ModifyUserOutput { + s.UserName = &v return s } @@ -12250,7 +16398,8 @@ type NodeGroup struct { // The keyspace for this node group (shard). Slots *string `type:"string"` - // The current state of this replication group - creating, available, etc. + // The current state of this replication group - creating, available, modifying, + // deleting. Status *string `type:"string"` } @@ -12314,6 +16463,9 @@ type NodeGroupConfiguration struct { // launched. PrimaryAvailabilityZone *string `type:"string"` + // The output ARN of the primary node. + PrimaryOutpostArn *string `type:"string"` + // A list of Availability Zones to be used for the read replicas. The number // of Availability Zones in this list must match the value of ReplicaCount or // ReplicasPerNodeGroup if not specified. @@ -12322,6 +16474,9 @@ type NodeGroupConfiguration struct { // The number of read replica nodes in this node group (shard). ReplicaCount *int64 `type:"integer"` + // The outpost ARN of the node replicas. + ReplicaOutpostArns []*string `locationNameList:"OutpostArn" type:"list"` + // A string that specifies the keyspace for a particular node group. Keyspaces // range from 0 to 16,383. The string is in the format startkey-endkey. // @@ -12364,6 +16519,12 @@ func (s *NodeGroupConfiguration) SetPrimaryAvailabilityZone(v string) *NodeGroup return s } +// SetPrimaryOutpostArn sets the PrimaryOutpostArn field's value. +func (s *NodeGroupConfiguration) SetPrimaryOutpostArn(v string) *NodeGroupConfiguration { + s.PrimaryOutpostArn = &v + return s +} + // SetReplicaAvailabilityZones sets the ReplicaAvailabilityZones field's value. func (s *NodeGroupConfiguration) SetReplicaAvailabilityZones(v []*string) *NodeGroupConfiguration { s.ReplicaAvailabilityZones = v @@ -12376,6 +16537,12 @@ func (s *NodeGroupConfiguration) SetReplicaCount(v int64) *NodeGroupConfiguratio return s } +// SetReplicaOutpostArns sets the ReplicaOutpostArns field's value. +func (s *NodeGroupConfiguration) SetReplicaOutpostArns(v []*string) *NodeGroupConfiguration { + s.ReplicaOutpostArns = v + return s +} + // SetSlots sets the Slots field's value. func (s *NodeGroupConfiguration) SetSlots(v string) *NodeGroupConfiguration { s.Slots = &v @@ -12400,6 +16567,9 @@ type NodeGroupMember struct { // The name of the Availability Zone in which the node is located. PreferredAvailabilityZone *string `type:"string"` + // The outpost ARN of the node group member. + PreferredOutpostArn *string `type:"string"` + // The information required for client programs to connect to a node for read // operations. The read endpoint is only applicable on Redis (cluster mode disabled) // clusters. @@ -12440,6 +16610,12 @@ func (s *NodeGroupMember) SetPreferredAvailabilityZone(v string) *NodeGroupMembe return s } +// SetPreferredOutpostArn sets the PreferredOutpostArn field's value. +func (s *NodeGroupMember) SetPreferredOutpostArn(v string) *NodeGroupMember { + s.PreferredOutpostArn = &v + return s +} + // SetReadEndpoint sets the ReadEndpoint field's value. 
func (s *NodeGroupMember) SetReadEndpoint(v *Endpoint) *NodeGroupMember { s.ReadEndpoint = v @@ -13031,6 +17207,87 @@ func (s *PurchaseReservedCacheNodesOfferingOutput) SetReservedCacheNode(v *Reser return s } +type RebalanceSlotsInGlobalReplicationGroupInput struct { + _ struct{} `type:"structure"` + + // If True, redistribution is applied immediately. + // + // ApplyImmediately is a required field + ApplyImmediately *bool `type:"boolean" required:"true"` + + // The name of the Global Datastore + // + // GlobalReplicationGroupId is a required field + GlobalReplicationGroupId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RebalanceSlotsInGlobalReplicationGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebalanceSlotsInGlobalReplicationGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebalanceSlotsInGlobalReplicationGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebalanceSlotsInGlobalReplicationGroupInput"} + if s.ApplyImmediately == nil { + invalidParams.Add(request.NewErrParamRequired("ApplyImmediately")) + } + if s.GlobalReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyImmediately sets the ApplyImmediately field's value. +func (s *RebalanceSlotsInGlobalReplicationGroupInput) SetApplyImmediately(v bool) *RebalanceSlotsInGlobalReplicationGroupInput { + s.ApplyImmediately = &v + return s +} + +// SetGlobalReplicationGroupId sets the GlobalReplicationGroupId field's value. +func (s *RebalanceSlotsInGlobalReplicationGroupInput) SetGlobalReplicationGroupId(v string) *RebalanceSlotsInGlobalReplicationGroupInput { + s.GlobalReplicationGroupId = &v + return s +} + +type RebalanceSlotsInGlobalReplicationGroupOutput struct { + _ struct{} `type:"structure"` + + // Consists of a primary cluster that accepts writes and an associated secondary + // cluster that resides in a different AWS region. The secondary cluster accepts + // only reads. The primary cluster automatically replicates updates to the secondary + // cluster. + // + // * The GlobalReplicationGroupIdSuffix represents the name of the Global + // Datastore, which is what you use to associate a secondary cluster. + GlobalReplicationGroup *GlobalReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s RebalanceSlotsInGlobalReplicationGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebalanceSlotsInGlobalReplicationGroupOutput) GoString() string { + return s.String() +} + +// SetGlobalReplicationGroup sets the GlobalReplicationGroup field's value. +func (s *RebalanceSlotsInGlobalReplicationGroupOutput) SetGlobalReplicationGroup(v *GlobalReplicationGroup) *RebalanceSlotsInGlobalReplicationGroupOutput { + s.GlobalReplicationGroup = v + return s +} + // Represents the input of a RebootCacheCluster operation. 
type RebootCacheClusterInput struct { _ struct{} `type:"structure"` @@ -13142,6 +17399,84 @@ func (s *RecurringCharge) SetRecurringChargeFrequency(v string) *RecurringCharge return s } +// A list of the replication groups +type RegionalConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the secondary cluster + // + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` + + // The AWS region where the cluster is stored + // + // ReplicationGroupRegion is a required field + ReplicationGroupRegion *string `type:"string" required:"true"` + + // A list of PreferredAvailabilityZones objects that specifies the configuration + // of a node group in the resharded cluster. + // + // ReshardingConfiguration is a required field + ReshardingConfiguration []*ReshardingConfiguration `locationNameList:"ReshardingConfiguration" type:"list" required:"true"` +} + +// String returns the string representation +func (s RegionalConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegionalConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegionalConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegionalConfiguration"} + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + if s.ReplicationGroupRegion == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupRegion")) + } + if s.ReshardingConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReshardingConfiguration")) + } + if s.ReshardingConfiguration != nil { + for i, v := range s.ReshardingConfiguration { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReshardingConfiguration", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *RegionalConfiguration) SetReplicationGroupId(v string) *RegionalConfiguration { + s.ReplicationGroupId = &v + return s +} + +// SetReplicationGroupRegion sets the ReplicationGroupRegion field's value. +func (s *RegionalConfiguration) SetReplicationGroupRegion(v string) *RegionalConfiguration { + s.ReplicationGroupRegion = &v + return s +} + +// SetReshardingConfiguration sets the ReshardingConfiguration field's value. +func (s *RegionalConfiguration) SetReshardingConfiguration(v []*ReshardingConfiguration) *RegionalConfiguration { + s.ReshardingConfiguration = v + return s +} + // Represents the input of a RemoveTagsFromResource operation. type RemoveTagsFromResourceInput struct { _ struct{} `type:"structure"` @@ -13204,6 +17539,9 @@ func (s *RemoveTagsFromResourceInput) SetTagKeys(v []*string) *RemoveTagsFromRes type ReplicationGroup struct { _ struct{} `type:"structure"` + // The ARN (Amazon Resource Name) of the replication group. + ARN *string `type:"string"` + // A flag that enables encryption at-rest when set to true. // // You cannot modify the value of AtRestEncryptionEnabled after the cluster @@ -13224,17 +17562,7 @@ type ReplicationGroup struct { // The date the auth token was last modified AuthTokenLastModifiedDate *time.Time `type:"timestamp"` - // Indicates the status of Multi-AZ with automatic failover for this Redis replication - // group. 
- // - // Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover - // on: - // - // * Redis versions earlier than 2.8.6. - // - // * Redis (cluster mode disabled): T1 node types. - // - // * Redis (cluster mode enabled): T1 node types. + // Indicates the status of automatic failover for this Redis replication group. AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"` // The name of the compute and memory capacity node type for each node in the @@ -13255,12 +17583,23 @@ type ReplicationGroup struct { // The user supplied description of the replication group. Description *string `type:"string"` + // The name of the Global Datastore and role of this replication group in the + // Global Datastore. + GlobalReplicationGroupInfo *GlobalReplicationGroupInfo `type:"structure"` + // The ID of the KMS key used to encrypt the disk in the cluster. KmsKeyId *string `type:"string"` // The names of all the cache clusters that are part of this replication group. MemberClusters []*string `locationNameList:"ClusterId" type:"list"` + // The outpost ARNs of the replication group's member clusters. + MemberClustersOutpostArns []*string `locationNameList:"ReplicationGroupOutpostArn" type:"list"` + + // A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. + // For more information, see Minimizing Downtime: Multi-AZ (http://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/AutoFailover.html) + MultiAZ *string `type:"string" enum:"MultiAZStatus"` + // A list of node groups in this replication group. For Redis (cluster mode // disabled) replication groups, this is a single-element list. For Redis (cluster // mode enabled) replication groups, the list contains an entry for each node @@ -13312,6 +17651,9 @@ type ReplicationGroup struct { // // Default: false TransitEncryptionEnabled *bool `type:"boolean"` + + // The list of user group IDs that have access to the replication group. + UserGroupIds []*string `type:"list"` } // String returns the string representation @@ -13324,6 +17666,12 @@ func (s ReplicationGroup) GoString() string { return s.String() } +// SetARN sets the ARN field's value. +func (s *ReplicationGroup) SetARN(v string) *ReplicationGroup { + s.ARN = &v + return s +} + // SetAtRestEncryptionEnabled sets the AtRestEncryptionEnabled field's value. func (s *ReplicationGroup) SetAtRestEncryptionEnabled(v bool) *ReplicationGroup { s.AtRestEncryptionEnabled = &v @@ -13372,6 +17720,12 @@ func (s *ReplicationGroup) SetDescription(v string) *ReplicationGroup { return s } +// SetGlobalReplicationGroupInfo sets the GlobalReplicationGroupInfo field's value. +func (s *ReplicationGroup) SetGlobalReplicationGroupInfo(v *GlobalReplicationGroupInfo) *ReplicationGroup { + s.GlobalReplicationGroupInfo = v + return s +} + // SetKmsKeyId sets the KmsKeyId field's value. func (s *ReplicationGroup) SetKmsKeyId(v string) *ReplicationGroup { s.KmsKeyId = &v @@ -13384,6 +17738,18 @@ func (s *ReplicationGroup) SetMemberClusters(v []*string) *ReplicationGroup { return s } +// SetMemberClustersOutpostArns sets the MemberClustersOutpostArns field's value. +func (s *ReplicationGroup) SetMemberClustersOutpostArns(v []*string) *ReplicationGroup { + s.MemberClustersOutpostArns = v + return s +} + +// SetMultiAZ sets the MultiAZ field's value. +func (s *ReplicationGroup) SetMultiAZ(v string) *ReplicationGroup { + s.MultiAZ = &v + return s +} + // SetNodeGroups sets the NodeGroups field's value. 
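+//
+// The ReplicationGroup fields introduced above (ARN, GlobalReplicationGroupInfo,
+// MemberClustersOutpostArns, MultiAZ and UserGroupIds) are returned on describe
+// calls. An illustrative read, assuming a configured client svc and a placeholder
+// replication group ID:
+//
+//	out, err := svc.DescribeReplicationGroups(&DescribeReplicationGroupsInput{
+//		ReplicationGroupId: aws.String("my-replication-group"),
+//	})
+//	if err == nil && len(out.ReplicationGroups) > 0 {
+//		rg := out.ReplicationGroups[0]
+//		_ = aws.StringValue(rg.MultiAZ)           // "enabled" or "disabled"
+//		_ = aws.StringValueSlice(rg.UserGroupIds) // user groups attached to the group
+//	}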
func (s *ReplicationGroup) SetNodeGroups(v []*NodeGroup) *ReplicationGroup { s.NodeGroups = v @@ -13432,6 +17798,12 @@ func (s *ReplicationGroup) SetTransitEncryptionEnabled(v bool) *ReplicationGroup return s } +// SetUserGroupIds sets the UserGroupIds field's value. +func (s *ReplicationGroup) SetUserGroupIds(v []*string) *ReplicationGroup { + s.UserGroupIds = v + return s +} + // The settings to be applied to the Redis replication group, either immediately // or during the next maintenance window. type ReplicationGroupPendingModifiedValues struct { @@ -13440,17 +17812,7 @@ type ReplicationGroupPendingModifiedValues struct { // The auth token status AuthTokenStatus *string `type:"string" enum:"AuthTokenUpdateStatus"` - // Indicates the status of Multi-AZ with automatic failover for this Redis replication - // group. - // - // Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover - // on: - // - // * Redis versions earlier than 2.8.6. - // - // * Redis (cluster mode disabled): T1 node types. - // - // * Redis (cluster mode enabled): T1 node types. + // Indicates the status of automatic failover for this Redis replication group. AutomaticFailoverStatus *string `type:"string" enum:"PendingAutomaticFailoverStatus"` // The primary cluster ID that is applied immediately (if --apply-immediately @@ -13459,6 +17821,9 @@ type ReplicationGroupPendingModifiedValues struct { // The status of an online resharding operation. Resharding *ReshardingStatus `type:"structure"` + + // The user groups being modified. + UserGroups *UserGroupsUpdateStatus `type:"structure"` } // String returns the string representation @@ -13495,6 +17860,12 @@ func (s *ReplicationGroupPendingModifiedValues) SetResharding(v *ReshardingStatu return s } +// SetUserGroups sets the UserGroups field's value. +func (s *ReplicationGroupPendingModifiedValues) SetUserGroups(v *UserGroupsUpdateStatus) *ReplicationGroupPendingModifiedValues { + s.UserGroups = v + return s +} + // Represents the output of a PurchaseReservedCacheNodesOffering operation. 
type ReservedCacheNode struct { _ struct{} `type:"structure"` @@ -13511,11 +17882,11 @@ type ReservedCacheNode struct { // * General purpose: Current generation: M5 node types: cache.m5.large, // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, - // cache.t2.medium Previous generation: (not recommended) T1 node types: - // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, - // cache.m3.2xlarge + // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, + // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium + // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 + // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended) C1 node types: // cache.c1.xlarge @@ -13677,11 +18048,11 @@ type ReservedCacheNodesOffering struct { // * General purpose: Current generation: M5 node types: cache.m5.large, // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, - // cache.t2.medium Previous generation: (not recommended) T1 node types: - // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, - // cache.m3.2xlarge + // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, + // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium + // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 + // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended) C1 node types: // cache.c1.xlarge @@ -14210,20 +18581,14 @@ func (s *SlotMigration) SetProgressPercentage(v float64) *SlotMigration { type Snapshot struct { _ struct{} `type:"structure"` + // The ARN (Amazon Resource Name) of the snapshot. + ARN *string `type:"string"` + // This parameter is currently disabled. AutoMinorVersionUpgrade *bool `type:"boolean"` - // Indicates the status of Multi-AZ with automatic failover for the source Redis - // replication group. - // - // Amazon ElastiCache for Redis does not support Multi-AZ with automatic failover - // on: - // - // * Redis versions earlier than 2.8.6. - // - // * Redis (cluster mode disabled): T1 node types. - // - // * Redis (cluster mode enabled): T1 node types. + // Indicates the status of automatic failover for the source Redis replication + // group. AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"` // The date and time when the source cluster was created. 
@@ -14241,11 +18606,11 @@ type Snapshot struct { // * General purpose: Current generation: M5 node types: cache.m5.large, // cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, // cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, - // cache.m4.4xlarge, cache.m4.10xlarge T2 node types: cache.t2.micro, cache.t2.small, - // cache.t2.medium Previous generation: (not recommended) T1 node types: - // cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, - // cache.m3.2xlarge + // cache.m4.4xlarge, cache.m4.10xlarge T3 node types: cache.t3.micro, cache.t3.small, + // cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium + // Previous generation: (not recommended) T1 node types: cache.t1.micro M1 + // node types: cache.m1.small, cache.m1.medium, cache.m1.large, cache.m1.xlarge + // M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge // // * Compute optimized: Previous generation: (not recommended) C1 node types: // cache.c1.xlarge @@ -14328,6 +18693,9 @@ type Snapshot struct { // Example: sun:23:00-mon:01:30 PreferredMaintenanceWindow *string `type:"string"` + // The ARN (Amazon Resource Name) of the preferred outpost. + PreferredOutpostArn *string `type:"string"` + // A description of the source replication group. ReplicationGroupDescription *string `type:"string"` @@ -14381,6 +18749,12 @@ func (s Snapshot) GoString() string { return s.String() } +// SetARN sets the ARN field's value. +func (s *Snapshot) SetARN(v string) *Snapshot { + s.ARN = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *Snapshot) SetAutoMinorVersionUpgrade(v bool) *Snapshot { s.AutoMinorVersionUpgrade = &v @@ -14477,6 +18851,12 @@ func (s *Snapshot) SetPreferredMaintenanceWindow(v string) *Snapshot { return s } +// SetPreferredOutpostArn sets the PreferredOutpostArn field's value. +func (s *Snapshot) SetPreferredOutpostArn(v string) *Snapshot { + s.PreferredOutpostArn = &v + return s +} + // SetReplicationGroupDescription sets the ReplicationGroupDescription field's value. func (s *Snapshot) SetReplicationGroupDescription(v string) *Snapshot { s.ReplicationGroupDescription = &v @@ -14618,6 +18998,9 @@ type Subnet struct { // The unique identifier for the subnet. SubnetIdentifier *string `type:"string"` + + // The outpost ARN of the subnet. + SubnetOutpost *SubnetOutpost `type:"structure"` } // String returns the string representation @@ -14642,6 +19025,36 @@ func (s *Subnet) SetSubnetIdentifier(v string) *Subnet { return s } +// SetSubnetOutpost sets the SubnetOutpost field's value. +func (s *Subnet) SetSubnetOutpost(v *SubnetOutpost) *Subnet { + s.SubnetOutpost = v + return s +} + +// The ID of the outpost subnet. +type SubnetOutpost struct { + _ struct{} `type:"structure"` + + // The outpost ARN of the subnet. + SubnetOutpostArn *string `type:"string"` +} + +// String returns the string representation +func (s SubnetOutpost) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SubnetOutpost) GoString() string { + return s.String() +} + +// SetSubnetOutpostArn sets the SubnetOutpostArn field's value. 
+func (s *SubnetOutpost) SetSubnetOutpostArn(v string) *SubnetOutpost { + s.SubnetOutpostArn = &v + return s +} + // A cost allocation Tag that can be added to an ElastiCache cluster or replication // group. Tags are composed of a Key/Value pair. A tag with a null Value is // permitted. @@ -15051,6 +19464,235 @@ func (s *UpdateAction) SetUpdateActionStatusModifiedDate(v time.Time) *UpdateAct return s } +type User struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the user account. + ARN *string `type:"string"` + + // Access permissions string used for this user account. + AccessString *string `type:"string"` + + // Denotes whether the user requires a password to authenticate. + Authentication *Authentication `type:"structure"` + + // Must be Redis. + Engine *string `type:"string"` + + // Indicates the user status. Can be "active", "modifying" or "deleting". + Status *string `type:"string"` + + // Returns a list of the user group IDs the user belongs to. + UserGroupIds []*string `type:"list"` + + // The ID of the user. + UserId *string `type:"string"` + + // The username of the user. + UserName *string `type:"string"` +} + +// String returns the string representation +func (s User) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s User) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. +func (s *User) SetARN(v string) *User { + s.ARN = &v + return s +} + +// SetAccessString sets the AccessString field's value. +func (s *User) SetAccessString(v string) *User { + s.AccessString = &v + return s +} + +// SetAuthentication sets the Authentication field's value. +func (s *User) SetAuthentication(v *Authentication) *User { + s.Authentication = v + return s +} + +// SetEngine sets the Engine field's value. +func (s *User) SetEngine(v string) *User { + s.Engine = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *User) SetStatus(v string) *User { + s.Status = &v + return s +} + +// SetUserGroupIds sets the UserGroupIds field's value. +func (s *User) SetUserGroupIds(v []*string) *User { + s.UserGroupIds = v + return s +} + +// SetUserId sets the UserId field's value. +func (s *User) SetUserId(v string) *User { + s.UserId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *User) SetUserName(v string) *User { + s.UserName = &v + return s +} + +type UserGroup struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the user group. + ARN *string `type:"string"` + + // Must be Redis. + Engine *string `type:"string"` + + // A list of updates being applied to the user groups. + PendingChanges *UserGroupPendingChanges `type:"structure"` + + // A list of replication groups that the user group can access. + ReplicationGroups []*string `type:"list"` + + // Indicates user group status. Can be "creating", "active", "modifying", "deleting". + Status *string `type:"string"` + + // The ID of the user group. + UserGroupId *string `type:"string"` + + // The list of user IDs that belong to the user group. + UserIds []*string `type:"list"` +} + +// String returns the string representation +func (s UserGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserGroup) GoString() string { + return s.String() +} + +// SetARN sets the ARN field's value. 
+func (s *UserGroup) SetARN(v string) *UserGroup { + s.ARN = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *UserGroup) SetEngine(v string) *UserGroup { + s.Engine = &v + return s +} + +// SetPendingChanges sets the PendingChanges field's value. +func (s *UserGroup) SetPendingChanges(v *UserGroupPendingChanges) *UserGroup { + s.PendingChanges = v + return s +} + +// SetReplicationGroups sets the ReplicationGroups field's value. +func (s *UserGroup) SetReplicationGroups(v []*string) *UserGroup { + s.ReplicationGroups = v + return s +} + +// SetStatus sets the Status field's value. +func (s *UserGroup) SetStatus(v string) *UserGroup { + s.Status = &v + return s +} + +// SetUserGroupId sets the UserGroupId field's value. +func (s *UserGroup) SetUserGroupId(v string) *UserGroup { + s.UserGroupId = &v + return s +} + +// SetUserIds sets the UserIds field's value. +func (s *UserGroup) SetUserIds(v []*string) *UserGroup { + s.UserIds = v + return s +} + +// Returns the updates being applied to the user group. +type UserGroupPendingChanges struct { + _ struct{} `type:"structure"` + + // The list of user IDs to add. + UserIdsToAdd []*string `type:"list"` + + // The list of user group IDs ro remove. + UserIdsToRemove []*string `type:"list"` +} + +// String returns the string representation +func (s UserGroupPendingChanges) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserGroupPendingChanges) GoString() string { + return s.String() +} + +// SetUserIdsToAdd sets the UserIdsToAdd field's value. +func (s *UserGroupPendingChanges) SetUserIdsToAdd(v []*string) *UserGroupPendingChanges { + s.UserIdsToAdd = v + return s +} + +// SetUserIdsToRemove sets the UserIdsToRemove field's value. +func (s *UserGroupPendingChanges) SetUserIdsToRemove(v []*string) *UserGroupPendingChanges { + s.UserIdsToRemove = v + return s +} + +// The status of the user group update. +type UserGroupsUpdateStatus struct { + _ struct{} `type:"structure"` + + // The list of user group IDs to add. + UserGroupIdsToAdd []*string `type:"list"` + + // The list of user group IDs to remove. + UserGroupIdsToRemove []*string `type:"list"` +} + +// String returns the string representation +func (s UserGroupsUpdateStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UserGroupsUpdateStatus) GoString() string { + return s.String() +} + +// SetUserGroupIdsToAdd sets the UserGroupIdsToAdd field's value. +func (s *UserGroupsUpdateStatus) SetUserGroupIdsToAdd(v []*string) *UserGroupsUpdateStatus { + s.UserGroupIdsToAdd = v + return s +} + +// SetUserGroupIdsToRemove sets the UserGroupIdsToRemove field's value. 
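+//
+// While a user-group change is still being applied, the pending additions and
+// removals surface on ReplicationGroup.PendingModifiedValues.UserGroups. An
+// illustrative check, assuming rg is a *ReplicationGroup obtained from
+// DescribeReplicationGroups:
+//
+//	if pmv := rg.PendingModifiedValues; pmv != nil && pmv.UserGroups != nil {
+//		_ = aws.StringValueSlice(pmv.UserGroups.UserGroupIdsToAdd)
+//		_ = aws.StringValueSlice(pmv.UserGroups.UserGroupIdsToRemove)
+//	}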
+func (s *UserGroupsUpdateStatus) SetUserGroupIdsToRemove(v []*string) *UserGroupsUpdateStatus { + s.UserGroupIdsToRemove = v + return s +} + const ( // AZModeSingleAz is a AZMode enum value AZModeSingleAz = "single-az" @@ -15059,6 +19701,14 @@ const ( AZModeCrossAz = "cross-az" ) +// AZMode_Values returns all elements of the AZMode enum +func AZMode_Values() []string { + return []string{ + AZModeSingleAz, + AZModeCrossAz, + } +} + const ( // AuthTokenUpdateStatusSetting is a AuthTokenUpdateStatus enum value AuthTokenUpdateStatusSetting = "SETTING" @@ -15067,14 +19717,50 @@ const ( AuthTokenUpdateStatusRotating = "ROTATING" ) +// AuthTokenUpdateStatus_Values returns all elements of the AuthTokenUpdateStatus enum +func AuthTokenUpdateStatus_Values() []string { + return []string{ + AuthTokenUpdateStatusSetting, + AuthTokenUpdateStatusRotating, + } +} + const ( // AuthTokenUpdateStrategyTypeSet is a AuthTokenUpdateStrategyType enum value AuthTokenUpdateStrategyTypeSet = "SET" // AuthTokenUpdateStrategyTypeRotate is a AuthTokenUpdateStrategyType enum value AuthTokenUpdateStrategyTypeRotate = "ROTATE" + + // AuthTokenUpdateStrategyTypeDelete is a AuthTokenUpdateStrategyType enum value + AuthTokenUpdateStrategyTypeDelete = "DELETE" +) + +// AuthTokenUpdateStrategyType_Values returns all elements of the AuthTokenUpdateStrategyType enum +func AuthTokenUpdateStrategyType_Values() []string { + return []string{ + AuthTokenUpdateStrategyTypeSet, + AuthTokenUpdateStrategyTypeRotate, + AuthTokenUpdateStrategyTypeDelete, + } +} + +const ( + // AuthenticationTypePassword is a AuthenticationType enum value + AuthenticationTypePassword = "password" + + // AuthenticationTypeNoPassword is a AuthenticationType enum value + AuthenticationTypeNoPassword = "no-password" ) +// AuthenticationType_Values returns all elements of the AuthenticationType enum +func AuthenticationType_Values() []string { + return []string{ + AuthenticationTypePassword, + AuthenticationTypeNoPassword, + } +} + const ( // AutomaticFailoverStatusEnabled is a AutomaticFailoverStatus enum value AutomaticFailoverStatusEnabled = "enabled" @@ -15089,6 +19775,16 @@ const ( AutomaticFailoverStatusDisabling = "disabling" ) +// AutomaticFailoverStatus_Values returns all elements of the AutomaticFailoverStatus enum +func AutomaticFailoverStatus_Values() []string { + return []string{ + AutomaticFailoverStatusEnabled, + AutomaticFailoverStatusDisabled, + AutomaticFailoverStatusEnabling, + AutomaticFailoverStatusDisabling, + } +} + const ( // ChangeTypeImmediate is a ChangeType enum value ChangeTypeImmediate = "immediate" @@ -15097,6 +19793,30 @@ const ( ChangeTypeRequiresReboot = "requires-reboot" ) +// ChangeType_Values returns all elements of the ChangeType enum +func ChangeType_Values() []string { + return []string{ + ChangeTypeImmediate, + ChangeTypeRequiresReboot, + } +} + +const ( + // MultiAZStatusEnabled is a MultiAZStatus enum value + MultiAZStatusEnabled = "enabled" + + // MultiAZStatusDisabled is a MultiAZStatus enum value + MultiAZStatusDisabled = "disabled" +) + +// MultiAZStatus_Values returns all elements of the MultiAZStatus enum +func MultiAZStatus_Values() []string { + return []string{ + MultiAZStatusEnabled, + MultiAZStatusDisabled, + } +} + const ( // NodeUpdateInitiatedBySystem is a NodeUpdateInitiatedBy enum value NodeUpdateInitiatedBySystem = "system" @@ -15105,6 +19825,14 @@ const ( NodeUpdateInitiatedByCustomer = "customer" ) +// NodeUpdateInitiatedBy_Values returns all elements of the NodeUpdateInitiatedBy enum +func 
NodeUpdateInitiatedBy_Values() []string { + return []string{ + NodeUpdateInitiatedBySystem, + NodeUpdateInitiatedByCustomer, + } +} + const ( // NodeUpdateStatusNotApplied is a NodeUpdateStatus enum value NodeUpdateStatusNotApplied = "not-applied" @@ -15125,6 +19853,34 @@ const ( NodeUpdateStatusComplete = "complete" ) +// NodeUpdateStatus_Values returns all elements of the NodeUpdateStatus enum +func NodeUpdateStatus_Values() []string { + return []string{ + NodeUpdateStatusNotApplied, + NodeUpdateStatusWaitingToStart, + NodeUpdateStatusInProgress, + NodeUpdateStatusStopping, + NodeUpdateStatusStopped, + NodeUpdateStatusComplete, + } +} + +const ( + // OutpostModeSingleOutpost is a OutpostMode enum value + OutpostModeSingleOutpost = "single-outpost" + + // OutpostModeCrossOutpost is a OutpostMode enum value + OutpostModeCrossOutpost = "cross-outpost" +) + +// OutpostMode_Values returns all elements of the OutpostMode enum +func OutpostMode_Values() []string { + return []string{ + OutpostModeSingleOutpost, + OutpostModeCrossOutpost, + } +} + const ( // PendingAutomaticFailoverStatusEnabled is a PendingAutomaticFailoverStatus enum value PendingAutomaticFailoverStatusEnabled = "enabled" @@ -15133,6 +19889,14 @@ const ( PendingAutomaticFailoverStatusDisabled = "disabled" ) +// PendingAutomaticFailoverStatus_Values returns all elements of the PendingAutomaticFailoverStatus enum +func PendingAutomaticFailoverStatus_Values() []string { + return []string{ + PendingAutomaticFailoverStatusEnabled, + PendingAutomaticFailoverStatusDisabled, + } +} + const ( // ServiceUpdateSeverityCritical is a ServiceUpdateSeverity enum value ServiceUpdateSeverityCritical = "critical" @@ -15147,6 +19911,16 @@ const ( ServiceUpdateSeverityLow = "low" ) +// ServiceUpdateSeverity_Values returns all elements of the ServiceUpdateSeverity enum +func ServiceUpdateSeverity_Values() []string { + return []string{ + ServiceUpdateSeverityCritical, + ServiceUpdateSeverityImportant, + ServiceUpdateSeverityMedium, + ServiceUpdateSeverityLow, + } +} + const ( // ServiceUpdateStatusAvailable is a ServiceUpdateStatus enum value ServiceUpdateStatusAvailable = "available" @@ -15158,11 +19932,27 @@ const ( ServiceUpdateStatusExpired = "expired" ) +// ServiceUpdateStatus_Values returns all elements of the ServiceUpdateStatus enum +func ServiceUpdateStatus_Values() []string { + return []string{ + ServiceUpdateStatusAvailable, + ServiceUpdateStatusCancelled, + ServiceUpdateStatusExpired, + } +} + const ( // ServiceUpdateTypeSecurityUpdate is a ServiceUpdateType enum value ServiceUpdateTypeSecurityUpdate = "security-update" ) +// ServiceUpdateType_Values returns all elements of the ServiceUpdateType enum +func ServiceUpdateType_Values() []string { + return []string{ + ServiceUpdateTypeSecurityUpdate, + } +} + const ( // SlaMetYes is a SlaMet enum value SlaMetYes = "yes" @@ -15174,6 +19964,15 @@ const ( SlaMetNA = "n/a" ) +// SlaMet_Values returns all elements of the SlaMet enum +func SlaMet_Values() []string { + return []string{ + SlaMetYes, + SlaMetNo, + SlaMetNA, + } +} + const ( // SourceTypeCacheCluster is a SourceType enum value SourceTypeCacheCluster = "cache-cluster" @@ -15189,8 +19988,27 @@ const ( // SourceTypeReplicationGroup is a SourceType enum value SourceTypeReplicationGroup = "replication-group" + + // SourceTypeUser is a SourceType enum value + SourceTypeUser = "user" + + // SourceTypeUserGroup is a SourceType enum value + SourceTypeUserGroup = "user-group" ) +// SourceType_Values returns all elements of the SourceType enum 
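+//
+// These *_Values helpers enumerate every allowed string for an enum, which is
+// handy for validation. In the Terraform provider that vendors this SDK, a schema
+// attribute might, for instance, be validated with the plugin SDK's helper/validation
+// package (illustrative fragment; validation.StringInSlice is assumed to come from
+// that package):
+//
+//	ValidateFunc: validation.StringInSlice(MultiAZStatus_Values(), false),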
+func SourceType_Values() []string { + return []string{ + SourceTypeCacheCluster, + SourceTypeCacheParameterGroup, + SourceTypeCacheSecurityGroup, + SourceTypeCacheSubnetGroup, + SourceTypeReplicationGroup, + SourceTypeUser, + SourceTypeUserGroup, + } +} + const ( // UpdateActionStatusNotApplied is a UpdateActionStatus enum value UpdateActionStatusNotApplied = "not-applied" @@ -15209,4 +20027,28 @@ const ( // UpdateActionStatusComplete is a UpdateActionStatus enum value UpdateActionStatusComplete = "complete" + + // UpdateActionStatusScheduling is a UpdateActionStatus enum value + UpdateActionStatusScheduling = "scheduling" + + // UpdateActionStatusScheduled is a UpdateActionStatus enum value + UpdateActionStatusScheduled = "scheduled" + + // UpdateActionStatusNotApplicable is a UpdateActionStatus enum value + UpdateActionStatusNotApplicable = "not-applicable" ) + +// UpdateActionStatus_Values returns all elements of the UpdateActionStatus enum +func UpdateActionStatus_Values() []string { + return []string{ + UpdateActionStatusNotApplied, + UpdateActionStatusWaitingToStart, + UpdateActionStatusInProgress, + UpdateActionStatusStopping, + UpdateActionStatusStopped, + UpdateActionStatusComplete, + UpdateActionStatusScheduling, + UpdateActionStatusScheduled, + UpdateActionStatusNotApplicable, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go index 25579b1d5..22b5e4e4c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go @@ -117,6 +117,34 @@ const ( // of clusters per customer. ErrCodeClusterQuotaForCustomerExceededFault = "ClusterQuotaForCustomerExceeded" + // ErrCodeDefaultUserAssociatedToUserGroupFault for service response error code + // "DefaultUserAssociatedToUserGroup". + ErrCodeDefaultUserAssociatedToUserGroupFault = "DefaultUserAssociatedToUserGroup" + + // ErrCodeDefaultUserRequired for service response error code + // "DefaultUserRequired". + // + // You must add default user to a user group. + ErrCodeDefaultUserRequired = "DefaultUserRequired" + + // ErrCodeDuplicateUserNameFault for service response error code + // "DuplicateUserName". + // + // A user with this username already exists. + ErrCodeDuplicateUserNameFault = "DuplicateUserName" + + // ErrCodeGlobalReplicationGroupAlreadyExistsFault for service response error code + // "GlobalReplicationGroupAlreadyExistsFault". + // + // The Global Datastore name already exists. + ErrCodeGlobalReplicationGroupAlreadyExistsFault = "GlobalReplicationGroupAlreadyExistsFault" + + // ErrCodeGlobalReplicationGroupNotFoundFault for service response error code + // "GlobalReplicationGroupNotFoundFault". + // + // The Global Datastore does not exist + ErrCodeGlobalReplicationGroupNotFoundFault = "GlobalReplicationGroupNotFoundFault" + // ErrCodeInsufficientCacheClusterCapacityFault for service response error code // "InsufficientCacheClusterCapacity". // @@ -150,6 +178,12 @@ const ( // The current state of the cache security group does not allow deletion. ErrCodeInvalidCacheSecurityGroupStateFault = "InvalidCacheSecurityGroupState" + // ErrCodeInvalidGlobalReplicationGroupStateFault for service response error code + // "InvalidGlobalReplicationGroupState". + // + // The Global Datastore is not available or in primary-only state. 
+ ErrCodeInvalidGlobalReplicationGroupStateFault = "InvalidGlobalReplicationGroupState" + // ErrCodeInvalidKMSKeyFault for service response error code // "InvalidKMSKeyFault". // @@ -187,6 +221,18 @@ const ( // An invalid subnet identifier was specified. ErrCodeInvalidSubnet = "InvalidSubnet" + // ErrCodeInvalidUserGroupStateFault for service response error code + // "InvalidUserGroupState". + // + // The user group is not in an active state. + ErrCodeInvalidUserGroupStateFault = "InvalidUserGroupState" + + // ErrCodeInvalidUserStateFault for service response error code + // "InvalidUserState". + // + // The user is not in active state. + ErrCodeInvalidUserStateFault = "InvalidUserState" + // ErrCodeInvalidVPCNetworkStateFault for service response error code // "InvalidVPCNetworkStateFault". // @@ -329,6 +375,15 @@ const ( // The requested subnet is being used by another cache subnet group. ErrCodeSubnetInUse = "SubnetInUse" + // ErrCodeSubnetNotAllowedFault for service response error code + // "SubnetNotAllowedFault". + // + // At least one subnet ID does not match the other subnet IDs. This mismatch + // typically occurs when a user sets one subnet ID to a regional Availability + // Zone and a different one to an outpost. Or when a user sets the subnet ID + // to an Outpost when not subscribed on this service. + ErrCodeSubnetNotAllowedFault = "SubnetNotAllowedFault" + // ErrCodeTagNotFoundFault for service response error code // "TagNotFound". // @@ -348,4 +403,40 @@ const ( // // The TestFailover action is not available. ErrCodeTestFailoverNotAvailableFault = "TestFailoverNotAvailableFault" + + // ErrCodeUserAlreadyExistsFault for service response error code + // "UserAlreadyExists". + // + // A user with this ID already exists. + ErrCodeUserAlreadyExistsFault = "UserAlreadyExists" + + // ErrCodeUserGroupAlreadyExistsFault for service response error code + // "UserGroupAlreadyExists". + // + // The user group with this ID already exists. + ErrCodeUserGroupAlreadyExistsFault = "UserGroupAlreadyExists" + + // ErrCodeUserGroupNotFoundFault for service response error code + // "UserGroupNotFound". + // + // The user group was not found or does not exist + ErrCodeUserGroupNotFoundFault = "UserGroupNotFound" + + // ErrCodeUserGroupQuotaExceededFault for service response error code + // "UserGroupQuotaExceeded". + // + // The number of users exceeds the user group limit. + ErrCodeUserGroupQuotaExceededFault = "UserGroupQuotaExceeded" + + // ErrCodeUserNotFoundFault for service response error code + // "UserNotFound". + // + // The user does not exist or could not be found. + ErrCodeUserNotFoundFault = "UserNotFound" + + // ErrCodeUserQuotaExceededFault for service response error code + // "UserQuotaExceeded". + // + // The quota of users has been exceeded. 
+ ErrCodeUserQuotaExceededFault = "UserQuotaExceeded" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go index 5fd2d34eb..ecd863c8a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go index 2e91780c4..4f9e06cbe 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go @@ -179,6 +179,91 @@ func (c *ElasticBeanstalk) ApplyEnvironmentManagedActionWithContext(ctx aws.Cont return out, req.Send() } +const opAssociateEnvironmentOperationsRole = "AssociateEnvironmentOperationsRole" + +// AssociateEnvironmentOperationsRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssociateEnvironmentOperationsRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateEnvironmentOperationsRole for more information on using the AssociateEnvironmentOperationsRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateEnvironmentOperationsRoleRequest method. +// req, resp := client.AssociateEnvironmentOperationsRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/AssociateEnvironmentOperationsRole +func (c *ElasticBeanstalk) AssociateEnvironmentOperationsRoleRequest(input *AssociateEnvironmentOperationsRoleInput) (req *request.Request, output *AssociateEnvironmentOperationsRoleOutput) { + op := &request.Operation{ + Name: opAssociateEnvironmentOperationsRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateEnvironmentOperationsRoleInput{} + } + + output = &AssociateEnvironmentOperationsRoleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AssociateEnvironmentOperationsRole API operation for AWS Elastic Beanstalk. +// +// Add or change the operations role used by an environment. After this call +// is made, Elastic Beanstalk uses the associated operations role for permissions +// to downstream services during subsequent calls acting on this environment. +// For more information, see Operations roles (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/iam-operationsrole.html) +// in the AWS Elastic Beanstalk Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elastic Beanstalk's +// API operation AssociateEnvironmentOperationsRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInsufficientPrivilegesException "InsufficientPrivilegesException" +// The specified account does not have sufficient privileges for one or more +// AWS services. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/AssociateEnvironmentOperationsRole +func (c *ElasticBeanstalk) AssociateEnvironmentOperationsRole(input *AssociateEnvironmentOperationsRoleInput) (*AssociateEnvironmentOperationsRoleOutput, error) { + req, out := c.AssociateEnvironmentOperationsRoleRequest(input) + return out, req.Send() +} + +// AssociateEnvironmentOperationsRoleWithContext is the same as AssociateEnvironmentOperationsRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateEnvironmentOperationsRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticBeanstalk) AssociateEnvironmentOperationsRoleWithContext(ctx aws.Context, input *AssociateEnvironmentOperationsRoleInput, opts ...request.Option) (*AssociateEnvironmentOperationsRoleOutput, error) { + req, out := c.AssociateEnvironmentOperationsRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCheckDNSAvailability = "CheckDNSAvailability" // CheckDNSAvailabilityRequest generates a "aws/request.Request" representing the @@ -479,8 +564,8 @@ func (c *ElasticBeanstalk) CreateApplicationVersionRequest(input *CreateApplicat // Omit both SourceBuildInformation and SourceBundle to use the default sample // application. // -// Once you create an application version with a specified Amazon S3 bucket -// and key location, you cannot change that Amazon S3 location. If you change +// After you create an application version with a specified Amazon S3 bucket +// and key location, you can't change that Amazon S3 location. If you change // the Amazon S3 location, you receive an exception when you attempt to launch // an environment from the application version. // @@ -581,9 +666,11 @@ func (c *ElasticBeanstalk) CreateConfigurationTemplateRequest(input *CreateConfi // CreateConfigurationTemplate API operation for AWS Elastic Beanstalk. // -// Creates a configuration template. Templates are associated with a specific -// application and are used to deploy different versions of the application -// with the same configuration settings. +// Creates an AWS Elastic Beanstalk configuration template, associated with +// a specific Elastic Beanstalk application. You define application configuration +// settings in a configuration template. You can then use the configuration +// template to deploy different versions of the application with the same configuration +// settings. // // Templates aren't associated with any environment. The EnvironmentName response // element is always null. @@ -680,8 +767,8 @@ func (c *ElasticBeanstalk) CreateEnvironmentRequest(input *CreateEnvironmentInpu // CreateEnvironment API operation for AWS Elastic Beanstalk. 
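// --- Illustrative usage sketch (not part of the vendored patch above) ---
// A minimal example of how a caller might invoke the new
// AssociateEnvironmentOperationsRole operation added in this diff, assuming
// default credentials are available. The environment name and role ARN are
// placeholders, not values from the patch.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
)

func main() {
	svc := elasticbeanstalk.New(session.Must(session.NewSession()))

	// Both fields are required; Validate() enforces the minimum lengths shown in the diff.
	_, err := svc.AssociateEnvironmentOperationsRole(&elasticbeanstalk.AssociateEnvironmentOperationsRoleInput{
		EnvironmentName: aws.String("my-env"),                                       // placeholder
		OperationsRole:  aws.String("arn:aws:iam::123456789012:role/eb-operations"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("operations role associated")
}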
// -// Launches an environment for the specified application using the specified -// configuration. +// Launches an AWS Elastic Beanstalk environment for the specified application +// using the specified configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1863,6 +1950,12 @@ func (c *ElasticBeanstalk) DescribeEnvironmentManagedActionHistoryRequest(input Name: opDescribeEnvironmentManagedActionHistory, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, } if input == nil { @@ -1911,6 +2004,58 @@ func (c *ElasticBeanstalk) DescribeEnvironmentManagedActionHistoryWithContext(ct return out, req.Send() } +// DescribeEnvironmentManagedActionHistoryPages iterates over the pages of a DescribeEnvironmentManagedActionHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEnvironmentManagedActionHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEnvironmentManagedActionHistory operation. +// pageNum := 0 +// err := client.DescribeEnvironmentManagedActionHistoryPages(params, +// func(page *elasticbeanstalk.DescribeEnvironmentManagedActionHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticBeanstalk) DescribeEnvironmentManagedActionHistoryPages(input *DescribeEnvironmentManagedActionHistoryInput, fn func(*DescribeEnvironmentManagedActionHistoryOutput, bool) bool) error { + return c.DescribeEnvironmentManagedActionHistoryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeEnvironmentManagedActionHistoryPagesWithContext same as DescribeEnvironmentManagedActionHistoryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticBeanstalk) DescribeEnvironmentManagedActionHistoryPagesWithContext(ctx aws.Context, input *DescribeEnvironmentManagedActionHistoryInput, fn func(*DescribeEnvironmentManagedActionHistoryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeEnvironmentManagedActionHistoryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeEnvironmentManagedActionHistoryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeEnvironmentManagedActionHistoryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeEnvironmentManagedActions = "DescribeEnvironmentManagedActions" // DescribeEnvironmentManagedActionsRequest generates a "aws/request.Request" representing the @@ -2406,7 +2551,11 @@ func (c *ElasticBeanstalk) DescribePlatformVersionRequest(input *DescribePlatfor // DescribePlatformVersion API operation for AWS Elastic Beanstalk. 
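// --- Illustrative usage sketch (not part of the vendored patch above) ---
// One way to consume the pagination support added to
// DescribeEnvironmentManagedActionHistory: the Pages helper invokes the
// callback once per page until NextToken is exhausted or the callback returns
// false. "my-env" is a placeholder environment name.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
)

func main() {
	svc := elasticbeanstalk.New(session.Must(session.NewSession()))

	input := &elasticbeanstalk.DescribeEnvironmentManagedActionHistoryInput{
		EnvironmentName: aws.String("my-env"), // placeholder
	}

	err := svc.DescribeEnvironmentManagedActionHistoryPages(input,
		func(page *elasticbeanstalk.DescribeEnvironmentManagedActionHistoryOutput, lastPage bool) bool {
			// ManagedActionHistoryItems holds the items returned on this page.
			fmt.Printf("page with %d items (last page: %v)\n", len(page.ManagedActionHistoryItems), lastPage)
			return true // keep paging
		})
	if err != nil {
		log.Fatal(err)
	}
}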
// -// Describes the version of the platform. +// Describes a platform version. Provides full details. Compare to ListPlatformVersions, +// which provides summary information about a list of platform versions. +// +// For definitions of platform version and other platform-related terms, see +// AWS Elastic Beanstalk Platforms Glossary (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/platforms-glossary.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2445,6 +2594,91 @@ func (c *ElasticBeanstalk) DescribePlatformVersionWithContext(ctx aws.Context, i return out, req.Send() } +const opDisassociateEnvironmentOperationsRole = "DisassociateEnvironmentOperationsRole" + +// DisassociateEnvironmentOperationsRoleRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateEnvironmentOperationsRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateEnvironmentOperationsRole for more information on using the DisassociateEnvironmentOperationsRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateEnvironmentOperationsRoleRequest method. +// req, resp := client.DisassociateEnvironmentOperationsRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/DisassociateEnvironmentOperationsRole +func (c *ElasticBeanstalk) DisassociateEnvironmentOperationsRoleRequest(input *DisassociateEnvironmentOperationsRoleInput) (req *request.Request, output *DisassociateEnvironmentOperationsRoleOutput) { + op := &request.Operation{ + Name: opDisassociateEnvironmentOperationsRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateEnvironmentOperationsRoleInput{} + } + + output = &DisassociateEnvironmentOperationsRoleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisassociateEnvironmentOperationsRole API operation for AWS Elastic Beanstalk. +// +// Disassociate the operations role from an environment. After this call is +// made, Elastic Beanstalk uses the caller's permissions for permissions to +// downstream services during subsequent calls acting on this environment. For +// more information, see Operations roles (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/iam-operationsrole.html) +// in the AWS Elastic Beanstalk Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elastic Beanstalk's +// API operation DisassociateEnvironmentOperationsRole for usage and error information. 
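// --- Illustrative usage sketch (not part of the vendored patch above) ---
// The WithContext variant generated alongside DisassociateEnvironmentOperationsRole
// accepts a standard context.Context; as the generated docs note, a nil context
// panics, so always pass a real one. The environment name is a placeholder.
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
)

func main() {
	svc := elasticbeanstalk.New(session.Must(session.NewSession()))

	// Bound the call so a hung request is cancelled after 30 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	_, err := svc.DisassociateEnvironmentOperationsRoleWithContext(ctx,
		&elasticbeanstalk.DisassociateEnvironmentOperationsRoleInput{
			EnvironmentName: aws.String("my-env"), // placeholder
		})
	if err != nil {
		log.Fatal(err)
	}
}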
+// +// Returned Error Codes: +// * ErrCodeInsufficientPrivilegesException "InsufficientPrivilegesException" +// The specified account does not have sufficient privileges for one or more +// AWS services. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/DisassociateEnvironmentOperationsRole +func (c *ElasticBeanstalk) DisassociateEnvironmentOperationsRole(input *DisassociateEnvironmentOperationsRoleInput) (*DisassociateEnvironmentOperationsRoleOutput, error) { + req, out := c.DisassociateEnvironmentOperationsRoleRequest(input) + return out, req.Send() +} + +// DisassociateEnvironmentOperationsRoleWithContext is the same as DisassociateEnvironmentOperationsRole with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateEnvironmentOperationsRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticBeanstalk) DisassociateEnvironmentOperationsRoleWithContext(ctx aws.Context, input *DisassociateEnvironmentOperationsRoleInput, opts ...request.Option) (*DisassociateEnvironmentOperationsRoleOutput, error) { + req, out := c.DisassociateEnvironmentOperationsRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListAvailableSolutionStacks = "ListAvailableSolutionStacks" // ListAvailableSolutionStacksRequest generates a "aws/request.Request" representing the @@ -2520,6 +2754,142 @@ func (c *ElasticBeanstalk) ListAvailableSolutionStacksWithContext(ctx aws.Contex return out, req.Send() } +const opListPlatformBranches = "ListPlatformBranches" + +// ListPlatformBranchesRequest generates a "aws/request.Request" representing the +// client's request for the ListPlatformBranches operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListPlatformBranches for more information on using the ListPlatformBranches +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListPlatformBranchesRequest method. 
+// req, resp := client.ListPlatformBranchesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/ListPlatformBranches +func (c *ElasticBeanstalk) ListPlatformBranchesRequest(input *ListPlatformBranchesInput) (req *request.Request, output *ListPlatformBranchesOutput) { + op := &request.Operation{ + Name: opListPlatformBranches, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPlatformBranchesInput{} + } + + output = &ListPlatformBranchesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListPlatformBranches API operation for AWS Elastic Beanstalk. +// +// Lists the platform branches available for your account in an AWS Region. +// Provides summary information about each platform branch. +// +// For definitions of platform branch and other platform-related terms, see +// AWS Elastic Beanstalk Platforms Glossary (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/platforms-glossary.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elastic Beanstalk's +// API operation ListPlatformBranches for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticbeanstalk-2010-12-01/ListPlatformBranches +func (c *ElasticBeanstalk) ListPlatformBranches(input *ListPlatformBranchesInput) (*ListPlatformBranchesOutput, error) { + req, out := c.ListPlatformBranchesRequest(input) + return out, req.Send() +} + +// ListPlatformBranchesWithContext is the same as ListPlatformBranches with the addition of +// the ability to pass a context and additional request options. +// +// See ListPlatformBranches for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticBeanstalk) ListPlatformBranchesWithContext(ctx aws.Context, input *ListPlatformBranchesInput, opts ...request.Option) (*ListPlatformBranchesOutput, error) { + req, out := c.ListPlatformBranchesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPlatformBranchesPages iterates over the pages of a ListPlatformBranches operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPlatformBranches method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPlatformBranches operation. 
+// pageNum := 0 +// err := client.ListPlatformBranchesPages(params, +// func(page *elasticbeanstalk.ListPlatformBranchesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticBeanstalk) ListPlatformBranchesPages(input *ListPlatformBranchesInput, fn func(*ListPlatformBranchesOutput, bool) bool) error { + return c.ListPlatformBranchesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPlatformBranchesPagesWithContext same as ListPlatformBranchesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticBeanstalk) ListPlatformBranchesPagesWithContext(ctx aws.Context, input *ListPlatformBranchesInput, fn func(*ListPlatformBranchesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPlatformBranchesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPlatformBranchesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPlatformBranchesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListPlatformVersions = "ListPlatformVersions" // ListPlatformVersionsRequest generates a "aws/request.Request" representing the @@ -2551,6 +2921,12 @@ func (c *ElasticBeanstalk) ListPlatformVersionsRequest(input *ListPlatformVersio Name: opListPlatformVersions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -2564,7 +2940,12 @@ func (c *ElasticBeanstalk) ListPlatformVersionsRequest(input *ListPlatformVersio // ListPlatformVersions API operation for AWS Elastic Beanstalk. // -// Lists the available platforms. +// Lists the platform versions available for your account in an AWS Region. +// Provides summary information about each platform version. Compare to DescribePlatformVersion, +// which provides full details about a single platform version. +// +// For definitions of platform version and other platform-related terms, see +// AWS Elastic Beanstalk Platforms Glossary (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/platforms-glossary.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2603,6 +2984,58 @@ func (c *ElasticBeanstalk) ListPlatformVersionsWithContext(ctx aws.Context, inpu return out, req.Send() } +// ListPlatformVersionsPages iterates over the pages of a ListPlatformVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPlatformVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPlatformVersions operation. 
+// pageNum := 0 +// err := client.ListPlatformVersionsPages(params, +// func(page *elasticbeanstalk.ListPlatformVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticBeanstalk) ListPlatformVersionsPages(input *ListPlatformVersionsInput, fn func(*ListPlatformVersionsOutput, bool) bool) error { + return c.ListPlatformVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPlatformVersionsPagesWithContext same as ListPlatformVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticBeanstalk) ListPlatformVersionsPagesWithContext(ctx aws.Context, input *ListPlatformVersionsInput, fn func(*ListPlatformVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPlatformVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPlatformVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPlatformVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the @@ -2647,12 +3080,11 @@ func (c *ElasticBeanstalk) ListTagsForResourceRequest(input *ListTagsForResource // ListTagsForResource API operation for AWS Elastic Beanstalk. // -// Returns the tags applied to an AWS Elastic Beanstalk resource. The response +// Return the tags applied to an AWS Elastic Beanstalk resource. The response // contains a list of tag key-value pairs. // -// Currently, Elastic Beanstalk only supports tagging of Elastic Beanstalk environments. -// For details about environment tagging, see Tagging Resources in Your Elastic -// Beanstalk Environment (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/using-features.tagging.html). +// Elastic Beanstalk supports tagging of all of its resources. For details about +// resource tagging, see Tagging Application Resources (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/applications-tagging-resources.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3641,9 +4073,8 @@ func (c *ElasticBeanstalk) UpdateTagsForResourceRequest(input *UpdateTagsForReso // Update the list of tags applied to an AWS Elastic Beanstalk resource. Two // lists can be passed: TagsToAdd for tags to add or update, and TagsToRemove. // -// Currently, Elastic Beanstalk only supports tagging of Elastic Beanstalk environments. -// For details about environment tagging, see Tagging Resources in Your Elastic -// Beanstalk Environment (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/using-features.tagging.html). +// Elastic Beanstalk supports tagging of all of its resources. For details about +// resource tagging, see Tagging Application Resources (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/applications-tagging-resources.html). 
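// --- Illustrative usage sketch (not part of the vendored patch above) ---
// Paging through ListPlatformVersions with the newly added Pages helper. The
// PlatformFilter attribute and values below are placeholders; per the generated
// documentation, multiple filters are combined as a logical AND.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
)

func main() {
	svc := elasticbeanstalk.New(session.Must(session.NewSession()))

	input := &elasticbeanstalk.ListPlatformVersionsInput{
		MaxRecords: aws.Int64(20),
		Filters: []*elasticbeanstalk.PlatformFilter{
			{
				Type:     aws.String("PlatformName"), // placeholder attribute
				Operator: aws.String("contains"),
				Values:   []*string{aws.String("Go")}, // placeholder value
			},
		},
	}

	total := 0
	err := svc.ListPlatformVersionsPages(input,
		func(page *elasticbeanstalk.ListPlatformVersionsOutput, lastPage bool) bool {
			total += len(page.PlatformSummaryList)
			return true // continue until the final page
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("matching platform versions:", total)
}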
// // If you create a custom IAM user policy to control permission to this operation, // specify one of the following two virtual actions (or both) instead of the @@ -4031,8 +4462,8 @@ func (s *ApplicationMetrics) SetStatusCodes(v *StatusCodes) *ApplicationMetrics // The resource lifecycle configuration for an application. Defines lifecycle // settings for resources that belong to the application, and the service role -// that Elastic Beanstalk assumes in order to apply lifecycle settings. The -// version lifecycle configuration defines lifecycle settings for application +// that AWS Elastic Beanstalk assumes in order to apply lifecycle settings. +// The version lifecycle configuration defines lifecycle settings for application // versions. type ApplicationResourceLifecycleConfig struct { _ struct{} `type:"structure"` @@ -4048,7 +4479,7 @@ type ApplicationResourceLifecycleConfig struct { // Role to another value. ServiceRole *string `type:"string"` - // The application version lifecycle configuration. + // Defines lifecycle settings for application versions. VersionLifecycleConfig *ApplicationVersionLifecycleConfig `type:"structure"` } @@ -4407,6 +4838,80 @@ func (s *ApplyEnvironmentManagedActionOutput) SetStatus(v string) *ApplyEnvironm return s } +// Request to add or change the operations role used by an environment. +type AssociateEnvironmentOperationsRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the environment to which to set the operations role. + // + // EnvironmentName is a required field + EnvironmentName *string `min:"4" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of an existing IAM role to be used as the + // environment's operations role. + // + // OperationsRole is a required field + OperationsRole *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateEnvironmentOperationsRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateEnvironmentOperationsRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateEnvironmentOperationsRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateEnvironmentOperationsRoleInput"} + if s.EnvironmentName == nil { + invalidParams.Add(request.NewErrParamRequired("EnvironmentName")) + } + if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4)) + } + if s.OperationsRole == nil { + invalidParams.Add(request.NewErrParamRequired("OperationsRole")) + } + if s.OperationsRole != nil && len(*s.OperationsRole) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OperationsRole", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnvironmentName sets the EnvironmentName field's value. +func (s *AssociateEnvironmentOperationsRoleInput) SetEnvironmentName(v string) *AssociateEnvironmentOperationsRoleInput { + s.EnvironmentName = &v + return s +} + +// SetOperationsRole sets the OperationsRole field's value. 
+func (s *AssociateEnvironmentOperationsRoleInput) SetOperationsRole(v string) *AssociateEnvironmentOperationsRoleInput { + s.OperationsRole = &v + return s +} + +type AssociateEnvironmentOperationsRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateEnvironmentOperationsRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateEnvironmentOperationsRoleOutput) GoString() string { + return s.String() +} + // Describes an Auto Scaling launch configuration. type AutoScalingGroup struct { _ struct{} `type:"structure"` @@ -4955,19 +5460,20 @@ func (s *ConfigurationOptionDescription) SetValueType(v string) *ConfigurationOp } // A specification identifying an individual configuration option along with -// its current value. For a list of possible option values, go to Option Values -// (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/command-options.html) +// its current value. For a list of possible namespaces and option values, see +// Option Values (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/command-options.html) // in the AWS Elastic Beanstalk Developer Guide. type ConfigurationOptionSetting struct { _ struct{} `type:"structure"` - // A unique namespace identifying the option's associated AWS resource. + // A unique namespace that identifies the option's associated AWS resource. Namespace *string `type:"string"` // The name of the configuration option. OptionName *string `type:"string"` - // A unique resource name for a time-based scaling configuration option. + // A unique resource name for the option setting. Use it for a time–based + // scaling configuration option. ResourceName *string `min:"1" type:"string"` // The current value for the configuration option. @@ -5058,7 +5564,7 @@ type ConfigurationSettingsDescription struct { // set. OptionSettings []*ConfigurationOptionSetting `type:"list"` - // The ARN of the platform. + // The ARN of the platform version. PlatformArn *string `type:"string"` // The name of the solution stack this configuration set uses. @@ -5143,19 +5649,16 @@ func (s *ConfigurationSettingsDescription) SetTemplateName(v string) *Configurat type CreateApplicationInput struct { _ struct{} `type:"structure"` - // The name of the application. - // - // Constraint: This name must be unique within your account. If the specified - // name already exists, the action returns an InvalidParameterValue error. + // The name of the application. Must be unique within your account. // // ApplicationName is a required field ApplicationName *string `min:"1" type:"string" required:"true"` - // Describes the application. + // Your description of the application. Description *string `type:"string"` - // Specify an application resource lifecycle configuration to prevent your application - // from accumulating too many versions. + // Specifies an application resource lifecycle configuration to prevent your + // application from accumulating too many versions. ResourceLifecycleConfig *ApplicationResourceLifecycleConfig `type:"structure"` // Specifies the tags applied to the application. @@ -5246,7 +5749,7 @@ type CreateApplicationVersionInput struct { // Settings for an AWS CodeBuild build. BuildConfiguration *BuildConfiguration `type:"structure"` - // Describes this version. + // A description of this application version. 
Description *string `type:"string"` // Pre-processes and validates the environment manifest (env.yaml) and configuration @@ -5403,54 +5906,62 @@ func (s *CreateApplicationVersionInput) SetVersionLabel(v string) *CreateApplica type CreateConfigurationTemplateInput struct { _ struct{} `type:"structure"` - // The name of the application to associate with this configuration template. - // If no application is found with this name, AWS Elastic Beanstalk returns - // an InvalidParameterValue error. + // The name of the Elastic Beanstalk application to associate with this configuration + // template. // // ApplicationName is a required field ApplicationName *string `min:"1" type:"string" required:"true"` - // Describes this configuration. + // An optional description for this configuration. Description *string `type:"string"` - // The ID of the environment used with this configuration template. + // The ID of an environment whose settings you want to use to create the configuration + // template. You must specify EnvironmentId if you don't specify PlatformArn, + // SolutionStackName, or SourceConfiguration. EnvironmentId *string `type:"string"` - // If specified, AWS Elastic Beanstalk sets the specified configuration option - // to the requested value. The new value overrides the value obtained from the - // solution stack or the source configuration template. + // Option values for the Elastic Beanstalk configuration, such as the instance + // type. If specified, these values override the values obtained from the solution + // stack or the source configuration template. For a complete list of Elastic + // Beanstalk configuration options, see Option Values (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/command-options.html) + // in the AWS Elastic Beanstalk Developer Guide. OptionSettings []*ConfigurationOptionSetting `type:"list"` - // The ARN of the custom platform. + // The Amazon Resource Name (ARN) of the custom platform. For more information, + // see Custom Platforms (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/custom-platforms.html) + // in the AWS Elastic Beanstalk Developer Guide. + // + // If you specify PlatformArn, then don't specify SolutionStackName. PlatformArn *string `type:"string"` - // The name of the solution stack used by this configuration. The solution stack - // specifies the operating system, architecture, and application server for - // a configuration template. It determines the set of configuration options - // as well as the possible and default values. + // The name of an Elastic Beanstalk solution stack (platform version) that this + // configuration uses. For example, 64bit Amazon Linux 2013.09 running Tomcat + // 7 Java 7. A solution stack specifies the operating system, runtime, and application + // server for a configuration template. It also determines the set of configuration + // options as well as the possible and default values. For more information, + // see Supported Platforms (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/concepts.platforms.html) + // in the AWS Elastic Beanstalk Developer Guide. // - // Use ListAvailableSolutionStacks to obtain a list of available solution stacks. + // You must specify SolutionStackName if you don't specify PlatformArn, EnvironmentId, + // or SourceConfiguration. // - // A solution stack name or a source configuration parameter must be specified, - // otherwise AWS Elastic Beanstalk returns an InvalidParameterValue error. 
- // - // If a solution stack name is not specified and the source configuration parameter - // is specified, AWS Elastic Beanstalk uses the same solution stack as the source - // configuration template. + // Use the ListAvailableSolutionStacks (https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_ListAvailableSolutionStacks.html) + // API to obtain a list of available solution stacks. SolutionStackName *string `type:"string"` - // If specified, AWS Elastic Beanstalk uses the configuration values from the - // specified configuration template to create a new configuration. + // An Elastic Beanstalk configuration template to base this one on. If specified, + // Elastic Beanstalk uses the configuration values from the specified configuration + // template to create a new configuration. // - // Values specified in the OptionSettings parameter of this call overrides any - // values obtained from the SourceConfiguration. + // Values specified in OptionSettings override any values obtained from the + // SourceConfiguration. // - // If no configuration template is found, returns an InvalidParameterValue error. + // You must specify SourceConfiguration if you don't specify PlatformArn, EnvironmentId, + // or SolutionStackName. // - // Constraint: If both the solution stack name parameter and the source configuration - // parameters are specified, the solution stack of the source configuration - // template must match the specified solution stack name or else AWS Elastic - // Beanstalk returns an InvalidParameterCombination error. + // Constraint: If both solution stack name and source configuration are specified, + // the solution stack of the source configuration template must match the specified + // solution stack name. SourceConfiguration *SourceConfiguration `type:"structure"` // Specifies the tags applied to the configuration template. @@ -5460,9 +5971,6 @@ type CreateConfigurationTemplateInput struct { // // Constraint: This name must be unique per application. // - // Default: If a configuration template already exists with this name, AWS Elastic - // Beanstalk returns an InvalidParameterValue error. - // // TemplateName is a required field TemplateName *string `min:"1" type:"string" required:"true"` } @@ -5581,31 +6089,29 @@ func (s *CreateConfigurationTemplateInput) SetTemplateName(v string) *CreateConf type CreateEnvironmentInput struct { _ struct{} `type:"structure"` - // The name of the application that contains the version to be deployed. - // - // If no application is found with this name, CreateEnvironment returns an InvalidParameterValue - // error. + // The name of the application that is associated with this environment. // // ApplicationName is a required field ApplicationName *string `min:"1" type:"string" required:"true"` // If specified, the environment attempts to use this value as the prefix for - // the CNAME. If not specified, the CNAME is generated automatically by appending - // a random alphanumeric string to the environment name. + // the CNAME in your Elastic Beanstalk environment URL. If not specified, the + // CNAME is generated automatically by appending a random alphanumeric string + // to the environment name. CNAMEPrefix *string `min:"4" type:"string"` - // Describes this environment. + // Your description for this environment. Description *string `type:"string"` - // A unique name for the deployment environment. Used in the application URL. + // A unique name for the environment. // // Constraint: Must be from 4 to 40 characters in length. 
The name can contain - // only letters, numbers, and hyphens. It cannot start or end with a hyphen. + // only letters, numbers, and hyphens. It can't start or end with a hyphen. // This name must be unique within a region in your account. If the specified - // name already exists in the region, AWS Elastic Beanstalk returns an InvalidParameterValue + // name already exists in the region, Elastic Beanstalk returns an InvalidParameterValue // error. // - // Default: If the CNAME parameter is not specified, the environment name becomes + // If you don't specify the CNAMEPrefix parameter, the environment name becomes // part of the CNAME, and therefore part of the visible URL for your application. EnvironmentName *string `min:"4" type:"string"` @@ -5616,6 +6122,15 @@ type CreateEnvironmentInput struct { // for details. GroupName *string `min:"1" type:"string"` + // The Amazon Resource Name (ARN) of an existing IAM role to be used as the + // environment's operations role. If specified, Elastic Beanstalk uses the operations + // role for permissions to downstream services during this call and during subsequent + // calls acting on this environment. To specify an operations role, you must + // have the iam:PassRole permission for the role. For more information, see + // Operations roles (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/iam-operationsrole.html) + // in the AWS Elastic Beanstalk Developer Guide. + OperationsRole *string `min:"1" type:"string"` + // If specified, AWS Elastic Beanstalk sets the specified configuration options // to the requested value in the configuration set for the new environment. // These override the values obtained from the solution stack or the configuration @@ -5626,35 +6141,42 @@ type CreateEnvironmentInput struct { // set for this new environment. OptionsToRemove []*OptionSpecification `type:"list"` - // The ARN of the platform. + // The Amazon Resource Name (ARN) of the custom platform to use with the environment. + // For more information, see Custom Platforms (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/custom-platforms.html) + // in the AWS Elastic Beanstalk Developer Guide. + // + // If you specify PlatformArn, don't specify SolutionStackName. PlatformArn *string `type:"string"` - // This is an alternative to specifying a template name. If specified, AWS Elastic - // Beanstalk sets the configuration values to the default values associated - // with the specified solution stack. - // + // The name of an Elastic Beanstalk solution stack (platform version) to use + // with the environment. If specified, Elastic Beanstalk sets the configuration + // values to the default values associated with the specified solution stack. // For a list of current solution stacks, see Elastic Beanstalk Supported Platforms - // (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/concepts.platforms.html). + // (https://docs.aws.amazon.com/elasticbeanstalk/latest/platforms/platforms-supported.html) + // in the AWS Elastic Beanstalk Platforms guide. + // + // If you specify SolutionStackName, don't specify PlatformArn or TemplateName. SolutionStackName *string `type:"string"` // Specifies the tags applied to resources in the environment. Tags []*Tag `type:"list"` - // The name of the configuration template to use in deployment. If no configuration - // template is found with this name, AWS Elastic Beanstalk returns an InvalidParameterValue - // error. + // The name of the Elastic Beanstalk configuration template to use with the + // environment. 
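// --- Illustrative usage sketch (not part of the vendored patch above) ---
// Creating an environment that sets the new OperationsRole field on
// CreateEnvironmentInput. Per the docs above, the caller needs iam:PassRole on
// the role. The application name, environment name, solution stack, and ARN
// are all placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
)

func main() {
	svc := elasticbeanstalk.New(session.Must(session.NewSession()))

	env, err := svc.CreateEnvironment(&elasticbeanstalk.CreateEnvironmentInput{
		ApplicationName:   aws.String("my-app"),                                       // placeholder
		EnvironmentName:   aws.String("my-env"),                                       // placeholder
		SolutionStackName: aws.String("64bit Amazon Linux 2 v3.1.0 running Go 1"),     // placeholder
		OperationsRole:    aws.String("arn:aws:iam::123456789012:role/eb-operations"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created environment:", aws.StringValue(env.EnvironmentId))
}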
+ // + // If you specify TemplateName, then don't specify SolutionStackName. TemplateName *string `min:"1" type:"string"` - // This specifies the tier to use for creating this environment. + // Specifies the tier to use in creating this environment. The environment tier + // that you choose determines whether Elastic Beanstalk provisions resources + // to support a web application that handles HTTP(S) requests or a web application + // that handles background-processing tasks. Tier *EnvironmentTier `type:"structure"` // The name of the application version to deploy. // - // If the specified application has no associated application versions, AWS - // Elastic Beanstalk UpdateEnvironment returns an InvalidParameterValue error. - // - // Default: If not specified, AWS Elastic Beanstalk attempts to launch the sample - // application in the container. + // Default: If not specified, Elastic Beanstalk attempts to deploy the sample + // application. VersionLabel *string `min:"1" type:"string"` } @@ -5686,6 +6208,9 @@ func (s *CreateEnvironmentInput) Validate() error { if s.GroupName != nil && len(*s.GroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) } + if s.OperationsRole != nil && len(*s.OperationsRole) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OperationsRole", 1)) + } if s.TemplateName != nil && len(*s.TemplateName) < 1 { invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) } @@ -5759,6 +6284,12 @@ func (s *CreateEnvironmentInput) SetGroupName(v string) *CreateEnvironmentInput return s } +// SetOperationsRole sets the OperationsRole field's value. +func (s *CreateEnvironmentInput) SetOperationsRole(v string) *CreateEnvironmentInput { + s.OperationsRole = &v + return s +} + // SetOptionSettings sets the OptionSettings field's value. func (s *CreateEnvironmentInput) SetOptionSettings(v []*ConfigurationOptionSetting) *CreateEnvironmentInput { s.OptionSettings = v @@ -6735,7 +7266,7 @@ type DescribeConfigurationOptionsOutput struct { // A list of ConfigurationOptionDescription. Options []*ConfigurationOptionDescription `type:"list"` - // The ARN of the platform. + // The ARN of the platform version. PlatformArn *string `type:"string"` // The name of the solution stack these configuration options belong to. @@ -7415,7 +7946,9 @@ type DescribeEventsInput struct { // Pagination token. If specified, the events return the next batch of results. NextToken *string `type:"string"` - // The ARN of the version of the custom platform. + // The ARN of a custom platform version. If specified, AWS Elastic Beanstalk + // restricts the returned descriptions to those associated with this custom + // platform version. PlatformArn *string `type:"string"` // If specified, AWS Elastic Beanstalk restricts the described events to include @@ -7698,7 +8231,7 @@ func (s *DescribeInstancesHealthOutput) SetRefreshedAt(v time.Time) *DescribeIns type DescribePlatformVersionInput struct { _ struct{} `type:"structure"` - // The ARN of the version of the platform. + // The ARN of the platform version. PlatformArn *string `type:"string"` } @@ -7721,7 +8254,7 @@ func (s *DescribePlatformVersionInput) SetPlatformArn(v string) *DescribePlatfor type DescribePlatformVersionOutput struct { _ struct{} `type:"structure"` - // Detailed information about the version of the platform. + // Detailed information about the platform version. 
PlatformDescription *PlatformDescription `type:"structure"` } @@ -7741,6 +8274,62 @@ func (s *DescribePlatformVersionOutput) SetPlatformDescription(v *PlatformDescri return s } +// Request to disassociate the operations role from an environment. +type DisassociateEnvironmentOperationsRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the environment from which to disassociate the operations role. + // + // EnvironmentName is a required field + EnvironmentName *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateEnvironmentOperationsRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateEnvironmentOperationsRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisassociateEnvironmentOperationsRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateEnvironmentOperationsRoleInput"} + if s.EnvironmentName == nil { + invalidParams.Add(request.NewErrParamRequired("EnvironmentName")) + } + if s.EnvironmentName != nil && len(*s.EnvironmentName) < 4 { + invalidParams.Add(request.NewErrParamMinLen("EnvironmentName", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnvironmentName sets the EnvironmentName field's value. +func (s *DisassociateEnvironmentOperationsRoleInput) SetEnvironmentName(v string) *DisassociateEnvironmentOperationsRoleInput { + s.EnvironmentName = &v + return s +} + +type DisassociateEnvironmentOperationsRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisassociateEnvironmentOperationsRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateEnvironmentOperationsRoleOutput) GoString() string { + return s.String() +} + // Describes the properties of an environment. type EnvironmentDescription struct { _ struct{} `type:"structure"` @@ -7807,7 +8396,12 @@ type EnvironmentDescription struct { // For more information, see Health Colors and Statuses (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/health-enhanced-status.html). HealthStatus *string `type:"string" enum:"EnvironmentHealthStatus"` - // The ARN of the platform. + // The Amazon Resource Name (ARN) of the environment's operations role. For + // more information, see Operations roles (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/iam-operationsrole.html) + // in the AWS Elastic Beanstalk Developer Guide. + OperationsRole *string `min:"1" type:"string"` + + // The ARN of the platform version. PlatformArn *string `type:"string"` // The description of the AWS resources used by this environment. @@ -7929,6 +8523,12 @@ func (s *EnvironmentDescription) SetHealthStatus(v string) *EnvironmentDescripti return s } +// SetOperationsRole sets the OperationsRole field's value. +func (s *EnvironmentDescription) SetOperationsRole(v string) *EnvironmentDescription { + s.OperationsRole = &v + return s +} + // SetPlatformArn sets the PlatformArn field's value. func (s *EnvironmentDescription) SetPlatformArn(v string) *EnvironmentDescription { s.PlatformArn = &v @@ -8283,7 +8883,7 @@ type EventDescription struct { // The event message. Message *string `type:"string"` - // The ARN of the platform. + // The ARN of the platform version. 
PlatformArn *string `type:"string"` // The web service request ID for the activity of this event. @@ -8672,18 +9272,135 @@ func (s *ListAvailableSolutionStacksOutput) SetSolutionStacks(v []*string) *List return s } +type ListPlatformBranchesInput struct { + _ struct{} `type:"structure"` + + // Criteria for restricting the resulting list of platform branches. The filter + // is evaluated as a logical conjunction (AND) of the separate SearchFilter + // terms. + // + // The following list shows valid attribute values for each of the SearchFilter + // terms. Most operators take a single value. The in and not_in operators can + // take multiple values. + // + // * Attribute = BranchName: Operator: = | != | begins_with | ends_with | + // contains | in | not_in + // + // * Attribute = LifecycleState: Operator: = | != | in | not_in Values: beta + // | supported | deprecated | retired + // + // * Attribute = PlatformName: Operator: = | != | begins_with | ends_with + // | contains | in | not_in + // + // * Attribute = TierType: Operator: = | != Values: WebServer/Standard | + // Worker/SQS/HTTP + // + // Array size: limited to 10 SearchFilter objects. + // + // Within each SearchFilter item, the Values array is limited to 10 items. + Filters []*SearchFilter `type:"list"` + + // The maximum number of platform branch values returned in one call. + MaxRecords *int64 `min:"1" type:"integer"` + + // For a paginated request. Specify a token from a previous response page to + // retrieve the next response page. All other parameter values must be identical + // to the ones specified in the initial request. + // + // If no NextToken is specified, the first page is retrieved. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListPlatformBranchesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPlatformBranchesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPlatformBranchesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPlatformBranchesInput"} + if s.MaxRecords != nil && *s.MaxRecords < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxRecords", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *ListPlatformBranchesInput) SetFilters(v []*SearchFilter) *ListPlatformBranchesInput { + s.Filters = v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *ListPlatformBranchesInput) SetMaxRecords(v int64) *ListPlatformBranchesInput { + s.MaxRecords = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPlatformBranchesInput) SetNextToken(v string) *ListPlatformBranchesInput { + s.NextToken = &v + return s +} + +type ListPlatformBranchesOutput struct { + _ struct{} `type:"structure"` + + // In a paginated request, if this value isn't null, it's the token that you + // can pass in a subsequent request to get the next response page. + NextToken *string `type:"string"` + + // Summary information about the platform branches. 
+ PlatformBranchSummaryList []*PlatformBranchSummary `type:"list"` +} + +// String returns the string representation +func (s ListPlatformBranchesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPlatformBranchesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPlatformBranchesOutput) SetNextToken(v string) *ListPlatformBranchesOutput { + s.NextToken = &v + return s +} + +// SetPlatformBranchSummaryList sets the PlatformBranchSummaryList field's value. +func (s *ListPlatformBranchesOutput) SetPlatformBranchSummaryList(v []*PlatformBranchSummary) *ListPlatformBranchesOutput { + s.PlatformBranchSummaryList = v + return s +} + type ListPlatformVersionsInput struct { _ struct{} `type:"structure"` - // List only the platforms where the platform member value relates to one of - // the supplied values. + // Criteria for restricting the resulting list of platform versions. The filter + // is interpreted as a logical conjunction (AND) of the separate PlatformFilter + // terms. Filters []*PlatformFilter `type:"list"` - // The maximum number of platform values returned in one call. + // The maximum number of platform version values returned in one call. MaxRecords *int64 `min:"1" type:"integer"` - // The starting index into the remaining list of platforms. Use the NextToken - // value from a previous ListPlatformVersion call. + // For a paginated request. Specify a token from a previous response page to + // retrieve the next response page. All other parameter values must be identical + // to the ones specified in the initial request. + // + // If no NextToken is specified, the first page is retrieved. NextToken *string `type:"string"` } @@ -8731,11 +9448,11 @@ func (s *ListPlatformVersionsInput) SetNextToken(v string) *ListPlatformVersions type ListPlatformVersionsOutput struct { _ struct{} `type:"structure"` - // The starting index into the remaining list of platforms. if this value is - // not null, you can use it in a subsequent ListPlatformVersion call. + // In a paginated request, if this value isn't null, it's the token that you + // can pass in a subsequent request to get the next response page. NextToken *string `type:"string"` - // Detailed information about the platforms. + // Summary information about the platform versions. PlatformSummaryList []*PlatformSummary `type:"list"` } @@ -8766,7 +9483,7 @@ type ListTagsForResourceInput struct { // The Amazon Resource Name (ARN) of the resouce for which a tag list is requested. // - // Must be the ARN of an Elastic Beanstalk environment. + // Must be the ARN of an Elastic Beanstalk resource. // // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` @@ -8804,7 +9521,7 @@ func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResource type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resouce for which a tag list was requested. + // The Amazon Resource Name (ARN) of the resource for which a tag list was requested. ResourceArn *string `type:"string"` // A list of tag key-value pairs. @@ -9289,62 +10006,149 @@ func (s *OptionSpecification) SetResourceName(v string) *OptionSpecification { return s } -// Detailed information about a platform. +// Summary information about a platform branch. +type PlatformBranchSummary struct { + _ struct{} `type:"structure"` + + // The name of the platform branch. 
+ BranchName *string `type:"string"` + + // An ordinal number that designates the order in which platform branches have + // been added to a platform. This can be helpful, for example, if your code + // calls the ListPlatformBranches action and then displays a list of platform + // branches. + // + // A larger BranchOrder value designates a newer platform branch within the + // platform. + BranchOrder *int64 `type:"integer"` + + // The support life cycle state of the platform branch. + // + // Possible values: beta | supported | deprecated | retired + LifecycleState *string `type:"string"` + + // The name of the platform to which this platform branch belongs. + PlatformName *string `type:"string"` + + // The environment tiers that platform versions in this branch support. + // + // Possible values: WebServer/Standard | Worker/SQS/HTTP + SupportedTierList []*string `type:"list"` +} + +// String returns the string representation +func (s PlatformBranchSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PlatformBranchSummary) GoString() string { + return s.String() +} + +// SetBranchName sets the BranchName field's value. +func (s *PlatformBranchSummary) SetBranchName(v string) *PlatformBranchSummary { + s.BranchName = &v + return s +} + +// SetBranchOrder sets the BranchOrder field's value. +func (s *PlatformBranchSummary) SetBranchOrder(v int64) *PlatformBranchSummary { + s.BranchOrder = &v + return s +} + +// SetLifecycleState sets the LifecycleState field's value. +func (s *PlatformBranchSummary) SetLifecycleState(v string) *PlatformBranchSummary { + s.LifecycleState = &v + return s +} + +// SetPlatformName sets the PlatformName field's value. +func (s *PlatformBranchSummary) SetPlatformName(v string) *PlatformBranchSummary { + s.PlatformName = &v + return s +} + +// SetSupportedTierList sets the SupportedTierList field's value. +func (s *PlatformBranchSummary) SetSupportedTierList(v []*string) *PlatformBranchSummary { + s.SupportedTierList = v + return s +} + +// Detailed information about a platform version. type PlatformDescription struct { _ struct{} `type:"structure"` - // The custom AMIs supported by the platform. + // The custom AMIs supported by the platform version. CustomAmiList []*CustomAmi `type:"list"` - // The date when the platform was created. + // The date when the platform version was created. DateCreated *time.Time `type:"timestamp"` - // The date when the platform was last updated. + // The date when the platform version was last updated. DateUpdated *time.Time `type:"timestamp"` - // The description of the platform. + // The description of the platform version. Description *string `type:"string"` - // The frameworks supported by the platform. + // The frameworks supported by the platform version. Frameworks []*PlatformFramework `type:"list"` - // Information about the maintainer of the platform. + // Information about the maintainer of the platform version. Maintainer *string `type:"string"` - // The operating system used by the platform. + // The operating system used by the platform version. OperatingSystemName *string `type:"string"` - // The version of the operating system used by the platform. + // The version of the operating system used by the platform version. OperatingSystemVersion *string `type:"string"` - // The ARN of the platform. + // The ARN of the platform version. PlatformArn *string `type:"string"` - // The category of the platform. 
+ // The state of the platform version's branch in its lifecycle. + // + // Possible values: Beta | Supported | Deprecated | Retired + PlatformBranchLifecycleState *string `type:"string"` + + // The platform branch to which the platform version belongs. + PlatformBranchName *string `type:"string"` + + // The category of the platform version. PlatformCategory *string `type:"string"` - // The name of the platform. + // The state of the platform version in its lifecycle. + // + // Possible values: Recommended | null + // + // If a null value is returned, the platform version isn't the recommended one + // for its branch. Each platform branch has a single recommended platform version, + // typically the most recent one. + PlatformLifecycleState *string `type:"string"` + + // The name of the platform version. PlatformName *string `type:"string"` - // The AWS account ID of the person who created the platform. + // The AWS account ID of the person who created the platform version. PlatformOwner *string `type:"string"` - // The status of the platform. + // The status of the platform version. PlatformStatus *string `type:"string" enum:"PlatformStatus"` - // The version of the platform. + // The version of the platform version. PlatformVersion *string `type:"string"` - // The programming languages supported by the platform. + // The programming languages supported by the platform version. ProgrammingLanguages []*PlatformProgrammingLanguage `type:"list"` - // The name of the solution stack used by the platform. + // The name of the solution stack used by the platform version. SolutionStackName *string `type:"string"` - // The additions supported by the platform. + // The additions supported by the platform version. SupportedAddonList []*string `type:"list"` - // The tiers supported by the platform. + // The tiers supported by the platform version. SupportedTierList []*string `type:"list"` } @@ -9412,12 +10216,30 @@ func (s *PlatformDescription) SetPlatformArn(v string) *PlatformDescription { return s } +// SetPlatformBranchLifecycleState sets the PlatformBranchLifecycleState field's value. +func (s *PlatformDescription) SetPlatformBranchLifecycleState(v string) *PlatformDescription { + s.PlatformBranchLifecycleState = &v + return s +} + +// SetPlatformBranchName sets the PlatformBranchName field's value. +func (s *PlatformDescription) SetPlatformBranchName(v string) *PlatformDescription { + s.PlatformBranchName = &v + return s +} + // SetPlatformCategory sets the PlatformCategory field's value. func (s *PlatformDescription) SetPlatformCategory(v string) *PlatformDescription { s.PlatformCategory = &v return s } +// SetPlatformLifecycleState sets the PlatformLifecycleState field's value. +func (s *PlatformDescription) SetPlatformLifecycleState(v string) *PlatformDescription { + s.PlatformLifecycleState = &v + return s +} + // SetPlatformName sets the PlatformName field's value. func (s *PlatformDescription) SetPlatformName(v string) *PlatformDescription { s.PlatformName = &v @@ -9466,27 +10288,36 @@ func (s *PlatformDescription) SetSupportedTierList(v []*string) *PlatformDescrip return s } -// Specify criteria to restrict the results when listing custom platforms. -// -// The filter is evaluated as the expression: +// Describes criteria to restrict the results when listing platform versions. 
// -// Type Operator Values[i] +// The filter is evaluated as follows: Type Operator Values[1] type PlatformFilter struct { _ struct{} `type:"structure"` // The operator to apply to the Type with each of the Values. // - // Valid Values: = (equal to) | != (not equal to) | < (less than) | <= (less - // than or equal to) | > (greater than) | >= (greater than or equal to) | contains - // | begins_with | ends_with + // Valid values: = | != | < | <= | > | >= | contains | begins_with | ends_with Operator *string `type:"string"` - // The custom platform attribute to which the filter values are applied. + // The platform version attribute to which the filter values are applied. // - // Valid Values: PlatformName | PlatformVersion | PlatformStatus | PlatformOwner + // Valid values: PlatformName | PlatformVersion | PlatformStatus | PlatformBranchName + // | PlatformLifecycleState | PlatformOwner | SupportedTier | SupportedAddon + // | ProgrammingLanguageName | OperatingSystemName Type *string `type:"string"` - // The list of values applied to the custom platform attribute. + // The list of values applied to the filtering platform version attribute. Only + // one value is supported for all current operators. + // + // The following list shows valid filter values for some filter attributes. + // + // * PlatformStatus: Creating | Failed | Ready | Deleting | Deleted + // + // * PlatformLifecycleState: recommended + // + // * SupportedTier: WebServer/Standard | Worker/SQS/HTTP + // + // * SupportedAddon: Log/S3 | Monitoring/Healthd | WorkerDaemon/SQSD Values []*string `type:"list"` } @@ -9518,7 +10349,7 @@ func (s *PlatformFilter) SetValues(v []*string) *PlatformFilter { return s } -// A framework supported by the custom platform. +// A framework supported by the platform. type PlatformFramework struct { _ struct{} `type:"structure"` @@ -9584,33 +10415,52 @@ func (s *PlatformProgrammingLanguage) SetVersion(v string) *PlatformProgrammingL return s } -// Detailed information about a platform. +// Summary information about a platform version. type PlatformSummary struct { _ struct{} `type:"structure"` - // The operating system used by the platform. + // The operating system used by the platform version. OperatingSystemName *string `type:"string"` - // The version of the operating system used by the platform. + // The version of the operating system used by the platform version. OperatingSystemVersion *string `type:"string"` - // The ARN of the platform. + // The ARN of the platform version. PlatformArn *string `type:"string"` - // The category of platform. + // The state of the platform version's branch in its lifecycle. + // + // Possible values: beta | supported | deprecated | retired + PlatformBranchLifecycleState *string `type:"string"` + + // The platform branch to which the platform version belongs. + PlatformBranchName *string `type:"string"` + + // The category of platform version. PlatformCategory *string `type:"string"` - // The AWS account ID of the person who created the platform. + // The state of the platform version in its lifecycle. + // + // Possible values: recommended | empty + // + // If an empty value is returned, the platform version is supported but isn't + // the recommended one for its branch. + PlatformLifecycleState *string `type:"string"` + + // The AWS account ID of the person who created the platform version. PlatformOwner *string `type:"string"` - // The status of the platform. You can create an environment from the platform - // once it is ready. 
+ // The status of the platform version. You can create an environment from the + // platform version once it is ready. PlatformStatus *string `type:"string" enum:"PlatformStatus"` - // The additions associated with the platform. + // The version string of the platform version. + PlatformVersion *string `type:"string"` + + // The additions associated with the platform version. SupportedAddonList []*string `type:"list"` - // The tiers in which the platform runs. + // The tiers in which the platform version runs. SupportedTierList []*string `type:"list"` } @@ -9642,12 +10492,30 @@ func (s *PlatformSummary) SetPlatformArn(v string) *PlatformSummary { return s } +// SetPlatformBranchLifecycleState sets the PlatformBranchLifecycleState field's value. +func (s *PlatformSummary) SetPlatformBranchLifecycleState(v string) *PlatformSummary { + s.PlatformBranchLifecycleState = &v + return s +} + +// SetPlatformBranchName sets the PlatformBranchName field's value. +func (s *PlatformSummary) SetPlatformBranchName(v string) *PlatformSummary { + s.PlatformBranchName = &v + return s +} + // SetPlatformCategory sets the PlatformCategory field's value. func (s *PlatformSummary) SetPlatformCategory(v string) *PlatformSummary { s.PlatformCategory = &v return s } +// SetPlatformLifecycleState sets the PlatformLifecycleState field's value. +func (s *PlatformSummary) SetPlatformLifecycleState(v string) *PlatformSummary { + s.PlatformLifecycleState = &v + return s +} + // SetPlatformOwner sets the PlatformOwner field's value. func (s *PlatformSummary) SetPlatformOwner(v string) *PlatformSummary { s.PlatformOwner = &v @@ -9660,6 +10528,12 @@ func (s *PlatformSummary) SetPlatformStatus(v string) *PlatformSummary { return s } +// SetPlatformVersion sets the PlatformVersion field's value. +func (s *PlatformSummary) SetPlatformVersion(v string) *PlatformSummary { + s.PlatformVersion = &v + return s +} + // SetSupportedAddonList sets the SupportedAddonList field's value. func (s *PlatformSummary) SetSupportedAddonList(v []*string) *PlatformSummary { s.SupportedAddonList = v @@ -10144,6 +11018,63 @@ func (s *S3Location) SetS3Key(v string) *S3Location { return s } +// Describes criteria to restrict a list of results. +// +// For operators that apply a single value to the attribute, the filter is evaluated +// as follows: Attribute Operator Values[1] +// +// Some operators, e.g. in, can apply multiple values. In this case, the filter +// is evaluated as a logical union (OR) of applications of the operator to the +// attribute with each one of the values: (Attribute Operator Values[1]) OR +// (Attribute Operator Values[2]) OR ... +// +// The valid values for attributes of SearchFilter depend on the API action. +// For valid values, see the reference page for the API action you're calling +// that takes a SearchFilter parameter. +type SearchFilter struct { + _ struct{} `type:"structure"` + + // The result attribute to which the filter values are applied. Valid values + // vary by API action. + Attribute *string `type:"string"` + + // The operator to apply to the Attribute with each of the Values. Valid values + // vary by Attribute. + Operator *string `type:"string"` + + // The list of values applied to the Attribute and Operator attributes. Number + // of values and valid values vary by Attribute. 
+ Values []*string `type:"list"` +} + +// String returns the string representation +func (s SearchFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SearchFilter) GoString() string { + return s.String() +} + +// SetAttribute sets the Attribute field's value. +func (s *SearchFilter) SetAttribute(v string) *SearchFilter { + s.Attribute = &v + return s +} + +// SetOperator sets the Operator field's value. +func (s *SearchFilter) SetOperator(v string) *SearchFilter { + s.Operator = &v + return s +} + +// SetValues sets the Values field's value. +func (s *SearchFilter) SetValues(v []*string) *SearchFilter { + s.Values = v + return s +} + // Detailed health information about an Amazon EC2 instance in your Elastic // Beanstalk environment. type SingleInstanceHealth struct { @@ -10372,7 +11303,7 @@ func (s *SourceBuildInformation) SetSourceType(v string) *SourceBuildInformation return s } -// A specification for an environment configuration +// A specification for an environment configuration. type SourceConfiguration struct { _ struct{} `type:"structure"` @@ -11304,19 +12235,21 @@ type UpdateTagsForResourceInput struct { // The Amazon Resource Name (ARN) of the resouce to be updated. // - // Must be the ARN of an Elastic Beanstalk environment. + // Must be the ARN of an Elastic Beanstalk resource. // // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` - // A list of tags to add or update. + // A list of tags to add or update. If a key of an existing tag is added, the + // tag's value is updated. // - // If a key of an existing tag is added, the tag's value is updated. + // Specify at least one of these parameters: TagsToAdd, TagsToRemove. TagsToAdd []*Tag `type:"list"` - // A list of tag keys to remove. + // A list of tag keys to remove. If a tag key doesn't exist, it is silently + // ignored. // - // If a tag key doesn't exist, it is silently ignored. + // Specify at least one of these parameters: TagsToAdd, TagsToRemove. 
TagsToRemove []*string `type:"list"` } @@ -11572,6 +12505,15 @@ const ( ActionHistoryStatusUnknown = "Unknown" ) +// ActionHistoryStatus_Values returns all elements of the ActionHistoryStatus enum +func ActionHistoryStatus_Values() []string { + return []string{ + ActionHistoryStatusCompleted, + ActionHistoryStatusFailed, + ActionHistoryStatusUnknown, + } +} + const ( // ActionStatusScheduled is a ActionStatus enum value ActionStatusScheduled = "Scheduled" @@ -11586,6 +12528,16 @@ const ( ActionStatusUnknown = "Unknown" ) +// ActionStatus_Values returns all elements of the ActionStatus enum +func ActionStatus_Values() []string { + return []string{ + ActionStatusScheduled, + ActionStatusPending, + ActionStatusRunning, + ActionStatusUnknown, + } +} + const ( // ActionTypeInstanceRefresh is a ActionType enum value ActionTypeInstanceRefresh = "InstanceRefresh" @@ -11597,6 +12549,15 @@ const ( ActionTypeUnknown = "Unknown" ) +// ActionType_Values returns all elements of the ActionType enum +func ActionType_Values() []string { + return []string{ + ActionTypeInstanceRefresh, + ActionTypePlatformUpdate, + ActionTypeUnknown, + } +} + const ( // ApplicationVersionStatusProcessed is a ApplicationVersionStatus enum value ApplicationVersionStatusProcessed = "Processed" @@ -11614,6 +12575,17 @@ const ( ApplicationVersionStatusBuilding = "Building" ) +// ApplicationVersionStatus_Values returns all elements of the ApplicationVersionStatus enum +func ApplicationVersionStatus_Values() []string { + return []string{ + ApplicationVersionStatusProcessed, + ApplicationVersionStatusUnprocessed, + ApplicationVersionStatusFailed, + ApplicationVersionStatusProcessing, + ApplicationVersionStatusBuilding, + } +} + const ( // ComputeTypeBuildGeneral1Small is a ComputeType enum value ComputeTypeBuildGeneral1Small = "BUILD_GENERAL1_SMALL" @@ -11625,6 +12597,15 @@ const ( ComputeTypeBuildGeneral1Large = "BUILD_GENERAL1_LARGE" ) +// ComputeType_Values returns all elements of the ComputeType enum +func ComputeType_Values() []string { + return []string{ + ComputeTypeBuildGeneral1Small, + ComputeTypeBuildGeneral1Medium, + ComputeTypeBuildGeneral1Large, + } +} + const ( // ConfigurationDeploymentStatusDeployed is a ConfigurationDeploymentStatus enum value ConfigurationDeploymentStatusDeployed = "deployed" @@ -11636,6 +12617,15 @@ const ( ConfigurationDeploymentStatusFailed = "failed" ) +// ConfigurationDeploymentStatus_Values returns all elements of the ConfigurationDeploymentStatus enum +func ConfigurationDeploymentStatus_Values() []string { + return []string{ + ConfigurationDeploymentStatusDeployed, + ConfigurationDeploymentStatusPending, + ConfigurationDeploymentStatusFailed, + } +} + const ( // ConfigurationOptionValueTypeScalar is a ConfigurationOptionValueType enum value ConfigurationOptionValueTypeScalar = "Scalar" @@ -11644,6 +12634,14 @@ const ( ConfigurationOptionValueTypeList = "List" ) +// ConfigurationOptionValueType_Values returns all elements of the ConfigurationOptionValueType enum +func ConfigurationOptionValueType_Values() []string { + return []string{ + ConfigurationOptionValueTypeScalar, + ConfigurationOptionValueTypeList, + } +} + const ( // EnvironmentHealthGreen is a EnvironmentHealth enum value EnvironmentHealthGreen = "Green" @@ -11658,6 +12656,16 @@ const ( EnvironmentHealthGrey = "Grey" ) +// EnvironmentHealth_Values returns all elements of the EnvironmentHealth enum +func EnvironmentHealth_Values() []string { + return []string{ + EnvironmentHealthGreen, + EnvironmentHealthYellow, + 
EnvironmentHealthRed, + EnvironmentHealthGrey, + } +} + const ( // EnvironmentHealthAttributeStatus is a EnvironmentHealthAttribute enum value EnvironmentHealthAttributeStatus = "Status" @@ -11684,6 +12692,20 @@ const ( EnvironmentHealthAttributeRefreshedAt = "RefreshedAt" ) +// EnvironmentHealthAttribute_Values returns all elements of the EnvironmentHealthAttribute enum +func EnvironmentHealthAttribute_Values() []string { + return []string{ + EnvironmentHealthAttributeStatus, + EnvironmentHealthAttributeColor, + EnvironmentHealthAttributeCauses, + EnvironmentHealthAttributeApplicationMetrics, + EnvironmentHealthAttributeInstancesHealth, + EnvironmentHealthAttributeAll, + EnvironmentHealthAttributeHealthStatus, + EnvironmentHealthAttributeRefreshedAt, + } +} + const ( // EnvironmentHealthStatusNoData is a EnvironmentHealthStatus enum value EnvironmentHealthStatusNoData = "NoData" @@ -11713,6 +12735,21 @@ const ( EnvironmentHealthStatusSuspended = "Suspended" ) +// EnvironmentHealthStatus_Values returns all elements of the EnvironmentHealthStatus enum +func EnvironmentHealthStatus_Values() []string { + return []string{ + EnvironmentHealthStatusNoData, + EnvironmentHealthStatusUnknown, + EnvironmentHealthStatusPending, + EnvironmentHealthStatusOk, + EnvironmentHealthStatusInfo, + EnvironmentHealthStatusWarning, + EnvironmentHealthStatusDegraded, + EnvironmentHealthStatusSevere, + EnvironmentHealthStatusSuspended, + } +} + const ( // EnvironmentInfoTypeTail is a EnvironmentInfoType enum value EnvironmentInfoTypeTail = "tail" @@ -11721,6 +12758,14 @@ const ( EnvironmentInfoTypeBundle = "bundle" ) +// EnvironmentInfoType_Values returns all elements of the EnvironmentInfoType enum +func EnvironmentInfoType_Values() []string { + return []string{ + EnvironmentInfoTypeTail, + EnvironmentInfoTypeBundle, + } +} + const ( // EnvironmentStatusLaunching is a EnvironmentStatus enum value EnvironmentStatusLaunching = "Launching" @@ -11738,6 +12783,17 @@ const ( EnvironmentStatusTerminated = "Terminated" ) +// EnvironmentStatus_Values returns all elements of the EnvironmentStatus enum +func EnvironmentStatus_Values() []string { + return []string{ + EnvironmentStatusLaunching, + EnvironmentStatusUpdating, + EnvironmentStatusReady, + EnvironmentStatusTerminating, + EnvironmentStatusTerminated, + } +} + const ( // EventSeverityTrace is a EventSeverity enum value EventSeverityTrace = "TRACE" @@ -11758,6 +12814,18 @@ const ( EventSeverityFatal = "FATAL" ) +// EventSeverity_Values returns all elements of the EventSeverity enum +func EventSeverity_Values() []string { + return []string{ + EventSeverityTrace, + EventSeverityDebug, + EventSeverityInfo, + EventSeverityWarn, + EventSeverityError, + EventSeverityFatal, + } +} + const ( // FailureTypeUpdateCancelled is a FailureType enum value FailureTypeUpdateCancelled = "UpdateCancelled" @@ -11781,6 +12849,19 @@ const ( FailureTypePermissionsError = "PermissionsError" ) +// FailureType_Values returns all elements of the FailureType enum +func FailureType_Values() []string { + return []string{ + FailureTypeUpdateCancelled, + FailureTypeCancellationFailed, + FailureTypeRollbackFailed, + FailureTypeRollbackSuccessful, + FailureTypeInternalFailure, + FailureTypeInvalidEnvironmentState, + FailureTypePermissionsError, + } +} + const ( // InstancesHealthAttributeHealthStatus is a InstancesHealthAttribute enum value InstancesHealthAttributeHealthStatus = "HealthStatus" @@ -11816,6 +12897,23 @@ const ( InstancesHealthAttributeAll = "All" ) +// InstancesHealthAttribute_Values 
returns all elements of the InstancesHealthAttribute enum +func InstancesHealthAttribute_Values() []string { + return []string{ + InstancesHealthAttributeHealthStatus, + InstancesHealthAttributeColor, + InstancesHealthAttributeCauses, + InstancesHealthAttributeApplicationMetrics, + InstancesHealthAttributeRefreshedAt, + InstancesHealthAttributeLaunchedAt, + InstancesHealthAttributeSystem, + InstancesHealthAttributeDeployment, + InstancesHealthAttributeAvailabilityZone, + InstancesHealthAttributeInstanceType, + InstancesHealthAttributeAll, + } +} + const ( // PlatformStatusCreating is a PlatformStatus enum value PlatformStatusCreating = "Creating" @@ -11833,6 +12931,17 @@ const ( PlatformStatusDeleted = "Deleted" ) +// PlatformStatus_Values returns all elements of the PlatformStatus enum +func PlatformStatus_Values() []string { + return []string{ + PlatformStatusCreating, + PlatformStatusFailed, + PlatformStatusReady, + PlatformStatusDeleting, + PlatformStatusDeleted, + } +} + const ( // SourceRepositoryCodeCommit is a SourceRepository enum value SourceRepositoryCodeCommit = "CodeCommit" @@ -11841,6 +12950,14 @@ const ( SourceRepositoryS3 = "S3" ) +// SourceRepository_Values returns all elements of the SourceRepository enum +func SourceRepository_Values() []string { + return []string{ + SourceRepositoryCodeCommit, + SourceRepositoryS3, + } +} + const ( // SourceTypeGit is a SourceType enum value SourceTypeGit = "Git" @@ -11849,6 +12966,14 @@ const ( SourceTypeZip = "Zip" ) +// SourceType_Values returns all elements of the SourceType enum +func SourceType_Values() []string { + return []string{ + SourceTypeGit, + SourceTypeZip, + } +} + const ( // ValidationSeverityError is a ValidationSeverity enum value ValidationSeverityError = "error" @@ -11856,3 +12981,11 @@ const ( // ValidationSeverityWarning is a ValidationSeverity enum value ValidationSeverityWarning = "warning" ) + +// ValidationSeverity_Values returns all elements of the ValidationSeverity enum +func ValidationSeverity_Values() []string { + return []string{ + ValidationSeverityError, + ValidationSeverityWarning, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/doc.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/doc.go index 3a1804958..9e4abcaea 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/doc.go @@ -9,8 +9,8 @@ // // For more information about this product, go to the AWS Elastic Beanstalk // (http://aws.amazon.com/elasticbeanstalk/) details page. The location of the -// latest AWS Elastic Beanstalk WSDL is http://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl -// (http://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl). +// latest AWS Elastic Beanstalk WSDL is https://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl +// (https://elasticbeanstalk.s3.amazonaws.com/doc/2010-12-01/AWSElasticBeanstalk.wsdl). // To install the Software Development Kits (SDKs), Integrated Development Environment // (IDE) Toolkits, and command line tools that enable you to access the API, // go to Tools for Amazon Web Services (http://aws.amazon.com/tools/). 
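A minimal sketch of how a caller might exercise the ListPlatformBranches operation, SearchFilter type, and platform-branch fields added in the elasticbeanstalk hunks above. The session setup, filter values, and printed fields are illustrative assumptions, not code taken from this change.

// Sketch only: list supported and beta platform branches whose platform name
// contains "Go", using the SearchFilter terms documented above. Assumes
// default credentials/region resolution via the shared AWS config.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticbeanstalk"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := elasticbeanstalk.New(sess)

	out, err := svc.ListPlatformBranches(&elasticbeanstalk.ListPlatformBranchesInput{
		MaxRecords: aws.Int64(10),
		Filters: []*elasticbeanstalk.SearchFilter{
			{
				// LifecycleState supports the in operator with multiple values.
				Attribute: aws.String("LifecycleState"),
				Operator:  aws.String("in"),
				Values:    aws.StringSlice([]string{"supported", "beta"}),
			},
			{
				// PlatformName supports string operators such as contains.
				Attribute: aws.String("PlatformName"),
				Operator:  aws.String("contains"),
				Values:    aws.StringSlice([]string{"Go"}),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.PlatformBranchSummaryList {
		fmt.Println(aws.StringValue(b.BranchName), aws.StringValue(b.LifecycleState))
	}
}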
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go index fb7e17fad..96a82b0c5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/waiters.go new file mode 100644 index 000000000..c02c0aba9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/waiters.go @@ -0,0 +1,163 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package elasticbeanstalk + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilEnvironmentExists uses the Elastic Beanstalk API operation +// DescribeEnvironments to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *ElasticBeanstalk) WaitUntilEnvironmentExists(input *DescribeEnvironmentsInput) error { + return c.WaitUntilEnvironmentExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilEnvironmentExistsWithContext is an extended version of WaitUntilEnvironmentExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticBeanstalk) WaitUntilEnvironmentExistsWithContext(ctx aws.Context, input *DescribeEnvironmentsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilEnvironmentExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(20 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Environments[].Status", + Expected: "Ready", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Environments[].Status", + Expected: "Launching", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeEnvironmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeEnvironmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilEnvironmentTerminated uses the Elastic Beanstalk API operation +// DescribeEnvironments to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. 
+func (c *ElasticBeanstalk) WaitUntilEnvironmentTerminated(input *DescribeEnvironmentsInput) error { + return c.WaitUntilEnvironmentTerminatedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilEnvironmentTerminatedWithContext is an extended version of WaitUntilEnvironmentTerminated. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticBeanstalk) WaitUntilEnvironmentTerminatedWithContext(ctx aws.Context, input *DescribeEnvironmentsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilEnvironmentTerminated", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(20 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Environments[].Status", + Expected: "Terminated", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Environments[].Status", + Expected: "Terminating", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeEnvironmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeEnvironmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilEnvironmentUpdated uses the Elastic Beanstalk API operation +// DescribeEnvironments to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *ElasticBeanstalk) WaitUntilEnvironmentUpdated(input *DescribeEnvironmentsInput) error { + return c.WaitUntilEnvironmentUpdatedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilEnvironmentUpdatedWithContext is an extended version of WaitUntilEnvironmentUpdated. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticBeanstalk) WaitUntilEnvironmentUpdatedWithContext(ctx aws.Context, input *DescribeEnvironmentsInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilEnvironmentUpdated", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(20 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Environments[].Status", + Expected: "Ready", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathAllWaiterMatch, Argument: "Environments[].Status", + Expected: "Updating", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeEnvironmentsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeEnvironmentsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go index 3acde019b..9511ad489 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go @@ -13,6 +13,92 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restjson" ) +const opAcceptInboundCrossClusterSearchConnection = "AcceptInboundCrossClusterSearchConnection" + +// AcceptInboundCrossClusterSearchConnectionRequest generates a "aws/request.Request" representing the +// client's request for the AcceptInboundCrossClusterSearchConnection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AcceptInboundCrossClusterSearchConnection for more information on using the AcceptInboundCrossClusterSearchConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AcceptInboundCrossClusterSearchConnectionRequest method. +// req, resp := client.AcceptInboundCrossClusterSearchConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) AcceptInboundCrossClusterSearchConnectionRequest(input *AcceptInboundCrossClusterSearchConnectionInput) (req *request.Request, output *AcceptInboundCrossClusterSearchConnectionOutput) { + op := &request.Operation{ + Name: opAcceptInboundCrossClusterSearchConnection, + HTTPMethod: "PUT", + HTTPPath: "/2015-01-01/es/ccs/inboundConnection/{ConnectionId}/accept", + } + + if input == nil { + input = &AcceptInboundCrossClusterSearchConnectionInput{} + } + + output = &AcceptInboundCrossClusterSearchConnectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// AcceptInboundCrossClusterSearchConnection API operation for Amazon Elasticsearch Service. +// +// Allows the destination domain owner to accept an inbound cross-cluster search +// connection request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation AcceptInboundCrossClusterSearchConnection for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * LimitExceededException +// An exception for trying to create more than allowed resources or sub-resources. +// Gives http status code of 409. +// +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. 
+// +func (c *ElasticsearchService) AcceptInboundCrossClusterSearchConnection(input *AcceptInboundCrossClusterSearchConnectionInput) (*AcceptInboundCrossClusterSearchConnectionOutput, error) { + req, out := c.AcceptInboundCrossClusterSearchConnectionRequest(input) + return out, req.Send() +} + +// AcceptInboundCrossClusterSearchConnectionWithContext is the same as AcceptInboundCrossClusterSearchConnection with the addition of +// the ability to pass a context and additional request options. +// +// See AcceptInboundCrossClusterSearchConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) AcceptInboundCrossClusterSearchConnectionWithContext(ctx aws.Context, input *AcceptInboundCrossClusterSearchConnectionInput, opts ...request.Option) (*AcceptInboundCrossClusterSearchConnectionOutput, error) { + req, out := c.AcceptInboundCrossClusterSearchConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opAddTags = "AddTags" // AddTagsRequest generates a "aws/request.Request" representing the @@ -105,6 +191,103 @@ func (c *ElasticsearchService) AddTagsWithContext(ctx aws.Context, input *AddTag return out, req.Send() } +const opAssociatePackage = "AssociatePackage" + +// AssociatePackageRequest generates a "aws/request.Request" representing the +// client's request for the AssociatePackage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociatePackage for more information on using the AssociatePackage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociatePackageRequest method. +// req, resp := client.AssociatePackageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) AssociatePackageRequest(input *AssociatePackageInput) (req *request.Request, output *AssociatePackageOutput) { + op := &request.Operation{ + Name: opAssociatePackage, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/packages/associate/{PackageID}/{DomainName}", + } + + if input == nil { + input = &AssociatePackageInput{} + } + + output = &AssociatePackageOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociatePackage API operation for Amazon Elasticsearch Service. +// +// Associates a package with an Amazon ES domain. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation AssociatePackage for usage and error information. +// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. 
+// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * AccessDeniedException +// An error occurred because user does not have permissions to access the resource. +// Returns HTTP status code 403. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +// * ConflictException +// An error occurred because the client attempts to remove a resource that is +// currently in use. Returns HTTP status code 409. +// +func (c *ElasticsearchService) AssociatePackage(input *AssociatePackageInput) (*AssociatePackageOutput, error) { + req, out := c.AssociatePackageRequest(input) + return out, req.Send() +} + +// AssociatePackageWithContext is the same as AssociatePackage with the addition of +// the ability to pass a context and additional request options. +// +// See AssociatePackage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) AssociatePackageWithContext(ctx aws.Context, input *AssociatePackageInput, opts ...request.Option) (*AssociatePackageOutput, error) { + req, out := c.AssociatePackageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCancelElasticsearchServiceSoftwareUpdate = "CancelElasticsearchServiceSoftwareUpdate" // CancelElasticsearchServiceSoftwareUpdateRequest generates a "aws/request.Request" representing the @@ -299,151 +482,147 @@ func (c *ElasticsearchService) CreateElasticsearchDomainWithContext(ctx aws.Cont return out, req.Send() } -const opDeleteElasticsearchDomain = "DeleteElasticsearchDomain" +const opCreateOutboundCrossClusterSearchConnection = "CreateOutboundCrossClusterSearchConnection" -// DeleteElasticsearchDomainRequest generates a "aws/request.Request" representing the -// client's request for the DeleteElasticsearchDomain operation. The "output" return +// CreateOutboundCrossClusterSearchConnectionRequest generates a "aws/request.Request" representing the +// client's request for the CreateOutboundCrossClusterSearchConnection operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteElasticsearchDomain for more information on using the DeleteElasticsearchDomain +// See CreateOutboundCrossClusterSearchConnection for more information on using the CreateOutboundCrossClusterSearchConnection // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteElasticsearchDomainRequest method. 
-// req, resp := client.DeleteElasticsearchDomainRequest(params) +// // Example sending a request using the CreateOutboundCrossClusterSearchConnectionRequest method. +// req, resp := client.CreateOutboundCrossClusterSearchConnectionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) DeleteElasticsearchDomainRequest(input *DeleteElasticsearchDomainInput) (req *request.Request, output *DeleteElasticsearchDomainOutput) { +func (c *ElasticsearchService) CreateOutboundCrossClusterSearchConnectionRequest(input *CreateOutboundCrossClusterSearchConnectionInput) (req *request.Request, output *CreateOutboundCrossClusterSearchConnectionOutput) { op := &request.Operation{ - Name: opDeleteElasticsearchDomain, - HTTPMethod: "DELETE", - HTTPPath: "/2015-01-01/es/domain/{DomainName}", + Name: opCreateOutboundCrossClusterSearchConnection, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/es/ccs/outboundConnection", } if input == nil { - input = &DeleteElasticsearchDomainInput{} + input = &CreateOutboundCrossClusterSearchConnectionInput{} } - output = &DeleteElasticsearchDomainOutput{} + output = &CreateOutboundCrossClusterSearchConnectionOutput{} req = c.newRequest(op, input, output) return } -// DeleteElasticsearchDomain API operation for Amazon Elasticsearch Service. +// CreateOutboundCrossClusterSearchConnection API operation for Amazon Elasticsearch Service. // -// Permanently deletes the specified Elasticsearch domain and all of its data. -// Once a domain is deleted, it cannot be recovered. +// Creates a new cross-cluster search connection from a source domain to a destination +// domain. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation DeleteElasticsearchDomain for usage and error information. +// API operation CreateOutboundCrossClusterSearchConnection for usage and error information. // // Returned Error Types: -// * BaseException -// An error occurred while processing the request. +// * LimitExceededException +// An exception for trying to create more than allowed resources or sub-resources. +// Gives http status code of 409. // // * InternalException // The request processing has failed because of an unknown error, exception // or failure (the failure is internal to the service) . Gives http status code // of 500. // -// * ResourceNotFoundException -// An exception for accessing or deleting a resource that does not exist. Gives -// http status code of 400. +// * ResourceAlreadyExistsException +// An exception for creating a resource that already exists. Gives http status +// code of 400. // -// * ValidationException -// An exception for missing / invalid input fields. Gives http status code of -// 400. +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. 
// -func (c *ElasticsearchService) DeleteElasticsearchDomain(input *DeleteElasticsearchDomainInput) (*DeleteElasticsearchDomainOutput, error) { - req, out := c.DeleteElasticsearchDomainRequest(input) +func (c *ElasticsearchService) CreateOutboundCrossClusterSearchConnection(input *CreateOutboundCrossClusterSearchConnectionInput) (*CreateOutboundCrossClusterSearchConnectionOutput, error) { + req, out := c.CreateOutboundCrossClusterSearchConnectionRequest(input) return out, req.Send() } -// DeleteElasticsearchDomainWithContext is the same as DeleteElasticsearchDomain with the addition of +// CreateOutboundCrossClusterSearchConnectionWithContext is the same as CreateOutboundCrossClusterSearchConnection with the addition of // the ability to pass a context and additional request options. // -// See DeleteElasticsearchDomain for details on how to use this API operation. +// See CreateOutboundCrossClusterSearchConnection for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) DeleteElasticsearchDomainWithContext(ctx aws.Context, input *DeleteElasticsearchDomainInput, opts ...request.Option) (*DeleteElasticsearchDomainOutput, error) { - req, out := c.DeleteElasticsearchDomainRequest(input) +func (c *ElasticsearchService) CreateOutboundCrossClusterSearchConnectionWithContext(ctx aws.Context, input *CreateOutboundCrossClusterSearchConnectionInput, opts ...request.Option) (*CreateOutboundCrossClusterSearchConnectionOutput, error) { + req, out := c.CreateOutboundCrossClusterSearchConnectionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteElasticsearchServiceRole = "DeleteElasticsearchServiceRole" +const opCreatePackage = "CreatePackage" -// DeleteElasticsearchServiceRoleRequest generates a "aws/request.Request" representing the -// client's request for the DeleteElasticsearchServiceRole operation. The "output" return +// CreatePackageRequest generates a "aws/request.Request" representing the +// client's request for the CreatePackage operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteElasticsearchServiceRole for more information on using the DeleteElasticsearchServiceRole +// See CreatePackage for more information on using the CreatePackage // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteElasticsearchServiceRoleRequest method. -// req, resp := client.DeleteElasticsearchServiceRoleRequest(params) +// // Example sending a request using the CreatePackageRequest method. 
+// req, resp := client.CreatePackageRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) DeleteElasticsearchServiceRoleRequest(input *DeleteElasticsearchServiceRoleInput) (req *request.Request, output *DeleteElasticsearchServiceRoleOutput) { +func (c *ElasticsearchService) CreatePackageRequest(input *CreatePackageInput) (req *request.Request, output *CreatePackageOutput) { op := &request.Operation{ - Name: opDeleteElasticsearchServiceRole, - HTTPMethod: "DELETE", - HTTPPath: "/2015-01-01/es/role", + Name: opCreatePackage, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/packages", } if input == nil { - input = &DeleteElasticsearchServiceRoleInput{} + input = &CreatePackageInput{} } - output = &DeleteElasticsearchServiceRoleOutput{} + output = &CreatePackageOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteElasticsearchServiceRole API operation for Amazon Elasticsearch Service. +// CreatePackage API operation for Amazon Elasticsearch Service. // -// Deletes the service-linked role that Elasticsearch Service uses to manage -// and maintain VPC domains. Role deletion will fail if any existing VPC domains -// use the role. You must delete any such Elasticsearch domains before deleting -// the role. See Deleting Elasticsearch Service Role (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-enabling-slr) -// in VPC Endpoints for Amazon Elasticsearch Service Domains. +// Create a package for use with Amazon ES domains. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation DeleteElasticsearchServiceRole for usage and error information. +// API operation CreatePackage for usage and error information. // // Returned Error Types: // * BaseException @@ -454,82 +633,98 @@ func (c *ElasticsearchService) DeleteElasticsearchServiceRoleRequest(input *Dele // or failure (the failure is internal to the service) . Gives http status code // of 500. // +// * LimitExceededException +// An exception for trying to create more than allowed resources or sub-resources. +// Gives http status code of 409. +// +// * InvalidTypeException +// An exception for trying to create or access sub-resource that is either invalid +// or not supported. Gives http status code of 409. +// +// * ResourceAlreadyExistsException +// An exception for creating a resource that already exists. Gives http status +// code of 400. +// +// * AccessDeniedException +// An error occurred because user does not have permissions to access the resource. +// Returns HTTP status code 403. +// // * ValidationException // An exception for missing / invalid input fields. Gives http status code of // 400. 
// -func (c *ElasticsearchService) DeleteElasticsearchServiceRole(input *DeleteElasticsearchServiceRoleInput) (*DeleteElasticsearchServiceRoleOutput, error) { - req, out := c.DeleteElasticsearchServiceRoleRequest(input) +func (c *ElasticsearchService) CreatePackage(input *CreatePackageInput) (*CreatePackageOutput, error) { + req, out := c.CreatePackageRequest(input) return out, req.Send() } -// DeleteElasticsearchServiceRoleWithContext is the same as DeleteElasticsearchServiceRole with the addition of +// CreatePackageWithContext is the same as CreatePackage with the addition of // the ability to pass a context and additional request options. // -// See DeleteElasticsearchServiceRole for details on how to use this API operation. +// See CreatePackage for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) DeleteElasticsearchServiceRoleWithContext(ctx aws.Context, input *DeleteElasticsearchServiceRoleInput, opts ...request.Option) (*DeleteElasticsearchServiceRoleOutput, error) { - req, out := c.DeleteElasticsearchServiceRoleRequest(input) +func (c *ElasticsearchService) CreatePackageWithContext(ctx aws.Context, input *CreatePackageInput, opts ...request.Option) (*CreatePackageOutput, error) { + req, out := c.CreatePackageRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeElasticsearchDomain = "DescribeElasticsearchDomain" +const opDeleteElasticsearchDomain = "DeleteElasticsearchDomain" -// DescribeElasticsearchDomainRequest generates a "aws/request.Request" representing the -// client's request for the DescribeElasticsearchDomain operation. The "output" return +// DeleteElasticsearchDomainRequest generates a "aws/request.Request" representing the +// client's request for the DeleteElasticsearchDomain operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeElasticsearchDomain for more information on using the DescribeElasticsearchDomain +// See DeleteElasticsearchDomain for more information on using the DeleteElasticsearchDomain // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeElasticsearchDomainRequest method. -// req, resp := client.DescribeElasticsearchDomainRequest(params) +// // Example sending a request using the DeleteElasticsearchDomainRequest method. 
+// req, resp := client.DeleteElasticsearchDomainRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) DescribeElasticsearchDomainRequest(input *DescribeElasticsearchDomainInput) (req *request.Request, output *DescribeElasticsearchDomainOutput) { +func (c *ElasticsearchService) DeleteElasticsearchDomainRequest(input *DeleteElasticsearchDomainInput) (req *request.Request, output *DeleteElasticsearchDomainOutput) { op := &request.Operation{ - Name: opDescribeElasticsearchDomain, - HTTPMethod: "GET", + Name: opDeleteElasticsearchDomain, + HTTPMethod: "DELETE", HTTPPath: "/2015-01-01/es/domain/{DomainName}", } if input == nil { - input = &DescribeElasticsearchDomainInput{} + input = &DeleteElasticsearchDomainInput{} } - output = &DescribeElasticsearchDomainOutput{} + output = &DeleteElasticsearchDomainOutput{} req = c.newRequest(op, input, output) return } -// DescribeElasticsearchDomain API operation for Amazon Elasticsearch Service. +// DeleteElasticsearchDomain API operation for Amazon Elasticsearch Service. // -// Returns domain configuration information about the specified Elasticsearch -// domain, including the domain ID, domain endpoint, and domain ARN. +// Permanently deletes the specified Elasticsearch domain and all of its data. +// Once a domain is deleted, it cannot be recovered. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation DescribeElasticsearchDomain for usage and error information. +// API operation DeleteElasticsearchDomain for usage and error information. // // Returned Error Types: // * BaseException @@ -548,79 +743,82 @@ func (c *ElasticsearchService) DescribeElasticsearchDomainRequest(input *Describ // An exception for missing / invalid input fields. Gives http status code of // 400. // -func (c *ElasticsearchService) DescribeElasticsearchDomain(input *DescribeElasticsearchDomainInput) (*DescribeElasticsearchDomainOutput, error) { - req, out := c.DescribeElasticsearchDomainRequest(input) +func (c *ElasticsearchService) DeleteElasticsearchDomain(input *DeleteElasticsearchDomainInput) (*DeleteElasticsearchDomainOutput, error) { + req, out := c.DeleteElasticsearchDomainRequest(input) return out, req.Send() } -// DescribeElasticsearchDomainWithContext is the same as DescribeElasticsearchDomain with the addition of +// DeleteElasticsearchDomainWithContext is the same as DeleteElasticsearchDomain with the addition of // the ability to pass a context and additional request options. // -// See DescribeElasticsearchDomain for details on how to use this API operation. +// See DeleteElasticsearchDomain for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *ElasticsearchService) DescribeElasticsearchDomainWithContext(ctx aws.Context, input *DescribeElasticsearchDomainInput, opts ...request.Option) (*DescribeElasticsearchDomainOutput, error) { - req, out := c.DescribeElasticsearchDomainRequest(input) +func (c *ElasticsearchService) DeleteElasticsearchDomainWithContext(ctx aws.Context, input *DeleteElasticsearchDomainInput, opts ...request.Option) (*DeleteElasticsearchDomainOutput, error) { + req, out := c.DeleteElasticsearchDomainRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeElasticsearchDomainConfig = "DescribeElasticsearchDomainConfig" +const opDeleteElasticsearchServiceRole = "DeleteElasticsearchServiceRole" -// DescribeElasticsearchDomainConfigRequest generates a "aws/request.Request" representing the -// client's request for the DescribeElasticsearchDomainConfig operation. The "output" return +// DeleteElasticsearchServiceRoleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteElasticsearchServiceRole operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeElasticsearchDomainConfig for more information on using the DescribeElasticsearchDomainConfig +// See DeleteElasticsearchServiceRole for more information on using the DeleteElasticsearchServiceRole // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeElasticsearchDomainConfigRequest method. -// req, resp := client.DescribeElasticsearchDomainConfigRequest(params) +// // Example sending a request using the DeleteElasticsearchServiceRoleRequest method. +// req, resp := client.DeleteElasticsearchServiceRoleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) DescribeElasticsearchDomainConfigRequest(input *DescribeElasticsearchDomainConfigInput) (req *request.Request, output *DescribeElasticsearchDomainConfigOutput) { +func (c *ElasticsearchService) DeleteElasticsearchServiceRoleRequest(input *DeleteElasticsearchServiceRoleInput) (req *request.Request, output *DeleteElasticsearchServiceRoleOutput) { op := &request.Operation{ - Name: opDescribeElasticsearchDomainConfig, - HTTPMethod: "GET", - HTTPPath: "/2015-01-01/es/domain/{DomainName}/config", + Name: opDeleteElasticsearchServiceRole, + HTTPMethod: "DELETE", + HTTPPath: "/2015-01-01/es/role", } if input == nil { - input = &DescribeElasticsearchDomainConfigInput{} + input = &DeleteElasticsearchServiceRoleInput{} } - output = &DescribeElasticsearchDomainConfigOutput{} + output = &DeleteElasticsearchServiceRoleOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DescribeElasticsearchDomainConfig API operation for Amazon Elasticsearch Service. +// DeleteElasticsearchServiceRole API operation for Amazon Elasticsearch Service. 
// -// Provides cluster configuration information about the specified Elasticsearch -// domain, such as the state, creation date, update version, and update date -// for cluster options. +// Deletes the service-linked role that Elasticsearch Service uses to manage +// and maintain VPC domains. Role deletion will fail if any existing VPC domains +// use the role. You must delete any such Elasticsearch domains before deleting +// the role. See Deleting Elasticsearch Service Role (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-enabling-slr) +// in VPC Endpoints for Amazon Elasticsearch Service Domains. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation DescribeElasticsearchDomainConfig for usage and error information. +// API operation DeleteElasticsearchServiceRole for usage and error information. // // Returned Error Types: // * BaseException @@ -631,967 +829,827 @@ func (c *ElasticsearchService) DescribeElasticsearchDomainConfigRequest(input *D // or failure (the failure is internal to the service) . Gives http status code // of 500. // -// * ResourceNotFoundException -// An exception for accessing or deleting a resource that does not exist. Gives -// http status code of 400. -// // * ValidationException // An exception for missing / invalid input fields. Gives http status code of // 400. // -func (c *ElasticsearchService) DescribeElasticsearchDomainConfig(input *DescribeElasticsearchDomainConfigInput) (*DescribeElasticsearchDomainConfigOutput, error) { - req, out := c.DescribeElasticsearchDomainConfigRequest(input) +func (c *ElasticsearchService) DeleteElasticsearchServiceRole(input *DeleteElasticsearchServiceRoleInput) (*DeleteElasticsearchServiceRoleOutput, error) { + req, out := c.DeleteElasticsearchServiceRoleRequest(input) return out, req.Send() } -// DescribeElasticsearchDomainConfigWithContext is the same as DescribeElasticsearchDomainConfig with the addition of +// DeleteElasticsearchServiceRoleWithContext is the same as DeleteElasticsearchServiceRole with the addition of // the ability to pass a context and additional request options. // -// See DescribeElasticsearchDomainConfig for details on how to use this API operation. +// See DeleteElasticsearchServiceRole for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) DescribeElasticsearchDomainConfigWithContext(ctx aws.Context, input *DescribeElasticsearchDomainConfigInput, opts ...request.Option) (*DescribeElasticsearchDomainConfigOutput, error) { - req, out := c.DescribeElasticsearchDomainConfigRequest(input) +func (c *ElasticsearchService) DeleteElasticsearchServiceRoleWithContext(ctx aws.Context, input *DeleteElasticsearchServiceRoleInput, opts ...request.Option) (*DeleteElasticsearchServiceRoleOutput, error) { + req, out := c.DeleteElasticsearchServiceRoleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opDescribeElasticsearchDomains = "DescribeElasticsearchDomains" +const opDeleteInboundCrossClusterSearchConnection = "DeleteInboundCrossClusterSearchConnection" -// DescribeElasticsearchDomainsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeElasticsearchDomains operation. The "output" return +// DeleteInboundCrossClusterSearchConnectionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInboundCrossClusterSearchConnection operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeElasticsearchDomains for more information on using the DescribeElasticsearchDomains +// See DeleteInboundCrossClusterSearchConnection for more information on using the DeleteInboundCrossClusterSearchConnection // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeElasticsearchDomainsRequest method. -// req, resp := client.DescribeElasticsearchDomainsRequest(params) +// // Example sending a request using the DeleteInboundCrossClusterSearchConnectionRequest method. +// req, resp := client.DeleteInboundCrossClusterSearchConnectionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) DescribeElasticsearchDomainsRequest(input *DescribeElasticsearchDomainsInput) (req *request.Request, output *DescribeElasticsearchDomainsOutput) { +func (c *ElasticsearchService) DeleteInboundCrossClusterSearchConnectionRequest(input *DeleteInboundCrossClusterSearchConnectionInput) (req *request.Request, output *DeleteInboundCrossClusterSearchConnectionOutput) { op := &request.Operation{ - Name: opDescribeElasticsearchDomains, - HTTPMethod: "POST", - HTTPPath: "/2015-01-01/es/domain-info", + Name: opDeleteInboundCrossClusterSearchConnection, + HTTPMethod: "DELETE", + HTTPPath: "/2015-01-01/es/ccs/inboundConnection/{ConnectionId}", } if input == nil { - input = &DescribeElasticsearchDomainsInput{} + input = &DeleteInboundCrossClusterSearchConnectionInput{} } - output = &DescribeElasticsearchDomainsOutput{} + output = &DeleteInboundCrossClusterSearchConnectionOutput{} req = c.newRequest(op, input, output) return } -// DescribeElasticsearchDomains API operation for Amazon Elasticsearch Service. +// DeleteInboundCrossClusterSearchConnection API operation for Amazon Elasticsearch Service. // -// Returns domain configuration information about the specified Elasticsearch -// domains, including the domain ID, domain endpoint, and domain ARN. +// Allows the destination domain owner to delete an existing inbound cross-cluster +// search connection. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation DescribeElasticsearchDomains for usage and error information. +// API operation DeleteInboundCrossClusterSearchConnection for usage and error information. 
// // Returned Error Types: -// * BaseException -// An error occurred while processing the request. -// -// * InternalException -// The request processing has failed because of an unknown error, exception -// or failure (the failure is internal to the service) . Gives http status code -// of 500. +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. // -// * ValidationException -// An exception for missing / invalid input fields. Gives http status code of -// 400. +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. // -func (c *ElasticsearchService) DescribeElasticsearchDomains(input *DescribeElasticsearchDomainsInput) (*DescribeElasticsearchDomainsOutput, error) { - req, out := c.DescribeElasticsearchDomainsRequest(input) +func (c *ElasticsearchService) DeleteInboundCrossClusterSearchConnection(input *DeleteInboundCrossClusterSearchConnectionInput) (*DeleteInboundCrossClusterSearchConnectionOutput, error) { + req, out := c.DeleteInboundCrossClusterSearchConnectionRequest(input) return out, req.Send() } -// DescribeElasticsearchDomainsWithContext is the same as DescribeElasticsearchDomains with the addition of +// DeleteInboundCrossClusterSearchConnectionWithContext is the same as DeleteInboundCrossClusterSearchConnection with the addition of // the ability to pass a context and additional request options. // -// See DescribeElasticsearchDomains for details on how to use this API operation. +// See DeleteInboundCrossClusterSearchConnection for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) DescribeElasticsearchDomainsWithContext(ctx aws.Context, input *DescribeElasticsearchDomainsInput, opts ...request.Option) (*DescribeElasticsearchDomainsOutput, error) { - req, out := c.DescribeElasticsearchDomainsRequest(input) +func (c *ElasticsearchService) DeleteInboundCrossClusterSearchConnectionWithContext(ctx aws.Context, input *DeleteInboundCrossClusterSearchConnectionInput, opts ...request.Option) (*DeleteInboundCrossClusterSearchConnectionOutput, error) { + req, out := c.DeleteInboundCrossClusterSearchConnectionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeElasticsearchInstanceTypeLimits = "DescribeElasticsearchInstanceTypeLimits" +const opDeleteOutboundCrossClusterSearchConnection = "DeleteOutboundCrossClusterSearchConnection" -// DescribeElasticsearchInstanceTypeLimitsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeElasticsearchInstanceTypeLimits operation. The "output" return +// DeleteOutboundCrossClusterSearchConnectionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteOutboundCrossClusterSearchConnection operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See DescribeElasticsearchInstanceTypeLimits for more information on using the DescribeElasticsearchInstanceTypeLimits +// See DeleteOutboundCrossClusterSearchConnection for more information on using the DeleteOutboundCrossClusterSearchConnection // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeElasticsearchInstanceTypeLimitsRequest method. -// req, resp := client.DescribeElasticsearchInstanceTypeLimitsRequest(params) +// // Example sending a request using the DeleteOutboundCrossClusterSearchConnectionRequest method. +// req, resp := client.DeleteOutboundCrossClusterSearchConnectionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) DescribeElasticsearchInstanceTypeLimitsRequest(input *DescribeElasticsearchInstanceTypeLimitsInput) (req *request.Request, output *DescribeElasticsearchInstanceTypeLimitsOutput) { +func (c *ElasticsearchService) DeleteOutboundCrossClusterSearchConnectionRequest(input *DeleteOutboundCrossClusterSearchConnectionInput) (req *request.Request, output *DeleteOutboundCrossClusterSearchConnectionOutput) { op := &request.Operation{ - Name: opDescribeElasticsearchInstanceTypeLimits, - HTTPMethod: "GET", - HTTPPath: "/2015-01-01/es/instanceTypeLimits/{ElasticsearchVersion}/{InstanceType}", + Name: opDeleteOutboundCrossClusterSearchConnection, + HTTPMethod: "DELETE", + HTTPPath: "/2015-01-01/es/ccs/outboundConnection/{ConnectionId}", } if input == nil { - input = &DescribeElasticsearchInstanceTypeLimitsInput{} + input = &DeleteOutboundCrossClusterSearchConnectionInput{} } - output = &DescribeElasticsearchInstanceTypeLimitsOutput{} + output = &DeleteOutboundCrossClusterSearchConnectionOutput{} req = c.newRequest(op, input, output) return } -// DescribeElasticsearchInstanceTypeLimits API operation for Amazon Elasticsearch Service. +// DeleteOutboundCrossClusterSearchConnection API operation for Amazon Elasticsearch Service. // -// Describe Elasticsearch Limits for a given InstanceType and ElasticsearchVersion. -// When modifying existing Domain, specify the DomainName to know what Limits -// are supported for modifying. +// Allows the source domain owner to delete an existing outbound cross-cluster +// search connection. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation DescribeElasticsearchInstanceTypeLimits for usage and error information. +// API operation DeleteOutboundCrossClusterSearchConnection for usage and error information. // // Returned Error Types: -// * BaseException -// An error occurred while processing the request. -// -// * InternalException -// The request processing has failed because of an unknown error, exception -// or failure (the failure is internal to the service) . Gives http status code -// of 500. -// -// * InvalidTypeException -// An exception for trying to create or access sub-resource that is either invalid -// or not supported. Gives http status code of 409. -// -// * LimitExceededException -// An exception for trying to create more than allowed resources or sub-resources. -// Gives http status code of 409. 
-// // * ResourceNotFoundException // An exception for accessing or deleting a resource that does not exist. Gives // http status code of 400. // -// * ValidationException -// An exception for missing / invalid input fields. Gives http status code of -// 400. +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. // -func (c *ElasticsearchService) DescribeElasticsearchInstanceTypeLimits(input *DescribeElasticsearchInstanceTypeLimitsInput) (*DescribeElasticsearchInstanceTypeLimitsOutput, error) { - req, out := c.DescribeElasticsearchInstanceTypeLimitsRequest(input) +func (c *ElasticsearchService) DeleteOutboundCrossClusterSearchConnection(input *DeleteOutboundCrossClusterSearchConnectionInput) (*DeleteOutboundCrossClusterSearchConnectionOutput, error) { + req, out := c.DeleteOutboundCrossClusterSearchConnectionRequest(input) return out, req.Send() } -// DescribeElasticsearchInstanceTypeLimitsWithContext is the same as DescribeElasticsearchInstanceTypeLimits with the addition of +// DeleteOutboundCrossClusterSearchConnectionWithContext is the same as DeleteOutboundCrossClusterSearchConnection with the addition of // the ability to pass a context and additional request options. // -// See DescribeElasticsearchInstanceTypeLimits for details on how to use this API operation. +// See DeleteOutboundCrossClusterSearchConnection for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) DescribeElasticsearchInstanceTypeLimitsWithContext(ctx aws.Context, input *DescribeElasticsearchInstanceTypeLimitsInput, opts ...request.Option) (*DescribeElasticsearchInstanceTypeLimitsOutput, error) { - req, out := c.DescribeElasticsearchInstanceTypeLimitsRequest(input) +func (c *ElasticsearchService) DeleteOutboundCrossClusterSearchConnectionWithContext(ctx aws.Context, input *DeleteOutboundCrossClusterSearchConnectionInput, opts ...request.Option) (*DeleteOutboundCrossClusterSearchConnectionOutput, error) { + req, out := c.DeleteOutboundCrossClusterSearchConnectionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeReservedElasticsearchInstanceOfferings = "DescribeReservedElasticsearchInstanceOfferings" +const opDeletePackage = "DeletePackage" -// DescribeReservedElasticsearchInstanceOfferingsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeReservedElasticsearchInstanceOfferings operation. The "output" return +// DeletePackageRequest generates a "aws/request.Request" representing the +// client's request for the DeletePackage operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeReservedElasticsearchInstanceOfferings for more information on using the DescribeReservedElasticsearchInstanceOfferings +// See DeletePackage for more information on using the DeletePackage // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeReservedElasticsearchInstanceOfferingsRequest method. -// req, resp := client.DescribeReservedElasticsearchInstanceOfferingsRequest(params) +// // Example sending a request using the DeletePackageRequest method. +// req, resp := client.DeletePackageRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) DescribeReservedElasticsearchInstanceOfferingsRequest(input *DescribeReservedElasticsearchInstanceOfferingsInput) (req *request.Request, output *DescribeReservedElasticsearchInstanceOfferingsOutput) { +func (c *ElasticsearchService) DeletePackageRequest(input *DeletePackageInput) (req *request.Request, output *DeletePackageOutput) { op := &request.Operation{ - Name: opDescribeReservedElasticsearchInstanceOfferings, - HTTPMethod: "GET", - HTTPPath: "/2015-01-01/es/reservedInstanceOfferings", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + Name: opDeletePackage, + HTTPMethod: "DELETE", + HTTPPath: "/2015-01-01/packages/{PackageID}", } if input == nil { - input = &DescribeReservedElasticsearchInstanceOfferingsInput{} + input = &DeletePackageInput{} } - output = &DescribeReservedElasticsearchInstanceOfferingsOutput{} + output = &DeletePackageOutput{} req = c.newRequest(op, input, output) return } -// DescribeReservedElasticsearchInstanceOfferings API operation for Amazon Elasticsearch Service. +// DeletePackage API operation for Amazon Elasticsearch Service. // -// Lists available reserved Elasticsearch instance offerings. +// Delete the package. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation DescribeReservedElasticsearchInstanceOfferings for usage and error information. +// API operation DeletePackage for usage and error information. // // Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// // * ResourceNotFoundException // An exception for accessing or deleting a resource that does not exist. Gives // http status code of 400. // +// * AccessDeniedException +// An error occurred because user does not have permissions to access the resource. +// Returns HTTP status code 403. +// // * ValidationException // An exception for missing / invalid input fields. Gives http status code of // 400. // -// * DisabledOperationException -// An error occured because the client wanted to access a not supported operation. -// Gives http status code of 409. -// -// * InternalException -// The request processing has failed because of an unknown error, exception -// or failure (the failure is internal to the service) . Gives http status code -// of 500. +// * ConflictException +// An error occurred because the client attempts to remove a resource that is +// currently in use. Returns HTTP status code 409. 
// -func (c *ElasticsearchService) DescribeReservedElasticsearchInstanceOfferings(input *DescribeReservedElasticsearchInstanceOfferingsInput) (*DescribeReservedElasticsearchInstanceOfferingsOutput, error) { - req, out := c.DescribeReservedElasticsearchInstanceOfferingsRequest(input) +func (c *ElasticsearchService) DeletePackage(input *DeletePackageInput) (*DeletePackageOutput, error) { + req, out := c.DeletePackageRequest(input) return out, req.Send() } -// DescribeReservedElasticsearchInstanceOfferingsWithContext is the same as DescribeReservedElasticsearchInstanceOfferings with the addition of +// DeletePackageWithContext is the same as DeletePackage with the addition of // the ability to pass a context and additional request options. // -// See DescribeReservedElasticsearchInstanceOfferings for details on how to use this API operation. +// See DeletePackage for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) DescribeReservedElasticsearchInstanceOfferingsWithContext(ctx aws.Context, input *DescribeReservedElasticsearchInstanceOfferingsInput, opts ...request.Option) (*DescribeReservedElasticsearchInstanceOfferingsOutput, error) { - req, out := c.DescribeReservedElasticsearchInstanceOfferingsRequest(input) +func (c *ElasticsearchService) DeletePackageWithContext(ctx aws.Context, input *DeletePackageInput, opts ...request.Option) (*DeletePackageOutput, error) { + req, out := c.DeletePackageRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeReservedElasticsearchInstanceOfferingsPages iterates over the pages of a DescribeReservedElasticsearchInstanceOfferings operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeReservedElasticsearchInstanceOfferings method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeReservedElasticsearchInstanceOfferings operation. -// pageNum := 0 -// err := client.DescribeReservedElasticsearchInstanceOfferingsPages(params, -// func(page *elasticsearchservice.DescribeReservedElasticsearchInstanceOfferingsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *ElasticsearchService) DescribeReservedElasticsearchInstanceOfferingsPages(input *DescribeReservedElasticsearchInstanceOfferingsInput, fn func(*DescribeReservedElasticsearchInstanceOfferingsOutput, bool) bool) error { - return c.DescribeReservedElasticsearchInstanceOfferingsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeReservedElasticsearchInstanceOfferingsPagesWithContext same as DescribeReservedElasticsearchInstanceOfferingsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *ElasticsearchService) DescribeReservedElasticsearchInstanceOfferingsPagesWithContext(ctx aws.Context, input *DescribeReservedElasticsearchInstanceOfferingsInput, fn func(*DescribeReservedElasticsearchInstanceOfferingsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeReservedElasticsearchInstanceOfferingsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeReservedElasticsearchInstanceOfferingsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*DescribeReservedElasticsearchInstanceOfferingsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opDescribeReservedElasticsearchInstances = "DescribeReservedElasticsearchInstances" +const opDescribeElasticsearchDomain = "DescribeElasticsearchDomain" -// DescribeReservedElasticsearchInstancesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeReservedElasticsearchInstances operation. The "output" return +// DescribeElasticsearchDomainRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticsearchDomain operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeReservedElasticsearchInstances for more information on using the DescribeReservedElasticsearchInstances +// See DescribeElasticsearchDomain for more information on using the DescribeElasticsearchDomain // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeReservedElasticsearchInstancesRequest method. -// req, resp := client.DescribeReservedElasticsearchInstancesRequest(params) +// // Example sending a request using the DescribeElasticsearchDomainRequest method. 
+// req, resp := client.DescribeElasticsearchDomainRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) DescribeReservedElasticsearchInstancesRequest(input *DescribeReservedElasticsearchInstancesInput) (req *request.Request, output *DescribeReservedElasticsearchInstancesOutput) { +func (c *ElasticsearchService) DescribeElasticsearchDomainRequest(input *DescribeElasticsearchDomainInput) (req *request.Request, output *DescribeElasticsearchDomainOutput) { op := &request.Operation{ - Name: opDescribeReservedElasticsearchInstances, + Name: opDescribeElasticsearchDomain, HTTPMethod: "GET", - HTTPPath: "/2015-01-01/es/reservedInstances", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/2015-01-01/es/domain/{DomainName}", } if input == nil { - input = &DescribeReservedElasticsearchInstancesInput{} + input = &DescribeElasticsearchDomainInput{} } - output = &DescribeReservedElasticsearchInstancesOutput{} + output = &DescribeElasticsearchDomainOutput{} req = c.newRequest(op, input, output) return } -// DescribeReservedElasticsearchInstances API operation for Amazon Elasticsearch Service. +// DescribeElasticsearchDomain API operation for Amazon Elasticsearch Service. // -// Returns information about reserved Elasticsearch instances for this account. +// Returns domain configuration information about the specified Elasticsearch +// domain, including the domain ID, domain endpoint, and domain ARN. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation DescribeReservedElasticsearchInstances for usage and error information. +// API operation DescribeElasticsearchDomain for usage and error information. // // Returned Error Types: -// * ResourceNotFoundException -// An exception for accessing or deleting a resource that does not exist. Gives -// http status code of 400. +// * BaseException +// An error occurred while processing the request. // // * InternalException // The request processing has failed because of an unknown error, exception // or failure (the failure is internal to the service) . Gives http status code // of 500. // +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// // * ValidationException // An exception for missing / invalid input fields. Gives http status code of // 400. // -// * DisabledOperationException -// An error occured because the client wanted to access a not supported operation. -// Gives http status code of 409. 
-// -func (c *ElasticsearchService) DescribeReservedElasticsearchInstances(input *DescribeReservedElasticsearchInstancesInput) (*DescribeReservedElasticsearchInstancesOutput, error) { - req, out := c.DescribeReservedElasticsearchInstancesRequest(input) +func (c *ElasticsearchService) DescribeElasticsearchDomain(input *DescribeElasticsearchDomainInput) (*DescribeElasticsearchDomainOutput, error) { + req, out := c.DescribeElasticsearchDomainRequest(input) return out, req.Send() } -// DescribeReservedElasticsearchInstancesWithContext is the same as DescribeReservedElasticsearchInstances with the addition of +// DescribeElasticsearchDomainWithContext is the same as DescribeElasticsearchDomain with the addition of // the ability to pass a context and additional request options. // -// See DescribeReservedElasticsearchInstances for details on how to use this API operation. +// See DescribeElasticsearchDomain for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) DescribeReservedElasticsearchInstancesWithContext(ctx aws.Context, input *DescribeReservedElasticsearchInstancesInput, opts ...request.Option) (*DescribeReservedElasticsearchInstancesOutput, error) { - req, out := c.DescribeReservedElasticsearchInstancesRequest(input) +func (c *ElasticsearchService) DescribeElasticsearchDomainWithContext(ctx aws.Context, input *DescribeElasticsearchDomainInput, opts ...request.Option) (*DescribeElasticsearchDomainOutput, error) { + req, out := c.DescribeElasticsearchDomainRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// DescribeReservedElasticsearchInstancesPages iterates over the pages of a DescribeReservedElasticsearchInstances operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeReservedElasticsearchInstances method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeReservedElasticsearchInstances operation. -// pageNum := 0 -// err := client.DescribeReservedElasticsearchInstancesPages(params, -// func(page *elasticsearchservice.DescribeReservedElasticsearchInstancesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *ElasticsearchService) DescribeReservedElasticsearchInstancesPages(input *DescribeReservedElasticsearchInstancesInput, fn func(*DescribeReservedElasticsearchInstancesOutput, bool) bool) error { - return c.DescribeReservedElasticsearchInstancesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeReservedElasticsearchInstancesPagesWithContext same as DescribeReservedElasticsearchInstancesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *ElasticsearchService) DescribeReservedElasticsearchInstancesPagesWithContext(ctx aws.Context, input *DescribeReservedElasticsearchInstancesInput, fn func(*DescribeReservedElasticsearchInstancesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeReservedElasticsearchInstancesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeReservedElasticsearchInstancesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*DescribeReservedElasticsearchInstancesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opGetCompatibleElasticsearchVersions = "GetCompatibleElasticsearchVersions" +const opDescribeElasticsearchDomainConfig = "DescribeElasticsearchDomainConfig" -// GetCompatibleElasticsearchVersionsRequest generates a "aws/request.Request" representing the -// client's request for the GetCompatibleElasticsearchVersions operation. The "output" return +// DescribeElasticsearchDomainConfigRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticsearchDomainConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetCompatibleElasticsearchVersions for more information on using the GetCompatibleElasticsearchVersions +// See DescribeElasticsearchDomainConfig for more information on using the DescribeElasticsearchDomainConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetCompatibleElasticsearchVersionsRequest method. -// req, resp := client.GetCompatibleElasticsearchVersionsRequest(params) +// // Example sending a request using the DescribeElasticsearchDomainConfigRequest method. +// req, resp := client.DescribeElasticsearchDomainConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) GetCompatibleElasticsearchVersionsRequest(input *GetCompatibleElasticsearchVersionsInput) (req *request.Request, output *GetCompatibleElasticsearchVersionsOutput) { +func (c *ElasticsearchService) DescribeElasticsearchDomainConfigRequest(input *DescribeElasticsearchDomainConfigInput) (req *request.Request, output *DescribeElasticsearchDomainConfigOutput) { op := &request.Operation{ - Name: opGetCompatibleElasticsearchVersions, + Name: opDescribeElasticsearchDomainConfig, HTTPMethod: "GET", - HTTPPath: "/2015-01-01/es/compatibleVersions", + HTTPPath: "/2015-01-01/es/domain/{DomainName}/config", } if input == nil { - input = &GetCompatibleElasticsearchVersionsInput{} + input = &DescribeElasticsearchDomainConfigInput{} } - output = &GetCompatibleElasticsearchVersionsOutput{} + output = &DescribeElasticsearchDomainConfigOutput{} req = c.newRequest(op, input, output) return } -// GetCompatibleElasticsearchVersions API operation for Amazon Elasticsearch Service. +// DescribeElasticsearchDomainConfig API operation for Amazon Elasticsearch Service. // -// Returns a list of upgrade compatible Elastisearch versions. 
You can optionally -// pass a DomainName to get all upgrade compatible Elasticsearch versions for -// that specific domain. +// Provides cluster configuration information about the specified Elasticsearch +// domain, such as the state, creation date, update version, and update date +// for cluster options. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation GetCompatibleElasticsearchVersions for usage and error information. +// API operation DescribeElasticsearchDomainConfig for usage and error information. // // Returned Error Types: // * BaseException // An error occurred while processing the request. // +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// // * ResourceNotFoundException // An exception for accessing or deleting a resource that does not exist. Gives // http status code of 400. // -// * DisabledOperationException -// An error occured because the client wanted to access a not supported operation. -// Gives http status code of 409. -// // * ValidationException // An exception for missing / invalid input fields. Gives http status code of // 400. // -// * InternalException -// The request processing has failed because of an unknown error, exception -// or failure (the failure is internal to the service) . Gives http status code -// of 500. -// -func (c *ElasticsearchService) GetCompatibleElasticsearchVersions(input *GetCompatibleElasticsearchVersionsInput) (*GetCompatibleElasticsearchVersionsOutput, error) { - req, out := c.GetCompatibleElasticsearchVersionsRequest(input) +func (c *ElasticsearchService) DescribeElasticsearchDomainConfig(input *DescribeElasticsearchDomainConfigInput) (*DescribeElasticsearchDomainConfigOutput, error) { + req, out := c.DescribeElasticsearchDomainConfigRequest(input) return out, req.Send() } -// GetCompatibleElasticsearchVersionsWithContext is the same as GetCompatibleElasticsearchVersions with the addition of +// DescribeElasticsearchDomainConfigWithContext is the same as DescribeElasticsearchDomainConfig with the addition of // the ability to pass a context and additional request options. // -// See GetCompatibleElasticsearchVersions for details on how to use this API operation. +// See DescribeElasticsearchDomainConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) GetCompatibleElasticsearchVersionsWithContext(ctx aws.Context, input *GetCompatibleElasticsearchVersionsInput, opts ...request.Option) (*GetCompatibleElasticsearchVersionsOutput, error) { - req, out := c.GetCompatibleElasticsearchVersionsRequest(input) +func (c *ElasticsearchService) DescribeElasticsearchDomainConfigWithContext(ctx aws.Context, input *DescribeElasticsearchDomainConfigInput, opts ...request.Option) (*DescribeElasticsearchDomainConfigOutput, error) { + req, out := c.DescribeElasticsearchDomainConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetUpgradeHistory = "GetUpgradeHistory" +const opDescribeElasticsearchDomains = "DescribeElasticsearchDomains" -// GetUpgradeHistoryRequest generates a "aws/request.Request" representing the -// client's request for the GetUpgradeHistory operation. The "output" return +// DescribeElasticsearchDomainsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticsearchDomains operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetUpgradeHistory for more information on using the GetUpgradeHistory +// See DescribeElasticsearchDomains for more information on using the DescribeElasticsearchDomains // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetUpgradeHistoryRequest method. -// req, resp := client.GetUpgradeHistoryRequest(params) +// // Example sending a request using the DescribeElasticsearchDomainsRequest method. +// req, resp := client.DescribeElasticsearchDomainsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) GetUpgradeHistoryRequest(input *GetUpgradeHistoryInput) (req *request.Request, output *GetUpgradeHistoryOutput) { +func (c *ElasticsearchService) DescribeElasticsearchDomainsRequest(input *DescribeElasticsearchDomainsInput) (req *request.Request, output *DescribeElasticsearchDomainsOutput) { op := &request.Operation{ - Name: opGetUpgradeHistory, - HTTPMethod: "GET", - HTTPPath: "/2015-01-01/es/upgradeDomain/{DomainName}/history", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + Name: opDescribeElasticsearchDomains, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/es/domain-info", } if input == nil { - input = &GetUpgradeHistoryInput{} + input = &DescribeElasticsearchDomainsInput{} } - output = &GetUpgradeHistoryOutput{} + output = &DescribeElasticsearchDomainsOutput{} req = c.newRequest(op, input, output) return } -// GetUpgradeHistory API operation for Amazon Elasticsearch Service. +// DescribeElasticsearchDomains API operation for Amazon Elasticsearch Service. // -// Retrieves the complete history of the last 10 upgrades that were performed -// on the domain. +// Returns domain configuration information about the specified Elasticsearch +// domains, including the domain ID, domain endpoint, and domain ARN. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation GetUpgradeHistory for usage and error information. +// API operation DescribeElasticsearchDomains for usage and error information. // // Returned Error Types: // * BaseException // An error occurred while processing the request. // -// * ResourceNotFoundException -// An exception for accessing or deleting a resource that does not exist. Gives -// http status code of 400. 
-// -// * DisabledOperationException -// An error occured because the client wanted to access a not supported operation. -// Gives http status code of 409. -// -// * ValidationException -// An exception for missing / invalid input fields. Gives http status code of -// 400. -// // * InternalException // The request processing has failed because of an unknown error, exception // or failure (the failure is internal to the service) . Gives http status code // of 500. // -func (c *ElasticsearchService) GetUpgradeHistory(input *GetUpgradeHistoryInput) (*GetUpgradeHistoryOutput, error) { - req, out := c.GetUpgradeHistoryRequest(input) +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +func (c *ElasticsearchService) DescribeElasticsearchDomains(input *DescribeElasticsearchDomainsInput) (*DescribeElasticsearchDomainsOutput, error) { + req, out := c.DescribeElasticsearchDomainsRequest(input) return out, req.Send() } -// GetUpgradeHistoryWithContext is the same as GetUpgradeHistory with the addition of +// DescribeElasticsearchDomainsWithContext is the same as DescribeElasticsearchDomains with the addition of // the ability to pass a context and additional request options. // -// See GetUpgradeHistory for details on how to use this API operation. +// See DescribeElasticsearchDomains for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) GetUpgradeHistoryWithContext(ctx aws.Context, input *GetUpgradeHistoryInput, opts ...request.Option) (*GetUpgradeHistoryOutput, error) { - req, out := c.GetUpgradeHistoryRequest(input) +func (c *ElasticsearchService) DescribeElasticsearchDomainsWithContext(ctx aws.Context, input *DescribeElasticsearchDomainsInput, opts ...request.Option) (*DescribeElasticsearchDomainsOutput, error) { + req, out := c.DescribeElasticsearchDomainsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// GetUpgradeHistoryPages iterates over the pages of a GetUpgradeHistory operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +const opDescribeElasticsearchInstanceTypeLimits = "DescribeElasticsearchInstanceTypeLimits" + +// DescribeElasticsearchInstanceTypeLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticsearchInstanceTypeLimits operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See GetUpgradeHistory method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: This operation can generate multiple requests to a service. +// See DescribeElasticsearchInstanceTypeLimits for more information on using the DescribeElasticsearchInstanceTypeLimits +// API call, and error handling. // -// // Example iterating over at most 3 pages of a GetUpgradeHistory operation. 
-// pageNum := 0 -// err := client.GetUpgradeHistoryPages(params, -// func(page *elasticsearchservice.GetUpgradeHistoryOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -func (c *ElasticsearchService) GetUpgradeHistoryPages(input *GetUpgradeHistoryInput, fn func(*GetUpgradeHistoryOutput, bool) bool) error { - return c.GetUpgradeHistoryPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// GetUpgradeHistoryPagesWithContext same as GetUpgradeHistoryPages except -// it takes a Context and allows setting request options on the pages. // -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ElasticsearchService) GetUpgradeHistoryPagesWithContext(ctx aws.Context, input *GetUpgradeHistoryInput, fn func(*GetUpgradeHistoryOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *GetUpgradeHistoryInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.GetUpgradeHistoryRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*GetUpgradeHistoryOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opGetUpgradeStatus = "GetUpgradeStatus" - -// GetUpgradeStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetUpgradeStatus operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See GetUpgradeStatus for more information on using the GetUpgradeStatus -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the GetUpgradeStatusRequest method. -// req, resp := client.GetUpgradeStatusRequest(params) +// // Example sending a request using the DescribeElasticsearchInstanceTypeLimitsRequest method. 
+// req, resp := client.DescribeElasticsearchInstanceTypeLimitsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) GetUpgradeStatusRequest(input *GetUpgradeStatusInput) (req *request.Request, output *GetUpgradeStatusOutput) { +func (c *ElasticsearchService) DescribeElasticsearchInstanceTypeLimitsRequest(input *DescribeElasticsearchInstanceTypeLimitsInput) (req *request.Request, output *DescribeElasticsearchInstanceTypeLimitsOutput) { op := &request.Operation{ - Name: opGetUpgradeStatus, + Name: opDescribeElasticsearchInstanceTypeLimits, HTTPMethod: "GET", - HTTPPath: "/2015-01-01/es/upgradeDomain/{DomainName}/status", + HTTPPath: "/2015-01-01/es/instanceTypeLimits/{ElasticsearchVersion}/{InstanceType}", } if input == nil { - input = &GetUpgradeStatusInput{} + input = &DescribeElasticsearchInstanceTypeLimitsInput{} } - output = &GetUpgradeStatusOutput{} + output = &DescribeElasticsearchInstanceTypeLimitsOutput{} req = c.newRequest(op, input, output) return } -// GetUpgradeStatus API operation for Amazon Elasticsearch Service. +// DescribeElasticsearchInstanceTypeLimits API operation for Amazon Elasticsearch Service. // -// Retrieves the latest status of the last upgrade or upgrade eligibility check -// that was performed on the domain. +// Describe Elasticsearch Limits for a given InstanceType and ElasticsearchVersion. +// When modifying existing Domain, specify the DomainName to know what Limits +// are supported for modifying. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation GetUpgradeStatus for usage and error information. +// API operation DescribeElasticsearchInstanceTypeLimits for usage and error information. // // Returned Error Types: // * BaseException // An error occurred while processing the request. // +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +// * InvalidTypeException +// An exception for trying to create or access sub-resource that is either invalid +// or not supported. Gives http status code of 409. +// +// * LimitExceededException +// An exception for trying to create more than allowed resources or sub-resources. +// Gives http status code of 409. +// // * ResourceNotFoundException // An exception for accessing or deleting a resource that does not exist. Gives // http status code of 400. // -// * DisabledOperationException -// An error occured because the client wanted to access a not supported operation. -// Gives http status code of 409. -// // * ValidationException // An exception for missing / invalid input fields. Gives http status code of // 400. // -// * InternalException -// The request processing has failed because of an unknown error, exception -// or failure (the failure is internal to the service) . Gives http status code -// of 500. 
-// -func (c *ElasticsearchService) GetUpgradeStatus(input *GetUpgradeStatusInput) (*GetUpgradeStatusOutput, error) { - req, out := c.GetUpgradeStatusRequest(input) +func (c *ElasticsearchService) DescribeElasticsearchInstanceTypeLimits(input *DescribeElasticsearchInstanceTypeLimitsInput) (*DescribeElasticsearchInstanceTypeLimitsOutput, error) { + req, out := c.DescribeElasticsearchInstanceTypeLimitsRequest(input) return out, req.Send() } -// GetUpgradeStatusWithContext is the same as GetUpgradeStatus with the addition of +// DescribeElasticsearchInstanceTypeLimitsWithContext is the same as DescribeElasticsearchInstanceTypeLimits with the addition of // the ability to pass a context and additional request options. // -// See GetUpgradeStatus for details on how to use this API operation. +// See DescribeElasticsearchInstanceTypeLimits for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) GetUpgradeStatusWithContext(ctx aws.Context, input *GetUpgradeStatusInput, opts ...request.Option) (*GetUpgradeStatusOutput, error) { - req, out := c.GetUpgradeStatusRequest(input) +func (c *ElasticsearchService) DescribeElasticsearchInstanceTypeLimitsWithContext(ctx aws.Context, input *DescribeElasticsearchInstanceTypeLimitsInput, opts ...request.Option) (*DescribeElasticsearchInstanceTypeLimitsOutput, error) { + req, out := c.DescribeElasticsearchInstanceTypeLimitsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListDomainNames = "ListDomainNames" +const opDescribeInboundCrossClusterSearchConnections = "DescribeInboundCrossClusterSearchConnections" -// ListDomainNamesRequest generates a "aws/request.Request" representing the -// client's request for the ListDomainNames operation. The "output" return +// DescribeInboundCrossClusterSearchConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInboundCrossClusterSearchConnections operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListDomainNames for more information on using the ListDomainNames +// See DescribeInboundCrossClusterSearchConnections for more information on using the DescribeInboundCrossClusterSearchConnections // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListDomainNamesRequest method. -// req, resp := client.ListDomainNamesRequest(params) +// // Example sending a request using the DescribeInboundCrossClusterSearchConnectionsRequest method. 
+// req, resp := client.DescribeInboundCrossClusterSearchConnectionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) ListDomainNamesRequest(input *ListDomainNamesInput) (req *request.Request, output *ListDomainNamesOutput) { +func (c *ElasticsearchService) DescribeInboundCrossClusterSearchConnectionsRequest(input *DescribeInboundCrossClusterSearchConnectionsInput) (req *request.Request, output *DescribeInboundCrossClusterSearchConnectionsOutput) { op := &request.Operation{ - Name: opListDomainNames, - HTTPMethod: "GET", - HTTPPath: "/2015-01-01/domain", + Name: opDescribeInboundCrossClusterSearchConnections, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/es/ccs/inboundConnection/search", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &ListDomainNamesInput{} + input = &DescribeInboundCrossClusterSearchConnectionsInput{} } - output = &ListDomainNamesOutput{} + output = &DescribeInboundCrossClusterSearchConnectionsOutput{} req = c.newRequest(op, input, output) return } -// ListDomainNames API operation for Amazon Elasticsearch Service. +// DescribeInboundCrossClusterSearchConnections API operation for Amazon Elasticsearch Service. // -// Returns the name of all Elasticsearch domains owned by the current user's -// account. +// Lists all the inbound cross-cluster search connections for a destination +// domain. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation ListDomainNames for usage and error information. +// API operation DescribeInboundCrossClusterSearchConnections for usage and error information. // // Returned Error Types: -// * BaseException -// An error occurred while processing the request. +// * InvalidPaginationTokenException +// The request processing has failed because of invalid pagination token provided +// by customer. Returns an HTTP status code of 400. // -// * ValidationException -// An exception for missing / invalid input fields. Gives http status code of -// 400. +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. // -func (c *ElasticsearchService) ListDomainNames(input *ListDomainNamesInput) (*ListDomainNamesOutput, error) { - req, out := c.ListDomainNamesRequest(input) +func (c *ElasticsearchService) DescribeInboundCrossClusterSearchConnections(input *DescribeInboundCrossClusterSearchConnectionsInput) (*DescribeInboundCrossClusterSearchConnectionsOutput, error) { + req, out := c.DescribeInboundCrossClusterSearchConnectionsRequest(input) return out, req.Send() } -// ListDomainNamesWithContext is the same as ListDomainNames with the addition of +// DescribeInboundCrossClusterSearchConnectionsWithContext is the same as DescribeInboundCrossClusterSearchConnections with the addition of // the ability to pass a context and additional request options. // -// See ListDomainNames for details on how to use this API operation. +// See DescribeInboundCrossClusterSearchConnections for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) ListDomainNamesWithContext(ctx aws.Context, input *ListDomainNamesInput, opts ...request.Option) (*ListDomainNamesOutput, error) { - req, out := c.ListDomainNamesRequest(input) +func (c *ElasticsearchService) DescribeInboundCrossClusterSearchConnectionsWithContext(ctx aws.Context, input *DescribeInboundCrossClusterSearchConnectionsInput, opts ...request.Option) (*DescribeInboundCrossClusterSearchConnectionsOutput, error) { + req, out := c.DescribeInboundCrossClusterSearchConnectionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListElasticsearchInstanceTypes = "ListElasticsearchInstanceTypes" +// DescribeInboundCrossClusterSearchConnectionsPages iterates over the pages of a DescribeInboundCrossClusterSearchConnections operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInboundCrossClusterSearchConnections method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInboundCrossClusterSearchConnections operation. +// pageNum := 0 +// err := client.DescribeInboundCrossClusterSearchConnectionsPages(params, +// func(page *elasticsearchservice.DescribeInboundCrossClusterSearchConnectionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticsearchService) DescribeInboundCrossClusterSearchConnectionsPages(input *DescribeInboundCrossClusterSearchConnectionsInput, fn func(*DescribeInboundCrossClusterSearchConnectionsOutput, bool) bool) error { + return c.DescribeInboundCrossClusterSearchConnectionsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// ListElasticsearchInstanceTypesRequest generates a "aws/request.Request" representing the -// client's request for the ListElasticsearchInstanceTypes operation. The "output" return +// DescribeInboundCrossClusterSearchConnectionsPagesWithContext same as DescribeInboundCrossClusterSearchConnectionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) DescribeInboundCrossClusterSearchConnectionsPagesWithContext(ctx aws.Context, input *DescribeInboundCrossClusterSearchConnectionsInput, fn func(*DescribeInboundCrossClusterSearchConnectionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInboundCrossClusterSearchConnectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInboundCrossClusterSearchConnectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInboundCrossClusterSearchConnectionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeOutboundCrossClusterSearchConnections = "DescribeOutboundCrossClusterSearchConnections" + +// DescribeOutboundCrossClusterSearchConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOutboundCrossClusterSearchConnections operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListElasticsearchInstanceTypes for more information on using the ListElasticsearchInstanceTypes +// See DescribeOutboundCrossClusterSearchConnections for more information on using the DescribeOutboundCrossClusterSearchConnections // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListElasticsearchInstanceTypesRequest method. -// req, resp := client.ListElasticsearchInstanceTypesRequest(params) +// // Example sending a request using the DescribeOutboundCrossClusterSearchConnectionsRequest method. +// req, resp := client.DescribeOutboundCrossClusterSearchConnectionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) ListElasticsearchInstanceTypesRequest(input *ListElasticsearchInstanceTypesInput) (req *request.Request, output *ListElasticsearchInstanceTypesOutput) { +func (c *ElasticsearchService) DescribeOutboundCrossClusterSearchConnectionsRequest(input *DescribeOutboundCrossClusterSearchConnectionsInput) (req *request.Request, output *DescribeOutboundCrossClusterSearchConnectionsOutput) { op := &request.Operation{ - Name: opListElasticsearchInstanceTypes, - HTTPMethod: "GET", - HTTPPath: "/2015-01-01/es/instanceTypes/{ElasticsearchVersion}", + Name: opDescribeOutboundCrossClusterSearchConnections, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/es/ccs/outboundConnection/search", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, @@ -1601,100 +1659,92 @@ func (c *ElasticsearchService) ListElasticsearchInstanceTypesRequest(input *List } if input == nil { - input = &ListElasticsearchInstanceTypesInput{} + input = &DescribeOutboundCrossClusterSearchConnectionsInput{} } - output = &ListElasticsearchInstanceTypesOutput{} + output = &DescribeOutboundCrossClusterSearchConnectionsOutput{} req = c.newRequest(op, input, output) return } -// ListElasticsearchInstanceTypes API operation for Amazon Elasticsearch Service. +// DescribeOutboundCrossClusterSearchConnections API operation for Amazon Elasticsearch Service. // -// List all Elasticsearch instance types that are supported for given ElasticsearchVersion +// Lists all the outbound cross-cluster search connections for a source domain. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation ListElasticsearchInstanceTypes for usage and error information. +// API operation DescribeOutboundCrossClusterSearchConnections for usage and error information. // // Returned Error Types: -// * BaseException -// An error occurred while processing the request. -// -// * InternalException -// The request processing has failed because of an unknown error, exception -// or failure (the failure is internal to the service) . Gives http status code -// of 500. -// -// * ResourceNotFoundException -// An exception for accessing or deleting a resource that does not exist. Gives -// http status code of 400. +// * InvalidPaginationTokenException +// The request processing has failed because of invalid pagination token provided +// by customer. Returns an HTTP status code of 400. // -// * ValidationException -// An exception for missing / invalid input fields. Gives http status code of -// 400. +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. // -func (c *ElasticsearchService) ListElasticsearchInstanceTypes(input *ListElasticsearchInstanceTypesInput) (*ListElasticsearchInstanceTypesOutput, error) { - req, out := c.ListElasticsearchInstanceTypesRequest(input) +func (c *ElasticsearchService) DescribeOutboundCrossClusterSearchConnections(input *DescribeOutboundCrossClusterSearchConnectionsInput) (*DescribeOutboundCrossClusterSearchConnectionsOutput, error) { + req, out := c.DescribeOutboundCrossClusterSearchConnectionsRequest(input) return out, req.Send() } -// ListElasticsearchInstanceTypesWithContext is the same as ListElasticsearchInstanceTypes with the addition of +// DescribeOutboundCrossClusterSearchConnectionsWithContext is the same as DescribeOutboundCrossClusterSearchConnections with the addition of // the ability to pass a context and additional request options. // -// See ListElasticsearchInstanceTypes for details on how to use this API operation. +// See DescribeOutboundCrossClusterSearchConnections for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) ListElasticsearchInstanceTypesWithContext(ctx aws.Context, input *ListElasticsearchInstanceTypesInput, opts ...request.Option) (*ListElasticsearchInstanceTypesOutput, error) { - req, out := c.ListElasticsearchInstanceTypesRequest(input) +func (c *ElasticsearchService) DescribeOutboundCrossClusterSearchConnectionsWithContext(ctx aws.Context, input *DescribeOutboundCrossClusterSearchConnectionsInput, opts ...request.Option) (*DescribeOutboundCrossClusterSearchConnectionsOutput, error) { + req, out := c.DescribeOutboundCrossClusterSearchConnectionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListElasticsearchInstanceTypesPages iterates over the pages of a ListElasticsearchInstanceTypes operation, +// DescribeOutboundCrossClusterSearchConnectionsPages iterates over the pages of a DescribeOutboundCrossClusterSearchConnections operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. 
// -// See ListElasticsearchInstanceTypes method for more information on how to use this operation. +// See DescribeOutboundCrossClusterSearchConnections method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListElasticsearchInstanceTypes operation. +// // Example iterating over at most 3 pages of a DescribeOutboundCrossClusterSearchConnections operation. // pageNum := 0 -// err := client.ListElasticsearchInstanceTypesPages(params, -// func(page *elasticsearchservice.ListElasticsearchInstanceTypesOutput, lastPage bool) bool { +// err := client.DescribeOutboundCrossClusterSearchConnectionsPages(params, +// func(page *elasticsearchservice.DescribeOutboundCrossClusterSearchConnectionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *ElasticsearchService) ListElasticsearchInstanceTypesPages(input *ListElasticsearchInstanceTypesInput, fn func(*ListElasticsearchInstanceTypesOutput, bool) bool) error { - return c.ListElasticsearchInstanceTypesPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *ElasticsearchService) DescribeOutboundCrossClusterSearchConnectionsPages(input *DescribeOutboundCrossClusterSearchConnectionsInput, fn func(*DescribeOutboundCrossClusterSearchConnectionsOutput, bool) bool) error { + return c.DescribeOutboundCrossClusterSearchConnectionsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListElasticsearchInstanceTypesPagesWithContext same as ListElasticsearchInstanceTypesPages except +// DescribeOutboundCrossClusterSearchConnectionsPagesWithContext same as DescribeOutboundCrossClusterSearchConnectionsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) ListElasticsearchInstanceTypesPagesWithContext(ctx aws.Context, input *ListElasticsearchInstanceTypesInput, fn func(*ListElasticsearchInstanceTypesOutput, bool) bool, opts ...request.Option) error { +func (c *ElasticsearchService) DescribeOutboundCrossClusterSearchConnectionsPagesWithContext(ctx aws.Context, input *DescribeOutboundCrossClusterSearchConnectionsInput, fn func(*DescribeOutboundCrossClusterSearchConnectionsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListElasticsearchInstanceTypesInput + var inCpy *DescribeOutboundCrossClusterSearchConnectionsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListElasticsearchInstanceTypesRequest(inCpy) + req, _ := c.DescribeOutboundCrossClusterSearchConnectionsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) 
return req, nil @@ -1702,7 +1752,7 @@ func (c *ElasticsearchService) ListElasticsearchInstanceTypesPagesWithContext(ct } for p.Next() { - if !fn(p.Page().(*ListElasticsearchInstanceTypesOutput), !p.HasNextPage()) { + if !fn(p.Page().(*DescribeOutboundCrossClusterSearchConnectionsOutput), !p.HasNextPage()) { break } } @@ -1710,35 +1760,35 @@ func (c *ElasticsearchService) ListElasticsearchInstanceTypesPagesWithContext(ct return p.Err() } -const opListElasticsearchVersions = "ListElasticsearchVersions" +const opDescribePackages = "DescribePackages" -// ListElasticsearchVersionsRequest generates a "aws/request.Request" representing the -// client's request for the ListElasticsearchVersions operation. The "output" return +// DescribePackagesRequest generates a "aws/request.Request" representing the +// client's request for the DescribePackages operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListElasticsearchVersions for more information on using the ListElasticsearchVersions +// See DescribePackages for more information on using the DescribePackages // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListElasticsearchVersionsRequest method. -// req, resp := client.ListElasticsearchVersionsRequest(params) +// // Example sending a request using the DescribePackagesRequest method. +// req, resp := client.DescribePackagesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) ListElasticsearchVersionsRequest(input *ListElasticsearchVersionsInput) (req *request.Request, output *ListElasticsearchVersionsOutput) { +func (c *ElasticsearchService) DescribePackagesRequest(input *DescribePackagesInput) (req *request.Request, output *DescribePackagesOutput) { op := &request.Operation{ - Name: opListElasticsearchVersions, - HTTPMethod: "GET", - HTTPPath: "/2015-01-01/es/versions", + Name: opDescribePackages, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/packages/describe", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, @@ -1748,24 +1798,25 @@ func (c *ElasticsearchService) ListElasticsearchVersionsRequest(input *ListElast } if input == nil { - input = &ListElasticsearchVersionsInput{} + input = &DescribePackagesInput{} } - output = &ListElasticsearchVersionsOutput{} + output = &DescribePackagesOutput{} req = c.newRequest(op, input, output) return } -// ListElasticsearchVersions API operation for Amazon Elasticsearch Service. +// DescribePackages API operation for Amazon Elasticsearch Service. // -// List all supported Elasticsearch versions +// Describes all packages available to Amazon ES. Includes options for filtering, +// limiting the number of results, and pagination. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation ListElasticsearchVersions for usage and error information. 
+// API operation DescribePackages for usage and error information. // // Returned Error Types: // * BaseException @@ -1780,68 +1831,72 @@ func (c *ElasticsearchService) ListElasticsearchVersionsRequest(input *ListElast // An exception for accessing or deleting a resource that does not exist. Gives // http status code of 400. // +// * AccessDeniedException +// An error occurred because user does not have permissions to access the resource. +// Returns HTTP status code 403. +// // * ValidationException // An exception for missing / invalid input fields. Gives http status code of // 400. // -func (c *ElasticsearchService) ListElasticsearchVersions(input *ListElasticsearchVersionsInput) (*ListElasticsearchVersionsOutput, error) { - req, out := c.ListElasticsearchVersionsRequest(input) +func (c *ElasticsearchService) DescribePackages(input *DescribePackagesInput) (*DescribePackagesOutput, error) { + req, out := c.DescribePackagesRequest(input) return out, req.Send() } -// ListElasticsearchVersionsWithContext is the same as ListElasticsearchVersions with the addition of +// DescribePackagesWithContext is the same as DescribePackages with the addition of // the ability to pass a context and additional request options. // -// See ListElasticsearchVersions for details on how to use this API operation. +// See DescribePackages for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) ListElasticsearchVersionsWithContext(ctx aws.Context, input *ListElasticsearchVersionsInput, opts ...request.Option) (*ListElasticsearchVersionsOutput, error) { - req, out := c.ListElasticsearchVersionsRequest(input) +func (c *ElasticsearchService) DescribePackagesWithContext(ctx aws.Context, input *DescribePackagesInput, opts ...request.Option) (*DescribePackagesOutput, error) { + req, out := c.DescribePackagesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListElasticsearchVersionsPages iterates over the pages of a ListElasticsearchVersions operation, +// DescribePackagesPages iterates over the pages of a DescribePackages operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListElasticsearchVersions method for more information on how to use this operation. +// See DescribePackages method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListElasticsearchVersions operation. +// // Example iterating over at most 3 pages of a DescribePackages operation. 
// pageNum := 0 -// err := client.ListElasticsearchVersionsPages(params, -// func(page *elasticsearchservice.ListElasticsearchVersionsOutput, lastPage bool) bool { +// err := client.DescribePackagesPages(params, +// func(page *elasticsearchservice.DescribePackagesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *ElasticsearchService) ListElasticsearchVersionsPages(input *ListElasticsearchVersionsInput, fn func(*ListElasticsearchVersionsOutput, bool) bool) error { - return c.ListElasticsearchVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *ElasticsearchService) DescribePackagesPages(input *DescribePackagesInput, fn func(*DescribePackagesOutput, bool) bool) error { + return c.DescribePackagesPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListElasticsearchVersionsPagesWithContext same as ListElasticsearchVersionsPages except +// DescribePackagesPagesWithContext same as DescribePackagesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) ListElasticsearchVersionsPagesWithContext(ctx aws.Context, input *ListElasticsearchVersionsInput, fn func(*ListElasticsearchVersionsOutput, bool) bool, opts ...request.Option) error { +func (c *ElasticsearchService) DescribePackagesPagesWithContext(ctx aws.Context, input *DescribePackagesInput, fn func(*DescribePackagesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListElasticsearchVersionsInput + var inCpy *DescribePackagesInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListElasticsearchVersionsRequest(inCpy) + req, _ := c.DescribePackagesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -1849,7 +1904,7 @@ func (c *ElasticsearchService) ListElasticsearchVersionsPagesWithContext(ctx aws } for p.Next() { - if !fn(p.Page().(*ListElasticsearchVersionsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*DescribePackagesOutput), !p.HasNextPage()) { break } } @@ -1857,61 +1912,64 @@ func (c *ElasticsearchService) ListElasticsearchVersionsPagesWithContext(ctx aws return p.Err() } -const opListTags = "ListTags" +const opDescribeReservedElasticsearchInstanceOfferings = "DescribeReservedElasticsearchInstanceOfferings" -// ListTagsRequest generates a "aws/request.Request" representing the -// client's request for the ListTags operation. The "output" return +// DescribeReservedElasticsearchInstanceOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedElasticsearchInstanceOfferings operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTags for more information on using the ListTags +// See DescribeReservedElasticsearchInstanceOfferings for more information on using the DescribeReservedElasticsearchInstanceOfferings // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTagsRequest method. -// req, resp := client.ListTagsRequest(params) +// // Example sending a request using the DescribeReservedElasticsearchInstanceOfferingsRequest method. +// req, resp := client.DescribeReservedElasticsearchInstanceOfferingsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { +func (c *ElasticsearchService) DescribeReservedElasticsearchInstanceOfferingsRequest(input *DescribeReservedElasticsearchInstanceOfferingsInput) (req *request.Request, output *DescribeReservedElasticsearchInstanceOfferingsOutput) { op := &request.Operation{ - Name: opListTags, + Name: opDescribeReservedElasticsearchInstanceOfferings, HTTPMethod: "GET", - HTTPPath: "/2015-01-01/tags/", + HTTPPath: "/2015-01-01/es/reservedInstanceOfferings", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &ListTagsInput{} + input = &DescribeReservedElasticsearchInstanceOfferingsInput{} } - output = &ListTagsOutput{} + output = &DescribeReservedElasticsearchInstanceOfferingsOutput{} req = c.newRequest(op, input, output) return } -// ListTags API operation for Amazon Elasticsearch Service. +// DescribeReservedElasticsearchInstanceOfferings API operation for Amazon Elasticsearch Service. // -// Returns all tags for the given Elasticsearch domain. +// Lists available reserved Elasticsearch instance offerings. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation ListTags for usage and error information. +// API operation DescribeReservedElasticsearchInstanceOfferings for usage and error information. // // Returned Error Types: -// * BaseException -// An error occurred while processing the request. -// // * ResourceNotFoundException // An exception for accessing or deleting a resource that does not exist. Gives // http status code of 400. @@ -1920,454 +1978,631 @@ func (c *ElasticsearchService) ListTagsRequest(input *ListTagsInput) (req *reque // An exception for missing / invalid input fields. Gives http status code of // 400. // +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. +// // * InternalException // The request processing has failed because of an unknown error, exception // or failure (the failure is internal to the service) . Gives http status code // of 500. 
// -func (c *ElasticsearchService) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { - req, out := c.ListTagsRequest(input) +func (c *ElasticsearchService) DescribeReservedElasticsearchInstanceOfferings(input *DescribeReservedElasticsearchInstanceOfferingsInput) (*DescribeReservedElasticsearchInstanceOfferingsOutput, error) { + req, out := c.DescribeReservedElasticsearchInstanceOfferingsRequest(input) return out, req.Send() } -// ListTagsWithContext is the same as ListTags with the addition of +// DescribeReservedElasticsearchInstanceOfferingsWithContext is the same as DescribeReservedElasticsearchInstanceOfferings with the addition of // the ability to pass a context and additional request options. // -// See ListTags for details on how to use this API operation. +// See DescribeReservedElasticsearchInstanceOfferings for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, opts ...request.Option) (*ListTagsOutput, error) { - req, out := c.ListTagsRequest(input) +func (c *ElasticsearchService) DescribeReservedElasticsearchInstanceOfferingsWithContext(ctx aws.Context, input *DescribeReservedElasticsearchInstanceOfferingsInput, opts ...request.Option) (*DescribeReservedElasticsearchInstanceOfferingsOutput, error) { + req, out := c.DescribeReservedElasticsearchInstanceOfferingsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPurchaseReservedElasticsearchInstanceOffering = "PurchaseReservedElasticsearchInstanceOffering" +// DescribeReservedElasticsearchInstanceOfferingsPages iterates over the pages of a DescribeReservedElasticsearchInstanceOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedElasticsearchInstanceOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedElasticsearchInstanceOfferings operation. +// pageNum := 0 +// err := client.DescribeReservedElasticsearchInstanceOfferingsPages(params, +// func(page *elasticsearchservice.DescribeReservedElasticsearchInstanceOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticsearchService) DescribeReservedElasticsearchInstanceOfferingsPages(input *DescribeReservedElasticsearchInstanceOfferingsInput, fn func(*DescribeReservedElasticsearchInstanceOfferingsOutput, bool) bool) error { + return c.DescribeReservedElasticsearchInstanceOfferingsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// PurchaseReservedElasticsearchInstanceOfferingRequest generates a "aws/request.Request" representing the -// client's request for the PurchaseReservedElasticsearchInstanceOffering operation. The "output" return +// DescribeReservedElasticsearchInstanceOfferingsPagesWithContext same as DescribeReservedElasticsearchInstanceOfferingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) DescribeReservedElasticsearchInstanceOfferingsPagesWithContext(ctx aws.Context, input *DescribeReservedElasticsearchInstanceOfferingsInput, fn func(*DescribeReservedElasticsearchInstanceOfferingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReservedElasticsearchInstanceOfferingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReservedElasticsearchInstanceOfferingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeReservedElasticsearchInstanceOfferingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeReservedElasticsearchInstances = "DescribeReservedElasticsearchInstances" + +// DescribeReservedElasticsearchInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedElasticsearchInstances operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PurchaseReservedElasticsearchInstanceOffering for more information on using the PurchaseReservedElasticsearchInstanceOffering +// See DescribeReservedElasticsearchInstances for more information on using the DescribeReservedElasticsearchInstances // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PurchaseReservedElasticsearchInstanceOfferingRequest method. -// req, resp := client.PurchaseReservedElasticsearchInstanceOfferingRequest(params) +// // Example sending a request using the DescribeReservedElasticsearchInstancesRequest method. 
+// req, resp := client.DescribeReservedElasticsearchInstancesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) PurchaseReservedElasticsearchInstanceOfferingRequest(input *PurchaseReservedElasticsearchInstanceOfferingInput) (req *request.Request, output *PurchaseReservedElasticsearchInstanceOfferingOutput) { +func (c *ElasticsearchService) DescribeReservedElasticsearchInstancesRequest(input *DescribeReservedElasticsearchInstancesInput) (req *request.Request, output *DescribeReservedElasticsearchInstancesOutput) { op := &request.Operation{ - Name: opPurchaseReservedElasticsearchInstanceOffering, - HTTPMethod: "POST", - HTTPPath: "/2015-01-01/es/purchaseReservedInstanceOffering", + Name: opDescribeReservedElasticsearchInstances, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/es/reservedInstances", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &PurchaseReservedElasticsearchInstanceOfferingInput{} + input = &DescribeReservedElasticsearchInstancesInput{} } - output = &PurchaseReservedElasticsearchInstanceOfferingOutput{} + output = &DescribeReservedElasticsearchInstancesOutput{} req = c.newRequest(op, input, output) return } -// PurchaseReservedElasticsearchInstanceOffering API operation for Amazon Elasticsearch Service. +// DescribeReservedElasticsearchInstances API operation for Amazon Elasticsearch Service. // -// Allows you to purchase reserved Elasticsearch instances. +// Returns information about reserved Elasticsearch instances for this account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation PurchaseReservedElasticsearchInstanceOffering for usage and error information. +// API operation DescribeReservedElasticsearchInstances for usage and error information. // // Returned Error Types: // * ResourceNotFoundException // An exception for accessing or deleting a resource that does not exist. Gives // http status code of 400. // -// * ResourceAlreadyExistsException -// An exception for creating a resource that already exists. Gives http status -// code of 400. -// -// * LimitExceededException -// An exception for trying to create more than allowed resources or sub-resources. -// Gives http status code of 409. -// -// * DisabledOperationException -// An error occured because the client wanted to access a not supported operation. -// Gives http status code of 409. +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. // // * ValidationException // An exception for missing / invalid input fields. Gives http status code of // 400. // -// * InternalException -// The request processing has failed because of an unknown error, exception -// or failure (the failure is internal to the service) . Gives http status code -// of 500. +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. 
// -func (c *ElasticsearchService) PurchaseReservedElasticsearchInstanceOffering(input *PurchaseReservedElasticsearchInstanceOfferingInput) (*PurchaseReservedElasticsearchInstanceOfferingOutput, error) { - req, out := c.PurchaseReservedElasticsearchInstanceOfferingRequest(input) +func (c *ElasticsearchService) DescribeReservedElasticsearchInstances(input *DescribeReservedElasticsearchInstancesInput) (*DescribeReservedElasticsearchInstancesOutput, error) { + req, out := c.DescribeReservedElasticsearchInstancesRequest(input) return out, req.Send() } -// PurchaseReservedElasticsearchInstanceOfferingWithContext is the same as PurchaseReservedElasticsearchInstanceOffering with the addition of +// DescribeReservedElasticsearchInstancesWithContext is the same as DescribeReservedElasticsearchInstances with the addition of // the ability to pass a context and additional request options. // -// See PurchaseReservedElasticsearchInstanceOffering for details on how to use this API operation. +// See DescribeReservedElasticsearchInstances for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) PurchaseReservedElasticsearchInstanceOfferingWithContext(ctx aws.Context, input *PurchaseReservedElasticsearchInstanceOfferingInput, opts ...request.Option) (*PurchaseReservedElasticsearchInstanceOfferingOutput, error) { - req, out := c.PurchaseReservedElasticsearchInstanceOfferingRequest(input) +func (c *ElasticsearchService) DescribeReservedElasticsearchInstancesWithContext(ctx aws.Context, input *DescribeReservedElasticsearchInstancesInput, opts ...request.Option) (*DescribeReservedElasticsearchInstancesOutput, error) { + req, out := c.DescribeReservedElasticsearchInstancesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRemoveTags = "RemoveTags" +// DescribeReservedElasticsearchInstancesPages iterates over the pages of a DescribeReservedElasticsearchInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedElasticsearchInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedElasticsearchInstances operation. +// pageNum := 0 +// err := client.DescribeReservedElasticsearchInstancesPages(params, +// func(page *elasticsearchservice.DescribeReservedElasticsearchInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticsearchService) DescribeReservedElasticsearchInstancesPages(input *DescribeReservedElasticsearchInstancesInput, fn func(*DescribeReservedElasticsearchInstancesOutput, bool) bool) error { + return c.DescribeReservedElasticsearchInstancesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// RemoveTagsRequest generates a "aws/request.Request" representing the -// client's request for the RemoveTags operation. The "output" return +// DescribeReservedElasticsearchInstancesPagesWithContext same as DescribeReservedElasticsearchInstancesPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) DescribeReservedElasticsearchInstancesPagesWithContext(ctx aws.Context, input *DescribeReservedElasticsearchInstancesInput, fn func(*DescribeReservedElasticsearchInstancesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeReservedElasticsearchInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeReservedElasticsearchInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeReservedElasticsearchInstancesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDissociatePackage = "DissociatePackage" + +// DissociatePackageRequest generates a "aws/request.Request" representing the +// client's request for the DissociatePackage operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RemoveTags for more information on using the RemoveTags +// See DissociatePackage for more information on using the DissociatePackage // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RemoveTagsRequest method. -// req, resp := client.RemoveTagsRequest(params) +// // Example sending a request using the DissociatePackageRequest method. +// req, resp := client.DissociatePackageRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { +func (c *ElasticsearchService) DissociatePackageRequest(input *DissociatePackageInput) (req *request.Request, output *DissociatePackageOutput) { op := &request.Operation{ - Name: opRemoveTags, + Name: opDissociatePackage, HTTPMethod: "POST", - HTTPPath: "/2015-01-01/tags-removal", + HTTPPath: "/2015-01-01/packages/dissociate/{PackageID}/{DomainName}", } if input == nil { - input = &RemoveTagsInput{} + input = &DissociatePackageInput{} } - output = &RemoveTagsOutput{} + output = &DissociatePackageOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// RemoveTags API operation for Amazon Elasticsearch Service. +// DissociatePackage API operation for Amazon Elasticsearch Service. // -// Removes the specified set of tags from the specified Elasticsearch domain. +// Dissociates a package from the Amazon ES domain. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation RemoveTags for usage and error information. 
+// API operation DissociatePackage for usage and error information. // // Returned Error Types: // * BaseException // An error occurred while processing the request. // -// * ValidationException -// An exception for missing / invalid input fields. Gives http status code of -// 400. -// // * InternalException // The request processing has failed because of an unknown error, exception // or failure (the failure is internal to the service) . Gives http status code // of 500. // -func (c *ElasticsearchService) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { - req, out := c.RemoveTagsRequest(input) - return out, req.Send() -} - -// RemoveTagsWithContext is the same as RemoveTags with the addition of -// the ability to pass a context and additional request options. -// -// See RemoveTags for details on how to use this API operation. +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * AccessDeniedException +// An error occurred because user does not have permissions to access the resource. +// Returns HTTP status code 403. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +// * ConflictException +// An error occurred because the client attempts to remove a resource that is +// currently in use. Returns HTTP status code 409. +// +func (c *ElasticsearchService) DissociatePackage(input *DissociatePackageInput) (*DissociatePackageOutput, error) { + req, out := c.DissociatePackageRequest(input) + return out, req.Send() +} + +// DissociatePackageWithContext is the same as DissociatePackage with the addition of +// the ability to pass a context and additional request options. +// +// See DissociatePackage for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) RemoveTagsWithContext(ctx aws.Context, input *RemoveTagsInput, opts ...request.Option) (*RemoveTagsOutput, error) { - req, out := c.RemoveTagsRequest(input) +func (c *ElasticsearchService) DissociatePackageWithContext(ctx aws.Context, input *DissociatePackageInput, opts ...request.Option) (*DissociatePackageOutput, error) { + req, out := c.DissociatePackageRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStartElasticsearchServiceSoftwareUpdate = "StartElasticsearchServiceSoftwareUpdate" +const opGetCompatibleElasticsearchVersions = "GetCompatibleElasticsearchVersions" -// StartElasticsearchServiceSoftwareUpdateRequest generates a "aws/request.Request" representing the -// client's request for the StartElasticsearchServiceSoftwareUpdate operation. The "output" return +// GetCompatibleElasticsearchVersionsRequest generates a "aws/request.Request" representing the +// client's request for the GetCompatibleElasticsearchVersions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See StartElasticsearchServiceSoftwareUpdate for more information on using the StartElasticsearchServiceSoftwareUpdate +// See GetCompatibleElasticsearchVersions for more information on using the GetCompatibleElasticsearchVersions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartElasticsearchServiceSoftwareUpdateRequest method. -// req, resp := client.StartElasticsearchServiceSoftwareUpdateRequest(params) +// // Example sending a request using the GetCompatibleElasticsearchVersionsRequest method. +// req, resp := client.GetCompatibleElasticsearchVersionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) StartElasticsearchServiceSoftwareUpdateRequest(input *StartElasticsearchServiceSoftwareUpdateInput) (req *request.Request, output *StartElasticsearchServiceSoftwareUpdateOutput) { +func (c *ElasticsearchService) GetCompatibleElasticsearchVersionsRequest(input *GetCompatibleElasticsearchVersionsInput) (req *request.Request, output *GetCompatibleElasticsearchVersionsOutput) { op := &request.Operation{ - Name: opStartElasticsearchServiceSoftwareUpdate, - HTTPMethod: "POST", - HTTPPath: "/2015-01-01/es/serviceSoftwareUpdate/start", + Name: opGetCompatibleElasticsearchVersions, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/es/compatibleVersions", } if input == nil { - input = &StartElasticsearchServiceSoftwareUpdateInput{} + input = &GetCompatibleElasticsearchVersionsInput{} } - output = &StartElasticsearchServiceSoftwareUpdateOutput{} + output = &GetCompatibleElasticsearchVersionsOutput{} req = c.newRequest(op, input, output) return } -// StartElasticsearchServiceSoftwareUpdate API operation for Amazon Elasticsearch Service. +// GetCompatibleElasticsearchVersions API operation for Amazon Elasticsearch Service. // -// Schedules a service software update for an Amazon ES domain. +// Returns a list of upgrade compatible Elastisearch versions. You can optionally +// pass a DomainName to get all upgrade compatible Elasticsearch versions for +// that specific domain. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation StartElasticsearchServiceSoftwareUpdate for usage and error information. +// API operation GetCompatibleElasticsearchVersions for usage and error information. // // Returned Error Types: // * BaseException // An error occurred while processing the request. // -// * InternalException -// The request processing has failed because of an unknown error, exception -// or failure (the failure is internal to the service) . Gives http status code -// of 500. -// // * ResourceNotFoundException // An exception for accessing or deleting a resource that does not exist. Gives // http status code of 400. // +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. +// // * ValidationException // An exception for missing / invalid input fields. Gives http status code of // 400. 
// -func (c *ElasticsearchService) StartElasticsearchServiceSoftwareUpdate(input *StartElasticsearchServiceSoftwareUpdateInput) (*StartElasticsearchServiceSoftwareUpdateOutput, error) { - req, out := c.StartElasticsearchServiceSoftwareUpdateRequest(input) +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +func (c *ElasticsearchService) GetCompatibleElasticsearchVersions(input *GetCompatibleElasticsearchVersionsInput) (*GetCompatibleElasticsearchVersionsOutput, error) { + req, out := c.GetCompatibleElasticsearchVersionsRequest(input) return out, req.Send() } -// StartElasticsearchServiceSoftwareUpdateWithContext is the same as StartElasticsearchServiceSoftwareUpdate with the addition of +// GetCompatibleElasticsearchVersionsWithContext is the same as GetCompatibleElasticsearchVersions with the addition of // the ability to pass a context and additional request options. // -// See StartElasticsearchServiceSoftwareUpdate for details on how to use this API operation. +// See GetCompatibleElasticsearchVersions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) StartElasticsearchServiceSoftwareUpdateWithContext(ctx aws.Context, input *StartElasticsearchServiceSoftwareUpdateInput, opts ...request.Option) (*StartElasticsearchServiceSoftwareUpdateOutput, error) { - req, out := c.StartElasticsearchServiceSoftwareUpdateRequest(input) +func (c *ElasticsearchService) GetCompatibleElasticsearchVersionsWithContext(ctx aws.Context, input *GetCompatibleElasticsearchVersionsInput, opts ...request.Option) (*GetCompatibleElasticsearchVersionsOutput, error) { + req, out := c.GetCompatibleElasticsearchVersionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateElasticsearchDomainConfig = "UpdateElasticsearchDomainConfig" +const opGetUpgradeHistory = "GetUpgradeHistory" -// UpdateElasticsearchDomainConfigRequest generates a "aws/request.Request" representing the -// client's request for the UpdateElasticsearchDomainConfig operation. The "output" return +// GetUpgradeHistoryRequest generates a "aws/request.Request" representing the +// client's request for the GetUpgradeHistory operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateElasticsearchDomainConfig for more information on using the UpdateElasticsearchDomainConfig +// See GetUpgradeHistory for more information on using the GetUpgradeHistory // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateElasticsearchDomainConfigRequest method. -// req, resp := client.UpdateElasticsearchDomainConfigRequest(params) +// // Example sending a request using the GetUpgradeHistoryRequest method. 
+// req, resp := client.GetUpgradeHistoryRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) UpdateElasticsearchDomainConfigRequest(input *UpdateElasticsearchDomainConfigInput) (req *request.Request, output *UpdateElasticsearchDomainConfigOutput) { +func (c *ElasticsearchService) GetUpgradeHistoryRequest(input *GetUpgradeHistoryInput) (req *request.Request, output *GetUpgradeHistoryOutput) { op := &request.Operation{ - Name: opUpdateElasticsearchDomainConfig, - HTTPMethod: "POST", - HTTPPath: "/2015-01-01/es/domain/{DomainName}/config", + Name: opGetUpgradeHistory, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/es/upgradeDomain/{DomainName}/history", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UpdateElasticsearchDomainConfigInput{} + input = &GetUpgradeHistoryInput{} } - output = &UpdateElasticsearchDomainConfigOutput{} + output = &GetUpgradeHistoryOutput{} req = c.newRequest(op, input, output) return } -// UpdateElasticsearchDomainConfig API operation for Amazon Elasticsearch Service. +// GetUpgradeHistory API operation for Amazon Elasticsearch Service. // -// Modifies the cluster configuration of the specified Elasticsearch domain, -// setting as setting the instance type and the number of instances. +// Retrieves the complete history of the last 10 upgrades that were performed +// on the domain. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation UpdateElasticsearchDomainConfig for usage and error information. +// API operation GetUpgradeHistory for usage and error information. // // Returned Error Types: // * BaseException // An error occurred while processing the request. // -// * InternalException -// The request processing has failed because of an unknown error, exception -// or failure (the failure is internal to the service) . Gives http status code -// of 500. -// -// * InvalidTypeException -// An exception for trying to create or access sub-resource that is either invalid -// or not supported. Gives http status code of 409. -// -// * LimitExceededException -// An exception for trying to create more than allowed resources or sub-resources. -// Gives http status code of 409. -// // * ResourceNotFoundException // An exception for accessing or deleting a resource that does not exist. Gives // http status code of 400. // +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. +// // * ValidationException // An exception for missing / invalid input fields. Gives http status code of // 400. // -func (c *ElasticsearchService) UpdateElasticsearchDomainConfig(input *UpdateElasticsearchDomainConfigInput) (*UpdateElasticsearchDomainConfigOutput, error) { - req, out := c.UpdateElasticsearchDomainConfigRequest(input) +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. 
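The WithContext variants described above accept any context.Context for cancellation. A sketch that bounds a GetUpgradeHistory call with a timeout; the domain name is a placeholder and the `svc` client is assumed as before.

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

// upgradeHistory fetches one page of upgrade history with a 30s request timeout.
func upgradeHistory(svc *elasticsearchservice.ElasticsearchService) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := svc.GetUpgradeHistoryWithContext(ctx, &elasticsearchservice.GetUpgradeHistoryInput{
		DomainName: aws.String("example-domain"), // placeholder
	})
	if err != nil {
		return err
	}
	for _, h := range out.UpgradeHistories {
		fmt.Println(aws.StringValue(h.UpgradeName), aws.StringValue(h.UpgradeStatus))
	}
	return nil
}
```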
+// +func (c *ElasticsearchService) GetUpgradeHistory(input *GetUpgradeHistoryInput) (*GetUpgradeHistoryOutput, error) { + req, out := c.GetUpgradeHistoryRequest(input) return out, req.Send() } -// UpdateElasticsearchDomainConfigWithContext is the same as UpdateElasticsearchDomainConfig with the addition of +// GetUpgradeHistoryWithContext is the same as GetUpgradeHistory with the addition of // the ability to pass a context and additional request options. // -// See UpdateElasticsearchDomainConfig for details on how to use this API operation. +// See GetUpgradeHistory for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) UpdateElasticsearchDomainConfigWithContext(ctx aws.Context, input *UpdateElasticsearchDomainConfigInput, opts ...request.Option) (*UpdateElasticsearchDomainConfigOutput, error) { - req, out := c.UpdateElasticsearchDomainConfigRequest(input) +func (c *ElasticsearchService) GetUpgradeHistoryWithContext(ctx aws.Context, input *GetUpgradeHistoryInput, opts ...request.Option) (*GetUpgradeHistoryOutput, error) { + req, out := c.GetUpgradeHistoryRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpgradeElasticsearchDomain = "UpgradeElasticsearchDomain" +// GetUpgradeHistoryPages iterates over the pages of a GetUpgradeHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetUpgradeHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetUpgradeHistory operation. +// pageNum := 0 +// err := client.GetUpgradeHistoryPages(params, +// func(page *elasticsearchservice.GetUpgradeHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticsearchService) GetUpgradeHistoryPages(input *GetUpgradeHistoryInput, fn func(*GetUpgradeHistoryOutput, bool) bool) error { + return c.GetUpgradeHistoryPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UpgradeElasticsearchDomainRequest generates a "aws/request.Request" representing the -// client's request for the UpgradeElasticsearchDomain operation. The "output" return +// GetUpgradeHistoryPagesWithContext same as GetUpgradeHistoryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) GetUpgradeHistoryPagesWithContext(ctx aws.Context, input *GetUpgradeHistoryInput, fn func(*GetUpgradeHistoryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetUpgradeHistoryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetUpgradeHistoryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetUpgradeHistoryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opGetUpgradeStatus = "GetUpgradeStatus" + +// GetUpgradeStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetUpgradeStatus operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpgradeElasticsearchDomain for more information on using the UpgradeElasticsearchDomain +// See GetUpgradeStatus for more information on using the GetUpgradeStatus // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpgradeElasticsearchDomainRequest method. -// req, resp := client.UpgradeElasticsearchDomainRequest(params) +// // Example sending a request using the GetUpgradeStatusRequest method. +// req, resp := client.GetUpgradeStatusRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } -func (c *ElasticsearchService) UpgradeElasticsearchDomainRequest(input *UpgradeElasticsearchDomainInput) (req *request.Request, output *UpgradeElasticsearchDomainOutput) { +func (c *ElasticsearchService) GetUpgradeStatusRequest(input *GetUpgradeStatusInput) (req *request.Request, output *GetUpgradeStatusOutput) { op := &request.Operation{ - Name: opUpgradeElasticsearchDomain, - HTTPMethod: "POST", - HTTPPath: "/2015-01-01/es/upgradeDomain", + Name: opGetUpgradeStatus, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/es/upgradeDomain/{DomainName}/status", } if input == nil { - input = &UpgradeElasticsearchDomainInput{} + input = &GetUpgradeStatusInput{} } - output = &UpgradeElasticsearchDomainOutput{} + output = &GetUpgradeStatusOutput{} req = c.newRequest(op, input, output) return } -// UpgradeElasticsearchDomain API operation for Amazon Elasticsearch Service. +// GetUpgradeStatus API operation for Amazon Elasticsearch Service. // -// Allows you to either upgrade your domain or perform an Upgrade eligibility -// check to a compatible Elasticsearch version. +// Retrieves the latest status of the last upgrade or upgrade eligibility check +// that was performed on the domain. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elasticsearch Service's -// API operation UpgradeElasticsearchDomain for usage and error information. +// API operation GetUpgradeStatus for usage and error information. // // Returned Error Types: // * BaseException @@ -2377,10 +2612,6 @@ func (c *ElasticsearchService) UpgradeElasticsearchDomainRequest(input *UpgradeE // An exception for accessing or deleting a resource that does not exist. Gives // http status code of 400. // -// * ResourceAlreadyExistsException -// An exception for creating a resource that already exists. Gives http status -// code of 400. -// // * DisabledOperationException // An error occured because the client wanted to access a not supported operation. // Gives http status code of 409. 
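The Pages helper above drives the NextToken/MaxResults paginator declared in GetUpgradeHistoryRequest, so callers never handle tokens directly. A sketch that walks every page; the domain name is a placeholder and the `svc` client is assumed as before.

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

// allUpgradeHistory visits each page; returning true keeps paging until the last page.
func allUpgradeHistory(svc *elasticsearchservice.ElasticsearchService) error {
	input := &elasticsearchservice.GetUpgradeHistoryInput{
		DomainName: aws.String("example-domain"), // placeholder
	}
	return svc.GetUpgradeHistoryPages(input,
		func(page *elasticsearchservice.GetUpgradeHistoryOutput, lastPage bool) bool {
			for _, h := range page.UpgradeHistories {
				fmt.Println(aws.StringValue(h.UpgradeName))
			}
			return true
		})
}
```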
@@ -2394,112 +2625,2485 @@ func (c *ElasticsearchService) UpgradeElasticsearchDomainRequest(input *UpgradeE // or failure (the failure is internal to the service) . Gives http status code // of 500. // -func (c *ElasticsearchService) UpgradeElasticsearchDomain(input *UpgradeElasticsearchDomainInput) (*UpgradeElasticsearchDomainOutput, error) { - req, out := c.UpgradeElasticsearchDomainRequest(input) +func (c *ElasticsearchService) GetUpgradeStatus(input *GetUpgradeStatusInput) (*GetUpgradeStatusOutput, error) { + req, out := c.GetUpgradeStatusRequest(input) return out, req.Send() } -// UpgradeElasticsearchDomainWithContext is the same as UpgradeElasticsearchDomain with the addition of +// GetUpgradeStatusWithContext is the same as GetUpgradeStatus with the addition of // the ability to pass a context and additional request options. // -// See UpgradeElasticsearchDomain for details on how to use this API operation. +// See GetUpgradeStatus for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ElasticsearchService) UpgradeElasticsearchDomainWithContext(ctx aws.Context, input *UpgradeElasticsearchDomainInput, opts ...request.Option) (*UpgradeElasticsearchDomainOutput, error) { - req, out := c.UpgradeElasticsearchDomainRequest(input) +func (c *ElasticsearchService) GetUpgradeStatusWithContext(ctx aws.Context, input *GetUpgradeStatusInput, opts ...request.Option) (*GetUpgradeStatusOutput, error) { + req, out := c.GetUpgradeStatusRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// The configured access rules for the domain's document and search endpoints, -// and the current status of those rules. -type AccessPoliciesStatus struct { - _ struct{} `type:"structure"` +const opListDomainNames = "ListDomainNames" - // The access policy configured for the Elasticsearch domain. Access policies - // may be resource-based, IP-based, or IAM-based. See Configuring Access Policies - // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-access-policies)for +// ListDomainNamesRequest generates a "aws/request.Request" representing the +// client's request for the ListDomainNames operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDomainNames for more information on using the ListDomainNames +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDomainNamesRequest method. 
+// req, resp := client.ListDomainNamesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) ListDomainNamesRequest(input *ListDomainNamesInput) (req *request.Request, output *ListDomainNamesOutput) { + op := &request.Operation{ + Name: opListDomainNames, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/domain", + } + + if input == nil { + input = &ListDomainNamesInput{} + } + + output = &ListDomainNamesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDomainNames API operation for Amazon Elasticsearch Service. +// +// Returns the name of all Elasticsearch domains owned by the current user's +// account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation ListDomainNames for usage and error information. +// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +func (c *ElasticsearchService) ListDomainNames(input *ListDomainNamesInput) (*ListDomainNamesOutput, error) { + req, out := c.ListDomainNamesRequest(input) + return out, req.Send() +} + +// ListDomainNamesWithContext is the same as ListDomainNames with the addition of +// the ability to pass a context and additional request options. +// +// See ListDomainNames for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) ListDomainNamesWithContext(ctx aws.Context, input *ListDomainNamesInput, opts ...request.Option) (*ListDomainNamesOutput, error) { + req, out := c.ListDomainNamesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListDomainsForPackage = "ListDomainsForPackage" + +// ListDomainsForPackageRequest generates a "aws/request.Request" representing the +// client's request for the ListDomainsForPackage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDomainsForPackage for more information on using the ListDomainsForPackage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDomainsForPackageRequest method. 
+// req, resp := client.ListDomainsForPackageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) ListDomainsForPackageRequest(input *ListDomainsForPackageInput) (req *request.Request, output *ListDomainsForPackageOutput) { + op := &request.Operation{ + Name: opListDomainsForPackage, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/packages/{PackageID}/domains", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDomainsForPackageInput{} + } + + output = &ListDomainsForPackageOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDomainsForPackage API operation for Amazon Elasticsearch Service. +// +// Lists all Amazon ES domains associated with the package. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation ListDomainsForPackage for usage and error information. +// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * AccessDeniedException +// An error occurred because user does not have permissions to access the resource. +// Returns HTTP status code 403. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +func (c *ElasticsearchService) ListDomainsForPackage(input *ListDomainsForPackageInput) (*ListDomainsForPackageOutput, error) { + req, out := c.ListDomainsForPackageRequest(input) + return out, req.Send() +} + +// ListDomainsForPackageWithContext is the same as ListDomainsForPackage with the addition of +// the ability to pass a context and additional request options. +// +// See ListDomainsForPackage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) ListDomainsForPackageWithContext(ctx aws.Context, input *ListDomainsForPackageInput, opts ...request.Option) (*ListDomainsForPackageOutput, error) { + req, out := c.ListDomainsForPackageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDomainsForPackagePages iterates over the pages of a ListDomainsForPackage operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDomainsForPackage method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDomainsForPackage operation. 
+// pageNum := 0 +// err := client.ListDomainsForPackagePages(params, +// func(page *elasticsearchservice.ListDomainsForPackageOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticsearchService) ListDomainsForPackagePages(input *ListDomainsForPackageInput, fn func(*ListDomainsForPackageOutput, bool) bool) error { + return c.ListDomainsForPackagePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDomainsForPackagePagesWithContext same as ListDomainsForPackagePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) ListDomainsForPackagePagesWithContext(ctx aws.Context, input *ListDomainsForPackageInput, fn func(*ListDomainsForPackageOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDomainsForPackageInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDomainsForPackageRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDomainsForPackageOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListElasticsearchInstanceTypes = "ListElasticsearchInstanceTypes" + +// ListElasticsearchInstanceTypesRequest generates a "aws/request.Request" representing the +// client's request for the ListElasticsearchInstanceTypes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListElasticsearchInstanceTypes for more information on using the ListElasticsearchInstanceTypes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListElasticsearchInstanceTypesRequest method. +// req, resp := client.ListElasticsearchInstanceTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) ListElasticsearchInstanceTypesRequest(input *ListElasticsearchInstanceTypesInput) (req *request.Request, output *ListElasticsearchInstanceTypesOutput) { + op := &request.Operation{ + Name: opListElasticsearchInstanceTypes, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/es/instanceTypes/{ElasticsearchVersion}", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListElasticsearchInstanceTypesInput{} + } + + output = &ListElasticsearchInstanceTypesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListElasticsearchInstanceTypes API operation for Amazon Elasticsearch Service. 
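Of the listing operations above, ListDomainNames is the simplest: its input has no required fields and the response is not paginated. A sketch, with the `svc` client assumed as before.

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

// domainNames prints every Elasticsearch domain visible to the client's account/region.
func domainNames(svc *elasticsearchservice.ElasticsearchService) error {
	out, err := svc.ListDomainNames(&elasticsearchservice.ListDomainNamesInput{})
	if err != nil {
		return err
	}
	for _, d := range out.DomainNames {
		fmt.Println(aws.StringValue(d.DomainName))
	}
	return nil
}
```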
+// +// List all Elasticsearch instance types that are supported for given ElasticsearchVersion +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation ListElasticsearchInstanceTypes for usage and error information. +// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +func (c *ElasticsearchService) ListElasticsearchInstanceTypes(input *ListElasticsearchInstanceTypesInput) (*ListElasticsearchInstanceTypesOutput, error) { + req, out := c.ListElasticsearchInstanceTypesRequest(input) + return out, req.Send() +} + +// ListElasticsearchInstanceTypesWithContext is the same as ListElasticsearchInstanceTypes with the addition of +// the ability to pass a context and additional request options. +// +// See ListElasticsearchInstanceTypes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) ListElasticsearchInstanceTypesWithContext(ctx aws.Context, input *ListElasticsearchInstanceTypesInput, opts ...request.Option) (*ListElasticsearchInstanceTypesOutput, error) { + req, out := c.ListElasticsearchInstanceTypesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListElasticsearchInstanceTypesPages iterates over the pages of a ListElasticsearchInstanceTypes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListElasticsearchInstanceTypes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListElasticsearchInstanceTypes operation. +// pageNum := 0 +// err := client.ListElasticsearchInstanceTypesPages(params, +// func(page *elasticsearchservice.ListElasticsearchInstanceTypesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticsearchService) ListElasticsearchInstanceTypesPages(input *ListElasticsearchInstanceTypesInput, fn func(*ListElasticsearchInstanceTypesOutput, bool) bool) error { + return c.ListElasticsearchInstanceTypesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListElasticsearchInstanceTypesPagesWithContext same as ListElasticsearchInstanceTypesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) ListElasticsearchInstanceTypesPagesWithContext(ctx aws.Context, input *ListElasticsearchInstanceTypesInput, fn func(*ListElasticsearchInstanceTypesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListElasticsearchInstanceTypesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListElasticsearchInstanceTypesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListElasticsearchInstanceTypesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListElasticsearchVersions = "ListElasticsearchVersions" + +// ListElasticsearchVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListElasticsearchVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListElasticsearchVersions for more information on using the ListElasticsearchVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListElasticsearchVersionsRequest method. +// req, resp := client.ListElasticsearchVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) ListElasticsearchVersionsRequest(input *ListElasticsearchVersionsInput) (req *request.Request, output *ListElasticsearchVersionsOutput) { + op := &request.Operation{ + Name: opListElasticsearchVersions, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/es/versions", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListElasticsearchVersionsInput{} + } + + output = &ListElasticsearchVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListElasticsearchVersions API operation for Amazon Elasticsearch Service. +// +// List all supported Elasticsearch versions +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation ListElasticsearchVersions for usage and error information. +// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. 
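A sketch for ListElasticsearchInstanceTypes as documented above. The ElasticsearchVersion path parameter is required; the version string below is only a placeholder and must be one the service actually supports. The `svc` client is assumed as before.

```go
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

// instanceTypes prints the instance types supported for one Elasticsearch version.
func instanceTypes(svc *elasticsearchservice.ElasticsearchService) error {
	out, err := svc.ListElasticsearchInstanceTypes(&elasticsearchservice.ListElasticsearchInstanceTypesInput{
		ElasticsearchVersion: aws.String("7.10"), // placeholder version string
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValueSlice(out.ElasticsearchInstanceTypes))
	return nil
}
```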
+// +func (c *ElasticsearchService) ListElasticsearchVersions(input *ListElasticsearchVersionsInput) (*ListElasticsearchVersionsOutput, error) { + req, out := c.ListElasticsearchVersionsRequest(input) + return out, req.Send() +} + +// ListElasticsearchVersionsWithContext is the same as ListElasticsearchVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListElasticsearchVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) ListElasticsearchVersionsWithContext(ctx aws.Context, input *ListElasticsearchVersionsInput, opts ...request.Option) (*ListElasticsearchVersionsOutput, error) { + req, out := c.ListElasticsearchVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListElasticsearchVersionsPages iterates over the pages of a ListElasticsearchVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListElasticsearchVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListElasticsearchVersions operation. +// pageNum := 0 +// err := client.ListElasticsearchVersionsPages(params, +// func(page *elasticsearchservice.ListElasticsearchVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticsearchService) ListElasticsearchVersionsPages(input *ListElasticsearchVersionsInput, fn func(*ListElasticsearchVersionsOutput, bool) bool) error { + return c.ListElasticsearchVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListElasticsearchVersionsPagesWithContext same as ListElasticsearchVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) ListElasticsearchVersionsPagesWithContext(ctx aws.Context, input *ListElasticsearchVersionsInput, fn func(*ListElasticsearchVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListElasticsearchVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListElasticsearchVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListElasticsearchVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListPackagesForDomain = "ListPackagesForDomain" + +// ListPackagesForDomainRequest generates a "aws/request.Request" representing the +// client's request for the ListPackagesForDomain operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
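The ListElasticsearchVersions paginator above can be folded into a single slice with the Pages helper. A sketch, with the `svc` client assumed as before.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

// supportedVersions collects every supported Elasticsearch version across all pages.
func supportedVersions(svc *elasticsearchservice.ElasticsearchService) ([]string, error) {
	var versions []string
	err := svc.ListElasticsearchVersionsPages(&elasticsearchservice.ListElasticsearchVersionsInput{},
		func(page *elasticsearchservice.ListElasticsearchVersionsOutput, lastPage bool) bool {
			versions = append(versions, aws.StringValueSlice(page.ElasticsearchVersions)...)
			return true // keep paging
		})
	return versions, err
}
```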
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListPackagesForDomain for more information on using the ListPackagesForDomain +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListPackagesForDomainRequest method. +// req, resp := client.ListPackagesForDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) ListPackagesForDomainRequest(input *ListPackagesForDomainInput) (req *request.Request, output *ListPackagesForDomainOutput) { + op := &request.Operation{ + Name: opListPackagesForDomain, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/domain/{DomainName}/packages", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListPackagesForDomainInput{} + } + + output = &ListPackagesForDomainOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListPackagesForDomain API operation for Amazon Elasticsearch Service. +// +// Lists all packages associated with the Amazon ES domain. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation ListPackagesForDomain for usage and error information. +// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * AccessDeniedException +// An error occurred because user does not have permissions to access the resource. +// Returns HTTP status code 403. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +func (c *ElasticsearchService) ListPackagesForDomain(input *ListPackagesForDomainInput) (*ListPackagesForDomainOutput, error) { + req, out := c.ListPackagesForDomainRequest(input) + return out, req.Send() +} + +// ListPackagesForDomainWithContext is the same as ListPackagesForDomain with the addition of +// the ability to pass a context and additional request options. +// +// See ListPackagesForDomain for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) ListPackagesForDomainWithContext(ctx aws.Context, input *ListPackagesForDomainInput, opts ...request.Option) (*ListPackagesForDomainOutput, error) { + req, out := c.ListPackagesForDomainRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListPackagesForDomainPages iterates over the pages of a ListPackagesForDomain operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPackagesForDomain method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPackagesForDomain operation. +// pageNum := 0 +// err := client.ListPackagesForDomainPages(params, +// func(page *elasticsearchservice.ListPackagesForDomainOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ElasticsearchService) ListPackagesForDomainPages(input *ListPackagesForDomainInput, fn func(*ListPackagesForDomainOutput, bool) bool) error { + return c.ListPackagesForDomainPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPackagesForDomainPagesWithContext same as ListPackagesForDomainPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) ListPackagesForDomainPagesWithContext(ctx aws.Context, input *ListPackagesForDomainInput, fn func(*ListPackagesForDomainOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPackagesForDomainInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPackagesForDomainRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPackagesForDomainOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTags = "ListTags" + +// ListTagsRequest generates a "aws/request.Request" representing the +// client's request for the ListTags operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTags for more information on using the ListTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsRequest method. +// req, resp := client.ListTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { + op := &request.Operation{ + Name: opListTags, + HTTPMethod: "GET", + HTTPPath: "/2015-01-01/tags/", + } + + if input == nil { + input = &ListTagsInput{} + } + + output = &ListTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTags API operation for Amazon Elasticsearch Service. +// +// Returns all tags for the given Elasticsearch domain. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation ListTags for usage and error information. +// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +func (c *ElasticsearchService) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { + req, out := c.ListTagsRequest(input) + return out, req.Send() +} + +// ListTagsWithContext is the same as ListTags with the addition of +// the ability to pass a context and additional request options. +// +// See ListTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) ListTagsWithContext(ctx aws.Context, input *ListTagsInput, opts ...request.Option) (*ListTagsOutput, error) { + req, out := c.ListTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPurchaseReservedElasticsearchInstanceOffering = "PurchaseReservedElasticsearchInstanceOffering" + +// PurchaseReservedElasticsearchInstanceOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseReservedElasticsearchInstanceOffering operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PurchaseReservedElasticsearchInstanceOffering for more information on using the PurchaseReservedElasticsearchInstanceOffering +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PurchaseReservedElasticsearchInstanceOfferingRequest method. 
+// req, resp := client.PurchaseReservedElasticsearchInstanceOfferingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) PurchaseReservedElasticsearchInstanceOfferingRequest(input *PurchaseReservedElasticsearchInstanceOfferingInput) (req *request.Request, output *PurchaseReservedElasticsearchInstanceOfferingOutput) { + op := &request.Operation{ + Name: opPurchaseReservedElasticsearchInstanceOffering, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/es/purchaseReservedInstanceOffering", + } + + if input == nil { + input = &PurchaseReservedElasticsearchInstanceOfferingInput{} + } + + output = &PurchaseReservedElasticsearchInstanceOfferingOutput{} + req = c.newRequest(op, input, output) + return +} + +// PurchaseReservedElasticsearchInstanceOffering API operation for Amazon Elasticsearch Service. +// +// Allows you to purchase reserved Elasticsearch instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation PurchaseReservedElasticsearchInstanceOffering for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * ResourceAlreadyExistsException +// An exception for creating a resource that already exists. Gives http status +// code of 400. +// +// * LimitExceededException +// An exception for trying to create more than allowed resources or sub-resources. +// Gives http status code of 409. +// +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +func (c *ElasticsearchService) PurchaseReservedElasticsearchInstanceOffering(input *PurchaseReservedElasticsearchInstanceOfferingInput) (*PurchaseReservedElasticsearchInstanceOfferingOutput, error) { + req, out := c.PurchaseReservedElasticsearchInstanceOfferingRequest(input) + return out, req.Send() +} + +// PurchaseReservedElasticsearchInstanceOfferingWithContext is the same as PurchaseReservedElasticsearchInstanceOffering with the addition of +// the ability to pass a context and additional request options. +// +// See PurchaseReservedElasticsearchInstanceOffering for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) PurchaseReservedElasticsearchInstanceOfferingWithContext(ctx aws.Context, input *PurchaseReservedElasticsearchInstanceOfferingInput, opts ...request.Option) (*PurchaseReservedElasticsearchInstanceOfferingOutput, error) { + req, out := c.PurchaseReservedElasticsearchInstanceOfferingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opRejectInboundCrossClusterSearchConnection = "RejectInboundCrossClusterSearchConnection" + +// RejectInboundCrossClusterSearchConnectionRequest generates a "aws/request.Request" representing the +// client's request for the RejectInboundCrossClusterSearchConnection operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RejectInboundCrossClusterSearchConnection for more information on using the RejectInboundCrossClusterSearchConnection +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RejectInboundCrossClusterSearchConnectionRequest method. +// req, resp := client.RejectInboundCrossClusterSearchConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) RejectInboundCrossClusterSearchConnectionRequest(input *RejectInboundCrossClusterSearchConnectionInput) (req *request.Request, output *RejectInboundCrossClusterSearchConnectionOutput) { + op := &request.Operation{ + Name: opRejectInboundCrossClusterSearchConnection, + HTTPMethod: "PUT", + HTTPPath: "/2015-01-01/es/ccs/inboundConnection/{ConnectionId}/reject", + } + + if input == nil { + input = &RejectInboundCrossClusterSearchConnectionInput{} + } + + output = &RejectInboundCrossClusterSearchConnectionOutput{} + req = c.newRequest(op, input, output) + return +} + +// RejectInboundCrossClusterSearchConnection API operation for Amazon Elasticsearch Service. +// +// Allows the destination domain owner to reject an inbound cross-cluster search +// connection request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation RejectInboundCrossClusterSearchConnection for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. +// +func (c *ElasticsearchService) RejectInboundCrossClusterSearchConnection(input *RejectInboundCrossClusterSearchConnectionInput) (*RejectInboundCrossClusterSearchConnectionOutput, error) { + req, out := c.RejectInboundCrossClusterSearchConnectionRequest(input) + return out, req.Send() +} + +// RejectInboundCrossClusterSearchConnectionWithContext is the same as RejectInboundCrossClusterSearchConnection with the addition of +// the ability to pass a context and additional request options. +// +// See RejectInboundCrossClusterSearchConnection for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) RejectInboundCrossClusterSearchConnectionWithContext(ctx aws.Context, input *RejectInboundCrossClusterSearchConnectionInput, opts ...request.Option) (*RejectInboundCrossClusterSearchConnectionOutput, error) { + req, out := c.RejectInboundCrossClusterSearchConnectionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRemoveTags = "RemoveTags" + +// RemoveTagsRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTags operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RemoveTags for more information on using the RemoveTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RemoveTagsRequest method. +// req, resp := client.RemoveTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { + op := &request.Operation{ + Name: opRemoveTags, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/tags-removal", + } + + if input == nil { + input = &RemoveTagsInput{} + } + + output = &RemoveTagsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RemoveTags API operation for Amazon Elasticsearch Service. +// +// Removes the specified set of tags from the specified Elasticsearch domain. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation RemoveTags for usage and error information. +// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +func (c *ElasticsearchService) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + return out, req.Send() +} + +// RemoveTagsWithContext is the same as RemoveTags with the addition of +// the ability to pass a context and additional request options. +// +// See RemoveTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ElasticsearchService) RemoveTagsWithContext(ctx aws.Context, input *RemoveTagsInput, opts ...request.Option) (*RemoveTagsOutput, error) { + req, out := c.RemoveTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartElasticsearchServiceSoftwareUpdate = "StartElasticsearchServiceSoftwareUpdate" + +// StartElasticsearchServiceSoftwareUpdateRequest generates a "aws/request.Request" representing the +// client's request for the StartElasticsearchServiceSoftwareUpdate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartElasticsearchServiceSoftwareUpdate for more information on using the StartElasticsearchServiceSoftwareUpdate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartElasticsearchServiceSoftwareUpdateRequest method. +// req, resp := client.StartElasticsearchServiceSoftwareUpdateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) StartElasticsearchServiceSoftwareUpdateRequest(input *StartElasticsearchServiceSoftwareUpdateInput) (req *request.Request, output *StartElasticsearchServiceSoftwareUpdateOutput) { + op := &request.Operation{ + Name: opStartElasticsearchServiceSoftwareUpdate, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/es/serviceSoftwareUpdate/start", + } + + if input == nil { + input = &StartElasticsearchServiceSoftwareUpdateInput{} + } + + output = &StartElasticsearchServiceSoftwareUpdateOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartElasticsearchServiceSoftwareUpdate API operation for Amazon Elasticsearch Service. +// +// Schedules a service software update for an Amazon ES domain. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation StartElasticsearchServiceSoftwareUpdate for usage and error information. +// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. 
+// +func (c *ElasticsearchService) StartElasticsearchServiceSoftwareUpdate(input *StartElasticsearchServiceSoftwareUpdateInput) (*StartElasticsearchServiceSoftwareUpdateOutput, error) { + req, out := c.StartElasticsearchServiceSoftwareUpdateRequest(input) + return out, req.Send() +} + +// StartElasticsearchServiceSoftwareUpdateWithContext is the same as StartElasticsearchServiceSoftwareUpdate with the addition of +// the ability to pass a context and additional request options. +// +// See StartElasticsearchServiceSoftwareUpdate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) StartElasticsearchServiceSoftwareUpdateWithContext(ctx aws.Context, input *StartElasticsearchServiceSoftwareUpdateInput, opts ...request.Option) (*StartElasticsearchServiceSoftwareUpdateOutput, error) { + req, out := c.StartElasticsearchServiceSoftwareUpdateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateElasticsearchDomainConfig = "UpdateElasticsearchDomainConfig" + +// UpdateElasticsearchDomainConfigRequest generates a "aws/request.Request" representing the +// client's request for the UpdateElasticsearchDomainConfig operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateElasticsearchDomainConfig for more information on using the UpdateElasticsearchDomainConfig +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateElasticsearchDomainConfigRequest method. +// req, resp := client.UpdateElasticsearchDomainConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) UpdateElasticsearchDomainConfigRequest(input *UpdateElasticsearchDomainConfigInput) (req *request.Request, output *UpdateElasticsearchDomainConfigOutput) { + op := &request.Operation{ + Name: opUpdateElasticsearchDomainConfig, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/es/domain/{DomainName}/config", + } + + if input == nil { + input = &UpdateElasticsearchDomainConfigInput{} + } + + output = &UpdateElasticsearchDomainConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateElasticsearchDomainConfig API operation for Amazon Elasticsearch Service. +// +// Modifies the cluster configuration of the specified Elasticsearch domain, +// setting as setting the instance type and the number of instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation UpdateElasticsearchDomainConfig for usage and error information. 
+// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +// * InvalidTypeException +// An exception for trying to create or access sub-resource that is either invalid +// or not supported. Gives http status code of 409. +// +// * LimitExceededException +// An exception for trying to create more than allowed resources or sub-resources. +// Gives http status code of 409. +// +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +func (c *ElasticsearchService) UpdateElasticsearchDomainConfig(input *UpdateElasticsearchDomainConfigInput) (*UpdateElasticsearchDomainConfigOutput, error) { + req, out := c.UpdateElasticsearchDomainConfigRequest(input) + return out, req.Send() +} + +// UpdateElasticsearchDomainConfigWithContext is the same as UpdateElasticsearchDomainConfig with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateElasticsearchDomainConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) UpdateElasticsearchDomainConfigWithContext(ctx aws.Context, input *UpdateElasticsearchDomainConfigInput, opts ...request.Option) (*UpdateElasticsearchDomainConfigOutput, error) { + req, out := c.UpdateElasticsearchDomainConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpgradeElasticsearchDomain = "UpgradeElasticsearchDomain" + +// UpgradeElasticsearchDomainRequest generates a "aws/request.Request" representing the +// client's request for the UpgradeElasticsearchDomain operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpgradeElasticsearchDomain for more information on using the UpgradeElasticsearchDomain +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpgradeElasticsearchDomainRequest method. 
+// req, resp := client.UpgradeElasticsearchDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *ElasticsearchService) UpgradeElasticsearchDomainRequest(input *UpgradeElasticsearchDomainInput) (req *request.Request, output *UpgradeElasticsearchDomainOutput) { + op := &request.Operation{ + Name: opUpgradeElasticsearchDomain, + HTTPMethod: "POST", + HTTPPath: "/2015-01-01/es/upgradeDomain", + } + + if input == nil { + input = &UpgradeElasticsearchDomainInput{} + } + + output = &UpgradeElasticsearchDomainOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpgradeElasticsearchDomain API operation for Amazon Elasticsearch Service. +// +// Allows you to either upgrade your domain or perform an Upgrade eligibility +// check to a compatible Elasticsearch version. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elasticsearch Service's +// API operation UpgradeElasticsearchDomain for usage and error information. +// +// Returned Error Types: +// * BaseException +// An error occurred while processing the request. +// +// * ResourceNotFoundException +// An exception for accessing or deleting a resource that does not exist. Gives +// http status code of 400. +// +// * ResourceAlreadyExistsException +// An exception for creating a resource that already exists. Gives http status +// code of 400. +// +// * DisabledOperationException +// An error occured because the client wanted to access a not supported operation. +// Gives http status code of 409. +// +// * ValidationException +// An exception for missing / invalid input fields. Gives http status code of +// 400. +// +// * InternalException +// The request processing has failed because of an unknown error, exception +// or failure (the failure is internal to the service) . Gives http status code +// of 500. +// +func (c *ElasticsearchService) UpgradeElasticsearchDomain(input *UpgradeElasticsearchDomainInput) (*UpgradeElasticsearchDomainOutput, error) { + req, out := c.UpgradeElasticsearchDomainRequest(input) + return out, req.Send() +} + +// UpgradeElasticsearchDomainWithContext is the same as UpgradeElasticsearchDomain with the addition of +// the ability to pass a context and additional request options. +// +// See UpgradeElasticsearchDomain for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElasticsearchService) UpgradeElasticsearchDomainWithContext(ctx aws.Context, input *UpgradeElasticsearchDomainInput, opts ...request.Option) (*UpgradeElasticsearchDomainOutput, error) { + req, out := c.UpgradeElasticsearchDomainRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Container for the parameters to the AcceptInboundCrossClusterSearchConnection +// operation. +type AcceptInboundCrossClusterSearchConnectionInput struct { + _ struct{} `type:"structure"` + + // The id of the inbound connection that you want to accept. 
+ // + // CrossClusterSearchConnectionId is a required field + CrossClusterSearchConnectionId *string `location:"uri" locationName:"ConnectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AcceptInboundCrossClusterSearchConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptInboundCrossClusterSearchConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AcceptInboundCrossClusterSearchConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AcceptInboundCrossClusterSearchConnectionInput"} + if s.CrossClusterSearchConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("CrossClusterSearchConnectionId")) + } + if s.CrossClusterSearchConnectionId != nil && len(*s.CrossClusterSearchConnectionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CrossClusterSearchConnectionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCrossClusterSearchConnectionId sets the CrossClusterSearchConnectionId field's value. +func (s *AcceptInboundCrossClusterSearchConnectionInput) SetCrossClusterSearchConnectionId(v string) *AcceptInboundCrossClusterSearchConnectionInput { + s.CrossClusterSearchConnectionId = &v + return s +} + +// The result of a AcceptInboundCrossClusterSearchConnection operation. Contains +// details of accepted inbound connection. +type AcceptInboundCrossClusterSearchConnectionOutput struct { + _ struct{} `type:"structure"` + + // Specifies the InboundCrossClusterSearchConnection of accepted inbound connection. + CrossClusterSearchConnection *InboundCrossClusterSearchConnection `type:"structure"` +} + +// String returns the string representation +func (s AcceptInboundCrossClusterSearchConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AcceptInboundCrossClusterSearchConnectionOutput) GoString() string { + return s.String() +} + +// SetCrossClusterSearchConnection sets the CrossClusterSearchConnection field's value. +func (s *AcceptInboundCrossClusterSearchConnectionOutput) SetCrossClusterSearchConnection(v *InboundCrossClusterSearchConnection) *AcceptInboundCrossClusterSearchConnectionOutput { + s.CrossClusterSearchConnection = v + return s +} + +// An error occurred because user does not have permissions to access the resource. +// Returns HTTP status code 403. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *AccessDeniedException) OrigErr() error {
+ return nil
+}
+
+func (s *AccessDeniedException) Error() string {
+ return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *AccessDeniedException) StatusCode() int {
+ return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *AccessDeniedException) RequestID() string {
+ return s.RespMetadata.RequestID
+}
+
+// The configured access rules for the domain's document and search endpoints,
+// and the current status of those rules.
+type AccessPoliciesStatus struct {
+ _ struct{} `type:"structure"`
+
+ // The access policy configured for the Elasticsearch domain. Access policies
+ // may be resource-based, IP-based, or IAM-based. See Configuring Access Policies
+ // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-access-policies)for
 // more information.
 //
 // Options is a required field
- Options *string `type:"string" required:"true"`
+ Options *string `type:"string" required:"true"`
+
+ // The status of the access policy for the Elasticsearch domain. See OptionStatus
+ // for the status information that's included.
+ //
+ // Status is a required field
+ Status *OptionStatus `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AccessPoliciesStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessPoliciesStatus) GoString() string {
+ return s.String()
+}
+
+// SetOptions sets the Options field's value.
+func (s *AccessPoliciesStatus) SetOptions(v string) *AccessPoliciesStatus {
+ s.Options = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *AccessPoliciesStatus) SetStatus(v *OptionStatus) *AccessPoliciesStatus {
+ s.Status = v
+ return s
+}
+
+// Container for the parameters to the AddTags operation. Specify the tags that
+// you want to attach to the Elasticsearch domain.
+type AddTagsInput struct {
+ _ struct{} `type:"structure"`
+
+ // Specify the ARN for which you want to add the tags.
+ //
+ // ARN is a required field
+ ARN *string `type:"string" required:"true"`
+
+ // List of Tag that need to be added for the Elasticsearch domain.
+ //
+ // TagList is a required field
+ TagList []*Tag `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s AddTagsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AddTagsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AddTagsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"}
+ if s.ARN == nil {
+ invalidParams.Add(request.NewErrParamRequired("ARN"))
+ }
+ if s.TagList == nil {
+ invalidParams.Add(request.NewErrParamRequired("TagList"))
+ }
+ if s.TagList != nil {
+ for i, v := range s.TagList {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagList", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetARN sets the ARN field's value.
+func (s *AddTagsInput) SetARN(v string) *AddTagsInput {
+ s.ARN = &v
+ return s
+}
+
+// SetTagList sets the TagList field's value.
+func (s *AddTagsInput) SetTagList(v []*Tag) *AddTagsInput { + s.TagList = v + return s +} + +type AddTagsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsOutput) GoString() string { + return s.String() +} + +// List of limits that are specific to a given InstanceType and for each of +// it's InstanceRole . +type AdditionalLimit struct { + _ struct{} `type:"structure"` + + // Name of Additional Limit is specific to a given InstanceType and for each + // of it's InstanceRole etc. Attributes and their details: + // * MaximumNumberOfDataNodesSupported + // + // * MaximumNumberOfDataNodesWithoutMasterNode + LimitName *string `type:"string"` + + // Value for given AdditionalLimit$LimitName . + LimitValues []*string `type:"list"` +} + +// String returns the string representation +func (s AdditionalLimit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdditionalLimit) GoString() string { + return s.String() +} + +// SetLimitName sets the LimitName field's value. +func (s *AdditionalLimit) SetLimitName(v string) *AdditionalLimit { + s.LimitName = &v + return s +} + +// SetLimitValues sets the LimitValues field's value. +func (s *AdditionalLimit) SetLimitValues(v []*string) *AdditionalLimit { + s.LimitValues = v + return s +} + +// Status of the advanced options for the specified Elasticsearch domain. Currently, +// the following advanced options are available: +// +// * Option to allow references to indices in an HTTP request body. Must +// be false when configuring access to individual sub-resources. By default, +// the value is true. See Configuration Advanced Options for more information. +// +// * Option to specify the percentage of heap space that is allocated to +// field data. By default, this setting is unbounded. +// +// For more information, see Configuring Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options). +type AdvancedOptionsStatus struct { + _ struct{} `type:"structure"` + + // Specifies the status of advanced options for the specified Elasticsearch + // domain. + // + // Options is a required field + Options map[string]*string `type:"map" required:"true"` + + // Specifies the status of OptionStatus for advanced options for the specified + // Elasticsearch domain. + // + // Status is a required field + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AdvancedOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdvancedOptionsStatus) GoString() string { + return s.String() +} + +// SetOptions sets the Options field's value. +func (s *AdvancedOptionsStatus) SetOptions(v map[string]*string) *AdvancedOptionsStatus { + s.Options = v + return s +} + +// SetStatus sets the Status field's value. +func (s *AdvancedOptionsStatus) SetStatus(v *OptionStatus) *AdvancedOptionsStatus { + s.Status = v + return s +} + +// Specifies the advanced security configuration: whether advanced security +// is enabled, whether the internal database option is enabled. +type AdvancedSecurityOptions struct { + _ struct{} `type:"structure"` + + // True if advanced security is enabled. 
+ Enabled *bool `type:"boolean"` + + // True if the internal user database is enabled. + InternalUserDatabaseEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s AdvancedSecurityOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdvancedSecurityOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *AdvancedSecurityOptions) SetEnabled(v bool) *AdvancedSecurityOptions { + s.Enabled = &v + return s +} + +// SetInternalUserDatabaseEnabled sets the InternalUserDatabaseEnabled field's value. +func (s *AdvancedSecurityOptions) SetInternalUserDatabaseEnabled(v bool) *AdvancedSecurityOptions { + s.InternalUserDatabaseEnabled = &v + return s +} + +// Specifies the advanced security configuration: whether advanced security +// is enabled, whether the internal database option is enabled, master username +// and password (if internal database is enabled), and master user ARN (if IAM +// is enabled). +type AdvancedSecurityOptionsInput struct { + _ struct{} `type:"structure"` + + // True if advanced security is enabled. + Enabled *bool `type:"boolean"` + + // True if the internal user database is enabled. + InternalUserDatabaseEnabled *bool `type:"boolean"` + + // Credentials for the master user: username and password, ARN, or both. + MasterUserOptions *MasterUserOptions `type:"structure"` +} + +// String returns the string representation +func (s AdvancedSecurityOptionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdvancedSecurityOptionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AdvancedSecurityOptionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AdvancedSecurityOptionsInput"} + if s.MasterUserOptions != nil { + if err := s.MasterUserOptions.Validate(); err != nil { + invalidParams.AddNested("MasterUserOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnabled sets the Enabled field's value. +func (s *AdvancedSecurityOptionsInput) SetEnabled(v bool) *AdvancedSecurityOptionsInput { + s.Enabled = &v + return s +} + +// SetInternalUserDatabaseEnabled sets the InternalUserDatabaseEnabled field's value. +func (s *AdvancedSecurityOptionsInput) SetInternalUserDatabaseEnabled(v bool) *AdvancedSecurityOptionsInput { + s.InternalUserDatabaseEnabled = &v + return s +} + +// SetMasterUserOptions sets the MasterUserOptions field's value. +func (s *AdvancedSecurityOptionsInput) SetMasterUserOptions(v *MasterUserOptions) *AdvancedSecurityOptionsInput { + s.MasterUserOptions = v + return s +} + +// Specifies the status of advanced security options for the specified Elasticsearch +// domain. +type AdvancedSecurityOptionsStatus struct { + _ struct{} `type:"structure"` + + // Specifies advanced security options for the specified Elasticsearch domain. + // + // Options is a required field + Options *AdvancedSecurityOptions `type:"structure" required:"true"` + + // Status of the advanced security options for the specified Elasticsearch domain. 
+ // + // Status is a required field + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AdvancedSecurityOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdvancedSecurityOptionsStatus) GoString() string { + return s.String() +} + +// SetOptions sets the Options field's value. +func (s *AdvancedSecurityOptionsStatus) SetOptions(v *AdvancedSecurityOptions) *AdvancedSecurityOptionsStatus { + s.Options = v + return s +} + +// SetStatus sets the Status field's value. +func (s *AdvancedSecurityOptionsStatus) SetStatus(v *OptionStatus) *AdvancedSecurityOptionsStatus { + s.Status = v + return s +} + +// Container for request parameters to AssociatePackage operation. +type AssociatePackageInput struct { + _ struct{} `type:"structure"` + + // Name of the domain that you want to associate the package with. + // + // DomainName is a required field + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` + + // Internal ID of the package that you want to associate with a domain. Use + // DescribePackages to find this value. + // + // PackageID is a required field + PackageID *string `location:"uri" locationName:"PackageID" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociatePackageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociatePackageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociatePackageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociatePackageInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.PackageID == nil { + invalidParams.Add(request.NewErrParamRequired("PackageID")) + } + if s.PackageID != nil && len(*s.PackageID) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PackageID", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomainName sets the DomainName field's value. +func (s *AssociatePackageInput) SetDomainName(v string) *AssociatePackageInput { + s.DomainName = &v + return s +} + +// SetPackageID sets the PackageID field's value. +func (s *AssociatePackageInput) SetPackageID(v string) *AssociatePackageInput { + s.PackageID = &v + return s +} + +// Container for response returned by AssociatePackage operation. +type AssociatePackageOutput struct { + _ struct{} `type:"structure"` + + // DomainPackageDetails + DomainPackageDetails *DomainPackageDetails `type:"structure"` +} + +// String returns the string representation +func (s AssociatePackageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociatePackageOutput) GoString() string { + return s.String() +} + +// SetDomainPackageDetails sets the DomainPackageDetails field's value. +func (s *AssociatePackageOutput) SetDomainPackageDetails(v *DomainPackageDetails) *AssociatePackageOutput { + s.DomainPackageDetails = v + return s +} + +// An error occurred while processing the request. 
+type BaseException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A description of the error. + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BaseException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BaseException) GoString() string { + return s.String() +} + +func newErrorBaseException(v protocol.ResponseMetadata) error { + return &BaseException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *BaseException) Code() string { + return "BaseException" +} + +// Message returns the exception's message. +func (s *BaseException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BaseException) OrigErr() error { + return nil +} + +func (s *BaseException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BaseException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BaseException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Container for the parameters to the CancelElasticsearchServiceSoftwareUpdate +// operation. Specifies the name of the Elasticsearch domain that you wish to +// cancel a service software update on. +type CancelElasticsearchServiceSoftwareUpdateInput struct { + _ struct{} `type:"structure"` + + // The name of the domain that you want to stop the latest service software + // update on. + // + // DomainName is a required field + DomainName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelElasticsearchServiceSoftwareUpdateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelElasticsearchServiceSoftwareUpdateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelElasticsearchServiceSoftwareUpdateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelElasticsearchServiceSoftwareUpdateInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomainName sets the DomainName field's value. +func (s *CancelElasticsearchServiceSoftwareUpdateInput) SetDomainName(v string) *CancelElasticsearchServiceSoftwareUpdateInput { + s.DomainName = &v + return s +} + +// The result of a CancelElasticsearchServiceSoftwareUpdate operation. Contains +// the status of the update. +type CancelElasticsearchServiceSoftwareUpdateOutput struct { + _ struct{} `type:"structure"` + + // The current status of the Elasticsearch service software update. 
+ ServiceSoftwareOptions *ServiceSoftwareOptions `type:"structure"` +} + +// String returns the string representation +func (s CancelElasticsearchServiceSoftwareUpdateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelElasticsearchServiceSoftwareUpdateOutput) GoString() string { + return s.String() +} + +// SetServiceSoftwareOptions sets the ServiceSoftwareOptions field's value. +func (s *CancelElasticsearchServiceSoftwareUpdateOutput) SetServiceSoftwareOptions(v *ServiceSoftwareOptions) *CancelElasticsearchServiceSoftwareUpdateOutput { + s.ServiceSoftwareOptions = v + return s +} + +// Options to specify the Cognito user and identity pools for Kibana authentication. +// For more information, see Amazon Cognito Authentication for Kibana (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html). +type CognitoOptions struct { + _ struct{} `type:"structure"` + + // Specifies the option to enable Cognito for Kibana authentication. + Enabled *bool `type:"boolean"` + + // Specifies the Cognito identity pool ID for Kibana authentication. + IdentityPoolId *string `min:"1" type:"string"` + + // Specifies the role ARN that provides Elasticsearch permissions for accessing + // Cognito resources. + RoleArn *string `min:"20" type:"string"` + + // Specifies the Cognito user pool ID for Kibana authentication. + UserPoolId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CognitoOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CognitoOptions) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CognitoOptions) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CognitoOptions"} + if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnabled sets the Enabled field's value. +func (s *CognitoOptions) SetEnabled(v bool) *CognitoOptions { + s.Enabled = &v + return s +} + +// SetIdentityPoolId sets the IdentityPoolId field's value. +func (s *CognitoOptions) SetIdentityPoolId(v string) *CognitoOptions { + s.IdentityPoolId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CognitoOptions) SetRoleArn(v string) *CognitoOptions { + s.RoleArn = &v + return s +} + +// SetUserPoolId sets the UserPoolId field's value. +func (s *CognitoOptions) SetUserPoolId(v string) *CognitoOptions { + s.UserPoolId = &v + return s +} + +// Status of the Cognito options for the specified Elasticsearch domain. +type CognitoOptionsStatus struct { + _ struct{} `type:"structure"` + + // Specifies the Cognito options for the specified Elasticsearch domain. + // + // Options is a required field + Options *CognitoOptions `type:"structure" required:"true"` + + // Specifies the status of the Cognito options for the specified Elasticsearch + // domain. 
+ // + // Status is a required field + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CognitoOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CognitoOptionsStatus) GoString() string { + return s.String() +} + +// SetOptions sets the Options field's value. +func (s *CognitoOptionsStatus) SetOptions(v *CognitoOptions) *CognitoOptionsStatus { + s.Options = v + return s +} + +// SetStatus sets the Status field's value. +func (s *CognitoOptionsStatus) SetStatus(v *OptionStatus) *CognitoOptionsStatus { + s.Status = v + return s +} + +// A map from an ElasticsearchVersion to a list of compatible ElasticsearchVersion +// s to which the domain can be upgraded. +type CompatibleVersionsMap struct { + _ struct{} `type:"structure"` + + // The current version of Elasticsearch on which a domain is. + SourceVersion *string `type:"string"` + + // List of supported elastic search versions. + TargetVersions []*string `type:"list"` +} + +// String returns the string representation +func (s CompatibleVersionsMap) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompatibleVersionsMap) GoString() string { + return s.String() +} + +// SetSourceVersion sets the SourceVersion field's value. +func (s *CompatibleVersionsMap) SetSourceVersion(v string) *CompatibleVersionsMap { + s.SourceVersion = &v + return s +} + +// SetTargetVersions sets the TargetVersions field's value. +func (s *CompatibleVersionsMap) SetTargetVersions(v []*string) *CompatibleVersionsMap { + s.TargetVersions = v + return s +} + +// An error occurred because the client attempts to remove a resource that is +// currently in use. Returns HTTP status code 409. +type ConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConflictException) GoString() string { + return s.String() +} + +func newErrorConflictException(v protocol.ResponseMetadata) error { + return &ConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ConflictException) Code() string { + return "ConflictException" +} + +// Message returns the exception's message. +func (s *ConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ConflictException) OrigErr() error { + return nil +} + +func (s *ConflictException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateElasticsearchDomainInput struct { + _ struct{} `type:"structure"` + + // IAM access policy as a JSON-formatted string. + AccessPolicies *string `type:"string"` + + // Option to allow references to indices in an HTTP request body. Must be false + // when configuring access to individual sub-resources. 
By default, the value + // is true. See Configuration Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options) + // for more information. + AdvancedOptions map[string]*string `type:"map"` + + // Specifies advanced security options. + AdvancedSecurityOptions *AdvancedSecurityOptionsInput `type:"structure"` + + // Options to specify the Cognito user and identity pools for Kibana authentication. + // For more information, see Amazon Cognito Authentication for Kibana (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html). + CognitoOptions *CognitoOptions `type:"structure"` + + // Options to specify configuration that will be applied to the domain endpoint. + DomainEndpointOptions *DomainEndpointOptions `type:"structure"` + + // The name of the Elasticsearch domain that you are creating. Domain names + // are unique across the domains owned by an account within an AWS region. Domain + // names must start with a lowercase letter and can contain the following characters: + // a-z (lowercase), 0-9, and - (hyphen). + // + // DomainName is a required field + DomainName *string `min:"3" type:"string" required:"true"` + + // Options to enable, disable and specify the type and size of EBS storage volumes. + EBSOptions *EBSOptions `type:"structure"` + + // Configuration options for an Elasticsearch domain. Specifies the instance + // type and number of instances in the domain cluster. + ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure"` + + // String of format X.Y to specify version for the Elasticsearch domain eg. + // "1.5" or "2.3". For more information, see Creating Elasticsearch Domains + // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomains) + // in the Amazon Elasticsearch Service Developer Guide. + ElasticsearchVersion *string `type:"string"` + + // Specifies the Encryption At Rest Options. + EncryptionAtRestOptions *EncryptionAtRestOptions `type:"structure"` + + // Map of LogType and LogPublishingOption, each containing options to publish + // a given type of Elasticsearch log. + LogPublishingOptions map[string]*LogPublishingOption `type:"map"` + + // Specifies the NodeToNodeEncryptionOptions. + NodeToNodeEncryptionOptions *NodeToNodeEncryptionOptions `type:"structure"` + + // Option to set time, in UTC format, of the daily automated snapshot. Default + // value is 0 hours. + SnapshotOptions *SnapshotOptions `type:"structure"` + + // Options to specify the subnets and security groups for VPC endpoint. For + // more information, see Creating a VPC (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-creating-vpc) + // in VPC Endpoints for Amazon Elasticsearch Service Domains + VPCOptions *VPCOptions `type:"structure"` +} + +// String returns the string representation +func (s CreateElasticsearchDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateElasticsearchDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateElasticsearchDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateElasticsearchDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.AdvancedSecurityOptions != nil { + if err := s.AdvancedSecurityOptions.Validate(); err != nil { + invalidParams.AddNested("AdvancedSecurityOptions", err.(request.ErrInvalidParams)) + } + } + if s.CognitoOptions != nil { + if err := s.CognitoOptions.Validate(); err != nil { + invalidParams.AddNested("CognitoOptions", err.(request.ErrInvalidParams)) + } + } + if s.EncryptionAtRestOptions != nil { + if err := s.EncryptionAtRestOptions.Validate(); err != nil { + invalidParams.AddNested("EncryptionAtRestOptions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessPolicies sets the AccessPolicies field's value. +func (s *CreateElasticsearchDomainInput) SetAccessPolicies(v string) *CreateElasticsearchDomainInput { + s.AccessPolicies = &v + return s +} + +// SetAdvancedOptions sets the AdvancedOptions field's value. +func (s *CreateElasticsearchDomainInput) SetAdvancedOptions(v map[string]*string) *CreateElasticsearchDomainInput { + s.AdvancedOptions = v + return s +} + +// SetAdvancedSecurityOptions sets the AdvancedSecurityOptions field's value. +func (s *CreateElasticsearchDomainInput) SetAdvancedSecurityOptions(v *AdvancedSecurityOptionsInput) *CreateElasticsearchDomainInput { + s.AdvancedSecurityOptions = v + return s +} - // The status of the access policy for the Elasticsearch domain. See OptionStatus - // for the status information that's included. - // - // Status is a required field - Status *OptionStatus `type:"structure" required:"true"` +// SetCognitoOptions sets the CognitoOptions field's value. +func (s *CreateElasticsearchDomainInput) SetCognitoOptions(v *CognitoOptions) *CreateElasticsearchDomainInput { + s.CognitoOptions = v + return s +} + +// SetDomainEndpointOptions sets the DomainEndpointOptions field's value. +func (s *CreateElasticsearchDomainInput) SetDomainEndpointOptions(v *DomainEndpointOptions) *CreateElasticsearchDomainInput { + s.DomainEndpointOptions = v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *CreateElasticsearchDomainInput) SetDomainName(v string) *CreateElasticsearchDomainInput { + s.DomainName = &v + return s +} + +// SetEBSOptions sets the EBSOptions field's value. +func (s *CreateElasticsearchDomainInput) SetEBSOptions(v *EBSOptions) *CreateElasticsearchDomainInput { + s.EBSOptions = v + return s +} + +// SetElasticsearchClusterConfig sets the ElasticsearchClusterConfig field's value. +func (s *CreateElasticsearchDomainInput) SetElasticsearchClusterConfig(v *ElasticsearchClusterConfig) *CreateElasticsearchDomainInput { + s.ElasticsearchClusterConfig = v + return s +} + +// SetElasticsearchVersion sets the ElasticsearchVersion field's value. +func (s *CreateElasticsearchDomainInput) SetElasticsearchVersion(v string) *CreateElasticsearchDomainInput { + s.ElasticsearchVersion = &v + return s +} + +// SetEncryptionAtRestOptions sets the EncryptionAtRestOptions field's value. 
+func (s *CreateElasticsearchDomainInput) SetEncryptionAtRestOptions(v *EncryptionAtRestOptions) *CreateElasticsearchDomainInput { + s.EncryptionAtRestOptions = v + return s +} + +// SetLogPublishingOptions sets the LogPublishingOptions field's value. +func (s *CreateElasticsearchDomainInput) SetLogPublishingOptions(v map[string]*LogPublishingOption) *CreateElasticsearchDomainInput { + s.LogPublishingOptions = v + return s +} + +// SetNodeToNodeEncryptionOptions sets the NodeToNodeEncryptionOptions field's value. +func (s *CreateElasticsearchDomainInput) SetNodeToNodeEncryptionOptions(v *NodeToNodeEncryptionOptions) *CreateElasticsearchDomainInput { + s.NodeToNodeEncryptionOptions = v + return s +} + +// SetSnapshotOptions sets the SnapshotOptions field's value. +func (s *CreateElasticsearchDomainInput) SetSnapshotOptions(v *SnapshotOptions) *CreateElasticsearchDomainInput { + s.SnapshotOptions = v + return s +} + +// SetVPCOptions sets the VPCOptions field's value. +func (s *CreateElasticsearchDomainInput) SetVPCOptions(v *VPCOptions) *CreateElasticsearchDomainInput { + s.VPCOptions = v + return s +} + +// The result of a CreateElasticsearchDomain operation. Contains the status +// of the newly created Elasticsearch domain. +type CreateElasticsearchDomainOutput struct { + _ struct{} `type:"structure"` + + // The status of the newly created Elasticsearch domain. + DomainStatus *ElasticsearchDomainStatus `type:"structure"` } // String returns the string representation -func (s AccessPoliciesStatus) String() string { +func (s CreateElasticsearchDomainOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AccessPoliciesStatus) GoString() string { +func (s CreateElasticsearchDomainOutput) GoString() string { return s.String() } -// SetOptions sets the Options field's value. -func (s *AccessPoliciesStatus) SetOptions(v string) *AccessPoliciesStatus { - s.Options = &v - return s -} - -// SetStatus sets the Status field's value. -func (s *AccessPoliciesStatus) SetStatus(v *OptionStatus) *AccessPoliciesStatus { - s.Status = v +// SetDomainStatus sets the DomainStatus field's value. +func (s *CreateElasticsearchDomainOutput) SetDomainStatus(v *ElasticsearchDomainStatus) *CreateElasticsearchDomainOutput { + s.DomainStatus = v return s } -// Container for the parameters to the AddTags operation. Specify the tags that -// you want to attach to the Elasticsearch domain. -type AddTagsInput struct { +// Container for the parameters to the CreateOutboundCrossClusterSearchConnection +// operation. +type CreateOutboundCrossClusterSearchConnectionInput struct { _ struct{} `type:"structure"` - // Specify the ARN for which you want to add the tags. + // Specifies the connection alias that will be used by the customer for this + // connection. // - // ARN is a required field - ARN *string `type:"string" required:"true"` + // ConnectionAlias is a required field + ConnectionAlias *string `type:"string" required:"true"` - // List of Tag that need to be added for the Elasticsearch domain. + // Specifies the DomainInformation for the destination Elasticsearch domain. // - // TagList is a required field - TagList []*Tag `type:"list" required:"true"` + // DestinationDomainInfo is a required field + DestinationDomainInfo *DomainInformation `type:"structure" required:"true"` + + // Specifies the DomainInformation for the source Elasticsearch domain. 
+ // + // SourceDomainInfo is a required field + SourceDomainInfo *DomainInformation `type:"structure" required:"true"` } // String returns the string representation -func (s AddTagsInput) String() string { +func (s CreateOutboundCrossClusterSearchConnectionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AddTagsInput) GoString() string { +func (s CreateOutboundCrossClusterSearchConnectionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AddTagsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"} - if s.ARN == nil { - invalidParams.Add(request.NewErrParamRequired("ARN")) +func (s *CreateOutboundCrossClusterSearchConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateOutboundCrossClusterSearchConnectionInput"} + if s.ConnectionAlias == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionAlias")) } - if s.TagList == nil { - invalidParams.Add(request.NewErrParamRequired("TagList")) + if s.DestinationDomainInfo == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationDomainInfo")) } - if s.TagList != nil { - for i, v := range s.TagList { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagList", i), err.(request.ErrInvalidParams)) - } + if s.SourceDomainInfo == nil { + invalidParams.Add(request.NewErrParamRequired("SourceDomainInfo")) + } + if s.DestinationDomainInfo != nil { + if err := s.DestinationDomainInfo.Validate(); err != nil { + invalidParams.AddNested("DestinationDomainInfo", err.(request.ErrInvalidParams)) + } + } + if s.SourceDomainInfo != nil { + if err := s.SourceDomainInfo.Validate(); err != nil { + invalidParams.AddNested("SourceDomainInfo", err.(request.ErrInvalidParams)) } } @@ -2509,187 +5113,224 @@ func (s *AddTagsInput) Validate() error { return nil } -// SetARN sets the ARN field's value. -func (s *AddTagsInput) SetARN(v string) *AddTagsInput { - s.ARN = &v +// SetConnectionAlias sets the ConnectionAlias field's value. +func (s *CreateOutboundCrossClusterSearchConnectionInput) SetConnectionAlias(v string) *CreateOutboundCrossClusterSearchConnectionInput { + s.ConnectionAlias = &v return s } -// SetTagList sets the TagList field's value. -func (s *AddTagsInput) SetTagList(v []*Tag) *AddTagsInput { - s.TagList = v +// SetDestinationDomainInfo sets the DestinationDomainInfo field's value. +func (s *CreateOutboundCrossClusterSearchConnectionInput) SetDestinationDomainInfo(v *DomainInformation) *CreateOutboundCrossClusterSearchConnectionInput { + s.DestinationDomainInfo = v return s } -type AddTagsOutput struct { +// SetSourceDomainInfo sets the SourceDomainInfo field's value. +func (s *CreateOutboundCrossClusterSearchConnectionInput) SetSourceDomainInfo(v *DomainInformation) *CreateOutboundCrossClusterSearchConnectionInput { + s.SourceDomainInfo = v + return s +} + +// The result of a CreateOutboundCrossClusterSearchConnection request. Contains +// the details of the newly created cross-cluster search connection. +type CreateOutboundCrossClusterSearchConnectionOutput struct { _ struct{} `type:"structure"` + + // Specifies the connection alias provided during the create connection request. + ConnectionAlias *string `type:"string"` + + // Specifies the OutboundCrossClusterSearchConnectionStatus for the newly created + // connection. 
+ ConnectionStatus *OutboundCrossClusterSearchConnectionStatus `type:"structure"` + + // Unique id for the created outbound connection, which is used for subsequent + // operations on connection. + CrossClusterSearchConnectionId *string `type:"string"` + + // Specifies the DomainInformation for the destination Elasticsearch domain. + DestinationDomainInfo *DomainInformation `type:"structure"` + + // Specifies the DomainInformation for the source Elasticsearch domain. + SourceDomainInfo *DomainInformation `type:"structure"` } // String returns the string representation -func (s AddTagsOutput) String() string { +func (s CreateOutboundCrossClusterSearchConnectionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AddTagsOutput) GoString() string { +func (s CreateOutboundCrossClusterSearchConnectionOutput) GoString() string { return s.String() } -// List of limits that are specific to a given InstanceType and for each of -// it's InstanceRole . -type AdditionalLimit struct { - _ struct{} `type:"structure"` - - // Name of Additional Limit is specific to a given InstanceType and for each - // of it's InstanceRole etc. Attributes and their details: - // * MaximumNumberOfDataNodesSupported - // - // * MaximumNumberOfDataNodesWithoutMasterNode - LimitName *string `type:"string"` - - // Value for given AdditionalLimit$LimitName . - LimitValues []*string `type:"list"` +// SetConnectionAlias sets the ConnectionAlias field's value. +func (s *CreateOutboundCrossClusterSearchConnectionOutput) SetConnectionAlias(v string) *CreateOutboundCrossClusterSearchConnectionOutput { + s.ConnectionAlias = &v + return s } -// String returns the string representation -func (s AdditionalLimit) String() string { - return awsutil.Prettify(s) +// SetConnectionStatus sets the ConnectionStatus field's value. +func (s *CreateOutboundCrossClusterSearchConnectionOutput) SetConnectionStatus(v *OutboundCrossClusterSearchConnectionStatus) *CreateOutboundCrossClusterSearchConnectionOutput { + s.ConnectionStatus = v + return s } -// GoString returns the string representation -func (s AdditionalLimit) GoString() string { - return s.String() +// SetCrossClusterSearchConnectionId sets the CrossClusterSearchConnectionId field's value. +func (s *CreateOutboundCrossClusterSearchConnectionOutput) SetCrossClusterSearchConnectionId(v string) *CreateOutboundCrossClusterSearchConnectionOutput { + s.CrossClusterSearchConnectionId = &v + return s } -// SetLimitName sets the LimitName field's value. -func (s *AdditionalLimit) SetLimitName(v string) *AdditionalLimit { - s.LimitName = &v +// SetDestinationDomainInfo sets the DestinationDomainInfo field's value. +func (s *CreateOutboundCrossClusterSearchConnectionOutput) SetDestinationDomainInfo(v *DomainInformation) *CreateOutboundCrossClusterSearchConnectionOutput { + s.DestinationDomainInfo = v return s } -// SetLimitValues sets the LimitValues field's value. -func (s *AdditionalLimit) SetLimitValues(v []*string) *AdditionalLimit { - s.LimitValues = v +// SetSourceDomainInfo sets the SourceDomainInfo field's value. +func (s *CreateOutboundCrossClusterSearchConnectionOutput) SetSourceDomainInfo(v *DomainInformation) *CreateOutboundCrossClusterSearchConnectionOutput { + s.SourceDomainInfo = v return s } -// Status of the advanced options for the specified Elasticsearch domain. Currently, -// the following advanced options are available: -// -// * Option to allow references to indices in an HTTP request body. 
Must -// be false when configuring access to individual sub-resources. By default, -// the value is true. See Configuration Advanced Options for more information. -// -// * Option to specify the percentage of heap space that is allocated to -// field data. By default, this setting is unbounded. -// -// For more information, see Configuring Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options). -type AdvancedOptionsStatus struct { +// Container for request parameters to CreatePackage operation. +type CreatePackageInput struct { _ struct{} `type:"structure"` - // Specifies the status of advanced options for the specified Elasticsearch - // domain. + // Description of the package. + PackageDescription *string `type:"string"` + + // Unique identifier for the package. // - // Options is a required field - Options map[string]*string `type:"map" required:"true"` + // PackageName is a required field + PackageName *string `min:"3" type:"string" required:"true"` - // Specifies the status of OptionStatus for advanced options for the specified - // Elasticsearch domain. + // The customer S3 location PackageSource for importing the package. // - // Status is a required field - Status *OptionStatus `type:"structure" required:"true"` + // PackageSource is a required field + PackageSource *PackageSource `type:"structure" required:"true"` + + // Type of package. Currently supports only TXT-DICTIONARY. + // + // PackageType is a required field + PackageType *string `type:"string" required:"true" enum:"PackageType"` } // String returns the string representation -func (s AdvancedOptionsStatus) String() string { +func (s CreatePackageInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AdvancedOptionsStatus) GoString() string { +func (s CreatePackageInput) GoString() string { return s.String() } -// SetOptions sets the Options field's value. -func (s *AdvancedOptionsStatus) SetOptions(v map[string]*string) *AdvancedOptionsStatus { - s.Options = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePackageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePackageInput"} + if s.PackageName == nil { + invalidParams.Add(request.NewErrParamRequired("PackageName")) + } + if s.PackageName != nil && len(*s.PackageName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("PackageName", 3)) + } + if s.PackageSource == nil { + invalidParams.Add(request.NewErrParamRequired("PackageSource")) + } + if s.PackageType == nil { + invalidParams.Add(request.NewErrParamRequired("PackageType")) + } + if s.PackageSource != nil { + if err := s.PackageSource.Validate(); err != nil { + invalidParams.AddNested("PackageSource", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPackageDescription sets the PackageDescription field's value. +func (s *CreatePackageInput) SetPackageDescription(v string) *CreatePackageInput { + s.PackageDescription = &v return s } -// SetStatus sets the Status field's value. -func (s *AdvancedOptionsStatus) SetStatus(v *OptionStatus) *AdvancedOptionsStatus { - s.Status = v +// SetPackageName sets the PackageName field's value. 
+func (s *CreatePackageInput) SetPackageName(v string) *CreatePackageInput { + s.PackageName = &v return s } -// Specifies the advanced security configuration: whether advanced security -// is enabled, whether the internal database option is enabled. -type AdvancedSecurityOptions struct { - _ struct{} `type:"structure"` +// SetPackageSource sets the PackageSource field's value. +func (s *CreatePackageInput) SetPackageSource(v *PackageSource) *CreatePackageInput { + s.PackageSource = v + return s +} - // True if advanced security is enabled. - Enabled *bool `type:"boolean"` +// SetPackageType sets the PackageType field's value. +func (s *CreatePackageInput) SetPackageType(v string) *CreatePackageInput { + s.PackageType = &v + return s +} - // True if the internal user database is enabled. - InternalUserDatabaseEnabled *bool `type:"boolean"` +// Container for response returned by CreatePackage operation. +type CreatePackageOutput struct { + _ struct{} `type:"structure"` + + // Information about the package PackageDetails. + PackageDetails *PackageDetails `type:"structure"` } // String returns the string representation -func (s AdvancedSecurityOptions) String() string { +func (s CreatePackageOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AdvancedSecurityOptions) GoString() string { +func (s CreatePackageOutput) GoString() string { return s.String() } -// SetEnabled sets the Enabled field's value. -func (s *AdvancedSecurityOptions) SetEnabled(v bool) *AdvancedSecurityOptions { - s.Enabled = &v - return s -} - -// SetInternalUserDatabaseEnabled sets the InternalUserDatabaseEnabled field's value. -func (s *AdvancedSecurityOptions) SetInternalUserDatabaseEnabled(v bool) *AdvancedSecurityOptions { - s.InternalUserDatabaseEnabled = &v +// SetPackageDetails sets the PackageDetails field's value. +func (s *CreatePackageOutput) SetPackageDetails(v *PackageDetails) *CreatePackageOutput { + s.PackageDetails = v return s } -// Specifies the advanced security configuration: whether advanced security -// is enabled, whether the internal database option is enabled, master username -// and password (if internal database is enabled), and master user ARN (if IAM -// is enabled). -type AdvancedSecurityOptionsInput struct { - _ struct{} `type:"structure"` - - // True if advanced security is enabled. - Enabled *bool `type:"boolean"` - - // True if the internal user database is enabled. - InternalUserDatabaseEnabled *bool `type:"boolean"` +// Container for the parameters to the DeleteElasticsearchDomain operation. +// Specifies the name of the Elasticsearch domain that you want to delete. +type DeleteElasticsearchDomainInput struct { + _ struct{} `type:"structure"` - // Credentials for the master user: username and password, ARN, or both. - MasterUserOptions *MasterUserOptions `type:"structure"` + // The name of the Elasticsearch domain that you want to permanently delete. 
+ // + // DomainName is a required field + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` } // String returns the string representation -func (s AdvancedSecurityOptionsInput) String() string { +func (s DeleteElasticsearchDomainInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AdvancedSecurityOptionsInput) GoString() string { +func (s DeleteElasticsearchDomainInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AdvancedSecurityOptionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AdvancedSecurityOptionsInput"} - if s.MasterUserOptions != nil { - if err := s.MasterUserOptions.Validate(); err != nil { - invalidParams.AddNested("MasterUserOptions", err.(request.ErrInvalidParams)) - } +func (s *DeleteElasticsearchDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteElasticsearchDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) } if invalidParams.Len() > 0 { @@ -2698,150 +5339,163 @@ func (s *AdvancedSecurityOptionsInput) Validate() error { return nil } -// SetEnabled sets the Enabled field's value. -func (s *AdvancedSecurityOptionsInput) SetEnabled(v bool) *AdvancedSecurityOptionsInput { - s.Enabled = &v +// SetDomainName sets the DomainName field's value. +func (s *DeleteElasticsearchDomainInput) SetDomainName(v string) *DeleteElasticsearchDomainInput { + s.DomainName = &v return s } -// SetInternalUserDatabaseEnabled sets the InternalUserDatabaseEnabled field's value. -func (s *AdvancedSecurityOptionsInput) SetInternalUserDatabaseEnabled(v bool) *AdvancedSecurityOptionsInput { - s.InternalUserDatabaseEnabled = &v - return s +// The result of a DeleteElasticsearchDomain request. Contains the status of +// the pending deletion, or no status if the domain and all of its resources +// have been deleted. +type DeleteElasticsearchDomainOutput struct { + _ struct{} `type:"structure"` + + // The status of the Elasticsearch domain being deleted. + DomainStatus *ElasticsearchDomainStatus `type:"structure"` } -// SetMasterUserOptions sets the MasterUserOptions field's value. -func (s *AdvancedSecurityOptionsInput) SetMasterUserOptions(v *MasterUserOptions) *AdvancedSecurityOptionsInput { - s.MasterUserOptions = v - return s +// String returns the string representation +func (s DeleteElasticsearchDomainOutput) String() string { + return awsutil.Prettify(s) } -// Specifies the status of advanced security options for the specified Elasticsearch -// domain. -type AdvancedSecurityOptionsStatus struct { - _ struct{} `type:"structure"` +// GoString returns the string representation +func (s DeleteElasticsearchDomainOutput) GoString() string { + return s.String() +} - // Specifies advanced security options for the specified Elasticsearch domain. - // - // Options is a required field - Options *AdvancedSecurityOptions `type:"structure" required:"true"` +// SetDomainStatus sets the DomainStatus field's value. +func (s *DeleteElasticsearchDomainOutput) SetDomainStatus(v *ElasticsearchDomainStatus) *DeleteElasticsearchDomainOutput { + s.DomainStatus = v + return s +} - // Status of the advanced security options for the specified Elasticsearch domain. 
- // - // Status is a required field - Status *OptionStatus `type:"structure" required:"true"` +type DeleteElasticsearchServiceRoleInput struct { + _ struct{} `type:"structure"` } // String returns the string representation -func (s AdvancedSecurityOptionsStatus) String() string { +func (s DeleteElasticsearchServiceRoleInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AdvancedSecurityOptionsStatus) GoString() string { +func (s DeleteElasticsearchServiceRoleInput) GoString() string { return s.String() } -// SetOptions sets the Options field's value. -func (s *AdvancedSecurityOptionsStatus) SetOptions(v *AdvancedSecurityOptions) *AdvancedSecurityOptionsStatus { - s.Options = v - return s +type DeleteElasticsearchServiceRoleOutput struct { + _ struct{} `type:"structure"` } -// SetStatus sets the Status field's value. -func (s *AdvancedSecurityOptionsStatus) SetStatus(v *OptionStatus) *AdvancedSecurityOptionsStatus { - s.Status = v - return s +// String returns the string representation +func (s DeleteElasticsearchServiceRoleOutput) String() string { + return awsutil.Prettify(s) } -// An error occurred while processing the request. -type BaseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// GoString returns the string representation +func (s DeleteElasticsearchServiceRoleOutput) GoString() string { + return s.String() +} - // A description of the error. - Message_ *string `locationName:"message" type:"string"` +// Container for the parameters to the DeleteInboundCrossClusterSearchConnection +// operation. +type DeleteInboundCrossClusterSearchConnectionInput struct { + _ struct{} `type:"structure"` + + // The id of the inbound connection that you want to permanently delete. + // + // CrossClusterSearchConnectionId is a required field + CrossClusterSearchConnectionId *string `location:"uri" locationName:"ConnectionId" type:"string" required:"true"` } // String returns the string representation -func (s BaseException) String() string { +func (s DeleteInboundCrossClusterSearchConnectionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BaseException) GoString() string { +func (s DeleteInboundCrossClusterSearchConnectionInput) GoString() string { return s.String() } -func newErrorBaseException(v protocol.ResponseMetadata) error { - return &BaseException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteInboundCrossClusterSearchConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInboundCrossClusterSearchConnectionInput"} + if s.CrossClusterSearchConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("CrossClusterSearchConnectionId")) + } + if s.CrossClusterSearchConnectionId != nil && len(*s.CrossClusterSearchConnectionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CrossClusterSearchConnectionId", 1)) } -} -// Code returns the exception type name. -func (s BaseException) Code() string { - return "BaseException" + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// Message returns the exception's message. -func (s BaseException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetCrossClusterSearchConnectionId sets the CrossClusterSearchConnectionId field's value. 
+func (s *DeleteInboundCrossClusterSearchConnectionInput) SetCrossClusterSearchConnectionId(v string) *DeleteInboundCrossClusterSearchConnectionInput { + s.CrossClusterSearchConnectionId = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s BaseException) OrigErr() error { - return nil +// The result of a DeleteInboundCrossClusterSearchConnection operation. Contains +// details of deleted inbound connection. +type DeleteInboundCrossClusterSearchConnectionOutput struct { + _ struct{} `type:"structure"` + + // Specifies the InboundCrossClusterSearchConnection of deleted inbound connection. + CrossClusterSearchConnection *InboundCrossClusterSearchConnection `type:"structure"` } -func (s BaseException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// String returns the string representation +func (s DeleteInboundCrossClusterSearchConnectionOutput) String() string { + return awsutil.Prettify(s) } -// Status code returns the HTTP status code for the request's response error. -func (s BaseException) StatusCode() int { - return s.respMetadata.StatusCode +// GoString returns the string representation +func (s DeleteInboundCrossClusterSearchConnectionOutput) GoString() string { + return s.String() } -// RequestID returns the service's response RequestID for request. -func (s BaseException) RequestID() string { - return s.respMetadata.RequestID +// SetCrossClusterSearchConnection sets the CrossClusterSearchConnection field's value. +func (s *DeleteInboundCrossClusterSearchConnectionOutput) SetCrossClusterSearchConnection(v *InboundCrossClusterSearchConnection) *DeleteInboundCrossClusterSearchConnectionOutput { + s.CrossClusterSearchConnection = v + return s } -// Container for the parameters to the CancelElasticsearchServiceSoftwareUpdate -// operation. Specifies the name of the Elasticsearch domain that you wish to -// cancel a service software update on. -type CancelElasticsearchServiceSoftwareUpdateInput struct { +// Container for the parameters to the DeleteOutboundCrossClusterSearchConnection +// operation. +type DeleteOutboundCrossClusterSearchConnectionInput struct { _ struct{} `type:"structure"` - // The name of the domain that you want to stop the latest service software - // update on. + // The id of the outbound connection that you want to permanently delete. // - // DomainName is a required field - DomainName *string `min:"3" type:"string" required:"true"` + // CrossClusterSearchConnectionId is a required field + CrossClusterSearchConnectionId *string `location:"uri" locationName:"ConnectionId" type:"string" required:"true"` } // String returns the string representation -func (s CancelElasticsearchServiceSoftwareUpdateInput) String() string { +func (s DeleteOutboundCrossClusterSearchConnectionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CancelElasticsearchServiceSoftwareUpdateInput) GoString() string { +func (s DeleteOutboundCrossClusterSearchConnectionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CancelElasticsearchServiceSoftwareUpdateInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CancelElasticsearchServiceSoftwareUpdateInput"} - if s.DomainName == nil { - invalidParams.Add(request.NewErrParamRequired("DomainName")) +func (s *DeleteOutboundCrossClusterSearchConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteOutboundCrossClusterSearchConnectionInput"} + if s.CrossClusterSearchConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("CrossClusterSearchConnectionId")) } - if s.DomainName != nil && len(*s.DomainName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + if s.CrossClusterSearchConnectionId != nil && len(*s.CrossClusterSearchConnectionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CrossClusterSearchConnectionId", 1)) } if invalidParams.Len() > 0 { @@ -2850,77 +5504,66 @@ func (s *CancelElasticsearchServiceSoftwareUpdateInput) Validate() error { return nil } -// SetDomainName sets the DomainName field's value. -func (s *CancelElasticsearchServiceSoftwareUpdateInput) SetDomainName(v string) *CancelElasticsearchServiceSoftwareUpdateInput { - s.DomainName = &v +// SetCrossClusterSearchConnectionId sets the CrossClusterSearchConnectionId field's value. +func (s *DeleteOutboundCrossClusterSearchConnectionInput) SetCrossClusterSearchConnectionId(v string) *DeleteOutboundCrossClusterSearchConnectionInput { + s.CrossClusterSearchConnectionId = &v return s } -// The result of a CancelElasticsearchServiceSoftwareUpdate operation. Contains -// the status of the update. -type CancelElasticsearchServiceSoftwareUpdateOutput struct { +// The result of a DeleteOutboundCrossClusterSearchConnection operation. Contains +// details of deleted outbound connection. +type DeleteOutboundCrossClusterSearchConnectionOutput struct { _ struct{} `type:"structure"` - // The current status of the Elasticsearch service software update. - ServiceSoftwareOptions *ServiceSoftwareOptions `type:"structure"` + // Specifies the OutboundCrossClusterSearchConnection of deleted outbound connection. + CrossClusterSearchConnection *OutboundCrossClusterSearchConnection `type:"structure"` } // String returns the string representation -func (s CancelElasticsearchServiceSoftwareUpdateOutput) String() string { +func (s DeleteOutboundCrossClusterSearchConnectionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CancelElasticsearchServiceSoftwareUpdateOutput) GoString() string { +func (s DeleteOutboundCrossClusterSearchConnectionOutput) GoString() string { return s.String() } -// SetServiceSoftwareOptions sets the ServiceSoftwareOptions field's value. -func (s *CancelElasticsearchServiceSoftwareUpdateOutput) SetServiceSoftwareOptions(v *ServiceSoftwareOptions) *CancelElasticsearchServiceSoftwareUpdateOutput { - s.ServiceSoftwareOptions = v +// SetCrossClusterSearchConnection sets the CrossClusterSearchConnection field's value. +func (s *DeleteOutboundCrossClusterSearchConnectionOutput) SetCrossClusterSearchConnection(v *OutboundCrossClusterSearchConnection) *DeleteOutboundCrossClusterSearchConnectionOutput { + s.CrossClusterSearchConnection = v return s } -// Options to specify the Cognito user and identity pools for Kibana authentication. -// For more information, see Amazon Cognito Authentication for Kibana (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html). 
-type CognitoOptions struct { +// Container for request parameters to DeletePackage operation. +type DeletePackageInput struct { _ struct{} `type:"structure"` - // Specifies the option to enable Cognito for Kibana authentication. - Enabled *bool `type:"boolean"` - - // Specifies the Cognito identity pool ID for Kibana authentication. - IdentityPoolId *string `min:"1" type:"string"` - - // Specifies the role ARN that provides Elasticsearch permissions for accessing - // Cognito resources. - RoleArn *string `min:"20" type:"string"` - - // Specifies the Cognito user pool ID for Kibana authentication. - UserPoolId *string `min:"1" type:"string"` + // Internal ID of the package that you want to delete. Use DescribePackages + // to find this value. + // + // PackageID is a required field + PackageID *string `location:"uri" locationName:"PackageID" type:"string" required:"true"` } // String returns the string representation -func (s CognitoOptions) String() string { +func (s DeletePackageInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CognitoOptions) GoString() string { +func (s DeletePackageInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CognitoOptions) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CognitoOptions"} - if s.IdentityPoolId != nil && len(*s.IdentityPoolId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("IdentityPoolId", 1)) - } - if s.RoleArn != nil && len(*s.RoleArn) < 20 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) +func (s *DeletePackageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePackageInput"} + if s.PackageID == nil { + invalidParams.Add(request.NewErrParamRequired("PackageID")) } - if s.UserPoolId != nil && len(*s.UserPoolId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("UserPoolId", 1)) + if s.PackageID != nil && len(*s.PackageID) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PackageID", 1)) } if invalidParams.Len() > 0 { @@ -2929,199 +5572,136 @@ func (s *CognitoOptions) Validate() error { return nil } -// SetEnabled sets the Enabled field's value. -func (s *CognitoOptions) SetEnabled(v bool) *CognitoOptions { - s.Enabled = &v +// SetPackageID sets the PackageID field's value. +func (s *DeletePackageInput) SetPackageID(v string) *DeletePackageInput { + s.PackageID = &v return s } -// SetIdentityPoolId sets the IdentityPoolId field's value. -func (s *CognitoOptions) SetIdentityPoolId(v string) *CognitoOptions { - s.IdentityPoolId = &v - return s +// Container for response parameters to DeletePackage operation. +type DeletePackageOutput struct { + _ struct{} `type:"structure"` + + // PackageDetails + PackageDetails *PackageDetails `type:"structure"` } -// SetRoleArn sets the RoleArn field's value. -func (s *CognitoOptions) SetRoleArn(v string) *CognitoOptions { - s.RoleArn = &v - return s +// String returns the string representation +func (s DeletePackageOutput) String() string { + return awsutil.Prettify(s) } -// SetUserPoolId sets the UserPoolId field's value. -func (s *CognitoOptions) SetUserPoolId(v string) *CognitoOptions { - s.UserPoolId = &v +// GoString returns the string representation +func (s DeletePackageOutput) GoString() string { + return s.String() +} + +// SetPackageDetails sets the PackageDetails field's value. 
+func (s *DeletePackageOutput) SetPackageDetails(v *PackageDetails) *DeletePackageOutput { + s.PackageDetails = v return s } -// Status of the Cognito options for the specified Elasticsearch domain. -type CognitoOptionsStatus struct { +// Container for the parameters to the DescribeElasticsearchDomainConfig operation. +// Specifies the domain name for which you want configuration information. +type DescribeElasticsearchDomainConfigInput struct { _ struct{} `type:"structure"` - // Specifies the Cognito options for the specified Elasticsearch domain. - // - // Options is a required field - Options *CognitoOptions `type:"structure" required:"true"` - - // Specifies the status of the Cognito options for the specified Elasticsearch - // domain. + // The Elasticsearch domain that you want to get information about. // - // Status is a required field - Status *OptionStatus `type:"structure" required:"true"` + // DomainName is a required field + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` } // String returns the string representation -func (s CognitoOptionsStatus) String() string { +func (s DescribeElasticsearchDomainConfigInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CognitoOptionsStatus) GoString() string { +func (s DescribeElasticsearchDomainConfigInput) GoString() string { return s.String() } -// SetOptions sets the Options field's value. -func (s *CognitoOptionsStatus) SetOptions(v *CognitoOptions) *CognitoOptionsStatus { - s.Options = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeElasticsearchDomainConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeElasticsearchDomainConfigInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetStatus sets the Status field's value. -func (s *CognitoOptionsStatus) SetStatus(v *OptionStatus) *CognitoOptionsStatus { - s.Status = v +// SetDomainName sets the DomainName field's value. +func (s *DescribeElasticsearchDomainConfigInput) SetDomainName(v string) *DescribeElasticsearchDomainConfigInput { + s.DomainName = &v return s } -// A map from an ElasticsearchVersion to a list of compatible ElasticsearchVersion -// s to which the domain can be upgraded. -type CompatibleVersionsMap struct { +// The result of a DescribeElasticsearchDomainConfig request. Contains the configuration +// information of the requested domain. +type DescribeElasticsearchDomainConfigOutput struct { _ struct{} `type:"structure"` - // The current version of Elasticsearch on which a domain is. - SourceVersion *string `type:"string"` - - // List of supported elastic search versions. - TargetVersions []*string `type:"list"` + // The configuration information of the domain requested in the DescribeElasticsearchDomainConfig + // request. 
+ // + // DomainConfig is a required field + DomainConfig *ElasticsearchDomainConfig `type:"structure" required:"true"` } // String returns the string representation -func (s CompatibleVersionsMap) String() string { +func (s DescribeElasticsearchDomainConfigOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CompatibleVersionsMap) GoString() string { +func (s DescribeElasticsearchDomainConfigOutput) GoString() string { return s.String() } -// SetSourceVersion sets the SourceVersion field's value. -func (s *CompatibleVersionsMap) SetSourceVersion(v string) *CompatibleVersionsMap { - s.SourceVersion = &v - return s -} - -// SetTargetVersions sets the TargetVersions field's value. -func (s *CompatibleVersionsMap) SetTargetVersions(v []*string) *CompatibleVersionsMap { - s.TargetVersions = v +// SetDomainConfig sets the DomainConfig field's value. +func (s *DescribeElasticsearchDomainConfigOutput) SetDomainConfig(v *ElasticsearchDomainConfig) *DescribeElasticsearchDomainConfigOutput { + s.DomainConfig = v return s } -type CreateElasticsearchDomainInput struct { +// Container for the parameters to the DescribeElasticsearchDomain operation. +type DescribeElasticsearchDomainInput struct { _ struct{} `type:"structure"` - // IAM access policy as a JSON-formatted string. - AccessPolicies *string `type:"string"` - - // Option to allow references to indices in an HTTP request body. Must be false - // when configuring access to individual sub-resources. By default, the value - // is true. See Configuration Advanced Options (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-advanced-options) - // for more information. - AdvancedOptions map[string]*string `type:"map"` - - // Specifies advanced security options. - AdvancedSecurityOptions *AdvancedSecurityOptionsInput `type:"structure"` - - // Options to specify the Cognito user and identity pools for Kibana authentication. - // For more information, see Amazon Cognito Authentication for Kibana (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-cognito-auth.html). - CognitoOptions *CognitoOptions `type:"structure"` - - // Options to specify configuration that will be applied to the domain endpoint. - DomainEndpointOptions *DomainEndpointOptions `type:"structure"` - - // The name of the Elasticsearch domain that you are creating. Domain names - // are unique across the domains owned by an account within an AWS region. Domain - // names must start with a lowercase letter and can contain the following characters: - // a-z (lowercase), 0-9, and - (hyphen). + // The name of the Elasticsearch domain for which you want information. // // DomainName is a required field - DomainName *string `min:"3" type:"string" required:"true"` - - // Options to enable, disable and specify the type and size of EBS storage volumes. - EBSOptions *EBSOptions `type:"structure"` - - // Configuration options for an Elasticsearch domain. Specifies the instance - // type and number of instances in the domain cluster. - ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure"` - - // String of format X.Y to specify version for the Elasticsearch domain eg. - // "1.5" or "2.3". For more information, see Creating Elasticsearch Domains - // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomains) - // in the Amazon Elasticsearch Service Developer Guide. 
- ElasticsearchVersion *string `type:"string"` - - // Specifies the Encryption At Rest Options. - EncryptionAtRestOptions *EncryptionAtRestOptions `type:"structure"` - - // Map of LogType and LogPublishingOption, each containing options to publish - // a given type of Elasticsearch log. - LogPublishingOptions map[string]*LogPublishingOption `type:"map"` - - // Specifies the NodeToNodeEncryptionOptions. - NodeToNodeEncryptionOptions *NodeToNodeEncryptionOptions `type:"structure"` - - // Option to set time, in UTC format, of the daily automated snapshot. Default - // value is 0 hours. - SnapshotOptions *SnapshotOptions `type:"structure"` - - // Options to specify the subnets and security groups for VPC endpoint. For - // more information, see Creating a VPC (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-creating-vpc) - // in VPC Endpoints for Amazon Elasticsearch Service Domains - VPCOptions *VPCOptions `type:"structure"` + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` } // String returns the string representation -func (s CreateElasticsearchDomainInput) String() string { +func (s DescribeElasticsearchDomainInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateElasticsearchDomainInput) GoString() string { +func (s DescribeElasticsearchDomainInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateElasticsearchDomainInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateElasticsearchDomainInput"} +func (s *DescribeElasticsearchDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeElasticsearchDomainInput"} if s.DomainName == nil { invalidParams.Add(request.NewErrParamRequired("DomainName")) } if s.DomainName != nil && len(*s.DomainName) < 3 { invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) } - if s.AdvancedSecurityOptions != nil { - if err := s.AdvancedSecurityOptions.Validate(); err != nil { - invalidParams.AddNested("AdvancedSecurityOptions", err.(request.ErrInvalidParams)) - } - } - if s.CognitoOptions != nil { - if err := s.CognitoOptions.Validate(); err != nil { - invalidParams.AddNested("CognitoOptions", err.(request.ErrInvalidParams)) - } - } - if s.EncryptionAtRestOptions != nil { - if err := s.EncryptionAtRestOptions.Validate(); err != nil { - invalidParams.AddNested("EncryptionAtRestOptions", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -3129,145 +5709,155 @@ func (s *CreateElasticsearchDomainInput) Validate() error { return nil } -// SetAccessPolicies sets the AccessPolicies field's value. -func (s *CreateElasticsearchDomainInput) SetAccessPolicies(v string) *CreateElasticsearchDomainInput { - s.AccessPolicies = &v - return s -} - -// SetAdvancedOptions sets the AdvancedOptions field's value. -func (s *CreateElasticsearchDomainInput) SetAdvancedOptions(v map[string]*string) *CreateElasticsearchDomainInput { - s.AdvancedOptions = v +// SetDomainName sets the DomainName field's value. +func (s *DescribeElasticsearchDomainInput) SetDomainName(v string) *DescribeElasticsearchDomainInput { + s.DomainName = &v return s } -// SetAdvancedSecurityOptions sets the AdvancedSecurityOptions field's value. 
-func (s *CreateElasticsearchDomainInput) SetAdvancedSecurityOptions(v *AdvancedSecurityOptionsInput) *CreateElasticsearchDomainInput { - s.AdvancedSecurityOptions = v - return s -} +// The result of a DescribeElasticsearchDomain request. Contains the status +// of the domain specified in the request. +type DescribeElasticsearchDomainOutput struct { + _ struct{} `type:"structure"` -// SetCognitoOptions sets the CognitoOptions field's value. -func (s *CreateElasticsearchDomainInput) SetCognitoOptions(v *CognitoOptions) *CreateElasticsearchDomainInput { - s.CognitoOptions = v - return s + // The current status of the Elasticsearch domain. + // + // DomainStatus is a required field + DomainStatus *ElasticsearchDomainStatus `type:"structure" required:"true"` } -// SetDomainEndpointOptions sets the DomainEndpointOptions field's value. -func (s *CreateElasticsearchDomainInput) SetDomainEndpointOptions(v *DomainEndpointOptions) *CreateElasticsearchDomainInput { - s.DomainEndpointOptions = v - return s +// String returns the string representation +func (s DescribeElasticsearchDomainOutput) String() string { + return awsutil.Prettify(s) } -// SetDomainName sets the DomainName field's value. -func (s *CreateElasticsearchDomainInput) SetDomainName(v string) *CreateElasticsearchDomainInput { - s.DomainName = &v - return s +// GoString returns the string representation +func (s DescribeElasticsearchDomainOutput) GoString() string { + return s.String() } -// SetEBSOptions sets the EBSOptions field's value. -func (s *CreateElasticsearchDomainInput) SetEBSOptions(v *EBSOptions) *CreateElasticsearchDomainInput { - s.EBSOptions = v +// SetDomainStatus sets the DomainStatus field's value. +func (s *DescribeElasticsearchDomainOutput) SetDomainStatus(v *ElasticsearchDomainStatus) *DescribeElasticsearchDomainOutput { + s.DomainStatus = v return s } -// SetElasticsearchClusterConfig sets the ElasticsearchClusterConfig field's value. -func (s *CreateElasticsearchDomainInput) SetElasticsearchClusterConfig(v *ElasticsearchClusterConfig) *CreateElasticsearchDomainInput { - s.ElasticsearchClusterConfig = v - return s -} +// Container for the parameters to the DescribeElasticsearchDomains operation. +// By default, the API returns the status of all Elasticsearch domains. +type DescribeElasticsearchDomainsInput struct { + _ struct{} `type:"structure"` -// SetElasticsearchVersion sets the ElasticsearchVersion field's value. -func (s *CreateElasticsearchDomainInput) SetElasticsearchVersion(v string) *CreateElasticsearchDomainInput { - s.ElasticsearchVersion = &v - return s + // The Elasticsearch domains for which you want information. + // + // DomainNames is a required field + DomainNames []*string `type:"list" required:"true"` } -// SetEncryptionAtRestOptions sets the EncryptionAtRestOptions field's value. -func (s *CreateElasticsearchDomainInput) SetEncryptionAtRestOptions(v *EncryptionAtRestOptions) *CreateElasticsearchDomainInput { - s.EncryptionAtRestOptions = v - return s +// String returns the string representation +func (s DescribeElasticsearchDomainsInput) String() string { + return awsutil.Prettify(s) } -// SetLogPublishingOptions sets the LogPublishingOptions field's value. 
-func (s *CreateElasticsearchDomainInput) SetLogPublishingOptions(v map[string]*LogPublishingOption) *CreateElasticsearchDomainInput { - s.LogPublishingOptions = v - return s +// GoString returns the string representation +func (s DescribeElasticsearchDomainsInput) GoString() string { + return s.String() } -// SetNodeToNodeEncryptionOptions sets the NodeToNodeEncryptionOptions field's value. -func (s *CreateElasticsearchDomainInput) SetNodeToNodeEncryptionOptions(v *NodeToNodeEncryptionOptions) *CreateElasticsearchDomainInput { - s.NodeToNodeEncryptionOptions = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeElasticsearchDomainsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeElasticsearchDomainsInput"} + if s.DomainNames == nil { + invalidParams.Add(request.NewErrParamRequired("DomainNames")) + } -// SetSnapshotOptions sets the SnapshotOptions field's value. -func (s *CreateElasticsearchDomainInput) SetSnapshotOptions(v *SnapshotOptions) *CreateElasticsearchDomainInput { - s.SnapshotOptions = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetVPCOptions sets the VPCOptions field's value. -func (s *CreateElasticsearchDomainInput) SetVPCOptions(v *VPCOptions) *CreateElasticsearchDomainInput { - s.VPCOptions = v +// SetDomainNames sets the DomainNames field's value. +func (s *DescribeElasticsearchDomainsInput) SetDomainNames(v []*string) *DescribeElasticsearchDomainsInput { + s.DomainNames = v return s } -// The result of a CreateElasticsearchDomain operation. Contains the status -// of the newly created Elasticsearch domain. -type CreateElasticsearchDomainOutput struct { +// The result of a DescribeElasticsearchDomains request. Contains the status +// of the specified domains or all domains owned by the account. +type DescribeElasticsearchDomainsOutput struct { _ struct{} `type:"structure"` - // The status of the newly created Elasticsearch domain. - DomainStatus *ElasticsearchDomainStatus `type:"structure"` + // The status of the domains requested in the DescribeElasticsearchDomains request. + // + // DomainStatusList is a required field + DomainStatusList []*ElasticsearchDomainStatus `type:"list" required:"true"` } // String returns the string representation -func (s CreateElasticsearchDomainOutput) String() string { +func (s DescribeElasticsearchDomainsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateElasticsearchDomainOutput) GoString() string { +func (s DescribeElasticsearchDomainsOutput) GoString() string { return s.String() } -// SetDomainStatus sets the DomainStatus field's value. -func (s *CreateElasticsearchDomainOutput) SetDomainStatus(v *ElasticsearchDomainStatus) *CreateElasticsearchDomainOutput { - s.DomainStatus = v +// SetDomainStatusList sets the DomainStatusList field's value. +func (s *DescribeElasticsearchDomainsOutput) SetDomainStatusList(v []*ElasticsearchDomainStatus) *DescribeElasticsearchDomainsOutput { + s.DomainStatusList = v return s } -// Container for the parameters to the DeleteElasticsearchDomain operation. -// Specifies the name of the Elasticsearch domain that you want to delete. -type DeleteElasticsearchDomainInput struct { +// Container for the parameters to DescribeElasticsearchInstanceTypeLimits operation. 
+type DescribeElasticsearchInstanceTypeLimitsInput struct { _ struct{} `type:"structure"` - // The name of the Elasticsearch domain that you want to permanently delete. + // DomainName represents the name of the Domain that we are trying to modify. + // This should be present only if we are querying for Elasticsearch Limits for + // existing domain. + DomainName *string `location:"querystring" locationName:"domainName" min:"3" type:"string"` + + // Version of Elasticsearch for which Limits are needed. // - // DomainName is a required field - DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` + // ElasticsearchVersion is a required field + ElasticsearchVersion *string `location:"uri" locationName:"ElasticsearchVersion" type:"string" required:"true"` + + // The instance type for an Elasticsearch cluster for which Elasticsearch Limits + // are needed. + // + // InstanceType is a required field + InstanceType *string `location:"uri" locationName:"InstanceType" type:"string" required:"true" enum:"ESPartitionInstanceType"` } // String returns the string representation -func (s DeleteElasticsearchDomainInput) String() string { +func (s DescribeElasticsearchInstanceTypeLimitsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteElasticsearchDomainInput) GoString() string { +func (s DescribeElasticsearchInstanceTypeLimitsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteElasticsearchDomainInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteElasticsearchDomainInput"} - if s.DomainName == nil { - invalidParams.Add(request.NewErrParamRequired("DomainName")) - } +func (s *DescribeElasticsearchInstanceTypeLimitsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeElasticsearchInstanceTypeLimitsInput"} if s.DomainName != nil && len(*s.DomainName) < 3 { invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) } + if s.ElasticsearchVersion == nil { + invalidParams.Add(request.NewErrParamRequired("ElasticsearchVersion")) + } + if s.ElasticsearchVersion != nil && len(*s.ElasticsearchVersion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ElasticsearchVersion", 1)) + } + if s.InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceType")) + } + if s.InstanceType != nil && len(*s.InstanceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceType", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3276,94 +5866,103 @@ func (s *DeleteElasticsearchDomainInput) Validate() error { } // SetDomainName sets the DomainName field's value. -func (s *DeleteElasticsearchDomainInput) SetDomainName(v string) *DeleteElasticsearchDomainInput { +func (s *DescribeElasticsearchInstanceTypeLimitsInput) SetDomainName(v string) *DescribeElasticsearchInstanceTypeLimitsInput { s.DomainName = &v return s } -// The result of a DeleteElasticsearchDomain request. Contains the status of -// the pending deletion, or no status if the domain and all of its resources -// have been deleted. -type DeleteElasticsearchDomainOutput struct { - _ struct{} `type:"structure"` - - // The status of the Elasticsearch domain being deleted. 
- DomainStatus *ElasticsearchDomainStatus `type:"structure"` -} - -// String returns the string representation -func (s DeleteElasticsearchDomainOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteElasticsearchDomainOutput) GoString() string { - return s.String() +// SetElasticsearchVersion sets the ElasticsearchVersion field's value. +func (s *DescribeElasticsearchInstanceTypeLimitsInput) SetElasticsearchVersion(v string) *DescribeElasticsearchInstanceTypeLimitsInput { + s.ElasticsearchVersion = &v + return s } -// SetDomainStatus sets the DomainStatus field's value. -func (s *DeleteElasticsearchDomainOutput) SetDomainStatus(v *ElasticsearchDomainStatus) *DeleteElasticsearchDomainOutput { - s.DomainStatus = v +// SetInstanceType sets the InstanceType field's value. +func (s *DescribeElasticsearchInstanceTypeLimitsInput) SetInstanceType(v string) *DescribeElasticsearchInstanceTypeLimitsInput { + s.InstanceType = &v return s } -type DeleteElasticsearchServiceRoleInput struct { +// Container for the parameters received from DescribeElasticsearchInstanceTypeLimits +// operation. +type DescribeElasticsearchInstanceTypeLimitsOutput struct { _ struct{} `type:"structure"` + + // Map of Role of the Instance and Limits that are applicable. Role performed + // by given Instance in Elasticsearch can be one of the following: + // * data: If the given InstanceType is used as data node + // + // * master: If the given InstanceType is used as master node + // + // * ultra_warm: If the given InstanceType is used as warm node + LimitsByRole map[string]*Limits `type:"map"` } // String returns the string representation -func (s DeleteElasticsearchServiceRoleInput) String() string { +func (s DescribeElasticsearchInstanceTypeLimitsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteElasticsearchServiceRoleInput) GoString() string { +func (s DescribeElasticsearchInstanceTypeLimitsOutput) GoString() string { return s.String() } -type DeleteElasticsearchServiceRoleOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteElasticsearchServiceRoleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteElasticsearchServiceRoleOutput) GoString() string { - return s.String() +// SetLimitsByRole sets the LimitsByRole field's value. +func (s *DescribeElasticsearchInstanceTypeLimitsOutput) SetLimitsByRole(v map[string]*Limits) *DescribeElasticsearchInstanceTypeLimitsOutput { + s.LimitsByRole = v + return s } -// Container for the parameters to the DescribeElasticsearchDomainConfig operation. -// Specifies the domain name for which you want configuration information. -type DescribeElasticsearchDomainConfigInput struct { +// Container for the parameters to the DescribeInboundCrossClusterSearchConnections +// operation. +type DescribeInboundCrossClusterSearchConnectionsInput struct { _ struct{} `type:"structure"` - // The Elasticsearch domain that you want to get information about. + // A list of filters used to match properties for inbound cross-cluster search + // connection. 
Available Filter names for this operation are: + // * cross-cluster-search-connection-id // - // DomainName is a required field - DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` + // * source-domain-info.domain-name + // + // * source-domain-info.owner-id + // + // * source-domain-info.region + // + // * destination-domain-info.domain-name + Filters []*Filter `type:"list"` + + // Set this value to limit the number of results returned. If not specified, + // defaults to 100. + MaxResults *int64 `type:"integer"` + + // NextToken is sent in case the earlier API call results contain the NextToken. + // It is used for pagination. + NextToken *string `type:"string"` } // String returns the string representation -func (s DescribeElasticsearchDomainConfigInput) String() string { +func (s DescribeInboundCrossClusterSearchConnectionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeElasticsearchDomainConfigInput) GoString() string { +func (s DescribeInboundCrossClusterSearchConnectionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeElasticsearchDomainConfigInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeElasticsearchDomainConfigInput"} - if s.DomainName == nil { - invalidParams.Add(request.NewErrParamRequired("DomainName")) - } - if s.DomainName != nil && len(*s.DomainName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) +func (s *DescribeInboundCrossClusterSearchConnectionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInboundCrossClusterSearchConnectionsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -3372,68 +5971,109 @@ func (s *DescribeElasticsearchDomainConfigInput) Validate() error { return nil } -// SetDomainName sets the DomainName field's value. -func (s *DescribeElasticsearchDomainConfigInput) SetDomainName(v string) *DescribeElasticsearchDomainConfigInput { - s.DomainName = &v +// SetFilters sets the Filters field's value. +func (s *DescribeInboundCrossClusterSearchConnectionsInput) SetFilters(v []*Filter) *DescribeInboundCrossClusterSearchConnectionsInput { + s.Filters = v return s } -// The result of a DescribeElasticsearchDomainConfig request. Contains the configuration -// information of the requested domain. -type DescribeElasticsearchDomainConfigOutput struct { +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeInboundCrossClusterSearchConnectionsInput) SetMaxResults(v int64) *DescribeInboundCrossClusterSearchConnectionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeInboundCrossClusterSearchConnectionsInput) SetNextToken(v string) *DescribeInboundCrossClusterSearchConnectionsInput { + s.NextToken = &v + return s +} + +// The result of a DescribeInboundCrossClusterSearchConnections request. Contains +// the list of connections matching the filter criteria. +type DescribeInboundCrossClusterSearchConnectionsOutput struct { _ struct{} `type:"structure"` - // The configuration information of the domain requested in the DescribeElasticsearchDomainConfig - // request. 
- // - // DomainConfig is a required field - DomainConfig *ElasticsearchDomainConfig `type:"structure" required:"true"` + // Consists of list of InboundCrossClusterSearchConnection matching the specified + // filter criteria. + CrossClusterSearchConnections []*InboundCrossClusterSearchConnection `type:"list"` + + // If more results are available and NextToken is present, make the next request + // to the same API with the received NextToken to paginate the remaining results. + NextToken *string `type:"string"` } // String returns the string representation -func (s DescribeElasticsearchDomainConfigOutput) String() string { +func (s DescribeInboundCrossClusterSearchConnectionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeElasticsearchDomainConfigOutput) GoString() string { +func (s DescribeInboundCrossClusterSearchConnectionsOutput) GoString() string { return s.String() } -// SetDomainConfig sets the DomainConfig field's value. -func (s *DescribeElasticsearchDomainConfigOutput) SetDomainConfig(v *ElasticsearchDomainConfig) *DescribeElasticsearchDomainConfigOutput { - s.DomainConfig = v +// SetCrossClusterSearchConnections sets the CrossClusterSearchConnections field's value. +func (s *DescribeInboundCrossClusterSearchConnectionsOutput) SetCrossClusterSearchConnections(v []*InboundCrossClusterSearchConnection) *DescribeInboundCrossClusterSearchConnectionsOutput { + s.CrossClusterSearchConnections = v return s } -// Container for the parameters to the DescribeElasticsearchDomain operation. -type DescribeElasticsearchDomainInput struct { +// SetNextToken sets the NextToken field's value. +func (s *DescribeInboundCrossClusterSearchConnectionsOutput) SetNextToken(v string) *DescribeInboundCrossClusterSearchConnectionsOutput { + s.NextToken = &v + return s +} + +// Container for the parameters to the DescribeOutboundCrossClusterSearchConnections +// operation. +type DescribeOutboundCrossClusterSearchConnectionsInput struct { _ struct{} `type:"structure"` - // The name of the Elasticsearch domain for which you want information. + // A list of filters used to match properties for outbound cross-cluster search + // connection. Available Filter names for this operation are: + // * cross-cluster-search-connection-id // - // DomainName is a required field - DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` + // * destination-domain-info.domain-name + // + // * destination-domain-info.owner-id + // + // * destination-domain-info.region + // + // * source-domain-info.domain-name + Filters []*Filter `type:"list"` + + // Set this value to limit the number of results returned. If not specified, + // defaults to 100. + MaxResults *int64 `type:"integer"` + + // NextToken is sent in case the earlier API call results contain the NextToken. + // It is used for pagination. + NextToken *string `type:"string"` } // String returns the string representation -func (s DescribeElasticsearchDomainInput) String() string { +func (s DescribeOutboundCrossClusterSearchConnectionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeElasticsearchDomainInput) GoString() string { +func (s DescribeOutboundCrossClusterSearchConnectionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeElasticsearchDomainInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeElasticsearchDomainInput"} - if s.DomainName == nil { - invalidParams.Add(request.NewErrParamRequired("DomainName")) - } - if s.DomainName != nil && len(*s.DomainName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) +func (s *DescribeOutboundCrossClusterSearchConnectionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeOutboundCrossClusterSearchConnectionsInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -3442,208 +6082,165 @@ func (s *DescribeElasticsearchDomainInput) Validate() error { return nil } -// SetDomainName sets the DomainName field's value. -func (s *DescribeElasticsearchDomainInput) SetDomainName(v string) *DescribeElasticsearchDomainInput { - s.DomainName = &v +// SetFilters sets the Filters field's value. +func (s *DescribeOutboundCrossClusterSearchConnectionsInput) SetFilters(v []*Filter) *DescribeOutboundCrossClusterSearchConnectionsInput { + s.Filters = v return s } -// The result of a DescribeElasticsearchDomain request. Contains the status -// of the domain specified in the request. -type DescribeElasticsearchDomainOutput struct { - _ struct{} `type:"structure"` - - // The current status of the Elasticsearch domain. - // - // DomainStatus is a required field - DomainStatus *ElasticsearchDomainStatus `type:"structure" required:"true"` -} - -// String returns the string representation -func (s DescribeElasticsearchDomainOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeElasticsearchDomainOutput) GoString() string { - return s.String() +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeOutboundCrossClusterSearchConnectionsInput) SetMaxResults(v int64) *DescribeOutboundCrossClusterSearchConnectionsInput { + s.MaxResults = &v + return s } -// SetDomainStatus sets the DomainStatus field's value. -func (s *DescribeElasticsearchDomainOutput) SetDomainStatus(v *ElasticsearchDomainStatus) *DescribeElasticsearchDomainOutput { - s.DomainStatus = v +// SetNextToken sets the NextToken field's value. +func (s *DescribeOutboundCrossClusterSearchConnectionsInput) SetNextToken(v string) *DescribeOutboundCrossClusterSearchConnectionsInput { + s.NextToken = &v return s } -// Container for the parameters to the DescribeElasticsearchDomains operation. -// By default, the API returns the status of all Elasticsearch domains. -type DescribeElasticsearchDomainsInput struct { +// The result of a DescribeOutboundCrossClusterSearchConnections request. Contains +// the list of connections matching the filter criteria. +type DescribeOutboundCrossClusterSearchConnectionsOutput struct { _ struct{} `type:"structure"` - // The Elasticsearch domains for which you want information. - // - // DomainNames is a required field - DomainNames []*string `type:"list" required:"true"` + // Consists of list of OutboundCrossClusterSearchConnection matching the specified + // filter criteria. 
+ CrossClusterSearchConnections []*OutboundCrossClusterSearchConnection `type:"list"` + + // If more results are available and NextToken is present, make the next request + // to the same API with the received NextToken to paginate the remaining results. + NextToken *string `type:"string"` } // String returns the string representation -func (s DescribeElasticsearchDomainsInput) String() string { +func (s DescribeOutboundCrossClusterSearchConnectionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeElasticsearchDomainsInput) GoString() string { +func (s DescribeOutboundCrossClusterSearchConnectionsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeElasticsearchDomainsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeElasticsearchDomainsInput"} - if s.DomainNames == nil { - invalidParams.Add(request.NewErrParamRequired("DomainNames")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCrossClusterSearchConnections sets the CrossClusterSearchConnections field's value. +func (s *DescribeOutboundCrossClusterSearchConnectionsOutput) SetCrossClusterSearchConnections(v []*OutboundCrossClusterSearchConnection) *DescribeOutboundCrossClusterSearchConnectionsOutput { + s.CrossClusterSearchConnections = v + return s } -// SetDomainNames sets the DomainNames field's value. -func (s *DescribeElasticsearchDomainsInput) SetDomainNames(v []*string) *DescribeElasticsearchDomainsInput { - s.DomainNames = v +// SetNextToken sets the NextToken field's value. +func (s *DescribeOutboundCrossClusterSearchConnectionsOutput) SetNextToken(v string) *DescribeOutboundCrossClusterSearchConnectionsOutput { + s.NextToken = &v return s } -// The result of a DescribeElasticsearchDomains request. Contains the status -// of the specified domains or all domains owned by the account. -type DescribeElasticsearchDomainsOutput struct { +// Filter to apply in DescribePackage response. +type DescribePackagesFilter struct { _ struct{} `type:"structure"` - // The status of the domains requested in the DescribeElasticsearchDomains request. - // - // DomainStatusList is a required field - DomainStatusList []*ElasticsearchDomainStatus `type:"list" required:"true"` + // Any field from PackageDetails. + Name *string `type:"string" enum:"DescribePackagesFilterName"` + + // A list of values for the specified field. + Value []*string `type:"list"` } // String returns the string representation -func (s DescribeElasticsearchDomainsOutput) String() string { +func (s DescribePackagesFilter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeElasticsearchDomainsOutput) GoString() string { +func (s DescribePackagesFilter) GoString() string { return s.String() } -// SetDomainStatusList sets the DomainStatusList field's value. -func (s *DescribeElasticsearchDomainsOutput) SetDomainStatusList(v []*ElasticsearchDomainStatus) *DescribeElasticsearchDomainsOutput { - s.DomainStatusList = v +// SetName sets the Name field's value. +func (s *DescribePackagesFilter) SetName(v string) *DescribePackagesFilter { + s.Name = &v return s } -// Container for the parameters to DescribeElasticsearchInstanceTypeLimits operation. -type DescribeElasticsearchInstanceTypeLimitsInput struct { +// SetValue sets the Value field's value. 
+func (s *DescribePackagesFilter) SetValue(v []*string) *DescribePackagesFilter { + s.Value = v + return s +} + +// Container for request parameters to DescribePackage operation. +type DescribePackagesInput struct { _ struct{} `type:"structure"` - // DomainName represents the name of the Domain that we are trying to modify. - // This should be present only if we are querying for Elasticsearch Limits for - // existing domain. - DomainName *string `location:"querystring" locationName:"domainName" min:"3" type:"string"` + // Only returns packages that match the DescribePackagesFilterList values. + Filters []*DescribePackagesFilter `type:"list"` - // Version of Elasticsearch for which Limits are needed. - // - // ElasticsearchVersion is a required field - ElasticsearchVersion *string `location:"uri" locationName:"ElasticsearchVersion" type:"string" required:"true"` + // Limits results to a maximum number of packages. + MaxResults *int64 `type:"integer"` - // The instance type for an Elasticsearch cluster for which Elasticsearch Limits - // are needed. - // - // InstanceType is a required field - InstanceType *string `location:"uri" locationName:"InstanceType" type:"string" required:"true" enum:"ESPartitionInstanceType"` + // Used for pagination. Only necessary if a previous API call includes a non-null + // NextToken value. If provided, returns results for the next page. + NextToken *string `type:"string"` } // String returns the string representation -func (s DescribeElasticsearchInstanceTypeLimitsInput) String() string { +func (s DescribePackagesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeElasticsearchInstanceTypeLimitsInput) GoString() string { +func (s DescribePackagesInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeElasticsearchInstanceTypeLimitsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeElasticsearchInstanceTypeLimitsInput"} - if s.DomainName != nil && len(*s.DomainName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) - } - if s.ElasticsearchVersion == nil { - invalidParams.Add(request.NewErrParamRequired("ElasticsearchVersion")) - } - if s.ElasticsearchVersion != nil && len(*s.ElasticsearchVersion) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ElasticsearchVersion", 1)) - } - if s.InstanceType == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceType")) - } - if s.InstanceType != nil && len(*s.InstanceType) < 1 { - invalidParams.Add(request.NewErrParamMinLen("InstanceType", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDomainName sets the DomainName field's value. -func (s *DescribeElasticsearchInstanceTypeLimitsInput) SetDomainName(v string) *DescribeElasticsearchInstanceTypeLimitsInput { - s.DomainName = &v +// SetFilters sets the Filters field's value. +func (s *DescribePackagesInput) SetFilters(v []*DescribePackagesFilter) *DescribePackagesInput { + s.Filters = v return s } -// SetElasticsearchVersion sets the ElasticsearchVersion field's value. -func (s *DescribeElasticsearchInstanceTypeLimitsInput) SetElasticsearchVersion(v string) *DescribeElasticsearchInstanceTypeLimitsInput { - s.ElasticsearchVersion = &v +// SetMaxResults sets the MaxResults field's value. 
+func (s *DescribePackagesInput) SetMaxResults(v int64) *DescribePackagesInput { + s.MaxResults = &v return s } -// SetInstanceType sets the InstanceType field's value. -func (s *DescribeElasticsearchInstanceTypeLimitsInput) SetInstanceType(v string) *DescribeElasticsearchInstanceTypeLimitsInput { - s.InstanceType = &v +// SetNextToken sets the NextToken field's value. +func (s *DescribePackagesInput) SetNextToken(v string) *DescribePackagesInput { + s.NextToken = &v return s } -// Container for the parameters received from DescribeElasticsearchInstanceTypeLimits -// operation. -type DescribeElasticsearchInstanceTypeLimitsOutput struct { +// Container for response returned by DescribePackages operation. +type DescribePackagesOutput struct { _ struct{} `type:"structure"` - // Map of Role of the Instance and Limits that are applicable. Role performed - // by given Instance in Elasticsearch can be one of the following: - // * data: If the given InstanceType is used as data node - // - // * master: If the given InstanceType is used as master node - // - // * ultra_warm: If the given InstanceType is used as warm node - LimitsByRole map[string]*Limits `type:"map"` + NextToken *string `type:"string"` + + // List of PackageDetails objects. + PackageDetailsList []*PackageDetails `type:"list"` } // String returns the string representation -func (s DescribeElasticsearchInstanceTypeLimitsOutput) String() string { +func (s DescribePackagesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeElasticsearchInstanceTypeLimitsOutput) GoString() string { +func (s DescribePackagesOutput) GoString() string { return s.String() } -// SetLimitsByRole sets the LimitsByRole field's value. -func (s *DescribeElasticsearchInstanceTypeLimitsOutput) SetLimitsByRole(v map[string]*Limits) *DescribeElasticsearchInstanceTypeLimitsOutput { - s.LimitsByRole = v +// SetNextToken sets the NextToken field's value. +func (s *DescribePackagesOutput) SetNextToken(v string) *DescribePackagesOutput { + s.NextToken = &v + return s +} + +// SetPackageDetailsList sets the PackageDetailsList field's value. +func (s *DescribePackagesOutput) SetPackageDetailsList(v []*PackageDetails) *DescribePackagesOutput { + s.PackageDetailsList = v return s } @@ -3807,8 +6404,8 @@ func (s *DescribeReservedElasticsearchInstancesOutput) SetReservedElasticsearchI // An error occured because the client wanted to access a not supported operation. // Gives http status code of 409. type DisabledOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3825,17 +6422,17 @@ func (s DisabledOperationException) GoString() string { func newErrorDisabledOperationException(v protocol.ResponseMetadata) error { return &DisabledOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DisabledOperationException) Code() string { +func (s *DisabledOperationException) Code() string { return "DisabledOperationException" } // Message returns the exception's message. -func (s DisabledOperationException) Message() string { +func (s *DisabledOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3843,22 +6440,106 @@ func (s DisabledOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s DisabledOperationException) OrigErr() error { +func (s *DisabledOperationException) OrigErr() error { return nil } -func (s DisabledOperationException) Error() string { +func (s *DisabledOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DisabledOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DisabledOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DisabledOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *DisabledOperationException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Container for request parameters to DissociatePackage operation. +type DissociatePackageInput struct { + _ struct{} `type:"structure"` + + // Name of the domain that you want to associate the package with. + // + // DomainName is a required field + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` + + // Internal ID of the package that you want to associate with a domain. Use + // DescribePackages to find this value. + // + // PackageID is a required field + PackageID *string `location:"uri" locationName:"PackageID" type:"string" required:"true"` +} + +// String returns the string representation +func (s DissociatePackageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DissociatePackageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DissociatePackageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DissociatePackageInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.PackageID == nil { + invalidParams.Add(request.NewErrParamRequired("PackageID")) + } + if s.PackageID != nil && len(*s.PackageID) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PackageID", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomainName sets the DomainName field's value. +func (s *DissociatePackageInput) SetDomainName(v string) *DissociatePackageInput { + s.DomainName = &v + return s +} + +// SetPackageID sets the PackageID field's value. +func (s *DissociatePackageInput) SetPackageID(v string) *DissociatePackageInput { + s.PackageID = &v + return s +} + +// Container for response returned by DissociatePackage operation. +type DissociatePackageOutput struct { + _ struct{} `type:"structure"` + + // DomainPackageDetails + DomainPackageDetails *DomainPackageDetails `type:"structure"` +} + +// String returns the string representation +func (s DissociatePackageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DissociatePackageOutput) GoString() string { + return s.String() +} + +// SetDomainPackageDetails sets the DomainPackageDetails field's value. +func (s *DissociatePackageOutput) SetDomainPackageDetails(v *DomainPackageDetails) *DissociatePackageOutput { + s.DomainPackageDetails = v + return s } // Options to configure endpoint for the Elasticsearch domain. 
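// Illustrative usage sketch, assuming the generated ElasticsearchService client
// exposes DescribePackages and DissociatePackage for the Input/Output shapes added
// above (the client methods themselves are defined elsewhere in this file). The
// domain name "example-domain", the filter name "PackageName", and the filter
// value "my-synonyms" are placeholder assumptions, not values from this patch.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

func main() {
	svc := elasticsearchservice.New(session.Must(session.NewSession()))

	// Build a DescribePackagesFilter with the chainable setters generated above.
	filter := (&elasticsearchservice.DescribePackagesFilter{}).
		SetName("PackageName").
		SetValue([]*string{aws.String("my-synonyms")})

	input := (&elasticsearchservice.DescribePackagesInput{}).
		SetFilters([]*elasticsearchservice.DescribePackagesFilter{filter}).
		SetMaxResults(10)

	for {
		out, err := svc.DescribePackages(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, pkg := range out.PackageDetailsList {
			fmt.Println(aws.StringValue(pkg.PackageID), aws.StringValue(pkg.PackageStatus))

			// Dissociate each matching package from the placeholder domain.
			if _, err := svc.DissociatePackage((&elasticsearchservice.DissociatePackageInput{}).
				SetDomainName("example-domain").
				SetPackageID(aws.StringValue(pkg.PackageID))); err != nil {
				log.Fatal(err)
			}
		}
		// NextToken is absent on the last page; otherwise feed it back in to paginate.
		if out.NextToken == nil || aws.StringValue(out.NextToken) == "" {
			break
		}
		input.SetNextToken(aws.StringValue(out.NextToken))
	}
}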
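// A second sketch under the same assumptions: paging through outbound
// cross-cluster search connections, matched on one of the filter names listed
// on DescribeOutboundCrossClusterSearchConnectionsInput above. The destination
// domain name "dest-domain" is a placeholder, not a value from this patch.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

func main() {
	svc := elasticsearchservice.New(session.Must(session.NewSession()))

	// Match connections whose destination domain is "dest-domain".
	filter := (&elasticsearchservice.Filter{}).
		SetName("destination-domain-info.domain-name").
		SetValues([]*string{aws.String("dest-domain")})

	input := (&elasticsearchservice.DescribeOutboundCrossClusterSearchConnectionsInput{}).
		SetFilters([]*elasticsearchservice.Filter{filter}).
		SetMaxResults(50)

	for {
		out, err := svc.DescribeOutboundCrossClusterSearchConnections(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, conn := range out.CrossClusterSearchConnections {
			// ConnectionStatus may be nil, so guard before reading the state code.
			status := ""
			if conn.ConnectionStatus != nil {
				status = aws.StringValue(conn.ConnectionStatus.StatusCode)
			}
			fmt.Println(aws.StringValue(conn.CrossClusterSearchConnectionId), status)
		}
		if out.NextToken == nil || aws.StringValue(out.NextToken) == "" {
			break
		}
		input.SetNextToken(aws.StringValue(out.NextToken))
	}
}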
@@ -3888,76 +6569,227 @@ func (s DomainEndpointOptions) GoString() string { return s.String() } -// SetEnforceHTTPS sets the EnforceHTTPS field's value. -func (s *DomainEndpointOptions) SetEnforceHTTPS(v bool) *DomainEndpointOptions { - s.EnforceHTTPS = &v +// SetEnforceHTTPS sets the EnforceHTTPS field's value. +func (s *DomainEndpointOptions) SetEnforceHTTPS(v bool) *DomainEndpointOptions { + s.EnforceHTTPS = &v + return s +} + +// SetTLSSecurityPolicy sets the TLSSecurityPolicy field's value. +func (s *DomainEndpointOptions) SetTLSSecurityPolicy(v string) *DomainEndpointOptions { + s.TLSSecurityPolicy = &v + return s +} + +// The configured endpoint options for the domain and their current status. +type DomainEndpointOptionsStatus struct { + _ struct{} `type:"structure"` + + // Options to configure endpoint for the Elasticsearch domain. + // + // Options is a required field + Options *DomainEndpointOptions `type:"structure" required:"true"` + + // The status of the endpoint options for the Elasticsearch domain. See OptionStatus + // for the status information that's included. + // + // Status is a required field + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DomainEndpointOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainEndpointOptionsStatus) GoString() string { + return s.String() +} + +// SetOptions sets the Options field's value. +func (s *DomainEndpointOptionsStatus) SetOptions(v *DomainEndpointOptions) *DomainEndpointOptionsStatus { + s.Options = v + return s +} + +// SetStatus sets the Status field's value. +func (s *DomainEndpointOptionsStatus) SetStatus(v *OptionStatus) *DomainEndpointOptionsStatus { + s.Status = v + return s +} + +type DomainInfo struct { + _ struct{} `type:"structure"` + + // Specifies the DomainName. + DomainName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s DomainInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainInfo) GoString() string { + return s.String() +} + +// SetDomainName sets the DomainName field's value. +func (s *DomainInfo) SetDomainName(v string) *DomainInfo { + s.DomainName = &v + return s +} + +type DomainInformation struct { + _ struct{} `type:"structure"` + + // The name of an Elasticsearch domain. Domain names are unique across the domains + // owned by an account within an AWS region. Domain names start with a letter + // or number and can contain the following characters: a-z (lowercase), 0-9, + // and - (hyphen). + // + // DomainName is a required field + DomainName *string `min:"3" type:"string" required:"true"` + + OwnerId *string `min:"12" type:"string"` + + Region *string `type:"string"` +} + +// String returns the string representation +func (s DomainInformation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainInformation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DomainInformation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DomainInformation"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.OwnerId != nil && len(*s.OwnerId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("OwnerId", 12)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomainName sets the DomainName field's value. +func (s *DomainInformation) SetDomainName(v string) *DomainInformation { + s.DomainName = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *DomainInformation) SetOwnerId(v string) *DomainInformation { + s.OwnerId = &v return s } -// SetTLSSecurityPolicy sets the TLSSecurityPolicy field's value. -func (s *DomainEndpointOptions) SetTLSSecurityPolicy(v string) *DomainEndpointOptions { - s.TLSSecurityPolicy = &v +// SetRegion sets the Region field's value. +func (s *DomainInformation) SetRegion(v string) *DomainInformation { + s.Region = &v return s } -// The configured endpoint options for the domain and their current status. -type DomainEndpointOptionsStatus struct { +// Information on a package that is associated with a domain. +type DomainPackageDetails struct { _ struct{} `type:"structure"` - // Options to configure endpoint for the Elasticsearch domain. - // - // Options is a required field - Options *DomainEndpointOptions `type:"structure" required:"true"` + // Name of the domain you've associated a package with. + DomainName *string `min:"3" type:"string"` - // The status of the endpoint options for the Elasticsearch domain. See OptionStatus - // for the status information that's included. - // - // Status is a required field - Status *OptionStatus `type:"structure" required:"true"` + // State of the association. Values are ASSOCIATING/ASSOCIATION_FAILED/ACTIVE/DISSOCIATING/DISSOCIATION_FAILED. + DomainPackageStatus *string `type:"string" enum:"DomainPackageStatus"` + + // Additional information if the package is in an error state. Null otherwise. + ErrorDetails *ErrorDetails `type:"structure"` + + // Timestamp of the most-recent update to the association status. + LastUpdated *time.Time `type:"timestamp"` + + // Internal ID of the package. + PackageID *string `type:"string"` + + // User specified name of the package. + PackageName *string `min:"3" type:"string"` + + // Currently supports only TXT-DICTIONARY. + PackageType *string `type:"string" enum:"PackageType"` + + // The relative path on Amazon ES nodes, which can be used as synonym_path when + // the package is synonym file. + ReferencePath *string `type:"string"` } // String returns the string representation -func (s DomainEndpointOptionsStatus) String() string { +func (s DomainPackageDetails) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DomainEndpointOptionsStatus) GoString() string { +func (s DomainPackageDetails) GoString() string { return s.String() } -// SetOptions sets the Options field's value. -func (s *DomainEndpointOptionsStatus) SetOptions(v *DomainEndpointOptions) *DomainEndpointOptionsStatus { - s.Options = v +// SetDomainName sets the DomainName field's value. +func (s *DomainPackageDetails) SetDomainName(v string) *DomainPackageDetails { + s.DomainName = &v return s } -// SetStatus sets the Status field's value. 
-func (s *DomainEndpointOptionsStatus) SetStatus(v *OptionStatus) *DomainEndpointOptionsStatus { - s.Status = v +// SetDomainPackageStatus sets the DomainPackageStatus field's value. +func (s *DomainPackageDetails) SetDomainPackageStatus(v string) *DomainPackageDetails { + s.DomainPackageStatus = &v return s } -type DomainInfo struct { - _ struct{} `type:"structure"` +// SetErrorDetails sets the ErrorDetails field's value. +func (s *DomainPackageDetails) SetErrorDetails(v *ErrorDetails) *DomainPackageDetails { + s.ErrorDetails = v + return s +} - // Specifies the DomainName. - DomainName *string `min:"3" type:"string"` +// SetLastUpdated sets the LastUpdated field's value. +func (s *DomainPackageDetails) SetLastUpdated(v time.Time) *DomainPackageDetails { + s.LastUpdated = &v + return s } -// String returns the string representation -func (s DomainInfo) String() string { - return awsutil.Prettify(s) +// SetPackageID sets the PackageID field's value. +func (s *DomainPackageDetails) SetPackageID(v string) *DomainPackageDetails { + s.PackageID = &v + return s } -// GoString returns the string representation -func (s DomainInfo) GoString() string { - return s.String() +// SetPackageName sets the PackageName field's value. +func (s *DomainPackageDetails) SetPackageName(v string) *DomainPackageDetails { + s.PackageName = &v + return s } -// SetDomainName sets the DomainName field's value. -func (s *DomainInfo) SetDomainName(v string) *DomainInfo { - s.DomainName = &v +// SetPackageType sets the PackageType field's value. +func (s *DomainPackageDetails) SetPackageType(v string) *DomainPackageDetails { + s.PackageType = &v + return s +} + +// SetReferencePath sets the ReferencePath field's value. +func (s *DomainPackageDetails) SetReferencePath(v string) *DomainPackageDetails { + s.ReferencePath = &v return s } @@ -4708,6 +7540,88 @@ func (s *EncryptionAtRestOptionsStatus) SetStatus(v *OptionStatus) *EncryptionAt return s } +type ErrorDetails struct { + _ struct{} `type:"structure"` + + ErrorMessage *string `type:"string"` + + ErrorType *string `type:"string"` +} + +// String returns the string representation +func (s ErrorDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDetails) GoString() string { + return s.String() +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *ErrorDetails) SetErrorMessage(v string) *ErrorDetails { + s.ErrorMessage = &v + return s +} + +// SetErrorType sets the ErrorType field's value. +func (s *ErrorDetails) SetErrorType(v string) *ErrorDetails { + s.ErrorType = &v + return s +} + +// A filter used to limit results when describing inbound or outbound cross-cluster +// search connections. Multiple values can be specified per filter. A cross-cluster +// search connection must match at least one of the specified values for it +// to be returned from an operation. +type Filter struct { + _ struct{} `type:"structure"` + + // Specifies the name of the filter. + Name *string `min:"1" type:"string"` + + // Contains one or more values for the filter. + Values []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Filter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Filter"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *Filter) SetName(v string) *Filter { + s.Name = &v + return s +} + +// SetValues sets the Values field's value. +func (s *Filter) SetValues(v []*string) *Filter { + s.Values = v + return s +} + // Container for request parameters to GetCompatibleElasticsearchVersions operation. type GetCompatibleElasticsearchVersionsInput struct { _ struct{} `type:"structure"` @@ -4975,6 +7889,105 @@ func (s *GetUpgradeStatusOutput) SetUpgradeStep(v string) *GetUpgradeStatusOutpu return s } +// Specifies details of an inbound connection. +type InboundCrossClusterSearchConnection struct { + _ struct{} `type:"structure"` + + // Specifies the InboundCrossClusterSearchConnectionStatus for the outbound + // connection. + ConnectionStatus *InboundCrossClusterSearchConnectionStatus `type:"structure"` + + // Specifies the connection id for the inbound cross-cluster search connection. + CrossClusterSearchConnectionId *string `type:"string"` + + // Specifies the DomainInformation for the destination Elasticsearch domain. + DestinationDomainInfo *DomainInformation `type:"structure"` + + // Specifies the DomainInformation for the source Elasticsearch domain. + SourceDomainInfo *DomainInformation `type:"structure"` +} + +// String returns the string representation +func (s InboundCrossClusterSearchConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InboundCrossClusterSearchConnection) GoString() string { + return s.String() +} + +// SetConnectionStatus sets the ConnectionStatus field's value. +func (s *InboundCrossClusterSearchConnection) SetConnectionStatus(v *InboundCrossClusterSearchConnectionStatus) *InboundCrossClusterSearchConnection { + s.ConnectionStatus = v + return s +} + +// SetCrossClusterSearchConnectionId sets the CrossClusterSearchConnectionId field's value. +func (s *InboundCrossClusterSearchConnection) SetCrossClusterSearchConnectionId(v string) *InboundCrossClusterSearchConnection { + s.CrossClusterSearchConnectionId = &v + return s +} + +// SetDestinationDomainInfo sets the DestinationDomainInfo field's value. +func (s *InboundCrossClusterSearchConnection) SetDestinationDomainInfo(v *DomainInformation) *InboundCrossClusterSearchConnection { + s.DestinationDomainInfo = v + return s +} + +// SetSourceDomainInfo sets the SourceDomainInfo field's value. +func (s *InboundCrossClusterSearchConnection) SetSourceDomainInfo(v *DomainInformation) *InboundCrossClusterSearchConnection { + s.SourceDomainInfo = v + return s +} + +// Specifies the coonection status of an inbound cross-cluster search connection. +type InboundCrossClusterSearchConnectionStatus struct { + _ struct{} `type:"structure"` + + // Specifies verbose information for the inbound connection status. + Message *string `type:"string"` + + // The state code for inbound connection. This can be one of the following: + // + // * PENDING_ACCEPTANCE: Inbound connection is not yet accepted by destination + // domain owner. + // + // * APPROVED: Inbound connection is pending acceptance by destination domain + // owner. 
+ // + // * REJECTING: Inbound connection rejection is in process. + // + // * REJECTED: Inbound connection is rejected. + // + // * DELETING: Inbound connection deletion is in progress. + // + // * DELETED: Inbound connection is deleted and cannot be used further. + StatusCode *string `type:"string" enum:"InboundCrossClusterSearchConnectionStatusCode"` +} + +// String returns the string representation +func (s InboundCrossClusterSearchConnectionStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InboundCrossClusterSearchConnectionStatus) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *InboundCrossClusterSearchConnectionStatus) SetMessage(v string) *InboundCrossClusterSearchConnectionStatus { + s.Message = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *InboundCrossClusterSearchConnectionStatus) SetStatusCode(v string) *InboundCrossClusterSearchConnectionStatus { + s.StatusCode = &v + return s +} + // InstanceCountLimits represents the limits on number of instances that be // created in Amazon Elasticsearch for given InstanceType. type InstanceCountLimits struct { @@ -5039,8 +8052,8 @@ func (s *InstanceLimits) SetInstanceCountLimits(v *InstanceCountLimits) *Instanc // or failure (the failure is internal to the service) . Gives http status code // of 500. type InternalException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5057,17 +8070,74 @@ func (s InternalException) GoString() string { func newErrorInternalException(v protocol.ResponseMetadata) error { return &InternalException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalException) Code() string { +func (s *InternalException) Code() string { return "InternalException" } // Message returns the exception's message. -func (s InternalException) Message() string { +func (s *InternalException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalException) OrigErr() error { + return nil +} + +func (s *InternalException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The request processing has failed because of invalid pagination token provided +// by customer. Returns an HTTP status code of 400. 
+type InvalidPaginationTokenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidPaginationTokenException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidPaginationTokenException) GoString() string { + return s.String() +} + +func newErrorInvalidPaginationTokenException(v protocol.ResponseMetadata) error { + return &InvalidPaginationTokenException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidPaginationTokenException) Code() string { + return "InvalidPaginationTokenException" +} + +// Message returns the exception's message. +func (s *InvalidPaginationTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5075,29 +8145,29 @@ func (s InternalException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalException) OrigErr() error { +func (s *InvalidPaginationTokenException) OrigErr() error { return nil } -func (s InternalException) Error() string { +func (s *InvalidPaginationTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPaginationTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPaginationTokenException) RequestID() string { + return s.RespMetadata.RequestID } // An exception for trying to create or access sub-resource that is either invalid // or not supported. Gives http status code of 409. type InvalidTypeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5114,17 +8184,17 @@ func (s InvalidTypeException) GoString() string { func newErrorInvalidTypeException(v protocol.ResponseMetadata) error { return &InvalidTypeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTypeException) Code() string { +func (s *InvalidTypeException) Code() string { return "InvalidTypeException" } // Message returns the exception's message. -func (s InvalidTypeException) Message() string { +func (s *InvalidTypeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5132,29 +8202,29 @@ func (s InvalidTypeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTypeException) OrigErr() error { +func (s *InvalidTypeException) OrigErr() error { return nil } -func (s InvalidTypeException) Error() string { +func (s *InvalidTypeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTypeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTypeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidTypeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTypeException) RequestID() string { + return s.RespMetadata.RequestID } // An exception for trying to create more than allowed resources or sub-resources. // Gives http status code of 409. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5171,17 +8241,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5189,22 +8259,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Limits for given InstanceType and for each of it's role. Limits contains @@ -5272,23 +8342,116 @@ func (s ListDomainNamesInput) GoString() string { type ListDomainNamesOutput struct { _ struct{} `type:"structure"` - // List of Elasticsearch domain names. - DomainNames []*DomainInfo `type:"list"` + // List of Elasticsearch domain names. + DomainNames []*DomainInfo `type:"list"` +} + +// String returns the string representation +func (s ListDomainNamesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainNamesOutput) GoString() string { + return s.String() +} + +// SetDomainNames sets the DomainNames field's value. +func (s *ListDomainNamesOutput) SetDomainNames(v []*DomainInfo) *ListDomainNamesOutput { + s.DomainNames = v + return s +} + +// Container for request parameters to ListDomainsForPackage operation. +type ListDomainsForPackageInput struct { + _ struct{} `type:"structure"` + + // Limits results to a maximum number of domains. + MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` + + // Used for pagination. Only necessary if a previous API call includes a non-null + // NextToken value. If provided, returns results for the next page. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + + // The package for which to list domains. 
+ // + // PackageID is a required field + PackageID *string `location:"uri" locationName:"PackageID" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListDomainsForPackageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsForPackageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDomainsForPackageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDomainsForPackageInput"} + if s.PackageID == nil { + invalidParams.Add(request.NewErrParamRequired("PackageID")) + } + if s.PackageID != nil && len(*s.PackageID) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PackageID", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDomainsForPackageInput) SetMaxResults(v int64) *ListDomainsForPackageInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDomainsForPackageInput) SetNextToken(v string) *ListDomainsForPackageInput { + s.NextToken = &v + return s +} + +// SetPackageID sets the PackageID field's value. +func (s *ListDomainsForPackageInput) SetPackageID(v string) *ListDomainsForPackageInput { + s.PackageID = &v + return s +} + +// Container for response parameters to ListDomainsForPackage operation. +type ListDomainsForPackageOutput struct { + _ struct{} `type:"structure"` + + // List of DomainPackageDetails objects. + DomainPackageDetailsList []*DomainPackageDetails `type:"list"` + + NextToken *string `type:"string"` } // String returns the string representation -func (s ListDomainNamesOutput) String() string { +func (s ListDomainsForPackageOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDomainNamesOutput) GoString() string { +func (s ListDomainsForPackageOutput) GoString() string { return s.String() } -// SetDomainNames sets the DomainNames field's value. -func (s *ListDomainNamesOutput) SetDomainNames(v []*DomainInfo) *ListDomainNamesOutput { - s.DomainNames = v +// SetDomainPackageDetailsList sets the DomainPackageDetailsList field's value. +func (s *ListDomainsForPackageOutput) SetDomainPackageDetailsList(v []*DomainPackageDetails) *ListDomainsForPackageOutput { + s.DomainPackageDetailsList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDomainsForPackageOutput) SetNextToken(v string) *ListDomainsForPackageOutput { + s.NextToken = &v return s } @@ -5482,6 +8645,101 @@ func (s *ListElasticsearchVersionsOutput) SetNextToken(v string) *ListElasticsea return s } +// Container for request parameters to ListPackagesForDomain operation. +type ListPackagesForDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the domain for which you want to list associated packages. + // + // DomainName is a required field + DomainName *string `location:"uri" locationName:"DomainName" min:"3" type:"string" required:"true"` + + // Limits results to a maximum number of packages. + MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` + + // Used for pagination. Only necessary if a previous API call includes a non-null + // NextToken value. If provided, returns results for the next page. 
+ NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListPackagesForDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPackagesForDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPackagesForDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPackagesForDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomainName sets the DomainName field's value. +func (s *ListPackagesForDomainInput) SetDomainName(v string) *ListPackagesForDomainInput { + s.DomainName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListPackagesForDomainInput) SetMaxResults(v int64) *ListPackagesForDomainInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPackagesForDomainInput) SetNextToken(v string) *ListPackagesForDomainInput { + s.NextToken = &v + return s +} + +// Container for response parameters to ListPackagesForDomain operation. +type ListPackagesForDomainOutput struct { + _ struct{} `type:"structure"` + + // List of DomainPackageDetails objects. + DomainPackageDetailsList []*DomainPackageDetails `type:"list"` + + // Pagination token that needs to be supplied to the next call to get the next + // page of results. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListPackagesForDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPackagesForDomainOutput) GoString() string { + return s.String() +} + +// SetDomainPackageDetailsList sets the DomainPackageDetailsList field's value. +func (s *ListPackagesForDomainOutput) SetDomainPackageDetailsList(v []*DomainPackageDetails) *ListPackagesForDomainOutput { + s.DomainPackageDetailsList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPackagesForDomainOutput) SetNextToken(v string) *ListPackagesForDomainOutput { + s.NextToken = &v + return s +} + // Container for the parameters to the ListTags operation. Specify the ARN for // the Elasticsearch domain to which the tags are attached that you want to // view are attached. @@ -5664,151 +8922,387 @@ func (s *MasterUserOptions) Validate() error { return nil } -// SetMasterUserARN sets the MasterUserARN field's value. -func (s *MasterUserOptions) SetMasterUserARN(v string) *MasterUserOptions { - s.MasterUserARN = &v +// SetMasterUserARN sets the MasterUserARN field's value. +func (s *MasterUserOptions) SetMasterUserARN(v string) *MasterUserOptions { + s.MasterUserARN = &v + return s +} + +// SetMasterUserName sets the MasterUserName field's value. +func (s *MasterUserOptions) SetMasterUserName(v string) *MasterUserOptions { + s.MasterUserName = &v + return s +} + +// SetMasterUserPassword sets the MasterUserPassword field's value. +func (s *MasterUserOptions) SetMasterUserPassword(v string) *MasterUserOptions { + s.MasterUserPassword = &v + return s +} + +// Specifies the node-to-node encryption options. 
+type NodeToNodeEncryptionOptions struct { + _ struct{} `type:"structure"` + + // Specify true to enable node-to-node encryption. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s NodeToNodeEncryptionOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeToNodeEncryptionOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *NodeToNodeEncryptionOptions) SetEnabled(v bool) *NodeToNodeEncryptionOptions { + s.Enabled = &v + return s +} + +// Status of the node-to-node encryption options for the specified Elasticsearch +// domain. +type NodeToNodeEncryptionOptionsStatus struct { + _ struct{} `type:"structure"` + + // Specifies the node-to-node encryption options for the specified Elasticsearch + // domain. + // + // Options is a required field + Options *NodeToNodeEncryptionOptions `type:"structure" required:"true"` + + // Specifies the status of the node-to-node encryption options for the specified + // Elasticsearch domain. + // + // Status is a required field + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s NodeToNodeEncryptionOptionsStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeToNodeEncryptionOptionsStatus) GoString() string { + return s.String() +} + +// SetOptions sets the Options field's value. +func (s *NodeToNodeEncryptionOptionsStatus) SetOptions(v *NodeToNodeEncryptionOptions) *NodeToNodeEncryptionOptionsStatus { + s.Options = v + return s +} + +// SetStatus sets the Status field's value. +func (s *NodeToNodeEncryptionOptionsStatus) SetStatus(v *OptionStatus) *NodeToNodeEncryptionOptionsStatus { + s.Status = v + return s +} + +// Provides the current status of the entity. +type OptionStatus struct { + _ struct{} `type:"structure"` + + // Timestamp which tells the creation date for the entity. + // + // CreationDate is a required field + CreationDate *time.Time `type:"timestamp" required:"true"` + + // Indicates whether the Elasticsearch domain is being deleted. + PendingDeletion *bool `type:"boolean"` + + // Provides the OptionState for the Elasticsearch domain. + // + // State is a required field + State *string `type:"string" required:"true" enum:"OptionState"` + + // Timestamp which tells the last updated time for the entity. + // + // UpdateDate is a required field + UpdateDate *time.Time `type:"timestamp" required:"true"` + + // Specifies the latest version for the entity. + UpdateVersion *int64 `type:"integer"` +} + +// String returns the string representation +func (s OptionStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionStatus) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *OptionStatus) SetCreationDate(v time.Time) *OptionStatus { + s.CreationDate = &v + return s +} + +// SetPendingDeletion sets the PendingDeletion field's value. +func (s *OptionStatus) SetPendingDeletion(v bool) *OptionStatus { + s.PendingDeletion = &v + return s +} + +// SetState sets the State field's value. +func (s *OptionStatus) SetState(v string) *OptionStatus { + s.State = &v + return s +} + +// SetUpdateDate sets the UpdateDate field's value. 
+func (s *OptionStatus) SetUpdateDate(v time.Time) *OptionStatus { + s.UpdateDate = &v + return s +} + +// SetUpdateVersion sets the UpdateVersion field's value. +func (s *OptionStatus) SetUpdateVersion(v int64) *OptionStatus { + s.UpdateVersion = &v + return s +} + +// Specifies details of an outbound connection. +type OutboundCrossClusterSearchConnection struct { + _ struct{} `type:"structure"` + + // Specifies the connection alias for the outbound cross-cluster search connection. + ConnectionAlias *string `type:"string"` + + // Specifies the OutboundCrossClusterSearchConnectionStatus for the outbound + // connection. + ConnectionStatus *OutboundCrossClusterSearchConnectionStatus `type:"structure"` + + // Specifies the connection id for the outbound cross-cluster search connection. + CrossClusterSearchConnectionId *string `type:"string"` + + // Specifies the DomainInformation for the destination Elasticsearch domain. + DestinationDomainInfo *DomainInformation `type:"structure"` + + // Specifies the DomainInformation for the source Elasticsearch domain. + SourceDomainInfo *DomainInformation `type:"structure"` +} + +// String returns the string representation +func (s OutboundCrossClusterSearchConnection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutboundCrossClusterSearchConnection) GoString() string { + return s.String() +} + +// SetConnectionAlias sets the ConnectionAlias field's value. +func (s *OutboundCrossClusterSearchConnection) SetConnectionAlias(v string) *OutboundCrossClusterSearchConnection { + s.ConnectionAlias = &v + return s +} + +// SetConnectionStatus sets the ConnectionStatus field's value. +func (s *OutboundCrossClusterSearchConnection) SetConnectionStatus(v *OutboundCrossClusterSearchConnectionStatus) *OutboundCrossClusterSearchConnection { + s.ConnectionStatus = v + return s +} + +// SetCrossClusterSearchConnectionId sets the CrossClusterSearchConnectionId field's value. +func (s *OutboundCrossClusterSearchConnection) SetCrossClusterSearchConnectionId(v string) *OutboundCrossClusterSearchConnection { + s.CrossClusterSearchConnectionId = &v return s } -// SetMasterUserName sets the MasterUserName field's value. -func (s *MasterUserOptions) SetMasterUserName(v string) *MasterUserOptions { - s.MasterUserName = &v +// SetDestinationDomainInfo sets the DestinationDomainInfo field's value. +func (s *OutboundCrossClusterSearchConnection) SetDestinationDomainInfo(v *DomainInformation) *OutboundCrossClusterSearchConnection { + s.DestinationDomainInfo = v return s } -// SetMasterUserPassword sets the MasterUserPassword field's value. -func (s *MasterUserOptions) SetMasterUserPassword(v string) *MasterUserOptions { - s.MasterUserPassword = &v +// SetSourceDomainInfo sets the SourceDomainInfo field's value. +func (s *OutboundCrossClusterSearchConnection) SetSourceDomainInfo(v *DomainInformation) *OutboundCrossClusterSearchConnection { + s.SourceDomainInfo = v return s } -// Specifies the node-to-node encryption options. -type NodeToNodeEncryptionOptions struct { +// Specifies the connection status of an outbound cross-cluster search connection. +type OutboundCrossClusterSearchConnectionStatus struct { _ struct{} `type:"structure"` - // Specify true to enable node-to-node encryption. - Enabled *bool `type:"boolean"` + // Specifies verbose information for the outbound connection status. + Message *string `type:"string"` + + // The state code for outbound connection. 
This can be one of the following: + // + // * VALIDATING: The outbound connection request is being validated. + // + // * VALIDATION_FAILED: Validation failed for the connection request. + // + // * PENDING_ACCEPTANCE: Outbound connection request is validated and is + // not yet accepted by destination domain owner. + // + // * PROVISIONING: Outbound connection request is in process. + // + // * ACTIVE: Outbound connection is active and ready to use. + // + // * REJECTED: Outbound connection request is rejected by destination domain + // owner. + // + // * DELETING: Outbound connection deletion is in progress. + // + // * DELETED: Outbound connection is deleted and cannot be used further. + StatusCode *string `type:"string" enum:"OutboundCrossClusterSearchConnectionStatusCode"` } // String returns the string representation -func (s NodeToNodeEncryptionOptions) String() string { +func (s OutboundCrossClusterSearchConnectionStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NodeToNodeEncryptionOptions) GoString() string { +func (s OutboundCrossClusterSearchConnectionStatus) GoString() string { return s.String() } -// SetEnabled sets the Enabled field's value. -func (s *NodeToNodeEncryptionOptions) SetEnabled(v bool) *NodeToNodeEncryptionOptions { - s.Enabled = &v +// SetMessage sets the Message field's value. +func (s *OutboundCrossClusterSearchConnectionStatus) SetMessage(v string) *OutboundCrossClusterSearchConnectionStatus { + s.Message = &v return s } -// Status of the node-to-node encryption options for the specified Elasticsearch -// domain. -type NodeToNodeEncryptionOptionsStatus struct { +// SetStatusCode sets the StatusCode field's value. +func (s *OutboundCrossClusterSearchConnectionStatus) SetStatusCode(v string) *OutboundCrossClusterSearchConnectionStatus { + s.StatusCode = &v + return s +} + +// Basic information about a package. +type PackageDetails struct { _ struct{} `type:"structure"` - // Specifies the node-to-node encryption options for the specified Elasticsearch - // domain. - // - // Options is a required field - Options *NodeToNodeEncryptionOptions `type:"structure" required:"true"` + // Timestamp which tells creation date of the package. + CreatedAt *time.Time `type:"timestamp"` - // Specifies the status of the node-to-node encryption options for the specified - // Elasticsearch domain. - // - // Status is a required field - Status *OptionStatus `type:"structure" required:"true"` + // Additional information if the package is in an error state. Null otherwise. + ErrorDetails *ErrorDetails `type:"structure"` + + // User-specified description of the package. + PackageDescription *string `type:"string"` + + // Internal ID of the package. + PackageID *string `type:"string"` + + // User specified name of the package. + PackageName *string `min:"3" type:"string"` + + // Current state of the package. Values are COPYING/COPY_FAILED/AVAILABLE/DELETING/DELETE_FAILED + PackageStatus *string `type:"string" enum:"PackageStatus"` + + // Currently supports only TXT-DICTIONARY. 
+ PackageType *string `type:"string" enum:"PackageType"` } // String returns the string representation -func (s NodeToNodeEncryptionOptionsStatus) String() string { +func (s PackageDetails) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NodeToNodeEncryptionOptionsStatus) GoString() string { +func (s PackageDetails) GoString() string { return s.String() } -// SetOptions sets the Options field's value. -func (s *NodeToNodeEncryptionOptionsStatus) SetOptions(v *NodeToNodeEncryptionOptions) *NodeToNodeEncryptionOptionsStatus { - s.Options = v +// SetCreatedAt sets the CreatedAt field's value. +func (s *PackageDetails) SetCreatedAt(v time.Time) *PackageDetails { + s.CreatedAt = &v return s } -// SetStatus sets the Status field's value. -func (s *NodeToNodeEncryptionOptionsStatus) SetStatus(v *OptionStatus) *NodeToNodeEncryptionOptionsStatus { - s.Status = v +// SetErrorDetails sets the ErrorDetails field's value. +func (s *PackageDetails) SetErrorDetails(v *ErrorDetails) *PackageDetails { + s.ErrorDetails = v return s } -// Provides the current status of the entity. -type OptionStatus struct { - _ struct{} `type:"structure"` +// SetPackageDescription sets the PackageDescription field's value. +func (s *PackageDetails) SetPackageDescription(v string) *PackageDetails { + s.PackageDescription = &v + return s +} - // Timestamp which tells the creation date for the entity. - // - // CreationDate is a required field - CreationDate *time.Time `type:"timestamp" required:"true"` +// SetPackageID sets the PackageID field's value. +func (s *PackageDetails) SetPackageID(v string) *PackageDetails { + s.PackageID = &v + return s +} - // Indicates whether the Elasticsearch domain is being deleted. - PendingDeletion *bool `type:"boolean"` +// SetPackageName sets the PackageName field's value. +func (s *PackageDetails) SetPackageName(v string) *PackageDetails { + s.PackageName = &v + return s +} - // Provides the OptionState for the Elasticsearch domain. - // - // State is a required field - State *string `type:"string" required:"true" enum:"OptionState"` +// SetPackageStatus sets the PackageStatus field's value. +func (s *PackageDetails) SetPackageStatus(v string) *PackageDetails { + s.PackageStatus = &v + return s +} - // Timestamp which tells the last updated time for the entity. - // - // UpdateDate is a required field - UpdateDate *time.Time `type:"timestamp" required:"true"` +// SetPackageType sets the PackageType field's value. +func (s *PackageDetails) SetPackageType(v string) *PackageDetails { + s.PackageType = &v + return s +} - // Specifies the latest version for the entity. - UpdateVersion *int64 `type:"integer"` +// The S3 location for importing the package specified as S3BucketName and S3Key +type PackageSource struct { + _ struct{} `type:"structure"` + + // Name of the bucket containing the package. + S3BucketName *string `min:"3" type:"string"` + + // Key (file name) of the package. + S3Key *string `type:"string"` } // String returns the string representation -func (s OptionStatus) String() string { +func (s PackageSource) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OptionStatus) GoString() string { +func (s PackageSource) GoString() string { return s.String() } -// SetCreationDate sets the CreationDate field's value. -func (s *OptionStatus) SetCreationDate(v time.Time) *OptionStatus { - s.CreationDate = &v - return s -} - -// SetPendingDeletion sets the PendingDeletion field's value. 
-func (s *OptionStatus) SetPendingDeletion(v bool) *OptionStatus { - s.PendingDeletion = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *PackageSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PackageSource"} + if s.S3BucketName != nil && len(*s.S3BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("S3BucketName", 3)) + } -// SetState sets the State field's value. -func (s *OptionStatus) SetState(v string) *OptionStatus { - s.State = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetUpdateDate sets the UpdateDate field's value. -func (s *OptionStatus) SetUpdateDate(v time.Time) *OptionStatus { - s.UpdateDate = &v +// SetS3BucketName sets the S3BucketName field's value. +func (s *PackageSource) SetS3BucketName(v string) *PackageSource { + s.S3BucketName = &v return s } -// SetUpdateVersion sets the UpdateVersion field's value. -func (s *OptionStatus) SetUpdateVersion(v int64) *OptionStatus { - s.UpdateVersion = &v +// SetS3Key sets the S3Key field's value. +func (s *PackageSource) SetS3Key(v string) *PackageSource { + s.S3Key = &v return s } @@ -5948,6 +9442,74 @@ func (s *RecurringCharge) SetRecurringChargeFrequency(v string) *RecurringCharge return s } +// Container for the parameters to the RejectInboundCrossClusterSearchConnection +// operation. +type RejectInboundCrossClusterSearchConnectionInput struct { + _ struct{} `type:"structure"` + + // The id of the inbound connection that you want to reject. + // + // CrossClusterSearchConnectionId is a required field + CrossClusterSearchConnectionId *string `location:"uri" locationName:"ConnectionId" type:"string" required:"true"` +} + +// String returns the string representation +func (s RejectInboundCrossClusterSearchConnectionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectInboundCrossClusterSearchConnectionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RejectInboundCrossClusterSearchConnectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RejectInboundCrossClusterSearchConnectionInput"} + if s.CrossClusterSearchConnectionId == nil { + invalidParams.Add(request.NewErrParamRequired("CrossClusterSearchConnectionId")) + } + if s.CrossClusterSearchConnectionId != nil && len(*s.CrossClusterSearchConnectionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CrossClusterSearchConnectionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCrossClusterSearchConnectionId sets the CrossClusterSearchConnectionId field's value. +func (s *RejectInboundCrossClusterSearchConnectionInput) SetCrossClusterSearchConnectionId(v string) *RejectInboundCrossClusterSearchConnectionInput { + s.CrossClusterSearchConnectionId = &v + return s +} + +// The result of a RejectInboundCrossClusterSearchConnection operation. Contains +// details of rejected inbound connection. +type RejectInboundCrossClusterSearchConnectionOutput struct { + _ struct{} `type:"structure"` + + // Specifies the InboundCrossClusterSearchConnection of rejected inbound connection. 
+ CrossClusterSearchConnection *InboundCrossClusterSearchConnection `type:"structure"` +} + +// String returns the string representation +func (s RejectInboundCrossClusterSearchConnectionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RejectInboundCrossClusterSearchConnectionOutput) GoString() string { + return s.String() +} + +// SetCrossClusterSearchConnection sets the CrossClusterSearchConnection field's value. +func (s *RejectInboundCrossClusterSearchConnectionOutput) SetCrossClusterSearchConnection(v *InboundCrossClusterSearchConnection) *RejectInboundCrossClusterSearchConnectionOutput { + s.CrossClusterSearchConnection = v + return s +} + // Container for the parameters to the RemoveTags operation. Specify the ARN // for the Elasticsearch domain from which you want to remove the specified // TagKey. @@ -6248,8 +9810,8 @@ func (s *ReservedElasticsearchInstanceOffering) SetUsagePrice(v float64) *Reserv // An exception for creating a resource that already exists. Gives http status // code of 400. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6266,17 +9828,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6284,29 +9846,29 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // An exception for accessing or deleting a resource that does not exist. Gives // http status code of 400. 
type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6323,17 +9885,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6341,22 +9903,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The current options of an Elasticsearch domain service software options. @@ -6380,6 +9942,10 @@ type ServiceSoftwareOptions struct { // The new service software version if one is available. NewVersion *string `type:"string"` + // True if a service software is never automatically updated. False if a service + // software is automatically updated after AutomatedUpdateDate. + OptionalDeployment *bool `type:"boolean"` + // True if you are able to update you service software version. False if you // are not able to update your service software version. UpdateAvailable *bool `type:"boolean"` @@ -6429,6 +9995,12 @@ func (s *ServiceSoftwareOptions) SetNewVersion(v string) *ServiceSoftwareOptions return s } +// SetOptionalDeployment sets the OptionalDeployment field's value. +func (s *ServiceSoftwareOptions) SetOptionalDeployment(v bool) *ServiceSoftwareOptions { + s.OptionalDeployment = &v + return s +} + // SetUpdateAvailable sets the UpdateAvailable field's value. func (s *ServiceSoftwareOptions) SetUpdateAvailable(v bool) *ServiceSoftwareOptions { s.UpdateAvailable = &v @@ -7273,8 +10845,8 @@ func (s *VPCOptions) SetSubnetIds(v []*string) *VPCOptions { // An exception for missing / invalid input fields. Gives http status code of // 400. 
type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7291,17 +10863,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. -func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7309,22 +10881,22 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the zone awareness configuration for the domain cluster, such as @@ -7371,6 +10943,65 @@ const ( DeploymentStatusEligible = "ELIGIBLE" ) +// DeploymentStatus_Values returns all elements of the DeploymentStatus enum +func DeploymentStatus_Values() []string { + return []string{ + DeploymentStatusPendingUpdate, + DeploymentStatusInProgress, + DeploymentStatusCompleted, + DeploymentStatusNotEligible, + DeploymentStatusEligible, + } +} + +const ( + // DescribePackagesFilterNamePackageId is a DescribePackagesFilterName enum value + DescribePackagesFilterNamePackageId = "PackageID" + + // DescribePackagesFilterNamePackageName is a DescribePackagesFilterName enum value + DescribePackagesFilterNamePackageName = "PackageName" + + // DescribePackagesFilterNamePackageStatus is a DescribePackagesFilterName enum value + DescribePackagesFilterNamePackageStatus = "PackageStatus" +) + +// DescribePackagesFilterName_Values returns all elements of the DescribePackagesFilterName enum +func DescribePackagesFilterName_Values() []string { + return []string{ + DescribePackagesFilterNamePackageId, + DescribePackagesFilterNamePackageName, + DescribePackagesFilterNamePackageStatus, + } +} + +const ( + // DomainPackageStatusAssociating is a DomainPackageStatus enum value + DomainPackageStatusAssociating = "ASSOCIATING" + + // DomainPackageStatusAssociationFailed is a DomainPackageStatus enum value + DomainPackageStatusAssociationFailed = "ASSOCIATION_FAILED" + + // DomainPackageStatusActive is a DomainPackageStatus enum value + DomainPackageStatusActive = "ACTIVE" + + // DomainPackageStatusDissociating is a DomainPackageStatus enum value + DomainPackageStatusDissociating = "DISSOCIATING" + + // DomainPackageStatusDissociationFailed is a DomainPackageStatus enum value + DomainPackageStatusDissociationFailed = 
"DISSOCIATION_FAILED" +) + +// DomainPackageStatus_Values returns all elements of the DomainPackageStatus enum +func DomainPackageStatus_Values() []string { + return []string{ + DomainPackageStatusAssociating, + DomainPackageStatusAssociationFailed, + DomainPackageStatusActive, + DomainPackageStatusDissociating, + DomainPackageStatusDissociationFailed, + } +} + const ( // ESPartitionInstanceTypeM3MediumElasticsearch is a ESPartitionInstanceType enum value ESPartitionInstanceTypeM3MediumElasticsearch = "m3.medium.elasticsearch" @@ -7547,6 +11178,70 @@ const ( ESPartitionInstanceTypeI316xlargeElasticsearch = "i3.16xlarge.elasticsearch" ) +// ESPartitionInstanceType_Values returns all elements of the ESPartitionInstanceType enum +func ESPartitionInstanceType_Values() []string { + return []string{ + ESPartitionInstanceTypeM3MediumElasticsearch, + ESPartitionInstanceTypeM3LargeElasticsearch, + ESPartitionInstanceTypeM3XlargeElasticsearch, + ESPartitionInstanceTypeM32xlargeElasticsearch, + ESPartitionInstanceTypeM4LargeElasticsearch, + ESPartitionInstanceTypeM4XlargeElasticsearch, + ESPartitionInstanceTypeM42xlargeElasticsearch, + ESPartitionInstanceTypeM44xlargeElasticsearch, + ESPartitionInstanceTypeM410xlargeElasticsearch, + ESPartitionInstanceTypeM5LargeElasticsearch, + ESPartitionInstanceTypeM5XlargeElasticsearch, + ESPartitionInstanceTypeM52xlargeElasticsearch, + ESPartitionInstanceTypeM54xlargeElasticsearch, + ESPartitionInstanceTypeM512xlargeElasticsearch, + ESPartitionInstanceTypeR5LargeElasticsearch, + ESPartitionInstanceTypeR5XlargeElasticsearch, + ESPartitionInstanceTypeR52xlargeElasticsearch, + ESPartitionInstanceTypeR54xlargeElasticsearch, + ESPartitionInstanceTypeR512xlargeElasticsearch, + ESPartitionInstanceTypeC5LargeElasticsearch, + ESPartitionInstanceTypeC5XlargeElasticsearch, + ESPartitionInstanceTypeC52xlargeElasticsearch, + ESPartitionInstanceTypeC54xlargeElasticsearch, + ESPartitionInstanceTypeC59xlargeElasticsearch, + ESPartitionInstanceTypeC518xlargeElasticsearch, + ESPartitionInstanceTypeUltrawarm1MediumElasticsearch, + ESPartitionInstanceTypeUltrawarm1LargeElasticsearch, + ESPartitionInstanceTypeT2MicroElasticsearch, + ESPartitionInstanceTypeT2SmallElasticsearch, + ESPartitionInstanceTypeT2MediumElasticsearch, + ESPartitionInstanceTypeR3LargeElasticsearch, + ESPartitionInstanceTypeR3XlargeElasticsearch, + ESPartitionInstanceTypeR32xlargeElasticsearch, + ESPartitionInstanceTypeR34xlargeElasticsearch, + ESPartitionInstanceTypeR38xlargeElasticsearch, + ESPartitionInstanceTypeI2XlargeElasticsearch, + ESPartitionInstanceTypeI22xlargeElasticsearch, + ESPartitionInstanceTypeD2XlargeElasticsearch, + ESPartitionInstanceTypeD22xlargeElasticsearch, + ESPartitionInstanceTypeD24xlargeElasticsearch, + ESPartitionInstanceTypeD28xlargeElasticsearch, + ESPartitionInstanceTypeC4LargeElasticsearch, + ESPartitionInstanceTypeC4XlargeElasticsearch, + ESPartitionInstanceTypeC42xlargeElasticsearch, + ESPartitionInstanceTypeC44xlargeElasticsearch, + ESPartitionInstanceTypeC48xlargeElasticsearch, + ESPartitionInstanceTypeR4LargeElasticsearch, + ESPartitionInstanceTypeR4XlargeElasticsearch, + ESPartitionInstanceTypeR42xlargeElasticsearch, + ESPartitionInstanceTypeR44xlargeElasticsearch, + ESPartitionInstanceTypeR48xlargeElasticsearch, + ESPartitionInstanceTypeR416xlargeElasticsearch, + ESPartitionInstanceTypeI3LargeElasticsearch, + ESPartitionInstanceTypeI3XlargeElasticsearch, + ESPartitionInstanceTypeI32xlargeElasticsearch, + ESPartitionInstanceTypeI34xlargeElasticsearch, + 
ESPartitionInstanceTypeI38xlargeElasticsearch, + ESPartitionInstanceTypeI316xlargeElasticsearch, + } +} + const ( // ESWarmPartitionInstanceTypeUltrawarm1MediumElasticsearch is a ESWarmPartitionInstanceType enum value ESWarmPartitionInstanceTypeUltrawarm1MediumElasticsearch = "ultrawarm1.medium.elasticsearch" @@ -7555,6 +11250,46 @@ const ( ESWarmPartitionInstanceTypeUltrawarm1LargeElasticsearch = "ultrawarm1.large.elasticsearch" ) +// ESWarmPartitionInstanceType_Values returns all elements of the ESWarmPartitionInstanceType enum +func ESWarmPartitionInstanceType_Values() []string { + return []string{ + ESWarmPartitionInstanceTypeUltrawarm1MediumElasticsearch, + ESWarmPartitionInstanceTypeUltrawarm1LargeElasticsearch, + } +} + +const ( + // InboundCrossClusterSearchConnectionStatusCodePendingAcceptance is a InboundCrossClusterSearchConnectionStatusCode enum value + InboundCrossClusterSearchConnectionStatusCodePendingAcceptance = "PENDING_ACCEPTANCE" + + // InboundCrossClusterSearchConnectionStatusCodeApproved is a InboundCrossClusterSearchConnectionStatusCode enum value + InboundCrossClusterSearchConnectionStatusCodeApproved = "APPROVED" + + // InboundCrossClusterSearchConnectionStatusCodeRejecting is a InboundCrossClusterSearchConnectionStatusCode enum value + InboundCrossClusterSearchConnectionStatusCodeRejecting = "REJECTING" + + // InboundCrossClusterSearchConnectionStatusCodeRejected is a InboundCrossClusterSearchConnectionStatusCode enum value + InboundCrossClusterSearchConnectionStatusCodeRejected = "REJECTED" + + // InboundCrossClusterSearchConnectionStatusCodeDeleting is a InboundCrossClusterSearchConnectionStatusCode enum value + InboundCrossClusterSearchConnectionStatusCodeDeleting = "DELETING" + + // InboundCrossClusterSearchConnectionStatusCodeDeleted is a InboundCrossClusterSearchConnectionStatusCode enum value + InboundCrossClusterSearchConnectionStatusCodeDeleted = "DELETED" +) + +// InboundCrossClusterSearchConnectionStatusCode_Values returns all elements of the InboundCrossClusterSearchConnectionStatusCode enum +func InboundCrossClusterSearchConnectionStatusCode_Values() []string { + return []string{ + InboundCrossClusterSearchConnectionStatusCodePendingAcceptance, + InboundCrossClusterSearchConnectionStatusCodeApproved, + InboundCrossClusterSearchConnectionStatusCodeRejecting, + InboundCrossClusterSearchConnectionStatusCodeRejected, + InboundCrossClusterSearchConnectionStatusCodeDeleting, + InboundCrossClusterSearchConnectionStatusCodeDeleted, + } +} + // Type of Log File, it can be one of the following: // * INDEX_SLOW_LOGS: Index slow logs contain insert requests that took more // time than configured index query log threshold to execute. @@ -7565,6 +11300,9 @@ const ( // * ES_APPLICATION_LOGS: Elasticsearch application logs contain information // about errors and warnings raised during the operation of the service and // can be useful for troubleshooting. +// +// * AUDIT_LOGS: Audit logs contain records of user requests for access from +// the domain. 
const ( // LogTypeIndexSlowLogs is a LogType enum value LogTypeIndexSlowLogs = "INDEX_SLOW_LOGS" @@ -7574,8 +11312,21 @@ const ( // LogTypeEsApplicationLogs is a LogType enum value LogTypeEsApplicationLogs = "ES_APPLICATION_LOGS" + + // LogTypeAuditLogs is a LogType enum value + LogTypeAuditLogs = "AUDIT_LOGS" ) +// LogType_Values returns all elements of the LogType enum +func LogType_Values() []string { + return []string{ + LogTypeIndexSlowLogs, + LogTypeSearchSlowLogs, + LogTypeEsApplicationLogs, + LogTypeAuditLogs, + } +} + // The state of a requested change. One of the following: // // * Processing: The request change is still in-process. @@ -7593,6 +11344,107 @@ const ( OptionStateActive = "Active" ) +// OptionState_Values returns all elements of the OptionState enum +func OptionState_Values() []string { + return []string{ + OptionStateRequiresIndexDocuments, + OptionStateProcessing, + OptionStateActive, + } +} + +const ( + // OutboundCrossClusterSearchConnectionStatusCodePendingAcceptance is a OutboundCrossClusterSearchConnectionStatusCode enum value + OutboundCrossClusterSearchConnectionStatusCodePendingAcceptance = "PENDING_ACCEPTANCE" + + // OutboundCrossClusterSearchConnectionStatusCodeValidating is a OutboundCrossClusterSearchConnectionStatusCode enum value + OutboundCrossClusterSearchConnectionStatusCodeValidating = "VALIDATING" + + // OutboundCrossClusterSearchConnectionStatusCodeValidationFailed is a OutboundCrossClusterSearchConnectionStatusCode enum value + OutboundCrossClusterSearchConnectionStatusCodeValidationFailed = "VALIDATION_FAILED" + + // OutboundCrossClusterSearchConnectionStatusCodeProvisioning is a OutboundCrossClusterSearchConnectionStatusCode enum value + OutboundCrossClusterSearchConnectionStatusCodeProvisioning = "PROVISIONING" + + // OutboundCrossClusterSearchConnectionStatusCodeActive is a OutboundCrossClusterSearchConnectionStatusCode enum value + OutboundCrossClusterSearchConnectionStatusCodeActive = "ACTIVE" + + // OutboundCrossClusterSearchConnectionStatusCodeRejected is a OutboundCrossClusterSearchConnectionStatusCode enum value + OutboundCrossClusterSearchConnectionStatusCodeRejected = "REJECTED" + + // OutboundCrossClusterSearchConnectionStatusCodeDeleting is a OutboundCrossClusterSearchConnectionStatusCode enum value + OutboundCrossClusterSearchConnectionStatusCodeDeleting = "DELETING" + + // OutboundCrossClusterSearchConnectionStatusCodeDeleted is a OutboundCrossClusterSearchConnectionStatusCode enum value + OutboundCrossClusterSearchConnectionStatusCodeDeleted = "DELETED" +) + +// OutboundCrossClusterSearchConnectionStatusCode_Values returns all elements of the OutboundCrossClusterSearchConnectionStatusCode enum +func OutboundCrossClusterSearchConnectionStatusCode_Values() []string { + return []string{ + OutboundCrossClusterSearchConnectionStatusCodePendingAcceptance, + OutboundCrossClusterSearchConnectionStatusCodeValidating, + OutboundCrossClusterSearchConnectionStatusCodeValidationFailed, + OutboundCrossClusterSearchConnectionStatusCodeProvisioning, + OutboundCrossClusterSearchConnectionStatusCodeActive, + OutboundCrossClusterSearchConnectionStatusCodeRejected, + OutboundCrossClusterSearchConnectionStatusCodeDeleting, + OutboundCrossClusterSearchConnectionStatusCodeDeleted, + } +} + +const ( + // PackageStatusCopying is a PackageStatus enum value + PackageStatusCopying = "COPYING" + + // PackageStatusCopyFailed is a PackageStatus enum value + PackageStatusCopyFailed = "COPY_FAILED" + + // PackageStatusValidating is a PackageStatus enum value + 
PackageStatusValidating = "VALIDATING" + + // PackageStatusValidationFailed is a PackageStatus enum value + PackageStatusValidationFailed = "VALIDATION_FAILED" + + // PackageStatusAvailable is a PackageStatus enum value + PackageStatusAvailable = "AVAILABLE" + + // PackageStatusDeleting is a PackageStatus enum value + PackageStatusDeleting = "DELETING" + + // PackageStatusDeleted is a PackageStatus enum value + PackageStatusDeleted = "DELETED" + + // PackageStatusDeleteFailed is a PackageStatus enum value + PackageStatusDeleteFailed = "DELETE_FAILED" +) + +// PackageStatus_Values returns all elements of the PackageStatus enum +func PackageStatus_Values() []string { + return []string{ + PackageStatusCopying, + PackageStatusCopyFailed, + PackageStatusValidating, + PackageStatusValidationFailed, + PackageStatusAvailable, + PackageStatusDeleting, + PackageStatusDeleted, + PackageStatusDeleteFailed, + } +} + +const ( + // PackageTypeTxtDictionary is a PackageType enum value + PackageTypeTxtDictionary = "TXT-DICTIONARY" +) + +// PackageType_Values returns all elements of the PackageType enum +func PackageType_Values() []string { + return []string{ + PackageTypeTxtDictionary, + } +} + const ( // ReservedElasticsearchInstancePaymentOptionAllUpfront is a ReservedElasticsearchInstancePaymentOption enum value ReservedElasticsearchInstancePaymentOptionAllUpfront = "ALL_UPFRONT" @@ -7604,6 +11456,15 @@ const ( ReservedElasticsearchInstancePaymentOptionNoUpfront = "NO_UPFRONT" ) +// ReservedElasticsearchInstancePaymentOption_Values returns all elements of the ReservedElasticsearchInstancePaymentOption enum +func ReservedElasticsearchInstancePaymentOption_Values() []string { + return []string{ + ReservedElasticsearchInstancePaymentOptionAllUpfront, + ReservedElasticsearchInstancePaymentOptionPartialUpfront, + ReservedElasticsearchInstancePaymentOptionNoUpfront, + } +} + const ( // TLSSecurityPolicyPolicyMinTls10201907 is a TLSSecurityPolicy enum value TLSSecurityPolicyPolicyMinTls10201907 = "Policy-Min-TLS-1-0-2019-07" @@ -7612,6 +11473,14 @@ const ( TLSSecurityPolicyPolicyMinTls12201907 = "Policy-Min-TLS-1-2-2019-07" ) +// TLSSecurityPolicy_Values returns all elements of the TLSSecurityPolicy enum +func TLSSecurityPolicy_Values() []string { + return []string{ + TLSSecurityPolicyPolicyMinTls10201907, + TLSSecurityPolicyPolicyMinTls12201907, + } +} + const ( // UpgradeStatusInProgress is a UpgradeStatus enum value UpgradeStatusInProgress = "IN_PROGRESS" @@ -7626,6 +11495,16 @@ const ( UpgradeStatusFailed = "FAILED" ) +// UpgradeStatus_Values returns all elements of the UpgradeStatus enum +func UpgradeStatus_Values() []string { + return []string{ + UpgradeStatusInProgress, + UpgradeStatusSucceeded, + UpgradeStatusSucceededWithIssues, + UpgradeStatusFailed, + } +} + const ( // UpgradeStepPreUpgradeCheck is a UpgradeStep enum value UpgradeStepPreUpgradeCheck = "PRE_UPGRADE_CHECK" @@ -7637,6 +11516,15 @@ const ( UpgradeStepUpgrade = "UPGRADE" ) +// UpgradeStep_Values returns all elements of the UpgradeStep enum +func UpgradeStep_Values() []string { + return []string{ + UpgradeStepPreUpgradeCheck, + UpgradeStepSnapshot, + UpgradeStepUpgrade, + } +} + // The type of EBS volume, standard, gp2, or io1. See Configuring EBS-based // Storage (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomain-configure-ebs)for // more information. 
@@ -7650,3 +11538,12 @@ const ( // VolumeTypeIo1 is a VolumeType enum value VolumeTypeIo1 = "io1" ) + +// VolumeType_Values returns all elements of the VolumeType enum +func VolumeType_Values() []string { + return []string{ + VolumeTypeStandard, + VolumeTypeGp2, + VolumeTypeIo1, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/errors.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/errors.go index 9773068ca..46f3ae0da 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/errors.go @@ -8,12 +8,26 @@ import ( const ( + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // An error occurred because user does not have permissions to access the resource. + // Returns HTTP status code 403. + ErrCodeAccessDeniedException = "AccessDeniedException" + // ErrCodeBaseException for service response error code // "BaseException". // // An error occurred while processing the request. ErrCodeBaseException = "BaseException" + // ErrCodeConflictException for service response error code + // "ConflictException". + // + // An error occurred because the client attempts to remove a resource that is + // currently in use. Returns HTTP status code 409. + ErrCodeConflictException = "ConflictException" + // ErrCodeDisabledOperationException for service response error code // "DisabledOperationException". // @@ -29,6 +43,13 @@ const ( // of 500. ErrCodeInternalException = "InternalException" + // ErrCodeInvalidPaginationTokenException for service response error code + // "InvalidPaginationTokenException". + // + // The request processing has failed because of invalid pagination token provided + // by customer. Returns an HTTP status code of 400. + ErrCodeInvalidPaginationTokenException = "InvalidPaginationTokenException" + // ErrCodeInvalidTypeException for service response error code // "InvalidTypeException". 
// @@ -66,12 +87,15 @@ const ( ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ - "BaseException": newErrorBaseException, - "DisabledOperationException": newErrorDisabledOperationException, - "InternalException": newErrorInternalException, - "InvalidTypeException": newErrorInvalidTypeException, - "LimitExceededException": newErrorLimitExceededException, - "ResourceAlreadyExistsException": newErrorResourceAlreadyExistsException, - "ResourceNotFoundException": newErrorResourceNotFoundException, - "ValidationException": newErrorValidationException, + "AccessDeniedException": newErrorAccessDeniedException, + "BaseException": newErrorBaseException, + "ConflictException": newErrorConflictException, + "DisabledOperationException": newErrorDisabledOperationException, + "InternalException": newErrorInternalException, + "InvalidPaginationTokenException": newErrorInvalidPaginationTokenException, + "InvalidTypeException": newErrorInvalidTypeException, + "LimitExceededException": newErrorLimitExceededException, + "ResourceAlreadyExistsException": newErrorResourceAlreadyExistsException, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "ValidationException": newErrorValidationException, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go index 8a5f5389a..91476dd9b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go index 504a3e308..7d37b50e9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go @@ -1865,8 +1865,8 @@ func (c *ElasticTranscoder) UpdatePipelineStatusWithContext(ctx aws.Context, inp // General authentication failure. The request was not signed correctly. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -1883,17 +1883,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1901,22 +1901,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // The file to be used as album art. There can be multiple artworks associated @@ -4095,8 +4095,8 @@ func (s *HlsContentProtection) SetMethod(v string) *HlsContentProtection { } type IncompatibleVersionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4113,17 +4113,17 @@ func (s IncompatibleVersionException) GoString() string { func newErrorIncompatibleVersionException(v protocol.ResponseMetadata) error { return &IncompatibleVersionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncompatibleVersionException) Code() string { +func (s *IncompatibleVersionException) Code() string { return "IncompatibleVersionException" } // Message returns the exception's message. -func (s IncompatibleVersionException) Message() string { +func (s *IncompatibleVersionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4131,22 +4131,22 @@ func (s IncompatibleVersionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IncompatibleVersionException) OrigErr() error { +func (s *IncompatibleVersionException) OrigErr() error { return nil } -func (s IncompatibleVersionException) Error() string { +func (s *IncompatibleVersionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IncompatibleVersionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncompatibleVersionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IncompatibleVersionException) RequestID() string { - return s.respMetadata.RequestID +func (s *IncompatibleVersionException) RequestID() string { + return s.RespMetadata.RequestID } // The captions to be created, if any. @@ -4224,8 +4224,8 @@ func (s *InputCaptions) SetMergePolicy(v string) *InputCaptions { // Elastic Transcoder encountered an unexpected exception while trying to fulfill // the request. 
type InternalServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4242,17 +4242,17 @@ func (s InternalServiceException) GoString() string { func newErrorInternalServiceException(v protocol.ResponseMetadata) error { return &InternalServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceException) Code() string { +func (s *InternalServiceException) Code() string { return "InternalServiceException" } // Message returns the exception's message. -func (s InternalServiceException) Message() string { +func (s *InternalServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4260,22 +4260,22 @@ func (s InternalServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServiceException) OrigErr() error { +func (s *InternalServiceException) OrigErr() error { return nil } -func (s InternalServiceException) Error() string { +func (s *InternalServiceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceException) RequestID() string { + return s.RespMetadata.RequestID } // A section of the response body that provides information about the job that @@ -5131,8 +5131,8 @@ func (s *JobWatermark) SetPresetWatermarkId(v string) *JobWatermark { // Too many operations for a given AWS account. For example, the number of pipelines // exceeds the maximum allowed. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5149,17 +5149,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5167,22 +5167,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The ListJobsByPipelineRequest structure. @@ -6763,8 +6763,8 @@ func (s *ReadPresetOutput) SetPreset(v *Preset) *ReadPresetOutput { // The resource you are attempting to change is in use. For example, you are // attempting to delete a pipeline that is currently in use. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6781,17 +6781,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6799,30 +6799,30 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The requested resource does not exist or is not available. For example, the // pipeline to which you're trying to add a job doesn't exist or is still being // created. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6839,17 +6839,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. 
-func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6857,22 +6857,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The TestRoleRequest structure. @@ -7708,8 +7708,8 @@ func (s *UpdatePipelineStatusOutput) SetPipeline(v *Pipeline) *UpdatePipelineSta // One or more required parameter values were not provided in the request. type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7726,17 +7726,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. -func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7744,22 +7744,22 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } // The VideoParameters structure. 
diff --git a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go index 0754fbc20..b9d182eb7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/api.go b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go index c2e93fa72..00eba4332 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go @@ -64,7 +64,7 @@ func (c *ELB) AddTagsRequest(input *AddTagsInput) (req *request.Request, output // Each tag consists of a key and an optional value. If a tag with the same // key is already associated with the load balancer, AddTags updates its value. // -// For more information, see Tag Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/add-remove-tags.html) +// For more information, see Tag Your Classic Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/add-remove-tags.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -155,7 +155,7 @@ func (c *ELB) ApplySecurityGroupsToLoadBalancerRequest(input *ApplySecurityGroup // private cloud (VPC). The specified security groups override the previously // associated security groups. // -// For more information, see Security Groups for Load Balancers in a VPC (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-groups.html#elb-vpc-security-groups) +// For more information, see Security Groups for Load Balancers in a VPC (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-groups.html#elb-vpc-security-groups) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -246,7 +246,7 @@ func (c *ELB) AttachLoadBalancerToSubnetsRequest(input *AttachLoadBalancerToSubn // // The load balancer evenly distributes requests across all registered subnets. // For more information, see Add or Remove Subnets for Your Load Balancer in -// a VPC (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-manage-subnets.html) +// a VPC (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-manage-subnets.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -339,7 +339,7 @@ func (c *ELB) ConfigureHealthCheckRequest(input *ConfigureHealthCheckInput) (req // of your EC2 instances. // // For more information, see Configure Health Checks for Your Load Balancer -// (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-healthchecks.html) +// (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-healthchecks.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -433,7 +433,7 @@ func (c *ELB) CreateAppCookieStickinessPolicyRequest(input *CreateAppCookieStick // If the application cookie is explicitly removed or expires, the session stops // being sticky until a new application cookie is issued. // -// For more information, see Application-Controlled Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-application) +// For more information, see Application-Controlled Session Stickiness (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-application) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -538,7 +538,7 @@ func (c *ELB) CreateLBCookieStickinessPolicyRequest(input *CreateLBCookieStickin // the same user to that server. The validity of the cookie is based on the // cookie expiration time, which is specified in the policy configuration. // -// For more information, see Duration-Based Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-duration) +// For more information, see Duration-Based Session Stickiness (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-duration) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -638,7 +638,7 @@ func (c *ELB) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *re // // You can create up to 20 load balancers per region per account. You can request // an increase for the number of load balancers for your account. For more information, -// see Limits for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html) +// see Limits for Your Classic Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -762,7 +762,7 @@ func (c *ELB) CreateLoadBalancerListenersRequest(input *CreateLoadBalancerListen // the properties of the new listener must match the properties of the existing // listener. // -// For more information, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) +// For more information, see Listeners for Your Classic Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1209,7 +1209,7 @@ func (c *ELB) DeregisterInstancesFromLoadBalancerRequest(input *DeregisterInstan // You can use DescribeLoadBalancers to verify that the instance is deregistered // from the load balancer. // -// For more information, see Register or De-Register EC2 Instances (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-deregister-register-instances.html) +// For more information, see Register or De-Register EC2 Instances (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-deregister-register-instances.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -1295,7 +1295,7 @@ func (c *ELB) DescribeAccountLimitsRequest(input *DescribeAccountLimitsInput) (r // Describes the current Elastic Load Balancing resource limits for your AWS // account. // -// For more information, see Limits for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html) +// For more information, see Limits for Your Classic Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2037,7 +2037,7 @@ func (c *ELB) DisableAvailabilityZonesForLoadBalancerRequest(input *DisableAvail // the OutOfService state. Then, the load balancer attempts to equally balance // the traffic among its remaining Availability Zones. // -// For more information, see Add or Remove Availability Zones (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-az.html) +// For more information, see Add or Remove Availability Zones (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-az.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2127,7 +2127,7 @@ func (c *ELB) EnableAvailabilityZonesForLoadBalancerRequest(input *EnableAvailab // // The load balancer evenly distributes requests across all its registered Availability // Zones that contain instances. For more information, see Add or Remove Availability -// Zones (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-az.html) +// Zones (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-az.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2216,13 +2216,13 @@ func (c *ELB) ModifyLoadBalancerAttributesRequest(input *ModifyLoadBalancerAttri // // For more information, see the following in the Classic Load Balancers Guide: // -// * Cross-Zone Load Balancing (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) +// * Cross-Zone Load Balancing (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) // -// * Connection Draining (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) +// * Connection Draining (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) // -// * Access Logs (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html) +// * Access Logs (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html) // -// * Idle Connection Timeout (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) +// * Idle Connection Timeout (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2328,7 +2328,7 @@ func (c *ELB) RegisterInstancesWithLoadBalancerRequest(input *RegisterInstancesW // // To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer. 
// -// For more information, see Register or De-Register EC2 Instances (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-deregister-register-instances.html) +// For more information, see Register or De-Register EC2 Instances (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-deregister-register-instances.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2497,7 +2497,7 @@ func (c *ELB) SetLoadBalancerListenerSSLCertificateRequest(input *SetLoadBalance // the same load balancer and port. // // For more information about updating your SSL certificate, see Replace the -// SSL Certificate for Your Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-update-ssl-cert.html) +// SSL Certificate for Your Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-update-ssl-cert.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2605,9 +2605,9 @@ func (c *ELB) SetLoadBalancerPoliciesForBackendServerRequest(input *SetLoadBalan // that the policy is associated with the EC2 instance. // // For more information about enabling back-end instance authentication, see -// Configure Back-end Instance Authentication (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html#configure_backendauth_clt) +// Configure Back-end Instance Authentication (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html#configure_backendauth_clt) // in the Classic Load Balancers Guide. For more information about Proxy Protocol, -// see Configure Proxy Protocol Support (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-proxy-protocol.html) +// see Configure Proxy Protocol Support (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-proxy-protocol.html) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2700,9 +2700,9 @@ func (c *ELB) SetLoadBalancerPoliciesOfListenerRequest(input *SetLoadBalancerPol // To enable back-end server authentication, use SetLoadBalancerPoliciesForBackendServer. // // For more information about setting policies, see Update the SSL Negotiation -// Configuration (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/ssl-config-update.html), -// Duration-Based Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-duration), -// and Application-Controlled Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-application) +// Configuration (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/ssl-config-update.html), +// Duration-Based Session Stickiness (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-duration), +// and Application-Controlled Session Stickiness (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-application) // in the Classic Load Balancers Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -2899,14 +2899,20 @@ func (s AddTagsOutput) GoString() string { return s.String() } -// This data type is reserved. +// Information about additional load balancer attributes. type AdditionalAttribute struct { _ struct{} `type:"structure"` - // This parameter is reserved. + // The name of the attribute. + // + // The following attribute is supported. + // + // * elb.http.desyncmitigationmode - Determines how the load balancer handles + // requests that might pose a security risk to your application. The possible + // values are monitor, defensive, and strictest. The default is defensive. Key *string `type:"string"` - // This parameter is reserved. + // This value of the attribute. Value *string `type:"string"` } @@ -3509,7 +3515,7 @@ type CreateLoadBalancerInput struct { // The listeners. // - // For more information, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) + // For more information, see Listeners for Your Classic Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) // in the Classic Load Balancers Guide. // // Listeners is a required field @@ -3529,7 +3535,7 @@ type CreateLoadBalancerInput struct { // By default, Elastic Load Balancing creates an Internet-facing load balancer // with a DNS name that resolves to public IP addresses. For more information // about Internet-facing and Internal load balancers, see Load Balancer Scheme - // (http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/how-elastic-load-balancing-works.html#load-balancer-scheme) + // (https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/how-elastic-load-balancing-works.html#load-balancer-scheme) // in the Elastic Load Balancing User Guide. // // Specify internal to create a load balancer with a DNS name that resolves @@ -3546,7 +3552,7 @@ type CreateLoadBalancerInput struct { // A list of tags to assign to the load balancer. // // For more information about tagging your load balancer, see Tag Your Classic - // Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/add-remove-tags.html) + // Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/add-remove-tags.html) // in the Classic Load Balancers Guide. Tags []*Tag `min:"1" type:"list"` } @@ -5166,7 +5172,7 @@ func (s *Limit) SetName(v string) *Limit { // Information about a listener. // // For information about the protocols and the ports supported by Elastic Load -// Balancing, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) +// Balancing, see Listeners for Your Classic Load Balancer (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) // in the Classic Load Balancers Guide. type Listener struct { _ struct{} `type:"structure"` @@ -5179,8 +5185,9 @@ type Listener struct { // The protocol to use for routing traffic to instances: HTTP, HTTPS, TCP, or // SSL. // - // If the front-end protocol is HTTP, HTTPS, TCP, or SSL, InstanceProtocol must - // be at the same protocol. + // If the front-end protocol is TCP or SSL, the back-end protocol must be TCP + // or SSL. If the front-end protocol is HTTP or HTTPS, the back-end protocol + // must be HTTP or HTTPS. 
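The listener compatibility rule above (a TCP or SSL front end pairs with a TCP or SSL back end, and an HTTP or HTTPS front end pairs with an HTTP or HTTPS back end) is easiest to see in a CreateLoadBalancer call; the sketch below is illustrative only, with placeholder names, subnet, and ports.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elb"
)

func main() {
	svc := elb.New(session.Must(session.NewSession()))

	out, err := svc.CreateLoadBalancer(&elb.CreateLoadBalancerInput{
		LoadBalancerName: aws.String("example-classic-lb"), // placeholder name
		Subnets:          []*string{aws.String("subnet-0123456789abcdef0")},
		Listeners: []*elb.Listener{
			{
				// HTTP front end must pair with an HTTP or HTTPS back end.
				Protocol:         aws.String("HTTP"),
				LoadBalancerPort: aws.Int64(80),
				InstanceProtocol: aws.String("HTTP"),
				InstancePort:     aws.Int64(8080),
			},
			{
				// TCP front end must pair with a TCP or SSL back end.
				Protocol:         aws.String("TCP"),
				LoadBalancerPort: aws.Int64(443),
				InstanceProtocol: aws.String("TCP"),
				InstancePort:     aws.Int64(8443),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DNSName))
}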
// // If there is another listener with the same InstancePort whose InstanceProtocol // is secure, (HTTPS or SSL), the listener's InstanceProtocol must also be secure. @@ -5308,17 +5315,17 @@ type LoadBalancerAttributes struct { // If enabled, the load balancer captures detailed information of all requests // and delivers the information to the Amazon S3 bucket that you specify. // - // For more information, see Enable Access Logs (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html) + // For more information, see Enable Access Logs (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html) // in the Classic Load Balancers Guide. AccessLog *AccessLog `type:"structure"` - // This parameter is reserved. + // Any additional attributes. AdditionalAttributes []*AdditionalAttribute `type:"list"` // If enabled, the load balancer allows existing requests to complete before // the load balancer shifts traffic away from a deregistered or unhealthy instance. // - // For more information, see Configure Connection Draining (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) + // For more information, see Configure Connection Draining (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) // in the Classic Load Balancers Guide. ConnectionDraining *ConnectionDraining `type:"structure"` @@ -5327,14 +5334,14 @@ type LoadBalancerAttributes struct { // // By default, Elastic Load Balancing maintains a 60-second idle connection // timeout for both front-end and back-end connections of your load balancer. - // For more information, see Configure Idle Connection Timeout (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) + // For more information, see Configure Idle Connection Timeout (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) // in the Classic Load Balancers Guide. ConnectionSettings *ConnectionSettings `type:"structure"` // If enabled, the load balancer routes the request traffic evenly across all // instances regardless of the Availability Zones. // - // For more information, see Configure Cross-Zone Load Balancing (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) + // For more information, see Configure Cross-Zone Load Balancing (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) // in the Classic Load Balancers Guide. CrossZoneLoadBalancing *CrossZoneLoadBalancing `type:"structure"` } @@ -5421,7 +5428,7 @@ type LoadBalancerDescription struct { // The DNS name of the load balancer. // - // For more information, see Configure a Custom Domain Name (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/using-domain-names-with-elb.html) + // For more information, see Configure a Custom Domain Name (https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/using-domain-names-with-elb.html) // in the Classic Load Balancers Guide. 
CanonicalHostedZoneName *string `type:"string"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/doc.go b/vendor/github.com/aws/aws-sdk-go/service/elb/doc.go index 0b93ed474..5fa8e5154 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/doc.go @@ -15,7 +15,7 @@ // Elastic Load Balancing supports three types of load balancers: Application // Load Balancers, Network Load Balancers, and Classic Load Balancers. You can // select a load balancer based on your application needs. For more information, -// see the Elastic Load Balancing User Guide (http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/). +// see the Elastic Load Balancing User Guide (https://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/). // // This reference covers the 2012-06-01 API, which supports Classic Load Balancers. // The 2015-12-01 API supports Application Load Balancers and Network Load Balancers. diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go index 81659d201..7ed1a6098 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go b/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go index c1903d514..89eed6b4a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/api.go @@ -155,8 +155,8 @@ func (c *ELBV2) AddTagsRequest(input *AddTagsInput) (req *request.Request, outpu // AddTags API operation for Elastic Load Balancing. // // Adds the specified tags to the specified Elastic Load Balancing resource. -// You can tag your Application Load Balancers, Network Load Balancers, and -// your target groups. +// You can tag your Application Load Balancers, Network Load Balancers, target +// groups, listeners, and rules. // // Each tag consists of a key and an optional value. If a resource already has // a tag with the same key, AddTags updates its value. @@ -325,6 +325,12 @@ func (c *ELBV2) CreateListenerRequest(input *CreateListenerInput) (req *request. // across all listeners. If a target group is used by multiple actions for a // load balancer, it is counted as only one use. // +// * ErrCodeALPNPolicyNotSupportedException "ALPNPolicyNotFound" +// The specified ALPN policy is not supported. +// +// * ErrCodeTooManyTagsException "TooManyTags" +// You've reached the limit on the number of tags per load balancer. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateListener func (c *ELBV2) CreateListener(input *CreateListenerInput) (*CreateListenerOutput, error) { req, out := c.CreateListenerRequest(input) @@ -530,6 +536,7 @@ func (c *ELBV2) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, // Creates a rule for the specified listener. The listener must be associated // with an Application Load Balancer. // +// Each rule consists of a priority, one or more actions, and one or more conditions. // Rules are evaluated in priority order, from the lowest value to the highest // value. 
When the conditions for a rule are met, its actions are performed. // If the conditions for no rules are met, the actions for the default rule @@ -593,6 +600,9 @@ func (c *ELBV2) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, // across all listeners. If a target group is used by multiple actions for a // load balancer, it is counted as only one use. // +// * ErrCodeTooManyTagsException "TooManyTags" +// You've reached the limit on the number of tags per load balancer. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateRule func (c *ELBV2) CreateRule(input *CreateRuleInput) (*CreateRuleOutput, error) { req, out := c.CreateRuleRequest(input) @@ -697,6 +707,9 @@ func (c *ELBV2) CreateTargetGroupRequest(input *CreateTargetGroupInput) (req *re // * ErrCodeInvalidConfigurationRequestException "InvalidConfigurationRequest" // The requested configuration is not valid. // +// * ErrCodeTooManyTagsException "TooManyTags" +// You've reached the limit on the number of tags per load balancer. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/CreateTargetGroup func (c *ELBV2) CreateTargetGroup(input *CreateTargetGroupInput) (*CreateTargetGroupOutput, error) { req, out := c.CreateTargetGroupRequest(input) @@ -944,6 +957,8 @@ func (c *ELBV2) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, // // Deletes the specified rule. // +// You can't delete the default rule. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1901,9 +1916,9 @@ func (c *ELBV2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Requ // DescribeTags API operation for Elastic Load Balancing. // -// Describes the tags for the specified resources. You can describe the tags -// for one or more Application Load Balancers, Network Load Balancers, and target -// groups. +// Describes the tags for the specified Elastic Load Balancing resources. You +// can describe the tags for one or more Application Load Balancers, Network +// Load Balancers, target groups, listeners, or rules. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2378,6 +2393,9 @@ func (c *ELBV2) ModifyListenerRequest(input *ModifyListenerInput) (req *request. // across all listeners. If a target group is used by multiple actions for a // load balancer, it is counted as only one use. // +// * ErrCodeALPNPolicyNotSupportedException "ALPNPolicyNotFound" +// The specified ALPN policy is not supported. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/elasticloadbalancingv2-2015-12-01/ModifyListener func (c *ELBV2) ModifyListener(input *ModifyListenerInput) (*ModifyListenerOutput, error) { req, out := c.ModifyListenerRequest(input) @@ -3013,7 +3031,9 @@ func (c *ELBV2) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, // RemoveTags API operation for Elastic Load Balancing. // -// Removes the specified tags from the specified Elastic Load Balancing resource. +// Removes the specified tags from the specified Elastic Load Balancing resources. +// You can remove the tags for one or more Application Load Balancers, Network +// Load Balancers, target groups, listeners, or rules. // // To list the current tags for your resources, use DescribeTags. 
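Since DescribeTags and RemoveTags now accept listener and rule ARNs in addition to load balancers and target groups, a small illustrative sketch of listing and then removing tags on a listener could look like this; the ARN and tag key are placeholder assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	svc := elbv2.New(session.Must(session.NewSession()))

	// Placeholder listener ARN; rule ARNs work the same way with this SDK version.
	listenerArn := aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:listener/app/example/1234567890123456/abcdef1234567890")

	tags, err := svc.DescribeTags(&elbv2.DescribeTagsInput{
		ResourceArns: []*string{listenerArn},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, td := range tags.TagDescriptions {
		fmt.Println(aws.StringValue(td.ResourceArn), td.Tags)
	}

	// Remove a tag by key from the same listener.
	if _, err := svc.RemoveTags(&elbv2.RemoveTagsInput{
		ResourceArns: []*string{listenerArn},
		TagKeys:      []*string{aws.String("Environment")},
	}); err != nil {
		log.Fatal(err)
	}
}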
// @@ -3426,6 +3446,9 @@ func (c *ELBV2) SetSubnetsWithContext(ctx aws.Context, input *SetSubnetsInput, o } // Information about an action. +// +// Each rule must include exactly one of the following types of actions: forward, +// fixed-response, or redirect, and it must be the last action to be performed. type Action struct { _ struct{} `type:"structure"` @@ -3449,9 +3472,7 @@ type Action struct { ForwardConfig *ForwardActionConfig `type:"structure"` // The order for the action. This value is required for rules with multiple - // actions. The action with the lowest value for order is performed first. The - // last action to be performed must be one of the following types of actions: - // a forward, fixed-response, or redirect. + // actions. The action with the lowest value for order is performed first. Order *int64 `min:"1" type:"integer"` // [Application Load Balancer] Information for creating a redirect action. Specify @@ -4036,6 +4057,9 @@ type AvailabilityZone struct { // a private IP address from the IPv4 range of the subnet. LoadBalancerAddresses []*LoadBalancerAddress `type:"list"` + // [Application Load Balancers on Outposts] The ID of the Outpost. + OutpostId *string `type:"string"` + // The ID of the subnet. You can specify one subnet per Availability Zone. SubnetId *string `type:"string"` @@ -4059,6 +4083,12 @@ func (s *AvailabilityZone) SetLoadBalancerAddresses(v []*LoadBalancerAddress) *A return s } +// SetOutpostId sets the OutpostId field's value. +func (s *AvailabilityZone) SetOutpostId(v string) *AvailabilityZone { + s.OutpostId = &v + return s +} + // SetSubnetId sets the SubnetId field's value. func (s *AvailabilityZone) SetSubnetId(v string) *AvailabilityZone { s.SubnetId = &v @@ -4143,6 +4173,23 @@ func (s *Cipher) SetPriority(v int64) *Cipher { type CreateListenerInput struct { _ struct{} `type:"structure"` + // [TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) + // policy. You can specify one policy name. The following are the possible values: + // + // * HTTP1Only + // + // * HTTP2Only + // + // * HTTP2Optional + // + // * HTTP2Preferred + // + // * None + // + // For more information, see ALPN Policies (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#alpn-policies) + // in the Network Load Balancers Guide. + AlpnPolicy []*string `type:"list"` + // [HTTPS and TLS listeners] The default certificate for the listener. You must // provide exactly one certificate. Set CertificateArn to the certificate ARN // but do not set IsDefault. @@ -4215,6 +4262,9 @@ type CreateListenerInput struct { // in the Application Load Balancers Guide and Security Policies (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#describe-ssl-policies) // in the Network Load Balancers Guide. SslPolicy *string `type:"string"` + + // The tags to assign to the listener. 
+ Tags []*Tag `min:"1" type:"list"` } // String returns the string representation @@ -4245,6 +4295,9 @@ func (s *CreateListenerInput) Validate() error { if s.Protocol == nil { invalidParams.Add(request.NewErrParamRequired("Protocol")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.DefaultActions != nil { for i, v := range s.DefaultActions { if v == nil { @@ -4255,6 +4308,16 @@ func (s *CreateListenerInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4262,6 +4325,12 @@ func (s *CreateListenerInput) Validate() error { return nil } +// SetAlpnPolicy sets the AlpnPolicy field's value. +func (s *CreateListenerInput) SetAlpnPolicy(v []*string) *CreateListenerInput { + s.AlpnPolicy = v + return s +} + // SetCertificates sets the Certificates field's value. func (s *CreateListenerInput) SetCertificates(v []*Certificate) *CreateListenerInput { s.Certificates = v @@ -4298,6 +4367,12 @@ func (s *CreateListenerInput) SetSslPolicy(v string) *CreateListenerInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateListenerInput) SetTags(v []*Tag) *CreateListenerInput { + s.Tags = v + return s +} + type CreateListenerOutput struct { _ struct{} `type:"structure"` @@ -4324,6 +4399,10 @@ func (s *CreateListenerOutput) SetListeners(v []*Listener) *CreateListenerOutput type CreateLoadBalancerInput struct { _ struct{} `type:"structure"` + // [Application Load Balancers on Outposts] The ID of the customer-owned address + // pool (CoIP pool). + CustomerOwnedIpv4Pool *string `type:"string"` + // [Application Load Balancers] The type of IP addresses used by the subnets // for your load balancer. The possible values are ipv4 (for IPv4 addresses) // and dualstack (for IPv4 and IPv6 addresses). Internal load balancers must @@ -4362,6 +4441,11 @@ type CreateLoadBalancerInput struct { // [Application Load Balancers] You must specify subnets from at least two Availability // Zones. You cannot specify Elastic IP addresses for your subnets. // + // [Application Load Balancers on Outposts] You must specify one Outpost subnet. + // + // [Application Load Balancers on Local Zones] You can specify subnets from + // one or more Local Zones. + // // [Network Load Balancers] You can specify subnets from one or more Availability // Zones. You can specify one Elastic IP address per subnet if you need static // IP addresses for your internet-facing load balancer. For internal load balancers, @@ -4375,11 +4459,16 @@ type CreateLoadBalancerInput struct { // [Application Load Balancers] You must specify subnets from at least two Availability // Zones. // + // [Application Load Balancers on Outposts] You must specify one Outpost subnet. + // + // [Application Load Balancers on Local Zones] You can specify subnets from + // one or more Local Zones. + // // [Network Load Balancers] You can specify subnets from one or more Availability // Zones. Subnets []*string `type:"list"` - // One or more tags to assign to the load balancer. + // The tags to assign to the load balancer. Tags []*Tag `min:"1" type:"list"` // The type of load balancer. The default is application. 
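The new AlpnPolicy and Tags fields on CreateListenerInput can be exercised together when creating a TLS listener; the following is a sketch only, and every ARN, the ALPN value, and the tag are placeholder assumptions rather than values from this change.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

func main() {
	svc := elbv2.New(session.Must(session.NewSession()))

	out, err := svc.CreateListener(&elbv2.CreateListenerInput{
		// Placeholder Network Load Balancer ARN.
		LoadBalancerArn: aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/net/example/1234567890123456"),
		Protocol:        aws.String("TLS"),
		Port:            aws.Int64(443),
		Certificates: []*elbv2.Certificate{
			{CertificateArn: aws.String("arn:aws:acm:us-east-1:123456789012:certificate/example")},
		},
		// One ALPN policy name, per the documentation above.
		AlpnPolicy: []*string{aws.String("HTTP2Preferred")},
		DefaultActions: []*elbv2.Action{
			{
				Type:           aws.String(elbv2.ActionTypeEnumForward),
				TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/example/1234567890123456"),
			},
		},
		// Tags can now be assigned at listener creation time.
		Tags: []*elbv2.Tag{
			{Key: aws.String("Environment"), Value: aws.String("staging")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Listeners)
}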
@@ -4422,6 +4511,12 @@ func (s *CreateLoadBalancerInput) Validate() error { return nil } +// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value. +func (s *CreateLoadBalancerInput) SetCustomerOwnedIpv4Pool(v string) *CreateLoadBalancerInput { + s.CustomerOwnedIpv4Pool = &v + return s +} + // SetIpAddressType sets the IpAddressType field's value. func (s *CreateLoadBalancerInput) SetIpAddressType(v string) *CreateLoadBalancerInput { s.IpAddressType = &v @@ -4520,9 +4615,10 @@ type CreateRuleInput struct { // Actions is a required field Actions []*Action `type:"list" required:"true"` - // The conditions. Each rule can include zero or one of the following conditions: - // http-request-method, host-header, path-pattern, and source-ip, and zero or - // more of the following conditions: http-header and query-string. + // The conditions. Each rule can optionally include up to one of each of the + // following conditions: http-request-method, host-header, path-pattern, and + // source-ip. Each rule can also optionally include one or more of each of the + // following conditions: http-header and query-string. // // Conditions is a required field Conditions []*RuleCondition `type:"list" required:"true"` @@ -4536,6 +4632,9 @@ type CreateRuleInput struct { // // Priority is a required field Priority *int64 `min:"1" type:"integer" required:"true"` + + // The tags to assign to the rule. + Tags []*Tag `min:"1" type:"list"` } // String returns the string representation @@ -4566,6 +4665,9 @@ func (s *CreateRuleInput) Validate() error { if s.Priority != nil && *s.Priority < 1 { invalidParams.Add(request.NewErrParamMinValue("Priority", 1)) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.Actions != nil { for i, v := range s.Actions { if v == nil { @@ -4576,6 +4678,16 @@ func (s *CreateRuleInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4607,6 +4719,12 @@ func (s *CreateRuleInput) SetPriority(v int64) *CreateRuleInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateRuleInput) SetTags(v []*Tag) *CreateRuleInput { + s.Tags = v + return s +} + type CreateRuleOutput struct { _ struct{} `type:"structure"` @@ -4700,13 +4818,15 @@ type CreateTargetGroupInput struct { // function, this parameter does not apply. Protocol *string `type:"string" enum:"ProtocolEnum"` + // The tags to assign to the target group. + Tags []*Tag `min:"1" type:"list"` + // The type of target that you must specify when registering targets with this // target group. You can't specify targets for a target group using more than // one target type. // // * instance - Targets are specified by instance ID. This is the default - // value. If the target group protocol is UDP or TCP_UDP, the target type - // must be instance. + // value. // // * ip - Targets are specified by IP address. 
You can specify IP addresses // from the subnets of the virtual private cloud (VPC) for the target group, @@ -4760,6 +4880,9 @@ func (s *CreateTargetGroupInput) Validate() error { if s.Port != nil && *s.Port < 1 { invalidParams.Add(request.NewErrParamMinValue("Port", 1)) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.UnhealthyThresholdCount != nil && *s.UnhealthyThresholdCount < 2 { invalidParams.Add(request.NewErrParamMinValue("UnhealthyThresholdCount", 2)) } @@ -4768,6 +4891,16 @@ func (s *CreateTargetGroupInput) Validate() error { invalidParams.AddNested("Matcher", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4841,6 +4974,12 @@ func (s *CreateTargetGroupInput) SetProtocol(v string) *CreateTargetGroupInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateTargetGroupInput) SetTags(v []*Tag) *CreateTargetGroupInput { + s.Tags = v + return s +} + // SetTargetType sets the TargetType field's value. func (s *CreateTargetGroupInput) SetTargetType(v string) *CreateTargetGroupInput { s.TargetType = &v @@ -6360,6 +6499,10 @@ func (s *Limit) SetName(v string) *Limit { type Listener struct { _ struct{} `type:"structure"` + // [TLS listener] The name of the Application-Layer Protocol Negotiation (ALPN) + // policy. + AlpnPolicy []*string `type:"list"` + // [HTTPS or TLS listener] The default certificate for the listener. Certificates []*Certificate `type:"list"` @@ -6393,6 +6536,12 @@ func (s Listener) GoString() string { return s.String() } +// SetAlpnPolicy sets the AlpnPolicy field's value. +func (s *Listener) SetAlpnPolicy(v []*string) *Listener { + s.AlpnPolicy = v + return s +} + // SetCertificates sets the Certificates field's value. func (s *Listener) SetCertificates(v []*Certificate) *Listener { s.Certificates = v @@ -6439,7 +6588,7 @@ func (s *Listener) SetSslPolicy(v string) *Listener { type LoadBalancer struct { _ struct{} `type:"structure"` - // The Availability Zones for the load balancer. + // The subnets for the load balancer. AvailabilityZones []*AvailabilityZone `type:"list"` // The ID of the Amazon Route 53 hosted zone associated with the load balancer. @@ -6448,6 +6597,10 @@ type LoadBalancer struct { // The date and time the load balancer was created. CreatedTime *time.Time `type:"timestamp"` + // [Application Load Balancers on Outposts] The ID of the customer-owned address + // pool. + CustomerOwnedIpv4Pool *string `type:"string"` + // The public DNS name of the load balancer. DNSName *string `type:"string"` @@ -6514,6 +6667,12 @@ func (s *LoadBalancer) SetCreatedTime(v time.Time) *LoadBalancer { return s } +// SetCustomerOwnedIpv4Pool sets the CustomerOwnedIpv4Pool field's value. +func (s *LoadBalancer) SetCustomerOwnedIpv4Pool(v string) *LoadBalancer { + s.CustomerOwnedIpv4Pool = &v + return s +} + // SetDNSName sets the DNSName field's value. func (s *LoadBalancer) SetDNSName(v string) *LoadBalancer { s.DNSName = &v @@ -6639,6 +6798,11 @@ type LoadBalancerAttribute struct { // * idle_timeout.timeout_seconds - The idle timeout value, in seconds. The // valid range is 1-4000 seconds. The default is 60 seconds. 
// + // * routing.http.desync_mitigation_mode - Determines how the load balancer + // handles requests that might pose a security risk to your application. + // The possible values are monitor, defensive, and strictest. The default + // is defensive. + // // * routing.http.drop_invalid_header_fields.enabled - Indicates whether // HTTP headers with invalid header fields are removed by the load balancer // (true) or routed to targets (false). The default is false. @@ -6762,6 +6926,23 @@ func (s *Matcher) SetHttpCode(v string) *Matcher { type ModifyListenerInput struct { _ struct{} `type:"structure"` + // [TLS listeners] The name of the Application-Layer Protocol Negotiation (ALPN) + // policy. You can specify one policy name. The following are the possible values: + // + // * HTTP1Only + // + // * HTTP2Only + // + // * HTTP2Optional + // + // * HTTP2Preferred + // + // * None + // + // For more information, see ALPN Policies (https://docs.aws.amazon.com/elasticloadbalancing/latest/network/create-tls-listener.html#alpn-policies) + // in the Network Load Balancers Guide. + AlpnPolicy []*string `type:"list"` + // [HTTPS and TLS listeners] The default certificate for the listener. You must // provide exactly one certificate. Set CertificateArn to the certificate ARN // but do not set IsDefault. @@ -6866,6 +7047,12 @@ func (s *ModifyListenerInput) Validate() error { return nil } +// SetAlpnPolicy sets the AlpnPolicy field's value. +func (s *ModifyListenerInput) SetAlpnPolicy(v []*string) *ModifyListenerInput { + s.AlpnPolicy = v + return s +} + // SetCertificates sets the Certificates field's value. func (s *ModifyListenerInput) SetCertificates(v []*Certificate) *ModifyListenerInput { s.Certificates = v @@ -7224,7 +7411,9 @@ type ModifyTargetGroupInput struct { HealthyThresholdCount *int64 `min:"2" type:"integer"` // [HTTP/HTTPS health checks] The HTTP codes to use when checking for a successful - // response from a target. + // response from a target. The possible values are from 200 to 499. You can + // specify multiple values (for example, "200,202") or a range of values (for + // example, "200-299"). The default is 200. // // With Network Load Balancers, you can't modify this setting. Matcher *Matcher `type:"structure"` @@ -7863,6 +8052,11 @@ func (s *Rule) SetRuleArn(v string) *Rule { } // Information about a condition for a rule. +// +// Each rule can optionally include up to one of each of the following conditions: +// http-request-method, host-header, path-pattern, and source-ip. Each rule +// can also optionally include one or more of each of the following conditions: +// http-header and query-string. type RuleCondition struct { _ struct{} `type:"structure"` @@ -7899,13 +8093,14 @@ type RuleCondition struct { // Information for a source IP condition. Specify only when Field is source-ip. SourceIpConfig *SourceIpConditionConfig `type:"structure"` - // The condition value. You can use Values if the rule contains only host-header - // and path-pattern conditions. Otherwise, you can use HostHeaderConfig for - // host-header conditions and PathPatternConfig for path-pattern conditions. + // The condition value. Specify only when Field is host-header or path-pattern. + // Alternatively, to specify multiple host names or multiple path patterns, + // use HostHeaderConfig or PathPatternConfig. // - // If Field is host-header, you can specify a single host name (for example, - // my.example.com). 
A host name is case insensitive, can be up to 128 characters - // in length, and can contain any of the following characters. + // If Field is host-header and you are not using HostHeaderConfig, you can specify + // a single host name (for example, my.example.com) in Values. A host name is + // case insensitive, can be up to 128 characters in length, and can contain + // any of the following characters. // // * A-Z, a-z, 0-9 // @@ -7915,9 +8110,10 @@ type RuleCondition struct { // // * ? (matches exactly 1 character) // - // If Field is path-pattern, you can specify a single path pattern (for example, - // /img/*). A path pattern is case-sensitive, can be up to 128 characters in - // length, and can contain any of the following characters. + // If Field is path-pattern and you are not using PathPatternConfig, you can + // specify a single path pattern (for example, /img/*) in Values. A path pattern + // is case-sensitive, can be up to 128 characters in length, and can contain + // any of the following characters. // // * A-Z, a-z, 0-9 // @@ -8329,7 +8525,7 @@ func (s *SetSubnetsInput) SetSubnets(v []*string) *SetSubnetsInput { type SetSubnetsOutput struct { _ struct{} `type:"structure"` - // Information about the subnet and Availability Zone. + // Information about the subnets. AvailabilityZones []*AvailabilityZone `type:"list"` } @@ -8820,18 +9016,18 @@ type TargetGroupAttribute struct { // lb_cookie for Application Load Balancers or source_ip for Network Load // Balancers. // - // The following attributes are supported by Application Load Balancers if the - // target is not a Lambda function: + // The following attributes are supported only if the load balancer is an Application + // Load Balancer and the target is an instance or an IP address: // // * load_balancing.algorithm.type - The load balancing algorithm determines // how the load balancer selects targets when routing requests. The value // is round_robin or least_outstanding_requests. The default is round_robin. // // * slow_start.duration_seconds - The time period, in seconds, during which - // a newly registered target receives a linearly increasing share of the - // traffic to the target group. After this time period ends, the target receives - // its full share of traffic. The range is 30-900 seconds (15 minutes). Slow - // start mode is disabled by default. + // a newly registered target receives an increasing share of the traffic + // to the target group. After this time period ends, the target receives + // its full share of traffic. The range is 30-900 seconds (15 minutes). The + // default is 0 seconds (disabled). // // * stickiness.lb_cookie.duration_seconds - The time period, in seconds, // during which requests from a client should be routed to the same target. @@ -8839,14 +9035,15 @@ type TargetGroupAttribute struct { // considered stale. The range is 1 second to 1 week (604800 seconds). The // default value is 1 day (86400 seconds). // - // The following attribute is supported only if the target is a Lambda function. + // The following attribute is supported only if the load balancer is an Application + // Load Balancer and the target is a Lambda function: // // * lambda.multi_value_headers.enabled - Indicates whether the request and - // response headers exchanged between the load balancer and the Lambda function - // include arrays of values or strings. The value is true or false. The default - // is false. 
If the value is false and the request contains a duplicate header - // field name or query parameter key, the load balancer uses the last value - // sent by the client. + // response headers that are exchanged between the load balancer and the + // Lambda function include arrays of values or strings. The value is true + // or false. The default is false. If the value is false and the request + // contains a duplicate header field name or query parameter key, the load + // balancer uses the last value sent by the client. // // The following attribute is supported only by Network Load Balancers: // @@ -9105,6 +9302,17 @@ const ( ActionTypeEnumFixedResponse = "fixed-response" ) +// ActionTypeEnum_Values returns all elements of the ActionTypeEnum enum +func ActionTypeEnum_Values() []string { + return []string{ + ActionTypeEnumForward, + ActionTypeEnumAuthenticateOidc, + ActionTypeEnumAuthenticateCognito, + ActionTypeEnumRedirect, + ActionTypeEnumFixedResponse, + } +} + const ( // AuthenticateCognitoActionConditionalBehaviorEnumDeny is a AuthenticateCognitoActionConditionalBehaviorEnum enum value AuthenticateCognitoActionConditionalBehaviorEnumDeny = "deny" @@ -9116,6 +9324,15 @@ const ( AuthenticateCognitoActionConditionalBehaviorEnumAuthenticate = "authenticate" ) +// AuthenticateCognitoActionConditionalBehaviorEnum_Values returns all elements of the AuthenticateCognitoActionConditionalBehaviorEnum enum +func AuthenticateCognitoActionConditionalBehaviorEnum_Values() []string { + return []string{ + AuthenticateCognitoActionConditionalBehaviorEnumDeny, + AuthenticateCognitoActionConditionalBehaviorEnumAllow, + AuthenticateCognitoActionConditionalBehaviorEnumAuthenticate, + } +} + const ( // AuthenticateOidcActionConditionalBehaviorEnumDeny is a AuthenticateOidcActionConditionalBehaviorEnum enum value AuthenticateOidcActionConditionalBehaviorEnumDeny = "deny" @@ -9127,6 +9344,15 @@ const ( AuthenticateOidcActionConditionalBehaviorEnumAuthenticate = "authenticate" ) +// AuthenticateOidcActionConditionalBehaviorEnum_Values returns all elements of the AuthenticateOidcActionConditionalBehaviorEnum enum +func AuthenticateOidcActionConditionalBehaviorEnum_Values() []string { + return []string{ + AuthenticateOidcActionConditionalBehaviorEnumDeny, + AuthenticateOidcActionConditionalBehaviorEnumAllow, + AuthenticateOidcActionConditionalBehaviorEnumAuthenticate, + } +} + const ( // IpAddressTypeIpv4 is a IpAddressType enum value IpAddressTypeIpv4 = "ipv4" @@ -9135,6 +9361,14 @@ const ( IpAddressTypeDualstack = "dualstack" ) +// IpAddressType_Values returns all elements of the IpAddressType enum +func IpAddressType_Values() []string { + return []string{ + IpAddressTypeIpv4, + IpAddressTypeDualstack, + } +} + const ( // LoadBalancerSchemeEnumInternetFacing is a LoadBalancerSchemeEnum enum value LoadBalancerSchemeEnumInternetFacing = "internet-facing" @@ -9143,6 +9377,14 @@ const ( LoadBalancerSchemeEnumInternal = "internal" ) +// LoadBalancerSchemeEnum_Values returns all elements of the LoadBalancerSchemeEnum enum +func LoadBalancerSchemeEnum_Values() []string { + return []string{ + LoadBalancerSchemeEnumInternetFacing, + LoadBalancerSchemeEnumInternal, + } +} + const ( // LoadBalancerStateEnumActive is a LoadBalancerStateEnum enum value LoadBalancerStateEnumActive = "active" @@ -9157,6 +9399,16 @@ const ( LoadBalancerStateEnumFailed = "failed" ) +// LoadBalancerStateEnum_Values returns all elements of the LoadBalancerStateEnum enum +func LoadBalancerStateEnum_Values() []string { + return []string{ + 
LoadBalancerStateEnumActive, + LoadBalancerStateEnumProvisioning, + LoadBalancerStateEnumActiveImpaired, + LoadBalancerStateEnumFailed, + } +} + const ( // LoadBalancerTypeEnumApplication is a LoadBalancerTypeEnum enum value LoadBalancerTypeEnumApplication = "application" @@ -9165,6 +9417,14 @@ const ( LoadBalancerTypeEnumNetwork = "network" ) +// LoadBalancerTypeEnum_Values returns all elements of the LoadBalancerTypeEnum enum +func LoadBalancerTypeEnum_Values() []string { + return []string{ + LoadBalancerTypeEnumApplication, + LoadBalancerTypeEnumNetwork, + } +} + const ( // ProtocolEnumHttp is a ProtocolEnum enum value ProtocolEnumHttp = "HTTP" @@ -9185,6 +9445,18 @@ const ( ProtocolEnumTcpUdp = "TCP_UDP" ) +// ProtocolEnum_Values returns all elements of the ProtocolEnum enum +func ProtocolEnum_Values() []string { + return []string{ + ProtocolEnumHttp, + ProtocolEnumHttps, + ProtocolEnumTcp, + ProtocolEnumTls, + ProtocolEnumUdp, + ProtocolEnumTcpUdp, + } +} + const ( // RedirectActionStatusCodeEnumHttp301 is a RedirectActionStatusCodeEnum enum value RedirectActionStatusCodeEnumHttp301 = "HTTP_301" @@ -9193,6 +9465,14 @@ const ( RedirectActionStatusCodeEnumHttp302 = "HTTP_302" ) +// RedirectActionStatusCodeEnum_Values returns all elements of the RedirectActionStatusCodeEnum enum +func RedirectActionStatusCodeEnum_Values() []string { + return []string{ + RedirectActionStatusCodeEnumHttp301, + RedirectActionStatusCodeEnumHttp302, + } +} + const ( // TargetHealthReasonEnumElbRegistrationInProgress is a TargetHealthReasonEnum enum value TargetHealthReasonEnumElbRegistrationInProgress = "Elb.RegistrationInProgress" @@ -9231,6 +9511,24 @@ const ( TargetHealthReasonEnumElbInternalError = "Elb.InternalError" ) +// TargetHealthReasonEnum_Values returns all elements of the TargetHealthReasonEnum enum +func TargetHealthReasonEnum_Values() []string { + return []string{ + TargetHealthReasonEnumElbRegistrationInProgress, + TargetHealthReasonEnumElbInitialHealthChecking, + TargetHealthReasonEnumTargetResponseCodeMismatch, + TargetHealthReasonEnumTargetTimeout, + TargetHealthReasonEnumTargetFailedHealthChecks, + TargetHealthReasonEnumTargetNotRegistered, + TargetHealthReasonEnumTargetNotInUse, + TargetHealthReasonEnumTargetDeregistrationInProgress, + TargetHealthReasonEnumTargetInvalidState, + TargetHealthReasonEnumTargetIpUnusable, + TargetHealthReasonEnumTargetHealthCheckDisabled, + TargetHealthReasonEnumElbInternalError, + } +} + const ( // TargetHealthStateEnumInitial is a TargetHealthStateEnum enum value TargetHealthStateEnumInitial = "initial" @@ -9251,6 +9549,18 @@ const ( TargetHealthStateEnumUnavailable = "unavailable" ) +// TargetHealthStateEnum_Values returns all elements of the TargetHealthStateEnum enum +func TargetHealthStateEnum_Values() []string { + return []string{ + TargetHealthStateEnumInitial, + TargetHealthStateEnumHealthy, + TargetHealthStateEnumUnhealthy, + TargetHealthStateEnumUnused, + TargetHealthStateEnumDraining, + TargetHealthStateEnumUnavailable, + } +} + const ( // TargetTypeEnumInstance is a TargetTypeEnum enum value TargetTypeEnumInstance = "instance" @@ -9261,3 +9571,12 @@ const ( // TargetTypeEnumLambda is a TargetTypeEnum enum value TargetTypeEnumLambda = "lambda" ) + +// TargetTypeEnum_Values returns all elements of the TargetTypeEnum enum +func TargetTypeEnum_Values() []string { + return []string{ + TargetTypeEnumInstance, + TargetTypeEnumIp, + TargetTypeEnumLambda, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/errors.go 
b/vendor/github.com/aws/aws-sdk-go/service/elbv2/errors.go index fa10830ff..3a9ab86f1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elbv2/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/errors.go @@ -4,6 +4,12 @@ package elbv2 const ( + // ErrCodeALPNPolicyNotSupportedException for service response error code + // "ALPNPolicyNotFound". + // + // The specified ALPN policy is not supported. + ErrCodeALPNPolicyNotSupportedException = "ALPNPolicyNotFound" + // ErrCodeAllocationIdNotFoundException for service response error code // "AllocationIdNotFound". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go index 63f0fd696..1c869cf04 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/api.go b/vendor/github.com/aws/aws-sdk-go/service/emr/api.go index f0de166ef..89ce40a41 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/emr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/api.go @@ -809,6 +809,89 @@ func (c *EMR) DescribeJobFlowsWithContext(ctx aws.Context, input *DescribeJobFlo return out, req.Send() } +const opDescribeNotebookExecution = "DescribeNotebookExecution" + +// DescribeNotebookExecutionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNotebookExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeNotebookExecution for more information on using the DescribeNotebookExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeNotebookExecutionRequest method. +// req, resp := client.DescribeNotebookExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/DescribeNotebookExecution +func (c *EMR) DescribeNotebookExecutionRequest(input *DescribeNotebookExecutionInput) (req *request.Request, output *DescribeNotebookExecutionOutput) { + op := &request.Operation{ + Name: opDescribeNotebookExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeNotebookExecutionInput{} + } + + output = &DescribeNotebookExecutionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeNotebookExecution API operation for Amazon Elastic MapReduce. +// +// Provides details of a notebook execution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
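The ALPNPolicyNotFound error code added above surfaces through the usual awserr runtime type assertion that the generated documentation keeps referring to; a small helper like the following (the function name is hypothetical) shows that pattern for the two elbv2 error codes involved in this change.

package lbexample

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/elbv2"
)

// classifyListenerError demonstrates the runtime type assertion on awserr.Error
// and switches on the error codes used by CreateListener and ModifyListener.
func classifyListenerError(err error) string {
	if err == nil {
		return ""
	}
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case elbv2.ErrCodeALPNPolicyNotSupportedException:
			return fmt.Sprintf("unsupported ALPN policy: %s", aerr.Message())
		case elbv2.ErrCodeTooManyTagsException:
			return fmt.Sprintf("tag limit reached: %s", aerr.Message())
		default:
			return fmt.Sprintf("%s: %s", aerr.Code(), aerr.Message())
		}
	}
	// Not a service API error (for example, a connection failure).
	return err.Error()
}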
+// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation DescribeNotebookExecution for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// Indicates that an error occurred while processing the request and that the +// request was not completed. +// +// * InvalidRequestException +// This exception occurs when there is something wrong with user input. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/DescribeNotebookExecution +func (c *EMR) DescribeNotebookExecution(input *DescribeNotebookExecutionInput) (*DescribeNotebookExecutionOutput, error) { + req, out := c.DescribeNotebookExecutionRequest(input) + return out, req.Send() +} + +// DescribeNotebookExecutionWithContext is the same as DescribeNotebookExecution with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeNotebookExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) DescribeNotebookExecutionWithContext(ctx aws.Context, input *DescribeNotebookExecutionInput, opts ...request.Option) (*DescribeNotebookExecutionOutput, error) { + req, out := c.DescribeNotebookExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeSecurityConfiguration = "DescribeSecurityConfiguration" // DescribeSecurityConfigurationRequest generates a "aws/request.Request" representing the @@ -1059,6 +1142,80 @@ func (c *EMR) GetBlockPublicAccessConfigurationWithContext(ctx aws.Context, inpu return out, req.Send() } +const opGetManagedScalingPolicy = "GetManagedScalingPolicy" + +// GetManagedScalingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetManagedScalingPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetManagedScalingPolicy for more information on using the GetManagedScalingPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetManagedScalingPolicyRequest method. +// req, resp := client.GetManagedScalingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/GetManagedScalingPolicy +func (c *EMR) GetManagedScalingPolicyRequest(input *GetManagedScalingPolicyInput) (req *request.Request, output *GetManagedScalingPolicyOutput) { + op := &request.Operation{ + Name: opGetManagedScalingPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetManagedScalingPolicyInput{} + } + + output = &GetManagedScalingPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetManagedScalingPolicy API operation for Amazon Elastic MapReduce. 
+// +// Fetches the attached managed scaling policy for an Amazon EMR cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation GetManagedScalingPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/GetManagedScalingPolicy +func (c *EMR) GetManagedScalingPolicy(input *GetManagedScalingPolicyInput) (*GetManagedScalingPolicyOutput, error) { + req, out := c.GetManagedScalingPolicyRequest(input) + return out, req.Send() +} + +// GetManagedScalingPolicyWithContext is the same as GetManagedScalingPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetManagedScalingPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) GetManagedScalingPolicyWithContext(ctx aws.Context, input *GetManagedScalingPolicyInput, opts ...request.Option) (*GetManagedScalingPolicyOutput, error) { + req, out := c.GetManagedScalingPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListBootstrapActions = "ListBootstrapActions" // ListBootstrapActionsRequest generates a "aws/request.Request" representing the @@ -1769,6 +1926,150 @@ func (c *EMR) ListInstancesPagesWithContext(ctx aws.Context, input *ListInstance return p.Err() } +const opListNotebookExecutions = "ListNotebookExecutions" + +// ListNotebookExecutionsRequest generates a "aws/request.Request" representing the +// client's request for the ListNotebookExecutions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListNotebookExecutions for more information on using the ListNotebookExecutions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListNotebookExecutionsRequest method. 
+// req, resp := client.ListNotebookExecutionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/ListNotebookExecutions +func (c *EMR) ListNotebookExecutionsRequest(input *ListNotebookExecutionsInput) (req *request.Request, output *ListNotebookExecutionsOutput) { + op := &request.Operation{ + Name: opListNotebookExecutions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListNotebookExecutionsInput{} + } + + output = &ListNotebookExecutionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListNotebookExecutions API operation for Amazon Elastic MapReduce. +// +// Provides summaries of all notebook executions. You can filter the list based +// on multiple criteria such as status, time range, and editor id. Returns a +// maximum of 50 notebook executions and a marker to track the paging of a longer +// notebook execution list across multiple ListNotebookExecution calls. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation ListNotebookExecutions for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// Indicates that an error occurred while processing the request and that the +// request was not completed. +// +// * InvalidRequestException +// This exception occurs when there is something wrong with user input. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/ListNotebookExecutions +func (c *EMR) ListNotebookExecutions(input *ListNotebookExecutionsInput) (*ListNotebookExecutionsOutput, error) { + req, out := c.ListNotebookExecutionsRequest(input) + return out, req.Send() +} + +// ListNotebookExecutionsWithContext is the same as ListNotebookExecutions with the addition of +// the ability to pass a context and additional request options. +// +// See ListNotebookExecutions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) ListNotebookExecutionsWithContext(ctx aws.Context, input *ListNotebookExecutionsInput, opts ...request.Option) (*ListNotebookExecutionsOutput, error) { + req, out := c.ListNotebookExecutionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListNotebookExecutionsPages iterates over the pages of a ListNotebookExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListNotebookExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListNotebookExecutions operation. 
+// pageNum := 0 +// err := client.ListNotebookExecutionsPages(params, +// func(page *emr.ListNotebookExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EMR) ListNotebookExecutionsPages(input *ListNotebookExecutionsInput, fn func(*ListNotebookExecutionsOutput, bool) bool) error { + return c.ListNotebookExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListNotebookExecutionsPagesWithContext same as ListNotebookExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) ListNotebookExecutionsPagesWithContext(ctx aws.Context, input *ListNotebookExecutionsInput, fn func(*ListNotebookExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListNotebookExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListNotebookExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListNotebookExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListSecurityConfigurations = "ListSecurityConfigurations" // ListSecurityConfigurationsRequest generates a "aws/request.Request" representing the @@ -2473,102 +2774,255 @@ func (c *EMR) PutBlockPublicAccessConfigurationWithContext(ctx aws.Context, inpu return out, req.Send() } -const opRemoveAutoScalingPolicy = "RemoveAutoScalingPolicy" +const opPutManagedScalingPolicy = "PutManagedScalingPolicy" -// RemoveAutoScalingPolicyRequest generates a "aws/request.Request" representing the -// client's request for the RemoveAutoScalingPolicy operation. The "output" return +// PutManagedScalingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutManagedScalingPolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RemoveAutoScalingPolicy for more information on using the RemoveAutoScalingPolicy +// See PutManagedScalingPolicy for more information on using the PutManagedScalingPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RemoveAutoScalingPolicyRequest method. -// req, resp := client.RemoveAutoScalingPolicyRequest(params) +// // Example sending a request using the PutManagedScalingPolicyRequest method. 
+// req, resp := client.PutManagedScalingPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/RemoveAutoScalingPolicy -func (c *EMR) RemoveAutoScalingPolicyRequest(input *RemoveAutoScalingPolicyInput) (req *request.Request, output *RemoveAutoScalingPolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/PutManagedScalingPolicy +func (c *EMR) PutManagedScalingPolicyRequest(input *PutManagedScalingPolicyInput) (req *request.Request, output *PutManagedScalingPolicyOutput) { op := &request.Operation{ - Name: opRemoveAutoScalingPolicy, + Name: opPutManagedScalingPolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &RemoveAutoScalingPolicyInput{} + input = &PutManagedScalingPolicyInput{} } - output = &RemoveAutoScalingPolicyOutput{} + output = &PutManagedScalingPolicyOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// RemoveAutoScalingPolicy API operation for Amazon Elastic MapReduce. +// PutManagedScalingPolicy API operation for Amazon Elastic MapReduce. // -// Removes an automatic scaling policy from a specified instance group within -// an EMR cluster. +// Creates or updates a managed scaling policy for an Amazon EMR cluster. The +// managed scaling policy defines the limits for resources, such as EC2 instances +// that can be added or terminated from a cluster. The policy only applies to +// the core and task nodes. The master node cannot be scaled after initial configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elastic MapReduce's -// API operation RemoveAutoScalingPolicy for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/RemoveAutoScalingPolicy -func (c *EMR) RemoveAutoScalingPolicy(input *RemoveAutoScalingPolicyInput) (*RemoveAutoScalingPolicyOutput, error) { - req, out := c.RemoveAutoScalingPolicyRequest(input) +// API operation PutManagedScalingPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/PutManagedScalingPolicy +func (c *EMR) PutManagedScalingPolicy(input *PutManagedScalingPolicyInput) (*PutManagedScalingPolicyOutput, error) { + req, out := c.PutManagedScalingPolicyRequest(input) return out, req.Send() } -// RemoveAutoScalingPolicyWithContext is the same as RemoveAutoScalingPolicy with the addition of +// PutManagedScalingPolicyWithContext is the same as PutManagedScalingPolicy with the addition of // the ability to pass a context and additional request options. // -// See RemoveAutoScalingPolicy for details on how to use this API operation. +// See PutManagedScalingPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
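PutManagedScalingPolicy and its companion GetManagedScalingPolicy can be sketched as follows; the cluster ID and capacity limits are made-up values, and the ManagedScalingPolicy and ComputeLimits shapes are assumed from the corresponding types added elsewhere in this SDK update.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	svc := emr.New(session.Must(session.NewSession()))
	clusterID := aws.String("j-EXAMPLECLUSTER") // placeholder cluster ID

	// Attach a managed scaling policy that keeps the core/task capacity between 2 and 10 instances.
	_, err := svc.PutManagedScalingPolicy(&emr.PutManagedScalingPolicyInput{
		ClusterId: clusterID,
		ManagedScalingPolicy: &emr.ManagedScalingPolicy{
			ComputeLimits: &emr.ComputeLimits{
				UnitType:             aws.String("Instances"),
				MinimumCapacityUnits: aws.Int64(2),
				MaximumCapacityUnits: aws.Int64(10),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read the attached policy back with the companion Get operation.
	got, err := svc.GetManagedScalingPolicy(&emr.GetManagedScalingPolicyInput{ClusterId: clusterID})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(got.ManagedScalingPolicy)
}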
-func (c *EMR) RemoveAutoScalingPolicyWithContext(ctx aws.Context, input *RemoveAutoScalingPolicyInput, opts ...request.Option) (*RemoveAutoScalingPolicyOutput, error) { - req, out := c.RemoveAutoScalingPolicyRequest(input) +func (c *EMR) PutManagedScalingPolicyWithContext(ctx aws.Context, input *PutManagedScalingPolicyInput, opts ...request.Option) (*PutManagedScalingPolicyOutput, error) { + req, out := c.PutManagedScalingPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRemoveTags = "RemoveTags" +const opRemoveAutoScalingPolicy = "RemoveAutoScalingPolicy" -// RemoveTagsRequest generates a "aws/request.Request" representing the -// client's request for the RemoveTags operation. The "output" return +// RemoveAutoScalingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the RemoveAutoScalingPolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RemoveTags for more information on using the RemoveTags +// See RemoveAutoScalingPolicy for more information on using the RemoveAutoScalingPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RemoveTagsRequest method. -// req, resp := client.RemoveTagsRequest(params) -// +// // Example sending a request using the RemoveAutoScalingPolicyRequest method. +// req, resp := client.RemoveAutoScalingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/RemoveAutoScalingPolicy +func (c *EMR) RemoveAutoScalingPolicyRequest(input *RemoveAutoScalingPolicyInput) (req *request.Request, output *RemoveAutoScalingPolicyOutput) { + op := &request.Operation{ + Name: opRemoveAutoScalingPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveAutoScalingPolicyInput{} + } + + output = &RemoveAutoScalingPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RemoveAutoScalingPolicy API operation for Amazon Elastic MapReduce. +// +// Removes an automatic scaling policy from a specified instance group within +// an EMR cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation RemoveAutoScalingPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/RemoveAutoScalingPolicy +func (c *EMR) RemoveAutoScalingPolicy(input *RemoveAutoScalingPolicyInput) (*RemoveAutoScalingPolicyOutput, error) { + req, out := c.RemoveAutoScalingPolicyRequest(input) + return out, req.Send() +} + +// RemoveAutoScalingPolicyWithContext is the same as RemoveAutoScalingPolicy with the addition of +// the ability to pass a context and additional request options. 
+// +// See RemoveAutoScalingPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) RemoveAutoScalingPolicyWithContext(ctx aws.Context, input *RemoveAutoScalingPolicyInput, opts ...request.Option) (*RemoveAutoScalingPolicyOutput, error) { + req, out := c.RemoveAutoScalingPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRemoveManagedScalingPolicy = "RemoveManagedScalingPolicy" + +// RemoveManagedScalingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the RemoveManagedScalingPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RemoveManagedScalingPolicy for more information on using the RemoveManagedScalingPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RemoveManagedScalingPolicyRequest method. +// req, resp := client.RemoveManagedScalingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/RemoveManagedScalingPolicy +func (c *EMR) RemoveManagedScalingPolicyRequest(input *RemoveManagedScalingPolicyInput) (req *request.Request, output *RemoveManagedScalingPolicyOutput) { + op := &request.Operation{ + Name: opRemoveManagedScalingPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveManagedScalingPolicyInput{} + } + + output = &RemoveManagedScalingPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RemoveManagedScalingPolicy API operation for Amazon Elastic MapReduce. +// +// Removes a managed scaling policy from a specified EMR cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation RemoveManagedScalingPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/RemoveManagedScalingPolicy +func (c *EMR) RemoveManagedScalingPolicy(input *RemoveManagedScalingPolicyInput) (*RemoveManagedScalingPolicyOutput, error) { + req, out := c.RemoveManagedScalingPolicyRequest(input) + return out, req.Send() +} + +// RemoveManagedScalingPolicyWithContext is the same as RemoveManagedScalingPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See RemoveManagedScalingPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) RemoveManagedScalingPolicyWithContext(ctx aws.Context, input *RemoveManagedScalingPolicyInput, opts ...request.Option) (*RemoveManagedScalingPolicyOutput, error) { + req, out := c.RemoveManagedScalingPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRemoveTags = "RemoveTags" + +// RemoveTagsRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTags operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RemoveTags for more information on using the RemoveTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RemoveTagsRequest method. +// req, resp := client.RemoveTagsRequest(params) +// // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) @@ -2928,133 +3382,299 @@ func (c *EMR) SetVisibleToAllUsersWithContext(ctx aws.Context, input *SetVisible return out, req.Send() } -const opTerminateJobFlows = "TerminateJobFlows" +const opStartNotebookExecution = "StartNotebookExecution" -// TerminateJobFlowsRequest generates a "aws/request.Request" representing the -// client's request for the TerminateJobFlows operation. The "output" return +// StartNotebookExecutionRequest generates a "aws/request.Request" representing the +// client's request for the StartNotebookExecution operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TerminateJobFlows for more information on using the TerminateJobFlows +// See StartNotebookExecution for more information on using the StartNotebookExecution // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TerminateJobFlowsRequest method. -// req, resp := client.TerminateJobFlowsRequest(params) +// // Example sending a request using the StartNotebookExecutionRequest method. 
+// req, resp := client.StartNotebookExecutionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/TerminateJobFlows -func (c *EMR) TerminateJobFlowsRequest(input *TerminateJobFlowsInput) (req *request.Request, output *TerminateJobFlowsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/StartNotebookExecution +func (c *EMR) StartNotebookExecutionRequest(input *StartNotebookExecutionInput) (req *request.Request, output *StartNotebookExecutionOutput) { op := &request.Operation{ - Name: opTerminateJobFlows, + Name: opStartNotebookExecution, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &TerminateJobFlowsInput{} + input = &StartNotebookExecutionInput{} } - output = &TerminateJobFlowsOutput{} + output = &StartNotebookExecutionOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// TerminateJobFlows API operation for Amazon Elastic MapReduce. -// -// TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow -// is shut down, any step not yet completed is canceled and the EC2 instances -// on which the cluster is running are stopped. Any log files not already saved -// are uploaded to Amazon S3 if a LogUri was specified when the cluster was -// created. +// StartNotebookExecution API operation for Amazon Elastic MapReduce. // -// The maximum number of clusters allowed is 10. The call to TerminateJobFlows -// is asynchronous. Depending on the configuration of the cluster, it may take -// up to 1-5 minutes for the cluster to completely terminate and release allocated -// resources, such as Amazon EC2 instances. +// Starts a notebook execution. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Elastic MapReduce's -// API operation TerminateJobFlows for usage and error information. +// API operation StartNotebookExecution for usage and error information. // // Returned Error Types: -// * InternalServerError -// Indicates that an error occurred while processing the request and that the -// request was not completed. +// * InternalServerException +// This exception occurs when there is an internal failure in the EMR service. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/TerminateJobFlows -func (c *EMR) TerminateJobFlows(input *TerminateJobFlowsInput) (*TerminateJobFlowsOutput, error) { - req, out := c.TerminateJobFlowsRequest(input) +// * InvalidRequestException +// This exception occurs when there is something wrong with user input. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/StartNotebookExecution +func (c *EMR) StartNotebookExecution(input *StartNotebookExecutionInput) (*StartNotebookExecutionOutput, error) { + req, out := c.StartNotebookExecutionRequest(input) return out, req.Send() } -// TerminateJobFlowsWithContext is the same as TerminateJobFlows with the addition of +// StartNotebookExecutionWithContext is the same as StartNotebookExecution with the addition of // the ability to pass a context and additional request options. // -// See TerminateJobFlows for details on how to use this API operation. 
+// See StartNotebookExecution for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *EMR) TerminateJobFlowsWithContext(ctx aws.Context, input *TerminateJobFlowsInput, opts ...request.Option) (*TerminateJobFlowsOutput, error) { - req, out := c.TerminateJobFlowsRequest(input) +func (c *EMR) StartNotebookExecutionWithContext(ctx aws.Context, input *StartNotebookExecutionInput, opts ...request.Option) (*StartNotebookExecutionOutput, error) { + req, out := c.StartNotebookExecutionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -type AddInstanceFleetInput struct { - _ struct{} `type:"structure"` +const opStopNotebookExecution = "StopNotebookExecution" - // The unique identifier of the cluster. - // - // ClusterId is a required field - ClusterId *string `type:"string" required:"true"` +// StopNotebookExecutionRequest generates a "aws/request.Request" representing the +// client's request for the StopNotebookExecution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopNotebookExecution for more information on using the StopNotebookExecution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopNotebookExecutionRequest method. +// req, resp := client.StopNotebookExecutionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/StopNotebookExecution +func (c *EMR) StopNotebookExecutionRequest(input *StopNotebookExecutionInput) (req *request.Request, output *StopNotebookExecutionOutput) { + op := &request.Operation{ + Name: opStopNotebookExecution, + HTTPMethod: "POST", + HTTPPath: "/", + } - // Specifies the configuration of the instance fleet. - // - // InstanceFleet is a required field - InstanceFleet *InstanceFleetConfig `type:"structure" required:"true"` + if input == nil { + input = &StopNotebookExecutionInput{} + } + + output = &StopNotebookExecutionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return } -// String returns the string representation -func (s AddInstanceFleetInput) String() string { - return awsutil.Prettify(s) +// StopNotebookExecution API operation for Amazon Elastic MapReduce. +// +// Stops a notebook execution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation StopNotebookExecution for usage and error information. 
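A minimal stop sketch, assuming an initialized client (for example svc := emr.New(session.Must(session.NewSession()))), the usual aws, session, and log imports, and a hypothetical execution ID; the NotebookExecutionId field on StopNotebookExecutionInput is assumed from the request shape described above:

    _, err := svc.StopNotebookExecution(&emr.StopNotebookExecutionInput{
        NotebookExecutionId: aws.String("ex-EXAMPLE"), // hypothetical execution ID
    })
    if err != nil {
        log.Printf("StopNotebookExecution failed: %v", err)
    }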
+// +// Returned Error Types: +// * InternalServerError +// Indicates that an error occurred while processing the request and that the +// request was not completed. +// +// * InvalidRequestException +// This exception occurs when there is something wrong with user input. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/StopNotebookExecution +func (c *EMR) StopNotebookExecution(input *StopNotebookExecutionInput) (*StopNotebookExecutionOutput, error) { + req, out := c.StopNotebookExecutionRequest(input) + return out, req.Send() } -// GoString returns the string representation -func (s AddInstanceFleetInput) GoString() string { - return s.String() +// StopNotebookExecutionWithContext is the same as StopNotebookExecution with the addition of +// the ability to pass a context and additional request options. +// +// See StopNotebookExecution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) StopNotebookExecutionWithContext(ctx aws.Context, input *StopNotebookExecutionInput, opts ...request.Option) (*StopNotebookExecutionOutput, error) { + req, out := c.StopNotebookExecutionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *AddInstanceFleetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AddInstanceFleetInput"} - if s.ClusterId == nil { - invalidParams.Add(request.NewErrParamRequired("ClusterId")) - } - if s.InstanceFleet == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceFleet")) - } - if s.InstanceFleet != nil { - if err := s.InstanceFleet.Validate(); err != nil { - invalidParams.AddNested("InstanceFleet", err.(request.ErrInvalidParams)) - } +const opTerminateJobFlows = "TerminateJobFlows" + +// TerminateJobFlowsRequest generates a "aws/request.Request" representing the +// client's request for the TerminateJobFlows operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TerminateJobFlows for more information on using the TerminateJobFlows +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TerminateJobFlowsRequest method. 
+// req, resp := client.TerminateJobFlowsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/TerminateJobFlows +func (c *EMR) TerminateJobFlowsRequest(input *TerminateJobFlowsInput) (req *request.Request, output *TerminateJobFlowsOutput) { + op := &request.Operation{ + Name: opTerminateJobFlows, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TerminateJobFlowsInput{} + } + + output = &TerminateJobFlowsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TerminateJobFlows API operation for Amazon Elastic MapReduce. +// +// TerminateJobFlows shuts a list of clusters (job flows) down. When a job flow +// is shut down, any step not yet completed is canceled and the EC2 instances +// on which the cluster is running are stopped. Any log files not already saved +// are uploaded to Amazon S3 if a LogUri was specified when the cluster was +// created. +// +// The maximum number of clusters allowed is 10. The call to TerminateJobFlows +// is asynchronous. Depending on the configuration of the cluster, it may take +// up to 1-5 minutes for the cluster to completely terminate and release allocated +// resources, such as Amazon EC2 instances. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Elastic MapReduce's +// API operation TerminateJobFlows for usage and error information. +// +// Returned Error Types: +// * InternalServerError +// Indicates that an error occurred while processing the request and that the +// request was not completed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticmapreduce-2009-03-31/TerminateJobFlows +func (c *EMR) TerminateJobFlows(input *TerminateJobFlowsInput) (*TerminateJobFlowsOutput, error) { + req, out := c.TerminateJobFlowsRequest(input) + return out, req.Send() +} + +// TerminateJobFlowsWithContext is the same as TerminateJobFlows with the addition of +// the ability to pass a context and additional request options. +// +// See TerminateJobFlows for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *EMR) TerminateJobFlowsWithContext(ctx aws.Context, input *TerminateJobFlowsInput, opts ...request.Option) (*TerminateJobFlowsOutput, error) { + req, out := c.TerminateJobFlowsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type AddInstanceFleetInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the cluster. + // + // ClusterId is a required field + ClusterId *string `type:"string" required:"true"` + + // Specifies the configuration of the instance fleet. 
+ // + // InstanceFleet is a required field + InstanceFleet *InstanceFleetConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AddInstanceFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddInstanceFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddInstanceFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddInstanceFleetInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + if s.InstanceFleet == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceFleet")) + } + if s.InstanceFleet != nil { + if err := s.InstanceFleet.Validate(); err != nil { + invalidParams.AddNested("InstanceFleet", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -4146,6 +4766,11 @@ type Cluster struct { // in the EMR Management Guide. KerberosAttributes *KerberosAttributes `type:"structure"` + // The AWS KMS customer master key (CMK) used for encrypting log files. This + // attribute is only available with EMR version 5.30.0 and later, excluding + // EMR 6.0.0. + LogEncryptionKmsKeyId *string `type:"string"` + // The path to the Amazon S3 location where logs for this cluster are stored. LogUri *string `type:"string"` @@ -4167,6 +4792,9 @@ type Cluster struct { // The Amazon Resource Name (ARN) of the Outpost where the cluster is launched. OutpostArn *string `type:"string"` + // Placement group configured for an Amazon EMR cluster. + PlacementGroups []*PlacementGroupConfig `type:"list"` + // The Amazon EMR release label, which determines the version of open-source // application packages installed on the cluster. Release labels are in the // form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. @@ -4310,6 +4938,12 @@ func (s *Cluster) SetKerberosAttributes(v *KerberosAttributes) *Cluster { return s } +// SetLogEncryptionKmsKeyId sets the LogEncryptionKmsKeyId field's value. +func (s *Cluster) SetLogEncryptionKmsKeyId(v string) *Cluster { + s.LogEncryptionKmsKeyId = &v + return s +} + // SetLogUri sets the LogUri field's value. func (s *Cluster) SetLogUri(v string) *Cluster { s.LogUri = &v @@ -4340,6 +4974,12 @@ func (s *Cluster) SetOutpostArn(v string) *Cluster { return s } +// SetPlacementGroups sets the PlacementGroups field's value. +func (s *Cluster) SetPlacementGroups(v []*PlacementGroupConfig) *Cluster { + s.PlacementGroups = v + return s +} + // SetReleaseLabel sets the ReleaseLabel field's value. func (s *Cluster) SetReleaseLabel(v string) *Cluster { s.ReleaseLabel = &v @@ -4646,6 +5286,110 @@ func (s *Command) SetScriptPath(v string) *Command { return s } +// The EC2 unit limits for a managed scaling policy. The managed scaling activity +// of a cluster can not be above or below these limits. The limit only applies +// to the core and task nodes. The master node cannot be scaled after initial +// configuration. +type ComputeLimits struct { + _ struct{} `type:"structure"` + + // The upper boundary of EC2 units. It is measured through VCPU cores or instances + // for instance groups and measured through units for instance fleets. Managed + // scaling activities are not allowed beyond this boundary. The limit only applies + // to the core and task nodes. The master node cannot be scaled after initial + // configuration. 
+ // + // MaximumCapacityUnits is a required field + MaximumCapacityUnits *int64 `type:"integer" required:"true"` + + // The upper boundary of EC2 units for core node type in a cluster. It is measured + // through VCPU cores or instances for instance groups and measured through + // units for instance fleets. The core units are not allowed to scale beyond + // this boundary. The parameter is used to split capacity allocation between + // core and task nodes. + MaximumCoreCapacityUnits *int64 `type:"integer"` + + // The upper boundary of On-Demand EC2 units. It is measured through VCPU cores + // or instances for instance groups and measured through units for instance + // fleets. The On-Demand units are not allowed to scale beyond this boundary. + // The parameter is used to split capacity allocation between On-Demand and + // Spot instances. + MaximumOnDemandCapacityUnits *int64 `type:"integer"` + + // The lower boundary of EC2 units. It is measured through VCPU cores or instances + // for instance groups and measured through units for instance fleets. Managed + // scaling activities are not allowed beyond this boundary. The limit only applies + // to the core and task nodes. The master node cannot be scaled after initial + // configuration. + // + // MinimumCapacityUnits is a required field + MinimumCapacityUnits *int64 `type:"integer" required:"true"` + + // The unit type used for specifying a managed scaling policy. + // + // UnitType is a required field + UnitType *string `type:"string" required:"true" enum:"ComputeLimitsUnitType"` +} + +// String returns the string representation +func (s ComputeLimits) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ComputeLimits) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ComputeLimits) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ComputeLimits"} + if s.MaximumCapacityUnits == nil { + invalidParams.Add(request.NewErrParamRequired("MaximumCapacityUnits")) + } + if s.MinimumCapacityUnits == nil { + invalidParams.Add(request.NewErrParamRequired("MinimumCapacityUnits")) + } + if s.UnitType == nil { + invalidParams.Add(request.NewErrParamRequired("UnitType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaximumCapacityUnits sets the MaximumCapacityUnits field's value. +func (s *ComputeLimits) SetMaximumCapacityUnits(v int64) *ComputeLimits { + s.MaximumCapacityUnits = &v + return s +} + +// SetMaximumCoreCapacityUnits sets the MaximumCoreCapacityUnits field's value. +func (s *ComputeLimits) SetMaximumCoreCapacityUnits(v int64) *ComputeLimits { + s.MaximumCoreCapacityUnits = &v + return s +} + +// SetMaximumOnDemandCapacityUnits sets the MaximumOnDemandCapacityUnits field's value. +func (s *ComputeLimits) SetMaximumOnDemandCapacityUnits(v int64) *ComputeLimits { + s.MaximumOnDemandCapacityUnits = &v + return s +} + +// SetMinimumCapacityUnits sets the MinimumCapacityUnits field's value. +func (s *ComputeLimits) SetMinimumCapacityUnits(v int64) *ComputeLimits { + s.MinimumCapacityUnits = &v + return s +} + +// SetUnitType sets the UnitType field's value. +func (s *ComputeLimits) SetUnitType(v string) *ComputeLimits { + s.UnitType = &v + return s +} + // // Amazon EMR releases 4.x or later. 
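Taken together with the PutManagedScalingPolicy, GetManagedScalingPolicy, and RemoveManagedScalingPolicy operations above, these limits are typically wired up as below. A minimal lifecycle sketch, assuming an initialized client svc, a hypothetical cluster ID, illustrative capacity values, and Put/Remove input field names inferred from the operation descriptions (their structs are defined further down in this file):

    // Attach a managed scaling policy that keeps the cluster between 2 and 10 instances.
    limits := &emr.ComputeLimits{
        UnitType:             aws.String("Instances"), // assumed ComputeLimitsUnitType value
        MinimumCapacityUnits: aws.Int64(2),
        MaximumCapacityUnits: aws.Int64(10),
    }
    policy := &emr.ManagedScalingPolicy{ComputeLimits: limits}

    if _, err := svc.PutManagedScalingPolicy(&emr.PutManagedScalingPolicyInput{
        ClusterId:            aws.String("j-EXAMPLE"), // hypothetical cluster ID
        ManagedScalingPolicy: policy,
    }); err != nil {
        log.Fatal(err)
    }

    // Read the policy back.
    got, err := svc.GetManagedScalingPolicy(&emr.GetManagedScalingPolicyInput{
        ClusterId: aws.String("j-EXAMPLE"),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(got.ManagedScalingPolicy.ComputeLimits)

    // Detach the policy when managed scaling is no longer wanted.
    if _, err := svc.RemoveManagedScalingPolicy(&emr.RemoveManagedScalingPolicyInput{
        ClusterId: aws.String("j-EXAMPLE"),
    }); err != nil {
        log.Fatal(err)
    }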
// @@ -4976,6 +5720,67 @@ func (s *DescribeJobFlowsOutput) SetJobFlows(v []*JobFlowDetail) *DescribeJobFlo return s } +type DescribeNotebookExecutionInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the notebook execution. + // + // NotebookExecutionId is a required field + NotebookExecutionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeNotebookExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNotebookExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeNotebookExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNotebookExecutionInput"} + if s.NotebookExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookExecutionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotebookExecutionId sets the NotebookExecutionId field's value. +func (s *DescribeNotebookExecutionInput) SetNotebookExecutionId(v string) *DescribeNotebookExecutionInput { + s.NotebookExecutionId = &v + return s +} + +type DescribeNotebookExecutionOutput struct { + _ struct{} `type:"structure"` + + // Properties of the notebook execution. + NotebookExecution *NotebookExecution `type:"structure"` +} + +// String returns the string representation +func (s DescribeNotebookExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeNotebookExecutionOutput) GoString() string { + return s.String() +} + +// SetNotebookExecution sets the NotebookExecution field's value. +func (s *DescribeNotebookExecutionOutput) SetNotebookExecution(v *NotebookExecution) *DescribeNotebookExecutionOutput { + s.NotebookExecution = v + return s +} + type DescribeSecurityConfigurationInput struct { _ struct{} `type:"structure"` @@ -5446,6 +6251,68 @@ func (s *Ec2InstanceAttributes) SetServiceAccessSecurityGroup(v string) *Ec2Inst return s } +// Specifies the execution engine (cluster) to run the notebook and perform +// the notebook execution, for example, an EMR cluster. +type ExecutionEngineConfig struct { + _ struct{} `type:"structure"` + + // The unique identifier of the execution engine. For an EMR cluster, this is + // the cluster ID. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // An optional unique ID of an EC2 security group to associate with the master + // instance of the EMR cluster for this notebook execution. For more information + // see Specifying EC2 Security Groups for EMR Notebooks (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html) + // in the EMR Management Guide. + MasterInstanceSecurityGroupId *string `type:"string"` + + // The type of execution engine. A value of EMR specifies an EMR cluster. + Type *string `type:"string" enum:"ExecutionEngineType"` +} + +// String returns the string representation +func (s ExecutionEngineConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExecutionEngineConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
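A minimal polling sketch built on DescribeNotebookExecution, assuming an initialized client svc, a hypothetical execution ID, and the usual aws, fmt, log, and time imports; the terminal states are taken from the NotebookExecutionStatus values documented later in this file:

    for {
        out, err := svc.DescribeNotebookExecution(&emr.DescribeNotebookExecutionInput{
            NotebookExecutionId: aws.String("ex-EXAMPLE"), // hypothetical execution ID
        })
        if err != nil {
            log.Fatal(err)
        }
        status := aws.StringValue(out.NotebookExecution.Status)
        fmt.Println("notebook execution status:", status)
        if status == "FINISHED" || status == "FAILED" || status == "STOPPED" {
            break
        }
        time.Sleep(30 * time.Second)
    }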
+func (s *ExecutionEngineConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExecutionEngineConfig"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *ExecutionEngineConfig) SetId(v string) *ExecutionEngineConfig { + s.Id = &v + return s +} + +// SetMasterInstanceSecurityGroupId sets the MasterInstanceSecurityGroupId field's value. +func (s *ExecutionEngineConfig) SetMasterInstanceSecurityGroupId(v string) *ExecutionEngineConfig { + s.MasterInstanceSecurityGroupId = &v + return s +} + +// SetType sets the Type field's value. +func (s *ExecutionEngineConfig) SetType(v string) *ExecutionEngineConfig { + s.Type = &v + return s +} + // The details of the step failure. The service attempts to detect the root // cause for many common failures. type FailureDetails struct { @@ -5521,6 +6388,12 @@ type GetBlockPublicAccessConfigurationOutput struct { // and public access is allowed on this port. You can change this by updating // the block public access configuration to remove the exception. // + // For accounts that created clusters in a Region before November 25, 2019, + // block public access is disabled by default in that Region. To use this feature, + // you must manually enable and configure it. For accounts that did not create + // an EMR cluster in a Region before this date, block public access is enabled + // by default in that Region. + // // BlockPublicAccessConfiguration is a required field BlockPublicAccessConfiguration *BlockPublicAccessConfiguration `type:"structure" required:"true"` @@ -5555,6 +6428,68 @@ func (s *GetBlockPublicAccessConfigurationOutput) SetBlockPublicAccessConfigurat return s } +type GetManagedScalingPolicyInput struct { + _ struct{} `type:"structure"` + + // Specifies the ID of the cluster for which the managed scaling policy will + // be fetched. + // + // ClusterId is a required field + ClusterId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetManagedScalingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedScalingPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetManagedScalingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetManagedScalingPolicyInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterId sets the ClusterId field's value. +func (s *GetManagedScalingPolicyInput) SetClusterId(v string) *GetManagedScalingPolicyInput { + s.ClusterId = &v + return s +} + +type GetManagedScalingPolicyOutput struct { + _ struct{} `type:"structure"` + + // Specifies the managed scaling policy that is attached to an Amazon EMR cluster. + ManagedScalingPolicy *ManagedScalingPolicy `type:"structure"` +} + +// String returns the string representation +func (s GetManagedScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetManagedScalingPolicyOutput) GoString() string { + return s.String() +} + +// SetManagedScalingPolicy sets the ManagedScalingPolicy field's value. 
+func (s *GetManagedScalingPolicyOutput) SetManagedScalingPolicy(v *ManagedScalingPolicy) *GetManagedScalingPolicyOutput { + s.ManagedScalingPolicy = v + return s +} + // A job flow step consisting of a JAR file whose main function will be executed. // The main function submits a job for Hadoop to execute and waits for the job // to finish or fail. @@ -6152,18 +7087,25 @@ func (s *InstanceFleetModifyConfig) SetTargetSpotCapacity(v int64) *InstanceFlee } // The launch specification for Spot instances in the fleet, which determines -// the defined duration and provisioning timeout behavior. +// the defined duration, provisioning timeout behavior, and allocation strategy. // // The instance fleet configuration is available only in Amazon EMR versions -// 4.8.0 and later, excluding 5.0.x versions. +// 4.8.0 and later, excluding 5.0.x versions. On-Demand and Spot instance allocation +// strategies are available in Amazon EMR version 5.12.1 and later. type InstanceFleetProvisioningSpecifications struct { _ struct{} `type:"structure"` - // The launch specification for Spot instances in the fleet, which determines - // the defined duration and provisioning timeout behavior. + // The launch specification for On-Demand instances in the instance fleet, which + // determines the allocation strategy. // - // SpotSpecification is a required field - SpotSpecification *SpotProvisioningSpecification `type:"structure" required:"true"` + // The instance fleet configuration is available only in Amazon EMR versions + // 4.8.0 and later, excluding 5.0.x versions. On-Demand instances allocation + // strategy is available in Amazon EMR version 5.12.1 and later. + OnDemandSpecification *OnDemandProvisioningSpecification `type:"structure"` + + // The launch specification for Spot instances in the fleet, which determines + // the defined duration, provisioning timeout behavior, and allocation strategy. + SpotSpecification *SpotProvisioningSpecification `type:"structure"` } // String returns the string representation @@ -6179,8 +7121,10 @@ func (s InstanceFleetProvisioningSpecifications) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *InstanceFleetProvisioningSpecifications) Validate() error { invalidParams := request.ErrInvalidParams{Context: "InstanceFleetProvisioningSpecifications"} - if s.SpotSpecification == nil { - invalidParams.Add(request.NewErrParamRequired("SpotSpecification")) + if s.OnDemandSpecification != nil { + if err := s.OnDemandSpecification.Validate(); err != nil { + invalidParams.AddNested("OnDemandSpecification", err.(request.ErrInvalidParams)) + } } if s.SpotSpecification != nil { if err := s.SpotSpecification.Validate(); err != nil { @@ -6194,6 +7138,12 @@ func (s *InstanceFleetProvisioningSpecifications) Validate() error { return nil } +// SetOnDemandSpecification sets the OnDemandSpecification field's value. +func (s *InstanceFleetProvisioningSpecifications) SetOnDemandSpecification(v *OnDemandProvisioningSpecification) *InstanceFleetProvisioningSpecifications { + s.OnDemandSpecification = v + return s +} + // SetSpotSpecification sets the SpotSpecification field's value. 
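A minimal sketch of a provisioning specification that sets both halves now that SpotSpecification is optional, assuming that the AllocationStrategy field on OnDemandProvisioningSpecification and the Spot timeout fields used here match the definitions later in this file; values are illustrative:

    spec := &emr.InstanceFleetProvisioningSpecifications{
        OnDemandSpecification: &emr.OnDemandProvisioningSpecification{
            AllocationStrategy: aws.String("lowest-price"), // assumed field name; only documented value
        },
        SpotSpecification: &emr.SpotProvisioningSpecification{
            TimeoutAction:          aws.String("SWITCH_TO_ON_DEMAND"),
            TimeoutDurationMinutes: aws.Int64(10),
        },
    }
    if err := spec.Validate(); err != nil {
        log.Fatal(err)
    }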
func (s *InstanceFleetProvisioningSpecifications) SetSpotSpecification(v *SpotProvisioningSpecification) *InstanceFleetProvisioningSpecifications { s.SpotSpecification = v @@ -7392,8 +8342,8 @@ func (s *InstanceTypeSpecification) SetWeightedCapacity(v int64) *InstanceTypeSp // Indicates that an error occurred while processing the request and that the // request was not completed. type InternalServerError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7410,17 +8360,17 @@ func (s InternalServerError) GoString() string { func newErrorInternalServerError(v protocol.ResponseMetadata) error { return &InternalServerError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerError) Code() string { +func (s *InternalServerError) Code() string { return "InternalServerError" } // Message returns the exception's message. -func (s InternalServerError) Message() string { +func (s *InternalServerError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7428,28 +8378,28 @@ func (s InternalServerError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerError) OrigErr() error { +func (s *InternalServerError) OrigErr() error { return nil } -func (s InternalServerError) Error() string { +func (s *InternalServerError) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerError) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerError) RequestID() string { + return s.RespMetadata.RequestID } // This exception occurs when there is an internal failure in the EMR service. type InternalServerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message associated with the exception. Message_ *string `locationName:"Message" type:"string"` @@ -7467,17 +8417,17 @@ func (s InternalServerException) GoString() string { func newErrorInternalServerException(v protocol.ResponseMetadata) error { return &InternalServerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerException) Code() string { +func (s *InternalServerException) Code() string { return "InternalServerException" } // Message returns the exception's message. -func (s InternalServerException) Message() string { +func (s *InternalServerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7485,28 +8435,28 @@ func (s InternalServerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InternalServerException) OrigErr() error { +func (s *InternalServerException) OrigErr() error { return nil } -func (s InternalServerException) Error() string { +func (s *InternalServerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID } // This exception occurs when there is something wrong with user input. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error code associated with the exception. ErrorCode *string `min:"1" type:"string"` @@ -7527,17 +8477,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7545,22 +8495,22 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // A description of a cluster (job flow). @@ -7599,6 +8549,11 @@ type JobFlowDetail struct { // of the job flow assume this role. JobFlowRole *string `type:"string"` + // The AWS KMS customer master key (CMK) used for encrypting log files. This + // attribute is only available with EMR version 5.30.0 and later, excluding + // EMR 6.0.0. + LogEncryptionKmsKeyId *string `type:"string"` + // The location in Amazon S3 where log files for the job are stored. LogUri *string `type:"string"` @@ -7696,6 +8651,12 @@ func (s *JobFlowDetail) SetJobFlowRole(v string) *JobFlowDetail { return s } +// SetLogEncryptionKmsKeyId sets the LogEncryptionKmsKeyId field's value. +func (s *JobFlowDetail) SetLogEncryptionKmsKeyId(v string) *JobFlowDetail { + s.LogEncryptionKmsKeyId = &v + return s +} + // SetLogUri sets the LogUri field's value. 
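Since these exception types satisfy awserr.Error, callers usually branch on the error code, as the operation docs above suggest. A minimal sketch, assuming err came from one of the EMR calls in this file and that the ErrCode* constants generated alongside this file are available:

    if aerr, ok := err.(awserr.Error); ok {
        switch aerr.Code() {
        case emr.ErrCodeInvalidRequestException:
            fmt.Println("invalid request:", aerr.Message())
        case emr.ErrCodeInternalServerError, emr.ErrCodeInternalServerException:
            fmt.Println("transient service error:", aerr.Message())
        default:
            fmt.Println(aerr.Error())
        }
    }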
func (s *JobFlowDetail) SetLogUri(v string) *JobFlowDetail { s.LogUri = &v @@ -8774,6 +9735,126 @@ func (s *ListInstancesOutput) SetMarker(v string) *ListInstancesOutput { return s } +type ListNotebookExecutionsInput struct { + _ struct{} `type:"structure"` + + // The unique ID of the editor associated with the notebook execution. + EditorId *string `type:"string"` + + // The beginning of time range filter for listing notebook executions. The default + // is the timestamp of 30 days ago. + From *time.Time `type:"timestamp"` + + // The pagination token, returned by a previous ListNotebookExecutions call, + // that indicates the start of the list for this ListNotebookExecutions call. + Marker *string `type:"string"` + + // The status filter for listing notebook executions. + // + // * START_PENDING indicates that the cluster has received the execution + // request but execution has not begun. + // + // * STARTING indicates that the execution is starting on the cluster. + // + // * RUNNING indicates that the execution is being processed by the cluster. + // + // * FINISHING indicates that execution processing is in the final stages. + // + // * FINISHED indicates that the execution has completed without error. + // + // * FAILING indicates that the execution is failing and will not finish + // successfully. + // + // * FAILED indicates that the execution failed. + // + // * STOP_PENDING indicates that the cluster has received a StopNotebookExecution + // request and the stop is pending. + // + // * STOPPING indicates that the cluster is in the process of stopping the + // execution as a result of a StopNotebookExecution request. + // + // * STOPPED indicates that the execution stopped because of a StopNotebookExecution + // request. + Status *string `type:"string" enum:"NotebookExecutionStatus"` + + // The end of time range filter for listing notebook executions. The default + // is the current timestamp. + To *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s ListNotebookExecutionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListNotebookExecutionsInput) GoString() string { + return s.String() +} + +// SetEditorId sets the EditorId field's value. +func (s *ListNotebookExecutionsInput) SetEditorId(v string) *ListNotebookExecutionsInput { + s.EditorId = &v + return s +} + +// SetFrom sets the From field's value. +func (s *ListNotebookExecutionsInput) SetFrom(v time.Time) *ListNotebookExecutionsInput { + s.From = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListNotebookExecutionsInput) SetMarker(v string) *ListNotebookExecutionsInput { + s.Marker = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ListNotebookExecutionsInput) SetStatus(v string) *ListNotebookExecutionsInput { + s.Status = &v + return s +} + +// SetTo sets the To field's value. +func (s *ListNotebookExecutionsInput) SetTo(v time.Time) *ListNotebookExecutionsInput { + s.To = &v + return s +} + +type ListNotebookExecutionsOutput struct { + _ struct{} `type:"structure"` + + // A pagination token that a subsequent ListNotebookExecutions can use to determine + // the next set of results to retrieve. + Marker *string `type:"string"` + + // A list of notebook executions. 
+ NotebookExecutions []*NotebookExecutionSummary `type:"list"` +} + +// String returns the string representation +func (s ListNotebookExecutionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListNotebookExecutionsOutput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *ListNotebookExecutionsOutput) SetMarker(v string) *ListNotebookExecutionsOutput { + s.Marker = &v + return s +} + +// SetNotebookExecutions sets the NotebookExecutions field's value. +func (s *ListNotebookExecutionsOutput) SetNotebookExecutions(v []*NotebookExecutionSummary) *ListNotebookExecutionsOutput { + s.NotebookExecutions = v + return s +} + type ListSecurityConfigurationsInput struct { _ struct{} `type:"structure"` @@ -8933,6 +10014,51 @@ func (s *ListStepsOutput) SetSteps(v []*StepSummary) *ListStepsOutput { return s } +// Managed scaling policy for an Amazon EMR cluster. The policy specifies the +// limits for resources that can be added or terminated from a cluster. The +// policy only applies to the core and task nodes. The master node cannot be +// scaled after initial configuration. +type ManagedScalingPolicy struct { + _ struct{} `type:"structure"` + + // The EC2 unit limits for a managed scaling policy. The managed scaling activity + // of a cluster is not allowed to go above or below these limits. The limit + // only applies to the core and task nodes. The master node cannot be scaled + // after initial configuration. + ComputeLimits *ComputeLimits `type:"structure"` +} + +// String returns the string representation +func (s ManagedScalingPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ManagedScalingPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ManagedScalingPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ManagedScalingPolicy"} + if s.ComputeLimits != nil { + if err := s.ComputeLimits.Validate(); err != nil { + invalidParams.AddNested("ComputeLimits", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComputeLimits sets the ComputeLimits field's value. +func (s *ManagedScalingPolicy) SetComputeLimits(v *ComputeLimits) *ManagedScalingPolicy { + s.ComputeLimits = v + return s +} + // A CloudWatch dimension, which is specified using a Key (known as a Name in // CloudWatch), Value pair. By default, Amazon EMR uses one dimension whose // Key is JobFlowID and Value is a variable representing the cluster ID, which @@ -9119,32 +10245,356 @@ type ModifyInstanceGroupsInput struct { // The ID of the cluster to which the instance group belongs. ClusterId *string `type:"string"` - // Instance groups to change. - InstanceGroups []*InstanceGroupModifyConfig `type:"list"` + // Instance groups to change. + InstanceGroups []*InstanceGroupModifyConfig `type:"list"` +} + +// String returns the string representation +func (s ModifyInstanceGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
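A minimal listing sketch that combines the filters above with the ListNotebookExecutionsPages helper defined earlier in this file, assuming an initialized client svc and a hypothetical editor ID; the status value comes from the documented NotebookExecutionStatus list:

    input := &emr.ListNotebookExecutionsInput{
        EditorId: aws.String("e-EXAMPLE"), // hypothetical editor ID
        Status:   aws.String("RUNNING"),
        From:     aws.Time(time.Now().Add(-24 * time.Hour)),
        To:       aws.Time(time.Now()),
    }
    err := svc.ListNotebookExecutionsPages(input,
        func(page *emr.ListNotebookExecutionsOutput, lastPage bool) bool {
            for _, ex := range page.NotebookExecutions {
                fmt.Println(aws.StringValue(ex.NotebookExecutionId), aws.StringValue(ex.Status))
            }
            return true // keep paging; the helper advances Marker between pages
        })
    if err != nil {
        log.Fatal(err)
    }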
+func (s *ModifyInstanceGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyInstanceGroupsInput"} + if s.InstanceGroups != nil { + for i, v := range s.InstanceGroups { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceGroups", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterId sets the ClusterId field's value. +func (s *ModifyInstanceGroupsInput) SetClusterId(v string) *ModifyInstanceGroupsInput { + s.ClusterId = &v + return s +} + +// SetInstanceGroups sets the InstanceGroups field's value. +func (s *ModifyInstanceGroupsInput) SetInstanceGroups(v []*InstanceGroupModifyConfig) *ModifyInstanceGroupsInput { + s.InstanceGroups = v + return s +} + +type ModifyInstanceGroupsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyInstanceGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyInstanceGroupsOutput) GoString() string { + return s.String() +} + +// A notebook execution. An execution is a specific instance that an EMR Notebook +// is run using the StartNotebookExecution action. +type NotebookExecution struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the notebook execution. + Arn *string `type:"string"` + + // The unique identifier of the EMR Notebook that is used for the notebook execution. + EditorId *string `type:"string"` + + // The timestamp when notebook execution ended. + EndTime *time.Time `type:"timestamp"` + + // The execution engine, such as an EMR cluster, used to run the EMR notebook + // and perform the notebook execution. + ExecutionEngine *ExecutionEngineConfig `type:"structure"` + + // The reason for the latest status change of the notebook execution. + LastStateChangeReason *string `type:"string"` + + // The unique identifier of a notebook execution. + NotebookExecutionId *string `type:"string"` + + // A name for the notebook execution. + NotebookExecutionName *string `type:"string"` + + // The unique identifier of the EC2 security group associated with the EMR Notebook + // instance. For more information see Specifying EC2 Security Groups for EMR + // Notebooks (https://docs.aws.amazon.com/emr/latest/ManagementGuide/emr-managed-notebooks-security-groups.html) + // in the EMR Management Guide. + NotebookInstanceSecurityGroupId *string `type:"string"` + + // Input parameters in JSON format passed to the EMR Notebook at runtime for + // execution. + NotebookParams *string `type:"string"` + + // The location of the notebook execution's output file in Amazon S3. + OutputNotebookURI *string `type:"string"` + + // The timestamp when notebook execution started. + StartTime *time.Time `type:"timestamp"` + + // The status of the notebook execution. + // + // * START_PENDING indicates that the cluster has received the execution + // request but execution has not begun. + // + // * STARTING indicates that the execution is starting on the cluster. + // + // * RUNNING indicates that the execution is being processed by the cluster. + // + // * FINISHING indicates that execution processing is in the final stages. + // + // * FINISHED indicates that the execution has completed without error. + // + // * FAILING indicates that the execution is failing and will not finish + // successfully. 
+ // + // * FAILED indicates that the execution failed. + // + // * STOP_PENDING indicates that the cluster has received a StopNotebookExecution + // request and the stop is pending. + // + // * STOPPING indicates that the cluster is in the process of stopping the + // execution as a result of a StopNotebookExecution request. + // + // * STOPPED indicates that the execution stopped because of a StopNotebookExecution + // request. + Status *string `type:"string" enum:"NotebookExecutionStatus"` + + // A list of tags associated with a notebook execution. Tags are user-defined + // key value pairs that consist of a required key string with a maximum of 128 + // characters and an optional value string with a maximum of 256 characters. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s NotebookExecution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotebookExecution) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *NotebookExecution) SetArn(v string) *NotebookExecution { + s.Arn = &v + return s +} + +// SetEditorId sets the EditorId field's value. +func (s *NotebookExecution) SetEditorId(v string) *NotebookExecution { + s.EditorId = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *NotebookExecution) SetEndTime(v time.Time) *NotebookExecution { + s.EndTime = &v + return s +} + +// SetExecutionEngine sets the ExecutionEngine field's value. +func (s *NotebookExecution) SetExecutionEngine(v *ExecutionEngineConfig) *NotebookExecution { + s.ExecutionEngine = v + return s +} + +// SetLastStateChangeReason sets the LastStateChangeReason field's value. +func (s *NotebookExecution) SetLastStateChangeReason(v string) *NotebookExecution { + s.LastStateChangeReason = &v + return s +} + +// SetNotebookExecutionId sets the NotebookExecutionId field's value. +func (s *NotebookExecution) SetNotebookExecutionId(v string) *NotebookExecution { + s.NotebookExecutionId = &v + return s +} + +// SetNotebookExecutionName sets the NotebookExecutionName field's value. +func (s *NotebookExecution) SetNotebookExecutionName(v string) *NotebookExecution { + s.NotebookExecutionName = &v + return s +} + +// SetNotebookInstanceSecurityGroupId sets the NotebookInstanceSecurityGroupId field's value. +func (s *NotebookExecution) SetNotebookInstanceSecurityGroupId(v string) *NotebookExecution { + s.NotebookInstanceSecurityGroupId = &v + return s +} + +// SetNotebookParams sets the NotebookParams field's value. +func (s *NotebookExecution) SetNotebookParams(v string) *NotebookExecution { + s.NotebookParams = &v + return s +} + +// SetOutputNotebookURI sets the OutputNotebookURI field's value. +func (s *NotebookExecution) SetOutputNotebookURI(v string) *NotebookExecution { + s.OutputNotebookURI = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *NotebookExecution) SetStartTime(v time.Time) *NotebookExecution { + s.StartTime = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *NotebookExecution) SetStatus(v string) *NotebookExecution { + s.Status = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *NotebookExecution) SetTags(v []*Tag) *NotebookExecution { + s.Tags = v + return s +} + +type NotebookExecutionSummary struct { + _ struct{} `type:"structure"` + + // The unique identifier of the editor associated with the notebook execution. 
+	EditorId *string `type:"string"`
+
+	// The timestamp when notebook execution ended.
+	EndTime *time.Time `type:"timestamp"`
+
+	// The unique identifier of the notebook execution.
+	NotebookExecutionId *string `type:"string"`
+
+	// The name of the notebook execution.
+	NotebookExecutionName *string `type:"string"`
+
+	// The timestamp when notebook execution started.
+	StartTime *time.Time `type:"timestamp"`
+
+	// The status of the notebook execution.
+	//
+	//    * START_PENDING indicates that the cluster has received the execution
+	//    request but execution has not begun.
+	//
+	//    * STARTING indicates that the execution is starting on the cluster.
+	//
+	//    * RUNNING indicates that the execution is being processed by the cluster.
+	//
+	//    * FINISHING indicates that execution processing is in the final stages.
+	//
+	//    * FINISHED indicates that the execution has completed without error.
+	//
+	//    * FAILING indicates that the execution is failing and will not finish
+	//    successfully.
+	//
+	//    * FAILED indicates that the execution failed.
+	//
+	//    * STOP_PENDING indicates that the cluster has received a StopNotebookExecution
+	//    request and the stop is pending.
+	//
+	//    * STOPPING indicates that the cluster is in the process of stopping the
+	//    execution as a result of a StopNotebookExecution request.
+	//
+	//    * STOPPED indicates that the execution stopped because of a StopNotebookExecution
+	//    request.
+	Status *string `type:"string" enum:"NotebookExecutionStatus"`
+}
+
+// String returns the string representation
+func (s NotebookExecutionSummary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotebookExecutionSummary) GoString() string {
+	return s.String()
+}
+
+// SetEditorId sets the EditorId field's value.
+func (s *NotebookExecutionSummary) SetEditorId(v string) *NotebookExecutionSummary {
+	s.EditorId = &v
+	return s
+}
+
+// SetEndTime sets the EndTime field's value.
+func (s *NotebookExecutionSummary) SetEndTime(v time.Time) *NotebookExecutionSummary {
+	s.EndTime = &v
+	return s
+}
+
+// SetNotebookExecutionId sets the NotebookExecutionId field's value.
+func (s *NotebookExecutionSummary) SetNotebookExecutionId(v string) *NotebookExecutionSummary {
+	s.NotebookExecutionId = &v
+	return s
+}
+
+// SetNotebookExecutionName sets the NotebookExecutionName field's value.
+func (s *NotebookExecutionSummary) SetNotebookExecutionName(v string) *NotebookExecutionSummary {
+	s.NotebookExecutionName = &v
+	return s
+}
+
+// SetStartTime sets the StartTime field's value.
+func (s *NotebookExecutionSummary) SetStartTime(v time.Time) *NotebookExecutionSummary {
+	s.StartTime = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *NotebookExecutionSummary) SetStatus(v string) *NotebookExecutionSummary {
+	s.Status = &v
+	return s
+}
+
+// The launch specification for On-Demand instances in the instance fleet, which
+// determines the allocation strategy.
+//
+// The instance fleet configuration is available only in Amazon EMR versions
+// 4.8.0 and later, excluding 5.0.x versions. On-Demand instances allocation
+// strategy is available in Amazon EMR version 5.12.1 and later.
+type OnDemandProvisioningSpecification struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the strategy to use in launching On-Demand instance fleets. Currently,
+	// the only option is lowest-price (the default), which launches the lowest
+	// price first.
+ // + // AllocationStrategy is a required field + AllocationStrategy *string `type:"string" required:"true" enum:"OnDemandProvisioningAllocationStrategy"` } // String returns the string representation -func (s ModifyInstanceGroupsInput) String() string { +func (s OnDemandProvisioningSpecification) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ModifyInstanceGroupsInput) GoString() string { +func (s OnDemandProvisioningSpecification) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ModifyInstanceGroupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ModifyInstanceGroupsInput"} - if s.InstanceGroups != nil { - for i, v := range s.InstanceGroups { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceGroups", i), err.(request.ErrInvalidParams)) - } - } +func (s *OnDemandProvisioningSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OnDemandProvisioningSpecification"} + if s.AllocationStrategy == nil { + invalidParams.Add(request.NewErrParamRequired("AllocationStrategy")) } if invalidParams.Len() > 0 { @@ -9153,32 +10603,71 @@ func (s *ModifyInstanceGroupsInput) Validate() error { return nil } -// SetClusterId sets the ClusterId field's value. -func (s *ModifyInstanceGroupsInput) SetClusterId(v string) *ModifyInstanceGroupsInput { - s.ClusterId = &v - return s -} - -// SetInstanceGroups sets the InstanceGroups field's value. -func (s *ModifyInstanceGroupsInput) SetInstanceGroups(v []*InstanceGroupModifyConfig) *ModifyInstanceGroupsInput { - s.InstanceGroups = v +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *OnDemandProvisioningSpecification) SetAllocationStrategy(v string) *OnDemandProvisioningSpecification { + s.AllocationStrategy = &v return s } -type ModifyInstanceGroupsOutput struct { +// Placement group configuration for an Amazon EMR cluster. The configuration +// specifies the placement strategy that can be applied to instance roles during +// cluster creation. +// +// To use this configuration, consider attaching managed policy AmazonElasticMapReducePlacementGroupPolicy +// to the EMR role. +type PlacementGroupConfig struct { _ struct{} `type:"structure"` + + // Role of the instance in the cluster. + // + // Starting with Amazon EMR version 5.23.0, the only supported instance role + // is MASTER. + // + // InstanceRole is a required field + InstanceRole *string `type:"string" required:"true" enum:"InstanceRoleType"` + + // EC2 Placement Group strategy associated with instance role. + // + // Starting with Amazon EMR version 5.23.0, the only supported placement strategy + // is SPREAD for the MASTER instance role. + PlacementStrategy *string `type:"string" enum:"PlacementGroupStrategy"` } // String returns the string representation -func (s ModifyInstanceGroupsOutput) String() string { +func (s PlacementGroupConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ModifyInstanceGroupsOutput) GoString() string { +func (s PlacementGroupConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PlacementGroupConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PlacementGroupConfig"} + if s.InstanceRole == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceRole")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceRole sets the InstanceRole field's value. +func (s *PlacementGroupConfig) SetInstanceRole(v string) *PlacementGroupConfig { + s.InstanceRole = &v + return s +} + +// SetPlacementStrategy sets the PlacementStrategy field's value. +func (s *PlacementGroupConfig) SetPlacementStrategy(v string) *PlacementGroupConfig { + s.PlacementStrategy = &v + return s +} + // The Amazon EC2 Availability Zone configuration of the cluster (job flow). type PlacementType struct { _ struct{} `type:"structure"` @@ -9248,9 +10737,15 @@ func (s PortRange) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *PortRange) Validate() error { invalidParams := request.ErrInvalidParams{Context: "PortRange"} + if s.MaxRange != nil && *s.MaxRange < -1 { + invalidParams.Add(request.NewErrParamMinValue("MaxRange", -1)) + } if s.MinRange == nil { invalidParams.Add(request.NewErrParamRequired("MinRange")) } + if s.MinRange != nil && *s.MinRange < -1 { + invalidParams.Add(request.NewErrParamMinValue("MinRange", -1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9407,6 +10902,12 @@ type PutBlockPublicAccessConfigurationInput struct { // and public access is allowed on this port. You can change this by updating // BlockPublicSecurityGroupRules to remove the exception. // + // For accounts that created clusters in a Region before November 25, 2019, + // block public access is disabled by default in that Region. To use this feature, + // you must manually enable and configure it. For accounts that did not create + // an EMR cluster in a Region before this date, block public access is enabled + // by default in that Region. + // // BlockPublicAccessConfiguration is a required field BlockPublicAccessConfiguration *BlockPublicAccessConfiguration `type:"structure" required:"true"` } @@ -9459,6 +10960,77 @@ func (s PutBlockPublicAccessConfigurationOutput) GoString() string { return s.String() } +type PutManagedScalingPolicyInput struct { + _ struct{} `type:"structure"` + + // Specifies the ID of an EMR cluster where the managed scaling policy is attached. + // + // ClusterId is a required field + ClusterId *string `type:"string" required:"true"` + + // Specifies the constraints for the managed scaling policy. + // + // ManagedScalingPolicy is a required field + ManagedScalingPolicy *ManagedScalingPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutManagedScalingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutManagedScalingPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutManagedScalingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutManagedScalingPolicyInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + if s.ManagedScalingPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("ManagedScalingPolicy")) + } + if s.ManagedScalingPolicy != nil { + if err := s.ManagedScalingPolicy.Validate(); err != nil { + invalidParams.AddNested("ManagedScalingPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterId sets the ClusterId field's value. +func (s *PutManagedScalingPolicyInput) SetClusterId(v string) *PutManagedScalingPolicyInput { + s.ClusterId = &v + return s +} + +// SetManagedScalingPolicy sets the ManagedScalingPolicy field's value. +func (s *PutManagedScalingPolicyInput) SetManagedScalingPolicy(v *ManagedScalingPolicy) *PutManagedScalingPolicyInput { + s.ManagedScalingPolicy = v + return s +} + +type PutManagedScalingPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutManagedScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutManagedScalingPolicyOutput) GoString() string { + return s.String() +} + type RemoveAutoScalingPolicyInput struct { _ struct{} `type:"structure"` @@ -9526,6 +11098,59 @@ func (s RemoveAutoScalingPolicyOutput) GoString() string { return s.String() } +type RemoveManagedScalingPolicyInput struct { + _ struct{} `type:"structure"` + + // Specifies the ID of the cluster from which the managed scaling policy will + // be removed. + // + // ClusterId is a required field + ClusterId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveManagedScalingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveManagedScalingPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveManagedScalingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveManagedScalingPolicyInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterId sets the ClusterId field's value. +func (s *RemoveManagedScalingPolicyInput) SetClusterId(v string) *RemoveManagedScalingPolicyInput { + s.ClusterId = &v + return s +} + +type RemoveManagedScalingPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveManagedScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveManagedScalingPolicyOutput) GoString() string { + return s.String() +} + // This input identifies a cluster and a list of tags to remove. type RemoveTagsInput struct { _ struct{} `type:"structure"` @@ -9659,10 +11284,18 @@ type RunJobFlowInput struct { // in the EMR Management Guide. KerberosAttributes *KerberosAttributes `type:"structure"` + // The AWS KMS customer master key (CMK) used for encrypting log files. If a + // value is not provided, the logs will remain encrypted by AES-256. 
This attribute + // is only available with EMR version 5.30.0 and later, excluding EMR 6.0.0. + LogEncryptionKmsKeyId *string `type:"string"` + // The location in Amazon S3 to write the log files of the job flow. If a value // is not provided, logs are not created. LogUri *string `type:"string"` + // The specified managed scaling policy for an Amazon EMR cluster. + ManagedScalingPolicy *ManagedScalingPolicy `type:"structure"` + // The name of the job flow. // // Name is a required field @@ -9697,6 +11330,9 @@ type RunJobFlowInput struct { // * "ganglia" - launch the cluster with the Ganglia Monitoring System installed. NewSupportedProducts []*SupportedProductConfig `type:"list"` + // The specified placement group configuration for an Amazon EMR cluster. + PlacementGroupConfigs []*PlacementGroupConfig `type:"list"` + // The Amazon EMR release label, which determines the version of open-source // application packages installed on the cluster. Release labels are in the // form emr-x.x.x, where x.x.x is an Amazon EMR release version such as emr-5.14.0. @@ -9804,6 +11440,21 @@ func (s *RunJobFlowInput) Validate() error { invalidParams.AddNested("KerberosAttributes", err.(request.ErrInvalidParams)) } } + if s.ManagedScalingPolicy != nil { + if err := s.ManagedScalingPolicy.Validate(); err != nil { + invalidParams.AddNested("ManagedScalingPolicy", err.(request.ErrInvalidParams)) + } + } + if s.PlacementGroupConfigs != nil { + for i, v := range s.PlacementGroupConfigs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PlacementGroupConfigs", i), err.(request.ErrInvalidParams)) + } + } + } if s.Steps != nil { for i, v := range s.Steps { if v == nil { @@ -9887,12 +11538,24 @@ func (s *RunJobFlowInput) SetKerberosAttributes(v *KerberosAttributes) *RunJobFl return s } +// SetLogEncryptionKmsKeyId sets the LogEncryptionKmsKeyId field's value. +func (s *RunJobFlowInput) SetLogEncryptionKmsKeyId(v string) *RunJobFlowInput { + s.LogEncryptionKmsKeyId = &v + return s +} + // SetLogUri sets the LogUri field's value. func (s *RunJobFlowInput) SetLogUri(v string) *RunJobFlowInput { s.LogUri = &v return s } +// SetManagedScalingPolicy sets the ManagedScalingPolicy field's value. +func (s *RunJobFlowInput) SetManagedScalingPolicy(v *ManagedScalingPolicy) *RunJobFlowInput { + s.ManagedScalingPolicy = v + return s +} + // SetName sets the Name field's value. func (s *RunJobFlowInput) SetName(v string) *RunJobFlowInput { s.Name = &v @@ -9905,6 +11568,12 @@ func (s *RunJobFlowInput) SetNewSupportedProducts(v []*SupportedProductConfig) * return s } +// SetPlacementGroupConfigs sets the PlacementGroupConfigs field's value. +func (s *RunJobFlowInput) SetPlacementGroupConfigs(v []*PlacementGroupConfig) *RunJobFlowInput { + s.PlacementGroupConfigs = v + return s +} + // SetReleaseLabel sets the ReleaseLabel field's value. func (s *RunJobFlowInput) SetReleaseLabel(v string) *RunJobFlowInput { s.ReleaseLabel = &v @@ -10585,60 +12254,175 @@ func (s *SimpleScalingPolicyConfiguration) SetScalingAdjustment(v int64) *Simple } // The launch specification for Spot instances in the instance fleet, which -// determines the defined duration and provisioning timeout behavior. +// determines the defined duration, provisioning timeout behavior, and allocation +// strategy. // // The instance fleet configuration is available only in Amazon EMR versions -// 4.8.0 and later, excluding 5.0.x versions. +// 4.8.0 and later, excluding 5.0.x versions. 
Spot instance allocation strategy +// is available in Amazon EMR version 5.12.1 and later. type SpotProvisioningSpecification struct { _ struct{} `type:"structure"` - // The defined duration for Spot instances (also known as Spot blocks) in minutes. - // When specified, the Spot instance does not terminate before the defined duration - // expires, and defined duration pricing for Spot instances applies. Valid values - // are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as - // a Spot instance receives its instance ID. At the end of the duration, Amazon - // EC2 marks the Spot instance for termination and provides a Spot instance - // termination notice, which gives the instance a two-minute warning before - // it terminates. - BlockDurationMinutes *int64 `type:"integer"` + // Specifies the strategy to use in launching Spot instance fleets. Currently, + // the only option is capacity-optimized (the default), which launches instances + // from Spot instance pools with optimal capacity for the number of instances + // that are launching. + AllocationStrategy *string `type:"string" enum:"SpotProvisioningAllocationStrategy"` + + // The defined duration for Spot instances (also known as Spot blocks) in minutes. + // When specified, the Spot instance does not terminate before the defined duration + // expires, and defined duration pricing for Spot instances applies. Valid values + // are 60, 120, 180, 240, 300, or 360. The duration period starts as soon as + // a Spot instance receives its instance ID. At the end of the duration, Amazon + // EC2 marks the Spot instance for termination and provides a Spot instance + // termination notice, which gives the instance a two-minute warning before + // it terminates. + BlockDurationMinutes *int64 `type:"integer"` + + // The action to take when TargetSpotCapacity has not been fulfilled when the + // TimeoutDurationMinutes has expired; that is, when all Spot instances could + // not be provisioned within the Spot provisioning timeout. Valid values are + // TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND. SWITCH_TO_ON_DEMAND specifies + // that if no Spot instances are available, On-Demand Instances should be provisioned + // to fulfill any remaining Spot capacity. + // + // TimeoutAction is a required field + TimeoutAction *string `type:"string" required:"true" enum:"SpotProvisioningTimeoutAction"` + + // The spot provisioning timeout period in minutes. If Spot instances are not + // provisioned within this time period, the TimeOutAction is taken. Minimum + // value is 5 and maximum value is 1440. The timeout applies only during initial + // provisioning, when the cluster is first created. + // + // TimeoutDurationMinutes is a required field + TimeoutDurationMinutes *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s SpotProvisioningSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SpotProvisioningSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SpotProvisioningSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SpotProvisioningSpecification"} + if s.TimeoutAction == nil { + invalidParams.Add(request.NewErrParamRequired("TimeoutAction")) + } + if s.TimeoutDurationMinutes == nil { + invalidParams.Add(request.NewErrParamRequired("TimeoutDurationMinutes")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *SpotProvisioningSpecification) SetAllocationStrategy(v string) *SpotProvisioningSpecification { + s.AllocationStrategy = &v + return s +} + +// SetBlockDurationMinutes sets the BlockDurationMinutes field's value. +func (s *SpotProvisioningSpecification) SetBlockDurationMinutes(v int64) *SpotProvisioningSpecification { + s.BlockDurationMinutes = &v + return s +} + +// SetTimeoutAction sets the TimeoutAction field's value. +func (s *SpotProvisioningSpecification) SetTimeoutAction(v string) *SpotProvisioningSpecification { + s.TimeoutAction = &v + return s +} + +// SetTimeoutDurationMinutes sets the TimeoutDurationMinutes field's value. +func (s *SpotProvisioningSpecification) SetTimeoutDurationMinutes(v int64) *SpotProvisioningSpecification { + s.TimeoutDurationMinutes = &v + return s +} + +type StartNotebookExecutionInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the EMR Notebook to use for notebook execution. + // + // EditorId is a required field + EditorId *string `type:"string" required:"true"` + + // Specifies the execution engine (cluster) that runs the notebook execution. + // + // ExecutionEngine is a required field + ExecutionEngine *ExecutionEngineConfig `type:"structure" required:"true"` + + // An optional name for the notebook execution. + NotebookExecutionName *string `type:"string"` - // The action to take when TargetSpotCapacity has not been fulfilled when the - // TimeoutDurationMinutes has expired; that is, when all Spot instances could - // not be provisioned within the Spot provisioning timeout. Valid values are - // TERMINATE_CLUSTER and SWITCH_TO_ON_DEMAND. SWITCH_TO_ON_DEMAND specifies - // that if no Spot instances are available, On-Demand Instances should be provisioned - // to fulfill any remaining Spot capacity. + // The unique identifier of the Amazon EC2 security group to associate with + // the EMR Notebook for this notebook execution. + NotebookInstanceSecurityGroupId *string `type:"string"` + + // Input parameters in JSON format passed to the EMR Notebook at runtime for + // execution. + NotebookParams *string `type:"string"` + + // The path and file name of the notebook file for this execution, relative + // to the path specified for the EMR Notebook. For example, if you specify a + // path of s3://MyBucket/MyNotebooks when you create an EMR Notebook for a notebook + // with an ID of e-ABCDEFGHIJK1234567890ABCD (the EditorID of this request), + // and you specify a RelativePath of my_notebook_executions/notebook_execution.ipynb, + // the location of the file for the notebook execution is s3://MyBucket/MyNotebooks/e-ABCDEFGHIJK1234567890ABCD/my_notebook_executions/notebook_execution.ipynb. // - // TimeoutAction is a required field - TimeoutAction *string `type:"string" required:"true" enum:"SpotProvisioningTimeoutAction"` + // RelativePath is a required field + RelativePath *string `type:"string" required:"true"` - // The spot provisioning timeout period in minutes. 
If Spot instances are not - // provisioned within this time period, the TimeOutAction is taken. Minimum - // value is 5 and maximum value is 1440. The timeout applies only during initial - // provisioning, when the cluster is first created. + // The name or ARN of the IAM role that is used as the service role for Amazon + // EMR (the EMR role) for the notebook execution. // - // TimeoutDurationMinutes is a required field - TimeoutDurationMinutes *int64 `type:"integer" required:"true"` + // ServiceRole is a required field + ServiceRole *string `type:"string" required:"true"` + + // A list of tags associated with a notebook execution. Tags are user-defined + // key value pairs that consist of a required key string with a maximum of 128 + // characters and an optional value string with a maximum of 256 characters. + Tags []*Tag `type:"list"` } // String returns the string representation -func (s SpotProvisioningSpecification) String() string { +func (s StartNotebookExecutionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SpotProvisioningSpecification) GoString() string { +func (s StartNotebookExecutionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *SpotProvisioningSpecification) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SpotProvisioningSpecification"} - if s.TimeoutAction == nil { - invalidParams.Add(request.NewErrParamRequired("TimeoutAction")) +func (s *StartNotebookExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartNotebookExecutionInput"} + if s.EditorId == nil { + invalidParams.Add(request.NewErrParamRequired("EditorId")) } - if s.TimeoutDurationMinutes == nil { - invalidParams.Add(request.NewErrParamRequired("TimeoutDurationMinutes")) + if s.ExecutionEngine == nil { + invalidParams.Add(request.NewErrParamRequired("ExecutionEngine")) + } + if s.RelativePath == nil { + invalidParams.Add(request.NewErrParamRequired("RelativePath")) + } + if s.ServiceRole == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceRole")) + } + if s.ExecutionEngine != nil { + if err := s.ExecutionEngine.Validate(); err != nil { + invalidParams.AddNested("ExecutionEngine", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -10647,21 +12431,74 @@ func (s *SpotProvisioningSpecification) Validate() error { return nil } -// SetBlockDurationMinutes sets the BlockDurationMinutes field's value. -func (s *SpotProvisioningSpecification) SetBlockDurationMinutes(v int64) *SpotProvisioningSpecification { - s.BlockDurationMinutes = &v +// SetEditorId sets the EditorId field's value. +func (s *StartNotebookExecutionInput) SetEditorId(v string) *StartNotebookExecutionInput { + s.EditorId = &v return s } -// SetTimeoutAction sets the TimeoutAction field's value. -func (s *SpotProvisioningSpecification) SetTimeoutAction(v string) *SpotProvisioningSpecification { - s.TimeoutAction = &v +// SetExecutionEngine sets the ExecutionEngine field's value. +func (s *StartNotebookExecutionInput) SetExecutionEngine(v *ExecutionEngineConfig) *StartNotebookExecutionInput { + s.ExecutionEngine = v return s } -// SetTimeoutDurationMinutes sets the TimeoutDurationMinutes field's value. -func (s *SpotProvisioningSpecification) SetTimeoutDurationMinutes(v int64) *SpotProvisioningSpecification { - s.TimeoutDurationMinutes = &v +// SetNotebookExecutionName sets the NotebookExecutionName field's value. 
+func (s *StartNotebookExecutionInput) SetNotebookExecutionName(v string) *StartNotebookExecutionInput { + s.NotebookExecutionName = &v + return s +} + +// SetNotebookInstanceSecurityGroupId sets the NotebookInstanceSecurityGroupId field's value. +func (s *StartNotebookExecutionInput) SetNotebookInstanceSecurityGroupId(v string) *StartNotebookExecutionInput { + s.NotebookInstanceSecurityGroupId = &v + return s +} + +// SetNotebookParams sets the NotebookParams field's value. +func (s *StartNotebookExecutionInput) SetNotebookParams(v string) *StartNotebookExecutionInput { + s.NotebookParams = &v + return s +} + +// SetRelativePath sets the RelativePath field's value. +func (s *StartNotebookExecutionInput) SetRelativePath(v string) *StartNotebookExecutionInput { + s.RelativePath = &v + return s +} + +// SetServiceRole sets the ServiceRole field's value. +func (s *StartNotebookExecutionInput) SetServiceRole(v string) *StartNotebookExecutionInput { + s.ServiceRole = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *StartNotebookExecutionInput) SetTags(v []*Tag) *StartNotebookExecutionInput { + s.Tags = v + return s +} + +type StartNotebookExecutionOutput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the notebook execution. + NotebookExecutionId *string `type:"string"` +} + +// String returns the string representation +func (s StartNotebookExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartNotebookExecutionOutput) GoString() string { + return s.String() +} + +// SetNotebookExecutionId sets the NotebookExecutionId field's value. +func (s *StartNotebookExecutionOutput) SetNotebookExecutionId(v string) *StartNotebookExecutionOutput { + s.NotebookExecutionId = &v return s } @@ -11087,6 +12924,58 @@ func (s *StepTimeline) SetStartDateTime(v time.Time) *StepTimeline { return s } +type StopNotebookExecutionInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the notebook execution. + // + // NotebookExecutionId is a required field + NotebookExecutionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StopNotebookExecutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopNotebookExecutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopNotebookExecutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopNotebookExecutionInput"} + if s.NotebookExecutionId == nil { + invalidParams.Add(request.NewErrParamRequired("NotebookExecutionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNotebookExecutionId sets the NotebookExecutionId field's value. +func (s *StopNotebookExecutionInput) SetNotebookExecutionId(v string) *StopNotebookExecutionInput { + s.NotebookExecutionId = &v + return s +} + +type StopNotebookExecutionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StopNotebookExecutionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopNotebookExecutionOutput) GoString() string { + return s.String() +} + // The list of supported product configurations which allow user-supplied arguments. 
// EMR accepts these arguments and forwards them to the corresponding installation // script as bootstrap action arguments. @@ -11291,6 +13180,16 @@ const ( ActionOnFailureContinue = "CONTINUE" ) +// ActionOnFailure_Values returns all elements of the ActionOnFailure enum +func ActionOnFailure_Values() []string { + return []string{ + ActionOnFailureTerminateJobFlow, + ActionOnFailureTerminateCluster, + ActionOnFailureCancelAndWait, + ActionOnFailureContinue, + } +} + const ( // AdjustmentTypeChangeInCapacity is a AdjustmentType enum value AdjustmentTypeChangeInCapacity = "CHANGE_IN_CAPACITY" @@ -11302,6 +13201,15 @@ const ( AdjustmentTypeExactCapacity = "EXACT_CAPACITY" ) +// AdjustmentType_Values returns all elements of the AdjustmentType enum +func AdjustmentType_Values() []string { + return []string{ + AdjustmentTypeChangeInCapacity, + AdjustmentTypePercentChangeInCapacity, + AdjustmentTypeExactCapacity, + } +} + const ( // AutoScalingPolicyStatePending is a AutoScalingPolicyState enum value AutoScalingPolicyStatePending = "PENDING" @@ -11322,6 +13230,18 @@ const ( AutoScalingPolicyStateFailed = "FAILED" ) +// AutoScalingPolicyState_Values returns all elements of the AutoScalingPolicyState enum +func AutoScalingPolicyState_Values() []string { + return []string{ + AutoScalingPolicyStatePending, + AutoScalingPolicyStateAttaching, + AutoScalingPolicyStateAttached, + AutoScalingPolicyStateDetaching, + AutoScalingPolicyStateDetached, + AutoScalingPolicyStateFailed, + } +} + const ( // AutoScalingPolicyStateChangeReasonCodeUserRequest is a AutoScalingPolicyStateChangeReasonCode enum value AutoScalingPolicyStateChangeReasonCodeUserRequest = "USER_REQUEST" @@ -11333,6 +13253,15 @@ const ( AutoScalingPolicyStateChangeReasonCodeCleanupFailure = "CLEANUP_FAILURE" ) +// AutoScalingPolicyStateChangeReasonCode_Values returns all elements of the AutoScalingPolicyStateChangeReasonCode enum +func AutoScalingPolicyStateChangeReasonCode_Values() []string { + return []string{ + AutoScalingPolicyStateChangeReasonCodeUserRequest, + AutoScalingPolicyStateChangeReasonCodeProvisionFailure, + AutoScalingPolicyStateChangeReasonCodeCleanupFailure, + } +} + const ( // CancelStepsRequestStatusSubmitted is a CancelStepsRequestStatus enum value CancelStepsRequestStatusSubmitted = "SUBMITTED" @@ -11341,6 +13270,14 @@ const ( CancelStepsRequestStatusFailed = "FAILED" ) +// CancelStepsRequestStatus_Values returns all elements of the CancelStepsRequestStatus enum +func CancelStepsRequestStatus_Values() []string { + return []string{ + CancelStepsRequestStatusSubmitted, + CancelStepsRequestStatusFailed, + } +} + const ( // ClusterStateStarting is a ClusterState enum value ClusterStateStarting = "STARTING" @@ -11364,6 +13301,19 @@ const ( ClusterStateTerminatedWithErrors = "TERMINATED_WITH_ERRORS" ) +// ClusterState_Values returns all elements of the ClusterState enum +func ClusterState_Values() []string { + return []string{ + ClusterStateStarting, + ClusterStateBootstrapping, + ClusterStateRunning, + ClusterStateWaiting, + ClusterStateTerminating, + ClusterStateTerminated, + ClusterStateTerminatedWithErrors, + } +} + const ( // ClusterStateChangeReasonCodeInternalError is a ClusterStateChangeReasonCode enum value ClusterStateChangeReasonCodeInternalError = "INTERNAL_ERROR" @@ -11390,6 +13340,20 @@ const ( ClusterStateChangeReasonCodeAllStepsCompleted = "ALL_STEPS_COMPLETED" ) +// ClusterStateChangeReasonCode_Values returns all elements of the ClusterStateChangeReasonCode enum +func ClusterStateChangeReasonCode_Values() 
[]string { + return []string{ + ClusterStateChangeReasonCodeInternalError, + ClusterStateChangeReasonCodeValidationError, + ClusterStateChangeReasonCodeInstanceFailure, + ClusterStateChangeReasonCodeInstanceFleetTimeout, + ClusterStateChangeReasonCodeBootstrapFailure, + ClusterStateChangeReasonCodeUserRequest, + ClusterStateChangeReasonCodeStepFailure, + ClusterStateChangeReasonCodeAllStepsCompleted, + } +} + const ( // ComparisonOperatorGreaterThanOrEqual is a ComparisonOperator enum value ComparisonOperatorGreaterThanOrEqual = "GREATER_THAN_OR_EQUAL" @@ -11404,6 +13368,48 @@ const ( ComparisonOperatorLessThanOrEqual = "LESS_THAN_OR_EQUAL" ) +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorGreaterThanOrEqual, + ComparisonOperatorGreaterThan, + ComparisonOperatorLessThan, + ComparisonOperatorLessThanOrEqual, + } +} + +const ( + // ComputeLimitsUnitTypeInstanceFleetUnits is a ComputeLimitsUnitType enum value + ComputeLimitsUnitTypeInstanceFleetUnits = "InstanceFleetUnits" + + // ComputeLimitsUnitTypeInstances is a ComputeLimitsUnitType enum value + ComputeLimitsUnitTypeInstances = "Instances" + + // ComputeLimitsUnitTypeVcpu is a ComputeLimitsUnitType enum value + ComputeLimitsUnitTypeVcpu = "VCPU" +) + +// ComputeLimitsUnitType_Values returns all elements of the ComputeLimitsUnitType enum +func ComputeLimitsUnitType_Values() []string { + return []string{ + ComputeLimitsUnitTypeInstanceFleetUnits, + ComputeLimitsUnitTypeInstances, + ComputeLimitsUnitTypeVcpu, + } +} + +const ( + // ExecutionEngineTypeEmr is a ExecutionEngineType enum value + ExecutionEngineTypeEmr = "EMR" +) + +// ExecutionEngineType_Values returns all elements of the ExecutionEngineType enum +func ExecutionEngineType_Values() []string { + return []string{ + ExecutionEngineTypeEmr, + } +} + const ( // InstanceCollectionTypeInstanceFleet is a InstanceCollectionType enum value InstanceCollectionTypeInstanceFleet = "INSTANCE_FLEET" @@ -11412,6 +13418,14 @@ const ( InstanceCollectionTypeInstanceGroup = "INSTANCE_GROUP" ) +// InstanceCollectionType_Values returns all elements of the InstanceCollectionType enum +func InstanceCollectionType_Values() []string { + return []string{ + InstanceCollectionTypeInstanceFleet, + InstanceCollectionTypeInstanceGroup, + } +} + const ( // InstanceFleetStateProvisioning is a InstanceFleetState enum value InstanceFleetStateProvisioning = "PROVISIONING" @@ -11435,6 +13449,19 @@ const ( InstanceFleetStateTerminated = "TERMINATED" ) +// InstanceFleetState_Values returns all elements of the InstanceFleetState enum +func InstanceFleetState_Values() []string { + return []string{ + InstanceFleetStateProvisioning, + InstanceFleetStateBootstrapping, + InstanceFleetStateRunning, + InstanceFleetStateResizing, + InstanceFleetStateSuspended, + InstanceFleetStateTerminating, + InstanceFleetStateTerminated, + } +} + const ( // InstanceFleetStateChangeReasonCodeInternalError is a InstanceFleetStateChangeReasonCode enum value InstanceFleetStateChangeReasonCodeInternalError = "INTERNAL_ERROR" @@ -11449,6 +13476,16 @@ const ( InstanceFleetStateChangeReasonCodeClusterTerminated = "CLUSTER_TERMINATED" ) +// InstanceFleetStateChangeReasonCode_Values returns all elements of the InstanceFleetStateChangeReasonCode enum +func InstanceFleetStateChangeReasonCode_Values() []string { + return []string{ + InstanceFleetStateChangeReasonCodeInternalError, + InstanceFleetStateChangeReasonCodeValidationError, + 
InstanceFleetStateChangeReasonCodeInstanceFailure, + InstanceFleetStateChangeReasonCodeClusterTerminated, + } +} + const ( // InstanceFleetTypeMaster is a InstanceFleetType enum value InstanceFleetTypeMaster = "MASTER" @@ -11460,6 +13497,15 @@ const ( InstanceFleetTypeTask = "TASK" ) +// InstanceFleetType_Values returns all elements of the InstanceFleetType enum +func InstanceFleetType_Values() []string { + return []string{ + InstanceFleetTypeMaster, + InstanceFleetTypeCore, + InstanceFleetTypeTask, + } +} + const ( // InstanceGroupStateProvisioning is a InstanceGroupState enum value InstanceGroupStateProvisioning = "PROVISIONING" @@ -11495,6 +13541,23 @@ const ( InstanceGroupStateEnded = "ENDED" ) +// InstanceGroupState_Values returns all elements of the InstanceGroupState enum +func InstanceGroupState_Values() []string { + return []string{ + InstanceGroupStateProvisioning, + InstanceGroupStateBootstrapping, + InstanceGroupStateRunning, + InstanceGroupStateReconfiguring, + InstanceGroupStateResizing, + InstanceGroupStateSuspended, + InstanceGroupStateTerminating, + InstanceGroupStateTerminated, + InstanceGroupStateArrested, + InstanceGroupStateShuttingDown, + InstanceGroupStateEnded, + } +} + const ( // InstanceGroupStateChangeReasonCodeInternalError is a InstanceGroupStateChangeReasonCode enum value InstanceGroupStateChangeReasonCodeInternalError = "INTERNAL_ERROR" @@ -11509,6 +13572,16 @@ const ( InstanceGroupStateChangeReasonCodeClusterTerminated = "CLUSTER_TERMINATED" ) +// InstanceGroupStateChangeReasonCode_Values returns all elements of the InstanceGroupStateChangeReasonCode enum +func InstanceGroupStateChangeReasonCode_Values() []string { + return []string{ + InstanceGroupStateChangeReasonCodeInternalError, + InstanceGroupStateChangeReasonCodeValidationError, + InstanceGroupStateChangeReasonCodeInstanceFailure, + InstanceGroupStateChangeReasonCodeClusterTerminated, + } +} + const ( // InstanceGroupTypeMaster is a InstanceGroupType enum value InstanceGroupTypeMaster = "MASTER" @@ -11520,6 +13593,15 @@ const ( InstanceGroupTypeTask = "TASK" ) +// InstanceGroupType_Values returns all elements of the InstanceGroupType enum +func InstanceGroupType_Values() []string { + return []string{ + InstanceGroupTypeMaster, + InstanceGroupTypeCore, + InstanceGroupTypeTask, + } +} + const ( // InstanceRoleTypeMaster is a InstanceRoleType enum value InstanceRoleTypeMaster = "MASTER" @@ -11531,6 +13613,15 @@ const ( InstanceRoleTypeTask = "TASK" ) +// InstanceRoleType_Values returns all elements of the InstanceRoleType enum +func InstanceRoleType_Values() []string { + return []string{ + InstanceRoleTypeMaster, + InstanceRoleTypeCore, + InstanceRoleTypeTask, + } +} + const ( // InstanceStateAwaitingFulfillment is a InstanceState enum value InstanceStateAwaitingFulfillment = "AWAITING_FULFILLMENT" @@ -11548,6 +13639,17 @@ const ( InstanceStateTerminated = "TERMINATED" ) +// InstanceState_Values returns all elements of the InstanceState enum +func InstanceState_Values() []string { + return []string{ + InstanceStateAwaitingFulfillment, + InstanceStateProvisioning, + InstanceStateBootstrapping, + InstanceStateRunning, + InstanceStateTerminated, + } +} + const ( // InstanceStateChangeReasonCodeInternalError is a InstanceStateChangeReasonCode enum value InstanceStateChangeReasonCodeInternalError = "INTERNAL_ERROR" @@ -11565,6 +13667,17 @@ const ( InstanceStateChangeReasonCodeClusterTerminated = "CLUSTER_TERMINATED" ) +// InstanceStateChangeReasonCode_Values returns all elements of the 
InstanceStateChangeReasonCode enum +func InstanceStateChangeReasonCode_Values() []string { + return []string{ + InstanceStateChangeReasonCodeInternalError, + InstanceStateChangeReasonCodeValidationError, + InstanceStateChangeReasonCodeInstanceFailure, + InstanceStateChangeReasonCodeBootstrapFailure, + InstanceStateChangeReasonCodeClusterTerminated, + } +} + // The type of instance. const ( // JobFlowExecutionStateStarting is a JobFlowExecutionState enum value @@ -11592,6 +13705,20 @@ const ( JobFlowExecutionStateFailed = "FAILED" ) +// JobFlowExecutionState_Values returns all elements of the JobFlowExecutionState enum +func JobFlowExecutionState_Values() []string { + return []string{ + JobFlowExecutionStateStarting, + JobFlowExecutionStateBootstrapping, + JobFlowExecutionStateRunning, + JobFlowExecutionStateWaiting, + JobFlowExecutionStateShuttingDown, + JobFlowExecutionStateTerminated, + JobFlowExecutionStateCompleted, + JobFlowExecutionStateFailed, + } +} + const ( // MarketTypeOnDemand is a MarketType enum value MarketTypeOnDemand = "ON_DEMAND" @@ -11600,6 +13727,98 @@ const ( MarketTypeSpot = "SPOT" ) +// MarketType_Values returns all elements of the MarketType enum +func MarketType_Values() []string { + return []string{ + MarketTypeOnDemand, + MarketTypeSpot, + } +} + +const ( + // NotebookExecutionStatusStartPending is a NotebookExecutionStatus enum value + NotebookExecutionStatusStartPending = "START_PENDING" + + // NotebookExecutionStatusStarting is a NotebookExecutionStatus enum value + NotebookExecutionStatusStarting = "STARTING" + + // NotebookExecutionStatusRunning is a NotebookExecutionStatus enum value + NotebookExecutionStatusRunning = "RUNNING" + + // NotebookExecutionStatusFinishing is a NotebookExecutionStatus enum value + NotebookExecutionStatusFinishing = "FINISHING" + + // NotebookExecutionStatusFinished is a NotebookExecutionStatus enum value + NotebookExecutionStatusFinished = "FINISHED" + + // NotebookExecutionStatusFailing is a NotebookExecutionStatus enum value + NotebookExecutionStatusFailing = "FAILING" + + // NotebookExecutionStatusFailed is a NotebookExecutionStatus enum value + NotebookExecutionStatusFailed = "FAILED" + + // NotebookExecutionStatusStopPending is a NotebookExecutionStatus enum value + NotebookExecutionStatusStopPending = "STOP_PENDING" + + // NotebookExecutionStatusStopping is a NotebookExecutionStatus enum value + NotebookExecutionStatusStopping = "STOPPING" + + // NotebookExecutionStatusStopped is a NotebookExecutionStatus enum value + NotebookExecutionStatusStopped = "STOPPED" +) + +// NotebookExecutionStatus_Values returns all elements of the NotebookExecutionStatus enum +func NotebookExecutionStatus_Values() []string { + return []string{ + NotebookExecutionStatusStartPending, + NotebookExecutionStatusStarting, + NotebookExecutionStatusRunning, + NotebookExecutionStatusFinishing, + NotebookExecutionStatusFinished, + NotebookExecutionStatusFailing, + NotebookExecutionStatusFailed, + NotebookExecutionStatusStopPending, + NotebookExecutionStatusStopping, + NotebookExecutionStatusStopped, + } +} + +const ( + // OnDemandProvisioningAllocationStrategyLowestPrice is a OnDemandProvisioningAllocationStrategy enum value + OnDemandProvisioningAllocationStrategyLowestPrice = "lowest-price" +) + +// OnDemandProvisioningAllocationStrategy_Values returns all elements of the OnDemandProvisioningAllocationStrategy enum +func OnDemandProvisioningAllocationStrategy_Values() []string { + return []string{ + OnDemandProvisioningAllocationStrategyLowestPrice, + 
} +} + +const ( + // PlacementGroupStrategySpread is a PlacementGroupStrategy enum value + PlacementGroupStrategySpread = "SPREAD" + + // PlacementGroupStrategyPartition is a PlacementGroupStrategy enum value + PlacementGroupStrategyPartition = "PARTITION" + + // PlacementGroupStrategyCluster is a PlacementGroupStrategy enum value + PlacementGroupStrategyCluster = "CLUSTER" + + // PlacementGroupStrategyNone is a PlacementGroupStrategy enum value + PlacementGroupStrategyNone = "NONE" +) + +// PlacementGroupStrategy_Values returns all elements of the PlacementGroupStrategy enum +func PlacementGroupStrategy_Values() []string { + return []string{ + PlacementGroupStrategySpread, + PlacementGroupStrategyPartition, + PlacementGroupStrategyCluster, + PlacementGroupStrategyNone, + } +} + const ( // RepoUpgradeOnBootSecurity is a RepoUpgradeOnBoot enum value RepoUpgradeOnBootSecurity = "SECURITY" @@ -11608,6 +13827,14 @@ const ( RepoUpgradeOnBootNone = "NONE" ) +// RepoUpgradeOnBoot_Values returns all elements of the RepoUpgradeOnBoot enum +func RepoUpgradeOnBoot_Values() []string { + return []string{ + RepoUpgradeOnBootSecurity, + RepoUpgradeOnBootNone, + } +} + const ( // ScaleDownBehaviorTerminateAtInstanceHour is a ScaleDownBehavior enum value ScaleDownBehaviorTerminateAtInstanceHour = "TERMINATE_AT_INSTANCE_HOUR" @@ -11616,6 +13843,26 @@ const ( ScaleDownBehaviorTerminateAtTaskCompletion = "TERMINATE_AT_TASK_COMPLETION" ) +// ScaleDownBehavior_Values returns all elements of the ScaleDownBehavior enum +func ScaleDownBehavior_Values() []string { + return []string{ + ScaleDownBehaviorTerminateAtInstanceHour, + ScaleDownBehaviorTerminateAtTaskCompletion, + } +} + +const ( + // SpotProvisioningAllocationStrategyCapacityOptimized is a SpotProvisioningAllocationStrategy enum value + SpotProvisioningAllocationStrategyCapacityOptimized = "capacity-optimized" +) + +// SpotProvisioningAllocationStrategy_Values returns all elements of the SpotProvisioningAllocationStrategy enum +func SpotProvisioningAllocationStrategy_Values() []string { + return []string{ + SpotProvisioningAllocationStrategyCapacityOptimized, + } +} + const ( // SpotProvisioningTimeoutActionSwitchToOnDemand is a SpotProvisioningTimeoutAction enum value SpotProvisioningTimeoutActionSwitchToOnDemand = "SWITCH_TO_ON_DEMAND" @@ -11624,6 +13871,14 @@ const ( SpotProvisioningTimeoutActionTerminateCluster = "TERMINATE_CLUSTER" ) +// SpotProvisioningTimeoutAction_Values returns all elements of the SpotProvisioningTimeoutAction enum +func SpotProvisioningTimeoutAction_Values() []string { + return []string{ + SpotProvisioningTimeoutActionSwitchToOnDemand, + SpotProvisioningTimeoutActionTerminateCluster, + } +} + const ( // StatisticSampleCount is a Statistic enum value StatisticSampleCount = "SAMPLE_COUNT" @@ -11641,6 +13896,17 @@ const ( StatisticMaximum = "MAXIMUM" ) +// Statistic_Values returns all elements of the Statistic enum +func Statistic_Values() []string { + return []string{ + StatisticSampleCount, + StatisticAverage, + StatisticSum, + StatisticMinimum, + StatisticMaximum, + } +} + const ( // StepCancellationOptionSendInterrupt is a StepCancellationOption enum value StepCancellationOptionSendInterrupt = "SEND_INTERRUPT" @@ -11649,6 +13915,14 @@ const ( StepCancellationOptionTerminateProcess = "TERMINATE_PROCESS" ) +// StepCancellationOption_Values returns all elements of the StepCancellationOption enum +func StepCancellationOption_Values() []string { + return []string{ + StepCancellationOptionSendInterrupt, + 
StepCancellationOptionTerminateProcess, + } +} + const ( // StepExecutionStatePending is a StepExecutionState enum value StepExecutionStatePending = "PENDING" @@ -11672,6 +13946,19 @@ const ( StepExecutionStateInterrupted = "INTERRUPTED" ) +// StepExecutionState_Values returns all elements of the StepExecutionState enum +func StepExecutionState_Values() []string { + return []string{ + StepExecutionStatePending, + StepExecutionStateRunning, + StepExecutionStateContinue, + StepExecutionStateCompleted, + StepExecutionStateCancelled, + StepExecutionStateFailed, + StepExecutionStateInterrupted, + } +} + const ( // StepStatePending is a StepState enum value StepStatePending = "PENDING" @@ -11695,11 +13982,31 @@ const ( StepStateInterrupted = "INTERRUPTED" ) +// StepState_Values returns all elements of the StepState enum +func StepState_Values() []string { + return []string{ + StepStatePending, + StepStateCancelPending, + StepStateRunning, + StepStateCompleted, + StepStateCancelled, + StepStateFailed, + StepStateInterrupted, + } +} + const ( // StepStateChangeReasonCodeNone is a StepStateChangeReasonCode enum value StepStateChangeReasonCodeNone = "NONE" ) +// StepStateChangeReasonCode_Values returns all elements of the StepStateChangeReasonCode enum +func StepStateChangeReasonCode_Values() []string { + return []string{ + StepStateChangeReasonCodeNone, + } +} + const ( // UnitNone is a Unit enum value UnitNone = "NONE" @@ -11782,3 +14089,36 @@ const ( // UnitCountPerSecond is a Unit enum value UnitCountPerSecond = "COUNT_PER_SECOND" ) + +// Unit_Values returns all elements of the Unit enum +func Unit_Values() []string { + return []string{ + UnitNone, + UnitSeconds, + UnitMicroSeconds, + UnitMilliSeconds, + UnitBytes, + UnitKiloBytes, + UnitMegaBytes, + UnitGigaBytes, + UnitTeraBytes, + UnitBits, + UnitKiloBits, + UnitMegaBits, + UnitGigaBits, + UnitTeraBits, + UnitPercent, + UnitCount, + UnitBytesPerSecond, + UnitKiloBytesPerSecond, + UnitMegaBytesPerSecond, + UnitGigaBytesPerSecond, + UnitTeraBytesPerSecond, + UnitBitsPerSecond, + UnitKiloBitsPerSecond, + UnitMegaBitsPerSecond, + UnitGigaBitsPerSecond, + UnitTeraBitsPerSecond, + UnitCountPerSecond, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/service.go b/vendor/github.com/aws/aws-sdk-go/service/emr/service.go index 5963fa54a..27a55b252 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/emr/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go index b07a4fc7e..3db7c5a87 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go @@ -696,11 +696,8 @@ func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *reque // To write single data records into a delivery stream, use PutRecord. Applications // using these operations are referred to as producers. // -// By default, each delivery stream can take in up to 2,000 transactions per -// second, 5,000 records per second, or 5 MB per second. 
If you use PutRecord -// and PutRecordBatch, the limits are an aggregate across these two operations -// for each delivery stream. For more information about limits, see Amazon Kinesis -// Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). +// For information about service quota, see Amazon Kinesis Data Firehose Quota +// (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). // // Each PutRecordBatch request supports up to 500 records. Each record in the // request can be as large as 1,000 KB (before 64-bit encoding), up to a limit @@ -866,9 +863,11 @@ func (c *Firehose) StartDeliveryStreamEncryptionRequest(input *StartDeliveryStre // // Even if encryption is currently enabled for a delivery stream, you can still // invoke this operation on it to change the ARN of the CMK or both its type -// and ARN. In this case, Kinesis Data Firehose schedules the grant it had on -// the old CMK for retirement and creates a grant that enables it to use the -// new CMK to encrypt and decrypt data and to manage the grant. +// and ARN. If you invoke this method to change the CMK, and the old CMK is +// of type CUSTOMER_MANAGED_CMK, Kinesis Data Firehose schedules the grant it +// had on the old CMK for retirement. If the new CMK is of type CUSTOMER_MANAGED_CMK, +// Kinesis Data Firehose creates a grant that enables it to use the new CMK +// to encrypt and decrypt data and to manage the grant. // // If a delivery stream already has encryption enabled and then you invoke this // operation to change the ARN of the CMK or both its type and ARN and you get @@ -876,10 +875,12 @@ func (c *Firehose) StartDeliveryStreamEncryptionRequest(input *StartDeliveryStre // In this case, encryption remains enabled with the old CMK. // // If the encryption status of your delivery stream is ENABLING_FAILED, you -// can invoke this operation again. +// can invoke this operation again with a valid CMK. The CMK must be enabled +// and the key policy mustn't explicitly deny the permission for Kinesis Data +// Firehose to invoke KMS encrypt and decrypt operations. // -// You can only enable SSE for a delivery stream that uses DirectPut as its -// source. +// You can enable SSE for a delivery stream only if it's a delivery stream that +// uses DirectPut as its source. // // The StartDeliveryStreamEncryption and StopDeliveryStreamEncryption operations // have a combined limit of 25 calls per delivery stream per 24 hours. For example, @@ -1470,8 +1471,8 @@ func (s *CloudWatchLoggingOptions) SetLogStreamName(v string) *CloudWatchLogging // Another modification has already happened. Fetch VersionId again and use // it to update the destination. type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -1489,17 +1490,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. 
-func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1507,22 +1508,22 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } // Describes a COPY command for Amazon Redshift. @@ -1633,6 +1634,10 @@ type CreateDeliveryStreamInput struct { // The destination in Amazon S3. You can specify only one destination. ExtendedS3DestinationConfiguration *ExtendedS3DestinationConfiguration `type:"structure"` + // Enables configuring Kinesis Firehose to deliver data to any HTTP endpoint + // destination. You can specify only one destination. + HttpEndpointDestinationConfiguration *HttpEndpointDestinationConfiguration `type:"structure"` + // When a Kinesis data stream is used as the source for the delivery stream, // a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon // Resource Name (ARN) and the role ARN for the source stream. @@ -1697,6 +1702,11 @@ func (s *CreateDeliveryStreamInput) Validate() error { invalidParams.AddNested("ExtendedS3DestinationConfiguration", err.(request.ErrInvalidParams)) } } + if s.HttpEndpointDestinationConfiguration != nil { + if err := s.HttpEndpointDestinationConfiguration.Validate(); err != nil { + invalidParams.AddNested("HttpEndpointDestinationConfiguration", err.(request.ErrInvalidParams)) + } + } if s.KinesisStreamSourceConfiguration != nil { if err := s.KinesisStreamSourceConfiguration.Validate(); err != nil { invalidParams.AddNested("KinesisStreamSourceConfiguration", err.(request.ErrInvalidParams)) @@ -1764,6 +1774,12 @@ func (s *CreateDeliveryStreamInput) SetExtendedS3DestinationConfiguration(v *Ext return s } +// SetHttpEndpointDestinationConfiguration sets the HttpEndpointDestinationConfiguration field's value. +func (s *CreateDeliveryStreamInput) SetHttpEndpointDestinationConfiguration(v *HttpEndpointDestinationConfiguration) *CreateDeliveryStreamInput { + s.HttpEndpointDestinationConfiguration = v + return s +} + // SetKinesisStreamSourceConfiguration sets the KinesisStreamSourceConfiguration field's value. func (s *CreateDeliveryStreamInput) SetKinesisStreamSourceConfiguration(v *KinesisStreamSourceConfiguration) *CreateDeliveryStreamInput { s.KinesisStreamSourceConfiguration = v @@ -1831,14 +1847,17 @@ type DataFormatConversionConfiguration struct { Enabled *bool `type:"boolean"` // Specifies the deserializer that you want Kinesis Data Firehose to use to - // convert the format of your data from JSON. + // convert the format of your data from JSON. This parameter is required if + // Enabled is set to true. 
InputFormatConfiguration *InputFormatConfiguration `type:"structure"` // Specifies the serializer that you want Kinesis Data Firehose to use to convert - // the format of your data to the Parquet or ORC format. + // the format of your data to the Parquet or ORC format. This parameter is required + // if Enabled is set to true. OutputFormatConfiguration *OutputFormatConfiguration `type:"structure"` // Specifies the AWS Glue Data Catalog table that contains the column information. + // This parameter is required if Enabled is set to true. SchemaConfiguration *SchemaConfiguration `type:"structure"` } @@ -1860,6 +1879,11 @@ func (s *DataFormatConversionConfiguration) Validate() error { invalidParams.AddNested("OutputFormatConfiguration", err.(request.ErrInvalidParams)) } } + if s.SchemaConfiguration != nil { + if err := s.SchemaConfiguration.Validate(); err != nil { + invalidParams.AddNested("SchemaConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2179,8 +2203,8 @@ func (s *DeliveryStreamEncryptionConfiguration) SetStatus(v string) *DeliveryStr return s } -// Used to specify the type and Amazon Resource Name (ARN) of the CMK needed -// for Server-Side Encryption (SSE). +// Specifies the type and Amazon Resource Name (ARN) of the CMK to use for Server-Side +// Encryption (SSE). type DeliveryStreamEncryptionConfigurationInput struct { _ struct{} `type:"structure"` @@ -2200,8 +2224,17 @@ type DeliveryStreamEncryptionConfigurationInput struct { // manages that grant. // // When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery - // stream that is already encrypted with a customer managed CMK, Kinesis Data - // Firehose schedules the grant it had on the old CMK for retirement. + // stream that is encrypted with a customer managed CMK, Kinesis Data Firehose + // schedules the grant it had on the old CMK for retirement. + // + // You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery + // streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation + // exceeds this limit, Kinesis Data Firehose throws a LimitExceededException. + // + // To encrypt your delivery stream, use symmetric CMKs. Kinesis Data Firehose + // doesn't support asymmetric CMKs. For information about symmetric and asymmetric + // CMKs, see About Symmetric and Asymmetric CMKs (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html) + // in the AWS Key Management Service developer guide. // // KeyType is a required field KeyType *string `type:"string" required:"true" enum:"KeyType"` @@ -2397,6 +2430,9 @@ type DestinationDescription struct { // The destination in Amazon S3. ExtendedS3DestinationDescription *ExtendedS3DestinationDescription `type:"structure"` + // Describes the specified HTTP endpoint destination. + HttpEndpointDestinationDescription *HttpEndpointDestinationDescription `type:"structure"` + // The destination in Amazon Redshift. RedshiftDestinationDescription *RedshiftDestinationDescription `type:"structure"` @@ -2435,6 +2471,12 @@ func (s *DestinationDescription) SetExtendedS3DestinationDescription(v *Extended return s } +// SetHttpEndpointDestinationDescription sets the HttpEndpointDestinationDescription field's value. 
+func (s *DestinationDescription) SetHttpEndpointDestinationDescription(v *HttpEndpointDestinationDescription) *DestinationDescription { + s.HttpEndpointDestinationDescription = v + return s +} + // SetRedshiftDestinationDescription sets the RedshiftDestinationDescription field's value. func (s *DestinationDescription) SetRedshiftDestinationDescription(v *RedshiftDestinationDescription) *DestinationDescription { s.RedshiftDestinationDescription = v @@ -2567,6 +2609,8 @@ type ElasticsearchDestinationConfiguration struct { // with elasticsearch-failed/ appended to the prefix. For more information, // see Amazon S3 Backup for the Amazon ES Destination (https://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html#es-s3-backup). // Default value is FailedDocumentsOnly. + // + // You can't change this backup mode after you create the delivery stream. S3BackupMode *string `type:"string" enum:"ElasticsearchS3BackupMode"` // The configuration for the backup Amazon S3 location. @@ -2581,6 +2625,9 @@ type ElasticsearchDestinationConfiguration struct { // // For Elasticsearch 7.x, don't specify a TypeName. TypeName *string `type:"string"` + + // The details of the VPC of the Amazon ES destination. + VpcConfiguration *VpcConfiguration `type:"structure"` } // String returns the string representation @@ -2632,6 +2679,11 @@ func (s *ElasticsearchDestinationConfiguration) Validate() error { invalidParams.AddNested("S3Configuration", err.(request.ErrInvalidParams)) } } + if s.VpcConfiguration != nil { + if err := s.VpcConfiguration.Validate(); err != nil { + invalidParams.AddNested("VpcConfiguration", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2711,6 +2763,12 @@ func (s *ElasticsearchDestinationConfiguration) SetTypeName(v string) *Elasticse return s } +// SetVpcConfiguration sets the VpcConfiguration field's value. +func (s *ElasticsearchDestinationConfiguration) SetVpcConfiguration(v *VpcConfiguration) *ElasticsearchDestinationConfiguration { + s.VpcConfiguration = v + return s +} + // The destination description in Amazon ES. type ElasticsearchDestinationDescription struct { _ struct{} `type:"structure"` @@ -2758,6 +2816,9 @@ type ElasticsearchDestinationDescription struct { // The Elasticsearch type name. This applies to Elasticsearch 6.x and lower // versions. For Elasticsearch 7.x, there's no value for TypeName. TypeName *string `type:"string"` + + // The details of the VPC of the Amazon ES destination. + VpcConfigurationDescription *VpcConfigurationDescription `type:"structure"` } // String returns the string representation @@ -2842,6 +2903,12 @@ func (s *ElasticsearchDestinationDescription) SetTypeName(v string) *Elasticsear return s } +// SetVpcConfigurationDescription sets the VpcConfigurationDescription field's value. +func (s *ElasticsearchDestinationDescription) SetVpcConfigurationDescription(v *VpcConfigurationDescription) *ElasticsearchDestinationDescription { + s.VpcConfigurationDescription = v + return s +} + // Describes an update for a destination in Amazon ES. type ElasticsearchDestinationUpdate struct { _ struct{} `type:"structure"` @@ -3143,7 +3210,9 @@ type ExtendedS3DestinationConfiguration struct { // The configuration for backup in Amazon S3. S3BackupConfiguration *S3DestinationConfiguration `type:"structure"` - // The Amazon S3 backup mode. + // The Amazon S3 backup mode. After you create a delivery stream, you can update + // it to enable Amazon S3 backup if it is disabled. 
If backup is enabled, you + // can't update the delivery stream to disable it. S3BackupMode *string `type:"string" enum:"S3BackupMode"` } @@ -3461,7 +3530,8 @@ type ExtendedS3DestinationUpdate struct { // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). RoleARN *string `min:"1" type:"string"` - // Enables or disables Amazon S3 backup mode. + // You can update a delivery stream to enable Amazon S3 backup if it is disabled. + // If backup is enabled, you can't update the delivery stream to disable it. S3BackupMode *string `type:"string" enum:"S3BackupMode"` // The Amazon S3 destination for backup. @@ -3600,7 +3670,7 @@ type FailureDescription struct { // A message providing details about the error that caused the failure. // // Details is a required field - Details *string `type:"string" required:"true"` + Details *string `min:"1" type:"string" required:"true"` // The type of error that caused the failure. // @@ -3613,59 +3683,764 @@ func (s FailureDescription) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation -func (s FailureDescription) GoString() string { - return s.String() +// GoString returns the string representation +func (s FailureDescription) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. +func (s *FailureDescription) SetDetails(v string) *FailureDescription { + s.Details = &v + return s +} + +// SetType sets the Type field's value. +func (s *FailureDescription) SetType(v string) *FailureDescription { + s.Type = &v + return s +} + +// The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing +// data, which means converting it from the JSON format in preparation for serializing +// it to the Parquet or ORC format. This is one of two deserializers you can +// choose, depending on which one offers the functionality you need. The other +// option is the OpenX SerDe. +type HiveJsonSerDe struct { + _ struct{} `type:"structure"` + + // Indicates how you want Kinesis Data Firehose to parse the date and timestamps + // that may be present in your input data JSON. To specify these format strings, + // follow the pattern syntax of JodaTime's DateTimeFormat format strings. For + // more information, see Class DateTimeFormat (https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html). + // You can also use the special value millis to parse timestamps in epoch milliseconds. + // If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf + // by default. + TimestampFormats []*string `type:"list"` +} + +// String returns the string representation +func (s HiveJsonSerDe) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HiveJsonSerDe) GoString() string { + return s.String() +} + +// SetTimestampFormats sets the TimestampFormats field's value. +func (s *HiveJsonSerDe) SetTimestampFormats(v []*string) *HiveJsonSerDe { + s.TimestampFormats = v + return s +} + +// Describes the buffering options that can be applied before data is delivered +// to the HTTP endpoint destination. Kinesis Data Firehose treats these options +// as hints, and it might choose to use more optimal values. The SizeInMBs and +// IntervalInSeconds parameters are optional. However, if specify a value for +// one of them, you must also provide a value for the other. 
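// Illustrative sketch (not part of the vendored SDK file): building the
// HttpEndpointBufferingHints described just above. Both hints are optional, but per the
// comment, setting one requires setting the other; the 300-second / 5 MB values are simply
// the documented defaults, used here as placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	hints := &firehose.HttpEndpointBufferingHints{
		IntervalInSeconds: aws.Int64(300), // buffer for up to 5 minutes...
		SizeInMBs:         aws.Int64(5),   // ...or until roughly 5 MB has accumulated
	}
	// Validate enforces the generated minimums (60 seconds, 1 MB) before any API call.
	if err := hints.Validate(); err != nil {
		fmt.Println("invalid buffering hints:", err)
		return
	}
	fmt.Println("buffering hints OK")
}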
+type HttpEndpointBufferingHints struct { + _ struct{} `type:"structure"` + + // Buffer incoming data for the specified period of time, in seconds, before + // delivering it to the destination. The default value is 300 (5 minutes). + IntervalInSeconds *int64 `min:"60" type:"integer"` + + // Buffer incoming data to the specified size, in MBs, before delivering it + // to the destination. The default value is 5. + // + // We recommend setting this parameter to a value greater than the amount of + // data you typically ingest into the delivery stream in 10 seconds. For example, + // if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher. + SizeInMBs *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s HttpEndpointBufferingHints) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HttpEndpointBufferingHints) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpEndpointBufferingHints) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpEndpointBufferingHints"} + if s.IntervalInSeconds != nil && *s.IntervalInSeconds < 60 { + invalidParams.Add(request.NewErrParamMinValue("IntervalInSeconds", 60)) + } + if s.SizeInMBs != nil && *s.SizeInMBs < 1 { + invalidParams.Add(request.NewErrParamMinValue("SizeInMBs", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIntervalInSeconds sets the IntervalInSeconds field's value. +func (s *HttpEndpointBufferingHints) SetIntervalInSeconds(v int64) *HttpEndpointBufferingHints { + s.IntervalInSeconds = &v + return s +} + +// SetSizeInMBs sets the SizeInMBs field's value. +func (s *HttpEndpointBufferingHints) SetSizeInMBs(v int64) *HttpEndpointBufferingHints { + s.SizeInMBs = &v + return s +} + +// Describes the metadata that's delivered to the specified HTTP endpoint destination. +type HttpEndpointCommonAttribute struct { + _ struct{} `type:"structure"` + + // The name of the HTTP endpoint common attribute. + // + // AttributeName is a required field + AttributeName *string `min:"1" type:"string" required:"true" sensitive:"true"` + + // The value of the HTTP endpoint common attribute. + // + // AttributeValue is a required field + AttributeValue *string `type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s HttpEndpointCommonAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HttpEndpointCommonAttribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpEndpointCommonAttribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpEndpointCommonAttribute"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.AttributeValue == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeName sets the AttributeName field's value. 
+func (s *HttpEndpointCommonAttribute) SetAttributeName(v string) *HttpEndpointCommonAttribute { + s.AttributeName = &v + return s +} + +// SetAttributeValue sets the AttributeValue field's value. +func (s *HttpEndpointCommonAttribute) SetAttributeValue(v string) *HttpEndpointCommonAttribute { + s.AttributeValue = &v + return s +} + +// Describes the configuration of the HTTP endpoint to which Kinesis Firehose +// delivers data. +type HttpEndpointConfiguration struct { + _ struct{} `type:"structure"` + + // The access key required for Kinesis Firehose to authenticate with the HTTP + // endpoint selected as the destination. + AccessKey *string `type:"string" sensitive:"true"` + + // The name of the HTTP endpoint selected as the destination. + Name *string `min:"1" type:"string"` + + // The URL of the HTTP endpoint selected as the destination. + // + // Url is a required field + Url *string `min:"1" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s HttpEndpointConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HttpEndpointConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpEndpointConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpEndpointConfiguration"} + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Url == nil { + invalidParams.Add(request.NewErrParamRequired("Url")) + } + if s.Url != nil && len(*s.Url) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Url", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKey sets the AccessKey field's value. +func (s *HttpEndpointConfiguration) SetAccessKey(v string) *HttpEndpointConfiguration { + s.AccessKey = &v + return s +} + +// SetName sets the Name field's value. +func (s *HttpEndpointConfiguration) SetName(v string) *HttpEndpointConfiguration { + s.Name = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *HttpEndpointConfiguration) SetUrl(v string) *HttpEndpointConfiguration { + s.Url = &v + return s +} + +// Describes the HTTP endpoint selected as the destination. +type HttpEndpointDescription struct { + _ struct{} `type:"structure"` + + // The name of the HTTP endpoint selected as the destination. + Name *string `min:"1" type:"string"` + + // The URL of the HTTP endpoint selected as the destination. + Url *string `min:"1" type:"string" sensitive:"true"` +} + +// String returns the string representation +func (s HttpEndpointDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HttpEndpointDescription) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *HttpEndpointDescription) SetName(v string) *HttpEndpointDescription { + s.Name = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *HttpEndpointDescription) SetUrl(v string) *HttpEndpointDescription { + s.Url = &v + return s +} + +// Describes the configuration of the HTTP endpoint destination. +type HttpEndpointDestinationConfiguration struct { + _ struct{} `type:"structure"` + + // The buffering options that can be used before data is delivered to the specified + // destination. 
Kinesis Data Firehose treats these options as hints, and it + // might choose to use more optimal values. The SizeInMBs and IntervalInSeconds + // parameters are optional. However, if you specify a value for one of them, + // you must also provide a value for the other. + BufferingHints *HttpEndpointBufferingHints `type:"structure"` + + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` + + // The configuration of the HTTP endpoint selected as the destination. + // + // EndpointConfiguration is a required field + EndpointConfiguration *HttpEndpointConfiguration `type:"structure" required:"true"` + + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `type:"structure"` + + // The configuration of the requeste sent to the HTTP endpoint specified as + // the destination. + RequestConfiguration *HttpEndpointRequestConfiguration `type:"structure"` + + // Describes the retry behavior in case Kinesis Data Firehose is unable to deliver + // data to the specified HTTP endpoint destination, or if it doesn't receive + // a valid acknowledgment of receipt from the specified HTTP endpoint destination. + RetryOptions *HttpEndpointRetryOptions `type:"structure"` + + // Kinesis Data Firehose uses this IAM role for all the permissions that the + // delivery stream needs. + RoleARN *string `min:"1" type:"string"` + + // Describes the S3 bucket backup options for the data that Kinesis Data Firehose + // delivers to the HTTP endpoint destination. You can back up all documents + // (AllData) or only the documents that Kinesis Data Firehose could not deliver + // to the specified HTTP endpoint destination (FailedDataOnly). + S3BackupMode *string `type:"string" enum:"HttpEndpointS3BackupMode"` + + // Describes the configuration of a destination in Amazon S3. + // + // S3Configuration is a required field + S3Configuration *S3DestinationConfiguration `type:"structure" required:"true"` +} + +// String returns the string representation +func (s HttpEndpointDestinationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HttpEndpointDestinationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *HttpEndpointDestinationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpEndpointDestinationConfiguration"} + if s.EndpointConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("EndpointConfiguration")) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + if s.S3Configuration == nil { + invalidParams.Add(request.NewErrParamRequired("S3Configuration")) + } + if s.BufferingHints != nil { + if err := s.BufferingHints.Validate(); err != nil { + invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams)) + } + } + if s.EndpointConfiguration != nil { + if err := s.EndpointConfiguration.Validate(); err != nil { + invalidParams.AddNested("EndpointConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.ProcessingConfiguration != nil { + if err := s.ProcessingConfiguration.Validate(); err != nil { + invalidParams.AddNested("ProcessingConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.RequestConfiguration != nil { + if err := s.RequestConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.S3Configuration != nil { + if err := s.S3Configuration.Validate(); err != nil { + invalidParams.AddNested("S3Configuration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBufferingHints sets the BufferingHints field's value. +func (s *HttpEndpointDestinationConfiguration) SetBufferingHints(v *HttpEndpointBufferingHints) *HttpEndpointDestinationConfiguration { + s.BufferingHints = v + return s +} + +// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value. +func (s *HttpEndpointDestinationConfiguration) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *HttpEndpointDestinationConfiguration { + s.CloudWatchLoggingOptions = v + return s +} + +// SetEndpointConfiguration sets the EndpointConfiguration field's value. +func (s *HttpEndpointDestinationConfiguration) SetEndpointConfiguration(v *HttpEndpointConfiguration) *HttpEndpointDestinationConfiguration { + s.EndpointConfiguration = v + return s +} + +// SetProcessingConfiguration sets the ProcessingConfiguration field's value. +func (s *HttpEndpointDestinationConfiguration) SetProcessingConfiguration(v *ProcessingConfiguration) *HttpEndpointDestinationConfiguration { + s.ProcessingConfiguration = v + return s +} + +// SetRequestConfiguration sets the RequestConfiguration field's value. +func (s *HttpEndpointDestinationConfiguration) SetRequestConfiguration(v *HttpEndpointRequestConfiguration) *HttpEndpointDestinationConfiguration { + s.RequestConfiguration = v + return s +} + +// SetRetryOptions sets the RetryOptions field's value. +func (s *HttpEndpointDestinationConfiguration) SetRetryOptions(v *HttpEndpointRetryOptions) *HttpEndpointDestinationConfiguration { + s.RetryOptions = v + return s +} + +// SetRoleARN sets the RoleARN field's value. +func (s *HttpEndpointDestinationConfiguration) SetRoleARN(v string) *HttpEndpointDestinationConfiguration { + s.RoleARN = &v + return s +} + +// SetS3BackupMode sets the S3BackupMode field's value. +func (s *HttpEndpointDestinationConfiguration) SetS3BackupMode(v string) *HttpEndpointDestinationConfiguration { + s.S3BackupMode = &v + return s +} + +// SetS3Configuration sets the S3Configuration field's value. 
+func (s *HttpEndpointDestinationConfiguration) SetS3Configuration(v *S3DestinationConfiguration) *HttpEndpointDestinationConfiguration { + s.S3Configuration = v + return s +} + +// Describes the HTTP endpoint destination. +type HttpEndpointDestinationDescription struct { + _ struct{} `type:"structure"` + + // Describes buffering options that can be applied to the data before it is + // delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats + // these options as hints, and it might choose to use more optimal values. The + // SizeInMBs and IntervalInSeconds parameters are optional. However, if specify + // a value for one of them, you must also provide a value for the other. + BufferingHints *HttpEndpointBufferingHints `type:"structure"` + + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` + + // The configuration of the specified HTTP endpoint destination. + EndpointConfiguration *HttpEndpointDescription `type:"structure"` + + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `type:"structure"` + + // The configuration of request sent to the HTTP endpoint specified as the destination. + RequestConfiguration *HttpEndpointRequestConfiguration `type:"structure"` + + // Describes the retry behavior in case Kinesis Data Firehose is unable to deliver + // data to the specified HTTP endpoint destination, or if it doesn't receive + // a valid acknowledgment of receipt from the specified HTTP endpoint destination. + RetryOptions *HttpEndpointRetryOptions `type:"structure"` + + // Kinesis Data Firehose uses this IAM role for all the permissions that the + // delivery stream needs. + RoleARN *string `min:"1" type:"string"` + + // Describes the S3 bucket backup options for the data that Kinesis Firehose + // delivers to the HTTP endpoint destination. You can back up all documents + // (AllData) or only the documents that Kinesis Data Firehose could not deliver + // to the specified HTTP endpoint destination (FailedDataOnly). + S3BackupMode *string `type:"string" enum:"HttpEndpointS3BackupMode"` + + // Describes a destination in Amazon S3. + S3DestinationDescription *S3DestinationDescription `type:"structure"` +} + +// String returns the string representation +func (s HttpEndpointDestinationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HttpEndpointDestinationDescription) GoString() string { + return s.String() +} + +// SetBufferingHints sets the BufferingHints field's value. +func (s *HttpEndpointDestinationDescription) SetBufferingHints(v *HttpEndpointBufferingHints) *HttpEndpointDestinationDescription { + s.BufferingHints = v + return s +} + +// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value. +func (s *HttpEndpointDestinationDescription) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *HttpEndpointDestinationDescription { + s.CloudWatchLoggingOptions = v + return s +} + +// SetEndpointConfiguration sets the EndpointConfiguration field's value. +func (s *HttpEndpointDestinationDescription) SetEndpointConfiguration(v *HttpEndpointDescription) *HttpEndpointDestinationDescription { + s.EndpointConfiguration = v + return s +} + +// SetProcessingConfiguration sets the ProcessingConfiguration field's value. 
+func (s *HttpEndpointDestinationDescription) SetProcessingConfiguration(v *ProcessingConfiguration) *HttpEndpointDestinationDescription { + s.ProcessingConfiguration = v + return s +} + +// SetRequestConfiguration sets the RequestConfiguration field's value. +func (s *HttpEndpointDestinationDescription) SetRequestConfiguration(v *HttpEndpointRequestConfiguration) *HttpEndpointDestinationDescription { + s.RequestConfiguration = v + return s +} + +// SetRetryOptions sets the RetryOptions field's value. +func (s *HttpEndpointDestinationDescription) SetRetryOptions(v *HttpEndpointRetryOptions) *HttpEndpointDestinationDescription { + s.RetryOptions = v + return s +} + +// SetRoleARN sets the RoleARN field's value. +func (s *HttpEndpointDestinationDescription) SetRoleARN(v string) *HttpEndpointDestinationDescription { + s.RoleARN = &v + return s +} + +// SetS3BackupMode sets the S3BackupMode field's value. +func (s *HttpEndpointDestinationDescription) SetS3BackupMode(v string) *HttpEndpointDestinationDescription { + s.S3BackupMode = &v + return s +} + +// SetS3DestinationDescription sets the S3DestinationDescription field's value. +func (s *HttpEndpointDestinationDescription) SetS3DestinationDescription(v *S3DestinationDescription) *HttpEndpointDestinationDescription { + s.S3DestinationDescription = v + return s +} + +// Updates the specified HTTP endpoint destination. +type HttpEndpointDestinationUpdate struct { + _ struct{} `type:"structure"` + + // Describes buffering options that can be applied to the data before it is + // delivered to the HTTPS endpoint destination. Kinesis Data Firehose teats + // these options as hints, and it might choose to use more optimal values. The + // SizeInMBs and IntervalInSeconds parameters are optional. However, if specify + // a value for one of them, you must also provide a value for the other. + BufferingHints *HttpEndpointBufferingHints `type:"structure"` + + // Describes the Amazon CloudWatch logging options for your delivery stream. + CloudWatchLoggingOptions *CloudWatchLoggingOptions `type:"structure"` + + // Describes the configuration of the HTTP endpoint destination. + EndpointConfiguration *HttpEndpointConfiguration `type:"structure"` + + // Describes a data processing configuration. + ProcessingConfiguration *ProcessingConfiguration `type:"structure"` + + // The configuration of the request sent to the HTTP endpoint specified as the + // destination. + RequestConfiguration *HttpEndpointRequestConfiguration `type:"structure"` + + // Describes the retry behavior in case Kinesis Data Firehose is unable to deliver + // data to the specified HTTP endpoint destination, or if it doesn't receive + // a valid acknowledgment of receipt from the specified HTTP endpoint destination. + RetryOptions *HttpEndpointRetryOptions `type:"structure"` + + // Kinesis Data Firehose uses this IAM role for all the permissions that the + // delivery stream needs. + RoleARN *string `min:"1" type:"string"` + + // Describes the S3 bucket backup options for the data that Kinesis Firehose + // delivers to the HTTP endpoint destination. You can back up all documents + // (AllData) or only the documents that Kinesis Data Firehose could not deliver + // to the specified HTTP endpoint destination (FailedDataOnly). + S3BackupMode *string `type:"string" enum:"HttpEndpointS3BackupMode"` + + // Describes an update for a destination in Amazon S3. 
+ S3Update *S3DestinationUpdate `type:"structure"` +} + +// String returns the string representation +func (s HttpEndpointDestinationUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HttpEndpointDestinationUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpEndpointDestinationUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpEndpointDestinationUpdate"} + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + if s.BufferingHints != nil { + if err := s.BufferingHints.Validate(); err != nil { + invalidParams.AddNested("BufferingHints", err.(request.ErrInvalidParams)) + } + } + if s.EndpointConfiguration != nil { + if err := s.EndpointConfiguration.Validate(); err != nil { + invalidParams.AddNested("EndpointConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.ProcessingConfiguration != nil { + if err := s.ProcessingConfiguration.Validate(); err != nil { + invalidParams.AddNested("ProcessingConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.RequestConfiguration != nil { + if err := s.RequestConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestConfiguration", err.(request.ErrInvalidParams)) + } + } + if s.S3Update != nil { + if err := s.S3Update.Validate(); err != nil { + invalidParams.AddNested("S3Update", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBufferingHints sets the BufferingHints field's value. +func (s *HttpEndpointDestinationUpdate) SetBufferingHints(v *HttpEndpointBufferingHints) *HttpEndpointDestinationUpdate { + s.BufferingHints = v + return s +} + +// SetCloudWatchLoggingOptions sets the CloudWatchLoggingOptions field's value. +func (s *HttpEndpointDestinationUpdate) SetCloudWatchLoggingOptions(v *CloudWatchLoggingOptions) *HttpEndpointDestinationUpdate { + s.CloudWatchLoggingOptions = v + return s +} + +// SetEndpointConfiguration sets the EndpointConfiguration field's value. +func (s *HttpEndpointDestinationUpdate) SetEndpointConfiguration(v *HttpEndpointConfiguration) *HttpEndpointDestinationUpdate { + s.EndpointConfiguration = v + return s +} + +// SetProcessingConfiguration sets the ProcessingConfiguration field's value. +func (s *HttpEndpointDestinationUpdate) SetProcessingConfiguration(v *ProcessingConfiguration) *HttpEndpointDestinationUpdate { + s.ProcessingConfiguration = v + return s +} + +// SetRequestConfiguration sets the RequestConfiguration field's value. +func (s *HttpEndpointDestinationUpdate) SetRequestConfiguration(v *HttpEndpointRequestConfiguration) *HttpEndpointDestinationUpdate { + s.RequestConfiguration = v + return s +} + +// SetRetryOptions sets the RetryOptions field's value. +func (s *HttpEndpointDestinationUpdate) SetRetryOptions(v *HttpEndpointRetryOptions) *HttpEndpointDestinationUpdate { + s.RetryOptions = v + return s +} + +// SetRoleARN sets the RoleARN field's value. +func (s *HttpEndpointDestinationUpdate) SetRoleARN(v string) *HttpEndpointDestinationUpdate { + s.RoleARN = &v + return s +} + +// SetS3BackupMode sets the S3BackupMode field's value. +func (s *HttpEndpointDestinationUpdate) SetS3BackupMode(v string) *HttpEndpointDestinationUpdate { + s.S3BackupMode = &v + return s +} + +// SetS3Update sets the S3Update field's value. 
+func (s *HttpEndpointDestinationUpdate) SetS3Update(v *S3DestinationUpdate) *HttpEndpointDestinationUpdate { + s.S3Update = v + return s +} + +// The configuration of the HTTP endpoint request. +type HttpEndpointRequestConfiguration struct { + _ struct{} `type:"structure"` + + // Describes the metadata sent to the HTTP endpoint destination. + CommonAttributes []*HttpEndpointCommonAttribute `type:"list"` + + // Kinesis Data Firehose uses the content encoding to compress the body of a + // request before sending the request to the destination. For more information, + // see Content-Encoding (https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding) + // in MDN Web Docs, the official Mozilla documentation. + ContentEncoding *string `type:"string" enum:"ContentEncoding"` +} + +// String returns the string representation +func (s HttpEndpointRequestConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HttpEndpointRequestConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HttpEndpointRequestConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HttpEndpointRequestConfiguration"} + if s.CommonAttributes != nil { + for i, v := range s.CommonAttributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CommonAttributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetDetails sets the Details field's value. -func (s *FailureDescription) SetDetails(v string) *FailureDescription { - s.Details = &v +// SetCommonAttributes sets the CommonAttributes field's value. +func (s *HttpEndpointRequestConfiguration) SetCommonAttributes(v []*HttpEndpointCommonAttribute) *HttpEndpointRequestConfiguration { + s.CommonAttributes = v return s } -// SetType sets the Type field's value. -func (s *FailureDescription) SetType(v string) *FailureDescription { - s.Type = &v +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HttpEndpointRequestConfiguration) SetContentEncoding(v string) *HttpEndpointRequestConfiguration { + s.ContentEncoding = &v return s } -// The native Hive / HCatalog JsonSerDe. Used by Kinesis Data Firehose for deserializing -// data, which means converting it from the JSON format in preparation for serializing -// it to the Parquet or ORC format. This is one of two deserializers you can -// choose, depending on which one offers the functionality you need. The other -// option is the OpenX SerDe. -type HiveJsonSerDe struct { +// Describes the retry behavior in case Kinesis Data Firehose is unable to deliver +// data to the specified HTTP endpoint destination, or if it doesn't receive +// a valid acknowledgment of receipt from the specified HTTP endpoint destination. +type HttpEndpointRetryOptions struct { _ struct{} `type:"structure"` - // Indicates how you want Kinesis Data Firehose to parse the date and timestamps - // that may be present in your input data JSON. To specify these format strings, - // follow the pattern syntax of JodaTime's DateTimeFormat format strings. For - // more information, see Class DateTimeFormat (https://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html). - // You can also use the special value millis to parse timestamps in epoch milliseconds. 
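// Illustrative sketch (not part of the vendored SDK file): one way the new
// HttpEndpointDestinationConfiguration and its companion types could be wired into a
// CreateDeliveryStream call. The stream name, URL, ARNs and bucket are placeholders, and
// the "FailedDataOnly" backup-mode string is taken from the field documentation above.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := firehose.New(sess)

	input := &firehose.CreateDeliveryStreamInput{
		DeliveryStreamName: aws.String("example-http-endpoint-stream"), // placeholder
		HttpEndpointDestinationConfiguration: &firehose.HttpEndpointDestinationConfiguration{
			EndpointConfiguration: &firehose.HttpEndpointConfiguration{
				Name: aws.String("example-endpoint"),             // placeholder
				Url:  aws.String("https://endpoint.example.com"), // placeholder
			},
			RoleARN:      aws.String("arn:aws:iam::123456789012:role/firehose-role"), // placeholder
			S3BackupMode: aws.String("FailedDataOnly"),
			S3Configuration: &firehose.S3DestinationConfiguration{
				BucketARN: aws.String("arn:aws:s3:::example-backup-bucket"),           // placeholder
				RoleARN:   aws.String("arn:aws:iam::123456789012:role/firehose-role"), // placeholder
			},
		},
	}

	out, err := svc.CreateDeliveryStream(input)
	if err != nil {
		fmt.Println("CreateDeliveryStream failed:", err)
		return
	}
	fmt.Println("created:", aws.StringValue(out.DeliveryStreamARN))
}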
- // If you don't specify a format, Kinesis Data Firehose uses java.sql.Timestamp::valueOf - // by default. - TimestampFormats []*string `type:"list"` + // The total amount of time that Kinesis Data Firehose spends on retries. This + // duration starts after the initial attempt to send data to the custom destination + // via HTTPS endpoint fails. It doesn't include the periods during which Kinesis + // Data Firehose waits for acknowledgment from the specified destination after + // each attempt. + DurationInSeconds *int64 `type:"integer"` } // String returns the string representation -func (s HiveJsonSerDe) String() string { +func (s HttpEndpointRetryOptions) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s HiveJsonSerDe) GoString() string { +func (s HttpEndpointRetryOptions) GoString() string { return s.String() } -// SetTimestampFormats sets the TimestampFormats field's value. -func (s *HiveJsonSerDe) SetTimestampFormats(v []*string) *HiveJsonSerDe { - s.TimestampFormats = v +// SetDurationInSeconds sets the DurationInSeconds field's value. +func (s *HttpEndpointRetryOptions) SetDurationInSeconds(v int64) *HttpEndpointRetryOptions { + s.DurationInSeconds = &v return s } // Specifies the deserializer you want to use to convert the format of the input -// data. +// data. This parameter is required if Enabled is set to true. type InputFormatConfiguration struct { _ struct{} `type:"structure"` @@ -3693,8 +4468,8 @@ func (s *InputFormatConfiguration) SetDeserializer(v *Deserializer) *InputFormat // The specified input parameter has a value that is not valid. type InvalidArgumentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -3712,17 +4487,17 @@ func (s InvalidArgumentException) GoString() string { func newErrorInvalidArgumentException(v protocol.ResponseMetadata) error { return &InvalidArgumentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArgumentException) Code() string { +func (s *InvalidArgumentException) Code() string { return "InvalidArgumentException" } // Message returns the exception's message. -func (s InvalidArgumentException) Message() string { +func (s *InvalidArgumentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3730,22 +4505,22 @@ func (s InvalidArgumentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArgumentException) OrigErr() error { +func (s *InvalidArgumentException) OrigErr() error { return nil } -func (s InvalidArgumentException) Error() string { +func (s *InvalidArgumentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArgumentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArgumentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidArgumentException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArgumentException) RequestID() string { + return s.RespMetadata.RequestID } // Kinesis Data Firehose throws this exception when an attempt to put records @@ -3753,8 +4528,8 @@ func (s InvalidArgumentException) RequestID() string { // KMS service throws one of the following exception types: AccessDeniedException, // InvalidStateException, DisabledException, or NotFoundException. type InvalidKMSResourceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"code" type:"string"` @@ -3773,17 +4548,17 @@ func (s InvalidKMSResourceException) GoString() string { func newErrorInvalidKMSResourceException(v protocol.ResponseMetadata) error { return &InvalidKMSResourceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidKMSResourceException) Code() string { +func (s *InvalidKMSResourceException) Code() string { return "InvalidKMSResourceException" } // Message returns the exception's message. -func (s InvalidKMSResourceException) Message() string { +func (s *InvalidKMSResourceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3791,22 +4566,22 @@ func (s InvalidKMSResourceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidKMSResourceException) OrigErr() error { +func (s *InvalidKMSResourceException) OrigErr() error { return nil } -func (s InvalidKMSResourceException) Error() string { +func (s *InvalidKMSResourceException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidKMSResourceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidKMSResourceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidKMSResourceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidKMSResourceException) RequestID() string { + return s.RespMetadata.RequestID } // Describes an encryption key for a destination in Amazon S3. @@ -3964,8 +4739,8 @@ func (s *KinesisStreamSourceDescription) SetRoleARN(v string) *KinesisStreamSour // You have already reached the limit for a requested resource. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -3983,17 +4758,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. 
-func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4001,22 +4776,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListDeliveryStreamsInput struct { @@ -4438,7 +5213,8 @@ func (s *OrcSerDe) SetStripeSizeBytes(v int64) *OrcSerDe { } // Specifies the serializer that you want Kinesis Data Firehose to use to convert -// the format of your data before it writes it to Amazon S3. +// the format of your data before it writes it to Amazon S3. This parameter +// is required if Enabled is set to true. type OutputFormatConfiguration struct { _ struct{} `type:"structure"` @@ -5075,7 +5851,9 @@ type RedshiftDestinationConfiguration struct { // The configuration for backup in Amazon S3. S3BackupConfiguration *S3DestinationConfiguration `type:"structure"` - // The Amazon S3 backup mode. + // The Amazon S3 backup mode. After you create a delivery stream, you can update + // it to enable Amazon S3 backup if it is disabled. If backup is enabled, you + // can't update the delivery stream to disable it. S3BackupMode *string `type:"string" enum:"RedshiftS3BackupMode"` // The configuration for the intermediate Amazon S3 location from which Amazon @@ -5374,7 +6152,8 @@ type RedshiftDestinationUpdate struct { // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). RoleARN *string `min:"1" type:"string"` - // The Amazon S3 backup mode. + // You can update a delivery stream to enable Amazon S3 backup if it is disabled. + // If backup is enabled, you can't update the delivery stream to disable it. S3BackupMode *string `type:"string" enum:"RedshiftS3BackupMode"` // The Amazon S3 destination for backup. @@ -5540,8 +6319,8 @@ func (s *RedshiftRetryOptions) SetDurationInSeconds(v int64) *RedshiftRetryOptio // The resource is already in use and not available for this operation. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -5559,17 +6338,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5577,28 +6356,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource could not be found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -5616,17 +6395,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5634,22 +6413,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Describes the configuration of a destination in Amazon S3. @@ -6021,35 +6800,36 @@ func (s *S3DestinationUpdate) SetRoleARN(v string) *S3DestinationUpdate { } // Specifies the schema to which you want Kinesis Data Firehose to configure -// your data before it writes it to Amazon S3. +// your data before it writes it to Amazon S3. This parameter is required if +// Enabled is set to true. 
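// Illustrative sketch (not part of the vendored SDK file): how the SchemaConfiguration
// described below fits into record-format conversion, where all three sub-structures are
// required once Enabled is true. The Glue database, table and role are placeholders; the
// Deserializer and Serializer field names come from the wider firehose package and are
// assumed to match this vendored SDK version.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	conv := &firehose.DataFormatConversionConfiguration{
		Enabled: aws.Bool(true),
		// Required when Enabled is true: how to read the incoming JSON.
		InputFormatConfiguration: &firehose.InputFormatConfiguration{
			Deserializer: &firehose.Deserializer{
				HiveJsonSerDe: &firehose.HiveJsonSerDe{},
			},
		},
		// Required when Enabled is true: how to serialize the converted output.
		OutputFormatConfiguration: &firehose.OutputFormatConfiguration{
			Serializer: &firehose.Serializer{
				ParquetSerDe: &firehose.ParquetSerDe{},
			},
		},
		// Required when Enabled is true: where the column schema lives in AWS Glue.
		SchemaConfiguration: &firehose.SchemaConfiguration{
			DatabaseName: aws.String("example_db"),                                       // placeholder
			TableName:    aws.String("example_table"),                                    // placeholder
			RoleARN:      aws.String("arn:aws:iam::123456789012:role/firehose-glue-role"), // placeholder
		},
	}
	if err := conv.Validate(); err != nil {
		fmt.Println("invalid conversion configuration:", err)
		return
	}
	fmt.Println("conversion configuration OK")
}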
type SchemaConfiguration struct { _ struct{} `type:"structure"` // The ID of the AWS Glue Data Catalog. If you don't supply this, the AWS account // ID is used by default. - CatalogId *string `type:"string"` + CatalogId *string `min:"1" type:"string"` // Specifies the name of the AWS Glue database that contains the schema for // the output data. - DatabaseName *string `type:"string"` + DatabaseName *string `min:"1" type:"string"` // If you don't specify an AWS Region, the default is the current Region. - Region *string `type:"string"` + Region *string `min:"1" type:"string"` // The role that Kinesis Data Firehose can use to access AWS Glue. This role // must be in the same account you use for Kinesis Data Firehose. Cross-account // roles aren't allowed. - RoleARN *string `type:"string"` + RoleARN *string `min:"1" type:"string"` // Specifies the AWS Glue table that contains the column information that constitutes // your data schema. - TableName *string `type:"string"` + TableName *string `min:"1" type:"string"` // Specifies the table version for the output data schema. If you don't specify // this version ID, or if you set it to LATEST, Kinesis Data Firehose uses the // most recent version. This means that any updates to the table are automatically // picked up. - VersionId *string `type:"string"` + VersionId *string `min:"1" type:"string"` } // String returns the string representation @@ -6062,6 +6842,34 @@ func (s SchemaConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SchemaConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SchemaConfiguration"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.Region != nil && len(*s.Region) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Region", 1)) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + if s.VersionId != nil && len(*s.VersionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VersionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetCatalogId sets the CatalogId field's value. func (s *SchemaConfiguration) SetCatalogId(v string) *SchemaConfiguration { s.CatalogId = &v @@ -6161,8 +6969,8 @@ func (s *Serializer) SetParquetSerDe(v *ParquetSerDe) *Serializer { // been exceeded. For more information about limits and how to request an increase, // see Amazon Kinesis Data Firehose Limits (https://docs.aws.amazon.com/firehose/latest/dev/limits.html). type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -6180,17 +6988,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6198,22 +7006,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // Details about a Kinesis data stream used as the source for a Kinesis Data @@ -6278,11 +7086,14 @@ type SplunkDestinationConfiguration struct { // to Splunk, or if it doesn't receive an acknowledgment of receipt from Splunk. RetryOptions *SplunkRetryOptions `type:"structure"` - // Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, + // Defines how documents should be delivered to Amazon S3. When set to FailedEventsOnly, // Kinesis Data Firehose writes any data that could not be indexed to the configured - // Amazon S3 destination. When set to AllDocuments, Kinesis Data Firehose delivers + // Amazon S3 destination. When set to AllEvents, Kinesis Data Firehose delivers // all incoming records to Amazon S3, and also writes failed documents to Amazon - // S3. Default value is FailedDocumentsOnly. + // S3. The default value is FailedEventsOnly. + // + // You can update this backup mode from FailedEventsOnly to AllEvents. You can't + // update it from AllEvents to FailedEventsOnly. S3BackupMode *string `type:"string" enum:"SplunkS3BackupMode"` // The configuration for the backup Amazon S3 location. @@ -6526,11 +7337,14 @@ type SplunkDestinationUpdate struct { // to Splunk or if it doesn't receive an acknowledgment of receipt from Splunk. RetryOptions *SplunkRetryOptions `type:"structure"` - // Defines how documents should be delivered to Amazon S3. When set to FailedDocumentsOnly, - // Kinesis Data Firehose writes any data that could not be indexed to the configured - // Amazon S3 destination. When set to AllDocuments, Kinesis Data Firehose delivers - // all incoming records to Amazon S3, and also writes failed documents to Amazon - // S3. Default value is FailedDocumentsOnly. + // Specifies how you want Kinesis Data Firehose to back up documents to Amazon + // S3. When set to FailedDocumentsOnly, Kinesis Data Firehose writes any data + // that could not be indexed to the configured Amazon S3 destination. When set + // to AllEvents, Kinesis Data Firehose delivers all incoming records to Amazon + // S3, and also writes failed documents to Amazon S3. The default value is FailedEventsOnly. + // + // You can update this backup mode from FailedEventsOnly to AllEvents. 
You can't + // update it from AllEvents to FailedEventsOnly. S3BackupMode *string `type:"string" enum:"SplunkS3BackupMode"` // Your update to the configuration of the backup Amazon S3 location. @@ -7017,6 +7831,9 @@ type UpdateDestinationInput struct { // Describes an update for a destination in Amazon S3. ExtendedS3DestinationUpdate *ExtendedS3DestinationUpdate `type:"structure"` + // Describes an update to the specified HTTP endpoint destination. + HttpEndpointDestinationUpdate *HttpEndpointDestinationUpdate `type:"structure"` + // Describes an update for a destination in Amazon Redshift. RedshiftDestinationUpdate *RedshiftDestinationUpdate `type:"structure"` @@ -7070,6 +7887,11 @@ func (s *UpdateDestinationInput) Validate() error { invalidParams.AddNested("ExtendedS3DestinationUpdate", err.(request.ErrInvalidParams)) } } + if s.HttpEndpointDestinationUpdate != nil { + if err := s.HttpEndpointDestinationUpdate.Validate(); err != nil { + invalidParams.AddNested("HttpEndpointDestinationUpdate", err.(request.ErrInvalidParams)) + } + } if s.RedshiftDestinationUpdate != nil { if err := s.RedshiftDestinationUpdate.Validate(); err != nil { invalidParams.AddNested("RedshiftDestinationUpdate", err.(request.ErrInvalidParams)) @@ -7122,6 +7944,12 @@ func (s *UpdateDestinationInput) SetExtendedS3DestinationUpdate(v *ExtendedS3Des return s } +// SetHttpEndpointDestinationUpdate sets the HttpEndpointDestinationUpdate field's value. +func (s *UpdateDestinationInput) SetHttpEndpointDestinationUpdate(v *HttpEndpointDestinationUpdate) *UpdateDestinationInput { + s.HttpEndpointDestinationUpdate = v + return s +} + // SetRedshiftDestinationUpdate sets the RedshiftDestinationUpdate field's value. func (s *UpdateDestinationInput) SetRedshiftDestinationUpdate(v *RedshiftDestinationUpdate) *UpdateDestinationInput { s.RedshiftDestinationUpdate = v @@ -7154,6 +7982,237 @@ func (s UpdateDestinationOutput) GoString() string { return s.String() } +// The details of the VPC of the Amazon ES destination. +type VpcConfiguration struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM role that you want the delivery stream to use to create + // endpoints in the destination VPC. You can use your existing Kinesis Data + // Firehose delivery role or you can specify a new role. In either case, make + // sure that the role trusts the Kinesis Data Firehose service principal and + // that it grants the following permissions: + // + // * ec2:DescribeVpcs + // + // * ec2:DescribeVpcAttribute + // + // * ec2:DescribeSubnets + // + // * ec2:DescribeSecurityGroups + // + // * ec2:DescribeNetworkInterfaces + // + // * ec2:CreateNetworkInterface + // + // * ec2:CreateNetworkInterfacePermission + // + // * ec2:DeleteNetworkInterface + // + // If you revoke these permissions after you create the delivery stream, Kinesis + // Data Firehose can't scale out by creating more ENIs when necessary. You might + // therefore see a degradation in performance. + // + // RoleARN is a required field + RoleARN *string `min:"1" type:"string" required:"true"` + + // The IDs of the security groups that you want Kinesis Data Firehose to use + // when it creates ENIs in the VPC of the Amazon ES destination. You can use + // the same security group that the Amazon ES domain uses or different ones. + // If you specify different security groups here, ensure that they allow outbound + // HTTPS traffic to the Amazon ES domain's security group. 
Also ensure that + // the Amazon ES domain's security group allows HTTPS traffic from the security + // groups specified here. If you use the same security group for both your delivery + // stream and the Amazon ES domain, make sure the security group inbound rule + // allows HTTPS traffic. For more information about security group rules, see + // Security group rules (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#SecurityGroupRules) + // in the Amazon VPC documentation. + // + // SecurityGroupIds is a required field + SecurityGroupIds []*string `min:"1" type:"list" required:"true"` + + // The IDs of the subnets that you want Kinesis Data Firehose to use to create + // ENIs in the VPC of the Amazon ES destination. Make sure that the routing + // tables and inbound and outbound rules allow traffic to flow from the subnets + // whose IDs are specified here to the subnets that have the destination Amazon + // ES endpoints. Kinesis Data Firehose creates at least one ENI in each of the + // subnets that are specified here. Do not delete or modify these ENIs. + // + // The number of ENIs that Kinesis Data Firehose creates in the subnets specified + // here scales up and down automatically based on throughput. To enable Kinesis + // Data Firehose to scale up the number of ENIs to match throughput, ensure + // that you have sufficient quota. To help you calculate the quota you need, + // assume that Kinesis Data Firehose can create up to three ENIs for this delivery + // stream for each of the subnets specified here. For more information about + // ENI quota, see Network Interfaces (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis) + // in the Amazon VPC Quotas topic. + // + // SubnetIds is a required field + SubnetIds []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s VpcConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VpcConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VpcConfiguration"} + if s.RoleARN == nil { + invalidParams.Add(request.NewErrParamRequired("RoleARN")) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + if s.SecurityGroupIds == nil { + invalidParams.Add(request.NewErrParamRequired("SecurityGroupIds")) + } + if s.SecurityGroupIds != nil && len(s.SecurityGroupIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecurityGroupIds", 1)) + } + if s.SubnetIds == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetIds")) + } + if s.SubnetIds != nil && len(s.SubnetIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SubnetIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRoleARN sets the RoleARN field's value. +func (s *VpcConfiguration) SetRoleARN(v string) *VpcConfiguration { + s.RoleARN = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *VpcConfiguration) SetSecurityGroupIds(v []*string) *VpcConfiguration { + s.SecurityGroupIds = v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. 
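// A minimal sketch of building and validating the VpcConfiguration type added
// above, using the setters and Validate method from this diff; the role ARN,
// security group ID, and subnet ID are placeholder values.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/firehose"
)

func main() {
	cfg := (&firehose.VpcConfiguration{}).
		SetRoleARN("arn:aws:iam::123456789012:role/firehose-delivery-role").
		SetSecurityGroupIds(aws.StringSlice([]string{"sg-0123456789abcdef0"})).
		SetSubnetIds(aws.StringSlice([]string{"subnet-0123456789abcdef0"}))

	// Validate reports missing required fields and min-length violations as a
	// request.ErrInvalidParams, matching the checks introduced in this diff.
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid VpcConfiguration:", err)
	}
}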
+func (s *VpcConfiguration) SetSubnetIds(v []*string) *VpcConfiguration { + s.SubnetIds = v + return s +} + +// The details of the VPC of the Amazon ES destination. +type VpcConfigurationDescription struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM role that the delivery stream uses to create endpoints + // in the destination VPC. You can use your existing Kinesis Data Firehose delivery + // role or you can specify a new role. In either case, make sure that the role + // trusts the Kinesis Data Firehose service principal and that it grants the + // following permissions: + // + // * ec2:DescribeVpcs + // + // * ec2:DescribeVpcAttribute + // + // * ec2:DescribeSubnets + // + // * ec2:DescribeSecurityGroups + // + // * ec2:DescribeNetworkInterfaces + // + // * ec2:CreateNetworkInterface + // + // * ec2:CreateNetworkInterfacePermission + // + // * ec2:DeleteNetworkInterface + // + // If you revoke these permissions after you create the delivery stream, Kinesis + // Data Firehose can't scale out by creating more ENIs when necessary. You might + // therefore see a degradation in performance. + // + // RoleARN is a required field + RoleARN *string `min:"1" type:"string" required:"true"` + + // The IDs of the security groups that Kinesis Data Firehose uses when it creates + // ENIs in the VPC of the Amazon ES destination. You can use the same security + // group that the Amazon ES domain uses or different ones. If you specify different + // security groups, ensure that they allow outbound HTTPS traffic to the Amazon + // ES domain's security group. Also ensure that the Amazon ES domain's security + // group allows HTTPS traffic from the security groups specified here. If you + // use the same security group for both your delivery stream and the Amazon + // ES domain, make sure the security group inbound rule allows HTTPS traffic. + // For more information about security group rules, see Security group rules + // (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#SecurityGroupRules) + // in the Amazon VPC documentation. + // + // SecurityGroupIds is a required field + SecurityGroupIds []*string `min:"1" type:"list" required:"true"` + + // The IDs of the subnets that Kinesis Data Firehose uses to create ENIs in + // the VPC of the Amazon ES destination. Make sure that the routing tables and + // inbound and outbound rules allow traffic to flow from the subnets whose IDs + // are specified here to the subnets that have the destination Amazon ES endpoints. + // Kinesis Data Firehose creates at least one ENI in each of the subnets that + // are specified here. Do not delete or modify these ENIs. + // + // The number of ENIs that Kinesis Data Firehose creates in the subnets specified + // here scales up and down automatically based on throughput. To enable Kinesis + // Data Firehose to scale up the number of ENIs to match throughput, ensure + // that you have sufficient quota. To help you calculate the quota you need, + // assume that Kinesis Data Firehose can create up to three ENIs for this delivery + // stream for each of the subnets specified here. For more information about + // ENI quota, see Network Interfaces (https://docs.aws.amazon.com/vpc/latest/userguide/amazon-vpc-limits.html#vpc-limits-enis) + // in the Amazon VPC Quotas topic. + // + // SubnetIds is a required field + SubnetIds []*string `min:"1" type:"list" required:"true"` + + // The ID of the Amazon ES destination's VPC. 
+ // + // VpcId is a required field + VpcId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s VpcConfigurationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcConfigurationDescription) GoString() string { + return s.String() +} + +// SetRoleARN sets the RoleARN field's value. +func (s *VpcConfigurationDescription) SetRoleARN(v string) *VpcConfigurationDescription { + s.RoleARN = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *VpcConfigurationDescription) SetSecurityGroupIds(v []*string) *VpcConfigurationDescription { + s.SecurityGroupIds = v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *VpcConfigurationDescription) SetSubnetIds(v []*string) *VpcConfigurationDescription { + s.SubnetIds = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *VpcConfigurationDescription) SetVpcId(v string) *VpcConfigurationDescription { + s.VpcId = &v + return s +} + const ( // CompressionFormatUncompressed is a CompressionFormat enum value CompressionFormatUncompressed = "UNCOMPRESSED" @@ -7166,8 +8225,38 @@ const ( // CompressionFormatSnappy is a CompressionFormat enum value CompressionFormatSnappy = "Snappy" + + // CompressionFormatHadoopSnappy is a CompressionFormat enum value + CompressionFormatHadoopSnappy = "HADOOP_SNAPPY" +) + +// CompressionFormat_Values returns all elements of the CompressionFormat enum +func CompressionFormat_Values() []string { + return []string{ + CompressionFormatUncompressed, + CompressionFormatGzip, + CompressionFormatZip, + CompressionFormatSnappy, + CompressionFormatHadoopSnappy, + } +} + +const ( + // ContentEncodingNone is a ContentEncoding enum value + ContentEncodingNone = "NONE" + + // ContentEncodingGzip is a ContentEncoding enum value + ContentEncodingGzip = "GZIP" ) +// ContentEncoding_Values returns all elements of the ContentEncoding enum +func ContentEncoding_Values() []string { + return []string{ + ContentEncodingNone, + ContentEncodingGzip, + } +} + const ( // DeliveryStreamEncryptionStatusEnabled is a DeliveryStreamEncryptionStatus enum value DeliveryStreamEncryptionStatusEnabled = "ENABLED" @@ -7188,6 +8277,18 @@ const ( DeliveryStreamEncryptionStatusDisablingFailed = "DISABLING_FAILED" ) +// DeliveryStreamEncryptionStatus_Values returns all elements of the DeliveryStreamEncryptionStatus enum +func DeliveryStreamEncryptionStatus_Values() []string { + return []string{ + DeliveryStreamEncryptionStatusEnabled, + DeliveryStreamEncryptionStatusEnabling, + DeliveryStreamEncryptionStatusEnablingFailed, + DeliveryStreamEncryptionStatusDisabled, + DeliveryStreamEncryptionStatusDisabling, + DeliveryStreamEncryptionStatusDisablingFailed, + } +} + const ( // DeliveryStreamFailureTypeRetireKmsGrantFailed is a DeliveryStreamFailureType enum value DeliveryStreamFailureTypeRetireKmsGrantFailed = "RETIRE_KMS_GRANT_FAILED" @@ -7210,10 +8311,52 @@ const ( // DeliveryStreamFailureTypeKmsOptInRequired is a DeliveryStreamFailureType enum value DeliveryStreamFailureTypeKmsOptInRequired = "KMS_OPT_IN_REQUIRED" + // DeliveryStreamFailureTypeCreateEniFailed is a DeliveryStreamFailureType enum value + DeliveryStreamFailureTypeCreateEniFailed = "CREATE_ENI_FAILED" + + // DeliveryStreamFailureTypeDeleteEniFailed is a DeliveryStreamFailureType enum value + DeliveryStreamFailureTypeDeleteEniFailed = "DELETE_ENI_FAILED" + + // DeliveryStreamFailureTypeSubnetNotFound is 
a DeliveryStreamFailureType enum value + DeliveryStreamFailureTypeSubnetNotFound = "SUBNET_NOT_FOUND" + + // DeliveryStreamFailureTypeSecurityGroupNotFound is a DeliveryStreamFailureType enum value + DeliveryStreamFailureTypeSecurityGroupNotFound = "SECURITY_GROUP_NOT_FOUND" + + // DeliveryStreamFailureTypeEniAccessDenied is a DeliveryStreamFailureType enum value + DeliveryStreamFailureTypeEniAccessDenied = "ENI_ACCESS_DENIED" + + // DeliveryStreamFailureTypeSubnetAccessDenied is a DeliveryStreamFailureType enum value + DeliveryStreamFailureTypeSubnetAccessDenied = "SUBNET_ACCESS_DENIED" + + // DeliveryStreamFailureTypeSecurityGroupAccessDenied is a DeliveryStreamFailureType enum value + DeliveryStreamFailureTypeSecurityGroupAccessDenied = "SECURITY_GROUP_ACCESS_DENIED" + // DeliveryStreamFailureTypeUnknownError is a DeliveryStreamFailureType enum value DeliveryStreamFailureTypeUnknownError = "UNKNOWN_ERROR" ) +// DeliveryStreamFailureType_Values returns all elements of the DeliveryStreamFailureType enum +func DeliveryStreamFailureType_Values() []string { + return []string{ + DeliveryStreamFailureTypeRetireKmsGrantFailed, + DeliveryStreamFailureTypeCreateKmsGrantFailed, + DeliveryStreamFailureTypeKmsAccessDenied, + DeliveryStreamFailureTypeDisabledKmsKey, + DeliveryStreamFailureTypeInvalidKmsKey, + DeliveryStreamFailureTypeKmsKeyNotFound, + DeliveryStreamFailureTypeKmsOptInRequired, + DeliveryStreamFailureTypeCreateEniFailed, + DeliveryStreamFailureTypeDeleteEniFailed, + DeliveryStreamFailureTypeSubnetNotFound, + DeliveryStreamFailureTypeSecurityGroupNotFound, + DeliveryStreamFailureTypeEniAccessDenied, + DeliveryStreamFailureTypeSubnetAccessDenied, + DeliveryStreamFailureTypeSecurityGroupAccessDenied, + DeliveryStreamFailureTypeUnknownError, + } +} + const ( // DeliveryStreamStatusCreating is a DeliveryStreamStatus enum value DeliveryStreamStatusCreating = "CREATING" @@ -7231,6 +8374,17 @@ const ( DeliveryStreamStatusActive = "ACTIVE" ) +// DeliveryStreamStatus_Values returns all elements of the DeliveryStreamStatus enum +func DeliveryStreamStatus_Values() []string { + return []string{ + DeliveryStreamStatusCreating, + DeliveryStreamStatusCreatingFailed, + DeliveryStreamStatusDeleting, + DeliveryStreamStatusDeletingFailed, + DeliveryStreamStatusActive, + } +} + const ( // DeliveryStreamTypeDirectPut is a DeliveryStreamType enum value DeliveryStreamTypeDirectPut = "DirectPut" @@ -7239,6 +8393,14 @@ const ( DeliveryStreamTypeKinesisStreamAsSource = "KinesisStreamAsSource" ) +// DeliveryStreamType_Values returns all elements of the DeliveryStreamType enum +func DeliveryStreamType_Values() []string { + return []string{ + DeliveryStreamTypeDirectPut, + DeliveryStreamTypeKinesisStreamAsSource, + } +} + const ( // ElasticsearchIndexRotationPeriodNoRotation is a ElasticsearchIndexRotationPeriod enum value ElasticsearchIndexRotationPeriodNoRotation = "NoRotation" @@ -7256,6 +8418,17 @@ const ( ElasticsearchIndexRotationPeriodOneMonth = "OneMonth" ) +// ElasticsearchIndexRotationPeriod_Values returns all elements of the ElasticsearchIndexRotationPeriod enum +func ElasticsearchIndexRotationPeriod_Values() []string { + return []string{ + ElasticsearchIndexRotationPeriodNoRotation, + ElasticsearchIndexRotationPeriodOneHour, + ElasticsearchIndexRotationPeriodOneDay, + ElasticsearchIndexRotationPeriodOneWeek, + ElasticsearchIndexRotationPeriodOneMonth, + } +} + const ( // ElasticsearchS3BackupModeFailedDocumentsOnly is a ElasticsearchS3BackupMode enum value ElasticsearchS3BackupModeFailedDocumentsOnly = 
"FailedDocumentsOnly" @@ -7264,6 +8437,14 @@ const ( ElasticsearchS3BackupModeAllDocuments = "AllDocuments" ) +// ElasticsearchS3BackupMode_Values returns all elements of the ElasticsearchS3BackupMode enum +func ElasticsearchS3BackupMode_Values() []string { + return []string{ + ElasticsearchS3BackupModeFailedDocumentsOnly, + ElasticsearchS3BackupModeAllDocuments, + } +} + const ( // HECEndpointTypeRaw is a HECEndpointType enum value HECEndpointTypeRaw = "Raw" @@ -7272,6 +8453,30 @@ const ( HECEndpointTypeEvent = "Event" ) +// HECEndpointType_Values returns all elements of the HECEndpointType enum +func HECEndpointType_Values() []string { + return []string{ + HECEndpointTypeRaw, + HECEndpointTypeEvent, + } +} + +const ( + // HttpEndpointS3BackupModeFailedDataOnly is a HttpEndpointS3BackupMode enum value + HttpEndpointS3BackupModeFailedDataOnly = "FailedDataOnly" + + // HttpEndpointS3BackupModeAllData is a HttpEndpointS3BackupMode enum value + HttpEndpointS3BackupModeAllData = "AllData" +) + +// HttpEndpointS3BackupMode_Values returns all elements of the HttpEndpointS3BackupMode enum +func HttpEndpointS3BackupMode_Values() []string { + return []string{ + HttpEndpointS3BackupModeFailedDataOnly, + HttpEndpointS3BackupModeAllData, + } +} + const ( // KeyTypeAwsOwnedCmk is a KeyType enum value KeyTypeAwsOwnedCmk = "AWS_OWNED_CMK" @@ -7280,11 +8485,26 @@ const ( KeyTypeCustomerManagedCmk = "CUSTOMER_MANAGED_CMK" ) +// KeyType_Values returns all elements of the KeyType enum +func KeyType_Values() []string { + return []string{ + KeyTypeAwsOwnedCmk, + KeyTypeCustomerManagedCmk, + } +} + const ( // NoEncryptionConfigNoEncryption is a NoEncryptionConfig enum value NoEncryptionConfigNoEncryption = "NoEncryption" ) +// NoEncryptionConfig_Values returns all elements of the NoEncryptionConfig enum +func NoEncryptionConfig_Values() []string { + return []string{ + NoEncryptionConfigNoEncryption, + } +} + const ( // OrcCompressionNone is a OrcCompression enum value OrcCompressionNone = "NONE" @@ -7296,6 +8516,15 @@ const ( OrcCompressionSnappy = "SNAPPY" ) +// OrcCompression_Values returns all elements of the OrcCompression enum +func OrcCompression_Values() []string { + return []string{ + OrcCompressionNone, + OrcCompressionZlib, + OrcCompressionSnappy, + } +} + const ( // OrcFormatVersionV011 is a OrcFormatVersion enum value OrcFormatVersionV011 = "V0_11" @@ -7304,6 +8533,14 @@ const ( OrcFormatVersionV012 = "V0_12" ) +// OrcFormatVersion_Values returns all elements of the OrcFormatVersion enum +func OrcFormatVersion_Values() []string { + return []string{ + OrcFormatVersionV011, + OrcFormatVersionV012, + } +} + const ( // ParquetCompressionUncompressed is a ParquetCompression enum value ParquetCompressionUncompressed = "UNCOMPRESSED" @@ -7315,6 +8552,15 @@ const ( ParquetCompressionSnappy = "SNAPPY" ) +// ParquetCompression_Values returns all elements of the ParquetCompression enum +func ParquetCompression_Values() []string { + return []string{ + ParquetCompressionUncompressed, + ParquetCompressionGzip, + ParquetCompressionSnappy, + } +} + const ( // ParquetWriterVersionV1 is a ParquetWriterVersion enum value ParquetWriterVersionV1 = "V1" @@ -7323,6 +8569,14 @@ const ( ParquetWriterVersionV2 = "V2" ) +// ParquetWriterVersion_Values returns all elements of the ParquetWriterVersion enum +func ParquetWriterVersion_Values() []string { + return []string{ + ParquetWriterVersionV1, + ParquetWriterVersionV2, + } +} + const ( // ProcessorParameterNameLambdaArn is a ProcessorParameterName enum value 
ProcessorParameterNameLambdaArn = "LambdaArn" @@ -7340,11 +8594,29 @@ const ( ProcessorParameterNameBufferIntervalInSeconds = "BufferIntervalInSeconds" ) +// ProcessorParameterName_Values returns all elements of the ProcessorParameterName enum +func ProcessorParameterName_Values() []string { + return []string{ + ProcessorParameterNameLambdaArn, + ProcessorParameterNameNumberOfRetries, + ProcessorParameterNameRoleArn, + ProcessorParameterNameBufferSizeInMbs, + ProcessorParameterNameBufferIntervalInSeconds, + } +} + const ( // ProcessorTypeLambda is a ProcessorType enum value ProcessorTypeLambda = "Lambda" ) +// ProcessorType_Values returns all elements of the ProcessorType enum +func ProcessorType_Values() []string { + return []string{ + ProcessorTypeLambda, + } +} + const ( // RedshiftS3BackupModeDisabled is a RedshiftS3BackupMode enum value RedshiftS3BackupModeDisabled = "Disabled" @@ -7353,6 +8625,14 @@ const ( RedshiftS3BackupModeEnabled = "Enabled" ) +// RedshiftS3BackupMode_Values returns all elements of the RedshiftS3BackupMode enum +func RedshiftS3BackupMode_Values() []string { + return []string{ + RedshiftS3BackupModeDisabled, + RedshiftS3BackupModeEnabled, + } +} + const ( // S3BackupModeDisabled is a S3BackupMode enum value S3BackupModeDisabled = "Disabled" @@ -7361,6 +8641,14 @@ const ( S3BackupModeEnabled = "Enabled" ) +// S3BackupMode_Values returns all elements of the S3BackupMode enum +func S3BackupMode_Values() []string { + return []string{ + S3BackupModeDisabled, + S3BackupModeEnabled, + } +} + const ( // SplunkS3BackupModeFailedEventsOnly is a SplunkS3BackupMode enum value SplunkS3BackupModeFailedEventsOnly = "FailedEventsOnly" @@ -7368,3 +8656,11 @@ const ( // SplunkS3BackupModeAllEvents is a SplunkS3BackupMode enum value SplunkS3BackupModeAllEvents = "AllEvents" ) + +// SplunkS3BackupMode_Values returns all elements of the SplunkS3BackupMode enum +func SplunkS3BackupMode_Values() []string { + return []string{ + SplunkS3BackupModeFailedEventsOnly, + SplunkS3BackupModeAllEvents, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go index d2acfb9de..d279f87a6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/fms/api.go b/vendor/github.com/aws/aws-sdk-go/service/fms/api.go index 52cabde0c..7c4013863 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/fms/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/fms/api.go @@ -76,9 +76,12 @@ func (c *FMS) AssociateAdminAccountRequest(input *AssociateAdminAccountInput) (r // // Returned Error Types: // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. 
For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. // // * InvalidInputException // The parameters of the request were invalid. @@ -112,6 +115,98 @@ func (c *FMS) AssociateAdminAccountWithContext(ctx aws.Context, input *Associate return out, req.Send() } +const opDeleteAppsList = "DeleteAppsList" + +// DeleteAppsListRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAppsList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAppsList for more information on using the DeleteAppsList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteAppsListRequest method. +// req, resp := client.DeleteAppsListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/DeleteAppsList +func (c *FMS) DeleteAppsListRequest(input *DeleteAppsListInput) (req *request.Request, output *DeleteAppsListOutput) { + op := &request.Operation{ + Name: opDeleteAppsList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAppsListInput{} + } + + output = &DeleteAppsListOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteAppsList API operation for Firewall Management Service. +// +// Permanently deletes an AWS Firewall Manager applications list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Firewall Management Service's +// API operation DeleteAppsList for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// +// * InvalidOperationException +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. +// +// * InternalErrorException +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/DeleteAppsList +func (c *FMS) DeleteAppsList(input *DeleteAppsListInput) (*DeleteAppsListOutput, error) { + req, out := c.DeleteAppsListRequest(input) + return out, req.Send() +} + +// DeleteAppsListWithContext is the same as DeleteAppsList with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAppsList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FMS) DeleteAppsListWithContext(ctx aws.Context, input *DeleteAppsListInput, opts ...request.Option) (*DeleteAppsListOutput, error) { + req, out := c.DeleteAppsListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteNotificationChannel = "DeleteNotificationChannel" // DeleteNotificationChannelRequest generates a "aws/request.Request" representing the @@ -173,9 +268,12 @@ func (c *FMS) DeleteNotificationChannelRequest(input *DeleteNotificationChannelI // The specified resource was not found. // // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. // // * InternalErrorException // The operation failed because of a system problem, even though the request @@ -262,9 +360,12 @@ func (c *FMS) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Reques // The specified resource was not found. // // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. // // * InternalErrorException // The operation failed because of a system problem, even though the request @@ -292,6 +393,98 @@ func (c *FMS) DeletePolicyWithContext(ctx aws.Context, input *DeletePolicyInput, return out, req.Send() } +const opDeleteProtocolsList = "DeleteProtocolsList" + +// DeleteProtocolsListRequest generates a "aws/request.Request" representing the +// client's request for the DeleteProtocolsList operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteProtocolsList for more information on using the DeleteProtocolsList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteProtocolsListRequest method. +// req, resp := client.DeleteProtocolsListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/DeleteProtocolsList +func (c *FMS) DeleteProtocolsListRequest(input *DeleteProtocolsListInput) (req *request.Request, output *DeleteProtocolsListOutput) { + op := &request.Operation{ + Name: opDeleteProtocolsList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteProtocolsListInput{} + } + + output = &DeleteProtocolsListOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteProtocolsList API operation for Firewall Management Service. +// +// Permanently deletes an AWS Firewall Manager protocols list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Firewall Management Service's +// API operation DeleteProtocolsList for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// +// * InvalidOperationException +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. +// +// * InternalErrorException +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/DeleteProtocolsList +func (c *FMS) DeleteProtocolsList(input *DeleteProtocolsListInput) (*DeleteProtocolsListOutput, error) { + req, out := c.DeleteProtocolsListRequest(input) + return out, req.Send() +} + +// DeleteProtocolsListWithContext is the same as DeleteProtocolsList with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteProtocolsList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *FMS) DeleteProtocolsListWithContext(ctx aws.Context, input *DeleteProtocolsListInput, opts ...request.Option) (*DeleteProtocolsListOutput, error) { + req, out := c.DeleteProtocolsListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDisassociateAdminAccount = "DisassociateAdminAccount" // DisassociateAdminAccountRequest generates a "aws/request.Request" representing the @@ -350,9 +543,12 @@ func (c *FMS) DisassociateAdminAccountRequest(input *DisassociateAdminAccountInp // // Returned Error Types: // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. // // * ResourceNotFoundException // The specified resource was not found. @@ -439,9 +635,12 @@ func (c *FMS) GetAdminAccountRequest(input *GetAdminAccountInput) (req *request. // // Returned Error Types: // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. // // * ResourceNotFoundException // The specified resource was not found. @@ -472,6 +671,98 @@ func (c *FMS) GetAdminAccountWithContext(ctx aws.Context, input *GetAdminAccount return out, req.Send() } +const opGetAppsList = "GetAppsList" + +// GetAppsListRequest generates a "aws/request.Request" representing the +// client's request for the GetAppsList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAppsList for more information on using the GetAppsList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAppsListRequest method. 
+// req, resp := client.GetAppsListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/GetAppsList +func (c *FMS) GetAppsListRequest(input *GetAppsListInput) (req *request.Request, output *GetAppsListOutput) { + op := &request.Operation{ + Name: opGetAppsList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAppsListInput{} + } + + output = &GetAppsListOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAppsList API operation for Firewall Management Service. +// +// Returns information about the specified AWS Firewall Manager applications +// list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Firewall Management Service's +// API operation GetAppsList for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// +// * InvalidOperationException +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. +// +// * InternalErrorException +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/GetAppsList +func (c *FMS) GetAppsList(input *GetAppsListInput) (*GetAppsListOutput, error) { + req, out := c.GetAppsListRequest(input) + return out, req.Send() +} + +// GetAppsListWithContext is the same as GetAppsList with the addition of +// the ability to pass a context and additional request options. +// +// See GetAppsList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FMS) GetAppsListWithContext(ctx aws.Context, input *GetAppsListInput, opts ...request.Option) (*GetAppsListOutput, error) { + req, out := c.GetAppsListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetComplianceDetail = "GetComplianceDetail" // GetComplianceDetailRequest generates a "aws/request.Request" representing the @@ -539,6 +830,17 @@ func (c *FMS) GetComplianceDetailRequest(input *GetComplianceDetailInput) (req * // The operation failed because of a system problem, even though the request // was valid. Retry your request. // +// * InvalidInputException +// The parameters of the request were invalid. +// +// * InvalidOperationException +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. 
Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/GetComplianceDetail func (c *FMS) GetComplianceDetail(input *GetComplianceDetailInput) (*GetComplianceDetailOutput, error) { req, out := c.GetComplianceDetailRequest(input) @@ -620,9 +922,12 @@ func (c *FMS) GetNotificationChannelRequest(input *GetNotificationChannelInput) // The specified resource was not found. // // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. // // * InternalErrorException // The operation failed because of a system problem, even though the request @@ -708,9 +1013,12 @@ func (c *FMS) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, out // The specified resource was not found. // // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. // // * InternalErrorException // The operation failed because of a system problem, even though the request @@ -829,120 +1137,395 @@ func (c *FMS) GetProtectionStatusWithContext(ctx aws.Context, input *GetProtecti return out, req.Send() } -const opListComplianceStatus = "ListComplianceStatus" +const opGetProtocolsList = "GetProtocolsList" -// ListComplianceStatusRequest generates a "aws/request.Request" representing the -// client's request for the ListComplianceStatus operation. The "output" return +// GetProtocolsListRequest generates a "aws/request.Request" representing the +// client's request for the GetProtocolsList operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListComplianceStatus for more information on using the ListComplianceStatus +// See GetProtocolsList for more information on using the GetProtocolsList // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // // -// // Example sending a request using the ListComplianceStatusRequest method. -// req, resp := client.ListComplianceStatusRequest(params) +// // Example sending a request using the GetProtocolsListRequest method. +// req, resp := client.GetProtocolsListRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/ListComplianceStatus -func (c *FMS) ListComplianceStatusRequest(input *ListComplianceStatusInput) (req *request.Request, output *ListComplianceStatusOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/GetProtocolsList +func (c *FMS) GetProtocolsListRequest(input *GetProtocolsListInput) (req *request.Request, output *GetProtocolsListOutput) { op := &request.Operation{ - Name: opListComplianceStatus, + Name: opGetProtocolsList, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListComplianceStatusInput{} + input = &GetProtocolsListInput{} } - output = &ListComplianceStatusOutput{} + output = &GetProtocolsListOutput{} req = c.newRequest(op, input, output) return } -// ListComplianceStatus API operation for Firewall Management Service. +// GetProtocolsList API operation for Firewall Management Service. // -// Returns an array of PolicyComplianceStatus objects in the response. Use PolicyComplianceStatus -// to get a summary of which member accounts are protected by the specified -// policy. +// Returns information about the specified AWS Firewall Manager protocols list. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Firewall Management Service's -// API operation ListComplianceStatus for usage and error information. +// API operation GetProtocolsList for usage and error information. // // Returned Error Types: // * ResourceNotFoundException // The specified resource was not found. // +// * InvalidOperationException +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. +// // * InternalErrorException // The operation failed because of a system problem, even though the request // was valid. Retry your request. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/ListComplianceStatus -func (c *FMS) ListComplianceStatus(input *ListComplianceStatusInput) (*ListComplianceStatusOutput, error) { - req, out := c.ListComplianceStatusRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/GetProtocolsList +func (c *FMS) GetProtocolsList(input *GetProtocolsListInput) (*GetProtocolsListOutput, error) { + req, out := c.GetProtocolsListRequest(input) return out, req.Send() } -// ListComplianceStatusWithContext is the same as ListComplianceStatus with the addition of +// GetProtocolsListWithContext is the same as GetProtocolsList with the addition of // the ability to pass a context and additional request options. // -// See ListComplianceStatus for details on how to use this API operation. +// See GetProtocolsList for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *FMS) ListComplianceStatusWithContext(ctx aws.Context, input *ListComplianceStatusInput, opts ...request.Option) (*ListComplianceStatusOutput, error) { - req, out := c.ListComplianceStatusRequest(input) +func (c *FMS) GetProtocolsListWithContext(ctx aws.Context, input *GetProtocolsListInput, opts ...request.Option) (*GetProtocolsListOutput, error) { + req, out := c.GetProtocolsListRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListComplianceStatusPages iterates over the pages of a ListComplianceStatus operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +const opGetViolationDetails = "GetViolationDetails" + +// GetViolationDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GetViolationDetails operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See ListComplianceStatus method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: This operation can generate multiple requests to a service. +// See GetViolationDetails for more information on using the GetViolationDetails +// API call, and error handling. // -// // Example iterating over at most 3 pages of a ListComplianceStatus operation. -// pageNum := 0 -// err := client.ListComplianceStatusPages(params, -// func(page *fms.ListComplianceStatusOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -func (c *FMS) ListComplianceStatusPages(input *ListComplianceStatusInput, fn func(*ListComplianceStatusOutput, bool) bool) error { - return c.ListComplianceStatusPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListComplianceStatusPagesWithContext same as ListComplianceStatusPages except -// it takes a Context and allows setting request options on the pages. +// +// // Example sending a request using the GetViolationDetailsRequest method. 
+// req, resp := client.GetViolationDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/GetViolationDetails +func (c *FMS) GetViolationDetailsRequest(input *GetViolationDetailsInput) (req *request.Request, output *GetViolationDetailsOutput) { + op := &request.Operation{ + Name: opGetViolationDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetViolationDetailsInput{} + } + + output = &GetViolationDetailsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetViolationDetails API operation for Firewall Management Service. +// +// Retrieves violations for a resource based on the specified AWS Firewall Manager +// policy and AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Firewall Management Service's +// API operation GetViolationDetails for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// +// * InvalidInputException +// The parameters of the request were invalid. +// +// * InternalErrorException +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/GetViolationDetails +func (c *FMS) GetViolationDetails(input *GetViolationDetailsInput) (*GetViolationDetailsOutput, error) { + req, out := c.GetViolationDetailsRequest(input) + return out, req.Send() +} + +// GetViolationDetailsWithContext is the same as GetViolationDetails with the addition of +// the ability to pass a context and additional request options. +// +// See GetViolationDetails for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FMS) GetViolationDetailsWithContext(ctx aws.Context, input *GetViolationDetailsInput, opts ...request.Option) (*GetViolationDetailsOutput, error) { + req, out := c.GetViolationDetailsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAppsLists = "ListAppsLists" + +// ListAppsListsRequest generates a "aws/request.Request" representing the +// client's request for the ListAppsLists operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAppsLists for more information on using the ListAppsLists +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAppsListsRequest method. 
+// req, resp := client.ListAppsListsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/ListAppsLists +func (c *FMS) ListAppsListsRequest(input *ListAppsListsInput) (req *request.Request, output *ListAppsListsOutput) { + op := &request.Operation{ + Name: opListAppsLists, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListAppsListsInput{} + } + + output = &ListAppsListsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAppsLists API operation for Firewall Management Service. +// +// Returns an array of AppsListDataSummary objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Firewall Management Service's +// API operation ListAppsLists for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// +// * InvalidOperationException +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. +// +// * LimitExceededException +// The operation exceeds a resource limit, for example, the maximum number of +// policy objects that you can create for an AWS account. For more information, +// see Firewall Manager Limits (https://docs.aws.amazon.com/waf/latest/developerguide/fms-limits.html) +// in the AWS WAF Developer Guide. +// +// * InternalErrorException +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/ListAppsLists +func (c *FMS) ListAppsLists(input *ListAppsListsInput) (*ListAppsListsOutput, error) { + req, out := c.ListAppsListsRequest(input) + return out, req.Send() +} + +// ListAppsListsWithContext is the same as ListAppsLists with the addition of +// the ability to pass a context and additional request options. +// +// See ListAppsLists for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FMS) ListAppsListsWithContext(ctx aws.Context, input *ListAppsListsInput, opts ...request.Option) (*ListAppsListsOutput, error) { + req, out := c.ListAppsListsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListComplianceStatus = "ListComplianceStatus" + +// ListComplianceStatusRequest generates a "aws/request.Request" representing the +// client's request for the ListComplianceStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListComplianceStatus for more information on using the ListComplianceStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListComplianceStatusRequest method. +// req, resp := client.ListComplianceStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/ListComplianceStatus +func (c *FMS) ListComplianceStatusRequest(input *ListComplianceStatusInput) (req *request.Request, output *ListComplianceStatusOutput) { + op := &request.Operation{ + Name: opListComplianceStatus, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListComplianceStatusInput{} + } + + output = &ListComplianceStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListComplianceStatus API operation for Firewall Management Service. +// +// Returns an array of PolicyComplianceStatus objects. Use PolicyComplianceStatus +// to get a summary of which member accounts are protected by the specified +// policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Firewall Management Service's +// API operation ListComplianceStatus for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// +// * InternalErrorException +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/ListComplianceStatus +func (c *FMS) ListComplianceStatus(input *ListComplianceStatusInput) (*ListComplianceStatusOutput, error) { + req, out := c.ListComplianceStatusRequest(input) + return out, req.Send() +} + +// ListComplianceStatusWithContext is the same as ListComplianceStatus with the addition of +// the ability to pass a context and additional request options. +// +// See ListComplianceStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FMS) ListComplianceStatusWithContext(ctx aws.Context, input *ListComplianceStatusInput, opts ...request.Option) (*ListComplianceStatusOutput, error) { + req, out := c.ListComplianceStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListComplianceStatusPages iterates over the pages of a ListComplianceStatus operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListComplianceStatus method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListComplianceStatus operation. +// pageNum := 0 +// err := client.ListComplianceStatusPages(params, +// func(page *fms.ListComplianceStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *FMS) ListComplianceStatusPages(input *ListComplianceStatusInput, fn func(*ListComplianceStatusOutput, bool) bool) error { + return c.ListComplianceStatusPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListComplianceStatusPagesWithContext same as ListComplianceStatusPages except +// it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create @@ -1167,7 +1750,7 @@ func (c *FMS) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Reques // ListPolicies API operation for Firewall Management Service. // -// Returns an array of PolicySummary objects in the response. +// Returns an array of PolicySummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1181,9 +1764,12 @@ func (c *FMS) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Reques // The specified resource was not found. // // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. // // * LimitExceededException // The operation exceeds a resource limit, for example, the maximum number of @@ -1269,78 +1855,172 @@ func (c *FMS) ListPoliciesPagesWithContext(ctx aws.Context, input *ListPoliciesI return p.Err() } -const opListTagsForResource = "ListTagsForResource" +const opListProtocolsLists = "ListProtocolsLists" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// ListProtocolsListsRequest generates a "aws/request.Request" representing the +// client's request for the ListProtocolsLists operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See ListProtocolsLists for more information on using the ListProtocolsLists // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
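+//
+// // As a concrete illustration (an editor's sketch, not part of the generated
+// // code), a custom header could be attached to the returned request before it
+// // is sent; the header name and value are placeholders.
+// req, resp := client.ListProtocolsListsRequest(params)
+// req.HTTPRequest.Header.Set("X-Custom-Header", "example-value")
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+//     fmt.Println(resp)
+// }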
// // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the ListProtocolsListsRequest method. +// req, resp := client.ListProtocolsListsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/ListTagsForResource -func (c *FMS) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/ListProtocolsLists +func (c *FMS) ListProtocolsListsRequest(input *ListProtocolsListsInput) (req *request.Request, output *ListProtocolsListsOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opListProtocolsLists, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListTagsForResourceInput{} + input = &ListProtocolsListsInput{} } - output = &ListTagsForResourceOutput{} + output = &ListProtocolsListsOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for Firewall Management Service. +// ListProtocolsLists API operation for Firewall Management Service. // -// Retrieves the list of tags for the specified AWS resource. +// Returns an array of ProtocolsListDataSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Firewall Management Service's -// API operation ListTagsForResource for usage and error information. +// API operation ListProtocolsLists for usage and error information. // // Returned Error Types: // * ResourceNotFoundException // The specified resource was not found. // // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. // // * InternalErrorException // The operation failed because of a system problem, even though the request // was valid. Retry your request. // -// * InvalidInputException -// The parameters of the request were invalid. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/ListTagsForResource -func (c *FMS) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/ListProtocolsLists +func (c *FMS) ListProtocolsLists(input *ListProtocolsListsInput) (*ListProtocolsListsOutput, error) { + req, out := c.ListProtocolsListsRequest(input) + return out, req.Send() +} + +// ListProtocolsListsWithContext is the same as ListProtocolsLists with the addition of +// the ability to pass a context and additional request options. 
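+//
+// // For example (an editor's sketch, not part of the generated code), a standard
+// // library context can bound the call; the 30-second timeout is an arbitrary
+// // placeholder value.
+// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+// defer cancel()
+// resp, err := client.ListProtocolsListsWithContext(ctx, params)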
+// +// See ListProtocolsLists for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FMS) ListProtocolsListsWithContext(ctx aws.Context, input *ListProtocolsListsInput, opts ...request.Option) (*ListProtocolsListsOutput, error) { + req, out := c.ListProtocolsListsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/ListTagsForResource +func (c *FMS) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Firewall Management Service. +// +// Retrieves the list of tags for the specified AWS resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Firewall Management Service's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// +// * InvalidOperationException +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. +// +// * InternalErrorException +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * InvalidInputException +// The parameters of the request were invalid. 
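+//
+// // To illustrate the runtime type assertion described above (an editor's
+// // sketch, not part of the generated code):
+// resp, err := client.ListTagsForResource(params)
+// if err != nil {
+//     if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "ResourceNotFoundException" {
+//         fmt.Println(aerr.Message())
+//     }
+// } else {
+//     fmt.Println(resp)
+// }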
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/ListTagsForResource +func (c *FMS) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) return out, req.Send() } @@ -1360,6 +2040,106 @@ func (c *FMS) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsFor return out, req.Send() } +const opPutAppsList = "PutAppsList" + +// PutAppsListRequest generates a "aws/request.Request" representing the +// client's request for the PutAppsList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutAppsList for more information on using the PutAppsList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutAppsListRequest method. +// req, resp := client.PutAppsListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/PutAppsList +func (c *FMS) PutAppsListRequest(input *PutAppsListInput) (req *request.Request, output *PutAppsListOutput) { + op := &request.Operation{ + Name: opPutAppsList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutAppsListInput{} + } + + output = &PutAppsListOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutAppsList API operation for Firewall Management Service. +// +// Creates an AWS Firewall Manager applications list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Firewall Management Service's +// API operation PutAppsList for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// +// * InvalidOperationException +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. +// +// * InvalidInputException +// The parameters of the request were invalid. +// +// * LimitExceededException +// The operation exceeds a resource limit, for example, the maximum number of +// policy objects that you can create for an AWS account. For more information, +// see Firewall Manager Limits (https://docs.aws.amazon.com/waf/latest/developerguide/fms-limits.html) +// in the AWS WAF Developer Guide. +// +// * InternalErrorException +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. 
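+//
+// // An editor's sketch (not part of the generated code) of building the list
+// // data with the App and AppsListData setters defined later in this file; how
+// // the resulting value is attached to PutAppsListInput is not shown in this
+// // hunk, so that step is omitted here.
+// app := (&fms.App{}).SetAppName("web").SetPort(80).SetProtocol("tcp")
+// appsList := (&fms.AppsListData{}).SetListName("my-apps").SetAppsList([]*fms.App{app})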
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/PutAppsList +func (c *FMS) PutAppsList(input *PutAppsListInput) (*PutAppsListOutput, error) { + req, out := c.PutAppsListRequest(input) + return out, req.Send() +} + +// PutAppsListWithContext is the same as PutAppsList with the addition of +// the ability to pass a context and additional request options. +// +// See PutAppsList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FMS) PutAppsListWithContext(ctx aws.Context, input *PutAppsListInput, opts ...request.Option) (*PutAppsListOutput, error) { + req, out := c.PutAppsListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutNotificationChannel = "PutNotificationChannel" // PutNotificationChannelRequest generates a "aws/request.Request" representing the @@ -1420,9 +2200,12 @@ func (c *FMS) PutNotificationChannelRequest(input *PutNotificationChannelInput) // The specified resource was not found. // // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. // // * InternalErrorException // The operation failed because of a system problem, even though the request @@ -1501,15 +2284,18 @@ func (c *FMS) PutPolicyRequest(input *PutPolicyInput) (req *request.Request, out // * A Shield Advanced policy, which applies Shield Advanced protection to // specified accounts and resources // -// * An AWS WAF policy, which contains a rule group and defines which resources -// are to be protected by that rule group +// * An AWS WAF policy (type WAFV2), which defines rule groups to run first +// in the corresponding AWS WAF web ACL and rule groups to run last in the +// web ACL. +// +// * An AWS WAF Classic policy (type WAF), which defines a rule group. // // * A security group policy, which manages VPC security groups across your // AWS organization. // -// Each policy is specific to one of the three types. If you want to enforce -// more than one policy type across accounts, you can create multiple policies. -// You can create multiple policies for each type. +// Each policy is specific to one of the types. If you want to enforce more +// than one policy type across accounts, create multiple policies. You can create +// multiple policies for each type. // // You must be subscribed to Shield Advanced to create a Shield Advanced policy. // For more information about subscribing to Shield Advanced, see CreateSubscription @@ -1527,9 +2313,12 @@ func (c *FMS) PutPolicyRequest(input *PutPolicyInput) (req *request.Request, out // The specified resource was not found. 
// // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. // // * InvalidInputException // The parameters of the request were invalid. @@ -1569,6 +2358,106 @@ func (c *FMS) PutPolicyWithContext(ctx aws.Context, input *PutPolicyInput, opts return out, req.Send() } +const opPutProtocolsList = "PutProtocolsList" + +// PutProtocolsListRequest generates a "aws/request.Request" representing the +// client's request for the PutProtocolsList operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutProtocolsList for more information on using the PutProtocolsList +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutProtocolsListRequest method. +// req, resp := client.PutProtocolsListRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/PutProtocolsList +func (c *FMS) PutProtocolsListRequest(input *PutProtocolsListInput) (req *request.Request, output *PutProtocolsListOutput) { + op := &request.Operation{ + Name: opPutProtocolsList, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutProtocolsListInput{} + } + + output = &PutProtocolsListOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutProtocolsList API operation for Firewall Management Service. +// +// Creates an AWS Firewall Manager protocols list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Firewall Management Service's +// API operation PutProtocolsList for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource was not found. +// +// * InvalidOperationException +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. +// +// * InvalidInputException +// The parameters of the request were invalid. 
+// +// * LimitExceededException +// The operation exceeds a resource limit, for example, the maximum number of +// policy objects that you can create for an AWS account. For more information, +// see Firewall Manager Limits (https://docs.aws.amazon.com/waf/latest/developerguide/fms-limits.html) +// in the AWS WAF Developer Guide. +// +// * InternalErrorException +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/fms-2018-01-01/PutProtocolsList +func (c *FMS) PutProtocolsList(input *PutProtocolsListInput) (*PutProtocolsListOutput, error) { + req, out := c.PutProtocolsListRequest(input) + return out, req.Send() +} + +// PutProtocolsListWithContext is the same as PutProtocolsList with the addition of +// the ability to pass a context and additional request options. +// +// See PutProtocolsList for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *FMS) PutProtocolsListWithContext(ctx aws.Context, input *PutProtocolsListInput, opts ...request.Option) (*PutProtocolsListOutput, error) { + req, out := c.PutProtocolsListRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTagResource = "TagResource" // TagResourceRequest generates a "aws/request.Request" representing the @@ -1628,9 +2517,12 @@ func (c *FMS) TagResourceRequest(input *TagResourceInput) (req *request.Request, // The specified resource was not found. // // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. // // * InternalErrorException // The operation failed because of a system problem, even though the request @@ -1726,9 +2618,12 @@ func (c *FMS) UntagResourceRequest(input *UntagResourceInput) (req *request.Requ // The specified resource was not found. // // * InvalidOperationException -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. 
// // * InternalErrorException // The operation failed because of a system problem, even though the request @@ -1759,6 +2654,250 @@ func (c *FMS) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInpu return out, req.Send() } +// An individual AWS Firewall Manager application. +type App struct { + _ struct{} `type:"structure"` + + // The application's name. + // + // AppName is a required field + AppName *string `min:"1" type:"string" required:"true"` + + // The application's port number, for example 80. + // + // Port is a required field + Port *int64 `type:"long" required:"true"` + + // The IP protocol name or number. The name can be one of tcp, udp, or icmp. + // For information on possible numbers, see Protocol Numbers (https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml). + // + // Protocol is a required field + Protocol *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s App) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s App) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *App) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "App"} + if s.AppName == nil { + invalidParams.Add(request.NewErrParamRequired("AppName")) + } + if s.AppName != nil && len(*s.AppName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AppName", 1)) + } + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + if s.Protocol != nil && len(*s.Protocol) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Protocol", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppName sets the AppName field's value. +func (s *App) SetAppName(v string) *App { + s.AppName = &v + return s +} + +// SetPort sets the Port field's value. +func (s *App) SetPort(v int64) *App { + s.Port = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *App) SetProtocol(v string) *App { + s.Protocol = &v + return s +} + +// An AWS Firewall Manager applications list. +type AppsListData struct { + _ struct{} `type:"structure"` + + // An array of applications in the AWS Firewall Manager applications list. + // + // AppsList is a required field + AppsList []*App `type:"list" required:"true"` + + // The time that the AWS Firewall Manager applications list was created. + CreateTime *time.Time `type:"timestamp"` + + // The time that the AWS Firewall Manager applications list was last updated. + LastUpdateTime *time.Time `type:"timestamp"` + + // The ID of the AWS Firewall Manager applications list. + ListId *string `min:"36" type:"string"` + + // The name of the AWS Firewall Manager applications list. + // + // ListName is a required field + ListName *string `min:"1" type:"string" required:"true"` + + // A unique identifier for each update to the list. When you update the list, + // the update token must match the token of the current version of the application + // list. You can retrieve the update token by getting the list. + ListUpdateToken *string `min:"1" type:"string"` + + // A map of previous version numbers to their corresponding App object arrays. 
+ PreviousAppsList map[string][]*App `type:"map"` +} + +// String returns the string representation +func (s AppsListData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AppsListData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AppsListData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AppsListData"} + if s.AppsList == nil { + invalidParams.Add(request.NewErrParamRequired("AppsList")) + } + if s.ListId != nil && len(*s.ListId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ListId", 36)) + } + if s.ListName == nil { + invalidParams.Add(request.NewErrParamRequired("ListName")) + } + if s.ListName != nil && len(*s.ListName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ListName", 1)) + } + if s.ListUpdateToken != nil && len(*s.ListUpdateToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ListUpdateToken", 1)) + } + if s.AppsList != nil { + for i, v := range s.AppsList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AppsList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppsList sets the AppsList field's value. +func (s *AppsListData) SetAppsList(v []*App) *AppsListData { + s.AppsList = v + return s +} + +// SetCreateTime sets the CreateTime field's value. +func (s *AppsListData) SetCreateTime(v time.Time) *AppsListData { + s.CreateTime = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *AppsListData) SetLastUpdateTime(v time.Time) *AppsListData { + s.LastUpdateTime = &v + return s +} + +// SetListId sets the ListId field's value. +func (s *AppsListData) SetListId(v string) *AppsListData { + s.ListId = &v + return s +} + +// SetListName sets the ListName field's value. +func (s *AppsListData) SetListName(v string) *AppsListData { + s.ListName = &v + return s +} + +// SetListUpdateToken sets the ListUpdateToken field's value. +func (s *AppsListData) SetListUpdateToken(v string) *AppsListData { + s.ListUpdateToken = &v + return s +} + +// SetPreviousAppsList sets the PreviousAppsList field's value. +func (s *AppsListData) SetPreviousAppsList(v map[string][]*App) *AppsListData { + s.PreviousAppsList = v + return s +} + +// Details of the AWS Firewall Manager applications list. +type AppsListDataSummary struct { + _ struct{} `type:"structure"` + + // An array of App objects in the AWS Firewall Manager applications list. + AppsList []*App `type:"list"` + + // The Amazon Resource Name (ARN) of the applications list. + ListArn *string `min:"1" type:"string"` + + // The ID of the applications list. + ListId *string `min:"36" type:"string"` + + // The name of the applications list. + ListName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AppsListDataSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AppsListDataSummary) GoString() string { + return s.String() +} + +// SetAppsList sets the AppsList field's value. +func (s *AppsListDataSummary) SetAppsList(v []*App) *AppsListDataSummary { + s.AppsList = v + return s +} + +// SetListArn sets the ListArn field's value. 
+func (s *AppsListDataSummary) SetListArn(v string) *AppsListDataSummary { + s.ListArn = &v + return s +} + +// SetListId sets the ListId field's value. +func (s *AppsListDataSummary) SetListId(v string) *AppsListDataSummary { + s.ListId = &v + return s +} + +// SetListName sets the ListName field's value. +func (s *AppsListDataSummary) SetListName(v string) *AppsListDataSummary { + s.ListName = &v + return s +} + type AssociateAdminAccountInput struct { _ struct{} `type:"structure"` @@ -1817,6 +2956,126 @@ func (s AssociateAdminAccountOutput) GoString() string { return s.String() } +// Violations for an EC2 instance resource. +type AwsEc2InstanceViolation struct { + _ struct{} `type:"structure"` + + // Violations for network interfaces associated with the EC2 instance. + AwsEc2NetworkInterfaceViolations []*AwsEc2NetworkInterfaceViolation `type:"list"` + + // The resource ID of the EC2 instance. + ViolationTarget *string `type:"string"` +} + +// String returns the string representation +func (s AwsEc2InstanceViolation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2InstanceViolation) GoString() string { + return s.String() +} + +// SetAwsEc2NetworkInterfaceViolations sets the AwsEc2NetworkInterfaceViolations field's value. +func (s *AwsEc2InstanceViolation) SetAwsEc2NetworkInterfaceViolations(v []*AwsEc2NetworkInterfaceViolation) *AwsEc2InstanceViolation { + s.AwsEc2NetworkInterfaceViolations = v + return s +} + +// SetViolationTarget sets the ViolationTarget field's value. +func (s *AwsEc2InstanceViolation) SetViolationTarget(v string) *AwsEc2InstanceViolation { + s.ViolationTarget = &v + return s +} + +// Violations for network interfaces associated with an EC2 instance. +type AwsEc2NetworkInterfaceViolation struct { + _ struct{} `type:"structure"` + + // List of security groups that violate the rules specified in the master security + // group of the AWS Firewall Manager policy. + ViolatingSecurityGroups []*string `type:"list"` + + // The resource ID of the network interface. + ViolationTarget *string `type:"string"` +} + +// String returns the string representation +func (s AwsEc2NetworkInterfaceViolation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2NetworkInterfaceViolation) GoString() string { + return s.String() +} + +// SetViolatingSecurityGroups sets the ViolatingSecurityGroups field's value. +func (s *AwsEc2NetworkInterfaceViolation) SetViolatingSecurityGroups(v []*string) *AwsEc2NetworkInterfaceViolation { + s.ViolatingSecurityGroups = v + return s +} + +// SetViolationTarget sets the ViolationTarget field's value. +func (s *AwsEc2NetworkInterfaceViolation) SetViolationTarget(v string) *AwsEc2NetworkInterfaceViolation { + s.ViolationTarget = &v + return s +} + +// Details of the rule violation in a security group when compared to the master +// security group of the AWS Firewall Manager policy. +type AwsVPCSecurityGroupViolation struct { + _ struct{} `type:"structure"` + + // List of rules specified in the security group of the AWS Firewall Manager + // policy that partially match the ViolationTarget rule. + PartialMatches []*PartialMatch `type:"list"` + + // Remediation options for the rule specified in the ViolationTarget. + PossibleSecurityGroupRemediationActions []*SecurityGroupRemediationAction `type:"list"` + + // The security group rule that is being evaluated. 
+ ViolationTarget *string `type:"string"` + + // A description of the security group that violates the policy. + ViolationTargetDescription *string `type:"string"` +} + +// String returns the string representation +func (s AwsVPCSecurityGroupViolation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsVPCSecurityGroupViolation) GoString() string { + return s.String() +} + +// SetPartialMatches sets the PartialMatches field's value. +func (s *AwsVPCSecurityGroupViolation) SetPartialMatches(v []*PartialMatch) *AwsVPCSecurityGroupViolation { + s.PartialMatches = v + return s +} + +// SetPossibleSecurityGroupRemediationActions sets the PossibleSecurityGroupRemediationActions field's value. +func (s *AwsVPCSecurityGroupViolation) SetPossibleSecurityGroupRemediationActions(v []*SecurityGroupRemediationAction) *AwsVPCSecurityGroupViolation { + s.PossibleSecurityGroupRemediationActions = v + return s +} + +// SetViolationTarget sets the ViolationTarget field's value. +func (s *AwsVPCSecurityGroupViolation) SetViolationTarget(v string) *AwsVPCSecurityGroupViolation { + s.ViolationTarget = &v + return s +} + +// SetViolationTargetDescription sets the ViolationTargetDescription field's value. +func (s *AwsVPCSecurityGroupViolation) SetViolationTargetDescription(v string) *AwsVPCSecurityGroupViolation { + s.ViolationTargetDescription = &v + return s +} + // Details of the resource that is not protected by the policy. type ComplianceViolator struct { _ struct{} `type:"structure"` @@ -1861,6 +3120,62 @@ func (s *ComplianceViolator) SetViolationReason(v string) *ComplianceViolator { return s } +type DeleteAppsListInput struct { + _ struct{} `type:"structure"` + + // The ID of the applications list that you want to delete. You can retrieve + // this ID from PutAppsList, ListAppsLists, and GetAppsList. + // + // ListId is a required field + ListId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAppsListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAppsListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAppsListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAppsListInput"} + if s.ListId == nil { + invalidParams.Add(request.NewErrParamRequired("ListId")) + } + if s.ListId != nil && len(*s.ListId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ListId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetListId sets the ListId field's value. +func (s *DeleteAppsListInput) SetListId(v string) *DeleteAppsListInput { + s.ListId = &v + return s +} + +type DeleteAppsListOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAppsListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAppsListOutput) GoString() string { + return s.String() +} + type DeleteNotificationChannelInput struct { _ struct{} `type:"structure"` } @@ -1919,8 +3234,8 @@ type DeletePolicyInput struct { // If you don't specify tags or accounts, all resources are in scope. DeleteAllPolicyResources *bool `type:"boolean"` - // The ID of the policy that you want to delete. PolicyId is returned by PutPolicy - // and by ListPolicies. 
+ // The ID of the policy that you want to delete. You can retrieve this ID from + // PutPolicy and ListPolicies. // // PolicyId is a required field PolicyId *string `min:"36" type:"string" required:"true"` @@ -1964,17 +3279,73 @@ func (s *DeletePolicyInput) SetPolicyId(v string) *DeletePolicyInput { return s } -type DeletePolicyOutput struct { +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyOutput) GoString() string { + return s.String() +} + +type DeleteProtocolsListInput struct { + _ struct{} `type:"structure"` + + // The ID of the protocols list that you want to delete. You can retrieve this + // ID from PutProtocolsList, ListProtocolsLists, and GetProtocolsLost. + // + // ListId is a required field + ListId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteProtocolsListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteProtocolsListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteProtocolsListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteProtocolsListInput"} + if s.ListId == nil { + invalidParams.Add(request.NewErrParamRequired("ListId")) + } + if s.ListId != nil && len(*s.ListId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ListId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetListId sets the ListId field's value. +func (s *DeleteProtocolsListInput) SetListId(v string) *DeleteProtocolsListInput { + s.ListId = &v + return s +} + +type DeleteProtocolsListOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s DeletePolicyOutput) String() string { +func (s DeleteProtocolsListOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeletePolicyOutput) GoString() string { +func (s DeleteProtocolsListOutput) GoString() string { return s.String() } @@ -2101,6 +3472,90 @@ func (s *GetAdminAccountOutput) SetRoleStatus(v string) *GetAdminAccountOutput { return s } +type GetAppsListInput struct { + _ struct{} `type:"structure"` + + // Specifies whether the list to retrieve is a default list owned by AWS Firewall + // Manager. + DefaultList *bool `type:"boolean"` + + // The ID of the AWS Firewall Manager applications list that you want the details + // for. + // + // ListId is a required field + ListId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAppsListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAppsListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetAppsListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAppsListInput"} + if s.ListId == nil { + invalidParams.Add(request.NewErrParamRequired("ListId")) + } + if s.ListId != nil && len(*s.ListId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ListId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultList sets the DefaultList field's value. +func (s *GetAppsListInput) SetDefaultList(v bool) *GetAppsListInput { + s.DefaultList = &v + return s +} + +// SetListId sets the ListId field's value. +func (s *GetAppsListInput) SetListId(v string) *GetAppsListInput { + s.ListId = &v + return s +} + +type GetAppsListOutput struct { + _ struct{} `type:"structure"` + + // Information about the specified AWS Firewall Manager applications list. + AppsList *AppsListData `type:"structure"` + + // The Amazon Resource Name (ARN) of the applications list. + AppsListArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetAppsListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAppsListOutput) GoString() string { + return s.String() +} + +// SetAppsList sets the AppsList field's value. +func (s *GetAppsListOutput) SetAppsList(v *AppsListData) *GetAppsListOutput { + s.AppsList = v + return s +} + +// SetAppsListArn sets the AppsListArn field's value. +func (s *GetAppsListOutput) SetAppsListArn(v string) *GetAppsListOutput { + s.AppsListArn = &v + return s +} + type GetComplianceDetailInput struct { _ struct{} `type:"structure"` @@ -2485,11 +3940,214 @@ func (s *GetProtectionStatusOutput) SetServiceType(v string) *GetProtectionStatu return s } +type GetProtocolsListInput struct { + _ struct{} `type:"structure"` + + // Specifies whether the list to retrieve is a default list owned by AWS Firewall + // Manager. + DefaultList *bool `type:"boolean"` + + // The ID of the AWS Firewall Manager protocols list that you want the details + // for. + // + // ListId is a required field + ListId *string `min:"36" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetProtocolsListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetProtocolsListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetProtocolsListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetProtocolsListInput"} + if s.ListId == nil { + invalidParams.Add(request.NewErrParamRequired("ListId")) + } + if s.ListId != nil && len(*s.ListId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ListId", 36)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultList sets the DefaultList field's value. +func (s *GetProtocolsListInput) SetDefaultList(v bool) *GetProtocolsListInput { + s.DefaultList = &v + return s +} + +// SetListId sets the ListId field's value. +func (s *GetProtocolsListInput) SetListId(v string) *GetProtocolsListInput { + s.ListId = &v + return s +} + +type GetProtocolsListOutput struct { + _ struct{} `type:"structure"` + + // Information about the specified AWS Firewall Manager protocols list. + ProtocolsList *ProtocolsListData `type:"structure"` + + // The Amazon Resource Name (ARN) of the specified protocols list. 
+ ProtocolsListArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetProtocolsListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetProtocolsListOutput) GoString() string { + return s.String() +} + +// SetProtocolsList sets the ProtocolsList field's value. +func (s *GetProtocolsListOutput) SetProtocolsList(v *ProtocolsListData) *GetProtocolsListOutput { + s.ProtocolsList = v + return s +} + +// SetProtocolsListArn sets the ProtocolsListArn field's value. +func (s *GetProtocolsListOutput) SetProtocolsListArn(v string) *GetProtocolsListOutput { + s.ProtocolsListArn = &v + return s +} + +type GetViolationDetailsInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID that you want the details for. + // + // MemberAccount is a required field + MemberAccount *string `min:"1" type:"string" required:"true"` + + // The ID of the AWS Firewall Manager policy that you want the details for. + // This currently only supports security group content audit policies. + // + // PolicyId is a required field + PolicyId *string `min:"36" type:"string" required:"true"` + + // The ID of the resource that has violations. + // + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` + + // The resource type. This is in the format shown in the AWS Resource Types + // Reference (https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html). + // Supported resource types are: AWS::EC2::Instance, AWS::EC2::NetworkInterface, + // or AWS::EC2::SecurityGroup. + // + // ResourceType is a required field + ResourceType *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetViolationDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetViolationDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetViolationDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetViolationDetailsInput"} + if s.MemberAccount == nil { + invalidParams.Add(request.NewErrParamRequired("MemberAccount")) + } + if s.MemberAccount != nil && len(*s.MemberAccount) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MemberAccount", 1)) + } + if s.PolicyId == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyId")) + } + if s.PolicyId != nil && len(*s.PolicyId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("PolicyId", 36)) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ResourceType == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceType")) + } + if s.ResourceType != nil && len(*s.ResourceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceType", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMemberAccount sets the MemberAccount field's value. +func (s *GetViolationDetailsInput) SetMemberAccount(v string) *GetViolationDetailsInput { + s.MemberAccount = &v + return s +} + +// SetPolicyId sets the PolicyId field's value. 
+func (s *GetViolationDetailsInput) SetPolicyId(v string) *GetViolationDetailsInput { + s.PolicyId = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *GetViolationDetailsInput) SetResourceId(v string) *GetViolationDetailsInput { + s.ResourceId = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *GetViolationDetailsInput) SetResourceType(v string) *GetViolationDetailsInput { + s.ResourceType = &v + return s +} + +type GetViolationDetailsOutput struct { + _ struct{} `type:"structure"` + + // Violation detail for a resource. + ViolationDetail *ViolationDetail `type:"structure"` +} + +// String returns the string representation +func (s GetViolationDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetViolationDetailsOutput) GoString() string { + return s.String() +} + +// SetViolationDetail sets the ViolationDetail field's value. +func (s *GetViolationDetailsOutput) SetViolationDetail(v *ViolationDetail) *GetViolationDetailsOutput { + s.ViolationDetail = v + return s +} + // The operation failed because of a system problem, even though the request // was valid. Retry your request. type InternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2506,17 +4164,17 @@ func (s InternalErrorException) GoString() string { func newErrorInternalErrorException(v protocol.ResponseMetadata) error { return &InternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalErrorException) Code() string { +func (s *InternalErrorException) Code() string { return "InternalErrorException" } // Message returns the exception's message. -func (s InternalErrorException) Message() string { +func (s *InternalErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2524,28 +4182,28 @@ func (s InternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalErrorException) OrigErr() error { +func (s *InternalErrorException) OrigErr() error { return nil } -func (s InternalErrorException) Error() string { +func (s *InternalErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalErrorException) RequestID() string { + return s.RespMetadata.RequestID } // The parameters of the request were invalid. 
type InvalidInputException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2562,17 +4220,17 @@ func (s InvalidInputException) GoString() string { func newErrorInvalidInputException(v protocol.ResponseMetadata) error { return &InvalidInputException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInputException) Code() string { +func (s *InvalidInputException) Code() string { return "InvalidInputException" } // Message returns the exception's message. -func (s InvalidInputException) Message() string { +func (s *InvalidInputException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2580,30 +4238,33 @@ func (s InvalidInputException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInputException) OrigErr() error { +func (s *InvalidInputException) OrigErr() error { return nil } -func (s InvalidInputException) Error() string { +func (s *InvalidInputException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInputException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInputException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInputException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInputException) RequestID() string { + return s.RespMetadata.RequestID } -// The operation failed because there was nothing to do. For example, you might -// have submitted an AssociateAdminAccount request, but the account ID that -// you submitted was already set as the AWS Firewall Manager administrator. +// The operation failed because there was nothing to do or the operation wasn't +// possible. For example, you might have submitted an AssociateAdminAccount +// request for an account ID that was already set as the AWS Firewall Manager +// administrator. Or you might have tried to access a Region that's disabled +// by default, and that you need to enable for the Firewall Manager administrator +// account and for AWS Organizations before you can access it. type InvalidOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2620,17 +4281,17 @@ func (s InvalidOperationException) GoString() string { func newErrorInvalidOperationException(v protocol.ResponseMetadata) error { return &InvalidOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOperationException) Code() string { +func (s *InvalidOperationException) Code() string { return "InvalidOperationException" } // Message returns the exception's message. -func (s InvalidOperationException) Message() string { +func (s *InvalidOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2638,28 +4299,28 @@ func (s InvalidOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidOperationException) OrigErr() error { +func (s *InvalidOperationException) OrigErr() error { return nil } -func (s InvalidOperationException) Error() string { +func (s *InvalidOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOperationException) RequestID() string { + return s.RespMetadata.RequestID } // The value of the Type parameter is invalid. type InvalidTypeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2676,17 +4337,17 @@ func (s InvalidTypeException) GoString() string { func newErrorInvalidTypeException(v protocol.ResponseMetadata) error { return &InvalidTypeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTypeException) Code() string { +func (s *InvalidTypeException) Code() string { return "InvalidTypeException" } // Message returns the exception's message. -func (s InvalidTypeException) Message() string { +func (s *InvalidTypeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2694,22 +4355,22 @@ func (s InvalidTypeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTypeException) OrigErr() error { +func (s *InvalidTypeException) OrigErr() error { return nil } -func (s InvalidTypeException) Error() string { +func (s *InvalidTypeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTypeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTypeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTypeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTypeException) RequestID() string { + return s.RespMetadata.RequestID } // The operation exceeds a resource limit, for example, the maximum number of @@ -2717,8 +4378,8 @@ func (s InvalidTypeException) RequestID() string { // see Firewall Manager Limits (https://docs.aws.amazon.com/waf/latest/developerguide/fms-limits.html) // in the AWS WAF Developer Guide. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2735,17 +4396,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2753,22 +4414,129 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } -// Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +// Status code returns the HTTP status code for the request's response error. +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListAppsListsInput struct { + _ struct{} `type:"structure"` + + // Specifies whether the lists to retrieve are default lists owned by AWS Firewall + // Manager. + DefaultLists *bool `type:"boolean"` + + // The maximum number of objects that you want AWS Firewall Manager to return + // for this request. If more objects are available, in the response, AWS Firewall + // Manager provides a NextToken value that you can use in a subsequent call + // to get the next batch of objects. + // + // If you don't specify this, AWS Firewall Manager returns all available objects. + // + // MaxResults is a required field + MaxResults *int64 `min:"1" type:"integer" required:"true"` + + // If you specify a value for MaxResults in your list request, and you have + // more objects than the maximum, AWS Firewall Manager returns this token in + // the response. For all but the first request, you provide the token returned + // by the prior request in the request parameters, to retrieve the next batch + // of objects. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAppsListsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAppsListsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAppsListsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAppsListsInput"} + if s.MaxResults == nil { + invalidParams.Add(request.NewErrParamRequired("MaxResults")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultLists sets the DefaultLists field's value. +func (s *ListAppsListsInput) SetDefaultLists(v bool) *ListAppsListsInput { + s.DefaultLists = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAppsListsInput) SetMaxResults(v int64) *ListAppsListsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListAppsListsInput) SetNextToken(v string) *ListAppsListsInput { + s.NextToken = &v + return s +} + +type ListAppsListsOutput struct { + _ struct{} `type:"structure"` + + // An array of AppsListDataSummary objects. + AppsLists []*AppsListDataSummary `type:"list"` + + // If you specify a value for MaxResults in your list request, and you have + // more objects than the maximum, AWS Firewall Manager returns this token in + // the response. You can use this token in subsequent requests to retrieve the + // next batch of objects. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAppsListsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAppsListsOutput) GoString() string { + return s.String() +} + +// SetAppsLists sets the AppsLists field's value. +func (s *ListAppsListsOutput) SetAppsLists(v []*AppsListDataSummary) *ListAppsListsOutput { + s.AppsLists = v + return s } -// RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +// SetNextToken sets the NextToken field's value. +func (s *ListAppsListsOutput) SetNextToken(v string) *ListAppsListsOutput { + s.NextToken = &v + return s } type ListComplianceStatusInput struct { @@ -3064,12 +4832,119 @@ func (s *ListPoliciesOutput) SetPolicyList(v []*PolicySummary) *ListPoliciesOutp return s } +type ListProtocolsListsInput struct { + _ struct{} `type:"structure"` + + // Specifies whether the lists to retrieve are default lists owned by AWS Firewall + // Manager. + DefaultLists *bool `type:"boolean"` + + // The maximum number of objects that you want AWS Firewall Manager to return + // for this request. If more objects are available, in the response, AWS Firewall + // Manager provides a NextToken value that you can use in a subsequent call + // to get the next batch of objects. + // + // If you don't specify this, AWS Firewall Manager returns all available objects. + // + // MaxResults is a required field + MaxResults *int64 `min:"1" type:"integer" required:"true"` + + // If you specify a value for MaxResults in your list request, and you have + // more objects than the maximum, AWS Firewall Manager returns this token in + // the response. For all but the first request, you provide the token returned + // by the prior request in the request parameters, to retrieve the next batch + // of objects. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListProtocolsListsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListProtocolsListsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListProtocolsListsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListProtocolsListsInput"} + if s.MaxResults == nil { + invalidParams.Add(request.NewErrParamRequired("MaxResults")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDefaultLists sets the DefaultLists field's value. 
+func (s *ListProtocolsListsInput) SetDefaultLists(v bool) *ListProtocolsListsInput { + s.DefaultLists = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListProtocolsListsInput) SetMaxResults(v int64) *ListProtocolsListsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListProtocolsListsInput) SetNextToken(v string) *ListProtocolsListsInput { + s.NextToken = &v + return s +} + +type ListProtocolsListsOutput struct { + _ struct{} `type:"structure"` + + // If you specify a value for MaxResults in your list request, and you have + // more objects than the maximum, AWS Firewall Manager returns this token in + // the response. You can use this token in subsequent requests to retrieve the + // next batch of objects. + NextToken *string `min:"1" type:"string"` + + // An array of ProtocolsListDataSummary objects. + ProtocolsLists []*ProtocolsListDataSummary `type:"list"` +} + +// String returns the string representation +func (s ListProtocolsListsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListProtocolsListsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListProtocolsListsOutput) SetNextToken(v string) *ListProtocolsListsOutput { + s.NextToken = &v + return s +} + +// SetProtocolsLists sets the ProtocolsLists field's value. +func (s *ListProtocolsListsOutput) SetProtocolsLists(v []*ProtocolsListDataSummary) *ListProtocolsListsOutput { + s.ProtocolsLists = v + return s +} + type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource to return tags for. The Firewall - // Manager policy is the only AWS resource that supports tagging, so this ARN - // is a policy ARN.. + // The Amazon Resource Name (ARN) of the resource to return tags for. The AWS + // Firewall Manager resources that support tagging are policies, applications + // lists, and protocols lists. // // ResourceArn is a required field ResourceArn *string `min:"1" type:"string" required:"true"` @@ -3130,17 +5005,67 @@ func (s *ListTagsForResourceOutput) SetTagList(v []*Tag) *ListTagsForResourceOut return s } +// The reference rule that partially matches the ViolationTarget rule and violation +// reason. +type PartialMatch struct { + _ struct{} `type:"structure"` + + // The reference rule from the master security group of the AWS Firewall Manager + // policy. + Reference *string `type:"string"` + + // The violation reason. + TargetViolationReasons []*string `type:"list"` +} + +// String returns the string representation +func (s PartialMatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PartialMatch) GoString() string { + return s.String() +} + +// SetReference sets the Reference field's value. +func (s *PartialMatch) SetReference(v string) *PartialMatch { + s.Reference = &v + return s +} + +// SetTargetViolationReasons sets the TargetViolationReasons field's value. +func (s *PartialMatch) SetTargetViolationReasons(v []*string) *PartialMatch { + s.TargetViolationReasons = v + return s +} + // An AWS Firewall Manager policy. type Policy struct { _ struct{} `type:"structure"` - // Specifies the AWS account IDs to exclude from the policy. The IncludeMap - // values are evaluated first, with all the appropriate account IDs added to - // the policy. 
Then the accounts listed in ExcludeMap are removed, resulting - // in the final list of accounts to add to the policy. + // Specifies the AWS account IDs and AWS Organizations organizational units + // (OUs) to exclude from the policy. Specifying an OU is the equivalent of specifying + // all accounts in the OU and in any of its child OUs, including any child OUs + // and accounts that are added at a later time. + // + // You can specify inclusions or exclusions, but not both. If you specify an + // IncludeMap, AWS Firewall Manager applies the policy to all accounts specified + // by the IncludeMap, and does not evaluate any ExcludeMap specifications. If + // you do not specify an IncludeMap, then Firewall Manager applies the policy + // to all accounts except for those specified by the ExcludeMap. + // + // You can specify account IDs, OUs, or a combination: // - // The key to the map is ACCOUNT. For example, a valid ExcludeMap would be {“ACCOUNT” - // : [“accountID1”, “accountID2”]}. + // * Specify account IDs by setting the key to ACCOUNT. For example, the + // following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}. + // + // * Specify OUs by setting the key to ORG_UNIT. For example, the following + // is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}. + // + // * Specify accounts and OUs together in a single map, separated with a + // comma. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, + // “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}. ExcludeMap map[string][]*string `type:"map"` // If set to True, resources with the tags that are specified in the ResourceTag @@ -3151,19 +5076,34 @@ type Policy struct { // ExcludeResourceTags is a required field ExcludeResourceTags *bool `type:"boolean" required:"true"` - // Specifies the AWS account IDs to include in the policy. If IncludeMap is - // null, all accounts in the organization in AWS Organizations are included - // in the policy. If IncludeMap is not null, only values listed in IncludeMap - // are included in the policy. + // Specifies the AWS account IDs and AWS Organizations organizational units + // (OUs) to include in the policy. Specifying an OU is the equivalent of specifying + // all accounts in the OU and in any of its child OUs, including any child OUs + // and accounts that are added at a later time. + // + // You can specify inclusions or exclusions, but not both. If you specify an + // IncludeMap, AWS Firewall Manager applies the policy to all accounts specified + // by the IncludeMap, and does not evaluate any ExcludeMap specifications. If + // you do not specify an IncludeMap, then Firewall Manager applies the policy + // to all accounts except for those specified by the ExcludeMap. + // + // You can specify account IDs, OUs, or a combination: // - // The key to the map is ACCOUNT. For example, a valid IncludeMap would be {“ACCOUNT” - // : [“accountID1”, “accountID2”]}. + // * Specify account IDs by setting the key to ACCOUNT. For example, the + // following is a valid map: {“ACCOUNT” : [“accountID1”, “accountID2”]}. + // + // * Specify OUs by setting the key to ORG_UNIT. For example, the following + // is a valid map: {“ORG_UNIT” : [“ouid111”, “ouid112”]}. + // + // * Specify accounts and OUs together in a single map, separated with a + // comma. For example, the following is a valid map: {“ACCOUNT” : [“accountID1”, + // “accountID2”], “ORG_UNIT” : [“ouid111”, “ouid112”]}. IncludeMap map[string][]*string `type:"map"` // The ID of the AWS Firewall Manager policy. 
PolicyId *string `min:"36" type:"string"` - // The friendly name of the AWS Firewall Manager policy. + // The name of the AWS Firewall Manager policy. // // PolicyName is a required field PolicyName *string `min:"1" type:"string" required:"true"` @@ -3442,7 +5382,7 @@ type PolicyComplianceStatus struct { // The ID of the AWS Firewall Manager policy. PolicyId *string `min:"36" type:"string"` - // The friendly name of the AWS Firewall Manager policy. + // The name of the AWS Firewall Manager policy. PolicyName *string `min:"1" type:"string"` // The AWS account that created the AWS Firewall Manager policy. @@ -3511,7 +5451,7 @@ type PolicySummary struct { // The ID of the specified policy. PolicyId *string `min:"36" type:"string"` - // The friendly name of the specified policy. + // The name of the specified policy. PolicyName *string `min:"1" type:"string"` // Indicates if the policy should be automatically applied to new resources. @@ -3555,27 +5495,281 @@ func (s *PolicySummary) SetPolicyId(v string) *PolicySummary { return s } -// SetPolicyName sets the PolicyName field's value. -func (s *PolicySummary) SetPolicyName(v string) *PolicySummary { - s.PolicyName = &v - return s +// SetPolicyName sets the PolicyName field's value. +func (s *PolicySummary) SetPolicyName(v string) *PolicySummary { + s.PolicyName = &v + return s +} + +// SetRemediationEnabled sets the RemediationEnabled field's value. +func (s *PolicySummary) SetRemediationEnabled(v bool) *PolicySummary { + s.RemediationEnabled = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *PolicySummary) SetResourceType(v string) *PolicySummary { + s.ResourceType = &v + return s +} + +// SetSecurityServiceType sets the SecurityServiceType field's value. +func (s *PolicySummary) SetSecurityServiceType(v string) *PolicySummary { + s.SecurityServiceType = &v + return s +} + +// An AWS Firewall Manager protocols list. +type ProtocolsListData struct { + _ struct{} `type:"structure"` + + // The time that the AWS Firewall Manager protocols list was created. + CreateTime *time.Time `type:"timestamp"` + + // The time that the AWS Firewall Manager protocols list was last updated. + LastUpdateTime *time.Time `type:"timestamp"` + + // The ID of the AWS Firewall Manager protocols list. + ListId *string `min:"36" type:"string"` + + // The name of the AWS Firewall Manager protocols list. + // + // ListName is a required field + ListName *string `min:"1" type:"string" required:"true"` + + // A unique identifier for each update to the list. When you update the list, + // the update token must match the token of the current version of the application + // list. You can retrieve the update token by getting the list. + ListUpdateToken *string `min:"1" type:"string"` + + // A map of previous version numbers to their corresponding protocol arrays. + PreviousProtocolsList map[string][]*string `type:"map"` + + // An array of protocols in the AWS Firewall Manager protocols list. + // + // ProtocolsList is a required field + ProtocolsList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ProtocolsListData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProtocolsListData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ProtocolsListData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProtocolsListData"} + if s.ListId != nil && len(*s.ListId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("ListId", 36)) + } + if s.ListName == nil { + invalidParams.Add(request.NewErrParamRequired("ListName")) + } + if s.ListName != nil && len(*s.ListName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ListName", 1)) + } + if s.ListUpdateToken != nil && len(*s.ListUpdateToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ListUpdateToken", 1)) + } + if s.ProtocolsList == nil { + invalidParams.Add(request.NewErrParamRequired("ProtocolsList")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreateTime sets the CreateTime field's value. +func (s *ProtocolsListData) SetCreateTime(v time.Time) *ProtocolsListData { + s.CreateTime = &v + return s +} + +// SetLastUpdateTime sets the LastUpdateTime field's value. +func (s *ProtocolsListData) SetLastUpdateTime(v time.Time) *ProtocolsListData { + s.LastUpdateTime = &v + return s +} + +// SetListId sets the ListId field's value. +func (s *ProtocolsListData) SetListId(v string) *ProtocolsListData { + s.ListId = &v + return s +} + +// SetListName sets the ListName field's value. +func (s *ProtocolsListData) SetListName(v string) *ProtocolsListData { + s.ListName = &v + return s +} + +// SetListUpdateToken sets the ListUpdateToken field's value. +func (s *ProtocolsListData) SetListUpdateToken(v string) *ProtocolsListData { + s.ListUpdateToken = &v + return s +} + +// SetPreviousProtocolsList sets the PreviousProtocolsList field's value. +func (s *ProtocolsListData) SetPreviousProtocolsList(v map[string][]*string) *ProtocolsListData { + s.PreviousProtocolsList = v + return s +} + +// SetProtocolsList sets the ProtocolsList field's value. +func (s *ProtocolsListData) SetProtocolsList(v []*string) *ProtocolsListData { + s.ProtocolsList = v + return s +} + +// Details of the AWS Firewall Manager protocols list. +type ProtocolsListDataSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the specified protocols list. + ListArn *string `min:"1" type:"string"` + + // The ID of the specified protocols list. + ListId *string `min:"36" type:"string"` + + // The name of the specified protocols list. + ListName *string `min:"1" type:"string"` + + // An array of protocols in the AWS Firewall Manager protocols list. + ProtocolsList []*string `type:"list"` +} + +// String returns the string representation +func (s ProtocolsListDataSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProtocolsListDataSummary) GoString() string { + return s.String() +} + +// SetListArn sets the ListArn field's value. +func (s *ProtocolsListDataSummary) SetListArn(v string) *ProtocolsListDataSummary { + s.ListArn = &v + return s +} + +// SetListId sets the ListId field's value. +func (s *ProtocolsListDataSummary) SetListId(v string) *ProtocolsListDataSummary { + s.ListId = &v + return s +} + +// SetListName sets the ListName field's value. +func (s *ProtocolsListDataSummary) SetListName(v string) *ProtocolsListDataSummary { + s.ListName = &v + return s +} + +// SetProtocolsList sets the ProtocolsList field's value. 
+func (s *ProtocolsListDataSummary) SetProtocolsList(v []*string) *ProtocolsListDataSummary { + s.ProtocolsList = v + return s +} + +type PutAppsListInput struct { + _ struct{} `type:"structure"` + + // The details of the AWS Firewall Manager applications list to be created. + // + // AppsList is a required field + AppsList *AppsListData `type:"structure" required:"true"` + + // The tags associated with the resource. + TagList []*Tag `type:"list"` +} + +// String returns the string representation +func (s PutAppsListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAppsListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutAppsListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutAppsListInput"} + if s.AppsList == nil { + invalidParams.Add(request.NewErrParamRequired("AppsList")) + } + if s.AppsList != nil { + if err := s.AppsList.Validate(); err != nil { + invalidParams.AddNested("AppsList", err.(request.ErrInvalidParams)) + } + } + if s.TagList != nil { + for i, v := range s.TagList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAppsList sets the AppsList field's value. +func (s *PutAppsListInput) SetAppsList(v *AppsListData) *PutAppsListInput { + s.AppsList = v + return s +} + +// SetTagList sets the TagList field's value. +func (s *PutAppsListInput) SetTagList(v []*Tag) *PutAppsListInput { + s.TagList = v + return s +} + +type PutAppsListOutput struct { + _ struct{} `type:"structure"` + + // The details of the AWS Firewall Manager applications list. + AppsList *AppsListData `type:"structure"` + + // The Amazon Resource Name (ARN) of the applications list. + AppsListArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutAppsListOutput) String() string { + return awsutil.Prettify(s) } -// SetRemediationEnabled sets the RemediationEnabled field's value. -func (s *PolicySummary) SetRemediationEnabled(v bool) *PolicySummary { - s.RemediationEnabled = &v - return s +// GoString returns the string representation +func (s PutAppsListOutput) GoString() string { + return s.String() } -// SetResourceType sets the ResourceType field's value. -func (s *PolicySummary) SetResourceType(v string) *PolicySummary { - s.ResourceType = &v +// SetAppsList sets the AppsList field's value. +func (s *PutAppsListOutput) SetAppsList(v *AppsListData) *PutAppsListOutput { + s.AppsList = v return s } -// SetSecurityServiceType sets the SecurityServiceType field's value. -func (s *PolicySummary) SetSecurityServiceType(v string) *PolicySummary { - s.SecurityServiceType = &v +// SetAppsListArn sets the AppsListArn field's value. +func (s *PutAppsListOutput) SetAppsListArn(v string) *PutAppsListOutput { + s.AppsListArn = &v return s } @@ -3718,10 +5912,10 @@ func (s *PutPolicyInput) SetTagList(v []*Tag) *PutPolicyInput { type PutPolicyOutput struct { _ struct{} `type:"structure"` - // The details of the AWS Firewall Manager policy that was created. + // The details of the AWS Firewall Manager policy. Policy *Policy `type:"structure"` - // The Amazon Resource Name (ARN) of the policy that was created. + // The Amazon Resource Name (ARN) of the policy. 
PolicyArn *string `min:"1" type:"string"` } @@ -3747,10 +5941,104 @@ func (s *PutPolicyOutput) SetPolicyArn(v string) *PutPolicyOutput { return s } +type PutProtocolsListInput struct { + _ struct{} `type:"structure"` + + // The details of the AWS Firewall Manager protocols list to be created. + // + // ProtocolsList is a required field + ProtocolsList *ProtocolsListData `type:"structure" required:"true"` + + // The tags associated with the resource. + TagList []*Tag `type:"list"` +} + +// String returns the string representation +func (s PutProtocolsListInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutProtocolsListInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutProtocolsListInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutProtocolsListInput"} + if s.ProtocolsList == nil { + invalidParams.Add(request.NewErrParamRequired("ProtocolsList")) + } + if s.ProtocolsList != nil { + if err := s.ProtocolsList.Validate(); err != nil { + invalidParams.AddNested("ProtocolsList", err.(request.ErrInvalidParams)) + } + } + if s.TagList != nil { + for i, v := range s.TagList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetProtocolsList sets the ProtocolsList field's value. +func (s *PutProtocolsListInput) SetProtocolsList(v *ProtocolsListData) *PutProtocolsListInput { + s.ProtocolsList = v + return s +} + +// SetTagList sets the TagList field's value. +func (s *PutProtocolsListInput) SetTagList(v []*Tag) *PutProtocolsListInput { + s.TagList = v + return s +} + +type PutProtocolsListOutput struct { + _ struct{} `type:"structure"` + + // The details of the AWS Firewall Manager protocols list. + ProtocolsList *ProtocolsListData `type:"structure"` + + // The Amazon Resource Name (ARN) of the protocols list. + ProtocolsListArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PutProtocolsListOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutProtocolsListOutput) GoString() string { + return s.String() +} + +// SetProtocolsList sets the ProtocolsList field's value. +func (s *PutProtocolsListOutput) SetProtocolsList(v *ProtocolsListData) *PutProtocolsListOutput { + s.ProtocolsList = v + return s +} + +// SetProtocolsListArn sets the ProtocolsListArn field's value. +func (s *PutProtocolsListOutput) SetProtocolsListArn(v string) *PutProtocolsListOutput { + s.ProtocolsListArn = &v + return s +} + // The specified resource was not found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3767,17 +6055,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3785,22 +6073,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The resource tags that AWS Firewall Manager uses to determine if a particular @@ -3861,6 +6149,171 @@ func (s *ResourceTag) SetValue(v string) *ResourceTag { return s } +// Violation detail based on resource type. +type ResourceViolation struct { + _ struct{} `type:"structure"` + + // Violation details for an EC2 instance. + AwsEc2InstanceViolation *AwsEc2InstanceViolation `type:"structure"` + + // Violation details for network interface. + AwsEc2NetworkInterfaceViolation *AwsEc2NetworkInterfaceViolation `type:"structure"` + + // Violation details for security groups. + AwsVPCSecurityGroupViolation *AwsVPCSecurityGroupViolation `type:"structure"` +} + +// String returns the string representation +func (s ResourceViolation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceViolation) GoString() string { + return s.String() +} + +// SetAwsEc2InstanceViolation sets the AwsEc2InstanceViolation field's value. +func (s *ResourceViolation) SetAwsEc2InstanceViolation(v *AwsEc2InstanceViolation) *ResourceViolation { + s.AwsEc2InstanceViolation = v + return s +} + +// SetAwsEc2NetworkInterfaceViolation sets the AwsEc2NetworkInterfaceViolation field's value. +func (s *ResourceViolation) SetAwsEc2NetworkInterfaceViolation(v *AwsEc2NetworkInterfaceViolation) *ResourceViolation { + s.AwsEc2NetworkInterfaceViolation = v + return s +} + +// SetAwsVPCSecurityGroupViolation sets the AwsVPCSecurityGroupViolation field's value. +func (s *ResourceViolation) SetAwsVPCSecurityGroupViolation(v *AwsVPCSecurityGroupViolation) *ResourceViolation { + s.AwsVPCSecurityGroupViolation = v + return s +} + +// Remediation option for the rule specified in the ViolationTarget. +type SecurityGroupRemediationAction struct { + _ struct{} `type:"structure"` + + // Brief description of the action that will be performed. + Description *string `type:"string"` + + // Indicates if the current action is the default action. + IsDefaultAction *bool `type:"boolean"` + + // The remediation action that will be performed. + RemediationActionType *string `type:"string" enum:"RemediationActionType"` + + // The final state of the rule specified in the ViolationTarget after it is + // remediated. 
+ RemediationResult *SecurityGroupRuleDescription `type:"structure"` +} + +// String returns the string representation +func (s SecurityGroupRemediationAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroupRemediationAction) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *SecurityGroupRemediationAction) SetDescription(v string) *SecurityGroupRemediationAction { + s.Description = &v + return s +} + +// SetIsDefaultAction sets the IsDefaultAction field's value. +func (s *SecurityGroupRemediationAction) SetIsDefaultAction(v bool) *SecurityGroupRemediationAction { + s.IsDefaultAction = &v + return s +} + +// SetRemediationActionType sets the RemediationActionType field's value. +func (s *SecurityGroupRemediationAction) SetRemediationActionType(v string) *SecurityGroupRemediationAction { + s.RemediationActionType = &v + return s +} + +// SetRemediationResult sets the RemediationResult field's value. +func (s *SecurityGroupRemediationAction) SetRemediationResult(v *SecurityGroupRuleDescription) *SecurityGroupRemediationAction { + s.RemediationResult = v + return s +} + +// Describes a set of permissions for a security group rule. +type SecurityGroupRuleDescription struct { + _ struct{} `type:"structure"` + + // The start of the port range for the TCP and UDP protocols, or an ICMP/ICMPv6 + // type number. A value of -1 indicates all ICMP/ICMPv6 types. + FromPort *int64 `type:"long"` + + // The IPv4 ranges for the security group rule. + IPV4Range *string `type:"string"` + + // The IPv6 ranges for the security group rule. + IPV6Range *string `type:"string"` + + // The ID of the prefix list for the security group rule. + PrefixListId *string `min:"1" type:"string"` + + // The IP protocol name (tcp, udp, icmp, icmpv6) or number. + Protocol *string `type:"string"` + + // The end of the port range for the TCP and UDP protocols, or an ICMP/ICMPv6 + // code. A value of -1 indicates all ICMP/ICMPv6 codes. + ToPort *int64 `type:"long"` +} + +// String returns the string representation +func (s SecurityGroupRuleDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroupRuleDescription) GoString() string { + return s.String() +} + +// SetFromPort sets the FromPort field's value. +func (s *SecurityGroupRuleDescription) SetFromPort(v int64) *SecurityGroupRuleDescription { + s.FromPort = &v + return s +} + +// SetIPV4Range sets the IPV4Range field's value. +func (s *SecurityGroupRuleDescription) SetIPV4Range(v string) *SecurityGroupRuleDescription { + s.IPV4Range = &v + return s +} + +// SetIPV6Range sets the IPV6Range field's value. +func (s *SecurityGroupRuleDescription) SetIPV6Range(v string) *SecurityGroupRuleDescription { + s.IPV6Range = &v + return s +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *SecurityGroupRuleDescription) SetPrefixListId(v string) *SecurityGroupRuleDescription { + s.PrefixListId = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *SecurityGroupRuleDescription) SetProtocol(v string) *SecurityGroupRuleDescription { + s.Protocol = &v + return s +} + +// SetToPort sets the ToPort field's value. +func (s *SecurityGroupRuleDescription) SetToPort(v int64) *SecurityGroupRuleDescription { + s.ToPort = &v + return s +} + // Details about the security service that is being used to protect the resources. 
type SecurityServicePolicyData struct { _ struct{} `type:"structure"` @@ -3868,12 +6321,14 @@ type SecurityServicePolicyData struct { // Details about the service that are specific to the service type, in JSON // format. For service type SHIELD_ADVANCED, this is an empty string. // - // * Example: WAF ManagedServiceData": "{\"type\": \"WAF\", \"ruleGroups\": + // * Example: WAFV2 "ManagedServiceData": "{\"type\":\"WAFV2\",\"defaultAction\":{\"type\":\"ALLOW\"},\"preProcessRuleGroups\":[{\"managedRuleGroupIdentifier\":null,\"ruleGroupArn\":\"rulegrouparn\",\"overrideAction\":{\"type\":\"COUNT\"},\"excludeRules\":[{\"name\":\"EntityName\"}],\"ruleGroupType\":\"RuleGroup\"}],\"postProcessRuleGroups\":[{\"managedRuleGroupIdentifier\":{\"managedRuleGroupName\":\"AWSManagedRulesAdminProtectionRuleSet\",\"vendorName\":\"AWS\"},\"ruleGroupArn\":\"rulegrouparn\",\"overrideAction\":{\"type\":\"NONE\"},\"excludeRules\":[],\"ruleGroupType\":\"ManagedRuleGroup\"}],\"overrideCustomerWebACLAssociation\":false}" + // + // * Example: WAF Classic "ManagedServiceData": "{\"type\": \"WAF\", \"ruleGroups\": // [{\"id\": \"12345678-1bcd-9012-efga-0987654321ab\", \"overrideAction\" // : {\"type\": \"COUNT\"}}], \"defaultAction\": {\"type\": \"BLOCK\"}} // - // * Example: SECURITY_GROUPS_COMMON "SecurityServicePolicyData":{"Type":"SECURITY_GROUPS_COMMON","ManagedServiceData":"{\"type\":\"SECURITY_GROUPS_COMMON\",\"revertManualSecurityGroupChanges\":false,\"exclusiveResourceSecurityGroupManagement\":false,\"securityGroups\":[{\"id\":\" - // sg-000e55995d61a06bd\"}]}"},"RemediationEnabled":false,"ResourceType":"AWS::EC2::NetworkInterface"} + // * Example: SECURITY_GROUPS_COMMON "SecurityServicePolicyData":{"Type":"SECURITY_GROUPS_COMMON","ManagedServiceData":"{\"type\":\"SECURITY_GROUPS_COMMON\",\"revertManualSecurityGroupChanges\":false,\"exclusiveResourceSecurityGroupManagement\":false, + // \"applyToAllEC2InstanceENIs\":false,\"securityGroups\":[{\"id\":\" sg-000e55995d61a06bd\"}]}"},"RemediationEnabled":false,"ResourceType":"AWS::EC2::NetworkInterface"} // // * Example: SECURITY_GROUPS_CONTENT_AUDIT "SecurityServicePolicyData":{"Type":"SECURITY_GROUPS_CONTENT_AUDIT","ManagedServiceData":"{\"type\":\"SECURITY_GROUPS_CONTENT_AUDIT\",\"securityGroups\":[{\"id\":\" // sg-000e55995d61a06bd \"}],\"securityGroupAction\":{\"type\":\"ALLOW\"}}"},"RemediationEnabled":false,"ResourceType":"AWS::EC2::NetworkInterface"} @@ -4002,8 +6457,9 @@ func (s *Tag) SetValue(v string) *Tag { type TagResourceInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource. The Firewall Manager policy - // is the only AWS resource that supports tagging, so this ARN is a policy ARN. + // The Amazon Resource Name (ARN) of the resource to return tags for. The AWS + // Firewall Manager resources that support tagging are policies, applications + // lists, and protocols lists. // // ResourceArn is a required field ResourceArn *string `min:"1" type:"string" required:"true"` @@ -4082,8 +6538,9 @@ func (s TagResourceOutput) GoString() string { type UntagResourceInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource. The Firewall Manager policy - // is the only AWS resource that supports tagging, so this ARN is a policy ARN. + // The Amazon Resource Name (ARN) of the resource to return tags for. The AWS + // Firewall Manager resources that support tagging are policies, applications + // lists, and protocols lists. 
// // ResourceArn is a required field ResourceArn *string `min:"1" type:"string" required:"true"` @@ -4149,6 +6606,96 @@ func (s UntagResourceOutput) GoString() string { return s.String() } +// Violations for a resource based on the specified AWS Firewall Manager policy +// and AWS account. +type ViolationDetail struct { + _ struct{} `type:"structure"` + + // The AWS account that the violation details were requested for. + // + // MemberAccount is a required field + MemberAccount *string `min:"1" type:"string" required:"true"` + + // The ID of the AWS Firewall Manager policy that the violation details were + // requested for. + // + // PolicyId is a required field + PolicyId *string `min:"36" type:"string" required:"true"` + + // Brief description for the requested resource. + ResourceDescription *string `type:"string"` + + // The resource ID that the violation details were requested for. + // + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` + + // The ResourceTag objects associated with the resource. + ResourceTags []*Tag `type:"list"` + + // The resource type that the violation details were requested for. + // + // ResourceType is a required field + ResourceType *string `min:"1" type:"string" required:"true"` + + // List of violations for the requested resource. + // + // ResourceViolations is a required field + ResourceViolations []*ResourceViolation `type:"list" required:"true"` +} + +// String returns the string representation +func (s ViolationDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ViolationDetail) GoString() string { + return s.String() +} + +// SetMemberAccount sets the MemberAccount field's value. +func (s *ViolationDetail) SetMemberAccount(v string) *ViolationDetail { + s.MemberAccount = &v + return s +} + +// SetPolicyId sets the PolicyId field's value. +func (s *ViolationDetail) SetPolicyId(v string) *ViolationDetail { + s.PolicyId = &v + return s +} + +// SetResourceDescription sets the ResourceDescription field's value. +func (s *ViolationDetail) SetResourceDescription(v string) *ViolationDetail { + s.ResourceDescription = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *ViolationDetail) SetResourceId(v string) *ViolationDetail { + s.ResourceId = &v + return s +} + +// SetResourceTags sets the ResourceTags field's value. +func (s *ViolationDetail) SetResourceTags(v []*Tag) *ViolationDetail { + s.ResourceTags = v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *ViolationDetail) SetResourceType(v string) *ViolationDetail { + s.ResourceType = &v + return s +} + +// SetResourceViolations sets the ResourceViolations field's value. 
+func (s *ViolationDetail) SetResourceViolations(v []*ResourceViolation) *ViolationDetail { + s.ResourceViolations = v + return s +} + const ( // AccountRoleStatusReady is a AccountRoleStatus enum value AccountRoleStatusReady = "READY" @@ -4166,11 +6713,33 @@ const ( AccountRoleStatusDeleted = "DELETED" ) +// AccountRoleStatus_Values returns all elements of the AccountRoleStatus enum +func AccountRoleStatus_Values() []string { + return []string{ + AccountRoleStatusReady, + AccountRoleStatusCreating, + AccountRoleStatusPendingDeletion, + AccountRoleStatusDeleting, + AccountRoleStatusDeleted, + } +} + const ( // CustomerPolicyScopeIdTypeAccount is a CustomerPolicyScopeIdType enum value CustomerPolicyScopeIdTypeAccount = "ACCOUNT" + + // CustomerPolicyScopeIdTypeOrgUnit is a CustomerPolicyScopeIdType enum value + CustomerPolicyScopeIdTypeOrgUnit = "ORG_UNIT" ) +// CustomerPolicyScopeIdType_Values returns all elements of the CustomerPolicyScopeIdType enum +func CustomerPolicyScopeIdType_Values() []string { + return []string{ + CustomerPolicyScopeIdTypeAccount, + CustomerPolicyScopeIdTypeOrgUnit, + } +} + const ( // DependentServiceNameAwsconfig is a DependentServiceName enum value DependentServiceNameAwsconfig = "AWSCONFIG" @@ -4185,6 +6754,16 @@ const ( DependentServiceNameAwsvpc = "AWSVPC" ) +// DependentServiceName_Values returns all elements of the DependentServiceName enum +func DependentServiceName_Values() []string { + return []string{ + DependentServiceNameAwsconfig, + DependentServiceNameAwswaf, + DependentServiceNameAwsshieldAdvanced, + DependentServiceNameAwsvpc, + } +} + const ( // PolicyComplianceStatusTypeCompliant is a PolicyComplianceStatusType enum value PolicyComplianceStatusTypeCompliant = "COMPLIANT" @@ -4193,10 +6772,37 @@ const ( PolicyComplianceStatusTypeNonCompliant = "NON_COMPLIANT" ) +// PolicyComplianceStatusType_Values returns all elements of the PolicyComplianceStatusType enum +func PolicyComplianceStatusType_Values() []string { + return []string{ + PolicyComplianceStatusTypeCompliant, + PolicyComplianceStatusTypeNonCompliant, + } +} + +const ( + // RemediationActionTypeRemove is a RemediationActionType enum value + RemediationActionTypeRemove = "REMOVE" + + // RemediationActionTypeModify is a RemediationActionType enum value + RemediationActionTypeModify = "MODIFY" +) + +// RemediationActionType_Values returns all elements of the RemediationActionType enum +func RemediationActionType_Values() []string { + return []string{ + RemediationActionTypeRemove, + RemediationActionTypeModify, + } +} + const ( // SecurityServiceTypeWaf is a SecurityServiceType enum value SecurityServiceTypeWaf = "WAF" + // SecurityServiceTypeWafv2 is a SecurityServiceType enum value + SecurityServiceTypeWafv2 = "WAFV2" + // SecurityServiceTypeShieldAdvanced is a SecurityServiceType enum value SecurityServiceTypeShieldAdvanced = "SHIELD_ADVANCED" @@ -4210,6 +6816,18 @@ const ( SecurityServiceTypeSecurityGroupsUsageAudit = "SECURITY_GROUPS_USAGE_AUDIT" ) +// SecurityServiceType_Values returns all elements of the SecurityServiceType enum +func SecurityServiceType_Values() []string { + return []string{ + SecurityServiceTypeWaf, + SecurityServiceTypeWafv2, + SecurityServiceTypeShieldAdvanced, + SecurityServiceTypeSecurityGroupsCommon, + SecurityServiceTypeSecurityGroupsContentAudit, + SecurityServiceTypeSecurityGroupsUsageAudit, + } +} + const ( // ViolationReasonWebAclMissingRuleGroup is a ViolationReason enum value ViolationReasonWebAclMissingRuleGroup = "WEB_ACL_MISSING_RULE_GROUP" @@ -4238,3 
+6856,18 @@ const ( // ViolationReasonSecurityGroupRedundant is a ViolationReason enum value ViolationReasonSecurityGroupRedundant = "SECURITY_GROUP_REDUNDANT" ) + +// ViolationReason_Values returns all elements of the ViolationReason enum +func ViolationReason_Values() []string { + return []string{ + ViolationReasonWebAclMissingRuleGroup, + ViolationReasonResourceMissingWebAcl, + ViolationReasonResourceIncorrectWebAcl, + ViolationReasonResourceMissingShieldProtection, + ViolationReasonResourceMissingWebAclOrShieldProtection, + ViolationReasonResourceMissingSecurityGroup, + ViolationReasonResourceViolatesAuditSecurityGroup, + ViolationReasonSecurityGroupUnused, + ViolationReasonSecurityGroupRedundant, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/fms/errors.go b/vendor/github.com/aws/aws-sdk-go/service/fms/errors.go index 0fc209a4a..fc0663349 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/fms/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/fms/errors.go @@ -24,9 +24,12 @@ const ( // ErrCodeInvalidOperationException for service response error code // "InvalidOperationException". // - // The operation failed because there was nothing to do. For example, you might - // have submitted an AssociateAdminAccount request, but the account ID that - // you submitted was already set as the AWS Firewall Manager administrator. + // The operation failed because there was nothing to do or the operation wasn't + // possible. For example, you might have submitted an AssociateAdminAccount + // request for an account ID that was already set as the AWS Firewall Manager + // administrator. Or you might have tried to access a Region that's disabled + // by default, and that you need to enable for the Firewall Manager administrator + // account and for AWS Organizations before you can access it. ErrCodeInvalidOperationException = "InvalidOperationException" // ErrCodeInvalidTypeException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/fms/service.go b/vendor/github.com/aws/aws-sdk-go/service/fms/service.go index d9b74b926..5f05bfe60 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/fms/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/fms/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/forecastservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/forecastservice/api.go index 93cc9f066..1fcdbefb2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/forecastservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/forecastservice/api.go @@ -79,7 +79,7 @@ func (c *ForecastService) CreateDatasetRequest(input *CreateDatasetInput) (req * // To get a list of all your datasets, use the ListDatasets operation. // // For example Forecast datasets, see the Amazon Forecast Sample GitHub repository -// (https://github.com/aws-samples/amazon-forecast-samples/tree/master/data). +// (https://github.com/aws-samples/amazon-forecast-samples). // // The Status of a dataset must be ACTIVE before you can import training data. // Use the DescribeDataset operation to get the status. 
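As a minimal sketch of the pattern the DescribeDataset doc comment above describes — assuming a configured AWS session, a hard-coded region, and a placeholder dataset ARN, none of which come from this patch — a caller might wait for a dataset to reach ACTIVE before importing training data roughly like this:

	package main

	import (
		"fmt"
		"log"
		"time"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/aws/session"
		"github.com/aws/aws-sdk-go/service/forecastservice"
	)

	func main() {
		// Assumed setup: shared credentials and a fixed region.
		sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
		svc := forecastservice.New(sess)

		// Placeholder ARN used only for illustration.
		datasetArn := "arn:aws:forecast:us-east-1:123456789012:dataset/example"

		// Poll DescribeDataset until the dataset is ACTIVE, as the doc comments
		// advise doing before importing training data.
		for {
			out, err := svc.DescribeDataset(&forecastservice.DescribeDatasetInput{
				DatasetArn: aws.String(datasetArn),
			})
			if err != nil {
				log.Fatal(err)
			}
			if aws.StringValue(out.Status) == "ACTIVE" {
				fmt.Println("dataset is ACTIVE; training data can be imported")
				break
			}
			time.Sleep(30 * time.Second)
		}
	}

The polling interval and region are arbitrary choices for the sketch; only DescribeDataset, DescribeDatasetInput.DatasetArn, and DescribeDatasetOutput.Status are taken from the vendored API surface.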
@@ -278,8 +278,9 @@ func (c *ForecastService) CreateDatasetImportJobRequest(input *CreateDatasetImpo // to import the data to. // // You must specify a DataSource object that includes an AWS Identity and Access -// Management (IAM) role that Amazon Forecast can assume to access the data. -// For more information, see aws-forecast-iam-roles. +// Management (IAM) role that Amazon Forecast can assume to access the data, +// as Amazon Forecast makes a copy of your data and processes it in an internal +// AWS system. For more information, see aws-forecast-iam-roles. // // The training data must be in CSV format. The delimiter must be a comma (,). // @@ -287,6 +288,12 @@ func (c *ForecastService) CreateDatasetImportJobRequest(input *CreateDatasetImpo // in the S3 bucket. For the latter two cases, Amazon Forecast imports all files // up to the limit of 10,000 files. // +// Because dataset imports are not aggregated, your most recent dataset import +// is the one that is used when training a predictor or generating a forecast. +// Make sure that your most recent dataset import contains all of the data you +// want to model off of, and not just the new data collected since the previous +// import. +// // To get a list of all your dataset import jobs, filtered by specified criteria, // use the ListDatasetImportJobs operation. // @@ -388,9 +395,8 @@ func (c *ForecastService) CreateForecastRequest(input *CreateForecastInput) (req // use the CreateForecastExportJob operation. // // The range of the forecast is determined by the ForecastHorizon value, which -// you specify in the CreatePredictor request, multiplied by the DataFrequency -// value, which you specify in the CreateDataset request. When you query a forecast, -// you can request a specific date range within the forecast. +// you specify in the CreatePredictor request. When you query a forecast, you +// can request a specific date range within the forecast. // // To get a list of all your forecasts, use the ListForecasts operation. // @@ -497,7 +503,7 @@ func (c *ForecastService) CreateForecastExportJobRequest(input *CreateForecastEx // Simple Storage Service (Amazon S3) bucket. The forecast file name will match // the following conventions: // -// __ +// __ // // where the component is in Java SimpleDateFormat (yyyy-MM-ddTHH-mm-ssZ). // @@ -748,6 +754,10 @@ func (c *ForecastService) DeleteDatasetRequest(input *DeleteDatasetInput) (req * // operation. You can only delete datasets that have a status of ACTIVE or CREATE_FAILED. // To get the status use the DescribeDataset operation. // +// Forecast does not automatically update any dataset groups that contain the +// deleted dataset. In order to update the dataset group, use the operation, +// omitting the deleted dataset's ARN. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2791,6 +2801,266 @@ func (c *ForecastService) ListPredictorsPagesWithContext(ctx aws.Context, input return p.Err() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListTagsForResource +func (c *ForecastService) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon Forecast Service. +// +// Lists the tags for an Amazon Forecast resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// * InvalidInputException +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/ListTagsForResource +func (c *ForecastService) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/TagResource +func (c *ForecastService) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon Forecast Service. +// +// Associates the specified tags to a resource with the specified resourceArn. +// If existing tags on a resource are not specified in the request parameters, +// they are not changed. When a resource is deleted, the tags associated with +// that resource are also deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// * LimitExceededException +// The limit on the number of resources per account has been exceeded. +// +// * InvalidInputException +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/TagResource +func (c *ForecastService) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
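A minimal sketch (not part of the vendored SDK) of the TagResource operation added above, paired with UntagResource from the following hunks. The TagResourceInput, UntagResourceInput, and Tag shapes are the ones defined later in this file; the ARN and tag values are placeholders.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/forecastservice"
    )

    const resourceArn = "arn:aws:forecast:us-east-1:123456789012:predictor/example" // placeholder

    func main() {
        sess := session.Must(session.NewSession())
        svc := forecastservice.New(sess)

        // Per the documentation above, tags not named in the request are left
        // unchanged on the resource.
        if _, err := svc.TagResource(&forecastservice.TagResourceInput{
            ResourceArn: aws.String(resourceArn),
            Tags: []*forecastservice.Tag{
                {Key: aws.String("Environment"), Value: aws.String("dev")},
            },
        }); err != nil {
            log.Fatal(err)
        }

        // Removing a tag later only requires its key.
        if _, err := svc.UntagResource(&forecastservice.UntagResourceInput{
            ResourceArn: aws.String(resourceArn),
            TagKeys:     []*string{aws.String("Environment")},
        }); err != nil {
            log.Fatal(err)
        }
    }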
+// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/UntagResource +func (c *ForecastService) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon Forecast Service. +// +// Deletes the specified tags from a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Forecast Service's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// We can't find a resource with that Amazon Resource Name (ARN). Check the +// ARN and try again. +// +// * InvalidInputException +// We can't process the request because it includes an invalid value or a value +// that exceeds the valid range. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/forecast-2018-06-26/UntagResource +func (c *ForecastService) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ForecastService) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateDatasetGroup = "UpdateDatasetGroup" // UpdateDatasetGroupRequest generates a "aws/request.Request" representing the @@ -3076,6 +3346,37 @@ type CreateDatasetGroupInput struct { // // Domain is a required field Domain *string `type:"string" required:"true" enum:"Domain"` + + // The optional metadata that you apply to the dataset group to help you categorize + // and organize them. Each tag consists of a key and an optional value, both + // of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50. + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. 
+ // + // * Maximum key length - 128 Unicode characters in UTF-8. + // + // * Maximum value length - 256 Unicode characters in UTF-8. + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for keys as it is reserved for AWS use. You cannot edit or + // delete tag keys with this prefix. Values can have this prefix. If a tag + // value has aws as its prefix but the key does not, then Forecast considers + // it to be a user tag and will count against the limit of 50 tags. Tags + // with only the key prefix of aws do not count against your tags per resource + // limit. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -3100,6 +3401,16 @@ func (s *CreateDatasetGroupInput) Validate() error { if s.Domain == nil { invalidParams.Add(request.NewErrParamRequired("Domain")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3125,6 +3436,12 @@ func (s *CreateDatasetGroupInput) SetDomain(v string) *CreateDatasetGroupInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateDatasetGroupInput) SetTags(v []*Tag) *CreateDatasetGroupInput { + s.Tags = v + return s +} + type CreateDatasetGroupOutput struct { _ struct{} `type:"structure"` @@ -3176,6 +3493,37 @@ type CreateDatasetImportJobInput struct { // DatasetImportJobName is a required field DatasetImportJobName *string `min:"1" type:"string" required:"true"` + // The optional metadata that you apply to the dataset import job to help you + // categorize and organize them. Each tag consists of a key and an optional + // value, both of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50. + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8. + // + // * Maximum value length - 256 Unicode characters in UTF-8. + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for keys as it is reserved for AWS use. You cannot edit or + // delete tag keys with this prefix. Values can have this prefix. If a tag + // value has aws as its prefix but the key does not, then Forecast considers + // it to be a user tag and will count against the limit of 50 tags. Tags + // with only the key prefix of aws do not count against your tags per resource + // limit. + Tags []*Tag `type:"list"` + // The format of timestamps in the dataset. The format that you specify depends // on the DataFrequency specified when the dataset was created. 
The following // formats are supported @@ -3220,6 +3568,16 @@ func (s *CreateDatasetImportJobInput) Validate() error { invalidParams.AddNested("DataSource", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3245,6 +3603,12 @@ func (s *CreateDatasetImportJobInput) SetDatasetImportJobName(v string) *CreateD return s } +// SetTags sets the Tags field's value. +func (s *CreateDatasetImportJobInput) SetTags(v []*Tag) *CreateDatasetImportJobInput { + s.Tags = v + return s +} + // SetTimestampFormat sets the TimestampFormat field's value. func (s *CreateDatasetImportJobInput) SetTimestampFormat(v string) *CreateDatasetImportJobInput { s.TimestampFormat = &v @@ -3320,6 +3684,37 @@ type CreateDatasetInput struct { // // Schema is a required field Schema *Schema `type:"structure" required:"true"` + + // The optional metadata that you apply to the dataset to help you categorize + // and organize them. Each tag consists of a key and an optional value, both + // of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50. + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8. + // + // * Maximum value length - 256 Unicode characters in UTF-8. + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for keys as it is reserved for AWS use. You cannot edit or + // delete tag keys with this prefix. Values can have this prefix. If a tag + // value has aws as its prefix but the key does not, then Forecast considers + // it to be a user tag and will count against the limit of 50 tags. Tags + // with only the key prefix of aws do not count against your tags per resource + // limit. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -3360,6 +3755,16 @@ func (s *CreateDatasetInput) Validate() error { invalidParams.AddNested("Schema", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3403,6 +3808,12 @@ func (s *CreateDatasetInput) SetSchema(v *Schema) *CreateDatasetInput { return s } +// SetTags sets the Tags field's value. 
+func (s *CreateDatasetInput) SetTags(v []*Tag) *CreateDatasetInput { + s.Tags = v + return s +} + type CreateDatasetOutput struct { _ struct{} `type:"structure"` @@ -3449,10 +3860,41 @@ type CreateForecastExportJobInput struct { // // ForecastExportJobName is a required field ForecastExportJobName *string `min:"1" type:"string" required:"true"` -} -// String returns the string representation -func (s CreateForecastExportJobInput) String() string { + // The optional metadata that you apply to the forecast export job to help you + // categorize and organize them. Each tag consists of a key and an optional + // value, both of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50. + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8. + // + // * Maximum value length - 256 Unicode characters in UTF-8. + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for keys as it is reserved for AWS use. You cannot edit or + // delete tag keys with this prefix. Values can have this prefix. If a tag + // value has aws as its prefix but the key does not, then Forecast considers + // it to be a user tag and will count against the limit of 50 tags. Tags + // with only the key prefix of aws do not count against your tags per resource + // limit. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateForecastExportJobInput) String() string { return awsutil.Prettify(s) } @@ -3481,6 +3923,16 @@ func (s *CreateForecastExportJobInput) Validate() error { invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3506,6 +3958,12 @@ func (s *CreateForecastExportJobInput) SetForecastExportJobName(v string) *Creat return s } +// SetTags sets the Tags field's value. +func (s *CreateForecastExportJobInput) SetTags(v []*Tag) *CreateForecastExportJobInput { + s.Tags = v + return s +} + type CreateForecastExportJobOutput struct { _ struct{} `type:"structure"` @@ -3537,17 +3995,48 @@ type CreateForecastInput struct { // ForecastName is a required field ForecastName *string `min:"1" type:"string" required:"true"` - // The quantiles at which probabilistic forecasts are generated. You can specify - // up to 5 quantiles per forecast. Accepted values include 0.01 to 0.99 (increments - // of .01 only) and mean. The mean forecast is different from the median (0.50) - // when the distribution is not symmetric (e.g. Beta, Negative Binomial). The - // default value is ["0.1", "0.5", "0.9"]. + // The quantiles at which probabilistic forecasts are generated. You can currently + // specify up to 5 quantiles per forecast. Accepted values include 0.01 to 0.99 + // (increments of .01 only) and mean. 
The mean forecast is different from the + // median (0.50) when the distribution is not symmetric (for example, Beta and + // Negative Binomial). The default value is ["0.1", "0.5", "0.9"]. ForecastTypes []*string `min:"1" type:"list"` // The Amazon Resource Name (ARN) of the predictor to use to generate the forecast. // // PredictorArn is a required field PredictorArn *string `type:"string" required:"true"` + + // The optional metadata that you apply to the forecast to help you categorize + // and organize them. Each tag consists of a key and an optional value, both + // of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50. + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8. + // + // * Maximum value length - 256 Unicode characters in UTF-8. + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for keys as it is reserved for AWS use. You cannot edit or + // delete tag keys with this prefix. Values can have this prefix. If a tag + // value has aws as its prefix but the key does not, then Forecast considers + // it to be a user tag and will count against the limit of 50 tags. Tags + // with only the key prefix of aws do not count against your tags per resource + // limit. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -3575,6 +4064,16 @@ func (s *CreateForecastInput) Validate() error { if s.PredictorArn == nil { invalidParams.Add(request.NewErrParamRequired("PredictorArn")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3600,6 +4099,12 @@ func (s *CreateForecastInput) SetPredictorArn(v string) *CreateForecastInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateForecastInput) SetTags(v []*Tag) *CreateForecastInput { + s.Tags = v + return s +} + type CreateForecastOutput struct { _ struct{} `type:"structure"` @@ -3719,6 +4224,37 @@ type CreatePredictorInput struct { // PredictorName is a required field PredictorName *string `min:"1" type:"string" required:"true"` + // The optional metadata that you apply to the predictor to help you categorize + // and organize them. Each tag consists of a key and an optional value, both + // of which you define. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50. + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8. + // + // * Maximum value length - 256 Unicode characters in UTF-8. + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. 
+ // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for keys as it is reserved for AWS use. You cannot edit or + // delete tag keys with this prefix. Values can have this prefix. If a tag + // value has aws as its prefix but the key does not, then Forecast considers + // it to be a user tag and will count against the limit of 50 tags. Tags + // with only the key prefix of aws do not count against your tags per resource + // limit. + Tags []*Tag `type:"list"` + // The hyperparameters to override for model training. The hyperparameters that // you can override are listed in the individual algorithms. For the list of // supported algorithms, see aws-forecast-choosing-recipes. @@ -3773,6 +4309,16 @@ func (s *CreatePredictorInput) Validate() error { invalidParams.AddNested("InputDataConfig", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3840,6 +4386,12 @@ func (s *CreatePredictorInput) SetPredictorName(v string) *CreatePredictorInput return s } +// SetTags sets the Tags field's value. +func (s *CreatePredictorInput) SetTags(v []*Tag) *CreatePredictorInput { + s.Tags = v + return s +} + // SetTrainingParameters sets the TrainingParameters field's value. func (s *CreatePredictorInput) SetTrainingParameters(v map[string]*string) *CreatePredictorInput { s.TrainingParameters = v @@ -5170,7 +5722,7 @@ type DescribeForecastOutput struct { // The name of the forecast. ForecastName *string `min:"1" type:"string"` - // The quantiles at which proababilistic forecasts were generated. + // The quantiles at which probabilistic forecasts were generated. ForecastTypes []*string `min:"1" type:"list"` // Initially, the same as CreationTime (status is CREATE_PENDING). Updated when @@ -5673,9 +6225,10 @@ type Featurization struct { _ struct{} `type:"structure"` // The name of the schema attribute that specifies the data field to be featurized. - // Only the target field of the TARGET_TIME_SERIES dataset type is supported. - // For example, for the RETAIL domain, the target is demand, and for the CUSTOM - // domain, the target is target_value. + // Amazon Forecast supports the target field of the TARGET_TIME_SERIES and the + // RELATED_TIME_SERIES datasets. For example, for the RETAIL domain, the target + // is demand, and for the CUSTOM domain, the target is target_value. For more + // information, see howitworks-missing-values. // // AttributeName is a required field AttributeName *string `min:"1" type:"string" required:"true"` @@ -5744,8 +6297,8 @@ func (s *Featurization) SetFeaturizationPipeline(v []*FeaturizationMethod) *Feat // You define featurization using the FeaturizationConfig object. You specify // an array of transformations, one for each field that you want to featurize. // You then include the FeaturizationConfig object in your CreatePredictor request. -// Amazon Forecast applies the featurization to the TARGET_TIME_SERIES dataset -// before model training. +// Amazon Forecast applies the featurization to the TARGET_TIME_SERIES and RELATED_TIME_SERIES +// datasets before model training. 
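The hunks above add an optional Tags list to each Create*Input shape (dataset group, dataset, dataset import job, forecast, forecast export job, predictor), so resources can be tagged at creation time. A minimal sketch (not part of the vendored SDK) for CreateDatasetGroup; the group name and tag values are placeholders, and the Domain value uses the DomainCustom constant from this file.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/forecastservice"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := forecastservice.New(sess)

        // Names and tag values below are placeholders.
        out, err := svc.CreateDatasetGroup(&forecastservice.CreateDatasetGroupInput{
            DatasetGroupName: aws.String("example_dataset_group"),
            Domain:           aws.String(forecastservice.DomainCustom),
            Tags: []*forecastservice.Tag{
                {Key: aws.String("Project"), Value: aws.String("demand-forecasting")},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println("created:", aws.StringValue(out.DatasetGroupArn))
    }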
// // You can create multiple featurization configurations. For example, you might // call the CreatePredictor operation twice by specifying different featurization @@ -5754,7 +6307,7 @@ type FeaturizationConfig struct { _ struct{} `type:"structure"` // An array of featurization (transformation) information for the fields of - // a dataset. Only a single featurization is supported. + // a dataset. Featurizations []*Featurization `min:"1" type:"list"` // An array of dimension (field) names that specify how to group the generated @@ -5847,8 +6400,7 @@ func (s *FeaturizationConfig) SetForecastFrequency(v string) *FeaturizationConfi // Provides information about the method that featurizes (transforms) a dataset // field. The method is part of the FeaturizationPipeline of the Featurization -// object. If you don't specify FeaturizationMethodParameters, Amazon Forecast -// uses default parameters. +// object. // // The following is an example of how you specify a FeaturizationMethod object. // @@ -5856,7 +6408,8 @@ func (s *FeaturizationConfig) SetForecastFrequency(v string) *FeaturizationConfi // // "FeaturizationMethodName": "filling", // -// "FeaturizationMethodParameters": {"aggregation": "avg", "backfill": "nan"} +// "FeaturizationMethodParameters": {"aggregation": "sum", "middlefill": "zero", +// "backfill": "zero"} // // } type FeaturizationMethod struct { @@ -5867,17 +6420,30 @@ type FeaturizationMethod struct { // FeaturizationMethodName is a required field FeaturizationMethodName *string `type:"string" required:"true" enum:"FeaturizationMethodName"` - // The method parameters (key-value pairs). Specify these parameters to override - // the default values. The following list shows the parameters and their valid - // values. Bold signifies the default value. + // The method parameters (key-value pairs), which are a map of override parameters. + // Specify these parameters to override the default values. Related Time Series + // attributes do not accept aggregation parameters. + // + // The following list shows the parameters and their valid values for the "filling" + // featurization method for a Target Time Series dataset. Bold signifies the + // default value. // // * aggregation: sum, avg, first, min, max // // * frontfill: none // - // * middlefill: zero, nan (not a number) + // * middlefill: zero, nan (not a number), value, median, mean, min, max + // + // * backfill: zero, nan, value, median, mean, min, max + // + // The following list shows the parameters and their valid values for a Related + // Time Series featurization method (there are no defaults): // - // * backfill: zero, nan + // * middlefill: zero, value, median, mean, min, max + // + // * backfill: zero, value, median, mean, min, max + // + // * futurefill: zero, value, median, mean, min, max FeaturizationMethodParameters map[string]*string `min:"1" type:"map"` } @@ -6469,8 +7035,8 @@ func (s *IntegerParameterRange) SetScalingType(v string) *IntegerParameterRange // We can't process the request because it includes an invalid value or a value // that exceeds the valid range. 
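The FeaturizationMethod documentation above now covers filling for related time series as well as the target time series. A minimal sketch (not part of the vendored SDK) of a FeaturizationConfig that fills gaps in a related-time-series attribute with zeros; the attribute name and frequency are placeholders, and field names follow the shapes defined in this file.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/forecastservice"
    )

    func main() {
        // Fill missing values of the related attribute "price" with zero,
        // using the "filling" method parameters documented above.
        cfg := &forecastservice.FeaturizationConfig{
            ForecastFrequency: aws.String("D"),
            Featurizations: []*forecastservice.Featurization{
                {
                    AttributeName: aws.String("price"),
                    FeaturizationPipeline: []*forecastservice.FeaturizationMethod{
                        {
                            FeaturizationMethodName: aws.String(forecastservice.FeaturizationMethodNameFilling),
                            FeaturizationMethodParameters: map[string]*string{
                                "middlefill": aws.String("zero"),
                                "backfill":   aws.String("zero"),
                                "futurefill": aws.String("zero"),
                            },
                        },
                    },
                },
            },
        }
        if err := cfg.Validate(); err != nil {
            log.Fatal(err)
        }
        fmt.Println("featurization config is valid")
    }

In practice a value like this would be supplied as the FeaturizationConfig of a CreatePredictorInput, as the documentation above describes.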
type InvalidInputException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6487,17 +7053,17 @@ func (s InvalidInputException) GoString() string { func newErrorInvalidInputException(v protocol.ResponseMetadata) error { return &InvalidInputException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInputException) Code() string { +func (s *InvalidInputException) Code() string { return "InvalidInputException" } // Message returns the exception's message. -func (s InvalidInputException) Message() string { +func (s *InvalidInputException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6505,28 +7071,28 @@ func (s InvalidInputException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInputException) OrigErr() error { +func (s *InvalidInputException) OrigErr() error { return nil } -func (s InvalidInputException) Error() string { +func (s *InvalidInputException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInputException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInputException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInputException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInputException) RequestID() string { + return s.RespMetadata.RequestID } // The token is not valid. Tokens expire after 24 hours. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6543,17 +7109,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6561,28 +7127,28 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The limit on the number of resources per account has been exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6599,17 +7165,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6617,22 +7183,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListDatasetGroupsInput struct { @@ -7286,6 +7852,70 @@ func (s *ListPredictorsOutput) SetPredictors(v []*PredictorSummary) *ListPredict return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the resource for which to + // list the tags. Currently, the supported resources are Forecast dataset groups, + // datasets, dataset import jobs, predictors, forecasts, and forecast export + // jobs. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags for the resource. 
+ Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + // Provides metrics that are used to evaluate the performance of a predictor. // This object is part of the WindowSummary object. type Metrics struct { @@ -7575,8 +8205,8 @@ func (s *PredictorSummary) SetStatus(v string) *PredictorSummary { // There is already a resource with this name. Try again with a different name. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -7593,17 +8223,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7611,28 +8241,28 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource is in use. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -7649,17 +8279,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. 
-func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7667,29 +8297,29 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // We can't find a resource with that Amazon Resource Name (ARN). Check the // ARN and try again. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -7706,17 +8336,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7724,22 +8354,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The path to the file(s) in an Amazon Simple Storage Service (Amazon S3) bucket, @@ -7821,7 +8451,7 @@ type Schema struct { _ struct{} `type:"structure"` // An array of attributes specifying the name and type of each field in a dataset. - Attributes []*SchemaAttribute `type:"list"` + Attributes []*SchemaAttribute `min:"1" type:"list"` } // String returns the string representation @@ -7837,6 +8467,9 @@ func (s Schema) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *Schema) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Schema"} + if s.Attributes != nil && len(s.Attributes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Attributes", 1)) + } if s.Attributes != nil { for i, v := range s.Attributes { if v == nil { @@ -8003,6 +8636,35 @@ func (s *Statistics) SetStddev(v float64) *Statistics { // all data in the datasets should belong to the same country as the calendar. // For the holiday calendar data, see the Jollyday (http://jollyday.sourceforge.net/data.html) // web site. +// +// India and Korea's holidays are not included in the Jollyday library, but +// both are supported by Amazon Forecast. Their holidays are: +// +// "IN" - INDIA +// +// * JANUARY 26 - REPUBLIC DAY +// +// * AUGUST 15 - INDEPENDENCE DAY +// +// * OCTOBER 2 GANDHI'S BIRTHDAY +// +// "KR" - KOREA +// +// * JANUARY 1 - NEW YEAR +// +// * MARCH 1 - INDEPENDENCE MOVEMENT DAY +// +// * MAY 5 - CHILDREN'S DAY +// +// * JUNE 6 - MEMORIAL DAY +// +// * AUGUST 15 - LIBERATION DAY +// +// * OCTOBER 3 - NATIONAL FOUNDATION DAY +// +// * OCTOBER 9 - HANGEUL DAY +// +// * DECEMBER 25 - CHRISTMAS DAY type SupplementaryFeature struct { _ struct{} `type:"structure"` @@ -8013,15 +8675,69 @@ type SupplementaryFeature struct { // One of the following 2 letter country codes: // + // * "AR" - ARGENTINA + // + // * "AT" - AUSTRIA + // // * "AU" - AUSTRALIA // + // * "BE" - BELGIUM + // + // * "BR" - BRAZIL + // + // * "CA" - CANADA + // + // * "CN" - CHINA + // + // * "CZ" - CZECH REPUBLIC + // + // * "DK" - DENMARK + // + // * "EC" - ECUADOR + // + // * "FI" - FINLAND + // + // * "FR" - FRANCE + // // * "DE" - GERMANY // + // * "HU" - HUNGARY + // + // * "IE" - IRELAND + // + // * "IN" - INDIA + // + // * "IT" - ITALY + // // * "JP" - JAPAN // - // * "US" - UNITED_STATES + // * "KR" - KOREA + // + // * "LU" - LUXEMBOURG + // + // * "MX" - MEXICO + // + // * "NL" - NETHERLANDS // - // * "UK" - UNITED_KINGDOM + // * "NO" - NORWAY + // + // * "PL" - POLAND + // + // * "PT" - PORTUGAL + // + // * "RU" - RUSSIA + // + // * "ZA" - SOUTH AFRICA + // + // * "ES" - SPAIN + // + // * "SE" - SWEDEN + // + // * "CH" - SWITZERLAND + // + // * "US" - UNITED STATES + // + // * "UK" - UNITED KINGDOM // // Value is a required field Value *string `type:"string" required:"true"` @@ -8068,6 +8784,197 @@ func (s *SupplementaryFeature) SetValue(v string) *SupplementaryFeature { return s } +// The optional metadata that you apply to a resource to help you categorize +// and organize them. Each tag consists of a key and an optional value, both +// of which you define. +// +// The following basic restrictions apply to tags: +// +// * Maximum number of tags per resource - 50. +// +// * For each resource, each tag key must be unique, and each tag key can +// have only one value. +// +// * Maximum key length - 128 Unicode characters in UTF-8. +// +// * Maximum value length - 256 Unicode characters in UTF-8. +// +// * If your tagging schema is used across multiple services and resources, +// remember that other services may have restrictions on allowed characters. +// Generally allowed characters are: letters, numbers, and spaces representable +// in UTF-8, and the following characters: + - = . _ : / @. +// +// * Tag keys and values are case sensitive. +// +// * Do not use aws:, AWS:, or any upper or lowercase combination of such +// as a prefix for keys as it is reserved for AWS use. You cannot edit or +// delete tag keys with this prefix. Values can have this prefix. 
If a tag +// value has aws as its prefix but the key does not, then Forecast considers +// it to be a user tag and will count against the limit of 50 tags. Tags +// with only the key prefix of aws do not count against your tags per resource +// limit. +type Tag struct { + _ struct{} `type:"structure"` + + // One part of a key-value pair that makes up a tag. A key is a general label + // that acts like a category for more specific tag values. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The optional part of a key-value pair that makes up a tag. A value acts as + // a descriptor within a tag category (key). + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the resource for which to + // list the tags. Currently, the supported resources are Forecast dataset groups, + // datasets, dataset import jobs, predictors, forecasts, and forecast export + // jobs. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` + + // The tags to add to the resource. A tag is an array of key-value pairs. + // + // The following basic restrictions apply to tags: + // + // * Maximum number of tags per resource - 50. + // + // * For each resource, each tag key must be unique, and each tag key can + // have only one value. + // + // * Maximum key length - 128 Unicode characters in UTF-8. + // + // * Maximum value length - 256 Unicode characters in UTF-8. + // + // * If your tagging schema is used across multiple services and resources, + // remember that other services may have restrictions on allowed characters. + // Generally allowed characters are: letters, numbers, and spaces representable + // in UTF-8, and the following characters: + - = . _ : / @. + // + // * Tag keys and values are case sensitive. + // + // * Do not use aws:, AWS:, or any upper or lowercase combination of such + // as a prefix for keys as it is reserved for AWS use. You cannot edit or + // delete tag keys with this prefix. Values can have this prefix. If a tag + // value has aws as its prefix but the key does not, then Forecast considers + // it to be a user tag and will count against the limit of 50 tags. Tags + // with only the key prefix of aws do not count against your tags per resource + // limit. 
+ // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // The status, start time, and end time of a backtest, as well as a failure // reason if applicable. type TestWindowSummary struct { @@ -8126,6 +9033,74 @@ func (s *TestWindowSummary) SetTestWindowStart(v time.Time) *TestWindowSummary { return s } +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the resource for which to + // list the tags. Currently, the supported resources are Forecast dataset groups, + // datasets, dataset import jobs, predictors, forecasts, and forecast exports. + // + // ResourceArn is a required field + ResourceArn *string `type:"string" required:"true"` + + // The keys of the tags to be removed. + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. 
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateDatasetGroupInput struct { _ struct{} `type:"structure"` @@ -8312,6 +9287,16 @@ const ( AttributeTypeTimestamp = "timestamp" ) +// AttributeType_Values returns all elements of the AttributeType enum +func AttributeType_Values() []string { + return []string{ + AttributeTypeString, + AttributeTypeInteger, + AttributeTypeFloat, + AttributeTypeTimestamp, + } +} + const ( // DatasetTypeTargetTimeSeries is a DatasetType enum value DatasetTypeTargetTimeSeries = "TARGET_TIME_SERIES" @@ -8323,6 +9308,15 @@ const ( DatasetTypeItemMetadata = "ITEM_METADATA" ) +// DatasetType_Values returns all elements of the DatasetType enum +func DatasetType_Values() []string { + return []string{ + DatasetTypeTargetTimeSeries, + DatasetTypeRelatedTimeSeries, + DatasetTypeItemMetadata, + } +} + const ( // DomainRetail is a Domain enum value DomainRetail = "RETAIL" @@ -8346,6 +9340,19 @@ const ( DomainMetrics = "METRICS" ) +// Domain_Values returns all elements of the Domain enum +func Domain_Values() []string { + return []string{ + DomainRetail, + DomainCustom, + DomainInventoryPlanning, + DomainEc2Capacity, + DomainWorkForce, + DomainWebTraffic, + DomainMetrics, + } +} + const ( // EvaluationTypeSummary is a EvaluationType enum value EvaluationTypeSummary = "SUMMARY" @@ -8354,11 +9361,26 @@ const ( EvaluationTypeComputed = "COMPUTED" ) +// EvaluationType_Values returns all elements of the EvaluationType enum +func EvaluationType_Values() []string { + return []string{ + EvaluationTypeSummary, + EvaluationTypeComputed, + } +} + const ( // FeaturizationMethodNameFilling is a FeaturizationMethodName enum value FeaturizationMethodNameFilling = "filling" ) +// FeaturizationMethodName_Values returns all elements of the FeaturizationMethodName enum +func FeaturizationMethodName_Values() []string { + return []string{ + FeaturizationMethodNameFilling, + } +} + const ( // FilterConditionStringIs is a FilterConditionString enum value FilterConditionStringIs = "IS" @@ -8367,6 +9389,14 @@ const ( FilterConditionStringIsNot = "IS_NOT" ) +// FilterConditionString_Values returns all elements of the FilterConditionString enum +func FilterConditionString_Values() []string { + return []string{ + FilterConditionStringIs, + FilterConditionStringIsNot, + } +} + const ( // ScalingTypeAuto is a ScalingType enum value ScalingTypeAuto = "Auto" @@ -8380,3 +9410,13 @@ const ( // ScalingTypeReverseLogarithmic is a ScalingType enum value ScalingTypeReverseLogarithmic = "ReverseLogarithmic" ) + +// ScalingType_Values returns all elements of the ScalingType enum +func ScalingType_Values() []string { + return []string{ + ScalingTypeAuto, + ScalingTypeLinear, + ScalingTypeLogarithmic, + ScalingTypeReverseLogarithmic, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go index 34d2e4a11..55b1aaccd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" 
"github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/fsx/api.go b/vendor/github.com/aws/aws-sdk-go/service/fsx/api.go index eb676ca15..41919d1cd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/fsx/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/fsx/api.go @@ -158,10 +158,22 @@ func (c *FSx) CreateBackupRequest(input *CreateBackupInput) (req *request.Reques // CreateBackup API operation for Amazon FSx. // -// Creates a backup of an existing Amazon FSx for Windows File Server file system. -// Creating regular backups for your file system is a best practice that complements -// the replication that Amazon FSx for Windows File Server performs for your -// file system. It also enables you to restore from user modification of data. +// Creates a backup of an existing Amazon FSx file system. Creating regular +// backups for your file system is a best practice, enabling you to restore +// a file system from a backup if an issue arises with the original file system. +// +// For Amazon FSx for Lustre file systems, you can create a backup only for +// file systems with the following configuration: +// +// * a Persistent deployment type +// +// * is not linked to a data respository. +// +// For more information about backing up Amazon FSx for Lustre file systems, +// see Working with FSx for Lustre backups (https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html). +// +// For more information about backing up Amazon FSx for Lustre file systems, +// see Working with FSx for Windows backups (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html). // // If a backup with the specified client request token exists, and the parameters // match, this operation returns the description of the existing backup. If @@ -181,10 +193,9 @@ func (c *FSx) CreateBackupRequest(input *CreateBackupInput) (req *request.Reques // created a backup, the operation returns a successful result because all the // parameters are the same. // -// The CreateFileSystem operation returns while the backup's lifecycle state -// is still CREATING. You can check the file system creation status by calling -// the DescribeBackups operation, which returns the backup state along with -// other information. +// The CreateBackup operation returns while the backup's lifecycle state is +// still CREATING. You can check the backup creation status by calling the DescribeBackups +// operation, which returns the backup state along with other information. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -293,8 +304,8 @@ func (c *FSx) CreateDataRepositoryTaskRequest(input *CreateDataRepositoryTaskInp // repository. A CreateDataRepositoryTask operation will fail if a data repository // is not linked to the FSx file system. To learn more about data repository // tasks, see Using Data Repository Tasks (https://docs.aws.amazon.com/fsx/latest/LustreGuide/data-repository-tasks.html). -// To learn more about linking a data repository to your file system, see Step -// 1: Create Your Amazon FSx for Lustre File System (https://docs.aws.amazon.com/fsx/latest/LustreGuide/getting-started-step1.html). 
+// To learn more about linking a data repository to your file system, see Setting +// the Export Prefix (https://docs.aws.amazon.com/fsx/latest/LustreGuide/export-data-repository.html#export-prefix). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -538,8 +549,7 @@ func (c *FSx) CreateFileSystemFromBackupRequest(input *CreateFileSystemFromBacku // CreateFileSystemFromBackup API operation for Amazon FSx. // -// Creates a new Amazon FSx file system from an existing Amazon FSx for Windows -// File Server backup. +// Creates a new Amazon FSx file system from an existing Amazon FSx backup. // // If a file system with the specified client request token exists and the parameters // match, this operation returns the description of the file system. If a client @@ -597,6 +607,10 @@ func (c *FSx) CreateFileSystemFromBackupRequest(input *CreateFileSystemFromBacku // of IDs for security groups that are either invalid or not part of the VPC // specified. // +// * InvalidPerUnitStorageThroughput +// An invalid value for PerUnitStorageThroughput was provided. Please create +// your file system again, using a valid value. +// // * ServiceLimitExceeded // An error indicating that a particular service limit was exceeded. You can // increase some service limits by contacting AWS Support. @@ -676,8 +690,8 @@ func (c *FSx) DeleteBackupRequest(input *DeleteBackupInput) (req *request.Reques // DeleteBackup API operation for Amazon FSx. // -// Deletes an Amazon FSx for Windows File Server backup, deleting its contents. -// After deletion, the backup no longer exists, and its data is gone. +// Deletes an Amazon FSx backup, deleting its contents. After deletion, the +// backup no longer exists, and its data is gone. // // The DeleteBackup call returns instantly. The backup will not show up in later // DescribeBackups calls. @@ -898,10 +912,9 @@ func (c *FSx) DescribeBackupsRequest(input *DescribeBackupsInput) (req *request. // DescribeBackups API operation for Amazon FSx. // -// Returns the description of specific Amazon FSx for Windows File Server backups, -// if a BackupIds value is provided for that backup. Otherwise, it returns all -// backups owned by your AWS account in the AWS Region of the endpoint that -// you're calling. +// Returns the description of specific Amazon FSx backups, if a BackupIds value +// is provided for that backup. Otherwise, it returns all backups owned by your +// AWS account in the AWS Region of the endpoint that you're calling. // // When retrieving all backups, you can optionally specify the MaxResults parameter // to limit the number of backups in a response. If more backups remain, Amazon @@ -1683,7 +1696,33 @@ func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques // UpdateFileSystem API operation for Amazon FSx. // -// Updates a file system configuration. +// Use this operation to update the configuration of an existing Amazon FSx +// file system. You can update multiple properties in a single request. 
+// +// For Amazon FSx for Windows File Server file systems, you can update the following +// properties: +// +// * AutomaticBackupRetentionDays +// +// * DailyAutomaticBackupStartTime +// +// * SelfManagedActiveDirectoryConfiguration +// +// * StorageCapacity +// +// * ThroughputCapacity +// +// * WeeklyMaintenanceStartTime +// +// For Amazon FSx for Lustre file systems, you can update the following properties: +// +// * AutoImportPolicy +// +// * AutomaticBackupRetentionDays +// +// * DailyAutomaticBackupStartTime +// +// * WeeklyMaintenanceStartTime // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1713,6 +1752,10 @@ func (c *FSx) UpdateFileSystemRequest(input *UpdateFileSystemInput) (req *reques // * MissingFileSystemConfiguration // A file system configuration is required for this operation. // +// * ServiceLimitExceeded +// An error indicating that a particular service limit was exceeded. You can +// increase some service limits by contacting AWS Support. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/fsx-2018-03-01/UpdateFileSystem func (c *FSx) UpdateFileSystem(input *UpdateFileSystemInput) (*UpdateFileSystemOutput, error) { req, out := c.UpdateFileSystemRequest(input) @@ -1772,8 +1815,8 @@ func (s *ActiveDirectoryBackupAttributes) SetDomainName(v string) *ActiveDirecto // An Active Directory error. type ActiveDirectoryError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The directory ID of the directory that an error pertains to. // @@ -1799,17 +1842,17 @@ func (s ActiveDirectoryError) GoString() string { func newErrorActiveDirectoryError(v protocol.ResponseMetadata) error { return &ActiveDirectoryError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ActiveDirectoryError) Code() string { +func (s *ActiveDirectoryError) Code() string { return "ActiveDirectoryError" } // Message returns the exception's message. -func (s ActiveDirectoryError) Message() string { +func (s *ActiveDirectoryError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1817,26 +1860,146 @@ func (s ActiveDirectoryError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ActiveDirectoryError) OrigErr() error { +func (s *ActiveDirectoryError) OrigErr() error { return nil } -func (s ActiveDirectoryError) Error() string { +func (s *ActiveDirectoryError) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ActiveDirectoryError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ActiveDirectoryError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ActiveDirectoryError) RequestID() string { - return s.respMetadata.RequestID +func (s *ActiveDirectoryError) RequestID() string { + return s.RespMetadata.RequestID +} + +// Describes a specific Amazon FSx Administrative Action for the current Windows +// file system. 
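// Illustrative sketch (editorial, not part of the generated SDK code in this
// patch): the hunk above moves the FSx modeled errors to pointer receivers
// with an exported RespMetadata field, so they can be matched with errors.As.
// The sketch assumes UpdateFileSystemInput carries the properties listed in
// the UpdateFileSystem documentation (FileSystemId, StorageCapacity); the ID
// and size are placeholders.
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	_, err := svc.UpdateFileSystem(&fsx.UpdateFileSystemInput{
		FileSystemId:    aws.String("fs-0123456789abcdef0"),
		StorageCapacity: aws.Int64(4800),
	})
	if err != nil {
		// Modeled exceptions are now concrete pointer types.
		var limitErr *fsx.ServiceLimitExceeded
		if errors.As(err, &limitErr) {
			fmt.Println("service limit exceeded:", limitErr.Message())
			return
		}
		log.Fatal(err)
	}
}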
+type AdministrativeAction struct { + _ struct{} `type:"structure"` + + // Describes the type of administrative action, as follows: + // + // * FILE_SYSTEM_UPDATE - A file system update administrative action initiated + // by the user from the Amazon FSx console, API (UpdateFileSystem), or CLI + // (update-file-system). A + // + // * STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase + // a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION + // task starts. Storage optimization is the process of migrating the file + // system data to the new, larger disks. You can track the storage migration + // progress using the ProgressPercent property. When STORAGE_OPTIMIZATION + // completes successfully, the parent FILE_SYSTEM_UPDATE action status changes + // to COMPLETED. For more information, see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). + AdministrativeActionType *string `type:"string" enum:"AdministrativeActionType"` + + // Provides information about a failed administrative action. + FailureDetails *AdministrativeActionFailureDetails `type:"structure"` + + // Provides the percent complete of a STORAGE_OPTIMIZATION administrative action. + ProgressPercent *int64 `type:"integer"` + + // Time that the administrative action request was received. + RequestTime *time.Time `type:"timestamp"` + + // Describes the status of the administrative action, as follows: + // + // * FAILED - Amazon FSx failed to process the administrative action successfully. + // + // * IN_PROGRESS - Amazon FSx is processing the administrative action. + // + // * PENDING - Amazon FSx is waiting to process the administrative action. + // + // * COMPLETED - Amazon FSx has finished processing the administrative task. + // + // * UPDATED_OPTIMIZING - For a storage capacity increase update, Amazon + // FSx has updated the file system with the new storage capacity, and is + // now performing the storage optimization process. For more information, + // see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). + Status *string `type:"string" enum:"Status"` + + // Describes the target StorageCapacity or ThroughputCapacity value provided + // in the UpdateFileSystem operation. Returned for FILE_SYSTEM_UPDATE administrative + // actions. + TargetFileSystemValues *FileSystem `type:"structure"` +} + +// String returns the string representation +func (s AdministrativeAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdministrativeAction) GoString() string { + return s.String() +} + +// SetAdministrativeActionType sets the AdministrativeActionType field's value. +func (s *AdministrativeAction) SetAdministrativeActionType(v string) *AdministrativeAction { + s.AdministrativeActionType = &v + return s +} + +// SetFailureDetails sets the FailureDetails field's value. +func (s *AdministrativeAction) SetFailureDetails(v *AdministrativeActionFailureDetails) *AdministrativeAction { + s.FailureDetails = v + return s +} + +// SetProgressPercent sets the ProgressPercent field's value. +func (s *AdministrativeAction) SetProgressPercent(v int64) *AdministrativeAction { + s.ProgressPercent = &v + return s +} + +// SetRequestTime sets the RequestTime field's value. 
+func (s *AdministrativeAction) SetRequestTime(v time.Time) *AdministrativeAction { + s.RequestTime = &v + return s } -// A backup of an Amazon FSx for Windows File Server file system. You can create -// a new file system from a backup to protect against data loss. +// SetStatus sets the Status field's value. +func (s *AdministrativeAction) SetStatus(v string) *AdministrativeAction { + s.Status = &v + return s +} + +// SetTargetFileSystemValues sets the TargetFileSystemValues field's value. +func (s *AdministrativeAction) SetTargetFileSystemValues(v *FileSystem) *AdministrativeAction { + s.TargetFileSystemValues = v + return s +} + +// Provides information about a failed administrative action. +type AdministrativeActionFailureDetails struct { + _ struct{} `type:"structure"` + + // Error message providing details about the failure. + Message *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AdministrativeActionFailureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdministrativeActionFailureDetails) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *AdministrativeActionFailureDetails) SetMessage(v string) *AdministrativeActionFailureDetails { + s.Message = &v + return s +} + +// A backup of an Amazon FSx for file system. type Backup struct { _ struct{} `type:"structure"` @@ -1863,13 +2026,23 @@ type Backup struct { // FileSystem is a required field FileSystem *FileSystem `type:"structure" required:"true"` - // The ID of the AWS Key Management Service (AWS KMS) key used to encrypt this - // backup of the Amazon FSx for Windows file system's data at rest. Amazon FSx - // for Lustre does not support KMS encryption. + // The ID of the AWS Key Management Service (AWS KMS) key used to encrypt the + // backup of the Amazon FSx file system's data at rest. KmsKeyId *string `min:"1" type:"string"` // The lifecycle status of the backup. // + // * AVAILABLE - The backup is fully available. + // + // * CREATING - FSx is creating the backup. + // + // * TRANSFERRING - For Lustre file systems only; FSx is transferring the + // backup to S3. + // + // * DELETED - The backup was deleted is no longer available. + // + // * FAILED - Amazon FSx could not complete the backup. + // // Lifecycle is a required field Lifecycle *string `type:"string" required:"true" enum:"BackupLifecycle"` @@ -1882,7 +2055,7 @@ type Backup struct { // Tags associated with a particular file system. Tags []*Tag `min:"1" type:"list"` - // The type of the backup. + // The type of the file system backup. // // Type is a required field Type *string `type:"string" required:"true" enum:"BackupType"` @@ -1991,8 +2164,8 @@ func (s *BackupFailureDetails) SetMessage(v string) *BackupFailureDetails { // Another backup is already under way. Wait for completion before initiating // additional backups of this file system. type BackupInProgress struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -2010,17 +2183,17 @@ func (s BackupInProgress) GoString() string { func newErrorBackupInProgress(v protocol.ResponseMetadata) error { return &BackupInProgress{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s BackupInProgress) Code() string { +func (s *BackupInProgress) Code() string { return "BackupInProgress" } // Message returns the exception's message. -func (s BackupInProgress) Message() string { +func (s *BackupInProgress) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2028,28 +2201,28 @@ func (s BackupInProgress) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BackupInProgress) OrigErr() error { +func (s *BackupInProgress) OrigErr() error { return nil } -func (s BackupInProgress) Error() string { +func (s *BackupInProgress) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BackupInProgress) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BackupInProgress) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BackupInProgress) RequestID() string { - return s.respMetadata.RequestID +func (s *BackupInProgress) RequestID() string { + return s.RespMetadata.RequestID } // No Amazon FSx backups were found based upon the supplied parameters. type BackupNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -2067,17 +2240,17 @@ func (s BackupNotFound) GoString() string { func newErrorBackupNotFound(v protocol.ResponseMetadata) error { return &BackupNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BackupNotFound) Code() string { +func (s *BackupNotFound) Code() string { return "BackupNotFound" } // Message returns the exception's message. -func (s BackupNotFound) Message() string { +func (s *BackupNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2085,28 +2258,28 @@ func (s BackupNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BackupNotFound) OrigErr() error { +func (s *BackupNotFound) OrigErr() error { return nil } -func (s BackupNotFound) Error() string { +func (s *BackupNotFound) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BackupNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BackupNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BackupNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *BackupNotFound) RequestID() string { + return s.RespMetadata.RequestID } // You can't delete a backup while it's being used to restore a file system. type BackupRestoring struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The ID of a file system being restored from the backup. FileSystemId *string `min:"11" type:"string"` @@ -2127,17 +2300,17 @@ func (s BackupRestoring) GoString() string { func newErrorBackupRestoring(v protocol.ResponseMetadata) error { return &BackupRestoring{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s BackupRestoring) Code() string { +func (s *BackupRestoring) Code() string { return "BackupRestoring" } // Message returns the exception's message. -func (s BackupRestoring) Message() string { +func (s *BackupRestoring) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2145,28 +2318,28 @@ func (s BackupRestoring) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BackupRestoring) OrigErr() error { +func (s *BackupRestoring) OrigErr() error { return nil } -func (s BackupRestoring) Error() string { +func (s *BackupRestoring) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s BackupRestoring) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BackupRestoring) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BackupRestoring) RequestID() string { - return s.respMetadata.RequestID +func (s *BackupRestoring) RequestID() string { + return s.RespMetadata.RequestID } // A generic error indicating a failure with a client request. type BadRequest struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -2184,17 +2357,17 @@ func (s BadRequest) GoString() string { func newErrorBadRequest(v protocol.ResponseMetadata) error { return &BadRequest{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequest) Code() string { +func (s *BadRequest) Code() string { return "BadRequest" } // Message returns the exception's message. -func (s BadRequest) Message() string { +func (s *BadRequest) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2202,22 +2375,22 @@ func (s BadRequest) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequest) OrigErr() error { +func (s *BadRequest) OrigErr() error { return nil } -func (s BadRequest) Error() string { +func (s *BadRequest) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequest) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequest) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequest) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequest) RequestID() string { + return s.RespMetadata.RequestID } // Cancels a data repository task. @@ -2411,8 +2584,11 @@ type CreateBackupInput struct { // FileSystemId is a required field FileSystemId *string `min:"11" type:"string" required:"true"` - // The tags to apply to the backup at backup creation. The key value of the - // Name tag appears in the console as the backup name. + // (Optional) The tags to apply to the backup at backup creation. The key value + // of the Name tag appears in the console as the backup name. If you have set + // CopyTagsToBackups to true, and you specify one or more tags using the CreateBackup + // action, no existing file system tags are copied from the file system to the + // backup. 
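// Illustrative sketch (editorial, not part of the generated SDK code in this
// patch): combines the CreateBackupInput tagging behavior documented above
// with the backup Lifecycle values added earlier in this hunk. A user-initiated
// backup is created with an explicit Name tag and then polled via
// DescribeBackups until it leaves CREATING/TRANSFERRING. The file system ID
// is a placeholder; lifecycle values are written as the raw strings from the
// documentation.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	out, err := svc.CreateBackup(&fsx.CreateBackupInput{
		FileSystemId: aws.String("fs-0123456789abcdef0"),
		// Because explicit tags are supplied, no file system tags are copied
		// to the backup even when CopyTagsToBackups is true (see doc above).
		Tags: []*fsx.Tag{{Key: aws.String("Name"), Value: aws.String("nightly")}},
	})
	if err != nil {
		log.Fatal(err)
	}

	backupID := out.Backup.BackupId
	for {
		desc, err := svc.DescribeBackups(&fsx.DescribeBackupsInput{
			BackupIds: []*string{backupID},
		})
		if err != nil {
			log.Fatal(err)
		}
		if len(desc.Backups) == 0 {
			log.Fatal("backup not found")
		}
		lifecycle := aws.StringValue(desc.Backups[0].Lifecycle)
		fmt.Println("backup lifecycle:", lifecycle)
		if lifecycle != "CREATING" && lifecycle != "TRANSFERRING" {
			break
		}
		time.Sleep(30 * time.Second)
	}
}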
Tags []*Tag `min:"1" type:"list"` } @@ -2515,12 +2691,16 @@ type CreateDataRepositoryTaskInput struct { // (Optional) The path or paths on the Amazon FSx file system to use when the // data repository task is processed. The default path is the file system root - // directory. + // directory. The paths you provide need to be relative to the mount point of + // the file system. If the mount point is /mnt/fsx and /mnt/fsx/path1 is a directory + // or file on the file system you want to export, then the path to provide is + // path1. If a path that you provide isn't valid, the task fails. Paths []*string `type:"list"` // Defines whether or not Amazon FSx provides a CompletionReport once the task // has completed. A CompletionReport provides a detailed report on the files // that Amazon FSx processed that meet the criteria specified by the Scope parameter. + // For more information, see Working with Task Completion Reports (https://docs.aws.amazon.com/fsx/latest/LustreGuide/task-completion-report.html). // // Report is a required field Report *CompletionReport `type:"structure" required:"true"` @@ -2656,19 +2836,47 @@ type CreateFileSystemFromBackupInput struct { // BackupId is a required field BackupId *string `min:"12" type:"string" required:"true"` - // (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to - // ensure idempotent creation. This string is automatically filled on your behalf - // when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK. + // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // creation. This string is automatically filled on your behalf when you use + // the AWS Command Line Interface (AWS CLI) or an AWS SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + // The Lustre configuration for the file system being created. + LustreConfiguration *CreateFileSystemLustreConfiguration `type:"structure"` + // A list of IDs for the security groups that apply to the specified network // interfaces created for file system access. These security groups apply to - // all network interfaces. This value isn't returned in later describe requests. + // all network interfaces. This value isn't returned in later DescribeFileSystem + // requests. SecurityGroupIds []*string `type:"list"` - // A list of IDs for the subnets that the file system will be accessible from. - // Currently, you can specify only one subnet. The file server is also launched - // in that subnet's Availability Zone. + // Sets the storage type for the Windows file system you're creating from a + // backup. Valid values are SSD and HDD. + // + // * Set to SSD to use solid state drive storage. Supported on all Windows + // deployment types. + // + // * Set to HDD to use hard disk drive storage. Supported on SINGLE_AZ_2 + // and MULTI_AZ_1 Windows file system deployment types. + // + // Default value is SSD. + // + // HDD and SSD storage types have different minimum storage capacity requirements. + // A restored file system's storage capacity is tied to the file system that + // was backed up. You can create a file system that uses HDD storage from a + // backup of a file system that used SSD storage only if the original SSD file + // system had a storage capacity of at least 2000 GiB. + StorageType *string `type:"string" enum:"StorageType"` + + // Specifies the IDs of the subnets that the file system will be accessible + // from. 
For Windows MULTI_AZ_1 file system deployment types, provide exactly + // two subnet IDs, one for the preferred file server and one for the standby + // file server. You specify one of these subnets as the preferred subnet using + // the WindowsConfiguration > PreferredSubnetID property. + // + // For Windows SINGLE_AZ_1 and SINGLE_AZ_2 deployment types and Lustre file + // systems, provide exactly one subnet ID. The file server is launched in that + // subnet's Availability Zone. // // SubnetIds is a required field SubnetIds []*string `type:"list" required:"true"` @@ -2709,6 +2917,11 @@ func (s *CreateFileSystemFromBackupInput) Validate() error { if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } + if s.LustreConfiguration != nil { + if err := s.LustreConfiguration.Validate(); err != nil { + invalidParams.AddNested("LustreConfiguration", err.(request.ErrInvalidParams)) + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -2743,12 +2956,24 @@ func (s *CreateFileSystemFromBackupInput) SetClientRequestToken(v string) *Creat return s } +// SetLustreConfiguration sets the LustreConfiguration field's value. +func (s *CreateFileSystemFromBackupInput) SetLustreConfiguration(v *CreateFileSystemLustreConfiguration) *CreateFileSystemFromBackupInput { + s.LustreConfiguration = v + return s +} + // SetSecurityGroupIds sets the SecurityGroupIds field's value. func (s *CreateFileSystemFromBackupInput) SetSecurityGroupIds(v []*string) *CreateFileSystemFromBackupInput { s.SecurityGroupIds = v return s } +// SetStorageType sets the StorageType field's value. +func (s *CreateFileSystemFromBackupInput) SetStorageType(v string) *CreateFileSystemFromBackupInput { + s.StorageType = &v + return s +} + // SetSubnetIds sets the SubnetIds field's value. func (s *CreateFileSystemFromBackupInput) SetSubnetIds(v []*string) *CreateFileSystemFromBackupInput { s.SubnetIds = v @@ -2795,9 +3020,9 @@ func (s *CreateFileSystemFromBackupOutput) SetFileSystem(v *FileSystem) *CreateF type CreateFileSystemInput struct { _ struct{} `type:"structure"` - // (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to - // ensure idempotent creation. This string is automatically filled on your behalf - // when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK. + // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // creation. This string is automatically filled on your behalf when you use + // the AWS Command Line Interface (AWS CLI) or an AWS SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` // The type of Amazon FSx file system to create, either WINDOWS or LUSTRE. @@ -2814,8 +3039,7 @@ type CreateFileSystemInput struct { // in the AWS Key Management Service API Reference. KmsKeyId *string `min:"1" type:"string"` - // The Lustre configuration for the file system being created. This value is - // required if FileSystemType is set to LUSTRE. + // The Lustre configuration for the file system being created. LustreConfiguration *CreateFileSystemLustreConfiguration `type:"structure"` // A list of IDs specifying the security groups to apply to all network interfaces @@ -2823,27 +3047,53 @@ type CreateFileSystemInput struct { // to describe the file system. SecurityGroupIds []*string `type:"list"` - // The storage capacity of the file system being created. + // Sets the storage capacity of the file system that you're creating. 
+ // + // For Lustre file systems: + // + // * For SCRATCH_2 and PERSISTENT_1 SSD deployment types, valid values are + // 1200 GiB, 2400 GiB, and increments of 2400 GiB. + // + // * For PERSISTENT HDD file systems, valid values are increments of 6000 + // GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB + // file systems. // - // For Windows file systems, valid values are 32 GiB - 65,536 GiB. + // * For SCRATCH_1 deployment type, valid values are 1200 GiB, 2400 GiB, + // and increments of 3600 GiB. // - // For SCRATCH_1 Lustre file systems, valid values are 1,200, 2,400, 3,600, - // then continuing in increments of 3600 GiB. For SCRATCH_2 and PERSISTENT_1 - // file systems, valid values are 1200, 2400, then continuing in increments - // of 2400 GiB. + // For Windows file systems: + // + // * If StorageType=SSD, valid values are 32 GiB - 65,536 GiB (64 TiB). + // + // * If StorageType=HDD, valid values are 2000 GiB - 65,536 GiB (64 TiB). // // StorageCapacity is a required field StorageCapacity *int64 `type:"integer" required:"true"` + // Sets the storage type for the file system you're creating. Valid values are + // SSD and HDD. + // + // * Set to SSD to use solid state drive storage. SSD is supported on all + // Windows and Lustre deployment types. + // + // * Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 + // and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT + // Lustre file system deployment types. + // + // Default value is SSD. For more information, see Storage Type Options (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/optimize-fsx-costs.html#storage-type-options) + // in the Amazon FSx for Windows User Guide and Multiple Storage Options (https://docs.aws.amazon.com/fsx/latest/LustreGuide/what-is.html#storage-options) + // in the Amazon FSx for Lustre User Guide. + StorageType *string `type:"string" enum:"StorageType"` + // Specifies the IDs of the subnets that the file system will be accessible // from. For Windows MULTI_AZ_1 file system deployment types, provide exactly // two subnet IDs, one for the preferred file server and one for the standby // file server. You specify one of these subnets as the preferred subnet using // the WindowsConfiguration > PreferredSubnetID property. // - // For Windows SINGLE_AZ_1 file system deployment types and Lustre file systems, - // provide exactly one subnet ID. The file server is launched in that subnet's - // Availability Zone. + // For Windows SINGLE_AZ_1 and SINGLE_AZ_2 file system deployment types and + // Lustre file systems, provide exactly one subnet ID. The file server is launched + // in that subnet's Availability Zone. // // SubnetIds is a required field SubnetIds []*string `type:"list" required:"true"` @@ -2852,8 +3102,7 @@ type CreateFileSystemInput struct { // Name tag appears in the console as the file system name. Tags []*Tag `min:"1" type:"list"` - // The Microsoft Windows configuration for the file system being created. This - // value is required if FileSystemType is set to WINDOWS. + // The Microsoft Windows configuration for the file system being created. WindowsConfiguration *CreateFileSystemWindowsConfiguration `type:"structure"` } @@ -2951,6 +3200,12 @@ func (s *CreateFileSystemInput) SetStorageCapacity(v int64) *CreateFileSystemInp return s } +// SetStorageType sets the StorageType field's value. 
+func (s *CreateFileSystemInput) SetStorageType(v string) *CreateFileSystemInput { + s.StorageType = &v + return s +} + // SetSubnetIds sets the SubnetIds field's value. func (s *CreateFileSystemInput) SetSubnetIds(v []*string) *CreateFileSystemInput { s.SubnetIds = v @@ -2969,15 +3224,58 @@ func (s *CreateFileSystemInput) SetWindowsConfiguration(v *CreateFileSystemWindo return s } -// The Lustre configuration for the file system being created. This value is -// required if FileSystemType is set to LUSTRE. +// The Lustre configuration for the file system being created. type CreateFileSystemLustreConfiguration struct { _ struct{} `type:"structure"` - // (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need - // temporary storage and shorter-term processing of data. The SCRATCH_2 deployment - // type provides in-transit encryption of data and higher burst throughput capacity - // than SCRATCH_1. + // (Optional) When you create your file system, your existing S3 objects appear + // as file and directory listings. Use this property to choose how Amazon FSx + // keeps your file and directory listings up to date as you add or modify objects + // in your linked S3 bucket. AutoImportPolicy can have the following values: + // + // * NONE - (Default) AutoImport is off. Amazon FSx only updates file and + // directory listings from the linked S3 bucket when the file system is created. + // FSx does not update file and directory listings for any new or changed + // objects after choosing this option. + // + // * NEW - AutoImport is on. Amazon FSx automatically imports directory listings + // of any new objects added to the linked S3 bucket that do not currently + // exist in the FSx file system. + // + // * NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file + // and directory listings of any new objects added to the S3 bucket and any + // existing objects that are changed in the S3 bucket after you choose this + // option. + // + // For more information, see Automatically import updates from your S3 bucket + // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html). + AutoImportPolicy *string `type:"string" enum:"AutoImportPolicyType"` + + // The number of days to retain automatic backups. Setting this to 0 disables + // automatic backups. You can retain automatic backups for a maximum of 90 days. + // The default is 0. + AutomaticBackupRetentionDays *int64 `type:"integer"` + + // (Optional) Not available to use with file systems that are linked to a data + // repository. A boolean flag indicating whether tags for the file system should + // be copied to backups. The default value is false. If it's set to true, all + // file system tags are copied to all automatic and user-initiated backups when + // the user doesn't specify any backup-specific tags. If this value is true, + // and you specify one or more backup tags, only the specified tags are copied + // to backups. If you specify one or more tags when creating a user-initiated + // backup, no tags are copied from the file system, regardless of this value. + // + // For more information, see Working with backups (https://docs.aws.amazon.com/fsx/latest/LustreGuide/using-backups-fsx.html). + CopyTagsToBackups *bool `type:"boolean"` + + // A recurring daily time, in the format HH:MM. HH is the zero-padded hour of + // the day (0-23), and MM is the zero-padded minute of the hour. For example, + // 05:00 specifies 5 AM daily. 
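// Illustrative sketch (editorial, not part of the generated SDK code in this
// patch): the StorageType field added to CreateFileSystemFromBackupInput above,
// used to restore a Windows backup onto HDD storage (supported for SINGLE_AZ_2
// and MULTI_AZ_1 deployments, per the documentation). Backup, subnet, and
// security group IDs are placeholders; the enum value is written as the raw
// string from the documentation.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	_, err := svc.CreateFileSystemFromBackup(&fsx.CreateFileSystemFromBackupInput{
		BackupId:         aws.String("backup-0123456789abcdef0"),
		StorageType:      aws.String("HDD"),
		SubnetIds:        aws.StringSlice([]string{"subnet-0123456789abcdef0"}),
		SecurityGroupIds: aws.StringSlice([]string{"sg-0123456789abcdef0"}),
	})
	if err != nil {
		log.Fatal(err)
	}
}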
+ DailyAutomaticBackupStartTime *string `min:"5" type:"string"` + + // Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage + // and shorter-term processing of data. The SCRATCH_2 deployment type provides + // in-transit encryption of data and higher burst throughput capacity than SCRATCH_1. // // Choose PERSISTENT_1 deployment type for longer-term storage and workloads // and encryption of data in transit. To learn more about deployment types, @@ -2993,6 +3291,14 @@ type CreateFileSystemLustreConfiguration struct { // Regions. To learn more, Encrypting Data in Transit (https://docs.aws.amazon.com/fsx/latest/LustreGuide/encryption-in-transit-fsxl.html). DeploymentType *string `type:"string" enum:"LustreDeploymentType"` + // The type of drive cache used by PERSISTENT_1 file systems that are provisioned + // with HDD storage devices. This parameter is required when storage type is + // HDD. Set to READ, improve the performance for frequently accessed files and + // allows 20% of the total storage capacity of the file system to be cached. + // + // This parameter is required when StorageType is set to HDD. + DriveCacheType *string `type:"string" enum:"DriveCacheType"` + // (Optional) The path in Amazon S3 where the root of your Amazon FSx file system // is exported. The path must use the same Amazon S3 bucket as specified in // ImportPath. You can provide an optional prefix to which new and changed data @@ -3027,18 +3333,20 @@ type CreateFileSystemLustreConfiguration struct { // MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB. ImportedFileChunkSize *int64 `min:"1" type:"integer"` - // (Optional) For the PERSISTENT_1 deployment type, describes the amount of - // read and write throughput for each 1 tebibyte of storage, in MB/s/TiB. File - // system throughput capacity is calculated by multiplying file system storage - // capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4 TiB - // file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields - // 120 MB/s of file system throughput. You pay for the amount of throughput - // that you provision. (Default = 200 MB/s/TiB) + // Required for the PERSISTENT_1 deployment type, describes the amount of read + // and write throughput for each 1 tebibyte of storage, in MB/s/TiB. File system + // throughput capacity is calculated by multiplying file system storage capacity + // (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4 TiB file system, + // provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of file + // system throughput. You pay for the amount of throughput that you provision. // - // Valid values are 50, 100, 200. - PerUnitStorageThroughput *int64 `min:"50" type:"integer"` + // Valid values for SSD storage: 50, 100, 200. Valid values for HDD storage: + // 12, 40. + PerUnitStorageThroughput *int64 `min:"12" type:"integer"` - // The preferred time to perform weekly maintenance, in the UTC time zone. + // (Optional) The preferred start time to perform weekly maintenance, formatted + // d:HH:MM in the UTC time zone, where d is the weekday number, from 1 through + // 7, beginning with Monday and ending with Sunday. WeeklyMaintenanceStartTime *string `min:"7" type:"string"` } @@ -3055,6 +3363,9 @@ func (s CreateFileSystemLustreConfiguration) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *CreateFileSystemLustreConfiguration) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateFileSystemLustreConfiguration"} + if s.DailyAutomaticBackupStartTime != nil && len(*s.DailyAutomaticBackupStartTime) < 5 { + invalidParams.Add(request.NewErrParamMinLen("DailyAutomaticBackupStartTime", 5)) + } if s.ExportPath != nil && len(*s.ExportPath) < 3 { invalidParams.Add(request.NewErrParamMinLen("ExportPath", 3)) } @@ -3064,8 +3375,8 @@ func (s *CreateFileSystemLustreConfiguration) Validate() error { if s.ImportedFileChunkSize != nil && *s.ImportedFileChunkSize < 1 { invalidParams.Add(request.NewErrParamMinValue("ImportedFileChunkSize", 1)) } - if s.PerUnitStorageThroughput != nil && *s.PerUnitStorageThroughput < 50 { - invalidParams.Add(request.NewErrParamMinValue("PerUnitStorageThroughput", 50)) + if s.PerUnitStorageThroughput != nil && *s.PerUnitStorageThroughput < 12 { + invalidParams.Add(request.NewErrParamMinValue("PerUnitStorageThroughput", 12)) } if s.WeeklyMaintenanceStartTime != nil && len(*s.WeeklyMaintenanceStartTime) < 7 { invalidParams.Add(request.NewErrParamMinLen("WeeklyMaintenanceStartTime", 7)) @@ -3077,12 +3388,42 @@ func (s *CreateFileSystemLustreConfiguration) Validate() error { return nil } +// SetAutoImportPolicy sets the AutoImportPolicy field's value. +func (s *CreateFileSystemLustreConfiguration) SetAutoImportPolicy(v string) *CreateFileSystemLustreConfiguration { + s.AutoImportPolicy = &v + return s +} + +// SetAutomaticBackupRetentionDays sets the AutomaticBackupRetentionDays field's value. +func (s *CreateFileSystemLustreConfiguration) SetAutomaticBackupRetentionDays(v int64) *CreateFileSystemLustreConfiguration { + s.AutomaticBackupRetentionDays = &v + return s +} + +// SetCopyTagsToBackups sets the CopyTagsToBackups field's value. +func (s *CreateFileSystemLustreConfiguration) SetCopyTagsToBackups(v bool) *CreateFileSystemLustreConfiguration { + s.CopyTagsToBackups = &v + return s +} + +// SetDailyAutomaticBackupStartTime sets the DailyAutomaticBackupStartTime field's value. +func (s *CreateFileSystemLustreConfiguration) SetDailyAutomaticBackupStartTime(v string) *CreateFileSystemLustreConfiguration { + s.DailyAutomaticBackupStartTime = &v + return s +} + // SetDeploymentType sets the DeploymentType field's value. func (s *CreateFileSystemLustreConfiguration) SetDeploymentType(v string) *CreateFileSystemLustreConfiguration { s.DeploymentType = &v return s } +// SetDriveCacheType sets the DriveCacheType field's value. +func (s *CreateFileSystemLustreConfiguration) SetDriveCacheType(v string) *CreateFileSystemLustreConfiguration { + s.DriveCacheType = &v + return s +} + // SetExportPath sets the ExportPath field's value. func (s *CreateFileSystemLustreConfiguration) SetExportPath(v string) *CreateFileSystemLustreConfiguration { s.ExportPath = &v @@ -3148,7 +3489,7 @@ type CreateFileSystemWindowsConfiguration struct { // The number of days to retain automatic backups. The default is to retain // backups for 7 days. Setting this value to 0 disables the creation of automatic - // backups. The maximum retention period for backups is 35 days. + // backups. The maximum retention period for backups is 90 days. 
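// Illustrative sketch (editorial, not part of the generated SDK code in this
// patch): pulls together the Lustre options introduced above — an S3-linked
// PERSISTENT_1 file system on HDD storage with a read cache and the new
// 12 MB/s/TiB PerUnitStorageThroughput floor. Bucket, subnet, and capacity
// values are placeholders; enum values are written as the raw strings from
// the documentation.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	_, err := svc.CreateFileSystem(&fsx.CreateFileSystemInput{
		FileSystemType: aws.String("LUSTRE"),
		StorageType:    aws.String("HDD"),
		// HDD capacity comes in 6000 GiB increments for 12 MB/s/TiB file systems.
		StorageCapacity: aws.Int64(6000),
		SubnetIds:       aws.StringSlice([]string{"subnet-0123456789abcdef0"}),
		LustreConfiguration: &fsx.CreateFileSystemLustreConfiguration{
			DeploymentType:           aws.String("PERSISTENT_1"),
			DriveCacheType:           aws.String("READ"), // required when StorageType is HDD
			PerUnitStorageThroughput: aws.Int64(12),
			ImportPath:               aws.String("s3://example-bucket"),
			AutoImportPolicy:         aws.String("NEW_CHANGED"),
			// Automatic backup settings are omitted: per the CreateBackup
			// documentation above, backups are not supported on file systems
			// linked to a data repository.
			WeeklyMaintenanceStartTime: aws.String("1:05:00"), // Monday 05:00 UTC
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}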
AutomaticBackupRetentionDays *int64 `type:"integer"` // A boolean flag indicating whether tags for the file system should be copied @@ -3169,13 +3510,17 @@ type CreateFileSystemWindowsConfiguration struct { // * MULTI_AZ_1 - Deploys a high availability file system that is configured // for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. // You can only deploy a Multi-AZ file system in AWS Regions that have a - // minimum of three Availability Zones. + // minimum of three Availability Zones. Also supports HDD storage type // // * SINGLE_AZ_1 - (Default) Choose to deploy a file system that is configured // for single AZ redundancy. // - // To learn more about high availability Multi-AZ file systems, see High Availability - // for Amazon FSx for Windows File Server (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html). + // * SINGLE_AZ_2 - The latest generation Single AZ file system. Specifies + // a file system that is configured for single AZ redundancy and supports + // HDD storage type. + // + // For more information, see Availability and Durability: Single-AZ and Multi-AZ + // File Systems (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html). DeploymentType *string `type:"string" enum:"WindowsDeploymentType"` // Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet @@ -3197,7 +3542,8 @@ type CreateFileSystemWindowsConfiguration struct { ThroughputCapacity *int64 `min:"8" type:"integer" required:"true"` // The preferred start time to perform weekly maintenance, formatted d:HH:MM - // in the UTC time zone. + // in the UTC time zone, where d is the weekday number, from 1 through 7, beginning + // with Monday and ending with Sunday. WeeklyMaintenanceStartTime *string `min:"7" type:"string"` } @@ -3303,10 +3649,37 @@ func (s *CreateFileSystemWindowsConfiguration) SetWeeklyMaintenanceStartTime(v s type DataRepositoryConfiguration struct { _ struct{} `type:"structure"` + // Describes the file system's linked S3 data repository's AutoImportPolicy. + // The AutoImportPolicy configures how Amazon FSx keeps your file and directory + // listings up to date as you add or modify objects in your linked S3 bucket. + // AutoImportPolicy can have the following values: + // + // * NONE - (Default) AutoImport is off. Amazon FSx only updates file and + // directory listings from the linked S3 bucket when the file system is created. + // FSx does not update file and directory listings for any new or changed + // objects after choosing this option. + // + // * NEW - AutoImport is on. Amazon FSx automatically imports directory listings + // of any new objects added to the linked S3 bucket that do not currently + // exist in the FSx file system. + // + // * NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file + // and directory listings of any new objects added to the S3 bucket and any + // existing objects that are changed in the S3 bucket after you choose this + // option. + // + // For more information, see Automatically import updates from your S3 bucket + // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html). + AutoImportPolicy *string `type:"string" enum:"AutoImportPolicyType"` + // The export path to the Amazon S3 bucket (and prefix) that you are using to // store new and changed Lustre file system files in S3. 
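// Illustrative sketch (editorial, not part of the generated SDK code in this
// patch): a minimal CreateFileSystemWindowsConfiguration using the options
// documented above — the SINGLE_AZ_2 deployment type, the raised 90-day
// backup retention maximum, and the d:HH:MM weekly maintenance format.
// Directory-service settings are omitted; in a real request this block is
// embedded in a CreateFileSystemInput alongside StorageType and SubnetIds.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	windowsCfg := &fsx.CreateFileSystemWindowsConfiguration{
		DeploymentType:               aws.String("SINGLE_AZ_2"),
		ThroughputCapacity:           aws.Int64(16),
		AutomaticBackupRetentionDays: aws.Int64(90), // maximum raised from 35 to 90 days
		CopyTagsToBackups:            aws.Bool(true),
		WeeklyMaintenanceStartTime:   aws.String("7:03:30"), // Sunday 03:30 UTC
	}
	fmt.Println(windowsCfg)
}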
ExportPath *string `min:"3" type:"string"` + // Provides detailed information about the data respository if its Lifecycle + // is set to MISCONFIGURED. + FailureDetails *DataRepositoryFailureDetails `type:"structure"` + // The import path to the Amazon S3 bucket (and optional prefix) that you're // using as the data repository for your FSx for Lustre file system, for example // s3://import-bucket/optional-prefix. If a prefix is specified after the Amazon @@ -3322,6 +3695,25 @@ type DataRepositoryConfiguration struct { // The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 // MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB. ImportedFileChunkSize *int64 `min:"1" type:"integer"` + + // Describes the state of the file system's S3 durable data repository, if it + // is configured with an S3 repository. The lifecycle can have the following + // values: + // + // * CREATING - The data repository configuration between the FSx file system + // and the linked S3 data repository is being created. The data repository + // is unavailable. + // + // * AVAILABLE - The data repository is available for use. + // + // * MISCONFIGURED - Amazon FSx cannot automatically import updates from + // the S3 bucket until the data repository configuration is corrected. For + // more information, see Troubleshooting a Misconfigured linked S3 bucket + // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/troubleshooting.html#troubleshooting-misconfigured-data-repository). + // + // * UPDATING - The data repository is undergoing a customer initiated update + // and availability may be impacted. + Lifecycle *string `type:"string" enum:"DataRepositoryLifecycle"` } // String returns the string representation @@ -3334,12 +3726,24 @@ func (s DataRepositoryConfiguration) GoString() string { return s.String() } +// SetAutoImportPolicy sets the AutoImportPolicy field's value. +func (s *DataRepositoryConfiguration) SetAutoImportPolicy(v string) *DataRepositoryConfiguration { + s.AutoImportPolicy = &v + return s +} + // SetExportPath sets the ExportPath field's value. func (s *DataRepositoryConfiguration) SetExportPath(v string) *DataRepositoryConfiguration { s.ExportPath = &v return s } +// SetFailureDetails sets the FailureDetails field's value. +func (s *DataRepositoryConfiguration) SetFailureDetails(v *DataRepositoryFailureDetails) *DataRepositoryConfiguration { + s.FailureDetails = v + return s +} + // SetImportPath sets the ImportPath field's value. func (s *DataRepositoryConfiguration) SetImportPath(v string) *DataRepositoryConfiguration { s.ImportPath = &v @@ -3352,6 +3756,37 @@ func (s *DataRepositoryConfiguration) SetImportedFileChunkSize(v int64) *DataRep return s } +// SetLifecycle sets the Lifecycle field's value. +func (s *DataRepositoryConfiguration) SetLifecycle(v string) *DataRepositoryConfiguration { + s.Lifecycle = &v + return s +} + +// Provides detailed information about the data respository if its Lifecycle +// is set to MISCONFIGURED. +type DataRepositoryFailureDetails struct { + _ struct{} `type:"structure"` + + // A detailed error message. + Message *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DataRepositoryFailureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataRepositoryFailureDetails) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. 
+func (s *DataRepositoryFailureDetails) SetMessage(v string) *DataRepositoryFailureDetails { + s.Message = &v + return s +} + // A description of the data repository task. You use data repository tasks // to perform bulk transfer operations between your Amazon FSx file system and // its linked data repository. @@ -3538,8 +3973,8 @@ func (s *DataRepositoryTask) SetType(v string) *DataRepositoryTask { // The data repository task could not be canceled because the task has already // ended. type DataRepositoryTaskEnded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -3557,17 +3992,17 @@ func (s DataRepositoryTaskEnded) GoString() string { func newErrorDataRepositoryTaskEnded(v protocol.ResponseMetadata) error { return &DataRepositoryTaskEnded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DataRepositoryTaskEnded) Code() string { +func (s *DataRepositoryTaskEnded) Code() string { return "DataRepositoryTaskEnded" } // Message returns the exception's message. -func (s DataRepositoryTaskEnded) Message() string { +func (s *DataRepositoryTaskEnded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3575,29 +4010,29 @@ func (s DataRepositoryTaskEnded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DataRepositoryTaskEnded) OrigErr() error { +func (s *DataRepositoryTaskEnded) OrigErr() error { return nil } -func (s DataRepositoryTaskEnded) Error() string { +func (s *DataRepositoryTaskEnded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DataRepositoryTaskEnded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DataRepositoryTaskEnded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DataRepositoryTaskEnded) RequestID() string { - return s.respMetadata.RequestID +func (s *DataRepositoryTaskEnded) RequestID() string { + return s.RespMetadata.RequestID } // An existing data repository task is currently executing on the file system. // Wait until the existing task has completed, then create the new task. type DataRepositoryTaskExecuting struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -3615,17 +4050,17 @@ func (s DataRepositoryTaskExecuting) GoString() string { func newErrorDataRepositoryTaskExecuting(v protocol.ResponseMetadata) error { return &DataRepositoryTaskExecuting{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DataRepositoryTaskExecuting) Code() string { +func (s *DataRepositoryTaskExecuting) Code() string { return "DataRepositoryTaskExecuting" } // Message returns the exception's message. -func (s DataRepositoryTaskExecuting) Message() string { +func (s *DataRepositoryTaskExecuting) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3633,22 +4068,22 @@ func (s DataRepositoryTaskExecuting) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s DataRepositoryTaskExecuting) OrigErr() error { +func (s *DataRepositoryTaskExecuting) OrigErr() error { return nil } -func (s DataRepositoryTaskExecuting) Error() string { +func (s *DataRepositoryTaskExecuting) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DataRepositoryTaskExecuting) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DataRepositoryTaskExecuting) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DataRepositoryTaskExecuting) RequestID() string { - return s.respMetadata.RequestID +func (s *DataRepositoryTaskExecuting) RequestID() string { + return s.RespMetadata.RequestID } // Provides information about why a data repository task failed. Only populated @@ -3723,8 +4158,8 @@ func (s *DataRepositoryTaskFilter) SetValues(v []*string) *DataRepositoryTaskFil // The data repository task or tasks you specified could not be found. type DataRepositoryTaskNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -3742,17 +4177,17 @@ func (s DataRepositoryTaskNotFound) GoString() string { func newErrorDataRepositoryTaskNotFound(v protocol.ResponseMetadata) error { return &DataRepositoryTaskNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DataRepositoryTaskNotFound) Code() string { +func (s *DataRepositoryTaskNotFound) Code() string { return "DataRepositoryTaskNotFound" } // Message returns the exception's message. -func (s DataRepositoryTaskNotFound) Message() string { +func (s *DataRepositoryTaskNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3760,22 +4195,22 @@ func (s DataRepositoryTaskNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DataRepositoryTaskNotFound) OrigErr() error { +func (s *DataRepositoryTaskNotFound) OrigErr() error { return nil } -func (s DataRepositoryTaskNotFound) Error() string { +func (s *DataRepositoryTaskNotFound) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DataRepositoryTaskNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DataRepositoryTaskNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DataRepositoryTaskNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *DataRepositoryTaskNotFound) RequestID() string { + return s.RespMetadata.RequestID } // Provides the task status showing a running total of the total number of files @@ -3842,9 +4277,9 @@ type DeleteBackupInput struct { // BackupId is a required field BackupId *string `min:"12" type:"string" required:"true"` - // (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to - // ensure idempotent deletion. This is automatically filled on your behalf when - // using the AWS CLI or SDK. + // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // deletion. This is automatically filled on your behalf when using the AWS + // CLI or SDK. 
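// Illustrative sketch (editorial, not part of the generated SDK code in this
// patch): uses the DeleteBackupInput shape above. The ClientRequestToken is
// normally filled automatically (idempotencyToken), but pinning it keeps
// retries of the same logical delete idempotent. The BackupRestoring error
// type refactored earlier in this hunk is matched with errors.As; the backup
// ID is a placeholder.
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	_, err := svc.DeleteBackup(&fsx.DeleteBackupInput{
		BackupId:           aws.String("backup-0123456789abcdef0"),
		ClientRequestToken: aws.String("delete-backup-2021-07-20"),
	})
	if err != nil {
		var restoring *fsx.BackupRestoring
		if errors.As(err, &restoring) {
			fmt.Println("backup is being used for a restore; retry later:", restoring.Message())
			return
		}
		log.Fatal(err)
	}
}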
ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` } @@ -3926,9 +4361,9 @@ func (s *DeleteBackupOutput) SetLifecycle(v string) *DeleteBackupOutput { type DeleteFileSystemInput struct { _ struct{} `type:"structure"` - // (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to - // ensure idempotent deletion. This is automatically filled on your behalf when - // using the AWS CLI or SDK. + // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // deletion. This is automatically filled on your behalf when using the AWS + // CLI or SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` // The ID of the file system you want to delete. @@ -3936,6 +4371,10 @@ type DeleteFileSystemInput struct { // FileSystemId is a required field FileSystemId *string `min:"11" type:"string" required:"true"` + // The configuration object for the Amazon FSx for Lustre file system being + // deleted in the DeleteFileSystem operation. + LustreConfiguration *DeleteFileSystemLustreConfiguration `type:"structure"` + // The configuration object for the Microsoft Windows file system used in the // DeleteFileSystem operation. WindowsConfiguration *DeleteFileSystemWindowsConfiguration `type:"structure"` @@ -3963,6 +4402,11 @@ func (s *DeleteFileSystemInput) Validate() error { if s.FileSystemId != nil && len(*s.FileSystemId) < 11 { invalidParams.Add(request.NewErrParamMinLen("FileSystemId", 11)) } + if s.LustreConfiguration != nil { + if err := s.LustreConfiguration.Validate(); err != nil { + invalidParams.AddNested("LustreConfiguration", err.(request.ErrInvalidParams)) + } + } if s.WindowsConfiguration != nil { if err := s.WindowsConfiguration.Validate(); err != nil { invalidParams.AddNested("WindowsConfiguration", err.(request.ErrInvalidParams)) @@ -3987,38 +4431,145 @@ func (s *DeleteFileSystemInput) SetFileSystemId(v string) *DeleteFileSystemInput return s } +// SetLustreConfiguration sets the LustreConfiguration field's value. +func (s *DeleteFileSystemInput) SetLustreConfiguration(v *DeleteFileSystemLustreConfiguration) *DeleteFileSystemInput { + s.LustreConfiguration = v + return s +} + // SetWindowsConfiguration sets the WindowsConfiguration field's value. func (s *DeleteFileSystemInput) SetWindowsConfiguration(v *DeleteFileSystemWindowsConfiguration) *DeleteFileSystemInput { s.WindowsConfiguration = v return s } -// The response object for the DeleteFileSystem operation. -type DeleteFileSystemOutput struct { +// The configuration object for the Amazon FSx for Lustre file system being +// deleted in the DeleteFileSystem operation. +type DeleteFileSystemLustreConfiguration struct { _ struct{} `type:"structure"` - // The ID of the file system being deleted. - FileSystemId *string `min:"11" type:"string"` - - // The file system lifecycle for the deletion request. Should be DELETING. - Lifecycle *string `type:"string" enum:"FileSystemLifecycle"` + // Use if SkipFinalBackup is set to false, and you want to apply an array of + // tags to the final backup. If you have set the file system property CopyTagsToBackups + // to true, and you specify one or more FinalBackupTags when deleting a file + // system, Amazon FSx will not copy any existing file system tags to the backup. + FinalBackupTags []*Tag `min:"1" type:"list"` - // The response object for the Microsoft Windows file system used in the DeleteFileSystem - // operation. 
- WindowsResponse *DeleteFileSystemWindowsResponse `type:"structure"` + // Set SkipFinalBackup to false if you want to take a final backup of the file + // system you are deleting. By default, Amazon FSx will not take a final backup + // on your behalf when the DeleteFileSystem operation is invoked. (Default = + // true) + SkipFinalBackup *bool `type:"boolean"` } // String returns the string representation -func (s DeleteFileSystemOutput) String() string { +func (s DeleteFileSystemLustreConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteFileSystemOutput) GoString() string { +func (s DeleteFileSystemLustreConfiguration) GoString() string { return s.String() } -// SetFileSystemId sets the FileSystemId field's value. +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFileSystemLustreConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFileSystemLustreConfiguration"} + if s.FinalBackupTags != nil && len(s.FinalBackupTags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FinalBackupTags", 1)) + } + if s.FinalBackupTags != nil { + for i, v := range s.FinalBackupTags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FinalBackupTags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFinalBackupTags sets the FinalBackupTags field's value. +func (s *DeleteFileSystemLustreConfiguration) SetFinalBackupTags(v []*Tag) *DeleteFileSystemLustreConfiguration { + s.FinalBackupTags = v + return s +} + +// SetSkipFinalBackup sets the SkipFinalBackup field's value. +func (s *DeleteFileSystemLustreConfiguration) SetSkipFinalBackup(v bool) *DeleteFileSystemLustreConfiguration { + s.SkipFinalBackup = &v + return s +} + +// The response object for the Amazon FSx for Lustre file system being deleted +// in the DeleteFileSystem operation. +type DeleteFileSystemLustreResponse struct { + _ struct{} `type:"structure"` + + // The ID of the final backup for this file system. + FinalBackupId *string `min:"12" type:"string"` + + // The set of tags applied to the final backup. + FinalBackupTags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s DeleteFileSystemLustreResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileSystemLustreResponse) GoString() string { + return s.String() +} + +// SetFinalBackupId sets the FinalBackupId field's value. +func (s *DeleteFileSystemLustreResponse) SetFinalBackupId(v string) *DeleteFileSystemLustreResponse { + s.FinalBackupId = &v + return s +} + +// SetFinalBackupTags sets the FinalBackupTags field's value. +func (s *DeleteFileSystemLustreResponse) SetFinalBackupTags(v []*Tag) *DeleteFileSystemLustreResponse { + s.FinalBackupTags = v + return s +} + +// The response object for the DeleteFileSystem operation. +type DeleteFileSystemOutput struct { + _ struct{} `type:"structure"` + + // The ID of the file system being deleted. + FileSystemId *string `min:"11" type:"string"` + + // The file system lifecycle for the deletion request. Should be DELETING. + Lifecycle *string `type:"string" enum:"FileSystemLifecycle"` + + // The response object for the Amazon FSx for Lustre file system being deleted + // in the DeleteFileSystem operation. 
+ LustreResponse *DeleteFileSystemLustreResponse `type:"structure"` + + // The response object for the Microsoft Windows file system used in the DeleteFileSystem + // operation. + WindowsResponse *DeleteFileSystemWindowsResponse `type:"structure"` +} + +// String returns the string representation +func (s DeleteFileSystemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFileSystemOutput) GoString() string { + return s.String() +} + +// SetFileSystemId sets the FileSystemId field's value. func (s *DeleteFileSystemOutput) SetFileSystemId(v string) *DeleteFileSystemOutput { s.FileSystemId = &v return s @@ -4030,6 +4581,12 @@ func (s *DeleteFileSystemOutput) SetLifecycle(v string) *DeleteFileSystemOutput return s } +// SetLustreResponse sets the LustreResponse field's value. +func (s *DeleteFileSystemOutput) SetLustreResponse(v *DeleteFileSystemLustreResponse) *DeleteFileSystemOutput { + s.LustreResponse = v + return s +} + // SetWindowsResponse sets the WindowsResponse field's value. func (s *DeleteFileSystemOutput) SetWindowsResponse(v *DeleteFileSystemWindowsResponse) *DeleteFileSystemOutput { s.WindowsResponse = v @@ -4134,22 +4691,22 @@ func (s *DeleteFileSystemWindowsResponse) SetFinalBackupTags(v []*Tag) *DeleteFi type DescribeBackupsInput struct { _ struct{} `type:"structure"` - // (Optional) IDs of the backups you want to retrieve (String). This overrides - // any filters. If any IDs are not found, BackupNotFound will be thrown. + // IDs of the backups you want to retrieve (String). This overrides any filters. + // If any IDs are not found, BackupNotFound will be thrown. BackupIds []*string `type:"list"` - // (Optional) Filters structure. Supported names are file-system-id and backup-type. + // Filters structure. Supported names are file-system-id and backup-type. Filters []*Filter `type:"list"` - // (Optional) Maximum number of backups to return in the response (integer). - // This parameter value must be greater than 0. The number of items that Amazon - // FSx returns is the minimum of the MaxResults parameter specified in the request - // and the service's internal maximum number of items per page. + // Maximum number of backups to return in the response (integer). This parameter + // value must be greater than 0. The number of items that Amazon FSx returns + // is the minimum of the MaxResults parameter specified in the request and the + // service's internal maximum number of items per page. MaxResults *int64 `min:"1" type:"integer"` - // (Optional) Opaque pagination token returned from a previous DescribeBackups - // operation (String). If a token present, the action continues the list from - // where the returning call left off. + // Opaque pagination token returned from a previous DescribeBackups operation + // (String). If a token present, the action continues the list from where the + // returning call left off. NextToken *string `min:"1" type:"string"` } @@ -4346,19 +4903,18 @@ func (s *DescribeDataRepositoryTasksOutput) SetNextToken(v string) *DescribeData type DescribeFileSystemsInput struct { _ struct{} `type:"structure"` - // (Optional) IDs of the file systems whose descriptions you want to retrieve - // (String). + // IDs of the file systems whose descriptions you want to retrieve (String). FileSystemIds []*string `type:"list"` - // (Optional) Maximum number of file systems to return in the response (integer). - // This parameter value must be greater than 0. 
The number of items that Amazon - // FSx returns is the minimum of the MaxResults parameter specified in the request + // Maximum number of file systems to return in the response (integer). This + // parameter value must be greater than 0. The number of items that Amazon FSx + // returns is the minimum of the MaxResults parameter specified in the request // and the service's internal maximum number of items per page. MaxResults *int64 `min:"1" type:"integer"` - // (Optional) Opaque pagination token returned from a previous DescribeFileSystems - // operation (String). If a token present, the action continues the list from - // where the returning call left off. + // Opaque pagination token returned from a previous DescribeFileSystems operation + // (String). If a token present, the action continues the list from where the + // returning call left off. NextToken *string `min:"1" type:"string"` } @@ -4444,6 +5000,11 @@ func (s *DescribeFileSystemsOutput) SetNextToken(v string) *DescribeFileSystemsO type FileSystem struct { _ struct{} `type:"structure"` + // A list of administrative actions for the file system that are in process + // or waiting to be processed. Administrative actions describe changes to the + // Windows file system that you have initiated using the UpdateFileSystem action. + AdministrativeActions []*AdministrativeAction `type:"list"` + // The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z), // also known as Unix time. CreationTime *time.Time `type:"timestamp"` @@ -4516,9 +5077,21 @@ type FileSystem struct { // The storage capacity of the file system in gigabytes (GB). StorageCapacity *int64 `type:"integer"` - // The ID of the subnet to contain the endpoint for the file system. One and - // only one is supported. The file system is launched in the Availability Zone - // associated with this subnet. + // The storage type of the file system. Valid values are SSD and HDD. If set + // to SSD, the file system uses solid state drive storage. If set to HDD, the + // file system uses hard disk drive storage. + StorageType *string `type:"string" enum:"StorageType"` + + // Specifies the IDs of the subnets that the file system is accessible from. + // For Windows MULTI_AZ_1 file system deployment type, there are two subnet + // IDs, one for the preferred file server and one for the standby file server. + // The preferred file server subnet identified in the PreferredSubnetID property. + // All other file systems have only one subnet ID. + // + // For Lustre file systems, and Single-AZ Windows file systems, this is the + // ID of the subnet that contains the endpoint for the file system. For MULTI_AZ_1 + // Windows file systems, the endpoint for the file system is available in the + // PreferredSubnetID. SubnetIds []*string `type:"list"` // The tags to associate with the file system. For more information, see Tagging @@ -4543,6 +5116,12 @@ func (s FileSystem) GoString() string { return s.String() } +// SetAdministrativeActions sets the AdministrativeActions field's value. +func (s *FileSystem) SetAdministrativeActions(v []*AdministrativeAction) *FileSystem { + s.AdministrativeActions = v + return s +} + // SetCreationTime sets the CreationTime field's value. func (s *FileSystem) SetCreationTime(v time.Time) *FileSystem { s.CreationTime = &v @@ -4615,6 +5194,12 @@ func (s *FileSystem) SetStorageCapacity(v int64) *FileSystem { return s } +// SetStorageType sets the StorageType field's value. 
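// Illustrative sketch, not part of the vendored SDK or this patch. It exercises
// the new DeleteFileSystemLustreConfiguration from the hunks above:
// SkipFinalBackup defaults to true, so a final backup is only taken when it is
// explicitly set to false, optionally with FinalBackupTags. The file system ID
// is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	out, err := svc.DeleteFileSystem(&fsx.DeleteFileSystemInput{
		FileSystemId: aws.String("fs-0123456789abcdef0"),
		LustreConfiguration: &fsx.DeleteFileSystemLustreConfiguration{
			SkipFinalBackup: aws.Bool(false),
			FinalBackupTags: []*fsx.Tag{
				{Key: aws.String("Name"), Value: aws.String("final-backup")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Lifecycle should be DELETING; LustreResponse carries the final backup ID.
	fmt.Println(aws.StringValue(out.Lifecycle))
	if out.LustreResponse != nil {
		fmt.Println("final backup:", aws.StringValue(out.LustreResponse.FinalBackupId))
	}
}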
+func (s *FileSystem) SetStorageType(v string) *FileSystem { + s.StorageType = &v + return s +} + // SetSubnetIds sets the SubnetIds field's value. func (s *FileSystem) SetSubnetIds(v []*string) *FileSystem { s.SubnetIds = v @@ -4666,8 +5251,8 @@ func (s *FileSystemFailureDetails) SetMessage(v string) *FileSystemFailureDetail // No Amazon FSx file systems were found based upon supplied parameters. type FileSystemNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -4685,17 +5270,17 @@ func (s FileSystemNotFound) GoString() string { func newErrorFileSystemNotFound(v protocol.ResponseMetadata) error { return &FileSystemNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FileSystemNotFound) Code() string { +func (s *FileSystemNotFound) Code() string { return "FileSystemNotFound" } // Message returns the exception's message. -func (s FileSystemNotFound) Message() string { +func (s *FileSystemNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4703,22 +5288,22 @@ func (s FileSystemNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FileSystemNotFound) OrigErr() error { +func (s *FileSystemNotFound) OrigErr() error { return nil } -func (s FileSystemNotFound) Error() string { +func (s *FileSystemNotFound) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FileSystemNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FileSystemNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FileSystemNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *FileSystemNotFound) RequestID() string { + return s.RespMetadata.RequestID } // A filter used to restrict the results of describe calls. You can use multiple @@ -4760,8 +5345,8 @@ func (s *Filter) SetValues(v []*string) *Filter { // request token but different parameters settings. A client request token should // always uniquely identify a single request. type IncompatibleParameterError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -4784,17 +5369,17 @@ func (s IncompatibleParameterError) GoString() string { func newErrorIncompatibleParameterError(v protocol.ResponseMetadata) error { return &IncompatibleParameterError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncompatibleParameterError) Code() string { +func (s *IncompatibleParameterError) Code() string { return "IncompatibleParameterError" } // Message returns the exception's message. -func (s IncompatibleParameterError) Message() string { +func (s *IncompatibleParameterError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4802,28 +5387,28 @@ func (s IncompatibleParameterError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
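// Illustrative sketch, not part of the vendored SDK or this patch. It pages
// through DescribeFileSystems with the MaxResults/NextToken fields documented
// above and prints the new StorageType and AdministrativeActions values on each
// FileSystem. The FileSystems field on the output is assumed from the wider FSx
// API; only its setters appear in these hunks.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	input := &fsx.DescribeFileSystemsInput{MaxResults: aws.Int64(10)}
	for {
		page, err := svc.DescribeFileSystems(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, fs := range page.FileSystems {
			fmt.Printf("%s storage=%s pending-actions=%d\n",
				aws.StringValue(fs.FileSystemId),
				aws.StringValue(fs.StorageType),
				len(fs.AdministrativeActions))
		}
		// An empty NextToken means the last page has been returned.
		if page.NextToken == nil {
			break
		}
		input.NextToken = page.NextToken
	}
}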
-func (s IncompatibleParameterError) OrigErr() error { +func (s *IncompatibleParameterError) OrigErr() error { return nil } -func (s IncompatibleParameterError) Error() string { +func (s *IncompatibleParameterError) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s IncompatibleParameterError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncompatibleParameterError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IncompatibleParameterError) RequestID() string { - return s.respMetadata.RequestID +func (s *IncompatibleParameterError) RequestID() string { + return s.RespMetadata.RequestID } // A generic error indicating a server-side failure. type InternalServerError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -4841,17 +5426,17 @@ func (s InternalServerError) GoString() string { func newErrorInternalServerError(v protocol.ResponseMetadata) error { return &InternalServerError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerError) Code() string { +func (s *InternalServerError) Code() string { return "InternalServerError" } // Message returns the exception's message. -func (s InternalServerError) Message() string { +func (s *InternalServerError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4859,28 +5444,28 @@ func (s InternalServerError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerError) OrigErr() error { +func (s *InternalServerError) OrigErr() error { return nil } -func (s InternalServerError) Error() string { +func (s *InternalServerError) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerError) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerError) RequestID() string { + return s.RespMetadata.RequestID } // The path provided for data repository export isn't valid. type InvalidExportPath struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -4898,17 +5483,17 @@ func (s InvalidExportPath) GoString() string { func newErrorInvalidExportPath(v protocol.ResponseMetadata) error { return &InvalidExportPath{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidExportPath) Code() string { +func (s *InvalidExportPath) Code() string { return "InvalidExportPath" } // Message returns the exception's message. 
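// Illustrative sketch, not part of the vendored SDK or this patch. It shows the
// DescribeBackups filters documented above ("file-system-id" and "backup-type"
// are the supported filter names). The Filter.Name field and the Backups field
// on the output are assumed from the wider FSx API; only Filter.SetValues and
// the input documentation appear in these hunks. The file system ID is a
// placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	out, err := svc.DescribeBackups(&fsx.DescribeBackupsInput{
		// MaxResults must be greater than 0.
		MaxResults: aws.Int64(25),
		Filters: []*fsx.Filter{
			{
				Name:   aws.String("file-system-id"),
				Values: []*string{aws.String("fs-0123456789abcdef0")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("backups returned:", len(out.Backups))
}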
-func (s InvalidExportPath) Message() string { +func (s *InvalidExportPath) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4916,28 +5501,28 @@ func (s InvalidExportPath) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidExportPath) OrigErr() error { +func (s *InvalidExportPath) OrigErr() error { return nil } -func (s InvalidExportPath) Error() string { +func (s *InvalidExportPath) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidExportPath) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidExportPath) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidExportPath) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidExportPath) RequestID() string { + return s.RespMetadata.RequestID } // The path provided for data repository import isn't valid. type InvalidImportPath struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -4955,17 +5540,17 @@ func (s InvalidImportPath) GoString() string { func newErrorInvalidImportPath(v protocol.ResponseMetadata) error { return &InvalidImportPath{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidImportPath) Code() string { +func (s *InvalidImportPath) Code() string { return "InvalidImportPath" } // Message returns the exception's message. -func (s InvalidImportPath) Message() string { +func (s *InvalidImportPath) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4973,22 +5558,22 @@ func (s InvalidImportPath) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidImportPath) OrigErr() error { +func (s *InvalidImportPath) OrigErr() error { return nil } -func (s InvalidImportPath) Error() string { +func (s *InvalidImportPath) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidImportPath) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidImportPath) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidImportPath) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidImportPath) RequestID() string { + return s.RespMetadata.RequestID } // One or more network settings specified in the request are invalid. InvalidVpcId @@ -4998,8 +5583,8 @@ func (s InvalidImportPath) RequestID() string { // of IDs for security groups that are either invalid or not part of the VPC // specified. type InvalidNetworkSettings struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The ID of your Amazon EC2 security group. This ID is used to control network // access to the endpoint that Amazon FSx creates on your behalf in each subnet. 
@@ -5029,17 +5614,17 @@ func (s InvalidNetworkSettings) GoString() string { func newErrorInvalidNetworkSettings(v protocol.ResponseMetadata) error { return &InvalidNetworkSettings{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNetworkSettings) Code() string { +func (s *InvalidNetworkSettings) Code() string { return "InvalidNetworkSettings" } // Message returns the exception's message. -func (s InvalidNetworkSettings) Message() string { +func (s *InvalidNetworkSettings) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5047,29 +5632,29 @@ func (s InvalidNetworkSettings) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNetworkSettings) OrigErr() error { +func (s *InvalidNetworkSettings) OrigErr() error { return nil } -func (s InvalidNetworkSettings) Error() string { +func (s *InvalidNetworkSettings) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNetworkSettings) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNetworkSettings) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNetworkSettings) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNetworkSettings) RequestID() string { + return s.RespMetadata.RequestID } // An invalid value for PerUnitStorageThroughput was provided. Please create // your file system again, using a valid value. type InvalidPerUnitStorageThroughput struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -5087,17 +5672,17 @@ func (s InvalidPerUnitStorageThroughput) GoString() string { func newErrorInvalidPerUnitStorageThroughput(v protocol.ResponseMetadata) error { return &InvalidPerUnitStorageThroughput{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPerUnitStorageThroughput) Code() string { +func (s *InvalidPerUnitStorageThroughput) Code() string { return "InvalidPerUnitStorageThroughput" } // Message returns the exception's message. -func (s InvalidPerUnitStorageThroughput) Message() string { +func (s *InvalidPerUnitStorageThroughput) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5105,37 +5690,37 @@ func (s InvalidPerUnitStorageThroughput) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPerUnitStorageThroughput) OrigErr() error { +func (s *InvalidPerUnitStorageThroughput) OrigErr() error { return nil } -func (s InvalidPerUnitStorageThroughput) Error() string { +func (s *InvalidPerUnitStorageThroughput) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPerUnitStorageThroughput) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPerUnitStorageThroughput) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
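// Illustrative sketch, not part of the vendored SDK or this patch. Because the
// exception types in these hunks now embed an exported RespMetadata field and
// use pointer receivers, their StatusCode() and RequestID() methods should be
// reachable through the standard awserr.RequestFailure interface, which is
// useful when logging failed FSx calls. Treat the interface assertion as an
// assumption about how the SDK surfaces these modeled errors.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	if _, err := svc.DescribeFileSystems(&fsx.DescribeFileSystemsInput{}); err != nil {
		// awserr.RequestFailure exposes the HTTP status and request ID that the
		// exported RespMetadata field carries.
		if rf, ok := err.(awserr.RequestFailure); ok {
			log.Printf("code=%s status=%d request-id=%s",
				rf.Code(), rf.StatusCode(), rf.RequestID())
			return
		}
		log.Fatal(err)
	}
}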
-func (s InvalidPerUnitStorageThroughput) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPerUnitStorageThroughput) RequestID() string { + return s.RespMetadata.RequestID } // The request object for ListTagsForResource operation. type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // (Optional) Maximum number of tags to return in the response (integer). This - // parameter value must be greater than 0. The number of items that Amazon FSx - // returns is the minimum of the MaxResults parameter specified in the request - // and the service's internal maximum number of items per page. + // Maximum number of tags to return in the response (integer). This parameter + // value must be greater than 0. The number of items that Amazon FSx returns + // is the minimum of the MaxResults parameter specified in the request and the + // service's internal maximum number of items per page. MaxResults *int64 `min:"1" type:"integer"` - // (Optional) Opaque pagination token returned from a previous ListTagsForResource - // operation (String). If a token present, the action continues the list from - // where the returning call left off. + // Opaque pagination token returned from a previous ListTagsForResource operation + // (String). If a token present, the action continues the list from where the + // returning call left off. NextToken *string `min:"1" type:"string"` // The ARN of the Amazon FSx resource that will have its tags listed. @@ -5232,13 +5817,51 @@ func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput type LustreFileSystemConfiguration struct { _ struct{} `type:"structure"` + // The number of days to retain automatic backups. Setting this to 0 disables + // automatic backups. You can retain automatic backups for a maximum of 90 days. + // The default is 0. + AutomaticBackupRetentionDays *int64 `type:"integer"` + + // A boolean flag indicating whether tags on the file system should be copied + // to backups. If it's set to true, all tags on the file system are copied to + // all automatic backups and any user-initiated backups where the user doesn't + // specify any tags. If this value is true, and you specify one or more tags, + // only the specified tags are copied to backups. If you specify one or more + // tags when creating a user-initiated backup, no tags are copied from the file + // system, regardless of this value. (Default = false) + CopyTagsToBackups *bool `type:"boolean"` + + // A recurring daily time, in the format HH:MM. HH is the zero-padded hour of + // the day (0-23), and MM is the zero-padded minute of the hour. For example, + // 05:00 specifies 5 AM daily. + DailyAutomaticBackupStartTime *string `min:"5" type:"string"` + // The data repository configuration object for Lustre file systems returned // in the response of the CreateFileSystem operation. DataRepositoryConfiguration *DataRepositoryConfiguration `type:"structure"` - // The deployment type of the FSX for Lustre file system. + // The deployment type of the FSX for Lustre file system. Scratch deployment + // type is designed for temporary storage and shorter-term processing of data. + // + // SCRATCH_1 and SCRATCH_2 deployment types are best suited for when you need + // temporary storage and shorter-term processing of data. The SCRATCH_2 deployment + // type provides in-transit encryption of data and higher burst throughput capacity + // than SCRATCH_1. 
+ // + // The PERSISTENT_1 deployment type is used for longer-term storage and workloads + // and encryption of data in transit. To learn more about deployment types, + // see FSx for Lustre Deployment Options (https://docs.aws.amazon.com/fsx/latest/LustreGuide/lustre-deployment-types.html). + // (Default = SCRATCH_1) DeploymentType *string `type:"string" enum:"LustreDeploymentType"` + // The type of drive cache used by PERSISTENT_1 file systems that are provisioned + // with HDD storage devices. This parameter is required when storage type is + // HDD. Set to READ, improve the performance for frequently accessed files and + // allows 20% of the total storage capacity of the file system to be cached. + // + // This parameter is required when StorageType is set to HDD. + DriveCacheType *string `type:"string" enum:"DriveCacheType"` + // You use the MountName value when mounting the file system. // // For the SCRATCH_1 deployment type, this value is always "fsx". For SCRATCH_2 @@ -5249,11 +5872,15 @@ type LustreFileSystemConfiguration struct { // Per unit storage throughput represents the megabytes per second of read or // write throughput per 1 tebibyte of storage provisioned. File system throughput // capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). - // This option is only valid for PERSISTENT_1 deployment types. Valid values - // are 50, 100, 200. - PerUnitStorageThroughput *int64 `min:"50" type:"integer"` + // This option is only valid for PERSISTENT_1 deployment types. + // + // Valid values for SSD storage: 50, 100, 200. Valid values for HDD storage: + // 12, 40. + PerUnitStorageThroughput *int64 `min:"12" type:"integer"` - // The UTC time that you want to begin your weekly maintenance window. + // The preferred start time to perform weekly maintenance, formatted d:HH:MM + // in the UTC time zone. d is the weekday number, from 1 through 7, beginning + // with Monday and ending with Sunday. WeeklyMaintenanceStartTime *string `min:"7" type:"string"` } @@ -5267,6 +5894,24 @@ func (s LustreFileSystemConfiguration) GoString() string { return s.String() } +// SetAutomaticBackupRetentionDays sets the AutomaticBackupRetentionDays field's value. +func (s *LustreFileSystemConfiguration) SetAutomaticBackupRetentionDays(v int64) *LustreFileSystemConfiguration { + s.AutomaticBackupRetentionDays = &v + return s +} + +// SetCopyTagsToBackups sets the CopyTagsToBackups field's value. +func (s *LustreFileSystemConfiguration) SetCopyTagsToBackups(v bool) *LustreFileSystemConfiguration { + s.CopyTagsToBackups = &v + return s +} + +// SetDailyAutomaticBackupStartTime sets the DailyAutomaticBackupStartTime field's value. +func (s *LustreFileSystemConfiguration) SetDailyAutomaticBackupStartTime(v string) *LustreFileSystemConfiguration { + s.DailyAutomaticBackupStartTime = &v + return s +} + // SetDataRepositoryConfiguration sets the DataRepositoryConfiguration field's value. func (s *LustreFileSystemConfiguration) SetDataRepositoryConfiguration(v *DataRepositoryConfiguration) *LustreFileSystemConfiguration { s.DataRepositoryConfiguration = v @@ -5279,6 +5924,12 @@ func (s *LustreFileSystemConfiguration) SetDeploymentType(v string) *LustreFileS return s } +// SetDriveCacheType sets the DriveCacheType field's value. +func (s *LustreFileSystemConfiguration) SetDriveCacheType(v string) *LustreFileSystemConfiguration { + s.DriveCacheType = &v + return s +} + // SetMountName sets the MountName field's value. 
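// Illustrative sketch, not part of the vendored SDK or this patch.
// WeeklyMaintenanceStartTime is documented above as "d:HH:MM" in UTC, where d
// is the weekday number 1 through 7 starting with Monday. This small helper
// builds that string from a time.Weekday and a clock time; the function name
// is invented for the example.
package main

import (
	"fmt"
	"time"
)

// weeklyMaintenanceStartTime formats an FSx weekly maintenance window.
// FSx numbers weekdays 1 (Monday) through 7 (Sunday), while Go's time.Weekday
// numbers them 0 (Sunday) through 6 (Saturday), so Sunday maps to 7.
func weeklyMaintenanceStartTime(day time.Weekday, hour, minute int) string {
	d := int(day)
	if d == 0 {
		d = 7
	}
	return fmt.Sprintf("%d:%02d:%02d", d, hour, minute)
}

func main() {
	// "3:02:30" means Wednesday at 02:30 UTC.
	fmt.Println(weeklyMaintenanceStartTime(time.Wednesday, 2, 30))
}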
func (s *LustreFileSystemConfiguration) SetMountName(v string) *LustreFileSystemConfiguration { s.MountName = &v @@ -5299,8 +5950,8 @@ func (s *LustreFileSystemConfiguration) SetWeeklyMaintenanceStartTime(v string) // A file system configuration is required for this operation. type MissingFileSystemConfiguration struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -5318,17 +5969,17 @@ func (s MissingFileSystemConfiguration) GoString() string { func newErrorMissingFileSystemConfiguration(v protocol.ResponseMetadata) error { return &MissingFileSystemConfiguration{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MissingFileSystemConfiguration) Code() string { +func (s *MissingFileSystemConfiguration) Code() string { return "MissingFileSystemConfiguration" } // Message returns the exception's message. -func (s MissingFileSystemConfiguration) Message() string { +func (s *MissingFileSystemConfiguration) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5336,29 +5987,29 @@ func (s MissingFileSystemConfiguration) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MissingFileSystemConfiguration) OrigErr() error { +func (s *MissingFileSystemConfiguration) OrigErr() error { return nil } -func (s MissingFileSystemConfiguration) Error() string { +func (s *MissingFileSystemConfiguration) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MissingFileSystemConfiguration) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MissingFileSystemConfiguration) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MissingFileSystemConfiguration) RequestID() string { - return s.respMetadata.RequestID +func (s *MissingFileSystemConfiguration) RequestID() string { + return s.RespMetadata.RequestID } // The resource specified for the tagging operation is not a resource type owned // by Amazon FSx. Use the API of the relevant service to perform the operation. type NotServiceResourceError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -5381,17 +6032,17 @@ func (s NotServiceResourceError) GoString() string { func newErrorNotServiceResourceError(v protocol.ResponseMetadata) error { return &NotServiceResourceError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotServiceResourceError) Code() string { +func (s *NotServiceResourceError) Code() string { return "NotServiceResourceError" } // Message returns the exception's message. -func (s NotServiceResourceError) Message() string { +func (s *NotServiceResourceError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5399,28 +6050,28 @@ func (s NotServiceResourceError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
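// Illustrative sketch, not part of the vendored SDK or this patch. The
// LustreFileSystemConfiguration docs above state that PerUnitStorageThroughput
// only applies to PERSISTENT_1 deployments, with valid values 50, 100, 200 for
// SSD storage and 12, 40 for HDD storage. This helper encodes that table as a
// client-side sanity check; the function name is invented for the example.
package main

import "fmt"

// validPerUnitStorageThroughput reports whether the given MB/s/TiB value is
// allowed for the given storage type ("SSD" or "HDD") on PERSISTENT_1 systems.
func validPerUnitStorageThroughput(storageType string, mbPerTiB int64) bool {
	valid := map[string][]int64{
		"SSD": {50, 100, 200},
		"HDD": {12, 40},
	}
	for _, v := range valid[storageType] {
		if v == mbPerTiB {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(validPerUnitStorageThroughput("HDD", 40)) // true
	fmt.Println(validPerUnitStorageThroughput("SSD", 40)) // false
}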
-func (s NotServiceResourceError) OrigErr() error { +func (s *NotServiceResourceError) OrigErr() error { return nil } -func (s NotServiceResourceError) Error() string { +func (s *NotServiceResourceError) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s NotServiceResourceError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotServiceResourceError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotServiceResourceError) RequestID() string { - return s.respMetadata.RequestID +func (s *NotServiceResourceError) RequestID() string { + return s.RespMetadata.RequestID } // The resource specified does not support tagging. type ResourceDoesNotSupportTagging struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -5443,17 +6094,17 @@ func (s ResourceDoesNotSupportTagging) GoString() string { func newErrorResourceDoesNotSupportTagging(v protocol.ResponseMetadata) error { return &ResourceDoesNotSupportTagging{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceDoesNotSupportTagging) Code() string { +func (s *ResourceDoesNotSupportTagging) Code() string { return "ResourceDoesNotSupportTagging" } // Message returns the exception's message. -func (s ResourceDoesNotSupportTagging) Message() string { +func (s *ResourceDoesNotSupportTagging) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5461,28 +6112,28 @@ func (s ResourceDoesNotSupportTagging) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceDoesNotSupportTagging) OrigErr() error { +func (s *ResourceDoesNotSupportTagging) OrigErr() error { return nil } -func (s ResourceDoesNotSupportTagging) Error() string { +func (s *ResourceDoesNotSupportTagging) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceDoesNotSupportTagging) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceDoesNotSupportTagging) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceDoesNotSupportTagging) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceDoesNotSupportTagging) RequestID() string { + return s.RespMetadata.RequestID } // The resource specified by the Amazon Resource Name (ARN) can't be found. type ResourceNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -5505,17 +6156,17 @@ func (s ResourceNotFound) GoString() string { func newErrorResourceNotFound(v protocol.ResponseMetadata) error { return &ResourceNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ResourceNotFound) Code() string { +func (s *ResourceNotFound) Code() string { return "ResourceNotFound" } // Message returns the exception's message. -func (s ResourceNotFound) Message() string { +func (s *ResourceNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5523,22 +6174,22 @@ func (s ResourceNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFound) OrigErr() error { +func (s *ResourceNotFound) OrigErr() error { return nil } -func (s ResourceNotFound) Error() string { +func (s *ResourceNotFound) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFound) RequestID() string { + return s.RespMetadata.RequestID } // The configuration of the self-managed Microsoft Active Directory (AD) directory @@ -5757,7 +6408,7 @@ func (s *SelfManagedActiveDirectoryConfiguration) SetUserName(v string) *SelfMan } // The configuration that Amazon FSx uses to join the Windows File Server instance -// to the self-managed Microsoft Active Directory (AD) directory. +// to a self-managed Microsoft Active Directory (AD) directory. type SelfManagedActiveDirectoryConfigurationUpdates struct { _ struct{} `type:"structure"` @@ -5826,8 +6477,8 @@ func (s *SelfManagedActiveDirectoryConfigurationUpdates) SetUserName(v string) * // An error indicating that a particular service limit was exceeded. You can // increase some service limits by contacting AWS Support. type ServiceLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Enumeration of the service limit that was exceeded. // @@ -5850,17 +6501,17 @@ func (s ServiceLimitExceeded) GoString() string { func newErrorServiceLimitExceeded(v protocol.ResponseMetadata) error { return &ServiceLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceLimitExceeded) Code() string { +func (s *ServiceLimitExceeded) Code() string { return "ServiceLimitExceeded" } // Message returns the exception's message. -func (s ServiceLimitExceeded) Message() string { +func (s *ServiceLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5868,22 +6519,22 @@ func (s ServiceLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceLimitExceeded) OrigErr() error { +func (s *ServiceLimitExceeded) OrigErr() error { return nil } -func (s ServiceLimitExceeded) Error() string { +func (s *ServiceLimitExceeded) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ServiceLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Specifies a key-value pair for a resource tag. @@ -5892,13 +6543,17 @@ type Tag struct { // A value that specifies the TagKey, the name of the tag. Tag keys must be // unique for the resource to which they are attached. - Key *string `min:"1" type:"string"` + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` // A value that specifies the TagValue, the value assigned to the corresponding // tag key. Tag values can be null and don't have to be unique in a tag set. // For example, you can have a key-value pair in a tag set of finances : April // and also of payroll : April. - Value *string `type:"string"` + // + // Value is a required field + Value *string `type:"string" required:"true"` } // String returns the string representation @@ -5914,9 +6569,15 @@ func (s Tag) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *Tag) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } if s.Key != nil && len(*s.Key) < 1 { invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } if invalidParams.Len() > 0 { return invalidParams @@ -6024,8 +6685,8 @@ func (s TagResourceOutput) GoString() string { // The requested operation is not supported for this resource or API. type UnsupportedOperation struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A detailed error message. Message_ *string `locationName:"Message" min:"1" type:"string"` @@ -6043,17 +6704,17 @@ func (s UnsupportedOperation) GoString() string { func newErrorUnsupportedOperation(v protocol.ResponseMetadata) error { return &UnsupportedOperation{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedOperation) Code() string { +func (s *UnsupportedOperation) Code() string { return "UnsupportedOperation" } // Message returns the exception's message. -func (s UnsupportedOperation) Message() string { +func (s *UnsupportedOperation) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6061,22 +6722,22 @@ func (s UnsupportedOperation) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedOperation) OrigErr() error { +func (s *UnsupportedOperation) OrigErr() error { return nil } -func (s UnsupportedOperation) Error() string { +func (s *UnsupportedOperation) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedOperation) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedOperation) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedOperation) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedOperation) RequestID() string { + return s.RespMetadata.RequestID } // The request object for UntagResource action. 
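// Illustrative sketch, not part of the vendored SDK or this patch. The Tag
// hunks above make both Key and Value required fields, and Validate() now
// reports a missing Value as well as a missing or too-short Key. A quick check
// before building tag lists:
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	ok := fsx.Tag{Key: aws.String("env"), Value: aws.String("prod")}
	missing := fsx.Tag{Key: aws.String("env")} // no Value set

	fmt.Println(ok.Validate())      // <nil>
	fmt.Println(missing.Validate()) // reports the missing required Value field
}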
@@ -6158,12 +6819,12 @@ func (s UntagResourceOutput) GoString() string { type UpdateFileSystemInput struct { _ struct{} `type:"structure"` - // (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to - // ensure idempotent updates. This string is automatically filled on your behalf - // when you use the AWS Command Line Interface (AWS CLI) or an AWS SDK. + // A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent + // updates. This string is automatically filled on your behalf when you use + // the AWS Command Line Interface (AWS CLI) or an AWS SDK. ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` - // The globally unique ID of the file system, assigned by Amazon FSx. + // Identifies the file system that you are updating. // // FileSystemId is a required field FileSystemId *string `min:"11" type:"string" required:"true"` @@ -6172,9 +6833,18 @@ type UpdateFileSystemInput struct { // UpdateFileSystem operation. LustreConfiguration *UpdateFileSystemLustreConfiguration `type:"structure"` - // The configuration update for this Microsoft Windows file system. The only - // supported options are for backup and maintenance and for self-managed Active - // Directory configuration. + // Use this parameter to increase the storage capacity of an Amazon FSx for + // Windows File Server file system. Specifies the storage capacity target value, + // GiB, for the file system you're updating. The storage capacity target value + // must be at least 10 percent (%) greater than the current storage capacity + // value. In order to increase storage capacity, the file system needs to have + // at least 16 MB/s of throughput capacity. You cannot make a storage capacity + // increase request if there is an existing storage capacity increase request + // in progress. For more information, see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). + StorageCapacity *int64 `type:"integer"` + + // The configuration updates for an Amazon FSx for Windows File Server file + // system. WindowsConfiguration *UpdateFileSystemWindowsConfiguration `type:"structure"` } @@ -6235,6 +6905,12 @@ func (s *UpdateFileSystemInput) SetLustreConfiguration(v *UpdateFileSystemLustre return s } +// SetStorageCapacity sets the StorageCapacity field's value. +func (s *UpdateFileSystemInput) SetStorageCapacity(v int64) *UpdateFileSystemInput { + s.StorageCapacity = &v + return s +} + // SetWindowsConfiguration sets the WindowsConfiguration field's value. func (s *UpdateFileSystemInput) SetWindowsConfiguration(v *UpdateFileSystemWindowsConfiguration) *UpdateFileSystemInput { s.WindowsConfiguration = v @@ -6246,7 +6922,42 @@ func (s *UpdateFileSystemInput) SetWindowsConfiguration(v *UpdateFileSystemWindo type UpdateFileSystemLustreConfiguration struct { _ struct{} `type:"structure"` - // The preferred time to perform weekly maintenance, in the UTC time zone. + // (Optional) When you create your file system, your existing S3 objects appear + // as file and directory listings. Use this property to choose how Amazon FSx + // keeps your file and directory listing up to date as you add or modify objects + // in your linked S3 bucket. AutoImportPolicy can have the following values: + // + // * NONE - (Default) AutoImport is off. Amazon FSx only updates file and + // directory listings from the linked S3 bucket when the file system is created. 
+ // FSx does not update the file and directory listing for any new or changed + // objects after choosing this option. + // + // * NEW - AutoImport is on. Amazon FSx automatically imports directory listings + // of any new objects added to the linked S3 bucket that do not currently + // exist in the FSx file system. + // + // * NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file + // and directory listings of any new objects added to the S3 bucket and any + // existing objects that are changed in the S3 bucket after you choose this + // option. + // + // For more information, see Automatically import updates from your S3 bucket + // (https://docs.aws.amazon.com/fsx/latest/LustreGuide/autoimport-data-repo.html). + AutoImportPolicy *string `type:"string" enum:"AutoImportPolicyType"` + + // The number of days to retain automatic backups. Setting this to 0 disables + // automatic backups. You can retain automatic backups for a maximum of 90 days. + // The default is 0. + AutomaticBackupRetentionDays *int64 `type:"integer"` + + // A recurring daily time, in the format HH:MM. HH is the zero-padded hour of + // the day (0-23), and MM is the zero-padded minute of the hour. For example, + // 05:00 specifies 5 AM daily. + DailyAutomaticBackupStartTime *string `min:"5" type:"string"` + + // (Optional) The preferred start time to perform weekly maintenance, formatted + // d:HH:MM in the UTC time zone. d is the weekday number, from 1 through 7, + // beginning with Monday and ending with Sunday. WeeklyMaintenanceStartTime *string `min:"7" type:"string"` } @@ -6263,6 +6974,9 @@ func (s UpdateFileSystemLustreConfiguration) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateFileSystemLustreConfiguration) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateFileSystemLustreConfiguration"} + if s.DailyAutomaticBackupStartTime != nil && len(*s.DailyAutomaticBackupStartTime) < 5 { + invalidParams.Add(request.NewErrParamMinLen("DailyAutomaticBackupStartTime", 5)) + } if s.WeeklyMaintenanceStartTime != nil && len(*s.WeeklyMaintenanceStartTime) < 7 { invalidParams.Add(request.NewErrParamMinLen("WeeklyMaintenanceStartTime", 7)) } @@ -6273,6 +6987,24 @@ func (s *UpdateFileSystemLustreConfiguration) Validate() error { return nil } +// SetAutoImportPolicy sets the AutoImportPolicy field's value. +func (s *UpdateFileSystemLustreConfiguration) SetAutoImportPolicy(v string) *UpdateFileSystemLustreConfiguration { + s.AutoImportPolicy = &v + return s +} + +// SetAutomaticBackupRetentionDays sets the AutomaticBackupRetentionDays field's value. +func (s *UpdateFileSystemLustreConfiguration) SetAutomaticBackupRetentionDays(v int64) *UpdateFileSystemLustreConfiguration { + s.AutomaticBackupRetentionDays = &v + return s +} + +// SetDailyAutomaticBackupStartTime sets the DailyAutomaticBackupStartTime field's value. +func (s *UpdateFileSystemLustreConfiguration) SetDailyAutomaticBackupStartTime(v string) *UpdateFileSystemLustreConfiguration { + s.DailyAutomaticBackupStartTime = &v + return s +} + // SetWeeklyMaintenanceStartTime sets the WeeklyMaintenanceStartTime field's value. 
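// Illustrative sketch, not part of the vendored SDK or this patch. It uses the
// new UpdateFileSystemLustreConfiguration fields from the hunks above to switch
// auto-import to NEW_CHANGED and enable daily automatic backups. The file
// system ID is a placeholder, and UpdateFileSystemInput.Validate is assumed
// from the generated input types.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

func main() {
	svc := fsx.New(session.Must(session.NewSession()))

	input := &fsx.UpdateFileSystemInput{
		FileSystemId: aws.String("fs-0123456789abcdef0"),
		LustreConfiguration: &fsx.UpdateFileSystemLustreConfiguration{
			AutoImportPolicy:              aws.String("NEW_CHANGED"),
			AutomaticBackupRetentionDays:  aws.Int64(7),
			DailyAutomaticBackupStartTime: aws.String("03:00"), // HH:MM, UTC
		},
	}
	// Validate catches, for example, a DailyAutomaticBackupStartTime shorter
	// than the documented HH:MM format before the request is sent.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}

	out, err := svc.UpdateFileSystem(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.FileSystem.FileSystemId))
}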
func (s *UpdateFileSystemLustreConfiguration) SetWeeklyMaintenanceStartTime(v string) *UpdateFileSystemLustreConfiguration { s.WeeklyMaintenanceStartTime = &v @@ -6303,25 +7035,38 @@ func (s *UpdateFileSystemOutput) SetFileSystem(v *FileSystem) *UpdateFileSystemO return s } -// Updates the Microsoft Windows configuration for an existing Amazon FSx for -// Windows File Server file system. Amazon FSx overwrites existing properties -// with non-null values provided in the request. If you don't specify a non-null -// value for a property, that property is not updated. +// Updates the configuration for an existing Amazon FSx for Windows File Server +// file system. Amazon FSx only overwrites existing properties with non-null +// values provided in the request. type UpdateFileSystemWindowsConfiguration struct { _ struct{} `type:"structure"` - // The number of days to retain automatic backups. Setting this to 0 disables - // automatic backups. You can retain automatic backups for a maximum of 35 days. + // The number of days to retain automatic daily backups. Setting this to zero + // (0) disables automatic daily backups. You can retain automatic daily backups + // for a maximum of 90 days. For more information, see Working with Automatic + // Daily Backups (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/using-backups.html#automatic-backups). AutomaticBackupRetentionDays *int64 `type:"integer"` - // The preferred time to take daily automatic backups, in the UTC time zone. + // The preferred time to start the daily automatic backup, in the UTC time zone, + // for example, 02:00 DailyAutomaticBackupStartTime *string `min:"5" type:"string"` // The configuration Amazon FSx uses to join the Windows File Server instance - // to the self-managed Microsoft AD directory. + // to the self-managed Microsoft AD directory. You cannot make a self-managed + // Microsoft AD update request if there is an existing self-managed Microsoft + // AD update request in progress. SelfManagedActiveDirectoryConfiguration *SelfManagedActiveDirectoryConfigurationUpdates `type:"structure"` - // The preferred time to perform weekly maintenance, in the UTC time zone. + // Sets the target value for a file system's throughput capacity, in MB/s, that + // you are updating the file system to. Valid values are 8, 16, 32, 64, 128, + // 256, 512, 1024, 2048. You cannot make a throughput capacity update request + // if there is an existing throughput capacity update request in progress. For + // more information, see Managing Throughput Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-throughput-capacity.html). + ThroughputCapacity *int64 `min:"8" type:"integer"` + + // The preferred start time to perform weekly maintenance, formatted d:HH:MM + // in the UTC time zone. Where d is the weekday number, from 1 through 7, with + // 1 = Monday and 7 = Sunday. 
WeeklyMaintenanceStartTime *string `min:"7" type:"string"` } @@ -6341,6 +7086,9 @@ func (s *UpdateFileSystemWindowsConfiguration) Validate() error { if s.DailyAutomaticBackupStartTime != nil && len(*s.DailyAutomaticBackupStartTime) < 5 { invalidParams.Add(request.NewErrParamMinLen("DailyAutomaticBackupStartTime", 5)) } + if s.ThroughputCapacity != nil && *s.ThroughputCapacity < 8 { + invalidParams.Add(request.NewErrParamMinValue("ThroughputCapacity", 8)) + } if s.WeeklyMaintenanceStartTime != nil && len(*s.WeeklyMaintenanceStartTime) < 7 { invalidParams.Add(request.NewErrParamMinLen("WeeklyMaintenanceStartTime", 7)) } @@ -6374,6 +7122,12 @@ func (s *UpdateFileSystemWindowsConfiguration) SetSelfManagedActiveDirectoryConf return s } +// SetThroughputCapacity sets the ThroughputCapacity field's value. +func (s *UpdateFileSystemWindowsConfiguration) SetThroughputCapacity(v int64) *UpdateFileSystemWindowsConfiguration { + s.ThroughputCapacity = &v + return s +} + // SetWeeklyMaintenanceStartTime sets the WeeklyMaintenanceStartTime field's value. func (s *UpdateFileSystemWindowsConfiguration) SetWeeklyMaintenanceStartTime(v string) *UpdateFileSystemWindowsConfiguration { s.WeeklyMaintenanceStartTime = &v @@ -6389,7 +7143,7 @@ type WindowsFileSystemConfiguration struct { ActiveDirectoryId *string `min:"12" type:"string"` // The number of days to retain automatic backups. Setting this to 0 disables - // automatic backups. You can retain automatic backups for a maximum of 35 days. + // automatic backups. You can retain automatic backups for a maximum of 90 days. AutomaticBackupRetentionDays *int64 `type:"integer"` // A boolean flag indicating whether tags on the file system should be copied @@ -6407,10 +7161,17 @@ type WindowsFileSystemConfiguration struct { // Specifies the file system deployment type, valid values are the following: // // * MULTI_AZ_1 - Specifies a high availability file system that is configured - // for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability. + // for Multi-AZ redundancy to tolerate temporary Availability Zone (AZ) unavailability, + // and supports SSD and HDD storage. // // * SINGLE_AZ_1 - (Default) Specifies a file system that is configured for - // single AZ redundancy. + // single AZ redundancy, only supports SSD storage. + // + // * SINGLE_AZ_2 - Latest generation Single AZ file system. Specifies a file + // system that is configured for single AZ redundancy and supports SSD and + // HDD storage. + // + // For more information, see Single-AZ and Multi-AZ File Systems (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html). DeploymentType *string `type:"string" enum:"WindowsDeploymentType"` // The list of maintenance operations in progress for this file system. @@ -6421,12 +7182,11 @@ type WindowsFileSystemConfiguration struct { // // Use this IP address when mounting the file system on Linux SMB clients or // Windows SMB clients that are not joined to a Microsoft Active Directory. - // Applicable for both SINGLE_AZ_1 and MULTI_AZ_1 deployment types. This IP - // address is temporarily unavailable when the file system is undergoing maintenance. + // Applicable for all Windows file system deployment types. This IP address + // is temporarily unavailable when the file system is undergoing maintenance. // For Linux and Windows SMB clients that are joined to an Active Directory, - // use the file system's DNSName instead. 
For more information and instruction - // on mapping and mounting file shares, see https://docs.aws.amazon.com/fsx/latest/WindowsGuide/accessing-file-shares.html - // (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/accessing-file-shares.html). + // use the file system's DNSName instead. For more information on mapping and + // mounting file shares, see Accessing File Shares (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/accessing-file-shares.html). PreferredFileServerIp *string `min:"7" type:"string"` // For MULTI_AZ_1 deployment types, it specifies the ID of the subnet where @@ -6434,13 +7194,16 @@ type WindowsFileSystemConfiguration struct { // in SubnetIds property. Amazon FSx serves traffic from this subnet except // in the event of a failover to the secondary file server. // - // For SINGLE_AZ_1 deployment types, this value is the same as that for SubnetIDs. + // For SINGLE_AZ_1 and SINGLE_AZ_2 deployment types, this value is the same + // as that for SubnetIDs. For more information, see Availability and Durability: + // Single-AZ and Multi-AZ File Systems (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/high-availability-multiAZ.html#single-multi-az-resources) PreferredSubnetId *string `min:"15" type:"string"` // For MULTI_AZ_1 deployment types, use this endpoint when performing administrative // tasks on the file system using Amazon FSx Remote PowerShell. // - // For SINGLE_AZ_1 deployment types, this is the DNS name of the file system. + // For SINGLE_AZ_1 and SINGLE_AZ_2 deployment types, this is the DNS name of + // the file system. // // This endpoint is temporarily unavailable when the file system is undergoing // maintenance. @@ -6453,7 +7216,9 @@ type WindowsFileSystemConfiguration struct { // The throughput of an Amazon FSx file system, measured in megabytes per second. ThroughputCapacity *int64 `min:"8" type:"integer"` - // The preferred time to perform weekly maintenance, in the UTC time zone. + // The preferred start time to perform weekly maintenance, formatted d:HH:MM + // in the UTC time zone. d is the weekday number, from 1 through 7, beginning + // with Monday and ending with Sunday. WeeklyMaintenanceStartTime *string `min:"7" type:"string"` } @@ -6559,7 +7324,77 @@ const ( ActiveDirectoryErrorTypeInvalidDomainStage = "INVALID_DOMAIN_STAGE" ) +// ActiveDirectoryErrorType_Values returns all elements of the ActiveDirectoryErrorType enum +func ActiveDirectoryErrorType_Values() []string { + return []string{ + ActiveDirectoryErrorTypeDomainNotFound, + ActiveDirectoryErrorTypeIncompatibleDomainMode, + ActiveDirectoryErrorTypeWrongVpc, + ActiveDirectoryErrorTypeInvalidDomainStage, + } +} + +// Describes the type of administrative action, as follows: +// +// * FILE_SYSTEM_UPDATE - A file system update administrative action initiated +// by the user from the Amazon FSx console, API (UpdateFileSystem), or CLI +// (update-file-system). A +// +// * STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE task to increase +// a file system's storage capacity completes successfully, a STORAGE_OPTIMIZATION +// task starts. Storage optimization is the process of migrating the file +// system data to the new, larger disks. You can track the storage migration +// progress using the ProgressPercent property. When STORAGE_OPTIMIZATION +// completes successfully, the parent FILE_SYSTEM_UPDATE action status changes +// to COMPLETED. For more information, see Managing Storage Capacity (https://docs.aws.amazon.com/fsx/latest/WindowsGuide/managing-storage-capacity.html). 
+const (
+ // AdministrativeActionTypeFileSystemUpdate is a AdministrativeActionType enum value
+ AdministrativeActionTypeFileSystemUpdate = "FILE_SYSTEM_UPDATE"
+
+ // AdministrativeActionTypeStorageOptimization is a AdministrativeActionType enum value
+ AdministrativeActionTypeStorageOptimization = "STORAGE_OPTIMIZATION"
+)
+
+// AdministrativeActionType_Values returns all elements of the AdministrativeActionType enum
+func AdministrativeActionType_Values() []string {
+ return []string{
+ AdministrativeActionTypeFileSystemUpdate,
+ AdministrativeActionTypeStorageOptimization,
+ }
+}
+
+const (
+ // AutoImportPolicyTypeNone is a AutoImportPolicyType enum value
+ AutoImportPolicyTypeNone = "NONE"
+
+ // AutoImportPolicyTypeNew is a AutoImportPolicyType enum value
+ AutoImportPolicyTypeNew = "NEW"
+
+ // AutoImportPolicyTypeNewChanged is a AutoImportPolicyType enum value
+ AutoImportPolicyTypeNewChanged = "NEW_CHANGED"
+)
+
+// AutoImportPolicyType_Values returns all elements of the AutoImportPolicyType enum
+func AutoImportPolicyType_Values() []string {
+ return []string{
+ AutoImportPolicyTypeNone,
+ AutoImportPolicyTypeNew,
+ AutoImportPolicyTypeNewChanged,
+ }
+}
+
 // The lifecycle status of the backup.
+//
+// * AVAILABLE - The backup is fully available.
+//
+// * CREATING - FSx is creating the new user-initiated backup.
+//
+// * TRANSFERRING - For user-initiated backups on Lustre file systems only;
+// FSx is backing up the file system.
+//
+// * DELETED - The backup was deleted and is no longer available.
+//
+// * FAILED - Amazon FSx could not complete the backup.
 const (
 // BackupLifecycleAvailable is a BackupLifecycle enum value
 BackupLifecycleAvailable = "AVAILABLE"
@@ -6567,6 +7402,9 @@ const (
 // BackupLifecycleCreating is a BackupLifecycle enum value
 BackupLifecycleCreating = "CREATING"
+ // BackupLifecycleTransferring is a BackupLifecycle enum value
+ BackupLifecycleTransferring = "TRANSFERRING"
+
 // BackupLifecycleDeleted is a BackupLifecycle enum value
 BackupLifecycleDeleted = "DELETED"
@@ -6574,6 +7412,17 @@ const (
 BackupLifecycleFailed = "FAILED"
 )
+// BackupLifecycle_Values returns all elements of the BackupLifecycle enum
+func BackupLifecycle_Values() []string {
+ return []string{
+ BackupLifecycleAvailable,
+ BackupLifecycleCreating,
+ BackupLifecycleTransferring,
+ BackupLifecycleDeleted,
+ BackupLifecycleFailed,
+ }
+}
+
 // The type of the backup.
const ( // BackupTypeAutomatic is a BackupType enum value @@ -6583,6 +7432,42 @@ const ( BackupTypeUserInitiated = "USER_INITIATED" ) +// BackupType_Values returns all elements of the BackupType enum +func BackupType_Values() []string { + return []string{ + BackupTypeAutomatic, + BackupTypeUserInitiated, + } +} + +const ( + // DataRepositoryLifecycleCreating is a DataRepositoryLifecycle enum value + DataRepositoryLifecycleCreating = "CREATING" + + // DataRepositoryLifecycleAvailable is a DataRepositoryLifecycle enum value + DataRepositoryLifecycleAvailable = "AVAILABLE" + + // DataRepositoryLifecycleMisconfigured is a DataRepositoryLifecycle enum value + DataRepositoryLifecycleMisconfigured = "MISCONFIGURED" + + // DataRepositoryLifecycleUpdating is a DataRepositoryLifecycle enum value + DataRepositoryLifecycleUpdating = "UPDATING" + + // DataRepositoryLifecycleDeleting is a DataRepositoryLifecycle enum value + DataRepositoryLifecycleDeleting = "DELETING" +) + +// DataRepositoryLifecycle_Values returns all elements of the DataRepositoryLifecycle enum +func DataRepositoryLifecycle_Values() []string { + return []string{ + DataRepositoryLifecycleCreating, + DataRepositoryLifecycleAvailable, + DataRepositoryLifecycleMisconfigured, + DataRepositoryLifecycleUpdating, + DataRepositoryLifecycleDeleting, + } +} + const ( // DataRepositoryTaskFilterNameFileSystemId is a DataRepositoryTaskFilterName enum value DataRepositoryTaskFilterNameFileSystemId = "file-system-id" @@ -6591,6 +7476,14 @@ const ( DataRepositoryTaskFilterNameTaskLifecycle = "task-lifecycle" ) +// DataRepositoryTaskFilterName_Values returns all elements of the DataRepositoryTaskFilterName enum +func DataRepositoryTaskFilterName_Values() []string { + return []string{ + DataRepositoryTaskFilterNameFileSystemId, + DataRepositoryTaskFilterNameTaskLifecycle, + } +} + const ( // DataRepositoryTaskLifecyclePending is a DataRepositoryTaskLifecycle enum value DataRepositoryTaskLifecyclePending = "PENDING" @@ -6611,11 +7504,46 @@ const ( DataRepositoryTaskLifecycleCanceling = "CANCELING" ) +// DataRepositoryTaskLifecycle_Values returns all elements of the DataRepositoryTaskLifecycle enum +func DataRepositoryTaskLifecycle_Values() []string { + return []string{ + DataRepositoryTaskLifecyclePending, + DataRepositoryTaskLifecycleExecuting, + DataRepositoryTaskLifecycleFailed, + DataRepositoryTaskLifecycleSucceeded, + DataRepositoryTaskLifecycleCanceled, + DataRepositoryTaskLifecycleCanceling, + } +} + const ( // DataRepositoryTaskTypeExportToRepository is a DataRepositoryTaskType enum value DataRepositoryTaskTypeExportToRepository = "EXPORT_TO_REPOSITORY" ) +// DataRepositoryTaskType_Values returns all elements of the DataRepositoryTaskType enum +func DataRepositoryTaskType_Values() []string { + return []string{ + DataRepositoryTaskTypeExportToRepository, + } +} + +const ( + // DriveCacheTypeNone is a DriveCacheType enum value + DriveCacheTypeNone = "NONE" + + // DriveCacheTypeRead is a DriveCacheType enum value + DriveCacheTypeRead = "READ" +) + +// DriveCacheType_Values returns all elements of the DriveCacheType enum +func DriveCacheType_Values() []string { + return []string{ + DriveCacheTypeNone, + DriveCacheTypeRead, + } +} + // The lifecycle status of the file system. 
const ( // FileSystemLifecycleAvailable is a FileSystemLifecycle enum value @@ -6637,6 +7565,18 @@ const ( FileSystemLifecycleUpdating = "UPDATING" ) +// FileSystemLifecycle_Values returns all elements of the FileSystemLifecycle enum +func FileSystemLifecycle_Values() []string { + return []string{ + FileSystemLifecycleAvailable, + FileSystemLifecycleCreating, + FileSystemLifecycleFailed, + FileSystemLifecycleDeleting, + FileSystemLifecycleMisconfigured, + FileSystemLifecycleUpdating, + } +} + // An enumeration specifying the currently ongoing maintenance operation. const ( // FileSystemMaintenanceOperationPatching is a FileSystemMaintenanceOperation enum value @@ -6646,6 +7586,14 @@ const ( FileSystemMaintenanceOperationBackingUp = "BACKING_UP" ) +// FileSystemMaintenanceOperation_Values returns all elements of the FileSystemMaintenanceOperation enum +func FileSystemMaintenanceOperation_Values() []string { + return []string{ + FileSystemMaintenanceOperationPatching, + FileSystemMaintenanceOperationBackingUp, + } +} + // The type of file system. const ( // FileSystemTypeWindows is a FileSystemType enum value @@ -6655,6 +7603,14 @@ const ( FileSystemTypeLustre = "LUSTRE" ) +// FileSystemType_Values returns all elements of the FileSystemType enum +func FileSystemType_Values() []string { + return []string{ + FileSystemTypeWindows, + FileSystemTypeLustre, + } +} + // The name for a filter. const ( // FilterNameFileSystemId is a FilterName enum value @@ -6662,8 +7618,20 @@ const ( // FilterNameBackupType is a FilterName enum value FilterNameBackupType = "backup-type" + + // FilterNameFileSystemType is a FilterName enum value + FilterNameFileSystemType = "file-system-type" ) +// FilterName_Values returns all elements of the FilterName enum +func FilterName_Values() []string { + return []string{ + FilterNameFileSystemId, + FilterNameBackupType, + FilterNameFileSystemType, + } +} + const ( // LustreDeploymentTypeScratch1 is a LustreDeploymentType enum value LustreDeploymentTypeScratch1 = "SCRATCH_1" @@ -6675,16 +7643,39 @@ const ( LustreDeploymentTypePersistent1 = "PERSISTENT_1" ) +// LustreDeploymentType_Values returns all elements of the LustreDeploymentType enum +func LustreDeploymentType_Values() []string { + return []string{ + LustreDeploymentTypeScratch1, + LustreDeploymentTypeScratch2, + LustreDeploymentTypePersistent1, + } +} + const ( // ReportFormatReportCsv20191124 is a ReportFormat enum value ReportFormatReportCsv20191124 = "REPORT_CSV_20191124" ) +// ReportFormat_Values returns all elements of the ReportFormat enum +func ReportFormat_Values() []string { + return []string{ + ReportFormatReportCsv20191124, + } +} + const ( // ReportScopeFailedFilesOnly is a ReportScope enum value ReportScopeFailedFilesOnly = "FAILED_FILES_ONLY" ) +// ReportScope_Values returns all elements of the ReportScope enum +func ReportScope_Values() []string { + return []string{ + ReportScopeFailedFilesOnly, + } +} + // The types of limits on your service utilization. Limits include file system // count, total throughput capacity, total storage, and total user-initiated // backups. These limits apply for a specific account in a specific AWS Region. 
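// A minimal usage sketch of the additions above (illustrative only, not vendored
// SDK text): the new UpdateFileSystemWindowsConfiguration.SetThroughputCapacity
// setter and the generated *_Values() enum helpers. The file system ID is a
// placeholder.
//
//	import (
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/fsx"
//	)
//
//	conn := fsx.New(session.Must(session.NewSession()))
//
//	// Scale a Windows file system to 64 MB/s using the new setter.
//	input := &fsx.UpdateFileSystemInput{
//		FileSystemId: aws.String("fs-0123456789abcdef0"), // placeholder ID
//		WindowsConfiguration: (&fsx.UpdateFileSystemWindowsConfiguration{}).
//			SetThroughputCapacity(64),
//	}
//	if _, err := conn.UpdateFileSystem(input); err != nil {
//		// handle error
//	}
//
//	// The generated helpers enumerate the valid strings for each new enum,
//	// e.g. fsx.AutoImportPolicyType_Values() returns NONE, NEW, NEW_CHANGED,
//	// which is convenient for validating user-supplied configuration values.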
@@ -6703,10 +7694,77 @@ const ( ServiceLimitTotalUserInitiatedBackups = "TOTAL_USER_INITIATED_BACKUPS" ) +// ServiceLimit_Values returns all elements of the ServiceLimit enum +func ServiceLimit_Values() []string { + return []string{ + ServiceLimitFileSystemCount, + ServiceLimitTotalThroughputCapacity, + ServiceLimitTotalStorage, + ServiceLimitTotalUserInitiatedBackups, + } +} + +const ( + // StatusFailed is a Status enum value + StatusFailed = "FAILED" + + // StatusInProgress is a Status enum value + StatusInProgress = "IN_PROGRESS" + + // StatusPending is a Status enum value + StatusPending = "PENDING" + + // StatusCompleted is a Status enum value + StatusCompleted = "COMPLETED" + + // StatusUpdatedOptimizing is a Status enum value + StatusUpdatedOptimizing = "UPDATED_OPTIMIZING" +) + +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusFailed, + StatusInProgress, + StatusPending, + StatusCompleted, + StatusUpdatedOptimizing, + } +} + +// The storage type for your Amazon FSx file system. +const ( + // StorageTypeSsd is a StorageType enum value + StorageTypeSsd = "SSD" + + // StorageTypeHdd is a StorageType enum value + StorageTypeHdd = "HDD" +) + +// StorageType_Values returns all elements of the StorageType enum +func StorageType_Values() []string { + return []string{ + StorageTypeSsd, + StorageTypeHdd, + } +} + const ( // WindowsDeploymentTypeMultiAz1 is a WindowsDeploymentType enum value WindowsDeploymentTypeMultiAz1 = "MULTI_AZ_1" // WindowsDeploymentTypeSingleAz1 is a WindowsDeploymentType enum value WindowsDeploymentTypeSingleAz1 = "SINGLE_AZ_1" + + // WindowsDeploymentTypeSingleAz2 is a WindowsDeploymentType enum value + WindowsDeploymentTypeSingleAz2 = "SINGLE_AZ_2" ) + +// WindowsDeploymentType_Values returns all elements of the WindowsDeploymentType enum +func WindowsDeploymentType_Values() []string { + return []string{ + WindowsDeploymentTypeMultiAz1, + WindowsDeploymentTypeSingleAz1, + WindowsDeploymentTypeSingleAz2, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go b/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go index 4d7aa9cd6..41c2a1d5d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) @@ -49,6 +49,9 @@ const ( // svc := fsx.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *FSx { c := p.ClientConfig(EndpointsID, cfgs...) 
+ if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "fsx" + } return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go index 9f89e27cd..d756494a8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go @@ -145,6 +145,158 @@ func (c *GameLift) AcceptMatchWithContext(ctx aws.Context, input *AcceptMatchInp return out, req.Send() } +const opClaimGameServer = "ClaimGameServer" + +// ClaimGameServerRequest generates a "aws/request.Request" representing the +// client's request for the ClaimGameServer operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ClaimGameServer for more information on using the ClaimGameServer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ClaimGameServerRequest method. +// req, resp := client.ClaimGameServerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ClaimGameServer +func (c *GameLift) ClaimGameServerRequest(input *ClaimGameServerInput) (req *request.Request, output *ClaimGameServerOutput) { + op := &request.Operation{ + Name: opClaimGameServer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ClaimGameServerInput{} + } + + output = &ClaimGameServerOutput{} + req = c.newRequest(op, input, output) + return +} + +// ClaimGameServer API operation for Amazon GameLift. +// +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. +// +// Locates an available game server and temporarily reserves it to host gameplay +// and players. This operation is called from a game client or client service +// (such as a matchmaker) to request hosting resources for a new game session. +// In response, GameLift FleetIQ locates an available game server, places it +// in CLAIMED status for 60 seconds, and returns connection information that +// players can use to connect to the game server. +// +// To claim a game server, identify a game server group. You can also specify +// a game server ID, although this approach bypasses GameLift FleetIQ placement +// optimization. Optionally, include game data to pass to the game server at +// the start of a game session, such as a game map or player information. +// +// When a game server is successfully claimed, connection information is returned. +// A claimed game server's utilization status remains AVAILABLE while the claim +// status is set to CLAIMED for up to 60 seconds. This time period gives the +// game server time to update its status to UTILIZED (using UpdateGameServer) +// once players join. If the game server's status is not updated within 60 seconds, +// the game server reverts to unclaimed status and is available to be claimed +// by another request. The claim time period is a fixed value and is not configurable. 
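// A minimal calling sketch (illustrative only, not vendored SDK text); the group
// name is a placeholder, and a specific GameServerId is intentionally omitted so
// that FleetIQ picks the game server:
//
//	svc := gamelift.New(session.Must(session.NewSession()))
//	out, err := svc.ClaimGameServer(&gamelift.ClaimGameServerInput{
//		GameServerGroupName: aws.String("example-gsg"), // placeholder
//	})
//	if err == nil && out.GameServer != nil {
//		// Hand the connection details to the requesting game client.
//		fmt.Println(aws.StringValue(out.GameServer.ConnectionInfo))
//	}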
+// +// If you try to claim a specific game server, this request will fail in the +// following cases: +// +// * If the game server utilization status is UTILIZED. +// +// * If the game server claim status is CLAIMED. +// +// When claiming a specific game server, this request will succeed even if the +// game server is running on an instance in DRAINING status. To avoid this, +// first check the instance status by calling DescribeGameServerInstances. +// +// Learn more +// +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) +// +// Related operations +// +// * RegisterGameServer +// +// * ListGameServers +// +// * ClaimGameServer +// +// * DescribeGameServer +// +// * UpdateGameServer +// +// * DeregisterGameServer +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation ClaimGameServer for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * ConflictException +// The requested operation would cause a conflict with the current state of +// a service resource associated with the request. Resolve the conflict before +// retrying this request. +// +// * OutOfCapacityException +// The specified game server group has no available game servers to fulfill +// a ClaimGameServer request. Clients can retry such requests immediately or +// after a waiting period. +// +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ClaimGameServer +func (c *GameLift) ClaimGameServer(input *ClaimGameServerInput) (*ClaimGameServerOutput, error) { + req, out := c.ClaimGameServerRequest(input) + return out, req.Send() +} + +// ClaimGameServerWithContext is the same as ClaimGameServer with the addition of +// the ability to pass a context and additional request options. +// +// See ClaimGameServer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) ClaimGameServerWithContext(ctx aws.Context, input *ClaimGameServerInput, opts ...request.Option) (*ClaimGameServerOutput, error) { + req, out := c.ClaimGameServerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateAlias = "CreateAlias" // CreateAliasRequest generates a "aws/request.Request" representing the @@ -319,45 +471,40 @@ func (c *GameLift) CreateBuildRequest(input *CreateBuildInput) (req *request.Req // CreateBuild API operation for Amazon GameLift. 
//
-// Creates a new Amazon GameLift build record for your game server binary files
-// and points to the location of your game server build files in an Amazon Simple
-// Storage Service (Amazon S3) location.
-//
-// Game server binaries must be combined into a zip file for use with Amazon
-// GameLift.
-//
-// To create new builds directly from a file directory, use the AWS CLI command
-// upload-build (https://docs.aws.amazon.com/cli/latest/reference/gamelift/upload-build.html)
-// . This helper command uploads build files and creates a new build record
-// in one step, and automatically handles the necessary permissions.
-//
-// The CreateBuild operation should be used only in the following scenarios:
-//
-// * To create a new game build with build files that are in an Amazon S3
-// bucket under your own AWS account. To use this option, you must first
-// give Amazon GameLift access to that Amazon S3 bucket. Then call CreateBuild
-// and specify a build name, operating system, and the Amazon S3 storage
-// location of your game build.
-//
-// * To upload build files directly to Amazon GameLift's Amazon S3 account.
-// To use this option, first call CreateBuild and specify a build name and
-// operating system. This action creates a new build record and returns an
-// Amazon S3 storage location (bucket and key only) and temporary access
-// credentials. Use the credentials to manually upload your build file to
-// the provided storage location (see the Amazon S3 topic Uploading Objects
-// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html)).
-// You can upload build files to the GameLift Amazon S3 location only once.
-//
-// If successful, this operation creates a new build record with a unique build
-// ID and places it in INITIALIZED status. You can use DescribeBuild to check
-// the status of your build. A build must be in READY status before it can be
-// used to create fleets.
+// Creates a new Amazon GameLift build resource for your game server binary
+// files. Game server binaries must be combined into a zip file for use with
+// Amazon GameLift.
+//
+// When setting up a new game build for GameLift, we recommend using the AWS
+// CLI command upload-build (https://docs.aws.amazon.com/cli/latest/reference/gamelift/upload-build.html)
+// . This helper command combines two tasks: (1) it uploads your build files
+// from a file directory to a GameLift Amazon S3 location, and (2) it creates
+// a new build resource.
+//
+// The CreateBuild operation can be used in the following scenarios:
+//
+// * To create a new game build with build files that are in an S3 location
+// under an AWS account that you control. To use this option, you must first
+// give Amazon GameLift access to the S3 bucket. With permissions in place,
+// call CreateBuild and specify a build name, operating system, and the S3
+// storage location of your game build.
+//
+// * To directly upload your build files to a GameLift S3 location. To use
+// this option, first call CreateBuild and specify a build name and operating
+// system. This operation creates a new build resource and also returns an
+// S3 location with temporary access credentials. Use the credentials to
+// manually upload your build files to the specified S3 location. For more
+// information, see Uploading Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html)
+// in the Amazon S3 Developer Guide. Build files can be uploaded to the GameLift
+// S3 location once only; they can't be updated.
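// A sketch of the second scenario (illustrative only, not vendored SDK text);
// the build name and version are placeholders, and svc is an initialized
// *gamelift.GameLift client:
//
//	out, err := svc.CreateBuild(&gamelift.CreateBuildInput{
//		Name:            aws.String("example-build"), // placeholder
//		Version:         aws.String("1.0.0"),         // placeholder
//		OperatingSystem: aws.String(gamelift.OperatingSystemAmazonLinux),
//	})
//	if err == nil {
//		// out.StorageLocation and out.UploadCredentials identify the S3
//		// location and temporary credentials for uploading the zipped build.
//	}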
+// +// If successful, this operation creates a new build resource with a unique +// build ID and places it in INITIALIZED status. A build must be in READY status +// before you can create fleets with it. // // Learn more // // Uploading Your Game (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) -// https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html -// (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // // Create a Build with Files in Amazon S3 (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build) // @@ -476,17 +623,16 @@ func (c *GameLift) CreateFleetRequest(input *CreateFleetInput) (req *request.Req // set some configuration options, and specify the game server to deploy on // the new fleet. // -// To create a new fleet, you must provide the following: (1) a fleet name, -// (2) an EC2 instance type and fleet type (spot or on-demand), (3) the build -// ID for your game build or script ID if using Realtime Servers, and (4) a -// runtime configuration, which determines how game servers will run on each -// instance in the fleet. +// To create a new fleet, provide the following: (1) a fleet name, (2) an EC2 +// instance type and fleet type (spot or on-demand), (3) the build ID for your +// game build or script ID if using Realtime Servers, and (4) a runtime configuration, +// which determines how game servers will run on each instance in the fleet. // // If the CreateFleet call is successful, Amazon GameLift performs the following // tasks. You can track the process of a fleet by checking the fleet status // or by monitoring fleet creation events: // -// * Creates a fleet record. Status: NEW. +// * Creates a fleet resource. Status: NEW. // // * Begins writing events to the fleet event log, which can be accessed // in the Amazon GameLift console. @@ -506,9 +652,9 @@ func (c *GameLift) CreateFleetRequest(input *CreateFleetInput) (req *request.Req // // Learn more // -// Setting Up Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) +// Setting Up Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // -// Debug Fleet Creation Issues (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html#fleets-creating-debug-creation) +// Debug Fleet Creation Issues (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html#fleets-creating-debug-creation) // // Related operations // @@ -522,7 +668,7 @@ func (c *GameLift) CreateFleetRequest(input *CreateFleetInput) (req *request.Req // // * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -584,6 +730,159 @@ func (c *GameLift) CreateFleetWithContext(ctx aws.Context, input *CreateFleetInp return out, req.Send() } +const opCreateGameServerGroup = "CreateGameServerGroup" + +// CreateGameServerGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateGameServerGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See CreateGameServerGroup for more information on using the CreateGameServerGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateGameServerGroupRequest method. +// req, resp := client.CreateGameServerGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameServerGroup +func (c *GameLift) CreateGameServerGroupRequest(input *CreateGameServerGroupInput) (req *request.Request, output *CreateGameServerGroupOutput) { + op := &request.Operation{ + Name: opCreateGameServerGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGameServerGroupInput{} + } + + output = &CreateGameServerGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateGameServerGroup API operation for Amazon GameLift. +// +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. +// +// Creates a GameLift FleetIQ game server group for managing game hosting on +// a collection of Amazon EC2 instances for game hosting. This operation creates +// the game server group, creates an Auto Scaling group in your AWS account, +// and establishes a link between the two groups. You can view the status of +// your game server groups in the GameLift console. Game server group metrics +// and events are emitted to Amazon CloudWatch. +// +// Before creating a new game server group, you must have the following: +// +// * An Amazon EC2 launch template that specifies how to launch Amazon EC2 +// instances with your game server build. For more information, see Launching +// an Instance from a Launch Template (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-launch-templates.html) +// in the Amazon EC2 User Guide. +// +// * An IAM role that extends limited access to your AWS account to allow +// GameLift FleetIQ to create and interact with the Auto Scaling group. For +// more information, see Create IAM roles for cross-service interaction (https://docs.aws.amazon.com/gamelift/latest/developerguide/gsg-iam-permissions-roles.html) +// in the GameLift FleetIQ Developer Guide. +// +// To create a new game server group, specify a unique group name, IAM role +// and Amazon EC2 launch template, and provide a list of instance types that +// can be used in the group. You must also set initial maximum and minimum limits +// on the group's instance count. You can optionally set an Auto Scaling policy +// with target tracking based on a GameLift FleetIQ metric. +// +// Once the game server group and corresponding Auto Scaling group are created, +// you have full access to change the Auto Scaling group's configuration as +// needed. Several properties that are set when creating a game server group, +// including maximum/minimum size and auto-scaling policy settings, must be +// updated directly in the Auto Scaling group. Keep in mind that some Auto Scaling +// group properties are periodically updated by GameLift FleetIQ as part of +// its balancing activities to optimize for availability and cost. 
+// +// Learn more +// +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) +// +// Related operations +// +// * CreateGameServerGroup +// +// * ListGameServerGroups +// +// * DescribeGameServerGroup +// +// * UpdateGameServerGroup +// +// * DeleteGameServerGroup +// +// * ResumeGameServerGroup +// +// * SuspendGameServerGroup +// +// * DescribeGameServerInstances +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation CreateGameServerGroup for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ConflictException +// The requested operation would cause a conflict with the current state of +// a service resource associated with the request. Resolve the conflict before +// retrying this request. +// +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * LimitExceededException +// The requested operation would cause the resource to exceed the allowed service +// limit. Resolve the issue before retrying. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/CreateGameServerGroup +func (c *GameLift) CreateGameServerGroup(input *CreateGameServerGroupInput) (*CreateGameServerGroupOutput, error) { + req, out := c.CreateGameServerGroupRequest(input) + return out, req.Send() +} + +// CreateGameServerGroupWithContext is the same as CreateGameServerGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateGameServerGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) CreateGameServerGroupWithContext(ctx aws.Context, input *CreateGameServerGroupInput, opts ...request.Option) (*CreateGameServerGroupOutput, error) { + req, out := c.CreateGameServerGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateGameSession = "CreateGameSession" // CreateGameSessionRequest generates a "aws/request.Request" representing the @@ -628,10 +927,10 @@ func (c *GameLift) CreateGameSessionRequest(input *CreateGameSessionInput) (req // CreateGameSession API operation for Amazon GameLift. // -// Creates a multiplayer game session for players. This action creates a game -// session record and assigns an available server process in the specified fleet -// to host the game session. A fleet must have an ACTIVE status before a game -// session can be created in it. +// Creates a multiplayer game session for players. This operation creates a +// game session record and assigns an available server process in the specified +// fleet to host the game session. 
A fleet must have an ACTIVE status before +// a game session can be created in it. // // To create a game session, specify either fleet ID or alias ID and indicate // a maximum number of players to allow in the game session. You can also provide @@ -824,6 +1123,14 @@ func (c *GameLift) CreateGameSessionQueueRequest(input *CreateGameSessionQueueIn // and, if desired, a set of latency policies. If successful, a new queue object // is returned. // +// Learn more +// +// Design a Game Session Queue (https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-design.html) +// +// Create a Game Session Queue (https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-creating.html) +// +// Related operations +// // * CreateGameSessionQueue // // * DescribeGameSessionQueues @@ -940,19 +1247,17 @@ func (c *GameLift) CreateMatchmakingConfigurationRequest(input *CreateMatchmakin // game session for the match; and the maximum time allowed for a matchmaking // attempt. // -// There are two ways to track the progress of matchmaking tickets: (1) polling -// ticket status with DescribeMatchmaking; or (2) receiving notifications with -// Amazon Simple Notification Service (SNS). To use notifications, you first -// need to set up an SNS topic to receive the notifications, and provide the -// topic ARN in the matchmaking configuration. Since notifications promise only -// "best effort" delivery, we recommend calling DescribeMatchmaking if no notifications -// are received within 30 seconds. +// To track the progress of matchmaking tickets, set up an Amazon Simple Notification +// Service (SNS) to receive notifications, and provide the topic ARN in the +// matchmaking configuration. An alternative method, continuously poling ticket +// status with DescribeMatchmaking, should only be used for games in development +// with low matchmaking usage. // // Learn more // // Design a FlexMatch Matchmaker (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-configuration.html) // -// Setting up Notifications for Matchmaking (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) +// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) // // Related operations // @@ -1857,7 +2162,7 @@ func (c *GameLift) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Req // DeleteAlias API operation for Amazon GameLift. // -// Deletes an alias. This action removes all record of the alias. Game clients +// Deletes an alias. This operation removes all record of the alias. Game clients // attempting to access a server process using the deleted alias receive an // error. To delete an alias, specify the alias ID to be deleted. // @@ -1969,16 +2274,16 @@ func (c *GameLift) DeleteBuildRequest(input *DeleteBuildInput) (req *request.Req // DeleteBuild API operation for Amazon GameLift. // -// Deletes a build. This action permanently deletes the build record and any -// uploaded build files. -// -// To delete a build, specify its ID. Deleting a build does not affect the status -// of any active fleets using the build, but you can no longer create new fleets +// Deletes a build. This operation permanently deletes the build resource and +// any uploaded build files. Deleting a build does not affect the status of +// any active fleets using the build, but you can no longer create new fleets // with the deleted build. // +// To delete a build, specify the build ID. 
+// // Learn more // -// Working with Builds (https://docs.aws.amazon.com/gamelift/latest/developerguide/build-intro.html) +// Upload a Custom Server Build (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) // // Related operations // @@ -2096,12 +2401,12 @@ func (c *GameLift) DeleteFleetRequest(input *DeleteFleetInput) (req *request.Req // You do not need to explicitly delete the VPC peering connection--this is // done as part of the delete fleet process. // -// This action removes the fleet's resources and the fleet record. Once a fleet -// is deleted, you can no longer use that fleet. +// This operation removes the fleet and its resources. Once a fleet is deleted, +// you can no longer use any of the resource in that fleet. // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // // Related operations // @@ -2115,7 +2420,7 @@ func (c *GameLift) DeleteFleetRequest(input *DeleteFleetInput) (req *request.Req // // * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2173,76 +2478,107 @@ func (c *GameLift) DeleteFleetWithContext(ctx aws.Context, input *DeleteFleetInp return out, req.Send() } -const opDeleteGameSessionQueue = "DeleteGameSessionQueue" +const opDeleteGameServerGroup = "DeleteGameServerGroup" -// DeleteGameSessionQueueRequest generates a "aws/request.Request" representing the -// client's request for the DeleteGameSessionQueue operation. The "output" return +// DeleteGameServerGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGameServerGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteGameSessionQueue for more information on using the DeleteGameSessionQueue +// See DeleteGameServerGroup for more information on using the DeleteGameServerGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteGameSessionQueueRequest method. -// req, resp := client.DeleteGameSessionQueueRequest(params) +// // Example sending a request using the DeleteGameServerGroupRequest method. 
+// req, resp := client.DeleteGameServerGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameSessionQueue -func (c *GameLift) DeleteGameSessionQueueRequest(input *DeleteGameSessionQueueInput) (req *request.Request, output *DeleteGameSessionQueueOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameServerGroup +func (c *GameLift) DeleteGameServerGroupRequest(input *DeleteGameServerGroupInput) (req *request.Request, output *DeleteGameServerGroupOutput) { op := &request.Operation{ - Name: opDeleteGameSessionQueue, + Name: opDeleteGameServerGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteGameSessionQueueInput{} + input = &DeleteGameServerGroupInput{} } - output = &DeleteGameSessionQueueOutput{} + output = &DeleteGameServerGroupOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DeleteGameSessionQueue API operation for Amazon GameLift. +// DeleteGameServerGroup API operation for Amazon GameLift. // -// Deletes a game session queue. This action means that any StartGameSessionPlacement -// requests that reference this queue will fail. To delete a queue, specify -// the queue name. +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. // -// * CreateGameSessionQueue +// Terminates a game server group and permanently deletes the game server group +// record. You have several options for how these resources are impacted when +// deleting the game server group. Depending on the type of delete operation +// selected, this operation might affect these resources: // -// * DescribeGameSessionQueues +// * The game server group // -// * UpdateGameSessionQueue +// * The corresponding Auto Scaling group // -// * DeleteGameSessionQueue +// * All game servers that are currently running in the group +// +// To delete a game server group, identify the game server group to delete and +// specify the type of delete operation to initiate. Game server groups can +// only be deleted if they are in ACTIVE or ERROR status. +// +// If the delete request is successful, a series of operations are kicked off. +// The game server group status is changed to DELETE_SCHEDULED, which prevents +// new game servers from being registered and stops automatic scaling activity. +// Once all game servers in the game server group are deregistered, GameLift +// FleetIQ can begin deleting resources. If any of the delete operations fail, +// the game server group is placed in ERROR status. +// +// GameLift FleetIQ emits delete events to Amazon CloudWatch. +// +// Learn more +// +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) +// +// Related operations +// +// * CreateGameServerGroup +// +// * ListGameServerGroups +// +// * DescribeGameServerGroup +// +// * UpdateGameServerGroup +// +// * DeleteGameServerGroup +// +// * ResumeGameServerGroup +// +// * SuspendGameServerGroup +// +// * DescribeGameServerInstances // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation DeleteGameSessionQueue for usage and error information. 
+// API operation DeleteGameServerGroup for usage and error information. // // Returned Error Types: -// * InternalServiceException -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// // * InvalidRequestException // One or more parameter values in the request are invalid. Correct the invalid // parameter values before retrying. @@ -2254,21 +2590,135 @@ func (c *GameLift) DeleteGameSessionQueueRequest(input *DeleteGameSessionQueueIn // * UnauthorizedException // The client failed authentication. Clients should not retry such requests. // -// * TaggingFailedException -// The requested tagging operation did not succeed. This may be due to invalid -// tag format or the maximum tag limit may have been exceeded. Resolve the issue -// before retrying. +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameSessionQueue -func (c *GameLift) DeleteGameSessionQueue(input *DeleteGameSessionQueueInput) (*DeleteGameSessionQueueOutput, error) { - req, out := c.DeleteGameSessionQueueRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameServerGroup +func (c *GameLift) DeleteGameServerGroup(input *DeleteGameServerGroupInput) (*DeleteGameServerGroupOutput, error) { + req, out := c.DeleteGameServerGroupRequest(input) return out, req.Send() } -// DeleteGameSessionQueueWithContext is the same as DeleteGameSessionQueue with the addition of +// DeleteGameServerGroupWithContext is the same as DeleteGameServerGroup with the addition of // the ability to pass a context and additional request options. // -// See DeleteGameSessionQueue for details on how to use this API operation. +// See DeleteGameServerGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DeleteGameServerGroupWithContext(ctx aws.Context, input *DeleteGameServerGroupInput, opts ...request.Option) (*DeleteGameServerGroupOutput, error) { + req, out := c.DeleteGameServerGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteGameSessionQueue = "DeleteGameSessionQueue" + +// DeleteGameSessionQueueRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGameSessionQueue operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteGameSessionQueue for more information on using the DeleteGameSessionQueue +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteGameSessionQueueRequest method. 
+// req, resp := client.DeleteGameSessionQueueRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameSessionQueue +func (c *GameLift) DeleteGameSessionQueueRequest(input *DeleteGameSessionQueueInput) (req *request.Request, output *DeleteGameSessionQueueOutput) { + op := &request.Operation{ + Name: opDeleteGameSessionQueue, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGameSessionQueueInput{} + } + + output = &DeleteGameSessionQueueOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteGameSessionQueue API operation for Amazon GameLift. +// +// Deletes a game session queue. Once a queue is successfully deleted, unfulfilled +// StartGameSessionPlacement requests that reference the queue will fail. To +// delete a queue, specify the queue name. +// +// Learn more +// +// Using Multi-Region Queues (https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-intro.html) +// +// Related operations +// +// * CreateGameSessionQueue +// +// * DescribeGameSessionQueues +// +// * UpdateGameSessionQueue +// +// * DeleteGameSessionQueue +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DeleteGameSessionQueue for usage and error information. +// +// Returned Error Types: +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// +// * TaggingFailedException +// The requested tagging operation did not succeed. This may be due to invalid +// tag format or the maximum tag limit may have been exceeded. Resolve the issue +// before retrying. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeleteGameSessionQueue +func (c *GameLift) DeleteGameSessionQueue(input *DeleteGameSessionQueueInput) (*DeleteGameSessionQueueOutput, error) { + req, out := c.DeleteGameSessionQueueRequest(input) + return out, req.Send() +} + +// DeleteGameSessionQueueWithContext is the same as DeleteGameSessionQueue with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteGameSessionQueue for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create @@ -2566,9 +3016,9 @@ func (c *GameLift) DeleteScalingPolicyRequest(input *DeleteScalingPolicyInput) ( // DeleteScalingPolicy API operation for Amazon GameLift. // -// Deletes a fleet scaling policy. This action means that the policy is no longer -// in force and removes all record of it. 
To delete a scaling policy, specify -// both the scaling policy name and the fleet ID it is associated with. +// Deletes a fleet scaling policy. Once deleted, the policy is no longer in +// force and GameLift removes all record of it. To delete a scaling policy, +// specify both the scaling policy name and the fleet ID it is associated with. // // To temporarily suspend scaling policies, call StopFleetActions. This operation // suspends all policies for the fleet. @@ -2675,9 +3125,9 @@ func (c *GameLift) DeleteScriptRequest(input *DeleteScriptInput) (req *request.R // DeleteScript API operation for Amazon GameLift. // -// Deletes a Realtime script. This action permanently deletes the script record. -// If script files were uploaded, they are also deleted (files stored in an -// S3 bucket are not deleted). +// Deletes a Realtime script. This operation permanently deletes the script +// record. If script files were uploaded, they are also deleted (files stored +// in an S3 bucket are not deleted). // // To delete a script, specify the script ID. Before deleting a script, be sure // to terminate all fleets that are deployed with the script being deleted. @@ -2971,6 +3421,126 @@ func (c *GameLift) DeleteVpcPeeringConnectionWithContext(ctx aws.Context, input return out, req.Send() } +const opDeregisterGameServer = "DeregisterGameServer" + +// DeregisterGameServerRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterGameServer operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeregisterGameServer for more information on using the DeregisterGameServer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeregisterGameServerRequest method. +// req, resp := client.DeregisterGameServerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeregisterGameServer +func (c *GameLift) DeregisterGameServerRequest(input *DeregisterGameServerInput) (req *request.Request, output *DeregisterGameServerOutput) { + op := &request.Operation{ + Name: opDeregisterGameServer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeregisterGameServerInput{} + } + + output = &DeregisterGameServerOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeregisterGameServer API operation for Amazon GameLift. +// +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. +// +// Removes the game server from a game server group. As a result of this operation, +// the deregistered game server can no longer be claimed and will not be returned +// in a list of active game servers. +// +// To deregister a game server, specify the game server group and game server +// ID. If successful, this operation emits a CloudWatch event with termination +// timestamp and reason. 
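// A minimal calling sketch (illustrative only, not vendored SDK text); both
// identifiers are placeholders, and svc is an initialized *gamelift.GameLift
// client:
//
//	_, err := svc.DeregisterGameServer(&gamelift.DeregisterGameServerInput{
//		GameServerGroupName: aws.String("example-gsg"),         // placeholder
//		GameServerId:        aws.String("example-game-server"), // placeholder
//	})
//	if err != nil {
//		// handle error
//	}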
+// +// Learn more +// +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) +// +// Related operations +// +// * RegisterGameServer +// +// * ListGameServers +// +// * ClaimGameServer +// +// * DescribeGameServer +// +// * UpdateGameServer +// +// * DeregisterGameServer +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DeregisterGameServer for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DeregisterGameServer +func (c *GameLift) DeregisterGameServer(input *DeregisterGameServerInput) (*DeregisterGameServerOutput, error) { + req, out := c.DeregisterGameServerRequest(input) + return out, req.Send() +} + +// DeregisterGameServerWithContext is the same as DeregisterGameServer with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterGameServer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DeregisterGameServerWithContext(ctx aws.Context, input *DeregisterGameServerInput, opts ...request.Option) (*DeregisterGameServerOutput, error) { + req, out := c.DeregisterGameServerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeAlias = "DescribeAlias" // DescribeAliasRequest generates a "aws/request.Request" representing the @@ -3123,12 +3693,13 @@ func (c *GameLift) DescribeBuildRequest(input *DescribeBuildInput) (req *request // DescribeBuild API operation for Amazon GameLift. // -// Retrieves properties for a build. To request a build record, specify a build -// ID. If successful, an object containing the build properties is returned. +// Retrieves properties for a custom game build. To request a build resource, +// specify a build ID. If successful, an object containing the build properties +// is returned. 
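DeregisterGameServerWithContext, also added above, takes an aws.Context for cancellation. A sketch using a standard context.WithTimeout (the five-second deadline and the IDs are illustrative):

package main

import (
    "context"
    "log"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
    svc := gamelift.New(session.Must(session.NewSession()))

    // A standard library context satisfies aws.Context; the call is abandoned
    // if it has not completed within five seconds.
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    _, err := svc.DeregisterGameServerWithContext(ctx, &gamelift.DeregisterGameServerInput{
        GameServerGroupName: aws.String("example-gsg"), // placeholder values
        GameServerId:        aws.String("gs-1234"),
    })
    if err != nil {
        log.Fatal(err)
    }
}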
// // Learn more // -// Working with Builds (https://docs.aws.amazon.com/gamelift/latest/developerguide/build-intro.html) +// Upload a Custom Server Build (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) // // Related operations // @@ -3234,17 +3805,17 @@ func (c *GameLift) DescribeEC2InstanceLimitsRequest(input *DescribeEC2InstanceLi // // Retrieves the following information for the specified EC2 instance type: // -// * maximum number of instances allowed per AWS account (service limit) +// * Maximum number of instances allowed per AWS account (service limit). // -// * current usage level for the AWS account +// * Current usage for the AWS account. // -// Service limits vary depending on Region. Available Regions for Amazon GameLift -// can be found in the AWS Management Console for Amazon GameLift (see the drop-down -// list in the upper right corner). +// To learn more about the capabilities of each instance type, see Amazon EC2 +// Instance Types (http://aws.amazon.com/ec2/instance-types/). Note that the +// instance types offered may vary depending on the region. // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // // Related operations // @@ -3254,14 +3825,11 @@ func (c *GameLift) DescribeEC2InstanceLimitsRequest(input *DescribeEC2InstanceLi // // * DeleteFleet // -// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings -// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits -// DescribeFleetEvents +// * DescribeFleetAttributes // -// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings -// UpdateRuntimeConfiguration +// * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3336,6 +3904,12 @@ func (c *GameLift) DescribeFleetAttributesRequest(input *DescribeFleetAttributes Name: opDescribeFleetAttributes, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { @@ -3349,21 +3923,23 @@ func (c *GameLift) DescribeFleetAttributesRequest(input *DescribeFleetAttributes // DescribeFleetAttributes API operation for Amazon GameLift. // -// Retrieves fleet properties, including metadata, status, and configuration, -// for one or more fleets. You can request attributes for all fleets, or specify -// a list of one or more fleet IDs. When requesting multiple fleets, use the -// pagination parameters to retrieve results as a set of sequential pages. If -// successful, a FleetAttributes object is returned for each requested fleet -// ID. When specifying a list of fleet IDs, attribute objects are returned only -// for fleets that currently exist. +// Retrieves core properties, including configuration, status, and metadata, +// for a fleet. // -// Some API actions may limit the number of fleet IDs allowed in one request. +// To get attributes for one or more fleets, provide a list of fleet IDs or +// fleet ARNs. To get attributes for all fleets, do not specify a fleet identifier. 
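The reworded DescribeEC2InstanceLimits documentation above describes returning the per-account service limit and current usage for an instance type. A sketch querying one type ("c5.large" is just an example value):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
    svc := gamelift.New(session.Must(session.NewSession()))

    // Ask for the limit of a single instance type; omit EC2InstanceType to
    // list limits for every type offered in the region.
    out, err := svc.DescribeEC2InstanceLimits(&gamelift.DescribeEC2InstanceLimitsInput{
        EC2InstanceType: aws.String("c5.large"),
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, l := range out.EC2InstanceLimits {
        fmt.Printf("%s: %d of %d instances in use\n",
            aws.StringValue(l.EC2InstanceType),
            aws.Int64Value(l.CurrentInstances),
            aws.Int64Value(l.InstanceLimit))
    }
}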
+// When requesting attributes for multiple fleets, use the pagination parameters +// to retrieve results as a set of sequential pages. If successful, a FleetAttributes +// object is returned for each fleet requested, unless the fleet identifier +// is not found. +// +// Some API operations may limit the number of fleet IDs allowed in one request. // If a request exceeds this limit, the request fails and the error message -// includes the maximum allowed. +// includes the maximum allowed number. // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // // Related operations // @@ -3379,7 +3955,7 @@ func (c *GameLift) DescribeFleetAttributesRequest(input *DescribeFleetAttributes // // * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3427,6 +4003,58 @@ func (c *GameLift) DescribeFleetAttributesWithContext(ctx aws.Context, input *De return out, req.Send() } +// DescribeFleetAttributesPages iterates over the pages of a DescribeFleetAttributes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeFleetAttributes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFleetAttributes operation. +// pageNum := 0 +// err := client.DescribeFleetAttributesPages(params, +// func(page *gamelift.DescribeFleetAttributesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribeFleetAttributesPages(input *DescribeFleetAttributesInput, fn func(*DescribeFleetAttributesOutput, bool) bool) error { + return c.DescribeFleetAttributesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeFleetAttributesPagesWithContext same as DescribeFleetAttributesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeFleetAttributesPagesWithContext(ctx aws.Context, input *DescribeFleetAttributesInput, fn func(*DescribeFleetAttributesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFleetAttributesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFleetAttributesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFleetAttributesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeFleetCapacity = "DescribeFleetCapacity" // DescribeFleetCapacityRequest generates a "aws/request.Request" representing the @@ -3458,6 +4086,12 @@ func (c *GameLift) DescribeFleetCapacityRequest(input *DescribeFleetCapacityInpu Name: opDescribeFleetCapacity, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { @@ -3471,22 +4105,27 @@ func (c *GameLift) DescribeFleetCapacityRequest(input *DescribeFleetCapacityInpu // DescribeFleetCapacity API operation for Amazon GameLift. // -// Retrieves the current status of fleet capacity for one or more fleets. This -// information includes the number of instances that have been requested for -// the fleet and the number currently active. You can request capacity for all -// fleets, or specify a list of one or more fleet IDs. When requesting multiple -// fleets, use the pagination parameters to retrieve results as a set of sequential -// pages. If successful, a FleetCapacity object is returned for each requested -// fleet ID. When specifying a list of fleet IDs, attribute objects are returned -// only for fleets that currently exist. +// Retrieves the current capacity statistics for one or more fleets. These statistics +// present a snapshot of the fleet's instances and provide insight on current +// or imminent scaling activity. To get statistics on game hosting activity +// in the fleet, see DescribeFleetUtilization. // -// Some API actions may limit the number of fleet IDs allowed in one request. +// You can request capacity for all fleets or specify a list of one or more +// fleet identifiers. When requesting multiple fleets, use the pagination parameters +// to retrieve results as a set of sequential pages. If successful, a FleetCapacity +// object is returned for each requested fleet ID. When a list of fleet IDs +// is provided, attribute objects are returned only for fleets that currently +// exist. +// +// Some API operations may limit the number of fleet IDs allowed in one request. // If a request exceeds this limit, the request fails and the error message // includes the maximum allowed. // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) +// +// GameLift Metrics for Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html#gamelift-metrics-fleet) // // Related operations // @@ -3502,7 +4141,7 @@ func (c *GameLift) DescribeFleetCapacityRequest(input *DescribeFleetCapacityInpu // // * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3550,6 +4189,58 @@ func (c *GameLift) DescribeFleetCapacityWithContext(ctx aws.Context, input *Desc return out, req.Send() } +// DescribeFleetCapacityPages iterates over the pages of a DescribeFleetCapacity operation, +// calling the "fn" function with the response data for each page. 
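The paginators added above give the fleet Describe operations Pages helpers. A sketch that walks every page of DescribeFleetCapacityPages and lists the fleets reported (no FleetIds filter, so all fleets in the account/region are assumed):

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
    svc := gamelift.New(session.Must(session.NewSession()))

    total := 0
    // The callback runs once per page until it returns false or the last
    // page is reached.
    err := svc.DescribeFleetCapacityPages(&gamelift.DescribeFleetCapacityInput{
        Limit: aws.Int64(10), // page size
    }, func(page *gamelift.DescribeFleetCapacityOutput, lastPage bool) bool {
        for _, fc := range page.FleetCapacity {
            fmt.Println(aws.StringValue(fc.FleetId))
            total++
        }
        return true // keep paging
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%d fleets reported capacity\n", total)
}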
To stop +// iterating, return false from the fn function. +// +// See DescribeFleetCapacity method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFleetCapacity operation. +// pageNum := 0 +// err := client.DescribeFleetCapacityPages(params, +// func(page *gamelift.DescribeFleetCapacityOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribeFleetCapacityPages(input *DescribeFleetCapacityInput, fn func(*DescribeFleetCapacityOutput, bool) bool) error { + return c.DescribeFleetCapacityPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeFleetCapacityPagesWithContext same as DescribeFleetCapacityPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeFleetCapacityPagesWithContext(ctx aws.Context, input *DescribeFleetCapacityInput, fn func(*DescribeFleetCapacityOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFleetCapacityInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFleetCapacityRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFleetCapacityOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeFleetEvents = "DescribeFleetEvents" // DescribeFleetEventsRequest generates a "aws/request.Request" representing the @@ -3581,6 +4272,12 @@ func (c *GameLift) DescribeFleetEventsRequest(input *DescribeFleetEventsInput) ( Name: opDescribeFleetEvents, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { @@ -3601,7 +4298,7 @@ func (c *GameLift) DescribeFleetEventsRequest(input *DescribeFleetEventsInput) ( // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // // Related operations // @@ -3617,7 +4314,7 @@ func (c *GameLift) DescribeFleetEventsRequest(input *DescribeFleetEventsInput) ( // // * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3665,17 +4362,69 @@ func (c *GameLift) DescribeFleetEventsWithContext(ctx aws.Context, input *Descri return out, req.Send() } -const opDescribeFleetPortSettings = "DescribeFleetPortSettings" - -// DescribeFleetPortSettingsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeFleetPortSettings operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
+// DescribeFleetEventsPages iterates over the pages of a DescribeFleetEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. +// See DescribeFleetEvents method for more information on how to use this operation. // -// See DescribeFleetPortSettings for more information on using the DescribeFleetPortSettings +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFleetEvents operation. +// pageNum := 0 +// err := client.DescribeFleetEventsPages(params, +// func(page *gamelift.DescribeFleetEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribeFleetEventsPages(input *DescribeFleetEventsInput, fn func(*DescribeFleetEventsOutput, bool) bool) error { + return c.DescribeFleetEventsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeFleetEventsPagesWithContext same as DescribeFleetEventsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeFleetEventsPagesWithContext(ctx aws.Context, input *DescribeFleetEventsInput, fn func(*DescribeFleetEventsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFleetEventsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFleetEventsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFleetEventsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeFleetPortSettings = "DescribeFleetPortSettings" + +// DescribeFleetPortSettingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFleetPortSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeFleetPortSettings for more information on using the DescribeFleetPortSettings // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration @@ -3709,16 +4458,19 @@ func (c *GameLift) DescribeFleetPortSettingsRequest(input *DescribeFleetPortSett // DescribeFleetPortSettings API operation for Amazon GameLift. // -// Retrieves the inbound connection permissions for a fleet. Connection permissions -// include a range of IP addresses and port settings that incoming traffic can -// use to access server processes in the fleet. To get a fleet's inbound connection -// permissions, specify a fleet ID. If successful, a collection of IpPermission -// objects is returned for the requested fleet ID. If the requested fleet has -// been deleted, the result set is empty. 
+// Retrieves a fleet's inbound connection permissions. Connection permissions +// specify the range of IP addresses and port settings that incoming traffic +// can use to access server processes in the fleet. Game sessions that are running +// on instances in the fleet use connections that fall in this range. +// +// To get a fleet's inbound connection permissions, specify the fleet's unique +// identifier. If successful, a collection of IpPermission objects is returned +// for the requested fleet ID. If the requested fleet has been deleted, the +// result set is empty. // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // // Related operations // @@ -3734,7 +4486,7 @@ func (c *GameLift) DescribeFleetPortSettingsRequest(input *DescribeFleetPortSett // // * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3813,6 +4565,12 @@ func (c *GameLift) DescribeFleetUtilizationRequest(input *DescribeFleetUtilizati Name: opDescribeFleetUtilization, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { @@ -3826,20 +4584,25 @@ func (c *GameLift) DescribeFleetUtilizationRequest(input *DescribeFleetUtilizati // DescribeFleetUtilization API operation for Amazon GameLift. // -// Retrieves utilization statistics for one or more fleets. You can request -// utilization data for all fleets, or specify a list of one or more fleet IDs. -// When requesting multiple fleets, use the pagination parameters to retrieve -// results as a set of sequential pages. If successful, a FleetUtilization object -// is returned for each requested fleet ID. When specifying a list of fleet -// IDs, utilization objects are returned only for fleets that currently exist. +// Retrieves utilization statistics for one or more fleets. These statistics +// provide insight into how available hosting resources are currently being +// used. To get statistics on available hosting resources, see DescribeFleetCapacity. // -// Some API actions may limit the number of fleet IDs allowed in one request. +// You can request utilization data for all fleets, or specify a list of one +// or more fleet IDs. When requesting multiple fleets, use the pagination parameters +// to retrieve results as a set of sequential pages. If successful, a FleetUtilization +// object is returned for each requested fleet ID, unless the fleet identifier +// is not found. +// +// Some API operations may limit the number of fleet IDs allowed in one request. // If a request exceeds this limit, the request fails and the error message // includes the maximum allowed. // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). 
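Per the rewritten DescribeFleetPortSettings description above, the call returns a collection of IpPermission objects. A sketch printing the inbound ranges for a placeholder fleet ID:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
    svc := gamelift.New(session.Must(session.NewSession()))

    // "fleet-1234" is a placeholder fleet identifier.
    out, err := svc.DescribeFleetPortSettings(&gamelift.DescribeFleetPortSettingsInput{
        FleetId: aws.String("fleet-1234"),
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, p := range out.InboundPermissions {
        fmt.Printf("%s %d-%d from %s\n",
            aws.StringValue(p.Protocol),
            aws.Int64Value(p.FromPort),
            aws.Int64Value(p.ToPort),
            aws.StringValue(p.IpRange))
    }
}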
+// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) +// +// GameLift Metrics for Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/monitoring-cloudwatch.html#gamelift-metrics-fleet) // // Related operations // @@ -3855,7 +4618,7 @@ func (c *GameLift) DescribeFleetUtilizationRequest(input *DescribeFleetUtilizati // // * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3903,206 +4666,261 @@ func (c *GameLift) DescribeFleetUtilizationWithContext(ctx aws.Context, input *D return out, req.Send() } -const opDescribeGameSessionDetails = "DescribeGameSessionDetails" +// DescribeFleetUtilizationPages iterates over the pages of a DescribeFleetUtilization operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeFleetUtilization method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeFleetUtilization operation. +// pageNum := 0 +// err := client.DescribeFleetUtilizationPages(params, +// func(page *gamelift.DescribeFleetUtilizationOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribeFleetUtilizationPages(input *DescribeFleetUtilizationInput, fn func(*DescribeFleetUtilizationOutput, bool) bool) error { + return c.DescribeFleetUtilizationPagesWithContext(aws.BackgroundContext(), input, fn) +} -// DescribeGameSessionDetailsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeGameSessionDetails operation. The "output" return +// DescribeFleetUtilizationPagesWithContext same as DescribeFleetUtilizationPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeFleetUtilizationPagesWithContext(ctx aws.Context, input *DescribeFleetUtilizationInput, fn func(*DescribeFleetUtilizationOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeFleetUtilizationInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeFleetUtilizationRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeFleetUtilizationOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeGameServer = "DescribeGameServer" + +// DescribeGameServerRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGameServer operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See DescribeGameSessionDetails for more information on using the DescribeGameSessionDetails +// See DescribeGameServer for more information on using the DescribeGameServer // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeGameSessionDetailsRequest method. -// req, resp := client.DescribeGameSessionDetailsRequest(params) +// // Example sending a request using the DescribeGameServerRequest method. +// req, resp := client.DescribeGameServerRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionDetails -func (c *GameLift) DescribeGameSessionDetailsRequest(input *DescribeGameSessionDetailsInput) (req *request.Request, output *DescribeGameSessionDetailsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameServer +func (c *GameLift) DescribeGameServerRequest(input *DescribeGameServerInput) (req *request.Request, output *DescribeGameServerOutput) { op := &request.Operation{ - Name: opDescribeGameSessionDetails, + Name: opDescribeGameServer, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DescribeGameSessionDetailsInput{} + input = &DescribeGameServerInput{} } - output = &DescribeGameSessionDetailsOutput{} + output = &DescribeGameServerOutput{} req = c.newRequest(op, input, output) return } -// DescribeGameSessionDetails API operation for Amazon GameLift. +// DescribeGameServer API operation for Amazon GameLift. // -// Retrieves properties, including the protection policy in force, for one or -// more game sessions. This action can be used in several ways: (1) provide -// a GameSessionId or GameSessionArn to request details for a specific game -// session; (2) provide either a FleetId or an AliasId to request properties -// for all game sessions running on a fleet. +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. // -// To get game session record(s), specify just one of the following: game session -// ID, fleet ID, or alias ID. You can filter this request by game session status. -// Use the pagination parameters to retrieve results as a set of sequential -// pages. If successful, a GameSessionDetail object is returned for each session -// matching the request. +// Retrieves information for a registered game server. Information includes +// game server status, health check info, and the instance that the game server +// is running on. // -// * CreateGameSession +// To retrieve game server information, specify the game server ID. If successful, +// the requested game server object is returned. 
// -// * DescribeGameSessions +// Learn more // -// * DescribeGameSessionDetails +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // -// * SearchGameSessions +// Related operations // -// * UpdateGameSession +// * RegisterGameServer // -// * GetGameSessionLogUrl +// * ListGameServers // -// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement -// StopGameSessionPlacement +// * ClaimGameServer +// +// * DescribeGameServer +// +// * UpdateGameServer +// +// * DeregisterGameServer // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation DescribeGameSessionDetails for usage and error information. +// API operation DescribeGameServer for usage and error information. // // Returned Error Types: -// * InternalServiceException -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. // // * NotFoundException // A service resource associated with the request could not be found. Clients // should not retry such requests. // -// * InvalidRequestException -// One or more parameter values in the request are invalid. Correct the invalid -// parameter values before retrying. -// // * UnauthorizedException // The client failed authentication. Clients should not retry such requests. // -// * TerminalRoutingStrategyException -// The service is unable to resolve the routing for a particular alias because -// it has a terminal RoutingStrategy associated with it. The message returned -// in this exception is the message defined in the routing strategy itself. -// Such requests should only be retried if the routing strategy for the specified -// alias is modified. +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionDetails -func (c *GameLift) DescribeGameSessionDetails(input *DescribeGameSessionDetailsInput) (*DescribeGameSessionDetailsOutput, error) { - req, out := c.DescribeGameSessionDetailsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameServer +func (c *GameLift) DescribeGameServer(input *DescribeGameServerInput) (*DescribeGameServerOutput, error) { + req, out := c.DescribeGameServerRequest(input) return out, req.Send() } -// DescribeGameSessionDetailsWithContext is the same as DescribeGameSessionDetails with the addition of +// DescribeGameServerWithContext is the same as DescribeGameServer with the addition of // the ability to pass a context and additional request options. // -// See DescribeGameSessionDetails for details on how to use this API operation. +// See DescribeGameServer for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) DescribeGameSessionDetailsWithContext(ctx aws.Context, input *DescribeGameSessionDetailsInput, opts ...request.Option) (*DescribeGameSessionDetailsOutput, error) { - req, out := c.DescribeGameSessionDetailsRequest(input) +func (c *GameLift) DescribeGameServerWithContext(ctx aws.Context, input *DescribeGameServerInput, opts ...request.Option) (*DescribeGameServerOutput, error) { + req, out := c.DescribeGameServerRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeGameSessionPlacement = "DescribeGameSessionPlacement" +const opDescribeGameServerGroup = "DescribeGameServerGroup" -// DescribeGameSessionPlacementRequest generates a "aws/request.Request" representing the -// client's request for the DescribeGameSessionPlacement operation. The "output" return +// DescribeGameServerGroupRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGameServerGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeGameSessionPlacement for more information on using the DescribeGameSessionPlacement +// See DescribeGameServerGroup for more information on using the DescribeGameServerGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeGameSessionPlacementRequest method. -// req, resp := client.DescribeGameSessionPlacementRequest(params) +// // Example sending a request using the DescribeGameServerGroupRequest method. +// req, resp := client.DescribeGameServerGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionPlacement -func (c *GameLift) DescribeGameSessionPlacementRequest(input *DescribeGameSessionPlacementInput) (req *request.Request, output *DescribeGameSessionPlacementOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameServerGroup +func (c *GameLift) DescribeGameServerGroupRequest(input *DescribeGameServerGroupInput) (req *request.Request, output *DescribeGameServerGroupOutput) { op := &request.Operation{ - Name: opDescribeGameSessionPlacement, + Name: opDescribeGameServerGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DescribeGameSessionPlacementInput{} + input = &DescribeGameServerGroupInput{} } - output = &DescribeGameSessionPlacementOutput{} + output = &DescribeGameServerGroupOutput{} req = c.newRequest(op, input, output) return } -// DescribeGameSessionPlacement API operation for Amazon GameLift. +// DescribeGameServerGroup API operation for Amazon GameLift. // -// Retrieves properties and current status of a game session placement request. -// To get game session placement details, specify the placement ID. If successful, -// a GameSessionPlacement object is returned. +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. 
// -// * CreateGameSession +// Retrieves information on a game server group. This operation returns only +// properties related to GameLift FleetIQ. To view or update properties for +// the corresponding Auto Scaling group, such as launch template, auto scaling +// policies, and maximum/minimum group size, access the Auto Scaling group directly. // -// * DescribeGameSessions +// To get attributes for a game server group, provide a group name or ARN value. +// If successful, a GameServerGroup object is returned. // -// * DescribeGameSessionDetails +// Learn more // -// * SearchGameSessions +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // -// * UpdateGameSession +// Related operations // -// * GetGameSessionLogUrl +// * CreateGameServerGroup // -// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement -// StopGameSessionPlacement +// * ListGameServerGroups +// +// * DescribeGameServerGroup +// +// * UpdateGameServerGroup +// +// * DeleteGameServerGroup +// +// * ResumeGameServerGroup +// +// * SuspendGameServerGroup +// +// * DescribeGameServerInstances // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation DescribeGameSessionPlacement for usage and error information. +// API operation DescribeGameServerGroup for usage and error information. // // Returned Error Types: -// * InternalServiceException -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// // * InvalidRequestException // One or more parameter values in the request are invalid. Correct the invalid // parameter values before retrying. @@ -4114,99 +4932,133 @@ func (c *GameLift) DescribeGameSessionPlacementRequest(input *DescribeGameSessio // * UnauthorizedException // The client failed authentication. Clients should not retry such requests. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionPlacement -func (c *GameLift) DescribeGameSessionPlacement(input *DescribeGameSessionPlacementInput) (*DescribeGameSessionPlacementOutput, error) { - req, out := c.DescribeGameSessionPlacementRequest(input) +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameServerGroup +func (c *GameLift) DescribeGameServerGroup(input *DescribeGameServerGroupInput) (*DescribeGameServerGroupOutput, error) { + req, out := c.DescribeGameServerGroupRequest(input) return out, req.Send() } -// DescribeGameSessionPlacementWithContext is the same as DescribeGameSessionPlacement with the addition of +// DescribeGameServerGroupWithContext is the same as DescribeGameServerGroup with the addition of // the ability to pass a context and additional request options. // -// See DescribeGameSessionPlacement for details on how to use this API operation. +// See DescribeGameServerGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) DescribeGameSessionPlacementWithContext(ctx aws.Context, input *DescribeGameSessionPlacementInput, opts ...request.Option) (*DescribeGameSessionPlacementOutput, error) { - req, out := c.DescribeGameSessionPlacementRequest(input) +func (c *GameLift) DescribeGameServerGroupWithContext(ctx aws.Context, input *DescribeGameServerGroupInput, opts ...request.Option) (*DescribeGameServerGroupOutput, error) { + req, out := c.DescribeGameServerGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeGameSessionQueues = "DescribeGameSessionQueues" +const opDescribeGameServerInstances = "DescribeGameServerInstances" -// DescribeGameSessionQueuesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeGameSessionQueues operation. The "output" return +// DescribeGameServerInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGameServerInstances operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeGameSessionQueues for more information on using the DescribeGameSessionQueues +// See DescribeGameServerInstances for more information on using the DescribeGameServerInstances // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeGameSessionQueuesRequest method. -// req, resp := client.DescribeGameSessionQueuesRequest(params) +// // Example sending a request using the DescribeGameServerInstancesRequest method. +// req, resp := client.DescribeGameServerInstancesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionQueues -func (c *GameLift) DescribeGameSessionQueuesRequest(input *DescribeGameSessionQueuesInput) (req *request.Request, output *DescribeGameSessionQueuesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameServerInstances +func (c *GameLift) DescribeGameServerInstancesRequest(input *DescribeGameServerInstancesInput) (req *request.Request, output *DescribeGameServerInstancesOutput) { op := &request.Operation{ - Name: opDescribeGameSessionQueues, + Name: opDescribeGameServerInstances, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { - input = &DescribeGameSessionQueuesInput{} + input = &DescribeGameServerInstancesInput{} } - output = &DescribeGameSessionQueuesOutput{} + output = &DescribeGameServerInstancesOutput{} req = c.newRequest(op, input, output) return } -// DescribeGameSessionQueues API operation for Amazon GameLift. +// DescribeGameServerInstances API operation for Amazon GameLift. // -// Retrieves the properties for one or more game session queues. 
When requesting -// multiple queues, use the pagination parameters to retrieve results as a set -// of sequential pages. If successful, a GameSessionQueue object is returned -// for each requested queue. When specifying a list of queues, objects are returned -// only for queues that currently exist in the Region. +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. // -// * CreateGameSessionQueue +// Retrieves status information about the Amazon EC2 instances associated with +// a GameLift FleetIQ game server group. Use this operation to detect when instances +// are active or not available to host new game servers. If you are looking +// for instance configuration information, call DescribeGameServerGroup or access +// the corresponding Auto Scaling group properties. // -// * DescribeGameSessionQueues +// To request status for all instances in the game server group, provide a game +// server group ID only. To request status for specific instances, provide the +// game server group ID and one or more instance IDs. Use the pagination parameters +// to retrieve results in sequential segments. If successful, a collection of +// GameServerInstance objects is returned. // -// * UpdateGameSessionQueue +// This operation is not designed to be called with every game server claim +// request; this practice can cause you to exceed your API limit, which results +// in errors. Instead, as a best practice, cache the results and refresh your +// cache no more than once every 10 seconds. // -// * DeleteGameSessionQueue +// Learn more +// +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) +// +// Related operations +// +// * CreateGameServerGroup +// +// * ListGameServerGroups +// +// * DescribeGameServerGroup +// +// * UpdateGameServerGroup +// +// * DeleteGameServerGroup +// +// * ResumeGameServerGroup +// +// * SuspendGameServerGroup +// +// * DescribeGameServerInstances // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation DescribeGameSessionQueues for usage and error information. +// API operation DescribeGameServerInstances for usage and error information. // // Returned Error Types: -// * InternalServiceException -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// // * InvalidRequestException // One or more parameter values in the request are invalid. Correct the invalid // parameter values before retrying. @@ -4218,84 +5070,146 @@ func (c *GameLift) DescribeGameSessionQueuesRequest(input *DescribeGameSessionQu // * UnauthorizedException // The client failed authentication. Clients should not retry such requests. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionQueues -func (c *GameLift) DescribeGameSessionQueues(input *DescribeGameSessionQueuesInput) (*DescribeGameSessionQueuesOutput, error) { - req, out := c.DescribeGameSessionQueuesRequest(input) +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameServerInstances +func (c *GameLift) DescribeGameServerInstances(input *DescribeGameServerInstancesInput) (*DescribeGameServerInstancesOutput, error) { + req, out := c.DescribeGameServerInstancesRequest(input) return out, req.Send() } -// DescribeGameSessionQueuesWithContext is the same as DescribeGameSessionQueues with the addition of +// DescribeGameServerInstancesWithContext is the same as DescribeGameServerInstances with the addition of // the ability to pass a context and additional request options. // -// See DescribeGameSessionQueues for details on how to use this API operation. +// See DescribeGameServerInstances for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) DescribeGameSessionQueuesWithContext(ctx aws.Context, input *DescribeGameSessionQueuesInput, opts ...request.Option) (*DescribeGameSessionQueuesOutput, error) { - req, out := c.DescribeGameSessionQueuesRequest(input) +func (c *GameLift) DescribeGameServerInstancesWithContext(ctx aws.Context, input *DescribeGameServerInstancesInput, opts ...request.Option) (*DescribeGameServerInstancesOutput, error) { + req, out := c.DescribeGameServerInstancesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeGameSessions = "DescribeGameSessions" +// DescribeGameServerInstancesPages iterates over the pages of a DescribeGameServerInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeGameServerInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeGameServerInstances operation. +// pageNum := 0 +// err := client.DescribeGameServerInstancesPages(params, +// func(page *gamelift.DescribeGameServerInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribeGameServerInstancesPages(input *DescribeGameServerInstancesInput, fn func(*DescribeGameServerInstancesOutput, bool) bool) error { + return c.DescribeGameServerInstancesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// DescribeGameSessionsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeGameSessions operation. The "output" return +// DescribeGameServerInstancesPagesWithContext same as DescribeGameServerInstancesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
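The DescribeGameServerInstances guidance above recommends caching results and refreshing them no more than once every 10 seconds rather than calling the API on every claim request. A rough sketch of such a cache built on the DescribeGameServerInstancesPages helper added here (package and type names are hypothetical):

package fleetiq

import (
    "sync"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/gamelift"
)

// instanceCache refreshes DescribeGameServerInstances results at most once
// every 10 seconds, per the guidance in the operation's documentation.
type instanceCache struct {
    mu        sync.Mutex
    svc       *gamelift.GameLift
    groupName string
    fetched   time.Time
    instances []*gamelift.GameServerInstance
}

func (c *instanceCache) Get() ([]*gamelift.GameServerInstance, error) {
    c.mu.Lock()
    defer c.mu.Unlock()

    // Serve the cached result while it is still fresh.
    if time.Since(c.fetched) < 10*time.Second {
        return c.instances, nil
    }

    var all []*gamelift.GameServerInstance
    err := c.svc.DescribeGameServerInstancesPages(&gamelift.DescribeGameServerInstancesInput{
        GameServerGroupName: aws.String(c.groupName),
    }, func(page *gamelift.DescribeGameServerInstancesOutput, lastPage bool) bool {
        all = append(all, page.GameServerInstances...)
        return true
    })
    if err != nil {
        return nil, err
    }
    c.instances, c.fetched = all, time.Now()
    return all, nil
}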
+func (c *GameLift) DescribeGameServerInstancesPagesWithContext(ctx aws.Context, input *DescribeGameServerInstancesInput, fn func(*DescribeGameServerInstancesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeGameServerInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeGameServerInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeGameServerInstancesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeGameSessionDetails = "DescribeGameSessionDetails" + +// DescribeGameSessionDetailsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGameSessionDetails operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeGameSessions for more information on using the DescribeGameSessions +// See DescribeGameSessionDetails for more information on using the DescribeGameSessionDetails // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeGameSessionsRequest method. -// req, resp := client.DescribeGameSessionsRequest(params) +// // Example sending a request using the DescribeGameSessionDetailsRequest method. +// req, resp := client.DescribeGameSessionDetailsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessions -func (c *GameLift) DescribeGameSessionsRequest(input *DescribeGameSessionsInput) (req *request.Request, output *DescribeGameSessionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionDetails +func (c *GameLift) DescribeGameSessionDetailsRequest(input *DescribeGameSessionDetailsInput) (req *request.Request, output *DescribeGameSessionDetailsOutput) { op := &request.Operation{ - Name: opDescribeGameSessions, + Name: opDescribeGameSessionDetails, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { - input = &DescribeGameSessionsInput{} + input = &DescribeGameSessionDetailsInput{} } - output = &DescribeGameSessionsOutput{} + output = &DescribeGameSessionDetailsOutput{} req = c.newRequest(op, input, output) return } -// DescribeGameSessions API operation for Amazon GameLift. -// -// Retrieves a set of one or more game sessions. Request a specific game session -// or request all game sessions on a fleet. Alternatively, use SearchGameSessions -// to request a set of active game sessions that are filtered by certain criteria. -// To retrieve protection policy settings for game sessions, use DescribeGameSessionDetails. +// DescribeGameSessionDetails API operation for Amazon GameLift. // -// To get game sessions, specify one of the following: game session ID, fleet -// ID, or alias ID. 
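DescribeGameSessionDetails, whose paginator is added above, can be filtered by fleet ID and game session status. A sketch requesting active sessions on a placeholder fleet and printing each session's protection policy:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
    svc := gamelift.New(session.Must(session.NewSession()))

    out, err := svc.DescribeGameSessionDetails(&gamelift.DescribeGameSessionDetailsInput{
        FleetId:      aws.String("fleet-1234"), // placeholder fleet
        StatusFilter: aws.String(gamelift.GameSessionStatusActive),
        Limit:        aws.Int64(20),
    })
    if err != nil {
        log.Fatal(err)
    }
    for _, d := range out.GameSessionDetails {
        fmt.Printf("%s protection=%s\n",
            aws.StringValue(d.GameSession.GameSessionId),
            aws.StringValue(d.ProtectionPolicy))
    }
}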
You can filter this request by game session status. Use -// the pagination parameters to retrieve results as a set of sequential pages. -// If successful, a GameSession object is returned for each game session matching -// the request. +// Retrieves properties, including the protection policy in force, for one or +// more game sessions. This operation can be used in several ways: (1) provide +// a GameSessionId or GameSessionArn to request details for a specific game +// session; (2) provide either a FleetId or an AliasId to request properties +// for all game sessions running on a fleet. // -// Available in Amazon GameLift Local. +// To get game session record(s), specify just one of the following: game session +// ID, fleet ID, or alias ID. You can filter this request by game session status. +// Use the pagination parameters to retrieve results as a set of sequential +// pages. If successful, a GameSessionDetail object is returned for each session +// matching the request. // // * CreateGameSession // @@ -4317,7 +5231,7 @@ func (c *GameLift) DescribeGameSessionsRequest(input *DescribeGameSessionsInput) // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation DescribeGameSessions for usage and error information. +// API operation DescribeGameSessionDetails for usage and error information. // // Returned Error Types: // * InternalServiceException @@ -4343,91 +5257,155 @@ func (c *GameLift) DescribeGameSessionsRequest(input *DescribeGameSessionsInput) // Such requests should only be retried if the routing strategy for the specified // alias is modified. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessions -func (c *GameLift) DescribeGameSessions(input *DescribeGameSessionsInput) (*DescribeGameSessionsOutput, error) { - req, out := c.DescribeGameSessionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionDetails +func (c *GameLift) DescribeGameSessionDetails(input *DescribeGameSessionDetailsInput) (*DescribeGameSessionDetailsOutput, error) { + req, out := c.DescribeGameSessionDetailsRequest(input) return out, req.Send() } -// DescribeGameSessionsWithContext is the same as DescribeGameSessions with the addition of +// DescribeGameSessionDetailsWithContext is the same as DescribeGameSessionDetails with the addition of // the ability to pass a context and additional request options. // -// See DescribeGameSessions for details on how to use this API operation. +// See DescribeGameSessionDetails for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) DescribeGameSessionsWithContext(ctx aws.Context, input *DescribeGameSessionsInput, opts ...request.Option) (*DescribeGameSessionsOutput, error) { - req, out := c.DescribeGameSessionsRequest(input) +func (c *GameLift) DescribeGameSessionDetailsWithContext(ctx aws.Context, input *DescribeGameSessionDetailsInput, opts ...request.Option) (*DescribeGameSessionDetailsOutput, error) { + req, out := c.DescribeGameSessionDetailsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opDescribeInstances = "DescribeInstances" +// DescribeGameSessionDetailsPages iterates over the pages of a DescribeGameSessionDetails operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeGameSessionDetails method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeGameSessionDetails operation. +// pageNum := 0 +// err := client.DescribeGameSessionDetailsPages(params, +// func(page *gamelift.DescribeGameSessionDetailsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribeGameSessionDetailsPages(input *DescribeGameSessionDetailsInput, fn func(*DescribeGameSessionDetailsOutput, bool) bool) error { + return c.DescribeGameSessionDetailsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// DescribeInstancesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeInstances operation. The "output" return +// DescribeGameSessionDetailsPagesWithContext same as DescribeGameSessionDetailsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeGameSessionDetailsPagesWithContext(ctx aws.Context, input *DescribeGameSessionDetailsInput, fn func(*DescribeGameSessionDetailsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeGameSessionDetailsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeGameSessionDetailsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeGameSessionDetailsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeGameSessionPlacement = "DescribeGameSessionPlacement" + +// DescribeGameSessionPlacementRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGameSessionPlacement operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeInstances for more information on using the DescribeInstances +// See DescribeGameSessionPlacement for more information on using the DescribeGameSessionPlacement // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeInstancesRequest method. -// req, resp := client.DescribeInstancesRequest(params) +// // Example sending a request using the DescribeGameSessionPlacementRequest method. 
+// req, resp := client.DescribeGameSessionPlacementRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeInstances -func (c *GameLift) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionPlacement +func (c *GameLift) DescribeGameSessionPlacementRequest(input *DescribeGameSessionPlacementInput) (req *request.Request, output *DescribeGameSessionPlacementOutput) { op := &request.Operation{ - Name: opDescribeInstances, + Name: opDescribeGameSessionPlacement, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DescribeInstancesInput{} + input = &DescribeGameSessionPlacementInput{} } - output = &DescribeInstancesOutput{} + output = &DescribeGameSessionPlacementOutput{} req = c.newRequest(op, input, output) return } -// DescribeInstances API operation for Amazon GameLift. +// DescribeGameSessionPlacement API operation for Amazon GameLift. // -// Retrieves information about a fleet's instances, including instance IDs. -// Use this action to get details on all instances in the fleet or get details -// on one specific instance. +// Retrieves properties and current status of a game session placement request. +// To get game session placement details, specify the placement ID. If successful, +// a GameSessionPlacement object is returned. // -// To get a specific instance, specify fleet ID and instance ID. To get all -// instances in a fleet, specify a fleet ID only. Use the pagination parameters -// to retrieve results as a set of sequential pages. If successful, an Instance -// object is returned for each result. +// * CreateGameSession +// +// * DescribeGameSessions +// +// * DescribeGameSessionDetails +// +// * SearchGameSessions +// +// * UpdateGameSession +// +// * GetGameSessionLogUrl +// +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation DescribeInstances for usage and error information. +// API operation DescribeGameSessionPlacement for usage and error information. // // Returned Error Types: -// * UnauthorizedException -// The client failed authentication. Clients should not retry such requests. +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. // // * InvalidRequestException // One or more parameter values in the request are invalid. Correct the invalid @@ -4437,202 +5415,731 @@ func (c *GameLift) DescribeInstancesRequest(input *DescribeInstancesInput) (req // A service resource associated with the request could not be found. Clients // should not retry such requests. // -// * InternalServiceException -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. 
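A minimal sketch, assuming the PlacementId input field and the GameSessionPlacement/Status output fields generated elsewhere in this file, of how the placement lookup documented above might be called; the placement ID is a placeholder, not a value from this patch:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	client := gamelift.New(session.Must(session.NewSession()))

	// Look up a single placement request by its ID (placeholder value).
	out, err := client.DescribeGameSessionPlacement(&gamelift.DescribeGameSessionPlacementInput{
		PlacementId: aws.String("placement-example"),
	})
	if err != nil {
		fmt.Println("describe placement failed:", err)
		return
	}
	// Status is expected to move through states such as PENDING and FULFILLED.
	fmt.Println(aws.StringValue(out.GameSessionPlacement.Status))
}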
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeInstances -func (c *GameLift) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { - req, out := c.DescribeInstancesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionPlacement +func (c *GameLift) DescribeGameSessionPlacement(input *DescribeGameSessionPlacementInput) (*DescribeGameSessionPlacementOutput, error) { + req, out := c.DescribeGameSessionPlacementRequest(input) return out, req.Send() } -// DescribeInstancesWithContext is the same as DescribeInstances with the addition of +// DescribeGameSessionPlacementWithContext is the same as DescribeGameSessionPlacement with the addition of // the ability to pass a context and additional request options. // -// See DescribeInstances for details on how to use this API operation. +// See DescribeGameSessionPlacement for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) DescribeInstancesWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.Option) (*DescribeInstancesOutput, error) { - req, out := c.DescribeInstancesRequest(input) +func (c *GameLift) DescribeGameSessionPlacementWithContext(ctx aws.Context, input *DescribeGameSessionPlacementInput, opts ...request.Option) (*DescribeGameSessionPlacementOutput, error) { + req, out := c.DescribeGameSessionPlacementRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeMatchmaking = "DescribeMatchmaking" +const opDescribeGameSessionQueues = "DescribeGameSessionQueues" -// DescribeMatchmakingRequest generates a "aws/request.Request" representing the -// client's request for the DescribeMatchmaking operation. The "output" return +// DescribeGameSessionQueuesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGameSessionQueues operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeMatchmaking for more information on using the DescribeMatchmaking +// See DescribeGameSessionQueues for more information on using the DescribeGameSessionQueues // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeMatchmakingRequest method. -// req, resp := client.DescribeMatchmakingRequest(params) +// // Example sending a request using the DescribeGameSessionQueuesRequest method. 
+// req, resp := client.DescribeGameSessionQueuesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmaking -func (c *GameLift) DescribeMatchmakingRequest(input *DescribeMatchmakingInput) (req *request.Request, output *DescribeMatchmakingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionQueues +func (c *GameLift) DescribeGameSessionQueuesRequest(input *DescribeGameSessionQueuesInput) (req *request.Request, output *DescribeGameSessionQueuesOutput) { op := &request.Operation{ - Name: opDescribeMatchmaking, + Name: opDescribeGameSessionQueues, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { - input = &DescribeMatchmakingInput{} + input = &DescribeGameSessionQueuesInput{} } - output = &DescribeMatchmakingOutput{} + output = &DescribeGameSessionQueuesOutput{} req = c.newRequest(op, input, output) return } -// DescribeMatchmaking API operation for Amazon GameLift. -// -// Retrieves one or more matchmaking tickets. Use this operation to retrieve -// ticket information, including status and--once a successful match is made--acquire -// connection information for the resulting new game session. -// -// You can use this operation to track the progress of matchmaking requests -// (through polling) as an alternative to using event notifications. See more -// details on tracking matchmaking requests through polling or notifications -// in StartMatchmaking. +// DescribeGameSessionQueues API operation for Amazon GameLift. // -// To request matchmaking tickets, provide a list of up to 10 ticket IDs. If -// the request is successful, a ticket object is returned for each requested -// ID that currently exists. +// Retrieves the properties for one or more game session queues. When requesting +// multiple queues, use the pagination parameters to retrieve results as a set +// of sequential pages. If successful, a GameSessionQueue object is returned +// for each requested queue. When specifying a list of queues, objects are returned +// only for queues that currently exist in the Region. // // Learn more // -// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) -// -// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) +// View Your Queues (https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-console.html) // // Related operations // -// * StartMatchmaking -// -// * DescribeMatchmaking +// * CreateGameSessionQueue // -// * StopMatchmaking +// * DescribeGameSessionQueues // -// * AcceptMatch +// * UpdateGameSessionQueue // -// * StartMatchBackfill +// * DeleteGameSessionQueue // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation DescribeMatchmaking for usage and error information. +// API operation DescribeGameSessionQueues for usage and error information. // // Returned Error Types: -// * InvalidRequestException -// One or more parameter values in the request are invalid. 
Correct the invalid -// parameter values before retrying. -// // * InternalServiceException // The service encountered an unrecoverable internal failure while processing // the request. Clients can retry such requests immediately or after a waiting // period. // -// * UnsupportedRegionException -// The requested operation is not supported in the Region specified. +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmaking -func (c *GameLift) DescribeMatchmaking(input *DescribeMatchmakingInput) (*DescribeMatchmakingOutput, error) { - req, out := c.DescribeMatchmakingRequest(input) +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessionQueues +func (c *GameLift) DescribeGameSessionQueues(input *DescribeGameSessionQueuesInput) (*DescribeGameSessionQueuesOutput, error) { + req, out := c.DescribeGameSessionQueuesRequest(input) return out, req.Send() } -// DescribeMatchmakingWithContext is the same as DescribeMatchmaking with the addition of +// DescribeGameSessionQueuesWithContext is the same as DescribeGameSessionQueues with the addition of // the ability to pass a context and additional request options. // -// See DescribeMatchmaking for details on how to use this API operation. +// See DescribeGameSessionQueues for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) DescribeMatchmakingWithContext(ctx aws.Context, input *DescribeMatchmakingInput, opts ...request.Option) (*DescribeMatchmakingOutput, error) { - req, out := c.DescribeMatchmakingRequest(input) +func (c *GameLift) DescribeGameSessionQueuesWithContext(ctx aws.Context, input *DescribeGameSessionQueuesInput, opts ...request.Option) (*DescribeGameSessionQueuesOutput, error) { + req, out := c.DescribeGameSessionQueuesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeMatchmakingConfigurations = "DescribeMatchmakingConfigurations" +// DescribeGameSessionQueuesPages iterates over the pages of a DescribeGameSessionQueues operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeGameSessionQueues method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeGameSessionQueues operation. 
+// pageNum := 0 +// err := client.DescribeGameSessionQueuesPages(params, +// func(page *gamelift.DescribeGameSessionQueuesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribeGameSessionQueuesPages(input *DescribeGameSessionQueuesInput, fn func(*DescribeGameSessionQueuesOutput, bool) bool) error { + return c.DescribeGameSessionQueuesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// DescribeMatchmakingConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeMatchmakingConfigurations operation. The "output" return +// DescribeGameSessionQueuesPagesWithContext same as DescribeGameSessionQueuesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeGameSessionQueuesPagesWithContext(ctx aws.Context, input *DescribeGameSessionQueuesInput, fn func(*DescribeGameSessionQueuesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeGameSessionQueuesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeGameSessionQueuesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeGameSessionQueuesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeGameSessions = "DescribeGameSessions" + +// DescribeGameSessionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGameSessions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeMatchmakingConfigurations for more information on using the DescribeMatchmakingConfigurations +// See DescribeGameSessions for more information on using the DescribeGameSessions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeMatchmakingConfigurationsRequest method. -// req, resp := client.DescribeMatchmakingConfigurationsRequest(params) +// // Example sending a request using the DescribeGameSessionsRequest method. 
+// req, resp := client.DescribeGameSessionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingConfigurations -func (c *GameLift) DescribeMatchmakingConfigurationsRequest(input *DescribeMatchmakingConfigurationsInput) (req *request.Request, output *DescribeMatchmakingConfigurationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessions +func (c *GameLift) DescribeGameSessionsRequest(input *DescribeGameSessionsInput) (req *request.Request, output *DescribeGameSessionsOutput) { op := &request.Operation{ - Name: opDescribeMatchmakingConfigurations, + Name: opDescribeGameSessions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { - input = &DescribeMatchmakingConfigurationsInput{} + input = &DescribeGameSessionsInput{} } - output = &DescribeMatchmakingConfigurationsOutput{} + output = &DescribeGameSessionsOutput{} req = c.newRequest(op, input, output) return } -// DescribeMatchmakingConfigurations API operation for Amazon GameLift. -// -// Retrieves the details of FlexMatch matchmaking configurations. With this -// operation, you have the following options: (1) retrieve all existing configurations, -// (2) provide the names of one or more configurations to retrieve, or (3) retrieve -// all configurations that use a specified rule set name. When requesting multiple -// items, use the pagination parameters to retrieve results as a set of sequential -// pages. If successful, a configuration is returned for each requested name. -// When specifying a list of names, only configurations that currently exist -// are returned. +// DescribeGameSessions API operation for Amazon GameLift. +// +// Retrieves a set of one or more game sessions. Request a specific game session +// or request all game sessions on a fleet. Alternatively, use SearchGameSessions +// to request a set of active game sessions that are filtered by certain criteria. +// To retrieve protection policy settings for game sessions, use DescribeGameSessionDetails. +// +// To get game sessions, specify one of the following: game session ID, fleet +// ID, or alias ID. You can filter this request by game session status. Use +// the pagination parameters to retrieve results as a set of sequential pages. +// If successful, a GameSession object is returned for each game session matching +// the request. +// +// Available in Amazon GameLift Local. +// +// * CreateGameSession +// +// * DescribeGameSessions +// +// * DescribeGameSessionDetails +// +// * SearchGameSessions +// +// * UpdateGameSession +// +// * GetGameSessionLogUrl +// +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeGameSessions for usage and error information. +// +// Returned Error Types: +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. 
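A rough usage sketch of the DescribeGameSessions paginator configured above, consumed through the DescribeGameSessionsPages helper that follows in this hunk; the fleet ID is a placeholder and the FleetId/Limit/GameSessions field names are assumed from the SDK's generated types:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	client := gamelift.New(session.Must(session.NewSession()))

	// Request game sessions for one fleet, 20 per page; the Pages helper
	// follows NextToken until the callback returns false or pages run out.
	input := &gamelift.DescribeGameSessionsInput{
		FleetId: aws.String("fleet-1234"), // placeholder
		Limit:   aws.Int64(20),
	}
	err := client.DescribeGameSessionsPages(input,
		func(page *gamelift.DescribeGameSessionsOutput, lastPage bool) bool {
			for _, gs := range page.GameSessions {
				fmt.Println(aws.StringValue(gs.GameSessionId))
			}
			return !lastPage // keep going until the final page
		})
	if err != nil {
		fmt.Println("describe game sessions failed:", err)
	}
}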
+// +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// +// * TerminalRoutingStrategyException +// The service is unable to resolve the routing for a particular alias because +// it has a terminal RoutingStrategy associated with it. The message returned +// in this exception is the message defined in the routing strategy itself. +// Such requests should only be retried if the routing strategy for the specified +// alias is modified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeGameSessions +func (c *GameLift) DescribeGameSessions(input *DescribeGameSessionsInput) (*DescribeGameSessionsOutput, error) { + req, out := c.DescribeGameSessionsRequest(input) + return out, req.Send() +} + +// DescribeGameSessionsWithContext is the same as DescribeGameSessions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeGameSessions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeGameSessionsWithContext(ctx aws.Context, input *DescribeGameSessionsInput, opts ...request.Option) (*DescribeGameSessionsOutput, error) { + req, out := c.DescribeGameSessionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeGameSessionsPages iterates over the pages of a DescribeGameSessions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeGameSessions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeGameSessions operation. +// pageNum := 0 +// err := client.DescribeGameSessionsPages(params, +// func(page *gamelift.DescribeGameSessionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribeGameSessionsPages(input *DescribeGameSessionsInput, fn func(*DescribeGameSessionsOutput, bool) bool) error { + return c.DescribeGameSessionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeGameSessionsPagesWithContext same as DescribeGameSessionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *GameLift) DescribeGameSessionsPagesWithContext(ctx aws.Context, input *DescribeGameSessionsInput, fn func(*DescribeGameSessionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeGameSessionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeGameSessionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeGameSessionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeInstances = "DescribeInstances" + +// DescribeInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstances operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstances for more information on using the DescribeInstances +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstancesRequest method. +// req, resp := client.DescribeInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeInstances +func (c *GameLift) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { + op := &request.Operation{ + Name: opDescribeInstances, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstancesInput{} + } + + output = &DescribeInstancesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstances API operation for Amazon GameLift. +// +// Retrieves information about a fleet's instances, including instance IDs. +// Use this operation to get details on all instances in the fleet or get details +// on one specific instance. +// +// To get a specific instance, specify fleet ID and instance ID. To get all +// instances in a fleet, specify a fleet ID only. Use the pagination parameters +// to retrieve results as a set of sequential pages. If successful, an Instance +// object is returned for each result. +// +// Learn more +// +// Remotely Access Fleet Instances (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-remote-access.html) +// +// Debug Fleet Issues (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html) +// +// Related operations +// +// * DescribeInstances +// +// * GetInstanceAccess +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeInstances for usage and error information. +// +// Returned Error Types: +// * UnauthorizedException +// The client failed authentication. 
Clients should not retry such requests. +// +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeInstances +func (c *GameLift) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { + req, out := c.DescribeInstancesRequest(input) + return out, req.Send() +} + +// DescribeInstancesWithContext is the same as DescribeInstances with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstances for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeInstancesWithContext(ctx aws.Context, input *DescribeInstancesInput, opts ...request.Option) (*DescribeInstancesOutput, error) { + req, out := c.DescribeInstancesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstancesPages iterates over the pages of a DescribeInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstances operation. +// pageNum := 0 +// err := client.DescribeInstancesPages(params, +// func(page *gamelift.DescribeInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribeInstancesPages(input *DescribeInstancesInput, fn func(*DescribeInstancesOutput, bool) bool) error { + return c.DescribeInstancesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstancesPagesWithContext same as DescribeInstancesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeInstancesPagesWithContext(ctx aws.Context, input *DescribeInstancesInput, fn func(*DescribeInstancesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstancesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstancesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opDescribeMatchmaking = "DescribeMatchmaking" + +// DescribeMatchmakingRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMatchmaking operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMatchmaking for more information on using the DescribeMatchmaking +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMatchmakingRequest method. +// req, resp := client.DescribeMatchmakingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmaking +func (c *GameLift) DescribeMatchmakingRequest(input *DescribeMatchmakingInput) (req *request.Request, output *DescribeMatchmakingOutput) { + op := &request.Operation{ + Name: opDescribeMatchmaking, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeMatchmakingInput{} + } + + output = &DescribeMatchmakingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMatchmaking API operation for Amazon GameLift. +// +// Retrieves one or more matchmaking tickets. Use this operation to retrieve +// ticket information, including--after a successful match is made--connection +// information for the resulting new game session. +// +// To request matchmaking tickets, provide a list of up to 10 ticket IDs. If +// the request is successful, a ticket object is returned for each requested +// ID that currently exists. +// +// This operation is not designed to be continually called to track matchmaking +// ticket status. This practice can cause you to exceed your API limit, which +// results in errors. Instead, as a best practice, set up an Amazon Simple Notification +// Service (SNS) to receive notifications, and provide the topic ARN in the +// matchmaking configuration. Continuously poling ticket status with DescribeMatchmaking +// should only be used for games in development with low matchmaking usage. +// +// Learn more +// +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// +// Set Up FlexMatch Event Notification (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-notification.html) +// +// Related operations +// +// * StartMatchmaking +// +// * DescribeMatchmaking +// +// * StopMatchmaking +// +// * AcceptMatch +// +// * StartMatchBackfill +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation DescribeMatchmaking for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. 
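A hedged sketch of the ticket lookup described in the DescribeMatchmaking comment above; the ticket ID is a placeholder, the TicketList/TicketId/Status field names are assumed from the generated types, and per the guidance above event notifications are preferred over continuous polling:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	client := gamelift.New(session.Must(session.NewSession()))

	// Ask for the current state of up to 10 matchmaking tickets at once.
	out, err := client.DescribeMatchmaking(&gamelift.DescribeMatchmakingInput{
		TicketIds: []*string{aws.String("ticket-example-1")},
	})
	if err != nil {
		fmt.Println("describe matchmaking failed:", err)
		return
	}
	for _, t := range out.TicketList {
		// Completed tickets also carry connection info for the new game session.
		fmt.Println(aws.StringValue(t.TicketId), aws.StringValue(t.Status))
	}
}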
+// +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * UnsupportedRegionException +// The requested operation is not supported in the Region specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmaking +func (c *GameLift) DescribeMatchmaking(input *DescribeMatchmakingInput) (*DescribeMatchmakingOutput, error) { + req, out := c.DescribeMatchmakingRequest(input) + return out, req.Send() +} + +// DescribeMatchmakingWithContext is the same as DescribeMatchmaking with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMatchmaking for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeMatchmakingWithContext(ctx aws.Context, input *DescribeMatchmakingInput, opts ...request.Option) (*DescribeMatchmakingOutput, error) { + req, out := c.DescribeMatchmakingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeMatchmakingConfigurations = "DescribeMatchmakingConfigurations" + +// DescribeMatchmakingConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMatchmakingConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMatchmakingConfigurations for more information on using the DescribeMatchmakingConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMatchmakingConfigurationsRequest method. +// req, resp := client.DescribeMatchmakingConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/DescribeMatchmakingConfigurations +func (c *GameLift) DescribeMatchmakingConfigurationsRequest(input *DescribeMatchmakingConfigurationsInput) (req *request.Request, output *DescribeMatchmakingConfigurationsOutput) { + op := &request.Operation{ + Name: opDescribeMatchmakingConfigurations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeMatchmakingConfigurationsInput{} + } + + output = &DescribeMatchmakingConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMatchmakingConfigurations API operation for Amazon GameLift. +// +// Retrieves the details of FlexMatch matchmaking configurations. 
+// +// This operation offers the following options: (1) retrieve all matchmaking +// configurations, (2) retrieve configurations for a specified list, or (3) +// retrieve all configurations that use a specified rule set name. When requesting +// multiple items, use the pagination parameters to retrieve results as a set +// of sequential pages. +// +// If successful, a configuration is returned for each requested name. When +// specifying a list of names, only configurations that currently exist are +// returned. // // Learn more // @@ -4698,6 +6205,58 @@ func (c *GameLift) DescribeMatchmakingConfigurationsWithContext(ctx aws.Context, return out, req.Send() } +// DescribeMatchmakingConfigurationsPages iterates over the pages of a DescribeMatchmakingConfigurations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMatchmakingConfigurations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMatchmakingConfigurations operation. +// pageNum := 0 +// err := client.DescribeMatchmakingConfigurationsPages(params, +// func(page *gamelift.DescribeMatchmakingConfigurationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribeMatchmakingConfigurationsPages(input *DescribeMatchmakingConfigurationsInput, fn func(*DescribeMatchmakingConfigurationsOutput, bool) bool) error { + return c.DescribeMatchmakingConfigurationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMatchmakingConfigurationsPagesWithContext same as DescribeMatchmakingConfigurationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeMatchmakingConfigurationsPagesWithContext(ctx aws.Context, input *DescribeMatchmakingConfigurationsInput, fn func(*DescribeMatchmakingConfigurationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMatchmakingConfigurationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMatchmakingConfigurationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMatchmakingConfigurationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMatchmakingRuleSets = "DescribeMatchmakingRuleSets" // DescribeMatchmakingRuleSetsRequest generates a "aws/request.Request" representing the @@ -4729,6 +6288,12 @@ func (c *GameLift) DescribeMatchmakingRuleSetsRequest(input *DescribeMatchmaking Name: opDescribeMatchmakingRuleSets, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { @@ -4816,6 +6381,58 @@ func (c *GameLift) DescribeMatchmakingRuleSetsWithContext(ctx aws.Context, input return out, req.Send() } +// DescribeMatchmakingRuleSetsPages iterates over the pages of a DescribeMatchmakingRuleSets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMatchmakingRuleSets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMatchmakingRuleSets operation. +// pageNum := 0 +// err := client.DescribeMatchmakingRuleSetsPages(params, +// func(page *gamelift.DescribeMatchmakingRuleSetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribeMatchmakingRuleSetsPages(input *DescribeMatchmakingRuleSetsInput, fn func(*DescribeMatchmakingRuleSetsOutput, bool) bool) error { + return c.DescribeMatchmakingRuleSetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMatchmakingRuleSetsPagesWithContext same as DescribeMatchmakingRuleSetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeMatchmakingRuleSetsPagesWithContext(ctx aws.Context, input *DescribeMatchmakingRuleSetsInput, fn func(*DescribeMatchmakingRuleSetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMatchmakingRuleSetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMatchmakingRuleSetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMatchmakingRuleSetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribePlayerSessions = "DescribePlayerSessions" // DescribePlayerSessionsRequest generates a "aws/request.Request" representing the @@ -4847,6 +6464,12 @@ func (c *GameLift) DescribePlayerSessionsRequest(input *DescribePlayerSessionsIn Name: opDescribePlayerSessions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { @@ -4860,8 +6483,8 @@ func (c *GameLift) DescribePlayerSessionsRequest(input *DescribePlayerSessionsIn // DescribePlayerSessions API operation for Amazon GameLift. 
// -// Retrieves properties for one or more player sessions. This action can be -// used in several ways: (1) provide a PlayerSessionId to request properties +// Retrieves properties for one or more player sessions. This operation can +// be used in several ways: (1) provide a PlayerSessionId to request properties // for a specific player session; (2) provide a GameSessionId to request properties // for all player sessions in the specified game session; (3) provide a PlayerId // to request properties for all player sessions of a specified player. @@ -4913,20 +6536,72 @@ func (c *GameLift) DescribePlayerSessions(input *DescribePlayerSessionsInput) (* return out, req.Send() } -// DescribePlayerSessionsWithContext is the same as DescribePlayerSessions with the addition of -// the ability to pass a context and additional request options. -// -// See DescribePlayerSessions for details on how to use this API operation. +// DescribePlayerSessionsWithContext is the same as DescribePlayerSessions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribePlayerSessions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribePlayerSessionsWithContext(ctx aws.Context, input *DescribePlayerSessionsInput, opts ...request.Option) (*DescribePlayerSessionsOutput, error) { + req, out := c.DescribePlayerSessionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribePlayerSessionsPages iterates over the pages of a DescribePlayerSessions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribePlayerSessions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePlayerSessions operation. +// pageNum := 0 +// err := client.DescribePlayerSessionsPages(params, +// func(page *gamelift.DescribePlayerSessionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribePlayerSessionsPages(input *DescribePlayerSessionsInput, fn func(*DescribePlayerSessionsOutput, bool) bool) error { + return c.DescribePlayerSessionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePlayerSessionsPagesWithContext same as DescribePlayerSessionsPages except +// it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) DescribePlayerSessionsWithContext(ctx aws.Context, input *DescribePlayerSessionsInput, opts ...request.Option) (*DescribePlayerSessionsOutput, error) { - req, out := c.DescribePlayerSessionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return out, req.Send() +func (c *GameLift) DescribePlayerSessionsPagesWithContext(ctx aws.Context, input *DescribePlayerSessionsInput, fn func(*DescribePlayerSessionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePlayerSessionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePlayerSessionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribePlayerSessionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() } const opDescribeRuntimeConfiguration = "DescribeRuntimeConfiguration" @@ -4973,13 +6648,19 @@ func (c *GameLift) DescribeRuntimeConfigurationRequest(input *DescribeRuntimeCon // DescribeRuntimeConfiguration API operation for Amazon GameLift. // -// Retrieves the current runtime configuration for the specified fleet. The -// runtime configuration tells Amazon GameLift how to launch server processes -// on instances in the fleet. +// Retrieves a fleet's runtime configuration settings. The runtime configuration +// tells Amazon GameLift which server processes to run (and how) on each instance +// in the fleet. +// +// To get a runtime configuration, specify the fleet's unique identifier. If +// successful, a RuntimeConfiguration object is returned for the requested fleet. +// If the requested fleet has been deleted, the result set is empty. // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) +// +// Running Multiple Processes on a Fleet (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-multiprocess.html) // // Related operations // @@ -4995,7 +6676,7 @@ func (c *GameLift) DescribeRuntimeConfigurationRequest(input *DescribeRuntimeCon // // * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5074,6 +6755,12 @@ func (c *GameLift) DescribeScalingPoliciesRequest(input *DescribeScalingPolicies Name: opDescribeScalingPolicies, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { @@ -5095,9 +6782,9 @@ func (c *GameLift) DescribeScalingPoliciesRequest(input *DescribeScalingPolicies // pages. If successful, set of ScalingPolicy objects is returned for the fleet. // // A fleet may have all of its scaling policies suspended (StopFleetActions). -// This action does not affect the status of the scaling policies, which remains -// ACTIVE. To see whether a fleet's scaling policies are in force or suspended, -// call DescribeFleetAttributes and check the stopped actions. +// This operation does not affect the status of the scaling policies, which +// remains ACTIVE. To see whether a fleet's scaling policies are in force or +// suspended, call DescribeFleetAttributes and check the stopped actions. 
// // * DescribeFleetCapacity // @@ -5156,6 +6843,58 @@ func (c *GameLift) DescribeScalingPoliciesWithContext(ctx aws.Context, input *De return out, req.Send() } +// DescribeScalingPoliciesPages iterates over the pages of a DescribeScalingPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeScalingPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeScalingPolicies operation. +// pageNum := 0 +// err := client.DescribeScalingPoliciesPages(params, +// func(page *gamelift.DescribeScalingPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) DescribeScalingPoliciesPages(input *DescribeScalingPoliciesInput, fn func(*DescribeScalingPoliciesOutput, bool) bool) error { + return c.DescribeScalingPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeScalingPoliciesPagesWithContext same as DescribeScalingPoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) DescribeScalingPoliciesPagesWithContext(ctx aws.Context, input *DescribeScalingPoliciesInput, fn func(*DescribeScalingPoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeScalingPoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeScalingPoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeScalingPoliciesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeScript = "DescribeScript" // DescribeScriptRequest generates a "aws/request.Request" representing the @@ -5490,81 +7229,306 @@ const opGetGameSessionLogUrl = "GetGameSessionLogUrl" // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetGameSessionLogUrl for more information on using the GetGameSessionLogUrl +// See GetGameSessionLogUrl for more information on using the GetGameSessionLogUrl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetGameSessionLogUrlRequest method. 
+// req, resp := client.GetGameSessionLogUrlRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetGameSessionLogUrl +func (c *GameLift) GetGameSessionLogUrlRequest(input *GetGameSessionLogUrlInput) (req *request.Request, output *GetGameSessionLogUrlOutput) { + op := &request.Operation{ + Name: opGetGameSessionLogUrl, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetGameSessionLogUrlInput{} + } + + output = &GetGameSessionLogUrlOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetGameSessionLogUrl API operation for Amazon GameLift. +// +// Retrieves the location of stored game session logs for a specified game session. +// When a game session is terminated, Amazon GameLift automatically stores the +// logs in Amazon S3 and retains them for 14 days. Use this URL to download +// the logs. +// +// See the AWS Service Limits (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_gamelift) +// page for maximum log file sizes. Log files that exceed this limit are not +// saved. +// +// * CreateGameSession +// +// * DescribeGameSessions +// +// * DescribeGameSessionDetails +// +// * SearchGameSessions +// +// * UpdateGameSession +// +// * GetGameSessionLogUrl +// +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation GetGameSessionLogUrl for usage and error information. +// +// Returned Error Types: +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetGameSessionLogUrl +func (c *GameLift) GetGameSessionLogUrl(input *GetGameSessionLogUrlInput) (*GetGameSessionLogUrlOutput, error) { + req, out := c.GetGameSessionLogUrlRequest(input) + return out, req.Send() +} + +// GetGameSessionLogUrlWithContext is the same as GetGameSessionLogUrl with the addition of +// the ability to pass a context and additional request options. +// +// See GetGameSessionLogUrl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) GetGameSessionLogUrlWithContext(ctx aws.Context, input *GetGameSessionLogUrlInput, opts ...request.Option) (*GetGameSessionLogUrlOutput, error) { + req, out := c.GetGameSessionLogUrlRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetInstanceAccess = "GetInstanceAccess" + +// GetInstanceAccessRequest generates a "aws/request.Request" representing the +// client's request for the GetInstanceAccess operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetInstanceAccess for more information on using the GetInstanceAccess +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetInstanceAccessRequest method. +// req, resp := client.GetInstanceAccessRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetInstanceAccess +func (c *GameLift) GetInstanceAccessRequest(input *GetInstanceAccessInput) (req *request.Request, output *GetInstanceAccessOutput) { + op := &request.Operation{ + Name: opGetInstanceAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetInstanceAccessInput{} + } + + output = &GetInstanceAccessOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetInstanceAccess API operation for Amazon GameLift. +// +// Requests remote access to a fleet instance. Remote access is useful for debugging, +// gathering benchmarking data, or observing activity in real time. +// +// To remotely access an instance, you need credentials that match the operating +// system of the instance. For a Windows instance, Amazon GameLift returns a +// user name and password as strings for use with a Windows Remote Desktop client. +// For a Linux instance, Amazon GameLift returns a user name and RSA private +// key, also as strings, for use with an SSH client. The private key must be +// saved in the proper format to a .pem file before using. If you're making +// this request using the AWS CLI, saving the secret can be handled as part +// of the GetInstanceAccess request, as shown in one of the examples for this +// operation. +// +// To request access to a specific instance, specify the IDs of both the instance +// and the fleet it belongs to. You can retrieve a fleet's instance IDs by calling +// DescribeInstances. If successful, an InstanceAccess object is returned that +// contains the instance's IP address and a set of credentials. +// +// Learn more +// +// Remotely Access Fleet Instances (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-remote-access.html) +// +// Debug Fleet Issues (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-creating-debug.html) +// +// Related operations +// +// * DescribeInstances +// +// * GetInstanceAccess +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation GetInstanceAccess for usage and error information. +// +// Returned Error Types: +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. 
+// +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetInstanceAccess +func (c *GameLift) GetInstanceAccess(input *GetInstanceAccessInput) (*GetInstanceAccessOutput, error) { + req, out := c.GetInstanceAccessRequest(input) + return out, req.Send() +} + +// GetInstanceAccessWithContext is the same as GetInstanceAccess with the addition of +// the ability to pass a context and additional request options. +// +// See GetInstanceAccess for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) GetInstanceAccessWithContext(ctx aws.Context, input *GetInstanceAccessInput, opts ...request.Option) (*GetInstanceAccessOutput, error) { + req, out := c.GetInstanceAccessRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAliases = "ListAliases" + +// ListAliasesRequest generates a "aws/request.Request" representing the +// client's request for the ListAliases operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAliases for more information on using the ListAliases // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetGameSessionLogUrlRequest method. -// req, resp := client.GetGameSessionLogUrlRequest(params) +// // Example sending a request using the ListAliasesRequest method. 
+// req, resp := client.ListAliasesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetGameSessionLogUrl -func (c *GameLift) GetGameSessionLogUrlRequest(input *GetGameSessionLogUrlInput) (req *request.Request, output *GetGameSessionLogUrlOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListAliases +func (c *GameLift) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) { op := &request.Operation{ - Name: opGetGameSessionLogUrl, + Name: opListAliases, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { - input = &GetGameSessionLogUrlInput{} + input = &ListAliasesInput{} } - output = &GetGameSessionLogUrlOutput{} + output = &ListAliasesOutput{} req = c.newRequest(op, input, output) return } -// GetGameSessionLogUrl API operation for Amazon GameLift. -// -// Retrieves the location of stored game session logs for a specified game session. -// When a game session is terminated, Amazon GameLift automatically stores the -// logs in Amazon S3 and retains them for 14 days. Use this URL to download -// the logs. +// ListAliases API operation for Amazon GameLift. // -// See the AWS Service Limits (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html#limits_gamelift) -// page for maximum log file sizes. Log files that exceed this limit are not -// saved. +// Retrieves all aliases for this AWS account. You can filter the result set +// by alias name and/or routing strategy type. Use the pagination parameters +// to retrieve results in sequential pages. // -// * CreateGameSession +// Returned aliases are not listed in any particular order. // -// * DescribeGameSessions +// * CreateAlias // -// * DescribeGameSessionDetails +// * ListAliases // -// * SearchGameSessions +// * DescribeAlias // -// * UpdateGameSession +// * UpdateAlias // -// * GetGameSessionLogUrl +// * DeleteAlias // -// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement -// StopGameSessionPlacement +// * ResolveAlias // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation GetGameSessionLogUrl for usage and error information. +// API operation ListAliases for usage and error information. // // Returned Error Types: -// * InternalServiceException -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * NotFoundException -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// // * UnauthorizedException // The client failed authentication. Clients should not retry such requests. // @@ -5572,96 +7536,164 @@ func (c *GameLift) GetGameSessionLogUrlRequest(input *GetGameSessionLogUrlInput) // One or more parameter values in the request are invalid. Correct the invalid // parameter values before retrying. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetGameSessionLogUrl -func (c *GameLift) GetGameSessionLogUrl(input *GetGameSessionLogUrlInput) (*GetGameSessionLogUrlOutput, error) { - req, out := c.GetGameSessionLogUrlRequest(input) +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListAliases +func (c *GameLift) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { + req, out := c.ListAliasesRequest(input) return out, req.Send() } -// GetGameSessionLogUrlWithContext is the same as GetGameSessionLogUrl with the addition of +// ListAliasesWithContext is the same as ListAliases with the addition of // the ability to pass a context and additional request options. // -// See GetGameSessionLogUrl for details on how to use this API operation. +// See ListAliases for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) GetGameSessionLogUrlWithContext(ctx aws.Context, input *GetGameSessionLogUrlInput, opts ...request.Option) (*GetGameSessionLogUrlOutput, error) { - req, out := c.GetGameSessionLogUrlRequest(input) +func (c *GameLift) ListAliasesWithContext(ctx aws.Context, input *ListAliasesInput, opts ...request.Option) (*ListAliasesOutput, error) { + req, out := c.ListAliasesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetInstanceAccess = "GetInstanceAccess" +// ListAliasesPages iterates over the pages of a ListAliases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAliases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAliases operation. +// pageNum := 0 +// err := client.ListAliasesPages(params, +// func(page *gamelift.ListAliasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) ListAliasesPages(input *ListAliasesInput, fn func(*ListAliasesOutput, bool) bool) error { + return c.ListAliasesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// GetInstanceAccessRequest generates a "aws/request.Request" representing the -// client's request for the GetInstanceAccess operation. The "output" return +// ListAliasesPagesWithContext same as ListAliasesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *GameLift) ListAliasesPagesWithContext(ctx aws.Context, input *ListAliasesInput, fn func(*ListAliasesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAliasesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAliasesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAliasesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListBuilds = "ListBuilds" + +// ListBuildsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuilds operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetInstanceAccess for more information on using the GetInstanceAccess +// See ListBuilds for more information on using the ListBuilds // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetInstanceAccessRequest method. -// req, resp := client.GetInstanceAccessRequest(params) +// // Example sending a request using the ListBuildsRequest method. +// req, resp := client.ListBuildsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetInstanceAccess -func (c *GameLift) GetInstanceAccessRequest(input *GetInstanceAccessInput) (req *request.Request, output *GetInstanceAccessOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListBuilds +func (c *GameLift) ListBuildsRequest(input *ListBuildsInput) (req *request.Request, output *ListBuildsOutput) { op := &request.Operation{ - Name: opGetInstanceAccess, + Name: opListBuilds, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { - input = &GetInstanceAccessInput{} + input = &ListBuildsInput{} } - output = &GetInstanceAccessOutput{} + output = &ListBuildsOutput{} req = c.newRequest(op, input, output) return } -// GetInstanceAccess API operation for Amazon GameLift. +// ListBuilds API operation for Amazon GameLift. // -// Requests remote access to a fleet instance. Remote access is useful for debugging, -// gathering benchmarking data, or watching activity in real time. -// -// Access requires credentials that match the operating system of the instance. -// For a Windows instance, Amazon GameLift returns a user name and password -// as strings for use with a Windows Remote Desktop client. For a Linux instance, -// Amazon GameLift returns a user name and RSA private key, also as strings, -// for use with an SSH client. The private key must be saved in the proper format -// to a .pem file before using. If you're making this request using the AWS -// CLI, saving the secret can be handled as part of the GetInstanceAccess request. -// (See the example later in this topic). 
For more information on remote access, -// see Remotely Accessing an Instance (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-remote-access.html). +// Retrieves build resources for all builds associated with the AWS account +// in use. You can limit results to builds that are in a specific status by +// using the Status parameter. Use the pagination parameters to retrieve results +// in a set of sequential pages. // -// To request access to a specific instance, specify the IDs of both the instance -// and the fleet it belongs to. You can retrieve a fleet's instance IDs by calling -// DescribeInstances. If successful, an InstanceAccess object is returned containing -// the instance's IP address and a set of credentials. +// Build resources are not listed in any particular order. +// +// Learn more +// +// Upload a Custom Server Build (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) +// +// Related operations +// +// * CreateBuild +// +// * ListBuilds +// +// * DescribeBuild +// +// * UpdateBuild +// +// * DeleteBuild // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation GetInstanceAccess for usage and error information. +// API operation ListBuilds for usage and error information. // // Returned Error Types: // * UnauthorizedException @@ -5671,336 +7703,515 @@ func (c *GameLift) GetInstanceAccessRequest(input *GetInstanceAccessInput) (req // One or more parameter values in the request are invalid. Correct the invalid // parameter values before retrying. // -// * NotFoundException -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// // * InternalServiceException // The service encountered an unrecoverable internal failure while processing // the request. Clients can retry such requests immediately or after a waiting // period. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/GetInstanceAccess -func (c *GameLift) GetInstanceAccess(input *GetInstanceAccessInput) (*GetInstanceAccessOutput, error) { - req, out := c.GetInstanceAccessRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListBuilds +func (c *GameLift) ListBuilds(input *ListBuildsInput) (*ListBuildsOutput, error) { + req, out := c.ListBuildsRequest(input) return out, req.Send() } -// GetInstanceAccessWithContext is the same as GetInstanceAccess with the addition of +// ListBuildsWithContext is the same as ListBuilds with the addition of // the ability to pass a context and additional request options. // -// See GetInstanceAccess for details on how to use this API operation. +// See ListBuilds for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *GameLift) GetInstanceAccessWithContext(ctx aws.Context, input *GetInstanceAccessInput, opts ...request.Option) (*GetInstanceAccessOutput, error) { - req, out := c.GetInstanceAccessRequest(input) +func (c *GameLift) ListBuildsWithContext(ctx aws.Context, input *ListBuildsInput, opts ...request.Option) (*ListBuildsOutput, error) { + req, out := c.ListBuildsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListAliases = "ListAliases" +// ListBuildsPages iterates over the pages of a ListBuilds operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBuilds method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBuilds operation. +// pageNum := 0 +// err := client.ListBuildsPages(params, +// func(page *gamelift.ListBuildsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) ListBuildsPages(input *ListBuildsInput, fn func(*ListBuildsOutput, bool) bool) error { + return c.ListBuildsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// ListAliasesRequest generates a "aws/request.Request" representing the -// client's request for the ListAliases operation. The "output" return +// ListBuildsPagesWithContext same as ListBuildsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) ListBuildsPagesWithContext(ctx aws.Context, input *ListBuildsInput, fn func(*ListBuildsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListBuildsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListBuildsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListBuildsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListFleets = "ListFleets" + +// ListFleetsRequest generates a "aws/request.Request" representing the +// client's request for the ListFleets operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListAliases for more information on using the ListAliases +// See ListFleets for more information on using the ListFleets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListAliasesRequest method. -// req, resp := client.ListAliasesRequest(params) +// // Example sending a request using the ListFleetsRequest method. 
+// req, resp := client.ListFleetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListAliases -func (c *GameLift) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListFleets +func (c *GameLift) ListFleetsRequest(input *ListFleetsInput) (req *request.Request, output *ListFleetsOutput) { op := &request.Operation{ - Name: opListAliases, + Name: opListFleets, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { - input = &ListAliasesInput{} + input = &ListFleetsInput{} } - output = &ListAliasesOutput{} + output = &ListFleetsOutput{} req = c.newRequest(op, input, output) return } -// ListAliases API operation for Amazon GameLift. +// ListFleets API operation for Amazon GameLift. // -// Retrieves all aliases for this AWS account. You can filter the result set -// by alias name and/or routing strategy type. Use the pagination parameters -// to retrieve results in sequential pages. +// Retrieves a collection of fleet resources for this AWS account. You can filter +// the result set to find only those fleets that are deployed with a specific +// build or script. Use the pagination parameters to retrieve results in sequential +// pages. // -// Returned aliases are not listed in any particular order. +// Fleet resources are not listed in a particular order. // -// * CreateAlias +// Learn more // -// * ListAliases +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // -// * DescribeAlias +// Related operations // -// * UpdateAlias +// * CreateFleet // -// * DeleteAlias +// * ListFleets // -// * ResolveAlias +// * DeleteFleet +// +// * DescribeFleetAttributes +// +// * UpdateFleetAttributes +// +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation ListAliases for usage and error information. +// API operation ListFleets for usage and error information. // // Returned Error Types: -// * UnauthorizedException -// The client failed authentication. Clients should not retry such requests. +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. // // * InvalidRequestException // One or more parameter values in the request are invalid. Correct the invalid // parameter values before retrying. // -// * InternalServiceException -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListAliases -func (c *GameLift) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { - req, out := c.ListAliasesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListFleets +func (c *GameLift) ListFleets(input *ListFleetsInput) (*ListFleetsOutput, error) { + req, out := c.ListFleetsRequest(input) + return out, req.Send() +} + +// ListFleetsWithContext is the same as ListFleets with the addition of +// the ability to pass a context and additional request options. +// +// See ListFleets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) ListFleetsWithContext(ctx aws.Context, input *ListFleetsInput, opts ...request.Option) (*ListFleetsOutput, error) { + req, out := c.ListFleetsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) return out, req.Send() } -// ListAliasesWithContext is the same as ListAliases with the addition of -// the ability to pass a context and additional request options. +// ListFleetsPages iterates over the pages of a ListFleets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// See ListAliases for details on how to use this API operation. +// See ListFleets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListFleets operation. +// pageNum := 0 +// err := client.ListFleetsPages(params, +// func(page *gamelift.ListFleetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) ListFleetsPages(input *ListFleetsInput, fn func(*ListFleetsOutput, bool) bool) error { + return c.ListFleetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListFleetsPagesWithContext same as ListFleetsPages except +// it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) ListAliasesWithContext(ctx aws.Context, input *ListAliasesInput, opts ...request.Option) (*ListAliasesOutput, error) { - req, out := c.ListAliasesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() +func (c *GameLift) ListFleetsPagesWithContext(ctx aws.Context, input *ListFleetsInput, fn func(*ListFleetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListFleetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListFleetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListFleetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() } -const opListBuilds = "ListBuilds" +const opListGameServerGroups = "ListGameServerGroups" -// ListBuildsRequest generates a "aws/request.Request" representing the -// client's request for the ListBuilds operation. The "output" return +// ListGameServerGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListGameServerGroups operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListBuilds for more information on using the ListBuilds +// See ListGameServerGroups for more information on using the ListGameServerGroups // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListBuildsRequest method. -// req, resp := client.ListBuildsRequest(params) +// // Example sending a request using the ListGameServerGroupsRequest method. +// req, resp := client.ListGameServerGroupsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListBuilds -func (c *GameLift) ListBuildsRequest(input *ListBuildsInput) (req *request.Request, output *ListBuildsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListGameServerGroups +func (c *GameLift) ListGameServerGroupsRequest(input *ListGameServerGroupsInput) (req *request.Request, output *ListGameServerGroupsOutput) { op := &request.Operation{ - Name: opListBuilds, + Name: opListGameServerGroups, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { - input = &ListBuildsInput{} + input = &ListGameServerGroupsInput{} } - output = &ListBuildsOutput{} + output = &ListGameServerGroupsOutput{} req = c.newRequest(op, input, output) return } -// ListBuilds API operation for Amazon GameLift. +// ListGameServerGroups API operation for Amazon GameLift. // -// Retrieves build records for all builds associated with the AWS account in -// use. You can limit results to builds that are in a specific status by using -// the Status parameter. Use the pagination parameters to retrieve results in -// a set of sequential pages. +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. // -// Build records are not listed in any particular order. +// Retrieves information on all game servers groups that exist in the current +// AWS account for the selected Region. Use the pagination parameters to retrieve +// results in a set of sequential segments. 
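// A minimal illustrative sketch of the manual paging pattern described above,
// using the Limit and NextToken parameters; it assumes a configured
// *gamelift.GameLift client named svc (not an official SDK example).
//
//    input := &gamelift.ListGameServerGroupsInput{Limit: aws.Int64(10)}
//    for {
//        out, err := svc.ListGameServerGroups(input)
//        if err != nil {
//            log.Fatal(err)
//        }
//        for _, g := range out.GameServerGroups {
//            fmt.Println(aws.StringValue(g.GameServerGroupName))
//        }
//        if out.NextToken == nil {
//            break // no more result segments
//        }
//        input.NextToken = out.NextToken
//    }
//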
// // Learn more // -// Working with Builds (https://docs.aws.amazon.com/gamelift/latest/developerguide/build-intro.html) +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // // Related operations // -// * CreateBuild +// * CreateGameServerGroup // -// * ListBuilds +// * ListGameServerGroups // -// * DescribeBuild +// * DescribeGameServerGroup // -// * UpdateBuild +// * UpdateGameServerGroup // -// * DeleteBuild +// * DeleteGameServerGroup +// +// * ResumeGameServerGroup +// +// * SuspendGameServerGroup +// +// * DescribeGameServerInstances // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation ListBuilds for usage and error information. +// API operation ListGameServerGroups for usage and error information. // // Returned Error Types: -// * UnauthorizedException -// The client failed authentication. Clients should not retry such requests. -// // * InvalidRequestException // One or more parameter values in the request are invalid. Correct the invalid // parameter values before retrying. // +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// // * InternalServiceException // The service encountered an unrecoverable internal failure while processing // the request. Clients can retry such requests immediately or after a waiting // period. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListBuilds -func (c *GameLift) ListBuilds(input *ListBuildsInput) (*ListBuildsOutput, error) { - req, out := c.ListBuildsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListGameServerGroups +func (c *GameLift) ListGameServerGroups(input *ListGameServerGroupsInput) (*ListGameServerGroupsOutput, error) { + req, out := c.ListGameServerGroupsRequest(input) return out, req.Send() } -// ListBuildsWithContext is the same as ListBuilds with the addition of +// ListGameServerGroupsWithContext is the same as ListGameServerGroups with the addition of // the ability to pass a context and additional request options. // -// See ListBuilds for details on how to use this API operation. +// See ListGameServerGroups for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) ListBuildsWithContext(ctx aws.Context, input *ListBuildsInput, opts ...request.Option) (*ListBuildsOutput, error) { - req, out := c.ListBuildsRequest(input) +func (c *GameLift) ListGameServerGroupsWithContext(ctx aws.Context, input *ListGameServerGroupsInput, opts ...request.Option) (*ListGameServerGroupsOutput, error) { + req, out := c.ListGameServerGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListFleets = "ListFleets" +// ListGameServerGroupsPages iterates over the pages of a ListGameServerGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGameServerGroups method for more information on how to use this operation. 
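// An illustrative sketch of combining this pager with a request deadline via
// ListGameServerGroupsPagesWithContext; the svc client and the 30-second
// timeout are assumptions, and the generated example below shows the bare
// callback shape.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    err := svc.ListGameServerGroupsPagesWithContext(ctx,
//        &gamelift.ListGameServerGroupsInput{},
//        func(page *gamelift.ListGameServerGroupsOutput, lastPage bool) bool {
//            fmt.Printf("%d groups on this page (last: %v)\n", len(page.GameServerGroups), lastPage)
//            return true // keep paging until the final page
//        })
//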
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGameServerGroups operation. +// pageNum := 0 +// err := client.ListGameServerGroupsPages(params, +// func(page *gamelift.ListGameServerGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) ListGameServerGroupsPages(input *ListGameServerGroupsInput, fn func(*ListGameServerGroupsOutput, bool) bool) error { + return c.ListGameServerGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// ListFleetsRequest generates a "aws/request.Request" representing the -// client's request for the ListFleets operation. The "output" return +// ListGameServerGroupsPagesWithContext same as ListGameServerGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) ListGameServerGroupsPagesWithContext(ctx aws.Context, input *ListGameServerGroupsInput, fn func(*ListGameServerGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListGameServerGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListGameServerGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListGameServerGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListGameServers = "ListGameServers" + +// ListGameServersRequest generates a "aws/request.Request" representing the +// client's request for the ListGameServers operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListFleets for more information on using the ListFleets +// See ListGameServers for more information on using the ListGameServers // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListFleetsRequest method. -// req, resp := client.ListFleetsRequest(params) +// // Example sending a request using the ListGameServersRequest method. 
+// req, resp := client.ListGameServersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListFleets -func (c *GameLift) ListFleetsRequest(input *ListFleetsInput) (req *request.Request, output *ListFleetsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListGameServers +func (c *GameLift) ListGameServersRequest(input *ListGameServersInput) (req *request.Request, output *ListGameServersOutput) { op := &request.Operation{ - Name: opListFleets, + Name: opListGameServers, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { - input = &ListFleetsInput{} + input = &ListGameServersInput{} } - output = &ListFleetsOutput{} + output = &ListGameServersOutput{} req = c.newRequest(op, input, output) return } -// ListFleets API operation for Amazon GameLift. +// ListGameServers API operation for Amazon GameLift. // -// Retrieves a collection of fleet records for this AWS account. You can filter -// the result set to find only those fleets that are deployed with a specific -// build or script. Use the pagination parameters to retrieve results in sequential -// pages. +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. // -// Fleet records are not listed in a particular order. +// Retrieves information on all game servers that are currently active in a +// specified game server group. You can opt to sort the list by game server +// age. Use the pagination parameters to retrieve results in a set of sequential +// segments. // // Learn more // -// Set Up Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // // Related operations // -// * CreateFleet +// * RegisterGameServer // -// * ListFleets +// * ListGameServers // -// * DeleteFleet +// * ClaimGameServer // -// * DescribeFleetAttributes +// * DescribeGameServer // -// * UpdateFleetAttributes +// * UpdateGameServer // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * DeregisterGameServer // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation ListFleets for usage and error information. +// API operation ListGameServers for usage and error information. // // Returned Error Types: -// * InternalServiceException -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// -// * NotFoundException -// A service resource associated with the request could not be found. Clients -// should not retry such requests. -// // * InvalidRequestException // One or more parameter values in the request are invalid. Correct the invalid // parameter values before retrying. @@ -6008,28 +8219,85 @@ func (c *GameLift) ListFleetsRequest(input *ListFleetsInput) (req *request.Reque // * UnauthorizedException // The client failed authentication. Clients should not retry such requests. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListFleets -func (c *GameLift) ListFleets(input *ListFleetsInput) (*ListFleetsOutput, error) { - req, out := c.ListFleetsRequest(input) +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ListGameServers +func (c *GameLift) ListGameServers(input *ListGameServersInput) (*ListGameServersOutput, error) { + req, out := c.ListGameServersRequest(input) return out, req.Send() } -// ListFleetsWithContext is the same as ListFleets with the addition of +// ListGameServersWithContext is the same as ListGameServers with the addition of // the ability to pass a context and additional request options. // -// See ListFleets for details on how to use this API operation. +// See ListGameServers for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) ListFleetsWithContext(ctx aws.Context, input *ListFleetsInput, opts ...request.Option) (*ListFleetsOutput, error) { - req, out := c.ListFleetsRequest(input) +func (c *GameLift) ListGameServersWithContext(ctx aws.Context, input *ListGameServersInput, opts ...request.Option) (*ListGameServersOutput, error) { + req, out := c.ListGameServersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } +// ListGameServersPages iterates over the pages of a ListGameServers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGameServers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGameServers operation. +// pageNum := 0 +// err := client.ListGameServersPages(params, +// func(page *gamelift.ListGameServersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) ListGameServersPages(input *ListGameServersInput, fn func(*ListGameServersOutput, bool) bool) error { + return c.ListGameServersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListGameServersPagesWithContext same as ListGameServersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) ListGameServersPagesWithContext(ctx aws.Context, input *ListGameServersInput, fn func(*ListGameServersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListGameServersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListGameServersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListGameServersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListScripts = "ListScripts" // ListScriptsRequest generates a "aws/request.Request" representing the @@ -6061,6 +8329,12 @@ func (c *GameLift) ListScriptsRequest(input *ListScriptsInput) (req *request.Req Name: opListScripts, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { @@ -6135,6 +8409,58 @@ func (c *GameLift) ListScriptsWithContext(ctx aws.Context, input *ListScriptsInp return out, req.Send() } +// ListScriptsPages iterates over the pages of a ListScripts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListScripts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListScripts operation. +// pageNum := 0 +// err := client.ListScriptsPages(params, +// func(page *gamelift.ListScriptsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) ListScriptsPages(input *ListScriptsInput, fn func(*ListScriptsOutput, bool) bool) error { + return c.ListScriptsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListScriptsPagesWithContext same as ListScriptsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) ListScriptsPagesWithContext(ctx aws.Context, input *ListScriptsInput, fn func(*ListScriptsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListScriptsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListScriptsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListScriptsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the @@ -6180,9 +8506,9 @@ func (c *GameLift) ListTagsForResourceRequest(input *ListTagsForResourceInput) ( // ListTagsForResource API operation for Amazon GameLift. // // Retrieves all tags that are assigned to a GameLift resource. Resource tags -// are used to organize AWS resources for a range of purposes. This action handles -// the permissions necessary to manage tags for the following GameLift resource -// types: +// are used to organize AWS resources for a range of purposes. This operation +// handles the permissions necessary to manage tags for the following GameLift +// resource types: // // * Build // @@ -6413,27 +8739,161 @@ func (c *GameLift) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req *r // * UnauthorizedException // The client failed authentication. Clients should not retry such requests. 
// -// * NotFoundException -// A service resource associated with the request could not be found. Clients -// should not retry such requests. +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PutScalingPolicy +func (c *GameLift) PutScalingPolicy(input *PutScalingPolicyInput) (*PutScalingPolicyOutput, error) { + req, out := c.PutScalingPolicyRequest(input) + return out, req.Send() +} + +// PutScalingPolicyWithContext is the same as PutScalingPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutScalingPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) PutScalingPolicyWithContext(ctx aws.Context, input *PutScalingPolicyInput, opts ...request.Option) (*PutScalingPolicyOutput, error) { + req, out := c.PutScalingPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterGameServer = "RegisterGameServer" + +// RegisterGameServerRequest generates a "aws/request.Request" representing the +// client's request for the RegisterGameServer operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterGameServer for more information on using the RegisterGameServer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterGameServerRequest method. +// req, resp := client.RegisterGameServerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RegisterGameServer +func (c *GameLift) RegisterGameServerRequest(input *RegisterGameServerInput) (req *request.Request, output *RegisterGameServerOutput) { + op := &request.Operation{ + Name: opRegisterGameServer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterGameServerInput{} + } + + output = &RegisterGameServerOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterGameServer API operation for Amazon GameLift. +// +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. +// +// Creates a new game server resource and notifies GameLift FleetIQ that the +// game server is ready to host gameplay and players. This operation is called +// by a game server process that is running on an instance in a game server +// group. Registering game servers enables GameLift FleetIQ to track available +// game servers and enables game clients and services to claim a game server +// for a new game session. 
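// A rough illustrative sketch of such a registration call, made from the game
// server process itself; the group name, server ID, instance ID, and
// connection string below are placeholder assumptions, and svc is an assumed
// *gamelift.GameLift client.
//
//    out, err := svc.RegisterGameServer(&gamelift.RegisterGameServerInput{
//        GameServerGroupName: aws.String("my-game-server-group"),
//        GameServerId:        aws.String("gs-0001"), // unique within the group
//        InstanceId:          aws.String("i-0123456789abcdef0"),
//        ConnectionInfo:      aws.String("10.0.0.12:7777"), // optional
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println("registered:", aws.StringValue(out.GameServer.GameServerId))
//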
+// +// To register a game server, identify the game server group and instance where +// the game server is running, and provide a unique identifier for the game +// server. You can also include connection and game server data. When a game +// client or service requests a game server by calling ClaimGameServer, this +// information is returned in the response. +// +// Once a game server is successfully registered, it is put in status AVAILABLE. +// A request to register a game server may fail if the instance it is running +// on is in the process of shutting down as part of instance balancing or scale-down +// activity. +// +// Learn more +// +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) +// +// Related operations +// +// * RegisterGameServer +// +// * ListGameServers +// +// * ClaimGameServer +// +// * DescribeGameServer +// +// * UpdateGameServer +// +// * DeregisterGameServer +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation RegisterGameServer for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * ConflictException +// The requested operation would cause a conflict with the current state of +// a service resource associated with the request. Resolve the conflict before +// retrying this request. +// +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * LimitExceededException +// The requested operation would cause the resource to exceed the allowed service +// limit. Resolve the issue before retrying. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/PutScalingPolicy -func (c *GameLift) PutScalingPolicy(input *PutScalingPolicyInput) (*PutScalingPolicyOutput, error) { - req, out := c.PutScalingPolicyRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/RegisterGameServer +func (c *GameLift) RegisterGameServer(input *RegisterGameServerInput) (*RegisterGameServerOutput, error) { + req, out := c.RegisterGameServerRequest(input) return out, req.Send() } -// PutScalingPolicyWithContext is the same as PutScalingPolicy with the addition of +// RegisterGameServerWithContext is the same as RegisterGameServer with the addition of // the ability to pass a context and additional request options. // -// See PutScalingPolicy for details on how to use this API operation. +// See RegisterGameServer for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *GameLift) PutScalingPolicyWithContext(ctx aws.Context, input *PutScalingPolicyInput, opts ...request.Option) (*PutScalingPolicyOutput, error) { - req, out := c.PutScalingPolicyRequest(input) +func (c *GameLift) RegisterGameServerWithContext(ctx aws.Context, input *RegisterGameServerInput, opts ...request.Option) (*RegisterGameServerOutput, error) { + req, out := c.RegisterGameServerRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -6493,7 +8953,7 @@ func (c *GameLift) RequestUploadCredentialsRequest(input *RequestUploadCredentia // // Learn more // -// Uploading Your Game (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) +// Create a Build with Files in S3 (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-cli-uploading.html#gamelift-build-cli-uploading-create-build) // // Related operations // @@ -6664,6 +9124,132 @@ func (c *GameLift) ResolveAliasWithContext(ctx aws.Context, input *ResolveAliasI return out, req.Send() } +const opResumeGameServerGroup = "ResumeGameServerGroup" + +// ResumeGameServerGroupRequest generates a "aws/request.Request" representing the +// client's request for the ResumeGameServerGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResumeGameServerGroup for more information on using the ResumeGameServerGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResumeGameServerGroupRequest method. +// req, resp := client.ResumeGameServerGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ResumeGameServerGroup +func (c *GameLift) ResumeGameServerGroupRequest(input *ResumeGameServerGroupInput) (req *request.Request, output *ResumeGameServerGroupOutput) { + op := &request.Operation{ + Name: opResumeGameServerGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResumeGameServerGroupInput{} + } + + output = &ResumeGameServerGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResumeGameServerGroup API operation for Amazon GameLift. +// +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. +// +// Reinstates activity on a game server group after it has been suspended. A +// game server group might be suspended by theSuspendGameServerGroup operation, +// or it might be suspended involuntarily due to a configuration problem. In +// the second case, you can manually resume activity on the group once the configuration +// problem has been resolved. Refer to the game server group status and status +// reason for more information on why group activity is suspended. +// +// To resume activity, specify a game server group ARN and the type of activity +// to be resumed. If successful, a GameServerGroup object is returned showing +// that the resumed activity is no longer listed in SuspendedActions. 
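// An illustrative sketch of resuming a suspended activity; the group name is a
// placeholder assumption and REPLACE_INSTANCE_TYPES is the suspended action
// being resumed (svc is an assumed *gamelift.GameLift client).
//
//    out, err := svc.ResumeGameServerGroup(&gamelift.ResumeGameServerGroupInput{
//        GameServerGroupName: aws.String("my-game-server-group"), // name or ARN
//        ResumeActions:       []*string{aws.String("REPLACE_INSTANCE_TYPES")},
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    // On success, the resumed action should no longer appear here.
//    fmt.Println(aws.StringValueSlice(out.GameServerGroup.SuspendedActions))
//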
+// +// Learn more +// +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) +// +// Related operations +// +// * CreateGameServerGroup +// +// * ListGameServerGroups +// +// * DescribeGameServerGroup +// +// * UpdateGameServerGroup +// +// * DeleteGameServerGroup +// +// * ResumeGameServerGroup +// +// * SuspendGameServerGroup +// +// * DescribeGameServerInstances +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation ResumeGameServerGroup for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/ResumeGameServerGroup +func (c *GameLift) ResumeGameServerGroup(input *ResumeGameServerGroupInput) (*ResumeGameServerGroupOutput, error) { + req, out := c.ResumeGameServerGroupRequest(input) + return out, req.Send() +} + +// ResumeGameServerGroupWithContext is the same as ResumeGameServerGroup with the addition of +// the ability to pass a context and additional request options. +// +// See ResumeGameServerGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) ResumeGameServerGroupWithContext(ctx aws.Context, input *ResumeGameServerGroupInput, opts ...request.Option) (*ResumeGameServerGroupOutput, error) { + req, out := c.ResumeGameServerGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opSearchGameSessions = "SearchGameSessions" // SearchGameSessionsRequest generates a "aws/request.Request" representing the @@ -6695,6 +9281,12 @@ func (c *GameLift) SearchGameSessionsRequest(input *SearchGameSessionsInput) (re Name: opSearchGameSessions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "Limit", + TruncationToken: "", + }, } if input == nil { @@ -6826,6 +9418,58 @@ func (c *GameLift) SearchGameSessionsWithContext(ctx aws.Context, input *SearchG return out, req.Send() } +// SearchGameSessionsPages iterates over the pages of a SearchGameSessions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See SearchGameSessions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
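+//
+// Usage sketch (illustrative only, not generated SDK code): collecting results
+// across all pages with the SearchGameSessionsPages helper added above. The
+// FleetId and FilterExpression input fields and the GameSessions/GameSessionId
+// output fields are not shown in this hunk and are assumptions; the fleet ID is
+// a placeholder, and imports of aws, session, log, fmt, and this gamelift
+// package are implied.
+//
+//    svc := gamelift.New(session.Must(session.NewSession()))
+//
+//    err := svc.SearchGameSessionsPages(&gamelift.SearchGameSessionsInput{
+//        FleetId:          aws.String("fleet-1234"),                      // assumed field, placeholder ID
+//        FilterExpression: aws.String("hasAvailablePlayerSessions=true"), // assumed field
+//        Limit:            aws.Int64(20),
+//    }, func(page *gamelift.SearchGameSessionsOutput, lastPage bool) bool {
+//        for _, gs := range page.GameSessions { // assumed output field
+//            fmt.Println(aws.StringValue(gs.GameSessionId))
+//        }
+//        return true // keep paging until NextToken is exhausted
+//    })
+//    if err != nil {
+//        log.Printf("SearchGameSessions: %v", err)
+//    }
+//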
+// +// // Example iterating over at most 3 pages of a SearchGameSessions operation. +// pageNum := 0 +// err := client.SearchGameSessionsPages(params, +// func(page *gamelift.SearchGameSessionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GameLift) SearchGameSessionsPages(input *SearchGameSessionsInput, fn func(*SearchGameSessionsOutput, bool) bool) error { + return c.SearchGameSessionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// SearchGameSessionsPagesWithContext same as SearchGameSessionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) SearchGameSessionsPagesWithContext(ctx aws.Context, input *SearchGameSessionsInput, fn func(*SearchGameSessionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *SearchGameSessionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.SearchGameSessionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*SearchGameSessionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opStartFleetActions = "StartFleetActions" // StartFleetActionsRequest generates a "aws/request.Request" representing the @@ -6882,7 +9526,7 @@ func (c *GameLift) StartFleetActionsRequest(input *StartFleetActionsInput) (req // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // // Related operations // @@ -6892,14 +9536,11 @@ func (c *GameLift) StartFleetActionsRequest(input *StartFleetActionsInput) (req // // * DeleteFleet // -// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings -// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits -// DescribeFleetEvents +// * DescribeFleetAttributes // -// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings -// UpdateRuntimeConfiguration +// * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7285,25 +9926,12 @@ func (c *GameLift) StartMatchmakingRequest(input *StartMatchmakingInput) (req *r // To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, // and include the players to be matched. You must also include a set of player // attributes relevant for the matchmaking configuration. If successful, a matchmaking -// ticket is returned with status set to QUEUED. Track the status of the ticket -// to respond as needed and acquire game session connection information for -// successfully completed matches. -// -// Tracking ticket status -- A couple of options are available for tracking -// the status of matchmaking requests: -// -// * Polling -- Call DescribeMatchmaking. 
This operation returns the full -// ticket object, including current status and (for completed tickets) game -// session connection info. We recommend polling no more than once every -// 10 seconds. -// -// * Notifications -- Get event notifications for changes in ticket status -// using Amazon Simple Notification Service (SNS). Notifications are easy -// to set up (see CreateMatchmakingConfiguration) and typically deliver match -// status changes faster and more efficiently than polling. We recommend -// that you use polling to back up to notifications (since delivery is not -// guaranteed) and call DescribeMatchmaking only when notifications are not -// received within 30 seconds. +// ticket is returned with status set to QUEUED. +// +// Track the status of the ticket to respond as needed and acquire game session +// connection information for successfully completed matches. Ticket status +// updates are tracked using event notification through Amazon Simple Notification +// Service (SNS), which is defined in the matchmaking configuration. // // Processing a matchmaking request -- FlexMatch handles a matchmaking request // as follows: @@ -7446,19 +10074,18 @@ func (c *GameLift) StopFleetActionsRequest(input *StopFleetActionsInput) (req *r // StopFleetActions API operation for Amazon GameLift. // // Suspends activity on a fleet. Currently, this operation is used to stop a -// fleet's auto-scaling activity. It is used to temporarily stop scaling events -// triggered by the fleet's scaling policies. The policies can be retained and -// auto-scaling activity can be restarted using StartFleetActions. You can view -// a fleet's stopped actions using DescribeFleetAttributes. +// fleet's auto-scaling activity. It is used to temporarily stop triggering +// scaling events. The policies can be retained and auto-scaling activity can +// be restarted using StartFleetActions. You can view a fleet's stopped actions +// using DescribeFleetAttributes. // // To stop fleet actions, specify the fleet ID and the type of actions to suspend. // When auto-scaling fleet actions are stopped, Amazon GameLift no longer initiates -// scaling events except to maintain the fleet's desired instances setting (FleetCapacity. -// Changes to the fleet's capacity must be done manually using UpdateFleetCapacity. +// scaling events except in response to manual changes using UpdateFleetCapacity. // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // // Related operations // @@ -7468,14 +10095,11 @@ func (c *GameLift) StopFleetActionsRequest(input *StopFleetActionsInput) (req *r // // * DeleteFleet // -// * Describe fleets: DescribeFleetAttributes DescribeFleetCapacity DescribeFleetPortSettings -// DescribeFleetUtilization DescribeRuntimeConfiguration DescribeEC2InstanceLimits -// DescribeFleetEvents +// * DescribeFleetAttributes // -// * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings -// UpdateRuntimeConfiguration +// * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7577,28 +10201,142 @@ func (c *GameLift) StopGameSessionPlacementRequest(input *StopGameSessionPlaceme // // * DescribeGameSessionDetails // -// * SearchGameSessions +// * SearchGameSessions +// +// * UpdateGameSession +// +// * GetGameSessionLogUrl +// +// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement +// StopGameSessionPlacement +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation StopGameSessionPlacement for usage and error information. +// +// Returned Error Types: +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopGameSessionPlacement +func (c *GameLift) StopGameSessionPlacement(input *StopGameSessionPlacementInput) (*StopGameSessionPlacementOutput, error) { + req, out := c.StopGameSessionPlacementRequest(input) + return out, req.Send() +} + +// StopGameSessionPlacementWithContext is the same as StopGameSessionPlacement with the addition of +// the ability to pass a context and additional request options. +// +// See StopGameSessionPlacement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) StopGameSessionPlacementWithContext(ctx aws.Context, input *StopGameSessionPlacementInput, opts ...request.Option) (*StopGameSessionPlacementOutput, error) { + req, out := c.StopGameSessionPlacementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStopMatchmaking = "StopMatchmaking" + +// StopMatchmakingRequest generates a "aws/request.Request" representing the +// client's request for the StopMatchmaking operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopMatchmaking for more information on using the StopMatchmaking +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopMatchmakingRequest method. 
+// req, resp := client.StopMatchmakingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopMatchmaking +func (c *GameLift) StopMatchmakingRequest(input *StopMatchmakingInput) (req *request.Request, output *StopMatchmakingOutput) { + op := &request.Operation{ + Name: opStopMatchmaking, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopMatchmakingInput{} + } + + output = &StopMatchmakingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StopMatchmaking API operation for Amazon GameLift. +// +// Cancels a matchmaking ticket or match backfill ticket that is currently being +// processed. To stop the matchmaking operation, specify the ticket ID. If successful, +// work on the ticket is stopped, and the ticket status is changed to CANCELLED. +// +// This call is also used to turn off automatic backfill for an individual game +// session. This is for game sessions that are created with a matchmaking configuration +// that has automatic backfill enabled. The ticket ID is included in the MatchmakerData +// of an updated game session object, which is provided to the game server. +// +// If the operation is successful, the service sends back an empty JSON struct +// with the HTTP 200 response (not an empty HTTP body). +// +// Learn more +// +// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// +// Related operations +// +// * StartMatchmaking +// +// * DescribeMatchmaking // -// * UpdateGameSession +// * StopMatchmaking // -// * GetGameSessionLogUrl +// * AcceptMatch // -// * Game session placements StartGameSessionPlacement DescribeGameSessionPlacement -// StopGameSessionPlacement +// * StartMatchBackfill // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation StopGameSessionPlacement for usage and error information. +// API operation StopMatchmaking for usage and error information. // // Returned Error Types: -// * InternalServiceException -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. -// // * InvalidRequestException // One or more parameter values in the request are invalid. Correct the invalid // parameter values before retrying. @@ -7607,110 +10345,128 @@ func (c *GameLift) StopGameSessionPlacementRequest(input *StopGameSessionPlaceme // A service resource associated with the request could not be found. Clients // should not retry such requests. // -// * UnauthorizedException -// The client failed authentication. Clients should not retry such requests. +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopGameSessionPlacement -func (c *GameLift) StopGameSessionPlacement(input *StopGameSessionPlacementInput) (*StopGameSessionPlacementOutput, error) { - req, out := c.StopGameSessionPlacementRequest(input) +// * UnsupportedRegionException +// The requested operation is not supported in the Region specified. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopMatchmaking +func (c *GameLift) StopMatchmaking(input *StopMatchmakingInput) (*StopMatchmakingOutput, error) { + req, out := c.StopMatchmakingRequest(input) return out, req.Send() } -// StopGameSessionPlacementWithContext is the same as StopGameSessionPlacement with the addition of +// StopMatchmakingWithContext is the same as StopMatchmaking with the addition of // the ability to pass a context and additional request options. // -// See StopGameSessionPlacement for details on how to use this API operation. +// See StopMatchmaking for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) StopGameSessionPlacementWithContext(ctx aws.Context, input *StopGameSessionPlacementInput, opts ...request.Option) (*StopGameSessionPlacementOutput, error) { - req, out := c.StopGameSessionPlacementRequest(input) +func (c *GameLift) StopMatchmakingWithContext(ctx aws.Context, input *StopMatchmakingInput, opts ...request.Option) (*StopMatchmakingOutput, error) { + req, out := c.StopMatchmakingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStopMatchmaking = "StopMatchmaking" +const opSuspendGameServerGroup = "SuspendGameServerGroup" -// StopMatchmakingRequest generates a "aws/request.Request" representing the -// client's request for the StopMatchmaking operation. The "output" return +// SuspendGameServerGroupRequest generates a "aws/request.Request" representing the +// client's request for the SuspendGameServerGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopMatchmaking for more information on using the StopMatchmaking +// See SuspendGameServerGroup for more information on using the SuspendGameServerGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StopMatchmakingRequest method. -// req, resp := client.StopMatchmakingRequest(params) +// // Example sending a request using the SuspendGameServerGroupRequest method. 
+// req, resp := client.SuspendGameServerGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopMatchmaking -func (c *GameLift) StopMatchmakingRequest(input *StopMatchmakingInput) (req *request.Request, output *StopMatchmakingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/SuspendGameServerGroup +func (c *GameLift) SuspendGameServerGroupRequest(input *SuspendGameServerGroupInput) (req *request.Request, output *SuspendGameServerGroupOutput) { op := &request.Operation{ - Name: opStopMatchmaking, + Name: opSuspendGameServerGroup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StopMatchmakingInput{} + input = &SuspendGameServerGroupInput{} } - output = &StopMatchmakingOutput{} + output = &SuspendGameServerGroupOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// StopMatchmaking API operation for Amazon GameLift. +// SuspendGameServerGroup API operation for Amazon GameLift. // -// Cancels a matchmaking ticket or match backfill ticket that is currently being -// processed. To stop the matchmaking operation, specify the ticket ID. If successful, -// work on the ticket is stopped, and the ticket status is changed to CANCELLED. +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. // -// This call is also used to turn off automatic backfill for an individual game -// session. This is for game sessions that are created with a matchmaking configuration -// that has automatic backfill enabled. The ticket ID is included in the MatchmakerData -// of an updated game session object, which is provided to the game server. +// Temporarily stops activity on a game server group without terminating instances +// or the game server group. You can restart activity by calling ResumeGameServerGroup. +// You can suspend the following activity: // -// If the action is successful, the service sends back an empty JSON struct -// with the HTTP 200 response (not an empty HTTP body). +// * Instance type replacement - This activity evaluates the current game +// hosting viability of all Spot instance types that are defined for the +// game server group. It updates the Auto Scaling group to remove nonviable +// Spot Instance types, which have a higher chance of game server interruptions. +// It then balances capacity across the remaining viable Spot Instance types. +// When this activity is suspended, the Auto Scaling group continues with +// its current balance, regardless of viability. Instance protection, utilization +// metrics, and capacity scaling activities continue to be active. +// +// To suspend activity, specify a game server group ARN and the type of activity +// to be suspended. If successful, a GameServerGroup object is returned showing +// that the activity is listed in SuspendedActions. 
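+//
+// Usage sketch (illustrative only, not generated SDK code): suspending instance
+// type replacement on a game server group, per the description above. The
+// SuspendActions field name and the "REPLACE_INSTANCE_TYPES" value are
+// assumptions not shown in this hunk; the group name is a placeholder, and
+// imports of aws, session, log, fmt, and this gamelift package are implied.
+//
+//    svc := gamelift.New(session.Must(session.NewSession()))
+//
+//    out, err := svc.SuspendGameServerGroup(&gamelift.SuspendGameServerGroupInput{
+//        GameServerGroupName: aws.String("example-gsg"),
+//        SuspendActions:      []*string{aws.String("REPLACE_INSTANCE_TYPES")}, // assumed field/value
+//    })
+//    if err != nil {
+//        log.Printf("SuspendGameServerGroup: %v", err)
+//        return
+//    }
+//    // The suspended activity should now appear in SuspendedActions on the returned group.
+//    fmt.Println(out.GameServerGroup) // GameServerGroup field assumed on the output
+//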
// // Learn more // -// Add FlexMatch to a Game Client (https://docs.aws.amazon.com/gamelift/latest/developerguide/match-client.html) +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) // // Related operations // -// * StartMatchmaking +// * CreateGameServerGroup // -// * DescribeMatchmaking +// * ListGameServerGroups // -// * StopMatchmaking +// * DescribeGameServerGroup // -// * AcceptMatch +// * UpdateGameServerGroup // -// * StartMatchBackfill +// * DeleteGameServerGroup +// +// * ResumeGameServerGroup +// +// * SuspendGameServerGroup +// +// * DescribeGameServerInstances // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GameLift's -// API operation StopMatchmaking for usage and error information. +// API operation SuspendGameServerGroup for usage and error information. // // Returned Error Types: // * InvalidRequestException @@ -7721,31 +10477,31 @@ func (c *GameLift) StopMatchmakingRequest(input *StopMatchmakingInput) (req *req // A service resource associated with the request could not be found. Clients // should not retry such requests. // +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// // * InternalServiceException // The service encountered an unrecoverable internal failure while processing // the request. Clients can retry such requests immediately or after a waiting // period. // -// * UnsupportedRegionException -// The requested operation is not supported in the Region specified. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/StopMatchmaking -func (c *GameLift) StopMatchmaking(input *StopMatchmakingInput) (*StopMatchmakingOutput, error) { - req, out := c.StopMatchmakingRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/SuspendGameServerGroup +func (c *GameLift) SuspendGameServerGroup(input *SuspendGameServerGroupInput) (*SuspendGameServerGroupOutput, error) { + req, out := c.SuspendGameServerGroupRequest(input) return out, req.Send() } -// StopMatchmakingWithContext is the same as StopMatchmaking with the addition of +// SuspendGameServerGroupWithContext is the same as SuspendGameServerGroup with the addition of // the ability to pass a context and additional request options. // -// See StopMatchmaking for details on how to use this API operation. +// See SuspendGameServerGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) StopMatchmakingWithContext(ctx aws.Context, input *StopMatchmakingInput, opts ...request.Option) (*StopMatchmakingOutput, error) { - req, out := c.StopMatchmakingRequest(input) +func (c *GameLift) SuspendGameServerGroupWithContext(ctx aws.Context, input *SuspendGameServerGroupInput, opts ...request.Option) (*SuspendGameServerGroupOutput, error) { + req, out := c.SuspendGameServerGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -7799,8 +10555,8 @@ func (c *GameLift) TagResourceRequest(input *TagResourceInput) (req *request.Req // Assigns a tag to a GameLift resource. 
AWS resource tags provide an additional // management tool set. You can use tags to organize resources, create IAM permissions // policies to manage access to groups of resources, customize AWS cost breakdowns, -// etc. This action handles the permissions necessary to manage tags for the -// following GameLift resource types: +// etc. This operation handles the permissions necessary to manage tags for +// the following GameLift resource types: // // * Build // @@ -7817,7 +10573,7 @@ func (c *GameLift) TagResourceRequest(input *TagResourceInput) (req *request.Req // * MatchmakingRuleSet // // To add a tag to a resource, specify the unique ARN value for the resource -// and provide a trig list containing one or more tags. The operation succeeds +// and provide a tag list containing one or more tags. The operation succeeds // even if the list includes tags that are already assigned to the specified // resource. // @@ -7930,7 +10686,7 @@ func (c *GameLift) UntagResourceRequest(input *UntagResourceInput) (req *request // UntagResource API operation for Amazon GameLift. // // Removes a tag that is assigned to a GameLift resource. Resource tags are -// used to organize AWS resources for a range of purposes. This action handles +// used to organize AWS resources for a range of purposes. This operation handles // the permissions necessary to manage tags for the following GameLift resource // types: // @@ -7950,8 +10706,8 @@ func (c *GameLift) UntagResourceRequest(input *UntagResourceInput) (req *request // // To remove a tag from a resource, specify the unique ARN value for the resource // and provide a string list containing one or more tags to be removed. This -// action succeeds even if the list includes tags that are not currently assigned -// to the specified resource. +// operation succeeds even if the list includes tags that are not currently +// assigned to the specified resource. // // Learn more // @@ -8167,14 +10923,14 @@ func (c *GameLift) UpdateBuildRequest(input *UpdateBuildInput) (req *request.Req // UpdateBuild API operation for Amazon GameLift. // -// Updates metadata in a build record, including the build name and version. +// Updates metadata in a build resource, including the build name and version. // To update the metadata, specify the build ID to update and provide the new // values. If successful, a build object containing the updated metadata is // returned. // // Learn more // -// Working with Builds (https://docs.aws.amazon.com/gamelift/latest/developerguide/build-intro.html) +// Upload a Custom Server Build (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-build-intro.html) // // Related operations // @@ -8284,7 +11040,7 @@ func (c *GameLift) UpdateFleetAttributesRequest(input *UpdateFleetAttributesInpu // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // // Related operations // @@ -8299,7 +11055,7 @@ func (c *GameLift) UpdateFleetAttributesRequest(input *UpdateFleetAttributesInpu // * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings // UpdateRuntimeConfiguration // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8405,10 +11161,10 @@ func (c *GameLift) UpdateFleetCapacityRequest(input *UpdateFleetCapacityInput) ( // UpdateFleetCapacity API operation for Amazon GameLift. // -// Updates capacity settings for a fleet. Use this action to specify the number -// of EC2 instances (hosts) that you want this fleet to contain. Before calling -// this action, you may want to call DescribeEC2InstanceLimits to get the maximum -// capacity based on the fleet's EC2 instance type. +// Updates capacity settings for a fleet. Use this operation to specify the +// number of EC2 instances (hosts) that you want this fleet to contain. Before +// calling this operation, you may want to call DescribeEC2InstanceLimits to +// get the maximum capacity based on the fleet's EC2 instance type. // // Specify minimum and maximum number of instances. Amazon GameLift will not // change fleet capacity to values fall outside of this range. This is particularly @@ -8424,7 +11180,7 @@ func (c *GameLift) UpdateFleetCapacityRequest(input *UpdateFleetCapacityInput) ( // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // // Related operations // @@ -8439,7 +11195,7 @@ func (c *GameLift) UpdateFleetCapacityRequest(input *UpdateFleetCapacityInput) ( // * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings // UpdateRuntimeConfiguration // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8554,7 +11310,7 @@ func (c *GameLift) UpdateFleetPortSettingsRequest(input *UpdateFleetPortSettings // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // // Related operations // @@ -8569,7 +11325,7 @@ func (c *GameLift) UpdateFleetPortSettingsRequest(input *UpdateFleetPortSettings // * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings // UpdateRuntimeConfiguration // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8597,35 +11353,295 @@ func (c *GameLift) UpdateFleetPortSettingsRequest(input *UpdateFleetPortSettings // The requested operation would cause the resource to exceed the allowed service // limit. Resolve the issue before retrying. // -// * InternalServiceException -// The service encountered an unrecoverable internal failure while processing -// the request. Clients can retry such requests immediately or after a waiting -// period. +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// * InvalidRequestException +// One or more parameter values in the request are invalid. 
Correct the invalid +// parameter values before retrying. +// +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetPortSettings +func (c *GameLift) UpdateFleetPortSettings(input *UpdateFleetPortSettingsInput) (*UpdateFleetPortSettingsOutput, error) { + req, out := c.UpdateFleetPortSettingsRequest(input) + return out, req.Send() +} + +// UpdateFleetPortSettingsWithContext is the same as UpdateFleetPortSettings with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateFleetPortSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) UpdateFleetPortSettingsWithContext(ctx aws.Context, input *UpdateFleetPortSettingsInput, opts ...request.Option) (*UpdateFleetPortSettingsOutput, error) { + req, out := c.UpdateFleetPortSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateGameServer = "UpdateGameServer" + +// UpdateGameServerRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGameServer operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGameServer for more information on using the UpdateGameServer +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateGameServerRequest method. +// req, resp := client.UpdateGameServerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameServer +func (c *GameLift) UpdateGameServerRequest(input *UpdateGameServerInput) (req *request.Request, output *UpdateGameServerOutput) { + op := &request.Operation{ + Name: opUpdateGameServer, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGameServerInput{} + } + + output = &UpdateGameServerOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateGameServer API operation for Amazon GameLift. +// +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. +// +// Updates information about a registered game server to help GameLift FleetIQ +// to track game server availability. This operation is called by a game server +// process that is running on an instance in a game server group. +// +// Use this operation to update the following types of game server information. +// You can make all three types of updates in the same request: +// +// * To update the game server's utilization status, identify the game server +// and game server group and specify the current utilization status. 
Use +// this status to identify when game servers are currently hosting games +// and when they are available to be claimed. +// +// * To report health status, identify the game server and game server group +// and set health check to HEALTHY. If a game server does not report health +// status for a certain length of time, the game server is no longer considered +// healthy. As a result, it will be eventually deregistered from the game +// server group to avoid affecting utilization metrics. The best practice +// is to report health every 60 seconds. +// +// * To change game server metadata, provide updated game server data. +// +// Once a game server is successfully updated, the relevant statuses and timestamps +// are updated. +// +// Learn more +// +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) +// +// Related operations +// +// * RegisterGameServer +// +// * ListGameServers +// +// * ClaimGameServer +// +// * DescribeGameServer +// +// * UpdateGameServer +// +// * DeregisterGameServer +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GameLift's +// API operation UpdateGameServer for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// One or more parameter values in the request are invalid. Correct the invalid +// parameter values before retrying. +// +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// +// * UnauthorizedException +// The client failed authentication. Clients should not retry such requests. +// +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameServer +func (c *GameLift) UpdateGameServer(input *UpdateGameServerInput) (*UpdateGameServerOutput, error) { + req, out := c.UpdateGameServerRequest(input) + return out, req.Send() +} + +// UpdateGameServerWithContext is the same as UpdateGameServer with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateGameServer for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GameLift) UpdateGameServerWithContext(ctx aws.Context, input *UpdateGameServerInput, opts ...request.Option) (*UpdateGameServerOutput, error) { + req, out := c.UpdateGameServerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateGameServerGroup = "UpdateGameServerGroup" + +// UpdateGameServerGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGameServerGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGameServerGroup for more information on using the UpdateGameServerGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateGameServerGroupRequest method. +// req, resp := client.UpdateGameServerGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameServerGroup +func (c *GameLift) UpdateGameServerGroupRequest(input *UpdateGameServerGroupInput) (req *request.Request, output *UpdateGameServerGroupOutput) { + op := &request.Operation{ + Name: opUpdateGameServerGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGameServerGroupInput{} + } + + output = &UpdateGameServerGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateGameServerGroup API operation for Amazon GameLift. +// +// This operation is used with the Amazon GameLift FleetIQ solution and game +// server groups. +// +// Updates GameLift FleetIQ-specific properties for a game server group. Many +// Auto Scaling group properties are updated on the Auto Scaling group directly, +// including the launch template, Auto Scaling policies, and maximum/minimum/desired +// instance counts. +// +// To update the game server group, specify the game server group ID and provide +// the updated values. Before applying the updates, the new values are validated +// to ensure that GameLift FleetIQ can continue to perform instance balancing +// activity. If successful, a GameServerGroup object is returned. +// +// Learn more +// +// GameLift FleetIQ Guide (https://docs.aws.amazon.com/gamelift/latest/fleetiqguide/gsg-intro.html) +// +// Related operations +// +// * CreateGameServerGroup +// +// * ListGameServerGroups +// +// * DescribeGameServerGroup +// +// * UpdateGameServerGroup +// +// * DeleteGameServerGroup +// +// * ResumeGameServerGroup +// +// * SuspendGameServerGroup +// +// * DescribeGameServerInstances +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // +// See the AWS API reference guide for Amazon GameLift's +// API operation UpdateGameServerGroup for usage and error information. +// +// Returned Error Types: // * InvalidRequestException // One or more parameter values in the request are invalid. Correct the invalid // parameter values before retrying. // +// * NotFoundException +// A service resource associated with the request could not be found. Clients +// should not retry such requests. +// // * UnauthorizedException // The client failed authentication. Clients should not retry such requests. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateFleetPortSettings -func (c *GameLift) UpdateFleetPortSettings(input *UpdateFleetPortSettingsInput) (*UpdateFleetPortSettingsOutput, error) { - req, out := c.UpdateFleetPortSettingsRequest(input) +// * InternalServiceException +// The service encountered an unrecoverable internal failure while processing +// the request. Clients can retry such requests immediately or after a waiting +// period. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01/UpdateGameServerGroup +func (c *GameLift) UpdateGameServerGroup(input *UpdateGameServerGroupInput) (*UpdateGameServerGroupOutput, error) { + req, out := c.UpdateGameServerGroupRequest(input) return out, req.Send() } -// UpdateFleetPortSettingsWithContext is the same as UpdateFleetPortSettings with the addition of +// UpdateGameServerGroupWithContext is the same as UpdateGameServerGroup with the addition of // the ability to pass a context and additional request options. // -// See UpdateFleetPortSettings for details on how to use this API operation. +// See UpdateGameServerGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GameLift) UpdateFleetPortSettingsWithContext(ctx aws.Context, input *UpdateFleetPortSettingsInput, opts ...request.Option) (*UpdateFleetPortSettingsOutput, error) { - req, out := c.UpdateFleetPortSettingsRequest(input) +func (c *GameLift) UpdateGameServerGroupWithContext(ctx aws.Context, input *UpdateGameServerGroupInput, opts ...request.Option) (*UpdateGameServerGroupOutput, error) { + req, out := c.UpdateGameServerGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -8803,6 +11819,12 @@ func (c *GameLift) UpdateGameSessionQueueRequest(input *UpdateGameSessionQueueIn // the queue name to be updated and provide the new settings. When updating // destinations, provide a complete list of destinations. // +// Learn more +// +// Using Multi-Region Queues (https://docs.aws.amazon.com/gamelift/latest/developerguide/queues-intro.html) +// +// Related operations +// // * CreateGameSessionQueue // // * DescribeGameSessionQueues @@ -9035,7 +12057,7 @@ func (c *GameLift) UpdateRuntimeConfigurationRequest(input *UpdateRuntimeConfigu // // Learn more // -// Working with Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html). +// Setting up GameLift Fleets (https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html) // // Related operations // @@ -9050,7 +12072,7 @@ func (c *GameLift) UpdateRuntimeConfigurationRequest(input *UpdateRuntimeConfigu // * Update fleets: UpdateFleetAttributes UpdateFleetCapacity UpdateFleetPortSettings // UpdateRuntimeConfiguration // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9337,7 +12359,7 @@ func (c *GameLift) ValidateMatchmakingRuleSetWithContext(ctx aws.Context, input return out, req.Send() } -// Represents the input for a request action. +// Represents the input for a request operation. type AcceptMatchInput struct { _ struct{} `type:"structure"` @@ -9438,9 +12460,9 @@ type Alias struct { // Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) // that is assigned to a GameLift alias resource and uniquely identifies it. - // ARNs are unique across all Regions.. In a GameLift alias ARN, the resource + // ARNs are unique across all Regions. In a GameLift alias ARN, the resource // ID matches the alias ID value. 
- AliasArn *string `min:"1" type:"string"` + AliasArn *string `type:"string"` // A unique identifier for an alias. Alias IDs are unique within a Region. AliasId *string `type:"string"` @@ -9795,12 +12817,107 @@ func (s *CertificateConfiguration) SetCertificateType(v string) *CertificateConf return s } +type ClaimGameServerInput struct { + _ struct{} `type:"structure"` + + // A set of custom game server properties, formatted as a single string value. + // This data is passed to a game client or service when it requests information + // on game servers using ListGameServers or ClaimGameServer. + GameServerData *string `min:"1" type:"string"` + + // A unique identifier for the game server group where the game server is running. + // Use either the GameServerGroup name or ARN value.. If you are not specifying + // a game server to claim, this value identifies where you want GameLift FleetIQ + // to look for an available game server to claim. + // + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` + + // A custom string that uniquely identifies the game server to claim. If this + // parameter is left empty, GameLift FleetIQ searches for an available game + // server in the specified game server group. + GameServerId *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s ClaimGameServerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClaimGameServerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ClaimGameServerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ClaimGameServerInput"} + if s.GameServerData != nil && len(*s.GameServerData) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerData", 1)) + } + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) + } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) + } + if s.GameServerId != nil && len(*s.GameServerId) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GameServerId", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameServerData sets the GameServerData field's value. +func (s *ClaimGameServerInput) SetGameServerData(v string) *ClaimGameServerInput { + s.GameServerData = &v + return s +} + +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *ClaimGameServerInput) SetGameServerGroupName(v string) *ClaimGameServerInput { + s.GameServerGroupName = &v + return s +} + +// SetGameServerId sets the GameServerId field's value. +func (s *ClaimGameServerInput) SetGameServerId(v string) *ClaimGameServerInput { + s.GameServerId = &v + return s +} + +type ClaimGameServerOutput struct { + _ struct{} `type:"structure"` + + // Object that describes the newly claimed game server. + GameServer *GameServer `type:"structure"` +} + +// String returns the string representation +func (s ClaimGameServerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClaimGameServerOutput) GoString() string { + return s.String() +} + +// SetGameServer sets the GameServer field's value. 
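+//
+// Usage sketch (illustrative only, not generated SDK code): claiming an
+// available game server using the ClaimGameServerInput shape defined above.
+// Only GameServerGroupName is required; leaving GameServerId empty lets
+// GameLift FleetIQ pick an available game server in the group. The group name
+// is a placeholder, and imports of aws, session, log, fmt, and this gamelift
+// package are implied.
+//
+//    svc := gamelift.New(session.Must(session.NewSession()))
+//
+//    out, err := svc.ClaimGameServer(&gamelift.ClaimGameServerInput{
+//        GameServerGroupName: aws.String("example-gsg"),
+//    })
+//    if err != nil {
+//        log.Printf("ClaimGameServer: %v", err)
+//        return
+//    }
+//    // out.GameServer describes the newly claimed game server.
+//    fmt.Println(out.GameServer)
+//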
+func (s *ClaimGameServerOutput) SetGameServer(v *GameServer) *ClaimGameServerOutput { + s.GameServer = v + return s +} + // The requested operation would cause a conflict with the current state of // a service resource associated with the request. Resolve the conflict before // retrying this request. type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -9817,17 +12934,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9835,25 +12952,25 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } -// Represents the input for a request action. +// Represents the input for a request operation. type CreateAliasInput struct { _ struct{} `type:"structure"` @@ -9949,7 +13066,7 @@ func (s *CreateAliasInput) SetTags(v []*Tag) *CreateAliasInput { return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type CreateAliasOutput struct { _ struct{} `type:"structure"` @@ -9973,7 +13090,7 @@ func (s *CreateAliasOutput) SetAlias(v *Alias) *CreateAliasOutput { return s } -// Represents the input for a request action. +// Represents the input for a request operation. type CreateBuildInput struct { _ struct{} `type:"structure"` @@ -9990,11 +13107,11 @@ type CreateBuildInput struct { OperatingSystem *string `type:"string" enum:"OperatingSystem"` // Information indicating where your game build files are stored. Use this parameter - // only when creating a build with files stored in an Amazon S3 bucket that - // you own. The storage location must specify an Amazon S3 bucket name and key. - // The location must also specify a role ARN that you set up to allow Amazon - // GameLift to access your Amazon S3 bucket. The S3 bucket and your new build - // must be in the same Region. + // only when creating a build with files stored in an S3 bucket that you own. + // The storage location must specify an S3 bucket name and key. The location + // must also specify a role ARN that you set up to allow Amazon GameLift to + // access your S3 bucket. 
The S3 bucket and your new build must be in the same + // Region. StorageLocation *S3Location `type:"structure"` // A list of labels to assign to the new build resource. Tags are developer-defined @@ -10083,11 +13200,11 @@ func (s *CreateBuildInput) SetVersion(v string) *CreateBuildInput { return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type CreateBuildOutput struct { _ struct{} `type:"structure"` - // The newly created build record, including a unique build IDs and status. + // The newly created build resource, including a unique build IDs and status. Build *Build `type:"structure"` // Amazon S3 location for your game build file, including bucket name and key. @@ -10095,8 +13212,8 @@ type CreateBuildOutput struct { // This element is returned only when the operation is called without a storage // location. It contains credentials to use when you are uploading a build file - // to an Amazon S3 bucket that is owned by Amazon GameLift. Credentials have - // a limited life span. To refresh these credentials, call RequestUploadCredentials. + // to an S3 bucket that is owned by Amazon GameLift. Credentials have a limited + // life span. To refresh these credentials, call RequestUploadCredentials. UploadCredentials *AwsCredentials `type:"structure" sensitive:"true"` } @@ -10128,7 +13245,7 @@ func (s *CreateBuildOutput) SetUploadCredentials(v *AwsCredentials) *CreateBuild return s } -// Represents the input for a request action. +// Represents the input for a request operation. type CreateFleetInput struct { _ struct{} `type:"structure"` @@ -10474,7 +13591,7 @@ func (s *CreateFleetInput) SetTags(v []*Tag) *CreateFleetInput { return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type CreateFleetOutput struct { _ struct{} `type:"structure"` @@ -10498,7 +13615,299 @@ func (s *CreateFleetOutput) SetFleetAttributes(v *FleetAttributes) *CreateFleetO return s } -// Represents the input for a request action. +type CreateGameServerGroupInput struct { + _ struct{} `type:"structure"` + + // Configuration settings to define a scaling policy for the Auto Scaling group + // that is optimized for game hosting. The scaling policy uses the metric "PercentUtilizedGameServers" + // to maintain a buffer of idle game servers that can immediately accommodate + // new games and players. After the Auto Scaling group is created, update this + // value directly in the Auto Scaling group using the AWS console or APIs. + AutoScalingPolicy *GameServerGroupAutoScalingPolicy `type:"structure"` + + // Indicates how GameLift FleetIQ balances the use of Spot Instances and On-Demand + // Instances in the game server group. Method options include the following: + // + // * SPOT_ONLY - Only Spot Instances are used in the game server group. If + // Spot Instances are unavailable or not viable for game hosting, the game + // server group provides no hosting capacity until Spot Instances can again + // be used. Until then, no new instances are started, and the existing nonviable + // Spot Instances are terminated (after current gameplay ends) and are not + // replaced. + // + // * SPOT_PREFERRED - (default value) Spot Instances are used whenever available + // in the game server group. If Spot Instances are unavailable, the game + // server group continues to provide hosting capacity by falling back to + // On-Demand Instances. 
Existing nonviable Spot Instances are terminated + // (after current gameplay ends) and are replaced with new On-Demand Instances. + // + // * ON_DEMAND_ONLY - Only On-Demand Instances are used in the game server + // group. No Spot Instances are used, even when available, while this balancing + // strategy is in force. + BalancingStrategy *string `type:"string" enum:"BalancingStrategy"` + + // An identifier for the new game server group. This value is used to generate + // unique ARN identifiers for the EC2 Auto Scaling group and the GameLift FleetIQ + // game server group. The name must be unique per Region per AWS account. + // + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` + + // A flag that indicates whether instances in the game server group are protected + // from early termination. Unprotected instances that have active game servers + // running might be terminated during a scale-down event, causing players to + // be dropped from the game. Protected instances cannot be terminated while + // there are active game servers running except in the event of a forced game + // server group deletion (see ). An exception to this is with Spot Instances, + // which can be terminated by AWS regardless of protection status. This property + // is set to NO_PROTECTION by default. + GameServerProtectionPolicy *string `type:"string" enum:"GameServerProtectionPolicy"` + + // The EC2 instance types and sizes to use in the Auto Scaling group. The instance + // definitions must specify at least two different instance types that are supported + // by GameLift FleetIQ. For more information on instance types, see EC2 Instance + // Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon EC2 User Guide. You can optionally specify capacity weighting + // for each instance type. If no weight value is specified for an instance type, + // it is set to the default value "1". For more information about capacity weighting, + // see Instance Weighting for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-weighting.html) + // in the Amazon EC2 Auto Scaling User Guide. + // + // InstanceDefinitions is a required field + InstanceDefinitions []*InstanceDefinition `min:"2" type:"list" required:"true"` + + // The EC2 launch template that contains configuration settings and game server + // code to be deployed to all instances in the game server group. You can specify + // the template using either the template name or ID. For help with creating + // a launch template, see Creating a Launch Template for an Auto Scaling Group + // (https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html) + // in the Amazon EC2 Auto Scaling User Guide. After the Auto Scaling group is + // created, update this value directly in the Auto Scaling group using the AWS + // console or APIs. + // + // LaunchTemplate is a required field + LaunchTemplate *LaunchTemplateSpecification `type:"structure" required:"true"` + + // The maximum number of instances allowed in the EC2 Auto Scaling group. During + // automatic scaling events, GameLift FleetIQ and EC2 do not scale up the group + // above this maximum. After the Auto Scaling group is created, update this + // value directly in the Auto Scaling group using the AWS console or APIs. 
+ // + // MaxSize is a required field + MaxSize *int64 `min:"1" type:"integer" required:"true"` + + // The minimum number of instances allowed in the EC2 Auto Scaling group. During + // automatic scaling events, GameLift FleetIQ and EC2 do not scale down the + // group below this minimum. In production, this value should be set to at least + // 1. After the Auto Scaling group is created, update this value directly in + // the Auto Scaling group using the AWS console or APIs. + // + // MinSize is a required field + MinSize *int64 `type:"integer" required:"true"` + + // The Amazon Resource Name (ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) + // for an IAM role that allows Amazon GameLift to access your EC2 Auto Scaling + // groups. + // + // RoleArn is a required field + RoleArn *string `min:"1" type:"string" required:"true"` + + // A list of labels to assign to the new game server group resource. Tags are + // developer-defined key-value pairs. Tagging AWS resources is useful for resource + // management, access management, and cost allocation. For more information, + // see Tagging AWS Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in the AWS General Reference. Once the resource is created, you can use TagResource, + // UntagResource, and ListTagsForResource to add, remove, and view tags, respectively. + // The maximum tag limit may be lower than stated. See the AWS General Reference + // for actual tagging limits. + Tags []*Tag `type:"list"` + + // A list of virtual private cloud (VPC) subnets to use with instances in the + // game server group. By default, all GameLift FleetIQ-supported Availability + // Zones are used. You can use this parameter to specify VPCs that you've set + // up. This property cannot be updated after the game server group is created, + // and the corresponding Auto Scaling group will always use the property value + // that is set with this request, even if the Auto Scaling group is updated + // directly + VpcSubnets []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s CreateGameServerGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGameServerGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateGameServerGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGameServerGroupInput"} + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) + } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) + } + if s.InstanceDefinitions == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceDefinitions")) + } + if s.InstanceDefinitions != nil && len(s.InstanceDefinitions) < 2 { + invalidParams.Add(request.NewErrParamMinLen("InstanceDefinitions", 2)) + } + if s.LaunchTemplate == nil { + invalidParams.Add(request.NewErrParamRequired("LaunchTemplate")) + } + if s.MaxSize == nil { + invalidParams.Add(request.NewErrParamRequired("MaxSize")) + } + if s.MaxSize != nil && *s.MaxSize < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxSize", 1)) + } + if s.MinSize == nil { + invalidParams.Add(request.NewErrParamRequired("MinSize")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + if s.VpcSubnets != nil && len(s.VpcSubnets) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VpcSubnets", 1)) + } + if s.AutoScalingPolicy != nil { + if err := s.AutoScalingPolicy.Validate(); err != nil { + invalidParams.AddNested("AutoScalingPolicy", err.(request.ErrInvalidParams)) + } + } + if s.InstanceDefinitions != nil { + for i, v := range s.InstanceDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.LaunchTemplate != nil { + if err := s.LaunchTemplate.Validate(); err != nil { + invalidParams.AddNested("LaunchTemplate", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoScalingPolicy sets the AutoScalingPolicy field's value. +func (s *CreateGameServerGroupInput) SetAutoScalingPolicy(v *GameServerGroupAutoScalingPolicy) *CreateGameServerGroupInput { + s.AutoScalingPolicy = v + return s +} + +// SetBalancingStrategy sets the BalancingStrategy field's value. +func (s *CreateGameServerGroupInput) SetBalancingStrategy(v string) *CreateGameServerGroupInput { + s.BalancingStrategy = &v + return s +} + +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *CreateGameServerGroupInput) SetGameServerGroupName(v string) *CreateGameServerGroupInput { + s.GameServerGroupName = &v + return s +} + +// SetGameServerProtectionPolicy sets the GameServerProtectionPolicy field's value. +func (s *CreateGameServerGroupInput) SetGameServerProtectionPolicy(v string) *CreateGameServerGroupInput { + s.GameServerProtectionPolicy = &v + return s +} + +// SetInstanceDefinitions sets the InstanceDefinitions field's value. +func (s *CreateGameServerGroupInput) SetInstanceDefinitions(v []*InstanceDefinition) *CreateGameServerGroupInput { + s.InstanceDefinitions = v + return s +} + +// SetLaunchTemplate sets the LaunchTemplate field's value. 
+func (s *CreateGameServerGroupInput) SetLaunchTemplate(v *LaunchTemplateSpecification) *CreateGameServerGroupInput { + s.LaunchTemplate = v + return s +} + +// SetMaxSize sets the MaxSize field's value. +func (s *CreateGameServerGroupInput) SetMaxSize(v int64) *CreateGameServerGroupInput { + s.MaxSize = &v + return s +} + +// SetMinSize sets the MinSize field's value. +func (s *CreateGameServerGroupInput) SetMinSize(v int64) *CreateGameServerGroupInput { + s.MinSize = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateGameServerGroupInput) SetRoleArn(v string) *CreateGameServerGroupInput { + s.RoleArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateGameServerGroupInput) SetTags(v []*Tag) *CreateGameServerGroupInput { + s.Tags = v + return s +} + +// SetVpcSubnets sets the VpcSubnets field's value. +func (s *CreateGameServerGroupInput) SetVpcSubnets(v []*string) *CreateGameServerGroupInput { + s.VpcSubnets = v + return s +} + +type CreateGameServerGroupOutput struct { + _ struct{} `type:"structure"` + + // The newly created game server group object, including the new ARN value for + // the GameLift FleetIQ game server group and the object's status. The EC2 Auto + // Scaling group ARN is initially null, since the group has not yet been created. + // This value is added once the game server group status reaches ACTIVE. + GameServerGroup *GameServerGroup `type:"structure"` +} + +// String returns the string representation +func (s CreateGameServerGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGameServerGroupOutput) GoString() string { + return s.String() +} + +// SetGameServerGroup sets the GameServerGroup field's value. +func (s *CreateGameServerGroupOutput) SetGameServerGroup(v *GameServerGroup) *CreateGameServerGroupOutput { + s.GameServerGroup = v + return s +} + +// Represents the input for a request operation. type CreateGameSessionInput struct { _ struct{} `type:"structure"` @@ -10657,7 +14066,7 @@ func (s *CreateGameSessionInput) SetName(v string) *CreateGameSessionInput { return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type CreateGameSessionOutput struct { _ struct{} `type:"structure"` @@ -10681,7 +14090,7 @@ func (s *CreateGameSessionOutput) SetGameSession(v *GameSession) *CreateGameSess return s } -// Represents the input for a request action. +// Represents the input for a request operation. type CreateGameSessionQueueInput struct { _ struct{} `type:"structure"` @@ -10800,7 +14209,7 @@ func (s *CreateGameSessionQueueInput) SetTimeoutInSeconds(v int64) *CreateGameSe return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type CreateGameSessionQueueOutput struct { _ struct{} `type:"structure"` @@ -10824,7 +14233,7 @@ func (s *CreateGameSessionQueueOutput) SetGameSessionQueue(v *GameSessionQueue) return s } -// Represents the input for a request action. +// Represents the input for a request operation. type CreateMatchmakingConfigurationInput struct { _ struct{} `type:"structure"` @@ -11070,7 +14479,7 @@ func (s *CreateMatchmakingConfigurationInput) SetTags(v []*Tag) *CreateMatchmaki return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. 
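// A minimal sketch (not part of this patch) of how a caller might exercise the
// CreateGameServerGroup types added above: the chained Set* methods build the
// input, Validate runs the same client-side checks shown in the diff (required
// fields, minimum lengths, at least two instance definitions), and the request
// is sent with the generated service client. The Region, IAM role ARN, launch
// template name, and instance types below are placeholder assumptions, not
// values taken from this change.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := gamelift.New(sess)

	input := (&gamelift.CreateGameServerGroupInput{}).
		SetGameServerGroupName("example-gsg").
		// Placeholder role; it must allow GameLift FleetIQ to manage the EC2
		// Auto Scaling group, as described in the RoleArn documentation above.
		SetRoleArn("arn:aws:iam::111122223333:role/example-gamelift-fleetiq-role").
		SetMinSize(1).
		SetMaxSize(10).
		// At least two supported instance types are required.
		SetInstanceDefinitions([]*gamelift.InstanceDefinition{
			{InstanceType: aws.String("c5.large")},
			{InstanceType: aws.String("c5.xlarge")},
		}).
		SetLaunchTemplate(&gamelift.LaunchTemplateSpecification{
			LaunchTemplateName: aws.String("example-launch-template"),
		})

	// Client-side validation mirrors the Validate method shown above.
	if err := input.Validate(); err != nil {
		log.Fatalf("invalid input: %v", err)
	}

	out, err := svc.CreateGameServerGroup(input)
	if err != nil {
		log.Fatalf("CreateGameServerGroup failed: %v", err)
	}
	fmt.Println("game server group status:", aws.StringValue(out.GameServerGroup.Status))
}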
type CreateMatchmakingConfigurationOutput struct { _ struct{} `type:"structure"` @@ -11094,7 +14503,7 @@ func (s *CreateMatchmakingConfigurationOutput) SetConfiguration(v *MatchmakingCo return s } -// Represents the input for a request action. +// Represents the input for a request operation. type CreateMatchmakingRuleSetInput struct { _ struct{} `type:"structure"` @@ -11179,7 +14588,7 @@ func (s *CreateMatchmakingRuleSetInput) SetTags(v []*Tag) *CreateMatchmakingRule return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type CreateMatchmakingRuleSetOutput struct { _ struct{} `type:"structure"` @@ -11205,7 +14614,7 @@ func (s *CreateMatchmakingRuleSetOutput) SetRuleSet(v *MatchmakingRuleSet) *Crea return s } -// Represents the input for a request action. +// Represents the input for a request operation. type CreatePlayerSessionInput struct { _ struct{} `type:"structure"` @@ -11277,7 +14686,7 @@ func (s *CreatePlayerSessionInput) SetPlayerId(v string) *CreatePlayerSessionInp return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type CreatePlayerSessionOutput struct { _ struct{} `type:"structure"` @@ -11301,7 +14710,7 @@ func (s *CreatePlayerSessionOutput) SetPlayerSession(v *PlayerSession) *CreatePl return s } -// Represents the input for a request action. +// Represents the input for a request operation. type CreatePlayerSessionsInput struct { _ struct{} `type:"structure"` @@ -11372,7 +14781,7 @@ func (s *CreatePlayerSessionsInput) SetPlayerIds(v []*string) *CreatePlayerSessi return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type CreatePlayerSessionsOutput struct { _ struct{} `type:"structure"` @@ -11538,7 +14947,7 @@ func (s *CreateScriptOutput) SetScript(v *Script) *CreateScriptOutput { return s } -// Represents the input for a request action. +// Represents the input for a request operation. type CreateVpcPeeringAuthorizationInput struct { _ struct{} `type:"structure"` @@ -11603,7 +15012,7 @@ func (s *CreateVpcPeeringAuthorizationInput) SetPeerVpcId(v string) *CreateVpcPe return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type CreateVpcPeeringAuthorizationOutput struct { _ struct{} `type:"structure"` @@ -11627,7 +15036,7 @@ func (s *CreateVpcPeeringAuthorizationOutput) SetVpcPeeringAuthorization(v *VpcP return s } -// Represents the input for a request action. +// Represents the input for a request operation. type CreateVpcPeeringConnectionInput struct { _ struct{} `type:"structure"` @@ -11721,7 +15130,7 @@ func (s CreateVpcPeeringConnectionOutput) GoString() string { return s.String() } -// Represents the input for a request action. +// Represents the input for a request operation. type DeleteAliasInput struct { _ struct{} `type:"structure"` @@ -11775,7 +15184,7 @@ func (s DeleteAliasOutput) GoString() string { return s.String() } -// Represents the input for a request action. +// Represents the input for a request operation. 
type DeleteBuildInput struct { _ struct{} `type:"structure"` @@ -11820,41 +15229,110 @@ type DeleteBuildOutput struct { } // String returns the string representation -func (s DeleteBuildOutput) String() string { +func (s DeleteBuildOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBuildOutput) GoString() string { + return s.String() +} + +// Represents the input for a request operation. +type DeleteFleetInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for a fleet to be deleted. You can use either the fleet + // ID or ARN value. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFleetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFleetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFleetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFleetInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *DeleteFleetInput) SetFleetId(v string) *DeleteFleetInput { + s.FleetId = &v + return s +} + +type DeleteFleetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteFleetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBuildOutput) GoString() string { +func (s DeleteFleetOutput) GoString() string { return s.String() } -// Represents the input for a request action. -type DeleteFleetInput struct { +type DeleteGameServerGroupInput struct { _ struct{} `type:"structure"` - // A unique identifier for a fleet to be deleted. You can use either the fleet - // ID or ARN value. + // The type of delete to perform. Options include the following: // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` + // * SAFE_DELETE – Terminates the game server group and EC2 Auto Scaling + // group only when it has no game servers that are in UTILIZED status. + // + // * FORCE_DELETE – Terminates the game server group, including all active + // game servers regardless of their utilization status, and the EC2 Auto + // Scaling group. + // + // * RETAIN – Does a safe delete of the game server group but retains the + // EC2 Auto Scaling group as is. + DeleteOption *string `type:"string" enum:"GameServerGroupDeleteOption"` + + // A unique identifier for the game server group. Use either the GameServerGroup + // name or ARN value. + // + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteFleetInput) String() string { +func (s DeleteGameServerGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteFleetInput) GoString() string { +func (s DeleteGameServerGroupInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
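// A short sketch, assuming a *gamelift.GameLift client and the
// "github.com/aws/aws-sdk-go/aws" and "github.com/aws/aws-sdk-go/service/gamelift"
// imports: it fills in the DeleteGameServerGroupInput defined above and picks
// one of the documented DeleteOption values. The group name is a placeholder.
func deleteGameServerGroup(svc *gamelift.GameLift, groupName string) (*gamelift.GameServerGroup, error) {
	input := &gamelift.DeleteGameServerGroupInput{
		GameServerGroupName: aws.String(groupName),
		// Documented options: SAFE_DELETE (default), FORCE_DELETE, RETAIN.
		DeleteOption: aws.String("SAFE_DELETE"),
	}
	if err := input.Validate(); err != nil {
		return nil, err
	}
	out, err := svc.DeleteGameServerGroup(input)
	if err != nil {
		return nil, err
	}
	// On success the group is returned with status DELETE_SCHEDULED.
	return out.GameServerGroup, nil
}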
-func (s *DeleteFleetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteFleetInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) +func (s *DeleteGameServerGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGameServerGroupInput"} + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) + } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) } if invalidParams.Len() > 0 { @@ -11863,27 +15341,43 @@ func (s *DeleteFleetInput) Validate() error { return nil } -// SetFleetId sets the FleetId field's value. -func (s *DeleteFleetInput) SetFleetId(v string) *DeleteFleetInput { - s.FleetId = &v +// SetDeleteOption sets the DeleteOption field's value. +func (s *DeleteGameServerGroupInput) SetDeleteOption(v string) *DeleteGameServerGroupInput { + s.DeleteOption = &v return s } -type DeleteFleetOutput struct { +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *DeleteGameServerGroupInput) SetGameServerGroupName(v string) *DeleteGameServerGroupInput { + s.GameServerGroupName = &v + return s +} + +type DeleteGameServerGroupOutput struct { _ struct{} `type:"structure"` + + // An object that describes the deleted game server group resource, with status + // updated to DELETE_SCHEDULED. + GameServerGroup *GameServerGroup `type:"structure"` } // String returns the string representation -func (s DeleteFleetOutput) String() string { +func (s DeleteGameServerGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteFleetOutput) GoString() string { +func (s DeleteGameServerGroupOutput) GoString() string { return s.String() } -// Represents the input for a request action. +// SetGameServerGroup sets the GameServerGroup field's value. +func (s *DeleteGameServerGroupOutput) SetGameServerGroup(v *GameServerGroup) *DeleteGameServerGroupOutput { + s.GameServerGroup = v + return s +} + +// Represents the input for a request operation. type DeleteGameSessionQueueInput struct { _ struct{} `type:"structure"` @@ -11941,7 +15435,7 @@ func (s DeleteGameSessionQueueOutput) GoString() string { return s.String() } -// Represents the input for a request action. +// Represents the input for a request operation. type DeleteMatchmakingConfigurationInput struct { _ struct{} `type:"structure"` @@ -11998,7 +15492,7 @@ func (s DeleteMatchmakingConfigurationOutput) GoString() string { return s.String() } -// Represents the input for a request action. +// Represents the input for a request operation. type DeleteMatchmakingRuleSetInput struct { _ struct{} `type:"structure"` @@ -12042,7 +15536,7 @@ func (s *DeleteMatchmakingRuleSetInput) SetName(v string) *DeleteMatchmakingRule return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DeleteMatchmakingRuleSetOutput struct { _ struct{} `type:"structure"` } @@ -12057,7 +15551,7 @@ func (s DeleteMatchmakingRuleSetOutput) GoString() string { return s.String() } -// Represents the input for a request action. +// Represents the input for a request operation. type DeleteScalingPolicyInput struct { _ struct{} `type:"structure"` @@ -12182,7 +15676,7 @@ func (s DeleteScriptOutput) GoString() string { return s.String() } -// Represents the input for a request action. 
+// Represents the input for a request operation. type DeleteVpcPeeringAuthorizationInput struct { _ struct{} `type:"structure"` @@ -12261,7 +15755,7 @@ func (s DeleteVpcPeeringAuthorizationOutput) GoString() string { return s.String() } -// Represents the input for a request action. +// Represents the input for a request operation. type DeleteVpcPeeringConnectionInput struct { _ struct{} `type:"structure"` @@ -12334,7 +15828,80 @@ func (s DeleteVpcPeeringConnectionOutput) GoString() string { return s.String() } -// Represents the input for a request action. +type DeregisterGameServerInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the game server group where the game server is running. + // Use either the GameServerGroup name or ARN value. + // + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` + + // A custom string that uniquely identifies the game server to deregister. + // + // GameServerId is a required field + GameServerId *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterGameServerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterGameServerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterGameServerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterGameServerInput"} + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) + } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) + } + if s.GameServerId == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerId")) + } + if s.GameServerId != nil && len(*s.GameServerId) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GameServerId", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *DeregisterGameServerInput) SetGameServerGroupName(v string) *DeregisterGameServerInput { + s.GameServerGroupName = &v + return s +} + +// SetGameServerId sets the GameServerId field's value. +func (s *DeregisterGameServerInput) SetGameServerId(v string) *DeregisterGameServerInput { + s.GameServerId = &v + return s +} + +type DeregisterGameServerOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterGameServerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterGameServerOutput) GoString() string { + return s.String() +} + +// Represents the input for a request operation. type DescribeAliasInput struct { _ struct{} `type:"structure"` @@ -12374,7 +15941,7 @@ func (s *DescribeAliasInput) SetAliasId(v string) *DescribeAliasInput { return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeAliasOutput struct { _ struct{} `type:"structure"` @@ -12398,7 +15965,7 @@ func (s *DescribeAliasOutput) SetAlias(v *Alias) *DescribeAliasOutput { return s } -// Represents the input for a request action. +// Represents the input for a request operation. 
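// A minimal sketch of the DeregisterGameServer call defined above, assuming a
// *gamelift.GameLift client and the "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/gamelift" imports; the identifiers are
// placeholders.
func deregisterGameServer(svc *gamelift.GameLift, groupName, gameServerID string) error {
	input := &gamelift.DeregisterGameServerInput{
		GameServerGroupName: aws.String(groupName),
		GameServerId:        aws.String(gameServerID),
	}
	// Validate enforces the min-length constraints from the struct tags above.
	if err := input.Validate(); err != nil {
		return err
	}
	_, err := svc.DeregisterGameServer(input)
	return err
}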
type DescribeBuildInput struct { _ struct{} `type:"structure"` @@ -12438,7 +16005,7 @@ func (s *DescribeBuildInput) SetBuildId(v string) *DescribeBuildInput { return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeBuildOutput struct { _ struct{} `type:"structure"` @@ -12462,7 +16029,7 @@ func (s *DescribeBuildOutput) SetBuild(v *Build) *DescribeBuildOutput { return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribeEC2InstanceLimitsInput struct { _ struct{} `type:"structure"` @@ -12491,7 +16058,7 @@ func (s *DescribeEC2InstanceLimitsInput) SetEC2InstanceType(v string) *DescribeE return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeEC2InstanceLimitsOutput struct { _ struct{} `type:"structure"` @@ -12515,12 +16082,15 @@ func (s *DescribeEC2InstanceLimitsOutput) SetEC2InstanceLimits(v []*EC2InstanceL return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribeFleetAttributesInput struct { _ struct{} `type:"structure"` - // A unique identifier for a fleet(s) to retrieve attributes for. You can use - // either the fleet ID or ARN value. + // A list of unique fleet identifiers to retrieve attributes for. You can use + // either the fleet ID or ARN value. To retrieve attributes for all current + // fleets, do not include this parameter. If the list of fleet identifiers includes + // fleets that don't currently exist, the request succeeds but no attributes + // for that fleet are returned. FleetIds []*string `min:"1" type:"list"` // The maximum number of results to return. Use this parameter with NextToken @@ -12529,7 +16099,7 @@ type DescribeFleetAttributesInput struct { Limit *int64 `min:"1" type:"integer"` // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start + // the token that is returned with a previous call to this operation. To start // at the beginning of the result set, do not specify a value. This parameter // is ignored when the request specifies one or a list of fleet IDs. NextToken *string `min:"1" type:"string"` @@ -12582,16 +16152,16 @@ func (s *DescribeFleetAttributesInput) SetNextToken(v string) *DescribeFleetAttr return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeFleetAttributesOutput struct { _ struct{} `type:"structure"` // A collection of objects containing attribute metadata for each requested - // fleet ID. + // fleet ID. Attribute objects are returned only for fleets that currently exist. FleetAttributes []*FleetAttributes `type:"list"` // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` } @@ -12618,7 +16188,7 @@ func (s *DescribeFleetAttributesOutput) SetNextToken(v string) *DescribeFleetAtt return s } -// Represents the input for a request action. +// Represents the input for a request operation. 
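// The Limit/NextToken fields documented for DescribeFleetAttributes above
// follow the usual manual pagination pattern. A sketch, assuming a
// *gamelift.GameLift client and the same aws/gamelift imports as above:
func listAllFleetAttributes(svc *gamelift.GameLift) ([]*gamelift.FleetAttributes, error) {
	var all []*gamelift.FleetAttributes
	// Leaving FleetIds unset returns attributes for all current fleets; when
	// specific fleet IDs are supplied, NextToken and Limit are ignored.
	input := &gamelift.DescribeFleetAttributesInput{Limit: aws.Int64(25)}
	for {
		out, err := svc.DescribeFleetAttributes(input)
		if err != nil {
			return nil, err
		}
		all = append(all, out.FleetAttributes...)
		if out.NextToken == nil {
			return all, nil
		}
		input.NextToken = out.NextToken
	}
}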
type DescribeFleetCapacityInput struct { _ struct{} `type:"structure"` @@ -12632,7 +16202,7 @@ type DescribeFleetCapacityInput struct { Limit *int64 `min:"1" type:"integer"` // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start + // the token that is returned with a previous call to this operation. To start // at the beginning of the result set, do not specify a value. This parameter // is ignored when the request specifies one or a list of fleet IDs. NextToken *string `min:"1" type:"string"` @@ -12685,7 +16255,7 @@ func (s *DescribeFleetCapacityInput) SetNextToken(v string) *DescribeFleetCapaci return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeFleetCapacityOutput struct { _ struct{} `type:"structure"` @@ -12695,7 +16265,7 @@ type DescribeFleetCapacityOutput struct { FleetCapacity []*FleetCapacity `type:"list"` // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` } @@ -12722,7 +16292,7 @@ func (s *DescribeFleetCapacityOutput) SetNextToken(v string) *DescribeFleetCapac return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribeFleetEventsInput struct { _ struct{} `type:"structure"` @@ -12742,7 +16312,7 @@ type DescribeFleetEventsInput struct { Limit *int64 `min:"1" type:"integer"` // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start + // the token that is returned with a previous call to this operation. To start // at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` @@ -12812,7 +16382,7 @@ func (s *DescribeFleetEventsInput) SetStartTime(v time.Time) *DescribeFleetEvent return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeFleetEventsOutput struct { _ struct{} `type:"structure"` @@ -12820,7 +16390,7 @@ type DescribeFleetEventsOutput struct { Events []*Event `type:"list"` // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` } @@ -12847,7 +16417,7 @@ func (s *DescribeFleetEventsOutput) SetNextToken(v string) *DescribeFleetEventsO return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribeFleetPortSettingsInput struct { _ struct{} `type:"structure"` @@ -12887,7 +16457,7 @@ func (s *DescribeFleetPortSettingsInput) SetFleetId(v string) *DescribeFleetPort return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeFleetPortSettingsOutput struct { _ struct{} `type:"structure"` @@ -12911,12 +16481,15 @@ func (s *DescribeFleetPortSettingsOutput) SetInboundPermissions(v []*IpPermissio return s } -// Represents the input for a request action. 
+// Represents the input for a request operation. type DescribeFleetUtilizationInput struct { _ struct{} `type:"structure"` // A unique identifier for a fleet(s) to retrieve utilization data for. You - // can use either the fleet ID or ARN value. + // can use either the fleet ID or ARN value. To retrieve attributes for all + // current fleets, do not include this parameter. If the list of fleet identifiers + // includes fleets that don't currently exist, the request succeeds but no attributes + // for that fleet are returned. FleetIds []*string `min:"1" type:"list"` // The maximum number of results to return. Use this parameter with NextToken @@ -12925,7 +16498,7 @@ type DescribeFleetUtilizationInput struct { Limit *int64 `min:"1" type:"integer"` // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start + // the token that is returned with a previous call to this operation. To start // at the beginning of the result set, do not specify a value. This parameter // is ignored when the request specifies one or a list of fleet IDs. NextToken *string `min:"1" type:"string"` @@ -12978,7 +16551,7 @@ func (s *DescribeFleetUtilizationInput) SetNextToken(v string) *DescribeFleetUti return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeFleetUtilizationOutput struct { _ struct{} `type:"structure"` @@ -12987,34 +16560,300 @@ type DescribeFleetUtilizationOutput struct { FleetUtilization []*FleetUtilization `type:"list"` // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeFleetUtilizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFleetUtilizationOutput) GoString() string { + return s.String() +} + +// SetFleetUtilization sets the FleetUtilization field's value. +func (s *DescribeFleetUtilizationOutput) SetFleetUtilization(v []*FleetUtilization) *DescribeFleetUtilizationOutput { + s.FleetUtilization = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeFleetUtilizationOutput) SetNextToken(v string) *DescribeFleetUtilizationOutput { + s.NextToken = &v + return s +} + +type DescribeGameServerGroupInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the game server group. Use either the GameServerGroup + // name or ARN value. + // + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeGameServerGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameServerGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeGameServerGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGameServerGroupInput"} + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) + } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *DescribeGameServerGroupInput) SetGameServerGroupName(v string) *DescribeGameServerGroupInput { + s.GameServerGroupName = &v + return s +} + +type DescribeGameServerGroupOutput struct { + _ struct{} `type:"structure"` + + // An object with the property settings for the requested game server group + // resource. + GameServerGroup *GameServerGroup `type:"structure"` +} + +// String returns the string representation +func (s DescribeGameServerGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameServerGroupOutput) GoString() string { + return s.String() +} + +// SetGameServerGroup sets the GameServerGroup field's value. +func (s *DescribeGameServerGroupOutput) SetGameServerGroup(v *GameServerGroup) *DescribeGameServerGroupOutput { + s.GameServerGroup = v + return s +} + +type DescribeGameServerInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the game server group where the game server is running. + // Use either the GameServerGroup name or ARN value. + // + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` + + // A custom string that uniquely identifies the game server information to be + // retrieved. + // + // GameServerId is a required field + GameServerId *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeGameServerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameServerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeGameServerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGameServerInput"} + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) + } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) + } + if s.GameServerId == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerId")) + } + if s.GameServerId != nil && len(*s.GameServerId) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GameServerId", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *DescribeGameServerInput) SetGameServerGroupName(v string) *DescribeGameServerInput { + s.GameServerGroupName = &v + return s +} + +// SetGameServerId sets the GameServerId field's value. +func (s *DescribeGameServerInput) SetGameServerId(v string) *DescribeGameServerInput { + s.GameServerId = &v + return s +} + +type DescribeGameServerInstancesInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the game server group. 
Use either the GameServerGroup + // name or ARN value. + // + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` + + // The EC2 instance IDs that you want to retrieve status on. EC2 instance IDs + // use a 17-character format, for example: i-1234567890abcdef0. To retrieve + // all instances in the game server group, leave this parameter empty. + InstanceIds []*string `min:"1" type:"list"` + + // The maximum number of results to return. Use this parameter with NextToken + // to get results as a set of sequential segments. + Limit *int64 `min:"1" type:"integer"` + + // A token that indicates the start of the next sequential segment of results. + // Use the token returned with the previous call to this operation. To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeGameServerInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameServerInstancesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeGameServerInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGameServerInstancesInput"} + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) + } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) + } + if s.InstanceIds != nil && len(s.InstanceIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceIds", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *DescribeGameServerInstancesInput) SetGameServerGroupName(v string) *DescribeGameServerInstancesInput { + s.GameServerGroupName = &v + return s +} + +// SetInstanceIds sets the InstanceIds field's value. +func (s *DescribeGameServerInstancesInput) SetInstanceIds(v []*string) *DescribeGameServerInstancesInput { + s.InstanceIds = v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeGameServerInstancesInput) SetLimit(v int64) *DescribeGameServerInstancesInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeGameServerInstancesInput) SetNextToken(v string) *DescribeGameServerInstancesInput { + s.NextToken = &v + return s +} + +type DescribeGameServerInstancesOutput struct { + _ struct{} `type:"structure"` + + // The collection of requested game server instances. + GameServerInstances []*GameServerInstance `type:"list"` + + // A token that indicates where to resume retrieving results on the next call + // to this operation. If no token is returned, these results represent the end // of the list. 
NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s DescribeFleetUtilizationOutput) String() string { +func (s DescribeGameServerInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGameServerInstancesOutput) GoString() string { + return s.String() +} + +// SetGameServerInstances sets the GameServerInstances field's value. +func (s *DescribeGameServerInstancesOutput) SetGameServerInstances(v []*GameServerInstance) *DescribeGameServerInstancesOutput { + s.GameServerInstances = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeGameServerInstancesOutput) SetNextToken(v string) *DescribeGameServerInstancesOutput { + s.NextToken = &v + return s +} + +type DescribeGameServerOutput struct { + _ struct{} `type:"structure"` + + // Object that describes the requested game server. + GameServer *GameServer `type:"structure"` +} + +// String returns the string representation +func (s DescribeGameServerOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeFleetUtilizationOutput) GoString() string { +func (s DescribeGameServerOutput) GoString() string { return s.String() } -// SetFleetUtilization sets the FleetUtilization field's value. -func (s *DescribeFleetUtilizationOutput) SetFleetUtilization(v []*FleetUtilization) *DescribeFleetUtilizationOutput { - s.FleetUtilization = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeFleetUtilizationOutput) SetNextToken(v string) *DescribeFleetUtilizationOutput { - s.NextToken = &v +// SetGameServer sets the GameServer field's value. +func (s *DescribeGameServerOutput) SetGameServer(v *GameServer) *DescribeGameServerOutput { + s.GameServer = v return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribeGameSessionDetailsInput struct { _ struct{} `type:"structure"` @@ -13034,7 +16873,7 @@ type DescribeGameSessionDetailsInput struct { Limit *int64 `min:"1" type:"integer"` // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start + // the token that is returned with a previous call to this operation. To start // at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` @@ -13112,7 +16951,7 @@ func (s *DescribeGameSessionDetailsInput) SetStatusFilter(v string) *DescribeGam return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeGameSessionDetailsOutput struct { _ struct{} `type:"structure"` @@ -13121,7 +16960,7 @@ type DescribeGameSessionDetailsOutput struct { GameSessionDetails []*GameSessionDetail `type:"list"` // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` } @@ -13148,7 +16987,7 @@ func (s *DescribeGameSessionDetailsOutput) SetNextToken(v string) *DescribeGameS return s } -// Represents the input for a request action. +// Represents the input for a request operation. 
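// DescribeGameServerInstances above uses the same Limit/NextToken paging. A
// sketch that drains all pages for one game server group, assuming the aws and
// gamelift imports; the group name is a placeholder:
func listGameServerInstances(svc *gamelift.GameLift, groupName string) ([]*gamelift.GameServerInstance, error) {
	var instances []*gamelift.GameServerInstance
	input := &gamelift.DescribeGameServerInstancesInput{
		GameServerGroupName: aws.String(groupName),
		Limit:               aws.Int64(20),
	}
	for {
		out, err := svc.DescribeGameServerInstances(input)
		if err != nil {
			return nil, err
		}
		instances = append(instances, out.GameServerInstances...)
		if out.NextToken == nil {
			return instances, nil
		}
		input.NextToken = out.NextToken
	}
}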
type DescribeGameSessionPlacementInput struct { _ struct{} `type:"structure"` @@ -13190,7 +17029,7 @@ func (s *DescribeGameSessionPlacementInput) SetPlacementId(v string) *DescribeGa return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeGameSessionPlacementOutput struct { _ struct{} `type:"structure"` @@ -13214,12 +17053,12 @@ func (s *DescribeGameSessionPlacementOutput) SetGameSessionPlacement(v *GameSess return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribeGameSessionQueuesInput struct { _ struct{} `type:"structure"` // The maximum number of results to return. Use this parameter with NextToken - // to get results as a set of sequential pages. + // to get results as a set of sequential pages. You can request up to 50 results. Limit *int64 `min:"1" type:"integer"` // A list of queue names to retrieve information for. You can use either the @@ -13228,8 +17067,8 @@ type DescribeGameSessionQueuesInput struct { Names []*string `type:"list"` // A token that indicates the start of the next sequential page of results. - // Use the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. + // Use the token that is returned with a previous call to this operation. To + // start at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` } @@ -13277,7 +17116,7 @@ func (s *DescribeGameSessionQueuesInput) SetNextToken(v string) *DescribeGameSes return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeGameSessionQueuesOutput struct { _ struct{} `type:"structure"` @@ -13285,7 +17124,7 @@ type DescribeGameSessionQueuesOutput struct { GameSessionQueues []*GameSessionQueue `type:"list"` // A token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` } @@ -13312,7 +17151,7 @@ func (s *DescribeGameSessionQueuesOutput) SetNextToken(v string) *DescribeGameSe return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribeGameSessionsInput struct { _ struct{} `type:"structure"` @@ -13332,7 +17171,7 @@ type DescribeGameSessionsInput struct { Limit *int64 `min:"1" type:"integer"` // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start + // the token that is returned with a previous call to this operation. To start // at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` @@ -13410,7 +17249,7 @@ func (s *DescribeGameSessionsInput) SetStatusFilter(v string) *DescribeGameSessi return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeGameSessionsOutput struct { _ struct{} `type:"structure"` @@ -13419,7 +17258,7 @@ type DescribeGameSessionsOutput struct { GameSessions []*GameSession `type:"list"` // Token that indicates where to resume retrieving results on the next call - // to this action. 
If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` } @@ -13446,7 +17285,7 @@ func (s *DescribeGameSessionsOutput) SetNextToken(v string) *DescribeGameSession return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribeInstancesInput struct { _ struct{} `type:"structure"` @@ -13465,7 +17304,7 @@ type DescribeInstancesInput struct { Limit *int64 `min:"1" type:"integer"` // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start + // the token that is returned with a previous call to this operation. To start // at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` } @@ -13523,7 +17362,7 @@ func (s *DescribeInstancesInput) SetNextToken(v string) *DescribeInstancesInput return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeInstancesOutput struct { _ struct{} `type:"structure"` @@ -13531,7 +17370,7 @@ type DescribeInstancesOutput struct { Instances []*Instance `type:"list"` // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` } @@ -13558,7 +17397,7 @@ func (s *DescribeInstancesOutput) SetNextToken(v string) *DescribeInstancesOutpu return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribeMatchmakingConfigurationsInput struct { _ struct{} `type:"structure"` @@ -13573,8 +17412,8 @@ type DescribeMatchmakingConfigurationsInput struct { Names []*string `type:"list"` // A token that indicates the start of the next sequential page of results. - // Use the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. + // Use the token that is returned with a previous call to this operation. To + // start at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` // A unique identifier for a matchmaking rule set. You can use either the rule @@ -13636,7 +17475,7 @@ func (s *DescribeMatchmakingConfigurationsInput) SetRuleSetName(v string) *Descr return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeMatchmakingConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -13644,7 +17483,7 @@ type DescribeMatchmakingConfigurationsOutput struct { Configurations []*MatchmakingConfiguration `type:"list"` // A token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` } @@ -13671,7 +17510,7 @@ func (s *DescribeMatchmakingConfigurationsOutput) SetNextToken(v string) *Descri return s } -// Represents the input for a request action. +// Represents the input for a request operation. 
type DescribeMatchmakingInput struct { _ struct{} `type:"structure"` @@ -13711,7 +17550,7 @@ func (s *DescribeMatchmakingInput) SetTicketIds(v []*string) *DescribeMatchmakin return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeMatchmakingOutput struct { _ struct{} `type:"structure"` @@ -13735,7 +17574,7 @@ func (s *DescribeMatchmakingOutput) SetTicketList(v []*MatchmakingTicket) *Descr return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribeMatchmakingRuleSetsInput struct { _ struct{} `type:"structure"` @@ -13749,8 +17588,8 @@ type DescribeMatchmakingRuleSetsInput struct { Names []*string `min:"1" type:"list"` // A token that indicates the start of the next sequential page of results. - // Use the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. + // Use the token that is returned with a previous call to this operation. To + // start at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` } @@ -13801,12 +17640,12 @@ func (s *DescribeMatchmakingRuleSetsInput) SetNextToken(v string) *DescribeMatch return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeMatchmakingRuleSetsOutput struct { _ struct{} `type:"structure"` // A token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` @@ -13838,7 +17677,7 @@ func (s *DescribeMatchmakingRuleSetsOutput) SetRuleSets(v []*MatchmakingRuleSet) return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribePlayerSessionsInput struct { _ struct{} `type:"structure"` @@ -13851,7 +17690,7 @@ type DescribePlayerSessionsInput struct { Limit *int64 `min:"1" type:"integer"` // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start + // the token that is returned with a previous call to this operation. To start // at the beginning of the result set, do not specify a value. If a player session // ID is specified, this parameter is ignored. NextToken *string `min:"1" type:"string"` @@ -13950,12 +17789,12 @@ func (s *DescribePlayerSessionsInput) SetPlayerSessionStatusFilter(v string) *De return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribePlayerSessionsOutput struct { _ struct{} `type:"structure"` // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` @@ -13986,7 +17825,7 @@ func (s *DescribePlayerSessionsOutput) SetPlayerSessions(v []*PlayerSession) *De return s } -// Represents the input for a request action. +// Represents the input for a request operation. 
type DescribeRuntimeConfigurationInput struct { _ struct{} `type:"structure"` @@ -14026,7 +17865,7 @@ func (s *DescribeRuntimeConfigurationInput) SetFleetId(v string) *DescribeRuntim return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeRuntimeConfigurationOutput struct { _ struct{} `type:"structure"` @@ -14051,7 +17890,7 @@ func (s *DescribeRuntimeConfigurationOutput) SetRuntimeConfiguration(v *RuntimeC return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribeScalingPoliciesInput struct { _ struct{} `type:"structure"` @@ -14066,7 +17905,7 @@ type DescribeScalingPoliciesInput struct { Limit *int64 `min:"1" type:"integer"` // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start + // the token that is returned with a previous call to this operation. To start // at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` @@ -14143,12 +17982,12 @@ func (s *DescribeScalingPoliciesInput) SetStatusFilter(v string) *DescribeScalin return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeScalingPoliciesOutput struct { _ struct{} `type:"structure"` // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` @@ -14278,7 +18117,7 @@ func (s *DescribeVpcPeeringAuthorizationsOutput) SetVpcPeeringAuthorizations(v [ return s } -// Represents the input for a request action. +// Represents the input for a request operation. type DescribeVpcPeeringConnectionsInput struct { _ struct{} `type:"structure"` @@ -14302,7 +18141,7 @@ func (s *DescribeVpcPeeringConnectionsInput) SetFleetId(v string) *DescribeVpcPe return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type DescribeVpcPeeringConnectionsOutput struct { _ struct{} `type:"structure"` @@ -14393,7 +18232,7 @@ func (s *DesiredPlayerSession) SetPlayerId(v string) *DesiredPlayerSession { // // * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions type EC2InstanceCounts struct { _ struct{} `type:"structure"` @@ -14532,7 +18371,7 @@ type Event struct { // // Fleet creation events (ordered by fleet creation activity): // - // * FLEET_CREATED -- A fleet record was successfully created with a status + // * FLEET_CREATED -- A fleet resource was successfully created with a status // of NEW. Event messaging includes the fleet ID. // // * FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW to DOWNLOADING. @@ -14700,7 +18539,7 @@ func (s *Event) SetResourceId(v string) *Event { // // * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions type FleetAttributes struct { _ struct{} `type:"structure"` @@ -14727,7 +18566,7 @@ type FleetAttributes struct { // that is assigned to a GameLift fleet resource and uniquely identifies it. // ARNs are unique across all Regions. 
In a GameLift fleet ARN, the resource // ID matches the FleetId value. - FleetArn *string `min:"1" type:"string"` + FleetArn *string `type:"string"` // A unique identifier for a fleet. FleetId *string `type:"string"` @@ -14830,7 +18669,7 @@ type FleetAttributes struct { // * TERMINATED -- The fleet no longer exists. Status *string `type:"string" enum:"FleetStatus"` - // List of fleet actions that have been suspended using StopFleetActions. This + // List of fleet activity that have been suspended using StopFleetActions. This // includes auto-scaling. StoppedActions []*string `min:"1" type:"list"` @@ -15002,7 +18841,7 @@ func (s *FleetAttributes) SetTerminationTime(v time.Time) *FleetAttributes { // // * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions type FleetCapacity struct { _ struct{} `type:"structure"` @@ -15012,214 +18851,645 @@ type FleetCapacity struct { // Current status of fleet capacity. InstanceCounts *EC2InstanceCounts `type:"structure"` - // Name of an EC2 instance type that is supported in Amazon GameLift. A fleet - // instance type determines the computing resources of each instance in the - // fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift - // supports the following EC2 instance types. See Amazon EC2 Instance Types - // (http://aws.amazon.com/ec2/instance-types/) for detailed descriptions. - InstanceType *string `type:"string" enum:"EC2InstanceType"` + // Name of an EC2 instance type that is supported in Amazon GameLift. A fleet + // instance type determines the computing resources of each instance in the + // fleet, including CPU, memory, storage, and networking capacity. Amazon GameLift + // supports the following EC2 instance types. See Amazon EC2 Instance Types + // (http://aws.amazon.com/ec2/instance-types/) for detailed descriptions. + InstanceType *string `type:"string" enum:"EC2InstanceType"` +} + +// String returns the string representation +func (s FleetCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetCapacity) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *FleetCapacity) SetFleetId(v string) *FleetCapacity { + s.FleetId = &v + return s +} + +// SetInstanceCounts sets the InstanceCounts field's value. +func (s *FleetCapacity) SetInstanceCounts(v *EC2InstanceCounts) *FleetCapacity { + s.InstanceCounts = v + return s +} + +// SetInstanceType sets the InstanceType field's value. +func (s *FleetCapacity) SetInstanceType(v string) *FleetCapacity { + s.InstanceType = &v + return s +} + +// The specified fleet has no available instances to fulfill a CreateGameSession +// request. Clients can retry such requests immediately or after a waiting period. +type FleetCapacityExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s FleetCapacityExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetCapacityExceededException) GoString() string { + return s.String() +} + +func newErrorFleetCapacityExceededException(v protocol.ResponseMetadata) error { + return &FleetCapacityExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *FleetCapacityExceededException) Code() string { + return "FleetCapacityExceededException" +} + +// Message returns the exception's message. +func (s *FleetCapacityExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *FleetCapacityExceededException) OrigErr() error { + return nil +} + +func (s *FleetCapacityExceededException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *FleetCapacityExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *FleetCapacityExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Current status of fleet utilization, including the number of game and player +// sessions being hosted. +// +// * CreateFleet +// +// * ListFleets +// +// * DeleteFleet +// +// * DescribeFleetAttributes +// +// * UpdateFleetAttributes +// +// * StartFleetActions or StopFleetActions +type FleetUtilization struct { + _ struct{} `type:"structure"` + + // Number of active game sessions currently being hosted on all instances in + // the fleet. + ActiveGameSessionCount *int64 `type:"integer"` + + // Number of server processes in an ACTIVE status currently running across all + // instances in the fleet + ActiveServerProcessCount *int64 `type:"integer"` + + // Number of active player sessions currently being hosted on all instances + // in the fleet. + CurrentPlayerSessionCount *int64 `type:"integer"` + + // A unique identifier for a fleet. + FleetId *string `type:"string"` + + // The maximum number of players allowed across all game sessions currently + // being hosted on all instances in the fleet. + MaximumPlayerSessionCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s FleetUtilization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FleetUtilization) GoString() string { + return s.String() +} + +// SetActiveGameSessionCount sets the ActiveGameSessionCount field's value. +func (s *FleetUtilization) SetActiveGameSessionCount(v int64) *FleetUtilization { + s.ActiveGameSessionCount = &v + return s +} + +// SetActiveServerProcessCount sets the ActiveServerProcessCount field's value. +func (s *FleetUtilization) SetActiveServerProcessCount(v int64) *FleetUtilization { + s.ActiveServerProcessCount = &v + return s +} + +// SetCurrentPlayerSessionCount sets the CurrentPlayerSessionCount field's value. +func (s *FleetUtilization) SetCurrentPlayerSessionCount(v int64) *FleetUtilization { + s.CurrentPlayerSessionCount = &v + return s +} + +// SetFleetId sets the FleetId field's value. +func (s *FleetUtilization) SetFleetId(v string) *FleetUtilization { + s.FleetId = &v + return s +} + +// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. +func (s *FleetUtilization) SetMaximumPlayerSessionCount(v int64) *FleetUtilization { + s.MaximumPlayerSessionCount = &v + return s +} + +// Set of key-value pairs that contain information about a game session. When +// included in a game session request, these properties communicate details +// to be used when setting up the new game session. For example, a game property +// might specify a game mode, level, or map. 
Game properties are passed to the +// game server process when initiating a new game session. For more information, +// see the Amazon GameLift Developer Guide (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-client-api.html#gamelift-sdk-client-api-create). +type GameProperty struct { + _ struct{} `type:"structure"` + + // The game property identifier. + // + // Key is a required field + Key *string `type:"string" required:"true"` + + // The game property value. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GameProperty) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GameProperty) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GameProperty) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GameProperty"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *GameProperty) SetKey(v string) *GameProperty { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *GameProperty) SetValue(v string) *GameProperty { + s.Value = &v + return s +} + +// This data type is used with the Amazon GameLift FleetIQ and game server groups. +// +// Properties describing a game server that is running on an instance in a GameServerGroup. +// +// A game server is created by a successful call to RegisterGameServer and deleted +// by calling DeregisterGameServer. A game server is claimed to host a game +// session by calling ClaimGameServer. +// +// * RegisterGameServer +// +// * ListGameServers +// +// * ClaimGameServer +// +// * DescribeGameServer +// +// * UpdateGameServer +// +// * DeregisterGameServer +type GameServer struct { + _ struct{} `type:"structure"` + + // Indicates when an available game server has been reserved for gameplay but + // has not yet started hosting a game. Once it is claimed, the game server remains + // in CLAIMED status for a maximum of one minute. During this time, game clients + // connect to the game server to start the game and trigger the game server + // to update its utilization status. After one minute, the game server claim + // status reverts to null. + ClaimStatus *string `type:"string" enum:"GameServerClaimStatus"` + + // The port and IP address that must be used to establish a client connection + // to the game server. + ConnectionInfo *string `min:"1" type:"string"` + + // A set of custom game server properties, formatted as a single string value. + // This data is passed to a game client or service when it requests information + // on game servers using ListGameServers or ClaimGameServer. + GameServerData *string `min:"1" type:"string"` + + // The ARN identifier for the game server group where the game server is located. + GameServerGroupArn *string `min:"1" type:"string"` + + // A unique identifier for the game server group where the game server is running. + // Use either the GameServerGroup name or ARN value. + GameServerGroupName *string `min:"1" type:"string"` + + // A custom string that uniquely identifies the game server. 
Game server IDs + // are developer-defined and are unique across all game server groups in an + // AWS account. + GameServerId *string `min:"3" type:"string"` + + // The unique identifier for the instance where the game server is running. + // This ID is available in the instance metadata. EC2 instance IDs use a 17-character + // format, for example: i-1234567890abcdef0. + InstanceId *string `min:"19" type:"string"` + + // Timestamp that indicates the last time the game server was claimed with a + // ClaimGameServer request. The format is a number expressed in Unix time as + // milliseconds (for example "1469498468.057"). This value is used to calculate + // when a claimed game server's status should revert to null. + LastClaimTime *time.Time `type:"timestamp"` + + // Timestamp that indicates the last time the game server was updated with health + // status using an UpdateGameServer request. The format is a number expressed + // in Unix time as milliseconds (for example "1469498468.057"). After game server + // registration, this property is only changed when a game server update specifies + // a health check value. + LastHealthCheckTime *time.Time `type:"timestamp"` + + // Timestamp that indicates when the game server was created with a RegisterGameServer + // request. The format is a number expressed in Unix time as milliseconds (for + // example "1469498468.057"). + RegistrationTime *time.Time `type:"timestamp"` + + // Indicates whether the game server is currently available for new games or + // is busy. Possible statuses include: + // + // * AVAILABLE - The game server is available to be claimed. A game server + // that has been claimed remains in this status until it reports game hosting + // activity. + // + // * UTILIZED - The game server is currently hosting a game session with + // players. + UtilizationStatus *string `type:"string" enum:"GameServerUtilizationStatus"` } // String returns the string representation -func (s FleetCapacity) String() string { +func (s GameServer) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FleetCapacity) GoString() string { +func (s GameServer) GoString() string { return s.String() } -// SetFleetId sets the FleetId field's value. -func (s *FleetCapacity) SetFleetId(v string) *FleetCapacity { - s.FleetId = &v +// SetClaimStatus sets the ClaimStatus field's value. +func (s *GameServer) SetClaimStatus(v string) *GameServer { + s.ClaimStatus = &v return s } -// SetInstanceCounts sets the InstanceCounts field's value. -func (s *FleetCapacity) SetInstanceCounts(v *EC2InstanceCounts) *FleetCapacity { - s.InstanceCounts = v +// SetConnectionInfo sets the ConnectionInfo field's value. +func (s *GameServer) SetConnectionInfo(v string) *GameServer { + s.ConnectionInfo = &v return s } -// SetInstanceType sets the InstanceType field's value. -func (s *FleetCapacity) SetInstanceType(v string) *FleetCapacity { - s.InstanceType = &v +// SetGameServerData sets the GameServerData field's value. +func (s *GameServer) SetGameServerData(v string) *GameServer { + s.GameServerData = &v return s } -// The specified fleet has no available instances to fulfill a CreateGameSession -// request. Clients can retry such requests immediately or after a waiting period. 
-type FleetCapacityExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata - - Message_ *string `locationName:"Message" min:"1" type:"string"` -} - -// String returns the string representation -func (s FleetCapacityExceededException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FleetCapacityExceededException) GoString() string { - return s.String() +// SetGameServerGroupArn sets the GameServerGroupArn field's value. +func (s *GameServer) SetGameServerGroupArn(v string) *GameServer { + s.GameServerGroupArn = &v + return s } -func newErrorFleetCapacityExceededException(v protocol.ResponseMetadata) error { - return &FleetCapacityExceededException{ - respMetadata: v, - } +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *GameServer) SetGameServerGroupName(v string) *GameServer { + s.GameServerGroupName = &v + return s } -// Code returns the exception type name. -func (s FleetCapacityExceededException) Code() string { - return "FleetCapacityExceededException" +// SetGameServerId sets the GameServerId field's value. +func (s *GameServer) SetGameServerId(v string) *GameServer { + s.GameServerId = &v + return s } -// Message returns the exception's message. -func (s FleetCapacityExceededException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetInstanceId sets the InstanceId field's value. +func (s *GameServer) SetInstanceId(v string) *GameServer { + s.InstanceId = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s FleetCapacityExceededException) OrigErr() error { - return nil +// SetLastClaimTime sets the LastClaimTime field's value. +func (s *GameServer) SetLastClaimTime(v time.Time) *GameServer { + s.LastClaimTime = &v + return s } -func (s FleetCapacityExceededException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetLastHealthCheckTime sets the LastHealthCheckTime field's value. +func (s *GameServer) SetLastHealthCheckTime(v time.Time) *GameServer { + s.LastHealthCheckTime = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s FleetCapacityExceededException) StatusCode() int { - return s.respMetadata.StatusCode +// SetRegistrationTime sets the RegistrationTime field's value. +func (s *GameServer) SetRegistrationTime(v time.Time) *GameServer { + s.RegistrationTime = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s FleetCapacityExceededException) RequestID() string { - return s.respMetadata.RequestID +// SetUtilizationStatus sets the UtilizationStatus field's value. +func (s *GameServer) SetUtilizationStatus(v string) *GameServer { + s.UtilizationStatus = &v + return s } -// Current status of fleet utilization, including the number of game and player -// sessions being hosted. +// This data type is used with the Amazon GameLift FleetIQ and game server groups. // -// * CreateFleet +// Properties that describe a game server group resource. A game server group +// manages certain properties related to a corresponding EC2 Auto Scaling group. // -// * ListFleets +// A game server group is created by a successful call to CreateGameServerGroup +// and deleted by calling DeleteGameServerGroup. Game server group activity +// can be temporarily suspended and resumed by calling SuspendGameServerGroup +// and ResumeGameServerGroup, respectively. 
// -// * DeleteFleet +// * CreateGameServerGroup // -// * DescribeFleetAttributes +// * ListGameServerGroups // -// * UpdateFleetAttributes +// * DescribeGameServerGroup // -// * Manage fleet actions: StartFleetActions StopFleetActions -type FleetUtilization struct { +// * UpdateGameServerGroup +// +// * DeleteGameServerGroup +// +// * ResumeGameServerGroup +// +// * SuspendGameServerGroup +// +// * DescribeGameServerInstances +type GameServerGroup struct { _ struct{} `type:"structure"` - // Number of active game sessions currently being hosted on all instances in - // the fleet. - ActiveGameSessionCount *int64 `type:"integer"` + // A generated unique ID for the EC2 Auto Scaling group that is associated with + // this game server group. + AutoScalingGroupArn *string `type:"string"` + + // Indicates how GameLift FleetIQ balances the use of Spot Instances and On-Demand + // Instances in the game server group. Method options include the following: + // + // * SPOT_ONLY - Only Spot Instances are used in the game server group. If + // Spot Instances are unavailable or not viable for game hosting, the game + // server group provides no hosting capacity until Spot Instances can again + // be used. Until then, no new instances are started, and the existing nonviable + // Spot Instances are terminated (after current gameplay ends) and are not + // replaced. + // + // * SPOT_PREFERRED - (default value) Spot Instances are used whenever available + // in the game server group. If Spot Instances are unavailable, the game + // server group continues to provide hosting capacity by falling back to + // On-Demand Instances. Existing nonviable Spot Instances are terminated + // (after current gameplay ends) and are replaced with new On-Demand Instances. + // + // * ON_DEMAND_ONLY - Only On-Demand Instances are used in the game server + // group. No Spot Instances are used, even when available, while this balancing + // strategy is in force. + BalancingStrategy *string `type:"string" enum:"BalancingStrategy"` + + // A timestamp that indicates when this data object was created. Format is a + // number expressed in Unix time as milliseconds (for example "1469498468.057"). + CreationTime *time.Time `type:"timestamp"` - // Number of server processes in an ACTIVE status currently running across all - // instances in the fleet - ActiveServerProcessCount *int64 `type:"integer"` + // A generated unique ID for the game server group. + GameServerGroupArn *string `min:"1" type:"string"` - // Number of active player sessions currently being hosted on all instances - // in the fleet. - CurrentPlayerSessionCount *int64 `type:"integer"` + // A developer-defined identifier for the game server group. The name is unique + // for each Region in each AWS account. + GameServerGroupName *string `min:"1" type:"string"` - // A unique identifier for a fleet. - FleetId *string `type:"string"` + // A flag that indicates whether instances in the game server group are protected + // from early termination. Unprotected instances that have active game servers + // running might be terminated during a scale-down event, causing players to + // be dropped from the game. Protected instances cannot be terminated while + // there are active game servers running except in the event of a forced game + // server group deletion (see ). An exception to this is with Spot Instances, + // which can be terminated by AWS regardless of protection status. 
+ GameServerProtectionPolicy *string `type:"string" enum:"GameServerProtectionPolicy"` - // The maximum number of players allowed across all game sessions currently - // being hosted on all instances in the fleet. - MaximumPlayerSessionCount *int64 `type:"integer"` + // The set of EC2 instance types that GameLift FleetIQ can use when balancing + // and automatically scaling instances in the corresponding Auto Scaling group. + InstanceDefinitions []*InstanceDefinition `min:"2" type:"list"` + + // A timestamp that indicates when this game server group was last updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) + // for an IAM role that allows Amazon GameLift to access your EC2 Auto Scaling + // groups. + RoleArn *string `min:"1" type:"string"` + + // The current status of the game server group. Possible statuses include: + // + // * NEW - GameLift FleetIQ has validated the CreateGameServerGroup() request. + // + // * ACTIVATING - GameLift FleetIQ is setting up a game server group, which + // includes creating an Auto Scaling group in your AWS account. + // + // * ACTIVE - The game server group has been successfully created. + // + // * DELETE_SCHEDULED - A request to delete the game server group has been + // received. + // + // * DELETING - GameLift FleetIQ has received a valid DeleteGameServerGroup() + // request and is processing it. GameLift FleetIQ must first complete and + // release hosts before it deletes the Auto Scaling group and the game server + // group. + // + // * DELETED - The game server group has been successfully deleted. + // + // * ERROR - The asynchronous processes of activating or deleting a game + // server group has failed, resulting in an error state. + Status *string `type:"string" enum:"GameServerGroupStatus"` + + // Additional information about the current game server group status. This information + // might provide additional insight on groups that are in ERROR status. + StatusReason *string `min:"1" type:"string"` + + // A list of activities that are currently suspended for this game server group. + // If this property is empty, all activities are occurring. + SuspendedActions []*string `min:"1" type:"list"` } // String returns the string representation -func (s FleetUtilization) String() string { +func (s GameServerGroup) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FleetUtilization) GoString() string { +func (s GameServerGroup) GoString() string { return s.String() } -// SetActiveGameSessionCount sets the ActiveGameSessionCount field's value. -func (s *FleetUtilization) SetActiveGameSessionCount(v int64) *FleetUtilization { - s.ActiveGameSessionCount = &v +// SetAutoScalingGroupArn sets the AutoScalingGroupArn field's value. +func (s *GameServerGroup) SetAutoScalingGroupArn(v string) *GameServerGroup { + s.AutoScalingGroupArn = &v return s } -// SetActiveServerProcessCount sets the ActiveServerProcessCount field's value. -func (s *FleetUtilization) SetActiveServerProcessCount(v int64) *FleetUtilization { - s.ActiveServerProcessCount = &v +// SetBalancingStrategy sets the BalancingStrategy field's value. +func (s *GameServerGroup) SetBalancingStrategy(v string) *GameServerGroup { + s.BalancingStrategy = &v return s } -// SetCurrentPlayerSessionCount sets the CurrentPlayerSessionCount field's value. 
-func (s *FleetUtilization) SetCurrentPlayerSessionCount(v int64) *FleetUtilization { - s.CurrentPlayerSessionCount = &v +// SetCreationTime sets the CreationTime field's value. +func (s *GameServerGroup) SetCreationTime(v time.Time) *GameServerGroup { + s.CreationTime = &v return s } -// SetFleetId sets the FleetId field's value. -func (s *FleetUtilization) SetFleetId(v string) *FleetUtilization { - s.FleetId = &v +// SetGameServerGroupArn sets the GameServerGroupArn field's value. +func (s *GameServerGroup) SetGameServerGroupArn(v string) *GameServerGroup { + s.GameServerGroupArn = &v return s } -// SetMaximumPlayerSessionCount sets the MaximumPlayerSessionCount field's value. -func (s *FleetUtilization) SetMaximumPlayerSessionCount(v int64) *FleetUtilization { - s.MaximumPlayerSessionCount = &v +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *GameServerGroup) SetGameServerGroupName(v string) *GameServerGroup { + s.GameServerGroupName = &v return s } -// Set of key-value pairs that contain information about a game session. When -// included in a game session request, these properties communicate details -// to be used when setting up the new game session. For example, a game property -// might specify a game mode, level, or map. Game properties are passed to the -// game server process when initiating a new game session. For more information, -// see the Amazon GameLift Developer Guide (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-sdk-client-api.html#gamelift-sdk-client-api-create). -type GameProperty struct { +// SetGameServerProtectionPolicy sets the GameServerProtectionPolicy field's value. +func (s *GameServerGroup) SetGameServerProtectionPolicy(v string) *GameServerGroup { + s.GameServerProtectionPolicy = &v + return s +} + +// SetInstanceDefinitions sets the InstanceDefinitions field's value. +func (s *GameServerGroup) SetInstanceDefinitions(v []*InstanceDefinition) *GameServerGroup { + s.InstanceDefinitions = v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *GameServerGroup) SetLastUpdatedTime(v time.Time) *GameServerGroup { + s.LastUpdatedTime = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *GameServerGroup) SetRoleArn(v string) *GameServerGroup { + s.RoleArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GameServerGroup) SetStatus(v string) *GameServerGroup { + s.Status = &v + return s +} + +// SetStatusReason sets the StatusReason field's value. +func (s *GameServerGroup) SetStatusReason(v string) *GameServerGroup { + s.StatusReason = &v + return s +} + +// SetSuspendedActions sets the SuspendedActions field's value. +func (s *GameServerGroup) SetSuspendedActions(v []*string) *GameServerGroup { + s.SuspendedActions = v + return s +} + +// This data type is used with the Amazon GameLift FleetIQ and game server groups. +// +// Configuration settings for intelligent automatic scaling that uses target +// tracking. These settings are used to add an Auto Scaling policy when creating +// the corresponding Auto Scaling group with CreateGameServerGroup. After the +// Auto Scaling group is created, all updates to Auto Scaling policies, including +// changing this policy and adding or removing other policies, is done directly +// on the Auto Scaling group. +type GameServerGroupAutoScalingPolicy struct { _ struct{} `type:"structure"` - // The game property identifier. 
- // - // Key is a required field - Key *string `type:"string" required:"true"` + // Length of time, in seconds, it takes for a new instance to start new game + // server processes and register with GameLift FleetIQ. Specifying a warm-up + // time can be useful, particularly with game servers that take a long time + // to start up, because it avoids prematurely starting new instances. + EstimatedInstanceWarmup *int64 `min:"1" type:"integer"` - // The game property value. + // Settings for a target-based scaling policy applied to Auto Scaling group. + // These settings are used to create a target-based policy that tracks the GameLift + // FleetIQ metric "PercentUtilizedGameServers" and specifies a target value + // for the metric. As player usage changes, the policy triggers to adjust the + // game server group capacity so that the metric returns to the target value. // - // Value is a required field - Value *string `type:"string" required:"true"` + // TargetTrackingConfiguration is a required field + TargetTrackingConfiguration *TargetTrackingConfiguration `type:"structure" required:"true"` } // String returns the string representation -func (s GameProperty) String() string { +func (s GameServerGroupAutoScalingPolicy) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GameProperty) GoString() string { +func (s GameServerGroupAutoScalingPolicy) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GameProperty) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GameProperty"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) +func (s *GameServerGroupAutoScalingPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GameServerGroupAutoScalingPolicy"} + if s.EstimatedInstanceWarmup != nil && *s.EstimatedInstanceWarmup < 1 { + invalidParams.Add(request.NewErrParamMinValue("EstimatedInstanceWarmup", 1)) } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) + if s.TargetTrackingConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("TargetTrackingConfiguration")) + } + if s.TargetTrackingConfiguration != nil { + if err := s.TargetTrackingConfiguration.Validate(); err != nil { + invalidParams.AddNested("TargetTrackingConfiguration", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -15228,15 +19498,106 @@ func (s *GameProperty) Validate() error { return nil } -// SetKey sets the Key field's value. -func (s *GameProperty) SetKey(v string) *GameProperty { - s.Key = &v +// SetEstimatedInstanceWarmup sets the EstimatedInstanceWarmup field's value. +func (s *GameServerGroupAutoScalingPolicy) SetEstimatedInstanceWarmup(v int64) *GameServerGroupAutoScalingPolicy { + s.EstimatedInstanceWarmup = &v return s } -// SetValue sets the Value field's value. -func (s *GameProperty) SetValue(v string) *GameProperty { - s.Value = &v +// SetTargetTrackingConfiguration sets the TargetTrackingConfiguration field's value. +func (s *GameServerGroupAutoScalingPolicy) SetTargetTrackingConfiguration(v *TargetTrackingConfiguration) *GameServerGroupAutoScalingPolicy { + s.TargetTrackingConfiguration = v + return s +} + +// This data type is used with the Amazon GameLift FleetIQ and game server groups. +// +// Additional properties, including status, that describe an EC2 instance in +// a game server group. 
Instance configurations are set with game server group +// properties (see DescribeGameServerGroup and with the EC2 launch template +// that was used when creating the game server group. +// +// Retrieve game server instances for a game server group by calling DescribeGameServerInstances. +// +// * CreateGameServerGroup +// +// * ListGameServerGroups +// +// * DescribeGameServerGroup +// +// * UpdateGameServerGroup +// +// * DeleteGameServerGroup +// +// * ResumeGameServerGroup +// +// * SuspendGameServerGroup +// +// * DescribeGameServerInstances +type GameServerInstance struct { + _ struct{} `type:"structure"` + + // A generated unique identifier for the game server group that includes the + // game server instance. + GameServerGroupArn *string `min:"1" type:"string"` + + // A developer-defined identifier for the game server group that includes the + // game server instance. The name is unique for each Region in each AWS account. + GameServerGroupName *string `min:"1" type:"string"` + + // The unique identifier for the instance where the game server is running. + // This ID is available in the instance metadata. EC2 instance IDs use a 17-character + // format, for example: i-1234567890abcdef0. + InstanceId *string `min:"19" type:"string"` + + // Current status of the game server instance. + // + // * ACTIVE -- The instance is viable for hosting game servers. + // + // * DRAINING -- The instance is not viable for hosting game servers. Existing + // game servers are in the process of ending, and new game servers are not + // started on this instance unless no other resources are available. When + // the instance is put in DRAINING, a new instance is started up to replace + // it. Once the instance has no UTILIZED game servers, it will be terminated + // in favor of the new instance. + // + // * SPOT_TERMINATING -- The instance is in the process of shutting down + // due to a Spot instance interruption. No new game servers are started on + // this instance. + InstanceStatus *string `type:"string" enum:"GameServerInstanceStatus"` +} + +// String returns the string representation +func (s GameServerInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GameServerInstance) GoString() string { + return s.String() +} + +// SetGameServerGroupArn sets the GameServerGroupArn field's value. +func (s *GameServerInstance) SetGameServerGroupArn(v string) *GameServerInstance { + s.GameServerGroupArn = &v + return s +} + +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *GameServerInstance) SetGameServerGroupName(v string) *GameServerInstance { + s.GameServerGroupName = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *GameServerInstance) SetInstanceId(v string) *GameServerInstance { + s.InstanceId = &v + return s +} + +// SetInstanceStatus sets the InstanceStatus field's value. +func (s *GameServerInstance) SetInstanceStatus(v string) *GameServerInstance { + s.InstanceStatus = &v return s } @@ -15292,7 +19653,7 @@ type GameSession struct { // The Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) // associated with the GameLift fleet that this game session is running on. - FleetArn *string `min:"1" type:"string"` + FleetArn *string `type:"string"` // A unique identifier for a fleet that the game session is running on. 
FleetId *string `type:"string"` @@ -15595,8 +19956,8 @@ func (s *GameSessionDetail) SetProtectionPolicy(v string) *GameSessionDetail { // The game instance is currently full and cannot allow the requested player(s) // to join. Clients can retry such requests immediately or after a waiting period. type GameSessionFullException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -15613,17 +19974,17 @@ func (s GameSessionFullException) GoString() string { func newErrorGameSessionFullException(v protocol.ResponseMetadata) error { return &GameSessionFullException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s GameSessionFullException) Code() string { +func (s *GameSessionFullException) Code() string { return "GameSessionFullException" } // Message returns the exception's message. -func (s GameSessionFullException) Message() string { +func (s *GameSessionFullException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15631,22 +19992,22 @@ func (s GameSessionFullException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s GameSessionFullException) OrigErr() error { +func (s *GameSessionFullException) OrigErr() error { return nil } -func (s GameSessionFullException) Error() string { +func (s *GameSessionFullException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s GameSessionFullException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *GameSessionFullException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s GameSessionFullException) RequestID() string { - return s.respMetadata.RequestID +func (s *GameSessionFullException) RequestID() string { + return s.RespMetadata.RequestID } // Object that describes a StartGameSessionPlacement request. This object includes @@ -16038,7 +20399,7 @@ func (s *GameSessionQueueDestination) SetDestinationArn(v string) *GameSessionQu return s } -// Represents the input for a request action. +// Represents the input for a request operation. type GetGameSessionLogUrlInput struct { _ struct{} `type:"structure"` @@ -16080,7 +20441,7 @@ func (s *GetGameSessionLogUrlInput) SetGameSessionId(v string) *GetGameSessionLo return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type GetGameSessionLogUrlOutput struct { _ struct{} `type:"structure"` @@ -16107,7 +20468,7 @@ func (s *GetGameSessionLogUrlOutput) SetPreSignedUrl(v string) *GetGameSessionLo return s } -// Represents the input for a request action. +// Represents the input for a request operation. type GetInstanceAccessInput struct { _ struct{} `type:"structure"` @@ -16164,7 +20525,7 @@ func (s *GetInstanceAccessInput) SetInstanceId(v string) *GetInstanceAccessInput return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. 
type GetInstanceAccessOutput struct { _ struct{} `type:"structure"` @@ -16192,8 +20553,8 @@ func (s *GetInstanceAccessOutput) SetInstanceAccess(v *InstanceAccess) *GetInsta // A game session with this custom ID string already exists in this fleet. Resolve // this conflict before retrying this request. type IdempotentParameterMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -16210,17 +20571,17 @@ func (s IdempotentParameterMismatchException) GoString() string { func newErrorIdempotentParameterMismatchException(v protocol.ResponseMetadata) error { return &IdempotentParameterMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IdempotentParameterMismatchException) Code() string { +func (s *IdempotentParameterMismatchException) Code() string { return "IdempotentParameterMismatchException" } // Message returns the exception's message. -func (s IdempotentParameterMismatchException) Message() string { +func (s *IdempotentParameterMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16228,22 +20589,22 @@ func (s IdempotentParameterMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IdempotentParameterMismatchException) OrigErr() error { +func (s *IdempotentParameterMismatchException) OrigErr() error { return nil } -func (s IdempotentParameterMismatchException) Error() string { +func (s *IdempotentParameterMismatchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IdempotentParameterMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IdempotentParameterMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IdempotentParameterMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *IdempotentParameterMismatchException) RequestID() string { + return s.RespMetadata.RequestID } // Properties that describe an instance of a virtual computing resource that @@ -16448,9 +20809,70 @@ func (s *InstanceCredentials) SetSecret(v string) *InstanceCredentials { return s } -// SetUserName sets the UserName field's value. -func (s *InstanceCredentials) SetUserName(v string) *InstanceCredentials { - s.UserName = &v +// SetUserName sets the UserName field's value. +func (s *InstanceCredentials) SetUserName(v string) *InstanceCredentials { + s.UserName = &v + return s +} + +// This data type is used with the Amazon GameLift FleetIQ and game server groups. +// +// An allowed instance type for a GameServerGroup. All game server groups must +// have at least two instance types defined for it. GameLift FleetIQ periodically +// evaluates each defined instance type for viability. It then updates the Auto +// Scaling group with the list of viable instance types. +type InstanceDefinition struct { + _ struct{} `type:"structure"` + + // An EC2 instance type designation. 
+ // + // InstanceType is a required field + InstanceType *string `type:"string" required:"true" enum:"GameServerGroupInstanceType"` + + // Instance weighting that indicates how much this instance type contributes + // to the total capacity of a game server group. Instance weights are used by + // GameLift FleetIQ to calculate the instance type's cost per unit hour and + // better identify the most cost-effective options. For detailed information + // on weighting instance capacity, see Instance Weighting (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-weighting.html) + // in the Amazon EC2 Auto Scaling User Guide. Default value is "1". + WeightedCapacity *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s InstanceDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceDefinition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InstanceDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InstanceDefinition"} + if s.InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceType")) + } + if s.WeightedCapacity != nil && len(*s.WeightedCapacity) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WeightedCapacity", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceType sets the InstanceType field's value. +func (s *InstanceDefinition) SetInstanceType(v string) *InstanceDefinition { + s.InstanceType = &v + return s +} + +// SetWeightedCapacity sets the WeightedCapacity field's value. +func (s *InstanceDefinition) SetWeightedCapacity(v string) *InstanceDefinition { + s.WeightedCapacity = &v return s } @@ -16458,8 +20880,8 @@ func (s *InstanceCredentials) SetUserName(v string) *InstanceCredentials { // the request. Clients can retry such requests immediately or after a waiting // period. type InternalServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -16476,17 +20898,17 @@ func (s InternalServiceException) GoString() string { func newErrorInternalServiceException(v protocol.ResponseMetadata) error { return &InternalServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceException) Code() string { +func (s *InternalServiceException) Code() string { return "InternalServiceException" } // Message returns the exception's message. -func (s InternalServiceException) Message() string { +func (s *InternalServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16494,30 +20916,30 @@ func (s InternalServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServiceException) OrigErr() error { +func (s *InternalServiceException) OrigErr() error { return nil } -func (s InternalServiceException) Error() string { +func (s *InternalServiceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InternalServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceException) RequestID() string { + return s.RespMetadata.RequestID } // The requested operation would cause a conflict with the current state of // a resource associated with the request and/or the fleet. Resolve the conflict // before retrying. type InvalidFleetStatusException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -16534,17 +20956,17 @@ func (s InvalidFleetStatusException) GoString() string { func newErrorInvalidFleetStatusException(v protocol.ResponseMetadata) error { return &InvalidFleetStatusException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidFleetStatusException) Code() string { +func (s *InvalidFleetStatusException) Code() string { return "InvalidFleetStatusException" } // Message returns the exception's message. -func (s InvalidFleetStatusException) Message() string { +func (s *InvalidFleetStatusException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16552,30 +20974,30 @@ func (s InvalidFleetStatusException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidFleetStatusException) OrigErr() error { +func (s *InvalidFleetStatusException) OrigErr() error { return nil } -func (s InvalidFleetStatusException) Error() string { +func (s *InvalidFleetStatusException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidFleetStatusException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidFleetStatusException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidFleetStatusException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidFleetStatusException) RequestID() string { + return s.RespMetadata.RequestID } // The requested operation would cause a conflict with the current state of // a resource associated with the request and/or the game instance. Resolve // the conflict before retrying. type InvalidGameSessionStatusException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -16592,17 +21014,17 @@ func (s InvalidGameSessionStatusException) GoString() string { func newErrorInvalidGameSessionStatusException(v protocol.ResponseMetadata) error { return &InvalidGameSessionStatusException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidGameSessionStatusException) Code() string { +func (s *InvalidGameSessionStatusException) Code() string { return "InvalidGameSessionStatusException" } // Message returns the exception's message. 
-func (s InvalidGameSessionStatusException) Message() string { +func (s *InvalidGameSessionStatusException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16610,29 +21032,29 @@ func (s InvalidGameSessionStatusException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidGameSessionStatusException) OrigErr() error { +func (s *InvalidGameSessionStatusException) OrigErr() error { return nil } -func (s InvalidGameSessionStatusException) Error() string { +func (s *InvalidGameSessionStatusException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidGameSessionStatusException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidGameSessionStatusException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidGameSessionStatusException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidGameSessionStatusException) RequestID() string { + return s.RespMetadata.RequestID } // One or more parameter values in the request are invalid. Correct the invalid // parameter values before retrying. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -16649,17 +21071,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16667,22 +21089,22 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // A range of IP addresses and port settings that allow inbound traffic to connect @@ -16781,11 +21203,79 @@ func (s *IpPermission) SetToPort(v int64) *IpPermission { return s } +// This data type is used with the Amazon GameLift FleetIQ and game server groups. +// +// An EC2 launch template that contains configuration settings and game server +// code to be deployed to all instances in a game server group. 
The launch template +// is specified when creating a new game server group with CreateGameServerGroup. +type LaunchTemplateSpecification struct { + _ struct{} `type:"structure"` + + // A unique identifier for an existing EC2 launch template. + LaunchTemplateId *string `min:"1" type:"string"` + + // A readable identifier for an existing EC2 launch template. + LaunchTemplateName *string `min:"3" type:"string"` + + // The version of the EC2 launch template to use. If no version is specified, + // the default version will be used. With Amazon EC2, you can specify a default + // version for a launch template. If none is set, the default is the first version + // created. + Version *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LaunchTemplateSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchTemplateSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LaunchTemplateSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LaunchTemplateSpecification"} + if s.LaunchTemplateId != nil && len(*s.LaunchTemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LaunchTemplateId", 1)) + } + if s.LaunchTemplateName != nil && len(*s.LaunchTemplateName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("LaunchTemplateName", 3)) + } + if s.Version != nil && len(*s.Version) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Version", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLaunchTemplateId sets the LaunchTemplateId field's value. +func (s *LaunchTemplateSpecification) SetLaunchTemplateId(v string) *LaunchTemplateSpecification { + s.LaunchTemplateId = &v + return s +} + +// SetLaunchTemplateName sets the LaunchTemplateName field's value. +func (s *LaunchTemplateSpecification) SetLaunchTemplateName(v string) *LaunchTemplateSpecification { + s.LaunchTemplateName = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *LaunchTemplateSpecification) SetVersion(v string) *LaunchTemplateSpecification { + s.Version = &v + return s +} + // The requested operation would cause the resource to exceed the allowed service // limit. Resolve the issue before retrying. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -16802,17 +21292,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16820,25 +21310,25 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } -// Represents the input for a request action. +// Represents the input for a request operation. type ListAliasesInput struct { _ struct{} `type:"structure"` @@ -16851,8 +21341,8 @@ type ListAliasesInput struct { Name *string `min:"1" type:"string"` // A token that indicates the start of the next sequential page of results. - // Use the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. + // Use the token that is returned with a previous call to this operation. To + // start at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` // The routing type to filter results on. Use this parameter to retrieve only @@ -16923,7 +21413,7 @@ func (s *ListAliasesInput) SetRoutingStrategyType(v string) *ListAliasesInput { return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type ListAliasesOutput struct { _ struct{} `type:"structure"` @@ -16931,77 +21421,281 @@ type ListAliasesOutput struct { Aliases []*Alias `type:"list"` // A token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAliasesOutput) GoString() string { + return s.String() +} + +// SetAliases sets the Aliases field's value. +func (s *ListAliasesOutput) SetAliases(v []*Alias) *ListAliasesOutput { + s.Aliases = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAliasesOutput) SetNextToken(v string) *ListAliasesOutput { + s.NextToken = &v + return s +} + +// Represents the input for a request operation. +type ListBuildsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return. Use this parameter with NextToken + // to get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this operation. To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` + + // Build status to filter results by. To retrieve all builds, leave this parameter + // empty. 
+ // + // Possible build statuses include the following: + // + // * INITIALIZED -- A new build has been defined, but no files have been + // uploaded. You cannot create fleets for builds that are in this status. + // When a build is successfully created, the build status is set to this + // value. + // + // * READY -- The game build has been successfully uploaded. You can now + // create new fleets for this build. + // + // * FAILED -- The game build upload failed. You cannot create new fleets + // for this build. + Status *string `type:"string" enum:"BuildStatus"` +} + +// String returns the string representation +func (s ListBuildsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBuildsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBuildsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBuildsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *ListBuildsInput) SetLimit(v int64) *ListBuildsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListBuildsInput) SetNextToken(v string) *ListBuildsInput { + s.NextToken = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ListBuildsInput) SetStatus(v string) *ListBuildsInput { + s.Status = &v + return s +} + +// Represents the returned data in response to a request operation. +type ListBuildsOutput struct { + _ struct{} `type:"structure"` + + // A collection of build resources that match the request. + Builds []*Build `type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this operation. If no token is returned, these results represent the end + // of the list. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListBuildsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBuildsOutput) GoString() string { + return s.String() +} + +// SetBuilds sets the Builds field's value. +func (s *ListBuildsOutput) SetBuilds(v []*Build) *ListBuildsOutput { + s.Builds = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListBuildsOutput) SetNextToken(v string) *ListBuildsOutput { + s.NextToken = &v + return s +} + +// Represents the input for a request operation. +type ListFleetsInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for a build to return fleets for. Use this parameter + // to return only fleets using a specified build. Use either the build ID or + // ARN value. To retrieve all fleets, do not include either a BuildId and ScriptID + // parameter. + BuildId *string `type:"string"` + + // The maximum number of results to return. Use this parameter with NextToken + // to get results as a set of sequential pages. + Limit *int64 `min:"1" type:"integer"` + + // Token that indicates the start of the next sequential page of results. Use + // the token that is returned with a previous call to this operation. 
To start + // at the beginning of the result set, do not specify a value. + NextToken *string `min:"1" type:"string"` + + // A unique identifier for a Realtime script to return fleets for. Use this + // parameter to return only fleets using a specified script. Use either the + // script ID or ARN value. To retrieve all fleets, leave this parameter empty. + ScriptId *string `type:"string"` +} + +// String returns the string representation +func (s ListFleetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFleetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListFleetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListFleetsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBuildId sets the BuildId field's value. +func (s *ListFleetsInput) SetBuildId(v string) *ListFleetsInput { + s.BuildId = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ListFleetsInput) SetLimit(v int64) *ListFleetsInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListFleetsInput) SetNextToken(v string) *ListFleetsInput { + s.NextToken = &v + return s +} + +// SetScriptId sets the ScriptId field's value. +func (s *ListFleetsInput) SetScriptId(v string) *ListFleetsInput { + s.ScriptId = &v + return s +} + +// Represents the returned data in response to a request operation. +type ListFleetsOutput struct { + _ struct{} `type:"structure"` + + // Set of fleet IDs matching the list request. You can retrieve additional information + // about all returned fleets by passing this result set to a call to DescribeFleetAttributes, + // DescribeFleetCapacity, or DescribeFleetUtilization. + FleetIds []*string `min:"1" type:"list"` + + // Token that indicates where to resume retrieving results on the next call + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s ListAliasesOutput) String() string { +func (s ListFleetsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListAliasesOutput) GoString() string { +func (s ListFleetsOutput) GoString() string { return s.String() } -// SetAliases sets the Aliases field's value. -func (s *ListAliasesOutput) SetAliases(v []*Alias) *ListAliasesOutput { - s.Aliases = v +// SetFleetIds sets the FleetIds field's value. +func (s *ListFleetsOutput) SetFleetIds(v []*string) *ListFleetsOutput { + s.FleetIds = v return s } // SetNextToken sets the NextToken field's value. -func (s *ListAliasesOutput) SetNextToken(v string) *ListAliasesOutput { +func (s *ListFleetsOutput) SetNextToken(v string) *ListFleetsOutput { s.NextToken = &v return s } -// Represents the input for a request action. -type ListBuildsInput struct { +type ListGameServerGroupsInput struct { _ struct{} `type:"structure"` // The maximum number of results to return. Use this parameter with NextToken - // to get results as a set of sequential pages. + // to get results as a set of sequential segments. 
Limit *int64 `min:"1" type:"integer"` - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start + // A token that indicates the start of the next sequential segment of results. + // Use the token returned with the previous call to this operation. To start // at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` - - // Build status to filter results by. To retrieve all builds, leave this parameter - // empty. - // - // Possible build statuses include the following: - // - // * INITIALIZED -- A new build has been defined, but no files have been - // uploaded. You cannot create fleets for builds that are in this status. - // When a build is successfully created, the build status is set to this - // value. - // - // * READY -- The game build has been successfully uploaded. You can now - // create new fleets for this build. - // - // * FAILED -- The game build upload failed. You cannot create new fleets - // for this build. - Status *string `type:"string" enum:"BuildStatus"` } // String returns the string representation -func (s ListBuildsInput) String() string { +func (s ListGameServerGroupsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBuildsInput) GoString() string { +func (s ListGameServerGroupsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListBuildsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListBuildsInput"} +func (s *ListGameServerGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGameServerGroupsInput"} if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } @@ -17016,95 +21710,95 @@ func (s *ListBuildsInput) Validate() error { } // SetLimit sets the Limit field's value. -func (s *ListBuildsInput) SetLimit(v int64) *ListBuildsInput { +func (s *ListGameServerGroupsInput) SetLimit(v int64) *ListGameServerGroupsInput { s.Limit = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListBuildsInput) SetNextToken(v string) *ListBuildsInput { +func (s *ListGameServerGroupsInput) SetNextToken(v string) *ListGameServerGroupsInput { s.NextToken = &v return s } -// SetStatus sets the Status field's value. -func (s *ListBuildsInput) SetStatus(v string) *ListBuildsInput { - s.Status = &v - return s -} - -// Represents the returned data in response to a request action. -type ListBuildsOutput struct { +type ListGameServerGroupsOutput struct { _ struct{} `type:"structure"` - // A collection of build records that match the request. - Builds []*Build `type:"list"` + // A collection of game server group objects that match the request. + GameServerGroups []*GameServerGroup `type:"list"` - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // A token that indicates where to resume retrieving results on the next call + // to this operation. If no token is returned, these results represent the end // of the list. 
NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s ListBuildsOutput) String() string { +func (s ListGameServerGroupsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListBuildsOutput) GoString() string { +func (s ListGameServerGroupsOutput) GoString() string { return s.String() } -// SetBuilds sets the Builds field's value. -func (s *ListBuildsOutput) SetBuilds(v []*Build) *ListBuildsOutput { - s.Builds = v +// SetGameServerGroups sets the GameServerGroups field's value. +func (s *ListGameServerGroupsOutput) SetGameServerGroups(v []*GameServerGroup) *ListGameServerGroupsOutput { + s.GameServerGroups = v return s } // SetNextToken sets the NextToken field's value. -func (s *ListBuildsOutput) SetNextToken(v string) *ListBuildsOutput { +func (s *ListGameServerGroupsOutput) SetNextToken(v string) *ListGameServerGroupsOutput { s.NextToken = &v return s } -// Represents the input for a request action. -type ListFleetsInput struct { +type ListGameServersInput struct { _ struct{} `type:"structure"` - // A unique identifier for a build to return fleets for. Use this parameter - // to return only fleets using the specified build. Use either the build ID - // or ARN value.To retrieve all fleets, leave this parameter empty. - BuildId *string `type:"string"` + // An identifier for the game server group to retrieve a list of game servers + // from. Use either the GameServerGroup name or ARN value. + // + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` // The maximum number of results to return. Use this parameter with NextToken - // to get results as a set of sequential pages. + // to get results as a set of sequential segments. Limit *int64 `min:"1" type:"integer"` - // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start + // A token that indicates the start of the next sequential segment of results. + // Use the token returned with the previous call to this operation. To start // at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` - // A unique identifier for a Realtime script to return fleets for. Use this - // parameter to return only fleets using the specified script. Use either the - // script ID or ARN value.To retrieve all fleets, leave this parameter empty. - ScriptId *string `type:"string"` + // Indicates how to sort the returned data based on game server registration + // timestamp. Use ASCENDING to retrieve oldest game servers first, or use DESCENDING + // to retrieve newest game servers first. If this parameter is left empty, game + // servers are returned in no particular order. + SortOrder *string `type:"string" enum:"SortOrder"` } // String returns the string representation -func (s ListFleetsInput) String() string { +func (s ListGameServersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListFleetsInput) GoString() string { +func (s ListGameServersInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListFleetsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListFleetsInput"} +func (s *ListGameServersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGameServersInput"} + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) + } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) + } if s.Limit != nil && *s.Limit < 1 { invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) } @@ -17118,63 +21812,60 @@ func (s *ListFleetsInput) Validate() error { return nil } -// SetBuildId sets the BuildId field's value. -func (s *ListFleetsInput) SetBuildId(v string) *ListFleetsInput { - s.BuildId = &v +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *ListGameServersInput) SetGameServerGroupName(v string) *ListGameServersInput { + s.GameServerGroupName = &v return s } // SetLimit sets the Limit field's value. -func (s *ListFleetsInput) SetLimit(v int64) *ListFleetsInput { +func (s *ListGameServersInput) SetLimit(v int64) *ListGameServersInput { s.Limit = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListFleetsInput) SetNextToken(v string) *ListFleetsInput { +func (s *ListGameServersInput) SetNextToken(v string) *ListGameServersInput { s.NextToken = &v return s } -// SetScriptId sets the ScriptId field's value. -func (s *ListFleetsInput) SetScriptId(v string) *ListFleetsInput { - s.ScriptId = &v +// SetSortOrder sets the SortOrder field's value. +func (s *ListGameServersInput) SetSortOrder(v string) *ListGameServersInput { + s.SortOrder = &v return s } -// Represents the returned data in response to a request action. -type ListFleetsOutput struct { +type ListGameServersOutput struct { _ struct{} `type:"structure"` - // Set of fleet IDs matching the list request. You can retrieve additional information - // about all returned fleets by passing this result set to a call to DescribeFleetAttributes, - // DescribeFleetCapacity, or DescribeFleetUtilization. - FleetIds []*string `min:"1" type:"list"` + // A collection of game server objects that match the request. + GameServers []*GameServer `type:"list"` - // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // A token that indicates where to resume retrieving results on the next call + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s ListFleetsOutput) String() string { +func (s ListGameServersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListFleetsOutput) GoString() string { +func (s ListGameServersOutput) GoString() string { return s.String() } -// SetFleetIds sets the FleetIds field's value. -func (s *ListFleetsOutput) SetFleetIds(v []*string) *ListFleetsOutput { - s.FleetIds = v +// SetGameServers sets the GameServers field's value. +func (s *ListGameServersOutput) SetGameServers(v []*GameServer) *ListGameServersOutput { + s.GameServers = v return s } // SetNextToken sets the NextToken field's value. 
-func (s *ListFleetsOutput) SetNextToken(v string) *ListFleetsOutput { +func (s *ListGameServersOutput) SetNextToken(v string) *ListGameServersOutput { s.NextToken = &v return s } @@ -17187,8 +21878,8 @@ type ListScriptsInput struct { Limit *int64 `min:"1" type:"integer"` // A token that indicates the start of the next sequential page of results. - // Use the token that is returned with a previous call to this action. To start - // at the beginning of the result set, do not specify a value. + // Use the token that is returned with a previous call to this operation. To + // start at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` } @@ -17234,7 +21925,7 @@ type ListScriptsOutput struct { _ struct{} `type:"structure"` // A token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` @@ -17271,7 +21962,7 @@ type ListTagsForResourceInput struct { // that is assigned to and uniquely identifies the GameLift resource that you // want to retrieve tags for. GameLift resource ARNs are included in the data // object for the resource, which can be retrieved by calling a List or Describe - // action for the resource type. + // operation for the resource type. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -17817,8 +22508,8 @@ func (s *MatchmakingTicket) SetTicketId(v string) *MatchmakingTicket { // A service resource associated with the request could not be found. Clients // should not retry such requests. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -17835,17 +22526,75 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *NotFoundException) OrigErr() error { + return nil +} + +func (s *NotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified game server group has no available game servers to fulfill +// a ClaimGameServer request. Clients can retry such requests immediately or +// after a waiting period. 
+type OutOfCapacityException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s OutOfCapacityException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutOfCapacityException) GoString() string { + return s.String() +} + +func newErrorOutOfCapacityException(v protocol.ResponseMetadata) error { + return &OutOfCapacityException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *OutOfCapacityException) Code() string { + return "OutOfCapacityException" +} + +// Message returns the exception's message. +func (s *OutOfCapacityException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17853,22 +22602,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *OutOfCapacityException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *OutOfCapacityException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OutOfCapacityException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *OutOfCapacityException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a player session that was created as part of a StartGameSessionPlacement @@ -18161,7 +22910,7 @@ type PlayerSession struct { // The Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) // associated with the GameLift fleet that the player's game session is running // on. - FleetArn *string `min:"1" type:"string"` + FleetArn *string `type:"string"` // A unique identifier for a fleet that the player's game session is running // on. @@ -18293,7 +23042,7 @@ func (s *PlayerSession) SetTerminationTime(v time.Time) *PlayerSession { return s } -// Represents the input for a request action. +// Represents the input for a request operation. type PutScalingPolicyInput struct { _ struct{} `type:"structure"` @@ -18489,7 +23238,7 @@ func (s *PutScalingPolicyInput) SetThreshold(v float64) *PutScalingPolicyInput { return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type PutScalingPolicyOutput struct { _ struct{} `type:"structure"` @@ -18514,7 +23263,137 @@ func (s *PutScalingPolicyOutput) SetName(v string) *PutScalingPolicyOutput { return s } -// Represents the input for a request action. +type RegisterGameServerInput struct { + _ struct{} `type:"structure"` + + // Information that is needed to make inbound client connections to the game + // server. This might include the IP address and port, DNS name, and other information. + ConnectionInfo *string `min:"1" type:"string"` + + // A set of custom game server properties, formatted as a single string value. + // This data is passed to a game client or service when it requests information + // on game servers using ListGameServers or ClaimGameServer. 
+ GameServerData *string `min:"1" type:"string"` + + // A unique identifier for the game server group where the game server is running. + // Use either the GameServerGroup name or ARN value. + // + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` + + // A custom string that uniquely identifies the game server to register. Game + // server IDs are developer-defined and must be unique across all game server + // groups in your AWS account. + // + // GameServerId is a required field + GameServerId *string `min:"3" type:"string" required:"true"` + + // The unique identifier for the instance where the game server is running. + // This ID is available in the instance metadata. EC2 instance IDs use a 17-character + // format, for example: i-1234567890abcdef0. + // + // InstanceId is a required field + InstanceId *string `min:"19" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterGameServerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterGameServerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterGameServerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterGameServerInput"} + if s.ConnectionInfo != nil && len(*s.ConnectionInfo) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConnectionInfo", 1)) + } + if s.GameServerData != nil && len(*s.GameServerData) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerData", 1)) + } + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) + } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) + } + if s.GameServerId == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerId")) + } + if s.GameServerId != nil && len(*s.GameServerId) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GameServerId", 3)) + } + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + if s.InstanceId != nil && len(*s.InstanceId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("InstanceId", 19)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionInfo sets the ConnectionInfo field's value. +func (s *RegisterGameServerInput) SetConnectionInfo(v string) *RegisterGameServerInput { + s.ConnectionInfo = &v + return s +} + +// SetGameServerData sets the GameServerData field's value. +func (s *RegisterGameServerInput) SetGameServerData(v string) *RegisterGameServerInput { + s.GameServerData = &v + return s +} + +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *RegisterGameServerInput) SetGameServerGroupName(v string) *RegisterGameServerInput { + s.GameServerGroupName = &v + return s +} + +// SetGameServerId sets the GameServerId field's value. +func (s *RegisterGameServerInput) SetGameServerId(v string) *RegisterGameServerInput { + s.GameServerId = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *RegisterGameServerInput) SetInstanceId(v string) *RegisterGameServerInput { + s.InstanceId = &v + return s +} + +type RegisterGameServerOutput struct { + _ struct{} `type:"structure"` + + // Object that describes the newly registered game server. 
+ GameServer *GameServer `type:"structure"` +} + +// String returns the string representation +func (s RegisterGameServerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterGameServerOutput) GoString() string { + return s.String() +} + +// SetGameServer sets the GameServer field's value. +func (s *RegisterGameServerOutput) SetGameServer(v *GameServer) *RegisterGameServerOutput { + s.GameServer = v + return s +} + +// Represents the input for a request operation. type RequestUploadCredentialsInput struct { _ struct{} `type:"structure"` @@ -18554,7 +23433,7 @@ func (s *RequestUploadCredentialsInput) SetBuildId(v string) *RequestUploadCrede return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type RequestUploadCredentialsOutput struct { _ struct{} `type:"structure"` @@ -18589,7 +23468,7 @@ func (s *RequestUploadCredentialsOutput) SetUploadCredentials(v *AwsCredentials) return s } -// Represents the input for a request action. +// Represents the input for a request operation. type ResolveAliasInput struct { _ struct{} `type:"structure"` @@ -18629,13 +23508,13 @@ func (s *ResolveAliasInput) SetAliasId(v string) *ResolveAliasInput { return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type ResolveAliasOutput struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) // associated with the GameLift fleet resource that this alias points to. - FleetArn *string `min:"1" type:"string"` + FleetArn *string `type:"string"` // The fleet identifier that the alias is pointing to. FleetId *string `type:"string"` @@ -18691,19 +23570,102 @@ func (s ResourceCreationLimitPolicy) String() string { } // GoString returns the string representation -func (s ResourceCreationLimitPolicy) GoString() string { +func (s ResourceCreationLimitPolicy) GoString() string { + return s.String() +} + +// SetNewGameSessionsPerCreator sets the NewGameSessionsPerCreator field's value. +func (s *ResourceCreationLimitPolicy) SetNewGameSessionsPerCreator(v int64) *ResourceCreationLimitPolicy { + s.NewGameSessionsPerCreator = &v + return s +} + +// SetPolicyPeriodInMinutes sets the PolicyPeriodInMinutes field's value. +func (s *ResourceCreationLimitPolicy) SetPolicyPeriodInMinutes(v int64) *ResourceCreationLimitPolicy { + s.PolicyPeriodInMinutes = &v + return s +} + +type ResumeGameServerGroupInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the game server group. Use either the GameServerGroup + // name or ARN value. + // + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` + + // The activity to resume for this game server group. + // + // ResumeActions is a required field + ResumeActions []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s ResumeGameServerGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResumeGameServerGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ResumeGameServerGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResumeGameServerGroupInput"} + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) + } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) + } + if s.ResumeActions == nil { + invalidParams.Add(request.NewErrParamRequired("ResumeActions")) + } + if s.ResumeActions != nil && len(s.ResumeActions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResumeActions", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *ResumeGameServerGroupInput) SetGameServerGroupName(v string) *ResumeGameServerGroupInput { + s.GameServerGroupName = &v + return s +} + +// SetResumeActions sets the ResumeActions field's value. +func (s *ResumeGameServerGroupInput) SetResumeActions(v []*string) *ResumeGameServerGroupInput { + s.ResumeActions = v + return s +} + +type ResumeGameServerGroupOutput struct { + _ struct{} `type:"structure"` + + // An object that describes the game server group resource, with the SuspendedActions + // property updated to reflect the resumed activity. + GameServerGroup *GameServerGroup `type:"structure"` +} + +// String returns the string representation +func (s ResumeGameServerGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResumeGameServerGroupOutput) GoString() string { return s.String() } -// SetNewGameSessionsPerCreator sets the NewGameSessionsPerCreator field's value. -func (s *ResourceCreationLimitPolicy) SetNewGameSessionsPerCreator(v int64) *ResourceCreationLimitPolicy { - s.NewGameSessionsPerCreator = &v - return s -} - -// SetPolicyPeriodInMinutes sets the PolicyPeriodInMinutes field's value. -func (s *ResourceCreationLimitPolicy) SetPolicyPeriodInMinutes(v int64) *ResourceCreationLimitPolicy { - s.PolicyPeriodInMinutes = &v +// SetGameServerGroup sets the GameServerGroup field's value. +func (s *ResumeGameServerGroupOutput) SetGameServerGroup(v *GameServerGroup) *ResumeGameServerGroupOutput { + s.GameServerGroup = v return s } @@ -18796,7 +23758,7 @@ func (s *RoutingStrategy) SetType(v string) *RoutingStrategy { // // * UpdateFleetAttributes // -// * Manage fleet actions: StartFleetActions StopFleetActions +// * StartFleetActions or StopFleetActions type RuntimeConfiguration struct { _ struct{} `type:"structure"` @@ -18872,13 +23834,16 @@ func (s *RuntimeConfiguration) SetServerProcesses(v []*ServerProcess) *RuntimeCo return s } -// The location in Amazon S3 where build or script files are stored for access -// by Amazon GameLift. This location is specified in CreateBuild, CreateScript, -// and UpdateScript requests. +// The location in S3 where build or script files are stored for access by Amazon +// GameLift. This location is specified in CreateBuild, CreateScript, and UpdateScript +// requests. type S3Location struct { _ struct{} `type:"structure"` - // An Amazon S3 bucket identifier. This is the name of the S3 bucket. + // An S3 bucket identifier. This is the name of the S3 bucket. + // + // GameLift currently does not support uploading from S3 buckets with names + // that contain a dot (.). Bucket *string `min:"1" type:"string"` // The name of the zip file that contains the build files or script files. 
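The hunks above vendor the new GameLift FleetIQ request and response shapes (game server groups, RegisterGameServer, ResumeGameServerGroup, and related operations) together with their generated fluent setters and client-side Validate methods. As a quick orientation, the sketch below shows how those shapes are typically assembled and validated. It is an illustrative sketch only: the import path and the "REPLACE_INSTANCE_TYPES" action string are assumptions based on the usual aws-sdk-go layout, and only the types, setters, and Validate methods visible in this diff are relied on.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/gamelift" // assumed standard import path for the vendored package
)

func main() {
	// Build a RegisterGameServerInput with the generated fluent setters and run
	// the client-side validation that the vendored code provides.
	register := (&gamelift.RegisterGameServerInput{}).
		SetGameServerGroupName("my-game-server-group").
		SetGameServerId("gs-0001").
		SetConnectionInfo("10.0.0.12:7777").
		SetInstanceId("i-1234567890abcdef0") // 19-character EC2 instance ID, matching the field's minimum length
	if err := register.Validate(); err != nil {
		fmt.Println("invalid RegisterGameServerInput:", err)
	}

	// Resume a previously suspended activity on a game server group. The concrete
	// action string is an assumed example; valid values come from the action enum
	// defined elsewhere in this generated package.
	action := "REPLACE_INSTANCE_TYPES"
	resume := (&gamelift.ResumeGameServerGroupInput{}).
		SetGameServerGroupName("my-game-server-group").
		SetResumeActions([]*string{&action})
	if err := resume.Validate(); err != nil {
		fmt.Println("invalid ResumeGameServerGroupInput:", err)
	}
}

In application code these validated inputs would then be passed to the corresponding service-client operations (RegisterGameServer, ResumeGameServerGroup), which live in the same generated package but fall outside the hunks shown here.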
@@ -19185,9 +24150,9 @@ type Script struct { // are uploaded from an S3 location, this value remains at "0". SizeOnDisk *int64 `min:"1" type:"long"` - // The location in Amazon S3 where build or script files are stored for access - // by Amazon GameLift. This location is specified in CreateBuild, CreateScript, - // and UpdateScript requests. + // The location in S3 where build or script files are stored for access by Amazon + // GameLift. This location is specified in CreateBuild, CreateScript, and UpdateScript + // requests. StorageLocation *S3Location `type:"structure"` // The version that is associated with a build or script. Version strings do @@ -19247,7 +24212,7 @@ func (s *Script) SetVersion(v string) *Script { return s } -// Represents the input for a request action. +// Represents the input for a request operation. type SearchGameSessionsInput struct { _ struct{} `type:"structure"` @@ -19308,7 +24273,7 @@ type SearchGameSessionsInput struct { Limit *int64 `min:"1" type:"integer"` // Token that indicates the start of the next sequential page of results. Use - // the token that is returned with a previous call to this action. To start + // the token that is returned with a previous call to this operation. To start // at the beginning of the result set, do not specify a value. NextToken *string `min:"1" type:"string"` @@ -19396,7 +24361,7 @@ func (s *SearchGameSessionsInput) SetSortExpression(v string) *SearchGameSession return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type SearchGameSessionsOutput struct { _ struct{} `type:"structure"` @@ -19405,7 +24370,7 @@ type SearchGameSessionsOutput struct { GameSessions []*GameSession `type:"list"` // Token that indicates where to resume retrieving results on the next call - // to this action. If no token is returned, these results represent the end + // to this operation. If no token is returned, these results represent the end // of the list. NextToken *string `min:"1" type:"string"` } @@ -19587,7 +24552,7 @@ func (s StartFleetActionsOutput) GoString() string { return s.String() } -// Represents the input for a request action. +// Represents the input for a request operation. type StartGameSessionPlacementInput struct { _ struct{} `type:"structure"` @@ -19609,7 +24574,7 @@ type StartGameSessionPlacementInput struct { GameSessionName *string `min:"1" type:"string"` // Name of the queue to use to place the new game session. You can use either - // the qieue name or ARN value. + // the queue name or ARN value. // // GameSessionQueueName is a required field GameSessionQueueName *string `min:"1" type:"string" required:"true"` @@ -19753,7 +24718,7 @@ func (s *StartGameSessionPlacementInput) SetPlayerLatencies(v []*PlayerLatency) return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type StartGameSessionPlacementOutput struct { _ struct{} `type:"structure"` @@ -19779,7 +24744,7 @@ func (s *StartGameSessionPlacementOutput) SetGameSessionPlacement(v *GameSession return s } -// Represents the input for a request action. +// Represents the input for a request operation. type StartMatchBackfillInput struct { _ struct{} `type:"structure"` @@ -19888,7 +24853,7 @@ func (s *StartMatchBackfillInput) SetTicketId(v string) *StartMatchBackfillInput return s } -// Represents the returned data in response to a request action. 
+// Represents the returned data in response to a request operation. type StartMatchBackfillOutput struct { _ struct{} `type:"structure"` @@ -19914,7 +24879,7 @@ func (s *StartMatchBackfillOutput) SetMatchmakingTicket(v *MatchmakingTicket) *S return s } -// Represents the input for a request action. +// Represents the input for a request operation. type StartMatchmakingInput struct { _ struct{} `type:"structure"` @@ -19996,7 +24961,7 @@ func (s *StartMatchmakingInput) SetTicketId(v string) *StartMatchmakingInput { return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type StartMatchmakingOutput struct { _ struct{} `type:"structure"` @@ -20092,7 +25057,7 @@ func (s StopFleetActionsOutput) GoString() string { return s.String() } -// Represents the input for a request action. +// Represents the input for a request operation. type StopGameSessionPlacementInput struct { _ struct{} `type:"structure"` @@ -20134,7 +25099,7 @@ func (s *StopGameSessionPlacementInput) SetPlacementId(v string) *StopGameSessio return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type StopGameSessionPlacementOutput struct { _ struct{} `type:"structure"` @@ -20159,7 +25124,7 @@ func (s *StopGameSessionPlacementOutput) SetGameSessionPlacement(v *GameSessionP return s } -// Represents the input for a request action. +// Represents the input for a request operation. type StopMatchmakingInput struct { _ struct{} `type:"structure"` @@ -20212,6 +25177,89 @@ func (s StopMatchmakingOutput) GoString() string { return s.String() } +type SuspendGameServerGroupInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the game server group. Use either the GameServerGroup + // name or ARN value. + // + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` + + // The activity to suspend for this game server group. + // + // SuspendActions is a required field + SuspendActions []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s SuspendGameServerGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuspendGameServerGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SuspendGameServerGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SuspendGameServerGroupInput"} + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) + } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) + } + if s.SuspendActions == nil { + invalidParams.Add(request.NewErrParamRequired("SuspendActions")) + } + if s.SuspendActions != nil && len(s.SuspendActions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SuspendActions", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *SuspendGameServerGroupInput) SetGameServerGroupName(v string) *SuspendGameServerGroupInput { + s.GameServerGroupName = &v + return s +} + +// SetSuspendActions sets the SuspendActions field's value. 
+func (s *SuspendGameServerGroupInput) SetSuspendActions(v []*string) *SuspendGameServerGroupInput { + s.SuspendActions = v + return s +} + +type SuspendGameServerGroupOutput struct { + _ struct{} `type:"structure"` + + // An object that describes the game server group resource, with the SuspendedActions + // property updated to reflect the suspended activity. + GameServerGroup *GameServerGroup `type:"structure"` +} + +// String returns the string representation +func (s SuspendGameServerGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SuspendGameServerGroupOutput) GoString() string { + return s.String() +} + +// SetGameServerGroup sets the GameServerGroup field's value. +func (s *SuspendGameServerGroupOutput) SetGameServerGroup(v *GameServerGroup) *SuspendGameServerGroupOutput { + s.GameServerGroup = v + return s +} + // A label that can be assigned to a GameLift resource. // // Learn more @@ -20289,7 +25337,7 @@ type TagResourceInput struct { // The Amazon Resource Name (ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) // that is assigned to and uniquely identifies the GameLift resource that you // want to assign tags to. GameLift resource ARNs are included in the data object - // for the resource, which can be retrieved by calling a List or Describe action + // for the resource, which can be retrieved by calling a List or Describe operation // for the resource type. // // ResourceARN is a required field @@ -20373,8 +25421,8 @@ func (s TagResourceOutput) GoString() string { // tag format or the maximum tag limit may have been exceeded. Resolve the issue // before retrying. type TaggingFailedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -20391,17 +25439,17 @@ func (s TaggingFailedException) GoString() string { func newErrorTaggingFailedException(v protocol.ResponseMetadata) error { return &TaggingFailedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TaggingFailedException) Code() string { +func (s *TaggingFailedException) Code() string { return "TaggingFailedException" } // Message returns the exception's message. -func (s TaggingFailedException) Message() string { +func (s *TaggingFailedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20409,22 +25457,22 @@ func (s TaggingFailedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TaggingFailedException) OrigErr() error { +func (s *TaggingFailedException) OrigErr() error { return nil } -func (s TaggingFailedException) Error() string { +func (s *TaggingFailedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TaggingFailedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TaggingFailedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TaggingFailedException) RequestID() string { - return s.respMetadata.RequestID +func (s *TaggingFailedException) RequestID() string { + return s.RespMetadata.RequestID } // Settings for a target-based scaling policy (see ScalingPolicy. 
A target-based @@ -20486,14 +25534,59 @@ func (s *TargetConfiguration) SetTargetValue(v float64) *TargetConfiguration { return s } +// This data type is used with the Amazon GameLift FleetIQ and game server groups. +// +// Settings for a target-based scaling policy as part of a GameServerGroupAutoScalingPolicy. +// These settings are used to create a target-based policy that tracks the GameLift +// FleetIQ metric "PercentUtilizedGameServers" and specifies a target value +// for the metric. As player usage changes, the policy triggers to adjust the +// game server group capacity so that the metric returns to the target value. +type TargetTrackingConfiguration struct { + _ struct{} `type:"structure"` + + // Desired value to use with a game server group target-based scaling policy. + // + // TargetValue is a required field + TargetValue *float64 `type:"double" required:"true"` +} + +// String returns the string representation +func (s TargetTrackingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetTrackingConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetTrackingConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetTrackingConfiguration"} + if s.TargetValue == nil { + invalidParams.Add(request.NewErrParamRequired("TargetValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetValue sets the TargetValue field's value. +func (s *TargetTrackingConfiguration) SetTargetValue(v float64) *TargetTrackingConfiguration { + s.TargetValue = &v + return s +} + // The service is unable to resolve the routing for a particular alias because // it has a terminal RoutingStrategy associated with it. The message returned // in this exception is the message defined in the routing strategy itself. // Such requests should only be retried if the routing strategy for the specified // alias is modified. type TerminalRoutingStrategyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -20510,17 +25603,17 @@ func (s TerminalRoutingStrategyException) GoString() string { func newErrorTerminalRoutingStrategyException(v protocol.ResponseMetadata) error { return &TerminalRoutingStrategyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TerminalRoutingStrategyException) Code() string { +func (s *TerminalRoutingStrategyException) Code() string { return "TerminalRoutingStrategyException" } // Message returns the exception's message. -func (s TerminalRoutingStrategyException) Message() string { +func (s *TerminalRoutingStrategyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20528,28 +25621,28 @@ func (s TerminalRoutingStrategyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s TerminalRoutingStrategyException) OrigErr() error { +func (s *TerminalRoutingStrategyException) OrigErr() error { return nil } -func (s TerminalRoutingStrategyException) Error() string { +func (s *TerminalRoutingStrategyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TerminalRoutingStrategyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TerminalRoutingStrategyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TerminalRoutingStrategyException) RequestID() string { - return s.respMetadata.RequestID +func (s *TerminalRoutingStrategyException) RequestID() string { + return s.RespMetadata.RequestID } // The client failed authentication. Clients should not retry such requests. type UnauthorizedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -20566,17 +25659,17 @@ func (s UnauthorizedException) GoString() string { func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { return &UnauthorizedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnauthorizedException) Code() string { +func (s *UnauthorizedException) Code() string { return "UnauthorizedException" } // Message returns the exception's message. -func (s UnauthorizedException) Message() string { +func (s *UnauthorizedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20584,28 +25677,28 @@ func (s UnauthorizedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnauthorizedException) OrigErr() error { +func (s *UnauthorizedException) OrigErr() error { return nil } -func (s UnauthorizedException) Error() string { +func (s *UnauthorizedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnauthorizedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnauthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnauthorizedException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnauthorizedException) RequestID() string { + return s.RespMetadata.RequestID } // The requested operation is not supported in the Region specified. type UnsupportedRegionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -20622,17 +25715,17 @@ func (s UnsupportedRegionException) GoString() string { func newErrorUnsupportedRegionException(v protocol.ResponseMetadata) error { return &UnsupportedRegionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedRegionException) Code() string { +func (s *UnsupportedRegionException) Code() string { return "UnsupportedRegionException" } // Message returns the exception's message. 
-func (s UnsupportedRegionException) Message() string { +func (s *UnsupportedRegionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20640,22 +25733,22 @@ func (s UnsupportedRegionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedRegionException) OrigErr() error { +func (s *UnsupportedRegionException) OrigErr() error { return nil } -func (s UnsupportedRegionException) Error() string { +func (s *UnsupportedRegionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedRegionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedRegionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedRegionException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedRegionException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -20665,13 +25758,14 @@ type UntagResourceInput struct { // that is assigned to and uniquely identifies the GameLift resource that you // want to remove tags from. GameLift resource ARNs are included in the data // object for the resource, which can be retrieved by calling a List or Describe - // action for the resource type. + // operation for the resource type. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` - // A list of one or more tags to remove from the specified GameLift resource. - // Tags are developer-defined and structured as key-value pairs. + // A list of one or more tag keys to remove from the specified GameLift resource. + // An AWS resource can have only one tag with a specific tag key, so specifying + // the tag key identifies which tag to remove. // // TagKeys is a required field TagKeys []*string `type:"list" required:"true"` @@ -20732,7 +25826,7 @@ func (s UntagResourceOutput) GoString() string { return s.String() } -// Represents the input for a request action. +// Represents the input for a request operation. type UpdateAliasInput struct { _ struct{} `type:"structure"` @@ -20807,7 +25901,7 @@ func (s *UpdateAliasInput) SetRoutingStrategy(v *RoutingStrategy) *UpdateAliasIn return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type UpdateAliasOutput struct { _ struct{} `type:"structure"` @@ -20831,7 +25925,7 @@ func (s *UpdateAliasOutput) SetAlias(v *Alias) *UpdateAliasOutput { return s } -// Represents the input for a request action. +// Represents the input for a request operation. type UpdateBuildInput struct { _ struct{} `type:"structure"` @@ -20897,11 +25991,11 @@ func (s *UpdateBuildInput) SetVersion(v string) *UpdateBuildInput { return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type UpdateBuildOutput struct { _ struct{} `type:"structure"` - // The updated build record. + // The updated build resource. Build *Build `type:"structure"` } @@ -20921,7 +26015,7 @@ func (s *UpdateBuildOutput) SetBuild(v *Build) *UpdateBuildOutput { return s } -// Represents the input for a request action. +// Represents the input for a request operation. 
type UpdateFleetAttributesInput struct { _ struct{} `type:"structure"` @@ -21026,7 +26120,7 @@ func (s *UpdateFleetAttributesInput) SetResourceCreationLimitPolicy(v *ResourceC return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type UpdateFleetAttributesOutput struct { _ struct{} `type:"structure"` @@ -21051,7 +26145,7 @@ func (s *UpdateFleetAttributesOutput) SetFleetId(v string) *UpdateFleetAttribute return s } -// Represents the input for a request action. +// Represents the input for a request operation. type UpdateFleetCapacityInput struct { _ struct{} `type:"structure"` @@ -21120,7 +26214,7 @@ func (s *UpdateFleetCapacityInput) SetMinSize(v int64) *UpdateFleetCapacityInput return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type UpdateFleetCapacityOutput struct { _ struct{} `type:"structure"` @@ -21144,58 +26238,315 @@ func (s *UpdateFleetCapacityOutput) SetFleetId(v string) *UpdateFleetCapacityOut return s } -// Represents the input for a request action. -type UpdateFleetPortSettingsInput struct { +// Represents the input for a request operation. +type UpdateFleetPortSettingsInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for a fleet to update port settings for. You can use + // either the fleet ID or ARN value. + // + // FleetId is a required field + FleetId *string `type:"string" required:"true"` + + // A collection of port settings to be added to the fleet resource. + InboundPermissionAuthorizations []*IpPermission `type:"list"` + + // A collection of port settings to be removed from the fleet resource. + InboundPermissionRevocations []*IpPermission `type:"list"` +} + +// String returns the string representation +func (s UpdateFleetPortSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetPortSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateFleetPortSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateFleetPortSettingsInput"} + if s.FleetId == nil { + invalidParams.Add(request.NewErrParamRequired("FleetId")) + } + if s.InboundPermissionAuthorizations != nil { + for i, v := range s.InboundPermissionAuthorizations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InboundPermissionAuthorizations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.InboundPermissionRevocations != nil { + for i, v := range s.InboundPermissionRevocations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InboundPermissionRevocations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFleetId sets the FleetId field's value. +func (s *UpdateFleetPortSettingsInput) SetFleetId(v string) *UpdateFleetPortSettingsInput { + s.FleetId = &v + return s +} + +// SetInboundPermissionAuthorizations sets the InboundPermissionAuthorizations field's value. 
+func (s *UpdateFleetPortSettingsInput) SetInboundPermissionAuthorizations(v []*IpPermission) *UpdateFleetPortSettingsInput { + s.InboundPermissionAuthorizations = v + return s +} + +// SetInboundPermissionRevocations sets the InboundPermissionRevocations field's value. +func (s *UpdateFleetPortSettingsInput) SetInboundPermissionRevocations(v []*IpPermission) *UpdateFleetPortSettingsInput { + s.InboundPermissionRevocations = v + return s +} + +// Represents the returned data in response to a request operation. +type UpdateFleetPortSettingsOutput struct { + _ struct{} `type:"structure"` + + // A unique identifier for a fleet that was updated. + FleetId *string `type:"string"` +} + +// String returns the string representation +func (s UpdateFleetPortSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFleetPortSettingsOutput) GoString() string { + return s.String() +} + +// SetFleetId sets the FleetId field's value. +func (s *UpdateFleetPortSettingsOutput) SetFleetId(v string) *UpdateFleetPortSettingsOutput { + s.FleetId = &v + return s +} + +type UpdateGameServerGroupInput struct { + _ struct{} `type:"structure"` + + // Indicates how GameLift FleetIQ balances the use of Spot Instances and On-Demand + // Instances in the game server group. Method options include the following: + // + // * SPOT_ONLY - Only Spot Instances are used in the game server group. If + // Spot Instances are unavailable or not viable for game hosting, the game + // server group provides no hosting capacity until Spot Instances can again + // be used. Until then, no new instances are started, and the existing nonviable + // Spot Instances are terminated (after current gameplay ends) and are not + // replaced. + // + // * SPOT_PREFERRED - (default value) Spot Instances are used whenever available + // in the game server group. If Spot Instances are unavailable, the game + // server group continues to provide hosting capacity by falling back to + // On-Demand Instances. Existing nonviable Spot Instances are terminated + // (after current gameplay ends) and are replaced with new On-Demand Instances. + // + // * ON_DEMAND_ONLY - Only On-Demand Instances are used in the game server + // group. No Spot Instances are used, even when available, while this balancing + // strategy is in force. + BalancingStrategy *string `type:"string" enum:"BalancingStrategy"` + + // A unique identifier for the game server group. Use either the GameServerGroup + // name or ARN value. + // + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` + + // A flag that indicates whether instances in the game server group are protected + // from early termination. Unprotected instances that have active game servers + // running might be terminated during a scale-down event, causing players to + // be dropped from the game. Protected instances cannot be terminated while + // there are active game servers running except in the event of a forced game + // server group deletion (see ). An exception to this is with Spot Instances, + // which can be terminated by AWS regardless of protection status. This property + // is set to NO_PROTECTION by default. + GameServerProtectionPolicy *string `type:"string" enum:"GameServerProtectionPolicy"` + + // An updated list of EC2 instance types to use in the Auto Scaling group. 
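Editor's note: an illustrative use of the UpdateFleetPortSettings shapes added a little above, not part of the vendored change. The fleet ID, ports, and CIDR blocks are placeholder values; IpProtocolUdp and IpProtocolTcp are the enum constants added later in this same file.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

// openGamePorts authorizes one inbound UDP range and revokes an old TCP rule
// in a single UpdateFleetPortSettings request.
func openGamePorts(svc *gamelift.GameLift, fleetID string) error {
	_, err := svc.UpdateFleetPortSettings(&gamelift.UpdateFleetPortSettingsInput{
		FleetId: aws.String(fleetID),
		InboundPermissionAuthorizations: []*gamelift.IpPermission{{
			FromPort: aws.Int64(33430),
			ToPort:   aws.Int64(33440),
			IpRange:  aws.String("203.0.113.0/24"),
			Protocol: aws.String(gamelift.IpProtocolUdp),
		}},
		InboundPermissionRevocations: []*gamelift.IpPermission{{
			FromPort: aws.Int64(1935),
			ToPort:   aws.Int64(1935),
			IpRange:  aws.String("0.0.0.0/0"),
			Protocol: aws.String(gamelift.IpProtocolTcp),
		}},
	})
	return err
}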
The + // instance definitions must specify at least two different instance types that + // are supported by GameLift FleetIQ. This updated list replaces the entire + // current list of instance definitions for the game server group. For more + // information on instance types, see EC2 Instance Types (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html) + // in the Amazon EC2 User Guide. You can optionally specify capacity weighting + // for each instance type. If no weight value is specified for an instance type, + // it is set to the default value "1". For more information about capacity weighting, + // see Instance Weighting for Amazon EC2 Auto Scaling (https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-weighting.html) + // in the Amazon EC2 Auto Scaling User Guide. + InstanceDefinitions []*InstanceDefinition `min:"2" type:"list"` + + // The Amazon Resource Name (ARN (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-arn-format.html)) + // for an IAM role that allows Amazon GameLift to access your EC2 Auto Scaling + // groups. + RoleArn *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateGameServerGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGameServerGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGameServerGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGameServerGroupInput"} + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) + } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) + } + if s.InstanceDefinitions != nil && len(s.InstanceDefinitions) < 2 { + invalidParams.Add(request.NewErrParamMinLen("InstanceDefinitions", 2)) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + if s.InstanceDefinitions != nil { + for i, v := range s.InstanceDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBalancingStrategy sets the BalancingStrategy field's value. +func (s *UpdateGameServerGroupInput) SetBalancingStrategy(v string) *UpdateGameServerGroupInput { + s.BalancingStrategy = &v + return s +} + +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *UpdateGameServerGroupInput) SetGameServerGroupName(v string) *UpdateGameServerGroupInput { + s.GameServerGroupName = &v + return s +} + +// SetGameServerProtectionPolicy sets the GameServerProtectionPolicy field's value. +func (s *UpdateGameServerGroupInput) SetGameServerProtectionPolicy(v string) *UpdateGameServerGroupInput { + s.GameServerProtectionPolicy = &v + return s +} + +// SetInstanceDefinitions sets the InstanceDefinitions field's value. +func (s *UpdateGameServerGroupInput) SetInstanceDefinitions(v []*InstanceDefinition) *UpdateGameServerGroupInput { + s.InstanceDefinitions = v + return s +} + +// SetRoleArn sets the RoleArn field's value. 
+func (s *UpdateGameServerGroupInput) SetRoleArn(v string) *UpdateGameServerGroupInput { + s.RoleArn = &v + return s +} + +type UpdateGameServerGroupOutput struct { + _ struct{} `type:"structure"` + + // An object that describes the game server group resource with updated properties. + GameServerGroup *GameServerGroup `type:"structure"` +} + +// String returns the string representation +func (s UpdateGameServerGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGameServerGroupOutput) GoString() string { + return s.String() +} + +// SetGameServerGroup sets the GameServerGroup field's value. +func (s *UpdateGameServerGroupOutput) SetGameServerGroup(v *GameServerGroup) *UpdateGameServerGroupOutput { + s.GameServerGroup = v + return s +} + +type UpdateGameServerInput struct { _ struct{} `type:"structure"` - // A unique identifier for a fleet to update port settings for. You can use - // either the fleet ID or ARN value. + // A set of custom game server properties, formatted as a single string value. + // This data is passed to a game client or service when it requests information + // on game servers using ListGameServers or ClaimGameServer. + GameServerData *string `min:"1" type:"string"` + + // A unique identifier for the game server group where the game server is running. + // Use either the GameServerGroup name or ARN value. // - // FleetId is a required field - FleetId *string `type:"string" required:"true"` + // GameServerGroupName is a required field + GameServerGroupName *string `min:"1" type:"string" required:"true"` - // A collection of port settings to be added to the fleet record. - InboundPermissionAuthorizations []*IpPermission `type:"list"` + // A custom string that uniquely identifies the game server to update. + // + // GameServerId is a required field + GameServerId *string `min:"3" type:"string" required:"true"` - // A collection of port settings to be removed from the fleet record. - InboundPermissionRevocations []*IpPermission `type:"list"` + // Indicates health status of the game server. A request that includes this + // parameter updates the game server's LastHealthCheckTime timestamp. + HealthCheck *string `type:"string" enum:"GameServerHealthCheck"` + + // Indicates whether the game server is available or is currently hosting gameplay. + UtilizationStatus *string `type:"string" enum:"GameServerUtilizationStatus"` } // String returns the string representation -func (s UpdateFleetPortSettingsInput) String() string { +func (s UpdateGameServerInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateFleetPortSettingsInput) GoString() string { +func (s UpdateGameServerInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
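Editor's note: a sketch of how the UpdateGameServerGroup shapes above fit together, illustrative only. The group name is a placeholder, BalancingStrategySpotPreferred is one of the enum constants added later in this file, and I am assuming InstanceDefinition exposes InstanceType and WeightedCapacity as shown.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

// rebalanceGroup replaces the group's instance definitions (the doc above notes
// the list replaces the current one wholesale and needs at least two types) and
// keeps Spot usage on the default SPOT_PREFERRED strategy.
func rebalanceGroup(svc *gamelift.GameLift) (*gamelift.GameServerGroup, error) {
	out, err := svc.UpdateGameServerGroup(&gamelift.UpdateGameServerGroupInput{
		GameServerGroupName: aws.String("example-game-server-group"),
		BalancingStrategy:   aws.String(gamelift.BalancingStrategySpotPreferred),
		InstanceDefinitions: []*gamelift.InstanceDefinition{
			{InstanceType: aws.String(gamelift.GameServerGroupInstanceTypeC5Large)},
			{
				InstanceType:     aws.String(gamelift.GameServerGroupInstanceTypeC5Xlarge),
				WeightedCapacity: aws.String("2"), // counts as two units of capacity
			},
		},
	})
	if err != nil {
		return nil, err
	}
	return out.GameServerGroup, nil
}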
-func (s *UpdateFleetPortSettingsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateFleetPortSettingsInput"} - if s.FleetId == nil { - invalidParams.Add(request.NewErrParamRequired("FleetId")) +func (s *UpdateGameServerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGameServerInput"} + if s.GameServerData != nil && len(*s.GameServerData) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerData", 1)) } - if s.InboundPermissionAuthorizations != nil { - for i, v := range s.InboundPermissionAuthorizations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InboundPermissionAuthorizations", i), err.(request.ErrInvalidParams)) - } - } + if s.GameServerGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerGroupName")) } - if s.InboundPermissionRevocations != nil { - for i, v := range s.InboundPermissionRevocations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InboundPermissionRevocations", i), err.(request.ErrInvalidParams)) - } - } + if s.GameServerGroupName != nil && len(*s.GameServerGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GameServerGroupName", 1)) + } + if s.GameServerId == nil { + invalidParams.Add(request.NewErrParamRequired("GameServerId")) + } + if s.GameServerId != nil && len(*s.GameServerId) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GameServerId", 3)) } if invalidParams.Len() > 0 { @@ -21204,49 +26555,60 @@ func (s *UpdateFleetPortSettingsInput) Validate() error { return nil } -// SetFleetId sets the FleetId field's value. -func (s *UpdateFleetPortSettingsInput) SetFleetId(v string) *UpdateFleetPortSettingsInput { - s.FleetId = &v +// SetGameServerData sets the GameServerData field's value. +func (s *UpdateGameServerInput) SetGameServerData(v string) *UpdateGameServerInput { + s.GameServerData = &v return s } -// SetInboundPermissionAuthorizations sets the InboundPermissionAuthorizations field's value. -func (s *UpdateFleetPortSettingsInput) SetInboundPermissionAuthorizations(v []*IpPermission) *UpdateFleetPortSettingsInput { - s.InboundPermissionAuthorizations = v +// SetGameServerGroupName sets the GameServerGroupName field's value. +func (s *UpdateGameServerInput) SetGameServerGroupName(v string) *UpdateGameServerInput { + s.GameServerGroupName = &v return s } -// SetInboundPermissionRevocations sets the InboundPermissionRevocations field's value. -func (s *UpdateFleetPortSettingsInput) SetInboundPermissionRevocations(v []*IpPermission) *UpdateFleetPortSettingsInput { - s.InboundPermissionRevocations = v +// SetGameServerId sets the GameServerId field's value. +func (s *UpdateGameServerInput) SetGameServerId(v string) *UpdateGameServerInput { + s.GameServerId = &v return s } -// Represents the returned data in response to a request action. -type UpdateFleetPortSettingsOutput struct { +// SetHealthCheck sets the HealthCheck field's value. +func (s *UpdateGameServerInput) SetHealthCheck(v string) *UpdateGameServerInput { + s.HealthCheck = &v + return s +} + +// SetUtilizationStatus sets the UtilizationStatus field's value. +func (s *UpdateGameServerInput) SetUtilizationStatus(v string) *UpdateGameServerInput { + s.UtilizationStatus = &v + return s +} + +type UpdateGameServerOutput struct { _ struct{} `type:"structure"` - // A unique identifier for a fleet that was updated. 
- FleetId *string `type:"string"` + // Object that describes the newly updated game server. + GameServer *GameServer `type:"structure"` } // String returns the string representation -func (s UpdateFleetPortSettingsOutput) String() string { +func (s UpdateGameServerOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateFleetPortSettingsOutput) GoString() string { +func (s UpdateGameServerOutput) GoString() string { return s.String() } -// SetFleetId sets the FleetId field's value. -func (s *UpdateFleetPortSettingsOutput) SetFleetId(v string) *UpdateFleetPortSettingsOutput { - s.FleetId = &v +// SetGameServer sets the GameServer field's value. +func (s *UpdateGameServerOutput) SetGameServer(v *GameServer) *UpdateGameServerOutput { + s.GameServer = v return s } -// Represents the input for a request action. +// Represents the input for a request operation. type UpdateGameSessionInput struct { _ struct{} `type:"structure"` @@ -21335,7 +26697,7 @@ func (s *UpdateGameSessionInput) SetProtectionPolicy(v string) *UpdateGameSessio return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type UpdateGameSessionOutput struct { _ struct{} `type:"structure"` @@ -21359,7 +26721,7 @@ func (s *UpdateGameSessionOutput) SetGameSession(v *GameSession) *UpdateGameSess return s } -// Represents the input for a request action. +// Represents the input for a request operation. type UpdateGameSessionQueueInput struct { _ struct{} `type:"structure"` @@ -21453,7 +26815,7 @@ func (s *UpdateGameSessionQueueInput) SetTimeoutInSeconds(v int64) *UpdateGameSe return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type UpdateGameSessionQueueOutput struct { _ struct{} `type:"structure"` @@ -21477,7 +26839,7 @@ func (s *UpdateGameSessionQueueOutput) SetGameSessionQueue(v *GameSessionQueue) return s } -// Represents the input for a request action. +// Represents the input for a request operation. type UpdateMatchmakingConfigurationInput struct { _ struct{} `type:"structure"` @@ -21682,7 +27044,7 @@ func (s *UpdateMatchmakingConfigurationInput) SetRuleSetName(v string) *UpdateMa return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type UpdateMatchmakingConfigurationOutput struct { _ struct{} `type:"structure"` @@ -21706,7 +27068,7 @@ func (s *UpdateMatchmakingConfigurationOutput) SetConfiguration(v *MatchmakingCo return s } -// Represents the input for a request action. +// Represents the input for a request operation. type UpdateRuntimeConfigurationInput struct { _ struct{} `type:"structure"` @@ -21771,7 +27133,7 @@ func (s *UpdateRuntimeConfigurationInput) SetRuntimeConfiguration(v *RuntimeConf return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type UpdateRuntimeConfigurationOutput struct { _ struct{} `type:"structure"` @@ -21927,7 +27289,7 @@ func (s *UpdateScriptOutput) SetScript(v *Script) *UpdateScriptOutput { return s } -// Represents the input for a request action. +// Represents the input for a request operation. 
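Editor's note: the UpdateGameServer shapes above are what a game server's own heartbeat would drive. A minimal sketch, illustrative only; the group and server IDs are placeholders.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

// reportHealthy sends the periodic health report described above. Including
// HealthCheck updates the server's LastHealthCheckTime; UtilizationStatus
// flips once the server starts hosting a game session.
func reportHealthy(svc *gamelift.GameLift, hosting bool) error {
	status := gamelift.GameServerUtilizationStatusAvailable
	if hosting {
		status = gamelift.GameServerUtilizationStatusUtilized
	}
	_, err := svc.UpdateGameServer(&gamelift.UpdateGameServerInput{
		GameServerGroupName: aws.String("example-game-server-group"),
		GameServerId:        aws.String("example-game-server-id"),
		HealthCheck:         aws.String(gamelift.GameServerHealthCheckHealthy),
		UtilizationStatus:   aws.String(status),
	})
	return err
}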
type ValidateMatchmakingRuleSetInput struct { _ struct{} `type:"structure"` @@ -21969,7 +27331,7 @@ func (s *ValidateMatchmakingRuleSetInput) SetRuleSetBody(v string) *ValidateMatc return s } -// Represents the returned data in response to a request action. +// Represents the returned data in response to a request operation. type ValidateMatchmakingRuleSetOutput struct { _ struct{} `type:"structure"` @@ -22095,7 +27457,7 @@ type VpcPeeringConnection struct { // The Amazon Resource Name (ARN (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)) // associated with the GameLift fleet resource for this connection. - FleetArn *string `min:"1" type:"string"` + FleetArn *string `type:"string"` // A unique identifier for a fleet. This ID determines the ID of the Amazon // GameLift VPC for your fleet. @@ -22224,6 +27586,14 @@ const ( AcceptanceTypeReject = "REJECT" ) +// AcceptanceType_Values returns all elements of the AcceptanceType enum +func AcceptanceType_Values() []string { + return []string{ + AcceptanceTypeAccept, + AcceptanceTypeReject, + } +} + const ( // BackfillModeAutomatic is a BackfillMode enum value BackfillModeAutomatic = "AUTOMATIC" @@ -22232,6 +27602,34 @@ const ( BackfillModeManual = "MANUAL" ) +// BackfillMode_Values returns all elements of the BackfillMode enum +func BackfillMode_Values() []string { + return []string{ + BackfillModeAutomatic, + BackfillModeManual, + } +} + +const ( + // BalancingStrategySpotOnly is a BalancingStrategy enum value + BalancingStrategySpotOnly = "SPOT_ONLY" + + // BalancingStrategySpotPreferred is a BalancingStrategy enum value + BalancingStrategySpotPreferred = "SPOT_PREFERRED" + + // BalancingStrategyOnDemandOnly is a BalancingStrategy enum value + BalancingStrategyOnDemandOnly = "ON_DEMAND_ONLY" +) + +// BalancingStrategy_Values returns all elements of the BalancingStrategy enum +func BalancingStrategy_Values() []string { + return []string{ + BalancingStrategySpotOnly, + BalancingStrategySpotPreferred, + BalancingStrategyOnDemandOnly, + } +} + const ( // BuildStatusInitialized is a BuildStatus enum value BuildStatusInitialized = "INITIALIZED" @@ -22243,6 +27641,15 @@ const ( BuildStatusFailed = "FAILED" ) +// BuildStatus_Values returns all elements of the BuildStatus enum +func BuildStatus_Values() []string { + return []string{ + BuildStatusInitialized, + BuildStatusReady, + BuildStatusFailed, + } +} + const ( // CertificateTypeDisabled is a CertificateType enum value CertificateTypeDisabled = "DISABLED" @@ -22251,6 +27658,14 @@ const ( CertificateTypeGenerated = "GENERATED" ) +// CertificateType_Values returns all elements of the CertificateType enum +func CertificateType_Values() []string { + return []string{ + CertificateTypeDisabled, + CertificateTypeGenerated, + } +} + const ( // ComparisonOperatorTypeGreaterThanOrEqualToThreshold is a ComparisonOperatorType enum value ComparisonOperatorTypeGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" @@ -22265,6 +27680,16 @@ const ( ComparisonOperatorTypeLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" ) +// ComparisonOperatorType_Values returns all elements of the ComparisonOperatorType enum +func ComparisonOperatorType_Values() []string { + return []string{ + ComparisonOperatorTypeGreaterThanOrEqualToThreshold, + ComparisonOperatorTypeGreaterThanThreshold, + ComparisonOperatorTypeLessThanThreshold, + ComparisonOperatorTypeLessThanOrEqualToThreshold, + } +} + const ( // EC2InstanceTypeT2Micro is a EC2InstanceType enum value EC2InstanceTypeT2Micro = 
"t2.micro" @@ -22441,6 +27866,70 @@ const ( EC2InstanceTypeM524xlarge = "m5.24xlarge" ) +// EC2InstanceType_Values returns all elements of the EC2InstanceType enum +func EC2InstanceType_Values() []string { + return []string{ + EC2InstanceTypeT2Micro, + EC2InstanceTypeT2Small, + EC2InstanceTypeT2Medium, + EC2InstanceTypeT2Large, + EC2InstanceTypeC3Large, + EC2InstanceTypeC3Xlarge, + EC2InstanceTypeC32xlarge, + EC2InstanceTypeC34xlarge, + EC2InstanceTypeC38xlarge, + EC2InstanceTypeC4Large, + EC2InstanceTypeC4Xlarge, + EC2InstanceTypeC42xlarge, + EC2InstanceTypeC44xlarge, + EC2InstanceTypeC48xlarge, + EC2InstanceTypeC5Large, + EC2InstanceTypeC5Xlarge, + EC2InstanceTypeC52xlarge, + EC2InstanceTypeC54xlarge, + EC2InstanceTypeC59xlarge, + EC2InstanceTypeC512xlarge, + EC2InstanceTypeC518xlarge, + EC2InstanceTypeC524xlarge, + EC2InstanceTypeR3Large, + EC2InstanceTypeR3Xlarge, + EC2InstanceTypeR32xlarge, + EC2InstanceTypeR34xlarge, + EC2InstanceTypeR38xlarge, + EC2InstanceTypeR4Large, + EC2InstanceTypeR4Xlarge, + EC2InstanceTypeR42xlarge, + EC2InstanceTypeR44xlarge, + EC2InstanceTypeR48xlarge, + EC2InstanceTypeR416xlarge, + EC2InstanceTypeR5Large, + EC2InstanceTypeR5Xlarge, + EC2InstanceTypeR52xlarge, + EC2InstanceTypeR54xlarge, + EC2InstanceTypeR58xlarge, + EC2InstanceTypeR512xlarge, + EC2InstanceTypeR516xlarge, + EC2InstanceTypeR524xlarge, + EC2InstanceTypeM3Medium, + EC2InstanceTypeM3Large, + EC2InstanceTypeM3Xlarge, + EC2InstanceTypeM32xlarge, + EC2InstanceTypeM4Large, + EC2InstanceTypeM4Xlarge, + EC2InstanceTypeM42xlarge, + EC2InstanceTypeM44xlarge, + EC2InstanceTypeM410xlarge, + EC2InstanceTypeM5Large, + EC2InstanceTypeM5Xlarge, + EC2InstanceTypeM52xlarge, + EC2InstanceTypeM54xlarge, + EC2InstanceTypeM58xlarge, + EC2InstanceTypeM512xlarge, + EC2InstanceTypeM516xlarge, + EC2InstanceTypeM524xlarge, + } +} + const ( // EventCodeGenericEvent is a EventCode enum value EventCodeGenericEvent = "GENERIC_EVENT" @@ -22542,11 +28031,57 @@ const ( EventCodeInstanceInterrupted = "INSTANCE_INTERRUPTED" ) +// EventCode_Values returns all elements of the EventCode enum +func EventCode_Values() []string { + return []string{ + EventCodeGenericEvent, + EventCodeFleetCreated, + EventCodeFleetDeleted, + EventCodeFleetScalingEvent, + EventCodeFleetStateDownloading, + EventCodeFleetStateValidating, + EventCodeFleetStateBuilding, + EventCodeFleetStateActivating, + EventCodeFleetStateActive, + EventCodeFleetStateError, + EventCodeFleetInitializationFailed, + EventCodeFleetBinaryDownloadFailed, + EventCodeFleetValidationLaunchPathNotFound, + EventCodeFleetValidationExecutableRuntimeFailure, + EventCodeFleetValidationTimedOut, + EventCodeFleetActivationFailed, + EventCodeFleetActivationFailedNoInstances, + EventCodeFleetNewGameSessionProtectionPolicyUpdated, + EventCodeServerProcessInvalidPath, + EventCodeServerProcessSdkInitializationTimeout, + EventCodeServerProcessProcessReadyTimeout, + EventCodeServerProcessCrashed, + EventCodeServerProcessTerminatedUnhealthy, + EventCodeServerProcessForceTerminated, + EventCodeServerProcessProcessExitTimeout, + EventCodeGameSessionActivationTimeout, + EventCodeFleetCreationExtractingBuild, + EventCodeFleetCreationRunningInstaller, + EventCodeFleetCreationValidatingRuntimeConfig, + EventCodeFleetVpcPeeringSucceeded, + EventCodeFleetVpcPeeringFailed, + EventCodeFleetVpcPeeringDeleted, + EventCodeInstanceInterrupted, + } +} + const ( // FleetActionAutoScaling is a FleetAction enum value FleetActionAutoScaling = "AUTO_SCALING" ) +// FleetAction_Values returns all elements of the 
FleetAction enum +func FleetAction_Values() []string { + return []string{ + FleetActionAutoScaling, + } +} + const ( // FleetStatusNew is a FleetStatus enum value FleetStatusNew = "NEW" @@ -22576,6 +28111,21 @@ const ( FleetStatusTerminated = "TERMINATED" ) +// FleetStatus_Values returns all elements of the FleetStatus enum +func FleetStatus_Values() []string { + return []string{ + FleetStatusNew, + FleetStatusDownloading, + FleetStatusValidating, + FleetStatusBuilding, + FleetStatusActivating, + FleetStatusActive, + FleetStatusDeleting, + FleetStatusError, + FleetStatusTerminated, + } +} + const ( // FleetTypeOnDemand is a FleetType enum value FleetTypeOnDemand = "ON_DEMAND" @@ -22584,6 +28134,326 @@ const ( FleetTypeSpot = "SPOT" ) +// FleetType_Values returns all elements of the FleetType enum +func FleetType_Values() []string { + return []string{ + FleetTypeOnDemand, + FleetTypeSpot, + } +} + +const ( + // GameServerClaimStatusClaimed is a GameServerClaimStatus enum value + GameServerClaimStatusClaimed = "CLAIMED" +) + +// GameServerClaimStatus_Values returns all elements of the GameServerClaimStatus enum +func GameServerClaimStatus_Values() []string { + return []string{ + GameServerClaimStatusClaimed, + } +} + +const ( + // GameServerGroupActionReplaceInstanceTypes is a GameServerGroupAction enum value + GameServerGroupActionReplaceInstanceTypes = "REPLACE_INSTANCE_TYPES" +) + +// GameServerGroupAction_Values returns all elements of the GameServerGroupAction enum +func GameServerGroupAction_Values() []string { + return []string{ + GameServerGroupActionReplaceInstanceTypes, + } +} + +const ( + // GameServerGroupDeleteOptionSafeDelete is a GameServerGroupDeleteOption enum value + GameServerGroupDeleteOptionSafeDelete = "SAFE_DELETE" + + // GameServerGroupDeleteOptionForceDelete is a GameServerGroupDeleteOption enum value + GameServerGroupDeleteOptionForceDelete = "FORCE_DELETE" + + // GameServerGroupDeleteOptionRetain is a GameServerGroupDeleteOption enum value + GameServerGroupDeleteOptionRetain = "RETAIN" +) + +// GameServerGroupDeleteOption_Values returns all elements of the GameServerGroupDeleteOption enum +func GameServerGroupDeleteOption_Values() []string { + return []string{ + GameServerGroupDeleteOptionSafeDelete, + GameServerGroupDeleteOptionForceDelete, + GameServerGroupDeleteOptionRetain, + } +} + +const ( + // GameServerGroupInstanceTypeC4Large is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeC4Large = "c4.large" + + // GameServerGroupInstanceTypeC4Xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeC4Xlarge = "c4.xlarge" + + // GameServerGroupInstanceTypeC42xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeC42xlarge = "c4.2xlarge" + + // GameServerGroupInstanceTypeC44xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeC44xlarge = "c4.4xlarge" + + // GameServerGroupInstanceTypeC48xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeC48xlarge = "c4.8xlarge" + + // GameServerGroupInstanceTypeC5Large is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeC5Large = "c5.large" + + // GameServerGroupInstanceTypeC5Xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeC5Xlarge = "c5.xlarge" + + // GameServerGroupInstanceTypeC52xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeC52xlarge = "c5.2xlarge" + + // GameServerGroupInstanceTypeC54xlarge is a GameServerGroupInstanceType 
enum value + GameServerGroupInstanceTypeC54xlarge = "c5.4xlarge" + + // GameServerGroupInstanceTypeC59xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeC59xlarge = "c5.9xlarge" + + // GameServerGroupInstanceTypeC512xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeC512xlarge = "c5.12xlarge" + + // GameServerGroupInstanceTypeC518xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeC518xlarge = "c5.18xlarge" + + // GameServerGroupInstanceTypeC524xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeC524xlarge = "c5.24xlarge" + + // GameServerGroupInstanceTypeR4Large is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR4Large = "r4.large" + + // GameServerGroupInstanceTypeR4Xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR4Xlarge = "r4.xlarge" + + // GameServerGroupInstanceTypeR42xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR42xlarge = "r4.2xlarge" + + // GameServerGroupInstanceTypeR44xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR44xlarge = "r4.4xlarge" + + // GameServerGroupInstanceTypeR48xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR48xlarge = "r4.8xlarge" + + // GameServerGroupInstanceTypeR416xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR416xlarge = "r4.16xlarge" + + // GameServerGroupInstanceTypeR5Large is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR5Large = "r5.large" + + // GameServerGroupInstanceTypeR5Xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR5Xlarge = "r5.xlarge" + + // GameServerGroupInstanceTypeR52xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR52xlarge = "r5.2xlarge" + + // GameServerGroupInstanceTypeR54xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR54xlarge = "r5.4xlarge" + + // GameServerGroupInstanceTypeR58xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR58xlarge = "r5.8xlarge" + + // GameServerGroupInstanceTypeR512xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR512xlarge = "r5.12xlarge" + + // GameServerGroupInstanceTypeR516xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR516xlarge = "r5.16xlarge" + + // GameServerGroupInstanceTypeR524xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeR524xlarge = "r5.24xlarge" + + // GameServerGroupInstanceTypeM4Large is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeM4Large = "m4.large" + + // GameServerGroupInstanceTypeM4Xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeM4Xlarge = "m4.xlarge" + + // GameServerGroupInstanceTypeM42xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeM42xlarge = "m4.2xlarge" + + // GameServerGroupInstanceTypeM44xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeM44xlarge = "m4.4xlarge" + + // GameServerGroupInstanceTypeM410xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeM410xlarge = "m4.10xlarge" + + // GameServerGroupInstanceTypeM5Large is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeM5Large = "m5.large" + + // GameServerGroupInstanceTypeM5Xlarge is a GameServerGroupInstanceType enum value + 
GameServerGroupInstanceTypeM5Xlarge = "m5.xlarge" + + // GameServerGroupInstanceTypeM52xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeM52xlarge = "m5.2xlarge" + + // GameServerGroupInstanceTypeM54xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeM54xlarge = "m5.4xlarge" + + // GameServerGroupInstanceTypeM58xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeM58xlarge = "m5.8xlarge" + + // GameServerGroupInstanceTypeM512xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeM512xlarge = "m5.12xlarge" + + // GameServerGroupInstanceTypeM516xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeM516xlarge = "m5.16xlarge" + + // GameServerGroupInstanceTypeM524xlarge is a GameServerGroupInstanceType enum value + GameServerGroupInstanceTypeM524xlarge = "m5.24xlarge" +) + +// GameServerGroupInstanceType_Values returns all elements of the GameServerGroupInstanceType enum +func GameServerGroupInstanceType_Values() []string { + return []string{ + GameServerGroupInstanceTypeC4Large, + GameServerGroupInstanceTypeC4Xlarge, + GameServerGroupInstanceTypeC42xlarge, + GameServerGroupInstanceTypeC44xlarge, + GameServerGroupInstanceTypeC48xlarge, + GameServerGroupInstanceTypeC5Large, + GameServerGroupInstanceTypeC5Xlarge, + GameServerGroupInstanceTypeC52xlarge, + GameServerGroupInstanceTypeC54xlarge, + GameServerGroupInstanceTypeC59xlarge, + GameServerGroupInstanceTypeC512xlarge, + GameServerGroupInstanceTypeC518xlarge, + GameServerGroupInstanceTypeC524xlarge, + GameServerGroupInstanceTypeR4Large, + GameServerGroupInstanceTypeR4Xlarge, + GameServerGroupInstanceTypeR42xlarge, + GameServerGroupInstanceTypeR44xlarge, + GameServerGroupInstanceTypeR48xlarge, + GameServerGroupInstanceTypeR416xlarge, + GameServerGroupInstanceTypeR5Large, + GameServerGroupInstanceTypeR5Xlarge, + GameServerGroupInstanceTypeR52xlarge, + GameServerGroupInstanceTypeR54xlarge, + GameServerGroupInstanceTypeR58xlarge, + GameServerGroupInstanceTypeR512xlarge, + GameServerGroupInstanceTypeR516xlarge, + GameServerGroupInstanceTypeR524xlarge, + GameServerGroupInstanceTypeM4Large, + GameServerGroupInstanceTypeM4Xlarge, + GameServerGroupInstanceTypeM42xlarge, + GameServerGroupInstanceTypeM44xlarge, + GameServerGroupInstanceTypeM410xlarge, + GameServerGroupInstanceTypeM5Large, + GameServerGroupInstanceTypeM5Xlarge, + GameServerGroupInstanceTypeM52xlarge, + GameServerGroupInstanceTypeM54xlarge, + GameServerGroupInstanceTypeM58xlarge, + GameServerGroupInstanceTypeM512xlarge, + GameServerGroupInstanceTypeM516xlarge, + GameServerGroupInstanceTypeM524xlarge, + } +} + +const ( + // GameServerGroupStatusNew is a GameServerGroupStatus enum value + GameServerGroupStatusNew = "NEW" + + // GameServerGroupStatusActivating is a GameServerGroupStatus enum value + GameServerGroupStatusActivating = "ACTIVATING" + + // GameServerGroupStatusActive is a GameServerGroupStatus enum value + GameServerGroupStatusActive = "ACTIVE" + + // GameServerGroupStatusDeleteScheduled is a GameServerGroupStatus enum value + GameServerGroupStatusDeleteScheduled = "DELETE_SCHEDULED" + + // GameServerGroupStatusDeleting is a GameServerGroupStatus enum value + GameServerGroupStatusDeleting = "DELETING" + + // GameServerGroupStatusDeleted is a GameServerGroupStatus enum value + GameServerGroupStatusDeleted = "DELETED" + + // GameServerGroupStatusError is a GameServerGroupStatus enum value + GameServerGroupStatusError = "ERROR" +) + +// 
GameServerGroupStatus_Values returns all elements of the GameServerGroupStatus enum +func GameServerGroupStatus_Values() []string { + return []string{ + GameServerGroupStatusNew, + GameServerGroupStatusActivating, + GameServerGroupStatusActive, + GameServerGroupStatusDeleteScheduled, + GameServerGroupStatusDeleting, + GameServerGroupStatusDeleted, + GameServerGroupStatusError, + } +} + +const ( + // GameServerHealthCheckHealthy is a GameServerHealthCheck enum value + GameServerHealthCheckHealthy = "HEALTHY" +) + +// GameServerHealthCheck_Values returns all elements of the GameServerHealthCheck enum +func GameServerHealthCheck_Values() []string { + return []string{ + GameServerHealthCheckHealthy, + } +} + +const ( + // GameServerInstanceStatusActive is a GameServerInstanceStatus enum value + GameServerInstanceStatusActive = "ACTIVE" + + // GameServerInstanceStatusDraining is a GameServerInstanceStatus enum value + GameServerInstanceStatusDraining = "DRAINING" + + // GameServerInstanceStatusSpotTerminating is a GameServerInstanceStatus enum value + GameServerInstanceStatusSpotTerminating = "SPOT_TERMINATING" +) + +// GameServerInstanceStatus_Values returns all elements of the GameServerInstanceStatus enum +func GameServerInstanceStatus_Values() []string { + return []string{ + GameServerInstanceStatusActive, + GameServerInstanceStatusDraining, + GameServerInstanceStatusSpotTerminating, + } +} + +const ( + // GameServerProtectionPolicyNoProtection is a GameServerProtectionPolicy enum value + GameServerProtectionPolicyNoProtection = "NO_PROTECTION" + + // GameServerProtectionPolicyFullProtection is a GameServerProtectionPolicy enum value + GameServerProtectionPolicyFullProtection = "FULL_PROTECTION" +) + +// GameServerProtectionPolicy_Values returns all elements of the GameServerProtectionPolicy enum +func GameServerProtectionPolicy_Values() []string { + return []string{ + GameServerProtectionPolicyNoProtection, + GameServerProtectionPolicyFullProtection, + } +} + +const ( + // GameServerUtilizationStatusAvailable is a GameServerUtilizationStatus enum value + GameServerUtilizationStatusAvailable = "AVAILABLE" + + // GameServerUtilizationStatusUtilized is a GameServerUtilizationStatus enum value + GameServerUtilizationStatusUtilized = "UTILIZED" +) + +// GameServerUtilizationStatus_Values returns all elements of the GameServerUtilizationStatus enum +func GameServerUtilizationStatus_Values() []string { + return []string{ + GameServerUtilizationStatusAvailable, + GameServerUtilizationStatusUtilized, + } +} + const ( // GameSessionPlacementStatePending is a GameSessionPlacementState enum value GameSessionPlacementStatePending = "PENDING" @@ -22601,6 +28471,17 @@ const ( GameSessionPlacementStateFailed = "FAILED" ) +// GameSessionPlacementState_Values returns all elements of the GameSessionPlacementState enum +func GameSessionPlacementState_Values() []string { + return []string{ + GameSessionPlacementStatePending, + GameSessionPlacementStateFulfilled, + GameSessionPlacementStateCancelled, + GameSessionPlacementStateTimedOut, + GameSessionPlacementStateFailed, + } +} + const ( // GameSessionStatusActive is a GameSessionStatus enum value GameSessionStatusActive = "ACTIVE" @@ -22618,11 +28499,29 @@ const ( GameSessionStatusError = "ERROR" ) +// GameSessionStatus_Values returns all elements of the GameSessionStatus enum +func GameSessionStatus_Values() []string { + return []string{ + GameSessionStatusActive, + GameSessionStatusActivating, + GameSessionStatusTerminated, + GameSessionStatusTerminating, + 
GameSessionStatusError, + } +} + const ( // GameSessionStatusReasonInterrupted is a GameSessionStatusReason enum value GameSessionStatusReasonInterrupted = "INTERRUPTED" ) +// GameSessionStatusReason_Values returns all elements of the GameSessionStatusReason enum +func GameSessionStatusReason_Values() []string { + return []string{ + GameSessionStatusReasonInterrupted, + } +} + const ( // InstanceStatusPending is a InstanceStatus enum value InstanceStatusPending = "PENDING" @@ -22634,6 +28533,15 @@ const ( InstanceStatusTerminating = "TERMINATING" ) +// InstanceStatus_Values returns all elements of the InstanceStatus enum +func InstanceStatus_Values() []string { + return []string{ + InstanceStatusPending, + InstanceStatusActive, + InstanceStatusTerminating, + } +} + const ( // IpProtocolTcp is a IpProtocol enum value IpProtocolTcp = "TCP" @@ -22642,6 +28550,14 @@ const ( IpProtocolUdp = "UDP" ) +// IpProtocol_Values returns all elements of the IpProtocol enum +func IpProtocol_Values() []string { + return []string{ + IpProtocolTcp, + IpProtocolUdp, + } +} + const ( // MatchmakingConfigurationStatusCancelled is a MatchmakingConfigurationStatus enum value MatchmakingConfigurationStatusCancelled = "CANCELLED" @@ -22668,6 +28584,20 @@ const ( MatchmakingConfigurationStatusTimedOut = "TIMED_OUT" ) +// MatchmakingConfigurationStatus_Values returns all elements of the MatchmakingConfigurationStatus enum +func MatchmakingConfigurationStatus_Values() []string { + return []string{ + MatchmakingConfigurationStatusCancelled, + MatchmakingConfigurationStatusCompleted, + MatchmakingConfigurationStatusFailed, + MatchmakingConfigurationStatusPlacing, + MatchmakingConfigurationStatusQueued, + MatchmakingConfigurationStatusRequiresAcceptance, + MatchmakingConfigurationStatusSearching, + MatchmakingConfigurationStatusTimedOut, + } +} + const ( // MetricNameActivatingGameSessions is a MetricName enum value MetricNameActivatingGameSessions = "ActivatingGameSessions" @@ -22703,6 +28633,23 @@ const ( MetricNameWaitTime = "WaitTime" ) +// MetricName_Values returns all elements of the MetricName enum +func MetricName_Values() []string { + return []string{ + MetricNameActivatingGameSessions, + MetricNameActiveGameSessions, + MetricNameActiveInstances, + MetricNameAvailableGameSessions, + MetricNameAvailablePlayerSessions, + MetricNameCurrentPlayerSessions, + MetricNameIdleInstances, + MetricNamePercentAvailableGameSessions, + MetricNamePercentIdleInstances, + MetricNameQueueDepth, + MetricNameWaitTime, + } +} + const ( // OperatingSystemWindows2012 is a OperatingSystem enum value OperatingSystemWindows2012 = "WINDOWS_2012" @@ -22714,6 +28661,15 @@ const ( OperatingSystemAmazonLinux2 = "AMAZON_LINUX_2" ) +// OperatingSystem_Values returns all elements of the OperatingSystem enum +func OperatingSystem_Values() []string { + return []string{ + OperatingSystemWindows2012, + OperatingSystemAmazonLinux, + OperatingSystemAmazonLinux2, + } +} + const ( // PlayerSessionCreationPolicyAcceptAll is a PlayerSessionCreationPolicy enum value PlayerSessionCreationPolicyAcceptAll = "ACCEPT_ALL" @@ -22722,6 +28678,14 @@ const ( PlayerSessionCreationPolicyDenyAll = "DENY_ALL" ) +// PlayerSessionCreationPolicy_Values returns all elements of the PlayerSessionCreationPolicy enum +func PlayerSessionCreationPolicy_Values() []string { + return []string{ + PlayerSessionCreationPolicyAcceptAll, + PlayerSessionCreationPolicyDenyAll, + } +} + const ( // PlayerSessionStatusReserved is a PlayerSessionStatus enum value PlayerSessionStatusReserved = 
"RESERVED" @@ -22736,6 +28700,16 @@ const ( PlayerSessionStatusTimedout = "TIMEDOUT" ) +// PlayerSessionStatus_Values returns all elements of the PlayerSessionStatus enum +func PlayerSessionStatus_Values() []string { + return []string{ + PlayerSessionStatusReserved, + PlayerSessionStatusActive, + PlayerSessionStatusCompleted, + PlayerSessionStatusTimedout, + } +} + const ( // PolicyTypeRuleBased is a PolicyType enum value PolicyTypeRuleBased = "RuleBased" @@ -22744,6 +28718,14 @@ const ( PolicyTypeTargetBased = "TargetBased" ) +// PolicyType_Values returns all elements of the PolicyType enum +func PolicyType_Values() []string { + return []string{ + PolicyTypeRuleBased, + PolicyTypeTargetBased, + } +} + const ( // ProtectionPolicyNoProtection is a ProtectionPolicy enum value ProtectionPolicyNoProtection = "NoProtection" @@ -22752,6 +28734,14 @@ const ( ProtectionPolicyFullProtection = "FullProtection" ) +// ProtectionPolicy_Values returns all elements of the ProtectionPolicy enum +func ProtectionPolicy_Values() []string { + return []string{ + ProtectionPolicyNoProtection, + ProtectionPolicyFullProtection, + } +} + const ( // RoutingStrategyTypeSimple is a RoutingStrategyType enum value RoutingStrategyTypeSimple = "SIMPLE" @@ -22760,6 +28750,14 @@ const ( RoutingStrategyTypeTerminal = "TERMINAL" ) +// RoutingStrategyType_Values returns all elements of the RoutingStrategyType enum +func RoutingStrategyType_Values() []string { + return []string{ + RoutingStrategyTypeSimple, + RoutingStrategyTypeTerminal, + } +} + const ( // ScalingAdjustmentTypeChangeInCapacity is a ScalingAdjustmentType enum value ScalingAdjustmentTypeChangeInCapacity = "ChangeInCapacity" @@ -22771,6 +28769,15 @@ const ( ScalingAdjustmentTypePercentChangeInCapacity = "PercentChangeInCapacity" ) +// ScalingAdjustmentType_Values returns all elements of the ScalingAdjustmentType enum +func ScalingAdjustmentType_Values() []string { + return []string{ + ScalingAdjustmentTypeChangeInCapacity, + ScalingAdjustmentTypeExactCapacity, + ScalingAdjustmentTypePercentChangeInCapacity, + } +} + const ( // ScalingStatusTypeActive is a ScalingStatusType enum value ScalingStatusTypeActive = "ACTIVE" @@ -22793,3 +28800,32 @@ const ( // ScalingStatusTypeError is a ScalingStatusType enum value ScalingStatusTypeError = "ERROR" ) + +// ScalingStatusType_Values returns all elements of the ScalingStatusType enum +func ScalingStatusType_Values() []string { + return []string{ + ScalingStatusTypeActive, + ScalingStatusTypeUpdateRequested, + ScalingStatusTypeUpdating, + ScalingStatusTypeDeleteRequested, + ScalingStatusTypeDeleting, + ScalingStatusTypeDeleted, + ScalingStatusTypeError, + } +} + +const ( + // SortOrderAscending is a SortOrder enum value + SortOrderAscending = "ASCENDING" + + // SortOrderDescending is a SortOrder enum value + SortOrderDescending = "DESCENDING" +) + +// SortOrder_Values returns all elements of the SortOrder enum +func SortOrder_Values() []string { + return []string{ + SortOrderAscending, + SortOrderDescending, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go index 9c0543341..1e5e92ffb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/doc.go @@ -3,39 +3,43 @@ // Package gamelift provides the client and types for making API // requests to Amazon GameLift. 
// -// Amazon GameLift is a managed service for developers who need a scalable, -// dedicated server solution for their multiplayer games. Use Amazon GameLift -// for these tasks: (1) set up computing resources and deploy your game servers, -// (2) run game sessions and get players into games, (3) automatically scale -// your resources to meet player demand and manage costs, and (4) track in-depth -// metrics on game server performance and player usage. +// GameLift provides solutions for hosting session-based multiplayer game servers +// in the cloud, including tools for deploying, operating, and scaling game +// servers. Built on AWS global computing infrastructure, GameLift helps you +// deliver high-performance, high-reliability, low-cost game servers while dynamically +// scaling your resource usage to meet player demand. // -// When setting up hosting resources, you can deploy your custom game server -// or use the Amazon GameLift Realtime Servers. Realtime Servers gives you the -// ability to quickly stand up lightweight, efficient game servers with the -// core Amazon GameLift infrastructure already built in. +// About GameLift solutions // -// Get Amazon GameLift Tools and Resources +// Get more information on these GameLift solutions in the Amazon GameLift Developer +// Guide (http://docs.aws.amazon.com/gamelift/latest/developerguide/). // -// This reference guide describes the low-level service API for Amazon GameLift -// and provides links to language-specific SDK reference topics. See also Amazon -// GameLift Tools and Resources (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-components.html). +// * Managed GameLift -- GameLift offers a fully managed service to set up +// and maintain computing machines for hosting, manage game session and player +// session life cycle, and handle security, storage, and performance tracking. +// You can use automatic scaling tools to balance hosting costs against meeting +// player demand., configure your game session management to minimize player +// latency, or add FlexMatch for matchmaking. // -// API Summary +// * Managed GameLift with Realtime Servers – With GameLift Realtime Servers, +// you can quickly configure and set up game servers for your game. Realtime +// Servers provides a game server framework with core Amazon GameLift infrastructure +// already built in. // -// The Amazon GameLift service API includes two key sets of actions: +// * GameLift FleetIQ – Use GameLift FleetIQ as a standalone feature while +// managing your own EC2 instances and Auto Scaling groups for game hosting. +// GameLift FleetIQ provides optimizations that make low-cost Spot Instances +// viable for game hosting. // -// * Manage game sessions and player access -- Integrate this functionality -// into game client services in order to create new game sessions, retrieve -// information on existing game sessions; reserve a player slot in a game -// session, request matchmaking, etc. +// About this API Reference // -// * Configure and manage game server resources -- Manage your Amazon GameLift -// hosting resources, including builds, scripts, fleets, queues, and aliases. -// Set up matchmakers, configure auto-scaling, retrieve game logs, and get -// hosting and game metrics. +// This reference guide describes the low-level service API for Amazon GameLift. +// You can find links to language-specific SDK guides and the AWS CLI reference +// with each operation and data type topic. 
Useful links: // -// Task-based list of API actions (https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html) +// * GameLift API operations listed by tasks (https://docs.aws.amazon.com/gamelift/latest/developerguide/reference-awssdk.html) +// +// * GameLift tools and resources (https://docs.aws.amazon.com/gamelift/latest/developerguide/gamelift-components.html) // // See https://docs.aws.amazon.com/goto/WebAPI/gamelift-2015-10-01 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/errors.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/errors.go index f9794d642..ef6de30dc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/gamelift/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/errors.go @@ -82,6 +82,14 @@ const ( // should not retry such requests. ErrCodeNotFoundException = "NotFoundException" + // ErrCodeOutOfCapacityException for service response error code + // "OutOfCapacityException". + // + // The specified game server group has no available game servers to fulfill + // a ClaimGameServer request. Clients can retry such requests immediately or + // after a waiting period. + ErrCodeOutOfCapacityException = "OutOfCapacityException" + // ErrCodeTaggingFailedException for service response error code // "TaggingFailedException". // @@ -124,6 +132,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidRequestException": newErrorInvalidRequestException, "LimitExceededException": newErrorLimitExceededException, "NotFoundException": newErrorNotFoundException, + "OutOfCapacityException": newErrorOutOfCapacityException, "TaggingFailedException": newErrorTaggingFailedException, "TerminalRoutingStrategyException": newErrorTerminalRoutingStrategyException, "UnauthorizedException": newErrorUnauthorizedException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go index b2917a153..9d8ead550 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go index 32e021c87..9201d9da0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go @@ -6179,8 +6179,8 @@ func (s *InputSerialization) SetCsv(v *CSVInput) *InputSerialization { // This error only applies to expedited retrievals and not to standard or bulk // retrievals. type InsufficientCapacityException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"code" type:"string"` @@ -6201,17 +6201,17 @@ func (s InsufficientCapacityException) GoString() string { func newErrorInsufficientCapacityException(v protocol.ResponseMetadata) error { return &InsufficientCapacityException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
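Editor's note: the new OutOfCapacityException code above is documented as retryable ("Clients can retry such requests immediately or after a waiting period"), unlike most other GameLift errors. A sketch of what that retry might look like around ClaimGameServer, illustrative only; the group name, attempt count, and backoff are placeholders.

package example

import (
	"errors"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/gamelift"
)

// claimWithBackoff retries ClaimGameServer only when the failure is the
// retryable capacity error; anything else is returned immediately.
func claimWithBackoff(svc *gamelift.GameLift, groupName string) (*gamelift.GameServer, error) {
	var lastErr error
	for attempt := 0; attempt < 3; attempt++ {
		out, err := svc.ClaimGameServer(&gamelift.ClaimGameServerInput{
			GameServerGroupName: aws.String(groupName),
		})
		if err == nil {
			return out.GameServer, nil
		}
		var ooc *gamelift.OutOfCapacityException
		if !errors.As(err, &ooc) {
			return nil, err // not a capacity problem, do not retry
		}
		lastErr = err
		time.Sleep(time.Duration(attempt+1) * time.Second)
	}
	return nil, lastErr
}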
-func (s InsufficientCapacityException) Code() string { +func (s *InsufficientCapacityException) Code() string { return "InsufficientCapacityException" } // Message returns the exception's message. -func (s InsufficientCapacityException) Message() string { +func (s *InsufficientCapacityException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6219,28 +6219,28 @@ func (s InsufficientCapacityException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InsufficientCapacityException) OrigErr() error { +func (s *InsufficientCapacityException) OrigErr() error { return nil } -func (s InsufficientCapacityException) Error() string { +func (s *InsufficientCapacityException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InsufficientCapacityException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InsufficientCapacityException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InsufficientCapacityException) RequestID() string { - return s.respMetadata.RequestID +func (s *InsufficientCapacityException) RequestID() string { + return s.RespMetadata.RequestID } // Returned if a parameter of the request is incorrectly specified. type InvalidParameterValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // 400 Bad Request Code_ *string `locationName:"code" type:"string"` @@ -6264,17 +6264,17 @@ func (s InvalidParameterValueException) GoString() string { func newErrorInvalidParameterValueException(v protocol.ResponseMetadata) error { return &InvalidParameterValueException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterValueException) Code() string { +func (s *InvalidParameterValueException) Code() string { return "InvalidParameterValueException" } // Message returns the exception's message. -func (s InvalidParameterValueException) Message() string { +func (s *InvalidParameterValueException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6282,22 +6282,22 @@ func (s InvalidParameterValueException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterValueException) OrigErr() error { +func (s *InvalidParameterValueException) OrigErr() error { return nil } -func (s InvalidParameterValueException) Error() string { +func (s *InvalidParameterValueException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterValueException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterValueException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterValueException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterValueException) RequestID() string { + return s.RespMetadata.RequestID } // Describes the options for a range inventory retrieval job. 
@@ -6815,8 +6815,8 @@ func (s *JobParameters) SetType(v string) *JobParameters { // Returned if the request results in a vault or account limit being exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // 400 Bad Request Code_ *string `locationName:"code" type:"string"` @@ -6840,17 +6840,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6858,22 +6858,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Provides options for retrieving a job list for an Amazon S3 Glacier vault. @@ -7588,8 +7588,8 @@ func (s *ListVaultsOutput) SetVaultList(v []*DescribeVaultOutput) *ListVaultsOut // Returned if a required header or parameter is missing from the request. type MissingParameterValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // 400 Bad Request Code_ *string `locationName:"code" type:"string"` @@ -7613,17 +7613,17 @@ func (s MissingParameterValueException) GoString() string { func newErrorMissingParameterValueException(v protocol.ResponseMetadata) error { return &MissingParameterValueException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MissingParameterValueException) Code() string { +func (s *MissingParameterValueException) Code() string { return "MissingParameterValueException" } // Message returns the exception's message. -func (s MissingParameterValueException) Message() string { +func (s *MissingParameterValueException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7631,22 +7631,22 @@ func (s MissingParameterValueException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s MissingParameterValueException) OrigErr() error { +func (s *MissingParameterValueException) OrigErr() error { return nil } -func (s MissingParameterValueException) Error() string { +func (s *MissingParameterValueException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s MissingParameterValueException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MissingParameterValueException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MissingParameterValueException) RequestID() string { - return s.respMetadata.RequestID +func (s *MissingParameterValueException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about the location where the select job results are @@ -7750,8 +7750,8 @@ func (s *PartListElement) SetSHA256TreeHash(v string) *PartListElement { // Returned if a retrieval job would exceed the current data policy's retrieval // rate limit. For more information about data retrieval policies, type PolicyEnforcedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // PolicyEnforcedException Code_ *string `locationName:"code" type:"string"` @@ -7775,17 +7775,17 @@ func (s PolicyEnforcedException) GoString() string { func newErrorPolicyEnforcedException(v protocol.ResponseMetadata) error { return &PolicyEnforcedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PolicyEnforcedException) Code() string { +func (s *PolicyEnforcedException) Code() string { return "PolicyEnforcedException" } // Message returns the exception's message. -func (s PolicyEnforcedException) Message() string { +func (s *PolicyEnforcedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7793,22 +7793,22 @@ func (s PolicyEnforcedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PolicyEnforcedException) OrigErr() error { +func (s *PolicyEnforcedException) OrigErr() error { return nil } -func (s PolicyEnforcedException) Error() string { +func (s *PolicyEnforcedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s PolicyEnforcedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PolicyEnforcedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PolicyEnforcedException) RequestID() string { - return s.respMetadata.RequestID +func (s *PolicyEnforcedException) RequestID() string { + return s.RespMetadata.RequestID } // The definition for a provisioned capacity unit. @@ -8012,8 +8012,8 @@ func (s RemoveTagsFromVaultOutput) GoString() string { // Returned if, when uploading an archive, Amazon S3 Glacier times out while // receiving the upload. 
type RequestTimeoutException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // 408 Request Timeout Code_ *string `locationName:"code" type:"string"` @@ -8038,17 +8038,17 @@ func (s RequestTimeoutException) GoString() string { func newErrorRequestTimeoutException(v protocol.ResponseMetadata) error { return &RequestTimeoutException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RequestTimeoutException) Code() string { +func (s *RequestTimeoutException) Code() string { return "RequestTimeoutException" } // Message returns the exception's message. -func (s RequestTimeoutException) Message() string { +func (s *RequestTimeoutException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8056,29 +8056,29 @@ func (s RequestTimeoutException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RequestTimeoutException) OrigErr() error { +func (s *RequestTimeoutException) OrigErr() error { return nil } -func (s RequestTimeoutException) Error() string { +func (s *RequestTimeoutException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s RequestTimeoutException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RequestTimeoutException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RequestTimeoutException) RequestID() string { - return s.respMetadata.RequestID +func (s *RequestTimeoutException) RequestID() string { + return s.RespMetadata.RequestID } // Returned if the specified resource (such as a vault, upload ID, or job ID) // doesn't exist. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // 404 Not Found Code_ *string `locationName:"code" type:"string"` @@ -8103,17 +8103,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8121,22 +8121,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about the location in Amazon S3 where the select job @@ -8301,8 +8301,8 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec // Returned if the service cannot complete the request. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // 500 Internal Server Error Code_ *string `locationName:"code" type:"string"` @@ -8326,17 +8326,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8344,22 +8344,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // SetDataRetrievalPolicy input. 
@@ -8986,6 +8986,15 @@ const ( ActionCodeSelect = "Select" ) +// ActionCode_Values returns all elements of the ActionCode enum +func ActionCode_Values() []string { + return []string{ + ActionCodeArchiveRetrieval, + ActionCodeInventoryRetrieval, + ActionCodeSelect, + } +} + const ( // CannedACLPrivate is a CannedACL enum value CannedACLPrivate = "private" @@ -9009,6 +9018,19 @@ const ( CannedACLBucketOwnerFullControl = "bucket-owner-full-control" ) +// CannedACL_Values returns all elements of the CannedACL enum +func CannedACL_Values() []string { + return []string{ + CannedACLPrivate, + CannedACLPublicRead, + CannedACLPublicReadWrite, + CannedACLAwsExecRead, + CannedACLAuthenticatedRead, + CannedACLBucketOwnerRead, + CannedACLBucketOwnerFullControl, + } +} + const ( // EncryptionTypeAwsKms is a EncryptionType enum value EncryptionTypeAwsKms = "aws:kms" @@ -9017,11 +9039,26 @@ const ( EncryptionTypeAes256 = "AES256" ) +// EncryptionType_Values returns all elements of the EncryptionType enum +func EncryptionType_Values() []string { + return []string{ + EncryptionTypeAwsKms, + EncryptionTypeAes256, + } +} + const ( // ExpressionTypeSql is a ExpressionType enum value ExpressionTypeSql = "SQL" ) +// ExpressionType_Values returns all elements of the ExpressionType enum +func ExpressionType_Values() []string { + return []string{ + ExpressionTypeSql, + } +} + const ( // FileHeaderInfoUse is a FileHeaderInfo enum value FileHeaderInfoUse = "USE" @@ -9033,6 +9070,15 @@ const ( FileHeaderInfoNone = "NONE" ) +// FileHeaderInfo_Values returns all elements of the FileHeaderInfo enum +func FileHeaderInfo_Values() []string { + return []string{ + FileHeaderInfoUse, + FileHeaderInfoIgnore, + FileHeaderInfoNone, + } +} + const ( // PermissionFullControl is a Permission enum value PermissionFullControl = "FULL_CONTROL" @@ -9050,6 +9096,17 @@ const ( PermissionReadAcp = "READ_ACP" ) +// Permission_Values returns all elements of the Permission enum +func Permission_Values() []string { + return []string{ + PermissionFullControl, + PermissionWrite, + PermissionWriteAcp, + PermissionRead, + PermissionReadAcp, + } +} + const ( // QuoteFieldsAlways is a QuoteFields enum value QuoteFieldsAlways = "ALWAYS" @@ -9058,6 +9115,14 @@ const ( QuoteFieldsAsneeded = "ASNEEDED" ) +// QuoteFields_Values returns all elements of the QuoteFields enum +func QuoteFields_Values() []string { + return []string{ + QuoteFieldsAlways, + QuoteFieldsAsneeded, + } +} + const ( // StatusCodeInProgress is a StatusCode enum value StatusCodeInProgress = "InProgress" @@ -9069,6 +9134,15 @@ const ( StatusCodeFailed = "Failed" ) +// StatusCode_Values returns all elements of the StatusCode enum +func StatusCode_Values() []string { + return []string{ + StatusCodeInProgress, + StatusCodeSucceeded, + StatusCodeFailed, + } +} + const ( // StorageClassStandard is a StorageClass enum value StorageClassStandard = "STANDARD" @@ -9080,6 +9154,15 @@ const ( StorageClassStandardIa = "STANDARD_IA" ) +// StorageClass_Values returns all elements of the StorageClass enum +func StorageClass_Values() []string { + return []string{ + StorageClassStandard, + StorageClassReducedRedundancy, + StorageClassStandardIa, + } +} + const ( // TypeAmazonCustomerByEmail is a Type enum value TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" @@ -9090,3 +9173,12 @@ const ( // TypeGroup is a Type enum value TypeGroup = "Group" ) + +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeAmazonCustomerByEmail, + TypeCanonicalUser, + 
TypeGroup, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go index 40b9be9ff..5679e7933 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/api.go b/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/api.go index 1470135e8..49b904885 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/api.go @@ -1013,7 +1013,8 @@ func (c *GlobalAccelerator) DescribeEndpointGroupRequest(input *DescribeEndpoint // DescribeEndpointGroup API operation for AWS Global Accelerator. // -// Describe an endpoint group. +// Describe an endpoint group. To see an AWS CLI example of describing an endpoint +// group, scroll down to Example. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1271,7 +1272,8 @@ func (c *GlobalAccelerator) ListByoipCidrsRequest(input *ListByoipCidrsInput) (r // ListByoipCidrs API operation for AWS Global Accelerator. // // Lists the IP address ranges that were specified in calls to ProvisionByoipCidr -// (https://docs.aws.amazon.com/global-accelerator/latest/api/ProvisionByoipCidr.html). +// (https://docs.aws.amazon.com/global-accelerator/latest/api/ProvisionByoipCidr.html), +// including the current state and a history of state changes. // // To see an AWS CLI example of listing BYOIP CIDR addresses, scroll down to // Example. @@ -2505,8 +2507,8 @@ func (s *AcceleratorAttributes) SetFlowLogsS3Prefix(v string) *AcceleratorAttrib // The accelerator that you specified could not be disabled. type AcceleratorNotDisabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2523,17 +2525,17 @@ func (s AcceleratorNotDisabledException) GoString() string { func newErrorAcceleratorNotDisabledException(v protocol.ResponseMetadata) error { return &AcceleratorNotDisabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AcceleratorNotDisabledException) Code() string { +func (s *AcceleratorNotDisabledException) Code() string { return "AcceleratorNotDisabledException" } // Message returns the exception's message. -func (s AcceleratorNotDisabledException) Message() string { +func (s *AcceleratorNotDisabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2541,28 +2543,28 @@ func (s AcceleratorNotDisabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s AcceleratorNotDisabledException) OrigErr() error { +func (s *AcceleratorNotDisabledException) OrigErr() error { return nil } -func (s AcceleratorNotDisabledException) Error() string { +func (s *AcceleratorNotDisabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AcceleratorNotDisabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AcceleratorNotDisabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AcceleratorNotDisabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *AcceleratorNotDisabledException) RequestID() string { + return s.RespMetadata.RequestID } // The accelerator that you specified doesn't exist. type AcceleratorNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2579,17 +2581,17 @@ func (s AcceleratorNotFoundException) GoString() string { func newErrorAcceleratorNotFoundException(v protocol.ResponseMetadata) error { return &AcceleratorNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AcceleratorNotFoundException) Code() string { +func (s *AcceleratorNotFoundException) Code() string { return "AcceleratorNotFoundException" } // Message returns the exception's message. -func (s AcceleratorNotFoundException) Message() string { +func (s *AcceleratorNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2597,28 +2599,28 @@ func (s AcceleratorNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AcceleratorNotFoundException) OrigErr() error { +func (s *AcceleratorNotFoundException) OrigErr() error { return nil } -func (s AcceleratorNotFoundException) Error() string { +func (s *AcceleratorNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AcceleratorNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AcceleratorNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AcceleratorNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *AcceleratorNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // You don't have access permission. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2635,17 +2637,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. 
-func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2653,22 +2655,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } type AdvertiseByoipCidrInput struct { @@ -2737,8 +2739,8 @@ func (s *AdvertiseByoipCidrOutput) SetByoipCidr(v *ByoipCidr) *AdvertiseByoipCid // You must remove all dependent resources from a listener before you can delete // it. type AssociatedEndpointGroupFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2755,17 +2757,17 @@ func (s AssociatedEndpointGroupFoundException) GoString() string { func newErrorAssociatedEndpointGroupFoundException(v protocol.ResponseMetadata) error { return &AssociatedEndpointGroupFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AssociatedEndpointGroupFoundException) Code() string { +func (s *AssociatedEndpointGroupFoundException) Code() string { return "AssociatedEndpointGroupFoundException" } // Message returns the exception's message. -func (s AssociatedEndpointGroupFoundException) Message() string { +func (s *AssociatedEndpointGroupFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2773,30 +2775,30 @@ func (s AssociatedEndpointGroupFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AssociatedEndpointGroupFoundException) OrigErr() error { +func (s *AssociatedEndpointGroupFoundException) OrigErr() error { return nil } -func (s AssociatedEndpointGroupFoundException) Error() string { +func (s *AssociatedEndpointGroupFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AssociatedEndpointGroupFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AssociatedEndpointGroupFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AssociatedEndpointGroupFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *AssociatedEndpointGroupFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The accelerator that you specified has a listener associated with it. You // must remove all dependent resources from an accelerator before you can delete // it. 
type AssociatedListenerFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2813,17 +2815,17 @@ func (s AssociatedListenerFoundException) GoString() string { func newErrorAssociatedListenerFoundException(v protocol.ResponseMetadata) error { return &AssociatedListenerFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AssociatedListenerFoundException) Code() string { +func (s *AssociatedListenerFoundException) Code() string { return "AssociatedListenerFoundException" } // Message returns the exception's message. -func (s AssociatedListenerFoundException) Message() string { +func (s *AssociatedListenerFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2831,26 +2833,26 @@ func (s AssociatedListenerFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AssociatedListenerFoundException) OrigErr() error { +func (s *AssociatedListenerFoundException) OrigErr() error { return nil } -func (s AssociatedListenerFoundException) Error() string { +func (s *AssociatedListenerFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AssociatedListenerFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AssociatedListenerFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AssociatedListenerFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *AssociatedListenerFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Information about an IP address range that is provisioned for use with your -// AWS resources through bring your own IP addresses (BYOIP). +// AWS resources through bring your own IP address (BYOIP). // // The following describes each BYOIP State that your IP address range can be // in. @@ -2902,6 +2904,10 @@ type ByoipCidr struct { // The address range, in CIDR notation. Cidr *string `type:"string"` + // A history of status changes for an IP address range that that you bring to + // AWS Global Accelerator through bring your own IP address (BYOIP). + Events []*ByoipCidrEvent `type:"list"` + // The state of the address pool. State *string `type:"string" enum:"ByoipCidrState"` } @@ -2922,16 +2928,60 @@ func (s *ByoipCidr) SetCidr(v string) *ByoipCidr { return s } +// SetEvents sets the Events field's value. +func (s *ByoipCidr) SetEvents(v []*ByoipCidrEvent) *ByoipCidr { + s.Events = v + return s +} + // SetState sets the State field's value. func (s *ByoipCidr) SetState(v string) *ByoipCidr { s.State = &v return s } +// A complex type that contains a Message and a Timestamp value for changes +// that you make in the status an IP address range that you bring to AWS Global +// Accelerator through bring your own IP address (BYOIP). +type ByoipCidrEvent struct { + _ struct{} `type:"structure"` + + // A string that contains an Event message describing changes that you make + // in the status of an IP address range that you bring to AWS Global Accelerator + // through bring your own IP address (BYOIP). 
+ Message *string `type:"string"` + + // A timestamp when you make a status change for an IP address range that you + // bring to AWS Global Accelerator through bring your own IP address (BYOIP). + Timestamp *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s ByoipCidrEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ByoipCidrEvent) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *ByoipCidrEvent) SetMessage(v string) *ByoipCidrEvent { + s.Message = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *ByoipCidrEvent) SetTimestamp(v time.Time) *ByoipCidrEvent { + s.Timestamp = &v + return s +} + // The CIDR that you specified was not found or is incorrect. type ByoipCidrNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2948,17 +2998,17 @@ func (s ByoipCidrNotFoundException) GoString() string { func newErrorByoipCidrNotFoundException(v protocol.ResponseMetadata) error { return &ByoipCidrNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ByoipCidrNotFoundException) Code() string { +func (s *ByoipCidrNotFoundException) Code() string { return "ByoipCidrNotFoundException" } // Message returns the exception's message. -func (s ByoipCidrNotFoundException) Message() string { +func (s *ByoipCidrNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2966,22 +3016,22 @@ func (s ByoipCidrNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ByoipCidrNotFoundException) OrigErr() error { +func (s *ByoipCidrNotFoundException) OrigErr() error { return nil } -func (s ByoipCidrNotFoundException) Error() string { +func (s *ByoipCidrNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ByoipCidrNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ByoipCidrNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ByoipCidrNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ByoipCidrNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Provides authorization for Amazon to bring a specific IP address range to @@ -3966,8 +4016,8 @@ type EndpointConfiguration struct { // X-Forwarded-For request header as traffic travels to applications on the // Application Load Balancer endpoint fronted by the accelerator. // - // For more information, see Viewing Client IP Addresses in AWS Global Accelerator - // (https://docs.aws.amazon.com/global-accelerator/latest/dg/introduction-how-it-works-client-ip.html) + // For more information, see Preserve Client IP Addresses in AWS Global Accelerator + // (https://docs.aws.amazon.com/global-accelerator/latest/dg/preserve-client-ip-address.html) // in the AWS Global Accelerator Developer Guide. 
ClientIPPreservationEnabled *bool `type:"boolean"` @@ -4237,8 +4287,8 @@ func (s *EndpointGroup) SetTrafficDialPercentage(v float64) *EndpointGroup { // The endpoint group that you specified already exists. type EndpointGroupAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4255,17 +4305,17 @@ func (s EndpointGroupAlreadyExistsException) GoString() string { func newErrorEndpointGroupAlreadyExistsException(v protocol.ResponseMetadata) error { return &EndpointGroupAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EndpointGroupAlreadyExistsException) Code() string { +func (s *EndpointGroupAlreadyExistsException) Code() string { return "EndpointGroupAlreadyExistsException" } // Message returns the exception's message. -func (s EndpointGroupAlreadyExistsException) Message() string { +func (s *EndpointGroupAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4273,28 +4323,28 @@ func (s EndpointGroupAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EndpointGroupAlreadyExistsException) OrigErr() error { +func (s *EndpointGroupAlreadyExistsException) OrigErr() error { return nil } -func (s EndpointGroupAlreadyExistsException) Error() string { +func (s *EndpointGroupAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EndpointGroupAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EndpointGroupAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EndpointGroupAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *EndpointGroupAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The endpoint group that you specified doesn't exist. type EndpointGroupNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4311,17 +4361,17 @@ func (s EndpointGroupNotFoundException) GoString() string { func newErrorEndpointGroupNotFoundException(v protocol.ResponseMetadata) error { return &EndpointGroupNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EndpointGroupNotFoundException) Code() string { +func (s *EndpointGroupNotFoundException) Code() string { return "EndpointGroupNotFoundException" } // Message returns the exception's message. -func (s EndpointGroupNotFoundException) Message() string { +func (s *EndpointGroupNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4329,29 +4379,29 @@ func (s EndpointGroupNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s EndpointGroupNotFoundException) OrigErr() error { +func (s *EndpointGroupNotFoundException) OrigErr() error { return nil } -func (s EndpointGroupNotFoundException) Error() string { +func (s *EndpointGroupNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EndpointGroupNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EndpointGroupNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EndpointGroupNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *EndpointGroupNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The CIDR that you specified is not valid for this action. For example, the // state of the CIDR might be incorrect for this action. type IncorrectCidrStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4368,17 +4418,17 @@ func (s IncorrectCidrStateException) GoString() string { func newErrorIncorrectCidrStateException(v protocol.ResponseMetadata) error { return &IncorrectCidrStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncorrectCidrStateException) Code() string { +func (s *IncorrectCidrStateException) Code() string { return "IncorrectCidrStateException" } // Message returns the exception's message. -func (s IncorrectCidrStateException) Message() string { +func (s *IncorrectCidrStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4386,28 +4436,28 @@ func (s IncorrectCidrStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IncorrectCidrStateException) OrigErr() error { +func (s *IncorrectCidrStateException) OrigErr() error { return nil } -func (s IncorrectCidrStateException) Error() string { +func (s *IncorrectCidrStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IncorrectCidrStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncorrectCidrStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IncorrectCidrStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *IncorrectCidrStateException) RequestID() string { + return s.RespMetadata.RequestID } // There was an internal error for AWS Global Accelerator. type InternalServiceErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4424,17 +4474,17 @@ func (s InternalServiceErrorException) GoString() string { func newErrorInternalServiceErrorException(v protocol.ResponseMetadata) error { return &InternalServiceErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InternalServiceErrorException) Code() string { +func (s *InternalServiceErrorException) Code() string { return "InternalServiceErrorException" } // Message returns the exception's message. -func (s InternalServiceErrorException) Message() string { +func (s *InternalServiceErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4442,28 +4492,28 @@ func (s InternalServiceErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServiceErrorException) OrigErr() error { +func (s *InternalServiceErrorException) OrigErr() error { return nil } -func (s InternalServiceErrorException) Error() string { +func (s *InternalServiceErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceErrorException) RequestID() string { + return s.RespMetadata.RequestID } // An argument that you specified is invalid. type InvalidArgumentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4480,17 +4530,17 @@ func (s InvalidArgumentException) GoString() string { func newErrorInvalidArgumentException(v protocol.ResponseMetadata) error { return &InvalidArgumentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArgumentException) Code() string { +func (s *InvalidArgumentException) Code() string { return "InvalidArgumentException" } // Message returns the exception's message. -func (s InvalidArgumentException) Message() string { +func (s *InvalidArgumentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4498,28 +4548,28 @@ func (s InvalidArgumentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArgumentException) OrigErr() error { +func (s *InvalidArgumentException) OrigErr() error { return nil } -func (s InvalidArgumentException) Error() string { +func (s *InvalidArgumentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArgumentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArgumentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArgumentException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArgumentException) RequestID() string { + return s.RespMetadata.RequestID } // There isn't another item to return. 
type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4536,17 +4586,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4554,29 +4604,29 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The port numbers that you specified are not valid numbers or are not unique // for this accelerator. type InvalidPortRangeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4593,17 +4643,17 @@ func (s InvalidPortRangeException) GoString() string { func newErrorInvalidPortRangeException(v protocol.ResponseMetadata) error { return &InvalidPortRangeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPortRangeException) Code() string { +func (s *InvalidPortRangeException) Code() string { return "InvalidPortRangeException" } // Message returns the exception's message. -func (s InvalidPortRangeException) Message() string { +func (s *InvalidPortRangeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4611,22 +4661,22 @@ func (s InvalidPortRangeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPortRangeException) OrigErr() error { +func (s *InvalidPortRangeException) OrigErr() error { return nil } -func (s InvalidPortRangeException) Error() string { +func (s *InvalidPortRangeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPortRangeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPortRangeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidPortRangeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPortRangeException) RequestID() string { + return s.RespMetadata.RequestID } // A complex type for the set of IP addresses for an accelerator. @@ -4666,8 +4716,8 @@ func (s *IpSet) SetIpFamily(v string) *IpSet { // Processing your request would cause you to exceed an AWS Global Accelerator // limit. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4684,17 +4734,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4702,22 +4752,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListAcceleratorsInput struct { @@ -5204,8 +5254,8 @@ func (s *Listener) SetProtocol(v string) *Listener { // The listener that you specified doesn't exist. type ListenerNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5222,17 +5272,17 @@ func (s ListenerNotFoundException) GoString() string { func newErrorListenerNotFoundException(v protocol.ResponseMetadata) error { return &ListenerNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ListenerNotFoundException) Code() string { +func (s *ListenerNotFoundException) Code() string { return "ListenerNotFoundException" } // Message returns the exception's message. -func (s ListenerNotFoundException) Message() string { +func (s *ListenerNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5240,22 +5290,22 @@ func (s ListenerNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ListenerNotFoundException) OrigErr() error { +func (s *ListenerNotFoundException) OrigErr() error { return nil } -func (s ListenerNotFoundException) Error() string { +func (s *ListenerNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ListenerNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ListenerNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ListenerNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ListenerNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // A complex type for a range of ports for a listener. @@ -6129,6 +6179,14 @@ const ( AcceleratorStatusInProgress = "IN_PROGRESS" ) +// AcceleratorStatus_Values returns all elements of the AcceleratorStatus enum +func AcceleratorStatus_Values() []string { + return []string{ + AcceleratorStatusDeployed, + AcceleratorStatusInProgress, + } +} + const ( // ByoipCidrStatePendingProvisioning is a ByoipCidrState enum value ByoipCidrStatePendingProvisioning = "PENDING_PROVISIONING" @@ -6164,6 +6222,23 @@ const ( ByoipCidrStateFailedDeprovision = "FAILED_DEPROVISION" ) +// ByoipCidrState_Values returns all elements of the ByoipCidrState enum +func ByoipCidrState_Values() []string { + return []string{ + ByoipCidrStatePendingProvisioning, + ByoipCidrStateReady, + ByoipCidrStatePendingAdvertising, + ByoipCidrStateAdvertising, + ByoipCidrStatePendingWithdrawing, + ByoipCidrStatePendingDeprovisioning, + ByoipCidrStateDeprovisioned, + ByoipCidrStateFailedProvision, + ByoipCidrStateFailedAdvertising, + ByoipCidrStateFailedWithdraw, + ByoipCidrStateFailedDeprovision, + } +} + const ( // ClientAffinityNone is a ClientAffinity enum value ClientAffinityNone = "NONE" @@ -6172,6 +6247,14 @@ const ( ClientAffinitySourceIp = "SOURCE_IP" ) +// ClientAffinity_Values returns all elements of the ClientAffinity enum +func ClientAffinity_Values() []string { + return []string{ + ClientAffinityNone, + ClientAffinitySourceIp, + } +} + const ( // HealthCheckProtocolTcp is a HealthCheckProtocol enum value HealthCheckProtocolTcp = "TCP" @@ -6183,6 +6266,15 @@ const ( HealthCheckProtocolHttps = "HTTPS" ) +// HealthCheckProtocol_Values returns all elements of the HealthCheckProtocol enum +func HealthCheckProtocol_Values() []string { + return []string{ + HealthCheckProtocolTcp, + HealthCheckProtocolHttp, + HealthCheckProtocolHttps, + } +} + const ( // HealthStateInitial is a HealthState enum value HealthStateInitial = "INITIAL" @@ -6194,11 +6286,27 @@ const ( HealthStateUnhealthy = "UNHEALTHY" ) +// HealthState_Values returns all elements of the HealthState enum +func HealthState_Values() []string { + return []string{ + HealthStateInitial, + HealthStateHealthy, + HealthStateUnhealthy, + } +} + const ( // IpAddressTypeIpv4 is a IpAddressType enum value IpAddressTypeIpv4 = "IPV4" ) +// IpAddressType_Values returns all elements of the IpAddressType enum +func IpAddressType_Values() []string { + return []string{ + IpAddressTypeIpv4, + } +} + const ( // ProtocolTcp is a Protocol enum value ProtocolTcp = "TCP" @@ -6206,3 +6314,11 @@ const ( // ProtocolUdp is a Protocol enum value ProtocolUdp = "UDP" ) + +// Protocol_Values returns all elements of the Protocol enum +func Protocol_Values() []string { + return []string{ + ProtocolTcp, + ProtocolUdp, + } +} 
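Aside (not part of the patch): the enum helper functions vendored above, such as Protocol_Values() and ClientAffinity_Values(), are what let provider resource schemas validate arguments without hard-coding the allowed strings. The sketch below is a hypothetical illustration of that pattern; the listenerProtocolSchema name is invented, and the terraform-plugin-sdk import paths are assumed to match whichever SDK version this repository vendors (the StringInSlice helper has the same signature in v1 and v2).

// Hypothetical sketch, not part of this patch: wiring a generated
// *_Values() enum helper into a Terraform schema validator.
package aws

import (
	"github.com/aws/aws-sdk-go/service/globalaccelerator"
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"     // assumed plugin SDK v1 path; use .../v2/... if the repo is on SDK v2
	"github.com/hashicorp/terraform-plugin-sdk/helper/validation" // same assumption as above
)

// listenerProtocolSchema shows how Protocol_Values() could back a
// StringInSlice validator instead of a hard-coded []string{"TCP", "UDP"}.
func listenerProtocolSchema() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Required:     true,
		ValidateFunc: validation.StringInSlice(globalaccelerator.Protocol_Values(), false),
	}
}

The same approach applies to the other helpers added in this diff (HealthCheckProtocol_Values, ByoipCidrState_Values, and so on), keeping schema validation in sync with whatever the vendored SDK currently accepts.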
diff --git a/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go b/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go index 0da801b27..37df58b08 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/glue/api.go b/vendor/github.com/aws/aws-sdk-go/service/glue/api.go index b6f7683dd..1dc8de834 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/glue/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/glue/api.go @@ -1081,6 +1081,97 @@ func (c *Glue) BatchStopJobRunWithContext(ctx aws.Context, input *BatchStopJobRu return out, req.Send() } +const opBatchUpdatePartition = "BatchUpdatePartition" + +// BatchUpdatePartitionRequest generates a "aws/request.Request" representing the +// client's request for the BatchUpdatePartition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchUpdatePartition for more information on using the BatchUpdatePartition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchUpdatePartitionRequest method. +// req, resp := client.BatchUpdatePartitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchUpdatePartition +func (c *Glue) BatchUpdatePartitionRequest(input *BatchUpdatePartitionInput) (req *request.Request, output *BatchUpdatePartitionOutput) { + op := &request.Operation{ + Name: opBatchUpdatePartition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchUpdatePartitionInput{} + } + + output = &BatchUpdatePartitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchUpdatePartition API operation for AWS Glue. +// +// Updates one or more partitions in a batch operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation BatchUpdatePartition for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input provided was not valid. +// +// * EntityNotFoundException +// A specified entity does not exist +// +// * OperationTimeoutException +// The operation timed out. +// +// * InternalServiceException +// An internal service error occurred. +// +// * EncryptionException +// An encryption operation failed. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/BatchUpdatePartition +func (c *Glue) BatchUpdatePartition(input *BatchUpdatePartitionInput) (*BatchUpdatePartitionOutput, error) { + req, out := c.BatchUpdatePartitionRequest(input) + return out, req.Send() +} + +// BatchUpdatePartitionWithContext is the same as BatchUpdatePartition with the addition of +// the ability to pass a context and additional request options. +// +// See BatchUpdatePartition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) BatchUpdatePartitionWithContext(ctx aws.Context, input *BatchUpdatePartitionInput, opts ...request.Option) (*BatchUpdatePartitionOutput, error) { + req, out := c.BatchUpdatePartitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCancelMLTaskRun = "CancelMLTaskRun" // CancelMLTaskRunRequest generates a "aws/request.Request" representing the @@ -2595,6 +2686,190 @@ func (c *Glue) DeleteClassifierWithContext(ctx aws.Context, input *DeleteClassif return out, req.Send() } +const opDeleteColumnStatisticsForPartition = "DeleteColumnStatisticsForPartition" + +// DeleteColumnStatisticsForPartitionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteColumnStatisticsForPartition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteColumnStatisticsForPartition for more information on using the DeleteColumnStatisticsForPartition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteColumnStatisticsForPartitionRequest method. +// req, resp := client.DeleteColumnStatisticsForPartitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteColumnStatisticsForPartition +func (c *Glue) DeleteColumnStatisticsForPartitionRequest(input *DeleteColumnStatisticsForPartitionInput) (req *request.Request, output *DeleteColumnStatisticsForPartitionOutput) { + op := &request.Operation{ + Name: opDeleteColumnStatisticsForPartition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteColumnStatisticsForPartitionInput{} + } + + output = &DeleteColumnStatisticsForPartitionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteColumnStatisticsForPartition API operation for AWS Glue. +// +// Delete the partition column statistics of a column. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
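For the new Glue BatchUpdatePartition operation introduced in this hunk, a hedged usage sketch: the database, table, and partition values are placeholders, and the Values field on the pre-existing glue.PartitionInput type is assumed from the Glue API reference since it is not shown here.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// Up to 100 entries per call; each entry pairs the partition key values with
	// the replacement PartitionInput.
	out, err := svc.BatchUpdatePartition(&glue.BatchUpdatePartitionInput{
		DatabaseName: aws.String("example_db"),    // placeholder
		TableName:    aws.String("example_table"), // placeholder
		Entries: []*glue.BatchUpdatePartitionRequestEntry{
			{
				PartitionValueList: []*string{aws.String("2020"), aws.String("07")},
				PartitionInput: &glue.PartitionInput{
					Values: []*string{aws.String("2020"), aws.String("07")}, // assumed field name; not shown in this hunk
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Per-partition failures come back in Errors rather than failing the whole call.
	for _, e := range out.Errors {
		fmt.Println(aws.StringValueSlice(e.PartitionValueList), e.ErrorDetail)
	}
}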
+// +// See the AWS API reference guide for AWS Glue's +// API operation DeleteColumnStatisticsForPartition for usage and error information. +// +// Returned Error Types: +// * EntityNotFoundException +// A specified entity does not exist +// +// * InvalidInputException +// The input provided was not valid. +// +// * InternalServiceException +// An internal service error occurred. +// +// * OperationTimeoutException +// The operation timed out. +// +// * EncryptionException +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteColumnStatisticsForPartition +func (c *Glue) DeleteColumnStatisticsForPartition(input *DeleteColumnStatisticsForPartitionInput) (*DeleteColumnStatisticsForPartitionOutput, error) { + req, out := c.DeleteColumnStatisticsForPartitionRequest(input) + return out, req.Send() +} + +// DeleteColumnStatisticsForPartitionWithContext is the same as DeleteColumnStatisticsForPartition with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteColumnStatisticsForPartition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) DeleteColumnStatisticsForPartitionWithContext(ctx aws.Context, input *DeleteColumnStatisticsForPartitionInput, opts ...request.Option) (*DeleteColumnStatisticsForPartitionOutput, error) { + req, out := c.DeleteColumnStatisticsForPartitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteColumnStatisticsForTable = "DeleteColumnStatisticsForTable" + +// DeleteColumnStatisticsForTableRequest generates a "aws/request.Request" representing the +// client's request for the DeleteColumnStatisticsForTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteColumnStatisticsForTable for more information on using the DeleteColumnStatisticsForTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteColumnStatisticsForTableRequest method. 
+// req, resp := client.DeleteColumnStatisticsForTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteColumnStatisticsForTable +func (c *Glue) DeleteColumnStatisticsForTableRequest(input *DeleteColumnStatisticsForTableInput) (req *request.Request, output *DeleteColumnStatisticsForTableOutput) { + op := &request.Operation{ + Name: opDeleteColumnStatisticsForTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteColumnStatisticsForTableInput{} + } + + output = &DeleteColumnStatisticsForTableOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteColumnStatisticsForTable API operation for AWS Glue. +// +// Retrieves table statistics of columns. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation DeleteColumnStatisticsForTable for usage and error information. +// +// Returned Error Types: +// * EntityNotFoundException +// A specified entity does not exist +// +// * InvalidInputException +// The input provided was not valid. +// +// * InternalServiceException +// An internal service error occurred. +// +// * OperationTimeoutException +// The operation timed out. +// +// * EncryptionException +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/DeleteColumnStatisticsForTable +func (c *Glue) DeleteColumnStatisticsForTable(input *DeleteColumnStatisticsForTableInput) (*DeleteColumnStatisticsForTableOutput, error) { + req, out := c.DeleteColumnStatisticsForTableRequest(input) + return out, req.Send() +} + +// DeleteColumnStatisticsForTableWithContext is the same as DeleteColumnStatisticsForTable with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteColumnStatisticsForTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) DeleteColumnStatisticsForTableWithContext(ctx aws.Context, input *DeleteColumnStatisticsForTableInput, opts ...request.Option) (*DeleteColumnStatisticsForTableOutput, error) { + req, out := c.DeleteColumnStatisticsForTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteConnection = "DeleteConnection" // DeleteConnectionRequest generates a "aws/request.Request" representing the @@ -4160,6 +4435,188 @@ func (c *Glue) GetClassifiersPagesWithContext(ctx aws.Context, input *GetClassif return p.Err() } +const opGetColumnStatisticsForPartition = "GetColumnStatisticsForPartition" + +// GetColumnStatisticsForPartitionRequest generates a "aws/request.Request" representing the +// client's request for the GetColumnStatisticsForPartition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
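The two delete operations above remove stored column statistics (the generated summary for DeleteColumnStatisticsForTable reads "Retrieves table statistics of columns", but the call deletes them). A minimal sketch for the partition variant, with placeholder names; the PartitionValues and ColumnName input fields follow the Glue API reference and are not shown in this hunk.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// Drop the stored statistics for one column of one partition.
	_, err := svc.DeleteColumnStatisticsForPartition(&glue.DeleteColumnStatisticsForPartitionInput{
		DatabaseName:    aws.String("example_db"),    // placeholder
		TableName:       aws.String("example_table"), // placeholder
		PartitionValues: []*string{aws.String("2020"), aws.String("07")},
		ColumnName:      aws.String("example_column"),
	})
	if err != nil {
		log.Fatal(err)
	}
}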
+// the "output" return value is not valid until after Send returns without error. +// +// See GetColumnStatisticsForPartition for more information on using the GetColumnStatisticsForPartition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetColumnStatisticsForPartitionRequest method. +// req, resp := client.GetColumnStatisticsForPartitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetColumnStatisticsForPartition +func (c *Glue) GetColumnStatisticsForPartitionRequest(input *GetColumnStatisticsForPartitionInput) (req *request.Request, output *GetColumnStatisticsForPartitionOutput) { + op := &request.Operation{ + Name: opGetColumnStatisticsForPartition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetColumnStatisticsForPartitionInput{} + } + + output = &GetColumnStatisticsForPartitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetColumnStatisticsForPartition API operation for AWS Glue. +// +// Retrieves partition statistics of columns. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation GetColumnStatisticsForPartition for usage and error information. +// +// Returned Error Types: +// * EntityNotFoundException +// A specified entity does not exist +// +// * InvalidInputException +// The input provided was not valid. +// +// * InternalServiceException +// An internal service error occurred. +// +// * OperationTimeoutException +// The operation timed out. +// +// * EncryptionException +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetColumnStatisticsForPartition +func (c *Glue) GetColumnStatisticsForPartition(input *GetColumnStatisticsForPartitionInput) (*GetColumnStatisticsForPartitionOutput, error) { + req, out := c.GetColumnStatisticsForPartitionRequest(input) + return out, req.Send() +} + +// GetColumnStatisticsForPartitionWithContext is the same as GetColumnStatisticsForPartition with the addition of +// the ability to pass a context and additional request options. +// +// See GetColumnStatisticsForPartition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) GetColumnStatisticsForPartitionWithContext(ctx aws.Context, input *GetColumnStatisticsForPartitionInput, opts ...request.Option) (*GetColumnStatisticsForPartitionOutput, error) { + req, out := c.GetColumnStatisticsForPartitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetColumnStatisticsForTable = "GetColumnStatisticsForTable" + +// GetColumnStatisticsForTableRequest generates a "aws/request.Request" representing the +// client's request for the GetColumnStatisticsForTable operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetColumnStatisticsForTable for more information on using the GetColumnStatisticsForTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetColumnStatisticsForTableRequest method. +// req, resp := client.GetColumnStatisticsForTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetColumnStatisticsForTable +func (c *Glue) GetColumnStatisticsForTableRequest(input *GetColumnStatisticsForTableInput) (req *request.Request, output *GetColumnStatisticsForTableOutput) { + op := &request.Operation{ + Name: opGetColumnStatisticsForTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetColumnStatisticsForTableInput{} + } + + output = &GetColumnStatisticsForTableOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetColumnStatisticsForTable API operation for AWS Glue. +// +// Retrieves table statistics of columns. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation GetColumnStatisticsForTable for usage and error information. +// +// Returned Error Types: +// * EntityNotFoundException +// A specified entity does not exist +// +// * InvalidInputException +// The input provided was not valid. +// +// * InternalServiceException +// An internal service error occurred. +// +// * OperationTimeoutException +// The operation timed out. +// +// * EncryptionException +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetColumnStatisticsForTable +func (c *Glue) GetColumnStatisticsForTable(input *GetColumnStatisticsForTableInput) (*GetColumnStatisticsForTableOutput, error) { + req, out := c.GetColumnStatisticsForTableRequest(input) + return out, req.Send() +} + +// GetColumnStatisticsForTableWithContext is the same as GetColumnStatisticsForTable with the addition of +// the ability to pass a context and additional request options. +// +// See GetColumnStatisticsForTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) GetColumnStatisticsForTableWithContext(ctx aws.Context, input *GetColumnStatisticsForTableInput, opts ...request.Option) (*GetColumnStatisticsForTableOutput, error) { + req, out := c.GetColumnStatisticsForTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opGetConnection = "GetConnection" // GetConnectionRequest generates a "aws/request.Request" representing the @@ -6626,6 +7083,155 @@ func (c *Glue) GetPartitionWithContext(ctx aws.Context, input *GetPartitionInput return out, req.Send() } +const opGetPartitionIndexes = "GetPartitionIndexes" + +// GetPartitionIndexesRequest generates a "aws/request.Request" representing the +// client's request for the GetPartitionIndexes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPartitionIndexes for more information on using the GetPartitionIndexes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPartitionIndexesRequest method. +// req, resp := client.GetPartitionIndexesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitionIndexes +func (c *Glue) GetPartitionIndexesRequest(input *GetPartitionIndexesInput) (req *request.Request, output *GetPartitionIndexesOutput) { + op := &request.Operation{ + Name: opGetPartitionIndexes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetPartitionIndexesInput{} + } + + output = &GetPartitionIndexesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPartitionIndexes API operation for AWS Glue. +// +// Retrieves the partition indexes associated with a table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation GetPartitionIndexes for usage and error information. +// +// Returned Error Types: +// * InternalServiceException +// An internal service error occurred. +// +// * OperationTimeoutException +// The operation timed out. +// +// * InvalidInputException +// The input provided was not valid. +// +// * EntityNotFoundException +// A specified entity does not exist +// +// * ConflictException +// The CreatePartitions API was called on a table that has indexes enabled. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetPartitionIndexes +func (c *Glue) GetPartitionIndexes(input *GetPartitionIndexesInput) (*GetPartitionIndexesOutput, error) { + req, out := c.GetPartitionIndexesRequest(input) + return out, req.Send() +} + +// GetPartitionIndexesWithContext is the same as GetPartitionIndexes with the addition of +// the ability to pass a context and additional request options. +// +// See GetPartitionIndexes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
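For the paired GetColumnStatisticsForPartition and GetColumnStatisticsForTable readers added above, a sketch of the table variant. ColumnNames on the input and ColumnStatisticsList/Errors on the output follow the Glue API reference and are assumptions here; ColumnName, ColumnType, and ColumnError come from the struct definitions later in this file.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	out, err := svc.GetColumnStatisticsForTable(&glue.GetColumnStatisticsForTableInput{
		DatabaseName: aws.String("example_db"),    // placeholder
		TableName:    aws.String("example_table"), // placeholder
		ColumnNames:  []*string{aws.String("example_column")},
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, cs := range out.ColumnStatisticsList {
		fmt.Println(aws.StringValue(cs.ColumnName), aws.StringValue(cs.ColumnType))
	}
	for _, ce := range out.Errors {
		fmt.Println("column error:", aws.StringValue(ce.ColumnName), ce.Error)
	}
}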
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) GetPartitionIndexesWithContext(ctx aws.Context, input *GetPartitionIndexesInput, opts ...request.Option) (*GetPartitionIndexesOutput, error) { + req, out := c.GetPartitionIndexesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetPartitionIndexesPages iterates over the pages of a GetPartitionIndexes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetPartitionIndexes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetPartitionIndexes operation. +// pageNum := 0 +// err := client.GetPartitionIndexesPages(params, +// func(page *glue.GetPartitionIndexesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Glue) GetPartitionIndexesPages(input *GetPartitionIndexesInput, fn func(*GetPartitionIndexesOutput, bool) bool) error { + return c.GetPartitionIndexesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetPartitionIndexesPagesWithContext same as GetPartitionIndexesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) GetPartitionIndexesPagesWithContext(ctx aws.Context, input *GetPartitionIndexesInput, fn func(*GetPartitionIndexesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetPartitionIndexesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetPartitionIndexesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetPartitionIndexesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetPartitions = "GetPartitions" // GetPartitionsRequest generates a "aws/request.Request" representing the @@ -6860,6 +7466,100 @@ func (c *Glue) GetPlanWithContext(ctx aws.Context, input *GetPlanInput, opts ... return out, req.Send() } +const opGetResourcePolicies = "GetResourcePolicies" + +// GetResourcePoliciesRequest generates a "aws/request.Request" representing the +// client's request for the GetResourcePolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetResourcePolicies for more information on using the GetResourcePolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetResourcePoliciesRequest method. 
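GetPartitionIndexes above is wired with a NextToken-only paginator (no limit token), so the generated Pages helpers simply follow NextToken until the service stops returning one. A sketch with placeholder database and table names:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// Walk every page of partition index descriptors for one table.
	err := svc.GetPartitionIndexesPages(&glue.GetPartitionIndexesInput{
		DatabaseName: aws.String("example_db"),    // placeholder
		TableName:    aws.String("example_table"), // placeholder
	}, func(page *glue.GetPartitionIndexesOutput, lastPage bool) bool {
		fmt.Println(page) // the page output carries the index descriptors
		return true       // keep paging
	})
	if err != nil {
		log.Fatal(err)
	}
}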
+// req, resp := client.GetResourcePoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetResourcePolicies +func (c *Glue) GetResourcePoliciesRequest(input *GetResourcePoliciesInput) (req *request.Request, output *GetResourcePoliciesOutput) { + op := &request.Operation{ + Name: opGetResourcePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetResourcePoliciesInput{} + } + + output = &GetResourcePoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetResourcePolicies API operation for AWS Glue. +// +// Retrieves the security configurations for the resource policies set on individual +// resources, and also the account-level policy. +// +// This operation also returns the Data Catalog resource policy. However, if +// you enabled metadata encryption in Data Catalog settings, and you do not +// have permission on the AWS KMS key, the operation can't return the Data Catalog +// resource policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation GetResourcePolicies for usage and error information. +// +// Returned Error Types: +// * InternalServiceException +// An internal service error occurred. +// +// * OperationTimeoutException +// The operation timed out. +// +// * InvalidInputException +// The input provided was not valid. +// +// * EncryptionException +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/GetResourcePolicies +func (c *Glue) GetResourcePolicies(input *GetResourcePoliciesInput) (*GetResourcePoliciesOutput, error) { + req, out := c.GetResourcePoliciesRequest(input) + return out, req.Send() +} + +// GetResourcePoliciesWithContext is the same as GetResourcePolicies with the addition of +// the ability to pass a context and additional request options. +// +// See GetResourcePolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) GetResourcePoliciesWithContext(ctx aws.Context, input *GetResourcePoliciesInput, opts ...request.Option) (*GetResourcePoliciesOutput, error) { + req, out := c.GetResourcePoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetResourcePolicy = "GetResourcePolicy" // GetResourcePolicyRequest generates a "aws/request.Request" representing the @@ -9977,6 +10677,102 @@ func (c *Glue) ResetJobBookmarkWithContext(ctx aws.Context, input *ResetJobBookm return out, req.Send() } +const opResumeWorkflowRun = "ResumeWorkflowRun" + +// ResumeWorkflowRunRequest generates a "aws/request.Request" representing the +// client's request for the ResumeWorkflowRun operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
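A sketch for the new GetResourcePolicies call above; the output is printed wholesale rather than assuming field names that do not appear in this hunk, and the empty input relies on the service defaults.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// Lists the resource policies visible to the caller, including the
	// account-level policy described in the comment above.
	out, err := svc.GetResourcePolicies(&glue.GetResourcePoliciesInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}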
+// +// See ResumeWorkflowRun for more information on using the ResumeWorkflowRun +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResumeWorkflowRunRequest method. +// req, resp := client.ResumeWorkflowRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResumeWorkflowRun +func (c *Glue) ResumeWorkflowRunRequest(input *ResumeWorkflowRunInput) (req *request.Request, output *ResumeWorkflowRunOutput) { + op := &request.Operation{ + Name: opResumeWorkflowRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResumeWorkflowRunInput{} + } + + output = &ResumeWorkflowRunOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResumeWorkflowRun API operation for AWS Glue. +// +// Restarts selected nodes of a previous partially completed workflow run and +// resumes the workflow run. The selected nodes and all nodes that are downstream +// from the selected nodes are run. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation ResumeWorkflowRun for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input provided was not valid. +// +// * EntityNotFoundException +// A specified entity does not exist +// +// * InternalServiceException +// An internal service error occurred. +// +// * OperationTimeoutException +// The operation timed out. +// +// * ConcurrentRunsExceededException +// Too many jobs are being run concurrently. +// +// * IllegalWorkflowStateException +// The workflow is in an invalid state to perform a requested operation. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/ResumeWorkflowRun +func (c *Glue) ResumeWorkflowRun(input *ResumeWorkflowRunInput) (*ResumeWorkflowRunOutput, error) { + req, out := c.ResumeWorkflowRunRequest(input) + return out, req.Send() +} + +// ResumeWorkflowRunWithContext is the same as ResumeWorkflowRun with the addition of +// the ability to pass a context and additional request options. +// +// See ResumeWorkflowRun for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) ResumeWorkflowRunWithContext(ctx aws.Context, input *ResumeWorkflowRunInput, opts ...request.Option) (*ResumeWorkflowRunOutput, error) { + req, out := c.ResumeWorkflowRunRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opSearchTables = "SearchTables" // SearchTablesRequest generates a "aws/request.Request" representing the @@ -11279,6 +12075,98 @@ func (c *Glue) StopTriggerWithContext(ctx aws.Context, input *StopTriggerInput, return out, req.Send() } +const opStopWorkflowRun = "StopWorkflowRun" + +// StopWorkflowRunRequest generates a "aws/request.Request" representing the +// client's request for the StopWorkflowRun operation. 
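ResumeWorkflowRun above re-runs selected nodes of a partially completed workflow run, plus everything downstream of them. A hedged sketch; Name, RunId, and NodeIds follow the Glue API reference and are not shown in this hunk, and all values are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	// Re-run two nodes (and their downstream nodes) from an earlier workflow run.
	out, err := svc.ResumeWorkflowRun(&glue.ResumeWorkflowRunInput{
		Name:    aws.String("example-workflow"),     // placeholder
		RunId:   aws.String("wr_0123456789abcdef"),  // placeholder
		NodeIds: []*string{aws.String("node-1"), aws.String("node-2")},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // echoes the new run ID and the node IDs actually resumed
}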
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StopWorkflowRun for more information on using the StopWorkflowRun +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StopWorkflowRunRequest method. +// req, resp := client.StopWorkflowRunRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopWorkflowRun +func (c *Glue) StopWorkflowRunRequest(input *StopWorkflowRunInput) (req *request.Request, output *StopWorkflowRunOutput) { + op := &request.Operation{ + Name: opStopWorkflowRun, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StopWorkflowRunInput{} + } + + output = &StopWorkflowRunOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// StopWorkflowRun API operation for AWS Glue. +// +// Stops the execution of the specified workflow run. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation StopWorkflowRun for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// The input provided was not valid. +// +// * EntityNotFoundException +// A specified entity does not exist +// +// * InternalServiceException +// An internal service error occurred. +// +// * OperationTimeoutException +// The operation timed out. +// +// * IllegalWorkflowStateException +// The workflow is in an invalid state to perform a requested operation. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/StopWorkflowRun +func (c *Glue) StopWorkflowRun(input *StopWorkflowRunInput) (*StopWorkflowRunOutput, error) { + req, out := c.StopWorkflowRunRequest(input) + return out, req.Send() +} + +// StopWorkflowRunWithContext is the same as StopWorkflowRun with the addition of +// the ability to pass a context and additional request options. +// +// See StopWorkflowRun for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) StopWorkflowRunWithContext(ctx aws.Context, input *StopWorkflowRunInput, opts ...request.Option) (*StopWorkflowRunOutput, error) { + req, out := c.StopWorkflowRunRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opTagResource = "TagResource" // TagResourceRequest generates a "aws/request.Request" representing the @@ -11549,6 +12437,188 @@ func (c *Glue) UpdateClassifierWithContext(ctx aws.Context, input *UpdateClassif return out, req.Send() } +const opUpdateColumnStatisticsForPartition = "UpdateColumnStatisticsForPartition" + +// UpdateColumnStatisticsForPartitionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateColumnStatisticsForPartition operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateColumnStatisticsForPartition for more information on using the UpdateColumnStatisticsForPartition +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateColumnStatisticsForPartitionRequest method. +// req, resp := client.UpdateColumnStatisticsForPartitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateColumnStatisticsForPartition +func (c *Glue) UpdateColumnStatisticsForPartitionRequest(input *UpdateColumnStatisticsForPartitionInput) (req *request.Request, output *UpdateColumnStatisticsForPartitionOutput) { + op := &request.Operation{ + Name: opUpdateColumnStatisticsForPartition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateColumnStatisticsForPartitionInput{} + } + + output = &UpdateColumnStatisticsForPartitionOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateColumnStatisticsForPartition API operation for AWS Glue. +// +// Creates or updates partition statistics of columns. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation UpdateColumnStatisticsForPartition for usage and error information. +// +// Returned Error Types: +// * EntityNotFoundException +// A specified entity does not exist +// +// * InvalidInputException +// The input provided was not valid. +// +// * InternalServiceException +// An internal service error occurred. +// +// * OperationTimeoutException +// The operation timed out. +// +// * EncryptionException +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateColumnStatisticsForPartition +func (c *Glue) UpdateColumnStatisticsForPartition(input *UpdateColumnStatisticsForPartitionInput) (*UpdateColumnStatisticsForPartitionOutput, error) { + req, out := c.UpdateColumnStatisticsForPartitionRequest(input) + return out, req.Send() +} + +// UpdateColumnStatisticsForPartitionWithContext is the same as UpdateColumnStatisticsForPartition with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateColumnStatisticsForPartition for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdateColumnStatisticsForPartitionWithContext(ctx aws.Context, input *UpdateColumnStatisticsForPartitionInput, opts ...request.Option) (*UpdateColumnStatisticsForPartitionOutput, error) { + req, out := c.UpdateColumnStatisticsForPartitionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateColumnStatisticsForTable = "UpdateColumnStatisticsForTable" + +// UpdateColumnStatisticsForTableRequest generates a "aws/request.Request" representing the +// client's request for the UpdateColumnStatisticsForTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateColumnStatisticsForTable for more information on using the UpdateColumnStatisticsForTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateColumnStatisticsForTableRequest method. +// req, resp := client.UpdateColumnStatisticsForTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateColumnStatisticsForTable +func (c *Glue) UpdateColumnStatisticsForTableRequest(input *UpdateColumnStatisticsForTableInput) (req *request.Request, output *UpdateColumnStatisticsForTableOutput) { + op := &request.Operation{ + Name: opUpdateColumnStatisticsForTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateColumnStatisticsForTableInput{} + } + + output = &UpdateColumnStatisticsForTableOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateColumnStatisticsForTable API operation for AWS Glue. +// +// Creates or updates table statistics of columns. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Glue's +// API operation UpdateColumnStatisticsForTable for usage and error information. +// +// Returned Error Types: +// * EntityNotFoundException +// A specified entity does not exist +// +// * InvalidInputException +// The input provided was not valid. +// +// * InternalServiceException +// An internal service error occurred. +// +// * OperationTimeoutException +// The operation timed out. +// +// * EncryptionException +// An encryption operation failed. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/glue-2017-03-31/UpdateColumnStatisticsForTable +func (c *Glue) UpdateColumnStatisticsForTable(input *UpdateColumnStatisticsForTableInput) (*UpdateColumnStatisticsForTableOutput, error) { + req, out := c.UpdateColumnStatisticsForTableRequest(input) + return out, req.Send() +} + +// UpdateColumnStatisticsForTableWithContext is the same as UpdateColumnStatisticsForTable with the addition of +// the ability to pass a context and additional request options. 
+// +// See UpdateColumnStatisticsForTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Glue) UpdateColumnStatisticsForTableWithContext(ctx aws.Context, input *UpdateColumnStatisticsForTableInput, opts ...request.Option) (*UpdateColumnStatisticsForTableOutput, error) { + req, out := c.UpdateColumnStatisticsForTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateConnection = "UpdateConnection" // UpdateConnectionRequest generates a "aws/request.Request" representing the @@ -12663,8 +13733,8 @@ func (c *Glue) UpdateWorkflowWithContext(ctx aws.Context, input *UpdateWorkflowI // Access to a resource was denied. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -12682,17 +13752,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12700,22 +13770,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // Defines an action to be initiated by a trigger. @@ -12831,8 +13901,8 @@ func (s *Action) SetTimeout(v int64) *Action { // A resource to be created or added already exists. type AlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -12850,17 +13920,17 @@ func (s AlreadyExistsException) GoString() string { func newErrorAlreadyExistsException(v protocol.ResponseMetadata) error { return &AlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
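The RespMetadata export and pointer-receiver change shown in the surrounding hunks for AccessDeniedException and AlreadyExistsException is what lets callers combine errors.As with direct access to the HTTP status and request ID. A sketch; the GetTable call is only a stand-in for any Glue API call, and the names are placeholders.

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/glue"
)

func main() {
	svc := glue.New(session.Must(session.NewSession()))

	_, err := svc.GetTable(&glue.GetTableInput{
		DatabaseName: aws.String("example_db"),    // placeholder
		Name:         aws.String("example_table"), // placeholder
	})

	// Pointer receivers mean errors.As matches the concrete exception type, and the
	// newly exported RespMetadata carries the HTTP status code and request ID.
	var denied *glue.AccessDeniedException
	if errors.As(err, &denied) {
		fmt.Printf("%s: %s (HTTP %d, request %s)\n",
			denied.Code(), denied.Message(),
			denied.RespMetadata.StatusCode, denied.RespMetadata.RequestID)
	} else if err != nil {
		log.Fatal(err)
	}
}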
-func (s AlreadyExistsException) Code() string { +func (s *AlreadyExistsException) Code() string { return "AlreadyExistsException" } // Message returns the exception's message. -func (s AlreadyExistsException) Message() string { +func (s *AlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12868,22 +13938,22 @@ func (s AlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AlreadyExistsException) OrigErr() error { +func (s *AlreadyExistsException) OrigErr() error { return nil } -func (s AlreadyExistsException) Error() string { +func (s *AlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *AlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } type BatchCreatePartitionInput struct { @@ -14073,6 +15143,353 @@ func (s *BatchStopJobRunSuccessfulSubmission) SetJobRunId(v string) *BatchStopJo return s } +// Contains information about a batch update partition error. +type BatchUpdatePartitionFailureEntry struct { + _ struct{} `type:"structure"` + + // The details about the batch update partition error. + ErrorDetail *ErrorDetail `type:"structure"` + + // A list of values defining the partitions. + PartitionValueList []*string `type:"list"` +} + +// String returns the string representation +func (s BatchUpdatePartitionFailureEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchUpdatePartitionFailureEntry) GoString() string { + return s.String() +} + +// SetErrorDetail sets the ErrorDetail field's value. +func (s *BatchUpdatePartitionFailureEntry) SetErrorDetail(v *ErrorDetail) *BatchUpdatePartitionFailureEntry { + s.ErrorDetail = v + return s +} + +// SetPartitionValueList sets the PartitionValueList field's value. +func (s *BatchUpdatePartitionFailureEntry) SetPartitionValueList(v []*string) *BatchUpdatePartitionFailureEntry { + s.PartitionValueList = v + return s +} + +type BatchUpdatePartitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the catalog in which the partition is to be updated. Currently, + // this should be the AWS account ID. + CatalogId *string `min:"1" type:"string"` + + // The name of the metadata database in which the partition is to be updated. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // A list of up to 100 BatchUpdatePartitionRequestEntry objects to update. + // + // Entries is a required field + Entries []*BatchUpdatePartitionRequestEntry `min:"1" type:"list" required:"true"` + + // The name of the metadata table in which the partition is to be updated. 
+ // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s BatchUpdatePartitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchUpdatePartitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchUpdatePartitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchUpdatePartitionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.Entries == nil { + invalidParams.Add(request.NewErrParamRequired("Entries")) + } + if s.Entries != nil && len(s.Entries) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Entries", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + if s.Entries != nil { + for i, v := range s.Entries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *BatchUpdatePartitionInput) SetCatalogId(v string) *BatchUpdatePartitionInput { + s.CatalogId = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *BatchUpdatePartitionInput) SetDatabaseName(v string) *BatchUpdatePartitionInput { + s.DatabaseName = &v + return s +} + +// SetEntries sets the Entries field's value. +func (s *BatchUpdatePartitionInput) SetEntries(v []*BatchUpdatePartitionRequestEntry) *BatchUpdatePartitionInput { + s.Entries = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *BatchUpdatePartitionInput) SetTableName(v string) *BatchUpdatePartitionInput { + s.TableName = &v + return s +} + +type BatchUpdatePartitionOutput struct { + _ struct{} `type:"structure"` + + // The errors encountered when trying to update the requested partitions. A + // list of BatchUpdatePartitionFailureEntry objects. + Errors []*BatchUpdatePartitionFailureEntry `type:"list"` +} + +// String returns the string representation +func (s BatchUpdatePartitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchUpdatePartitionOutput) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. +func (s *BatchUpdatePartitionOutput) SetErrors(v []*BatchUpdatePartitionFailureEntry) *BatchUpdatePartitionOutput { + s.Errors = v + return s +} + +// A structure that contains the values and structure used to update a partition. +type BatchUpdatePartitionRequestEntry struct { + _ struct{} `type:"structure"` + + // The structure used to update a partition. + // + // PartitionInput is a required field + PartitionInput *PartitionInput `type:"structure" required:"true"` + + // A list of values defining the partitions. 
+ // + // PartitionValueList is a required field + PartitionValueList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchUpdatePartitionRequestEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchUpdatePartitionRequestEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchUpdatePartitionRequestEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchUpdatePartitionRequestEntry"} + if s.PartitionInput == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionInput")) + } + if s.PartitionValueList == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionValueList")) + } + if s.PartitionInput != nil { + if err := s.PartitionInput.Validate(); err != nil { + invalidParams.AddNested("PartitionInput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPartitionInput sets the PartitionInput field's value. +func (s *BatchUpdatePartitionRequestEntry) SetPartitionInput(v *PartitionInput) *BatchUpdatePartitionRequestEntry { + s.PartitionInput = v + return s +} + +// SetPartitionValueList sets the PartitionValueList field's value. +func (s *BatchUpdatePartitionRequestEntry) SetPartitionValueList(v []*string) *BatchUpdatePartitionRequestEntry { + s.PartitionValueList = v + return s +} + +// Defines a binary column statistics data. +type BinaryColumnStatisticsData struct { + _ struct{} `type:"structure"` + + // Average length of the column. + // + // AverageLength is a required field + AverageLength *float64 `type:"double" required:"true"` + + // Maximum length of the column. + // + // MaximumLength is a required field + MaximumLength *int64 `type:"long" required:"true"` + + // Number of nulls. + // + // NumberOfNulls is a required field + NumberOfNulls *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s BinaryColumnStatisticsData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BinaryColumnStatisticsData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BinaryColumnStatisticsData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BinaryColumnStatisticsData"} + if s.AverageLength == nil { + invalidParams.Add(request.NewErrParamRequired("AverageLength")) + } + if s.MaximumLength == nil { + invalidParams.Add(request.NewErrParamRequired("MaximumLength")) + } + if s.NumberOfNulls == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfNulls")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAverageLength sets the AverageLength field's value. +func (s *BinaryColumnStatisticsData) SetAverageLength(v float64) *BinaryColumnStatisticsData { + s.AverageLength = &v + return s +} + +// SetMaximumLength sets the MaximumLength field's value. +func (s *BinaryColumnStatisticsData) SetMaximumLength(v int64) *BinaryColumnStatisticsData { + s.MaximumLength = &v + return s +} + +// SetNumberOfNulls sets the NumberOfNulls field's value. +func (s *BinaryColumnStatisticsData) SetNumberOfNulls(v int64) *BinaryColumnStatisticsData { + s.NumberOfNulls = &v + return s +} + +// Defines a boolean column statistics. 
+type BooleanColumnStatisticsData struct { + _ struct{} `type:"structure"` + + // Number of false value. + // + // NumberOfFalses is a required field + NumberOfFalses *int64 `type:"long" required:"true"` + + // Number of nulls. + // + // NumberOfNulls is a required field + NumberOfNulls *int64 `type:"long" required:"true"` + + // Number of true value. + // + // NumberOfTrues is a required field + NumberOfTrues *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s BooleanColumnStatisticsData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BooleanColumnStatisticsData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BooleanColumnStatisticsData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BooleanColumnStatisticsData"} + if s.NumberOfFalses == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfFalses")) + } + if s.NumberOfNulls == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfNulls")) + } + if s.NumberOfTrues == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfTrues")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNumberOfFalses sets the NumberOfFalses field's value. +func (s *BooleanColumnStatisticsData) SetNumberOfFalses(v int64) *BooleanColumnStatisticsData { + s.NumberOfFalses = &v + return s +} + +// SetNumberOfNulls sets the NumberOfNulls field's value. +func (s *BooleanColumnStatisticsData) SetNumberOfNulls(v int64) *BooleanColumnStatisticsData { + s.NumberOfNulls = &v + return s +} + +// SetNumberOfTrues sets the NumberOfTrues field's value. +func (s *BooleanColumnStatisticsData) SetNumberOfTrues(v int64) *BooleanColumnStatisticsData { + s.NumberOfTrues = &v + return s +} + type CancelMLTaskRunInput struct { _ struct{} `type:"structure"` @@ -14712,10 +16129,302 @@ func (s *Column) SetType(v string) *Column { return s } +// Defines a column containing error. +type ColumnError struct { + _ struct{} `type:"structure"` + + // The name of the column. + ColumnName *string `min:"1" type:"string"` + + // The error message occurred during operation. + Error *ErrorDetail `type:"structure"` +} + +// String returns the string representation +func (s ColumnError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ColumnError) GoString() string { + return s.String() +} + +// SetColumnName sets the ColumnName field's value. +func (s *ColumnError) SetColumnName(v string) *ColumnError { + s.ColumnName = &v + return s +} + +// SetError sets the Error field's value. +func (s *ColumnError) SetError(v *ErrorDetail) *ColumnError { + s.Error = v + return s +} + +// Defines a column statistics. +type ColumnStatistics struct { + _ struct{} `type:"structure"` + + // The analyzed time of the column statistics. + // + // AnalyzedTime is a required field + AnalyzedTime *time.Time `type:"timestamp" required:"true"` + + // The name of the column. + // + // ColumnName is a required field + ColumnName *string `min:"1" type:"string" required:"true"` + + // The type of the column. + // + // ColumnType is a required field + ColumnType *string `type:"string" required:"true"` + + // The statistics of the column. 
+ // + // StatisticsData is a required field + StatisticsData *ColumnStatisticsData `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ColumnStatistics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ColumnStatistics) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ColumnStatistics) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ColumnStatistics"} + if s.AnalyzedTime == nil { + invalidParams.Add(request.NewErrParamRequired("AnalyzedTime")) + } + if s.ColumnName == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnName")) + } + if s.ColumnName != nil && len(*s.ColumnName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ColumnName", 1)) + } + if s.ColumnType == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnType")) + } + if s.StatisticsData == nil { + invalidParams.Add(request.NewErrParamRequired("StatisticsData")) + } + if s.StatisticsData != nil { + if err := s.StatisticsData.Validate(); err != nil { + invalidParams.AddNested("StatisticsData", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnalyzedTime sets the AnalyzedTime field's value. +func (s *ColumnStatistics) SetAnalyzedTime(v time.Time) *ColumnStatistics { + s.AnalyzedTime = &v + return s +} + +// SetColumnName sets the ColumnName field's value. +func (s *ColumnStatistics) SetColumnName(v string) *ColumnStatistics { + s.ColumnName = &v + return s +} + +// SetColumnType sets the ColumnType field's value. +func (s *ColumnStatistics) SetColumnType(v string) *ColumnStatistics { + s.ColumnType = &v + return s +} + +// SetStatisticsData sets the StatisticsData field's value. +func (s *ColumnStatistics) SetStatisticsData(v *ColumnStatisticsData) *ColumnStatistics { + s.StatisticsData = v + return s +} + +// Defines a column statistics data. +type ColumnStatisticsData struct { + _ struct{} `type:"structure"` + + // Binary Column Statistics Data. + BinaryColumnStatisticsData *BinaryColumnStatisticsData `type:"structure"` + + // Boolean Column Statistics Data. + BooleanColumnStatisticsData *BooleanColumnStatisticsData `type:"structure"` + + // Date Column Statistics Data. + DateColumnStatisticsData *DateColumnStatisticsData `type:"structure"` + + // Decimal Column Statistics Data. + DecimalColumnStatisticsData *DecimalColumnStatisticsData `type:"structure"` + + // Double Column Statistics Data. + DoubleColumnStatisticsData *DoubleColumnStatisticsData `type:"structure"` + + // Long Column Statistics Data. + LongColumnStatisticsData *LongColumnStatisticsData `type:"structure"` + + // String Column Statistics Data. + StringColumnStatisticsData *StringColumnStatisticsData `type:"structure"` + + // The name of the column. + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"ColumnStatisticsType"` +} + +// String returns the string representation +func (s ColumnStatisticsData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ColumnStatisticsData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ColumnStatisticsData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ColumnStatisticsData"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.BinaryColumnStatisticsData != nil { + if err := s.BinaryColumnStatisticsData.Validate(); err != nil { + invalidParams.AddNested("BinaryColumnStatisticsData", err.(request.ErrInvalidParams)) + } + } + if s.BooleanColumnStatisticsData != nil { + if err := s.BooleanColumnStatisticsData.Validate(); err != nil { + invalidParams.AddNested("BooleanColumnStatisticsData", err.(request.ErrInvalidParams)) + } + } + if s.DateColumnStatisticsData != nil { + if err := s.DateColumnStatisticsData.Validate(); err != nil { + invalidParams.AddNested("DateColumnStatisticsData", err.(request.ErrInvalidParams)) + } + } + if s.DecimalColumnStatisticsData != nil { + if err := s.DecimalColumnStatisticsData.Validate(); err != nil { + invalidParams.AddNested("DecimalColumnStatisticsData", err.(request.ErrInvalidParams)) + } + } + if s.DoubleColumnStatisticsData != nil { + if err := s.DoubleColumnStatisticsData.Validate(); err != nil { + invalidParams.AddNested("DoubleColumnStatisticsData", err.(request.ErrInvalidParams)) + } + } + if s.LongColumnStatisticsData != nil { + if err := s.LongColumnStatisticsData.Validate(); err != nil { + invalidParams.AddNested("LongColumnStatisticsData", err.(request.ErrInvalidParams)) + } + } + if s.StringColumnStatisticsData != nil { + if err := s.StringColumnStatisticsData.Validate(); err != nil { + invalidParams.AddNested("StringColumnStatisticsData", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBinaryColumnStatisticsData sets the BinaryColumnStatisticsData field's value. +func (s *ColumnStatisticsData) SetBinaryColumnStatisticsData(v *BinaryColumnStatisticsData) *ColumnStatisticsData { + s.BinaryColumnStatisticsData = v + return s +} + +// SetBooleanColumnStatisticsData sets the BooleanColumnStatisticsData field's value. +func (s *ColumnStatisticsData) SetBooleanColumnStatisticsData(v *BooleanColumnStatisticsData) *ColumnStatisticsData { + s.BooleanColumnStatisticsData = v + return s +} + +// SetDateColumnStatisticsData sets the DateColumnStatisticsData field's value. +func (s *ColumnStatisticsData) SetDateColumnStatisticsData(v *DateColumnStatisticsData) *ColumnStatisticsData { + s.DateColumnStatisticsData = v + return s +} + +// SetDecimalColumnStatisticsData sets the DecimalColumnStatisticsData field's value. +func (s *ColumnStatisticsData) SetDecimalColumnStatisticsData(v *DecimalColumnStatisticsData) *ColumnStatisticsData { + s.DecimalColumnStatisticsData = v + return s +} + +// SetDoubleColumnStatisticsData sets the DoubleColumnStatisticsData field's value. +func (s *ColumnStatisticsData) SetDoubleColumnStatisticsData(v *DoubleColumnStatisticsData) *ColumnStatisticsData { + s.DoubleColumnStatisticsData = v + return s +} + +// SetLongColumnStatisticsData sets the LongColumnStatisticsData field's value. +func (s *ColumnStatisticsData) SetLongColumnStatisticsData(v *LongColumnStatisticsData) *ColumnStatisticsData { + s.LongColumnStatisticsData = v + return s +} + +// SetStringColumnStatisticsData sets the StringColumnStatisticsData field's value. +func (s *ColumnStatisticsData) SetStringColumnStatisticsData(v *StringColumnStatisticsData) *ColumnStatisticsData { + s.StringColumnStatisticsData = v + return s +} + +// SetType sets the Type field's value. 
+func (s *ColumnStatisticsData) SetType(v string) *ColumnStatisticsData { + s.Type = &v + return s +} + +// Defines a column containing error. +type ColumnStatisticsError struct { + _ struct{} `type:"structure"` + + // The ColumnStatistics of the column. + ColumnStatistics *ColumnStatistics `type:"structure"` + + // The error message occurred during operation. + Error *ErrorDetail `type:"structure"` +} + +// String returns the string representation +func (s ColumnStatisticsError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ColumnStatisticsError) GoString() string { + return s.String() +} + +// SetColumnStatistics sets the ColumnStatistics field's value. +func (s *ColumnStatisticsError) SetColumnStatistics(v *ColumnStatistics) *ColumnStatisticsError { + s.ColumnStatistics = v + return s +} + +// SetError sets the Error field's value. +func (s *ColumnStatisticsError) SetError(v *ErrorDetail) *ColumnStatisticsError { + s.Error = v + return s +} + // Two processes are trying to modify a resource simultaneously. type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -14733,17 +16442,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. -func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14751,28 +16460,28 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } // Too many jobs are being run concurrently. type ConcurrentRunsExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. 
Message_ *string `locationName:"Message" type:"string"` @@ -14790,17 +16499,17 @@ func (s ConcurrentRunsExceededException) GoString() string { func newErrorConcurrentRunsExceededException(v protocol.ResponseMetadata) error { return &ConcurrentRunsExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentRunsExceededException) Code() string { +func (s *ConcurrentRunsExceededException) Code() string { return "ConcurrentRunsExceededException" } // Message returns the exception's message. -func (s ConcurrentRunsExceededException) Message() string { +func (s *ConcurrentRunsExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14808,22 +16517,22 @@ func (s ConcurrentRunsExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentRunsExceededException) OrigErr() error { +func (s *ConcurrentRunsExceededException) OrigErr() error { return nil } -func (s ConcurrentRunsExceededException) Error() string { +func (s *ConcurrentRunsExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentRunsExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentRunsExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentRunsExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentRunsExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Defines a condition under which a trigger fires. @@ -14843,8 +16552,9 @@ type Condition struct { // A logical operator. LogicalOperator *string `type:"string" enum:"LogicalOperator"` - // The condition state. Currently, the values supported are SUCCEEDED, STOPPED, - // TIMEOUT, and FAILED. + // The condition state. Currently, the only job states that a trigger can listen + // for are SUCCEEDED, STOPPED, FAILED, and TIMEOUT. The only crawler states + // that a trigger can listen for are SUCCEEDED, FAILED, and CANCELLED. State *string `type:"string" enum:"JobRunState"` } @@ -14906,8 +16616,8 @@ func (s *Condition) SetState(v string) *Condition { // A specified condition was not satisfied. type ConditionCheckFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -14925,17 +16635,17 @@ func (s ConditionCheckFailureException) GoString() string { func newErrorConditionCheckFailureException(v protocol.ResponseMetadata) error { return &ConditionCheckFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConditionCheckFailureException) Code() string { +func (s *ConditionCheckFailureException) Code() string { return "ConditionCheckFailureException" } // Message returns the exception's message. -func (s ConditionCheckFailureException) Message() string { +func (s *ConditionCheckFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14943,22 +16653,79 @@ func (s ConditionCheckFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ConditionCheckFailureException) OrigErr() error { +func (s *ConditionCheckFailureException) OrigErr() error { return nil } -func (s ConditionCheckFailureException) Error() string { +func (s *ConditionCheckFailureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConditionCheckFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConditionCheckFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConditionCheckFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConditionCheckFailureException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The CreatePartitions API was called on a table that has indexes enabled. +type ConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A message describing the problem. + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s ConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConflictException) GoString() string { + return s.String() +} + +func newErrorConflictException(v protocol.ResponseMetadata) error { + return &ConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ConflictException) Code() string { + return "ConflictException" +} + +// Message returns the exception's message. +func (s *ConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ConflictException) OrigErr() error { + return nil +} + +func (s *ConflictException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } // The confusion matrix shows you what your transform is predicting accurately @@ -15054,7 +16821,7 @@ type Connection struct { // // * INSTANCE_ID - The instance ID to use. // - // * JDBC_CONNECTION_URL - The URL for the JDBC connection. + // * JDBC_CONNECTION_URL - The URL for connecting to a JDBC data source. // // * JDBC_ENFORCE_SSL - A Boolean string (true, false) specifying whether // Secure Sockets Layer (SSL) with hostname matching is enforced for the @@ -15078,10 +16845,26 @@ type Connection struct { // used for domain match or distinguished name match to prevent a man-in-the-middle // attack. In Oracle database, this is used as the SSL_SERVER_CERT_DN; in // Microsoft SQL Server, this is used as the hostNameInCertificate. + // + // * CONNECTION_URL - The URL for connecting to a general (non-JDBC) data + // source. + // + // * KAFKA_BOOTSTRAP_SERVERS - A comma-separated list of host and port pairs + // that are the addresses of the Apache Kafka brokers in a Kafka cluster + // to which a Kafka client will connect to and bootstrap itself. + // + // * KAFKA_SSL_ENABLED - Whether to enable or disable SSL on an Apache Kafka + // connection. Default value is "true". 
+ // + // * KAFKA_CUSTOM_CERT - The Amazon S3 URL for the private CA cert file (.pem + // format). The default is an empty string. + // + // * KAFKA_SKIP_CUSTOM_CERT_VALIDATION - Whether to skip the validation of + // the CA cert file or not. AWS Glue validates for three algorithms: SHA256withRSA, + // SHA384withRSA and SHA512withRSA. Default value is "false". ConnectionProperties map[string]*string `type:"map"` - // The type of the connection. Currently, only JDBC is supported; SFTP is not - // supported. + // The type of the connection. Currently, SFTP is not supported. ConnectionType *string `type:"string" enum:"ConnectionType"` // The time that this connection definition was created. @@ -15180,8 +16963,19 @@ type ConnectionInput struct { // ConnectionProperties is a required field ConnectionProperties map[string]*string `type:"map" required:"true"` - // The type of the connection. Currently, only JDBC is supported; SFTP is not - // supported. + // The type of the connection. Currently, these types are supported: + // + // * JDBC - Designates a connection to a database through Java Database Connectivity + // (JDBC). + // + // * KAFKA - Designates a connection to an Apache Kafka streaming platform. + // + // * MONGODB - Designates a connection to a MongoDB document database. + // + // * NETWORK - Designates a network connection to a data source within an + // Amazon Virtual Private Cloud environment (Amazon VPC). + // + // SFTP is not supported. // // ConnectionType is a required field ConnectionType *string `type:"string" required:"true" enum:"ConnectionType"` @@ -15452,7 +17246,7 @@ type Crawler struct { // Crawler configuration information. This versioned JSON string allows users // to specify aspects of a crawler's behavior. For more information, see Configuring - // a Crawler (http://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). + // a Crawler (https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). Configuration *string `type:"string"` // If the crawler is running, contains the total time elapsed since the last @@ -15730,8 +17524,8 @@ func (s *CrawlerNodeDetails) SetCrawls(v []*Crawl) *CrawlerNodeDetails { // The specified crawler is not running. type CrawlerNotRunningException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -15749,17 +17543,17 @@ func (s CrawlerNotRunningException) GoString() string { func newErrorCrawlerNotRunningException(v protocol.ResponseMetadata) error { return &CrawlerNotRunningException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CrawlerNotRunningException) Code() string { +func (s *CrawlerNotRunningException) Code() string { return "CrawlerNotRunningException" } // Message returns the exception's message. -func (s CrawlerNotRunningException) Message() string { +func (s *CrawlerNotRunningException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15767,28 +17561,28 @@ func (s CrawlerNotRunningException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s CrawlerNotRunningException) OrigErr() error { +func (s *CrawlerNotRunningException) OrigErr() error { return nil } -func (s CrawlerNotRunningException) Error() string { +func (s *CrawlerNotRunningException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CrawlerNotRunningException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CrawlerNotRunningException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CrawlerNotRunningException) RequestID() string { - return s.respMetadata.RequestID +func (s *CrawlerNotRunningException) RequestID() string { + return s.RespMetadata.RequestID } // The operation cannot be performed because the crawler is already running. type CrawlerRunningException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -15806,17 +17600,17 @@ func (s CrawlerRunningException) GoString() string { func newErrorCrawlerRunningException(v protocol.ResponseMetadata) error { return &CrawlerRunningException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CrawlerRunningException) Code() string { +func (s *CrawlerRunningException) Code() string { return "CrawlerRunningException" } // Message returns the exception's message. -func (s CrawlerRunningException) Message() string { +func (s *CrawlerRunningException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15824,28 +17618,28 @@ func (s CrawlerRunningException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CrawlerRunningException) OrigErr() error { +func (s *CrawlerRunningException) OrigErr() error { return nil } -func (s CrawlerRunningException) Error() string { +func (s *CrawlerRunningException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CrawlerRunningException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CrawlerRunningException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CrawlerRunningException) RequestID() string { - return s.respMetadata.RequestID +func (s *CrawlerRunningException) RequestID() string { + return s.RespMetadata.RequestID } // The specified crawler is stopping. type CrawlerStoppingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -15863,17 +17657,17 @@ func (s CrawlerStoppingException) GoString() string { func newErrorCrawlerStoppingException(v protocol.ResponseMetadata) error { return &CrawlerStoppingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CrawlerStoppingException) Code() string { +func (s *CrawlerStoppingException) Code() string { return "CrawlerStoppingException" } // Message returns the exception's message. 
-func (s CrawlerStoppingException) Message() string { +func (s *CrawlerStoppingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15881,22 +17675,22 @@ func (s CrawlerStoppingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CrawlerStoppingException) OrigErr() error { +func (s *CrawlerStoppingException) OrigErr() error { return nil } -func (s CrawlerStoppingException) Error() string { +func (s *CrawlerStoppingException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CrawlerStoppingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CrawlerStoppingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CrawlerStoppingException) RequestID() string { - return s.respMetadata.RequestID +func (s *CrawlerStoppingException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies data stores to crawl. @@ -15912,6 +17706,9 @@ type CrawlerTargets struct { // Specifies JDBC targets. JdbcTargets []*JdbcTarget `type:"list"` + // Specifies Amazon DocumentDB or MongoDB targets. + MongoDBTargets []*MongoDBTarget `type:"list"` + // Specifies Amazon Simple Storage Service (Amazon S3) targets. S3Targets []*S3Target `type:"list"` } @@ -15964,6 +17761,12 @@ func (s *CrawlerTargets) SetJdbcTargets(v []*JdbcTarget) *CrawlerTargets { return s } +// SetMongoDBTargets sets the MongoDBTargets field's value. +func (s *CrawlerTargets) SetMongoDBTargets(v []*MongoDBTarget) *CrawlerTargets { + s.MongoDBTargets = v + return s +} + // SetS3Targets sets the S3Targets field's value. func (s *CrawlerTargets) SetS3Targets(v []*S3Target) *CrawlerTargets { s.S3Targets = v @@ -16142,9 +17945,9 @@ type CreateCrawlerInput struct { // always override the default classifiers for a given classification. Classifiers []*string `type:"list"` - // The crawler configuration information. This versioned JSON string allows - // users to specify aspects of a crawler's behavior. For more information, see - // Configuring a Crawler (http://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). + // Crawler configuration information. This versioned JSON string allows users + // to specify aspects of a crawler's behavior. For more information, see Configuring + // a Crawler (https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). Configuration *string `type:"string"` // The name of the SecurityConfiguration structure to be used by this crawler. @@ -16167,10 +17970,10 @@ type CreateCrawlerInput struct { // Role is a required field Role *string `type:"string" required:"true"` - // A cron expression used to specify the schedule. For more information, see - // Time-Based Schedules for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, specify cron(15 12 - // * * ? *). + // A cron expression used to specify the schedule (see Time-Based Schedules + // for Jobs and Crawlers (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). + // For example, to run something every day at 12:15 UTC, you would specify: + // cron(15 12 * * ? *). Schedule *string `type:"string"` // The policy for the crawler's update and deletion behavior. 
@@ -16179,8 +17982,10 @@ type CreateCrawlerInput struct { // The table prefix used for catalog tables that are created. TablePrefix *string `type:"string"` - // The tags to use with this crawler request. You can use tags to limit access - // to the crawler. For more information, see AWS Tags in AWS Glue (http://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html). + // The tags to use with this crawler request. You may use tags to limit access + // to the crawler. For more information about tags in AWS Glue, see AWS Tags + // in AWS Glue (https://docs.aws.amazon.com/glue/latest/dg/monitor-tags.html) + // in the developer guide. Tags map[string]*string `type:"map"` // A list of collection of targets to crawl. @@ -17069,9 +18874,10 @@ type CreateJobInput struct { // * When you specify a Python shell job (JobCommand.Name="pythonshell"), // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. // - // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), - // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job - // type cannot have a fractional DPU allocation. + // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") + // or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you + // can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type + // cannot have a fractional DPU allocation. MaxCapacity *float64 `type:"double"` // The maximum number of times to retry this job if it fails. @@ -17946,6 +19752,10 @@ type CreateTableInput struct { // DatabaseName is a required field DatabaseName *string `min:"1" type:"string" required:"true"` + // A list of partition indexes, PartitionIndex structures, to create in the + // table. + PartitionIndexes []*PartitionIndex `type:"list"` + // The TableInput object that defines the metadata table to create in the catalog. // // TableInput is a required field @@ -17977,6 +19787,16 @@ func (s *CreateTableInput) Validate() error { if s.TableInput == nil { invalidParams.Add(request.NewErrParamRequired("TableInput")) } + if s.PartitionIndexes != nil { + for i, v := range s.PartitionIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PartitionIndexes", i), err.(request.ErrInvalidParams)) + } + } + } if s.TableInput != nil { if err := s.TableInput.Validate(); err != nil { invalidParams.AddNested("TableInput", err.(request.ErrInvalidParams)) @@ -18001,6 +19821,12 @@ func (s *CreateTableInput) SetDatabaseName(v string) *CreateTableInput { return s } +// SetPartitionIndexes sets the PartitionIndexes field's value. +func (s *CreateTableInput) SetPartitionIndexes(v []*PartitionIndex) *CreateTableInput { + s.PartitionIndexes = v + return s +} + // SetTableInput sets the TableInput field's value. func (s *CreateTableInput) SetTableInput(v *TableInput) *CreateTableInput { s.TableInput = v @@ -18292,6 +20118,12 @@ type CreateWorkflowInput struct { // A description of the workflow. Description *string `type:"string"` + // You can use this parameter to prevent unwanted multiple updates to data, + // to control costs, or in some cases, to prevent exceeding the maximum number + // of concurrent runs of any of the component jobs. If you leave this parameter + // blank, there is no limit to the number of concurrent workflow runs. + MaxConcurrentRuns *int64 `type:"integer"` + // The name to be assigned to the workflow. It should be unique within your // account. 
// @@ -18340,6 +20172,12 @@ func (s *CreateWorkflowInput) SetDescription(v string) *CreateWorkflowInput { return s } +// SetMaxConcurrentRuns sets the MaxConcurrentRuns field's value. +func (s *CreateWorkflowInput) SetMaxConcurrentRuns(v int64) *CreateWorkflowInput { + s.MaxConcurrentRuns = &v + return s +} + // SetName sets the Name field's value. func (s *CreateWorkflowInput) SetName(v string) *CreateWorkflowInput { s.Name = &v @@ -18651,6 +20489,9 @@ func (s *DataLakePrincipal) SetDataLakePrincipalIdentifier(v string) *DataLakePr type Database struct { _ struct{} `type:"structure"` + // The ID of the Data Catalog in which the database resides. + CatalogId *string `min:"1" type:"string"` + // Creates a set of default permissions on the table for principals. CreateTableDefaultPermissions []*PrincipalPermissions `type:"list"` @@ -18671,6 +20512,10 @@ type Database struct { // These key-value pairs define parameters and properties of the database. Parameters map[string]*string `type:"map"` + + // A DatabaseIdentifier structure that describes a target database for resource + // linking. + TargetDatabase *DatabaseIdentifier `type:"structure"` } // String returns the string representation @@ -18683,6 +20528,12 @@ func (s Database) GoString() string { return s.String() } +// SetCatalogId sets the CatalogId field's value. +func (s *Database) SetCatalogId(v string) *Database { + s.CatalogId = &v + return s +} + // SetCreateTableDefaultPermissions sets the CreateTableDefaultPermissions field's value. func (s *Database) SetCreateTableDefaultPermissions(v []*PrincipalPermissions) *Database { s.CreateTableDefaultPermissions = v @@ -18719,6 +20570,61 @@ func (s *Database) SetParameters(v map[string]*string) *Database { return s } +// SetTargetDatabase sets the TargetDatabase field's value. +func (s *Database) SetTargetDatabase(v *DatabaseIdentifier) *Database { + s.TargetDatabase = v + return s +} + +// A structure that describes a target database for resource linking. +type DatabaseIdentifier struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog in which the database resides. + CatalogId *string `min:"1" type:"string"` + + // The name of the catalog database. + DatabaseName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DatabaseIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DatabaseIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DatabaseIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DatabaseIdentifier"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *DatabaseIdentifier) SetCatalogId(v string) *DatabaseIdentifier { + s.CatalogId = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *DatabaseIdentifier) SetDatabaseName(v string) *DatabaseIdentifier { + s.DatabaseName = &v + return s +} + // The structure used to create or update a database. 
type DatabaseInput struct { _ struct{} `type:"structure"` @@ -18742,6 +20648,10 @@ type DatabaseInput struct { // // These key-value pairs define parameters and properties of the database. Parameters map[string]*string `type:"map"` + + // A DatabaseIdentifier structure that describes a target database for resource + // linking. + TargetDatabase *DatabaseIdentifier `type:"structure"` } // String returns the string representation @@ -18776,6 +20686,11 @@ func (s *DatabaseInput) Validate() error { } } } + if s.TargetDatabase != nil { + if err := s.TargetDatabase.Validate(); err != nil { + invalidParams.AddNested("TargetDatabase", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -18813,6 +20728,219 @@ func (s *DatabaseInput) SetParameters(v map[string]*string) *DatabaseInput { return s } +// SetTargetDatabase sets the TargetDatabase field's value. +func (s *DatabaseInput) SetTargetDatabase(v *DatabaseIdentifier) *DatabaseInput { + s.TargetDatabase = v + return s +} + +// Defines a date column statistics data. +type DateColumnStatisticsData struct { + _ struct{} `type:"structure"` + + // Maximum value of the column. + MaximumValue *time.Time `type:"timestamp"` + + // Minimum value of the column. + MinimumValue *time.Time `type:"timestamp"` + + // Number of distinct values. + // + // NumberOfDistinctValues is a required field + NumberOfDistinctValues *int64 `type:"long" required:"true"` + + // Number of nulls. + // + // NumberOfNulls is a required field + NumberOfNulls *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s DateColumnStatisticsData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DateColumnStatisticsData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DateColumnStatisticsData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DateColumnStatisticsData"} + if s.NumberOfDistinctValues == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfDistinctValues")) + } + if s.NumberOfNulls == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfNulls")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaximumValue sets the MaximumValue field's value. +func (s *DateColumnStatisticsData) SetMaximumValue(v time.Time) *DateColumnStatisticsData { + s.MaximumValue = &v + return s +} + +// SetMinimumValue sets the MinimumValue field's value. +func (s *DateColumnStatisticsData) SetMinimumValue(v time.Time) *DateColumnStatisticsData { + s.MinimumValue = &v + return s +} + +// SetNumberOfDistinctValues sets the NumberOfDistinctValues field's value. +func (s *DateColumnStatisticsData) SetNumberOfDistinctValues(v int64) *DateColumnStatisticsData { + s.NumberOfDistinctValues = &v + return s +} + +// SetNumberOfNulls sets the NumberOfNulls field's value. +func (s *DateColumnStatisticsData) SetNumberOfNulls(v int64) *DateColumnStatisticsData { + s.NumberOfNulls = &v + return s +} + +// Defines a decimal column statistics data. +type DecimalColumnStatisticsData struct { + _ struct{} `type:"structure"` + + // Maximum value of the column. + MaximumValue *DecimalNumber `type:"structure"` + + // Minimum value of the column. + MinimumValue *DecimalNumber `type:"structure"` + + // Number of distinct values. 
+ // + // NumberOfDistinctValues is a required field + NumberOfDistinctValues *int64 `type:"long" required:"true"` + + // Number of nulls. + // + // NumberOfNulls is a required field + NumberOfNulls *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s DecimalColumnStatisticsData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecimalColumnStatisticsData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecimalColumnStatisticsData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecimalColumnStatisticsData"} + if s.NumberOfDistinctValues == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfDistinctValues")) + } + if s.NumberOfNulls == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfNulls")) + } + if s.MaximumValue != nil { + if err := s.MaximumValue.Validate(); err != nil { + invalidParams.AddNested("MaximumValue", err.(request.ErrInvalidParams)) + } + } + if s.MinimumValue != nil { + if err := s.MinimumValue.Validate(); err != nil { + invalidParams.AddNested("MinimumValue", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaximumValue sets the MaximumValue field's value. +func (s *DecimalColumnStatisticsData) SetMaximumValue(v *DecimalNumber) *DecimalColumnStatisticsData { + s.MaximumValue = v + return s +} + +// SetMinimumValue sets the MinimumValue field's value. +func (s *DecimalColumnStatisticsData) SetMinimumValue(v *DecimalNumber) *DecimalColumnStatisticsData { + s.MinimumValue = v + return s +} + +// SetNumberOfDistinctValues sets the NumberOfDistinctValues field's value. +func (s *DecimalColumnStatisticsData) SetNumberOfDistinctValues(v int64) *DecimalColumnStatisticsData { + s.NumberOfDistinctValues = &v + return s +} + +// SetNumberOfNulls sets the NumberOfNulls field's value. +func (s *DecimalColumnStatisticsData) SetNumberOfNulls(v int64) *DecimalColumnStatisticsData { + s.NumberOfNulls = &v + return s +} + +// Contains a numeric value in decimal format. +type DecimalNumber struct { + _ struct{} `type:"structure"` + + // The scale that determines where the decimal point falls in the unscaled value. + // + // Scale is a required field + Scale *int64 `type:"integer" required:"true"` + + // The unscaled numeric value. + // + // UnscaledValue is automatically base64 encoded/decoded by the SDK. + // + // UnscaledValue is a required field + UnscaledValue []byte `type:"blob" required:"true"` +} + +// String returns the string representation +func (s DecimalNumber) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecimalNumber) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecimalNumber) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecimalNumber"} + if s.Scale == nil { + invalidParams.Add(request.NewErrParamRequired("Scale")) + } + if s.UnscaledValue == nil { + invalidParams.Add(request.NewErrParamRequired("UnscaledValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetScale sets the Scale field's value. +func (s *DecimalNumber) SetScale(v int64) *DecimalNumber { + s.Scale = &v + return s +} + +// SetUnscaledValue sets the UnscaledValue field's value. 
+func (s *DecimalNumber) SetUnscaledValue(v []byte) *DecimalNumber { + s.UnscaledValue = v + return s +} + type DeleteClassifierInput struct { _ struct{} `type:"structure"` @@ -18868,6 +20996,224 @@ func (s DeleteClassifierOutput) GoString() string { return s.String() } +type DeleteColumnStatisticsForPartitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog where the partitions in question reside. If none + // is supplied, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // Name of the column. + // + // ColumnName is a required field + ColumnName *string `min:"1" type:"string" required:"true"` + + // The name of the catalog database where the partitions reside. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // A list of partition values identifying the partition. + // + // PartitionValues is a required field + PartitionValues []*string `type:"list" required:"true"` + + // The name of the partitions' table. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteColumnStatisticsForPartitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteColumnStatisticsForPartitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteColumnStatisticsForPartitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteColumnStatisticsForPartitionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.ColumnName == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnName")) + } + if s.ColumnName != nil && len(*s.ColumnName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ColumnName", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.PartitionValues == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionValues")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *DeleteColumnStatisticsForPartitionInput) SetCatalogId(v string) *DeleteColumnStatisticsForPartitionInput { + s.CatalogId = &v + return s +} + +// SetColumnName sets the ColumnName field's value. +func (s *DeleteColumnStatisticsForPartitionInput) SetColumnName(v string) *DeleteColumnStatisticsForPartitionInput { + s.ColumnName = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *DeleteColumnStatisticsForPartitionInput) SetDatabaseName(v string) *DeleteColumnStatisticsForPartitionInput { + s.DatabaseName = &v + return s +} + +// SetPartitionValues sets the PartitionValues field's value. +func (s *DeleteColumnStatisticsForPartitionInput) SetPartitionValues(v []*string) *DeleteColumnStatisticsForPartitionInput { + s.PartitionValues = v + return s +} + +// SetTableName sets the TableName field's value. 
+func (s *DeleteColumnStatisticsForPartitionInput) SetTableName(v string) *DeleteColumnStatisticsForPartitionInput { + s.TableName = &v + return s +} + +type DeleteColumnStatisticsForPartitionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteColumnStatisticsForPartitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteColumnStatisticsForPartitionOutput) GoString() string { + return s.String() +} + +type DeleteColumnStatisticsForTableInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog where the partitions in question reside. If none + // is supplied, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // The name of the column. + // + // ColumnName is a required field + ColumnName *string `min:"1" type:"string" required:"true"` + + // The name of the catalog database where the partitions reside. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The name of the partitions' table. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteColumnStatisticsForTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteColumnStatisticsForTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteColumnStatisticsForTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteColumnStatisticsForTableInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.ColumnName == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnName")) + } + if s.ColumnName != nil && len(*s.ColumnName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ColumnName", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *DeleteColumnStatisticsForTableInput) SetCatalogId(v string) *DeleteColumnStatisticsForTableInput { + s.CatalogId = &v + return s +} + +// SetColumnName sets the ColumnName field's value. +func (s *DeleteColumnStatisticsForTableInput) SetColumnName(v string) *DeleteColumnStatisticsForTableInput { + s.ColumnName = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *DeleteColumnStatisticsForTableInput) SetDatabaseName(v string) *DeleteColumnStatisticsForTableInput { + s.DatabaseName = &v + return s +} + +// SetTableName sets the TableName field's value. 
+func (s *DeleteColumnStatisticsForTableInput) SetTableName(v string) *DeleteColumnStatisticsForTableInput { + s.TableName = &v + return s +} + +type DeleteColumnStatisticsForTableOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteColumnStatisticsForTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteColumnStatisticsForTableOutput) GoString() string { + return s.String() +} + type DeleteConnectionInput struct { _ struct{} `type:"structure"` @@ -19344,6 +21690,9 @@ type DeleteResourcePolicyInput struct { // The hash value returned when this policy was set. PolicyHashCondition *string `min:"1" type:"string"` + + // The ARN of the AWS Glue resource for the resource policy to be deleted. + ResourceArn *string `min:"1" type:"string"` } // String returns the string representation @@ -19362,6 +21711,9 @@ func (s *DeleteResourcePolicyInput) Validate() error { if s.PolicyHashCondition != nil && len(*s.PolicyHashCondition) < 1 { invalidParams.Add(request.NewErrParamMinLen("PolicyHashCondition", 1)) } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -19375,6 +21727,12 @@ func (s *DeleteResourcePolicyInput) SetPolicyHashCondition(v string) *DeleteReso return s } +// SetResourceArn sets the ResourceArn field's value. +func (s *DeleteResourcePolicyInput) SetResourceArn(v string) *DeleteResourcePolicyInput { + s.ResourceArn = &v + return s +} + type DeleteResourcePolicyOutput struct { _ struct{} `type:"structure"` } @@ -20197,12 +22555,102 @@ func (s *DevEndpointCustomLibraries) SetExtraPythonLibsS3Path(v string) *DevEndp return s } +// Defines a double column statistics data. +type DoubleColumnStatisticsData struct { + _ struct{} `type:"structure"` + + // Maximum value of the column. + MaximumValue *float64 `type:"double"` + + // Minimum value of the column. + MinimumValue *float64 `type:"double"` + + // Number of distinct values. + // + // NumberOfDistinctValues is a required field + NumberOfDistinctValues *int64 `type:"long" required:"true"` + + // Number of nulls. + // + // NumberOfNulls is a required field + NumberOfNulls *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s DoubleColumnStatisticsData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DoubleColumnStatisticsData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DoubleColumnStatisticsData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DoubleColumnStatisticsData"} + if s.NumberOfDistinctValues == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfDistinctValues")) + } + if s.NumberOfNulls == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfNulls")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaximumValue sets the MaximumValue field's value. +func (s *DoubleColumnStatisticsData) SetMaximumValue(v float64) *DoubleColumnStatisticsData { + s.MaximumValue = &v + return s +} + +// SetMinimumValue sets the MinimumValue field's value. 
+func (s *DoubleColumnStatisticsData) SetMinimumValue(v float64) *DoubleColumnStatisticsData { + s.MinimumValue = &v + return s +} + +// SetNumberOfDistinctValues sets the NumberOfDistinctValues field's value. +func (s *DoubleColumnStatisticsData) SetNumberOfDistinctValues(v int64) *DoubleColumnStatisticsData { + s.NumberOfDistinctValues = &v + return s +} + +// SetNumberOfNulls sets the NumberOfNulls field's value. +func (s *DoubleColumnStatisticsData) SetNumberOfNulls(v int64) *DoubleColumnStatisticsData { + s.NumberOfNulls = &v + return s +} + // Specifies an Amazon DynamoDB table to crawl. type DynamoDBTarget struct { _ struct{} `type:"structure"` // The name of the DynamoDB table to crawl. Path *string `type:"string"` + + // Indicates whether to scan all the records, or to sample rows from the table. + // Scanning all the records can take a long time when the table is not a high + // throughput table. + // + // A value of true means to scan all records, while a value of false means to + // sample the records. If no value is specified, the value defaults to true. + ScanAll *bool `locationName:"scanAll" type:"boolean"` + + // The percentage of the configured read capacity units to use by the AWS Glue + // crawler. Read capacity units is a term defined by DynamoDB, and is a numeric + // value that acts as rate limiter for the number of reads that can be performed + // on that table per second. + // + // The valid values are null or a value between 0.1 to 1.5. A null value is + // used when user does not provide a value, and defaults to 0.5 of the configured + // Read Capacity Unit (for provisioned tables), or 0.25 of the max configured + // Read Capacity Unit (for tables using on-demand mode). + ScanRate *float64 `locationName:"scanRate" type:"double"` } // String returns the string representation @@ -20221,8 +22669,20 @@ func (s *DynamoDBTarget) SetPath(v string) *DynamoDBTarget { return s } +// SetScanAll sets the ScanAll field's value. +func (s *DynamoDBTarget) SetScanAll(v bool) *DynamoDBTarget { + s.ScanAll = &v + return s +} + +// SetScanRate sets the ScanRate field's value. +func (s *DynamoDBTarget) SetScanRate(v float64) *DynamoDBTarget { + s.ScanRate = &v + return s +} + // An edge represents a directed connection between two AWS Glue components -// which are part of the workflow the edge belongs to. +// that are part of the workflow the edge belongs to. type Edge struct { _ struct{} `type:"structure"` @@ -20351,8 +22811,8 @@ func (s *EncryptionConfiguration) SetS3Encryption(v []*S3Encryption) *Encryption // An encryption operation failed. type EncryptionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -20370,17 +22830,17 @@ func (s EncryptionException) GoString() string { func newErrorEncryptionException(v protocol.ResponseMetadata) error { return &EncryptionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EncryptionException) Code() string { +func (s *EncryptionException) Code() string { return "GlueEncryptionException" } // Message returns the exception's message. 
-func (s EncryptionException) Message() string { +func (s *EncryptionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20388,28 +22848,28 @@ func (s EncryptionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EncryptionException) OrigErr() error { +func (s *EncryptionException) OrigErr() error { return nil } -func (s EncryptionException) Error() string { +func (s *EncryptionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EncryptionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EncryptionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EncryptionException) RequestID() string { - return s.respMetadata.RequestID +func (s *EncryptionException) RequestID() string { + return s.RespMetadata.RequestID } // A specified entity does not exist type EntityNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -20427,17 +22887,17 @@ func (s EntityNotFoundException) GoString() string { func newErrorEntityNotFoundException(v protocol.ResponseMetadata) error { return &EntityNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EntityNotFoundException) Code() string { +func (s *EntityNotFoundException) Code() string { return "EntityNotFoundException" } // Message returns the exception's message. -func (s EntityNotFoundException) Message() string { +func (s *EntityNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20445,22 +22905,22 @@ func (s EntityNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EntityNotFoundException) OrigErr() error { +func (s *EntityNotFoundException) OrigErr() error { return nil } -func (s EntityNotFoundException) Error() string { +func (s *EntityNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EntityNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EntityNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EntityNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *EntityNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about an error. @@ -21008,6 +23468,254 @@ func (s *GetClassifiersOutput) SetNextToken(v string) *GetClassifiersOutput { return s } +type GetColumnStatisticsForPartitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog where the partitions in question reside. If none + // is supplied, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // A list of the column names. + // + // ColumnNames is a required field + ColumnNames []*string `type:"list" required:"true"` + + // The name of the catalog database where the partitions reside. 
+ // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // A list of partition values identifying the partition. + // + // PartitionValues is a required field + PartitionValues []*string `type:"list" required:"true"` + + // The name of the partitions' table. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetColumnStatisticsForPartitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetColumnStatisticsForPartitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetColumnStatisticsForPartitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetColumnStatisticsForPartitionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.ColumnNames == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnNames")) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.PartitionValues == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionValues")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *GetColumnStatisticsForPartitionInput) SetCatalogId(v string) *GetColumnStatisticsForPartitionInput { + s.CatalogId = &v + return s +} + +// SetColumnNames sets the ColumnNames field's value. +func (s *GetColumnStatisticsForPartitionInput) SetColumnNames(v []*string) *GetColumnStatisticsForPartitionInput { + s.ColumnNames = v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetColumnStatisticsForPartitionInput) SetDatabaseName(v string) *GetColumnStatisticsForPartitionInput { + s.DatabaseName = &v + return s +} + +// SetPartitionValues sets the PartitionValues field's value. +func (s *GetColumnStatisticsForPartitionInput) SetPartitionValues(v []*string) *GetColumnStatisticsForPartitionInput { + s.PartitionValues = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *GetColumnStatisticsForPartitionInput) SetTableName(v string) *GetColumnStatisticsForPartitionInput { + s.TableName = &v + return s +} + +type GetColumnStatisticsForPartitionOutput struct { + _ struct{} `type:"structure"` + + // List of ColumnStatistics that failed to be retrieved. + ColumnStatisticsList []*ColumnStatistics `type:"list"` + + // Error occurred during retrieving column statistics data. + Errors []*ColumnError `type:"list"` +} + +// String returns the string representation +func (s GetColumnStatisticsForPartitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetColumnStatisticsForPartitionOutput) GoString() string { + return s.String() +} + +// SetColumnStatisticsList sets the ColumnStatisticsList field's value. 
+func (s *GetColumnStatisticsForPartitionOutput) SetColumnStatisticsList(v []*ColumnStatistics) *GetColumnStatisticsForPartitionOutput { + s.ColumnStatisticsList = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *GetColumnStatisticsForPartitionOutput) SetErrors(v []*ColumnError) *GetColumnStatisticsForPartitionOutput { + s.Errors = v + return s +} + +type GetColumnStatisticsForTableInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog where the partitions in question reside. If none + // is supplied, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // A list of the column names. + // + // ColumnNames is a required field + ColumnNames []*string `type:"list" required:"true"` + + // The name of the catalog database where the partitions reside. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The name of the partitions' table. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetColumnStatisticsForTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetColumnStatisticsForTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetColumnStatisticsForTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetColumnStatisticsForTableInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.ColumnNames == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnNames")) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *GetColumnStatisticsForTableInput) SetCatalogId(v string) *GetColumnStatisticsForTableInput { + s.CatalogId = &v + return s +} + +// SetColumnNames sets the ColumnNames field's value. +func (s *GetColumnStatisticsForTableInput) SetColumnNames(v []*string) *GetColumnStatisticsForTableInput { + s.ColumnNames = v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetColumnStatisticsForTableInput) SetDatabaseName(v string) *GetColumnStatisticsForTableInput { + s.DatabaseName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *GetColumnStatisticsForTableInput) SetTableName(v string) *GetColumnStatisticsForTableInput { + s.TableName = &v + return s +} + +type GetColumnStatisticsForTableOutput struct { + _ struct{} `type:"structure"` + + // List of ColumnStatistics that failed to be retrieved. + ColumnStatisticsList []*ColumnStatistics `type:"list"` + + // List of ColumnStatistics that failed to be retrieved. 
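A minimal sketch of calling the new GetColumnStatisticsForPartition operation with the input shape above, reusing the svc client and imports from the first sketch; the database, table, partition values, and column names are placeholders.

func showPartitionColumnStats(svc *glue.Glue) error {
	out, err := svc.GetColumnStatisticsForPartition(&glue.GetColumnStatisticsForPartitionInput{
		DatabaseName:    aws.String("sales_db"),                       // placeholder
		TableName:       aws.String("orders"),                         // placeholder
		PartitionValues: aws.StringSlice([]string{"2020", "07"}),      // placeholder partition key values
		ColumnNames:     aws.StringSlice([]string{"price", "region"}), // placeholder columns
	})
	if err != nil {
		return err
	}
	for _, cs := range out.ColumnStatisticsList {
		fmt.Printf("column %s analyzed at %v\n",
			aws.StringValue(cs.ColumnName), aws.TimeValue(cs.AnalyzedTime))
	}
	for _, e := range out.Errors {
		fmt.Printf("failed to retrieve statistics for column %s\n", aws.StringValue(e.ColumnName))
	}
	return nil
}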
+ Errors []*ColumnError `type:"list"` +} + +// String returns the string representation +func (s GetColumnStatisticsForTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetColumnStatisticsForTableOutput) GoString() string { + return s.String() +} + +// SetColumnStatisticsList sets the ColumnStatisticsList field's value. +func (s *GetColumnStatisticsForTableOutput) SetColumnStatisticsList(v []*ColumnStatistics) *GetColumnStatisticsForTableOutput { + s.ColumnStatisticsList = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *GetColumnStatisticsForTableOutput) SetErrors(v []*ColumnError) *GetColumnStatisticsForTableOutput { + s.Errors = v + return s +} + type GetConnectionInput struct { _ struct{} `type:"structure"` @@ -21103,8 +23811,7 @@ func (s *GetConnectionOutput) SetConnection(v *Connection) *GetConnectionOutput type GetConnectionsFilter struct { _ struct{} `type:"structure"` - // The type of connections to return. Currently, only JDBC is supported; SFTP - // is not supported. + // The type of connections to return. Currently, SFTP is not supported. ConnectionType *string `type:"string" enum:"ConnectionType"` // A criteria string that must match the criteria recorded in the connection @@ -21626,6 +24333,15 @@ type GetDatabasesInput struct { // A continuation token, if this is a continuation call. NextToken *string `type:"string"` + + // Allows you to specify that you want to list the databases shared with your + // account. The allowable values are FOREIGN or ALL. + // + // * If set to FOREIGN, will list the databases shared with your account. + // + // * If set to ALL, will list the databases shared with your account, as + // well as the databases in yor local account. + ResourceShareType *string `type:"string" enum:"ResourceShareType"` } // String returns the string representation @@ -21672,6 +24388,12 @@ func (s *GetDatabasesInput) SetNextToken(v string) *GetDatabasesInput { return s } +// SetResourceShareType sets the ResourceShareType field's value. +func (s *GetDatabasesInput) SetResourceShareType(v string) *GetDatabasesInput { + s.ResourceShareType = &v + return s +} + type GetDatabasesOutput struct { _ struct{} `type:"structure"` @@ -23031,6 +25753,119 @@ func (s *GetMappingOutput) SetMapping(v []*MappingEntry) *GetMappingOutput { return s } +type GetPartitionIndexesInput struct { + _ struct{} `type:"structure"` + + // The catalog ID where the table resides. + CatalogId *string `min:"1" type:"string"` + + // Specifies the name of a database from which you want to retrieve partition + // indexes. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // A continuation token, included if this is a continuation call. + NextToken *string `type:"string"` + + // Specifies the name of a table for which you want to retrieve the partition + // indexes. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPartitionIndexesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPartitionIndexesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
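A minimal sketch of the new ResourceShareType filter on GetDatabases, paging with NextToken (same client and imports as the first sketch); the string literal mirrors the FOREIGN/ALL values described in the field documentation above.

func listSharedDatabases(svc *glue.Glue) error {
	in := &glue.GetDatabasesInput{}
	in.SetResourceShareType("ALL") // "FOREIGN" would list only databases shared from other accounts
	for {
		out, err := svc.GetDatabases(in)
		if err != nil {
			return err
		}
		for _, db := range out.DatabaseList {
			fmt.Println(aws.StringValue(db.Name))
		}
		if aws.StringValue(out.NextToken) == "" {
			return nil
		}
		in.NextToken = out.NextToken
	}
}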
+func (s *GetPartitionIndexesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPartitionIndexesInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *GetPartitionIndexesInput) SetCatalogId(v string) *GetPartitionIndexesInput { + s.CatalogId = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *GetPartitionIndexesInput) SetDatabaseName(v string) *GetPartitionIndexesInput { + s.DatabaseName = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetPartitionIndexesInput) SetNextToken(v string) *GetPartitionIndexesInput { + s.NextToken = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *GetPartitionIndexesInput) SetTableName(v string) *GetPartitionIndexesInput { + s.TableName = &v + return s +} + +type GetPartitionIndexesOutput struct { + _ struct{} `type:"structure"` + + // A continuation token, present if the current list segment is not the last. + NextToken *string `type:"string"` + + // A list of index descriptors. + PartitionIndexDescriptorList []*PartitionIndexDescriptor `type:"list"` +} + +// String returns the string representation +func (s GetPartitionIndexesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPartitionIndexesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetPartitionIndexesOutput) SetNextToken(v string) *GetPartitionIndexesOutput { + s.NextToken = &v + return s +} + +// SetPartitionIndexDescriptorList sets the PartitionIndexDescriptorList field's value. +func (s *GetPartitionIndexesOutput) SetPartitionIndexDescriptorList(v []*PartitionIndexDescriptor) *GetPartitionIndexesOutput { + s.PartitionIndexDescriptorList = v + return s +} + type GetPartitionInput struct { _ struct{} `type:"structure"` @@ -23374,6 +26209,16 @@ func (s *GetPartitionsOutput) SetPartitions(v []*Partition) *GetPartitionsOutput type GetPlanInput struct { _ struct{} `type:"structure"` + // A map to hold additional optional key-value parameters. + // + // Currently, these key-value pairs are supported: + // + // * inferSchema — Specifies whether to set inferSchema to true or false + // for the default script generated by an AWS Glue job. For example, to set + // inferSchema to true, pass the following key value pair: --additional-plan-options-map + // '{"inferSchema":"true"}' + AdditionalPlanOptionsMap map[string]*string `type:"map"` + // The programming language of the code to perform the mapping. Language *string `type:"string" enum:"Language"` @@ -23440,6 +26285,12 @@ func (s *GetPlanInput) Validate() error { return nil } +// SetAdditionalPlanOptionsMap sets the AdditionalPlanOptionsMap field's value. 
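A minimal sketch of listing a table's partition indexes with the new GetPartitionIndexes shapes, printing each index name, status, and key schema (same client and imports as the first sketch).

func listPartitionIndexes(svc *glue.Glue, database, table string) error {
	out, err := svc.GetPartitionIndexes(&glue.GetPartitionIndexesInput{
		DatabaseName: aws.String(database),
		TableName:    aws.String(table),
	})
	if err != nil {
		return err
	}
	for _, d := range out.PartitionIndexDescriptorList {
		fmt.Printf("index %s (%s), keys:", aws.StringValue(d.IndexName), aws.StringValue(d.IndexStatus))
		for _, k := range d.Keys {
			fmt.Printf(" %s:%s", aws.StringValue(k.Name), aws.StringValue(k.Type))
		}
		fmt.Println()
	}
	return nil
}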
+func (s *GetPlanInput) SetAdditionalPlanOptionsMap(v map[string]*string) *GetPlanInput { + s.AdditionalPlanOptionsMap = v + return s +} + // SetLanguage sets the Language field's value. func (s *GetPlanInput) SetLanguage(v string) *GetPlanInput { s.Language = &v @@ -23502,8 +26353,92 @@ func (s *GetPlanOutput) SetScalaCode(v string) *GetPlanOutput { return s } +type GetResourcePoliciesInput struct { + _ struct{} `type:"structure"` + + // The maximum size of a list to return. + MaxResults *int64 `min:"1" type:"integer"` + + // A continuation token, if this is a continuation request. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetResourcePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResourcePoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetResourcePoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetResourcePoliciesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetResourcePoliciesInput) SetMaxResults(v int64) *GetResourcePoliciesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetResourcePoliciesInput) SetNextToken(v string) *GetResourcePoliciesInput { + s.NextToken = &v + return s +} + +type GetResourcePoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the individual resource policies and the account-level resource + // policy. + GetResourcePoliciesResponseList []*GluePolicy `type:"list"` + + // A continuation token, if the returned list does not contain the last resource + // policy available. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s GetResourcePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResourcePoliciesOutput) GoString() string { + return s.String() +} + +// SetGetResourcePoliciesResponseList sets the GetResourcePoliciesResponseList field's value. +func (s *GetResourcePoliciesOutput) SetGetResourcePoliciesResponseList(v []*GluePolicy) *GetResourcePoliciesOutput { + s.GetResourcePoliciesResponseList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetResourcePoliciesOutput) SetNextToken(v string) *GetResourcePoliciesOutput { + s.NextToken = &v + return s +} + type GetResourcePolicyInput struct { _ struct{} `type:"structure"` + + // The ARN of the AWS Glue resource for the resource policy to be retrieved. + // For more information about AWS Glue resource ARNs, see the AWS Glue ARN string + // pattern (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-common.html#aws-glue-api-regex-aws-glue-arn-id) + ResourceArn *string `min:"1" type:"string"` } // String returns the string representation @@ -23516,6 +26451,25 @@ func (s GetResourcePolicyInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
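A minimal sketch of passing the inferSchema option described above through AdditionalPlanOptionsMap on GetPlan; the source table and the single-column mapping are placeholders (same client and imports as the first sketch).

func generatePlanWithInferSchema(svc *glue.Glue) error {
	out, err := svc.GetPlan(&glue.GetPlanInput{
		// Placeholder catalog source and a trivial one-column mapping.
		Source: &glue.CatalogEntry{
			DatabaseName: aws.String("sales_db"),
			TableName:    aws.String("orders"),
		},
		Mapping: []*glue.MappingEntry{{
			SourceTable: aws.String("orders"),
			SourcePath:  aws.String("price"),
			SourceType:  aws.String("double"),
			TargetTable: aws.String("orders_out"),
			TargetPath:  aws.String("price"),
			TargetType:  aws.String("double"),
		}},
		Language: aws.String("PYTHON"),
		// Ask the generated default script to set inferSchema to true, per the field doc above.
		AdditionalPlanOptionsMap: map[string]*string{"inferSchema": aws.String("true")},
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.PythonScript))
	return nil
}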
+func (s *GetResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetResourcePolicyInput"} + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *GetResourcePolicyInput) SetResourceArn(v string) *GetResourcePolicyInput { + s.ResourceArn = &v + return s +} + type GetResourcePolicyOutput struct { _ struct{} `type:"structure"` @@ -24474,10 +27428,10 @@ type GetUserDefinedFunctionsInput struct { // If none is provided, the AWS account ID is used by default. CatalogId *string `min:"1" type:"string"` - // The name of the catalog database where the functions are located. - // - // DatabaseName is a required field - DatabaseName *string `min:"1" type:"string" required:"true"` + // The name of the catalog database where the functions are located. If none + // is provided, functions from all the databases across the catalog will be + // returned. + DatabaseName *string `min:"1" type:"string"` // The maximum number of functions to return in one response. MaxResults *int64 `min:"1" type:"integer"` @@ -24508,9 +27462,6 @@ func (s *GetUserDefinedFunctionsInput) Validate() error { if s.CatalogId != nil && len(*s.CatalogId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) } - if s.DatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("DatabaseName")) - } if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) } @@ -24941,6 +27892,57 @@ func (s *GetWorkflowRunsOutput) SetRuns(v []*WorkflowRun) *GetWorkflowRunsOutput return s } +// A structure for returning a resource policy. +type GluePolicy struct { + _ struct{} `type:"structure"` + + // The date and time at which the policy was created. + CreateTime *time.Time `type:"timestamp"` + + // Contains the hash value associated with this policy. + PolicyHash *string `min:"1" type:"string"` + + // Contains the requested policy document, in JSON format. + PolicyInJson *string `min:"2" type:"string"` + + // The date and time at which the policy was last updated. + UpdateTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s GluePolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GluePolicy) GoString() string { + return s.String() +} + +// SetCreateTime sets the CreateTime field's value. +func (s *GluePolicy) SetCreateTime(v time.Time) *GluePolicy { + s.CreateTime = &v + return s +} + +// SetPolicyHash sets the PolicyHash field's value. +func (s *GluePolicy) SetPolicyHash(v string) *GluePolicy { + s.PolicyHash = &v + return s +} + +// SetPolicyInJson sets the PolicyInJson field's value. +func (s *GluePolicy) SetPolicyInJson(v string) *GluePolicy { + s.PolicyInJson = &v + return s +} + +// SetUpdateTime sets the UpdateTime field's value. +func (s *GluePolicy) SetUpdateTime(v time.Time) *GluePolicy { + s.UpdateTime = &v + return s +} + // A classifier that uses grok patterns. type GrokClassifier struct { _ struct{} `type:"structure"` @@ -24955,11 +27957,11 @@ type GrokClassifier struct { CreationTime *time.Time `type:"timestamp"` // Optional custom grok patterns defined by this classifier. 
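A minimal sketch of paging through the new GetResourcePolicies operation, which returns the GluePolicy structures defined in the hunk above (same client and imports as the first sketch).

func listResourcePolicies(svc *glue.Glue) error {
	in := &glue.GetResourcePoliciesInput{MaxResults: aws.Int64(25)}
	for {
		out, err := svc.GetResourcePolicies(in)
		if err != nil {
			return err
		}
		for _, p := range out.GetResourcePoliciesResponseList {
			fmt.Printf("policy hash %s, last updated %v\n",
				aws.StringValue(p.PolicyHash), aws.TimeValue(p.UpdateTime))
		}
		if aws.StringValue(out.NextToken) == "" {
			return nil
		}
		in.NextToken = out.NextToken
	}
}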
For more information, - // see custom patterns in Writing Custom Classifiers (http://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html). + // see custom patterns in Writing Custom Classifiers (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html). CustomPatterns *string `type:"string"` // The grok pattern applied to a data store by this classifier. For more information, - // see built-in patterns in Writing Custom Classifiers (http://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html). + // see built-in patterns in Writing Custom Classifiers (https://docs.aws.amazon.com/glue/latest/dg/custom-classifier.html). // // GrokPattern is a required field GrokPattern *string `min:"1" type:"string" required:"true"` @@ -25030,8 +28032,8 @@ func (s *GrokClassifier) SetVersion(v int64) *GrokClassifier { // The same unique identifier was associated with two different records. type IdempotentParameterMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -25049,17 +28051,17 @@ func (s IdempotentParameterMismatchException) GoString() string { func newErrorIdempotentParameterMismatchException(v protocol.ResponseMetadata) error { return &IdempotentParameterMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IdempotentParameterMismatchException) Code() string { +func (s *IdempotentParameterMismatchException) Code() string { return "IdempotentParameterMismatchException" } // Message returns the exception's message. -func (s IdempotentParameterMismatchException) Message() string { +func (s *IdempotentParameterMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25067,22 +28069,79 @@ func (s IdempotentParameterMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IdempotentParameterMismatchException) OrigErr() error { +func (s *IdempotentParameterMismatchException) OrigErr() error { return nil } -func (s IdempotentParameterMismatchException) Error() string { +func (s *IdempotentParameterMismatchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IdempotentParameterMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IdempotentParameterMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IdempotentParameterMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *IdempotentParameterMismatchException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The workflow is in an invalid state to perform a requested operation. +type IllegalWorkflowStateException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // A message describing the problem. 
+ Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s IllegalWorkflowStateException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IllegalWorkflowStateException) GoString() string { + return s.String() +} + +func newErrorIllegalWorkflowStateException(v protocol.ResponseMetadata) error { + return &IllegalWorkflowStateException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *IllegalWorkflowStateException) Code() string { + return "IllegalWorkflowStateException" +} + +// Message returns the exception's message. +func (s *IllegalWorkflowStateException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *IllegalWorkflowStateException) OrigErr() error { + return nil +} + +func (s *IllegalWorkflowStateException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *IllegalWorkflowStateException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *IllegalWorkflowStateException) RequestID() string { + return s.RespMetadata.RequestID } type ImportCatalogToGlueInput struct { @@ -25172,8 +28231,8 @@ func (s *ImportLabelsTaskRunProperties) SetReplace(v bool) *ImportLabelsTaskRunP // An internal service error occurred. type InternalServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -25191,17 +28250,17 @@ func (s InternalServiceException) GoString() string { func newErrorInternalServiceException(v protocol.ResponseMetadata) error { return &InternalServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceException) Code() string { +func (s *InternalServiceException) Code() string { return "InternalServiceException" } // Message returns the exception's message. -func (s InternalServiceException) Message() string { +func (s *InternalServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25209,28 +28268,28 @@ func (s InternalServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServiceException) OrigErr() error { +func (s *InternalServiceException) OrigErr() error { return nil } -func (s InternalServiceException) Error() string { +func (s *InternalServiceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceException) RequestID() string { + return s.RespMetadata.RequestID } // The input provided was not valid. 
type InvalidInputException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -25248,17 +28307,17 @@ func (s InvalidInputException) GoString() string { func newErrorInvalidInputException(v protocol.ResponseMetadata) error { return &InvalidInputException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInputException) Code() string { +func (s *InvalidInputException) Code() string { return "InvalidInputException" } // Message returns the exception's message. -func (s InvalidInputException) Message() string { +func (s *InvalidInputException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25266,22 +28325,22 @@ func (s InvalidInputException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInputException) OrigErr() error { +func (s *InvalidInputException) OrigErr() error { return nil } -func (s InvalidInputException) Error() string { +func (s *InvalidInputException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInputException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInputException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInputException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInputException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies a JDBC data store to crawl. @@ -25292,7 +28351,7 @@ type JdbcTarget struct { ConnectionName *string `type:"string"` // A list of glob patterns used to exclude from the crawl. For more information, - // see Catalog Tables with a Crawler (http://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). + // see Catalog Tables with a Crawler (https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). Exclusions []*string `type:"list"` // The path of the JDBC target. @@ -25397,14 +28456,16 @@ type Job struct { // Do not set Max Capacity if using WorkerType and NumberOfWorkers. // // The value that can be allocated for MaxCapacity depends on whether you are - // running a Python shell job or an Apache Spark ETL job: + // running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming + // ETL job: // // * When you specify a Python shell job (JobCommand.Name="pythonshell"), // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. // - // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), - // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job - // type cannot have a fractional DPU allocation. + // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") + // or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you + // can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type + // cannot have a fractional DPU allocation. MaxCapacity *float64 `type:"double"` // The maximum number of times to retry this job after a JobRun fails. @@ -25700,7 +28761,8 @@ type JobCommand struct { _ struct{} `type:"structure"` // The name of the job command. For an Apache Spark ETL job, this must be glueetl. 
- // For a Python shell job, it must be pythonshell. + // For a Python shell job, it must be pythonshell. For an Apache Spark streaming + // ETL job, this must be gluestreaming. Name *string `type:"string"` // The Python version being used to execute a Python shell job. Allowed values @@ -25823,7 +28885,8 @@ type JobRun struct { // The name of the job definition being used in this run. JobName *string `min:"1" type:"string"` - // The current state of the job run. + // The current state of the job run. For more information about the statuses + // of jobs that have terminated abnormally, see AWS Glue Job Run Statuses (https://docs.aws.amazon.com/glue/latest/dg/job-run-statuses.html). JobRunState *string `type:"string" enum:"JobRunState"` // The last time that this job run was modified. @@ -26111,9 +29174,10 @@ type JobUpdate struct { // * When you specify a Python shell job (JobCommand.Name="pythonshell"), // you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. // - // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl"), - // you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job - // type cannot have a fractional DPU allocation. + // * When you specify an Apache Spark ETL job (JobCommand.Name="glueetl") + // or Apache Spark streaming ETL job (JobCommand.Name="gluestreaming"), you + // can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type + // cannot have a fractional DPU allocation. MaxCapacity *float64 `type:"double"` // The maximum number of times to retry this job if it fails. @@ -26362,6 +29426,43 @@ func (s *JsonClassifier) SetVersion(v int64) *JsonClassifier { return s } +// A partition key pair consisting of a name and a type. +type KeySchemaElement struct { + _ struct{} `type:"structure"` + + // The name of a partition key. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The type of a partition key. + // + // Type is a required field + Type *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s KeySchemaElement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeySchemaElement) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *KeySchemaElement) SetName(v string) *KeySchemaElement { + s.Name = &v + return s +} + +// SetType sets the Type field's value. +func (s *KeySchemaElement) SetType(v string) *KeySchemaElement { + s.Type = &v + return s +} + // Specifies configuration properties for a labeling set generation task run. type LabelingSetGenerationTaskRunProperties struct { _ struct{} `type:"structure"` @@ -27098,6 +30199,77 @@ func (s *Location) SetS3(v []*CodeGenNodeArg) *Location { return s } +// Defines a long column statistics data. +type LongColumnStatisticsData struct { + _ struct{} `type:"structure"` + + // Maximum value of the column. + MaximumValue *int64 `type:"long"` + + // Minimum value of the column. + MinimumValue *int64 `type:"long"` + + // Number of distinct values. + // + // NumberOfDistinctValues is a required field + NumberOfDistinctValues *int64 `type:"long" required:"true"` + + // Number of nulls. 
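A minimal sketch of creating a streaming ETL job with the gluestreaming command name and a whole-DPU MaxCapacity, as described in the JobCommand and MaxCapacity comments above; the job name, role ARN, and script location are placeholders (same client and imports as the first sketch).

func createStreamingJob(svc *glue.Glue) error {
	_, err := svc.CreateJob(&glue.CreateJobInput{
		Name: aws.String("example-streaming-job"),                         // placeholder
		Role: aws.String("arn:aws:iam::123456789012:role/GlueJobRole"),    // placeholder
		Command: &glue.JobCommand{
			Name:           aws.String("gluestreaming"),
			ScriptLocation: aws.String("s3://example-bucket/scripts/stream.py"), // placeholder
		},
		// Streaming ETL jobs take whole DPUs; 2 is the minimum noted above.
		MaxCapacity: aws.Float64(2),
	})
	return err
}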
+ // + // NumberOfNulls is a required field + NumberOfNulls *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s LongColumnStatisticsData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LongColumnStatisticsData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LongColumnStatisticsData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LongColumnStatisticsData"} + if s.NumberOfDistinctValues == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfDistinctValues")) + } + if s.NumberOfNulls == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfNulls")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaximumValue sets the MaximumValue field's value. +func (s *LongColumnStatisticsData) SetMaximumValue(v int64) *LongColumnStatisticsData { + s.MaximumValue = &v + return s +} + +// SetMinimumValue sets the MinimumValue field's value. +func (s *LongColumnStatisticsData) SetMinimumValue(v int64) *LongColumnStatisticsData { + s.MinimumValue = &v + return s +} + +// SetNumberOfDistinctValues sets the NumberOfDistinctValues field's value. +func (s *LongColumnStatisticsData) SetNumberOfDistinctValues(v int64) *LongColumnStatisticsData { + s.NumberOfDistinctValues = &v + return s +} + +// SetNumberOfNulls sets the NumberOfNulls field's value. +func (s *LongColumnStatisticsData) SetNumberOfNulls(v int64) *LongColumnStatisticsData { + s.NumberOfNulls = &v + return s +} + // A structure for a machine learning transform. type MLTransform struct { _ struct{} `type:"structure"` @@ -27348,8 +30520,8 @@ func (s *MLTransform) SetWorkerType(v string) *MLTransform { // The machine learning transform is not ready to run. type MLTransformNotReadyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -27367,17 +30539,17 @@ func (s MLTransformNotReadyException) GoString() string { func newErrorMLTransformNotReadyException(v protocol.ResponseMetadata) error { return &MLTransformNotReadyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MLTransformNotReadyException) Code() string { +func (s *MLTransformNotReadyException) Code() string { return "MLTransformNotReadyException" } // Message returns the exception's message. -func (s MLTransformNotReadyException) Message() string { +func (s *MLTransformNotReadyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27385,22 +30557,22 @@ func (s MLTransformNotReadyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MLTransformNotReadyException) OrigErr() error { +func (s *MLTransformNotReadyException) OrigErr() error { return nil } -func (s MLTransformNotReadyException) Error() string { +func (s *MLTransformNotReadyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s MLTransformNotReadyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MLTransformNotReadyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MLTransformNotReadyException) RequestID() string { - return s.respMetadata.RequestID +func (s *MLTransformNotReadyException) RequestID() string { + return s.RespMetadata.RequestID } // Defines a mapping. @@ -27472,10 +30644,58 @@ func (s *MappingEntry) SetTargetType(v string) *MappingEntry { return s } +// Specifies an Amazon DocumentDB or MongoDB data store to crawl. +type MongoDBTarget struct { + _ struct{} `type:"structure"` + + // The name of the connection to use to connect to the Amazon DocumentDB or + // MongoDB target. + ConnectionName *string `type:"string"` + + // The path of the Amazon DocumentDB or MongoDB target (database/collection). + Path *string `type:"string"` + + // Indicates whether to scan all the records, or to sample rows from the table. + // Scanning all the records can take a long time when the table is not a high + // throughput table. + // + // A value of true means to scan all records, while a value of false means to + // sample the records. If no value is specified, the value defaults to true. + ScanAll *bool `type:"boolean"` +} + +// String returns the string representation +func (s MongoDBTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MongoDBTarget) GoString() string { + return s.String() +} + +// SetConnectionName sets the ConnectionName field's value. +func (s *MongoDBTarget) SetConnectionName(v string) *MongoDBTarget { + s.ConnectionName = &v + return s +} + +// SetPath sets the Path field's value. +func (s *MongoDBTarget) SetPath(v string) *MongoDBTarget { + s.Path = &v + return s +} + +// SetScanAll sets the ScanAll field's value. +func (s *MongoDBTarget) SetScanAll(v bool) *MongoDBTarget { + s.ScanAll = &v + return s +} + // There is no applicable schedule. type NoScheduleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -27493,17 +30713,17 @@ func (s NoScheduleException) GoString() string { func newErrorNoScheduleException(v protocol.ResponseMetadata) error { return &NoScheduleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoScheduleException) Code() string { +func (s *NoScheduleException) Code() string { return "NoScheduleException" } // Message returns the exception's message. -func (s NoScheduleException) Message() string { +func (s *NoScheduleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27511,26 +30731,26 @@ func (s NoScheduleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoScheduleException) OrigErr() error { +func (s *NoScheduleException) OrigErr() error { return nil } -func (s NoScheduleException) Error() string { +func (s *NoScheduleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
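A minimal sketch of crawling an Amazon DocumentDB/MongoDB collection with the new MongoDBTarget; it assumes CrawlerTargets in this SDK version exposes a MongoDBTargets field, and the connection name, path, crawler name, and role ARN are placeholders (same client and imports as the first sketch).

func createMongoDBCrawler(svc *glue.Glue) error {
	target := &glue.MongoDBTarget{}
	target.SetConnectionName("my-docdb-connection") // placeholder Glue connection
	target.SetPath("salesdb/orders")                // database/collection
	target.SetScanAll(false)                        // sample rather than scan every record

	_, err := svc.CreateCrawler(&glue.CreateCrawlerInput{
		Name: aws.String("example-mongodb-crawler"),                       // placeholder
		Role: aws.String("arn:aws:iam::123456789012:role/GlueCrawlerRole"), // placeholder
		// Assumes CrawlerTargets includes MongoDBTargets in this vendored SDK version.
		Targets: &glue.CrawlerTargets{MongoDBTargets: []*glue.MongoDBTarget{target}},
	})
	return err
}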
-func (s NoScheduleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoScheduleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoScheduleException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoScheduleException) RequestID() string { + return s.RespMetadata.RequestID } -// A node represents an AWS Glue component like Trigger, Job etc. which is part -// of a workflow. +// A node represents an AWS Glue component such as a trigger, or job, etc., +// that is part of a workflow. type Node struct { _ struct{} `type:"structure"` @@ -27639,8 +30859,8 @@ func (s *NotificationProperty) SetNotifyDelayAfter(v int64) *NotificationPropert // The operation timed out. type OperationTimeoutException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -27658,17 +30878,17 @@ func (s OperationTimeoutException) GoString() string { func newErrorOperationTimeoutException(v protocol.ResponseMetadata) error { return &OperationTimeoutException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationTimeoutException) Code() string { +func (s *OperationTimeoutException) Code() string { return "OperationTimeoutException" } // Message returns the exception's message. -func (s OperationTimeoutException) Message() string { +func (s *OperationTimeoutException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27676,22 +30896,22 @@ func (s OperationTimeoutException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OperationTimeoutException) OrigErr() error { +func (s *OperationTimeoutException) OrigErr() error { return nil } -func (s OperationTimeoutException) Error() string { +func (s *OperationTimeoutException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OperationTimeoutException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OperationTimeoutException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationTimeoutException) RequestID() string { - return s.respMetadata.RequestID +func (s *OperationTimeoutException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the sort order of a sorted column. @@ -27755,6 +30975,9 @@ func (s *Order) SetSortOrder(v int64) *Order { type Partition struct { _ struct{} `type:"structure"` + // The ID of the Data Catalog in which the partition resides. + CatalogId *string `min:"1" type:"string"` + // The time at which the partition was created. CreationTime *time.Time `type:"timestamp"` @@ -27790,6 +31013,12 @@ func (s Partition) GoString() string { return s.String() } +// SetCatalogId sets the CatalogId field's value. +func (s *Partition) SetCatalogId(v string) *Partition { + s.CatalogId = &v + return s +} + // SetCreationTime sets the CreationTime field's value. func (s *Partition) SetCreationTime(v time.Time) *Partition { s.CreationTime = &v @@ -27871,6 +31100,114 @@ func (s *PartitionError) SetPartitionValues(v []*string) *PartitionError { return s } +// A structure for a partition index. 
+type PartitionIndex struct { + _ struct{} `type:"structure"` + + // The name of the partition index. + // + // IndexName is a required field + IndexName *string `min:"1" type:"string" required:"true"` + + // The keys for the partition index. + // + // Keys is a required field + Keys []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PartitionIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PartitionIndex) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PartitionIndex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PartitionIndex"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 1)) + } + if s.Keys == nil { + invalidParams.Add(request.NewErrParamRequired("Keys")) + } + if s.Keys != nil && len(s.Keys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Keys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *PartitionIndex) SetIndexName(v string) *PartitionIndex { + s.IndexName = &v + return s +} + +// SetKeys sets the Keys field's value. +func (s *PartitionIndex) SetKeys(v []*string) *PartitionIndex { + s.Keys = v + return s +} + +// A descriptor for a partition index in a table. +type PartitionIndexDescriptor struct { + _ struct{} `type:"structure"` + + // The name of the partition index. + // + // IndexName is a required field + IndexName *string `min:"1" type:"string" required:"true"` + + // The status of the partition index. + // + // IndexStatus is a required field + IndexStatus *string `type:"string" required:"true" enum:"PartitionIndexStatus"` + + // A list of one or more keys, as KeySchemaElement structures, for the partition + // index. + // + // Keys is a required field + Keys []*KeySchemaElement `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s PartitionIndexDescriptor) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PartitionIndexDescriptor) GoString() string { + return s.String() +} + +// SetIndexName sets the IndexName field's value. +func (s *PartitionIndexDescriptor) SetIndexName(v string) *PartitionIndexDescriptor { + s.IndexName = &v + return s +} + +// SetIndexStatus sets the IndexStatus field's value. +func (s *PartitionIndexDescriptor) SetIndexStatus(v string) *PartitionIndexDescriptor { + s.IndexStatus = &v + return s +} + +// SetKeys sets the Keys field's value. +func (s *PartitionIndexDescriptor) SetKeys(v []*KeySchemaElement) *PartitionIndexDescriptor { + s.Keys = v + return s +} + // The structure used to create and update a partition. type PartitionInput struct { _ struct{} `type:"structure"` @@ -28302,6 +31639,15 @@ func (s PutDataCatalogEncryptionSettingsOutput) GoString() string { type PutResourcePolicyInput struct { _ struct{} `type:"structure"` + // Allows you to specify if you want to use both resource-level and account/catalog-level + // resource policies. A resource-level policy is a policy attached to an individual + // resource such as a database or a table. 
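A minimal sketch of declaring a PartitionIndex at table-creation time; it assumes CreateTableInput in this SDK version accepts a PartitionIndexes field (that hunk is not shown here), and all names and the S3 location are placeholders (same client and imports as the first sketch).

func createTableWithPartitionIndex(svc *glue.Glue) error {
	index := &glue.PartitionIndex{
		IndexName: aws.String("year_month_idx"), // placeholder
		Keys:      aws.StringSlice([]string{"year", "month"}),
	}
	_, err := svc.CreateTable(&glue.CreateTableInput{
		DatabaseName: aws.String("sales_db"), // placeholder
		TableInput: &glue.TableInput{
			Name: aws.String("orders"),
			PartitionKeys: []*glue.Column{
				{Name: aws.String("year"), Type: aws.String("string")},
				{Name: aws.String("month"), Type: aws.String("string")},
			},
			StorageDescriptor: &glue.StorageDescriptor{
				Columns:  []*glue.Column{{Name: aws.String("price"), Type: aws.String("double")}},
				Location: aws.String("s3://example-bucket/orders/"), // placeholder
			},
		},
		// Assumed field; the index keys must come from the table's partition keys.
		PartitionIndexes: []*glue.PartitionIndex{index},
	})
	return err
}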
+ // + // The default value of NO indicates that resource-level policies cannot co-exist + // with an account-level policy. A value of YES means the use of both resource-level + // and account/catalog-level resource policies is allowed. + EnableHybrid *string `type:"string" enum:"EnableHybridValues"` + // A value of MUST_EXIST is used to update a policy. A value of NOT_EXIST is // used to create a new policy. If a value of NONE or a null value is used, // the call will not depend on the existence of a policy. @@ -28316,6 +31662,11 @@ type PutResourcePolicyInput struct { // // PolicyInJson is a required field PolicyInJson *string `min:"2" type:"string" required:"true"` + + // The ARN of the AWS Glue resource for the resource policy to be set. For more + // information about AWS Glue resource ARNs, see the AWS Glue ARN string pattern + // (https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-common.html#aws-glue-api-regex-aws-glue-arn-id) + ResourceArn *string `min:"1" type:"string"` } // String returns the string representation @@ -28340,6 +31691,9 @@ func (s *PutResourcePolicyInput) Validate() error { if s.PolicyInJson != nil && len(*s.PolicyInJson) < 2 { invalidParams.Add(request.NewErrParamMinLen("PolicyInJson", 2)) } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -28347,6 +31701,12 @@ func (s *PutResourcePolicyInput) Validate() error { return nil } +// SetEnableHybrid sets the EnableHybrid field's value. +func (s *PutResourcePolicyInput) SetEnableHybrid(v string) *PutResourcePolicyInput { + s.EnableHybrid = &v + return s +} + // SetPolicyExistsCondition sets the PolicyExistsCondition field's value. func (s *PutResourcePolicyInput) SetPolicyExistsCondition(v string) *PutResourcePolicyInput { s.PolicyExistsCondition = &v @@ -28365,6 +31725,12 @@ func (s *PutResourcePolicyInput) SetPolicyInJson(v string) *PutResourcePolicyInp return s } +// SetResourceArn sets the ResourceArn field's value. +func (s *PutResourcePolicyInput) SetResourceArn(v string) *PutResourcePolicyInput { + s.ResourceArn = &v + return s +} + type PutResourcePolicyOutput struct { _ struct{} `type:"structure"` @@ -28547,8 +31913,8 @@ func (s *ResetJobBookmarkOutput) SetJobBookmarkEntry(v *JobBookmarkEntry) *Reset // A resource numerical limit was exceeded. type ResourceNumberLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -28566,17 +31932,17 @@ func (s ResourceNumberLimitExceededException) GoString() string { func newErrorResourceNumberLimitExceededException(v protocol.ResponseMetadata) error { return &ResourceNumberLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNumberLimitExceededException) Code() string { +func (s *ResourceNumberLimitExceededException) Code() string { return "ResourceNumberLimitExceededException" } // Message returns the exception's message. 
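A minimal sketch of the resource-scoped PutResourcePolicy call enabled by the new ResourceArn field; the table ARN is a placeholder, and EnableHybrid is left unset because its enum values are not shown in this hunk (same client and imports as the first sketch).

func putTablePolicy(svc *glue.Glue, policyJSON string) error {
	_, err := svc.PutResourcePolicy(&glue.PutResourcePolicyInput{
		PolicyInJson: aws.String(policyJSON),
		// Scope the policy to a single table rather than the whole catalog.
		ResourceArn: aws.String("arn:aws:glue:us-east-1:123456789012:table/sales_db/orders"), // placeholder
		// NOT_EXIST creates a new policy, MUST_EXIST updates one (see the field doc above).
		PolicyExistsCondition: aws.String("NOT_EXIST"),
		// EnableHybrid is left unset here; see the field documentation above for its semantics.
	})
	return err
}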
-func (s ResourceNumberLimitExceededException) Message() string { +func (s *ResourceNumberLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28584,22 +31950,22 @@ func (s ResourceNumberLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNumberLimitExceededException) OrigErr() error { +func (s *ResourceNumberLimitExceededException) OrigErr() error { return nil } -func (s ResourceNumberLimitExceededException) Error() string { +func (s *ResourceNumberLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNumberLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNumberLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNumberLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNumberLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The URIs for function resources. @@ -28648,6 +32014,112 @@ func (s *ResourceUri) SetUri(v string) *ResourceUri { return s } +type ResumeWorkflowRunInput struct { + _ struct{} `type:"structure"` + + // The name of the workflow to resume. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A list of the node IDs for the nodes you want to restart. The nodes that + // are to be restarted must have a run attempt in the original run. + // + // NodeIds is a required field + NodeIds []*string `type:"list" required:"true"` + + // The ID of the workflow run to resume. + // + // RunId is a required field + RunId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResumeWorkflowRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResumeWorkflowRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResumeWorkflowRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResumeWorkflowRunInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.NodeIds == nil { + invalidParams.Add(request.NewErrParamRequired("NodeIds")) + } + if s.RunId == nil { + invalidParams.Add(request.NewErrParamRequired("RunId")) + } + if s.RunId != nil && len(*s.RunId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RunId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *ResumeWorkflowRunInput) SetName(v string) *ResumeWorkflowRunInput { + s.Name = &v + return s +} + +// SetNodeIds sets the NodeIds field's value. +func (s *ResumeWorkflowRunInput) SetNodeIds(v []*string) *ResumeWorkflowRunInput { + s.NodeIds = v + return s +} + +// SetRunId sets the RunId field's value. +func (s *ResumeWorkflowRunInput) SetRunId(v string) *ResumeWorkflowRunInput { + s.RunId = &v + return s +} + +type ResumeWorkflowRunOutput struct { + _ struct{} `type:"structure"` + + // A list of the node IDs for the nodes that were actually restarted. 
+ NodeIds []*string `type:"list"` + + // The new ID assigned to the resumed workflow run. Each resume of a workflow + // run will have a new run ID. + RunId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResumeWorkflowRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResumeWorkflowRunOutput) GoString() string { + return s.String() +} + +// SetNodeIds sets the NodeIds field's value. +func (s *ResumeWorkflowRunOutput) SetNodeIds(v []*string) *ResumeWorkflowRunOutput { + s.NodeIds = v + return s +} + +// SetRunId sets the RunId field's value. +func (s *ResumeWorkflowRunOutput) SetRunId(v string) *ResumeWorkflowRunOutput { + s.RunId = &v + return s +} + // Specifies how Amazon Simple Storage Service (Amazon S3) data should be encrypted. type S3Encryption struct { _ struct{} `type:"structure"` @@ -28685,8 +32157,12 @@ func (s *S3Encryption) SetS3EncryptionMode(v string) *S3Encryption { type S3Target struct { _ struct{} `type:"structure"` + // The name of a connection which allows a job or crawler to access data in + // Amazon S3 within an Amazon Virtual Private Cloud environment (Amazon VPC). + ConnectionName *string `type:"string"` + // A list of glob patterns used to exclude from the crawl. For more information, - // see Catalog Tables with a Crawler (http://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). + // see Catalog Tables with a Crawler (https://docs.aws.amazon.com/glue/latest/dg/add-crawler.html). Exclusions []*string `type:"list"` // The path to the Amazon S3 target. @@ -28703,6 +32179,12 @@ func (s S3Target) GoString() string { return s.String() } +// SetConnectionName sets the ConnectionName field's value. +func (s *S3Target) SetConnectionName(v string) *S3Target { + s.ConnectionName = &v + return s +} + // SetExclusions sets the Exclusions field's value. func (s *S3Target) SetExclusions(v []*string) *S3Target { s.Exclusions = v @@ -28719,10 +32201,10 @@ func (s *S3Target) SetPath(v string) *S3Target { type Schedule struct { _ struct{} `type:"structure"` - // A cron expression used to specify the schedule. For more information, see - // Time-Based Schedules for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, specify cron(15 12 - // * * ? *). + // A cron expression used to specify the schedule (see Time-Based Schedules + // for Jobs and Crawlers (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). + // For example, to run something every day at 12:15 UTC, you would specify: + // cron(15 12 * * ? *). ScheduleExpression *string `type:"string"` // The state of the schedule. @@ -28753,8 +32235,8 @@ func (s *Schedule) SetState(v string) *Schedule { // The specified scheduler is not running. type SchedulerNotRunningException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -28772,17 +32254,17 @@ func (s SchedulerNotRunningException) GoString() string { func newErrorSchedulerNotRunningException(v protocol.ResponseMetadata) error { return &SchedulerNotRunningException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
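A minimal sketch of resuming selected nodes of a workflow run with the new ResumeWorkflowRun shapes (same client and imports as the first sketch).

func resumeFailedNodes(svc *glue.Glue, workflow, runID string, nodeIDs []string) (string, error) {
	out, err := svc.ResumeWorkflowRun(&glue.ResumeWorkflowRunInput{
		Name:    aws.String(workflow),
		RunId:   aws.String(runID),
		NodeIds: aws.StringSlice(nodeIDs), // node IDs that had a run attempt in the original run
	})
	if err != nil {
		return "", err
	}
	fmt.Printf("restarted %d node(s)\n", len(out.NodeIds))
	// A resumed run is assigned a fresh run ID, as noted in the output shape above.
	return aws.StringValue(out.RunId), nil
}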
-func (s SchedulerNotRunningException) Code() string { +func (s *SchedulerNotRunningException) Code() string { return "SchedulerNotRunningException" } // Message returns the exception's message. -func (s SchedulerNotRunningException) Message() string { +func (s *SchedulerNotRunningException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28790,28 +32272,28 @@ func (s SchedulerNotRunningException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SchedulerNotRunningException) OrigErr() error { +func (s *SchedulerNotRunningException) OrigErr() error { return nil } -func (s SchedulerNotRunningException) Error() string { +func (s *SchedulerNotRunningException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SchedulerNotRunningException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SchedulerNotRunningException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SchedulerNotRunningException) RequestID() string { - return s.respMetadata.RequestID +func (s *SchedulerNotRunningException) RequestID() string { + return s.RespMetadata.RequestID } // The specified scheduler is already running. type SchedulerRunningException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -28829,17 +32311,17 @@ func (s SchedulerRunningException) GoString() string { func newErrorSchedulerRunningException(v protocol.ResponseMetadata) error { return &SchedulerRunningException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SchedulerRunningException) Code() string { +func (s *SchedulerRunningException) Code() string { return "SchedulerRunningException" } // Message returns the exception's message. -func (s SchedulerRunningException) Message() string { +func (s *SchedulerRunningException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28847,28 +32329,28 @@ func (s SchedulerRunningException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SchedulerRunningException) OrigErr() error { +func (s *SchedulerRunningException) OrigErr() error { return nil } -func (s SchedulerRunningException) Error() string { +func (s *SchedulerRunningException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SchedulerRunningException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SchedulerRunningException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SchedulerRunningException) RequestID() string { - return s.respMetadata.RequestID +func (s *SchedulerRunningException) RequestID() string { + return s.RespMetadata.RequestID } // The specified scheduler is transitioning. type SchedulerTransitioningException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. 
Message_ *string `locationName:"Message" type:"string"` @@ -28886,17 +32368,17 @@ func (s SchedulerTransitioningException) GoString() string { func newErrorSchedulerTransitioningException(v protocol.ResponseMetadata) error { return &SchedulerTransitioningException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SchedulerTransitioningException) Code() string { +func (s *SchedulerTransitioningException) Code() string { return "SchedulerTransitioningException" } // Message returns the exception's message. -func (s SchedulerTransitioningException) Message() string { +func (s *SchedulerTransitioningException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28904,22 +32386,22 @@ func (s SchedulerTransitioningException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SchedulerTransitioningException) OrigErr() error { +func (s *SchedulerTransitioningException) OrigErr() error { return nil } -func (s SchedulerTransitioningException) Error() string { +func (s *SchedulerTransitioningException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SchedulerTransitioningException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SchedulerTransitioningException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SchedulerTransitioningException) RequestID() string { - return s.respMetadata.RequestID +func (s *SchedulerTransitioningException) RequestID() string { + return s.RespMetadata.RequestID } // A policy that specifies update and deletion behaviors for the crawler. @@ -29006,11 +32488,20 @@ func (s *SchemaColumn) SetName(v string) *SchemaColumn { type SearchTablesInput struct { _ struct{} `type:"structure"` - // A unique identifier, consisting of account_id/datalake. + // A unique identifier, consisting of account_id . CatalogId *string `min:"1" type:"string"` // A list of key-value pairs, and a comparator used to filter the search results. // Returns all entities matching the predicate. + // + // The Comparator member of the PropertyPredicate struct is used only for time + // fields, and can be omitted for other field types. Also, when comparing string + // values, such as when Key=Name, a fuzzy match algorithm is used. The Key field + // (for example, the value of the Name field) is split on certain punctuation + // characters, for example, -, :, #, etc. into tokens. Then each token is exact-match + // compared with the Value member of PropertyPredicate. For example, if Key=Name + // and Value=link, tables named customer-link and xx-link-yy are returned, but + // xxlinkyy is not returned. Filters []*PropertyPredicate `type:"list"` // The maximum number of tables to return in a single response. @@ -29019,6 +32510,15 @@ type SearchTablesInput struct { // A continuation token, included if this is a continuation call. NextToken *string `type:"string"` + // Allows you to specify that you want to search the tables shared with your + // account. The allowable values are FOREIGN or ALL. + // + // * If set to FOREIGN, will search the tables shared with your account. + // + // * If set to ALL, will search the tables shared with your account, as well + // as the tables in yor local account. + ResourceShareType *string `type:"string" enum:"ResourceShareType"` + // A string used for a text search. 
// // Specifying a value in quotes filters based on an exact match to the value. @@ -29079,6 +32579,12 @@ func (s *SearchTablesInput) SetNextToken(v string) *SearchTablesInput { return s } +// SetResourceShareType sets the ResourceShareType field's value. +func (s *SearchTablesInput) SetResourceShareType(v string) *SearchTablesInput { + s.ResourceShareType = &v + return s +} + // SetSearchText sets the SearchText field's value. func (s *SearchTablesInput) SetSearchText(v string) *SearchTablesInput { s.SearchText = &v @@ -30292,6 +33798,78 @@ func (s *StopTriggerOutput) SetName(v string) *StopTriggerOutput { return s } +type StopWorkflowRunInput struct { + _ struct{} `type:"structure"` + + // The name of the workflow to stop. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The ID of the workflow run to stop. + // + // RunId is a required field + RunId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StopWorkflowRunInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopWorkflowRunInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopWorkflowRunInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopWorkflowRunInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.RunId == nil { + invalidParams.Add(request.NewErrParamRequired("RunId")) + } + if s.RunId != nil && len(*s.RunId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RunId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *StopWorkflowRunInput) SetName(v string) *StopWorkflowRunInput { + s.Name = &v + return s +} + +// SetRunId sets the RunId field's value. +func (s *StopWorkflowRunInput) SetRunId(v string) *StopWorkflowRunInput { + s.RunId = &v + return s +} + +type StopWorkflowRunOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s StopWorkflowRunOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopWorkflowRunOutput) GoString() string { + return s.String() +} + // Describes the physical storage of table data. type StorageDescriptor struct { _ struct{} `type:"structure"` @@ -30455,6 +34033,87 @@ func (s *StorageDescriptor) SetStoredAsSubDirectories(v bool) *StorageDescriptor return s } +// Defines a string column statistics data. +type StringColumnStatisticsData struct { + _ struct{} `type:"structure"` + + // Average value of the column. + // + // AverageLength is a required field + AverageLength *float64 `type:"double" required:"true"` + + // Maximum value of the column. + // + // MaximumLength is a required field + MaximumLength *int64 `type:"long" required:"true"` + + // Number of distinct values. + // + // NumberOfDistinctValues is a required field + NumberOfDistinctValues *int64 `type:"long" required:"true"` + + // Number of nulls. 
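A small sketch of the SearchTables behaviour described above, assuming a *glue.Glue client constructed elsewhere; the "link" filter value and MaxResults are illustrative. The Comparator is omitted because, per the documentation, it applies only to time fields, so the Name value is token-matched (customer-link matches, xxlinkyy does not).

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

// findLinkTables searches the catalog for tables whose name fuzzily matches
// "link", including tables shared from other accounts (ResourceShareType ALL).
func findLinkTables(conn *glue.Glue) error {
	out, err := conn.SearchTables(&glue.SearchTablesInput{
		ResourceShareType: aws.String(glue.ResourceShareTypeAll),
		Filters: []*glue.PropertyPredicate{{
			Key:   aws.String("Name"),
			Value: aws.String("link"),
		}},
		MaxResults: aws.Int64(25),
	})
	if err != nil {
		return err
	}
	for _, t := range out.TableList {
		fmt.Println(aws.StringValue(t.DatabaseName), aws.StringValue(t.Name))
	}
	return nil
}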
+ // + // NumberOfNulls is a required field + NumberOfNulls *int64 `type:"long" required:"true"` +} + +// String returns the string representation +func (s StringColumnStatisticsData) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StringColumnStatisticsData) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StringColumnStatisticsData) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StringColumnStatisticsData"} + if s.AverageLength == nil { + invalidParams.Add(request.NewErrParamRequired("AverageLength")) + } + if s.MaximumLength == nil { + invalidParams.Add(request.NewErrParamRequired("MaximumLength")) + } + if s.NumberOfDistinctValues == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfDistinctValues")) + } + if s.NumberOfNulls == nil { + invalidParams.Add(request.NewErrParamRequired("NumberOfNulls")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAverageLength sets the AverageLength field's value. +func (s *StringColumnStatisticsData) SetAverageLength(v float64) *StringColumnStatisticsData { + s.AverageLength = &v + return s +} + +// SetMaximumLength sets the MaximumLength field's value. +func (s *StringColumnStatisticsData) SetMaximumLength(v int64) *StringColumnStatisticsData { + s.MaximumLength = &v + return s +} + +// SetNumberOfDistinctValues sets the NumberOfDistinctValues field's value. +func (s *StringColumnStatisticsData) SetNumberOfDistinctValues(v int64) *StringColumnStatisticsData { + s.NumberOfDistinctValues = &v + return s +} + +// SetNumberOfNulls sets the NumberOfNulls field's value. +func (s *StringColumnStatisticsData) SetNumberOfNulls(v int64) *StringColumnStatisticsData { + s.NumberOfNulls = &v + return s +} + // The database and table in the AWS Glue Data Catalog that is used for input // or output data. type Table struct { @@ -30543,6 +34202,9 @@ func (s *Table) SetTableName(v string) *Table { type TableData struct { _ struct{} `type:"structure"` + // The ID of the Data Catalog in which the table resides. + CatalogId *string `min:"1" type:"string"` + // The time when the table definition was created in the Data Catalog. CreateTime *time.Time `type:"timestamp"` @@ -30597,6 +34259,9 @@ type TableData struct { // The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). TableType *string `type:"string"` + // A TableIdentifier structure that describes a target table for resource linking. + TargetTable *TableIdentifier `type:"structure"` + // The last time that the table was updated. UpdateTime *time.Time `type:"timestamp"` @@ -30617,6 +34282,12 @@ func (s TableData) GoString() string { return s.String() } +// SetCatalogId sets the CatalogId field's value. +func (s *TableData) SetCatalogId(v string) *TableData { + s.CatalogId = &v + return s +} + // SetCreateTime sets the CreateTime field's value. func (s *TableData) SetCreateTime(v time.Time) *TableData { s.CreateTime = &v @@ -30701,6 +34372,12 @@ func (s *TableData) SetTableType(v string) *TableData { return s } +// SetTargetTable sets the TargetTable field's value. +func (s *TableData) SetTargetTable(v *TableIdentifier) *TableData { + s.TargetTable = v + return s +} + // SetUpdateTime sets the UpdateTime field's value. 
func (s *TableData) SetUpdateTime(v time.Time) *TableData { s.UpdateTime = &v @@ -30752,6 +34429,67 @@ func (s *TableError) SetTableName(v string) *TableError { return s } +// A structure that describes a target table for resource linking. +type TableIdentifier struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog in which the table resides. + CatalogId *string `min:"1" type:"string"` + + // The name of the catalog database that contains the target table. + DatabaseName *string `min:"1" type:"string"` + + // The name of the target table. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TableIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TableIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TableIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TableIdentifier"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *TableIdentifier) SetCatalogId(v string) *TableIdentifier { + s.CatalogId = &v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *TableIdentifier) SetDatabaseName(v string) *TableIdentifier { + s.DatabaseName = &v + return s +} + +// SetName sets the Name field's value. +func (s *TableIdentifier) SetName(v string) *TableIdentifier { + s.Name = &v + return s +} + // A structure used to define a table. type TableInput struct { _ struct{} `type:"structure"` @@ -30797,6 +34535,9 @@ type TableInput struct { // The type of this table (EXTERNAL_TABLE, VIRTUAL_VIEW, etc.). TableType *string `type:"string"` + // A TableIdentifier structure that describes a target table for resource linking. + TargetTable *TableIdentifier `type:"structure"` + // If the table is a view, the expanded text of the view; otherwise null. ViewExpandedText *string `type:"string"` @@ -30841,6 +34582,11 @@ func (s *TableInput) Validate() error { invalidParams.AddNested("StorageDescriptor", err.(request.ErrInvalidParams)) } } + if s.TargetTable != nil { + if err := s.TargetTable.Validate(); err != nil { + invalidParams.AddNested("TargetTable", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -30908,6 +34654,12 @@ func (s *TableInput) SetTableType(v string) *TableInput { return s } +// SetTargetTable sets the TargetTable field's value. +func (s *TableInput) SetTargetTable(v *TableIdentifier) *TableInput { + s.TargetTable = v + return s +} + // SetViewExpandedText sets the ViewExpandedText field's value. func (s *TableInput) SetViewExpandedText(v string) *TableInput { s.ViewExpandedText = &v @@ -31963,6 +35715,256 @@ func (s UpdateClassifierOutput) GoString() string { return s.String() } +type UpdateColumnStatisticsForPartitionInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog where the partitions in question reside. If none + // is supplied, the AWS account ID is used by default. 
+ CatalogId *string `min:"1" type:"string"` + + // A list of the column statistics. + // + // ColumnStatisticsList is a required field + ColumnStatisticsList []*ColumnStatistics `type:"list" required:"true"` + + // The name of the catalog database where the partitions reside. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // A list of partition values identifying the partition. + // + // PartitionValues is a required field + PartitionValues []*string `type:"list" required:"true"` + + // The name of the partitions' table. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateColumnStatisticsForPartitionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateColumnStatisticsForPartitionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateColumnStatisticsForPartitionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateColumnStatisticsForPartitionInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.ColumnStatisticsList == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnStatisticsList")) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.PartitionValues == nil { + invalidParams.Add(request.NewErrParamRequired("PartitionValues")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + if s.ColumnStatisticsList != nil { + for i, v := range s.ColumnStatisticsList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ColumnStatisticsList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *UpdateColumnStatisticsForPartitionInput) SetCatalogId(v string) *UpdateColumnStatisticsForPartitionInput { + s.CatalogId = &v + return s +} + +// SetColumnStatisticsList sets the ColumnStatisticsList field's value. +func (s *UpdateColumnStatisticsForPartitionInput) SetColumnStatisticsList(v []*ColumnStatistics) *UpdateColumnStatisticsForPartitionInput { + s.ColumnStatisticsList = v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *UpdateColumnStatisticsForPartitionInput) SetDatabaseName(v string) *UpdateColumnStatisticsForPartitionInput { + s.DatabaseName = &v + return s +} + +// SetPartitionValues sets the PartitionValues field's value. +func (s *UpdateColumnStatisticsForPartitionInput) SetPartitionValues(v []*string) *UpdateColumnStatisticsForPartitionInput { + s.PartitionValues = v + return s +} + +// SetTableName sets the TableName field's value. 
+func (s *UpdateColumnStatisticsForPartitionInput) SetTableName(v string) *UpdateColumnStatisticsForPartitionInput { + s.TableName = &v + return s +} + +type UpdateColumnStatisticsForPartitionOutput struct { + _ struct{} `type:"structure"` + + // Error occurred during updating column statistics data. + Errors []*ColumnStatisticsError `type:"list"` +} + +// String returns the string representation +func (s UpdateColumnStatisticsForPartitionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateColumnStatisticsForPartitionOutput) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. +func (s *UpdateColumnStatisticsForPartitionOutput) SetErrors(v []*ColumnStatisticsError) *UpdateColumnStatisticsForPartitionOutput { + s.Errors = v + return s +} + +type UpdateColumnStatisticsForTableInput struct { + _ struct{} `type:"structure"` + + // The ID of the Data Catalog where the partitions in question reside. If none + // is supplied, the AWS account ID is used by default. + CatalogId *string `min:"1" type:"string"` + + // A list of the column statistics. + // + // ColumnStatisticsList is a required field + ColumnStatisticsList []*ColumnStatistics `type:"list" required:"true"` + + // The name of the catalog database where the partitions reside. + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` + + // The name of the partitions' table. + // + // TableName is a required field + TableName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateColumnStatisticsForTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateColumnStatisticsForTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateColumnStatisticsForTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateColumnStatisticsForTableInput"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.ColumnStatisticsList == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnStatisticsList")) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 1)) + } + if s.ColumnStatisticsList != nil { + for i, v := range s.ColumnStatisticsList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ColumnStatisticsList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCatalogId sets the CatalogId field's value. +func (s *UpdateColumnStatisticsForTableInput) SetCatalogId(v string) *UpdateColumnStatisticsForTableInput { + s.CatalogId = &v + return s +} + +// SetColumnStatisticsList sets the ColumnStatisticsList field's value. 
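A hedged sketch of UpdateColumnStatisticsForTable as defined above, assuming an existing *glue.Glue client. The ColumnStatistics and ColumnStatisticsData shapes come from the same SDK revision but are not shown in this hunk, and all database, table, and column names and statistic values are placeholders.

package example

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

// updateNameColumnStats writes string statistics for one column of a catalog table.
func updateNameColumnStats(conn *glue.Glue) error {
	out, err := conn.UpdateColumnStatisticsForTable(&glue.UpdateColumnStatisticsForTableInput{
		DatabaseName: aws.String("example_db"),
		TableName:    aws.String("example_table"),
		ColumnStatisticsList: []*glue.ColumnStatistics{{
			ColumnName:   aws.String("name"),
			ColumnType:   aws.String("string"),
			AnalyzedTime: aws.Time(time.Now()),
			StatisticsData: &glue.ColumnStatisticsData{
				Type: aws.String(glue.ColumnStatisticsTypeString),
				StringColumnStatisticsData: &glue.StringColumnStatisticsData{
					AverageLength:          aws.Float64(12.5),
					MaximumLength:          aws.Int64(64),
					NumberOfDistinctValues: aws.Int64(1000),
					NumberOfNulls:          aws.Int64(0),
				},
			},
		}},
	})
	if err != nil {
		return err
	}
	// Per-column failures come back in the output rather than as an API error.
	for _, e := range out.Errors {
		log.Printf("column statistics error: %s", e)
	}
	return nil
}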
+func (s *UpdateColumnStatisticsForTableInput) SetColumnStatisticsList(v []*ColumnStatistics) *UpdateColumnStatisticsForTableInput { + s.ColumnStatisticsList = v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *UpdateColumnStatisticsForTableInput) SetDatabaseName(v string) *UpdateColumnStatisticsForTableInput { + s.DatabaseName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateColumnStatisticsForTableInput) SetTableName(v string) *UpdateColumnStatisticsForTableInput { + s.TableName = &v + return s +} + +type UpdateColumnStatisticsForTableOutput struct { + _ struct{} `type:"structure"` + + // List of ColumnStatisticsErrors. + Errors []*ColumnStatisticsError `type:"list"` +} + +// String returns the string representation +func (s UpdateColumnStatisticsForTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateColumnStatisticsForTableOutput) GoString() string { + return s.String() +} + +// SetErrors sets the Errors field's value. +func (s *UpdateColumnStatisticsForTableOutput) SetErrors(v []*ColumnStatisticsError) *UpdateColumnStatisticsForTableOutput { + s.Errors = v + return s +} + type UpdateConnectionInput struct { _ struct{} `type:"structure"` @@ -32058,9 +36060,9 @@ type UpdateCrawlerInput struct { // always override the default classifiers for a given classification. Classifiers []*string `type:"list"` - // The crawler configuration information. This versioned JSON string allows - // users to specify aspects of a crawler's behavior. For more information, see - // Configuring a Crawler (http://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). + // Crawler configuration information. This versioned JSON string allows users + // to specify aspects of a crawler's behavior. For more information, see Configuring + // a Crawler (https://docs.aws.amazon.com/glue/latest/dg/crawler-configuration.html). Configuration *string `type:"string"` // The name of the SecurityConfiguration structure to be used by this crawler. @@ -32081,10 +36083,10 @@ type UpdateCrawlerInput struct { // the new crawler to access customer resources. Role *string `type:"string"` - // A cron expression used to specify the schedule. For more information, see - // Time-Based Schedules for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, specify cron(15 12 - // * * ? *). + // A cron expression used to specify the schedule (see Time-Based Schedules + // for Jobs and Crawlers (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). + // For example, to run something every day at 12:15 UTC, you would specify: + // cron(15 12 * * ? *). Schedule *string `type:"string"` // The policy for the crawler's update and deletion behavior. @@ -32216,10 +36218,10 @@ type UpdateCrawlerScheduleInput struct { // CrawlerName is a required field CrawlerName *string `min:"1" type:"string" required:"true"` - // The updated cron expression used to specify the schedule. For more information, - // see Time-Based Schedules for Jobs and Crawlers (http://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). - // For example, to run something every day at 12:15 UTC, specify cron(15 12 - // * * ? *). 
+ // The updated cron expression used to specify the schedule (see Time-Based + // Schedules for Jobs and Crawlers (https://docs.aws.amazon.com/glue/latest/dg/monitor-data-warehouse-schedule.html). + // For example, to run something every day at 12:15 UTC, you would specify: + // cron(15 12 * * ? *). Schedule *string `type:"string"` } @@ -33015,10 +37017,13 @@ type UpdatePartitionInput struct { // The new partition object to update the partition to. // + // The Values property can't be changed. If you want to change the partition + // key values for a partition, delete and recreate the partition. + // // PartitionInput is a required field PartitionInput *PartitionInput `type:"structure" required:"true"` - // A list of the values defining the partition. + // List of partition key values that define the partition to update. // // PartitionValueList is a required field PartitionValueList []*string `type:"list" required:"true"` @@ -33414,6 +37419,12 @@ type UpdateWorkflowInput struct { // The description of the workflow. Description *string `type:"string"` + // You can use this parameter to prevent unwanted multiple updates to data, + // to control costs, or in some cases, to prevent exceeding the maximum number + // of concurrent runs of any of the component jobs. If you leave this parameter + // blank, there is no limit to the number of concurrent workflow runs. + MaxConcurrentRuns *int64 `type:"integer"` + // Name of the workflow to be updated. // // Name is a required field @@ -33458,6 +37469,12 @@ func (s *UpdateWorkflowInput) SetDescription(v string) *UpdateWorkflowInput { return s } +// SetMaxConcurrentRuns sets the MaxConcurrentRuns field's value. +func (s *UpdateWorkflowInput) SetMaxConcurrentRuns(v int64) *UpdateWorkflowInput { + s.MaxConcurrentRuns = &v + return s +} + // SetName sets the Name field's value. func (s *UpdateWorkflowInput) SetName(v string) *UpdateWorkflowInput { s.Name = &v @@ -33555,12 +37572,18 @@ func (s *UpdateXMLClassifierRequest) SetRowTag(v string) *UpdateXMLClassifierReq type UserDefinedFunction struct { _ struct{} `type:"structure"` + // The ID of the Data Catalog in which the function resides. + CatalogId *string `min:"1" type:"string"` + // The Java class that contains the function code. ClassName *string `min:"1" type:"string"` // The time at which the function was created. CreateTime *time.Time `type:"timestamp"` + // The name of the catalog database that contains the function. + DatabaseName *string `min:"1" type:"string"` + // The name of the function. FunctionName *string `min:"1" type:"string"` @@ -33584,6 +37607,12 @@ func (s UserDefinedFunction) GoString() string { return s.String() } +// SetCatalogId sets the CatalogId field's value. +func (s *UserDefinedFunction) SetCatalogId(v string) *UserDefinedFunction { + s.CatalogId = &v + return s +} + // SetClassName sets the ClassName field's value. func (s *UserDefinedFunction) SetClassName(v string) *UserDefinedFunction { s.ClassName = &v @@ -33596,6 +37625,12 @@ func (s *UserDefinedFunction) SetCreateTime(v time.Time) *UserDefinedFunction { return s } +// SetDatabaseName sets the DatabaseName field's value. +func (s *UserDefinedFunction) SetDatabaseName(v string) *UserDefinedFunction { + s.DatabaseName = &v + return s +} + // SetFunctionName sets the FunctionName field's value. 
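A brief sketch of the new MaxConcurrentRuns field on UpdateWorkflowInput, assuming an existing *glue.Glue client; the workflow name is a placeholder. Leaving the field unset keeps the documented default of unlimited concurrent runs.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/glue"
)

// capWorkflowConcurrency caps a workflow at a single concurrent run.
func capWorkflowConcurrency(conn *glue.Glue) error {
	_, err := conn.UpdateWorkflow(&glue.UpdateWorkflowInput{
		Name:              aws.String("example-workflow"),
		MaxConcurrentRuns: aws.Int64(1),
	})
	return err
}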
func (s *UserDefinedFunction) SetFunctionName(v string) *UserDefinedFunction { s.FunctionName = &v @@ -33711,8 +37746,8 @@ func (s *UserDefinedFunctionInput) SetResourceUris(v []*ResourceUri) *UserDefine // A value could not be validated. type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -33730,17 +37765,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. -func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -33748,28 +37783,28 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } // There was a version conflict. type VersionMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -33787,17 +37822,17 @@ func (s VersionMismatchException) GoString() string { func newErrorVersionMismatchException(v protocol.ResponseMetadata) error { return &VersionMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s VersionMismatchException) Code() string { +func (s *VersionMismatchException) Code() string { return "VersionMismatchException" } // Message returns the exception's message. -func (s VersionMismatchException) Message() string { +func (s *VersionMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -33805,22 +37840,22 @@ func (s VersionMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s VersionMismatchException) OrigErr() error { +func (s *VersionMismatchException) OrigErr() error { return nil } -func (s VersionMismatchException) Error() string { +func (s *VersionMismatchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s VersionMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *VersionMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s VersionMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *VersionMismatchException) RequestID() string { + return s.RespMetadata.RequestID } // A workflow represents a flow in which AWS Glue components should be executed @@ -33847,6 +37882,12 @@ type Workflow struct { // The information about the last execution of the workflow. LastRun *WorkflowRun `type:"structure"` + // You can use this parameter to prevent unwanted multiple updates to data, + // to control costs, or in some cases, to prevent exceeding the maximum number + // of concurrent runs of any of the component jobs. If you leave this parameter + // blank, there is no limit to the number of concurrent workflow runs. + MaxConcurrentRuns *int64 `type:"integer"` + // The name of the workflow representing the flow. Name *string `min:"1" type:"string"` } @@ -33897,6 +37938,12 @@ func (s *Workflow) SetLastRun(v *WorkflowRun) *Workflow { return s } +// SetMaxConcurrentRuns sets the MaxConcurrentRuns field's value. +func (s *Workflow) SetMaxConcurrentRuns(v int64) *Workflow { + s.MaxConcurrentRuns = &v + return s +} + // SetName sets the Name field's value. func (s *Workflow) SetName(v string) *Workflow { s.Name = &v @@ -33947,13 +37994,21 @@ type WorkflowRun struct { // The date and time when the workflow run completed. CompletedOn *time.Time `type:"timestamp"` + // This error message describes any error that may have occurred in starting + // the workflow run. Currently the only error message is "Concurrent runs exceeded + // for workflow: foo." + ErrorMessage *string `type:"string"` + // The graph representing all the AWS Glue components that belong to the workflow // as nodes and directed connections between them as edges. Graph *WorkflowGraph `type:"structure"` - // Name of the workflow which was executed. + // Name of the workflow that was executed. Name *string `min:"1" type:"string"` + // The ID of the previous workflow run. + PreviousRunId *string `min:"1" type:"string"` + // The date and time when the workflow run was started. StartedOn *time.Time `type:"timestamp"` @@ -33986,6 +38041,12 @@ func (s *WorkflowRun) SetCompletedOn(v time.Time) *WorkflowRun { return s } +// SetErrorMessage sets the ErrorMessage field's value. +func (s *WorkflowRun) SetErrorMessage(v string) *WorkflowRun { + s.ErrorMessage = &v + return s +} + // SetGraph sets the Graph field's value. func (s *WorkflowRun) SetGraph(v *WorkflowGraph) *WorkflowRun { s.Graph = v @@ -33998,6 +38059,12 @@ func (s *WorkflowRun) SetName(v string) *WorkflowRun { return s } +// SetPreviousRunId sets the PreviousRunId field's value. +func (s *WorkflowRun) SetPreviousRunId(v string) *WorkflowRun { + s.PreviousRunId = &v + return s +} + // SetStartedOn sets the StartedOn field's value. func (s *WorkflowRun) SetStartedOn(v time.Time) *WorkflowRun { s.StartedOn = &v @@ -34032,19 +38099,19 @@ func (s *WorkflowRun) SetWorkflowRunProperties(v map[string]*string) *WorkflowRu type WorkflowRunStatistics struct { _ struct{} `type:"structure"` - // Total number of Actions which have failed. + // Total number of Actions that have failed. FailedActions *int64 `type:"integer"` // Total number Actions in running state. RunningActions *int64 `type:"integer"` - // Total number of Actions which have stopped. 
+ // Total number of Actions that have stopped. StoppedActions *int64 `type:"integer"` - // Total number of Actions which have succeeded. + // Total number of Actions that have succeeded. SucceededActions *int64 `type:"integer"` - // Total number of Actions which timed out. + // Total number of Actions that timed out. TimeoutActions *int64 `type:"integer"` // Total number of Actions in the workflow run. @@ -34182,6 +38249,14 @@ const ( CatalogEncryptionModeSseKms = "SSE-KMS" ) +// CatalogEncryptionMode_Values returns all elements of the CatalogEncryptionMode enum +func CatalogEncryptionMode_Values() []string { + return []string{ + CatalogEncryptionModeDisabled, + CatalogEncryptionModeSseKms, + } +} + const ( // CloudWatchEncryptionModeDisabled is a CloudWatchEncryptionMode enum value CloudWatchEncryptionModeDisabled = "DISABLED" @@ -34190,6 +38265,50 @@ const ( CloudWatchEncryptionModeSseKms = "SSE-KMS" ) +// CloudWatchEncryptionMode_Values returns all elements of the CloudWatchEncryptionMode enum +func CloudWatchEncryptionMode_Values() []string { + return []string{ + CloudWatchEncryptionModeDisabled, + CloudWatchEncryptionModeSseKms, + } +} + +const ( + // ColumnStatisticsTypeBoolean is a ColumnStatisticsType enum value + ColumnStatisticsTypeBoolean = "BOOLEAN" + + // ColumnStatisticsTypeDate is a ColumnStatisticsType enum value + ColumnStatisticsTypeDate = "DATE" + + // ColumnStatisticsTypeDecimal is a ColumnStatisticsType enum value + ColumnStatisticsTypeDecimal = "DECIMAL" + + // ColumnStatisticsTypeDouble is a ColumnStatisticsType enum value + ColumnStatisticsTypeDouble = "DOUBLE" + + // ColumnStatisticsTypeLong is a ColumnStatisticsType enum value + ColumnStatisticsTypeLong = "LONG" + + // ColumnStatisticsTypeString is a ColumnStatisticsType enum value + ColumnStatisticsTypeString = "STRING" + + // ColumnStatisticsTypeBinary is a ColumnStatisticsType enum value + ColumnStatisticsTypeBinary = "BINARY" +) + +// ColumnStatisticsType_Values returns all elements of the ColumnStatisticsType enum +func ColumnStatisticsType_Values() []string { + return []string{ + ColumnStatisticsTypeBoolean, + ColumnStatisticsTypeDate, + ColumnStatisticsTypeDecimal, + ColumnStatisticsTypeDouble, + ColumnStatisticsTypeLong, + ColumnStatisticsTypeString, + ColumnStatisticsTypeBinary, + } +} + const ( // ComparatorEquals is a Comparator enum value ComparatorEquals = "EQUALS" @@ -34207,6 +38326,17 @@ const ( ComparatorLessThanEquals = "LESS_THAN_EQUALS" ) +// Comparator_Values returns all elements of the Comparator enum +func Comparator_Values() []string { + return []string{ + ComparatorEquals, + ComparatorGreaterThan, + ComparatorLessThan, + ComparatorGreaterThanEquals, + ComparatorLessThanEquals, + } +} + const ( // ConnectionPropertyKeyHost is a ConnectionPropertyKey enum value ConnectionPropertyKeyHost = "HOST" @@ -34255,30 +38385,106 @@ const ( // ConnectionPropertyKeyCustomJdbcCertString is a ConnectionPropertyKey enum value ConnectionPropertyKeyCustomJdbcCertString = "CUSTOM_JDBC_CERT_STRING" + + // ConnectionPropertyKeyConnectionUrl is a ConnectionPropertyKey enum value + ConnectionPropertyKeyConnectionUrl = "CONNECTION_URL" + + // ConnectionPropertyKeyKafkaBootstrapServers is a ConnectionPropertyKey enum value + ConnectionPropertyKeyKafkaBootstrapServers = "KAFKA_BOOTSTRAP_SERVERS" + + // ConnectionPropertyKeyKafkaSslEnabled is a ConnectionPropertyKey enum value + ConnectionPropertyKeyKafkaSslEnabled = "KAFKA_SSL_ENABLED" + + // ConnectionPropertyKeyKafkaCustomCert is a ConnectionPropertyKey enum value 
+ ConnectionPropertyKeyKafkaCustomCert = "KAFKA_CUSTOM_CERT" + + // ConnectionPropertyKeyKafkaSkipCustomCertValidation is a ConnectionPropertyKey enum value + ConnectionPropertyKeyKafkaSkipCustomCertValidation = "KAFKA_SKIP_CUSTOM_CERT_VALIDATION" ) +// ConnectionPropertyKey_Values returns all elements of the ConnectionPropertyKey enum +func ConnectionPropertyKey_Values() []string { + return []string{ + ConnectionPropertyKeyHost, + ConnectionPropertyKeyPort, + ConnectionPropertyKeyUsername, + ConnectionPropertyKeyPassword, + ConnectionPropertyKeyEncryptedPassword, + ConnectionPropertyKeyJdbcDriverJarUri, + ConnectionPropertyKeyJdbcDriverClassName, + ConnectionPropertyKeyJdbcEngine, + ConnectionPropertyKeyJdbcEngineVersion, + ConnectionPropertyKeyConfigFiles, + ConnectionPropertyKeyInstanceId, + ConnectionPropertyKeyJdbcConnectionUrl, + ConnectionPropertyKeyJdbcEnforceSsl, + ConnectionPropertyKeyCustomJdbcCert, + ConnectionPropertyKeySkipCustomJdbcCertValidation, + ConnectionPropertyKeyCustomJdbcCertString, + ConnectionPropertyKeyConnectionUrl, + ConnectionPropertyKeyKafkaBootstrapServers, + ConnectionPropertyKeyKafkaSslEnabled, + ConnectionPropertyKeyKafkaCustomCert, + ConnectionPropertyKeyKafkaSkipCustomCertValidation, + } +} + const ( // ConnectionTypeJdbc is a ConnectionType enum value ConnectionTypeJdbc = "JDBC" // ConnectionTypeSftp is a ConnectionType enum value ConnectionTypeSftp = "SFTP" + + // ConnectionTypeMongodb is a ConnectionType enum value + ConnectionTypeMongodb = "MONGODB" + + // ConnectionTypeKafka is a ConnectionType enum value + ConnectionTypeKafka = "KAFKA" + + // ConnectionTypeNetwork is a ConnectionType enum value + ConnectionTypeNetwork = "NETWORK" ) +// ConnectionType_Values returns all elements of the ConnectionType enum +func ConnectionType_Values() []string { + return []string{ + ConnectionTypeJdbc, + ConnectionTypeSftp, + ConnectionTypeMongodb, + ConnectionTypeKafka, + ConnectionTypeNetwork, + } +} + const ( // CrawlStateRunning is a CrawlState enum value CrawlStateRunning = "RUNNING" - // CrawlStateSucceeded is a CrawlState enum value - CrawlStateSucceeded = "SUCCEEDED" + // CrawlStateCancelling is a CrawlState enum value + CrawlStateCancelling = "CANCELLING" // CrawlStateCancelled is a CrawlState enum value CrawlStateCancelled = "CANCELLED" + // CrawlStateSucceeded is a CrawlState enum value + CrawlStateSucceeded = "SUCCEEDED" + // CrawlStateFailed is a CrawlState enum value CrawlStateFailed = "FAILED" ) +// CrawlState_Values returns all elements of the CrawlState enum +func CrawlState_Values() []string { + return []string{ + CrawlStateRunning, + CrawlStateCancelling, + CrawlStateCancelled, + CrawlStateSucceeded, + CrawlStateFailed, + } +} + const ( // CrawlerStateReady is a CrawlerState enum value CrawlerStateReady = "READY" @@ -34290,6 +38496,15 @@ const ( CrawlerStateStopping = "STOPPING" ) +// CrawlerState_Values returns all elements of the CrawlerState enum +func CrawlerState_Values() []string { + return []string{ + CrawlerStateReady, + CrawlerStateRunning, + CrawlerStateStopping, + } +} + const ( // CsvHeaderOptionUnknown is a CsvHeaderOption enum value CsvHeaderOptionUnknown = "UNKNOWN" @@ -34301,6 +38516,15 @@ const ( CsvHeaderOptionAbsent = "ABSENT" ) +// CsvHeaderOption_Values returns all elements of the CsvHeaderOption enum +func CsvHeaderOption_Values() []string { + return []string{ + CsvHeaderOptionUnknown, + CsvHeaderOptionPresent, + CsvHeaderOptionAbsent, + } +} + const ( // DeleteBehaviorLog is a DeleteBehavior enum value DeleteBehaviorLog = 
"LOG" @@ -34312,6 +38536,31 @@ const ( DeleteBehaviorDeprecateInDatabase = "DEPRECATE_IN_DATABASE" ) +// DeleteBehavior_Values returns all elements of the DeleteBehavior enum +func DeleteBehavior_Values() []string { + return []string{ + DeleteBehaviorLog, + DeleteBehaviorDeleteFromDatabase, + DeleteBehaviorDeprecateInDatabase, + } +} + +const ( + // EnableHybridValuesTrue is a EnableHybridValues enum value + EnableHybridValuesTrue = "TRUE" + + // EnableHybridValuesFalse is a EnableHybridValues enum value + EnableHybridValuesFalse = "FALSE" +) + +// EnableHybridValues_Values returns all elements of the EnableHybridValues enum +func EnableHybridValues_Values() []string { + return []string{ + EnableHybridValuesTrue, + EnableHybridValuesFalse, + } +} + const ( // ExistConditionMustExist is a ExistCondition enum value ExistConditionMustExist = "MUST_EXIST" @@ -34323,6 +38572,15 @@ const ( ExistConditionNone = "NONE" ) +// ExistCondition_Values returns all elements of the ExistCondition enum +func ExistCondition_Values() []string { + return []string{ + ExistConditionMustExist, + ExistConditionNotExist, + ExistConditionNone, + } +} + const ( // JobBookmarksEncryptionModeDisabled is a JobBookmarksEncryptionMode enum value JobBookmarksEncryptionModeDisabled = "DISABLED" @@ -34331,6 +38589,14 @@ const ( JobBookmarksEncryptionModeCseKms = "CSE-KMS" ) +// JobBookmarksEncryptionMode_Values returns all elements of the JobBookmarksEncryptionMode enum +func JobBookmarksEncryptionMode_Values() []string { + return []string{ + JobBookmarksEncryptionModeDisabled, + JobBookmarksEncryptionModeCseKms, + } +} + const ( // JobRunStateStarting is a JobRunState enum value JobRunStateStarting = "STARTING" @@ -34354,6 +38620,19 @@ const ( JobRunStateTimeout = "TIMEOUT" ) +// JobRunState_Values returns all elements of the JobRunState enum +func JobRunState_Values() []string { + return []string{ + JobRunStateStarting, + JobRunStateRunning, + JobRunStateStopping, + JobRunStateStopped, + JobRunStateSucceeded, + JobRunStateFailed, + JobRunStateTimeout, + } +} + const ( // LanguagePython is a Language enum value LanguagePython = "PYTHON" @@ -34362,6 +38641,14 @@ const ( LanguageScala = "SCALA" ) +// Language_Values returns all elements of the Language enum +func Language_Values() []string { + return []string{ + LanguagePython, + LanguageScala, + } +} + const ( // LastCrawlStatusSucceeded is a LastCrawlStatus enum value LastCrawlStatusSucceeded = "SUCCEEDED" @@ -34373,6 +38660,15 @@ const ( LastCrawlStatusFailed = "FAILED" ) +// LastCrawlStatus_Values returns all elements of the LastCrawlStatus enum +func LastCrawlStatus_Values() []string { + return []string{ + LastCrawlStatusSucceeded, + LastCrawlStatusCancelled, + LastCrawlStatusFailed, + } +} + const ( // LogicalAnd is a Logical enum value LogicalAnd = "AND" @@ -34381,11 +38677,26 @@ const ( LogicalAny = "ANY" ) +// Logical_Values returns all elements of the Logical enum +func Logical_Values() []string { + return []string{ + LogicalAnd, + LogicalAny, + } +} + const ( // LogicalOperatorEquals is a LogicalOperator enum value LogicalOperatorEquals = "EQUALS" ) +// LogicalOperator_Values returns all elements of the LogicalOperator enum +func LogicalOperator_Values() []string { + return []string{ + LogicalOperatorEquals, + } +} + const ( // NodeTypeCrawler is a NodeType enum value NodeTypeCrawler = "CRAWLER" @@ -34397,6 +38708,27 @@ const ( NodeTypeTrigger = "TRIGGER" ) +// NodeType_Values returns all elements of the NodeType enum +func NodeType_Values() []string { + return 
[]string{ + NodeTypeCrawler, + NodeTypeJob, + NodeTypeTrigger, + } +} + +const ( + // PartitionIndexStatusActive is a PartitionIndexStatus enum value + PartitionIndexStatusActive = "ACTIVE" +) + +// PartitionIndexStatus_Values returns all elements of the PartitionIndexStatus enum +func PartitionIndexStatus_Values() []string { + return []string{ + PartitionIndexStatusActive, + } +} + const ( // PermissionAll is a Permission enum value PermissionAll = "ALL" @@ -34426,6 +38758,21 @@ const ( PermissionDataLocationAccess = "DATA_LOCATION_ACCESS" ) +// Permission_Values returns all elements of the Permission enum +func Permission_Values() []string { + return []string{ + PermissionAll, + PermissionSelect, + PermissionAlter, + PermissionDrop, + PermissionDelete, + PermissionInsert, + PermissionCreateDatabase, + PermissionCreateTable, + PermissionDataLocationAccess, + } +} + const ( // PrincipalTypeUser is a PrincipalType enum value PrincipalTypeUser = "USER" @@ -34437,6 +38784,31 @@ const ( PrincipalTypeGroup = "GROUP" ) +// PrincipalType_Values returns all elements of the PrincipalType enum +func PrincipalType_Values() []string { + return []string{ + PrincipalTypeUser, + PrincipalTypeRole, + PrincipalTypeGroup, + } +} + +const ( + // ResourceShareTypeForeign is a ResourceShareType enum value + ResourceShareTypeForeign = "FOREIGN" + + // ResourceShareTypeAll is a ResourceShareType enum value + ResourceShareTypeAll = "ALL" +) + +// ResourceShareType_Values returns all elements of the ResourceShareType enum +func ResourceShareType_Values() []string { + return []string{ + ResourceShareTypeForeign, + ResourceShareTypeAll, + } +} + const ( // ResourceTypeJar is a ResourceType enum value ResourceTypeJar = "JAR" @@ -34448,6 +38820,15 @@ const ( ResourceTypeArchive = "ARCHIVE" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeJar, + ResourceTypeFile, + ResourceTypeArchive, + } +} + const ( // S3EncryptionModeDisabled is a S3EncryptionMode enum value S3EncryptionModeDisabled = "DISABLED" @@ -34459,6 +38840,15 @@ const ( S3EncryptionModeSseS3 = "SSE-S3" ) +// S3EncryptionMode_Values returns all elements of the S3EncryptionMode enum +func S3EncryptionMode_Values() []string { + return []string{ + S3EncryptionModeDisabled, + S3EncryptionModeSseKms, + S3EncryptionModeSseS3, + } +} + const ( // ScheduleStateScheduled is a ScheduleState enum value ScheduleStateScheduled = "SCHEDULED" @@ -34470,6 +38860,15 @@ const ( ScheduleStateTransitioning = "TRANSITIONING" ) +// ScheduleState_Values returns all elements of the ScheduleState enum +func ScheduleState_Values() []string { + return []string{ + ScheduleStateScheduled, + ScheduleStateNotScheduled, + ScheduleStateTransitioning, + } +} + const ( // SortAsc is a Sort enum value SortAsc = "ASC" @@ -34478,6 +38877,14 @@ const ( SortDesc = "DESC" ) +// Sort_Values returns all elements of the Sort enum +func Sort_Values() []string { + return []string{ + SortAsc, + SortDesc, + } +} + const ( // SortDirectionTypeDescending is a SortDirectionType enum value SortDirectionTypeDescending = "DESCENDING" @@ -34486,6 +38893,14 @@ const ( SortDirectionTypeAscending = "ASCENDING" ) +// SortDirectionType_Values returns all elements of the SortDirectionType enum +func SortDirectionType_Values() []string { + return []string{ + SortDirectionTypeDescending, + SortDirectionTypeAscending, + } +} + const ( // TaskRunSortColumnTypeTaskRunType is a TaskRunSortColumnType enum value 
TaskRunSortColumnTypeTaskRunType = "TASK_RUN_TYPE" @@ -34497,6 +38912,15 @@ const ( TaskRunSortColumnTypeStarted = "STARTED" ) +// TaskRunSortColumnType_Values returns all elements of the TaskRunSortColumnType enum +func TaskRunSortColumnType_Values() []string { + return []string{ + TaskRunSortColumnTypeTaskRunType, + TaskRunSortColumnTypeStatus, + TaskRunSortColumnTypeStarted, + } +} + const ( // TaskStatusTypeStarting is a TaskStatusType enum value TaskStatusTypeStarting = "STARTING" @@ -34520,6 +38944,19 @@ const ( TaskStatusTypeTimeout = "TIMEOUT" ) +// TaskStatusType_Values returns all elements of the TaskStatusType enum +func TaskStatusType_Values() []string { + return []string{ + TaskStatusTypeStarting, + TaskStatusTypeRunning, + TaskStatusTypeStopping, + TaskStatusTypeStopped, + TaskStatusTypeSucceeded, + TaskStatusTypeFailed, + TaskStatusTypeTimeout, + } +} + const ( // TaskTypeEvaluation is a TaskType enum value TaskTypeEvaluation = "EVALUATION" @@ -34537,6 +38974,17 @@ const ( TaskTypeFindMatches = "FIND_MATCHES" ) +// TaskType_Values returns all elements of the TaskType enum +func TaskType_Values() []string { + return []string{ + TaskTypeEvaluation, + TaskTypeLabelingSetGeneration, + TaskTypeImportLabels, + TaskTypeExportLabels, + TaskTypeFindMatches, + } +} + const ( // TransformSortColumnTypeName is a TransformSortColumnType enum value TransformSortColumnTypeName = "NAME" @@ -34554,6 +39002,17 @@ const ( TransformSortColumnTypeLastModified = "LAST_MODIFIED" ) +// TransformSortColumnType_Values returns all elements of the TransformSortColumnType enum +func TransformSortColumnType_Values() []string { + return []string{ + TransformSortColumnTypeName, + TransformSortColumnTypeTransformType, + TransformSortColumnTypeStatus, + TransformSortColumnTypeCreated, + TransformSortColumnTypeLastModified, + } +} + const ( // TransformStatusTypeNotReady is a TransformStatusType enum value TransformStatusTypeNotReady = "NOT_READY" @@ -34565,11 +39024,27 @@ const ( TransformStatusTypeDeleting = "DELETING" ) +// TransformStatusType_Values returns all elements of the TransformStatusType enum +func TransformStatusType_Values() []string { + return []string{ + TransformStatusTypeNotReady, + TransformStatusTypeReady, + TransformStatusTypeDeleting, + } +} + const ( // TransformTypeFindMatches is a TransformType enum value TransformTypeFindMatches = "FIND_MATCHES" ) +// TransformType_Values returns all elements of the TransformType enum +func TransformType_Values() []string { + return []string{ + TransformTypeFindMatches, + } +} + const ( // TriggerStateCreating is a TriggerState enum value TriggerStateCreating = "CREATING" @@ -34596,6 +39071,20 @@ const ( TriggerStateUpdating = "UPDATING" ) +// TriggerState_Values returns all elements of the TriggerState enum +func TriggerState_Values() []string { + return []string{ + TriggerStateCreating, + TriggerStateCreated, + TriggerStateActivating, + TriggerStateActivated, + TriggerStateDeactivating, + TriggerStateDeactivated, + TriggerStateDeleting, + TriggerStateUpdating, + } +} + const ( // TriggerTypeScheduled is a TriggerType enum value TriggerTypeScheduled = "SCHEDULED" @@ -34607,6 +39096,15 @@ const ( TriggerTypeOnDemand = "ON_DEMAND" ) +// TriggerType_Values returns all elements of the TriggerType enum +func TriggerType_Values() []string { + return []string{ + TriggerTypeScheduled, + TriggerTypeConditional, + TriggerTypeOnDemand, + } +} + const ( // UpdateBehaviorLog is a UpdateBehavior enum value UpdateBehaviorLog = "LOG" @@ -34615,6 +39113,14 @@ const 
( UpdateBehaviorUpdateInDatabase = "UPDATE_IN_DATABASE" ) +// UpdateBehavior_Values returns all elements of the UpdateBehavior enum +func UpdateBehavior_Values() []string { + return []string{ + UpdateBehaviorLog, + UpdateBehaviorUpdateInDatabase, + } +} + const ( // WorkerTypeStandard is a WorkerType enum value WorkerTypeStandard = "Standard" @@ -34626,10 +39132,39 @@ const ( WorkerTypeG2x = "G.2X" ) +// WorkerType_Values returns all elements of the WorkerType enum +func WorkerType_Values() []string { + return []string{ + WorkerTypeStandard, + WorkerTypeG1x, + WorkerTypeG2x, + } +} + const ( // WorkflowRunStatusRunning is a WorkflowRunStatus enum value WorkflowRunStatusRunning = "RUNNING" // WorkflowRunStatusCompleted is a WorkflowRunStatus enum value WorkflowRunStatusCompleted = "COMPLETED" + + // WorkflowRunStatusStopping is a WorkflowRunStatus enum value + WorkflowRunStatusStopping = "STOPPING" + + // WorkflowRunStatusStopped is a WorkflowRunStatus enum value + WorkflowRunStatusStopped = "STOPPED" + + // WorkflowRunStatusError is a WorkflowRunStatus enum value + WorkflowRunStatusError = "ERROR" ) + +// WorkflowRunStatus_Values returns all elements of the WorkflowRunStatus enum +func WorkflowRunStatus_Values() []string { + return []string{ + WorkflowRunStatusRunning, + WorkflowRunStatusCompleted, + WorkflowRunStatusStopping, + WorkflowRunStatusStopped, + WorkflowRunStatusError, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/glue/errors.go b/vendor/github.com/aws/aws-sdk-go/service/glue/errors.go index dc5ad7afd..424f5d6e3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/glue/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/glue/errors.go @@ -38,6 +38,12 @@ const ( // A specified condition was not satisfied. ErrCodeConditionCheckFailureException = "ConditionCheckFailureException" + // ErrCodeConflictException for service response error code + // "ConflictException". + // + // The CreatePartitions API was called on a table that has indexes enabled. + ErrCodeConflictException = "ConflictException" + // ErrCodeCrawlerNotRunningException for service response error code // "CrawlerNotRunningException". // @@ -74,6 +80,12 @@ const ( // The same unique identifier was associated with two different records. ErrCodeIdempotentParameterMismatchException = "IdempotentParameterMismatchException" + // ErrCodeIllegalWorkflowStateException for service response error code + // "IllegalWorkflowStateException". + // + // The workflow is in an invalid state to perform a requested operation. + ErrCodeIllegalWorkflowStateException = "IllegalWorkflowStateException" + // ErrCodeInternalServiceException for service response error code // "InternalServiceException". 
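The generated *_Values() helpers added above are plain []string accessors; here is a minimal sketch of consuming one to validate input before calling the API. In provider code these slices are commonly handed to schema validators such as validation.StringInSlice, but that wiring is outside this vendored diff.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/glue"
)

// isKnownWorkflowRunStatus reports whether s is one of the WorkflowRunStatus
// values this SDK revision defines, using the generated _Values() accessor.
func isKnownWorkflowRunStatus(s string) bool {
	for _, v := range glue.WorkflowRunStatus_Values() {
		if s == v {
			return true
		}
	}
	return false
}

func demo() {
	// "STOPPING" is newly listed in this revision, so this prints true.
	fmt.Println(isKnownWorkflowRunStatus("STOPPING"))
}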
// @@ -147,12 +159,14 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "ConcurrentModificationException": newErrorConcurrentModificationException, "ConcurrentRunsExceededException": newErrorConcurrentRunsExceededException, "ConditionCheckFailureException": newErrorConditionCheckFailureException, + "ConflictException": newErrorConflictException, "CrawlerNotRunningException": newErrorCrawlerNotRunningException, "CrawlerRunningException": newErrorCrawlerRunningException, "CrawlerStoppingException": newErrorCrawlerStoppingException, "GlueEncryptionException": newErrorEncryptionException, "EntityNotFoundException": newErrorEntityNotFoundException, "IdempotentParameterMismatchException": newErrorIdempotentParameterMismatchException, + "IllegalWorkflowStateException": newErrorIllegalWorkflowStateException, "InternalServiceException": newErrorInternalServiceException, "InvalidInputException": newErrorInvalidInputException, "MLTransformNotReadyException": newErrorMLTransformNotReadyException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/glue/service.go b/vendor/github.com/aws/aws-sdk-go/service/glue/service.go index d4a9ceb80..060d5ac81 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/glue/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/glue/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/greengrass/api.go b/vendor/github.com/aws/aws-sdk-go/service/greengrass/api.go index 1f62bf81a..58fba6d19 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/greengrass/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/greengrass/api.go @@ -4348,6 +4348,88 @@ func (c *Greengrass) GetSubscriptionDefinitionVersionWithContext(ctx aws.Context return out, req.Send() } +const opGetThingRuntimeConfiguration = "GetThingRuntimeConfiguration" + +// GetThingRuntimeConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetThingRuntimeConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetThingRuntimeConfiguration for more information on using the GetThingRuntimeConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetThingRuntimeConfigurationRequest method. 
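The exceptionFromCode entries above let the Glue protocol unmarshaler surface the new ConflictException and IllegalWorkflowStateException as typed errors. A hedged sketch of recognising them at a call site via awserr; the helper name and the retry semantics it implies are illustrative only:

package aws

import (
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/glue"
)

// isRetryableGlueWorkflowError shows how the two error codes added above can
// be recognised at call sites. Matching on the code string via awserr.Error
// keeps the check independent of the concrete exception type.
func isRetryableGlueWorkflowError(err error) bool {
	if awsErr, ok := err.(awserr.Error); ok {
		switch awsErr.Code() {
		case glue.ErrCodeConflictException, glue.ErrCodeIllegalWorkflowStateException:
			return true
		}
	}
	return false
}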
+// req, resp := client.GetThingRuntimeConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetThingRuntimeConfiguration +func (c *Greengrass) GetThingRuntimeConfigurationRequest(input *GetThingRuntimeConfigurationInput) (req *request.Request, output *GetThingRuntimeConfigurationOutput) { + op := &request.Operation{ + Name: opGetThingRuntimeConfiguration, + HTTPMethod: "GET", + HTTPPath: "/greengrass/things/{ThingName}/runtimeconfig", + } + + if input == nil { + input = &GetThingRuntimeConfigurationInput{} + } + + output = &GetThingRuntimeConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetThingRuntimeConfiguration API operation for AWS Greengrass. +// +// Get the runtime configuration of a thing. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Greengrass's +// API operation GetThingRuntimeConfiguration for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// General error information. +// +// * InternalServerErrorException +// General error information. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/GetThingRuntimeConfiguration +func (c *Greengrass) GetThingRuntimeConfiguration(input *GetThingRuntimeConfigurationInput) (*GetThingRuntimeConfigurationOutput, error) { + req, out := c.GetThingRuntimeConfigurationRequest(input) + return out, req.Send() +} + +// GetThingRuntimeConfigurationWithContext is the same as GetThingRuntimeConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetThingRuntimeConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Greengrass) GetThingRuntimeConfigurationWithContext(ctx aws.Context, input *GetThingRuntimeConfigurationInput, opts ...request.Option) (*GetThingRuntimeConfigurationOutput, error) { + req, out := c.GetThingRuntimeConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListBulkDeploymentDetailedReports = "ListBulkDeploymentDetailedReports" // ListBulkDeploymentDetailedReportsRequest generates a "aws/request.Request" representing the @@ -7187,6 +7269,89 @@ func (c *Greengrass) UpdateSubscriptionDefinitionWithContext(ctx aws.Context, in return out, req.Send() } +const opUpdateThingRuntimeConfiguration = "UpdateThingRuntimeConfiguration" + +// UpdateThingRuntimeConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateThingRuntimeConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
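Per the comment above, the *WithContext variants panic on a nil context and otherwise use it for request cancellation. A minimal sketch of calling the new Greengrass operation with a bounded context; the session setup and thing name are assumptions:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/greengrass"
)

func main() {
	sess := session.Must(session.NewSession())
	client := greengrass.New(sess)

	// Bound the call with a timeout; the context must be non-nil or the SDK
	// panics, as documented above.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := client.GetThingRuntimeConfigurationWithContext(ctx, &greengrass.GetThingRuntimeConfigurationInput{
		ThingName: aws.String("example-core-thing"), // hypothetical thing name
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}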
+// +// See UpdateThingRuntimeConfiguration for more information on using the UpdateThingRuntimeConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateThingRuntimeConfigurationRequest method. +// req, resp := client.UpdateThingRuntimeConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/UpdateThingRuntimeConfiguration +func (c *Greengrass) UpdateThingRuntimeConfigurationRequest(input *UpdateThingRuntimeConfigurationInput) (req *request.Request, output *UpdateThingRuntimeConfigurationOutput) { + op := &request.Operation{ + Name: opUpdateThingRuntimeConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/greengrass/things/{ThingName}/runtimeconfig", + } + + if input == nil { + input = &UpdateThingRuntimeConfigurationInput{} + } + + output = &UpdateThingRuntimeConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateThingRuntimeConfiguration API operation for AWS Greengrass. +// +// Updates the runtime configuration of a thing. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Greengrass's +// API operation UpdateThingRuntimeConfiguration for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// General error information. +// +// * InternalServerErrorException +// General error information. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/greengrass-2017-06-07/UpdateThingRuntimeConfiguration +func (c *Greengrass) UpdateThingRuntimeConfiguration(input *UpdateThingRuntimeConfigurationInput) (*UpdateThingRuntimeConfigurationOutput, error) { + req, out := c.UpdateThingRuntimeConfigurationRequest(input) + return out, req.Send() +} + +// UpdateThingRuntimeConfigurationWithContext is the same as UpdateThingRuntimeConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateThingRuntimeConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Greengrass) UpdateThingRuntimeConfigurationWithContext(ctx aws.Context, input *UpdateThingRuntimeConfigurationInput, opts ...request.Option) (*UpdateThingRuntimeConfigurationOutput, error) { + req, out := c.UpdateThingRuntimeConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + type AssociateRoleToGroupInput struct { _ struct{} `type:"structure"` @@ -7328,8 +7493,8 @@ func (s *AssociateServiceRoleToAccountOutput) SetAssociatedAt(v string) *Associa // General error information. 
type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A list of error details. ErrorDetails []*ErrorDetail `type:"list"` @@ -7349,17 +7514,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7367,22 +7532,22 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a bulk deployment. You cannot start a new bulk deployment @@ -13979,6 +14144,69 @@ func (s *GetSubscriptionDefinitionVersionOutput) SetVersion(v string) *GetSubscr return s } +type GetThingRuntimeConfigurationInput struct { + _ struct{} `type:"structure"` + + // ThingName is a required field + ThingName *string `location:"uri" locationName:"ThingName" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetThingRuntimeConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetThingRuntimeConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetThingRuntimeConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetThingRuntimeConfigurationInput"} + if s.ThingName == nil { + invalidParams.Add(request.NewErrParamRequired("ThingName")) + } + if s.ThingName != nil && len(*s.ThingName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThingName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetThingName sets the ThingName field's value. +func (s *GetThingRuntimeConfigurationInput) SetThingName(v string) *GetThingRuntimeConfigurationInput { + s.ThingName = &v + return s +} + +// The runtime configuration for a thing. +type GetThingRuntimeConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Runtime configuration for a thing. 
+ RuntimeConfiguration *RuntimeConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetThingRuntimeConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetThingRuntimeConfigurationOutput) GoString() string { + return s.String() +} + +// SetRuntimeConfiguration sets the RuntimeConfiguration field's value. +func (s *GetThingRuntimeConfigurationOutput) SetRuntimeConfiguration(v *RuntimeConfiguration) *GetThingRuntimeConfigurationOutput { + s.RuntimeConfiguration = v + return s +} + // Information about a certificate authority for a group. type GroupCertificateAuthorityProperties struct { _ struct{} `type:"structure"` @@ -14206,8 +14434,8 @@ func (s *GroupVersion) SetSubscriptionDefinitionVersionArn(v string) *GroupVersi // General error information. type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A list of error details. ErrorDetails []*ErrorDetail `type:"list"` @@ -14227,17 +14455,17 @@ func (s InternalServerErrorException) GoString() string { func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { return &InternalServerErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerErrorException) Code() string { +func (s *InternalServerErrorException) Code() string { return "InternalServerErrorException" } // Message returns the exception's message. -func (s InternalServerErrorException) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14245,22 +14473,22 @@ func (s InternalServerErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerErrorException) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s InternalServerErrorException) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID } type ListBulkDeploymentDetailedReportsInput struct { @@ -16403,6 +16631,30 @@ func (s *ResourceDownloadOwnerSetting) SetGroupPermission(v string) *ResourceDow return s } +// Runtime configuration for a thing. +type RuntimeConfiguration struct { + _ struct{} `type:"structure"` + + // Configuration for telemetry service. + TelemetryConfiguration *TelemetryConfiguration `type:"structure"` +} + +// String returns the string representation +func (s RuntimeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RuntimeConfiguration) GoString() string { + return s.String() +} + +// SetTelemetryConfiguration sets the TelemetryConfiguration field's value. 
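The switch from an unexported respMetadata field with value receivers to an exported RespMetadata field with pointer receivers means *BadRequestException (and, below, *InternalServerErrorException) now satisfies the error interfaces as a pointer. A sketch of the errors.As pattern this enables; the handler itself is hypothetical:

package aws

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go/service/greengrass"
)

// handleGreengrassError matches the typed exceptions directly. Because Code,
// Message, StatusCode and RequestID now hang off the pointer receiver, the
// pointer types can be used as errors.As targets.
func handleGreengrassError(err error) {
	var badRequest *greengrass.BadRequestException
	if errors.As(err, &badRequest) {
		log.Printf("bad request (HTTP %d, request %s): %s",
			badRequest.StatusCode(), badRequest.RequestID(), badRequest.Message())
		return
	}

	var internal *greengrass.InternalServerErrorException
	if errors.As(err, &internal) {
		log.Printf("service error (HTTP %d): %s", internal.StatusCode(), internal.Message())
	}
}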
+func (s *RuntimeConfiguration) SetTelemetryConfiguration(v *TelemetryConfiguration) *RuntimeConfiguration { + s.TelemetryConfiguration = v + return s +} + // Attributes that define an Amazon S3 machine learning resource. type S3MachineLearningModelResourceData struct { _ struct{} `type:"structure"` @@ -16912,6 +17164,81 @@ func (s TagResourceOutput) GoString() string { return s.String() } +// Configuration settings for running telemetry. +type TelemetryConfiguration struct { + _ struct{} `type:"structure"` + + // Synchronization status of the device reported configuration with the desired + // configuration. + ConfigurationSyncStatus *string `type:"string" enum:"ConfigurationSyncStatus"` + + // Configure telemetry to be on or off. + // + // Telemetry is a required field + Telemetry *string `type:"string" required:"true" enum:"Telemetry"` +} + +// String returns the string representation +func (s TelemetryConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TelemetryConfiguration) GoString() string { + return s.String() +} + +// SetConfigurationSyncStatus sets the ConfigurationSyncStatus field's value. +func (s *TelemetryConfiguration) SetConfigurationSyncStatus(v string) *TelemetryConfiguration { + s.ConfigurationSyncStatus = &v + return s +} + +// SetTelemetry sets the Telemetry field's value. +func (s *TelemetryConfiguration) SetTelemetry(v string) *TelemetryConfiguration { + s.Telemetry = &v + return s +} + +// Configuration settings for running telemetry. +type TelemetryConfigurationUpdate struct { + _ struct{} `type:"structure"` + + // Configure telemetry to be on or off. + // + // Telemetry is a required field + Telemetry *string `type:"string" required:"true" enum:"Telemetry"` +} + +// String returns the string representation +func (s TelemetryConfigurationUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TelemetryConfigurationUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TelemetryConfigurationUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TelemetryConfigurationUpdate"} + if s.Telemetry == nil { + invalidParams.Add(request.NewErrParamRequired("Telemetry")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTelemetry sets the Telemetry field's value. +func (s *TelemetryConfigurationUpdate) SetTelemetry(v string) *TelemetryConfigurationUpdate { + s.Telemetry = &v + return s +} + type UntagResourceInput struct { _ struct{} `type:"structure"` @@ -17632,6 +17959,73 @@ func (s UpdateSubscriptionDefinitionOutput) GoString() string { return s.String() } +type UpdateThingRuntimeConfigurationInput struct { + _ struct{} `type:"structure"` + + // Configuration settings for running telemetry. + TelemetryConfiguration *TelemetryConfigurationUpdate `type:"structure"` + + // ThingName is a required field + ThingName *string `location:"uri" locationName:"ThingName" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateThingRuntimeConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateThingRuntimeConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateThingRuntimeConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateThingRuntimeConfigurationInput"} + if s.ThingName == nil { + invalidParams.Add(request.NewErrParamRequired("ThingName")) + } + if s.ThingName != nil && len(*s.ThingName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThingName", 1)) + } + if s.TelemetryConfiguration != nil { + if err := s.TelemetryConfiguration.Validate(); err != nil { + invalidParams.AddNested("TelemetryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTelemetryConfiguration sets the TelemetryConfiguration field's value. +func (s *UpdateThingRuntimeConfigurationInput) SetTelemetryConfiguration(v *TelemetryConfigurationUpdate) *UpdateThingRuntimeConfigurationInput { + s.TelemetryConfiguration = v + return s +} + +// SetThingName sets the ThingName field's value. +func (s *UpdateThingRuntimeConfigurationInput) SetThingName(v string) *UpdateThingRuntimeConfigurationInput { + s.ThingName = &v + return s +} + +type UpdateThingRuntimeConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateThingRuntimeConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateThingRuntimeConfigurationOutput) GoString() string { + return s.String() +} + // Information about a version. type VersionInformation struct { _ struct{} `type:"structure"` @@ -17704,6 +18098,34 @@ const ( BulkDeploymentStatusFailed = "Failed" ) +// BulkDeploymentStatus_Values returns all elements of the BulkDeploymentStatus enum +func BulkDeploymentStatus_Values() []string { + return []string{ + BulkDeploymentStatusInitializing, + BulkDeploymentStatusRunning, + BulkDeploymentStatusCompleted, + BulkDeploymentStatusStopping, + BulkDeploymentStatusStopped, + BulkDeploymentStatusFailed, + } +} + +const ( + // ConfigurationSyncStatusInSync is a ConfigurationSyncStatus enum value + ConfigurationSyncStatusInSync = "InSync" + + // ConfigurationSyncStatusOutOfSync is a ConfigurationSyncStatus enum value + ConfigurationSyncStatusOutOfSync = "OutOfSync" +) + +// ConfigurationSyncStatus_Values returns all elements of the ConfigurationSyncStatus enum +func ConfigurationSyncStatus_Values() []string { + return []string{ + ConfigurationSyncStatusInSync, + ConfigurationSyncStatusOutOfSync, + } +} + // The type of deployment. When used for ''CreateDeployment'', only ''NewDeployment'' // and ''Redeployment'' are valid. const ( @@ -17720,6 +18142,16 @@ const ( DeploymentTypeForceResetDeployment = "ForceResetDeployment" ) +// DeploymentType_Values returns all elements of the DeploymentType enum +func DeploymentType_Values() []string { + return []string{ + DeploymentTypeNewDeployment, + DeploymentTypeRedeployment, + DeploymentTypeResetDeployment, + DeploymentTypeForceResetDeployment, + } +} + const ( // EncodingTypeBinary is a EncodingType enum value EncodingTypeBinary = "binary" @@ -17728,6 +18160,14 @@ const ( EncodingTypeJson = "json" ) +// EncodingType_Values returns all elements of the EncodingType enum +func EncodingType_Values() []string { + return []string{ + EncodingTypeBinary, + EncodingTypeJson, + } +} + // Specifies whether the Lambda function runs in a Greengrass container (default) // or without containerization. 
Unless your scenario requires that you run without // containerization, we recommend that you run in a Greengrass container. Omit @@ -17741,6 +18181,14 @@ const ( FunctionIsolationModeNoContainer = "NoContainer" ) +// FunctionIsolationMode_Values returns all elements of the FunctionIsolationMode enum +func FunctionIsolationMode_Values() []string { + return []string{ + FunctionIsolationModeGreengrassContainer, + FunctionIsolationModeNoContainer, + } +} + const ( // LoggerComponentGreengrassSystem is a LoggerComponent enum value LoggerComponentGreengrassSystem = "GreengrassSystem" @@ -17749,6 +18197,14 @@ const ( LoggerComponentLambda = "Lambda" ) +// LoggerComponent_Values returns all elements of the LoggerComponent enum +func LoggerComponent_Values() []string { + return []string{ + LoggerComponentGreengrassSystem, + LoggerComponentLambda, + } +} + const ( // LoggerLevelDebug is a LoggerLevel enum value LoggerLevelDebug = "DEBUG" @@ -17766,6 +18222,17 @@ const ( LoggerLevelFatal = "FATAL" ) +// LoggerLevel_Values returns all elements of the LoggerLevel enum +func LoggerLevel_Values() []string { + return []string{ + LoggerLevelDebug, + LoggerLevelInfo, + LoggerLevelWarn, + LoggerLevelError, + LoggerLevelFatal, + } +} + const ( // LoggerTypeFileSystem is a LoggerType enum value LoggerTypeFileSystem = "FileSystem" @@ -17774,6 +18241,14 @@ const ( LoggerTypeAwscloudWatch = "AWSCloudWatch" ) +// LoggerType_Values returns all elements of the LoggerType enum +func LoggerType_Values() []string { + return []string{ + LoggerTypeFileSystem, + LoggerTypeAwscloudWatch, + } +} + // The type of permission a function has to access a resource. const ( // PermissionRo is a Permission enum value @@ -17783,6 +18258,14 @@ const ( PermissionRw = "rw" ) +// Permission_Values returns all elements of the Permission enum +func Permission_Values() []string { + return []string{ + PermissionRo, + PermissionRw, + } +} + // The piece of software on the Greengrass core that will be updated. const ( // SoftwareToUpdateCore is a SoftwareToUpdate enum value @@ -17792,6 +18275,30 @@ const ( SoftwareToUpdateOtaAgent = "ota_agent" ) +// SoftwareToUpdate_Values returns all elements of the SoftwareToUpdate enum +func SoftwareToUpdate_Values() []string { + return []string{ + SoftwareToUpdateCore, + SoftwareToUpdateOtaAgent, + } +} + +const ( + // TelemetryOn is a Telemetry enum value + TelemetryOn = "On" + + // TelemetryOff is a Telemetry enum value + TelemetryOff = "Off" +) + +// Telemetry_Values returns all elements of the Telemetry enum +func Telemetry_Values() []string { + return []string{ + TelemetryOn, + TelemetryOff, + } +} + // The minimum level of log statements that should be logged by the OTA Agent // during an update. const ( @@ -17820,6 +18327,20 @@ const ( UpdateAgentLogLevelFatal = "FATAL" ) +// UpdateAgentLogLevel_Values returns all elements of the UpdateAgentLogLevel enum +func UpdateAgentLogLevel_Values() []string { + return []string{ + UpdateAgentLogLevelNone, + UpdateAgentLogLevelTrace, + UpdateAgentLogLevelDebug, + UpdateAgentLogLevelVerbose, + UpdateAgentLogLevelInfo, + UpdateAgentLogLevelWarn, + UpdateAgentLogLevelError, + UpdateAgentLogLevelFatal, + } +} + // The architecture of the cores which are the targets of an update. 
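Tying the pieces together, the TelemetryConfigurationUpdate structure plus the Telemetry enum constants drive the new UpdateThingRuntimeConfiguration operation. A minimal sketch, with the thing name being an assumption:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/greengrass"
)

func main() {
	sess := session.Must(session.NewSession())
	client := greengrass.New(sess)

	// Turn telemetry off for one core, using the Telemetry enum constant
	// rather than a raw string.
	_, err := client.UpdateThingRuntimeConfiguration(&greengrass.UpdateThingRuntimeConfigurationInput{
		ThingName: aws.String("example-core-thing"), // hypothetical thing name
		TelemetryConfiguration: &greengrass.TelemetryConfigurationUpdate{
			Telemetry: aws.String(greengrass.TelemetryOff),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}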
const ( // UpdateTargetsArchitectureArmv6l is a UpdateTargetsArchitecture enum value @@ -17835,6 +18356,16 @@ const ( UpdateTargetsArchitectureAarch64 = "aarch64" ) +// UpdateTargetsArchitecture_Values returns all elements of the UpdateTargetsArchitecture enum +func UpdateTargetsArchitecture_Values() []string { + return []string{ + UpdateTargetsArchitectureArmv6l, + UpdateTargetsArchitectureArmv7l, + UpdateTargetsArchitectureX8664, + UpdateTargetsArchitectureAarch64, + } +} + // The operating system of the cores which are the targets of an update. const ( // UpdateTargetsOperatingSystemUbuntu is a UpdateTargetsOperatingSystem enum value @@ -17849,3 +18380,13 @@ const ( // UpdateTargetsOperatingSystemOpenwrt is a UpdateTargetsOperatingSystem enum value UpdateTargetsOperatingSystemOpenwrt = "openwrt" ) + +// UpdateTargetsOperatingSystem_Values returns all elements of the UpdateTargetsOperatingSystem enum +func UpdateTargetsOperatingSystem_Values() []string { + return []string{ + UpdateTargetsOperatingSystemUbuntu, + UpdateTargetsOperatingSystemRaspbian, + UpdateTargetsOperatingSystemAmazonLinux, + UpdateTargetsOperatingSystemOpenwrt, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/greengrass/service.go b/vendor/github.com/aws/aws-sdk-go/service/greengrass/service.go index d6df2b3d9..6d47a15c6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/greengrass/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/greengrass/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/guardduty/api.go b/vendor/github.com/aws/aws-sdk-go/service/guardduty/api.go index e6778de64..8dfeb33e1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/guardduty/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/guardduty/api.go @@ -4,6 +4,7 @@ package guardduty import ( "fmt" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" @@ -68,10 +69,10 @@ func (c *GuardDuty) AcceptInvitationRequest(input *AcceptInvitationInput) (req * // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/AcceptInvitation func (c *GuardDuty) AcceptInvitation(input *AcceptInvitationInput) (*AcceptInvitationOutput, error) { @@ -140,9 +141,9 @@ func (c *GuardDuty) ArchiveFindingsRequest(input *ArchiveFindingsInput) (req *re // ArchiveFindings API operation for Amazon GuardDuty. // -// Archives GuardDuty findings specified by the list of finding IDs. +// Archives GuardDuty findings that are specified by the list of finding IDs. // -// Only the master account can archive findings. Member accounts do not have +// Only the master account can archive findings. Member accounts don't have // permission to archive findings from their accounts. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -154,10 +155,10 @@ func (c *GuardDuty) ArchiveFindingsRequest(input *ArchiveFindingsInput) (req *re // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ArchiveFindings func (c *GuardDuty) ArchiveFindings(input *ArchiveFindingsInput) (*ArchiveFindingsOutput, error) { @@ -227,8 +228,9 @@ func (c *GuardDuty) CreateDetectorRequest(input *CreateDetectorInput) (req *requ // // Creates a single Amazon GuardDuty detector. A detector is a resource that // represents the GuardDuty service. To start using GuardDuty, you must create -// a detector in each region that you enable the service. You can have only -// one detector per account per region. +// a detector in each Region where you enable the service. You can have only +// one detector per account per Region. All data sources are enabled in a new +// detector by default. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -239,10 +241,10 @@ func (c *GuardDuty) CreateDetectorRequest(input *CreateDetectorInput) (req *requ // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateDetector func (c *GuardDuty) CreateDetector(input *CreateDetectorInput) (*CreateDetectorOutput, error) { @@ -321,10 +323,10 @@ func (c *GuardDuty) CreateFilterRequest(input *CreateFilterInput) (req *request. // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateFilter func (c *GuardDuty) CreateFilter(input *CreateFilterInput) (*CreateFilterOutput, error) { @@ -392,11 +394,11 @@ func (c *GuardDuty) CreateIPSetRequest(input *CreateIPSetInput) (req *request.Re // CreateIPSet API operation for Amazon GuardDuty. // -// Creates a new IPSet, called Trusted IP list in the consoler user interface. -// An IPSet is a list IP addresses trusted for secure communication with AWS -// infrastructure and applications. GuardDuty does not generate findings for -// IP addresses included in IPSets. Only users from the master account can use -// this operation. +// Creates a new IPSet, which is called a trusted IP list in the console user +// interface. An IPSet is a list of IP addresses that are trusted for secure +// communication with AWS infrastructure and applications. GuardDuty doesn't +// generate findings for IP addresses that are included in IPSets. Only users +// from the master account can use this operation. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -407,10 +409,10 @@ func (c *GuardDuty) CreateIPSetRequest(input *CreateIPSetInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateIPSet func (c *GuardDuty) CreateIPSet(input *CreateIPSetInput) (*CreateIPSetOutput, error) { @@ -479,8 +481,17 @@ func (c *GuardDuty) CreateMembersRequest(input *CreateMembersInput) (req *reques // CreateMembers API operation for Amazon GuardDuty. // // Creates member accounts of the current AWS account by specifying a list of -// AWS account IDs. The current AWS account can then invite these members to -// manage GuardDuty in their accounts. +// AWS account IDs. This step is a prerequisite for managing the associated +// member accounts either by invitation or through an organization. +// +// When using Create Members as an organizations delegated administrator this +// action will enable GuardDuty in the added member accounts, with the exception +// of the organization master account, which must enable GuardDuty prior to +// being added as a member. +// +// If you are adding accounts by invitation use this action after GuardDuty +// has been enabled in potential member accounts and before using Invite Members +// (https://docs.aws.amazon.com/guardduty/latest/APIReference/API_InviteMembers.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -491,10 +502,10 @@ func (c *GuardDuty) CreateMembersRequest(input *CreateMembersInput) (req *reques // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateMembers func (c *GuardDuty) CreateMembers(input *CreateMembersInput) (*CreateMembersOutput, error) { @@ -562,7 +573,7 @@ func (c *GuardDuty) CreatePublishingDestinationRequest(input *CreatePublishingDe // CreatePublishingDestination API operation for Amazon GuardDuty. // -// Creates a publishing destination to send findings to. The resource to send +// Creates a publishing destination to export findings to. The resource to export // findings to must exist before you use this operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -574,10 +585,10 @@ func (c *GuardDuty) CreatePublishingDestinationRequest(input *CreatePublishingDe // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreatePublishingDestination func (c *GuardDuty) CreatePublishingDestination(input *CreatePublishingDestinationInput) (*CreatePublishingDestinationOutput, error) { @@ -659,10 +670,10 @@ func (c *GuardDuty) CreateSampleFindingsRequest(input *CreateSampleFindingsInput // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateSampleFindings func (c *GuardDuty) CreateSampleFindings(input *CreateSampleFindingsInput) (*CreateSampleFindingsOutput, error) { @@ -730,9 +741,9 @@ func (c *GuardDuty) CreateThreatIntelSetRequest(input *CreateThreatIntelSetInput // CreateThreatIntelSet API operation for Amazon GuardDuty. // -// Create a new ThreatIntelSet. ThreatIntelSets consist of known malicious IP -// addresses. GuardDuty generates findings based on ThreatIntelSets. Only users -// of the master account can use this operation. +// Creates a new ThreatIntelSet. ThreatIntelSets consist of known malicious +// IP addresses. GuardDuty generates findings based on ThreatIntelSets. Only +// users of the master account can use this operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -743,10 +754,10 @@ func (c *GuardDuty) CreateThreatIntelSetRequest(input *CreateThreatIntelSetInput // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/CreateThreatIntelSet func (c *GuardDuty) CreateThreatIntelSet(input *CreateThreatIntelSetInput) (*CreateThreatIntelSetOutput, error) { @@ -814,7 +825,7 @@ func (c *GuardDuty) DeclineInvitationsRequest(input *DeclineInvitationsInput) (r // DeclineInvitations API operation for Amazon GuardDuty. // -// Declines invitations sent to the current member account by AWS account specified +// Declines invitations sent to the current member account by AWS accounts specified // by their account IDs. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -826,10 +837,10 @@ func (c *GuardDuty) DeclineInvitationsRequest(input *DeclineInvitationsInput) (r // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeclineInvitations func (c *GuardDuty) DeclineInvitations(input *DeclineInvitationsInput) (*DeclineInvitationsOutput, error) { @@ -898,7 +909,7 @@ func (c *GuardDuty) DeleteDetectorRequest(input *DeleteDetectorInput) (req *requ // DeleteDetector API operation for Amazon GuardDuty. // -// Deletes a Amazon GuardDuty detector specified by the detector ID. +// Deletes an Amazon GuardDuty detector that is specified by the detector ID. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -909,10 +920,10 @@ func (c *GuardDuty) DeleteDetectorRequest(input *DeleteDetectorInput) (req *requ // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteDetector func (c *GuardDuty) DeleteDetector(input *DeleteDetectorInput) (*DeleteDetectorOutput, error) { @@ -992,10 +1003,10 @@ func (c *GuardDuty) DeleteFilterRequest(input *DeleteFilterInput) (req *request. // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteFilter func (c *GuardDuty) DeleteFilter(input *DeleteFilterInput) (*DeleteFilterOutput, error) { @@ -1064,7 +1075,7 @@ func (c *GuardDuty) DeleteIPSetRequest(input *DeleteIPSetInput) (req *request.Re // DeleteIPSet API operation for Amazon GuardDuty. // -// Deletes the IPSet specified by the ipSetId. IPSets are called Trusted IP +// Deletes the IPSet specified by the ipSetId. IPSets are called trusted IP // lists in the console user interface. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1076,10 +1087,10 @@ func (c *GuardDuty) DeleteIPSetRequest(input *DeleteIPSetInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteIPSet func (c *GuardDuty) DeleteIPSet(input *DeleteIPSetInput) (*DeleteIPSetOutput, error) { @@ -1159,10 +1170,10 @@ func (c *GuardDuty) DeleteInvitationsRequest(input *DeleteInvitationsInput) (req // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteInvitations func (c *GuardDuty) DeleteInvitations(input *DeleteInvitationsInput) (*DeleteInvitationsOutput, error) { @@ -1242,10 +1253,10 @@ func (c *GuardDuty) DeleteMembersRequest(input *DeleteMembersInput) (req *reques // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteMembers func (c *GuardDuty) DeleteMembers(input *DeleteMembersInput) (*DeleteMembersOutput, error) { @@ -1325,10 +1336,10 @@ func (c *GuardDuty) DeletePublishingDestinationRequest(input *DeletePublishingDe // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. 
// // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeletePublishingDestination func (c *GuardDuty) DeletePublishingDestination(input *DeletePublishingDestinationInput) (*DeletePublishingDestinationOutput, error) { @@ -1397,7 +1408,7 @@ func (c *GuardDuty) DeleteThreatIntelSetRequest(input *DeleteThreatIntelSetInput // DeleteThreatIntelSet API operation for Amazon GuardDuty. // -// Deletes ThreatIntelSet specified by the ThreatIntelSet ID. +// Deletes the ThreatIntelSet specified by the ThreatIntelSet ID. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1408,10 +1419,10 @@ func (c *GuardDuty) DeleteThreatIntelSetRequest(input *DeleteThreatIntelSetInput // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DeleteThreatIntelSet func (c *GuardDuty) DeleteThreatIntelSet(input *DeleteThreatIntelSetInput) (*DeleteThreatIntelSetOutput, error) { @@ -1435,6 +1446,89 @@ func (c *GuardDuty) DeleteThreatIntelSetWithContext(ctx aws.Context, input *Dele return out, req.Send() } +const opDescribeOrganizationConfiguration = "DescribeOrganizationConfiguration" + +// DescribeOrganizationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOrganizationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeOrganizationConfiguration for more information on using the DescribeOrganizationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeOrganizationConfigurationRequest method. +// req, resp := client.DescribeOrganizationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DescribeOrganizationConfiguration +func (c *GuardDuty) DescribeOrganizationConfigurationRequest(input *DescribeOrganizationConfigurationInput) (req *request.Request, output *DescribeOrganizationConfigurationOutput) { + op := &request.Operation{ + Name: opDescribeOrganizationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/detector/{detectorId}/admin", + } + + if input == nil { + input = &DescribeOrganizationConfigurationInput{} + } + + output = &DescribeOrganizationConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeOrganizationConfiguration API operation for Amazon GuardDuty. +// +// Returns information about the account selected as the delegated administrator +// for GuardDuty. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GuardDuty's +// API operation DescribeOrganizationConfiguration for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// A bad request exception object. +// +// * InternalServerErrorException +// An internal server error exception object. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DescribeOrganizationConfiguration +func (c *GuardDuty) DescribeOrganizationConfiguration(input *DescribeOrganizationConfigurationInput) (*DescribeOrganizationConfigurationOutput, error) { + req, out := c.DescribeOrganizationConfigurationRequest(input) + return out, req.Send() +} + +// DescribeOrganizationConfigurationWithContext is the same as DescribeOrganizationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeOrganizationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GuardDuty) DescribeOrganizationConfigurationWithContext(ctx aws.Context, input *DescribeOrganizationConfigurationInput, opts ...request.Option) (*DescribeOrganizationConfigurationOutput, error) { + req, out := c.DescribeOrganizationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribePublishingDestination = "DescribePublishingDestination" // DescribePublishingDestinationRequest generates a "aws/request.Request" representing the @@ -1491,10 +1585,10 @@ func (c *GuardDuty) DescribePublishingDestinationRequest(input *DescribePublishi // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DescribePublishingDestination func (c *GuardDuty) DescribePublishingDestination(input *DescribePublishingDestinationInput) (*DescribePublishingDestinationOutput, error) { @@ -1518,6 +1612,90 @@ func (c *GuardDuty) DescribePublishingDestinationWithContext(ctx aws.Context, in return out, req.Send() } +const opDisableOrganizationAdminAccount = "DisableOrganizationAdminAccount" + +// DisableOrganizationAdminAccountRequest generates a "aws/request.Request" representing the +// client's request for the DisableOrganizationAdminAccount operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableOrganizationAdminAccount for more information on using the DisableOrganizationAdminAccount +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisableOrganizationAdminAccountRequest method. 
+// req, resp := client.DisableOrganizationAdminAccountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DisableOrganizationAdminAccount +func (c *GuardDuty) DisableOrganizationAdminAccountRequest(input *DisableOrganizationAdminAccountInput) (req *request.Request, output *DisableOrganizationAdminAccountOutput) { + op := &request.Operation{ + Name: opDisableOrganizationAdminAccount, + HTTPMethod: "POST", + HTTPPath: "/admin/disable", + } + + if input == nil { + input = &DisableOrganizationAdminAccountInput{} + } + + output = &DisableOrganizationAdminAccountOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisableOrganizationAdminAccount API operation for Amazon GuardDuty. +// +// Disables an AWS account within the Organization as the GuardDuty delegated +// administrator. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GuardDuty's +// API operation DisableOrganizationAdminAccount for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// A bad request exception object. +// +// * InternalServerErrorException +// An internal server error exception object. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DisableOrganizationAdminAccount +func (c *GuardDuty) DisableOrganizationAdminAccount(input *DisableOrganizationAdminAccountInput) (*DisableOrganizationAdminAccountOutput, error) { + req, out := c.DisableOrganizationAdminAccountRequest(input) + return out, req.Send() +} + +// DisableOrganizationAdminAccountWithContext is the same as DisableOrganizationAdminAccount with the addition of +// the ability to pass a context and additional request options. +// +// See DisableOrganizationAdminAccount for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GuardDuty) DisableOrganizationAdminAccountWithContext(ctx aws.Context, input *DisableOrganizationAdminAccountInput, opts ...request.Option) (*DisableOrganizationAdminAccountOutput, error) { + req, out := c.DisableOrganizationAdminAccountRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDisassociateFromMasterAccount = "DisassociateFromMasterAccount" // DisassociateFromMasterAccountRequest generates a "aws/request.Request" representing the @@ -1574,10 +1752,10 @@ func (c *GuardDuty) DisassociateFromMasterAccountRequest(input *DisassociateFrom // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DisassociateFromMasterAccount func (c *GuardDuty) DisassociateFromMasterAccount(input *DisassociateFromMasterAccountInput) (*DisassociateFromMasterAccountOutput, error) { @@ -1657,10 +1835,10 @@ func (c *GuardDuty) DisassociateMembersRequest(input *DisassociateMembersInput) // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/DisassociateMembers func (c *GuardDuty) DisassociateMembers(input *DisassociateMembersInput) (*DisassociateMembersOutput, error) { @@ -1684,6 +1862,90 @@ func (c *GuardDuty) DisassociateMembersWithContext(ctx aws.Context, input *Disas return out, req.Send() } +const opEnableOrganizationAdminAccount = "EnableOrganizationAdminAccount" + +// EnableOrganizationAdminAccountRequest generates a "aws/request.Request" representing the +// client's request for the EnableOrganizationAdminAccount operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableOrganizationAdminAccount for more information on using the EnableOrganizationAdminAccount +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableOrganizationAdminAccountRequest method. +// req, resp := client.EnableOrganizationAdminAccountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/EnableOrganizationAdminAccount +func (c *GuardDuty) EnableOrganizationAdminAccountRequest(input *EnableOrganizationAdminAccountInput) (req *request.Request, output *EnableOrganizationAdminAccountOutput) { + op := &request.Operation{ + Name: opEnableOrganizationAdminAccount, + HTTPMethod: "POST", + HTTPPath: "/admin/enable", + } + + if input == nil { + input = &EnableOrganizationAdminAccountInput{} + } + + output = &EnableOrganizationAdminAccountOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// EnableOrganizationAdminAccount API operation for Amazon GuardDuty. +// +// Enables an AWS account within the organization as the GuardDuty delegated +// administrator. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GuardDuty's +// API operation EnableOrganizationAdminAccount for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// A bad request exception object. +// +// * InternalServerErrorException +// An internal server error exception object. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/EnableOrganizationAdminAccount +func (c *GuardDuty) EnableOrganizationAdminAccount(input *EnableOrganizationAdminAccountInput) (*EnableOrganizationAdminAccountOutput, error) { + req, out := c.EnableOrganizationAdminAccountRequest(input) + return out, req.Send() +} + +// EnableOrganizationAdminAccountWithContext is the same as EnableOrganizationAdminAccount with the addition of +// the ability to pass a context and additional request options. +// +// See EnableOrganizationAdminAccount for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GuardDuty) EnableOrganizationAdminAccountWithContext(ctx aws.Context, input *EnableOrganizationAdminAccountInput, opts ...request.Option) (*EnableOrganizationAdminAccountOutput, error) { + req, out := c.EnableOrganizationAdminAccountRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetDetector = "GetDetector" // GetDetectorRequest generates a "aws/request.Request" representing the @@ -1739,10 +2001,10 @@ func (c *GuardDuty) GetDetectorRequest(input *GetDetectorInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetDetector func (c *GuardDuty) GetDetector(input *GetDetectorInput) (*GetDetectorOutput, error) { @@ -1821,10 +2083,10 @@ func (c *GuardDuty) GetFilterRequest(input *GetFilterInput) (req *request.Reques // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetFilter func (c *GuardDuty) GetFilter(input *GetFilterInput) (*GetFilterOutput, error) { @@ -1903,10 +2165,10 @@ func (c *GuardDuty) GetFindingsRequest(input *GetFindingsInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetFindings func (c *GuardDuty) GetFindings(input *GetFindingsInput) (*GetFindingsOutput, error) { @@ -1974,7 +2236,7 @@ func (c *GuardDuty) GetFindingsStatisticsRequest(input *GetFindingsStatisticsInp // GetFindingsStatistics API operation for Amazon GuardDuty. // -// Lists Amazon GuardDuty findings' statistics for the specified detector ID. +// Lists Amazon GuardDuty findings statistics for the specified detector ID. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1985,10 +2247,10 @@ func (c *GuardDuty) GetFindingsStatisticsRequest(input *GetFindingsStatisticsInp // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetFindingsStatistics func (c *GuardDuty) GetFindingsStatistics(input *GetFindingsStatisticsInput) (*GetFindingsStatisticsOutput, error) { @@ -2067,10 +2329,10 @@ func (c *GuardDuty) GetIPSetRequest(input *GetIPSetInput) (req *request.Request, // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetIPSet func (c *GuardDuty) GetIPSet(input *GetIPSetInput) (*GetIPSetOutput, error) { @@ -2150,10 +2412,10 @@ func (c *GuardDuty) GetInvitationsCountRequest(input *GetInvitationsCountInput) // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetInvitationsCount func (c *GuardDuty) GetInvitationsCount(input *GetInvitationsCountInput) (*GetInvitationsCountOutput, error) { @@ -2233,10 +2495,10 @@ func (c *GuardDuty) GetMasterAccountRequest(input *GetMasterAccountInput) (req * // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetMasterAccount func (c *GuardDuty) GetMasterAccount(input *GetMasterAccountInput) (*GetMasterAccountOutput, error) { @@ -2260,6 +2522,88 @@ func (c *GuardDuty) GetMasterAccountWithContext(ctx aws.Context, input *GetMaste return out, req.Send() } +const opGetMemberDetectors = "GetMemberDetectors" + +// GetMemberDetectorsRequest generates a "aws/request.Request" representing the +// client's request for the GetMemberDetectors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetMemberDetectors for more information on using the GetMemberDetectors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetMemberDetectorsRequest method. 
+// req, resp := client.GetMemberDetectorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetMemberDetectors +func (c *GuardDuty) GetMemberDetectorsRequest(input *GetMemberDetectorsInput) (req *request.Request, output *GetMemberDetectorsOutput) { + op := &request.Operation{ + Name: opGetMemberDetectors, + HTTPMethod: "POST", + HTTPPath: "/detector/{detectorId}/member/detector/get", + } + + if input == nil { + input = &GetMemberDetectorsInput{} + } + + output = &GetMemberDetectorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMemberDetectors API operation for Amazon GuardDuty. +// +// Describes which data sources are enabled for the member account's detector. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GuardDuty's +// API operation GetMemberDetectors for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// A bad request exception object. +// +// * InternalServerErrorException +// An internal server error exception object. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetMemberDetectors +func (c *GuardDuty) GetMemberDetectors(input *GetMemberDetectorsInput) (*GetMemberDetectorsOutput, error) { + req, out := c.GetMemberDetectorsRequest(input) + return out, req.Send() +} + +// GetMemberDetectorsWithContext is the same as GetMemberDetectors with the addition of +// the ability to pass a context and additional request options. +// +// See GetMemberDetectors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GuardDuty) GetMemberDetectorsWithContext(ctx aws.Context, input *GetMemberDetectorsInput, opts ...request.Option) (*GetMemberDetectorsOutput, error) { + req, out := c.GetMemberDetectorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetMembers = "GetMembers" // GetMembersRequest generates a "aws/request.Request" representing the @@ -2316,10 +2660,10 @@ func (c *GuardDuty) GetMembersRequest(input *GetMembersInput) (req *request.Requ // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetMembers func (c *GuardDuty) GetMembers(input *GetMembersInput) (*GetMembersOutput, error) { @@ -2398,10 +2742,10 @@ func (c *GuardDuty) GetThreatIntelSetRequest(input *GetThreatIntelSetInput) (req // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. 
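+//
+// A hedged sketch (both IDs below are placeholders, and the client is assumed
+// to exist):
+//
+//    out, err := client.GetThreatIntelSet(&guardduty.GetThreatIntelSetInput{
+//        DetectorId:       aws.String("12abc34d567e8fa901bc2d34e56789f0"),
+//        ThreatIntelSetId: aws.String("1234567890abcdef"),
+//    })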
// // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetThreatIntelSet func (c *GuardDuty) GetThreatIntelSet(input *GetThreatIntelSetInput) (*GetThreatIntelSetOutput, error) { @@ -2425,54 +2769,199 @@ func (c *GuardDuty) GetThreatIntelSetWithContext(ctx aws.Context, input *GetThre return out, req.Send() } -const opInviteMembers = "InviteMembers" +const opGetUsageStatistics = "GetUsageStatistics" -// InviteMembersRequest generates a "aws/request.Request" representing the -// client's request for the InviteMembers operation. The "output" return +// GetUsageStatisticsRequest generates a "aws/request.Request" representing the +// client's request for the GetUsageStatistics operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See InviteMembers for more information on using the InviteMembers +// See GetUsageStatistics for more information on using the GetUsageStatistics // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the InviteMembersRequest method. -// req, resp := client.InviteMembersRequest(params) +// // Example sending a request using the GetUsageStatisticsRequest method. +// req, resp := client.GetUsageStatisticsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/InviteMembers -func (c *GuardDuty) InviteMembersRequest(input *InviteMembersInput) (req *request.Request, output *InviteMembersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetUsageStatistics +func (c *GuardDuty) GetUsageStatisticsRequest(input *GetUsageStatisticsInput) (req *request.Request, output *GetUsageStatisticsOutput) { op := &request.Operation{ - Name: opInviteMembers, + Name: opGetUsageStatistics, HTTPMethod: "POST", - HTTPPath: "/detector/{detectorId}/member/invite", + HTTPPath: "/detector/{detectorId}/usage/statistics", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &InviteMembersInput{} + input = &GetUsageStatisticsInput{} } - output = &InviteMembersOutput{} + output = &GetUsageStatisticsOutput{} req = c.newRequest(op, input, output) return } -// InviteMembers API operation for Amazon GuardDuty. +// GetUsageStatistics API operation for Amazon GuardDuty. // -// Invites other AWS accounts (created as members of the current AWS account -// by CreateMembers) to enable GuardDuty and allow the current AWS account to -// view and manage these accounts' GuardDuty findings on their behalf as the -// master account. +// Lists Amazon GuardDuty usage statistics over the last 30 days for the specified +// detector ID. For newly enabled detectors or data sources the cost returned +// will include only the usage so far under 30 days, this may differ from the +// cost metrics in the console, which projects usage over 30 days to provide +// a monthly cost estimate. 
For more information see Understanding How Usage +// Costs are Calculated (https://docs.aws.amazon.com/guardduty/latest/ug/monitoring_costs.html#usage-calculations). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GuardDuty's +// API operation GetUsageStatistics for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// A bad request exception object. +// +// * InternalServerErrorException +// An internal server error exception object. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/GetUsageStatistics +func (c *GuardDuty) GetUsageStatistics(input *GetUsageStatisticsInput) (*GetUsageStatisticsOutput, error) { + req, out := c.GetUsageStatisticsRequest(input) + return out, req.Send() +} + +// GetUsageStatisticsWithContext is the same as GetUsageStatistics with the addition of +// the ability to pass a context and additional request options. +// +// See GetUsageStatistics for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GuardDuty) GetUsageStatisticsWithContext(ctx aws.Context, input *GetUsageStatisticsInput, opts ...request.Option) (*GetUsageStatisticsOutput, error) { + req, out := c.GetUsageStatisticsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetUsageStatisticsPages iterates over the pages of a GetUsageStatistics operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetUsageStatistics method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetUsageStatistics operation. +// pageNum := 0 +// err := client.GetUsageStatisticsPages(params, +// func(page *guardduty.GetUsageStatisticsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GuardDuty) GetUsageStatisticsPages(input *GetUsageStatisticsInput, fn func(*GetUsageStatisticsOutput, bool) bool) error { + return c.GetUsageStatisticsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetUsageStatisticsPagesWithContext same as GetUsageStatisticsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GuardDuty) GetUsageStatisticsPagesWithContext(ctx aws.Context, input *GetUsageStatisticsInput, fn func(*GetUsageStatisticsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetUsageStatisticsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetUsageStatisticsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
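+			// Hand the prepared request to the paginator; Pagination.Next sends it
+			// and threads NextToken from each response into the following page.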
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetUsageStatisticsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opInviteMembers = "InviteMembers" + +// InviteMembersRequest generates a "aws/request.Request" representing the +// client's request for the InviteMembers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See InviteMembers for more information on using the InviteMembers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the InviteMembersRequest method. +// req, resp := client.InviteMembersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/InviteMembers +func (c *GuardDuty) InviteMembersRequest(input *InviteMembersInput) (req *request.Request, output *InviteMembersOutput) { + op := &request.Operation{ + Name: opInviteMembers, + HTTPMethod: "POST", + HTTPPath: "/detector/{detectorId}/member/invite", + } + + if input == nil { + input = &InviteMembersInput{} + } + + output = &InviteMembersOutput{} + req = c.newRequest(op, input, output) + return +} + +// InviteMembers API operation for Amazon GuardDuty. +// +// Invites other AWS accounts (created as members of the current AWS account +// by CreateMembers) to enable GuardDuty, and allow the current AWS account +// to view and manage these accounts' GuardDuty findings on their behalf as +// the master account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2483,10 +2972,10 @@ func (c *GuardDuty) InviteMembersRequest(input *InviteMembersInput) (req *reques // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/InviteMembers func (c *GuardDuty) InviteMembers(input *InviteMembersInput) (*InviteMembersOutput, error) { @@ -2571,10 +3060,10 @@ func (c *GuardDuty) ListDetectorsRequest(input *ListDetectorsInput) (req *reques // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListDetectors func (c *GuardDuty) ListDetectors(input *ListDetectorsInput) (*ListDetectorsOutput, error) { @@ -2711,10 +3200,10 @@ func (c *GuardDuty) ListFiltersRequest(input *ListFiltersInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. 
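+//
+// A minimal sketch of listing the filter names for a detector (placeholder
+// detector ID; client assumed to exist):
+//
+//    out, err := client.ListFilters(&guardduty.ListFiltersInput{
+//        DetectorId: aws.String("12abc34d567e8fa901bc2d34e56789f0"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValueSlice(out.FilterNames))
+//    }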
// // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListFilters func (c *GuardDuty) ListFilters(input *ListFiltersInput) (*ListFiltersOutput, error) { @@ -2851,10 +3340,10 @@ func (c *GuardDuty) ListFindingsRequest(input *ListFindingsInput) (req *request. // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListFindings func (c *GuardDuty) ListFindings(input *ListFindingsInput) (*ListFindingsOutput, error) { @@ -2993,10 +3482,10 @@ func (c *GuardDuty) ListIPSetsRequest(input *ListIPSetsInput) (req *request.Requ // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListIPSets func (c *GuardDuty) ListIPSets(input *ListIPSetsInput) (*ListIPSetsOutput, error) { @@ -3134,10 +3623,10 @@ func (c *GuardDuty) ListInvitationsRequest(input *ListInvitationsInput) (req *re // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListInvitations func (c *GuardDuty) ListInvitations(input *ListInvitationsInput) (*ListInvitationsOutput, error) { @@ -3275,10 +3764,10 @@ func (c *GuardDuty) ListMembersRequest(input *ListMembersInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListMembers func (c *GuardDuty) ListMembers(input *ListMembersInput) (*ListMembersOutput, error) { @@ -3354,6 +3843,146 @@ func (c *GuardDuty) ListMembersPagesWithContext(ctx aws.Context, input *ListMemb return p.Err() } +const opListOrganizationAdminAccounts = "ListOrganizationAdminAccounts" + +// ListOrganizationAdminAccountsRequest generates a "aws/request.Request" representing the +// client's request for the ListOrganizationAdminAccounts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListOrganizationAdminAccounts for more information on using the ListOrganizationAdminAccounts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListOrganizationAdminAccountsRequest method. 
+// req, resp := client.ListOrganizationAdminAccountsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListOrganizationAdminAccounts +func (c *GuardDuty) ListOrganizationAdminAccountsRequest(input *ListOrganizationAdminAccountsInput) (req *request.Request, output *ListOrganizationAdminAccountsOutput) { + op := &request.Operation{ + Name: opListOrganizationAdminAccounts, + HTTPMethod: "GET", + HTTPPath: "/admin", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListOrganizationAdminAccountsInput{} + } + + output = &ListOrganizationAdminAccountsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListOrganizationAdminAccounts API operation for Amazon GuardDuty. +// +// Lists the accounts configured as GuardDuty delegated administrators. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GuardDuty's +// API operation ListOrganizationAdminAccounts for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// A bad request exception object. +// +// * InternalServerErrorException +// An internal server error exception object. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListOrganizationAdminAccounts +func (c *GuardDuty) ListOrganizationAdminAccounts(input *ListOrganizationAdminAccountsInput) (*ListOrganizationAdminAccountsOutput, error) { + req, out := c.ListOrganizationAdminAccountsRequest(input) + return out, req.Send() +} + +// ListOrganizationAdminAccountsWithContext is the same as ListOrganizationAdminAccounts with the addition of +// the ability to pass a context and additional request options. +// +// See ListOrganizationAdminAccounts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GuardDuty) ListOrganizationAdminAccountsWithContext(ctx aws.Context, input *ListOrganizationAdminAccountsInput, opts ...request.Option) (*ListOrganizationAdminAccountsOutput, error) { + req, out := c.ListOrganizationAdminAccountsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListOrganizationAdminAccountsPages iterates over the pages of a ListOrganizationAdminAccounts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListOrganizationAdminAccounts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListOrganizationAdminAccounts operation. 
+// pageNum := 0 +// err := client.ListOrganizationAdminAccountsPages(params, +// func(page *guardduty.ListOrganizationAdminAccountsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *GuardDuty) ListOrganizationAdminAccountsPages(input *ListOrganizationAdminAccountsInput, fn func(*ListOrganizationAdminAccountsOutput, bool) bool) error { + return c.ListOrganizationAdminAccountsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListOrganizationAdminAccountsPagesWithContext same as ListOrganizationAdminAccountsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GuardDuty) ListOrganizationAdminAccountsPagesWithContext(ctx aws.Context, input *ListOrganizationAdminAccountsInput, fn func(*ListOrganizationAdminAccountsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListOrganizationAdminAccountsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListOrganizationAdminAccountsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListOrganizationAdminAccountsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListPublishingDestinations = "ListPublishingDestinations" // ListPublishingDestinationsRequest generates a "aws/request.Request" representing the @@ -3415,10 +4044,10 @@ func (c *GuardDuty) ListPublishingDestinationsRequest(input *ListPublishingDesti // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListPublishingDestinations func (c *GuardDuty) ListPublishingDestinations(input *ListPublishingDestinationsInput) (*ListPublishingDestinationsOutput, error) { @@ -3539,9 +4168,9 @@ func (c *GuardDuty) ListTagsForResourceRequest(input *ListTagsForResourceInput) // ListTagsForResource API operation for Amazon GuardDuty. // // Lists tags for a resource. Tagging is currently supported for detectors, -// finding filters, IP sets, and Threat Intel sets, with a limit of 50 tags +// finding filters, IP sets, and threat intel sets, with a limit of 50 tags // per resource. When invoked, this operation returns all assigned tags for -// a given resource.. +// a given resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3552,10 +4181,10 @@ func (c *GuardDuty) ListTagsForResourceRequest(input *ListTagsForResourceInput) // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. 
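+//
+// A hedged sketch; the detector ARN below is a placeholder:
+//
+//    out, err := client.ListTagsForResource(&guardduty.ListTagsForResourceInput{
+//        ResourceArn: aws.String("arn:aws:guardduty:us-east-1:111111111111:detector/12abc34d567e8fa901bc2d34e56789f0"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValueMap(out.Tags))
+//    }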
// // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListTagsForResource func (c *GuardDuty) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { @@ -3642,10 +4271,10 @@ func (c *GuardDuty) ListThreatIntelSetsRequest(input *ListThreatIntelSetsInput) // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/ListThreatIntelSets func (c *GuardDuty) ListThreatIntelSets(input *ListThreatIntelSetsInput) (*ListThreatIntelSetsOutput, error) { @@ -3778,10 +4407,10 @@ func (c *GuardDuty) StartMonitoringMembersRequest(input *StartMonitoringMembersI // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/StartMonitoringMembers func (c *GuardDuty) StartMonitoringMembers(input *StartMonitoringMembersInput) (*StartMonitoringMembersOutput, error) { @@ -3849,8 +4478,8 @@ func (c *GuardDuty) StopMonitoringMembersRequest(input *StopMonitoringMembersInp // StopMonitoringMembers API operation for Amazon GuardDuty. // -// Stops GuardDuty monitoring for the specified member accounnts. Use the StartMonitoringMembers -// to restart monitoring for those accounts. +// Stops GuardDuty monitoring for the specified member accounts. Use the StartMonitoringMembers +// operation to restart monitoring for those accounts. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3861,10 +4490,10 @@ func (c *GuardDuty) StopMonitoringMembersRequest(input *StopMonitoringMembersInp // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/StopMonitoringMembers func (c *GuardDuty) StopMonitoringMembers(input *StopMonitoringMembersInput) (*StopMonitoringMembersOutput, error) { @@ -3944,10 +4573,10 @@ func (c *GuardDuty) TagResourceRequest(input *TagResourceInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/TagResource func (c *GuardDuty) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { @@ -4027,10 +4656,10 @@ func (c *GuardDuty) UnarchiveFindingsRequest(input *UnarchiveFindingsInput) (req // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. 
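+//
+// A minimal sketch (the IDs below are placeholders; client assumed to exist):
+//
+//    _, err := client.UnarchiveFindings(&guardduty.UnarchiveFindingsInput{
+//        DetectorId: aws.String("12abc34d567e8fa901bc2d34e56789f0"),
+//        FindingIds: aws.StringSlice([]string{"22bbb2222b222b22b2222bbb22b2b2b2"}),
+//    })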
// // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UnarchiveFindings func (c *GuardDuty) UnarchiveFindings(input *UnarchiveFindingsInput) (*UnarchiveFindingsOutput, error) { @@ -4110,10 +4739,10 @@ func (c *GuardDuty) UntagResourceRequest(input *UntagResourceInput) (req *reques // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UntagResource func (c *GuardDuty) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { @@ -4193,10 +4822,10 @@ func (c *GuardDuty) UpdateDetectorRequest(input *UpdateDetectorInput) (req *requ // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateDetector func (c *GuardDuty) UpdateDetector(input *UpdateDetectorInput) (*UpdateDetectorOutput, error) { @@ -4275,10 +4904,10 @@ func (c *GuardDuty) UpdateFilterRequest(input *UpdateFilterInput) (req *request. // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateFilter func (c *GuardDuty) UpdateFilter(input *UpdateFilterInput) (*UpdateFilterOutput, error) { @@ -4358,10 +4987,10 @@ func (c *GuardDuty) UpdateFindingsFeedbackRequest(input *UpdateFindingsFeedbackI // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateFindingsFeedback func (c *GuardDuty) UpdateFindingsFeedback(input *UpdateFindingsFeedbackInput) (*UpdateFindingsFeedbackOutput, error) { @@ -4441,10 +5070,10 @@ func (c *GuardDuty) UpdateIPSetRequest(input *UpdateIPSetInput) (req *request.Re // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // // See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateIPSet func (c *GuardDuty) UpdateIPSet(input *UpdateIPSetInput) (*UpdateIPSetOutput, error) { @@ -4468,153 +5097,318 @@ func (c *GuardDuty) UpdateIPSetWithContext(ctx aws.Context, input *UpdateIPSetIn return out, req.Send() } -const opUpdatePublishingDestination = "UpdatePublishingDestination" +const opUpdateMemberDetectors = "UpdateMemberDetectors" -// UpdatePublishingDestinationRequest generates a "aws/request.Request" representing the -// client's request for the UpdatePublishingDestination operation. The "output" return +// UpdateMemberDetectorsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMemberDetectors operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdatePublishingDestination for more information on using the UpdatePublishingDestination +// See UpdateMemberDetectors for more information on using the UpdateMemberDetectors // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdatePublishingDestinationRequest method. -// req, resp := client.UpdatePublishingDestinationRequest(params) +// // Example sending a request using the UpdateMemberDetectorsRequest method. +// req, resp := client.UpdateMemberDetectorsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdatePublishingDestination -func (c *GuardDuty) UpdatePublishingDestinationRequest(input *UpdatePublishingDestinationInput) (req *request.Request, output *UpdatePublishingDestinationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateMemberDetectors +func (c *GuardDuty) UpdateMemberDetectorsRequest(input *UpdateMemberDetectorsInput) (req *request.Request, output *UpdateMemberDetectorsOutput) { op := &request.Operation{ - Name: opUpdatePublishingDestination, + Name: opUpdateMemberDetectors, HTTPMethod: "POST", - HTTPPath: "/detector/{detectorId}/publishingDestination/{destinationId}", + HTTPPath: "/detector/{detectorId}/member/detector/update", } if input == nil { - input = &UpdatePublishingDestinationInput{} + input = &UpdateMemberDetectorsInput{} } - output = &UpdatePublishingDestinationOutput{} + output = &UpdateMemberDetectorsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdatePublishingDestination API operation for Amazon GuardDuty. +// UpdateMemberDetectors API operation for Amazon GuardDuty. // -// Updates information about the publishing destination specified by the destinationId. +// Contains information on member accounts to be updated. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GuardDuty's -// API operation UpdatePublishingDestination for usage and error information. +// API operation UpdateMemberDetectors for usage and error information. // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdatePublishingDestination -func (c *GuardDuty) UpdatePublishingDestination(input *UpdatePublishingDestinationInput) (*UpdatePublishingDestinationOutput, error) { - req, out := c.UpdatePublishingDestinationRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateMemberDetectors +func (c *GuardDuty) UpdateMemberDetectors(input *UpdateMemberDetectorsInput) (*UpdateMemberDetectorsOutput, error) { + req, out := c.UpdateMemberDetectorsRequest(input) return out, req.Send() } -// UpdatePublishingDestinationWithContext is the same as UpdatePublishingDestination with the addition of +// UpdateMemberDetectorsWithContext is the same as UpdateMemberDetectors with the addition of // the ability to pass a context and additional request options. // -// See UpdatePublishingDestination for details on how to use this API operation. +// See UpdateMemberDetectors for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *GuardDuty) UpdatePublishingDestinationWithContext(ctx aws.Context, input *UpdatePublishingDestinationInput, opts ...request.Option) (*UpdatePublishingDestinationOutput, error) { - req, out := c.UpdatePublishingDestinationRequest(input) +func (c *GuardDuty) UpdateMemberDetectorsWithContext(ctx aws.Context, input *UpdateMemberDetectorsInput, opts ...request.Option) (*UpdateMemberDetectorsOutput, error) { + req, out := c.UpdateMemberDetectorsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateThreatIntelSet = "UpdateThreatIntelSet" +const opUpdateOrganizationConfiguration = "UpdateOrganizationConfiguration" -// UpdateThreatIntelSetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateThreatIntelSet operation. The "output" return +// UpdateOrganizationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateOrganizationConfiguration operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateThreatIntelSet for more information on using the UpdateThreatIntelSet +// See UpdateOrganizationConfiguration for more information on using the UpdateOrganizationConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateThreatIntelSetRequest method. -// req, resp := client.UpdateThreatIntelSetRequest(params) +// // Example sending a request using the UpdateOrganizationConfigurationRequest method. 
+// req, resp := client.UpdateOrganizationConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateThreatIntelSet -func (c *GuardDuty) UpdateThreatIntelSetRequest(input *UpdateThreatIntelSetInput) (req *request.Request, output *UpdateThreatIntelSetOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateOrganizationConfiguration +func (c *GuardDuty) UpdateOrganizationConfigurationRequest(input *UpdateOrganizationConfigurationInput) (req *request.Request, output *UpdateOrganizationConfigurationOutput) { op := &request.Operation{ - Name: opUpdateThreatIntelSet, + Name: opUpdateOrganizationConfiguration, HTTPMethod: "POST", - HTTPPath: "/detector/{detectorId}/threatintelset/{threatIntelSetId}", + HTTPPath: "/detector/{detectorId}/admin", } if input == nil { - input = &UpdateThreatIntelSetInput{} + input = &UpdateOrganizationConfigurationInput{} } - output = &UpdateThreatIntelSetOutput{} + output = &UpdateOrganizationConfigurationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateThreatIntelSet API operation for Amazon GuardDuty. +// UpdateOrganizationConfiguration API operation for Amazon GuardDuty. // -// Updates the ThreatIntelSet specified by ThreatIntelSet ID. +// Updates the delegated administrator account with the values provided. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon GuardDuty's -// API operation UpdateThreatIntelSet for usage and error information. +// API operation UpdateOrganizationConfiguration for usage and error information. // // Returned Error Types: // * BadRequestException -// Bad request exception object. +// A bad request exception object. // // * InternalServerErrorException -// Internal server error exception object. +// An internal server error exception object. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateThreatIntelSet -func (c *GuardDuty) UpdateThreatIntelSet(input *UpdateThreatIntelSetInput) (*UpdateThreatIntelSetOutput, error) { - req, out := c.UpdateThreatIntelSetRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateOrganizationConfiguration +func (c *GuardDuty) UpdateOrganizationConfiguration(input *UpdateOrganizationConfigurationInput) (*UpdateOrganizationConfigurationOutput, error) { + req, out := c.UpdateOrganizationConfigurationRequest(input) + return out, req.Send() +} + +// UpdateOrganizationConfigurationWithContext is the same as UpdateOrganizationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateOrganizationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
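+//
+// A hedged usage sketch combining this variant with a deadline; the detector
+// ID is a placeholder and the AutoEnable field is assumed from the service model:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := client.UpdateOrganizationConfigurationWithContext(ctx, &guardduty.UpdateOrganizationConfigurationInput{
+//        DetectorId: aws.String("12abc34d567e8fa901bc2d34e56789f0"),
+//        AutoEnable: aws.Bool(true),
+//    })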
+func (c *GuardDuty) UpdateOrganizationConfigurationWithContext(ctx aws.Context, input *UpdateOrganizationConfigurationInput, opts ...request.Option) (*UpdateOrganizationConfigurationOutput, error) { + req, out := c.UpdateOrganizationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdatePublishingDestination = "UpdatePublishingDestination" + +// UpdatePublishingDestinationRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePublishingDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdatePublishingDestination for more information on using the UpdatePublishingDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdatePublishingDestinationRequest method. +// req, resp := client.UpdatePublishingDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdatePublishingDestination +func (c *GuardDuty) UpdatePublishingDestinationRequest(input *UpdatePublishingDestinationInput) (req *request.Request, output *UpdatePublishingDestinationOutput) { + op := &request.Operation{ + Name: opUpdatePublishingDestination, + HTTPMethod: "POST", + HTTPPath: "/detector/{detectorId}/publishingDestination/{destinationId}", + } + + if input == nil { + input = &UpdatePublishingDestinationInput{} + } + + output = &UpdatePublishingDestinationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdatePublishingDestination API operation for Amazon GuardDuty. +// +// Updates information about the publishing destination specified by the destinationId. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GuardDuty's +// API operation UpdatePublishingDestination for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// A bad request exception object. +// +// * InternalServerErrorException +// An internal server error exception object. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdatePublishingDestination +func (c *GuardDuty) UpdatePublishingDestination(input *UpdatePublishingDestinationInput) (*UpdatePublishingDestinationOutput, error) { + req, out := c.UpdatePublishingDestinationRequest(input) + return out, req.Send() +} + +// UpdatePublishingDestinationWithContext is the same as UpdatePublishingDestination with the addition of +// the ability to pass a context and additional request options. +// +// See UpdatePublishingDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *GuardDuty) UpdatePublishingDestinationWithContext(ctx aws.Context, input *UpdatePublishingDestinationInput, opts ...request.Option) (*UpdatePublishingDestinationOutput, error) { + req, out := c.UpdatePublishingDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateThreatIntelSet = "UpdateThreatIntelSet" + +// UpdateThreatIntelSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateThreatIntelSet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateThreatIntelSet for more information on using the UpdateThreatIntelSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateThreatIntelSetRequest method. +// req, resp := client.UpdateThreatIntelSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateThreatIntelSet +func (c *GuardDuty) UpdateThreatIntelSetRequest(input *UpdateThreatIntelSetInput) (req *request.Request, output *UpdateThreatIntelSetOutput) { + op := &request.Operation{ + Name: opUpdateThreatIntelSet, + HTTPMethod: "POST", + HTTPPath: "/detector/{detectorId}/threatintelset/{threatIntelSetId}", + } + + if input == nil { + input = &UpdateThreatIntelSetInput{} + } + + output = &UpdateThreatIntelSetOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateThreatIntelSet API operation for Amazon GuardDuty. +// +// Updates the ThreatIntelSet specified by the ThreatIntelSet ID. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon GuardDuty's +// API operation UpdateThreatIntelSet for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// A bad request exception object. +// +// * InternalServerErrorException +// An internal server error exception object. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28/UpdateThreatIntelSet +func (c *GuardDuty) UpdateThreatIntelSet(input *UpdateThreatIntelSetInput) (*UpdateThreatIntelSetOutput, error) { + req, out := c.UpdateThreatIntelSetRequest(input) return out, req.Send() } @@ -4642,7 +5436,7 @@ type AcceptInvitationInput struct { // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // This value is used to validate the master account to the member account. + // The value that is used to validate the master account to the member account. 
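+	// Invitation IDs can be retrieved with the ListInvitations operation.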
// // InvitationId is a required field InvitationId *string `locationName:"invitationId" type:"string" required:"true"` @@ -4717,11 +5511,46 @@ func (s AcceptInvitationOutput) GoString() string { return s.String() } +// Contains information on the current access control policies for the bucket. +type AccessControlList struct { + _ struct{} `type:"structure"` + + // A value that indicates whether public read access for the bucket is enabled + // through an Access Control List (ACL). + AllowsPublicReadAccess *bool `locationName:"allowsPublicReadAccess" type:"boolean"` + + // A value that indicates whether public write access for the bucket is enabled + // through an Access Control List (ACL). + AllowsPublicWriteAccess *bool `locationName:"allowsPublicWriteAccess" type:"boolean"` +} + +// String returns the string representation +func (s AccessControlList) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlList) GoString() string { + return s.String() +} + +// SetAllowsPublicReadAccess sets the AllowsPublicReadAccess field's value. +func (s *AccessControlList) SetAllowsPublicReadAccess(v bool) *AccessControlList { + s.AllowsPublicReadAccess = &v + return s +} + +// SetAllowsPublicWriteAccess sets the AllowsPublicWriteAccess field's value. +func (s *AccessControlList) SetAllowsPublicWriteAccess(v bool) *AccessControlList { + s.AllowsPublicWriteAccess = &v + return s +} + // Contains information about the access keys. type AccessKeyDetails struct { _ struct{} `type:"structure"` - // Access key ID of the user. + // The access key ID of the user. AccessKeyId *string `locationName:"accessKeyId" type:"string"` // The principal ID of the user. @@ -4772,12 +5601,12 @@ func (s *AccessKeyDetails) SetUserType(v string) *AccessKeyDetails { type AccountDetail struct { _ struct{} `type:"structure"` - // Member account ID. + // The member account ID. // // AccountId is a required field AccountId *string `locationName:"accountId" min:"12" type:"string" required:"true"` - // Member account's email address. + // The email address of the member account. // // Email is a required field Email *string `locationName:"email" min:"1" type:"string" required:"true"` @@ -4827,11 +5656,35 @@ func (s *AccountDetail) SetEmail(v string) *AccountDetail { return s } -// Contains information about action. +// Contains information about the account level permissions on the S3 bucket. +type AccountLevelPermissions struct { + _ struct{} `type:"structure"` + + // Describes the S3 Block Public Access settings of the bucket's parent account. + BlockPublicAccess *BlockPublicAccess `locationName:"blockPublicAccess" type:"structure"` +} + +// String returns the string representation +func (s AccountLevelPermissions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountLevelPermissions) GoString() string { + return s.String() +} + +// SetBlockPublicAccess sets the BlockPublicAccess field's value. +func (s *AccountLevelPermissions) SetBlockPublicAccess(v *BlockPublicAccess) *AccountLevelPermissions { + s.BlockPublicAccess = v + return s +} + +// Contains information about actions. type Action struct { _ struct{} `type:"structure"` - // GuardDuty Finding activity type. + // The GuardDuty finding activity type. ActionType *string `locationName:"actionType" type:"string"` // Information about the AWS_API_CALL action described in this finding. 
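The structs above follow the SDK's chained-setter convention: each generated Set* method assigns the field and returns its receiver, so values can be built fluently. The snippet below is a minimal, self-contained sketch of building an AccountDetail that way and passing it to CreateMembers; the region, session setup, account ID, e-mail address, and detector ID are illustrative assumptions, not part of this patch.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/guardduty"
)

func main() {
	// Credentials are resolved from the environment; the region is a placeholder.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	client := guardduty.New(sess)

	// Chained setters mirror the generated Set* methods defined above.
	detail := (&guardduty.AccountDetail{}).
		SetAccountId("222222222222").
		SetEmail("member@example.com")

	out, err := client.CreateMembers(&guardduty.CreateMembersInput{
		DetectorId:     aws.String("12abc34d567e8fa901bc2d34e56789f0"),
		AccountDetails: []*guardduty.AccountDetail{detail},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}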
@@ -4887,6 +5740,40 @@ func (s *Action) SetPortProbeAction(v *PortProbeAction) *Action { return s } +// The account within the organization specified as the GuardDuty delegated +// administrator. +type AdminAccount struct { + _ struct{} `type:"structure"` + + // The AWS account ID for the account. + AdminAccountId *string `locationName:"adminAccountId" type:"string"` + + // Indicates whether the account is enabled as the delegated administrator. + AdminStatus *string `locationName:"adminStatus" min:"1" type:"string" enum:"AdminStatus"` +} + +// String returns the string representation +func (s AdminAccount) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdminAccount) GoString() string { + return s.String() +} + +// SetAdminAccountId sets the AdminAccountId field's value. +func (s *AdminAccount) SetAdminAccountId(v string) *AdminAccount { + s.AdminAccountId = &v + return s +} + +// SetAdminStatus sets the AdminStatus field's value. +func (s *AdminAccount) SetAdminStatus(v string) *AdminAccount { + s.AdminStatus = &v + return s +} + type ArchiveFindingsInput struct { _ struct{} `type:"structure"` @@ -4896,7 +5783,7 @@ type ArchiveFindingsInput struct { // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // IDs of the findings that you want to archive. + // The IDs of the findings that you want to archive. // // FindingIds is a required field FindingIds []*string `locationName:"findingIds" type:"list" required:"true"` @@ -4957,23 +5844,26 @@ func (s ArchiveFindingsOutput) GoString() string { return s.String() } -// Contains information about the API operation. +// Contains information about the API action. type AwsApiCallAction struct { _ struct{} `type:"structure"` - // AWS API name. + // The AWS API name. Api *string `locationName:"api" type:"string"` - // AWS API caller type. + // The AWS API caller type. CallerType *string `locationName:"callerType" type:"string"` - // Domain information for the AWS API call. + // The domain information for the AWS API call. DomainDetails *DomainDetails `locationName:"domainDetails" type:"structure"` - // Remote IP information of the connection. + // The error code of the failed AWS API action. + ErrorCode *string `locationName:"errorCode" type:"string"` + + // The remote IP information of the connection that initiated the AWS API call. RemoteIpDetails *RemoteIpDetails `locationName:"remoteIpDetails" type:"structure"` - // AWS service name whose API was invoked. + // The AWS service name whose API was invoked. ServiceName *string `locationName:"serviceName" type:"string"` } @@ -5005,6 +5895,12 @@ func (s *AwsApiCallAction) SetDomainDetails(v *DomainDetails) *AwsApiCallAction return s } +// SetErrorCode sets the ErrorCode field's value. +func (s *AwsApiCallAction) SetErrorCode(v string) *AwsApiCallAction { + s.ErrorCode = &v + return s +} + // SetRemoteIpDetails sets the RemoteIpDetails field's value. func (s *AwsApiCallAction) SetRemoteIpDetails(v *RemoteIpDetails) *AwsApiCallAction { s.RemoteIpDetails = v @@ -5017,10 +5913,10 @@ func (s *AwsApiCallAction) SetServiceName(v string) *AwsApiCallAction { return s } -// Bad request exception object. +// A bad request exception object. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message. 
Message_ *string `locationName:"message" type:"string"` @@ -5041,17 +5937,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5059,88 +5955,246 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } -// Contains information about the city associated with the IP address. -type City struct { +// Contains information on how the bucker owner's S3 Block Public Access settings +// are being applied to the S3 bucket. See S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// for more information. +type BlockPublicAccess struct { _ struct{} `type:"structure"` - // City name of the remote IP address. - CityName *string `locationName:"cityName" type:"string"` + // Indicates if S3 Block Public Access is set to BlockPublicAcls. + BlockPublicAcls *bool `locationName:"blockPublicAcls" type:"boolean"` + + // Indicates if S3 Block Public Access is set to BlockPublicPolicy. + BlockPublicPolicy *bool `locationName:"blockPublicPolicy" type:"boolean"` + + // Indicates if S3 Block Public Access is set to IgnorePublicAcls. + IgnorePublicAcls *bool `locationName:"ignorePublicAcls" type:"boolean"` + + // Indicates if S3 Block Public Access is set to RestrictPublicBuckets. + RestrictPublicBuckets *bool `locationName:"restrictPublicBuckets" type:"boolean"` } // String returns the string representation -func (s City) String() string { +func (s BlockPublicAccess) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s City) GoString() string { +func (s BlockPublicAccess) GoString() string { return s.String() } -// SetCityName sets the CityName field's value. -func (s *City) SetCityName(v string) *City { - s.CityName = &v +// SetBlockPublicAcls sets the BlockPublicAcls field's value. +func (s *BlockPublicAccess) SetBlockPublicAcls(v bool) *BlockPublicAccess { + s.BlockPublicAcls = &v return s } -// Contains information about the condition. -type Condition struct { - _ struct{} `type:"structure"` +// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. 
+func (s *BlockPublicAccess) SetBlockPublicPolicy(v bool) *BlockPublicAccess { + s.BlockPublicPolicy = &v + return s +} - // Represents the equal condition to be applied to a single field when querying - // for findings. - // - // Deprecated: Eq has been deprecated - Eq []*string `locationName:"eq" deprecated:"true" type:"list"` +// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. +func (s *BlockPublicAccess) SetIgnorePublicAcls(v bool) *BlockPublicAccess { + s.IgnorePublicAcls = &v + return s +} - // Represents an equal condition to be applied to a single field when querying - // for findings. - Equals []*string `locationName:"equals" type:"list"` +// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value. +func (s *BlockPublicAccess) SetRestrictPublicBuckets(v bool) *BlockPublicAccess { + s.RestrictPublicBuckets = &v + return s +} - // Represents a greater than condition to be applied to a single field when - // querying for findings. - GreaterThan *int64 `locationName:"greaterThan" type:"long"` +// Contains information about the bucket level permissions for the S3 bucket. +type BucketLevelPermissions struct { + _ struct{} `type:"structure"` - // Represents a greater than equal condition to be applied to a single field - // when querying for findings. - GreaterThanOrEqual *int64 `locationName:"greaterThanOrEqual" type:"long"` + // Contains information on how Access Control Policies are applied to the bucket. + AccessControlList *AccessControlList `locationName:"accessControlList" type:"structure"` - // Represents a greater than condition to be applied to a single field when - // querying for findings. - // - // Deprecated: Gt has been deprecated - Gt *int64 `locationName:"gt" deprecated:"true" type:"integer"` + // Contains information on which account level S3 Block Public Access settings + // are applied to the S3 bucket. + BlockPublicAccess *BlockPublicAccess `locationName:"blockPublicAccess" type:"structure"` - // Represents a greater than equal condition to be applied to a single field - // when querying for findings. - // - // Deprecated: Gte has been deprecated + // Contains information on the bucket policies for the S3 bucket. + BucketPolicy *BucketPolicy `locationName:"bucketPolicy" type:"structure"` +} + +// String returns the string representation +func (s BucketLevelPermissions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLevelPermissions) GoString() string { + return s.String() +} + +// SetAccessControlList sets the AccessControlList field's value. +func (s *BucketLevelPermissions) SetAccessControlList(v *AccessControlList) *BucketLevelPermissions { + s.AccessControlList = v + return s +} + +// SetBlockPublicAccess sets the BlockPublicAccess field's value. +func (s *BucketLevelPermissions) SetBlockPublicAccess(v *BlockPublicAccess) *BucketLevelPermissions { + s.BlockPublicAccess = v + return s +} + +// SetBucketPolicy sets the BucketPolicy field's value. +func (s *BucketLevelPermissions) SetBucketPolicy(v *BucketPolicy) *BucketLevelPermissions { + s.BucketPolicy = v + return s +} + +// Contains information on the current bucket policies for the S3 bucket. +type BucketPolicy struct { + _ struct{} `type:"structure"` + + // A value that indicates whether public read access for the bucket is enabled + // through a bucket policy. 
+ AllowsPublicReadAccess *bool `locationName:"allowsPublicReadAccess" type:"boolean"` + + // A value that indicates whether public write access for the bucket is enabled + // through a bucket policy. + AllowsPublicWriteAccess *bool `locationName:"allowsPublicWriteAccess" type:"boolean"` +} + +// String returns the string representation +func (s BucketPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketPolicy) GoString() string { + return s.String() +} + +// SetAllowsPublicReadAccess sets the AllowsPublicReadAccess field's value. +func (s *BucketPolicy) SetAllowsPublicReadAccess(v bool) *BucketPolicy { + s.AllowsPublicReadAccess = &v + return s +} + +// SetAllowsPublicWriteAccess sets the AllowsPublicWriteAccess field's value. +func (s *BucketPolicy) SetAllowsPublicWriteAccess(v bool) *BucketPolicy { + s.AllowsPublicWriteAccess = &v + return s +} + +// Contains information about the city associated with the IP address. +type City struct { + _ struct{} `type:"structure"` + + // The city name of the remote IP address. + CityName *string `locationName:"cityName" type:"string"` +} + +// String returns the string representation +func (s City) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s City) GoString() string { + return s.String() +} + +// SetCityName sets the CityName field's value. +func (s *City) SetCityName(v string) *City { + s.CityName = &v + return s +} + +// Contains information on the status of CloudTrail as a data source for the +// detector. +type CloudTrailConfigurationResult struct { + _ struct{} `type:"structure"` + + // Describes whether CloudTrail is enabled as a data source for the detector. + // + // Status is a required field + Status *string `locationName:"status" min:"1" type:"string" required:"true" enum:"DataSourceStatus"` +} + +// String returns the string representation +func (s CloudTrailConfigurationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudTrailConfigurationResult) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *CloudTrailConfigurationResult) SetStatus(v string) *CloudTrailConfigurationResult { + s.Status = &v + return s +} + +// Contains information about the condition. +type Condition struct { + _ struct{} `type:"structure"` + + // Represents the equal condition to be applied to a single field when querying + // for findings. + // + // Deprecated: Eq has been deprecated + Eq []*string `locationName:"eq" deprecated:"true" type:"list"` + + // Represents an equal condition to be applied to a single field when querying + // for findings. + Equals []*string `locationName:"equals" type:"list"` + + // Represents a greater than condition to be applied to a single field when + // querying for findings. + GreaterThan *int64 `locationName:"greaterThan" type:"long"` + + // Represents a greater than or equal condition to be applied to a single field + // when querying for findings. + GreaterThanOrEqual *int64 `locationName:"greaterThanOrEqual" type:"long"` + + // Represents a greater than condition to be applied to a single field when + // querying for findings. + // + // Deprecated: Gt has been deprecated + Gt *int64 `locationName:"gt" deprecated:"true" type:"integer"` + + // Represents a greater than or equal condition to be applied to a single field + // when querying for findings. 
+ // + // Deprecated: Gte has been deprecated Gte *int64 `locationName:"gte" deprecated:"true" type:"integer"` // Represents a less than condition to be applied to a single field when querying // for findings. LessThan *int64 `locationName:"lessThan" type:"long"` - // Represents a less than equal condition to be applied to a single field when - // querying for findings. + // Represents a less than or equal condition to be applied to a single field + // when querying for findings. LessThanOrEqual *int64 `locationName:"lessThanOrEqual" type:"long"` // Represents a less than condition to be applied to a single field when querying @@ -5149,8 +6203,8 @@ type Condition struct { // Deprecated: Lt has been deprecated Lt *int64 `locationName:"lt" deprecated:"true" type:"integer"` - // Represents a less than equal condition to be applied to a single field when - // querying for findings. + // Represents a less than or equal condition to be applied to a single field + // when querying for findings. // // Deprecated: Lte has been deprecated Lte *int64 `locationName:"lte" deprecated:"true" type:"integer"` @@ -5161,7 +6215,7 @@ type Condition struct { // Deprecated: Neq has been deprecated Neq []*string `locationName:"neq" deprecated:"true" type:"list"` - // Represents an not equal condition to be applied to a single field when querying + // Represents a not equal condition to be applied to a single field when querying // for findings. NotEquals []*string `locationName:"notEquals" type:"list"` } @@ -5248,15 +6302,14 @@ func (s *Condition) SetNotEquals(v []*string) *Condition { return s } -// Contains information about the country in which the remote IP address is -// located. +// Contains information about the country where the remote IP address is located. type Country struct { _ struct{} `type:"structure"` - // Country code of the remote IP address. + // The country code of the remote IP address. CountryCode *string `locationName:"countryCode" type:"string"` - // Country name of the remote IP address. + // The country name of the remote IP address. CountryName *string `locationName:"countryName" type:"string"` } @@ -5288,12 +6341,15 @@ type CreateDetectorInput struct { // The idempotency token for the create request. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // A boolean value that specifies whether the detector is to be enabled. + // An object that describes which data sources will be enabled for the detector. + DataSources *DataSourceConfigurations `locationName:"dataSources" type:"structure"` + + // A Boolean value that specifies whether the detector is to be enabled. // // Enable is a required field Enable *bool `locationName:"enable" type:"boolean" required:"true"` - // A enum value that specifies how frequently customer got Finding updates published. + // An enum value that specifies how frequently updated findings are exported. FindingPublishingFrequency *string `locationName:"findingPublishingFrequency" type:"string" enum:"FindingPublishingFrequency"` // The tags to be added to a new detector resource. 
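// ----------------------------------------------------------------------------
// Editor's note: a minimal sketch, not part of the vendored SDK file, showing
// how the new DataSources field on CreateDetectorInput, introduced in the hunk
// above, might be used to enable S3 data event logs when creating a detector.
// The Enable field on S3LogsConfiguration is assumed from this file's nested
// validation; the surrounding setup is hypothetical.
// ----------------------------------------------------------------------------

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/guardduty"
)

func main() {
	sess := session.Must(session.NewSession())
	client := guardduty.New(sess)

	// Enable the detector with S3 data event logs as an additional data source.
	out, err := client.CreateDetector(&guardduty.CreateDetectorInput{
		Enable: aws.Bool(true),
		DataSources: &guardduty.DataSourceConfigurations{
			S3Logs: &guardduty.S3LogsConfiguration{
				Enable: aws.Bool(true), // assumed required field on S3LogsConfiguration
			},
		},
		FindingPublishingFrequency: aws.String(guardduty.FindingPublishingFrequencySixHours),
	})
	if err != nil {
		fmt.Println("CreateDetector failed:", err)
		return
	}
	fmt.Println("created detector:", aws.StringValue(out.DetectorId))
}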
@@ -5319,6 +6375,11 @@ func (s *CreateDetectorInput) Validate() error { if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } + if s.DataSources != nil { + if err := s.DataSources.Validate(); err != nil { + invalidParams.AddNested("DataSources", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -5332,6 +6393,12 @@ func (s *CreateDetectorInput) SetClientToken(v string) *CreateDetectorInput { return s } +// SetDataSources sets the DataSources field's value. +func (s *CreateDetectorInput) SetDataSources(v *DataSourceConfigurations) *CreateDetectorInput { + s.DataSources = v + return s +} + // SetEnable sets the Enable field's value. func (s *CreateDetectorInput) SetEnable(v bool) *CreateDetectorInput { s.Enable = &v @@ -5386,14 +6453,119 @@ type CreateFilterInput struct { // The description of the filter. Description *string `locationName:"description" type:"string"` - // The unique ID of the detector of the GuardDuty account for which you want - // to create a filter. + // The unique ID of the detector of the GuardDuty account that you want to create + // a filter for. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` // Represents the criteria to be used in the filter for querying findings. // + // You can only use the following attributes to query findings: + // + // * accountId + // + // * region + // + // * confidence + // + // * id + // + // * resource.accessKeyDetails.accessKeyId + // + // * resource.accessKeyDetails.principalId + // + // * resource.accessKeyDetails.userName + // + // * resource.accessKeyDetails.userType + // + // * resource.instanceDetails.iamInstanceProfile.id + // + // * resource.instanceDetails.imageId + // + // * resource.instanceDetails.instanceId + // + // * resource.instanceDetails.outpostArn + // + // * resource.instanceDetails.networkInterfaces.ipv6Addresses + // + // * resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress + // + // * resource.instanceDetails.networkInterfaces.publicDnsName + // + // * resource.instanceDetails.networkInterfaces.publicIp + // + // * resource.instanceDetails.networkInterfaces.securityGroups.groupId + // + // * resource.instanceDetails.networkInterfaces.securityGroups.groupName + // + // * resource.instanceDetails.networkInterfaces.subnetId + // + // * resource.instanceDetails.networkInterfaces.vpcId + // + // * resource.instanceDetails.tags.key + // + // * resource.instanceDetails.tags.value + // + // * resource.resourceType + // + // * service.action.actionType + // + // * service.action.awsApiCallAction.api + // + // * service.action.awsApiCallAction.callerType + // + // * service.action.awsApiCallAction.remoteIpDetails.city.cityName + // + // * service.action.awsApiCallAction.remoteIpDetails.country.countryName + // + // * service.action.awsApiCallAction.remoteIpDetails.ipAddressV4 + // + // * service.action.awsApiCallAction.remoteIpDetails.organization.asn + // + // * service.action.awsApiCallAction.remoteIpDetails.organization.asnOrg + // + // * service.action.awsApiCallAction.serviceName + // + // * service.action.dnsRequestAction.domain + // + // * service.action.networkConnectionAction.blocked + // + // * service.action.networkConnectionAction.connectionDirection + // + // * service.action.networkConnectionAction.localPortDetails.port + // + // * service.action.networkConnectionAction.protocol + // + // * 
service.action.networkConnectionAction.localIpDetails.ipAddressV4 + // + // * service.action.networkConnectionAction.remoteIpDetails.city.cityName + // + // * service.action.networkConnectionAction.remoteIpDetails.country.countryName + // + // * service.action.networkConnectionAction.remoteIpDetails.ipAddressV4 + // + // * service.action.networkConnectionAction.remoteIpDetails.organization.asn + // + // * service.action.networkConnectionAction.remoteIpDetails.organization.asnOrg + // + // * service.action.networkConnectionAction.remotePortDetails.port + // + // * service.additionalInfo.threatListName + // + // * service.archived When this attribute is set to TRUE, only archived findings + // are listed. When it's set to FALSE, only unarchived findings are listed. + // When this attribute is not set, all existing findings are listed. + // + // * service.resourceRole + // + // * severity + // + // * type + // + // * updatedAt Type: ISO 8601 string format: YYYY-MM-DDTHH:MM:SS.SSSZ or + // YYYY-MM-DDTHH:MM:SSZ depending on whether the value contains milliseconds. + // // FindingCriteria is a required field FindingCriteria *FindingCriteria `locationName:"findingCriteria" type:"structure" required:"true"` @@ -5530,7 +6702,7 @@ func (s *CreateFilterOutput) SetName(v string) *CreateFilterOutput { type CreateIPSetInput struct { _ struct{} `type:"structure"` - // A boolean value that indicates whether GuardDuty is to start using the uploaded + // A Boolean value that indicates whether GuardDuty is to start using the uploaded // IPSet. // // Activate is a required field @@ -5539,8 +6711,8 @@ type CreateIPSetInput struct { // The idempotency token for the create request. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // The unique ID of the detector of the GuardDuty account for which you want - // to create an IPSet. + // The unique ID of the detector of the GuardDuty account that you want to create + // an IPSet for. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` @@ -5550,14 +6722,15 @@ type CreateIPSetInput struct { // Format is a required field Format *string `locationName:"format" min:"1" type:"string" required:"true" enum:"IpSetFormat"` - // The URI of the file that contains the IPSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key) + // The URI of the file that contains the IPSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key. // // Location is a required field Location *string `locationName:"location" min:"1" type:"string" required:"true"` - // The user friendly name to identify the IPSet. This name is displayed in all - // findings that are triggered by activity that involves IP addresses included - // in this IPSet. + // The user-friendly name to identify the IPSet. + // + // Allowed characters are alphanumerics, spaces, hyphens (-), and underscores + // (_). // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` @@ -5692,8 +6865,8 @@ type CreateMembersInput struct { // AccountDetails is a required field AccountDetails []*AccountDetail `locationName:"accountDetails" min:"1" type:"list" required:"true"` - // The unique ID of the detector of the GuardDuty account with which you want - // to associate member accounts. + // The unique ID of the detector of the GuardDuty account that you want to associate + // member accounts with. 
// // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` @@ -5756,8 +6929,8 @@ func (s *CreateMembersInput) SetDetectorId(v string) *CreateMembersInput { type CreateMembersOutput struct { _ struct{} `type:"structure"` - // A list of objects containing the unprocessed account and a result string - // explaining why it was unprocessed. + // A list of objects that include the accountIds of the unprocessed accounts + // and a result string that explains why each was unprocessed. // // UnprocessedAccounts is a required field UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` @@ -5785,14 +6958,14 @@ type CreatePublishingDestinationInput struct { // The idempotency token for the request. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // Properties of the publishing destination, including the ARNs for the destination - // and the KMS key used for encryption. + // The properties of the publishing destination, including the ARNs for the + // destination and the KMS key used for encryption. // // DestinationProperties is a required field DestinationProperties *DestinationProperties `locationName:"destinationProperties" type:"structure" required:"true"` - // The type of resource for the publishing destination. Currently only S3 is - // supported. + // The type of resource for the publishing destination. Currently only Amazon + // S3 buckets are supported. // // DestinationType is a required field DestinationType *string `locationName:"destinationType" min:"1" type:"string" required:"true" enum:"DestinationType"` @@ -5865,7 +7038,7 @@ func (s *CreatePublishingDestinationInput) SetDetectorId(v string) *CreatePublis type CreatePublishingDestinationOutput struct { _ struct{} `type:"structure"` - // The ID of the publishing destination created. + // The ID of the publishing destination that is created. // // DestinationId is a required field DestinationId *string `locationName:"destinationId" type:"string" required:"true"` @@ -5895,7 +7068,7 @@ type CreateSampleFindingsInput struct { // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // Types of sample findings to generate. + // The types of sample findings to generate. FindingTypes []*string `locationName:"findingTypes" type:"list"` } @@ -5954,7 +7127,7 @@ func (s CreateSampleFindingsOutput) GoString() string { type CreateThreatIntelSetInput struct { _ struct{} `type:"structure"` - // A boolean value that indicates whether GuardDuty is to start using the uploaded + // A Boolean value that indicates whether GuardDuty is to start using the uploaded // ThreatIntelSet. // // Activate is a required field @@ -5963,8 +7136,8 @@ type CreateThreatIntelSetInput struct { // The idempotency token for the create request. ClientToken *string `locationName:"clientToken" type:"string" idempotencyToken:"true"` - // The unique ID of the detector of the GuardDuty account for which you want - // to create a threatIntelSet. + // The unique ID of the detector of the GuardDuty account that you want to create + // a threatIntelSet for. 
// // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` @@ -5974,18 +7147,18 @@ type CreateThreatIntelSetInput struct { // Format is a required field Format *string `locationName:"format" min:"1" type:"string" required:"true" enum:"ThreatIntelSetFormat"` - // The URI of the file that contains the ThreatIntelSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key). + // The URI of the file that contains the ThreatIntelSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key. // // Location is a required field Location *string `locationName:"location" min:"1" type:"string" required:"true"` - // A user-friendly ThreatIntelSet name that is displayed in all finding generated + // A user-friendly ThreatIntelSet name displayed in all findings that are generated // by activity that involves IP addresses included in this ThreatIntelSet. // // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // The tags to be added to a new Threat List resource. + // The tags to be added to a new threat list resource. Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } @@ -6106,65 +7279,192 @@ func (s *CreateThreatIntelSetOutput) SetThreatIntelSetId(v string) *CreateThreat return s } -type DeclineInvitationsInput struct { +// Contains information on the status of DNS logs as a data source. +type DNSLogsConfigurationResult struct { _ struct{} `type:"structure"` - // A list of account IDs of the AWS accounts that sent invitations to the current - // member account that you want to decline invitations from. + // Denotes whether DNS logs is enabled as a data source. // - // AccountIds is a required field - AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` + // Status is a required field + Status *string `locationName:"status" min:"1" type:"string" required:"true" enum:"DataSourceStatus"` } // String returns the string representation -func (s DeclineInvitationsInput) String() string { +func (s DNSLogsConfigurationResult) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeclineInvitationsInput) GoString() string { +func (s DNSLogsConfigurationResult) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeclineInvitationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeclineInvitationsInput"} - if s.AccountIds == nil { - invalidParams.Add(request.NewErrParamRequired("AccountIds")) - } - if s.AccountIds != nil && len(s.AccountIds) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountIds sets the AccountIds field's value. -func (s *DeclineInvitationsInput) SetAccountIds(v []*string) *DeclineInvitationsInput { - s.AccountIds = v +// SetStatus sets the Status field's value. +func (s *DNSLogsConfigurationResult) SetStatus(v string) *DNSLogsConfigurationResult { + s.Status = &v return s } -type DeclineInvitationsOutput struct { +// Contains information about which data sources are enabled. +type DataSourceConfigurations struct { _ struct{} `type:"structure"` - // A list of objects containing the unprocessed account and a result string - // explaining why it was unprocessed. 
- // - // UnprocessedAccounts is a required field - UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` + // Describes whether S3 data event logs are enabled as a data source. + S3Logs *S3LogsConfiguration `locationName:"s3Logs" type:"structure"` } // String returns the string representation -func (s DeclineInvitationsOutput) String() string { +func (s DataSourceConfigurations) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeclineInvitationsOutput) GoString() string { +func (s DataSourceConfigurations) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DataSourceConfigurations) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DataSourceConfigurations"} + if s.S3Logs != nil { + if err := s.S3Logs.Validate(); err != nil { + invalidParams.AddNested("S3Logs", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Logs sets the S3Logs field's value. +func (s *DataSourceConfigurations) SetS3Logs(v *S3LogsConfiguration) *DataSourceConfigurations { + s.S3Logs = v + return s +} + +// Contains information on the status of data sources for the detector. +type DataSourceConfigurationsResult struct { + _ struct{} `type:"structure"` + + // An object that contains information on the status of CloudTrail as a data + // source. + // + // CloudTrail is a required field + CloudTrail *CloudTrailConfigurationResult `locationName:"cloudTrail" type:"structure" required:"true"` + + // An object that contains information on the status of DNS logs as a data source. + // + // DNSLogs is a required field + DNSLogs *DNSLogsConfigurationResult `locationName:"dnsLogs" type:"structure" required:"true"` + + // An object that contains information on the status of VPC flow logs as a data + // source. + // + // FlowLogs is a required field + FlowLogs *FlowLogsConfigurationResult `locationName:"flowLogs" type:"structure" required:"true"` + + // An object that contains information on the status of S3 Data event logs as + // a data source. + // + // S3Logs is a required field + S3Logs *S3LogsConfigurationResult `locationName:"s3Logs" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DataSourceConfigurationsResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSourceConfigurationsResult) GoString() string { + return s.String() +} + +// SetCloudTrail sets the CloudTrail field's value. +func (s *DataSourceConfigurationsResult) SetCloudTrail(v *CloudTrailConfigurationResult) *DataSourceConfigurationsResult { + s.CloudTrail = v + return s +} + +// SetDNSLogs sets the DNSLogs field's value. +func (s *DataSourceConfigurationsResult) SetDNSLogs(v *DNSLogsConfigurationResult) *DataSourceConfigurationsResult { + s.DNSLogs = v + return s +} + +// SetFlowLogs sets the FlowLogs field's value. +func (s *DataSourceConfigurationsResult) SetFlowLogs(v *FlowLogsConfigurationResult) *DataSourceConfigurationsResult { + s.FlowLogs = v + return s +} + +// SetS3Logs sets the S3Logs field's value. 
+func (s *DataSourceConfigurationsResult) SetS3Logs(v *S3LogsConfigurationResult) *DataSourceConfigurationsResult { + s.S3Logs = v + return s +} + +type DeclineInvitationsInput struct { + _ struct{} `type:"structure"` + + // A list of account IDs of the AWS accounts that sent invitations to the current + // member account that you want to decline invitations from. + // + // AccountIds is a required field + AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeclineInvitationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeclineInvitationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeclineInvitationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeclineInvitationsInput"} + if s.AccountIds == nil { + invalidParams.Add(request.NewErrParamRequired("AccountIds")) + } + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountIds sets the AccountIds field's value. +func (s *DeclineInvitationsInput) SetAccountIds(v []*string) *DeclineInvitationsInput { + s.AccountIds = v + return s +} + +type DeclineInvitationsOutput struct { + _ struct{} `type:"structure"` + + // A list of objects that contain the unprocessed account and a result string + // that explains why it was unprocessed. + // + // UnprocessedAccounts is a required field + UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` +} + +// String returns the string representation +func (s DeclineInvitationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeclineInvitationsOutput) GoString() string { return s.String() } @@ -6174,6 +7474,42 @@ func (s *DeclineInvitationsOutput) SetUnprocessedAccounts(v []*UnprocessedAccoun return s } +// Contains information on the server side encryption method used in the S3 +// bucket. See S3 Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// for more information. +type DefaultServerSideEncryption struct { + _ struct{} `type:"structure"` + + // The type of encryption used for objects within the S3 bucket. + EncryptionType *string `locationName:"encryptionType" type:"string"` + + // The Amazon Resource Name (ARN) of the KMS encryption key. Only available + // if the bucket EncryptionType is aws:kms. + KmsMasterKeyArn *string `locationName:"kmsMasterKeyArn" type:"string"` +} + +// String returns the string representation +func (s DefaultServerSideEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultServerSideEncryption) GoString() string { + return s.String() +} + +// SetEncryptionType sets the EncryptionType field's value. +func (s *DefaultServerSideEncryption) SetEncryptionType(v string) *DefaultServerSideEncryption { + s.EncryptionType = &v + return s +} + +// SetKmsMasterKeyArn sets the KmsMasterKeyArn field's value. 
+func (s *DefaultServerSideEncryption) SetKmsMasterKeyArn(v string) *DefaultServerSideEncryption { + s.KmsMasterKeyArn = &v + return s +} + type DeleteDetectorInput struct { _ struct{} `type:"structure"` @@ -6232,12 +7568,12 @@ func (s DeleteDetectorOutput) GoString() string { type DeleteFilterInput struct { _ struct{} `type:"structure"` - // The unique ID of the detector the filter is associated with. + // The unique ID of the detector that the filter is associated with. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // The name of the filter you want to delete. + // The name of the filter that you want to delete. // // FilterName is a required field FilterName *string `location:"uri" locationName:"filterName" type:"string" required:"true"` @@ -6418,8 +7754,8 @@ func (s *DeleteInvitationsInput) SetAccountIds(v []*string) *DeleteInvitationsIn type DeleteInvitationsOutput struct { _ struct{} `type:"structure"` - // A list of objects containing the unprocessed account and a result string - // explaining why it was unprocessed. + // A list of objects that contain the unprocessed account and a result string + // that explains why it was unprocessed. // // UnprocessedAccounts is a required field UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` @@ -6601,12 +7937,12 @@ func (s DeletePublishingDestinationOutput) GoString() string { type DeleteThreatIntelSetInput struct { _ struct{} `type:"structure"` - // The unique ID of the detector the threatIntelSet is associated with. + // The unique ID of the detector that the threatIntelSet is associated with. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // The unique ID of the threatIntelSet you want to delete. + // The unique ID of the threatIntelSet that you want to delete. // // ThreatIntelSetId is a required field ThreatIntelSetId *string `location:"uri" locationName:"threatIntelSetId" type:"string" required:"true"` @@ -6670,6 +8006,96 @@ func (s DeleteThreatIntelSetOutput) GoString() string { return s.String() } +type DescribeOrganizationConfigurationInput struct { + _ struct{} `type:"structure"` + + // The ID of the detector to retrieve information about the delegated administrator + // from. + // + // DetectorId is a required field + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeOrganizationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOrganizationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeOrganizationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeOrganizationConfigurationInput"} + if s.DetectorId == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorId")) + } + if s.DetectorId != nil && len(*s.DetectorId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDetectorId sets the DetectorId field's value. 
+func (s *DescribeOrganizationConfigurationInput) SetDetectorId(v string) *DescribeOrganizationConfigurationInput { + s.DetectorId = &v + return s +} + +type DescribeOrganizationConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether GuardDuty is automatically enabled for accounts added to + // the organization. + // + // AutoEnable is a required field + AutoEnable *bool `locationName:"autoEnable" type:"boolean" required:"true"` + + // An object that describes which data sources are enabled automatically for + // member accounts. + DataSources *OrganizationDataSourceConfigurationsResult `locationName:"dataSources" type:"structure"` + + // Indicates whether the maximum number of allowed member accounts are already + // associated with the delegated administrator master account. + // + // MemberAccountLimitReached is a required field + MemberAccountLimitReached *bool `locationName:"memberAccountLimitReached" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s DescribeOrganizationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOrganizationConfigurationOutput) GoString() string { + return s.String() +} + +// SetAutoEnable sets the AutoEnable field's value. +func (s *DescribeOrganizationConfigurationOutput) SetAutoEnable(v bool) *DescribeOrganizationConfigurationOutput { + s.AutoEnable = &v + return s +} + +// SetDataSources sets the DataSources field's value. +func (s *DescribeOrganizationConfigurationOutput) SetDataSources(v *OrganizationDataSourceConfigurationsResult) *DescribeOrganizationConfigurationOutput { + s.DataSources = v + return s +} + +// SetMemberAccountLimitReached sets the MemberAccountLimitReached field's value. +func (s *DescribeOrganizationConfigurationOutput) SetMemberAccountLimitReached(v bool) *DescribeOrganizationConfigurationOutput { + s.MemberAccountLimitReached = &v + return s +} + type DescribePublishingDestinationInput struct { _ struct{} `type:"structure"` @@ -6743,7 +8169,8 @@ type DescribePublishingDestinationOutput struct { // DestinationProperties is a required field DestinationProperties *DestinationProperties `locationName:"destinationProperties" type:"structure" required:"true"` - // The type of the publishing destination. Currently, only S3 is supported. + // The type of publishing destination. Currently, only Amazon S3 buckets are + // supported. // // DestinationType is a required field DestinationType *string `locationName:"destinationType" min:"1" type:"string" required:"true" enum:"DestinationType"` @@ -6800,8 +8227,8 @@ func (s *DescribePublishingDestinationOutput) SetStatus(v string) *DescribePubli return s } -// Contains information about a publishing destination, including the ID, type, -// and status. +// Contains information about the publishing destination, including the ID, +// type, and status. type Destination struct { _ struct{} `type:"structure"` @@ -6811,7 +8238,7 @@ type Destination struct { DestinationId *string `locationName:"destinationId" type:"string" required:"true"` // The type of resource used for the publishing destination. Currently, only - // S3 is supported. + // Amazon S3 buckets are supported. 
// // DestinationType is a required field DestinationType *string `locationName:"destinationType" min:"1" type:"string" required:"true" enum:"DestinationType"` @@ -6850,8 +8277,8 @@ func (s *Destination) SetStatus(v string) *Destination { return s } -// Contains the ARN of the resource to publish to, such as an S3 bucket, and -// the ARN of the KMS key to use to encrypt published findings. +// Contains the Amazon Resource Name (ARN) of the resource to publish to, such +// as an S3 bucket, and the ARN of the KMS key to use to encrypt published findings. type DestinationProperties struct { _ struct{} `type:"structure"` @@ -6884,6 +8311,59 @@ func (s *DestinationProperties) SetKmsKeyArn(v string) *DestinationProperties { return s } +type DisableOrganizationAdminAccountInput struct { + _ struct{} `type:"structure"` + + // The AWS Account ID for the organizations account to be disabled as a GuardDuty + // delegated administrator. + // + // AdminAccountId is a required field + AdminAccountId *string `locationName:"adminAccountId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisableOrganizationAdminAccountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableOrganizationAdminAccountInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DisableOrganizationAdminAccountInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableOrganizationAdminAccountInput"} + if s.AdminAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AdminAccountId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdminAccountId sets the AdminAccountId field's value. +func (s *DisableOrganizationAdminAccountInput) SetAdminAccountId(v string) *DisableOrganizationAdminAccountInput { + s.AdminAccountId = &v + return s +} + +type DisableOrganizationAdminAccountOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableOrganizationAdminAccountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableOrganizationAdminAccountOutput) GoString() string { + return s.String() +} + type DisassociateFromMasterAccountInput struct { _ struct{} `type:"structure"` @@ -6943,13 +8423,13 @@ type DisassociateMembersInput struct { _ struct{} `type:"structure"` // A list of account IDs of the GuardDuty member accounts that you want to disassociate - // from master. + // from the master account. // // AccountIds is a required field AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` // The unique ID of the detector of the GuardDuty account whose members you - // want to disassociate from master. + // want to disassociate from the master account. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` @@ -7002,8 +8482,8 @@ func (s *DisassociateMembersInput) SetDetectorId(v string) *DisassociateMembersI type DisassociateMembersOutput struct { _ struct{} `type:"structure"` - // A list of objects containing the unprocessed account and a result string - // explaining why it was unprocessed. + // A list of objects that contain the unprocessed account and a result string + // that explains why it was unprocessed. 
// // UnprocessedAccounts is a required field UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` @@ -7029,7 +8509,7 @@ func (s *DisassociateMembersOutput) SetUnprocessedAccounts(v []*UnprocessedAccou type DnsRequestAction struct { _ struct{} `type:"structure"` - // Domain information for the API request. + // The domain information for the API request. Domain *string `locationName:"domain" type:"string"` } @@ -7053,7 +8533,7 @@ func (s *DnsRequestAction) SetDomain(v string) *DnsRequestAction { type DomainDetails struct { _ struct{} `type:"structure"` - // Domain information for the AWS API call. + // The domain information for the AWS API call. Domain *string `locationName:"domain" type:"string"` } @@ -7073,6 +8553,59 @@ func (s *DomainDetails) SetDomain(v string) *DomainDetails { return s } +type EnableOrganizationAdminAccountInput struct { + _ struct{} `type:"structure"` + + // The AWS Account ID for the organization account to be enabled as a GuardDuty + // delegated administrator. + // + // AdminAccountId is a required field + AdminAccountId *string `locationName:"adminAccountId" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableOrganizationAdminAccountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableOrganizationAdminAccountInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableOrganizationAdminAccountInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableOrganizationAdminAccountInput"} + if s.AdminAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AdminAccountId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdminAccountId sets the AdminAccountId field's value. +func (s *EnableOrganizationAdminAccountInput) SetAdminAccountId(v string) *EnableOrganizationAdminAccountInput { + s.AdminAccountId = &v + return s +} + +type EnableOrganizationAdminAccountOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableOrganizationAdminAccountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableOrganizationAdminAccountOutput) GoString() string { + return s.String() +} + // Contains information about the reason that the finding was generated. type Evidence struct { _ struct{} `type:"structure"` @@ -7107,7 +8640,7 @@ type Finding struct { // AccountId is a required field AccountId *string `locationName:"accountId" type:"string" required:"true"` - // The ARN for the finding. + // The ARN of the finding. // // Arn is a required field Arn *string `locationName:"arn" type:"string" required:"true"` @@ -7115,7 +8648,7 @@ type Finding struct { // The confidence score for the finding. Confidence *float64 `locationName:"confidence" type:"double"` - // The time and date at which the finding was created. + // The time and date when the finding was created. // // CreatedAt is a required field CreatedAt *string `locationName:"createdAt" type:"string" required:"true"` @@ -7131,7 +8664,7 @@ type Finding struct { // The partition associated with the finding. Partition *string `locationName:"partition" type:"string"` - // The Region in which the finding was generated. + // The Region where the finding was generated. 
// // Region is a required field Region *string `locationName:"region" type:"string" required:"true"` @@ -7155,15 +8688,15 @@ type Finding struct { // Severity is a required field Severity *float64 `locationName:"severity" type:"double" required:"true"` - // The title for the finding. + // The title of the finding. Title *string `locationName:"title" type:"string"` - // The type of the finding. + // The type of finding. // // Type is a required field Type *string `locationName:"type" min:"1" type:"string" required:"true"` - // The time and date at which the finding was laste updated. + // The time and date when the finding was last updated. // // UpdatedAt is a required field UpdatedAt *string `locationName:"updatedAt" type:"string" required:"true"` @@ -7298,7 +8831,7 @@ func (s *FindingCriteria) SetCriterion(v map[string]*Condition) *FindingCriteria type FindingStatistics struct { _ struct{} `type:"structure"` - // Represents a map of severity to count statistic for a set of findings + // Represents a map of severity to count statistics for a set of findings. CountBySeverity map[string]*int64 `locationName:"countBySeverity" type:"map"` } @@ -7318,14 +8851,40 @@ func (s *FindingStatistics) SetCountBySeverity(v map[string]*int64) *FindingStat return s } -// Contains information about the location of the remote IP address. +// Contains information on the status of VPC flow logs as a data source. +type FlowLogsConfigurationResult struct { + _ struct{} `type:"structure"` + + // Denotes whether VPC flow logs is enabled as a data source. + // + // Status is a required field + Status *string `locationName:"status" min:"1" type:"string" required:"true" enum:"DataSourceStatus"` +} + +// String returns the string representation +func (s FlowLogsConfigurationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FlowLogsConfigurationResult) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *FlowLogsConfigurationResult) SetStatus(v string) *FlowLogsConfigurationResult { + s.Status = &v + return s +} + +// Contains information about the location of the remote IP address. type GeoLocation struct { _ struct{} `type:"structure"` - // Latitude information of remote IP address. + // The latitude information of the remote IP address. Lat *float64 `locationName:"lat" type:"double"` - // Longitude information of remote IP address. + // The longitude information of the remote IP address. Lon *float64 `locationName:"lon" type:"double"` } @@ -7395,10 +8954,13 @@ func (s *GetDetectorInput) SetDetectorId(v string) *GetDetectorInput { type GetDetectorOutput struct { _ struct{} `type:"structure"` - // Detector creation timestamp. + // The timestamp of when the detector was created. CreatedAt *string `locationName:"createdAt" type:"string"` - // Finding publishing frequency. + // An object that describes which data sources are enabled for the detector. + DataSources *DataSourceConfigurationsResult `locationName:"dataSources" type:"structure"` + + // The publishing frequency of the finding. FindingPublishingFrequency *string `locationName:"findingPublishingFrequency" type:"string" enum:"FindingPublishingFrequency"` // The GuardDuty service role. @@ -7414,7 +8976,7 @@ type GetDetectorOutput struct { // The tags of the detector resource. Tags map[string]*string `locationName:"tags" min:"1" type:"map"` - // Detector last update timestamp. + // The last-updated timestamp for the detector. 
UpdatedAt *string `locationName:"updatedAt" type:"string"` } @@ -7434,6 +8996,12 @@ func (s *GetDetectorOutput) SetCreatedAt(v string) *GetDetectorOutput { return s } +// SetDataSources sets the DataSources field's value. +func (s *GetDetectorOutput) SetDataSources(v *DataSourceConfigurationsResult) *GetDetectorOutput { + s.DataSources = v + return s +} + // SetFindingPublishingFrequency sets the FindingPublishingFrequency field's value. func (s *GetDetectorOutput) SetFindingPublishingFrequency(v string) *GetDetectorOutput { s.FindingPublishingFrequency = &v @@ -7467,7 +9035,7 @@ func (s *GetDetectorOutput) SetUpdatedAt(v string) *GetDetectorOutput { type GetFilterInput struct { _ struct{} `type:"structure"` - // The unique ID of the detector the filter is associated with. + // The unique ID of the detector that the filter is associated with. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` @@ -7607,7 +9175,7 @@ type GetFindingsInput struct { // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // IDs of the findings that you want to retrieve. + // The IDs of the findings that you want to retrieve. // // FindingIds is a required field FindingIds []*string `locationName:"findingIds" type:"list" required:"true"` @@ -7697,10 +9265,10 @@ type GetFindingsStatisticsInput struct { // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // Represents the criteria used for querying findings. + // Represents the criteria that is used for querying findings. FindingCriteria *FindingCriteria `locationName:"findingCriteria" type:"structure"` - // Types of finding statistics to retrieve. + // The types of finding statistics to retrieve. // // FindingStatisticTypes is a required field FindingStatisticTypes []*string `locationName:"findingStatisticTypes" type:"list" required:"true"` @@ -7756,7 +9324,7 @@ func (s *GetFindingsStatisticsInput) SetFindingStatisticTypes(v []*string) *GetF type GetFindingsStatisticsOutput struct { _ struct{} `type:"structure"` - // Finding statistics object. + // The finding statistics object. // // FindingStatistics is a required field FindingStatistics *FindingStatistics `locationName:"findingStatistics" type:"structure" required:"true"` @@ -7781,7 +9349,7 @@ func (s *GetFindingsStatisticsOutput) SetFindingStatistics(v *FindingStatistics) type GetIPSetInput struct { _ struct{} `type:"structure"` - // The unique ID of the detector the ipSet is associated with. + // The unique ID of the detector that the IPSet is associated with. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` @@ -7844,22 +9412,22 @@ type GetIPSetOutput struct { // Format is a required field Format *string `locationName:"format" min:"1" type:"string" required:"true" enum:"IpSetFormat"` - // The URI of the file that contains the IPSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key) + // The URI of the file that contains the IPSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key. // // Location is a required field Location *string `locationName:"location" min:"1" type:"string" required:"true"` - // The user friendly name for the IPSet. + // The user-friendly name for the IPSet. 
// // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // The status of ipSet file uploaded. + // The status of IPSet file that was uploaded. // // Status is a required field Status *string `locationName:"status" min:"1" type:"string" required:"true" enum:"IpSetStatus"` - // The tags of the IP set resource. + // The tags of the IPSet resource. Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } @@ -7984,7 +9552,7 @@ func (s *GetMasterAccountInput) SetDetectorId(v string) *GetMasterAccountInput { type GetMasterAccountOutput struct { _ struct{} `type:"structure"` - // Master account details. + // The master account details. // // Master is a required field Master *Master `locationName:"master" type:"structure" required:"true"` @@ -8006,6 +9574,101 @@ func (s *GetMasterAccountOutput) SetMaster(v *Master) *GetMasterAccountOutput { return s } +type GetMemberDetectorsInput struct { + _ struct{} `type:"structure"` + + // The account ID of the member account. + // + // AccountIds is a required field + AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` + + // The detector ID for the master account. + // + // DetectorId is a required field + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetMemberDetectorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMemberDetectorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMemberDetectorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMemberDetectorsInput"} + if s.AccountIds == nil { + invalidParams.Add(request.NewErrParamRequired("AccountIds")) + } + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } + if s.DetectorId == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorId")) + } + if s.DetectorId != nil && len(*s.DetectorId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountIds sets the AccountIds field's value. +func (s *GetMemberDetectorsInput) SetAccountIds(v []*string) *GetMemberDetectorsInput { + s.AccountIds = v + return s +} + +// SetDetectorId sets the DetectorId field's value. +func (s *GetMemberDetectorsInput) SetDetectorId(v string) *GetMemberDetectorsInput { + s.DetectorId = &v + return s +} + +type GetMemberDetectorsOutput struct { + _ struct{} `type:"structure"` + + // An object that describes which data sources are enabled for a member account. + // + // MemberDataSourceConfigurations is a required field + MemberDataSourceConfigurations []*MemberDataSourceConfiguration `locationName:"members" min:"1" type:"list" required:"true"` + + // A list of member account IDs that were unable to be processed along with + // an explanation for why they were not processed. 
+ // + // UnprocessedAccounts is a required field + UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetMemberDetectorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMemberDetectorsOutput) GoString() string { + return s.String() +} + +// SetMemberDataSourceConfigurations sets the MemberDataSourceConfigurations field's value. +func (s *GetMemberDetectorsOutput) SetMemberDataSourceConfigurations(v []*MemberDataSourceConfiguration) *GetMemberDetectorsOutput { + s.MemberDataSourceConfigurations = v + return s +} + +// SetUnprocessedAccounts sets the UnprocessedAccounts field's value. +func (s *GetMemberDetectorsOutput) SetUnprocessedAccounts(v []*UnprocessedAccount) *GetMemberDetectorsOutput { + s.UnprocessedAccounts = v + return s +} + type GetMembersInput struct { _ struct{} `type:"structure"` @@ -8073,8 +9736,8 @@ type GetMembersOutput struct { // Members is a required field Members []*Member `locationName:"members" type:"list" required:"true"` - // A list of objects containing the unprocessed account and a result string - // explaining why it was unprocessed. + // A list of objects that contain the unprocessed account and a result string + // that explains why it was unprocessed. // // UnprocessedAccounts is a required field UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` @@ -8105,12 +9768,12 @@ func (s *GetMembersOutput) SetUnprocessedAccounts(v []*UnprocessedAccount) *GetM type GetThreatIntelSetInput struct { _ struct{} `type:"structure"` - // The unique ID of the detector the threatIntelSet is associated with. + // The unique ID of the detector that the threatIntelSet is associated with. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // The unique ID of the threatIntelSet you want to get. + // The unique ID of the threatIntelSet that you want to get. // // ThreatIntelSetId is a required field ThreatIntelSetId *string `location:"uri" locationName:"threatIntelSetId" type:"string" required:"true"` @@ -8168,12 +9831,12 @@ type GetThreatIntelSetOutput struct { // Format is a required field Format *string `locationName:"format" min:"1" type:"string" required:"true" enum:"ThreatIntelSetFormat"` - // The URI of the file that contains the ThreatIntelSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key). + // The URI of the file that contains the ThreatIntelSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key. // // Location is a required field Location *string `locationName:"location" min:"1" type:"string" required:"true"` - // A user-friendly ThreatIntelSet name that is displayed in all finding generated + // A user-friendly ThreatIntelSet name displayed in all findings that are generated // by activity that involves IP addresses included in this ThreatIntelSet. // // Name is a required field @@ -8184,7 +9847,7 @@ type GetThreatIntelSetOutput struct { // Status is a required field Status *string `locationName:"status" min:"1" type:"string" required:"true" enum:"ThreatIntelSetStatus"` - // The tags of the Threat List resource. + // The tags of the threat list resource. 
Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } @@ -8228,14 +9891,157 @@ func (s *GetThreatIntelSetOutput) SetTags(v map[string]*string) *GetThreatIntelS return s } +type GetUsageStatisticsInput struct { + _ struct{} `type:"structure"` + + // The ID of the detector that specifies the GuardDuty service whose usage statistics + // you want to retrieve. + // + // DetectorId is a required field + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` + + // The maximum number of results to return in the response. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // A token to use for paginating results that are returned in the response. + // Set the value of this parameter to null for the first request to a list action. + // For subsequent calls, use the NextToken value returned from the previous + // request to continue listing results after the first page. + NextToken *string `locationName:"nextToken" type:"string"` + + // The currency unit you would like to view your usage statistics in. Current + // valid values are USD. + Unit *string `locationName:"unit" type:"string"` + + // Represents the criteria used for querying usage. + // + // UsageCriteria is a required field + UsageCriteria *UsageCriteria `locationName:"usageCriteria" type:"structure" required:"true"` + + // The type of usage statistics to retrieve. + // + // UsageStatisticType is a required field + UsageStatisticType *string `locationName:"usageStatisticsType" type:"string" required:"true" enum:"UsageStatisticType"` +} + +// String returns the string representation +func (s GetUsageStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUsageStatisticsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetUsageStatisticsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetUsageStatisticsInput"} + if s.DetectorId == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorId")) + } + if s.DetectorId != nil && len(*s.DetectorId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.UsageCriteria == nil { + invalidParams.Add(request.NewErrParamRequired("UsageCriteria")) + } + if s.UsageStatisticType == nil { + invalidParams.Add(request.NewErrParamRequired("UsageStatisticType")) + } + if s.UsageCriteria != nil { + if err := s.UsageCriteria.Validate(); err != nil { + invalidParams.AddNested("UsageCriteria", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDetectorId sets the DetectorId field's value. +func (s *GetUsageStatisticsInput) SetDetectorId(v string) *GetUsageStatisticsInput { + s.DetectorId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *GetUsageStatisticsInput) SetMaxResults(v int64) *GetUsageStatisticsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *GetUsageStatisticsInput) SetNextToken(v string) *GetUsageStatisticsInput { + s.NextToken = &v + return s +} + +// SetUnit sets the Unit field's value. 
+func (s *GetUsageStatisticsInput) SetUnit(v string) *GetUsageStatisticsInput { + s.Unit = &v + return s +} + +// SetUsageCriteria sets the UsageCriteria field's value. +func (s *GetUsageStatisticsInput) SetUsageCriteria(v *UsageCriteria) *GetUsageStatisticsInput { + s.UsageCriteria = v + return s +} + +// SetUsageStatisticType sets the UsageStatisticType field's value. +func (s *GetUsageStatisticsInput) SetUsageStatisticType(v string) *GetUsageStatisticsInput { + s.UsageStatisticType = &v + return s +} + +type GetUsageStatisticsOutput struct { + _ struct{} `type:"structure"` + + // The pagination parameter to be used on the next list operation to retrieve + // more items. + NextToken *string `locationName:"nextToken" type:"string"` + + // The usage statistics object. If a UsageStatisticType was provided, the objects + // representing other types will be null. + UsageStatistics *UsageStatistics `locationName:"usageStatistics" type:"structure"` +} + +// String returns the string representation +func (s GetUsageStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUsageStatisticsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *GetUsageStatisticsOutput) SetNextToken(v string) *GetUsageStatisticsOutput { + s.NextToken = &v + return s +} + +// SetUsageStatistics sets the UsageStatistics field's value. +func (s *GetUsageStatisticsOutput) SetUsageStatistics(v *UsageStatistics) *GetUsageStatisticsOutput { + s.UsageStatistics = v + return s +} + // Contains information about the EC2 instance profile. type IamInstanceProfile struct { _ struct{} `type:"structure"` - // AWS EC2 instance profile ARN. + // The profile ARN of the EC2 instance. Arn *string `locationName:"arn" type:"string"` - // AWS EC2 instance profile ID. + // The profile ID of the EC2 instance. Id *string `locationName:"id" type:"string"` } @@ -8265,7 +10071,7 @@ func (s *IamInstanceProfile) SetId(v string) *IamInstanceProfile { type InstanceDetails struct { _ struct{} `type:"structure"` - // The availability zone of the EC2 instance. + // The Availability Zone of the EC2 instance. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` // The profile information of the EC2 instance. @@ -8289,7 +10095,7 @@ type InstanceDetails struct { // The launch time of the EC2 instance. LaunchTime *string `locationName:"launchTime" type:"string"` - // The network interface information of the EC2 instance. + // The elastic network interface information of the EC2 instance. NetworkInterfaces []*NetworkInterface `locationName:"networkInterfaces" type:"list"` // The Amazon Resource Name (ARN) of the AWS Outpost. Only applicable to AWS @@ -8394,10 +10200,10 @@ func (s *InstanceDetails) SetTags(v []*Tag) *InstanceDetails { return s } -// Internal server error exception object. +// An internal server error exception object. type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The error message. Message_ *string `locationName:"message" type:"string"` @@ -8418,17 +10224,17 @@ func (s InternalServerErrorException) GoString() string { func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { return &InternalServerErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InternalServerErrorException) Code() string { +func (s *InternalServerErrorException) Code() string { return "InternalServerErrorException" } // Message returns the exception's message. -func (s InternalServerErrorException) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8436,36 +10242,36 @@ func (s InternalServerErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerErrorException) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s InternalServerErrorException) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about the invitation to become a member account. type Invitation struct { _ struct{} `type:"structure"` - // The ID of the account from which the invitations was sent. + // The ID of the account that the invitation was sent from. AccountId *string `locationName:"accountId" min:"12" type:"string"` // The ID of the invitation. This value is used to validate the inviter account // to the member account. InvitationId *string `locationName:"invitationId" type:"string"` - // Timestamp at which the invitation was sent. + // The timestamp when the invitation was sent. InvitedAt *string `locationName:"invitedAt" type:"string"` // The status of the relationship between the inviter and invitee accounts. @@ -8515,17 +10321,17 @@ type InviteMembersInput struct { // AccountIds is a required field AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` - // The unique ID of the detector of the GuardDuty account with which you want - // to invite members. + // The unique ID of the detector of the GuardDuty account that you want to invite + // members with. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // A boolean value that specifies whether you want to disable email notification - // to the accounts that you’re inviting to GuardDuty as members. + // A Boolean value that specifies whether you want to disable email notification + // to the accounts that you are inviting to GuardDuty as members. DisableEmailNotification *bool `locationName:"disableEmailNotification" type:"boolean"` - // The invitation message that you want to send to the accounts that you’re + // The invitation message that you want to send to the accounts that you're // inviting to GuardDuty as members. Message *string `locationName:"message" type:"string"` } @@ -8589,8 +10395,8 @@ func (s *InviteMembersInput) SetMessage(v string) *InviteMembersInput { type InviteMembersOutput struct { _ struct{} `type:"structure"` - // A list of objects containing the unprocessed account and a result string - // explaining why it was unprocessed. 
+ // A list of objects that contain the unprocessed account and a result string + // that explains why it was unprocessed. // // UnprocessedAccounts is a required field UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` @@ -8615,14 +10421,14 @@ func (s *InviteMembersOutput) SetUnprocessedAccounts(v []*UnprocessedAccount) *I type ListDetectorsInput struct { _ struct{} `type:"structure"` - // You can use this parameter to indicate the maximum number of items you want - // in the response. The default value is 50. The maximum value is 50. + // You can use this parameter to indicate the maximum number of items that you + // want in the response. The default value is 50. The maximum value is 50. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` // You can use this parameter when paginating results. Set the value of this // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // to the action, fill nextToken in the request with the value of NextToken + // from the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -8664,13 +10470,13 @@ func (s *ListDetectorsInput) SetNextToken(v string) *ListDetectorsInput { type ListDetectorsOutput struct { _ struct{} `type:"structure"` - // A list of detector Ids. + // A list of detector IDs. // // DetectorIds is a required field DetectorIds []*string `locationName:"detectorIds" type:"list" required:"true"` - // Pagination parameter to be used on the next list operation to retrieve more - // items. + // The pagination parameter to be used on the next list operation to retrieve + // more items. NextToken *string `locationName:"nextToken" type:"string"` } @@ -8699,19 +10505,19 @@ func (s *ListDetectorsOutput) SetNextToken(v string) *ListDetectorsOutput { type ListFiltersInput struct { _ struct{} `type:"structure"` - // The unique ID of the detector the filter is associated with. + // The unique ID of the detector that the filter is associated with. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // You can use this parameter to indicate the maximum number of items you want - // in the response. The default value is 50. The maximum value is 50. + // You can use this parameter to indicate the maximum number of items that you + // want in the response. The default value is 50. The maximum value is 50. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` // You can use this parameter when paginating results. Set the value of this // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // to the action, fill nextToken in the request with the value of NextToken + // from the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -8765,13 +10571,13 @@ func (s *ListFiltersInput) SetNextToken(v string) *ListFiltersInput { type ListFiltersOutput struct { _ struct{} `type:"structure"` - // A list of filter names + // A list of filter names. 
// // FilterNames is a required field FilterNames []*string `locationName:"filterNames" type:"list" required:"true"` - // Pagination parameter to be used on the next list operation to retrieve more - // items. + // The pagination parameter to be used on the next list operation to retrieve + // more items. NextToken *string `locationName:"nextToken" type:"string"` } @@ -8832,8 +10638,6 @@ type ListFindingsInput struct { // // * resource.instanceDetails.instanceId // - // * resource.instanceDetails.outpostArn - // // * resource.instanceDetails.networkInterfaces.ipv6Addresses // // * resource.instanceDetails.networkInterfaces.privateIpAddresses.privateIpAddress @@ -8884,8 +10688,6 @@ type ListFindingsInput struct { // // * service.action.networkConnectionAction.protocol // - // * service.action.networkConnectionAction.localIpDetails.ipAddressV4 - // // * service.action.networkConnectionAction.remoteIpDetails.city.cityName // // * service.action.networkConnectionAction.remoteIpDetails.country.countryName @@ -8920,8 +10722,8 @@ type ListFindingsInput struct { // You can use this parameter when paginating results. Set the value of this // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // to the action, fill nextToken in the request with the value of NextToken + // from the previous response to continue listing data. NextToken *string `locationName:"nextToken" type:"string"` // Represents the criteria used for sorting findings. @@ -8990,13 +10792,13 @@ func (s *ListFindingsInput) SetSortCriteria(v *SortCriteria) *ListFindingsInput type ListFindingsOutput struct { _ struct{} `type:"structure"` - // The IDs of the findings you are listing. + // The IDs of the findings that you're listing. // // FindingIds is a required field FindingIds []*string `locationName:"findingIds" type:"list" required:"true"` - // Pagination parameter to be used on the next list operation to retrieve more - // items. + // The pagination parameter to be used on the next list operation to retrieve + // more items. NextToken *string `locationName:"nextToken" type:"string"` } @@ -9025,7 +10827,7 @@ func (s *ListFindingsOutput) SetNextToken(v string) *ListFindingsOutput { type ListIPSetsInput struct { _ struct{} `type:"structure"` - // The unique ID of the detector the ipSet is associated with. + // The unique ID of the detector that the IPSet is associated with. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` @@ -9036,8 +10838,8 @@ type ListIPSetsInput struct { // You can use this parameter when paginating results. Set the value of this // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // to the action, fill nextToken in the request with the value of NextToken + // from the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -9096,8 +10898,8 @@ type ListIPSetsOutput struct { // IpSetIds is a required field IpSetIds []*string `locationName:"ipSetIds" type:"list" required:"true"` - // Pagination parameter to be used on the next list operation to retrieve more - // items. 
+ // The pagination parameter to be used on the next list operation to retrieve + // more items. NextToken *string `locationName:"nextToken" type:"string"` } @@ -9126,14 +10928,14 @@ func (s *ListIPSetsOutput) SetNextToken(v string) *ListIPSetsOutput { type ListInvitationsInput struct { _ struct{} `type:"structure"` - // You can use this parameter to indicate the maximum number of items you want - // in the response. The default value is 50. The maximum value is 50. + // You can use this parameter to indicate the maximum number of items that you + // want in the response. The default value is 50. The maximum value is 50. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` // You can use this parameter when paginating results. Set the value of this // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // to the action, fill nextToken in the request with the value of NextToken + // from the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -9178,8 +10980,8 @@ type ListInvitationsOutput struct { // A list of invitation descriptions. Invitations []*Invitation `locationName:"invitations" type:"list"` - // Pagination parameter to be used on the next list operation to retrieve more - // items. + // The pagination parameter to be used on the next list operation to retrieve + // more items. NextToken *string `locationName:"nextToken" type:"string"` } @@ -9219,12 +11021,12 @@ type ListMembersInput struct { // You can use this parameter when paginating results. Set the value of this // parameter to null on your first call to the list action. For subsequent calls - // to the action fill nextToken in the request with the value of NextToken from - // the previous response to continue listing data. + // to the action, fill nextToken in the request with the value of NextToken + // from the previous response to continue listing data. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` // Specifies whether to only return associated members or to return all members - // (including members which haven't been invited yet or have been disassociated). + // (including members who haven't been invited yet or have been disassociated). OnlyAssociated *string `location:"querystring" locationName:"onlyAssociated" type:"string"` } @@ -9287,8 +11089,8 @@ type ListMembersOutput struct { // A list of members. Members []*Member `locationName:"members" type:"list"` - // Pagination parameter to be used on the next list operation to retrieve more - // items. + // The pagination parameter to be used on the next list operation to retrieve + // more items. NextToken *string `locationName:"nextToken" type:"string"` } @@ -9314,43 +11116,32 @@ func (s *ListMembersOutput) SetNextToken(v string) *ListMembersOutput { return s } -type ListPublishingDestinationsInput struct { +type ListOrganizationAdminAccountsInput struct { _ struct{} `type:"structure"` - // The ID of the detector to retrieve publishing destinations for. - // - // DetectorId is a required field - DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // The maximum number of results to return in the response. 
MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // A token to use for paginating results returned in the repsonse. Set the value - // of this parameter to null for the first request to a list action. For subsequent - // calls, use the NextToken value returned from the previous request to continue - // listing results after the first page. + // A token to use for paginating results that are returned in the response. + // Set the value of this parameter to null for the first request to a list action. + // For subsequent calls, use the NextToken value returned from the previous + // request to continue listing results after the first page. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s ListPublishingDestinationsInput) String() string { +func (s ListOrganizationAdminAccountsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListPublishingDestinationsInput) GoString() string { +func (s ListOrganizationAdminAccountsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListPublishingDestinationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListPublishingDestinationsInput"} - if s.DetectorId == nil { - invalidParams.Add(request.NewErrParamRequired("DetectorId")) - } - if s.DetectorId != nil && len(*s.DetectorId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DetectorId", 1)) - } +func (s *ListOrganizationAdminAccountsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListOrganizationAdminAccountsInput"} if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } @@ -9361,73 +11152,166 @@ func (s *ListPublishingDestinationsInput) Validate() error { return nil } -// SetDetectorId sets the DetectorId field's value. -func (s *ListPublishingDestinationsInput) SetDetectorId(v string) *ListPublishingDestinationsInput { - s.DetectorId = &v - return s -} - // SetMaxResults sets the MaxResults field's value. -func (s *ListPublishingDestinationsInput) SetMaxResults(v int64) *ListPublishingDestinationsInput { +func (s *ListOrganizationAdminAccountsInput) SetMaxResults(v int64) *ListOrganizationAdminAccountsInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListPublishingDestinationsInput) SetNextToken(v string) *ListPublishingDestinationsInput { +func (s *ListOrganizationAdminAccountsInput) SetNextToken(v string) *ListOrganizationAdminAccountsInput { s.NextToken = &v return s } -type ListPublishingDestinationsOutput struct { +type ListOrganizationAdminAccountsOutput struct { _ struct{} `type:"structure"` - // A Destinations obect that includes information about each publishing destination - // returned. - // - // Destinations is a required field - Destinations []*Destination `locationName:"destinations" type:"list" required:"true"` + // An AdminAccounts object that includes a list of accounts configured as GuardDuty + // delegated administrators. + AdminAccounts []*AdminAccount `locationName:"adminAccounts" type:"list"` - // A token to use for paginating results returned in the repsonse. Set the value - // of this parameter to null for the first request to a list action. 
For subsequent - // calls, use the NextToken value returned from the previous request to continue - // listing results after the first page. + // The pagination parameter to be used on the next list operation to retrieve + // more items. NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s ListPublishingDestinationsOutput) String() string { +func (s ListOrganizationAdminAccountsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListPublishingDestinationsOutput) GoString() string { +func (s ListOrganizationAdminAccountsOutput) GoString() string { return s.String() } -// SetDestinations sets the Destinations field's value. -func (s *ListPublishingDestinationsOutput) SetDestinations(v []*Destination) *ListPublishingDestinationsOutput { - s.Destinations = v +// SetAdminAccounts sets the AdminAccounts field's value. +func (s *ListOrganizationAdminAccountsOutput) SetAdminAccounts(v []*AdminAccount) *ListOrganizationAdminAccountsOutput { + s.AdminAccounts = v return s } // SetNextToken sets the NextToken field's value. -func (s *ListPublishingDestinationsOutput) SetNextToken(v string) *ListPublishingDestinationsOutput { +func (s *ListOrganizationAdminAccountsOutput) SetNextToken(v string) *ListOrganizationAdminAccountsOutput { s.NextToken = &v return s } -type ListTagsForResourceInput struct { +type ListPublishingDestinationsInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) for the given GuardDuty resource + // The ID of the detector to retrieve publishing destinations for. // - // ResourceArn is a required field - ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + // DetectorId is a required field + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` + + // The maximum number of results to return in the response. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // A token to use for paginating results that are returned in the response. + // Set the value of this parameter to null for the first request to a list action. + // For subsequent calls, use the NextToken value returned from the previous + // request to continue listing results after the first page. + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s ListTagsForResourceInput) String() string { +func (s ListPublishingDestinationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPublishingDestinationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPublishingDestinationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPublishingDestinationsInput"} + if s.DetectorId == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorId")) + } + if s.DetectorId != nil && len(*s.DetectorId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDetectorId sets the DetectorId field's value. 
+func (s *ListPublishingDestinationsInput) SetDetectorId(v string) *ListPublishingDestinationsInput { + s.DetectorId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListPublishingDestinationsInput) SetMaxResults(v int64) *ListPublishingDestinationsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPublishingDestinationsInput) SetNextToken(v string) *ListPublishingDestinationsInput { + s.NextToken = &v + return s +} + +type ListPublishingDestinationsOutput struct { + _ struct{} `type:"structure"` + + // A Destinations object that includes information about each publishing destination + // returned. + // + // Destinations is a required field + Destinations []*Destination `locationName:"destinations" type:"list" required:"true"` + + // A token to use for paginating results that are returned in the response. + // Set the value of this parameter to null for the first request to a list action. + // For subsequent calls, use the NextToken value returned from the previous + // request to continue listing results after the first page. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListPublishingDestinationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPublishingDestinationsOutput) GoString() string { + return s.String() +} + +// SetDestinations sets the Destinations field's value. +func (s *ListPublishingDestinationsOutput) SetDestinations(v []*Destination) *ListPublishingDestinationsOutput { + s.Destinations = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListPublishingDestinationsOutput) SetNextToken(v string) *ListPublishingDestinationsOutput { + s.NextToken = &v + return s +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the given GuardDuty resource. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { return awsutil.Prettify(s) } @@ -9484,18 +11368,18 @@ func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForRe type ListThreatIntelSetsInput struct { _ struct{} `type:"structure"` - // The unique ID of the detector the threatIntelSet is associated with. + // The unique ID of the detector that the threatIntelSet is associated with. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // You can use this parameter to indicate the maximum number of items you want - // in the response. The default value is 50. The maximum value is 50. + // You can use this parameter to indicate the maximum number of items that you + // want in the response. The default value is 50. The maximum value is 50. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` // You can use this parameter to paginate results in the response. Set the value // of this parameter to null on your first call to the list action. For subsequent - // calls to the action fill nextToken in the request with the value of NextToken + // calls to the action, fill nextToken in the request with the value of NextToken // from the previous response to continue listing data. 
NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -9550,8 +11434,8 @@ func (s *ListThreatIntelSetsInput) SetNextToken(v string) *ListThreatIntelSetsIn type ListThreatIntelSetsOutput struct { _ struct{} `type:"structure"` - // Pagination parameter to be used on the next list operation to retrieve more - // items. + // The pagination parameter to be used on the next list operation to retrieve + // more items. NextToken *string `locationName:"nextToken" type:"string"` // The IDs of the ThreatIntelSet resources. @@ -9586,7 +11470,7 @@ func (s *ListThreatIntelSetsOutput) SetThreatIntelSetIds(v []*string) *ListThrea type LocalIpDetails struct { _ struct{} `type:"structure"` - // IPV4 remote address of the connection. + // The IPv4 local address of the connection. IpAddressV4 *string `locationName:"ipAddressV4" type:"string"` } @@ -9610,10 +11494,10 @@ func (s *LocalIpDetails) SetIpAddressV4(v string) *LocalIpDetails { type LocalPortDetails struct { _ struct{} `type:"structure"` - // Port number of the local connection. + // The port number of the local connection. Port *int64 `locationName:"port" type:"integer"` - // Port name of the local connection. + // The port name of the local connection. PortName *string `locationName:"portName" type:"string"` } @@ -9639,17 +11523,17 @@ func (s *LocalPortDetails) SetPortName(v string) *LocalPortDetails { return s } -// Contains information about the Master account and invitation. +// Contains information about the master account and invitation. type Master struct { _ struct{} `type:"structure"` - // The ID of the account used as the Master account. + // The ID of the account used as the master account. AccountId *string `locationName:"accountId" min:"12" type:"string"` - // This value is used to validate the master account to the member account. + // The value used to validate the master account to the member account. InvitationId *string `locationName:"invitationId" type:"string"` - // Timestamp at which the invitation was sent. + // The timestamp when the invitation was sent. InvitedAt *string `locationName:"invitedAt" type:"string"` // The status of the relationship between the master and member accounts. @@ -9690,27 +11574,27 @@ func (s *Master) SetRelationshipStatus(v string) *Master { return s } -// Continas information about the member account +// Contains information about the member account. type Member struct { _ struct{} `type:"structure"` - // Member account ID. + // The ID of the member account. // // AccountId is a required field AccountId *string `locationName:"accountId" min:"12" type:"string" required:"true"` - // Member account's detector ID. + // The detector ID of the member account. DetectorId *string `locationName:"detectorId" min:"1" type:"string"` - // Member account's email address. + // The email address of the member account. // // Email is a required field Email *string `locationName:"email" min:"1" type:"string" required:"true"` - // Timestamp at which the invitation was sent + // The timestamp when the invitation was sent. InvitedAt *string `locationName:"invitedAt" type:"string"` - // Master account ID. + // The master account ID. // // MasterId is a required field MasterId *string `locationName:"masterId" type:"string" required:"true"` @@ -9720,7 +11604,7 @@ type Member struct { // RelationshipStatus is a required field RelationshipStatus *string `locationName:"relationshipStatus" type:"string" required:"true"` - // Member last updated timestamp. 
+ // The last-updated timestamp of the member. // // UpdatedAt is a required field UpdatedAt *string `locationName:"updatedAt" type:"string" required:"true"` @@ -9778,30 +11662,67 @@ func (s *Member) SetUpdatedAt(v string) *Member { return s } +// Contains information on which data sources are enabled for a member account. +type MemberDataSourceConfiguration struct { + _ struct{} `type:"structure"` + + // The account ID for the member account. + // + // AccountId is a required field + AccountId *string `locationName:"accountId" min:"12" type:"string" required:"true"` + + // Contains information on the status of data sources for the account. + // + // DataSources is a required field + DataSources *DataSourceConfigurationsResult `locationName:"dataSources" type:"structure" required:"true"` +} + +// String returns the string representation +func (s MemberDataSourceConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MemberDataSourceConfiguration) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *MemberDataSourceConfiguration) SetAccountId(v string) *MemberDataSourceConfiguration { + s.AccountId = &v + return s +} + +// SetDataSources sets the DataSources field's value. +func (s *MemberDataSourceConfiguration) SetDataSources(v *DataSourceConfigurationsResult) *MemberDataSourceConfiguration { + s.DataSources = v + return s +} + // Contains information about the NETWORK_CONNECTION action described in the // finding. type NetworkConnectionAction struct { _ struct{} `type:"structure"` - // Network connection blocked information. + // Indicates whether EC2 blocked the network connection to your instance. Blocked *bool `locationName:"blocked" type:"boolean"` - // Network connection direction. + // The network connection direction. ConnectionDirection *string `locationName:"connectionDirection" type:"string"` - // Local IP information of the connection. + // The local IP information of the connection. LocalIpDetails *LocalIpDetails `locationName:"localIpDetails" type:"structure"` - // Local port information of the connection. + // The local port information of the connection. LocalPortDetails *LocalPortDetails `locationName:"localPortDetails" type:"structure"` - // Network connection protocol. + // The network connection protocol. Protocol *string `locationName:"protocol" type:"string"` - // Remote IP information of the connection. + // The remote IP information of the connection. RemoteIpDetails *RemoteIpDetails `locationName:"remoteIpDetails" type:"structure"` - // Remote port information of the connection. + // The remote port information of the connection. RemotePortDetails *RemotePortDetails `locationName:"remotePortDetails" type:"structure"` } @@ -9857,32 +11778,32 @@ func (s *NetworkConnectionAction) SetRemotePortDetails(v *RemotePortDetails) *Ne return s } -// Contains information about the network interface of the Ec2 instance. +// Contains information about the elastic network interface of the EC2 instance. type NetworkInterface struct { _ struct{} `type:"structure"` - // A list of EC2 instance IPv6 address information. + // A list of IPv6 addresses for the EC2 instance. Ipv6Addresses []*string `locationName:"ipv6Addresses" type:"list"` - // The ID of the network interface + // The ID of the network interface. NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` - // Private DNS name of the EC2 instance. 
+ // The private DNS name of the EC2 instance. PrivateDnsName *string `locationName:"privateDnsName" type:"string"` - // Private IP address of the EC2 instance. + // The private IP address of the EC2 instance. PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` // Other private IP address information of the EC2 instance. PrivateIpAddresses []*PrivateIpAddressDetails `locationName:"privateIpAddresses" type:"list"` - // Public DNS name of the EC2 instance. + // The public DNS name of the EC2 instance. PublicDnsName *string `locationName:"publicDnsName" type:"string"` - // Public IP address of the EC2 instance. + // The public IP address of the EC2 instance. PublicIp *string `locationName:"publicIp" type:"string"` - // Security groups associated with the EC2 instance. + // The security groups associated with the EC2 instance. SecurityGroups []*SecurityGroup `locationName:"securityGroups" type:"list"` // The subnet ID of the EC2 instance. @@ -9962,20 +11883,21 @@ func (s *NetworkInterface) SetVpcId(v string) *NetworkInterface { return s } -// Continas information about the ISP organization of the remote IP address. +// Contains information about the ISP organization of the remote IP address. type Organization struct { _ struct{} `type:"structure"` - // Autonomous system number of the internet provider of the remote IP address. + // The Autonomous System Number (ASN) of the internet provider of the remote + // IP address. Asn *string `locationName:"asn" type:"string"` - // Organization that registered this ASN. + // The organization that registered this ASN. AsnOrg *string `locationName:"asnOrg" type:"string"` - // ISP information for the internet provider. + // The ISP information for the internet provider. Isp *string `locationName:"isp" type:"string"` - // Name of the internet provider. + // The name of the internet provider. Org *string `locationName:"org" type:"string"` } @@ -10013,14 +11935,209 @@ func (s *Organization) SetOrg(v string) *Organization { return s } +// An object that contains information on which data sources will be configured +// to be automatically enabled for new members within the organization. +type OrganizationDataSourceConfigurations struct { + _ struct{} `type:"structure"` + + // Describes whether S3 data event logs are enabled for new members of the organization. + S3Logs *OrganizationS3LogsConfiguration `locationName:"s3Logs" type:"structure"` +} + +// String returns the string representation +func (s OrganizationDataSourceConfigurations) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OrganizationDataSourceConfigurations) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OrganizationDataSourceConfigurations) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OrganizationDataSourceConfigurations"} + if s.S3Logs != nil { + if err := s.S3Logs.Validate(); err != nil { + invalidParams.AddNested("S3Logs", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3Logs sets the S3Logs field's value. +func (s *OrganizationDataSourceConfigurations) SetS3Logs(v *OrganizationS3LogsConfiguration) *OrganizationDataSourceConfigurations { + s.S3Logs = v + return s +} + +// An object that contains information on which data sources are automatically +// enabled for new members within the organization. 
+type OrganizationDataSourceConfigurationsResult struct { + _ struct{} `type:"structure"` + + // Describes whether S3 data event logs are enabled as a data source. + // + // S3Logs is a required field + S3Logs *OrganizationS3LogsConfigurationResult `locationName:"s3Logs" type:"structure" required:"true"` +} + +// String returns the string representation +func (s OrganizationDataSourceConfigurationsResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OrganizationDataSourceConfigurationsResult) GoString() string { + return s.String() +} + +// SetS3Logs sets the S3Logs field's value. +func (s *OrganizationDataSourceConfigurationsResult) SetS3Logs(v *OrganizationS3LogsConfigurationResult) *OrganizationDataSourceConfigurationsResult { + s.S3Logs = v + return s +} + +// Describes whether S3 data event logs will be automatically enabled for new +// members of the organization. +type OrganizationS3LogsConfiguration struct { + _ struct{} `type:"structure"` + + // A value that contains information on whether S3 data event logs will be enabled + // automatically as a data source for the organization. + // + // AutoEnable is a required field + AutoEnable *bool `locationName:"autoEnable" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s OrganizationS3LogsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OrganizationS3LogsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OrganizationS3LogsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OrganizationS3LogsConfiguration"} + if s.AutoEnable == nil { + invalidParams.Add(request.NewErrParamRequired("AutoEnable")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoEnable sets the AutoEnable field's value. +func (s *OrganizationS3LogsConfiguration) SetAutoEnable(v bool) *OrganizationS3LogsConfiguration { + s.AutoEnable = &v + return s +} + +// The current configuration of S3 data event logs as a data source for the +// organization. +type OrganizationS3LogsConfigurationResult struct { + _ struct{} `type:"structure"` + + // A value that describes whether S3 data event logs are automatically enabled + // for new members of the organization. + // + // AutoEnable is a required field + AutoEnable *bool `locationName:"autoEnable" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s OrganizationS3LogsConfigurationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OrganizationS3LogsConfigurationResult) GoString() string { + return s.String() +} + +// SetAutoEnable sets the AutoEnable field's value. +func (s *OrganizationS3LogsConfigurationResult) SetAutoEnable(v bool) *OrganizationS3LogsConfigurationResult { + s.AutoEnable = &v + return s +} + +// Contains information on the owner of the bucket. +type Owner struct { + _ struct{} `type:"structure"` + + // The canonical user ID of the bucket owner. For information about locating + // your canonical user ID see Finding Your Account Canonical User ID. 
(https://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html#FindingCanonicalId) + Id *string `locationName:"id" type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +// SetId sets the Id field's value. +func (s *Owner) SetId(v string) *Owner { + s.Id = &v + return s +} + +// Contains information about how permissions are configured for the S3 bucket. +type PermissionConfiguration struct { + _ struct{} `type:"structure"` + + // Contains information about the account level permissions on the S3 bucket. + AccountLevelPermissions *AccountLevelPermissions `locationName:"accountLevelPermissions" type:"structure"` + + // Contains information about the bucket level permissions for the S3 bucket. + BucketLevelPermissions *BucketLevelPermissions `locationName:"bucketLevelPermissions" type:"structure"` +} + +// String returns the string representation +func (s PermissionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PermissionConfiguration) GoString() string { + return s.String() +} + +// SetAccountLevelPermissions sets the AccountLevelPermissions field's value. +func (s *PermissionConfiguration) SetAccountLevelPermissions(v *AccountLevelPermissions) *PermissionConfiguration { + s.AccountLevelPermissions = v + return s +} + +// SetBucketLevelPermissions sets the BucketLevelPermissions field's value. +func (s *PermissionConfiguration) SetBucketLevelPermissions(v *BucketLevelPermissions) *PermissionConfiguration { + s.BucketLevelPermissions = v + return s +} + // Contains information about the PORT_PROBE action described in the finding. type PortProbeAction struct { _ struct{} `type:"structure"` - // Port probe blocked information. + // Indicates whether EC2 blocked the port probe to the instance, such as with + // an ACL. Blocked *bool `locationName:"blocked" type:"boolean"` - // A list of port probe details objects. + // A list of objects related to port probe details. PortProbeDetails []*PortProbeDetail `locationName:"portProbeDetails" type:"list"` } @@ -10050,13 +12167,13 @@ func (s *PortProbeAction) SetPortProbeDetails(v []*PortProbeDetail) *PortProbeAc type PortProbeDetail struct { _ struct{} `type:"structure"` - // Local IP information of the connection. + // The local IP information of the connection. LocalIpDetails *LocalIpDetails `locationName:"localIpDetails" type:"structure"` - // Local port information of the connection. + // The local port information of the connection. LocalPortDetails *LocalPortDetails `locationName:"localPortDetails" type:"structure"` - // Remote IP information of the connection. + // The remote IP information of the connection. RemoteIpDetails *RemoteIpDetails `locationName:"remoteIpDetails" type:"structure"` } @@ -10092,10 +12209,10 @@ func (s *PortProbeDetail) SetRemoteIpDetails(v *RemoteIpDetails) *PortProbeDetai type PrivateIpAddressDetails struct { _ struct{} `type:"structure"` - // Private DNS name of the EC2 instance. + // The private DNS name of the EC2 instance. PrivateDnsName *string `locationName:"privateDnsName" type:"string"` - // Private IP address of the EC2 instance. + // The private IP address of the EC2 instance. 
PrivateIpAddress *string `locationName:"privateIpAddress" type:"string"` } @@ -10121,14 +12238,14 @@ func (s *PrivateIpAddressDetails) SetPrivateIpAddress(v string) *PrivateIpAddres return s } -// Contains information about the product code for the Ec2 instance. +// Contains information about the product code for the EC2 instance. type ProductCode struct { _ struct{} `type:"structure"` - // Product code information. + // The product code information. Code *string `locationName:"code" type:"string"` - // Product code type. + // The product code type. ProductType *string `locationName:"productType" type:"string"` } @@ -10154,23 +12271,57 @@ func (s *ProductCode) SetProductType(v string) *ProductCode { return s } -// Continas information about the remote IP address of the connection. +// Describes the public access policies that apply to the S3 bucket. +type PublicAccess struct { + _ struct{} `type:"structure"` + + // Describes the effective permission on this bucket after factoring all attached + // policies. + EffectivePermission *string `locationName:"effectivePermission" type:"string"` + + // Contains information about how permissions are configured for the S3 bucket. + PermissionConfiguration *PermissionConfiguration `locationName:"permissionConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PublicAccess) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicAccess) GoString() string { + return s.String() +} + +// SetEffectivePermission sets the EffectivePermission field's value. +func (s *PublicAccess) SetEffectivePermission(v string) *PublicAccess { + s.EffectivePermission = &v + return s +} + +// SetPermissionConfiguration sets the PermissionConfiguration field's value. +func (s *PublicAccess) SetPermissionConfiguration(v *PermissionConfiguration) *PublicAccess { + s.PermissionConfiguration = v + return s +} + +// Contains information about the remote IP address of the connection. type RemoteIpDetails struct { _ struct{} `type:"structure"` - // City information of the remote IP address. + // The city information of the remote IP address. City *City `locationName:"city" type:"structure"` - // Country code of the remote IP address. + // The country code of the remote IP address. Country *Country `locationName:"country" type:"structure"` - // Location information of the remote IP address. + // The location information of the remote IP address. GeoLocation *GeoLocation `locationName:"geoLocation" type:"structure"` - // IPV4 remote address of the connection. + // The IPv4 remote address of the connection. IpAddressV4 *string `locationName:"ipAddressV4" type:"string"` - // ISP Organization information of the remote IP address. + // The ISP organization information of the remote IP address. Organization *Organization `locationName:"organization" type:"structure"` } @@ -10218,10 +12369,10 @@ func (s *RemoteIpDetails) SetOrganization(v *Organization) *RemoteIpDetails { type RemotePortDetails struct { _ struct{} `type:"structure"` - // Port number of the remote connection. + // The port number of the remote connection. Port *int64 `locationName:"port" type:"integer"` - // Port name of the remote connection. + // The port name of the remote connection. PortName *string `locationName:"portName" type:"string"` } @@ -10260,8 +12411,11 @@ type Resource struct { // prompted GuardDuty to generate a finding. 
InstanceDetails *InstanceDetails `locationName:"instanceDetails" type:"structure"` - // The type of the AWS resource. + // The type of AWS resource. ResourceType *string `locationName:"resourceType" type:"string"` + + // Contains information on the S3 bucket. + S3BucketDetails []*S3BucketDetail `locationName:"s3BucketDetails" type:"list"` } // String returns the string representation @@ -10292,14 +12446,173 @@ func (s *Resource) SetResourceType(v string) *Resource { return s } -// Contains information about the security groups associated with the EC2 instance. -type SecurityGroup struct { +// SetS3BucketDetails sets the S3BucketDetails field's value. +func (s *Resource) SetS3BucketDetails(v []*S3BucketDetail) *Resource { + s.S3BucketDetails = v + return s +} + +// Contains information on the S3 bucket. +type S3BucketDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the S3 bucket. + Arn *string `locationName:"arn" type:"string"` + + // The date and time the bucket was created at. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // Describes the server side encryption method used in the S3 bucket. + DefaultServerSideEncryption *DefaultServerSideEncryption `locationName:"defaultServerSideEncryption" type:"structure"` + + // The name of the S3 bucket. + Name *string `locationName:"name" type:"string"` + + // The owner of the S3 bucket. + Owner *Owner `locationName:"owner" type:"structure"` + + // Describes the public access policies that apply to the S3 bucket. + PublicAccess *PublicAccess `locationName:"publicAccess" type:"structure"` + + // All tags attached to the S3 bucket + Tags []*Tag `locationName:"tags" type:"list"` + + // Describes whether the bucket is a source or destination bucket. + Type *string `locationName:"type" type:"string"` +} + +// String returns the string representation +func (s S3BucketDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3BucketDetail) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *S3BucketDetail) SetArn(v string) *S3BucketDetail { + s.Arn = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *S3BucketDetail) SetCreatedAt(v time.Time) *S3BucketDetail { + s.CreatedAt = &v + return s +} + +// SetDefaultServerSideEncryption sets the DefaultServerSideEncryption field's value. +func (s *S3BucketDetail) SetDefaultServerSideEncryption(v *DefaultServerSideEncryption) *S3BucketDetail { + s.DefaultServerSideEncryption = v + return s +} + +// SetName sets the Name field's value. +func (s *S3BucketDetail) SetName(v string) *S3BucketDetail { + s.Name = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *S3BucketDetail) SetOwner(v *Owner) *S3BucketDetail { + s.Owner = v + return s +} + +// SetPublicAccess sets the PublicAccess field's value. +func (s *S3BucketDetail) SetPublicAccess(v *PublicAccess) *S3BucketDetail { + s.PublicAccess = v + return s +} + +// SetTags sets the Tags field's value. +func (s *S3BucketDetail) SetTags(v []*Tag) *S3BucketDetail { + s.Tags = v + return s +} + +// SetType sets the Type field's value. +func (s *S3BucketDetail) SetType(v string) *S3BucketDetail { + s.Type = &v + return s +} + +// Describes whether S3 data event logs will be enabled as a data source. +type S3LogsConfiguration struct { _ struct{} `type:"structure"` - // EC2 instance's security group ID. + // The status of S3 data event logs as a data source. 
+ // + // Enable is a required field + Enable *bool `locationName:"enable" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s S3LogsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3LogsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3LogsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3LogsConfiguration"} + if s.Enable == nil { + invalidParams.Add(request.NewErrParamRequired("Enable")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEnable sets the Enable field's value. +func (s *S3LogsConfiguration) SetEnable(v bool) *S3LogsConfiguration { + s.Enable = &v + return s +} + +// Describes whether S3 data event logs will be enabled as a data source. +type S3LogsConfigurationResult struct { + _ struct{} `type:"structure"` + + // A value that describes whether S3 data event logs are automatically enabled + // for new members of the organization. + // + // Status is a required field + Status *string `locationName:"status" min:"1" type:"string" required:"true" enum:"DataSourceStatus"` +} + +// String returns the string representation +func (s S3LogsConfigurationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3LogsConfigurationResult) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *S3LogsConfigurationResult) SetStatus(v string) *S3LogsConfigurationResult { + s.Status = &v + return s +} + +// Contains information about the security groups associated with the EC2 instance. +type SecurityGroup struct { + _ struct{} `type:"structure"` + + // The security group ID of the EC2 instance. GroupId *string `locationName:"groupId" type:"string"` - // EC2 instance's security group name. + // The security group name of the EC2 instance. GroupName *string `locationName:"groupName" type:"string"` } @@ -10329,36 +12642,36 @@ func (s *SecurityGroup) SetGroupName(v string) *SecurityGroup { type Service struct { _ struct{} `type:"structure"` - // Information about the activity described in a finding. + // Information about the activity that is described in a finding. Action *Action `locationName:"action" type:"structure"` // Indicates whether this finding is archived. Archived *bool `locationName:"archived" type:"boolean"` - // Total count of the occurrences of this finding type. + // The total count of the occurrences of this finding type. Count *int64 `locationName:"count" type:"integer"` - // Detector ID for the GuardDuty service. + // The detector ID for the GuardDuty service. DetectorId *string `locationName:"detectorId" min:"1" type:"string"` - // First seen timestamp of the activity that prompted GuardDuty to generate + // The first-seen timestamp of the activity that prompted GuardDuty to generate // this finding. EventFirstSeen *string `locationName:"eventFirstSeen" type:"string"` - // Last seen timestamp of the activity that prompted GuardDuty to generate this - // finding. + // The last-seen timestamp of the activity that prompted GuardDuty to generate + // this finding. EventLastSeen *string `locationName:"eventLastSeen" type:"string"` // An evidence object associated with the service. Evidence *Evidence `locationName:"evidence" type:"structure"` - // Resource role information for this finding. 
+ // The resource role information for this finding. ResourceRole *string `locationName:"resourceRole" type:"string"` // The name of the AWS service (GuardDuty) that generated a finding. ServiceName *string `locationName:"serviceName" type:"string"` - // Feedback left about the finding. + // Feedback that was submitted about the finding. UserFeedback *string `locationName:"userFeedback" type:"string"` } @@ -10436,11 +12749,11 @@ func (s *Service) SetUserFeedback(v string) *Service { type SortCriteria struct { _ struct{} `type:"structure"` - // Represents the finding attribute (for example, accountId) by which to sort - // findings. + // Represents the finding attribute (for example, accountId) to sort findings + // by. AttributeName *string `locationName:"attributeName" type:"string"` - // Order by which the sorted findings are to be displayed. + // The order by which the sorted findings are to be displayed. OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"` } @@ -10528,8 +12841,8 @@ func (s *StartMonitoringMembersInput) SetDetectorId(v string) *StartMonitoringMe type StartMonitoringMembersOutput struct { _ struct{} `type:"structure"` - // A list of objects containing the unprocessed account and a result string - // explaining why it was unprocessed. + // A list of objects that contain the unprocessed account and a result string + // that explains why it was unprocessed. // // UnprocessedAccounts is a required field UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` @@ -10554,14 +12867,13 @@ func (s *StartMonitoringMembersOutput) SetUnprocessedAccounts(v []*UnprocessedAc type StopMonitoringMembersInput struct { _ struct{} `type:"structure"` - // A list of account IDs of the GuardDuty member accounts whose findings you - // want the master account to stop monitoring. + // A list of account IDs for the member accounts to stop monitoring. // // AccountIds is a required field AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` - // The unique ID of the detector of the GuardDuty account that you want to stop - // from monitor members' findings. + // The unique ID of the detector associated with the GuardDuty master account + // that is monitoring member accounts. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` @@ -10614,8 +12926,9 @@ func (s *StopMonitoringMembersInput) SetDetectorId(v string) *StopMonitoringMemb type StopMonitoringMembersOutput struct { _ struct{} `type:"structure"` - // A list of objects containing the unprocessed account and a result string - // explaining why it was unprocessed. + // A list of objects that contain an accountId for each account that could not + // be processed, and a result string that indicates why the account was not + // processed. // // UnprocessedAccounts is a required field UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` @@ -10637,14 +12950,14 @@ func (s *StopMonitoringMembersOutput) SetUnprocessedAccounts(v []*UnprocessedAcc return s } -// Contains information about a tag associated with the Ec2 instance. +// Contains information about a tag associated with the EC2 instance. type Tag struct { _ struct{} `type:"structure"` - // EC2 instance tag key. + // The EC2 instance tag key. Key *string `locationName:"key" type:"string"` - // EC2 instance tag value. + // The EC2 instance tag value. 
Value *string `locationName:"value" type:"string"` } @@ -10778,6 +13091,39 @@ func (s *ThreatIntelligenceDetail) SetThreatNames(v []*string) *ThreatIntelligen return s } +// Contains the total usage with the corresponding currency unit for that value. +type Total struct { + _ struct{} `type:"structure"` + + // The total usage. + Amount *string `locationName:"amount" type:"string"` + + // The currency unit that the amount is given in. + Unit *string `locationName:"unit" type:"string"` +} + +// String returns the string representation +func (s Total) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Total) GoString() string { + return s.String() +} + +// SetAmount sets the Amount field's value. +func (s *Total) SetAmount(v string) *Total { + s.Amount = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *Total) SetUnit(v string) *Total { + s.Unit = &v + return s +} + type UnarchiveFindingsInput struct { _ struct{} `type:"structure"` @@ -10786,7 +13132,7 @@ type UnarchiveFindingsInput struct { // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // IDs of the findings to unarchive. + // The IDs of the findings to unarchive. // // FindingIds is a required field FindingIds []*string `locationName:"findingIds" type:"list" required:"true"` @@ -10847,11 +13193,11 @@ func (s UnarchiveFindingsOutput) GoString() string { return s.String() } -// Contains information about the accounts that were not processed. +// Contains information about the accounts that weren't processed. type UnprocessedAccount struct { _ struct{} `type:"structure"` - // AWS Account ID. + // The AWS account ID. // // AccountId is a required field AccountId *string `locationName:"accountId" min:"12" type:"string" required:"true"` @@ -10959,6 +13305,9 @@ func (s UntagResourceOutput) GoString() string { type UpdateDetectorInput struct { _ struct{} `type:"structure"` + // An object that describes which data sources will be updated. + DataSources *DataSourceConfigurations `locationName:"dataSources" type:"structure"` + // The unique ID of the detector to update. // // DetectorId is a required field @@ -10967,7 +13316,7 @@ type UpdateDetectorInput struct { // Specifies whether the detector is enabled or not enabled. Enable *bool `locationName:"enable" type:"boolean"` - // A enum value that specifies how frequently findings are exported, such as + // An enum value that specifies how frequently findings are exported, such as // to CloudWatch Events. FindingPublishingFrequency *string `locationName:"findingPublishingFrequency" type:"string" enum:"FindingPublishingFrequency"` } @@ -10991,6 +13340,11 @@ func (s *UpdateDetectorInput) Validate() error { if s.DetectorId != nil && len(*s.DetectorId) < 1 { invalidParams.Add(request.NewErrParamMinLen("DetectorId", 1)) } + if s.DataSources != nil { + if err := s.DataSources.Validate(); err != nil { + invalidParams.AddNested("DataSources", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -10998,6 +13352,12 @@ func (s *UpdateDetectorInput) Validate() error { return nil } +// SetDataSources sets the DataSources field's value. +func (s *UpdateDetectorInput) SetDataSources(v *DataSourceConfigurations) *UpdateDetectorInput { + s.DataSources = v + return s +} + // SetDetectorId sets the DetectorId field's value. 
func (s *UpdateDetectorInput) SetDetectorId(v string) *UpdateDetectorInput { s.DetectorId = &v @@ -11174,7 +13534,7 @@ type UpdateFindingsFeedbackInput struct { // Feedback is a required field Feedback *string `locationName:"feedback" type:"string" required:"true" enum:"Feedback"` - // IDs of the findings that you want to mark as useful or not useful. + // The IDs of the findings that you want to mark as useful or not useful. // // FindingIds is a required field FindingIds []*string `locationName:"findingIds" type:"list" required:"true"` @@ -11253,7 +13613,7 @@ func (s UpdateFindingsFeedbackOutput) GoString() string { type UpdateIPSetInput struct { _ struct{} `type:"structure"` - // The updated boolean value that specifies whether the IPSet is active or not. + // The updated Boolean value that specifies whether the IPSet is active or not. Activate *bool `locationName:"activate" type:"boolean"` // The detectorID that specifies the GuardDuty service whose IPSet you want @@ -11267,7 +13627,7 @@ type UpdateIPSetInput struct { // IpSetId is a required field IpSetId *string `location:"uri" locationName:"ipSetId" type:"string" required:"true"` - // The updated URI of the file that contains the IPSet. For example (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key). + // The updated URI of the file that contains the IPSet. For example: https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key. Location *string `locationName:"location" min:"1" type:"string"` // The unique ID that specifies the IPSet that you want to update. @@ -11356,10 +13716,191 @@ func (s UpdateIPSetOutput) GoString() string { return s.String() } +type UpdateMemberDetectorsInput struct { + _ struct{} `type:"structure"` + + // A list of member account IDs to be updated. + // + // AccountIds is a required field + AccountIds []*string `locationName:"accountIds" min:"1" type:"list" required:"true"` + + // An object describes which data sources will be updated. + DataSources *DataSourceConfigurations `locationName:"dataSources" type:"structure"` + + // The detector ID of the master account. + // + // DetectorId is a required field + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateMemberDetectorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMemberDetectorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateMemberDetectorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateMemberDetectorsInput"} + if s.AccountIds == nil { + invalidParams.Add(request.NewErrParamRequired("AccountIds")) + } + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } + if s.DetectorId == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorId")) + } + if s.DetectorId != nil && len(*s.DetectorId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorId", 1)) + } + if s.DataSources != nil { + if err := s.DataSources.Validate(); err != nil { + invalidParams.AddNested("DataSources", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountIds sets the AccountIds field's value. 
+func (s *UpdateMemberDetectorsInput) SetAccountIds(v []*string) *UpdateMemberDetectorsInput { + s.AccountIds = v + return s +} + +// SetDataSources sets the DataSources field's value. +func (s *UpdateMemberDetectorsInput) SetDataSources(v *DataSourceConfigurations) *UpdateMemberDetectorsInput { + s.DataSources = v + return s +} + +// SetDetectorId sets the DetectorId field's value. +func (s *UpdateMemberDetectorsInput) SetDetectorId(v string) *UpdateMemberDetectorsInput { + s.DetectorId = &v + return s +} + +type UpdateMemberDetectorsOutput struct { + _ struct{} `type:"structure"` + + // A list of member account IDs that were unable to be processed along with + // an explanation for why they were not processed. + // + // UnprocessedAccounts is a required field + UnprocessedAccounts []*UnprocessedAccount `locationName:"unprocessedAccounts" type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateMemberDetectorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMemberDetectorsOutput) GoString() string { + return s.String() +} + +// SetUnprocessedAccounts sets the UnprocessedAccounts field's value. +func (s *UpdateMemberDetectorsOutput) SetUnprocessedAccounts(v []*UnprocessedAccount) *UpdateMemberDetectorsOutput { + s.UnprocessedAccounts = v + return s +} + +type UpdateOrganizationConfigurationInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to automatically enable member accounts in the organization. + // + // AutoEnable is a required field + AutoEnable *bool `locationName:"autoEnable" type:"boolean" required:"true"` + + // An object describes which data sources will be updated. + DataSources *OrganizationDataSourceConfigurations `locationName:"dataSources" type:"structure"` + + // The ID of the detector to update the delegated administrator for. + // + // DetectorId is a required field + DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateOrganizationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOrganizationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateOrganizationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateOrganizationConfigurationInput"} + if s.AutoEnable == nil { + invalidParams.Add(request.NewErrParamRequired("AutoEnable")) + } + if s.DetectorId == nil { + invalidParams.Add(request.NewErrParamRequired("DetectorId")) + } + if s.DetectorId != nil && len(*s.DetectorId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DetectorId", 1)) + } + if s.DataSources != nil { + if err := s.DataSources.Validate(); err != nil { + invalidParams.AddNested("DataSources", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoEnable sets the AutoEnable field's value. +func (s *UpdateOrganizationConfigurationInput) SetAutoEnable(v bool) *UpdateOrganizationConfigurationInput { + s.AutoEnable = &v + return s +} + +// SetDataSources sets the DataSources field's value. 
+func (s *UpdateOrganizationConfigurationInput) SetDataSources(v *OrganizationDataSourceConfigurations) *UpdateOrganizationConfigurationInput { + s.DataSources = v + return s +} + +// SetDetectorId sets the DetectorId field's value. +func (s *UpdateOrganizationConfigurationInput) SetDetectorId(v string) *UpdateOrganizationConfigurationInput { + s.DetectorId = &v + return s +} + +type UpdateOrganizationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateOrganizationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOrganizationConfigurationOutput) GoString() string { + return s.String() +} + type UpdatePublishingDestinationInput struct { _ struct{} `type:"structure"` - // The ID of the detector associated with the publishing destinations to update. + // The ID of the publishing destination to update. // // DestinationId is a required field DestinationId *string `location:"uri" locationName:"destinationId" type:"string" required:"true"` @@ -11368,7 +13909,7 @@ type UpdatePublishingDestinationInput struct { // of the publishing destination. DestinationProperties *DestinationProperties `locationName:"destinationProperties" type:"structure"` - // The ID of the + // The ID of the detector associated with the publishing destinations to update. // // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` @@ -11441,7 +13982,7 @@ func (s UpdatePublishingDestinationOutput) GoString() string { type UpdateThreatIntelSetInput struct { _ struct{} `type:"structure"` - // The updated boolean value that specifies whether the ThreateIntelSet is active + // The updated Boolean value that specifies whether the ThreateIntelSet is active // or not. Activate *bool `locationName:"activate" type:"boolean"` @@ -11451,8 +13992,8 @@ type UpdateThreatIntelSetInput struct { // DetectorId is a required field DetectorId *string `location:"uri" locationName:"detectorId" min:"1" type:"string" required:"true"` - // The updated URI of the file that contains the ThreateIntelSet. For example - // (https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key) + // The updated URI of the file that contains the ThreateIntelSet. For example: + // https://s3.us-west-2.amazonaws.com/my-bucket/my-object-key. Location *string `locationName:"location" min:"1" type:"string"` // The unique ID that specifies the ThreatIntelSet that you want to update. @@ -11546,11 +14087,287 @@ func (s UpdateThreatIntelSetOutput) GoString() string { return s.String() } +// Contains information on the total of usage based on account IDs. +type UsageAccountResult struct { + _ struct{} `type:"structure"` + + // The Account ID that generated usage. + AccountId *string `locationName:"accountId" min:"12" type:"string"` + + // Represents the total of usage for the Account ID. + Total *Total `locationName:"total" type:"structure"` +} + +// String returns the string representation +func (s UsageAccountResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UsageAccountResult) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *UsageAccountResult) SetAccountId(v string) *UsageAccountResult { + s.AccountId = &v + return s +} + +// SetTotal sets the Total field's value. 
+func (s *UsageAccountResult) SetTotal(v *Total) *UsageAccountResult { + s.Total = v + return s +} + +// Contains information about the criteria used to query usage statistics. +type UsageCriteria struct { + _ struct{} `type:"structure"` + + // The account IDs to aggregate usage statistics from. + AccountIds []*string `locationName:"accountIds" min:"1" type:"list"` + + // The data sources to aggregate usage statistics from. + // + // DataSources is a required field + DataSources []*string `locationName:"dataSources" type:"list" required:"true"` + + // The resources to aggregate usage statistics from. Only accepts exact resource + // names. + Resources []*string `locationName:"resources" type:"list"` +} + +// String returns the string representation +func (s UsageCriteria) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UsageCriteria) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UsageCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UsageCriteria"} + if s.AccountIds != nil && len(s.AccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountIds", 1)) + } + if s.DataSources == nil { + invalidParams.Add(request.NewErrParamRequired("DataSources")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountIds sets the AccountIds field's value. +func (s *UsageCriteria) SetAccountIds(v []*string) *UsageCriteria { + s.AccountIds = v + return s +} + +// SetDataSources sets the DataSources field's value. +func (s *UsageCriteria) SetDataSources(v []*string) *UsageCriteria { + s.DataSources = v + return s +} + +// SetResources sets the Resources field's value. +func (s *UsageCriteria) SetResources(v []*string) *UsageCriteria { + s.Resources = v + return s +} + +// Contains information on the result of usage based on data source type. +type UsageDataSourceResult struct { + _ struct{} `type:"structure"` + + // The data source type that generated usage. + DataSource *string `locationName:"dataSource" type:"string" enum:"DataSource"` + + // Represents the total of usage for the specified data source. + Total *Total `locationName:"total" type:"structure"` +} + +// String returns the string representation +func (s UsageDataSourceResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UsageDataSourceResult) GoString() string { + return s.String() +} + +// SetDataSource sets the DataSource field's value. +func (s *UsageDataSourceResult) SetDataSource(v string) *UsageDataSourceResult { + s.DataSource = &v + return s +} + +// SetTotal sets the Total field's value. +func (s *UsageDataSourceResult) SetTotal(v *Total) *UsageDataSourceResult { + s.Total = v + return s +} + +// Contains information on the sum of usage based on an AWS resource. +type UsageResourceResult struct { + _ struct{} `type:"structure"` + + // The AWS resource that generated usage. + Resource *string `locationName:"resource" type:"string"` + + // Represents the sum total of usage for the specified resource type. 
+ Total *Total `locationName:"total" type:"structure"` +} + +// String returns the string representation +func (s UsageResourceResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UsageResourceResult) GoString() string { + return s.String() +} + +// SetResource sets the Resource field's value. +func (s *UsageResourceResult) SetResource(v string) *UsageResourceResult { + s.Resource = &v + return s +} + +// SetTotal sets the Total field's value. +func (s *UsageResourceResult) SetTotal(v *Total) *UsageResourceResult { + s.Total = v + return s +} + +// Contains the result of GuardDuty usage. If a UsageStatisticType is provided +// the result for other types will be null. +type UsageStatistics struct { + _ struct{} `type:"structure"` + + // The usage statistic sum organized by account ID. + SumByAccount []*UsageAccountResult `locationName:"sumByAccount" type:"list"` + + // The usage statistic sum organized by on data source. + SumByDataSource []*UsageDataSourceResult `locationName:"sumByDataSource" type:"list"` + + // The usage statistic sum organized by resource. + SumByResource []*UsageResourceResult `locationName:"sumByResource" type:"list"` + + // Lists the top 50 resources that have generated the most GuardDuty usage, + // in order from most to least expensive. + TopResources []*UsageResourceResult `locationName:"topResources" type:"list"` +} + +// String returns the string representation +func (s UsageStatistics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UsageStatistics) GoString() string { + return s.String() +} + +// SetSumByAccount sets the SumByAccount field's value. +func (s *UsageStatistics) SetSumByAccount(v []*UsageAccountResult) *UsageStatistics { + s.SumByAccount = v + return s +} + +// SetSumByDataSource sets the SumByDataSource field's value. +func (s *UsageStatistics) SetSumByDataSource(v []*UsageDataSourceResult) *UsageStatistics { + s.SumByDataSource = v + return s +} + +// SetSumByResource sets the SumByResource field's value. +func (s *UsageStatistics) SetSumByResource(v []*UsageResourceResult) *UsageStatistics { + s.SumByResource = v + return s +} + +// SetTopResources sets the TopResources field's value. 
+func (s *UsageStatistics) SetTopResources(v []*UsageResourceResult) *UsageStatistics { + s.TopResources = v + return s +} + +const ( + // AdminStatusEnabled is a AdminStatus enum value + AdminStatusEnabled = "ENABLED" + + // AdminStatusDisableInProgress is a AdminStatus enum value + AdminStatusDisableInProgress = "DISABLE_IN_PROGRESS" +) + +// AdminStatus_Values returns all elements of the AdminStatus enum +func AdminStatus_Values() []string { + return []string{ + AdminStatusEnabled, + AdminStatusDisableInProgress, + } +} + +const ( + // DataSourceFlowLogs is a DataSource enum value + DataSourceFlowLogs = "FLOW_LOGS" + + // DataSourceCloudTrail is a DataSource enum value + DataSourceCloudTrail = "CLOUD_TRAIL" + + // DataSourceDnsLogs is a DataSource enum value + DataSourceDnsLogs = "DNS_LOGS" + + // DataSourceS3Logs is a DataSource enum value + DataSourceS3Logs = "S3_LOGS" +) + +// DataSource_Values returns all elements of the DataSource enum +func DataSource_Values() []string { + return []string{ + DataSourceFlowLogs, + DataSourceCloudTrail, + DataSourceDnsLogs, + DataSourceS3Logs, + } +} + +const ( + // DataSourceStatusEnabled is a DataSourceStatus enum value + DataSourceStatusEnabled = "ENABLED" + + // DataSourceStatusDisabled is a DataSourceStatus enum value + DataSourceStatusDisabled = "DISABLED" +) + +// DataSourceStatus_Values returns all elements of the DataSourceStatus enum +func DataSourceStatus_Values() []string { + return []string{ + DataSourceStatusEnabled, + DataSourceStatusDisabled, + } +} + const ( // DestinationTypeS3 is a DestinationType enum value DestinationTypeS3 = "S3" ) +// DestinationType_Values returns all elements of the DestinationType enum +func DestinationType_Values() []string { + return []string{ + DestinationTypeS3, + } +} + const ( // DetectorStatusEnabled is a DetectorStatus enum value DetectorStatusEnabled = "ENABLED" @@ -11559,6 +14376,14 @@ const ( DetectorStatusDisabled = "DISABLED" ) +// DetectorStatus_Values returns all elements of the DetectorStatus enum +func DetectorStatus_Values() []string { + return []string{ + DetectorStatusEnabled, + DetectorStatusDisabled, + } +} + const ( // FeedbackUseful is a Feedback enum value FeedbackUseful = "USEFUL" @@ -11567,6 +14392,14 @@ const ( FeedbackNotUseful = "NOT_USEFUL" ) +// Feedback_Values returns all elements of the Feedback enum +func Feedback_Values() []string { + return []string{ + FeedbackUseful, + FeedbackNotUseful, + } +} + const ( // FilterActionNoop is a FilterAction enum value FilterActionNoop = "NOOP" @@ -11575,6 +14408,14 @@ const ( FilterActionArchive = "ARCHIVE" ) +// FilterAction_Values returns all elements of the FilterAction enum +func FilterAction_Values() []string { + return []string{ + FilterActionNoop, + FilterActionArchive, + } +} + const ( // FindingPublishingFrequencyFifteenMinutes is a FindingPublishingFrequency enum value FindingPublishingFrequencyFifteenMinutes = "FIFTEEN_MINUTES" @@ -11586,11 +14427,27 @@ const ( FindingPublishingFrequencySixHours = "SIX_HOURS" ) +// FindingPublishingFrequency_Values returns all elements of the FindingPublishingFrequency enum +func FindingPublishingFrequency_Values() []string { + return []string{ + FindingPublishingFrequencyFifteenMinutes, + FindingPublishingFrequencyOneHour, + FindingPublishingFrequencySixHours, + } +} + const ( // FindingStatisticTypeCountBySeverity is a FindingStatisticType enum value FindingStatisticTypeCountBySeverity = "COUNT_BY_SEVERITY" ) +// FindingStatisticType_Values returns all elements of the FindingStatisticType 
enum +func FindingStatisticType_Values() []string { + return []string{ + FindingStatisticTypeCountBySeverity, + } +} + const ( // IpSetFormatTxt is a IpSetFormat enum value IpSetFormatTxt = "TXT" @@ -11611,6 +14468,18 @@ const ( IpSetFormatFireEye = "FIRE_EYE" ) +// IpSetFormat_Values returns all elements of the IpSetFormat enum +func IpSetFormat_Values() []string { + return []string{ + IpSetFormatTxt, + IpSetFormatStix, + IpSetFormatOtxCsv, + IpSetFormatAlienVault, + IpSetFormatProofPoint, + IpSetFormatFireEye, + } +} + const ( // IpSetStatusInactive is a IpSetStatus enum value IpSetStatusInactive = "INACTIVE" @@ -11634,6 +14503,19 @@ const ( IpSetStatusDeleted = "DELETED" ) +// IpSetStatus_Values returns all elements of the IpSetStatus enum +func IpSetStatus_Values() []string { + return []string{ + IpSetStatusInactive, + IpSetStatusActivating, + IpSetStatusActive, + IpSetStatusDeactivating, + IpSetStatusError, + IpSetStatusDeletePending, + IpSetStatusDeleted, + } +} + const ( // OrderByAsc is a OrderBy enum value OrderByAsc = "ASC" @@ -11642,6 +14524,14 @@ const ( OrderByDesc = "DESC" ) +// OrderBy_Values returns all elements of the OrderBy enum +func OrderBy_Values() []string { + return []string{ + OrderByAsc, + OrderByDesc, + } +} + const ( // PublishingStatusPendingVerification is a PublishingStatus enum value PublishingStatusPendingVerification = "PENDING_VERIFICATION" @@ -11656,6 +14546,16 @@ const ( PublishingStatusStopped = "STOPPED" ) +// PublishingStatus_Values returns all elements of the PublishingStatus enum +func PublishingStatus_Values() []string { + return []string{ + PublishingStatusPendingVerification, + PublishingStatusPublishing, + PublishingStatusUnableToPublishFixDestinationProperty, + PublishingStatusStopped, + } +} + const ( // ThreatIntelSetFormatTxt is a ThreatIntelSetFormat enum value ThreatIntelSetFormatTxt = "TXT" @@ -11676,6 +14576,18 @@ const ( ThreatIntelSetFormatFireEye = "FIRE_EYE" ) +// ThreatIntelSetFormat_Values returns all elements of the ThreatIntelSetFormat enum +func ThreatIntelSetFormat_Values() []string { + return []string{ + ThreatIntelSetFormatTxt, + ThreatIntelSetFormatStix, + ThreatIntelSetFormatOtxCsv, + ThreatIntelSetFormatAlienVault, + ThreatIntelSetFormatProofPoint, + ThreatIntelSetFormatFireEye, + } +} + const ( // ThreatIntelSetStatusInactive is a ThreatIntelSetStatus enum value ThreatIntelSetStatusInactive = "INACTIVE" @@ -11698,3 +14610,40 @@ const ( // ThreatIntelSetStatusDeleted is a ThreatIntelSetStatus enum value ThreatIntelSetStatusDeleted = "DELETED" ) + +// ThreatIntelSetStatus_Values returns all elements of the ThreatIntelSetStatus enum +func ThreatIntelSetStatus_Values() []string { + return []string{ + ThreatIntelSetStatusInactive, + ThreatIntelSetStatusActivating, + ThreatIntelSetStatusActive, + ThreatIntelSetStatusDeactivating, + ThreatIntelSetStatusError, + ThreatIntelSetStatusDeletePending, + ThreatIntelSetStatusDeleted, + } +} + +const ( + // UsageStatisticTypeSumByAccount is a UsageStatisticType enum value + UsageStatisticTypeSumByAccount = "SUM_BY_ACCOUNT" + + // UsageStatisticTypeSumByDataSource is a UsageStatisticType enum value + UsageStatisticTypeSumByDataSource = "SUM_BY_DATA_SOURCE" + + // UsageStatisticTypeSumByResource is a UsageStatisticType enum value + UsageStatisticTypeSumByResource = "SUM_BY_RESOURCE" + + // UsageStatisticTypeTopResources is a UsageStatisticType enum value + UsageStatisticTypeTopResources = "TOP_RESOURCES" +) + +// UsageStatisticType_Values returns all elements of the UsageStatisticType 
enum +func UsageStatisticType_Values() []string { + return []string{ + UsageStatisticTypeSumByAccount, + UsageStatisticTypeSumByDataSource, + UsageStatisticTypeSumByResource, + UsageStatisticTypeTopResources, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/guardduty/doc.go b/vendor/github.com/aws/aws-sdk-go/service/guardduty/doc.go index fcc8098df..c14c96412 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/guardduty/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/guardduty/doc.go @@ -5,19 +5,22 @@ // // Amazon GuardDuty is a continuous security monitoring service that analyzes // and processes the following data sources: VPC Flow Logs, AWS CloudTrail event -// logs, and DNS logs. It uses threat intelligence feeds, such as lists of malicious -// IPs and domains, and machine learning to identify unexpected and potentially -// unauthorized and malicious activity within your AWS environment. This can +// logs, and DNS logs. It uses threat intelligence feeds (such as lists of malicious +// IPs and domains) and machine learning to identify unexpected, potentially +// unauthorized, and malicious activity within your AWS environment. This can // include issues like escalations of privileges, uses of exposed credentials, // or communication with malicious IPs, URLs, or domains. For example, GuardDuty -// can detect compromised EC2 instances serving malware or mining bitcoin. It -// also monitors AWS account access behavior for signs of compromise, such as -// unauthorized infrastructure deployments, like instances deployed in a region -// that has never been used, or unusual API calls, like a password policy change -// to reduce password strength. GuardDuty informs you of the status of your -// AWS environment by producing security findings that you can view in the GuardDuty -// console or through Amazon CloudWatch events. For more information, see Amazon -// GuardDuty User Guide (https://docs.aws.amazon.com/guardduty/latest/ug/what-is-guardduty.html). +// can detect compromised EC2 instances that serve malware or mine bitcoin. +// +// GuardDuty also monitors AWS account access behavior for signs of compromise. +// Some examples of this are unauthorized infrastructure deployments such as +// EC2 instances deployed in a Region that has never been used, or unusual API +// calls like a password policy change to reduce password strength. +// +// GuardDuty informs you of the status of your AWS environment by producing +// security findings that you can view in the GuardDuty console or through Amazon +// CloudWatch events. For more information, see the Amazon GuardDuty User Guide +// (https://docs.aws.amazon.com/guardduty/latest/ug/what-is-guardduty.html) . // // See https://docs.aws.amazon.com/goto/WebAPI/guardduty-2017-11-28 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/guardduty/errors.go b/vendor/github.com/aws/aws-sdk-go/service/guardduty/errors.go index 63b0c1f7d..8a4f92d1b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/guardduty/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/guardduty/errors.go @@ -11,13 +11,13 @@ const ( // ErrCodeBadRequestException for service response error code // "BadRequestException". // - // Bad request exception object. + // A bad request exception object. ErrCodeBadRequestException = "BadRequestException" // ErrCodeInternalServerErrorException for service response error code // "InternalServerErrorException". // - // Internal server error exception object. 
+ // An internal server error exception object. ErrCodeInternalServerErrorException = "InternalServerErrorException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go b/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go index 17a8f29ba..024e65573 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go index a17131ac1..7c3a5dd0d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go @@ -82,7 +82,8 @@ func (c *IAM) AddClientIDToOpenIDConnectProviderRequest(input *AddClientIDToOpen // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -156,10 +157,12 @@ func (c *IAM) AddRoleToInstanceProfileRequest(input *AddRoleToInstanceProfileInp // AddRoleToInstanceProfile API operation for AWS Identity and Access Management. // // Adds the specified IAM role to the specified instance profile. An instance -// profile can contain only one role, and this limit cannot be increased. You -// can remove the existing role and then add a different role to an instance -// profile. You must then wait for the change to appear across all of AWS because -// of eventual consistency (https://en.wikipedia.org/wiki/Eventual_consistency). +// profile can contain only one role. (The number and size of IAM resources +// in an AWS account are limited. For more information, see IAM and STS Quotas +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html) +// in the IAM User Guide.) You can remove the existing role and then add a different +// role to an instance profile. You must then wait for the change to appear +// across all of AWS because of eventual consistency (https://en.wikipedia.org/wiki/Eventual_consistency). // To force the change, you must disassociate the instance profile (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DisassociateIamInstanceProfile.html) // and then associate the instance profile (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateIamInstanceProfile.html), // or you can stop your instance and then restart it. @@ -189,7 +192,8 @@ func (c *IAM) AddRoleToInstanceProfileRequest(input *AddRoleToInstanceProfileInp // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. 
// // * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" // The request was rejected because only the service that depends on the service-linked @@ -284,7 +288,8 @@ func (c *IAM) AddUserToGroupRequest(input *AddUserToGroupInput) (req *request.Re // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -380,7 +385,8 @@ func (c *IAM) AttachGroupPolicyRequest(input *AttachGroupPolicyInput) (req *requ // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeInvalidInputException "InvalidInput" // The request was rejected because an invalid or out-of-range value was supplied @@ -488,7 +494,8 @@ func (c *IAM) AttachRolePolicyRequest(input *AttachRolePolicyInput) (req *reques // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeInvalidInputException "InvalidInput" // The request was rejected because an invalid or out-of-range value was supplied @@ -598,7 +605,8 @@ func (c *IAM) AttachUserPolicyRequest(input *AttachUserPolicyInput) (req *reques // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeInvalidInputException "InvalidInput" // The request was rejected because an invalid or out-of-range value was supplied @@ -704,7 +712,8 @@ func (c *IAM) ChangePasswordRequest(input *ChangePasswordInput) (req *request.Re // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeEntityTemporarilyUnmodifiableException "EntityTemporarilyUnmodifiable" // The request was rejected because it referenced an entity that is temporarily @@ -795,8 +804,8 @@ func (c *IAM) CreateAccessKeyRequest(input *CreateAccessKeyInput) (req *request. // to manage AWS account root user credentials. This is true even if the AWS // account has no associated users. // -// For information about limits on the number of keys you can create, see Limitations -// on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// The number and size of IAM resources in an AWS account are limited. For more +// information, see IAM and STS Quotas (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html) // in the IAM User Guide. 
// // To ensure the security of your AWS account, the secret access key is accessible @@ -819,7 +828,8 @@ func (c *IAM) CreateAccessKeyRequest(input *CreateAccessKeyInput) (req *request. // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -910,7 +920,8 @@ func (c *IAM) CreateAccountAliasRequest(input *CreateAccountAliasInput) (req *re // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -984,8 +995,8 @@ func (c *IAM) CreateGroupRequest(input *CreateGroupInput) (req *request.Request, // // Creates a new group. // -// For information about the number of groups you can create, see Limitations -// on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// The number and size of IAM resources in an AWS account are limited. For more +// information, see IAM and STS Quotas (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -998,7 +1009,8 @@ func (c *IAM) CreateGroupRequest(input *CreateGroupInput) (req *request.Request, // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" // The request was rejected because it attempted to create a resource that already @@ -1081,8 +1093,8 @@ func (c *IAM) CreateInstanceProfileRequest(input *CreateInstanceProfileInput) (r // Creates a new instance profile. For information about instance profiles, // go to About Instance Profiles (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). // -// For information about the number of instance profiles you can create, see -// Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// The number and size of IAM resources in an AWS account are limited. For more +// information, see IAM and STS Quotas (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1099,7 +1111,8 @@ func (c *IAM) CreateInstanceProfileRequest(input *CreateInstanceProfileInput) (r // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. 
The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -1198,7 +1211,8 @@ func (c *IAM) CreateLoginProfileRequest(input *CreateLoginProfileInput) (req *re // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -1312,7 +1326,8 @@ func (c *IAM) CreateOpenIDConnectProviderRequest(input *CreateOpenIDConnectProvi // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -1409,7 +1424,8 @@ func (c *IAM) CreatePolicyRequest(input *CreatePolicyInput) (req *request.Reques // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" // The request was rejected because it attempted to create a resource that already @@ -1524,7 +1540,8 @@ func (c *IAM) CreatePolicyVersionRequest(input *CreatePolicyVersionInput) (req * // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -1598,8 +1615,8 @@ func (c *IAM) CreateRoleRequest(input *CreateRoleInput) (req *request.Request, o // // Creates a new role for your AWS account. For more information about roles, // go to IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). -// For information about limitations on role names and the number of roles you -// can create, go to Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// The number and size of IAM resources in an AWS account are limited. For more +// information, see IAM and STS Quotas (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1612,7 +1629,8 @@ func (c *IAM) CreateRoleRequest(input *CreateRoleInput) (req *request.Request, o // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. 
The error message describes the limit +// exceeded. // // * ErrCodeInvalidInputException "InvalidInput" // The request was rejected because an invalid or out-of-range value was supplied @@ -1742,7 +1760,8 @@ func (c *IAM) CreateSAMLProviderRequest(input *CreateSAMLProviderInput) (req *re // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -1840,7 +1859,8 @@ func (c *IAM) CreateServiceLinkedRoleRequest(input *CreateServiceLinkedRoleInput // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeNoSuchEntityException "NoSuchEntity" // The request was rejected because it referenced a resource entity that does @@ -1941,7 +1961,8 @@ func (c *IAM) CreateServiceSpecificCredentialRequest(input *CreateServiceSpecifi // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeNoSuchEntityException "NoSuchEntity" // The request was rejected because it referenced a resource entity that does @@ -2018,8 +2039,8 @@ func (c *IAM) CreateUserRequest(input *CreateUserInput) (req *request.Request, o // // Creates a new IAM user for your AWS account. // -// For information about limitations on the number of IAM users you can create, -// see Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// The number and size of IAM resources in an AWS account are limited. For more +// information, see IAM and STS Quotas (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2032,7 +2053,8 @@ func (c *IAM) CreateUserRequest(input *CreateUserInput) (req *request.Request, o // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" // The request was rejected because it attempted to create a resource that already @@ -2127,8 +2149,8 @@ func (c *IAM) CreateVirtualMFADeviceRequest(input *CreateVirtualMFADeviceInput) // go to Using a Virtual MFA Device (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) // in the IAM User Guide. 
// -// For information about limits on the number of MFA devices you can create, -// see Limitations on Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// The number and size of IAM resources in an AWS account are limited. For more +// information, see IAM and STS Quotas (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html) // in the IAM User Guide. // // The seed information contained in the QR code and the Base32 string should @@ -2147,7 +2169,8 @@ func (c *IAM) CreateVirtualMFADeviceRequest(input *CreateVirtualMFADeviceInput) // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" // The request was rejected because it attempted to create a resource that already @@ -2251,7 +2274,8 @@ func (c *IAM) DeactivateMFADeviceRequest(input *DeactivateMFADeviceInput) (req * // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -2346,7 +2370,8 @@ func (c *IAM) DeleteAccessKeyRequest(input *DeleteAccessKeyInput) (req *request. // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -2437,7 +2462,8 @@ func (c *IAM) DeleteAccountAliasRequest(input *DeleteAccountAliasInput) (req *re // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -2526,7 +2552,8 @@ func (c *IAM) DeleteAccountPasswordPolicyRequest(input *DeleteAccountPasswordPol // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -2620,7 +2647,8 @@ func (c *IAM) DeleteGroupRequest(input *DeleteGroupInput) (req *request.Request, // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. 
The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -2715,7 +2743,8 @@ func (c *IAM) DeleteGroupPolicyRequest(input *DeleteGroupPolicyInput) (req *requ // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -2817,7 +2846,8 @@ func (c *IAM) DeleteInstanceProfileRequest(input *DeleteInstanceProfileInput) (r // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -2918,7 +2948,8 @@ func (c *IAM) DeleteLoginProfileRequest(input *DeleteLoginProfileInput) (req *re // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -3125,7 +3156,8 @@ func (c *IAM) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Reques // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeInvalidInputException "InvalidInput" // The request was rejected because an invalid or out-of-range value was supplied @@ -3230,7 +3262,8 @@ func (c *IAM) DeletePolicyVersionRequest(input *DeletePolicyVersionInput) (req * // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeInvalidInputException "InvalidInput" // The request was rejected because an invalid or out-of-range value was supplied @@ -3337,7 +3370,8 @@ func (c *IAM) DeleteRoleRequest(input *DeleteRoleInput) (req *request.Request, o // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. 
// // * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" // The request was rejected because only the service that depends on the service-linked @@ -3538,7 +3572,8 @@ func (c *IAM) DeleteRolePolicyRequest(input *DeleteRolePolicyInput) (req *reques // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" // The request was rejected because only the service that depends on the service-linked @@ -3640,7 +3675,8 @@ func (c *IAM) DeleteSAMLProviderRequest(input *DeleteSAMLProviderInput) (req *re // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeNoSuchEntityException "NoSuchEntity" // The request was rejected because it referenced a resource entity that does @@ -3839,7 +3875,8 @@ func (c *IAM) DeleteServerCertificateRequest(input *DeleteServerCertificateInput // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -3946,7 +3983,8 @@ func (c *IAM) DeleteServiceLinkedRoleRequest(input *DeleteServiceLinkedRoleInput // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -4122,7 +4160,8 @@ func (c *IAM) DeleteSigningCertificateRequest(input *DeleteSigningCertificateInp // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -4229,7 +4268,8 @@ func (c *IAM) DeleteUserRequest(input *DeleteUserInput) (req *request.Request, o // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. 
// // * ErrCodeNoSuchEntityException "NoSuchEntity" // The request was rejected because it referenced a resource entity that does @@ -4426,7 +4466,8 @@ func (c *IAM) DeleteUserPolicyRequest(input *DeleteUserPolicyInput) (req *reques // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -4522,7 +4563,8 @@ func (c *IAM) DeleteVirtualMFADeviceRequest(input *DeleteVirtualMFADeviceInput) // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -4616,7 +4658,8 @@ func (c *IAM) DetachGroupPolicyRequest(input *DetachGroupPolicyInput) (req *requ // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeInvalidInputException "InvalidInput" // The request was rejected because an invalid or out-of-range value was supplied @@ -4714,7 +4757,8 @@ func (c *IAM) DetachRolePolicyRequest(input *DetachRolePolicyInput) (req *reques // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeInvalidInputException "InvalidInput" // The request was rejected because an invalid or out-of-range value was supplied @@ -4818,7 +4862,8 @@ func (c *IAM) DetachUserPolicyRequest(input *DetachUserPolicyInput) (req *reques // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeInvalidInputException "InvalidInput" // The request was rejected because an invalid or out-of-range value was supplied @@ -4923,7 +4968,8 @@ func (c *IAM) EnableMFADeviceRequest(input *EnableMFADeviceInput) (req *request. // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. 
// // * ErrCodeNoSuchEntityException "NoSuchEntity" // The request was rejected because it referenced a resource entity that does @@ -5013,7 +5059,8 @@ func (c *IAM) GenerateCredentialReportRequest(input *GenerateCredentialReportInp // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -5302,7 +5349,9 @@ func (c *IAM) GenerateServiceLastAccessedDetailsRequest(input *GenerateServiceLa // * GetServiceLastAccessedDetails – Use this operation for users, groups, // roles, or policies to list every AWS service that the resource could access // using permissions policies. For each service, the response includes information -// about the most recent access attempt. +// about the most recent access attempt. The JobId returned by GenerateServiceLastAccessedDetail +// must be used by the same role within a session, or by the same user when +// used to call GetServiceLastAccessedDetail. // // * GetServiceLastAccessedDetailsWithEntities – Use this operation for // groups and policies to list information about the associated entities @@ -5324,8 +5373,8 @@ func (c *IAM) GenerateServiceLastAccessedDetailsRequest(input *GenerateServiceLa // see Evaluating Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-basics) // in the IAM User Guide. // -// For more information about service last accessed data, see Reducing Policy -// Scope by Viewing User Activity (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) +// For more information about service and action last accessed data, see Reducing +// Permissions Using Service Last Accessed Data (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5724,8 +5773,8 @@ func (c *IAM) GetAccountSummaryRequest(input *GetAccountSummaryInput) (req *requ // // Retrieves information about IAM entity usage and IAM quotas in the AWS account. // -// For information about limitations on IAM entities, see Limitations on IAM -// Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// The number and size of IAM resources in an AWS account are limited. For more +// information, see IAM and STS Quotas (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -7408,6 +7457,15 @@ func (c *IAM) GetServiceLastAccessedDetailsRequest(input *GetServiceLastAccessed // // By default, the list is sorted by service namespace. // +// If you specified ACTION_LEVEL granularity when you generated the report, +// this operation returns service and action last accessed data. This includes +// the most recent access attempt for each tracked action within a service. +// Otherwise, this operation returns only service data. 
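// Illustrative sketch (not part of the vendored SDK source): how a caller might
// request an action-level service last accessed report with the new Granularity
// parameter and then fetch it with the returned JobId. The role ARN is a
// placeholder assumption; all API names come from the vendored code below.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := iam.New(sess)

	// Generate a report with action-level granularity so the response includes
	// tracked actions, not just services.
	gen, err := svc.GenerateServiceLastAccessedDetails(&iam.GenerateServiceLastAccessedDetailsInput{
		Arn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		Granularity: aws.String(iam.AccessAdvisorUsageGranularityTypeActionLevel),
	})
	if err != nil {
		fmt.Println("generate failed:", err)
		return
	}

	// Poll with the same principal that generated the report, since the JobId
	// is scoped to that caller.
	for {
		out, err := svc.GetServiceLastAccessedDetails(&iam.GetServiceLastAccessedDetailsInput{
			JobId: gen.JobId,
		})
		if err != nil {
			fmt.Println("get failed:", err)
			return
		}
		if aws.StringValue(out.JobStatus) != iam.JobStatusTypeInProgress {
			fmt.Println("job type:", aws.StringValue(out.JobType))
			for _, s := range out.ServicesLastAccessed {
				fmt.Println("service accessed:", aws.StringValue(s.ServiceName))
			}
			return
		}
		time.Sleep(2 * time.Second)
	}
}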
+// +// For more information about service and action last accessed data, see Reducing +// Permissions Using Service Last Accessed Data (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) +// in the IAM User Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -11713,7 +11771,8 @@ func (c *IAM) PutGroupPolicyRequest(input *PutGroupPolicyInput) (req *request.Re // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" // The request was rejected because the policy document was malformed. The error @@ -11939,7 +11998,8 @@ func (c *IAM) PutRolePolicyRequest(input *PutRolePolicyInput) (req *request.Requ // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" // The request was rejected because the policy document was malformed. The error @@ -12157,7 +12217,8 @@ func (c *IAM) PutUserPolicyRequest(input *PutUserPolicyInput) (req *request.Requ // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" // The request was rejected because the policy document was malformed. The error @@ -12357,7 +12418,8 @@ func (c *IAM) RemoveRoleFromInstanceProfileRequest(input *RemoveRoleFromInstance // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" // The request was rejected because only the service that depends on the service-linked @@ -12452,7 +12514,8 @@ func (c *IAM) RemoveUserFromGroupRequest(input *RemoveUserFromGroupInput) (req * // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -12633,7 +12696,8 @@ func (c *IAM) ResyncMFADeviceRequest(input *ResyncMFADeviceInput) (req *request. 
// // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -12735,7 +12799,8 @@ func (c *IAM) SetDefaultPolicyVersionRequest(input *SetDefaultPolicyVersionInput // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -13290,7 +13355,8 @@ func (c *IAM) TagRoleRequest(input *TagRoleInput) (req *request.Request, output // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeInvalidInputException "InvalidInput" // The request was rejected because an invalid or out-of-range value was supplied @@ -13420,7 +13486,8 @@ func (c *IAM) TagUserRequest(input *TagUserInput) (req *request.Request, output // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeInvalidInputException "InvalidInput" // The request was rejected because an invalid or out-of-range value was supplied @@ -13713,7 +13780,8 @@ func (c *IAM) UpdateAccessKeyRequest(input *UpdateAccessKeyInput) (req *request. // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -13817,7 +13885,8 @@ func (c *IAM) UpdateAccountPasswordPolicyRequest(input *UpdateAccountPasswordPol // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -13913,7 +13982,8 @@ func (c *IAM) UpdateAssumeRolePolicyRequest(input *UpdateAssumeRolePolicyInput) // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. 
The error message describes the limit +// exceeded. // // * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" // The request was rejected because only the service that depends on the service-linked @@ -14023,7 +14093,8 @@ func (c *IAM) UpdateGroupRequest(input *UpdateGroupInput) (req *request.Request, // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -14126,7 +14197,8 @@ func (c *IAM) UpdateLoginProfileRequest(input *UpdateLoginProfileInput) (req *re // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -14507,7 +14579,8 @@ func (c *IAM) UpdateSAMLProviderRequest(input *UpdateSAMLProviderInput) (req *re // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -14709,7 +14782,8 @@ func (c *IAM) UpdateServerCertificateRequest(input *UpdateServerCertificateInput // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -14890,7 +14964,8 @@ func (c *IAM) UpdateSigningCertificateRequest(input *UpdateSigningCertificateInp // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeServiceFailureException "ServiceFailure" // The request processing has failed because of an unknown error, exception @@ -14990,7 +15065,8 @@ func (c *IAM) UpdateUserRequest(input *UpdateUserInput) (req *request.Request, o // // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. 
// // * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" // The request was rejected because it attempted to create a resource that already @@ -15095,7 +15171,8 @@ func (c *IAM) UploadSSHPublicKeyRequest(input *UploadSSHPublicKeyInput) (req *re // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeNoSuchEntityException "NoSuchEntity" // The request was rejected because it referenced a resource entity that does @@ -15216,7 +15293,8 @@ func (c *IAM) UploadServerCertificateRequest(input *UploadServerCertificateInput // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" // The request was rejected because it attempted to create a resource that already @@ -15329,7 +15407,8 @@ func (c *IAM) UploadSigningCertificateRequest(input *UploadSigningCertificateInp // Returned Error Codes: // * ErrCodeLimitExceededException "LimitExceeded" // The request was rejected because it attempted to create resources beyond -// the current AWS account limits. The error message describes the limit exceeded. +// the current AWS account limitations. The error message describes the limit +// exceeded. // // * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" // The request was rejected because it attempted to create a resource that already @@ -16341,7 +16420,7 @@ func (s ChangePasswordOutput) GoString() string { // evaluating the Condition elements of the input policies. // // This data type is used as an input parameter to SimulateCustomPolicy and -// SimulatePrincipalPolicy . +// SimulatePrincipalPolicy. type ContextEntry struct { _ struct{} `type:"structure"` @@ -20344,6 +20423,14 @@ type GenerateServiceLastAccessedDetailsInput struct { // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` + + // The level of detail that you want to generate. You can specify whether you + // want to generate information about the last attempt to access services or + // actions. If you specify service-level granularity, this operation generates + // only service data. If you specify action-level granularity, it generates + // service and action data. If you don't include this optional parameter, the + // operation generates service data. + Granularity *string `type:"string" enum:"AccessAdvisorUsageGranularityType"` } // String returns the string representation @@ -20378,11 +20465,19 @@ func (s *GenerateServiceLastAccessedDetailsInput) SetArn(v string) *GenerateServ return s } +// SetGranularity sets the Granularity field's value. +func (s *GenerateServiceLastAccessedDetailsInput) SetGranularity(v string) *GenerateServiceLastAccessedDetailsInput { + s.Granularity = &v + return s +} + type GenerateServiceLastAccessedDetailsOutput struct { _ struct{} `type:"structure"` - // The job ID that you can use in the GetServiceLastAccessedDetails or GetServiceLastAccessedDetailsWithEntities - // operations. 
+ // The JobId that you can use in the GetServiceLastAccessedDetails or GetServiceLastAccessedDetailsWithEntities + // operations. The JobId returned by GenerateServiceLastAccessedDetail must + // be used by the same role within a session, or by the same user when used + // to call GetServiceLastAccessedDetail. JobId *string `min:"36" type:"string"` } @@ -22245,7 +22340,9 @@ type GetServiceLastAccessedDetailsInput struct { _ struct{} `type:"structure"` // The ID of the request generated by the GenerateServiceLastAccessedDetails - // operation. + // operation. The JobId returned by GenerateServiceLastAccessedDetail must be + // used by the same role within a session, or by the same user when used to + // call GetServiceLastAccessedDetail. // // JobId is a required field JobId *string `min:"36" type:"string" required:"true"` @@ -22352,6 +22449,11 @@ type GetServiceLastAccessedDetailsOutput struct { // JobStatus is a required field JobStatus *string `type:"string" required:"true" enum:"JobStatusType"` + // The type of job. Service jobs return information about when each service + // was last accessed. Action jobs also include information about when tracked + // actions within the service were last accessed. + JobType *string `type:"string" enum:"AccessAdvisorUsageGranularityType"` + // When IsTruncated is true, this element is present and contains the value // to use for the Marker parameter in a subsequent pagination request. Marker *string `type:"string"` @@ -22403,6 +22505,12 @@ func (s *GetServiceLastAccessedDetailsOutput) SetJobStatus(v string) *GetService return s } +// SetJobType sets the JobType field's value. +func (s *GetServiceLastAccessedDetailsOutput) SetJobType(v string) *GetServiceLastAccessedDetailsOutput { + s.JobType = &v + return s +} + // SetMarker sets the Marker field's value. func (s *GetServiceLastAccessedDetailsOutput) SetMarker(v string) *GetServiceLastAccessedDetailsOutput { s.Marker = &v @@ -29456,6 +29564,13 @@ type ServiceLastAccessed struct { // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). LastAuthenticatedEntity *string `min:"20" type:"string"` + // The Region from which the authenticated entity (user or role) last attempted + // to access the service. AWS does not report unauthenticated requests. + // + // This field is null if no IAM entities attempted to access the service within + // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + LastAuthenticatedRegion *string `type:"string"` + // The name of the service in which access was attempted. // // ServiceName is a required field @@ -29480,6 +29595,16 @@ type ServiceLastAccessed struct { // This field is null if no principals attempted to access the service within // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). TotalAuthenticatedEntities *int64 `type:"integer"` + + // An object that contains details about the most recent attempt to access a + // tracked action within the service. + // + // This field is null if there no tracked actions or if the principal did not + // use the tracked actions within the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). 
+ // This field is also null if the report was generated at the service level + // and not the action level. For more information, see the Granularity field + // in GenerateServiceLastAccessedDetails. + TrackedActionsLastAccessed []*TrackedActionLastAccessed `type:"list"` } // String returns the string representation @@ -29504,6 +29629,12 @@ func (s *ServiceLastAccessed) SetLastAuthenticatedEntity(v string) *ServiceLastA return s } +// SetLastAuthenticatedRegion sets the LastAuthenticatedRegion field's value. +func (s *ServiceLastAccessed) SetLastAuthenticatedRegion(v string) *ServiceLastAccessed { + s.LastAuthenticatedRegion = &v + return s +} + // SetServiceName sets the ServiceName field's value. func (s *ServiceLastAccessed) SetServiceName(v string) *ServiceLastAccessed { s.ServiceName = &v @@ -29522,6 +29653,12 @@ func (s *ServiceLastAccessed) SetTotalAuthenticatedEntities(v int64) *ServiceLas return s } +// SetTrackedActionsLastAccessed sets the TrackedActionsLastAccessed field's value. +func (s *ServiceLastAccessed) SetTrackedActionsLastAccessed(v []*TrackedActionLastAccessed) *ServiceLastAccessed { + s.TrackedActionsLastAccessed = v + return s +} + // Contains the details of a service-specific credential. type ServiceSpecificCredential struct { _ struct{} `type:"structure"` @@ -30307,7 +30444,7 @@ type SimulatePrincipalPolicyInput struct { // one permissions boundary when you pass a policy to this operation. An IAM // entity can only have one permissions boundary in effect at a time. For example, // if a permissions boundary is attached to an entity and you pass in a different - // permissions boundary policy using this parameter, then the new permission + // permissions boundary policy using this parameter, then the new permissions // boundary policy is used for the simulation. For more information about permissions // boundaries, see Permissions Boundaries for IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) // in the IAM User Guide. The policy input is specified as a string containing @@ -30851,6 +30988,75 @@ func (s TagUserOutput) GoString() string { return s.String() } +// Contains details about the most recent attempt to access an action within +// the service. +// +// This data type is used as a response element in the GetServiceLastAccessedDetails +// operation. +type TrackedActionLastAccessed struct { + _ struct{} `type:"structure"` + + // The name of the tracked action to which access was attempted. Tracked actions + // are actions that report activity to IAM. + ActionName *string `type:"string"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + LastAccessedEntity *string `min:"20" type:"string"` + + // The Region from which the authenticated entity (user or role) last attempted + // to access the tracked action. AWS does not report unauthenticated requests. + // + // This field is null if no IAM entities attempted to access the service within + // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). 
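// Illustrative sketch (not part of the vendored file): reading the new region
// and tracked-action fields added to ServiceLastAccessed. It assumes the report
// was generated with ACTION_LEVEL granularity; for SERVICE_LEVEL reports
// TrackedActionsLastAccessed is expected to be empty.
package example

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
)

func printTrackedActions(s *iam.ServiceLastAccessed) {
	fmt.Printf("service %s, last authenticated from %s\n",
		aws.StringValue(s.ServiceName),
		aws.StringValue(s.LastAuthenticatedRegion))
	for _, a := range s.TrackedActionsLastAccessed {
		// Each entry describes the most recent attempt to use one tracked action.
		fmt.Printf("  action %s last accessed %s from %s\n",
			aws.StringValue(a.ActionName),
			aws.TimeValue(a.LastAccessedTime).Format(time.RFC3339),
			aws.StringValue(a.LastAccessedRegion))
	}
}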
+ LastAccessedRegion *string `type:"string"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when an authenticated entity most recently attempted to access the tracked + // service. AWS does not report unauthenticated requests. + // + // This field is null if no IAM entities attempted to access the service within + // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + LastAccessedTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s TrackedActionLastAccessed) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TrackedActionLastAccessed) GoString() string { + return s.String() +} + +// SetActionName sets the ActionName field's value. +func (s *TrackedActionLastAccessed) SetActionName(v string) *TrackedActionLastAccessed { + s.ActionName = &v + return s +} + +// SetLastAccessedEntity sets the LastAccessedEntity field's value. +func (s *TrackedActionLastAccessed) SetLastAccessedEntity(v string) *TrackedActionLastAccessed { + s.LastAccessedEntity = &v + return s +} + +// SetLastAccessedRegion sets the LastAccessedRegion field's value. +func (s *TrackedActionLastAccessed) SetLastAccessedRegion(v string) *TrackedActionLastAccessed { + s.LastAccessedRegion = &v + return s +} + +// SetLastAccessedTime sets the LastAccessedTime field's value. +func (s *TrackedActionLastAccessed) SetLastAccessedTime(v time.Time) *TrackedActionLastAccessed { + s.LastAccessedTime = &v + return s +} + type UntagRoleInput struct { _ struct{} `type:"structure"` @@ -33060,6 +33266,22 @@ func (s *VirtualMFADevice) SetUser(v *User) *VirtualMFADevice { return s } +const ( + // AccessAdvisorUsageGranularityTypeServiceLevel is a AccessAdvisorUsageGranularityType enum value + AccessAdvisorUsageGranularityTypeServiceLevel = "SERVICE_LEVEL" + + // AccessAdvisorUsageGranularityTypeActionLevel is a AccessAdvisorUsageGranularityType enum value + AccessAdvisorUsageGranularityTypeActionLevel = "ACTION_LEVEL" +) + +// AccessAdvisorUsageGranularityType_Values returns all elements of the AccessAdvisorUsageGranularityType enum +func AccessAdvisorUsageGranularityType_Values() []string { + return []string{ + AccessAdvisorUsageGranularityTypeServiceLevel, + AccessAdvisorUsageGranularityTypeActionLevel, + } +} + const ( // AssignmentStatusTypeAssigned is a AssignmentStatusType enum value AssignmentStatusTypeAssigned = "Assigned" @@ -33071,6 +33293,15 @@ const ( AssignmentStatusTypeAny = "Any" ) +// AssignmentStatusType_Values returns all elements of the AssignmentStatusType enum +func AssignmentStatusType_Values() []string { + return []string{ + AssignmentStatusTypeAssigned, + AssignmentStatusTypeUnassigned, + AssignmentStatusTypeAny, + } +} + const ( // ContextKeyTypeEnumString is a ContextKeyTypeEnum enum value ContextKeyTypeEnumString = "string" @@ -33109,6 +33340,24 @@ const ( ContextKeyTypeEnumDateList = "dateList" ) +// ContextKeyTypeEnum_Values returns all elements of the ContextKeyTypeEnum enum +func ContextKeyTypeEnum_Values() []string { + return []string{ + ContextKeyTypeEnumString, + ContextKeyTypeEnumStringList, + ContextKeyTypeEnumNumeric, + ContextKeyTypeEnumNumericList, + ContextKeyTypeEnumBoolean, + ContextKeyTypeEnumBooleanList, + ContextKeyTypeEnumIp, + ContextKeyTypeEnumIpList, + ContextKeyTypeEnumBinary, + ContextKeyTypeEnumBinaryList, + ContextKeyTypeEnumDate, + 
ContextKeyTypeEnumDateList, + } +} + const ( // DeletionTaskStatusTypeSucceeded is a DeletionTaskStatusType enum value DeletionTaskStatusTypeSucceeded = "SUCCEEDED" @@ -33123,6 +33372,16 @@ const ( DeletionTaskStatusTypeNotStarted = "NOT_STARTED" ) +// DeletionTaskStatusType_Values returns all elements of the DeletionTaskStatusType enum +func DeletionTaskStatusType_Values() []string { + return []string{ + DeletionTaskStatusTypeSucceeded, + DeletionTaskStatusTypeInProgress, + DeletionTaskStatusTypeFailed, + DeletionTaskStatusTypeNotStarted, + } +} + const ( // EncodingTypeSsh is a EncodingType enum value EncodingTypeSsh = "SSH" @@ -33131,6 +33390,14 @@ const ( EncodingTypePem = "PEM" ) +// EncodingType_Values returns all elements of the EncodingType enum +func EncodingType_Values() []string { + return []string{ + EncodingTypeSsh, + EncodingTypePem, + } +} + const ( // EntityTypeUser is a EntityType enum value EntityTypeUser = "User" @@ -33148,6 +33415,17 @@ const ( EntityTypeAwsmanagedPolicy = "AWSManagedPolicy" ) +// EntityType_Values returns all elements of the EntityType enum +func EntityType_Values() []string { + return []string{ + EntityTypeUser, + EntityTypeRole, + EntityTypeGroup, + EntityTypeLocalManagedPolicy, + EntityTypeAwsmanagedPolicy, + } +} + const ( // GlobalEndpointTokenVersionV1token is a GlobalEndpointTokenVersion enum value GlobalEndpointTokenVersionV1token = "v1Token" @@ -33156,6 +33434,14 @@ const ( GlobalEndpointTokenVersionV2token = "v2Token" ) +// GlobalEndpointTokenVersion_Values returns all elements of the GlobalEndpointTokenVersion enum +func GlobalEndpointTokenVersion_Values() []string { + return []string{ + GlobalEndpointTokenVersionV1token, + GlobalEndpointTokenVersionV2token, + } +} + const ( // JobStatusTypeInProgress is a JobStatusType enum value JobStatusTypeInProgress = "IN_PROGRESS" @@ -33167,11 +33453,27 @@ const ( JobStatusTypeFailed = "FAILED" ) +// JobStatusType_Values returns all elements of the JobStatusType enum +func JobStatusType_Values() []string { + return []string{ + JobStatusTypeInProgress, + JobStatusTypeCompleted, + JobStatusTypeFailed, + } +} + const ( // PermissionsBoundaryAttachmentTypePermissionsBoundaryPolicy is a PermissionsBoundaryAttachmentType enum value PermissionsBoundaryAttachmentTypePermissionsBoundaryPolicy = "PermissionsBoundaryPolicy" ) +// PermissionsBoundaryAttachmentType_Values returns all elements of the PermissionsBoundaryAttachmentType enum +func PermissionsBoundaryAttachmentType_Values() []string { + return []string{ + PermissionsBoundaryAttachmentTypePermissionsBoundaryPolicy, + } +} + const ( // PolicyEvaluationDecisionTypeAllowed is a PolicyEvaluationDecisionType enum value PolicyEvaluationDecisionTypeAllowed = "allowed" @@ -33183,6 +33485,15 @@ const ( PolicyEvaluationDecisionTypeImplicitDeny = "implicitDeny" ) +// PolicyEvaluationDecisionType_Values returns all elements of the PolicyEvaluationDecisionType enum +func PolicyEvaluationDecisionType_Values() []string { + return []string{ + PolicyEvaluationDecisionTypeAllowed, + PolicyEvaluationDecisionTypeExplicitDeny, + PolicyEvaluationDecisionTypeImplicitDeny, + } +} + const ( // PolicyOwnerEntityTypeUser is a PolicyOwnerEntityType enum value PolicyOwnerEntityTypeUser = "USER" @@ -33194,6 +33505,15 @@ const ( PolicyOwnerEntityTypeGroup = "GROUP" ) +// PolicyOwnerEntityType_Values returns all elements of the PolicyOwnerEntityType enum +func PolicyOwnerEntityType_Values() []string { + return []string{ + PolicyOwnerEntityTypeUser, + PolicyOwnerEntityTypeRole, + 
PolicyOwnerEntityTypeGroup, + } +} + const ( // PolicyScopeTypeAll is a PolicyScopeType enum value PolicyScopeTypeAll = "All" @@ -33205,6 +33525,15 @@ const ( PolicyScopeTypeLocal = "Local" ) +// PolicyScopeType_Values returns all elements of the PolicyScopeType enum +func PolicyScopeType_Values() []string { + return []string{ + PolicyScopeTypeAll, + PolicyScopeTypeAws, + PolicyScopeTypeLocal, + } +} + const ( // PolicySourceTypeUser is a PolicySourceType enum value PolicySourceTypeUser = "user" @@ -33228,6 +33557,19 @@ const ( PolicySourceTypeNone = "none" ) +// PolicySourceType_Values returns all elements of the PolicySourceType enum +func PolicySourceType_Values() []string { + return []string{ + PolicySourceTypeUser, + PolicySourceTypeGroup, + PolicySourceTypeRole, + PolicySourceTypeAwsManaged, + PolicySourceTypeUserManaged, + PolicySourceTypeResource, + PolicySourceTypeNone, + } +} + const ( // PolicyTypeInline is a PolicyType enum value PolicyTypeInline = "INLINE" @@ -33236,6 +33578,14 @@ const ( PolicyTypeManaged = "MANAGED" ) +// PolicyType_Values returns all elements of the PolicyType enum +func PolicyType_Values() []string { + return []string{ + PolicyTypeInline, + PolicyTypeManaged, + } +} + // The policy usage type that indicates whether the policy is used as a permissions // policy or as the permissions boundary for an entity. // @@ -33250,11 +33600,26 @@ const ( PolicyUsageTypePermissionsBoundary = "PermissionsBoundary" ) +// PolicyUsageType_Values returns all elements of the PolicyUsageType enum +func PolicyUsageType_Values() []string { + return []string{ + PolicyUsageTypePermissionsPolicy, + PolicyUsageTypePermissionsBoundary, + } +} + const ( // ReportFormatTypeTextCsv is a ReportFormatType enum value ReportFormatTypeTextCsv = "text/csv" ) +// ReportFormatType_Values returns all elements of the ReportFormatType enum +func ReportFormatType_Values() []string { + return []string{ + ReportFormatTypeTextCsv, + } +} + const ( // ReportStateTypeStarted is a ReportStateType enum value ReportStateTypeStarted = "STARTED" @@ -33266,6 +33631,15 @@ const ( ReportStateTypeComplete = "COMPLETE" ) +// ReportStateType_Values returns all elements of the ReportStateType enum +func ReportStateType_Values() []string { + return []string{ + ReportStateTypeStarted, + ReportStateTypeInprogress, + ReportStateTypeComplete, + } +} + const ( // SortKeyTypeServiceNamespaceAscending is a SortKeyType enum value SortKeyTypeServiceNamespaceAscending = "SERVICE_NAMESPACE_ASCENDING" @@ -33280,6 +33654,16 @@ const ( SortKeyTypeLastAuthenticatedTimeDescending = "LAST_AUTHENTICATED_TIME_DESCENDING" ) +// SortKeyType_Values returns all elements of the SortKeyType enum +func SortKeyType_Values() []string { + return []string{ + SortKeyTypeServiceNamespaceAscending, + SortKeyTypeServiceNamespaceDescending, + SortKeyTypeLastAuthenticatedTimeAscending, + SortKeyTypeLastAuthenticatedTimeDescending, + } +} + const ( // StatusTypeActive is a StatusType enum value StatusTypeActive = "Active" @@ -33288,6 +33672,14 @@ const ( StatusTypeInactive = "Inactive" ) +// StatusType_Values returns all elements of the StatusType enum +func StatusType_Values() []string { + return []string{ + StatusTypeActive, + StatusTypeInactive, + } +} + const ( // SummaryKeyTypeUsers is a SummaryKeyType enum value SummaryKeyTypeUsers = "Users" @@ -33367,3 +33759,35 @@ const ( // SummaryKeyTypeGlobalEndpointTokenVersion is a SummaryKeyType enum value SummaryKeyTypeGlobalEndpointTokenVersion = "GlobalEndpointTokenVersion" ) + +// 
SummaryKeyType_Values returns all elements of the SummaryKeyType enum +func SummaryKeyType_Values() []string { + return []string{ + SummaryKeyTypeUsers, + SummaryKeyTypeUsersQuota, + SummaryKeyTypeGroups, + SummaryKeyTypeGroupsQuota, + SummaryKeyTypeServerCertificates, + SummaryKeyTypeServerCertificatesQuota, + SummaryKeyTypeUserPolicySizeQuota, + SummaryKeyTypeGroupPolicySizeQuota, + SummaryKeyTypeGroupsPerUserQuota, + SummaryKeyTypeSigningCertificatesPerUserQuota, + SummaryKeyTypeAccessKeysPerUserQuota, + SummaryKeyTypeMfadevices, + SummaryKeyTypeMfadevicesInUse, + SummaryKeyTypeAccountMfaenabled, + SummaryKeyTypeAccountAccessKeysPresent, + SummaryKeyTypeAccountSigningCertificatesPresent, + SummaryKeyTypeAttachedPoliciesPerGroupQuota, + SummaryKeyTypeAttachedPoliciesPerRoleQuota, + SummaryKeyTypeAttachedPoliciesPerUserQuota, + SummaryKeyTypePolicies, + SummaryKeyTypePoliciesQuota, + SummaryKeyTypePolicySizeQuota, + SummaryKeyTypePolicyVersionsInUse, + SummaryKeyTypePolicyVersionsInUseQuota, + SummaryKeyTypeVersionsPerPolicyQuota, + SummaryKeyTypeGlobalEndpointTokenVersion, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/doc.go b/vendor/github.com/aws/aws-sdk-go/service/iam/doc.go index 0d709cd27..13f89fa19 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/doc.go @@ -3,59 +3,12 @@ // Package iam provides the client and types for making API // requests to AWS Identity and Access Management. // -// AWS Identity and Access Management (IAM) is a web service that you can use -// to manage users and user permissions under your AWS account. This guide provides -// descriptions of IAM actions that you can call programmatically. For general -// information about IAM, see AWS Identity and Access Management (IAM) (http://aws.amazon.com/iam/). -// For the user guide for IAM, see Using IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/). -// -// AWS provides SDKs that consist of libraries and sample code for various programming -// languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs -// provide a convenient way to create programmatic access to IAM and AWS. For -// example, the SDKs take care of tasks such as cryptographically signing requests -// (see below), managing errors, and retrying requests automatically. For information -// about the AWS SDKs, including how to download and install them, see the Tools -// for Amazon Web Services (http://aws.amazon.com/tools/) page. -// -// We recommend that you use the AWS SDKs to make programmatic API calls to -// IAM. However, you can also use the IAM Query API to make direct calls to -// the IAM web service. To learn more about the IAM Query API, see Making Query -// Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in the Using IAM guide. IAM supports GET and POST requests for all actions. -// That is, the API does not require you to use GET for some actions and POST -// for others. However, GET requests are subject to the limitation size of a -// URL. Therefore, for operations that require larger sizes, use a POST request. -// -// Signing Requests -// -// Requests must be signed using an access key ID and a secret access key. We -// strongly recommend that you do not use your AWS account access key ID and -// secret access key for everyday work with IAM. 
You can use the access key -// ID and secret access key for an IAM user or you can use the AWS Security -// Token Service to generate temporary security credentials and use those to -// sign requests. -// -// To sign requests, we recommend that you use Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). -// If you have an existing application that uses Signature Version 2, you do -// not have to update it to use Signature Version 4. However, some operations -// now require Signature Version 4. The documentation for operations that require -// version 4 indicate this requirement. -// -// Additional Resources -// -// For more information, see the following: -// -// * AWS Security Credentials (https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html). -// This topic provides general information about the types of credentials -// used for accessing AWS. -// -// * IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAMBestPractices.html). -// This topic presents a list of suggestions for using the IAM service to -// help secure your AWS resources. -// -// * Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html). -// This set of topics walk you through the process of signing a request using -// an access key ID and secret access key. +// AWS Identity and Access Management (IAM) is a web service for securely controlling +// access to AWS services. With IAM, you can centrally manage users, security +// credentials such as access keys, and permissions that control which AWS resources +// users and applications can access. For more information about IAM, see AWS +// Identity and Access Management (IAM) (http://aws.amazon.com/iam/) and the +// AWS Identity and Access Management User Guide (https://docs.aws.amazon.com/IAM/latest/UserGuide/). // // See https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go b/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go index 30a85b3b4..74afac25e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go @@ -117,7 +117,8 @@ const ( // "LimitExceeded". // // The request was rejected because it attempted to create resources beyond - // the current AWS account limits. The error message describes the limit exceeded. + // the current AWS account limitations. The error message describes the limit + // exceeded. 
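// Illustrative sketch (not part of the vendored file): the runtime type
// assertion pattern the doc comments above refer to, applied to the
// LimitExceeded error code. The CreateUser call and user name are assumptions
// chosen only to show the error-handling shape.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	svc := iam.New(session.Must(session.NewSession()))

	_, err := svc.CreateUser(&iam.CreateUserInput{
		UserName: aws.String("example-user"), // placeholder
	})
	if err != nil {
		// awserr.Error exposes the service error code and message.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == iam.ErrCodeLimitExceededException {
			// The message describes which account quota was exceeded.
			fmt.Println("IAM quota reached:", aerr.Message())
			return
		}
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("user created")
}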
ErrCodeLimitExceededException = "LimitExceeded" // ErrCodeMalformedCertificateException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go index 816d3e3df..6e5d37139 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/imagebuilder/api.go b/vendor/github.com/aws/aws-sdk-go/service/imagebuilder/api.go index 02e78844c..6c57feba8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/imagebuilder/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/imagebuilder/api.go @@ -207,6 +207,10 @@ func (c *Imagebuilder) CreateComponentRequest(input *CreateComponentInput) (req // You have specified two or more mutually exclusive parameters. Review the // error message for details. // +// * ServiceQuotaExceededException +// You have exceeded the number of permitted resources or operations for this +// service. For service quotas, see EC2 Image Builder endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/imagebuilder.html#limits_imagebuilder). +// // See also, https://docs.aws.amazon.com/goto/WebAPI/imagebuilder-2019-12-02/CreateComponent func (c *Imagebuilder) CreateComponent(input *CreateComponentInput) (*CreateComponentOutput, error) { req, out := c.CreateComponentRequest(input) @@ -319,6 +323,10 @@ func (c *Imagebuilder) CreateDistributionConfigurationRequest(input *CreateDistr // You have specified two or more mutually exclusive parameters. Review the // error message for details. // +// * ServiceQuotaExceededException +// You have exceeded the number of permitted resources or operations for this +// service. For service quotas, see EC2 Image Builder endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/imagebuilder.html#limits_imagebuilder). +// // See also, https://docs.aws.amazon.com/goto/WebAPI/imagebuilder-2019-12-02/CreateDistributionConfiguration func (c *Imagebuilder) CreateDistributionConfiguration(input *CreateDistributionConfigurationInput) (*CreateDistributionConfigurationOutput, error) { req, out := c.CreateDistributionConfigurationRequest(input) @@ -424,6 +432,10 @@ func (c *Imagebuilder) CreateImageRequest(input *CreateImageInput) (req *request // The resource that you are trying to operate on is currently in use. Review // the message details and retry later. // +// * ServiceQuotaExceededException +// You have exceeded the number of permitted resources or operations for this +// service. For service quotas, see EC2 Image Builder endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/imagebuilder.html#limits_imagebuilder). +// // See also, https://docs.aws.amazon.com/goto/WebAPI/imagebuilder-2019-12-02/CreateImage func (c *Imagebuilder) CreateImage(input *CreateImageInput) (*CreateImageOutput, error) { req, out := c.CreateImageRequest(input) @@ -532,6 +544,10 @@ func (c *Imagebuilder) CreateImagePipelineRequest(input *CreateImagePipelineInpu // * ResourceAlreadyExistsException // The resource that you are trying to create already exists. 
// +// * ServiceQuotaExceededException +// You have exceeded the number of permitted resources or operations for this +// service. For service quotas, see EC2 Image Builder endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/imagebuilder.html#limits_imagebuilder). +// // See also, https://docs.aws.amazon.com/goto/WebAPI/imagebuilder-2019-12-02/CreateImagePipeline func (c *Imagebuilder) CreateImagePipeline(input *CreateImagePipelineInput) (*CreateImagePipelineOutput, error) { req, out := c.CreateImagePipelineRequest(input) @@ -643,6 +659,10 @@ func (c *Imagebuilder) CreateImageRecipeRequest(input *CreateImageRecipeInput) ( // * ResourceAlreadyExistsException // The resource that you are trying to create already exists. // +// * ServiceQuotaExceededException +// You have exceeded the number of permitted resources or operations for this +// service. For service quotas, see EC2 Image Builder endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/imagebuilder.html#limits_imagebuilder). +// // See also, https://docs.aws.amazon.com/goto/WebAPI/imagebuilder-2019-12-02/CreateImageRecipe func (c *Imagebuilder) CreateImageRecipe(input *CreateImageRecipeInput) (*CreateImageRecipeOutput, error) { req, out := c.CreateImageRecipeRequest(input) @@ -751,6 +771,10 @@ func (c *Imagebuilder) CreateInfrastructureConfigurationRequest(input *CreateInf // * ResourceAlreadyExistsException // The resource that you are trying to create already exists. // +// * ServiceQuotaExceededException +// You have exceeded the number of permitted resources or operations for this +// service. For service quotas, see EC2 Image Builder endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/imagebuilder.html#limits_imagebuilder). +// // See also, https://docs.aws.amazon.com/goto/WebAPI/imagebuilder-2019-12-02/CreateInfrastructureConfiguration func (c *Imagebuilder) CreateInfrastructureConfiguration(input *CreateInfrastructureConfigurationInput) (*CreateInfrastructureConfigurationOutput, error) { req, out := c.CreateInfrastructureConfigurationRequest(input) @@ -2863,7 +2887,7 @@ func (c *Imagebuilder) ListImageBuildVersionsRequest(input *ListImageBuildVersio // ListImageBuildVersions API operation for EC2 Image Builder. // -// Returns a list of distribution configurations. +// Returns a list of image build versions. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3494,7 +3518,7 @@ func (c *Imagebuilder) ListImagesRequest(input *ListImagesInput) (req *request.R // ListImages API operation for EC2 Image Builder. // -// Returns the list of image build versions for the specified semantic version. +// Returns the list of images that you have access to. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3888,7 +3912,12 @@ func (c *Imagebuilder) PutComponentPolicyRequest(input *PutComponentPolicyInput) // PutComponentPolicy API operation for EC2 Image Builder. // -// Applies a policy to a component. +// Applies a policy to a component. We recommend that you call the RAM API CreateResourceShare +// (https://docs.aws.amazon.com/ram/latest/APIReference/API_CreateResourceShare.html) +// to share resources. 
If you call the Image Builder API PutComponentPolicy, +// you must also call the RAM API PromoteResourceShareCreatedFromPolicy (https://docs.aws.amazon.com/ram/latest/APIReference/API_PromoteResourceShareCreatedFromPolicy.html) +// in order for the resource to be visible to all principals with whom the resource +// is shared. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3990,7 +4019,12 @@ func (c *Imagebuilder) PutImagePolicyRequest(input *PutImagePolicyInput) (req *r // PutImagePolicy API operation for EC2 Image Builder. // -// Applies a policy to an image. +// Applies a policy to an image. We recommend that you call the RAM API CreateResourceShare +// (https://docs.aws.amazon.com/ram/latest/APIReference/API_CreateResourceShare.html) +// to share resources. If you call the Image Builder API PutImagePolicy, you +// must also call the RAM API PromoteResourceShareCreatedFromPolicy (https://docs.aws.amazon.com/ram/latest/APIReference/API_PromoteResourceShareCreatedFromPolicy.html) +// in order for the resource to be visible to all principals with whom the resource +// is shared. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4092,7 +4126,12 @@ func (c *Imagebuilder) PutImageRecipePolicyRequest(input *PutImageRecipePolicyIn // PutImageRecipePolicy API operation for EC2 Image Builder. // -// Applies a policy to an image recipe. +// Applies a policy to an image recipe. We recommend that you call the RAM API +// CreateResourceShare (https://docs.aws.amazon.com/ram/latest/APIReference/API_CreateResourceShare.html) +// to share resources. If you call the Image Builder API PutImageRecipePolicy, +// you must also call the RAM API PromoteResourceShareCreatedFromPolicy (https://docs.aws.amazon.com/ram/latest/APIReference/API_PromoteResourceShareCreatedFromPolicy.html) +// in order for the resource to be visible to all principals with whom the resource +// is shared. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4754,6 +4793,9 @@ func (c *Imagebuilder) UpdateInfrastructureConfigurationWithContext(ctx aws.Cont type Ami struct { _ struct{} `type:"structure"` + // The account ID of the owner of the AMI. + AccountId *string `locationName:"accountId" min:"1" type:"string"` + // The description of the EC2 AMI. Description *string `locationName:"description" min:"1" type:"string"` @@ -4780,6 +4822,12 @@ func (s Ami) GoString() string { return s.String() } +// SetAccountId sets the AccountId field's value. +func (s *Ami) SetAccountId(v string) *Ami { + s.AccountId = &v + return s +} + // SetDescription sets the Description field's value. func (s *Ami) SetDescription(v string) *Ami { s.Description = &v @@ -4820,12 +4868,18 @@ type AmiDistributionConfiguration struct { // The description of the distribution configuration. Description *string `locationName:"description" min:"1" type:"string"` + // The KMS key identifier used to encrypt the distributed image. + KmsKeyId *string `locationName:"kmsKeyId" min:"1" type:"string"` + // Launch permissions can be used to configure which AWS accounts can use the // AMI to launch instances. 
LaunchPermission *LaunchPermissionConfiguration `locationName:"launchPermission" type:"structure"` // The name of the distribution configuration. Name *string `locationName:"name" min:"1" type:"string"` + + // The ID of an account to which you want to distribute an image. + TargetAccountIds []*string `locationName:"targetAccountIds" min:"1" type:"list"` } // String returns the string representation @@ -4847,9 +4901,20 @@ func (s *AmiDistributionConfiguration) Validate() error { if s.Description != nil && len(*s.Description) < 1 { invalidParams.Add(request.NewErrParamMinLen("Description", 1)) } + if s.KmsKeyId != nil && len(*s.KmsKeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KmsKeyId", 1)) + } if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } + if s.TargetAccountIds != nil && len(s.TargetAccountIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TargetAccountIds", 1)) + } + if s.LaunchPermission != nil { + if err := s.LaunchPermission.Validate(); err != nil { + invalidParams.AddNested("LaunchPermission", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4869,6 +4934,12 @@ func (s *AmiDistributionConfiguration) SetDescription(v string) *AmiDistribution return s } +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *AmiDistributionConfiguration) SetKmsKeyId(v string) *AmiDistributionConfiguration { + s.KmsKeyId = &v + return s +} + // SetLaunchPermission sets the LaunchPermission field's value. func (s *AmiDistributionConfiguration) SetLaunchPermission(v *LaunchPermissionConfiguration) *AmiDistributionConfiguration { s.LaunchPermission = v @@ -4881,10 +4952,16 @@ func (s *AmiDistributionConfiguration) SetName(v string) *AmiDistributionConfigu return s } +// SetTargetAccountIds sets the TargetAccountIds field's value. +func (s *AmiDistributionConfiguration) SetTargetAccountIds(v []*string) *AmiDistributionConfiguration { + s.TargetAccountIds = v + return s +} + // You have exceeded the permitted request rate for the specific operation. type CallRateLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4901,17 +4978,17 @@ func (s CallRateLimitExceededException) GoString() string { func newErrorCallRateLimitExceededException(v protocol.ResponseMetadata) error { return &CallRateLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CallRateLimitExceededException) Code() string { +func (s *CallRateLimitExceededException) Code() string { return "CallRateLimitExceededException" } // Message returns the exception's message. -func (s CallRateLimitExceededException) Message() string { +func (s *CallRateLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4919,22 +4996,22 @@ func (s CallRateLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CallRateLimitExceededException) OrigErr() error { +func (s *CallRateLimitExceededException) OrigErr() error { return nil } -func (s CallRateLimitExceededException) Error() string { +func (s *CallRateLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s CallRateLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CallRateLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CallRateLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *CallRateLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type CancelImageCreationInput struct { @@ -5032,8 +5109,8 @@ func (s *CancelImageCreationOutput) SetRequestId(v string) *CancelImageCreationO // or resource on behalf of a user that doesn't have permissions to use the // action or resource, or specifying an invalid resource identifier. type ClientException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5050,17 +5127,17 @@ func (s ClientException) GoString() string { func newErrorClientException(v protocol.ResponseMetadata) error { return &ClientException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClientException) Code() string { +func (s *ClientException) Code() string { return "ClientException" } // Message returns the exception's message. -func (s ClientException) Message() string { +func (s *ClientException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5068,22 +5145,22 @@ func (s ClientException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClientException) OrigErr() error { +func (s *ClientException) OrigErr() error { return nil } -func (s ClientException) Error() string { +func (s *ClientException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClientException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClientException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClientException) RequestID() string { - return s.respMetadata.RequestID +func (s *ClientException) RequestID() string { + return s.RespMetadata.RequestID } // A detailed view of a component. @@ -5120,6 +5197,11 @@ type Component struct { // The platform of the component. Platform *string `locationName:"platform" type:"string" enum:"Platform"` + // The operating system (OS) version supported by the component. If the OS information + // is available, a prefix match is performed against the parent image OS version + // during image recipe creation. + SupportedOsVersions []*string `locationName:"supportedOsVersions" min:"1" type:"list"` + // The tags associated with the component. Tags map[string]*string `locationName:"tags" min:"1" type:"map"` @@ -5201,6 +5283,12 @@ func (s *Component) SetPlatform(v string) *Component { return s } +// SetSupportedOsVersions sets the SupportedOsVersions field's value. +func (s *Component) SetSupportedOsVersions(v []*string) *Component { + s.SupportedOsVersions = v + return s +} + // SetTags sets the Tags field's value. func (s *Component) SetTags(v map[string]*string) *Component { s.Tags = v @@ -5283,6 +5371,11 @@ type ComponentSummary struct { // The platform of the component. 
Platform *string `locationName:"platform" type:"string" enum:"Platform"` + // The operating system (OS) version supported by the component. If the OS information + // is available, a prefix match is performed against the parent image OS version + // during image recipe creation. + SupportedOsVersions []*string `locationName:"supportedOsVersions" min:"1" type:"list"` + // The tags associated with the component. Tags map[string]*string `locationName:"tags" min:"1" type:"map"` @@ -5346,6 +5439,12 @@ func (s *ComponentSummary) SetPlatform(v string) *ComponentSummary { return s } +// SetSupportedOsVersions sets the SupportedOsVersions field's value. +func (s *ComponentSummary) SetSupportedOsVersions(v []*string) *ComponentSummary { + s.SupportedOsVersions = v + return s +} + // SetTags sets the Tags field's value. func (s *ComponentSummary) SetTags(v map[string]*string) *ComponentSummary { s.Tags = v @@ -5386,6 +5485,11 @@ type ComponentVersion struct { // The platform of the component. Platform *string `locationName:"platform" type:"string" enum:"Platform"` + // The operating system (OS) version supported by the component. If the OS information + // is available, a prefix match is performed against the parent image OS version + // during image recipe creation. + SupportedOsVersions []*string `locationName:"supportedOsVersions" min:"1" type:"list"` + // The type of the component denotes whether the component is used to build // the image or only to test it. Type *string `locationName:"type" type:"string" enum:"ComponentType"` @@ -5440,6 +5544,12 @@ func (s *ComponentVersion) SetPlatform(v string) *ComponentVersion { return s } +// SetSupportedOsVersions sets the SupportedOsVersions field's value. +func (s *ComponentVersion) SetSupportedOsVersions(v []*string) *ComponentVersion { + s.SupportedOsVersions = v + return s +} + // SetType sets the Type field's value. func (s *ComponentVersion) SetType(v string) *ComponentVersion { s.Type = &v @@ -5490,6 +5600,11 @@ type CreateComponentInput struct { // SemanticVersion is a required field SemanticVersion *string `locationName:"semanticVersion" type:"string" required:"true"` + // The operating system (OS) version supported by the component. If the OS information + // is available, a prefix match is performed against the parent image OS version + // during image recipe creation. + SupportedOsVersions []*string `locationName:"supportedOsVersions" min:"1" type:"list"` + // The tags of the component. Tags map[string]*string `locationName:"tags" min:"1" type:"map"` @@ -5537,6 +5652,9 @@ func (s *CreateComponentInput) Validate() error { if s.SemanticVersion == nil { invalidParams.Add(request.NewErrParamRequired("SemanticVersion")) } + if s.SupportedOsVersions != nil && len(s.SupportedOsVersions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SupportedOsVersions", 1)) + } if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } @@ -5595,6 +5713,12 @@ func (s *CreateComponentInput) SetSemanticVersion(v string) *CreateComponentInpu return s } +// SetSupportedOsVersions sets the SupportedOsVersions field's value. +func (s *CreateComponentInput) SetSupportedOsVersions(v []*string) *CreateComponentInput { + s.SupportedOsVersions = v + return s +} + // SetTags sets the Tags field's value. func (s *CreateComponentInput) SetTags(v map[string]*string) *CreateComponentInput { s.Tags = v @@ -5799,6 +5923,12 @@ type CreateImageInput struct { // and configures the outputs of your pipeline. 
DistributionConfigurationArn *string `locationName:"distributionConfigurationArn" type:"string"` + // Collects additional information about the image being created, including + // the operating system (OS) version and package list. This information is used + // to enhance the overall experience of using EC2 Image Builder. Enabled by + // default. + EnhancedImageMetadataEnabled *bool `locationName:"enhancedImageMetadataEnabled" type:"boolean"` + // The Amazon Resource Name (ARN) of the image recipe that defines how images // are configured, tested, and assessed. // @@ -5867,6 +5997,12 @@ func (s *CreateImageInput) SetDistributionConfigurationArn(v string) *CreateImag return s } +// SetEnhancedImageMetadataEnabled sets the EnhancedImageMetadataEnabled field's value. +func (s *CreateImageInput) SetEnhancedImageMetadataEnabled(v bool) *CreateImageInput { + s.EnhancedImageMetadataEnabled = &v + return s +} + // SetImageRecipeArn sets the ImageRecipeArn field's value. func (s *CreateImageInput) SetImageRecipeArn(v string) *CreateImageInput { s.ImageRecipeArn = &v @@ -5945,6 +6081,12 @@ type CreateImagePipelineInput struct { // be used to configure and distribute images created by this image pipeline. DistributionConfigurationArn *string `locationName:"distributionConfigurationArn" type:"string"` + // Collects additional information about the image being created, including + // the operating system (OS) version and package list. This information is used + // to enhance the overall experience of using EC2 Image Builder. Enabled by + // default. + EnhancedImageMetadataEnabled *bool `locationName:"enhancedImageMetadataEnabled" type:"boolean"` + // The Amazon Resource Name (ARN) of the image recipe that will be used to configure // images created by this image pipeline. // @@ -6041,6 +6183,12 @@ func (s *CreateImagePipelineInput) SetDistributionConfigurationArn(v string) *Cr return s } +// SetEnhancedImageMetadataEnabled sets the EnhancedImageMetadataEnabled field's value. +func (s *CreateImagePipelineInput) SetEnhancedImageMetadataEnabled(v bool) *CreateImagePipelineInput { + s.EnhancedImageMetadataEnabled = &v + return s +} + // SetImageRecipeArn sets the ImageRecipeArn field's value. func (s *CreateImagePipelineInput) SetImageRecipeArn(v string) *CreateImagePipelineInput { s.ImageRecipeArn = &v @@ -6147,7 +6295,13 @@ type CreateImageRecipeInput struct { // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` - // The parent image of the image recipe. + // The parent image of the image recipe. The value of the string can be the + // ARN of the parent image or an AMI ID. The format for the ARN follows this + // example: arn:aws:imagebuilder:us-west-2:aws:image/windows-server-2016-english-full-base-x86/xxxx.x.x. + // You can provide the specific version that you want to use, or you can use + // a wildcard in all of the fields. If you enter an AMI ID for the string value, + // you must have access to the AMI, and the AMI must be in the same Region in + // which you are using Image Builder. // // ParentImage is a required field ParentImage *string `locationName:"parentImage" min:"1" type:"string" required:"true"` @@ -6159,6 +6313,9 @@ type CreateImageRecipeInput struct { // The tags of the image recipe. Tags map[string]*string `locationName:"tags" min:"1" type:"map"` + + // The working directory to be used during build and test workflows. 
+ WorkingDirectory *string `locationName:"workingDirectory" min:"1" type:"string"` } // String returns the string representation @@ -6201,6 +6358,9 @@ func (s *CreateImageRecipeInput) Validate() error { if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } + if s.WorkingDirectory != nil && len(*s.WorkingDirectory) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkingDirectory", 1)) + } if s.BlockDeviceMappings != nil { for i, v := range s.BlockDeviceMappings { if v == nil { @@ -6276,6 +6436,12 @@ func (s *CreateImageRecipeInput) SetTags(v map[string]*string) *CreateImageRecip return s } +// SetWorkingDirectory sets the WorkingDirectory field's value. +func (s *CreateImageRecipeInput) SetWorkingDirectory(v string) *CreateImageRecipeInput { + s.WorkingDirectory = &v + return s +} + type CreateImageRecipeOutput struct { _ struct{} `type:"structure"` @@ -6350,6 +6516,9 @@ type CreateInfrastructureConfigurationInput struct { // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` + // The tags attached to the resource created by Image Builder. + ResourceTags map[string]*string `locationName:"resourceTags" min:"1" type:"map"` + // The security group IDs to associate with the instance used to customize your // EC2 AMI. SecurityGroupIds []*string `locationName:"securityGroupIds" type:"list"` @@ -6400,6 +6569,9 @@ func (s *CreateInfrastructureConfigurationInput) Validate() error { if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } + if s.ResourceTags != nil && len(s.ResourceTags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceTags", 1)) + } if s.SubnetId != nil && len(*s.SubnetId) < 1 { invalidParams.Add(request.NewErrParamMinLen("SubnetId", 1)) } @@ -6460,6 +6632,12 @@ func (s *CreateInfrastructureConfigurationInput) SetName(v string) *CreateInfras return s } +// SetResourceTags sets the ResourceTags field's value. +func (s *CreateInfrastructureConfigurationInput) SetResourceTags(v map[string]*string) *CreateInfrastructureConfigurationInput { + s.ResourceTags = v + return s +} + // SetSecurityGroupIds sets the SecurityGroupIds field's value. func (s *CreateInfrastructureConfigurationInput) SetSecurityGroupIds(v []*string) *CreateInfrastructureConfigurationInput { s.SecurityGroupIds = v @@ -6963,7 +7141,7 @@ type Distribution struct { // The License Manager Configuration to associate with the AMI in the specified // Region. - LicenseConfigurationArns []*string `locationName:"licenseConfigurationArns" type:"list"` + LicenseConfigurationArns []*string `locationName:"licenseConfigurationArns" min:"1" type:"list"` // The target Region. // @@ -6984,6 +7162,9 @@ func (s Distribution) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *Distribution) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Distribution"} + if s.LicenseConfigurationArns != nil && len(s.LicenseConfigurationArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LicenseConfigurationArns", 1)) + } if s.Region == nil { invalidParams.Add(request.NewErrParamRequired("Region")) } @@ -7328,8 +7509,8 @@ func (s *Filter) SetValues(v []*string) *Filter { // You are not authorized to perform the requested operation. 
type ForbiddenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7346,17 +7527,17 @@ func (s ForbiddenException) GoString() string { func newErrorForbiddenException(v protocol.ResponseMetadata) error { return &ForbiddenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ForbiddenException) Code() string { +func (s *ForbiddenException) Code() string { return "ForbiddenException" } // Message returns the exception's message. -func (s ForbiddenException) Message() string { +func (s *ForbiddenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7364,22 +7545,22 @@ func (s ForbiddenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ForbiddenException) OrigErr() error { +func (s *ForbiddenException) OrigErr() error { return nil } -func (s ForbiddenException) Error() string { +func (s *ForbiddenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ForbiddenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ForbiddenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ForbiddenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ForbiddenException) RequestID() string { + return s.RespMetadata.RequestID } type GetComponentInput struct { @@ -8022,8 +8203,8 @@ func (s *GetInfrastructureConfigurationOutput) SetRequestId(v string) *GetInfras // You have specified a client token for an operation using parameter values // that differ from a previous request that used the same client token. type IdempotentParameterMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8040,17 +8221,17 @@ func (s IdempotentParameterMismatchException) GoString() string { func newErrorIdempotentParameterMismatchException(v protocol.ResponseMetadata) error { return &IdempotentParameterMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IdempotentParameterMismatchException) Code() string { +func (s *IdempotentParameterMismatchException) Code() string { return "IdempotentParameterMismatchException" } // Message returns the exception's message. -func (s IdempotentParameterMismatchException) Message() string { +func (s *IdempotentParameterMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8058,22 +8239,22 @@ func (s IdempotentParameterMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IdempotentParameterMismatchException) OrigErr() error { +func (s *IdempotentParameterMismatchException) OrigErr() error { return nil } -func (s IdempotentParameterMismatchException) Error() string { +func (s *IdempotentParameterMismatchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s IdempotentParameterMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IdempotentParameterMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IdempotentParameterMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *IdempotentParameterMismatchException) RequestID() string { + return s.RespMetadata.RequestID } // An image build version. @@ -8089,6 +8270,12 @@ type Image struct { // The distribution configuration used when creating this image. DistributionConfiguration *DistributionConfiguration `locationName:"distributionConfiguration" type:"structure"` + // Collects additional information about the image being created, including + // the operating system (OS) version and package list. This information is used + // to enhance the overall experience of using EC2 Image Builder. Enabled by + // default. + EnhancedImageMetadataEnabled *bool `locationName:"enhancedImageMetadataEnabled" type:"boolean"` + // The image recipe used when creating the image. ImageRecipe *ImageRecipe `locationName:"imageRecipe" type:"structure"` @@ -8101,6 +8288,10 @@ type Image struct { // The name of the image. Name *string `locationName:"name" type:"string"` + // The operating system version of the instance. For example, Amazon Linux 2, + // Ubuntu 18, or Microsoft Windows Server 2019. + OsVersion *string `locationName:"osVersion" min:"1" type:"string"` + // The output resources produced when creating this image. OutputResources *OutputResources `locationName:"outputResources" type:"structure"` @@ -8151,6 +8342,12 @@ func (s *Image) SetDistributionConfiguration(v *DistributionConfiguration) *Imag return s } +// SetEnhancedImageMetadataEnabled sets the EnhancedImageMetadataEnabled field's value. +func (s *Image) SetEnhancedImageMetadataEnabled(v bool) *Image { + s.EnhancedImageMetadataEnabled = &v + return s +} + // SetImageRecipe sets the ImageRecipe field's value. func (s *Image) SetImageRecipe(v *ImageRecipe) *Image { s.ImageRecipe = v @@ -8175,6 +8372,12 @@ func (s *Image) SetName(v string) *Image { return s } +// SetOsVersion sets the OsVersion field's value. +func (s *Image) SetOsVersion(v string) *Image { + s.OsVersion = &v + return s +} + // SetOutputResources sets the OutputResources field's value. func (s *Image) SetOutputResources(v *OutputResources) *Image { s.OutputResources = v @@ -8243,6 +8446,12 @@ type ImagePipeline struct { // with this image pipeline. DistributionConfigurationArn *string `locationName:"distributionConfigurationArn" type:"string"` + // Collects additional information about the image being created, including + // the operating system (OS) version and package list. This information is used + // to enhance the overall experience of using EC2 Image Builder. Enabled by + // default. + EnhancedImageMetadataEnabled *bool `locationName:"enhancedImageMetadataEnabled" type:"boolean"` + // The Amazon Resource Name (ARN) of the image recipe associated with this image // pipeline. ImageRecipeArn *string `locationName:"imageRecipeArn" type:"string"` @@ -8322,6 +8531,12 @@ func (s *ImagePipeline) SetDistributionConfigurationArn(v string) *ImagePipeline return s } +// SetEnhancedImageMetadataEnabled sets the EnhancedImageMetadataEnabled field's value. 
+func (s *ImagePipeline) SetEnhancedImageMetadataEnabled(v bool) *ImagePipeline { + s.EnhancedImageMetadataEnabled = &v + return s +} + // SetImageRecipeArn sets the ImageRecipeArn field's value. func (s *ImagePipeline) SetImageRecipeArn(v string) *ImagePipeline { s.ImageRecipeArn = &v @@ -8406,6 +8621,9 @@ type ImageRecipe struct { // The version of the image recipe. Version *string `locationName:"version" type:"string"` + + // The working directory to be used during build and test workflows. + WorkingDirectory *string `locationName:"workingDirectory" min:"1" type:"string"` } // String returns the string representation @@ -8484,6 +8702,12 @@ func (s *ImageRecipe) SetVersion(v string) *ImageRecipe { return s } +// SetWorkingDirectory sets the WorkingDirectory field's value. +func (s *ImageRecipe) SetWorkingDirectory(v string) *ImageRecipe { + s.WorkingDirectory = &v + return s +} + // A summary of an image recipe. type ImageRecipeSummary struct { _ struct{} `type:"structure"` @@ -8608,6 +8832,10 @@ type ImageSummary struct { // The name of the image. Name *string `locationName:"name" type:"string"` + // The operating system version of the instance. For example, Amazon Linux 2, + // Ubuntu 18, or Microsoft Windows Server 2019. + OsVersion *string `locationName:"osVersion" min:"1" type:"string"` + // The output resources produced when creating this image. OutputResources *OutputResources `locationName:"outputResources" type:"structure"` @@ -8655,6 +8883,12 @@ func (s *ImageSummary) SetName(v string) *ImageSummary { return s } +// SetOsVersion sets the OsVersion field's value. +func (s *ImageSummary) SetOsVersion(v string) *ImageSummary { + s.OsVersion = &v + return s +} + // SetOutputResources sets the OutputResources field's value. func (s *ImageSummary) SetOutputResources(v *OutputResources) *ImageSummary { s.OutputResources = v @@ -8750,6 +8984,10 @@ type ImageVersion struct { // The name of the image semantic version. Name *string `locationName:"name" type:"string"` + // The operating system version of the instance. For example, Amazon Linux 2, + // Ubuntu 18, or Microsoft Windows Server 2019. + OsVersion *string `locationName:"osVersion" min:"1" type:"string"` + // The owner of the image semantic version. Owner *string `locationName:"owner" min:"1" type:"string"` @@ -8788,6 +9026,12 @@ func (s *ImageVersion) SetName(v string) *ImageVersion { return s } +// SetOsVersion sets the OsVersion field's value. +func (s *ImageVersion) SetOsVersion(v string) *ImageVersion { + s.OsVersion = &v + return s +} + // SetOwner sets the Owner field's value. func (s *ImageVersion) SetOwner(v string) *ImageVersion { s.Owner = &v @@ -9062,6 +9306,9 @@ type InfrastructureConfiguration struct { // The name of the infrastructure configuration. Name *string `locationName:"name" type:"string"` + // The tags attached to the resource created by Image Builder. + ResourceTags map[string]*string `locationName:"resourceTags" min:"1" type:"map"` + // The security group IDs of the infrastructure configuration. SecurityGroupIds []*string `locationName:"securityGroupIds" type:"list"` @@ -9142,6 +9389,12 @@ func (s *InfrastructureConfiguration) SetName(v string) *InfrastructureConfigura return s } +// SetResourceTags sets the ResourceTags field's value. +func (s *InfrastructureConfiguration) SetResourceTags(v map[string]*string) *InfrastructureConfiguration { + s.ResourceTags = v + return s +} + // SetSecurityGroupIds sets the SecurityGroupIds field's value. 
func (s *InfrastructureConfiguration) SetSecurityGroupIds(v []*string) *InfrastructureConfiguration { s.SecurityGroupIds = v @@ -9191,6 +9444,9 @@ type InfrastructureConfigurationSummary struct { // The name of the infrastructure configuration. Name *string `locationName:"name" type:"string"` + // The tags attached to the image created by Image Builder. + ResourceTags map[string]*string `locationName:"resourceTags" min:"1" type:"map"` + // The tags of the infrastructure configuration. Tags map[string]*string `locationName:"tags" min:"1" type:"map"` } @@ -9235,6 +9491,12 @@ func (s *InfrastructureConfigurationSummary) SetName(v string) *InfrastructureCo return s } +// SetResourceTags sets the ResourceTags field's value. +func (s *InfrastructureConfigurationSummary) SetResourceTags(v map[string]*string) *InfrastructureConfigurationSummary { + s.ResourceTags = v + return s +} + // SetTags sets the Tags field's value. func (s *InfrastructureConfigurationSummary) SetTags(v map[string]*string) *InfrastructureConfigurationSummary { s.Tags = v @@ -9315,8 +9577,8 @@ func (s *InstanceBlockDeviceMapping) SetVirtualName(v string) *InstanceBlockDevi // You have provided an invalid pagination token in your request. type InvalidPaginationTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9333,17 +9595,17 @@ func (s InvalidPaginationTokenException) GoString() string { func newErrorInvalidPaginationTokenException(v protocol.ResponseMetadata) error { return &InvalidPaginationTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPaginationTokenException) Code() string { +func (s *InvalidPaginationTokenException) Code() string { return "InvalidPaginationTokenException" } // Message returns the exception's message. -func (s InvalidPaginationTokenException) Message() string { +func (s *InvalidPaginationTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9351,29 +9613,29 @@ func (s InvalidPaginationTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPaginationTokenException) OrigErr() error { +func (s *InvalidPaginationTokenException) OrigErr() error { return nil } -func (s InvalidPaginationTokenException) Error() string { +func (s *InvalidPaginationTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPaginationTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPaginationTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPaginationTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPaginationTokenException) RequestID() string { + return s.RespMetadata.RequestID } // You have specified two or more mutually exclusive parameters. Review the // error message for details. 
type InvalidParameterCombinationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9390,17 +9652,17 @@ func (s InvalidParameterCombinationException) GoString() string { func newErrorInvalidParameterCombinationException(v protocol.ResponseMetadata) error { return &InvalidParameterCombinationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterCombinationException) Code() string { +func (s *InvalidParameterCombinationException) Code() string { return "InvalidParameterCombinationException" } // Message returns the exception's message. -func (s InvalidParameterCombinationException) Message() string { +func (s *InvalidParameterCombinationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9408,29 +9670,29 @@ func (s InvalidParameterCombinationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterCombinationException) OrigErr() error { +func (s *InvalidParameterCombinationException) OrigErr() error { return nil } -func (s InvalidParameterCombinationException) Error() string { +func (s *InvalidParameterCombinationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterCombinationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterCombinationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterCombinationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterCombinationException) RequestID() string { + return s.RespMetadata.RequestID } // The specified parameter is invalid. Review the available parameters for the // API request. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9447,17 +9709,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9465,28 +9727,28 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The value that you provided for the specified parameter is invalid. type InvalidParameterValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9503,17 +9765,17 @@ func (s InvalidParameterValueException) GoString() string { func newErrorInvalidParameterValueException(v protocol.ResponseMetadata) error { return &InvalidParameterValueException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterValueException) Code() string { +func (s *InvalidParameterValueException) Code() string { return "InvalidParameterValueException" } // Message returns the exception's message. -func (s InvalidParameterValueException) Message() string { +func (s *InvalidParameterValueException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9521,28 +9783,28 @@ func (s InvalidParameterValueException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterValueException) OrigErr() error { +func (s *InvalidParameterValueException) OrigErr() error { return nil } -func (s InvalidParameterValueException) Error() string { +func (s *InvalidParameterValueException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterValueException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterValueException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterValueException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterValueException) RequestID() string { + return s.RespMetadata.RequestID } // You have made a request for an action that is not supported by the service. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9559,17 +9821,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9577,28 +9839,28 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Your version number is out of bounds or does not follow the required syntax. type InvalidVersionNumberException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9615,17 +9877,17 @@ func (s InvalidVersionNumberException) GoString() string { func newErrorInvalidVersionNumberException(v protocol.ResponseMetadata) error { return &InvalidVersionNumberException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidVersionNumberException) Code() string { +func (s *InvalidVersionNumberException) Code() string { return "InvalidVersionNumberException" } // Message returns the exception's message. -func (s InvalidVersionNumberException) Message() string { +func (s *InvalidVersionNumberException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9633,28 +9895,30 @@ func (s InvalidVersionNumberException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidVersionNumberException) OrigErr() error { +func (s *InvalidVersionNumberException) OrigErr() error { return nil } -func (s InvalidVersionNumberException) Error() string { +func (s *InvalidVersionNumberException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidVersionNumberException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidVersionNumberException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidVersionNumberException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidVersionNumberException) RequestID() string { + return s.RespMetadata.RequestID } // Describes the configuration for a launch permission. The launch permission // modification request is sent to the EC2 ModifyImageAttribute (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyImageAttribute.html) // API on behalf of the user for each Region they have selected to distribute -// the AMI. +// the AMI. To make an AMI public, set the launch permission authorized accounts +// to all. See the examples for making an AMI public at EC2 ModifyImageAttribute +// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyImageAttribute.html). type LaunchPermissionConfiguration struct { _ struct{} `type:"structure"` @@ -9662,7 +9926,7 @@ type LaunchPermissionConfiguration struct { UserGroups []*string `locationName:"userGroups" type:"list"` // The AWS account ID. 
- UserIds []*string `locationName:"userIds" type:"list"` + UserIds []*string `locationName:"userIds" min:"1" type:"list"` } // String returns the string representation @@ -9675,6 +9939,19 @@ func (s LaunchPermissionConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *LaunchPermissionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LaunchPermissionConfiguration"} + if s.UserIds != nil && len(s.UserIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserIds", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetUserGroups sets the UserGroups field's value. func (s *LaunchPermissionConfiguration) SetUserGroups(v []*string) *LaunchPermissionConfiguration { s.UserGroups = v @@ -9924,6 +10201,8 @@ type ListDistributionConfigurationsInput struct { _ struct{} `type:"structure"` // The filters. + // + // * name - The name of this distribution configuration. Filters []*Filter `locationName:"filters" min:"1" type:"list"` // The maximum items to return in a request. @@ -11168,8 +11447,8 @@ func (s *PutImageRecipePolicyOutput) SetRequestId(v string) *PutImageRecipePolic // The resource that you are trying to create already exists. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11186,17 +11465,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11204,29 +11483,29 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // You have attempted to mutate or delete a resource with a dependency that // prohibits this action. See the error message for more details. 
type ResourceDependencyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11243,17 +11522,17 @@ func (s ResourceDependencyException) GoString() string { func newErrorResourceDependencyException(v protocol.ResponseMetadata) error { return &ResourceDependencyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceDependencyException) Code() string { +func (s *ResourceDependencyException) Code() string { return "ResourceDependencyException" } // Message returns the exception's message. -func (s ResourceDependencyException) Message() string { +func (s *ResourceDependencyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11261,29 +11540,29 @@ func (s ResourceDependencyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceDependencyException) OrigErr() error { +func (s *ResourceDependencyException) OrigErr() error { return nil } -func (s ResourceDependencyException) Error() string { +func (s *ResourceDependencyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceDependencyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceDependencyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceDependencyException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceDependencyException) RequestID() string { + return s.RespMetadata.RequestID } // The resource that you are trying to operate on is currently in use. Review // the message details and retry later. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11300,17 +11579,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11318,28 +11597,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // At least one of the resources referenced by your request does not exist. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11356,17 +11635,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11374,22 +11653,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Amazon S3 logging configuration. @@ -11448,12 +11727,20 @@ type Schedule struct { // The condition configures when the pipeline should trigger a new image build. // When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, - // EC2 Image Builder will build a new image only when there are known changes - // pending. When it is set to EXPRESSION_MATCH_ONLY, it will build a new image - // every time the CRON expression matches the current time. + // and you use semantic version filters on the source image or components in + // your image recipe, EC2 Image Builder will build a new image only when there + // are new versions of the image or components in your recipe that match the + // semantic version filter. When it is set to EXPRESSION_MATCH_ONLY, it will + // build a new image every time the CRON expression matches the current time. + // For semantic version syntax, see CreateComponent (https://docs.aws.amazon.com/imagebuilder/latest/APIReference/API_CreateComponent.html) + // in the EC2 Image Builder API Reference. 
PipelineExecutionStartCondition *string `locationName:"pipelineExecutionStartCondition" type:"string" enum:"PipelineExecutionStartCondition"` - // The expression determines how often EC2 Image Builder evaluates your pipelineExecutionStartCondition. + // The cron expression determines how often EC2 Image Builder evaluates your + // pipelineExecutionStartCondition. + // + // For information on how to format a cron expression in Image Builder, see + // Use cron expressions in EC2 Image Builder (https://docs.aws.amazon.com/imagebuilder/latest/userguide/image-builder-cron.html). ScheduleExpression *string `locationName:"scheduleExpression" min:"1" type:"string"` } @@ -11494,8 +11781,8 @@ func (s *Schedule) SetScheduleExpression(v string) *Schedule { // This exception is thrown when the service encounters an unrecoverable exception. type ServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11512,17 +11799,74 @@ func (s ServiceException) GoString() string { func newErrorServiceException(v protocol.ResponseMetadata) error { return &ServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceException) Code() string { +func (s *ServiceException) Code() string { return "ServiceException" } // Message returns the exception's message. -func (s ServiceException) Message() string { +func (s *ServiceException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ServiceException) OrigErr() error { + return nil +} + +func (s *ServiceException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ServiceException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ServiceException) RequestID() string { + return s.RespMetadata.RequestID +} + +// You have exceeded the number of permitted resources or operations for this +// service. For service quotas, see EC2 Image Builder endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/imagebuilder.html#limits_imagebuilder). +type ServiceQuotaExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s ServiceQuotaExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceQuotaExceededException) GoString() string { + return s.String() +} + +func newErrorServiceQuotaExceededException(v protocol.ResponseMetadata) error { + return &ServiceQuotaExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ServiceQuotaExceededException) Code() string { + return "ServiceQuotaExceededException" +} + +// Message returns the exception's message. +func (s *ServiceQuotaExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11530,28 +11874,28 @@ func (s ServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
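// Illustrative sketch, not part of the vendored SDK file: the expanded
// Schedule documentation above pairs a cron ScheduleExpression with a
// PipelineExecutionStartCondition. A minimal example using the generated
// setters; the concrete cron string is an assumption, so check the linked
// Image Builder cron documentation for the exact syntax you need.
package example

import "github.com/aws/aws-sdk-go/service/imagebuilder"

// nightlyDependencyDrivenSchedule evaluates the cron expression nightly but
// only triggers a build when dependency updates are available.
func nightlyDependencyDrivenSchedule() *imagebuilder.Schedule {
	s := &imagebuilder.Schedule{}
	s.SetScheduleExpression("cron(0 0 * * ? *)")
	s.SetPipelineExecutionStartCondition(
		imagebuilder.PipelineExecutionStartConditionExpressionMatchAndDependencyUpdatesAvailable)
	return s
}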
-func (s ServiceException) OrigErr() error { +func (s *ServiceQuotaExceededException) OrigErr() error { return nil } -func (s ServiceException) Error() string { +func (s *ServiceQuotaExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceQuotaExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceQuotaExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The service is unable to process your request at this time. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11568,17 +11912,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11586,22 +11930,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } type StartImagePipelineExecutionInput struct { @@ -11982,6 +12326,12 @@ type UpdateImagePipelineInput struct { // be used to configure and distribute images updated by this image pipeline. DistributionConfigurationArn *string `locationName:"distributionConfigurationArn" type:"string"` + // Collects additional information about the image being created, including + // the operating system (OS) version and package list. This information is used + // to enhance the overall experience of using EC2 Image Builder. Enabled by + // default. + EnhancedImageMetadataEnabled *bool `locationName:"enhancedImageMetadataEnabled" type:"boolean"` + // The Amazon Resource Name (ARN) of the image pipeline that you want to update. 
// // ImagePipelineArn is a required field @@ -12072,6 +12422,12 @@ func (s *UpdateImagePipelineInput) SetDistributionConfigurationArn(v string) *Up return s } +// SetEnhancedImageMetadataEnabled sets the EnhancedImageMetadataEnabled field's value. +func (s *UpdateImagePipelineInput) SetEnhancedImageMetadataEnabled(v bool) *UpdateImagePipelineInput { + s.EnhancedImageMetadataEnabled = &v + return s +} + // SetImagePipelineArn sets the ImagePipelineArn field's value. func (s *UpdateImagePipelineInput) SetImagePipelineArn(v string) *UpdateImagePipelineInput { s.ImagePipelineArn = &v @@ -12183,6 +12539,9 @@ type UpdateInfrastructureConfigurationInput struct { // The logging configuration of the infrastructure configuration. Logging *Logging `locationName:"logging" type:"structure"` + // The tags attached to the resource created by Image Builder. + ResourceTags map[string]*string `locationName:"resourceTags" min:"1" type:"map"` + // The security group IDs to associate with the instance used to customize your // EC2 AMI. SecurityGroupIds []*string `locationName:"securityGroupIds" type:"list"` @@ -12230,6 +12589,9 @@ func (s *UpdateInfrastructureConfigurationInput) Validate() error { if s.KeyPair != nil && len(*s.KeyPair) < 1 { invalidParams.Add(request.NewErrParamMinLen("KeyPair", 1)) } + if s.ResourceTags != nil && len(s.ResourceTags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceTags", 1)) + } if s.SubnetId != nil && len(*s.SubnetId) < 1 { invalidParams.Add(request.NewErrParamMinLen("SubnetId", 1)) } @@ -12287,6 +12649,12 @@ func (s *UpdateInfrastructureConfigurationInput) SetLogging(v *Logging) *UpdateI return s } +// SetResourceTags sets the ResourceTags field's value. +func (s *UpdateInfrastructureConfigurationInput) SetResourceTags(v map[string]*string) *UpdateInfrastructureConfigurationInput { + s.ResourceTags = v + return s +} + // SetSecurityGroupIds sets the SecurityGroupIds field's value. 
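// Illustrative sketch, not part of the vendored SDK file: the setters added
// above (SetEnhancedImageMetadataEnabled and SetResourceTags) slot into the
// corresponding update inputs like this. The ARN parameters and tag values
// are placeholders, and SetInfrastructureConfigurationArn is assumed to be
// the standard generated setter for that input's ARN field.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/imagebuilder"
)

// buildUpdateInputs shows the two new fields being populated alongside the
// required ARNs.
func buildUpdateInputs(pipelineArn, infraArn string) (*imagebuilder.UpdateImagePipelineInput, *imagebuilder.UpdateInfrastructureConfigurationInput) {
	pipe := &imagebuilder.UpdateImagePipelineInput{}
	pipe.SetImagePipelineArn(pipelineArn)
	pipe.SetEnhancedImageMetadataEnabled(true) // enhanced metadata is on by default service-side

	infra := &imagebuilder.UpdateInfrastructureConfigurationInput{}
	infra.SetInfrastructureConfigurationArn(infraArn)
	infra.SetResourceTags(map[string]*string{
		"Project": aws.String("example"),
	})
	return pipe, infra
}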
func (s *UpdateInfrastructureConfigurationInput) SetSecurityGroupIds(v []*string) *UpdateInfrastructureConfigurationInput { s.SecurityGroupIds = v @@ -12358,6 +12726,13 @@ const ( ComponentFormatShell = "SHELL" ) +// ComponentFormat_Values returns all elements of the ComponentFormat enum +func ComponentFormat_Values() []string { + return []string{ + ComponentFormatShell, + } +} + const ( // ComponentTypeBuild is a ComponentType enum value ComponentTypeBuild = "BUILD" @@ -12366,6 +12741,14 @@ const ( ComponentTypeTest = "TEST" ) +// ComponentType_Values returns all elements of the ComponentType enum +func ComponentType_Values() []string { + return []string{ + ComponentTypeBuild, + ComponentTypeTest, + } +} + const ( // EbsVolumeTypeStandard is a EbsVolumeType enum value EbsVolumeTypeStandard = "standard" @@ -12373,6 +12756,9 @@ const ( // EbsVolumeTypeIo1 is a EbsVolumeType enum value EbsVolumeTypeIo1 = "io1" + // EbsVolumeTypeIo2 is a EbsVolumeType enum value + EbsVolumeTypeIo2 = "io2" + // EbsVolumeTypeGp2 is a EbsVolumeType enum value EbsVolumeTypeGp2 = "gp2" @@ -12383,6 +12769,18 @@ const ( EbsVolumeTypeSt1 = "st1" ) +// EbsVolumeType_Values returns all elements of the EbsVolumeType enum +func EbsVolumeType_Values() []string { + return []string{ + EbsVolumeTypeStandard, + EbsVolumeTypeIo1, + EbsVolumeTypeIo2, + EbsVolumeTypeGp2, + EbsVolumeTypeSc1, + EbsVolumeTypeSt1, + } +} + const ( // ImageStatusPending is a ImageStatus enum value ImageStatusPending = "PENDING" @@ -12418,6 +12816,23 @@ const ( ImageStatusDeleted = "DELETED" ) +// ImageStatus_Values returns all elements of the ImageStatus enum +func ImageStatus_Values() []string { + return []string{ + ImageStatusPending, + ImageStatusCreating, + ImageStatusBuilding, + ImageStatusTesting, + ImageStatusDistributing, + ImageStatusIntegrating, + ImageStatusAvailable, + ImageStatusCancelled, + ImageStatusFailed, + ImageStatusDeprecated, + ImageStatusDeleted, + } +} + const ( // OwnershipSelf is a Ownership enum value OwnershipSelf = "Self" @@ -12429,6 +12844,15 @@ const ( OwnershipAmazon = "Amazon" ) +// Ownership_Values returns all elements of the Ownership enum +func Ownership_Values() []string { + return []string{ + OwnershipSelf, + OwnershipShared, + OwnershipAmazon, + } +} + const ( // PipelineExecutionStartConditionExpressionMatchOnly is a PipelineExecutionStartCondition enum value PipelineExecutionStartConditionExpressionMatchOnly = "EXPRESSION_MATCH_ONLY" @@ -12437,6 +12861,14 @@ const ( PipelineExecutionStartConditionExpressionMatchAndDependencyUpdatesAvailable = "EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE" ) +// PipelineExecutionStartCondition_Values returns all elements of the PipelineExecutionStartCondition enum +func PipelineExecutionStartCondition_Values() []string { + return []string{ + PipelineExecutionStartConditionExpressionMatchOnly, + PipelineExecutionStartConditionExpressionMatchAndDependencyUpdatesAvailable, + } +} + const ( // PipelineStatusDisabled is a PipelineStatus enum value PipelineStatusDisabled = "DISABLED" @@ -12445,6 +12877,14 @@ const ( PipelineStatusEnabled = "ENABLED" ) +// PipelineStatus_Values returns all elements of the PipelineStatus enum +func PipelineStatus_Values() []string { + return []string{ + PipelineStatusDisabled, + PipelineStatusEnabled, + } +} + const ( // PlatformWindows is a Platform enum value PlatformWindows = "Windows" @@ -12452,3 +12892,11 @@ const ( // PlatformLinux is a Platform enum value PlatformLinux = "Linux" ) + +// Platform_Values returns all elements of the Platform 
enum +func Platform_Values() []string { + return []string{ + PlatformWindows, + PlatformLinux, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/imagebuilder/errors.go b/vendor/github.com/aws/aws-sdk-go/service/imagebuilder/errors.go index 3f688043e..d40ea6366 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/imagebuilder/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/imagebuilder/errors.go @@ -105,6 +105,13 @@ const ( // This exception is thrown when the service encounters an unrecoverable exception. ErrCodeServiceException = "ServiceException" + // ErrCodeServiceQuotaExceededException for service response error code + // "ServiceQuotaExceededException". + // + // You have exceeded the number of permitted resources or operations for this + // service. For service quotas, see EC2 Image Builder endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/imagebuilder.html#limits_imagebuilder). + ErrCodeServiceQuotaExceededException = "ServiceQuotaExceededException" + // ErrCodeServiceUnavailableException for service response error code // "ServiceUnavailableException". // @@ -128,5 +135,6 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "ResourceInUseException": newErrorResourceInUseException, "ResourceNotFoundException": newErrorResourceNotFoundException, "ServiceException": newErrorServiceException, + "ServiceQuotaExceededException": newErrorServiceQuotaExceededException, "ServiceUnavailableException": newErrorServiceUnavailableException, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/imagebuilder/service.go b/vendor/github.com/aws/aws-sdk-go/service/imagebuilder/service.go index 657a0bbc3..83f68a77f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/imagebuilder/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/imagebuilder/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go b/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go index 14961577c..bffa5642d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/inspector/api.go @@ -4027,8 +4027,8 @@ func (c *Inspector) UpdateAssessmentTargetWithContext(ctx aws.Context, input *Up // You do not have required permissions to access the requested resource. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // You can immediately retry your request. // @@ -4056,17 +4056,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. 
-func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4074,22 +4074,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } type AddAttributesToFindingsInput struct { @@ -4373,8 +4373,8 @@ func (s *AgentPreview) SetOperatingSystem(v string) *AgentPreview { // You started an assessment run, but one of the instances is already participating // in another assessment run. type AgentsAlreadyRunningAssessmentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Agents is a required field Agents []*AgentAlreadyRunningAssessment `locationName:"agents" min:"1" type:"list" required:"true"` @@ -4403,17 +4403,17 @@ func (s AgentsAlreadyRunningAssessmentException) GoString() string { func newErrorAgentsAlreadyRunningAssessmentException(v protocol.ResponseMetadata) error { return &AgentsAlreadyRunningAssessmentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AgentsAlreadyRunningAssessmentException) Code() string { +func (s *AgentsAlreadyRunningAssessmentException) Code() string { return "AgentsAlreadyRunningAssessmentException" } // Message returns the exception's message. -func (s AgentsAlreadyRunningAssessmentException) Message() string { +func (s *AgentsAlreadyRunningAssessmentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4421,22 +4421,22 @@ func (s AgentsAlreadyRunningAssessmentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AgentsAlreadyRunningAssessmentException) OrigErr() error { +func (s *AgentsAlreadyRunningAssessmentException) OrigErr() error { return nil } -func (s AgentsAlreadyRunningAssessmentException) Error() string { +func (s *AgentsAlreadyRunningAssessmentException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s AgentsAlreadyRunningAssessmentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AgentsAlreadyRunningAssessmentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s AgentsAlreadyRunningAssessmentException) RequestID() string { - return s.respMetadata.RequestID +func (s *AgentsAlreadyRunningAssessmentException) RequestID() string { + return s.RespMetadata.RequestID } // A snapshot of an Amazon Inspector assessment run that contains the findings @@ -4827,8 +4827,8 @@ func (s *AssessmentRunFilter) SetStates(v []*string) *AssessmentRunFilter { // You cannot perform a specified action if an assessment run is currently in // progress. type AssessmentRunInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The ARNs of the assessment runs that are currently in progress. // @@ -4862,17 +4862,17 @@ func (s AssessmentRunInProgressException) GoString() string { func newErrorAssessmentRunInProgressException(v protocol.ResponseMetadata) error { return &AssessmentRunInProgressException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AssessmentRunInProgressException) Code() string { +func (s *AssessmentRunInProgressException) Code() string { return "AssessmentRunInProgressException" } // Message returns the exception's message. -func (s AssessmentRunInProgressException) Message() string { +func (s *AssessmentRunInProgressException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4880,22 +4880,22 @@ func (s AssessmentRunInProgressException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AssessmentRunInProgressException) OrigErr() error { +func (s *AssessmentRunInProgressException) OrigErr() error { return nil } -func (s AssessmentRunInProgressException) Error() string { +func (s *AssessmentRunInProgressException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s AssessmentRunInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AssessmentRunInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AssessmentRunInProgressException) RequestID() string { - return s.respMetadata.RequestID +func (s *AssessmentRunInProgressException) RequestID() string { + return s.RespMetadata.RequestID } // Used as one of the elements of the AssessmentRun data type. @@ -7529,8 +7529,8 @@ func (s *GetTelemetryMetadataOutput) SetTelemetryMetadata(v []*TelemetryMetadata // Internal server error. type InternalException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // You can immediately retry your request. // @@ -7553,17 +7553,17 @@ func (s InternalException) GoString() string { func newErrorInternalException(v protocol.ResponseMetadata) error { return &InternalException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalException) Code() string { +func (s *InternalException) Code() string { return "InternalException" } // Message returns the exception's message. 
-func (s InternalException) Message() string { +func (s *InternalException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7571,29 +7571,29 @@ func (s InternalException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalException) OrigErr() error { +func (s *InternalException) OrigErr() error { return nil } -func (s InternalException) Error() string { +func (s *InternalException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalException) RequestID() string { + return s.RespMetadata.RequestID } // Amazon Inspector cannot assume the cross-account role that it needs to list // your EC2 instances during the assessment run. type InvalidCrossAccountRoleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // You can immediately retry your request. // @@ -7621,17 +7621,17 @@ func (s InvalidCrossAccountRoleException) GoString() string { func newErrorInvalidCrossAccountRoleException(v protocol.ResponseMetadata) error { return &InvalidCrossAccountRoleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidCrossAccountRoleException) Code() string { +func (s *InvalidCrossAccountRoleException) Code() string { return "InvalidCrossAccountRoleException" } // Message returns the exception's message. -func (s InvalidCrossAccountRoleException) Message() string { +func (s *InvalidCrossAccountRoleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7639,29 +7639,29 @@ func (s InvalidCrossAccountRoleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidCrossAccountRoleException) OrigErr() error { +func (s *InvalidCrossAccountRoleException) OrigErr() error { return nil } -func (s InvalidCrossAccountRoleException) Error() string { +func (s *InvalidCrossAccountRoleException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidCrossAccountRoleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidCrossAccountRoleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidCrossAccountRoleException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidCrossAccountRoleException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because an invalid or out-of-range value was supplied // for an input parameter. type InvalidInputException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // You can immediately retry your request. 
// @@ -7689,17 +7689,17 @@ func (s InvalidInputException) GoString() string { func newErrorInvalidInputException(v protocol.ResponseMetadata) error { return &InvalidInputException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInputException) Code() string { +func (s *InvalidInputException) Code() string { return "InvalidInputException" } // Message returns the exception's message. -func (s InvalidInputException) Message() string { +func (s *InvalidInputException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7707,29 +7707,29 @@ func (s InvalidInputException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInputException) OrigErr() error { +func (s *InvalidInputException) OrigErr() error { return nil } -func (s InvalidInputException) Error() string { +func (s *InvalidInputException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInputException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInputException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInputException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInputException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // You can immediately retry your request. // @@ -7757,17 +7757,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7775,22 +7775,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListAssessmentRunAgentsInput struct { @@ -8834,8 +8834,8 @@ func (s *NetworkInterface) SetVpcId(v string) *NetworkInterface { // The request was rejected because it referenced an entity that does not exist. // The error code describes the entity. type NoSuchEntityException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // You can immediately retry your request. // @@ -8863,17 +8863,17 @@ func (s NoSuchEntityException) GoString() string { func newErrorNoSuchEntityException(v protocol.ResponseMetadata) error { return &NoSuchEntityException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchEntityException) Code() string { +func (s *NoSuchEntityException) Code() string { return "NoSuchEntityException" } // Message returns the exception's message. -func (s NoSuchEntityException) Message() string { +func (s *NoSuchEntityException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8881,22 +8881,22 @@ func (s NoSuchEntityException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchEntityException) OrigErr() error { +func (s *NoSuchEntityException) OrigErr() error { return nil } -func (s NoSuchEntityException) Error() string { +func (s *NoSuchEntityException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s NoSuchEntityException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchEntityException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchEntityException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchEntityException) RequestID() string { + return s.RespMetadata.RequestID } type PreviewAgentsInput struct { @@ -9005,8 +9005,8 @@ func (s *PreviewAgentsOutput) SetNextToken(v string) *PreviewAgentsOutput { // The request is rejected. The specified assessment template is currently generating // an exclusions preview. type PreviewGenerationInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9023,17 +9023,17 @@ func (s PreviewGenerationInProgressException) GoString() string { func newErrorPreviewGenerationInProgressException(v protocol.ResponseMetadata) error { return &PreviewGenerationInProgressException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PreviewGenerationInProgressException) Code() string { +func (s *PreviewGenerationInProgressException) Code() string { return "PreviewGenerationInProgressException" } // Message returns the exception's message. 
-func (s PreviewGenerationInProgressException) Message() string { +func (s *PreviewGenerationInProgressException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9041,22 +9041,22 @@ func (s PreviewGenerationInProgressException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PreviewGenerationInProgressException) OrigErr() error { +func (s *PreviewGenerationInProgressException) OrigErr() error { return nil } -func (s PreviewGenerationInProgressException) Error() string { +func (s *PreviewGenerationInProgressException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PreviewGenerationInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PreviewGenerationInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PreviewGenerationInProgressException) RequestID() string { - return s.respMetadata.RequestID +func (s *PreviewGenerationInProgressException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about a private IP address associated with a network @@ -9520,8 +9520,8 @@ func (s *ServiceAttributes) SetSchemaVersion(v int64) *ServiceAttributes { // The serice is temporary unavailable. type ServiceTemporarilyUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // You can wait and then retry your request. // @@ -9544,17 +9544,17 @@ func (s ServiceTemporarilyUnavailableException) GoString() string { func newErrorServiceTemporarilyUnavailableException(v protocol.ResponseMetadata) error { return &ServiceTemporarilyUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceTemporarilyUnavailableException) Code() string { +func (s *ServiceTemporarilyUnavailableException) Code() string { return "ServiceTemporarilyUnavailableException" } // Message returns the exception's message. -func (s ServiceTemporarilyUnavailableException) Message() string { +func (s *ServiceTemporarilyUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9562,22 +9562,22 @@ func (s ServiceTemporarilyUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceTemporarilyUnavailableException) OrigErr() error { +func (s *ServiceTemporarilyUnavailableException) OrigErr() error { return nil } -func (s ServiceTemporarilyUnavailableException) Error() string { +func (s *ServiceTemporarilyUnavailableException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceTemporarilyUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceTemporarilyUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ServiceTemporarilyUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceTemporarilyUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } type SetTagsForResourceInput struct { @@ -10171,8 +10171,8 @@ func (s UnsubscribeFromEventOutput) GoString() string { // runs that took place or will take place after generating reports in Amazon // Inspector became available. type UnsupportedFeatureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // CanRetry is a required field CanRetry *bool `locationName:"canRetry" type:"boolean" required:"true"` @@ -10192,17 +10192,17 @@ func (s UnsupportedFeatureException) GoString() string { func newErrorUnsupportedFeatureException(v protocol.ResponseMetadata) error { return &UnsupportedFeatureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedFeatureException) Code() string { +func (s *UnsupportedFeatureException) Code() string { return "UnsupportedFeatureException" } // Message returns the exception's message. -func (s UnsupportedFeatureException) Message() string { +func (s *UnsupportedFeatureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10210,22 +10210,22 @@ func (s UnsupportedFeatureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedFeatureException) OrigErr() error { +func (s *UnsupportedFeatureException) OrigErr() error { return nil } -func (s UnsupportedFeatureException) Error() string { +func (s *UnsupportedFeatureException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedFeatureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedFeatureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s UnsupportedFeatureException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedFeatureException) RequestID() string { + return s.RespMetadata.RequestID } type UpdateAssessmentTargetInput struct { @@ -10339,6 +10339,20 @@ const ( AccessDeniedErrorCodeAccessDeniedToIamRole = "ACCESS_DENIED_TO_IAM_ROLE" ) +// AccessDeniedErrorCode_Values returns all elements of the AccessDeniedErrorCode enum +func AccessDeniedErrorCode_Values() []string { + return []string{ + AccessDeniedErrorCodeAccessDeniedToAssessmentTarget, + AccessDeniedErrorCodeAccessDeniedToAssessmentTemplate, + AccessDeniedErrorCodeAccessDeniedToAssessmentRun, + AccessDeniedErrorCodeAccessDeniedToFinding, + AccessDeniedErrorCodeAccessDeniedToResourceGroup, + AccessDeniedErrorCodeAccessDeniedToRulesPackage, + AccessDeniedErrorCodeAccessDeniedToSnsTopic, + AccessDeniedErrorCodeAccessDeniedToIamRole, + } +} + const ( // AgentHealthHealthy is a AgentHealth enum value AgentHealthHealthy = "HEALTHY" @@ -10350,6 +10364,15 @@ const ( AgentHealthUnknown = "UNKNOWN" ) +// AgentHealth_Values returns all elements of the AgentHealth enum +func AgentHealth_Values() []string { + return []string{ + AgentHealthHealthy, + AgentHealthUnhealthy, + AgentHealthUnknown, + } +} + const ( // AgentHealthCodeIdle is a AgentHealthCode enum value AgentHealthCodeIdle = "IDLE" @@ -10370,6 +10393,18 @@ const ( AgentHealthCodeUnknown = "UNKNOWN" ) +// AgentHealthCode_Values returns all elements of the AgentHealthCode enum +func AgentHealthCode_Values() []string { + return []string{ + AgentHealthCodeIdle, + AgentHealthCodeRunning, + AgentHealthCodeShutdown, + AgentHealthCodeUnhealthy, + AgentHealthCodeThrottled, + AgentHealthCodeUnknown, + } +} + const ( // AssessmentRunNotificationSnsStatusCodeSuccess is a AssessmentRunNotificationSnsStatusCode enum value AssessmentRunNotificationSnsStatusCodeSuccess = "SUCCESS" @@ -10384,6 +10419,16 @@ const ( AssessmentRunNotificationSnsStatusCodeInternalError = "INTERNAL_ERROR" ) +// AssessmentRunNotificationSnsStatusCode_Values returns all elements of the AssessmentRunNotificationSnsStatusCode enum +func AssessmentRunNotificationSnsStatusCode_Values() []string { + return []string{ + AssessmentRunNotificationSnsStatusCodeSuccess, + AssessmentRunNotificationSnsStatusCodeTopicDoesNotExist, + AssessmentRunNotificationSnsStatusCodeAccessDenied, + AssessmentRunNotificationSnsStatusCodeInternalError, + } +} + const ( // AssessmentRunStateCreated is a AssessmentRunState enum value AssessmentRunStateCreated = "CREATED" @@ -10425,11 +10470,37 @@ const ( AssessmentRunStateCanceled = "CANCELED" ) +// AssessmentRunState_Values returns all elements of the AssessmentRunState enum +func AssessmentRunState_Values() []string { + return []string{ + AssessmentRunStateCreated, + AssessmentRunStateStartDataCollectionPending, + AssessmentRunStateStartDataCollectionInProgress, + AssessmentRunStateCollectingData, + AssessmentRunStateStopDataCollectionPending, + AssessmentRunStateDataCollected, + AssessmentRunStateStartEvaluatingRulesPending, + AssessmentRunStateEvaluatingRules, + AssessmentRunStateFailed, + AssessmentRunStateError, + AssessmentRunStateCompleted, + AssessmentRunStateCompletedWithErrors, + AssessmentRunStateCanceled, + } +} + const ( // AssetTypeEc2Instance is a AssetType enum value AssetTypeEc2Instance = "ec2-instance" ) +// AssetType_Values returns all elements of the AssetType enum +func AssetType_Values() []string { + return []string{ + AssetTypeEc2Instance, + } +} + const ( // 
EventAssessmentRunStarted is a Event enum value EventAssessmentRunStarted = "ASSESSMENT_RUN_STARTED" @@ -10447,6 +10518,17 @@ const ( EventOther = "OTHER" ) +// Event_Values returns all elements of the Event enum +func Event_Values() []string { + return []string{ + EventAssessmentRunStarted, + EventAssessmentRunCompleted, + EventAssessmentRunStateChanged, + EventFindingReported, + EventOther, + } +} + const ( // FailedItemErrorCodeInvalidArn is a FailedItemErrorCode enum value FailedItemErrorCodeInvalidArn = "INVALID_ARN" @@ -10467,6 +10549,18 @@ const ( FailedItemErrorCodeInternalError = "INTERNAL_ERROR" ) +// FailedItemErrorCode_Values returns all elements of the FailedItemErrorCode enum +func FailedItemErrorCode_Values() []string { + return []string{ + FailedItemErrorCodeInvalidArn, + FailedItemErrorCodeDuplicateArn, + FailedItemErrorCodeItemDoesNotExist, + FailedItemErrorCodeAccessDenied, + FailedItemErrorCodeLimitExceeded, + FailedItemErrorCodeInternalError, + } +} + const ( // InvalidCrossAccountRoleErrorCodeRoleDoesNotExistOrInvalidTrustRelationship is a InvalidCrossAccountRoleErrorCode enum value InvalidCrossAccountRoleErrorCodeRoleDoesNotExistOrInvalidTrustRelationship = "ROLE_DOES_NOT_EXIST_OR_INVALID_TRUST_RELATIONSHIP" @@ -10475,6 +10569,14 @@ const ( InvalidCrossAccountRoleErrorCodeRoleDoesNotHaveCorrectPolicy = "ROLE_DOES_NOT_HAVE_CORRECT_POLICY" ) +// InvalidCrossAccountRoleErrorCode_Values returns all elements of the InvalidCrossAccountRoleErrorCode enum +func InvalidCrossAccountRoleErrorCode_Values() []string { + return []string{ + InvalidCrossAccountRoleErrorCodeRoleDoesNotExistOrInvalidTrustRelationship, + InvalidCrossAccountRoleErrorCodeRoleDoesNotHaveCorrectPolicy, + } +} + const ( // InvalidInputErrorCodeInvalidAssessmentTargetArn is a InvalidInputErrorCode enum value InvalidInputErrorCodeInvalidAssessmentTargetArn = "INVALID_ASSESSMENT_TARGET_ARN" @@ -10639,6 +10741,66 @@ const ( InvalidInputErrorCodeInvalidNumberOfSeverities = "INVALID_NUMBER_OF_SEVERITIES" ) +// InvalidInputErrorCode_Values returns all elements of the InvalidInputErrorCode enum +func InvalidInputErrorCode_Values() []string { + return []string{ + InvalidInputErrorCodeInvalidAssessmentTargetArn, + InvalidInputErrorCodeInvalidAssessmentTemplateArn, + InvalidInputErrorCodeInvalidAssessmentRunArn, + InvalidInputErrorCodeInvalidFindingArn, + InvalidInputErrorCodeInvalidResourceGroupArn, + InvalidInputErrorCodeInvalidRulesPackageArn, + InvalidInputErrorCodeInvalidResourceArn, + InvalidInputErrorCodeInvalidSnsTopicArn, + InvalidInputErrorCodeInvalidIamRoleArn, + InvalidInputErrorCodeInvalidAssessmentTargetName, + InvalidInputErrorCodeInvalidAssessmentTargetNamePattern, + InvalidInputErrorCodeInvalidAssessmentTemplateName, + InvalidInputErrorCodeInvalidAssessmentTemplateNamePattern, + InvalidInputErrorCodeInvalidAssessmentTemplateDuration, + InvalidInputErrorCodeInvalidAssessmentTemplateDurationRange, + InvalidInputErrorCodeInvalidAssessmentRunDurationRange, + InvalidInputErrorCodeInvalidAssessmentRunStartTimeRange, + InvalidInputErrorCodeInvalidAssessmentRunCompletionTimeRange, + InvalidInputErrorCodeInvalidAssessmentRunStateChangeTimeRange, + InvalidInputErrorCodeInvalidAssessmentRunState, + InvalidInputErrorCodeInvalidTag, + InvalidInputErrorCodeInvalidTagKey, + InvalidInputErrorCodeInvalidTagValue, + InvalidInputErrorCodeInvalidResourceGroupTagKey, + InvalidInputErrorCodeInvalidResourceGroupTagValue, + InvalidInputErrorCodeInvalidAttribute, + InvalidInputErrorCodeInvalidUserAttribute, + 
InvalidInputErrorCodeInvalidUserAttributeKey, + InvalidInputErrorCodeInvalidUserAttributeValue, + InvalidInputErrorCodeInvalidPaginationToken, + InvalidInputErrorCodeInvalidMaxResults, + InvalidInputErrorCodeInvalidAgentId, + InvalidInputErrorCodeInvalidAutoScalingGroup, + InvalidInputErrorCodeInvalidRuleName, + InvalidInputErrorCodeInvalidSeverity, + InvalidInputErrorCodeInvalidLocale, + InvalidInputErrorCodeInvalidEvent, + InvalidInputErrorCodeAssessmentTargetNameAlreadyTaken, + InvalidInputErrorCodeAssessmentTemplateNameAlreadyTaken, + InvalidInputErrorCodeInvalidNumberOfAssessmentTargetArns, + InvalidInputErrorCodeInvalidNumberOfAssessmentTemplateArns, + InvalidInputErrorCodeInvalidNumberOfAssessmentRunArns, + InvalidInputErrorCodeInvalidNumberOfFindingArns, + InvalidInputErrorCodeInvalidNumberOfResourceGroupArns, + InvalidInputErrorCodeInvalidNumberOfRulesPackageArns, + InvalidInputErrorCodeInvalidNumberOfAssessmentRunStates, + InvalidInputErrorCodeInvalidNumberOfTags, + InvalidInputErrorCodeInvalidNumberOfResourceGroupTags, + InvalidInputErrorCodeInvalidNumberOfAttributes, + InvalidInputErrorCodeInvalidNumberOfUserAttributes, + InvalidInputErrorCodeInvalidNumberOfAgentIds, + InvalidInputErrorCodeInvalidNumberOfAutoScalingGroups, + InvalidInputErrorCodeInvalidNumberOfRuleNames, + InvalidInputErrorCodeInvalidNumberOfSeverities, + } +} + const ( // LimitExceededErrorCodeAssessmentTargetLimitExceeded is a LimitExceededErrorCode enum value LimitExceededErrorCodeAssessmentTargetLimitExceeded = "ASSESSMENT_TARGET_LIMIT_EXCEEDED" @@ -10656,11 +10818,29 @@ const ( LimitExceededErrorCodeEventSubscriptionLimitExceeded = "EVENT_SUBSCRIPTION_LIMIT_EXCEEDED" ) +// LimitExceededErrorCode_Values returns all elements of the LimitExceededErrorCode enum +func LimitExceededErrorCode_Values() []string { + return []string{ + LimitExceededErrorCodeAssessmentTargetLimitExceeded, + LimitExceededErrorCodeAssessmentTemplateLimitExceeded, + LimitExceededErrorCodeAssessmentRunLimitExceeded, + LimitExceededErrorCodeResourceGroupLimitExceeded, + LimitExceededErrorCodeEventSubscriptionLimitExceeded, + } +} + const ( // LocaleEnUs is a Locale enum value LocaleEnUs = "EN_US" ) +// Locale_Values returns all elements of the Locale enum +func Locale_Values() []string { + return []string{ + LocaleEnUs, + } +} + const ( // NoSuchEntityErrorCodeAssessmentTargetDoesNotExist is a NoSuchEntityErrorCode enum value NoSuchEntityErrorCodeAssessmentTargetDoesNotExist = "ASSESSMENT_TARGET_DOES_NOT_EXIST" @@ -10687,6 +10867,20 @@ const ( NoSuchEntityErrorCodeIamRoleDoesNotExist = "IAM_ROLE_DOES_NOT_EXIST" ) +// NoSuchEntityErrorCode_Values returns all elements of the NoSuchEntityErrorCode enum +func NoSuchEntityErrorCode_Values() []string { + return []string{ + NoSuchEntityErrorCodeAssessmentTargetDoesNotExist, + NoSuchEntityErrorCodeAssessmentTemplateDoesNotExist, + NoSuchEntityErrorCodeAssessmentRunDoesNotExist, + NoSuchEntityErrorCodeFindingDoesNotExist, + NoSuchEntityErrorCodeResourceGroupDoesNotExist, + NoSuchEntityErrorCodeRulesPackageDoesNotExist, + NoSuchEntityErrorCodeSnsTopicDoesNotExist, + NoSuchEntityErrorCodeIamRoleDoesNotExist, + } +} + const ( // PreviewStatusWorkInProgress is a PreviewStatus enum value PreviewStatusWorkInProgress = "WORK_IN_PROGRESS" @@ -10695,6 +10889,14 @@ const ( PreviewStatusCompleted = "COMPLETED" ) +// PreviewStatus_Values returns all elements of the PreviewStatus enum +func PreviewStatus_Values() []string { + return []string{ + PreviewStatusWorkInProgress, + PreviewStatusCompleted, + } +} + 
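// Illustrative sketch, not part of the vendored SDK file: the generated
// *_Values() helpers added throughout these hunks give the provider a single
// source of truth for enum validation. A hypothetical schema attribute using
// one of them; the attribute itself and the terraform-plugin-sdk import path
// are assumptions, not part of this patch.
package example

import (
	"github.com/aws/aws-sdk-go/service/imagebuilder"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// platformAttribute restricts a string attribute to the Platform enum values
// exposed by imagebuilder.Platform_Values().
func platformAttribute() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Optional:     true,
		ValidateFunc: validation.StringInSlice(imagebuilder.Platform_Values(), false),
	}
}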
const ( // ReportFileFormatHtml is a ReportFileFormat enum value ReportFileFormatHtml = "HTML" @@ -10703,6 +10905,14 @@ const ( ReportFileFormatPdf = "PDF" ) +// ReportFileFormat_Values returns all elements of the ReportFileFormat enum +func ReportFileFormat_Values() []string { + return []string{ + ReportFileFormatHtml, + ReportFileFormatPdf, + } +} + const ( // ReportStatusWorkInProgress is a ReportStatus enum value ReportStatusWorkInProgress = "WORK_IN_PROGRESS" @@ -10714,6 +10924,15 @@ const ( ReportStatusCompleted = "COMPLETED" ) +// ReportStatus_Values returns all elements of the ReportStatus enum +func ReportStatus_Values() []string { + return []string{ + ReportStatusWorkInProgress, + ReportStatusFailed, + ReportStatusCompleted, + } +} + const ( // ReportTypeFinding is a ReportType enum value ReportTypeFinding = "FINDING" @@ -10722,6 +10941,14 @@ const ( ReportTypeFull = "FULL" ) +// ReportType_Values returns all elements of the ReportType enum +func ReportType_Values() []string { + return []string{ + ReportTypeFinding, + ReportTypeFull, + } +} + const ( // ScopeTypeInstanceId is a ScopeType enum value ScopeTypeInstanceId = "INSTANCE_ID" @@ -10730,6 +10957,14 @@ const ( ScopeTypeRulesPackageArn = "RULES_PACKAGE_ARN" ) +// ScopeType_Values returns all elements of the ScopeType enum +func ScopeType_Values() []string { + return []string{ + ScopeTypeInstanceId, + ScopeTypeRulesPackageArn, + } +} + const ( // SeverityLow is a Severity enum value SeverityLow = "Low" @@ -10747,6 +10982,17 @@ const ( SeverityUndefined = "Undefined" ) +// Severity_Values returns all elements of the Severity enum +func Severity_Values() []string { + return []string{ + SeverityLow, + SeverityMedium, + SeverityHigh, + SeverityInformational, + SeverityUndefined, + } +} + const ( // StopActionStartEvaluation is a StopAction enum value StopActionStartEvaluation = "START_EVALUATION" @@ -10754,3 +11000,11 @@ const ( // StopActionSkipEvaluation is a StopAction enum value StopActionSkipEvaluation = "SKIP_EVALUATION" ) + +// StopAction_Values returns all elements of the StopAction enum +func StopAction_Values() []string { + return []string{ + StopActionStartEvaluation, + StopActionSkipEvaluation, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go b/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go index 75a6cecd0..e3d77114a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iot/api.go b/vendor/github.com/aws/aws-sdk-go/service/iot/api.go index 24d353e17..22aff6649 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iot/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iot/api.go @@ -1417,6 +1417,95 @@ func (c *IoT) ConfirmTopicRuleDestinationWithContext(ctx aws.Context, input *Con return out, req.Send() } +const opCreateAuditSuppression = "CreateAuditSuppression" + +// CreateAuditSuppressionRequest generates a "aws/request.Request" representing the +// client's request for the CreateAuditSuppression operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateAuditSuppression for more information on using the CreateAuditSuppression +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateAuditSuppressionRequest method. +// req, resp := client.CreateAuditSuppressionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) CreateAuditSuppressionRequest(input *CreateAuditSuppressionInput) (req *request.Request, output *CreateAuditSuppressionOutput) { + op := &request.Operation{ + Name: opCreateAuditSuppression, + HTTPMethod: "POST", + HTTPPath: "/audit/suppressions/create", + } + + if input == nil { + input = &CreateAuditSuppressionInput{} + } + + output = &CreateAuditSuppressionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateAuditSuppression API operation for AWS IoT. +// +// Creates a Device Defender audit suppression. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation CreateAuditSuppression for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is not valid. +// +// * ResourceAlreadyExistsException +// The resource already exists. +// +// * ThrottlingException +// The rate exceeds the limit. +// +// * InternalFailureException +// An unexpected error has occurred. +// +// * LimitExceededException +// A limit has been exceeded. +// +func (c *IoT) CreateAuditSuppression(input *CreateAuditSuppressionInput) (*CreateAuditSuppressionOutput, error) { + req, out := c.CreateAuditSuppressionRequest(input) + return out, req.Send() +} + +// CreateAuditSuppressionWithContext is the same as CreateAuditSuppression with the addition of +// the ability to pass a context and additional request options. +// +// See CreateAuditSuppression for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) CreateAuditSuppressionWithContext(ctx aws.Context, input *CreateAuditSuppressionInput, opts ...request.Option) (*CreateAuditSuppressionOutput, error) { + req, out := c.CreateAuditSuppressionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCreateAuthorizer = "CreateAuthorizer" // CreateAuthorizerRequest generates a "aws/request.Request" representing the @@ -1725,6 +1814,97 @@ func (c *IoT) CreateCertificateFromCsrWithContext(ctx aws.Context, input *Create return out, req.Send() } +const opCreateDimension = "CreateDimension" + +// CreateDimensionRequest generates a "aws/request.Request" representing the +// client's request for the CreateDimension operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDimension for more information on using the CreateDimension +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDimensionRequest method. +// req, resp := client.CreateDimensionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) CreateDimensionRequest(input *CreateDimensionInput) (req *request.Request, output *CreateDimensionOutput) { + op := &request.Operation{ + Name: opCreateDimension, + HTTPMethod: "POST", + HTTPPath: "/dimensions/{name}", + } + + if input == nil { + input = &CreateDimensionInput{} + } + + output = &CreateDimensionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDimension API operation for AWS IoT. +// +// Create a dimension that you can use to limit the scope of a metric used in +// a security profile for AWS IoT Device Defender. For example, using a TOPIC_FILTER +// dimension, you can narrow down the scope of the metric only to MQTT topics +// whose name match the pattern specified in the dimension. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation CreateDimension for usage and error information. +// +// Returned Error Types: +// * InternalFailureException +// An unexpected error has occurred. +// +// * InvalidRequestException +// The request is not valid. +// +// * LimitExceededException +// A limit has been exceeded. +// +// * ResourceAlreadyExistsException +// The resource already exists. +// +// * ThrottlingException +// The rate exceeds the limit. +// +func (c *IoT) CreateDimension(input *CreateDimensionInput) (*CreateDimensionOutput, error) { + req, out := c.CreateDimensionRequest(input) + return out, req.Send() +} + +// CreateDimensionWithContext is the same as CreateDimension with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDimension for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *IoT) CreateDimensionWithContext(ctx aws.Context, input *CreateDimensionInput, opts ...request.Option) (*CreateDimensionOutput, error) { + req, out := c.CreateDimensionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateDomainConfiguration = "CreateDomainConfiguration" // CreateDomainConfigurationRequest generates a "aws/request.Request" representing the @@ -2145,6 +2325,8 @@ func (c *IoT) CreateMitigationActionRequest(input *CreateMitigationActionInput) // CreateMitigationAction API operation for AWS IoT. // // Defines an action that can be applied to audit findings by using StartAuditMitigationActionsTask. +// Only certain types of mitigation actions can be applied to specific check +// names. For more information, see Mitigation actions (https://docs.aws.amazon.com/iot/latest/developerguide/device-defender-mitigation-actions.html). // Each mitigation action can apply only one type of change. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3685,6 +3867,89 @@ func (c *IoT) DeleteAccountAuditConfigurationWithContext(ctx aws.Context, input return out, req.Send() } +const opDeleteAuditSuppression = "DeleteAuditSuppression" + +// DeleteAuditSuppressionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAuditSuppression operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAuditSuppression for more information on using the DeleteAuditSuppression +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteAuditSuppressionRequest method. +// req, resp := client.DeleteAuditSuppressionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) DeleteAuditSuppressionRequest(input *DeleteAuditSuppressionInput) (req *request.Request, output *DeleteAuditSuppressionOutput) { + op := &request.Operation{ + Name: opDeleteAuditSuppression, + HTTPMethod: "POST", + HTTPPath: "/audit/suppressions/delete", + } + + if input == nil { + input = &DeleteAuditSuppressionInput{} + } + + output = &DeleteAuditSuppressionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteAuditSuppression API operation for AWS IoT. +// +// Deletes a Device Defender audit suppression. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation DeleteAuditSuppression for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is not valid. +// +// * ThrottlingException +// The rate exceeds the limit. +// +// * InternalFailureException +// An unexpected error has occurred. 
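As a companion to the CreateDimension doc comment above, a short sketch that creates a TOPIC_FILTER dimension and removes it again; the Name, Type, and StringValues field names and the topic filter value are assumptions based on the AWS IoT CreateDimension API, not part of this diff.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	client := iot.New(session.Must(session.NewSession()))

	// Create a dimension that scopes Device Defender metrics to one MQTT topic tree.
	_, err := client.CreateDimension(&iot.CreateDimensionInput{
		Name:         aws.String("TelemetryTopics"),               // assumed field names
		Type:         aws.String("TOPIC_FILTER"),                  // dimension type named in the doc comment above
		StringValues: []*string{aws.String("device/+/telemetry")}, // MQTT topic filter pattern
	})
	if err != nil {
		log.Fatal(err)
	}

	// Remove it again once no security profile references it.
	if _, err := client.DeleteDimension(&iot.DeleteDimensionInput{Name: aws.String("TelemetryTopics")}); err != nil {
		log.Fatal(err)
	}
}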
+// +func (c *IoT) DeleteAuditSuppression(input *DeleteAuditSuppressionInput) (*DeleteAuditSuppressionOutput, error) { + req, out := c.DeleteAuditSuppressionRequest(input) + return out, req.Send() +} + +// DeleteAuditSuppressionWithContext is the same as DeleteAuditSuppression with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAuditSuppression for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) DeleteAuditSuppressionWithContext(ctx aws.Context, input *DeleteAuditSuppressionInput, opts ...request.Option) (*DeleteAuditSuppressionOutput, error) { + req, out := c.DeleteAuditSuppressionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteAuthorizer = "DeleteAuthorizer" // DeleteAuthorizerRequest generates a "aws/request.Request" representing the @@ -4065,6 +4330,89 @@ func (c *IoT) DeleteCertificateWithContext(ctx aws.Context, input *DeleteCertifi return out, req.Send() } +const opDeleteDimension = "DeleteDimension" + +// DeleteDimensionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDimension operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDimension for more information on using the DeleteDimension +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDimensionRequest method. +// req, resp := client.DeleteDimensionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) DeleteDimensionRequest(input *DeleteDimensionInput) (req *request.Request, output *DeleteDimensionOutput) { + op := &request.Operation{ + Name: opDeleteDimension, + HTTPMethod: "DELETE", + HTTPPath: "/dimensions/{name}", + } + + if input == nil { + input = &DeleteDimensionInput{} + } + + output = &DeleteDimensionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteDimension API operation for AWS IoT. +// +// Removes the specified dimension from your AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation DeleteDimension for usage and error information. +// +// Returned Error Types: +// * InternalFailureException +// An unexpected error has occurred. +// +// * InvalidRequestException +// The request is not valid. +// +// * ThrottlingException +// The rate exceeds the limit. 
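The doc comments above say to use runtime type assertions with awserr.Error's Code and Message methods; here is a sketch of what that looks like for DeleteAuditSuppression, assuming the generated ErrCode* constants follow the SDK's usual naming and the same assumed input fields as in the earlier sketch.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	client := iot.New(session.Must(session.NewSession()))

	_, err := client.DeleteAuditSuppression(&iot.DeleteAuditSuppressionInput{
		CheckName: aws.String("DEVICE_CERTIFICATE_EXPIRING_CHECK"), // assumed fields
		ResourceIdentifier: &iot.ResourceIdentifier{
			DeviceCertificateId: aws.String("certificate-id-placeholder"),
		},
	})
	if err == nil {
		return
	}

	// Branch on the service error code rather than matching error strings.
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case iot.ErrCodeInvalidRequestException:
			fmt.Println("bad input:", aerr.Message())
		case iot.ErrCodeThrottlingException:
			fmt.Println("throttled, retry later")
		case iot.ErrCodeInternalFailureException:
			fmt.Println("service-side failure:", aerr.Message())
		default:
			fmt.Println("unexpected error:", aerr)
		}
	} else {
		fmt.Println("non-AWS error:", err)
	}
}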
+// +func (c *IoT) DeleteDimension(input *DeleteDimensionInput) (*DeleteDimensionOutput, error) { + req, out := c.DeleteDimensionRequest(input) + return out, req.Send() +} + +// DeleteDimensionWithContext is the same as DeleteDimension with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDimension for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) DeleteDimensionWithContext(ctx aws.Context, input *DeleteDimensionInput, opts ...request.Option) (*DeleteDimensionOutput, error) { + req, out := c.DeleteDimensionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteDomainConfiguration = "DeleteDomainConfiguration" // DeleteDomainConfigurationRequest generates a "aws/request.Request" representing the @@ -4890,6 +5238,10 @@ func (c *IoT) DeleteProvisioningTemplateRequest(input *DeleteProvisioningTemplat // * ThrottlingException // The rate exceeds the limit. // +// * ConflictingResourceUpdateException +// A conflicting resource update exception. This exception is thrown when two +// pending updates cause a conflict. +// // * UnauthorizedException // You are not authorized to perform this operation. // @@ -4982,6 +5334,10 @@ func (c *IoT) DeleteProvisioningTemplateVersionRequest(input *DeleteProvisioning // * UnauthorizedException // You are not authorized to perform this operation. // +// * ConflictingResourceUpdateException +// A conflicting resource update exception. This exception is thrown when two +// pending updates cause a conflict. +// // * DeleteConflictException // You can't delete the resource because it is attached to one or more resources. // @@ -6350,6 +6706,91 @@ func (c *IoT) DescribeAuditMitigationActionsTaskWithContext(ctx aws.Context, inp return out, req.Send() } +const opDescribeAuditSuppression = "DescribeAuditSuppression" + +// DescribeAuditSuppressionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAuditSuppression operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeAuditSuppression for more information on using the DescribeAuditSuppression +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeAuditSuppressionRequest method. 
+// req, resp := client.DescribeAuditSuppressionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) DescribeAuditSuppressionRequest(input *DescribeAuditSuppressionInput) (req *request.Request, output *DescribeAuditSuppressionOutput) { + op := &request.Operation{ + Name: opDescribeAuditSuppression, + HTTPMethod: "POST", + HTTPPath: "/audit/suppressions/describe", + } + + if input == nil { + input = &DescribeAuditSuppressionInput{} + } + + output = &DescribeAuditSuppressionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeAuditSuppression API operation for AWS IoT. +// +// Gets information about a Device Defender audit suppression. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation DescribeAuditSuppression for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is not valid. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ThrottlingException +// The rate exceeds the limit. +// +// * InternalFailureException +// An unexpected error has occurred. +// +func (c *IoT) DescribeAuditSuppression(input *DescribeAuditSuppressionInput) (*DescribeAuditSuppressionOutput, error) { + req, out := c.DescribeAuditSuppressionRequest(input) + return out, req.Send() +} + +// DescribeAuditSuppressionWithContext is the same as DescribeAuditSuppression with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeAuditSuppression for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) DescribeAuditSuppressionWithContext(ctx aws.Context, input *DescribeAuditSuppressionInput, opts ...request.Option) (*DescribeAuditSuppressionOutput, error) { + req, out := c.DescribeAuditSuppressionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeAuditTask = "DescribeAuditTask" // DescribeAuditTaskRequest generates a "aws/request.Request" representing the @@ -6884,6 +7325,91 @@ func (c *IoT) DescribeDefaultAuthorizerWithContext(ctx aws.Context, input *Descr return out, req.Send() } +const opDescribeDimension = "DescribeDimension" + +// DescribeDimensionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDimension operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeDimension for more information on using the DescribeDimension +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeDimensionRequest method. 
+// req, resp := client.DescribeDimensionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) DescribeDimensionRequest(input *DescribeDimensionInput) (req *request.Request, output *DescribeDimensionOutput) { + op := &request.Operation{ + Name: opDescribeDimension, + HTTPMethod: "GET", + HTTPPath: "/dimensions/{name}", + } + + if input == nil { + input = &DescribeDimensionInput{} + } + + output = &DescribeDimensionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeDimension API operation for AWS IoT. +// +// Provides details about a dimension that is defined in your AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation DescribeDimension for usage and error information. +// +// Returned Error Types: +// * InternalFailureException +// An unexpected error has occurred. +// +// * InvalidRequestException +// The request is not valid. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ThrottlingException +// The rate exceeds the limit. +// +func (c *IoT) DescribeDimension(input *DescribeDimensionInput) (*DescribeDimensionOutput, error) { + req, out := c.DescribeDimensionRequest(input) + return out, req.Send() +} + +// DescribeDimensionWithContext is the same as DescribeDimension with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeDimension for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) DescribeDimensionWithContext(ctx aws.Context, input *DescribeDimensionInput, opts ...request.Option) (*DescribeDimensionOutput, error) { + req, out := c.DescribeDimensionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeDomainConfiguration = "DescribeDomainConfiguration" // DescribeDomainConfigurationRequest generates a "aws/request.Request" representing the @@ -6944,6 +7470,9 @@ func (c *IoT) DescribeDomainConfigurationRequest(input *DescribeDomainConfigurat // * ThrottlingException // The rate exceeds the limit. // +// * InvalidRequestException +// The request is not valid. +// // * UnauthorizedException // You are not authorized to perform this operation. // @@ -10232,6 +10761,12 @@ func (c *IoT) ListActiveViolationsRequest(input *ListActiveViolationsInput) (req Name: opListActiveViolations, HTTPMethod: "GET", HTTPPath: "/active-violations", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -10288,6 +10823,58 @@ func (c *IoT) ListActiveViolationsWithContext(ctx aws.Context, input *ListActive return out, req.Send() } +// ListActiveViolationsPages iterates over the pages of a ListActiveViolations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListActiveViolations method for more information on how to use this operation. 
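The *WithContext variants accept any standard context.Context, so the usual timeout and cancellation plumbing applies; a sketch using DescribeAuditSuppressionWithContext with a deadline, assuming the SuppressIndefinitely output field and the same assumed input fields as above.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	client := iot.New(session.Must(session.NewSession()))

	// A plain context.Context satisfies aws.Context, so timeouts and
	// cancellation work with the *WithContext variants documented above.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	out, err := client.DescribeAuditSuppressionWithContext(ctx, &iot.DescribeAuditSuppressionInput{
		CheckName: aws.String("DEVICE_CERTIFICATE_EXPIRING_CHECK"), // assumed fields
		ResourceIdentifier: &iot.ResourceIdentifier{
			DeviceCertificateId: aws.String("certificate-id-placeholder"),
		},
	})
	if err != nil {
		// A timeout or cancellation surfaces here as a request error.
		fmt.Println("describe failed:", err)
		return
	}
	fmt.Println("suppressed indefinitely:", aws.BoolValue(out.SuppressIndefinitely))
}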
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListActiveViolations operation. +// pageNum := 0 +// err := client.ListActiveViolationsPages(params, +// func(page *iot.ListActiveViolationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListActiveViolationsPages(input *ListActiveViolationsInput, fn func(*ListActiveViolationsOutput, bool) bool) error { + return c.ListActiveViolationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListActiveViolationsPagesWithContext same as ListActiveViolationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListActiveViolationsPagesWithContext(ctx aws.Context, input *ListActiveViolationsInput, fn func(*ListActiveViolationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListActiveViolationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListActiveViolationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListActiveViolationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListAttachedPolicies = "ListAttachedPolicies" // ListAttachedPoliciesRequest generates a "aws/request.Request" representing the @@ -10317,6 +10904,12 @@ func (c *IoT) ListAttachedPoliciesRequest(input *ListAttachedPoliciesInput) (req Name: opListAttachedPolicies, HTTPMethod: "POST", HTTPPath: "/attached-policies/{target}", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"nextMarker"}, + LimitToken: "pageSize", + TruncationToken: "", + }, } if input == nil { @@ -10382,6 +10975,58 @@ func (c *IoT) ListAttachedPoliciesWithContext(ctx aws.Context, input *ListAttach return out, req.Send() } +// ListAttachedPoliciesPages iterates over the pages of a ListAttachedPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAttachedPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAttachedPolicies operation. +// pageNum := 0 +// err := client.ListAttachedPoliciesPages(params, +// func(page *iot.ListAttachedPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListAttachedPoliciesPages(input *ListAttachedPoliciesInput, fn func(*ListAttachedPoliciesOutput, bool) bool) error { + return c.ListAttachedPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAttachedPoliciesPagesWithContext same as ListAttachedPoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListAttachedPoliciesPagesWithContext(ctx aws.Context, input *ListAttachedPoliciesInput, fn func(*ListAttachedPoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAttachedPoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAttachedPoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAttachedPoliciesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListAuditFindings = "ListAuditFindings" // ListAuditFindingsRequest generates a "aws/request.Request" representing the @@ -10411,6 +11056,12 @@ func (c *IoT) ListAuditFindingsRequest(input *ListAuditFindingsInput) (req *requ Name: opListAuditFindings, HTTPMethod: "POST", HTTPPath: "/audit/findings", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -10425,8 +11076,7 @@ func (c *IoT) ListAuditFindingsRequest(input *ListAuditFindingsInput) (req *requ // ListAuditFindings API operation for AWS IoT. // // Lists the findings (results) of a Device Defender audit or of the audits -// performed during a specified time period. (Findings are retained for 180 -// days.) +// performed during a specified time period. (Findings are retained for 90 days.) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10466,6 +11116,58 @@ func (c *IoT) ListAuditFindingsWithContext(ctx aws.Context, input *ListAuditFind return out, req.Send() } +// ListAuditFindingsPages iterates over the pages of a ListAuditFindings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAuditFindings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAuditFindings operation. +// pageNum := 0 +// err := client.ListAuditFindingsPages(params, +// func(page *iot.ListAuditFindingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListAuditFindingsPages(input *ListAuditFindingsInput, fn func(*ListAuditFindingsOutput, bool) bool) error { + return c.ListAuditFindingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAuditFindingsPagesWithContext same as ListAuditFindingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
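Expanding the ListAuditFindingsPages doc example above into a runnable sketch that tallies findings across all pages; the StartTime, EndTime, and Findings field names are assumptions based on the ListAuditFindings API.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	client := iot.New(session.Must(session.NewSession()))

	// Collect findings from roughly the last day; the Pages helper
	// follows nextToken across however many pages the service returns.
	input := &iot.ListAuditFindingsInput{
		StartTime:  aws.Time(time.Now().Add(-24 * time.Hour)), // assumed field names
		EndTime:    aws.Time(time.Now()),
		MaxResults: aws.Int64(100),
	}

	total := 0
	err := client.ListAuditFindingsPages(input,
		func(page *iot.ListAuditFindingsOutput, lastPage bool) bool {
			total += len(page.Findings)
			return true // keep iterating until lastPage
		})
	if err != nil {
		fmt.Println("listing findings failed:", err)
		return
	}
	fmt.Println("findings:", total)
}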
+func (c *IoT) ListAuditFindingsPagesWithContext(ctx aws.Context, input *ListAuditFindingsInput, fn func(*ListAuditFindingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAuditFindingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAuditFindingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAuditFindingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListAuditMitigationActionsExecutions = "ListAuditMitigationActionsExecutions" // ListAuditMitigationActionsExecutionsRequest generates a "aws/request.Request" representing the @@ -10495,6 +11197,12 @@ func (c *IoT) ListAuditMitigationActionsExecutionsRequest(input *ListAuditMitiga Name: opListAuditMitigationActionsExecutions, HTTPMethod: "GET", HTTPPath: "/audit/mitigationactions/executions", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -10548,6 +11256,58 @@ func (c *IoT) ListAuditMitigationActionsExecutionsWithContext(ctx aws.Context, i return out, req.Send() } +// ListAuditMitigationActionsExecutionsPages iterates over the pages of a ListAuditMitigationActionsExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAuditMitigationActionsExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAuditMitigationActionsExecutions operation. +// pageNum := 0 +// err := client.ListAuditMitigationActionsExecutionsPages(params, +// func(page *iot.ListAuditMitigationActionsExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListAuditMitigationActionsExecutionsPages(input *ListAuditMitigationActionsExecutionsInput, fn func(*ListAuditMitigationActionsExecutionsOutput, bool) bool) error { + return c.ListAuditMitigationActionsExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAuditMitigationActionsExecutionsPagesWithContext same as ListAuditMitigationActionsExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListAuditMitigationActionsExecutionsPagesWithContext(ctx aws.Context, input *ListAuditMitigationActionsExecutionsInput, fn func(*ListAuditMitigationActionsExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAuditMitigationActionsExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAuditMitigationActionsExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAuditMitigationActionsExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListAuditMitigationActionsTasks = "ListAuditMitigationActionsTasks" // ListAuditMitigationActionsTasksRequest generates a "aws/request.Request" representing the @@ -10577,6 +11337,12 @@ func (c *IoT) ListAuditMitigationActionsTasksRequest(input *ListAuditMitigationA Name: opListAuditMitigationActionsTasks, HTTPMethod: "GET", HTTPPath: "/audit/mitigationactions/tasks", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -10630,6 +11396,198 @@ func (c *IoT) ListAuditMitigationActionsTasksWithContext(ctx aws.Context, input return out, req.Send() } +// ListAuditMitigationActionsTasksPages iterates over the pages of a ListAuditMitigationActionsTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAuditMitigationActionsTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAuditMitigationActionsTasks operation. +// pageNum := 0 +// err := client.ListAuditMitigationActionsTasksPages(params, +// func(page *iot.ListAuditMitigationActionsTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListAuditMitigationActionsTasksPages(input *ListAuditMitigationActionsTasksInput, fn func(*ListAuditMitigationActionsTasksOutput, bool) bool) error { + return c.ListAuditMitigationActionsTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAuditMitigationActionsTasksPagesWithContext same as ListAuditMitigationActionsTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListAuditMitigationActionsTasksPagesWithContext(ctx aws.Context, input *ListAuditMitigationActionsTasksInput, fn func(*ListAuditMitigationActionsTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAuditMitigationActionsTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAuditMitigationActionsTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAuditMitigationActionsTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListAuditSuppressions = "ListAuditSuppressions" + +// ListAuditSuppressionsRequest generates a "aws/request.Request" representing the +// client's request for the ListAuditSuppressions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListAuditSuppressions for more information on using the ListAuditSuppressions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAuditSuppressionsRequest method. +// req, resp := client.ListAuditSuppressionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) ListAuditSuppressionsRequest(input *ListAuditSuppressionsInput) (req *request.Request, output *ListAuditSuppressionsOutput) { + op := &request.Operation{ + Name: opListAuditSuppressions, + HTTPMethod: "POST", + HTTPPath: "/audit/suppressions/list", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAuditSuppressionsInput{} + } + + output = &ListAuditSuppressionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAuditSuppressions API operation for AWS IoT. +// +// Lists your Device Defender audit listings. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation ListAuditSuppressions for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is not valid. +// +// * ThrottlingException +// The rate exceeds the limit. +// +// * InternalFailureException +// An unexpected error has occurred. +// +func (c *IoT) ListAuditSuppressions(input *ListAuditSuppressionsInput) (*ListAuditSuppressionsOutput, error) { + req, out := c.ListAuditSuppressionsRequest(input) + return out, req.Send() +} + +// ListAuditSuppressionsWithContext is the same as ListAuditSuppressions with the addition of +// the ability to pass a context and additional request options. +// +// See ListAuditSuppressions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListAuditSuppressionsWithContext(ctx aws.Context, input *ListAuditSuppressionsInput, opts ...request.Option) (*ListAuditSuppressionsOutput, error) { + req, out := c.ListAuditSuppressionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAuditSuppressionsPages iterates over the pages of a ListAuditSuppressions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAuditSuppressions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAuditSuppressions operation. 
+// pageNum := 0 +// err := client.ListAuditSuppressionsPages(params, +// func(page *iot.ListAuditSuppressionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListAuditSuppressionsPages(input *ListAuditSuppressionsInput, fn func(*ListAuditSuppressionsOutput, bool) bool) error { + return c.ListAuditSuppressionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAuditSuppressionsPagesWithContext same as ListAuditSuppressionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListAuditSuppressionsPagesWithContext(ctx aws.Context, input *ListAuditSuppressionsInput, fn func(*ListAuditSuppressionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAuditSuppressionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAuditSuppressionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAuditSuppressionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListAuditTasks = "ListAuditTasks" // ListAuditTasksRequest generates a "aws/request.Request" representing the @@ -10659,6 +11617,12 @@ func (c *IoT) ListAuditTasksRequest(input *ListAuditTasksInput) (req *request.Re Name: opListAuditTasks, HTTPMethod: "GET", HTTPPath: "/audit/tasks", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -10713,6 +11677,58 @@ func (c *IoT) ListAuditTasksWithContext(ctx aws.Context, input *ListAuditTasksIn return out, req.Send() } +// ListAuditTasksPages iterates over the pages of a ListAuditTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAuditTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAuditTasks operation. +// pageNum := 0 +// err := client.ListAuditTasksPages(params, +// func(page *iot.ListAuditTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListAuditTasksPages(input *ListAuditTasksInput, fn func(*ListAuditTasksOutput, bool) bool) error { + return c.ListAuditTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAuditTasksPagesWithContext same as ListAuditTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
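A sketch of the PagesWithContext variant with early termination for ListAuditSuppressions: returning false from the callback stops the iteration after three pages, as the doc comment above describes, and the shared context can cancel in-flight requests. The Suppressions and CheckName field names are assumptions.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	client := iot.New(session.Must(session.NewSession()))
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	pageNum := 0
	err := client.ListAuditSuppressionsPagesWithContext(ctx,
		&iot.ListAuditSuppressionsInput{MaxResults: aws.Int64(25)},
		func(page *iot.ListAuditSuppressionsOutput, lastPage bool) bool {
			pageNum++
			for _, s := range page.Suppressions { // field name assumed
				fmt.Println(aws.StringValue(s.CheckName))
			}
			return pageNum < 3 // stop after three pages
		})
	if err != nil {
		fmt.Println("pagination error:", err)
	}
}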
+func (c *IoT) ListAuditTasksPagesWithContext(ctx aws.Context, input *ListAuditTasksInput, fn func(*ListAuditTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAuditTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAuditTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAuditTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListAuthorizers = "ListAuthorizers" // ListAuthorizersRequest generates a "aws/request.Request" representing the @@ -10742,6 +11758,12 @@ func (c *IoT) ListAuthorizersRequest(input *ListAuthorizersInput) (req *request. Name: opListAuthorizers, HTTPMethod: "GET", HTTPPath: "/authorizers/", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"nextMarker"}, + LimitToken: "pageSize", + TruncationToken: "", + }, } if input == nil { @@ -10801,6 +11823,58 @@ func (c *IoT) ListAuthorizersWithContext(ctx aws.Context, input *ListAuthorizers return out, req.Send() } +// ListAuthorizersPages iterates over the pages of a ListAuthorizers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAuthorizers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAuthorizers operation. +// pageNum := 0 +// err := client.ListAuthorizersPages(params, +// func(page *iot.ListAuthorizersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListAuthorizersPages(input *ListAuthorizersInput, fn func(*ListAuthorizersOutput, bool) bool) error { + return c.ListAuthorizersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAuthorizersPagesWithContext same as ListAuthorizersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListAuthorizersPagesWithContext(ctx aws.Context, input *ListAuthorizersInput, fn func(*ListAuthorizersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAuthorizersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAuthorizersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAuthorizersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListBillingGroups = "ListBillingGroups" // ListBillingGroupsRequest generates a "aws/request.Request" representing the @@ -10830,6 +11904,12 @@ func (c *IoT) ListBillingGroupsRequest(input *ListBillingGroupsInput) (req *requ Name: opListBillingGroups, HTTPMethod: "GET", HTTPPath: "/billing-groups", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -10886,6 +11966,58 @@ func (c *IoT) ListBillingGroupsWithContext(ctx aws.Context, input *ListBillingGr return out, req.Send() } +// ListBillingGroupsPages iterates over the pages of a ListBillingGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBillingGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBillingGroups operation. +// pageNum := 0 +// err := client.ListBillingGroupsPages(params, +// func(page *iot.ListBillingGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListBillingGroupsPages(input *ListBillingGroupsInput, fn func(*ListBillingGroupsOutput, bool) bool) error { + return c.ListBillingGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListBillingGroupsPagesWithContext same as ListBillingGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListBillingGroupsPagesWithContext(ctx aws.Context, input *ListBillingGroupsInput, fn func(*ListBillingGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListBillingGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListBillingGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListBillingGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListCACertificates = "ListCACertificates" // ListCACertificatesRequest generates a "aws/request.Request" representing the @@ -10915,6 +12047,12 @@ func (c *IoT) ListCACertificatesRequest(input *ListCACertificatesInput) (req *re Name: opListCACertificates, HTTPMethod: "GET", HTTPPath: "/cacertificates", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"nextMarker"}, + LimitToken: "pageSize", + TruncationToken: "", + }, } if input == nil { @@ -10977,6 +12115,58 @@ func (c *IoT) ListCACertificatesWithContext(ctx aws.Context, input *ListCACertif return out, req.Send() } +// ListCACertificatesPages iterates over the pages of a ListCACertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See ListCACertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCACertificates operation. +// pageNum := 0 +// err := client.ListCACertificatesPages(params, +// func(page *iot.ListCACertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListCACertificatesPages(input *ListCACertificatesInput, fn func(*ListCACertificatesOutput, bool) bool) error { + return c.ListCACertificatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListCACertificatesPagesWithContext same as ListCACertificatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListCACertificatesPagesWithContext(ctx aws.Context, input *ListCACertificatesInput, fn func(*ListCACertificatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListCACertificatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListCACertificatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListCACertificatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListCertificates = "ListCertificates" // ListCertificatesRequest generates a "aws/request.Request" representing the @@ -11006,6 +12196,12 @@ func (c *IoT) ListCertificatesRequest(input *ListCertificatesInput) (req *reques Name: opListCertificates, HTTPMethod: "GET", HTTPPath: "/certificates", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"nextMarker"}, + LimitToken: "pageSize", + TruncationToken: "", + }, } if input == nil { @@ -11068,6 +12264,58 @@ func (c *IoT) ListCertificatesWithContext(ctx aws.Context, input *ListCertificat return out, req.Send() } +// ListCertificatesPages iterates over the pages of a ListCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCertificates operation. +// pageNum := 0 +// err := client.ListCertificatesPages(params, +// func(page *iot.ListCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListCertificatesPages(input *ListCertificatesInput, fn func(*ListCertificatesOutput, bool) bool) error { + return c.ListCertificatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListCertificatesPagesWithContext same as ListCertificatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListCertificatesPagesWithContext(ctx aws.Context, input *ListCertificatesInput, fn func(*ListCertificatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListCertificatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListCertificatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListCertificatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListCertificatesByCA = "ListCertificatesByCA" // ListCertificatesByCARequest generates a "aws/request.Request" representing the @@ -11097,6 +12345,12 @@ func (c *IoT) ListCertificatesByCARequest(input *ListCertificatesByCAInput) (req Name: opListCertificatesByCA, HTTPMethod: "GET", HTTPPath: "/certificates-by-ca/{caCertificateId}", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"nextMarker"}, + LimitToken: "pageSize", + TruncationToken: "", + }, } if input == nil { @@ -11156,6 +12410,198 @@ func (c *IoT) ListCertificatesByCAWithContext(ctx aws.Context, input *ListCertif return out, req.Send() } +// ListCertificatesByCAPages iterates over the pages of a ListCertificatesByCA operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCertificatesByCA method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCertificatesByCA operation. +// pageNum := 0 +// err := client.ListCertificatesByCAPages(params, +// func(page *iot.ListCertificatesByCAOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListCertificatesByCAPages(input *ListCertificatesByCAInput, fn func(*ListCertificatesByCAOutput, bool) bool) error { + return c.ListCertificatesByCAPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListCertificatesByCAPagesWithContext same as ListCertificatesByCAPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListCertificatesByCAPagesWithContext(ctx aws.Context, input *ListCertificatesByCAInput, fn func(*ListCertificatesByCAOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListCertificatesByCAInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListCertificatesByCARequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListCertificatesByCAOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDimensions = "ListDimensions" + +// ListDimensionsRequest generates a "aws/request.Request" representing the +// client's request for the ListDimensions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
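The certificate listings above paginate with marker/nextMarker/pageSize rather than nextToken/maxResults. Where the Pages helpers are not a good fit (for example, when each page must be checkpointed), the loop can be written by hand; a sketch assuming ListCertificates exposes Certificates and NextMarker on its output.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	client := iot.New(session.Must(session.NewSession()))

	// Manual pagination: carry NextMarker from each response into the
	// next request's Marker until the service stops returning one.
	var marker *string
	for {
		out, err := client.ListCertificates(&iot.ListCertificatesInput{
			PageSize: aws.Int64(50),
			Marker:   marker,
		})
		if err != nil {
			fmt.Println("list certificates failed:", err)
			return
		}
		for _, cert := range out.Certificates {
			fmt.Println(aws.StringValue(cert.CertificateId), aws.StringValue(cert.Status))
		}
		if out.NextMarker == nil || aws.StringValue(out.NextMarker) == "" {
			break
		}
		marker = out.NextMarker
	}
}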
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDimensions for more information on using the ListDimensions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDimensionsRequest method. +// req, resp := client.ListDimensionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) ListDimensionsRequest(input *ListDimensionsInput) (req *request.Request, output *ListDimensionsOutput) { + op := &request.Operation{ + Name: opListDimensions, + HTTPMethod: "GET", + HTTPPath: "/dimensions", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDimensionsInput{} + } + + output = &ListDimensionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDimensions API operation for AWS IoT. +// +// List the set of dimensions that are defined for your AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation ListDimensions for usage and error information. +// +// Returned Error Types: +// * InternalFailureException +// An unexpected error has occurred. +// +// * InvalidRequestException +// The request is not valid. +// +// * ThrottlingException +// The rate exceeds the limit. +// +func (c *IoT) ListDimensions(input *ListDimensionsInput) (*ListDimensionsOutput, error) { + req, out := c.ListDimensionsRequest(input) + return out, req.Send() +} + +// ListDimensionsWithContext is the same as ListDimensions with the addition of +// the ability to pass a context and additional request options. +// +// See ListDimensions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListDimensionsWithContext(ctx aws.Context, input *ListDimensionsInput, opts ...request.Option) (*ListDimensionsOutput, error) { + req, out := c.ListDimensionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDimensionsPages iterates over the pages of a ListDimensions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDimensions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDimensions operation. 
+// pageNum := 0 +// err := client.ListDimensionsPages(params, +// func(page *iot.ListDimensionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListDimensionsPages(input *ListDimensionsInput, fn func(*ListDimensionsOutput, bool) bool) error { + return c.ListDimensionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDimensionsPagesWithContext same as ListDimensionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListDimensionsPagesWithContext(ctx aws.Context, input *ListDimensionsInput, fn func(*ListDimensionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDimensionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDimensionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDimensionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListDomainConfigurations = "ListDomainConfigurations" // ListDomainConfigurationsRequest generates a "aws/request.Request" representing the @@ -11185,6 +12631,12 @@ func (c *IoT) ListDomainConfigurationsRequest(input *ListDomainConfigurationsInp Name: opListDomainConfigurations, HTTPMethod: "GET", HTTPPath: "/domainConfigurations", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"nextMarker"}, + LimitToken: "pageSize", + TruncationToken: "", + }, } if input == nil { @@ -11247,6 +12699,58 @@ func (c *IoT) ListDomainConfigurationsWithContext(ctx aws.Context, input *ListDo return out, req.Send() } +// ListDomainConfigurationsPages iterates over the pages of a ListDomainConfigurations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDomainConfigurations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDomainConfigurations operation. +// pageNum := 0 +// err := client.ListDomainConfigurationsPages(params, +// func(page *iot.ListDomainConfigurationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListDomainConfigurationsPages(input *ListDomainConfigurationsInput, fn func(*ListDomainConfigurationsOutput, bool) bool) error { + return c.ListDomainConfigurationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDomainConfigurationsPagesWithContext same as ListDomainConfigurationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
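ListDimensions returns only dimension names, so a common pattern is to page through the names and describe each one; a sketch assuming the DimensionNames output field and the DescribeDimension fields used earlier.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	client := iot.New(session.Must(session.NewSession()))

	// Page through dimension names, then fetch details for each name.
	err := client.ListDimensionsPages(&iot.ListDimensionsInput{MaxResults: aws.Int64(50)},
		func(page *iot.ListDimensionsOutput, lastPage bool) bool {
			for _, name := range page.DimensionNames { // field name assumed
				desc, err := client.DescribeDimension(&iot.DescribeDimensionInput{Name: name})
				if err != nil {
					fmt.Println("describe failed for", aws.StringValue(name), err)
					continue
				}
				fmt.Println(aws.StringValue(desc.Name), aws.StringValueSlice(desc.StringValues))
			}
			return true
		})
	if err != nil {
		fmt.Println("list dimensions failed:", err)
	}
}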
+func (c *IoT) ListDomainConfigurationsPagesWithContext(ctx aws.Context, input *ListDomainConfigurationsInput, fn func(*ListDomainConfigurationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDomainConfigurationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDomainConfigurationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDomainConfigurationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListIndices = "ListIndices" // ListIndicesRequest generates a "aws/request.Request" representing the @@ -11276,6 +12780,12 @@ func (c *IoT) ListIndicesRequest(input *ListIndicesInput) (req *request.Request, Name: opListIndices, HTTPMethod: "GET", HTTPPath: "/indices", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -11335,6 +12845,58 @@ func (c *IoT) ListIndicesWithContext(ctx aws.Context, input *ListIndicesInput, o return out, req.Send() } +// ListIndicesPages iterates over the pages of a ListIndices operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListIndices method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListIndices operation. +// pageNum := 0 +// err := client.ListIndicesPages(params, +// func(page *iot.ListIndicesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListIndicesPages(input *ListIndicesInput, fn func(*ListIndicesOutput, bool) bool) error { + return c.ListIndicesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListIndicesPagesWithContext same as ListIndicesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListIndicesPagesWithContext(ctx aws.Context, input *ListIndicesInput, fn func(*ListIndicesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListIndicesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListIndicesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListIndicesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListJobExecutionsForJob = "ListJobExecutionsForJob" // ListJobExecutionsForJobRequest generates a "aws/request.Request" representing the @@ -11364,6 +12926,12 @@ func (c *IoT) ListJobExecutionsForJobRequest(input *ListJobExecutionsForJobInput Name: opListJobExecutionsForJob, HTTPMethod: "GET", HTTPPath: "/jobs/{jobId}/things", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -11420,6 +12988,58 @@ func (c *IoT) ListJobExecutionsForJobWithContext(ctx aws.Context, input *ListJob return out, req.Send() } +// ListJobExecutionsForJobPages iterates over the pages of a ListJobExecutionsForJob operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJobExecutionsForJob method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJobExecutionsForJob operation. +// pageNum := 0 +// err := client.ListJobExecutionsForJobPages(params, +// func(page *iot.ListJobExecutionsForJobOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListJobExecutionsForJobPages(input *ListJobExecutionsForJobInput, fn func(*ListJobExecutionsForJobOutput, bool) bool) error { + return c.ListJobExecutionsForJobPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListJobExecutionsForJobPagesWithContext same as ListJobExecutionsForJobPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListJobExecutionsForJobPagesWithContext(ctx aws.Context, input *ListJobExecutionsForJobInput, fn func(*ListJobExecutionsForJobOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListJobExecutionsForJobInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListJobExecutionsForJobRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListJobExecutionsForJobOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListJobExecutionsForThing = "ListJobExecutionsForThing" // ListJobExecutionsForThingRequest generates a "aws/request.Request" representing the @@ -11449,6 +13069,12 @@ func (c *IoT) ListJobExecutionsForThingRequest(input *ListJobExecutionsForThingI Name: opListJobExecutionsForThing, HTTPMethod: "GET", HTTPPath: "/things/{thingName}/jobs", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -11505,6 +13131,58 @@ func (c *IoT) ListJobExecutionsForThingWithContext(ctx aws.Context, input *ListJ return out, req.Send() } +// ListJobExecutionsForThingPages iterates over the pages of a ListJobExecutionsForThing operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJobExecutionsForThing method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJobExecutionsForThing operation. +// pageNum := 0 +// err := client.ListJobExecutionsForThingPages(params, +// func(page *iot.ListJobExecutionsForThingOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListJobExecutionsForThingPages(input *ListJobExecutionsForThingInput, fn func(*ListJobExecutionsForThingOutput, bool) bool) error { + return c.ListJobExecutionsForThingPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListJobExecutionsForThingPagesWithContext same as ListJobExecutionsForThingPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListJobExecutionsForThingPagesWithContext(ctx aws.Context, input *ListJobExecutionsForThingInput, fn func(*ListJobExecutionsForThingOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListJobExecutionsForThingInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListJobExecutionsForThingRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListJobExecutionsForThingOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListJobs = "ListJobs" // ListJobsRequest generates a "aws/request.Request" representing the @@ -11534,6 +13212,12 @@ func (c *IoT) ListJobsRequest(input *ListJobsInput) (req *request.Request, outpu Name: opListJobs, HTTPMethod: "GET", HTTPPath: "/jobs", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -11590,6 +13274,58 @@ func (c *IoT) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts .. 
return out, req.Send() } +// ListJobsPages iterates over the pages of a ListJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJobs operation. +// pageNum := 0 +// err := client.ListJobsPages(params, +// func(page *iot.ListJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListJobsPages(input *ListJobsInput, fn func(*ListJobsOutput, bool) bool) error { + return c.ListJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListJobsPagesWithContext same as ListJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInput, fn func(*ListJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListMitigationActions = "ListMitigationActions" // ListMitigationActionsRequest generates a "aws/request.Request" representing the @@ -11619,6 +13355,12 @@ func (c *IoT) ListMitigationActionsRequest(input *ListMitigationActionsInput) (r Name: opListMitigationActions, HTTPMethod: "GET", HTTPPath: "/mitigationactions/actions", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -11672,6 +13414,58 @@ func (c *IoT) ListMitigationActionsWithContext(ctx aws.Context, input *ListMitig return out, req.Send() } +// ListMitigationActionsPages iterates over the pages of a ListMitigationActions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMitigationActions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMitigationActions operation. +// pageNum := 0 +// err := client.ListMitigationActionsPages(params, +// func(page *iot.ListMitigationActionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListMitigationActionsPages(input *ListMitigationActionsInput, fn func(*ListMitigationActionsOutput, bool) bool) error { + return c.ListMitigationActionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMitigationActionsPagesWithContext same as ListMitigationActionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListMitigationActionsPagesWithContext(ctx aws.Context, input *ListMitigationActionsInput, fn func(*ListMitigationActionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMitigationActionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMitigationActionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListMitigationActionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListOTAUpdates = "ListOTAUpdates" // ListOTAUpdatesRequest generates a "aws/request.Request" representing the @@ -11701,6 +13495,12 @@ func (c *IoT) ListOTAUpdatesRequest(input *ListOTAUpdatesInput) (req *request.Re Name: opListOTAUpdates, HTTPMethod: "GET", HTTPPath: "/otaUpdates", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -11760,6 +13560,58 @@ func (c *IoT) ListOTAUpdatesWithContext(ctx aws.Context, input *ListOTAUpdatesIn return out, req.Send() } +// ListOTAUpdatesPages iterates over the pages of a ListOTAUpdates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListOTAUpdates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListOTAUpdates operation. +// pageNum := 0 +// err := client.ListOTAUpdatesPages(params, +// func(page *iot.ListOTAUpdatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListOTAUpdatesPages(input *ListOTAUpdatesInput, fn func(*ListOTAUpdatesOutput, bool) bool) error { + return c.ListOTAUpdatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListOTAUpdatesPagesWithContext same as ListOTAUpdatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListOTAUpdatesPagesWithContext(ctx aws.Context, input *ListOTAUpdatesInput, fn func(*ListOTAUpdatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListOTAUpdatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListOTAUpdatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListOTAUpdatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListOutgoingCertificates = "ListOutgoingCertificates" // ListOutgoingCertificatesRequest generates a "aws/request.Request" representing the @@ -11789,6 +13641,12 @@ func (c *IoT) ListOutgoingCertificatesRequest(input *ListOutgoingCertificatesInp Name: opListOutgoingCertificates, HTTPMethod: "GET", HTTPPath: "/certificates-out-going", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"nextMarker"}, + LimitToken: "pageSize", + TruncationToken: "", + }, } if input == nil { @@ -11848,6 +13706,58 @@ func (c *IoT) ListOutgoingCertificatesWithContext(ctx aws.Context, input *ListOu return out, req.Send() } +// ListOutgoingCertificatesPages iterates over the pages of a ListOutgoingCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListOutgoingCertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListOutgoingCertificates operation. +// pageNum := 0 +// err := client.ListOutgoingCertificatesPages(params, +// func(page *iot.ListOutgoingCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListOutgoingCertificatesPages(input *ListOutgoingCertificatesInput, fn func(*ListOutgoingCertificatesOutput, bool) bool) error { + return c.ListOutgoingCertificatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListOutgoingCertificatesPagesWithContext same as ListOutgoingCertificatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListOutgoingCertificatesPagesWithContext(ctx aws.Context, input *ListOutgoingCertificatesInput, fn func(*ListOutgoingCertificatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListOutgoingCertificatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListOutgoingCertificatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListOutgoingCertificatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListPolicies = "ListPolicies" // ListPoliciesRequest generates a "aws/request.Request" representing the @@ -11877,6 +13787,12 @@ func (c *IoT) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Reques Name: opListPolicies, HTTPMethod: "GET", HTTPPath: "/policies", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"nextMarker"}, + LimitToken: "pageSize", + TruncationToken: "", + }, } if input == nil { @@ -11936,6 +13852,58 @@ func (c *IoT) ListPoliciesWithContext(ctx aws.Context, input *ListPoliciesInput, return out, req.Send() } +// ListPoliciesPages iterates over the pages of a ListPolicies operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPolicies operation. +// pageNum := 0 +// err := client.ListPoliciesPages(params, +// func(page *iot.ListPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListPoliciesPages(input *ListPoliciesInput, fn func(*ListPoliciesOutput, bool) bool) error { + return c.ListPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPoliciesPagesWithContext same as ListPoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListPoliciesPagesWithContext(ctx aws.Context, input *ListPoliciesInput, fn func(*ListPoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPoliciesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListPolicyPrincipals = "ListPolicyPrincipals" // ListPolicyPrincipalsRequest generates a "aws/request.Request" representing the @@ -11970,6 +13938,12 @@ func (c *IoT) ListPolicyPrincipalsRequest(input *ListPolicyPrincipalsInput) (req Name: opListPolicyPrincipals, HTTPMethod: "GET", HTTPPath: "/policy-principals", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"nextMarker"}, + LimitToken: "pageSize", + TruncationToken: "", + }, } if input == nil { @@ -12038,6 +14012,62 @@ func (c *IoT) ListPolicyPrincipalsWithContext(ctx aws.Context, input *ListPolicy return out, req.Send() } +// ListPolicyPrincipalsPages iterates over the pages of a ListPolicyPrincipals operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPolicyPrincipals method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPolicyPrincipals operation. +// pageNum := 0 +// err := client.ListPolicyPrincipalsPages(params, +// func(page *iot.ListPolicyPrincipalsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +// +// Deprecated: ListPolicyPrincipalsPages has been deprecated +func (c *IoT) ListPolicyPrincipalsPages(input *ListPolicyPrincipalsInput, fn func(*ListPolicyPrincipalsOutput, bool) bool) error { + return c.ListPolicyPrincipalsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPolicyPrincipalsPagesWithContext same as ListPolicyPrincipalsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: ListPolicyPrincipalsPagesWithContext has been deprecated +func (c *IoT) ListPolicyPrincipalsPagesWithContext(ctx aws.Context, input *ListPolicyPrincipalsInput, fn func(*ListPolicyPrincipalsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPolicyPrincipalsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPolicyPrincipalsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPolicyPrincipalsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListPolicyVersions = "ListPolicyVersions" // ListPolicyVersionsRequest generates a "aws/request.Request" representing the @@ -12163,6 +14193,12 @@ func (c *IoT) ListPrincipalPoliciesRequest(input *ListPrincipalPoliciesInput) (r Name: opListPrincipalPolicies, HTTPMethod: "GET", HTTPPath: "/principal-policies", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"nextMarker"}, + LimitToken: "pageSize", + TruncationToken: "", + }, } if input == nil { @@ -12232,6 +14268,62 @@ func (c *IoT) ListPrincipalPoliciesWithContext(ctx aws.Context, input *ListPrinc return out, req.Send() } +// ListPrincipalPoliciesPages iterates over the pages of a ListPrincipalPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPrincipalPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPrincipalPolicies operation. +// pageNum := 0 +// err := client.ListPrincipalPoliciesPages(params, +// func(page *iot.ListPrincipalPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +// +// Deprecated: ListPrincipalPoliciesPages has been deprecated +func (c *IoT) ListPrincipalPoliciesPages(input *ListPrincipalPoliciesInput, fn func(*ListPrincipalPoliciesOutput, bool) bool) error { + return c.ListPrincipalPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPrincipalPoliciesPagesWithContext same as ListPrincipalPoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: ListPrincipalPoliciesPagesWithContext has been deprecated +func (c *IoT) ListPrincipalPoliciesPagesWithContext(ctx aws.Context, input *ListPrincipalPoliciesInput, fn func(*ListPrincipalPoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPrincipalPoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPrincipalPoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPrincipalPoliciesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListPrincipalThings = "ListPrincipalThings" // ListPrincipalThingsRequest generates a "aws/request.Request" representing the @@ -12261,6 +14353,12 @@ func (c *IoT) ListPrincipalThingsRequest(input *ListPrincipalThingsInput) (req * Name: opListPrincipalThings, HTTPMethod: "GET", HTTPPath: "/principals/things", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -12325,6 +14423,58 @@ func (c *IoT) ListPrincipalThingsWithContext(ctx aws.Context, input *ListPrincip return out, req.Send() } +// ListPrincipalThingsPages iterates over the pages of a ListPrincipalThings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPrincipalThings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPrincipalThings operation. +// pageNum := 0 +// err := client.ListPrincipalThingsPages(params, +// func(page *iot.ListPrincipalThingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListPrincipalThingsPages(input *ListPrincipalThingsInput, fn func(*ListPrincipalThingsOutput, bool) bool) error { + return c.ListPrincipalThingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPrincipalThingsPagesWithContext same as ListPrincipalThingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListPrincipalThingsPagesWithContext(ctx aws.Context, input *ListPrincipalThingsInput, fn func(*ListPrincipalThingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPrincipalThingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPrincipalThingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPrincipalThingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListProvisioningTemplateVersions = "ListProvisioningTemplateVersions" // ListProvisioningTemplateVersionsRequest generates a "aws/request.Request" representing the @@ -12354,6 +14504,12 @@ func (c *IoT) ListProvisioningTemplateVersionsRequest(input *ListProvisioningTem Name: opListProvisioningTemplateVersions, HTTPMethod: "GET", HTTPPath: "/provisioning-templates/{templateName}/versions", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -12413,6 +14569,58 @@ func (c *IoT) ListProvisioningTemplateVersionsWithContext(ctx aws.Context, input return out, req.Send() } +// ListProvisioningTemplateVersionsPages iterates over the pages of a ListProvisioningTemplateVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListProvisioningTemplateVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListProvisioningTemplateVersions operation. +// pageNum := 0 +// err := client.ListProvisioningTemplateVersionsPages(params, +// func(page *iot.ListProvisioningTemplateVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListProvisioningTemplateVersionsPages(input *ListProvisioningTemplateVersionsInput, fn func(*ListProvisioningTemplateVersionsOutput, bool) bool) error { + return c.ListProvisioningTemplateVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListProvisioningTemplateVersionsPagesWithContext same as ListProvisioningTemplateVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListProvisioningTemplateVersionsPagesWithContext(ctx aws.Context, input *ListProvisioningTemplateVersionsInput, fn func(*ListProvisioningTemplateVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListProvisioningTemplateVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListProvisioningTemplateVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListProvisioningTemplateVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListProvisioningTemplates = "ListProvisioningTemplates" // ListProvisioningTemplatesRequest generates a "aws/request.Request" representing the @@ -12442,6 +14650,12 @@ func (c *IoT) ListProvisioningTemplatesRequest(input *ListProvisioningTemplatesI Name: opListProvisioningTemplates, HTTPMethod: "GET", HTTPPath: "/provisioning-templates", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -12498,6 +14712,58 @@ func (c *IoT) ListProvisioningTemplatesWithContext(ctx aws.Context, input *ListP return out, req.Send() } +// ListProvisioningTemplatesPages iterates over the pages of a ListProvisioningTemplates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListProvisioningTemplates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListProvisioningTemplates operation. +// pageNum := 0 +// err := client.ListProvisioningTemplatesPages(params, +// func(page *iot.ListProvisioningTemplatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListProvisioningTemplatesPages(input *ListProvisioningTemplatesInput, fn func(*ListProvisioningTemplatesOutput, bool) bool) error { + return c.ListProvisioningTemplatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListProvisioningTemplatesPagesWithContext same as ListProvisioningTemplatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListProvisioningTemplatesPagesWithContext(ctx aws.Context, input *ListProvisioningTemplatesInput, fn func(*ListProvisioningTemplatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListProvisioningTemplatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListProvisioningTemplatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListProvisioningTemplatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListRoleAliases = "ListRoleAliases" // ListRoleAliasesRequest generates a "aws/request.Request" representing the @@ -12527,6 +14793,12 @@ func (c *IoT) ListRoleAliasesRequest(input *ListRoleAliasesInput) (req *request. 
Name: opListRoleAliases, HTTPMethod: "GET", HTTPPath: "/role-aliases", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"nextMarker"}, + LimitToken: "pageSize", + TruncationToken: "", + }, } if input == nil { @@ -12586,6 +14858,58 @@ func (c *IoT) ListRoleAliasesWithContext(ctx aws.Context, input *ListRoleAliases return out, req.Send() } +// ListRoleAliasesPages iterates over the pages of a ListRoleAliases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRoleAliases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRoleAliases operation. +// pageNum := 0 +// err := client.ListRoleAliasesPages(params, +// func(page *iot.ListRoleAliasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListRoleAliasesPages(input *ListRoleAliasesInput, fn func(*ListRoleAliasesOutput, bool) bool) error { + return c.ListRoleAliasesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListRoleAliasesPagesWithContext same as ListRoleAliasesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListRoleAliasesPagesWithContext(ctx aws.Context, input *ListRoleAliasesInput, fn func(*ListRoleAliasesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListRoleAliasesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListRoleAliasesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListRoleAliasesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListScheduledAudits = "ListScheduledAudits" // ListScheduledAuditsRequest generates a "aws/request.Request" representing the @@ -12615,6 +14939,12 @@ func (c *IoT) ListScheduledAuditsRequest(input *ListScheduledAuditsInput) (req * Name: opListScheduledAudits, HTTPMethod: "GET", HTTPPath: "/audit/scheduledaudits", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -12668,6 +14998,58 @@ func (c *IoT) ListScheduledAuditsWithContext(ctx aws.Context, input *ListSchedul return out, req.Send() } +// ListScheduledAuditsPages iterates over the pages of a ListScheduledAudits operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListScheduledAudits method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListScheduledAudits operation. 
+// pageNum := 0 +// err := client.ListScheduledAuditsPages(params, +// func(page *iot.ListScheduledAuditsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListScheduledAuditsPages(input *ListScheduledAuditsInput, fn func(*ListScheduledAuditsOutput, bool) bool) error { + return c.ListScheduledAuditsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListScheduledAuditsPagesWithContext same as ListScheduledAuditsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListScheduledAuditsPagesWithContext(ctx aws.Context, input *ListScheduledAuditsInput, fn func(*ListScheduledAuditsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListScheduledAuditsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListScheduledAuditsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListScheduledAuditsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListSecurityProfiles = "ListSecurityProfiles" // ListSecurityProfilesRequest generates a "aws/request.Request" representing the @@ -12697,6 +15079,12 @@ func (c *IoT) ListSecurityProfilesRequest(input *ListSecurityProfilesInput) (req Name: opListSecurityProfiles, HTTPMethod: "GET", HTTPPath: "/security-profiles", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -12731,6 +15119,9 @@ func (c *IoT) ListSecurityProfilesRequest(input *ListSecurityProfilesInput) (req // * InternalFailureException // An unexpected error has occurred. // +// * ResourceNotFoundException +// The specified resource does not exist. +// func (c *IoT) ListSecurityProfiles(input *ListSecurityProfilesInput) (*ListSecurityProfilesOutput, error) { req, out := c.ListSecurityProfilesRequest(input) return out, req.Send() @@ -12752,6 +15143,58 @@ func (c *IoT) ListSecurityProfilesWithContext(ctx aws.Context, input *ListSecuri return out, req.Send() } +// ListSecurityProfilesPages iterates over the pages of a ListSecurityProfiles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSecurityProfiles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSecurityProfiles operation. 
+// pageNum := 0 +// err := client.ListSecurityProfilesPages(params, +// func(page *iot.ListSecurityProfilesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListSecurityProfilesPages(input *ListSecurityProfilesInput, fn func(*ListSecurityProfilesOutput, bool) bool) error { + return c.ListSecurityProfilesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSecurityProfilesPagesWithContext same as ListSecurityProfilesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListSecurityProfilesPagesWithContext(ctx aws.Context, input *ListSecurityProfilesInput, fn func(*ListSecurityProfilesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSecurityProfilesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSecurityProfilesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListSecurityProfilesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListSecurityProfilesForTarget = "ListSecurityProfilesForTarget" // ListSecurityProfilesForTargetRequest generates a "aws/request.Request" representing the @@ -12781,6 +15224,12 @@ func (c *IoT) ListSecurityProfilesForTargetRequest(input *ListSecurityProfilesFo Name: opListSecurityProfilesForTarget, HTTPMethod: "GET", HTTPPath: "/security-profiles-for-target", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -12837,6 +15286,58 @@ func (c *IoT) ListSecurityProfilesForTargetWithContext(ctx aws.Context, input *L return out, req.Send() } +// ListSecurityProfilesForTargetPages iterates over the pages of a ListSecurityProfilesForTarget operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSecurityProfilesForTarget method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSecurityProfilesForTarget operation. +// pageNum := 0 +// err := client.ListSecurityProfilesForTargetPages(params, +// func(page *iot.ListSecurityProfilesForTargetOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListSecurityProfilesForTargetPages(input *ListSecurityProfilesForTargetInput, fn func(*ListSecurityProfilesForTargetOutput, bool) bool) error { + return c.ListSecurityProfilesForTargetPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSecurityProfilesForTargetPagesWithContext same as ListSecurityProfilesForTargetPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListSecurityProfilesForTargetPagesWithContext(ctx aws.Context, input *ListSecurityProfilesForTargetInput, fn func(*ListSecurityProfilesForTargetOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSecurityProfilesForTargetInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSecurityProfilesForTargetRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListSecurityProfilesForTargetOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListStreams = "ListStreams" // ListStreamsRequest generates a "aws/request.Request" representing the @@ -12866,6 +15367,12 @@ func (c *IoT) ListStreamsRequest(input *ListStreamsInput) (req *request.Request, Name: opListStreams, HTTPMethod: "GET", HTTPPath: "/streams", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -12925,6 +15432,58 @@ func (c *IoT) ListStreamsWithContext(ctx aws.Context, input *ListStreamsInput, o return out, req.Send() } +// ListStreamsPages iterates over the pages of a ListStreams operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStreams method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStreams operation. +// pageNum := 0 +// err := client.ListStreamsPages(params, +// func(page *iot.ListStreamsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListStreamsPages(input *ListStreamsInput, fn func(*ListStreamsOutput, bool) bool) error { + return c.ListStreamsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListStreamsPagesWithContext same as ListStreamsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListStreamsPagesWithContext(ctx aws.Context, input *ListStreamsInput, fn func(*ListStreamsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListStreamsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListStreamsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListStreamsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the @@ -12954,6 +15513,12 @@ func (c *IoT) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req * Name: opListTagsForResource, HTTPMethod: "GET", HTTPPath: "/tags", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "", + TruncationToken: "", + }, } if input == nil { @@ -13010,6 +15575,58 @@ func (c *IoT) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsFor return out, req.Send() } +// ListTagsForResourcePages iterates over the pages of a ListTagsForResource operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTagsForResource method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTagsForResource operation. +// pageNum := 0 +// err := client.ListTagsForResourcePages(params, +// func(page *iot.ListTagsForResourceOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListTagsForResourcePages(input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool) error { + return c.ListTagsForResourcePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTagsForResourcePagesWithContext same as ListTagsForResourcePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListTagsForResourcePagesWithContext(ctx aws.Context, input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTagsForResourceInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTagsForResourceRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTargetsForPolicy = "ListTargetsForPolicy" // ListTargetsForPolicyRequest generates a "aws/request.Request" representing the @@ -13039,6 +15656,12 @@ func (c *IoT) ListTargetsForPolicyRequest(input *ListTargetsForPolicyInput) (req Name: opListTargetsForPolicy, HTTPMethod: "POST", HTTPPath: "/policy-targets/{policyName}", + Paginator: &request.Paginator{ + InputTokens: []string{"marker"}, + OutputTokens: []string{"nextMarker"}, + LimitToken: "pageSize", + TruncationToken: "", + }, } if input == nil { @@ -13104,6 +15727,58 @@ func (c *IoT) ListTargetsForPolicyWithContext(ctx aws.Context, input *ListTarget return out, req.Send() } +// ListTargetsForPolicyPages iterates over the pages of a ListTargetsForPolicy operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See ListTargetsForPolicy method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTargetsForPolicy operation. +// pageNum := 0 +// err := client.ListTargetsForPolicyPages(params, +// func(page *iot.ListTargetsForPolicyOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListTargetsForPolicyPages(input *ListTargetsForPolicyInput, fn func(*ListTargetsForPolicyOutput, bool) bool) error { + return c.ListTargetsForPolicyPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTargetsForPolicyPagesWithContext same as ListTargetsForPolicyPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListTargetsForPolicyPagesWithContext(ctx aws.Context, input *ListTargetsForPolicyInput, fn func(*ListTargetsForPolicyOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTargetsForPolicyInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTargetsForPolicyRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTargetsForPolicyOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTargetsForSecurityProfile = "ListTargetsForSecurityProfile" // ListTargetsForSecurityProfileRequest generates a "aws/request.Request" representing the @@ -13133,6 +15808,12 @@ func (c *IoT) ListTargetsForSecurityProfileRequest(input *ListTargetsForSecurity Name: opListTargetsForSecurityProfile, HTTPMethod: "GET", HTTPPath: "/security-profiles/{securityProfileName}/targets", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -13190,6 +15871,58 @@ func (c *IoT) ListTargetsForSecurityProfileWithContext(ctx aws.Context, input *L return out, req.Send() } +// ListTargetsForSecurityProfilePages iterates over the pages of a ListTargetsForSecurityProfile operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTargetsForSecurityProfile method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTargetsForSecurityProfile operation. 
+// pageNum := 0 +// err := client.ListTargetsForSecurityProfilePages(params, +// func(page *iot.ListTargetsForSecurityProfileOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListTargetsForSecurityProfilePages(input *ListTargetsForSecurityProfileInput, fn func(*ListTargetsForSecurityProfileOutput, bool) bool) error { + return c.ListTargetsForSecurityProfilePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTargetsForSecurityProfilePagesWithContext same as ListTargetsForSecurityProfilePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListTargetsForSecurityProfilePagesWithContext(ctx aws.Context, input *ListTargetsForSecurityProfileInput, fn func(*ListTargetsForSecurityProfileOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTargetsForSecurityProfileInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTargetsForSecurityProfileRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTargetsForSecurityProfileOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListThingGroups = "ListThingGroups" // ListThingGroupsRequest generates a "aws/request.Request" representing the @@ -13219,6 +15952,12 @@ func (c *IoT) ListThingGroupsRequest(input *ListThingGroupsInput) (req *request. Name: opListThingGroups, HTTPMethod: "GET", HTTPPath: "/thing-groups", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -13272,6 +16011,58 @@ func (c *IoT) ListThingGroupsWithContext(ctx aws.Context, input *ListThingGroups return out, req.Send() } +// ListThingGroupsPages iterates over the pages of a ListThingGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListThingGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListThingGroups operation. +// pageNum := 0 +// err := client.ListThingGroupsPages(params, +// func(page *iot.ListThingGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListThingGroupsPages(input *ListThingGroupsInput, fn func(*ListThingGroupsOutput, bool) bool) error { + return c.ListThingGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListThingGroupsPagesWithContext same as ListThingGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
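+//
+// Editor's note: the sketch below is not generated SDK documentation. It
+// assumes an existing *iot.IoT client named "client" and a hypothetical
+// group name "example-group", and shows how pagination can be stopped early
+// by returning false from the callback.
+//
+//    found := false
+//    err := client.ListThingGroupsPagesWithContext(aws.BackgroundContext(), &iot.ListThingGroupsInput{},
+//        func(page *iot.ListThingGroupsOutput, lastPage bool) bool {
+//            for _, g := range page.ThingGroups {
+//                if aws.StringValue(g.GroupName) == "example-group" {
+//                    found = true
+//                    return false // stop paginating once the group is found
+//                }
+//            }
+//            return true
+//        })
+//    if err == nil && found {
+//        fmt.Println("thing group exists")
+//    }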
+func (c *IoT) ListThingGroupsPagesWithContext(ctx aws.Context, input *ListThingGroupsInput, fn func(*ListThingGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListThingGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListThingGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListThingGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListThingGroupsForThing = "ListThingGroupsForThing" // ListThingGroupsForThingRequest generates a "aws/request.Request" representing the @@ -13301,6 +16092,12 @@ func (c *IoT) ListThingGroupsForThingRequest(input *ListThingGroupsForThingInput Name: opListThingGroupsForThing, HTTPMethod: "GET", HTTPPath: "/things/{thingName}/thing-groups", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -13354,6 +16151,58 @@ func (c *IoT) ListThingGroupsForThingWithContext(ctx aws.Context, input *ListThi return out, req.Send() } +// ListThingGroupsForThingPages iterates over the pages of a ListThingGroupsForThing operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListThingGroupsForThing method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListThingGroupsForThing operation. +// pageNum := 0 +// err := client.ListThingGroupsForThingPages(params, +// func(page *iot.ListThingGroupsForThingOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListThingGroupsForThingPages(input *ListThingGroupsForThingInput, fn func(*ListThingGroupsForThingOutput, bool) bool) error { + return c.ListThingGroupsForThingPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListThingGroupsForThingPagesWithContext same as ListThingGroupsForThingPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListThingGroupsForThingPagesWithContext(ctx aws.Context, input *ListThingGroupsForThingInput, fn func(*ListThingGroupsForThingOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListThingGroupsForThingInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListThingGroupsForThingRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListThingGroupsForThingOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListThingPrincipals = "ListThingPrincipals" // ListThingPrincipalsRequest generates a "aws/request.Request" representing the @@ -13476,6 +16325,12 @@ func (c *IoT) ListThingRegistrationTaskReportsRequest(input *ListThingRegistrati Name: opListThingRegistrationTaskReports, HTTPMethod: "GET", HTTPPath: "/thing-registration-tasks/{taskId}/reports", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -13532,6 +16387,58 @@ func (c *IoT) ListThingRegistrationTaskReportsWithContext(ctx aws.Context, input return out, req.Send() } +// ListThingRegistrationTaskReportsPages iterates over the pages of a ListThingRegistrationTaskReports operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListThingRegistrationTaskReports method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListThingRegistrationTaskReports operation. +// pageNum := 0 +// err := client.ListThingRegistrationTaskReportsPages(params, +// func(page *iot.ListThingRegistrationTaskReportsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListThingRegistrationTaskReportsPages(input *ListThingRegistrationTaskReportsInput, fn func(*ListThingRegistrationTaskReportsOutput, bool) bool) error { + return c.ListThingRegistrationTaskReportsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListThingRegistrationTaskReportsPagesWithContext same as ListThingRegistrationTaskReportsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListThingRegistrationTaskReportsPagesWithContext(ctx aws.Context, input *ListThingRegistrationTaskReportsInput, fn func(*ListThingRegistrationTaskReportsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListThingRegistrationTaskReportsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListThingRegistrationTaskReportsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListThingRegistrationTaskReportsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListThingRegistrationTasks = "ListThingRegistrationTasks" // ListThingRegistrationTasksRequest generates a "aws/request.Request" representing the @@ -13561,6 +16468,12 @@ func (c *IoT) ListThingRegistrationTasksRequest(input *ListThingRegistrationTask Name: opListThingRegistrationTasks, HTTPMethod: "GET", HTTPPath: "/thing-registration-tasks", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -13617,6 +16530,58 @@ func (c *IoT) ListThingRegistrationTasksWithContext(ctx aws.Context, input *List return out, req.Send() } +// ListThingRegistrationTasksPages iterates over the pages of a ListThingRegistrationTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListThingRegistrationTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListThingRegistrationTasks operation. +// pageNum := 0 +// err := client.ListThingRegistrationTasksPages(params, +// func(page *iot.ListThingRegistrationTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListThingRegistrationTasksPages(input *ListThingRegistrationTasksInput, fn func(*ListThingRegistrationTasksOutput, bool) bool) error { + return c.ListThingRegistrationTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListThingRegistrationTasksPagesWithContext same as ListThingRegistrationTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListThingRegistrationTasksPagesWithContext(ctx aws.Context, input *ListThingRegistrationTasksInput, fn func(*ListThingRegistrationTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListThingRegistrationTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListThingRegistrationTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListThingRegistrationTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListThingTypes = "ListThingTypes" // ListThingTypesRequest generates a "aws/request.Request" representing the @@ -13646,6 +16611,12 @@ func (c *IoT) ListThingTypesRequest(input *ListThingTypesInput) (req *request.Re Name: opListThingTypes, HTTPMethod: "GET", HTTPPath: "/thing-types", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -13705,6 +16676,58 @@ func (c *IoT) ListThingTypesWithContext(ctx aws.Context, input *ListThingTypesIn return out, req.Send() } +// ListThingTypesPages iterates over the pages of a ListThingTypes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListThingTypes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListThingTypes operation. +// pageNum := 0 +// err := client.ListThingTypesPages(params, +// func(page *iot.ListThingTypesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListThingTypesPages(input *ListThingTypesInput, fn func(*ListThingTypesOutput, bool) bool) error { + return c.ListThingTypesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListThingTypesPagesWithContext same as ListThingTypesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListThingTypesPagesWithContext(ctx aws.Context, input *ListThingTypesInput, fn func(*ListThingTypesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListThingTypesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListThingTypesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListThingTypesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListThings = "ListThings" // ListThingsRequest generates a "aws/request.Request" representing the @@ -13734,6 +16757,12 @@ func (c *IoT) ListThingsRequest(input *ListThingsInput) (req *request.Request, o Name: opListThings, HTTPMethod: "GET", HTTPPath: "/things", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -13752,6 +16781,10 @@ func (c *IoT) ListThingsRequest(input *ListThingsInput) (req *request.Request, o // and attributeValue=Red retrieves all things in the registry that contain // an attribute Color with the value Red. // +// You will not be charged for calling this API if an Access denied error is +// returned. You will also not be charged if no attributes or pagination token +// was provided in request and no pagination token and no results were returned. 
+// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -13796,6 +16829,58 @@ func (c *IoT) ListThingsWithContext(ctx aws.Context, input *ListThingsInput, opt return out, req.Send() } +// ListThingsPages iterates over the pages of a ListThings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListThings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListThings operation. +// pageNum := 0 +// err := client.ListThingsPages(params, +// func(page *iot.ListThingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListThingsPages(input *ListThingsInput, fn func(*ListThingsOutput, bool) bool) error { + return c.ListThingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListThingsPagesWithContext same as ListThingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListThingsPagesWithContext(ctx aws.Context, input *ListThingsInput, fn func(*ListThingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListThingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListThingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListThingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListThingsInBillingGroup = "ListThingsInBillingGroup" // ListThingsInBillingGroupRequest generates a "aws/request.Request" representing the @@ -13825,6 +16910,12 @@ func (c *IoT) ListThingsInBillingGroupRequest(input *ListThingsInBillingGroupInp Name: opListThingsInBillingGroup, HTTPMethod: "GET", HTTPPath: "/billing-groups/{billingGroupName}/things", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -13881,6 +16972,58 @@ func (c *IoT) ListThingsInBillingGroupWithContext(ctx aws.Context, input *ListTh return out, req.Send() } +// ListThingsInBillingGroupPages iterates over the pages of a ListThingsInBillingGroup operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListThingsInBillingGroup method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListThingsInBillingGroup operation. 
+// pageNum := 0 +// err := client.ListThingsInBillingGroupPages(params, +// func(page *iot.ListThingsInBillingGroupOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListThingsInBillingGroupPages(input *ListThingsInBillingGroupInput, fn func(*ListThingsInBillingGroupOutput, bool) bool) error { + return c.ListThingsInBillingGroupPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListThingsInBillingGroupPagesWithContext same as ListThingsInBillingGroupPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListThingsInBillingGroupPagesWithContext(ctx aws.Context, input *ListThingsInBillingGroupInput, fn func(*ListThingsInBillingGroupOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListThingsInBillingGroupInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListThingsInBillingGroupRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListThingsInBillingGroupOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListThingsInThingGroup = "ListThingsInThingGroup" // ListThingsInThingGroupRequest generates a "aws/request.Request" representing the @@ -13910,6 +17053,12 @@ func (c *IoT) ListThingsInThingGroupRequest(input *ListThingsInThingGroupInput) Name: opListThingsInThingGroup, HTTPMethod: "GET", HTTPPath: "/thing-groups/{thingGroupName}/things", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -13963,6 +17112,58 @@ func (c *IoT) ListThingsInThingGroupWithContext(ctx aws.Context, input *ListThin return out, req.Send() } +// ListThingsInThingGroupPages iterates over the pages of a ListThingsInThingGroup operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListThingsInThingGroup method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListThingsInThingGroup operation. +// pageNum := 0 +// err := client.ListThingsInThingGroupPages(params, +// func(page *iot.ListThingsInThingGroupOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListThingsInThingGroupPages(input *ListThingsInThingGroupInput, fn func(*ListThingsInThingGroupOutput, bool) bool) error { + return c.ListThingsInThingGroupPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListThingsInThingGroupPagesWithContext same as ListThingsInThingGroupPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *IoT) ListThingsInThingGroupPagesWithContext(ctx aws.Context, input *ListThingsInThingGroupInput, fn func(*ListThingsInThingGroupOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListThingsInThingGroupInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListThingsInThingGroupRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListThingsInThingGroupOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTopicRuleDestinations = "ListTopicRuleDestinations" // ListTopicRuleDestinationsRequest generates a "aws/request.Request" representing the @@ -13992,6 +17193,12 @@ func (c *IoT) ListTopicRuleDestinationsRequest(input *ListTopicRuleDestinationsI Name: opListTopicRuleDestinations, HTTPMethod: "GET", HTTPPath: "/destinations", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -14048,6 +17255,58 @@ func (c *IoT) ListTopicRuleDestinationsWithContext(ctx aws.Context, input *ListT return out, req.Send() } +// ListTopicRuleDestinationsPages iterates over the pages of a ListTopicRuleDestinations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTopicRuleDestinations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTopicRuleDestinations operation. +// pageNum := 0 +// err := client.ListTopicRuleDestinationsPages(params, +// func(page *iot.ListTopicRuleDestinationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListTopicRuleDestinationsPages(input *ListTopicRuleDestinationsInput, fn func(*ListTopicRuleDestinationsOutput, bool) bool) error { + return c.ListTopicRuleDestinationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTopicRuleDestinationsPagesWithContext same as ListTopicRuleDestinationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListTopicRuleDestinationsPagesWithContext(ctx aws.Context, input *ListTopicRuleDestinationsInput, fn func(*ListTopicRuleDestinationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTopicRuleDestinationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTopicRuleDestinationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTopicRuleDestinationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTopicRules = "ListTopicRules" // ListTopicRulesRequest generates a "aws/request.Request" representing the @@ -14077,6 +17336,12 @@ func (c *IoT) ListTopicRulesRequest(input *ListTopicRulesInput) (req *request.Re Name: opListTopicRules, HTTPMethod: "GET", HTTPPath: "/rules", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -14130,6 +17395,58 @@ func (c *IoT) ListTopicRulesWithContext(ctx aws.Context, input *ListTopicRulesIn return out, req.Send() } +// ListTopicRulesPages iterates over the pages of a ListTopicRules operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTopicRules method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTopicRules operation. +// pageNum := 0 +// err := client.ListTopicRulesPages(params, +// func(page *iot.ListTopicRulesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListTopicRulesPages(input *ListTopicRulesInput, fn func(*ListTopicRulesOutput, bool) bool) error { + return c.ListTopicRulesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTopicRulesPagesWithContext same as ListTopicRulesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListTopicRulesPagesWithContext(ctx aws.Context, input *ListTopicRulesInput, fn func(*ListTopicRulesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTopicRulesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTopicRulesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTopicRulesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListV2LoggingLevels = "ListV2LoggingLevels" // ListV2LoggingLevelsRequest generates a "aws/request.Request" representing the @@ -14159,6 +17476,12 @@ func (c *IoT) ListV2LoggingLevelsRequest(input *ListV2LoggingLevelsInput) (req * Name: opListV2LoggingLevels, HTTPMethod: "GET", HTTPPath: "/v2LoggingLevel", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -14215,6 +17538,58 @@ func (c *IoT) ListV2LoggingLevelsWithContext(ctx aws.Context, input *ListV2Loggi return out, req.Send() } +// ListV2LoggingLevelsPages iterates over the pages of a ListV2LoggingLevels operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListV2LoggingLevels method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListV2LoggingLevels operation. +// pageNum := 0 +// err := client.ListV2LoggingLevelsPages(params, +// func(page *iot.ListV2LoggingLevelsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListV2LoggingLevelsPages(input *ListV2LoggingLevelsInput, fn func(*ListV2LoggingLevelsOutput, bool) bool) error { + return c.ListV2LoggingLevelsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListV2LoggingLevelsPagesWithContext same as ListV2LoggingLevelsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListV2LoggingLevelsPagesWithContext(ctx aws.Context, input *ListV2LoggingLevelsInput, fn func(*ListV2LoggingLevelsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListV2LoggingLevelsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListV2LoggingLevelsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListV2LoggingLevelsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListViolationEvents = "ListViolationEvents" // ListViolationEventsRequest generates a "aws/request.Request" representing the @@ -14244,6 +17619,12 @@ func (c *IoT) ListViolationEventsRequest(input *ListViolationEventsInput) (req * Name: opListViolationEvents, HTTPMethod: "GET", HTTPPath: "/violation-events", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -14299,6 +17680,58 @@ func (c *IoT) ListViolationEventsWithContext(ctx aws.Context, input *ListViolati return out, req.Send() } +// ListViolationEventsPages iterates over the pages of a ListViolationEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListViolationEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListViolationEvents operation. +// pageNum := 0 +// err := client.ListViolationEventsPages(params, +// func(page *iot.ListViolationEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IoT) ListViolationEventsPages(input *ListViolationEventsInput, fn func(*ListViolationEventsOutput, bool) bool) error { + return c.ListViolationEventsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListViolationEventsPagesWithContext same as ListViolationEventsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) ListViolationEventsPagesWithContext(ctx aws.Context, input *ListViolationEventsInput, fn func(*ListViolationEventsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListViolationEventsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListViolationEventsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListViolationEventsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opRegisterCACertificate = "RegisterCACertificate" // RegisterCACertificateRequest generates a "aws/request.Request" representing the @@ -14509,6 +17942,103 @@ func (c *IoT) RegisterCertificateWithContext(ctx aws.Context, input *RegisterCer return out, req.Send() } +const opRegisterCertificateWithoutCA = "RegisterCertificateWithoutCA" + +// RegisterCertificateWithoutCARequest generates a "aws/request.Request" representing the +// client's request for the RegisterCertificateWithoutCA operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterCertificateWithoutCA for more information on using the RegisterCertificateWithoutCA +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterCertificateWithoutCARequest method. +// req, resp := client.RegisterCertificateWithoutCARequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) RegisterCertificateWithoutCARequest(input *RegisterCertificateWithoutCAInput) (req *request.Request, output *RegisterCertificateWithoutCAOutput) { + op := &request.Operation{ + Name: opRegisterCertificateWithoutCA, + HTTPMethod: "POST", + HTTPPath: "/certificate/register-no-ca", + } + + if input == nil { + input = &RegisterCertificateWithoutCAInput{} + } + + output = &RegisterCertificateWithoutCAOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterCertificateWithoutCA API operation for AWS IoT. +// +// Register a certificate that does not have a certificate authority (CA). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation RegisterCertificateWithoutCA for usage and error information. +// +// Returned Error Types: +// * ResourceAlreadyExistsException +// The resource already exists. +// +// * InvalidRequestException +// The request is not valid. +// +// * CertificateStateException +// The certificate operation is not allowed. +// +// * CertificateValidationException +// The certificate is invalid. +// +// * ThrottlingException +// The rate exceeds the limit. +// +// * UnauthorizedException +// You are not authorized to perform this operation. +// +// * ServiceUnavailableException +// The service is temporarily unavailable. 
+// +// * InternalFailureException +// An unexpected error has occurred. +// +func (c *IoT) RegisterCertificateWithoutCA(input *RegisterCertificateWithoutCAInput) (*RegisterCertificateWithoutCAOutput, error) { + req, out := c.RegisterCertificateWithoutCARequest(input) + return out, req.Send() +} + +// RegisterCertificateWithoutCAWithContext is the same as RegisterCertificateWithoutCA with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterCertificateWithoutCA for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) RegisterCertificateWithoutCAWithContext(ctx aws.Context, input *RegisterCertificateWithoutCAInput, opts ...request.Option) (*RegisterCertificateWithoutCAOutput, error) { + req, out := c.RegisterCertificateWithoutCARequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRegisterThing = "RegisterThing" // RegisterThingRequest generates a "aws/request.Request" representing the @@ -14843,6 +18373,10 @@ func (c *IoT) RemoveThingFromThingGroupRequest(input *RemoveThingFromThingGroupI // // Remove the specified thing from the specified group. // +// You must specify either a thingGroupArn or a thingGroupName to identify the +// thing group and either a thingArn or a thingName to identify the thing to +// remove from the thing group. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -15416,6 +18950,9 @@ func (c *IoT) SetV2LoggingLevelRequest(input *SetV2LoggingLevelInput) (req *requ // * ServiceUnavailableException // The service is temporarily unavailable. // +// * LimitExceededException +// A limit has been exceeded. +// func (c *IoT) SetV2LoggingLevel(input *SetV2LoggingLevelInput) (*SetV2LoggingLevelOutput, error) { req, out := c.SetV2LoggingLevelRequest(input) return out, req.Send() @@ -16430,6 +19967,92 @@ func (c *IoT) UpdateAccountAuditConfigurationWithContext(ctx aws.Context, input return out, req.Send() } +const opUpdateAuditSuppression = "UpdateAuditSuppression" + +// UpdateAuditSuppressionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAuditSuppression operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAuditSuppression for more information on using the UpdateAuditSuppression +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAuditSuppressionRequest method. 
+// req, resp := client.UpdateAuditSuppressionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) UpdateAuditSuppressionRequest(input *UpdateAuditSuppressionInput) (req *request.Request, output *UpdateAuditSuppressionOutput) { + op := &request.Operation{ + Name: opUpdateAuditSuppression, + HTTPMethod: "PATCH", + HTTPPath: "/audit/suppressions/update", + } + + if input == nil { + input = &UpdateAuditSuppressionInput{} + } + + output = &UpdateAuditSuppressionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateAuditSuppression API operation for AWS IoT. +// +// Updates a Device Defender audit suppression. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation UpdateAuditSuppression for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is not valid. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ThrottlingException +// The rate exceeds the limit. +// +// * InternalFailureException +// An unexpected error has occurred. +// +func (c *IoT) UpdateAuditSuppression(input *UpdateAuditSuppressionInput) (*UpdateAuditSuppressionOutput, error) { + req, out := c.UpdateAuditSuppressionRequest(input) + return out, req.Send() +} + +// UpdateAuditSuppressionWithContext is the same as UpdateAuditSuppression with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAuditSuppression for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) UpdateAuditSuppressionWithContext(ctx aws.Context, input *UpdateAuditSuppressionInput, opts ...request.Option) (*UpdateAuditSuppressionOutput, error) { + req, out := c.UpdateAuditSuppressionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateAuthorizer = "UpdateAuthorizer" // UpdateAuthorizerRequest generates a "aws/request.Request" representing the @@ -16750,11 +20373,13 @@ func (c *IoT) UpdateCertificateRequest(input *UpdateCertificateInput) (req *requ // // Updates the status of the specified certificate. This operation is idempotent. // -// Moving a certificate from the ACTIVE state (including REVOKED) will not disconnect -// currently connected devices, but these devices will be unable to reconnect. +// Certificates must be in the ACTIVE state to authenticate devices that use +// a certificate to connect to AWS IoT. // -// The ACTIVE state is required to authenticate devices connecting to AWS IoT -// using a certificate. +// Within a few minutes of updating a certificate from the ACTIVE state to any +// other state, AWS IoT disconnects all devices that used that certificate to +// connect. Devices cannot use a certificate that is not in the ACTIVE state +// to reconnect. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -16806,6 +20431,92 @@ func (c *IoT) UpdateCertificateWithContext(ctx aws.Context, input *UpdateCertifi return out, req.Send() } +const opUpdateDimension = "UpdateDimension" + +// UpdateDimensionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDimension operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDimension for more information on using the UpdateDimension +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDimensionRequest method. +// req, resp := client.UpdateDimensionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +func (c *IoT) UpdateDimensionRequest(input *UpdateDimensionInput) (req *request.Request, output *UpdateDimensionOutput) { + op := &request.Operation{ + Name: opUpdateDimension, + HTTPMethod: "PATCH", + HTTPPath: "/dimensions/{name}", + } + + if input == nil { + input = &UpdateDimensionInput{} + } + + output = &UpdateDimensionOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDimension API operation for AWS IoT. +// +// Updates the definition for a dimension. You cannot change the type of a dimension +// after it is created (you can delete it and re-create it). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS IoT's +// API operation UpdateDimension for usage and error information. +// +// Returned Error Types: +// * InternalFailureException +// An unexpected error has occurred. +// +// * InvalidRequestException +// The request is not valid. +// +// * ResourceNotFoundException +// The specified resource does not exist. +// +// * ThrottlingException +// The rate exceeds the limit. +// +func (c *IoT) UpdateDimension(input *UpdateDimensionInput) (*UpdateDimensionOutput, error) { + req, out := c.UpdateDimensionRequest(input) + return out, req.Send() +} + +// UpdateDimensionWithContext is the same as UpdateDimension with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDimension for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IoT) UpdateDimensionWithContext(ctx aws.Context, input *UpdateDimensionInput, opts ...request.Option) (*UpdateDimensionOutput, error) { + req, out := c.UpdateDimensionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opUpdateDomainConfiguration = "UpdateDomainConfiguration" // UpdateDomainConfigurationRequest generates a "aws/request.Request" representing the @@ -18229,11 +21940,11 @@ func (c *IoT) ValidateSecurityProfileBehaviorsWithContext(ctx aws.Context, input return out, req.Send() } -// Details of abort criteria to abort the job. +// The criteria that determine when and how a job abort takes place. type AbortConfig struct { _ struct{} `type:"structure"` - // The list of abort criteria to define rules to abort the job. + // The list of criteria that determine when and how to abort the job. // // CriteriaList is a required field CriteriaList []*AbortCriteria `locationName:"criteriaList" min:"1" type:"list" required:"true"` @@ -18281,27 +21992,28 @@ func (s *AbortConfig) SetCriteriaList(v []*AbortCriteria) *AbortConfig { return s } -// Details of abort criteria to define rules to abort the job. +// The criteria that determine when and how a job abort takes place. type AbortCriteria struct { _ struct{} `type:"structure"` - // The type of abort action to initiate a job abort. + // The type of job action to take to initiate the job abort. // // Action is a required field Action *string `locationName:"action" type:"string" required:"true" enum:"AbortAction"` - // The type of job execution failure to define a rule to initiate a job abort. + // The type of job execution failures that can initiate a job abort. // // FailureType is a required field FailureType *string `locationName:"failureType" type:"string" required:"true" enum:"JobExecutionFailureType"` - // Minimum number of executed things before evaluating an abort rule. + // The minimum number of things which must receive job execution notifications + // before the job can be aborted. // // MinNumberOfExecutedThings is a required field MinNumberOfExecutedThings *int64 `locationName:"minNumberOfExecutedThings" min:"1" type:"integer" required:"true"` - // The threshold as a percentage of the total number of executed things that - // will initiate a job abort. + // The minimum percentage of job execution failures that must occur to initiate + // the job abort. // // AWS IoT supports up to two digits after the decimal (for example, 10.9 and // 10.99, but not 10.999). @@ -18442,7 +22154,7 @@ type Action struct { // Change the state of a CloudWatch alarm. CloudwatchAlarm *CloudwatchAlarmAction `locationName:"cloudwatchAlarm" type:"structure"` - // Send data to CloudWatch logs. + // Send data to CloudWatch Logs. CloudwatchLogs *CloudwatchLogsAction `locationName:"cloudwatchLogs" type:"structure"` // Capture a CloudWatch metric. @@ -18498,6 +22210,12 @@ type Action struct { // Starts execution of a Step Functions state machine. StepFunctions *StepFunctionsAction `locationName:"stepFunctions" type:"structure"` + + // The Timestream rule action writes attributes (measures) from an MQTT message + // into an Amazon Timestream table. For more information, see the Timestream + // (https://docs.aws.amazon.com/iot/latest/developerguide/timestream-rule-action.html) + // topic rule action documentation. 
+ Timestream *TimestreamAction `locationName:"timestream" type:"structure"` } // String returns the string representation @@ -18603,6 +22321,11 @@ func (s *Action) Validate() error { invalidParams.AddNested("StepFunctions", err.(request.ErrInvalidParams)) } } + if s.Timestream != nil { + if err := s.Timestream.Validate(); err != nil { + invalidParams.AddNested("Timestream", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -18724,6 +22447,12 @@ func (s *Action) SetStepFunctions(v *StepFunctionsAction) *Action { return s } +// SetTimestream sets the Timestream field's value. +func (s *Action) SetTimestream(v *TimestreamAction) *Action { + s.Timestream = v + return s +} + // Information about an active Device Defender security profile behavior violation. type ActiveViolation struct { _ struct{} `type:"structure"` @@ -19417,7 +23146,7 @@ type AttachPolicyInput struct { // PolicyName is a required field PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` - // The identity (https://docs.aws.amazon.com/iot/latest/developerguide/iot-security-identity.html) + // The identity (https://docs.aws.amazon.com/iot/latest/developerguide/security-iam.html) // to which the policy is attached. // // Target is a required field @@ -19778,6 +23507,10 @@ type AuditCheckDetails struct { // The number of resources that were found noncompliant during the check. NonCompliantResourcesCount *int64 `locationName:"nonCompliantResourcesCount" type:"long"` + // Describes how many of the non-compliant resources created during the evaluation + // of an audit check were marked as suppressed. + SuppressedNonCompliantResourcesCount *int64 `locationName:"suppressedNonCompliantResourcesCount" type:"long"` + // The number of resources on which the check was performed. TotalResourcesCount *int64 `locationName:"totalResourcesCount" type:"long"` } @@ -19822,6 +23555,12 @@ func (s *AuditCheckDetails) SetNonCompliantResourcesCount(v int64) *AuditCheckDe return s } +// SetSuppressedNonCompliantResourcesCount sets the SuppressedNonCompliantResourcesCount field's value. +func (s *AuditCheckDetails) SetSuppressedNonCompliantResourcesCount(v int64) *AuditCheckDetails { + s.SuppressedNonCompliantResourcesCount = &v + return s +} + // SetTotalResourcesCount sets the TotalResourcesCount field's value. func (s *AuditCheckDetails) SetTotalResourcesCount(v int64) *AuditCheckDetails { s.TotalResourcesCount = &v @@ -19842,6 +23581,9 @@ type AuditFinding struct { // The time the result (finding) was discovered. FindingTime *time.Time `locationName:"findingTime" type:"timestamp"` + // Indicates whether the audit finding was suppressed or not during reporting. + IsSuppressed *bool `locationName:"isSuppressed" type:"boolean"` + // The resource that was found to be noncompliant with the audit check. NonCompliantResource *NonCompliantResource `locationName:"nonCompliantResource" type:"structure"` @@ -19892,6 +23634,12 @@ func (s *AuditFinding) SetFindingTime(v time.Time) *AuditFinding { return s } +// SetIsSuppressed sets the IsSuppressed field's value. +func (s *AuditFinding) SetIsSuppressed(v bool) *AuditFinding { + s.IsSuppressed = &v + return s +} + // SetNonCompliantResource sets the NonCompliantResource field's value. 
func (s *AuditFinding) SetNonCompliantResource(v *NonCompliantResource) *AuditFinding { s.NonCompliantResource = v @@ -20193,6 +23941,73 @@ func (s *AuditNotificationTarget) SetTargetArn(v string) *AuditNotificationTarge return s } +// Filters out specific findings of a Device Defender audit. +type AuditSuppression struct { + _ struct{} `type:"structure"` + + // An audit check name. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration + // to see the list of all checks, including those that are enabled or use UpdateAccountAuditConfiguration + // to select which checks are enabled.) + // + // CheckName is a required field + CheckName *string `locationName:"checkName" type:"string" required:"true"` + + // The description of the audit suppression. + Description *string `locationName:"description" type:"string"` + + // The expiration date (epoch timestamp in seconds) that you want the suppression + // to adhere to. + ExpirationDate *time.Time `locationName:"expirationDate" type:"timestamp"` + + // Information that identifies the noncompliant resource. + // + // ResourceIdentifier is a required field + ResourceIdentifier *ResourceIdentifier `locationName:"resourceIdentifier" type:"structure" required:"true"` + + // Indicates whether a suppression should exist indefinitely or not. + SuppressIndefinitely *bool `locationName:"suppressIndefinitely" type:"boolean"` +} + +// String returns the string representation +func (s AuditSuppression) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuditSuppression) GoString() string { + return s.String() +} + +// SetCheckName sets the CheckName field's value. +func (s *AuditSuppression) SetCheckName(v string) *AuditSuppression { + s.CheckName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *AuditSuppression) SetDescription(v string) *AuditSuppression { + s.Description = &v + return s +} + +// SetExpirationDate sets the ExpirationDate field's value. +func (s *AuditSuppression) SetExpirationDate(v time.Time) *AuditSuppression { + s.ExpirationDate = &v + return s +} + +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *AuditSuppression) SetResourceIdentifier(v *ResourceIdentifier) *AuditSuppression { + s.ResourceIdentifier = v + return s +} + +// SetSuppressIndefinitely sets the SuppressIndefinitely field's value. +func (s *AuditSuppression) SetSuppressIndefinitely(v bool) *AuditSuppression { + s.SuppressIndefinitely = &v + return s +} + // The audits that were performed. type AuditTaskMetadata struct { _ struct{} `type:"structure"` @@ -20245,7 +24060,9 @@ type AuthInfo struct { // The resources for which the principal is being authorized to perform the // specified action. - Resources []*string `locationName:"resources" type:"list"` + // + // Resources is a required field + Resources []*string `locationName:"resources" type:"list" required:"true"` } // String returns the string representation @@ -20258,6 +24075,19 @@ func (s AuthInfo) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *AuthInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AuthInfo"} + if s.Resources == nil { + invalidParams.Add(request.NewErrParamRequired("Resources")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetActionType sets the ActionType field's value. 
func (s *AuthInfo) SetActionType(v string) *AuthInfo { s.ActionType = &v @@ -20510,10 +24340,155 @@ func (s *AuthorizerSummary) SetAuthorizerName(v string) *AuthorizerSummary { return s } +// The criteria that determine when and how a job abort takes place. +type AwsJobAbortConfig struct { + _ struct{} `type:"structure"` + + // The list of criteria that determine when and how to abort the job. + // + // AbortCriteriaList is a required field + AbortCriteriaList []*AwsJobAbortCriteria `locationName:"abortCriteriaList" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s AwsJobAbortConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsJobAbortConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AwsJobAbortConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsJobAbortConfig"} + if s.AbortCriteriaList == nil { + invalidParams.Add(request.NewErrParamRequired("AbortCriteriaList")) + } + if s.AbortCriteriaList != nil && len(s.AbortCriteriaList) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AbortCriteriaList", 1)) + } + if s.AbortCriteriaList != nil { + for i, v := range s.AbortCriteriaList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AbortCriteriaList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortCriteriaList sets the AbortCriteriaList field's value. +func (s *AwsJobAbortConfig) SetAbortCriteriaList(v []*AwsJobAbortCriteria) *AwsJobAbortConfig { + s.AbortCriteriaList = v + return s +} + +// The criteria that determine when and how a job abort takes place. +type AwsJobAbortCriteria struct { + _ struct{} `type:"structure"` + + // The type of job action to take to initiate the job abort. + // + // Action is a required field + Action *string `locationName:"action" type:"string" required:"true" enum:"AwsJobAbortCriteriaAbortAction"` + + // The type of job execution failures that can initiate a job abort. + // + // FailureType is a required field + FailureType *string `locationName:"failureType" type:"string" required:"true" enum:"AwsJobAbortCriteriaFailureType"` + + // The minimum number of things which must receive job execution notifications + // before the job can be aborted. + // + // MinNumberOfExecutedThings is a required field + MinNumberOfExecutedThings *int64 `locationName:"minNumberOfExecutedThings" min:"1" type:"integer" required:"true"` + + // The minimum percentage of job execution failures that must occur to initiate + // the job abort. + // + // AWS IoT supports up to two digits after the decimal (for example, 10.9 and + // 10.99, but not 10.999). + // + // ThresholdPercentage is a required field + ThresholdPercentage *float64 `locationName:"thresholdPercentage" type:"double" required:"true"` +} + +// String returns the string representation +func (s AwsJobAbortCriteria) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsJobAbortCriteria) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AwsJobAbortCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsJobAbortCriteria"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.FailureType == nil { + invalidParams.Add(request.NewErrParamRequired("FailureType")) + } + if s.MinNumberOfExecutedThings == nil { + invalidParams.Add(request.NewErrParamRequired("MinNumberOfExecutedThings")) + } + if s.MinNumberOfExecutedThings != nil && *s.MinNumberOfExecutedThings < 1 { + invalidParams.Add(request.NewErrParamMinValue("MinNumberOfExecutedThings", 1)) + } + if s.ThresholdPercentage == nil { + invalidParams.Add(request.NewErrParamRequired("ThresholdPercentage")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *AwsJobAbortCriteria) SetAction(v string) *AwsJobAbortCriteria { + s.Action = &v + return s +} + +// SetFailureType sets the FailureType field's value. +func (s *AwsJobAbortCriteria) SetFailureType(v string) *AwsJobAbortCriteria { + s.FailureType = &v + return s +} + +// SetMinNumberOfExecutedThings sets the MinNumberOfExecutedThings field's value. +func (s *AwsJobAbortCriteria) SetMinNumberOfExecutedThings(v int64) *AwsJobAbortCriteria { + s.MinNumberOfExecutedThings = &v + return s +} + +// SetThresholdPercentage sets the ThresholdPercentage field's value. +func (s *AwsJobAbortCriteria) SetThresholdPercentage(v float64) *AwsJobAbortCriteria { + s.ThresholdPercentage = &v + return s +} + // Configuration for the rollout of OTA updates. type AwsJobExecutionsRolloutConfig struct { _ struct{} `type:"structure"` + // The rate of increase for a job rollout. This parameter allows you to define + // an exponential rate increase for a job rollout. + ExponentialRate *AwsJobExponentialRolloutRate `locationName:"exponentialRate" type:"structure"` + // The maximum number of OTA update job executions started per minute. MaximumPerMinute *int64 `locationName:"maximumPerMinute" min:"1" type:"integer"` } @@ -20534,6 +24509,11 @@ func (s *AwsJobExecutionsRolloutConfig) Validate() error { if s.MaximumPerMinute != nil && *s.MaximumPerMinute < 1 { invalidParams.Add(request.NewErrParamMinValue("MaximumPerMinute", 1)) } + if s.ExponentialRate != nil { + if err := s.ExponentialRate.Validate(); err != nil { + invalidParams.AddNested("ExponentialRate", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -20541,12 +24521,100 @@ func (s *AwsJobExecutionsRolloutConfig) Validate() error { return nil } +// SetExponentialRate sets the ExponentialRate field's value. +func (s *AwsJobExecutionsRolloutConfig) SetExponentialRate(v *AwsJobExponentialRolloutRate) *AwsJobExecutionsRolloutConfig { + s.ExponentialRate = v + return s +} + // SetMaximumPerMinute sets the MaximumPerMinute field's value. func (s *AwsJobExecutionsRolloutConfig) SetMaximumPerMinute(v int64) *AwsJobExecutionsRolloutConfig { s.MaximumPerMinute = &v return s } +// The rate of increase for a job rollout. This parameter allows you to define +// an exponential rate increase for a job rollout. +type AwsJobExponentialRolloutRate struct { + _ struct{} `type:"structure"` + + // The minimum number of things that will be notified of a pending job, per + // minute, at the start of the job rollout. This is the initial rate of the + // rollout. 
+ // + // BaseRatePerMinute is a required field + BaseRatePerMinute *int64 `locationName:"baseRatePerMinute" min:"1" type:"integer" required:"true"` + + // The rate of increase for a job rollout. The number of things notified is + // multiplied by this factor. + // + // IncrementFactor is a required field + IncrementFactor *float64 `locationName:"incrementFactor" type:"double" required:"true"` + + // The criteria to initiate the increase in rate of rollout for a job. + // + // AWS IoT supports up to one digit after the decimal (for example, 1.5, but + // not 1.55). + // + // RateIncreaseCriteria is a required field + RateIncreaseCriteria *AwsJobRateIncreaseCriteria `locationName:"rateIncreaseCriteria" type:"structure" required:"true"` +} + +// String returns the string representation +func (s AwsJobExponentialRolloutRate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsJobExponentialRolloutRate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AwsJobExponentialRolloutRate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsJobExponentialRolloutRate"} + if s.BaseRatePerMinute == nil { + invalidParams.Add(request.NewErrParamRequired("BaseRatePerMinute")) + } + if s.BaseRatePerMinute != nil && *s.BaseRatePerMinute < 1 { + invalidParams.Add(request.NewErrParamMinValue("BaseRatePerMinute", 1)) + } + if s.IncrementFactor == nil { + invalidParams.Add(request.NewErrParamRequired("IncrementFactor")) + } + if s.RateIncreaseCriteria == nil { + invalidParams.Add(request.NewErrParamRequired("RateIncreaseCriteria")) + } + if s.RateIncreaseCriteria != nil { + if err := s.RateIncreaseCriteria.Validate(); err != nil { + invalidParams.AddNested("RateIncreaseCriteria", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBaseRatePerMinute sets the BaseRatePerMinute field's value. +func (s *AwsJobExponentialRolloutRate) SetBaseRatePerMinute(v int64) *AwsJobExponentialRolloutRate { + s.BaseRatePerMinute = &v + return s +} + +// SetIncrementFactor sets the IncrementFactor field's value. +func (s *AwsJobExponentialRolloutRate) SetIncrementFactor(v float64) *AwsJobExponentialRolloutRate { + s.IncrementFactor = &v + return s +} + +// SetRateIncreaseCriteria sets the RateIncreaseCriteria field's value. +func (s *AwsJobExponentialRolloutRate) SetRateIncreaseCriteria(v *AwsJobRateIncreaseCriteria) *AwsJobExponentialRolloutRate { + s.RateIncreaseCriteria = v + return s +} + // Configuration information for pre-signed URLs. Valid when protocols contains // HTTP. type AwsJobPresignedUrlConfig struct { @@ -20574,6 +24642,89 @@ func (s *AwsJobPresignedUrlConfig) SetExpiresInSec(v int64) *AwsJobPresignedUrlC return s } +// The criteria to initiate the increase in rate of rollout for a job. +type AwsJobRateIncreaseCriteria struct { + _ struct{} `type:"structure"` + + // When this number of things have been notified, it will initiate an increase + // in the rollout rate. + NumberOfNotifiedThings *int64 `locationName:"numberOfNotifiedThings" min:"1" type:"integer"` + + // When this number of things have succeeded in their job execution, it will + // initiate an increase in the rollout rate. 
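// Illustrative sketch, not generated SDK code: wiring the exponential rollout
// rate defined above into an AwsJobExecutionsRolloutConfig. All numeric values
// are placeholders and the aws.* helpers come from "github.com/aws/aws-sdk-go/aws".
//
//    rollout := &iot.AwsJobExecutionsRolloutConfig{
//        MaximumPerMinute: aws.Int64(50),
//        ExponentialRate: &iot.AwsJobExponentialRolloutRate{
//            BaseRatePerMinute: aws.Int64(5),
//            IncrementFactor:   aws.Float64(1.5),
//            RateIncreaseCriteria: &iot.AwsJobRateIncreaseCriteria{
//                NumberOfNotifiedThings: aws.Int64(10),
//            },
//        },
//    }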
+ NumberOfSucceededThings *int64 `locationName:"numberOfSucceededThings" min:"1" type:"integer"` +} + +// String returns the string representation +func (s AwsJobRateIncreaseCriteria) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsJobRateIncreaseCriteria) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AwsJobRateIncreaseCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsJobRateIncreaseCriteria"} + if s.NumberOfNotifiedThings != nil && *s.NumberOfNotifiedThings < 1 { + invalidParams.Add(request.NewErrParamMinValue("NumberOfNotifiedThings", 1)) + } + if s.NumberOfSucceededThings != nil && *s.NumberOfSucceededThings < 1 { + invalidParams.Add(request.NewErrParamMinValue("NumberOfSucceededThings", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNumberOfNotifiedThings sets the NumberOfNotifiedThings field's value. +func (s *AwsJobRateIncreaseCriteria) SetNumberOfNotifiedThings(v int64) *AwsJobRateIncreaseCriteria { + s.NumberOfNotifiedThings = &v + return s +} + +// SetNumberOfSucceededThings sets the NumberOfSucceededThings field's value. +func (s *AwsJobRateIncreaseCriteria) SetNumberOfSucceededThings(v int64) *AwsJobRateIncreaseCriteria { + s.NumberOfSucceededThings = &v + return s +} + +// Specifies the amount of time each device has to finish its execution of the +// job. A timer is started when the job execution status is set to IN_PROGRESS. +// If the job execution status is not set to another terminal state before the +// timer expires, it will be automatically set to TIMED_OUT. +type AwsJobTimeoutConfig struct { + _ struct{} `type:"structure"` + + // Specifies the amount of time, in minutes, this device has to finish execution + // of this job. The timeout interval can be anywhere between 1 minute and 7 + // days (1 to 10080 minutes). The in progress timer can't be updated and will + // apply to all job executions for the job. Whenever a job execution remains + // in the IN_PROGRESS status for longer than this interval, the job execution + // will fail and switch to the terminal TIMED_OUT status. + InProgressTimeoutInMinutes *int64 `locationName:"inProgressTimeoutInMinutes" type:"long"` +} + +// String returns the string representation +func (s AwsJobTimeoutConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsJobTimeoutConfig) GoString() string { + return s.String() +} + +// SetInProgressTimeoutInMinutes sets the InProgressTimeoutInMinutes field's value. +func (s *AwsJobTimeoutConfig) SetInProgressTimeoutInMinutes(v int64) *AwsJobTimeoutConfig { + s.InProgressTimeoutInMinutes = &v + return s +} + // A Device Defender security profile behavior. type Behavior struct { _ struct{} `type:"structure"` @@ -20585,6 +24736,11 @@ type Behavior struct { // What is measured by the behavior. Metric *string `locationName:"metric" type:"string"` + // The dimension for a metric in your behavior. For example, using a TOPIC_FILTER + // dimension, you can narrow down the scope of the metric only to MQTT topics + // whose name match the pattern specified in the dimension. + MetricDimension *MetricDimension `locationName:"metricDimension" type:"structure"` + // The name you have given to the behavior. 
// // Name is a required field @@ -20615,6 +24771,11 @@ func (s *Behavior) Validate() error { invalidParams.AddNested("Criteria", err.(request.ErrInvalidParams)) } } + if s.MetricDimension != nil { + if err := s.MetricDimension.Validate(); err != nil { + invalidParams.AddNested("MetricDimension", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -20634,6 +24795,12 @@ func (s *Behavior) SetMetric(v string) *Behavior { return s } +// SetMetricDimension sets the MetricDimension field's value. +func (s *Behavior) SetMetricDimension(v *MetricDimension) *Behavior { + s.MetricDimension = v + return s +} + // SetName sets the Name field's value. func (s *Behavior) SetName(v string) *Behavior { s.Name = &v @@ -21361,6 +25528,9 @@ type Certificate struct { // the certificate ID.) CertificateId *string `locationName:"certificateId" min:"64" type:"string"` + // The mode of the certificate. + CertificateMode *string `locationName:"certificateMode" type:"string" enum:"CertificateMode"` + // The date and time the certificate was created. CreationDate *time.Time `locationName:"creationDate" type:"timestamp"` @@ -21392,6 +25562,12 @@ func (s *Certificate) SetCertificateId(v string) *Certificate { return s } +// SetCertificateMode sets the CertificateMode field's value. +func (s *Certificate) SetCertificateMode(v string) *Certificate { + s.CertificateMode = &v + return s +} + // SetCreationDate sets the CreationDate field's value. func (s *Certificate) SetCreationDate(v time.Time) *Certificate { s.CreationDate = &v @@ -21408,8 +25584,8 @@ func (s *Certificate) SetStatus(v string) *Certificate { // are attempting to register. This is happens when you have registered more // than one CA certificate that has the same subject field and public key. type CertificateConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -21427,17 +25603,17 @@ func (s CertificateConflictException) GoString() string { func newErrorCertificateConflictException(v protocol.ResponseMetadata) error { return &CertificateConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CertificateConflictException) Code() string { +func (s *CertificateConflictException) Code() string { return "CertificateConflictException" } // Message returns the exception's message. -func (s CertificateConflictException) Message() string { +func (s *CertificateConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21445,22 +25621,22 @@ func (s CertificateConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CertificateConflictException) OrigErr() error { +func (s *CertificateConflictException) OrigErr() error { return nil } -func (s CertificateConflictException) Error() string { +func (s *CertificateConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CertificateConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CertificateConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
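// Illustrative sketch, not generated SDK code: pairing the new MetricDimension
// field with a Behavior, as added above. The MetricDimension field names
// (DimensionName, Operator) and the "IN" operator value are assumptions based
// on that type's definition elsewhere in this file; the names are placeholders.
//
//    behavior := &iot.Behavior{
//        Name:   aws.String("excessive-admin-messages"),
//        Metric: aws.String("aws:num-messages-sent"),
//        MetricDimension: &iot.MetricDimension{
//            DimensionName: aws.String("AdminTopics"),
//            Operator:      aws.String("IN"),
//        },
//    }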
-func (s CertificateConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *CertificateConflictException) RequestID() string { + return s.RespMetadata.RequestID } // Describes a certificate. @@ -21476,6 +25652,9 @@ type CertificateDescription struct { // The ID of the certificate. CertificateId *string `locationName:"certificateId" min:"64" type:"string"` + // The mode of the certificate. + CertificateMode *string `locationName:"certificateMode" type:"string" enum:"CertificateMode"` + // The certificate data, in PEM format. CertificatePem *string `locationName:"certificatePem" min:"1" type:"string"` @@ -21535,6 +25714,12 @@ func (s *CertificateDescription) SetCertificateId(v string) *CertificateDescript return s } +// SetCertificateMode sets the CertificateMode field's value. +func (s *CertificateDescription) SetCertificateMode(v string) *CertificateDescription { + s.CertificateMode = &v + return s +} + // SetCertificatePem sets the CertificatePem field's value. func (s *CertificateDescription) SetCertificatePem(v string) *CertificateDescription { s.CertificatePem = &v @@ -21597,8 +25782,8 @@ func (s *CertificateDescription) SetValidity(v *CertificateValidity) *Certificat // The certificate operation is not allowed. type CertificateStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -21616,17 +25801,17 @@ func (s CertificateStateException) GoString() string { func newErrorCertificateStateException(v protocol.ResponseMetadata) error { return &CertificateStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CertificateStateException) Code() string { +func (s *CertificateStateException) Code() string { return "CertificateStateException" } // Message returns the exception's message. -func (s CertificateStateException) Message() string { +func (s *CertificateStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21634,28 +25819,28 @@ func (s CertificateStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CertificateStateException) OrigErr() error { +func (s *CertificateStateException) OrigErr() error { return nil } -func (s CertificateStateException) Error() string { +func (s *CertificateStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CertificateStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CertificateStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CertificateStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *CertificateStateException) RequestID() string { + return s.RespMetadata.RequestID } // The certificate is invalid. type CertificateValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Additional information about the exception. 
Message_ *string `locationName:"message" type:"string"` @@ -21673,17 +25858,17 @@ func (s CertificateValidationException) GoString() string { func newErrorCertificateValidationException(v protocol.ResponseMetadata) error { return &CertificateValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CertificateValidationException) Code() string { +func (s *CertificateValidationException) Code() string { return "CertificateValidationException" } // Message returns the exception's message. -func (s CertificateValidationException) Message() string { +func (s *CertificateValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21691,22 +25876,22 @@ func (s CertificateValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CertificateValidationException) OrigErr() error { +func (s *CertificateValidationException) OrigErr() error { return nil } -func (s CertificateValidationException) Error() string { +func (s *CertificateValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CertificateValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CertificateValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CertificateValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *CertificateValidationException) RequestID() string { + return s.RespMetadata.RequestID } // When the certificate is valid. @@ -21851,7 +26036,7 @@ func (s *CloudwatchAlarmAction) SetStateValue(v string) *CloudwatchAlarmAction { return s } -// Describes an action that sends data to CloudWatch logs. +// Describes an action that sends data to CloudWatch Logs. type CloudwatchLogsAction struct { _ struct{} `type:"structure"` @@ -22208,8 +26393,8 @@ func (s ConfirmTopicRuleDestinationOutput) GoString() string { // A conflicting resource update exception. This exception is thrown when two // pending updates cause a conflict. type ConflictingResourceUpdateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -22227,17 +26412,17 @@ func (s ConflictingResourceUpdateException) GoString() string { func newErrorConflictingResourceUpdateException(v protocol.ResponseMetadata) error { return &ConflictingResourceUpdateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictingResourceUpdateException) Code() string { +func (s *ConflictingResourceUpdateException) Code() string { return "ConflictingResourceUpdateException" } // Message returns the exception's message. -func (s ConflictingResourceUpdateException) Message() string { +func (s *ConflictingResourceUpdateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22245,22 +26430,134 @@ func (s ConflictingResourceUpdateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
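// Illustrative sketch, not generated SDK code: with the exception types now
// using pointer receivers and an exported RespMetadata field, a caller can
// match them with errors.As and read the HTTP status and request ID. Assumes
// the "errors" and "log" imports and an *iot.IoT client; the confirmation
// token is a placeholder.
//
//    _, err := client.ConfirmTopicRuleDestination(&iot.ConfirmTopicRuleDestinationInput{
//        ConfirmationToken: aws.String("<confirmation-token>"),
//    })
//    var cre *iot.ConflictingResourceUpdateException
//    if errors.As(err, &cre) {
//        log.Printf("conflict (HTTP %d, request %s): %s",
//            cre.StatusCode(), cre.RequestID(), cre.Message())
//    }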
-func (s ConflictingResourceUpdateException) OrigErr() error { +func (s *ConflictingResourceUpdateException) OrigErr() error { return nil } -func (s ConflictingResourceUpdateException) Error() string { +func (s *ConflictingResourceUpdateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictingResourceUpdateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictingResourceUpdateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictingResourceUpdateException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictingResourceUpdateException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateAuditSuppressionInput struct { + _ struct{} `type:"structure"` + + // An audit check name. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration + // to see the list of all checks, including those that are enabled or use UpdateAccountAuditConfiguration + // to select which checks are enabled.) + // + // CheckName is a required field + CheckName *string `locationName:"checkName" type:"string" required:"true"` + + // The epoch timestamp in seconds at which this suppression expires. + ClientRequestToken *string `locationName:"clientRequestToken" min:"1" type:"string" idempotencyToken:"true"` + + // The description of the audit suppression. + Description *string `locationName:"description" type:"string"` + + // The epoch timestamp in seconds at which this suppression expires. + ExpirationDate *time.Time `locationName:"expirationDate" type:"timestamp"` + + // Information that identifies the noncompliant resource. + // + // ResourceIdentifier is a required field + ResourceIdentifier *ResourceIdentifier `locationName:"resourceIdentifier" type:"structure" required:"true"` + + // Indicates whether a suppression should exist indefinitely or not. + SuppressIndefinitely *bool `locationName:"suppressIndefinitely" type:"boolean"` +} + +// String returns the string representation +func (s CreateAuditSuppressionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAuditSuppressionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAuditSuppressionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAuditSuppressionInput"} + if s.CheckName == nil { + invalidParams.Add(request.NewErrParamRequired("CheckName")) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.ResourceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceIdentifier")) + } + if s.ResourceIdentifier != nil { + if err := s.ResourceIdentifier.Validate(); err != nil { + invalidParams.AddNested("ResourceIdentifier", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCheckName sets the CheckName field's value. +func (s *CreateAuditSuppressionInput) SetCheckName(v string) *CreateAuditSuppressionInput { + s.CheckName = &v + return s +} + +// SetClientRequestToken sets the ClientRequestToken field's value. 
+func (s *CreateAuditSuppressionInput) SetClientRequestToken(v string) *CreateAuditSuppressionInput { + s.ClientRequestToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateAuditSuppressionInput) SetDescription(v string) *CreateAuditSuppressionInput { + s.Description = &v + return s +} + +// SetExpirationDate sets the ExpirationDate field's value. +func (s *CreateAuditSuppressionInput) SetExpirationDate(v time.Time) *CreateAuditSuppressionInput { + s.ExpirationDate = &v + return s +} + +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *CreateAuditSuppressionInput) SetResourceIdentifier(v *ResourceIdentifier) *CreateAuditSuppressionInput { + s.ResourceIdentifier = v + return s +} + +// SetSuppressIndefinitely sets the SuppressIndefinitely field's value. +func (s *CreateAuditSuppressionInput) SetSuppressIndefinitely(v bool) *CreateAuditSuppressionInput { + s.SuppressIndefinitely = &v + return s +} + +type CreateAuditSuppressionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAuditSuppressionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAuditSuppressionOutput) GoString() string { + return s.String() } type CreateAuthorizerInput struct { @@ -22283,6 +26580,15 @@ type CreateAuthorizerInput struct { // The status of the create authorizer request. Status *string `locationName:"status" type:"string" enum:"AuthorizerStatus"` + // Metadata which can be used to manage the custom authorizer. + // + // For URI Request parameters use format: ...key1=value1&key2=value2... + // + // For the CLI command-line parameter use format: &&tags "key1=value1&key2=value2..." + // + // For the cli-input-json file use format: "tags": "key1=value1&key2=value2..." + Tags []*Tag `locationName:"tags" type:"list"` + // The name of the token key used to extract the token from the HTTP headers. TokenKeyName *string `locationName:"tokenKeyName" min:"1" type:"string"` @@ -22316,6 +26622,16 @@ func (s *CreateAuthorizerInput) Validate() error { if s.TokenKeyName != nil && len(*s.TokenKeyName) < 1 { invalidParams.Add(request.NewErrParamMinLen("TokenKeyName", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -22347,6 +26663,12 @@ func (s *CreateAuthorizerInput) SetStatus(v string) *CreateAuthorizerInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateAuthorizerInput) SetTags(v []*Tag) *CreateAuthorizerInput { + s.Tags = v + return s +} + // SetTokenKeyName sets the TokenKeyName field's value. 
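// Illustrative sketch, not generated SDK code: using the CreateAuditSuppressionInput
// added above to suppress findings for a single device certificate for one month.
// The check name and certificate ID are placeholders, the DeviceCertificateId
// field is an assumption about ResourceIdentifier, and an *iot.IoT client plus
// the "time" and "log" imports are assumed.
//
//    _, err := client.CreateAuditSuppression(&iot.CreateAuditSuppressionInput{
//        CheckName: aws.String("DEVICE_CERTIFICATE_EXPIRING_CHECK"),
//        ResourceIdentifier: &iot.ResourceIdentifier{
//            DeviceCertificateId: aws.String("<device-certificate-id>"),
//        },
//        ExpirationDate: aws.Time(time.Now().AddDate(0, 1, 0)),
//        Description:    aws.String("certificate rotation already scheduled"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }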
func (s *CreateAuthorizerInput) SetTokenKeyName(v string) *CreateAuthorizerInput { s.TokenKeyName = &v @@ -22425,6 +26747,16 @@ func (s *CreateBillingGroupInput) Validate() error { if s.BillingGroupName != nil && len(*s.BillingGroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("BillingGroupName", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -22586,6 +26918,147 @@ func (s *CreateCertificateFromCsrOutput) SetCertificatePem(v string) *CreateCert return s } +type CreateDimensionInput struct { + _ struct{} `type:"structure"` + + // Each dimension must have a unique client request token. If you try to create + // a new dimension with the same token as a dimension that already exists, an + // exception occurs. If you omit this value, AWS SDKs will automatically generate + // a unique client request. + ClientRequestToken *string `locationName:"clientRequestToken" min:"1" type:"string" idempotencyToken:"true"` + + // A unique identifier for the dimension. Choose something that describes the + // type and value to make it easy to remember what it does. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` + + // Specifies the value or list of values for the dimension. For TOPIC_FILTER + // dimensions, this is a pattern used to match the MQTT topic (for example, + // "admin/#"). + // + // StringValues is a required field + StringValues []*string `locationName:"stringValues" min:"1" type:"list" required:"true"` + + // Metadata that can be used to manage the dimension. + Tags []*Tag `locationName:"tags" type:"list"` + + // Specifies the type of dimension. Supported types: TOPIC_FILTER. + // + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"DimensionType"` +} + +// String returns the string representation +func (s CreateDimensionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDimensionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDimensionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDimensionInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.StringValues == nil { + invalidParams.Add(request.NewErrParamRequired("StringValues")) + } + if s.StringValues != nil && len(s.StringValues) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StringValues", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. 
+func (s *CreateDimensionInput) SetClientRequestToken(v string) *CreateDimensionInput { + s.ClientRequestToken = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateDimensionInput) SetName(v string) *CreateDimensionInput { + s.Name = &v + return s +} + +// SetStringValues sets the StringValues field's value. +func (s *CreateDimensionInput) SetStringValues(v []*string) *CreateDimensionInput { + s.StringValues = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDimensionInput) SetTags(v []*Tag) *CreateDimensionInput { + s.Tags = v + return s +} + +// SetType sets the Type field's value. +func (s *CreateDimensionInput) SetType(v string) *CreateDimensionInput { + s.Type = &v + return s +} + +type CreateDimensionOutput struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon resource name) of the created dimension. + Arn *string `locationName:"arn" type:"string"` + + // A unique identifier for the dimension. + Name *string `locationName:"name" min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateDimensionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDimensionOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateDimensionOutput) SetArn(v string) *CreateDimensionOutput { + s.Arn = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateDimensionOutput) SetName(v string) *CreateDimensionOutput { + s.Name = &v + return s +} + type CreateDomainConfigurationInput struct { _ struct{} `type:"structure"` @@ -22606,8 +27079,19 @@ type CreateDomainConfigurationInput struct { ServerCertificateArns []*string `locationName:"serverCertificateArns" type:"list"` // The type of service delivered by the endpoint. + // + // AWS IoT Core currently supports only the DATA service type. ServiceType *string `locationName:"serviceType" type:"string" enum:"ServiceType"` + // Metadata which can be used to manage the domain configuration. + // + // For URI Request parameters use format: ...key1=value1&key2=value2... + // + // For the CLI command-line parameter use format: &&tags "key1=value1&key2=value2..." + // + // For the cli-input-json file use format: "tags": "key1=value1&key2=value2..." + Tags []*Tag `locationName:"tags" type:"list"` + // The certificate used to validate the server certificate and prove domain // name ownership. This certificate must be signed by a public certificate authority. // This value is not required for AWS-managed domains. @@ -22644,6 +27128,16 @@ func (s *CreateDomainConfigurationInput) Validate() error { invalidParams.AddNested("AuthorizerConfig", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -22681,6 +27175,12 @@ func (s *CreateDomainConfigurationInput) SetServiceType(v string) *CreateDomainC return s } +// SetTags sets the Tags field's value. +func (s *CreateDomainConfigurationInput) SetTags(v []*Tag) *CreateDomainConfigurationInput { + s.Tags = v + return s +} + // SetValidationCertificateArn sets the ValidationCertificateArn field's value. 
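// Illustrative sketch, not generated SDK code: creating the TOPIC_FILTER
// dimension described by the CreateDimension types above, using the "admin/#"
// pattern from the field documentation. Assumes an *iot.IoT client and the
// "log" import; the dimension name is a placeholder.
//
//    _, err := client.CreateDimension(&iot.CreateDimensionInput{
//        Name:         aws.String("AdminTopics"),
//        Type:         aws.String("TOPIC_FILTER"),
//        StringValues: []*string{aws.String("admin/#")},
//    })
//    if err != nil {
//        log.Fatal(err)
//    }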
func (s *CreateDomainConfigurationInput) SetValidationCertificateArn(v string) *CreateDomainConfigurationInput { s.ValidationCertificateArn = &v @@ -22781,6 +27281,16 @@ func (s *CreateDynamicThingGroupInput) Validate() error { if s.ThingGroupName != nil && len(*s.ThingGroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ThingGroupName", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -22995,6 +27505,16 @@ func (s *CreateJobInput) Validate() error { invalidParams.AddNested("PresignedUrlConfig", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -23241,6 +27761,16 @@ func (s *CreateMitigationActionInput) Validate() error { invalidParams.AddNested("ActionParams", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -23310,12 +27840,21 @@ type CreateOTAUpdateInput struct { // A list of additional OTA update parameters which are name-value pairs. AdditionalParameters map[string]*string `locationName:"additionalParameters" type:"map"` + // The criteria that determine when and how a job abort takes place. + AwsJobAbortConfig *AwsJobAbortConfig `locationName:"awsJobAbortConfig" type:"structure"` + // Configuration for the rollout of OTA updates. AwsJobExecutionsRolloutConfig *AwsJobExecutionsRolloutConfig `locationName:"awsJobExecutionsRolloutConfig" type:"structure"` // Configuration information for pre-signed URLs. AwsJobPresignedUrlConfig *AwsJobPresignedUrlConfig `locationName:"awsJobPresignedUrlConfig" type:"structure"` + // Specifies the amount of time each device has to finish its execution of the + // job. A timer is started when the job execution status is set to IN_PROGRESS. + // If the job execution status is not set to another terminal state before the + // timer expires, it will be automatically set to TIMED_OUT. + AwsJobTimeoutConfig *AwsJobTimeoutConfig `locationName:"awsJobTimeoutConfig" type:"structure"` + // The description of the OTA update. Description *string `locationName:"description" type:"string"` @@ -23334,7 +27873,8 @@ type CreateOTAUpdateInput struct { // can choose the protocol. Protocols []*string `locationName:"protocols" min:"1" type:"list"` - // The IAM role that allows access to the AWS IoT Jobs service. + // The IAM role that grants AWS IoT access to the Amazon S3, AWS IoT jobs and + // AWS Code Signing resources to create an OTA update job. // // RoleArn is a required field RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"` @@ -23350,7 +27890,7 @@ type CreateOTAUpdateInput struct { // by all things originally in the group. Valid values: CONTINUOUS | SNAPSHOT. TargetSelection *string `locationName:"targetSelection" type:"string" enum:"TargetSelection"` - // The targeted devices to receive OTA updates. + // The devices targeted to receive OTA updates. 
// // Targets is a required field Targets []*string `locationName:"targets" min:"1" type:"list" required:"true"` @@ -23396,6 +27936,11 @@ func (s *CreateOTAUpdateInput) Validate() error { if s.Targets != nil && len(s.Targets) < 1 { invalidParams.Add(request.NewErrParamMinLen("Targets", 1)) } + if s.AwsJobAbortConfig != nil { + if err := s.AwsJobAbortConfig.Validate(); err != nil { + invalidParams.AddNested("AwsJobAbortConfig", err.(request.ErrInvalidParams)) + } + } if s.AwsJobExecutionsRolloutConfig != nil { if err := s.AwsJobExecutionsRolloutConfig.Validate(); err != nil { invalidParams.AddNested("AwsJobExecutionsRolloutConfig", err.(request.ErrInvalidParams)) @@ -23411,6 +27956,16 @@ func (s *CreateOTAUpdateInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -23424,6 +27979,12 @@ func (s *CreateOTAUpdateInput) SetAdditionalParameters(v map[string]*string) *Cr return s } +// SetAwsJobAbortConfig sets the AwsJobAbortConfig field's value. +func (s *CreateOTAUpdateInput) SetAwsJobAbortConfig(v *AwsJobAbortConfig) *CreateOTAUpdateInput { + s.AwsJobAbortConfig = v + return s +} + // SetAwsJobExecutionsRolloutConfig sets the AwsJobExecutionsRolloutConfig field's value. func (s *CreateOTAUpdateInput) SetAwsJobExecutionsRolloutConfig(v *AwsJobExecutionsRolloutConfig) *CreateOTAUpdateInput { s.AwsJobExecutionsRolloutConfig = v @@ -23436,6 +27997,12 @@ func (s *CreateOTAUpdateInput) SetAwsJobPresignedUrlConfig(v *AwsJobPresignedUrl return s } +// SetAwsJobTimeoutConfig sets the AwsJobTimeoutConfig field's value. +func (s *CreateOTAUpdateInput) SetAwsJobTimeoutConfig(v *AwsJobTimeoutConfig) *CreateOTAUpdateInput { + s.AwsJobTimeoutConfig = v + return s +} + // SetDescription sets the Description field's value. func (s *CreateOTAUpdateInput) SetDescription(v string) *CreateOTAUpdateInput { s.Description = &v @@ -23557,6 +28124,15 @@ type CreatePolicyInput struct { // // PolicyName is a required field PolicyName *string `location:"uri" locationName:"policyName" min:"1" type:"string" required:"true"` + + // Metadata which can be used to manage the policy. + // + // For URI Request parameters use format: ...key1=value1&key2=value2... + // + // For the CLI command-line parameter use format: &&tags "key1=value1&key2=value2..." + // + // For the cli-input-json file use format: "tags": "key1=value1&key2=value2..." + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation @@ -23581,6 +28157,16 @@ func (s *CreatePolicyInput) Validate() error { if s.PolicyName != nil && len(*s.PolicyName) < 1 { invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -23600,6 +28186,12 @@ func (s *CreatePolicyInput) SetPolicyName(v string) *CreatePolicyInput { return s } +// SetTags sets the Tags field's value. +func (s *CreatePolicyInput) SetTags(v []*Tag) *CreatePolicyInput { + s.Tags = v + return s +} + // The output from the CreatePolicy operation. 
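// Illustrative sketch, not generated SDK code: the new AWS job configuration
// blocks plugged into CreateOTAUpdateInput. The ARNs are placeholders, "files"
// stands for a []*iot.OTAUpdateFile built elsewhere, and abortCfg/rollout are
// the values sketched near their type definitions earlier in this file.
//
//    _, err := client.CreateOTAUpdate(&iot.CreateOTAUpdateInput{
//        OtaUpdateId: aws.String("example-firmware-1-1"),
//        Targets:     []*string{aws.String("arn:aws:iot:<region>:<account>:thinggroup/firmware-group")},
//        RoleArn:     aws.String("arn:aws:iam::<account>:role/ota-update-role"),
//        Files:       files,
//        AwsJobAbortConfig:             abortCfg,
//        AwsJobExecutionsRolloutConfig: rollout,
//        AwsJobTimeoutConfig: &iot.AwsJobTimeoutConfig{
//            InProgressTimeoutInMinutes: aws.Int64(60),
//        },
//    })
//    if err != nil {
//        log.Fatal(err)
//    }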
type CreatePolicyOutput struct { _ struct{} `type:"structure"` @@ -23870,6 +28462,9 @@ type CreateProvisioningTemplateInput struct { // True to enable the fleet provisioning template, otherwise false. Enabled *bool `locationName:"enabled" type:"boolean"` + // Creates a pre-provisioning hook template. + PreProvisioningHook *ProvisioningHook `locationName:"preProvisioningHook" type:"structure"` + // The role ARN for the role associated with the fleet provisioning template. // This IoT role grants permission to provision a device. // @@ -23924,6 +28519,21 @@ func (s *CreateProvisioningTemplateInput) Validate() error { if s.TemplateName != nil && len(*s.TemplateName) < 1 { invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) } + if s.PreProvisioningHook != nil { + if err := s.PreProvisioningHook.Validate(); err != nil { + invalidParams.AddNested("PreProvisioningHook", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -23943,6 +28553,12 @@ func (s *CreateProvisioningTemplateInput) SetEnabled(v bool) *CreateProvisioning return s } +// SetPreProvisioningHook sets the PreProvisioningHook field's value. +func (s *CreateProvisioningTemplateInput) SetPreProvisioningHook(v *ProvisioningHook) *CreateProvisioningTemplateInput { + s.PreProvisioningHook = v + return s +} + // SetProvisioningRoleArn sets the ProvisioningRoleArn field's value. func (s *CreateProvisioningTemplateInput) SetProvisioningRoleArn(v string) *CreateProvisioningTemplateInput { s.ProvisioningRoleArn = &v @@ -24139,6 +28755,15 @@ type CreateRoleAliasInput struct { // // RoleArn is a required field RoleArn *string `locationName:"roleArn" min:"20" type:"string" required:"true"` + + // Metadata which can be used to manage the role alias. + // + // For URI Request parameters use format: ...key1=value1&key2=value2... + // + // For the CLI command-line parameter use format: &&tags "key1=value1&key2=value2..." + // + // For the cli-input-json file use format: "tags": "key1=value1&key2=value2..." + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation @@ -24169,6 +28794,16 @@ func (s *CreateRoleAliasInput) Validate() error { if s.RoleArn != nil && len(*s.RoleArn) < 20 { invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -24194,6 +28829,12 @@ func (s *CreateRoleAliasInput) SetRoleArn(v string) *CreateRoleAliasInput { return s } +// SetTags sets the Tags field's value. 
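// Illustrative sketch, not generated SDK code: attaching the new
// PreProvisioningHook to a fleet provisioning template. The ProvisioningHook
// field names (TargetArn, PayloadVersion) and the "2020-04-01" payload version
// are assumptions based on that type's definition elsewhere in this file; the
// ARNs and template body are placeholders.
//
//    _, err := client.CreateProvisioningTemplate(&iot.CreateProvisioningTemplateInput{
//        TemplateName:        aws.String("example-template"),
//        TemplateBody:        aws.String(templateBody),
//        ProvisioningRoleArn: aws.String("arn:aws:iam::<account>:role/provisioning-role"),
//        Enabled:             aws.Bool(true),
//        PreProvisioningHook: &iot.ProvisioningHook{
//            TargetArn:      aws.String("arn:aws:lambda:<region>:<account>:function:pre-provision-check"),
//            PayloadVersion: aws.String("2020-04-01"),
//        },
//    })
//    if err != nil {
//        log.Fatal(err)
//    }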
+func (s *CreateRoleAliasInput) SetTags(v []*Tag) *CreateRoleAliasInput { + s.Tags = v + return s +} + type CreateRoleAliasOutput struct { _ struct{} `type:"structure"` @@ -24289,6 +28930,16 @@ func (s *CreateScheduledAuditInput) Validate() error { if s.TargetCheckNames == nil { invalidParams.Add(request.NewErrParamRequired("TargetCheckNames")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -24358,10 +29009,19 @@ func (s *CreateScheduledAuditOutput) SetScheduledAuditArn(v string) *CreateSched type CreateSecurityProfileInput struct { _ struct{} `type:"structure"` + // Please use CreateSecurityProfileRequest$additionalMetricsToRetainV2 instead. + // + // A list of metrics whose data is retained (stored). By default, data is retained + // for any metric used in the profile's behaviors, but it is also retained for + // any metric specified here. + // + // Deprecated: Use additionalMetricsToRetainV2. + AdditionalMetricsToRetain []*string `locationName:"additionalMetricsToRetain" deprecated:"true" type:"list"` + // A list of metrics whose data is retained (stored). By default, data is retained // for any metric used in the profile's behaviors, but it is also retained for // any metric specified here. - AdditionalMetricsToRetain []*string `locationName:"additionalMetricsToRetain" type:"list"` + AdditionalMetricsToRetainV2 []*MetricToRetain `locationName:"additionalMetricsToRetainV2" type:"list"` // Specifies the destinations to which alerts are sent. (Alerts are always sent // to the console.) Alerts are generated when a device (thing) violates a behavior. @@ -24402,6 +29062,16 @@ func (s *CreateSecurityProfileInput) Validate() error { if s.SecurityProfileName != nil && len(*s.SecurityProfileName) < 1 { invalidParams.Add(request.NewErrParamMinLen("SecurityProfileName", 1)) } + if s.AdditionalMetricsToRetainV2 != nil { + for i, v := range s.AdditionalMetricsToRetainV2 { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalMetricsToRetainV2", i), err.(request.ErrInvalidParams)) + } + } + } if s.AlertTargets != nil { for i, v := range s.AlertTargets { if v == nil { @@ -24422,6 +29092,16 @@ func (s *CreateSecurityProfileInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -24435,6 +29115,12 @@ func (s *CreateSecurityProfileInput) SetAdditionalMetricsToRetain(v []*string) * return s } +// SetAdditionalMetricsToRetainV2 sets the AdditionalMetricsToRetainV2 field's value. +func (s *CreateSecurityProfileInput) SetAdditionalMetricsToRetainV2(v []*MetricToRetain) *CreateSecurityProfileInput { + s.AdditionalMetricsToRetainV2 = v + return s +} + // SetAlertTargets sets the AlertTargets field's value. 
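// Illustrative sketch, not generated SDK code: retaining extra metric data via
// the new additionalMetricsToRetainV2 list rather than the deprecated string
// list. The MetricToRetain field name (Metric) is an assumption based on that
// type's definition elsewhere in this file; "behavior" is the value sketched
// near the Behavior type earlier.
//
//    _, err := client.CreateSecurityProfile(&iot.CreateSecurityProfileInput{
//        SecurityProfileName: aws.String("example-profile"),
//        Behaviors:           []*iot.Behavior{behavior},
//        AdditionalMetricsToRetainV2: []*iot.MetricToRetain{{
//            Metric: aws.String("aws:message-byte-size"),
//        }},
//    })
//    if err != nil {
//        log.Fatal(err)
//    }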
func (s *CreateSecurityProfileInput) SetAlertTargets(v map[string]*AlertTarget) *CreateSecurityProfileInput { s.AlertTargets = v @@ -24564,6 +29250,16 @@ func (s *CreateStreamInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -24691,6 +29387,16 @@ func (s *CreateThingGroupInput) Validate() error { if s.ThingGroupName != nil && len(*s.ThingGroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ThingGroupName", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -24778,6 +29484,10 @@ type CreateThingInput struct { // The name of the thing to create. // + // You can't change a thing's name after you create it. To change a thing's + // name, you must create a new thing, give it the new name, and then delete + // the old thing. + // // ThingName is a required field ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` @@ -24920,6 +29630,16 @@ func (s *CreateThingTypeInput) Validate() error { if s.ThingTypeName != nil && len(*s.ThingTypeName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ThingTypeName", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -25231,6 +29951,79 @@ func (s DeleteAccountAuditConfigurationOutput) GoString() string { return s.String() } +type DeleteAuditSuppressionInput struct { + _ struct{} `type:"structure"` + + // An audit check name. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration + // to see the list of all checks, including those that are enabled or use UpdateAccountAuditConfiguration + // to select which checks are enabled.) + // + // CheckName is a required field + CheckName *string `locationName:"checkName" type:"string" required:"true"` + + // Information that identifies the noncompliant resource. + // + // ResourceIdentifier is a required field + ResourceIdentifier *ResourceIdentifier `locationName:"resourceIdentifier" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DeleteAuditSuppressionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAuditSuppressionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteAuditSuppressionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAuditSuppressionInput"} + if s.CheckName == nil { + invalidParams.Add(request.NewErrParamRequired("CheckName")) + } + if s.ResourceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceIdentifier")) + } + if s.ResourceIdentifier != nil { + if err := s.ResourceIdentifier.Validate(); err != nil { + invalidParams.AddNested("ResourceIdentifier", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCheckName sets the CheckName field's value. +func (s *DeleteAuditSuppressionInput) SetCheckName(v string) *DeleteAuditSuppressionInput { + s.CheckName = &v + return s +} + +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *DeleteAuditSuppressionInput) SetResourceIdentifier(v *ResourceIdentifier) *DeleteAuditSuppressionInput { + s.ResourceIdentifier = v + return s +} + +type DeleteAuditSuppressionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAuditSuppressionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAuditSuppressionOutput) GoString() string { + return s.String() +} + type DeleteAuthorizerInput struct { _ struct{} `type:"structure"` @@ -25479,8 +30272,8 @@ func (s DeleteCertificateOutput) GoString() string { // You can't delete the resource because it is attached to one or more resources. type DeleteConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -25498,17 +30291,17 @@ func (s DeleteConflictException) GoString() string { func newErrorDeleteConflictException(v protocol.ResponseMetadata) error { return &DeleteConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeleteConflictException) Code() string { +func (s *DeleteConflictException) Code() string { return "DeleteConflictException" } // Message returns the exception's message. -func (s DeleteConflictException) Message() string { +func (s *DeleteConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -25516,22 +30309,77 @@ func (s DeleteConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeleteConflictException) OrigErr() error { +func (s *DeleteConflictException) OrigErr() error { return nil } -func (s DeleteConflictException) Error() string { +func (s *DeleteConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeleteConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeleteConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeleteConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeleteConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +type DeleteDimensionInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the dimension that you want to delete. 
+ // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDimensionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDimensionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDimensionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDimensionInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DeleteDimensionInput) SetName(v string) *DeleteDimensionInput { + s.Name = &v + return s +} + +type DeleteDimensionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDimensionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDimensionOutput) GoString() string { + return s.String() } type DeleteDomainConfigurationInput struct { @@ -25897,10 +30745,10 @@ type DeleteOTAUpdateInput struct { DeleteStream *bool `location:"querystring" locationName:"deleteStream" type:"boolean"` // Specifies if the AWS Job associated with the OTA update should be deleted - // with the OTA update is deleted. + // when the OTA update is deleted. ForceDeleteAWSJob *bool `location:"querystring" locationName:"forceDeleteAWSJob" type:"boolean"` - // The OTA update ID to delete. + // The ID of the OTA update to delete. // // OtaUpdateId is a required field OtaUpdateId *string `location:"uri" locationName:"otaUpdateId" min:"1" type:"string" required:"true"` @@ -27193,6 +32041,126 @@ func (s *DescribeAuditMitigationActionsTaskOutput) SetTaskStatus(v string) *Desc return s } +type DescribeAuditSuppressionInput struct { + _ struct{} `type:"structure"` + + // An audit check name. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration + // to see the list of all checks, including those that are enabled or use UpdateAccountAuditConfiguration + // to select which checks are enabled.) + // + // CheckName is a required field + CheckName *string `locationName:"checkName" type:"string" required:"true"` + + // Information that identifies the noncompliant resource. + // + // ResourceIdentifier is a required field + ResourceIdentifier *ResourceIdentifier `locationName:"resourceIdentifier" type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeAuditSuppressionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAuditSuppressionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeAuditSuppressionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAuditSuppressionInput"} + if s.CheckName == nil { + invalidParams.Add(request.NewErrParamRequired("CheckName")) + } + if s.ResourceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceIdentifier")) + } + if s.ResourceIdentifier != nil { + if err := s.ResourceIdentifier.Validate(); err != nil { + invalidParams.AddNested("ResourceIdentifier", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCheckName sets the CheckName field's value. +func (s *DescribeAuditSuppressionInput) SetCheckName(v string) *DescribeAuditSuppressionInput { + s.CheckName = &v + return s +} + +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *DescribeAuditSuppressionInput) SetResourceIdentifier(v *ResourceIdentifier) *DescribeAuditSuppressionInput { + s.ResourceIdentifier = v + return s +} + +type DescribeAuditSuppressionOutput struct { + _ struct{} `type:"structure"` + + // An audit check name. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration + // to see the list of all checks, including those that are enabled or use UpdateAccountAuditConfiguration + // to select which checks are enabled.) + CheckName *string `locationName:"checkName" type:"string"` + + // The description of the audit suppression. + Description *string `locationName:"description" type:"string"` + + // The epoch timestamp in seconds at which this suppression expires. + ExpirationDate *time.Time `locationName:"expirationDate" type:"timestamp"` + + // Information that identifies the noncompliant resource. + ResourceIdentifier *ResourceIdentifier `locationName:"resourceIdentifier" type:"structure"` + + // Indicates whether a suppression should exist indefinitely or not. + SuppressIndefinitely *bool `locationName:"suppressIndefinitely" type:"boolean"` +} + +// String returns the string representation +func (s DescribeAuditSuppressionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAuditSuppressionOutput) GoString() string { + return s.String() +} + +// SetCheckName sets the CheckName field's value. +func (s *DescribeAuditSuppressionOutput) SetCheckName(v string) *DescribeAuditSuppressionOutput { + s.CheckName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *DescribeAuditSuppressionOutput) SetDescription(v string) *DescribeAuditSuppressionOutput { + s.Description = &v + return s +} + +// SetExpirationDate sets the ExpirationDate field's value. +func (s *DescribeAuditSuppressionOutput) SetExpirationDate(v time.Time) *DescribeAuditSuppressionOutput { + s.ExpirationDate = &v + return s +} + +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *DescribeAuditSuppressionOutput) SetResourceIdentifier(v *ResourceIdentifier) *DescribeAuditSuppressionOutput { + s.ResourceIdentifier = v + return s +} + +// SetSuppressIndefinitely sets the SuppressIndefinitely field's value. 
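// Illustrative sketch, not generated SDK code: reading a suppression back and
// checking whether it is open-ended. The DeviceCertificateId field is an
// assumption about ResourceIdentifier; assumes an *iot.IoT client and the
// "fmt" import.
//
//    out, err := client.DescribeAuditSuppression(&iot.DescribeAuditSuppressionInput{
//        CheckName: aws.String("DEVICE_CERTIFICATE_EXPIRING_CHECK"),
//        ResourceIdentifier: &iot.ResourceIdentifier{
//            DeviceCertificateId: aws.String("<device-certificate-id>"),
//        },
//    })
//    if err == nil && !aws.BoolValue(out.SuppressIndefinitely) {
//        fmt.Println("suppression expires:", aws.TimeValue(out.ExpirationDate))
//    }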
+func (s *DescribeAuditSuppressionOutput) SetSuppressIndefinitely(v bool) *DescribeAuditSuppressionOutput { + s.SuppressIndefinitely = &v + return s +} + type DescribeAuditTaskInput struct { _ struct{} `type:"structure"` @@ -27655,6 +32623,116 @@ func (s *DescribeDefaultAuthorizerOutput) SetAuthorizerDescription(v *Authorizer return s } +type DescribeDimensionInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the dimension. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDimensionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDimensionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDimensionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDimensionInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DescribeDimensionInput) SetName(v string) *DescribeDimensionInput { + s.Name = &v + return s +} + +type DescribeDimensionOutput struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon resource name) for the dimension. + Arn *string `locationName:"arn" type:"string"` + + // The date the dimension was created. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp"` + + // The date the dimension was last modified. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` + + // The unique identifier for the dimension. + Name *string `locationName:"name" min:"1" type:"string"` + + // The value or list of values used to scope the dimension. For example, for + // topic filters, this is the pattern used to match the MQTT topic name. + StringValues []*string `locationName:"stringValues" min:"1" type:"list"` + + // The type of the dimension. + Type *string `locationName:"type" type:"string" enum:"DimensionType"` +} + +// String returns the string representation +func (s DescribeDimensionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDimensionOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DescribeDimensionOutput) SetArn(v string) *DescribeDimensionOutput { + s.Arn = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *DescribeDimensionOutput) SetCreationDate(v time.Time) *DescribeDimensionOutput { + s.CreationDate = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *DescribeDimensionOutput) SetLastModifiedDate(v time.Time) *DescribeDimensionOutput { + s.LastModifiedDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *DescribeDimensionOutput) SetName(v string) *DescribeDimensionOutput { + s.Name = &v + return s +} + +// SetStringValues sets the StringValues field's value. +func (s *DescribeDimensionOutput) SetStringValues(v []*string) *DescribeDimensionOutput { + s.StringValues = v + return s +} + +// SetType sets the Type field's value. 
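// Illustrative sketch, not generated SDK code: reading a dimension back and
// printing its topic filter patterns. Assumes an *iot.IoT client and the
// "fmt" import; the dimension name is a placeholder.
//
//    out, err := client.DescribeDimension(&iot.DescribeDimensionInput{
//        Name: aws.String("AdminTopics"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.Type), aws.StringValueSlice(out.StringValues))
//    }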
+func (s *DescribeDimensionOutput) SetType(v string) *DescribeDimensionOutput { + s.Type = &v + return s +} + type DescribeDomainConfigurationInput struct { _ struct{} `type:"structure"` @@ -27717,6 +32795,9 @@ type DescribeDomainConfigurationOutput struct { // The type of the domain. DomainType *string `locationName:"domainType" type:"string" enum:"DomainType"` + // The date and time the domain configuration's status was last changed. + LastStatusChangeDate *time.Time `locationName:"lastStatusChangeDate" type:"timestamp"` + // A list containing summary information about the server certificate included // in the domain configuration. ServerCertificates []*ServerCertificateSummary `locationName:"serverCertificates" type:"list"` @@ -27771,6 +32852,12 @@ func (s *DescribeDomainConfigurationOutput) SetDomainType(v string) *DescribeDom return s } +// SetLastStatusChangeDate sets the LastStatusChangeDate field's value. +func (s *DescribeDomainConfigurationOutput) SetLastStatusChangeDate(v time.Time) *DescribeDomainConfigurationOutput { + s.LastStatusChangeDate = &v + return s +} + // SetServerCertificates sets the ServerCertificates field's value. func (s *DescribeDomainConfigurationOutput) SetServerCertificates(v []*ServerCertificateSummary) *DescribeDomainConfigurationOutput { s.ServerCertificates = v @@ -28344,6 +33431,9 @@ type DescribeProvisioningTemplateOutput struct { // The date when the fleet provisioning template was last modified. LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` + // Gets information about a pre-provisioned hook. + PreProvisioningHook *ProvisioningHook `locationName:"preProvisioningHook" type:"structure"` + // The ARN of the role associated with the provisioning template. This IoT role // grants permission to provision a device. ProvisioningRoleArn *string `locationName:"provisioningRoleArn" min:"20" type:"string"` @@ -28398,6 +33488,12 @@ func (s *DescribeProvisioningTemplateOutput) SetLastModifiedDate(v time.Time) *D return s } +// SetPreProvisioningHook sets the PreProvisioningHook field's value. +func (s *DescribeProvisioningTemplateOutput) SetPreProvisioningHook(v *ProvisioningHook) *DescribeProvisioningTemplateOutput { + s.PreProvisioningHook = v + return s +} + // SetProvisioningRoleArn sets the ProvisioningRoleArn field's value. func (s *DescribeProvisioningTemplateOutput) SetProvisioningRoleArn(v string) *DescribeProvisioningTemplateOutput { s.ProvisioningRoleArn = &v @@ -28751,10 +33847,19 @@ func (s *DescribeSecurityProfileInput) SetSecurityProfileName(v string) *Describ type DescribeSecurityProfileOutput struct { _ struct{} `type:"structure"` + // Please use DescribeSecurityProfileResponse$additionalMetricsToRetainV2 instead. + // // A list of metrics whose data is retained (stored). By default, data is retained // for any metric used in the profile's behaviors, but it is also retained for // any metric specified here. - AdditionalMetricsToRetain []*string `locationName:"additionalMetricsToRetain" type:"list"` + // + // Deprecated: Use additionalMetricsToRetainV2. + AdditionalMetricsToRetain []*string `locationName:"additionalMetricsToRetain" deprecated:"true" type:"list"` + + // A list of metrics whose data is retained (stored). By default, data is retained + // for any metric used in the profile's behaviors, but it is also retained for + // any metric specified here. + AdditionalMetricsToRetainV2 []*MetricToRetain `locationName:"additionalMetricsToRetainV2" type:"list"` // Where the alerts are sent. 
(Alerts are always sent to the console.) AlertTargets map[string]*AlertTarget `locationName:"alertTargets" type:"map"` @@ -28800,6 +33905,12 @@ func (s *DescribeSecurityProfileOutput) SetAdditionalMetricsToRetain(v []*string return s } +// SetAdditionalMetricsToRetainV2 sets the AdditionalMetricsToRetainV2 field's value. +func (s *DescribeSecurityProfileOutput) SetAdditionalMetricsToRetainV2(v []*MetricToRetain) *DescribeSecurityProfileOutput { + s.AdditionalMetricsToRetainV2 = v + return s +} + // SetAlertTargets sets the AlertTargets field's value. func (s *DescribeSecurityProfileOutput) SetAlertTargets(v map[string]*AlertTarget) *DescribeSecurityProfileOutput { s.AlertTargets = v @@ -29109,7 +34220,14 @@ type DescribeThingOutput struct { // The name of the billing group the thing belongs to. BillingGroupName *string `locationName:"billingGroupName" min:"1" type:"string"` - // The default client ID. + // The default MQTT client ID. For a typical device, the thing name is also + // used as the default MQTT client ID. Although we don’t require a mapping + // between a thing's registry name and its use of MQTT client IDs, certificates, + // or shadow state, we recommend that you choose a thing name and use it as + // the MQTT client ID for the registry and the Device Shadow service. + // + // This lets you better organize your AWS IoT fleet without removing the flexibility + // of the underlying device certificate model or shadows. DefaultClientId *string `locationName:"defaultClientId" type:"string"` // The ARN of the thing to describe. @@ -29577,8 +34695,9 @@ type DetachPrincipalPolicyInput struct { // The principal. // - // If the principal is a certificate, specify the certificate ARN. If the principal - // is an Amazon Cognito identity, specify the identity ID. + // Valid principals are CertificateArn (arn:aws:iot:region:accountId:cert/certificateId), + // thingGroupArn (arn:aws:iot:region:accountId:thinggroup/groupName) and CognitoId + // (region:id). // // Principal is a required field Principal *string `location:"header" locationName:"x-amzn-iot-principal" type:"string" required:"true"` @@ -30430,14 +35549,14 @@ type ExponentialRolloutRate struct { // The exponential factor to increase the rate of rollout for a job. // + // AWS IoT supports up to one digit after the decimal (for example, 1.5, but + // not 1.55). + // // IncrementFactor is a required field IncrementFactor *float64 `locationName:"incrementFactor" min:"1" type:"double" required:"true"` // The criteria to initiate the increase in rate of rollout for a job. // - // AWS IoT supports up to one digit after the decimal (for example, 1.5, but - // not 1.55). - // // RateIncreaseCriteria is a required field RateIncreaseCriteria *RateIncreaseCriteria `locationName:"rateIncreaseCriteria" type:"structure" required:"true"` } @@ -30753,7 +35872,9 @@ type GetEffectivePoliciesInput struct { // The Cognito identity pool ID. CognitoIdentityPoolId *string `locationName:"cognitoIdentityPoolId" type:"string"` - // The principal. + // The principal. Valid principals are CertificateArn (arn:aws:iot:region:accountId:cert/certificateId), + // thingGroupArn (arn:aws:iot:region:accountId:thinggroup/groupName) and CognitoId + // (region:id). Principal *string `locationName:"principal" type:"string"` // The thing name. @@ -31794,7 +36915,7 @@ type HttpAction struct { // URL must be a prefix of the endpoint URL. If you do not specify a confirmation // URL AWS IoT uses the endpoint URL as the confirmation URL. 
If you use substitution // templates in the confirmationUrl, you must create and enable topic rule destinations - // that match each possible value of the substituion template before traffic + // that match each possible value of the substitution template before traffic // is allowed to your endpoint URL. ConfirmationUrl *string `locationName:"confirmationUrl" type:"string"` @@ -32130,8 +37251,8 @@ func (s *ImplicitDeny) SetPolicies(v []*Policy) *ImplicitDeny { // The index is not ready. type IndexNotReadyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -32149,17 +37270,17 @@ func (s IndexNotReadyException) GoString() string { func newErrorIndexNotReadyException(v protocol.ResponseMetadata) error { return &IndexNotReadyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IndexNotReadyException) Code() string { +func (s *IndexNotReadyException) Code() string { return "IndexNotReadyException" } // Message returns the exception's message. -func (s IndexNotReadyException) Message() string { +func (s *IndexNotReadyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -32167,28 +37288,28 @@ func (s IndexNotReadyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IndexNotReadyException) OrigErr() error { +func (s *IndexNotReadyException) OrigErr() error { return nil } -func (s IndexNotReadyException) Error() string { +func (s *IndexNotReadyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IndexNotReadyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IndexNotReadyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IndexNotReadyException) RequestID() string { - return s.respMetadata.RequestID +func (s *IndexNotReadyException) RequestID() string { + return s.RespMetadata.RequestID } // An unexpected error has occurred. type InternalException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -32206,17 +37327,17 @@ func (s InternalException) GoString() string { func newErrorInternalException(v protocol.ResponseMetadata) error { return &InternalException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalException) Code() string { +func (s *InternalException) Code() string { return "InternalException" } // Message returns the exception's message. -func (s InternalException) Message() string { +func (s *InternalException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -32224,28 +37345,28 @@ func (s InternalException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InternalException) OrigErr() error { +func (s *InternalException) OrigErr() error { return nil } -func (s InternalException) Error() string { +func (s *InternalException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalException) RequestID() string { + return s.RespMetadata.RequestID } // An unexpected error has occurred. type InternalFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -32263,17 +37384,17 @@ func (s InternalFailureException) GoString() string { func newErrorInternalFailureException(v protocol.ResponseMetadata) error { return &InternalFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalFailureException) Code() string { +func (s *InternalFailureException) Code() string { return "InternalFailureException" } // Message returns the exception's message. -func (s InternalFailureException) Message() string { +func (s *InternalFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -32281,28 +37402,28 @@ func (s InternalFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalFailureException) OrigErr() error { +func (s *InternalFailureException) OrigErr() error { return nil } -func (s InternalFailureException) Error() string { +func (s *InternalFailureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalFailureException) RequestID() string { + return s.RespMetadata.RequestID } // The aggregation is invalid. type InvalidAggregationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -32319,17 +37440,17 @@ func (s InvalidAggregationException) GoString() string { func newErrorInvalidAggregationException(v protocol.ResponseMetadata) error { return &InvalidAggregationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAggregationException) Code() string { +func (s *InvalidAggregationException) Code() string { return "InvalidAggregationException" } // Message returns the exception's message. 
-func (s InvalidAggregationException) Message() string { +func (s *InvalidAggregationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -32337,28 +37458,28 @@ func (s InvalidAggregationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAggregationException) OrigErr() error { +func (s *InvalidAggregationException) OrigErr() error { return nil } -func (s InvalidAggregationException) Error() string { +func (s *InvalidAggregationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAggregationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAggregationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAggregationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAggregationException) RequestID() string { + return s.RespMetadata.RequestID } // The query is invalid. type InvalidQueryException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -32376,17 +37497,17 @@ func (s InvalidQueryException) GoString() string { func newErrorInvalidQueryException(v protocol.ResponseMetadata) error { return &InvalidQueryException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidQueryException) Code() string { +func (s *InvalidQueryException) Code() string { return "InvalidQueryException" } // Message returns the exception's message. -func (s InvalidQueryException) Message() string { +func (s *InvalidQueryException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -32394,28 +37515,28 @@ func (s InvalidQueryException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidQueryException) OrigErr() error { +func (s *InvalidQueryException) OrigErr() error { return nil } -func (s InvalidQueryException) Error() string { +func (s *InvalidQueryException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidQueryException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidQueryException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidQueryException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidQueryException) RequestID() string { + return s.RespMetadata.RequestID } // The request is not valid. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -32433,17 +37554,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -32451,28 +37572,28 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // The response is invalid. type InvalidResponseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -32490,17 +37611,17 @@ func (s InvalidResponseException) GoString() string { func newErrorInvalidResponseException(v protocol.ResponseMetadata) error { return &InvalidResponseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidResponseException) Code() string { +func (s *InvalidResponseException) Code() string { return "InvalidResponseException" } // Message returns the exception's message. -func (s InvalidResponseException) Message() string { +func (s *InvalidResponseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -32508,30 +37629,30 @@ func (s InvalidResponseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResponseException) OrigErr() error { +func (s *InvalidResponseException) OrigErr() error { return nil } -func (s InvalidResponseException) Error() string { +func (s *InvalidResponseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidResponseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResponseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidResponseException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResponseException) RequestID() string { + return s.RespMetadata.RequestID } // An attempt was made to change to an invalid state, for example by deleting // a job or a job execution which is "IN_PROGRESS" without setting the force // parameter. type InvalidStateTransitionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. 
Message_ *string `locationName:"message" type:"string"` @@ -32549,17 +37670,17 @@ func (s InvalidStateTransitionException) GoString() string { func newErrorInvalidStateTransitionException(v protocol.ResponseMetadata) error { return &InvalidStateTransitionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidStateTransitionException) Code() string { +func (s *InvalidStateTransitionException) Code() string { return "InvalidStateTransitionException" } // Message returns the exception's message. -func (s InvalidStateTransitionException) Message() string { +func (s *InvalidStateTransitionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -32567,22 +37688,22 @@ func (s InvalidStateTransitionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidStateTransitionException) OrigErr() error { +func (s *InvalidStateTransitionException) OrigErr() error { return nil } -func (s InvalidStateTransitionException) Error() string { +func (s *InvalidStateTransitionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidStateTransitionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidStateTransitionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidStateTransitionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidStateTransitionException) RequestID() string { + return s.RespMetadata.RequestID } // Sends message data to an AWS IoT Analytics channel. @@ -33600,8 +38721,8 @@ func (s *LambdaAction) SetFunctionArn(v string) *LambdaAction { // A limit has been exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -33619,17 +38740,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -33637,22 +38758,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListActiveViolationsInput struct { @@ -33770,7 +38891,9 @@ type ListAttachedPoliciesInput struct { // When true, recursively list attached policies. Recursive *bool `location:"querystring" locationName:"recursive" type:"boolean"` - // The group or principal for which the policies will be listed. + // The group or principal for which the policies will be listed. Valid principals + // are CertificateArn (arn:aws:iot:region:accountId:cert/certificateId), thingGroupArn + // (arn:aws:iot:region:accountId:thinggroup/groupName) and CognitoId (region:id). // // Target is a required field Target *string `location:"uri" locationName:"target" type:"string" required:"true"` @@ -33872,6 +38995,11 @@ type ListAuditFindingsInput struct { // specify either the startTime and endTime or the taskId, but not both. EndTime *time.Time `locationName:"endTime" type:"timestamp"` + // Boolean flag indicating whether only the suppressed findings or the unsuppressed + // findings should be listed. If this parameter isn't provided, the response + // will list both suppressed and unsuppressed findings. + ListSuppressedFindings *bool `locationName:"listSuppressedFindings" type:"boolean"` + // The maximum number of results to return at one time. The default is 25. MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` @@ -33933,6 +39061,12 @@ func (s *ListAuditFindingsInput) SetEndTime(v time.Time) *ListAuditFindingsInput return s } +// SetListSuppressedFindings sets the ListSuppressedFindings field's value. +func (s *ListAuditFindingsInput) SetListSuppressedFindings(v bool) *ListAuditFindingsInput { + s.ListSuppressedFindings = &v + return s +} + // SetMaxResults sets the MaxResults field's value. func (s *ListAuditFindingsInput) SetMaxResults(v int64) *ListAuditFindingsInput { s.MaxResults = &v @@ -34261,6 +39395,119 @@ func (s *ListAuditMitigationActionsTasksOutput) SetTasks(v []*AuditMitigationAct return s } +type ListAuditSuppressionsInput struct { + _ struct{} `type:"structure"` + + // Determines whether suppressions are listed in ascending order by expiration + // date or not. If parameter isn't provided, ascendingOrder=true. + AscendingOrder *bool `locationName:"ascendingOrder" type:"boolean"` + + // An audit check name. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration + // to see the list of all checks, including those that are enabled or use UpdateAccountAuditConfiguration + // to select which checks are enabled.) + CheckName *string `locationName:"checkName" type:"string"` + + // The maximum number of results to return at one time. The default is 25. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information that identifies the noncompliant resource. + ResourceIdentifier *ResourceIdentifier `locationName:"resourceIdentifier" type:"structure"` +} + +// String returns the string representation +func (s ListAuditSuppressionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAuditSuppressionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListAuditSuppressionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAuditSuppressionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ResourceIdentifier != nil { + if err := s.ResourceIdentifier.Validate(); err != nil { + invalidParams.AddNested("ResourceIdentifier", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAscendingOrder sets the AscendingOrder field's value. +func (s *ListAuditSuppressionsInput) SetAscendingOrder(v bool) *ListAuditSuppressionsInput { + s.AscendingOrder = &v + return s +} + +// SetCheckName sets the CheckName field's value. +func (s *ListAuditSuppressionsInput) SetCheckName(v string) *ListAuditSuppressionsInput { + s.CheckName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAuditSuppressionsInput) SetMaxResults(v int64) *ListAuditSuppressionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAuditSuppressionsInput) SetNextToken(v string) *ListAuditSuppressionsInput { + s.NextToken = &v + return s +} + +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *ListAuditSuppressionsInput) SetResourceIdentifier(v *ResourceIdentifier) *ListAuditSuppressionsInput { + s.ResourceIdentifier = v + return s +} + +type ListAuditSuppressionsOutput struct { + _ struct{} `type:"structure"` + + // A token that can be used to retrieve the next set of results, or null if + // there are no additional results. + NextToken *string `locationName:"nextToken" type:"string"` + + // List of audit suppressions. + Suppressions []*AuditSuppression `locationName:"suppressions" type:"list"` +} + +// String returns the string representation +func (s ListAuditSuppressionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAuditSuppressionsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAuditSuppressionsOutput) SetNextToken(v string) *ListAuditSuppressionsOutput { + s.NextToken = &v + return s +} + +// SetSuppressions sets the Suppressions field's value. +func (s *ListAuditSuppressionsOutput) SetSuppressions(v []*AuditSuppression) *ListAuditSuppressionsOutput { + s.Suppressions = v + return s +} + type ListAuditTasksInput struct { _ struct{} `type:"structure"` @@ -34276,7 +39523,7 @@ type ListAuditTasksInput struct { NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` // The beginning of the time period. Audit information is retained for a limited - // time (180 days). Requesting a start time prior to what is retained results + // time (90 days). Requesting a start time prior to what is retained results // in an "InvalidRequestException". // // StartTime is a required field @@ -34860,6 +40107,85 @@ func (s *ListCertificatesOutput) SetNextMarker(v string) *ListCertificatesOutput return s } +type ListDimensionsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to retrieve at one time. + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next set of results. 
+ NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDimensionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDimensionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListDimensionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDimensionsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListDimensionsInput) SetMaxResults(v int64) *ListDimensionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDimensionsInput) SetNextToken(v string) *ListDimensionsInput { + s.NextToken = &v + return s +} + +type ListDimensionsOutput struct { + _ struct{} `type:"structure"` + + // A list of the names of the defined dimensions. Use DescribeDimension to get + // details for a dimension. + DimensionNames []*string `locationName:"dimensionNames" type:"list"` + + // A token that can be used to retrieve the next set of results, or null if + // there are no additional results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListDimensionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDimensionsOutput) GoString() string { + return s.String() +} + +// SetDimensionNames sets the DimensionNames field's value. +func (s *ListDimensionsOutput) SetDimensionNames(v []*string) *ListDimensionsOutput { + s.DimensionNames = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDimensionsOutput) SetNextToken(v string) *ListDimensionsOutput { + s.NextToken = &v + return s +} + type ListDomainConfigurationsInput struct { _ struct{} `type:"structure"` @@ -35900,7 +41226,9 @@ type ListPrincipalPoliciesInput struct { // The result page size. PageSize *int64 `location:"querystring" locationName:"pageSize" min:"1" type:"integer"` - // The principal. + // The principal. Valid principals are CertificateArn (arn:aws:iot:region:accountId:cert/certificateId), + // thingGroupArn (arn:aws:iot:region:accountId:thinggroup/groupName) and CognitoId + // (region:id). // // Principal is a required field Principal *string `location:"header" locationName:"x-amzn-iot-principal" type:"string" required:"true"` @@ -36524,6 +41852,9 @@ func (s *ListSecurityProfilesForTargetOutput) SetSecurityProfileTargetMappings(v type ListSecurityProfilesInput struct { _ struct{} `type:"structure"` + // A filter to limit results to the security profiles that use the defined dimension. + DimensionName *string `location:"querystring" locationName:"dimensionName" min:"1" type:"string"` + // The maximum number of results to return at one time. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` @@ -36544,6 +41875,9 @@ func (s ListSecurityProfilesInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *ListSecurityProfilesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListSecurityProfilesInput"} + if s.DimensionName != nil && len(*s.DimensionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DimensionName", 1)) + } if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } @@ -36554,6 +41888,12 @@ func (s *ListSecurityProfilesInput) Validate() error { return nil } +// SetDimensionName sets the DimensionName field's value. +func (s *ListSecurityProfilesInput) SetDimensionName(v string) *ListSecurityProfilesInput { + s.DimensionName = &v + return s +} + // SetMaxResults sets the MaxResults field's value. func (s *ListSecurityProfilesInput) SetMaxResults(v int64) *ListSecurityProfilesInput { s.MaxResults = &v @@ -37130,8 +42470,8 @@ func (s *ListThingGroupsInput) SetRecursive(v bool) *ListThingGroupsInput { type ListThingGroupsOutput struct { _ struct{} `type:"structure"` - // The token used to get the next set of results, or null if there are no additional - // results. + // The token used to get the next set of results. Will not be returned if operation + // has returned all results. NextToken *string `locationName:"nextToken" type:"string"` // The thing groups. @@ -37493,8 +42833,8 @@ func (s *ListThingTypesInput) SetThingTypeName(v string) *ListThingTypesInput { type ListThingTypesOutput struct { _ struct{} `type:"structure"` - // The token for the next set of results, or null if there are no additional - // results. + // The token for the next set of results. Will not be returned if operation + // has returned all results. NextToken *string `locationName:"nextToken" type:"string"` // The thing types. @@ -37588,8 +42928,8 @@ func (s *ListThingsInBillingGroupInput) SetNextToken(v string) *ListThingsInBill type ListThingsInBillingGroupOutput struct { _ struct{} `type:"structure"` - // The token used to get the next set of results, or null if there are no additional - // results. + // The token used to get the next set of results. Will not be returned if operation + // has returned all results. NextToken *string `locationName:"nextToken" type:"string"` // A list of things in the billing group. @@ -37802,8 +43142,8 @@ func (s *ListThingsInput) SetThingTypeName(v string) *ListThingsInput { type ListThingsOutput struct { _ struct{} `type:"structure"` - // The token used to get the next set of results, or null if there are no additional - // results. + // The token used to get the next set of results. Will not be returned if operation + // has returned all results. NextToken *string `locationName:"nextToken" type:"string"` // The things. @@ -38358,8 +43698,8 @@ func (s *LoggingOptionsPayload) SetRoleArn(v string) *LoggingOptionsPayload { // The policy documentation is not valid. type MalformedPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -38377,17 +43717,17 @@ func (s MalformedPolicyException) GoString() string { func newErrorMalformedPolicyException(v protocol.ResponseMetadata) error { return &MalformedPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s MalformedPolicyException) Code() string { +func (s *MalformedPolicyException) Code() string { return "MalformedPolicyException" } // Message returns the exception's message. -func (s MalformedPolicyException) Message() string { +func (s *MalformedPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -38395,22 +43735,131 @@ func (s MalformedPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MalformedPolicyException) OrigErr() error { +func (s *MalformedPolicyException) OrigErr() error { return nil } -func (s MalformedPolicyException) Error() string { +func (s *MalformedPolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MalformedPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MalformedPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MalformedPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *MalformedPolicyException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The dimension of a metric. +type MetricDimension struct { + _ struct{} `type:"structure"` + + // A unique identifier for the dimension. + // + // DimensionName is a required field + DimensionName *string `locationName:"dimensionName" min:"1" type:"string" required:"true"` + + // Defines how the dimensionValues of a dimension are interpreted. For example, + // for dimension type TOPIC_FILTER, the IN operator, a message will be counted + // only if its topic matches one of the topic filters. With NOT_IN operator, + // a message will be counted only if it doesn't match any of the topic filters. + // The operator is optional: if it's not provided (is null), it will be interpreted + // as IN. + Operator *string `locationName:"operator" type:"string" enum:"DimensionValueOperator"` +} + +// String returns the string representation +func (s MetricDimension) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricDimension) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricDimension) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricDimension"} + if s.DimensionName == nil { + invalidParams.Add(request.NewErrParamRequired("DimensionName")) + } + if s.DimensionName != nil && len(*s.DimensionName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DimensionName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensionName sets the DimensionName field's value. +func (s *MetricDimension) SetDimensionName(v string) *MetricDimension { + s.DimensionName = &v + return s +} + +// SetOperator sets the Operator field's value. +func (s *MetricDimension) SetOperator(v string) *MetricDimension { + s.Operator = &v + return s +} + +// The metric you want to retain. Dimensions are optional. +type MetricToRetain struct { + _ struct{} `type:"structure"` + + // What is measured by the behavior. + // + // Metric is a required field + Metric *string `locationName:"metric" type:"string" required:"true"` + + // The dimension of a metric. 
+ MetricDimension *MetricDimension `locationName:"metricDimension" type:"structure"` +} + +// String returns the string representation +func (s MetricToRetain) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricToRetain) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricToRetain) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricToRetain"} + if s.Metric == nil { + invalidParams.Add(request.NewErrParamRequired("Metric")) + } + if s.MetricDimension != nil { + if err := s.MetricDimension.Validate(); err != nil { + invalidParams.AddNested("MetricDimension", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMetric sets the Metric field's value. +func (s *MetricToRetain) SetMetric(v string) *MetricToRetain { + s.Metric = &v + return s +} + +// SetMetricDimension sets the MetricDimension field's value. +func (s *MetricToRetain) SetMetricDimension(v *MetricDimension) *MetricToRetain { + s.MetricDimension = v + return s } // The value to be compared with the metric. @@ -38778,8 +44227,8 @@ func (s *NonCompliantResource) SetResourceType(v string) *NonCompliantResource { // The resource is not configured. type NotConfiguredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -38797,17 +44246,17 @@ func (s NotConfiguredException) GoString() string { func newErrorNotConfiguredException(v protocol.ResponseMetadata) error { return &NotConfiguredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotConfiguredException) Code() string { +func (s *NotConfiguredException) Code() string { return "NotConfiguredException" } // Message returns the exception's message. -func (s NotConfiguredException) Message() string { +func (s *NotConfiguredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -38815,22 +44264,22 @@ func (s NotConfiguredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotConfiguredException) OrigErr() error { +func (s *NotConfiguredException) OrigErr() error { return nil } -func (s NotConfiguredException) Error() string { +func (s *NotConfiguredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotConfiguredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotConfiguredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotConfiguredException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotConfiguredException) RequestID() string { + return s.RespMetadata.RequestID } // Describes a file to be associated with an OTA update. @@ -39398,6 +44847,61 @@ func (s *PresignedUrlConfig) SetRoleArn(v string) *PresignedUrlConfig { return s } +// Structure that contains payloadVersion and targetArn. +type ProvisioningHook struct { + _ struct{} `type:"structure"` + + // The payload that was sent to the target function. 
+ // + // Note: Only Lambda functions are currently supported. + PayloadVersion *string `locationName:"payloadVersion" min:"10" type:"string"` + + // The ARN of the target function. + // + // Note: Only Lambda functions are currently supported. + // + // TargetArn is a required field + TargetArn *string `locationName:"targetArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s ProvisioningHook) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisioningHook) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvisioningHook) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvisioningHook"} + if s.PayloadVersion != nil && len(*s.PayloadVersion) < 10 { + invalidParams.Add(request.NewErrParamMinLen("PayloadVersion", 10)) + } + if s.TargetArn == nil { + invalidParams.Add(request.NewErrParamRequired("TargetArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPayloadVersion sets the PayloadVersion field's value. +func (s *ProvisioningHook) SetPayloadVersion(v string) *ProvisioningHook { + s.PayloadVersion = &v + return s +} + +// SetTargetArn sets the TargetArn field's value. +func (s *ProvisioningHook) SetTargetArn(v string) *ProvisioningHook { + s.TargetArn = &v + return s +} + // A summary of information about a fleet provisioning template. type ProvisioningTemplateSummary struct { _ struct{} `type:"structure"` @@ -39759,6 +45263,15 @@ type RegisterCACertificateInput struct { // A boolean value that specifies if the CA certificate is set to active. SetAsActive *bool `location:"querystring" locationName:"setAsActive" type:"boolean"` + // Metadata which can be used to manage the CA certificate. + // + // For URI Request parameters use format: ...key1=value1&key2=value2... + // + // For the CLI command-line parameter use format: &&tags "key1=value1&key2=value2..." + // + // For the cli-input-json file use format: "tags": "key1=value1&key2=value2..." + Tags []*Tag `locationName:"tags" type:"list"` + // The private key verification certificate. // // VerificationCertificate is a required field @@ -39795,6 +45308,16 @@ func (s *RegisterCACertificateInput) Validate() error { invalidParams.AddNested("RegistrationConfig", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -39826,6 +45349,12 @@ func (s *RegisterCACertificateInput) SetSetAsActive(v bool) *RegisterCACertifica return s } +// SetTags sets the Tags field's value. +func (s *RegisterCACertificateInput) SetTags(v []*Tag) *RegisterCACertificateInput { + s.Tags = v + return s +} + // SetVerificationCertificate sets the VerificationCertificate field's value. func (s *RegisterCACertificateInput) SetVerificationCertificate(v string) *RegisterCACertificateInput { s.VerificationCertificate = &v @@ -39972,10 +45501,93 @@ func (s *RegisterCertificateOutput) SetCertificateId(v string) *RegisterCertific return s } +type RegisterCertificateWithoutCAInput struct { + _ struct{} `type:"structure"` + + // The certificate data, in PEM format. 
+ // + // CertificatePem is a required field + CertificatePem *string `locationName:"certificatePem" min:"1" type:"string" required:"true"` + + // The status of the register certificate request. + Status *string `locationName:"status" type:"string" enum:"CertificateStatus"` +} + +// String returns the string representation +func (s RegisterCertificateWithoutCAInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterCertificateWithoutCAInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterCertificateWithoutCAInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterCertificateWithoutCAInput"} + if s.CertificatePem == nil { + invalidParams.Add(request.NewErrParamRequired("CertificatePem")) + } + if s.CertificatePem != nil && len(*s.CertificatePem) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificatePem", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificatePem sets the CertificatePem field's value. +func (s *RegisterCertificateWithoutCAInput) SetCertificatePem(v string) *RegisterCertificateWithoutCAInput { + s.CertificatePem = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *RegisterCertificateWithoutCAInput) SetStatus(v string) *RegisterCertificateWithoutCAInput { + s.Status = &v + return s +} + +type RegisterCertificateWithoutCAOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the registered certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // The ID of the registered certificate. (The last part of the certificate ARN + // contains the certificate ID. + CertificateId *string `locationName:"certificateId" min:"64" type:"string"` +} + +// String returns the string representation +func (s RegisterCertificateWithoutCAOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterCertificateWithoutCAOutput) GoString() string { + return s.String() +} + +// SetCertificateArn sets the CertificateArn field's value. +func (s *RegisterCertificateWithoutCAOutput) SetCertificateArn(v string) *RegisterCertificateWithoutCAOutput { + s.CertificateArn = &v + return s +} + +// SetCertificateId sets the CertificateId field's value. +func (s *RegisterCertificateWithoutCAOutput) SetCertificateId(v string) *RegisterCertificateWithoutCAOutput { + s.CertificateId = &v + return s +} + type RegisterThingInput struct { _ struct{} `type:"structure"` - // The parameters for provisioning a thing. See Programmatic Provisioning (https://docs.aws.amazon.com/iot/latest/developerguide/programmatic-provisioning.html) + // The parameters for provisioning a thing. See Provisioning Templates (https://docs.aws.amazon.com/iot/latest/developerguide/provision-template.html) // for more information. Parameters map[string]*string `locationName:"parameters" type:"map"` @@ -40025,7 +45637,7 @@ func (s *RegisterThingInput) SetTemplateBody(v string) *RegisterThingInput { type RegisterThingOutput struct { _ struct{} `type:"structure"` - // . + // The certificate data, in PEM format. CertificatePem *string `locationName:"certificatePem" min:"1" type:"string"` // ARNs for the generated resources. 
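Reviewer note (not part of the vendored diff): the hunks above pull in several IoT APIs that did not exist in the previously vendored SDK, notably RegisterCertificateWithoutCA and the audit-suppression operations. The following is a minimal sketch of how those calls might be exercised against the vendored aws-sdk-go v1.35.8 client, for orientation only; the PEM literal is a placeholder, error handling is simplified, and the field names follow the structures added earlier in this file.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iot"
)

func main() {
	// Region and credentials come from the usual environment/shared-config chain.
	sess := session.Must(session.NewSession())
	conn := iot.New(sess)

	// Register a device certificate without a registered CA (API added in this SDK bump).
	// certPEM is a placeholder; a real caller would load the PEM from disk or configuration.
	certPEM := "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"
	regOut, err := conn.RegisterCertificateWithoutCA(&iot.RegisterCertificateWithoutCAInput{
		CertificatePem: aws.String(certPEM),
		Status:         aws.String(iot.CertificateStatusActive),
	})
	if err != nil {
		log.Fatalf("RegisterCertificateWithoutCA: %v", err)
	}
	fmt.Println("registered certificate:", aws.StringValue(regOut.CertificateArn))

	// Page through audit suppressions, another operation introduced by this vendoring.
	input := &iot.ListAuditSuppressionsInput{MaxResults: aws.Int64(25)}
	for {
		page, err := conn.ListAuditSuppressions(input)
		if err != nil {
			log.Fatalf("ListAuditSuppressions: %v", err)
		}
		for _, suppression := range page.Suppressions {
			fmt.Println("suppressed check:", aws.StringValue(suppression.CheckName))
		}
		if page.NextToken == nil {
			break
		}
		input.NextToken = page.NextToken
	}
}

As with the other list operations in this file, a nil NextToken in the response indicates the final page; the sketch simply loops until that point.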
@@ -40056,8 +45668,8 @@ func (s *RegisterThingOutput) SetResourceArns(v map[string]*string) *RegisterThi // The registration code is invalid. type RegistrationCodeValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Additional information about the exception. Message_ *string `locationName:"message" type:"string"` @@ -40075,17 +45687,17 @@ func (s RegistrationCodeValidationException) GoString() string { func newErrorRegistrationCodeValidationException(v protocol.ResponseMetadata) error { return &RegistrationCodeValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RegistrationCodeValidationException) Code() string { +func (s *RegistrationCodeValidationException) Code() string { return "RegistrationCodeValidationException" } // Message returns the exception's message. -func (s RegistrationCodeValidationException) Message() string { +func (s *RegistrationCodeValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -40093,22 +45705,22 @@ func (s RegistrationCodeValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RegistrationCodeValidationException) OrigErr() error { +func (s *RegistrationCodeValidationException) OrigErr() error { return nil } -func (s RegistrationCodeValidationException) Error() string { +func (s *RegistrationCodeValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RegistrationCodeValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RegistrationCodeValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RegistrationCodeValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *RegistrationCodeValidationException) RequestID() string { + return s.RespMetadata.RequestID } // The registration configuration. @@ -40605,8 +46217,8 @@ func (s *RepublishAction) SetTopic(v string) *RepublishAction { // The resource already exists. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -40630,17 +46242,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -40648,22 +46260,22 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // Information that identifies the noncompliant resource. @@ -40785,8 +46397,8 @@ func (s *ResourceIdentifier) SetRoleAliasArn(v string) *ResourceIdentifier { // The specified resource does not exist. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -40804,17 +46416,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -40822,28 +46434,28 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The resource registration failed. type ResourceRegistrationFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. 
Message_ *string `locationName:"message" type:"string"` @@ -40861,17 +46473,17 @@ func (s ResourceRegistrationFailureException) GoString() string { func newErrorResourceRegistrationFailureException(v protocol.ResponseMetadata) error { return &ResourceRegistrationFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceRegistrationFailureException) Code() string { +func (s *ResourceRegistrationFailureException) Code() string { return "ResourceRegistrationFailureException" } // Message returns the exception's message. -func (s ResourceRegistrationFailureException) Message() string { +func (s *ResourceRegistrationFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -40879,22 +46491,22 @@ func (s ResourceRegistrationFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceRegistrationFailureException) OrigErr() error { +func (s *ResourceRegistrationFailureException) OrigErr() error { return nil } -func (s ResourceRegistrationFailureException) Error() string { +func (s *ResourceRegistrationFailureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceRegistrationFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceRegistrationFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceRegistrationFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceRegistrationFailureException) RequestID() string { + return s.RespMetadata.RequestID } // Role alias description. @@ -41544,8 +47156,8 @@ func (s *ServerCertificateSummary) SetServerCertificateStatusDetail(v string) *S // The service is temporarily unavailable. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -41563,17 +47175,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -41581,22 +47193,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } type SetDefaultAuthorizerInput struct { @@ -42107,8 +47719,8 @@ func (s *SnsAction) SetTargetArn(v string) *SnsAction { // The Rule-SQL expression can't be parsed correctly. type SqlParseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -42126,17 +47738,17 @@ func (s SqlParseException) GoString() string { func newErrorSqlParseException(v protocol.ResponseMetadata) error { return &SqlParseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SqlParseException) Code() string { +func (s *SqlParseException) Code() string { return "SqlParseException" } // Message returns the exception's message. -func (s SqlParseException) Message() string { +func (s *SqlParseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -42144,22 +47756,22 @@ func (s SqlParseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SqlParseException) OrigErr() error { +func (s *SqlParseException) OrigErr() error { return nil } -func (s SqlParseException) Error() string { +func (s *SqlParseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SqlParseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SqlParseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SqlParseException) RequestID() string { - return s.respMetadata.RequestID +func (s *SqlParseException) RequestID() string { + return s.RespMetadata.RequestID } // Describes an action to publish data to an Amazon SQS queue. @@ -43053,10 +48665,12 @@ type Tag struct { _ struct{} `type:"structure"` // The tag's key. - Key *string `type:"string"` + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` // The tag's value. - Value *string `type:"string"` + Value *string `min:"1" type:"string"` } // String returns the string representation @@ -43069,6 +48683,25 @@ func (s Tag) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetKey sets the Key field's value. 
func (s *Tag) SetKey(v string) *Tag { s.Key = &v @@ -43114,6 +48747,16 @@ func (s *TagResourceInput) Validate() error { if s.Tags == nil { invalidParams.Add(request.NewErrParamRequired("Tags")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -43150,8 +48793,8 @@ func (s TagResourceOutput) GoString() string { // This exception occurs if you attempt to start a task with the same task-id // as an existing task but with a different clientRequestToken. type TaskAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -43168,17 +48811,17 @@ func (s TaskAlreadyExistsException) GoString() string { func newErrorTaskAlreadyExistsException(v protocol.ResponseMetadata) error { return &TaskAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TaskAlreadyExistsException) Code() string { +func (s *TaskAlreadyExistsException) Code() string { return "TaskAlreadyExistsException" } // Message returns the exception's message. -func (s TaskAlreadyExistsException) Message() string { +func (s *TaskAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -43186,22 +48829,22 @@ func (s TaskAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TaskAlreadyExistsException) OrigErr() error { +func (s *TaskAlreadyExistsException) OrigErr() error { return nil } -func (s TaskAlreadyExistsException) Error() string { +func (s *TaskAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TaskAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TaskAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TaskAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TaskAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // Statistics for the checks performed during the audit. @@ -43369,7 +49012,9 @@ type TestAuthorizationInput struct { // as if they are not attached to the principal being authorized. PolicyNamesToSkip []*string `locationName:"policyNamesToSkip" type:"list"` - // The principal. + // The principal. Valid principals are CertificateArn (arn:aws:iot:region:accountId:cert/certificateId), + // thingGroupArn (arn:aws:iot:region:accountId:thinggroup/groupName) and CognitoId + // (region:id). 
Principal *string `locationName:"principal" type:"string"` } @@ -43392,6 +49037,16 @@ func (s *TestAuthorizationInput) Validate() error { if s.AuthInfos != nil && len(s.AuthInfos) < 1 { invalidParams.Add(request.NewErrParamMinLen("AuthInfos", 1)) } + if s.AuthInfos != nil { + for i, v := range s.AuthInfos { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AuthInfos", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -43479,7 +49134,7 @@ type TestInvokeAuthorizerInput struct { Token *string `locationName:"token" min:"1" type:"string"` // The signature made with the token and your custom authentication service's - // private key. + // private key. This value must be Base-64-encoded. TokenSignature *string `locationName:"tokenSignature" min:"1" type:"string"` } @@ -44210,8 +49865,8 @@ func (s *ThingTypeProperties) SetThingTypeDescription(v string) *ThingTypeProper // The rate exceeds the limit. type ThrottlingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -44229,17 +49884,17 @@ func (s ThrottlingException) GoString() string { func newErrorThrottlingException(v protocol.ResponseMetadata) error { return &ThrottlingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ThrottlingException) Code() string { +func (s *ThrottlingException) Code() string { return "ThrottlingException" } // Message returns the exception's message. -func (s ThrottlingException) Message() string { +func (s *ThrottlingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -44247,22 +49902,22 @@ func (s ThrottlingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThrottlingException) OrigErr() error { +func (s *ThrottlingException) OrigErr() error { return nil } -func (s ThrottlingException) Error() string { +func (s *ThrottlingException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ThrottlingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ThrottlingException) RequestID() string { - return s.respMetadata.RequestID +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the amount of time each device has to finish its execution of the @@ -44297,6 +49952,242 @@ func (s *TimeoutConfig) SetInProgressTimeoutInMinutes(v int64) *TimeoutConfig { return s } +// The Timestream rule action writes attributes (measures) from an MQTT message +// into an Amazon Timestream table. For more information, see the Timestream +// (https://docs.aws.amazon.com/iot/latest/developerguide/timestream-rule-action.html) +// topic rule action documentation. +type TimestreamAction struct { + _ struct{} `type:"structure"` + + // The name of an Amazon Timestream database. 
+ // + // DatabaseName is a required field + DatabaseName *string `locationName:"databaseName" type:"string" required:"true"` + + // Metadata attributes of the time series that are written in each measure record. + // + // Dimensions is a required field + Dimensions []*TimestreamDimension `locationName:"dimensions" min:"1" type:"list" required:"true"` + + // The ARN of the role that grants permission to write to the Amazon Timestream + // database table. + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The name of the database table into which to write the measure records. + // + // TableName is a required field + TableName *string `locationName:"tableName" type:"string" required:"true"` + + // Specifies an application-defined value to replace the default value assigned + // to the Timestream record's timestamp in the time column. + // + // You can use this property to specify the value and the precision of the Timestream + // record's timestamp. You can specify a value from the message payload or a + // value computed by a substitution template. + // + // If omitted, the topic rule action assigns the timestamp, in milliseconds, + // at the time it processed the rule. + Timestamp *TimestreamTimestamp `locationName:"timestamp" type:"structure"` +} + +// String returns the string representation +func (s TimestreamAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimestreamAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TimestreamAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TimestreamAction"} + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } + if s.Dimensions == nil { + invalidParams.Add(request.NewErrParamRequired("Dimensions")) + } + if s.Dimensions != nil && len(s.Dimensions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Dimensions", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.Dimensions != nil { + for i, v := range s.Dimensions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Timestamp != nil { + if err := s.Timestamp.Validate(); err != nil { + invalidParams.AddNested("Timestamp", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabaseName sets the DatabaseName field's value. +func (s *TimestreamAction) SetDatabaseName(v string) *TimestreamAction { + s.DatabaseName = &v + return s +} + +// SetDimensions sets the Dimensions field's value. +func (s *TimestreamAction) SetDimensions(v []*TimestreamDimension) *TimestreamAction { + s.Dimensions = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *TimestreamAction) SetRoleArn(v string) *TimestreamAction { + s.RoleArn = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *TimestreamAction) SetTableName(v string) *TimestreamAction { + s.TableName = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. 
+func (s *TimestreamAction) SetTimestamp(v *TimestreamTimestamp) *TimestreamAction { + s.Timestamp = v + return s +} + +// Metadata attributes of the time series that are written in each measure record. +type TimestreamDimension struct { + _ struct{} `type:"structure"` + + // The metadata dimension name. This is the name of the column in the Amazon + // Timestream database table record. + // + // Dimensions cannot be named: measure_name, measure_value, or time. These names + // are reserved. Dimension names cannot start with ts_ or measure_value and + // they cannot contain the colon (:) character. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // The value to write in this column of the database record. + // + // Value is a required field + Value *string `locationName:"value" type:"string" required:"true"` +} + +// String returns the string representation +func (s TimestreamDimension) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimestreamDimension) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TimestreamDimension) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TimestreamDimension"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *TimestreamDimension) SetName(v string) *TimestreamDimension { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *TimestreamDimension) SetValue(v string) *TimestreamDimension { + s.Value = &v + return s +} + +// Describes how to interpret an application-defined timestamp value from an +// MQTT message payload and the precision of that value. +type TimestreamTimestamp struct { + _ struct{} `type:"structure"` + + // The precision of the timestamp value that results from the expression described + // in value. + // + // Valid values: SECONDS | MILLISECONDS | MICROSECONDS | NANOSECONDS. The default + // is MILLISECONDS. + // + // Unit is a required field + Unit *string `locationName:"unit" type:"string" required:"true"` + + // An expression that returns a long epoch time value. + // + // Value is a required field + Value *string `locationName:"value" type:"string" required:"true"` +} + +// String returns the string representation +func (s TimestreamTimestamp) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimestreamTimestamp) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TimestreamTimestamp) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TimestreamTimestamp"} + if s.Unit == nil { + invalidParams.Add(request.NewErrParamRequired("Unit")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetUnit sets the Unit field's value. +func (s *TimestreamTimestamp) SetUnit(v string) *TimestreamTimestamp { + s.Unit = &v + return s +} + +// SetValue sets the Value field's value. 
+func (s *TimestreamTimestamp) SetValue(v string) *TimestreamTimestamp { + s.Value = &v + return s +} + // Specifies the TLS context to use for the test authorizer request. type TlsContext struct { _ struct{} `type:"structure"` @@ -44696,7 +50587,7 @@ type TopicRulePayload struct { RuleDisabled *bool `locationName:"ruleDisabled" type:"boolean"` // The SQL statement used to query the topic. For more information, see AWS - // IoT SQL Reference (https://docs.aws.amazon.com/iot/latest/developerguide/iot-rules.html#aws-iot-sql-reference) + // IoT SQL Reference (https://docs.aws.amazon.com/iot/latest/developerguide/iot-sql-reference.html) // in the AWS IoT Developer Guide. // // Sql is a required field @@ -44783,8 +50674,8 @@ func (s *TopicRulePayload) SetSql(v string) *TopicRulePayload { // You can't revert the certificate transfer because the transfer is already // complete. type TransferAlreadyCompletedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -44802,17 +50693,17 @@ func (s TransferAlreadyCompletedException) GoString() string { func newErrorTransferAlreadyCompletedException(v protocol.ResponseMetadata) error { return &TransferAlreadyCompletedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TransferAlreadyCompletedException) Code() string { +func (s *TransferAlreadyCompletedException) Code() string { return "TransferAlreadyCompletedException" } // Message returns the exception's message. -func (s TransferAlreadyCompletedException) Message() string { +func (s *TransferAlreadyCompletedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -44820,22 +50711,22 @@ func (s TransferAlreadyCompletedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TransferAlreadyCompletedException) OrigErr() error { +func (s *TransferAlreadyCompletedException) OrigErr() error { return nil } -func (s TransferAlreadyCompletedException) Error() string { +func (s *TransferAlreadyCompletedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TransferAlreadyCompletedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TransferAlreadyCompletedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TransferAlreadyCompletedException) RequestID() string { - return s.respMetadata.RequestID +func (s *TransferAlreadyCompletedException) RequestID() string { + return s.RespMetadata.RequestID } // The input for the TransferCertificate operation. @@ -44934,8 +50825,8 @@ func (s *TransferCertificateOutput) SetTransferredCertificateArn(v string) *Tran // You can't transfer the certificate because authorization policies are still // attached. type TransferConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. 
Message_ *string `locationName:"message" type:"string"` @@ -44953,17 +50844,17 @@ func (s TransferConflictException) GoString() string { func newErrorTransferConflictException(v protocol.ResponseMetadata) error { return &TransferConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TransferConflictException) Code() string { +func (s *TransferConflictException) Code() string { return "TransferConflictException" } // Message returns the exception's message. -func (s TransferConflictException) Message() string { +func (s *TransferConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -44971,22 +50862,22 @@ func (s TransferConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TransferConflictException) OrigErr() error { +func (s *TransferConflictException) OrigErr() error { return nil } -func (s TransferConflictException) Error() string { +func (s *TransferConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TransferConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TransferConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TransferConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *TransferConflictException) RequestID() string { + return s.RespMetadata.RequestID } // Data used to transfer a certificate to an AWS account. @@ -45051,8 +50942,8 @@ func (s *TransferData) SetTransferMessage(v string) *TransferData { // You are not authorized to perform this operation. type UnauthorizedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -45070,17 +50961,17 @@ func (s UnauthorizedException) GoString() string { func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { return &UnauthorizedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnauthorizedException) Code() string { +func (s *UnauthorizedException) Code() string { return "UnauthorizedException" } // Message returns the exception's message. -func (s UnauthorizedException) Message() string { +func (s *UnauthorizedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -45088,22 +50979,22 @@ func (s UnauthorizedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnauthorizedException) OrigErr() error { +func (s *UnauthorizedException) OrigErr() error { return nil } -func (s UnauthorizedException) Error() string { +func (s *UnauthorizedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnauthorizedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnauthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s UnauthorizedException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnauthorizedException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -45265,6 +51156,107 @@ func (s UpdateAccountAuditConfigurationOutput) GoString() string { return s.String() } +type UpdateAuditSuppressionInput struct { + _ struct{} `type:"structure"` + + // An audit check name. Checks must be enabled for your account. (Use DescribeAccountAuditConfiguration + // to see the list of all checks, including those that are enabled or use UpdateAccountAuditConfiguration + // to select which checks are enabled.) + // + // CheckName is a required field + CheckName *string `locationName:"checkName" type:"string" required:"true"` + + // The description of the audit suppression. + Description *string `locationName:"description" type:"string"` + + // The expiration date (epoch timestamp in seconds) that you want the suppression + // to adhere to. + ExpirationDate *time.Time `locationName:"expirationDate" type:"timestamp"` + + // Information that identifies the noncompliant resource. + // + // ResourceIdentifier is a required field + ResourceIdentifier *ResourceIdentifier `locationName:"resourceIdentifier" type:"structure" required:"true"` + + // Indicates whether a suppression should exist indefinitely or not. + SuppressIndefinitely *bool `locationName:"suppressIndefinitely" type:"boolean"` +} + +// String returns the string representation +func (s UpdateAuditSuppressionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAuditSuppressionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAuditSuppressionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAuditSuppressionInput"} + if s.CheckName == nil { + invalidParams.Add(request.NewErrParamRequired("CheckName")) + } + if s.ResourceIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceIdentifier")) + } + if s.ResourceIdentifier != nil { + if err := s.ResourceIdentifier.Validate(); err != nil { + invalidParams.AddNested("ResourceIdentifier", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCheckName sets the CheckName field's value. +func (s *UpdateAuditSuppressionInput) SetCheckName(v string) *UpdateAuditSuppressionInput { + s.CheckName = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateAuditSuppressionInput) SetDescription(v string) *UpdateAuditSuppressionInput { + s.Description = &v + return s +} + +// SetExpirationDate sets the ExpirationDate field's value. +func (s *UpdateAuditSuppressionInput) SetExpirationDate(v time.Time) *UpdateAuditSuppressionInput { + s.ExpirationDate = &v + return s +} + +// SetResourceIdentifier sets the ResourceIdentifier field's value. +func (s *UpdateAuditSuppressionInput) SetResourceIdentifier(v *ResourceIdentifier) *UpdateAuditSuppressionInput { + s.ResourceIdentifier = v + return s +} + +// SetSuppressIndefinitely sets the SuppressIndefinitely field's value. 
+func (s *UpdateAuditSuppressionInput) SetSuppressIndefinitely(v bool) *UpdateAuditSuppressionInput { + s.SuppressIndefinitely = &v + return s +} + +type UpdateAuditSuppressionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAuditSuppressionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAuditSuppressionOutput) GoString() string { + return s.String() +} + type UpdateAuthorizerInput struct { _ struct{} `type:"structure"` @@ -45728,6 +51720,138 @@ func (s *UpdateDeviceCertificateParams) SetAction(v string) *UpdateDeviceCertifi return s } +type UpdateDimensionInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the dimension. Choose something that describes the + // type and value to make it easy to remember what it does. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` + + // Specifies the value or list of values for the dimension. For TOPIC_FILTER + // dimensions, this is a pattern used to match the MQTT topic (for example, + // "admin/#"). + // + // StringValues is a required field + StringValues []*string `locationName:"stringValues" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateDimensionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDimensionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDimensionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDimensionInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.StringValues == nil { + invalidParams.Add(request.NewErrParamRequired("StringValues")) + } + if s.StringValues != nil && len(s.StringValues) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StringValues", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *UpdateDimensionInput) SetName(v string) *UpdateDimensionInput { + s.Name = &v + return s +} + +// SetStringValues sets the StringValues field's value. +func (s *UpdateDimensionInput) SetStringValues(v []*string) *UpdateDimensionInput { + s.StringValues = v + return s +} + +type UpdateDimensionOutput struct { + _ struct{} `type:"structure"` + + // The ARN (Amazon resource name) of the created dimension. + Arn *string `locationName:"arn" type:"string"` + + // The date and time, in milliseconds since epoch, when the dimension was initially + // created. + CreationDate *time.Time `locationName:"creationDate" type:"timestamp"` + + // The date and time, in milliseconds since epoch, when the dimension was most + // recently updated. + LastModifiedDate *time.Time `locationName:"lastModifiedDate" type:"timestamp"` + + // A unique identifier for the dimension. + Name *string `locationName:"name" min:"1" type:"string"` + + // The value or list of values used to scope the dimension. For example, for + // topic filters, this is the pattern used to match the MQTT topic name. + StringValues []*string `locationName:"stringValues" min:"1" type:"list"` + + // The type of the dimension. 
+ Type *string `locationName:"type" type:"string" enum:"DimensionType"` +} + +// String returns the string representation +func (s UpdateDimensionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDimensionOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *UpdateDimensionOutput) SetArn(v string) *UpdateDimensionOutput { + s.Arn = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *UpdateDimensionOutput) SetCreationDate(v time.Time) *UpdateDimensionOutput { + s.CreationDate = &v + return s +} + +// SetLastModifiedDate sets the LastModifiedDate field's value. +func (s *UpdateDimensionOutput) SetLastModifiedDate(v time.Time) *UpdateDimensionOutput { + s.LastModifiedDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateDimensionOutput) SetName(v string) *UpdateDimensionOutput { + s.Name = &v + return s +} + +// SetStringValues sets the StringValues field's value. +func (s *UpdateDimensionOutput) SetStringValues(v []*string) *UpdateDimensionOutput { + s.StringValues = v + return s +} + +// SetType sets the Type field's value. +func (s *UpdateDimensionOutput) SetType(v string) *UpdateDimensionOutput { + s.Type = &v + return s +} + type UpdateDomainConfigurationInput struct { _ struct{} `type:"structure"` @@ -46292,10 +52416,16 @@ type UpdateProvisioningTemplateInput struct { // True to enable the fleet provisioning template, otherwise false. Enabled *bool `locationName:"enabled" type:"boolean"` + // Updates the pre-provisioning hook template. + PreProvisioningHook *ProvisioningHook `locationName:"preProvisioningHook" type:"structure"` + // The ARN of the role associated with the provisioning template. This IoT role // grants permission to provision a device. ProvisioningRoleArn *string `locationName:"provisioningRoleArn" min:"20" type:"string"` + // Removes pre-provisioning hook template. + RemovePreProvisioningHook *bool `locationName:"removePreProvisioningHook" type:"boolean"` + // The name of the fleet provisioning template. // // TemplateName is a required field @@ -46324,6 +52454,11 @@ func (s *UpdateProvisioningTemplateInput) Validate() error { if s.TemplateName != nil && len(*s.TemplateName) < 1 { invalidParams.Add(request.NewErrParamMinLen("TemplateName", 1)) } + if s.PreProvisioningHook != nil { + if err := s.PreProvisioningHook.Validate(); err != nil { + invalidParams.AddNested("PreProvisioningHook", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -46349,12 +52484,24 @@ func (s *UpdateProvisioningTemplateInput) SetEnabled(v bool) *UpdateProvisioning return s } +// SetPreProvisioningHook sets the PreProvisioningHook field's value. +func (s *UpdateProvisioningTemplateInput) SetPreProvisioningHook(v *ProvisioningHook) *UpdateProvisioningTemplateInput { + s.PreProvisioningHook = v + return s +} + // SetProvisioningRoleArn sets the ProvisioningRoleArn field's value. func (s *UpdateProvisioningTemplateInput) SetProvisioningRoleArn(v string) *UpdateProvisioningTemplateInput { s.ProvisioningRoleArn = &v return s } +// SetRemovePreProvisioningHook sets the RemovePreProvisioningHook field's value. +func (s *UpdateProvisioningTemplateInput) SetRemovePreProvisioningHook(v bool) *UpdateProvisioningTemplateInput { + s.RemovePreProvisioningHook = &v + return s +} + // SetTemplateName sets the TemplateName field's value. 
func (s *UpdateProvisioningTemplateInput) SetTemplateName(v string) *UpdateProvisioningTemplateInput { s.TemplateName = &v @@ -46585,10 +52732,19 @@ func (s *UpdateScheduledAuditOutput) SetScheduledAuditArn(v string) *UpdateSched type UpdateSecurityProfileInput struct { _ struct{} `type:"structure"` + // Please use UpdateSecurityProfileRequest$additionalMetricsToRetainV2 instead. + // + // A list of metrics whose data is retained (stored). By default, data is retained + // for any metric used in the profile's behaviors, but it is also retained for + // any metric specified here. + // + // Deprecated: Use additionalMetricsToRetainV2. + AdditionalMetricsToRetain []*string `locationName:"additionalMetricsToRetain" deprecated:"true" type:"list"` + // A list of metrics whose data is retained (stored). By default, data is retained // for any metric used in the profile's behaviors, but it is also retained for // any metric specified here. - AdditionalMetricsToRetain []*string `locationName:"additionalMetricsToRetain" type:"list"` + AdditionalMetricsToRetainV2 []*MetricToRetain `locationName:"additionalMetricsToRetainV2" type:"list"` // Where the alerts are sent. (Alerts are always sent to the console.) AlertTargets map[string]*AlertTarget `locationName:"alertTargets" type:"map"` @@ -46643,6 +52799,16 @@ func (s *UpdateSecurityProfileInput) Validate() error { if s.SecurityProfileName != nil && len(*s.SecurityProfileName) < 1 { invalidParams.Add(request.NewErrParamMinLen("SecurityProfileName", 1)) } + if s.AdditionalMetricsToRetainV2 != nil { + for i, v := range s.AdditionalMetricsToRetainV2 { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AdditionalMetricsToRetainV2", i), err.(request.ErrInvalidParams)) + } + } + } if s.AlertTargets != nil { for i, v := range s.AlertTargets { if v == nil { @@ -46676,6 +52842,12 @@ func (s *UpdateSecurityProfileInput) SetAdditionalMetricsToRetain(v []*string) * return s } +// SetAdditionalMetricsToRetainV2 sets the AdditionalMetricsToRetainV2 field's value. +func (s *UpdateSecurityProfileInput) SetAdditionalMetricsToRetainV2(v []*MetricToRetain) *UpdateSecurityProfileInput { + s.AdditionalMetricsToRetainV2 = v + return s +} + // SetAlertTargets sets the AlertTargets field's value. func (s *UpdateSecurityProfileInput) SetAlertTargets(v map[string]*AlertTarget) *UpdateSecurityProfileInput { s.AlertTargets = v @@ -46727,10 +52899,19 @@ func (s *UpdateSecurityProfileInput) SetSecurityProfileName(v string) *UpdateSec type UpdateSecurityProfileOutput struct { _ struct{} `type:"structure"` + // Please use UpdateSecurityProfileResponse$additionalMetricsToRetainV2 instead. + // // A list of metrics whose data is retained (stored). By default, data is retained // for any metric used in the security profile's behaviors, but it is also retained // for any metric specified here. - AdditionalMetricsToRetain []*string `locationName:"additionalMetricsToRetain" type:"list"` + // + // Deprecated: Use additionalMetricsToRetainV2. + AdditionalMetricsToRetain []*string `locationName:"additionalMetricsToRetain" deprecated:"true" type:"list"` + + // A list of metrics whose data is retained (stored). By default, data is retained + // for any metric used in the profile's behaviors, but it is also retained for + // any metric specified here. + AdditionalMetricsToRetainV2 []*MetricToRetain `locationName:"additionalMetricsToRetainV2" type:"list"` // Where the alerts are sent. (Alerts are always sent to the console.) 
AlertTargets map[string]*AlertTarget `locationName:"alertTargets" type:"map"` @@ -46774,6 +52955,12 @@ func (s *UpdateSecurityProfileOutput) SetAdditionalMetricsToRetain(v []*string) return s } +// SetAdditionalMetricsToRetainV2 sets the AdditionalMetricsToRetainV2 field's value. +func (s *UpdateSecurityProfileOutput) SetAdditionalMetricsToRetainV2(v []*MetricToRetain) *UpdateSecurityProfileOutput { + s.AdditionalMetricsToRetainV2 = v + return s +} + // SetAlertTargets sets the AlertTargets field's value. func (s *UpdateSecurityProfileOutput) SetAlertTargets(v map[string]*AlertTarget) *UpdateSecurityProfileOutput { s.AlertTargets = v @@ -47147,6 +53334,9 @@ type UpdateThingInput struct { // The name of the thing to update. // + // You can't change a thing's name. To change a thing's name, you must create + // a new thing, give it the new name, and then delete the old thing. + // // ThingName is a required field ThingName *string `location:"uri" locationName:"thingName" min:"1" type:"string" required:"true"` @@ -47427,8 +53617,8 @@ func (s *ValidationError) SetErrorMessage(v string) *ValidationError { // An exception thrown when the version of an entity specified with the expectedVersion // parameter does not match the latest version in the system. type VersionConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -47446,17 +53636,17 @@ func (s VersionConflictException) GoString() string { func newErrorVersionConflictException(v protocol.ResponseMetadata) error { return &VersionConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s VersionConflictException) Code() string { +func (s *VersionConflictException) Code() string { return "VersionConflictException" } // Message returns the exception's message. -func (s VersionConflictException) Message() string { +func (s *VersionConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -47464,28 +53654,28 @@ func (s VersionConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s VersionConflictException) OrigErr() error { +func (s *VersionConflictException) OrigErr() error { return nil } -func (s VersionConflictException) Error() string { +func (s *VersionConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s VersionConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *VersionConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s VersionConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *VersionConflictException) RequestID() string { + return s.RespMetadata.RequestID } // The number of policy versions exceeds the limit. type VersionsLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. 
Message_ *string `locationName:"message" type:"string"` @@ -47503,17 +53693,17 @@ func (s VersionsLimitExceededException) GoString() string { func newErrorVersionsLimitExceededException(v protocol.ResponseMetadata) error { return &VersionsLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s VersionsLimitExceededException) Code() string { +func (s *VersionsLimitExceededException) Code() string { return "VersionsLimitExceededException" } // Message returns the exception's message. -func (s VersionsLimitExceededException) Message() string { +func (s *VersionsLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -47521,22 +53711,22 @@ func (s VersionsLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s VersionsLimitExceededException) OrigErr() error { +func (s *VersionsLimitExceededException) OrigErr() error { return nil } -func (s VersionsLimitExceededException) Error() string { +func (s *VersionsLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s VersionsLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *VersionsLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s VersionsLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *VersionsLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a Device Defender security profile behavior violation. @@ -47622,6 +53812,13 @@ const ( AbortActionCancel = "CANCEL" ) +// AbortAction_Values returns all elements of the AbortAction enum +func AbortAction_Values() []string { + return []string{ + AbortActionCancel, + } +} + const ( // ActionTypePublish is a ActionType enum value ActionTypePublish = "PUBLISH" @@ -47636,12 +53833,29 @@ const ( ActionTypeConnect = "CONNECT" ) +// ActionType_Values returns all elements of the ActionType enum +func ActionType_Values() []string { + return []string{ + ActionTypePublish, + ActionTypeSubscribe, + ActionTypeReceive, + ActionTypeConnect, + } +} + // The type of alert target: one of "SNS". 
const ( // AlertTargetTypeSns is a AlertTargetType enum value AlertTargetTypeSns = "SNS" ) +// AlertTargetType_Values returns all elements of the AlertTargetType enum +func AlertTargetType_Values() []string { + return []string{ + AlertTargetTypeSns, + } +} + const ( // AuditCheckRunStatusInProgress is a AuditCheckRunStatus enum value AuditCheckRunStatusInProgress = "IN_PROGRESS" @@ -47662,6 +53876,18 @@ const ( AuditCheckRunStatusFailed = "FAILED" ) +// AuditCheckRunStatus_Values returns all elements of the AuditCheckRunStatus enum +func AuditCheckRunStatus_Values() []string { + return []string{ + AuditCheckRunStatusInProgress, + AuditCheckRunStatusWaitingForDataCollection, + AuditCheckRunStatusCanceled, + AuditCheckRunStatusCompletedCompliant, + AuditCheckRunStatusCompletedNonCompliant, + AuditCheckRunStatusFailed, + } +} + const ( // AuditFindingSeverityCritical is a AuditFindingSeverity enum value AuditFindingSeverityCritical = "CRITICAL" @@ -47676,6 +53902,16 @@ const ( AuditFindingSeverityLow = "LOW" ) +// AuditFindingSeverity_Values returns all elements of the AuditFindingSeverity enum +func AuditFindingSeverity_Values() []string { + return []string{ + AuditFindingSeverityCritical, + AuditFindingSeverityHigh, + AuditFindingSeverityMedium, + AuditFindingSeverityLow, + } +} + const ( // AuditFrequencyDaily is a AuditFrequency enum value AuditFrequencyDaily = "DAILY" @@ -47690,6 +53926,16 @@ const ( AuditFrequencyMonthly = "MONTHLY" ) +// AuditFrequency_Values returns all elements of the AuditFrequency enum +func AuditFrequency_Values() []string { + return []string{ + AuditFrequencyDaily, + AuditFrequencyWeekly, + AuditFrequencyBiweekly, + AuditFrequencyMonthly, + } +} + const ( // AuditMitigationActionsExecutionStatusInProgress is a AuditMitigationActionsExecutionStatus enum value AuditMitigationActionsExecutionStatusInProgress = "IN_PROGRESS" @@ -47710,6 +53956,18 @@ const ( AuditMitigationActionsExecutionStatusPending = "PENDING" ) +// AuditMitigationActionsExecutionStatus_Values returns all elements of the AuditMitigationActionsExecutionStatus enum +func AuditMitigationActionsExecutionStatus_Values() []string { + return []string{ + AuditMitigationActionsExecutionStatusInProgress, + AuditMitigationActionsExecutionStatusCompleted, + AuditMitigationActionsExecutionStatusFailed, + AuditMitigationActionsExecutionStatusCanceled, + AuditMitigationActionsExecutionStatusSkipped, + AuditMitigationActionsExecutionStatusPending, + } +} + const ( // AuditMitigationActionsTaskStatusInProgress is a AuditMitigationActionsTaskStatus enum value AuditMitigationActionsTaskStatusInProgress = "IN_PROGRESS" @@ -47724,11 +53982,28 @@ const ( AuditMitigationActionsTaskStatusCanceled = "CANCELED" ) +// AuditMitigationActionsTaskStatus_Values returns all elements of the AuditMitigationActionsTaskStatus enum +func AuditMitigationActionsTaskStatus_Values() []string { + return []string{ + AuditMitigationActionsTaskStatusInProgress, + AuditMitigationActionsTaskStatusCompleted, + AuditMitigationActionsTaskStatusFailed, + AuditMitigationActionsTaskStatusCanceled, + } +} + const ( // AuditNotificationTypeSns is a AuditNotificationType enum value AuditNotificationTypeSns = "SNS" ) +// AuditNotificationType_Values returns all elements of the AuditNotificationType enum +func AuditNotificationType_Values() []string { + return []string{ + AuditNotificationTypeSns, + } +} + const ( // AuditTaskStatusInProgress is a AuditTaskStatus enum value AuditTaskStatusInProgress = "IN_PROGRESS" @@ -47743,6 +54018,16 @@ const ( 
AuditTaskStatusCanceled = "CANCELED" ) +// AuditTaskStatus_Values returns all elements of the AuditTaskStatus enum +func AuditTaskStatus_Values() []string { + return []string{ + AuditTaskStatusInProgress, + AuditTaskStatusCompleted, + AuditTaskStatusFailed, + AuditTaskStatusCanceled, + } +} + const ( // AuditTaskTypeOnDemandAuditTask is a AuditTaskType enum value AuditTaskTypeOnDemandAuditTask = "ON_DEMAND_AUDIT_TASK" @@ -47751,6 +54036,14 @@ const ( AuditTaskTypeScheduledAuditTask = "SCHEDULED_AUDIT_TASK" ) +// AuditTaskType_Values returns all elements of the AuditTaskType enum +func AuditTaskType_Values() []string { + return []string{ + AuditTaskTypeOnDemandAuditTask, + AuditTaskTypeScheduledAuditTask, + } +} + const ( // AuthDecisionAllowed is a AuthDecision enum value AuthDecisionAllowed = "ALLOWED" @@ -47762,6 +54055,15 @@ const ( AuthDecisionImplicitDeny = "IMPLICIT_DENY" ) +// AuthDecision_Values returns all elements of the AuthDecision enum +func AuthDecision_Values() []string { + return []string{ + AuthDecisionAllowed, + AuthDecisionExplicitDeny, + AuthDecisionImplicitDeny, + } +} + const ( // AuthorizerStatusActive is a AuthorizerStatus enum value AuthorizerStatusActive = "ACTIVE" @@ -47770,6 +54072,14 @@ const ( AuthorizerStatusInactive = "INACTIVE" ) +// AuthorizerStatus_Values returns all elements of the AuthorizerStatus enum +func AuthorizerStatus_Values() []string { + return []string{ + AuthorizerStatusActive, + AuthorizerStatusInactive, + } +} + const ( // AutoRegistrationStatusEnable is a AutoRegistrationStatus enum value AutoRegistrationStatusEnable = "ENABLE" @@ -47778,6 +54088,50 @@ const ( AutoRegistrationStatusDisable = "DISABLE" ) +// AutoRegistrationStatus_Values returns all elements of the AutoRegistrationStatus enum +func AutoRegistrationStatus_Values() []string { + return []string{ + AutoRegistrationStatusEnable, + AutoRegistrationStatusDisable, + } +} + +const ( + // AwsJobAbortCriteriaAbortActionCancel is a AwsJobAbortCriteriaAbortAction enum value + AwsJobAbortCriteriaAbortActionCancel = "CANCEL" +) + +// AwsJobAbortCriteriaAbortAction_Values returns all elements of the AwsJobAbortCriteriaAbortAction enum +func AwsJobAbortCriteriaAbortAction_Values() []string { + return []string{ + AwsJobAbortCriteriaAbortActionCancel, + } +} + +const ( + // AwsJobAbortCriteriaFailureTypeFailed is a AwsJobAbortCriteriaFailureType enum value + AwsJobAbortCriteriaFailureTypeFailed = "FAILED" + + // AwsJobAbortCriteriaFailureTypeRejected is a AwsJobAbortCriteriaFailureType enum value + AwsJobAbortCriteriaFailureTypeRejected = "REJECTED" + + // AwsJobAbortCriteriaFailureTypeTimedOut is a AwsJobAbortCriteriaFailureType enum value + AwsJobAbortCriteriaFailureTypeTimedOut = "TIMED_OUT" + + // AwsJobAbortCriteriaFailureTypeAll is a AwsJobAbortCriteriaFailureType enum value + AwsJobAbortCriteriaFailureTypeAll = "ALL" +) + +// AwsJobAbortCriteriaFailureType_Values returns all elements of the AwsJobAbortCriteriaFailureType enum +func AwsJobAbortCriteriaFailureType_Values() []string { + return []string{ + AwsJobAbortCriteriaFailureTypeFailed, + AwsJobAbortCriteriaFailureTypeRejected, + AwsJobAbortCriteriaFailureTypeTimedOut, + AwsJobAbortCriteriaFailureTypeAll, + } +} + const ( // CACertificateStatusActive is a CACertificateStatus enum value CACertificateStatusActive = "ACTIVE" @@ -47786,11 +54140,26 @@ const ( CACertificateStatusInactive = "INACTIVE" ) +// CACertificateStatus_Values returns all elements of the CACertificateStatus enum +func CACertificateStatus_Values() []string { + 
return []string{ + CACertificateStatusActive, + CACertificateStatusInactive, + } +} + const ( // CACertificateUpdateActionDeactivate is a CACertificateUpdateAction enum value CACertificateUpdateActionDeactivate = "DEACTIVATE" ) +// CACertificateUpdateAction_Values returns all elements of the CACertificateUpdateAction enum +func CACertificateUpdateAction_Values() []string { + return []string{ + CACertificateUpdateActionDeactivate, + } +} + const ( // CannedAccessControlListPrivate is a CannedAccessControlList enum value CannedAccessControlListPrivate = "private" @@ -47817,6 +54186,36 @@ const ( CannedAccessControlListLogDeliveryWrite = "log-delivery-write" ) +// CannedAccessControlList_Values returns all elements of the CannedAccessControlList enum +func CannedAccessControlList_Values() []string { + return []string{ + CannedAccessControlListPrivate, + CannedAccessControlListPublicRead, + CannedAccessControlListPublicReadWrite, + CannedAccessControlListAwsExecRead, + CannedAccessControlListAuthenticatedRead, + CannedAccessControlListBucketOwnerRead, + CannedAccessControlListBucketOwnerFullControl, + CannedAccessControlListLogDeliveryWrite, + } +} + +const ( + // CertificateModeDefault is a CertificateMode enum value + CertificateModeDefault = "DEFAULT" + + // CertificateModeSniOnly is a CertificateMode enum value + CertificateModeSniOnly = "SNI_ONLY" +) + +// CertificateMode_Values returns all elements of the CertificateMode enum +func CertificateMode_Values() []string { + return []string{ + CertificateModeDefault, + CertificateModeSniOnly, + } +} + const ( // CertificateStatusActive is a CertificateStatus enum value CertificateStatusActive = "ACTIVE" @@ -47837,6 +54236,18 @@ const ( CertificateStatusPendingActivation = "PENDING_ACTIVATION" ) +// CertificateStatus_Values returns all elements of the CertificateStatus enum +func CertificateStatus_Values() []string { + return []string{ + CertificateStatusActive, + CertificateStatusInactive, + CertificateStatusRevoked, + CertificateStatusPendingTransfer, + CertificateStatusRegisterInactive, + CertificateStatusPendingActivation, + } +} + const ( // ComparisonOperatorLessThan is a ComparisonOperator enum value ComparisonOperatorLessThan = "less-than" @@ -47863,6 +54274,20 @@ const ( ComparisonOperatorNotInPortSet = "not-in-port-set" ) +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorLessThan, + ComparisonOperatorLessThanEquals, + ComparisonOperatorGreaterThan, + ComparisonOperatorGreaterThanEquals, + ComparisonOperatorInCidrSet, + ComparisonOperatorNotInCidrSet, + ComparisonOperatorInPortSet, + ComparisonOperatorNotInPortSet, + } +} + const ( // DayOfWeekSun is a DayOfWeek enum value DayOfWeekSun = "SUN" @@ -47886,11 +54311,59 @@ const ( DayOfWeekSat = "SAT" ) +// DayOfWeek_Values returns all elements of the DayOfWeek enum +func DayOfWeek_Values() []string { + return []string{ + DayOfWeekSun, + DayOfWeekMon, + DayOfWeekTue, + DayOfWeekWed, + DayOfWeekThu, + DayOfWeekFri, + DayOfWeekSat, + } +} + const ( // DeviceCertificateUpdateActionDeactivate is a DeviceCertificateUpdateAction enum value DeviceCertificateUpdateActionDeactivate = "DEACTIVATE" ) +// DeviceCertificateUpdateAction_Values returns all elements of the DeviceCertificateUpdateAction enum +func DeviceCertificateUpdateAction_Values() []string { + return []string{ + DeviceCertificateUpdateActionDeactivate, + } +} + +const ( + // DimensionTypeTopicFilter is a DimensionType 
enum value + DimensionTypeTopicFilter = "TOPIC_FILTER" +) + +// DimensionType_Values returns all elements of the DimensionType enum +func DimensionType_Values() []string { + return []string{ + DimensionTypeTopicFilter, + } +} + +const ( + // DimensionValueOperatorIn is a DimensionValueOperator enum value + DimensionValueOperatorIn = "IN" + + // DimensionValueOperatorNotIn is a DimensionValueOperator enum value + DimensionValueOperatorNotIn = "NOT_IN" +) + +// DimensionValueOperator_Values returns all elements of the DimensionValueOperator enum +func DimensionValueOperator_Values() []string { + return []string{ + DimensionValueOperatorIn, + DimensionValueOperatorNotIn, + } +} + const ( // DomainConfigurationStatusEnabled is a DomainConfigurationStatus enum value DomainConfigurationStatusEnabled = "ENABLED" @@ -47899,6 +54372,14 @@ const ( DomainConfigurationStatusDisabled = "DISABLED" ) +// DomainConfigurationStatus_Values returns all elements of the DomainConfigurationStatus enum +func DomainConfigurationStatus_Values() []string { + return []string{ + DomainConfigurationStatusEnabled, + DomainConfigurationStatusDisabled, + } +} + const ( // DomainTypeEndpoint is a DomainType enum value DomainTypeEndpoint = "ENDPOINT" @@ -47910,6 +54391,15 @@ const ( DomainTypeCustomerManaged = "CUSTOMER_MANAGED" ) +// DomainType_Values returns all elements of the DomainType enum +func DomainType_Values() []string { + return []string{ + DomainTypeEndpoint, + DomainTypeAwsManaged, + DomainTypeCustomerManaged, + } +} + const ( // DynamicGroupStatusActive is a DynamicGroupStatus enum value DynamicGroupStatusActive = "ACTIVE" @@ -47921,6 +54411,15 @@ const ( DynamicGroupStatusRebuilding = "REBUILDING" ) +// DynamicGroupStatus_Values returns all elements of the DynamicGroupStatus enum +func DynamicGroupStatus_Values() []string { + return []string{ + DynamicGroupStatusActive, + DynamicGroupStatusBuilding, + DynamicGroupStatusRebuilding, + } +} + const ( // DynamoKeyTypeString is a DynamoKeyType enum value DynamoKeyTypeString = "STRING" @@ -47929,6 +54428,14 @@ const ( DynamoKeyTypeNumber = "NUMBER" ) +// DynamoKeyType_Values returns all elements of the DynamoKeyType enum +func DynamoKeyType_Values() []string { + return []string{ + DynamoKeyTypeString, + DynamoKeyTypeNumber, + } +} + const ( // EventTypeThing is a EventType enum value EventTypeThing = "THING" @@ -47964,6 +54471,23 @@ const ( EventTypeCaCertificate = "CA_CERTIFICATE" ) +// EventType_Values returns all elements of the EventType enum +func EventType_Values() []string { + return []string{ + EventTypeThing, + EventTypeThingGroup, + EventTypeThingType, + EventTypeThingGroupMembership, + EventTypeThingGroupHierarchy, + EventTypeThingTypeAssociation, + EventTypeJob, + EventTypeJobExecution, + EventTypePolicy, + EventTypeCertificate, + EventTypeCaCertificate, + } +} + const ( // FieldTypeNumber is a FieldType enum value FieldTypeNumber = "Number" @@ -47975,6 +54499,15 @@ const ( FieldTypeBoolean = "Boolean" ) +// FieldType_Values returns all elements of the FieldType enum +func FieldType_Values() []string { + return []string{ + FieldTypeNumber, + FieldTypeString, + FieldTypeBoolean, + } +} + const ( // IndexStatusActive is a IndexStatus enum value IndexStatusActive = "ACTIVE" @@ -47986,6 +54519,15 @@ const ( IndexStatusRebuilding = "REBUILDING" ) +// IndexStatus_Values returns all elements of the IndexStatus enum +func IndexStatus_Values() []string { + return []string{ + IndexStatusActive, + IndexStatusBuilding, + IndexStatusRebuilding, + } +} + const ( // 
JobExecutionFailureTypeFailed is a JobExecutionFailureType enum value JobExecutionFailureTypeFailed = "FAILED" @@ -48000,6 +54542,16 @@ const ( JobExecutionFailureTypeAll = "ALL" ) +// JobExecutionFailureType_Values returns all elements of the JobExecutionFailureType enum +func JobExecutionFailureType_Values() []string { + return []string{ + JobExecutionFailureTypeFailed, + JobExecutionFailureTypeRejected, + JobExecutionFailureTypeTimedOut, + JobExecutionFailureTypeAll, + } +} + const ( // JobExecutionStatusQueued is a JobExecutionStatus enum value JobExecutionStatusQueued = "QUEUED" @@ -48026,6 +54578,20 @@ const ( JobExecutionStatusCanceled = "CANCELED" ) +// JobExecutionStatus_Values returns all elements of the JobExecutionStatus enum +func JobExecutionStatus_Values() []string { + return []string{ + JobExecutionStatusQueued, + JobExecutionStatusInProgress, + JobExecutionStatusSucceeded, + JobExecutionStatusFailed, + JobExecutionStatusTimedOut, + JobExecutionStatusRejected, + JobExecutionStatusRemoved, + JobExecutionStatusCanceled, + } +} + const ( // JobStatusInProgress is a JobStatus enum value JobStatusInProgress = "IN_PROGRESS" @@ -48040,6 +54606,16 @@ const ( JobStatusDeletionInProgress = "DELETION_IN_PROGRESS" ) +// JobStatus_Values returns all elements of the JobStatus enum +func JobStatus_Values() []string { + return []string{ + JobStatusInProgress, + JobStatusCanceled, + JobStatusCompleted, + JobStatusDeletionInProgress, + } +} + const ( // LogLevelDebug is a LogLevel enum value LogLevelDebug = "DEBUG" @@ -48057,6 +54633,17 @@ const ( LogLevelDisabled = "DISABLED" ) +// LogLevel_Values returns all elements of the LogLevel enum +func LogLevel_Values() []string { + return []string{ + LogLevelDebug, + LogLevelInfo, + LogLevelError, + LogLevelWarn, + LogLevelDisabled, + } +} + const ( // LogTargetTypeDefault is a LogTargetType enum value LogTargetTypeDefault = "DEFAULT" @@ -48065,6 +54652,14 @@ const ( LogTargetTypeThingGroup = "THING_GROUP" ) +// LogTargetType_Values returns all elements of the LogTargetType enum +func LogTargetType_Values() []string { + return []string{ + LogTargetTypeDefault, + LogTargetTypeThingGroup, + } +} + const ( // MessageFormatRaw is a MessageFormat enum value MessageFormatRaw = "RAW" @@ -48073,6 +54668,14 @@ const ( MessageFormatJson = "JSON" ) +// MessageFormat_Values returns all elements of the MessageFormat enum +func MessageFormat_Values() []string { + return []string{ + MessageFormatRaw, + MessageFormatJson, + } +} + const ( // MitigationActionTypeUpdateDeviceCertificate is a MitigationActionType enum value MitigationActionTypeUpdateDeviceCertificate = "UPDATE_DEVICE_CERTIFICATE" @@ -48093,6 +54696,18 @@ const ( MitigationActionTypePublishFindingToSns = "PUBLISH_FINDING_TO_SNS" ) +// MitigationActionType_Values returns all elements of the MitigationActionType enum +func MitigationActionType_Values() []string { + return []string{ + MitigationActionTypeUpdateDeviceCertificate, + MitigationActionTypeUpdateCaCertificate, + MitigationActionTypeAddThingsToThingGroup, + MitigationActionTypeReplaceDefaultPolicyVersion, + MitigationActionTypeEnableIotLogging, + MitigationActionTypePublishFindingToSns, + } +} + const ( // OTAUpdateStatusCreatePending is a OTAUpdateStatus enum value OTAUpdateStatusCreatePending = "CREATE_PENDING" @@ -48107,11 +54722,28 @@ const ( OTAUpdateStatusCreateFailed = "CREATE_FAILED" ) +// OTAUpdateStatus_Values returns all elements of the OTAUpdateStatus enum +func OTAUpdateStatus_Values() []string { + return []string{ + 
OTAUpdateStatusCreatePending, + OTAUpdateStatusCreateInProgress, + OTAUpdateStatusCreateComplete, + OTAUpdateStatusCreateFailed, + } +} + const ( // PolicyTemplateNameBlankPolicy is a PolicyTemplateName enum value PolicyTemplateNameBlankPolicy = "BLANK_POLICY" ) +// PolicyTemplateName_Values returns all elements of the PolicyTemplateName enum +func PolicyTemplateName_Values() []string { + return []string{ + PolicyTemplateNameBlankPolicy, + } +} + const ( // ProtocolMqtt is a Protocol enum value ProtocolMqtt = "MQTT" @@ -48120,6 +54752,14 @@ const ( ProtocolHttp = "HTTP" ) +// Protocol_Values returns all elements of the Protocol enum +func Protocol_Values() []string { + return []string{ + ProtocolMqtt, + ProtocolHttp, + } +} + const ( // ReportTypeErrors is a ReportType enum value ReportTypeErrors = "ERRORS" @@ -48128,6 +54768,14 @@ const ( ReportTypeResults = "RESULTS" ) +// ReportType_Values returns all elements of the ReportType enum +func ReportType_Values() []string { + return []string{ + ReportTypeErrors, + ReportTypeResults, + } +} + const ( // ResourceTypeDeviceCertificate is a ResourceType enum value ResourceTypeDeviceCertificate = "DEVICE_CERTIFICATE" @@ -48154,6 +54802,20 @@ const ( ResourceTypeIamRole = "IAM_ROLE" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeDeviceCertificate, + ResourceTypeCaCertificate, + ResourceTypeIotPolicy, + ResourceTypeCognitoIdentityPool, + ResourceTypeClientId, + ResourceTypeAccountSettings, + ResourceTypeRoleAlias, + ResourceTypeIamRole, + } +} + const ( // ServerCertificateStatusInvalid is a ServerCertificateStatus enum value ServerCertificateStatusInvalid = "INVALID" @@ -48162,6 +54824,14 @@ const ( ServerCertificateStatusValid = "VALID" ) +// ServerCertificateStatus_Values returns all elements of the ServerCertificateStatus enum +func ServerCertificateStatus_Values() []string { + return []string{ + ServerCertificateStatusInvalid, + ServerCertificateStatusValid, + } +} + const ( // ServiceTypeData is a ServiceType enum value ServiceTypeData = "DATA" @@ -48173,6 +54843,15 @@ const ( ServiceTypeJobs = "JOBS" ) +// ServiceType_Values returns all elements of the ServiceType enum +func ServiceType_Values() []string { + return []string{ + ServiceTypeData, + ServiceTypeCredentialProvider, + ServiceTypeJobs, + } +} + const ( // StatusInProgress is a Status enum value StatusInProgress = "InProgress" @@ -48190,6 +54869,17 @@ const ( StatusCancelling = "Cancelling" ) +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusInProgress, + StatusCompleted, + StatusFailed, + StatusCancelled, + StatusCancelling, + } +} + const ( // TargetSelectionContinuous is a TargetSelection enum value TargetSelectionContinuous = "CONTINUOUS" @@ -48198,6 +54888,14 @@ const ( TargetSelectionSnapshot = "SNAPSHOT" ) +// TargetSelection_Values returns all elements of the TargetSelection enum +func TargetSelection_Values() []string { + return []string{ + TargetSelectionContinuous, + TargetSelectionSnapshot, + } +} + const ( // ThingConnectivityIndexingModeOff is a ThingConnectivityIndexingMode enum value ThingConnectivityIndexingModeOff = "OFF" @@ -48206,6 +54904,14 @@ const ( ThingConnectivityIndexingModeStatus = "STATUS" ) +// ThingConnectivityIndexingMode_Values returns all elements of the ThingConnectivityIndexingMode enum +func ThingConnectivityIndexingMode_Values() []string { + return []string{ + 
ThingConnectivityIndexingModeOff, + ThingConnectivityIndexingModeStatus, + } +} + const ( // ThingGroupIndexingModeOff is a ThingGroupIndexingMode enum value ThingGroupIndexingModeOff = "OFF" @@ -48214,6 +54920,14 @@ const ( ThingGroupIndexingModeOn = "ON" ) +// ThingGroupIndexingMode_Values returns all elements of the ThingGroupIndexingMode enum +func ThingGroupIndexingMode_Values() []string { + return []string{ + ThingGroupIndexingModeOff, + ThingGroupIndexingModeOn, + } +} + const ( // ThingIndexingModeOff is a ThingIndexingMode enum value ThingIndexingModeOff = "OFF" @@ -48225,6 +54939,15 @@ const ( ThingIndexingModeRegistryAndShadow = "REGISTRY_AND_SHADOW" ) +// ThingIndexingMode_Values returns all elements of the ThingIndexingMode enum +func ThingIndexingMode_Values() []string { + return []string{ + ThingIndexingModeOff, + ThingIndexingModeRegistry, + ThingIndexingModeRegistryAndShadow, + } +} + const ( // TopicRuleDestinationStatusEnabled is a TopicRuleDestinationStatus enum value TopicRuleDestinationStatusEnabled = "ENABLED" @@ -48239,6 +54962,16 @@ const ( TopicRuleDestinationStatusError = "ERROR" ) +// TopicRuleDestinationStatus_Values returns all elements of the TopicRuleDestinationStatus enum +func TopicRuleDestinationStatus_Values() []string { + return []string{ + TopicRuleDestinationStatusEnabled, + TopicRuleDestinationStatusInProgress, + TopicRuleDestinationStatusDisabled, + TopicRuleDestinationStatusError, + } +} + const ( // ViolationEventTypeInAlarm is a ViolationEventType enum value ViolationEventTypeInAlarm = "in-alarm" @@ -48249,3 +54982,12 @@ const ( // ViolationEventTypeAlarmInvalidated is a ViolationEventType enum value ViolationEventTypeAlarmInvalidated = "alarm-invalidated" ) + +// ViolationEventType_Values returns all elements of the ViolationEventType enum +func ViolationEventType_Values() []string { + return []string{ + ViolationEventTypeInAlarm, + ViolationEventTypeAlarmCleared, + ViolationEventTypeAlarmInvalidated, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iot/doc.go b/vendor/github.com/aws/aws-sdk-go/service/iot/doc.go index 7c5721e3f..74e040ef6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iot/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iot/doc.go @@ -10,6 +10,14 @@ // organize resources associated with each device (Registry), configure logging, // and create and manage policies and credentials to authenticate devices. // +// The service endpoints that expose this API are listed in AWS IoT Core Endpoints +// and Quotas (https://docs.aws.amazon.com/general/latest/gr/iot-core.html). +// You must use the endpoint for the region that has the resources you want +// to access. +// +// The service name used by AWS Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) +// to sign the request is: execute-api. +// // For more information about how AWS IoT works, see the Developer Guide (https://docs.aws.amazon.com/iot/latest/developerguide/aws-iot-how-it-works.html). 
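// [Editor's note; illustrative sketch only, not part of the vendored SDK or of this patch.]
// The doc.go paragraph added above notes that callers must target the endpoint of the
// region that owns their IoT resources. A minimal sketch of pinning the AWS SDK for Go v1
// control-plane client to a region, assuming standard credential resolution:
//
//	import (
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/iot"
//	)
//
//	// newIoTClient returns an AWS IoT client bound to the given region.
//	func newIoTClient(region string) *iot.IoT {
//		sess := session.Must(session.NewSession(&aws.Config{
//			Region: aws.String(region), // must match the region holding the IoT resources
//		}))
//		return iot.New(sess)
//	}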
// // For information about how to use the credentials provider for AWS IoT, see diff --git a/vendor/github.com/aws/aws-sdk-go/service/iot/service.go b/vendor/github.com/aws/aws-sdk-go/service/iot/service.go index e20b6f202..1b26fbf37 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iot/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iot/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/api.go b/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/api.go index b101fafeb..a0c0e800d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/api.go @@ -7191,8 +7191,8 @@ func (s *GlueConfiguration) SetTableName(v string) *GlueConfiguration { // There was an internal failure. type InternalFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7209,17 +7209,17 @@ func (s InternalFailureException) GoString() string { func newErrorInternalFailureException(v protocol.ResponseMetadata) error { return &InternalFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalFailureException) Code() string { +func (s *InternalFailureException) Code() string { return "InternalFailureException" } // Message returns the exception's message. -func (s InternalFailureException) Message() string { +func (s *InternalFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7227,28 +7227,28 @@ func (s InternalFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalFailureException) OrigErr() error { +func (s *InternalFailureException) OrigErr() error { return nil } -func (s InternalFailureException) Error() string { +func (s *InternalFailureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalFailureException) RequestID() string { + return s.RespMetadata.RequestID } // The request was not valid. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7265,17 +7265,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7283,22 +7283,22 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Configuration information for delivery of data set contents to AWS IoT Events. @@ -7454,8 +7454,8 @@ func (s *LambdaActivity) SetNext(v string) *LambdaActivity { // The command caused an internal limit to be exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7472,17 +7472,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7490,22 +7490,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListChannelsInput struct { @@ -8770,8 +8770,8 @@ func (s *ReprocessingSummary) SetStatus(v string) *ReprocessingSummary { // A resource with the same name already exists. 
type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -8794,17 +8794,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8812,22 +8812,22 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The configuration of the resource used to execute the "containerAction". @@ -8890,8 +8890,8 @@ func (s *ResourceConfiguration) SetVolumeSizeInGB(v int64) *ResourceConfiguratio // A resource with the specified name could not be found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8908,17 +8908,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8926,22 +8926,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // How long, in days, message data is kept. @@ -9447,8 +9447,8 @@ func (s ServiceManagedDatastoreS3StorageSummary) GoString() string { // The service is temporarily unavailable. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9465,17 +9465,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9483,22 +9483,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // The SQL query to modify the message. @@ -9784,8 +9784,8 @@ func (s TagResourceOutput) GoString() string { // The request was denied due to request throttling. type ThrottlingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9802,17 +9802,17 @@ func (s ThrottlingException) GoString() string { func newErrorThrottlingException(v protocol.ResponseMetadata) error { return &ThrottlingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ThrottlingException) Code() string { +func (s *ThrottlingException) Code() string { return "ThrottlingException" } // Message returns the exception's message. 
-func (s ThrottlingException) Message() string { +func (s *ThrottlingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9820,22 +9820,22 @@ func (s ThrottlingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThrottlingException) OrigErr() error { +func (s *ThrottlingException) OrigErr() error { return nil } -func (s ThrottlingException) Error() string { +func (s *ThrottlingException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ThrottlingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ThrottlingException) RequestID() string { - return s.respMetadata.RequestID +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID } // Information about the data set whose content generation triggers the new @@ -10521,6 +10521,15 @@ const ( ChannelStatusDeleting = "DELETING" ) +// ChannelStatus_Values returns all elements of the ChannelStatus enum +func ChannelStatus_Values() []string { + return []string{ + ChannelStatusCreating, + ChannelStatusActive, + ChannelStatusDeleting, + } +} + const ( // ComputeTypeAcu1 is a ComputeType enum value ComputeTypeAcu1 = "ACU_1" @@ -10529,6 +10538,14 @@ const ( ComputeTypeAcu2 = "ACU_2" ) +// ComputeType_Values returns all elements of the ComputeType enum +func ComputeType_Values() []string { + return []string{ + ComputeTypeAcu1, + ComputeTypeAcu2, + } +} + const ( // DatasetActionTypeQuery is a DatasetActionType enum value DatasetActionTypeQuery = "QUERY" @@ -10537,6 +10554,14 @@ const ( DatasetActionTypeContainer = "CONTAINER" ) +// DatasetActionType_Values returns all elements of the DatasetActionType enum +func DatasetActionType_Values() []string { + return []string{ + DatasetActionTypeQuery, + DatasetActionTypeContainer, + } +} + const ( // DatasetContentStateCreating is a DatasetContentState enum value DatasetContentStateCreating = "CREATING" @@ -10548,6 +10573,15 @@ const ( DatasetContentStateFailed = "FAILED" ) +// DatasetContentState_Values returns all elements of the DatasetContentState enum +func DatasetContentState_Values() []string { + return []string{ + DatasetContentStateCreating, + DatasetContentStateSucceeded, + DatasetContentStateFailed, + } +} + const ( // DatasetStatusCreating is a DatasetStatus enum value DatasetStatusCreating = "CREATING" @@ -10559,6 +10593,15 @@ const ( DatasetStatusDeleting = "DELETING" ) +// DatasetStatus_Values returns all elements of the DatasetStatus enum +func DatasetStatus_Values() []string { + return []string{ + DatasetStatusCreating, + DatasetStatusActive, + DatasetStatusDeleting, + } +} + const ( // DatastoreStatusCreating is a DatastoreStatus enum value DatastoreStatusCreating = "CREATING" @@ -10570,11 +10613,27 @@ const ( DatastoreStatusDeleting = "DELETING" ) +// DatastoreStatus_Values returns all elements of the DatastoreStatus enum +func DatastoreStatus_Values() []string { + return []string{ + DatastoreStatusCreating, + DatastoreStatusActive, + DatastoreStatusDeleting, + } +} + const ( // LoggingLevelError is a LoggingLevel enum value LoggingLevelError = "ERROR" ) +// LoggingLevel_Values returns all elements of the LoggingLevel enum +func LoggingLevel_Values() []string { + return []string{ + LoggingLevelError, + } +} + 
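// [Editor's note; illustrative sketch only, not part of the vendored SDK or of this patch.]
// The generated *_Values() helpers added throughout these files return every member of an
// enum as a []string. In a Terraform provider such as the one this patch extends, they are
// typically fed to a plan-time validator. A minimal sketch, assuming the terraform-plugin-sdk
// helper packages at the import paths shown (adjust to the version actually vendored):
//
//	import (
//		"github.com/aws/aws-sdk-go/service/iotanalytics"
//		"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
//		"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
//	)
//
//	// loggingLevelSchema validates the argument against the SDK's enum values
//	// instead of a hand-maintained list.
//	var loggingLevelSchema = map[string]*schema.Schema{
//		"logging_level": {
//			Type:         schema.TypeString,
//			Optional:     true,
//			ValidateFunc: validation.StringInSlice(iotanalytics.LoggingLevel_Values(), false),
//		},
//	}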
const ( // ReprocessingStatusRunning is a ReprocessingStatus enum value ReprocessingStatusRunning = "RUNNING" @@ -10588,3 +10647,13 @@ const ( // ReprocessingStatusFailed is a ReprocessingStatus enum value ReprocessingStatusFailed = "FAILED" ) + +// ReprocessingStatus_Values returns all elements of the ReprocessingStatus enum +func ReprocessingStatus_Values() []string { + return []string{ + ReprocessingStatusRunning, + ReprocessingStatusSucceeded, + ReprocessingStatusCancelled, + ReprocessingStatusFailed, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go b/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go index 0e505ecbc..eeeceecff 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go b/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go index 24481d95d..74e06e253 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go @@ -1515,6 +1515,10 @@ type Action struct { // // InputName is a required field InputName *string `locationName:"inputName" min:"1" type:"string" required:"true"` + + // You can configure the action payload when you send a message to an AWS IoT + // Events input. + Payload *Payload `locationName:"payload" type:"structure"` } // String returns the string representation @@ -1536,6 +1540,11 @@ func (s *Action) Validate() error { if s.InputName != nil && len(*s.InputName) < 1 { invalidParams.Add(request.NewErrParamMinLen("InputName", 1)) } + if s.Payload != nil { + if err := s.Payload.Validate(); err != nil { + invalidParams.AddNested("Payload", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1549,6 +1558,12 @@ func (s *Action) SetInputName(v string) *Action { return s } +// SetPayload sets the Payload field's value. +func (s *Action) SetPayload(v *Payload) *Action { + s.Payload = v + return s +} + // An action to be performed when the condition is TRUE. type ActionData struct { _ struct{} `type:"structure"` @@ -1556,14 +1571,36 @@ type ActionData struct { // Information needed to clear the timer. ClearTimer *ClearTimerAction `locationName:"clearTimer" type:"structure"` + // Writes to the DynamoDB table that you created. The default action payload + // contains all attribute-value pairs that have the information about the detector + // model instance and the event that triggered the action. You can also customize + // the payload (https://docs.aws.amazon.com/iotevents/latest/apireference/API_Payload.html). + // One column of the DynamoDB table receives all attribute-value pairs in the + // payload that you specify. For more information, see Actions (https://docs.aws.amazon.com/iotevents/latest/developerguide/iotevents-event-actions.html) + // in AWS IoT Events Developer Guide. + DynamoDB *DynamoDBAction `locationName:"dynamoDB" type:"structure"` + + // Writes to the DynamoDB table that you created. 
The default action payload + // contains all attribute-value pairs that have the information about the detector + // model instance and the event that triggered the action. You can also customize + // the payload (https://docs.aws.amazon.com/iotevents/latest/apireference/API_Payload.html). + // A separate column of the DynamoDB table receives one attribute-value pair + // in the payload that you specify. For more information, see Actions (https://docs.aws.amazon.com/iotevents/latest/developerguide/iotevents-event-actions.html) + // in AWS IoT Events Developer Guide. + DynamoDBv2 *DynamoDBv2Action `locationName:"dynamoDBv2" type:"structure"` + // Sends information about the detector model instance and the event that triggered // the action to an Amazon Kinesis Data Firehose delivery stream. Firehose *FirehoseAction `locationName:"firehose" type:"structure"` - // Sends an AWS IoT Events input, passing in information about the detector - // model instance and the event that triggered the action. + // Sends AWS IoT Events input, which passes information about the detector model + // instance and the event that triggered the action. IotEvents *Action `locationName:"iotEvents" type:"structure"` + // Sends information about the detector model instance and the event that triggered + // the action to an asset property in AWS IoT SiteWise . + IotSiteWise *IotSiteWiseAction `locationName:"iotSiteWise" type:"structure"` + // Publishes an MQTT message with the given topic to the AWS IoT message broker. IotTopicPublish *IotTopicPublishAction `locationName:"iotTopicPublish" type:"structure"` @@ -1606,6 +1643,16 @@ func (s *ActionData) Validate() error { invalidParams.AddNested("ClearTimer", err.(request.ErrInvalidParams)) } } + if s.DynamoDB != nil { + if err := s.DynamoDB.Validate(); err != nil { + invalidParams.AddNested("DynamoDB", err.(request.ErrInvalidParams)) + } + } + if s.DynamoDBv2 != nil { + if err := s.DynamoDBv2.Validate(); err != nil { + invalidParams.AddNested("DynamoDBv2", err.(request.ErrInvalidParams)) + } + } if s.Firehose != nil { if err := s.Firehose.Validate(); err != nil { invalidParams.AddNested("Firehose", err.(request.ErrInvalidParams)) @@ -1616,6 +1663,11 @@ func (s *ActionData) Validate() error { invalidParams.AddNested("IotEvents", err.(request.ErrInvalidParams)) } } + if s.IotSiteWise != nil { + if err := s.IotSiteWise.Validate(); err != nil { + invalidParams.AddNested("IotSiteWise", err.(request.ErrInvalidParams)) + } + } if s.IotTopicPublish != nil { if err := s.IotTopicPublish.Validate(); err != nil { invalidParams.AddNested("IotTopicPublish", err.(request.ErrInvalidParams)) @@ -1664,6 +1716,18 @@ func (s *ActionData) SetClearTimer(v *ClearTimerAction) *ActionData { return s } +// SetDynamoDB sets the DynamoDB field's value. +func (s *ActionData) SetDynamoDB(v *DynamoDBAction) *ActionData { + s.DynamoDB = v + return s +} + +// SetDynamoDBv2 sets the DynamoDBv2 field's value. +func (s *ActionData) SetDynamoDBv2(v *DynamoDBv2Action) *ActionData { + s.DynamoDBv2 = v + return s +} + // SetFirehose sets the Firehose field's value. func (s *ActionData) SetFirehose(v *FirehoseAction) *ActionData { s.Firehose = v @@ -1676,6 +1740,12 @@ func (s *ActionData) SetIotEvents(v *Action) *ActionData { return s } +// SetIotSiteWise sets the IotSiteWise field's value. +func (s *ActionData) SetIotSiteWise(v *IotSiteWiseAction) *ActionData { + s.IotSiteWise = v + return s +} + // SetIotTopicPublish sets the IotTopicPublish field's value. 
func (s *ActionData) SetIotTopicPublish(v *IotTopicPublishAction) *ActionData { s.IotTopicPublish = v @@ -1718,6 +1788,211 @@ func (s *ActionData) SetSqs(v *SqsAction) *ActionData { return s } +// A structure that contains timestamp information. For more information, see +// TimeInNanos (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_TimeInNanos.html) +// in the AWS IoT SiteWise API Reference. +// +// For parameters that are string data type, you can specify the following options: +// +// * Use a string. For example, the timeInSeconds value can be '1586400675'. +// +// * Use an expression. For example, the timeInSeconds value can be '${$input.TemperatureInput.sensorData.timestamp/1000}'. +// For more information, see Expressions (https://docs.aws.amazon.com/iotevents/latest/developerguide/iotevents-expressions.html) +// in the AWS IoT Events Developer Guide. +type AssetPropertyTimestamp struct { + _ struct{} `type:"structure"` + + // The nanosecond offset converted from timeInSeconds. The valid range is between + // 0-999999999. You can also specify an expression. + OffsetInNanos *string `locationName:"offsetInNanos" type:"string"` + + // The timestamp, in seconds, in the Unix epoch format. The valid range is between + // 1-31556889864403199. You can also specify an expression. + // + // TimeInSeconds is a required field + TimeInSeconds *string `locationName:"timeInSeconds" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssetPropertyTimestamp) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssetPropertyTimestamp) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssetPropertyTimestamp) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssetPropertyTimestamp"} + if s.TimeInSeconds == nil { + invalidParams.Add(request.NewErrParamRequired("TimeInSeconds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOffsetInNanos sets the OffsetInNanos field's value. +func (s *AssetPropertyTimestamp) SetOffsetInNanos(v string) *AssetPropertyTimestamp { + s.OffsetInNanos = &v + return s +} + +// SetTimeInSeconds sets the TimeInSeconds field's value. +func (s *AssetPropertyTimestamp) SetTimeInSeconds(v string) *AssetPropertyTimestamp { + s.TimeInSeconds = &v + return s +} + +// A structure that contains value information. For more information, see AssetPropertyValue +// (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_AssetPropertyValue.html) +// in the AWS IoT SiteWise API Reference. +// +// For parameters that are string data type, you can specify the following options: +// +// * Use a string. For example, the quality value can be 'GOOD'. +// +// * Use an expression. For example, the quality value can be $input.TemperatureInput.sensorData.quality +// . For more information, see Expressions (https://docs.aws.amazon.com/iotevents/latest/developerguide/iotevents-expressions.html) +// in the AWS IoT Events Developer Guide. +type AssetPropertyValue struct { + _ struct{} `type:"structure"` + + // The quality of the asset property value. The value must be GOOD, BAD, or + // UNCERTAIN. You can also specify an expression. + Quality *string `locationName:"quality" type:"string"` + + // The timestamp associated with the asset property value. The default is the + // current event time. 
+ Timestamp *AssetPropertyTimestamp `locationName:"timestamp" type:"structure"` + + // The value to send to an asset property. + // + // Value is a required field + Value *AssetPropertyVariant `locationName:"value" type:"structure" required:"true"` +} + +// String returns the string representation +func (s AssetPropertyValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssetPropertyValue) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssetPropertyValue) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssetPropertyValue"} + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + if s.Timestamp != nil { + if err := s.Timestamp.Validate(); err != nil { + invalidParams.AddNested("Timestamp", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetQuality sets the Quality field's value. +func (s *AssetPropertyValue) SetQuality(v string) *AssetPropertyValue { + s.Quality = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *AssetPropertyValue) SetTimestamp(v *AssetPropertyTimestamp) *AssetPropertyValue { + s.Timestamp = v + return s +} + +// SetValue sets the Value field's value. +func (s *AssetPropertyValue) SetValue(v *AssetPropertyVariant) *AssetPropertyValue { + s.Value = v + return s +} + +// A structure that contains an asset property value. For more information, +// see Variant (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_Variant.html) +// in the AWS IoT SiteWise API Reference. +// +// You must specify one of the following value types, depending on the dataType +// of the specified asset property. For more information, see AssetProperty +// (https://docs.aws.amazon.com/iot-sitewise/latest/APIReference/API_AssetProperty.html) +// in the AWS IoT SiteWise API Reference. +// +// For parameters that are string data type, you can specify the following options: +// +// * Use a string. For example, the doubleValue value can be '47.9'. +// +// * Use an expression. For example, the doubleValue value can be $input.TemperatureInput.sensorData.temperature. +// For more information, see Expressions (https://docs.aws.amazon.com/iotevents/latest/developerguide/iotevents-expressions.html) +// in the AWS IoT Events Developer Guide. +type AssetPropertyVariant struct { + _ struct{} `type:"structure"` + + // The asset property value is a Boolean value that must be TRUE or FALSE. You + // can also specify an expression. If you use an expression, the evaluated result + // should be a Boolean value. + BooleanValue *string `locationName:"booleanValue" type:"string"` + + // The asset property value is a double. You can also specify an expression. + // If you use an expression, the evaluated result should be a double. + DoubleValue *string `locationName:"doubleValue" type:"string"` + + // The asset property value is an integer. You can also specify an expression. + // If you use an expression, the evaluated result should be an integer. + IntegerValue *string `locationName:"integerValue" type:"string"` + + // The asset property value is a string. You can also specify an expression. + // If you use an expression, the evaluated result should be a string. 
+ StringValue *string `locationName:"stringValue" type:"string"` +} + +// String returns the string representation +func (s AssetPropertyVariant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssetPropertyVariant) GoString() string { + return s.String() +} + +// SetBooleanValue sets the BooleanValue field's value. +func (s *AssetPropertyVariant) SetBooleanValue(v string) *AssetPropertyVariant { + s.BooleanValue = &v + return s +} + +// SetDoubleValue sets the DoubleValue field's value. +func (s *AssetPropertyVariant) SetDoubleValue(v string) *AssetPropertyVariant { + s.DoubleValue = &v + return s +} + +// SetIntegerValue sets the IntegerValue field's value. +func (s *AssetPropertyVariant) SetIntegerValue(v string) *AssetPropertyVariant { + s.IntegerValue = &v + return s +} + +// SetStringValue sets the StringValue field's value. +func (s *AssetPropertyVariant) SetStringValue(v string) *AssetPropertyVariant { + s.StringValue = &v + return s +} + // The attributes from the JSON payload that are made available by the input. // Inputs are derived from messages sent to the AWS IoT Events system using // BatchPutMessage. Each such message contains a JSON payload. Those attributes @@ -2478,11 +2753,15 @@ type DetectorModelConfiguration struct { // are executed. EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` - // The input attribute key used to identify a device or system to create a detector - // (an instance of the detector model) and then to route each input received - // to the appropriate detector (instance). This parameter uses a JSON-path expression - // in the message payload of each input to specify the attribute-value pair - // that is used to identify the device associated with the input. + // The value used to identify a detector instance. When a device or system sends + // input, a new detector instance with a unique key value is created. AWS IoT + // Events can continue to route input to its corresponding detector instance + // based on this identifying information. + // + // This parameter uses a JSON-path expression to select the attribute-value + // pair in the message payload that is used for identification. To route the + // message to the correct detector instance, the device must send a message + // payload that contains the same attribute-value. Key *string `locationName:"key" min:"1" type:"string"` // The time the detector model was last updated. @@ -2766,6 +3045,267 @@ func (s *DetectorModelVersionSummary) SetStatus(v string) *DetectorModelVersionS return s } +// Defines an action to write to the Amazon DynamoDB table that you created. +// The standard action payload contains all attribute-value pairs that have +// the information about the detector model instance and the event that triggered +// the action. You can also customize the payload (https://docs.aws.amazon.com/iotevents/latest/apireference/API_Payload.html). +// One column of the DynamoDB table receives all attribute-value pairs in the +// payload that you specify. +// +// The tableName and hashKeyField values must match the table name and the partition +// key of the DynamoDB table. +// +// If the DynamoDB table also has a sort key, you must specify rangeKeyField. +// The rangeKeyField value must match the sort key. +// +// The hashKeyValue and rangeKeyValue use substitution templates. These templates +// provide data at runtime. The syntax is ${sql-expression}. 
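// [Editor's note; illustrative sketch only, not part of the vendored SDK or of this patch.]
// The DynamoDBAction documented in the surrounding comment routes a detector event into a
// DynamoDB table, with hashKeyValue (and rangeKeyValue) accepting ${...} substitution
// templates. A minimal sketch of wiring it into an event's ActionData via the generated
// setters; the table name, key field, and expression below are hypothetical:
//
//	import "github.com/aws/aws-sdk-go/service/iotevents"
//
//	// exampleDynamoDBAction builds an ActionData that writes each triggering event
//	// to a hypothetical "DetectorEvents" table, keyed by the sending device ID.
//	func exampleDynamoDBAction() *iotevents.ActionData {
//		dynamo := (&iotevents.DynamoDBAction{}).
//			SetTableName("DetectorEvents").
//			SetHashKeyField("deviceId").
//			SetHashKeyValue("${$input.TemperatureInput.sensorData.deviceId}")
//		return (&iotevents.ActionData{}).SetDynamoDB(dynamo)
//	}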
+// +// You can use expressions for parameters that are string data type. For more +// information, see Expressions (https://docs.aws.amazon.com/iotevents/latest/developerguide/iotevents-expressions.html) +// in the AWS IoT Events Developer Guide. +// +// If the defined payload type is a string, DynamoDBAction writes non-JSON data +// to the DynamoDB table as binary data. The DynamoDB console displays the data +// as Base64-encoded text. The payloadField is _raw. +type DynamoDBAction struct { + _ struct{} `type:"structure"` + + // The name of the hash key (also called the partition key). + // + // HashKeyField is a required field + HashKeyField *string `locationName:"hashKeyField" type:"string" required:"true"` + + // The data type for the hash key (also called the partition key). You can specify + // the following values: + // + // * STRING - The hash key is a string. + // + // * NUMBER - The hash key is a number. + // + // If you don't specify hashKeyType, the default value is STRING. + HashKeyType *string `locationName:"hashKeyType" type:"string"` + + // The value of the hash key (also called the partition key). + // + // HashKeyValue is a required field + HashKeyValue *string `locationName:"hashKeyValue" type:"string" required:"true"` + + // The type of operation to perform. You can specify the following values: + // + // * INSERT - Insert data as a new item into the DynamoDB table. This item + // uses the specified hash key as a partition key. If you specified a range + // key, the item uses the range key as a sort key. + // + // * UPDATE - Update an existing item of the DynamoDB table with new data. + // This item's partition key must match the specified hash key. If you specified + // a range key, the range key must match the item's sort key. + // + // * DELETE - Delete an existing item of the DynamoDB table. This item's + // partition key must match the specified hash key. If you specified a range + // key, the range key must match the item's sort key. + // + // If you don't specify this parameter, AWS IoT Events triggers the INSERT operation. + Operation *string `locationName:"operation" type:"string"` + + // Information needed to configure the payload. + // + // By default, AWS IoT Events generates a standard payload in JSON for any action. + // This action payload contains all attribute-value pairs that have the information + // about the detector model instance and the event triggered the action. To + // configure the action payload, you can use contentExpression. + Payload *Payload `locationName:"payload" type:"structure"` + + // The name of the DynamoDB column that receives the action payload. + // + // If you don't specify this parameter, the name of the DynamoDB column is payload. + PayloadField *string `locationName:"payloadField" type:"string"` + + // The name of the range key (also called the sort key). + RangeKeyField *string `locationName:"rangeKeyField" type:"string"` + + // The data type for the range key (also called the sort key), You can specify + // the following values: + // + // * STRING - The range key is a string. + // + // * NUMBER - The range key is number. + // + // If you don't specify rangeKeyField, the default value is STRING. + RangeKeyType *string `locationName:"rangeKeyType" type:"string"` + + // The value of the range key (also called the sort key). + RangeKeyValue *string `locationName:"rangeKeyValue" type:"string"` + + // The name of the DynamoDB table. 
+ // + // TableName is a required field + TableName *string `locationName:"tableName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DynamoDBAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DynamoDBAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DynamoDBAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DynamoDBAction"} + if s.HashKeyField == nil { + invalidParams.Add(request.NewErrParamRequired("HashKeyField")) + } + if s.HashKeyValue == nil { + invalidParams.Add(request.NewErrParamRequired("HashKeyValue")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.Payload != nil { + if err := s.Payload.Validate(); err != nil { + invalidParams.AddNested("Payload", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHashKeyField sets the HashKeyField field's value. +func (s *DynamoDBAction) SetHashKeyField(v string) *DynamoDBAction { + s.HashKeyField = &v + return s +} + +// SetHashKeyType sets the HashKeyType field's value. +func (s *DynamoDBAction) SetHashKeyType(v string) *DynamoDBAction { + s.HashKeyType = &v + return s +} + +// SetHashKeyValue sets the HashKeyValue field's value. +func (s *DynamoDBAction) SetHashKeyValue(v string) *DynamoDBAction { + s.HashKeyValue = &v + return s +} + +// SetOperation sets the Operation field's value. +func (s *DynamoDBAction) SetOperation(v string) *DynamoDBAction { + s.Operation = &v + return s +} + +// SetPayload sets the Payload field's value. +func (s *DynamoDBAction) SetPayload(v *Payload) *DynamoDBAction { + s.Payload = v + return s +} + +// SetPayloadField sets the PayloadField field's value. +func (s *DynamoDBAction) SetPayloadField(v string) *DynamoDBAction { + s.PayloadField = &v + return s +} + +// SetRangeKeyField sets the RangeKeyField field's value. +func (s *DynamoDBAction) SetRangeKeyField(v string) *DynamoDBAction { + s.RangeKeyField = &v + return s +} + +// SetRangeKeyType sets the RangeKeyType field's value. +func (s *DynamoDBAction) SetRangeKeyType(v string) *DynamoDBAction { + s.RangeKeyType = &v + return s +} + +// SetRangeKeyValue sets the RangeKeyValue field's value. +func (s *DynamoDBAction) SetRangeKeyValue(v string) *DynamoDBAction { + s.RangeKeyValue = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DynamoDBAction) SetTableName(v string) *DynamoDBAction { + s.TableName = &v + return s +} + +// Defines an action to write to the Amazon DynamoDB table that you created. +// The default action payload contains all attribute-value pairs that have the +// information about the detector model instance and the event that triggered +// the action. You can also customize the payload (https://docs.aws.amazon.com/iotevents/latest/apireference/API_Payload.html). +// A separate column of the DynamoDB table receives one attribute-value pair +// in the payload that you specify. +// +// The type value for Payload must be JSON. +// +// You can use expressions for parameters that are strings. For more information, +// see Expressions (https://docs.aws.amazon.com/iotevents/latest/developerguide/iotevents-expressions.html) +// in the AWS IoT Events Developer Guide. 
+type DynamoDBv2Action struct { + _ struct{} `type:"structure"` + + // Information needed to configure the payload. + // + // By default, AWS IoT Events generates a standard payload in JSON for any action. + // This action payload contains all attribute-value pairs that have the information + // about the detector model instance and the event triggered the action. To + // configure the action payload, you can use contentExpression. + Payload *Payload `locationName:"payload" type:"structure"` + + // The name of the DynamoDB table. + // + // TableName is a required field + TableName *string `locationName:"tableName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DynamoDBv2Action) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DynamoDBv2Action) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DynamoDBv2Action) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DynamoDBv2Action"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.Payload != nil { + if err := s.Payload.Validate(); err != nil { + invalidParams.AddNested("Payload", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPayload sets the Payload field's value. +func (s *DynamoDBv2Action) SetPayload(v *Payload) *DynamoDBv2Action { + s.Payload = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DynamoDBv2Action) SetTableName(v string) *DynamoDBv2Action { + s.TableName = &v + return s +} + // Specifies the actions to be performed when the condition evaluates to TRUE. type Event struct { _ struct{} `type:"structure"` @@ -2845,6 +3385,10 @@ type FirehoseAction struct { // DeliveryStreamName is a required field DeliveryStreamName *string `locationName:"deliveryStreamName" type:"string" required:"true"` + // You can configure the action payload when you send a message to an Amazon + // Kinesis Data Firehose delivery stream. + Payload *Payload `locationName:"payload" type:"structure"` + // A character separator that is used to separate records written to the Kinesis // Data Firehose delivery stream. Valid values are: '\n' (newline), '\t' (tab), // '\r\n' (Windows newline), ',' (comma). @@ -2867,6 +3411,11 @@ func (s *FirehoseAction) Validate() error { if s.DeliveryStreamName == nil { invalidParams.Add(request.NewErrParamRequired("DeliveryStreamName")) } + if s.Payload != nil { + if err := s.Payload.Validate(); err != nil { + invalidParams.AddNested("Payload", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2880,6 +3429,12 @@ func (s *FirehoseAction) SetDeliveryStreamName(v string) *FirehoseAction { return s } +// SetPayload sets the Payload field's value. +func (s *FirehoseAction) SetPayload(v *Payload) *FirehoseAction { + s.Payload = v + return s +} + // SetSeparator sets the Separator field's value. func (s *FirehoseAction) SetSeparator(v string) *FirehoseAction { s.Separator = &v @@ -3125,8 +3680,8 @@ func (s *InputSummary) SetStatus(v string) *InputSummary { // An internal failure occurred. type InternalFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. 
Message_ *string `locationName:"message" type:"string"` @@ -3144,17 +3699,17 @@ func (s InternalFailureException) GoString() string { func newErrorInternalFailureException(v protocol.ResponseMetadata) error { return &InternalFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalFailureException) Code() string { +func (s *InternalFailureException) Code() string { return "InternalFailureException" } // Message returns the exception's message. -func (s InternalFailureException) Message() string { +func (s *InternalFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3162,28 +3717,28 @@ func (s InternalFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalFailureException) OrigErr() error { +func (s *InternalFailureException) OrigErr() error { return nil } -func (s InternalFailureException) Error() string { +func (s *InternalFailureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalFailureException) RequestID() string { + return s.RespMetadata.RequestID } // The request was invalid. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -3201,17 +3756,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3219,22 +3774,117 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
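The exception structs in this hunk now implement the error interface on pointer receivers and expose an exported RespMetadata, so callers can match them with errors.As. A rough sketch, assuming the DescribeInput operation defined elsewhere in this file and a placeholder input name:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotevents"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := iotevents.New(sess)

	// "example-input" is a placeholder; DescribeInput is generated elsewhere in this file.
	_, err := svc.DescribeInput(&iotevents.DescribeInputInput{
		InputName: aws.String("example-input"),
	})

	// The pointer type satisfies error, so errors.As can target it and expose
	// Code, Message, and the HTTP status carried in the exported RespMetadata.
	var nf *iotevents.ResourceNotFoundException
	if errors.As(err, &nf) {
		fmt.Printf("%s (%d): %s\n", nf.Code(), nf.StatusCode(), nf.Message())
	}
}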
-func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Sends information about the detector model instance and the event that triggered +// the action to a specified asset property in AWS IoT SiteWise. +// +// You must specify either propertyAlias or both assetId and propertyId to identify +// the target asset property in AWS IoT SiteWise. +// +// For parameters that are string data type, you can specify the following options: +// +// * Use a string. For example, the propertyAlias value can be '/company/windfarm/3/turbine/7/temperature'. +// +// * Use an expression. For example, the propertyAlias value can be 'company/windfarm/${$input.TemperatureInput.sensorData.windfarmID}/turbine/${$input.TemperatureInput.sensorData.turbineID}/temperature'. +// For more information, see Expressions (https://docs.aws.amazon.com/iotevents/latest/developerguide/iotevents-expressions.html) +// in the AWS IoT Events Developer Guide. +type IotSiteWiseAction struct { + _ struct{} `type:"structure"` + + // The ID of the asset that has the specified property. You can specify an expression. + AssetId *string `locationName:"assetId" type:"string"` + + // A unique identifier for this entry. You can use the entry ID to track which + // data entry causes an error in case of failure. The default is a new unique + // identifier. You can also specify an expression. + EntryId *string `locationName:"entryId" type:"string"` + + // The alias of the asset property. You can also specify an expression. + PropertyAlias *string `locationName:"propertyAlias" type:"string"` + + // The ID of the asset property. You can specify an expression. + PropertyId *string `locationName:"propertyId" type:"string"` + + // The value to send to the asset property. This value contains timestamp, quality, + // and value (TQV) information. + // + // PropertyValue is a required field + PropertyValue *AssetPropertyValue `locationName:"propertyValue" type:"structure" required:"true"` +} + +// String returns the string representation +func (s IotSiteWiseAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IotSiteWiseAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IotSiteWiseAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IotSiteWiseAction"} + if s.PropertyValue == nil { + invalidParams.Add(request.NewErrParamRequired("PropertyValue")) + } + if s.PropertyValue != nil { + if err := s.PropertyValue.Validate(); err != nil { + invalidParams.AddNested("PropertyValue", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssetId sets the AssetId field's value. +func (s *IotSiteWiseAction) SetAssetId(v string) *IotSiteWiseAction { + s.AssetId = &v + return s +} + +// SetEntryId sets the EntryId field's value. +func (s *IotSiteWiseAction) SetEntryId(v string) *IotSiteWiseAction { + s.EntryId = &v + return s +} + +// SetPropertyAlias sets the PropertyAlias field's value. +func (s *IotSiteWiseAction) SetPropertyAlias(v string) *IotSiteWiseAction { + s.PropertyAlias = &v + return s +} + +// SetPropertyId sets the PropertyId field's value. 
+func (s *IotSiteWiseAction) SetPropertyId(v string) *IotSiteWiseAction { + s.PropertyId = &v + return s +} + +// SetPropertyValue sets the PropertyValue field's value. +func (s *IotSiteWiseAction) SetPropertyValue(v *AssetPropertyValue) *IotSiteWiseAction { + s.PropertyValue = v + return s } // Information required to publish the MQTT message through the AWS IoT message @@ -3248,6 +3898,10 @@ type IotTopicPublishAction struct { // // MqttTopic is a required field MqttTopic *string `locationName:"mqttTopic" min:"1" type:"string" required:"true"` + + // You can configure the action payload when you publish a message to an AWS + // IoT Core topic. + Payload *Payload `locationName:"payload" type:"structure"` } // String returns the string representation @@ -3269,6 +3923,11 @@ func (s *IotTopicPublishAction) Validate() error { if s.MqttTopic != nil && len(*s.MqttTopic) < 1 { invalidParams.Add(request.NewErrParamMinLen("MqttTopic", 1)) } + if s.Payload != nil { + if err := s.Payload.Validate(); err != nil { + invalidParams.AddNested("Payload", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3282,6 +3941,12 @@ func (s *IotTopicPublishAction) SetMqttTopic(v string) *IotTopicPublishAction { return s } +// SetPayload sets the Payload field's value. +func (s *IotTopicPublishAction) SetPayload(v *Payload) *IotTopicPublishAction { + s.Payload = v + return s +} + // Calls a Lambda function, passing in information about the detector model // instance and the event that triggered the action. type LambdaAction struct { @@ -3291,6 +3956,10 @@ type LambdaAction struct { // // FunctionArn is a required field FunctionArn *string `locationName:"functionArn" min:"1" type:"string" required:"true"` + + // You can configure the action payload when you send a message to a Lambda + // function. + Payload *Payload `locationName:"payload" type:"structure"` } // String returns the string representation @@ -3312,6 +3981,11 @@ func (s *LambdaAction) Validate() error { if s.FunctionArn != nil && len(*s.FunctionArn) < 1 { invalidParams.Add(request.NewErrParamMinLen("FunctionArn", 1)) } + if s.Payload != nil { + if err := s.Payload.Validate(); err != nil { + invalidParams.AddNested("Payload", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -3325,10 +3999,16 @@ func (s *LambdaAction) SetFunctionArn(v string) *LambdaAction { return s } +// SetPayload sets the Payload field's value. +func (s *LambdaAction) SetPayload(v *Payload) *LambdaAction { + s.Payload = v + return s +} + // A limit was exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -3346,17 +4026,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. 
-func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3364,22 +4044,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListDetectorModelVersionsInput struct { @@ -3945,6 +4625,71 @@ func (s *OnInputLifecycle) SetTransitionEvents(v []*TransitionEvent) *OnInputLif return s } +// Information needed to configure the payload. +// +// By default, AWS IoT Events generates a standard payload in JSON for any action. +// This action payload contains all attribute-value pairs that have the information +// about the detector model instance and the event triggered the action. To +// configure the action payload, you can use contentExpression. +type Payload struct { + _ struct{} `type:"structure"` + + // The content of the payload. You can use a string expression that includes + // quoted strings (''), variables ($variable.), input + // values ($input..), string concatenations, and + // quoted strings that contain ${} as the content. The recommended maximum size + // of a content expression is 1 KB. + // + // ContentExpression is a required field + ContentExpression *string `locationName:"contentExpression" min:"1" type:"string" required:"true"` + + // The value of the payload type can be either STRING or JSON. + // + // Type is a required field + Type *string `locationName:"type" type:"string" required:"true" enum:"PayloadType"` +} + +// String returns the string representation +func (s Payload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Payload) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Payload) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Payload"} + if s.ContentExpression == nil { + invalidParams.Add(request.NewErrParamRequired("ContentExpression")) + } + if s.ContentExpression != nil && len(*s.ContentExpression) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ContentExpression", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContentExpression sets the ContentExpression field's value. +func (s *Payload) SetContentExpression(v string) *Payload { + s.ContentExpression = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *Payload) SetType(v string) *Payload { + s.Type = &v + return s +} + type PutLoggingOptionsInput struct { _ struct{} `type:"structure"` @@ -4003,7 +4748,8 @@ func (s PutLoggingOptionsOutput) GoString() string { } // Information required to reset the timer. The timer is reset to the previously -// evaluated result of the duration. +// evaluated result of the duration. The duration expression isn't reevaluated +// when you reset the timer. type ResetTimerAction struct { _ struct{} `type:"structure"` @@ -4047,8 +4793,8 @@ func (s *ResetTimerAction) SetTimerName(v string) *ResetTimerAction { // The resource already exists. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -4072,17 +4818,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4090,28 +4836,28 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The resource is in use. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -4129,17 +4875,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. 
-func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4147,28 +4893,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The resource was not found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -4186,17 +4932,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4204,28 +4950,32 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Information required to publish the Amazon SNS message. type SNSTopicPublishAction struct { _ struct{} `type:"structure"` + // You can configure the action payload when you send a message as an Amazon + // SNS push notification. + Payload *Payload `locationName:"payload" type:"structure"` + // The ARN of the Amazon SNS target where the message is sent. 
// // TargetArn is a required field @@ -4251,6 +5001,11 @@ func (s *SNSTopicPublishAction) Validate() error { if s.TargetArn != nil && len(*s.TargetArn) < 1 { invalidParams.Add(request.NewErrParamMinLen("TargetArn", 1)) } + if s.Payload != nil { + if err := s.Payload.Validate(); err != nil { + invalidParams.AddNested("Payload", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4258,6 +5013,12 @@ func (s *SNSTopicPublishAction) Validate() error { return nil } +// SetPayload sets the Payload field's value. +func (s *SNSTopicPublishAction) SetPayload(v *Payload) *SNSTopicPublishAction { + s.Payload = v + return s +} + // SetTargetArn sets the TargetArn field's value. func (s *SNSTopicPublishAction) SetTargetArn(v string) *SNSTopicPublishAction { s.TargetArn = &v @@ -4266,8 +5027,8 @@ func (s *SNSTopicPublishAction) SetTargetArn(v string) *SNSTopicPublishAction { // The service is currently unavailable. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -4285,17 +5046,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4303,22 +5064,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // Information needed to set the timer. @@ -4334,7 +5095,7 @@ type SetTimerAction struct { DurationExpression *string `locationName:"durationExpression" min:"1" type:"string"` // The number of seconds until the timer expires. The minimum value is 60 seconds - // to ensure accuracy. + // to ensure accuracy. The maximum value is 31622400 seconds. // // Deprecated: seconds is deprecated. You can use durationExpression for SetTimerAction. The value of seconds can be used as a string expression for durationExpression. 
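Given the note above that seconds is deprecated in favour of durationExpression, here is a small sketch of a timer event built the recommended way. The Event and Action setters are generated elsewhere in this file; the timer name and expression are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/iotevents"
)

func main() {
	// Prefer durationExpression over the deprecated seconds field; the
	// expression here references a placeholder detector variable.
	setTimer := (&iotevents.SetTimerAction{}).
		SetTimerName("idleTimer").
		SetDurationExpression("$variable.idleTimeoutSeconds")

	// Event and Action builders are generated elsewhere in this file.
	event := (&iotevents.Event{}).
		SetEventName("StartIdleTimer").
		SetCondition("true").
		SetActions([]*iotevents.Action{
			(&iotevents.Action{}).SetSetTimer(setTimer),
		})

	fmt.Println(event)
}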
Seconds *int64 `locationName:"seconds" min:"1" deprecated:"true" type:"integer"` @@ -4459,13 +5220,17 @@ func (s *SetVariableAction) SetVariableName(v string) *SetVariableAction { type SqsAction struct { _ struct{} `type:"structure"` + // You can configure the action payload when you send a message to an Amazon + // SQS queue. + Payload *Payload `locationName:"payload" type:"structure"` + // The URL of the SQS queue where the data is written. // // QueueUrl is a required field QueueUrl *string `locationName:"queueUrl" type:"string" required:"true"` // Set this to TRUE if you want the data to be base-64 encoded before it is - // written to the queue. + // written to the queue. Otherwise, set this to FALSE. UseBase64 *bool `locationName:"useBase64" type:"boolean"` } @@ -4485,6 +5250,11 @@ func (s *SqsAction) Validate() error { if s.QueueUrl == nil { invalidParams.Add(request.NewErrParamRequired("QueueUrl")) } + if s.Payload != nil { + if err := s.Payload.Validate(); err != nil { + invalidParams.AddNested("Payload", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4492,6 +5262,12 @@ func (s *SqsAction) Validate() error { return nil } +// SetPayload sets the Payload field's value. +func (s *SqsAction) SetPayload(v *Payload) *SqsAction { + s.Payload = v + return s +} + // SetQueueUrl sets the QueueUrl field's value. func (s *SqsAction) SetQueueUrl(v string) *SqsAction { s.QueueUrl = &v @@ -4727,8 +5503,8 @@ func (s TagResourceOutput) GoString() string { // The request could not be completed due to throttling. type ThrottlingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -4746,17 +5522,17 @@ func (s ThrottlingException) GoString() string { func newErrorThrottlingException(v protocol.ResponseMetadata) error { return &ThrottlingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ThrottlingException) Code() string { +func (s *ThrottlingException) Code() string { return "ThrottlingException" } // Message returns the exception's message. -func (s ThrottlingException) Message() string { +func (s *ThrottlingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4764,22 +5540,22 @@ func (s ThrottlingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThrottlingException) OrigErr() error { +func (s *ThrottlingException) OrigErr() error { return nil } -func (s ThrottlingException) Error() string { +func (s *ThrottlingException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ThrottlingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ThrottlingException) RequestID() string { - return s.respMetadata.RequestID +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the actions performed and the next state entered when a condition @@ -4875,8 +5651,8 @@ func (s *TransitionEvent) SetNextState(v string) *TransitionEvent { // The requested operation is not supported. 
type UnsupportedOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The message for the exception. Message_ *string `locationName:"message" type:"string"` @@ -4894,17 +5670,17 @@ func (s UnsupportedOperationException) GoString() string { func newErrorUnsupportedOperationException(v protocol.ResponseMetadata) error { return &UnsupportedOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedOperationException) Code() string { +func (s *UnsupportedOperationException) Code() string { return "UnsupportedOperationException" } // Message returns the exception's message. -func (s UnsupportedOperationException) Message() string { +func (s *UnsupportedOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4912,22 +5688,22 @@ func (s UnsupportedOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedOperationException) OrigErr() error { +func (s *UnsupportedOperationException) OrigErr() error { return nil } -func (s UnsupportedOperationException) Error() string { +func (s *UnsupportedOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedOperationException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -5234,6 +6010,19 @@ const ( DetectorModelVersionStatusFailed = "FAILED" ) +// DetectorModelVersionStatus_Values returns all elements of the DetectorModelVersionStatus enum +func DetectorModelVersionStatus_Values() []string { + return []string{ + DetectorModelVersionStatusActive, + DetectorModelVersionStatusActivating, + DetectorModelVersionStatusInactive, + DetectorModelVersionStatusDeprecated, + DetectorModelVersionStatusDraft, + DetectorModelVersionStatusPaused, + DetectorModelVersionStatusFailed, + } +} + const ( // EvaluationMethodBatch is a EvaluationMethod enum value EvaluationMethodBatch = "BATCH" @@ -5242,6 +6031,14 @@ const ( EvaluationMethodSerial = "SERIAL" ) +// EvaluationMethod_Values returns all elements of the EvaluationMethod enum +func EvaluationMethod_Values() []string { + return []string{ + EvaluationMethodBatch, + EvaluationMethodSerial, + } +} + const ( // InputStatusCreating is a InputStatus enum value InputStatusCreating = "CREATING" @@ -5256,6 +6053,16 @@ const ( InputStatusDeleting = "DELETING" ) +// InputStatus_Values returns all elements of the InputStatus enum +func InputStatus_Values() []string { + return []string{ + InputStatusCreating, + InputStatusUpdating, + InputStatusActive, + InputStatusDeleting, + } +} + const ( // LoggingLevelError is a LoggingLevel enum value LoggingLevelError = "ERROR" @@ -5266,3 +6073,28 @@ const ( // LoggingLevelDebug is a LoggingLevel enum value LoggingLevelDebug = "DEBUG" ) + +// LoggingLevel_Values returns all elements of the LoggingLevel enum +func LoggingLevel_Values() []string { + return []string{ + 
LoggingLevelError, + LoggingLevelInfo, + LoggingLevelDebug, + } +} + +const ( + // PayloadTypeString is a PayloadType enum value + PayloadTypeString = "STRING" + + // PayloadTypeJson is a PayloadType enum value + PayloadTypeJson = "JSON" +) + +// PayloadType_Values returns all elements of the PayloadType enum +func PayloadType_Values() []string { + return []string{ + PayloadTypeString, + PayloadTypeJson, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotevents/doc.go b/vendor/github.com/aws/aws-sdk-go/service/iotevents/doc.go index 96fb49ba7..06ec1c6ac 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iotevents/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iotevents/doc.go @@ -5,8 +5,8 @@ // // AWS IoT Events monitors your equipment or device fleets for failures or changes // in operation, and triggers actions when such events occur. You can use AWS -// IoT Events API commands to create, read, update, and delete inputs and detector -// models, and to list their versions. +// IoT Events API operations to create, read, update, and delete inputs and +// detector models, and to list their versions. // // See https://docs.aws.amazon.com/goto/WebAPI/iotevents-2018-07-27 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go b/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go index 85a685ddf..7dd170dd8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go b/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go index 620d2a10c..f2366e802 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go @@ -13,6 +13,200 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restjson" ) +const opBatchAssociateScramSecret = "BatchAssociateScramSecret" + +// BatchAssociateScramSecretRequest generates a "aws/request.Request" representing the +// client's request for the BatchAssociateScramSecret operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchAssociateScramSecret for more information on using the BatchAssociateScramSecret +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchAssociateScramSecretRequest method. 
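The new *_Values() helpers above expose each iotevents enum as a slice at runtime, which is convenient for membership checks (in provider code, such slices are commonly fed to validation.StringInSlice, though that wiring is outside this hunk). A small self-contained sketch using PayloadType_Values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/iotevents"
)

// contains reports whether v is one of the enum values in vals.
func contains(vals []string, v string) bool {
	for _, s := range vals {
		if s == v {
			return true
		}
	}
	return false
}

func main() {
	for _, candidate := range []string{"JSON", "YAML"} {
		if contains(iotevents.PayloadType_Values(), candidate) {
			fmt.Println(candidate, "is a valid payload type")
		} else {
			fmt.Println(candidate, "is not a valid payload type")
		}
	}
}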
+// req, resp := client.BatchAssociateScramSecretRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/BatchAssociateScramSecret +func (c *Kafka) BatchAssociateScramSecretRequest(input *BatchAssociateScramSecretInput) (req *request.Request, output *BatchAssociateScramSecretOutput) { + op := &request.Operation{ + Name: opBatchAssociateScramSecret, + HTTPMethod: "POST", + HTTPPath: "/v1/clusters/{clusterArn}/scram-secrets", + } + + if input == nil { + input = &BatchAssociateScramSecretInput{} + } + + output = &BatchAssociateScramSecretOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchAssociateScramSecret API operation for Managed Streaming for Kafka. +// +// Associates one or more Scram Secrets with an Amazon MSK cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation BatchAssociateScramSecret for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// Returns information about an error. +// +// * UnauthorizedException +// Returns information about an error. +// +// * InternalServerErrorException +// Returns information about an error. +// +// * ForbiddenException +// Returns information about an error. +// +// * NotFoundException +// Returns information about an error. +// +// * ServiceUnavailableException +// Returns information about an error. +// +// * TooManyRequestsException +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/BatchAssociateScramSecret +func (c *Kafka) BatchAssociateScramSecret(input *BatchAssociateScramSecretInput) (*BatchAssociateScramSecretOutput, error) { + req, out := c.BatchAssociateScramSecretRequest(input) + return out, req.Send() +} + +// BatchAssociateScramSecretWithContext is the same as BatchAssociateScramSecret with the addition of +// the ability to pass a context and additional request options. +// +// See BatchAssociateScramSecret for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) BatchAssociateScramSecretWithContext(ctx aws.Context, input *BatchAssociateScramSecretInput, opts ...request.Option) (*BatchAssociateScramSecretOutput, error) { + req, out := c.BatchAssociateScramSecretRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBatchDisassociateScramSecret = "BatchDisassociateScramSecret" + +// BatchDisassociateScramSecretRequest generates a "aws/request.Request" representing the +// client's request for the BatchDisassociateScramSecret operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
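A minimal sketch of calling the new BatchAssociateScramSecret operation with a context. The ClusterArn and SecretArnList field names are assumed from the input shape defined later in this file, and both ARNs are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kafka"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := kafka.New(sess)

	// ClusterArn and SecretArnList are assumed input fields; the ARNs are placeholders.
	out, err := svc.BatchAssociateScramSecretWithContext(aws.BackgroundContext(),
		&kafka.BatchAssociateScramSecretInput{
			ClusterArn:    aws.String("arn:aws:kafka:us-east-1:111122223333:cluster/example/abc"),
			SecretArnList: []*string{aws.String("arn:aws:secretsmanager:us-east-1:111122223333:secret:AmazonMSK_example")},
		})
	if err != nil {
		fmt.Println("associate failed:", err)
		return
	}
	fmt.Println(out)
}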
+// +// See BatchDisassociateScramSecret for more information on using the BatchDisassociateScramSecret +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchDisassociateScramSecretRequest method. +// req, resp := client.BatchDisassociateScramSecretRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/BatchDisassociateScramSecret +func (c *Kafka) BatchDisassociateScramSecretRequest(input *BatchDisassociateScramSecretInput) (req *request.Request, output *BatchDisassociateScramSecretOutput) { + op := &request.Operation{ + Name: opBatchDisassociateScramSecret, + HTTPMethod: "PATCH", + HTTPPath: "/v1/clusters/{clusterArn}/scram-secrets", + } + + if input == nil { + input = &BatchDisassociateScramSecretInput{} + } + + output = &BatchDisassociateScramSecretOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchDisassociateScramSecret API operation for Managed Streaming for Kafka. +// +// Disassociates one or more Scram Secrets from an Amazon MSK cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation BatchDisassociateScramSecret for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// Returns information about an error. +// +// * UnauthorizedException +// Returns information about an error. +// +// * InternalServerErrorException +// Returns information about an error. +// +// * ForbiddenException +// Returns information about an error. +// +// * NotFoundException +// Returns information about an error. +// +// * ServiceUnavailableException +// Returns information about an error. +// +// * TooManyRequestsException +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/BatchDisassociateScramSecret +func (c *Kafka) BatchDisassociateScramSecret(input *BatchDisassociateScramSecretInput) (*BatchDisassociateScramSecretOutput, error) { + req, out := c.BatchDisassociateScramSecretRequest(input) + return out, req.Send() +} + +// BatchDisassociateScramSecretWithContext is the same as BatchDisassociateScramSecret with the addition of +// the ability to pass a context and additional request options. +// +// See BatchDisassociateScramSecret for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) BatchDisassociateScramSecretWithContext(ctx aws.Context, input *BatchDisassociateScramSecretInput, opts ...request.Option) (*BatchDisassociateScramSecretOutput, error) { + req, out := c.BatchDisassociateScramSecretRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCreateCluster = "CreateCluster" // CreateClusterRequest generates a "aws/request.Request" representing the @@ -296,6 +490,95 @@ func (c *Kafka) DeleteClusterWithContext(ctx aws.Context, input *DeleteClusterIn return out, req.Send() } +const opDeleteConfiguration = "DeleteConfiguration" + +// DeleteConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteConfiguration for more information on using the DeleteConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteConfigurationRequest method. +// req, resp := client.DeleteConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DeleteConfiguration +func (c *Kafka) DeleteConfigurationRequest(input *DeleteConfigurationInput) (req *request.Request, output *DeleteConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/v1/configurations/{arn}", + } + + if input == nil { + input = &DeleteConfigurationInput{} + } + + output = &DeleteConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteConfiguration API operation for Managed Streaming for Kafka. +// +// Deletes the specified MSK configuration. The configuration must be in the +// ACTIVE or DELETE_FAILED state. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation DeleteConfiguration for usage and error information. +// +// Returned Error Types: +// * NotFoundException +// Returns information about an error. +// +// * BadRequestException +// Returns information about an error. +// +// * InternalServerErrorException +// Returns information about an error. +// +// * ForbiddenException +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/DeleteConfiguration +func (c *Kafka) DeleteConfiguration(input *DeleteConfigurationInput) (*DeleteConfigurationOutput, error) { + req, out := c.DeleteConfigurationRequest(input) + return out, req.Send() +} + +// DeleteConfigurationWithContext is the same as DeleteConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
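Likewise, a short sketch of the new DeleteConfiguration operation. The Arn field name is assumed from the input shape defined later in this file, and the ARN is a placeholder; as the doc above notes, the configuration must be in the ACTIVE or DELETE_FAILED state.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kafka"
)

func main() {
	svc := kafka.New(session.Must(session.NewSession()))

	// Arn is an assumed input field; the ARN is a placeholder.
	out, err := svc.DeleteConfiguration(&kafka.DeleteConfigurationInput{
		Arn: aws.String("arn:aws:kafka:us-east-1:111122223333:configuration/example/abc"),
	})
	if err != nil {
		fmt.Println("delete failed:", err)
		return
	}
	fmt.Println(out)
}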
+func (c *Kafka) DeleteConfigurationWithContext(ctx aws.Context, input *DeleteConfigurationInput, opts ...request.Option) (*DeleteConfigurationOutput, error) { + req, out := c.DeleteConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeCluster = "DescribeCluster" // DescribeClusterRequest generates a "aws/request.Request" representing the @@ -758,89 +1041,186 @@ func (c *Kafka) GetBootstrapBrokersWithContext(ctx aws.Context, input *GetBootst return out, req.Send() } -const opListClusterOperations = "ListClusterOperations" +const opGetCompatibleKafkaVersions = "GetCompatibleKafkaVersions" -// ListClusterOperationsRequest generates a "aws/request.Request" representing the -// client's request for the ListClusterOperations operation. The "output" return +// GetCompatibleKafkaVersionsRequest generates a "aws/request.Request" representing the +// client's request for the GetCompatibleKafkaVersions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListClusterOperations for more information on using the ListClusterOperations +// See GetCompatibleKafkaVersions for more information on using the GetCompatibleKafkaVersions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListClusterOperationsRequest method. -// req, resp := client.ListClusterOperationsRequest(params) +// // Example sending a request using the GetCompatibleKafkaVersionsRequest method. +// req, resp := client.GetCompatibleKafkaVersionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusterOperations -func (c *Kafka) ListClusterOperationsRequest(input *ListClusterOperationsInput) (req *request.Request, output *ListClusterOperationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/GetCompatibleKafkaVersions +func (c *Kafka) GetCompatibleKafkaVersionsRequest(input *GetCompatibleKafkaVersionsInput) (req *request.Request, output *GetCompatibleKafkaVersionsOutput) { op := &request.Operation{ - Name: opListClusterOperations, + Name: opGetCompatibleKafkaVersions, HTTPMethod: "GET", - HTTPPath: "/v1/clusters/{clusterArn}/operations", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/v1/compatible-kafka-versions", } if input == nil { - input = &ListClusterOperationsInput{} + input = &GetCompatibleKafkaVersionsInput{} } - output = &ListClusterOperationsOutput{} + output = &GetCompatibleKafkaVersionsOutput{} req = c.newRequest(op, input, output) return } -// ListClusterOperations API operation for Managed Streaming for Kafka. +// GetCompatibleKafkaVersions API operation for Managed Streaming for Kafka. // -// Returns a list of all the operations that have been performed on the specified -// MSK cluster. +// Gets the Apache Kafka versions to which you can update the MSK cluster. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation ListClusterOperations for usage and error information. +// API operation GetCompatibleKafkaVersions for usage and error information. // // Returned Error Types: // * BadRequestException // Returns information about an error. // -// * InternalServerErrorException +// * UnauthorizedException // Returns information about an error. // -// * UnauthorizedException +// * InternalServerErrorException // Returns information about an error. // // * ForbiddenException // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusterOperations -func (c *Kafka) ListClusterOperations(input *ListClusterOperationsInput) (*ListClusterOperationsOutput, error) { - req, out := c.ListClusterOperationsRequest(input) +// * NotFoundException +// Returns information about an error. +// +// * ServiceUnavailableException +// Returns information about an error. +// +// * TooManyRequestsException +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/GetCompatibleKafkaVersions +func (c *Kafka) GetCompatibleKafkaVersions(input *GetCompatibleKafkaVersionsInput) (*GetCompatibleKafkaVersionsOutput, error) { + req, out := c.GetCompatibleKafkaVersionsRequest(input) return out, req.Send() } -// ListClusterOperationsWithContext is the same as ListClusterOperations with the addition of +// GetCompatibleKafkaVersionsWithContext is the same as GetCompatibleKafkaVersions with the addition of // the ability to pass a context and additional request options. // -// See ListClusterOperations for details on how to use this API operation. +// See GetCompatibleKafkaVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) GetCompatibleKafkaVersionsWithContext(ctx aws.Context, input *GetCompatibleKafkaVersionsInput, opts ...request.Option) (*GetCompatibleKafkaVersionsOutput, error) { + req, out := c.GetCompatibleKafkaVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListClusterOperations = "ListClusterOperations" + +// ListClusterOperationsRequest generates a "aws/request.Request" representing the +// client's request for the ListClusterOperations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListClusterOperations for more information on using the ListClusterOperations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListClusterOperationsRequest method. 
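A hedged sketch of GetCompatibleKafkaVersions. The ClusterArn input field and the CompatibleKafkaVersions, SourceVersion, and TargetVersions output fields are assumed from shapes defined outside this excerpt, and the ARN is a placeholder.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kafka"
)

func main() {
	svc := kafka.New(session.Must(session.NewSession()))

	// ClusterArn is an assumed query-string field; the ARN is a placeholder.
	out, err := svc.GetCompatibleKafkaVersions(&kafka.GetCompatibleKafkaVersionsInput{
		ClusterArn: aws.String("arn:aws:kafka:us-east-1:111122223333:cluster/example/abc"),
	})
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	// Output field names below are assumptions, not confirmed by this hunk.
	for _, v := range out.CompatibleKafkaVersions {
		fmt.Println(aws.StringValue(v.SourceVersion), "->", aws.StringValueSlice(v.TargetVersions))
	}
}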
+// req, resp := client.ListClusterOperationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusterOperations +func (c *Kafka) ListClusterOperationsRequest(input *ListClusterOperationsInput) (req *request.Request, output *ListClusterOperationsOutput) { + op := &request.Operation{ + Name: opListClusterOperations, + HTTPMethod: "GET", + HTTPPath: "/v1/clusters/{clusterArn}/operations", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListClusterOperationsInput{} + } + + output = &ListClusterOperationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListClusterOperations API operation for Managed Streaming for Kafka. +// +// Returns a list of all the operations that have been performed on the specified +// MSK cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation ListClusterOperations for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// Returns information about an error. +// +// * InternalServerErrorException +// Returns information about an error. +// +// * UnauthorizedException +// Returns information about an error. +// +// * ForbiddenException +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListClusterOperations +func (c *Kafka) ListClusterOperations(input *ListClusterOperationsInput) (*ListClusterOperationsOutput, error) { + req, out := c.ListClusterOperationsRequest(input) + return out, req.Send() +} + +// ListClusterOperationsWithContext is the same as ListClusterOperations with the addition of +// the ability to pass a context and additional request options. +// +// See ListClusterOperations for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create @@ -1644,144 +2024,213 @@ func (c *Kafka) ListNodesPagesWithContext(ctx aws.Context, input *ListNodesInput return p.Err() } -const opListTagsForResource = "ListTagsForResource" +const opListScramSecrets = "ListScramSecrets" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// ListScramSecretsRequest generates a "aws/request.Request" representing the +// client's request for the ListScramSecrets operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See ListScramSecrets for more information on using the ListScramSecrets // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the ListScramSecretsRequest method. +// req, resp := client.ListScramSecretsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource -func (c *Kafka) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListScramSecrets +func (c *Kafka) ListScramSecretsRequest(input *ListScramSecretsInput) (req *request.Request, output *ListScramSecretsOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opListScramSecrets, HTTPMethod: "GET", - HTTPPath: "/v1/tags/{resourceArn}", + HTTPPath: "/v1/clusters/{clusterArn}/scram-secrets", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &ListTagsForResourceInput{} + input = &ListScramSecretsInput{} } - output = &ListTagsForResourceOutput{} + output = &ListScramSecretsOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for Managed Streaming for Kafka. +// ListScramSecrets API operation for Managed Streaming for Kafka. // -// Returns a list of the tags associated with the specified resource. +// Returns a list of the Scram Secrets associated with an Amazon MSK cluster. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation ListTagsForResource for usage and error information. +// API operation ListScramSecrets for usage and error information. // // Returned Error Types: -// * NotFoundException +// * BadRequestException // Returns information about an error. // -// * BadRequestException +// * UnauthorizedException // Returns information about an error. // // * InternalServerErrorException // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource -func (c *Kafka) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// * ForbiddenException +// Returns information about an error. +// +// * NotFoundException +// Returns information about an error. +// +// * ServiceUnavailableException +// Returns information about an error. +// +// * TooManyRequestsException +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListScramSecrets +func (c *Kafka) ListScramSecrets(input *ListScramSecretsInput) (*ListScramSecretsOutput, error) { + req, out := c.ListScramSecretsRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// ListScramSecretsWithContext is the same as ListScramSecrets with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation. 
+// See ListScramSecrets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Kafka) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *Kafka) ListScramSecretsWithContext(ctx aws.Context, input *ListScramSecretsInput, opts ...request.Option) (*ListScramSecretsOutput, error) { + req, out := c.ListScramSecretsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +// ListScramSecretsPages iterates over the pages of a ListScramSecrets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListScramSecrets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListScramSecrets operation. +// pageNum := 0 +// err := client.ListScramSecretsPages(params, +// func(page *kafka.ListScramSecretsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Kafka) ListScramSecretsPages(input *ListScramSecretsInput, fn func(*ListScramSecretsOutput, bool) bool) error { + return c.ListScramSecretsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// ListScramSecretsPagesWithContext same as ListScramSecretsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) ListScramSecretsPagesWithContext(ctx aws.Context, input *ListScramSecretsInput, fn func(*ListScramSecretsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListScramSecretsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListScramSecretsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListScramSecretsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See ListTagsForResource for more information on using the ListTagsForResource // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource -func (c *Kafka) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource +func (c *Kafka) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", + Name: opListTagsForResource, + HTTPMethod: "GET", HTTPPath: "/v1/tags/{resourceArn}", } if input == nil { - input = &TagResourceInput{} + input = &ListTagsForResourceInput{} } - output = &TagResourceOutput{} + output = &ListTagsForResourceOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// TagResource API operation for Managed Streaming for Kafka. +// ListTagsForResource API operation for Managed Streaming for Kafka. // -// Adds tags to the specified MSK resource. +// Returns a list of the tags associated with the specified resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation TagResource for usage and error information. +// API operation ListTagsForResource for usage and error information. // // Returned Error Types: // * NotFoundException @@ -1793,102 +2242,285 @@ func (c *Kafka) TagResourceRequest(input *TagResourceInput) (req *request.Reques // * InternalServerErrorException // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource -func (c *Kafka) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/ListTagsForResource +func (c *Kafka) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See ListTagsForResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Kafka) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *Kafka) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUntagResource = "UntagResource" +const opRebootBroker = "RebootBroker" -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// RebootBrokerRequest generates a "aws/request.Request" representing the +// client's request for the RebootBroker operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See RebootBroker for more information on using the RebootBroker // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the RebootBrokerRequest method. +// req, resp := client.RebootBrokerRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource -func (c *Kafka) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/RebootBroker +func (c *Kafka) RebootBrokerRequest(input *RebootBrokerInput) (req *request.Request, output *RebootBrokerOutput) { op := &request.Operation{ - Name: opUntagResource, - HTTPMethod: "DELETE", - HTTPPath: "/v1/tags/{resourceArn}", + Name: opRebootBroker, + HTTPMethod: "PUT", + HTTPPath: "/v1/clusters/{clusterArn}/reboot-broker", } if input == nil { - input = &UntagResourceInput{} + input = &RebootBrokerInput{} } - output = &UntagResourceOutput{} + output = &RebootBrokerOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UntagResource API operation for Managed Streaming for Kafka. +// RebootBroker API operation for Managed Streaming for Kafka. // -// Removes the tags associated with the keys that are provided in the query. +// Executes a reboot on a broker. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Managed Streaming for Kafka's -// API operation UntagResource for usage and error information. +// API operation RebootBroker for usage and error information. // // Returned Error Types: -// * NotFoundException +// * BadRequestException // Returns information about an error. // -// * BadRequestException +// * UnauthorizedException // Returns information about an error. 
// // * InternalServerErrorException // Returns information about an error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource -func (c *Kafka) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) - return out, req.Send() -} - -// UntagResourceWithContext is the same as UntagResource with the addition of -// the ability to pass a context and additional request options. -// -// See UntagResource for details on how to use this API operation. +// * ForbiddenException +// Returns information about an error. +// +// * NotFoundException +// Returns information about an error. +// +// * ServiceUnavailableException +// Returns information about an error. +// +// * TooManyRequestsException +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/RebootBroker +func (c *Kafka) RebootBroker(input *RebootBrokerInput) (*RebootBrokerOutput, error) { + req, out := c.RebootBrokerRequest(input) + return out, req.Send() +} + +// RebootBrokerWithContext is the same as RebootBroker with the addition of +// the ability to pass a context and additional request options. +// +// See RebootBroker for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) RebootBrokerWithContext(ctx aws.Context, input *RebootBrokerInput, opts ...request.Option) (*RebootBrokerOutput, error) { + req, out := c.RebootBrokerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource +func (c *Kafka) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/v1/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Managed Streaming for Kafka. +// +// Adds tags to the specified MSK resource. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * NotFoundException +// Returns information about an error. +// +// * BadRequestException +// Returns information about an error. +// +// * InternalServerErrorException +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/TagResource +func (c *Kafka) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource +func (c *Kafka) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/v1/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Managed Streaming for Kafka. +// +// Removes the tags associated with the keys that are provided in the query. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation UntagResource for usage and error information. 
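// // A minimal sketch of tagging and then untagging an MSK cluster, assuming client is a
// // *kafka.Kafka and clusterArn holds the cluster's ARN; the Tags and TagKeys field
// // names are assumed from the MSK tagging API rather than shown in this hunk:
// //
// //    _, err := client.TagResource(&kafka.TagResourceInput{
// //        ResourceArn: aws.String(clusterArn),
// //        Tags:        map[string]*string{"environment": aws.String("test")},
// //    })
// //    if err == nil {
// //        // Remove the tag again by key.
// //        _, err = client.UntagResource(&kafka.UntagResourceInput{
// //            ResourceArn: aws.String(clusterArn),
// //            TagKeys:     []*string{aws.String("environment")},
// //        })
// //    }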
+// +// Returned Error Types: +// * NotFoundException +// Returns information about an error. +// +// * BadRequestException +// Returns information about an error. +// +// * InternalServerErrorException +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UntagResource +func (c *Kafka) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create @@ -2180,6 +2812,195 @@ func (c *Kafka) UpdateClusterConfigurationWithContext(ctx aws.Context, input *Up return out, req.Send() } +const opUpdateClusterKafkaVersion = "UpdateClusterKafkaVersion" + +// UpdateClusterKafkaVersionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateClusterKafkaVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateClusterKafkaVersion for more information on using the UpdateClusterKafkaVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateClusterKafkaVersionRequest method. +// req, resp := client.UpdateClusterKafkaVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateClusterKafkaVersion +func (c *Kafka) UpdateClusterKafkaVersionRequest(input *UpdateClusterKafkaVersionInput) (req *request.Request, output *UpdateClusterKafkaVersionOutput) { + op := &request.Operation{ + Name: opUpdateClusterKafkaVersion, + HTTPMethod: "PUT", + HTTPPath: "/v1/clusters/{clusterArn}/version", + } + + if input == nil { + input = &UpdateClusterKafkaVersionInput{} + } + + output = &UpdateClusterKafkaVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateClusterKafkaVersion API operation for Managed Streaming for Kafka. +// +// Updates the Apache Kafka version for the cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation UpdateClusterKafkaVersion for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// Returns information about an error. +// +// * UnauthorizedException +// Returns information about an error. +// +// * InternalServerErrorException +// Returns information about an error. +// +// * ForbiddenException +// Returns information about an error. +// +// * NotFoundException +// Returns information about an error. 
+// +// * ServiceUnavailableException +// Returns information about an error. +// +// * TooManyRequestsException +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateClusterKafkaVersion +func (c *Kafka) UpdateClusterKafkaVersion(input *UpdateClusterKafkaVersionInput) (*UpdateClusterKafkaVersionOutput, error) { + req, out := c.UpdateClusterKafkaVersionRequest(input) + return out, req.Send() +} + +// UpdateClusterKafkaVersionWithContext is the same as UpdateClusterKafkaVersion with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateClusterKafkaVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) UpdateClusterKafkaVersionWithContext(ctx aws.Context, input *UpdateClusterKafkaVersionInput, opts ...request.Option) (*UpdateClusterKafkaVersionOutput, error) { + req, out := c.UpdateClusterKafkaVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateConfiguration = "UpdateConfiguration" + +// UpdateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateConfiguration for more information on using the UpdateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateConfigurationRequest method. +// req, resp := client.UpdateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateConfiguration +func (c *Kafka) UpdateConfigurationRequest(input *UpdateConfigurationInput) (req *request.Request, output *UpdateConfigurationOutput) { + op := &request.Operation{ + Name: opUpdateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/v1/configurations/{arn}", + } + + if input == nil { + input = &UpdateConfigurationInput{} + } + + output = &UpdateConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateConfiguration API operation for Managed Streaming for Kafka. +// +// Updates an existing MSK configuration. The configuration must be in the Active +// state. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation UpdateConfiguration for usage and error information. +// +// Returned Error Types: +// * ServiceUnavailableException +// Returns information about an error. +// +// * BadRequestException +// Returns information about an error. 
+// +// * UnauthorizedException +// Returns information about an error. +// +// * InternalServerErrorException +// Returns information about an error. +// +// * ForbiddenException +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateConfiguration +func (c *Kafka) UpdateConfiguration(input *UpdateConfigurationInput) (*UpdateConfigurationOutput, error) { + req, out := c.UpdateConfigurationRequest(input) + return out, req.Send() +} + +// UpdateConfigurationWithContext is the same as UpdateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) UpdateConfigurationWithContext(ctx aws.Context, input *UpdateConfigurationInput, opts ...request.Option) (*UpdateConfigurationOutput, error) { + req, out := c.UpdateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateMonitoring = "UpdateMonitoring" // UpdateMonitoringRequest generates a "aws/request.Request" representing the @@ -2222,113 +3043,287 @@ func (c *Kafka) UpdateMonitoringRequest(input *UpdateMonitoringInput) (req *requ return } -// UpdateMonitoring API operation for Managed Streaming for Kafka. -// -// Updates the monitoring settings for the cluster. You can use this operation -// to specify which Apache Kafka metrics you want Amazon MSK to send to Amazon -// CloudWatch. You can also specify settings for open monitoring with Prometheus. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Managed Streaming for Kafka's -// API operation UpdateMonitoring for usage and error information. -// -// Returned Error Types: -// * ServiceUnavailableException -// Returns information about an error. -// -// * BadRequestException -// Returns information about an error. -// -// * UnauthorizedException -// Returns information about an error. -// -// * InternalServerErrorException -// Returns information about an error. -// -// * ForbiddenException -// Returns information about an error. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateMonitoring -func (c *Kafka) UpdateMonitoring(input *UpdateMonitoringInput) (*UpdateMonitoringOutput, error) { - req, out := c.UpdateMonitoringRequest(input) - return out, req.Send() +// UpdateMonitoring API operation for Managed Streaming for Kafka. +// +// Updates the monitoring settings for the cluster. You can use this operation +// to specify which Apache Kafka metrics you want Amazon MSK to send to Amazon +// CloudWatch. You can also specify settings for open monitoring with Prometheus. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation UpdateMonitoring for usage and error information. 
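// // A minimal sketch of switching a cluster to per-broker CloudWatch metrics, assuming
// // client is a *kafka.Kafka; the ClusterArn, CurrentVersion and EnhancedMonitoring
// // field names and the "PER_BROKER" value are assumed from the MSK UpdateMonitoring
// // API, and the ARN and version strings are placeholders:
// //
// //    out, err := client.UpdateMonitoring(&kafka.UpdateMonitoringInput{
// //        ClusterArn:         aws.String(clusterArn),
// //        CurrentVersion:     aws.String("K3AEGXETSR30VB"), // current cluster version (placeholder)
// //        EnhancedMonitoring: aws.String("PER_BROKER"),
// //    })
// //    if err == nil {
// //        fmt.Println(out)
// //    }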
+// +// Returned Error Types: +// * ServiceUnavailableException +// Returns information about an error. +// +// * BadRequestException +// Returns information about an error. +// +// * UnauthorizedException +// Returns information about an error. +// +// * InternalServerErrorException +// Returns information about an error. +// +// * ForbiddenException +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateMonitoring +func (c *Kafka) UpdateMonitoring(input *UpdateMonitoringInput) (*UpdateMonitoringOutput, error) { + req, out := c.UpdateMonitoringRequest(input) + return out, req.Send() +} + +// UpdateMonitoringWithContext is the same as UpdateMonitoring with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateMonitoring for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) UpdateMonitoringWithContext(ctx aws.Context, input *UpdateMonitoringInput, opts ...request.Option) (*UpdateMonitoringOutput, error) { + req, out := c.UpdateMonitoringRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Returns information about an error. +type BadRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + InvalidParameter *string `locationName:"invalidParameter" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BadRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BadRequestException) GoString() string { + return s.String() +} + +func newErrorBadRequestException(v protocol.ResponseMetadata) error { + return &BadRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *BadRequestException) Code() string { + return "BadRequestException" +} + +// Message returns the exception's message. +func (s *BadRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BadRequestException) OrigErr() error { + return nil +} + +func (s *BadRequestException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Request body for BatchAssociateScramSecret. +type BatchAssociateScramSecretInput struct { + _ struct{} `type:"structure"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + // List of AWS Secrets Manager secret ARNs. 
+ // + // SecretArnList is a required field + SecretArnList []*string `locationName:"secretArnList" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchAssociateScramSecretInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchAssociateScramSecretInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchAssociateScramSecretInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchAssociateScramSecretInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + if s.SecretArnList == nil { + invalidParams.Add(request.NewErrParamRequired("SecretArnList")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *BatchAssociateScramSecretInput) SetClusterArn(v string) *BatchAssociateScramSecretInput { + s.ClusterArn = &v + return s +} + +// SetSecretArnList sets the SecretArnList field's value. +func (s *BatchAssociateScramSecretInput) SetSecretArnList(v []*string) *BatchAssociateScramSecretInput { + s.SecretArnList = v + return s +} + +// Response body for BatchAssociateScramSecret. +type BatchAssociateScramSecretOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // List of errors when associating secrets to cluster. + UnprocessedScramSecrets []*UnprocessedScramSecret `locationName:"unprocessedScramSecrets" type:"list"` +} + +// String returns the string representation +func (s BatchAssociateScramSecretOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchAssociateScramSecretOutput) GoString() string { + return s.String() } -// UpdateMonitoringWithContext is the same as UpdateMonitoring with the addition of -// the ability to pass a context and additional request options. -// -// See UpdateMonitoring for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *Kafka) UpdateMonitoringWithContext(ctx aws.Context, input *UpdateMonitoringInput, opts ...request.Option) (*UpdateMonitoringOutput, error) { - req, out := c.UpdateMonitoringRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() +// SetClusterArn sets the ClusterArn field's value. +func (s *BatchAssociateScramSecretOutput) SetClusterArn(v string) *BatchAssociateScramSecretOutput { + s.ClusterArn = &v + return s } -// Returns information about an error. -type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// SetUnprocessedScramSecrets sets the UnprocessedScramSecrets field's value. 
+func (s *BatchAssociateScramSecretOutput) SetUnprocessedScramSecrets(v []*UnprocessedScramSecret) *BatchAssociateScramSecretOutput { + s.UnprocessedScramSecrets = v + return s +} - InvalidParameter *string `locationName:"invalidParameter" type:"string"` +// Request body for BatchDisassociateScramSecret. +type BatchDisassociateScramSecretInput struct { + _ struct{} `type:"structure"` - Message_ *string `locationName:"message" type:"string"` + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + // List of AWS Secrets Manager secret ARNs. + // + // SecretArnList is a required field + SecretArnList []*string `locationName:"secretArnList" type:"list" required:"true"` } // String returns the string representation -func (s BadRequestException) String() string { +func (s BatchDisassociateScramSecretInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BadRequestException) GoString() string { +func (s BatchDisassociateScramSecretInput) GoString() string { return s.String() } -func newErrorBadRequestException(v protocol.ResponseMetadata) error { - return &BadRequestException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchDisassociateScramSecretInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchDisassociateScramSecretInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + if s.SecretArnList == nil { + invalidParams.Add(request.NewErrParamRequired("SecretArnList")) + } + + if invalidParams.Len() > 0 { + return invalidParams } + return nil } -// Code returns the exception type name. -func (s BadRequestException) Code() string { - return "BadRequestException" +// SetClusterArn sets the ClusterArn field's value. +func (s *BatchDisassociateScramSecretInput) SetClusterArn(v string) *BatchDisassociateScramSecretInput { + s.ClusterArn = &v + return s } -// Message returns the exception's message. -func (s BadRequestException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetSecretArnList sets the SecretArnList field's value. +func (s *BatchDisassociateScramSecretInput) SetSecretArnList(v []*string) *BatchDisassociateScramSecretInput { + s.SecretArnList = v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { - return nil +// Response body for BatchDisassociateScramSecret. +type BatchDisassociateScramSecretOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // List of errors when disassociating secrets to cluster. + UnprocessedScramSecrets []*UnprocessedScramSecret `locationName:"unprocessedScramSecrets" type:"list"` } -func (s BadRequestException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// String returns the string representation +func (s BatchDisassociateScramSecretOutput) String() string { + return awsutil.Prettify(s) } -// Status code returns the HTTP status code for the request's response error. 
-func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +// GoString returns the string representation +func (s BatchDisassociateScramSecretOutput) GoString() string { + return s.String() } -// RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +// SetClusterArn sets the ClusterArn field's value. +func (s *BatchDisassociateScramSecretOutput) SetClusterArn(v string) *BatchDisassociateScramSecretOutput { + s.ClusterArn = &v + return s +} + +// SetUnprocessedScramSecrets sets the UnprocessedScramSecrets field's value. +func (s *BatchDisassociateScramSecretOutput) SetUnprocessedScramSecrets(v []*UnprocessedScramSecret) *BatchDisassociateScramSecretOutput { + s.UnprocessedScramSecrets = v + return s } // Specifies the EBS volume upgrade information. The broker identifier must @@ -2670,6 +3665,8 @@ func (s *BrokerSoftwareInfo) SetKafkaVersion(v string) *BrokerSoftwareInfo { type ClientAuthentication struct { _ struct{} `type:"structure"` + Sasl *Sasl `locationName:"sasl" type:"structure"` + // Details for ClientAuthentication using TLS. Tls *Tls `locationName:"tls" type:"structure"` } @@ -2684,6 +3681,12 @@ func (s ClientAuthentication) GoString() string { return s.String() } +// SetSasl sets the Sasl field's value. +func (s *ClientAuthentication) SetSasl(v *Sasl) *ClientAuthentication { + s.Sasl = v + return s +} + // SetTls sets the Tls field's value. func (s *ClientAuthentication) SetTls(v *Tls) *ClientAuthentication { s.Tls = v @@ -2797,6 +3800,9 @@ type ClusterInfo struct { // The connection string to use to connect to the Apache ZooKeeper cluster. ZookeeperConnectString *string `locationName:"zookeeperConnectString" type:"string"` + + // The connection string to use to connect to zookeeper cluster on Tls port. + ZookeeperConnectStringTls *string `locationName:"zookeeperConnectStringTls" type:"string"` } // String returns the string representation @@ -2905,6 +3911,12 @@ func (s *ClusterInfo) SetZookeeperConnectString(v string) *ClusterInfo { return s } +// SetZookeeperConnectStringTls sets the ZookeeperConnectStringTls field's value. +func (s *ClusterInfo) SetZookeeperConnectStringTls(v string) *ClusterInfo { + s.ZookeeperConnectStringTls = &v + return s +} + // Returns information about a cluster operation. type ClusterOperationInfo struct { _ struct{} `type:"structure"` @@ -2930,6 +3942,9 @@ type ClusterOperationInfo struct { // State of the cluster operation. OperationState *string `locationName:"operationState" type:"string"` + // Steps completed during the operation. + OperationSteps []*ClusterOperationStep `locationName:"operationSteps" type:"list"` + // Type of the cluster operation. OperationType *string `locationName:"operationType" type:"string"` @@ -2992,6 +4007,12 @@ func (s *ClusterOperationInfo) SetOperationState(v string) *ClusterOperationInfo return s } +// SetOperationSteps sets the OperationSteps field's value. +func (s *ClusterOperationInfo) SetOperationSteps(v []*ClusterOperationStep) *ClusterOperationInfo { + s.OperationSteps = v + return s +} + // SetOperationType sets the OperationType field's value. func (s *ClusterOperationInfo) SetOperationType(v string) *ClusterOperationInfo { s.OperationType = &v @@ -3010,6 +4031,94 @@ func (s *ClusterOperationInfo) SetTargetClusterInfo(v *MutableClusterInfo) *Clus return s } +// Step taken during a cluster operation. 
+type ClusterOperationStep struct { + _ struct{} `type:"structure"` + + // Information about the step and its status. + StepInfo *ClusterOperationStepInfo `locationName:"stepInfo" type:"structure"` + + // The name of the step. + StepName *string `locationName:"stepName" type:"string"` +} + +// String returns the string representation +func (s ClusterOperationStep) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterOperationStep) GoString() string { + return s.String() +} + +// SetStepInfo sets the StepInfo field's value. +func (s *ClusterOperationStep) SetStepInfo(v *ClusterOperationStepInfo) *ClusterOperationStep { + s.StepInfo = v + return s +} + +// SetStepName sets the StepName field's value. +func (s *ClusterOperationStep) SetStepName(v string) *ClusterOperationStep { + s.StepName = &v + return s +} + +// State information about the operation step. +type ClusterOperationStepInfo struct { + _ struct{} `type:"structure"` + + // The steps current status. + StepStatus *string `locationName:"stepStatus" type:"string"` +} + +// String returns the string representation +func (s ClusterOperationStepInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClusterOperationStepInfo) GoString() string { + return s.String() +} + +// SetStepStatus sets the StepStatus field's value. +func (s *ClusterOperationStepInfo) SetStepStatus(v string) *ClusterOperationStepInfo { + s.StepStatus = &v + return s +} + +// Contains source Kafka versions and compatible target Kafka versions. +type CompatibleKafkaVersion struct { + _ struct{} `type:"structure"` + + SourceVersion *string `locationName:"sourceVersion" type:"string"` + + TargetVersions []*string `locationName:"targetVersions" type:"list"` +} + +// String returns the string representation +func (s CompatibleKafkaVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompatibleKafkaVersion) GoString() string { + return s.String() +} + +// SetSourceVersion sets the SourceVersion field's value. +func (s *CompatibleKafkaVersion) SetSourceVersion(v string) *CompatibleKafkaVersion { + s.SourceVersion = &v + return s +} + +// SetTargetVersions sets the TargetVersions field's value. +func (s *CompatibleKafkaVersion) SetTargetVersions(v []*string) *CompatibleKafkaVersion { + s.TargetVersions = v + return s +} + // Represents an MSK Configuration. type Configuration struct { _ struct{} `type:"structure"` @@ -3044,6 +4153,11 @@ type Configuration struct { // // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` + + // The state of a configuration. + // + // State is a required field + State *string `locationName:"state" type:"string" required:"true" enum:"ConfigurationState"` } // String returns the string representation @@ -3092,6 +4206,12 @@ func (s *Configuration) SetName(v string) *Configuration { return s } +// SetState sets the State field's value. +func (s *Configuration) SetState(v string) *Configuration { + s.State = &v + return s +} + // Specifies the configuration to use for the brokers. type ConfigurationInfo struct { _ struct{} `type:"structure"` @@ -3193,8 +4313,8 @@ func (s *ConfigurationRevision) SetRevision(v int64) *ConfigurationRevision { // Returns information about an error. 
type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` InvalidParameter *string `locationName:"invalidParameter" type:"string"` @@ -3213,17 +4333,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3231,22 +4351,22 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } // Creates a cluster. @@ -3478,9 +4598,7 @@ type CreateConfigurationInput struct { Description *string `locationName:"description" type:"string"` // The versions of Apache Kafka with which you can use this MSK configuration. - // - // KafkaVersions is a required field - KafkaVersions []*string `locationName:"kafkaVersions" type:"list" required:"true"` + KafkaVersions []*string `locationName:"kafkaVersions" type:"list"` // The name of the configuration. Configuration names are strings that match // the regex "^[0-9A-Za-z-]+$". @@ -3507,9 +4625,6 @@ func (s CreateConfigurationInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateConfigurationInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateConfigurationInput"} - if s.KafkaVersions == nil { - invalidParams.Add(request.NewErrParamRequired("KafkaVersions")) - } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -3563,6 +4678,10 @@ type CreateConfigurationOutput struct { // The name of the configuration. Configuration names are strings that match // the regex "^[0-9A-Za-z-]+$". Name *string `locationName:"name" type:"string"` + + // The state of the configuration. The possible states are ACTIVE, DELETING + // and DELETE_FAILED. + State *string `locationName:"state" type:"string" enum:"ConfigurationState"` } // String returns the string representation @@ -3599,6 +4718,12 @@ func (s *CreateConfigurationOutput) SetName(v string) *CreateConfigurationOutput return s } +// SetState sets the State field's value. 
+func (s *CreateConfigurationOutput) SetState(v string) *CreateConfigurationOutput { + s.State = &v + return s +} + type DeleteClusterInput struct { _ struct{} `type:"structure"` @@ -3663,18 +4788,94 @@ func (s DeleteClusterOutput) String() string { } // GoString returns the string representation -func (s DeleteClusterOutput) GoString() string { +func (s DeleteClusterOutput) GoString() string { + return s.String() +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *DeleteClusterOutput) SetClusterArn(v string) *DeleteClusterOutput { + s.ClusterArn = &v + return s +} + +// SetState sets the State field's value. +func (s *DeleteClusterOutput) SetState(v string) *DeleteClusterOutput { + s.State = &v + return s +} + +// Request body for DeleteConfiguration. +type DeleteConfigurationInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the configuration. + // + // Arn is a required field + Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConfigurationInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *DeleteConfigurationInput) SetArn(v string) *DeleteConfigurationInput { + s.Arn = &v + return s +} + +// Response body for DeleteConfiguration. +type DeleteConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the configuration. + Arn *string `locationName:"arn" type:"string"` + + // The state of the configuration. The possible states are ACTIVE, DELETING + // and DELETE_FAILED. + State *string `locationName:"state" type:"string" enum:"ConfigurationState"` +} + +// String returns the string representation +func (s DeleteConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConfigurationOutput) GoString() string { return s.String() } -// SetClusterArn sets the ClusterArn field's value. -func (s *DeleteClusterOutput) SetClusterArn(v string) *DeleteClusterOutput { - s.ClusterArn = &v +// SetArn sets the Arn field's value. +func (s *DeleteConfigurationOutput) SetArn(v string) *DeleteConfigurationOutput { + s.Arn = &v return s } // SetState sets the State field's value. -func (s *DeleteClusterOutput) SetState(v string) *DeleteClusterOutput { +func (s *DeleteConfigurationOutput) SetState(v string) *DeleteConfigurationOutput { s.State = &v return s } @@ -3866,6 +5067,10 @@ type DescribeConfigurationOutput struct { // The name of the configuration. Configuration names are strings that match // the regex "^[0-9A-Za-z-]+$". Name *string `locationName:"name" type:"string"` + + // The state of the configuration. The possible states are ACTIVE, DELETING + // and DELETE_FAILED. 
+ State *string `locationName:"state" type:"string" enum:"ConfigurationState"` } // String returns the string representation @@ -3914,6 +5119,12 @@ func (s *DescribeConfigurationOutput) SetName(v string) *DescribeConfigurationOu return s } +// SetState sets the State field's value. +func (s *DescribeConfigurationOutput) SetState(v string) *DescribeConfigurationOutput { + s.State = &v + return s +} + type DescribeConfigurationRevisionInput struct { _ struct{} `type:"structure"` @@ -4286,8 +5497,8 @@ func (s *Firehose) SetEnabled(v bool) *Firehose { // Returns information about an error. type ForbiddenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` InvalidParameter *string `locationName:"invalidParameter" type:"string"` @@ -4306,17 +5517,17 @@ func (s ForbiddenException) GoString() string { func newErrorForbiddenException(v protocol.ResponseMetadata) error { return &ForbiddenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ForbiddenException) Code() string { +func (s *ForbiddenException) Code() string { return "ForbiddenException" } // Message returns the exception's message. -func (s ForbiddenException) Message() string { +func (s *ForbiddenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4324,22 +5535,22 @@ func (s ForbiddenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ForbiddenException) OrigErr() error { +func (s *ForbiddenException) OrigErr() error { return nil } -func (s ForbiddenException) Error() string { +func (s *ForbiddenException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ForbiddenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ForbiddenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ForbiddenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ForbiddenException) RequestID() string { + return s.RespMetadata.RequestID } type GetBootstrapBrokersInput struct { @@ -4388,6 +5599,11 @@ type GetBootstrapBrokersOutput struct { // A string containing one or more hostname:port pairs. BootstrapBrokerString *string `locationName:"bootstrapBrokerString" type:"string"` + // A string containing one or more DNS names (or IP) and SASL SCRAM port pairs. + // The following is an example. + // { "BootstrapBrokerStringSaslScram": "b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9096,b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9096,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9096"} + BootstrapBrokerStringSaslScram *string `locationName:"bootstrapBrokerStringSaslScram" type:"string"` + // A string containing one or more DNS names (or IP) and TLS port pairs. The // following is an example. 
// { "BootstrapBrokerStringTls": "b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094"} @@ -4410,16 +5626,68 @@ func (s *GetBootstrapBrokersOutput) SetBootstrapBrokerString(v string) *GetBoots return s } +// SetBootstrapBrokerStringSaslScram sets the BootstrapBrokerStringSaslScram field's value. +func (s *GetBootstrapBrokersOutput) SetBootstrapBrokerStringSaslScram(v string) *GetBootstrapBrokersOutput { + s.BootstrapBrokerStringSaslScram = &v + return s +} + // SetBootstrapBrokerStringTls sets the BootstrapBrokerStringTls field's value. func (s *GetBootstrapBrokersOutput) SetBootstrapBrokerStringTls(v string) *GetBootstrapBrokersOutput { s.BootstrapBrokerStringTls = &v return s } +type GetCompatibleKafkaVersionsInput struct { + _ struct{} `type:"structure"` + + ClusterArn *string `location:"querystring" locationName:"clusterArn" type:"string"` +} + +// String returns the string representation +func (s GetCompatibleKafkaVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCompatibleKafkaVersionsInput) GoString() string { + return s.String() +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *GetCompatibleKafkaVersionsInput) SetClusterArn(v string) *GetCompatibleKafkaVersionsInput { + s.ClusterArn = &v + return s +} + +// Response body for GetCompatibleKafkaVersions. +type GetCompatibleKafkaVersionsOutput struct { + _ struct{} `type:"structure"` + + // A list of CompatibleKafkaVersion objects. + CompatibleKafkaVersions []*CompatibleKafkaVersion `locationName:"compatibleKafkaVersions" type:"list"` +} + +// String returns the string representation +func (s GetCompatibleKafkaVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCompatibleKafkaVersionsOutput) GoString() string { + return s.String() +} + +// SetCompatibleKafkaVersions sets the CompatibleKafkaVersions field's value. +func (s *GetCompatibleKafkaVersionsOutput) SetCompatibleKafkaVersions(v []*CompatibleKafkaVersion) *GetCompatibleKafkaVersionsOutput { + s.CompatibleKafkaVersions = v + return s +} + // Returns information about an error. type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` InvalidParameter *string `locationName:"invalidParameter" type:"string"` @@ -4438,17 +5706,17 @@ func (s InternalServerErrorException) GoString() string { func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { return &InternalServerErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerErrorException) Code() string { +func (s *InternalServerErrorException) Code() string { return "InternalServerErrorException" } // Message returns the exception's message. -func (s InternalServerErrorException) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4456,22 +5724,22 @@ func (s InternalServerErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InternalServerErrorException) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s InternalServerErrorException) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID } // Indicates whether you want to enable or disable the JMX Exporter. @@ -5092,6 +6360,97 @@ func (s *ListNodesOutput) SetNodeInfoList(v []*NodeInfo) *ListNodesOutput { return s } +type ListScramSecretsInput struct { + _ struct{} `type:"structure"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListScramSecretsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListScramSecretsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListScramSecretsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListScramSecretsInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *ListScramSecretsInput) SetClusterArn(v string) *ListScramSecretsInput { + s.ClusterArn = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListScramSecretsInput) SetMaxResults(v int64) *ListScramSecretsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListScramSecretsInput) SetNextToken(v string) *ListScramSecretsInput { + s.NextToken = &v + return s +} + +// Information about scram secrets associated to the cluster. +type ListScramSecretsOutput struct { + _ struct{} `type:"structure"` + + // Paginated results marker. + NextToken *string `locationName:"nextToken" type:"string"` + + // The list of scram secrets associated with the cluster. + SecretArnList []*string `locationName:"secretArnList" type:"list"` +} + +// String returns the string representation +func (s ListScramSecretsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListScramSecretsOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListScramSecretsOutput) SetNextToken(v string) *ListScramSecretsOutput { + s.NextToken = &v + return s +} + +// SetSecretArnList sets the SecretArnList field's value. +func (s *ListScramSecretsOutput) SetSecretArnList(v []*string) *ListScramSecretsOutput { + s.SecretArnList = v + return s +} + type ListTagsForResourceInput struct { _ struct{} `type:"structure"` @@ -5216,6 +6575,8 @@ type MutableClusterInfo struct { // CloudWatch for this cluster. EnhancedMonitoring *string `locationName:"enhancedMonitoring" type:"string" enum:"EnhancedMonitoring"` + KafkaVersion *string `locationName:"kafkaVersion" type:"string"` + // LoggingInfo details. LoggingInfo *LoggingInfo `locationName:"loggingInfo" type:"structure"` @@ -5254,6 +6615,12 @@ func (s *MutableClusterInfo) SetEnhancedMonitoring(v string) *MutableClusterInfo return s } +// SetKafkaVersion sets the KafkaVersion field's value. +func (s *MutableClusterInfo) SetKafkaVersion(v string) *MutableClusterInfo { + s.KafkaVersion = &v + return s +} + // SetLoggingInfo sets the LoggingInfo field's value. func (s *MutableClusterInfo) SetLoggingInfo(v *LoggingInfo) *MutableClusterInfo { s.LoggingInfo = v @@ -5408,8 +6775,8 @@ func (s *NodeInfo) SetZookeeperNodeInfo(v *ZookeeperNodeInfo) *NodeInfo { // Returns information about an error. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` InvalidParameter *string `locationName:"invalidParameter" type:"string"` @@ -5428,17 +6795,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5446,22 +6813,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // JMX and Node monitoring for the MSK cluster. @@ -5620,6 +6987,93 @@ func (s *PrometheusInfo) SetNodeExporter(v *NodeExporterInfo) *PrometheusInfo { return s } +// Request body for RebootBrokerNode action. +type RebootBrokerInput struct { + _ struct{} `type:"structure"` + + // The list of broker ids to be rebooted. 
+ // + // BrokerIds is a required field + BrokerIds []*string `locationName:"brokerIds" type:"list" required:"true"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s RebootBrokerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootBrokerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RebootBrokerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RebootBrokerInput"} + if s.BrokerIds == nil { + invalidParams.Add(request.NewErrParamRequired("BrokerIds")) + } + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBrokerIds sets the BrokerIds field's value. +func (s *RebootBrokerInput) SetBrokerIds(v []*string) *RebootBrokerInput { + s.BrokerIds = v + return s +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *RebootBrokerInput) SetClusterArn(v string) *RebootBrokerInput { + s.ClusterArn = &v + return s +} + +// Response body for RebootBrokers action. +type RebootBrokerOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The Amazon Resource Name (ARN) of the cluster operation. + ClusterOperationArn *string `locationName:"clusterOperationArn" type:"string"` +} + +// String returns the string representation +func (s RebootBrokerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RebootBrokerOutput) GoString() string { + return s.String() +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *RebootBrokerOutput) SetClusterArn(v string) *RebootBrokerOutput { + s.ClusterArn = &v + return s +} + +// SetClusterOperationArn sets the ClusterOperationArn field's value. +func (s *RebootBrokerOutput) SetClusterOperationArn(v string) *RebootBrokerOutput { + s.ClusterOperationArn = &v + return s +} + // The details of the Amazon S3 destination for broker logs. type S3 struct { _ struct{} `type:"structure"` @@ -5632,55 +7086,99 @@ type S3 struct { // Enabled is a required field Enabled *bool `locationName:"enabled" type:"boolean" required:"true"` - // The S3 prefix that is the destination for broker logs. - Prefix *string `locationName:"prefix" type:"string"` + // The S3 prefix that is the destination for broker logs. + Prefix *string `locationName:"prefix" type:"string"` +} + +// String returns the string representation +func (s S3) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *S3) SetBucket(v string) *S3 { + s.Bucket = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *S3) SetEnabled(v bool) *S3 { + s.Enabled = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *S3) SetPrefix(v string) *S3 { + s.Prefix = &v + return s +} + +type Sasl struct { + _ struct{} `type:"structure"` + + Scram *Scram `locationName:"scram" type:"structure"` +} + +// String returns the string representation +func (s Sasl) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Sasl) GoString() string { + return s.String() +} + +// SetScram sets the Scram field's value. +func (s *Sasl) SetScram(v *Scram) *Sasl { + s.Scram = v + return s +} + +type Scram struct { + _ struct{} `type:"structure"` + + Enabled *bool `locationName:"enabled" type:"boolean"` } // String returns the string representation -func (s S3) String() string { +func (s Scram) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s S3) GoString() string { +func (s Scram) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *S3) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "S3"} - if s.Enabled == nil { - invalidParams.Add(request.NewErrParamRequired("Enabled")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetBucket sets the Bucket field's value. -func (s *S3) SetBucket(v string) *S3 { - s.Bucket = &v - return s -} - // SetEnabled sets the Enabled field's value. -func (s *S3) SetEnabled(v bool) *S3 { +func (s *Scram) SetEnabled(v bool) *Scram { s.Enabled = &v return s } -// SetPrefix sets the Prefix field's value. -func (s *S3) SetPrefix(v string) *S3 { - s.Prefix = &v - return s -} - // Returns information about an error. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` InvalidParameter *string `locationName:"invalidParameter" type:"string"` @@ -5699,17 +7197,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5717,22 +7215,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about storage volumes attached to MSK broker nodes. @@ -5868,8 +7366,8 @@ func (s *Tls) SetCertificateAuthorityArnList(v []*string) *Tls { // Returns information about an error. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` InvalidParameter *string `locationName:"invalidParameter" type:"string"` @@ -5888,17 +7386,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5906,28 +7404,28 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about an error. type UnauthorizedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` InvalidParameter *string `locationName:"invalidParameter" type:"string"` @@ -5946,17 +7444,17 @@ func (s UnauthorizedException) GoString() string { func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { return &UnauthorizedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnauthorizedException) Code() string { +func (s *UnauthorizedException) Code() string { return "UnauthorizedException" } // Message returns the exception's message. -func (s UnauthorizedException) Message() string { +func (s *UnauthorizedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5964,22 +7462,60 @@ func (s UnauthorizedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s UnauthorizedException) OrigErr() error { +func (s *UnauthorizedException) OrigErr() error { return nil } -func (s UnauthorizedException) Error() string { +func (s *UnauthorizedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UnauthorizedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnauthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnauthorizedException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnauthorizedException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UnprocessedScramSecret struct { + _ struct{} `type:"structure"` + + ErrorCode *string `locationName:"errorCode" type:"string"` + + ErrorMessage *string `locationName:"errorMessage" type:"string"` + + SecretArn *string `locationName:"secretArn" type:"string"` +} + +// String returns the string representation +func (s UnprocessedScramSecret) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UnprocessedScramSecret) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *UnprocessedScramSecret) SetErrorCode(v string) *UnprocessedScramSecret { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *UnprocessedScramSecret) SetErrorMessage(v string) *UnprocessedScramSecret { + s.ErrorMessage = &v + return s +} + +// SetSecretArn sets the SecretArn field's value. +func (s *UnprocessedScramSecret) SetSecretArn(v string) *UnprocessedScramSecret { + s.SecretArn = &v + return s } type UntagResourceInput struct { @@ -6376,6 +7912,219 @@ func (s *UpdateClusterConfigurationOutput) SetClusterOperationArn(v string) *Upd return s } +// Request body for UpdateClusterKafkaVersion. +type UpdateClusterKafkaVersionInput struct { + _ struct{} `type:"structure"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + // Specifies the configuration to use for the brokers. + ConfigurationInfo *ConfigurationInfo `locationName:"configurationInfo" type:"structure"` + + // Current cluster version. + // + // CurrentVersion is a required field + CurrentVersion *string `locationName:"currentVersion" type:"string" required:"true"` + + // Target Kafka version. + // + // TargetKafkaVersion is a required field + TargetKafkaVersion *string `locationName:"targetKafkaVersion" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateClusterKafkaVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateClusterKafkaVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateClusterKafkaVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateClusterKafkaVersionInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + if s.CurrentVersion == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentVersion")) + } + if s.TargetKafkaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("TargetKafkaVersion")) + } + if s.ConfigurationInfo != nil { + if err := s.ConfigurationInfo.Validate(); err != nil { + invalidParams.AddNested("ConfigurationInfo", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateClusterKafkaVersionInput) SetClusterArn(v string) *UpdateClusterKafkaVersionInput { + s.ClusterArn = &v + return s +} + +// SetConfigurationInfo sets the ConfigurationInfo field's value. +func (s *UpdateClusterKafkaVersionInput) SetConfigurationInfo(v *ConfigurationInfo) *UpdateClusterKafkaVersionInput { + s.ConfigurationInfo = v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *UpdateClusterKafkaVersionInput) SetCurrentVersion(v string) *UpdateClusterKafkaVersionInput { + s.CurrentVersion = &v + return s +} + +// SetTargetKafkaVersion sets the TargetKafkaVersion field's value. +func (s *UpdateClusterKafkaVersionInput) SetTargetKafkaVersion(v string) *UpdateClusterKafkaVersionInput { + s.TargetKafkaVersion = &v + return s +} + +// Response body for UpdateClusterKafkaVersion. +type UpdateClusterKafkaVersionOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The Amazon Resource Name (ARN) of the cluster operation. + ClusterOperationArn *string `locationName:"clusterOperationArn" type:"string"` +} + +// String returns the string representation +func (s UpdateClusterKafkaVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateClusterKafkaVersionOutput) GoString() string { + return s.String() +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateClusterKafkaVersionOutput) SetClusterArn(v string) *UpdateClusterKafkaVersionOutput { + s.ClusterArn = &v + return s +} + +// SetClusterOperationArn sets the ClusterOperationArn field's value. +func (s *UpdateClusterKafkaVersionOutput) SetClusterOperationArn(v string) *UpdateClusterKafkaVersionOutput { + s.ClusterOperationArn = &v + return s +} + +// Request body for UpdateConfiguration. +type UpdateConfigurationInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the configuration. + // + // Arn is a required field + Arn *string `location:"uri" locationName:"arn" type:"string" required:"true"` + + // The description of the configuration. + Description *string `locationName:"description" type:"string"` + + // ServerProperties is automatically base64 encoded/decoded by the SDK. 
+ // + // ServerProperties is a required field + ServerProperties []byte `locationName:"serverProperties" type:"blob" required:"true"` +} + +// String returns the string representation +func (s UpdateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateConfigurationInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + if s.ServerProperties == nil { + invalidParams.Add(request.NewErrParamRequired("ServerProperties")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *UpdateConfigurationInput) SetArn(v string) *UpdateConfigurationInput { + s.Arn = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *UpdateConfigurationInput) SetDescription(v string) *UpdateConfigurationInput { + s.Description = &v + return s +} + +// SetServerProperties sets the ServerProperties field's value. +func (s *UpdateConfigurationInput) SetServerProperties(v []byte) *UpdateConfigurationInput { + s.ServerProperties = v + return s +} + +// Response body for UpdateConfiguration. +type UpdateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the configuration. + Arn *string `locationName:"arn" type:"string"` + + // Latest revision of the configuration. + LatestRevision *ConfigurationRevision `locationName:"latestRevision" type:"structure"` +} + +// String returns the string representation +func (s UpdateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConfigurationOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *UpdateConfigurationOutput) SetArn(v string) *UpdateConfigurationOutput { + s.Arn = &v + return s +} + +// SetLatestRevision sets the LatestRevision field's value. +func (s *UpdateConfigurationOutput) SetLatestRevision(v *ConfigurationRevision) *UpdateConfigurationOutput { + s.LatestRevision = v + return s +} + // Request body for UpdateMonitoring. type UpdateMonitoringInput struct { _ struct{} `type:"structure"` @@ -6571,6 +8320,13 @@ const ( BrokerAZDistributionDefault = "DEFAULT" ) +// BrokerAZDistribution_Values returns all elements of the BrokerAZDistribution enum +func BrokerAZDistribution_Values() []string { + return []string{ + BrokerAZDistributionDefault, + } +} + // Client-broker encryption in transit setting. const ( // ClientBrokerTls is a ClientBroker enum value @@ -6583,6 +8339,15 @@ const ( ClientBrokerPlaintext = "PLAINTEXT" ) +// ClientBroker_Values returns all elements of the ClientBroker enum +func ClientBroker_Values() []string { + return []string{ + ClientBrokerTls, + ClientBrokerTlsPlaintext, + ClientBrokerPlaintext, + } +} + // The state of a Kafka cluster. 
const ( // ClusterStateActive is a ClusterState enum value @@ -6601,6 +8366,38 @@ const ( ClusterStateFailed = "FAILED" ) +// ClusterState_Values returns all elements of the ClusterState enum +func ClusterState_Values() []string { + return []string{ + ClusterStateActive, + ClusterStateCreating, + ClusterStateUpdating, + ClusterStateDeleting, + ClusterStateFailed, + } +} + +// The state of a configuration. +const ( + // ConfigurationStateActive is a ConfigurationState enum value + ConfigurationStateActive = "ACTIVE" + + // ConfigurationStateDeleting is a ConfigurationState enum value + ConfigurationStateDeleting = "DELETING" + + // ConfigurationStateDeleteFailed is a ConfigurationState enum value + ConfigurationStateDeleteFailed = "DELETE_FAILED" +) + +// ConfigurationState_Values returns all elements of the ConfigurationState enum +func ConfigurationState_Values() []string { + return []string{ + ConfigurationStateActive, + ConfigurationStateDeleting, + ConfigurationStateDeleteFailed, + } +} + // Specifies which metrics are gathered for the MSK cluster. This property has // three possible values: DEFAULT, PER_BROKER, and PER_TOPIC_PER_BROKER. For // a list of the metrics associated with each of these three levels of monitoring, @@ -6616,6 +8413,15 @@ const ( EnhancedMonitoringPerTopicPerBroker = "PER_TOPIC_PER_BROKER" ) +// EnhancedMonitoring_Values returns all elements of the EnhancedMonitoring enum +func EnhancedMonitoring_Values() []string { + return []string{ + EnhancedMonitoringDefault, + EnhancedMonitoringPerBroker, + EnhancedMonitoringPerTopicPerBroker, + } +} + // The status of a Kafka version. const ( // KafkaVersionStatusActive is a KafkaVersionStatus enum value @@ -6625,8 +8431,23 @@ const ( KafkaVersionStatusDeprecated = "DEPRECATED" ) +// KafkaVersionStatus_Values returns all elements of the KafkaVersionStatus enum +func KafkaVersionStatus_Values() []string { + return []string{ + KafkaVersionStatusActive, + KafkaVersionStatusDeprecated, + } +} + // The broker or Zookeeper node. const ( // NodeTypeBroker is a NodeType enum value NodeTypeBroker = "BROKER" ) + +// NodeType_Values returns all elements of the NodeType enum +func NodeType_Values() []string { + return []string{ + NodeTypeBroker, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go b/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go index af35910b5..959927691 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go index 96c089e58..4a24daca9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go @@ -174,8 +174,8 @@ func (c *Kinesis) CreateStreamRequest(input *CreateStreamInput) (req *request.Re // // You specify and control the number of shards that a stream is composed of. // Each shard can support reads up to five transactions per second, up to a -// maximum data read total of 2 MB per second. 
Each shard can support writes -// up to 1,000 records per second, up to a maximum data write total of 1 MB +// maximum data read total of 2 MiB per second. Each shard can support writes +// up to 1,000 records per second, up to a maximum data write total of 1 MiB // per second. If the amount of data input increases or decreases, you can add // or remove shards. // @@ -198,9 +198,9 @@ func (c *Kinesis) CreateStreamRequest(input *CreateStreamInput) (req *request.Re // * Create more shards than are authorized for your account. // // For the default shard limit for an AWS account, see Amazon Kinesis Data Streams -// Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) +// Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, -// contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html). +// contact AWS Support (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html). // // You can use DescribeStream to check the stream status, which is returned // in StreamStatus. @@ -510,7 +510,7 @@ func (c *Kinesis) DeregisterStreamConsumerRequest(input *DeregisterStreamConsume // with a given data stream. The description of a consumer contains its name // and ARN. // -// This operation has a limit of five transactions per second per account. +// This operation has a limit of five transactions per second per stream. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -700,7 +700,7 @@ func (c *Kinesis) DescribeStreamRequest(input *DescribeStreamInput) (req *reques // the record is put into the stream. // // You can limit the number of shards returned by each call. For more information, -// see Retrieving Shards from a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-retrieve-shards.html) +// see Retrieving Shards from a Stream (https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-retrieve-shards.html) // in the Amazon Kinesis Data Streams Developer Guide. // // There are no guarantees about the chronological order shards returned. To @@ -851,7 +851,7 @@ func (c *Kinesis) DescribeStreamConsumerRequest(input *DescribeStreamConsumerInp // the ListStreamConsumers operation to get a list of the descriptions of all // the consumers that are currently registered with a given data stream. // -// This operation has a limit of 20 transactions per second per account. +// This operation has a limit of 20 transactions per second per stream. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -946,6 +946,8 @@ func (c *Kinesis) DescribeStreamSummaryRequest(input *DescribeStreamSummaryInput // status, record retention period, approximate creation time, monitoring, encryption // details, and open shard count. // +// DescribeStreamSummary has a limit of 20 transactions per second per account. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
@@ -1223,7 +1225,7 @@ func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Reques // // You can scale by provisioning multiple shards per stream while considering // service limits (for more information, see Amazon Kinesis Data Streams Limits -// (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) +// (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide). Your application should // have one thread per shard, each reading continuously from its stream. To // read from a stream continually, call GetRecords in a loop. Use GetShardIterator @@ -1254,7 +1256,7 @@ func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Reques // // To detect whether the application is falling behind in processing, you can // use the MillisBehindLatest response attribute. You can also monitor the stream -// using CloudWatch metrics and other mechanisms (see Monitoring (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html) +// using CloudWatch metrics and other mechanisms (see Monitoring (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html) // in the Amazon Kinesis Data Streams Developer Guide). // // Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, @@ -1267,7 +1269,7 @@ func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Reques // always increasing. For example, records in a shard or across a stream might // have time stamps that are out of order. // -// This operation has a limit of five transactions per second per account. +// This operation has a limit of five transactions per second per shard. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1288,9 +1290,9 @@ func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Reques // * ProvisionedThroughputExceededException // The request rate for the stream is too high, or the requested data is too // large for the available throughput. Reduce the frequency or size of your -// requests. For more information, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) +// requests. For more information, see Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide, and Error Retries and -// Exponential Backoff in AWS (http://docs.aws.amazon.com/general/latest/gr/api-retries.html) +// Exponential Backoff in AWS (https://docs.aws.amazon.com/general/latest/gr/api-retries.html) // in the AWS General Reference. // // * ExpiredIteratorException @@ -1303,7 +1305,7 @@ func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Reques // * KMSInvalidStateException // The request was rejected because the state of the specified resource isn't // valid for this request. For more information, see How Key State Affects Use -// of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. // // * KMSAccessDeniedException @@ -1319,7 +1321,7 @@ func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Reques // // * KMSThrottlingException // The request was denied due to request throttling. 
For more information about -// throttling, see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) +// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) // in the AWS Key Management Service Developer Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/GetRecords @@ -1418,7 +1420,7 @@ func (c *Kinesis) GetShardIteratorRequest(input *GetShardIteratorInput) (req *re // // If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException. // For more information about throughput limits, see GetRecords, and Streams -// Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) +// Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide. // // If the shard is closed, GetShardIterator returns a valid iterator for the @@ -1447,9 +1449,9 @@ func (c *Kinesis) GetShardIteratorRequest(input *GetShardIteratorInput) (req *re // * ProvisionedThroughputExceededException // The request rate for the stream is too high, or the requested data is too // large for the available throughput. Reduce the frequency or size of your -// requests. For more information, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) +// requests. For more information, see Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide, and Error Retries and -// Exponential Backoff in AWS (http://docs.aws.amazon.com/general/latest/gr/api-retries.html) +// Exponential Backoff in AWS (https://docs.aws.amazon.com/general/latest/gr/api-retries.html) // in the AWS General Reference. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/GetShardIterator @@ -1732,7 +1734,7 @@ func (c *Kinesis) ListStreamConsumersRequest(input *ListStreamConsumersInput) (r // Lists the consumers registered to receive data from a stream using enhanced // fan-out, and provides information about each consumer. // -// This operation has a limit of 10 transactions per second per account. +// This operation has a limit of 5 transactions per second per stream. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2135,7 +2137,7 @@ func (c *Kinesis) MergeShardsRequest(input *MergeShardsInput) (req *request.Requ // MergeShards is called when there is a need to reduce the overall capacity // of a stream because of excess capacity that is not being used. You must specify // the shard to be merged and the adjacent shard for a stream. For more information -// about merging shards, see Merge Two Shards (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html) +// about merging shards, see Merge Two Shards (https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html) // in the Amazon Kinesis Data Streams Developer Guide. // // If the stream is in the ACTIVE state, you can call MergeShards. If a stream @@ -2252,7 +2254,7 @@ func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, // Writes a single data record into an Amazon Kinesis data stream. 
Call PutRecord // to send data into the stream for real-time ingestion and subsequent processing, // one record at a time. Each shard can support writes up to 1,000 records per -// second, up to a maximum data write total of 1 MB per second. +// second, up to a maximum data write total of 1 MiB per second. // // You must specify the name of the stream that captures, stores, and transports // the data; a partition key; and the data blob itself. @@ -2270,7 +2272,7 @@ func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, // integer values and to map associated data records to shards using the hash // key ranges of the shards. You can override hashing the partition key to determine // the shard by explicitly specifying a hash value using the ExplicitHashKey -// parameter. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) +// parameter. For more information, see Adding Data to a Stream (https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) // in the Amazon Kinesis Data Streams Developer Guide. // // PutRecord returns the shard ID of where the data record was placed and the @@ -2279,9 +2281,12 @@ func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, // Sequence numbers increase over time and are specific to a shard within a // stream, not across all shards within a stream. To guarantee strictly increasing // ordering, write serially to a shard and use the SequenceNumberForOrdering -// parameter. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) +// parameter. For more information, see Adding Data to a Stream (https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) // in the Amazon Kinesis Data Streams Developer Guide. // +// After you write a record to a stream, you cannot modify that record or its +// order within the stream. +// // If a PutRecord request cannot be processed because of insufficient provisioned // throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException. // @@ -2308,9 +2313,9 @@ func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, // * ProvisionedThroughputExceededException // The request rate for the stream is too high, or the requested data is too // large for the available throughput. Reduce the frequency or size of your -// requests. For more information, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) +// requests. For more information, see Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide, and Error Retries and -// Exponential Backoff in AWS (http://docs.aws.amazon.com/general/latest/gr/api-retries.html) +// Exponential Backoff in AWS (https://docs.aws.amazon.com/general/latest/gr/api-retries.html) // in the AWS General Reference. // // * KMSDisabledException @@ -2320,7 +2325,7 @@ func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, // * KMSInvalidStateException // The request was rejected because the state of the specified resource isn't // valid for this request. 
For more information, see How Key State Affects Use -// of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. // // * KMSAccessDeniedException @@ -2336,7 +2341,7 @@ func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, // // * KMSThrottlingException // The request was denied due to request throttling. For more information about -// throttling, see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) +// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) // in the AWS Key Management Service Developer Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecord @@ -2410,9 +2415,9 @@ func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Reques // into the stream for data ingestion and processing. // // Each PutRecords request can support up to 500 records. Each record in the -// request can be as large as 1 MB, up to a limit of 5 MB for the entire request, +// request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request, // including partition keys. Each shard can support writes up to 1,000 records -// per second, up to a maximum data write total of 1 MB per second. +// per second, up to a maximum data write total of 1 MiB per second. // // You must specify the name of the stream that captures, stores, and transports // the data; and an array of request Records, with each record in the array @@ -2427,13 +2432,13 @@ func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Reques // hash function is used to map partition keys to 128-bit integer values and // to map associated data records to shards. As a result of this hashing mechanism, // all data records with the same partition key map to the same shard within -// the stream. For more information, see Adding Data to a Stream (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) +// the stream. For more information, see Adding Data to a Stream (https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream) // in the Amazon Kinesis Data Streams Developer Guide. // // Each record in the Records array may include an optional parameter, ExplicitHashKey, // which overrides the partition key to shard mapping. This parameter allows // a data producer to determine explicitly the shard where the record is stored. -// For more information, see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords) +// For more information, see Adding Multiple Records with PutRecords (https://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords) // in the Amazon Kinesis Data Streams Developer Guide. // // The PutRecords response includes an array of response Records. Each record @@ -2445,7 +2450,10 @@ func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Reques // The response Records array includes both successfully and unsuccessfully // processed records. Kinesis Data Streams attempts to process all records in // each PutRecords request. 
A single record failure does not stop the processing -// of subsequent records. +// of subsequent records. As a result, PutRecords doesn't guarantee the ordering +// of records. If you need to read records in the same order they are written +// to the stream, use PutRecord instead of PutRecords, and write to the same +// shard. // // A successfully processed record includes ShardId and SequenceNumber values. // The ShardId parameter identifies the shard in the stream where the record @@ -2458,9 +2466,12 @@ func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Reques // more detailed information about the ProvisionedThroughputExceededException // exception including the account ID, stream name, and shard ID of the record // that was throttled. For more information about partially successful responses, -// see Adding Multiple Records with PutRecords (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords) +// see Adding Multiple Records with PutRecords (https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords) // in the Amazon Kinesis Data Streams Developer Guide. // +// After you write a record to a stream, you cannot modify that record or its +// order within the stream. +// // By default, data records are accessible for 24 hours from the time that they // are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod // to modify this retention period. @@ -2484,9 +2495,9 @@ func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Reques // * ProvisionedThroughputExceededException // The request rate for the stream is too high, or the requested data is too // large for the available throughput. Reduce the frequency or size of your -// requests. For more information, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) +// requests. For more information, see Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide, and Error Retries and -// Exponential Backoff in AWS (http://docs.aws.amazon.com/general/latest/gr/api-retries.html) +// Exponential Backoff in AWS (https://docs.aws.amazon.com/general/latest/gr/api-retries.html) // in the AWS General Reference. // // * KMSDisabledException @@ -2496,7 +2507,7 @@ func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Reques // * KMSInvalidStateException // The request was rejected because the state of the specified resource isn't // valid for this request. For more information, see How Key State Affects Use -// of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. // // * KMSAccessDeniedException @@ -2512,7 +2523,7 @@ func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Reques // // * KMSThrottlingException // The request was denied due to request throttling. For more information about -// throttling, see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) +// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) // in the AWS Key Management Service Developer Guide. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/PutRecords @@ -2582,14 +2593,22 @@ func (c *Kinesis) RegisterStreamConsumerRequest(input *RegisterStreamConsumerInp // RegisterStreamConsumer API operation for Amazon Kinesis. // // Registers a consumer with a Kinesis data stream. When you use this operation, -// the consumer you register can read data from the stream at a rate of up to -// 2 MiB per second. This rate is unaffected by the total number of consumers -// that read from the same stream. +// the consumer you register can then call SubscribeToShard to receive data +// from the stream using enhanced fan-out, at a rate of up to 2 MiB per second +// for every shard you subscribe to. This rate is unaffected by the total number +// of consumers that read from the same stream. +// +// You can register up to 20 consumers per stream. A given consumer can only +// be registered with one stream at a time. // -// You can register up to 5 consumers per stream. A given consumer can only -// be registered with one stream. +// For an example of how to use this operations, see Enhanced Fan-Out Using +// the Kinesis Data Streams API (/streams/latest/dev/building-enhanced-consumers-api.html). // -// This operation has a limit of five transactions per second per account. +// The use of this operation has a limit of five transactions per second per +// account. Also, only 5 consumers can be created simultaneously. In other words, +// you cannot have more than 5 consumers in a CREATING status at the same time. +// Registering a 6th consumer while there are 5 in a CREATING status results +// in a LimitExceededException. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2795,7 +2814,7 @@ func (c *Kinesis) SplitShardRequest(input *SplitShardInput) (req *request.Reques // position in the shard where the shard gets split in two. In many cases, the // new hash key might be the average of the beginning and ending hash key, but // it can be any hash key value in the range being mapped into the shard. For -// more information, see Split a Shard (http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html) +// more information, see Split a Shard (https://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html) // in the Amazon Kinesis Data Streams Developer Guide. // // You can use DescribeStream to determine the shard ID and hash key values @@ -2818,9 +2837,9 @@ func (c *Kinesis) SplitShardRequest(input *SplitShardInput) (req *request.Reques // receive a LimitExceededException. // // For the default shard limit for an AWS account, see Kinesis Data Streams -// Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) +// Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, -// contact AWS Support (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html). +// contact AWS Support (https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html). // // If you try to operate on too many streams simultaneously using CreateStream, // DeleteStream, MergeShards, and/or SplitShard, you receive a LimitExceededException. 
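The SplitShard guidance above (choose a NewStartingHashKey inside the parent shard's hash key range, commonly the midpoint of its starting and ending hash keys) is concrete enough to sketch. Illustrative only: the stream name is a placeholder, and ListShards (part of the existing Kinesis API, not shown in these hunks) is used merely to fetch a parent shard's hash key range.

package main

import (
	"log"
	"math/big"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	sess := session.Must(session.NewSession())
	client := kinesis.New(sess)

	streamName := "example-stream" // placeholder

	shards, err := client.ListShards(&kinesis.ListShardsInput{
		StreamName: aws.String(streamName),
	})
	if err != nil {
		log.Fatal(err)
	}
	if len(shards.Shards) == 0 {
		log.Fatal("no shards found")
	}
	parent := shards.Shards[0]

	// Midpoint of the parent shard's hash key range, as suggested above.
	start, _ := new(big.Int).SetString(aws.StringValue(parent.HashKeyRange.StartingHashKey), 10)
	end, _ := new(big.Int).SetString(aws.StringValue(parent.HashKeyRange.EndingHashKey), 10)
	mid := new(big.Int).Add(start, end)
	mid.Div(mid, big.NewInt(2))

	_, err = client.SplitShard(&kinesis.SplitShardInput{
		StreamName:         aws.String(streamName),
		ShardToSplit:       parent.ShardId,
		NewStartingHashKey: aws.String(mid.String()),
	})
	if err != nil {
		log.Fatalf("splitting shard: %v", err)
	}
}

As the documentation above notes, DescribeStream (or ListShards, as here) supplies the shard IDs and hash key ranges needed for the ShardToSplit and NewStartingHashKey parameters.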
@@ -2969,7 +2988,7 @@ func (c *Kinesis) StartStreamEncryptionRequest(input *StartStreamEncryptionInput // * KMSInvalidStateException // The request was rejected because the state of the specified resource isn't // valid for this request. For more information, see How Key State Affects Use -// of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. // // * KMSAccessDeniedException @@ -2985,7 +3004,7 @@ func (c *Kinesis) StartStreamEncryptionRequest(input *StartStreamEncryptionInput // // * KMSThrottlingException // The request was denied due to request throttling. For more information about -// throttling, see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) +// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) // in the AWS Key Management Service Developer Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesis-2013-12-02/StartStreamEncryption @@ -3160,7 +3179,7 @@ func (c *Kinesis) SubscribeToShardRequest(input *SubscribeToShardInput) (req *re output = &SubscribeToShardOutput{} req = c.newRequest(op, input, output) - es := newSubscribeToShardEventStream() + es := NewSubscribeToShardEventStream() req.Handlers.Unmarshal.PushBack(es.setStreamCloser) output.EventStream = es @@ -3175,17 +3194,31 @@ func (c *Kinesis) SubscribeToShardRequest(input *SubscribeToShardInput) (req *re // SubscribeToShard API operation for Amazon Kinesis. // -// Call this operation from your consumer after you call RegisterStreamConsumer -// to register the consumer with Kinesis Data Streams. If the call succeeds, -// your consumer starts receiving events of type SubscribeToShardEvent for up -// to 5 minutes, after which time you need to call SubscribeToShard again to -// renew the subscription if you want to continue to receive records. +// This operation establishes an HTTP/2 connection between the consumer you +// specify in the ConsumerARN parameter and the shard you specify in the ShardId +// parameter. After the connection is successfully established, Kinesis Data +// Streams pushes records from the shard to the consumer over this connection. +// Before you call this operation, call RegisterStreamConsumer to register the +// consumer with Kinesis Data Streams. +// +// When the SubscribeToShard call succeeds, your consumer starts receiving events +// of type SubscribeToShardEvent over the HTTP/2 connection for up to 5 minutes, +// after which time you need to call SubscribeToShard again to renew the subscription +// if you want to continue to receive records. // -// You can make one call to SubscribeToShard per second per ConsumerARN. If -// your call succeeds, and then you call the operation again less than 5 seconds -// later, the second call generates a ResourceInUseException. If you call the -// operation a second time more than 5 seconds after the first call succeeds, -// the second call succeeds and the first connection gets shut down. +// You can make one call to SubscribeToShard per second per registered consumer +// per shard. 
For example, if you have a 4000 shard stream and two registered +// stream consumers, you can make one SubscribeToShard request per second for +// each combination of shard and registered consumer, allowing you to subscribe +// both consumers to all 4000 shards in one second. +// +// If you call SubscribeToShard again with the same ConsumerARN and ShardId +// within 5 seconds of a successful call, you'll get a ResourceInUseException. +// If you call SubscribeToShard 5 seconds or more after a successful call, the +// first connection will expire and the second call will take over the subscription. +// +// For an example of how to use this operations, see Enhanced Fan-Out Using +// the Kinesis Data Streams API (/streams/latest/dev/building-enhanced-consumers-api.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3233,7 +3266,13 @@ func (c *Kinesis) SubscribeToShardWithContext(ctx aws.Context, input *SubscribeT return out, req.Send() } +var _ awserr.Error + // SubscribeToShardEventStream provides the event stream handling for the SubscribeToShard. +// +// For testing and mocking the event stream this type should be initialized via +// the NewSubscribeToShardEventStream constructor function. Using the functional options +// to pass in nested mock behavior. type SubscribeToShardEventStream struct { // Reader is the EventStream reader for the SubscribeToShardEventStream @@ -3257,11 +3296,31 @@ type SubscribeToShardEventStream struct { err *eventstreamapi.OnceError } -func newSubscribeToShardEventStream() *SubscribeToShardEventStream { - return &SubscribeToShardEventStream{ +// NewSubscribeToShardEventStream initializes an SubscribeToShardEventStream. +// This function should only be used for testing and mocking the SubscribeToShardEventStream +// stream within your application. +// +// The Reader member must be set before reading events from the stream. +// +// The StreamCloser member should be set to the underlying io.Closer, +// (e.g. http.Response.Body), that will be closed when the stream Close method +// is called. +// +// es := NewSubscribeToShardEventStream(func(o *SubscribeToShardEventStream{ +// es.Reader = myMockStreamReader +// es.StreamCloser = myMockStreamCloser +// }) +func NewSubscribeToShardEventStream(opts ...func(*SubscribeToShardEventStream)) *SubscribeToShardEventStream { + es := &SubscribeToShardEventStream{ done: make(chan struct{}), err: eventstreamapi.NewOnceError(), } + + for _, fn := range opts { + fn(es) + } + + return es } func (es *SubscribeToShardEventStream) setStreamCloser(r *request.Request) { @@ -3316,6 +3375,7 @@ func (e eventTypeForSubscribeToShardEventStreamOutputEvent) UnmarshalerForEventN // These events are: // // * SubscribeToShardEvent +// * SubscribeToShardEventStreamUnknownEvent func (es *SubscribeToShardEventStream) Events() <-chan SubscribeToShardEventStreamEvent { return es.Reader.Events() } @@ -3467,13 +3527,18 @@ func (c *Kinesis) UpdateShardCountRequest(input *UpdateShardCountInput) (req *re // // To update the shard count, Kinesis Data Streams performs splits or merges // on individual shards. This can cause short-lived shards to be created, in -// addition to the final shards. We recommend that you double or halve the shard -// count, as this results in the fewest number of splits or merges. +// addition to the final shards. 
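The newly exported NewSubscribeToShardEventStream constructor exists so tests can inject mock internals through functional options. The inline example in the generated comment above drops a closing parenthesis after the option function's parameter list; a compilable version, with a hypothetical mockReader that assumes the reader interface's usual Events/Close/Err shape, could look like this sketch.

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

// mockReader is a test double for the generated SubscribeToShardEventStreamReader interface.
type mockReader struct {
	events chan kinesis.SubscribeToShardEventStreamEvent
}

func (m *mockReader) Events() <-chan kinesis.SubscribeToShardEventStreamEvent { return m.events }
func (m *mockReader) Close() error                                            { return nil } // no-op for the double
func (m *mockReader) Err() error                                              { return nil }

func main() {
	events := make(chan kinesis.SubscribeToShardEventStreamEvent, 1)
	events <- &kinesis.SubscribeToShardEvent{
		ContinuationSequenceNumber: aws.String("0"), // placeholder
		MillisBehindLatest:         aws.Int64(0),
	}
	close(events)

	// Build the stream with mock internals via the functional options.
	es := kinesis.NewSubscribeToShardEventStream(func(o *kinesis.SubscribeToShardEventStream) {
		o.Reader = &mockReader{events: events}
		o.StreamCloser = ioutil.NopCloser(strings.NewReader(""))
	})

	for ev := range es.Events() {
		fmt.Printf("mock event: %T\n", ev)
	}
}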
These short-lived shards count towards your +// total shard limit for your account in the Region. +// +// When using this operation, we recommend that you specify a target shard count +// that is a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target +// value within your shard limit. However, if you specify a target that isn't +// a multiple of 25%, the scaling action might take longer to complete. // // This operation has the following default limits. By default, you cannot do // the following: // -// * Scale more than twice per rolling 24-hour period per stream +// * Scale more than ten times per rolling 24-hour period per stream // // * Scale up to more than double your current shard count for a stream // @@ -3486,7 +3551,7 @@ func (c *Kinesis) UpdateShardCountRequest(input *UpdateShardCountInput) (req *re // // * Scale up to more than the shard limit for your account // -// For the default limits for an AWS account, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) +// For the default limits for an AWS account, see Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide. To request an increase // in the call rate limit, the shard limit for this API, or your overall shard // limit, use the limits form (https://console.aws.amazon.com/support/v1#/case/create?issueType=service-limit-increase&limitType=service-code-kinesis). @@ -3610,7 +3675,52 @@ func (s AddTagsToStreamOutput) GoString() string { return s.String() } -// An object that represents the details of the consumer you registered. +type ChildShard struct { + _ struct{} `type:"structure"` + + // The range of possible hash key values for the shard, which is a set of ordered + // contiguous positive integers. + // + // HashKeyRange is a required field + HashKeyRange *HashKeyRange `type:"structure" required:"true"` + + // ParentShards is a required field + ParentShards []*string `type:"list" required:"true"` + + // ShardId is a required field + ShardId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ChildShard) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ChildShard) GoString() string { + return s.String() +} + +// SetHashKeyRange sets the HashKeyRange field's value. +func (s *ChildShard) SetHashKeyRange(v *HashKeyRange) *ChildShard { + s.HashKeyRange = v + return s +} + +// SetParentShards sets the ParentShards field's value. +func (s *ChildShard) SetParentShards(v []*string) *ChildShard { + s.ParentShards = v + return s +} + +// SetShardId sets the ShardId field's value. +func (s *ChildShard) SetShardId(v string) *ChildShard { + s.ShardId = &v + return s +} + +// An object that represents the details of the consumer you registered. This +// type of object is returned by RegisterStreamConsumer. type Consumer struct { _ struct{} `type:"structure"` @@ -3673,7 +3783,8 @@ func (s *Consumer) SetConsumerStatus(v string) *Consumer { return s } -// An object that represents the details of a registered consumer. +// An object that represents the details of a registered consumer. This type +// of object is returned by DescribeStreamConsumer. type ConsumerDescription struct { _ struct{} `type:"structure"` @@ -3755,8 +3866,6 @@ type CreateStreamInput struct { // is a function of the number of shards; more shards are required for greater // provisioned throughput. 
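To illustrate the 25%-multiple guidance that UpdateShardCount now documents, a minimal sketch (the stream name is a placeholder) that scales a stream to 150% of its current shard count with uniform scaling:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	svc := kinesis.New(session.Must(session.NewSession()))

	// Going from 4 to 6 shards is a target of 150% of the current count,
	// a multiple of 25%, which keeps the resharding fast per the guidance above.
	out, err := svc.UpdateShardCount(&kinesis.UpdateShardCountInput{
		StreamName:       aws.String("example-stream"), // placeholder
		TargetShardCount: aws.Int64(6),
		ScalingType:      aws.String(kinesis.ScalingTypeUniformScaling),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("scaling %s: %d -> %d shards\n",
		aws.StringValue(out.StreamName),
		aws.Int64Value(out.CurrentShardCount),
		aws.Int64Value(out.TargetShardCount))
}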
// - // DefaultShardLimit; - // // ShardCount is a required field ShardCount *int64 `min:"1" type:"integer" required:"true"` @@ -3836,7 +3945,7 @@ type DecreaseStreamRetentionPeriodInput struct { // retention period. // // RetentionPeriodHours is a required field - RetentionPeriodHours *int64 `min:"1" type:"integer" required:"true"` + RetentionPeriodHours *int64 `type:"integer" required:"true"` // The name of the stream to modify. // @@ -3860,9 +3969,6 @@ func (s *DecreaseStreamRetentionPeriodInput) Validate() error { if s.RetentionPeriodHours == nil { invalidParams.Add(request.NewErrParamRequired("RetentionPeriodHours")) } - if s.RetentionPeriodHours != nil && *s.RetentionPeriodHours < 1 { - invalidParams.Add(request.NewErrParamMinValue("RetentionPeriodHours", 1)) - } if s.StreamName == nil { invalidParams.Add(request.NewErrParamRequired("StreamName")) } @@ -4372,7 +4478,7 @@ type DisableEnhancedMonitoringInput struct { // * ALL // // For more information, see Monitoring the Amazon Kinesis Data Streams Service - // with Amazon CloudWatch (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html) + // with Amazon CloudWatch (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html) // in the Amazon Kinesis Data Streams Developer Guide. // // ShardLevelMetrics is a required field @@ -4454,7 +4560,7 @@ type EnableEnhancedMonitoringInput struct { // * ALL // // For more information, see Monitoring the Amazon Kinesis Data Streams Service - // with Amazon CloudWatch (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html) + // with Amazon CloudWatch (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html) // in the Amazon Kinesis Data Streams Developer Guide. // // ShardLevelMetrics is a required field @@ -4536,7 +4642,7 @@ type EnhancedMetrics struct { // * ALL // // For more information, see Monitoring the Amazon Kinesis Data Streams Service - // with Amazon CloudWatch (http://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html) + // with Amazon CloudWatch (https://docs.aws.amazon.com/kinesis/latest/dev/monitoring-with-cloudwatch.html) // in the Amazon Kinesis Data Streams Developer Guide. ShardLevelMetrics []*string `min:"1" type:"list"` } @@ -4603,8 +4709,8 @@ func (s *EnhancedMonitoringOutput) SetStreamName(v string) *EnhancedMonitoringOu // The provided iterator exceeds the maximum age allowed. type ExpiredIteratorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -4622,17 +4728,17 @@ func (s ExpiredIteratorException) GoString() string { func newErrorExpiredIteratorException(v protocol.ResponseMetadata) error { return &ExpiredIteratorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ExpiredIteratorException) Code() string { +func (s *ExpiredIteratorException) Code() string { return "ExpiredIteratorException" } // Message returns the exception's message. -func (s ExpiredIteratorException) Message() string { +func (s *ExpiredIteratorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4640,28 +4746,28 @@ func (s ExpiredIteratorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
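With the min:"1" tag and the client-side minimum check on RetentionPeriodHours removed here, range enforcement (24 to 168 hours, per the StreamDescription doc later in this patch) is left to the service. A hedged sketch of adjusting retention and surfacing the service-side validation error; the stream name is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	svc := kinesis.New(session.Must(session.NewSession()))
	stream := aws.String("example-stream") // placeholder

	// Raise retention to 72 hours, then drop it back to the 24-hour minimum.
	// The SDK no longer rejects out-of-range values locally; the service
	// returns InvalidArgumentException instead.
	if _, err := svc.IncreaseStreamRetentionPeriod(&kinesis.IncreaseStreamRetentionPeriodInput{
		StreamName:           stream,
		RetentionPeriodHours: aws.Int64(72),
	}); err != nil {
		log.Fatal(err)
	}

	if _, err := svc.DecreaseStreamRetentionPeriod(&kinesis.DecreaseStreamRetentionPeriodInput{
		StreamName:           stream,
		RetentionPeriodHours: aws.Int64(24),
	}); err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == kinesis.ErrCodeInvalidArgumentException {
			log.Fatalf("retention period out of range: %v", aerr.Message())
		}
		log.Fatal(err)
	}
}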
-func (s ExpiredIteratorException) OrigErr() error { +func (s *ExpiredIteratorException) OrigErr() error { return nil } -func (s ExpiredIteratorException) Error() string { +func (s *ExpiredIteratorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ExpiredIteratorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ExpiredIteratorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ExpiredIteratorException) RequestID() string { - return s.respMetadata.RequestID +func (s *ExpiredIteratorException) RequestID() string { + return s.RespMetadata.RequestID } // The pagination token passed to the operation is expired. type ExpiredNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4678,17 +4784,17 @@ func (s ExpiredNextTokenException) GoString() string { func newErrorExpiredNextTokenException(v protocol.ResponseMetadata) error { return &ExpiredNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ExpiredNextTokenException) Code() string { +func (s *ExpiredNextTokenException) Code() string { return "ExpiredNextTokenException" } // Message returns the exception's message. -func (s ExpiredNextTokenException) Message() string { +func (s *ExpiredNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4696,22 +4802,22 @@ func (s ExpiredNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ExpiredNextTokenException) OrigErr() error { +func (s *ExpiredNextTokenException) OrigErr() error { return nil } -func (s ExpiredNextTokenException) Error() string { +func (s *ExpiredNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ExpiredNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ExpiredNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ExpiredNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ExpiredNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the input for GetRecords. @@ -4720,6 +4826,7 @@ type GetRecordsInput struct { // The maximum number of records to return. Specify a value of up to 10,000. // If you specify a value that is greater than 10,000, GetRecords throws InvalidArgumentException. + // The default value is 10,000. Limit *int64 `min:"1" type:"integer"` // The position in the shard from which you want to start sequentially reading @@ -4775,6 +4882,8 @@ func (s *GetRecordsInput) SetShardIterator(v string) *GetRecordsInput { type GetRecordsOutput struct { _ struct{} `type:"structure"` + ChildShards []*ChildShard `type:"list"` + // The number of milliseconds the GetRecords response is from the tip of the // stream, indicating how far behind current time the consumer is. 
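GetRecordsOutput now carries ChildShards, which lets a plain GetRecords consumer discover successor shards when the shard it is reading is closed by a reshard. A minimal polling sketch using placeholder stream and shard names:

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	svc := kinesis.New(session.Must(session.NewSession()))

	it, err := svc.GetShardIterator(&kinesis.GetShardIteratorInput{
		StreamName:        aws.String("example-stream"),       // placeholder
		ShardId:           aws.String("shardId-000000000000"), // placeholder
		ShardIteratorType: aws.String(kinesis.ShardIteratorTypeTrimHorizon),
	})
	if err != nil {
		log.Fatal(err)
	}

	iterator := it.ShardIterator
	for iterator != nil {
		out, err := svc.GetRecords(&kinesis.GetRecordsInput{
			ShardIterator: iterator,
			Limit:         aws.Int64(1000),
		})
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("got %d records, %d ms behind latest",
			len(out.Records), aws.Int64Value(out.MillisBehindLatest))

		// After a reshard, the closed shard reports its children here so the
		// consumer can continue reading from the child shards.
		for _, child := range out.ChildShards {
			log.Printf("child shard %s (parents: %v)",
				aws.StringValue(child.ShardId), aws.StringValueSlice(child.ParentShards))
		}

		iterator = out.NextShardIterator
		time.Sleep(time.Second) // stay under the per-shard GetRecords rate
	}
}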
A value of // zero indicates that record processing is caught up, and there are no new @@ -4802,6 +4911,12 @@ func (s GetRecordsOutput) GoString() string { return s.String() } +// SetChildShards sets the ChildShards field's value. +func (s *GetRecordsOutput) SetChildShards(v []*ChildShard) *GetRecordsOutput { + s.ChildShards = v + return s +} + // SetMillisBehindLatest sets the MillisBehindLatest field's value. func (s *GetRecordsOutput) SetMillisBehindLatest(v int64) *GetRecordsOutput { s.MillisBehindLatest = &v @@ -5008,7 +5123,7 @@ type IncreaseStreamRetentionPeriodInput struct { // retention period. // // RetentionPeriodHours is a required field - RetentionPeriodHours *int64 `min:"1" type:"integer" required:"true"` + RetentionPeriodHours *int64 `type:"integer" required:"true"` // The name of the stream to modify. // @@ -5032,9 +5147,6 @@ func (s *IncreaseStreamRetentionPeriodInput) Validate() error { if s.RetentionPeriodHours == nil { invalidParams.Add(request.NewErrParamRequired("RetentionPeriodHours")) } - if s.RetentionPeriodHours != nil && *s.RetentionPeriodHours < 1 { - invalidParams.Add(request.NewErrParamMinValue("RetentionPeriodHours", 1)) - } if s.StreamName == nil { invalidParams.Add(request.NewErrParamRequired("StreamName")) } @@ -5074,9 +5186,11 @@ func (s IncreaseStreamRetentionPeriodOutput) GoString() string { return s.String() } +// The processing of the request failed because of an unknown error, exception, +// or failure. type InternalFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5108,6 +5222,8 @@ func (s *InternalFailureException) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *InternalFailureException) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType)) var buf bytes.Buffer @@ -5120,17 +5236,17 @@ func (s *InternalFailureException) MarshalEvent(pm protocol.PayloadMarshaler) (m func newErrorInternalFailureException(v protocol.ResponseMetadata) error { return &InternalFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalFailureException) Code() string { +func (s *InternalFailureException) Code() string { return "InternalFailureException" } // Message returns the exception's message. -func (s InternalFailureException) Message() string { +func (s *InternalFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5138,29 +5254,29 @@ func (s InternalFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalFailureException) OrigErr() error { +func (s *InternalFailureException) OrigErr() error { return nil } -func (s InternalFailureException) Error() string { +func (s *InternalFailureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InternalFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalFailureException) RequestID() string { + return s.RespMetadata.RequestID } // A specified parameter exceeds its restrictions, is not supported, or can't // be used. For more information, see the returned message. type InvalidArgumentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -5178,17 +5294,17 @@ func (s InvalidArgumentException) GoString() string { func newErrorInvalidArgumentException(v protocol.ResponseMetadata) error { return &InvalidArgumentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArgumentException) Code() string { +func (s *InvalidArgumentException) Code() string { return "InvalidArgumentException" } // Message returns the exception's message. -func (s InvalidArgumentException) Message() string { +func (s *InvalidArgumentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5196,29 +5312,29 @@ func (s InvalidArgumentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArgumentException) OrigErr() error { +func (s *InvalidArgumentException) OrigErr() error { return nil } -func (s InvalidArgumentException) Error() string { +func (s *InvalidArgumentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArgumentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArgumentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArgumentException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArgumentException) RequestID() string { + return s.RespMetadata.RequestID } // The ciphertext references a key that doesn't exist or that you don't have // access to. type KMSAccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -5251,6 +5367,8 @@ func (s *KMSAccessDeniedException) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. 
func (s *KMSAccessDeniedException) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType)) var buf bytes.Buffer @@ -5263,17 +5381,17 @@ func (s *KMSAccessDeniedException) MarshalEvent(pm protocol.PayloadMarshaler) (m func newErrorKMSAccessDeniedException(v protocol.ResponseMetadata) error { return &KMSAccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSAccessDeniedException) Code() string { +func (s *KMSAccessDeniedException) Code() string { return "KMSAccessDeniedException" } // Message returns the exception's message. -func (s KMSAccessDeniedException) Message() string { +func (s *KMSAccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5281,29 +5399,29 @@ func (s KMSAccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSAccessDeniedException) OrigErr() error { +func (s *KMSAccessDeniedException) OrigErr() error { return nil } -func (s KMSAccessDeniedException) Error() string { +func (s *KMSAccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSAccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSAccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSAccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSAccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified customer master key (CMK) // isn't enabled. type KMSDisabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -5336,6 +5454,8 @@ func (s *KMSDisabledException) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *KMSDisabledException) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType)) var buf bytes.Buffer @@ -5348,17 +5468,17 @@ func (s *KMSDisabledException) MarshalEvent(pm protocol.PayloadMarshaler) (msg e func newErrorKMSDisabledException(v protocol.ResponseMetadata) error { return &KMSDisabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSDisabledException) Code() string { +func (s *KMSDisabledException) Code() string { return "KMSDisabledException" } // Message returns the exception's message. -func (s KMSDisabledException) Message() string { +func (s *KMSDisabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5366,31 +5486,31 @@ func (s KMSDisabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s KMSDisabledException) OrigErr() error { +func (s *KMSDisabledException) OrigErr() error { return nil } -func (s KMSDisabledException) Error() string { +func (s *KMSDisabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSDisabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSDisabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSDisabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSDisabledException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the state of the specified resource isn't // valid for this request. For more information, see How Key State Affects Use -// of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) +// of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. type KMSInvalidStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -5423,6 +5543,8 @@ func (s *KMSInvalidStateException) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *KMSInvalidStateException) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType)) var buf bytes.Buffer @@ -5435,17 +5557,17 @@ func (s *KMSInvalidStateException) MarshalEvent(pm protocol.PayloadMarshaler) (m func newErrorKMSInvalidStateException(v protocol.ResponseMetadata) error { return &KMSInvalidStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSInvalidStateException) Code() string { +func (s *KMSInvalidStateException) Code() string { return "KMSInvalidStateException" } // Message returns the exception's message. -func (s KMSInvalidStateException) Message() string { +func (s *KMSInvalidStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5453,29 +5575,29 @@ func (s KMSInvalidStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSInvalidStateException) OrigErr() error { +func (s *KMSInvalidStateException) OrigErr() error { return nil } -func (s KMSInvalidStateException) Error() string { +func (s *KMSInvalidStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSInvalidStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSInvalidStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s KMSInvalidStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSInvalidStateException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified entity or resource can't be // found. type KMSNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -5508,6 +5630,8 @@ func (s *KMSNotFoundException) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *KMSNotFoundException) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType)) var buf bytes.Buffer @@ -5520,17 +5644,17 @@ func (s *KMSNotFoundException) MarshalEvent(pm protocol.PayloadMarshaler) (msg e func newErrorKMSNotFoundException(v protocol.ResponseMetadata) error { return &KMSNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSNotFoundException) Code() string { +func (s *KMSNotFoundException) Code() string { return "KMSNotFoundException" } // Message returns the exception's message. -func (s KMSNotFoundException) Message() string { +func (s *KMSNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5538,28 +5662,28 @@ func (s KMSNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSNotFoundException) OrigErr() error { +func (s *KMSNotFoundException) OrigErr() error { return nil } -func (s KMSNotFoundException) Error() string { +func (s *KMSNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The AWS access key ID needs a subscription for the service. type KMSOptInRequired struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -5592,6 +5716,8 @@ func (s *KMSOptInRequired) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. 
func (s *KMSOptInRequired) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType)) var buf bytes.Buffer @@ -5604,17 +5730,17 @@ func (s *KMSOptInRequired) MarshalEvent(pm protocol.PayloadMarshaler) (msg event func newErrorKMSOptInRequired(v protocol.ResponseMetadata) error { return &KMSOptInRequired{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSOptInRequired) Code() string { +func (s *KMSOptInRequired) Code() string { return "KMSOptInRequired" } // Message returns the exception's message. -func (s KMSOptInRequired) Message() string { +func (s *KMSOptInRequired) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5622,30 +5748,30 @@ func (s KMSOptInRequired) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSOptInRequired) OrigErr() error { +func (s *KMSOptInRequired) OrigErr() error { return nil } -func (s KMSOptInRequired) Error() string { +func (s *KMSOptInRequired) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSOptInRequired) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSOptInRequired) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSOptInRequired) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSOptInRequired) RequestID() string { + return s.RespMetadata.RequestID } // The request was denied due to request throttling. For more information about -// throttling, see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) +// throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) // in the AWS Key Management Service Developer Guide. type KMSThrottlingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -5678,6 +5804,8 @@ func (s *KMSThrottlingException) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *KMSThrottlingException) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType)) var buf bytes.Buffer @@ -5690,17 +5818,17 @@ func (s *KMSThrottlingException) MarshalEvent(pm protocol.PayloadMarshaler) (msg func newErrorKMSThrottlingException(v protocol.ResponseMetadata) error { return &KMSThrottlingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSThrottlingException) Code() string { +func (s *KMSThrottlingException) Code() string { return "KMSThrottlingException" } // Message returns the exception's message. 
-func (s KMSThrottlingException) Message() string { +func (s *KMSThrottlingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5708,29 +5836,29 @@ func (s KMSThrottlingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSThrottlingException) OrigErr() error { +func (s *KMSThrottlingException) OrigErr() error { return nil } -func (s KMSThrottlingException) Error() string { +func (s *KMSThrottlingException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSThrottlingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSThrottlingException) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSThrottlingException) RequestID() string { + return s.RespMetadata.RequestID } // The requested resource exceeds the maximum number allowed, or the number // of concurrent stream requests exceeds the maximum number allowed. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -5748,17 +5876,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5766,22 +5894,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListShardsInput struct { @@ -5798,7 +5926,7 @@ type ListShardsInput struct { // The maximum number of shards to return in a single call to ListShards. The // minimum value you can specify for this parameter is 1, and the maximum is - // 1,000, which is also the default. + // 10,000, which is also the default. 
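ListShardsInput gains an optional ShardFilter (validated above) and its MaxResults ceiling moves from 1,000 to 10,000. A sketch that lists only currently open shards using the AT_LATEST filter type added later in this diff; the stream name is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	svc := kinesis.New(session.Must(session.NewSession()))

	input := &kinesis.ListShardsInput{
		StreamName: aws.String("example-stream"), // placeholder
		// Only return shards that are currently open.
		ShardFilter: &kinesis.ShardFilter{
			Type: aws.String(kinesis.ShardFilterTypeAtLatest),
		},
	}

	// Page through results with NextToken; subsequent pages are requested
	// with the token alone rather than the stream name.
	for {
		out, err := svc.ListShards(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range out.Shards {
			fmt.Println(aws.StringValue(s.ShardId))
		}
		if out.NextToken == nil {
			break
		}
		input = &kinesis.ListShardsInput{NextToken: out.NextToken}
	}
}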
// // When the number of shards to be listed is greater than the value of MaxResults, // the response contains a NextToken value that you can use in a subsequent @@ -5826,6 +5954,8 @@ type ListShardsInput struct { // If you specify an expired token in a call to ListShards, you get ExpiredNextTokenException. NextToken *string `min:"1" type:"string"` + ShardFilter *ShardFilter `type:"structure"` + // Specify this input parameter to distinguish data streams that have the same // name. For example, if you create a data stream and then delete it, and you // later create another data stream with the same name, you can use this input @@ -5866,6 +5996,11 @@ func (s *ListShardsInput) Validate() error { if s.StreamName != nil && len(*s.StreamName) < 1 { invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) } + if s.ShardFilter != nil { + if err := s.ShardFilter.Validate(); err != nil { + invalidParams.AddNested("ShardFilter", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -5891,6 +6026,12 @@ func (s *ListShardsInput) SetNextToken(v string) *ListShardsInput { return s } +// SetShardFilter sets the ShardFilter field's value. +func (s *ListShardsInput) SetShardFilter(v *ShardFilter) *ListShardsInput { + s.ShardFilter = v + return s +} + // SetStreamCreationTimestamp sets the StreamCreationTimestamp field's value. func (s *ListShardsInput) SetStreamCreationTimestamp(v time.Time) *ListShardsInput { s.StreamCreationTimestamp = &v @@ -6382,13 +6523,13 @@ func (s MergeShardsOutput) GoString() string { // The request rate for the stream is too high, or the requested data is too // large for the available throughput. Reduce the frequency or size of your -// requests. For more information, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) +// requests. For more information, see Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide, and Error Retries and -// Exponential Backoff in AWS (http://docs.aws.amazon.com/general/latest/gr/api-retries.html) +// Exponential Backoff in AWS (https://docs.aws.amazon.com/general/latest/gr/api-retries.html) // in the AWS General Reference. type ProvisionedThroughputExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -6406,17 +6547,17 @@ func (s ProvisionedThroughputExceededException) GoString() string { func newErrorProvisionedThroughputExceededException(v protocol.ResponseMetadata) error { return &ProvisionedThroughputExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ProvisionedThroughputExceededException) Code() string { +func (s *ProvisionedThroughputExceededException) Code() string { return "ProvisionedThroughputExceededException" } // Message returns the exception's message. -func (s ProvisionedThroughputExceededException) Message() string { +func (s *ProvisionedThroughputExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6424,22 +6565,22 @@ func (s ProvisionedThroughputExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ProvisionedThroughputExceededException) OrigErr() error { +func (s *ProvisionedThroughputExceededException) OrigErr() error { return nil } -func (s ProvisionedThroughputExceededException) Error() string { +func (s *ProvisionedThroughputExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ProvisionedThroughputExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ProvisionedThroughputExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ProvisionedThroughputExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ProvisionedThroughputExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Represents the input for PutRecord. @@ -6449,7 +6590,7 @@ type PutRecordInput struct { // The data blob to put into the record, which is base64-encoded when the blob // is serialized. When the data blob (the payload before base64-encoding) is // added to the partition key size, the total size must not exceed the maximum - // record size (1 MB). + // record size (1 MiB). // // Data is automatically base64 encoded/decoded by the SDK. // @@ -6735,7 +6876,7 @@ type PutRecordsRequestEntry struct { // The data blob to put into the record, which is base64-encoded when the blob // is serialized. When the data blob (the payload before base64-encoding) is // added to the partition key size, the total size must not exceed the maximum - // record size (1 MB). + // record size (1 MiB). // // Data is automatically base64 encoded/decoded by the SDK. // @@ -6876,7 +7017,7 @@ type Record struct { // Data Streams, which does not inspect, interpret, or change the data in the // blob in any way. When the data blob (the payload before base64-encoding) // is added to the partition key size, the total size must not exceed the maximum - // record size (1 MB). + // record size (1 MiB). // // Data is automatically base64 encoded/decoded by the SDK. // @@ -7107,8 +7248,8 @@ func (s RemoveTagsFromStreamOutput) GoString() string { // The resource is not available for this operation. For successful operation, // the resource must be in the ACTIVE state. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -7141,6 +7282,8 @@ func (s *ResourceInUseException) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *ResourceInUseException) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType)) var buf bytes.Buffer @@ -7153,17 +7296,17 @@ func (s *ResourceInUseException) MarshalEvent(pm protocol.PayloadMarshaler) (msg func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7171,29 +7314,29 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The requested resource could not be found. The stream might not be specified // correctly. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message that provides information about the error. Message_ *string `locationName:"message" type:"string"` @@ -7226,6 +7369,8 @@ func (s *ResourceNotFoundException) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *ResourceNotFoundException) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.ExceptionMessageType)) var buf bytes.Buffer @@ -7238,17 +7383,17 @@ func (s *ResourceNotFoundException) MarshalEvent(pm protocol.PayloadMarshaler) ( func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7256,22 +7401,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The range of possible sequence numbers for the shard. @@ -7377,6 +7522,61 @@ func (s *Shard) SetShardId(v string) *Shard { return s } +type ShardFilter struct { + _ struct{} `type:"structure"` + + ShardId *string `min:"1" type:"string"` + + Timestamp *time.Time `type:"timestamp"` + + // Type is a required field + Type *string `type:"string" required:"true" enum:"ShardFilterType"` +} + +// String returns the string representation +func (s ShardFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ShardFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ShardFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ShardFilter"} + if s.ShardId != nil && len(*s.ShardId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ShardId", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetShardId sets the ShardId field's value. +func (s *ShardFilter) SetShardId(v string) *ShardFilter { + s.ShardId = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *ShardFilter) SetTimestamp(v time.Time) *ShardFilter { + s.Timestamp = &v + return s +} + +// SetType sets the Type field's value. +func (s *ShardFilter) SetType(v string) *ShardFilter { + s.Type = &v + return s +} + // Represents the input for SplitShard. type SplitShardInput struct { _ struct{} `type:"structure"` @@ -7573,10 +7773,37 @@ func (s StartStreamEncryptionOutput) GoString() string { type StartingPosition struct { _ struct{} `type:"structure"` + // The sequence number of the data record in the shard from which to start streaming. + // To specify a sequence number, set StartingPosition to AT_SEQUENCE_NUMBER + // or AFTER_SEQUENCE_NUMBER. SequenceNumber *string `type:"string"` + // The time stamp of the data record from which to start reading. To specify + // a time stamp, set StartingPosition to Type AT_TIMESTAMP. A time stamp is + // the Unix epoch date with precision in milliseconds. For example, 2016-04-04T19:58:46.480-00:00 + // or 1459799926.480. If a record with this exact time stamp does not exist, + // records will be streamed from the next (later) record. If the time stamp + // is older than the current trim horizon, records will be streamed from the + // oldest untrimmed data record (TRIM_HORIZON). Timestamp *time.Time `type:"timestamp"` + // You can set the starting position to one of the following values: + // + // AT_SEQUENCE_NUMBER: Start streaming from the position denoted by the sequence + // number specified in the SequenceNumber field. + // + // AFTER_SEQUENCE_NUMBER: Start streaming right after the position denoted by + // the sequence number specified in the SequenceNumber field. + // + // AT_TIMESTAMP: Start streaming from the position denoted by the time stamp + // specified in the Timestamp field. + // + // TRIM_HORIZON: Start streaming at the last untrimmed record in the shard, + // which is the oldest data record in the shard. + // + // LATEST: Start streaming just after the most recent record in the shard, so + // that you always read the most recent data in the shard. 
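The expanded StartingPosition documentation above enumerates the Type values. As a small illustration, building a SubscribeToShardInput that starts at a timestamp 15 minutes in the past; the consumer ARN and shard ID are placeholders.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	// If no record exists at the exact timestamp, streaming begins at the
	// next (later) record, per the Timestamp documentation above.
	input := &kinesis.SubscribeToShardInput{
		ConsumerARN: aws.String("arn:aws:kinesis:us-east-1:111122223333:stream/example/consumer/example-consumer:1"), // placeholder
		ShardId:     aws.String("shardId-000000000000"),                                                              // placeholder
		StartingPosition: &kinesis.StartingPosition{
			Type:      aws.String(kinesis.ShardIteratorTypeAtTimestamp),
			Timestamp: aws.Time(time.Now().Add(-15 * time.Minute)),
		},
	}
	fmt.Println(input.String())
}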
+ // // Type is a required field Type *string `type:"string" required:"true" enum:"ShardIteratorType"` } @@ -7761,10 +7988,11 @@ type StreamDescription struct { // * Master key owned by Kinesis Data Streams: alias/aws/kinesis KeyId *string `min:"1" type:"string"` - // The current retention period, in hours. + // The current retention period, in hours. Minimum value of 24. Maximum value + // of 168. // // RetentionPeriodHours is a required field - RetentionPeriodHours *int64 `min:"1" type:"integer" required:"true"` + RetentionPeriodHours *int64 `type:"integer" required:"true"` // The shards that comprise the stream. // @@ -7920,7 +8148,7 @@ type StreamDescriptionSummary struct { // The current retention period, in hours. // // RetentionPeriodHours is a required field - RetentionPeriodHours *int64 `min:"1" type:"integer" required:"true"` + RetentionPeriodHours *int64 `type:"integer" required:"true"` // The Amazon Resource Name (ARN) for the stream being described. // @@ -8029,11 +8257,16 @@ func (s *StreamDescriptionSummary) SetStreamStatus(v string) *StreamDescriptionS } // After you call SubscribeToShard, Kinesis Data Streams sends events of this -// type to your consumer. +// type over an HTTP/2 connection to your consumer. type SubscribeToShardEvent struct { _ struct{} `type:"structure"` - // Use this as StartingSequenceNumber in the next call to SubscribeToShard. + ChildShards []*ChildShard `type:"list"` + + // Use this as SequenceNumber in the next call to SubscribeToShard, with StartingPosition + // set to AT_SEQUENCE_NUMBER or AFTER_SEQUENCE_NUMBER. Use ContinuationSequenceNumber + // for checkpointing because it captures your shard progress even when no data + // is written to the shard. // // ContinuationSequenceNumber is a required field ContinuationSequenceNumber *string `type:"string" required:"true"` @@ -8060,6 +8293,12 @@ func (s SubscribeToShardEvent) GoString() string { return s.String() } +// SetChildShards sets the ChildShards field's value. +func (s *SubscribeToShardEvent) SetChildShards(v []*ChildShard) *SubscribeToShardEvent { + s.ChildShards = v + return s +} + // SetContinuationSequenceNumber sets the ContinuationSequenceNumber field's value. func (s *SubscribeToShardEvent) SetContinuationSequenceNumber(v string) *SubscribeToShardEvent { s.ContinuationSequenceNumber = &v @@ -8095,6 +8334,8 @@ func (s *SubscribeToShardEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *SubscribeToShardEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) var buf bytes.Buffer @@ -8125,6 +8366,7 @@ type SubscribeToShardEventStreamEvent interface { // These events are: // // * SubscribeToShardEvent +// * SubscribeToShardEventStreamUnknownEvent type SubscribeToShardEventStreamReader interface { // Returns a channel of events as they are read from the event stream. 
Events() <-chan SubscribeToShardEventStreamEvent @@ -8199,6 +8441,9 @@ func (r *readSubscribeToShardEventStream) readEventStream() { return default: } + if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok { + continue + } r.err.SetError(err) return } @@ -8238,14 +8483,39 @@ func (u unmarshalerForSubscribeToShardEventStreamEvent) UnmarshalerForEventName( case "ResourceNotFoundException": return newErrorResourceNotFoundException(u.metadata).(eventstreamapi.Unmarshaler), nil default: - return nil, awserr.New( - request.ErrCodeSerialization, - fmt.Sprintf("unknown event type name, %s, for SubscribeToShardEventStream", eventType), - nil, - ) + return &SubscribeToShardEventStreamUnknownEvent{Type: eventType}, nil } } +// SubscribeToShardEventStreamUnknownEvent provides a failsafe event for the +// SubscribeToShardEventStream group of events when an unknown event is received. +type SubscribeToShardEventStreamUnknownEvent struct { + Type string + Message eventstream.Message +} + +// The SubscribeToShardEventStreamUnknownEvent is and event in the SubscribeToShardEventStream +// group of events. +func (s *SubscribeToShardEventStreamUnknownEvent) eventSubscribeToShardEventStream() {} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (e *SubscribeToShardEventStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) ( + msg eventstream.Message, err error, +) { + return e.Message.Clone(), nil +} + +// UnmarshalEvent unmarshals the EventStream Message into the SubscribeToShardEventStreamData value. +// This method is only used internally within the SDK's EventStream handling. +func (e *SubscribeToShardEventStreamUnknownEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + e.Message = msg.Clone() + return nil +} + type SubscribeToShardInput struct { _ struct{} `type:"structure"` @@ -8368,6 +8638,8 @@ func (s *SubscribeToShardOutput) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *SubscribeToShardOutput) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) var buf bytes.Buffer @@ -8429,7 +8701,19 @@ type UpdateShardCountInput struct { // StreamName is a required field StreamName *string `min:"1" type:"string" required:"true"` - // The new number of shards. + // The new number of shards. This value has the following default limits. By + // default, you cannot do the following: + // + // * Set this value to more than double your current shard count for a stream. + // + // * Set this value below half your current shard count for a stream. + // + // * Set this value to more than 500 shards in a stream (the default limit + // for shard count per stream is 500 per account per region), unless you + // request a limit increase. + // + // * Scale a stream with more than 500 shards down unless you set this value + // to less than 500 shards. 
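With the unknown-event fallback added above, unrecognized event names now surface as SubscribeToShardEventStreamUnknownEvent values instead of terminating the stream with a serialization error. A sketch of a tolerant event loop:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/kinesis"
)

// handleEvents drains a SubscribeToShard event stream, tolerating event types
// this SDK version does not model yet.
func handleEvents(es *kinesis.SubscribeToShardEventStream) {
	for ev := range es.Events() {
		switch e := ev.(type) {
		case *kinesis.SubscribeToShardEvent:
			log.Printf("%d records", len(e.Records))
		case *kinesis.SubscribeToShardEventStreamUnknownEvent:
			// Event types added by the service after this SDK release land
			// here instead of shutting the stream down with an error.
			log.Printf("skipping unknown event type %q", e.Type)
		}
	}
	if err := es.Err(); err != nil {
		log.Printf("stream error: %v", err)
	}
}

func main() {
	// handleEvents would be called with the EventStream from a SubscribeToShard
	// response; see the subscription sketch earlier in this section.
	_ = handleEvents
}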
// // TargetShardCount is a required field TargetShardCount *int64 `min:"1" type:"integer" required:"true"` @@ -8540,6 +8824,15 @@ const ( ConsumerStatusActive = "ACTIVE" ) +// ConsumerStatus_Values returns all elements of the ConsumerStatus enum +func ConsumerStatus_Values() []string { + return []string{ + ConsumerStatusCreating, + ConsumerStatusDeleting, + ConsumerStatusActive, + } +} + const ( // EncryptionTypeNone is a EncryptionType enum value EncryptionTypeNone = "NONE" @@ -8548,6 +8841,14 @@ const ( EncryptionTypeKms = "KMS" ) +// EncryptionType_Values returns all elements of the EncryptionType enum +func EncryptionType_Values() []string { + return []string{ + EncryptionTypeNone, + EncryptionTypeKms, + } +} + const ( // MetricsNameIncomingBytes is a MetricsName enum value MetricsNameIncomingBytes = "IncomingBytes" @@ -8574,11 +8875,64 @@ const ( MetricsNameAll = "ALL" ) +// MetricsName_Values returns all elements of the MetricsName enum +func MetricsName_Values() []string { + return []string{ + MetricsNameIncomingBytes, + MetricsNameIncomingRecords, + MetricsNameOutgoingBytes, + MetricsNameOutgoingRecords, + MetricsNameWriteProvisionedThroughputExceeded, + MetricsNameReadProvisionedThroughputExceeded, + MetricsNameIteratorAgeMilliseconds, + MetricsNameAll, + } +} + const ( // ScalingTypeUniformScaling is a ScalingType enum value ScalingTypeUniformScaling = "UNIFORM_SCALING" ) +// ScalingType_Values returns all elements of the ScalingType enum +func ScalingType_Values() []string { + return []string{ + ScalingTypeUniformScaling, + } +} + +const ( + // ShardFilterTypeAfterShardId is a ShardFilterType enum value + ShardFilterTypeAfterShardId = "AFTER_SHARD_ID" + + // ShardFilterTypeAtTrimHorizon is a ShardFilterType enum value + ShardFilterTypeAtTrimHorizon = "AT_TRIM_HORIZON" + + // ShardFilterTypeFromTrimHorizon is a ShardFilterType enum value + ShardFilterTypeFromTrimHorizon = "FROM_TRIM_HORIZON" + + // ShardFilterTypeAtLatest is a ShardFilterType enum value + ShardFilterTypeAtLatest = "AT_LATEST" + + // ShardFilterTypeAtTimestamp is a ShardFilterType enum value + ShardFilterTypeAtTimestamp = "AT_TIMESTAMP" + + // ShardFilterTypeFromTimestamp is a ShardFilterType enum value + ShardFilterTypeFromTimestamp = "FROM_TIMESTAMP" +) + +// ShardFilterType_Values returns all elements of the ShardFilterType enum +func ShardFilterType_Values() []string { + return []string{ + ShardFilterTypeAfterShardId, + ShardFilterTypeAtTrimHorizon, + ShardFilterTypeFromTrimHorizon, + ShardFilterTypeAtLatest, + ShardFilterTypeAtTimestamp, + ShardFilterTypeFromTimestamp, + } +} + const ( // ShardIteratorTypeAtSequenceNumber is a ShardIteratorType enum value ShardIteratorTypeAtSequenceNumber = "AT_SEQUENCE_NUMBER" @@ -8596,6 +8950,17 @@ const ( ShardIteratorTypeAtTimestamp = "AT_TIMESTAMP" ) +// ShardIteratorType_Values returns all elements of the ShardIteratorType enum +func ShardIteratorType_Values() []string { + return []string{ + ShardIteratorTypeAtSequenceNumber, + ShardIteratorTypeAfterSequenceNumber, + ShardIteratorTypeTrimHorizon, + ShardIteratorTypeLatest, + ShardIteratorTypeAtTimestamp, + } +} + const ( // StreamStatusCreating is a StreamStatus enum value StreamStatusCreating = "CREATING" @@ -8609,3 +8974,13 @@ const ( // StreamStatusUpdating is a StreamStatus enum value StreamStatusUpdating = "UPDATING" ) + +// StreamStatus_Values returns all elements of the StreamStatus enum +func StreamStatus_Values() []string { + return []string{ + StreamStatusCreating, + StreamStatusDeleting, + 
StreamStatusActive, + StreamStatusUpdating, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/errors.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/errors.go index ba7c89dba..da7880ee5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesis/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/errors.go @@ -22,6 +22,9 @@ const ( // ErrCodeInternalFailureException for service response error code // "InternalFailureException". + // + // The processing of the request failed because of an unknown error, exception, + // or failure. ErrCodeInternalFailureException = "InternalFailureException" // ErrCodeInvalidArgumentException for service response error code @@ -50,7 +53,7 @@ const ( // // The request was rejected because the state of the specified resource isn't // valid for this request. For more information, see How Key State Affects Use - // of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + // of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. ErrCodeKMSInvalidStateException = "KMSInvalidStateException" @@ -71,7 +74,7 @@ const ( // "KMSThrottlingException". // // The request was denied due to request throttling. For more information about - // throttling, see Limits (http://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) + // throttling, see Limits (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second) // in the AWS Key Management Service Developer Guide. ErrCodeKMSThrottlingException = "KMSThrottlingException" @@ -87,9 +90,9 @@ const ( // // The request rate for the stream is too high, or the requested data is too // large for the available throughput. Reduce the frequency or size of your - // requests. For more information, see Streams Limits (http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) + // requests. For more information, see Streams Limits (https://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html) // in the Amazon Kinesis Data Streams Developer Guide, and Error Retries and - // Exponential Backoff in AWS (http://docs.aws.amazon.com/general/latest/gr/api-retries.html) + // Exponential Backoff in AWS (https://docs.aws.amazon.com/general/latest/gr/api-retries.html) // in the AWS General Reference. 
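// A minimal sketch of inspecting these error codes through awserr at a caller
// such as a GetRecords loop; the fixed one-second backoff is illustrative only,
// not a recommended retry policy.
package example

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func handleKinesisError(err error) {
	if err == nil {
		return
	}
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case kinesis.ErrCodeProvisionedThroughputExceededException:
			// Back off before retrying, per the guidance in the comment above.
			log.Printf("throughput exceeded: %s; backing off", aerr.Message())
			time.Sleep(time.Second)
		case kinesis.ErrCodeKMSThrottlingException:
			log.Printf("KMS throttled the request: %s", aerr.Message())
		default:
			log.Printf("unhandled Kinesis error %s: %s", aerr.Code(), aerr.Message())
		}
		return
	}
	log.Printf("non-AWS error: %v", err)
}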
ErrCodeProvisionedThroughputExceededException = "ProvisionedThroughputExceededException" diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go index e3c0d7024..71f51b328 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/api.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/api.go index 7e8df003f..f527facda 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/api.go @@ -3197,8 +3197,8 @@ func (s *CloudWatchLoggingOptionUpdate) SetRoleARNUpdate(v string) *CloudWatchLo // User-provided application code (query) is invalid. This can be a simple syntax // error. type CodeValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Test Message_ *string `locationName:"message" type:"string"` @@ -3216,17 +3216,17 @@ func (s CodeValidationException) GoString() string { func newErrorCodeValidationException(v protocol.ResponseMetadata) error { return &CodeValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CodeValidationException) Code() string { +func (s *CodeValidationException) Code() string { return "CodeValidationException" } // Message returns the exception's message. -func (s CodeValidationException) Message() string { +func (s *CodeValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3234,30 +3234,30 @@ func (s CodeValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CodeValidationException) OrigErr() error { +func (s *CodeValidationException) OrigErr() error { return nil } -func (s CodeValidationException) Error() string { +func (s *CodeValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CodeValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CodeValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CodeValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *CodeValidationException) RequestID() string { + return s.RespMetadata.RequestID } // Exception thrown as a result of concurrent modification to an application. // For example, two individuals attempting to edit the same application at the // same time. 
type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3274,17 +3274,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. -func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3292,22 +3292,22 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } // TBD @@ -5125,8 +5125,8 @@ func (s *InputUpdate) SetNamePrefixUpdate(v string) *InputUpdate { // User-provided application configuration is not valid. type InvalidApplicationConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // test Message_ *string `locationName:"message" type:"string"` @@ -5144,17 +5144,17 @@ func (s InvalidApplicationConfigurationException) GoString() string { func newErrorInvalidApplicationConfigurationException(v protocol.ResponseMetadata) error { return &InvalidApplicationConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidApplicationConfigurationException) Code() string { +func (s *InvalidApplicationConfigurationException) Code() string { return "InvalidApplicationConfigurationException" } // Message returns the exception's message. -func (s InvalidApplicationConfigurationException) Message() string { +func (s *InvalidApplicationConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5162,28 +5162,28 @@ func (s InvalidApplicationConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidApplicationConfigurationException) OrigErr() error { +func (s *InvalidApplicationConfigurationException) OrigErr() error { return nil } -func (s InvalidApplicationConfigurationException) Error() string { +func (s *InvalidApplicationConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidApplicationConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidApplicationConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidApplicationConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidApplicationConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // Specified input parameter value is invalid. type InvalidArgumentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5200,17 +5200,17 @@ func (s InvalidArgumentException) GoString() string { func newErrorInvalidArgumentException(v protocol.ResponseMetadata) error { return &InvalidArgumentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArgumentException) Code() string { +func (s *InvalidArgumentException) Code() string { return "InvalidArgumentException" } // Message returns the exception's message. -func (s InvalidArgumentException) Message() string { +func (s *InvalidArgumentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5218,22 +5218,22 @@ func (s InvalidArgumentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArgumentException) OrigErr() error { +func (s *InvalidArgumentException) OrigErr() error { return nil } -func (s InvalidArgumentException) Error() string { +func (s *InvalidArgumentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArgumentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArgumentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArgumentException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArgumentException) RequestID() string { + return s.RespMetadata.RequestID } // Provides additional mapping information when JSON is the record format on @@ -6047,8 +6047,8 @@ func (s *LambdaOutputUpdate) SetRoleARNUpdate(v string) *LambdaOutputUpdate { // Exceeded the number of applications allowed. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6065,17 +6065,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6083,22 +6083,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListApplicationsInput struct { @@ -6976,8 +6976,8 @@ func (s *ReferenceDataSourceUpdate) SetTableNameUpdate(v string) *ReferenceDataS // Application is not available for this operation. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6994,17 +6994,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7012,28 +7012,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // Specified application can't be found. 
type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7050,17 +7050,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7068,22 +7068,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Discovery failed to get a record from the streaming source because of the @@ -7091,8 +7091,8 @@ func (s ResourceNotFoundException) RequestID() string { // see GetRecords (https://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) // in the Amazon Kinesis Streams API Reference. type ResourceProvisionedThroughputExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7109,17 +7109,17 @@ func (s ResourceProvisionedThroughputExceededException) GoString() string { func newErrorResourceProvisionedThroughputExceededException(v protocol.ResponseMetadata) error { return &ResourceProvisionedThroughputExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceProvisionedThroughputExceededException) Code() string { +func (s *ResourceProvisionedThroughputExceededException) Code() string { return "ResourceProvisionedThroughputExceededException" } // Message returns the exception's message. -func (s ResourceProvisionedThroughputExceededException) Message() string { +func (s *ResourceProvisionedThroughputExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7127,22 +7127,22 @@ func (s ResourceProvisionedThroughputExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceProvisionedThroughputExceededException) OrigErr() error { +func (s *ResourceProvisionedThroughputExceededException) OrigErr() error { return nil } -func (s ResourceProvisionedThroughputExceededException) Error() string { +func (s *ResourceProvisionedThroughputExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceProvisionedThroughputExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceProvisionedThroughputExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceProvisionedThroughputExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceProvisionedThroughputExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Provides a description of an Amazon S3 data source, including the Amazon @@ -7425,8 +7425,8 @@ func (s *S3ReferenceDataSourceUpdate) SetReferenceRoleARNUpdate(v string) *S3Ref // The service is unavailable. Back off and retry the operation. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7443,17 +7443,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7461,22 +7461,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // Describes the format of the data in the streaming source, and how each data @@ -7839,8 +7839,8 @@ func (s TagResourceOutput) GoString() string { // Note that the maximum number of application tags includes system tags. The // maximum number of user-defined application tags is 50. 
type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7857,17 +7857,17 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7875,29 +7875,29 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { +func (s *TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *TooManyTagsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID } // Data format is not valid. Amazon Kinesis Analytics is not able to detect // schema for the given streaming source. type UnableToDetectSchemaException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -7918,17 +7918,17 @@ func (s UnableToDetectSchemaException) GoString() string { func newErrorUnableToDetectSchemaException(v protocol.ResponseMetadata) error { return &UnableToDetectSchemaException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnableToDetectSchemaException) Code() string { +func (s *UnableToDetectSchemaException) Code() string { return "UnableToDetectSchemaException" } // Message returns the exception's message. -func (s UnableToDetectSchemaException) Message() string { +func (s *UnableToDetectSchemaException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7936,29 +7936,29 @@ func (s UnableToDetectSchemaException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnableToDetectSchemaException) OrigErr() error { +func (s *UnableToDetectSchemaException) OrigErr() error { return nil } -func (s UnableToDetectSchemaException) Error() string { +func (s *UnableToDetectSchemaException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s UnableToDetectSchemaException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnableToDetectSchemaException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnableToDetectSchemaException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnableToDetectSchemaException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because a specified parameter is not supported or // a specified resource is not valid for this operation. type UnsupportedOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7975,17 +7975,17 @@ func (s UnsupportedOperationException) GoString() string { func newErrorUnsupportedOperationException(v protocol.ResponseMetadata) error { return &UnsupportedOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedOperationException) Code() string { +func (s *UnsupportedOperationException) Code() string { return "UnsupportedOperationException" } // Message returns the exception's message. -func (s UnsupportedOperationException) Message() string { +func (s *UnsupportedOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7993,22 +7993,22 @@ func (s UnsupportedOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedOperationException) OrigErr() error { +func (s *UnsupportedOperationException) OrigErr() error { return nil } -func (s UnsupportedOperationException) Error() string { +func (s *UnsupportedOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
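// A minimal sketch of matching the modeled exception types above, which now use
// the exported RespMetadata field and pointer receivers and so satisfy
// awserr.Error/awserr.RequestFailure and work with errors.As; the application
// name is a placeholder.
package main

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisanalytics"
)

func main() {
	svc := kinesisanalytics.New(session.Must(session.NewSession()))

	_, err := svc.DescribeApplication(&kinesisanalytics.DescribeApplicationInput{
		ApplicationName: aws.String("example-app"), // placeholder
	})

	var nfe *kinesisanalytics.ResourceNotFoundException
	if errors.As(err, &nfe) {
		// StatusCode and RequestID now read from the exported RespMetadata.
		log.Printf("application not found (HTTP %d, request %s)",
			nfe.StatusCode(), nfe.RequestID())
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}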
-func (s UnsupportedOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedOperationException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -8195,6 +8195,18 @@ const ( ApplicationStatusUpdating = "UPDATING" ) +// ApplicationStatus_Values returns all elements of the ApplicationStatus enum +func ApplicationStatus_Values() []string { + return []string{ + ApplicationStatusDeleting, + ApplicationStatusStarting, + ApplicationStatusStopping, + ApplicationStatusReady, + ApplicationStatusRunning, + ApplicationStatusUpdating, + } +} + const ( // InputStartingPositionNow is a InputStartingPosition enum value InputStartingPositionNow = "NOW" @@ -8206,6 +8218,15 @@ const ( InputStartingPositionLastStoppedPoint = "LAST_STOPPED_POINT" ) +// InputStartingPosition_Values returns all elements of the InputStartingPosition enum +func InputStartingPosition_Values() []string { + return []string{ + InputStartingPositionNow, + InputStartingPositionTrimHorizon, + InputStartingPositionLastStoppedPoint, + } +} + const ( // RecordFormatTypeJson is a RecordFormatType enum value RecordFormatTypeJson = "JSON" @@ -8213,3 +8234,11 @@ const ( // RecordFormatTypeCsv is a RecordFormatType enum value RecordFormatTypeCsv = "CSV" ) + +// RecordFormatType_Values returns all elements of the RecordFormatType enum +func RecordFormatType_Values() []string { + return []string{ + RecordFormatTypeJson, + RecordFormatTypeCsv, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go index 2c6645729..7de10edf9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/api.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/api.go index 1428e9372..e70c4066a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/api.go @@ -154,7 +154,7 @@ func (c *KinesisAnalyticsV2) AddApplicationInputRequest(input *AddApplicationInp // AddApplicationInput API operation for Amazon Kinesis Analytics. // -// Adds a streaming source to your SQL-based Amazon Kinesis Data Analytics application. +// Adds a streaming source to your SQL-based Kinesis Data Analytics application. // // You can add a streaming source when you create an application, or you can // use this operation to add a streaming source after you create an application. @@ -259,10 +259,10 @@ func (c *KinesisAnalyticsV2) AddApplicationInputProcessingConfigurationRequest(i // AddApplicationInputProcessingConfiguration API operation for Amazon Kinesis Analytics. // -// Adds an InputProcessingConfiguration to an SQL-based Kinesis Data Analytics +// Adds an InputProcessingConfiguration to a SQL-based Kinesis Data Analytics // application. An input processor pre-processes records on the input stream // before the application's SQL code executes. 
Currently, the only input processor -// available is AWS Lambda (https://aws.amazon.com/documentation/lambda/). +// available is AWS Lambda (https://docs.aws.amazon.com/lambda/). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -355,8 +355,7 @@ func (c *KinesisAnalyticsV2) AddApplicationOutputRequest(input *AddApplicationOu // AddApplicationOutput API operation for Amazon Kinesis Analytics. // -// Adds an external destination to your SQL-based Amazon Kinesis Data Analytics -// application. +// Adds an external destination to your SQL-based Kinesis Data Analytics application. // // If you want Kinesis Data Analytics to deliver data from an in-application // stream within your application to an external destination (such as an Kinesis @@ -463,8 +462,8 @@ func (c *KinesisAnalyticsV2) AddApplicationReferenceDataSourceRequest(input *Add // AddApplicationReferenceDataSource API operation for Amazon Kinesis Analytics. // -// Adds a reference data source to an existing SQL-based Amazon Kinesis Data -// Analytics application. +// Adds a reference data source to an existing SQL-based Kinesis Data Analytics +// application. // // Kinesis Data Analytics reads reference data (that is, an Amazon S3 object) // and creates an in-application table within your application. In the request, @@ -597,6 +596,9 @@ func (c *KinesisAnalyticsV2) AddApplicationVpcConfigurationRequest(input *AddApp // This error can be the result of attempting to modify an application without // using the current application ID. // +// * InvalidApplicationConfigurationException +// The user-provided application configuration is not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/AddApplicationVpcConfiguration func (c *KinesisAnalyticsV2) AddApplicationVpcConfiguration(input *AddApplicationVpcConfigurationInput) (*AddApplicationVpcConfigurationOutput, error) { req, out := c.AddApplicationVpcConfigurationRequest(input) @@ -663,9 +665,8 @@ func (c *KinesisAnalyticsV2) CreateApplicationRequest(input *CreateApplicationIn // CreateApplication API operation for Amazon Kinesis Analytics. // -// Creates an Amazon Kinesis Data Analytics application. For information about -// creating a Kinesis Data Analytics application, see Creating an Application -// (https://docs.aws.amazon.com/kinesisanalytics/latest/java/getting-started.html). +// Creates a Kinesis Data Analytics application. For information about creating +// a Kinesis Data Analytics application, see Creating an Application (https://docs.aws.amazon.com/kinesisanalytics/latest/java/getting-started.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -797,6 +798,9 @@ func (c *KinesisAnalyticsV2) CreateApplicationSnapshotRequest(input *CreateAppli // * InvalidRequestException // The request JSON is not valid for the operation. // +// * InvalidApplicationConfigurationException +// The user-provided application configuration is not valid. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/CreateApplicationSnapshot func (c *KinesisAnalyticsV2) CreateApplicationSnapshot(input *CreateApplicationSnapshotInput) (*CreateApplicationSnapshotOutput, error) { req, out := c.CreateApplicationSnapshotRequest(input) @@ -961,8 +965,7 @@ func (c *KinesisAnalyticsV2) DeleteApplicationCloudWatchLoggingOptionRequest(inp // DeleteApplicationCloudWatchLoggingOption API operation for Amazon Kinesis Analytics. // -// Deletes an Amazon CloudWatch log stream from an Amazon Kinesis Data Analytics -// application. +// Deletes an Amazon CloudWatch log stream from an Kinesis Data Analytics application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1151,7 +1154,7 @@ func (c *KinesisAnalyticsV2) DeleteApplicationOutputRequest(input *DeleteApplica // DeleteApplicationOutput API operation for Amazon Kinesis Analytics. // -// Deletes the output destination configuration from your SQL-based Amazon Kinesis +// Deletes the output destination configuration from your SQL-based Kinesis // Data Analytics application's configuration. Kinesis Data Analytics will no // longer write data from the corresponding in-application stream to the external // output destination. @@ -1248,7 +1251,7 @@ func (c *KinesisAnalyticsV2) DeleteApplicationReferenceDataSourceRequest(input * // DeleteApplicationReferenceDataSource API operation for Amazon Kinesis Analytics. // // Deletes a reference data source configuration from the specified SQL-based -// Amazon Kinesis Data Analytics application's configuration. +// Kinesis Data Analytics application's configuration. // // If the application is running, Kinesis Data Analytics immediately removes // the in-application table that you created using the AddApplicationReferenceDataSource @@ -1462,6 +1465,9 @@ func (c *KinesisAnalyticsV2) DeleteApplicationVpcConfigurationRequest(input *Del // This error can be the result of attempting to modify an application without // using the current application ID. // +// * InvalidApplicationConfigurationException +// The user-provided application configuration is not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/DeleteApplicationVpcConfiguration func (c *KinesisAnalyticsV2) DeleteApplicationVpcConfiguration(input *DeleteApplicationVpcConfigurationInput) (*DeleteApplicationVpcConfigurationOutput, error) { req, out := c.DeleteApplicationVpcConfigurationRequest(input) @@ -1528,7 +1534,7 @@ func (c *KinesisAnalyticsV2) DescribeApplicationRequest(input *DescribeApplicati // DescribeApplication API operation for Amazon Kinesis Analytics. // -// Returns information about a specific Amazon Kinesis Data Analytics application. +// Returns information about a specific Kinesis Data Analytics application. // // If you want to retrieve a list of all applications in your account, use the // ListApplications operation. @@ -1702,11 +1708,11 @@ func (c *KinesisAnalyticsV2) DiscoverInputSchemaRequest(input *DiscoverInputSche // DiscoverInputSchema API operation for Amazon Kinesis Analytics. // -// Infers a schema for an SQL-based Amazon Kinesis Data Analytics application -// by evaluating sample records on the specified streaming source (Kinesis data -// stream or Kinesis Data Firehose delivery stream) or Amazon S3 object. 
In -// the response, the operation returns the inferred schema and also the sample -// records that the operation used to infer the schema. +// Infers a schema for a SQL-based Kinesis Data Analytics application by evaluating +// sample records on the specified streaming source (Kinesis data stream or +// Kinesis Data Firehose delivery stream) or Amazon S3 object. In the response, +// the operation returns the inferred schema and also the sample records that +// the operation used to infer the schema. // // You can use the inferred schema when configuring a streaming source for your // application. When you create an application using the Kinesis Data Analytics @@ -1725,12 +1731,12 @@ func (c *KinesisAnalyticsV2) DiscoverInputSchemaRequest(input *DiscoverInputSche // The specified input parameter value is not valid. // // * UnableToDetectSchemaException -// The data format is not valid. Amazon Kinesis Data Analytics cannot detect -// the schema for the given streaming source. +// The data format is not valid. Kinesis Data Analytics cannot detect the schema +// for the given streaming source. // // * ResourceProvisionedThroughputExceededException // Discovery failed to get a record from the streaming source because of the -// Amazon Kinesis Streams ProvisionedThroughputExceededException. For more information, +// Kinesis Streams ProvisionedThroughputExceededException. For more information, // see GetRecords (http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) // in the Amazon Kinesis Streams API Reference. // @@ -1889,9 +1895,9 @@ func (c *KinesisAnalyticsV2) ListApplicationsRequest(input *ListApplicationsInpu // ListApplications API operation for Amazon Kinesis Analytics. // -// Returns a list of Amazon Kinesis Data Analytics applications in your account. -// For each application, the response includes the application name, Amazon -// Resource Name (ARN), and status. +// Returns a list of Kinesis Data Analytics applications in your account. For +// each application, the response includes the application name, Amazon Resource +// Name (ARN), and status. // // If you want detailed information about a specific application, use DescribeApplication. // @@ -2061,8 +2067,8 @@ func (c *KinesisAnalyticsV2) StartApplicationRequest(input *StartApplicationInpu // StartApplication API operation for Amazon Kinesis Analytics. // -// Starts the specified Amazon Kinesis Data Analytics application. After creating -// an application, you must exclusively call this operation to start your application. +// Starts the specified Kinesis Data Analytics application. After creating an +// application, you must exclusively call this operation to start your application. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2181,6 +2187,11 @@ func (c *KinesisAnalyticsV2) StopApplicationRequest(input *StopApplicationInput) // * InvalidApplicationConfigurationException // The user-provided application configuration is not valid. // +// * ConcurrentModificationException +// Exception thrown as a result of concurrent modifications to an application. +// This error can be the result of attempting to modify an application without +// using the current application ID. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23/StopApplication func (c *KinesisAnalyticsV2) StopApplication(input *StopApplicationInput) (*StopApplicationOutput, error) { req, out := c.StopApplicationRequest(input) @@ -2248,10 +2259,10 @@ func (c *KinesisAnalyticsV2) TagResourceRequest(input *TagResourceInput) (req *r // TagResource API operation for Amazon Kinesis Analytics. // -// Adds one or more key-value tags to a Kinesis Analytics application. Note -// that the maximum number of application tags includes system tags. The maximum -// number of user-defined application tags is 50. For more information, see -// Using Tagging (https://docs.aws.amazon.com/kinesisanalytics/latest/java/how-tagging.html). +// Adds one or more key-value tags to a Kinesis Data Analytics application. +// Note that the maximum number of application tags includes system tags. The +// maximum number of user-defined application tags is 50. For more information, +// see Using Tagging (https://docs.aws.amazon.com/kinesisanalytics/latest/java/how-tagging.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2347,8 +2358,8 @@ func (c *KinesisAnalyticsV2) UntagResourceRequest(input *UntagResourceInput) (re // UntagResource API operation for Amazon Kinesis Analytics. // -// Removes one or more tags from a Kinesis Analytics application. For more information, -// see Using Tagging (https://docs.aws.amazon.com/kinesisanalytics/latest/java/how-tagging.html). +// Removes one or more tags from a Kinesis Data Analytics application. For more +// information, see Using Tagging (https://docs.aws.amazon.com/kinesisanalytics/latest/java/how-tagging.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2443,13 +2454,16 @@ func (c *KinesisAnalyticsV2) UpdateApplicationRequest(input *UpdateApplicationIn // UpdateApplication API operation for Amazon Kinesis Analytics. // -// Updates an existing Amazon Kinesis Data Analytics application. Using this -// operation, you can update application code, input configuration, and output -// configuration. +// Updates an existing Kinesis Data Analytics application. Using this operation, +// you can update application code, input configuration, and output configuration. // // Kinesis Data Analytics updates the ApplicationVersionId each time you update // your application. // +// You cannot update the RuntimeEnvironment of an existing application. If you +// need to update an application's RuntimeEnvironment, you must delete the application +// and create it again. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2856,8 +2870,8 @@ type AddApplicationInputProcessingConfigurationOutput struct { ApplicationVersionId *int64 `min:"1" type:"long"` // The input ID that is associated with the application input. This is the ID - // that Amazon Kinesis Data Analytics assigns to each input configuration that - // you add to your application. + // that Kinesis Data Analytics assigns to each input configuration that you + // add to your application. 
InputId *string `min:"1" type:"string"` // The description of the preprocessor that executes on records in this input @@ -3115,8 +3129,8 @@ type AddApplicationReferenceDataSourceOutput struct { // The application Amazon Resource Name (ARN). ApplicationARN *string `min:"1" type:"string"` - // The updated application version ID. Amazon Kinesis Data Analytics increments - // this ID when the application is updated. + // The updated application version ID. Kinesis Data Analytics increments this + // ID when the application is updated. ApplicationVersionId *int64 `min:"1" type:"long"` // Describes reference data sources configured for the application. @@ -3159,10 +3173,10 @@ type AddApplicationVpcConfigurationInput struct { // ApplicationName is a required field ApplicationName *string `min:"1" type:"string" required:"true"` - // The version of the application to which you want to add the input processing - // configuration. You can use the DescribeApplication operation to get the current - // application version. If the version specified is not the current version, - // the ConcurrentModificationException is returned. + // The version of the application to which you want to add the VPC configuration. + // You can use the DescribeApplication operation to get the current application + // version. If the version specified is not the current version, the ConcurrentModificationException + // is returned. // // CurrentApplicationVersionId is a required field CurrentApplicationVersionId *int64 `min:"1" type:"long" required:"true"` @@ -3273,7 +3287,7 @@ func (s *AddApplicationVpcConfigurationOutput) SetVpcConfigurationDescription(v return s } -// Describes code configuration for a Java-based Kinesis Data Analytics application. +// Describes code configuration for a Flink-based Kinesis Data Analytics application. type ApplicationCodeConfiguration struct { _ struct{} `type:"structure"` @@ -3326,7 +3340,7 @@ func (s *ApplicationCodeConfiguration) SetCodeContentType(v string) *Application return s } -// Describes code configuration for a Java-based Kinesis Data Analytics application. +// Describes code configuration for a Flink-based Kinesis Data Analytics application. type ApplicationCodeConfigurationDescription struct { _ struct{} `type:"structure"` @@ -3361,7 +3375,8 @@ func (s *ApplicationCodeConfigurationDescription) SetCodeContentType(v string) * return s } -// Describes updates to a Java-based Amazon Kinesis Data Analytics application. +// Describes code configuration updates to a Flink-based Kinesis Data Analytics +// application. type ApplicationCodeConfigurationUpdate struct { _ struct{} `type:"structure"` @@ -3409,28 +3424,28 @@ func (s *ApplicationCodeConfigurationUpdate) SetCodeContentUpdate(v *CodeContent return s } -// Specifies the creation parameters for an Amazon Kinesis Data Analytics application. +// Specifies the creation parameters for a Kinesis Data Analytics application. type ApplicationConfiguration struct { _ struct{} `type:"structure"` - // The code location and type parameters for a Java-based Kinesis Data Analytics + // The code location and type parameters for a Flink-based Kinesis Data Analytics // application. // // ApplicationCodeConfiguration is a required field ApplicationCodeConfiguration *ApplicationCodeConfiguration `type:"structure" required:"true"` - // Describes whether snapshots are enabled for a Java-based Kinesis Data Analytics + // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics // application. 
ApplicationSnapshotConfiguration *ApplicationSnapshotConfiguration `type:"structure"` - // Describes execution properties for a Java-based Kinesis Data Analytics application. + // Describes execution properties for a Flink-based Kinesis Data Analytics application. EnvironmentProperties *EnvironmentProperties `type:"structure"` - // The creation and update parameters for a Java-based Kinesis Data Analytics + // The creation and update parameters for a Flink-based Kinesis Data Analytics // application. FlinkApplicationConfiguration *FlinkApplicationConfiguration `type:"structure"` - // The creation and update parameters for an SQL-based Kinesis Data Analytics + // The creation and update parameters for a SQL-based Kinesis Data Analytics // application. SqlApplicationConfiguration *SqlApplicationConfiguration `type:"structure"` @@ -3533,28 +3548,28 @@ func (s *ApplicationConfiguration) SetVpcConfigurations(v []*VpcConfiguration) * } // Describes details about the application code and starting parameters for -// an Amazon Kinesis Data Analytics application. +// a Kinesis Data Analytics application. type ApplicationConfigurationDescription struct { _ struct{} `type:"structure"` - // The details about the application code for a Java-based Kinesis Data Analytics + // The details about the application code for a Flink-based Kinesis Data Analytics // application. ApplicationCodeConfigurationDescription *ApplicationCodeConfigurationDescription `type:"structure"` - // Describes whether snapshots are enabled for a Java-based Kinesis Data Analytics + // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics // application. ApplicationSnapshotConfigurationDescription *ApplicationSnapshotConfigurationDescription `type:"structure"` - // Describes execution properties for a Java-based Kinesis Data Analytics application. + // Describes execution properties for a Flink-based Kinesis Data Analytics application. EnvironmentPropertyDescriptions *EnvironmentPropertyDescriptions `type:"structure"` - // The details about a Java-based Kinesis Data Analytics application. + // The details about a Flink-based Kinesis Data Analytics application. FlinkApplicationConfigurationDescription *FlinkApplicationConfigurationDescription `type:"structure"` // The details about the starting properties for a Kinesis Data Analytics application. RunConfigurationDescription *RunConfigurationDescription `type:"structure"` - // The details about inputs, outputs, and reference data sources for an SQL-based + // The details about inputs, outputs, and reference data sources for a SQL-based // Kinesis Data Analytics application. SqlApplicationConfigurationDescription *SqlApplicationConfigurationDescription `type:"structure"` @@ -3618,22 +3633,22 @@ func (s *ApplicationConfigurationDescription) SetVpcConfigurationDescriptions(v type ApplicationConfigurationUpdate struct { _ struct{} `type:"structure"` - // Describes updates to a Java-based Kinesis Data Analytics application's code + // Describes updates to a Flink-based Kinesis Data Analytics application's code // configuration. ApplicationCodeConfigurationUpdate *ApplicationCodeConfigurationUpdate `type:"structure"` - // Describes whether snapshots are enabled for a Java-based Kinesis Data Analytics + // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics // application. 
ApplicationSnapshotConfigurationUpdate *ApplicationSnapshotConfigurationUpdate `type:"structure"` - // Describes updates to the environment properties for a Java-based Kinesis + // Describes updates to the environment properties for a Flink-based Kinesis // Data Analytics application. EnvironmentPropertyUpdates *EnvironmentPropertyUpdates `type:"structure"` - // Describes updates to a Java-based Kinesis Data Analytics application's configuration. + // Describes updates to a Flink-based Kinesis Data Analytics application's configuration. FlinkApplicationConfigurationUpdate *FlinkApplicationConfigurationUpdate `type:"structure"` - // Describes updates to an SQL-based Kinesis Data Analytics application's configuration. + // Describes updates to a SQL-based Kinesis Data Analytics application's configuration. SqlApplicationConfigurationUpdate *SqlApplicationConfigurationUpdate `type:"structure"` // Updates to the array of descriptions of VPC configurations available to the @@ -3742,7 +3757,8 @@ type ApplicationDetail struct { // ApplicationARN is a required field ApplicationARN *string `min:"1" type:"string" required:"true"` - // Provides details about the application's SQL or Java code and starting parameters. + // Provides details about the application's Java, SQL, or Scala code and starting + // parameters. ApplicationConfigurationDescription *ApplicationConfigurationDescription `type:"structure"` // The description of the application. @@ -3773,7 +3789,7 @@ type ApplicationDetail struct { // The current timestamp when the application was last updated. LastUpdateTimestamp *time.Time `type:"timestamp"` - // The runtime environment for the application (SQL-1.0 or FLINK-1_6). + // The runtime environment for the application (SQL-1.0, FLINK-1_6, or FLINK-1_8). // // RuntimeEnvironment is a required field RuntimeEnvironment *string `type:"string" required:"true" enum:"RuntimeEnvironment"` @@ -3912,12 +3928,12 @@ func (s *ApplicationRestoreConfiguration) SetSnapshotName(v string) *Application return s } -// Describes whether snapshots are enabled for a Java-based Kinesis Data Analytics +// Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics // application. type ApplicationSnapshotConfiguration struct { _ struct{} `type:"structure"` - // Describes whether snapshots are enabled for a Java-based Kinesis Data Analytics + // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics // application. // // SnapshotsEnabled is a required field @@ -3953,12 +3969,12 @@ func (s *ApplicationSnapshotConfiguration) SetSnapshotsEnabled(v bool) *Applicat return s } -// Describes whether snapshots are enabled for a Java-based Kinesis Data Analytics +// Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics // application. type ApplicationSnapshotConfigurationDescription struct { _ struct{} `type:"structure"` - // Describes whether snapshots are enabled for a Java-based Kinesis Data Analytics + // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics // application. // // SnapshotsEnabled is a required field @@ -3981,12 +3997,12 @@ func (s *ApplicationSnapshotConfigurationDescription) SetSnapshotsEnabled(v bool return s } -// Describes updates to whether snapshots are enabled for a Java-based Kinesis +// Describes updates to whether snapshots are enabled for a Flink-based Kinesis // Data Analytics application. 
type ApplicationSnapshotConfigurationUpdate struct { _ struct{} `type:"structure"` - // Describes updates to whether snapshots are enabled for a Java-based Kinesis + // Describes updates to whether snapshots are enabled for a Flink-based Kinesis // Data Analytics application. // // SnapshotsEnabledUpdate is a required field @@ -4047,7 +4063,7 @@ type ApplicationSummary struct { // ApplicationVersionId is a required field ApplicationVersionId *int64 `min:"1" type:"long" required:"true"` - // The runtime environment for the application (SQL-1.0 or FLINK-1_6). + // The runtime environment for the application (SQL-1.0, FLINK-1_6, or FLINK-1_8). // // RuntimeEnvironment is a required field RuntimeEnvironment *string `type:"string" required:"true" enum:"RuntimeEnvironment"` @@ -4093,10 +4109,10 @@ func (s *ApplicationSummary) SetRuntimeEnvironment(v string) *ApplicationSummary return s } -// For an SQL-based application, provides additional mapping information when -// the record format uses delimiters, such as CSV. For example, the following -// sample records use CSV format, where the records use the '\n' as the row -// delimiter and a comma (",") as the column delimiter: +// For a SQL-based Kinesis Data Analytics application, provides additional mapping +// information when the record format uses delimiters, such as CSV. For example, +// the following sample records use CSV format, where the records use the '\n' +// as the row delimiter and a comma (",") as the column delimiter: // // "name1", "address1" // @@ -4163,8 +4179,8 @@ func (s *CSVMappingParameters) SetRecordRowDelimiter(v string) *CSVMappingParame // Describes an application's checkpointing configuration. Checkpointing is // the process of persisting application state for fault tolerance. For more -// information, see Checkpoints for Fault Tolerance (https://ci.apache.org/projects/flink/flink-docs-release-1.6/concepts/programming-model.html#checkpoints-for-fault-tolerance) -// in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.6/). +// information, see Checkpoints for Fault Tolerance (https://ci.apache.org/projects/flink/flink-docs-release-1.8/concepts/programming-model.html#checkpoints-for-fault-tolerance) +// in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/). type CheckpointConfiguration struct { _ struct{} `type:"structure"` @@ -4175,7 +4191,7 @@ type CheckpointConfiguration struct { // another value using this API or in application code. CheckpointInterval *int64 `min:"1" type:"long"` - // Describes whether checkpointing is enabled for a Java-based Kinesis Data + // Describes whether checkpointing is enabled for a Flink-based Kinesis Data // Analytics application. // // If CheckpointConfiguration.ConfigurationType is DEFAULT, the application @@ -4183,10 +4199,9 @@ type CheckpointConfiguration struct { // to another value using this API or in application code. CheckpointingEnabled *bool `type:"boolean"` - // Describes whether the application uses Amazon Kinesis Data Analytics' default - // checkpointing behavior. You must set this property to CUSTOM in order to - // set the CheckpointingEnabled, CheckpointInterval, or MinPauseBetweenCheckpoints - // parameters. + // Describes whether the application uses Kinesis Data Analytics' default checkpointing + // behavior. You must set this property to CUSTOM in order to set the CheckpointingEnabled, + // CheckpointInterval, or MinPauseBetweenCheckpoints parameters. 
// // If this value is set to DEFAULT, the application will use the following values, // even if they are set to other values using APIs or application code: @@ -4203,8 +4218,8 @@ type CheckpointConfiguration struct { // Describes the minimum time in milliseconds after a checkpoint operation completes // that a new checkpoint operation can start. If a checkpoint operation takes // longer than the CheckpointInterval, the application otherwise performs continual - // checkpoint operations. For more information, see Tuning Checkpointing (https://ci.apache.org/projects/flink/flink-docs-stable/ops/state/large_state_tuning.html#tuning-checkpointing) - // in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.6/). + // checkpoint operations. For more information, see Tuning Checkpointing (https://ci.apache.org/projects/flink/flink-docs-release-1.8/ops/state/large_state_tuning.html#tuning-checkpointing) + // in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/). // // If CheckpointConfiguration.ConfigurationType is DEFAULT, the application // will use a MinPauseBetweenCheckpoints value of 5000, even if this value is @@ -4262,7 +4277,7 @@ func (s *CheckpointConfiguration) SetMinPauseBetweenCheckpoints(v int64) *Checkp return s } -// Describes checkpointing parameters for a Java-based Amazon Kinesis Data Analytics +// Describes checkpointing parameters for a Flink-based Kinesis Data Analytics // application. type CheckpointConfigurationDescription struct { _ struct{} `type:"structure"` @@ -4274,7 +4289,7 @@ type CheckpointConfigurationDescription struct { // another value using this API or in application code. CheckpointInterval *int64 `min:"1" type:"long"` - // Describes whether checkpointing is enabled for a Java-based Kinesis Data + // Describes whether checkpointing is enabled for a Flink-based Kinesis Data // Analytics application. // // If CheckpointConfiguration.ConfigurationType is DEFAULT, the application @@ -4338,8 +4353,8 @@ func (s *CheckpointConfigurationDescription) SetMinPauseBetweenCheckpoints(v int return s } -// Describes updates to the checkpointing parameters for a Java-based Amazon -// Kinesis Data Analytics application. +// Describes updates to the checkpointing parameters for a Flink-based Kinesis +// Data Analytics application. type CheckpointConfigurationUpdate struct { _ struct{} `type:"structure"` @@ -4576,17 +4591,17 @@ func (s *CloudWatchLoggingOptionUpdate) SetLogStreamARNUpdate(v string) *CloudWa } // Specifies either the application code, or the location of the application -// code, for a Java-based Amazon Kinesis Data Analytics application. +// code, for a Flink-based Kinesis Data Analytics application. type CodeContent struct { _ struct{} `type:"structure"` // Information about the Amazon S3 bucket containing the application code. S3ContentLocation *S3ContentLocation `type:"structure"` - // The text-format code for a Java-based Kinesis Data Analytics application. + // The text-format code for a Flink-based Kinesis Data Analytics application. TextContent *string `type:"string"` - // The zip-format code for a Java-based Kinesis Data Analytics application. + // The zip-format code for a Flink-based Kinesis Data Analytics application. // // ZipFileContent is automatically base64 encoded/decoded by the SDK. 
ZipFileContent []byte `type:"blob"` @@ -4635,7 +4650,7 @@ func (s *CodeContent) SetZipFileContent(v []byte) *CodeContent { return s } -// Describes details about the application code for a Java-based Kinesis Data +// Describes details about the application code for a Flink-based Kinesis Data // Analytics application. type CodeContentDescription struct { _ struct{} `type:"structure"` @@ -4689,7 +4704,7 @@ func (s *CodeContentDescription) SetTextContent(v string) *CodeContentDescriptio return s } -// Describes an update to the code of a Java-based Kinesis Data Analytics application. +// Describes an update to the code of a Flink-based Kinesis Data Analytics application. type CodeContentUpdate struct { _ struct{} `type:"structure"` @@ -4751,8 +4766,8 @@ func (s *CodeContentUpdate) SetZipFileContentUpdate(v []byte) *CodeContentUpdate // The user-provided application code (query) is not valid. This can be a simple // syntax error. type CodeValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4769,17 +4784,17 @@ func (s CodeValidationException) GoString() string { func newErrorCodeValidationException(v protocol.ResponseMetadata) error { return &CodeValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CodeValidationException) Code() string { +func (s *CodeValidationException) Code() string { return "CodeValidationException" } // Message returns the exception's message. -func (s CodeValidationException) Message() string { +func (s *CodeValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4787,30 +4802,30 @@ func (s CodeValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CodeValidationException) OrigErr() error { +func (s *CodeValidationException) OrigErr() error { return nil } -func (s CodeValidationException) Error() string { +func (s *CodeValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CodeValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CodeValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CodeValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *CodeValidationException) RequestID() string { + return s.RespMetadata.RequestID } // Exception thrown as a result of concurrent modifications to an application. // This error can be the result of attempting to modify an application without // using the current application ID. type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4827,17 +4842,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. -func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4845,22 +4860,22 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } type CreateApplicationInput struct { @@ -4881,7 +4896,7 @@ type CreateApplicationInput struct { // application configuration errors. CloudWatchLoggingOptions []*CloudWatchLoggingOption `type:"list"` - // The runtime environment for the application (SQL-1.0 or FLINK-1_6). + // The runtime environment for the application (SQL-1.0, FLINK-1_6, or FLINK-1_8). // // RuntimeEnvironment is a required field RuntimeEnvironment *string `type:"string" required:"true" enum:"RuntimeEnvironment"` @@ -5984,7 +5999,7 @@ func (s *DescribeApplicationSnapshotOutput) SetSnapshotDetails(v *SnapshotDetail } // Describes the data format when records are written to the destination in -// an SQL-based Amazon Kinesis Data Analytics application. +// a SQL-based Kinesis Data Analytics application. type DestinationSchema struct { _ struct{} `type:"structure"` @@ -6169,7 +6184,7 @@ func (s *DiscoverInputSchemaOutput) SetRawInputRecords(v []*string) *DiscoverInp return s } -// Describes execution properties for a Java-based Kinesis Data Analytics application. +// Describes execution properties for a Flink-based Kinesis Data Analytics application. type EnvironmentProperties struct { _ struct{} `type:"structure"` @@ -6218,7 +6233,7 @@ func (s *EnvironmentProperties) SetPropertyGroups(v []*PropertyGroup) *Environme return s } -// Describes the execution properties for a Java-based Amazon Kinesis Data Analytics +// Describes the execution properties for a Flink-based Kinesis Data Analytics // application. type EnvironmentPropertyDescriptions struct { _ struct{} `type:"structure"` @@ -6243,8 +6258,8 @@ func (s *EnvironmentPropertyDescriptions) SetPropertyGroupDescriptions(v []*Prop return s } -// Describes updates to the execution property groups for a Java-based Amazon -// Kinesis Data Analytics application. +// Describes updates to the execution property groups for a Flink-based Kinesis +// Data Analytics application. 
type EnvironmentPropertyUpdates struct { _ struct{} `type:"structure"` @@ -6293,15 +6308,15 @@ func (s *EnvironmentPropertyUpdates) SetPropertyGroups(v []*PropertyGroup) *Envi return s } -// Describes configuration parameters for a Java-based Amazon Kinesis Data Analytics +// Describes configuration parameters for a Flink-based Kinesis Data Analytics // application. type FlinkApplicationConfiguration struct { _ struct{} `type:"structure"` // Describes an application's checkpointing configuration. Checkpointing is // the process of persisting application state for fault tolerance. For more - // information, see Checkpoints for Fault Tolerance (https://ci.apache.org/projects/flink/flink-docs-release-1.6/concepts/programming-model.html#checkpoints-for-fault-tolerance) - // in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.6/). + // information, see Checkpoints for Fault Tolerance (https://ci.apache.org/projects/flink/flink-docs-release-1.8/concepts/programming-model.html#checkpoints-for-fault-tolerance) + // in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/). CheckpointConfiguration *CheckpointConfiguration `type:"structure"` // Describes configuration parameters for Amazon CloudWatch logging for an application. @@ -6364,7 +6379,7 @@ func (s *FlinkApplicationConfiguration) SetParallelismConfiguration(v *Paralleli return s } -// Describes configuration parameters for a Java-based Amazon Kinesis Data Analytics +// Describes configuration parameters for a Flink-based Kinesis Data Analytics // application. type FlinkApplicationConfigurationDescription struct { _ struct{} `type:"structure"` @@ -6374,8 +6389,8 @@ type FlinkApplicationConfigurationDescription struct { CheckpointConfigurationDescription *CheckpointConfigurationDescription `type:"structure"` // The job plan for an application. For more information about the job plan, - // see Jobs and Scheduling (https://ci.apache.org/projects/flink/flink-docs-stable/internals/job_scheduling.html) - // in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.6/). + // see Jobs and Scheduling (https://ci.apache.org/projects/flink/flink-docs-release-1.8/internals/job_scheduling.html) + // in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/). // To retrieve the job plan for the application, use the DescribeApplicationRequest$IncludeAdditionalDetails // parameter of the DescribeApplication operation. JobPlanDescription *string `type:"string"` @@ -6421,8 +6436,8 @@ func (s *FlinkApplicationConfigurationDescription) SetParallelismConfigurationDe return s } -// Describes updates to the configuration parameters for a Java-based Amazon -// Kinesis Data Analytics application. +// Describes updates to the configuration parameters for a Flink-based Kinesis +// Data Analytics application. type FlinkApplicationConfigurationUpdate struct { _ struct{} `type:"structure"` @@ -6487,17 +6502,21 @@ func (s *FlinkApplicationConfigurationUpdate) SetParallelismConfigurationUpdate( return s } -// Describes the starting parameters for an Apache Flink-based Kinesis Data -// Analytics application. +// Describes the starting parameters for a Flink-based Kinesis Data Analytics +// application. 
type FlinkRunConfiguration struct { _ struct{} `type:"structure"` - // When restoring from a savepoint, specifies whether the runtime is allowed + // When restoring from a snapshot, specifies whether the runtime is allowed // to skip a state that cannot be mapped to the new program. This will happen - // if the program is updated between savepoints to remove stateful parameters, - // and state data in the savepoint no longer corresponds to valid application + // if the program is updated between snapshots to remove stateful parameters, + // and state data in the snapshot no longer corresponds to valid application // data. For more information, see Allowing Non-Restored State (https://ci.apache.org/projects/flink/flink-docs-release-1.8/ops/state/savepoints.html#allowing-non-restored-state) // in the Apache Flink documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/). + // + // This value defaults to false. If you update your application without specifying + // this parameter, AllowNonRestoredState will be set to false, even if it was + // previously set to true. AllowNonRestoredState *bool `type:"boolean"` } @@ -6517,9 +6536,9 @@ func (s *FlinkRunConfiguration) SetAllowNonRestoredState(v bool) *FlinkRunConfig return s } -// When you configure the application input for an SQL-based Amazon Kinesis -// Data Analytics application, you specify the streaming source, the in-application -// stream name that is created, and the mapping between the two. +// When you configure the application input for a SQL-based Kinesis Data Analytics +// application, you specify the streaming source, the in-application stream +// name that is created, and the mapping between the two. type Input struct { _ struct{} `type:"structure"` @@ -6649,8 +6668,8 @@ func (s *Input) SetNamePrefix(v string) *Input { return s } -// Describes the application input configuration for an SQL-based Amazon Kinesis -// Data Analytics application. +// Describes the application input configuration for a SQL-based Kinesis Data +// Analytics application. type InputDescription struct { _ struct{} `type:"structure"` @@ -6755,8 +6774,8 @@ func (s *InputDescription) SetNamePrefix(v string) *InputDescription { } // An object that contains the Amazon Resource Name (ARN) of the AWS Lambda -// function that is used to preprocess records in the stream in an SQL-based -// Amazon Kinesis Data Analytics application. +// function that is used to preprocess records in the stream in a SQL-based +// Kinesis Data Analytics application. type InputLambdaProcessor struct { _ struct{} `type:"structure"` @@ -6764,7 +6783,7 @@ type InputLambdaProcessor struct { // // To specify an earlier version of the Lambda function than the latest, include // the Lambda function version in the Lambda function ARN. For more information - // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) + // about Lambda ARNs, see Example ARNs: AWS Lambda (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -6802,9 +6821,9 @@ func (s *InputLambdaProcessor) SetResourceARN(v string) *InputLambdaProcessor { return s } -// For an SQL-based Amazon Kinesis Data Analytics application, an object that -// contains the Amazon Resource Name (ARN) of the AWS Lambda function that is -// used to preprocess records in the stream. 
+// For a SQL-based Kinesis Data Analytics application, an object that contains +// the Amazon Resource Name (ARN) of the AWS Lambda function that is used to +// preprocess records in the stream. type InputLambdaProcessorDescription struct { _ struct{} `type:"structure"` @@ -6813,7 +6832,7 @@ type InputLambdaProcessorDescription struct { // // To specify an earlier version of the Lambda function than the latest, include // the Lambda function version in the Lambda function ARN. For more information - // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) + // about Lambda ARNs, see Example ARNs: AWS Lambda (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -6848,9 +6867,9 @@ func (s *InputLambdaProcessorDescription) SetRoleARN(v string) *InputLambdaProce return s } -// For an SQL-based Amazon Kinesis Data Analytics application, represents an -// update to the InputLambdaProcessor that is used to preprocess the records -// in the stream. +// For a SQL-based Kinesis Data Analytics application, represents an update +// to the InputLambdaProcessor that is used to preprocess the records in the +// stream. type InputLambdaProcessorUpdate struct { _ struct{} `type:"structure"` @@ -6859,7 +6878,7 @@ type InputLambdaProcessorUpdate struct { // // To specify an earlier version of the Lambda function than the latest, include // the Lambda function version in the Lambda function ARN. For more information - // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) + // about Lambda ARNs, see Example ARNs: AWS Lambda (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) // // ResourceARNUpdate is a required field ResourceARNUpdate *string `min:"1" type:"string" required:"true"` @@ -6897,8 +6916,8 @@ func (s *InputLambdaProcessorUpdate) SetResourceARNUpdate(v string) *InputLambda return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes the -// number of in-application streams to create for a given streaming source. +// For a SQL-based Kinesis Data Analytics application, describes the number +// of in-application streams to create for a given streaming source. type InputParallelism struct { _ struct{} `type:"structure"` @@ -6935,8 +6954,8 @@ func (s *InputParallelism) SetCount(v int64) *InputParallelism { return s } -// For an SQL-based Amazon Kinesis Data Analytics application, provides updates -// to the parallelism count. +// For a SQL-based Kinesis Data Analytics application, provides updates to the +// parallelism count. type InputParallelismUpdate struct { _ struct{} `type:"structure"` @@ -6979,10 +6998,10 @@ func (s *InputParallelismUpdate) SetCountUpdate(v int64) *InputParallelismUpdate return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes a processor +// For a SQL-based Kinesis Data Analytics application, describes a processor // that is used to preprocess the records in the stream before being processed // by your application code. Currently, the only input processor available is -// AWS Lambda (https://aws.amazon.com/documentation/lambda/). +// AWS Lambda (https://docs.aws.amazon.com/lambda/). 
type InputProcessingConfiguration struct { _ struct{} `type:"structure"` @@ -7027,9 +7046,9 @@ func (s *InputProcessingConfiguration) SetInputLambdaProcessor(v *InputLambdaPro return s } -// For an SQL-based Amazon Kinesis Data Analytics application, provides the -// configuration information about an input processor. Currently, the only input -// processor available is AWS Lambda (https://aws.amazon.com/documentation/lambda/). +// For a SQL-based Kinesis Data Analytics application, provides the configuration +// information about an input processor. Currently, the only input processor +// available is AWS Lambda (https://docs.aws.amazon.com/lambda/). type InputProcessingConfigurationDescription struct { _ struct{} `type:"structure"` @@ -7053,8 +7072,8 @@ func (s *InputProcessingConfigurationDescription) SetInputLambdaProcessorDescrip return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes updates -// to an InputProcessingConfiguration. +// For a SQL-based Kinesis Data Analytics application, describes updates to +// an InputProcessingConfiguration. type InputProcessingConfigurationUpdate struct { _ struct{} `type:"structure"` @@ -7098,8 +7117,8 @@ func (s *InputProcessingConfigurationUpdate) SetInputLambdaProcessorUpdate(v *In return s } -// Describes updates for an SQL-based Amazon Kinesis Data Analytics application's -// input schema. +// Describes updates for an SQL-based Kinesis Data Analytics application's input +// schema. type InputSchemaUpdate struct { _ struct{} `type:"structure"` @@ -7110,7 +7129,7 @@ type InputSchemaUpdate struct { // Specifies the encoding of the records in the streaming source; for example, // UTF-8. - RecordEncodingUpdate *string `type:"string"` + RecordEncodingUpdate *string `min:"5" type:"string"` // Specifies the format of the records on the streaming source. RecordFormatUpdate *RecordFormat `type:"structure"` @@ -7132,6 +7151,9 @@ func (s *InputSchemaUpdate) Validate() error { if s.RecordColumnUpdates != nil && len(s.RecordColumnUpdates) < 1 { invalidParams.Add(request.NewErrParamMinLen("RecordColumnUpdates", 1)) } + if s.RecordEncodingUpdate != nil && len(*s.RecordEncodingUpdate) < 5 { + invalidParams.Add(request.NewErrParamMinLen("RecordEncodingUpdate", 5)) + } if s.RecordColumnUpdates != nil { for i, v := range s.RecordColumnUpdates { if v == nil { @@ -7206,8 +7228,8 @@ func (s *InputStartingPositionConfiguration) SetInputStartingPosition(v string) return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes updates -// to a specific input configuration (identified by the InputId of an application). +// For a SQL-based Kinesis Data Analytics application, describes updates to +// a specific input configuration (identified by the InputId of an application). type InputUpdate struct { _ struct{} `type:"structure"` @@ -7339,8 +7361,8 @@ func (s *InputUpdate) SetNamePrefixUpdate(v string) *InputUpdate { // The user-provided application configuration is not valid. 
type InvalidApplicationConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -7357,17 +7379,17 @@ func (s InvalidApplicationConfigurationException) GoString() string { func newErrorInvalidApplicationConfigurationException(v protocol.ResponseMetadata) error { return &InvalidApplicationConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidApplicationConfigurationException) Code() string { +func (s *InvalidApplicationConfigurationException) Code() string { return "InvalidApplicationConfigurationException" } // Message returns the exception's message. -func (s InvalidApplicationConfigurationException) Message() string { +func (s *InvalidApplicationConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7375,28 +7397,28 @@ func (s InvalidApplicationConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidApplicationConfigurationException) OrigErr() error { +func (s *InvalidApplicationConfigurationException) OrigErr() error { return nil } -func (s InvalidApplicationConfigurationException) Error() string { +func (s *InvalidApplicationConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidApplicationConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidApplicationConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidApplicationConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidApplicationConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // The specified input parameter value is not valid. type InvalidArgumentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -7413,17 +7435,17 @@ func (s InvalidArgumentException) GoString() string { func newErrorInvalidArgumentException(v protocol.ResponseMetadata) error { return &InvalidArgumentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArgumentException) Code() string { +func (s *InvalidArgumentException) Code() string { return "InvalidArgumentException" } // Message returns the exception's message. -func (s InvalidArgumentException) Message() string { +func (s *InvalidArgumentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7431,28 +7453,28 @@ func (s InvalidArgumentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArgumentException) OrigErr() error { +func (s *InvalidArgumentException) OrigErr() error { return nil } -func (s InvalidArgumentException) Error() string { +func (s *InvalidArgumentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidArgumentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArgumentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArgumentException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArgumentException) RequestID() string { + return s.RespMetadata.RequestID } // The request JSON is not valid for the operation. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -7469,17 +7491,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7487,26 +7509,26 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } -// For an SQL-based Amazon Kinesis Data Analytics application, provides additional -// mapping information when JSON is the record format on the streaming source. +// For a SQL-based Kinesis Data Analytics application, provides additional mapping +// information when JSON is the record format on the streaming source. type JSONMappingParameters struct { _ struct{} `type:"structure"` @@ -7548,9 +7570,9 @@ func (s *JSONMappingParameters) SetRecordRowPath(v string) *JSONMappingParameter return s } -// For an SQL-based Amazon Kinesis Data Analytics application, identifies a -// Kinesis Data Firehose delivery stream as the streaming source. You provide -// the delivery stream's Amazon Resource Name (ARN). +// For a SQL-based Kinesis Data Analytics application, identifies a Kinesis +// Data Firehose delivery stream as the streaming source. You provide the delivery +// stream's Amazon Resource Name (ARN). 
type KinesisFirehoseInput struct { _ struct{} `type:"structure"` @@ -7633,9 +7655,9 @@ func (s *KinesisFirehoseInputDescription) SetRoleARN(v string) *KinesisFirehoseI return s } -// For an SQL-based Amazon Kinesis Data Analytics application, when updating -// application input configuration, provides information about a Kinesis Data -// Firehose delivery stream as the streaming source. +// For a SQL-based Kinesis Data Analytics application, when updating application +// input configuration, provides information about a Kinesis Data Firehose delivery +// stream as the streaming source. type KinesisFirehoseInputUpdate struct { _ struct{} `type:"structure"` @@ -7677,10 +7699,9 @@ func (s *KinesisFirehoseInputUpdate) SetResourceARNUpdate(v string) *KinesisFire return s } -// For an SQL-based Amazon Kinesis Data Analytics application, when configuring -// application output, identifies a Kinesis Data Firehose delivery stream as -// the destination. You provide the stream Amazon Resource Name (ARN) of the -// delivery stream. +// For a SQL-based Kinesis Data Analytics application, when configuring application +// output, identifies a Kinesis Data Firehose delivery stream as the destination. +// You provide the stream Amazon Resource Name (ARN) of the delivery stream. type KinesisFirehoseOutput struct { _ struct{} `type:"structure"` @@ -7722,8 +7743,8 @@ func (s *KinesisFirehoseOutput) SetResourceARN(v string) *KinesisFirehoseOutput return s } -// For an SQL-based Amazon Kinesis Data Analytics application's output, describes -// the Kinesis Data Firehose delivery stream that is configured as its destination. +// For a SQL-based Kinesis Data Analytics application's output, describes the +// Kinesis Data Firehose delivery stream that is configured as its destination. type KinesisFirehoseOutputDescription struct { _ struct{} `type:"structure"` @@ -7763,8 +7784,8 @@ func (s *KinesisFirehoseOutputDescription) SetRoleARN(v string) *KinesisFirehose return s } -// For an SQL-based Amazon Kinesis Data Analytics application, when updating -// an output configuration using the UpdateApplication operation, provides information +// For a SQL-based Kinesis Data Analytics application, when updating an output +// configuration using the UpdateApplication operation, provides information // about a Kinesis Data Firehose delivery stream that is configured as the destination. type KinesisFirehoseOutputUpdate struct { _ struct{} `type:"structure"` @@ -7807,8 +7828,8 @@ func (s *KinesisFirehoseOutputUpdate) SetResourceARNUpdate(v string) *KinesisFir return s } -// Identifies an Amazon Kinesis data stream as the streaming source. You provide -// the stream's Amazon Resource Name (ARN). +// Identifies a Kinesis data stream as the streaming source. You provide the +// stream's Amazon Resource Name (ARN). type KinesisStreamsInput struct { _ struct{} `type:"structure"` @@ -7850,8 +7871,8 @@ func (s *KinesisStreamsInput) SetResourceARN(v string) *KinesisStreamsInput { return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes the -// Kinesis data stream that is configured as the streaming source in the application +// For a SQL-based Kinesis Data Analytics application, describes the Kinesis +// data stream that is configured as the streaming source in the application // input configuration. 
type KinesisStreamsInputDescription struct { _ struct{} `type:"structure"` @@ -7892,9 +7913,9 @@ func (s *KinesisStreamsInputDescription) SetRoleARN(v string) *KinesisStreamsInp return s } -// When you update the input configuration for an SQL-based Amazon Kinesis Data -// Analytics application, provides information about an Amazon Kinesis stream -// as the streaming source. +// When you update the input configuration for a SQL-based Kinesis Data Analytics +// application, provides information about a Kinesis stream as the streaming +// source. type KinesisStreamsInputUpdate struct { _ struct{} `type:"structure"` @@ -7936,9 +7957,9 @@ func (s *KinesisStreamsInputUpdate) SetResourceARNUpdate(v string) *KinesisStrea return s } -// When you configure an SQL-based Amazon Kinesis Data Analytics application's -// output, identifies a Kinesis data stream as the destination. You provide -// the stream Amazon Resource Name (ARN). +// When you configure a SQL-based Kinesis Data Analytics application's output, +// identifies a Kinesis data stream as the destination. You provide the stream +// Amazon Resource Name (ARN). type KinesisStreamsOutput struct { _ struct{} `type:"structure"` @@ -7980,8 +8001,8 @@ func (s *KinesisStreamsOutput) SetResourceARN(v string) *KinesisStreamsOutput { return s } -// For an SQL-based Amazon Kinesis Data Analytics application's output, describes -// the Kinesis data stream that is configured as its destination. +// For an SQL-based Kinesis Data Analytics application's output, describes the +// Kinesis data stream that is configured as its destination. type KinesisStreamsOutputDescription struct { _ struct{} `type:"structure"` @@ -8021,9 +8042,9 @@ func (s *KinesisStreamsOutputDescription) SetRoleARN(v string) *KinesisStreamsOu return s } -// When you update an SQL-based Amazon Kinesis Data Analytics application's -// output configuration using the UpdateApplication operation, provides information -// about a Kinesis data stream that is configured as the destination. +// When you update a SQL-based Kinesis Data Analytics application's output configuration +// using the UpdateApplication operation, provides information about a Kinesis +// data stream that is configured as the destination. type KinesisStreamsOutputUpdate struct { _ struct{} `type:"structure"` @@ -8066,9 +8087,9 @@ func (s *KinesisStreamsOutputUpdate) SetResourceARNUpdate(v string) *KinesisStre return s } -// When you configure an SQL-based Amazon Kinesis Data Analytics application's -// output, identifies an AWS Lambda function as the destination. You provide -// the function Amazon Resource Name (ARN) of the Lambda function. +// When you configure a SQL-based Kinesis Data Analytics application's output, +// identifies an AWS Lambda function as the destination. You provide the function +// Amazon Resource Name (ARN) of the Lambda function. type LambdaOutput struct { _ struct{} `type:"structure"` @@ -8077,7 +8098,7 @@ type LambdaOutput struct { // // To specify an earlier version of the Lambda function than the latest, include // the Lambda function version in the Lambda function ARN. 
For more information - // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) + // about Lambda ARNs, see Example ARNs: AWS Lambda (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -8115,8 +8136,8 @@ func (s *LambdaOutput) SetResourceARN(v string) *LambdaOutput { return s } -// For an SQL-based Amazon Kinesis Data Analytics application output, describes -// the AWS Lambda function that is configured as its destination. +// For a SQL-based Kinesis Data Analytics application's output, describes the +// AWS Lambda function that is configured as its destination. type LambdaOutputDescription struct { _ struct{} `type:"structure"` @@ -8156,8 +8177,8 @@ func (s *LambdaOutputDescription) SetRoleARN(v string) *LambdaOutputDescription return s } -// When you update an SQL-based Amazon Kinesis Data Analytics application's -// output configuration using the UpdateApplication operation, provides information +// When you update an SQL-based Kinesis Data Analytics application's output +// configuration using the UpdateApplication operation, provides information // about an AWS Lambda function that is configured as the destination. type LambdaOutputUpdate struct { _ struct{} `type:"structure"` @@ -8166,7 +8187,7 @@ type LambdaOutputUpdate struct { // // To specify an earlier version of the Lambda function than the latest, include // the Lambda function version in the Lambda function ARN. For more information - // about Lambda ARNs, see Example ARNs: AWS Lambda (/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) + // about Lambda ARNs, see Example ARNs: AWS Lambda (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-lambda) // // ResourceARNUpdate is a required field ResourceARNUpdate *string `min:"1" type:"string" required:"true"` @@ -8206,8 +8227,8 @@ func (s *LambdaOutputUpdate) SetResourceARNUpdate(v string) *LambdaOutputUpdate // The number of allowed resources has been exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -8224,17 +8245,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8242,22 +8263,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListApplicationSnapshotsInput struct { @@ -8512,10 +8533,10 @@ func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput return s } -// When you configure an SQL-based Amazon Kinesis Data Analytics application's -// input at the time of creating or updating an application, provides additional -// mapping information specific to the record format (such as JSON, CSV, or -// record fields delimited by some delimiter) on the streaming source. +// When you configure a SQL-based Kinesis Data Analytics application's input +// at the time of creating or updating an application, provides additional mapping +// information specific to the record format (such as JSON, CSV, or record fields +// delimited by some delimiter) on the streaming source. type MappingParameters struct { _ struct{} `type:"structure"` @@ -8570,7 +8591,7 @@ func (s *MappingParameters) SetJSONMappingParameters(v *JSONMappingParameters) * return s } -// Describes configuration parameters for Amazon CloudWatch logging for a Java-based +// Describes configuration parameters for Amazon CloudWatch logging for a Flink-based // Kinesis Data Analytics application. For more information about CloudWatch // logging, see Monitoring (https://docs.aws.amazon.com/kinesisanalytics/latest/java/monitoring-overview.html). type MonitoringConfiguration struct { @@ -8586,7 +8607,9 @@ type MonitoringConfiguration struct { // Describes the verbosity of the CloudWatch Logs for an application. LogLevel *string `type:"string" enum:"LogLevel"` - // Describes the granularity of the CloudWatch Logs for an application. + // Describes the granularity of the CloudWatch Logs for an application. The + // Parallelism level is not recommended for applications with a Parallelism + // over 64 due to excessive costs. MetricsLevel *string `type:"string" enum:"MetricsLevel"` } @@ -8631,7 +8654,7 @@ func (s *MonitoringConfiguration) SetMetricsLevel(v string) *MonitoringConfigura return s } -// Describes configuration parameters for CloudWatch logging for a Java-based +// Describes configuration parameters for CloudWatch logging for a Flink-based // Kinesis Data Analytics application. type MonitoringConfigurationDescription struct { _ struct{} `type:"structure"` @@ -8676,7 +8699,7 @@ func (s *MonitoringConfigurationDescription) SetMetricsLevel(v string) *Monitori } // Describes updates to configuration parameters for Amazon CloudWatch logging -// for a Java-based Kinesis Data Analytics application. +// for a Flink-based Kinesis Data Analytics application. type MonitoringConfigurationUpdate struct { _ struct{} `type:"structure"` @@ -8689,6 +8712,8 @@ type MonitoringConfigurationUpdate struct { LogLevelUpdate *string `type:"string" enum:"LogLevel"` // Describes updates to the granularity of the CloudWatch Logs for an application. + // The Parallelism level is not recommended for applications with a Parallelism + // over 64 due to excessive costs. 
MetricsLevelUpdate *string `type:"string" enum:"MetricsLevel"` } @@ -8720,10 +8745,10 @@ func (s *MonitoringConfigurationUpdate) SetMetricsLevelUpdate(v string) *Monitor return s } -// Describes an SQL-based Amazon Kinesis Data Analytics application's output -// configuration, in which you identify an in-application stream and a destination -// where you want the in-application stream data to be written. The destination -// can be a Kinesis data stream or a Kinesis Data Firehose delivery stream. +// Describes a SQL-based Kinesis Data Analytics application's output configuration, +// in which you identify an in-application stream and a destination where you +// want the in-application stream data to be written. The destination can be +// a Kinesis data stream or a Kinesis Data Firehose delivery stream. type Output struct { _ struct{} `type:"structure"` @@ -8732,10 +8757,10 @@ type Output struct { // DestinationSchema is a required field DestinationSchema *DestinationSchema `type:"structure" required:"true"` - // Identifies an Amazon Kinesis Data Firehose delivery stream as the destination. + // Identifies a Kinesis Data Firehose delivery stream as the destination. KinesisFirehoseOutput *KinesisFirehoseOutput `type:"structure"` - // Identifies an Amazon Kinesis data stream as the destination. + // Identifies a Kinesis data stream as the destination. KinesisStreamsOutput *KinesisStreamsOutput `type:"structure"` // Identifies an AWS Lambda function as the destination. @@ -8826,10 +8851,10 @@ func (s *Output) SetName(v string) *Output { return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes the -// application output configuration, which includes the in-application stream -// name and the destination where the stream data is written. The destination -// can be a Kinesis data stream or a Kinesis Data Firehose delivery stream. +// For a SQL-based Kinesis Data Analytics application, describes the application +// output configuration, which includes the in-application stream name and the +// destination where the stream data is written. The destination can be a Kinesis +// data stream or a Kinesis Data Firehose delivery stream. type OutputDescription struct { _ struct{} `type:"structure"` @@ -8901,8 +8926,8 @@ func (s *OutputDescription) SetOutputId(v string) *OutputDescription { return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes updates -// to the output configuration identified by the OutputId. +// For a SQL-based Kinesis Data Analytics application, describes updates to +// the output configuration identified by the OutputId. type OutputUpdate struct { _ struct{} `type:"structure"` @@ -9014,10 +9039,10 @@ func (s *OutputUpdate) SetOutputId(v string) *OutputUpdate { return s } -// Describes parameters for how a Java-based Amazon Kinesis Data Analytics application -// executes multiple tasks simultaneously. For more information about parallelism, -// see Parallel Execution (https://ci.apache.org/projects/flink/flink-docs-stable/dev/parallel.html) -// in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.6/). +// Describes parameters for how a Flink-based Kinesis Data Analytics application +// application executes multiple tasks simultaneously. For more information +// about parallelism, see Parallel Execution (https://ci.apache.org/projects/flink/flink-docs-release-1.8/dev/parallel.html) +// in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/). 
type ParallelismConfiguration struct { _ struct{} `type:"structure"` @@ -9033,7 +9058,7 @@ type ParallelismConfiguration struct { // ConfigurationType is a required field ConfigurationType *string `type:"string" required:"true" enum:"ConfigurationType"` - // Describes the initial number of parallel tasks that a Java-based Kinesis + // Describes the initial number of parallel tasks that a Flink-based Kinesis // Data Analytics application can perform. If AutoScalingEnabled is set to True, // Kinesis Data Analytics increases the CurrentParallelism value in response // to application load. The service can increase the CurrentParallelism value @@ -9044,7 +9069,7 @@ type ParallelismConfiguration struct { // Parallelism setting. Parallelism *int64 `min:"1" type:"integer"` - // Describes the number of parallel tasks that a Java-based Kinesis Data Analytics + // Describes the number of parallel tasks that a Flink-based Kinesis Data Analytics // application can perform per Kinesis Processing Unit (KPU) used by the application. // For more information about KPUs, see Amazon Kinesis Data Analytics Pricing // (http://aws.amazon.com/kinesis/data-analytics/pricing/). @@ -9104,7 +9129,7 @@ func (s *ParallelismConfiguration) SetParallelismPerKPU(v int64) *ParallelismCon return s } -// Describes parameters for how a Java-based Kinesis Data Analytics application +// Describes parameters for how a Flink-based Kinesis Data Analytics application // executes multiple tasks simultaneously. type ParallelismConfigurationDescription struct { _ struct{} `type:"structure"` @@ -9117,7 +9142,7 @@ type ParallelismConfigurationDescription struct { // Data Analytics service. ConfigurationType *string `type:"string" enum:"ConfigurationType"` - // Describes the current number of parallel tasks that a Java-based Kinesis + // Describes the current number of parallel tasks that a Flink-based Kinesis // Data Analytics application can perform. If AutoScalingEnabled is set to True, // Kinesis Data Analytics can increase this value in response to application // load. The service can increase this value up to the maximum parallelism, @@ -9127,7 +9152,7 @@ type ParallelismConfigurationDescription struct { // can reduce the CurrentParallelism value down to the Parallelism setting. CurrentParallelism *int64 `min:"1" type:"integer"` - // Describes the initial number of parallel tasks that a Java-based Kinesis + // Describes the initial number of parallel tasks that a Flink-based Kinesis // Data Analytics application can perform. If AutoScalingEnabled is set to True, // then Kinesis Data Analytics can increase the CurrentParallelism value in // response to application load. The service can increase CurrentParallelism @@ -9138,7 +9163,7 @@ type ParallelismConfigurationDescription struct { // Parallelism setting. Parallelism *int64 `min:"1" type:"integer"` - // Describes the number of parallel tasks that a Java-based Kinesis Data Analytics + // Describes the number of parallel tasks that a Flink-based Kinesis Data Analytics // application can perform per Kinesis Processing Unit (KPU) used by the application. ParallelismPerKPU *int64 `min:"1" type:"integer"` } @@ -9183,7 +9208,7 @@ func (s *ParallelismConfigurationDescription) SetParallelismPerKPU(v int64) *Par return s } -// Describes updates to parameters for how a Java-based Kinesis Data Analytics +// Describes updates to parameters for how a Flink-based Kinesis Data Analytics // application executes multiple tasks simultaneously. 
type ParallelismConfigurationUpdate struct { _ struct{} `type:"structure"` @@ -9263,7 +9288,7 @@ func (s *ParallelismConfigurationUpdate) SetParallelismUpdate(v int64) *Parallel return s } -// Property key-value pairs passed into a Java-based Kinesis Data Analytics +// Property key-value pairs passed into a Flink-based Kinesis Data Analytics // application. type PropertyGroup struct { _ struct{} `type:"structure"` @@ -9323,9 +9348,9 @@ func (s *PropertyGroup) SetPropertyMap(v map[string]*string) *PropertyGroup { return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes the -// mapping of each data element in the streaming source to the corresponding -// column in the in-application stream. +// For a SQL-based Kinesis Data Analytics application, describes the mapping +// of each data element in the streaming source to the corresponding column +// in the in-application stream. // // Also used to describe the format of the reference data source. type RecordColumn struct { @@ -9339,7 +9364,7 @@ type RecordColumn struct { // or reference table. // // Name is a required field - Name *string `type:"string" required:"true"` + Name *string `min:"1" type:"string" required:"true"` // The type of column created in the in-application input stream or reference // table. @@ -9364,6 +9389,9 @@ func (s *RecordColumn) Validate() error { if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } if s.SqlType == nil { invalidParams.Add(request.NewErrParamRequired("SqlType")) } @@ -9395,9 +9423,9 @@ func (s *RecordColumn) SetSqlType(v string) *RecordColumn { return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes the -// record format and relevant mapping information that should be applied to -// schematize the records on the stream. +// For a SQL-based Kinesis Data Analytics application, describes the record +// format and relevant mapping information that should be applied to schematize +// the records on the stream. type RecordFormat struct { _ struct{} `type:"structure"` @@ -9453,11 +9481,11 @@ func (s *RecordFormat) SetRecordFormatType(v string) *RecordFormat { return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes the -// reference data source by providing the source information (Amazon S3 bucket -// name and object key name), the resulting in-application table name that is -// created, and the necessary schema to map the data elements in the Amazon -// S3 object to the in-application table. +// For a SQL-based Kinesis Data Analytics application, describes the reference +// data source by providing the source information (Amazon S3 bucket name and +// object key name), the resulting in-application table name that is created, +// and the necessary schema to map the data elements in the Amazon S3 object +// to the in-application table. type ReferenceDataSource struct { _ struct{} `type:"structure"` @@ -9536,8 +9564,8 @@ func (s *ReferenceDataSource) SetTableName(v string) *ReferenceDataSource { return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes the -// reference data source configured for an application. +// For a SQL-based Kinesis Data Analytics application, describes the reference +// data source configured for an application. 
type ReferenceDataSourceDescription struct { _ struct{} `type:"structure"` @@ -9599,12 +9627,11 @@ func (s *ReferenceDataSourceDescription) SetTableName(v string) *ReferenceDataSo return s } -// When you update a reference data source configuration for a SQL-based Amazon -// Kinesis Data Analytics application, this object provides all the updated -// values (such as the source bucket name and object key name), the in-application -// table name that is created, and updated mapping information that maps the -// data in the Amazon S3 object to the in-application reference table that is -// created. +// When you update a reference data source configuration for a SQL-based Kinesis +// Data Analytics application, this object provides all the updated values (such +// as the source bucket name and object key name), the in-application table +// name that is created, and updated mapping information that maps the data +// in the Amazon S3 object to the in-application reference table that is created. type ReferenceDataSourceUpdate struct { _ struct{} `type:"structure"` @@ -9692,8 +9719,8 @@ func (s *ReferenceDataSourceUpdate) SetTableNameUpdate(v string) *ReferenceDataS // The application is not available for this operation. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -9710,17 +9737,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9728,28 +9755,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // Specified application can't be found. 
type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -9766,17 +9793,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9784,31 +9811,31 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Discovery failed to get a record from the streaming source because of the -// Amazon Kinesis Streams ProvisionedThroughputExceededException. For more information, +// Kinesis Streams ProvisionedThroughputExceededException. For more information, // see GetRecords (http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) // in the Amazon Kinesis Streams API Reference. type ResourceProvisionedThroughputExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -9825,17 +9852,17 @@ func (s ResourceProvisionedThroughputExceededException) GoString() string { func newErrorResourceProvisionedThroughputExceededException(v protocol.ResponseMetadata) error { return &ResourceProvisionedThroughputExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceProvisionedThroughputExceededException) Code() string { +func (s *ResourceProvisionedThroughputExceededException) Code() string { return "ResourceProvisionedThroughputExceededException" } // Message returns the exception's message. -func (s ResourceProvisionedThroughputExceededException) Message() string { +func (s *ResourceProvisionedThroughputExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9843,37 +9870,37 @@ func (s ResourceProvisionedThroughputExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
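The exception types updated in these hunks now expose an exported RespMetadata field and implement the awserr.Error methods on pointer receivers. Callers still match them by error code, as the generated documentation in this file suggests elsewhere; the sketch below is a hedged example of that pattern, with a hypothetical application name and an arbitrary choice of which codes to handle.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisanalyticsv2"
)

func main() {
	svc := kinesisanalyticsv2.New(session.Must(session.NewSession()))

	_, err := svc.DescribeApplication(&kinesisanalyticsv2.DescribeApplicationInput{
		ApplicationName: aws.String("example-app"), // hypothetical application name
	})
	if err == nil {
		return
	}

	// The typed exceptions still satisfy awserr.Error, so Code/Message work as before.
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case kinesisanalyticsv2.ErrCodeResourceNotFoundException:
			fmt.Println("application not found:", aerr.Message())
		case kinesisanalyticsv2.ErrCodeResourceInUseException:
			fmt.Println("application busy:", aerr.Message())
		default:
			fmt.Println("request failed:", aerr)
		}
		return
	}
	fmt.Println("non-API error:", err)
}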
-func (s ResourceProvisionedThroughputExceededException) OrigErr() error { +func (s *ResourceProvisionedThroughputExceededException) OrigErr() error { return nil } -func (s ResourceProvisionedThroughputExceededException) Error() string { +func (s *ResourceProvisionedThroughputExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceProvisionedThroughputExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceProvisionedThroughputExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceProvisionedThroughputExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceProvisionedThroughputExceededException) RequestID() string { + return s.RespMetadata.RequestID } -// Describes the starting parameters for an Amazon Kinesis Data Analytics application. +// Describes the starting parameters for an Kinesis Data Analytics application. type RunConfiguration struct { _ struct{} `type:"structure"` // Describes the restore behavior of a restarting application. ApplicationRestoreConfiguration *ApplicationRestoreConfiguration `type:"structure"` - // Describes the starting parameters for an Apache Flink-based Kinesis Data - // Analytics application. + // Describes the starting parameters for a Flink-based Kinesis Data Analytics + // application. FlinkRunConfiguration *FlinkRunConfiguration `type:"structure"` - // Describes the starting parameters for an SQL-based Kinesis Data Analytics - // application. + // Describes the starting parameters for a SQL-based Kinesis Data Analytics + // application application. SqlRunConfigurations []*SqlRunConfiguration `type:"list"` } @@ -9936,6 +9963,10 @@ type RunConfigurationDescription struct { // Describes the restore behavior of a restarting application. ApplicationRestoreConfigurationDescription *ApplicationRestoreConfiguration `type:"structure"` + + // Describes the starting parameters for a Flink-based Kinesis Data Analytics + // application. + FlinkRunConfigurationDescription *FlinkRunConfiguration `type:"structure"` } // String returns the string representation @@ -9954,6 +9985,12 @@ func (s *RunConfigurationDescription) SetApplicationRestoreConfigurationDescript return s } +// SetFlinkRunConfigurationDescription sets the FlinkRunConfigurationDescription field's value. +func (s *RunConfigurationDescription) SetFlinkRunConfigurationDescription(v *FlinkRunConfiguration) *RunConfigurationDescription { + s.FlinkRunConfigurationDescription = v + return s +} + // Describes the updates to the starting parameters for a Kinesis Data Analytics // application. type RunConfigurationUpdate struct { @@ -9962,8 +9999,8 @@ type RunConfigurationUpdate struct { // Describes updates to the restore behavior of a restarting application. ApplicationRestoreConfiguration *ApplicationRestoreConfiguration `type:"structure"` - // Describes the starting parameters for an Apache Flink-based Kinesis Data - // Analytics application. + // Describes the starting parameters for a Flink-based Kinesis Data Analytics + // application. 
FlinkRunConfiguration *FlinkRunConfiguration `type:"structure"` } @@ -10004,7 +10041,7 @@ func (s *RunConfigurationUpdate) SetFlinkRunConfiguration(v *FlinkRunConfigurati return s } -// Describes the location of a Java-based Amazon Kinesis Data Analytics application's +// Describes the location of a Flink-based Kinesis Data Analytics application's // code stored in an S3 bucket. type S3ApplicationCodeLocationDescription struct { _ struct{} `type:"structure"` @@ -10052,7 +10089,7 @@ func (s *S3ApplicationCodeLocationDescription) SetObjectVersion(v string) *S3App return s } -// For an SQL-based Amazon Kinesis Data Analytics application, provides a description +// For a SQL-based Kinesis Data Analytics application, provides a description // of an Amazon S3 data source, including the Amazon Resource Name (ARN) of // the S3 bucket and the name of the Amazon S3 object that contains the data. type S3Configuration struct { @@ -10113,7 +10150,7 @@ func (s *S3Configuration) SetFileKey(v string) *S3Configuration { return s } -// For a Java-based Amazon Kinesis Data Analytics application, provides a description +// For a Flink-based Kinesis Data Analytics application, provides a description // of an Amazon S3 object, including the Amazon Resource Name (ARN) of the S3 // bucket, the name of the Amazon S3 object that contains the data, and the // version number of the Amazon S3 object that contains the data. @@ -10185,8 +10222,8 @@ func (s *S3ContentLocation) SetObjectVersion(v string) *S3ContentLocation { return s } -// Describes an update for the Amazon S3 code content location for a Java-based -// Amazon Kinesis Data Analytics application. +// Describes an update for the Amazon S3 code content location for a Flink-based +// Kinesis Data Analytics application. type S3ContentLocationUpdate struct { _ struct{} `type:"structure"` @@ -10245,8 +10282,8 @@ func (s *S3ContentLocationUpdate) SetObjectVersionUpdate(v string) *S3ContentLoc return s } -// For an SQL-based Amazon Kinesis Data Analytics application, identifies the -// Amazon S3 bucket and object that contains the reference data. +// For a SQL-based Kinesis Data Analytics application, identifies the Amazon +// S3 bucket and object that contains the reference data. // // A Kinesis Data Analytics application loads reference data only once. If the // data changes, you call the UpdateApplication operation to trigger reloading @@ -10299,8 +10336,8 @@ func (s *S3ReferenceDataSource) SetFileKey(v string) *S3ReferenceDataSource { return s } -// For an SQL-based Amazon Kinesis Data Analytics application, provides the -// bucket name and object key name that stores the reference data. +// For a SQL-based Kinesis Data Analytics application, provides the bucket name +// and object key name that stores the reference data. type S3ReferenceDataSourceDescription struct { _ struct{} `type:"structure"` @@ -10352,9 +10389,8 @@ func (s *S3ReferenceDataSourceDescription) SetReferenceRoleARN(v string) *S3Refe return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes the -// Amazon S3 bucket name and object key name for an in-application reference -// table. +// For a SQL-based Kinesis Data Analytics application, describes the Amazon +// S3 bucket name and object key name for an in-application reference table. type S3ReferenceDataSourceUpdate struct { _ struct{} `type:"structure"` @@ -10405,8 +10441,8 @@ func (s *S3ReferenceDataSourceUpdate) SetFileKeyUpdate(v string) *S3ReferenceDat // The service cannot complete the request. 
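S3ContentLocation, reworded above for Flink-based applications, is the struct that points an application at its code package in Amazon S3. A short sketch of filling it in follows; the bucket ARN, object key, and version are placeholders, and the BucketARN/FileKey field names are assumed from the wider SDK since only the ObjectVersion setter appears in this excerpt.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kinesisanalyticsv2"
)

func main() {
	// Placeholder bucket ARN, object key, and version for a Flink application jar.
	loc := &kinesisanalyticsv2.S3ContentLocation{
		BucketARN:     aws.String("arn:aws:s3:::example-code-bucket"),
		FileKey:       aws.String("flink-app/app-1.0.jar"),
		ObjectVersion: aws.String("example-version-id"),
	}

	if err := loc.Validate(); err != nil {
		fmt.Println("invalid S3ContentLocation:", err)
		return
	}
	fmt.Println(loc)
}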
type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -10423,17 +10459,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10441,22 +10477,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // Provides details about a snapshot of application state. @@ -10516,9 +10552,9 @@ func (s *SnapshotDetails) SetSnapshotStatus(v string) *SnapshotDetails { return s } -// For an SQL-based Amazon Kinesis Data Analytics application, describes the -// format of the data in the streaming source, and how each data element maps -// to corresponding columns created in the in-application stream. +// For a SQL-based Kinesis Data Analytics application, describes the format +// of the data in the streaming source, and how each data element maps to corresponding +// columns created in the in-application stream. type SourceSchema struct { _ struct{} `type:"structure"` @@ -10529,7 +10565,7 @@ type SourceSchema struct { // Specifies the encoding of the records in the streaming source. For example, // UTF-8. - RecordEncoding *string `type:"string"` + RecordEncoding *string `min:"5" type:"string"` // Specifies the format of the records on the streaming source. // @@ -10556,6 +10592,9 @@ func (s *SourceSchema) Validate() error { if s.RecordColumns != nil && len(s.RecordColumns) < 1 { invalidParams.Add(request.NewErrParamMinLen("RecordColumns", 1)) } + if s.RecordEncoding != nil && len(*s.RecordEncoding) < 5 { + invalidParams.Add(request.NewErrParamMinLen("RecordEncoding", 5)) + } if s.RecordFormat == nil { invalidParams.Add(request.NewErrParamRequired("RecordFormat")) } @@ -10599,7 +10638,7 @@ func (s *SourceSchema) SetRecordFormat(v *RecordFormat) *SourceSchema { return s } -// Describes the inputs, outputs, and reference data sources for an SQL-based +// Describes the inputs, outputs, and reference data sources for a SQL-based // Kinesis Data Analytics application. 
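SourceSchema, together with RecordColumn and RecordFormat above, describes how a SQL-based application maps streaming records onto in-application columns, and the new minimum-length checks (Name at least 1 character, RecordEncoding at least 5) now surface through Validate. Below is a minimal sketch assuming a two-column CSV source; the column names and SQL types are illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/kinesisanalyticsv2"
)

func main() {
	// Two illustrative columns mapped from a CSV stream; "UTF-8" satisfies the
	// new five-character minimum on RecordEncoding.
	schema := &kinesisanalyticsv2.SourceSchema{
		RecordColumns: []*kinesisanalyticsv2.RecordColumn{
			{Name: aws.String("ticker"), SqlType: aws.String("VARCHAR(8)")},
			{Name: aws.String("price"), SqlType: aws.String("DOUBLE")},
		},
		RecordEncoding: aws.String("UTF-8"),
		RecordFormat: &kinesisanalyticsv2.RecordFormat{
			RecordFormatType: aws.String(kinesisanalyticsv2.RecordFormatTypeCsv),
		},
	}

	if err := schema.Validate(); err != nil {
		fmt.Println("schema rejected:", err)
		return
	}
	fmt.Println("schema OK")
}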
type SqlApplicationConfiguration struct { _ struct{} `type:"structure"` @@ -10684,7 +10723,7 @@ func (s *SqlApplicationConfiguration) SetReferenceDataSources(v []*ReferenceData return s } -// Describes the inputs, outputs, and reference data sources for an SQL-based +// Describes the inputs, outputs, and reference data sources for a SQL-based // Kinesis Data Analytics application. type SqlApplicationConfigurationDescription struct { _ struct{} `type:"structure"` @@ -10731,7 +10770,7 @@ func (s *SqlApplicationConfigurationDescription) SetReferenceDataSourceDescripti } // Describes updates to the input streams, destination streams, and reference -// data sources for an SQL-based Kinesis Data Analytics application. +// data sources for a SQL-based Kinesis Data Analytics application. type SqlApplicationConfigurationUpdate struct { _ struct{} `type:"structure"` @@ -10816,7 +10855,7 @@ func (s *SqlApplicationConfigurationUpdate) SetReferenceDataSourceUpdates(v []*R return s } -// Describes the starting parameters for an SQL-based Kinesis Data Analytics +// Describes the starting parameters for a SQL-based Kinesis Data Analytics // application. type SqlRunConfiguration struct { _ struct{} `type:"structure"` @@ -10957,6 +10996,16 @@ type StopApplicationInput struct { // // ApplicationName is a required field ApplicationName *string `min:"1" type:"string" required:"true"` + + // Set to true to force the application to stop. If you set Force to true, Kinesis + // Data Analytics stops the application without taking a snapshot. + // + // You can only force stop a Flink-based Kinesis Data Analytics application. + // You can't force stop a SQL-based Kinesis Data Analytics application. + // + // The application must be in the STARTING, UPDATING, STOPPING, AUTOSCALING, + // or RUNNING state. + Force *bool `type:"boolean"` } // String returns the string representation @@ -10991,6 +11040,12 @@ func (s *StopApplicationInput) SetApplicationName(v string) *StopApplicationInpu return s } +// SetForce sets the Force field's value. +func (s *StopApplicationInput) SetForce(v bool) *StopApplicationInput { + s.Force = &v + return s +} + type StopApplicationOutput struct { _ struct{} `type:"structure"` } @@ -11146,8 +11201,8 @@ func (s TagResourceOutput) GoString() string { // Note that the maximum number of application tags includes system tags. The // maximum number of user-defined application tags is 50. type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11164,17 +11219,17 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11182,29 +11237,29 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
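The new Force flag on StopApplicationInput, documented above, stops a Flink-based application without taking a snapshot while it is in the STARTING, UPDATING, STOPPING, AUTOSCALING, or RUNNING state. A hedged sketch of a force stop (the application name is a placeholder):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisanalyticsv2"
)

func main() {
	svc := kinesisanalyticsv2.New(session.Must(session.NewSession()))

	// Force-stop skips the snapshot; only valid for Flink-based applications.
	input := &kinesisanalyticsv2.StopApplicationInput{
		ApplicationName: aws.String("example-app"), // placeholder name
		Force:           aws.Bool(true),
	}

	if _, err := svc.StopApplication(input); err != nil {
		fmt.Println("StopApplication failed:", err)
		return
	}
	fmt.Println("stop requested")
}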
-func (s TooManyTagsException) OrigErr() error { +func (s *TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *TooManyTagsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID } -// The data format is not valid. Amazon Kinesis Data Analytics cannot detect -// the schema for the given streaming source. +// The data format is not valid. Kinesis Data Analytics cannot detect the schema +// for the given streaming source. type UnableToDetectSchemaException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -11228,17 +11283,17 @@ func (s UnableToDetectSchemaException) GoString() string { func newErrorUnableToDetectSchemaException(v protocol.ResponseMetadata) error { return &UnableToDetectSchemaException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnableToDetectSchemaException) Code() string { +func (s *UnableToDetectSchemaException) Code() string { return "UnableToDetectSchemaException" } // Message returns the exception's message. -func (s UnableToDetectSchemaException) Message() string { +func (s *UnableToDetectSchemaException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11246,29 +11301,29 @@ func (s UnableToDetectSchemaException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnableToDetectSchemaException) OrigErr() error { +func (s *UnableToDetectSchemaException) OrigErr() error { return nil } -func (s UnableToDetectSchemaException) Error() string { +func (s *UnableToDetectSchemaException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UnableToDetectSchemaException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnableToDetectSchemaException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnableToDetectSchemaException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnableToDetectSchemaException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because a specified parameter is not supported or // a specified resource is not valid for this operation. 
type UnsupportedOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -11285,17 +11340,17 @@ func (s UnsupportedOperationException) GoString() string { func newErrorUnsupportedOperationException(v protocol.ResponseMetadata) error { return &UnsupportedOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedOperationException) Code() string { +func (s *UnsupportedOperationException) Code() string { return "UnsupportedOperationException" } // Message returns the exception's message. -func (s UnsupportedOperationException) Message() string { +func (s *UnsupportedOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11303,28 +11358,29 @@ func (s UnsupportedOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedOperationException) OrigErr() error { +func (s *UnsupportedOperationException) OrigErr() error { return nil } -func (s UnsupportedOperationException) Error() string { +func (s *UnsupportedOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedOperationException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the Kinesis Analytics application from which to remove the tags. + // The ARN of the Kinesis Data Analytics application from which to remove the + // tags. 
// // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -11739,6 +11795,15 @@ const ( ApplicationRestoreTypeRestoreFromCustomSnapshot = "RESTORE_FROM_CUSTOM_SNAPSHOT" ) +// ApplicationRestoreType_Values returns all elements of the ApplicationRestoreType enum +func ApplicationRestoreType_Values() []string { + return []string{ + ApplicationRestoreTypeSkipRestoreFromSnapshot, + ApplicationRestoreTypeRestoreFromLatestSnapshot, + ApplicationRestoreTypeRestoreFromCustomSnapshot, + } +} + const ( // ApplicationStatusDeleting is a ApplicationStatus enum value ApplicationStatusDeleting = "DELETING" @@ -11757,8 +11822,28 @@ const ( // ApplicationStatusUpdating is a ApplicationStatus enum value ApplicationStatusUpdating = "UPDATING" + + // ApplicationStatusAutoscaling is a ApplicationStatus enum value + ApplicationStatusAutoscaling = "AUTOSCALING" + + // ApplicationStatusForceStopping is a ApplicationStatus enum value + ApplicationStatusForceStopping = "FORCE_STOPPING" ) +// ApplicationStatus_Values returns all elements of the ApplicationStatus enum +func ApplicationStatus_Values() []string { + return []string{ + ApplicationStatusDeleting, + ApplicationStatusStarting, + ApplicationStatusStopping, + ApplicationStatusReady, + ApplicationStatusRunning, + ApplicationStatusUpdating, + ApplicationStatusAutoscaling, + ApplicationStatusForceStopping, + } +} + const ( // CodeContentTypePlaintext is a CodeContentType enum value CodeContentTypePlaintext = "PLAINTEXT" @@ -11767,6 +11852,14 @@ const ( CodeContentTypeZipfile = "ZIPFILE" ) +// CodeContentType_Values returns all elements of the CodeContentType enum +func CodeContentType_Values() []string { + return []string{ + CodeContentTypePlaintext, + CodeContentTypeZipfile, + } +} + const ( // ConfigurationTypeDefault is a ConfigurationType enum value ConfigurationTypeDefault = "DEFAULT" @@ -11775,6 +11868,14 @@ const ( ConfigurationTypeCustom = "CUSTOM" ) +// ConfigurationType_Values returns all elements of the ConfigurationType enum +func ConfigurationType_Values() []string { + return []string{ + ConfigurationTypeDefault, + ConfigurationTypeCustom, + } +} + const ( // InputStartingPositionNow is a InputStartingPosition enum value InputStartingPositionNow = "NOW" @@ -11786,6 +11887,15 @@ const ( InputStartingPositionLastStoppedPoint = "LAST_STOPPED_POINT" ) +// InputStartingPosition_Values returns all elements of the InputStartingPosition enum +func InputStartingPosition_Values() []string { + return []string{ + InputStartingPositionNow, + InputStartingPositionTrimHorizon, + InputStartingPositionLastStoppedPoint, + } +} + const ( // LogLevelInfo is a LogLevel enum value LogLevelInfo = "INFO" @@ -11800,6 +11910,16 @@ const ( LogLevelDebug = "DEBUG" ) +// LogLevel_Values returns all elements of the LogLevel enum +func LogLevel_Values() []string { + return []string{ + LogLevelInfo, + LogLevelWarn, + LogLevelError, + LogLevelDebug, + } +} + const ( // MetricsLevelApplication is a MetricsLevel enum value MetricsLevelApplication = "APPLICATION" @@ -11814,6 +11934,16 @@ const ( MetricsLevelParallelism = "PARALLELISM" ) +// MetricsLevel_Values returns all elements of the MetricsLevel enum +func MetricsLevel_Values() []string { + return []string{ + MetricsLevelApplication, + MetricsLevelTask, + MetricsLevelOperator, + MetricsLevelParallelism, + } +} + const ( // RecordFormatTypeJson is a RecordFormatType enum value RecordFormatTypeJson = "JSON" @@ -11822,6 +11952,14 @@ const ( RecordFormatTypeCsv = "CSV" ) +// 
RecordFormatType_Values returns all elements of the RecordFormatType enum +func RecordFormatType_Values() []string { + return []string{ + RecordFormatTypeJson, + RecordFormatTypeCsv, + } +} + const ( // RuntimeEnvironmentSql10 is a RuntimeEnvironment enum value RuntimeEnvironmentSql10 = "SQL-1_0" @@ -11833,6 +11971,15 @@ const ( RuntimeEnvironmentFlink18 = "FLINK-1_8" ) +// RuntimeEnvironment_Values returns all elements of the RuntimeEnvironment enum +func RuntimeEnvironment_Values() []string { + return []string{ + RuntimeEnvironmentSql10, + RuntimeEnvironmentFlink16, + RuntimeEnvironmentFlink18, + } +} + const ( // SnapshotStatusCreating is a SnapshotStatus enum value SnapshotStatusCreating = "CREATING" @@ -11846,3 +11993,13 @@ const ( // SnapshotStatusFailed is a SnapshotStatus enum value SnapshotStatusFailed = "FAILED" ) + +// SnapshotStatus_Values returns all elements of the SnapshotStatus enum +func SnapshotStatus_Values() []string { + return []string{ + SnapshotStatusCreating, + SnapshotStatusReady, + SnapshotStatusDeleting, + SnapshotStatusFailed, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/doc.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/doc.go index e4ae663b5..1c2e7737a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/doc.go @@ -4,10 +4,10 @@ // requests to Amazon Kinesis Analytics. // // Amazon Kinesis Data Analytics is a fully managed service that you can use -// to process and analyze streaming data using SQL or Java. The service enables -// you to quickly author and run SQL or Java code against streaming sources -// to perform time series analytics, feed real-time dashboards, and create real-time -// metrics. +// to process and analyze streaming data using Java, SQL, or Scala. The service +// enables you to quickly author and run Java, SQL, or Scala code against streaming +// sources to perform time series analytics, feed real-time dashboards, and +// create real-time metrics. // // See https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/errors.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/errors.go index 852296297..14f633783 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/errors.go @@ -63,7 +63,7 @@ const ( // "ResourceProvisionedThroughputExceededException". // // Discovery failed to get a record from the streaming source because of the - // Amazon Kinesis Streams ProvisionedThroughputExceededException. For more information, + // Kinesis Streams ProvisionedThroughputExceededException. For more information, // see GetRecords (http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetRecords.html) // in the Amazon Kinesis Streams API Reference. ErrCodeResourceProvisionedThroughputExceededException = "ResourceProvisionedThroughputExceededException" @@ -85,8 +85,8 @@ const ( // ErrCodeUnableToDetectSchemaException for service response error code // "UnableToDetectSchemaException". // - // The data format is not valid. Amazon Kinesis Data Analytics cannot detect - // the schema for the given streaming source. + // The data format is not valid. Kinesis Data Analytics cannot detect the schema + // for the given streaming source. 
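The *_Values helpers added in these hunks return every member of their enum, which is useful for validating input before it ever reaches the API (in the provider they would typically feed a schema validator). A small sketch using RuntimeEnvironment_Values; the runtime strings being checked are arbitrary examples.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/kinesisanalyticsv2"
)

// isValidRuntime reports whether env is one of the modeled RuntimeEnvironment
// values (SQL-1_0, FLINK-1_6, FLINK-1_8 in this SDK revision).
func isValidRuntime(env string) bool {
	for _, v := range kinesisanalyticsv2.RuntimeEnvironment_Values() {
		if v == env {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isValidRuntime("FLINK-1_8")) // true
	fmt.Println(isValidRuntime("FLINK-1_2")) // false: not a modeled value here
}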
ErrCodeUnableToDetectSchemaException = "UnableToDetectSchemaException" // ErrCodeUnsupportedOperationException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go index a15313cf7..9dc208250 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/api.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/api.go index 8266d661e..facd06f10 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/api.go @@ -81,7 +81,7 @@ func (c *KinesisVideo) CreateSignalingChannelRequest(input *CreateSignalingChann // AWS account in this region. // // * ResourceInUseException -// The stream is currently not available for this operation. +// The signaling channel is currently not available for this operation. // // * AccessDeniedException // You do not have required permissions to perform this operation. @@ -183,7 +183,7 @@ func (c *KinesisVideo) CreateStreamRequest(input *CreateStreamInput) (req *reque // Not implemented. // // * ResourceInUseException -// The stream is currently not available for this operation. +// The signaling channel is currently not available for this operation. // // * InvalidDeviceException // Not implemented. @@ -296,6 +296,9 @@ func (c *KinesisVideo) DeleteSignalingChannelRequest(input *DeleteSignalingChann // latest version, use the DescribeStream (https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_DescribeStream.html) // API. // +// * ResourceInUseException +// The signaling channel is currently not available for this operation. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisvideo-2017-09-30/DeleteSignalingChannel func (c *KinesisVideo) DeleteSignalingChannel(input *DeleteSignalingChannelInput) (*DeleteSignalingChannelOutput, error) { req, out := c.DeleteSignalingChannelRequest(input) @@ -402,6 +405,9 @@ func (c *KinesisVideo) DeleteStreamRequest(input *DeleteStreamInput) (req *reque // latest version, use the DescribeStream (https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_DescribeStream.html) // API. // +// * ResourceInUseException +// The signaling channel is currently not available for this operation. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kinesisvideo-2017-09-30/DeleteStream func (c *KinesisVideo) DeleteStream(input *DeleteStreamInput) (*DeleteStreamOutput, error) { req, out := c.DeleteStreamRequest(input) @@ -469,7 +475,8 @@ func (c *KinesisVideo) DescribeSignalingChannelRequest(input *DescribeSignalingC // DescribeSignalingChannel API operation for Amazon Kinesis Video Streams. // // Returns the most current information about the signaling channel. You must -// specify either the name or the ARN of the channel that you want to describe. +// specify either the name or the Amazon Resource Name (ARN) of the channel +// that you want to describe. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -750,9 +757,9 @@ func (c *KinesisVideo) GetSignalingChannelEndpointRequest(input *GetSignalingCha // parameter, which consists of the Protocols and Role properties. // // Protocols is used to determine the communication mechanism. For example, -// specifying WSS as the protocol, results in this API producing a secure websocket -// endpoint, and specifying HTTPS as the protocol, results in this API generating -// an HTTPS endpoint. +// if you specify WSS as the protocol, this API produces a secure websocket +// endpoint. If you specify HTTPS as the protocol, this API generates an HTTPS +// endpoint. // // Role determines the messaging permissions. A MASTER role results in this // API generating an endpoint that a client can use to communicate with any @@ -778,7 +785,7 @@ func (c *KinesisVideo) GetSignalingChannelEndpointRequest(input *GetSignalingCha // Amazon Kinesis Video Streams can't find the stream that you specified. // // * ResourceInUseException -// The stream is currently not available for this operation. +// The signaling channel is currently not available for this operation. // // * AccessDeniedException // You do not have required permissions to perform this operation. @@ -1759,7 +1766,7 @@ func (c *KinesisVideo) UpdateDataRetentionRequest(input *UpdateDataRetentionInpu // Amazon Kinesis Video Streams can't find the stream that you specified. // // * ResourceInUseException -// The stream is currently not available for this operation. +// The signaling channel is currently not available for this operation. // // * NotAuthorizedException // The caller is not authorized to perform this operation. @@ -1840,8 +1847,8 @@ func (c *KinesisVideo) UpdateSignalingChannelRequest(input *UpdateSignalingChann // and takes time to complete. // // If the MessageTtlSeconds value is updated (either increased or reduced), -// then it only applies to new messages sent via this channel after it's been -// updated. Existing messages are still expire as per the previous MessageTtlSeconds +// it only applies to new messages sent via this channel after it's been updated. +// Existing messages are still expired as per the previous MessageTtlSeconds // value. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1863,7 +1870,7 @@ func (c *KinesisVideo) UpdateSignalingChannelRequest(input *UpdateSignalingChann // Amazon Kinesis Video Streams can't find the stream that you specified. // // * ResourceInUseException -// The stream is currently not available for this operation. +// The signaling channel is currently not available for this operation. // // * AccessDeniedException // You do not have required permissions to perform this operation. @@ -1972,7 +1979,7 @@ func (c *KinesisVideo) UpdateStreamRequest(input *UpdateStreamInput) (req *reque // Amazon Kinesis Video Streams can't find the stream that you specified. // // * ResourceInUseException -// The stream is currently not available for this operation. +// The signaling channel is currently not available for this operation. // // * NotAuthorizedException // The caller is not authorized to perform this operation. @@ -2006,8 +2013,8 @@ func (c *KinesisVideo) UpdateStreamWithContext(ctx aws.Context, input *UpdateStr // You do not have required permissions to perform this operation. 
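The rewritten GetSignalingChannelEndpoint documentation above explains that Protocols selects between WSS and HTTPS endpoints while Role scopes the messaging permissions. The sketch below requests both endpoint types for a MASTER; the channel ARN is a placeholder, and the SingleMasterChannelEndpointConfiguration struct name is taken from the service API rather than from this excerpt.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisvideo"
)

func main() {
	svc := kinesisvideo.New(session.Must(session.NewSession()))

	// Ask for both a secure websocket (WSS) and an HTTPS endpoint for the
	// MASTER role. The channel ARN is a placeholder.
	out, err := svc.GetSignalingChannelEndpoint(&kinesisvideo.GetSignalingChannelEndpointInput{
		ChannelARN: aws.String("arn:aws:kinesisvideo:us-east-1:111122223333:channel/example/1234567890"),
		SingleMasterChannelEndpointConfiguration: &kinesisvideo.SingleMasterChannelEndpointConfiguration{
			Protocols: []*string{aws.String("WSS"), aws.String("HTTPS")},
			Role:      aws.String("MASTER"),
		},
	})
	if err != nil {
		fmt.Println("GetSignalingChannelEndpoint failed:", err)
		return
	}
	for _, ep := range out.ResourceEndpointList {
		fmt.Printf("%s endpoint: %s\n", aws.StringValue(ep.Protocol), aws.StringValue(ep.ResourceEndpoint))
	}
}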
type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2024,17 +2031,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2042,29 +2049,29 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // You have reached the maximum limit of active signaling channels for this // AWS account in this region. type AccountChannelLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2081,17 +2088,17 @@ func (s AccountChannelLimitExceededException) GoString() string { func newErrorAccountChannelLimitExceededException(v protocol.ResponseMetadata) error { return &AccountChannelLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccountChannelLimitExceededException) Code() string { +func (s *AccountChannelLimitExceededException) Code() string { return "AccountChannelLimitExceededException" } // Message returns the exception's message. -func (s AccountChannelLimitExceededException) Message() string { +func (s *AccountChannelLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2099,28 +2106,28 @@ func (s AccountChannelLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccountChannelLimitExceededException) OrigErr() error { +func (s *AccountChannelLimitExceededException) OrigErr() error { return nil } -func (s AccountChannelLimitExceededException) Error() string { +func (s *AccountChannelLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s AccountChannelLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccountChannelLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccountChannelLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccountChannelLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The number of streams created for the account is too high. type AccountStreamLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2137,17 +2144,17 @@ func (s AccountStreamLimitExceededException) GoString() string { func newErrorAccountStreamLimitExceededException(v protocol.ResponseMetadata) error { return &AccountStreamLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccountStreamLimitExceededException) Code() string { +func (s *AccountStreamLimitExceededException) Code() string { return "AccountStreamLimitExceededException" } // Message returns the exception's message. -func (s AccountStreamLimitExceededException) Message() string { +func (s *AccountStreamLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2155,29 +2162,29 @@ func (s AccountStreamLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccountStreamLimitExceededException) OrigErr() error { +func (s *AccountStreamLimitExceededException) OrigErr() error { return nil } -func (s AccountStreamLimitExceededException) Error() string { +func (s *AccountStreamLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccountStreamLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccountStreamLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccountStreamLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccountStreamLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // A structure that encapsulates a signaling channel's metadata and properties. type ChannelInfo struct { _ struct{} `type:"structure"` - // The ARN of the signaling channel. + // The Amazon Resource Name (ARN) of the signaling channel. ChannelARN *string `min:"1" type:"string"` // The name of the signaling channel. @@ -2304,8 +2311,8 @@ func (s *ChannelNameCondition) SetComparisonValue(v string) *ChannelNameConditio // Kinesis Video Streams has throttled the request because you have exceeded // the limit of allowed client calls. Try making the call later. 
type ClientLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2322,17 +2329,17 @@ func (s ClientLimitExceededException) GoString() string { func newErrorClientLimitExceededException(v protocol.ResponseMetadata) error { return &ClientLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ClientLimitExceededException) Code() string { +func (s *ClientLimitExceededException) Code() string { return "ClientLimitExceededException" } // Message returns the exception's message. -func (s ClientLimitExceededException) Message() string { +func (s *ClientLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2340,29 +2347,29 @@ func (s ClientLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ClientLimitExceededException) OrigErr() error { +func (s *ClientLimitExceededException) OrigErr() error { return nil } -func (s ClientLimitExceededException) Error() string { +func (s *ClientLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ClientLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ClientLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ClientLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ClientLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type CreateSignalingChannelInput struct { _ struct{} `type:"structure"` // A name for the signaling channel that you are creating. It must be unique - // for each account and region. + // for each AWS account and AWS Region. // // ChannelName is a required field ChannelName *string `min:"1" type:"string" required:"true"` @@ -2374,7 +2381,7 @@ type CreateSignalingChannelInput struct { // A structure containing the configuration for the SINGLE_MASTER channel type. SingleMasterConfiguration *SingleMasterConfiguration `type:"structure"` - // A set of tags (key/value pairs) that you want to associate with this channel. + // A set of tags (key-value pairs) that you want to associate with this channel. Tags []*Tag `type:"list"` } @@ -2446,7 +2453,7 @@ func (s *CreateSignalingChannelInput) SetTags(v []*Tag) *CreateSignalingChannelI type CreateSignalingChannelOutput struct { _ struct{} `type:"structure"` - // The ARN of the created channel. + // The Amazon Resource Name (ARN) of the created channel. ChannelARN *string `min:"1" type:"string"` } @@ -2619,14 +2626,15 @@ func (s *CreateStreamOutput) SetStreamARN(v string) *CreateStreamOutput { type DeleteSignalingChannelInput struct { _ struct{} `type:"structure"` - // The ARN of the signaling channel that you want to delete. + // The Amazon Resource Name (ARN) of the signaling channel that you want to + // delete. // // ChannelARN is a required field ChannelARN *string `min:"1" type:"string" required:"true"` // The current version of the signaling channel that you want to delete. You // can obtain the current version by invoking the DescribeSignalingChannel or - // ListSignalingChannels APIs. 
+ // ListSignalingChannels API operations. CurrentVersion *string `min:"1" type:"string"` } @@ -2902,8 +2910,8 @@ func (s *DescribeStreamOutput) SetStreamInfo(v *StreamInfo) *DescribeStreamOutpu // Not implemented. type DeviceStreamLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2920,17 +2928,17 @@ func (s DeviceStreamLimitExceededException) GoString() string { func newErrorDeviceStreamLimitExceededException(v protocol.ResponseMetadata) error { return &DeviceStreamLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DeviceStreamLimitExceededException) Code() string { +func (s *DeviceStreamLimitExceededException) Code() string { return "DeviceStreamLimitExceededException" } // Message returns the exception's message. -func (s DeviceStreamLimitExceededException) Message() string { +func (s *DeviceStreamLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2938,22 +2946,22 @@ func (s DeviceStreamLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DeviceStreamLimitExceededException) OrigErr() error { +func (s *DeviceStreamLimitExceededException) OrigErr() error { return nil } -func (s DeviceStreamLimitExceededException) Error() string { +func (s *DeviceStreamLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DeviceStreamLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DeviceStreamLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DeviceStreamLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *DeviceStreamLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type GetDataEndpointInput struct { @@ -3047,7 +3055,8 @@ func (s *GetDataEndpointOutput) SetDataEndpoint(v string) *GetDataEndpointOutput type GetSignalingChannelEndpointInput struct { _ struct{} `type:"structure"` - // The ARN of the signalling channel for which you want to get an endpoint. + // The Amazon Resource Name (ARN) of the signalling channel for which you want + // to get an endpoint. // // ChannelARN is a required field ChannelARN *string `min:"1" type:"string" required:"true"` @@ -3125,8 +3134,8 @@ func (s *GetSignalingChannelEndpointOutput) SetResourceEndpointList(v []*Resourc // The value for this input parameter is invalid. type InvalidArgumentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3143,17 +3152,17 @@ func (s InvalidArgumentException) GoString() string { func newErrorInvalidArgumentException(v protocol.ResponseMetadata) error { return &InvalidArgumentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
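The signaling-channel hunks above spell out the Amazon Resource Name wording and note that ChannelName must be unique per AWS account and Region. A sketch that creates a channel with one tag and prints the returned ARN; the channel name and tag are placeholders, and only fields visible in the CreateSignalingChannelInput hunk are used.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisvideo"
)

func main() {
	svc := kinesisvideo.New(session.Must(session.NewSession()))

	// ChannelName must be unique per AWS account and Region; the name and the
	// tag below are placeholders.
	out, err := svc.CreateSignalingChannel(&kinesisvideo.CreateSignalingChannelInput{
		ChannelName: aws.String("example-signaling-channel"),
		Tags: []*kinesisvideo.Tag{
			{Key: aws.String("Environment"), Value: aws.String("test")},
		},
	})
	if err != nil {
		fmt.Println("CreateSignalingChannel failed:", err)
		return
	}
	fmt.Println("channel ARN:", aws.StringValue(out.ChannelARN))
}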
-func (s InvalidArgumentException) Code() string { +func (s *InvalidArgumentException) Code() string { return "InvalidArgumentException" } // Message returns the exception's message. -func (s InvalidArgumentException) Message() string { +func (s *InvalidArgumentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3161,28 +3170,28 @@ func (s InvalidArgumentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArgumentException) OrigErr() error { +func (s *InvalidArgumentException) OrigErr() error { return nil } -func (s InvalidArgumentException) Error() string { +func (s *InvalidArgumentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArgumentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArgumentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArgumentException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArgumentException) RequestID() string { + return s.RespMetadata.RequestID } // Not implemented. type InvalidDeviceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3199,17 +3208,17 @@ func (s InvalidDeviceException) GoString() string { func newErrorInvalidDeviceException(v protocol.ResponseMetadata) error { return &InvalidDeviceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeviceException) Code() string { +func (s *InvalidDeviceException) Code() string { return "InvalidDeviceException" } // Message returns the exception's message. -func (s InvalidDeviceException) Message() string { +func (s *InvalidDeviceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3217,28 +3226,28 @@ func (s InvalidDeviceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDeviceException) OrigErr() error { +func (s *InvalidDeviceException) OrigErr() error { return nil } -func (s InvalidDeviceException) Error() string { +func (s *InvalidDeviceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeviceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeviceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeviceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeviceException) RequestID() string { + return s.RespMetadata.RequestID } // The format of the StreamARN is invalid. 
type InvalidResourceFormatException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3255,17 +3264,17 @@ func (s InvalidResourceFormatException) GoString() string { func newErrorInvalidResourceFormatException(v protocol.ResponseMetadata) error { return &InvalidResourceFormatException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidResourceFormatException) Code() string { +func (s *InvalidResourceFormatException) Code() string { return "InvalidResourceFormatException" } // Message returns the exception's message. -func (s InvalidResourceFormatException) Message() string { +func (s *InvalidResourceFormatException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3273,22 +3282,22 @@ func (s InvalidResourceFormatException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResourceFormatException) OrigErr() error { +func (s *InvalidResourceFormatException) OrigErr() error { return nil } -func (s InvalidResourceFormatException) Error() string { +func (s *InvalidResourceFormatException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidResourceFormatException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResourceFormatException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidResourceFormatException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResourceFormatException) RequestID() string { + return s.RespMetadata.RequestID } type ListSignalingChannelsInput struct { @@ -3489,7 +3498,8 @@ type ListTagsForResourceInput struct { // request to fetch the next batch of tags. NextToken *string `type:"string"` - // The ARN of the signaling channel for which you want to list tags. + // The Amazon Resource Name (ARN) of the signaling channel for which you want + // to list tags. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -3662,8 +3672,8 @@ func (s *ListTagsForStreamOutput) SetTags(v map[string]*string) *ListTagsForStre // The caller is not authorized to perform this operation. type NotAuthorizedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3680,17 +3690,17 @@ func (s NotAuthorizedException) GoString() string { func newErrorNotAuthorizedException(v protocol.ResponseMetadata) error { return &NotAuthorizedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotAuthorizedException) Code() string { +func (s *NotAuthorizedException) Code() string { return "NotAuthorizedException" } // Message returns the exception's message. -func (s NotAuthorizedException) Message() string { +func (s *NotAuthorizedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3698,22 +3708,22 @@ func (s NotAuthorizedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s NotAuthorizedException) OrigErr() error { +func (s *NotAuthorizedException) OrigErr() error { return nil } -func (s NotAuthorizedException) Error() string { +func (s *NotAuthorizedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotAuthorizedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotAuthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotAuthorizedException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotAuthorizedException) RequestID() string { + return s.RespMetadata.RequestID } // An object that describes the endpoint of the signaling channel returned by @@ -3752,10 +3762,10 @@ func (s *ResourceEndpointListItem) SetResourceEndpoint(v string) *ResourceEndpoi return s } -// The stream is currently not available for this operation. +// The signaling channel is currently not available for this operation. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3772,17 +3782,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3790,28 +3800,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // Amazon Kinesis Video Streams can't find the stream that you specified. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3828,17 +3838,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3846,22 +3856,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // An object that contains the endpoint configuration for the SINGLE_MASTER @@ -4163,7 +4173,8 @@ func (s *Tag) SetValue(v string) *Tag { type TagResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the signaling channel to which you want to add tags. + // The Amazon Resource Name (ARN) of the signaling channel to which you want + // to add tags. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -4327,8 +4338,8 @@ func (s TagStreamOutput) GoString() string { // You have exceeded the limit of tags that you can associate with the resource. // Kinesis video streams support up to 50 tags. type TagsPerResourceExceededLimitException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4345,17 +4356,17 @@ func (s TagsPerResourceExceededLimitException) GoString() string { func newErrorTagsPerResourceExceededLimitException(v protocol.ResponseMetadata) error { return &TagsPerResourceExceededLimitException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagsPerResourceExceededLimitException) Code() string { +func (s *TagsPerResourceExceededLimitException) Code() string { return "TagsPerResourceExceededLimitException" } // Message returns the exception's message. -func (s TagsPerResourceExceededLimitException) Message() string { +func (s *TagsPerResourceExceededLimitException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4363,28 +4374,29 @@ func (s TagsPerResourceExceededLimitException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagsPerResourceExceededLimitException) OrigErr() error { +func (s *TagsPerResourceExceededLimitException) OrigErr() error { return nil } -func (s TagsPerResourceExceededLimitException) Error() string { +func (s *TagsPerResourceExceededLimitException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s TagsPerResourceExceededLimitException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagsPerResourceExceededLimitException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagsPerResourceExceededLimitException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagsPerResourceExceededLimitException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the signaling channel from which you want to remove tags. + // The Amazon Resource Name (ARN) of the signaling channel from which you want + // to remove tags. // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` @@ -4649,7 +4661,8 @@ func (s UpdateDataRetentionOutput) GoString() string { type UpdateSignalingChannelInput struct { _ struct{} `type:"structure"` - // The ARN of the signaling channel that you want to update. + // The Amazon Resource Name (ARN) of the signaling channel that you want to + // update. // // ChannelARN is a required field ChannelARN *string `min:"1" type:"string" required:"true"` @@ -4851,8 +4864,8 @@ func (s UpdateStreamOutput) GoString() string { // latest version, use the DescribeStream (https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_DescribeStream.html) // API. type VersionMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4869,17 +4882,17 @@ func (s VersionMismatchException) GoString() string { func newErrorVersionMismatchException(v protocol.ResponseMetadata) error { return &VersionMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s VersionMismatchException) Code() string { +func (s *VersionMismatchException) Code() string { return "VersionMismatchException" } // Message returns the exception's message. -func (s VersionMismatchException) Message() string { +func (s *VersionMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4887,22 +4900,22 @@ func (s VersionMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s VersionMismatchException) OrigErr() error { +func (s *VersionMismatchException) OrigErr() error { return nil } -func (s VersionMismatchException) Error() string { +func (s *VersionMismatchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s VersionMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *VersionMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s VersionMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *VersionMismatchException) RequestID() string { + return s.RespMetadata.RequestID } const ( @@ -4923,8 +4936,24 @@ const ( // APINameGetDashStreamingSessionUrl is a APIName enum value APINameGetDashStreamingSessionUrl = "GET_DASH_STREAMING_SESSION_URL" + + // APINameGetClip is a APIName enum value + APINameGetClip = "GET_CLIP" ) +// APIName_Values returns all elements of the APIName enum +func APIName_Values() []string { + return []string{ + APINamePutMedia, + APINameGetMedia, + APINameListFragments, + APINameGetMediaForFragmentList, + APINameGetHlsStreamingSessionUrl, + APINameGetDashStreamingSessionUrl, + APINameGetClip, + } +} + const ( // ChannelProtocolWss is a ChannelProtocol enum value ChannelProtocolWss = "WSS" @@ -4933,6 +4962,14 @@ const ( ChannelProtocolHttps = "HTTPS" ) +// ChannelProtocol_Values returns all elements of the ChannelProtocol enum +func ChannelProtocol_Values() []string { + return []string{ + ChannelProtocolWss, + ChannelProtocolHttps, + } +} + const ( // ChannelRoleMaster is a ChannelRole enum value ChannelRoleMaster = "MASTER" @@ -4941,16 +4978,38 @@ const ( ChannelRoleViewer = "VIEWER" ) +// ChannelRole_Values returns all elements of the ChannelRole enum +func ChannelRole_Values() []string { + return []string{ + ChannelRoleMaster, + ChannelRoleViewer, + } +} + const ( // ChannelTypeSingleMaster is a ChannelType enum value ChannelTypeSingleMaster = "SINGLE_MASTER" ) +// ChannelType_Values returns all elements of the ChannelType enum +func ChannelType_Values() []string { + return []string{ + ChannelTypeSingleMaster, + } +} + const ( // ComparisonOperatorBeginsWith is a ComparisonOperator enum value ComparisonOperatorBeginsWith = "BEGINS_WITH" ) +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorBeginsWith, + } +} + const ( // StatusCreating is a Status enum value StatusCreating = "CREATING" @@ -4965,6 +5024,16 @@ const ( StatusDeleting = "DELETING" ) +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusCreating, + StatusActive, + StatusUpdating, + StatusDeleting, + } +} + const ( // UpdateDataRetentionOperationIncreaseDataRetention is a UpdateDataRetentionOperation enum value UpdateDataRetentionOperationIncreaseDataRetention = "INCREASE_DATA_RETENTION" @@ -4972,3 +5041,11 @@ const ( // UpdateDataRetentionOperationDecreaseDataRetention is a UpdateDataRetentionOperation enum value UpdateDataRetentionOperationDecreaseDataRetention = "DECREASE_DATA_RETENTION" ) + +// UpdateDataRetentionOperation_Values returns all elements of the UpdateDataRetentionOperation enum +func UpdateDataRetentionOperation_Values() []string { + return []string{ + UpdateDataRetentionOperationIncreaseDataRetention, + UpdateDataRetentionOperationDecreaseDataRetention, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/errors.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/errors.go index 57993f66a..c1b48db3f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/errors.go @@ -67,7 +67,7 @@ const ( // ErrCodeResourceInUseException for service response error code // "ResourceInUseException". // - // The stream is currently not available for this operation. 
+ // The signaling channel is currently not available for this operation. ErrCodeResourceInUseException = "ResourceInUseException" // ErrCodeResourceNotFoundException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go index 765192ab8..3694f941e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go index 725da9206..8e2aae8e5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go @@ -350,9 +350,9 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, // CreateAlias API operation for AWS Key Management Service. // // Creates a display name for a customer managed customer master key (CMK). -// You can use an alias to identify a CMK in cryptographic operations, such -// as Encrypt and GenerateDataKey. You can change the CMK associated with the -// alias at any time. +// You can use an alias to identify a CMK in cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations), +// such as Encrypt and GenerateDataKey. You can change the CMK associated with +// the alias at any time. // // Aliases are easier to remember than key IDs. They can also help to simplify // your applications. For example, if you use an alias in your code, you can @@ -399,11 +399,12 @@ func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, // a new alias with the desired name. // // * You can use an alias name or alias ARN to identify a CMK in AWS KMS -// cryptographic operations and in the DescribeKey operation. However, you -// cannot use alias names or alias ARNs in API operations that manage CMKs, -// such as DisableKey or GetKeyPolicy. For information about the valid CMK -// identifiers for each AWS KMS API operation, see the descriptions of the -// KeyId parameter in the API operation documentation. +// cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// and in the DescribeKey operation. However, you cannot use alias names +// or alias ARNs in API operations that manage CMKs, such as DisableKey or +// GetKeyPolicy. For information about the valid CMK identifiers for each +// AWS KMS API operation, see the descriptions of the KeyId parameter in +// the API operation documentation. // // Because an alias is not a property of a CMK, you can delete and change the // aliases of a CMK without affecting the CMK. Also, aliases do not appear in @@ -693,8 +694,8 @@ func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, // principal to use the CMK when the conditions specified in the grant are met. // When setting permissions, grants are an alternative to key policies. 
// -// To create a grant that allows a cryptographic operation only when the request -// includes a particular encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context), +// To create a grant that allows a cryptographic operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// only when the request includes a particular encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context), // use the Constraints parameter. For details, see GrantConstraints. // // You can create grants on symmetric and asymmetric CMKs. However, if the grant @@ -1357,12 +1358,12 @@ func (c *KMS) DeleteCustomKeyStoreRequest(input *DeleteCustomKeyStoreInput) (req // The custom key store that you delete cannot contain any AWS KMS customer // master keys (CMKs) (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys). // Before deleting the key store, verify that you will never need to use any -// of the CMKs in the key store for any cryptographic operations. Then, use -// ScheduleKeyDeletion to delete the AWS KMS customer master keys (CMKs) from -// the key store. When the scheduled waiting period expires, the ScheduleKeyDeletion -// operation deletes the CMKs. Then it makes a best effort to delete the key -// material from the associated cluster. However, you might need to manually -// delete the orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key) +// of the CMKs in the key store for any cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). +// Then, use ScheduleKeyDeletion to delete the AWS KMS customer master keys +// (CMKs) from the key store. When the scheduled waiting period expires, the +// ScheduleKeyDeletion operation deletes the CMKs. Then it makes a best effort +// to delete the key material from the associated cluster. However, you might +// need to manually delete the orphaned key material (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-orphaned-key) // from the cluster and its backups. // // After all CMKs are deleted from AWS KMS, use DisconnectCustomKeyStore to @@ -1844,8 +1845,8 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o // DisableKey API operation for AWS Key Management Service. // // Sets the state of a customer master key (CMK) to disabled, thereby preventing -// its use for cryptographic operations. You cannot perform this operation on -// a CMK in a different AWS account. +// its use for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). +// You cannot perform this operation on a CMK in a different AWS account. // // For more information about how key state affects the use of a CMK, see How // Key State Affects the Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -2079,8 +2080,9 @@ func (c *KMS) DisconnectCustomKeyStoreRequest(input *DisconnectCustomKeyStoreInp // // While a custom key store is disconnected, all attempts to create customer // master keys (CMKs) in the custom key store or to use existing CMKs in cryptographic -// operations will fail. This action can prevent users from storing and accessing -// sensitive data. 
+// operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// will fail. This action can prevent users from storing and accessing sensitive +// data. // // To find the connection state of a custom key store, use the DescribeCustomKeyStores // operation. To reconnect a custom key store, use the ConnectCustomKeyStore @@ -2195,8 +2197,8 @@ func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, out // EnableKey API operation for AWS Key Management Service. // // Sets the key state of a customer master key (CMK) to enabled. This allows -// you to use the CMK for cryptographic operations. You cannot perform this -// operation on a CMK in a different AWS account. +// you to use the CMK for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations). +// You cannot perform this operation on a CMK in a different AWS account. // // The CMK that you use for this operation must be in a compatible key state. // For details, see How Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -2429,11 +2431,12 @@ func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output // identifier or database password, or other sensitive information. // // * You can use the Encrypt operation to move encrypted data from one AWS -// region to another. In the first region, generate a data key and use the -// plaintext key to encrypt the data. Then, in the new region, call the Encrypt -// method on same plaintext data key. Now, you can safely move the encrypted -// data and encrypted data key to the new region, and decrypt in the new -// region when necessary. +// Region to another. For example, in Region A, generate a data key and use +// the plaintext key to encrypt your data. Then, in Region A, use the Encrypt +// operation to encrypt the plaintext data key under a CMK in Region B. Now, +// you can move the encrypted data and the encrypted data key to Region B. +// When necessary, you can decrypt the encrypted data key and the encrypted +// data entirely within in Region B. // // You don't need to use the Encrypt operation to encrypt a data key. The GenerateDataKey // and GenerateDataKeyPair operations return a plaintext data key and an encrypted @@ -2603,27 +2606,20 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request. // GenerateDataKey API operation for AWS Key Management Service. // -// Generates a unique symmetric data key. This operation returns a plaintext -// copy of the data key and a copy that is encrypted under a customer master -// key (CMK) that you specify. You can use the plaintext key to encrypt your -// data outside of AWS KMS and store the encrypted data key with the encrypted -// data. +// Generates a unique symmetric data key for client-side encryption. This operation +// returns a plaintext copy of the data key and a copy that is encrypted under +// a customer master key (CMK) that you specify. You can use the plaintext key +// to encrypt your data outside of AWS KMS and store the encrypted data key +// with the encrypted data. // // GenerateDataKey returns a unique data key for each request. The bytes in -// the key are not related to the caller or CMK that is used to encrypt the -// data key. +// the plaintext key are not related to the caller or the CMK. 
// // To generate a data key, specify the symmetric CMK that will be used to encrypt // the data key. You cannot use an asymmetric CMK to generate data keys. To -// get the type of your CMK, use the DescribeKey operation. -// -// You must also specify the length of the data key. Use either the KeySpec -// or NumberOfBytes parameters (but not both). For 128-bit and 256-bit data -// keys, use the KeySpec parameter. -// -// If the operation succeeds, the plaintext copy of the data key is in the Plaintext -// field of the response, and the encrypted copy of the data key in the CiphertextBlob -// field. +// get the type of your CMK, use the DescribeKey operation. You must also specify +// the length of the data key. Use either the KeySpec or NumberOfBytes parameters +// (but not both). For 128-bit and 256-bit data keys, use the KeySpec parameter. // // To get only an encrypted copy of the data key, use GenerateDataKeyWithoutPlaintext. // To generate an asymmetric data key pair, use the GenerateDataKeyPair or GenerateDataKeyPairWithoutPlaintext @@ -2640,24 +2636,32 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request. // For details, see How Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. // +// How to use your data key +// // We recommend that you use the following pattern to encrypt data locally in -// your application: +// your application. You can write your own code or use a client-side encryption +// library, such as the AWS Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/), +// the Amazon DynamoDB Encryption Client (https://docs.aws.amazon.com/dynamodb-encryption-client/latest/devguide/), +// or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html) +// to do these tasks for you. // -// Use the GenerateDataKey operation to get a data encryption key. +// To encrypt data outside of AWS KMS: // -// Use the plaintext data key (returned in the Plaintext field of the response) -// to encrypt data locally, then erase the plaintext data key from memory. +// Use the GenerateDataKey operation to get a data key. // -// Store the encrypted data key (returned in the CiphertextBlob field of the -// response) alongside the locally encrypted data. +// Use the plaintext data key (in the Plaintext field of the response) to encrypt +// your data outside of AWS KMS. Then erase the plaintext data key from memory. // -// To decrypt data locally: +// Store the encrypted data key (in the CiphertextBlob field of the response) +// with the encrypted data. +// +// To decrypt data outside of AWS KMS: // // Use the Decrypt operation to decrypt the encrypted data key. The operation // returns a plaintext copy of the data key. // -// Use the plaintext data key to decrypt data locally, then erase the plaintext -// data key from memory. +// Use the plaintext data key to decrypt data outside of AWS KMS, then erase +// the plaintext data key from memory. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2794,7 +2798,8 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req * // // To generate a data key pair, you must specify a symmetric customer master // key (CMK) to encrypt the private key in a data key pair. 
You cannot use an -// asymmetric CMK. To get the type of your CMK, use the DescribeKey operation. +// asymmetric CMK or a CMK in a custom key store. To get the type and origin +// of your CMK, use the DescribeKey operation. // // If you are using the data key pair to encrypt data, or for any operation // where you don't immediately need a private key, consider using the GenerateDataKeyPairWithoutPlaintext @@ -2868,6 +2873,10 @@ func (c *KMS) GenerateDataKeyPairRequest(input *GenerateDataKeyPairInput) (req * // Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide . // +// * UnsupportedOperationException +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPair func (c *KMS) GenerateDataKeyPair(input *GenerateDataKeyPairInput) (*GenerateDataKeyPairOutput, error) { req, out := c.GenerateDataKeyPairRequest(input) @@ -2941,8 +2950,8 @@ func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyP // // To generate a data key pair, you must specify a symmetric customer master // key (CMK) to encrypt the private key in the data key pair. You cannot use -// an asymmetric CMK. To get the type of your CMK, use the KeySpec field in -// the DescribeKey response. +// an asymmetric CMK or a CMK in a custom key store. To get the type and origin +// of your CMK, use the KeySpec field in the DescribeKey response. // // You can use the public key that GenerateDataKeyPairWithoutPlaintext returns // to encrypt data or verify a signature outside of AWS KMS. Then, store the @@ -3018,6 +3027,10 @@ func (c *KMS) GenerateDataKeyPairWithoutPlaintextRequest(input *GenerateDataKeyP // Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide . // +// * UnsupportedOperationException +// The request was rejected because a specified parameter is not supported or +// a specified resource is not valid for this operation. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/kms-2014-11-01/GenerateDataKeyPairWithoutPlaintext func (c *KMS) GenerateDataKeyPairWithoutPlaintext(input *GenerateDataKeyPairWithoutPlaintextInput) (*GenerateDataKeyPairWithoutPlaintextOutput, error) { req, out := c.GenerateDataKeyPairWithoutPlaintextRequest(input) @@ -4237,6 +4250,12 @@ func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, o // To perform this operation on a CMK in a different AWS account, specify the // key ARN in the value of the KeyId parameter. // +// The GranteePrincipal field in the ListGrants response usually contains the +// user or role designated as the grantee principal in the grant. However, when +// the grantee principal in the grant is an AWS service, the GranteePrincipal +// field contains the service principal (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services), +// which might represent several different grantee principals. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
@@ -5016,13 +5035,15 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out // is encrypted, such as when you manually rotate (https://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html#rotate-keys-manually) // a CMK or change the CMK that protects a ciphertext. You can also use it to // reencrypt ciphertext under the same CMK, such as to change the encryption -// context of a ciphertext. +// context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) +// of a ciphertext. // // The ReEncrypt operation can decrypt ciphertext that was encrypted by using // an AWS KMS CMK in an AWS KMS operation, such as Encrypt or GenerateDataKey. // It can also decrypt ciphertext that was encrypted by using the public key -// of an asymmetric CMK outside of AWS KMS. However, it cannot decrypt ciphertext -// produced by other libraries, such as the AWS Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) +// of an asymmetric CMK (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#asymmetric-cmks) +// outside of AWS KMS. However, it cannot decrypt ciphertext produced by other +// libraries, such as the AWS Encryption SDK (https://docs.aws.amazon.com/encryption-sdk/latest/developer-guide/) // or Amazon S3 client-side encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). // These libraries return a ciphertext format that is incompatible with AWS // KMS. @@ -5057,17 +5078,16 @@ func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, out // // Unlike other AWS KMS API operations, ReEncrypt callers must have two permissions: // -// * kms:EncryptFrom permission on the source CMK +// * kms:ReEncryptFrom permission on the source CMK // -// * kms:EncryptTo permission on the destination CMK +// * kms:ReEncryptTo permission on the destination CMK // -// To permit reencryption from -// -// or to a CMK, include the "kms:ReEncrypt*" permission in your key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html). +// To permit reencryption from or to a CMK, include the "kms:ReEncrypt*" permission +// in your key policy (https://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html). // This permission is automatically included in the key policy when you use // the console to create a CMK. But you must include it manually when you create -// a CMK programmatically or when you use the PutKeyPolicy operation set a key -// policy. +// a CMK programmatically or when you use the PutKeyPolicy operation to set +// a key policy. // // The CMK that you use for this operation must be in a compatible key state. // For details, see How Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) @@ -5995,6 +6015,11 @@ func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, // The request was rejected because an internal exception occurred. The request // can be retried. // +// * LimitExceededException +// The request was rejected because a quota was exceeded. For more information, +// see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) +// in the AWS Key Management Service Developer Guide. +// // * InvalidStateException // The request was rejected because the state of the specified resource is not // valid for this request. 
@@ -6533,8 +6558,8 @@ func (s *AliasListEntry) SetTargetKeyId(v string) *AliasListEntry { // The request was rejected because it attempted to create a resource that already // exists. type AlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6551,17 +6576,17 @@ func (s AlreadyExistsException) GoString() string { func newErrorAlreadyExistsException(v protocol.ResponseMetadata) error { return &AlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AlreadyExistsException) Code() string { +func (s *AlreadyExistsException) Code() string { return "AlreadyExistsException" } // Message returns the exception's message. -func (s AlreadyExistsException) Message() string { +func (s *AlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6569,22 +6594,22 @@ func (s AlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AlreadyExistsException) OrigErr() error { +func (s *AlreadyExistsException) OrigErr() error { return nil } -func (s AlreadyExistsException) Error() string { +func (s *AlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *AlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } type CancelKeyDeletionInput struct { @@ -6642,7 +6667,8 @@ func (s *CancelKeyDeletionInput) SetKeyId(v string) *CancelKeyDeletionInput { type CancelKeyDeletionOutput struct { _ struct{} `type:"structure"` - // The unique identifier of the master key for which deletion is canceled. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK whose deletion is canceled. KeyId *string `min:"1" type:"string"` } @@ -6671,8 +6697,8 @@ func (s *CancelKeyDeletionOutput) SetKeyId(v string) *CancelKeyDeletionOutput { // view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) // operation. type CloudHsmClusterInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6689,17 +6715,17 @@ func (s CloudHsmClusterInUseException) GoString() string { func newErrorCloudHsmClusterInUseException(v protocol.ResponseMetadata) error { return &CloudHsmClusterInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmClusterInUseException) Code() string { +func (s *CloudHsmClusterInUseException) Code() string { return "CloudHsmClusterInUseException" } // Message returns the exception's message. 
-func (s CloudHsmClusterInUseException) Message() string { +func (s *CloudHsmClusterInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6707,22 +6733,22 @@ func (s CloudHsmClusterInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmClusterInUseException) OrigErr() error { +func (s *CloudHsmClusterInUseException) OrigErr() error { return nil } -func (s CloudHsmClusterInUseException) Error() string { +func (s *CloudHsmClusterInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmClusterInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmClusterInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmClusterInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmClusterInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the associated AWS CloudHSM cluster did @@ -6756,8 +6782,8 @@ func (s CloudHsmClusterInUseException) RequestID() string { // see Configure a Default Security Group (https://docs.aws.amazon.com/cloudhsm/latest/userguide/configure-sg.html) // in the AWS CloudHSM User Guide . type CloudHsmClusterInvalidConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6774,17 +6800,17 @@ func (s CloudHsmClusterInvalidConfigurationException) GoString() string { func newErrorCloudHsmClusterInvalidConfigurationException(v protocol.ResponseMetadata) error { return &CloudHsmClusterInvalidConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmClusterInvalidConfigurationException) Code() string { +func (s *CloudHsmClusterInvalidConfigurationException) Code() string { return "CloudHsmClusterInvalidConfigurationException" } // Message returns the exception's message. -func (s CloudHsmClusterInvalidConfigurationException) Message() string { +func (s *CloudHsmClusterInvalidConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6792,22 +6818,22 @@ func (s CloudHsmClusterInvalidConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmClusterInvalidConfigurationException) OrigErr() error { +func (s *CloudHsmClusterInvalidConfigurationException) OrigErr() error { return nil } -func (s CloudHsmClusterInvalidConfigurationException) Error() string { +func (s *CloudHsmClusterInvalidConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmClusterInvalidConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmClusterInvalidConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s CloudHsmClusterInvalidConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmClusterInvalidConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the AWS CloudHSM cluster that is associated @@ -6816,8 +6842,8 @@ func (s CloudHsmClusterInvalidConfigurationException) RequestID() string { // (https://docs.aws.amazon.com/cloudhsm/latest/userguide/getting-started.html) // in the AWS CloudHSM User Guide. type CloudHsmClusterNotActiveException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6834,17 +6860,17 @@ func (s CloudHsmClusterNotActiveException) GoString() string { func newErrorCloudHsmClusterNotActiveException(v protocol.ResponseMetadata) error { return &CloudHsmClusterNotActiveException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmClusterNotActiveException) Code() string { +func (s *CloudHsmClusterNotActiveException) Code() string { return "CloudHsmClusterNotActiveException" } // Message returns the exception's message. -func (s CloudHsmClusterNotActiveException) Message() string { +func (s *CloudHsmClusterNotActiveException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6852,30 +6878,30 @@ func (s CloudHsmClusterNotActiveException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmClusterNotActiveException) OrigErr() error { +func (s *CloudHsmClusterNotActiveException) OrigErr() error { return nil } -func (s CloudHsmClusterNotActiveException) Error() string { +func (s *CloudHsmClusterNotActiveException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmClusterNotActiveException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmClusterNotActiveException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmClusterNotActiveException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmClusterNotActiveException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because AWS KMS cannot find the AWS CloudHSM cluster // with the specified cluster ID. Retry the request with a different cluster // ID. type CloudHsmClusterNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6892,17 +6918,17 @@ func (s CloudHsmClusterNotFoundException) GoString() string { func newErrorCloudHsmClusterNotFoundException(v protocol.ResponseMetadata) error { return &CloudHsmClusterNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmClusterNotFoundException) Code() string { +func (s *CloudHsmClusterNotFoundException) Code() string { return "CloudHsmClusterNotFoundException" } // Message returns the exception's message. 
-func (s CloudHsmClusterNotFoundException) Message() string { +func (s *CloudHsmClusterNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6910,22 +6936,22 @@ func (s CloudHsmClusterNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmClusterNotFoundException) OrigErr() error { +func (s *CloudHsmClusterNotFoundException) OrigErr() error { return nil } -func (s CloudHsmClusterNotFoundException) Error() string { +func (s *CloudHsmClusterNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmClusterNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmClusterNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CloudHsmClusterNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmClusterNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified AWS CloudHSM cluster has a @@ -6941,8 +6967,8 @@ func (s CloudHsmClusterNotFoundException) RequestID() string { // view the cluster certificate of a cluster, use the DescribeClusters (https://docs.aws.amazon.com/cloudhsm/latest/APIReference/API_DescribeClusters.html) // operation. type CloudHsmClusterNotRelatedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6959,17 +6985,17 @@ func (s CloudHsmClusterNotRelatedException) GoString() string { func newErrorCloudHsmClusterNotRelatedException(v protocol.ResponseMetadata) error { return &CloudHsmClusterNotRelatedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CloudHsmClusterNotRelatedException) Code() string { +func (s *CloudHsmClusterNotRelatedException) Code() string { return "CloudHsmClusterNotRelatedException" } // Message returns the exception's message. -func (s CloudHsmClusterNotRelatedException) Message() string { +func (s *CloudHsmClusterNotRelatedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6977,22 +7003,22 @@ func (s CloudHsmClusterNotRelatedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CloudHsmClusterNotRelatedException) OrigErr() error { +func (s *CloudHsmClusterNotRelatedException) OrigErr() error { return nil } -func (s CloudHsmClusterNotRelatedException) Error() string { +func (s *CloudHsmClusterNotRelatedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CloudHsmClusterNotRelatedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CloudHsmClusterNotRelatedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s CloudHsmClusterNotRelatedException) RequestID() string { - return s.respMetadata.RequestID +func (s *CloudHsmClusterNotRelatedException) RequestID() string { + return s.RespMetadata.RequestID } type ConnectCustomKeyStoreInput struct { @@ -7259,9 +7285,10 @@ func (s *CreateCustomKeyStoreOutput) SetCustomKeyStoreId(v string) *CreateCustom type CreateGrantInput struct { _ struct{} `type:"structure"` - // Allows a cryptographic operation only when the encryption context matches - // or includes the encryption context specified in this structure. For more - // information about encryption context, see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) + // Allows a cryptographic operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // only when the encryption context matches or includes the encryption context + // specified in this structure. For more information about encryption context, + // see Encryption Context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context) // in the AWS Key Management Service Developer Guide . Constraints *GrantConstraints `type:"structure"` @@ -7530,9 +7557,10 @@ type CreateKeyInput struct { // a task. Description *string `type:"string"` - // Determines the cryptographic operations for which you can use the CMK. The - // default value is ENCRYPT_DECRYPT. This parameter is required only for asymmetric - // CMKs. You can't change the KeyUsage value after the CMK is created. + // Determines the cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // for which you can use the CMK. The default value is ENCRYPT_DECRYPT. This + // parameter is required only for asymmetric CMKs. You can't change the KeyUsage + // value after the CMK is created. // // Select only one valid value. // @@ -7714,8 +7742,8 @@ func (s *CreateKeyOutput) SetKeyMetadata(v *KeyMetadata) *CreateKeyOutput { // use the ScheduleKeyDeletion operation to delete the CMKs. After they are // deleted, you can delete the custom key store. type CustomKeyStoreHasCMKsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7732,17 +7760,17 @@ func (s CustomKeyStoreHasCMKsException) GoString() string { func newErrorCustomKeyStoreHasCMKsException(v protocol.ResponseMetadata) error { return &CustomKeyStoreHasCMKsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CustomKeyStoreHasCMKsException) Code() string { +func (s *CustomKeyStoreHasCMKsException) Code() string { return "CustomKeyStoreHasCMKsException" } // Message returns the exception's message. -func (s CustomKeyStoreHasCMKsException) Message() string { +func (s *CustomKeyStoreHasCMKsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7750,22 +7778,22 @@ func (s CustomKeyStoreHasCMKsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s CustomKeyStoreHasCMKsException) OrigErr() error { +func (s *CustomKeyStoreHasCMKsException) OrigErr() error { return nil } -func (s CustomKeyStoreHasCMKsException) Error() string { +func (s *CustomKeyStoreHasCMKsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CustomKeyStoreHasCMKsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CustomKeyStoreHasCMKsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CustomKeyStoreHasCMKsException) RequestID() string { - return s.respMetadata.RequestID +func (s *CustomKeyStoreHasCMKsException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because of the ConnectionState of the custom key @@ -7786,8 +7814,8 @@ func (s CustomKeyStoreHasCMKsException) RequestID() string { // with a ConnectionState of DISCONNECTING or FAILED. This operation is valid // for all other ConnectionState values. type CustomKeyStoreInvalidStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7804,17 +7832,17 @@ func (s CustomKeyStoreInvalidStateException) GoString() string { func newErrorCustomKeyStoreInvalidStateException(v protocol.ResponseMetadata) error { return &CustomKeyStoreInvalidStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CustomKeyStoreInvalidStateException) Code() string { +func (s *CustomKeyStoreInvalidStateException) Code() string { return "CustomKeyStoreInvalidStateException" } // Message returns the exception's message. -func (s CustomKeyStoreInvalidStateException) Message() string { +func (s *CustomKeyStoreInvalidStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7822,30 +7850,30 @@ func (s CustomKeyStoreInvalidStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CustomKeyStoreInvalidStateException) OrigErr() error { +func (s *CustomKeyStoreInvalidStateException) OrigErr() error { return nil } -func (s CustomKeyStoreInvalidStateException) Error() string { +func (s *CustomKeyStoreInvalidStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CustomKeyStoreInvalidStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CustomKeyStoreInvalidStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CustomKeyStoreInvalidStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *CustomKeyStoreInvalidStateException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified custom key store name is already // assigned to another custom key store in the account. Try again with a custom // key store name that is unique in the account. 
type CustomKeyStoreNameInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7862,17 +7890,17 @@ func (s CustomKeyStoreNameInUseException) GoString() string { func newErrorCustomKeyStoreNameInUseException(v protocol.ResponseMetadata) error { return &CustomKeyStoreNameInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CustomKeyStoreNameInUseException) Code() string { +func (s *CustomKeyStoreNameInUseException) Code() string { return "CustomKeyStoreNameInUseException" } // Message returns the exception's message. -func (s CustomKeyStoreNameInUseException) Message() string { +func (s *CustomKeyStoreNameInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7880,29 +7908,29 @@ func (s CustomKeyStoreNameInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CustomKeyStoreNameInUseException) OrigErr() error { +func (s *CustomKeyStoreNameInUseException) OrigErr() error { return nil } -func (s CustomKeyStoreNameInUseException) Error() string { +func (s *CustomKeyStoreNameInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CustomKeyStoreNameInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CustomKeyStoreNameInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CustomKeyStoreNameInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *CustomKeyStoreNameInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because AWS KMS cannot find a custom key store with // the specified key store name or ID. type CustomKeyStoreNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7919,17 +7947,17 @@ func (s CustomKeyStoreNotFoundException) GoString() string { func newErrorCustomKeyStoreNotFoundException(v protocol.ResponseMetadata) error { return &CustomKeyStoreNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CustomKeyStoreNotFoundException) Code() string { +func (s *CustomKeyStoreNotFoundException) Code() string { return "CustomKeyStoreNotFoundException" } // Message returns the exception's message. -func (s CustomKeyStoreNotFoundException) Message() string { +func (s *CustomKeyStoreNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7937,22 +7965,22 @@ func (s CustomKeyStoreNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CustomKeyStoreNotFoundException) OrigErr() error { +func (s *CustomKeyStoreNotFoundException) OrigErr() error { return nil } -func (s CustomKeyStoreNotFoundException) Error() string { +func (s *CustomKeyStoreNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s CustomKeyStoreNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CustomKeyStoreNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CustomKeyStoreNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *CustomKeyStoreNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about each custom key store in the custom key store @@ -7992,11 +8020,12 @@ type CustomKeyStoresListEntry struct { // to the custom key store. // // * SUBNET_NOT_FOUND - A subnet in the AWS CloudHSM cluster configuration - // was deleted. If AWS KMS cannot find all of the subnets that were configured - // for the cluster when the custom key store was created, attempts to connect - // fail. To fix this error, create a cluster from a backup and associate - // it with your custom key store. This process includes selecting a VPC and - // subnets. For details, see How to Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) + // was deleted. If AWS KMS cannot find all of the subnets in the cluster + // configuration, attempts to connect the custom key store to the AWS CloudHSM + // cluster fail. To fix this error, create a cluster from a recent backup + // and associate it with your custom key store. (This process creates a new + // cluster configuration with a VPC and private subnets.) For details, see + // How to Fix a Connection Failure (https://docs.aws.amazon.com/kms/latest/developerguide/fix-keystore.html#fix-keystore-failed) // in the AWS Key Management Service Developer Guide. // // * USER_LOCKED_OUT - The kmsuser CU account is locked out of the associated @@ -8126,9 +8155,9 @@ type DecryptInput struct { EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` // Specifies the encryption context to use when decrypting the data. An encryption - // context is valid only for cryptographic operations with a symmetric CMK. - // The standard asymmetric encryption algorithms that AWS KMS uses do not support - // an encryption context. + // context is valid only for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // with a symmetric CMK. The standard asymmetric encryption algorithms that + // AWS KMS uses do not support an encryption context. // // An encryption context is a collection of non-secret key-value pairs that // represents additional authenticated data. When you use an encryption context @@ -8241,7 +8270,8 @@ type DecryptOutput struct { // The encryption algorithm that was used to decrypt the ciphertext. EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - // The ARN of the customer master key that was used to perform the decryption. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that was used to decrypt the ciphertext. KeyId *string `min:"1" type:"string"` // Decrypted plaintext data. When you use the HTTP API or the AWS CLI, the value @@ -8460,8 +8490,8 @@ func (s DeleteImportedKeyMaterialOutput) GoString() string { // The system timed out while trying to fulfill the request. The request can // be retried. 
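The documentation rewrites above clarify that an encryption context applies only to cryptographic operations with a symmetric CMK, and that DecryptOutput.KeyId is the key ARN of the CMK that performed the decryption. A minimal sketch of a decrypt call that supplies a matching encryption context, assuming the aws, log, and kms imports from the previous sketch and an initialized *kms.KMS client; the context key and value are illustrative:

// Assumes the aws, log, and kms imports from the previous sketch and an
// initialized *kms.KMS client.
func decryptWithContext(conn *kms.KMS, ciphertext []byte) ([]byte, error) {
	out, err := conn.Decrypt(&kms.DecryptInput{
		CiphertextBlob: ciphertext,
		// Must exactly match the context supplied when the data was encrypted;
		// only symmetric CMKs support an encryption context.
		EncryptionContext: aws.StringMap(map[string]string{"purpose": "example"}),
	})
	if err != nil {
		return nil, err
	}
	// Per the doc change above, out.KeyId is the key ARN of the CMK that
	// decrypted the ciphertext.
	log.Printf("decrypted with %s", aws.StringValue(out.KeyId))
	return out.Plaintext, nil
}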
type DependencyTimeoutException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8478,17 +8508,17 @@ func (s DependencyTimeoutException) GoString() string { func newErrorDependencyTimeoutException(v protocol.ResponseMetadata) error { return &DependencyTimeoutException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DependencyTimeoutException) Code() string { +func (s *DependencyTimeoutException) Code() string { return "DependencyTimeoutException" } // Message returns the exception's message. -func (s DependencyTimeoutException) Message() string { +func (s *DependencyTimeoutException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8496,22 +8526,22 @@ func (s DependencyTimeoutException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DependencyTimeoutException) OrigErr() error { +func (s *DependencyTimeoutException) OrigErr() error { return nil } -func (s DependencyTimeoutException) Error() string { +func (s *DependencyTimeoutException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DependencyTimeoutException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DependencyTimeoutException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DependencyTimeoutException) RequestID() string { - return s.respMetadata.RequestID +func (s *DependencyTimeoutException) RequestID() string { + return s.RespMetadata.RequestID } type DescribeCustomKeyStoresInput struct { @@ -8879,8 +8909,8 @@ func (s DisableKeyRotationOutput) GoString() string { // The request was rejected because the specified CMK is not enabled. type DisabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8897,17 +8927,17 @@ func (s DisabledException) GoString() string { func newErrorDisabledException(v protocol.ResponseMetadata) error { return &DisabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DisabledException) Code() string { +func (s *DisabledException) Code() string { return "DisabledException" } // Message returns the exception's message. -func (s DisabledException) Message() string { +func (s *DisabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8915,22 +8945,22 @@ func (s DisabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DisabledException) OrigErr() error { +func (s *DisabledException) OrigErr() error { return nil } -func (s DisabledException) Error() string { +func (s *DisabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DisabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DisabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s DisabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *DisabledException) RequestID() string { + return s.RespMetadata.RequestID } type DisconnectCustomKeyStoreInput struct { @@ -9133,9 +9163,9 @@ type EncryptInput struct { EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` // Specifies the encryption context that will be used to encrypt the data. An - // encryption context is valid only for cryptographic operations with a symmetric - // CMK. The standard asymmetric encryption algorithms that AWS KMS uses do not - // support an encryption context. + // encryption context is valid only for cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // with a symmetric CMK. The standard asymmetric encryption algorithms that + // AWS KMS uses do not support an encryption context. // // An encryption context is a collection of non-secret key-value pairs that // represents additional authenticated data. When you use an encryption context @@ -9257,7 +9287,8 @@ type EncryptOutput struct { // The encryption algorithm that was used to encrypt the plaintext. EncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - // The ID of the key used during encryption. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that was used to encrypt the plaintext. KeyId *string `min:"1" type:"string"` } @@ -9293,8 +9324,8 @@ func (s *EncryptOutput) SetKeyId(v string) *EncryptOutput { // GetParametersForImport to get a new import token and public key, use the // new public key to encrypt the key material, and then try the request again. type ExpiredImportTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9311,17 +9342,17 @@ func (s ExpiredImportTokenException) GoString() string { func newErrorExpiredImportTokenException(v protocol.ResponseMetadata) error { return &ExpiredImportTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ExpiredImportTokenException) Code() string { +func (s *ExpiredImportTokenException) Code() string { return "ExpiredImportTokenException" } // Message returns the exception's message. -func (s ExpiredImportTokenException) Message() string { +func (s *ExpiredImportTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9329,22 +9360,22 @@ func (s ExpiredImportTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ExpiredImportTokenException) OrigErr() error { +func (s *ExpiredImportTokenException) OrigErr() error { return nil } -func (s ExpiredImportTokenException) Error() string { +func (s *ExpiredImportTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ExpiredImportTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ExpiredImportTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ExpiredImportTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ExpiredImportTokenException) RequestID() string { + return s.RespMetadata.RequestID } type GenerateDataKeyInput struct { @@ -9475,7 +9506,8 @@ type GenerateDataKeyOutput struct { // CiphertextBlob is automatically base64 encoded/decoded by the SDK. CiphertextBlob []byte `min:"1" type:"blob"` - // The identifier of the CMK that encrypted the data key. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that encrypted the data key. KeyId *string `min:"1" type:"string"` // The plaintext data key. When you use the HTTP API or the AWS CLI, the value @@ -9538,7 +9570,8 @@ type GenerateDataKeyPairInput struct { GrantTokens []*string `type:"list"` // Specifies the symmetric CMK that encrypts the private key in the data key - // pair. You cannot specify an asymmetric CMKs. + // pair. You cannot specify an asymmetric CMK or a CMK in a custom key store. + // To get the type and origin of your CMK, use the DescribeKey operation. // // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, // or alias ARN. When using an alias name, prefix it with "alias/". To specify @@ -9627,7 +9660,8 @@ func (s *GenerateDataKeyPairInput) SetKeyPairSpec(v string) *GenerateDataKeyPair type GenerateDataKeyPairOutput struct { _ struct{} `type:"structure"` - // The identifier of the CMK that encrypted the private key. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that encrypted the private key. KeyId *string `min:"1" type:"string"` // The type of data key pair that was generated. @@ -9714,8 +9748,9 @@ type GenerateDataKeyPairWithoutPlaintextInput struct { GrantTokens []*string `type:"list"` // Specifies the CMK that encrypts the private key in the data key pair. You - // must specify a symmetric CMK. You cannot use an asymmetric CMK. To get the - // type of your CMK, use the DescribeKey operation. + // must specify a symmetric CMK. You cannot use an asymmetric CMK or a CMK in + // a custom key store. To get the type and origin of your CMK, use the DescribeKey + // operation. // // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, // or alias ARN. When using an alias name, prefix it with "alias/". @@ -9803,25 +9838,8 @@ func (s *GenerateDataKeyPairWithoutPlaintextInput) SetKeyPairSpec(v string) *Gen type GenerateDataKeyPairWithoutPlaintextOutput struct { _ struct{} `type:"structure"` - // Specifies the CMK that encrypted the private key in the data key pair. You - // must specify a symmetric CMK. You cannot use an asymmetric CMK. To get the - // type of your CMK, use the DescribeKey operation. - // - // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, - // or alias ARN. When using an alias name, prefix it with "alias/". - // - // For example: - // - // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // - // * Alias name: alias/ExampleAlias - // - // * Alias ARN: arn:aws:kms:us-east-2:111122223333:alias/ExampleAlias - // - // To get the key ID and key ARN for a CMK, use ListKeys or DescribeKey. To - // get the alias name and alias ARN, use ListAliases. 
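The GenerateDataKeyPair doc changes above note that the wrapping CMK must be symmetric and cannot live in a custom key store, and that the returned KeyId is the key ARN. A hedged sketch of such a call, assuming the earlier imports and client; the RSA_2048 key pair spec is just an example:

// Assumes the aws and kms imports from the earlier sketch and an initialized
// *kms.KMS client.
func newDataKeyPair(conn *kms.KMS, symmetricCMKID string) (*kms.GenerateDataKeyPairOutput, error) {
	// The wrapping CMK must be symmetric and must not reside in a custom key
	// store, as the updated field documentation above states.
	return conn.GenerateDataKeyPair(&kms.GenerateDataKeyPairInput{
		KeyId:       aws.String(symmetricCMKID),
		KeyPairSpec: aws.String(kms.DataKeyPairSpecRsa2048), // illustrative spec
	})
}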
+ // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that encrypted the private key. KeyId *string `min:"1" type:"string"` // The type of data key pair that was generated. @@ -9997,7 +10015,8 @@ type GenerateDataKeyWithoutPlaintextOutput struct { // CiphertextBlob is automatically base64 encoded/decoded by the SDK. CiphertextBlob []byte `min:"1" type:"blob"` - // The identifier of the CMK that encrypted the data key. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that encrypted the data key. KeyId *string `min:"1" type:"string"` } @@ -10358,8 +10377,9 @@ type GetParametersForImportOutput struct { // ImportToken is automatically base64 encoded/decoded by the SDK. ImportToken []byte `min:"1" type:"blob"` - // The identifier of the CMK to use in a subsequent ImportKeyMaterial request. - // This is the same CMK specified in the GetParametersForImport request. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK to use in a subsequent ImportKeyMaterial request. This is the + // same CMK specified in the GetParametersForImport request. KeyId *string `min:"1" type:"string"` // The time at which the import token and public key are no longer valid. After @@ -10494,7 +10514,8 @@ type GetPublicKeyOutput struct { // is ENCRYPT_DECRYPT. EncryptionAlgorithms []*string `type:"list"` - // The identifier of the asymmetric CMK from which the public key was downloaded. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the asymmetric CMK from which the public key was downloaded. KeyId *string `min:"1" type:"string"` // The permitted use of the public key. Valid values are ENCRYPT_DECRYPT or @@ -10567,24 +10588,16 @@ func (s *GetPublicKeyOutput) SetSigningAlgorithms(v []*string) *GetPublicKeyOutp return s } -// Use this structure to allow cryptographic operations in the grant only when -// the operation request includes the specified encryption context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context). -// -// AWS KMS applies the grant constraints only when the grant allows a cryptographic -// operation that accepts an encryption context as input, such as the following. -// -// * Encrypt -// -// * Decrypt -// -// * GenerateDataKey -// -// * GenerateDataKeyWithoutPlaintext +// Use this structure to allow cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) +// in the grant only when the operation request includes the specified encryption +// context (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context). // -// * ReEncrypt -// -// AWS KMS does not apply the grant constraints to other operations, such as -// DescribeKey or ScheduleKeyDeletion. +// AWS KMS applies the grant constraints only to cryptographic operations that +// support an encryption context, that is, all cryptographic operations with +// a symmetric CMK (https://docs.aws.amazon.com/kms/latest/developerguide/symm-asymm-concepts.html#symmetric-cmks). 
+// Grant constraints are not applied to operations that do not support an encryption +// context, such as cryptographic operations with asymmetric CMKs and management +// operations, such as DescribeKey or ScheduleKeyDeletion. // // In a cryptographic operation, the encryption context in the decryption operation // must be an exact, case-sensitive match for the keys and values in the encryption @@ -10602,16 +10615,16 @@ type GrantConstraints struct { _ struct{} `type:"structure"` // A list of key-value pairs that must match the encryption context in the cryptographic - // operation request. The grant allows the operation only when the encryption - // context in the request is the same as the encryption context specified in - // this constraint. + // operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // request. The grant allows the operation only when the encryption context + // in the request is the same as the encryption context specified in this constraint. EncryptionContextEquals map[string]*string `type:"map"` // A list of key-value pairs that must be included in the encryption context - // of the cryptographic operation request. The grant allows the cryptographic - // operation only when the encryption context in the request includes the key-value - // pairs specified in this constraint, although it can include additional key-value - // pairs. + // of the cryptographic operation (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // request. The grant allows the cryptographic operation only when the encryption + // context in the request includes the key-value pairs specified in this constraint, + // although it can include additional key-value pairs. EncryptionContextSubset map[string]*string `type:"map"` } @@ -10637,7 +10650,7 @@ func (s *GrantConstraints) SetEncryptionContextSubset(v map[string]*string) *Gra return s } -// Contains information about an entry in a list of grants. +// Contains information about a grant. type GrantListEntry struct { _ struct{} `type:"structure"` @@ -10651,7 +10664,13 @@ type GrantListEntry struct { // The unique identifier for the grant. GrantId *string `min:"1" type:"string"` - // The principal that receives the grant's permissions. + // The identity that gets the permissions in the grant. + // + // The GranteePrincipal field in the ListGrants response usually contains the + // user or role designated as the grantee principal in the grant. However, when + // the grantee principal in the grant is an AWS service, the GranteePrincipal + // field contains the service principal (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-services), + // which might represent several different grantee principals. GranteePrincipal *string `min:"1" type:"string"` // The AWS account under which the grant was issued. @@ -10873,8 +10892,8 @@ func (s ImportKeyMaterialOutput) GoString() string { // The KeyId in a Decrypt request and the SourceKeyId in a ReEncrypt request // must identify the same CMK that was used to encrypt the ciphertext. 
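The rewritten GrantConstraints documentation above states that grant constraints apply only to cryptographic operations that accept an encryption context, that is, operations with a symmetric CMK. A hedged sketch of creating a grant with an EncryptionContextSubset constraint, assuming the earlier imports and client; the grantee ARN, key ID, and context pair are placeholders:

// Assumes the aws and kms imports from the earlier sketch and an initialized
// *kms.KMS client.
func grantDecryptWithContext(conn *kms.KMS, keyID, granteeARN string) (*kms.CreateGrantOutput, error) {
	return conn.CreateGrant(&kms.CreateGrantInput{
		KeyId:            aws.String(keyID),
		GranteePrincipal: aws.String(granteeARN),
		Operations:       aws.StringSlice([]string{kms.GrantOperationDecrypt}),
		// Per the rewritten GrantConstraints docs, this constraint is applied
		// only to cryptographic operations that accept an encryption context,
		// i.e. operations with a symmetric CMK.
		Constraints: &kms.GrantConstraints{
			EncryptionContextSubset: aws.StringMap(map[string]string{"department": "it"}),
		},
	})
}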
type IncorrectKeyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10891,17 +10910,17 @@ func (s IncorrectKeyException) GoString() string { func newErrorIncorrectKeyException(v protocol.ResponseMetadata) error { return &IncorrectKeyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncorrectKeyException) Code() string { +func (s *IncorrectKeyException) Code() string { return "IncorrectKeyException" } // Message returns the exception's message. -func (s IncorrectKeyException) Message() string { +func (s *IncorrectKeyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10909,30 +10928,30 @@ func (s IncorrectKeyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IncorrectKeyException) OrigErr() error { +func (s *IncorrectKeyException) OrigErr() error { return nil } -func (s IncorrectKeyException) Error() string { +func (s *IncorrectKeyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IncorrectKeyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncorrectKeyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IncorrectKeyException) RequestID() string { - return s.respMetadata.RequestID +func (s *IncorrectKeyException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the key material in the request is, expired, // invalid, or is not the same key material that was previously imported into // this customer master key (CMK). type IncorrectKeyMaterialException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10949,17 +10968,17 @@ func (s IncorrectKeyMaterialException) GoString() string { func newErrorIncorrectKeyMaterialException(v protocol.ResponseMetadata) error { return &IncorrectKeyMaterialException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncorrectKeyMaterialException) Code() string { +func (s *IncorrectKeyMaterialException) Code() string { return "IncorrectKeyMaterialException" } // Message returns the exception's message. -func (s IncorrectKeyMaterialException) Message() string { +func (s *IncorrectKeyMaterialException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10967,22 +10986,22 @@ func (s IncorrectKeyMaterialException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IncorrectKeyMaterialException) OrigErr() error { +func (s *IncorrectKeyMaterialException) OrigErr() error { return nil } -func (s IncorrectKeyMaterialException) Error() string { +func (s *IncorrectKeyMaterialException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s IncorrectKeyMaterialException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncorrectKeyMaterialException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IncorrectKeyMaterialException) RequestID() string { - return s.respMetadata.RequestID +func (s *IncorrectKeyMaterialException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the trust anchor certificate in the request @@ -10992,8 +11011,8 @@ func (s IncorrectKeyMaterialException) RequestID() string { // you create the trust anchor certificate and save it in the customerCA.crt // file. type IncorrectTrustAnchorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11010,17 +11029,17 @@ func (s IncorrectTrustAnchorException) GoString() string { func newErrorIncorrectTrustAnchorException(v protocol.ResponseMetadata) error { return &IncorrectTrustAnchorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncorrectTrustAnchorException) Code() string { +func (s *IncorrectTrustAnchorException) Code() string { return "IncorrectTrustAnchorException" } // Message returns the exception's message. -func (s IncorrectTrustAnchorException) Message() string { +func (s *IncorrectTrustAnchorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11028,29 +11047,29 @@ func (s IncorrectTrustAnchorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IncorrectTrustAnchorException) OrigErr() error { +func (s *IncorrectTrustAnchorException) OrigErr() error { return nil } -func (s IncorrectTrustAnchorException) Error() string { +func (s *IncorrectTrustAnchorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IncorrectTrustAnchorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncorrectTrustAnchorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IncorrectTrustAnchorException) RequestID() string { - return s.respMetadata.RequestID +func (s *IncorrectTrustAnchorException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because an internal exception occurred. The request // can be retried. type InternalException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11067,17 +11086,17 @@ func (s InternalException) GoString() string { func newErrorInternalException(v protocol.ResponseMetadata) error { return &InternalException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalException) Code() string { +func (s *InternalException) Code() string { return "KMSInternalException" } // Message returns the exception's message. 
-func (s InternalException) Message() string { +func (s *InternalException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11085,28 +11104,28 @@ func (s InternalException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalException) OrigErr() error { +func (s *InternalException) OrigErr() error { return nil } -func (s InternalException) Error() string { +func (s *InternalException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified alias name is not valid. type InvalidAliasNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11123,17 +11142,17 @@ func (s InvalidAliasNameException) GoString() string { func newErrorInvalidAliasNameException(v protocol.ResponseMetadata) error { return &InvalidAliasNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAliasNameException) Code() string { +func (s *InvalidAliasNameException) Code() string { return "InvalidAliasNameException" } // Message returns the exception's message. -func (s InvalidAliasNameException) Message() string { +func (s *InvalidAliasNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11141,29 +11160,29 @@ func (s InvalidAliasNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAliasNameException) OrigErr() error { +func (s *InvalidAliasNameException) OrigErr() error { return nil } -func (s InvalidAliasNameException) Error() string { +func (s *InvalidAliasNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAliasNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAliasNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAliasNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAliasNameException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because a specified ARN, or an ARN in a key policy, // is not valid. type InvalidArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11180,17 +11199,17 @@ func (s InvalidArnException) GoString() string { func newErrorInvalidArnException(v protocol.ResponseMetadata) error { return &InvalidArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidArnException) Code() string { +func (s *InvalidArnException) Code() string { return "InvalidArnException" } // Message returns the exception's message. -func (s InvalidArnException) Message() string { +func (s *InvalidArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11198,22 +11217,22 @@ func (s InvalidArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArnException) OrigErr() error { +func (s *InvalidArnException) OrigErr() error { return nil } -func (s InvalidArnException) Error() string { +func (s *InvalidArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArnException) RequestID() string { + return s.RespMetadata.RequestID } // From the Decrypt or ReEncrypt operation, the request was rejected because @@ -11224,8 +11243,8 @@ func (s InvalidArnException) RequestID() string { // From the ImportKeyMaterial operation, the request was rejected because AWS // KMS could not decrypt the encrypted (wrapped) key material. type InvalidCiphertextException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11242,17 +11261,17 @@ func (s InvalidCiphertextException) GoString() string { func newErrorInvalidCiphertextException(v protocol.ResponseMetadata) error { return &InvalidCiphertextException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidCiphertextException) Code() string { +func (s *InvalidCiphertextException) Code() string { return "InvalidCiphertextException" } // Message returns the exception's message. -func (s InvalidCiphertextException) Message() string { +func (s *InvalidCiphertextException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11260,28 +11279,28 @@ func (s InvalidCiphertextException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidCiphertextException) OrigErr() error { +func (s *InvalidCiphertextException) OrigErr() error { return nil } -func (s InvalidCiphertextException) Error() string { +func (s *InvalidCiphertextException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidCiphertextException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidCiphertextException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidCiphertextException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidCiphertextException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified GrantId is not valid. 
type InvalidGrantIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11298,17 +11317,17 @@ func (s InvalidGrantIdException) GoString() string { func newErrorInvalidGrantIdException(v protocol.ResponseMetadata) error { return &InvalidGrantIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidGrantIdException) Code() string { +func (s *InvalidGrantIdException) Code() string { return "InvalidGrantIdException" } // Message returns the exception's message. -func (s InvalidGrantIdException) Message() string { +func (s *InvalidGrantIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11316,28 +11335,28 @@ func (s InvalidGrantIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidGrantIdException) OrigErr() error { +func (s *InvalidGrantIdException) OrigErr() error { return nil } -func (s InvalidGrantIdException) Error() string { +func (s *InvalidGrantIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidGrantIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidGrantIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidGrantIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidGrantIdException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified grant token is not valid. type InvalidGrantTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11354,17 +11373,17 @@ func (s InvalidGrantTokenException) GoString() string { func newErrorInvalidGrantTokenException(v protocol.ResponseMetadata) error { return &InvalidGrantTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidGrantTokenException) Code() string { +func (s *InvalidGrantTokenException) Code() string { return "InvalidGrantTokenException" } // Message returns the exception's message. -func (s InvalidGrantTokenException) Message() string { +func (s *InvalidGrantTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11372,29 +11391,29 @@ func (s InvalidGrantTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidGrantTokenException) OrigErr() error { +func (s *InvalidGrantTokenException) OrigErr() error { return nil } -func (s InvalidGrantTokenException) Error() string { +func (s *InvalidGrantTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidGrantTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidGrantTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidGrantTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidGrantTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the provided import token is invalid or // is associated with a different customer master key (CMK). type InvalidImportTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11411,17 +11430,17 @@ func (s InvalidImportTokenException) GoString() string { func newErrorInvalidImportTokenException(v protocol.ResponseMetadata) error { return &InvalidImportTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidImportTokenException) Code() string { +func (s *InvalidImportTokenException) Code() string { return "InvalidImportTokenException" } // Message returns the exception's message. -func (s InvalidImportTokenException) Message() string { +func (s *InvalidImportTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11429,22 +11448,22 @@ func (s InvalidImportTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidImportTokenException) OrigErr() error { +func (s *InvalidImportTokenException) OrigErr() error { return nil } -func (s InvalidImportTokenException) Error() string { +func (s *InvalidImportTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidImportTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidImportTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidImportTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidImportTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected for one of the following reasons: @@ -11461,8 +11480,8 @@ func (s InvalidImportTokenException) RequestID() string { // To find the encryption or signing algorithms supported for a particular CMK, // use the DescribeKey operation. type InvalidKeyUsageException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11479,17 +11498,17 @@ func (s InvalidKeyUsageException) GoString() string { func newErrorInvalidKeyUsageException(v protocol.ResponseMetadata) error { return &InvalidKeyUsageException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidKeyUsageException) Code() string { +func (s *InvalidKeyUsageException) Code() string { return "InvalidKeyUsageException" } // Message returns the exception's message. -func (s InvalidKeyUsageException) Message() string { +func (s *InvalidKeyUsageException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11497,29 +11516,29 @@ func (s InvalidKeyUsageException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidKeyUsageException) OrigErr() error { +func (s *InvalidKeyUsageException) OrigErr() error { return nil } -func (s InvalidKeyUsageException) Error() string { +func (s *InvalidKeyUsageException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidKeyUsageException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidKeyUsageException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidKeyUsageException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidKeyUsageException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the marker that specifies where pagination // should next begin is not valid. type InvalidMarkerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11536,17 +11555,17 @@ func (s InvalidMarkerException) GoString() string { func newErrorInvalidMarkerException(v protocol.ResponseMetadata) error { return &InvalidMarkerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidMarkerException) Code() string { +func (s *InvalidMarkerException) Code() string { return "InvalidMarkerException" } // Message returns the exception's message. -func (s InvalidMarkerException) Message() string { +func (s *InvalidMarkerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11554,22 +11573,22 @@ func (s InvalidMarkerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidMarkerException) OrigErr() error { +func (s *InvalidMarkerException) OrigErr() error { return nil } -func (s InvalidMarkerException) Error() string { +func (s *InvalidMarkerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidMarkerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidMarkerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidMarkerException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidMarkerException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the state of the specified resource is not @@ -11579,8 +11598,8 @@ func (s InvalidMarkerException) RequestID() string { // Key State Affects Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide . type InvalidStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11597,17 +11616,17 @@ func (s InvalidStateException) GoString() string { func newErrorInvalidStateException(v protocol.ResponseMetadata) error { return &InvalidStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidStateException) Code() string { +func (s *InvalidStateException) Code() string { return "KMSInvalidStateException" } // Message returns the exception's message. -func (s InvalidStateException) Message() string { +func (s *InvalidStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11615,30 +11634,30 @@ func (s InvalidStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidStateException) OrigErr() error { +func (s *InvalidStateException) OrigErr() error { return nil } -func (s InvalidStateException) Error() string { +func (s *InvalidStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidStateException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the signature verification failed. Signature // verification fails when it cannot confirm that signature was produced by // signing the specified message with the specified CMK and signing algorithm. type KMSInvalidSignatureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11655,17 +11674,17 @@ func (s KMSInvalidSignatureException) GoString() string { func newErrorKMSInvalidSignatureException(v protocol.ResponseMetadata) error { return &KMSInvalidSignatureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSInvalidSignatureException) Code() string { +func (s *KMSInvalidSignatureException) Code() string { return "KMSInvalidSignatureException" } // Message returns the exception's message. -func (s KMSInvalidSignatureException) Message() string { +func (s *KMSInvalidSignatureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11673,22 +11692,22 @@ func (s KMSInvalidSignatureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSInvalidSignatureException) OrigErr() error { +func (s *KMSInvalidSignatureException) OrigErr() error { return nil } -func (s KMSInvalidSignatureException) Error() string { +func (s *KMSInvalidSignatureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSInvalidSignatureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSInvalidSignatureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSInvalidSignatureException) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSInvalidSignatureException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about each entry in the key list. @@ -11768,8 +11787,8 @@ type KeyMetadata struct { // is true, otherwise it is false. 
Enabled *bool `type:"boolean"` - // A list of encryption algorithms that the CMK supports. You cannot use the - // CMK with other encryption algorithms within AWS KMS. + // The encryption algorithms that the CMK supports. You cannot use the CMK with + // other encryption algorithms within AWS KMS. // // This field appears only when the KeyUsage of the CMK is ENCRYPT_DECRYPT. EncryptionAlgorithms []*string `type:"list"` @@ -11789,14 +11808,15 @@ type KeyMetadata struct { // in the AWS Key Management Service Developer Guide. KeyManager *string `type:"string" enum:"KeyManagerType"` - // The state of the CMK. + // The current status of the CMK. // - // For more information about how key state affects the use of a CMK, see How - // Key State Affects the Use of a Customer Master Key (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + // For more information about how key state affects the use of a CMK, see Key + // state: Effect on your CMK (https://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. KeyState *string `type:"string" enum:"KeyState"` - // The cryptographic operations for which you can use the CMK. + // The cryptographic operations (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#cryptographic-operations) + // for which you can use the CMK. KeyUsage *string `type:"string" enum:"KeyUsageType"` // The source of the CMK's key material. When this value is AWS_KMS, AWS KMS @@ -11806,8 +11826,8 @@ type KeyMetadata struct { // in the AWS CloudHSM cluster associated with a custom key store. Origin *string `type:"string" enum:"OriginType"` - // A list of signing algorithms that the CMK supports. You cannot use the CMK - // with other signing algorithms within AWS KMS. + // The signing algorithms that the CMK supports. You cannot use the CMK with + // other signing algorithms within AWS KMS. // // This field appears only when the KeyUsage of the CMK is SIGN_VERIFY. SigningAlgorithms []*string `type:"list"` @@ -11940,8 +11960,8 @@ func (s *KeyMetadata) SetValidTo(v time.Time) *KeyMetadata { // The request was rejected because the specified CMK was not available. You // can retry the request. type KeyUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11958,17 +11978,17 @@ func (s KeyUnavailableException) GoString() string { func newErrorKeyUnavailableException(v protocol.ResponseMetadata) error { return &KeyUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KeyUnavailableException) Code() string { +func (s *KeyUnavailableException) Code() string { return "KeyUnavailableException" } // Message returns the exception's message. -func (s KeyUnavailableException) Message() string { +func (s *KeyUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11976,30 +11996,30 @@ func (s KeyUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
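The KeyMetadata doc updates above reword KeyState as the key's current status and tie KeyUsage, EncryptionAlgorithms, and SigningAlgorithms to the cryptographic operations the CMK supports. A small sketch that reads those fields via DescribeKey, assuming the earlier imports and client:

// Assumes the aws, log, and kms imports from the earlier sketch and an
// initialized *kms.KMS client.
func inspectKey(conn *kms.KMS, keyID string) error {
	out, err := conn.DescribeKey(&kms.DescribeKeyInput{KeyId: aws.String(keyID)})
	if err != nil {
		return err
	}
	md := out.KeyMetadata
	// KeyState is the key's current status; KeyUsage plus the algorithm lists
	// describe which cryptographic operations the CMK supports.
	log.Printf("state=%s usage=%s encryption-algs=%v signing-algs=%v",
		aws.StringValue(md.KeyState), aws.StringValue(md.KeyUsage),
		aws.StringValueSlice(md.EncryptionAlgorithms), aws.StringValueSlice(md.SigningAlgorithms))
	return nil
}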
-func (s KeyUnavailableException) OrigErr() error { +func (s *KeyUnavailableException) OrigErr() error { return nil } -func (s KeyUnavailableException) Error() string { +func (s *KeyUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s KeyUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KeyUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KeyUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *KeyUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because a quota was exceeded. For more information, // see Quotas (https://docs.aws.amazon.com/kms/latest/developerguide/limits.html) // in the AWS Key Management Service Developer Guide. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12016,17 +12036,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12034,22 +12054,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListAliasesInput struct { @@ -12740,8 +12760,8 @@ func (s *ListRetirableGrantsInput) SetRetiringPrincipal(v string) *ListRetirable // The request was rejected because the specified policy is not syntactically // or semantically correct. 
type MalformedPolicyDocumentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12758,17 +12778,17 @@ func (s MalformedPolicyDocumentException) GoString() string { func newErrorMalformedPolicyDocumentException(v protocol.ResponseMetadata) error { return &MalformedPolicyDocumentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MalformedPolicyDocumentException) Code() string { +func (s *MalformedPolicyDocumentException) Code() string { return "MalformedPolicyDocumentException" } // Message returns the exception's message. -func (s MalformedPolicyDocumentException) Message() string { +func (s *MalformedPolicyDocumentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12776,29 +12796,29 @@ func (s MalformedPolicyDocumentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MalformedPolicyDocumentException) OrigErr() error { +func (s *MalformedPolicyDocumentException) OrigErr() error { return nil } -func (s MalformedPolicyDocumentException) Error() string { +func (s *MalformedPolicyDocumentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MalformedPolicyDocumentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MalformedPolicyDocumentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MalformedPolicyDocumentException) RequestID() string { - return s.respMetadata.RequestID +func (s *MalformedPolicyDocumentException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because the specified entity or resource could not // be found. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12815,17 +12835,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12833,22 +12853,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
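For the NotFoundException defined above, the Terraform provider that vendors this SDK typically treats it during a read as a signal that the resource no longer exists. A hedged sketch of that pattern; the resourceAwsKmsExampleRead name is invented, and the isAWSErr helper, the AWSClient.kmsconn field, and the schema.ResourceData type are assumed from the provider codebase rather than from this patch (imports: fmt, log, aws, kms, and the plugin SDK's helper/schema):

func resourceAwsKmsExampleRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).kmsconn
	_, err := conn.DescribeKey(&kms.DescribeKeyInput{KeyId: aws.String(d.Id())})
	if isAWSErr(err, kms.ErrCodeNotFoundException, "") {
		// The CMK is gone; drop it from state instead of failing the read.
		log.Printf("[WARN] KMS Key (%s) not found, removing from state", d.Id())
		d.SetId("")
		return nil
	}
	if err != nil {
		return fmt.Errorf("error describing KMS Key (%s): %w", d.Id(), err)
	}
	return nil
}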
-func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type PutKeyPolicyInput struct { @@ -13205,7 +13225,8 @@ type ReEncryptOutput struct { // The encryption algorithm that was used to reencrypt the data. DestinationEncryptionAlgorithm *string `type:"string" enum:"EncryptionAlgorithmSpec"` - // Unique identifier of the CMK used to reencrypt the data. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK that was used to reencrypt the data. KeyId *string `min:"1" type:"string"` // The encryption algorithm that was used to decrypt the ciphertext before it @@ -13491,8 +13512,8 @@ type ScheduleKeyDeletionOutput struct { // The date and time after which AWS KMS deletes the customer master key (CMK). DeletionDate *time.Time `type:"timestamp"` - // The unique identifier of the customer master key (CMK) for which deletion - // is scheduled. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the CMK whose deletion is scheduled. KeyId *string `min:"1" type:"string"` } @@ -13644,8 +13665,8 @@ func (s *SignInput) SetSigningAlgorithm(v string) *SignInput { type SignOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the asymmetric CMK that was used to sign - // the message. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the asymmetric CMK that was used to sign the message. KeyId *string `min:"1" type:"string"` // The cryptographic signature that was generated for the message. @@ -13760,8 +13781,8 @@ func (s *Tag) SetTagValue(v string) *Tag { // The request was rejected because one or more tags are not valid. type TagException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13778,17 +13799,17 @@ func (s TagException) GoString() string { func newErrorTagException(v protocol.ResponseMetadata) error { return &TagException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagException) Code() string { +func (s *TagException) Code() string { return "TagException" } // Message returns the exception's message. -func (s TagException) Message() string { +func (s *TagException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13796,22 +13817,22 @@ func (s TagException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagException) OrigErr() error { +func (s *TagException) OrigErr() error { return nil } -func (s TagException) Error() string { +func (s *TagException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s TagException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagException) RequestID() string { + return s.RespMetadata.RequestID } type TagResourceInput struct { @@ -13906,8 +13927,8 @@ func (s TagResourceOutput) GoString() string { // The request was rejected because a specified parameter is not supported or // a specified resource is not valid for this operation. type UnsupportedOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13924,17 +13945,17 @@ func (s UnsupportedOperationException) GoString() string { func newErrorUnsupportedOperationException(v protocol.ResponseMetadata) error { return &UnsupportedOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedOperationException) Code() string { +func (s *UnsupportedOperationException) Code() string { return "UnsupportedOperationException" } // Message returns the exception's message. -func (s UnsupportedOperationException) Message() string { +func (s *UnsupportedOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13942,22 +13963,22 @@ func (s UnsupportedOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedOperationException) OrigErr() error { +func (s *UnsupportedOperationException) OrigErr() error { return nil } -func (s UnsupportedOperationException) Error() string { +func (s *UnsupportedOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedOperationException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -14466,8 +14487,8 @@ func (s *VerifyInput) SetSigningAlgorithm(v string) *VerifyInput { type VerifyOutput struct { _ struct{} `type:"structure"` - // The unique identifier for the asymmetric CMK that was used to verify the - // signature. + // The Amazon Resource Name (key ARN (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#key-id-key-ARN)) + // of the asymmetric CMK that was used to verify the signature. KeyId *string `min:"1" type:"string"` // A Boolean value that indicates whether the signature was verified. 
A value @@ -14519,6 +14540,15 @@ const ( AlgorithmSpecRsaesOaepSha256 = "RSAES_OAEP_SHA_256" ) +// AlgorithmSpec_Values returns all elements of the AlgorithmSpec enum +func AlgorithmSpec_Values() []string { + return []string{ + AlgorithmSpecRsaesPkcs1V15, + AlgorithmSpecRsaesOaepSha1, + AlgorithmSpecRsaesOaepSha256, + } +} + const ( // ConnectionErrorCodeTypeInvalidCredentials is a ConnectionErrorCodeType enum value ConnectionErrorCodeTypeInvalidCredentials = "INVALID_CREDENTIALS" @@ -14548,6 +14578,21 @@ const ( ConnectionErrorCodeTypeSubnetNotFound = "SUBNET_NOT_FOUND" ) +// ConnectionErrorCodeType_Values returns all elements of the ConnectionErrorCodeType enum +func ConnectionErrorCodeType_Values() []string { + return []string{ + ConnectionErrorCodeTypeInvalidCredentials, + ConnectionErrorCodeTypeClusterNotFound, + ConnectionErrorCodeTypeNetworkErrors, + ConnectionErrorCodeTypeInternalError, + ConnectionErrorCodeTypeInsufficientCloudhsmHsms, + ConnectionErrorCodeTypeUserLockedOut, + ConnectionErrorCodeTypeUserNotFound, + ConnectionErrorCodeTypeUserLoggedIn, + ConnectionErrorCodeTypeSubnetNotFound, + } +} + const ( // ConnectionStateTypeConnected is a ConnectionStateType enum value ConnectionStateTypeConnected = "CONNECTED" @@ -14565,6 +14610,17 @@ const ( ConnectionStateTypeDisconnecting = "DISCONNECTING" ) +// ConnectionStateType_Values returns all elements of the ConnectionStateType enum +func ConnectionStateType_Values() []string { + return []string{ + ConnectionStateTypeConnected, + ConnectionStateTypeConnecting, + ConnectionStateTypeFailed, + ConnectionStateTypeDisconnected, + ConnectionStateTypeDisconnecting, + } +} + const ( // CustomerMasterKeySpecRsa2048 is a CustomerMasterKeySpec enum value CustomerMasterKeySpecRsa2048 = "RSA_2048" @@ -14591,6 +14647,20 @@ const ( CustomerMasterKeySpecSymmetricDefault = "SYMMETRIC_DEFAULT" ) +// CustomerMasterKeySpec_Values returns all elements of the CustomerMasterKeySpec enum +func CustomerMasterKeySpec_Values() []string { + return []string{ + CustomerMasterKeySpecRsa2048, + CustomerMasterKeySpecRsa3072, + CustomerMasterKeySpecRsa4096, + CustomerMasterKeySpecEccNistP256, + CustomerMasterKeySpecEccNistP384, + CustomerMasterKeySpecEccNistP521, + CustomerMasterKeySpecEccSecgP256k1, + CustomerMasterKeySpecSymmetricDefault, + } +} + const ( // DataKeyPairSpecRsa2048 is a DataKeyPairSpec enum value DataKeyPairSpecRsa2048 = "RSA_2048" @@ -14614,6 +14684,19 @@ const ( DataKeyPairSpecEccSecgP256k1 = "ECC_SECG_P256K1" ) +// DataKeyPairSpec_Values returns all elements of the DataKeyPairSpec enum +func DataKeyPairSpec_Values() []string { + return []string{ + DataKeyPairSpecRsa2048, + DataKeyPairSpecRsa3072, + DataKeyPairSpecRsa4096, + DataKeyPairSpecEccNistP256, + DataKeyPairSpecEccNistP384, + DataKeyPairSpecEccNistP521, + DataKeyPairSpecEccSecgP256k1, + } +} + const ( // DataKeySpecAes256 is a DataKeySpec enum value DataKeySpecAes256 = "AES_256" @@ -14622,6 +14705,14 @@ const ( DataKeySpecAes128 = "AES_128" ) +// DataKeySpec_Values returns all elements of the DataKeySpec enum +func DataKeySpec_Values() []string { + return []string{ + DataKeySpecAes256, + DataKeySpecAes128, + } +} + const ( // EncryptionAlgorithmSpecSymmetricDefault is a EncryptionAlgorithmSpec enum value EncryptionAlgorithmSpecSymmetricDefault = "SYMMETRIC_DEFAULT" @@ -14633,6 +14724,15 @@ const ( EncryptionAlgorithmSpecRsaesOaepSha256 = "RSAES_OAEP_SHA_256" ) +// EncryptionAlgorithmSpec_Values returns all elements of the EncryptionAlgorithmSpec enum +func 
EncryptionAlgorithmSpec_Values() []string { + return []string{ + EncryptionAlgorithmSpecSymmetricDefault, + EncryptionAlgorithmSpecRsaesOaepSha1, + EncryptionAlgorithmSpecRsaesOaepSha256, + } +} + const ( // ExpirationModelTypeKeyMaterialExpires is a ExpirationModelType enum value ExpirationModelTypeKeyMaterialExpires = "KEY_MATERIAL_EXPIRES" @@ -14641,6 +14741,14 @@ const ( ExpirationModelTypeKeyMaterialDoesNotExpire = "KEY_MATERIAL_DOES_NOT_EXPIRE" ) +// ExpirationModelType_Values returns all elements of the ExpirationModelType enum +func ExpirationModelType_Values() []string { + return []string{ + ExpirationModelTypeKeyMaterialExpires, + ExpirationModelTypeKeyMaterialDoesNotExpire, + } +} + const ( // GrantOperationDecrypt is a GrantOperation enum value GrantOperationDecrypt = "Decrypt" @@ -14685,6 +14793,26 @@ const ( GrantOperationGenerateDataKeyPairWithoutPlaintext = "GenerateDataKeyPairWithoutPlaintext" ) +// GrantOperation_Values returns all elements of the GrantOperation enum +func GrantOperation_Values() []string { + return []string{ + GrantOperationDecrypt, + GrantOperationEncrypt, + GrantOperationGenerateDataKey, + GrantOperationGenerateDataKeyWithoutPlaintext, + GrantOperationReEncryptFrom, + GrantOperationReEncryptTo, + GrantOperationSign, + GrantOperationVerify, + GrantOperationGetPublicKey, + GrantOperationCreateGrant, + GrantOperationRetireGrant, + GrantOperationDescribeKey, + GrantOperationGenerateDataKeyPair, + GrantOperationGenerateDataKeyPairWithoutPlaintext, + } +} + const ( // KeyManagerTypeAws is a KeyManagerType enum value KeyManagerTypeAws = "AWS" @@ -14693,6 +14821,14 @@ const ( KeyManagerTypeCustomer = "CUSTOMER" ) +// KeyManagerType_Values returns all elements of the KeyManagerType enum +func KeyManagerType_Values() []string { + return []string{ + KeyManagerTypeAws, + KeyManagerTypeCustomer, + } +} + const ( // KeyStateEnabled is a KeyState enum value KeyStateEnabled = "Enabled" @@ -14710,6 +14846,17 @@ const ( KeyStateUnavailable = "Unavailable" ) +// KeyState_Values returns all elements of the KeyState enum +func KeyState_Values() []string { + return []string{ + KeyStateEnabled, + KeyStateDisabled, + KeyStatePendingDeletion, + KeyStatePendingImport, + KeyStateUnavailable, + } +} + const ( // KeyUsageTypeSignVerify is a KeyUsageType enum value KeyUsageTypeSignVerify = "SIGN_VERIFY" @@ -14718,6 +14865,14 @@ const ( KeyUsageTypeEncryptDecrypt = "ENCRYPT_DECRYPT" ) +// KeyUsageType_Values returns all elements of the KeyUsageType enum +func KeyUsageType_Values() []string { + return []string{ + KeyUsageTypeSignVerify, + KeyUsageTypeEncryptDecrypt, + } +} + const ( // MessageTypeRaw is a MessageType enum value MessageTypeRaw = "RAW" @@ -14726,6 +14881,14 @@ const ( MessageTypeDigest = "DIGEST" ) +// MessageType_Values returns all elements of the MessageType enum +func MessageType_Values() []string { + return []string{ + MessageTypeRaw, + MessageTypeDigest, + } +} + const ( // OriginTypeAwsKms is a OriginType enum value OriginTypeAwsKms = "AWS_KMS" @@ -14737,6 +14900,15 @@ const ( OriginTypeAwsCloudhsm = "AWS_CLOUDHSM" ) +// OriginType_Values returns all elements of the OriginType enum +func OriginType_Values() []string { + return []string{ + OriginTypeAwsKms, + OriginTypeExternal, + OriginTypeAwsCloudhsm, + } +} + const ( // SigningAlgorithmSpecRsassaPssSha256 is a SigningAlgorithmSpec enum value SigningAlgorithmSpecRsassaPssSha256 = "RSASSA_PSS_SHA_256" @@ -14766,7 +14938,29 @@ const ( SigningAlgorithmSpecEcdsaSha512 = "ECDSA_SHA_512" ) +// 
SigningAlgorithmSpec_Values returns all elements of the SigningAlgorithmSpec enum +func SigningAlgorithmSpec_Values() []string { + return []string{ + SigningAlgorithmSpecRsassaPssSha256, + SigningAlgorithmSpecRsassaPssSha384, + SigningAlgorithmSpecRsassaPssSha512, + SigningAlgorithmSpecRsassaPkcs1V15Sha256, + SigningAlgorithmSpecRsassaPkcs1V15Sha384, + SigningAlgorithmSpecRsassaPkcs1V15Sha512, + SigningAlgorithmSpecEcdsaSha256, + SigningAlgorithmSpecEcdsaSha384, + SigningAlgorithmSpecEcdsaSha512, + } +} + const ( // WrappingKeySpecRsa2048 is a WrappingKeySpec enum value WrappingKeySpecRsa2048 = "RSA_2048" ) + +// WrappingKeySpec_Values returns all elements of the WrappingKeySpec enum +func WrappingKeySpec_Values() []string { + return []string{ + WrappingKeySpecRsa2048, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go index add455d28..50ca0c092 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/lakeformation/api.go b/vendor/github.com/aws/aws-sdk-go/service/lakeformation/api.go index 34a54bd94..b40ecb54e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lakeformation/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lakeformation/api.go @@ -402,7 +402,8 @@ func (c *LakeFormation) GetDataLakeSettingsRequest(input *GetDataLakeSettingsInp // GetDataLakeSettings API operation for AWS Lake Formation. // -// The AWS Lake Formation principal. +// Retrieves the list of the data lake administrators of a Lake Formation-managed +// data lake. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -493,8 +494,9 @@ func (c *LakeFormation) GetEffectivePermissionsForPathRequest(input *GetEffectiv // GetEffectivePermissionsForPath API operation for AWS Lake Formation. // -// Returns the permissions for a specified table or database resource located -// at a path in Amazon S3. +// Returns the Lake Formation permissions for a specified table or database +// resource located at a path in Amazon S3. GetEffectivePermissionsForPath will +// not return databases and tables if the catalog is encrypted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -639,7 +641,7 @@ func (c *LakeFormation) GrantPermissionsRequest(input *GrantPermissionsInput) (r // and data organized in underlying data storage such as Amazon S3. // // For information about permissions, see Security and Access Control to Metadata -// and Data (https://docs-aws.amazon.com/michigan/latest/dg/security-data-access.html). +// and Data (https://docs-aws.amazon.com/lake-formation/latest/dg/security-data-access.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -737,7 +739,7 @@ func (c *LakeFormation) ListPermissionsRequest(input *ListPermissionsInput) (req // This operation returns only those permissions that have been explicitly granted. // // For information about permissions, see Security and Access Control to Metadata -// and Data (https://docs-aws.amazon.com/michigan/latest/dg/security-data-access.html). +// and Data (https://docs-aws.amazon.com/lake-formation/latest/dg/security-data-access.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1018,7 +1020,13 @@ func (c *LakeFormation) PutDataLakeSettingsRequest(input *PutDataLakeSettingsInp // PutDataLakeSettings API operation for AWS Lake Formation. // -// The AWS Lake Formation principal. +// Sets the list of data lake administrators who have admin privileges on all +// resources managed by Lake Formation. For more information on admin privileges, +// see Granting Lake Formation Permissions (https://docs.aws.amazon.com/lake-formation/latest/dg/lake-formation-permissions.html). +// +// This API replaces the current list of data lake admins with the new list +// being passed. To add an admin, fetch the current list and add the new admin +// to that list and pass that list in this API. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1112,6 +1120,15 @@ func (c *LakeFormation) RegisterResourceRequest(input *RegisterResourceInput) (r // you register subsequent paths, Lake Formation adds the path to the existing // policy. // +// The following request registers a new location and gives AWS Lake Formation +// permission to use the service-linked role to access that location. +// +// ResourceArn = arn:aws:s3:::my-bucket UseServiceLinkedRole = true +// +// If UseServiceLinkedRole is not set to true, you must provide or set the RoleArn: +// +// arn:aws:iam::12345:role/my-data-access-role +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1333,8 +1350,8 @@ func (c *LakeFormation) UpdateResourceWithContext(ctx aws.Context, input *Update // A resource to be created or added already exists. type AlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -1352,17 +1369,17 @@ func (s AlreadyExistsException) GoString() string { func newErrorAlreadyExistsException(v protocol.ResponseMetadata) error { return &AlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AlreadyExistsException) Code() string { +func (s *AlreadyExistsException) Code() string { return "AlreadyExistsException" } // Message returns the exception's message. -func (s AlreadyExistsException) Message() string { +func (s *AlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1370,22 +1387,22 @@ func (s AlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s AlreadyExistsException) OrigErr() error { +func (s *AlreadyExistsException) OrigErr() error { return nil } -func (s AlreadyExistsException) Error() string { +func (s *AlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *AlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } type BatchGrantPermissionsInput struct { @@ -1725,8 +1742,8 @@ func (s *ColumnWildcard) SetExcludedColumnNames(v []*string) *ColumnWildcard { // Two processes are trying to modify a resource simultaneously. type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -1744,17 +1761,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. -func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1762,25 +1779,26 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } -// The AWS Lake Formation principal. +// The AWS Lake Formation principal. Supported principals are IAM users or IAM +// roles. type DataLakePrincipal struct { _ struct{} `type:"structure"` @@ -1817,20 +1835,31 @@ func (s *DataLakePrincipal) SetDataLakePrincipalIdentifier(v string) *DataLakePr return s } -// The AWS Lake Formation principal. +// A structure representing a list of AWS Lake Formation principals designated +// as data lake administrators and lists of principal permission entries for +// default create database and default create table permissions. 
type DataLakeSettings struct { _ struct{} `type:"structure"` - // A list of up to three principal permissions entries for default create database - // permissions. + // A structure representing a list of up to three principal permissions entries + // for default create database permissions. CreateDatabaseDefaultPermissions []*PrincipalPermissions `type:"list"` - // A list of up to three principal permissions entries for default create table - // permissions. + // A structure representing a list of up to three principal permissions entries + // for default create table permissions. CreateTableDefaultPermissions []*PrincipalPermissions `type:"list"` - // A list of AWS Lake Formation principals. + // A list of AWS Lake Formation principals. Supported principals are IAM users + // or IAM roles. DataLakeAdmins []*DataLakePrincipal `type:"list"` + + // A list of the resource-owning account IDs that the caller's account can use + // to share their user access details (user ARNs). The user ARNs can be logged + // in the resource owner's AWS CloudTrail log. + // + // You may want to specify this property when you are in a high-trust boundary, + // such as the same team or company. + TrustedResourceOwners []*string `type:"list"` } // String returns the string representation @@ -1901,10 +1930,20 @@ func (s *DataLakeSettings) SetDataLakeAdmins(v []*DataLakePrincipal) *DataLakeSe return s } +// SetTrustedResourceOwners sets the TrustedResourceOwners field's value. +func (s *DataLakeSettings) SetTrustedResourceOwners(v []*string) *DataLakeSettings { + s.TrustedResourceOwners = v + return s +} + // A structure for a data location object where permissions are granted or revoked. type DataLocationResource struct { _ struct{} `type:"structure"` + // The identifier for the Data Catalog where the location is registered with + // AWS Lake Formation. By default, it is the account ID of the caller. + CatalogId *string `min:"1" type:"string"` + // The Amazon Resource Name (ARN) that uniquely identifies the data location // resource. // @@ -1925,6 +1964,9 @@ func (s DataLocationResource) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DataLocationResource) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DataLocationResource"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } if s.ResourceArn == nil { invalidParams.Add(request.NewErrParamRequired("ResourceArn")) } @@ -1935,6 +1977,12 @@ func (s *DataLocationResource) Validate() error { return nil } +// SetCatalogId sets the CatalogId field's value. +func (s *DataLocationResource) SetCatalogId(v string) *DataLocationResource { + s.CatalogId = &v + return s +} + // SetResourceArn sets the ResourceArn field's value. func (s *DataLocationResource) SetResourceArn(v string) *DataLocationResource { s.ResourceArn = &v @@ -1945,6 +1993,10 @@ func (s *DataLocationResource) SetResourceArn(v string) *DataLocationResource { type DatabaseResource struct { _ struct{} `type:"structure"` + // The identifier for the Data Catalog. By default, it is the account ID of + // the caller. + CatalogId *string `min:"1" type:"string"` + // The name of the database resource. Unique to the Data Catalog. // // Name is a required field @@ -1964,6 +2016,9 @@ func (s DatabaseResource) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *DatabaseResource) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DatabaseResource"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -1977,6 +2032,12 @@ func (s *DatabaseResource) Validate() error { return nil } +// SetCatalogId sets the CatalogId field's value. +func (s *DatabaseResource) SetCatalogId(v string) *DatabaseResource { + s.CatalogId = &v + return s +} + // SetName sets the Name field's value. func (s *DatabaseResource) SetName(v string) *DatabaseResource { s.Name = &v @@ -2096,10 +2157,39 @@ func (s *DescribeResourceOutput) SetResourceInfo(v *ResourceInfo) *DescribeResou return s } +// A structure containing the additional details to be returned in the AdditionalDetails +// attribute of PrincipalResourcePermissions. +// +// If a catalog resource is shared through AWS Resource Access Manager (AWS +// RAM), then there will exist a corresponding RAM share resource ARN. +type DetailsMap struct { + _ struct{} `type:"structure"` + + // A share resource ARN for a catalog resource shared through AWS Resource Access + // Manager (AWS RAM). + ResourceShare []*string `type:"list"` +} + +// String returns the string representation +func (s DetailsMap) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetailsMap) GoString() string { + return s.String() +} + +// SetResourceShare sets the ResourceShare field's value. +func (s *DetailsMap) SetResourceShare(v []*string) *DetailsMap { + s.ResourceShare = v + return s +} + // A specified entity does not exist type EntityNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -2117,17 +2207,17 @@ func (s EntityNotFoundException) GoString() string { func newErrorEntityNotFoundException(v protocol.ResponseMetadata) error { return &EntityNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EntityNotFoundException) Code() string { +func (s *EntityNotFoundException) Code() string { return "EntityNotFoundException" } // Message returns the exception's message. -func (s EntityNotFoundException) Message() string { +func (s *EntityNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2135,22 +2225,22 @@ func (s EntityNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EntityNotFoundException) OrigErr() error { +func (s *EntityNotFoundException) OrigErr() error { return nil } -func (s EntityNotFoundException) Error() string { +func (s *EntityNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EntityNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EntityNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s EntityNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *EntityNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about an error. @@ -2271,7 +2361,8 @@ func (s *GetDataLakeSettingsInput) SetCatalogId(v string) *GetDataLakeSettingsIn type GetDataLakeSettingsOutput struct { _ struct{} `type:"structure"` - // A list of AWS Lake Formation principals. + // A structure representing a list of AWS Lake Formation principals designated + // as data lake administrators. DataLakeSettings *DataLakeSettings `type:"structure"` } @@ -2528,8 +2619,8 @@ func (s GrantPermissionsOutput) GoString() string { // An internal service error occurred. type InternalServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -2547,17 +2638,17 @@ func (s InternalServiceException) GoString() string { func newErrorInternalServiceException(v protocol.ResponseMetadata) error { return &InternalServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceException) Code() string { +func (s *InternalServiceException) Code() string { return "InternalServiceException" } // Message returns the exception's message. -func (s InternalServiceException) Message() string { +func (s *InternalServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2565,28 +2656,28 @@ func (s InternalServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServiceException) OrigErr() error { +func (s *InternalServiceException) OrigErr() error { return nil } -func (s InternalServiceException) Error() string { +func (s *InternalServiceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceException) RequestID() string { + return s.RespMetadata.RequestID } // The input provided was not valid. type InvalidInputException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -2604,17 +2695,17 @@ func (s InvalidInputException) GoString() string { func newErrorInvalidInputException(v protocol.ResponseMetadata) error { return &InvalidInputException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInputException) Code() string { +func (s *InvalidInputException) Code() string { return "InvalidInputException" } // Message returns the exception's message. 
-func (s InvalidInputException) Message() string { +func (s *InvalidInputException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2622,22 +2713,22 @@ func (s InvalidInputException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInputException) OrigErr() error { +func (s *InvalidInputException) OrigErr() error { return nil } -func (s InvalidInputException) Error() string { +func (s *InvalidInputException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInputException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInputException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInputException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInputException) RequestID() string { + return s.RespMetadata.RequestID } type ListPermissionsInput struct { @@ -2866,8 +2957,8 @@ func (s *ListResourcesOutput) SetResourceInfoList(v []*ResourceInfo) *ListResour // The operation timed out. type OperationTimeoutException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A message describing the problem. Message_ *string `locationName:"Message" type:"string"` @@ -2885,17 +2976,17 @@ func (s OperationTimeoutException) GoString() string { func newErrorOperationTimeoutException(v protocol.ResponseMetadata) error { return &OperationTimeoutException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationTimeoutException) Code() string { +func (s *OperationTimeoutException) Code() string { return "OperationTimeoutException" } // Message returns the exception's message. -func (s OperationTimeoutException) Message() string { +func (s *OperationTimeoutException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2903,22 +2994,22 @@ func (s OperationTimeoutException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OperationTimeoutException) OrigErr() error { +func (s *OperationTimeoutException) OrigErr() error { return nil } -func (s OperationTimeoutException) Error() string { +func (s *OperationTimeoutException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OperationTimeoutException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OperationTimeoutException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationTimeoutException) RequestID() string { - return s.respMetadata.RequestID +func (s *OperationTimeoutException) RequestID() string { + return s.RespMetadata.RequestID } // Permissions granted to a principal. @@ -2973,6 +3064,10 @@ func (s *PrincipalPermissions) SetPrincipal(v *DataLakePrincipal) *PrincipalPerm type PrincipalResourcePermissions struct { _ struct{} `type:"structure"` + // This attribute can be used to return any additional details of PrincipalResourcePermissions. + // Currently returns only as a RAM share resource ARN. 
+ AdditionalDetails *DetailsMap `type:"structure"` + // The permissions to be granted or revoked on the resource. Permissions []*string `type:"list"` @@ -2997,6 +3092,12 @@ func (s PrincipalResourcePermissions) GoString() string { return s.String() } +// SetAdditionalDetails sets the AdditionalDetails field's value. +func (s *PrincipalResourcePermissions) SetAdditionalDetails(v *DetailsMap) *PrincipalResourcePermissions { + s.AdditionalDetails = v + return s +} + // SetPermissions sets the Permissions field's value. func (s *PrincipalResourcePermissions) SetPermissions(v []*string) *PrincipalResourcePermissions { s.Permissions = v @@ -3030,7 +3131,8 @@ type PutDataLakeSettingsInput struct { // Formation environment. CatalogId *string `min:"1" type:"string"` - // A list of AWS Lake Formation principals. + // A structure representing a list of AWS Lake Formation principals designated + // as data lake administrators. // // DataLakeSettings is a required field DataLakeSettings *DataLakeSettings `type:"structure" required:"true"` @@ -3101,11 +3203,14 @@ type RegisterResourceInput struct { // ResourceArn is a required field ResourceArn *string `type:"string" required:"true"` - // The identifier for the role. + // The identifier for the role that registers the resource. RoleArn *string `type:"string"` - // Designates a trusted caller, an IAM principal, by registering this caller - // with the Data Catalog. + // Designates an AWS Identity and Access Management (IAM) service-linked role + // by registering this role with the Data Catalog. A service-linked role is + // a unique type of IAM role that is linked directly to Lake Formation. + // + // For more information, see Using Service-Linked Roles for Lake Formation (https://docs-aws.amazon.com/lake-formation/latest/dg/service-linked-roles.html). UseServiceLinkedRole *bool `type:"boolean"` } @@ -3314,7 +3419,7 @@ type RevokePermissionsInput struct { CatalogId *string `min:"1" type:"string"` // The permissions revoked to the principal on the resource. For information - // about permissions, see Security and Access Control to Metadata and Data (https://docs-aws.amazon.com/michigan/latest/dg/security-data-access.html). + // about permissions, see Security and Access Control to Metadata and Data (https://docs-aws.amazon.com/lake-formation/latest/dg/security-data-access.html). // // Permissions is a required field Permissions []*string `type:"list" required:"true"` @@ -3425,6 +3530,10 @@ func (s RevokePermissionsOutput) GoString() string { type TableResource struct { _ struct{} `type:"structure"` + // The identifier for the Data Catalog. By default, it is the account ID of + // the caller. + CatalogId *string `min:"1" type:"string"` + // The name of the database for the table. Unique to a Data Catalog. A database // is a set of associated table definitions organized into a logical group. // You can Grant and Revoke database privileges to a principal. @@ -3433,9 +3542,12 @@ type TableResource struct { DatabaseName *string `min:"1" type:"string" required:"true"` // The name of the table. + Name *string `min:"1" type:"string"` + + // A wildcard object representing every table under a database. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // At least one of TableResource$Name or TableResource$TableWildcard is required. 
+ TableWildcard *TableWildcard `type:"structure"` } // String returns the string representation @@ -3451,15 +3563,15 @@ func (s TableResource) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *TableResource) Validate() error { invalidParams := request.ErrInvalidParams{Context: "TableResource"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } if s.DatabaseName == nil { invalidParams.Add(request.NewErrParamRequired("DatabaseName")) } if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } @@ -3470,6 +3582,12 @@ func (s *TableResource) Validate() error { return nil } +// SetCatalogId sets the CatalogId field's value. +func (s *TableResource) SetCatalogId(v string) *TableResource { + s.CatalogId = &v + return s +} + // SetDatabaseName sets the DatabaseName field's value. func (s *TableResource) SetDatabaseName(v string) *TableResource { s.DatabaseName = &v @@ -3482,6 +3600,27 @@ func (s *TableResource) SetName(v string) *TableResource { return s } +// SetTableWildcard sets the TableWildcard field's value. +func (s *TableResource) SetTableWildcard(v *TableWildcard) *TableResource { + s.TableWildcard = v + return s +} + +// A wildcard object representing every table under a database. +type TableWildcard struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TableWildcard) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TableWildcard) GoString() string { + return s.String() +} + // A structure for a table with columns object. This object is only used when // granting a SELECT permission. // @@ -3490,6 +3629,10 @@ func (s *TableResource) SetName(v string) *TableResource { type TableWithColumnsResource struct { _ struct{} `type:"structure"` + // The identifier for the Data Catalog. By default, it is the account ID of + // the caller. + CatalogId *string `min:"1" type:"string"` + // The list of column names for the table. At least one of ColumnNames or ColumnWildcard // is required. ColumnNames []*string `type:"list"` @@ -3501,11 +3644,15 @@ type TableWithColumnsResource struct { // The name of the database for the table with columns resource. Unique to the // Data Catalog. A database is a set of associated table definitions organized // into a logical group. You can Grant and Revoke database privileges to a principal. - DatabaseName *string `min:"1" type:"string"` + // + // DatabaseName is a required field + DatabaseName *string `min:"1" type:"string" required:"true"` // The name of the table resource. A table is a metadata definition that represents // your data. You can Grant and Revoke table privileges to a principal. - Name *string `min:"1" type:"string"` + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation @@ -3521,9 +3668,18 @@ func (s TableWithColumnsResource) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *TableWithColumnsResource) Validate() error { invalidParams := request.ErrInvalidParams{Context: "TableWithColumnsResource"} + if s.CatalogId != nil && len(*s.CatalogId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CatalogId", 1)) + } + if s.DatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("DatabaseName")) + } if s.DatabaseName != nil && len(*s.DatabaseName) < 1 { invalidParams.Add(request.NewErrParamMinLen("DatabaseName", 1)) } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } @@ -3534,6 +3690,12 @@ func (s *TableWithColumnsResource) Validate() error { return nil } +// SetCatalogId sets the CatalogId field's value. +func (s *TableWithColumnsResource) SetCatalogId(v string) *TableWithColumnsResource { + s.CatalogId = &v + return s +} + // SetColumnNames sets the ColumnNames field's value. func (s *TableWithColumnsResource) SetColumnNames(v []*string) *TableWithColumnsResource { s.ColumnNames = v @@ -3659,6 +3821,23 @@ const ( ComparisonOperatorBetween = "BETWEEN" ) +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorEq, + ComparisonOperatorNe, + ComparisonOperatorLe, + ComparisonOperatorLt, + ComparisonOperatorGe, + ComparisonOperatorGt, + ComparisonOperatorContains, + ComparisonOperatorNotContains, + ComparisonOperatorBeginsWith, + ComparisonOperatorIn, + ComparisonOperatorBetween, + } +} + const ( // DataLakeResourceTypeCatalog is a DataLakeResourceType enum value DataLakeResourceTypeCatalog = "CATALOG" @@ -3673,6 +3852,16 @@ const ( DataLakeResourceTypeDataLocation = "DATA_LOCATION" ) +// DataLakeResourceType_Values returns all elements of the DataLakeResourceType enum +func DataLakeResourceType_Values() []string { + return []string{ + DataLakeResourceTypeCatalog, + DataLakeResourceTypeDatabase, + DataLakeResourceTypeTable, + DataLakeResourceTypeDataLocation, + } +} + const ( // FieldNameStringResourceArn is a FieldNameString enum value FieldNameStringResourceArn = "RESOURCE_ARN" @@ -3684,6 +3873,15 @@ const ( FieldNameStringLastModified = "LAST_MODIFIED" ) +// FieldNameString_Values returns all elements of the FieldNameString enum +func FieldNameString_Values() []string { + return []string{ + FieldNameStringResourceArn, + FieldNameStringRoleArn, + FieldNameStringLastModified, + } +} + const ( // PermissionAll is a Permission enum value PermissionAll = "ALL" @@ -3703,6 +3901,9 @@ const ( // PermissionInsert is a Permission enum value PermissionInsert = "INSERT" + // PermissionDescribe is a Permission enum value + PermissionDescribe = "DESCRIBE" + // PermissionCreateDatabase is a Permission enum value PermissionCreateDatabase = "CREATE_DATABASE" @@ -3712,3 +3913,19 @@ const ( // PermissionDataLocationAccess is a Permission enum value PermissionDataLocationAccess = "DATA_LOCATION_ACCESS" ) + +// Permission_Values returns all elements of the Permission enum +func Permission_Values() []string { + return []string{ + PermissionAll, + PermissionSelect, + PermissionAlter, + PermissionDrop, + PermissionDelete, + PermissionInsert, + PermissionDescribe, + PermissionCreateDatabase, + PermissionCreateTable, + PermissionDataLocationAccess, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go b/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go index 10b8a812a..4d6e49a18 100644 
--- a/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go index 13531de92..5a3d50023 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go @@ -173,10 +173,10 @@ func (c *Lambda) AddPermissionRequest(input *AddPermissionInput) (req *request.R // To grant permission to another account, specify the account ID as the Principal. // For AWS services, the principal is a domain-style identifier defined by the // service, like s3.amazonaws.com or sns.amazonaws.com. For AWS services, you -// can also specify the ARN or owning account of the associated resource as -// the SourceArn or SourceAccount. If you grant permission to a service principal -// without specifying the source, other accounts could potentially configure -// resources in their account to invoke your Lambda function. +// can also specify the ARN of the associated resource as the SourceArn. If +// you grant permission to a service principal without specifying the source, +// other accounts could potentially configure resources in their account to +// invoke your Lambda function. // // This action adds a statement to a resource-based permissions policy for the // function. For more information about function policies, see Lambda Function @@ -387,6 +387,8 @@ func (c *Lambda) CreateEventSourceMappingRequest(input *CreateEventSourceMapping // // * Using AWS Lambda with Amazon SQS (https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html) // +// * Using AWS Lambda with Amazon MSK (https://docs.aws.amazon.com/lambda/latest/dg/with-msk.html) +// // The following error handling options are only available for stream sources // (DynamoDB and Kinesis): // @@ -397,10 +399,11 @@ func (c *Lambda) CreateEventSourceMappingRequest(input *CreateEventSourceMapping // Amazon SNS topic. // // * MaximumRecordAgeInSeconds - Discard records older than the specified -// age. +// age. Default -1 (infinite). Minimum 60. Maximum 604800. // // * MaximumRetryAttempts - Discard records after the specified number of -// retries. +// retries. Default -1 (infinite). Minimum 0. Maximum 10000. When infinite, +// failed records will be retried until the record expires. // // * ParallelizationFactor - Process multiple batches from each shard concurrently. // @@ -2374,7 +2377,8 @@ func (c *Lambda) InvokeRequest(input *InvokeInput) (req *request.Request, output // client, SDK, firewall, proxy, or operating system to allow for long connections // with timeout or keep-alive settings. // -// This operation requires permission for the lambda:InvokeFunction action. +// This operation requires permission for the lambda:InvokeFunction (https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awslambda.html) +// action. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2419,6 +2423,20 @@ func (c *Lambda) InvokeRequest(input *InvokeInput) (req *request.Request, output // specified as part of Lambda function configuration, because the limit for // network interfaces has been reached. // +// * EFSMountConnectivityException +// The function couldn't make a network connection to the configured file system. +// +// * EFSMountFailureException +// The function couldn't mount the configured file system due to a permission +// or configuration issue. +// +// * EFSMountTimeoutException +// The function was able to make a network connection to the configured file +// system, but the mount operation timed out. +// +// * EFSIOException +// An error occured when reading from or writing to a connected file system. +// // * EC2ThrottledException // AWS Lambda was throttled by Amazon EC2 during Lambda function initialization // using the execution role provided for the Lambda function. @@ -4210,7 +4228,7 @@ func (c *Lambda) PutFunctionEventInvokeConfigRequest(input *PutFunctionEventInvo // on a function, version, or alias. If a configuration already exists for a // function, version, or alias, this operation overwrites it. If you exclude // any settings, they are removed. To set one option without affecting existing -// settings for other options, use PutFunctionEventInvokeConfig. +// settings for other options, use UpdateFunctionEventInvokeConfig. // // By default, Lambda retries an asynchronous invocation twice if the function // returns an error. It retains events in a queue for up to six hours. When @@ -4887,10 +4905,11 @@ func (c *Lambda) UpdateEventSourceMappingRequest(input *UpdateEventSourceMapping // Amazon SNS topic. // // * MaximumRecordAgeInSeconds - Discard records older than the specified -// age. +// age. Default -1 (infinite). Minimum 60. Maximum 604800. // // * MaximumRetryAttempts - Discard records after the specified number of -// retries. +// retries. Default -1 (infinite). Minimum 0. Maximum 10000. When infinite, +// failed records will be retried until the record expires. // // * ParallelizationFactor - Process multiple batches from each shard concurrently. // @@ -5545,11 +5564,9 @@ type AddPermissionInput struct { // read it. RevisionId *string `type:"string"` - // For AWS services, the ID of the account that owns the resource. Use this - // instead of SourceArn to grant permission to resources that are owned by another - // account (for example, all of an account's Amazon S3 buckets). Or use it together + // For Amazon S3, the ID of the account that owns the resource. Use this together // with SourceArn to ensure that the resource is owned by the specified account. - // For example, an Amazon S3 bucket could be deleted by its owner and recreated + // It is possible for an Amazon S3 bucket to be deleted by its owner and recreated // by another account. SourceAccount *string `type:"string"` @@ -5757,8 +5774,7 @@ func (s *AliasConfiguration) SetRoutingConfig(v *AliasRoutingConfiguration) *Ali type AliasRoutingConfiguration struct { _ struct{} `type:"structure"` - // The name of the second alias, and the percentage of traffic that's routed - // to it. + // The second version, and the percentage of traffic that's routed to it. 
AdditionalVersionWeights map[string]*float64 `type:"map"` } @@ -5780,8 +5796,8 @@ func (s *AliasRoutingConfiguration) SetAdditionalVersionWeights(v map[string]*fl // You have exceeded your maximum total code size per account. Learn more (https://docs.aws.amazon.com/lambda/latest/dg/limits.html) type CodeStorageExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -5801,17 +5817,17 @@ func (s CodeStorageExceededException) GoString() string { func newErrorCodeStorageExceededException(v protocol.ResponseMetadata) error { return &CodeStorageExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CodeStorageExceededException) Code() string { +func (s *CodeStorageExceededException) Code() string { return "CodeStorageExceededException" } // Message returns the exception's message. -func (s CodeStorageExceededException) Message() string { +func (s *CodeStorageExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5819,22 +5835,22 @@ func (s CodeStorageExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CodeStorageExceededException) OrigErr() error { +func (s *CodeStorageExceededException) OrigErr() error { return nil } -func (s CodeStorageExceededException) Error() string { +func (s *CodeStorageExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s CodeStorageExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CodeStorageExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CodeStorageExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *CodeStorageExceededException) RequestID() string { + return s.RespMetadata.RequestID } type CreateAliasInput struct { @@ -5869,7 +5885,7 @@ type CreateAliasInput struct { // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // The routing configuration (https://docs.aws.amazon.com/lambda/latest/dg/lambda-traffic-shifting-using-aliases.html) + // The routing configuration (https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html#configuring-alias-routing) // of the alias. RoutingConfig *AliasRoutingConfiguration `type:"structure"` } @@ -5952,6 +5968,8 @@ type CreateEventSourceMappingInput struct { // * Amazon DynamoDB Streams - Default 100. Max 1,000. // // * Amazon Simple Queue Service - Default 10. Max 10. + // + // * Amazon Managed Streaming for Apache Kafka - Default 100. Max 10,000. BatchSize *int64 `min:"1" type:"integer"` // (Streams) If the function returns an error, split the batch in two and retry. @@ -5961,7 +5979,8 @@ type CreateEventSourceMappingInput struct { // records. DestinationConfig *DestinationConfig `type:"structure"` - // Disables the event source mapping to pause polling and invocation. + // If true, the event source mapping is active. Set to false to pause polling + // and invocation. Enabled *bool `type:"boolean"` // The Amazon Resource Name (ARN) of the event source. 
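As context for the CreateEventSourceMapping changes in the hunks above, a minimal sketch (not part of the vendored diff) of wiring a Lambda function to an Amazon MSK topic with the newly added Topics field might look like the following; the cluster ARN, function name, and topic name are placeholder values:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/lambda"
    )

    func main() {
    	sess := session.Must(session.NewSession())
    	svc := lambda.New(sess)

    	// Create an event source mapping that polls an MSK topic and invokes
    	// the function. Topics is the field introduced in this SDK update; for
    	// Kinesis/DynamoDB stream sources, MaximumRecordAgeInSeconds and
    	// MaximumRetryAttempts default to -1 (infinite) per the doc comments above.
    	out, err := svc.CreateEventSourceMapping(&lambda.CreateEventSourceMappingInput{
    		EventSourceArn:   aws.String("arn:aws:kafka:us-east-1:111122223333:cluster/example/abc"), // placeholder
    		FunctionName:     aws.String("my-function"),                                              // placeholder
    		Topics:           []*string{aws.String("example-topic")},                                 // placeholder
    		StartingPosition: aws.String(lambda.EventSourcePositionTrimHorizon),
    		BatchSize:        aws.Int64(100),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(aws.StringValue(out.State))
    }

The mapping is created in the Creating state and transitions to Enabled once polling starts; set Enabled to false on the input to create it paused, as described in the Enabled field's doc comment.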
@@ -5972,6 +5991,8 @@ type CreateEventSourceMappingInput struct { // // * Amazon Simple Queue Service - The ARN of the queue. // + // * Amazon Managed Streaming for Apache Kafka - The ARN of the cluster. + // // EventSourceArn is a required field EventSourceArn *string `type:"string" required:"true"` @@ -5993,28 +6014,32 @@ type CreateEventSourceMappingInput struct { // FunctionName is a required field FunctionName *string `min:"1" type:"string" required:"true"` - // The maximum amount of time to gather records before invoking the function, - // in seconds. + // (Streams) The maximum amount of time to gather records before invoking the + // function, in seconds. MaximumBatchingWindowInSeconds *int64 `type:"integer"` - // (Streams) The maximum age of a record that Lambda sends to a function for - // processing. - MaximumRecordAgeInSeconds *int64 `min:"60" type:"integer"` + // (Streams) Discard records older than the specified age. The default value + // is infinite (-1). + MaximumRecordAgeInSeconds *int64 `type:"integer"` - // (Streams) The maximum number of times to retry when the function returns - // an error. + // (Streams) Discard records after the specified number of retries. The default + // value is infinite (-1). When set to infinite (-1), failed records will be + // retried until the record expires. MaximumRetryAttempts *int64 `type:"integer"` // (Streams) The number of batches to process from each shard concurrently. ParallelizationFactor *int64 `min:"1" type:"integer"` // The position in a stream from which to start reading. Required for Amazon - // Kinesis and Amazon DynamoDB Streams sources. AT_TIMESTAMP is only supported - // for Amazon Kinesis streams. + // Kinesis, Amazon DynamoDB, and Amazon MSK Streams sources. AT_TIMESTAMP is + // only supported for Amazon Kinesis streams. StartingPosition *string `type:"string" enum:"EventSourcePosition"` // With StartingPosition set to AT_TIMESTAMP, the time from which to start reading. StartingPositionTimestamp *time.Time `type:"timestamp"` + + // (MSK) The name of the Kafka topic. + Topics []*string `min:"1" type:"list"` } // String returns the string representation @@ -6042,12 +6067,18 @@ func (s *CreateEventSourceMappingInput) Validate() error { if s.FunctionName != nil && len(*s.FunctionName) < 1 { invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) } - if s.MaximumRecordAgeInSeconds != nil && *s.MaximumRecordAgeInSeconds < 60 { - invalidParams.Add(request.NewErrParamMinValue("MaximumRecordAgeInSeconds", 60)) + if s.MaximumRecordAgeInSeconds != nil && *s.MaximumRecordAgeInSeconds < -1 { + invalidParams.Add(request.NewErrParamMinValue("MaximumRecordAgeInSeconds", -1)) + } + if s.MaximumRetryAttempts != nil && *s.MaximumRetryAttempts < -1 { + invalidParams.Add(request.NewErrParamMinValue("MaximumRetryAttempts", -1)) } if s.ParallelizationFactor != nil && *s.ParallelizationFactor < 1 { invalidParams.Add(request.NewErrParamMinValue("ParallelizationFactor", 1)) } + if s.Topics != nil && len(s.Topics) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Topics", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -6127,6 +6158,12 @@ func (s *CreateEventSourceMappingInput) SetStartingPositionTimestamp(v time.Time return s } +// SetTopics sets the Topics field's value. 
+func (s *CreateEventSourceMappingInput) SetTopics(v []*string) *CreateEventSourceMappingInput { + s.Topics = v + return s +} + type CreateFunctionInput struct { _ struct{} `type:"structure"` @@ -6146,6 +6183,9 @@ type CreateFunctionInput struct { // Environment variables that are accessible from function code during execution. Environment *Environment `type:"structure"` + // Connection settings for an Amazon EFS file system. + FileSystemConfigs []*FileSystemConfig `type:"list"` + // The name of the Lambda function. // // Name formats @@ -6259,6 +6299,16 @@ func (s *CreateFunctionInput) Validate() error { invalidParams.AddNested("Code", err.(request.ErrInvalidParams)) } } + if s.FileSystemConfigs != nil { + for i, v := range s.FileSystemConfigs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FileSystemConfigs", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6290,6 +6340,12 @@ func (s *CreateFunctionInput) SetEnvironment(v *Environment) *CreateFunctionInpu return s } +// SetFileSystemConfigs sets the FileSystemConfigs field's value. +func (s *CreateFunctionInput) SetFileSystemConfigs(v []*FileSystemConfig) *CreateFunctionInput { + s.FileSystemConfigs = v + return s +} + // SetFunctionName sets the FunctionName field's value. func (s *CreateFunctionInput) SetFunctionName(v string) *CreateFunctionInput { s.FunctionName = &v @@ -6924,8 +6980,8 @@ func (s *DestinationConfig) SetOnSuccess(v *OnSuccess) *DestinationConfig { // Need additional permissions to configure VPC settings. type EC2AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -6944,17 +7000,17 @@ func (s EC2AccessDeniedException) GoString() string { func newErrorEC2AccessDeniedException(v protocol.ResponseMetadata) error { return &EC2AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EC2AccessDeniedException) Code() string { +func (s *EC2AccessDeniedException) Code() string { return "EC2AccessDeniedException" } // Message returns the exception's message. -func (s EC2AccessDeniedException) Message() string { +func (s *EC2AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6962,29 +7018,29 @@ func (s EC2AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EC2AccessDeniedException) OrigErr() error { +func (s *EC2AccessDeniedException) OrigErr() error { return nil } -func (s EC2AccessDeniedException) Error() string { +func (s *EC2AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s EC2AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EC2AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s EC2AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *EC2AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // AWS Lambda was throttled by Amazon EC2 during Lambda function initialization // using the execution role provided for the Lambda function. type EC2ThrottledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -7003,17 +7059,17 @@ func (s EC2ThrottledException) GoString() string { func newErrorEC2ThrottledException(v protocol.ResponseMetadata) error { return &EC2ThrottledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EC2ThrottledException) Code() string { +func (s *EC2ThrottledException) Code() string { return "EC2ThrottledException" } // Message returns the exception's message. -func (s EC2ThrottledException) Message() string { +func (s *EC2ThrottledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7021,29 +7077,29 @@ func (s EC2ThrottledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EC2ThrottledException) OrigErr() error { +func (s *EC2ThrottledException) OrigErr() error { return nil } -func (s EC2ThrottledException) Error() string { +func (s *EC2ThrottledException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s EC2ThrottledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EC2ThrottledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EC2ThrottledException) RequestID() string { - return s.respMetadata.RequestID +func (s *EC2ThrottledException) RequestID() string { + return s.RespMetadata.RequestID } // AWS Lambda received an unexpected EC2 client exception while setting up for // the Lambda function. type EC2UnexpectedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` EC2ErrorCode *string `type:"string"` @@ -7064,17 +7120,251 @@ func (s EC2UnexpectedException) GoString() string { func newErrorEC2UnexpectedException(v protocol.ResponseMetadata) error { return &EC2UnexpectedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EC2UnexpectedException) Code() string { +func (s *EC2UnexpectedException) Code() string { return "EC2UnexpectedException" } // Message returns the exception's message. -func (s EC2UnexpectedException) Message() string { +func (s *EC2UnexpectedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *EC2UnexpectedException) OrigErr() error { + return nil +} + +func (s *EC2UnexpectedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *EC2UnexpectedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *EC2UnexpectedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// An error occured when reading from or writing to a connected file system. +type EFSIOException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + Type *string `type:"string"` +} + +// String returns the string representation +func (s EFSIOException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EFSIOException) GoString() string { + return s.String() +} + +func newErrorEFSIOException(v protocol.ResponseMetadata) error { + return &EFSIOException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *EFSIOException) Code() string { + return "EFSIOException" +} + +// Message returns the exception's message. +func (s *EFSIOException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *EFSIOException) OrigErr() error { + return nil +} + +func (s *EFSIOException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *EFSIOException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *EFSIOException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The function couldn't make a network connection to the configured file system. +type EFSMountConnectivityException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + Type *string `type:"string"` +} + +// String returns the string representation +func (s EFSMountConnectivityException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EFSMountConnectivityException) GoString() string { + return s.String() +} + +func newErrorEFSMountConnectivityException(v protocol.ResponseMetadata) error { + return &EFSMountConnectivityException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *EFSMountConnectivityException) Code() string { + return "EFSMountConnectivityException" +} + +// Message returns the exception's message. +func (s *EFSMountConnectivityException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *EFSMountConnectivityException) OrigErr() error { + return nil +} + +func (s *EFSMountConnectivityException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *EFSMountConnectivityException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *EFSMountConnectivityException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The function couldn't mount the configured file system due to a permission +// or configuration issue. 
+type EFSMountFailureException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + Type *string `type:"string"` +} + +// String returns the string representation +func (s EFSMountFailureException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EFSMountFailureException) GoString() string { + return s.String() +} + +func newErrorEFSMountFailureException(v protocol.ResponseMetadata) error { + return &EFSMountFailureException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *EFSMountFailureException) Code() string { + return "EFSMountFailureException" +} + +// Message returns the exception's message. +func (s *EFSMountFailureException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *EFSMountFailureException) OrigErr() error { + return nil +} + +func (s *EFSMountFailureException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *EFSMountFailureException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *EFSMountFailureException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The function was able to make a network connection to the configured file +// system, but the mount operation timed out. +type EFSMountTimeoutException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + Type *string `type:"string"` +} + +// String returns the string representation +func (s EFSMountTimeoutException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EFSMountTimeoutException) GoString() string { + return s.String() +} + +func newErrorEFSMountTimeoutException(v protocol.ResponseMetadata) error { + return &EFSMountTimeoutException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *EFSMountTimeoutException) Code() string { + return "EFSMountTimeoutException" +} + +// Message returns the exception's message. +func (s *EFSMountTimeoutException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7082,30 +7372,30 @@ func (s EC2UnexpectedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EC2UnexpectedException) OrigErr() error { +func (s *EFSMountTimeoutException) OrigErr() error { return nil } -func (s EC2UnexpectedException) Error() string { +func (s *EFSMountTimeoutException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s EC2UnexpectedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EFSMountTimeoutException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s EC2UnexpectedException) RequestID() string { - return s.respMetadata.RequestID +func (s *EFSMountTimeoutException) RequestID() string { + return s.RespMetadata.RequestID } // AWS Lambda was not able to create an elastic network interface in the VPC, // specified as part of Lambda function configuration, because the limit for // network interfaces has been reached. type ENILimitReachedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -7124,17 +7414,17 @@ func (s ENILimitReachedException) GoString() string { func newErrorENILimitReachedException(v protocol.ResponseMetadata) error { return &ENILimitReachedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ENILimitReachedException) Code() string { +func (s *ENILimitReachedException) Code() string { return "ENILimitReachedException" } // Message returns the exception's message. -func (s ENILimitReachedException) Message() string { +func (s *ENILimitReachedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7142,22 +7432,22 @@ func (s ENILimitReachedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ENILimitReachedException) OrigErr() error { +func (s *ENILimitReachedException) OrigErr() error { return nil } -func (s ENILimitReachedException) Error() string { +func (s *ENILimitReachedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ENILimitReachedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ENILimitReachedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ENILimitReachedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ENILimitReachedException) RequestID() string { + return s.RespMetadata.RequestID } // A function's environment variable settings. @@ -7279,13 +7569,13 @@ type EventSourceMappingConfiguration struct { // The result of the last AWS Lambda invocation of your Lambda function. LastProcessingResult *string `type:"string"` - // The maximum amount of time to gather records before invoking the function, - // in seconds. + // (Streams) The maximum amount of time to gather records before invoking the + // function, in seconds. MaximumBatchingWindowInSeconds *int64 `type:"integer"` // (Streams) The maximum age of a record that Lambda sends to a function for // processing. - MaximumRecordAgeInSeconds *int64 `min:"60" type:"integer"` + MaximumRecordAgeInSeconds *int64 `type:"integer"` // (Streams) The maximum number of times to retry when the function returns // an error. @@ -7302,6 +7592,9 @@ type EventSourceMappingConfiguration struct { // a user, or by the Lambda service. StateTransitionReason *string `type:"string"` + // (MSK) The name of the Kafka topic. + Topics []*string `min:"1" type:"list"` + // The identifier of the event source mapping. UUID *string `type:"string"` } @@ -7394,12 +7687,73 @@ func (s *EventSourceMappingConfiguration) SetStateTransitionReason(v string) *Ev return s } +// SetTopics sets the Topics field's value. 
+func (s *EventSourceMappingConfiguration) SetTopics(v []*string) *EventSourceMappingConfiguration { + s.Topics = v + return s +} + // SetUUID sets the UUID field's value. func (s *EventSourceMappingConfiguration) SetUUID(v string) *EventSourceMappingConfiguration { s.UUID = &v return s } +// Details about the connection between a Lambda function and an Amazon EFS +// file system. +type FileSystemConfig struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon EFS access point that provides + // access to the file system. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // The path where the function can access the file system, starting with /mnt/. + // + // LocalMountPath is a required field + LocalMountPath *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s FileSystemConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FileSystemConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FileSystemConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FileSystemConfig"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.LocalMountPath == nil { + invalidParams.Add(request.NewErrParamRequired("LocalMountPath")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *FileSystemConfig) SetArn(v string) *FileSystemConfig { + s.Arn = &v + return s +} + +// SetLocalMountPath sets the LocalMountPath field's value. +func (s *FileSystemConfig) SetLocalMountPath(v string) *FileSystemConfig { + s.LocalMountPath = &v + return s +} + // The code for the Lambda function. You can specify either an object in Amazon // S3, or upload a deployment package directly. type FunctionCode struct { @@ -7527,6 +7881,9 @@ type FunctionConfiguration struct { // The function's environment variables. Environment *EnvironmentResponse `type:"structure"` + // Connection settings for an Amazon EFS file system. + FileSystemConfigs []*FileSystemConfig `type:"list"` + // The function's Amazon Resource Name (ARN). FunctionArn *string `type:"string"` @@ -7583,7 +7940,8 @@ type FunctionConfiguration struct { // you can't invoke or modify the function. StateReasonCode *string `type:"string" enum:"StateReasonCode"` - // The amount of time that Lambda allows a function to run before stopping it. + // The amount of time in seconds that Lambda allows a function to run before + // stopping it. Timeout *int64 `min:"1" type:"integer"` // The function's AWS X-Ray tracing configuration. @@ -7636,6 +7994,12 @@ func (s *FunctionConfiguration) SetEnvironment(v *EnvironmentResponse) *Function return s } +// SetFileSystemConfigs sets the FileSystemConfigs field's value. +func (s *FunctionConfiguration) SetFileSystemConfigs(v []*FileSystemConfig) *FunctionConfiguration { + s.FileSystemConfigs = v + return s +} + // SetFunctionArn sets the FunctionArn field's value. func (s *FunctionConfiguration) SetFunctionArn(v string) *FunctionConfiguration { s.FunctionArn = &v @@ -8973,8 +9337,8 @@ func (s *GetProvisionedConcurrencyConfigOutput) SetStatusReason(v string) *GetPr // One of the parameters in the request is invalid. 
type InvalidParameterValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The exception message. Message_ *string `locationName:"message" type:"string"` @@ -8995,17 +9359,17 @@ func (s InvalidParameterValueException) GoString() string { func newErrorInvalidParameterValueException(v protocol.ResponseMetadata) error { return &InvalidParameterValueException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterValueException) Code() string { +func (s *InvalidParameterValueException) Code() string { return "InvalidParameterValueException" } // Message returns the exception's message. -func (s InvalidParameterValueException) Message() string { +func (s *InvalidParameterValueException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9013,28 +9377,28 @@ func (s InvalidParameterValueException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterValueException) OrigErr() error { +func (s *InvalidParameterValueException) OrigErr() error { return nil } -func (s InvalidParameterValueException) Error() string { +func (s *InvalidParameterValueException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterValueException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterValueException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterValueException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterValueException) RequestID() string { + return s.RespMetadata.RequestID } // The request body could not be parsed as JSON. type InvalidRequestContentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The exception message. Message_ *string `locationName:"message" type:"string"` @@ -9055,17 +9419,17 @@ func (s InvalidRequestContentException) GoString() string { func newErrorInvalidRequestContentException(v protocol.ResponseMetadata) error { return &InvalidRequestContentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestContentException) Code() string { +func (s *InvalidRequestContentException) Code() string { return "InvalidRequestContentException" } // Message returns the exception's message. -func (s InvalidRequestContentException) Message() string { +func (s *InvalidRequestContentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9073,28 +9437,28 @@ func (s InvalidRequestContentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestContentException) OrigErr() error { +func (s *InvalidRequestContentException) OrigErr() error { return nil } -func (s InvalidRequestContentException) Error() string { +func (s *InvalidRequestContentException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidRequestContentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestContentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestContentException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestContentException) RequestID() string { + return s.RespMetadata.RequestID } // The runtime or runtime version specified is not supported. type InvalidRuntimeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -9113,17 +9477,17 @@ func (s InvalidRuntimeException) GoString() string { func newErrorInvalidRuntimeException(v protocol.ResponseMetadata) error { return &InvalidRuntimeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRuntimeException) Code() string { +func (s *InvalidRuntimeException) Code() string { return "InvalidRuntimeException" } // Message returns the exception's message. -func (s InvalidRuntimeException) Message() string { +func (s *InvalidRuntimeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9131,29 +9495,29 @@ func (s InvalidRuntimeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRuntimeException) OrigErr() error { +func (s *InvalidRuntimeException) OrigErr() error { return nil } -func (s InvalidRuntimeException) Error() string { +func (s *InvalidRuntimeException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRuntimeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRuntimeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRuntimeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRuntimeException) RequestID() string { + return s.RespMetadata.RequestID } // The Security Group ID provided in the Lambda function VPC configuration is // invalid. type InvalidSecurityGroupIDException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -9172,17 +9536,17 @@ func (s InvalidSecurityGroupIDException) GoString() string { func newErrorInvalidSecurityGroupIDException(v protocol.ResponseMetadata) error { return &InvalidSecurityGroupIDException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSecurityGroupIDException) Code() string { +func (s *InvalidSecurityGroupIDException) Code() string { return "InvalidSecurityGroupIDException" } // Message returns the exception's message. -func (s InvalidSecurityGroupIDException) Message() string { +func (s *InvalidSecurityGroupIDException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9190,28 +9554,28 @@ func (s InvalidSecurityGroupIDException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidSecurityGroupIDException) OrigErr() error { +func (s *InvalidSecurityGroupIDException) OrigErr() error { return nil } -func (s InvalidSecurityGroupIDException) Error() string { +func (s *InvalidSecurityGroupIDException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSecurityGroupIDException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSecurityGroupIDException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSecurityGroupIDException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSecurityGroupIDException) RequestID() string { + return s.RespMetadata.RequestID } // The Subnet ID provided in the Lambda function VPC configuration is invalid. type InvalidSubnetIDException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -9230,17 +9594,17 @@ func (s InvalidSubnetIDException) GoString() string { func newErrorInvalidSubnetIDException(v protocol.ResponseMetadata) error { return &InvalidSubnetIDException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSubnetIDException) Code() string { +func (s *InvalidSubnetIDException) Code() string { return "InvalidSubnetIDException" } // Message returns the exception's message. -func (s InvalidSubnetIDException) Message() string { +func (s *InvalidSubnetIDException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9248,28 +9612,28 @@ func (s InvalidSubnetIDException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidSubnetIDException) OrigErr() error { +func (s *InvalidSubnetIDException) OrigErr() error { return nil } -func (s InvalidSubnetIDException) Error() string { +func (s *InvalidSubnetIDException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSubnetIDException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSubnetIDException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidSubnetIDException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSubnetIDException) RequestID() string { + return s.RespMetadata.RequestID } // AWS Lambda could not unzip the deployment package. type InvalidZipFileException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -9288,17 +9652,17 @@ func (s InvalidZipFileException) GoString() string { func newErrorInvalidZipFileException(v protocol.ResponseMetadata) error { return &InvalidZipFileException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidZipFileException) Code() string { +func (s *InvalidZipFileException) Code() string { return "InvalidZipFileException" } // Message returns the exception's message. 
-func (s InvalidZipFileException) Message() string { +func (s *InvalidZipFileException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9306,22 +9670,22 @@ func (s InvalidZipFileException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidZipFileException) OrigErr() error { +func (s *InvalidZipFileException) OrigErr() error { return nil } -func (s InvalidZipFileException) Error() string { +func (s *InvalidZipFileException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidZipFileException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidZipFileException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidZipFileException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidZipFileException) RequestID() string { + return s.RespMetadata.RequestID } // Deprecated: InvokeAsyncInput has been deprecated @@ -9598,8 +9962,8 @@ func (s *InvokeOutput) SetStatusCode(v int64) *InvokeOutput { // Lambda was unable to decrypt the environment variables because KMS access // was denied. Check the Lambda function's KMS permissions. type KMSAccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -9618,17 +9982,17 @@ func (s KMSAccessDeniedException) GoString() string { func newErrorKMSAccessDeniedException(v protocol.ResponseMetadata) error { return &KMSAccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSAccessDeniedException) Code() string { +func (s *KMSAccessDeniedException) Code() string { return "KMSAccessDeniedException" } // Message returns the exception's message. -func (s KMSAccessDeniedException) Message() string { +func (s *KMSAccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9636,29 +10000,29 @@ func (s KMSAccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSAccessDeniedException) OrigErr() error { +func (s *KMSAccessDeniedException) OrigErr() error { return nil } -func (s KMSAccessDeniedException) Error() string { +func (s *KMSAccessDeniedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSAccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSAccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSAccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSAccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // Lambda was unable to decrypt the environment variables because the KMS key // used is disabled. Check the Lambda function's KMS key settings. 
type KMSDisabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -9677,17 +10041,17 @@ func (s KMSDisabledException) GoString() string { func newErrorKMSDisabledException(v protocol.ResponseMetadata) error { return &KMSDisabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSDisabledException) Code() string { +func (s *KMSDisabledException) Code() string { return "KMSDisabledException" } // Message returns the exception's message. -func (s KMSDisabledException) Message() string { +func (s *KMSDisabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9695,29 +10059,29 @@ func (s KMSDisabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSDisabledException) OrigErr() error { +func (s *KMSDisabledException) OrigErr() error { return nil } -func (s KMSDisabledException) Error() string { +func (s *KMSDisabledException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSDisabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSDisabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSDisabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSDisabledException) RequestID() string { + return s.RespMetadata.RequestID } // Lambda was unable to decrypt the environment variables because the KMS key // used is in an invalid state for Decrypt. Check the function's KMS key settings. type KMSInvalidStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -9736,17 +10100,17 @@ func (s KMSInvalidStateException) GoString() string { func newErrorKMSInvalidStateException(v protocol.ResponseMetadata) error { return &KMSInvalidStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSInvalidStateException) Code() string { +func (s *KMSInvalidStateException) Code() string { return "KMSInvalidStateException" } // Message returns the exception's message. -func (s KMSInvalidStateException) Message() string { +func (s *KMSInvalidStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9754,29 +10118,29 @@ func (s KMSInvalidStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSInvalidStateException) OrigErr() error { +func (s *KMSInvalidStateException) OrigErr() error { return nil } -func (s KMSInvalidStateException) Error() string { +func (s *KMSInvalidStateException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSInvalidStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSInvalidStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s KMSInvalidStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSInvalidStateException) RequestID() string { + return s.RespMetadata.RequestID } // Lambda was unable to decrypt the environment variables because the KMS key // was not found. Check the function's KMS key settings. type KMSNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -9795,17 +10159,17 @@ func (s KMSNotFoundException) GoString() string { func newErrorKMSNotFoundException(v protocol.ResponseMetadata) error { return &KMSNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s KMSNotFoundException) Code() string { +func (s *KMSNotFoundException) Code() string { return "KMSNotFoundException" } // Message returns the exception's message. -func (s KMSNotFoundException) Message() string { +func (s *KMSNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9813,22 +10177,22 @@ func (s KMSNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s KMSNotFoundException) OrigErr() error { +func (s *KMSNotFoundException) OrigErr() error { return nil } -func (s KMSNotFoundException) Error() string { +func (s *KMSNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s KMSNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *KMSNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s KMSNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *KMSNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // An AWS Lambda layer (https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html). @@ -10219,6 +10583,8 @@ type ListEventSourceMappingsInput struct { // * Amazon DynamoDB Streams - The ARN of the stream. // // * Amazon Simple Queue Service - The ARN of the queue. + // + // * Amazon Managed Streaming for Apache Kafka - The ARN of the cluster. EventSourceArn *string `location:"querystring" locationName:"EventSourceArn" type:"string"` // The name of the Lambda function. @@ -11048,8 +11414,8 @@ func (s *OnSuccess) SetDestination(v string) *OnSuccess { // The permissions policy for the resource is too large. Learn more (https://docs.aws.amazon.com/lambda/latest/dg/limits.html) type PolicyLengthExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -11068,17 +11434,17 @@ func (s PolicyLengthExceededException) GoString() string { func newErrorPolicyLengthExceededException(v protocol.ResponseMetadata) error { return &PolicyLengthExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PolicyLengthExceededException) Code() string { +func (s *PolicyLengthExceededException) Code() string { return "PolicyLengthExceededException" } // Message returns the exception's message. 
-func (s PolicyLengthExceededException) Message() string { +func (s *PolicyLengthExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11086,30 +11452,30 @@ func (s PolicyLengthExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PolicyLengthExceededException) OrigErr() error { +func (s *PolicyLengthExceededException) OrigErr() error { return nil } -func (s PolicyLengthExceededException) Error() string { +func (s *PolicyLengthExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s PolicyLengthExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PolicyLengthExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PolicyLengthExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *PolicyLengthExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The RevisionId provided does not match the latest RevisionId for the Lambda // function or alias. Call the GetFunction or the GetAlias API to retrieve the // latest RevisionId for your resource. type PreconditionFailedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The exception message. Message_ *string `locationName:"message" type:"string"` @@ -11130,17 +11496,17 @@ func (s PreconditionFailedException) GoString() string { func newErrorPreconditionFailedException(v protocol.ResponseMetadata) error { return &PreconditionFailedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PreconditionFailedException) Code() string { +func (s *PreconditionFailedException) Code() string { return "PreconditionFailedException" } // Message returns the exception's message. -func (s PreconditionFailedException) Message() string { +func (s *PreconditionFailedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11148,22 +11514,22 @@ func (s PreconditionFailedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PreconditionFailedException) OrigErr() error { +func (s *PreconditionFailedException) OrigErr() error { return nil } -func (s PreconditionFailedException) Error() string { +func (s *PreconditionFailedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s PreconditionFailedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PreconditionFailedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PreconditionFailedException) RequestID() string { - return s.respMetadata.RequestID +func (s *PreconditionFailedException) RequestID() string { + return s.RespMetadata.RequestID } // Details about the provisioned concurrency configuration for a function alias @@ -11249,8 +11615,8 @@ func (s *ProvisionedConcurrencyConfigListItem) SetStatusReason(v string) *Provis // The specified configuration does not exist. 
type ProvisionedConcurrencyConfigNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -11269,17 +11635,17 @@ func (s ProvisionedConcurrencyConfigNotFoundException) GoString() string { func newErrorProvisionedConcurrencyConfigNotFoundException(v protocol.ResponseMetadata) error { return &ProvisionedConcurrencyConfigNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ProvisionedConcurrencyConfigNotFoundException) Code() string { +func (s *ProvisionedConcurrencyConfigNotFoundException) Code() string { return "ProvisionedConcurrencyConfigNotFoundException" } // Message returns the exception's message. -func (s ProvisionedConcurrencyConfigNotFoundException) Message() string { +func (s *ProvisionedConcurrencyConfigNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11287,22 +11653,22 @@ func (s ProvisionedConcurrencyConfigNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ProvisionedConcurrencyConfigNotFoundException) OrigErr() error { +func (s *ProvisionedConcurrencyConfigNotFoundException) OrigErr() error { return nil } -func (s ProvisionedConcurrencyConfigNotFoundException) Error() string { +func (s *ProvisionedConcurrencyConfigNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ProvisionedConcurrencyConfigNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ProvisionedConcurrencyConfigNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ProvisionedConcurrencyConfigNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ProvisionedConcurrencyConfigNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type PublishLayerVersionInput struct { @@ -12199,8 +12565,8 @@ func (s RemovePermissionOutput) GoString() string { // The request payload exceeded the Invoke request body JSON input limit. For // more information, see Limits (https://docs.aws.amazon.com/lambda/latest/dg/limits.html). type RequestTooLargeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -12219,17 +12585,17 @@ func (s RequestTooLargeException) GoString() string { func newErrorRequestTooLargeException(v protocol.ResponseMetadata) error { return &RequestTooLargeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RequestTooLargeException) Code() string { +func (s *RequestTooLargeException) Code() string { return "RequestTooLargeException" } // Message returns the exception's message. -func (s RequestTooLargeException) Message() string { +func (s *RequestTooLargeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12237,28 +12603,28 @@ func (s RequestTooLargeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s RequestTooLargeException) OrigErr() error { +func (s *RequestTooLargeException) OrigErr() error { return nil } -func (s RequestTooLargeException) Error() string { +func (s *RequestTooLargeException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s RequestTooLargeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RequestTooLargeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RequestTooLargeException) RequestID() string { - return s.respMetadata.RequestID +func (s *RequestTooLargeException) RequestID() string { + return s.RespMetadata.RequestID } // The resource already exists, or another operation is in progress. type ResourceConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The exception message. Message_ *string `locationName:"message" type:"string"` @@ -12279,17 +12645,17 @@ func (s ResourceConflictException) GoString() string { func newErrorResourceConflictException(v protocol.ResponseMetadata) error { return &ResourceConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceConflictException) Code() string { +func (s *ResourceConflictException) Code() string { return "ResourceConflictException" } // Message returns the exception's message. -func (s ResourceConflictException) Message() string { +func (s *ResourceConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12297,30 +12663,30 @@ func (s ResourceConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceConflictException) OrigErr() error { +func (s *ResourceConflictException) OrigErr() error { return nil } -func (s ResourceConflictException) Error() string { +func (s *ResourceConflictException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceConflictException) RequestID() string { + return s.RespMetadata.RequestID } // The operation conflicts with the resource's availability. For example, you // attempted to update an EventSource Mapping in CREATING, or tried to delete // a EventSource mapping currently in the UPDATING state. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -12339,17 +12705,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12357,28 +12723,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The resource specified in the request does not exist. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -12397,17 +12763,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12415,29 +12781,29 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The function is inactive and its VPC connection is no longer available. Wait // for the VPC connection to reestablish and try again. type ResourceNotReadyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The exception message. 
Message_ *string `locationName:"message" type:"string"` @@ -12458,17 +12824,17 @@ func (s ResourceNotReadyException) GoString() string { func newErrorResourceNotReadyException(v protocol.ResponseMetadata) error { return &ResourceNotReadyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotReadyException) Code() string { +func (s *ResourceNotReadyException) Code() string { return "ResourceNotReadyException" } // Message returns the exception's message. -func (s ResourceNotReadyException) Message() string { +func (s *ResourceNotReadyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12476,28 +12842,28 @@ func (s ResourceNotReadyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotReadyException) OrigErr() error { +func (s *ResourceNotReadyException) OrigErr() error { return nil } -func (s ResourceNotReadyException) Error() string { +func (s *ResourceNotReadyException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotReadyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotReadyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotReadyException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotReadyException) RequestID() string { + return s.RespMetadata.RequestID } // The AWS Lambda service encountered an internal error. type ServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -12516,17 +12882,17 @@ func (s ServiceException) GoString() string { func newErrorServiceException(v protocol.ResponseMetadata) error { return &ServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceException) Code() string { +func (s *ServiceException) Code() string { return "ServiceException" } // Message returns the exception's message. -func (s ServiceException) Message() string { +func (s *ServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12534,29 +12900,29 @@ func (s ServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceException) OrigErr() error { +func (s *ServiceException) OrigErr() error { return nil } -func (s ServiceException) Error() string { +func (s *ServiceException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceException) RequestID() string { + return s.RespMetadata.RequestID } // AWS Lambda was not able to set up VPC access for the Lambda function because // one or more configured subnets has no available IP addresses. 
type SubnetIPAddressLimitReachedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -12575,17 +12941,17 @@ func (s SubnetIPAddressLimitReachedException) GoString() string { func newErrorSubnetIPAddressLimitReachedException(v protocol.ResponseMetadata) error { return &SubnetIPAddressLimitReachedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SubnetIPAddressLimitReachedException) Code() string { +func (s *SubnetIPAddressLimitReachedException) Code() string { return "SubnetIPAddressLimitReachedException" } // Message returns the exception's message. -func (s SubnetIPAddressLimitReachedException) Message() string { +func (s *SubnetIPAddressLimitReachedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12593,22 +12959,22 @@ func (s SubnetIPAddressLimitReachedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SubnetIPAddressLimitReachedException) OrigErr() error { +func (s *SubnetIPAddressLimitReachedException) OrigErr() error { return nil } -func (s SubnetIPAddressLimitReachedException) Error() string { +func (s *SubnetIPAddressLimitReachedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s SubnetIPAddressLimitReachedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SubnetIPAddressLimitReachedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SubnetIPAddressLimitReachedException) RequestID() string { - return s.respMetadata.RequestID +func (s *SubnetIPAddressLimitReachedException) RequestID() string { + return s.RespMetadata.RequestID } type TagResourceInput struct { @@ -12682,8 +13048,8 @@ func (s TagResourceOutput) GoString() string { // The request throughput limit was exceeded. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -12707,17 +13073,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12725,22 +13091,22 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
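The regenerated Lambda exception types above now define Code, Message, Error, StatusCode, and RequestID on pointer receivers and export RespMetadata. A minimal sketch of how calling code can consume them, assuming an existing session; the function name "example-function" is a placeholder:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// "example-function" is a placeholder function name.
	_, err := svc.Invoke(&lambda.InvokeInput{FunctionName: aws.String("example-function")})

	// The constructors above return the exception structs by pointer, so a
	// typed match with errors.As works, and the exported RespMetadata backs
	// StatusCode and RequestID.
	var throttled *lambda.TooManyRequestsException
	if errors.As(err, &throttled) {
		fmt.Println("throttled:", throttled.StatusCode(), throttled.RequestID())
	}
}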
-func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } // The function's AWS X-Ray tracing configuration. To sample and record incoming @@ -12794,8 +13160,8 @@ func (s *TracingConfigResponse) SetMode(v string) *TracingConfigResponse { // The content type of the Invoke request body is not JSON. type UnsupportedMediaTypeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -12814,17 +13180,17 @@ func (s UnsupportedMediaTypeException) GoString() string { func newErrorUnsupportedMediaTypeException(v protocol.ResponseMetadata) error { return &UnsupportedMediaTypeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedMediaTypeException) Code() string { +func (s *UnsupportedMediaTypeException) Code() string { return "UnsupportedMediaTypeException" } // Message returns the exception's message. -func (s UnsupportedMediaTypeException) Message() string { +func (s *UnsupportedMediaTypeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12832,22 +13198,22 @@ func (s UnsupportedMediaTypeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedMediaTypeException) OrigErr() error { +func (s *UnsupportedMediaTypeException) OrigErr() error { return nil } -func (s UnsupportedMediaTypeException) Error() string { +func (s *UnsupportedMediaTypeException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedMediaTypeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedMediaTypeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedMediaTypeException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedMediaTypeException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -12954,7 +13320,7 @@ type UpdateAliasInput struct { // read it. RevisionId *string `type:"string"` - // The routing configuration (https://docs.aws.amazon.com/lambda/latest/dg/lambda-traffic-shifting-using-aliases.html) + // The routing configuration (https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html#configuring-alias-routing) // of the alias. RoutingConfig *AliasRoutingConfiguration `type:"structure"` } @@ -13040,6 +13406,8 @@ type UpdateEventSourceMappingInput struct { // * Amazon DynamoDB Streams - Default 100. Max 1,000. 
// // * Amazon Simple Queue Service - Default 10. Max 10. + // + // * Amazon Managed Streaming for Apache Kafka - Default 100. Max 10,000. BatchSize *int64 `min:"1" type:"integer"` // (Streams) If the function returns an error, split the batch in two and retry. @@ -13049,7 +13417,8 @@ type UpdateEventSourceMappingInput struct { // records. DestinationConfig *DestinationConfig `type:"structure"` - // Disables the event source mapping to pause polling and invocation. + // If true, the event source mapping is active. Set to false to pause polling + // and invocation. Enabled *bool `type:"boolean"` // The name of the Lambda function. @@ -13068,16 +13437,17 @@ type UpdateEventSourceMappingInput struct { // function name, it's limited to 64 characters in length. FunctionName *string `min:"1" type:"string"` - // The maximum amount of time to gather records before invoking the function, - // in seconds. + // (Streams) The maximum amount of time to gather records before invoking the + // function, in seconds. MaximumBatchingWindowInSeconds *int64 `type:"integer"` - // (Streams) The maximum age of a record that Lambda sends to a function for - // processing. - MaximumRecordAgeInSeconds *int64 `min:"60" type:"integer"` + // (Streams) Discard records older than the specified age. The default value + // is infinite (-1). + MaximumRecordAgeInSeconds *int64 `type:"integer"` - // (Streams) The maximum number of times to retry when the function returns - // an error. + // (Streams) Discard records after the specified number of retries. The default + // value is infinite (-1). When set to infinite (-1), failed records will be + // retried until the record expires. MaximumRetryAttempts *int64 `type:"integer"` // (Streams) The number of batches to process from each shard concurrently. @@ -13108,8 +13478,11 @@ func (s *UpdateEventSourceMappingInput) Validate() error { if s.FunctionName != nil && len(*s.FunctionName) < 1 { invalidParams.Add(request.NewErrParamMinLen("FunctionName", 1)) } - if s.MaximumRecordAgeInSeconds != nil && *s.MaximumRecordAgeInSeconds < 60 { - invalidParams.Add(request.NewErrParamMinValue("MaximumRecordAgeInSeconds", 60)) + if s.MaximumRecordAgeInSeconds != nil && *s.MaximumRecordAgeInSeconds < -1 { + invalidParams.Add(request.NewErrParamMinValue("MaximumRecordAgeInSeconds", -1)) + } + if s.MaximumRetryAttempts != nil && *s.MaximumRetryAttempts < -1 { + invalidParams.Add(request.NewErrParamMinValue("MaximumRetryAttempts", -1)) } if s.ParallelizationFactor != nil && *s.ParallelizationFactor < 1 { invalidParams.Add(request.NewErrParamMinValue("ParallelizationFactor", 1)) @@ -13333,6 +13706,9 @@ type UpdateFunctionConfigurationInput struct { // Environment variables that are accessible from function code during execution. Environment *Environment `type:"structure"` + // Connection settings for an Amazon EFS file system. + FileSystemConfigs []*FileSystemConfig `type:"list"` + // The name of the Lambda function. 
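The relaxed validation above lets stream event source mappings use -1 to mean infinite for both record age and retry attempts. A small sketch under those assumptions; the mapping UUID is a placeholder:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	svc := lambda.New(session.Must(session.NewSession()))

	// The mapping UUID is a placeholder.
	_, err := svc.UpdateEventSourceMapping(&lambda.UpdateEventSourceMappingInput{
		UUID:                      aws.String("00000000-0000-0000-0000-000000000000"),
		MaximumRecordAgeInSeconds: aws.Int64(-1), // infinite; rejected by the previous min:"60" validation
		MaximumRetryAttempts:      aws.Int64(-1), // retry failed records until they expire
	})
	if err != nil {
		log.Println("UpdateEventSourceMapping:", err)
	}
}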
// // Name formats @@ -13421,6 +13797,16 @@ func (s *UpdateFunctionConfigurationInput) Validate() error { if s.Timeout != nil && *s.Timeout < 1 { invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) } + if s.FileSystemConfigs != nil { + for i, v := range s.FileSystemConfigs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FileSystemConfigs", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -13446,6 +13832,12 @@ func (s *UpdateFunctionConfigurationInput) SetEnvironment(v *Environment) *Updat return s } +// SetFileSystemConfigs sets the FileSystemConfigs field's value. +func (s *UpdateFunctionConfigurationInput) SetFileSystemConfigs(v []*FileSystemConfig) *UpdateFunctionConfigurationInput { + s.FileSystemConfigs = v + return s +} + // SetFunctionName sets the FunctionName field's value. func (s *UpdateFunctionConfigurationInput) SetFunctionName(v string) *UpdateFunctionConfigurationInput { s.FunctionName = &v @@ -13773,11 +14165,27 @@ const ( EventSourcePositionAtTimestamp = "AT_TIMESTAMP" ) +// EventSourcePosition_Values returns all elements of the EventSourcePosition enum +func EventSourcePosition_Values() []string { + return []string{ + EventSourcePositionTrimHorizon, + EventSourcePositionLatest, + EventSourcePositionAtTimestamp, + } +} + const ( // FunctionVersionAll is a FunctionVersion enum value FunctionVersionAll = "ALL" ) +// FunctionVersion_Values returns all elements of the FunctionVersion enum +func FunctionVersion_Values() []string { + return []string{ + FunctionVersionAll, + } +} + const ( // InvocationTypeEvent is a InvocationType enum value InvocationTypeEvent = "Event" @@ -13789,6 +14197,15 @@ const ( InvocationTypeDryRun = "DryRun" ) +// InvocationType_Values returns all elements of the InvocationType enum +func InvocationType_Values() []string { + return []string{ + InvocationTypeEvent, + InvocationTypeRequestResponse, + InvocationTypeDryRun, + } +} + const ( // LastUpdateStatusSuccessful is a LastUpdateStatus enum value LastUpdateStatusSuccessful = "Successful" @@ -13800,6 +14217,15 @@ const ( LastUpdateStatusInProgress = "InProgress" ) +// LastUpdateStatus_Values returns all elements of the LastUpdateStatus enum +func LastUpdateStatus_Values() []string { + return []string{ + LastUpdateStatusSuccessful, + LastUpdateStatusFailed, + LastUpdateStatusInProgress, + } +} + const ( // LastUpdateStatusReasonCodeEniLimitExceeded is a LastUpdateStatusReasonCode enum value LastUpdateStatusReasonCodeEniLimitExceeded = "EniLimitExceeded" @@ -13823,6 +14249,19 @@ const ( LastUpdateStatusReasonCodeInvalidSecurityGroup = "InvalidSecurityGroup" ) +// LastUpdateStatusReasonCode_Values returns all elements of the LastUpdateStatusReasonCode enum +func LastUpdateStatusReasonCode_Values() []string { + return []string{ + LastUpdateStatusReasonCodeEniLimitExceeded, + LastUpdateStatusReasonCodeInsufficientRolePermissions, + LastUpdateStatusReasonCodeInvalidConfiguration, + LastUpdateStatusReasonCodeInternalError, + LastUpdateStatusReasonCodeSubnetOutOfIpaddresses, + LastUpdateStatusReasonCodeInvalidSubnet, + LastUpdateStatusReasonCodeInvalidSecurityGroup, + } +} + const ( // LogTypeNone is a LogType enum value LogTypeNone = "None" @@ -13831,6 +14270,14 @@ const ( LogTypeTail = "Tail" ) +// LogType_Values returns all elements of the LogType enum +func LogType_Values() []string { + return []string{ + LogTypeNone, + LogTypeTail, + } +} + const ( // 
ProvisionedConcurrencyStatusEnumInProgress is a ProvisionedConcurrencyStatusEnum enum value ProvisionedConcurrencyStatusEnumInProgress = "IN_PROGRESS" @@ -13842,6 +14289,15 @@ const ( ProvisionedConcurrencyStatusEnumFailed = "FAILED" ) +// ProvisionedConcurrencyStatusEnum_Values returns all elements of the ProvisionedConcurrencyStatusEnum enum +func ProvisionedConcurrencyStatusEnum_Values() []string { + return []string{ + ProvisionedConcurrencyStatusEnumInProgress, + ProvisionedConcurrencyStatusEnumReady, + ProvisionedConcurrencyStatusEnumFailed, + } +} + const ( // RuntimeNodejs is a Runtime enum value RuntimeNodejs = "nodejs" @@ -13864,6 +14320,9 @@ const ( // RuntimeJava8 is a Runtime enum value RuntimeJava8 = "java8" + // RuntimeJava8Al2 is a Runtime enum value + RuntimeJava8Al2 = "java8.al2" + // RuntimeJava11 is a Runtime enum value RuntimeJava11 = "java11" @@ -13888,6 +14347,9 @@ const ( // RuntimeDotnetcore21 is a Runtime enum value RuntimeDotnetcore21 = "dotnetcore2.1" + // RuntimeDotnetcore31 is a Runtime enum value + RuntimeDotnetcore31 = "dotnetcore3.1" + // RuntimeNodejs43Edge is a Runtime enum value RuntimeNodejs43Edge = "nodejs4.3-edge" @@ -13902,8 +14364,40 @@ const ( // RuntimeProvided is a Runtime enum value RuntimeProvided = "provided" + + // RuntimeProvidedAl2 is a Runtime enum value + RuntimeProvidedAl2 = "provided.al2" ) +// Runtime_Values returns all elements of the Runtime enum +func Runtime_Values() []string { + return []string{ + RuntimeNodejs, + RuntimeNodejs43, + RuntimeNodejs610, + RuntimeNodejs810, + RuntimeNodejs10X, + RuntimeNodejs12X, + RuntimeJava8, + RuntimeJava8Al2, + RuntimeJava11, + RuntimePython27, + RuntimePython36, + RuntimePython37, + RuntimePython38, + RuntimeDotnetcore10, + RuntimeDotnetcore20, + RuntimeDotnetcore21, + RuntimeDotnetcore31, + RuntimeNodejs43Edge, + RuntimeGo1X, + RuntimeRuby25, + RuntimeRuby27, + RuntimeProvided, + RuntimeProvidedAl2, + } +} + const ( // StatePending is a State enum value StatePending = "Pending" @@ -13918,6 +14412,16 @@ const ( StateFailed = "Failed" ) +// State_Values returns all elements of the State enum +func State_Values() []string { + return []string{ + StatePending, + StateActive, + StateInactive, + StateFailed, + } +} + const ( // StateReasonCodeIdle is a StateReasonCode enum value StateReasonCodeIdle = "Idle" @@ -13950,6 +14454,22 @@ const ( StateReasonCodeInvalidSecurityGroup = "InvalidSecurityGroup" ) +// StateReasonCode_Values returns all elements of the StateReasonCode enum +func StateReasonCode_Values() []string { + return []string{ + StateReasonCodeIdle, + StateReasonCodeCreating, + StateReasonCodeRestoring, + StateReasonCodeEniLimitExceeded, + StateReasonCodeInsufficientRolePermissions, + StateReasonCodeInvalidConfiguration, + StateReasonCodeInternalError, + StateReasonCodeSubnetOutOfIpaddresses, + StateReasonCodeInvalidSubnet, + StateReasonCodeInvalidSecurityGroup, + } +} + const ( // ThrottleReasonConcurrentInvocationLimitExceeded is a ThrottleReason enum value ThrottleReasonConcurrentInvocationLimitExceeded = "ConcurrentInvocationLimitExceeded" @@ -13967,6 +14487,17 @@ const ( ThrottleReasonCallerRateLimitExceeded = "CallerRateLimitExceeded" ) +// ThrottleReason_Values returns all elements of the ThrottleReason enum +func ThrottleReason_Values() []string { + return []string{ + ThrottleReasonConcurrentInvocationLimitExceeded, + ThrottleReasonFunctionInvocationRateLimitExceeded, + ThrottleReasonReservedFunctionConcurrentInvocationLimitExceeded, + 
ThrottleReasonReservedFunctionInvocationRateLimitExceeded, + ThrottleReasonCallerRateLimitExceeded, + } +} + const ( // TracingModeActive is a TracingMode enum value TracingModeActive = "Active" @@ -13974,3 +14505,11 @@ const ( // TracingModePassThrough is a TracingMode enum value TracingModePassThrough = "PassThrough" ) + +// TracingMode_Values returns all elements of the TracingMode enum +func TracingMode_Values() []string { + return []string{ + TracingModeActive, + TracingModePassThrough, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/errors.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/errors.go index 739daeca0..4cee20127 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lambda/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/errors.go @@ -34,6 +34,32 @@ const ( // the Lambda function. ErrCodeEC2UnexpectedException = "EC2UnexpectedException" + // ErrCodeEFSIOException for service response error code + // "EFSIOException". + // + // An error occured when reading from or writing to a connected file system. + ErrCodeEFSIOException = "EFSIOException" + + // ErrCodeEFSMountConnectivityException for service response error code + // "EFSMountConnectivityException". + // + // The function couldn't make a network connection to the configured file system. + ErrCodeEFSMountConnectivityException = "EFSMountConnectivityException" + + // ErrCodeEFSMountFailureException for service response error code + // "EFSMountFailureException". + // + // The function couldn't mount the configured file system due to a permission + // or configuration issue. + ErrCodeEFSMountFailureException = "EFSMountFailureException" + + // ErrCodeEFSMountTimeoutException for service response error code + // "EFSMountTimeoutException". + // + // The function was able to make a network connection to the configured file + // system, but the mount operation timed out. + ErrCodeEFSMountTimeoutException = "EFSMountTimeoutException" + // ErrCodeENILimitReachedException for service response error code // "ENILimitReachedException". 
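The EFS error codes added above let callers distinguish file system problems from generic ServiceException failures. A sketch of one way to classify them, assuming the usual awserr type assertion; the package and helper names are illustrative only:

package lambdaexample

import (
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/lambda"
)

// isEFSError reports whether an invocation error carries one of the EFS
// error codes introduced above.
func isEFSError(err error) bool {
	aerr, ok := err.(awserr.Error)
	if !ok {
		return false
	}
	switch aerr.Code() {
	case lambda.ErrCodeEFSIOException,
		lambda.ErrCodeEFSMountConnectivityException,
		lambda.ErrCodeEFSMountFailureException,
		lambda.ErrCodeEFSMountTimeoutException:
		return true
	}
	return false
}

Callers that prefer typed matching can use errors.As with the corresponding exception structs instead of comparing code strings.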
// @@ -192,6 +218,10 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "EC2AccessDeniedException": newErrorEC2AccessDeniedException, "EC2ThrottledException": newErrorEC2ThrottledException, "EC2UnexpectedException": newErrorEC2UnexpectedException, + "EFSIOException": newErrorEFSIOException, + "EFSMountConnectivityException": newErrorEFSMountConnectivityException, + "EFSMountFailureException": newErrorEFSMountFailureException, + "EFSMountTimeoutException": newErrorEFSMountTimeoutException, "ENILimitReachedException": newErrorENILimitReachedException, "InvalidParameterValueException": newErrorInvalidParameterValueException, "InvalidRequestContentException": newErrorInvalidRequestContentException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go index 8b7ea172a..20a8ee892 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/api.go index 5be4ae6e2..17504cf41 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/api.go @@ -4545,8 +4545,8 @@ func (c *LexModelBuildingService) UntagResourceWithContext(ctx aws.Context, inpu // The request is not well formed. For example, a value is invalid or a required // field is missing. Check the field values, and try again. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4563,17 +4563,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4581,22 +4581,22 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Provides information about a bot alias. @@ -5025,8 +5025,8 @@ func (s *CodeHook) SetUri(v string) *CodeHook { // There was a conflict processing the request. Try your request again. type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5043,17 +5043,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5061,22 +5061,22 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } // Provides the settings needed for conversation logs. @@ -5242,7 +5242,7 @@ func (s *CreateBotVersionInput) SetName(v string) *CreateBotVersionInput { type CreateBotVersionOutput struct { _ struct{} `type:"structure"` - // The message that Amazon Lex uses to abort a conversation. For more information, + // The message that Amazon Lex uses to cancel a conversation. For more information, // see PutBot. AbortStatement *Statement `locationName:"abortStatement" type:"structure"` @@ -5288,6 +5288,10 @@ type CreateBotVersionOutput struct { // Comprehend for sentiment analysis. DetectSentiment *bool `locationName:"detectSentiment" type:"boolean"` + // Indicates whether the bot uses accuracy improvements. true indicates that + // the bot is using the improvements, otherwise, false. + EnableModelImprovements *bool `locationName:"enableModelImprovements" type:"boolean"` + // If status is FAILED, Amazon Lex provides the reason that it failed to build // the bot. FailureReason *string `locationName:"failureReason" type:"string"` @@ -5374,6 +5378,12 @@ func (s *CreateBotVersionOutput) SetDetectSentiment(v bool) *CreateBotVersionOut return s } +// SetEnableModelImprovements sets the EnableModelImprovements field's value. +func (s *CreateBotVersionOutput) SetEnableModelImprovements(v bool) *CreateBotVersionOutput { + s.EnableModelImprovements = &v + return s +} + // SetFailureReason sets the FailureReason field's value. 
func (s *CreateBotVersionOutput) SetFailureReason(v string) *CreateBotVersionOutput { s.FailureReason = &v @@ -5513,6 +5523,10 @@ type CreateIntentVersionOutput struct { // Describes how the intent is fulfilled. FulfillmentActivity *FulfillmentActivity `locationName:"fulfillmentActivity" type:"structure"` + // Configuration information, if any, for connecting an Amazon Kendra index + // with the AMAZON.KendraSearchIntent intent. + KendraConfiguration *KendraConfiguration `locationName:"kendraConfiguration" type:"structure"` + // The date that the intent was updated. LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp"` @@ -5595,6 +5609,12 @@ func (s *CreateIntentVersionOutput) SetFulfillmentActivity(v *FulfillmentActivit return s } +// SetKendraConfiguration sets the KendraConfiguration field's value. +func (s *CreateIntentVersionOutput) SetKendraConfiguration(v *KendraConfiguration) *CreateIntentVersionOutput { + s.KendraConfiguration = v + return s +} + // SetLastUpdatedDate sets the LastUpdatedDate field's value. func (s *CreateIntentVersionOutput) SetLastUpdatedDate(v time.Time) *CreateIntentVersionOutput { s.LastUpdatedDate = &v @@ -7304,6 +7324,10 @@ type GetBotOutput struct { // sentiment analysis. DetectSentiment *bool `locationName:"detectSentiment" type:"boolean"` + // Indicates whether the bot uses accuracy improvements. true indicates that + // the bot is using the improvements, otherwise, false. + EnableModelImprovements *bool `locationName:"enableModelImprovements" type:"boolean"` + // If status is FAILED, Amazon Lex explains why it failed to build the bot. FailureReason *string `locationName:"failureReason" type:"string"` @@ -7324,6 +7348,15 @@ type GetBotOutput struct { // The name of the bot. Name *string `locationName:"name" min:"2" type:"string"` + // The score that determines where Amazon Lex inserts the AMAZON.FallbackIntent, + // AMAZON.KendraSearchIntent, or both when returning alternative intents in + // a PostContent (https://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostContent.html) + // or PostText (https://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostText.html) + // response. AMAZON.FallbackIntent is inserted if the confidence score for all + // intents is below this value. AMAZON.KendraSearchIntent is only inserted if + // it is configured for the bot. + NluIntentConfidenceThreshold *float64 `locationName:"nluIntentConfidenceThreshold" type:"double"` + // The status of the bot. // // When the status is BUILDING Amazon Lex is building the bot for testing and @@ -7399,6 +7432,12 @@ func (s *GetBotOutput) SetDetectSentiment(v bool) *GetBotOutput { return s } +// SetEnableModelImprovements sets the EnableModelImprovements field's value. +func (s *GetBotOutput) SetEnableModelImprovements(v bool) *GetBotOutput { + s.EnableModelImprovements = &v + return s +} + // SetFailureReason sets the FailureReason field's value. func (s *GetBotOutput) SetFailureReason(v string) *GetBotOutput { s.FailureReason = &v @@ -7435,6 +7474,12 @@ func (s *GetBotOutput) SetName(v string) *GetBotOutput { return s } +// SetNluIntentConfidenceThreshold sets the NluIntentConfidenceThreshold field's value. +func (s *GetBotOutput) SetNluIntentConfidenceThreshold(v float64) *GetBotOutput { + s.NluIntentConfidenceThreshold = &v + return s +} + // SetStatus sets the Status field's value. func (s *GetBotOutput) SetStatus(v string) *GetBotOutput { s.Status = &v @@ -8325,6 +8370,10 @@ type GetIntentOutput struct { // Describes how the intent is fulfilled. 
For more information, see PutIntent. FulfillmentActivity *FulfillmentActivity `locationName:"fulfillmentActivity" type:"structure"` + // Configuration information, if any, to connect to an Amazon Kendra index with + // the AMAZON.KendraSearchIntent intent. + KendraConfiguration *KendraConfiguration `locationName:"kendraConfiguration" type:"structure"` + // The date that the intent was updated. When you create a resource, the creation // date and the last updated date are the same. LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp"` @@ -8407,6 +8456,12 @@ func (s *GetIntentOutput) SetFulfillmentActivity(v *FulfillmentActivity) *GetInt return s } +// SetKendraConfiguration sets the KendraConfiguration field's value. +func (s *GetIntentOutput) SetKendraConfiguration(v *KendraConfiguration) *GetIntentOutput { + s.KendraConfiguration = v + return s +} + // SetLastUpdatedDate sets the LastUpdatedDate field's value. func (s *GetIntentOutput) SetLastUpdatedDate(v time.Time) *GetIntentOutput { s.LastUpdatedDate = &v @@ -9242,8 +9297,8 @@ func (s *IntentMetadata) SetVersion(v string) *IntentMetadata { // An internal Amazon Lex error occurred. Try your request again. type InternalFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9260,17 +9315,17 @@ func (s InternalFailureException) GoString() string { func newErrorInternalFailureException(v protocol.ResponseMetadata) error { return &InternalFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalFailureException) Code() string { +func (s *InternalFailureException) Code() string { return "InternalFailureException" } // Message returns the exception's message. -func (s InternalFailureException) Message() string { +func (s *InternalFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9278,28 +9333,109 @@ func (s InternalFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalFailureException) OrigErr() error { +func (s *InternalFailureException) OrigErr() error { return nil } -func (s InternalFailureException) Error() string { +func (s *InternalFailureException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalFailureException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Provides configuration information for the AMAZON.KendraSearchIntent intent. +// When you use this intent, Amazon Lex searches the specified Amazon Kendra +// index and returns documents from the index that match the user's utterance. +// For more information, see AMAZON.KendraSearchIntent (http://docs.aws.amazon.com/lex/latest/dg/built-in-intent-kendra-search.html). 
+type KendraConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon Kendra index that you want the + // AMAZON.KendraSearchIntent intent to search. The index must be in the same + // account and Region as the Amazon Lex bot. If the Amazon Kendra index does + // not exist, you get an exception when you call the PutIntent operation. + // + // KendraIndex is a required field + KendraIndex *string `locationName:"kendraIndex" min:"20" type:"string" required:"true"` + + // A query filter that Amazon Lex sends to Amazon Kendra to filter the response + // from the query. The filter is in the format defined by Amazon Kendra. For + // more information, see Filtering queries (http://docs.aws.amazon.com/kendra/latest/dg/filtering.html). + // + // You can override this filter string with a new filter string at runtime. + QueryFilterString *string `locationName:"queryFilterString" type:"string"` + + // The Amazon Resource Name (ARN) of an IAM role that has permission to search + // the Amazon Kendra index. The role must be in the same account and Region + // as the Amazon Lex bot. If the role does not exist, you get an exception when + // you call the PutIntent operation. + // + // Role is a required field + Role *string `locationName:"role" min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s KendraConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KendraConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *KendraConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KendraConfiguration"} + if s.KendraIndex == nil { + invalidParams.Add(request.NewErrParamRequired("KendraIndex")) + } + if s.KendraIndex != nil && len(*s.KendraIndex) < 20 { + invalidParams.Add(request.NewErrParamMinLen("KendraIndex", 20)) + } + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Role != nil && len(*s.Role) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Role", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKendraIndex sets the KendraIndex field's value. +func (s *KendraConfiguration) SetKendraIndex(v string) *KendraConfiguration { + s.KendraIndex = &v + return s +} + +// SetQueryFilterString sets the QueryFilterString field's value. +func (s *KendraConfiguration) SetQueryFilterString(v string) *KendraConfiguration { + s.QueryFilterString = &v + return s +} + +// SetRole sets the Role field's value. +func (s *KendraConfiguration) SetRole(v string) *KendraConfiguration { + s.Role = &v + return s } // The request exceeded a limit. Try your request again. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -9318,17 +9454,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. 
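The new KendraConfiguration type above validates the index and role ARNs (20-character minimum) before the request is sent. A sketch of wiring it into PutIntent via the SetKendraConfiguration setter added later in this diff; the ARNs and intent name are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lexmodelbuildingservice"
)

func main() {
	svc := lexmodelbuildingservice.New(session.Must(session.NewSession()))

	// Index ARN, role ARN, and intent name are placeholders.
	input := &lexmodelbuildingservice.PutIntentInput{
		Name: aws.String("ExampleSearchIntent"),
	}
	input.SetKendraConfiguration(&lexmodelbuildingservice.KendraConfiguration{
		KendraIndex: aws.String("arn:aws:kendra:us-east-1:123456789012:index/example"),
		Role:        aws.String("arn:aws:iam::123456789012:role/example-lex-kendra"),
	})

	if _, err := svc.PutIntent(input); err != nil {
		log.Println("PutIntent:", err)
	}
}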
-func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9336,22 +9472,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListTagsForResourceInput struct { @@ -9643,8 +9779,8 @@ func (s *Message) SetGroupNumber(v int64) *Message { // The resource specified in the request was not found. Check the resource and // try again. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9661,17 +9797,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9679,29 +9815,29 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The checksum of the resource that you are trying to change does not match // the checksum in the request. Check the resource's checksum and try again. 
type PreconditionFailedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9718,17 +9854,17 @@ func (s PreconditionFailedException) GoString() string { func newErrorPreconditionFailedException(v protocol.ResponseMetadata) error { return &PreconditionFailedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PreconditionFailedException) Code() string { +func (s *PreconditionFailedException) Code() string { return "PreconditionFailedException" } // Message returns the exception's message. -func (s PreconditionFailedException) Message() string { +func (s *PreconditionFailedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9736,22 +9872,22 @@ func (s PreconditionFailedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PreconditionFailedException) OrigErr() error { +func (s *PreconditionFailedException) OrigErr() error { return nil } -func (s PreconditionFailedException) Error() string { +func (s *PreconditionFailedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PreconditionFailedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PreconditionFailedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PreconditionFailedException) RequestID() string { - return s.respMetadata.RequestID +func (s *PreconditionFailedException) RequestID() string { + return s.RespMetadata.RequestID } // Obtains information from the user. To define a prompt, provide one or more @@ -10080,7 +10216,7 @@ type PutBotInput struct { // When Amazon Lex can't understand the user's input in context, it tries to // elicit the information a few times. After that, Amazon Lex sends the message - // defined in abortStatement to the user, and then aborts the conversation. + // defined in abortStatement to the user, and then cancels the conversation. // To set the number of retries, use the valueElicitationPrompt field for the // slot type. // @@ -10093,7 +10229,7 @@ type PutBotInput struct { // the intents. This intent might require the CrustType slot. You specify the // valueElicitationPrompt field when you create the CrustType slot. // - // If you have defined a fallback intent the abort statement will not be sent + // If you have defined a fallback intent the cancel statement will not be sent // to the user, the fallback intent is used instead. For more information, see // AMAZON.FallbackIntent (https://docs.aws.amazon.com/lex/latest/dg/built-in-intent-fallback.html). AbortStatement *Statement `locationName:"abortStatement" type:"structure"` @@ -10180,6 +10316,39 @@ type PutBotInput struct { // analysis. If you don't specify detectSentiment, the default is false. DetectSentiment *bool `locationName:"detectSentiment" type:"boolean"` + // Set to true to enable access to natural language understanding improvements. + // + // When you set the enableModelImprovements parameter to true you can use the + // nluIntentConfidenceThreshold parameter to configure confidence scores. 
For + // more information, see Confidence Scores (https://docs.aws.amazon.com/lex/latest/dg/confidence-scores.html). + // + // You can only set the enableModelImprovements parameter in certain Regions. + // If you set the parameter to true, your bot has access to accuracy improvements. + // + // The Regions where you can set the enableModelImprovements parameter to true + // are: + // + // * US East (N. Virginia) (us-east-1) + // + // * US West (Oregon) (us-west-2) + // + // * Asia Pacific (Sydney) (ap-southeast-2) + // + // * EU (Ireland) (eu-west-1) + // + // In other Regions, the enableModelImprovements parameter is set to true by + // default. In these Regions setting the parameter to false throws a ValidationException + // exception. + // + // * Asia Pacific (Singapore) (ap-southeast-1) + // + // * Asia Pacific (Tokyo) (ap-northeast-1) + // + // * EU (Frankfurt) (eu-central-1) + // + // * EU (London) (eu-west-2) + EnableModelImprovements *bool `locationName:"enableModelImprovements" type:"boolean"` + // The maximum time in seconds that Amazon Lex retains the data gathered in // a conversation. // @@ -10217,6 +10386,41 @@ type PutBotInput struct { // Name is a required field Name *string `location:"uri" locationName:"name" min:"2" type:"string" required:"true"` + // Determines the threshold where Amazon Lex will insert the AMAZON.FallbackIntent, + // AMAZON.KendraSearchIntent, or both when returning alternative intents in + // a PostContent (https://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostContent.html) + // or PostText (https://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostText.html) + // response. AMAZON.FallbackIntent and AMAZON.KendraSearchIntent are only inserted + // if they are configured for the bot. + // + // You must set the enableModelImprovements parameter to true to use confidence + // scores. + // + // * US East (N. Virginia) (us-east-1) + // + // * US West (Oregon) (us-west-2) + // + // * Asia Pacific (Sydney) (ap-southeast-2) + // + // * EU (Ireland) (eu-west-1) + // + // In other Regions, the enableModelImprovements parameter is set to true by + // default. + // + // For example, suppose a bot is configured with the confidence threshold of + // 0.80 and the AMAZON.FallbackIntent. Amazon Lex returns three alternative + // intents with the following confidence scores: IntentA (0.70), IntentB (0.60), + // IntentC (0.50). The response from the PostText operation would be: + // + // * AMAZON.FallbackIntent + // + // * IntentA + // + // * IntentB + // + // * IntentC + NluIntentConfidenceThreshold *float64 `locationName:"nluIntentConfidenceThreshold" type:"double"` + // If you set the processBehavior element to BUILD, Amazon Lex builds the bot // so that it can be run. If you set the element to SAVE Amazon Lex saves the // bot, but doesn't build it. @@ -10343,6 +10547,12 @@ func (s *PutBotInput) SetDetectSentiment(v bool) *PutBotInput { return s } +// SetEnableModelImprovements sets the EnableModelImprovements field's value. +func (s *PutBotInput) SetEnableModelImprovements(v bool) *PutBotInput { + s.EnableModelImprovements = &v + return s +} + // SetIdleSessionTTLInSeconds sets the IdleSessionTTLInSeconds field's value. func (s *PutBotInput) SetIdleSessionTTLInSeconds(v int64) *PutBotInput { s.IdleSessionTTLInSeconds = &v @@ -10367,6 +10577,12 @@ func (s *PutBotInput) SetName(v string) *PutBotInput { return s } +// SetNluIntentConfidenceThreshold sets the NluIntentConfidenceThreshold field's value. 
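A sketch of enabling the accuracy improvements described above on a bot, using the PutBotInput setters shown here; the bot name is a placeholder and 0.75 is an arbitrary example threshold:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lexmodelbuildingservice"
)

func main() {
	svc := lexmodelbuildingservice.New(session.Must(session.NewSession()))

	// Bot name is a placeholder; Locale and ChildDirected are required fields.
	input := &lexmodelbuildingservice.PutBotInput{
		Name:          aws.String("ExampleBot"),
		Locale:        aws.String(lexmodelbuildingservice.LocaleEnUs),
		ChildDirected: aws.Bool(false),
	}
	// The confidence threshold only takes effect when model improvements are enabled.
	input.SetEnableModelImprovements(true)
	input.SetNluIntentConfidenceThreshold(0.75)

	if _, err := svc.PutBot(input); err != nil {
		log.Println("PutBot:", err)
	}
}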
+func (s *PutBotInput) SetNluIntentConfidenceThreshold(v float64) *PutBotInput { + s.NluIntentConfidenceThreshold = &v + return s +} + // SetProcessBehavior sets the ProcessBehavior field's value. func (s *PutBotInput) SetProcessBehavior(v string) *PutBotInput { s.ProcessBehavior = &v @@ -10388,7 +10604,7 @@ func (s *PutBotInput) SetVoiceId(v string) *PutBotInput { type PutBotOutput struct { _ struct{} `type:"structure"` - // The message that Amazon Lex uses to abort a conversation. For more information, + // The message that Amazon Lex uses to cancel a conversation. For more information, // see PutBot. AbortStatement *Statement `locationName:"abortStatement" type:"structure"` @@ -10440,6 +10656,10 @@ type PutBotOutput struct { // the request, the detectSentiment field is false in the response. DetectSentiment *bool `locationName:"detectSentiment" type:"boolean"` + // Indicates whether the bot uses accuracy improvements. true indicates that + // the bot is using the improvements, otherwise, false. + EnableModelImprovements *bool `locationName:"enableModelImprovements" type:"boolean"` + // If status is FAILED, Amazon Lex provides the reason that it failed to build // the bot. FailureReason *string `locationName:"failureReason" type:"string"` @@ -10461,6 +10681,15 @@ type PutBotOutput struct { // The name of the bot. Name *string `locationName:"name" min:"2" type:"string"` + // The score that determines where Amazon Lex inserts the AMAZON.FallbackIntent, + // AMAZON.KendraSearchIntent, or both when returning alternative intents in + // a PostContent (https://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostContent.html) + // or PostText (https://docs.aws.amazon.com/lex/latest/dg/API_runtime_PostText.html) + // response. AMAZON.FallbackIntent is inserted if the confidence score for all + // intents is below this value. AMAZON.KendraSearchIntent is only inserted if + // it is configured for the bot. + NluIntentConfidenceThreshold *float64 `locationName:"nluIntentConfidenceThreshold" type:"double"` + // When you send a request to create a bot with processBehavior set to BUILD, // Amazon Lex sets the status response element to BUILDING. // @@ -10546,6 +10775,12 @@ func (s *PutBotOutput) SetDetectSentiment(v bool) *PutBotOutput { return s } +// SetEnableModelImprovements sets the EnableModelImprovements field's value. +func (s *PutBotOutput) SetEnableModelImprovements(v bool) *PutBotOutput { + s.EnableModelImprovements = &v + return s +} + // SetFailureReason sets the FailureReason field's value. func (s *PutBotOutput) SetFailureReason(v string) *PutBotOutput { s.FailureReason = &v @@ -10582,6 +10817,12 @@ func (s *PutBotOutput) SetName(v string) *PutBotOutput { return s } +// SetNluIntentConfidenceThreshold sets the NluIntentConfidenceThreshold field's value. +func (s *PutBotOutput) SetNluIntentConfidenceThreshold(v float64) *PutBotOutput { + s.NluIntentConfidenceThreshold = &v + return s +} + // SetStatus sets the Status field's value. func (s *PutBotOutput) SetStatus(v string) *PutBotOutput { s.Status = &v @@ -10693,6 +10934,11 @@ type PutIntentInput struct { // process the intent (for example, place an order with a pizzeria). FulfillmentActivity *FulfillmentActivity `locationName:"fulfillmentActivity" type:"structure"` + // Configuration information required to use the AMAZON.KendraSearchIntent intent + // to connect to an Amazon Kendra index. For more information, see AMAZON.KendraSearchIntent + // (http://docs.aws.amazon.com/lex/latest/dg/built-in-intent-kendra-search.html). 
+ KendraConfiguration *KendraConfiguration `locationName:"kendraConfiguration" type:"structure"` + // The name of the intent. The name is not case sensitive. // // The name can't match a built-in intent name, or a built-in intent name with @@ -10774,6 +11020,11 @@ func (s *PutIntentInput) Validate() error { invalidParams.AddNested("FulfillmentActivity", err.(request.ErrInvalidParams)) } } + if s.KendraConfiguration != nil { + if err := s.KendraConfiguration.Validate(); err != nil { + invalidParams.AddNested("KendraConfiguration", err.(request.ErrInvalidParams)) + } + } if s.RejectionStatement != nil { if err := s.RejectionStatement.Validate(); err != nil { invalidParams.AddNested("RejectionStatement", err.(request.ErrInvalidParams)) @@ -10844,6 +11095,12 @@ func (s *PutIntentInput) SetFulfillmentActivity(v *FulfillmentActivity) *PutInte return s } +// SetKendraConfiguration sets the KendraConfiguration field's value. +func (s *PutIntentInput) SetKendraConfiguration(v *KendraConfiguration) *PutIntentInput { + s.KendraConfiguration = v + return s +} + // SetName sets the Name field's value. func (s *PutIntentInput) SetName(v string) *PutIntentInput { s.Name = &v @@ -10912,6 +11169,10 @@ type PutIntentOutput struct { // intent. FulfillmentActivity *FulfillmentActivity `locationName:"fulfillmentActivity" type:"structure"` + // Configuration information, if any, required to connect to an Amazon Kendra + // index and use the AMAZON.KendraSearchIntent intent. + KendraConfiguration *KendraConfiguration `locationName:"kendraConfiguration" type:"structure"` + // The date that the intent was updated. When you create a resource, the creation // date and last update dates are the same. LastUpdatedDate *time.Time `locationName:"lastUpdatedDate" type:"timestamp"` @@ -11000,6 +11261,12 @@ func (s *PutIntentOutput) SetFulfillmentActivity(v *FulfillmentActivity) *PutInt return s } +// SetKendraConfiguration sets the KendraConfiguration field's value. +func (s *PutIntentOutput) SetKendraConfiguration(v *KendraConfiguration) *PutIntentOutput { + s.KendraConfiguration = v + return s +} + // SetLastUpdatedDate sets the LastUpdatedDate field's value. func (s *PutIntentOutput) SetLastUpdatedDate(v time.Time) *PutIntentOutput { s.LastUpdatedDate = &v @@ -11069,6 +11336,9 @@ type PutSlotTypeInput struct { // values that help train the machine learning model about the values that it // resolves for a slot. // + // A regular expression slot type doesn't require enumeration values. All other + // slot types require a list of enumeration values. + // // When Amazon Lex resolves a slot value, it generates a resolution list that // contains up to five possible values for the slot. If you are using a Lambda // function, this resolution list is passed to the function. If you are not @@ -11342,8 +11612,8 @@ func (s *PutSlotTypeOutput) SetVersion(v string) *PutSlotTypeOutput { // // "name": string, "version": string } } type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Describes the resource that refers to the resource that you are attempting // to delete. 
This object is returned as part of the ResourceInUseException @@ -11367,17 +11637,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11385,22 +11655,22 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // Describes the resource that refers to the resource that you are attempting @@ -11459,12 +11729,12 @@ type Slot struct { // (https://docs.aws.amazon.com/lex/latest/dg/how-obfuscate.html). ObfuscationSetting *string `locationName:"obfuscationSetting" type:"string" enum:"ObfuscationSetting"` - // Directs Lex the order in which to elicit this slot value from the user. For - // example, if the intent has two slots with priorities 1 and 2, AWS Lex first - // elicits a value for the slot with priority 1. + // Directs Amazon Lex the order in which to elicit this slot value from the + // user. For example, if the intent has two slots with priorities 1 and 2, AWS + // Amazon Lex first elicits a value for the slot with priority 1. // - // If multiple slots share the same priority, the order in which Lex elicits - // values is arbitrary. + // If multiple slots share the same priority, the order in which Amazon Lex + // elicits values is arbitrary. Priority *int64 `locationName:"priority" type:"integer"` // A set of possible responses for the slot type used by text-based clients. 
@@ -12322,6 +12592,15 @@ const ( ChannelStatusFailed = "FAILED" ) +// ChannelStatus_Values returns all elements of the ChannelStatus enum +func ChannelStatus_Values() []string { + return []string{ + ChannelStatusInProgress, + ChannelStatusCreated, + ChannelStatusFailed, + } +} + const ( // ChannelTypeFacebook is a ChannelType enum value ChannelTypeFacebook = "Facebook" @@ -12336,6 +12615,16 @@ const ( ChannelTypeKik = "Kik" ) +// ChannelType_Values returns all elements of the ChannelType enum +func ChannelType_Values() []string { + return []string{ + ChannelTypeFacebook, + ChannelTypeSlack, + ChannelTypeTwilioSms, + ChannelTypeKik, + } +} + const ( // ContentTypePlainText is a ContentType enum value ContentTypePlainText = "PlainText" @@ -12347,6 +12636,15 @@ const ( ContentTypeCustomPayload = "CustomPayload" ) +// ContentType_Values returns all elements of the ContentType enum +func ContentType_Values() []string { + return []string{ + ContentTypePlainText, + ContentTypeSsml, + ContentTypeCustomPayload, + } +} + const ( // DestinationCloudwatchLogs is a Destination enum value DestinationCloudwatchLogs = "CLOUDWATCH_LOGS" @@ -12355,6 +12653,14 @@ const ( DestinationS3 = "S3" ) +// Destination_Values returns all elements of the Destination enum +func Destination_Values() []string { + return []string{ + DestinationCloudwatchLogs, + DestinationS3, + } +} + const ( // ExportStatusInProgress is a ExportStatus enum value ExportStatusInProgress = "IN_PROGRESS" @@ -12366,6 +12672,15 @@ const ( ExportStatusFailed = "FAILED" ) +// ExportStatus_Values returns all elements of the ExportStatus enum +func ExportStatus_Values() []string { + return []string{ + ExportStatusInProgress, + ExportStatusReady, + ExportStatusFailed, + } +} + const ( // ExportTypeAlexaSkillsKit is a ExportType enum value ExportTypeAlexaSkillsKit = "ALEXA_SKILLS_KIT" @@ -12374,6 +12689,14 @@ const ( ExportTypeLex = "LEX" ) +// ExportType_Values returns all elements of the ExportType enum +func ExportType_Values() []string { + return []string{ + ExportTypeAlexaSkillsKit, + ExportTypeLex, + } +} + const ( // FulfillmentActivityTypeReturnIntent is a FulfillmentActivityType enum value FulfillmentActivityTypeReturnIntent = "ReturnIntent" @@ -12382,6 +12705,14 @@ const ( FulfillmentActivityTypeCodeHook = "CodeHook" ) +// FulfillmentActivityType_Values returns all elements of the FulfillmentActivityType enum +func FulfillmentActivityType_Values() []string { + return []string{ + FulfillmentActivityTypeReturnIntent, + FulfillmentActivityTypeCodeHook, + } +} + const ( // ImportStatusInProgress is a ImportStatus enum value ImportStatusInProgress = "IN_PROGRESS" @@ -12393,17 +12724,43 @@ const ( ImportStatusFailed = "FAILED" ) +// ImportStatus_Values returns all elements of the ImportStatus enum +func ImportStatus_Values() []string { + return []string{ + ImportStatusInProgress, + ImportStatusComplete, + ImportStatusFailed, + } +} + const ( - // LocaleEnUs is a Locale enum value - LocaleEnUs = "en-US" + // LocaleDeDe is a Locale enum value + LocaleDeDe = "de-DE" + + // LocaleEnAu is a Locale enum value + LocaleEnAu = "en-AU" // LocaleEnGb is a Locale enum value LocaleEnGb = "en-GB" - // LocaleDeDe is a Locale enum value - LocaleDeDe = "de-DE" + // LocaleEnUs is a Locale enum value + LocaleEnUs = "en-US" + + // LocaleEsUs is a Locale enum value + LocaleEsUs = "es-US" ) +// Locale_Values returns all elements of the Locale enum +func Locale_Values() []string { + return []string{ + LocaleDeDe, + LocaleEnAu, + LocaleEnGb, + LocaleEnUs, + LocaleEsUs, 
+ } +} + const ( // LogTypeAudio is a LogType enum value LogTypeAudio = "AUDIO" @@ -12412,6 +12769,14 @@ const ( LogTypeText = "TEXT" ) +// LogType_Values returns all elements of the LogType enum +func LogType_Values() []string { + return []string{ + LogTypeAudio, + LogTypeText, + } +} + const ( // MergeStrategyOverwriteLatest is a MergeStrategy enum value MergeStrategyOverwriteLatest = "OVERWRITE_LATEST" @@ -12420,6 +12785,14 @@ const ( MergeStrategyFailOnConflict = "FAIL_ON_CONFLICT" ) +// MergeStrategy_Values returns all elements of the MergeStrategy enum +func MergeStrategy_Values() []string { + return []string{ + MergeStrategyOverwriteLatest, + MergeStrategyFailOnConflict, + } +} + const ( // ObfuscationSettingNone is a ObfuscationSetting enum value ObfuscationSettingNone = "NONE" @@ -12428,6 +12801,14 @@ const ( ObfuscationSettingDefaultObfuscation = "DEFAULT_OBFUSCATION" ) +// ObfuscationSetting_Values returns all elements of the ObfuscationSetting enum +func ObfuscationSetting_Values() []string { + return []string{ + ObfuscationSettingNone, + ObfuscationSettingDefaultObfuscation, + } +} + const ( // ProcessBehaviorSave is a ProcessBehavior enum value ProcessBehaviorSave = "SAVE" @@ -12436,6 +12817,14 @@ const ( ProcessBehaviorBuild = "BUILD" ) +// ProcessBehavior_Values returns all elements of the ProcessBehavior enum +func ProcessBehavior_Values() []string { + return []string{ + ProcessBehaviorSave, + ProcessBehaviorBuild, + } +} + const ( // ReferenceTypeIntent is a ReferenceType enum value ReferenceTypeIntent = "Intent" @@ -12450,6 +12839,16 @@ const ( ReferenceTypeBotChannel = "BotChannel" ) +// ReferenceType_Values returns all elements of the ReferenceType enum +func ReferenceType_Values() []string { + return []string{ + ReferenceTypeIntent, + ReferenceTypeBot, + ReferenceTypeBotAlias, + ReferenceTypeBotChannel, + } +} + const ( // ResourceTypeBot is a ResourceType enum value ResourceTypeBot = "BOT" @@ -12461,6 +12860,15 @@ const ( ResourceTypeSlotType = "SLOT_TYPE" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeBot, + ResourceTypeIntent, + ResourceTypeSlotType, + } +} + const ( // SlotConstraintRequired is a SlotConstraint enum value SlotConstraintRequired = "Required" @@ -12469,6 +12877,14 @@ const ( SlotConstraintOptional = "Optional" ) +// SlotConstraint_Values returns all elements of the SlotConstraint enum +func SlotConstraint_Values() []string { + return []string{ + SlotConstraintRequired, + SlotConstraintOptional, + } +} + const ( // SlotValueSelectionStrategyOriginalValue is a SlotValueSelectionStrategy enum value SlotValueSelectionStrategyOriginalValue = "ORIGINAL_VALUE" @@ -12477,6 +12893,14 @@ const ( SlotValueSelectionStrategyTopResolution = "TOP_RESOLUTION" ) +// SlotValueSelectionStrategy_Values returns all elements of the SlotValueSelectionStrategy enum +func SlotValueSelectionStrategy_Values() []string { + return []string{ + SlotValueSelectionStrategyOriginalValue, + SlotValueSelectionStrategyTopResolution, + } +} + const ( // StatusBuilding is a Status enum value StatusBuilding = "BUILDING" @@ -12494,6 +12918,17 @@ const ( StatusNotBuilt = "NOT_BUILT" ) +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusBuilding, + StatusReady, + StatusReadyBasicTesting, + StatusFailed, + StatusNotBuilt, + } +} + const ( // StatusTypeDetected is a StatusType enum value StatusTypeDetected = "Detected" @@ -12501,3 
+12936,11 @@ const ( // StatusTypeMissed is a StatusType enum value StatusTypeMissed = "Missed" ) + +// StatusType_Values returns all elements of the StatusType enum +func StatusType_Values() []string { + return []string{ + StatusTypeDetected, + StatusTypeMissed, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go index 77d83721c..3d8119220 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/licensemanager/api.go b/vendor/github.com/aws/aws-sdk-go/service/licensemanager/api.go index 6fab0ad67..97c9641d4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/licensemanager/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/licensemanager/api.go @@ -63,8 +63,8 @@ func (c *LicenseManager) CreateLicenseConfigurationRequest(input *CreateLicenseC // that can be consumed and enforced by License Manager. Components include // specifications for the license type (licensing by instance, socket, CPU, // or vCPU), allowed tenancy (shared tenancy, Dedicated Instance, Dedicated -// Host, or all of these), host affinity (how long a VM must be associated with -// a host), and the number of licenses purchased and used. +// Host, or all of these), license affinity to host (how long a license must +// be associated with a host), and the number of licenses purchased and used. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1289,13 +1289,6 @@ func (c *LicenseManager) UpdateLicenseConfigurationRequest(input *UpdateLicenseC // // Modifies the attributes of an existing license configuration. // -// A license configuration is an abstraction of a customer license agreement -// that can be consumed and enforced by License Manager. Components include -// specifications for the license type (licensing by instance, socket, CPU, -// or vCPU), allowed tenancy (shared tenancy, Dedicated Instance, Dedicated -// Host, or all of these), host affinity (how long a VM must be associated with -// a host), and the number of licenses purchased and used. -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1545,8 +1538,8 @@ func (c *LicenseManager) UpdateServiceSettingsWithContext(ctx aws.Context, input // Access to resource denied. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1563,17 +1556,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1581,29 +1574,29 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // The AWS user account does not have permission to perform the action. Check // the IAM policy associated with this account. type AuthorizationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1620,17 +1613,17 @@ func (s AuthorizationException) GoString() string { func newErrorAuthorizationException(v protocol.ResponseMetadata) error { return &AuthorizationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AuthorizationException) Code() string { +func (s *AuthorizationException) Code() string { return "AuthorizationException" } // Message returns the exception's message. -func (s AuthorizationException) Message() string { +func (s *AuthorizationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1638,22 +1631,22 @@ func (s AuthorizationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AuthorizationException) OrigErr() error { +func (s *AuthorizationException) OrigErr() error { return nil } -func (s AuthorizationException) Error() string { +func (s *AuthorizationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AuthorizationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AuthorizationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AuthorizationException) RequestID() string { - return s.respMetadata.RequestID +func (s *AuthorizationException) RequestID() string { + return s.RespMetadata.RequestID } // Describes automated discovery. @@ -1732,17 +1725,24 @@ type CreateLicenseConfigurationInput struct { LicenseCountingType *string `type:"string" required:"true" enum:"LicenseCountingType"` // License rules. The syntax is #name=value (for example, #allowedTenancy=EC2-DedicatedHost). - // Available rules vary by dimension. + // The available rules vary by dimension, as follows. 
// - // * Cores dimension: allowedTenancy | maximumCores | minimumCores + // * Cores dimension: allowedTenancy | licenseAffinityToHost | maximumCores + // | minimumCores // // * Instances dimension: allowedTenancy | maximumCores | minimumCores | // maximumSockets | minimumSockets | maximumVcpus | minimumVcpus // - // * Sockets dimension: allowedTenancy | maximumSockets | minimumSockets + // * Sockets dimension: allowedTenancy | licenseAffinityToHost | maximumSockets + // | minimumSockets // // * vCPUs dimension: allowedTenancy | honorVcpuOptimization | maximumVcpus // | minimumVcpus + // + // The unit for licenseAffinityToHost is days and the range is 1 to 180. The + // possible values for allowedTenancy are EC2-Default, EC2-DedicatedHost, and + // EC2-DedicatedInstance. The possible values for honorVcpuOptimization are + // True and False. LicenseRules []*string `type:"list"` // Name of the license configuration. @@ -1918,8 +1918,8 @@ func (s DeleteLicenseConfigurationOutput) GoString() string { // A dependency required to run the API is missing. type FailedDependencyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1936,17 +1936,17 @@ func (s FailedDependencyException) GoString() string { func newErrorFailedDependencyException(v protocol.ResponseMetadata) error { return &FailedDependencyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FailedDependencyException) Code() string { +func (s *FailedDependencyException) Code() string { return "FailedDependencyException" } // Message returns the exception's message. -func (s FailedDependencyException) Message() string { +func (s *FailedDependencyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1954,22 +1954,22 @@ func (s FailedDependencyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FailedDependencyException) OrigErr() error { +func (s *FailedDependencyException) OrigErr() error { return nil } -func (s FailedDependencyException) Error() string { +func (s *FailedDependencyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FailedDependencyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FailedDependencyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FailedDependencyException) RequestID() string { - return s.respMetadata.RequestID +func (s *FailedDependencyException) RequestID() string { + return s.RespMetadata.RequestID } // A filter name and value pair that is used to return more specific results @@ -2009,8 +2009,8 @@ func (s *Filter) SetValues(v []*string) *Filter { // The request uses too many filters or too many filter values. 
type FilterLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2027,17 +2027,17 @@ func (s FilterLimitExceededException) GoString() string { func newErrorFilterLimitExceededException(v protocol.ResponseMetadata) error { return &FilterLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FilterLimitExceededException) Code() string { +func (s *FilterLimitExceededException) Code() string { return "FilterLimitExceededException" } // Message returns the exception's message. -func (s FilterLimitExceededException) Message() string { +func (s *FilterLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2045,22 +2045,22 @@ func (s FilterLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FilterLimitExceededException) OrigErr() error { +func (s *FilterLimitExceededException) OrigErr() error { return nil } -func (s FilterLimitExceededException) Error() string { +func (s *FilterLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FilterLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FilterLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FilterLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *FilterLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type GetLicenseConfigurationInput struct { @@ -2337,8 +2337,8 @@ func (s *GetServiceSettingsOutput) SetSnsTopicArn(v string) *GetServiceSettingsO // One or more parameter values are not valid. type InvalidParameterValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2355,17 +2355,17 @@ func (s InvalidParameterValueException) GoString() string { func newErrorInvalidParameterValueException(v protocol.ResponseMetadata) error { return &InvalidParameterValueException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterValueException) Code() string { +func (s *InvalidParameterValueException) Code() string { return "InvalidParameterValueException" } // Message returns the exception's message. -func (s InvalidParameterValueException) Message() string { +func (s *InvalidParameterValueException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2373,22 +2373,22 @@ func (s InvalidParameterValueException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterValueException) OrigErr() error { +func (s *InvalidParameterValueException) OrigErr() error { return nil } -func (s InvalidParameterValueException) Error() string { +func (s *InvalidParameterValueException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidParameterValueException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterValueException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterValueException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterValueException) RequestID() string { + return s.RespMetadata.RequestID } // License Manager cannot allocate a license to a resource because of its state. @@ -2396,8 +2396,8 @@ func (s InvalidParameterValueException) RequestID() string { // For example, you cannot allocate a license to an instance in the process // of shutting down. type InvalidResourceStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2414,17 +2414,17 @@ func (s InvalidResourceStateException) GoString() string { func newErrorInvalidResourceStateException(v protocol.ResponseMetadata) error { return &InvalidResourceStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidResourceStateException) Code() string { +func (s *InvalidResourceStateException) Code() string { return "InvalidResourceStateException" } // Message returns the exception's message. -func (s InvalidResourceStateException) Message() string { +func (s *InvalidResourceStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2432,22 +2432,22 @@ func (s InvalidResourceStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResourceStateException) OrigErr() error { +func (s *InvalidResourceStateException) OrigErr() error { return nil } -func (s InvalidResourceStateException) Error() string { +func (s *InvalidResourceStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidResourceStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResourceStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidResourceStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResourceStateException) RequestID() string { + return s.RespMetadata.RequestID } // An inventory filter. @@ -2915,8 +2915,8 @@ func (s *LicenseSpecification) SetLicenseConfigurationArn(v string) *LicenseSpec // You do not have enough licenses available to support a new resource launch. type LicenseUsageException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2933,17 +2933,17 @@ func (s LicenseUsageException) GoString() string { func newErrorLicenseUsageException(v protocol.ResponseMetadata) error { return &LicenseUsageException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LicenseUsageException) Code() string { +func (s *LicenseUsageException) Code() string { return "LicenseUsageException" } // Message returns the exception's message. 
-func (s LicenseUsageException) Message() string { +func (s *LicenseUsageException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2951,22 +2951,22 @@ func (s LicenseUsageException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LicenseUsageException) OrigErr() error { +func (s *LicenseUsageException) OrigErr() error { return nil } -func (s LicenseUsageException) Error() string { +func (s *LicenseUsageException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LicenseUsageException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LicenseUsageException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LicenseUsageException) RequestID() string { - return s.respMetadata.RequestID +func (s *LicenseUsageException) RequestID() string { + return s.RespMetadata.RequestID } type ListAssociationsForLicenseConfigurationInput struct { @@ -3151,8 +3151,9 @@ type ListLicenseConfigurationsInput struct { // Filters to scope the results. The following filters and logical operators // are supported: // - // * licenseCountingType - The dimension on which licenses are counted (vCPU). - // Logical operators are EQUALS | NOT_EQUALS. + // * licenseCountingType - The dimension on which licenses are counted. Possible + // values are vCPU | Instance | Core | Socket. Logical operators are EQUALS + // | NOT_EQUALS. // // * enforceLicenseCount - A Boolean value that indicates whether hard license // enforcement is used. Logical operators are EQUALS | NOT_EQUALS. @@ -3712,8 +3713,10 @@ func (s *OrganizationConfiguration) SetEnableIntegration(v bool) *OrganizationCo type ProductInformation struct { _ struct{} `type:"structure"` - // Product information filters. The following filters and logical operators - // are supported: + // Product information filters. + // + // The following filters and logical operators are supported when the resource + // type is SSM_MANAGED: // // * Application Name - The name of the application. Logical operator is // EQUALS. @@ -3729,13 +3732,23 @@ type ProductInformation struct { // * Platform Type - The platform type. Logical operator is EQUALS. // // * License Included - The type of license included. Logical operators are - // EQUALS and NOT_EQUALS. Possible values are sql-server-enterprise | sql-server-standard + // EQUALS and NOT_EQUALS. Possible values are: sql-server-enterprise | sql-server-standard // | sql-server-web | windows-server-datacenter. // + // The following filters and logical operators are supported when the resource + // type is RDS: + // + // * Engine Edition - The edition of the database engine. Logical operator + // is EQUALS. Possible values are: oracle-ee | oracle-se | oracle-se1 | oracle-se2. + // + // * License Pack - The license pack. Logical operator is EQUALS. Possible + // values are: data guard | diagnostic pack sqlt | tuning pack sqlt | ols + // | olap. + // // ProductInformationFilterList is a required field ProductInformationFilterList []*ProductInformationFilter `type:"list" required:"true"` - // Resource type. The value is SSM_MANAGED. + // Resource type. The possible values are SSM_MANAGED | RDS. 
// // ResourceType is a required field ResourceType *string `type:"string" required:"true"` @@ -3858,8 +3871,8 @@ func (s *ProductInformationFilter) SetProductInformationFilterValue(v []*string) // Too many requests have been submitted. Try again after a brief wait. type RateLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3876,17 +3889,17 @@ func (s RateLimitExceededException) GoString() string { func newErrorRateLimitExceededException(v protocol.ResponseMetadata) error { return &RateLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RateLimitExceededException) Code() string { +func (s *RateLimitExceededException) Code() string { return "RateLimitExceededException" } // Message returns the exception's message. -func (s RateLimitExceededException) Message() string { +func (s *RateLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3894,22 +3907,22 @@ func (s RateLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RateLimitExceededException) OrigErr() error { +func (s *RateLimitExceededException) OrigErr() error { return nil } -func (s RateLimitExceededException) Error() string { +func (s *RateLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RateLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RateLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RateLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *RateLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Details about a resource. @@ -3983,8 +3996,8 @@ func (s *ResourceInventory) SetResourceType(v string) *ResourceInventory { // Your resource limits have been exceeded. type ResourceLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4001,17 +4014,17 @@ func (s ResourceLimitExceededException) GoString() string { func newErrorResourceLimitExceededException(v protocol.ResponseMetadata) error { return &ResourceLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceLimitExceededException) Code() string { +func (s *ResourceLimitExceededException) Code() string { return "ResourceLimitExceededException" } // Message returns the exception's message. -func (s ResourceLimitExceededException) Message() string { +func (s *ResourceLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4019,28 +4032,28 @@ func (s ResourceLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceLimitExceededException) OrigErr() error { +func (s *ResourceLimitExceededException) OrigErr() error { return nil } -func (s ResourceLimitExceededException) Error() string { +func (s *ResourceLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The server experienced an internal error. Try again. type ServerInternalException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4057,17 +4070,17 @@ func (s ServerInternalException) GoString() string { func newErrorServerInternalException(v protocol.ResponseMetadata) error { return &ServerInternalException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServerInternalException) Code() string { +func (s *ServerInternalException) Code() string { return "ServerInternalException" } // Message returns the exception's message. -func (s ServerInternalException) Message() string { +func (s *ServerInternalException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4075,22 +4088,22 @@ func (s ServerInternalException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServerInternalException) OrigErr() error { +func (s *ServerInternalException) OrigErr() error { return nil } -func (s ServerInternalException) Error() string { +func (s *ServerInternalException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServerInternalException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServerInternalException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServerInternalException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServerInternalException) RequestID() string { + return s.RespMetadata.RequestID } // Details about a tag for a license configuration. @@ -4278,7 +4291,8 @@ type UpdateLicenseConfigurationInput struct { // New hard limit of the number of available licenses. LicenseCountHardLimit *bool `type:"boolean"` - // New license rules. + // New license rule. The only rule that you can add after you create a license + // configuration is licenseAffinityToHost. LicenseRules []*string `type:"list"` // New name of the license configuration. 
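
The License Manager doc changes above introduce the licenseAffinityToHost rule (1 to 180 days, Cores and Sockets dimensions only) and note that it is the only rule that can be added to an existing configuration. A hedged sketch of creating a configuration that uses the #name=value rule syntax follows; the name, counts, and rule values are illustrative only.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/licensemanager"
)

func main() {
	sess := session.Must(session.NewSession())
	conn := licensemanager.New(sess)

	// Rules use the "#name=value" syntax described above; licenseAffinityToHost
	// is expressed in days (1-180) and applies to the Cores and Sockets dimensions.
	out, err := conn.CreateLicenseConfiguration(&licensemanager.CreateLicenseConfigurationInput{
		Name:                  aws.String("oracle-ee-core-licenses"),
		LicenseCountingType:   aws.String(licensemanager.LicenseCountingTypeCore),
		LicenseCount:          aws.Int64(64),
		LicenseCountHardLimit: aws.Bool(true),
		LicenseRules: aws.StringSlice([]string{
			"#allowedTenancy=EC2-DedicatedHost",
			"#licenseAffinityToHost=90",
			"#minimumCores=4",
		}),
	})
	if err != nil {
		fmt.Println("CreateLicenseConfiguration failed:", err)
		return
	}
	fmt.Println("created:", aws.StringValue(out.LicenseConfigurationArn))
}

The *_Values() helpers added throughout these files (for example, LicenseCountingType_Values) return every allowed string for an enum, which is convenient for validating such fields in provider schemas.
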
@@ -4568,6 +4582,16 @@ const ( InventoryFilterConditionContains = "CONTAINS" ) +// InventoryFilterCondition_Values returns all elements of the InventoryFilterCondition enum +func InventoryFilterCondition_Values() []string { + return []string{ + InventoryFilterConditionEquals, + InventoryFilterConditionNotEquals, + InventoryFilterConditionBeginsWith, + InventoryFilterConditionContains, + } +} + const ( // LicenseConfigurationStatusAvailable is a LicenseConfigurationStatus enum value LicenseConfigurationStatusAvailable = "AVAILABLE" @@ -4576,6 +4600,14 @@ const ( LicenseConfigurationStatusDisabled = "DISABLED" ) +// LicenseConfigurationStatus_Values returns all elements of the LicenseConfigurationStatus enum +func LicenseConfigurationStatus_Values() []string { + return []string{ + LicenseConfigurationStatusAvailable, + LicenseConfigurationStatusDisabled, + } +} + const ( // LicenseCountingTypeVCpu is a LicenseCountingType enum value LicenseCountingTypeVCpu = "vCPU" @@ -4590,6 +4622,16 @@ const ( LicenseCountingTypeSocket = "Socket" ) +// LicenseCountingType_Values returns all elements of the LicenseCountingType enum +func LicenseCountingType_Values() []string { + return []string{ + LicenseCountingTypeVCpu, + LicenseCountingTypeInstance, + LicenseCountingTypeCore, + LicenseCountingTypeSocket, + } +} + const ( // ResourceTypeEc2Instance is a ResourceType enum value ResourceTypeEc2Instance = "EC2_INSTANCE" @@ -4606,3 +4648,14 @@ const ( // ResourceTypeSystemsManagerManagedInstance is a ResourceType enum value ResourceTypeSystemsManagerManagedInstance = "SYSTEMS_MANAGER_MANAGED_INSTANCE" ) + +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeEc2Instance, + ResourceTypeEc2Host, + ResourceTypeEc2Ami, + ResourceTypeRds, + ResourceTypeSystemsManagerManagedInstance, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go b/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go index 9a85ea46e..7a4b8be03 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go index 6d61ff508..d9ff1c079 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/api.go @@ -116,6 +116,118 @@ func (c *Lightsail) AllocateStaticIpWithContext(ctx aws.Context, input *Allocate return out, req.Send() } +const opAttachCertificateToDistribution = "AttachCertificateToDistribution" + +// AttachCertificateToDistributionRequest generates a "aws/request.Request" representing the +// client's request for the AttachCertificateToDistribution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See AttachCertificateToDistribution for more information on using the AttachCertificateToDistribution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AttachCertificateToDistributionRequest method. +// req, resp := client.AttachCertificateToDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AttachCertificateToDistribution +func (c *Lightsail) AttachCertificateToDistributionRequest(input *AttachCertificateToDistributionInput) (req *request.Request, output *AttachCertificateToDistributionOutput) { + op := &request.Operation{ + Name: opAttachCertificateToDistribution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachCertificateToDistributionInput{} + } + + output = &AttachCertificateToDistributionOutput{} + req = c.newRequest(op, input, output) + return +} + +// AttachCertificateToDistribution API operation for Amazon Lightsail. +// +// Attaches an SSL/TLS certificate to your Amazon Lightsail content delivery +// network (CDN) distribution. +// +// After the certificate is attached, your distribution accepts HTTPS traffic +// for all of the domains that are associated with the certificate. +// +// Use the CreateCertificate action to create a certificate that you can attach +// to your distribution. +// +// Only certificates created in the us-east-1 AWS Region can be attached to +// Lightsail distributions. Lightsail distributions are global resources that +// can reference an origin in any AWS Region, and distribute its content globally. +// However, all distributions are located in the us-east-1 Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation AttachCertificateToDistribution for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * OperationFailureException +// Lightsail throws this exception when an operation fails to execute. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/AttachCertificateToDistribution +func (c *Lightsail) AttachCertificateToDistribution(input *AttachCertificateToDistributionInput) (*AttachCertificateToDistributionOutput, error) { + req, out := c.AttachCertificateToDistributionRequest(input) + return out, req.Send() +} + +// AttachCertificateToDistributionWithContext is the same as AttachCertificateToDistribution with the addition of +// the ability to pass a context and additional request options. +// +// See AttachCertificateToDistribution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) AttachCertificateToDistributionWithContext(ctx aws.Context, input *AttachCertificateToDistributionInput, opts ...request.Option) (*AttachCertificateToDistributionOutput, error) { + req, out := c.AttachCertificateToDistributionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opAttachDisk = "AttachDisk" // AttachDiskRequest generates a "aws/request.Request" representing the @@ -599,11 +711,11 @@ func (c *Lightsail) CloseInstancePublicPortsRequest(input *CloseInstancePublicPo // CloseInstancePublicPorts API operation for Amazon Lightsail. // -// Closes the public ports on a specific Amazon Lightsail instance. +// Closes ports for a specific Amazon Lightsail instance. // -// The close instance public ports operation supports tag-based access control -// via resource tags applied to the resource identified by instance name. For -// more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// The CloseInstancePublicPorts action supports tag-based access control via +// resource tags applied to the resource identified by instanceName. For more +// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -777,6 +889,112 @@ func (c *Lightsail) CopySnapshotWithContext(ctx aws.Context, input *CopySnapshot return out, req.Send() } +const opCreateCertificate = "CreateCertificate" + +// CreateCertificateRequest generates a "aws/request.Request" representing the +// client's request for the CreateCertificate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCertificate for more information on using the CreateCertificate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateCertificateRequest method. 
+// req, resp := client.CreateCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateCertificate +func (c *Lightsail) CreateCertificateRequest(input *CreateCertificateInput) (req *request.Request, output *CreateCertificateOutput) { + op := &request.Operation{ + Name: opCreateCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCertificateInput{} + } + + output = &CreateCertificateOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCertificate API operation for Amazon Lightsail. +// +// Creates an SSL/TLS certificate for a Amazon Lightsail content delivery network +// (CDN) distribution. +// +// After the certificate is created, use the AttachCertificateToDistribution +// action to attach the certificate to your distribution. +// +// Only certificates created in the us-east-1 AWS Region can be attached to +// Lightsail distributions. Lightsail distributions are global resources that +// can reference an origin in any AWS Region, and distribute its content globally. +// However, all distributions are located in the us-east-1 Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation CreateCertificate for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateCertificate +func (c *Lightsail) CreateCertificate(input *CreateCertificateInput) (*CreateCertificateOutput, error) { + req, out := c.CreateCertificateRequest(input) + return out, req.Send() +} + +// CreateCertificateWithContext is the same as CreateCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) CreateCertificateWithContext(ctx aws.Context, input *CreateCertificateInput, opts ...request.Option) (*CreateCertificateOutput, error) { + req, out := c.CreateCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCreateCloudFormationStack = "CreateCloudFormationStack" // CreateCloudFormationStackRequest generates a "aws/request.Request" representing the @@ -1343,6 +1561,111 @@ func (c *Lightsail) CreateDiskSnapshotWithContext(ctx aws.Context, input *Create return out, req.Send() } +const opCreateDistribution = "CreateDistribution" + +// CreateDistributionRequest generates a "aws/request.Request" representing the +// client's request for the CreateDistribution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateDistribution for more information on using the CreateDistribution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateDistributionRequest method. +// req, resp := client.CreateDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDistribution +func (c *Lightsail) CreateDistributionRequest(input *CreateDistributionInput) (req *request.Request, output *CreateDistributionOutput) { + op := &request.Operation{ + Name: opCreateDistribution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDistributionInput{} + } + + output = &CreateDistributionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateDistribution API operation for Amazon Lightsail. +// +// Creates an Amazon Lightsail content delivery network (CDN) distribution. +// +// A distribution is a globally distributed network of caching servers that +// improve the performance of your website or web application hosted on a Lightsail +// instance. For more information, see Content delivery networks in Amazon Lightsail +// (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-content-delivery-networks). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation CreateDistribution for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * OperationFailureException +// Lightsail throws this exception when an operation fails to execute. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CreateDistribution +func (c *Lightsail) CreateDistribution(input *CreateDistributionInput) (*CreateDistributionOutput, error) { + req, out := c.CreateDistributionRequest(input) + return out, req.Send() +} + +// CreateDistributionWithContext is the same as CreateDistribution with the addition of +// the ability to pass a context and additional request options. +// +// See CreateDistribution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) CreateDistributionWithContext(ctx aws.Context, input *CreateDistributionInput, opts ...request.Option) (*CreateDistributionOutput, error) { + req, out := c.CreateDistributionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateDomain = "CreateDomain" // CreateDomainRequest generates a "aws/request.Request" representing the @@ -2752,51 +3075,153 @@ func (c *Lightsail) DeleteAutoSnapshotWithContext(ctx aws.Context, input *Delete return out, req.Send() } -const opDeleteContactMethod = "DeleteContactMethod" +const opDeleteCertificate = "DeleteCertificate" -// DeleteContactMethodRequest generates a "aws/request.Request" representing the -// client's request for the DeleteContactMethod operation. The "output" return +// DeleteCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCertificate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteContactMethod for more information on using the DeleteContactMethod +// See DeleteCertificate for more information on using the DeleteCertificate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteContactMethodRequest method. -// req, resp := client.DeleteContactMethodRequest(params) +// // Example sending a request using the DeleteCertificateRequest method. 
+// req, resp := client.DeleteCertificateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteContactMethod -func (c *Lightsail) DeleteContactMethodRequest(input *DeleteContactMethodInput) (req *request.Request, output *DeleteContactMethodOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteCertificate +func (c *Lightsail) DeleteCertificateRequest(input *DeleteCertificateInput) (req *request.Request, output *DeleteCertificateOutput) { op := &request.Operation{ - Name: opDeleteContactMethod, + Name: opDeleteCertificate, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DeleteContactMethodInput{} + input = &DeleteCertificateInput{} } - output = &DeleteContactMethodOutput{} + output = &DeleteCertificateOutput{} req = c.newRequest(op, input, output) return } -// DeleteContactMethod API operation for Amazon Lightsail. +// DeleteCertificate API operation for Amazon Lightsail. // -// Deletes a contact method. +// Deletes an SSL/TLS certificate for your Amazon Lightsail content delivery +// network (CDN) distribution. +// +// Certificates that are currently attached to a distribution cannot be deleted. +// Use the DetachCertificateFromDistribution action to detach a certificate +// from a distribution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation DeleteCertificate for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteCertificate +func (c *Lightsail) DeleteCertificate(input *DeleteCertificateInput) (*DeleteCertificateOutput, error) { + req, out := c.DeleteCertificateRequest(input) + return out, req.Send() +} + +// DeleteCertificateWithContext is the same as DeleteCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Lightsail) DeleteCertificateWithContext(ctx aws.Context, input *DeleteCertificateInput, opts ...request.Option) (*DeleteCertificateOutput, error) { + req, out := c.DeleteCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteContactMethod = "DeleteContactMethod" + +// DeleteContactMethodRequest generates a "aws/request.Request" representing the +// client's request for the DeleteContactMethod operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteContactMethod for more information on using the DeleteContactMethod +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteContactMethodRequest method. +// req, resp := client.DeleteContactMethodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteContactMethod +func (c *Lightsail) DeleteContactMethodRequest(input *DeleteContactMethodInput) (req *request.Request, output *DeleteContactMethodOutput) { + op := &request.Operation{ + Name: opDeleteContactMethod, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteContactMethodInput{} + } + + output = &DeleteContactMethodOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteContactMethod API operation for Amazon Lightsail. +// +// Deletes a contact method. // // A contact method is used to send you notifications about your Amazon Lightsail // resources. You can add one email address and one mobile phone number contact @@ -3084,6 +3509,106 @@ func (c *Lightsail) DeleteDiskSnapshotWithContext(ctx aws.Context, input *Delete return out, req.Send() } +const opDeleteDistribution = "DeleteDistribution" + +// DeleteDistributionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDistribution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteDistribution for more information on using the DeleteDistribution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteDistributionRequest method. 
+// req, resp := client.DeleteDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteDistribution +func (c *Lightsail) DeleteDistributionRequest(input *DeleteDistributionInput) (req *request.Request, output *DeleteDistributionOutput) { + op := &request.Operation{ + Name: opDeleteDistribution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDistributionInput{} + } + + output = &DeleteDistributionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteDistribution API operation for Amazon Lightsail. +// +// Deletes your Amazon Lightsail content delivery network (CDN) distribution. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation DeleteDistribution for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * OperationFailureException +// Lightsail throws this exception when an operation fails to execute. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DeleteDistribution +func (c *Lightsail) DeleteDistribution(input *DeleteDistributionInput) (*DeleteDistributionOutput, error) { + req, out := c.DeleteDistributionRequest(input) + return out, req.Send() +} + +// DeleteDistributionWithContext is the same as DeleteDistribution with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteDistribution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) DeleteDistributionWithContext(ctx aws.Context, input *DeleteDistributionInput, opts ...request.Option) (*DeleteDistributionOutput, error) { + req, out := c.DeleteDistributionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDeleteDomain = "DeleteDomain" // DeleteDomainRequest generates a "aws/request.Request" representing the @@ -4170,6 +4695,110 @@ func (c *Lightsail) DeleteRelationalDatabaseSnapshotWithContext(ctx aws.Context, return out, req.Send() } +const opDetachCertificateFromDistribution = "DetachCertificateFromDistribution" + +// DetachCertificateFromDistributionRequest generates a "aws/request.Request" representing the +// client's request for the DetachCertificateFromDistribution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DetachCertificateFromDistribution for more information on using the DetachCertificateFromDistribution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DetachCertificateFromDistributionRequest method. +// req, resp := client.DetachCertificateFromDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DetachCertificateFromDistribution +func (c *Lightsail) DetachCertificateFromDistributionRequest(input *DetachCertificateFromDistributionInput) (req *request.Request, output *DetachCertificateFromDistributionOutput) { + op := &request.Operation{ + Name: opDetachCertificateFromDistribution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachCertificateFromDistributionInput{} + } + + output = &DetachCertificateFromDistributionOutput{} + req = c.newRequest(op, input, output) + return +} + +// DetachCertificateFromDistribution API operation for Amazon Lightsail. +// +// Detaches an SSL/TLS certificate from your Amazon Lightsail content delivery +// network (CDN) distribution. +// +// After the certificate is detached, your distribution stops accepting traffic +// for all of the domains that are associated with the certificate. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation DetachCertificateFromDistribution for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * OperationFailureException +// Lightsail throws this exception when an operation fails to execute. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. 
+// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/DetachCertificateFromDistribution +func (c *Lightsail) DetachCertificateFromDistribution(input *DetachCertificateFromDistributionInput) (*DetachCertificateFromDistributionOutput, error) { + req, out := c.DetachCertificateFromDistributionRequest(input) + return out, req.Send() +} + +// DetachCertificateFromDistributionWithContext is the same as DetachCertificateFromDistribution with the addition of +// the ability to pass a context and additional request options. +// +// See DetachCertificateFromDistribution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) DetachCertificateFromDistributionWithContext(ctx aws.Context, input *DetachCertificateFromDistributionInput, opts ...request.Option) (*DetachCertificateFromDistributionOutput, error) { + req, out := c.DetachCertificateFromDistributionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDetachDisk = "DetachDisk" // DetachDiskRequest generates a "aws/request.Request" representing the @@ -5450,32 +6079,133 @@ func (c *Lightsail) GetBundlesWithContext(ctx aws.Context, input *GetBundlesInpu return out, req.Send() } -const opGetCloudFormationStackRecords = "GetCloudFormationStackRecords" +const opGetCertificates = "GetCertificates" -// GetCloudFormationStackRecordsRequest generates a "aws/request.Request" representing the -// client's request for the GetCloudFormationStackRecords operation. The "output" return +// GetCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the GetCertificates operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetCloudFormationStackRecords for more information on using the GetCloudFormationStackRecords +// See GetCertificates for more information on using the GetCertificates // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetCloudFormationStackRecordsRequest method. -// req, resp := client.GetCloudFormationStackRecordsRequest(params) +// // Example sending a request using the GetCertificatesRequest method. 
+// req, resp := client.GetCertificatesRequest(params)
 //
 // err := req.Send()
 // if err == nil { // resp is now filled
 // fmt.Println(resp)
 // }
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetCloudFormationStackRecords
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetCertificates
+func (c *Lightsail) GetCertificatesRequest(input *GetCertificatesInput) (req *request.Request, output *GetCertificatesOutput) {
+	op := &request.Operation{
+		Name: opGetCertificates,
+		HTTPMethod: "POST",
+		HTTPPath: "/",
+	}
+
+	if input == nil {
+		input = &GetCertificatesInput{}
+	}
+
+	output = &GetCertificatesOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetCertificates API operation for Amazon Lightsail.
+//
+// Returns information about one or more Amazon Lightsail SSL/TLS certificates.
+//
+// To get a summary of a certificate, omit includeCertificateDetails from your
+// request. The response will include only the certificate Amazon Resource Name
+// (ARN), certificate name, domain name, and tags.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Lightsail's
+// API operation GetCertificates for usage and error information.
+//
+// Returned Error Types:
+// * ServiceException
+// A general service exception.
+//
+// * InvalidInputException
+// Lightsail throws this exception when user input does not conform to the validation
+// rules of an input field.
+//
+// Domain-related APIs are only available in the N. Virginia (us-east-1) Region.
+// Please set your AWS Region configuration to us-east-1 to create, view, or
+// edit these resources.
+//
+// * NotFoundException
+// Lightsail throws this exception when it cannot find a resource.
+//
+// * AccessDeniedException
+// Lightsail throws this exception when the user cannot be authenticated or
+// uses invalid credentials to access a resource.
+//
+// * UnauthenticatedException
+// Lightsail throws this exception when the user has not been authenticated.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetCertificates
+func (c *Lightsail) GetCertificates(input *GetCertificatesInput) (*GetCertificatesOutput, error) {
+	req, out := c.GetCertificatesRequest(input)
+	return out, req.Send()
+}
+
+// GetCertificatesWithContext is the same as GetCertificates with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCertificates for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *Lightsail) GetCertificatesWithContext(ctx aws.Context, input *GetCertificatesInput, opts ...request.Option) (*GetCertificatesOutput, error) {
+	req, out := c.GetCertificatesRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetCloudFormationStackRecords = "GetCloudFormationStackRecords"
+
+// GetCloudFormationStackRecordsRequest generates a "aws/request.Request" representing the
+// client's request for the GetCloudFormationStackRecords operation.
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetCloudFormationStackRecords for more information on using the GetCloudFormationStackRecords +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetCloudFormationStackRecordsRequest method. +// req, resp := client.GetCloudFormationStackRecordsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetCloudFormationStackRecords func (c *Lightsail) GetCloudFormationStackRecordsRequest(input *GetCloudFormationStackRecordsInput) (req *request.Request, output *GetCloudFormationStackRecordsOutput) { op := &request.Operation{ Name: opGetCloudFormationStackRecords, @@ -6083,58 +6813,62 @@ func (c *Lightsail) GetDisksWithContext(ctx aws.Context, input *GetDisksInput, o return out, req.Send() } -const opGetDomain = "GetDomain" +const opGetDistributionBundles = "GetDistributionBundles" -// GetDomainRequest generates a "aws/request.Request" representing the -// client's request for the GetDomain operation. The "output" return +// GetDistributionBundlesRequest generates a "aws/request.Request" representing the +// client's request for the GetDistributionBundles operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetDomain for more information on using the GetDomain +// See GetDistributionBundles for more information on using the GetDistributionBundles // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetDomainRequest method. -// req, resp := client.GetDomainRequest(params) +// // Example sending a request using the GetDistributionBundlesRequest method. +// req, resp := client.GetDistributionBundlesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDomain -func (c *Lightsail) GetDomainRequest(input *GetDomainInput) (req *request.Request, output *GetDomainOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributionBundles +func (c *Lightsail) GetDistributionBundlesRequest(input *GetDistributionBundlesInput) (req *request.Request, output *GetDistributionBundlesOutput) { op := &request.Operation{ - Name: opGetDomain, + Name: opGetDistributionBundles, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetDomainInput{} + input = &GetDistributionBundlesInput{} } - output = &GetDomainOutput{} + output = &GetDistributionBundlesOutput{} req = c.newRequest(op, input, output) return } -// GetDomain API operation for Amazon Lightsail. 
+// GetDistributionBundles API operation for Amazon Lightsail.
 //
-// Returns information about a specific domain recordset.
+// Returns the list of bundles that can be applied to your Amazon Lightsail content
+// delivery network (CDN) distributions.
+//
+// A distribution bundle specifies the monthly network transfer quota and monthly
+// cost of your distribution.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
 //
 // See the AWS API reference guide for Amazon Lightsail's
-// API operation GetDomain for usage and error information.
+// API operation GetDistributionBundles for usage and error information.
 //
 // Returned Error Types:
 // * ServiceException
@@ -6158,87 +6892,84 @@ func (c *Lightsail) GetDomainRequest(input *GetDomainInput) (req *request.Reques
 // Lightsail throws this exception when the user cannot be authenticated or
 // uses invalid credentials to access a resource.
 //
-// * AccountSetupInProgressException
-// Lightsail throws this exception when an account is still in the setup in
-// progress state.
-//
 // * UnauthenticatedException
 // Lightsail throws this exception when the user has not been authenticated.
 //
-// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDomain
-func (c *Lightsail) GetDomain(input *GetDomainInput) (*GetDomainOutput, error) {
-	req, out := c.GetDomainRequest(input)
+// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributionBundles
+func (c *Lightsail) GetDistributionBundles(input *GetDistributionBundlesInput) (*GetDistributionBundlesOutput, error) {
+	req, out := c.GetDistributionBundlesRequest(input)
 	return out, req.Send()
 }
 
-// GetDomainWithContext is the same as GetDomain with the addition of
+// GetDistributionBundlesWithContext is the same as GetDistributionBundles with the addition of
 // the ability to pass a context and additional request options.
 //
-// See GetDomain for details on how to use this API operation.
+// See GetDistributionBundles for details on how to use this API operation.
 //
 // The context must be non-nil and will be used for request cancellation. If
 // the context is nil a panic will occur. In the future the SDK may create
 // sub-contexts for http.Requests. See https://golang.org/pkg/context/
 // for more information on using Contexts.
-func (c *Lightsail) GetDomainWithContext(ctx aws.Context, input *GetDomainInput, opts ...request.Option) (*GetDomainOutput, error) {
-	req, out := c.GetDomainRequest(input)
+func (c *Lightsail) GetDistributionBundlesWithContext(ctx aws.Context, input *GetDistributionBundlesInput, opts ...request.Option) (*GetDistributionBundlesOutput, error) {
+	req, out := c.GetDistributionBundlesRequest(input)
 	req.SetContext(ctx)
 	req.ApplyOptions(opts...)
 	return out, req.Send()
 }
 
-const opGetDomains = "GetDomains"
+const opGetDistributionLatestCacheReset = "GetDistributionLatestCacheReset"
 
-// GetDomainsRequest generates a "aws/request.Request" representing the
-// client's request for the GetDomains operation. The "output" return
+// GetDistributionLatestCacheResetRequest generates a "aws/request.Request" representing the
+// client's request for the GetDistributionLatestCacheReset operation. The "output" return
 // value will be populated with the request's response once the request completes
 // successfully.
 //
 // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error. // -// See GetDomains for more information on using the GetDomains +// See GetDistributionLatestCacheReset for more information on using the GetDistributionLatestCacheReset // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetDomainsRequest method. -// req, resp := client.GetDomainsRequest(params) +// // Example sending a request using the GetDistributionLatestCacheResetRequest method. +// req, resp := client.GetDistributionLatestCacheResetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDomains -func (c *Lightsail) GetDomainsRequest(input *GetDomainsInput) (req *request.Request, output *GetDomainsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributionLatestCacheReset +func (c *Lightsail) GetDistributionLatestCacheResetRequest(input *GetDistributionLatestCacheResetInput) (req *request.Request, output *GetDistributionLatestCacheResetOutput) { op := &request.Operation{ - Name: opGetDomains, + Name: opGetDistributionLatestCacheReset, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetDomainsInput{} + input = &GetDistributionLatestCacheResetInput{} } - output = &GetDomainsOutput{} + output = &GetDistributionLatestCacheResetOutput{} req = c.newRequest(op, input, output) return } -// GetDomains API operation for Amazon Lightsail. +// GetDistributionLatestCacheReset API operation for Amazon Lightsail. // -// Returns a list of all domains in the user's account. +// Returns the timestamp and status of the last cache reset of a specific Amazon +// Lightsail content delivery network (CDN) distribution. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetDomains for usage and error information. +// API operation GetDistributionLatestCacheReset for usage and error information. // // Returned Error Types: // * ServiceException @@ -6262,91 +6993,88 @@ func (c *Lightsail) GetDomainsRequest(input *GetDomainsInput) (req *request.Requ // Lightsail throws this exception when the user cannot be authenticated or // uses invalid credentials to access a resource. // -// * AccountSetupInProgressException -// Lightsail throws this exception when an account is still in the setup in -// progress state. -// // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDomains -func (c *Lightsail) GetDomains(input *GetDomainsInput) (*GetDomainsOutput, error) { - req, out := c.GetDomainsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributionLatestCacheReset +func (c *Lightsail) GetDistributionLatestCacheReset(input *GetDistributionLatestCacheResetInput) (*GetDistributionLatestCacheResetOutput, error) { + req, out := c.GetDistributionLatestCacheResetRequest(input) return out, req.Send() } -// GetDomainsWithContext is the same as GetDomains with the addition of +// GetDistributionLatestCacheResetWithContext is the same as GetDistributionLatestCacheReset with the addition of // the ability to pass a context and additional request options. // -// See GetDomains for details on how to use this API operation. +// See GetDistributionLatestCacheReset for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetDomainsWithContext(ctx aws.Context, input *GetDomainsInput, opts ...request.Option) (*GetDomainsOutput, error) { - req, out := c.GetDomainsRequest(input) +func (c *Lightsail) GetDistributionLatestCacheResetWithContext(ctx aws.Context, input *GetDistributionLatestCacheResetInput, opts ...request.Option) (*GetDistributionLatestCacheResetOutput, error) { + req, out := c.GetDistributionLatestCacheResetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetExportSnapshotRecords = "GetExportSnapshotRecords" +const opGetDistributionMetricData = "GetDistributionMetricData" -// GetExportSnapshotRecordsRequest generates a "aws/request.Request" representing the -// client's request for the GetExportSnapshotRecords operation. The "output" return +// GetDistributionMetricDataRequest generates a "aws/request.Request" representing the +// client's request for the GetDistributionMetricData operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetExportSnapshotRecords for more information on using the GetExportSnapshotRecords +// See GetDistributionMetricData for more information on using the GetDistributionMetricData // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetExportSnapshotRecordsRequest method. -// req, resp := client.GetExportSnapshotRecordsRequest(params) +// // Example sending a request using the GetDistributionMetricDataRequest method. 
+// req, resp := client.GetDistributionMetricDataRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetExportSnapshotRecords -func (c *Lightsail) GetExportSnapshotRecordsRequest(input *GetExportSnapshotRecordsInput) (req *request.Request, output *GetExportSnapshotRecordsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributionMetricData +func (c *Lightsail) GetDistributionMetricDataRequest(input *GetDistributionMetricDataInput) (req *request.Request, output *GetDistributionMetricDataOutput) { op := &request.Operation{ - Name: opGetExportSnapshotRecords, + Name: opGetDistributionMetricData, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetExportSnapshotRecordsInput{} + input = &GetDistributionMetricDataInput{} } - output = &GetExportSnapshotRecordsOutput{} + output = &GetDistributionMetricDataOutput{} req = c.newRequest(op, input, output) return } -// GetExportSnapshotRecords API operation for Amazon Lightsail. +// GetDistributionMetricData API operation for Amazon Lightsail. // -// Returns the export snapshot record created as a result of the export snapshot -// operation. +// Returns the data points of a specific metric for an Amazon Lightsail content +// delivery network (CDN) distribution. // -// An export snapshot record can be used to create a new Amazon EC2 instance -// and its related resources with the create cloud formation stack operation. +// Metrics report the utilization of your resources, and the error counts generated +// by them. Monitor and collect metric data regularly to maintain the reliability, +// availability, and performance of your resources. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetExportSnapshotRecords for usage and error information. +// API operation GetDistributionMetricData for usage and error information. // // Returned Error Types: // * ServiceException @@ -6370,88 +7098,84 @@ func (c *Lightsail) GetExportSnapshotRecordsRequest(input *GetExportSnapshotReco // Lightsail throws this exception when the user cannot be authenticated or // uses invalid credentials to access a resource. // -// * AccountSetupInProgressException -// Lightsail throws this exception when an account is still in the setup in -// progress state. -// // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetExportSnapshotRecords -func (c *Lightsail) GetExportSnapshotRecords(input *GetExportSnapshotRecordsInput) (*GetExportSnapshotRecordsOutput, error) { - req, out := c.GetExportSnapshotRecordsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributionMetricData +func (c *Lightsail) GetDistributionMetricData(input *GetDistributionMetricDataInput) (*GetDistributionMetricDataOutput, error) { + req, out := c.GetDistributionMetricDataRequest(input) return out, req.Send() } -// GetExportSnapshotRecordsWithContext is the same as GetExportSnapshotRecords with the addition of +// GetDistributionMetricDataWithContext is the same as GetDistributionMetricData with the addition of // the ability to pass a context and additional request options. // -// See GetExportSnapshotRecords for details on how to use this API operation. +// See GetDistributionMetricData for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetExportSnapshotRecordsWithContext(ctx aws.Context, input *GetExportSnapshotRecordsInput, opts ...request.Option) (*GetExportSnapshotRecordsOutput, error) { - req, out := c.GetExportSnapshotRecordsRequest(input) +func (c *Lightsail) GetDistributionMetricDataWithContext(ctx aws.Context, input *GetDistributionMetricDataInput, opts ...request.Option) (*GetDistributionMetricDataOutput, error) { + req, out := c.GetDistributionMetricDataRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetInstance = "GetInstance" +const opGetDistributions = "GetDistributions" -// GetInstanceRequest generates a "aws/request.Request" representing the -// client's request for the GetInstance operation. The "output" return +// GetDistributionsRequest generates a "aws/request.Request" representing the +// client's request for the GetDistributions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetInstance for more information on using the GetInstance +// See GetDistributions for more information on using the GetDistributions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetInstanceRequest method. -// req, resp := client.GetInstanceRequest(params) +// // Example sending a request using the GetDistributionsRequest method. 
+// req, resp := client.GetDistributionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstance -func (c *Lightsail) GetInstanceRequest(input *GetInstanceInput) (req *request.Request, output *GetInstanceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributions +func (c *Lightsail) GetDistributionsRequest(input *GetDistributionsInput) (req *request.Request, output *GetDistributionsOutput) { op := &request.Operation{ - Name: opGetInstance, + Name: opGetDistributions, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetInstanceInput{} + input = &GetDistributionsInput{} } - output = &GetInstanceOutput{} + output = &GetDistributionsOutput{} req = c.newRequest(op, input, output) return } -// GetInstance API operation for Amazon Lightsail. +// GetDistributions API operation for Amazon Lightsail. // -// Returns information about a specific Amazon Lightsail instance, which is -// a virtual private server. +// Returns information about one or more of your Amazon Lightsail content delivery +// network (CDN) distributions. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetInstance for usage and error information. +// API operation GetDistributions for usage and error information. // // Returned Error Types: // * ServiceException @@ -6475,92 +7199,83 @@ func (c *Lightsail) GetInstanceRequest(input *GetInstanceInput) (req *request.Re // Lightsail throws this exception when the user cannot be authenticated or // uses invalid credentials to access a resource. // -// * AccountSetupInProgressException -// Lightsail throws this exception when an account is still in the setup in -// progress state. -// // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstance -func (c *Lightsail) GetInstance(input *GetInstanceInput) (*GetInstanceOutput, error) { - req, out := c.GetInstanceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDistributions +func (c *Lightsail) GetDistributions(input *GetDistributionsInput) (*GetDistributionsOutput, error) { + req, out := c.GetDistributionsRequest(input) return out, req.Send() } -// GetInstanceWithContext is the same as GetInstance with the addition of +// GetDistributionsWithContext is the same as GetDistributions with the addition of // the ability to pass a context and additional request options. // -// See GetInstance for details on how to use this API operation. +// See GetDistributions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Lightsail) GetInstanceWithContext(ctx aws.Context, input *GetInstanceInput, opts ...request.Option) (*GetInstanceOutput, error) { - req, out := c.GetInstanceRequest(input) +func (c *Lightsail) GetDistributionsWithContext(ctx aws.Context, input *GetDistributionsInput, opts ...request.Option) (*GetDistributionsOutput, error) { + req, out := c.GetDistributionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetInstanceAccessDetails = "GetInstanceAccessDetails" +const opGetDomain = "GetDomain" -// GetInstanceAccessDetailsRequest generates a "aws/request.Request" representing the -// client's request for the GetInstanceAccessDetails operation. The "output" return +// GetDomainRequest generates a "aws/request.Request" representing the +// client's request for the GetDomain operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetInstanceAccessDetails for more information on using the GetInstanceAccessDetails +// See GetDomain for more information on using the GetDomain // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetInstanceAccessDetailsRequest method. -// req, resp := client.GetInstanceAccessDetailsRequest(params) +// // Example sending a request using the GetDomainRequest method. +// req, resp := client.GetDomainRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceAccessDetails -func (c *Lightsail) GetInstanceAccessDetailsRequest(input *GetInstanceAccessDetailsInput) (req *request.Request, output *GetInstanceAccessDetailsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDomain +func (c *Lightsail) GetDomainRequest(input *GetDomainInput) (req *request.Request, output *GetDomainOutput) { op := &request.Operation{ - Name: opGetInstanceAccessDetails, + Name: opGetDomain, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetInstanceAccessDetailsInput{} + input = &GetDomainInput{} } - output = &GetInstanceAccessDetailsOutput{} + output = &GetDomainOutput{} req = c.newRequest(op, input, output) return } -// GetInstanceAccessDetails API operation for Amazon Lightsail. -// -// Returns temporary SSH keys you can use to connect to a specific virtual private -// server, or instance. +// GetDomain API operation for Amazon Lightsail. // -// The get instance access details operation supports tag-based access control -// via resource tags applied to the resource identified by instance name. For -// more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// Returns information about a specific domain recordset. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetInstanceAccessDetails for usage and error information. 
+// API operation GetDomain for usage and error information. // // Returned Error Types: // * ServiceException @@ -6591,81 +7306,80 @@ func (c *Lightsail) GetInstanceAccessDetailsRequest(input *GetInstanceAccessDeta // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceAccessDetails -func (c *Lightsail) GetInstanceAccessDetails(input *GetInstanceAccessDetailsInput) (*GetInstanceAccessDetailsOutput, error) { - req, out := c.GetInstanceAccessDetailsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDomain +func (c *Lightsail) GetDomain(input *GetDomainInput) (*GetDomainOutput, error) { + req, out := c.GetDomainRequest(input) return out, req.Send() } -// GetInstanceAccessDetailsWithContext is the same as GetInstanceAccessDetails with the addition of +// GetDomainWithContext is the same as GetDomain with the addition of // the ability to pass a context and additional request options. // -// See GetInstanceAccessDetails for details on how to use this API operation. +// See GetDomain for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetInstanceAccessDetailsWithContext(ctx aws.Context, input *GetInstanceAccessDetailsInput, opts ...request.Option) (*GetInstanceAccessDetailsOutput, error) { - req, out := c.GetInstanceAccessDetailsRequest(input) +func (c *Lightsail) GetDomainWithContext(ctx aws.Context, input *GetDomainInput, opts ...request.Option) (*GetDomainOutput, error) { + req, out := c.GetDomainRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetInstanceMetricData = "GetInstanceMetricData" +const opGetDomains = "GetDomains" -// GetInstanceMetricDataRequest generates a "aws/request.Request" representing the -// client's request for the GetInstanceMetricData operation. The "output" return +// GetDomainsRequest generates a "aws/request.Request" representing the +// client's request for the GetDomains operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetInstanceMetricData for more information on using the GetInstanceMetricData +// See GetDomains for more information on using the GetDomains // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetInstanceMetricDataRequest method. -// req, resp := client.GetInstanceMetricDataRequest(params) +// // Example sending a request using the GetDomainsRequest method. 
+// req, resp := client.GetDomainsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceMetricData -func (c *Lightsail) GetInstanceMetricDataRequest(input *GetInstanceMetricDataInput) (req *request.Request, output *GetInstanceMetricDataOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDomains +func (c *Lightsail) GetDomainsRequest(input *GetDomainsInput) (req *request.Request, output *GetDomainsOutput) { op := &request.Operation{ - Name: opGetInstanceMetricData, + Name: opGetDomains, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetInstanceMetricDataInput{} + input = &GetDomainsInput{} } - output = &GetInstanceMetricDataOutput{} + output = &GetDomainsOutput{} req = c.newRequest(op, input, output) return } -// GetInstanceMetricData API operation for Amazon Lightsail. +// GetDomains API operation for Amazon Lightsail. // -// Returns the data points for the specified Amazon Lightsail instance metric, -// given an instance name. +// Returns a list of all domains in the user's account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetInstanceMetricData for usage and error information. +// API operation GetDomains for usage and error information. // // Returned Error Types: // * ServiceException @@ -6696,80 +7410,84 @@ func (c *Lightsail) GetInstanceMetricDataRequest(input *GetInstanceMetricDataInp // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceMetricData -func (c *Lightsail) GetInstanceMetricData(input *GetInstanceMetricDataInput) (*GetInstanceMetricDataOutput, error) { - req, out := c.GetInstanceMetricDataRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetDomains +func (c *Lightsail) GetDomains(input *GetDomainsInput) (*GetDomainsOutput, error) { + req, out := c.GetDomainsRequest(input) return out, req.Send() } -// GetInstanceMetricDataWithContext is the same as GetInstanceMetricData with the addition of +// GetDomainsWithContext is the same as GetDomains with the addition of // the ability to pass a context and additional request options. // -// See GetInstanceMetricData for details on how to use this API operation. +// See GetDomains for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetInstanceMetricDataWithContext(ctx aws.Context, input *GetInstanceMetricDataInput, opts ...request.Option) (*GetInstanceMetricDataOutput, error) { - req, out := c.GetInstanceMetricDataRequest(input) +func (c *Lightsail) GetDomainsWithContext(ctx aws.Context, input *GetDomainsInput, opts ...request.Option) (*GetDomainsOutput, error) { + req, out := c.GetDomainsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetInstancePortStates = "GetInstancePortStates" +const opGetExportSnapshotRecords = "GetExportSnapshotRecords" -// GetInstancePortStatesRequest generates a "aws/request.Request" representing the -// client's request for the GetInstancePortStates operation. The "output" return +// GetExportSnapshotRecordsRequest generates a "aws/request.Request" representing the +// client's request for the GetExportSnapshotRecords operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetInstancePortStates for more information on using the GetInstancePortStates +// See GetExportSnapshotRecords for more information on using the GetExportSnapshotRecords // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetInstancePortStatesRequest method. -// req, resp := client.GetInstancePortStatesRequest(params) +// // Example sending a request using the GetExportSnapshotRecordsRequest method. +// req, resp := client.GetExportSnapshotRecordsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstancePortStates -func (c *Lightsail) GetInstancePortStatesRequest(input *GetInstancePortStatesInput) (req *request.Request, output *GetInstancePortStatesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetExportSnapshotRecords +func (c *Lightsail) GetExportSnapshotRecordsRequest(input *GetExportSnapshotRecordsInput) (req *request.Request, output *GetExportSnapshotRecordsOutput) { op := &request.Operation{ - Name: opGetInstancePortStates, + Name: opGetExportSnapshotRecords, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetInstancePortStatesInput{} + input = &GetExportSnapshotRecordsInput{} } - output = &GetInstancePortStatesOutput{} + output = &GetExportSnapshotRecordsOutput{} req = c.newRequest(op, input, output) return } -// GetInstancePortStates API operation for Amazon Lightsail. +// GetExportSnapshotRecords API operation for Amazon Lightsail. +// +// Returns the export snapshot record created as a result of the export snapshot +// operation. // -// Returns the port states for a specific virtual private server, or instance. +// An export snapshot record can be used to create a new Amazon EC2 instance +// and its related resources with the create cloud formation stack operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetInstancePortStates for usage and error information. +// API operation GetExportSnapshotRecords for usage and error information. // // Returned Error Types: // * ServiceException @@ -6800,80 +7518,81 @@ func (c *Lightsail) GetInstancePortStatesRequest(input *GetInstancePortStatesInp // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstancePortStates -func (c *Lightsail) GetInstancePortStates(input *GetInstancePortStatesInput) (*GetInstancePortStatesOutput, error) { - req, out := c.GetInstancePortStatesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetExportSnapshotRecords +func (c *Lightsail) GetExportSnapshotRecords(input *GetExportSnapshotRecordsInput) (*GetExportSnapshotRecordsOutput, error) { + req, out := c.GetExportSnapshotRecordsRequest(input) return out, req.Send() } -// GetInstancePortStatesWithContext is the same as GetInstancePortStates with the addition of +// GetExportSnapshotRecordsWithContext is the same as GetExportSnapshotRecords with the addition of // the ability to pass a context and additional request options. // -// See GetInstancePortStates for details on how to use this API operation. +// See GetExportSnapshotRecords for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetInstancePortStatesWithContext(ctx aws.Context, input *GetInstancePortStatesInput, opts ...request.Option) (*GetInstancePortStatesOutput, error) { - req, out := c.GetInstancePortStatesRequest(input) +func (c *Lightsail) GetExportSnapshotRecordsWithContext(ctx aws.Context, input *GetExportSnapshotRecordsInput, opts ...request.Option) (*GetExportSnapshotRecordsOutput, error) { + req, out := c.GetExportSnapshotRecordsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetInstanceSnapshot = "GetInstanceSnapshot" +const opGetInstance = "GetInstance" -// GetInstanceSnapshotRequest generates a "aws/request.Request" representing the -// client's request for the GetInstanceSnapshot operation. The "output" return +// GetInstanceRequest generates a "aws/request.Request" representing the +// client's request for the GetInstance operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetInstanceSnapshot for more information on using the GetInstanceSnapshot +// See GetInstance for more information on using the GetInstance // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetInstanceSnapshotRequest method. -// req, resp := client.GetInstanceSnapshotRequest(params) +// // Example sending a request using the GetInstanceRequest method. 
+// req, resp := client.GetInstanceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceSnapshot -func (c *Lightsail) GetInstanceSnapshotRequest(input *GetInstanceSnapshotInput) (req *request.Request, output *GetInstanceSnapshotOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstance +func (c *Lightsail) GetInstanceRequest(input *GetInstanceInput) (req *request.Request, output *GetInstanceOutput) { op := &request.Operation{ - Name: opGetInstanceSnapshot, + Name: opGetInstance, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetInstanceSnapshotInput{} + input = &GetInstanceInput{} } - output = &GetInstanceSnapshotOutput{} + output = &GetInstanceOutput{} req = c.newRequest(op, input, output) return } -// GetInstanceSnapshot API operation for Amazon Lightsail. +// GetInstance API operation for Amazon Lightsail. // -// Returns information about a specific instance snapshot. +// Returns information about a specific Amazon Lightsail instance, which is +// a virtual private server. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetInstanceSnapshot for usage and error information. +// API operation GetInstance for usage and error information. // // Returned Error Types: // * ServiceException @@ -6904,80 +7623,85 @@ func (c *Lightsail) GetInstanceSnapshotRequest(input *GetInstanceSnapshotInput) // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceSnapshot -func (c *Lightsail) GetInstanceSnapshot(input *GetInstanceSnapshotInput) (*GetInstanceSnapshotOutput, error) { - req, out := c.GetInstanceSnapshotRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstance +func (c *Lightsail) GetInstance(input *GetInstanceInput) (*GetInstanceOutput, error) { + req, out := c.GetInstanceRequest(input) return out, req.Send() } -// GetInstanceSnapshotWithContext is the same as GetInstanceSnapshot with the addition of +// GetInstanceWithContext is the same as GetInstance with the addition of // the ability to pass a context and additional request options. // -// See GetInstanceSnapshot for details on how to use this API operation. +// See GetInstance for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetInstanceSnapshotWithContext(ctx aws.Context, input *GetInstanceSnapshotInput, opts ...request.Option) (*GetInstanceSnapshotOutput, error) { - req, out := c.GetInstanceSnapshotRequest(input) +func (c *Lightsail) GetInstanceWithContext(ctx aws.Context, input *GetInstanceInput, opts ...request.Option) (*GetInstanceOutput, error) { + req, out := c.GetInstanceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetInstanceSnapshots = "GetInstanceSnapshots" +const opGetInstanceAccessDetails = "GetInstanceAccessDetails" -// GetInstanceSnapshotsRequest generates a "aws/request.Request" representing the -// client's request for the GetInstanceSnapshots operation. The "output" return +// GetInstanceAccessDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GetInstanceAccessDetails operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetInstanceSnapshots for more information on using the GetInstanceSnapshots +// See GetInstanceAccessDetails for more information on using the GetInstanceAccessDetails // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetInstanceSnapshotsRequest method. -// req, resp := client.GetInstanceSnapshotsRequest(params) +// // Example sending a request using the GetInstanceAccessDetailsRequest method. +// req, resp := client.GetInstanceAccessDetailsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceSnapshots -func (c *Lightsail) GetInstanceSnapshotsRequest(input *GetInstanceSnapshotsInput) (req *request.Request, output *GetInstanceSnapshotsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceAccessDetails +func (c *Lightsail) GetInstanceAccessDetailsRequest(input *GetInstanceAccessDetailsInput) (req *request.Request, output *GetInstanceAccessDetailsOutput) { op := &request.Operation{ - Name: opGetInstanceSnapshots, + Name: opGetInstanceAccessDetails, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetInstanceSnapshotsInput{} + input = &GetInstanceAccessDetailsInput{} } - output = &GetInstanceSnapshotsOutput{} + output = &GetInstanceAccessDetailsOutput{} req = c.newRequest(op, input, output) return } -// GetInstanceSnapshots API operation for Amazon Lightsail. +// GetInstanceAccessDetails API operation for Amazon Lightsail. // -// Returns all instance snapshots for the user's account. +// Returns temporary SSH keys you can use to connect to a specific virtual private +// server, or instance. +// +// The get instance access details operation supports tag-based access control +// via resource tags applied to the resource identified by instance name. For +// more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetInstanceSnapshots for usage and error information. +// API operation GetInstanceAccessDetails for usage and error information. 
// // Returned Error Types: // * ServiceException @@ -7008,80 +7732,85 @@ func (c *Lightsail) GetInstanceSnapshotsRequest(input *GetInstanceSnapshotsInput // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceSnapshots -func (c *Lightsail) GetInstanceSnapshots(input *GetInstanceSnapshotsInput) (*GetInstanceSnapshotsOutput, error) { - req, out := c.GetInstanceSnapshotsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceAccessDetails +func (c *Lightsail) GetInstanceAccessDetails(input *GetInstanceAccessDetailsInput) (*GetInstanceAccessDetailsOutput, error) { + req, out := c.GetInstanceAccessDetailsRequest(input) return out, req.Send() } -// GetInstanceSnapshotsWithContext is the same as GetInstanceSnapshots with the addition of +// GetInstanceAccessDetailsWithContext is the same as GetInstanceAccessDetails with the addition of // the ability to pass a context and additional request options. // -// See GetInstanceSnapshots for details on how to use this API operation. +// See GetInstanceAccessDetails for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetInstanceSnapshotsWithContext(ctx aws.Context, input *GetInstanceSnapshotsInput, opts ...request.Option) (*GetInstanceSnapshotsOutput, error) { - req, out := c.GetInstanceSnapshotsRequest(input) +func (c *Lightsail) GetInstanceAccessDetailsWithContext(ctx aws.Context, input *GetInstanceAccessDetailsInput, opts ...request.Option) (*GetInstanceAccessDetailsOutput, error) { + req, out := c.GetInstanceAccessDetailsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetInstanceState = "GetInstanceState" +const opGetInstanceMetricData = "GetInstanceMetricData" -// GetInstanceStateRequest generates a "aws/request.Request" representing the -// client's request for the GetInstanceState operation. The "output" return +// GetInstanceMetricDataRequest generates a "aws/request.Request" representing the +// client's request for the GetInstanceMetricData operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetInstanceState for more information on using the GetInstanceState +// See GetInstanceMetricData for more information on using the GetInstanceMetricData // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetInstanceStateRequest method. -// req, resp := client.GetInstanceStateRequest(params) +// // Example sending a request using the GetInstanceMetricDataRequest method. 
+// req, resp := client.GetInstanceMetricDataRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceState -func (c *Lightsail) GetInstanceStateRequest(input *GetInstanceStateInput) (req *request.Request, output *GetInstanceStateOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceMetricData +func (c *Lightsail) GetInstanceMetricDataRequest(input *GetInstanceMetricDataInput) (req *request.Request, output *GetInstanceMetricDataOutput) { op := &request.Operation{ - Name: opGetInstanceState, + Name: opGetInstanceMetricData, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetInstanceStateInput{} + input = &GetInstanceMetricDataInput{} } - output = &GetInstanceStateOutput{} + output = &GetInstanceMetricDataOutput{} req = c.newRequest(op, input, output) return } -// GetInstanceState API operation for Amazon Lightsail. +// GetInstanceMetricData API operation for Amazon Lightsail. // -// Returns the state of a specific instance. Works on one instance at a time. +// Returns the data points for the specified Amazon Lightsail instance metric, +// given an instance name. +// +// Metrics report the utilization of your resources, and the error counts generated +// by them. Monitor and collect metric data regularly to maintain the reliability, +// availability, and performance of your resources. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetInstanceState for usage and error information. +// API operation GetInstanceMetricData for usage and error information. // // Returned Error Types: // * ServiceException @@ -7112,81 +7841,82 @@ func (c *Lightsail) GetInstanceStateRequest(input *GetInstanceStateInput) (req * // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceState -func (c *Lightsail) GetInstanceState(input *GetInstanceStateInput) (*GetInstanceStateOutput, error) { - req, out := c.GetInstanceStateRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceMetricData +func (c *Lightsail) GetInstanceMetricData(input *GetInstanceMetricDataInput) (*GetInstanceMetricDataOutput, error) { + req, out := c.GetInstanceMetricDataRequest(input) return out, req.Send() } -// GetInstanceStateWithContext is the same as GetInstanceState with the addition of +// GetInstanceMetricDataWithContext is the same as GetInstanceMetricData with the addition of // the ability to pass a context and additional request options. // -// See GetInstanceState for details on how to use this API operation. +// See GetInstanceMetricData for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Lightsail) GetInstanceStateWithContext(ctx aws.Context, input *GetInstanceStateInput, opts ...request.Option) (*GetInstanceStateOutput, error) { - req, out := c.GetInstanceStateRequest(input) +func (c *Lightsail) GetInstanceMetricDataWithContext(ctx aws.Context, input *GetInstanceMetricDataInput, opts ...request.Option) (*GetInstanceMetricDataOutput, error) { + req, out := c.GetInstanceMetricDataRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetInstances = "GetInstances" +const opGetInstancePortStates = "GetInstancePortStates" -// GetInstancesRequest generates a "aws/request.Request" representing the -// client's request for the GetInstances operation. The "output" return +// GetInstancePortStatesRequest generates a "aws/request.Request" representing the +// client's request for the GetInstancePortStates operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetInstances for more information on using the GetInstances +// See GetInstancePortStates for more information on using the GetInstancePortStates // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetInstancesRequest method. -// req, resp := client.GetInstancesRequest(params) +// // Example sending a request using the GetInstancePortStatesRequest method. +// req, resp := client.GetInstancePortStatesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstances -func (c *Lightsail) GetInstancesRequest(input *GetInstancesInput) (req *request.Request, output *GetInstancesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstancePortStates +func (c *Lightsail) GetInstancePortStatesRequest(input *GetInstancePortStatesInput) (req *request.Request, output *GetInstancePortStatesOutput) { op := &request.Operation{ - Name: opGetInstances, + Name: opGetInstancePortStates, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetInstancesInput{} + input = &GetInstancePortStatesInput{} } - output = &GetInstancesOutput{} + output = &GetInstancePortStatesOutput{} req = c.newRequest(op, input, output) return } -// GetInstances API operation for Amazon Lightsail. +// GetInstancePortStates API operation for Amazon Lightsail. // -// Returns information about all Amazon Lightsail virtual private servers, or -// instances. +// Returns the firewall port states for a specific Amazon Lightsail instance, +// the IP addresses allowed to connect to the instance through the ports, and +// the protocol. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetInstances for usage and error information. +// API operation GetInstancePortStates for usage and error information. 
// // Returned Error Types: // * ServiceException @@ -7217,80 +7947,80 @@ func (c *Lightsail) GetInstancesRequest(input *GetInstancesInput) (req *request. // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstances -func (c *Lightsail) GetInstances(input *GetInstancesInput) (*GetInstancesOutput, error) { - req, out := c.GetInstancesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstancePortStates +func (c *Lightsail) GetInstancePortStates(input *GetInstancePortStatesInput) (*GetInstancePortStatesOutput, error) { + req, out := c.GetInstancePortStatesRequest(input) return out, req.Send() } -// GetInstancesWithContext is the same as GetInstances with the addition of +// GetInstancePortStatesWithContext is the same as GetInstancePortStates with the addition of // the ability to pass a context and additional request options. // -// See GetInstances for details on how to use this API operation. +// See GetInstancePortStates for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetInstancesWithContext(ctx aws.Context, input *GetInstancesInput, opts ...request.Option) (*GetInstancesOutput, error) { - req, out := c.GetInstancesRequest(input) +func (c *Lightsail) GetInstancePortStatesWithContext(ctx aws.Context, input *GetInstancePortStatesInput, opts ...request.Option) (*GetInstancePortStatesOutput, error) { + req, out := c.GetInstancePortStatesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetKeyPair = "GetKeyPair" +const opGetInstanceSnapshot = "GetInstanceSnapshot" -// GetKeyPairRequest generates a "aws/request.Request" representing the -// client's request for the GetKeyPair operation. The "output" return +// GetInstanceSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the GetInstanceSnapshot operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetKeyPair for more information on using the GetKeyPair +// See GetInstanceSnapshot for more information on using the GetInstanceSnapshot // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetKeyPairRequest method. -// req, resp := client.GetKeyPairRequest(params) +// // Example sending a request using the GetInstanceSnapshotRequest method. 
+// req, resp := client.GetInstanceSnapshotRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetKeyPair -func (c *Lightsail) GetKeyPairRequest(input *GetKeyPairInput) (req *request.Request, output *GetKeyPairOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceSnapshot +func (c *Lightsail) GetInstanceSnapshotRequest(input *GetInstanceSnapshotInput) (req *request.Request, output *GetInstanceSnapshotOutput) { op := &request.Operation{ - Name: opGetKeyPair, + Name: opGetInstanceSnapshot, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetKeyPairInput{} + input = &GetInstanceSnapshotInput{} } - output = &GetKeyPairOutput{} + output = &GetInstanceSnapshotOutput{} req = c.newRequest(op, input, output) return } -// GetKeyPair API operation for Amazon Lightsail. +// GetInstanceSnapshot API operation for Amazon Lightsail. // -// Returns information about a specific key pair. +// Returns information about a specific instance snapshot. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetKeyPair for usage and error information. +// API operation GetInstanceSnapshot for usage and error information. // // Returned Error Types: // * ServiceException @@ -7321,80 +8051,80 @@ func (c *Lightsail) GetKeyPairRequest(input *GetKeyPairInput) (req *request.Requ // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetKeyPair -func (c *Lightsail) GetKeyPair(input *GetKeyPairInput) (*GetKeyPairOutput, error) { - req, out := c.GetKeyPairRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceSnapshot +func (c *Lightsail) GetInstanceSnapshot(input *GetInstanceSnapshotInput) (*GetInstanceSnapshotOutput, error) { + req, out := c.GetInstanceSnapshotRequest(input) return out, req.Send() } -// GetKeyPairWithContext is the same as GetKeyPair with the addition of +// GetInstanceSnapshotWithContext is the same as GetInstanceSnapshot with the addition of // the ability to pass a context and additional request options. // -// See GetKeyPair for details on how to use this API operation. +// See GetInstanceSnapshot for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetKeyPairWithContext(ctx aws.Context, input *GetKeyPairInput, opts ...request.Option) (*GetKeyPairOutput, error) { - req, out := c.GetKeyPairRequest(input) +func (c *Lightsail) GetInstanceSnapshotWithContext(ctx aws.Context, input *GetInstanceSnapshotInput, opts ...request.Option) (*GetInstanceSnapshotOutput, error) { + req, out := c.GetInstanceSnapshotRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetKeyPairs = "GetKeyPairs" +const opGetInstanceSnapshots = "GetInstanceSnapshots" -// GetKeyPairsRequest generates a "aws/request.Request" representing the -// client's request for the GetKeyPairs operation. The "output" return +// GetInstanceSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the GetInstanceSnapshots operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetKeyPairs for more information on using the GetKeyPairs +// See GetInstanceSnapshots for more information on using the GetInstanceSnapshots // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetKeyPairsRequest method. -// req, resp := client.GetKeyPairsRequest(params) +// // Example sending a request using the GetInstanceSnapshotsRequest method. +// req, resp := client.GetInstanceSnapshotsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetKeyPairs -func (c *Lightsail) GetKeyPairsRequest(input *GetKeyPairsInput) (req *request.Request, output *GetKeyPairsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceSnapshots +func (c *Lightsail) GetInstanceSnapshotsRequest(input *GetInstanceSnapshotsInput) (req *request.Request, output *GetInstanceSnapshotsOutput) { op := &request.Operation{ - Name: opGetKeyPairs, + Name: opGetInstanceSnapshots, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetKeyPairsInput{} + input = &GetInstanceSnapshotsInput{} } - output = &GetKeyPairsOutput{} + output = &GetInstanceSnapshotsOutput{} req = c.newRequest(op, input, output) return } -// GetKeyPairs API operation for Amazon Lightsail. +// GetInstanceSnapshots API operation for Amazon Lightsail. // -// Returns information about all key pairs in the user's account. +// Returns all instance snapshots for the user's account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetKeyPairs for usage and error information. +// API operation GetInstanceSnapshots for usage and error information. // // Returned Error Types: // * ServiceException @@ -7425,80 +8155,80 @@ func (c *Lightsail) GetKeyPairsRequest(input *GetKeyPairsInput) (req *request.Re // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetKeyPairs -func (c *Lightsail) GetKeyPairs(input *GetKeyPairsInput) (*GetKeyPairsOutput, error) { - req, out := c.GetKeyPairsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceSnapshots +func (c *Lightsail) GetInstanceSnapshots(input *GetInstanceSnapshotsInput) (*GetInstanceSnapshotsOutput, error) { + req, out := c.GetInstanceSnapshotsRequest(input) return out, req.Send() } -// GetKeyPairsWithContext is the same as GetKeyPairs with the addition of +// GetInstanceSnapshotsWithContext is the same as GetInstanceSnapshots with the addition of // the ability to pass a context and additional request options. // -// See GetKeyPairs for details on how to use this API operation. +// See GetInstanceSnapshots for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetKeyPairsWithContext(ctx aws.Context, input *GetKeyPairsInput, opts ...request.Option) (*GetKeyPairsOutput, error) { - req, out := c.GetKeyPairsRequest(input) +func (c *Lightsail) GetInstanceSnapshotsWithContext(ctx aws.Context, input *GetInstanceSnapshotsInput, opts ...request.Option) (*GetInstanceSnapshotsOutput, error) { + req, out := c.GetInstanceSnapshotsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetLoadBalancer = "GetLoadBalancer" +const opGetInstanceState = "GetInstanceState" -// GetLoadBalancerRequest generates a "aws/request.Request" representing the -// client's request for the GetLoadBalancer operation. The "output" return +// GetInstanceStateRequest generates a "aws/request.Request" representing the +// client's request for the GetInstanceState operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetLoadBalancer for more information on using the GetLoadBalancer +// See GetInstanceState for more information on using the GetInstanceState // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetLoadBalancerRequest method. -// req, resp := client.GetLoadBalancerRequest(params) +// // Example sending a request using the GetInstanceStateRequest method. 
+// req, resp := client.GetInstanceStateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancer -func (c *Lightsail) GetLoadBalancerRequest(input *GetLoadBalancerInput) (req *request.Request, output *GetLoadBalancerOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceState +func (c *Lightsail) GetInstanceStateRequest(input *GetInstanceStateInput) (req *request.Request, output *GetInstanceStateOutput) { op := &request.Operation{ - Name: opGetLoadBalancer, + Name: opGetInstanceState, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetLoadBalancerInput{} + input = &GetInstanceStateInput{} } - output = &GetLoadBalancerOutput{} + output = &GetInstanceStateOutput{} req = c.newRequest(op, input, output) return } -// GetLoadBalancer API operation for Amazon Lightsail. +// GetInstanceState API operation for Amazon Lightsail. // -// Returns information about the specified Lightsail load balancer. +// Returns the state of a specific instance. Works on one instance at a time. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetLoadBalancer for usage and error information. +// API operation GetInstanceState for usage and error information. // // Returned Error Types: // * ServiceException @@ -7529,80 +8259,81 @@ func (c *Lightsail) GetLoadBalancerRequest(input *GetLoadBalancerInput) (req *re // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancer -func (c *Lightsail) GetLoadBalancer(input *GetLoadBalancerInput) (*GetLoadBalancerOutput, error) { - req, out := c.GetLoadBalancerRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstanceState +func (c *Lightsail) GetInstanceState(input *GetInstanceStateInput) (*GetInstanceStateOutput, error) { + req, out := c.GetInstanceStateRequest(input) return out, req.Send() } -// GetLoadBalancerWithContext is the same as GetLoadBalancer with the addition of +// GetInstanceStateWithContext is the same as GetInstanceState with the addition of // the ability to pass a context and additional request options. // -// See GetLoadBalancer for details on how to use this API operation. +// See GetInstanceState for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetLoadBalancerWithContext(ctx aws.Context, input *GetLoadBalancerInput, opts ...request.Option) (*GetLoadBalancerOutput, error) { - req, out := c.GetLoadBalancerRequest(input) +func (c *Lightsail) GetInstanceStateWithContext(ctx aws.Context, input *GetInstanceStateInput, opts ...request.Option) (*GetInstanceStateOutput, error) { + req, out := c.GetInstanceStateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetLoadBalancerMetricData = "GetLoadBalancerMetricData" +const opGetInstances = "GetInstances" -// GetLoadBalancerMetricDataRequest generates a "aws/request.Request" representing the -// client's request for the GetLoadBalancerMetricData operation. The "output" return +// GetInstancesRequest generates a "aws/request.Request" representing the +// client's request for the GetInstances operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetLoadBalancerMetricData for more information on using the GetLoadBalancerMetricData +// See GetInstances for more information on using the GetInstances // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetLoadBalancerMetricDataRequest method. -// req, resp := client.GetLoadBalancerMetricDataRequest(params) +// // Example sending a request using the GetInstancesRequest method. +// req, resp := client.GetInstancesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancerMetricData -func (c *Lightsail) GetLoadBalancerMetricDataRequest(input *GetLoadBalancerMetricDataInput) (req *request.Request, output *GetLoadBalancerMetricDataOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstances +func (c *Lightsail) GetInstancesRequest(input *GetInstancesInput) (req *request.Request, output *GetInstancesOutput) { op := &request.Operation{ - Name: opGetLoadBalancerMetricData, + Name: opGetInstances, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetLoadBalancerMetricDataInput{} + input = &GetInstancesInput{} } - output = &GetLoadBalancerMetricDataOutput{} + output = &GetInstancesOutput{} req = c.newRequest(op, input, output) return } -// GetLoadBalancerMetricData API operation for Amazon Lightsail. +// GetInstances API operation for Amazon Lightsail. // -// Returns information about health metrics for your Lightsail load balancer. +// Returns information about all Amazon Lightsail virtual private servers, or +// instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetLoadBalancerMetricData for usage and error information. +// API operation GetInstances for usage and error information. // // Returned Error Types: // * ServiceException @@ -7633,86 +8364,80 @@ func (c *Lightsail) GetLoadBalancerMetricDataRequest(input *GetLoadBalancerMetri // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancerMetricData -func (c *Lightsail) GetLoadBalancerMetricData(input *GetLoadBalancerMetricDataInput) (*GetLoadBalancerMetricDataOutput, error) { - req, out := c.GetLoadBalancerMetricDataRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetInstances +func (c *Lightsail) GetInstances(input *GetInstancesInput) (*GetInstancesOutput, error) { + req, out := c.GetInstancesRequest(input) return out, req.Send() } -// GetLoadBalancerMetricDataWithContext is the same as GetLoadBalancerMetricData with the addition of +// GetInstancesWithContext is the same as GetInstances with the addition of // the ability to pass a context and additional request options. // -// See GetLoadBalancerMetricData for details on how to use this API operation. +// See GetInstances for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetLoadBalancerMetricDataWithContext(ctx aws.Context, input *GetLoadBalancerMetricDataInput, opts ...request.Option) (*GetLoadBalancerMetricDataOutput, error) { - req, out := c.GetLoadBalancerMetricDataRequest(input) +func (c *Lightsail) GetInstancesWithContext(ctx aws.Context, input *GetInstancesInput, opts ...request.Option) (*GetInstancesOutput, error) { + req, out := c.GetInstancesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetLoadBalancerTlsCertificates = "GetLoadBalancerTlsCertificates" +const opGetKeyPair = "GetKeyPair" -// GetLoadBalancerTlsCertificatesRequest generates a "aws/request.Request" representing the -// client's request for the GetLoadBalancerTlsCertificates operation. The "output" return +// GetKeyPairRequest generates a "aws/request.Request" representing the +// client's request for the GetKeyPair operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetLoadBalancerTlsCertificates for more information on using the GetLoadBalancerTlsCertificates +// See GetKeyPair for more information on using the GetKeyPair // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetLoadBalancerTlsCertificatesRequest method. -// req, resp := client.GetLoadBalancerTlsCertificatesRequest(params) +// // Example sending a request using the GetKeyPairRequest method. 
+// req, resp := client.GetKeyPairRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancerTlsCertificates -func (c *Lightsail) GetLoadBalancerTlsCertificatesRequest(input *GetLoadBalancerTlsCertificatesInput) (req *request.Request, output *GetLoadBalancerTlsCertificatesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetKeyPair +func (c *Lightsail) GetKeyPairRequest(input *GetKeyPairInput) (req *request.Request, output *GetKeyPairOutput) { op := &request.Operation{ - Name: opGetLoadBalancerTlsCertificates, + Name: opGetKeyPair, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetLoadBalancerTlsCertificatesInput{} + input = &GetKeyPairInput{} } - output = &GetLoadBalancerTlsCertificatesOutput{} + output = &GetKeyPairOutput{} req = c.newRequest(op, input, output) return } -// GetLoadBalancerTlsCertificates API operation for Amazon Lightsail. -// -// Returns information about the TLS certificates that are associated with the -// specified Lightsail load balancer. -// -// TLS is just an updated, more secure version of Secure Socket Layer (SSL). +// GetKeyPair API operation for Amazon Lightsail. // -// You can have a maximum of 2 certificates associated with a Lightsail load -// balancer. One is active and the other is inactive. +// Returns information about a specific key pair. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetLoadBalancerTlsCertificates for usage and error information. +// API operation GetKeyPair for usage and error information. // // Returned Error Types: // * ServiceException @@ -7743,80 +8468,80 @@ func (c *Lightsail) GetLoadBalancerTlsCertificatesRequest(input *GetLoadBalancer // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancerTlsCertificates -func (c *Lightsail) GetLoadBalancerTlsCertificates(input *GetLoadBalancerTlsCertificatesInput) (*GetLoadBalancerTlsCertificatesOutput, error) { - req, out := c.GetLoadBalancerTlsCertificatesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetKeyPair +func (c *Lightsail) GetKeyPair(input *GetKeyPairInput) (*GetKeyPairOutput, error) { + req, out := c.GetKeyPairRequest(input) return out, req.Send() } -// GetLoadBalancerTlsCertificatesWithContext is the same as GetLoadBalancerTlsCertificates with the addition of +// GetKeyPairWithContext is the same as GetKeyPair with the addition of // the ability to pass a context and additional request options. // -// See GetLoadBalancerTlsCertificates for details on how to use this API operation. +// See GetKeyPair for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Lightsail) GetLoadBalancerTlsCertificatesWithContext(ctx aws.Context, input *GetLoadBalancerTlsCertificatesInput, opts ...request.Option) (*GetLoadBalancerTlsCertificatesOutput, error) { - req, out := c.GetLoadBalancerTlsCertificatesRequest(input) +func (c *Lightsail) GetKeyPairWithContext(ctx aws.Context, input *GetKeyPairInput, opts ...request.Option) (*GetKeyPairOutput, error) { + req, out := c.GetKeyPairRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetLoadBalancers = "GetLoadBalancers" +const opGetKeyPairs = "GetKeyPairs" -// GetLoadBalancersRequest generates a "aws/request.Request" representing the -// client's request for the GetLoadBalancers operation. The "output" return +// GetKeyPairsRequest generates a "aws/request.Request" representing the +// client's request for the GetKeyPairs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetLoadBalancers for more information on using the GetLoadBalancers +// See GetKeyPairs for more information on using the GetKeyPairs // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetLoadBalancersRequest method. -// req, resp := client.GetLoadBalancersRequest(params) +// // Example sending a request using the GetKeyPairsRequest method. +// req, resp := client.GetKeyPairsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancers -func (c *Lightsail) GetLoadBalancersRequest(input *GetLoadBalancersInput) (req *request.Request, output *GetLoadBalancersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetKeyPairs +func (c *Lightsail) GetKeyPairsRequest(input *GetKeyPairsInput) (req *request.Request, output *GetKeyPairsOutput) { op := &request.Operation{ - Name: opGetLoadBalancers, + Name: opGetKeyPairs, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetLoadBalancersInput{} + input = &GetKeyPairsInput{} } - output = &GetLoadBalancersOutput{} + output = &GetKeyPairsOutput{} req = c.newRequest(op, input, output) return } -// GetLoadBalancers API operation for Amazon Lightsail. +// GetKeyPairs API operation for Amazon Lightsail. // -// Returns information about all load balancers in an account. +// Returns information about all key pairs in the user's account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetLoadBalancers for usage and error information. +// API operation GetKeyPairs for usage and error information. // // Returned Error Types: // * ServiceException @@ -7847,82 +8572,80 @@ func (c *Lightsail) GetLoadBalancersRequest(input *GetLoadBalancersInput) (req * // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancers -func (c *Lightsail) GetLoadBalancers(input *GetLoadBalancersInput) (*GetLoadBalancersOutput, error) { - req, out := c.GetLoadBalancersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetKeyPairs +func (c *Lightsail) GetKeyPairs(input *GetKeyPairsInput) (*GetKeyPairsOutput, error) { + req, out := c.GetKeyPairsRequest(input) return out, req.Send() } -// GetLoadBalancersWithContext is the same as GetLoadBalancers with the addition of +// GetKeyPairsWithContext is the same as GetKeyPairs with the addition of // the ability to pass a context and additional request options. // -// See GetLoadBalancers for details on how to use this API operation. +// See GetKeyPairs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetLoadBalancersWithContext(ctx aws.Context, input *GetLoadBalancersInput, opts ...request.Option) (*GetLoadBalancersOutput, error) { - req, out := c.GetLoadBalancersRequest(input) +func (c *Lightsail) GetKeyPairsWithContext(ctx aws.Context, input *GetKeyPairsInput, opts ...request.Option) (*GetKeyPairsOutput, error) { + req, out := c.GetKeyPairsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetOperation = "GetOperation" +const opGetLoadBalancer = "GetLoadBalancer" -// GetOperationRequest generates a "aws/request.Request" representing the -// client's request for the GetOperation operation. The "output" return +// GetLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the GetLoadBalancer operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetOperation for more information on using the GetOperation +// See GetLoadBalancer for more information on using the GetLoadBalancer // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetOperationRequest method. -// req, resp := client.GetOperationRequest(params) +// // Example sending a request using the GetLoadBalancerRequest method. 
+// req, resp := client.GetLoadBalancerRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperation -func (c *Lightsail) GetOperationRequest(input *GetOperationInput) (req *request.Request, output *GetOperationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancer +func (c *Lightsail) GetLoadBalancerRequest(input *GetLoadBalancerInput) (req *request.Request, output *GetLoadBalancerOutput) { op := &request.Operation{ - Name: opGetOperation, + Name: opGetLoadBalancer, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetOperationInput{} + input = &GetLoadBalancerInput{} } - output = &GetOperationOutput{} + output = &GetLoadBalancerOutput{} req = c.newRequest(op, input, output) return } -// GetOperation API operation for Amazon Lightsail. +// GetLoadBalancer API operation for Amazon Lightsail. // -// Returns information about a specific operation. Operations include events -// such as when you create an instance, allocate a static IP, attach a static -// IP, and so on. +// Returns information about the specified Lightsail load balancer. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetOperation for usage and error information. +// API operation GetLoadBalancer for usage and error information. // // Returned Error Types: // * ServiceException @@ -7953,84 +8676,84 @@ func (c *Lightsail) GetOperationRequest(input *GetOperationInput) (req *request. // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperation -func (c *Lightsail) GetOperation(input *GetOperationInput) (*GetOperationOutput, error) { - req, out := c.GetOperationRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancer +func (c *Lightsail) GetLoadBalancer(input *GetLoadBalancerInput) (*GetLoadBalancerOutput, error) { + req, out := c.GetLoadBalancerRequest(input) return out, req.Send() } -// GetOperationWithContext is the same as GetOperation with the addition of +// GetLoadBalancerWithContext is the same as GetLoadBalancer with the addition of // the ability to pass a context and additional request options. // -// See GetOperation for details on how to use this API operation. +// See GetLoadBalancer for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetOperationWithContext(ctx aws.Context, input *GetOperationInput, opts ...request.Option) (*GetOperationOutput, error) { - req, out := c.GetOperationRequest(input) +func (c *Lightsail) GetLoadBalancerWithContext(ctx aws.Context, input *GetLoadBalancerInput, opts ...request.Option) (*GetLoadBalancerOutput, error) { + req, out := c.GetLoadBalancerRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opGetOperations = "GetOperations" +const opGetLoadBalancerMetricData = "GetLoadBalancerMetricData" -// GetOperationsRequest generates a "aws/request.Request" representing the -// client's request for the GetOperations operation. The "output" return +// GetLoadBalancerMetricDataRequest generates a "aws/request.Request" representing the +// client's request for the GetLoadBalancerMetricData operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetOperations for more information on using the GetOperations +// See GetLoadBalancerMetricData for more information on using the GetLoadBalancerMetricData // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetOperationsRequest method. -// req, resp := client.GetOperationsRequest(params) +// // Example sending a request using the GetLoadBalancerMetricDataRequest method. +// req, resp := client.GetLoadBalancerMetricDataRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperations -func (c *Lightsail) GetOperationsRequest(input *GetOperationsInput) (req *request.Request, output *GetOperationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancerMetricData +func (c *Lightsail) GetLoadBalancerMetricDataRequest(input *GetLoadBalancerMetricDataInput) (req *request.Request, output *GetLoadBalancerMetricDataOutput) { op := &request.Operation{ - Name: opGetOperations, + Name: opGetLoadBalancerMetricData, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetOperationsInput{} + input = &GetLoadBalancerMetricDataInput{} } - output = &GetOperationsOutput{} + output = &GetLoadBalancerMetricDataOutput{} req = c.newRequest(op, input, output) return } -// GetOperations API operation for Amazon Lightsail. +// GetLoadBalancerMetricData API operation for Amazon Lightsail. // -// Returns information about all operations. +// Returns information about health metrics for your Lightsail load balancer. // -// Results are returned from oldest to newest, up to a maximum of 200. Results -// can be paged by making each subsequent call to GetOperations use the maximum -// (last) statusChangedAt value from the previous request. +// Metrics report the utilization of your resources, and the error counts generated +// by them. Monitor and collect metric data regularly to maintain the reliability, +// availability, and performance of your resources. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetOperations for usage and error information. +// API operation GetLoadBalancerMetricData for usage and error information. 
// // Returned Error Types: // * ServiceException @@ -8061,80 +8784,86 @@ func (c *Lightsail) GetOperationsRequest(input *GetOperationsInput) (req *reques // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperations -func (c *Lightsail) GetOperations(input *GetOperationsInput) (*GetOperationsOutput, error) { - req, out := c.GetOperationsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancerMetricData +func (c *Lightsail) GetLoadBalancerMetricData(input *GetLoadBalancerMetricDataInput) (*GetLoadBalancerMetricDataOutput, error) { + req, out := c.GetLoadBalancerMetricDataRequest(input) return out, req.Send() } -// GetOperationsWithContext is the same as GetOperations with the addition of +// GetLoadBalancerMetricDataWithContext is the same as GetLoadBalancerMetricData with the addition of // the ability to pass a context and additional request options. // -// See GetOperations for details on how to use this API operation. +// See GetLoadBalancerMetricData for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetOperationsWithContext(ctx aws.Context, input *GetOperationsInput, opts ...request.Option) (*GetOperationsOutput, error) { - req, out := c.GetOperationsRequest(input) +func (c *Lightsail) GetLoadBalancerMetricDataWithContext(ctx aws.Context, input *GetLoadBalancerMetricDataInput, opts ...request.Option) (*GetLoadBalancerMetricDataOutput, error) { + req, out := c.GetLoadBalancerMetricDataRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetOperationsForResource = "GetOperationsForResource" +const opGetLoadBalancerTlsCertificates = "GetLoadBalancerTlsCertificates" -// GetOperationsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the GetOperationsForResource operation. The "output" return +// GetLoadBalancerTlsCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the GetLoadBalancerTlsCertificates operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetOperationsForResource for more information on using the GetOperationsForResource +// See GetLoadBalancerTlsCertificates for more information on using the GetLoadBalancerTlsCertificates // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetOperationsForResourceRequest method. -// req, resp := client.GetOperationsForResourceRequest(params) +// // Example sending a request using the GetLoadBalancerTlsCertificatesRequest method. 
+// req, resp := client.GetLoadBalancerTlsCertificatesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperationsForResource -func (c *Lightsail) GetOperationsForResourceRequest(input *GetOperationsForResourceInput) (req *request.Request, output *GetOperationsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancerTlsCertificates +func (c *Lightsail) GetLoadBalancerTlsCertificatesRequest(input *GetLoadBalancerTlsCertificatesInput) (req *request.Request, output *GetLoadBalancerTlsCertificatesOutput) { op := &request.Operation{ - Name: opGetOperationsForResource, + Name: opGetLoadBalancerTlsCertificates, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetOperationsForResourceInput{} + input = &GetLoadBalancerTlsCertificatesInput{} } - output = &GetOperationsForResourceOutput{} + output = &GetLoadBalancerTlsCertificatesOutput{} req = c.newRequest(op, input, output) return } -// GetOperationsForResource API operation for Amazon Lightsail. +// GetLoadBalancerTlsCertificates API operation for Amazon Lightsail. // -// Gets operations for a specific resource (e.g., an instance or a static IP). +// Returns information about the TLS certificates that are associated with the +// specified Lightsail load balancer. +// +// TLS is just an updated, more secure version of Secure Socket Layer (SSL). +// +// You can have a maximum of 2 certificates associated with a Lightsail load +// balancer. One is active and the other is inactive. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetOperationsForResource for usage and error information. +// API operation GetLoadBalancerTlsCertificates for usage and error information. // // Returned Error Types: // * ServiceException @@ -8165,81 +8894,80 @@ func (c *Lightsail) GetOperationsForResourceRequest(input *GetOperationsForResou // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperationsForResource -func (c *Lightsail) GetOperationsForResource(input *GetOperationsForResourceInput) (*GetOperationsForResourceOutput, error) { - req, out := c.GetOperationsForResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancerTlsCertificates +func (c *Lightsail) GetLoadBalancerTlsCertificates(input *GetLoadBalancerTlsCertificatesInput) (*GetLoadBalancerTlsCertificatesOutput, error) { + req, out := c.GetLoadBalancerTlsCertificatesRequest(input) return out, req.Send() } -// GetOperationsForResourceWithContext is the same as GetOperationsForResource with the addition of +// GetLoadBalancerTlsCertificatesWithContext is the same as GetLoadBalancerTlsCertificates with the addition of // the ability to pass a context and additional request options. // -// See GetOperationsForResource for details on how to use this API operation. +// See GetLoadBalancerTlsCertificates for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetOperationsForResourceWithContext(ctx aws.Context, input *GetOperationsForResourceInput, opts ...request.Option) (*GetOperationsForResourceOutput, error) { - req, out := c.GetOperationsForResourceRequest(input) +func (c *Lightsail) GetLoadBalancerTlsCertificatesWithContext(ctx aws.Context, input *GetLoadBalancerTlsCertificatesInput, opts ...request.Option) (*GetLoadBalancerTlsCertificatesOutput, error) { + req, out := c.GetLoadBalancerTlsCertificatesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRegions = "GetRegions" +const opGetLoadBalancers = "GetLoadBalancers" -// GetRegionsRequest generates a "aws/request.Request" representing the -// client's request for the GetRegions operation. The "output" return +// GetLoadBalancersRequest generates a "aws/request.Request" representing the +// client's request for the GetLoadBalancers operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRegions for more information on using the GetRegions +// See GetLoadBalancers for more information on using the GetLoadBalancers // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRegionsRequest method. -// req, resp := client.GetRegionsRequest(params) +// // Example sending a request using the GetLoadBalancersRequest method. +// req, resp := client.GetLoadBalancersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRegions -func (c *Lightsail) GetRegionsRequest(input *GetRegionsInput) (req *request.Request, output *GetRegionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancers +func (c *Lightsail) GetLoadBalancersRequest(input *GetLoadBalancersInput) (req *request.Request, output *GetLoadBalancersOutput) { op := &request.Operation{ - Name: opGetRegions, + Name: opGetLoadBalancers, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRegionsInput{} + input = &GetLoadBalancersInput{} } - output = &GetRegionsOutput{} + output = &GetLoadBalancersOutput{} req = c.newRequest(op, input, output) return } -// GetRegions API operation for Amazon Lightsail. +// GetLoadBalancers API operation for Amazon Lightsail. // -// Returns a list of all valid regions for Amazon Lightsail. Use the include -// availability zones parameter to also return the Availability Zones in a region. +// Returns information about all load balancers in an account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRegions for usage and error information. +// API operation GetLoadBalancers for usage and error information. 
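Illustration only, not part of the vendored file: a minimal sketch of calling the load balancer operations documented above with this SDK version. It assumes default credential resolution, the us-east-1 region, and a hypothetical load balancer named "example-lb". Later snippets in this section reuse this svc client and these imports.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	// Shared session with default credentials; the region is an assumption.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := lightsail.New(sess)

	// List every load balancer in the account.
	lbs, err := svc.GetLoadBalancers(&lightsail.GetLoadBalancersInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, lb := range lbs.LoadBalancers {
		fmt.Println(aws.StringValue(lb.Name), aws.StringValue(lb.DnsName))
	}

	// Inspect the (at most two) TLS certificates attached to one load balancer.
	certs, err := svc.GetLoadBalancerTlsCertificates(&lightsail.GetLoadBalancerTlsCertificatesInput{
		LoadBalancerName: aws.String("example-lb"), // hypothetical name
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, cert := range certs.TlsCertificates {
		fmt.Println(aws.StringValue(cert.Name), aws.BoolValue(cert.IsAttached))
	}
}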
// // Returned Error Types: // * ServiceException @@ -8270,80 +8998,82 @@ func (c *Lightsail) GetRegionsRequest(input *GetRegionsInput) (req *request.Requ // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRegions -func (c *Lightsail) GetRegions(input *GetRegionsInput) (*GetRegionsOutput, error) { - req, out := c.GetRegionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetLoadBalancers +func (c *Lightsail) GetLoadBalancers(input *GetLoadBalancersInput) (*GetLoadBalancersOutput, error) { + req, out := c.GetLoadBalancersRequest(input) return out, req.Send() } -// GetRegionsWithContext is the same as GetRegions with the addition of +// GetLoadBalancersWithContext is the same as GetLoadBalancers with the addition of // the ability to pass a context and additional request options. // -// See GetRegions for details on how to use this API operation. +// See GetLoadBalancers for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetRegionsWithContext(ctx aws.Context, input *GetRegionsInput, opts ...request.Option) (*GetRegionsOutput, error) { - req, out := c.GetRegionsRequest(input) +func (c *Lightsail) GetLoadBalancersWithContext(ctx aws.Context, input *GetLoadBalancersInput, opts ...request.Option) (*GetLoadBalancersOutput, error) { + req, out := c.GetLoadBalancersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRelationalDatabase = "GetRelationalDatabase" +const opGetOperation = "GetOperation" -// GetRelationalDatabaseRequest generates a "aws/request.Request" representing the -// client's request for the GetRelationalDatabase operation. The "output" return +// GetOperationRequest generates a "aws/request.Request" representing the +// client's request for the GetOperation operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRelationalDatabase for more information on using the GetRelationalDatabase +// See GetOperation for more information on using the GetOperation // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRelationalDatabaseRequest method. -// req, resp := client.GetRelationalDatabaseRequest(params) +// // Example sending a request using the GetOperationRequest method. 
+// req, resp := client.GetOperationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabase -func (c *Lightsail) GetRelationalDatabaseRequest(input *GetRelationalDatabaseInput) (req *request.Request, output *GetRelationalDatabaseOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperation +func (c *Lightsail) GetOperationRequest(input *GetOperationInput) (req *request.Request, output *GetOperationOutput) { op := &request.Operation{ - Name: opGetRelationalDatabase, + Name: opGetOperation, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRelationalDatabaseInput{} + input = &GetOperationInput{} } - output = &GetRelationalDatabaseOutput{} + output = &GetOperationOutput{} req = c.newRequest(op, input, output) return } -// GetRelationalDatabase API operation for Amazon Lightsail. +// GetOperation API operation for Amazon Lightsail. // -// Returns information about a specific database in Amazon Lightsail. +// Returns information about a specific operation. Operations include events +// such as when you create an instance, allocate a static IP, attach a static +// IP, and so on. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRelationalDatabase for usage and error information. +// API operation GetOperation for usage and error information. // // Returned Error Types: // * ServiceException @@ -8374,84 +9104,84 @@ func (c *Lightsail) GetRelationalDatabaseRequest(input *GetRelationalDatabaseInp // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabase -func (c *Lightsail) GetRelationalDatabase(input *GetRelationalDatabaseInput) (*GetRelationalDatabaseOutput, error) { - req, out := c.GetRelationalDatabaseRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperation +func (c *Lightsail) GetOperation(input *GetOperationInput) (*GetOperationOutput, error) { + req, out := c.GetOperationRequest(input) return out, req.Send() } -// GetRelationalDatabaseWithContext is the same as GetRelationalDatabase with the addition of +// GetOperationWithContext is the same as GetOperation with the addition of // the ability to pass a context and additional request options. // -// See GetRelationalDatabase for details on how to use this API operation. +// See GetOperation for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
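For illustration, a context-aware lookup of a single operation might look like the sketch below. It reuses the svc client from the earlier sketch, additionally assumes the context and time packages are imported, and uses a placeholder operation ID.

// Look up one operation, cancelling the request if it takes too long.
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()

op, err := svc.GetOperationWithContext(ctx, &lightsail.GetOperationInput{
	OperationId: aws.String("00000000-0000-0000-0000-000000000000"), // placeholder ID
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(aws.StringValue(op.Operation.Status))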
-func (c *Lightsail) GetRelationalDatabaseWithContext(ctx aws.Context, input *GetRelationalDatabaseInput, opts ...request.Option) (*GetRelationalDatabaseOutput, error) { - req, out := c.GetRelationalDatabaseRequest(input) +func (c *Lightsail) GetOperationWithContext(ctx aws.Context, input *GetOperationInput, opts ...request.Option) (*GetOperationOutput, error) { + req, out := c.GetOperationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRelationalDatabaseBlueprints = "GetRelationalDatabaseBlueprints" +const opGetOperations = "GetOperations" -// GetRelationalDatabaseBlueprintsRequest generates a "aws/request.Request" representing the -// client's request for the GetRelationalDatabaseBlueprints operation. The "output" return +// GetOperationsRequest generates a "aws/request.Request" representing the +// client's request for the GetOperations operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRelationalDatabaseBlueprints for more information on using the GetRelationalDatabaseBlueprints +// See GetOperations for more information on using the GetOperations // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRelationalDatabaseBlueprintsRequest method. -// req, resp := client.GetRelationalDatabaseBlueprintsRequest(params) +// // Example sending a request using the GetOperationsRequest method. +// req, resp := client.GetOperationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseBlueprints -func (c *Lightsail) GetRelationalDatabaseBlueprintsRequest(input *GetRelationalDatabaseBlueprintsInput) (req *request.Request, output *GetRelationalDatabaseBlueprintsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperations +func (c *Lightsail) GetOperationsRequest(input *GetOperationsInput) (req *request.Request, output *GetOperationsOutput) { op := &request.Operation{ - Name: opGetRelationalDatabaseBlueprints, + Name: opGetOperations, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRelationalDatabaseBlueprintsInput{} + input = &GetOperationsInput{} } - output = &GetRelationalDatabaseBlueprintsOutput{} + output = &GetOperationsOutput{} req = c.newRequest(op, input, output) return } -// GetRelationalDatabaseBlueprints API operation for Amazon Lightsail. +// GetOperations API operation for Amazon Lightsail. // -// Returns a list of available database blueprints in Amazon Lightsail. A blueprint -// describes the major engine version of a database. +// Returns information about all operations. // -// You can use a blueprint ID to create a new database that runs a specific -// database engine. +// Results are returned from oldest to newest, up to a maximum of 200. Results +// can be paged by making each subsequent call to GetOperations use the maximum +// (last) statusChangedAt value from the previous request. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRelationalDatabaseBlueprints for usage and error information. +// API operation GetOperations for usage and error information. // // Returned Error Types: // * ServiceException @@ -8482,84 +9212,80 @@ func (c *Lightsail) GetRelationalDatabaseBlueprintsRequest(input *GetRelationalD // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseBlueprints -func (c *Lightsail) GetRelationalDatabaseBlueprints(input *GetRelationalDatabaseBlueprintsInput) (*GetRelationalDatabaseBlueprintsOutput, error) { - req, out := c.GetRelationalDatabaseBlueprintsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperations +func (c *Lightsail) GetOperations(input *GetOperationsInput) (*GetOperationsOutput, error) { + req, out := c.GetOperationsRequest(input) return out, req.Send() } -// GetRelationalDatabaseBlueprintsWithContext is the same as GetRelationalDatabaseBlueprints with the addition of +// GetOperationsWithContext is the same as GetOperations with the addition of // the ability to pass a context and additional request options. // -// See GetRelationalDatabaseBlueprints for details on how to use this API operation. +// See GetOperations for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetRelationalDatabaseBlueprintsWithContext(ctx aws.Context, input *GetRelationalDatabaseBlueprintsInput, opts ...request.Option) (*GetRelationalDatabaseBlueprintsOutput, error) { - req, out := c.GetRelationalDatabaseBlueprintsRequest(input) +func (c *Lightsail) GetOperationsWithContext(ctx aws.Context, input *GetOperationsInput, opts ...request.Option) (*GetOperationsOutput, error) { + req, out := c.GetOperationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRelationalDatabaseBundles = "GetRelationalDatabaseBundles" +const opGetOperationsForResource = "GetOperationsForResource" -// GetRelationalDatabaseBundlesRequest generates a "aws/request.Request" representing the -// client's request for the GetRelationalDatabaseBundles operation. The "output" return +// GetOperationsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the GetOperationsForResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRelationalDatabaseBundles for more information on using the GetRelationalDatabaseBundles +// See GetOperationsForResource for more information on using the GetOperationsForResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
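The GetOperations paging behaviour described above can also be driven through the PageToken and NextPageToken fields that this SDK version generates; a rough loop, reusing svc from the first sketch, might be:

// Collect all operations by following NextPageToken until it is empty.
var ops []*lightsail.Operation
input := &lightsail.GetOperationsInput{}
for {
	out, err := svc.GetOperations(input)
	if err != nil {
		log.Fatal(err)
	}
	ops = append(ops, out.Operations...)
	if aws.StringValue(out.NextPageToken) == "" {
		break
	}
	input.PageToken = out.NextPageToken
}
fmt.Println("operations:", len(ops))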
// // -// // Example sending a request using the GetRelationalDatabaseBundlesRequest method. -// req, resp := client.GetRelationalDatabaseBundlesRequest(params) +// // Example sending a request using the GetOperationsForResourceRequest method. +// req, resp := client.GetOperationsForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseBundles -func (c *Lightsail) GetRelationalDatabaseBundlesRequest(input *GetRelationalDatabaseBundlesInput) (req *request.Request, output *GetRelationalDatabaseBundlesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperationsForResource +func (c *Lightsail) GetOperationsForResourceRequest(input *GetOperationsForResourceInput) (req *request.Request, output *GetOperationsForResourceOutput) { op := &request.Operation{ - Name: opGetRelationalDatabaseBundles, + Name: opGetOperationsForResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRelationalDatabaseBundlesInput{} + input = &GetOperationsForResourceInput{} } - output = &GetRelationalDatabaseBundlesOutput{} + output = &GetOperationsForResourceOutput{} req = c.newRequest(op, input, output) return } -// GetRelationalDatabaseBundles API operation for Amazon Lightsail. -// -// Returns the list of bundles that are available in Amazon Lightsail. A bundle -// describes the performance specifications for a database. +// GetOperationsForResource API operation for Amazon Lightsail. // -// You can use a bundle ID to create a new database with explicit performance -// specifications. +// Gets operations for a specific resource (e.g., an instance or a static IP). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRelationalDatabaseBundles for usage and error information. +// API operation GetOperationsForResource for usage and error information. // // Returned Error Types: // * ServiceException @@ -8590,80 +9316,81 @@ func (c *Lightsail) GetRelationalDatabaseBundlesRequest(input *GetRelationalData // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseBundles -func (c *Lightsail) GetRelationalDatabaseBundles(input *GetRelationalDatabaseBundlesInput) (*GetRelationalDatabaseBundlesOutput, error) { - req, out := c.GetRelationalDatabaseBundlesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetOperationsForResource +func (c *Lightsail) GetOperationsForResource(input *GetOperationsForResourceInput) (*GetOperationsForResourceOutput, error) { + req, out := c.GetOperationsForResourceRequest(input) return out, req.Send() } -// GetRelationalDatabaseBundlesWithContext is the same as GetRelationalDatabaseBundles with the addition of +// GetOperationsForResourceWithContext is the same as GetOperationsForResource with the addition of // the ability to pass a context and additional request options. // -// See GetRelationalDatabaseBundles for details on how to use this API operation. +// See GetOperationsForResource for details on how to use this API operation. 
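As an illustrative snippet for the per-resource variant above (reusing svc; the instance name is hypothetical), operations scoped to a single resource can be fetched like so:

// Operations recorded against one resource, e.g. an instance or a static IP.
out, err := svc.GetOperationsForResource(&lightsail.GetOperationsForResourceInput{
	ResourceName: aws.String("example-instance"), // hypothetical resource name
})
if err != nil {
	log.Fatal(err)
}
for _, op := range out.Operations {
	fmt.Println(aws.StringValue(op.OperationType), aws.StringValue(op.Status))
}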
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetRelationalDatabaseBundlesWithContext(ctx aws.Context, input *GetRelationalDatabaseBundlesInput, opts ...request.Option) (*GetRelationalDatabaseBundlesOutput, error) { - req, out := c.GetRelationalDatabaseBundlesRequest(input) +func (c *Lightsail) GetOperationsForResourceWithContext(ctx aws.Context, input *GetOperationsForResourceInput, opts ...request.Option) (*GetOperationsForResourceOutput, error) { + req, out := c.GetOperationsForResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRelationalDatabaseEvents = "GetRelationalDatabaseEvents" +const opGetRegions = "GetRegions" -// GetRelationalDatabaseEventsRequest generates a "aws/request.Request" representing the -// client's request for the GetRelationalDatabaseEvents operation. The "output" return +// GetRegionsRequest generates a "aws/request.Request" representing the +// client's request for the GetRegions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRelationalDatabaseEvents for more information on using the GetRelationalDatabaseEvents +// See GetRegions for more information on using the GetRegions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRelationalDatabaseEventsRequest method. -// req, resp := client.GetRelationalDatabaseEventsRequest(params) +// // Example sending a request using the GetRegionsRequest method. +// req, resp := client.GetRegionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseEvents -func (c *Lightsail) GetRelationalDatabaseEventsRequest(input *GetRelationalDatabaseEventsInput) (req *request.Request, output *GetRelationalDatabaseEventsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRegions +func (c *Lightsail) GetRegionsRequest(input *GetRegionsInput) (req *request.Request, output *GetRegionsOutput) { op := &request.Operation{ - Name: opGetRelationalDatabaseEvents, + Name: opGetRegions, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRelationalDatabaseEventsInput{} + input = &GetRegionsInput{} } - output = &GetRelationalDatabaseEventsOutput{} + output = &GetRegionsOutput{} req = c.newRequest(op, input, output) return } -// GetRelationalDatabaseEvents API operation for Amazon Lightsail. +// GetRegions API operation for Amazon Lightsail. // -// Returns a list of events for a specific database in Amazon Lightsail. +// Returns a list of all valid regions for Amazon Lightsail. Use the include +// availability zones parameter to also return the Availability Zones in a region. // // Returns awserr.Error for service API and SDK errors. 
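A small sketch of the region listing described above, including Availability Zones, again reusing svc:

// List Lightsail regions and count each region's Availability Zones.
regions, err := svc.GetRegions(&lightsail.GetRegionsInput{
	IncludeAvailabilityZones: aws.Bool(true),
})
if err != nil {
	log.Fatal(err)
}
for _, r := range regions.Regions {
	fmt.Println(aws.StringValue(r.Name), len(r.AvailabilityZones))
}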
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRelationalDatabaseEvents for usage and error information. +// API operation GetRegions for usage and error information. // // Returned Error Types: // * ServiceException @@ -8694,80 +9421,80 @@ func (c *Lightsail) GetRelationalDatabaseEventsRequest(input *GetRelationalDatab // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseEvents -func (c *Lightsail) GetRelationalDatabaseEvents(input *GetRelationalDatabaseEventsInput) (*GetRelationalDatabaseEventsOutput, error) { - req, out := c.GetRelationalDatabaseEventsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRegions +func (c *Lightsail) GetRegions(input *GetRegionsInput) (*GetRegionsOutput, error) { + req, out := c.GetRegionsRequest(input) return out, req.Send() } -// GetRelationalDatabaseEventsWithContext is the same as GetRelationalDatabaseEvents with the addition of +// GetRegionsWithContext is the same as GetRegions with the addition of // the ability to pass a context and additional request options. // -// See GetRelationalDatabaseEvents for details on how to use this API operation. +// See GetRegions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetRelationalDatabaseEventsWithContext(ctx aws.Context, input *GetRelationalDatabaseEventsInput, opts ...request.Option) (*GetRelationalDatabaseEventsOutput, error) { - req, out := c.GetRelationalDatabaseEventsRequest(input) +func (c *Lightsail) GetRegionsWithContext(ctx aws.Context, input *GetRegionsInput, opts ...request.Option) (*GetRegionsOutput, error) { + req, out := c.GetRegionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRelationalDatabaseLogEvents = "GetRelationalDatabaseLogEvents" +const opGetRelationalDatabase = "GetRelationalDatabase" -// GetRelationalDatabaseLogEventsRequest generates a "aws/request.Request" representing the -// client's request for the GetRelationalDatabaseLogEvents operation. The "output" return +// GetRelationalDatabaseRequest generates a "aws/request.Request" representing the +// client's request for the GetRelationalDatabase operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRelationalDatabaseLogEvents for more information on using the GetRelationalDatabaseLogEvents +// See GetRelationalDatabase for more information on using the GetRelationalDatabase // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRelationalDatabaseLogEventsRequest method. 
-// req, resp := client.GetRelationalDatabaseLogEventsRequest(params) +// // Example sending a request using the GetRelationalDatabaseRequest method. +// req, resp := client.GetRelationalDatabaseRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseLogEvents -func (c *Lightsail) GetRelationalDatabaseLogEventsRequest(input *GetRelationalDatabaseLogEventsInput) (req *request.Request, output *GetRelationalDatabaseLogEventsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabase +func (c *Lightsail) GetRelationalDatabaseRequest(input *GetRelationalDatabaseInput) (req *request.Request, output *GetRelationalDatabaseOutput) { op := &request.Operation{ - Name: opGetRelationalDatabaseLogEvents, + Name: opGetRelationalDatabase, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRelationalDatabaseLogEventsInput{} + input = &GetRelationalDatabaseInput{} } - output = &GetRelationalDatabaseLogEventsOutput{} + output = &GetRelationalDatabaseOutput{} req = c.newRequest(op, input, output) return } -// GetRelationalDatabaseLogEvents API operation for Amazon Lightsail. +// GetRelationalDatabase API operation for Amazon Lightsail. // -// Returns a list of log events for a database in Amazon Lightsail. +// Returns information about a specific database in Amazon Lightsail. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRelationalDatabaseLogEvents for usage and error information. +// API operation GetRelationalDatabase for usage and error information. // // Returned Error Types: // * ServiceException @@ -8798,81 +9525,84 @@ func (c *Lightsail) GetRelationalDatabaseLogEventsRequest(input *GetRelationalDa // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseLogEvents -func (c *Lightsail) GetRelationalDatabaseLogEvents(input *GetRelationalDatabaseLogEventsInput) (*GetRelationalDatabaseLogEventsOutput, error) { - req, out := c.GetRelationalDatabaseLogEventsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabase +func (c *Lightsail) GetRelationalDatabase(input *GetRelationalDatabaseInput) (*GetRelationalDatabaseOutput, error) { + req, out := c.GetRelationalDatabaseRequest(input) return out, req.Send() } -// GetRelationalDatabaseLogEventsWithContext is the same as GetRelationalDatabaseLogEvents with the addition of +// GetRelationalDatabaseWithContext is the same as GetRelationalDatabase with the addition of // the ability to pass a context and additional request options. // -// See GetRelationalDatabaseLogEvents for details on how to use this API operation. +// See GetRelationalDatabase for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
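For the database lookup documented above, a minimal sketch (reusing svc; the database name is hypothetical) could be:

// Describe a single Lightsail database.
db, err := svc.GetRelationalDatabase(&lightsail.GetRelationalDatabaseInput{
	RelationalDatabaseName: aws.String("example-db"), // hypothetical name
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(aws.StringValue(db.RelationalDatabase.Engine),
	aws.StringValue(db.RelationalDatabase.State))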
-func (c *Lightsail) GetRelationalDatabaseLogEventsWithContext(ctx aws.Context, input *GetRelationalDatabaseLogEventsInput, opts ...request.Option) (*GetRelationalDatabaseLogEventsOutput, error) { - req, out := c.GetRelationalDatabaseLogEventsRequest(input) +func (c *Lightsail) GetRelationalDatabaseWithContext(ctx aws.Context, input *GetRelationalDatabaseInput, opts ...request.Option) (*GetRelationalDatabaseOutput, error) { + req, out := c.GetRelationalDatabaseRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRelationalDatabaseLogStreams = "GetRelationalDatabaseLogStreams" +const opGetRelationalDatabaseBlueprints = "GetRelationalDatabaseBlueprints" -// GetRelationalDatabaseLogStreamsRequest generates a "aws/request.Request" representing the -// client's request for the GetRelationalDatabaseLogStreams operation. The "output" return +// GetRelationalDatabaseBlueprintsRequest generates a "aws/request.Request" representing the +// client's request for the GetRelationalDatabaseBlueprints operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRelationalDatabaseLogStreams for more information on using the GetRelationalDatabaseLogStreams +// See GetRelationalDatabaseBlueprints for more information on using the GetRelationalDatabaseBlueprints // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRelationalDatabaseLogStreamsRequest method. -// req, resp := client.GetRelationalDatabaseLogStreamsRequest(params) +// // Example sending a request using the GetRelationalDatabaseBlueprintsRequest method. +// req, resp := client.GetRelationalDatabaseBlueprintsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseLogStreams -func (c *Lightsail) GetRelationalDatabaseLogStreamsRequest(input *GetRelationalDatabaseLogStreamsInput) (req *request.Request, output *GetRelationalDatabaseLogStreamsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseBlueprints +func (c *Lightsail) GetRelationalDatabaseBlueprintsRequest(input *GetRelationalDatabaseBlueprintsInput) (req *request.Request, output *GetRelationalDatabaseBlueprintsOutput) { op := &request.Operation{ - Name: opGetRelationalDatabaseLogStreams, + Name: opGetRelationalDatabaseBlueprints, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRelationalDatabaseLogStreamsInput{} + input = &GetRelationalDatabaseBlueprintsInput{} } - output = &GetRelationalDatabaseLogStreamsOutput{} + output = &GetRelationalDatabaseBlueprintsOutput{} req = c.newRequest(op, input, output) return } -// GetRelationalDatabaseLogStreams API operation for Amazon Lightsail. +// GetRelationalDatabaseBlueprints API operation for Amazon Lightsail. // -// Returns a list of available log streams for a specific database in Amazon -// Lightsail. +// Returns a list of available database blueprints in Amazon Lightsail. A blueprint +// describes the major engine version of a database. 
+// +// You can use a blueprint ID to create a new database that runs a specific +// database engine. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRelationalDatabaseLogStreams for usage and error information. +// API operation GetRelationalDatabaseBlueprints for usage and error information. // // Returned Error Types: // * ServiceException @@ -8903,84 +9633,84 @@ func (c *Lightsail) GetRelationalDatabaseLogStreamsRequest(input *GetRelationalD // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseLogStreams -func (c *Lightsail) GetRelationalDatabaseLogStreams(input *GetRelationalDatabaseLogStreamsInput) (*GetRelationalDatabaseLogStreamsOutput, error) { - req, out := c.GetRelationalDatabaseLogStreamsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseBlueprints +func (c *Lightsail) GetRelationalDatabaseBlueprints(input *GetRelationalDatabaseBlueprintsInput) (*GetRelationalDatabaseBlueprintsOutput, error) { + req, out := c.GetRelationalDatabaseBlueprintsRequest(input) return out, req.Send() } -// GetRelationalDatabaseLogStreamsWithContext is the same as GetRelationalDatabaseLogStreams with the addition of +// GetRelationalDatabaseBlueprintsWithContext is the same as GetRelationalDatabaseBlueprints with the addition of // the ability to pass a context and additional request options. // -// See GetRelationalDatabaseLogStreams for details on how to use this API operation. +// See GetRelationalDatabaseBlueprints for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetRelationalDatabaseLogStreamsWithContext(ctx aws.Context, input *GetRelationalDatabaseLogStreamsInput, opts ...request.Option) (*GetRelationalDatabaseLogStreamsOutput, error) { - req, out := c.GetRelationalDatabaseLogStreamsRequest(input) +func (c *Lightsail) GetRelationalDatabaseBlueprintsWithContext(ctx aws.Context, input *GetRelationalDatabaseBlueprintsInput, opts ...request.Option) (*GetRelationalDatabaseBlueprintsOutput, error) { + req, out := c.GetRelationalDatabaseBlueprintsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRelationalDatabaseMasterUserPassword = "GetRelationalDatabaseMasterUserPassword" +const opGetRelationalDatabaseBundles = "GetRelationalDatabaseBundles" -// GetRelationalDatabaseMasterUserPasswordRequest generates a "aws/request.Request" representing the -// client's request for the GetRelationalDatabaseMasterUserPassword operation. The "output" return +// GetRelationalDatabaseBundlesRequest generates a "aws/request.Request" representing the +// client's request for the GetRelationalDatabaseBundles operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See GetRelationalDatabaseMasterUserPassword for more information on using the GetRelationalDatabaseMasterUserPassword +// See GetRelationalDatabaseBundles for more information on using the GetRelationalDatabaseBundles // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRelationalDatabaseMasterUserPasswordRequest method. -// req, resp := client.GetRelationalDatabaseMasterUserPasswordRequest(params) +// // Example sending a request using the GetRelationalDatabaseBundlesRequest method. +// req, resp := client.GetRelationalDatabaseBundlesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseMasterUserPassword -func (c *Lightsail) GetRelationalDatabaseMasterUserPasswordRequest(input *GetRelationalDatabaseMasterUserPasswordInput) (req *request.Request, output *GetRelationalDatabaseMasterUserPasswordOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseBundles +func (c *Lightsail) GetRelationalDatabaseBundlesRequest(input *GetRelationalDatabaseBundlesInput) (req *request.Request, output *GetRelationalDatabaseBundlesOutput) { op := &request.Operation{ - Name: opGetRelationalDatabaseMasterUserPassword, + Name: opGetRelationalDatabaseBundles, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRelationalDatabaseMasterUserPasswordInput{} + input = &GetRelationalDatabaseBundlesInput{} } - output = &GetRelationalDatabaseMasterUserPasswordOutput{} + output = &GetRelationalDatabaseBundlesOutput{} req = c.newRequest(op, input, output) return } -// GetRelationalDatabaseMasterUserPassword API operation for Amazon Lightsail. +// GetRelationalDatabaseBundles API operation for Amazon Lightsail. // -// Returns the current, previous, or pending versions of the master user password -// for a Lightsail database. +// Returns the list of bundles that are available in Amazon Lightsail. A bundle +// describes the performance specifications for a database. // -// The GetRelationalDatabaseMasterUserPassword operation supports tag-based -// access control via resource tags applied to the resource identified by relationalDatabaseName. +// You can use a bundle ID to create a new database with explicit performance +// specifications. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRelationalDatabaseMasterUserPassword for usage and error information. +// API operation GetRelationalDatabaseBundles for usage and error information. // // Returned Error Types: // * ServiceException @@ -9011,81 +9741,80 @@ func (c *Lightsail) GetRelationalDatabaseMasterUserPasswordRequest(input *GetRel // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseMasterUserPassword -func (c *Lightsail) GetRelationalDatabaseMasterUserPassword(input *GetRelationalDatabaseMasterUserPasswordInput) (*GetRelationalDatabaseMasterUserPasswordOutput, error) { - req, out := c.GetRelationalDatabaseMasterUserPasswordRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseBundles +func (c *Lightsail) GetRelationalDatabaseBundles(input *GetRelationalDatabaseBundlesInput) (*GetRelationalDatabaseBundlesOutput, error) { + req, out := c.GetRelationalDatabaseBundlesRequest(input) return out, req.Send() } -// GetRelationalDatabaseMasterUserPasswordWithContext is the same as GetRelationalDatabaseMasterUserPassword with the addition of +// GetRelationalDatabaseBundlesWithContext is the same as GetRelationalDatabaseBundles with the addition of // the ability to pass a context and additional request options. // -// See GetRelationalDatabaseMasterUserPassword for details on how to use this API operation. +// See GetRelationalDatabaseBundles for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetRelationalDatabaseMasterUserPasswordWithContext(ctx aws.Context, input *GetRelationalDatabaseMasterUserPasswordInput, opts ...request.Option) (*GetRelationalDatabaseMasterUserPasswordOutput, error) { - req, out := c.GetRelationalDatabaseMasterUserPasswordRequest(input) +func (c *Lightsail) GetRelationalDatabaseBundlesWithContext(ctx aws.Context, input *GetRelationalDatabaseBundlesInput, opts ...request.Option) (*GetRelationalDatabaseBundlesOutput, error) { + req, out := c.GetRelationalDatabaseBundlesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRelationalDatabaseMetricData = "GetRelationalDatabaseMetricData" +const opGetRelationalDatabaseEvents = "GetRelationalDatabaseEvents" -// GetRelationalDatabaseMetricDataRequest generates a "aws/request.Request" representing the -// client's request for the GetRelationalDatabaseMetricData operation. The "output" return +// GetRelationalDatabaseEventsRequest generates a "aws/request.Request" representing the +// client's request for the GetRelationalDatabaseEvents operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRelationalDatabaseMetricData for more information on using the GetRelationalDatabaseMetricData +// See GetRelationalDatabaseEvents for more information on using the GetRelationalDatabaseEvents // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRelationalDatabaseMetricDataRequest method. -// req, resp := client.GetRelationalDatabaseMetricDataRequest(params) +// // Example sending a request using the GetRelationalDatabaseEventsRequest method. 
+// req, resp := client.GetRelationalDatabaseEventsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseMetricData -func (c *Lightsail) GetRelationalDatabaseMetricDataRequest(input *GetRelationalDatabaseMetricDataInput) (req *request.Request, output *GetRelationalDatabaseMetricDataOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseEvents +func (c *Lightsail) GetRelationalDatabaseEventsRequest(input *GetRelationalDatabaseEventsInput) (req *request.Request, output *GetRelationalDatabaseEventsOutput) { op := &request.Operation{ - Name: opGetRelationalDatabaseMetricData, + Name: opGetRelationalDatabaseEvents, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRelationalDatabaseMetricDataInput{} + input = &GetRelationalDatabaseEventsInput{} } - output = &GetRelationalDatabaseMetricDataOutput{} + output = &GetRelationalDatabaseEventsOutput{} req = c.newRequest(op, input, output) return } -// GetRelationalDatabaseMetricData API operation for Amazon Lightsail. +// GetRelationalDatabaseEvents API operation for Amazon Lightsail. // -// Returns the data points of the specified metric for a database in Amazon -// Lightsail. +// Returns a list of events for a specific database in Amazon Lightsail. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRelationalDatabaseMetricData for usage and error information. +// API operation GetRelationalDatabaseEvents for usage and error information. // // Returned Error Types: // * ServiceException @@ -9116,86 +9845,80 @@ func (c *Lightsail) GetRelationalDatabaseMetricDataRequest(input *GetRelationalD // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseMetricData -func (c *Lightsail) GetRelationalDatabaseMetricData(input *GetRelationalDatabaseMetricDataInput) (*GetRelationalDatabaseMetricDataOutput, error) { - req, out := c.GetRelationalDatabaseMetricDataRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseEvents +func (c *Lightsail) GetRelationalDatabaseEvents(input *GetRelationalDatabaseEventsInput) (*GetRelationalDatabaseEventsOutput, error) { + req, out := c.GetRelationalDatabaseEventsRequest(input) return out, req.Send() } -// GetRelationalDatabaseMetricDataWithContext is the same as GetRelationalDatabaseMetricData with the addition of +// GetRelationalDatabaseEventsWithContext is the same as GetRelationalDatabaseEvents with the addition of // the ability to pass a context and additional request options. // -// See GetRelationalDatabaseMetricData for details on how to use this API operation. +// See GetRelationalDatabaseEvents for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
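A short sketch of pulling recent events for one database, as documented above (reusing svc; the database name and the one-day window are placeholders):

// Events from roughly the last day for one database.
events, err := svc.GetRelationalDatabaseEvents(&lightsail.GetRelationalDatabaseEventsInput{
	RelationalDatabaseName: aws.String("example-db"), // hypothetical name
	DurationInMinutes:      aws.Int64(1440),
})
if err != nil {
	log.Fatal(err)
}
for _, ev := range events.RelationalDatabaseEvents {
	fmt.Println(aws.StringValue(ev.Message))
}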
-func (c *Lightsail) GetRelationalDatabaseMetricDataWithContext(ctx aws.Context, input *GetRelationalDatabaseMetricDataInput, opts ...request.Option) (*GetRelationalDatabaseMetricDataOutput, error) { - req, out := c.GetRelationalDatabaseMetricDataRequest(input) +func (c *Lightsail) GetRelationalDatabaseEventsWithContext(ctx aws.Context, input *GetRelationalDatabaseEventsInput, opts ...request.Option) (*GetRelationalDatabaseEventsOutput, error) { + req, out := c.GetRelationalDatabaseEventsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRelationalDatabaseParameters = "GetRelationalDatabaseParameters" +const opGetRelationalDatabaseLogEvents = "GetRelationalDatabaseLogEvents" -// GetRelationalDatabaseParametersRequest generates a "aws/request.Request" representing the -// client's request for the GetRelationalDatabaseParameters operation. The "output" return +// GetRelationalDatabaseLogEventsRequest generates a "aws/request.Request" representing the +// client's request for the GetRelationalDatabaseLogEvents operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRelationalDatabaseParameters for more information on using the GetRelationalDatabaseParameters +// See GetRelationalDatabaseLogEvents for more information on using the GetRelationalDatabaseLogEvents // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRelationalDatabaseParametersRequest method. -// req, resp := client.GetRelationalDatabaseParametersRequest(params) +// // Example sending a request using the GetRelationalDatabaseLogEventsRequest method. +// req, resp := client.GetRelationalDatabaseLogEventsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseParameters -func (c *Lightsail) GetRelationalDatabaseParametersRequest(input *GetRelationalDatabaseParametersInput) (req *request.Request, output *GetRelationalDatabaseParametersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseLogEvents +func (c *Lightsail) GetRelationalDatabaseLogEventsRequest(input *GetRelationalDatabaseLogEventsInput) (req *request.Request, output *GetRelationalDatabaseLogEventsOutput) { op := &request.Operation{ - Name: opGetRelationalDatabaseParameters, + Name: opGetRelationalDatabaseLogEvents, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRelationalDatabaseParametersInput{} + input = &GetRelationalDatabaseLogEventsInput{} } - output = &GetRelationalDatabaseParametersOutput{} + output = &GetRelationalDatabaseLogEventsOutput{} req = c.newRequest(op, input, output) return } -// GetRelationalDatabaseParameters API operation for Amazon Lightsail. -// -// Returns all of the runtime parameters offered by the underlying database -// software, or engine, for a specific database in Amazon Lightsail. +// GetRelationalDatabaseLogEvents API operation for Amazon Lightsail. 
// -// In addition to the parameter names and values, this operation returns other -// information about each parameter. This information includes whether changes -// require a reboot, whether the parameter is modifiable, the allowed values, -// and the data types. +// Returns a list of log events for a database in Amazon Lightsail. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRelationalDatabaseParameters for usage and error information. +// API operation GetRelationalDatabaseLogEvents for usage and error information. // // Returned Error Types: // * ServiceException @@ -9226,80 +9949,81 @@ func (c *Lightsail) GetRelationalDatabaseParametersRequest(input *GetRelationalD // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseParameters -func (c *Lightsail) GetRelationalDatabaseParameters(input *GetRelationalDatabaseParametersInput) (*GetRelationalDatabaseParametersOutput, error) { - req, out := c.GetRelationalDatabaseParametersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseLogEvents +func (c *Lightsail) GetRelationalDatabaseLogEvents(input *GetRelationalDatabaseLogEventsInput) (*GetRelationalDatabaseLogEventsOutput, error) { + req, out := c.GetRelationalDatabaseLogEventsRequest(input) return out, req.Send() } -// GetRelationalDatabaseParametersWithContext is the same as GetRelationalDatabaseParameters with the addition of +// GetRelationalDatabaseLogEventsWithContext is the same as GetRelationalDatabaseLogEvents with the addition of // the ability to pass a context and additional request options. // -// See GetRelationalDatabaseParameters for details on how to use this API operation. +// See GetRelationalDatabaseLogEvents for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetRelationalDatabaseParametersWithContext(ctx aws.Context, input *GetRelationalDatabaseParametersInput, opts ...request.Option) (*GetRelationalDatabaseParametersOutput, error) { - req, out := c.GetRelationalDatabaseParametersRequest(input) +func (c *Lightsail) GetRelationalDatabaseLogEventsWithContext(ctx aws.Context, input *GetRelationalDatabaseLogEventsInput, opts ...request.Option) (*GetRelationalDatabaseLogEventsOutput, error) { + req, out := c.GetRelationalDatabaseLogEventsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRelationalDatabaseSnapshot = "GetRelationalDatabaseSnapshot" +const opGetRelationalDatabaseLogStreams = "GetRelationalDatabaseLogStreams" -// GetRelationalDatabaseSnapshotRequest generates a "aws/request.Request" representing the -// client's request for the GetRelationalDatabaseSnapshot operation. The "output" return +// GetRelationalDatabaseLogStreamsRequest generates a "aws/request.Request" representing the +// client's request for the GetRelationalDatabaseLogStreams operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRelationalDatabaseSnapshot for more information on using the GetRelationalDatabaseSnapshot +// See GetRelationalDatabaseLogStreams for more information on using the GetRelationalDatabaseLogStreams // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRelationalDatabaseSnapshotRequest method. -// req, resp := client.GetRelationalDatabaseSnapshotRequest(params) +// // Example sending a request using the GetRelationalDatabaseLogStreamsRequest method. +// req, resp := client.GetRelationalDatabaseLogStreamsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseSnapshot -func (c *Lightsail) GetRelationalDatabaseSnapshotRequest(input *GetRelationalDatabaseSnapshotInput) (req *request.Request, output *GetRelationalDatabaseSnapshotOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseLogStreams +func (c *Lightsail) GetRelationalDatabaseLogStreamsRequest(input *GetRelationalDatabaseLogStreamsInput) (req *request.Request, output *GetRelationalDatabaseLogStreamsOutput) { op := &request.Operation{ - Name: opGetRelationalDatabaseSnapshot, + Name: opGetRelationalDatabaseLogStreams, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRelationalDatabaseSnapshotInput{} + input = &GetRelationalDatabaseLogStreamsInput{} } - output = &GetRelationalDatabaseSnapshotOutput{} + output = &GetRelationalDatabaseLogStreamsOutput{} req = c.newRequest(op, input, output) return } -// GetRelationalDatabaseSnapshot API operation for Amazon Lightsail. +// GetRelationalDatabaseLogStreams API operation for Amazon Lightsail. // -// Returns information about a specific database snapshot in Amazon Lightsail. +// Returns a list of available log streams for a specific database in Amazon +// Lightsail. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRelationalDatabaseSnapshot for usage and error information. +// API operation GetRelationalDatabaseLogStreams for usage and error information. // // Returned Error Types: // * ServiceException @@ -9330,80 +10054,84 @@ func (c *Lightsail) GetRelationalDatabaseSnapshotRequest(input *GetRelationalDat // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseSnapshot -func (c *Lightsail) GetRelationalDatabaseSnapshot(input *GetRelationalDatabaseSnapshotInput) (*GetRelationalDatabaseSnapshotOutput, error) { - req, out := c.GetRelationalDatabaseSnapshotRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseLogStreams +func (c *Lightsail) GetRelationalDatabaseLogStreams(input *GetRelationalDatabaseLogStreamsInput) (*GetRelationalDatabaseLogStreamsOutput, error) { + req, out := c.GetRelationalDatabaseLogStreamsRequest(input) return out, req.Send() } -// GetRelationalDatabaseSnapshotWithContext is the same as GetRelationalDatabaseSnapshot with the addition of +// GetRelationalDatabaseLogStreamsWithContext is the same as GetRelationalDatabaseLogStreams with the addition of // the ability to pass a context and additional request options. // -// See GetRelationalDatabaseSnapshot for details on how to use this API operation. +// See GetRelationalDatabaseLogStreams for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetRelationalDatabaseSnapshotWithContext(ctx aws.Context, input *GetRelationalDatabaseSnapshotInput, opts ...request.Option) (*GetRelationalDatabaseSnapshotOutput, error) { - req, out := c.GetRelationalDatabaseSnapshotRequest(input) +func (c *Lightsail) GetRelationalDatabaseLogStreamsWithContext(ctx aws.Context, input *GetRelationalDatabaseLogStreamsInput, opts ...request.Option) (*GetRelationalDatabaseLogStreamsOutput, error) { + req, out := c.GetRelationalDatabaseLogStreamsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRelationalDatabaseSnapshots = "GetRelationalDatabaseSnapshots" +const opGetRelationalDatabaseMasterUserPassword = "GetRelationalDatabaseMasterUserPassword" -// GetRelationalDatabaseSnapshotsRequest generates a "aws/request.Request" representing the -// client's request for the GetRelationalDatabaseSnapshots operation. The "output" return +// GetRelationalDatabaseMasterUserPasswordRequest generates a "aws/request.Request" representing the +// client's request for the GetRelationalDatabaseMasterUserPassword operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRelationalDatabaseSnapshots for more information on using the GetRelationalDatabaseSnapshots +// See GetRelationalDatabaseMasterUserPassword for more information on using the GetRelationalDatabaseMasterUserPassword // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRelationalDatabaseSnapshotsRequest method. -// req, resp := client.GetRelationalDatabaseSnapshotsRequest(params) +// // Example sending a request using the GetRelationalDatabaseMasterUserPasswordRequest method. 
+// req, resp := client.GetRelationalDatabaseMasterUserPasswordRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseSnapshots -func (c *Lightsail) GetRelationalDatabaseSnapshotsRequest(input *GetRelationalDatabaseSnapshotsInput) (req *request.Request, output *GetRelationalDatabaseSnapshotsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseMasterUserPassword +func (c *Lightsail) GetRelationalDatabaseMasterUserPasswordRequest(input *GetRelationalDatabaseMasterUserPasswordInput) (req *request.Request, output *GetRelationalDatabaseMasterUserPasswordOutput) { op := &request.Operation{ - Name: opGetRelationalDatabaseSnapshots, + Name: opGetRelationalDatabaseMasterUserPassword, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRelationalDatabaseSnapshotsInput{} + input = &GetRelationalDatabaseMasterUserPasswordInput{} } - output = &GetRelationalDatabaseSnapshotsOutput{} + output = &GetRelationalDatabaseMasterUserPasswordOutput{} req = c.newRequest(op, input, output) return } -// GetRelationalDatabaseSnapshots API operation for Amazon Lightsail. +// GetRelationalDatabaseMasterUserPassword API operation for Amazon Lightsail. // -// Returns information about all of your database snapshots in Amazon Lightsail. +// Returns the current, previous, or pending versions of the master user password +// for a Lightsail database. +// +// The GetRelationalDatabaseMasterUserPassword operation supports tag-based +// access control via resource tags applied to the resource identified by relationalDatabaseName. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRelationalDatabaseSnapshots for usage and error information. +// API operation GetRelationalDatabaseMasterUserPassword for usage and error information. // // Returned Error Types: // * ServiceException @@ -9434,80 +10162,85 @@ func (c *Lightsail) GetRelationalDatabaseSnapshotsRequest(input *GetRelationalDa // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseSnapshots -func (c *Lightsail) GetRelationalDatabaseSnapshots(input *GetRelationalDatabaseSnapshotsInput) (*GetRelationalDatabaseSnapshotsOutput, error) { - req, out := c.GetRelationalDatabaseSnapshotsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseMasterUserPassword +func (c *Lightsail) GetRelationalDatabaseMasterUserPassword(input *GetRelationalDatabaseMasterUserPasswordInput) (*GetRelationalDatabaseMasterUserPasswordOutput, error) { + req, out := c.GetRelationalDatabaseMasterUserPasswordRequest(input) return out, req.Send() } -// GetRelationalDatabaseSnapshotsWithContext is the same as GetRelationalDatabaseSnapshots with the addition of +// GetRelationalDatabaseMasterUserPasswordWithContext is the same as GetRelationalDatabaseMasterUserPassword with the addition of // the ability to pass a context and additional request options. // -// See GetRelationalDatabaseSnapshots for details on how to use this API operation. 
+// See GetRelationalDatabaseMasterUserPassword for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetRelationalDatabaseSnapshotsWithContext(ctx aws.Context, input *GetRelationalDatabaseSnapshotsInput, opts ...request.Option) (*GetRelationalDatabaseSnapshotsOutput, error) { - req, out := c.GetRelationalDatabaseSnapshotsRequest(input) +func (c *Lightsail) GetRelationalDatabaseMasterUserPasswordWithContext(ctx aws.Context, input *GetRelationalDatabaseMasterUserPasswordInput, opts ...request.Option) (*GetRelationalDatabaseMasterUserPasswordOutput, error) { + req, out := c.GetRelationalDatabaseMasterUserPasswordRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRelationalDatabases = "GetRelationalDatabases" +const opGetRelationalDatabaseMetricData = "GetRelationalDatabaseMetricData" -// GetRelationalDatabasesRequest generates a "aws/request.Request" representing the -// client's request for the GetRelationalDatabases operation. The "output" return +// GetRelationalDatabaseMetricDataRequest generates a "aws/request.Request" representing the +// client's request for the GetRelationalDatabaseMetricData operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRelationalDatabases for more information on using the GetRelationalDatabases +// See GetRelationalDatabaseMetricData for more information on using the GetRelationalDatabaseMetricData // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRelationalDatabasesRequest method. -// req, resp := client.GetRelationalDatabasesRequest(params) +// // Example sending a request using the GetRelationalDatabaseMetricDataRequest method. +// req, resp := client.GetRelationalDatabaseMetricDataRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabases -func (c *Lightsail) GetRelationalDatabasesRequest(input *GetRelationalDatabasesInput) (req *request.Request, output *GetRelationalDatabasesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseMetricData +func (c *Lightsail) GetRelationalDatabaseMetricDataRequest(input *GetRelationalDatabaseMetricDataInput) (req *request.Request, output *GetRelationalDatabaseMetricDataOutput) { op := &request.Operation{ - Name: opGetRelationalDatabases, + Name: opGetRelationalDatabaseMetricData, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRelationalDatabasesInput{} + input = &GetRelationalDatabaseMetricDataInput{} } - output = &GetRelationalDatabasesOutput{} + output = &GetRelationalDatabaseMetricDataOutput{} req = c.newRequest(op, input, output) return } -// GetRelationalDatabases API operation for Amazon Lightsail. 
+// GetRelationalDatabaseMetricData API operation for Amazon Lightsail. // -// Returns information about all of your databases in Amazon Lightsail. +// Returns the data points of the specified metric for a database in Amazon +// Lightsail. +// +// Metrics report the utilization of your resources, and the error counts generated +// by them. Monitor and collect metric data regularly to maintain the reliability, +// availability, and performance of your resources. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetRelationalDatabases for usage and error information. +// API operation GetRelationalDatabaseMetricData for usage and error information. // // Returned Error Types: // * ServiceException @@ -9538,80 +10271,86 @@ func (c *Lightsail) GetRelationalDatabasesRequest(input *GetRelationalDatabasesI // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabases -func (c *Lightsail) GetRelationalDatabases(input *GetRelationalDatabasesInput) (*GetRelationalDatabasesOutput, error) { - req, out := c.GetRelationalDatabasesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseMetricData +func (c *Lightsail) GetRelationalDatabaseMetricData(input *GetRelationalDatabaseMetricDataInput) (*GetRelationalDatabaseMetricDataOutput, error) { + req, out := c.GetRelationalDatabaseMetricDataRequest(input) return out, req.Send() } -// GetRelationalDatabasesWithContext is the same as GetRelationalDatabases with the addition of +// GetRelationalDatabaseMetricDataWithContext is the same as GetRelationalDatabaseMetricData with the addition of // the ability to pass a context and additional request options. // -// See GetRelationalDatabases for details on how to use this API operation. +// See GetRelationalDatabaseMetricData for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetRelationalDatabasesWithContext(ctx aws.Context, input *GetRelationalDatabasesInput, opts ...request.Option) (*GetRelationalDatabasesOutput, error) { - req, out := c.GetRelationalDatabasesRequest(input) +func (c *Lightsail) GetRelationalDatabaseMetricDataWithContext(ctx aws.Context, input *GetRelationalDatabaseMetricDataInput, opts ...request.Option) (*GetRelationalDatabaseMetricDataOutput, error) { + req, out := c.GetRelationalDatabaseMetricDataRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetStaticIp = "GetStaticIp" +const opGetRelationalDatabaseParameters = "GetRelationalDatabaseParameters" -// GetStaticIpRequest generates a "aws/request.Request" representing the -// client's request for the GetStaticIp operation. The "output" return +// GetRelationalDatabaseParametersRequest generates a "aws/request.Request" representing the +// client's request for the GetRelationalDatabaseParameters operation. 
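A minimal sketch of the two operations documented above, GetRelationalDatabaseMasterUserPassword and GetRelationalDatabaseMetricData (illustrative only, not part of the vendored change): svc is a *lightsail.Lightsail built with lightsail.New as in the earlier sketch, the database name is a placeholder, and the metric name, unit, and statistic strings are assumed from the Lightsail API reference.

// Uses the imports from the earlier sketch plus "time".
func showPasswordAndCPU(svc *lightsail.Lightsail, dbName string) error {
	// Look up when the current master user password version was created
	// (the password itself is in pw.MasterUserPassword; avoid printing it).
	pw, err := svc.GetRelationalDatabaseMasterUserPassword(&lightsail.GetRelationalDatabaseMasterUserPasswordInput{
		RelationalDatabaseName: aws.String(dbName),
		PasswordVersion:        aws.String("CURRENT"), // or "PREVIOUS" / "PENDING"
	})
	if err != nil {
		return err
	}
	fmt.Println("password created at:", aws.TimeValue(pw.CreatedAt))

	// Pull one hour of average CPU utilization in 5-minute periods.
	end := time.Now()
	metrics, err := svc.GetRelationalDatabaseMetricData(&lightsail.GetRelationalDatabaseMetricDataInput{
		RelationalDatabaseName: aws.String(dbName),
		MetricName:             aws.String("CPUUtilization"),
		Unit:                   aws.String("Percent"),
		Statistics:             []*string{aws.String("Average")},
		Period:                 aws.Int64(300),
		StartTime:              aws.Time(end.Add(-1 * time.Hour)),
		EndTime:                aws.Time(end),
	})
	if err != nil {
		return err
	}
	for _, dp := range metrics.MetricData {
		fmt.Println(aws.TimeValue(dp.Timestamp), aws.Float64Value(dp.Average))
	}
	return nil
}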
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetStaticIp for more information on using the GetStaticIp +// See GetRelationalDatabaseParameters for more information on using the GetRelationalDatabaseParameters // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetStaticIpRequest method. -// req, resp := client.GetStaticIpRequest(params) +// // Example sending a request using the GetRelationalDatabaseParametersRequest method. +// req, resp := client.GetRelationalDatabaseParametersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetStaticIp -func (c *Lightsail) GetStaticIpRequest(input *GetStaticIpInput) (req *request.Request, output *GetStaticIpOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseParameters +func (c *Lightsail) GetRelationalDatabaseParametersRequest(input *GetRelationalDatabaseParametersInput) (req *request.Request, output *GetRelationalDatabaseParametersOutput) { op := &request.Operation{ - Name: opGetStaticIp, + Name: opGetRelationalDatabaseParameters, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetStaticIpInput{} + input = &GetRelationalDatabaseParametersInput{} } - output = &GetStaticIpOutput{} + output = &GetRelationalDatabaseParametersOutput{} req = c.newRequest(op, input, output) return } -// GetStaticIp API operation for Amazon Lightsail. +// GetRelationalDatabaseParameters API operation for Amazon Lightsail. // -// Returns information about a specific static IP. +// Returns all of the runtime parameters offered by the underlying database +// software, or engine, for a specific database in Amazon Lightsail. +// +// In addition to the parameter names and values, this operation returns other +// information about each parameter. This information includes whether changes +// require a reboot, whether the parameter is modifiable, the allowed values, +// and the data types. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetStaticIp for usage and error information. +// API operation GetRelationalDatabaseParameters for usage and error information. // // Returned Error Types: // * ServiceException @@ -9642,80 +10381,80 @@ func (c *Lightsail) GetStaticIpRequest(input *GetStaticIpInput) (req *request.Re // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetStaticIp -func (c *Lightsail) GetStaticIp(input *GetStaticIpInput) (*GetStaticIpOutput, error) { - req, out := c.GetStaticIpRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseParameters +func (c *Lightsail) GetRelationalDatabaseParameters(input *GetRelationalDatabaseParametersInput) (*GetRelationalDatabaseParametersOutput, error) { + req, out := c.GetRelationalDatabaseParametersRequest(input) return out, req.Send() } -// GetStaticIpWithContext is the same as GetStaticIp with the addition of +// GetRelationalDatabaseParametersWithContext is the same as GetRelationalDatabaseParameters with the addition of // the ability to pass a context and additional request options. // -// See GetStaticIp for details on how to use this API operation. +// See GetRelationalDatabaseParameters for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) GetStaticIpWithContext(ctx aws.Context, input *GetStaticIpInput, opts ...request.Option) (*GetStaticIpOutput, error) { - req, out := c.GetStaticIpRequest(input) +func (c *Lightsail) GetRelationalDatabaseParametersWithContext(ctx aws.Context, input *GetRelationalDatabaseParametersInput, opts ...request.Option) (*GetRelationalDatabaseParametersOutput, error) { + req, out := c.GetRelationalDatabaseParametersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetStaticIps = "GetStaticIps" +const opGetRelationalDatabaseSnapshot = "GetRelationalDatabaseSnapshot" -// GetStaticIpsRequest generates a "aws/request.Request" representing the -// client's request for the GetStaticIps operation. The "output" return +// GetRelationalDatabaseSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the GetRelationalDatabaseSnapshot operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetStaticIps for more information on using the GetStaticIps +// See GetRelationalDatabaseSnapshot for more information on using the GetRelationalDatabaseSnapshot // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetStaticIpsRequest method. -// req, resp := client.GetStaticIpsRequest(params) +// // Example sending a request using the GetRelationalDatabaseSnapshotRequest method. 
+// req, resp := client.GetRelationalDatabaseSnapshotRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetStaticIps -func (c *Lightsail) GetStaticIpsRequest(input *GetStaticIpsInput) (req *request.Request, output *GetStaticIpsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseSnapshot +func (c *Lightsail) GetRelationalDatabaseSnapshotRequest(input *GetRelationalDatabaseSnapshotInput) (req *request.Request, output *GetRelationalDatabaseSnapshotOutput) { op := &request.Operation{ - Name: opGetStaticIps, + Name: opGetRelationalDatabaseSnapshot, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetStaticIpsInput{} + input = &GetRelationalDatabaseSnapshotInput{} } - output = &GetStaticIpsOutput{} + output = &GetRelationalDatabaseSnapshotOutput{} req = c.newRequest(op, input, output) return } -// GetStaticIps API operation for Amazon Lightsail. +// GetRelationalDatabaseSnapshot API operation for Amazon Lightsail. // -// Returns information about all static IPs in the user's account. +// Returns information about a specific database snapshot in Amazon Lightsail. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation GetStaticIps for usage and error information. +// API operation GetRelationalDatabaseSnapshot for usage and error information. // // Returned Error Types: // * ServiceException @@ -9746,80 +10485,80 @@ func (c *Lightsail) GetStaticIpsRequest(input *GetStaticIpsInput) (req *request. // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetStaticIps -func (c *Lightsail) GetStaticIps(input *GetStaticIpsInput) (*GetStaticIpsOutput, error) { - req, out := c.GetStaticIpsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseSnapshot +func (c *Lightsail) GetRelationalDatabaseSnapshot(input *GetRelationalDatabaseSnapshotInput) (*GetRelationalDatabaseSnapshotOutput, error) { + req, out := c.GetRelationalDatabaseSnapshotRequest(input) return out, req.Send() } -// GetStaticIpsWithContext is the same as GetStaticIps with the addition of +// GetRelationalDatabaseSnapshotWithContext is the same as GetRelationalDatabaseSnapshot with the addition of // the ability to pass a context and additional request options. // -// See GetStaticIps for details on how to use this API operation. +// See GetRelationalDatabaseSnapshot for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
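As a sketch of the snapshot operations above (not part of the vendored change), the following looks up one snapshot by name and then lists every snapshot in the account; the same page-token pattern applies to GetRelationalDatabaseSnapshots and GetRelationalDatabases. The snapshot name is a placeholder and field names are assumed from the Lightsail API reference.

func showSnapshots(svc *lightsail.Lightsail) error {
	// Look up a single snapshot by its name.
	one, err := svc.GetRelationalDatabaseSnapshot(&lightsail.GetRelationalDatabaseSnapshotInput{
		RelationalDatabaseSnapshotName: aws.String("example-db-snapshot"),
	})
	if err != nil {
		return err
	}
	fmt.Println("snapshot state:", aws.StringValue(one.RelationalDatabaseSnapshot.State))

	// List every snapshot in the account, following the page token.
	var pageToken *string
	for {
		out, err := svc.GetRelationalDatabaseSnapshots(&lightsail.GetRelationalDatabaseSnapshotsInput{
			PageToken: pageToken,
		})
		if err != nil {
			return err
		}
		for _, s := range out.RelationalDatabaseSnapshots {
			fmt.Println(aws.StringValue(s.Name))
		}
		if out.NextPageToken == nil {
			break
		}
		pageToken = out.NextPageToken
	}
	return nil
}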
-func (c *Lightsail) GetStaticIpsWithContext(ctx aws.Context, input *GetStaticIpsInput, opts ...request.Option) (*GetStaticIpsOutput, error) { - req, out := c.GetStaticIpsRequest(input) +func (c *Lightsail) GetRelationalDatabaseSnapshotWithContext(ctx aws.Context, input *GetRelationalDatabaseSnapshotInput, opts ...request.Option) (*GetRelationalDatabaseSnapshotOutput, error) { + req, out := c.GetRelationalDatabaseSnapshotRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opImportKeyPair = "ImportKeyPair" +const opGetRelationalDatabaseSnapshots = "GetRelationalDatabaseSnapshots" -// ImportKeyPairRequest generates a "aws/request.Request" representing the -// client's request for the ImportKeyPair operation. The "output" return +// GetRelationalDatabaseSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the GetRelationalDatabaseSnapshots operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ImportKeyPair for more information on using the ImportKeyPair +// See GetRelationalDatabaseSnapshots for more information on using the GetRelationalDatabaseSnapshots // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ImportKeyPairRequest method. -// req, resp := client.ImportKeyPairRequest(params) +// // Example sending a request using the GetRelationalDatabaseSnapshotsRequest method. +// req, resp := client.GetRelationalDatabaseSnapshotsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ImportKeyPair -func (c *Lightsail) ImportKeyPairRequest(input *ImportKeyPairInput) (req *request.Request, output *ImportKeyPairOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseSnapshots +func (c *Lightsail) GetRelationalDatabaseSnapshotsRequest(input *GetRelationalDatabaseSnapshotsInput) (req *request.Request, output *GetRelationalDatabaseSnapshotsOutput) { op := &request.Operation{ - Name: opImportKeyPair, + Name: opGetRelationalDatabaseSnapshots, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ImportKeyPairInput{} + input = &GetRelationalDatabaseSnapshotsInput{} } - output = &ImportKeyPairOutput{} + output = &GetRelationalDatabaseSnapshotsOutput{} req = c.newRequest(op, input, output) return } -// ImportKeyPair API operation for Amazon Lightsail. +// GetRelationalDatabaseSnapshots API operation for Amazon Lightsail. // -// Imports a public SSH key from a specific key pair. +// Returns information about all of your database snapshots in Amazon Lightsail. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation ImportKeyPair for usage and error information. +// API operation GetRelationalDatabaseSnapshots for usage and error information. 
// // Returned Error Types: // * ServiceException @@ -9850,80 +10589,80 @@ func (c *Lightsail) ImportKeyPairRequest(input *ImportKeyPairInput) (req *reques // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ImportKeyPair -func (c *Lightsail) ImportKeyPair(input *ImportKeyPairInput) (*ImportKeyPairOutput, error) { - req, out := c.ImportKeyPairRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabaseSnapshots +func (c *Lightsail) GetRelationalDatabaseSnapshots(input *GetRelationalDatabaseSnapshotsInput) (*GetRelationalDatabaseSnapshotsOutput, error) { + req, out := c.GetRelationalDatabaseSnapshotsRequest(input) return out, req.Send() } -// ImportKeyPairWithContext is the same as ImportKeyPair with the addition of +// GetRelationalDatabaseSnapshotsWithContext is the same as GetRelationalDatabaseSnapshots with the addition of // the ability to pass a context and additional request options. // -// See ImportKeyPair for details on how to use this API operation. +// See GetRelationalDatabaseSnapshots for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) ImportKeyPairWithContext(ctx aws.Context, input *ImportKeyPairInput, opts ...request.Option) (*ImportKeyPairOutput, error) { - req, out := c.ImportKeyPairRequest(input) +func (c *Lightsail) GetRelationalDatabaseSnapshotsWithContext(ctx aws.Context, input *GetRelationalDatabaseSnapshotsInput, opts ...request.Option) (*GetRelationalDatabaseSnapshotsOutput, error) { + req, out := c.GetRelationalDatabaseSnapshotsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opIsVpcPeered = "IsVpcPeered" +const opGetRelationalDatabases = "GetRelationalDatabases" -// IsVpcPeeredRequest generates a "aws/request.Request" representing the -// client's request for the IsVpcPeered operation. The "output" return +// GetRelationalDatabasesRequest generates a "aws/request.Request" representing the +// client's request for the GetRelationalDatabases operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See IsVpcPeered for more information on using the IsVpcPeered +// See GetRelationalDatabases for more information on using the GetRelationalDatabases // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the IsVpcPeeredRequest method. -// req, resp := client.IsVpcPeeredRequest(params) +// // Example sending a request using the GetRelationalDatabasesRequest method. 
+// req, resp := client.GetRelationalDatabasesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/IsVpcPeered -func (c *Lightsail) IsVpcPeeredRequest(input *IsVpcPeeredInput) (req *request.Request, output *IsVpcPeeredOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabases +func (c *Lightsail) GetRelationalDatabasesRequest(input *GetRelationalDatabasesInput) (req *request.Request, output *GetRelationalDatabasesOutput) { op := &request.Operation{ - Name: opIsVpcPeered, + Name: opGetRelationalDatabases, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &IsVpcPeeredInput{} + input = &GetRelationalDatabasesInput{} } - output = &IsVpcPeeredOutput{} + output = &GetRelationalDatabasesOutput{} req = c.newRequest(op, input, output) return } -// IsVpcPeered API operation for Amazon Lightsail. +// GetRelationalDatabases API operation for Amazon Lightsail. // -// Returns a Boolean value indicating whether your Lightsail VPC is peered. +// Returns information about all of your databases in Amazon Lightsail. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation IsVpcPeered for usage and error information. +// API operation GetRelationalDatabases for usage and error information. // // Returned Error Types: // * ServiceException @@ -9954,84 +10693,80 @@ func (c *Lightsail) IsVpcPeeredRequest(input *IsVpcPeeredInput) (req *request.Re // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/IsVpcPeered -func (c *Lightsail) IsVpcPeered(input *IsVpcPeeredInput) (*IsVpcPeeredOutput, error) { - req, out := c.IsVpcPeeredRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetRelationalDatabases +func (c *Lightsail) GetRelationalDatabases(input *GetRelationalDatabasesInput) (*GetRelationalDatabasesOutput, error) { + req, out := c.GetRelationalDatabasesRequest(input) return out, req.Send() } -// IsVpcPeeredWithContext is the same as IsVpcPeered with the addition of +// GetRelationalDatabasesWithContext is the same as GetRelationalDatabases with the addition of // the ability to pass a context and additional request options. // -// See IsVpcPeered for details on how to use this API operation. +// See GetRelationalDatabases for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) IsVpcPeeredWithContext(ctx aws.Context, input *IsVpcPeeredInput, opts ...request.Option) (*IsVpcPeeredOutput, error) { - req, out := c.IsVpcPeeredRequest(input) +func (c *Lightsail) GetRelationalDatabasesWithContext(ctx aws.Context, input *GetRelationalDatabasesInput, opts ...request.Option) (*GetRelationalDatabasesOutput, error) { + req, out := c.GetRelationalDatabasesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opOpenInstancePublicPorts = "OpenInstancePublicPorts" +const opGetStaticIp = "GetStaticIp" -// OpenInstancePublicPortsRequest generates a "aws/request.Request" representing the -// client's request for the OpenInstancePublicPorts operation. The "output" return +// GetStaticIpRequest generates a "aws/request.Request" representing the +// client's request for the GetStaticIp operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See OpenInstancePublicPorts for more information on using the OpenInstancePublicPorts +// See GetStaticIp for more information on using the GetStaticIp // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the OpenInstancePublicPortsRequest method. -// req, resp := client.OpenInstancePublicPortsRequest(params) +// // Example sending a request using the GetStaticIpRequest method. +// req, resp := client.GetStaticIpRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/OpenInstancePublicPorts -func (c *Lightsail) OpenInstancePublicPortsRequest(input *OpenInstancePublicPortsInput) (req *request.Request, output *OpenInstancePublicPortsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetStaticIp +func (c *Lightsail) GetStaticIpRequest(input *GetStaticIpInput) (req *request.Request, output *GetStaticIpOutput) { op := &request.Operation{ - Name: opOpenInstancePublicPorts, + Name: opGetStaticIp, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &OpenInstancePublicPortsInput{} + input = &GetStaticIpInput{} } - output = &OpenInstancePublicPortsOutput{} + output = &GetStaticIpOutput{} req = c.newRequest(op, input, output) return } -// OpenInstancePublicPorts API operation for Amazon Lightsail. -// -// Adds public ports to an Amazon Lightsail instance. +// GetStaticIp API operation for Amazon Lightsail. // -// The open instance public ports operation supports tag-based access control -// via resource tags applied to the resource identified by instance name. For -// more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// Returns information about a specific static IP. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation OpenInstancePublicPorts for usage and error information. +// API operation GetStaticIp for usage and error information. // // Returned Error Types: // * ServiceException @@ -10062,80 +10797,80 @@ func (c *Lightsail) OpenInstancePublicPortsRequest(input *OpenInstancePublicPort // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/OpenInstancePublicPorts -func (c *Lightsail) OpenInstancePublicPorts(input *OpenInstancePublicPortsInput) (*OpenInstancePublicPortsOutput, error) { - req, out := c.OpenInstancePublicPortsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetStaticIp +func (c *Lightsail) GetStaticIp(input *GetStaticIpInput) (*GetStaticIpOutput, error) { + req, out := c.GetStaticIpRequest(input) return out, req.Send() } -// OpenInstancePublicPortsWithContext is the same as OpenInstancePublicPorts with the addition of +// GetStaticIpWithContext is the same as GetStaticIp with the addition of // the ability to pass a context and additional request options. // -// See OpenInstancePublicPorts for details on how to use this API operation. +// See GetStaticIp for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) OpenInstancePublicPortsWithContext(ctx aws.Context, input *OpenInstancePublicPortsInput, opts ...request.Option) (*OpenInstancePublicPortsOutput, error) { - req, out := c.OpenInstancePublicPortsRequest(input) +func (c *Lightsail) GetStaticIpWithContext(ctx aws.Context, input *GetStaticIpInput, opts ...request.Option) (*GetStaticIpOutput, error) { + req, out := c.GetStaticIpRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPeerVpc = "PeerVpc" +const opGetStaticIps = "GetStaticIps" -// PeerVpcRequest generates a "aws/request.Request" representing the -// client's request for the PeerVpc operation. The "output" return +// GetStaticIpsRequest generates a "aws/request.Request" representing the +// client's request for the GetStaticIps operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PeerVpc for more information on using the PeerVpc +// See GetStaticIps for more information on using the GetStaticIps // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PeerVpcRequest method. -// req, resp := client.PeerVpcRequest(params) +// // Example sending a request using the GetStaticIpsRequest method. 
+// req, resp := client.GetStaticIpsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PeerVpc -func (c *Lightsail) PeerVpcRequest(input *PeerVpcInput) (req *request.Request, output *PeerVpcOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetStaticIps +func (c *Lightsail) GetStaticIpsRequest(input *GetStaticIpsInput) (req *request.Request, output *GetStaticIpsOutput) { op := &request.Operation{ - Name: opPeerVpc, + Name: opGetStaticIps, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PeerVpcInput{} + input = &GetStaticIpsInput{} } - output = &PeerVpcOutput{} + output = &GetStaticIpsOutput{} req = c.newRequest(op, input, output) return } -// PeerVpc API operation for Amazon Lightsail. +// GetStaticIps API operation for Amazon Lightsail. // -// Tries to peer the Lightsail VPC with the user's default VPC. +// Returns information about all static IPs in the user's account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation PeerVpc for usage and error information. +// API operation GetStaticIps for usage and error information. // // Returned Error Types: // * ServiceException @@ -10166,93 +10901,80 @@ func (c *Lightsail) PeerVpcRequest(input *PeerVpcInput) (req *request.Request, o // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PeerVpc -func (c *Lightsail) PeerVpc(input *PeerVpcInput) (*PeerVpcOutput, error) { - req, out := c.PeerVpcRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/GetStaticIps +func (c *Lightsail) GetStaticIps(input *GetStaticIpsInput) (*GetStaticIpsOutput, error) { + req, out := c.GetStaticIpsRequest(input) return out, req.Send() } -// PeerVpcWithContext is the same as PeerVpc with the addition of +// GetStaticIpsWithContext is the same as GetStaticIps with the addition of // the ability to pass a context and additional request options. // -// See PeerVpc for details on how to use this API operation. +// See GetStaticIps for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) PeerVpcWithContext(ctx aws.Context, input *PeerVpcInput, opts ...request.Option) (*PeerVpcOutput, error) { - req, out := c.PeerVpcRequest(input) +func (c *Lightsail) GetStaticIpsWithContext(ctx aws.Context, input *GetStaticIpsInput, opts ...request.Option) (*GetStaticIpsOutput, error) { + req, out := c.GetStaticIpsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutAlarm = "PutAlarm" +const opImportKeyPair = "ImportKeyPair" -// PutAlarmRequest generates a "aws/request.Request" representing the -// client's request for the PutAlarm operation. The "output" return +// ImportKeyPairRequest generates a "aws/request.Request" representing the +// client's request for the ImportKeyPair operation. 
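A short sketch of the static IP operations documented above, GetStaticIp and GetStaticIps (illustrative only, not part of the vendored change; the static IP name is a placeholder and field names are assumed from the Lightsail API reference).

func showStaticIps(svc *lightsail.Lightsail) error {
	// Fetch a single static IP by name.
	one, err := svc.GetStaticIp(&lightsail.GetStaticIpInput{
		StaticIpName: aws.String("example-static-ip"),
	})
	if err != nil {
		return err
	}
	fmt.Println("address:", aws.StringValue(one.StaticIp.IpAddress))

	// List all static IPs in the account.
	all, err := svc.GetStaticIps(&lightsail.GetStaticIpsInput{})
	if err != nil {
		return err
	}
	for _, ip := range all.StaticIps {
		fmt.Printf("%s -> %s\n", aws.StringValue(ip.Name), aws.StringValue(ip.IpAddress))
	}
	return nil
}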
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutAlarm for more information on using the PutAlarm +// See ImportKeyPair for more information on using the ImportKeyPair // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutAlarmRequest method. -// req, resp := client.PutAlarmRequest(params) +// // Example sending a request using the ImportKeyPairRequest method. +// req, resp := client.ImportKeyPairRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PutAlarm -func (c *Lightsail) PutAlarmRequest(input *PutAlarmInput) (req *request.Request, output *PutAlarmOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ImportKeyPair +func (c *Lightsail) ImportKeyPairRequest(input *ImportKeyPairInput) (req *request.Request, output *ImportKeyPairOutput) { op := &request.Operation{ - Name: opPutAlarm, + Name: opImportKeyPair, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutAlarmInput{} + input = &ImportKeyPairInput{} } - output = &PutAlarmOutput{} + output = &ImportKeyPairOutput{} req = c.newRequest(op, input, output) return } -// PutAlarm API operation for Amazon Lightsail. -// -// Creates or updates an alarm, and associates it with the specified metric. -// -// An alarm is used to monitor a single metric for one of your resources. When -// a metric condition is met, the alarm can notify you by email, SMS text message, -// and a banner displayed on the Amazon Lightsail console. For more information, -// see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms). -// -// When this action creates an alarm, the alarm state is immediately set to -// INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. -// Any actions associated with the new state are then executed. +// ImportKeyPair API operation for Amazon Lightsail. // -// When you update an existing alarm, its state is left unchanged, but the update -// completely overwrites the previous configuration of the alarm. The alarm -// is then evaluated with the updated configuration. +// Imports a public SSH key from a specific key pair. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation PutAlarm for usage and error information. +// API operation ImportKeyPair for usage and error information. // // Returned Error Types: // * ServiceException @@ -10266,6 +10988,9 @@ func (c *Lightsail) PutAlarmRequest(input *PutAlarmInput) (req *request.Request, // Please set your AWS Region configuration to us-east-1 to create, view, or // edit these resources. // +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// // * OperationFailureException // Lightsail throws this exception when an operation fails to execute. 
// @@ -10273,91 +10998,87 @@ func (c *Lightsail) PutAlarmRequest(input *PutAlarmInput) (req *request.Request, // Lightsail throws this exception when the user cannot be authenticated or // uses invalid credentials to access a resource. // +// * AccountSetupInProgressException +// Lightsail throws this exception when an account is still in the setup in +// progress state. +// // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// * NotFoundException -// Lightsail throws this exception when it cannot find a resource. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PutAlarm -func (c *Lightsail) PutAlarm(input *PutAlarmInput) (*PutAlarmOutput, error) { - req, out := c.PutAlarmRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ImportKeyPair +func (c *Lightsail) ImportKeyPair(input *ImportKeyPairInput) (*ImportKeyPairOutput, error) { + req, out := c.ImportKeyPairRequest(input) return out, req.Send() } -// PutAlarmWithContext is the same as PutAlarm with the addition of +// ImportKeyPairWithContext is the same as ImportKeyPair with the addition of // the ability to pass a context and additional request options. // -// See PutAlarm for details on how to use this API operation. +// See ImportKeyPair for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) PutAlarmWithContext(ctx aws.Context, input *PutAlarmInput, opts ...request.Option) (*PutAlarmOutput, error) { - req, out := c.PutAlarmRequest(input) +func (c *Lightsail) ImportKeyPairWithContext(ctx aws.Context, input *ImportKeyPairInput, opts ...request.Option) (*ImportKeyPairOutput, error) { + req, out := c.ImportKeyPairRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutInstancePublicPorts = "PutInstancePublicPorts" +const opIsVpcPeered = "IsVpcPeered" -// PutInstancePublicPortsRequest generates a "aws/request.Request" representing the -// client's request for the PutInstancePublicPorts operation. The "output" return +// IsVpcPeeredRequest generates a "aws/request.Request" representing the +// client's request for the IsVpcPeered operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutInstancePublicPorts for more information on using the PutInstancePublicPorts +// See IsVpcPeered for more information on using the IsVpcPeered // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutInstancePublicPortsRequest method. -// req, resp := client.PutInstancePublicPortsRequest(params) +// // Example sending a request using the IsVpcPeeredRequest method. 
+// req, resp := client.IsVpcPeeredRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PutInstancePublicPorts -func (c *Lightsail) PutInstancePublicPortsRequest(input *PutInstancePublicPortsInput) (req *request.Request, output *PutInstancePublicPortsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/IsVpcPeered +func (c *Lightsail) IsVpcPeeredRequest(input *IsVpcPeeredInput) (req *request.Request, output *IsVpcPeeredOutput) { op := &request.Operation{ - Name: opPutInstancePublicPorts, + Name: opIsVpcPeered, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutInstancePublicPortsInput{} + input = &IsVpcPeeredInput{} } - output = &PutInstancePublicPortsOutput{} + output = &IsVpcPeeredOutput{} req = c.newRequest(op, input, output) return } -// PutInstancePublicPorts API operation for Amazon Lightsail. -// -// Sets the specified open ports for an Amazon Lightsail instance, and closes -// all ports for every protocol not included in the current request. +// IsVpcPeered API operation for Amazon Lightsail. // -// The put instance public ports operation supports tag-based access control -// via resource tags applied to the resource identified by instance name. For -// more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// Returns a Boolean value indicating whether your Lightsail VPC is peered. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation PutInstancePublicPorts for usage and error information. +// API operation IsVpcPeered for usage and error information. // // Returned Error Types: // * ServiceException @@ -10388,84 +11109,85 @@ func (c *Lightsail) PutInstancePublicPortsRequest(input *PutInstancePublicPortsI // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PutInstancePublicPorts -func (c *Lightsail) PutInstancePublicPorts(input *PutInstancePublicPortsInput) (*PutInstancePublicPortsOutput, error) { - req, out := c.PutInstancePublicPortsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/IsVpcPeered +func (c *Lightsail) IsVpcPeered(input *IsVpcPeeredInput) (*IsVpcPeeredOutput, error) { + req, out := c.IsVpcPeeredRequest(input) return out, req.Send() } -// PutInstancePublicPortsWithContext is the same as PutInstancePublicPorts with the addition of +// IsVpcPeeredWithContext is the same as IsVpcPeered with the addition of // the ability to pass a context and additional request options. // -// See PutInstancePublicPorts for details on how to use this API operation. +// See IsVpcPeered for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
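IsVpcPeered, documented above, takes an empty input and returns a single Boolean; a minimal sketch (not part of the vendored change):

func checkPeering(svc *lightsail.Lightsail) error {
	out, err := svc.IsVpcPeered(&lightsail.IsVpcPeeredInput{})
	if err != nil {
		return err
	}
	if aws.BoolValue(out.IsPeered) {
		fmt.Println("the Lightsail VPC is peered with the default VPC")
	} else {
		fmt.Println("the Lightsail VPC is not peered")
	}
	return nil
}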
-func (c *Lightsail) PutInstancePublicPortsWithContext(ctx aws.Context, input *PutInstancePublicPortsInput, opts ...request.Option) (*PutInstancePublicPortsOutput, error) { - req, out := c.PutInstancePublicPortsRequest(input) +func (c *Lightsail) IsVpcPeeredWithContext(ctx aws.Context, input *IsVpcPeeredInput, opts ...request.Option) (*IsVpcPeeredOutput, error) { + req, out := c.IsVpcPeeredRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRebootInstance = "RebootInstance" +const opOpenInstancePublicPorts = "OpenInstancePublicPorts" -// RebootInstanceRequest generates a "aws/request.Request" representing the -// client's request for the RebootInstance operation. The "output" return +// OpenInstancePublicPortsRequest generates a "aws/request.Request" representing the +// client's request for the OpenInstancePublicPorts operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RebootInstance for more information on using the RebootInstance +// See OpenInstancePublicPorts for more information on using the OpenInstancePublicPorts // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RebootInstanceRequest method. -// req, resp := client.RebootInstanceRequest(params) +// // Example sending a request using the OpenInstancePublicPortsRequest method. +// req, resp := client.OpenInstancePublicPortsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RebootInstance -func (c *Lightsail) RebootInstanceRequest(input *RebootInstanceInput) (req *request.Request, output *RebootInstanceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/OpenInstancePublicPorts +func (c *Lightsail) OpenInstancePublicPortsRequest(input *OpenInstancePublicPortsInput) (req *request.Request, output *OpenInstancePublicPortsOutput) { op := &request.Operation{ - Name: opRebootInstance, + Name: opOpenInstancePublicPorts, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &RebootInstanceInput{} + input = &OpenInstancePublicPortsInput{} } - output = &RebootInstanceOutput{} + output = &OpenInstancePublicPortsOutput{} req = c.newRequest(op, input, output) return } -// RebootInstance API operation for Amazon Lightsail. +// OpenInstancePublicPorts API operation for Amazon Lightsail. // -// Restarts a specific instance. +// Opens ports for a specific Amazon Lightsail instance, and specifies the IP +// addresses allowed to connect to the instance through the ports, and the protocol. // -// The reboot instance operation supports tag-based access control via resource -// tags applied to the resource identified by instance name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// The OpenInstancePublicPorts action supports tag-based access control via +// resource tags applied to the resource identified by instanceName. 
For more +// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation RebootInstance for usage and error information. +// API operation OpenInstancePublicPorts for usage and error information. // // Returned Error Types: // * ServiceException @@ -10496,84 +11218,80 @@ func (c *Lightsail) RebootInstanceRequest(input *RebootInstanceInput) (req *requ // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RebootInstance -func (c *Lightsail) RebootInstance(input *RebootInstanceInput) (*RebootInstanceOutput, error) { - req, out := c.RebootInstanceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/OpenInstancePublicPorts +func (c *Lightsail) OpenInstancePublicPorts(input *OpenInstancePublicPortsInput) (*OpenInstancePublicPortsOutput, error) { + req, out := c.OpenInstancePublicPortsRequest(input) return out, req.Send() } -// RebootInstanceWithContext is the same as RebootInstance with the addition of +// OpenInstancePublicPortsWithContext is the same as OpenInstancePublicPorts with the addition of // the ability to pass a context and additional request options. // -// See RebootInstance for details on how to use this API operation. +// See OpenInstancePublicPorts for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) RebootInstanceWithContext(ctx aws.Context, input *RebootInstanceInput, opts ...request.Option) (*RebootInstanceOutput, error) { - req, out := c.RebootInstanceRequest(input) +func (c *Lightsail) OpenInstancePublicPortsWithContext(ctx aws.Context, input *OpenInstancePublicPortsInput, opts ...request.Option) (*OpenInstancePublicPortsOutput, error) { + req, out := c.OpenInstancePublicPortsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRebootRelationalDatabase = "RebootRelationalDatabase" +const opPeerVpc = "PeerVpc" -// RebootRelationalDatabaseRequest generates a "aws/request.Request" representing the -// client's request for the RebootRelationalDatabase operation. The "output" return +// PeerVpcRequest generates a "aws/request.Request" representing the +// client's request for the PeerVpc operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RebootRelationalDatabase for more information on using the RebootRelationalDatabase +// See PeerVpc for more information on using the PeerVpc // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // -// // Example sending a request using the RebootRelationalDatabaseRequest method. -// req, resp := client.RebootRelationalDatabaseRequest(params) +// // Example sending a request using the PeerVpcRequest method. +// req, resp := client.PeerVpcRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RebootRelationalDatabase -func (c *Lightsail) RebootRelationalDatabaseRequest(input *RebootRelationalDatabaseInput) (req *request.Request, output *RebootRelationalDatabaseOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PeerVpc +func (c *Lightsail) PeerVpcRequest(input *PeerVpcInput) (req *request.Request, output *PeerVpcOutput) { op := &request.Operation{ - Name: opRebootRelationalDatabase, + Name: opPeerVpc, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &RebootRelationalDatabaseInput{} + input = &PeerVpcInput{} } - output = &RebootRelationalDatabaseOutput{} + output = &PeerVpcOutput{} req = c.newRequest(op, input, output) return } -// RebootRelationalDatabase API operation for Amazon Lightsail. -// -// Restarts a specific database in Amazon Lightsail. +// PeerVpc API operation for Amazon Lightsail. // -// The reboot relational database operation supports tag-based access control -// via resource tags applied to the resource identified by relationalDatabaseName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// Tries to peer the Lightsail VPC with the user's default VPC. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation RebootRelationalDatabase for usage and error information. +// API operation PeerVpc for usage and error information. // // Returned Error Types: // * ServiceException @@ -10604,80 +11322,93 @@ func (c *Lightsail) RebootRelationalDatabaseRequest(input *RebootRelationalDatab // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RebootRelationalDatabase -func (c *Lightsail) RebootRelationalDatabase(input *RebootRelationalDatabaseInput) (*RebootRelationalDatabaseOutput, error) { - req, out := c.RebootRelationalDatabaseRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PeerVpc +func (c *Lightsail) PeerVpc(input *PeerVpcInput) (*PeerVpcOutput, error) { + req, out := c.PeerVpcRequest(input) return out, req.Send() } -// RebootRelationalDatabaseWithContext is the same as RebootRelationalDatabase with the addition of +// PeerVpcWithContext is the same as PeerVpc with the addition of // the ability to pass a context and additional request options. // -// See RebootRelationalDatabase for details on how to use this API operation. +// See PeerVpc for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Lightsail) RebootRelationalDatabaseWithContext(ctx aws.Context, input *RebootRelationalDatabaseInput, opts ...request.Option) (*RebootRelationalDatabaseOutput, error) { - req, out := c.RebootRelationalDatabaseRequest(input) +func (c *Lightsail) PeerVpcWithContext(ctx aws.Context, input *PeerVpcInput, opts ...request.Option) (*PeerVpcOutput, error) { + req, out := c.PeerVpcRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opReleaseStaticIp = "ReleaseStaticIp" +const opPutAlarm = "PutAlarm" -// ReleaseStaticIpRequest generates a "aws/request.Request" representing the -// client's request for the ReleaseStaticIp operation. The "output" return +// PutAlarmRequest generates a "aws/request.Request" representing the +// client's request for the PutAlarm operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ReleaseStaticIp for more information on using the ReleaseStaticIp +// See PutAlarm for more information on using the PutAlarm // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ReleaseStaticIpRequest method. -// req, resp := client.ReleaseStaticIpRequest(params) +// // Example sending a request using the PutAlarmRequest method. +// req, resp := client.PutAlarmRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ReleaseStaticIp -func (c *Lightsail) ReleaseStaticIpRequest(input *ReleaseStaticIpInput) (req *request.Request, output *ReleaseStaticIpOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PutAlarm +func (c *Lightsail) PutAlarmRequest(input *PutAlarmInput) (req *request.Request, output *PutAlarmOutput) { op := &request.Operation{ - Name: opReleaseStaticIp, + Name: opPutAlarm, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ReleaseStaticIpInput{} + input = &PutAlarmInput{} } - output = &ReleaseStaticIpOutput{} + output = &PutAlarmOutput{} req = c.newRequest(op, input, output) return } -// ReleaseStaticIp API operation for Amazon Lightsail. +// PutAlarm API operation for Amazon Lightsail. // -// Deletes a specific static IP from your account. +// Creates or updates an alarm, and associates it with the specified metric. +// +// An alarm is used to monitor a single metric for one of your resources. When +// a metric condition is met, the alarm can notify you by email, SMS text message, +// and a banner displayed on the Amazon Lightsail console. For more information, +// see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms). +// +// When this action creates an alarm, the alarm state is immediately set to +// INSUFFICIENT_DATA. The alarm is then evaluated and its state is set appropriately. +// Any actions associated with the new state are then executed. +// +// When you update an existing alarm, its state is left unchanged, but the update +// completely overwrites the previous configuration of the alarm. The alarm +// is then evaluated with the updated configuration. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation ReleaseStaticIp for usage and error information. +// API operation PutAlarm for usage and error information. // // Returned Error Types: // * ServiceException @@ -10691,9 +11422,6 @@ func (c *Lightsail) ReleaseStaticIpRequest(input *ReleaseStaticIpInput) (req *re // Please set your AWS Region configuration to us-east-1 to create, view, or // edit these resources. // -// * NotFoundException -// Lightsail throws this exception when it cannot find a resource. -// // * OperationFailureException // Lightsail throws this exception when an operation fails to execute. // @@ -10701,101 +11429,95 @@ func (c *Lightsail) ReleaseStaticIpRequest(input *ReleaseStaticIpInput) (req *re // Lightsail throws this exception when the user cannot be authenticated or // uses invalid credentials to access a resource. // -// * AccountSetupInProgressException -// Lightsail throws this exception when an account is still in the setup in -// progress state. -// // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ReleaseStaticIp -func (c *Lightsail) ReleaseStaticIp(input *ReleaseStaticIpInput) (*ReleaseStaticIpOutput, error) { - req, out := c.ReleaseStaticIpRequest(input) +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PutAlarm +func (c *Lightsail) PutAlarm(input *PutAlarmInput) (*PutAlarmOutput, error) { + req, out := c.PutAlarmRequest(input) return out, req.Send() } -// ReleaseStaticIpWithContext is the same as ReleaseStaticIp with the addition of +// PutAlarmWithContext is the same as PutAlarm with the addition of // the ability to pass a context and additional request options. // -// See ReleaseStaticIp for details on how to use this API operation. +// See PutAlarm for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) ReleaseStaticIpWithContext(ctx aws.Context, input *ReleaseStaticIpInput, opts ...request.Option) (*ReleaseStaticIpOutput, error) { - req, out := c.ReleaseStaticIpRequest(input) +func (c *Lightsail) PutAlarmWithContext(ctx aws.Context, input *PutAlarmInput, opts ...request.Option) (*PutAlarmOutput, error) { + req, out := c.PutAlarmRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opSendContactMethodVerification = "SendContactMethodVerification" +const opPutInstancePublicPorts = "PutInstancePublicPorts" -// SendContactMethodVerificationRequest generates a "aws/request.Request" representing the -// client's request for the SendContactMethodVerification operation. The "output" return +// PutInstancePublicPortsRequest generates a "aws/request.Request" representing the +// client's request for the PutInstancePublicPorts operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See SendContactMethodVerification for more information on using the SendContactMethodVerification +// See PutInstancePublicPorts for more information on using the PutInstancePublicPorts // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the SendContactMethodVerificationRequest method. -// req, resp := client.SendContactMethodVerificationRequest(params) +// // Example sending a request using the PutInstancePublicPortsRequest method. +// req, resp := client.PutInstancePublicPortsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/SendContactMethodVerification -func (c *Lightsail) SendContactMethodVerificationRequest(input *SendContactMethodVerificationInput) (req *request.Request, output *SendContactMethodVerificationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PutInstancePublicPorts +func (c *Lightsail) PutInstancePublicPortsRequest(input *PutInstancePublicPortsInput) (req *request.Request, output *PutInstancePublicPortsOutput) { op := &request.Operation{ - Name: opSendContactMethodVerification, + Name: opPutInstancePublicPorts, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &SendContactMethodVerificationInput{} + input = &PutInstancePublicPortsInput{} } - output = &SendContactMethodVerificationOutput{} + output = &PutInstancePublicPortsOutput{} req = c.newRequest(op, input, output) return } -// SendContactMethodVerification API operation for Amazon Lightsail. -// -// Sends a verification request to an email contact method to ensure it’s -// owned by the requester. SMS contact methods don’t need to be verified. -// -// A contact method is used to send you notifications about your Amazon Lightsail -// resources. You can add one email address and one mobile phone number contact -// method in each AWS Region. However, SMS text messaging is not supported in -// some AWS Regions, and SMS text messages cannot be sent to some countries/regions. -// For more information, see Notifications in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-notifications). +// PutInstancePublicPorts API operation for Amazon Lightsail. // -// A verification request is sent to the contact method when you initially create -// it. Use this action to send another verification request if a previous verification -// request was deleted, or has expired. +// Opens ports for a specific Amazon Lightsail instance, and specifies the IP +// addresses allowed to connect to the instance through the ports, and the protocol. +// This action also closes all currently open ports that are not included in +// the request. Include all of the ports and the protocols you want to open +// in your PutInstancePublicPortsrequest. Or use the OpenInstancePublicPorts +// action to open ports without closing currently open ports. // -// Notifications are not sent to an email contact method until after it is verified, -// and confirmed as valid. 
+// The PutInstancePublicPorts action supports tag-based access control via resource +// tags applied to the resource identified by instanceName. For more information, +// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation SendContactMethodVerification for usage and error information. +// API operation PutInstancePublicPorts for usage and error information. // // Returned Error Types: // * ServiceException @@ -10809,94 +11531,92 @@ func (c *Lightsail) SendContactMethodVerificationRequest(input *SendContactMetho // Please set your AWS Region configuration to us-east-1 to create, view, or // edit these resources. // +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// // * OperationFailureException // Lightsail throws this exception when an operation fails to execute. // -// * UnauthenticatedException -// Lightsail throws this exception when the user has not been authenticated. -// // * AccessDeniedException // Lightsail throws this exception when the user cannot be authenticated or // uses invalid credentials to access a resource. // -// * NotFoundException -// Lightsail throws this exception when it cannot find a resource. +// * AccountSetupInProgressException +// Lightsail throws this exception when an account is still in the setup in +// progress state. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/SendContactMethodVerification -func (c *Lightsail) SendContactMethodVerification(input *SendContactMethodVerificationInput) (*SendContactMethodVerificationOutput, error) { - req, out := c.SendContactMethodVerificationRequest(input) +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/PutInstancePublicPorts +func (c *Lightsail) PutInstancePublicPorts(input *PutInstancePublicPortsInput) (*PutInstancePublicPortsOutput, error) { + req, out := c.PutInstancePublicPortsRequest(input) return out, req.Send() } -// SendContactMethodVerificationWithContext is the same as SendContactMethodVerification with the addition of +// PutInstancePublicPortsWithContext is the same as PutInstancePublicPorts with the addition of // the ability to pass a context and additional request options. // -// See SendContactMethodVerification for details on how to use this API operation. +// See PutInstancePublicPorts for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Lightsail) SendContactMethodVerificationWithContext(ctx aws.Context, input *SendContactMethodVerificationInput, opts ...request.Option) (*SendContactMethodVerificationOutput, error) { - req, out := c.SendContactMethodVerificationRequest(input) +func (c *Lightsail) PutInstancePublicPortsWithContext(ctx aws.Context, input *PutInstancePublicPortsInput, opts ...request.Option) (*PutInstancePublicPortsOutput, error) { + req, out := c.PutInstancePublicPortsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStartInstance = "StartInstance" +const opRebootInstance = "RebootInstance" -// StartInstanceRequest generates a "aws/request.Request" representing the -// client's request for the StartInstance operation. The "output" return +// RebootInstanceRequest generates a "aws/request.Request" representing the +// client's request for the RebootInstance operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartInstance for more information on using the StartInstance +// See RebootInstance for more information on using the RebootInstance // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartInstanceRequest method. -// req, resp := client.StartInstanceRequest(params) +// // Example sending a request using the RebootInstanceRequest method. +// req, resp := client.RebootInstanceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StartInstance -func (c *Lightsail) StartInstanceRequest(input *StartInstanceInput) (req *request.Request, output *StartInstanceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RebootInstance +func (c *Lightsail) RebootInstanceRequest(input *RebootInstanceInput) (req *request.Request, output *RebootInstanceOutput) { op := &request.Operation{ - Name: opStartInstance, + Name: opRebootInstance, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StartInstanceInput{} + input = &RebootInstanceInput{} } - output = &StartInstanceOutput{} + output = &RebootInstanceOutput{} req = c.newRequest(op, input, output) return } -// StartInstance API operation for Amazon Lightsail. -// -// Starts a specific Amazon Lightsail instance from a stopped state. To restart -// an instance, use the reboot instance operation. +// RebootInstance API operation for Amazon Lightsail. // -// When you start a stopped instance, Lightsail assigns a new public IP address -// to the instance. To use the same IP address after stopping and starting an -// instance, create a static IP address and attach it to the instance. For more -// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-create-static-ip). +// Restarts a specific instance. // -// The start instance operation supports tag-based access control via resource +// The reboot instance operation supports tag-based access control via resource // tags applied to the resource identified by instance name. 
For more information, // see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // @@ -10905,7 +11625,7 @@ func (c *Lightsail) StartInstanceRequest(input *StartInstanceInput) (req *reques // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation StartInstance for usage and error information. +// API operation RebootInstance for usage and error information. // // Returned Error Types: // * ServiceException @@ -10936,76 +11656,75 @@ func (c *Lightsail) StartInstanceRequest(input *StartInstanceInput) (req *reques // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StartInstance -func (c *Lightsail) StartInstance(input *StartInstanceInput) (*StartInstanceOutput, error) { - req, out := c.StartInstanceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RebootInstance +func (c *Lightsail) RebootInstance(input *RebootInstanceInput) (*RebootInstanceOutput, error) { + req, out := c.RebootInstanceRequest(input) return out, req.Send() } -// StartInstanceWithContext is the same as StartInstance with the addition of +// RebootInstanceWithContext is the same as RebootInstance with the addition of // the ability to pass a context and additional request options. // -// See StartInstance for details on how to use this API operation. +// See RebootInstance for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) StartInstanceWithContext(ctx aws.Context, input *StartInstanceInput, opts ...request.Option) (*StartInstanceOutput, error) { - req, out := c.StartInstanceRequest(input) +func (c *Lightsail) RebootInstanceWithContext(ctx aws.Context, input *RebootInstanceInput, opts ...request.Option) (*RebootInstanceOutput, error) { + req, out := c.RebootInstanceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStartRelationalDatabase = "StartRelationalDatabase" +const opRebootRelationalDatabase = "RebootRelationalDatabase" -// StartRelationalDatabaseRequest generates a "aws/request.Request" representing the -// client's request for the StartRelationalDatabase operation. The "output" return +// RebootRelationalDatabaseRequest generates a "aws/request.Request" representing the +// client's request for the RebootRelationalDatabase operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartRelationalDatabase for more information on using the StartRelationalDatabase +// See RebootRelationalDatabase for more information on using the RebootRelationalDatabase // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartRelationalDatabaseRequest method. 
-// req, resp := client.StartRelationalDatabaseRequest(params) +// // Example sending a request using the RebootRelationalDatabaseRequest method. +// req, resp := client.RebootRelationalDatabaseRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StartRelationalDatabase -func (c *Lightsail) StartRelationalDatabaseRequest(input *StartRelationalDatabaseInput) (req *request.Request, output *StartRelationalDatabaseOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RebootRelationalDatabase +func (c *Lightsail) RebootRelationalDatabaseRequest(input *RebootRelationalDatabaseInput) (req *request.Request, output *RebootRelationalDatabaseOutput) { op := &request.Operation{ - Name: opStartRelationalDatabase, + Name: opRebootRelationalDatabase, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StartRelationalDatabaseInput{} + input = &RebootRelationalDatabaseInput{} } - output = &StartRelationalDatabaseOutput{} + output = &RebootRelationalDatabaseOutput{} req = c.newRequest(op, input, output) return } -// StartRelationalDatabase API operation for Amazon Lightsail. +// RebootRelationalDatabase API operation for Amazon Lightsail. // -// Starts a specific database from a stopped state in Amazon Lightsail. To restart -// a database, use the reboot relational database operation. +// Restarts a specific database in Amazon Lightsail. // -// The start relational database operation supports tag-based access control +// The reboot relational database operation supports tag-based access control // via resource tags applied to the resource identified by relationalDatabaseName. // For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // @@ -11014,7 +11733,7 @@ func (c *Lightsail) StartRelationalDatabaseRequest(input *StartRelationalDatabas // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation StartRelationalDatabase for usage and error information. +// API operation RebootRelationalDatabase for usage and error information. // // Returned Error Types: // * ServiceException @@ -11045,89 +11764,80 @@ func (c *Lightsail) StartRelationalDatabaseRequest(input *StartRelationalDatabas // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StartRelationalDatabase -func (c *Lightsail) StartRelationalDatabase(input *StartRelationalDatabaseInput) (*StartRelationalDatabaseOutput, error) { - req, out := c.StartRelationalDatabaseRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/RebootRelationalDatabase +func (c *Lightsail) RebootRelationalDatabase(input *RebootRelationalDatabaseInput) (*RebootRelationalDatabaseOutput, error) { + req, out := c.RebootRelationalDatabaseRequest(input) return out, req.Send() } -// StartRelationalDatabaseWithContext is the same as StartRelationalDatabase with the addition of +// RebootRelationalDatabaseWithContext is the same as RebootRelationalDatabase with the addition of // the ability to pass a context and additional request options. // -// See StartRelationalDatabase for details on how to use this API operation. +// See RebootRelationalDatabase for details on how to use this API operation. 
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) StartRelationalDatabaseWithContext(ctx aws.Context, input *StartRelationalDatabaseInput, opts ...request.Option) (*StartRelationalDatabaseOutput, error) { - req, out := c.StartRelationalDatabaseRequest(input) +func (c *Lightsail) RebootRelationalDatabaseWithContext(ctx aws.Context, input *RebootRelationalDatabaseInput, opts ...request.Option) (*RebootRelationalDatabaseOutput, error) { + req, out := c.RebootRelationalDatabaseRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStopInstance = "StopInstance" +const opReleaseStaticIp = "ReleaseStaticIp" -// StopInstanceRequest generates a "aws/request.Request" representing the -// client's request for the StopInstance operation. The "output" return +// ReleaseStaticIpRequest generates a "aws/request.Request" representing the +// client's request for the ReleaseStaticIp operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopInstance for more information on using the StopInstance +// See ReleaseStaticIp for more information on using the ReleaseStaticIp // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StopInstanceRequest method. -// req, resp := client.StopInstanceRequest(params) +// // Example sending a request using the ReleaseStaticIpRequest method. +// req, resp := client.ReleaseStaticIpRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StopInstance -func (c *Lightsail) StopInstanceRequest(input *StopInstanceInput) (req *request.Request, output *StopInstanceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ReleaseStaticIp +func (c *Lightsail) ReleaseStaticIpRequest(input *ReleaseStaticIpInput) (req *request.Request, output *ReleaseStaticIpOutput) { op := &request.Operation{ - Name: opStopInstance, + Name: opReleaseStaticIp, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StopInstanceInput{} + input = &ReleaseStaticIpInput{} } - output = &StopInstanceOutput{} + output = &ReleaseStaticIpOutput{} req = c.newRequest(op, input, output) return } -// StopInstance API operation for Amazon Lightsail. -// -// Stops a specific Amazon Lightsail instance that is currently running. -// -// When you start a stopped instance, Lightsail assigns a new public IP address -// to the instance. To use the same IP address after stopping and starting an -// instance, create a static IP address and attach it to the instance. For more -// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-create-static-ip). +// ReleaseStaticIp API operation for Amazon Lightsail. 
// -// The stop instance operation supports tag-based access control via resource -// tags applied to the resource identified by instance name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// Deletes a specific static IP from your account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation StopInstance for usage and error information. +// API operation ReleaseStaticIp for usage and error information. // // Returned Error Types: // * ServiceException @@ -11158,84 +11868,84 @@ func (c *Lightsail) StopInstanceRequest(input *StopInstanceInput) (req *request. // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StopInstance -func (c *Lightsail) StopInstance(input *StopInstanceInput) (*StopInstanceOutput, error) { - req, out := c.StopInstanceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ReleaseStaticIp +func (c *Lightsail) ReleaseStaticIp(input *ReleaseStaticIpInput) (*ReleaseStaticIpOutput, error) { + req, out := c.ReleaseStaticIpRequest(input) return out, req.Send() } -// StopInstanceWithContext is the same as StopInstance with the addition of +// ReleaseStaticIpWithContext is the same as ReleaseStaticIp with the addition of // the ability to pass a context and additional request options. // -// See StopInstance for details on how to use this API operation. +// See ReleaseStaticIp for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) StopInstanceWithContext(ctx aws.Context, input *StopInstanceInput, opts ...request.Option) (*StopInstanceOutput, error) { - req, out := c.StopInstanceRequest(input) +func (c *Lightsail) ReleaseStaticIpWithContext(ctx aws.Context, input *ReleaseStaticIpInput, opts ...request.Option) (*ReleaseStaticIpOutput, error) { + req, out := c.ReleaseStaticIpRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStopRelationalDatabase = "StopRelationalDatabase" +const opResetDistributionCache = "ResetDistributionCache" -// StopRelationalDatabaseRequest generates a "aws/request.Request" representing the -// client's request for the StopRelationalDatabase operation. The "output" return +// ResetDistributionCacheRequest generates a "aws/request.Request" representing the +// client's request for the ResetDistributionCache operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopRelationalDatabase for more information on using the StopRelationalDatabase +// See ResetDistributionCache for more information on using the ResetDistributionCache // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StopRelationalDatabaseRequest method. -// req, resp := client.StopRelationalDatabaseRequest(params) +// // Example sending a request using the ResetDistributionCacheRequest method. +// req, resp := client.ResetDistributionCacheRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StopRelationalDatabase -func (c *Lightsail) StopRelationalDatabaseRequest(input *StopRelationalDatabaseInput) (req *request.Request, output *StopRelationalDatabaseOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ResetDistributionCache +func (c *Lightsail) ResetDistributionCacheRequest(input *ResetDistributionCacheInput) (req *request.Request, output *ResetDistributionCacheOutput) { op := &request.Operation{ - Name: opStopRelationalDatabase, + Name: opResetDistributionCache, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StopRelationalDatabaseInput{} + input = &ResetDistributionCacheInput{} } - output = &StopRelationalDatabaseOutput{} + output = &ResetDistributionCacheOutput{} req = c.newRequest(op, input, output) return } -// StopRelationalDatabase API operation for Amazon Lightsail. +// ResetDistributionCache API operation for Amazon Lightsail. // -// Stops a specific database that is currently running in Amazon Lightsail. +// Deletes currently cached content from your Amazon Lightsail content delivery +// network (CDN) distribution. // -// The stop relational database operation supports tag-based access control -// via resource tags applied to the resource identified by relationalDatabaseName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// After resetting the cache, the next time a content request is made, your +// distribution pulls, serves, and caches it from the origin. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation StopRelationalDatabase for usage and error information. +// API operation ResetDistributionCache for usage and error information. // // Returned Error Types: // * ServiceException @@ -11259,94 +11969,97 @@ func (c *Lightsail) StopRelationalDatabaseRequest(input *StopRelationalDatabaseI // Lightsail throws this exception when the user cannot be authenticated or // uses invalid credentials to access a resource. // -// * AccountSetupInProgressException -// Lightsail throws this exception when an account is still in the setup in -// progress state. -// // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StopRelationalDatabase -func (c *Lightsail) StopRelationalDatabase(input *StopRelationalDatabaseInput) (*StopRelationalDatabaseOutput, error) { - req, out := c.StopRelationalDatabaseRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/ResetDistributionCache +func (c *Lightsail) ResetDistributionCache(input *ResetDistributionCacheInput) (*ResetDistributionCacheOutput, error) { + req, out := c.ResetDistributionCacheRequest(input) return out, req.Send() } -// StopRelationalDatabaseWithContext is the same as StopRelationalDatabase with the addition of +// ResetDistributionCacheWithContext is the same as ResetDistributionCache with the addition of // the ability to pass a context and additional request options. // -// See StopRelationalDatabase for details on how to use this API operation. +// See ResetDistributionCache for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) StopRelationalDatabaseWithContext(ctx aws.Context, input *StopRelationalDatabaseInput, opts ...request.Option) (*StopRelationalDatabaseOutput, error) { - req, out := c.StopRelationalDatabaseRequest(input) +func (c *Lightsail) ResetDistributionCacheWithContext(ctx aws.Context, input *ResetDistributionCacheInput, opts ...request.Option) (*ResetDistributionCacheOutput, error) { + req, out := c.ResetDistributionCacheRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +const opSendContactMethodVerification = "SendContactMethodVerification" -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// SendContactMethodVerificationRequest generates a "aws/request.Request" representing the +// client's request for the SendContactMethodVerification operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See SendContactMethodVerification for more information on using the SendContactMethodVerification // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the SendContactMethodVerificationRequest method. 
+// req, resp := client.SendContactMethodVerificationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/TagResource -func (c *Lightsail) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/SendContactMethodVerification +func (c *Lightsail) SendContactMethodVerificationRequest(input *SendContactMethodVerificationInput) (req *request.Request, output *SendContactMethodVerificationOutput) { op := &request.Operation{ - Name: opTagResource, + Name: opSendContactMethodVerification, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &TagResourceInput{} + input = &SendContactMethodVerificationInput{} } - output = &TagResourceOutput{} + output = &SendContactMethodVerificationOutput{} req = c.newRequest(op, input, output) return } -// TagResource API operation for Amazon Lightsail. +// SendContactMethodVerification API operation for Amazon Lightsail. // -// Adds one or more tags to the specified Amazon Lightsail resource. Each resource -// can have a maximum of 50 tags. Each tag consists of a key and an optional -// value. Tag keys must be unique per resource. For more information about tags, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). +// Sends a verification request to an email contact method to ensure it's owned +// by the requester. SMS contact methods don't need to be verified. // -// The tag resource operation supports tag-based access control via request -// tags and resource tags applied to the resource identified by resource name. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// A contact method is used to send you notifications about your Amazon Lightsail +// resources. You can add one email address and one mobile phone number contact +// method in each AWS Region. However, SMS text messaging is not supported in +// some AWS Regions, and SMS text messages cannot be sent to some countries/regions. +// For more information, see Notifications in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-notifications). +// +// A verification request is sent to the contact method when you initially create +// it. Use this action to send another verification request if a previous verification +// request was deleted, or has expired. +// +// Notifications are not sent to an email contact method until after it is verified, +// and confirmed as valid. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation TagResource for usage and error information. +// API operation SendContactMethodVerification for usage and error information. // // Returned Error Types: // * ServiceException @@ -11360,105 +12073,103 @@ func (c *Lightsail) TagResourceRequest(input *TagResourceInput) (req *request.Re // Please set your AWS Region configuration to us-east-1 to create, view, or // edit these resources. // -// * NotFoundException -// Lightsail throws this exception when it cannot find a resource. 
-// // * OperationFailureException // Lightsail throws this exception when an operation fails to execute. // +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// // * AccessDeniedException // Lightsail throws this exception when the user cannot be authenticated or // uses invalid credentials to access a resource. // -// * AccountSetupInProgressException -// Lightsail throws this exception when an account is still in the setup in -// progress state. +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. // -// * UnauthenticatedException -// Lightsail throws this exception when the user has not been authenticated. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/TagResource -func (c *Lightsail) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/SendContactMethodVerification +func (c *Lightsail) SendContactMethodVerification(input *SendContactMethodVerificationInput) (*SendContactMethodVerificationOutput, error) { + req, out := c.SendContactMethodVerificationRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// SendContactMethodVerificationWithContext is the same as SendContactMethodVerification with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See SendContactMethodVerification for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *Lightsail) SendContactMethodVerificationWithContext(ctx aws.Context, input *SendContactMethodVerificationInput, opts ...request.Option) (*SendContactMethodVerificationOutput, error) { + req, out := c.SendContactMethodVerificationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTestAlarm = "TestAlarm" +const opStartInstance = "StartInstance" -// TestAlarmRequest generates a "aws/request.Request" representing the -// client's request for the TestAlarm operation. The "output" return +// StartInstanceRequest generates a "aws/request.Request" representing the +// client's request for the StartInstance operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TestAlarm for more information on using the TestAlarm +// See StartInstance for more information on using the StartInstance // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TestAlarmRequest method. 
-// req, resp := client.TestAlarmRequest(params) +// // Example sending a request using the StartInstanceRequest method. +// req, resp := client.StartInstanceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/TestAlarm -func (c *Lightsail) TestAlarmRequest(input *TestAlarmInput) (req *request.Request, output *TestAlarmOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StartInstance +func (c *Lightsail) StartInstanceRequest(input *StartInstanceInput) (req *request.Request, output *StartInstanceOutput) { op := &request.Operation{ - Name: opTestAlarm, + Name: opStartInstance, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &TestAlarmInput{} + input = &StartInstanceInput{} } - output = &TestAlarmOutput{} + output = &StartInstanceOutput{} req = c.newRequest(op, input, output) return } -// TestAlarm API operation for Amazon Lightsail. +// StartInstance API operation for Amazon Lightsail. // -// Tests an alarm by displaying a banner on the Amazon Lightsail console. If -// a notification trigger is configured for the specified alarm, the test also -// sends a notification to the notification protocol (Email and/or SMS) configured -// for the alarm. +// Starts a specific Amazon Lightsail instance from a stopped state. To restart +// an instance, use the reboot instance operation. // -// An alarm is used to monitor a single metric for one of your resources. When -// a metric condition is met, the alarm can notify you by email, SMS text message, -// and a banner displayed on the Amazon Lightsail console. For more information, -// see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms). +// When you start a stopped instance, Lightsail assigns a new public IP address +// to the instance. To use the same IP address after stopping and starting an +// instance, create a static IP address and attach it to the instance. For more +// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-create-static-ip). +// +// The start instance operation supports tag-based access control via resource +// tags applied to the resource identified by instance name. For more information, +// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation TestAlarm for usage and error information. +// API operation StartInstance for usage and error information. // // Returned Error Types: // * ServiceException @@ -11472,93 +12183,102 @@ func (c *Lightsail) TestAlarmRequest(input *TestAlarmInput) (req *request.Reques // Please set your AWS Region configuration to us-east-1 to create, view, or // edit these resources. // +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// // * OperationFailureException // Lightsail throws this exception when an operation fails to execute. // -// * UnauthenticatedException -// Lightsail throws this exception when the user has not been authenticated. 
-// // * AccessDeniedException // Lightsail throws this exception when the user cannot be authenticated or // uses invalid credentials to access a resource. // -// * NotFoundException -// Lightsail throws this exception when it cannot find a resource. +// * AccountSetupInProgressException +// Lightsail throws this exception when an account is still in the setup in +// progress state. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/TestAlarm -func (c *Lightsail) TestAlarm(input *TestAlarmInput) (*TestAlarmOutput, error) { - req, out := c.TestAlarmRequest(input) +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StartInstance +func (c *Lightsail) StartInstance(input *StartInstanceInput) (*StartInstanceOutput, error) { + req, out := c.StartInstanceRequest(input) return out, req.Send() } -// TestAlarmWithContext is the same as TestAlarm with the addition of +// StartInstanceWithContext is the same as StartInstance with the addition of // the ability to pass a context and additional request options. // -// See TestAlarm for details on how to use this API operation. +// See StartInstance for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) TestAlarmWithContext(ctx aws.Context, input *TestAlarmInput, opts ...request.Option) (*TestAlarmOutput, error) { - req, out := c.TestAlarmRequest(input) +func (c *Lightsail) StartInstanceWithContext(ctx aws.Context, input *StartInstanceInput, opts ...request.Option) (*StartInstanceOutput, error) { + req, out := c.StartInstanceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUnpeerVpc = "UnpeerVpc" +const opStartRelationalDatabase = "StartRelationalDatabase" -// UnpeerVpcRequest generates a "aws/request.Request" representing the -// client's request for the UnpeerVpc operation. The "output" return +// StartRelationalDatabaseRequest generates a "aws/request.Request" representing the +// client's request for the StartRelationalDatabase operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UnpeerVpc for more information on using the UnpeerVpc +// See StartRelationalDatabase for more information on using the StartRelationalDatabase // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UnpeerVpcRequest method. -// req, resp := client.UnpeerVpcRequest(params) +// // Example sending a request using the StartRelationalDatabaseRequest method. 
+// req, resp := client.StartRelationalDatabaseRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UnpeerVpc -func (c *Lightsail) UnpeerVpcRequest(input *UnpeerVpcInput) (req *request.Request, output *UnpeerVpcOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StartRelationalDatabase +func (c *Lightsail) StartRelationalDatabaseRequest(input *StartRelationalDatabaseInput) (req *request.Request, output *StartRelationalDatabaseOutput) { op := &request.Operation{ - Name: opUnpeerVpc, + Name: opStartRelationalDatabase, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UnpeerVpcInput{} + input = &StartRelationalDatabaseInput{} } - output = &UnpeerVpcOutput{} + output = &StartRelationalDatabaseOutput{} req = c.newRequest(op, input, output) return } -// UnpeerVpc API operation for Amazon Lightsail. +// StartRelationalDatabase API operation for Amazon Lightsail. // -// Attempts to unpeer the Lightsail VPC from the user's default VPC. +// Starts a specific database from a stopped state in Amazon Lightsail. To restart +// a database, use the reboot relational database operation. +// +// The start relational database operation supports tag-based access control +// via resource tags applied to the resource identified by relationalDatabaseName. +// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation UnpeerVpc for usage and error information. +// API operation StartRelationalDatabase for usage and error information. // // Returned Error Types: // * ServiceException @@ -11589,85 +12309,89 @@ func (c *Lightsail) UnpeerVpcRequest(input *UnpeerVpcInput) (req *request.Reques // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UnpeerVpc -func (c *Lightsail) UnpeerVpc(input *UnpeerVpcInput) (*UnpeerVpcOutput, error) { - req, out := c.UnpeerVpcRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StartRelationalDatabase +func (c *Lightsail) StartRelationalDatabase(input *StartRelationalDatabaseInput) (*StartRelationalDatabaseOutput, error) { + req, out := c.StartRelationalDatabaseRequest(input) return out, req.Send() } -// UnpeerVpcWithContext is the same as UnpeerVpc with the addition of +// StartRelationalDatabaseWithContext is the same as StartRelationalDatabase with the addition of // the ability to pass a context and additional request options. // -// See UnpeerVpc for details on how to use this API operation. +// See StartRelationalDatabase for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
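//
// A minimal sketch of StartRelationalDatabase, reusing the svc client from the
// StartInstance sketch above; the database name is hypothetical:
//
//    out, err := svc.StartRelationalDatabase(&lightsail.StartRelationalDatabaseInput{
//        RelationalDatabaseName: aws.String("example-db"),
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//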
-func (c *Lightsail) UnpeerVpcWithContext(ctx aws.Context, input *UnpeerVpcInput, opts ...request.Option) (*UnpeerVpcOutput, error) { - req, out := c.UnpeerVpcRequest(input) +func (c *Lightsail) StartRelationalDatabaseWithContext(ctx aws.Context, input *StartRelationalDatabaseInput, opts ...request.Option) (*StartRelationalDatabaseOutput, error) { + req, out := c.StartRelationalDatabaseRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUntagResource = "UntagResource" +const opStopInstance = "StopInstance" -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// StopInstanceRequest generates a "aws/request.Request" representing the +// client's request for the StopInstance operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See StopInstance for more information on using the StopInstance // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the StopInstanceRequest method. +// req, resp := client.StopInstanceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UntagResource -func (c *Lightsail) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StopInstance +func (c *Lightsail) StopInstanceRequest(input *StopInstanceInput) (req *request.Request, output *StopInstanceOutput) { op := &request.Operation{ - Name: opUntagResource, + Name: opStopInstance, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UntagResourceInput{} + input = &StopInstanceInput{} } - output = &UntagResourceOutput{} + output = &StopInstanceOutput{} req = c.newRequest(op, input, output) return } -// UntagResource API operation for Amazon Lightsail. +// StopInstance API operation for Amazon Lightsail. // -// Deletes the specified set of tag keys and their values from the specified -// Amazon Lightsail resource. +// Stops a specific Amazon Lightsail instance that is currently running. // -// The untag resource operation supports tag-based access control via request -// tags and resource tags applied to the resource identified by resource name. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// When you start a stopped instance, Lightsail assigns a new public IP address +// to the instance. To use the same IP address after stopping and starting an +// instance, create a static IP address and attach it to the instance. For more +// information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/lightsail-create-static-ip). 
+// +// The stop instance operation supports tag-based access control via resource +// tags applied to the resource identified by instance name. For more information, +// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation UntagResource for usage and error information. +// API operation StopInstance for usage and error information. // // Returned Error Types: // * ServiceException @@ -11698,84 +12422,84 @@ func (c *Lightsail) UntagResourceRequest(input *UntagResourceInput) (req *reques // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UntagResource -func (c *Lightsail) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StopInstance +func (c *Lightsail) StopInstance(input *StopInstanceInput) (*StopInstanceOutput, error) { + req, out := c.StopInstanceRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// StopInstanceWithContext is the same as StopInstance with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See StopInstance for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *Lightsail) StopInstanceWithContext(ctx aws.Context, input *StopInstanceInput, opts ...request.Option) (*StopInstanceOutput, error) { + req, out := c.StopInstanceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDomainEntry = "UpdateDomainEntry" +const opStopRelationalDatabase = "StopRelationalDatabase" -// UpdateDomainEntryRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDomainEntry operation. The "output" return +// StopRelationalDatabaseRequest generates a "aws/request.Request" representing the +// client's request for the StopRelationalDatabase operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDomainEntry for more information on using the UpdateDomainEntry +// See StopRelationalDatabase for more information on using the StopRelationalDatabase // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
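//
// A minimal sketch of StopInstance, reusing the svc client from the sketches
// above; the instance name is hypothetical and Force is optional:
//
//    out, err := svc.StopInstance(&lightsail.StopInstanceInput{
//        InstanceName: aws.String("example-instance"),
//        Force:        aws.Bool(false),
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//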
// // -// // Example sending a request using the UpdateDomainEntryRequest method. -// req, resp := client.UpdateDomainEntryRequest(params) +// // Example sending a request using the StopRelationalDatabaseRequest method. +// req, resp := client.StopRelationalDatabaseRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDomainEntry -func (c *Lightsail) UpdateDomainEntryRequest(input *UpdateDomainEntryInput) (req *request.Request, output *UpdateDomainEntryOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StopRelationalDatabase +func (c *Lightsail) StopRelationalDatabaseRequest(input *StopRelationalDatabaseInput) (req *request.Request, output *StopRelationalDatabaseOutput) { op := &request.Operation{ - Name: opUpdateDomainEntry, + Name: opStopRelationalDatabase, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateDomainEntryInput{} + input = &StopRelationalDatabaseInput{} } - output = &UpdateDomainEntryOutput{} + output = &StopRelationalDatabaseOutput{} req = c.newRequest(op, input, output) return } -// UpdateDomainEntry API operation for Amazon Lightsail. +// StopRelationalDatabase API operation for Amazon Lightsail. // -// Updates a domain recordset after it is created. +// Stops a specific database that is currently running in Amazon Lightsail. // -// The update domain entry operation supports tag-based access control via resource -// tags applied to the resource identified by domain name. For more information, -// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// The stop relational database operation supports tag-based access control +// via resource tags applied to the resource identified by relationalDatabaseName. +// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation UpdateDomainEntry for usage and error information. +// API operation StopRelationalDatabase for usage and error information. // // Returned Error Types: // * ServiceException @@ -11806,77 +12530,79 @@ func (c *Lightsail) UpdateDomainEntryRequest(input *UpdateDomainEntryInput) (req // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDomainEntry -func (c *Lightsail) UpdateDomainEntry(input *UpdateDomainEntryInput) (*UpdateDomainEntryOutput, error) { - req, out := c.UpdateDomainEntryRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/StopRelationalDatabase +func (c *Lightsail) StopRelationalDatabase(input *StopRelationalDatabaseInput) (*StopRelationalDatabaseOutput, error) { + req, out := c.StopRelationalDatabaseRequest(input) return out, req.Send() } -// UpdateDomainEntryWithContext is the same as UpdateDomainEntry with the addition of +// StopRelationalDatabaseWithContext is the same as StopRelationalDatabase with the addition of // the ability to pass a context and additional request options. 
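//
// A minimal sketch of StopRelationalDatabase, reusing the svc client from the
// sketches above; the database and optional snapshot names are hypothetical:
//
//    out, err := svc.StopRelationalDatabase(&lightsail.StopRelationalDatabaseInput{
//        RelationalDatabaseName:         aws.String("example-db"),
//        RelationalDatabaseSnapshotName: aws.String("example-db-final-snapshot"),
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//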
// -// See UpdateDomainEntry for details on how to use this API operation. +// See StopRelationalDatabase for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) UpdateDomainEntryWithContext(ctx aws.Context, input *UpdateDomainEntryInput, opts ...request.Option) (*UpdateDomainEntryOutput, error) { - req, out := c.UpdateDomainEntryRequest(input) +func (c *Lightsail) StopRelationalDatabaseWithContext(ctx aws.Context, input *StopRelationalDatabaseInput, opts ...request.Option) (*StopRelationalDatabaseOutput, error) { + req, out := c.StopRelationalDatabaseRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateLoadBalancerAttribute = "UpdateLoadBalancerAttribute" +const opTagResource = "TagResource" -// UpdateLoadBalancerAttributeRequest generates a "aws/request.Request" representing the -// client's request for the UpdateLoadBalancerAttribute operation. The "output" return +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateLoadBalancerAttribute for more information on using the UpdateLoadBalancerAttribute +// See TagResource for more information on using the TagResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateLoadBalancerAttributeRequest method. -// req, resp := client.UpdateLoadBalancerAttributeRequest(params) +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateLoadBalancerAttribute -func (c *Lightsail) UpdateLoadBalancerAttributeRequest(input *UpdateLoadBalancerAttributeInput) (req *request.Request, output *UpdateLoadBalancerAttributeOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/TagResource +func (c *Lightsail) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { op := &request.Operation{ - Name: opUpdateLoadBalancerAttribute, + Name: opTagResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateLoadBalancerAttributeInput{} + input = &TagResourceInput{} } - output = &UpdateLoadBalancerAttributeOutput{} + output = &TagResourceOutput{} req = c.newRequest(op, input, output) return } -// UpdateLoadBalancerAttribute API operation for Amazon Lightsail. +// TagResource API operation for Amazon Lightsail. // -// Updates the specified attribute for a load balancer. You can only update -// one attribute at a time. +// Adds one or more tags to the specified Amazon Lightsail resource. Each resource +// can have a maximum of 50 tags. 
Each tag consists of a key and an optional +// value. Tag keys must be unique per resource. For more information about tags, +// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). // -// The update load balancer attribute operation supports tag-based access control -// via resource tags applied to the resource identified by load balancer name. +// The tag resource operation supports tag-based access control via request +// tags and resource tags applied to the resource identified by resource name. // For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -11884,7 +12610,7 @@ func (c *Lightsail) UpdateLoadBalancerAttributeRequest(input *UpdateLoadBalancer // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation UpdateLoadBalancerAttribute for usage and error information. +// API operation TagResource for usage and error information. // // Returned Error Types: // * ServiceException @@ -11915,87 +12641,188 @@ func (c *Lightsail) UpdateLoadBalancerAttributeRequest(input *UpdateLoadBalancer // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateLoadBalancerAttribute -func (c *Lightsail) UpdateLoadBalancerAttribute(input *UpdateLoadBalancerAttributeInput) (*UpdateLoadBalancerAttributeOutput, error) { - req, out := c.UpdateLoadBalancerAttributeRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/TagResource +func (c *Lightsail) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) return out, req.Send() } -// UpdateLoadBalancerAttributeWithContext is the same as UpdateLoadBalancerAttribute with the addition of +// TagResourceWithContext is the same as TagResource with the addition of // the ability to pass a context and additional request options. // -// See UpdateLoadBalancerAttribute for details on how to use this API operation. +// See TagResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) UpdateLoadBalancerAttributeWithContext(ctx aws.Context, input *UpdateLoadBalancerAttributeInput, opts ...request.Option) (*UpdateLoadBalancerAttributeOutput, error) { - req, out := c.UpdateLoadBalancerAttributeRequest(input) +func (c *Lightsail) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateRelationalDatabase = "UpdateRelationalDatabase" +const opTestAlarm = "TestAlarm" -// UpdateRelationalDatabaseRequest generates a "aws/request.Request" representing the -// client's request for the UpdateRelationalDatabase operation. The "output" return +// TestAlarmRequest generates a "aws/request.Request" representing the +// client's request for the TestAlarm operation. 
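//
// A minimal sketch of TagResource, reusing the svc client from the sketches
// above; the resource name and tag key/value are hypothetical:
//
//    out, err := svc.TagResource(&lightsail.TagResourceInput{
//        ResourceName: aws.String("example-instance"),
//        Tags: []*lightsail.Tag{
//            {Key: aws.String("environment"), Value: aws.String("staging")},
//        },
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//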
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateRelationalDatabase for more information on using the UpdateRelationalDatabase +// See TestAlarm for more information on using the TestAlarm // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateRelationalDatabaseRequest method. -// req, resp := client.UpdateRelationalDatabaseRequest(params) +// // Example sending a request using the TestAlarmRequest method. +// req, resp := client.TestAlarmRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateRelationalDatabase -func (c *Lightsail) UpdateRelationalDatabaseRequest(input *UpdateRelationalDatabaseInput) (req *request.Request, output *UpdateRelationalDatabaseOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/TestAlarm +func (c *Lightsail) TestAlarmRequest(input *TestAlarmInput) (req *request.Request, output *TestAlarmOutput) { op := &request.Operation{ - Name: opUpdateRelationalDatabase, + Name: opTestAlarm, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateRelationalDatabaseInput{} + input = &TestAlarmInput{} } - output = &UpdateRelationalDatabaseOutput{} + output = &TestAlarmOutput{} req = c.newRequest(op, input, output) return } -// UpdateRelationalDatabase API operation for Amazon Lightsail. +// TestAlarm API operation for Amazon Lightsail. // -// Allows the update of one or more attributes of a database in Amazon Lightsail. +// Tests an alarm by displaying a banner on the Amazon Lightsail console. If +// a notification trigger is configured for the specified alarm, the test also +// sends a notification to the notification protocol (Email and/or SMS) configured +// for the alarm. // -// Updates are applied immediately, or in cases where the updates could result -// in an outage, are applied during the database's predefined maintenance window. +// An alarm is used to monitor a single metric for one of your resources. When +// a metric condition is met, the alarm can notify you by email, SMS text message, +// and a banner displayed on the Amazon Lightsail console. For more information, +// see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms). // -// The update relational database operation supports tag-based access control -// via resource tags applied to the resource identified by relationalDatabaseName. -// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation TestAlarm for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. 
+// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * OperationFailureException +// Lightsail throws this exception when an operation fails to execute. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/TestAlarm +func (c *Lightsail) TestAlarm(input *TestAlarmInput) (*TestAlarmOutput, error) { + req, out := c.TestAlarmRequest(input) + return out, req.Send() +} + +// TestAlarmWithContext is the same as TestAlarm with the addition of +// the ability to pass a context and additional request options. +// +// See TestAlarm for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) TestAlarmWithContext(ctx aws.Context, input *TestAlarmInput, opts ...request.Option) (*TestAlarmOutput, error) { + req, out := c.TestAlarmRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUnpeerVpc = "UnpeerVpc" + +// UnpeerVpcRequest generates a "aws/request.Request" representing the +// client's request for the UnpeerVpc operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UnpeerVpc for more information on using the UnpeerVpc +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UnpeerVpcRequest method. +// req, resp := client.UnpeerVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UnpeerVpc +func (c *Lightsail) UnpeerVpcRequest(input *UnpeerVpcInput) (req *request.Request, output *UnpeerVpcOutput) { + op := &request.Operation{ + Name: opUnpeerVpc, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UnpeerVpcInput{} + } + + output = &UnpeerVpcOutput{} + req = c.newRequest(op, input, output) + return +} + +// UnpeerVpc API operation for Amazon Lightsail. +// +// Attempts to unpeer the Lightsail VPC from the user's default VPC. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
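//
// A minimal sketch of TestAlarm, reusing the svc client from the sketches above;
// the alarm name is hypothetical, and State is one of ALARM, OK or INSUFFICIENT_DATA:
//
//    out, err := svc.TestAlarm(&lightsail.TestAlarmInput{
//        AlarmName: aws.String("example-alarm"),
//        State:     aws.String("ALARM"),
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//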
// // See the AWS API reference guide for Amazon Lightsail's -// API operation UpdateRelationalDatabase for usage and error information. +// API operation UnpeerVpc for usage and error information. // // Returned Error Types: // * ServiceException @@ -12026,83 +12853,77 @@ func (c *Lightsail) UpdateRelationalDatabaseRequest(input *UpdateRelationalDatab // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateRelationalDatabase -func (c *Lightsail) UpdateRelationalDatabase(input *UpdateRelationalDatabaseInput) (*UpdateRelationalDatabaseOutput, error) { - req, out := c.UpdateRelationalDatabaseRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UnpeerVpc +func (c *Lightsail) UnpeerVpc(input *UnpeerVpcInput) (*UnpeerVpcOutput, error) { + req, out := c.UnpeerVpcRequest(input) return out, req.Send() } -// UpdateRelationalDatabaseWithContext is the same as UpdateRelationalDatabase with the addition of +// UnpeerVpcWithContext is the same as UnpeerVpc with the addition of // the ability to pass a context and additional request options. // -// See UpdateRelationalDatabase for details on how to use this API operation. +// See UnpeerVpc for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) UpdateRelationalDatabaseWithContext(ctx aws.Context, input *UpdateRelationalDatabaseInput, opts ...request.Option) (*UpdateRelationalDatabaseOutput, error) { - req, out := c.UpdateRelationalDatabaseRequest(input) +func (c *Lightsail) UnpeerVpcWithContext(ctx aws.Context, input *UnpeerVpcInput, opts ...request.Option) (*UnpeerVpcOutput, error) { + req, out := c.UnpeerVpcRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateRelationalDatabaseParameters = "UpdateRelationalDatabaseParameters" +const opUntagResource = "UntagResource" -// UpdateRelationalDatabaseParametersRequest generates a "aws/request.Request" representing the -// client's request for the UpdateRelationalDatabaseParameters operation. The "output" return +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateRelationalDatabaseParameters for more information on using the UpdateRelationalDatabaseParameters +// See UntagResource for more information on using the UntagResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateRelationalDatabaseParametersRequest method. -// req, resp := client.UpdateRelationalDatabaseParametersRequest(params) +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateRelationalDatabaseParameters -func (c *Lightsail) UpdateRelationalDatabaseParametersRequest(input *UpdateRelationalDatabaseParametersInput) (req *request.Request, output *UpdateRelationalDatabaseParametersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UntagResource +func (c *Lightsail) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { op := &request.Operation{ - Name: opUpdateRelationalDatabaseParameters, + Name: opUntagResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateRelationalDatabaseParametersInput{} + input = &UntagResourceInput{} } - output = &UpdateRelationalDatabaseParametersOutput{} + output = &UntagResourceOutput{} req = c.newRequest(op, input, output) return } -// UpdateRelationalDatabaseParameters API operation for Amazon Lightsail. -// -// Allows the update of one or more parameters of a database in Amazon Lightsail. +// UntagResource API operation for Amazon Lightsail. // -// Parameter updates don't cause outages; therefore, their application is not -// subject to the preferred maintenance window. However, there are two ways -// in which parameter updates are applied: dynamic or pending-reboot. Parameters -// marked with a dynamic apply type are applied immediately. Parameters marked -// with a pending-reboot apply type are applied only after the database is rebooted -// using the reboot relational database operation. +// Deletes the specified set of tag keys and their values from the specified +// Amazon Lightsail resource. // -// The update relational database parameters operation supports tag-based access -// control via resource tags applied to the resource identified by relationalDatabaseName. +// The untag resource operation supports tag-based access control via request +// tags and resource tags applied to the resource identified by resource name. // For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -12110,7 +12931,7 @@ func (c *Lightsail) UpdateRelationalDatabaseParametersRequest(input *UpdateRelat // the error. // // See the AWS API reference guide for Amazon Lightsail's -// API operation UpdateRelationalDatabaseParameters for usage and error information. +// API operation UntagResource for usage and error information. // // Returned Error Types: // * ServiceException @@ -12141,538 +12962,2689 @@ func (c *Lightsail) UpdateRelationalDatabaseParametersRequest(input *UpdateRelat // * UnauthenticatedException // Lightsail throws this exception when the user has not been authenticated. 
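//
// A minimal sketch of UntagResource, reusing the svc client from the sketches
// above; the resource name and tag key are hypothetical:
//
//    out, err := svc.UntagResource(&lightsail.UntagResourceInput{
//        ResourceName: aws.String("example-instance"),
//        TagKeys:      []*string{aws.String("environment")},
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//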
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateRelationalDatabaseParameters -func (c *Lightsail) UpdateRelationalDatabaseParameters(input *UpdateRelationalDatabaseParametersInput) (*UpdateRelationalDatabaseParametersOutput, error) { - req, out := c.UpdateRelationalDatabaseParametersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UntagResource +func (c *Lightsail) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) return out, req.Send() } -// UpdateRelationalDatabaseParametersWithContext is the same as UpdateRelationalDatabaseParameters with the addition of +// UntagResourceWithContext is the same as UntagResource with the addition of // the ability to pass a context and additional request options. // -// See UpdateRelationalDatabaseParameters for details on how to use this API operation. +// See UntagResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Lightsail) UpdateRelationalDatabaseParametersWithContext(ctx aws.Context, input *UpdateRelationalDatabaseParametersInput, opts ...request.Option) (*UpdateRelationalDatabaseParametersOutput, error) { - req, out := c.UpdateRelationalDatabaseParametersRequest(input) +func (c *Lightsail) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// Lightsail throws this exception when the user cannot be authenticated or -// uses invalid credentials to access a resource. -type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata - - Code_ *string `locationName:"code" type:"string"` - - Docs *string `locationName:"docs" type:"string"` - - Message_ *string `locationName:"message" type:"string"` - - Tip *string `locationName:"tip" type:"string"` -} - -// String returns the string representation -func (s AccessDeniedException) String() string { - return awsutil.Prettify(s) -} +const opUpdateDistribution = "UpdateDistribution" -// GoString returns the string representation +// UpdateDistributionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDistribution operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDistribution for more information on using the UpdateDistribution +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDistributionRequest method. 
+// req, resp := client.UpdateDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistribution +func (c *Lightsail) UpdateDistributionRequest(input *UpdateDistributionInput) (req *request.Request, output *UpdateDistributionOutput) { + op := &request.Operation{ + Name: opUpdateDistribution, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDistributionInput{} + } + + output = &UpdateDistributionOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDistribution API operation for Amazon Lightsail. +// +// Updates an existing Amazon Lightsail content delivery network (CDN) distribution. +// +// Use this action to update the configuration of your existing distribution +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation UpdateDistribution for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * OperationFailureException +// Lightsail throws this exception when an operation fails to execute. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistribution +func (c *Lightsail) UpdateDistribution(input *UpdateDistributionInput) (*UpdateDistributionOutput, error) { + req, out := c.UpdateDistributionRequest(input) + return out, req.Send() +} + +// UpdateDistributionWithContext is the same as UpdateDistribution with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDistribution for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) UpdateDistributionWithContext(ctx aws.Context, input *UpdateDistributionInput, opts ...request.Option) (*UpdateDistributionOutput, error) { + req, out := c.UpdateDistributionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDistributionBundle = "UpdateDistributionBundle" + +// UpdateDistributionBundleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDistributionBundle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
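//
// A minimal sketch of UpdateDistribution that only toggles whether the
// distribution serves traffic, reusing the svc client from the sketches above;
// the distribution name is hypothetical:
//
//    out, err := svc.UpdateDistribution(&lightsail.UpdateDistributionInput{
//        DistributionName: aws.String("example-distribution"),
//        IsEnabled:        aws.Bool(true),
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//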
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDistributionBundle for more information on using the UpdateDistributionBundle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDistributionBundleRequest method. +// req, resp := client.UpdateDistributionBundleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistributionBundle +func (c *Lightsail) UpdateDistributionBundleRequest(input *UpdateDistributionBundleInput) (req *request.Request, output *UpdateDistributionBundleOutput) { + op := &request.Operation{ + Name: opUpdateDistributionBundle, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDistributionBundleInput{} + } + + output = &UpdateDistributionBundleOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDistributionBundle API operation for Amazon Lightsail. +// +// Updates the bundle of your Amazon Lightsail content delivery network (CDN) +// distribution. +// +// A distribution bundle specifies the monthly network transfer quota and monthly +// cost of your dsitribution. +// +// Update your distribution's bundle if your distribution is going over its +// monthly network transfer quota and is incurring an overage fee. +// +// You can update your distribution's bundle only one time within your monthly +// AWS billing cycle. To determine if you can update your distribution's bundle, +// use the GetDistributions action. The ableToUpdateBundle parameter in the +// result will indicate whether you can currently update your distribution's +// bundle. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation UpdateDistributionBundle for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * OperationFailureException +// Lightsail throws this exception when an operation fails to execute. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. 
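//
// A minimal sketch of UpdateDistributionBundle, reusing the svc client from the
// sketches above; the distribution name and bundle ID are hypothetical, and the
// ableToUpdateBundle check via GetDistributions noted above applies:
//
//    out, err := svc.UpdateDistributionBundle(&lightsail.UpdateDistributionBundleInput{
//        DistributionName: aws.String("example-distribution"),
//        BundleId:         aws.String("medium_1_0"),
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//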
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDistributionBundle +func (c *Lightsail) UpdateDistributionBundle(input *UpdateDistributionBundleInput) (*UpdateDistributionBundleOutput, error) { + req, out := c.UpdateDistributionBundleRequest(input) + return out, req.Send() +} + +// UpdateDistributionBundleWithContext is the same as UpdateDistributionBundle with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDistributionBundle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) UpdateDistributionBundleWithContext(ctx aws.Context, input *UpdateDistributionBundleInput, opts ...request.Option) (*UpdateDistributionBundleOutput, error) { + req, out := c.UpdateDistributionBundleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDomainEntry = "UpdateDomainEntry" + +// UpdateDomainEntryRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDomainEntry operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDomainEntry for more information on using the UpdateDomainEntry +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDomainEntryRequest method. +// req, resp := client.UpdateDomainEntryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDomainEntry +func (c *Lightsail) UpdateDomainEntryRequest(input *UpdateDomainEntryInput) (req *request.Request, output *UpdateDomainEntryOutput) { + op := &request.Operation{ + Name: opUpdateDomainEntry, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateDomainEntryInput{} + } + + output = &UpdateDomainEntryOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDomainEntry API operation for Amazon Lightsail. +// +// Updates a domain recordset after it is created. +// +// The update domain entry operation supports tag-based access control via resource +// tags applied to the resource identified by domain name. For more information, +// see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation UpdateDomainEntry for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. 
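//
// A minimal sketch of UpdateDomainEntry that rewrites an A record, reusing the
// svc client from the sketches above; the domain, record name and target are
// hypothetical:
//
//    out, err := svc.UpdateDomainEntry(&lightsail.UpdateDomainEntryInput{
//        DomainName: aws.String("example.com"),
//        DomainEntry: &lightsail.DomainEntry{
//            Name:   aws.String("www.example.com"),
//            Type:   aws.String("A"),
//            Target: aws.String("192.0.2.10"),
//        },
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//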
+// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * OperationFailureException +// Lightsail throws this exception when an operation fails to execute. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * AccountSetupInProgressException +// Lightsail throws this exception when an account is still in the setup in +// progress state. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateDomainEntry +func (c *Lightsail) UpdateDomainEntry(input *UpdateDomainEntryInput) (*UpdateDomainEntryOutput, error) { + req, out := c.UpdateDomainEntryRequest(input) + return out, req.Send() +} + +// UpdateDomainEntryWithContext is the same as UpdateDomainEntry with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDomainEntry for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) UpdateDomainEntryWithContext(ctx aws.Context, input *UpdateDomainEntryInput, opts ...request.Option) (*UpdateDomainEntryOutput, error) { + req, out := c.UpdateDomainEntryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateLoadBalancerAttribute = "UpdateLoadBalancerAttribute" + +// UpdateLoadBalancerAttributeRequest generates a "aws/request.Request" representing the +// client's request for the UpdateLoadBalancerAttribute operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateLoadBalancerAttribute for more information on using the UpdateLoadBalancerAttribute +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateLoadBalancerAttributeRequest method. 
+// req, resp := client.UpdateLoadBalancerAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateLoadBalancerAttribute +func (c *Lightsail) UpdateLoadBalancerAttributeRequest(input *UpdateLoadBalancerAttributeInput) (req *request.Request, output *UpdateLoadBalancerAttributeOutput) { + op := &request.Operation{ + Name: opUpdateLoadBalancerAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateLoadBalancerAttributeInput{} + } + + output = &UpdateLoadBalancerAttributeOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateLoadBalancerAttribute API operation for Amazon Lightsail. +// +// Updates the specified attribute for a load balancer. You can only update +// one attribute at a time. +// +// The update load balancer attribute operation supports tag-based access control +// via resource tags applied to the resource identified by load balancer name. +// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation UpdateLoadBalancerAttribute for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * OperationFailureException +// Lightsail throws this exception when an operation fails to execute. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * AccountSetupInProgressException +// Lightsail throws this exception when an account is still in the setup in +// progress state. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateLoadBalancerAttribute +func (c *Lightsail) UpdateLoadBalancerAttribute(input *UpdateLoadBalancerAttributeInput) (*UpdateLoadBalancerAttributeOutput, error) { + req, out := c.UpdateLoadBalancerAttributeRequest(input) + return out, req.Send() +} + +// UpdateLoadBalancerAttributeWithContext is the same as UpdateLoadBalancerAttribute with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateLoadBalancerAttribute for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
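//
// A minimal sketch of UpdateLoadBalancerAttribute changing a single attribute,
// reusing the svc client from the sketches above; the load balancer name and
// health check path are hypothetical:
//
//    out, err := svc.UpdateLoadBalancerAttribute(&lightsail.UpdateLoadBalancerAttributeInput{
//        LoadBalancerName: aws.String("example-lb"),
//        AttributeName:    aws.String("HealthCheckPath"),
//        AttributeValue:   aws.String("/healthz"),
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//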
+func (c *Lightsail) UpdateLoadBalancerAttributeWithContext(ctx aws.Context, input *UpdateLoadBalancerAttributeInput, opts ...request.Option) (*UpdateLoadBalancerAttributeOutput, error) { + req, out := c.UpdateLoadBalancerAttributeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateRelationalDatabase = "UpdateRelationalDatabase" + +// UpdateRelationalDatabaseRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRelationalDatabase operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateRelationalDatabase for more information on using the UpdateRelationalDatabase +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateRelationalDatabaseRequest method. +// req, resp := client.UpdateRelationalDatabaseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateRelationalDatabase +func (c *Lightsail) UpdateRelationalDatabaseRequest(input *UpdateRelationalDatabaseInput) (req *request.Request, output *UpdateRelationalDatabaseOutput) { + op := &request.Operation{ + Name: opUpdateRelationalDatabase, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRelationalDatabaseInput{} + } + + output = &UpdateRelationalDatabaseOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateRelationalDatabase API operation for Amazon Lightsail. +// +// Allows the update of one or more attributes of a database in Amazon Lightsail. +// +// Updates are applied immediately, or in cases where the updates could result +// in an outage, are applied during the database's predefined maintenance window. +// +// The update relational database operation supports tag-based access control +// via resource tags applied to the resource identified by relationalDatabaseName. +// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation UpdateRelationalDatabase for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * OperationFailureException +// Lightsail throws this exception when an operation fails to execute. 
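//
// A minimal sketch of UpdateRelationalDatabase that changes only the maintenance
// window, reusing the svc client from the sketches above; the database name and
// window value are hypothetical:
//
//    out, err := svc.UpdateRelationalDatabase(&lightsail.UpdateRelationalDatabaseInput{
//        RelationalDatabaseName:     aws.String("example-db"),
//        PreferredMaintenanceWindow: aws.String("Tue:17:00-Tue:17:30"),
//        ApplyImmediately:           aws.Bool(false), // defer to the maintenance window
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//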
+// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * AccountSetupInProgressException +// Lightsail throws this exception when an account is still in the setup in +// progress state. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateRelationalDatabase +func (c *Lightsail) UpdateRelationalDatabase(input *UpdateRelationalDatabaseInput) (*UpdateRelationalDatabaseOutput, error) { + req, out := c.UpdateRelationalDatabaseRequest(input) + return out, req.Send() +} + +// UpdateRelationalDatabaseWithContext is the same as UpdateRelationalDatabase with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateRelationalDatabase for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) UpdateRelationalDatabaseWithContext(ctx aws.Context, input *UpdateRelationalDatabaseInput, opts ...request.Option) (*UpdateRelationalDatabaseOutput, error) { + req, out := c.UpdateRelationalDatabaseRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateRelationalDatabaseParameters = "UpdateRelationalDatabaseParameters" + +// UpdateRelationalDatabaseParametersRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRelationalDatabaseParameters operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateRelationalDatabaseParameters for more information on using the UpdateRelationalDatabaseParameters +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateRelationalDatabaseParametersRequest method. +// req, resp := client.UpdateRelationalDatabaseParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateRelationalDatabaseParameters +func (c *Lightsail) UpdateRelationalDatabaseParametersRequest(input *UpdateRelationalDatabaseParametersInput) (req *request.Request, output *UpdateRelationalDatabaseParametersOutput) { + op := &request.Operation{ + Name: opUpdateRelationalDatabaseParameters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRelationalDatabaseParametersInput{} + } + + output = &UpdateRelationalDatabaseParametersOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateRelationalDatabaseParameters API operation for Amazon Lightsail. +// +// Allows the update of one or more parameters of a database in Amazon Lightsail. 
+// +// Parameter updates don't cause outages; therefore, their application is not +// subject to the preferred maintenance window. However, there are two ways +// in which parameter updates are applied: dynamic or pending-reboot. Parameters +// marked with a dynamic apply type are applied immediately. Parameters marked +// with a pending-reboot apply type are applied only after the database is rebooted +// using the reboot relational database operation. +// +// The update relational database parameters operation supports tag-based access +// control via resource tags applied to the resource identified by relationalDatabaseName. +// For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Lightsail's +// API operation UpdateRelationalDatabaseParameters for usage and error information. +// +// Returned Error Types: +// * ServiceException +// A general service exception. +// +// * InvalidInputException +// Lightsail throws this exception when user input does not conform to the validation +// rules of an input field. +// +// Domain-related APIs are only available in the N. Virginia (us-east-1) Region. +// Please set your AWS Region configuration to us-east-1 to create, view, or +// edit these resources. +// +// * NotFoundException +// Lightsail throws this exception when it cannot find a resource. +// +// * OperationFailureException +// Lightsail throws this exception when an operation fails to execute. +// +// * AccessDeniedException +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +// +// * AccountSetupInProgressException +// Lightsail throws this exception when an account is still in the setup in +// progress state. +// +// * UnauthenticatedException +// Lightsail throws this exception when the user has not been authenticated. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/UpdateRelationalDatabaseParameters +func (c *Lightsail) UpdateRelationalDatabaseParameters(input *UpdateRelationalDatabaseParametersInput) (*UpdateRelationalDatabaseParametersOutput, error) { + req, out := c.UpdateRelationalDatabaseParametersRequest(input) + return out, req.Send() +} + +// UpdateRelationalDatabaseParametersWithContext is the same as UpdateRelationalDatabaseParameters with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateRelationalDatabaseParameters for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Lightsail) UpdateRelationalDatabaseParametersWithContext(ctx aws.Context, input *UpdateRelationalDatabaseParametersInput, opts ...request.Option) (*UpdateRelationalDatabaseParametersOutput, error) { + req, out := c.UpdateRelationalDatabaseParametersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// Lightsail throws this exception when the user cannot be authenticated or +// uses invalid credentials to access a resource. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Code_ *string `locationName:"code" type:"string"` + + Docs *string `locationName:"docs" type:"string"` + + Message_ *string `locationName:"message" type:"string"` + + Tip *string `locationName:"tip" type:"string"` +} + +// String returns the string representation +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation func (s AccessDeniedException) GoString() string { return s.String() } -func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { - return &AccessDeniedException{ - respMetadata: v, - } +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Lightsail throws this exception when an account is still in the setup in +// progress state. +type AccountSetupInProgressException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Code_ *string `locationName:"code" type:"string"` + + Docs *string `locationName:"docs" type:"string"` + + Message_ *string `locationName:"message" type:"string"` + + Tip *string `locationName:"tip" type:"string"` +} + +// String returns the string representation +func (s AccountSetupInProgressException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountSetupInProgressException) GoString() string { + return s.String() +} + +func newErrorAccountSetupInProgressException(v protocol.ResponseMetadata) error { + return &AccountSetupInProgressException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccountSetupInProgressException) Code() string { + return "AccountSetupInProgressException" +} + +// Message returns the exception's message. +func (s *AccountSetupInProgressException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccountSetupInProgressException) OrigErr() error { + return nil +} + +func (s *AccountSetupInProgressException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. 
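+//
+// As an illustrative sketch only (err is assumed to come from a prior Lightsail
+// API call, with the lightsail and fmt packages imported), the HTTP status
+// code and request ID of this modeled error can be inspected with a type
+// assertion:
+//
+//    if setupErr, ok := err.(*lightsail.AccountSetupInProgressException); ok {
+//        fmt.Println(setupErr.StatusCode(), setupErr.RequestID(), setupErr.Message())
+//    }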
+func (s *AccountSetupInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccountSetupInProgressException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Describes an add-on that is enabled for an Amazon Lightsail resource. +type AddOn struct { + _ struct{} `type:"structure"` + + // The name of the add-on. + Name *string `locationName:"name" type:"string"` + + // The next daily time an automatic snapshot will be created. + // + // The time shown is in HH:00 format, and in Coordinated Universal Time (UTC). + // + // The snapshot is automatically created between the time shown and up to 45 + // minutes after. + NextSnapshotTimeOfDay *string `locationName:"nextSnapshotTimeOfDay" type:"string"` + + // The daily time when an automatic snapshot is created. + // + // The time shown is in HH:00 format, and in Coordinated Universal Time (UTC). + // + // The snapshot is automatically created between the time shown and up to 45 + // minutes after. + SnapshotTimeOfDay *string `locationName:"snapshotTimeOfDay" type:"string"` + + // The status of the add-on. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s AddOn) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddOn) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *AddOn) SetName(v string) *AddOn { + s.Name = &v + return s +} + +// SetNextSnapshotTimeOfDay sets the NextSnapshotTimeOfDay field's value. +func (s *AddOn) SetNextSnapshotTimeOfDay(v string) *AddOn { + s.NextSnapshotTimeOfDay = &v + return s +} + +// SetSnapshotTimeOfDay sets the SnapshotTimeOfDay field's value. +func (s *AddOn) SetSnapshotTimeOfDay(v string) *AddOn { + s.SnapshotTimeOfDay = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AddOn) SetStatus(v string) *AddOn { + s.Status = &v + return s +} + +// Describes a request to enable, modify, or disable an add-on for an Amazon +// Lightsail resource. +// +// An additional cost may be associated with enabling add-ons. For more information, +// see the Lightsail pricing page (https://aws.amazon.com/lightsail/pricing/). +type AddOnRequest struct { + _ struct{} `type:"structure"` + + // The add-on type. + // + // AddOnType is a required field + AddOnType *string `locationName:"addOnType" type:"string" required:"true" enum:"AddOnType"` + + // An object that represents additional parameters when enabling or modifying + // the automatic snapshot add-on. + AutoSnapshotAddOnRequest *AutoSnapshotAddOnRequest `locationName:"autoSnapshotAddOnRequest" type:"structure"` +} + +// String returns the string representation +func (s AddOnRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddOnRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddOnRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddOnRequest"} + if s.AddOnType == nil { + invalidParams.Add(request.NewErrParamRequired("AddOnType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAddOnType sets the AddOnType field's value. 
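+//
+// As an illustrative sketch only (the snapshot time is an example value), an
+// AddOnRequest for the automatic snapshot add-on can be assembled with the
+// chained setters:
+//
+//    addOn := (&lightsail.AddOnRequest{}).
+//        SetAddOnType("AutoSnapshot").
+//        SetAutoSnapshotAddOnRequest((&lightsail.AutoSnapshotAddOnRequest{}).
+//            SetSnapshotTimeOfDay("06:00"))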
+func (s *AddOnRequest) SetAddOnType(v string) *AddOnRequest { + s.AddOnType = &v + return s +} + +// SetAutoSnapshotAddOnRequest sets the AutoSnapshotAddOnRequest field's value. +func (s *AddOnRequest) SetAutoSnapshotAddOnRequest(v *AutoSnapshotAddOnRequest) *AddOnRequest { + s.AutoSnapshotAddOnRequest = v + return s +} + +// Describes an alarm. +// +// An alarm is a way to monitor your Amazon Lightsail resource metrics. For +// more information, see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms). +type Alarm struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the alarm. + Arn *string `locationName:"arn" type:"string"` + + // The arithmetic operation used when comparing the specified statistic and + // threshold. + ComparisonOperator *string `locationName:"comparisonOperator" type:"string" enum:"ComparisonOperator"` + + // The contact protocols for the alarm, such as Email, SMS (text messaging), + // or both. + ContactProtocols []*string `locationName:"contactProtocols" type:"list"` + + // The timestamp when the alarm was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // The number of data points that must not within the specified threshold to + // trigger the alarm. + DatapointsToAlarm *int64 `locationName:"datapointsToAlarm" type:"integer"` + + // The number of periods over which data is compared to the specified threshold. + EvaluationPeriods *int64 `locationName:"evaluationPeriods" type:"integer"` + + // An object that lists information about the location of the alarm. + Location *ResourceLocation `locationName:"location" type:"structure"` + + // The name of the metric associated with the alarm. + MetricName *string `locationName:"metricName" type:"string" enum:"MetricName"` + + // An object that lists information about the resource monitored by the alarm. + MonitoredResourceInfo *MonitoredResourceInfo `locationName:"monitoredResourceInfo" type:"structure"` + + // The name of the alarm. + Name *string `locationName:"name" type:"string"` + + // Indicates whether the alarm is enabled. + NotificationEnabled *bool `locationName:"notificationEnabled" type:"boolean"` + + // The alarm states that trigger a notification. + NotificationTriggers []*string `locationName:"notificationTriggers" type:"list"` + + // The period, in seconds, over which the statistic is applied. + Period *int64 `locationName:"period" min:"60" type:"integer"` + + // The Lightsail resource type (e.g., Alarm). + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The current state of the alarm. + // + // An alarm has the following possible states: + // + // * ALARM - The metric is outside of the defined threshold. + // + // * INSUFFICIENT_DATA - The alarm has just started, the metric is not available, + // or not enough data is available for the metric to determine the alarm + // state. + // + // * OK - The metric is within the defined threshold. + State *string `locationName:"state" type:"string" enum:"AlarmState"` + + // The statistic for the metric associated with the alarm. + // + // The following statistics are available: + // + // * Minimum - The lowest value observed during the specified period. Use + // this value to determine low volumes of activity for your application. + // + // * Maximum - The highest value observed during the specified period. Use + // this value to determine high volumes of activity for your application. 
+ // + // * Sum - All values submitted for the matching metric added together. You + // can use this statistic to determine the total volume of a metric. + // + // * Average - The value of Sum / SampleCount during the specified period. + // By comparing this statistic with the Minimum and Maximum values, you can + // determine the full scope of a metric and how close the average use is + // to the Minimum and Maximum values. This comparison helps you to know when + // to increase or decrease your resources. + // + // * SampleCount - The count, or number, of data points used for the statistical + // calculation. + Statistic *string `locationName:"statistic" type:"string" enum:"MetricStatistic"` + + // The support code. Include this code in your email to support when you have + // questions about your Lightsail alarm. This code enables our support team + // to look up your Lightsail information more easily. + SupportCode *string `locationName:"supportCode" type:"string"` + + // The value against which the specified statistic is compared. + Threshold *float64 `locationName:"threshold" type:"double"` + + // Specifies how the alarm handles missing data points. + // + // An alarm can treat missing data in the following ways: + // + // * breaching - Assume the missing data is not within the threshold. Missing + // data counts towards the number of times the metric is not within the threshold. + // + // * notBreaching - Assume the missing data is within the threshold. Missing + // data does not count towards the number of times the metric is not within + // the threshold. + // + // * ignore - Ignore the missing data. Maintains the current alarm state. + // + // * missing - Missing data is treated as missing. + TreatMissingData *string `locationName:"treatMissingData" type:"string" enum:"TreatMissingData"` + + // The unit of the metric associated with the alarm. + Unit *string `locationName:"unit" type:"string" enum:"MetricUnit"` +} + +// String returns the string representation +func (s Alarm) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Alarm) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *Alarm) SetArn(v string) *Alarm { + s.Arn = &v + return s +} + +// SetComparisonOperator sets the ComparisonOperator field's value. +func (s *Alarm) SetComparisonOperator(v string) *Alarm { + s.ComparisonOperator = &v + return s +} + +// SetContactProtocols sets the ContactProtocols field's value. +func (s *Alarm) SetContactProtocols(v []*string) *Alarm { + s.ContactProtocols = v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *Alarm) SetCreatedAt(v time.Time) *Alarm { + s.CreatedAt = &v + return s +} + +// SetDatapointsToAlarm sets the DatapointsToAlarm field's value. +func (s *Alarm) SetDatapointsToAlarm(v int64) *Alarm { + s.DatapointsToAlarm = &v + return s +} + +// SetEvaluationPeriods sets the EvaluationPeriods field's value. +func (s *Alarm) SetEvaluationPeriods(v int64) *Alarm { + s.EvaluationPeriods = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *Alarm) SetLocation(v *ResourceLocation) *Alarm { + s.Location = v + return s +} + +// SetMetricName sets the MetricName field's value. +func (s *Alarm) SetMetricName(v string) *Alarm { + s.MetricName = &v + return s +} + +// SetMonitoredResourceInfo sets the MonitoredResourceInfo field's value. 
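+//
+// As an illustrative sketch only (svc is assumed to be a configured *Lightsail
+// client), alarms and their current states can be listed with GetAlarms:
+//
+//    out, err := svc.GetAlarms(&lightsail.GetAlarmsInput{})
+//    if err == nil {
+//        for _, a := range out.Alarms {
+//            fmt.Println(aws.StringValue(a.Name), aws.StringValue(a.State))
+//        }
+//    }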
+func (s *Alarm) SetMonitoredResourceInfo(v *MonitoredResourceInfo) *Alarm { + s.MonitoredResourceInfo = v + return s +} + +// SetName sets the Name field's value. +func (s *Alarm) SetName(v string) *Alarm { + s.Name = &v + return s +} + +// SetNotificationEnabled sets the NotificationEnabled field's value. +func (s *Alarm) SetNotificationEnabled(v bool) *Alarm { + s.NotificationEnabled = &v + return s +} + +// SetNotificationTriggers sets the NotificationTriggers field's value. +func (s *Alarm) SetNotificationTriggers(v []*string) *Alarm { + s.NotificationTriggers = v + return s +} + +// SetPeriod sets the Period field's value. +func (s *Alarm) SetPeriod(v int64) *Alarm { + s.Period = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *Alarm) SetResourceType(v string) *Alarm { + s.ResourceType = &v + return s +} + +// SetState sets the State field's value. +func (s *Alarm) SetState(v string) *Alarm { + s.State = &v + return s +} + +// SetStatistic sets the Statistic field's value. +func (s *Alarm) SetStatistic(v string) *Alarm { + s.Statistic = &v + return s +} + +// SetSupportCode sets the SupportCode field's value. +func (s *Alarm) SetSupportCode(v string) *Alarm { + s.SupportCode = &v + return s +} + +// SetThreshold sets the Threshold field's value. +func (s *Alarm) SetThreshold(v float64) *Alarm { + s.Threshold = &v + return s +} + +// SetTreatMissingData sets the TreatMissingData field's value. +func (s *Alarm) SetTreatMissingData(v string) *Alarm { + s.TreatMissingData = &v + return s +} + +// SetUnit sets the Unit field's value. +func (s *Alarm) SetUnit(v string) *Alarm { + s.Unit = &v + return s +} + +type AllocateStaticIpInput struct { + _ struct{} `type:"structure"` + + // The name of the static IP address. + // + // StaticIpName is a required field + StaticIpName *string `locationName:"staticIpName" type:"string" required:"true"` +} + +// String returns the string representation +func (s AllocateStaticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateStaticIpInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AllocateStaticIpInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AllocateStaticIpInput"} + if s.StaticIpName == nil { + invalidParams.Add(request.NewErrParamRequired("StaticIpName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStaticIpName sets the StaticIpName field's value. +func (s *AllocateStaticIpInput) SetStaticIpName(v string) *AllocateStaticIpInput { + s.StaticIpName = &v + return s +} + +type AllocateStaticIpOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s AllocateStaticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AllocateStaticIpOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. 
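+//
+// As an illustrative sketch only (svc is assumed to be a configured *Lightsail
+// client), the returned operations can be checked after allocating a static IP:
+//
+//    out, err := svc.AllocateStaticIp(&lightsail.AllocateStaticIpInput{
+//        StaticIpName: aws.String("my-static-ip"),
+//    })
+//    if err == nil {
+//        for _, op := range out.Operations {
+//            fmt.Println(aws.StringValue(op.Id), aws.StringValue(op.Status))
+//        }
+//    }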
+func (s *AllocateStaticIpOutput) SetOperations(v []*Operation) *AllocateStaticIpOutput { + s.Operations = v + return s +} + +type AttachCertificateToDistributionInput struct { + _ struct{} `type:"structure"` + + // The name of the certificate to attach to a distribution. + // + // Only certificates with a status of ISSUED can be attached to a distribution. + // + // Use the GetCertificates action to get a list of certificate names that you + // can specify. + // + // This is the name of the certificate resource type and is used only to reference + // the certificate in other API actions. It can be different than the domain + // name of the certificate. For example, your certificate name might be WordPress-Blog-Certificate + // and the domain name of the certificate might be example.com. + // + // CertificateName is a required field + CertificateName *string `locationName:"certificateName" type:"string" required:"true"` + + // The name of the distribution that the certificate will be attached to. + // + // Use the GetDistributions action to get a list of distribution names that + // you can specify. + // + // DistributionName is a required field + DistributionName *string `locationName:"distributionName" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachCertificateToDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachCertificateToDistributionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachCertificateToDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachCertificateToDistributionInput"} + if s.CertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateName")) + } + if s.DistributionName == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateName sets the CertificateName field's value. +func (s *AttachCertificateToDistributionInput) SetCertificateName(v string) *AttachCertificateToDistributionInput { + s.CertificateName = &v + return s +} + +// SetDistributionName sets the DistributionName field's value. +func (s *AttachCertificateToDistributionInput) SetDistributionName(v string) *AttachCertificateToDistributionInput { + s.DistributionName = &v + return s +} + +type AttachCertificateToDistributionOutput struct { + _ struct{} `type:"structure"` + + // An object that describes the result of the action, such as the status of + // the request, the timestamp of the request, and the resources affected by + // the request. + Operation *Operation `locationName:"operation" type:"structure"` +} + +// String returns the string representation +func (s AttachCertificateToDistributionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachCertificateToDistributionOutput) GoString() string { + return s.String() +} + +// SetOperation sets the Operation field's value. +func (s *AttachCertificateToDistributionOutput) SetOperation(v *Operation) *AttachCertificateToDistributionOutput { + s.Operation = v + return s +} + +type AttachDiskInput struct { + _ struct{} `type:"structure"` + + // The unique Lightsail disk name (e.g., my-disk). 
+ // + // DiskName is a required field + DiskName *string `locationName:"diskName" type:"string" required:"true"` + + // The disk path to expose to the instance (e.g., /dev/xvdf). + // + // DiskPath is a required field + DiskPath *string `locationName:"diskPath" type:"string" required:"true"` + + // The name of the Lightsail instance where you want to utilize the storage + // disk. + // + // InstanceName is a required field + InstanceName *string `locationName:"instanceName" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachDiskInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachDiskInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachDiskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachDiskInput"} + if s.DiskName == nil { + invalidParams.Add(request.NewErrParamRequired("DiskName")) + } + if s.DiskPath == nil { + invalidParams.Add(request.NewErrParamRequired("DiskPath")) + } + if s.InstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDiskName sets the DiskName field's value. +func (s *AttachDiskInput) SetDiskName(v string) *AttachDiskInput { + s.DiskName = &v + return s +} + +// SetDiskPath sets the DiskPath field's value. +func (s *AttachDiskInput) SetDiskPath(v string) *AttachDiskInput { + s.DiskPath = &v + return s +} + +// SetInstanceName sets the InstanceName field's value. +func (s *AttachDiskInput) SetInstanceName(v string) *AttachDiskInput { + s.InstanceName = &v + return s +} + +type AttachDiskOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s AttachDiskOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachDiskOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. +func (s *AttachDiskOutput) SetOperations(v []*Operation) *AttachDiskOutput { + s.Operations = v + return s +} + +type AttachInstancesToLoadBalancerInput struct { + _ struct{} `type:"structure"` + + // An array of strings representing the instance name(s) you want to attach + // to your load balancer. + // + // An instance must be running before you can attach it to your load balancer. + // + // There are no additional limits on the number of instances you can attach + // to your load balancer, aside from the limit of Lightsail instances you can + // create in your account (20). + // + // InstanceNames is a required field + InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` + + // The name of the load balancer. 
+ // + // LoadBalancerName is a required field + LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachInstancesToLoadBalancerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInstancesToLoadBalancerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachInstancesToLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachInstancesToLoadBalancerInput"} + if s.InstanceNames == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceNames")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceNames sets the InstanceNames field's value. +func (s *AttachInstancesToLoadBalancerInput) SetInstanceNames(v []*string) *AttachInstancesToLoadBalancerInput { + s.InstanceNames = v + return s +} + +// SetLoadBalancerName sets the LoadBalancerName field's value. +func (s *AttachInstancesToLoadBalancerInput) SetLoadBalancerName(v string) *AttachInstancesToLoadBalancerInput { + s.LoadBalancerName = &v + return s +} + +type AttachInstancesToLoadBalancerOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s AttachInstancesToLoadBalancerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachInstancesToLoadBalancerOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. +func (s *AttachInstancesToLoadBalancerOutput) SetOperations(v []*Operation) *AttachInstancesToLoadBalancerOutput { + s.Operations = v + return s +} + +type AttachLoadBalancerTlsCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of your SSL/TLS certificate. + // + // CertificateName is a required field + CertificateName *string `locationName:"certificateName" type:"string" required:"true"` + + // The name of the load balancer to which you want to associate the SSL/TLS + // certificate. + // + // LoadBalancerName is a required field + LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachLoadBalancerTlsCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancerTlsCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
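+//
+// As an illustrative sketch only, a missing required field is reported before
+// any request is sent:
+//
+//    input := &lightsail.AttachLoadBalancerTlsCertificateInput{
+//        CertificateName: aws.String("my-certificate"),
+//    }
+//    if err := input.Validate(); err != nil {
+//        fmt.Println(err) // LoadBalancerName is required but not set.
+//    }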
+func (s *AttachLoadBalancerTlsCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachLoadBalancerTlsCertificateInput"} + if s.CertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateName")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateName sets the CertificateName field's value. +func (s *AttachLoadBalancerTlsCertificateInput) SetCertificateName(v string) *AttachLoadBalancerTlsCertificateInput { + s.CertificateName = &v + return s +} + +// SetLoadBalancerName sets the LoadBalancerName field's value. +func (s *AttachLoadBalancerTlsCertificateInput) SetLoadBalancerName(v string) *AttachLoadBalancerTlsCertificateInput { + s.LoadBalancerName = &v + return s +} + +type AttachLoadBalancerTlsCertificateOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + // + // These SSL/TLS certificates are only usable by Lightsail load balancers. You + // can't get the certificate and use it for another purpose. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s AttachLoadBalancerTlsCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancerTlsCertificateOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. +func (s *AttachLoadBalancerTlsCertificateOutput) SetOperations(v []*Operation) *AttachLoadBalancerTlsCertificateOutput { + s.Operations = v + return s +} + +type AttachStaticIpInput struct { + _ struct{} `type:"structure"` + + // The instance name to which you want to attach the static IP address. + // + // InstanceName is a required field + InstanceName *string `locationName:"instanceName" type:"string" required:"true"` + + // The name of the static IP. + // + // StaticIpName is a required field + StaticIpName *string `locationName:"staticIpName" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachStaticIpInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachStaticIpInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachStaticIpInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachStaticIpInput"} + if s.InstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceName")) + } + if s.StaticIpName == nil { + invalidParams.Add(request.NewErrParamRequired("StaticIpName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceName sets the InstanceName field's value. +func (s *AttachStaticIpInput) SetInstanceName(v string) *AttachStaticIpInput { + s.InstanceName = &v + return s +} + +// SetStaticIpName sets the StaticIpName field's value. 
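+//
+// As an illustrative sketch only (svc is assumed to be a configured *Lightsail
+// client; the names are example values), the input can be assembled with the
+// chained setters:
+//
+//    out, err := svc.AttachStaticIp((&lightsail.AttachStaticIpInput{}).
+//        SetStaticIpName("my-static-ip").
+//        SetInstanceName("my-instance"))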
+func (s *AttachStaticIpInput) SetStaticIpName(v string) *AttachStaticIpInput { + s.StaticIpName = &v + return s +} + +type AttachStaticIpOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s AttachStaticIpOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachStaticIpOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. +func (s *AttachStaticIpOutput) SetOperations(v []*Operation) *AttachStaticIpOutput { + s.Operations = v + return s +} + +// Describes a block storage disk that is attached to an instance, and is included +// in an automatic snapshot. +type AttachedDisk struct { + _ struct{} `type:"structure"` + + // The path of the disk (e.g., /dev/xvdf). + Path *string `locationName:"path" type:"string"` + + // The size of the disk in GB. + SizeInGb *int64 `locationName:"sizeInGb" type:"integer"` +} + +// String returns the string representation +func (s AttachedDisk) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachedDisk) GoString() string { + return s.String() +} + +// SetPath sets the Path field's value. +func (s *AttachedDisk) SetPath(v string) *AttachedDisk { + s.Path = &v + return s +} + +// SetSizeInGb sets the SizeInGb field's value. +func (s *AttachedDisk) SetSizeInGb(v int64) *AttachedDisk { + s.SizeInGb = &v + return s +} + +// Describes a request to enable or modify the automatic snapshot add-on for +// an Amazon Lightsail instance or disk. +// +// When you modify the automatic snapshot time for a resource, it is typically +// effective immediately except under the following conditions: +// +// * If an automatic snapshot has been created for the current day, and you +// change the snapshot time to a later time of day, then the new snapshot +// time will be effective the following day. This ensures that two snapshots +// are not created for the current day. +// +// * If an automatic snapshot has not yet been created for the current day, +// and you change the snapshot time to an earlier time of day, then the new +// snapshot time will be effective the following day and a snapshot is automatically +// created at the previously set time for the current day. This ensures that +// a snapshot is created for the current day. +// +// * If an automatic snapshot has not yet been created for the current day, +// and you change the snapshot time to a time that is within 30 minutes from +// your current time, then the new snapshot time will be effective the following +// day and a snapshot is automatically created at the previously set time +// for the current day. This ensures that a snapshot is created for the current +// day, because 30 minutes is required between your current time and the +// new snapshot time that you specify. +// +// * If an automatic snapshot is scheduled to be created within 30 minutes +// from your current time and you change the snapshot time, then the new +// snapshot time will be effective the following day and a snapshot is automatically +// created at the previously set time for the current day. 
This ensures that +// a snapshot is created for the current day, because 30 minutes is required +// between your current time and the new snapshot time that you specify. +type AutoSnapshotAddOnRequest struct { + _ struct{} `type:"structure"` + + // The daily time when an automatic snapshot will be created. + // + // Constraints: + // + // * Must be in HH:00 format, and in an hourly increment. + // + // * Specified in Coordinated Universal Time (UTC). + // + // * The snapshot will be automatically created between the time specified + // and up to 45 minutes after. + SnapshotTimeOfDay *string `locationName:"snapshotTimeOfDay" type:"string"` +} + +// String returns the string representation +func (s AutoSnapshotAddOnRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoSnapshotAddOnRequest) GoString() string { + return s.String() +} + +// SetSnapshotTimeOfDay sets the SnapshotTimeOfDay field's value. +func (s *AutoSnapshotAddOnRequest) SetSnapshotTimeOfDay(v string) *AutoSnapshotAddOnRequest { + s.SnapshotTimeOfDay = &v + return s +} + +// Describes an automatic snapshot. +type AutoSnapshotDetails struct { + _ struct{} `type:"structure"` + + // The timestamp when the automatic snapshot was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // The date of the automatic snapshot in YYYY-MM-DD format. + Date *string `locationName:"date" type:"string"` + + // An array of objects that describe the block storage disks attached to the + // instance when the automatic snapshot was created. + FromAttachedDisks []*AttachedDisk `locationName:"fromAttachedDisks" type:"list"` + + // The status of the automatic snapshot. + Status *string `locationName:"status" type:"string" enum:"AutoSnapshotStatus"` +} + +// String returns the string representation +func (s AutoSnapshotDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoSnapshotDetails) GoString() string { + return s.String() +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *AutoSnapshotDetails) SetCreatedAt(v time.Time) *AutoSnapshotDetails { + s.CreatedAt = &v + return s +} + +// SetDate sets the Date field's value. +func (s *AutoSnapshotDetails) SetDate(v string) *AutoSnapshotDetails { + s.Date = &v + return s +} + +// SetFromAttachedDisks sets the FromAttachedDisks field's value. +func (s *AutoSnapshotDetails) SetFromAttachedDisks(v []*AttachedDisk) *AutoSnapshotDetails { + s.FromAttachedDisks = v + return s +} + +// SetStatus sets the Status field's value. +func (s *AutoSnapshotDetails) SetStatus(v string) *AutoSnapshotDetails { + s.Status = &v + return s +} + +// Describes an Availability Zone. +type AvailabilityZone struct { + _ struct{} `type:"structure"` + + // The state of the Availability Zone. + State *string `locationName:"state" type:"string"` + + // The name of the Availability Zone. The format is us-east-2a (case-sensitive). + ZoneName *string `locationName:"zoneName" type:"string"` +} + +// String returns the string representation +func (s AvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailabilityZone) GoString() string { + return s.String() +} + +// SetState sets the State field's value. +func (s *AvailabilityZone) SetState(v string) *AvailabilityZone { + s.State = &v + return s +} + +// SetZoneName sets the ZoneName field's value. 
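+//
+// As an illustrative sketch only (svc is assumed to be a configured *Lightsail
+// client), Availability Zones can be listed per region with GetRegions:
+//
+//    regions, err := svc.GetRegions(&lightsail.GetRegionsInput{
+//        IncludeAvailabilityZones: aws.Bool(true),
+//    })
+//    if err == nil {
+//        for _, r := range regions.Regions {
+//            for _, az := range r.AvailabilityZones {
+//                fmt.Println(aws.StringValue(az.ZoneName), aws.StringValue(az.State))
+//            }
+//        }
+//    }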
+func (s *AvailabilityZone) SetZoneName(v string) *AvailabilityZone { + s.ZoneName = &v + return s +} + +// Describes a blueprint (a virtual private server image). +type Blueprint struct { + _ struct{} `type:"structure"` + + // The ID for the virtual private server image (e.g., app_wordpress_4_4 or app_lamp_7_0). + BlueprintId *string `locationName:"blueprintId" type:"string"` + + // The description of the blueprint. + Description *string `locationName:"description" type:"string"` + + // The group name of the blueprint (e.g., amazon-linux). + Group *string `locationName:"group" type:"string"` + + // A Boolean value indicating whether the blueprint is active. Inactive blueprints + // are listed to support customers with existing instances but are not necessarily + // available for launch of new instances. Blueprints are marked inactive when + // they become outdated due to operating system updates or new application releases. + IsActive *bool `locationName:"isActive" type:"boolean"` + + // The end-user license agreement URL for the image or blueprint. + LicenseUrl *string `locationName:"licenseUrl" type:"string"` + + // The minimum bundle power required to run this blueprint. For example, you + // need a bundle with a power value of 500 or more to create an instance that + // uses a blueprint with a minimum power value of 500. 0 indicates that the + // blueprint runs on all instance sizes. + MinPower *int64 `locationName:"minPower" type:"integer"` + + // The friendly name of the blueprint (e.g., Amazon Linux). + Name *string `locationName:"name" type:"string"` + + // The operating system platform (either Linux/Unix-based or Windows Server-based) + // of the blueprint. + Platform *string `locationName:"platform" type:"string" enum:"InstancePlatform"` + + // The product URL to learn more about the image or blueprint. + ProductUrl *string `locationName:"productUrl" type:"string"` + + // The type of the blueprint (e.g., os or app). + Type *string `locationName:"type" type:"string" enum:"BlueprintType"` + + // The version number of the operating system, application, or stack (e.g., + // 2016.03.0). + Version *string `locationName:"version" type:"string"` + + // The version code. + VersionCode *string `locationName:"versionCode" type:"string"` +} + +// String returns the string representation +func (s Blueprint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Blueprint) GoString() string { + return s.String() +} + +// SetBlueprintId sets the BlueprintId field's value. +func (s *Blueprint) SetBlueprintId(v string) *Blueprint { + s.BlueprintId = &v + return s } -// Code returns the exception type name. -func (s AccessDeniedException) Code() string { - return "AccessDeniedException" +// SetDescription sets the Description field's value. +func (s *Blueprint) SetDescription(v string) *Blueprint { + s.Description = &v + return s } -// Message returns the exception's message. -func (s AccessDeniedException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetGroup sets the Group field's value. +func (s *Blueprint) SetGroup(v string) *Blueprint { + s.Group = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { - return nil +// SetIsActive sets the IsActive field's value. 
+func (s *Blueprint) SetIsActive(v bool) *Blueprint { + s.IsActive = &v + return s } -func (s AccessDeniedException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetLicenseUrl sets the LicenseUrl field's value. +func (s *Blueprint) SetLicenseUrl(v string) *Blueprint { + s.LicenseUrl = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +// SetMinPower sets the MinPower field's value. +func (s *Blueprint) SetMinPower(v int64) *Blueprint { + s.MinPower = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +// SetName sets the Name field's value. +func (s *Blueprint) SetName(v string) *Blueprint { + s.Name = &v + return s } -// Lightsail throws this exception when an account is still in the setup in -// progress state. -type AccountSetupInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// SetPlatform sets the Platform field's value. +func (s *Blueprint) SetPlatform(v string) *Blueprint { + s.Platform = &v + return s +} - Code_ *string `locationName:"code" type:"string"` +// SetProductUrl sets the ProductUrl field's value. +func (s *Blueprint) SetProductUrl(v string) *Blueprint { + s.ProductUrl = &v + return s +} - Docs *string `locationName:"docs" type:"string"` +// SetType sets the Type field's value. +func (s *Blueprint) SetType(v string) *Blueprint { + s.Type = &v + return s +} - Message_ *string `locationName:"message" type:"string"` +// SetVersion sets the Version field's value. +func (s *Blueprint) SetVersion(v string) *Blueprint { + s.Version = &v + return s +} - Tip *string `locationName:"tip" type:"string"` +// SetVersionCode sets the VersionCode field's value. +func (s *Blueprint) SetVersionCode(v string) *Blueprint { + s.VersionCode = &v + return s +} + +// Describes a bundle, which is a set of specs describing your virtual private +// server (or instance). +type Bundle struct { + _ struct{} `type:"structure"` + + // The bundle ID (e.g., micro_1_0). + BundleId *string `locationName:"bundleId" type:"string"` + + // The number of vCPUs included in the bundle (e.g., 2). + CpuCount *int64 `locationName:"cpuCount" type:"integer"` + + // The size of the SSD (e.g., 30). + DiskSizeInGb *int64 `locationName:"diskSizeInGb" type:"integer"` + + // The Amazon EC2 instance type (e.g., t2.micro). + InstanceType *string `locationName:"instanceType" type:"string"` + + // A Boolean value indicating whether the bundle is active. + IsActive *bool `locationName:"isActive" type:"boolean"` + + // A friendly name for the bundle (e.g., Micro). + Name *string `locationName:"name" type:"string"` + + // A numeric value that represents the power of the bundle (e.g., 500). You + // can use the bundle's power value in conjunction with a blueprint's minimum + // power value to determine whether the blueprint will run on the bundle. For + // example, you need a bundle with a power value of 500 or more to create an + // instance that uses a blueprint with a minimum power value of 500. + Power *int64 `locationName:"power" type:"integer"` + + // The price in US dollars (e.g., 5.0) of the bundle. + Price *float64 `locationName:"price" type:"float"` + + // The amount of RAM in GB (e.g., 2.0). 
+ RamSizeInGb *float64 `locationName:"ramSizeInGb" type:"float"` + + // The operating system platform (Linux/Unix-based or Windows Server-based) + // that the bundle supports. You can only launch a WINDOWS bundle on a blueprint + // that supports the WINDOWS platform. LINUX_UNIX blueprints require a LINUX_UNIX + // bundle. + SupportedPlatforms []*string `locationName:"supportedPlatforms" type:"list"` + + // The data transfer rate per month in GB (e.g., 2000). + TransferPerMonthInGb *int64 `locationName:"transferPerMonthInGb" type:"integer"` } // String returns the string representation -func (s AccountSetupInProgressException) String() string { +func (s Bundle) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AccountSetupInProgressException) GoString() string { +func (s Bundle) GoString() string { return s.String() } -func newErrorAccountSetupInProgressException(v protocol.ResponseMetadata) error { - return &AccountSetupInProgressException{ - respMetadata: v, - } +// SetBundleId sets the BundleId field's value. +func (s *Bundle) SetBundleId(v string) *Bundle { + s.BundleId = &v + return s } -// Code returns the exception type name. -func (s AccountSetupInProgressException) Code() string { - return "AccountSetupInProgressException" +// SetCpuCount sets the CpuCount field's value. +func (s *Bundle) SetCpuCount(v int64) *Bundle { + s.CpuCount = &v + return s } -// Message returns the exception's message. -func (s AccountSetupInProgressException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetDiskSizeInGb sets the DiskSizeInGb field's value. +func (s *Bundle) SetDiskSizeInGb(v int64) *Bundle { + s.DiskSizeInGb = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccountSetupInProgressException) OrigErr() error { - return nil +// SetInstanceType sets the InstanceType field's value. +func (s *Bundle) SetInstanceType(v string) *Bundle { + s.InstanceType = &v + return s } -func (s AccountSetupInProgressException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetIsActive sets the IsActive field's value. +func (s *Bundle) SetIsActive(v bool) *Bundle { + s.IsActive = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s AccountSetupInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +// SetName sets the Name field's value. +func (s *Bundle) SetName(v string) *Bundle { + s.Name = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s AccountSetupInProgressException) RequestID() string { - return s.respMetadata.RequestID +// SetPower sets the Power field's value. +func (s *Bundle) SetPower(v int64) *Bundle { + s.Power = &v + return s } -// Describes an add-on that is enabled for an Amazon Lightsail resource. -type AddOn struct { +// SetPrice sets the Price field's value. +func (s *Bundle) SetPrice(v float64) *Bundle { + s.Price = &v + return s +} + +// SetRamSizeInGb sets the RamSizeInGb field's value. +func (s *Bundle) SetRamSizeInGb(v float64) *Bundle { + s.RamSizeInGb = &v + return s +} + +// SetSupportedPlatforms sets the SupportedPlatforms field's value. +func (s *Bundle) SetSupportedPlatforms(v []*string) *Bundle { + s.SupportedPlatforms = v + return s +} + +// SetTransferPerMonthInGb sets the TransferPerMonthInGb field's value. 
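+//
+// As an illustrative sketch only (bp and bundle are assumed to come from prior
+// GetBlueprints and GetBundles calls), a bundle can be checked against a
+// blueprint's minimum power before creating an instance:
+//
+//    minPower := aws.Int64Value(bp.MinPower)
+//    if minPower == 0 || aws.Int64Value(bundle.Power) >= minPower {
+//        // The blueprint runs on this bundle.
+//    }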
+func (s *Bundle) SetTransferPerMonthInGb(v int64) *Bundle { + s.TransferPerMonthInGb = &v + return s +} + +// Describes the default cache behavior of an Amazon Lightsail content delivery +// network (CDN) distribution. +type CacheBehavior struct { _ struct{} `type:"structure"` - // The name of the add-on. - Name *string `locationName:"name" type:"string"` + // The cache behavior of the distribution. + // + // The following cache behaviors can be specified: + // + // * cache - This option is best for static sites. When specified, your distribution + // caches and serves your entire website as static content. This behavior + // is ideal for websites with static content that doesn't change depending + // on who views it, or for websites that don't use cookies, headers, or query + // strings to personalize content. + // + // * dont-cache - This option is best for sites that serve a mix of static + // and dynamic content. When specified, your distribution caches and serve + // only the content that is specified in the distribution's CacheBehaviorPerPath + // parameter. This behavior is ideal for websites or web applications that + // use cookies, headers, and query strings to personalize content for individual + // users. + Behavior *string `locationName:"behavior" type:"string" enum:"BehaviorEnum"` +} - // The next daily time an automatic snapshot will be created. +// String returns the string representation +func (s CacheBehavior) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheBehavior) GoString() string { + return s.String() +} + +// SetBehavior sets the Behavior field's value. +func (s *CacheBehavior) SetBehavior(v string) *CacheBehavior { + s.Behavior = &v + return s +} + +// Describes the per-path cache behavior of an Amazon Lightsail content delivery +// network (CDN) distribution. +// +// A per-path cache behavior is used to override, or add an exception to, the +// default cache behavior of a distribution. For example, if the cacheBehavior +// is set to cache, then a per-path cache behavior can be used to specify a +// directory, file, or file type that your distribution will cache. Alternately, +// if the distribution's cacheBehavior is dont-cache, then a per-path cache +// behavior can be used to specify a directory, file, or file type that your +// distribution will not cache. +// +// if the cacheBehavior's behavior is set to 'cache', then +type CacheBehaviorPerPath struct { + _ struct{} `type:"structure"` + + // The cache behavior for the specified path. + // + // You can specify one of the following per-path cache behaviors: + // + // * cache - This behavior caches the specified path. + // + // * dont-cache - This behavior doesn't cache the specified path. + Behavior *string `locationName:"behavior" type:"string" enum:"BehaviorEnum"` + + // The path to a directory or file to cached, or not cache. Use an asterisk + // symbol to specify wildcard directories (path/to/assets/*), and file types + // (*.html, *jpg, *js). Directories and file paths are case-sensitive. + // + // Examples: + // + // * Specify the following to cache all files in the document root of an + // Apache web server running on a Lightsail instance. var/www/html/ + // + // * Specify the following file to cache only the index page in the document + // root of an Apache web server. var/www/html/index.html + // + // * Specify the following to cache only the .html files in the document + // root of an Apache web server. 
var/www/html/*.html + // + // * Specify the following to cache only the .jpg, .png, and .gif files in + // the images sub-directory of the document root of an Apache web server. + // var/www/html/images/*.jpg var/www/html/images/*.png var/www/html/images/*.gif + // Specify the following to cache all files in the images sub-directory of + // the document root of an Apache web server. var/www/html/images/ + Path *string `locationName:"path" type:"string"` +} + +// String returns the string representation +func (s CacheBehaviorPerPath) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheBehaviorPerPath) GoString() string { + return s.String() +} + +// SetBehavior sets the Behavior field's value. +func (s *CacheBehaviorPerPath) SetBehavior(v string) *CacheBehaviorPerPath { + s.Behavior = &v + return s +} + +// SetPath sets the Path field's value. +func (s *CacheBehaviorPerPath) SetPath(v string) *CacheBehaviorPerPath { + s.Path = &v + return s +} + +// Describes the cache settings of an Amazon Lightsail content delivery network +// (CDN) distribution. +// +// These settings apply only to your distribution's cacheBehaviors (including +// the defaultCacheBehavior) that have a behavior of cache. +type CacheSettings struct { + _ struct{} `type:"structure"` + + // The HTTP methods that are processed and forwarded to the distribution's origin. + // + // You can specify the following options: + // + // * GET,HEAD - The distribution forwards the GET and HEAD methods. + // + // * GET,HEAD,OPTIONS - The distribution forwards the GET, HEAD, and OPTIONS + // methods. + // + // * GET,HEAD,OPTIONS,PUT,PATCH,POST,DELETE - The distribution forwards the + // GET, HEAD, OPTIONS, PUT, PATCH, POST, and DELETE methods. + // + // If you specify the third option, you might need to restrict access to your + // distribution's origin so users can't perform operations that you don't want + // them to. For example, you might not want users to have permission to delete + // objects from your origin. + AllowedHTTPMethods *string `locationName:"allowedHTTPMethods" type:"string"` + + // The HTTP method responses that are cached by your distribution. + // + // You can specify the following options: // - // The time shown is in HH:00 format, and in Coordinated Universal Time (UTC). + // * GET,HEAD - The distribution caches responses to the GET and HEAD methods. // - // The snapshot is automatically created between the time shown and up to 45 - // minutes after. - NextSnapshotTimeOfDay *string `locationName:"nextSnapshotTimeOfDay" type:"string"` + // * GET,HEAD,OPTIONS - The distribution caches responses to the GET, HEAD, + // and OPTIONS methods. + CachedHTTPMethods *string `locationName:"cachedHTTPMethods" type:"string"` - // The daily time when an automatic snapshot is created. + // The default amount of time that objects stay in the distribution's cache + // before the distribution forwards another request to the origin to determine + // whether the content has been updated. // - // The time shown is in HH:00 format, and in Coordinated Universal Time (UTC). + // The value specified applies only when the origin does not add HTTP headers + // such as Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. + DefaultTTL *int64 `locationName:"defaultTTL" type:"long"` + + // An object that describes the cookies that are forwarded to the origin. Your + // content is cached based on the cookies that are forwarded. 
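// --- Illustrative sketch (editor's note, not part of the vendored file) ---
// The CacheBehavior and CacheBehaviorPerPath types added above are plain data
// structures with generated fluent setters. This is a minimal sketch of how a
// caller would compose a default behavior plus a per-path exception, assuming
// the vendored package path github.com/aws/aws-sdk-go/service/lightsail; the
// "/admin/*" path is a hypothetical example.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	// Default behavior: cache the whole site as static content.
	defaultBehavior := (&lightsail.CacheBehavior{}).SetBehavior("cache")

	// Per-path exception: never cache responses under the (hypothetical) admin path.
	adminException := (&lightsail.CacheBehaviorPerPath{}).
		SetBehavior("dont-cache").
		SetPath("/admin/*")

	fmt.Println(defaultBehavior, adminException)
}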
+ ForwardedCookies *CookieObject `locationName:"forwardedCookies" type:"structure"` + + // An object that describes the headers that are forwarded to the origin. Your + // content is cached based on the headers that are forwarded. + ForwardedHeaders *HeaderObject `locationName:"forwardedHeaders" type:"structure"` + + // An object that describes the query strings that are forwarded to the origin. + // Your content is cached based on the query strings that are forwarded. + ForwardedQueryStrings *QueryStringObject `locationName:"forwardedQueryStrings" type:"structure"` + + // The maximum amount of time that objects stay in the distribution's cache + // before the distribution forwards another request to the origin to determine + // whether the object has been updated. // - // The snapshot is automatically created between the time shown and up to 45 - // minutes after. - SnapshotTimeOfDay *string `locationName:"snapshotTimeOfDay" type:"string"` + // The value specified applies only when the origin adds HTTP headers such as + // Cache-Control max-age, Cache-Control s-maxage, and Expires to objects. + MaximumTTL *int64 `locationName:"maximumTTL" type:"long"` - // The status of the add-on. - Status *string `locationName:"status" type:"string"` + // The minimum amount of time that objects stay in the distribution's cache + // before the distribution forwards another request to the origin to determine + // whether the object has been updated. + // + // A value of 0 must be specified for minimumTTL if the distribution is configured + // to forward all headers to the origin. + MinimumTTL *int64 `locationName:"minimumTTL" type:"long"` } // String returns the string representation -func (s AddOn) String() string { +func (s CacheSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AddOn) GoString() string { +func (s CacheSettings) GoString() string { return s.String() } -// SetName sets the Name field's value. -func (s *AddOn) SetName(v string) *AddOn { - s.Name = &v +// SetAllowedHTTPMethods sets the AllowedHTTPMethods field's value. +func (s *CacheSettings) SetAllowedHTTPMethods(v string) *CacheSettings { + s.AllowedHTTPMethods = &v return s } -// SetNextSnapshotTimeOfDay sets the NextSnapshotTimeOfDay field's value. -func (s *AddOn) SetNextSnapshotTimeOfDay(v string) *AddOn { - s.NextSnapshotTimeOfDay = &v +// SetCachedHTTPMethods sets the CachedHTTPMethods field's value. +func (s *CacheSettings) SetCachedHTTPMethods(v string) *CacheSettings { + s.CachedHTTPMethods = &v return s } -// SetSnapshotTimeOfDay sets the SnapshotTimeOfDay field's value. -func (s *AddOn) SetSnapshotTimeOfDay(v string) *AddOn { - s.SnapshotTimeOfDay = &v +// SetDefaultTTL sets the DefaultTTL field's value. +func (s *CacheSettings) SetDefaultTTL(v int64) *CacheSettings { + s.DefaultTTL = &v return s } -// SetStatus sets the Status field's value. -func (s *AddOn) SetStatus(v string) *AddOn { - s.Status = &v +// SetForwardedCookies sets the ForwardedCookies field's value. +func (s *CacheSettings) SetForwardedCookies(v *CookieObject) *CacheSettings { + s.ForwardedCookies = v return s } -// Describes a request to enable, modify, or disable an add-on for an Amazon -// Lightsail resource. -// -// An additional cost may be associated with enabling add-ons. For more information, -// see the Lightsail pricing page (https://aws.amazon.com/lightsail/pricing/). -type AddOnRequest struct { - _ struct{} `type:"structure"` - - // The add-on type. 
- //
- // AddOnType is a required field
- AddOnType *string `locationName:"addOnType" type:"string" required:"true" enum:"AddOnType"`
-
- // An object that represents additional parameters when enabling or modifying
- // the automatic snapshot add-on.
- AutoSnapshotAddOnRequest *AutoSnapshotAddOnRequest `locationName:"autoSnapshotAddOnRequest" type:"structure"`
-}
-
-// String returns the string representation
-func (s AddOnRequest) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s AddOnRequest) GoString() string {
- return s.String()
+// SetForwardedHeaders sets the ForwardedHeaders field's value.
+func (s *CacheSettings) SetForwardedHeaders(v *HeaderObject) *CacheSettings {
+ s.ForwardedHeaders = v
+ return s
 }

-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AddOnRequest) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "AddOnRequest"}
- if s.AddOnType == nil {
- invalidParams.Add(request.NewErrParamRequired("AddOnType"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
+// SetForwardedQueryStrings sets the ForwardedQueryStrings field's value.
+func (s *CacheSettings) SetForwardedQueryStrings(v *QueryStringObject) *CacheSettings {
+ s.ForwardedQueryStrings = v
+ return s
 }

-// SetAddOnType sets the AddOnType field's value.
-func (s *AddOnRequest) SetAddOnType(v string) *AddOnRequest {
- s.AddOnType = &v
+// SetMaximumTTL sets the MaximumTTL field's value.
+func (s *CacheSettings) SetMaximumTTL(v int64) *CacheSettings {
+ s.MaximumTTL = &v
 return s
 }

-// SetAutoSnapshotAddOnRequest sets the AutoSnapshotAddOnRequest field's value.
-func (s *AddOnRequest) SetAutoSnapshotAddOnRequest(v *AutoSnapshotAddOnRequest) *AddOnRequest {
- s.AutoSnapshotAddOnRequest = v
+// SetMinimumTTL sets the MinimumTTL field's value.
+func (s *CacheSettings) SetMinimumTTL(v int64) *CacheSettings {
+ s.MinimumTTL = &v
 return s
 }

-// Describes an alarm.
+// Describes the full details of an Amazon Lightsail SSL/TLS certificate.
 //
-// An alarm is a way to monitor your Amazon Lightsail resource metrics. For
-// more information, see Alarms in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-alarms).
-type Alarm struct {
+// To get a summary of a certificate, use the GetCertificates action and omit
+// includeCertificateDetails from your request. The response will include only
+// the certificate Amazon Resource Name (ARN), certificate name, domain name,
+// and tags.
+type Certificate struct {
 _ struct{} `type:"structure"`

- // The Amazon Resource Name (ARN) of the alarm.
+ // The Amazon Resource Name (ARN) of the certificate.
 Arn *string `locationName:"arn" type:"string"`

- // The arithmetic operation used when comparing the specified statistic and
- // threshold.
- ComparisonOperator *string `locationName:"comparisonOperator" type:"string" enum:"ComparisonOperator"`
+ // The timestamp when the certificate was created.
+ CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"`

- // The contact protocols for the alarm, such as Email, SMS (text messaging),
- // or both.
- ContactProtocols []*string `locationName:"contactProtocols" type:"list"`
+ // The domain name of the certificate.
+ DomainName *string `locationName:"domainName" type:"string"`

- // The timestamp when the alarm was created.
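// --- Illustrative sketch (editor's note, not part of the vendored file) ---
// CacheSettings, whose generated setters finish just above, is the aggregate
// cache configuration of a distribution. This is a minimal sketch of composing
// it with those setters; the TTL values are arbitrary examples, and
// CookieObject is the forwarded-cookie type defined later in this same file.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	// Forward no cookies to the origin, so cached responses can be shared.
	cookies := (&lightsail.CookieObject{}).SetOption("none")

	settings := (&lightsail.CacheSettings{}).
		SetAllowedHTTPMethods("GET,HEAD").
		SetCachedHTTPMethods("GET,HEAD").
		SetDefaultTTL(86400). // one day; applies only when the origin sends no caching headers
		SetMinimumTTL(0).
		SetMaximumTTL(31536000). // one year
		SetForwardedCookies(cookies)

	fmt.Println(settings)
}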
- CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + // An array of objects that describe the domain validation records of the certificate. + DomainValidationRecords []*DomainValidationRecord `locationName:"domainValidationRecords" type:"list"` - // The number of data points that must not within the specified threshold to - // trigger the alarm. - DatapointsToAlarm *int64 `locationName:"datapointsToAlarm" type:"integer"` + // The renewal eligibility of the certificate. + EligibleToRenew *string `locationName:"eligibleToRenew" type:"string"` - // The number of periods over which data is compared to the specified threshold. - EvaluationPeriods *int64 `locationName:"evaluationPeriods" type:"integer"` + // The number of Lightsail resources that the certificate is attached to. + InUseResourceCount *int64 `locationName:"inUseResourceCount" type:"integer"` - // An object that lists information about the location of the alarm. - Location *ResourceLocation `locationName:"location" type:"structure"` + // The timestamp when the certificate was issued. + IssuedAt *time.Time `locationName:"issuedAt" type:"timestamp"` - // The name of the metric associated with the alarm. - MetricName *string `locationName:"metricName" type:"string" enum:"MetricName"` + // The certificate authority that issued the certificate. + IssuerCA *string `locationName:"issuerCA" type:"string"` - // An object that lists information about the resource monitored by the alarm. - MonitoredResourceInfo *MonitoredResourceInfo `locationName:"monitoredResourceInfo" type:"structure"` + // The algorithm used to generate the key pair (the public and private key) + // of the certificate. + KeyAlgorithm *string `locationName:"keyAlgorithm" type:"string"` - // The name of the alarm. + // The name of the certificate (e.g., my-certificate). Name *string `locationName:"name" type:"string"` - // Indicates whether the alarm is enabled. - NotificationEnabled *bool `locationName:"notificationEnabled" type:"boolean"` + // The timestamp when the certificate expires. + NotAfter *time.Time `locationName:"notAfter" type:"timestamp"` - // The alarm states that trigger a notification. - NotificationTriggers []*string `locationName:"notificationTriggers" type:"list"` + // The timestamp when the certificate is first valid. + NotBefore *time.Time `locationName:"notBefore" type:"timestamp"` - // The period, in seconds, over which the statistic is applied. - Period *int64 `locationName:"period" min:"60" type:"integer"` + // An object that describes the status of the certificate renewal managed by + // Lightsail. + RenewalSummary *RenewalSummary `locationName:"renewalSummary" type:"structure"` + + // The validation failure reason, if any, of the certificate. + // + // The following failure reasons are possible: + // + // * NO_AVAILABLE_CONTACTS - This failure applies to email validation, which + // is not available for Lightsail certificates. + // + // * ADDITIONAL_VERIFICATION_REQUIRED - Lightsail requires additional information + // to process this certificate request. This can happen as a fraud-protection + // measure, such as when the domain ranks within the Alexa top 1000 websites. + // To provide the required information, use the AWS Support Center (https://console.aws.amazon.com/support/home) + // to contact AWS Support. You cannot request a certificate for Amazon-owned + // domain names such as those ending in amazonaws.com, cloudfront.net, or + // elasticbeanstalk.com. 
+ // + // * DOMAIN_NOT_ALLOWED - One or more of the domain names in the certificate + // request was reported as an unsafe domain by VirusTotal (https://www.virustotal.com/gui/home/url). + // To correct the problem, search for your domain name on the VirusTotal + // (https://www.virustotal.com/gui/home/url) website. If your domain is reported + // as suspicious, see Google Help for Hacked Websites (https://www.google.com/webmasters/hacked/?hl=en) + // to learn what you can do. If you believe that the result is a false positive, + // notify the organization that is reporting the domain. VirusTotal is an + // aggregate of several antivirus and URL scanners and cannot remove your + // domain from a block list itself. After you correct the problem and the + // VirusTotal registry has been updated, request a new certificate. If you + // see this error and your domain is not included in the VirusTotal list, + // visit the AWS Support Center (https://console.aws.amazon.com/support/home) + // and create a case. + // + // * INVALID_PUBLIC_DOMAIN - One or more of the domain names in the certificate + // request is not valid. Typically, this is because a domain name in the + // request is not a valid top-level domain. Try to request a certificate + // again, correcting any spelling errors or typos that were in the failed + // request, and ensure that all domain names in the request are for valid + // top-level domains. For example, you cannot request a certificate for example.invalidpublicdomain + // because invalidpublicdomain is not a valid top-level domain. + // + // * OTHER - Typically, this failure occurs when there is a typographical + // error in one or more of the domain names in the certificate request. Try + // to request a certificate again, correcting any spelling errors or typos + // that were in the failed request. + RequestFailureReason *string `locationName:"requestFailureReason" type:"string"` + + // The reason the certificate was revoked. This value is present only when the + // certificate status is REVOKED. + RevocationReason *string `locationName:"revocationReason" type:"string"` + + // The timestamp when the certificate was revoked. This value is present only + // when the certificate status is REVOKED. + RevokedAt *time.Time `locationName:"revokedAt" type:"timestamp"` - // The Lightsail resource type (e.g., Alarm). - ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + // The serial number of the certificate. + SerialNumber *string `locationName:"serialNumber" type:"string"` - // The current state of the alarm. - // - // An alarm has the following possible states: - // - // * ALARM — The metric is outside of the defined threshold. - // - // * INSUFFICIENT_DATA — The alarm has just started, the metric is not - // available, or not enough data is available for the metric to determine - // the alarm state. - // - // * OK — The metric is within the defined threshold. - State *string `locationName:"state" type:"string" enum:"AlarmState"` + // The validation status of the certificate. + Status *string `locationName:"status" type:"string" enum:"CertificateStatus"` - // The statistic for the metric associated with the alarm. - // - // The following statistics are available: - // - // * Minimum — The lowest value observed during the specified period. Use - // this value to determine low volumes of activity for your application. - // - // * Maximum — The highest value observed during the specified period. 
- // Use this value to determine high volumes of activity for your application. - // - // * Sum — All values submitted for the matching metric added together. - // You can use this statistic to determine the total volume of a metric. - // - // * Average — The value of Sum / SampleCount during the specified period. - // By comparing this statistic with the Minimum and Maximum values, you can - // determine the full scope of a metric and how close the average use is - // to the Minimum and Maximum values. This comparison helps you to know when - // to increase or decrease your resources. - // - // * SampleCount — The count, or number, of data points used for the statistical - // calculation. - Statistic *string `locationName:"statistic" type:"string" enum:"MetricStatistic"` + // An array of strings that specify the alternate domains (e.g., example2.com) + // and subdomains (e.g., blog.example.com) of the certificate. + SubjectAlternativeNames []*string `locationName:"subjectAlternativeNames" type:"list"` // The support code. Include this code in your email to support when you have - // questions about your Lightsail alarm. This code enables our support team - // to look up your Lightsail information more easily. + // questions about your Lightsail certificate. This code enables our support + // team to look up your Lightsail information more easily. SupportCode *string `locationName:"supportCode" type:"string"` - // The value against which the specified statistic is compared. - Threshold *float64 `locationName:"threshold" type:"double"` - - // Specifies how the alarm handles missing data points. - // - // An alarm can treat missing data in the following ways: - // - // * breaching — Assume the missing data is not within the threshold. Missing - // data counts towards the number of times the metric is not within the threshold. - // - // * notBreaching — Assume the missing data is within the threshold. Missing - // data does not count towards the number of times the metric is not within - // the threshold. - // - // * ignore — Ignore the missing data. Maintains the current alarm state. - // - // * missing — Missing data is treated as missing. - TreatMissingData *string `locationName:"treatMissingData" type:"string" enum:"TreatMissingData"` - - // The unit of the metric associated with the alarm. - Unit *string `locationName:"unit" type:"string" enum:"MetricUnit"` + // The tag keys and optional values for the resource. For more information about + // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s Alarm) String() string { +func (s Certificate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Alarm) GoString() string { +func (s Certificate) GoString() string { return s.String() } // SetArn sets the Arn field's value. -func (s *Alarm) SetArn(v string) *Alarm { +func (s *Certificate) SetArn(v string) *Certificate { s.Arn = &v return s } -// SetComparisonOperator sets the ComparisonOperator field's value. -func (s *Alarm) SetComparisonOperator(v string) *Alarm { - s.ComparisonOperator = &v +// SetCreatedAt sets the CreatedAt field's value. +func (s *Certificate) SetCreatedAt(v time.Time) *Certificate { + s.CreatedAt = &v return s } -// SetContactProtocols sets the ContactProtocols field's value. 
-func (s *Alarm) SetContactProtocols(v []*string) *Alarm { - s.ContactProtocols = v +// SetDomainName sets the DomainName field's value. +func (s *Certificate) SetDomainName(v string) *Certificate { + s.DomainName = &v return s } -// SetCreatedAt sets the CreatedAt field's value. -func (s *Alarm) SetCreatedAt(v time.Time) *Alarm { - s.CreatedAt = &v +// SetDomainValidationRecords sets the DomainValidationRecords field's value. +func (s *Certificate) SetDomainValidationRecords(v []*DomainValidationRecord) *Certificate { + s.DomainValidationRecords = v return s } -// SetDatapointsToAlarm sets the DatapointsToAlarm field's value. -func (s *Alarm) SetDatapointsToAlarm(v int64) *Alarm { - s.DatapointsToAlarm = &v +// SetEligibleToRenew sets the EligibleToRenew field's value. +func (s *Certificate) SetEligibleToRenew(v string) *Certificate { + s.EligibleToRenew = &v return s } -// SetEvaluationPeriods sets the EvaluationPeriods field's value. -func (s *Alarm) SetEvaluationPeriods(v int64) *Alarm { - s.EvaluationPeriods = &v +// SetInUseResourceCount sets the InUseResourceCount field's value. +func (s *Certificate) SetInUseResourceCount(v int64) *Certificate { + s.InUseResourceCount = &v return s } -// SetLocation sets the Location field's value. -func (s *Alarm) SetLocation(v *ResourceLocation) *Alarm { - s.Location = v +// SetIssuedAt sets the IssuedAt field's value. +func (s *Certificate) SetIssuedAt(v time.Time) *Certificate { + s.IssuedAt = &v return s } -// SetMetricName sets the MetricName field's value. -func (s *Alarm) SetMetricName(v string) *Alarm { - s.MetricName = &v +// SetIssuerCA sets the IssuerCA field's value. +func (s *Certificate) SetIssuerCA(v string) *Certificate { + s.IssuerCA = &v return s } -// SetMonitoredResourceInfo sets the MonitoredResourceInfo field's value. -func (s *Alarm) SetMonitoredResourceInfo(v *MonitoredResourceInfo) *Alarm { - s.MonitoredResourceInfo = v +// SetKeyAlgorithm sets the KeyAlgorithm field's value. +func (s *Certificate) SetKeyAlgorithm(v string) *Certificate { + s.KeyAlgorithm = &v return s } // SetName sets the Name field's value. -func (s *Alarm) SetName(v string) *Alarm { +func (s *Certificate) SetName(v string) *Certificate { s.Name = &v return s } -// SetNotificationEnabled sets the NotificationEnabled field's value. -func (s *Alarm) SetNotificationEnabled(v bool) *Alarm { - s.NotificationEnabled = &v +// SetNotAfter sets the NotAfter field's value. +func (s *Certificate) SetNotAfter(v time.Time) *Certificate { + s.NotAfter = &v + return s +} + +// SetNotBefore sets the NotBefore field's value. +func (s *Certificate) SetNotBefore(v time.Time) *Certificate { + s.NotBefore = &v + return s +} + +// SetRenewalSummary sets the RenewalSummary field's value. +func (s *Certificate) SetRenewalSummary(v *RenewalSummary) *Certificate { + s.RenewalSummary = v + return s +} + +// SetRequestFailureReason sets the RequestFailureReason field's value. +func (s *Certificate) SetRequestFailureReason(v string) *Certificate { + s.RequestFailureReason = &v + return s +} + +// SetRevocationReason sets the RevocationReason field's value. +func (s *Certificate) SetRevocationReason(v string) *Certificate { + s.RevocationReason = &v + return s +} + +// SetRevokedAt sets the RevokedAt field's value. +func (s *Certificate) SetRevokedAt(v time.Time) *Certificate { + s.RevokedAt = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. 
+func (s *Certificate) SetSerialNumber(v string) *Certificate { + s.SerialNumber = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Certificate) SetStatus(v string) *Certificate { + s.Status = &v + return s +} + +// SetSubjectAlternativeNames sets the SubjectAlternativeNames field's value. +func (s *Certificate) SetSubjectAlternativeNames(v []*string) *Certificate { + s.SubjectAlternativeNames = v + return s +} + +// SetSupportCode sets the SupportCode field's value. +func (s *Certificate) SetSupportCode(v string) *Certificate { + s.SupportCode = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Certificate) SetTags(v []*Tag) *Certificate { + s.Tags = v return s } -// SetNotificationTriggers sets the NotificationTriggers field's value. -func (s *Alarm) SetNotificationTriggers(v []*string) *Alarm { - s.NotificationTriggers = v - return s -} +// Describes an Amazon Lightsail SSL/TLS certificate. +type CertificateSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the certificate. + CertificateArn *string `locationName:"certificateArn" type:"string"` + + // An object that describes a certificate in detail. + CertificateDetail *Certificate `locationName:"certificateDetail" type:"structure"` + + // The name of the certificate. + CertificateName *string `locationName:"certificateName" type:"string"` + + // The domain name of the certificate. + DomainName *string `locationName:"domainName" type:"string"` -// SetPeriod sets the Period field's value. -func (s *Alarm) SetPeriod(v int64) *Alarm { - s.Period = &v - return s + // The tag keys and optional values for the resource. For more information about + // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + Tags []*Tag `locationName:"tags" type:"list"` } -// SetResourceType sets the ResourceType field's value. -func (s *Alarm) SetResourceType(v string) *Alarm { - s.ResourceType = &v - return s +// String returns the string representation +func (s CertificateSummary) String() string { + return awsutil.Prettify(s) } -// SetState sets the State field's value. -func (s *Alarm) SetState(v string) *Alarm { - s.State = &v - return s +// GoString returns the string representation +func (s CertificateSummary) GoString() string { + return s.String() } -// SetStatistic sets the Statistic field's value. -func (s *Alarm) SetStatistic(v string) *Alarm { - s.Statistic = &v +// SetCertificateArn sets the CertificateArn field's value. +func (s *CertificateSummary) SetCertificateArn(v string) *CertificateSummary { + s.CertificateArn = &v return s } -// SetSupportCode sets the SupportCode field's value. -func (s *Alarm) SetSupportCode(v string) *Alarm { - s.SupportCode = &v +// SetCertificateDetail sets the CertificateDetail field's value. +func (s *CertificateSummary) SetCertificateDetail(v *Certificate) *CertificateSummary { + s.CertificateDetail = v return s } -// SetThreshold sets the Threshold field's value. -func (s *Alarm) SetThreshold(v float64) *Alarm { - s.Threshold = &v +// SetCertificateName sets the CertificateName field's value. +func (s *CertificateSummary) SetCertificateName(v string) *CertificateSummary { + s.CertificateName = &v return s } -// SetTreatMissingData sets the TreatMissingData field's value. -func (s *Alarm) SetTreatMissingData(v string) *Alarm { - s.TreatMissingData = &v +// SetDomainName sets the DomainName field's value. 
+func (s *CertificateSummary) SetDomainName(v string) *CertificateSummary { + s.DomainName = &v return s } -// SetUnit sets the Unit field's value. -func (s *Alarm) SetUnit(v string) *Alarm { - s.Unit = &v +// SetTags sets the Tags field's value. +func (s *CertificateSummary) SetTags(v []*Tag) *CertificateSummary { + s.Tags = v return s } -type AllocateStaticIpInput struct { +type CloseInstancePublicPortsInput struct { _ struct{} `type:"structure"` - // The name of the static IP address. + // The name of the instance for which to close ports. // - // StaticIpName is a required field - StaticIpName *string `locationName:"staticIpName" type:"string" required:"true"` + // InstanceName is a required field + InstanceName *string `locationName:"instanceName" type:"string" required:"true"` + + // An object to describe the ports to close for the specified instance. + // + // PortInfo is a required field + PortInfo *PortInfo `locationName:"portInfo" type:"structure" required:"true"` } // String returns the string representation -func (s AllocateStaticIpInput) String() string { +func (s CloseInstancePublicPortsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AllocateStaticIpInput) GoString() string { +func (s CloseInstancePublicPortsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AllocateStaticIpInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AllocateStaticIpInput"} - if s.StaticIpName == nil { - invalidParams.Add(request.NewErrParamRequired("StaticIpName")) +func (s *CloseInstancePublicPortsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloseInstancePublicPortsInput"} + if s.InstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceName")) + } + if s.PortInfo == nil { + invalidParams.Add(request.NewErrParamRequired("PortInfo")) + } + if s.PortInfo != nil { + if err := s.PortInfo.Validate(); err != nil { + invalidParams.AddNested("PortInfo", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -12681,326 +15653,409 @@ func (s *AllocateStaticIpInput) Validate() error { return nil } -// SetStaticIpName sets the StaticIpName field's value. -func (s *AllocateStaticIpInput) SetStaticIpName(v string) *AllocateStaticIpInput { - s.StaticIpName = &v +// SetInstanceName sets the InstanceName field's value. +func (s *CloseInstancePublicPortsInput) SetInstanceName(v string) *CloseInstancePublicPortsInput { + s.InstanceName = &v return s } -type AllocateStaticIpOutput struct { +// SetPortInfo sets the PortInfo field's value. +func (s *CloseInstancePublicPortsInput) SetPortInfo(v *PortInfo) *CloseInstancePublicPortsInput { + s.PortInfo = v + return s +} + +type CloseInstancePublicPortsOutput struct { _ struct{} `type:"structure"` - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` + // An object that describes the result of the action, such as the status of + // the request, the timestamp of the request, and the resources affected by + // the request. 
+ Operation *Operation `locationName:"operation" type:"structure"` } // String returns the string representation -func (s AllocateStaticIpOutput) String() string { +func (s CloseInstancePublicPortsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AllocateStaticIpOutput) GoString() string { +func (s CloseInstancePublicPortsOutput) GoString() string { return s.String() } -// SetOperations sets the Operations field's value. -func (s *AllocateStaticIpOutput) SetOperations(v []*Operation) *AllocateStaticIpOutput { - s.Operations = v +// SetOperation sets the Operation field's value. +func (s *CloseInstancePublicPortsOutput) SetOperation(v *Operation) *CloseInstancePublicPortsOutput { + s.Operation = v return s } -type AttachDiskInput struct { +// Describes a CloudFormation stack record created as a result of the create +// cloud formation stack operation. +// +// A CloudFormation stack record provides information about the AWS CloudFormation +// stack used to create a new Amazon Elastic Compute Cloud instance from an +// exported Lightsail instance snapshot. +type CloudFormationStackRecord struct { _ struct{} `type:"structure"` - // The unique Lightsail disk name (e.g., my-disk). - // - // DiskName is a required field - DiskName *string `locationName:"diskName" type:"string" required:"true"` + // The Amazon Resource Name (ARN) of the CloudFormation stack record. + Arn *string `locationName:"arn" type:"string"` - // The disk path to expose to the instance (e.g., /dev/xvdf). - // - // DiskPath is a required field - DiskPath *string `locationName:"diskPath" type:"string" required:"true"` + // The date when the CloudFormation stack record was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` - // The name of the Lightsail instance where you want to utilize the storage - // disk. - // - // InstanceName is a required field - InstanceName *string `locationName:"instanceName" type:"string" required:"true"` + // A list of objects describing the destination service, which is AWS CloudFormation, + // and the Amazon Resource Name (ARN) of the AWS CloudFormation stack. + DestinationInfo *DestinationInfo `locationName:"destinationInfo" type:"structure"` + + // A list of objects describing the Availability Zone and AWS Region of the + // CloudFormation stack record. + Location *ResourceLocation `locationName:"location" type:"structure"` + + // The name of the CloudFormation stack record. It starts with CloudFormationStackRecord + // followed by a GUID. + Name *string `locationName:"name" type:"string"` + + // The Lightsail resource type (e.g., CloudFormationStackRecord). + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // A list of objects describing the source of the CloudFormation stack record. + SourceInfo []*CloudFormationStackRecordSourceInfo `locationName:"sourceInfo" type:"list"` + + // The current state of the CloudFormation stack record. + State *string `locationName:"state" type:"string" enum:"RecordState"` } // String returns the string representation -func (s AttachDiskInput) String() string { +func (s CloudFormationStackRecord) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AttachDiskInput) GoString() string { +func (s CloudFormationStackRecord) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
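// --- Illustrative sketch (editor's note, not part of the vendored file) ---
// CloseInstancePublicPortsInput/Output above are the request and response
// shapes for the client's CloseInstancePublicPorts call. A minimal sketch of
// invoking it; the instance name and port range are hypothetical, and
// PortInfo's FromPort/ToPort/Protocol fields are assumed from the service API
// (they are defined elsewhere in this file, outside this hunk).
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := lightsail.New(sess)

	out, err := svc.CloseInstancePublicPorts(&lightsail.CloseInstancePublicPortsInput{
		InstanceName: aws.String("my-instance"), // hypothetical instance name
		PortInfo: (&lightsail.PortInfo{}).
			SetFromPort(8080).
			SetToPort(8080).
			SetProtocol("tcp"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The output carries a single Operation describing the request's status.
	log.Println(out)
}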
-func (s *AttachDiskInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AttachDiskInput"} - if s.DiskName == nil { - invalidParams.Add(request.NewErrParamRequired("DiskName")) - } - if s.DiskPath == nil { - invalidParams.Add(request.NewErrParamRequired("DiskPath")) - } - if s.InstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceName")) - } +// SetArn sets the Arn field's value. +func (s *CloudFormationStackRecord) SetArn(v string) *CloudFormationStackRecord { + s.Arn = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCreatedAt sets the CreatedAt field's value. +func (s *CloudFormationStackRecord) SetCreatedAt(v time.Time) *CloudFormationStackRecord { + s.CreatedAt = &v + return s } -// SetDiskName sets the DiskName field's value. -func (s *AttachDiskInput) SetDiskName(v string) *AttachDiskInput { - s.DiskName = &v +// SetDestinationInfo sets the DestinationInfo field's value. +func (s *CloudFormationStackRecord) SetDestinationInfo(v *DestinationInfo) *CloudFormationStackRecord { + s.DestinationInfo = v return s } -// SetDiskPath sets the DiskPath field's value. -func (s *AttachDiskInput) SetDiskPath(v string) *AttachDiskInput { - s.DiskPath = &v +// SetLocation sets the Location field's value. +func (s *CloudFormationStackRecord) SetLocation(v *ResourceLocation) *CloudFormationStackRecord { + s.Location = v return s } -// SetInstanceName sets the InstanceName field's value. -func (s *AttachDiskInput) SetInstanceName(v string) *AttachDiskInput { - s.InstanceName = &v +// SetName sets the Name field's value. +func (s *CloudFormationStackRecord) SetName(v string) *CloudFormationStackRecord { + s.Name = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *CloudFormationStackRecord) SetResourceType(v string) *CloudFormationStackRecord { + s.ResourceType = &v + return s +} + +// SetSourceInfo sets the SourceInfo field's value. +func (s *CloudFormationStackRecord) SetSourceInfo(v []*CloudFormationStackRecordSourceInfo) *CloudFormationStackRecord { + s.SourceInfo = v + return s +} + +// SetState sets the State field's value. +func (s *CloudFormationStackRecord) SetState(v string) *CloudFormationStackRecord { + s.State = &v + return s +} + +// Describes the source of a CloudFormation stack record (i.e., the export snapshot +// record). +type CloudFormationStackRecordSourceInfo struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the export snapshot record. + Arn *string `locationName:"arn" type:"string"` + + // The name of the record. + Name *string `locationName:"name" type:"string"` + + // The Lightsail resource type (e.g., ExportSnapshotRecord). + ResourceType *string `locationName:"resourceType" type:"string" enum:"CloudFormationStackRecordSourceType"` +} + +// String returns the string representation +func (s CloudFormationStackRecordSourceInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFormationStackRecordSourceInfo) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CloudFormationStackRecordSourceInfo) SetArn(v string) *CloudFormationStackRecordSourceInfo { + s.Arn = &v + return s +} + +// SetName sets the Name field's value. +func (s *CloudFormationStackRecordSourceInfo) SetName(v string) *CloudFormationStackRecordSourceInfo { + s.Name = &v + return s +} + +// SetResourceType sets the ResourceType field's value. 
+func (s *CloudFormationStackRecordSourceInfo) SetResourceType(v string) *CloudFormationStackRecordSourceInfo { + s.ResourceType = &v return s } -type AttachDiskOutput struct { +// Describes a contact method. +// +// A contact method is a way to send you notifications. For more information, +// see Notifications in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-notifications). +type ContactMethod struct { _ struct{} `type:"structure"` - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` -} + // The Amazon Resource Name (ARN) of the contact method. + Arn *string `locationName:"arn" type:"string"` -// String returns the string representation -func (s AttachDiskOutput) String() string { - return awsutil.Prettify(s) -} + // The destination of the contact method, such as an email address or a mobile + // phone number. + ContactEndpoint *string `locationName:"contactEndpoint" type:"string"` -// GoString returns the string representation -func (s AttachDiskOutput) GoString() string { - return s.String() -} + // The timestamp when the contact method was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` -// SetOperations sets the Operations field's value. -func (s *AttachDiskOutput) SetOperations(v []*Operation) *AttachDiskOutput { - s.Operations = v - return s -} + // Describes the resource location. + Location *ResourceLocation `locationName:"location" type:"structure"` -type AttachInstancesToLoadBalancerInput struct { - _ struct{} `type:"structure"` + // The name of the contact method. + Name *string `locationName:"name" type:"string"` - // An array of strings representing the instance name(s) you want to attach - // to your load balancer. + // The protocol of the contact method, such as email or SMS (text messaging). + Protocol *string `locationName:"protocol" type:"string" enum:"ContactProtocol"` + + // The Lightsail resource type (e.g., ContactMethod). + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The current status of the contact method. // - // An instance must be running before you can attach it to your load balancer. + // A contact method has the following possible status: // - // There are no additional limits on the number of instances you can attach - // to your load balancer, aside from the limit of Lightsail instances you can - // create in your account (20). + // * PendingVerification - The contact method has not yet been verified, + // and the verification has not yet expired. // - // InstanceNames is a required field - InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` - - // The name of the load balancer. + // * Valid - The contact method has been verified. // - // LoadBalancerName is a required field - LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` + // * InValid - An attempt was made to verify the contact method, but the + // verification has expired. + Status *string `locationName:"status" type:"string" enum:"ContactMethodStatus"` + + // The support code. Include this code in your email to support when you have + // questions about your Lightsail contact method. This code enables our support + // team to look up your Lightsail information more easily. 
+ SupportCode *string `locationName:"supportCode" type:"string"` } // String returns the string representation -func (s AttachInstancesToLoadBalancerInput) String() string { +func (s ContactMethod) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AttachInstancesToLoadBalancerInput) GoString() string { +func (s ContactMethod) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *AttachInstancesToLoadBalancerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AttachInstancesToLoadBalancerInput"} - if s.InstanceNames == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceNames")) - } - if s.LoadBalancerName == nil { - invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) - } +// SetArn sets the Arn field's value. +func (s *ContactMethod) SetArn(v string) *ContactMethod { + s.Arn = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetContactEndpoint sets the ContactEndpoint field's value. +func (s *ContactMethod) SetContactEndpoint(v string) *ContactMethod { + s.ContactEndpoint = &v + return s } -// SetInstanceNames sets the InstanceNames field's value. -func (s *AttachInstancesToLoadBalancerInput) SetInstanceNames(v []*string) *AttachInstancesToLoadBalancerInput { - s.InstanceNames = v +// SetCreatedAt sets the CreatedAt field's value. +func (s *ContactMethod) SetCreatedAt(v time.Time) *ContactMethod { + s.CreatedAt = &v return s } -// SetLoadBalancerName sets the LoadBalancerName field's value. -func (s *AttachInstancesToLoadBalancerInput) SetLoadBalancerName(v string) *AttachInstancesToLoadBalancerInput { - s.LoadBalancerName = &v +// SetLocation sets the Location field's value. +func (s *ContactMethod) SetLocation(v *ResourceLocation) *ContactMethod { + s.Location = v return s } -type AttachInstancesToLoadBalancerOutput struct { - _ struct{} `type:"structure"` +// SetName sets the Name field's value. +func (s *ContactMethod) SetName(v string) *ContactMethod { + s.Name = &v + return s +} - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` +// SetProtocol sets the Protocol field's value. +func (s *ContactMethod) SetProtocol(v string) *ContactMethod { + s.Protocol = &v + return s } -// String returns the string representation -func (s AttachInstancesToLoadBalancerOutput) String() string { - return awsutil.Prettify(s) +// SetResourceType sets the ResourceType field's value. +func (s *ContactMethod) SetResourceType(v string) *ContactMethod { + s.ResourceType = &v + return s } -// GoString returns the string representation -func (s AttachInstancesToLoadBalancerOutput) GoString() string { - return s.String() +// SetStatus sets the Status field's value. +func (s *ContactMethod) SetStatus(v string) *ContactMethod { + s.Status = &v + return s } -// SetOperations sets the Operations field's value. -func (s *AttachInstancesToLoadBalancerOutput) SetOperations(v []*Operation) *AttachInstancesToLoadBalancerOutput { - s.Operations = v +// SetSupportCode sets the SupportCode field's value. 
+func (s *ContactMethod) SetSupportCode(v string) *ContactMethod { + s.SupportCode = &v return s } -type AttachLoadBalancerTlsCertificateInput struct { +// Describes whether an Amazon Lightsail content delivery network (CDN) distribution +// forwards cookies to the origin and, if so, which ones. +// +// For the cookies that you specify, your distribution caches separate versions +// of the specified content based on the cookie values in viewer requests. +type CookieObject struct { _ struct{} `type:"structure"` - // The name of your SSL/TLS certificate. - // - // CertificateName is a required field - CertificateName *string `locationName:"certificateName" type:"string" required:"true"` + // The specific cookies to forward to your distribution's origin. + CookiesAllowList []*string `locationName:"cookiesAllowList" type:"list"` - // The name of the load balancer to which you want to associate the SSL/TLS - // certificate. - // - // LoadBalancerName is a required field - LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` + // Specifies which cookies to forward to the distribution's origin for a cache + // behavior: all, none, or allow-list to forward only the cookies specified + // in the cookiesAllowList parameter. + Option *string `locationName:"option" type:"string" enum:"ForwardValues"` } // String returns the string representation -func (s AttachLoadBalancerTlsCertificateInput) String() string { +func (s CookieObject) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AttachLoadBalancerTlsCertificateInput) GoString() string { +func (s CookieObject) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *AttachLoadBalancerTlsCertificateInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AttachLoadBalancerTlsCertificateInput"} - if s.CertificateName == nil { - invalidParams.Add(request.NewErrParamRequired("CertificateName")) - } - if s.LoadBalancerName == nil { - invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCertificateName sets the CertificateName field's value. -func (s *AttachLoadBalancerTlsCertificateInput) SetCertificateName(v string) *AttachLoadBalancerTlsCertificateInput { - s.CertificateName = &v +// SetCookiesAllowList sets the CookiesAllowList field's value. +func (s *CookieObject) SetCookiesAllowList(v []*string) *CookieObject { + s.CookiesAllowList = v return s } -// SetLoadBalancerName sets the LoadBalancerName field's value. -func (s *AttachLoadBalancerTlsCertificateInput) SetLoadBalancerName(v string) *AttachLoadBalancerTlsCertificateInput { - s.LoadBalancerName = &v +// SetOption sets the Option field's value. +func (s *CookieObject) SetOption(v string) *CookieObject { + s.Option = &v return s } -type AttachLoadBalancerTlsCertificateOutput struct { +type CopySnapshotInput struct { _ struct{} `type:"structure"` - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. + // The date of the source automatic snapshot to copy. Use the get auto snapshots + // operation to identify the dates of the available automatic snapshots. // - // These SSL/TLS certificates are only usable by Lightsail load balancers. 
You - // can't get the certificate and use it for another purpose. - Operations []*Operation `locationName:"operations" type:"list"` -} - -// String returns the string representation -func (s AttachLoadBalancerTlsCertificateOutput) String() string { - return awsutil.Prettify(s) -} + // Constraints: + // + // * Must be specified in YYYY-MM-DD format. + // + // * This parameter cannot be defined together with the use latest restorable + // auto snapshot parameter. The restore date and use latest restorable auto + // snapshot parameters are mutually exclusive. + // + // * Define this parameter only when copying an automatic snapshot as a manual + // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). + RestoreDate *string `locationName:"restoreDate" type:"string"` -// GoString returns the string representation -func (s AttachLoadBalancerTlsCertificateOutput) GoString() string { - return s.String() -} + // The AWS Region where the source manual or automatic snapshot is located. + // + // SourceRegion is a required field + SourceRegion *string `locationName:"sourceRegion" type:"string" required:"true" enum:"RegionName"` -// SetOperations sets the Operations field's value. -func (s *AttachLoadBalancerTlsCertificateOutput) SetOperations(v []*Operation) *AttachLoadBalancerTlsCertificateOutput { - s.Operations = v - return s -} + // The name of the source instance or disk from which the source automatic snapshot + // was created. + // + // Constraint: + // + // * Define this parameter only when copying an automatic snapshot as a manual + // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). + SourceResourceName *string `locationName:"sourceResourceName" type:"string"` -type AttachStaticIpInput struct { - _ struct{} `type:"structure"` + // The name of the source manual snapshot to copy. + // + // Constraint: + // + // * Define this parameter only when copying a manual snapshot as another + // manual snapshot. + SourceSnapshotName *string `locationName:"sourceSnapshotName" type:"string"` - // The instance name to which you want to attach the static IP address. + // The name of the new manual snapshot to be created as a copy. // - // InstanceName is a required field - InstanceName *string `locationName:"instanceName" type:"string" required:"true"` + // TargetSnapshotName is a required field + TargetSnapshotName *string `locationName:"targetSnapshotName" type:"string" required:"true"` - // The name of the static IP. + // A Boolean value to indicate whether to use the latest available automatic + // snapshot of the specified source instance or disk. // - // StaticIpName is a required field - StaticIpName *string `locationName:"staticIpName" type:"string" required:"true"` + // Constraints: + // + // * This parameter cannot be defined together with the restore date parameter. + // The use latest restorable auto snapshot and restore date parameters are + // mutually exclusive. + // + // * Define this parameter only when copying an automatic snapshot as a manual + // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). 
+ UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"` } // String returns the string representation -func (s AttachStaticIpInput) String() string { +func (s CopySnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AttachStaticIpInput) GoString() string { +func (s CopySnapshotInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AttachStaticIpInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AttachStaticIpInput"} - if s.InstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceName")) +func (s *CopySnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopySnapshotInput"} + if s.SourceRegion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceRegion")) } - if s.StaticIpName == nil { - invalidParams.Add(request.NewErrParamRequired("StaticIpName")) + if s.TargetSnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("TargetSnapshotName")) } if invalidParams.Len() > 0 { @@ -13009,882 +16064,860 @@ func (s *AttachStaticIpInput) Validate() error { return nil } -// SetInstanceName sets the InstanceName field's value. -func (s *AttachStaticIpInput) SetInstanceName(v string) *AttachStaticIpInput { - s.InstanceName = &v +// SetRestoreDate sets the RestoreDate field's value. +func (s *CopySnapshotInput) SetRestoreDate(v string) *CopySnapshotInput { + s.RestoreDate = &v return s } -// SetStaticIpName sets the StaticIpName field's value. -func (s *AttachStaticIpInput) SetStaticIpName(v string) *AttachStaticIpInput { - s.StaticIpName = &v +// SetSourceRegion sets the SourceRegion field's value. +func (s *CopySnapshotInput) SetSourceRegion(v string) *CopySnapshotInput { + s.SourceRegion = &v return s } -type AttachStaticIpOutput struct { - _ struct{} `type:"structure"` - - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` +// SetSourceResourceName sets the SourceResourceName field's value. +func (s *CopySnapshotInput) SetSourceResourceName(v string) *CopySnapshotInput { + s.SourceResourceName = &v + return s } -// String returns the string representation -func (s AttachStaticIpOutput) String() string { - return awsutil.Prettify(s) +// SetSourceSnapshotName sets the SourceSnapshotName field's value. +func (s *CopySnapshotInput) SetSourceSnapshotName(v string) *CopySnapshotInput { + s.SourceSnapshotName = &v + return s } -// GoString returns the string representation -func (s AttachStaticIpOutput) GoString() string { - return s.String() +// SetTargetSnapshotName sets the TargetSnapshotName field's value. +func (s *CopySnapshotInput) SetTargetSnapshotName(v string) *CopySnapshotInput { + s.TargetSnapshotName = &v + return s } -// SetOperations sets the Operations field's value. -func (s *AttachStaticIpOutput) SetOperations(v []*Operation) *AttachStaticIpOutput { - s.Operations = v +// SetUseLatestRestorableAutoSnapshot sets the UseLatestRestorableAutoSnapshot field's value. 
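// --- Illustrative sketch (editor's note, not part of the vendored file) ---
// CopySnapshotInput above documents two mutually exclusive modes: copying a
// manual snapshot by name, or copying an automatic snapshot of a resource
// (either by restore date or by "use latest"). A minimal sketch of both modes;
// the snapshot names, resource names, and region are hypothetical examples.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession()))

	// Mode 1: copy an existing manual snapshot.
	manual := &lightsail.CopySnapshotInput{
		SourceRegion:       aws.String("us-east-2"),
		SourceSnapshotName: aws.String("my-snapshot"),
		TargetSnapshotName: aws.String("my-snapshot-copy"),
	}

	// Mode 2: copy the latest automatic snapshot of an instance or disk.
	// UseLatestRestorableAutoSnapshot and RestoreDate are mutually exclusive.
	auto := &lightsail.CopySnapshotInput{
		SourceRegion:                    aws.String("us-east-2"),
		SourceResourceName:              aws.String("my-instance"),
		TargetSnapshotName:              aws.String("my-instance-auto-copy"),
		UseLatestRestorableAutoSnapshot: aws.Bool(true),
	}

	for _, in := range []*lightsail.CopySnapshotInput{manual, auto} {
		if out, err := svc.CopySnapshot(in); err != nil {
			log.Println(err)
		} else {
			log.Println(out)
		}
	}
}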
+func (s *CopySnapshotInput) SetUseLatestRestorableAutoSnapshot(v bool) *CopySnapshotInput { + s.UseLatestRestorableAutoSnapshot = &v return s } -// Describes a block storage disk that is attached to an instance, and is included -// in an automatic snapshot. -type AttachedDisk struct { +type CopySnapshotOutput struct { _ struct{} `type:"structure"` - // The path of the disk (e.g., /dev/xvdf). - Path *string `locationName:"path" type:"string"` - - // The size of the disk in GB. - SizeInGb *int64 `locationName:"sizeInGb" type:"integer"` + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s AttachedDisk) String() string { +func (s CopySnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AttachedDisk) GoString() string { +func (s CopySnapshotOutput) GoString() string { return s.String() } -// SetPath sets the Path field's value. -func (s *AttachedDisk) SetPath(v string) *AttachedDisk { - s.Path = &v - return s -} - -// SetSizeInGb sets the SizeInGb field's value. -func (s *AttachedDisk) SetSizeInGb(v int64) *AttachedDisk { - s.SizeInGb = &v +// SetOperations sets the Operations field's value. +func (s *CopySnapshotOutput) SetOperations(v []*Operation) *CopySnapshotOutput { + s.Operations = v return s } -// Describes a request to enable or modify the automatic snapshot add-on for -// an Amazon Lightsail instance or disk. -// -// When you modify the automatic snapshot time for a resource, it is typically -// effective immediately except under the following conditions: -// -// * If an automatic snapshot has been created for the current day, and you -// change the snapshot time to a later time of day, then the new snapshot -// time will be effective the following day. This ensures that two snapshots -// are not created for the current day. -// -// * If an automatic snapshot has not yet been created for the current day, -// and you change the snapshot time to an earlier time of day, then the new -// snapshot time will be effective the following day and a snapshot is automatically -// created at the previously set time for the current day. This ensures that -// a snapshot is created for the current day. -// -// * If an automatic snapshot has not yet been created for the current day, -// and you change the snapshot time to a time that is within 30 minutes from -// your current time, then the new snapshot time will be effective the following -// day and a snapshot is automatically created at the previously set time -// for the current day. This ensures that a snapshot is created for the current -// day, because 30 minutes is required between your current time and the -// new snapshot time that you specify. -// -// * If an automatic snapshot is scheduled to be created within 30 minutes -// from your current time and you change the snapshot time, then the new -// snapshot time will be effective the following day and a snapshot is automatically -// created at the previously set time for the current day. This ensures that -// a snapshot is created for the current day, because 30 minutes is required -// between your current time and the new snapshot time that you specify. 
-type AutoSnapshotAddOnRequest struct { +type CreateCertificateInput struct { _ struct{} `type:"structure"` - // The daily time when an automatic snapshot will be created. + // The name for the certificate. // - // Constraints: + // CertificateName is a required field + CertificateName *string `locationName:"certificateName" type:"string" required:"true"` + + // The domain name (e.g., example.com) for the certificate. // - // * Must be in HH:00 format, and in an hourly increment. + // DomainName is a required field + DomainName *string `locationName:"domainName" type:"string" required:"true"` + + // An array of strings that specify the alternate domains (e.g., example2.com) + // and subdomains (e.g., blog.example.com) for the certificate. // - // * Specified in Coordinated Universal Time (UTC). + // You can specify a maximum of nine alternate domains (in addition to the primary + // domain name). // - // * The snapshot will be automatically created between the time specified - // and up to 45 minutes after. - SnapshotTimeOfDay *string `locationName:"snapshotTimeOfDay" type:"string"` + // Wildcard domain entries (e.g., *.example.com) are not supported. + SubjectAlternativeNames []*string `locationName:"subjectAlternativeNames" type:"list"` + + // The tag keys and optional values to add to the certificate during create. + // + // Use the TagResource action to tag a resource after it's created. + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s AutoSnapshotAddOnRequest) String() string { +func (s CreateCertificateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AutoSnapshotAddOnRequest) GoString() string { +func (s CreateCertificateInput) GoString() string { return s.String() } -// SetSnapshotTimeOfDay sets the SnapshotTimeOfDay field's value. -func (s *AutoSnapshotAddOnRequest) SetSnapshotTimeOfDay(v string) *AutoSnapshotAddOnRequest { - s.SnapshotTimeOfDay = &v - return s -} - -// Describes an automatic snapshot. -type AutoSnapshotDetails struct { - _ struct{} `type:"structure"` - - // The timestamp when the automatic snapshot was created. - CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` - - // The date of the automatic snapshot in YYYY-MM-DD format. - Date *string `locationName:"date" type:"string"` - - // An array of objects that describe the block storage disks attached to the - // instance when the automatic snapshot was created. - FromAttachedDisks []*AttachedDisk `locationName:"fromAttachedDisks" type:"list"` - - // The status of the automatic snapshot. - Status *string `locationName:"status" type:"string" enum:"AutoSnapshotStatus"` -} - -// String returns the string representation -func (s AutoSnapshotDetails) String() string { - return awsutil.Prettify(s) -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCertificateInput"} + if s.CertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateName")) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } -// GoString returns the string representation -func (s AutoSnapshotDetails) GoString() string { - return s.String() + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetCreatedAt sets the CreatedAt field's value. 
-func (s *AutoSnapshotDetails) SetCreatedAt(v time.Time) *AutoSnapshotDetails { - s.CreatedAt = &v +// SetCertificateName sets the CertificateName field's value. +func (s *CreateCertificateInput) SetCertificateName(v string) *CreateCertificateInput { + s.CertificateName = &v return s } -// SetDate sets the Date field's value. -func (s *AutoSnapshotDetails) SetDate(v string) *AutoSnapshotDetails { - s.Date = &v +// SetDomainName sets the DomainName field's value. +func (s *CreateCertificateInput) SetDomainName(v string) *CreateCertificateInput { + s.DomainName = &v return s } -// SetFromAttachedDisks sets the FromAttachedDisks field's value. -func (s *AutoSnapshotDetails) SetFromAttachedDisks(v []*AttachedDisk) *AutoSnapshotDetails { - s.FromAttachedDisks = v +// SetSubjectAlternativeNames sets the SubjectAlternativeNames field's value. +func (s *CreateCertificateInput) SetSubjectAlternativeNames(v []*string) *CreateCertificateInput { + s.SubjectAlternativeNames = v return s } -// SetStatus sets the Status field's value. -func (s *AutoSnapshotDetails) SetStatus(v string) *AutoSnapshotDetails { - s.Status = &v +// SetTags sets the Tags field's value. +func (s *CreateCertificateInput) SetTags(v []*Tag) *CreateCertificateInput { + s.Tags = v return s } -// Describes an Availability Zone. -type AvailabilityZone struct { +type CreateCertificateOutput struct { _ struct{} `type:"structure"` - // The state of the Availability Zone. - State *string `locationName:"state" type:"string"` + // An object that describes the certificate created. + Certificate *CertificateSummary `locationName:"certificate" type:"structure"` - // The name of the Availability Zone. The format is us-east-2a (case-sensitive). - ZoneName *string `locationName:"zoneName" type:"string"` + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s AvailabilityZone) String() string { +func (s CreateCertificateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AvailabilityZone) GoString() string { +func (s CreateCertificateOutput) GoString() string { return s.String() } -// SetState sets the State field's value. -func (s *AvailabilityZone) SetState(v string) *AvailabilityZone { - s.State = &v +// SetCertificate sets the Certificate field's value. +func (s *CreateCertificateOutput) SetCertificate(v *CertificateSummary) *CreateCertificateOutput { + s.Certificate = v return s } -// SetZoneName sets the ZoneName field's value. -func (s *AvailabilityZone) SetZoneName(v string) *AvailabilityZone { - s.ZoneName = &v +// SetOperations sets the Operations field's value. +func (s *CreateCertificateOutput) SetOperations(v []*Operation) *CreateCertificateOutput { + s.Operations = v return s } -// Describes a blueprint (a virtual private server image). -type Blueprint struct { +type CreateCloudFormationStackInput struct { _ struct{} `type:"structure"` - // The ID for the virtual private server image (e.g., app_wordpress_4_4 or app_lamp_7_0). - BlueprintId *string `locationName:"blueprintId" type:"string"` - - // The description of the blueprint. - Description *string `locationName:"description" type:"string"` - - // The group name of the blueprint (e.g., amazon-linux). 
- Group *string `locationName:"group" type:"string"` - - // A Boolean value indicating whether the blueprint is active. Inactive blueprints - // are listed to support customers with existing instances but are not necessarily - // available for launch of new instances. Blueprints are marked inactive when - // they become outdated due to operating system updates or new application releases. - IsActive *bool `locationName:"isActive" type:"boolean"` - - // The end-user license agreement URL for the image or blueprint. - LicenseUrl *string `locationName:"licenseUrl" type:"string"` - - // The minimum bundle power required to run this blueprint. For example, you - // need a bundle with a power value of 500 or more to create an instance that - // uses a blueprint with a minimum power value of 500. 0 indicates that the - // blueprint runs on all instance sizes. - MinPower *int64 `locationName:"minPower" type:"integer"` + // An array of parameters that will be used to create the new Amazon EC2 instance. + // You can only pass one instance entry at a time in this array. You will get + // an invalid parameter error if you pass more than one instance entry in this + // array. + // + // Instances is a required field + Instances []*InstanceEntry `locationName:"instances" type:"list" required:"true"` +} - // The friendly name of the blueprint (e.g., Amazon Linux). - Name *string `locationName:"name" type:"string"` +// String returns the string representation +func (s CreateCloudFormationStackInput) String() string { + return awsutil.Prettify(s) +} - // The operating system platform (either Linux/Unix-based or Windows Server-based) - // of the blueprint. - Platform *string `locationName:"platform" type:"string" enum:"InstancePlatform"` +// GoString returns the string representation +func (s CreateCloudFormationStackInput) GoString() string { + return s.String() +} - // The product URL to learn more about the image or blueprint. - ProductUrl *string `locationName:"productUrl" type:"string"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCloudFormationStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCloudFormationStackInput"} + if s.Instances == nil { + invalidParams.Add(request.NewErrParamRequired("Instances")) + } + if s.Instances != nil { + for i, v := range s.Instances { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Instances", i), err.(request.ErrInvalidParams)) + } + } + } - // The type of the blueprint (e.g., os or app). - Type *string `locationName:"type" type:"string" enum:"BlueprintType"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // The version number of the operating system, application, or stack (e.g., - // 2016.03.0). - Version *string `locationName:"version" type:"string"` +// SetInstances sets the Instances field's value. +func (s *CreateCloudFormationStackInput) SetInstances(v []*InstanceEntry) *CreateCloudFormationStackInput { + s.Instances = v + return s +} - // The version code. - VersionCode *string `locationName:"versionCode" type:"string"` +type CreateCloudFormationStackOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. 
+ Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s Blueprint) String() string { +func (s CreateCloudFormationStackOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Blueprint) GoString() string { +func (s CreateCloudFormationStackOutput) GoString() string { return s.String() } -// SetBlueprintId sets the BlueprintId field's value. -func (s *Blueprint) SetBlueprintId(v string) *Blueprint { - s.BlueprintId = &v +// SetOperations sets the Operations field's value. +func (s *CreateCloudFormationStackOutput) SetOperations(v []*Operation) *CreateCloudFormationStackOutput { + s.Operations = v return s } -// SetDescription sets the Description field's value. -func (s *Blueprint) SetDescription(v string) *Blueprint { - s.Description = &v - return s -} +type CreateContactMethodInput struct { + _ struct{} `type:"structure"` -// SetGroup sets the Group field's value. -func (s *Blueprint) SetGroup(v string) *Blueprint { - s.Group = &v - return s + // The destination of the contact method, such as an email address or a mobile + // phone number. + // + // Use the E.164 format when specifying a mobile phone number. E.164 is a standard + // for the phone number structure used for international telecommunication. + // Phone numbers that follow this format can have a maximum of 15 digits, and + // they are prefixed with the plus character (+) and the country code. For example, + // a U.S. phone number in E.164 format would be specified as +1XXX5550100. For + // more information, see E.164 (https://en.wikipedia.org/wiki/E.164) on Wikipedia. + // + // ContactEndpoint is a required field + ContactEndpoint *string `locationName:"contactEndpoint" min:"1" type:"string" required:"true"` + + // The protocol of the contact method, such as Email or SMS (text messaging). + // + // The SMS protocol is supported only in the following AWS Regions. + // + // * US East (N. Virginia) (us-east-1) + // + // * US West (Oregon) (us-west-2) + // + // * Europe (Ireland) (eu-west-1) + // + // * Asia Pacific (Tokyo) (ap-northeast-1) + // + // * Asia Pacific (Singapore) (ap-southeast-1) + // + // * Asia Pacific (Sydney) (ap-southeast-2) + // + // For a list of countries/regions where SMS text messages can be sent, and + // the latest AWS Regions where SMS text messaging is supported, see Supported + // Regions and Countries (https://docs.aws.amazon.com/sns/latest/dg/sns-supported-regions-countries.html) + // in the Amazon SNS Developer Guide. + // + // For more information about notifications in Amazon Lightsail, see Notifications + // in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-notifications). + // + // Protocol is a required field + Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"ContactProtocol"` } -// SetIsActive sets the IsActive field's value. -func (s *Blueprint) SetIsActive(v bool) *Blueprint { - s.IsActive = &v - return s +// String returns the string representation +func (s CreateContactMethodInput) String() string { + return awsutil.Prettify(s) } -// SetLicenseUrl sets the LicenseUrl field's value. -func (s *Blueprint) SetLicenseUrl(v string) *Blueprint { - s.LicenseUrl = &v - return s +// GoString returns the string representation +func (s CreateContactMethodInput) GoString() string { + return s.String() } -// SetMinPower sets the MinPower field's value. 
-func (s *Blueprint) SetMinPower(v int64) *Blueprint { - s.MinPower = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateContactMethodInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateContactMethodInput"} + if s.ContactEndpoint == nil { + invalidParams.Add(request.NewErrParamRequired("ContactEndpoint")) + } + if s.ContactEndpoint != nil && len(*s.ContactEndpoint) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ContactEndpoint", 1)) + } + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetName sets the Name field's value. -func (s *Blueprint) SetName(v string) *Blueprint { - s.Name = &v +// SetContactEndpoint sets the ContactEndpoint field's value. +func (s *CreateContactMethodInput) SetContactEndpoint(v string) *CreateContactMethodInput { + s.ContactEndpoint = &v return s } -// SetPlatform sets the Platform field's value. -func (s *Blueprint) SetPlatform(v string) *Blueprint { - s.Platform = &v +// SetProtocol sets the Protocol field's value. +func (s *CreateContactMethodInput) SetProtocol(v string) *CreateContactMethodInput { + s.Protocol = &v return s } -// SetProductUrl sets the ProductUrl field's value. -func (s *Blueprint) SetProductUrl(v string) *Blueprint { - s.ProductUrl = &v - return s +type CreateContactMethodOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` } -// SetType sets the Type field's value. -func (s *Blueprint) SetType(v string) *Blueprint { - s.Type = &v - return s +// String returns the string representation +func (s CreateContactMethodOutput) String() string { + return awsutil.Prettify(s) } -// SetVersion sets the Version field's value. -func (s *Blueprint) SetVersion(v string) *Blueprint { - s.Version = &v - return s +// GoString returns the string representation +func (s CreateContactMethodOutput) GoString() string { + return s.String() } -// SetVersionCode sets the VersionCode field's value. -func (s *Blueprint) SetVersionCode(v string) *Blueprint { - s.VersionCode = &v +// SetOperations sets the Operations field's value. +func (s *CreateContactMethodOutput) SetOperations(v []*Operation) *CreateContactMethodOutput { + s.Operations = v return s } -// Describes a bundle, which is a set of specs describing your virtual private -// server (or instance). -type Bundle struct { +type CreateDiskFromSnapshotInput struct { _ struct{} `type:"structure"` - // The bundle ID (e.g., micro_1_0). - BundleId *string `locationName:"bundleId" type:"string"` - - // The number of vCPUs included in the bundle (e.g., 2). - CpuCount *int64 `locationName:"cpuCount" type:"integer"` - - // The size of the SSD (e.g., 30). - DiskSizeInGb *int64 `locationName:"diskSizeInGb" type:"integer"` + // An array of objects that represent the add-ons to enable for the new disk. + AddOns []*AddOnRequest `locationName:"addOns" type:"list"` - // The Amazon EC2 instance type (e.g., t2.micro). - InstanceType *string `locationName:"instanceType" type:"string"` + // The Availability Zone where you want to create the disk (e.g., us-east-2a). + // Choose the same Availability Zone as the Lightsail instance where you want + // to create the disk. 
+ // + // Use the GetRegions operation to list the Availability Zones where Lightsail + // is currently available. + // + // AvailabilityZone is a required field + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` - // A Boolean value indicating whether the bundle is active. - IsActive *bool `locationName:"isActive" type:"boolean"` + // The unique Lightsail disk name (e.g., my-disk). + // + // DiskName is a required field + DiskName *string `locationName:"diskName" type:"string" required:"true"` - // A friendly name for the bundle (e.g., Micro). - Name *string `locationName:"name" type:"string"` + // The name of the disk snapshot (e.g., my-snapshot) from which to create the + // new storage disk. + // + // Constraint: + // + // * This parameter cannot be defined together with the source disk name + // parameter. The disk snapshot name and source disk name parameters are + // mutually exclusive. + DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string"` - // A numeric value that represents the power of the bundle (e.g., 500). You - // can use the bundle's power value in conjunction with a blueprint's minimum - // power value to determine whether the blueprint will run on the bundle. For - // example, you need a bundle with a power value of 500 or more to create an - // instance that uses a blueprint with a minimum power value of 500. - Power *int64 `locationName:"power" type:"integer"` + // The date of the automatic snapshot to use for the new disk. Use the get auto + // snapshots operation to identify the dates of the available automatic snapshots. + // + // Constraints: + // + // * Must be specified in YYYY-MM-DD format. + // + // * This parameter cannot be defined together with the use latest restorable + // auto snapshot parameter. The restore date and use latest restorable auto + // snapshot parameters are mutually exclusive. + // + // * Define this parameter only when creating a new disk from an automatic + // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + RestoreDate *string `locationName:"restoreDate" type:"string"` - // The price in US dollars (e.g., 5.0). - Price *float64 `locationName:"price" type:"float"` + // The size of the disk in GB (e.g., 32). + // + // SizeInGb is a required field + SizeInGb *int64 `locationName:"sizeInGb" type:"integer" required:"true"` - // The amount of RAM in GB (e.g., 2.0). - RamSizeInGb *float64 `locationName:"ramSizeInGb" type:"float"` + // The name of the source disk from which the source automatic snapshot was + // created. + // + // Constraints: + // + // * This parameter cannot be defined together with the disk snapshot name + // parameter. The source disk name and disk snapshot name parameters are + // mutually exclusive. + // + // * Define this parameter only when creating a new disk from an automatic + // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + SourceDiskName *string `locationName:"sourceDiskName" type:"string"` - // The operating system platform (Linux/Unix-based or Windows Server-based) - // that the bundle supports. You can only launch a WINDOWS bundle on a blueprint - // that supports the WINDOWS platform. LINUX_UNIX blueprints require a LINUX_UNIX - // bundle. 
- SupportedPlatforms []*string `locationName:"supportedPlatforms" type:"list"` + // The tag keys and optional values to add to the resource during create. + // + // Use the TagResource action to tag a resource after it's created. + Tags []*Tag `locationName:"tags" type:"list"` - // The data transfer rate per month in GB (e.g., 2000). - TransferPerMonthInGb *int64 `locationName:"transferPerMonthInGb" type:"integer"` + // A Boolean value to indicate whether to use the latest available automatic + // snapshot. + // + // Constraints: + // + // * This parameter cannot be defined together with the restore date parameter. + // The use latest restorable auto snapshot and restore date parameters are + // mutually exclusive. + // + // * Define this parameter only when creating a new disk from an automatic + // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). + UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"` } // String returns the string representation -func (s Bundle) String() string { +func (s CreateDiskFromSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Bundle) GoString() string { +func (s CreateDiskFromSnapshotInput) GoString() string { return s.String() } -// SetBundleId sets the BundleId field's value. -func (s *Bundle) SetBundleId(v string) *Bundle { - s.BundleId = &v - return s -} - -// SetCpuCount sets the CpuCount field's value. -func (s *Bundle) SetCpuCount(v int64) *Bundle { - s.CpuCount = &v - return s -} - -// SetDiskSizeInGb sets the DiskSizeInGb field's value. -func (s *Bundle) SetDiskSizeInGb(v int64) *Bundle { - s.DiskSizeInGb = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDiskFromSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDiskFromSnapshotInput"} + if s.AvailabilityZone == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) + } + if s.DiskName == nil { + invalidParams.Add(request.NewErrParamRequired("DiskName")) + } + if s.SizeInGb == nil { + invalidParams.Add(request.NewErrParamRequired("SizeInGb")) + } + if s.AddOns != nil { + for i, v := range s.AddOns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddOns", i), err.(request.ErrInvalidParams)) + } + } + } -// SetInstanceType sets the InstanceType field's value. -func (s *Bundle) SetInstanceType(v string) *Bundle { - s.InstanceType = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetIsActive sets the IsActive field's value. -func (s *Bundle) SetIsActive(v bool) *Bundle { - s.IsActive = &v +// SetAddOns sets the AddOns field's value. +func (s *CreateDiskFromSnapshotInput) SetAddOns(v []*AddOnRequest) *CreateDiskFromSnapshotInput { + s.AddOns = v return s } -// SetName sets the Name field's value. -func (s *Bundle) SetName(v string) *Bundle { - s.Name = &v +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *CreateDiskFromSnapshotInput) SetAvailabilityZone(v string) *CreateDiskFromSnapshotInput { + s.AvailabilityZone = &v return s } -// SetPower sets the Power field's value. -func (s *Bundle) SetPower(v int64) *Bundle { - s.Power = &v +// SetDiskName sets the DiskName field's value. 
+func (s *CreateDiskFromSnapshotInput) SetDiskName(v string) *CreateDiskFromSnapshotInput { + s.DiskName = &v return s } -// SetPrice sets the Price field's value. -func (s *Bundle) SetPrice(v float64) *Bundle { - s.Price = &v +// SetDiskSnapshotName sets the DiskSnapshotName field's value. +func (s *CreateDiskFromSnapshotInput) SetDiskSnapshotName(v string) *CreateDiskFromSnapshotInput { + s.DiskSnapshotName = &v return s } -// SetRamSizeInGb sets the RamSizeInGb field's value. -func (s *Bundle) SetRamSizeInGb(v float64) *Bundle { - s.RamSizeInGb = &v +// SetRestoreDate sets the RestoreDate field's value. +func (s *CreateDiskFromSnapshotInput) SetRestoreDate(v string) *CreateDiskFromSnapshotInput { + s.RestoreDate = &v return s } -// SetSupportedPlatforms sets the SupportedPlatforms field's value. -func (s *Bundle) SetSupportedPlatforms(v []*string) *Bundle { - s.SupportedPlatforms = v +// SetSizeInGb sets the SizeInGb field's value. +func (s *CreateDiskFromSnapshotInput) SetSizeInGb(v int64) *CreateDiskFromSnapshotInput { + s.SizeInGb = &v return s } -// SetTransferPerMonthInGb sets the TransferPerMonthInGb field's value. -func (s *Bundle) SetTransferPerMonthInGb(v int64) *Bundle { - s.TransferPerMonthInGb = &v +// SetSourceDiskName sets the SourceDiskName field's value. +func (s *CreateDiskFromSnapshotInput) SetSourceDiskName(v string) *CreateDiskFromSnapshotInput { + s.SourceDiskName = &v return s } -type CloseInstancePublicPortsInput struct { - _ struct{} `type:"structure"` - - // The name of the instance on which you're attempting to close the public ports. - // - // InstanceName is a required field - InstanceName *string `locationName:"instanceName" type:"string" required:"true"` - - // Information about the public port you are trying to close. - // - // PortInfo is a required field - PortInfo *PortInfo `locationName:"portInfo" type:"structure" required:"true"` -} - -// String returns the string representation -func (s CloseInstancePublicPortsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CloseInstancePublicPortsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CloseInstancePublicPortsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CloseInstancePublicPortsInput"} - if s.InstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceName")) - } - if s.PortInfo == nil { - invalidParams.Add(request.NewErrParamRequired("PortInfo")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetInstanceName sets the InstanceName field's value. -func (s *CloseInstancePublicPortsInput) SetInstanceName(v string) *CloseInstancePublicPortsInput { - s.InstanceName = &v +// SetTags sets the Tags field's value. +func (s *CreateDiskFromSnapshotInput) SetTags(v []*Tag) *CreateDiskFromSnapshotInput { + s.Tags = v return s } -// SetPortInfo sets the PortInfo field's value. -func (s *CloseInstancePublicPortsInput) SetPortInfo(v *PortInfo) *CloseInstancePublicPortsInput { - s.PortInfo = v +// SetUseLatestRestorableAutoSnapshot sets the UseLatestRestorableAutoSnapshot field's value. 
+func (s *CreateDiskFromSnapshotInput) SetUseLatestRestorableAutoSnapshot(v bool) *CreateDiskFromSnapshotInput { + s.UseLatestRestorableAutoSnapshot = &v return s } -type CloseInstancePublicPortsOutput struct { +type CreateDiskFromSnapshotOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. - Operation *Operation `locationName:"operation" type:"structure"` + Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CloseInstancePublicPortsOutput) String() string { +func (s CreateDiskFromSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CloseInstancePublicPortsOutput) GoString() string { +func (s CreateDiskFromSnapshotOutput) GoString() string { return s.String() } -// SetOperation sets the Operation field's value. -func (s *CloseInstancePublicPortsOutput) SetOperation(v *Operation) *CloseInstancePublicPortsOutput { - s.Operation = v +// SetOperations sets the Operations field's value. +func (s *CreateDiskFromSnapshotOutput) SetOperations(v []*Operation) *CreateDiskFromSnapshotOutput { + s.Operations = v return s } -// Describes a CloudFormation stack record created as a result of the create -// cloud formation stack operation. -// -// A CloudFormation stack record provides information about the AWS CloudFormation -// stack used to create a new Amazon Elastic Compute Cloud instance from an -// exported Lightsail instance snapshot. -type CloudFormationStackRecord struct { +type CreateDiskInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the CloudFormation stack record. - Arn *string `locationName:"arn" type:"string"` - - // The date when the CloudFormation stack record was created. - CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` - - // A list of objects describing the destination service, which is AWS CloudFormation, - // and the Amazon Resource Name (ARN) of the AWS CloudFormation stack. - DestinationInfo *DestinationInfo `locationName:"destinationInfo" type:"structure"` - - // A list of objects describing the Availability Zone and AWS Region of the - // CloudFormation stack record. - Location *ResourceLocation `locationName:"location" type:"structure"` + // An array of objects that represent the add-ons to enable for the new disk. + AddOns []*AddOnRequest `locationName:"addOns" type:"list"` - // The name of the CloudFormation stack record. It starts with CloudFormationStackRecord - // followed by a GUID. - Name *string `locationName:"name" type:"string"` + // The Availability Zone where you want to create the disk (e.g., us-east-2a). + // Use the same Availability Zone as the Lightsail instance to which you want + // to attach the disk. + // + // Use the get regions operation to list the Availability Zones where Lightsail + // is currently available. + // + // AvailabilityZone is a required field + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` - // The Lightsail resource type (e.g., CloudFormationStackRecord). - ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + // The unique Lightsail disk name (e.g., my-disk). 
+ // + // DiskName is a required field + DiskName *string `locationName:"diskName" type:"string" required:"true"` - // A list of objects describing the source of the CloudFormation stack record. - SourceInfo []*CloudFormationStackRecordSourceInfo `locationName:"sourceInfo" type:"list"` + // The size of the disk in GB (e.g., 32). + // + // SizeInGb is a required field + SizeInGb *int64 `locationName:"sizeInGb" type:"integer" required:"true"` - // The current state of the CloudFormation stack record. - State *string `locationName:"state" type:"string" enum:"RecordState"` + // The tag keys and optional values to add to the resource during create. + // + // Use the TagResource action to tag a resource after it's created. + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s CloudFormationStackRecord) String() string { +func (s CreateDiskInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CloudFormationStackRecord) GoString() string { +func (s CreateDiskInput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *CloudFormationStackRecord) SetArn(v string) *CloudFormationStackRecord { - s.Arn = &v - return s -} - -// SetCreatedAt sets the CreatedAt field's value. -func (s *CloudFormationStackRecord) SetCreatedAt(v time.Time) *CloudFormationStackRecord { - s.CreatedAt = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDiskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDiskInput"} + if s.AvailabilityZone == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) + } + if s.DiskName == nil { + invalidParams.Add(request.NewErrParamRequired("DiskName")) + } + if s.SizeInGb == nil { + invalidParams.Add(request.NewErrParamRequired("SizeInGb")) + } + if s.AddOns != nil { + for i, v := range s.AddOns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddOns", i), err.(request.ErrInvalidParams)) + } + } + } -// SetDestinationInfo sets the DestinationInfo field's value. -func (s *CloudFormationStackRecord) SetDestinationInfo(v *DestinationInfo) *CloudFormationStackRecord { - s.DestinationInfo = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLocation sets the Location field's value. -func (s *CloudFormationStackRecord) SetLocation(v *ResourceLocation) *CloudFormationStackRecord { - s.Location = v +// SetAddOns sets the AddOns field's value. +func (s *CreateDiskInput) SetAddOns(v []*AddOnRequest) *CreateDiskInput { + s.AddOns = v return s } -// SetName sets the Name field's value. -func (s *CloudFormationStackRecord) SetName(v string) *CloudFormationStackRecord { - s.Name = &v +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *CreateDiskInput) SetAvailabilityZone(v string) *CreateDiskInput { + s.AvailabilityZone = &v return s } -// SetResourceType sets the ResourceType field's value. -func (s *CloudFormationStackRecord) SetResourceType(v string) *CloudFormationStackRecord { - s.ResourceType = &v +// SetDiskName sets the DiskName field's value. +func (s *CreateDiskInput) SetDiskName(v string) *CreateDiskInput { + s.DiskName = &v return s } -// SetSourceInfo sets the SourceInfo field's value. 
-func (s *CloudFormationStackRecord) SetSourceInfo(v []*CloudFormationStackRecordSourceInfo) *CloudFormationStackRecord { - s.SourceInfo = v +// SetSizeInGb sets the SizeInGb field's value. +func (s *CreateDiskInput) SetSizeInGb(v int64) *CreateDiskInput { + s.SizeInGb = &v return s } -// SetState sets the State field's value. -func (s *CloudFormationStackRecord) SetState(v string) *CloudFormationStackRecord { - s.State = &v +// SetTags sets the Tags field's value. +func (s *CreateDiskInput) SetTags(v []*Tag) *CreateDiskInput { + s.Tags = v return s } -// Describes the source of a CloudFormation stack record (i.e., the export snapshot -// record). -type CloudFormationStackRecordSourceInfo struct { +type CreateDiskOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the export snapshot record. - Arn *string `locationName:"arn" type:"string"` - - // The name of the record. - Name *string `locationName:"name" type:"string"` - - // The Lightsail resource type (e.g., ExportSnapshotRecord). - ResourceType *string `locationName:"resourceType" type:"string" enum:"CloudFormationStackRecordSourceType"` + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CloudFormationStackRecordSourceInfo) String() string { +func (s CreateDiskOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CloudFormationStackRecordSourceInfo) GoString() string { +func (s CreateDiskOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *CloudFormationStackRecordSourceInfo) SetArn(v string) *CloudFormationStackRecordSourceInfo { - s.Arn = &v - return s -} - -// SetName sets the Name field's value. -func (s *CloudFormationStackRecordSourceInfo) SetName(v string) *CloudFormationStackRecordSourceInfo { - s.Name = &v - return s -} - -// SetResourceType sets the ResourceType field's value. -func (s *CloudFormationStackRecordSourceInfo) SetResourceType(v string) *CloudFormationStackRecordSourceInfo { - s.ResourceType = &v +// SetOperations sets the Operations field's value. +func (s *CreateDiskOutput) SetOperations(v []*Operation) *CreateDiskOutput { + s.Operations = v return s } -// Describes a contact method. -// -// A contact method is a way to send you notifications. For more information, -// see Notifications in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-notifications). -type ContactMethod struct { +type CreateDiskSnapshotInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the contact method. - Arn *string `locationName:"arn" type:"string"` - - // The destination of the contact method, such as an email address or a mobile - // phone number. - ContactEndpoint *string `locationName:"contactEndpoint" type:"string"` - - // The timestamp when the contact method was created. - CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` - - // Describes the resource location. - Location *ResourceLocation `locationName:"location" type:"structure"` - - // The name of the contact method. - Name *string `locationName:"name" type:"string"` - - // The protocol of the contact method, such as email or SMS (text messaging). 
- Protocol *string `locationName:"protocol" type:"string" enum:"ContactProtocol"` - - // The Lightsail resource type (e.g., ContactMethod). - ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` - - // The current status of the contact method. - // - // A contact method has the following possible status: + // The unique name of the source disk (e.g., Disk-Virginia-1). // - // * PendingVerification — The contact method has not yet been verified, - // and the verification has not yet expired. + // This parameter cannot be defined together with the instance name parameter. + // The disk name and instance name parameters are mutually exclusive. + DiskName *string `locationName:"diskName" type:"string"` + + // The name of the destination disk snapshot (e.g., my-disk-snapshot) based + // on the source disk. // - // * Valid — The contact method has been verified. + // DiskSnapshotName is a required field + DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string" required:"true"` + + // The unique name of the source instance (e.g., Amazon_Linux-512MB-Virginia-1). + // When this is defined, a snapshot of the instance's system volume is created. // - // * InValid — An attempt was made to verify the contact method, but the - // verification has expired. - Status *string `locationName:"status" type:"string" enum:"ContactMethodStatus"` + // This parameter cannot be defined together with the disk name parameter. The + // instance name and disk name parameters are mutually exclusive. + InstanceName *string `locationName:"instanceName" type:"string"` - // The support code. Include this code in your email to support when you have - // questions about your Lightsail contact method. This code enables our support - // team to look up your Lightsail information more easily. - SupportCode *string `locationName:"supportCode" type:"string"` + // The tag keys and optional values to add to the resource during create. + // + // Use the TagResource action to tag a resource after it's created. + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s ContactMethod) String() string { +func (s CreateDiskSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ContactMethod) GoString() string { +func (s CreateDiskSnapshotInput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *ContactMethod) SetArn(v string) *ContactMethod { - s.Arn = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDiskSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDiskSnapshotInput"} + if s.DiskSnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("DiskSnapshotName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetContactEndpoint sets the ContactEndpoint field's value. -func (s *ContactMethod) SetContactEndpoint(v string) *ContactMethod { - s.ContactEndpoint = &v +// SetDiskName sets the DiskName field's value. +func (s *CreateDiskSnapshotInput) SetDiskName(v string) *CreateDiskSnapshotInput { + s.DiskName = &v return s } -// SetCreatedAt sets the CreatedAt field's value. -func (s *ContactMethod) SetCreatedAt(v time.Time) *ContactMethod { - s.CreatedAt = &v +// SetDiskSnapshotName sets the DiskSnapshotName field's value. 
+func (s *CreateDiskSnapshotInput) SetDiskSnapshotName(v string) *CreateDiskSnapshotInput { + s.DiskSnapshotName = &v return s } -// SetLocation sets the Location field's value. -func (s *ContactMethod) SetLocation(v *ResourceLocation) *ContactMethod { - s.Location = v +// SetInstanceName sets the InstanceName field's value. +func (s *CreateDiskSnapshotInput) SetInstanceName(v string) *CreateDiskSnapshotInput { + s.InstanceName = &v return s } -// SetName sets the Name field's value. -func (s *ContactMethod) SetName(v string) *ContactMethod { - s.Name = &v +// SetTags sets the Tags field's value. +func (s *CreateDiskSnapshotInput) SetTags(v []*Tag) *CreateDiskSnapshotInput { + s.Tags = v return s } -// SetProtocol sets the Protocol field's value. -func (s *ContactMethod) SetProtocol(v string) *ContactMethod { - s.Protocol = &v - return s +type CreateDiskSnapshotOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` } -// SetResourceType sets the ResourceType field's value. -func (s *ContactMethod) SetResourceType(v string) *ContactMethod { - s.ResourceType = &v - return s +// String returns the string representation +func (s CreateDiskSnapshotOutput) String() string { + return awsutil.Prettify(s) } -// SetStatus sets the Status field's value. -func (s *ContactMethod) SetStatus(v string) *ContactMethod { - s.Status = &v - return s +// GoString returns the string representation +func (s CreateDiskSnapshotOutput) GoString() string { + return s.String() } -// SetSupportCode sets the SupportCode field's value. -func (s *ContactMethod) SetSupportCode(v string) *ContactMethod { - s.SupportCode = &v +// SetOperations sets the Operations field's value. +func (s *CreateDiskSnapshotOutput) SetOperations(v []*Operation) *CreateDiskSnapshotOutput { + s.Operations = v return s } -type CopySnapshotInput struct { +type CreateDistributionInput struct { _ struct{} `type:"structure"` - // The date of the source automatic snapshot to copy. Use the get auto snapshots - // operation to identify the dates of the available automatic snapshots. - // - // Constraints: + // The bundle ID to use for the distribution. // - // * Must be specified in YYYY-MM-DD format. + // A distribution bundle describes the specifications of your distribution, + // such as the monthly cost and monthly network transfer quota. // - // * This parameter cannot be defined together with the use latest restorable - // auto snapshot parameter. The restore date and use latest restorable auto - // snapshot parameters are mutually exclusive. + // Use the GetDistributionBundles action to get a list of distribution bundle + // IDs that you can specify. // - // * Define this parameter only when copying an automatic snapshot as a manual - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). - RestoreDate *string `locationName:"restoreDate" type:"string"` + // BundleId is a required field + BundleId *string `locationName:"bundleId" type:"string" required:"true"` - // The AWS Region where the source manual or automatic snapshot is located. 
- // - // SourceRegion is a required field - SourceRegion *string `locationName:"sourceRegion" type:"string" required:"true" enum:"RegionName"` + // An object that describes the cache behavior settings for the distribution. + CacheBehaviorSettings *CacheSettings `locationName:"cacheBehaviorSettings" type:"structure"` - // The name of the source instance or disk from which the source automatic snapshot - // was created. - // - // Constraint: - // - // * Define this parameter only when copying an automatic snapshot as a manual - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). - SourceResourceName *string `locationName:"sourceResourceName" type:"string"` + // An array of objects that describe the per-path cache behavior for the distribution. + CacheBehaviors []*CacheBehaviorPerPath `locationName:"cacheBehaviors" type:"list"` - // The name of the source manual snapshot to copy. - // - // Constraint: + // An object that describes the default cache behavior for the distribution. // - // * Define this parameter only when copying a manual snapshot as another - // manual snapshot. - SourceSnapshotName *string `locationName:"sourceSnapshotName" type:"string"` + // DefaultCacheBehavior is a required field + DefaultCacheBehavior *CacheBehavior `locationName:"defaultCacheBehavior" type:"structure" required:"true"` - // The name of the new manual snapshot to be created as a copy. + // The name for the distribution. // - // TargetSnapshotName is a required field - TargetSnapshotName *string `locationName:"targetSnapshotName" type:"string" required:"true"` - - // A Boolean value to indicate whether to use the latest available automatic - // snapshot of the specified source instance or disk. + // DistributionName is a required field + DistributionName *string `locationName:"distributionName" type:"string" required:"true"` + + // An object that describes the origin resource for the distribution, such as + // a Lightsail instance or load balancer. // - // Constraints: + // The distribution pulls, caches, and serves content from the origin. // - // * This parameter cannot be defined together with the restore date parameter. - // The use latest restorable auto snapshot and restore date parameters are - // mutually exclusive. + // Origin is a required field + Origin *InputOrigin `locationName:"origin" type:"structure" required:"true"` + + // The tag keys and optional values to add to the distribution during create. // - // * Define this parameter only when copying an automatic snapshot as a manual - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-keeping-automatic-snapshots). - UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"` + // Use the TagResource action to tag a resource after it's created. + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s CopySnapshotInput) String() string { +func (s CreateDistributionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CopySnapshotInput) GoString() string { +func (s CreateDistributionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CopySnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CopySnapshotInput"} - if s.SourceRegion == nil { - invalidParams.Add(request.NewErrParamRequired("SourceRegion")) +func (s *CreateDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDistributionInput"} + if s.BundleId == nil { + invalidParams.Add(request.NewErrParamRequired("BundleId")) } - if s.TargetSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("TargetSnapshotName")) + if s.DefaultCacheBehavior == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultCacheBehavior")) + } + if s.DistributionName == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionName")) + } + if s.Origin == nil { + invalidParams.Add(request.NewErrParamRequired("Origin")) } if invalidParams.Len() > 0 { @@ -13893,104 +16926,116 @@ func (s *CopySnapshotInput) Validate() error { return nil } -// SetRestoreDate sets the RestoreDate field's value. -func (s *CopySnapshotInput) SetRestoreDate(v string) *CopySnapshotInput { - s.RestoreDate = &v +// SetBundleId sets the BundleId field's value. +func (s *CreateDistributionInput) SetBundleId(v string) *CreateDistributionInput { + s.BundleId = &v return s } -// SetSourceRegion sets the SourceRegion field's value. -func (s *CopySnapshotInput) SetSourceRegion(v string) *CopySnapshotInput { - s.SourceRegion = &v +// SetCacheBehaviorSettings sets the CacheBehaviorSettings field's value. +func (s *CreateDistributionInput) SetCacheBehaviorSettings(v *CacheSettings) *CreateDistributionInput { + s.CacheBehaviorSettings = v return s } -// SetSourceResourceName sets the SourceResourceName field's value. -func (s *CopySnapshotInput) SetSourceResourceName(v string) *CopySnapshotInput { - s.SourceResourceName = &v +// SetCacheBehaviors sets the CacheBehaviors field's value. +func (s *CreateDistributionInput) SetCacheBehaviors(v []*CacheBehaviorPerPath) *CreateDistributionInput { + s.CacheBehaviors = v return s } -// SetSourceSnapshotName sets the SourceSnapshotName field's value. -func (s *CopySnapshotInput) SetSourceSnapshotName(v string) *CopySnapshotInput { - s.SourceSnapshotName = &v +// SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value. +func (s *CreateDistributionInput) SetDefaultCacheBehavior(v *CacheBehavior) *CreateDistributionInput { + s.DefaultCacheBehavior = v return s } -// SetTargetSnapshotName sets the TargetSnapshotName field's value. -func (s *CopySnapshotInput) SetTargetSnapshotName(v string) *CopySnapshotInput { - s.TargetSnapshotName = &v +// SetDistributionName sets the DistributionName field's value. +func (s *CreateDistributionInput) SetDistributionName(v string) *CreateDistributionInput { + s.DistributionName = &v return s } -// SetUseLatestRestorableAutoSnapshot sets the UseLatestRestorableAutoSnapshot field's value. -func (s *CopySnapshotInput) SetUseLatestRestorableAutoSnapshot(v bool) *CopySnapshotInput { - s.UseLatestRestorableAutoSnapshot = &v +// SetOrigin sets the Origin field's value. +func (s *CreateDistributionInput) SetOrigin(v *InputOrigin) *CreateDistributionInput { + s.Origin = v return s } -type CopySnapshotOutput struct { +// SetTags sets the Tags field's value. +func (s *CreateDistributionInput) SetTags(v []*Tag) *CreateDistributionInput { + s.Tags = v + return s +} + +type CreateDistributionOutput struct { _ struct{} `type:"structure"` + // An object that describes the distribution created. 
+ Distribution *LightsailDistribution `locationName:"distribution" type:"structure"` + // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. - Operations []*Operation `locationName:"operations" type:"list"` + Operation *Operation `locationName:"operation" type:"structure"` } // String returns the string representation -func (s CopySnapshotOutput) String() string { +func (s CreateDistributionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CopySnapshotOutput) GoString() string { +func (s CreateDistributionOutput) GoString() string { return s.String() } -// SetOperations sets the Operations field's value. -func (s *CopySnapshotOutput) SetOperations(v []*Operation) *CopySnapshotOutput { - s.Operations = v +// SetDistribution sets the Distribution field's value. +func (s *CreateDistributionOutput) SetDistribution(v *LightsailDistribution) *CreateDistributionOutput { + s.Distribution = v return s } -type CreateCloudFormationStackInput struct { +// SetOperation sets the Operation field's value. +func (s *CreateDistributionOutput) SetOperation(v *Operation) *CreateDistributionOutput { + s.Operation = v + return s +} + +type CreateDomainEntryInput struct { _ struct{} `type:"structure"` - // An array of parameters that will be used to create the new Amazon EC2 instance. - // You can only pass one instance entry at a time in this array. You will get - // an invalid parameter error if you pass more than one instance entry in this - // array. + // An array of key-value pairs containing information about the domain entry + // request. // - // Instances is a required field - Instances []*InstanceEntry `locationName:"instances" type:"list" required:"true"` + // DomainEntry is a required field + DomainEntry *DomainEntry `locationName:"domainEntry" type:"structure" required:"true"` + + // The domain name (e.g., example.com) for which you want to create the domain + // entry. + // + // DomainName is a required field + DomainName *string `locationName:"domainName" type:"string" required:"true"` } // String returns the string representation -func (s CreateCloudFormationStackInput) String() string { +func (s CreateDomainEntryInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCloudFormationStackInput) GoString() string { +func (s CreateDomainEntryInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateCloudFormationStackInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateCloudFormationStackInput"} - if s.Instances == nil { - invalidParams.Add(request.NewErrParamRequired("Instances")) +func (s *CreateDomainEntryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDomainEntryInput"} + if s.DomainEntry == nil { + invalidParams.Add(request.NewErrParamRequired("DomainEntry")) } - if s.Instances != nil { - for i, v := range s.Instances { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Instances", i), err.(request.ErrInvalidParams)) - } - } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) } if invalidParams.Len() > 0 { @@ -13999,102 +17044,159 @@ func (s *CreateCloudFormationStackInput) Validate() error { return nil } -// SetInstances sets the Instances field's value. -func (s *CreateCloudFormationStackInput) SetInstances(v []*InstanceEntry) *CreateCloudFormationStackInput { - s.Instances = v +// SetDomainEntry sets the DomainEntry field's value. +func (s *CreateDomainEntryInput) SetDomainEntry(v *DomainEntry) *CreateDomainEntryInput { + s.DomainEntry = v return s } -type CreateCloudFormationStackOutput struct { +// SetDomainName sets the DomainName field's value. +func (s *CreateDomainEntryInput) SetDomainName(v string) *CreateDomainEntryInput { + s.DomainName = &v + return s +} + +type CreateDomainEntryOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. - Operations []*Operation `locationName:"operations" type:"list"` + Operation *Operation `locationName:"operation" type:"structure"` } // String returns the string representation -func (s CreateCloudFormationStackOutput) String() string { +func (s CreateDomainEntryOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateCloudFormationStackOutput) GoString() string { +func (s CreateDomainEntryOutput) GoString() string { return s.String() } -// SetOperations sets the Operations field's value. -func (s *CreateCloudFormationStackOutput) SetOperations(v []*Operation) *CreateCloudFormationStackOutput { - s.Operations = v +// SetOperation sets the Operation field's value. +func (s *CreateDomainEntryOutput) SetOperation(v *Operation) *CreateDomainEntryOutput { + s.Operation = v return s } -type CreateContactMethodInput struct { +type CreateDomainInput struct { _ struct{} `type:"structure"` - // The destination of the contact method, such as an email address or a mobile - // phone number. + // The domain name to manage (e.g., example.com). // - // Use the E.164 format when specifying a mobile phone number. E.164 is a standard - // for the phone number structure used for international telecommunication. - // Phone numbers that follow this format can have a maximum of 15 digits, and - // they are prefixed with the plus character (+) and the country code. For example, - // a U.S. phone number in E.164 format would be specified as +1XXX5550100. For - // more information, see E.164 (https://en.wikipedia.org/wiki/E.164) in Wikipedia. + // You cannot register a new domain name using Lightsail. 
You must register + // a domain name using Amazon Route 53 or another domain name registrar. If + // you have already registered your domain, you can enter its name in this parameter + // to manage the DNS records for that domain. // - // ContactEndpoint is a required field - ContactEndpoint *string `locationName:"contactEndpoint" min:"1" type:"string" required:"true"` + // DomainName is a required field + DomainName *string `locationName:"domainName" type:"string" required:"true"` - // The protocol of the contact method, such as Email or SMS (text messaging). - // - // The SMS protocol is supported only in the following AWS Regions. - // - // * US East (N. Virginia) (us-east-1) - // - // * US West (Oregon) (us-west-2) - // - // * Europe (Ireland) (eu-west-1) - // - // * Asia Pacific (Tokyo) (ap-northeast-1) - // - // * Asia Pacific (Singapore) (ap-southeast-1) + // The tag keys and optional values to add to the resource during create. // - // * Asia Pacific (Sydney) (ap-southeast-2) + // Use the TagResource action to tag a resource after it's created. + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s CreateDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomainName sets the DomainName field's value. +func (s *CreateDomainInput) SetDomainName(v string) *CreateDomainInput { + s.DomainName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDomainInput) SetTags(v []*Tag) *CreateDomainInput { + s.Tags = v + return s +} + +type CreateDomainOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operation *Operation `locationName:"operation" type:"structure"` +} + +// String returns the string representation +func (s CreateDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainOutput) GoString() string { + return s.String() +} + +// SetOperation sets the Operation field's value. +func (s *CreateDomainOutput) SetOperation(v *Operation) *CreateDomainOutput { + s.Operation = v + return s +} + +type CreateInstanceSnapshotInput struct { + _ struct{} `type:"structure"` + + // The Lightsail instance on which to base your snapshot. // - // For a list of countries/regions where SMS text messages can be sent, and - // the latest AWS Regions where SMS text messaging is supported, see Supported - // Regions and Countries (https://docs.aws.amazon.com/sns/latest/dg/sns-supported-regions-countries.html) - // in the Amazon SNS Developer Guide. + // InstanceName is a required field + InstanceName *string `locationName:"instanceName" type:"string" required:"true"` + + // The name for your new snapshot. 
// - // For more information about notifications in Amazon Lightsail, see Notifications - // in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-notifications). + // InstanceSnapshotName is a required field + InstanceSnapshotName *string `locationName:"instanceSnapshotName" type:"string" required:"true"` + + // The tag keys and optional values to add to the resource during create. // - // Protocol is a required field - Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"ContactProtocol"` + // Use the TagResource action to tag a resource after it's created. + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s CreateContactMethodInput) String() string { +func (s CreateInstanceSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateContactMethodInput) GoString() string { +func (s CreateInstanceSnapshotInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateContactMethodInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateContactMethodInput"} - if s.ContactEndpoint == nil { - invalidParams.Add(request.NewErrParamRequired("ContactEndpoint")) - } - if s.ContactEndpoint != nil && len(*s.ContactEndpoint) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ContactEndpoint", 1)) +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateInstanceSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInstanceSnapshotInput"} + if s.InstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceName")) } - if s.Protocol == nil { - invalidParams.Add(request.NewErrParamRequired("Protocol")) + if s.InstanceSnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceSnapshotName")) } if invalidParams.Len() > 0 { @@ -14103,76 +17205,95 @@ func (s *CreateContactMethodInput) Validate() error { return nil } -// SetContactEndpoint sets the ContactEndpoint field's value. -func (s *CreateContactMethodInput) SetContactEndpoint(v string) *CreateContactMethodInput { - s.ContactEndpoint = &v +// SetInstanceName sets the InstanceName field's value. +func (s *CreateInstanceSnapshotInput) SetInstanceName(v string) *CreateInstanceSnapshotInput { + s.InstanceName = &v return s } -// SetProtocol sets the Protocol field's value. -func (s *CreateContactMethodInput) SetProtocol(v string) *CreateContactMethodInput { - s.Protocol = &v +// SetInstanceSnapshotName sets the InstanceSnapshotName field's value. +func (s *CreateInstanceSnapshotInput) SetInstanceSnapshotName(v string) *CreateInstanceSnapshotInput { + s.InstanceSnapshotName = &v return s } -type CreateContactMethodOutput struct { +// SetTags sets the Tags field's value. +func (s *CreateInstanceSnapshotInput) SetTags(v []*Tag) *CreateInstanceSnapshotInput { + s.Tags = v + return s +} + +type CreateInstanceSnapshotOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. 
Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CreateContactMethodOutput) String() string { +func (s CreateInstanceSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateContactMethodOutput) GoString() string { +func (s CreateInstanceSnapshotOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *CreateContactMethodOutput) SetOperations(v []*Operation) *CreateContactMethodOutput { +func (s *CreateInstanceSnapshotOutput) SetOperations(v []*Operation) *CreateInstanceSnapshotOutput { s.Operations = v return s } -type CreateDiskFromSnapshotInput struct { +type CreateInstancesFromSnapshotInput struct { _ struct{} `type:"structure"` - // An array of objects that represent the add-ons to enable for the new disk. + // An array of objects representing the add-ons to enable for the new instance. AddOns []*AddOnRequest `locationName:"addOns" type:"list"` - // The Availability Zone where you want to create the disk (e.g., us-east-2a). - // Choose the same Availability Zone as the Lightsail instance where you want - // to create the disk. - // - // Use the GetRegions operation to list the Availability Zones where Lightsail - // is currently available. + // An object containing information about one or more disk mappings. + AttachedDiskMapping map[string][]*DiskMap `locationName:"attachedDiskMapping" type:"map"` + + // The Availability Zone where you want to create your instances. Use the following + // formatting: us-east-2a (case sensitive). You can get a list of Availability + // Zones by using the get regions (http://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_GetRegions.html) + // operation. Be sure to add the include Availability Zones parameter to your + // request. // // AvailabilityZone is a required field AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` - // The unique Lightsail disk name (e.g., my-disk). + // The bundle of specification information for your virtual private server (or + // instance), including the pricing plan (e.g., micro_1_0). // - // DiskName is a required field - DiskName *string `locationName:"diskName" type:"string" required:"true"` + // BundleId is a required field + BundleId *string `locationName:"bundleId" type:"string" required:"true"` - // The name of the disk snapshot (e.g., my-snapshot) from which to create the - // new storage disk. + // The names for your new instances. + // + // InstanceNames is a required field + InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` + + // The name of the instance snapshot on which you are basing your new instances. + // Use the get instance snapshots operation to return information about your + // existing snapshots. // // Constraint: // - // * This parameter cannot be defined together with the source disk name - // parameter. The disk snapshot name and source disk name parameters are - // mutually exclusive. - DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string"` + // * This parameter cannot be defined together with the source instance name + // parameter. The instance snapshot name and source instance name parameters + // are mutually exclusive. + InstanceSnapshotName *string `locationName:"instanceSnapshotName" type:"string"` - // The date of the automatic snapshot to use for the new disk. 
Use the get auto - // snapshots operation to identify the dates of the available automatic snapshots. + // The name for your key pair. + KeyPairName *string `locationName:"keyPairName" type:"string"` + + // The date of the automatic snapshot to use for the new instance. Use the get + // auto snapshots operation to identify the dates of the available automatic + // snapshots. // // Constraints: // @@ -14182,31 +17303,26 @@ type CreateDiskFromSnapshotInput struct { // auto snapshot parameter. The restore date and use latest restorable auto // snapshot parameters are mutually exclusive. // - // * Define this parameter only when creating a new disk from an automatic + // * Define this parameter only when creating a new instance from an automatic // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). RestoreDate *string `locationName:"restoreDate" type:"string"` - // The size of the disk in GB (e.g., 32). - // - // SizeInGb is a required field - SizeInGb *int64 `locationName:"sizeInGb" type:"integer" required:"true"` - - // The name of the source disk from which the source automatic snapshot was - // created. + // The name of the source instance from which the source automatic snapshot + // was created. // // Constraints: // - // * This parameter cannot be defined together with the disk snapshot name - // parameter. The source disk name and disk snapshot name parameters are - // mutually exclusive. + // * This parameter cannot be defined together with the instance snapshot + // name parameter. The source instance name and instance snapshot name parameters + // are mutually exclusive. // - // * Define this parameter only when creating a new disk from an automatic + // * Define this parameter only when creating a new instance from an automatic // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). - SourceDiskName *string `locationName:"sourceDiskName" type:"string"` + SourceInstanceName *string `locationName:"sourceInstanceName" type:"string"` // The tag keys and optional values to add to the resource during create. // - // To tag a resource after it has been created, see the tag resource operation. + // Use the TagResource action to tag a resource after it's created. Tags []*Tag `locationName:"tags" type:"list"` // A Boolean value to indicate whether to use the latest available automatic @@ -14218,32 +17334,41 @@ type CreateDiskFromSnapshotInput struct { // The use latest restorable auto snapshot and restore date parameters are // mutually exclusive. // - // * Define this parameter only when creating a new disk from an automatic + // * Define this parameter only when creating a new instance from an automatic // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"` + + // You can create a launch script that configures a server with additional user + // data. For example, apt-get -y update. + // + // Depending on the machine image you choose, the command to get software on + // your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu + // use apt-get, and FreeBSD uses pkg. 
For a complete list, see the Dev Guide + // (https://lightsail.aws.amazon.com/ls/docs/getting-started/article/compare-options-choose-lightsail-instance-image). + UserData *string `locationName:"userData" type:"string"` } // String returns the string representation -func (s CreateDiskFromSnapshotInput) String() string { +func (s CreateInstancesFromSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDiskFromSnapshotInput) GoString() string { +func (s CreateInstancesFromSnapshotInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateDiskFromSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDiskFromSnapshotInput"} +func (s *CreateInstancesFromSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInstancesFromSnapshotInput"} if s.AvailabilityZone == nil { invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) } - if s.DiskName == nil { - invalidParams.Add(request.NewErrParamRequired("DiskName")) + if s.BundleId == nil { + invalidParams.Add(request.NewErrParamRequired("BundleId")) } - if s.SizeInGb == nil { - invalidParams.Add(request.NewErrParamRequired("SizeInGb")) + if s.InstanceNames == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceNames")) } if s.AddOns != nil { for i, v := range s.AddOns { @@ -14263,137 +17388,191 @@ func (s *CreateDiskFromSnapshotInput) Validate() error { } // SetAddOns sets the AddOns field's value. -func (s *CreateDiskFromSnapshotInput) SetAddOns(v []*AddOnRequest) *CreateDiskFromSnapshotInput { +func (s *CreateInstancesFromSnapshotInput) SetAddOns(v []*AddOnRequest) *CreateInstancesFromSnapshotInput { s.AddOns = v return s } +// SetAttachedDiskMapping sets the AttachedDiskMapping field's value. +func (s *CreateInstancesFromSnapshotInput) SetAttachedDiskMapping(v map[string][]*DiskMap) *CreateInstancesFromSnapshotInput { + s.AttachedDiskMapping = v + return s +} + // SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *CreateDiskFromSnapshotInput) SetAvailabilityZone(v string) *CreateDiskFromSnapshotInput { +func (s *CreateInstancesFromSnapshotInput) SetAvailabilityZone(v string) *CreateInstancesFromSnapshotInput { s.AvailabilityZone = &v return s } -// SetDiskName sets the DiskName field's value. -func (s *CreateDiskFromSnapshotInput) SetDiskName(v string) *CreateDiskFromSnapshotInput { - s.DiskName = &v +// SetBundleId sets the BundleId field's value. +func (s *CreateInstancesFromSnapshotInput) SetBundleId(v string) *CreateInstancesFromSnapshotInput { + s.BundleId = &v return s } -// SetDiskSnapshotName sets the DiskSnapshotName field's value. -func (s *CreateDiskFromSnapshotInput) SetDiskSnapshotName(v string) *CreateDiskFromSnapshotInput { - s.DiskSnapshotName = &v +// SetInstanceNames sets the InstanceNames field's value. +func (s *CreateInstancesFromSnapshotInput) SetInstanceNames(v []*string) *CreateInstancesFromSnapshotInput { + s.InstanceNames = v return s } -// SetRestoreDate sets the RestoreDate field's value. -func (s *CreateDiskFromSnapshotInput) SetRestoreDate(v string) *CreateDiskFromSnapshotInput { - s.RestoreDate = &v +// SetInstanceSnapshotName sets the InstanceSnapshotName field's value. 
+func (s *CreateInstancesFromSnapshotInput) SetInstanceSnapshotName(v string) *CreateInstancesFromSnapshotInput { + s.InstanceSnapshotName = &v return s } -// SetSizeInGb sets the SizeInGb field's value. -func (s *CreateDiskFromSnapshotInput) SetSizeInGb(v int64) *CreateDiskFromSnapshotInput { - s.SizeInGb = &v +// SetKeyPairName sets the KeyPairName field's value. +func (s *CreateInstancesFromSnapshotInput) SetKeyPairName(v string) *CreateInstancesFromSnapshotInput { + s.KeyPairName = &v return s } -// SetSourceDiskName sets the SourceDiskName field's value. -func (s *CreateDiskFromSnapshotInput) SetSourceDiskName(v string) *CreateDiskFromSnapshotInput { - s.SourceDiskName = &v +// SetRestoreDate sets the RestoreDate field's value. +func (s *CreateInstancesFromSnapshotInput) SetRestoreDate(v string) *CreateInstancesFromSnapshotInput { + s.RestoreDate = &v + return s +} + +// SetSourceInstanceName sets the SourceInstanceName field's value. +func (s *CreateInstancesFromSnapshotInput) SetSourceInstanceName(v string) *CreateInstancesFromSnapshotInput { + s.SourceInstanceName = &v return s } // SetTags sets the Tags field's value. -func (s *CreateDiskFromSnapshotInput) SetTags(v []*Tag) *CreateDiskFromSnapshotInput { +func (s *CreateInstancesFromSnapshotInput) SetTags(v []*Tag) *CreateInstancesFromSnapshotInput { s.Tags = v return s } // SetUseLatestRestorableAutoSnapshot sets the UseLatestRestorableAutoSnapshot field's value. -func (s *CreateDiskFromSnapshotInput) SetUseLatestRestorableAutoSnapshot(v bool) *CreateDiskFromSnapshotInput { +func (s *CreateInstancesFromSnapshotInput) SetUseLatestRestorableAutoSnapshot(v bool) *CreateInstancesFromSnapshotInput { s.UseLatestRestorableAutoSnapshot = &v return s } -type CreateDiskFromSnapshotOutput struct { +// SetUserData sets the UserData field's value. +func (s *CreateInstancesFromSnapshotInput) SetUserData(v string) *CreateInstancesFromSnapshotInput { + s.UserData = &v + return s +} + +type CreateInstancesFromSnapshotOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CreateDiskFromSnapshotOutput) String() string { +func (s CreateInstancesFromSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDiskFromSnapshotOutput) GoString() string { +func (s CreateInstancesFromSnapshotOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *CreateDiskFromSnapshotOutput) SetOperations(v []*Operation) *CreateDiskFromSnapshotOutput { +func (s *CreateInstancesFromSnapshotOutput) SetOperations(v []*Operation) *CreateInstancesFromSnapshotOutput { s.Operations = v return s } -type CreateDiskInput struct { +type CreateInstancesInput struct { _ struct{} `type:"structure"` - // An array of objects that represent the add-ons to enable for the new disk. + // An array of objects representing the add-ons to enable for the new instance. AddOns []*AddOnRequest `locationName:"addOns" type:"list"` - // The Availability Zone where you want to create the disk (e.g., us-east-2a). - // Use the same Availability Zone as the Lightsail instance to which you want - // to attach the disk. 
- // - // Use the get regions operation to list the Availability Zones where Lightsail - // is currently available. + // The Availability Zone in which to create your instance. Use the following + // format: us-east-2a (case sensitive). You can get a list of Availability Zones + // by using the get regions (http://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_GetRegions.html) + // operation. Be sure to add the include Availability Zones parameter to your + // request. // // AvailabilityZone is a required field AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` - // The unique Lightsail disk name (e.g., my-disk). + // The ID for a virtual private server image (e.g., app_wordpress_4_4 or app_lamp_7_0). + // Use the get blueprints operation to return a list of available images (or + // blueprints). // - // DiskName is a required field - DiskName *string `locationName:"diskName" type:"string" required:"true"` + // Use active blueprints when creating new instances. Inactive blueprints are + // listed to support customers with existing instances and are not necessarily + // available to create new instances. Blueprints are marked inactive when they + // become outdated due to operating system updates or new application releases. + // + // BlueprintId is a required field + BlueprintId *string `locationName:"blueprintId" type:"string" required:"true"` - // The size of the disk in GB (e.g., 32). + // The bundle of specification information for your virtual private server (or + // instance), including the pricing plan (e.g., micro_1_0). // - // SizeInGb is a required field - SizeInGb *int64 `locationName:"sizeInGb" type:"integer" required:"true"` + // BundleId is a required field + BundleId *string `locationName:"bundleId" type:"string" required:"true"` + + // (Deprecated) The name for your custom image. + // + // In releases prior to June 12, 2017, this parameter was ignored by the API. + // It is now deprecated. + // + // Deprecated: CustomImageName has been deprecated + CustomImageName *string `locationName:"customImageName" deprecated:"true" type:"string"` + + // The names to use for your new Lightsail instances. Separate multiple values + // using quotation marks and commas, for example: ["MyFirstInstance","MySecondInstance"] + // + // InstanceNames is a required field + InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` + + // The name of your key pair. + KeyPairName *string `locationName:"keyPairName" type:"string"` // The tag keys and optional values to add to the resource during create. // - // To tag a resource after it has been created, see the tag resource operation. + // Use the TagResource action to tag a resource after it's created. Tags []*Tag `locationName:"tags" type:"list"` + + // A launch script you can create that configures a server with additional user + // data. For example, you might want to run apt-get -y update. + // + // Depending on the machine image you choose, the command to get software on + // your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu + // use apt-get, and FreeBSD uses pkg. For a complete list, see the Dev Guide + // (https://lightsail.aws.amazon.com/ls/docs/getting-started/article/compare-options-choose-lightsail-instance-image). 
+ UserData *string `locationName:"userData" type:"string"` } // String returns the string representation -func (s CreateDiskInput) String() string { +func (s CreateInstancesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDiskInput) GoString() string { +func (s CreateInstancesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateDiskInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDiskInput"} +func (s *CreateInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInstancesInput"} if s.AvailabilityZone == nil { invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) } - if s.DiskName == nil { - invalidParams.Add(request.NewErrParamRequired("DiskName")) + if s.BlueprintId == nil { + invalidParams.Add(request.NewErrParamRequired("BlueprintId")) } - if s.SizeInGb == nil { - invalidParams.Add(request.NewErrParamRequired("SizeInGb")) + if s.BundleId == nil { + invalidParams.Add(request.NewErrParamRequired("BundleId")) + } + if s.InstanceNames == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceNames")) } if s.AddOns != nil { for i, v := range s.AddOns { @@ -14413,103 +17592,113 @@ func (s *CreateDiskInput) Validate() error { } // SetAddOns sets the AddOns field's value. -func (s *CreateDiskInput) SetAddOns(v []*AddOnRequest) *CreateDiskInput { +func (s *CreateInstancesInput) SetAddOns(v []*AddOnRequest) *CreateInstancesInput { s.AddOns = v return s } // SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *CreateDiskInput) SetAvailabilityZone(v string) *CreateDiskInput { +func (s *CreateInstancesInput) SetAvailabilityZone(v string) *CreateInstancesInput { s.AvailabilityZone = &v return s } -// SetDiskName sets the DiskName field's value. -func (s *CreateDiskInput) SetDiskName(v string) *CreateDiskInput { - s.DiskName = &v +// SetBlueprintId sets the BlueprintId field's value. +func (s *CreateInstancesInput) SetBlueprintId(v string) *CreateInstancesInput { + s.BlueprintId = &v return s } -// SetSizeInGb sets the SizeInGb field's value. -func (s *CreateDiskInput) SetSizeInGb(v int64) *CreateDiskInput { - s.SizeInGb = &v +// SetBundleId sets the BundleId field's value. +func (s *CreateInstancesInput) SetBundleId(v string) *CreateInstancesInput { + s.BundleId = &v + return s +} + +// SetCustomImageName sets the CustomImageName field's value. +func (s *CreateInstancesInput) SetCustomImageName(v string) *CreateInstancesInput { + s.CustomImageName = &v + return s +} + +// SetInstanceNames sets the InstanceNames field's value. +func (s *CreateInstancesInput) SetInstanceNames(v []*string) *CreateInstancesInput { + s.InstanceNames = v + return s +} + +// SetKeyPairName sets the KeyPairName field's value. +func (s *CreateInstancesInput) SetKeyPairName(v string) *CreateInstancesInput { + s.KeyPairName = &v return s } // SetTags sets the Tags field's value. -func (s *CreateDiskInput) SetTags(v []*Tag) *CreateDiskInput { +func (s *CreateInstancesInput) SetTags(v []*Tag) *CreateInstancesInput { s.Tags = v return s } -type CreateDiskOutput struct { +// SetUserData sets the UserData field's value. 
+func (s *CreateInstancesInput) SetUserData(v string) *CreateInstancesInput { + s.UserData = &v + return s +} + +type CreateInstancesOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CreateDiskOutput) String() string { +func (s CreateInstancesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDiskOutput) GoString() string { +func (s CreateInstancesOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *CreateDiskOutput) SetOperations(v []*Operation) *CreateDiskOutput { +func (s *CreateInstancesOutput) SetOperations(v []*Operation) *CreateInstancesOutput { s.Operations = v return s } -type CreateDiskSnapshotInput struct { +type CreateKeyPairInput struct { _ struct{} `type:"structure"` - // The unique name of the source disk (e.g., Disk-Virginia-1). - // - // This parameter cannot be defined together with the instance name parameter. - // The disk name and instance name parameters are mutually exclusive. - DiskName *string `locationName:"diskName" type:"string"` - - // The name of the destination disk snapshot (e.g., my-disk-snapshot) based - // on the source disk. - // - // DiskSnapshotName is a required field - DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string" required:"true"` - - // The unique name of the source instance (e.g., Amazon_Linux-512MB-Virginia-1). - // When this is defined, a snapshot of the instance's system volume is created. + // The name for your new key pair. // - // This parameter cannot be defined together with the disk name parameter. The - // instance name and disk name parameters are mutually exclusive. - InstanceName *string `locationName:"instanceName" type:"string"` + // KeyPairName is a required field + KeyPairName *string `locationName:"keyPairName" type:"string" required:"true"` // The tag keys and optional values to add to the resource during create. // - // To tag a resource after it has been created, see the tag resource operation. + // Use the TagResource action to tag a resource after it's created. Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s CreateDiskSnapshotInput) String() string { +func (s CreateKeyPairInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDiskSnapshotInput) GoString() string { +func (s CreateKeyPairInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateDiskSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDiskSnapshotInput"} - if s.DiskSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("DiskSnapshotName")) +func (s *CreateKeyPairInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateKeyPairInput"} + if s.KeyPairName == nil { + invalidParams.Add(request.NewErrParamRequired("KeyPairName")) } if invalidParams.Len() > 0 { @@ -14518,168 +17707,136 @@ func (s *CreateDiskSnapshotInput) Validate() error { return nil } -// SetDiskName sets the DiskName field's value. -func (s *CreateDiskSnapshotInput) SetDiskName(v string) *CreateDiskSnapshotInput { - s.DiskName = &v - return s -} - -// SetDiskSnapshotName sets the DiskSnapshotName field's value. -func (s *CreateDiskSnapshotInput) SetDiskSnapshotName(v string) *CreateDiskSnapshotInput { - s.DiskSnapshotName = &v - return s -} - -// SetInstanceName sets the InstanceName field's value. -func (s *CreateDiskSnapshotInput) SetInstanceName(v string) *CreateDiskSnapshotInput { - s.InstanceName = &v +// SetKeyPairName sets the KeyPairName field's value. +func (s *CreateKeyPairInput) SetKeyPairName(v string) *CreateKeyPairInput { + s.KeyPairName = &v return s } // SetTags sets the Tags field's value. -func (s *CreateDiskSnapshotInput) SetTags(v []*Tag) *CreateDiskSnapshotInput { +func (s *CreateKeyPairInput) SetTags(v []*Tag) *CreateKeyPairInput { s.Tags = v return s } -type CreateDiskSnapshotOutput struct { +type CreateKeyPairOutput struct { _ struct{} `type:"structure"` + // An array of key-value pairs containing information about the new key pair + // you just created. + KeyPair *KeyPair `locationName:"keyPair" type:"structure"` + // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. - Operations []*Operation `locationName:"operations" type:"list"` -} - -// String returns the string representation -func (s CreateDiskSnapshotOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateDiskSnapshotOutput) GoString() string { - return s.String() -} - -// SetOperations sets the Operations field's value. -func (s *CreateDiskSnapshotOutput) SetOperations(v []*Operation) *CreateDiskSnapshotOutput { - s.Operations = v - return s -} - -type CreateDomainEntryInput struct { - _ struct{} `type:"structure"` + Operation *Operation `locationName:"operation" type:"structure"` - // An array of key-value pairs containing information about the domain entry - // request. - // - // DomainEntry is a required field - DomainEntry *DomainEntry `locationName:"domainEntry" type:"structure" required:"true"` + // A base64-encoded RSA private key. + PrivateKeyBase64 *string `locationName:"privateKeyBase64" type:"string"` - // The domain name (e.g., example.com) for which you want to create the domain - // entry. - // - // DomainName is a required field - DomainName *string `locationName:"domainName" type:"string" required:"true"` + // A base64-encoded public key of the ssh-rsa type. 
+ PublicKeyBase64 *string `locationName:"publicKeyBase64" type:"string"` } // String returns the string representation -func (s CreateDomainEntryInput) String() string { +func (s CreateKeyPairOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDomainEntryInput) GoString() string { +func (s CreateKeyPairOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateDomainEntryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDomainEntryInput"} - if s.DomainEntry == nil { - invalidParams.Add(request.NewErrParamRequired("DomainEntry")) - } - if s.DomainName == nil { - invalidParams.Add(request.NewErrParamRequired("DomainName")) - } +// SetKeyPair sets the KeyPair field's value. +func (s *CreateKeyPairOutput) SetKeyPair(v *KeyPair) *CreateKeyPairOutput { + s.KeyPair = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetOperation sets the Operation field's value. +func (s *CreateKeyPairOutput) SetOperation(v *Operation) *CreateKeyPairOutput { + s.Operation = v + return s } -// SetDomainEntry sets the DomainEntry field's value. -func (s *CreateDomainEntryInput) SetDomainEntry(v *DomainEntry) *CreateDomainEntryInput { - s.DomainEntry = v +// SetPrivateKeyBase64 sets the PrivateKeyBase64 field's value. +func (s *CreateKeyPairOutput) SetPrivateKeyBase64(v string) *CreateKeyPairOutput { + s.PrivateKeyBase64 = &v return s } -// SetDomainName sets the DomainName field's value. -func (s *CreateDomainEntryInput) SetDomainName(v string) *CreateDomainEntryInput { - s.DomainName = &v +// SetPublicKeyBase64 sets the PublicKeyBase64 field's value. +func (s *CreateKeyPairOutput) SetPublicKeyBase64(v string) *CreateKeyPairOutput { + s.PublicKeyBase64 = &v return s } -type CreateDomainEntryOutput struct { +type CreateLoadBalancerInput struct { _ struct{} `type:"structure"` - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operation *Operation `locationName:"operation" type:"structure"` -} - -// String returns the string representation -func (s CreateDomainEntryOutput) String() string { - return awsutil.Prettify(s) -} + // The optional alternative domains and subdomains to use with your SSL/TLS + // certificate (e.g., www.example.com, example.com, m.example.com, blog.example.com). + CertificateAlternativeNames []*string `locationName:"certificateAlternativeNames" type:"list"` -// GoString returns the string representation -func (s CreateDomainEntryOutput) GoString() string { - return s.String() -} + // The domain name with which your certificate is associated (e.g., example.com). + // + // If you specify certificateDomainName, then certificateName is required (and + // vice-versa). + CertificateDomainName *string `locationName:"certificateDomainName" type:"string"` -// SetOperation sets the Operation field's value. -func (s *CreateDomainEntryOutput) SetOperation(v *Operation) *CreateDomainEntryOutput { - s.Operation = v - return s -} + // The name of the SSL/TLS certificate. + // + // If you specify certificateName, then certificateDomainName is required (and + // vice-versa). 
+ CertificateName *string `locationName:"certificateName" type:"string"` -type CreateDomainInput struct { - _ struct{} `type:"structure"` + // The path you provided to perform the load balancer health check. If you didn't + // specify a health check path, Lightsail uses the root path of your website + // (e.g., "/"). + // + // You may want to specify a custom health check path other than the root of + // your application if your home page loads slowly or has a lot of media or + // scripting on it. + HealthCheckPath *string `locationName:"healthCheckPath" type:"string"` - // The domain name to manage (e.g., example.com). + // The instance port where you're creating your load balancer. // - // You cannot register a new domain name using Lightsail. You must register - // a domain name using Amazon Route 53 or another domain name registrar. If - // you have already registered your domain, you can enter its name in this parameter - // to manage the DNS records for that domain. + // InstancePort is a required field + InstancePort *int64 `locationName:"instancePort" type:"integer" required:"true"` + + // The name of your load balancer. // - // DomainName is a required field - DomainName *string `locationName:"domainName" type:"string" required:"true"` + // LoadBalancerName is a required field + LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` // The tag keys and optional values to add to the resource during create. // - // To tag a resource after it has been created, see the tag resource operation. + // Use the TagResource action to tag a resource after it's created. Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s CreateDomainInput) String() string { +func (s CreateLoadBalancerInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDomainInput) GoString() string { +func (s CreateLoadBalancerInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateDomainInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDomainInput"} - if s.DomainName == nil { - invalidParams.Add(request.NewErrParamRequired("DomainName")) +func (s *CreateLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLoadBalancerInput"} + if s.InstancePort == nil { + invalidParams.Add(request.NewErrParamRequired("InstancePort")) + } + if s.InstancePort != nil && *s.InstancePort < -1 { + invalidParams.Add(request.NewErrParamMinValue("InstancePort", -1)) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) } if invalidParams.Len() > 0 { @@ -14688,80 +17845,129 @@ func (s *CreateDomainInput) Validate() error { return nil } -// SetDomainName sets the DomainName field's value. -func (s *CreateDomainInput) SetDomainName(v string) *CreateDomainInput { - s.DomainName = &v +// SetCertificateAlternativeNames sets the CertificateAlternativeNames field's value. +func (s *CreateLoadBalancerInput) SetCertificateAlternativeNames(v []*string) *CreateLoadBalancerInput { + s.CertificateAlternativeNames = v + return s +} + +// SetCertificateDomainName sets the CertificateDomainName field's value. +func (s *CreateLoadBalancerInput) SetCertificateDomainName(v string) *CreateLoadBalancerInput { + s.CertificateDomainName = &v + return s +} + +// SetCertificateName sets the CertificateName field's value. 
+func (s *CreateLoadBalancerInput) SetCertificateName(v string) *CreateLoadBalancerInput { + s.CertificateName = &v + return s +} + +// SetHealthCheckPath sets the HealthCheckPath field's value. +func (s *CreateLoadBalancerInput) SetHealthCheckPath(v string) *CreateLoadBalancerInput { + s.HealthCheckPath = &v + return s +} + +// SetInstancePort sets the InstancePort field's value. +func (s *CreateLoadBalancerInput) SetInstancePort(v int64) *CreateLoadBalancerInput { + s.InstancePort = &v + return s +} + +// SetLoadBalancerName sets the LoadBalancerName field's value. +func (s *CreateLoadBalancerInput) SetLoadBalancerName(v string) *CreateLoadBalancerInput { + s.LoadBalancerName = &v return s } // SetTags sets the Tags field's value. -func (s *CreateDomainInput) SetTags(v []*Tag) *CreateDomainInput { +func (s *CreateLoadBalancerInput) SetTags(v []*Tag) *CreateLoadBalancerInput { s.Tags = v return s } -type CreateDomainOutput struct { +type CreateLoadBalancerOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. - Operation *Operation `locationName:"operation" type:"structure"` + Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CreateDomainOutput) String() string { +func (s CreateLoadBalancerOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDomainOutput) GoString() string { +func (s CreateLoadBalancerOutput) GoString() string { return s.String() } -// SetOperation sets the Operation field's value. -func (s *CreateDomainOutput) SetOperation(v *Operation) *CreateDomainOutput { - s.Operation = v +// SetOperations sets the Operations field's value. +func (s *CreateLoadBalancerOutput) SetOperations(v []*Operation) *CreateLoadBalancerOutput { + s.Operations = v return s } -type CreateInstanceSnapshotInput struct { +type CreateLoadBalancerTlsCertificateInput struct { _ struct{} `type:"structure"` - // The Lightsail instance on which to base your snapshot. + // An array of strings listing alternative domains and subdomains for your SSL/TLS + // certificate. Lightsail will de-dupe the names for you. You can have a maximum + // of 9 alternative names (in addition to the 1 primary domain). We do not support + // wildcards (e.g., *.example.com). + CertificateAlternativeNames []*string `locationName:"certificateAlternativeNames" type:"list"` + + // The domain name (e.g., example.com) for your SSL/TLS certificate. // - // InstanceName is a required field - InstanceName *string `locationName:"instanceName" type:"string" required:"true"` + // CertificateDomainName is a required field + CertificateDomainName *string `locationName:"certificateDomainName" type:"string" required:"true"` - // The name for your new snapshot. + // The SSL/TLS certificate name. // - // InstanceSnapshotName is a required field - InstanceSnapshotName *string `locationName:"instanceSnapshotName" type:"string" required:"true"` + // You can have up to 10 certificates in your account at one time. Each Lightsail + // load balancer can have up to 2 certificates associated with it at one time. + // There is also an overall limit to the number of certificates that can be + // issue in a 365-day period. 
For more information, see Limits (http://docs.aws.amazon.com/acm/latest/userguide/acm-limits.html). + // + // CertificateName is a required field + CertificateName *string `locationName:"certificateName" type:"string" required:"true"` + + // The load balancer name where you want to create the SSL/TLS certificate. + // + // LoadBalancerName is a required field + LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` // The tag keys and optional values to add to the resource during create. // - // To tag a resource after it has been created, see the tag resource operation. + // Use the TagResource action to tag a resource after it's created. Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s CreateInstanceSnapshotInput) String() string { +func (s CreateLoadBalancerTlsCertificateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateInstanceSnapshotInput) GoString() string { +func (s CreateLoadBalancerTlsCertificateInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateInstanceSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateInstanceSnapshotInput"} - if s.InstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceName")) +func (s *CreateLoadBalancerTlsCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLoadBalancerTlsCertificateInput"} + if s.CertificateDomainName == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateDomainName")) } - if s.InstanceSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceSnapshotName")) + if s.CertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateName")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) } if invalidParams.Len() > 0 { @@ -14770,180 +17976,149 @@ func (s *CreateInstanceSnapshotInput) Validate() error { return nil } -// SetInstanceName sets the InstanceName field's value. -func (s *CreateInstanceSnapshotInput) SetInstanceName(v string) *CreateInstanceSnapshotInput { - s.InstanceName = &v +// SetCertificateAlternativeNames sets the CertificateAlternativeNames field's value. +func (s *CreateLoadBalancerTlsCertificateInput) SetCertificateAlternativeNames(v []*string) *CreateLoadBalancerTlsCertificateInput { + s.CertificateAlternativeNames = v return s } -// SetInstanceSnapshotName sets the InstanceSnapshotName field's value. -func (s *CreateInstanceSnapshotInput) SetInstanceSnapshotName(v string) *CreateInstanceSnapshotInput { - s.InstanceSnapshotName = &v +// SetCertificateDomainName sets the CertificateDomainName field's value. +func (s *CreateLoadBalancerTlsCertificateInput) SetCertificateDomainName(v string) *CreateLoadBalancerTlsCertificateInput { + s.CertificateDomainName = &v + return s +} + +// SetCertificateName sets the CertificateName field's value. +func (s *CreateLoadBalancerTlsCertificateInput) SetCertificateName(v string) *CreateLoadBalancerTlsCertificateInput { + s.CertificateName = &v + return s +} + +// SetLoadBalancerName sets the LoadBalancerName field's value. +func (s *CreateLoadBalancerTlsCertificateInput) SetLoadBalancerName(v string) *CreateLoadBalancerTlsCertificateInput { + s.LoadBalancerName = &v return s } // SetTags sets the Tags field's value. 
-func (s *CreateInstanceSnapshotInput) SetTags(v []*Tag) *CreateInstanceSnapshotInput { +func (s *CreateLoadBalancerTlsCertificateInput) SetTags(v []*Tag) *CreateLoadBalancerTlsCertificateInput { s.Tags = v return s } -type CreateInstanceSnapshotOutput struct { +type CreateLoadBalancerTlsCertificateOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CreateInstanceSnapshotOutput) String() string { +func (s CreateLoadBalancerTlsCertificateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateInstanceSnapshotOutput) GoString() string { +func (s CreateLoadBalancerTlsCertificateOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *CreateInstanceSnapshotOutput) SetOperations(v []*Operation) *CreateInstanceSnapshotOutput { +func (s *CreateLoadBalancerTlsCertificateOutput) SetOperations(v []*Operation) *CreateLoadBalancerTlsCertificateOutput { s.Operations = v return s } -type CreateInstancesFromSnapshotInput struct { +type CreateRelationalDatabaseFromSnapshotInput struct { _ struct{} `type:"structure"` - // An array of objects representing the add-ons to enable for the new instance. - AddOns []*AddOnRequest `locationName:"addOns" type:"list"` - - // An object containing information about one or more disk mappings. - AttachedDiskMapping map[string][]*DiskMap `locationName:"attachedDiskMapping" type:"map"` - - // The Availability Zone where you want to create your instances. Use the following - // formatting: us-east-2a (case sensitive). You can get a list of Availability - // Zones by using the get regions (http://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_GetRegions.html) - // operation. Be sure to add the include Availability Zones parameter to your - // request. - // - // AvailabilityZone is a required field - AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` - - // The bundle of specification information for your virtual private server (or - // instance), including the pricing plan (e.g., micro_1_0). + // The Availability Zone in which to create your new database. Use the us-east-2a + // case-sensitive format. // - // BundleId is a required field - BundleId *string `locationName:"bundleId" type:"string" required:"true"` + // You can get a list of Availability Zones by using the get regions operation. + // Be sure to add the include relational database Availability Zones parameter + // to your request. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - // The names for your new instances. - // - // InstanceNames is a required field - InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` + // Specifies the accessibility options for your new database. A value of true + // specifies a database that is available to resources outside of your Lightsail + // account. A value of false specifies a database that is available only to + // your Lightsail resources in the same region as your database. 
+ PubliclyAccessible *bool `locationName:"publiclyAccessible" type:"boolean"` - // The name of the instance snapshot on which you are basing your new instances. - // Use the get instance snapshots operation to return information about your - // existing snapshots. + // The bundle ID for your new database. A bundle describes the performance specifications + // for your database. // - // Constraint: + // You can get a list of database bundle IDs by using the get relational database + // bundles operation. // - // * This parameter cannot be defined together with the source instance name - // parameter. The instance snapshot name and source instance name parameters - // are mutually exclusive. - InstanceSnapshotName *string `locationName:"instanceSnapshotName" type:"string"` - - // The name for your key pair. - KeyPairName *string `locationName:"keyPairName" type:"string"` + // When creating a new database from a snapshot, you cannot choose a bundle + // that is smaller than the bundle of the source database. + RelationalDatabaseBundleId *string `locationName:"relationalDatabaseBundleId" type:"string"` - // The date of the automatic snapshot to use for the new instance. Use the get - // auto snapshots operation to identify the dates of the available automatic - // snapshots. + // The name to use for your new database. // // Constraints: // - // * Must be specified in YYYY-MM-DD format. + // * Must contain from 2 to 255 alphanumeric characters, or hyphens. // - // * This parameter cannot be defined together with the use latest restorable - // auto snapshot parameter. The restore date and use latest restorable auto - // snapshot parameters are mutually exclusive. + // * The first and last character must be a letter or number. // - // * Define this parameter only when creating a new instance from an automatic - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). - RestoreDate *string `locationName:"restoreDate" type:"string"` + // RelationalDatabaseName is a required field + RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"` - // The name of the source instance from which the source automatic snapshot - // was created. + // The name of the database snapshot from which to create your new database. + RelationalDatabaseSnapshotName *string `locationName:"relationalDatabaseSnapshotName" type:"string"` + + // The date and time to restore your database from. // // Constraints: // - // * This parameter cannot be defined together with the instance snapshot - // name parameter. The source instance name and instance snapshot name parameters - // are mutually exclusive. - // - // * Define this parameter only when creating a new instance from an automatic - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). - SourceInstanceName *string `locationName:"sourceInstanceName" type:"string"` - - // The tag keys and optional values to add to the resource during create. + // * Must be before the latest restorable time for the database. // - // To tag a resource after it has been created, see the tag resource operation. - Tags []*Tag `locationName:"tags" type:"list"` - - // A Boolean value to indicate whether to use the latest available automatic - // snapshot. 
+ // * Cannot be specified if the use latest restorable time parameter is true. // - // Constraints: + // * Specified in Coordinated Universal Time (UTC). // - // * This parameter cannot be defined together with the restore date parameter. - // The use latest restorable auto snapshot and restore date parameters are - // mutually exclusive. + // * Specified in the Unix time format. For example, if you wish to use a + // restore time of October 1, 2018, at 8 PM UTC, then you input 1538424000 + // as the restore time. + RestoreTime *time.Time `locationName:"restoreTime" type:"timestamp"` + + // The name of the source database. + SourceRelationalDatabaseName *string `locationName:"sourceRelationalDatabaseName" type:"string"` + + // The tag keys and optional values to add to the resource during create. // - // * Define this parameter only when creating a new instance from an automatic - // snapshot. For more information, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-configuring-automatic-snapshots). - UseLatestRestorableAutoSnapshot *bool `locationName:"useLatestRestorableAutoSnapshot" type:"boolean"` + // Use the TagResource action to tag a resource after it's created. + Tags []*Tag `locationName:"tags" type:"list"` - // You can create a launch script that configures a server with additional user - // data. For example, apt-get -y update. + // Specifies whether your database is restored from the latest backup time. + // A value of true restores from the latest backup time. // - // Depending on the machine image you choose, the command to get software on - // your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu - // use apt-get, and FreeBSD uses pkg. For a complete list, see the Dev Guide - // (https://lightsail.aws.amazon.com/ls/docs/getting-started/article/compare-options-choose-lightsail-instance-image). - UserData *string `locationName:"userData" type:"string"` + // Default: false + // + // Constraints: Cannot be specified if the restore time parameter is provided. + UseLatestRestorableTime *bool `locationName:"useLatestRestorableTime" type:"boolean"` } // String returns the string representation -func (s CreateInstancesFromSnapshotInput) String() string { +func (s CreateRelationalDatabaseFromSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateInstancesFromSnapshotInput) GoString() string { +func (s CreateRelationalDatabaseFromSnapshotInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateInstancesFromSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateInstancesFromSnapshotInput"} - if s.AvailabilityZone == nil { - invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) - } - if s.BundleId == nil { - invalidParams.Add(request.NewErrParamRequired("BundleId")) - } - if s.InstanceNames == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceNames")) - } - if s.AddOns != nil { - for i, v := range s.AddOns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddOns", i), err.(request.ErrInvalidParams)) - } - } +func (s *CreateRelationalDatabaseFromSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRelationalDatabaseFromSnapshotInput"} + if s.RelationalDatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName")) } if invalidParams.Len() > 0 { @@ -14952,202 +18127,238 @@ func (s *CreateInstancesFromSnapshotInput) Validate() error { return nil } -// SetAddOns sets the AddOns field's value. -func (s *CreateInstancesFromSnapshotInput) SetAddOns(v []*AddOnRequest) *CreateInstancesFromSnapshotInput { - s.AddOns = v - return s -} - -// SetAttachedDiskMapping sets the AttachedDiskMapping field's value. -func (s *CreateInstancesFromSnapshotInput) SetAttachedDiskMapping(v map[string][]*DiskMap) *CreateInstancesFromSnapshotInput { - s.AttachedDiskMapping = v - return s -} - // SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *CreateInstancesFromSnapshotInput) SetAvailabilityZone(v string) *CreateInstancesFromSnapshotInput { +func (s *CreateRelationalDatabaseFromSnapshotInput) SetAvailabilityZone(v string) *CreateRelationalDatabaseFromSnapshotInput { s.AvailabilityZone = &v return s } -// SetBundleId sets the BundleId field's value. -func (s *CreateInstancesFromSnapshotInput) SetBundleId(v string) *CreateInstancesFromSnapshotInput { - s.BundleId = &v +// SetPubliclyAccessible sets the PubliclyAccessible field's value. +func (s *CreateRelationalDatabaseFromSnapshotInput) SetPubliclyAccessible(v bool) *CreateRelationalDatabaseFromSnapshotInput { + s.PubliclyAccessible = &v return s } -// SetInstanceNames sets the InstanceNames field's value. -func (s *CreateInstancesFromSnapshotInput) SetInstanceNames(v []*string) *CreateInstancesFromSnapshotInput { - s.InstanceNames = v +// SetRelationalDatabaseBundleId sets the RelationalDatabaseBundleId field's value. +func (s *CreateRelationalDatabaseFromSnapshotInput) SetRelationalDatabaseBundleId(v string) *CreateRelationalDatabaseFromSnapshotInput { + s.RelationalDatabaseBundleId = &v return s } -// SetInstanceSnapshotName sets the InstanceSnapshotName field's value. -func (s *CreateInstancesFromSnapshotInput) SetInstanceSnapshotName(v string) *CreateInstancesFromSnapshotInput { - s.InstanceSnapshotName = &v +// SetRelationalDatabaseName sets the RelationalDatabaseName field's value. +func (s *CreateRelationalDatabaseFromSnapshotInput) SetRelationalDatabaseName(v string) *CreateRelationalDatabaseFromSnapshotInput { + s.RelationalDatabaseName = &v return s } -// SetKeyPairName sets the KeyPairName field's value. -func (s *CreateInstancesFromSnapshotInput) SetKeyPairName(v string) *CreateInstancesFromSnapshotInput { - s.KeyPairName = &v +// SetRelationalDatabaseSnapshotName sets the RelationalDatabaseSnapshotName field's value. 
+func (s *CreateRelationalDatabaseFromSnapshotInput) SetRelationalDatabaseSnapshotName(v string) *CreateRelationalDatabaseFromSnapshotInput { + s.RelationalDatabaseSnapshotName = &v return s } -// SetRestoreDate sets the RestoreDate field's value. -func (s *CreateInstancesFromSnapshotInput) SetRestoreDate(v string) *CreateInstancesFromSnapshotInput { - s.RestoreDate = &v +// SetRestoreTime sets the RestoreTime field's value. +func (s *CreateRelationalDatabaseFromSnapshotInput) SetRestoreTime(v time.Time) *CreateRelationalDatabaseFromSnapshotInput { + s.RestoreTime = &v return s } -// SetSourceInstanceName sets the SourceInstanceName field's value. -func (s *CreateInstancesFromSnapshotInput) SetSourceInstanceName(v string) *CreateInstancesFromSnapshotInput { - s.SourceInstanceName = &v +// SetSourceRelationalDatabaseName sets the SourceRelationalDatabaseName field's value. +func (s *CreateRelationalDatabaseFromSnapshotInput) SetSourceRelationalDatabaseName(v string) *CreateRelationalDatabaseFromSnapshotInput { + s.SourceRelationalDatabaseName = &v return s } // SetTags sets the Tags field's value. -func (s *CreateInstancesFromSnapshotInput) SetTags(v []*Tag) *CreateInstancesFromSnapshotInput { +func (s *CreateRelationalDatabaseFromSnapshotInput) SetTags(v []*Tag) *CreateRelationalDatabaseFromSnapshotInput { s.Tags = v return s } -// SetUseLatestRestorableAutoSnapshot sets the UseLatestRestorableAutoSnapshot field's value. -func (s *CreateInstancesFromSnapshotInput) SetUseLatestRestorableAutoSnapshot(v bool) *CreateInstancesFromSnapshotInput { - s.UseLatestRestorableAutoSnapshot = &v - return s -} - -// SetUserData sets the UserData field's value. -func (s *CreateInstancesFromSnapshotInput) SetUserData(v string) *CreateInstancesFromSnapshotInput { - s.UserData = &v +// SetUseLatestRestorableTime sets the UseLatestRestorableTime field's value. +func (s *CreateRelationalDatabaseFromSnapshotInput) SetUseLatestRestorableTime(v bool) *CreateRelationalDatabaseFromSnapshotInput { + s.UseLatestRestorableTime = &v return s } -type CreateInstancesFromSnapshotOutput struct { +type CreateRelationalDatabaseFromSnapshotOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CreateInstancesFromSnapshotOutput) String() string { +func (s CreateRelationalDatabaseFromSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateInstancesFromSnapshotOutput) GoString() string { +func (s CreateRelationalDatabaseFromSnapshotOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *CreateInstancesFromSnapshotOutput) SetOperations(v []*Operation) *CreateInstancesFromSnapshotOutput { +func (s *CreateRelationalDatabaseFromSnapshotOutput) SetOperations(v []*Operation) *CreateRelationalDatabaseFromSnapshotOutput { s.Operations = v return s } -type CreateInstancesInput struct { +type CreateRelationalDatabaseInput struct { _ struct{} `type:"structure"` - // An array of objects representing the add-ons to enable for the new instance. 
- AddOns []*AddOnRequest `locationName:"addOns" type:"list"` + // The Availability Zone in which to create your new database. Use the us-east-2a + // case-sensitive format. + // + // You can get a list of Availability Zones by using the get regions operation. + // Be sure to add the include relational database Availability Zones parameter + // to your request. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - // The Availability Zone in which to create your instance. Use the following - // format: us-east-2a (case sensitive). You can get a list of Availability Zones - // by using the get regions (http://docs.aws.amazon.com/lightsail/2016-11-28/api-reference/API_GetRegions.html) - // operation. Be sure to add the include Availability Zones parameter to your - // request. + // The name of the master database created when the Lightsail database resource + // is created. // - // AvailabilityZone is a required field - AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + // Constraints: + // + // * Must contain from 1 to 64 alphanumeric characters. + // + // * Cannot be a word reserved by the specified database engine + // + // MasterDatabaseName is a required field + MasterDatabaseName *string `locationName:"masterDatabaseName" type:"string" required:"true"` - // The ID for a virtual private server image (e.g., app_wordpress_4_4 or app_lamp_7_0). - // Use the get blueprints operation to return a list of available images (or - // blueprints). + // The password for the master user of your new database. The password can include + // any printable ASCII character except "/", """, or "@". // - // Use active blueprints when creating new instances. Inactive blueprints are - // listed to support customers with existing instances and are not necessarily - // available to create new instances. Blueprints are marked inactive when they - // become outdated due to operating system updates or new application releases. + // Constraints: Must contain 8 to 41 characters. + MasterUserPassword *string `locationName:"masterUserPassword" type:"string" sensitive:"true"` + + // The master user name for your new database. // - // BlueprintId is a required field - BlueprintId *string `locationName:"blueprintId" type:"string" required:"true"` + // Constraints: + // + // * Master user name is required. + // + // * Must contain from 1 to 16 alphanumeric characters. + // + // * The first character must be a letter. + // + // * Cannot be a reserved word for the database engine you choose. For more + // information about reserved words in MySQL 5.6 or 5.7, see the Keywords + // and Reserved Words articles for MySQL 5.6 (https://dev.mysql.com/doc/refman/5.6/en/keywords.html) + // or MySQL 5.7 (https://dev.mysql.com/doc/refman/5.7/en/keywords.html) respectively. + // + // MasterUsername is a required field + MasterUsername *string `locationName:"masterUsername" type:"string" required:"true"` - // The bundle of specification information for your virtual private server (or - // instance), including the pricing plan (e.g., micro_1_0). + // The daily time range during which automated backups are created for your + // new database if automated backups are enabled. // - // BundleId is a required field - BundleId *string `locationName:"bundleId" type:"string" required:"true"` + // The default is a 30-minute window selected at random from an 8-hour block + // of time for each AWS Region. 
For more information about the preferred backup + // window time blocks for each region, see the Working With Backups (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow) + // guide in the Amazon Relational Database Service (Amazon RDS) documentation. + // + // Constraints: + // + // * Must be in the hh24:mi-hh24:mi format. Example: 16:00-16:30 + // + // * Specified in Coordinated Universal Time (UTC). + // + // * Must not conflict with the preferred maintenance window. + // + // * Must be at least 30 minutes. + PreferredBackupWindow *string `locationName:"preferredBackupWindow" type:"string"` - // (Deprecated) The name for your custom image. + // The weekly time range during which system maintenance can occur on your new + // database. // - // In releases prior to June 12, 2017, this parameter was ignored by the API. - // It is now deprecated. + // The default is a 30-minute window selected at random from an 8-hour block + // of time for each AWS Region, occurring on a random day of the week. // - // Deprecated: CustomImageName has been deprecated - CustomImageName *string `locationName:"customImageName" deprecated:"true" type:"string"` + // Constraints: + // + // * Must be in the ddd:hh24:mi-ddd:hh24:mi format. + // + // * Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. + // + // * Must be at least 30 minutes. + // + // * Specified in Coordinated Universal Time (UTC). + // + // * Example: Tue:17:00-Tue:17:30 + PreferredMaintenanceWindow *string `locationName:"preferredMaintenanceWindow" type:"string"` - // The names to use for your new Lightsail instances. Separate multiple values - // using quotation marks and commas, for example: ["MyFirstInstance","MySecondInstance"] + // Specifies the accessibility options for your new database. A value of true + // specifies a database that is available to resources outside of your Lightsail + // account. A value of false specifies a database that is available only to + // your Lightsail resources in the same region as your database. + PubliclyAccessible *bool `locationName:"publiclyAccessible" type:"boolean"` + + // The blueprint ID for your new database. A blueprint describes the major engine + // version of a database. + // + // You can get a list of database blueprints IDs by using the get relational + // database blueprints operation. + // + // RelationalDatabaseBlueprintId is a required field + RelationalDatabaseBlueprintId *string `locationName:"relationalDatabaseBlueprintId" type:"string" required:"true"` + + // The bundle ID for your new database. A bundle describes the performance specifications + // for your database. + // + // You can get a list of database bundle IDs by using the get relational database + // bundles operation. + // + // RelationalDatabaseBundleId is a required field + RelationalDatabaseBundleId *string `locationName:"relationalDatabaseBundleId" type:"string" required:"true"` + + // The name to use for your new database. + // + // Constraints: + // + // * Must contain from 2 to 255 alphanumeric characters, or hyphens. + // + // * The first and last character must be a letter or number. // - // InstanceNames is a required field - InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` - - // The name of your key pair. 
- KeyPairName *string `locationName:"keyPairName" type:"string"` + // RelationalDatabaseName is a required field + RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"` // The tag keys and optional values to add to the resource during create. // - // To tag a resource after it has been created, see the tag resource operation. + // Use the TagResource action to tag a resource after it's created. Tags []*Tag `locationName:"tags" type:"list"` - - // A launch script you can create that configures a server with additional user - // data. For example, you might want to run apt-get -y update. - // - // Depending on the machine image you choose, the command to get software on - // your instance varies. Amazon Linux and CentOS use yum, Debian and Ubuntu - // use apt-get, and FreeBSD uses pkg. For a complete list, see the Dev Guide - // (https://lightsail.aws.amazon.com/ls/docs/getting-started/article/compare-options-choose-lightsail-instance-image). - UserData *string `locationName:"userData" type:"string"` } // String returns the string representation -func (s CreateInstancesInput) String() string { +func (s CreateRelationalDatabaseInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateInstancesInput) GoString() string { +func (s CreateRelationalDatabaseInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateInstancesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateInstancesInput"} - if s.AvailabilityZone == nil { - invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) +func (s *CreateRelationalDatabaseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRelationalDatabaseInput"} + if s.MasterDatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("MasterDatabaseName")) } - if s.BlueprintId == nil { - invalidParams.Add(request.NewErrParamRequired("BlueprintId")) + if s.MasterUsername == nil { + invalidParams.Add(request.NewErrParamRequired("MasterUsername")) } - if s.BundleId == nil { - invalidParams.Add(request.NewErrParamRequired("BundleId")) + if s.RelationalDatabaseBlueprintId == nil { + invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseBlueprintId")) } - if s.InstanceNames == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceNames")) + if s.RelationalDatabaseBundleId == nil { + invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseBundleId")) } - if s.AddOns != nil { - for i, v := range s.AddOns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AddOns", i), err.(request.ErrInvalidParams)) - } - } + if s.RelationalDatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName")) } if invalidParams.Len() > 0 { @@ -15156,114 +18367,140 @@ func (s *CreateInstancesInput) Validate() error { return nil } -// SetAddOns sets the AddOns field's value. -func (s *CreateInstancesInput) SetAddOns(v []*AddOnRequest) *CreateInstancesInput { - s.AddOns = v +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *CreateRelationalDatabaseInput) SetAvailabilityZone(v string) *CreateRelationalDatabaseInput { + s.AvailabilityZone = &v return s } -// SetAvailabilityZone sets the AvailabilityZone field's value. 
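// A minimal sketch of CreateRelationalDatabaseInput covering the five required
// fields checked by Validate (master database name, master username, blueprint ID,
// bundle ID, and database name) plus the backup/maintenance window formats described
// above. The blueprint and bundle IDs and other values are placeholders; list real
// ones with GetRelationalDatabaseBlueprints and GetRelationalDatabaseBundles.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	svc := lightsail.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-2")})))

	input := &lightsail.CreateRelationalDatabaseInput{
		RelationalDatabaseName:        aws.String("example-db"), // 2-255 letters, numbers, or hyphens
		RelationalDatabaseBlueprintId: aws.String("mysql_5_7"),  // major engine version (placeholder)
		RelationalDatabaseBundleId:    aws.String("micro_1_0"),  // performance tier (placeholder)
		MasterDatabaseName:            aws.String("appdb"),
		MasterUsername:                aws.String("dbadmin"),
		MasterUserPassword:            aws.String("change-me-please-41"), // 8-41 printable ASCII chars
		AvailabilityZone:              aws.String("us-east-2a"),
		PreferredBackupWindow:         aws.String("16:00-16:30"),         // hh24:mi-hh24:mi, UTC
		PreferredMaintenanceWindow:    aws.String("Tue:17:00-Tue:17:30"), // ddd:hh24:mi-ddd:hh24:mi, UTC
		PubliclyAccessible:            aws.Bool(false),
		Tags: []*lightsail.Tag{
			{Key: aws.String("env"), Value: aws.String("dev")},
		},
	}

	out, err := svc.CreateRelationalDatabase(input)
	if err != nil {
		fmt.Println("create database failed:", err)
		return
	}
	for _, op := range out.Operations {
		fmt.Println(aws.StringValue(op.Id), aws.StringValue(op.Status))
	}
}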
-func (s *CreateInstancesInput) SetAvailabilityZone(v string) *CreateInstancesInput { - s.AvailabilityZone = &v +// SetMasterDatabaseName sets the MasterDatabaseName field's value. +func (s *CreateRelationalDatabaseInput) SetMasterDatabaseName(v string) *CreateRelationalDatabaseInput { + s.MasterDatabaseName = &v return s } -// SetBlueprintId sets the BlueprintId field's value. -func (s *CreateInstancesInput) SetBlueprintId(v string) *CreateInstancesInput { - s.BlueprintId = &v +// SetMasterUserPassword sets the MasterUserPassword field's value. +func (s *CreateRelationalDatabaseInput) SetMasterUserPassword(v string) *CreateRelationalDatabaseInput { + s.MasterUserPassword = &v return s } -// SetBundleId sets the BundleId field's value. -func (s *CreateInstancesInput) SetBundleId(v string) *CreateInstancesInput { - s.BundleId = &v +// SetMasterUsername sets the MasterUsername field's value. +func (s *CreateRelationalDatabaseInput) SetMasterUsername(v string) *CreateRelationalDatabaseInput { + s.MasterUsername = &v return s } -// SetCustomImageName sets the CustomImageName field's value. -func (s *CreateInstancesInput) SetCustomImageName(v string) *CreateInstancesInput { - s.CustomImageName = &v +// SetPreferredBackupWindow sets the PreferredBackupWindow field's value. +func (s *CreateRelationalDatabaseInput) SetPreferredBackupWindow(v string) *CreateRelationalDatabaseInput { + s.PreferredBackupWindow = &v return s } -// SetInstanceNames sets the InstanceNames field's value. -func (s *CreateInstancesInput) SetInstanceNames(v []*string) *CreateInstancesInput { - s.InstanceNames = v +// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. +func (s *CreateRelationalDatabaseInput) SetPreferredMaintenanceWindow(v string) *CreateRelationalDatabaseInput { + s.PreferredMaintenanceWindow = &v return s } -// SetKeyPairName sets the KeyPairName field's value. -func (s *CreateInstancesInput) SetKeyPairName(v string) *CreateInstancesInput { - s.KeyPairName = &v +// SetPubliclyAccessible sets the PubliclyAccessible field's value. +func (s *CreateRelationalDatabaseInput) SetPubliclyAccessible(v bool) *CreateRelationalDatabaseInput { + s.PubliclyAccessible = &v return s } -// SetTags sets the Tags field's value. -func (s *CreateInstancesInput) SetTags(v []*Tag) *CreateInstancesInput { - s.Tags = v +// SetRelationalDatabaseBlueprintId sets the RelationalDatabaseBlueprintId field's value. +func (s *CreateRelationalDatabaseInput) SetRelationalDatabaseBlueprintId(v string) *CreateRelationalDatabaseInput { + s.RelationalDatabaseBlueprintId = &v return s } -// SetUserData sets the UserData field's value. -func (s *CreateInstancesInput) SetUserData(v string) *CreateInstancesInput { - s.UserData = &v +// SetRelationalDatabaseBundleId sets the RelationalDatabaseBundleId field's value. +func (s *CreateRelationalDatabaseInput) SetRelationalDatabaseBundleId(v string) *CreateRelationalDatabaseInput { + s.RelationalDatabaseBundleId = &v return s } -type CreateInstancesOutput struct { +// SetRelationalDatabaseName sets the RelationalDatabaseName field's value. +func (s *CreateRelationalDatabaseInput) SetRelationalDatabaseName(v string) *CreateRelationalDatabaseInput { + s.RelationalDatabaseName = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *CreateRelationalDatabaseInput) SetTags(v []*Tag) *CreateRelationalDatabaseInput { + s.Tags = v + return s +} + +type CreateRelationalDatabaseOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CreateInstancesOutput) String() string { +func (s CreateRelationalDatabaseOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateInstancesOutput) GoString() string { +func (s CreateRelationalDatabaseOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *CreateInstancesOutput) SetOperations(v []*Operation) *CreateInstancesOutput { +func (s *CreateRelationalDatabaseOutput) SetOperations(v []*Operation) *CreateRelationalDatabaseOutput { s.Operations = v return s } -type CreateKeyPairInput struct { +type CreateRelationalDatabaseSnapshotInput struct { _ struct{} `type:"structure"` - // The name for your new key pair. + // The name of the database on which to base your new snapshot. // - // KeyPairName is a required field - KeyPairName *string `locationName:"keyPairName" type:"string" required:"true"` + // RelationalDatabaseName is a required field + RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"` + + // The name for your new database snapshot. + // + // Constraints: + // + // * Must contain from 2 to 255 alphanumeric characters, or hyphens. + // + // * The first and last character must be a letter or number. + // + // RelationalDatabaseSnapshotName is a required field + RelationalDatabaseSnapshotName *string `locationName:"relationalDatabaseSnapshotName" type:"string" required:"true"` // The tag keys and optional values to add to the resource during create. // - // To tag a resource after it has been created, see the tag resource operation. + // Use the TagResource action to tag a resource after it's created. Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s CreateKeyPairInput) String() string { +func (s CreateRelationalDatabaseSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateKeyPairInput) GoString() string { +func (s CreateRelationalDatabaseSnapshotInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateKeyPairInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateKeyPairInput"} - if s.KeyPairName == nil { - invalidParams.Add(request.NewErrParamRequired("KeyPairName")) +func (s *CreateRelationalDatabaseSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRelationalDatabaseSnapshotInput"} + if s.RelationalDatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName")) + } + if s.RelationalDatabaseSnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseSnapshotName")) } if invalidParams.Len() > 0 { @@ -15272,133 +18509,147 @@ func (s *CreateKeyPairInput) Validate() error { return nil } -// SetKeyPairName sets the KeyPairName field's value. -func (s *CreateKeyPairInput) SetKeyPairName(v string) *CreateKeyPairInput { - s.KeyPairName = &v +// SetRelationalDatabaseName sets the RelationalDatabaseName field's value. +func (s *CreateRelationalDatabaseSnapshotInput) SetRelationalDatabaseName(v string) *CreateRelationalDatabaseSnapshotInput { + s.RelationalDatabaseName = &v + return s +} + +// SetRelationalDatabaseSnapshotName sets the RelationalDatabaseSnapshotName field's value. +func (s *CreateRelationalDatabaseSnapshotInput) SetRelationalDatabaseSnapshotName(v string) *CreateRelationalDatabaseSnapshotInput { + s.RelationalDatabaseSnapshotName = &v return s } // SetTags sets the Tags field's value. -func (s *CreateKeyPairInput) SetTags(v []*Tag) *CreateKeyPairInput { +func (s *CreateRelationalDatabaseSnapshotInput) SetTags(v []*Tag) *CreateRelationalDatabaseSnapshotInput { s.Tags = v return s } -type CreateKeyPairOutput struct { +type CreateRelationalDatabaseSnapshotOutput struct { _ struct{} `type:"structure"` - // An array of key-value pairs containing information about the new key pair - // you just created. - KeyPair *KeyPair `locationName:"keyPair" type:"structure"` - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. - Operation *Operation `locationName:"operation" type:"structure"` - - // A base64-encoded RSA private key. - PrivateKeyBase64 *string `locationName:"privateKeyBase64" type:"string"` - - // A base64-encoded public key of the ssh-rsa type. - PublicKeyBase64 *string `locationName:"publicKeyBase64" type:"string"` + Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CreateKeyPairOutput) String() string { +func (s CreateRelationalDatabaseSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateKeyPairOutput) GoString() string { +func (s CreateRelationalDatabaseSnapshotOutput) GoString() string { return s.String() } -// SetKeyPair sets the KeyPair field's value. -func (s *CreateKeyPairOutput) SetKeyPair(v *KeyPair) *CreateKeyPairOutput { - s.KeyPair = v +// SetOperations sets the Operations field's value. +func (s *CreateRelationalDatabaseSnapshotOutput) SetOperations(v []*Operation) *CreateRelationalDatabaseSnapshotOutput { + s.Operations = v return s } -// SetOperation sets the Operation field's value. 
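// Sketch: taking a manual snapshot with CreateRelationalDatabaseSnapshotInput. Both
// names are required; the snapshot name follows the same 2-255 character,
// letter/number-bounded constraint as database names. Assumes an *lightsail.Lightsail
// client and the imports shown in the sketches above; names are placeholders.
func snapshotDatabase(svc *lightsail.Lightsail) error {
	out, err := svc.CreateRelationalDatabaseSnapshot(&lightsail.CreateRelationalDatabaseSnapshotInput{
		RelationalDatabaseName:         aws.String("example-db"),
		RelationalDatabaseSnapshotName: aws.String("example-db-snap-2021-07-20"),
		Tags: []*lightsail.Tag{{Key: aws.String("retain"), Value: aws.String("30d")}},
	})
	if err != nil {
		return err
	}
	for _, op := range out.Operations {
		fmt.Println(aws.StringValue(op.Id), aws.StringValue(op.Status))
	}
	return nil
}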
-func (s *CreateKeyPairOutput) SetOperation(v *Operation) *CreateKeyPairOutput { - s.Operation = v - return s +type DeleteAlarmInput struct { + _ struct{} `type:"structure"` + + // The name of the alarm to delete. + // + // AlarmName is a required field + AlarmName *string `locationName:"alarmName" type:"string" required:"true"` } -// SetPrivateKeyBase64 sets the PrivateKeyBase64 field's value. -func (s *CreateKeyPairOutput) SetPrivateKeyBase64(v string) *CreateKeyPairOutput { - s.PrivateKeyBase64 = &v - return s +// String returns the string representation +func (s DeleteAlarmInput) String() string { + return awsutil.Prettify(s) } -// SetPublicKeyBase64 sets the PublicKeyBase64 field's value. -func (s *CreateKeyPairOutput) SetPublicKeyBase64(v string) *CreateKeyPairOutput { - s.PublicKeyBase64 = &v +// GoString returns the string representation +func (s DeleteAlarmInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAlarmInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAlarmInput"} + if s.AlarmName == nil { + invalidParams.Add(request.NewErrParamRequired("AlarmName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAlarmName sets the AlarmName field's value. +func (s *DeleteAlarmInput) SetAlarmName(v string) *DeleteAlarmInput { + s.AlarmName = &v return s } -type CreateLoadBalancerInput struct { +type DeleteAlarmOutput struct { _ struct{} `type:"structure"` - // The optional alternative domains and subdomains to use with your SSL/TLS - // certificate (e.g., www.example.com, example.com, m.example.com, blog.example.com). - CertificateAlternativeNames []*string `locationName:"certificateAlternativeNames" type:"list"` + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` +} - // The domain name with which your certificate is associated (e.g., example.com). - // - // If you specify certificateDomainName, then certificateName is required (and - // vice-versa). - CertificateDomainName *string `locationName:"certificateDomainName" type:"string"` +// String returns the string representation +func (s DeleteAlarmOutput) String() string { + return awsutil.Prettify(s) +} - // The name of the SSL/TLS certificate. - // - // If you specify certificateName, then certificateDomainName is required (and - // vice-versa). - CertificateName *string `locationName:"certificateName" type:"string"` +// GoString returns the string representation +func (s DeleteAlarmOutput) GoString() string { + return s.String() +} - // The path you provided to perform the load balancer health check. If you didn't - // specify a health check path, Lightsail uses the root path of your website - // (e.g., "/"). - // - // You may want to specify a custom health check path other than the root of - // your application if your home page loads slowly or has a lot of media or - // scripting on it. - HealthCheckPath *string `locationName:"healthCheckPath" type:"string"` +// SetOperations sets the Operations field's value. +func (s *DeleteAlarmOutput) SetOperations(v []*Operation) *DeleteAlarmOutput { + s.Operations = v + return s +} - // The instance port where you're creating your load balancer. 
- // - // InstancePort is a required field - InstancePort *int64 `locationName:"instancePort" type:"integer" required:"true"` +type DeleteAutoSnapshotInput struct { + _ struct{} `type:"structure"` - // The name of your load balancer. + // The date of the automatic snapshot to delete in YYYY-MM-DD format. Use the + // get auto snapshots operation to get the available automatic snapshots for + // a resource. // - // LoadBalancerName is a required field - LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` + // Date is a required field + Date *string `locationName:"date" type:"string" required:"true"` - // The tag keys and optional values to add to the resource during create. + // The name of the source instance or disk from which to delete the automatic + // snapshot. // - // To tag a resource after it has been created, see the tag resource operation. - Tags []*Tag `locationName:"tags" type:"list"` + // ResourceName is a required field + ResourceName *string `locationName:"resourceName" type:"string" required:"true"` } // String returns the string representation -func (s CreateLoadBalancerInput) String() string { +func (s DeleteAutoSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateLoadBalancerInput) GoString() string { +func (s DeleteAutoSnapshotInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateLoadBalancerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateLoadBalancerInput"} - if s.InstancePort == nil { - invalidParams.Add(request.NewErrParamRequired("InstancePort")) +func (s *DeleteAutoSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAutoSnapshotInput"} + if s.Date == nil { + invalidParams.Add(request.NewErrParamRequired("Date")) } - if s.LoadBalancerName == nil { - invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) } if invalidParams.Len() > 0 { @@ -15407,130 +18658,71 @@ func (s *CreateLoadBalancerInput) Validate() error { return nil } -// SetCertificateAlternativeNames sets the CertificateAlternativeNames field's value. -func (s *CreateLoadBalancerInput) SetCertificateAlternativeNames(v []*string) *CreateLoadBalancerInput { - s.CertificateAlternativeNames = v - return s -} - -// SetCertificateDomainName sets the CertificateDomainName field's value. -func (s *CreateLoadBalancerInput) SetCertificateDomainName(v string) *CreateLoadBalancerInput { - s.CertificateDomainName = &v - return s -} - -// SetCertificateName sets the CertificateName field's value. -func (s *CreateLoadBalancerInput) SetCertificateName(v string) *CreateLoadBalancerInput { - s.CertificateName = &v - return s -} - -// SetHealthCheckPath sets the HealthCheckPath field's value. -func (s *CreateLoadBalancerInput) SetHealthCheckPath(v string) *CreateLoadBalancerInput { - s.HealthCheckPath = &v - return s -} - -// SetInstancePort sets the InstancePort field's value. -func (s *CreateLoadBalancerInput) SetInstancePort(v int64) *CreateLoadBalancerInput { - s.InstancePort = &v - return s -} - -// SetLoadBalancerName sets the LoadBalancerName field's value. -func (s *CreateLoadBalancerInput) SetLoadBalancerName(v string) *CreateLoadBalancerInput { - s.LoadBalancerName = &v +// SetDate sets the Date field's value. 
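// Sketch: deleting a single automatic snapshot. Both fields are required; the date
// must be in YYYY-MM-DD format, as returned by GetAutoSnapshots. Assumes the same
// client and imports as the sketches above; values are placeholders.
func deleteAutoSnapshot(svc *lightsail.Lightsail) error {
	_, err := svc.DeleteAutoSnapshot(&lightsail.DeleteAutoSnapshotInput{
		ResourceName: aws.String("my-instance"), // the instance or disk that owns the snapshot
		Date:         aws.String("2021-07-19"),
	})
	return err
}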
+func (s *DeleteAutoSnapshotInput) SetDate(v string) *DeleteAutoSnapshotInput { + s.Date = &v return s } -// SetTags sets the Tags field's value. -func (s *CreateLoadBalancerInput) SetTags(v []*Tag) *CreateLoadBalancerInput { - s.Tags = v +// SetResourceName sets the ResourceName field's value. +func (s *DeleteAutoSnapshotInput) SetResourceName(v string) *DeleteAutoSnapshotInput { + s.ResourceName = &v return s } -type CreateLoadBalancerOutput struct { +type DeleteAutoSnapshotOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CreateLoadBalancerOutput) String() string { +func (s DeleteAutoSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateLoadBalancerOutput) GoString() string { +func (s DeleteAutoSnapshotOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *CreateLoadBalancerOutput) SetOperations(v []*Operation) *CreateLoadBalancerOutput { +func (s *DeleteAutoSnapshotOutput) SetOperations(v []*Operation) *DeleteAutoSnapshotOutput { s.Operations = v return s } -type CreateLoadBalancerTlsCertificateInput struct { +type DeleteCertificateInput struct { _ struct{} `type:"structure"` - // An array of strings listing alternative domains and subdomains for your SSL/TLS - // certificate. Lightsail will de-dupe the names for you. You can have a maximum - // of 9 alternative names (in addition to the 1 primary domain). We do not support - // wildcards (e.g., *.example.com). - CertificateAlternativeNames []*string `locationName:"certificateAlternativeNames" type:"list"` - - // The domain name (e.g., example.com) for your SSL/TLS certificate. - // - // CertificateDomainName is a required field - CertificateDomainName *string `locationName:"certificateDomainName" type:"string" required:"true"` - - // The SSL/TLS certificate name. + // The name of the certificate to delete. // - // You can have up to 10 certificates in your account at one time. Each Lightsail - // load balancer can have up to 2 certificates associated with it at one time. - // There is also an overall limit to the number of certificates that can be - // issue in a 365-day period. For more information, see Limits (http://docs.aws.amazon.com/acm/latest/userguide/acm-limits.html). + // Use the GetCertificates action to get a list of certificate names that you + // can specify. // // CertificateName is a required field CertificateName *string `locationName:"certificateName" type:"string" required:"true"` - - // The load balancer name where you want to create the SSL/TLS certificate. - // - // LoadBalancerName is a required field - LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` - - // The tag keys and optional values to add to the resource during create. - // - // To tag a resource after it has been created, see the tag resource operation. 
- Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s CreateLoadBalancerTlsCertificateInput) String() string { +func (s DeleteCertificateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateLoadBalancerTlsCertificateInput) GoString() string { +func (s DeleteCertificateInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateLoadBalancerTlsCertificateInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateLoadBalancerTlsCertificateInput"} - if s.CertificateDomainName == nil { - invalidParams.Add(request.NewErrParamRequired("CertificateDomainName")) - } +func (s *DeleteCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCertificateInput"} if s.CertificateName == nil { invalidParams.Add(request.NewErrParamRequired("CertificateName")) } - if s.LoadBalancerName == nil { - invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) - } if invalidParams.Len() > 0 { return invalidParams @@ -15538,149 +18730,131 @@ func (s *CreateLoadBalancerTlsCertificateInput) Validate() error { return nil } -// SetCertificateAlternativeNames sets the CertificateAlternativeNames field's value. -func (s *CreateLoadBalancerTlsCertificateInput) SetCertificateAlternativeNames(v []*string) *CreateLoadBalancerTlsCertificateInput { - s.CertificateAlternativeNames = v - return s -} - -// SetCertificateDomainName sets the CertificateDomainName field's value. -func (s *CreateLoadBalancerTlsCertificateInput) SetCertificateDomainName(v string) *CreateLoadBalancerTlsCertificateInput { - s.CertificateDomainName = &v - return s -} - // SetCertificateName sets the CertificateName field's value. -func (s *CreateLoadBalancerTlsCertificateInput) SetCertificateName(v string) *CreateLoadBalancerTlsCertificateInput { +func (s *DeleteCertificateInput) SetCertificateName(v string) *DeleteCertificateInput { s.CertificateName = &v return s } -// SetLoadBalancerName sets the LoadBalancerName field's value. -func (s *CreateLoadBalancerTlsCertificateInput) SetLoadBalancerName(v string) *CreateLoadBalancerTlsCertificateInput { - s.LoadBalancerName = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateLoadBalancerTlsCertificateInput) SetTags(v []*Tag) *CreateLoadBalancerTlsCertificateInput { - s.Tags = v - return s -} - -type CreateLoadBalancerTlsCertificateOutput struct { +type DeleteCertificateOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CreateLoadBalancerTlsCertificateOutput) String() string { +func (s DeleteCertificateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateLoadBalancerTlsCertificateOutput) GoString() string { +func (s DeleteCertificateOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. 
-func (s *CreateLoadBalancerTlsCertificateOutput) SetOperations(v []*Operation) *CreateLoadBalancerTlsCertificateOutput { +func (s *DeleteCertificateOutput) SetOperations(v []*Operation) *DeleteCertificateOutput { s.Operations = v return s } -type CreateRelationalDatabaseFromSnapshotInput struct { +type DeleteContactMethodInput struct { _ struct{} `type:"structure"` - // The Availability Zone in which to create your new database. Use the us-east-2a - // case-sensitive format. + // The protocol that will be deleted, such as Email or SMS (text messaging). // - // You can get a list of Availability Zones by using the get regions operation. - // Be sure to add the include relational database Availability Zones parameter - // to your request. - AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + // To delete an Email and an SMS contact method if you added both, you must + // run separate DeleteContactMethod actions to delete each protocol. + // + // Protocol is a required field + Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"ContactProtocol"` +} - // Specifies the accessibility options for your new database. A value of true - // specifies a database that is available to resources outside of your Lightsail - // account. A value of false specifies a database that is available only to - // your Lightsail resources in the same region as your database. - PubliclyAccessible *bool `locationName:"publiclyAccessible" type:"boolean"` +// String returns the string representation +func (s DeleteContactMethodInput) String() string { + return awsutil.Prettify(s) +} - // The bundle ID for your new database. A bundle describes the performance specifications - // for your database. - // - // You can get a list of database bundle IDs by using the get relational database - // bundles operation. - // - // When creating a new database from a snapshot, you cannot choose a bundle - // that is smaller than the bundle of the source database. - RelationalDatabaseBundleId *string `locationName:"relationalDatabaseBundleId" type:"string"` +// GoString returns the string representation +func (s DeleteContactMethodInput) GoString() string { + return s.String() +} - // The name to use for your new database. - // - // Constraints: - // - // * Must contain from 2 to 255 alphanumeric characters, or hyphens. - // - // * The first and last character must be a letter or number. - // - // RelationalDatabaseName is a required field - RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteContactMethodInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteContactMethodInput"} + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } - // The name of the database snapshot from which to create your new database. - RelationalDatabaseSnapshotName *string `locationName:"relationalDatabaseSnapshotName" type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetProtocol sets the Protocol field's value. 
+func (s *DeleteContactMethodInput) SetProtocol(v string) *DeleteContactMethodInput { + s.Protocol = &v + return s +} + +type DeleteContactMethodOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s DeleteContactMethodOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteContactMethodOutput) GoString() string { + return s.String() +} - // The date and time to restore your database from. - // - // Constraints: - // - // * Must be before the latest restorable time for the database. - // - // * Cannot be specified if the use latest restorable time parameter is true. - // - // * Specified in Coordinated Universal Time (UTC). - // - // * Specified in the Unix time format. For example, if you wish to use a - // restore time of October 1, 2018, at 8 PM UTC, then you input 1538424000 - // as the restore time. - RestoreTime *time.Time `locationName:"restoreTime" type:"timestamp"` +// SetOperations sets the Operations field's value. +func (s *DeleteContactMethodOutput) SetOperations(v []*Operation) *DeleteContactMethodOutput { + s.Operations = v + return s +} - // The name of the source database. - SourceRelationalDatabaseName *string `locationName:"sourceRelationalDatabaseName" type:"string"` +type DeleteDiskInput struct { + _ struct{} `type:"structure"` - // The tag keys and optional values to add to the resource during create. + // The unique name of the disk you want to delete (e.g., my-disk). // - // To tag a resource after it has been created, see the tag resource operation. - Tags []*Tag `locationName:"tags" type:"list"` + // DiskName is a required field + DiskName *string `locationName:"diskName" type:"string" required:"true"` - // Specifies whether your database is restored from the latest backup time. - // A value of true restores from the latest backup time. - // - // Default: false - // - // Constraints: Cannot be specified if the restore time parameter is provided. - UseLatestRestorableTime *bool `locationName:"useLatestRestorableTime" type:"boolean"` + // A Boolean value to indicate whether to delete the enabled add-ons for the + // disk. + ForceDeleteAddOns *bool `locationName:"forceDeleteAddOns" type:"boolean"` } // String returns the string representation -func (s CreateRelationalDatabaseFromSnapshotInput) String() string { +func (s DeleteDiskInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateRelationalDatabaseFromSnapshotInput) GoString() string { +func (s DeleteDiskInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateRelationalDatabaseFromSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateRelationalDatabaseFromSnapshotInput"} - if s.RelationalDatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName")) +func (s *DeleteDiskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDiskInput"} + if s.DiskName == nil { + invalidParams.Add(request.NewErrParamRequired("DiskName")) } if invalidParams.Len() > 0 { @@ -15689,238 +18863,189 @@ func (s *CreateRelationalDatabaseFromSnapshotInput) Validate() error { return nil } -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *CreateRelationalDatabaseFromSnapshotInput) SetAvailabilityZone(v string) *CreateRelationalDatabaseFromSnapshotInput { - s.AvailabilityZone = &v +// SetDiskName sets the DiskName field's value. +func (s *DeleteDiskInput) SetDiskName(v string) *DeleteDiskInput { + s.DiskName = &v return s } -// SetPubliclyAccessible sets the PubliclyAccessible field's value. -func (s *CreateRelationalDatabaseFromSnapshotInput) SetPubliclyAccessible(v bool) *CreateRelationalDatabaseFromSnapshotInput { - s.PubliclyAccessible = &v +// SetForceDeleteAddOns sets the ForceDeleteAddOns field's value. +func (s *DeleteDiskInput) SetForceDeleteAddOns(v bool) *DeleteDiskInput { + s.ForceDeleteAddOns = &v return s } -// SetRelationalDatabaseBundleId sets the RelationalDatabaseBundleId field's value. -func (s *CreateRelationalDatabaseFromSnapshotInput) SetRelationalDatabaseBundleId(v string) *CreateRelationalDatabaseFromSnapshotInput { - s.RelationalDatabaseBundleId = &v - return s +type DeleteDiskOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` } -// SetRelationalDatabaseName sets the RelationalDatabaseName field's value. -func (s *CreateRelationalDatabaseFromSnapshotInput) SetRelationalDatabaseName(v string) *CreateRelationalDatabaseFromSnapshotInput { - s.RelationalDatabaseName = &v - return s +// String returns the string representation +func (s DeleteDiskOutput) String() string { + return awsutil.Prettify(s) } -// SetRelationalDatabaseSnapshotName sets the RelationalDatabaseSnapshotName field's value. -func (s *CreateRelationalDatabaseFromSnapshotInput) SetRelationalDatabaseSnapshotName(v string) *CreateRelationalDatabaseFromSnapshotInput { - s.RelationalDatabaseSnapshotName = &v - return s +// GoString returns the string representation +func (s DeleteDiskOutput) GoString() string { + return s.String() } -// SetRestoreTime sets the RestoreTime field's value. -func (s *CreateRelationalDatabaseFromSnapshotInput) SetRestoreTime(v time.Time) *CreateRelationalDatabaseFromSnapshotInput { - s.RestoreTime = &v +// SetOperations sets the Operations field's value. +func (s *DeleteDiskOutput) SetOperations(v []*Operation) *DeleteDiskOutput { + s.Operations = v return s } -// SetSourceRelationalDatabaseName sets the SourceRelationalDatabaseName field's value. 
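// Sketch: deleting a block storage disk. ForceDeleteAddOns is optional and only
// needed when the disk still has enabled add-ons (for example, automatic snapshots).
// Assumes the same client and imports as the sketches above; the disk name is a
// placeholder.
func deleteDisk(svc *lightsail.Lightsail) error {
	_, err := svc.DeleteDisk(&lightsail.DeleteDiskInput{
		DiskName:          aws.String("my-disk"),
		ForceDeleteAddOns: aws.Bool(true),
	})
	return err
}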
-func (s *CreateRelationalDatabaseFromSnapshotInput) SetSourceRelationalDatabaseName(v string) *CreateRelationalDatabaseFromSnapshotInput { - s.SourceRelationalDatabaseName = &v - return s +type DeleteDiskSnapshotInput struct { + _ struct{} `type:"structure"` + + // The name of the disk snapshot you want to delete (e.g., my-disk-snapshot). + // + // DiskSnapshotName is a required field + DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string" required:"true"` } -// SetTags sets the Tags field's value. -func (s *CreateRelationalDatabaseFromSnapshotInput) SetTags(v []*Tag) *CreateRelationalDatabaseFromSnapshotInput { - s.Tags = v - return s +// String returns the string representation +func (s DeleteDiskSnapshotInput) String() string { + return awsutil.Prettify(s) } -// SetUseLatestRestorableTime sets the UseLatestRestorableTime field's value. -func (s *CreateRelationalDatabaseFromSnapshotInput) SetUseLatestRestorableTime(v bool) *CreateRelationalDatabaseFromSnapshotInput { - s.UseLatestRestorableTime = &v +// GoString returns the string representation +func (s DeleteDiskSnapshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDiskSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDiskSnapshotInput"} + if s.DiskSnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("DiskSnapshotName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDiskSnapshotName sets the DiskSnapshotName field's value. +func (s *DeleteDiskSnapshotInput) SetDiskSnapshotName(v string) *DeleteDiskSnapshotInput { + s.DiskSnapshotName = &v return s } -type CreateRelationalDatabaseFromSnapshotOutput struct { +type DeleteDiskSnapshotOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CreateRelationalDatabaseFromSnapshotOutput) String() string { +func (s DeleteDiskSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateRelationalDatabaseFromSnapshotOutput) GoString() string { +func (s DeleteDiskSnapshotOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *CreateRelationalDatabaseFromSnapshotOutput) SetOperations(v []*Operation) *CreateRelationalDatabaseFromSnapshotOutput { +func (s *DeleteDiskSnapshotOutput) SetOperations(v []*Operation) *DeleteDiskSnapshotOutput { s.Operations = v return s } -type CreateRelationalDatabaseInput struct { +type DeleteDistributionInput struct { _ struct{} `type:"structure"` - // The Availability Zone in which to create your new database. Use the us-east-2a - // case-sensitive format. + // The name of the distribution to delete. // - // You can get a list of Availability Zones by using the get regions operation. - // Be sure to add the include relational database Availability Zones parameter - // to your request. - AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + // Use the GetDistributions action to get a list of distribution names that + // you can specify. 
+ DistributionName *string `locationName:"distributionName" type:"string"` +} - // The name of the master database created when the Lightsail database resource - // is created. - // - // Constraints: - // - // * Must contain from 1 to 64 alphanumeric characters. - // - // * Cannot be a word reserved by the specified database engine - // - // MasterDatabaseName is a required field - MasterDatabaseName *string `locationName:"masterDatabaseName" type:"string" required:"true"` +// String returns the string representation +func (s DeleteDistributionInput) String() string { + return awsutil.Prettify(s) +} - // The password for the master user of your new database. The password can include - // any printable ASCII character except "/", """, or "@". - // - // Constraints: Must contain 8 to 41 characters. - MasterUserPassword *string `locationName:"masterUserPassword" type:"string" sensitive:"true"` +// GoString returns the string representation +func (s DeleteDistributionInput) GoString() string { + return s.String() +} - // The master user name for your new database. - // - // Constraints: - // - // * Master user name is required. - // - // * Must contain from 1 to 16 alphanumeric characters. - // - // * The first character must be a letter. - // - // * Cannot be a reserved word for the database engine you choose. For more - // information about reserved words in MySQL 5.6 or 5.7, see the Keywords - // and Reserved Words articles for MySQL 5.6 (https://dev.mysql.com/doc/refman/5.6/en/keywords.html) - // or MySQL 5.7 (https://dev.mysql.com/doc/refman/5.7/en/keywords.html) respectively. - // - // MasterUsername is a required field - MasterUsername *string `locationName:"masterUsername" type:"string" required:"true"` +// SetDistributionName sets the DistributionName field's value. +func (s *DeleteDistributionInput) SetDistributionName(v string) *DeleteDistributionInput { + s.DistributionName = &v + return s +} - // The daily time range during which automated backups are created for your - // new database if automated backups are enabled. - // - // The default is a 30-minute window selected at random from an 8-hour block - // of time for each AWS Region. For more information about the preferred backup - // window time blocks for each region, see the Working With Backups (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithAutomatedBackups.html#USER_WorkingWithAutomatedBackups.BackupWindow) - // guide in the Amazon Relational Database Service (Amazon RDS) documentation. - // - // Constraints: - // - // * Must be in the hh24:mi-hh24:mi format. Example: 16:00-16:30 - // - // * Specified in Coordinated Universal Time (UTC). - // - // * Must not conflict with the preferred maintenance window. - // - // * Must be at least 30 minutes. - PreferredBackupWindow *string `locationName:"preferredBackupWindow" type:"string"` +type DeleteDistributionOutput struct { + _ struct{} `type:"structure"` - // The weekly time range during which system maintenance can occur on your new - // database. - // - // The default is a 30-minute window selected at random from an 8-hour block - // of time for each AWS Region, occurring on a random day of the week. - // - // Constraints: - // - // * Must be in the ddd:hh24:mi-ddd:hh24:mi format. - // - // * Valid days: Mon, Tue, Wed, Thu, Fri, Sat, Sun. - // - // * Must be at least 30 minutes. - // - // * Specified in Coordinated Universal Time (UTC). 
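// Sketch: deleting a Lightsail content delivery network distribution. Unlike most of
// the delete operations above, the output carries a single Operation rather than an
// Operations list. Assumes the same client and imports as the sketches above.
func deleteDistribution(svc *lightsail.Lightsail, name string) error {
	out, err := svc.DeleteDistribution(&lightsail.DeleteDistributionInput{
		DistributionName: aws.String(name), // list candidates with GetDistributions
	})
	if err != nil {
		return err
	}
	if out.Operation != nil {
		fmt.Println(aws.StringValue(out.Operation.Status))
	}
	return nil
}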
- // - // * Example: Tue:17:00-Tue:17:30 - PreferredMaintenanceWindow *string `locationName:"preferredMaintenanceWindow" type:"string"` + // An object that describes the result of the action, such as the status of + // the request, the timestamp of the request, and the resources affected by + // the request. + Operation *Operation `locationName:"operation" type:"structure"` +} - // Specifies the accessibility options for your new database. A value of true - // specifies a database that is available to resources outside of your Lightsail - // account. A value of false specifies a database that is available only to - // your Lightsail resources in the same region as your database. - PubliclyAccessible *bool `locationName:"publiclyAccessible" type:"boolean"` +// String returns the string representation +func (s DeleteDistributionOutput) String() string { + return awsutil.Prettify(s) +} - // The blueprint ID for your new database. A blueprint describes the major engine - // version of a database. - // - // You can get a list of database blueprints IDs by using the get relational - // database blueprints operation. - // - // RelationalDatabaseBlueprintId is a required field - RelationalDatabaseBlueprintId *string `locationName:"relationalDatabaseBlueprintId" type:"string" required:"true"` +// GoString returns the string representation +func (s DeleteDistributionOutput) GoString() string { + return s.String() +} + +// SetOperation sets the Operation field's value. +func (s *DeleteDistributionOutput) SetOperation(v *Operation) *DeleteDistributionOutput { + s.Operation = v + return s +} - // The bundle ID for your new database. A bundle describes the performance specifications - // for your database. - // - // You can get a list of database bundle IDs by using the get relational database - // bundles operation. - // - // RelationalDatabaseBundleId is a required field - RelationalDatabaseBundleId *string `locationName:"relationalDatabaseBundleId" type:"string" required:"true"` +type DeleteDomainEntryInput struct { + _ struct{} `type:"structure"` - // The name to use for your new database. - // - // Constraints: - // - // * Must contain from 2 to 255 alphanumeric characters, or hyphens. - // - // * The first and last character must be a letter or number. + // An array of key-value pairs containing information about your domain entries. // - // RelationalDatabaseName is a required field - RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"` + // DomainEntry is a required field + DomainEntry *DomainEntry `locationName:"domainEntry" type:"structure" required:"true"` - // The tag keys and optional values to add to the resource during create. + // The name of the domain entry to delete. // - // To tag a resource after it has been created, see the tag resource operation. - Tags []*Tag `locationName:"tags" type:"list"` + // DomainName is a required field + DomainName *string `locationName:"domainName" type:"string" required:"true"` } // String returns the string representation -func (s CreateRelationalDatabaseInput) String() string { +func (s DeleteDomainEntryInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateRelationalDatabaseInput) GoString() string { +func (s DeleteDomainEntryInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
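// Sketch: deleting a single DNS record from a Lightsail domain. The DomainEntry
// passed here describes the existing record to remove; the domain, record name, and
// target below are placeholders. Assumes the same client and imports as the sketches
// above.
func deleteDomainEntry(svc *lightsail.Lightsail) error {
	_, err := svc.DeleteDomainEntry(&lightsail.DeleteDomainEntryInput{
		DomainName: aws.String("example.com"),
		DomainEntry: &lightsail.DomainEntry{
			Name:   aws.String("www.example.com"),
			Type:   aws.String("A"),
			Target: aws.String("192.0.2.10"),
		},
	})
	return err
}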
-func (s *CreateRelationalDatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateRelationalDatabaseInput"} - if s.MasterDatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("MasterDatabaseName")) - } - if s.MasterUsername == nil { - invalidParams.Add(request.NewErrParamRequired("MasterUsername")) - } - if s.RelationalDatabaseBlueprintId == nil { - invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseBlueprintId")) - } - if s.RelationalDatabaseBundleId == nil { - invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseBundleId")) +func (s *DeleteDomainEntryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDomainEntryInput"} + if s.DomainEntry == nil { + invalidParams.Add(request.NewErrParamRequired("DomainEntry")) } - if s.RelationalDatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName")) + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) } if invalidParams.Len() > 0 { @@ -15929,140 +19054,134 @@ func (s *CreateRelationalDatabaseInput) Validate() error { return nil } -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *CreateRelationalDatabaseInput) SetAvailabilityZone(v string) *CreateRelationalDatabaseInput { - s.AvailabilityZone = &v +// SetDomainEntry sets the DomainEntry field's value. +func (s *DeleteDomainEntryInput) SetDomainEntry(v *DomainEntry) *DeleteDomainEntryInput { + s.DomainEntry = v return s } -// SetMasterDatabaseName sets the MasterDatabaseName field's value. -func (s *CreateRelationalDatabaseInput) SetMasterDatabaseName(v string) *CreateRelationalDatabaseInput { - s.MasterDatabaseName = &v +// SetDomainName sets the DomainName field's value. +func (s *DeleteDomainEntryInput) SetDomainName(v string) *DeleteDomainEntryInput { + s.DomainName = &v return s } -// SetMasterUserPassword sets the MasterUserPassword field's value. -func (s *CreateRelationalDatabaseInput) SetMasterUserPassword(v string) *CreateRelationalDatabaseInput { - s.MasterUserPassword = &v - return s +type DeleteDomainEntryOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operation *Operation `locationName:"operation" type:"structure"` } -// SetMasterUsername sets the MasterUsername field's value. -func (s *CreateRelationalDatabaseInput) SetMasterUsername(v string) *CreateRelationalDatabaseInput { - s.MasterUsername = &v - return s +// String returns the string representation +func (s DeleteDomainEntryOutput) String() string { + return awsutil.Prettify(s) } -// SetPreferredBackupWindow sets the PreferredBackupWindow field's value. -func (s *CreateRelationalDatabaseInput) SetPreferredBackupWindow(v string) *CreateRelationalDatabaseInput { - s.PreferredBackupWindow = &v - return s +// GoString returns the string representation +func (s DeleteDomainEntryOutput) GoString() string { + return s.String() } -// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. -func (s *CreateRelationalDatabaseInput) SetPreferredMaintenanceWindow(v string) *CreateRelationalDatabaseInput { - s.PreferredMaintenanceWindow = &v +// SetOperation sets the Operation field's value. 
+func (s *DeleteDomainEntryOutput) SetOperation(v *Operation) *DeleteDomainEntryOutput { + s.Operation = v return s } -// SetPubliclyAccessible sets the PubliclyAccessible field's value. -func (s *CreateRelationalDatabaseInput) SetPubliclyAccessible(v bool) *CreateRelationalDatabaseInput { - s.PubliclyAccessible = &v - return s +type DeleteDomainInput struct { + _ struct{} `type:"structure"` + + // The specific domain name to delete. + // + // DomainName is a required field + DomainName *string `locationName:"domainName" type:"string" required:"true"` } -// SetRelationalDatabaseBlueprintId sets the RelationalDatabaseBlueprintId field's value. -func (s *CreateRelationalDatabaseInput) SetRelationalDatabaseBlueprintId(v string) *CreateRelationalDatabaseInput { - s.RelationalDatabaseBlueprintId = &v - return s +// String returns the string representation +func (s DeleteDomainInput) String() string { + return awsutil.Prettify(s) } -// SetRelationalDatabaseBundleId sets the RelationalDatabaseBundleId field's value. -func (s *CreateRelationalDatabaseInput) SetRelationalDatabaseBundleId(v string) *CreateRelationalDatabaseInput { - s.RelationalDatabaseBundleId = &v - return s +// GoString returns the string representation +func (s DeleteDomainInput) GoString() string { + return s.String() } -// SetRelationalDatabaseName sets the RelationalDatabaseName field's value. -func (s *CreateRelationalDatabaseInput) SetRelationalDatabaseName(v string) *CreateRelationalDatabaseInput { - s.RelationalDatabaseName = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetTags sets the Tags field's value. -func (s *CreateRelationalDatabaseInput) SetTags(v []*Tag) *CreateRelationalDatabaseInput { - s.Tags = v +// SetDomainName sets the DomainName field's value. +func (s *DeleteDomainInput) SetDomainName(v string) *DeleteDomainInput { + s.DomainName = &v return s } -type CreateRelationalDatabaseOutput struct { +type DeleteDomainOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. - Operations []*Operation `locationName:"operations" type:"list"` + Operation *Operation `locationName:"operation" type:"structure"` } // String returns the string representation -func (s CreateRelationalDatabaseOutput) String() string { +func (s DeleteDomainOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateRelationalDatabaseOutput) GoString() string { +func (s DeleteDomainOutput) GoString() string { return s.String() } -// SetOperations sets the Operations field's value. -func (s *CreateRelationalDatabaseOutput) SetOperations(v []*Operation) *CreateRelationalDatabaseOutput { - s.Operations = v +// SetOperation sets the Operation field's value. 
+func (s *DeleteDomainOutput) SetOperation(v *Operation) *DeleteDomainOutput { + s.Operation = v return s } -type CreateRelationalDatabaseSnapshotInput struct { +type DeleteInstanceInput struct { _ struct{} `type:"structure"` - // The name of the database on which to base your new snapshot. - // - // RelationalDatabaseName is a required field - RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"` - - // The name for your new database snapshot. - // - // Constraints: - // - // * Must contain from 2 to 255 alphanumeric characters, or hyphens. - // - // * The first and last character must be a letter or number. - // - // RelationalDatabaseSnapshotName is a required field - RelationalDatabaseSnapshotName *string `locationName:"relationalDatabaseSnapshotName" type:"string" required:"true"` + // A Boolean value to indicate whether to delete the enabled add-ons for the + // disk. + ForceDeleteAddOns *bool `locationName:"forceDeleteAddOns" type:"boolean"` - // The tag keys and optional values to add to the resource during create. + // The name of the instance to delete. // - // To tag a resource after it has been created, see the tag resource operation. - Tags []*Tag `locationName:"tags" type:"list"` + // InstanceName is a required field + InstanceName *string `locationName:"instanceName" type:"string" required:"true"` } // String returns the string representation -func (s CreateRelationalDatabaseSnapshotInput) String() string { +func (s DeleteInstanceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateRelationalDatabaseSnapshotInput) GoString() string { +func (s DeleteInstanceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateRelationalDatabaseSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateRelationalDatabaseSnapshotInput"} - if s.RelationalDatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName")) - } - if s.RelationalDatabaseSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseSnapshotName")) +func (s *DeleteInstanceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInstanceInput"} + if s.InstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceName")) } if invalidParams.Len() > 0 { @@ -16071,73 +19190,67 @@ func (s *CreateRelationalDatabaseSnapshotInput) Validate() error { return nil } -// SetRelationalDatabaseName sets the RelationalDatabaseName field's value. -func (s *CreateRelationalDatabaseSnapshotInput) SetRelationalDatabaseName(v string) *CreateRelationalDatabaseSnapshotInput { - s.RelationalDatabaseName = &v - return s -} - -// SetRelationalDatabaseSnapshotName sets the RelationalDatabaseSnapshotName field's value. -func (s *CreateRelationalDatabaseSnapshotInput) SetRelationalDatabaseSnapshotName(v string) *CreateRelationalDatabaseSnapshotInput { - s.RelationalDatabaseSnapshotName = &v +// SetForceDeleteAddOns sets the ForceDeleteAddOns field's value. +func (s *DeleteInstanceInput) SetForceDeleteAddOns(v bool) *DeleteInstanceInput { + s.ForceDeleteAddOns = &v return s } -// SetTags sets the Tags field's value. -func (s *CreateRelationalDatabaseSnapshotInput) SetTags(v []*Tag) *CreateRelationalDatabaseSnapshotInput { - s.Tags = v +// SetInstanceName sets the InstanceName field's value. 
+func (s *DeleteInstanceInput) SetInstanceName(v string) *DeleteInstanceInput { + s.InstanceName = &v return s } -type CreateRelationalDatabaseSnapshotOutput struct { +type DeleteInstanceOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s CreateRelationalDatabaseSnapshotOutput) String() string { +func (s DeleteInstanceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateRelationalDatabaseSnapshotOutput) GoString() string { +func (s DeleteInstanceOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *CreateRelationalDatabaseSnapshotOutput) SetOperations(v []*Operation) *CreateRelationalDatabaseSnapshotOutput { +func (s *DeleteInstanceOutput) SetOperations(v []*Operation) *DeleteInstanceOutput { s.Operations = v return s } -type DeleteAlarmInput struct { +type DeleteInstanceSnapshotInput struct { _ struct{} `type:"structure"` - // The name of the alarm to delete. + // The name of the snapshot to delete. // - // AlarmName is a required field - AlarmName *string `locationName:"alarmName" type:"string" required:"true"` + // InstanceSnapshotName is a required field + InstanceSnapshotName *string `locationName:"instanceSnapshotName" type:"string" required:"true"` } // String returns the string representation -func (s DeleteAlarmInput) String() string { +func (s DeleteInstanceSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteAlarmInput) GoString() string { +func (s DeleteInstanceSnapshotInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteAlarmInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteAlarmInput"} - if s.AlarmName == nil { - invalidParams.Add(request.NewErrParamRequired("AlarmName")) +func (s *DeleteInstanceSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInstanceSnapshotInput"} + if s.InstanceSnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceSnapshotName")) } if invalidParams.Len() > 0 { @@ -16146,72 +19259,61 @@ func (s *DeleteAlarmInput) Validate() error { return nil } -// SetAlarmName sets the AlarmName field's value. -func (s *DeleteAlarmInput) SetAlarmName(v string) *DeleteAlarmInput { - s.AlarmName = &v +// SetInstanceSnapshotName sets the InstanceSnapshotName field's value. +func (s *DeleteInstanceSnapshotInput) SetInstanceSnapshotName(v string) *DeleteInstanceSnapshotInput { + s.InstanceSnapshotName = &v return s } -type DeleteAlarmOutput struct { +type DeleteInstanceSnapshotOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. 
Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s DeleteAlarmOutput) String() string { +func (s DeleteInstanceSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteAlarmOutput) GoString() string { +func (s DeleteInstanceSnapshotOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *DeleteAlarmOutput) SetOperations(v []*Operation) *DeleteAlarmOutput { +func (s *DeleteInstanceSnapshotOutput) SetOperations(v []*Operation) *DeleteInstanceSnapshotOutput { s.Operations = v return s } -type DeleteAutoSnapshotInput struct { +type DeleteKeyPairInput struct { _ struct{} `type:"structure"` - // The date of the automatic snapshot to delete in YYYY-MM-DD format. Use the - // get auto snapshots operation to get the available automatic snapshots for - // a resource. - // - // Date is a required field - Date *string `locationName:"date" type:"string" required:"true"` - - // The name of the source instance or disk from which to delete the automatic - // snapshot. + // The name of the key pair to delete. // - // ResourceName is a required field - ResourceName *string `locationName:"resourceName" type:"string" required:"true"` + // KeyPairName is a required field + KeyPairName *string `locationName:"keyPairName" type:"string" required:"true"` } // String returns the string representation -func (s DeleteAutoSnapshotInput) String() string { +func (s DeleteKeyPairInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteAutoSnapshotInput) GoString() string { +func (s DeleteKeyPairInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteAutoSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteAutoSnapshotInput"} - if s.Date == nil { - invalidParams.Add(request.NewErrParamRequired("Date")) - } - if s.ResourceName == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceName")) +func (s *DeleteKeyPairInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteKeyPairInput"} + if s.KeyPairName == nil { + invalidParams.Add(request.NewErrParamRequired("KeyPairName")) } if invalidParams.Len() > 0 { @@ -16220,70 +19322,61 @@ func (s *DeleteAutoSnapshotInput) Validate() error { return nil } -// SetDate sets the Date field's value. -func (s *DeleteAutoSnapshotInput) SetDate(v string) *DeleteAutoSnapshotInput { - s.Date = &v - return s -} - -// SetResourceName sets the ResourceName field's value. -func (s *DeleteAutoSnapshotInput) SetResourceName(v string) *DeleteAutoSnapshotInput { - s.ResourceName = &v +// SetKeyPairName sets the KeyPairName field's value. +func (s *DeleteKeyPairInput) SetKeyPairName(v string) *DeleteKeyPairInput { + s.KeyPairName = &v return s } -type DeleteAutoSnapshotOutput struct { +type DeleteKeyPairOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. 
- Operations []*Operation `locationName:"operations" type:"list"` + Operation *Operation `locationName:"operation" type:"structure"` } // String returns the string representation -func (s DeleteAutoSnapshotOutput) String() string { +func (s DeleteKeyPairOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteAutoSnapshotOutput) GoString() string { +func (s DeleteKeyPairOutput) GoString() string { return s.String() } -// SetOperations sets the Operations field's value. -func (s *DeleteAutoSnapshotOutput) SetOperations(v []*Operation) *DeleteAutoSnapshotOutput { - s.Operations = v +// SetOperation sets the Operation field's value. +func (s *DeleteKeyPairOutput) SetOperation(v *Operation) *DeleteKeyPairOutput { + s.Operation = v return s } -type DeleteContactMethodInput struct { +type DeleteKnownHostKeysInput struct { _ struct{} `type:"structure"` - // The protocol that will be deleted, such as Email or SMS (text messaging). - // - // To delete an Email and an SMS contact method if you added both, you must - // run separate DeleteContactMethod actions to delete each protocol. + // The name of the instance for which you want to reset the host key or certificate. // - // Protocol is a required field - Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"ContactProtocol"` + // InstanceName is a required field + InstanceName *string `locationName:"instanceName" type:"string" required:"true"` } // String returns the string representation -func (s DeleteContactMethodInput) String() string { +func (s DeleteKnownHostKeysInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteContactMethodInput) GoString() string { +func (s DeleteKnownHostKeysInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteContactMethodInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteContactMethodInput"} - if s.Protocol == nil { - invalidParams.Add(request.NewErrParamRequired("Protocol")) +func (s *DeleteKnownHostKeysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteKnownHostKeysInput"} + if s.InstanceName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceName")) } if invalidParams.Len() > 0 { @@ -16292,65 +19385,61 @@ func (s *DeleteContactMethodInput) Validate() error { return nil } -// SetProtocol sets the Protocol field's value. -func (s *DeleteContactMethodInput) SetProtocol(v string) *DeleteContactMethodInput { - s.Protocol = &v +// SetInstanceName sets the InstanceName field's value. +func (s *DeleteKnownHostKeysInput) SetInstanceName(v string) *DeleteKnownHostKeysInput { + s.InstanceName = &v return s } -type DeleteContactMethodOutput struct { +type DeleteKnownHostKeysOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. 
Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s DeleteContactMethodOutput) String() string { +func (s DeleteKnownHostKeysOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteContactMethodOutput) GoString() string { +func (s DeleteKnownHostKeysOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *DeleteContactMethodOutput) SetOperations(v []*Operation) *DeleteContactMethodOutput { +func (s *DeleteKnownHostKeysOutput) SetOperations(v []*Operation) *DeleteKnownHostKeysOutput { s.Operations = v return s } -type DeleteDiskInput struct { +type DeleteLoadBalancerInput struct { _ struct{} `type:"structure"` - // The unique name of the disk you want to delete (e.g., my-disk). + // The name of the load balancer you want to delete. // - // DiskName is a required field - DiskName *string `locationName:"diskName" type:"string" required:"true"` - - // A Boolean value to indicate whether to delete the enabled add-ons for the - // disk. - ForceDeleteAddOns *bool `locationName:"forceDeleteAddOns" type:"boolean"` + // LoadBalancerName is a required field + LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` } // String returns the string representation -func (s DeleteDiskInput) String() string { +func (s DeleteLoadBalancerInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDiskInput) GoString() string { +func (s DeleteLoadBalancerInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDiskInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDiskInput"} - if s.DiskName == nil { - invalidParams.Add(request.NewErrParamRequired("DiskName")) +func (s *DeleteLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLoadBalancerInput"} + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) } if invalidParams.Len() > 0 { @@ -16359,67 +19448,76 @@ func (s *DeleteDiskInput) Validate() error { return nil } -// SetDiskName sets the DiskName field's value. -func (s *DeleteDiskInput) SetDiskName(v string) *DeleteDiskInput { - s.DiskName = &v - return s -} - -// SetForceDeleteAddOns sets the ForceDeleteAddOns field's value. -func (s *DeleteDiskInput) SetForceDeleteAddOns(v bool) *DeleteDiskInput { - s.ForceDeleteAddOns = &v +// SetLoadBalancerName sets the LoadBalancerName field's value. +func (s *DeleteLoadBalancerInput) SetLoadBalancerName(v string) *DeleteLoadBalancerInput { + s.LoadBalancerName = &v return s } -type DeleteDiskOutput struct { +type DeleteLoadBalancerOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. 
Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s DeleteDiskOutput) String() string { +func (s DeleteLoadBalancerOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDiskOutput) GoString() string { +func (s DeleteLoadBalancerOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *DeleteDiskOutput) SetOperations(v []*Operation) *DeleteDiskOutput { +func (s *DeleteLoadBalancerOutput) SetOperations(v []*Operation) *DeleteLoadBalancerOutput { s.Operations = v return s } -type DeleteDiskSnapshotInput struct { +type DeleteLoadBalancerTlsCertificateInput struct { _ struct{} `type:"structure"` - // The name of the disk snapshot you want to delete (e.g., my-disk-snapshot). + // The SSL/TLS certificate name. // - // DiskSnapshotName is a required field - DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string" required:"true"` + // CertificateName is a required field + CertificateName *string `locationName:"certificateName" type:"string" required:"true"` + + // When true, forces the deletion of an SSL/TLS certificate. + // + // There can be two certificates associated with a Lightsail load balancer: + // the primary and the backup. The force parameter is required when the primary + // SSL/TLS certificate is in use by an instance attached to the load balancer. + Force *bool `locationName:"force" type:"boolean"` + + // The load balancer name. + // + // LoadBalancerName is a required field + LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` } // String returns the string representation -func (s DeleteDiskSnapshotInput) String() string { +func (s DeleteLoadBalancerTlsCertificateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDiskSnapshotInput) GoString() string { +func (s DeleteLoadBalancerTlsCertificateInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDiskSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDiskSnapshotInput"} - if s.DiskSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("DiskSnapshotName")) +func (s *DeleteLoadBalancerTlsCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLoadBalancerTlsCertificateInput"} + if s.CertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateName")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) } if invalidParams.Len() > 0 { @@ -16428,69 +19526,96 @@ func (s *DeleteDiskSnapshotInput) Validate() error { return nil } -// SetDiskSnapshotName sets the DiskSnapshotName field's value. -func (s *DeleteDiskSnapshotInput) SetDiskSnapshotName(v string) *DeleteDiskSnapshotInput { - s.DiskSnapshotName = &v +// SetCertificateName sets the CertificateName field's value. +func (s *DeleteLoadBalancerTlsCertificateInput) SetCertificateName(v string) *DeleteLoadBalancerTlsCertificateInput { + s.CertificateName = &v return s } -type DeleteDiskSnapshotOutput struct { +// SetForce sets the Force field's value. 
+func (s *DeleteLoadBalancerTlsCertificateInput) SetForce(v bool) *DeleteLoadBalancerTlsCertificateInput { + s.Force = &v + return s +} + +// SetLoadBalancerName sets the LoadBalancerName field's value. +func (s *DeleteLoadBalancerTlsCertificateInput) SetLoadBalancerName(v string) *DeleteLoadBalancerTlsCertificateInput { + s.LoadBalancerName = &v + return s +} + +type DeleteLoadBalancerTlsCertificateOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s DeleteDiskSnapshotOutput) String() string { +func (s DeleteLoadBalancerTlsCertificateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDiskSnapshotOutput) GoString() string { +func (s DeleteLoadBalancerTlsCertificateOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *DeleteDiskSnapshotOutput) SetOperations(v []*Operation) *DeleteDiskSnapshotOutput { +func (s *DeleteLoadBalancerTlsCertificateOutput) SetOperations(v []*Operation) *DeleteLoadBalancerTlsCertificateOutput { s.Operations = v return s } -type DeleteDomainEntryInput struct { +type DeleteRelationalDatabaseInput struct { _ struct{} `type:"structure"` - // An array of key-value pairs containing information about your domain entries. + // The name of the database snapshot created if skip final snapshot is false, + // which is the default value for that parameter. // - // DomainEntry is a required field - DomainEntry *DomainEntry `locationName:"domainEntry" type:"structure" required:"true"` + // Specifying this parameter and also specifying the skip final snapshot parameter + // to true results in an error. + // + // Constraints: + // + // * Must contain from 2 to 255 alphanumeric characters, or hyphens. + // + // * The first and last character must be a letter or number. + FinalRelationalDatabaseSnapshotName *string `locationName:"finalRelationalDatabaseSnapshotName" type:"string"` - // The name of the domain entry to delete. + // The name of the database that you are deleting. // - // DomainName is a required field - DomainName *string `locationName:"domainName" type:"string" required:"true"` + // RelationalDatabaseName is a required field + RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"` + + // Determines whether a final database snapshot is created before your database + // is deleted. If true is specified, no database snapshot is created. If false + // is specified, a database snapshot is created before your database is deleted. + // + // You must specify the final relational database snapshot name parameter if + // the skip final snapshot parameter is false. 
+ // + // Default: false + SkipFinalSnapshot *bool `locationName:"skipFinalSnapshot" type:"boolean"` } // String returns the string representation -func (s DeleteDomainEntryInput) String() string { +func (s DeleteRelationalDatabaseInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDomainEntryInput) GoString() string { +func (s DeleteRelationalDatabaseInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDomainEntryInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDomainEntryInput"} - if s.DomainEntry == nil { - invalidParams.Add(request.NewErrParamRequired("DomainEntry")) - } - if s.DomainName == nil { - invalidParams.Add(request.NewErrParamRequired("DomainName")) +func (s *DeleteRelationalDatabaseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRelationalDatabaseInput"} + if s.RelationalDatabaseName == nil { + invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName")) } if invalidParams.Len() > 0 { @@ -16499,67 +19624,73 @@ func (s *DeleteDomainEntryInput) Validate() error { return nil } -// SetDomainEntry sets the DomainEntry field's value. -func (s *DeleteDomainEntryInput) SetDomainEntry(v *DomainEntry) *DeleteDomainEntryInput { - s.DomainEntry = v +// SetFinalRelationalDatabaseSnapshotName sets the FinalRelationalDatabaseSnapshotName field's value. +func (s *DeleteRelationalDatabaseInput) SetFinalRelationalDatabaseSnapshotName(v string) *DeleteRelationalDatabaseInput { + s.FinalRelationalDatabaseSnapshotName = &v + return s +} + +// SetRelationalDatabaseName sets the RelationalDatabaseName field's value. +func (s *DeleteRelationalDatabaseInput) SetRelationalDatabaseName(v string) *DeleteRelationalDatabaseInput { + s.RelationalDatabaseName = &v return s } -// SetDomainName sets the DomainName field's value. -func (s *DeleteDomainEntryInput) SetDomainName(v string) *DeleteDomainEntryInput { - s.DomainName = &v +// SetSkipFinalSnapshot sets the SkipFinalSnapshot field's value. +func (s *DeleteRelationalDatabaseInput) SetSkipFinalSnapshot(v bool) *DeleteRelationalDatabaseInput { + s.SkipFinalSnapshot = &v return s } -type DeleteDomainEntryOutput struct { +type DeleteRelationalDatabaseOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. - Operation *Operation `locationName:"operation" type:"structure"` + Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s DeleteDomainEntryOutput) String() string { +func (s DeleteRelationalDatabaseOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDomainEntryOutput) GoString() string { +func (s DeleteRelationalDatabaseOutput) GoString() string { return s.String() } -// SetOperation sets the Operation field's value. -func (s *DeleteDomainEntryOutput) SetOperation(v *Operation) *DeleteDomainEntryOutput { - s.Operation = v +// SetOperations sets the Operations field's value. 
+func (s *DeleteRelationalDatabaseOutput) SetOperations(v []*Operation) *DeleteRelationalDatabaseOutput { + s.Operations = v return s } -type DeleteDomainInput struct { +type DeleteRelationalDatabaseSnapshotInput struct { _ struct{} `type:"structure"` - // The specific domain name to delete. + // The name of the database snapshot that you are deleting. // - // DomainName is a required field - DomainName *string `locationName:"domainName" type:"string" required:"true"` + // RelationalDatabaseSnapshotName is a required field + RelationalDatabaseSnapshotName *string `locationName:"relationalDatabaseSnapshotName" type:"string" required:"true"` } // String returns the string representation -func (s DeleteDomainInput) String() string { +func (s DeleteRelationalDatabaseSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDomainInput) GoString() string { +func (s DeleteRelationalDatabaseSnapshotInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDomainInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDomainInput"} - if s.DomainName == nil { - invalidParams.Add(request.NewErrParamRequired("DomainName")) +func (s *DeleteRelationalDatabaseSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRelationalDatabaseSnapshotInput"} + if s.RelationalDatabaseSnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseSnapshotName")) } if invalidParams.Len() > 0 { @@ -16568,65 +19699,97 @@ func (s *DeleteDomainInput) Validate() error { return nil } -// SetDomainName sets the DomainName field's value. -func (s *DeleteDomainInput) SetDomainName(v string) *DeleteDomainInput { - s.DomainName = &v +// SetRelationalDatabaseSnapshotName sets the RelationalDatabaseSnapshotName field's value. +func (s *DeleteRelationalDatabaseSnapshotInput) SetRelationalDatabaseSnapshotName(v string) *DeleteRelationalDatabaseSnapshotInput { + s.RelationalDatabaseSnapshotName = &v return s } -type DeleteDomainOutput struct { +type DeleteRelationalDatabaseSnapshotOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. - Operation *Operation `locationName:"operation" type:"structure"` + Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s DeleteDomainOutput) String() string { +func (s DeleteRelationalDatabaseSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDomainOutput) GoString() string { +func (s DeleteRelationalDatabaseSnapshotOutput) GoString() string { return s.String() } -// SetOperation sets the Operation field's value. -func (s *DeleteDomainOutput) SetOperation(v *Operation) *DeleteDomainOutput { - s.Operation = v +// SetOperations sets the Operations field's value. +func (s *DeleteRelationalDatabaseSnapshotOutput) SetOperations(v []*Operation) *DeleteRelationalDatabaseSnapshotOutput { + s.Operations = v return s } -type DeleteInstanceInput struct { +// Describes the destination of a record. 
+type DestinationInfo struct { _ struct{} `type:"structure"` - // A Boolean value to indicate whether to delete the enabled add-ons for the - // disk. - ForceDeleteAddOns *bool `locationName:"forceDeleteAddOns" type:"boolean"` + // The ID of the resource created at the destination. + Id *string `locationName:"id" type:"string"` - // The name of the instance to delete. + // The destination service of the record. + Service *string `locationName:"service" type:"string"` +} + +// String returns the string representation +func (s DestinationInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DestinationInfo) GoString() string { + return s.String() +} + +// SetId sets the Id field's value. +func (s *DestinationInfo) SetId(v string) *DestinationInfo { + s.Id = &v + return s +} + +// SetService sets the Service field's value. +func (s *DestinationInfo) SetService(v string) *DestinationInfo { + s.Service = &v + return s +} + +type DetachCertificateFromDistributionInput struct { + _ struct{} `type:"structure"` + + // The name of the distribution from which to detach the certificate. // - // InstanceName is a required field - InstanceName *string `locationName:"instanceName" type:"string" required:"true"` + // Use the GetDistributions action to get a list of distribution names that + // you can specify. + // + // DistributionName is a required field + DistributionName *string `locationName:"distributionName" type:"string" required:"true"` } // String returns the string representation -func (s DeleteInstanceInput) String() string { +func (s DetachCertificateFromDistributionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteInstanceInput) GoString() string { +func (s DetachCertificateFromDistributionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteInstanceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteInstanceInput"} - if s.InstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceName")) +func (s *DetachCertificateFromDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachCertificateFromDistributionInput"} + if s.DistributionName == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionName")) } if invalidParams.Len() > 0 { @@ -16635,67 +19798,62 @@ func (s *DeleteInstanceInput) Validate() error { return nil } -// SetForceDeleteAddOns sets the ForceDeleteAddOns field's value. -func (s *DeleteInstanceInput) SetForceDeleteAddOns(v bool) *DeleteInstanceInput { - s.ForceDeleteAddOns = &v - return s -} - -// SetInstanceName sets the InstanceName field's value. -func (s *DeleteInstanceInput) SetInstanceName(v string) *DeleteInstanceInput { - s.InstanceName = &v +// SetDistributionName sets the DistributionName field's value. +func (s *DetachCertificateFromDistributionInput) SetDistributionName(v string) *DetachCertificateFromDistributionInput { + s.DistributionName = &v return s } -type DeleteInstanceOutput struct { +type DetachCertificateFromDistributionOutput struct { _ struct{} `type:"structure"` - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. 
- Operations []*Operation `locationName:"operations" type:"list"` + // An object that describes the result of the action, such as the status of + // the request, the timestamp of the request, and the resources affected by + // the request. + Operation *Operation `locationName:"operation" type:"structure"` } // String returns the string representation -func (s DeleteInstanceOutput) String() string { +func (s DetachCertificateFromDistributionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteInstanceOutput) GoString() string { +func (s DetachCertificateFromDistributionOutput) GoString() string { return s.String() } -// SetOperations sets the Operations field's value. -func (s *DeleteInstanceOutput) SetOperations(v []*Operation) *DeleteInstanceOutput { - s.Operations = v +// SetOperation sets the Operation field's value. +func (s *DetachCertificateFromDistributionOutput) SetOperation(v *Operation) *DetachCertificateFromDistributionOutput { + s.Operation = v return s } -type DeleteInstanceSnapshotInput struct { +type DetachDiskInput struct { _ struct{} `type:"structure"` - // The name of the snapshot to delete. + // The unique name of the disk you want to detach from your instance (e.g., + // my-disk). // - // InstanceSnapshotName is a required field - InstanceSnapshotName *string `locationName:"instanceSnapshotName" type:"string" required:"true"` + // DiskName is a required field + DiskName *string `locationName:"diskName" type:"string" required:"true"` } // String returns the string representation -func (s DeleteInstanceSnapshotInput) String() string { +func (s DetachDiskInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteInstanceSnapshotInput) GoString() string { +func (s DetachDiskInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteInstanceSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteInstanceSnapshotInput"} - if s.InstanceSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceSnapshotName")) +func (s *DetachDiskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachDiskInput"} + if s.DiskName == nil { + invalidParams.Add(request.NewErrParamRequired("DiskName")) } if invalidParams.Len() > 0 { @@ -16704,61 +19862,70 @@ func (s *DeleteInstanceSnapshotInput) Validate() error { return nil } -// SetInstanceSnapshotName sets the InstanceSnapshotName field's value. -func (s *DeleteInstanceSnapshotInput) SetInstanceSnapshotName(v string) *DeleteInstanceSnapshotInput { - s.InstanceSnapshotName = &v +// SetDiskName sets the DiskName field's value. +func (s *DetachDiskInput) SetDiskName(v string) *DetachDiskInput { + s.DiskName = &v return s } -type DeleteInstanceSnapshotOutput struct { +type DetachDiskOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. 
Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s DeleteInstanceSnapshotOutput) String() string { +func (s DetachDiskOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteInstanceSnapshotOutput) GoString() string { +func (s DetachDiskOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. -func (s *DeleteInstanceSnapshotOutput) SetOperations(v []*Operation) *DeleteInstanceSnapshotOutput { +func (s *DetachDiskOutput) SetOperations(v []*Operation) *DetachDiskOutput { s.Operations = v return s } -type DeleteKeyPairInput struct { +type DetachInstancesFromLoadBalancerInput struct { _ struct{} `type:"structure"` - // The name of the key pair to delete. + // An array of strings containing the names of the instances you want to detach + // from the load balancer. // - // KeyPairName is a required field - KeyPairName *string `locationName:"keyPairName" type:"string" required:"true"` + // InstanceNames is a required field + InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` + + // The name of the Lightsail load balancer. + // + // LoadBalancerName is a required field + LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` } // String returns the string representation -func (s DeleteKeyPairInput) String() string { +func (s DetachInstancesFromLoadBalancerInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteKeyPairInput) GoString() string { +func (s DetachInstancesFromLoadBalancerInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteKeyPairInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteKeyPairInput"} - if s.KeyPairName == nil { - invalidParams.Add(request.NewErrParamRequired("KeyPairName")) +func (s *DetachInstancesFromLoadBalancerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachInstancesFromLoadBalancerInput"} + if s.InstanceNames == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceNames")) + } + if s.LoadBalancerName == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) } if invalidParams.Len() > 0 { @@ -16767,61 +19934,67 @@ func (s *DeleteKeyPairInput) Validate() error { return nil } -// SetKeyPairName sets the KeyPairName field's value. -func (s *DeleteKeyPairInput) SetKeyPairName(v string) *DeleteKeyPairInput { - s.KeyPairName = &v +// SetInstanceNames sets the InstanceNames field's value. +func (s *DetachInstancesFromLoadBalancerInput) SetInstanceNames(v []*string) *DetachInstancesFromLoadBalancerInput { + s.InstanceNames = v return s } -type DeleteKeyPairOutput struct { +// SetLoadBalancerName sets the LoadBalancerName field's value. +func (s *DetachInstancesFromLoadBalancerInput) SetLoadBalancerName(v string) *DetachInstancesFromLoadBalancerInput { + s.LoadBalancerName = &v + return s +} + +type DetachInstancesFromLoadBalancerOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. 
- Operation *Operation `locationName:"operation" type:"structure"` + Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s DeleteKeyPairOutput) String() string { +func (s DetachInstancesFromLoadBalancerOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteKeyPairOutput) GoString() string { +func (s DetachInstancesFromLoadBalancerOutput) GoString() string { return s.String() } -// SetOperation sets the Operation field's value. -func (s *DeleteKeyPairOutput) SetOperation(v *Operation) *DeleteKeyPairOutput { - s.Operation = v +// SetOperations sets the Operations field's value. +func (s *DetachInstancesFromLoadBalancerOutput) SetOperations(v []*Operation) *DetachInstancesFromLoadBalancerOutput { + s.Operations = v return s } -type DeleteKnownHostKeysInput struct { +type DetachStaticIpInput struct { _ struct{} `type:"structure"` - // The name of the instance for which you want to reset the host key or certificate. + // The name of the static IP to detach from the instance. // - // InstanceName is a required field - InstanceName *string `locationName:"instanceName" type:"string" required:"true"` + // StaticIpName is a required field + StaticIpName *string `locationName:"staticIpName" type:"string" required:"true"` } // String returns the string representation -func (s DeleteKnownHostKeysInput) String() string { +func (s DetachStaticIpInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteKnownHostKeysInput) GoString() string { +func (s DetachStaticIpInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteKnownHostKeysInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteKnownHostKeysInput"} - if s.InstanceName == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceName")) +func (s *DetachStaticIpInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachStaticIpInput"} + if s.StaticIpName == nil { + invalidParams.Add(request.NewErrParamRequired("StaticIpName")) } if invalidParams.Len() > 0 { @@ -16830,61 +20003,69 @@ func (s *DeleteKnownHostKeysInput) Validate() error { return nil } -// SetInstanceName sets the InstanceName field's value. -func (s *DeleteKnownHostKeysInput) SetInstanceName(v string) *DeleteKnownHostKeysInput { - s.InstanceName = &v +// SetStaticIpName sets the StaticIpName field's value. +func (s *DetachStaticIpInput) SetStaticIpName(v string) *DetachStaticIpInput { + s.StaticIpName = &v return s } -type DeleteKnownHostKeysOutput struct { +type DetachStaticIpOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s DeleteKnownHostKeysOutput) String() string { +func (s DetachStaticIpOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteKnownHostKeysOutput) GoString() string { +func (s DetachStaticIpOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. 
-func (s *DeleteKnownHostKeysOutput) SetOperations(v []*Operation) *DeleteKnownHostKeysOutput { +func (s *DetachStaticIpOutput) SetOperations(v []*Operation) *DetachStaticIpOutput { s.Operations = v return s } -type DeleteLoadBalancerInput struct { +type DisableAddOnInput struct { _ struct{} `type:"structure"` - // The name of the load balancer you want to delete. + // The add-on type to disable. + // + // AddOnType is a required field + AddOnType *string `locationName:"addOnType" type:"string" required:"true" enum:"AddOnType"` + + // The name of the source resource for which to disable the add-on. // - // LoadBalancerName is a required field - LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` + // ResourceName is a required field + ResourceName *string `locationName:"resourceName" type:"string" required:"true"` } // String returns the string representation -func (s DeleteLoadBalancerInput) String() string { +func (s DisableAddOnInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteLoadBalancerInput) GoString() string { +func (s DisableAddOnInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteLoadBalancerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteLoadBalancerInput"} - if s.LoadBalancerName == nil { - invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) +func (s *DisableAddOnInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableAddOnInput"} + if s.AddOnType == nil { + invalidParams.Add(request.NewErrParamRequired("AddOnType")) + } + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) } if invalidParams.Len() > 0 { @@ -16893,1325 +20074,1319 @@ func (s *DeleteLoadBalancerInput) Validate() error { return nil } -// SetLoadBalancerName sets the LoadBalancerName field's value. -func (s *DeleteLoadBalancerInput) SetLoadBalancerName(v string) *DeleteLoadBalancerInput { - s.LoadBalancerName = &v +// SetAddOnType sets the AddOnType field's value. +func (s *DisableAddOnInput) SetAddOnType(v string) *DisableAddOnInput { + s.AddOnType = &v return s } -type DeleteLoadBalancerOutput struct { +// SetResourceName sets the ResourceName field's value. +func (s *DisableAddOnInput) SetResourceName(v string) *DisableAddOnInput { + s.ResourceName = &v + return s +} + +type DisableAddOnOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s DeleteLoadBalancerOutput) String() string { +func (s DisableAddOnOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteLoadBalancerOutput) GoString() string { +func (s DisableAddOnOutput) GoString() string { return s.String() } // SetOperations sets the Operations field's value. 
-func (s *DeleteLoadBalancerOutput) SetOperations(v []*Operation) *DeleteLoadBalancerOutput { +func (s *DisableAddOnOutput) SetOperations(v []*Operation) *DisableAddOnOutput { s.Operations = v return s } -type DeleteLoadBalancerTlsCertificateInput struct { +// Describes a system disk or a block storage disk. +type Disk struct { _ struct{} `type:"structure"` - // The SSL/TLS certificate name. - // - // CertificateName is a required field - CertificateName *string `locationName:"certificateName" type:"string" required:"true"` + // An array of objects representing the add-ons enabled on the disk. + AddOns []*AddOn `locationName:"addOns" type:"list"` - // When true, forces the deletion of an SSL/TLS certificate. + // The Amazon Resource Name (ARN) of the disk. + Arn *string `locationName:"arn" type:"string"` + + // The resources to which the disk is attached. + AttachedTo *string `locationName:"attachedTo" type:"string"` + + // (Deprecated) The attachment state of the disk. // - // There can be two certificates associated with a Lightsail load balancer: - // the primary and the backup. The force parameter is required when the primary - // SSL/TLS certificate is in use by an instance attached to the load balancer. - Force *bool `locationName:"force" type:"boolean"` + // In releases prior to November 14, 2017, this parameter returned attached + // for system disks in the API response. It is now deprecated, but still included + // in the response. Use isAttached instead. + // + // Deprecated: AttachmentState has been deprecated + AttachmentState *string `locationName:"attachmentState" deprecated:"true" type:"string"` - // The load balancer name. + // The date when the disk was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // (Deprecated) The number of GB in use by the disk. // - // LoadBalancerName is a required field - LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` + // In releases prior to November 14, 2017, this parameter was not included in + // the API response. It is now deprecated. + // + // Deprecated: GbInUse has been deprecated + GbInUse *int64 `locationName:"gbInUse" deprecated:"true" type:"integer"` + + // The input/output operations per second (IOPS) of the disk. + Iops *int64 `locationName:"iops" type:"integer"` + + // A Boolean value indicating whether the disk is attached. + IsAttached *bool `locationName:"isAttached" type:"boolean"` + + // A Boolean value indicating whether this disk is a system disk (has an operating + // system loaded on it). + IsSystemDisk *bool `locationName:"isSystemDisk" type:"boolean"` + + // The AWS Region and Availability Zone where the disk is located. + Location *ResourceLocation `locationName:"location" type:"structure"` + + // The unique name of the disk. + Name *string `locationName:"name" type:"string"` + + // The disk path. + Path *string `locationName:"path" type:"string"` + + // The Lightsail resource type (e.g., Disk). + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The size of the disk in GB. + SizeInGb *int64 `locationName:"sizeInGb" type:"integer"` + + // Describes the status of the disk. + State *string `locationName:"state" type:"string" enum:"DiskState"` + + // The support code. Include this code in your email to support when you have + // questions about an instance or another resource in Lightsail. This code enables + // our support team to look up your Lightsail information more easily. 
+ SupportCode *string `locationName:"supportCode" type:"string"` + + // The tag keys and optional values for the resource. For more information about + // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s DeleteLoadBalancerTlsCertificateInput) String() string { +func (s Disk) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteLoadBalancerTlsCertificateInput) GoString() string { +func (s Disk) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteLoadBalancerTlsCertificateInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteLoadBalancerTlsCertificateInput"} - if s.CertificateName == nil { - invalidParams.Add(request.NewErrParamRequired("CertificateName")) - } - if s.LoadBalancerName == nil { - invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) - } +// SetAddOns sets the AddOns field's value. +func (s *Disk) SetAddOns(v []*AddOn) *Disk { + s.AddOns = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetArn sets the Arn field's value. +func (s *Disk) SetArn(v string) *Disk { + s.Arn = &v + return s } -// SetCertificateName sets the CertificateName field's value. -func (s *DeleteLoadBalancerTlsCertificateInput) SetCertificateName(v string) *DeleteLoadBalancerTlsCertificateInput { - s.CertificateName = &v +// SetAttachedTo sets the AttachedTo field's value. +func (s *Disk) SetAttachedTo(v string) *Disk { + s.AttachedTo = &v return s } -// SetForce sets the Force field's value. -func (s *DeleteLoadBalancerTlsCertificateInput) SetForce(v bool) *DeleteLoadBalancerTlsCertificateInput { - s.Force = &v +// SetAttachmentState sets the AttachmentState field's value. +func (s *Disk) SetAttachmentState(v string) *Disk { + s.AttachmentState = &v return s } -// SetLoadBalancerName sets the LoadBalancerName field's value. -func (s *DeleteLoadBalancerTlsCertificateInput) SetLoadBalancerName(v string) *DeleteLoadBalancerTlsCertificateInput { - s.LoadBalancerName = &v +// SetCreatedAt sets the CreatedAt field's value. +func (s *Disk) SetCreatedAt(v time.Time) *Disk { + s.CreatedAt = &v return s } -type DeleteLoadBalancerTlsCertificateOutput struct { - _ struct{} `type:"structure"` +// SetGbInUse sets the GbInUse field's value. +func (s *Disk) SetGbInUse(v int64) *Disk { + s.GbInUse = &v + return s +} - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` +// SetIops sets the Iops field's value. +func (s *Disk) SetIops(v int64) *Disk { + s.Iops = &v + return s } -// String returns the string representation -func (s DeleteLoadBalancerTlsCertificateOutput) String() string { - return awsutil.Prettify(s) +// SetIsAttached sets the IsAttached field's value. +func (s *Disk) SetIsAttached(v bool) *Disk { + s.IsAttached = &v + return s } -// GoString returns the string representation -func (s DeleteLoadBalancerTlsCertificateOutput) GoString() string { - return s.String() +// SetIsSystemDisk sets the IsSystemDisk field's value. 
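// --- Editorial usage sketch (not part of the vendored diff) ---------------
// Reading the Disk fields defined above, assuming a configured
// *lightsail.Lightsail client (`svc`). Per the field documentation, IsAttached
// is the field to rely on; AttachmentState and GbInUse are deprecated.
// Assumed imports: "fmt", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/service/lightsail".
func listDisksSketch(svc *lightsail.Lightsail) error {
	out, err := svc.GetDisks(&lightsail.GetDisksInput{})
	if err != nil {
		return err
	}
	for _, d := range out.Disks {
		fmt.Printf("%s: %d GB, attached=%t, system=%t\n",
			aws.StringValue(d.Name),
			aws.Int64Value(d.SizeInGb),
			aws.BoolValue(d.IsAttached), // preferred over the deprecated AttachmentState
			aws.BoolValue(d.IsSystemDisk))
	}
	return nil
}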
+func (s *Disk) SetIsSystemDisk(v bool) *Disk { + s.IsSystemDisk = &v + return s } -// SetOperations sets the Operations field's value. -func (s *DeleteLoadBalancerTlsCertificateOutput) SetOperations(v []*Operation) *DeleteLoadBalancerTlsCertificateOutput { - s.Operations = v +// SetLocation sets the Location field's value. +func (s *Disk) SetLocation(v *ResourceLocation) *Disk { + s.Location = v return s } -type DeleteRelationalDatabaseInput struct { +// SetName sets the Name field's value. +func (s *Disk) SetName(v string) *Disk { + s.Name = &v + return s +} + +// SetPath sets the Path field's value. +func (s *Disk) SetPath(v string) *Disk { + s.Path = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *Disk) SetResourceType(v string) *Disk { + s.ResourceType = &v + return s +} + +// SetSizeInGb sets the SizeInGb field's value. +func (s *Disk) SetSizeInGb(v int64) *Disk { + s.SizeInGb = &v + return s +} + +// SetState sets the State field's value. +func (s *Disk) SetState(v string) *Disk { + s.State = &v + return s +} + +// SetSupportCode sets the SupportCode field's value. +func (s *Disk) SetSupportCode(v string) *Disk { + s.SupportCode = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *Disk) SetTags(v []*Tag) *Disk { + s.Tags = v + return s +} + +// Describes a disk. +type DiskInfo struct { _ struct{} `type:"structure"` - // The name of the database snapshot created if skip final snapshot is false, - // which is the default value for that parameter. - // - // Specifying this parameter and also specifying the skip final snapshot parameter - // to true results in an error. - // - // Constraints: - // - // * Must contain from 2 to 255 alphanumeric characters, or hyphens. - // - // * The first and last character must be a letter or number. - FinalRelationalDatabaseSnapshotName *string `locationName:"finalRelationalDatabaseSnapshotName" type:"string"` + // A Boolean value indicating whether this disk is a system disk (has an operating + // system loaded on it). + IsSystemDisk *bool `locationName:"isSystemDisk" type:"boolean"` - // The name of the database that you are deleting. - // - // RelationalDatabaseName is a required field - RelationalDatabaseName *string `locationName:"relationalDatabaseName" type:"string" required:"true"` + // The disk name. + Name *string `locationName:"name" type:"string"` - // Determines whether a final database snapshot is created before your database - // is deleted. If true is specified, no database snapshot is created. If false - // is specified, a database snapshot is created before your database is deleted. - // - // You must specify the final relational database snapshot name parameter if - // the skip final snapshot parameter is false. - // - // Default: false - SkipFinalSnapshot *bool `locationName:"skipFinalSnapshot" type:"boolean"` + // The disk path. + Path *string `locationName:"path" type:"string"` + + // The size of the disk in GB (e.g., 32). + SizeInGb *int64 `locationName:"sizeInGb" type:"integer"` } // String returns the string representation -func (s DeleteRelationalDatabaseInput) String() string { +func (s DiskInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRelationalDatabaseInput) GoString() string { +func (s DiskInfo) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteRelationalDatabaseInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteRelationalDatabaseInput"} - if s.RelationalDatabaseName == nil { - invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetIsSystemDisk sets the IsSystemDisk field's value. +func (s *DiskInfo) SetIsSystemDisk(v bool) *DiskInfo { + s.IsSystemDisk = &v + return s } -// SetFinalRelationalDatabaseSnapshotName sets the FinalRelationalDatabaseSnapshotName field's value. -func (s *DeleteRelationalDatabaseInput) SetFinalRelationalDatabaseSnapshotName(v string) *DeleteRelationalDatabaseInput { - s.FinalRelationalDatabaseSnapshotName = &v +// SetName sets the Name field's value. +func (s *DiskInfo) SetName(v string) *DiskInfo { + s.Name = &v return s } -// SetRelationalDatabaseName sets the RelationalDatabaseName field's value. -func (s *DeleteRelationalDatabaseInput) SetRelationalDatabaseName(v string) *DeleteRelationalDatabaseInput { - s.RelationalDatabaseName = &v +// SetPath sets the Path field's value. +func (s *DiskInfo) SetPath(v string) *DiskInfo { + s.Path = &v return s } -// SetSkipFinalSnapshot sets the SkipFinalSnapshot field's value. -func (s *DeleteRelationalDatabaseInput) SetSkipFinalSnapshot(v bool) *DeleteRelationalDatabaseInput { - s.SkipFinalSnapshot = &v +// SetSizeInGb sets the SizeInGb field's value. +func (s *DiskInfo) SetSizeInGb(v int64) *DiskInfo { + s.SizeInGb = &v return s } -type DeleteRelationalDatabaseOutput struct { +// Describes a block storage disk mapping. +type DiskMap struct { _ struct{} `type:"structure"` - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` + // The new disk name (e.g., my-new-disk). + NewDiskName *string `locationName:"newDiskName" type:"string"` + + // The original disk path exposed to the instance (for example, /dev/sdh). + OriginalDiskPath *string `locationName:"originalDiskPath" type:"string"` } // String returns the string representation -func (s DeleteRelationalDatabaseOutput) String() string { +func (s DiskMap) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRelationalDatabaseOutput) GoString() string { +func (s DiskMap) GoString() string { return s.String() } -// SetOperations sets the Operations field's value. -func (s *DeleteRelationalDatabaseOutput) SetOperations(v []*Operation) *DeleteRelationalDatabaseOutput { - s.Operations = v +// SetNewDiskName sets the NewDiskName field's value. +func (s *DiskMap) SetNewDiskName(v string) *DiskMap { + s.NewDiskName = &v return s } -type DeleteRelationalDatabaseSnapshotInput struct { +// SetOriginalDiskPath sets the OriginalDiskPath field's value. +func (s *DiskMap) SetOriginalDiskPath(v string) *DiskMap { + s.OriginalDiskPath = &v + return s +} + +// Describes a block storage disk snapshot. +type DiskSnapshot struct { _ struct{} `type:"structure"` - // The name of the database snapshot that you are deleting. - // - // RelationalDatabaseSnapshotName is a required field - RelationalDatabaseSnapshotName *string `locationName:"relationalDatabaseSnapshotName" type:"string" required:"true"` + // The Amazon Resource Name (ARN) of the disk snapshot. 
+ Arn *string `locationName:"arn" type:"string"` + + // The date when the disk snapshot was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // The Amazon Resource Name (ARN) of the source disk from which the disk snapshot + // was created. + FromDiskArn *string `locationName:"fromDiskArn" type:"string"` + + // The unique name of the source disk from which the disk snapshot was created. + FromDiskName *string `locationName:"fromDiskName" type:"string"` + + // The Amazon Resource Name (ARN) of the source instance from which the disk + // (system volume) snapshot was created. + FromInstanceArn *string `locationName:"fromInstanceArn" type:"string"` + + // The unique name of the source instance from which the disk (system volume) + // snapshot was created. + FromInstanceName *string `locationName:"fromInstanceName" type:"string"` + + // A Boolean value indicating whether the snapshot was created from an automatic + // snapshot. + IsFromAutoSnapshot *bool `locationName:"isFromAutoSnapshot" type:"boolean"` + + // The AWS Region and Availability Zone where the disk snapshot was created. + Location *ResourceLocation `locationName:"location" type:"structure"` + + // The name of the disk snapshot (e.g., my-disk-snapshot). + Name *string `locationName:"name" type:"string"` + + // The progress of the disk snapshot operation. + Progress *string `locationName:"progress" type:"string"` + + // The Lightsail resource type (e.g., DiskSnapshot). + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The size of the disk in GB. + SizeInGb *int64 `locationName:"sizeInGb" type:"integer"` + + // The status of the disk snapshot operation. + State *string `locationName:"state" type:"string" enum:"DiskSnapshotState"` + + // The support code. Include this code in your email to support when you have + // questions about an instance or another resource in Lightsail. This code enables + // our support team to look up your Lightsail information more easily. + SupportCode *string `locationName:"supportCode" type:"string"` + + // The tag keys and optional values for the resource. For more information about + // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s DeleteRelationalDatabaseSnapshotInput) String() string { +func (s DiskSnapshot) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRelationalDatabaseSnapshotInput) GoString() string { +func (s DiskSnapshot) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteRelationalDatabaseSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteRelationalDatabaseSnapshotInput"} - if s.RelationalDatabaseSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("RelationalDatabaseSnapshotName")) - } +// SetArn sets the Arn field's value. +func (s *DiskSnapshot) SetArn(v string) *DiskSnapshot { + s.Arn = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCreatedAt sets the CreatedAt field's value. +func (s *DiskSnapshot) SetCreatedAt(v time.Time) *DiskSnapshot { + s.CreatedAt = &v + return s } -// SetRelationalDatabaseSnapshotName sets the RelationalDatabaseSnapshotName field's value. 
-func (s *DeleteRelationalDatabaseSnapshotInput) SetRelationalDatabaseSnapshotName(v string) *DeleteRelationalDatabaseSnapshotInput { - s.RelationalDatabaseSnapshotName = &v +// SetFromDiskArn sets the FromDiskArn field's value. +func (s *DiskSnapshot) SetFromDiskArn(v string) *DiskSnapshot { + s.FromDiskArn = &v return s } -type DeleteRelationalDatabaseSnapshotOutput struct { - _ struct{} `type:"structure"` +// SetFromDiskName sets the FromDiskName field's value. +func (s *DiskSnapshot) SetFromDiskName(v string) *DiskSnapshot { + s.FromDiskName = &v + return s +} - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` +// SetFromInstanceArn sets the FromInstanceArn field's value. +func (s *DiskSnapshot) SetFromInstanceArn(v string) *DiskSnapshot { + s.FromInstanceArn = &v + return s } -// String returns the string representation -func (s DeleteRelationalDatabaseSnapshotOutput) String() string { - return awsutil.Prettify(s) +// SetFromInstanceName sets the FromInstanceName field's value. +func (s *DiskSnapshot) SetFromInstanceName(v string) *DiskSnapshot { + s.FromInstanceName = &v + return s } -// GoString returns the string representation -func (s DeleteRelationalDatabaseSnapshotOutput) GoString() string { - return s.String() +// SetIsFromAutoSnapshot sets the IsFromAutoSnapshot field's value. +func (s *DiskSnapshot) SetIsFromAutoSnapshot(v bool) *DiskSnapshot { + s.IsFromAutoSnapshot = &v + return s } -// SetOperations sets the Operations field's value. -func (s *DeleteRelationalDatabaseSnapshotOutput) SetOperations(v []*Operation) *DeleteRelationalDatabaseSnapshotOutput { - s.Operations = v +// SetLocation sets the Location field's value. +func (s *DiskSnapshot) SetLocation(v *ResourceLocation) *DiskSnapshot { + s.Location = v return s } -// Describes the destination of a record. -type DestinationInfo struct { - _ struct{} `type:"structure"` +// SetName sets the Name field's value. +func (s *DiskSnapshot) SetName(v string) *DiskSnapshot { + s.Name = &v + return s +} - // The ID of the resource created at the destination. - Id *string `locationName:"id" type:"string"` +// SetProgress sets the Progress field's value. +func (s *DiskSnapshot) SetProgress(v string) *DiskSnapshot { + s.Progress = &v + return s +} - // The destination service of the record. - Service *string `locationName:"service" type:"string"` +// SetResourceType sets the ResourceType field's value. +func (s *DiskSnapshot) SetResourceType(v string) *DiskSnapshot { + s.ResourceType = &v + return s } -// String returns the string representation -func (s DestinationInfo) String() string { - return awsutil.Prettify(s) +// SetSizeInGb sets the SizeInGb field's value. +func (s *DiskSnapshot) SetSizeInGb(v int64) *DiskSnapshot { + s.SizeInGb = &v + return s } -// GoString returns the string representation -func (s DestinationInfo) GoString() string { - return s.String() +// SetState sets the State field's value. +func (s *DiskSnapshot) SetState(v string) *DiskSnapshot { + s.State = &v + return s } -// SetId sets the Id field's value. -func (s *DestinationInfo) SetId(v string) *DestinationInfo { - s.Id = &v +// SetSupportCode sets the SupportCode field's value. +func (s *DiskSnapshot) SetSupportCode(v string) *DiskSnapshot { + s.SupportCode = &v return s } -// SetService sets the Service field's value. 
-func (s *DestinationInfo) SetService(v string) *DestinationInfo { - s.Service = &v +// SetTags sets the Tags field's value. +func (s *DiskSnapshot) SetTags(v []*Tag) *DiskSnapshot { + s.Tags = v return s } -type DetachDiskInput struct { +// Describes a disk snapshot. +type DiskSnapshotInfo struct { _ struct{} `type:"structure"` - // The unique name of the disk you want to detach from your instance (e.g., - // my-disk). - // - // DiskName is a required field - DiskName *string `locationName:"diskName" type:"string" required:"true"` + // The size of the disk in GB (e.g., 32). + SizeInGb *int64 `locationName:"sizeInGb" type:"integer"` } // String returns the string representation -func (s DetachDiskInput) String() string { +func (s DiskSnapshotInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DetachDiskInput) GoString() string { +func (s DiskSnapshotInfo) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DetachDiskInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DetachDiskInput"} - if s.DiskName == nil { - invalidParams.Add(request.NewErrParamRequired("DiskName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDiskName sets the DiskName field's value. -func (s *DetachDiskInput) SetDiskName(v string) *DetachDiskInput { - s.DiskName = &v +// SetSizeInGb sets the SizeInGb field's value. +func (s *DiskSnapshotInfo) SetSizeInGb(v int64) *DiskSnapshotInfo { + s.SizeInGb = &v return s } -type DetachDiskOutput struct { +// Describes the specifications of a distribution bundle. +type DistributionBundle struct { _ struct{} `type:"structure"` - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` + // The ID of the bundle. + BundleId *string `locationName:"bundleId" type:"string"` + + // Indicates whether the bundle is active, and can be specified for a new distribution. + IsActive *bool `locationName:"isActive" type:"boolean"` + + // The name of the distribution bundle. + Name *string `locationName:"name" type:"string"` + + // The monthly price, in US dollars, of the bundle. + Price *float64 `locationName:"price" type:"float"` + + // The monthly network transfer quota of the bundle. + TransferPerMonthInGb *int64 `locationName:"transferPerMonthInGb" type:"integer"` } // String returns the string representation -func (s DetachDiskOutput) String() string { +func (s DistributionBundle) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DetachDiskOutput) GoString() string { +func (s DistributionBundle) GoString() string { return s.String() } -// SetOperations sets the Operations field's value. -func (s *DetachDiskOutput) SetOperations(v []*Operation) *DetachDiskOutput { - s.Operations = v +// SetBundleId sets the BundleId field's value. +func (s *DistributionBundle) SetBundleId(v string) *DistributionBundle { + s.BundleId = &v return s } -type DetachInstancesFromLoadBalancerInput struct { +// SetIsActive sets the IsActive field's value. +func (s *DistributionBundle) SetIsActive(v bool) *DistributionBundle { + s.IsActive = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *DistributionBundle) SetName(v string) *DistributionBundle { + s.Name = &v + return s +} + +// SetPrice sets the Price field's value. +func (s *DistributionBundle) SetPrice(v float64) *DistributionBundle { + s.Price = &v + return s +} + +// SetTransferPerMonthInGb sets the TransferPerMonthInGb field's value. +func (s *DistributionBundle) SetTransferPerMonthInGb(v int64) *DistributionBundle { + s.TransferPerMonthInGb = &v + return s +} + +// Describes a domain where you are storing recordsets in Lightsail. +type Domain struct { _ struct{} `type:"structure"` - // An array of strings containing the names of the instances you want to detach - // from the load balancer. - // - // InstanceNames is a required field - InstanceNames []*string `locationName:"instanceNames" type:"list" required:"true"` + // The Amazon Resource Name (ARN) of the domain recordset (e.g., arn:aws:lightsail:global:123456789101:Domain/824cede0-abc7-4f84-8dbc-12345EXAMPLE). + Arn *string `locationName:"arn" type:"string"` - // The name of the Lightsail load balancer. - // - // LoadBalancerName is a required field - LoadBalancerName *string `locationName:"loadBalancerName" type:"string" required:"true"` + // The date when the domain recordset was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // An array of key-value pairs containing information about the domain entries. + DomainEntries []*DomainEntry `locationName:"domainEntries" type:"list"` + + // The AWS Region and Availability Zones where the domain recordset was created. + Location *ResourceLocation `locationName:"location" type:"structure"` + + // The name of the domain. + Name *string `locationName:"name" type:"string"` + + // The resource type. + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The support code. Include this code in your email to support when you have + // questions about an instance or another resource in Lightsail. This code enables + // our support team to look up your Lightsail information more easily. + SupportCode *string `locationName:"supportCode" type:"string"` + + // The tag keys and optional values for the resource. For more information about + // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + Tags []*Tag `locationName:"tags" type:"list"` } // String returns the string representation -func (s DetachInstancesFromLoadBalancerInput) String() string { +func (s Domain) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DetachInstancesFromLoadBalancerInput) GoString() string { +func (s Domain) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DetachInstancesFromLoadBalancerInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DetachInstancesFromLoadBalancerInput"} - if s.InstanceNames == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceNames")) - } - if s.LoadBalancerName == nil { - invalidParams.Add(request.NewErrParamRequired("LoadBalancerName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetArn sets the Arn field's value. +func (s *Domain) SetArn(v string) *Domain { + s.Arn = &v + return s } -// SetInstanceNames sets the InstanceNames field's value. 
-func (s *DetachInstancesFromLoadBalancerInput) SetInstanceNames(v []*string) *DetachInstancesFromLoadBalancerInput { - s.InstanceNames = v +// SetCreatedAt sets the CreatedAt field's value. +func (s *Domain) SetCreatedAt(v time.Time) *Domain { + s.CreatedAt = &v return s } -// SetLoadBalancerName sets the LoadBalancerName field's value. -func (s *DetachInstancesFromLoadBalancerInput) SetLoadBalancerName(v string) *DetachInstancesFromLoadBalancerInput { - s.LoadBalancerName = &v +// SetDomainEntries sets the DomainEntries field's value. +func (s *Domain) SetDomainEntries(v []*DomainEntry) *Domain { + s.DomainEntries = v return s } -type DetachInstancesFromLoadBalancerOutput struct { - _ struct{} `type:"structure"` +// SetLocation sets the Location field's value. +func (s *Domain) SetLocation(v *ResourceLocation) *Domain { + s.Location = v + return s +} - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` +// SetName sets the Name field's value. +func (s *Domain) SetName(v string) *Domain { + s.Name = &v + return s } -// String returns the string representation -func (s DetachInstancesFromLoadBalancerOutput) String() string { - return awsutil.Prettify(s) +// SetResourceType sets the ResourceType field's value. +func (s *Domain) SetResourceType(v string) *Domain { + s.ResourceType = &v + return s } -// GoString returns the string representation -func (s DetachInstancesFromLoadBalancerOutput) GoString() string { - return s.String() +// SetSupportCode sets the SupportCode field's value. +func (s *Domain) SetSupportCode(v string) *Domain { + s.SupportCode = &v + return s } -// SetOperations sets the Operations field's value. -func (s *DetachInstancesFromLoadBalancerOutput) SetOperations(v []*Operation) *DetachInstancesFromLoadBalancerOutput { - s.Operations = v +// SetTags sets the Tags field's value. +func (s *Domain) SetTags(v []*Tag) *Domain { + s.Tags = v return s } -type DetachStaticIpInput struct { +// Describes a domain recordset entry. +type DomainEntry struct { _ struct{} `type:"structure"` - // The name of the static IP to detach from the instance. + // The ID of the domain recordset entry. + Id *string `locationName:"id" type:"string"` + + // When true, specifies whether the domain entry is an alias used by the Lightsail + // load balancer. You can include an alias (A type) record in your request, + // which points to a load balancer DNS name and routes traffic to your load + // balancer + IsAlias *bool `locationName:"isAlias" type:"boolean"` + + // The name of the domain. + Name *string `locationName:"name" type:"string"` + + // (Deprecated) The options for the domain entry. // - // StaticIpName is a required field - StaticIpName *string `locationName:"staticIpName" type:"string" required:"true"` + // In releases prior to November 29, 2017, this parameter was not included in + // the API response. It is now deprecated. + // + // Deprecated: Options has been deprecated + Options map[string]*string `locationName:"options" deprecated:"true" type:"map"` + + // The target AWS name server (e.g., ns-111.awsdns-22.com.). + // + // For Lightsail load balancers, the value looks like ab1234c56789c6b86aba6fb203d443bc-123456789.us-east-2.elb.amazonaws.com. + // Be sure to also set isAlias to true when setting up an A record for a load + // balancer. 
+ Target *string `locationName:"target" type:"string"` + + // The type of domain entry, such as address (A), canonical name (CNAME), mail + // exchanger (MX), name server (NS), start of authority (SOA), service locator + // (SRV), or text (TXT). + // + // The following domain entry types can be used: + // + // * A + // + // * CNAME + // + // * MX + // + // * NS + // + // * SOA + // + // * SRV + // + // * TXT + Type *string `locationName:"type" type:"string"` } // String returns the string representation -func (s DetachStaticIpInput) String() string { +func (s DomainEntry) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DetachStaticIpInput) GoString() string { +func (s DomainEntry) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DetachStaticIpInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DetachStaticIpInput"} - if s.StaticIpName == nil { - invalidParams.Add(request.NewErrParamRequired("StaticIpName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetStaticIpName sets the StaticIpName field's value. -func (s *DetachStaticIpInput) SetStaticIpName(v string) *DetachStaticIpInput { - s.StaticIpName = &v +// SetId sets the Id field's value. +func (s *DomainEntry) SetId(v string) *DomainEntry { + s.Id = &v return s } -type DetachStaticIpOutput struct { - _ struct{} `type:"structure"` +// SetIsAlias sets the IsAlias field's value. +func (s *DomainEntry) SetIsAlias(v bool) *DomainEntry { + s.IsAlias = &v + return s +} - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` +// SetName sets the Name field's value. +func (s *DomainEntry) SetName(v string) *DomainEntry { + s.Name = &v + return s } -// String returns the string representation -func (s DetachStaticIpOutput) String() string { - return awsutil.Prettify(s) +// SetOptions sets the Options field's value. +func (s *DomainEntry) SetOptions(v map[string]*string) *DomainEntry { + s.Options = v + return s } -// GoString returns the string representation -func (s DetachStaticIpOutput) GoString() string { - return s.String() +// SetTarget sets the Target field's value. +func (s *DomainEntry) SetTarget(v string) *DomainEntry { + s.Target = &v + return s } -// SetOperations sets the Operations field's value. -func (s *DetachStaticIpOutput) SetOperations(v []*Operation) *DetachStaticIpOutput { - s.Operations = v +// SetType sets the Type field's value. +func (s *DomainEntry) SetType(v string) *DomainEntry { + s.Type = &v return s } -type DisableAddOnInput struct { +// Describes the domain validation records of an Amazon Lightsail SSL/TLS certificate. +type DomainValidationRecord struct { _ struct{} `type:"structure"` - // The add-on type to disable. - // - // AddOnType is a required field - AddOnType *string `locationName:"addOnType" type:"string" required:"true" enum:"AddOnType"` + // The domain name of the certificate validation record. For example, example.com + // or www.example.com. + DomainName *string `locationName:"domainName" type:"string"` - // The name of the source resource for which to disable the add-on. 
- // - // ResourceName is a required field - ResourceName *string `locationName:"resourceName" type:"string" required:"true"` + // An object that describes the DNS records to add to your domain's DNS to validate + // it for the certificate. + ResourceRecord *ResourceRecord `locationName:"resourceRecord" type:"structure"` } // String returns the string representation -func (s DisableAddOnInput) String() string { +func (s DomainValidationRecord) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DisableAddOnInput) GoString() string { +func (s DomainValidationRecord) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DisableAddOnInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DisableAddOnInput"} - if s.AddOnType == nil { - invalidParams.Add(request.NewErrParamRequired("AddOnType")) - } - if s.ResourceName == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAddOnType sets the AddOnType field's value. -func (s *DisableAddOnInput) SetAddOnType(v string) *DisableAddOnInput { - s.AddOnType = &v +// SetDomainName sets the DomainName field's value. +func (s *DomainValidationRecord) SetDomainName(v string) *DomainValidationRecord { + s.DomainName = &v return s } -// SetResourceName sets the ResourceName field's value. -func (s *DisableAddOnInput) SetResourceName(v string) *DisableAddOnInput { - s.ResourceName = &v +// SetResourceRecord sets the ResourceRecord field's value. +func (s *DomainValidationRecord) SetResourceRecord(v *ResourceRecord) *DomainValidationRecord { + s.ResourceRecord = v return s } -type DisableAddOnOutput struct { +type DownloadDefaultKeyPairInput struct { _ struct{} `type:"structure"` - - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s DisableAddOnOutput) String() string { +func (s DownloadDefaultKeyPairInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DisableAddOnOutput) GoString() string { +func (s DownloadDefaultKeyPairInput) GoString() string { return s.String() } -// SetOperations sets the Operations field's value. -func (s *DisableAddOnOutput) SetOperations(v []*Operation) *DisableAddOnOutput { - s.Operations = v - return s -} - -// Describes a system disk or a block storage disk. -type Disk struct { +type DownloadDefaultKeyPairOutput struct { _ struct{} `type:"structure"` - // An array of objects representing the add-ons enabled on the disk. - AddOns []*AddOn `locationName:"addOns" type:"list"` - - // The Amazon Resource Name (ARN) of the disk. - Arn *string `locationName:"arn" type:"string"` - - // The resources to which the disk is attached. - AttachedTo *string `locationName:"attachedTo" type:"string"` - - // (Deprecated) The attachment state of the disk. - // - // In releases prior to November 14, 2017, this parameter returned attached - // for system disks in the API response. It is now deprecated, but still included - // in the response. Use isAttached instead. 
- // - // Deprecated: AttachmentState has been deprecated - AttachmentState *string `locationName:"attachmentState" deprecated:"true" type:"string"` - - // The date when the disk was created. - CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` - - // (Deprecated) The number of GB in use by the disk. - // - // In releases prior to November 14, 2017, this parameter was not included in - // the API response. It is now deprecated. - // - // Deprecated: GbInUse has been deprecated - GbInUse *int64 `locationName:"gbInUse" deprecated:"true" type:"integer"` - - // The input/output operations per second (IOPS) of the disk. - Iops *int64 `locationName:"iops" type:"integer"` - - // A Boolean value indicating whether the disk is attached. - IsAttached *bool `locationName:"isAttached" type:"boolean"` - - // A Boolean value indicating whether this disk is a system disk (has an operating - // system loaded on it). - IsSystemDisk *bool `locationName:"isSystemDisk" type:"boolean"` - - // The AWS Region and Availability Zone where the disk is located. - Location *ResourceLocation `locationName:"location" type:"structure"` - - // The unique name of the disk. - Name *string `locationName:"name" type:"string"` - - // The disk path. - Path *string `locationName:"path" type:"string"` - - // The Lightsail resource type (e.g., Disk). - ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` - - // The size of the disk in GB. - SizeInGb *int64 `locationName:"sizeInGb" type:"integer"` - - // Describes the status of the disk. - State *string `locationName:"state" type:"string" enum:"DiskState"` - - // The support code. Include this code in your email to support when you have - // questions about an instance or another resource in Lightsail. This code enables - // our support team to look up your Lightsail information more easily. - SupportCode *string `locationName:"supportCode" type:"string"` + // A base64-encoded RSA private key. + PrivateKeyBase64 *string `locationName:"privateKeyBase64" type:"string"` - // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). - Tags []*Tag `locationName:"tags" type:"list"` + // A base64-encoded public key of the ssh-rsa type. + PublicKeyBase64 *string `locationName:"publicKeyBase64" type:"string"` } // String returns the string representation -func (s Disk) String() string { +func (s DownloadDefaultKeyPairOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Disk) GoString() string { +func (s DownloadDefaultKeyPairOutput) GoString() string { return s.String() } -// SetAddOns sets the AddOns field's value. -func (s *Disk) SetAddOns(v []*AddOn) *Disk { - s.AddOns = v - return s -} - -// SetArn sets the Arn field's value. -func (s *Disk) SetArn(v string) *Disk { - s.Arn = &v - return s -} - -// SetAttachedTo sets the AttachedTo field's value. -func (s *Disk) SetAttachedTo(v string) *Disk { - s.AttachedTo = &v - return s -} - -// SetAttachmentState sets the AttachmentState field's value. -func (s *Disk) SetAttachmentState(v string) *Disk { - s.AttachmentState = &v +// SetPrivateKeyBase64 sets the PrivateKeyBase64 field's value. +func (s *DownloadDefaultKeyPairOutput) SetPrivateKeyBase64(v string) *DownloadDefaultKeyPairOutput { + s.PrivateKeyBase64 = &v return s } -// SetCreatedAt sets the CreatedAt field's value. 
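// --- Editorial usage sketch (not part of the vendored diff) ---------------
// Fetching the account's default key pair via DownloadDefaultKeyPair and
// saving the private key material from PrivateKeyBase64, assuming a configured
// *lightsail.Lightsail client (`svc`); the output file name is illustrative.
// Assumed imports: "io/ioutil", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/service/lightsail".
func downloadDefaultKeyPairSketch(svc *lightsail.Lightsail) error {
	out, err := svc.DownloadDefaultKeyPair(&lightsail.DownloadDefaultKeyPairInput{})
	if err != nil {
		return err
	}
	// Persist the private key with restrictive permissions.
	return ioutil.WriteFile("lightsail-default-key.pem",
		[]byte(aws.StringValue(out.PrivateKeyBase64)), 0600)
}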
-func (s *Disk) SetCreatedAt(v time.Time) *Disk { - s.CreatedAt = &v +// SetPublicKeyBase64 sets the PublicKeyBase64 field's value. +func (s *DownloadDefaultKeyPairOutput) SetPublicKeyBase64(v string) *DownloadDefaultKeyPairOutput { + s.PublicKeyBase64 = &v return s } -// SetGbInUse sets the GbInUse field's value. -func (s *Disk) SetGbInUse(v int64) *Disk { - s.GbInUse = &v - return s -} +type EnableAddOnInput struct { + _ struct{} `type:"structure"` -// SetIops sets the Iops field's value. -func (s *Disk) SetIops(v int64) *Disk { - s.Iops = &v - return s -} + // An array of strings representing the add-on to enable or modify. + // + // AddOnRequest is a required field + AddOnRequest *AddOnRequest `locationName:"addOnRequest" type:"structure" required:"true"` -// SetIsAttached sets the IsAttached field's value. -func (s *Disk) SetIsAttached(v bool) *Disk { - s.IsAttached = &v - return s + // The name of the source resource for which to enable or modify the add-on. + // + // ResourceName is a required field + ResourceName *string `locationName:"resourceName" type:"string" required:"true"` } -// SetIsSystemDisk sets the IsSystemDisk field's value. -func (s *Disk) SetIsSystemDisk(v bool) *Disk { - s.IsSystemDisk = &v - return s +// String returns the string representation +func (s EnableAddOnInput) String() string { + return awsutil.Prettify(s) } -// SetLocation sets the Location field's value. -func (s *Disk) SetLocation(v *ResourceLocation) *Disk { - s.Location = v - return s +// GoString returns the string representation +func (s EnableAddOnInput) GoString() string { + return s.String() } -// SetName sets the Name field's value. -func (s *Disk) SetName(v string) *Disk { - s.Name = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableAddOnInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableAddOnInput"} + if s.AddOnRequest == nil { + invalidParams.Add(request.NewErrParamRequired("AddOnRequest")) + } + if s.ResourceName == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceName")) + } + if s.AddOnRequest != nil { + if err := s.AddOnRequest.Validate(); err != nil { + invalidParams.AddNested("AddOnRequest", err.(request.ErrInvalidParams)) + } + } -// SetPath sets the Path field's value. -func (s *Disk) SetPath(v string) *Disk { - s.Path = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetResourceType sets the ResourceType field's value. -func (s *Disk) SetResourceType(v string) *Disk { - s.ResourceType = &v +// SetAddOnRequest sets the AddOnRequest field's value. +func (s *EnableAddOnInput) SetAddOnRequest(v *AddOnRequest) *EnableAddOnInput { + s.AddOnRequest = v return s } -// SetSizeInGb sets the SizeInGb field's value. -func (s *Disk) SetSizeInGb(v int64) *Disk { - s.SizeInGb = &v +// SetResourceName sets the ResourceName field's value. +func (s *EnableAddOnInput) SetResourceName(v string) *EnableAddOnInput { + s.ResourceName = &v return s } -// SetState sets the State field's value. -func (s *Disk) SetState(v string) *Disk { - s.State = &v - return s +type EnableAddOnOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` } -// SetSupportCode sets the SupportCode field's value. 
-func (s *Disk) SetSupportCode(v string) *Disk { - s.SupportCode = &v - return s +// String returns the string representation +func (s EnableAddOnOutput) String() string { + return awsutil.Prettify(s) } -// SetTags sets the Tags field's value. -func (s *Disk) SetTags(v []*Tag) *Disk { - s.Tags = v +// GoString returns the string representation +func (s EnableAddOnOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. +func (s *EnableAddOnOutput) SetOperations(v []*Operation) *EnableAddOnOutput { + s.Operations = v return s } -// Describes a disk. -type DiskInfo struct { +type ExportSnapshotInput struct { _ struct{} `type:"structure"` - // A Boolean value indicating whether this disk is a system disk (has an operating - // system loaded on it). - IsSystemDisk *bool `locationName:"isSystemDisk" type:"boolean"` - - // The disk name. - Name *string `locationName:"name" type:"string"` - - // The disk path. - Path *string `locationName:"path" type:"string"` - - // The size of the disk in GB (e.g., 32). - SizeInGb *int64 `locationName:"sizeInGb" type:"integer"` + // The name of the instance or disk snapshot to be exported to Amazon EC2. + // + // SourceSnapshotName is a required field + SourceSnapshotName *string `locationName:"sourceSnapshotName" type:"string" required:"true"` } // String returns the string representation -func (s DiskInfo) String() string { +func (s ExportSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DiskInfo) GoString() string { +func (s ExportSnapshotInput) GoString() string { return s.String() } -// SetIsSystemDisk sets the IsSystemDisk field's value. -func (s *DiskInfo) SetIsSystemDisk(v bool) *DiskInfo { - s.IsSystemDisk = &v - return s -} - -// SetName sets the Name field's value. -func (s *DiskInfo) SetName(v string) *DiskInfo { - s.Name = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExportSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExportSnapshotInput"} + if s.SourceSnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("SourceSnapshotName")) + } -// SetPath sets the Path field's value. -func (s *DiskInfo) SetPath(v string) *DiskInfo { - s.Path = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSizeInGb sets the SizeInGb field's value. -func (s *DiskInfo) SetSizeInGb(v int64) *DiskInfo { - s.SizeInGb = &v +// SetSourceSnapshotName sets the SourceSnapshotName field's value. +func (s *ExportSnapshotInput) SetSourceSnapshotName(v string) *ExportSnapshotInput { + s.SourceSnapshotName = &v return s } -// Describes a block storage disk mapping. -type DiskMap struct { +type ExportSnapshotOutput struct { _ struct{} `type:"structure"` - // The new disk name (e.g., my-new-disk). - NewDiskName *string `locationName:"newDiskName" type:"string"` - - // The original disk path exposed to the instance (for example, /dev/sdh). - OriginalDiskPath *string `locationName:"originalDiskPath" type:"string"` + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. 
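// --- Editorial usage sketch (not part of the vendored diff) ---------------
// Enabling the automatic snapshot add-on through EnableAddOnInput and its
// nested AddOnRequest, mirroring the Validate logic above (AddOnRequest and
// ResourceName are required, and the nested request is validated as well).
// The resource name and snapshot time are illustrative assumptions.
// Assumed imports: "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/service/lightsail".
func enableAutoSnapshotSketch(svc *lightsail.Lightsail) (*lightsail.EnableAddOnOutput, error) {
	in := &lightsail.EnableAddOnInput{
		ResourceName: aws.String("my-instance"), // hypothetical Lightsail resource
		AddOnRequest: &lightsail.AddOnRequest{
			AddOnType: aws.String("AutoSnapshot"),
			AutoSnapshotAddOnRequest: &lightsail.AutoSnapshotAddOnRequest{
				SnapshotTimeOfDay: aws.String("06:00"), // daily snapshot window (HH:00, UTC)
			},
		},
	}
	if err := in.Validate(); err != nil {
		return nil, err
	}
	return svc.EnableAddOn(in)
}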
+ Operations []*Operation `locationName:"operations" type:"list"` } // String returns the string representation -func (s DiskMap) String() string { +func (s ExportSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DiskMap) GoString() string { +func (s ExportSnapshotOutput) GoString() string { return s.String() } -// SetNewDiskName sets the NewDiskName field's value. -func (s *DiskMap) SetNewDiskName(v string) *DiskMap { - s.NewDiskName = &v - return s -} - -// SetOriginalDiskPath sets the OriginalDiskPath field's value. -func (s *DiskMap) SetOriginalDiskPath(v string) *DiskMap { - s.OriginalDiskPath = &v +// SetOperations sets the Operations field's value. +func (s *ExportSnapshotOutput) SetOperations(v []*Operation) *ExportSnapshotOutput { + s.Operations = v return s } -// Describes a block storage disk snapshot. -type DiskSnapshot struct { +// Describes an export snapshot record. +type ExportSnapshotRecord struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the disk snapshot. + // The Amazon Resource Name (ARN) of the export snapshot record. Arn *string `locationName:"arn" type:"string"` - // The date when the disk snapshot was created. + // The date when the export snapshot record was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` - // The Amazon Resource Name (ARN) of the source disk from which the disk snapshot - // was created. - FromDiskArn *string `locationName:"fromDiskArn" type:"string"` - - // The unique name of the source disk from which the disk snapshot was created. - FromDiskName *string `locationName:"fromDiskName" type:"string"` - - // The Amazon Resource Name (ARN) of the source instance from which the disk - // (system volume) snapshot was created. - FromInstanceArn *string `locationName:"fromInstanceArn" type:"string"` - - // The unique name of the source instance from which the disk (system volume) - // snapshot was created. - FromInstanceName *string `locationName:"fromInstanceName" type:"string"` - - // A Boolean value indicating whether the snapshot was created from an automatic - // snapshot. - IsFromAutoSnapshot *bool `locationName:"isFromAutoSnapshot" type:"boolean"` + // A list of objects describing the destination of the export snapshot record. + DestinationInfo *DestinationInfo `locationName:"destinationInfo" type:"structure"` - // The AWS Region and Availability Zone where the disk snapshot was created. + // The AWS Region and Availability Zone where the export snapshot record is + // located. Location *ResourceLocation `locationName:"location" type:"structure"` - // The name of the disk snapshot (e.g., my-disk-snapshot). + // The export snapshot record name. Name *string `locationName:"name" type:"string"` - // The progress of the disk snapshot operation. - Progress *string `locationName:"progress" type:"string"` - - // The Lightsail resource type (e.g., DiskSnapshot). + // The Lightsail resource type (e.g., ExportSnapshotRecord). ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` - // The size of the disk in GB. - SizeInGb *int64 `locationName:"sizeInGb" type:"integer"` - - // The status of the disk snapshot operation. - State *string `locationName:"state" type:"string" enum:"DiskSnapshotState"` - - // The support code. Include this code in your email to support when you have - // questions about an instance or another resource in Lightsail. 
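// --- Editorial usage sketch (not part of the vendored diff) ---------------
// Exporting a Lightsail snapshot to Amazon EC2 with ExportSnapshotInput and
// then listing the resulting export records; the snapshot name is an
// illustrative assumption, and GetExportSnapshotRecords is the companion list
// call from the same API (not shown in this hunk).
// Assumed imports: "fmt", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/service/lightsail".
func exportSnapshotSketch(svc *lightsail.Lightsail) error {
	if _, err := svc.ExportSnapshot(&lightsail.ExportSnapshotInput{
		SourceSnapshotName: aws.String("my-disk-snapshot"), // hypothetical snapshot name
	}); err != nil {
		return err
	}
	recs, err := svc.GetExportSnapshotRecords(&lightsail.GetExportSnapshotRecordsInput{})
	if err != nil {
		return err
	}
	for _, r := range recs.ExportSnapshotRecords {
		fmt.Printf("%s: %s\n", aws.StringValue(r.Name), aws.StringValue(r.State))
	}
	return nil
}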
This code enables - // our support team to look up your Lightsail information more easily. - SupportCode *string `locationName:"supportCode" type:"string"` + // A list of objects describing the source of the export snapshot record. + SourceInfo *ExportSnapshotRecordSourceInfo `locationName:"sourceInfo" type:"structure"` - // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). - Tags []*Tag `locationName:"tags" type:"list"` + // The state of the export snapshot record. + State *string `locationName:"state" type:"string" enum:"RecordState"` } // String returns the string representation -func (s DiskSnapshot) String() string { +func (s ExportSnapshotRecord) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DiskSnapshot) GoString() string { +func (s ExportSnapshotRecord) GoString() string { return s.String() } // SetArn sets the Arn field's value. -func (s *DiskSnapshot) SetArn(v string) *DiskSnapshot { +func (s *ExportSnapshotRecord) SetArn(v string) *ExportSnapshotRecord { s.Arn = &v return s } // SetCreatedAt sets the CreatedAt field's value. -func (s *DiskSnapshot) SetCreatedAt(v time.Time) *DiskSnapshot { +func (s *ExportSnapshotRecord) SetCreatedAt(v time.Time) *ExportSnapshotRecord { s.CreatedAt = &v return s } -// SetFromDiskArn sets the FromDiskArn field's value. -func (s *DiskSnapshot) SetFromDiskArn(v string) *DiskSnapshot { - s.FromDiskArn = &v - return s -} - -// SetFromDiskName sets the FromDiskName field's value. -func (s *DiskSnapshot) SetFromDiskName(v string) *DiskSnapshot { - s.FromDiskName = &v - return s -} - -// SetFromInstanceArn sets the FromInstanceArn field's value. -func (s *DiskSnapshot) SetFromInstanceArn(v string) *DiskSnapshot { - s.FromInstanceArn = &v - return s -} - -// SetFromInstanceName sets the FromInstanceName field's value. -func (s *DiskSnapshot) SetFromInstanceName(v string) *DiskSnapshot { - s.FromInstanceName = &v - return s -} - -// SetIsFromAutoSnapshot sets the IsFromAutoSnapshot field's value. -func (s *DiskSnapshot) SetIsFromAutoSnapshot(v bool) *DiskSnapshot { - s.IsFromAutoSnapshot = &v +// SetDestinationInfo sets the DestinationInfo field's value. +func (s *ExportSnapshotRecord) SetDestinationInfo(v *DestinationInfo) *ExportSnapshotRecord { + s.DestinationInfo = v return s } // SetLocation sets the Location field's value. -func (s *DiskSnapshot) SetLocation(v *ResourceLocation) *DiskSnapshot { +func (s *ExportSnapshotRecord) SetLocation(v *ResourceLocation) *ExportSnapshotRecord { s.Location = v return s } -// SetName sets the Name field's value. -func (s *DiskSnapshot) SetName(v string) *DiskSnapshot { - s.Name = &v - return s -} - -// SetProgress sets the Progress field's value. -func (s *DiskSnapshot) SetProgress(v string) *DiskSnapshot { - s.Progress = &v - return s -} - -// SetResourceType sets the ResourceType field's value. -func (s *DiskSnapshot) SetResourceType(v string) *DiskSnapshot { - s.ResourceType = &v - return s -} - -// SetSizeInGb sets the SizeInGb field's value. -func (s *DiskSnapshot) SetSizeInGb(v int64) *DiskSnapshot { - s.SizeInGb = &v - return s -} - -// SetState sets the State field's value. -func (s *DiskSnapshot) SetState(v string) *DiskSnapshot { - s.State = &v - return s -} - -// SetSupportCode sets the SupportCode field's value. 
-func (s *DiskSnapshot) SetSupportCode(v string) *DiskSnapshot { - s.SupportCode = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *DiskSnapshot) SetTags(v []*Tag) *DiskSnapshot { - s.Tags = v - return s -} - -// Describes a disk snapshot. -type DiskSnapshotInfo struct { - _ struct{} `type:"structure"` - - // The size of the disk in GB (e.g., 32). - SizeInGb *int64 `locationName:"sizeInGb" type:"integer"` +// SetName sets the Name field's value. +func (s *ExportSnapshotRecord) SetName(v string) *ExportSnapshotRecord { + s.Name = &v + return s } -// String returns the string representation -func (s DiskSnapshotInfo) String() string { - return awsutil.Prettify(s) +// SetResourceType sets the ResourceType field's value. +func (s *ExportSnapshotRecord) SetResourceType(v string) *ExportSnapshotRecord { + s.ResourceType = &v + return s } -// GoString returns the string representation -func (s DiskSnapshotInfo) GoString() string { - return s.String() +// SetSourceInfo sets the SourceInfo field's value. +func (s *ExportSnapshotRecord) SetSourceInfo(v *ExportSnapshotRecordSourceInfo) *ExportSnapshotRecord { + s.SourceInfo = v + return s } -// SetSizeInGb sets the SizeInGb field's value. -func (s *DiskSnapshotInfo) SetSizeInGb(v int64) *DiskSnapshotInfo { - s.SizeInGb = &v +// SetState sets the State field's value. +func (s *ExportSnapshotRecord) SetState(v string) *ExportSnapshotRecord { + s.State = &v return s } -// Describes a domain where you are storing recordsets in Lightsail. -type Domain struct { +// Describes the source of an export snapshot record. +type ExportSnapshotRecordSourceInfo struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the domain recordset (e.g., arn:aws:lightsail:global:123456789101:Domain/824cede0-abc7-4f84-8dbc-12345EXAMPLE). + // The Amazon Resource Name (ARN) of the source instance or disk snapshot. Arn *string `locationName:"arn" type:"string"` - // The date when the domain recordset was created. + // The date when the source instance or disk snapshot was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` - // An array of key-value pairs containing information about the domain entries. - DomainEntries []*DomainEntry `locationName:"domainEntries" type:"list"` + // A list of objects describing a disk snapshot. + DiskSnapshotInfo *DiskSnapshotInfo `locationName:"diskSnapshotInfo" type:"structure"` - // The AWS Region and Availability Zones where the domain recordset was created. - Location *ResourceLocation `locationName:"location" type:"structure"` + // The Amazon Resource Name (ARN) of the snapshot's source instance or disk. + FromResourceArn *string `locationName:"fromResourceArn" type:"string"` - // The name of the domain. - Name *string `locationName:"name" type:"string"` + // The name of the snapshot's source instance or disk. + FromResourceName *string `locationName:"fromResourceName" type:"string"` - // The resource type. - ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + // A list of objects describing an instance snapshot. + InstanceSnapshotInfo *InstanceSnapshotInfo `locationName:"instanceSnapshotInfo" type:"structure"` - // The support code. Include this code in your email to support when you have - // questions about an instance or another resource in Lightsail. This code enables - // our support team to look up your Lightsail information more easily. 
- SupportCode *string `locationName:"supportCode" type:"string"` + // The name of the source instance or disk snapshot. + Name *string `locationName:"name" type:"string"` - // The tag keys and optional values for the resource. For more information about - // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). - Tags []*Tag `locationName:"tags" type:"list"` + // The Lightsail resource type (e.g., InstanceSnapshot or DiskSnapshot). + ResourceType *string `locationName:"resourceType" type:"string" enum:"ExportSnapshotRecordSourceType"` } // String returns the string representation -func (s Domain) String() string { +func (s ExportSnapshotRecordSourceInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Domain) GoString() string { +func (s ExportSnapshotRecordSourceInfo) GoString() string { return s.String() } // SetArn sets the Arn field's value. -func (s *Domain) SetArn(v string) *Domain { +func (s *ExportSnapshotRecordSourceInfo) SetArn(v string) *ExportSnapshotRecordSourceInfo { s.Arn = &v return s } // SetCreatedAt sets the CreatedAt field's value. -func (s *Domain) SetCreatedAt(v time.Time) *Domain { +func (s *ExportSnapshotRecordSourceInfo) SetCreatedAt(v time.Time) *ExportSnapshotRecordSourceInfo { s.CreatedAt = &v return s } -// SetDomainEntries sets the DomainEntries field's value. -func (s *Domain) SetDomainEntries(v []*DomainEntry) *Domain { - s.DomainEntries = v +// SetDiskSnapshotInfo sets the DiskSnapshotInfo field's value. +func (s *ExportSnapshotRecordSourceInfo) SetDiskSnapshotInfo(v *DiskSnapshotInfo) *ExportSnapshotRecordSourceInfo { + s.DiskSnapshotInfo = v return s } -// SetLocation sets the Location field's value. -func (s *Domain) SetLocation(v *ResourceLocation) *Domain { - s.Location = v +// SetFromResourceArn sets the FromResourceArn field's value. +func (s *ExportSnapshotRecordSourceInfo) SetFromResourceArn(v string) *ExportSnapshotRecordSourceInfo { + s.FromResourceArn = &v return s } -// SetName sets the Name field's value. -func (s *Domain) SetName(v string) *Domain { - s.Name = &v +// SetFromResourceName sets the FromResourceName field's value. +func (s *ExportSnapshotRecordSourceInfo) SetFromResourceName(v string) *ExportSnapshotRecordSourceInfo { + s.FromResourceName = &v return s } -// SetResourceType sets the ResourceType field's value. -func (s *Domain) SetResourceType(v string) *Domain { - s.ResourceType = &v +// SetInstanceSnapshotInfo sets the InstanceSnapshotInfo field's value. +func (s *ExportSnapshotRecordSourceInfo) SetInstanceSnapshotInfo(v *InstanceSnapshotInfo) *ExportSnapshotRecordSourceInfo { + s.InstanceSnapshotInfo = v return s } -// SetSupportCode sets the SupportCode field's value. -func (s *Domain) SetSupportCode(v string) *Domain { - s.SupportCode = &v +// SetName sets the Name field's value. +func (s *ExportSnapshotRecordSourceInfo) SetName(v string) *ExportSnapshotRecordSourceInfo { + s.Name = &v return s } -// SetTags sets the Tags field's value. -func (s *Domain) SetTags(v []*Tag) *Domain { - s.Tags = v +// SetResourceType sets the ResourceType field's value. +func (s *ExportSnapshotRecordSourceInfo) SetResourceType(v string) *ExportSnapshotRecordSourceInfo { + s.ResourceType = &v return s } -// Describes a domain recordset entry. -type DomainEntry struct { +type GetActiveNamesInput struct { _ struct{} `type:"structure"` - // The ID of the domain recordset entry. 
- Id *string `locationName:"id" type:"string"` - - // When true, specifies whether the domain entry is an alias used by the Lightsail - // load balancer. You can include an alias (A type) record in your request, - // which points to a load balancer DNS name and routes traffic to your load - // balancer - IsAlias *bool `locationName:"isAlias" type:"boolean"` - - // The name of the domain. - Name *string `locationName:"name" type:"string"` - - // (Deprecated) The options for the domain entry. - // - // In releases prior to November 29, 2017, this parameter was not included in - // the API response. It is now deprecated. - // - // Deprecated: Options has been deprecated - Options map[string]*string `locationName:"options" deprecated:"true" type:"map"` - - // The target AWS name server (e.g., ns-111.awsdns-22.com.). - // - // For Lightsail load balancers, the value looks like ab1234c56789c6b86aba6fb203d443bc-123456789.us-east-2.elb.amazonaws.com. - // Be sure to also set isAlias to true when setting up an A record for a load - // balancer. - Target *string `locationName:"target" type:"string"` - - // The type of domain entry, such as address (A), canonical name (CNAME), mail - // exchanger (MX), name server (NS), start of authority (SOA), service locator - // (SRV), or text (TXT). - // - // The following domain entry types can be used: - // - // * A - // - // * CNAME - // - // * MX - // - // * NS - // - // * SOA - // - // * SRV + // The token to advance to the next page of results from your request. // - // * TXT - Type *string `locationName:"type" type:"string"` + // To get a page token, perform an initial GetActiveNames request. If your results + // are paginated, the response will return a next page token that you can specify + // as the page token in a subsequent request. + PageToken *string `locationName:"pageToken" type:"string"` } // String returns the string representation -func (s DomainEntry) String() string { +func (s GetActiveNamesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DomainEntry) GoString() string { +func (s GetActiveNamesInput) GoString() string { return s.String() } -// SetId sets the Id field's value. -func (s *DomainEntry) SetId(v string) *DomainEntry { - s.Id = &v +// SetPageToken sets the PageToken field's value. +func (s *GetActiveNamesInput) SetPageToken(v string) *GetActiveNamesInput { + s.PageToken = &v return s } -// SetIsAlias sets the IsAlias field's value. -func (s *DomainEntry) SetIsAlias(v bool) *DomainEntry { - s.IsAlias = &v - return s +type GetActiveNamesOutput struct { + _ struct{} `type:"structure"` + + // The list of active names returned by the get active names request. + ActiveNames []*string `locationName:"activeNames" type:"list"` + + // The token to advance to the next page of resutls from your request. + // + // A next page token is not returned if there are no more results to display. + // + // To get the next page of results, perform another GetActiveNames request and + // specify the next page token using the pageToken parameter. + NextPageToken *string `locationName:"nextPageToken" type:"string"` } -// SetName sets the Name field's value. -func (s *DomainEntry) SetName(v string) *DomainEntry { - s.Name = &v - return s +// String returns the string representation +func (s GetActiveNamesOutput) String() string { + return awsutil.Prettify(s) } -// SetOptions sets the Options field's value. 
-func (s *DomainEntry) SetOptions(v map[string]*string) *DomainEntry { - s.Options = v - return s +// GoString returns the string representation +func (s GetActiveNamesOutput) GoString() string { + return s.String() } -// SetTarget sets the Target field's value. -func (s *DomainEntry) SetTarget(v string) *DomainEntry { - s.Target = &v +// SetActiveNames sets the ActiveNames field's value. +func (s *GetActiveNamesOutput) SetActiveNames(v []*string) *GetActiveNamesOutput { + s.ActiveNames = v return s } -// SetType sets the Type field's value. -func (s *DomainEntry) SetType(v string) *DomainEntry { - s.Type = &v +// SetNextPageToken sets the NextPageToken field's value. +func (s *GetActiveNamesOutput) SetNextPageToken(v string) *GetActiveNamesOutput { + s.NextPageToken = &v return s } -type DownloadDefaultKeyPairInput struct { +type GetAlarmsInput struct { _ struct{} `type:"structure"` + + // The name of the alarm. + // + // Specify an alarm name to return information about a specific alarm. + AlarmName *string `locationName:"alarmName" type:"string"` + + // The name of the Lightsail resource being monitored by the alarm. + // + // Specify a monitored resource name to return information about all alarms + // for a specific resource. + MonitoredResourceName *string `locationName:"monitoredResourceName" type:"string"` + + // The token to advance to the next page of results from your request. + // + // To get a page token, perform an initial GetAlarms request. If your results + // are paginated, the response will return a next page token that you can specify + // as the page token in a subsequent request. + PageToken *string `locationName:"pageToken" type:"string"` } // String returns the string representation -func (s DownloadDefaultKeyPairInput) String() string { +func (s GetAlarmsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DownloadDefaultKeyPairInput) GoString() string { +func (s GetAlarmsInput) GoString() string { return s.String() } -type DownloadDefaultKeyPairOutput struct { +// SetAlarmName sets the AlarmName field's value. +func (s *GetAlarmsInput) SetAlarmName(v string) *GetAlarmsInput { + s.AlarmName = &v + return s +} + +// SetMonitoredResourceName sets the MonitoredResourceName field's value. +func (s *GetAlarmsInput) SetMonitoredResourceName(v string) *GetAlarmsInput { + s.MonitoredResourceName = &v + return s +} + +// SetPageToken sets the PageToken field's value. +func (s *GetAlarmsInput) SetPageToken(v string) *GetAlarmsInput { + s.PageToken = &v + return s +} + +type GetAlarmsOutput struct { _ struct{} `type:"structure"` - // A base64-encoded RSA private key. - PrivateKeyBase64 *string `locationName:"privateKeyBase64" type:"string"` + // An array of objects that describe the alarms. + Alarms []*Alarm `locationName:"alarms" type:"list"` - // A base64-encoded public key of the ssh-rsa type. - PublicKeyBase64 *string `locationName:"publicKeyBase64" type:"string"` + // The token to advance to the next page of resutls from your request. + // + // A next page token is not returned if there are no more results to display. + // + // To get the next page of results, perform another GetAlarms request and specify + // the next page token using the pageToken parameter. 
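// A minimal pagination sketch (not vendored SDK code) for the pageToken /
// nextPageToken contract documented above, using GetActiveNames as the example
// operation. The region is a placeholder; the standard aws-sdk-go session and
// Lightsail client constructors are assumed.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := lightsail.New(sess)

	// Feed each response's NextPageToken back in as PageToken until the service
	// stops returning one, which signals the last page of results.
	var token *string
	for {
		out, err := svc.GetActiveNames(&lightsail.GetActiveNamesInput{PageToken: token})
		if err != nil {
			log.Fatal(err)
		}
		for _, name := range out.ActiveNames {
			fmt.Println(aws.StringValue(name))
		}
		if out.NextPageToken == nil {
			break
		}
		token = out.NextPageToken
	}
}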
+ NextPageToken *string `locationName:"nextPageToken" type:"string"` } // String returns the string representation -func (s DownloadDefaultKeyPairOutput) String() string { +func (s GetAlarmsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DownloadDefaultKeyPairOutput) GoString() string { +func (s GetAlarmsOutput) GoString() string { return s.String() } -// SetPrivateKeyBase64 sets the PrivateKeyBase64 field's value. -func (s *DownloadDefaultKeyPairOutput) SetPrivateKeyBase64(v string) *DownloadDefaultKeyPairOutput { - s.PrivateKeyBase64 = &v +// SetAlarms sets the Alarms field's value. +func (s *GetAlarmsOutput) SetAlarms(v []*Alarm) *GetAlarmsOutput { + s.Alarms = v return s } -// SetPublicKeyBase64 sets the PublicKeyBase64 field's value. -func (s *DownloadDefaultKeyPairOutput) SetPublicKeyBase64(v string) *DownloadDefaultKeyPairOutput { - s.PublicKeyBase64 = &v +// SetNextPageToken sets the NextPageToken field's value. +func (s *GetAlarmsOutput) SetNextPageToken(v string) *GetAlarmsOutput { + s.NextPageToken = &v return s } -type EnableAddOnInput struct { +type GetAutoSnapshotsInput struct { _ struct{} `type:"structure"` - // An array of strings representing the add-on to enable or modify. - // - // AddOnRequest is a required field - AddOnRequest *AddOnRequest `locationName:"addOnRequest" type:"structure" required:"true"` - - // The name of the source resource for which to enable or modify the add-on. + // The name of the source instance or disk from which to get automatic snapshot + // information. // // ResourceName is a required field ResourceName *string `locationName:"resourceName" type:"string" required:"true"` } // String returns the string representation -func (s EnableAddOnInput) String() string { +func (s GetAutoSnapshotsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EnableAddOnInput) GoString() string { +func (s GetAutoSnapshotsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *EnableAddOnInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "EnableAddOnInput"} - if s.AddOnRequest == nil { - invalidParams.Add(request.NewErrParamRequired("AddOnRequest")) - } +func (s *GetAutoSnapshotsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAutoSnapshotsInput"} if s.ResourceName == nil { invalidParams.Add(request.NewErrParamRequired("ResourceName")) } - if s.AddOnRequest != nil { - if err := s.AddOnRequest.Validate(); err != nil { - invalidParams.AddNested("AddOnRequest", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -18219,457 +21394,416 @@ func (s *EnableAddOnInput) Validate() error { return nil } -// SetAddOnRequest sets the AddOnRequest field's value. -func (s *EnableAddOnInput) SetAddOnRequest(v *AddOnRequest) *EnableAddOnInput { - s.AddOnRequest = v - return s -} - // SetResourceName sets the ResourceName field's value. 
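// Hedged sketch (not vendored SDK code): fetching only the alarms attached to a
// single monitored resource, using the MonitoredResourceName filter documented
// above and assuming the package exposes the matching GetAlarms operation in the
// usual generated form. "my-instance" is a placeholder resource name.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := lightsail.New(sess)

	input := (&lightsail.GetAlarmsInput{}).SetMonitoredResourceName("my-instance")
	out, err := svc.GetAlarms(input)
	if err != nil {
		log.Fatal(err)
	}
	for _, alarm := range out.Alarms {
		// Each Alarm is pretty-printed through its generated String method.
		fmt.Println(alarm.String())
	}
}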
-func (s *EnableAddOnInput) SetResourceName(v string) *EnableAddOnInput { +func (s *GetAutoSnapshotsInput) SetResourceName(v string) *GetAutoSnapshotsInput { s.ResourceName = &v return s } -type EnableAddOnOutput struct { +type GetAutoSnapshotsOutput struct { _ struct{} `type:"structure"` - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` + // An array of objects that describe the automatic snapshots that are available + // for the specified source instance or disk. + AutoSnapshots []*AutoSnapshotDetails `locationName:"autoSnapshots" type:"list"` + + // The name of the source instance or disk for the automatic snapshots. + ResourceName *string `locationName:"resourceName" type:"string"` + + // The resource type (e.g., Instance or Disk). + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` } // String returns the string representation -func (s EnableAddOnOutput) String() string { +func (s GetAutoSnapshotsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s EnableAddOnOutput) GoString() string { +func (s GetAutoSnapshotsOutput) GoString() string { return s.String() } -// SetOperations sets the Operations field's value. -func (s *EnableAddOnOutput) SetOperations(v []*Operation) *EnableAddOnOutput { - s.Operations = v +// SetAutoSnapshots sets the AutoSnapshots field's value. +func (s *GetAutoSnapshotsOutput) SetAutoSnapshots(v []*AutoSnapshotDetails) *GetAutoSnapshotsOutput { + s.AutoSnapshots = v return s } -type ExportSnapshotInput struct { +// SetResourceName sets the ResourceName field's value. +func (s *GetAutoSnapshotsOutput) SetResourceName(v string) *GetAutoSnapshotsOutput { + s.ResourceName = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *GetAutoSnapshotsOutput) SetResourceType(v string) *GetAutoSnapshotsOutput { + s.ResourceType = &v + return s +} + +type GetBlueprintsInput struct { _ struct{} `type:"structure"` - // The name of the instance or disk snapshot to be exported to Amazon EC2. + // A Boolean value indicating whether to include inactive results in your request. + IncludeInactive *bool `locationName:"includeInactive" type:"boolean"` + + // The token to advance to the next page of results from your request. // - // SourceSnapshotName is a required field - SourceSnapshotName *string `locationName:"sourceSnapshotName" type:"string" required:"true"` + // To get a page token, perform an initial GetBlueprints request. If your results + // are paginated, the response will return a next page token that you can specify + // as the page token in a subsequent request. + PageToken *string `locationName:"pageToken" type:"string"` } // String returns the string representation -func (s ExportSnapshotInput) String() string { +func (s GetBlueprintsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExportSnapshotInput) GoString() string { +func (s GetBlueprintsInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
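// Hedged sketch (not vendored SDK code) showing the generated client-side
// validation for GetAutoSnapshotsInput: ResourceName is required, so an empty
// input fails Validate while a named one passes. "my-instance" is a placeholder.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	// No ResourceName: Validate returns a request.ErrInvalidParams error.
	empty := &lightsail.GetAutoSnapshotsInput{}
	fmt.Println("empty input:", empty.Validate())

	// ResourceName set via the generated setter: Validate returns nil, so the
	// input is ready to pass to the GetAutoSnapshots operation.
	named := (&lightsail.GetAutoSnapshotsInput{}).SetResourceName("my-instance")
	fmt.Println("named input:", named.Validate())
}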
-func (s *ExportSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ExportSnapshotInput"} - if s.SourceSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("SourceSnapshotName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetIncludeInactive sets the IncludeInactive field's value. +func (s *GetBlueprintsInput) SetIncludeInactive(v bool) *GetBlueprintsInput { + s.IncludeInactive = &v + return s } -// SetSourceSnapshotName sets the SourceSnapshotName field's value. -func (s *ExportSnapshotInput) SetSourceSnapshotName(v string) *ExportSnapshotInput { - s.SourceSnapshotName = &v +// SetPageToken sets the PageToken field's value. +func (s *GetBlueprintsInput) SetPageToken(v string) *GetBlueprintsInput { + s.PageToken = &v return s } -type ExportSnapshotOutput struct { +type GetBlueprintsOutput struct { _ struct{} `type:"structure"` - // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected - // by the request. - Operations []*Operation `locationName:"operations" type:"list"` + // An array of key-value pairs that contains information about the available + // blueprints. + Blueprints []*Blueprint `locationName:"blueprints" type:"list"` + + // The token to advance to the next page of resutls from your request. + // + // A next page token is not returned if there are no more results to display. + // + // To get the next page of results, perform another GetBlueprints request and + // specify the next page token using the pageToken parameter. + NextPageToken *string `locationName:"nextPageToken" type:"string"` } // String returns the string representation -func (s ExportSnapshotOutput) String() string { +func (s GetBlueprintsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExportSnapshotOutput) GoString() string { +func (s GetBlueprintsOutput) GoString() string { return s.String() } -// SetOperations sets the Operations field's value. -func (s *ExportSnapshotOutput) SetOperations(v []*Operation) *ExportSnapshotOutput { - s.Operations = v +// SetBlueprints sets the Blueprints field's value. +func (s *GetBlueprintsOutput) SetBlueprints(v []*Blueprint) *GetBlueprintsOutput { + s.Blueprints = v return s } -// Describes an export snapshot record. -type ExportSnapshotRecord struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the export snapshot record. - Arn *string `locationName:"arn" type:"string"` - - // The date when the export snapshot record was created. - CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` - - // A list of objects describing the destination of the export snapshot record. - DestinationInfo *DestinationInfo `locationName:"destinationInfo" type:"structure"` - - // The AWS Region and Availability Zone where the export snapshot record is - // located. - Location *ResourceLocation `locationName:"location" type:"structure"` - - // The export snapshot record name. - Name *string `locationName:"name" type:"string"` +// SetNextPageToken sets the NextPageToken field's value. +func (s *GetBlueprintsOutput) SetNextPageToken(v string) *GetBlueprintsOutput { + s.NextPageToken = &v + return s +} - // The Lightsail resource type (e.g., ExportSnapshotRecord). 
- ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` +type GetBundlesInput struct { + _ struct{} `type:"structure"` - // A list of objects describing the source of the export snapshot record. - SourceInfo *ExportSnapshotRecordSourceInfo `locationName:"sourceInfo" type:"structure"` + // A Boolean value that indicates whether to include inactive bundle results + // in your request. + IncludeInactive *bool `locationName:"includeInactive" type:"boolean"` - // The state of the export snapshot record. - State *string `locationName:"state" type:"string" enum:"RecordState"` + // The token to advance to the next page of results from your request. + // + // To get a page token, perform an initial GetBundles request. If your results + // are paginated, the response will return a next page token that you can specify + // as the page token in a subsequent request. + PageToken *string `locationName:"pageToken" type:"string"` } // String returns the string representation -func (s ExportSnapshotRecord) String() string { +func (s GetBundlesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExportSnapshotRecord) GoString() string { +func (s GetBundlesInput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *ExportSnapshotRecord) SetArn(v string) *ExportSnapshotRecord { - s.Arn = &v +// SetIncludeInactive sets the IncludeInactive field's value. +func (s *GetBundlesInput) SetIncludeInactive(v bool) *GetBundlesInput { + s.IncludeInactive = &v return s } -// SetCreatedAt sets the CreatedAt field's value. -func (s *ExportSnapshotRecord) SetCreatedAt(v time.Time) *ExportSnapshotRecord { - s.CreatedAt = &v +// SetPageToken sets the PageToken field's value. +func (s *GetBundlesInput) SetPageToken(v string) *GetBundlesInput { + s.PageToken = &v return s } -// SetDestinationInfo sets the DestinationInfo field's value. -func (s *ExportSnapshotRecord) SetDestinationInfo(v *DestinationInfo) *ExportSnapshotRecord { - s.DestinationInfo = v - return s -} +type GetBundlesOutput struct { + _ struct{} `type:"structure"` -// SetLocation sets the Location field's value. -func (s *ExportSnapshotRecord) SetLocation(v *ResourceLocation) *ExportSnapshotRecord { - s.Location = v - return s + // An array of key-value pairs that contains information about the available + // bundles. + Bundles []*Bundle `locationName:"bundles" type:"list"` + + // The token to advance to the next page of resutls from your request. + // + // A next page token is not returned if there are no more results to display. + // + // To get the next page of results, perform another GetBundles request and specify + // the next page token using the pageToken parameter. + NextPageToken *string `locationName:"nextPageToken" type:"string"` } -// SetName sets the Name field's value. -func (s *ExportSnapshotRecord) SetName(v string) *ExportSnapshotRecord { - s.Name = &v - return s +// String returns the string representation +func (s GetBundlesOutput) String() string { + return awsutil.Prettify(s) } -// SetResourceType sets the ResourceType field's value. -func (s *ExportSnapshotRecord) SetResourceType(v string) *ExportSnapshotRecord { - s.ResourceType = &v - return s +// GoString returns the string representation +func (s GetBundlesOutput) GoString() string { + return s.String() } -// SetSourceInfo sets the SourceInfo field's value. 
-func (s *ExportSnapshotRecord) SetSourceInfo(v *ExportSnapshotRecordSourceInfo) *ExportSnapshotRecord { - s.SourceInfo = v +// SetBundles sets the Bundles field's value. +func (s *GetBundlesOutput) SetBundles(v []*Bundle) *GetBundlesOutput { + s.Bundles = v return s } -// SetState sets the State field's value. -func (s *ExportSnapshotRecord) SetState(v string) *ExportSnapshotRecord { - s.State = &v +// SetNextPageToken sets the NextPageToken field's value. +func (s *GetBundlesOutput) SetNextPageToken(v string) *GetBundlesOutput { + s.NextPageToken = &v return s } -// Describes the source of an export snapshot record. -type ExportSnapshotRecordSourceInfo struct { +type GetCertificatesInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the source instance or disk snapshot. - Arn *string `locationName:"arn" type:"string"` - - // The date when the source instance or disk snapshot was created. - CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` - - // A list of objects describing a disk snapshot. - DiskSnapshotInfo *DiskSnapshotInfo `locationName:"diskSnapshotInfo" type:"structure"` - - // The Amazon Resource Name (ARN) of the snapshot's source instance or disk. - FromResourceArn *string `locationName:"fromResourceArn" type:"string"` - - // The name of the snapshot's source instance or disk. - FromResourceName *string `locationName:"fromResourceName" type:"string"` - - // A list of objects describing an instance snapshot. - InstanceSnapshotInfo *InstanceSnapshotInfo `locationName:"instanceSnapshotInfo" type:"structure"` + // The name for the certificate for which to return information. + // + // When omitted, the response includes all of your certificates in the AWS region + // where the request is made. + CertificateName *string `locationName:"certificateName" type:"string"` - // The name of the source instance or disk snapshot. - Name *string `locationName:"name" type:"string"` + // The status of the certificates for which to return information. + // + // For example, specify ISSUED to return only certificates with an ISSUED status. + // + // When omitted, the response includes all of your certificates in the AWS region + // where the request is made, regardless of their current status. + CertificateStatuses []*string `locationName:"certificateStatuses" type:"list"` - // The Lightsail resource type (e.g., InstanceSnapshot or DiskSnapshot). - ResourceType *string `locationName:"resourceType" type:"string" enum:"ExportSnapshotRecordSourceType"` + // Indicates whether to include detailed information about the certificates + // in the response. + // + // When omitted, the response includes only the certificate names, Amazon Resource + // Names (ARNs), domain names, and tags. + IncludeCertificateDetails *bool `locationName:"includeCertificateDetails" type:"boolean"` } // String returns the string representation -func (s ExportSnapshotRecordSourceInfo) String() string { +func (s GetCertificatesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ExportSnapshotRecordSourceInfo) GoString() string { +func (s GetCertificatesInput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *ExportSnapshotRecordSourceInfo) SetArn(v string) *ExportSnapshotRecordSourceInfo { - s.Arn = &v +// SetCertificateName sets the CertificateName field's value. 
+func (s *GetCertificatesInput) SetCertificateName(v string) *GetCertificatesInput { + s.CertificateName = &v return s } -// SetCreatedAt sets the CreatedAt field's value. -func (s *ExportSnapshotRecordSourceInfo) SetCreatedAt(v time.Time) *ExportSnapshotRecordSourceInfo { - s.CreatedAt = &v +// SetCertificateStatuses sets the CertificateStatuses field's value. +func (s *GetCertificatesInput) SetCertificateStatuses(v []*string) *GetCertificatesInput { + s.CertificateStatuses = v return s } -// SetDiskSnapshotInfo sets the DiskSnapshotInfo field's value. -func (s *ExportSnapshotRecordSourceInfo) SetDiskSnapshotInfo(v *DiskSnapshotInfo) *ExportSnapshotRecordSourceInfo { - s.DiskSnapshotInfo = v +// SetIncludeCertificateDetails sets the IncludeCertificateDetails field's value. +func (s *GetCertificatesInput) SetIncludeCertificateDetails(v bool) *GetCertificatesInput { + s.IncludeCertificateDetails = &v return s } -// SetFromResourceArn sets the FromResourceArn field's value. -func (s *ExportSnapshotRecordSourceInfo) SetFromResourceArn(v string) *ExportSnapshotRecordSourceInfo { - s.FromResourceArn = &v - return s -} +type GetCertificatesOutput struct { + _ struct{} `type:"structure"` -// SetFromResourceName sets the FromResourceName field's value. -func (s *ExportSnapshotRecordSourceInfo) SetFromResourceName(v string) *ExportSnapshotRecordSourceInfo { - s.FromResourceName = &v - return s + // An object that describes certificates. + Certificates []*CertificateSummary `locationName:"certificates" type:"list"` } -// SetInstanceSnapshotInfo sets the InstanceSnapshotInfo field's value. -func (s *ExportSnapshotRecordSourceInfo) SetInstanceSnapshotInfo(v *InstanceSnapshotInfo) *ExportSnapshotRecordSourceInfo { - s.InstanceSnapshotInfo = v - return s +// String returns the string representation +func (s GetCertificatesOutput) String() string { + return awsutil.Prettify(s) } -// SetName sets the Name field's value. -func (s *ExportSnapshotRecordSourceInfo) SetName(v string) *ExportSnapshotRecordSourceInfo { - s.Name = &v - return s +// GoString returns the string representation +func (s GetCertificatesOutput) GoString() string { + return s.String() } -// SetResourceType sets the ResourceType field's value. -func (s *ExportSnapshotRecordSourceInfo) SetResourceType(v string) *ExportSnapshotRecordSourceInfo { - s.ResourceType = &v +// SetCertificates sets the Certificates field's value. +func (s *GetCertificatesOutput) SetCertificates(v []*CertificateSummary) *GetCertificatesOutput { + s.Certificates = v return s } -type GetActiveNamesInput struct { +type GetCloudFormationStackRecordsInput struct { _ struct{} `type:"structure"` // The token to advance to the next page of results from your request. // - // To get a page token, perform an initial GetActiveNames request. If your results - // are paginated, the response will return a next page token that you can specify - // as the page token in a subsequent request. + // To get a page token, perform an initial GetClouFormationStackRecords request. + // If your results are paginated, the response will return a next page token + // that you can specify as the page token in a subsequent request. 
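// Hedged sketch (not vendored SDK code): listing only ISSUED certificates with
// details included, using the GetCertificatesInput filters documented above and
// assuming the package exposes the matching GetCertificates operation in the
// usual generated form. The region is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := lightsail.New(sess)

	// Omitting CertificateName returns every certificate in the region; the
	// status filter narrows that to ISSUED certificates only.
	input := (&lightsail.GetCertificatesInput{}).
		SetCertificateStatuses([]*string{aws.String("ISSUED")}).
		SetIncludeCertificateDetails(true)

	out, err := svc.GetCertificates(input)
	if err != nil {
		log.Fatal(err)
	}
	for _, summary := range out.Certificates {
		// CertificateSummary pretty-printed via its generated String method.
		fmt.Println(summary.String())
	}
}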
PageToken *string `locationName:"pageToken" type:"string"` } // String returns the string representation -func (s GetActiveNamesInput) String() string { +func (s GetCloudFormationStackRecordsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetActiveNamesInput) GoString() string { +func (s GetCloudFormationStackRecordsInput) GoString() string { return s.String() } // SetPageToken sets the PageToken field's value. -func (s *GetActiveNamesInput) SetPageToken(v string) *GetActiveNamesInput { +func (s *GetCloudFormationStackRecordsInput) SetPageToken(v string) *GetCloudFormationStackRecordsInput { s.PageToken = &v return s } -type GetActiveNamesOutput struct { +type GetCloudFormationStackRecordsOutput struct { _ struct{} `type:"structure"` - // The list of active names returned by the get active names request. - ActiveNames []*string `locationName:"activeNames" type:"list"` + // A list of objects describing the CloudFormation stack records. + CloudFormationStackRecords []*CloudFormationStackRecord `locationName:"cloudFormationStackRecords" type:"list"` // The token to advance to the next page of resutls from your request. // // A next page token is not returned if there are no more results to display. // - // To get the next page of results, perform another GetActiveNames request and - // specify the next page token using the pageToken parameter. + // To get the next page of results, perform another GetCloudFormationStackRecords + // request and specify the next page token using the pageToken parameter. NextPageToken *string `locationName:"nextPageToken" type:"string"` } // String returns the string representation -func (s GetActiveNamesOutput) String() string { +func (s GetCloudFormationStackRecordsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetActiveNamesOutput) GoString() string { +func (s GetCloudFormationStackRecordsOutput) GoString() string { return s.String() } -// SetActiveNames sets the ActiveNames field's value. -func (s *GetActiveNamesOutput) SetActiveNames(v []*string) *GetActiveNamesOutput { - s.ActiveNames = v +// SetCloudFormationStackRecords sets the CloudFormationStackRecords field's value. +func (s *GetCloudFormationStackRecordsOutput) SetCloudFormationStackRecords(v []*CloudFormationStackRecord) *GetCloudFormationStackRecordsOutput { + s.CloudFormationStackRecords = v return s } // SetNextPageToken sets the NextPageToken field's value. -func (s *GetActiveNamesOutput) SetNextPageToken(v string) *GetActiveNamesOutput { +func (s *GetCloudFormationStackRecordsOutput) SetNextPageToken(v string) *GetCloudFormationStackRecordsOutput { s.NextPageToken = &v return s } -type GetAlarmsInput struct { +type GetContactMethodsInput struct { _ struct{} `type:"structure"` - // The name of the alarm. - // - // Specify an alarm name to return information about a specific alarm. - AlarmName *string `locationName:"alarmName" type:"string"` - - // The name of the Lightsail resource being monitored by the alarm. - // - // Specify a monitored resource name to return information about all alarms - // for a specific resource. - MonitoredResourceName *string `locationName:"monitoredResourceName" type:"string"` - - // The token to advance to the next page of results from your request. + // The protocols used to send notifications, such as Email, or SMS (text messaging). // - // To get a page token, perform an initial GetAlarms request. 
If your results - // are paginated, the response will return a next page token that you can specify - // as the page token in a subsequent request. - PageToken *string `locationName:"pageToken" type:"string"` + // Specify a protocol in your request to return information about a specific + // contact method protocol. + Protocols []*string `locationName:"protocols" type:"list"` } // String returns the string representation -func (s GetAlarmsInput) String() string { +func (s GetContactMethodsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAlarmsInput) GoString() string { +func (s GetContactMethodsInput) GoString() string { return s.String() } -// SetAlarmName sets the AlarmName field's value. -func (s *GetAlarmsInput) SetAlarmName(v string) *GetAlarmsInput { - s.AlarmName = &v - return s -} - -// SetMonitoredResourceName sets the MonitoredResourceName field's value. -func (s *GetAlarmsInput) SetMonitoredResourceName(v string) *GetAlarmsInput { - s.MonitoredResourceName = &v - return s -} - -// SetPageToken sets the PageToken field's value. -func (s *GetAlarmsInput) SetPageToken(v string) *GetAlarmsInput { - s.PageToken = &v +// SetProtocols sets the Protocols field's value. +func (s *GetContactMethodsInput) SetProtocols(v []*string) *GetContactMethodsInput { + s.Protocols = v return s } -type GetAlarmsOutput struct { +type GetContactMethodsOutput struct { _ struct{} `type:"structure"` - // An array of objects that describe the alarms. - Alarms []*Alarm `locationName:"alarms" type:"list"` - - // The token to advance to the next page of resutls from your request. - // - // A next page token is not returned if there are no more results to display. - // - // To get the next page of results, perform another GetAlarms request and specify - // the next page token using the pageToken parameter. - NextPageToken *string `locationName:"nextPageToken" type:"string"` + // An array of objects that describe the contact methods. + ContactMethods []*ContactMethod `locationName:"contactMethods" type:"list"` } // String returns the string representation -func (s GetAlarmsOutput) String() string { +func (s GetContactMethodsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAlarmsOutput) GoString() string { +func (s GetContactMethodsOutput) GoString() string { return s.String() } -// SetAlarms sets the Alarms field's value. -func (s *GetAlarmsOutput) SetAlarms(v []*Alarm) *GetAlarmsOutput { - s.Alarms = v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *GetAlarmsOutput) SetNextPageToken(v string) *GetAlarmsOutput { - s.NextPageToken = &v +// SetContactMethods sets the ContactMethods field's value. +func (s *GetContactMethodsOutput) SetContactMethods(v []*ContactMethod) *GetContactMethodsOutput { + s.ContactMethods = v return s } -type GetAutoSnapshotsInput struct { +type GetDiskInput struct { _ struct{} `type:"structure"` - // The name of the source instance or disk from which to get automatic snapshot - // information. + // The name of the disk (e.g., my-disk). 
// - // ResourceName is a required field - ResourceName *string `locationName:"resourceName" type:"string" required:"true"` + // DiskName is a required field + DiskName *string `locationName:"diskName" type:"string" required:"true"` } // String returns the string representation -func (s GetAutoSnapshotsInput) String() string { +func (s GetDiskInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAutoSnapshotsInput) GoString() string { +func (s GetDiskInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetAutoSnapshotsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAutoSnapshotsInput"} - if s.ResourceName == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceName")) +func (s *GetDiskInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDiskInput"} + if s.DiskName == nil { + invalidParams.Add(request.NewErrParamRequired("DiskName")) } if invalidParams.Len() > 0 { @@ -18678,401 +21812,479 @@ func (s *GetAutoSnapshotsInput) Validate() error { return nil } -// SetResourceName sets the ResourceName field's value. -func (s *GetAutoSnapshotsInput) SetResourceName(v string) *GetAutoSnapshotsInput { - s.ResourceName = &v +// SetDiskName sets the DiskName field's value. +func (s *GetDiskInput) SetDiskName(v string) *GetDiskInput { + s.DiskName = &v return s } -type GetAutoSnapshotsOutput struct { +type GetDiskOutput struct { _ struct{} `type:"structure"` - // An array of objects that describe the automatic snapshots that are available - // for the specified source instance or disk. - AutoSnapshots []*AutoSnapshotDetails `locationName:"autoSnapshots" type:"list"` - - // The name of the source instance or disk for the automatic snapshots. - ResourceName *string `locationName:"resourceName" type:"string"` - - // The resource type (e.g., Instance or Disk). - ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + // An object containing information about the disk. + Disk *Disk `locationName:"disk" type:"structure"` } // String returns the string representation -func (s GetAutoSnapshotsOutput) String() string { +func (s GetDiskOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAutoSnapshotsOutput) GoString() string { +func (s GetDiskOutput) GoString() string { return s.String() } -// SetAutoSnapshots sets the AutoSnapshots field's value. -func (s *GetAutoSnapshotsOutput) SetAutoSnapshots(v []*AutoSnapshotDetails) *GetAutoSnapshotsOutput { - s.AutoSnapshots = v - return s -} - -// SetResourceName sets the ResourceName field's value. -func (s *GetAutoSnapshotsOutput) SetResourceName(v string) *GetAutoSnapshotsOutput { - s.ResourceName = &v - return s -} - -// SetResourceType sets the ResourceType field's value. -func (s *GetAutoSnapshotsOutput) SetResourceType(v string) *GetAutoSnapshotsOutput { - s.ResourceType = &v +// SetDisk sets the Disk field's value. +func (s *GetDiskOutput) SetDisk(v *Disk) *GetDiskOutput { + s.Disk = v return s } -type GetBlueprintsInput struct { +type GetDiskSnapshotInput struct { _ struct{} `type:"structure"` - // A Boolean value indicating whether to include inactive results in your request. - IncludeInactive *bool `locationName:"includeInactive" type:"boolean"` - - // The token to advance to the next page of results from your request. 
+ // The name of the disk snapshot (e.g., my-disk-snapshot). // - // To get a page token, perform an initial GetBlueprints request. If your results - // are paginated, the response will return a next page token that you can specify - // as the page token in a subsequent request. - PageToken *string `locationName:"pageToken" type:"string"` + // DiskSnapshotName is a required field + DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string" required:"true"` } // String returns the string representation -func (s GetBlueprintsInput) String() string { +func (s GetDiskSnapshotInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBlueprintsInput) GoString() string { +func (s GetDiskSnapshotInput) GoString() string { return s.String() } -// SetIncludeInactive sets the IncludeInactive field's value. -func (s *GetBlueprintsInput) SetIncludeInactive(v bool) *GetBlueprintsInput { - s.IncludeInactive = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDiskSnapshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDiskSnapshotInput"} + if s.DiskSnapshotName == nil { + invalidParams.Add(request.NewErrParamRequired("DiskSnapshotName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetPageToken sets the PageToken field's value. -func (s *GetBlueprintsInput) SetPageToken(v string) *GetBlueprintsInput { - s.PageToken = &v +// SetDiskSnapshotName sets the DiskSnapshotName field's value. +func (s *GetDiskSnapshotInput) SetDiskSnapshotName(v string) *GetDiskSnapshotInput { + s.DiskSnapshotName = &v return s } -type GetBlueprintsOutput struct { +type GetDiskSnapshotOutput struct { _ struct{} `type:"structure"` - // An array of key-value pairs that contains information about the available - // blueprints. - Blueprints []*Blueprint `locationName:"blueprints" type:"list"` - - // The token to advance to the next page of resutls from your request. - // - // A next page token is not returned if there are no more results to display. - // - // To get the next page of results, perform another GetBlueprints request and - // specify the next page token using the pageToken parameter. - NextPageToken *string `locationName:"nextPageToken" type:"string"` + // An object containing information about the disk snapshot. + DiskSnapshot *DiskSnapshot `locationName:"diskSnapshot" type:"structure"` } // String returns the string representation -func (s GetBlueprintsOutput) String() string { +func (s GetDiskSnapshotOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBlueprintsOutput) GoString() string { +func (s GetDiskSnapshotOutput) GoString() string { return s.String() } -// SetBlueprints sets the Blueprints field's value. -func (s *GetBlueprintsOutput) SetBlueprints(v []*Blueprint) *GetBlueprintsOutput { - s.Blueprints = v - return s -} - -// SetNextPageToken sets the NextPageToken field's value. -func (s *GetBlueprintsOutput) SetNextPageToken(v string) *GetBlueprintsOutput { - s.NextPageToken = &v +// SetDiskSnapshot sets the DiskSnapshot field's value. +func (s *GetDiskSnapshotOutput) SetDiskSnapshot(v *DiskSnapshot) *GetDiskSnapshotOutput { + s.DiskSnapshot = v return s } -type GetBundlesInput struct { +type GetDiskSnapshotsInput struct { _ struct{} `type:"structure"` - // A Boolean value that indicates whether to include inactive bundle results - // in your request. 
- IncludeInactive *bool `locationName:"includeInactive" type:"boolean"` - // The token to advance to the next page of results from your request. // - // To get a page token, perform an initial GetBundles request. If your results - // are paginated, the response will return a next page token that you can specify - // as the page token in a subsequent request. + // To get a page token, perform an initial GetDiskSnapshots request. If your + // results are paginated, the response will return a next page token that you + // can specify as the page token in a subsequent request. PageToken *string `locationName:"pageToken" type:"string"` } // String returns the string representation -func (s GetBundlesInput) String() string { +func (s GetDiskSnapshotsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBundlesInput) GoString() string { +func (s GetDiskSnapshotsInput) GoString() string { return s.String() } -// SetIncludeInactive sets the IncludeInactive field's value. -func (s *GetBundlesInput) SetIncludeInactive(v bool) *GetBundlesInput { - s.IncludeInactive = &v - return s -} - // SetPageToken sets the PageToken field's value. -func (s *GetBundlesInput) SetPageToken(v string) *GetBundlesInput { +func (s *GetDiskSnapshotsInput) SetPageToken(v string) *GetDiskSnapshotsInput { s.PageToken = &v return s } -type GetBundlesOutput struct { +type GetDiskSnapshotsOutput struct { _ struct{} `type:"structure"` - // An array of key-value pairs that contains information about the available - // bundles. - Bundles []*Bundle `locationName:"bundles" type:"list"` + // An array of objects containing information about all block storage disk snapshots. + DiskSnapshots []*DiskSnapshot `locationName:"diskSnapshots" type:"list"` // The token to advance to the next page of resutls from your request. // // A next page token is not returned if there are no more results to display. // - // To get the next page of results, perform another GetBundles request and specify - // the next page token using the pageToken parameter. + // To get the next page of results, perform another GetDiskSnapshots request + // and specify the next page token using the pageToken parameter. NextPageToken *string `locationName:"nextPageToken" type:"string"` } // String returns the string representation -func (s GetBundlesOutput) String() string { +func (s GetDiskSnapshotsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBundlesOutput) GoString() string { +func (s GetDiskSnapshotsOutput) GoString() string { return s.String() } -// SetBundles sets the Bundles field's value. -func (s *GetBundlesOutput) SetBundles(v []*Bundle) *GetBundlesOutput { - s.Bundles = v +// SetDiskSnapshots sets the DiskSnapshots field's value. +func (s *GetDiskSnapshotsOutput) SetDiskSnapshots(v []*DiskSnapshot) *GetDiskSnapshotsOutput { + s.DiskSnapshots = v return s } // SetNextPageToken sets the NextPageToken field's value. -func (s *GetBundlesOutput) SetNextPageToken(v string) *GetBundlesOutput { +func (s *GetDiskSnapshotsOutput) SetNextPageToken(v string) *GetDiskSnapshotsOutput { s.NextPageToken = &v return s } -type GetCloudFormationStackRecordsInput struct { +type GetDisksInput struct { _ struct{} `type:"structure"` // The token to advance to the next page of results from your request. // - // To get a page token, perform an initial GetClouFormationStackRecords request. 
- // If your results are paginated, the response will return a next page token - // that you can specify as the page token in a subsequent request. + // To get a page token, perform an initial GetDisks request. If your results + // are paginated, the response will return a next page token that you can specify + // as the page token in a subsequent request. PageToken *string `locationName:"pageToken" type:"string"` } // String returns the string representation -func (s GetCloudFormationStackRecordsInput) String() string { +func (s GetDisksInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCloudFormationStackRecordsInput) GoString() string { +func (s GetDisksInput) GoString() string { return s.String() } // SetPageToken sets the PageToken field's value. -func (s *GetCloudFormationStackRecordsInput) SetPageToken(v string) *GetCloudFormationStackRecordsInput { +func (s *GetDisksInput) SetPageToken(v string) *GetDisksInput { s.PageToken = &v return s } -type GetCloudFormationStackRecordsOutput struct { +type GetDisksOutput struct { _ struct{} `type:"structure"` - // A list of objects describing the CloudFormation stack records. - CloudFormationStackRecords []*CloudFormationStackRecord `locationName:"cloudFormationStackRecords" type:"list"` + // An array of objects containing information about all block storage disks. + Disks []*Disk `locationName:"disks" type:"list"` // The token to advance to the next page of resutls from your request. // // A next page token is not returned if there are no more results to display. // - // To get the next page of results, perform another GetCloudFormationStackRecords - // request and specify the next page token using the pageToken parameter. + // To get the next page of results, perform another GetDisks request and specify + // the next page token using the pageToken parameter. NextPageToken *string `locationName:"nextPageToken" type:"string"` } // String returns the string representation -func (s GetCloudFormationStackRecordsOutput) String() string { +func (s GetDisksOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetCloudFormationStackRecordsOutput) GoString() string { +func (s GetDisksOutput) GoString() string { return s.String() } -// SetCloudFormationStackRecords sets the CloudFormationStackRecords field's value. -func (s *GetCloudFormationStackRecordsOutput) SetCloudFormationStackRecords(v []*CloudFormationStackRecord) *GetCloudFormationStackRecordsOutput { - s.CloudFormationStackRecords = v +// SetDisks sets the Disks field's value. +func (s *GetDisksOutput) SetDisks(v []*Disk) *GetDisksOutput { + s.Disks = v return s } // SetNextPageToken sets the NextPageToken field's value. -func (s *GetCloudFormationStackRecordsOutput) SetNextPageToken(v string) *GetCloudFormationStackRecordsOutput { +func (s *GetDisksOutput) SetNextPageToken(v string) *GetDisksOutput { s.NextPageToken = &v return s } -type GetContactMethodsInput struct { +type GetDistributionBundlesInput struct { _ struct{} `type:"structure"` - - // The protocols used to send notifications, such as Email, or SMS (text messaging). - // - // Specify a protocol in your request to return information about a specific - // contact method protocol. 
- Protocols []*string `locationName:"protocols" type:"list"` } // String returns the string representation -func (s GetContactMethodsInput) String() string { +func (s GetDistributionBundlesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetContactMethodsInput) GoString() string { +func (s GetDistributionBundlesInput) GoString() string { return s.String() } -// SetProtocols sets the Protocols field's value. -func (s *GetContactMethodsInput) SetProtocols(v []*string) *GetContactMethodsInput { - s.Protocols = v - return s -} - -type GetContactMethodsOutput struct { +type GetDistributionBundlesOutput struct { _ struct{} `type:"structure"` - // An array of objects that describe the contact methods. - ContactMethods []*ContactMethod `locationName:"contactMethods" type:"list"` + // An object that describes a distribution bundle. + Bundles []*DistributionBundle `locationName:"bundles" type:"list"` } // String returns the string representation -func (s GetContactMethodsOutput) String() string { +func (s GetDistributionBundlesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetContactMethodsOutput) GoString() string { +func (s GetDistributionBundlesOutput) GoString() string { return s.String() } -// SetContactMethods sets the ContactMethods field's value. -func (s *GetContactMethodsOutput) SetContactMethods(v []*ContactMethod) *GetContactMethodsOutput { - s.ContactMethods = v +// SetBundles sets the Bundles field's value. +func (s *GetDistributionBundlesOutput) SetBundles(v []*DistributionBundle) *GetDistributionBundlesOutput { + s.Bundles = v return s } -type GetDiskInput struct { +type GetDistributionLatestCacheResetInput struct { _ struct{} `type:"structure"` - // The name of the disk (e.g., my-disk). + // The name of the distribution for which to return the timestamp of the last + // cache reset. // - // DiskName is a required field - DiskName *string `locationName:"diskName" type:"string" required:"true"` + // Use the GetDistributions action to get a list of distribution names that + // you can specify. + // + // When omitted, the response includes the latest cache reset timestamp of all + // your distributions. + DistributionName *string `locationName:"distributionName" type:"string"` } // String returns the string representation -func (s GetDiskInput) String() string { +func (s GetDistributionLatestCacheResetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDiskInput) GoString() string { +func (s GetDistributionLatestCacheResetInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetDiskInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDiskInput"} - if s.DiskName == nil { - invalidParams.Add(request.NewErrParamRequired("DiskName")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDiskName sets the DiskName field's value. -func (s *GetDiskInput) SetDiskName(v string) *GetDiskInput { - s.DiskName = &v +// SetDistributionName sets the DistributionName field's value. 
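// Hedged sketch (not vendored SDK code): GetDistributionBundlesInput carries no
// parameters, so listing the available distribution bundles is a single call,
// assuming the package exposes the matching GetDistributionBundles operation in
// the usual generated form. The region is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lightsail"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := lightsail.New(sess)

	out, err := svc.GetDistributionBundles(&lightsail.GetDistributionBundlesInput{})
	if err != nil {
		log.Fatal(err)
	}
	// Each DistributionBundle is pretty-printed through its generated String method.
	for _, bundle := range out.Bundles {
		fmt.Println(bundle.String())
	}
}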
+func (s *GetDistributionLatestCacheResetInput) SetDistributionName(v string) *GetDistributionLatestCacheResetInput { + s.DistributionName = &v return s } -type GetDiskOutput struct { +type GetDistributionLatestCacheResetOutput struct { _ struct{} `type:"structure"` - // An object containing information about the disk. - Disk *Disk `locationName:"disk" type:"structure"` + // The timestamp of the last cache reset (e.g., 1479734909.17) in Unix time + // format. + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` + + // The status of the last cache reset. + Status *string `locationName:"status" type:"string"` } // String returns the string representation -func (s GetDiskOutput) String() string { +func (s GetDistributionLatestCacheResetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDiskOutput) GoString() string { +func (s GetDistributionLatestCacheResetOutput) GoString() string { return s.String() } -// SetDisk sets the Disk field's value. -func (s *GetDiskOutput) SetDisk(v *Disk) *GetDiskOutput { - s.Disk = v +// SetCreateTime sets the CreateTime field's value. +func (s *GetDistributionLatestCacheResetOutput) SetCreateTime(v time.Time) *GetDistributionLatestCacheResetOutput { + s.CreateTime = &v return s } -type GetDiskSnapshotInput struct { +// SetStatus sets the Status field's value. +func (s *GetDistributionLatestCacheResetOutput) SetStatus(v string) *GetDistributionLatestCacheResetOutput { + s.Status = &v + return s +} + +type GetDistributionMetricDataInput struct { _ struct{} `type:"structure"` - // The name of the disk snapshot (e.g., my-disk-snapshot). + // The name of the distribution for which to get metric data. + // + // Use the GetDistributions action to get a list of distribution names that + // you can specify. + // + // DistributionName is a required field + DistributionName *string `locationName:"distributionName" type:"string" required:"true"` + + // The end of the time interval for which to get metric data. + // + // Constraints: + // + // * Specified in Coordinated Universal Time (UTC). + // + // * Specified in the Unix time format. For example, if you wish to use an + // end time of October 1, 2018, at 9 PM UTC, specify 1538427600 as the end + // time. + // + // You can convert a human-friendly time to Unix time format using a converter + // like Epoch converter (https://www.epochconverter.com/). + // + // EndTime is a required field + EndTime *time.Time `locationName:"endTime" type:"timestamp" required:"true"` + + // The metric for which you want to return information. + // + // Valid distribution metric names are listed below, along with the most useful + // statistics to include in your request, and the published unit value. + // + // * Requests - The total number of viewer requests received by your Lightsail + // distribution, for all HTTP methods, and for both HTTP and HTTPS requests. + // Statistics: The most useful statistic is Sum. Unit: The published unit + // is None. + // + // * BytesDownloaded - The number of bytes downloaded by viewers for GET, + // HEAD, and OPTIONS requests. Statistics: The most useful statistic is Sum. + // Unit: The published unit is None. + // + // * BytesUploaded - The number of bytes uploaded to your origin by your + // Lightsail distribution, using POST and PUT requests. Statistics: The most + // useful statistic is Sum. Unit: The published unit is None. 
+ // + // * TotalErrorRate - The percentage of all viewer requests for which the + // response's HTTP status code was 4xx or 5xx. Statistics: The most useful + // statistic is Average. Unit: The published unit is Percent. + // + // * 4xxErrorRate - The percentage of all viewer requests for which the response's + // HTTP status cod was 4xx. In these cases, the client or client viewer may + // have made an error. For example, a status code of 404 (Not Found) means + // that the client requested an object that could not be found. Statistics: + // The most useful statistic is Average. Unit: The published unit is Percent. + // + // * 5xxErrorRate - The percentage of all viewer requests for which the response's + // HTTP status code was 5xx. In these cases, the origin server did not satisfy + // the requests. For example, a status code of 503 (Service Unavailable) + // means that the origin server is currently unavailable. Statistics: The + // most useful statistic is Average. Unit: The published unit is Percent. + // + // MetricName is a required field + MetricName *string `locationName:"metricName" type:"string" required:"true" enum:"DistributionMetricName"` + + // The granularity, in seconds, for the metric data points that will be returned. + // + // Period is a required field + Period *int64 `locationName:"period" min:"60" type:"integer" required:"true"` + + // The start of the time interval for which to get metric data. + // + // Constraints: + // + // * Specified in Coordinated Universal Time (UTC). + // + // * Specified in the Unix time format. For example, if you wish to use a + // start time of October 1, 2018, at 8 PM UTC, specify 1538424000 as the + // start time. + // + // You can convert a human-friendly time to Unix time format using a converter + // like Epoch converter (https://www.epochconverter.com/). + // + // StartTime is a required field + StartTime *time.Time `locationName:"startTime" type:"timestamp" required:"true"` + + // The statistic for the metric. + // + // The following statistics are available: + // + // * Minimum - The lowest value observed during the specified period. Use + // this value to determine low volumes of activity for your application. + // + // * Maximum - The highest value observed during the specified period. Use + // this value to determine high volumes of activity for your application. + // + // * Sum - All values submitted for the matching metric added together. You + // can use this statistic to determine the total volume of a metric. + // + // * Average - The value of Sum / SampleCount during the specified period. + // By comparing this statistic with the Minimum and Maximum values, you can + // determine the full scope of a metric and how close the average use is + // to the Minimum and Maximum values. This comparison helps you to know when + // to increase or decrease your resources. + // + // * SampleCount - The count, or number, of data points used for the statistical + // calculation. + // + // Statistics is a required field + Statistics []*string `locationName:"statistics" type:"list" required:"true"` + + // The unit for the metric data request. // - // DiskSnapshotName is a required field - DiskSnapshotName *string `locationName:"diskSnapshotName" type:"string" required:"true"` + // Valid units depend on the metric data being requested. For the valid units + // with each available metric, see the metricName parameter. 
+ // + // Unit is a required field + Unit *string `locationName:"unit" type:"string" required:"true" enum:"MetricUnit"` } // String returns the string representation -func (s GetDiskSnapshotInput) String() string { +func (s GetDistributionMetricDataInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDiskSnapshotInput) GoString() string { +func (s GetDistributionMetricDataInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetDiskSnapshotInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDiskSnapshotInput"} - if s.DiskSnapshotName == nil { - invalidParams.Add(request.NewErrParamRequired("DiskSnapshotName")) +func (s *GetDistributionMetricDataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDistributionMetricDataInput"} + if s.DistributionName == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionName")) + } + if s.EndTime == nil { + invalidParams.Add(request.NewErrParamRequired("EndTime")) + } + if s.MetricName == nil { + invalidParams.Add(request.NewErrParamRequired("MetricName")) + } + if s.Period == nil { + invalidParams.Add(request.NewErrParamRequired("Period")) + } + if s.Period != nil && *s.Period < 60 { + invalidParams.Add(request.NewErrParamMinValue("Period", 60)) + } + if s.StartTime == nil { + invalidParams.Add(request.NewErrParamRequired("StartTime")) + } + if s.Statistics == nil { + invalidParams.Add(request.NewErrParamRequired("Statistics")) + } + if s.Unit == nil { + invalidParams.Add(request.NewErrParamRequired("Unit")) } if invalidParams.Len() > 0 { @@ -19081,159 +22293,155 @@ func (s *GetDiskSnapshotInput) Validate() error { return nil } -// SetDiskSnapshotName sets the DiskSnapshotName field's value. -func (s *GetDiskSnapshotInput) SetDiskSnapshotName(v string) *GetDiskSnapshotInput { - s.DiskSnapshotName = &v +// SetDistributionName sets the DistributionName field's value. +func (s *GetDistributionMetricDataInput) SetDistributionName(v string) *GetDistributionMetricDataInput { + s.DistributionName = &v return s } -type GetDiskSnapshotOutput struct { - _ struct{} `type:"structure"` - - // An object containing information about the disk snapshot. - DiskSnapshot *DiskSnapshot `locationName:"diskSnapshot" type:"structure"` -} - -// String returns the string representation -func (s GetDiskSnapshotOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetDiskSnapshotOutput) GoString() string { - return s.String() +// SetEndTime sets the EndTime field's value. +func (s *GetDistributionMetricDataInput) SetEndTime(v time.Time) *GetDistributionMetricDataInput { + s.EndTime = &v + return s } -// SetDiskSnapshot sets the DiskSnapshot field's value. -func (s *GetDiskSnapshotOutput) SetDiskSnapshot(v *DiskSnapshot) *GetDiskSnapshotOutput { - s.DiskSnapshot = v +// SetMetricName sets the MetricName field's value. +func (s *GetDistributionMetricDataInput) SetMetricName(v string) *GetDistributionMetricDataInput { + s.MetricName = &v return s } -type GetDiskSnapshotsInput struct { - _ struct{} `type:"structure"` - - // The token to advance to the next page of results from your request. - // - // To get a page token, perform an initial GetDiskSnapshots request. If your - // results are paginated, the response will return a next page token that you - // can specify as the page token in a subsequent request. 
- PageToken *string `locationName:"pageToken" type:"string"` +// SetPeriod sets the Period field's value. +func (s *GetDistributionMetricDataInput) SetPeriod(v int64) *GetDistributionMetricDataInput { + s.Period = &v + return s } -// String returns the string representation -func (s GetDiskSnapshotsInput) String() string { - return awsutil.Prettify(s) +// SetStartTime sets the StartTime field's value. +func (s *GetDistributionMetricDataInput) SetStartTime(v time.Time) *GetDistributionMetricDataInput { + s.StartTime = &v + return s } -// GoString returns the string representation -func (s GetDiskSnapshotsInput) GoString() string { - return s.String() +// SetStatistics sets the Statistics field's value. +func (s *GetDistributionMetricDataInput) SetStatistics(v []*string) *GetDistributionMetricDataInput { + s.Statistics = v + return s } -// SetPageToken sets the PageToken field's value. -func (s *GetDiskSnapshotsInput) SetPageToken(v string) *GetDiskSnapshotsInput { - s.PageToken = &v +// SetUnit sets the Unit field's value. +func (s *GetDistributionMetricDataInput) SetUnit(v string) *GetDistributionMetricDataInput { + s.Unit = &v return s } -type GetDiskSnapshotsOutput struct { +type GetDistributionMetricDataOutput struct { _ struct{} `type:"structure"` - // An array of objects containing information about all block storage disk snapshots. - DiskSnapshots []*DiskSnapshot `locationName:"diskSnapshots" type:"list"` + // An array of objects that describe the metric data returned. + MetricData []*MetricDatapoint `locationName:"metricData" type:"list"` - // The token to advance to the next page of resutls from your request. - // - // A next page token is not returned if there are no more results to display. - // - // To get the next page of results, perform another GetDiskSnapshots request - // and specify the next page token using the pageToken parameter. - NextPageToken *string `locationName:"nextPageToken" type:"string"` + // The name of the metric returned. + MetricName *string `locationName:"metricName" type:"string" enum:"DistributionMetricName"` } // String returns the string representation -func (s GetDiskSnapshotsOutput) String() string { +func (s GetDistributionMetricDataOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDiskSnapshotsOutput) GoString() string { +func (s GetDistributionMetricDataOutput) GoString() string { return s.String() } -// SetDiskSnapshots sets the DiskSnapshots field's value. -func (s *GetDiskSnapshotsOutput) SetDiskSnapshots(v []*DiskSnapshot) *GetDiskSnapshotsOutput { - s.DiskSnapshots = v +// SetMetricData sets the MetricData field's value. +func (s *GetDistributionMetricDataOutput) SetMetricData(v []*MetricDatapoint) *GetDistributionMetricDataOutput { + s.MetricData = v return s } -// SetNextPageToken sets the NextPageToken field's value. -func (s *GetDiskSnapshotsOutput) SetNextPageToken(v string) *GetDiskSnapshotsOutput { - s.NextPageToken = &v +// SetMetricName sets the MetricName field's value. +func (s *GetDistributionMetricDataOutput) SetMetricName(v string) *GetDistributionMetricDataOutput { + s.MetricName = &v return s } -type GetDisksInput struct { +type GetDistributionsInput struct { _ struct{} `type:"structure"` + // The name of the distribution for which to return information. + // + // Use the GetDistributions action to get a list of distribution names that + // you can specify. 
+ // + // When omitted, the response includes all of your distributions in the AWS + // Region where the request is made. + DistributionName *string `locationName:"distributionName" type:"string"` + // The token to advance to the next page of results from your request. // - // To get a page token, perform an initial GetDisks request. If your results - // are paginated, the response will return a next page token that you can specify - // as the page token in a subsequent request. + // To get a page token, perform an initial GetDistributions request. If your + // results are paginated, the response will return a next page token that you + // can specify as the page token in a subsequent request. PageToken *string `locationName:"pageToken" type:"string"` } // String returns the string representation -func (s GetDisksInput) String() string { +func (s GetDistributionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDisksInput) GoString() string { +func (s GetDistributionsInput) GoString() string { return s.String() } +// SetDistributionName sets the DistributionName field's value. +func (s *GetDistributionsInput) SetDistributionName(v string) *GetDistributionsInput { + s.DistributionName = &v + return s +} + // SetPageToken sets the PageToken field's value. -func (s *GetDisksInput) SetPageToken(v string) *GetDisksInput { +func (s *GetDistributionsInput) SetPageToken(v string) *GetDistributionsInput { s.PageToken = &v return s } -type GetDisksOutput struct { +type GetDistributionsOutput struct { _ struct{} `type:"structure"` - // An array of objects containing information about all block storage disks. - Disks []*Disk `locationName:"disks" type:"list"` + // An array of objects that describe your distributions. + Distributions []*LightsailDistribution `locationName:"distributions" type:"list"` - // The token to advance to the next page of resutls from your request. + // The token to advance to the next page of results from your request. // // A next page token is not returned if there are no more results to display. // - // To get the next page of results, perform another GetDisks request and specify - // the next page token using the pageToken parameter. + // To get the next page of results, perform another GetDistributions request + // and specify the next page token using the pageToken parameter. NextPageToken *string `locationName:"nextPageToken" type:"string"` } // String returns the string representation -func (s GetDisksOutput) String() string { +func (s GetDistributionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDisksOutput) GoString() string { +func (s GetDistributionsOutput) GoString() string { return s.String() } -// SetDisks sets the Disks field's value. -func (s *GetDisksOutput) SetDisks(v []*Disk) *GetDisksOutput { - s.Disks = v +// SetDistributions sets the Distributions field's value. +func (s *GetDistributionsOutput) SetDistributions(v []*LightsailDistribution) *GetDistributionsOutput { + s.Distributions = v return s } // SetNextPageToken sets the NextPageToken field's value. 
-func (s *GetDisksOutput) SetNextPageToken(v string) *GetDisksOutput { +func (s *GetDistributionsOutput) SetNextPageToken(v string) *GetDistributionsOutput { s.NextPageToken = &v return s } @@ -19556,44 +22764,65 @@ type GetInstanceMetricDataInput struct { // Valid instance metric names are listed below, along with the most useful // statistics to include in your request, and the published unit value. // - // * CPUUtilization — The percentage of allocated compute units that are + // * BurstCapacityPercentage - The percentage of CPU performance available + // for your instance to burst above its baseline. Your instance continuously + // accrues and consumes burst capacity. Burst capacity stops accruing when + // your instance's BurstCapacityPercentage reaches 100%. For more information, + // see Viewing instance burst capacity in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-viewing-instance-burst-capacity). + // Statistics: The most useful statistics are Maximum and Average. Unit: + // The published unit is Percent. + // + // * BurstCapacityTime - The available amount of time for your instance to + // burst at 100% CPU utilization. Your instance continuously accrues and + // consumes burst capacity. Burst capacity time stops accruing when your + // instance's BurstCapacityPercentage metric reaches 100%. Burst capacity + // time is consumed at the full rate only when your instance operates at + // 100% CPU utilization. For example, if your instance operates at 50% CPU + // utilization in the burstable zone for a 5-minute period, then it consumes + // CPU burst capacity minutes at a 50% rate in that period. Your instance + // consumed 2 minutes and 30 seconds of CPU burst capacity minutes in the + // 5-minute period. For more information, see Viewing instance burst capacity + // in Amazon Lightsail (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-viewing-instance-burst-capacity). + // Statistics: The most useful statistics are Maximum and Average. Unit: + // The published unit is Seconds. + // + // * CPUUtilization - The percentage of allocated compute units that are // currently in use on the instance. This metric identifies the processing // power to run the applications on the instance. Tools in your operating // system can show a lower percentage than Lightsail when the instance is // not allocated a full processor core. Statistics: The most useful statistics // are Maximum and Average. Unit: The published unit is Percent. // - // * NetworkIn — The number of bytes received on all network interfaces - // by the instance. This metric identifies the volume of incoming network - // traffic to the instance. The number reported is the number of bytes received - // during the period. Because this metric is reported in 5-minute intervals, - // divide the reported number by 300 to find Bytes/second. Statistics: The - // most useful statistic is Sum. Unit: The published unit is Bytes. + // * NetworkIn - The number of bytes received on all network interfaces by + // the instance. This metric identifies the volume of incoming network traffic + // to the instance. The number reported is the number of bytes received during + // the period. Because this metric is reported in 5-minute intervals, divide + // the reported number by 300 to find Bytes/second. Statistics: The most + // useful statistic is Sum. Unit: The published unit is Bytes. 
// - // * NetworkOut — The number of bytes sent out on all network interfaces + // * NetworkOut - The number of bytes sent out on all network interfaces // by the instance. This metric identifies the volume of outgoing network // traffic from the instance. The number reported is the number of bytes // sent during the period. Because this metric is reported in 5-minute intervals, // divide the reported number by 300 to find Bytes/second. Statistics: The // most useful statistic is Sum. Unit: The published unit is Bytes. // - // * StatusCheckFailed — Reports whether the instance passed or failed - // both the instance status check and the system status check. This metric - // can be either 0 (passed) or 1 (failed). This metric data is available - // in 1-minute (60 seconds) granularity. Statistics: The most useful statistic - // is Sum. Unit: The published unit is Count. + // * StatusCheckFailed - Reports whether the instance passed or failed both + // the instance status check and the system status check. This metric can + // be either 0 (passed) or 1 (failed). This metric data is available in 1-minute + // (60 seconds) granularity. Statistics: The most useful statistic is Sum. + // Unit: The published unit is Count. // - // * StatusCheckFailed_Instance — Reports whether the instance passed or + // * StatusCheckFailed_Instance - Reports whether the instance passed or // failed the instance status check. This metric can be either 0 (passed) // or 1 (failed). This metric data is available in 1-minute (60 seconds) // granularity. Statistics: The most useful statistic is Sum. Unit: The published // unit is Count. // - // * StatusCheckFailed_System — Reports whether the instance passed or - // failed the system status check. This metric can be either 0 (passed) or - // 1 (failed). This metric data is available in 1-minute (60 seconds) granularity. - // Statistics: The most useful statistic is Sum. Unit: The published unit - // is Count. + // * StatusCheckFailed_System - Reports whether the instance passed or failed + // the system status check. This metric can be either 0 (passed) or 1 (failed). + // This metric data is available in 1-minute (60 seconds) granularity. Statistics: + // The most useful statistic is Sum. Unit: The published unit is Count. // // MetricName is a required field MetricName *string `locationName:"metricName" type:"string" required:"true" enum:"InstanceMetricName"` @@ -19616,30 +22845,30 @@ type GetInstanceMetricDataInput struct { // // The following statistics are available: // - // * Minimum — The lowest value observed during the specified period. Use + // * Minimum - The lowest value observed during the specified period. Use // this value to determine low volumes of activity for your application. // - // * Maximum — The highest value observed during the specified period. - // Use this value to determine high volumes of activity for your application. + // * Maximum - The highest value observed during the specified period. Use + // this value to determine high volumes of activity for your application. // - // * Sum — All values submitted for the matching metric added together. - // You can use this statistic to determine the total volume of a metric. + // * Sum - All values submitted for the matching metric added together. You + // can use this statistic to determine the total volume of a metric. // - // * Average — The value of Sum / SampleCount during the specified period. + // * Average - The value of Sum / SampleCount during the specified period. 
// By comparing this statistic with the Minimum and Maximum values, you can // determine the full scope of a metric and how close the average use is // to the Minimum and Maximum values. This comparison helps you to know when // to increase or decrease your resources. // - // * SampleCount — The count, or number, of data points used for the statistical + // * SampleCount - The count, or number, of data points used for the statistical // calculation. // // Statistics is a required field Statistics []*string `locationName:"statistics" type:"list" required:"true"` // The unit for the metric data request. Valid units depend on the metric data - // being required. For the valid units with each available metric, see the metricName - // parameter. + // being requested. For the valid units to specify with each available metric, + // see the metricName parameter. // // Unit is a required field Unit *string `locationName:"unit" type:"string" required:"true" enum:"MetricUnit"` @@ -19734,11 +22963,10 @@ func (s *GetInstanceMetricDataInput) SetUnit(v string) *GetInstanceMetricDataInp type GetInstanceMetricDataOutput struct { _ struct{} `type:"structure"` - // An array of key-value pairs containing information about the results of your - // get instance metric data request. + // An array of objects that describe the metric data returned. MetricData []*MetricDatapoint `locationName:"metricData" type:"list"` - // The metric name to return data for. + // The name of the metric returned. MetricName *string `locationName:"metricName" type:"string" enum:"InstanceMetricName"` } @@ -19790,7 +23018,7 @@ func (s *GetInstanceOutput) SetInstance(v *Instance) *GetInstanceOutput { type GetInstancePortStatesInput struct { _ struct{} `type:"structure"` - // The name of the instance. + // The name of the instance for which to return firewall port states. // // InstanceName is a required field InstanceName *string `locationName:"instanceName" type:"string" required:"true"` @@ -19828,7 +23056,8 @@ func (s *GetInstancePortStatesInput) SetInstanceName(v string) *GetInstancePortS type GetInstancePortStatesOutput struct { _ struct{} `type:"structure"` - // Information about the port states resulting from your request. + // An array of objects that describe the firewall port states for the specified + // instance. PortStates []*InstancePortState `locationName:"portStates" type:"list"` } @@ -20281,73 +23510,73 @@ type GetLoadBalancerMetricDataInput struct { // Valid load balancer metric names are listed below, along with the most useful // statistics to include in your request, and the published unit value. // - // * ClientTLSNegotiationErrorCount — The number of TLS connections initiated + // * ClientTLSNegotiationErrorCount - The number of TLS connections initiated // by the client that did not establish a session with the load balancer // due to a TLS error generated by the load balancer. Possible causes include // a mismatch of ciphers or protocols. Statistics: The most useful statistic // is Sum. Unit: The published unit is Count. // - // * HealthyHostCount — The number of target instances that are considered + // * HealthyHostCount - The number of target instances that are considered // healthy. Statistics: The most useful statistic are Average, Minimum, and // Maximum. Unit: The published unit is Count. // - // * HTTPCode_Instance_2XX_Count — The number of HTTP 2XX response codes + // * HTTPCode_Instance_2XX_Count - The number of HTTP 2XX response codes // generated by the target instances. 
This does not include any response // codes generated by the load balancer. Statistics: The most useful statistic // is Sum. Note that Minimum, Maximum, and Average all return 1. Unit: The // published unit is Count. // - // * HTTPCode_Instance_3XX_Count — The number of HTTP 3XX response codes + // * HTTPCode_Instance_3XX_Count - The number of HTTP 3XX response codes // generated by the target instances. This does not include any response // codes generated by the load balancer. Statistics: The most useful statistic // is Sum. Note that Minimum, Maximum, and Average all return 1. Unit: The // published unit is Count. // - // * HTTPCode_Instance_4XX_Count — The number of HTTP 4XX response codes + // * HTTPCode_Instance_4XX_Count - The number of HTTP 4XX response codes // generated by the target instances. This does not include any response // codes generated by the load balancer. Statistics: The most useful statistic // is Sum. Note that Minimum, Maximum, and Average all return 1. Unit: The // published unit is Count. // - // * HTTPCode_Instance_5XX_Count — The number of HTTP 5XX response codes + // * HTTPCode_Instance_5XX_Count - The number of HTTP 5XX response codes // generated by the target instances. This does not include any response // codes generated by the load balancer. Statistics: The most useful statistic // is Sum. Note that Minimum, Maximum, and Average all return 1. Unit: The // published unit is Count. // - // * HTTPCode_LB_4XX_Count — The number of HTTP 4XX client error codes - // that originated from the load balancer. Client errors are generated when - // requests are malformed or incomplete. These requests were not received - // by the target instance. This count does not include response codes generated - // by the target instances. Statistics: The most useful statistic is Sum. - // Note that Minimum, Maximum, and Average all return 1. Unit: The published - // unit is Count. + // * HTTPCode_LB_4XX_Count - The number of HTTP 4XX client error codes that + // originated from the load balancer. Client errors are generated when requests + // are malformed or incomplete. These requests were not received by the target + // instance. This count does not include response codes generated by the + // target instances. Statistics: The most useful statistic is Sum. Note that + // Minimum, Maximum, and Average all return 1. Unit: The published unit is + // Count. // - // * HTTPCode_LB_5XX_Count — The number of HTTP 5XX server error codes - // that originated from the load balancer. This does not include any response + // * HTTPCode_LB_5XX_Count - The number of HTTP 5XX server error codes that + // originated from the load balancer. This does not include any response // codes generated by the target instance. This metric is reported if there // are no healthy instances attached to the load balancer, or if the request // rate exceeds the capacity of the instances (spillover) or the load balancer. // Statistics: The most useful statistic is Sum. Note that Minimum, Maximum, // and Average all return 1. Unit: The published unit is Count. // - // * InstanceResponseTime — The time elapsed, in seconds, after the request + // * InstanceResponseTime - The time elapsed, in seconds, after the request // leaves the load balancer until a response from the target instance is // received. Statistics: The most useful statistic is Average. Unit: The // published unit is Seconds. 
// - // * RejectedConnectionCount — The number of connections that were rejected + // * RejectedConnectionCount - The number of connections that were rejected // because the load balancer had reached its maximum number of connections. // Statistics: The most useful statistic is Sum. Unit: The published unit // is Count. // - // * RequestCount — The number of requests processed over IPv4. This count + // * RequestCount - The number of requests processed over IPv4. This count // includes only the requests with a response generated by a target instance // of the load balancer. Statistics: The most useful statistic is Sum. Note // that Minimum, Maximum, and Average all return 1. Unit: The published unit // is Count. // - // * UnhealthyHostCount — The number of target instances that are considered + // * UnhealthyHostCount - The number of target instances that are considered // unhealthy. Statistics: The most useful statistic are Average, Minimum, // and Maximum. Unit: The published unit is Count. // @@ -20368,30 +23597,30 @@ type GetLoadBalancerMetricDataInput struct { // // The following statistics are available: // - // * Minimum — The lowest value observed during the specified period. Use + // * Minimum - The lowest value observed during the specified period. Use // this value to determine low volumes of activity for your application. // - // * Maximum — The highest value observed during the specified period. - // Use this value to determine high volumes of activity for your application. + // * Maximum - The highest value observed during the specified period. Use + // this value to determine high volumes of activity for your application. // - // * Sum — All values submitted for the matching metric added together. - // You can use this statistic to determine the total volume of a metric. + // * Sum - All values submitted for the matching metric added together. You + // can use this statistic to determine the total volume of a metric. // - // * Average — The value of Sum / SampleCount during the specified period. + // * Average - The value of Sum / SampleCount during the specified period. // By comparing this statistic with the Minimum and Maximum values, you can // determine the full scope of a metric and how close the average use is // to the Minimum and Maximum values. This comparison helps you to know when // to increase or decrease your resources. // - // * SampleCount — The count, or number, of data points used for the statistical + // * SampleCount - The count, or number, of data points used for the statistical // calculation. // // Statistics is a required field Statistics []*string `locationName:"statistics" type:"list" required:"true"` // The unit for the metric data request. Valid units depend on the metric data - // being required. For the valid units with each available metric, see the metricName - // parameter. + // being requested. For the valid units with each available metric, see the + // metricName parameter. // // Unit is a required field Unit *string `locationName:"unit" type:"string" required:"true" enum:"MetricUnit"` @@ -20486,70 +23715,10 @@ func (s *GetLoadBalancerMetricDataInput) SetUnit(v string) *GetLoadBalancerMetri type GetLoadBalancerMetricDataOutput struct { _ struct{} `type:"structure"` - // An array of metric datapoint objects. + // An array of objects that describe the metric data returned. MetricData []*MetricDatapoint `locationName:"metricData" type:"list"` - // The metric about which you are receiving information. 
Valid values are listed - // below, along with the most useful statistics to include in your request. - // - // * ClientTLSNegotiationErrorCount - The number of TLS connections initiated - // by the client that did not establish a session with the load balancer. - // Possible causes include a mismatch of ciphers or protocols. Statistics: - // The most useful statistic is Sum. - // - // * HealthyHostCount - The number of target instances that are considered - // healthy. Statistics: The most useful statistic are Average, Minimum, and - // Maximum. - // - // * UnhealthyHostCount - The number of target instances that are considered - // unhealthy. Statistics: The most useful statistic are Average, Minimum, - // and Maximum. - // - // * HTTPCode_LB_4XX_Count - The number of HTTP 4XX client error codes that - // originate from the load balancer. Client errors are generated when requests - // are malformed or incomplete. These requests have not been received by - // the target instance. This count does not include any response codes generated - // by the target instances. Statistics: The most useful statistic is Sum. - // Note that Minimum, Maximum, and Average all return 1. - // - // * HTTPCode_LB_5XX_Count - The number of HTTP 5XX server error codes that - // originate from the load balancer. This count does not include any response - // codes generated by the target instances. Statistics: The most useful statistic - // is Sum. Note that Minimum, Maximum, and Average all return 1. Note that - // Minimum, Maximum, and Average all return 1. - // - // * HTTPCode_Instance_2XX_Count - The number of HTTP response codes generated - // by the target instances. This does not include any response codes generated - // by the load balancer. Statistics: The most useful statistic is Sum. Note - // that Minimum, Maximum, and Average all return 1. - // - // * HTTPCode_Instance_3XX_Count - The number of HTTP response codes generated - // by the target instances. This does not include any response codes generated - // by the load balancer. Statistics: The most useful statistic is Sum. Note - // that Minimum, Maximum, and Average all return 1. - // - // * HTTPCode_Instance_4XX_Count - The number of HTTP response codes generated - // by the target instances. This does not include any response codes generated - // by the load balancer. Statistics: The most useful statistic is Sum. Note - // that Minimum, Maximum, and Average all return 1. - // - // * HTTPCode_Instance_5XX_Count - The number of HTTP response codes generated - // by the target instances. This does not include any response codes generated - // by the load balancer. Statistics: The most useful statistic is Sum. Note - // that Minimum, Maximum, and Average all return 1. - // - // * InstanceResponseTime - The time elapsed, in seconds, after the request - // leaves the load balancer until a response from the target instance is - // received. Statistics: The most useful statistic is Average. - // - // * RejectedConnectionCount - The number of connections that were rejected - // because the load balancer had reached its maximum number of connections. - // Statistics: The most useful statistic is Sum. - // - // * RequestCount - The number of requests processed over IPv4. This count - // includes only the requests with a response generated by a target instance - // of the load balancer. Statistics: The most useful statistic is Sum. Note - // that Minimum, Maximum, and Average all return 1. + // The name of the metric returned. 
MetricName *string `locationName:"metricName" type:"string" enum:"LoadBalancerMetricName"` } @@ -20765,7 +23934,7 @@ type GetOperationOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operation *Operation `locationName:"operation" type:"structure"` } @@ -20857,7 +24026,7 @@ type GetOperationsForResourceOutput struct { NextPageToken *string `locationName:"nextPageToken" type:"string"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -20929,7 +24098,7 @@ type GetOperationsOutput struct { NextPageToken *string `locationName:"nextPageToken" type:"string"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -21617,27 +24786,27 @@ type GetRelationalDatabaseMetricDataInput struct { // All relational database metric data is available in 1-minute (60 seconds) // granularity. // - // * CPUUtilization — The percentage of CPU utilization currently in use + // * CPUUtilization - The percentage of CPU utilization currently in use // on the database. Statistics: The most useful statistics are Maximum and // Average. Unit: The published unit is Percent. // - // * DatabaseConnections — The number of database connections in use. Statistics: + // * DatabaseConnections - The number of database connections in use. Statistics: // The most useful statistics are Maximum and Sum. Unit: The published unit // is Count. // - // * DiskQueueDepth — The number of outstanding IOs (read/write requests) + // * DiskQueueDepth - The number of outstanding IOs (read/write requests) // that are waiting to access the disk. Statistics: The most useful statistic // is Sum. Unit: The published unit is Count. // - // * FreeStorageSpace — The amount of available storage space. Statistics: + // * FreeStorageSpace - The amount of available storage space. Statistics: // The most useful statistic is Sum. Unit: The published unit is Bytes. // - // * NetworkReceiveThroughput — The incoming (Receive) network traffic - // on the database, including both customer database traffic and AWS traffic + // * NetworkReceiveThroughput - The incoming (Receive) network traffic on + // the database, including both customer database traffic and AWS traffic // used for monitoring and replication. Statistics: The most useful statistic // is Average. Unit: The published unit is Bytes/Second. // - // * NetworkTransmitThroughput — The outgoing (Transmit) network traffic + // * NetworkTransmitThroughput - The outgoing (Transmit) network traffic // on the database, including both customer database traffic and AWS traffic // used for monitoring and replication. Statistics: The most useful statistic // is Average. Unit: The published unit is Bytes/Second. 
@@ -21675,30 +24844,30 @@ type GetRelationalDatabaseMetricDataInput struct { // // The following statistics are available: // - // * Minimum — The lowest value observed during the specified period. Use + // * Minimum - The lowest value observed during the specified period. Use // this value to determine low volumes of activity for your application. // - // * Maximum — The highest value observed during the specified period. - // Use this value to determine high volumes of activity for your application. + // * Maximum - The highest value observed during the specified period. Use + // this value to determine high volumes of activity for your application. // - // * Sum — All values submitted for the matching metric added together. - // You can use this statistic to determine the total volume of a metric. + // * Sum - All values submitted for the matching metric added together. You + // can use this statistic to determine the total volume of a metric. // - // * Average — The value of Sum / SampleCount during the specified period. + // * Average - The value of Sum / SampleCount during the specified period. // By comparing this statistic with the Minimum and Maximum values, you can // determine the full scope of a metric and how close the average use is // to the Minimum and Maximum values. This comparison helps you to know when // to increase or decrease your resources. // - // * SampleCount — The count, or number, of data points used for the statistical + // * SampleCount - The count, or number, of data points used for the statistical // calculation. // // Statistics is a required field Statistics []*string `locationName:"statistics" type:"list" required:"true"` // The unit for the metric data request. Valid units depend on the metric data - // being required. For the valid units with each available metric, see the metricName - // parameter. + // being requested. For the valid units with each available metric, see the + // metricName parameter. // // Unit is a required field Unit *string `locationName:"unit" type:"string" required:"true" enum:"MetricUnit"` @@ -21793,11 +24962,10 @@ func (s *GetRelationalDatabaseMetricDataInput) SetUnit(v string) *GetRelationalD type GetRelationalDatabaseMetricDataOutput struct { _ struct{} `type:"structure"` - // An object describing the result of your get relational database metric data - // request. + // An array of objects that describe the metric data returned. MetricData []*MetricDatapoint `locationName:"metricData" type:"list"` - // The name of the metric. + // The name of the metric returned. MetricName *string `locationName:"metricName" type:"string" enum:"RelationalDatabaseMetricName"` } @@ -22252,6 +25420,58 @@ func (s *GetStaticIpsOutput) SetStaticIps(v []*StaticIp) *GetStaticIpsOutput { return s } +// Describes the request headers that a Lightsail distribution bases caching +// on. +// +// For the headers that you specify, your distribution caches separate versions +// of the specified content based on the header values in viewer requests. For +// example, suppose viewer requests for logo.jpg contain a custom product header +// that has a value of either acme or apex, and you configure your distribution +// to cache your content based on values in the product header. Your distribution +// forwards the product header to the origin and caches the response from the +// origin once for each header value. +type HeaderObject struct { + _ struct{} `type:"structure"` + + // The specific headers to forward to your distribution's origin. 
+ HeadersAllowList []*string `locationName:"headersAllowList" type:"list"` + + // The headers that you want your distribution to forward to your origin and + // base caching on. + // + // You can configure your distribution to do one of the following: + // + // * all - Forward all headers to your origin. + // + // * none - Forward only the default headers. + // + // * allow-list - Forward only the headers you specify using the headersAllowList + // parameter. + Option *string `locationName:"option" type:"string" enum:"ForwardValues"` +} + +// String returns the string representation +func (s HeaderObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeaderObject) GoString() string { + return s.String() +} + +// SetHeadersAllowList sets the HeadersAllowList field's value. +func (s *HeaderObject) SetHeadersAllowList(v []*string) *HeaderObject { + s.HeadersAllowList = v + return s +} + +// SetOption sets the Option field's value. +func (s *HeaderObject) SetOption(v string) *HeaderObject { + s.Option = &v + return s +} + // Describes the public SSH host keys or the RDP certificate. type HostKeyAttributes struct { _ struct{} `type:"structure"` @@ -22401,7 +25621,7 @@ type ImportKeyPairOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operation *Operation `locationName:"operation" type:"structure"` } @@ -22422,6 +25642,54 @@ func (s *ImportKeyPairOutput) SetOperation(v *Operation) *ImportKeyPairOutput { return s } +// Describes the origin resource of an Amazon Lightsail content delivery network +// (CDN) distribution. +// +// An origin can be a Lightsail instance or load balancer. A distribution pulls +// content from an origin, caches it, and serves it to viewers via a worldwide +// network of edge servers. +type InputOrigin struct { + _ struct{} `type:"structure"` + + // The name of the origin resource. + Name *string `locationName:"name" type:"string"` + + // The protocol that your Amazon Lightsail distribution uses when establishing + // a connection with your origin to pull content. + ProtocolPolicy *string `locationName:"protocolPolicy" type:"string" enum:"OriginProtocolPolicyEnum"` + + // The AWS Region name of the origin resource. + RegionName *string `locationName:"regionName" type:"string" enum:"RegionName"` +} + +// String returns the string representation +func (s InputOrigin) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputOrigin) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *InputOrigin) SetName(v string) *InputOrigin { + s.Name = &v + return s +} + +// SetProtocolPolicy sets the ProtocolPolicy field's value. +func (s *InputOrigin) SetProtocolPolicy(v string) *InputOrigin { + s.ProtocolPolicy = &v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *InputOrigin) SetRegionName(v string) *InputOrigin { + s.RegionName = &v + return s +} + // Describes an instance (a virtual private server). type Instance struct { _ struct{} `type:"structure"` @@ -22441,7 +25709,8 @@ type Instance struct { // The bundle for the instance (e.g., micro_1_0). 
BundleId *string `locationName:"bundleId" type:"string"` - // The timestamp when the instance was created (e.g., 1479734909.17). + // The timestamp when the instance was created (e.g., 1479734909.17) in Unix + // time format. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` // The size of the vCPU and the amount of RAM for the instance. @@ -22763,13 +26032,20 @@ type InstanceEntry struct { // // The following configuration options are available: // - // * DEFAULT — Use the default firewall settings from the image. + // * DEFAULT - Use the default firewall settings from the Lightsail instance + // blueprint. + // + // * INSTANCE - Use the configured firewall settings from the source Lightsail + // instance. // - // * INSTANCE — Use the firewall settings from the source Lightsail instance. + // * NONE - Use the default Amazon EC2 security group. // - // * NONE — Default to Amazon EC2. + // * CLOSED - All ports closed. // - // * CLOSED — All ports closed. + // If you configured lightsail-connect as a cidrListAliases on your instance, + // or if you chose to allow the Lightsail browser-based SSH or RDP clients to + // connect to your instance, that configuration is not carried over to your + // new Amazon EC2 instance. // // PortInfoSource is a required field PortInfoSource *string `locationName:"portInfoSource" type:"string" required:"true" enum:"PortInfoSourceType"` @@ -23019,26 +26295,56 @@ func (s *InstanceNetworking) SetPorts(v []*InstancePortInfo) *InstanceNetworking return s } -// Describes information about the instance ports. +// Describes information about ports for an Amazon Lightsail instance. type InstancePortInfo struct { _ struct{} `type:"structure"` // The access direction (inbound or outbound). + // + // Lightsail currently supports only inbound access direction. AccessDirection *string `locationName:"accessDirection" type:"string" enum:"AccessDirection"` - // The location from which access is allowed (e.g., Anywhere (0.0.0.0/0)). + // The location from which access is allowed. For example, Anywhere (0.0.0.0/0), + // or Custom if a specific IP address or range of IP addresses is allowed. AccessFrom *string `locationName:"accessFrom" type:"string"` // The type of access (Public or Private). AccessType *string `locationName:"accessType" type:"string" enum:"PortAccessType"` - // The common name. + // An alias that defines access for a preconfigured range of IP addresses. + // + // The only alias currently supported is lightsail-connect, which allows IP + // addresses of the browser-based RDP/SSH client in the Lightsail console to + // connect to your instance. + CidrListAliases []*string `locationName:"cidrListAliases" type:"list"` + + // The IP address, or range of IP addresses in CIDR notation, that are allowed + // to connect to an instance through the ports, and the protocol. Lightsail + // supports IPv4 addresses. + // + // For more information about CIDR block notation, see Classless Inter-Domain + // Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation) + // on Wikipedia. + Cidrs []*string `locationName:"cidrs" type:"list"` + + // The common name of the port information. CommonName *string `locationName:"commonName" type:"string"` - // The first port in the range. + // The first port in a range of open ports on an instance. + // + // Allowed ports: + // + // * TCP and UDP - 0 to 65535 + // + // * ICMP - The ICMP type. 
For example, specify 8 as the fromPort (ICMP type), + // and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, + // see Control Messages (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages) + // on Wikipedia. FromPort *int64 `locationName:"fromPort" type:"integer"` - // The protocol being used. Can be one of the following. + // The IP protocol name. + // + // The name can be one of the following: // // * tcp - Transmission Control Protocol (TCP) provides reliable, ordered, // and error-checked delivery of streamed data between applications running @@ -23056,9 +26362,25 @@ type InstancePortInfo struct { // can use UDP, which provides a connectionless datagram service that emphasizes // reduced latency over reliability. If you do require reliable data stream // service, use TCP instead. + // + // * icmp - Internet Control Message Protocol (ICMP) is used to send error + // messages and operational information indicating success or failure when + // communicating with an instance. For example, an error is indicated when + // an instance could not be reached. When you specify icmp as the protocol, + // you must specify the ICMP type using the fromPort parameter, and ICMP + // code using the toPort parameter. Protocol *string `locationName:"protocol" type:"string" enum:"NetworkProtocol"` - // The last port in the range. + // The last port in a range of open ports on an instance. + // + // Allowed ports: + // + // * TCP and UDP - 0 to 65535 + // + // * ICMP - The ICMP code. For example, specify 8 as the fromPort (ICMP type), + // and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, + // see Control Messages (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages) + // on Wikipedia. ToPort *int64 `locationName:"toPort" type:"integer"` } @@ -23090,6 +26412,18 @@ func (s *InstancePortInfo) SetAccessType(v string) *InstancePortInfo { return s } +// SetCidrListAliases sets the CidrListAliases field's value. +func (s *InstancePortInfo) SetCidrListAliases(v []*string) *InstancePortInfo { + s.CidrListAliases = v + return s +} + +// SetCidrs sets the Cidrs field's value. +func (s *InstancePortInfo) SetCidrs(v []*string) *InstancePortInfo { + s.Cidrs = v + return s +} + // SetCommonName sets the CommonName field's value. func (s *InstancePortInfo) SetCommonName(v string) *InstancePortInfo { s.CommonName = &v @@ -23114,14 +26448,42 @@ func (s *InstancePortInfo) SetToPort(v int64) *InstancePortInfo { return s } -// Describes the port state. +// Describes open ports on an instance, the IP addresses allowed to connect +// to the instance through the ports, and the protocol. type InstancePortState struct { _ struct{} `type:"structure"` - // The first port in the range. + // An alias that defines access for a preconfigured range of IP addresses. + // + // The only alias currently supported is lightsail-connect, which allows IP + // addresses of the browser-based RDP/SSH client in the Lightsail console to + // connect to your instance. + CidrListAliases []*string `locationName:"cidrListAliases" type:"list"` + + // The IP address, or range of IP addresses in CIDR notation, that are allowed + // to connect to an instance through the ports, and the protocol. Lightsail + // supports IPv4 addresses. + // + // For more information about CIDR block notation, see Classless Inter-Domain + // Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation) + // on Wikipedia. 
+ Cidrs []*string `locationName:"cidrs" type:"list"` + + // The first port in a range of open ports on an instance. + // + // Allowed ports: + // + // * TCP and UDP - 0 to 65535 + // + // * ICMP - The ICMP type. For example, specify 8 as the fromPort (ICMP type), + // and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, + // see Control Messages (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages) + // on Wikipedia. FromPort *int64 `locationName:"fromPort" type:"integer"` - // The protocol being used. Can be one of the following. + // The IP protocol name. + // + // The name can be one of the following: // // * tcp - Transmission Control Protocol (TCP) provides reliable, ordered, // and error-checked delivery of streamed data between applications running @@ -23139,12 +26501,30 @@ type InstancePortState struct { // can use UDP, which provides a connectionless datagram service that emphasizes // reduced latency over reliability. If you do require reliable data stream // service, use TCP instead. + // + // * icmp - Internet Control Message Protocol (ICMP) is used to send error + // messages and operational information indicating success or failure when + // communicating with an instance. For example, an error is indicated when + // an instance could not be reached. When you specify icmp as the protocol, + // you must specify the ICMP type using the fromPort parameter, and ICMP + // code using the toPort parameter. Protocol *string `locationName:"protocol" type:"string" enum:"NetworkProtocol"` // Specifies whether the instance port is open or closed. + // + // The port state for Lightsail instances is always open. State *string `locationName:"state" type:"string" enum:"PortState"` - // The last port in the range. + // The last port in a range of open ports on an instance. + // + // Allowed ports: + // + // * TCP and UDP - 0 to 65535 + // + // * ICMP - The ICMP code. For example, specify 8 as the fromPort (ICMP type), + // and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, + // see Control Messages (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages) + // on Wikipedia. ToPort *int64 `locationName:"toPort" type:"integer"` } @@ -23158,6 +26538,18 @@ func (s InstancePortState) GoString() string { return s.String() } +// SetCidrListAliases sets the CidrListAliases field's value. +func (s *InstancePortState) SetCidrListAliases(v []*string) *InstancePortState { + s.CidrListAliases = v + return s +} + +// SetCidrs sets the Cidrs field's value. +func (s *InstancePortState) SetCidrs(v []*string) *InstancePortState { + s.Cidrs = v + return s +} + // SetFromPort sets the FromPort field's value. func (s *InstancePortState) SetFromPort(v int64) *InstancePortState { s.FromPort = &v @@ -23430,8 +26822,8 @@ func (s *InstanceState) SetName(v string) *InstanceState { // Please set your AWS Region configuration to us-east-1 to create, view, or // edit these resources. type InvalidInputException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"code" type:"string"` @@ -23454,17 +26846,17 @@ func (s InvalidInputException) GoString() string { func newErrorInvalidInputException(v protocol.ResponseMetadata) error { return &InvalidInputException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidInputException) Code() string { +func (s *InvalidInputException) Code() string { return "InvalidInputException" } // Message returns the exception's message. -func (s InvalidInputException) Message() string { +func (s *InvalidInputException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23472,22 +26864,22 @@ func (s InvalidInputException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInputException) OrigErr() error { +func (s *InvalidInputException) OrigErr() error { return nil } -func (s InvalidInputException) Error() string { +func (s *InvalidInputException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInputException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInputException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInputException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInputException) RequestID() string { + return s.RespMetadata.RequestID } type IsVpcPeeredInput struct { @@ -23617,6 +27009,207 @@ func (s *KeyPair) SetTags(v []*Tag) *KeyPair { return s } +// Describes an Amazon Lightsail content delivery network (CDN) distribution. +type LightsailDistribution struct { + _ struct{} `type:"structure"` + + // Indicates whether the bundle that is currently applied to your distribution, + // specified using the distributionName parameter, can be changed to another + // bundle. + // + // Use the UpdateDistributionBundle action to change your distribution's bundle. + AbleToUpdateBundle *bool `locationName:"ableToUpdateBundle" type:"boolean"` + + // The alternate domain names of the distribution. + AlternativeDomainNames []*string `locationName:"alternativeDomainNames" type:"list"` + + // The Amazon Resource Name (ARN) of the distribution. + Arn *string `locationName:"arn" type:"string"` + + // The ID of the bundle currently applied to the distribution. + BundleId *string `locationName:"bundleId" type:"string"` + + // An object that describes the cache behavior settings of the distribution. + CacheBehaviorSettings *CacheSettings `locationName:"cacheBehaviorSettings" type:"structure"` + + // An array of objects that describe the per-path cache behavior of the distribution. + CacheBehaviors []*CacheBehaviorPerPath `locationName:"cacheBehaviors" type:"list"` + + // The name of the SSL/TLS certificate attached to the distribution, if any. + CertificateName *string `locationName:"certificateName" type:"string"` + + // The timestamp when the distribution was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + + // An object that describes the default cache behavior of the distribution. + DefaultCacheBehavior *CacheBehavior `locationName:"defaultCacheBehavior" type:"structure"` + + // The domain name of the distribution. + DomainName *string `locationName:"domainName" type:"string"` + + // Indicates whether the distribution is enabled. + IsEnabled *bool `locationName:"isEnabled" type:"boolean"` + + // An object that describes the location of the distribution, such as the AWS + // Region and Availability Zone. + // + // Lightsail distributions are global resources that can reference an origin + // in any AWS Region, and distribute its content globally. 
However, all distributions + // are located in the us-east-1 Region. + Location *ResourceLocation `locationName:"location" type:"structure"` + + // The name of the distribution. + Name *string `locationName:"name" type:"string"` + + // An object that describes the origin resource of the distribution, such as + // a Lightsail instance or load balancer. + // + // The distribution pulls, caches, and serves content from the origin. + Origin *Origin `locationName:"origin" type:"structure"` + + // The public DNS of the origin. + OriginPublicDNS *string `locationName:"originPublicDNS" type:"string"` + + // The Lightsail resource type (e.g., Distribution). + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` + + // The status of the distribution. + Status *string `locationName:"status" type:"string"` + + // The support code. Include this code in your email to support when you have + // questions about your Lightsail distribution. This code enables our support + // team to look up your Lightsail information more easily. + SupportCode *string `locationName:"supportCode" type:"string"` + + // The tag keys and optional values for the resource. For more information about + // tags in Lightsail, see the Lightsail Dev Guide (https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-tags). + Tags []*Tag `locationName:"tags" type:"list"` +} + +// String returns the string representation +func (s LightsailDistribution) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LightsailDistribution) GoString() string { + return s.String() +} + +// SetAbleToUpdateBundle sets the AbleToUpdateBundle field's value. +func (s *LightsailDistribution) SetAbleToUpdateBundle(v bool) *LightsailDistribution { + s.AbleToUpdateBundle = &v + return s +} + +// SetAlternativeDomainNames sets the AlternativeDomainNames field's value. +func (s *LightsailDistribution) SetAlternativeDomainNames(v []*string) *LightsailDistribution { + s.AlternativeDomainNames = v + return s +} + +// SetArn sets the Arn field's value. +func (s *LightsailDistribution) SetArn(v string) *LightsailDistribution { + s.Arn = &v + return s +} + +// SetBundleId sets the BundleId field's value. +func (s *LightsailDistribution) SetBundleId(v string) *LightsailDistribution { + s.BundleId = &v + return s +} + +// SetCacheBehaviorSettings sets the CacheBehaviorSettings field's value. +func (s *LightsailDistribution) SetCacheBehaviorSettings(v *CacheSettings) *LightsailDistribution { + s.CacheBehaviorSettings = v + return s +} + +// SetCacheBehaviors sets the CacheBehaviors field's value. +func (s *LightsailDistribution) SetCacheBehaviors(v []*CacheBehaviorPerPath) *LightsailDistribution { + s.CacheBehaviors = v + return s +} + +// SetCertificateName sets the CertificateName field's value. +func (s *LightsailDistribution) SetCertificateName(v string) *LightsailDistribution { + s.CertificateName = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *LightsailDistribution) SetCreatedAt(v time.Time) *LightsailDistribution { + s.CreatedAt = &v + return s +} + +// SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value. +func (s *LightsailDistribution) SetDefaultCacheBehavior(v *CacheBehavior) *LightsailDistribution { + s.DefaultCacheBehavior = v + return s +} + +// SetDomainName sets the DomainName field's value. 
+func (s *LightsailDistribution) SetDomainName(v string) *LightsailDistribution { + s.DomainName = &v + return s +} + +// SetIsEnabled sets the IsEnabled field's value. +func (s *LightsailDistribution) SetIsEnabled(v bool) *LightsailDistribution { + s.IsEnabled = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *LightsailDistribution) SetLocation(v *ResourceLocation) *LightsailDistribution { + s.Location = v + return s +} + +// SetName sets the Name field's value. +func (s *LightsailDistribution) SetName(v string) *LightsailDistribution { + s.Name = &v + return s +} + +// SetOrigin sets the Origin field's value. +func (s *LightsailDistribution) SetOrigin(v *Origin) *LightsailDistribution { + s.Origin = v + return s +} + +// SetOriginPublicDNS sets the OriginPublicDNS field's value. +func (s *LightsailDistribution) SetOriginPublicDNS(v string) *LightsailDistribution { + s.OriginPublicDNS = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *LightsailDistribution) SetResourceType(v string) *LightsailDistribution { + s.ResourceType = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *LightsailDistribution) SetStatus(v string) *LightsailDistribution { + s.Status = &v + return s +} + +// SetSupportCode sets the SupportCode field's value. +func (s *LightsailDistribution) SetSupportCode(v string) *LightsailDistribution { + s.SupportCode = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LightsailDistribution) SetTags(v []*Tag) *LightsailDistribution { + s.Tags = v + return s +} + // Describes the Lightsail load balancer. type LoadBalancer struct { _ struct{} `type:"structure"` @@ -23808,7 +27401,47 @@ type LoadBalancerTlsCertificate struct { // the records. DomainValidationRecords []*LoadBalancerTlsCertificateDomainValidationRecord `locationName:"domainValidationRecords" type:"list"` - // The reason for the SSL/TLS certificate validation failure. + // The validation failure reason, if any, of the certificate. + // + // The following failure reasons are possible: + // + // * NO_AVAILABLE_CONTACTS - This failure applies to email validation, which + // is not available for Lightsail certificates. + // + // * ADDITIONAL_VERIFICATION_REQUIRED - Lightsail requires additional information + // to process this certificate request. This can happen as a fraud-protection + // measure, such as when the domain ranks within the Alexa top 1000 websites. + // To provide the required information, use the AWS Support Center (https://console.aws.amazon.com/support/home) + // to contact AWS Support. You cannot request a certificate for Amazon-owned + // domain names such as those ending in amazonaws.com, cloudfront.net, or + // elasticbeanstalk.com. + // + // * DOMAIN_NOT_ALLOWED - One or more of the domain names in the certificate + // request was reported as an unsafe domain by VirusTotal (https://www.virustotal.com/gui/home/url). + // To correct the problem, search for your domain name on the VirusTotal + // (https://www.virustotal.com/gui/home/url) website. If your domain is reported + // as suspicious, see Google Help for Hacked Websites (https://www.google.com/webmasters/hacked/?hl=en) + // to learn what you can do. If you believe that the result is a false positive, + // notify the organization that is reporting the domain. VirusTotal is an + // aggregate of several antivirus and URL scanners and cannot remove your + // domain from a block list itself. 
After you correct the problem and the + // VirusTotal registry has been updated, request a new certificate. If you + // see this error and your domain is not included in the VirusTotal list, + // visit the AWS Support Center (https://console.aws.amazon.com/support/home) + // and create a case. + // + // * INVALID_PUBLIC_DOMAIN - One or more of the domain names in the certificate + // request is not valid. Typically, this is because a domain name in the + // request is not a valid top-level domain. Try to request a certificate + // again, correcting any spelling errors or typos that were in the failed + // request, and ensure that all domain names in the request are for valid + // top-level domains. For example, you cannot request a certificate for example.invalidpublicdomain + // because invalidpublicdomain is not a valid top-level domain. + // + // * OTHER - Typically, this failure occurs when there is a typographical + // error in one or more of the domain names in the certificate request. Try + // to request a certificate again, correcting any spelling errors or typos + // that were in the failed request. FailureReason *string `locationName:"failureReason" type:"string" enum:"LoadBalancerTlsCertificateFailureReason"` // When true, the SSL/TLS certificate is attached to the Lightsail load balancer. @@ -23820,8 +27453,7 @@ type LoadBalancerTlsCertificate struct { // The issuer of the certificate. Issuer *string `locationName:"issuer" type:"string"` - // The algorithm that was used to generate the key pair (the public and private - // key). + // The algorithm used to generate the key pair (the public and private key). KeyAlgorithm *string `locationName:"keyAlgorithm" type:"string"` // The load balancer name where your SSL/TLS certificate is attached. @@ -23839,8 +27471,8 @@ type LoadBalancerTlsCertificate struct { // The timestamp when the SSL/TLS certificate is first valid. NotBefore *time.Time `locationName:"notBefore" type:"timestamp"` - // An object containing information about the status of Lightsail's managed - // renewal for the certificate. + // An object that describes the status of the certificate renewal managed by + // Lightsail. RenewalSummary *LoadBalancerTlsCertificateRenewalSummary `locationName:"renewalSummary" type:"structure"` // The resource type (e.g., LoadBalancerTlsCertificate). @@ -23867,10 +27499,12 @@ type LoadBalancerTlsCertificate struct { // * DiskSnapshot - A block storage disk snapshot ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` - // The reason the certificate was revoked. Valid values are below. + // The reason the certificate was revoked. This value is present only when the + // certificate status is REVOKED. RevocationReason *string `locationName:"revocationReason" type:"string" enum:"LoadBalancerTlsCertificateRevocationReason"` - // The timestamp when the SSL/TLS certificate was revoked. + // The timestamp when the certificate was revoked. This value is present only + // when the certificate status is REVOKED. RevokedAt *time.Time `locationName:"revokedAt" type:"timestamp"` // The serial number of the certificate. @@ -23879,18 +27513,15 @@ type LoadBalancerTlsCertificate struct { // The algorithm that was used to sign the certificate. SignatureAlgorithm *string `locationName:"signatureAlgorithm" type:"string"` - // The status of the SSL/TLS certificate. Valid values are below. + // The validation status of the SSL/TLS certificate. Valid values are below. 
Status *string `locationName:"status" type:"string" enum:"LoadBalancerTlsCertificateStatus"` // The name of the entity that is associated with the public key contained in // the certificate. Subject *string `locationName:"subject" type:"string"` - // One or more domains or subdomains included in the certificate. This list - // contains the domain names that are bound to the public key that is contained - // in the certificate. The subject alternative names include the canonical domain - // name (CNAME) of the certificate and additional domain names that can be used - // to connect to the website, such as example.com, www.example.com, or m.example.com. + // An array of strings that specify the alternate domains (e.g., example2.com) + // and subdomains (e.g., blog.example.com) for the certificate. SubjectAlternativeNames []*string `locationName:"subjectAlternativeNames" type:"list"` // The support code. Include this code in your email to support when you have @@ -24160,6 +27791,26 @@ func (s *LoadBalancerTlsCertificateDomainValidationRecord) SetValue(v string) *L // Contains information about the status of Lightsail's managed renewal for // the certificate. +// +// The renewal status of the certificate. +// +// The following renewal status are possible: +// +// * PendingAutoRenewal - Lightsail is attempting to automatically validate +// the domain names in the certificate. No further action is required. +// +// * PendingValidation - Lightsail couldn't automatically validate one or +// more domain names in the certificate. You must take action to validate +// these domain names or the certificate won't be renewed. If you used DNS +// validation, check to make sure your certificate's domain validation records +// exist in your domain's DNS, and that your certificate remains in use. +// +// * Success - All domain names in the certificate are validated, and Lightsail +// renewed the certificate. No further action is required. +// +// * Failed - One or more domain names were not validated before the certificate +// expired, and Lightsail did not renew the certificate. You can request +// a new certificate using the CreateCertificate action. type LoadBalancerTlsCertificateRenewalSummary struct { _ struct{} `type:"structure"` @@ -24168,8 +27819,25 @@ type LoadBalancerTlsCertificateRenewalSummary struct { // initial validation that occurs as a result of the RequestCertificate request. DomainValidationOptions []*LoadBalancerTlsCertificateDomainValidationOption `locationName:"domainValidationOptions" type:"list"` - // The status of Lightsail's managed renewal of the certificate. Valid values - // are listed below. + // The renewal status of the certificate. + // + // The following renewal status are possible: + // + // * PendingAutoRenewal - Lightsail is attempting to automatically validate + // the domain names of the certificate. No further action is required. + // + // * PendingValidation - Lightsail couldn't automatically validate one or + // more domain names of the certificate. You must take action to validate + // these domain names or the certificate won't be renewed. Check to make + // sure your certificate's domain validation records exist in your domain's + // DNS, and that your certificate remains in use. + // + // * Success - All domain names in the certificate are validated, and Lightsail + // renewed the certificate. No further action is required. 
+ // + // * Failed - One or more domain names were not validated before the certificate + // expired, and Lightsail did not renew the certificate. You can request + // a new certificate using the CreateCertificate action. RenewalStatus *string `locationName:"renewalStatus" type:"string" enum:"LoadBalancerTlsCertificateRenewalStatus"` } @@ -24414,8 +28082,8 @@ func (s *MonthlyTransfer) SetGbPerMonthAllocated(v int64) *MonthlyTransfer { // Lightsail throws this exception when it cannot find a resource. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"code" type:"string"` @@ -24438,17 +28106,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -24456,33 +28124,33 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type OpenInstancePublicPortsInput struct { _ struct{} `type:"structure"` - // The name of the instance for which you want to open the public ports. + // The name of the instance for which to open ports. // // InstanceName is a required field InstanceName *string `locationName:"instanceName" type:"string" required:"true"` - // An array of key-value pairs containing information about the port mappings. + // An object to describe the ports to open for the specified instance. // // PortInfo is a required field PortInfo *PortInfo `locationName:"portInfo" type:"structure" required:"true"` @@ -24507,6 +28175,11 @@ func (s *OpenInstancePublicPortsInput) Validate() error { if s.PortInfo == nil { invalidParams.Add(request.NewErrParamRequired("PortInfo")) } + if s.PortInfo != nil { + if err := s.PortInfo.Validate(); err != nil { + invalidParams.AddNested("PortInfo", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -24530,7 +28203,7 @@ type OpenInstancePublicPortsOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. 
Operation *Operation `locationName:"operation" type:"structure"` } @@ -24676,8 +28349,8 @@ func (s *Operation) SetStatusChangedAt(v time.Time) *Operation { // Lightsail throws this exception when an operation fails to execute. type OperationFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"code" type:"string"` @@ -24700,17 +28373,17 @@ func (s OperationFailureException) GoString() string { func newErrorOperationFailureException(v protocol.ResponseMetadata) error { return &OperationFailureException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationFailureException) Code() string { +func (s *OperationFailureException) Code() string { return "OperationFailureException" } // Message returns the exception's message. -func (s OperationFailureException) Message() string { +func (s *OperationFailureException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -24718,22 +28391,79 @@ func (s OperationFailureException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OperationFailureException) OrigErr() error { +func (s *OperationFailureException) OrigErr() error { return nil } -func (s OperationFailureException) Error() string { +func (s *OperationFailureException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s OperationFailureException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OperationFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationFailureException) RequestID() string { - return s.respMetadata.RequestID +func (s *OperationFailureException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Describes the origin resource of an Amazon Lightsail content delivery network +// (CDN) distribution. +// +// An origin can be a Lightsail instance or load balancer. A distribution pulls +// content from an origin, caches it, and serves it to viewers via a worldwide +// network of edge servers. +type Origin struct { + _ struct{} `type:"structure"` + + // The name of the origin resource. + Name *string `locationName:"name" type:"string"` + + // The protocol that your Amazon Lightsail distribution uses when establishing + // a connection with your origin to pull content. + ProtocolPolicy *string `locationName:"protocolPolicy" type:"string" enum:"OriginProtocolPolicyEnum"` + + // The AWS Region name of the origin resource. + RegionName *string `locationName:"regionName" type:"string" enum:"RegionName"` + + // The resource type of the origin resource (e.g., Instance). + ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` +} + +// String returns the string representation +func (s Origin) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Origin) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *Origin) SetName(v string) *Origin { + s.Name = &v + return s +} + +// SetProtocolPolicy sets the ProtocolPolicy field's value. 
+func (s *Origin) SetProtocolPolicy(v string) *Origin { + s.ProtocolPolicy = &v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *Origin) SetRegionName(v string) *Origin { + s.RegionName = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *Origin) SetResourceType(v string) *Origin { + s.ResourceType = &v + return s } // The password data for the Windows Server-based instance, including the ciphertext @@ -24806,7 +28536,7 @@ type PeerVpcOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operation *Operation `locationName:"operation" type:"structure"` } @@ -24911,18 +28641,84 @@ func (s *PendingModifiedRelationalDatabaseValues) SetMasterUserPassword(v string return s } -// Describes information about the ports on your virtual private server (or -// instance). +// Describes ports to open on an instance, the IP addresses allowed to connect +// to the instance through the ports, and the protocol. type PortInfo struct { _ struct{} `type:"structure"` - // The first port in the range. + // An alias that defines access for a preconfigured range of IP addresses. + // + // The only alias currently supported is lightsail-connect, which allows IP + // addresses of the browser-based RDP/SSH client in the Lightsail console to + // connect to your instance. + CidrListAliases []*string `locationName:"cidrListAliases" type:"list"` + + // The IP address, or range of IP addresses in CIDR notation, that are allowed + // to connect to an instance through the ports, and the protocol. Lightsail + // supports IPv4 addresses. + // + // Examples: + // + // * To allow the IP address 192.0.2.44, specify 192.0.2.44 or 192.0.2.44/32. + // + // * To allow the IP addresses 192.0.2.0 to 192.0.2.255, specify 192.0.2.0/24. + // + // For more information about CIDR block notation, see Classless Inter-Domain + // Routing (https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation) + // on Wikipedia. + Cidrs []*string `locationName:"cidrs" type:"list"` + + // The first port in a range of open ports on an instance. + // + // Allowed ports: + // + // * TCP and UDP - 0 to 65535 + // + // * ICMP - The ICMP type. For example, specify 8 as the fromPort (ICMP type), + // and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, + // see Control Messages (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages) + // on Wikipedia. FromPort *int64 `locationName:"fromPort" type:"integer"` - // The protocol. + // The IP protocol name. + // + // The name can be one of the following: + // + // * tcp - Transmission Control Protocol (TCP) provides reliable, ordered, + // and error-checked delivery of streamed data between applications running + // on hosts communicating by an IP network. If you have an application that + // doesn't require reliable data stream service, use UDP instead. + // + // * all - All transport layer protocol types. For more general information, + // see Transport layer (https://en.wikipedia.org/wiki/Transport_layer) on + // Wikipedia. + // + // * udp - With User Datagram Protocol (UDP), computer applications can send + // messages (or datagrams) to other hosts on an Internet Protocol (IP) network. 
+ // Prior communications are not required to set up transmission channels + // or data paths. Applications that don't require reliable data stream service + // can use UDP, which provides a connectionless datagram service that emphasizes + // reduced latency over reliability. If you do require reliable data stream + // service, use TCP instead. + // + // * icmp - Internet Control Message Protocol (ICMP) is used to send error + // messages and operational information indicating success or failure when + // communicating with an instance. For example, an error is indicated when + // an instance could not be reached. When you specify icmp as the protocol, + // you must specify the ICMP type using the fromPort parameter, and ICMP + // code using the toPort parameter. Protocol *string `locationName:"protocol" type:"string" enum:"NetworkProtocol"` - // The last port in the range. + // The last port in a range of open ports on an instance. + // + // Allowed ports: + // + // * TCP and UDP - 0 to 65535 + // + // * ICMP - The ICMP code. For example, specify 8 as the fromPort (ICMP type), + // and -1 as the toPort (ICMP code), to enable ICMP Ping. For more information, + // see Control Messages (https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages) + // on Wikipedia. ToPort *int64 `locationName:"toPort" type:"integer"` } @@ -24936,6 +28732,34 @@ func (s PortInfo) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PortInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PortInfo"} + if s.FromPort != nil && *s.FromPort < -1 { + invalidParams.Add(request.NewErrParamMinValue("FromPort", -1)) + } + if s.ToPort != nil && *s.ToPort < -1 { + invalidParams.Add(request.NewErrParamMinValue("ToPort", -1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidrListAliases sets the CidrListAliases field's value. +func (s *PortInfo) SetCidrListAliases(v []*string) *PortInfo { + s.CidrListAliases = v + return s +} + +// SetCidrs sets the Cidrs field's value. +func (s *PortInfo) SetCidrs(v []*string) *PortInfo { + s.Cidrs = v + return s +} + // SetFromPort sets the FromPort field's value. func (s *PortInfo) SetFromPort(v int64) *PortInfo { s.FromPort = &v @@ -25011,8 +28835,9 @@ type PutAlarmInput struct { // // The following metrics are available for each resource type: // - // * Instances: CPUUtilization, NetworkIn, NetworkOut, StatusCheckFailed, - // StatusCheckFailed_Instance, and StatusCheckFailed_System. + // * Instances: BurstCapacityPercentage, BurstCapacityTime, CPUUtilization, + // NetworkIn, NetworkOut, StatusCheckFailed, StatusCheckFailed_Instance, + // and StatusCheckFailed_System. // // * Load balancers: ClientTLSNegotiationErrorCount, HealthyHostCount, UnhealthyHostCount, // HTTPCode_LB_4XX_Count, HTTPCode_LB_5XX_Count, HTTPCode_Instance_2XX_Count, @@ -25022,6 +28847,9 @@ type PutAlarmInput struct { // * Relational databases: CPUUtilization, DatabaseConnections, DiskQueueDepth, // FreeStorageSpace, NetworkReceiveThroughput, and NetworkTransmitThroughput. // + // For more information about these metrics, see Metrics available in Lightsail + // (https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-resource-health-metrics#available-metrics). 
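+ //
+ // A minimal sketch (editorial illustration, not generated documentation), assuming
+ // an initialized *lightsail.Lightsail client named svc and an instance named
+ // "my-instance", of an alarm on instance CPU utilization:
+ //
+ //    svc.PutAlarm(&lightsail.PutAlarmInput{
+ //        AlarmName:             aws.String("cpu-high"),
+ //        MonitoredResourceName: aws.String("my-instance"),
+ //        MetricName:            aws.String("CPUUtilization"),
+ //        ComparisonOperator:    aws.String("GreaterThanOrEqualToThreshold"),
+ //        Threshold:             aws.Float64(90),
+ //        EvaluationPeriods:     aws.Int64(2),
+ //    })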
+ // // MetricName is a required field MetricName *string `locationName:"metricName" type:"string" required:"true" enum:"MetricName"` @@ -25042,13 +28870,13 @@ type PutAlarmInput struct { // // An alarm has the following possible states: // - // * ALARM — The metric is outside of the defined threshold. + // * ALARM - The metric is outside of the defined threshold. // - // * INSUFFICIENT_DATA — The alarm has just started, the metric is not - // available, or not enough data is available for the metric to determine - // the alarm state. + // * INSUFFICIENT_DATA - The alarm has just started, the metric is not available, + // or not enough data is available for the metric to determine the alarm + // state. // - // * OK — The metric is within the defined threshold. + // * OK - The metric is within the defined threshold. // // When you specify a notification trigger, the ALARM state must be specified. // The INSUFFICIENT_DATA and OK states can be specified in addition to the ALARM @@ -25074,16 +28902,16 @@ type PutAlarmInput struct { // // An alarm can treat missing data in the following ways: // - // * breaching — Assume the missing data is not within the threshold. Missing + // * breaching - Assume the missing data is not within the threshold. Missing // data counts towards the number of times the metric is not within the threshold. // - // * notBreaching — Assume the missing data is within the threshold. Missing + // * notBreaching - Assume the missing data is within the threshold. Missing // data does not count towards the number of times the metric is not within // the threshold. // - // * ignore — Ignore the missing data. Maintains the current alarm state. + // * ignore - Ignore the missing data. Maintains the current alarm state. // - // * missing — Missing data is treated as missing. + // * missing - Missing data is treated as missing. // // If treatMissingData is not specified, the default behavior of missing is // used. @@ -25198,7 +29026,7 @@ type PutAlarmOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -25222,12 +29050,12 @@ func (s *PutAlarmOutput) SetOperations(v []*Operation) *PutAlarmOutput { type PutInstancePublicPortsInput struct { _ struct{} `type:"structure"` - // The Lightsail instance name of the public port(s) you are setting. + // The name of the instance for which to open ports. // // InstanceName is a required field InstanceName *string `locationName:"instanceName" type:"string" required:"true"` - // Specifies information about the public port(s). + // An array of objects to describe the ports to open for the specified instance. 
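+ //
+ // For illustration only (assumes an initialized *lightsail.Lightsail client named
+ // svc and a hypothetical instance named "my-instance"), opening HTTP to all IPv4
+ // addresses might look like:
+ //
+ //    svc.PutInstancePublicPorts(&lightsail.PutInstancePublicPortsInput{
+ //        InstanceName: aws.String("my-instance"),
+ //        PortInfos: []*lightsail.PortInfo{{
+ //            FromPort: aws.Int64(80),
+ //            ToPort:   aws.Int64(80),
+ //            Protocol: aws.String("tcp"),
+ //            Cidrs:    []*string{aws.String("0.0.0.0/0")},
+ //        }},
+ //    })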
// // PortInfos is a required field PortInfos []*PortInfo `locationName:"portInfos" type:"list" required:"true"` @@ -25252,6 +29080,16 @@ func (s *PutInstancePublicPortsInput) Validate() error { if s.PortInfos == nil { invalidParams.Add(request.NewErrParamRequired("PortInfos")) } + if s.PortInfos != nil { + for i, v := range s.PortInfos { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PortInfos", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -25275,7 +29113,7 @@ type PutInstancePublicPortsOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operation *Operation `locationName:"operation" type:"structure"` } @@ -25296,6 +29134,49 @@ func (s *PutInstancePublicPortsOutput) SetOperation(v *Operation) *PutInstancePu return s } +// Describes the query string parameters that an Amazon Lightsail content delivery +// network (CDN) distribution to bases caching on. +// +// For the query strings that you specify, your distribution caches separate +// versions of the specified content based on the query string values in viewer +// requests. +type QueryStringObject struct { + _ struct{} `type:"structure"` + + // Indicates whether the distribution forwards and caches based on query strings. + Option *bool `locationName:"option" type:"boolean"` + + // The specific query strings that the distribution forwards to the origin. + // + // Your distribution will cache content based on the specified query strings. + // + // If the option parameter is true, then your distribution forwards all query + // strings, regardless of what you specify using the queryStringsAllowList parameter. + QueryStringsAllowList []*string `locationName:"queryStringsAllowList" type:"list"` +} + +// String returns the string representation +func (s QueryStringObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryStringObject) GoString() string { + return s.String() +} + +// SetOption sets the Option field's value. +func (s *QueryStringObject) SetOption(v bool) *QueryStringObject { + s.Option = &v + return s +} + +// SetQueryStringsAllowList sets the QueryStringsAllowList field's value. +func (s *QueryStringObject) SetQueryStringsAllowList(v []*string) *QueryStringObject { + s.QueryStringsAllowList = v + return s +} + type RebootInstanceInput struct { _ struct{} `type:"structure"` @@ -25338,7 +29219,7 @@ type RebootInstanceOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -25401,7 +29282,7 @@ type RebootRelationalDatabaseOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. 
Operations []*Operation `locationName:"operations" type:"list"` } @@ -26336,7 +30217,7 @@ type ReleaseStaticIpOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -26357,6 +30238,145 @@ func (s *ReleaseStaticIpOutput) SetOperations(v []*Operation) *ReleaseStaticIpOu return s } +// Describes the status of a SSL/TLS certificate renewal managed by Amazon Lightsail. +type RenewalSummary struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the domain validation records of the certificate. + DomainValidationRecords []*DomainValidationRecord `locationName:"domainValidationRecords" type:"list"` + + // The renewal status of the certificate. + // + // The following renewal status are possible: + // + // * PendingAutoRenewal - Lightsail is attempting to automatically validate + // the domain names of the certificate. No further action is required. + // + // * PendingValidation - Lightsail couldn't automatically validate one or + // more domain names of the certificate. You must take action to validate + // these domain names or the certificate won't be renewed. Check to make + // sure your certificate's domain validation records exist in your domain's + // DNS, and that your certificate remains in use. + // + // * Success - All domain names in the certificate are validated, and Lightsail + // renewed the certificate. No further action is required. + // + // * Failed - One or more domain names were not validated before the certificate + // expired, and Lightsail did not renew the certificate. You can request + // a new certificate using the CreateCertificate action. + RenewalStatus *string `locationName:"renewalStatus" type:"string" enum:"RenewalStatus"` + + // The reason for the renewal status of the certificate. + RenewalStatusReason *string `locationName:"renewalStatusReason" type:"string"` + + // The timestamp when the certificate was last updated. + UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp"` +} + +// String returns the string representation +func (s RenewalSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RenewalSummary) GoString() string { + return s.String() +} + +// SetDomainValidationRecords sets the DomainValidationRecords field's value. +func (s *RenewalSummary) SetDomainValidationRecords(v []*DomainValidationRecord) *RenewalSummary { + s.DomainValidationRecords = v + return s +} + +// SetRenewalStatus sets the RenewalStatus field's value. +func (s *RenewalSummary) SetRenewalStatus(v string) *RenewalSummary { + s.RenewalStatus = &v + return s +} + +// SetRenewalStatusReason sets the RenewalStatusReason field's value. +func (s *RenewalSummary) SetRenewalStatusReason(v string) *RenewalSummary { + s.RenewalStatusReason = &v + return s +} + +// SetUpdatedAt sets the UpdatedAt field's value. +func (s *RenewalSummary) SetUpdatedAt(v time.Time) *RenewalSummary { + s.UpdatedAt = &v + return s +} + +type ResetDistributionCacheInput struct { + _ struct{} `type:"structure"` + + // The name of the distribution for which to reset cache. + // + // Use the GetDistributions action to get a list of distribution names that + // you can specify. 
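+ //
+ // As a rough illustration (assuming an initialized *lightsail.Lightsail client
+ // named svc and a hypothetical distribution named "my-distribution"):
+ //
+ //    svc.ResetDistributionCache(&lightsail.ResetDistributionCacheInput{
+ //        DistributionName: aws.String("my-distribution"),
+ //    })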
+ DistributionName *string `locationName:"distributionName" type:"string"` +} + +// String returns the string representation +func (s ResetDistributionCacheInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetDistributionCacheInput) GoString() string { + return s.String() +} + +// SetDistributionName sets the DistributionName field's value. +func (s *ResetDistributionCacheInput) SetDistributionName(v string) *ResetDistributionCacheInput { + s.DistributionName = &v + return s +} + +type ResetDistributionCacheOutput struct { + _ struct{} `type:"structure"` + + // The timestamp of the reset cache request (e.g., 1479734909.17) in Unix time + // format. + CreateTime *time.Time `locationName:"createTime" type:"timestamp"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operation *Operation `locationName:"operation" type:"structure"` + + // The status of the reset cache request. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s ResetDistributionCacheOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetDistributionCacheOutput) GoString() string { + return s.String() +} + +// SetCreateTime sets the CreateTime field's value. +func (s *ResetDistributionCacheOutput) SetCreateTime(v time.Time) *ResetDistributionCacheOutput { + s.CreateTime = &v + return s +} + +// SetOperation sets the Operation field's value. +func (s *ResetDistributionCacheOutput) SetOperation(v *Operation) *ResetDistributionCacheOutput { + s.Operation = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ResetDistributionCacheOutput) SetStatus(v string) *ResetDistributionCacheOutput { + s.Status = &v + return s +} + // Describes the resource location. type ResourceLocation struct { _ struct{} `type:"structure"` @@ -26390,6 +30410,49 @@ func (s *ResourceLocation) SetRegionName(v string) *ResourceLocation { return s } +// Describes the domain name system (DNS) records to add to your domain's DNS +// to validate it for an Amazon Lightsail certificate. +type ResourceRecord struct { + _ struct{} `type:"structure"` + + // The name of the record. + Name *string `locationName:"name" type:"string"` + + // The DNS record type. + Type *string `locationName:"type" type:"string"` + + // The value for the DNS record. + Value *string `locationName:"value" type:"string"` +} + +// String returns the string representation +func (s ResourceRecord) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceRecord) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *ResourceRecord) SetName(v string) *ResourceRecord { + s.Name = &v + return s +} + +// SetType sets the Type field's value. +func (s *ResourceRecord) SetType(v string) *ResourceRecord { + s.Type = &v + return s +} + +// SetValue sets the Value field's value. 
+func (s *ResourceRecord) SetValue(v string) *ResourceRecord { + s.Value = &v + return s +} + type SendContactMethodVerificationInput struct { _ struct{} `type:"structure"` @@ -26432,7 +30495,7 @@ type SendContactMethodVerificationOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -26455,8 +30518,8 @@ func (s *SendContactMethodVerificationOutput) SetOperations(v []*Operation) *Sen // A general service exception. type ServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"code" type:"string"` @@ -26479,17 +30542,17 @@ func (s ServiceException) GoString() string { func newErrorServiceException(v protocol.ResponseMetadata) error { return &ServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceException) Code() string { +func (s *ServiceException) Code() string { return "ServiceException" } // Message returns the exception's message. -func (s ServiceException) Message() string { +func (s *ServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26497,22 +30560,22 @@ func (s ServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceException) OrigErr() error { +func (s *ServiceException) OrigErr() error { return nil } -func (s ServiceException) Error() string { +func (s *ServiceException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceException) RequestID() string { + return s.RespMetadata.RequestID } type StartInstanceInput struct { @@ -26557,7 +30620,7 @@ type StartInstanceOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -26620,7 +30683,7 @@ type StartRelationalDatabaseOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. 
Operations []*Operation `locationName:"operations" type:"list"` } @@ -26795,7 +30858,7 @@ type StopInstanceOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -26868,7 +30931,7 @@ type StopRelationalDatabaseOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -26997,7 +31060,7 @@ type TagResourceOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -27030,13 +31093,13 @@ type TestAlarmInput struct { // // An alarm has the following possible states that can be tested: // - // * ALARM — The metric is outside of the defined threshold. + // * ALARM - The metric is outside of the defined threshold. // - // * INSUFFICIENT_DATA — The alarm has just started, the metric is not - // available, or not enough data is available for the metric to determine - // the alarm state. + // * INSUFFICIENT_DATA - The alarm has just started, the metric is not available, + // or not enough data is available for the metric to determine the alarm + // state. // - // * OK — The metric is within the defined threshold. + // * OK - The metric is within the defined threshold. // // State is a required field State *string `locationName:"state" type:"string" required:"true" enum:"AlarmState"` @@ -27084,7 +31147,7 @@ type TestAlarmOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -27107,8 +31170,8 @@ func (s *TestAlarmOutput) SetOperations(v []*Operation) *TestAlarmOutput { // Lightsail throws this exception when the user has not been authenticated. type UnauthenticatedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"code" type:"string"` @@ -27131,17 +31194,17 @@ func (s UnauthenticatedException) GoString() string { func newErrorUnauthenticatedException(v protocol.ResponseMetadata) error { return &UnauthenticatedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnauthenticatedException) Code() string { +func (s *UnauthenticatedException) Code() string { return "UnauthenticatedException" } // Message returns the exception's message. 
-func (s UnauthenticatedException) Message() string { +func (s *UnauthenticatedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27149,22 +31212,22 @@ func (s UnauthenticatedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnauthenticatedException) OrigErr() error { +func (s *UnauthenticatedException) OrigErr() error { return nil } -func (s UnauthenticatedException) Error() string { +func (s *UnauthenticatedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UnauthenticatedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnauthenticatedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnauthenticatedException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnauthenticatedException) RequestID() string { + return s.RespMetadata.RequestID } type UnpeerVpcInput struct { @@ -27185,7 +31248,7 @@ type UnpeerVpcOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operation *Operation `locationName:"operation" type:"structure"` } @@ -27250,46 +31313,224 @@ func (s *UntagResourceInput) Validate() error { return nil } -// SetResourceArn sets the ResourceArn field's value. -func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { - s.ResourceArn = &v +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetResourceName sets the ResourceName field's value. +func (s *UntagResourceInput) SetResourceName(v string) *UntagResourceInput { + s.ResourceName = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` + + // An array of objects that describe the result of the action, such as the status + // of the request, the timestamp of the request, and the resources affected + // by the request. + Operations []*Operation `locationName:"operations" type:"list"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +// SetOperations sets the Operations field's value. +func (s *UntagResourceOutput) SetOperations(v []*Operation) *UntagResourceOutput { + s.Operations = v + return s +} + +type UpdateDistributionBundleInput struct { + _ struct{} `type:"structure"` + + // The bundle ID of the new bundle to apply to your distribution. + // + // Use the GetDistributionBundles action to get a list of distribution bundle + // IDs that you can specify. + BundleId *string `locationName:"bundleId" type:"string"` + + // The name of the distribution for which to update the bundle. + // + // Use the GetDistributions action to get a list of distribution names that + // you can specify. 
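+ //
+ // Sketched usage (assuming an initialized *lightsail.Lightsail client named svc,
+ // a hypothetical distribution named "my-distribution", and a bundle ID returned
+ // by GetDistributionBundles, shown here as "medium_1_0" for illustration):
+ //
+ //    svc.UpdateDistributionBundle(&lightsail.UpdateDistributionBundleInput{
+ //        DistributionName: aws.String("my-distribution"),
+ //        BundleId:         aws.String("medium_1_0"),
+ //    })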
+ DistributionName *string `locationName:"distributionName" type:"string"` +} + +// String returns the string representation +func (s UpdateDistributionBundleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDistributionBundleInput) GoString() string { + return s.String() +} + +// SetBundleId sets the BundleId field's value. +func (s *UpdateDistributionBundleInput) SetBundleId(v string) *UpdateDistributionBundleInput { + s.BundleId = &v + return s +} + +// SetDistributionName sets the DistributionName field's value. +func (s *UpdateDistributionBundleInput) SetDistributionName(v string) *UpdateDistributionBundleInput { + s.DistributionName = &v + return s +} + +type UpdateDistributionBundleOutput struct { + _ struct{} `type:"structure"` + + // Describes the API operation. + Operation *Operation `locationName:"operation" type:"structure"` +} + +// String returns the string representation +func (s UpdateDistributionBundleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDistributionBundleOutput) GoString() string { + return s.String() +} + +// SetOperation sets the Operation field's value. +func (s *UpdateDistributionBundleOutput) SetOperation(v *Operation) *UpdateDistributionBundleOutput { + s.Operation = v + return s +} + +type UpdateDistributionInput struct { + _ struct{} `type:"structure"` + + // An object that describes the cache behavior settings for the distribution. + // + // The cacheBehaviorSettings specified in your UpdateDistributionRequest will + // replace your distribution's existing settings. + CacheBehaviorSettings *CacheSettings `locationName:"cacheBehaviorSettings" type:"structure"` + + // An array of objects that describe the per-path cache behavior for the distribution. + CacheBehaviors []*CacheBehaviorPerPath `locationName:"cacheBehaviors" type:"list"` + + // An object that describes the default cache behavior for the distribution. + DefaultCacheBehavior *CacheBehavior `locationName:"defaultCacheBehavior" type:"structure"` + + // The name of the distribution to update. + // + // Use the GetDistributions action to get a list of distribution names that + // you can specify. + // + // DistributionName is a required field + DistributionName *string `locationName:"distributionName" type:"string" required:"true"` + + // Indicates whether to enable the distribution. + IsEnabled *bool `locationName:"isEnabled" type:"boolean"` + + // An object that describes the origin resource for the distribution, such as + // a Lightsail instance or load balancer. + // + // The distribution pulls, caches, and serves content from the origin. + Origin *InputOrigin `locationName:"origin" type:"structure"` +} + +// String returns the string representation +func (s UpdateDistributionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateDistributionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateDistributionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDistributionInput"} + if s.DistributionName == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCacheBehaviorSettings sets the CacheBehaviorSettings field's value. 
+func (s *UpdateDistributionInput) SetCacheBehaviorSettings(v *CacheSettings) *UpdateDistributionInput { + s.CacheBehaviorSettings = v return s } -// SetResourceName sets the ResourceName field's value. -func (s *UntagResourceInput) SetResourceName(v string) *UntagResourceInput { - s.ResourceName = &v +// SetCacheBehaviors sets the CacheBehaviors field's value. +func (s *UpdateDistributionInput) SetCacheBehaviors(v []*CacheBehaviorPerPath) *UpdateDistributionInput { + s.CacheBehaviors = v return s } -// SetTagKeys sets the TagKeys field's value. -func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { - s.TagKeys = v +// SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value. +func (s *UpdateDistributionInput) SetDefaultCacheBehavior(v *CacheBehavior) *UpdateDistributionInput { + s.DefaultCacheBehavior = v return s } -type UntagResourceOutput struct { +// SetDistributionName sets the DistributionName field's value. +func (s *UpdateDistributionInput) SetDistributionName(v string) *UpdateDistributionInput { + s.DistributionName = &v + return s +} + +// SetIsEnabled sets the IsEnabled field's value. +func (s *UpdateDistributionInput) SetIsEnabled(v bool) *UpdateDistributionInput { + s.IsEnabled = &v + return s +} + +// SetOrigin sets the Origin field's value. +func (s *UpdateDistributionInput) SetOrigin(v *InputOrigin) *UpdateDistributionInput { + s.Origin = v + return s +} + +type UpdateDistributionOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. - Operations []*Operation `locationName:"operations" type:"list"` + Operation *Operation `locationName:"operation" type:"structure"` } // String returns the string representation -func (s UntagResourceOutput) String() string { +func (s UpdateDistributionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UntagResourceOutput) GoString() string { +func (s UpdateDistributionOutput) GoString() string { return s.String() } -// SetOperations sets the Operations field's value. -func (s *UntagResourceOutput) SetOperations(v []*Operation) *UntagResourceOutput { - s.Operations = v +// SetOperation sets the Operation field's value. +func (s *UpdateDistributionOutput) SetOperation(v *Operation) *UpdateDistributionOutput { + s.Operation = v return s } @@ -27349,7 +31590,7 @@ type UpdateDomainEntryOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -27443,7 +31684,7 @@ type UpdateLoadBalancerAttributeOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. 
Operations []*Operation `locationName:"operations" type:"list"` } @@ -27636,7 +31877,7 @@ type UpdateRelationalDatabaseOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -27713,7 +31954,7 @@ type UpdateRelationalDatabaseParametersOutput struct { _ struct{} `type:"structure"` // An array of objects that describe the result of the action, such as the status - // of the request, the time stamp of the request, and the resources affected + // of the request, the timestamp of the request, and the resources affected // by the request. Operations []*Operation `locationName:"operations" type:"list"` } @@ -27742,11 +31983,26 @@ const ( AccessDirectionOutbound = "outbound" ) +// AccessDirection_Values returns all elements of the AccessDirection enum +func AccessDirection_Values() []string { + return []string{ + AccessDirectionInbound, + AccessDirectionOutbound, + } +} + const ( // AddOnTypeAutoSnapshot is a AddOnType enum value AddOnTypeAutoSnapshot = "AutoSnapshot" ) +// AddOnType_Values returns all elements of the AddOnType enum +func AddOnType_Values() []string { + return []string{ + AddOnTypeAutoSnapshot, + } +} + const ( // AlarmStateOk is a AlarmState enum value AlarmStateOk = "OK" @@ -27758,6 +32014,15 @@ const ( AlarmStateInsufficientData = "INSUFFICIENT_DATA" ) +// AlarmState_Values returns all elements of the AlarmState enum +func AlarmState_Values() []string { + return []string{ + AlarmStateOk, + AlarmStateAlarm, + AlarmStateInsufficientData, + } +} + const ( // AutoSnapshotStatusSuccess is a AutoSnapshotStatus enum value AutoSnapshotStatusSuccess = "Success" @@ -27772,6 +32037,32 @@ const ( AutoSnapshotStatusNotFound = "NotFound" ) +// AutoSnapshotStatus_Values returns all elements of the AutoSnapshotStatus enum +func AutoSnapshotStatus_Values() []string { + return []string{ + AutoSnapshotStatusSuccess, + AutoSnapshotStatusFailed, + AutoSnapshotStatusInProgress, + AutoSnapshotStatusNotFound, + } +} + +const ( + // BehaviorEnumDontCache is a BehaviorEnum enum value + BehaviorEnumDontCache = "dont-cache" + + // BehaviorEnumCache is a BehaviorEnum enum value + BehaviorEnumCache = "cache" +) + +// BehaviorEnum_Values returns all elements of the BehaviorEnum enum +func BehaviorEnum_Values() []string { + return []string{ + BehaviorEnumDontCache, + BehaviorEnumCache, + } +} + const ( // BlueprintTypeOs is a BlueprintType enum value BlueprintTypeOs = "os" @@ -27780,11 +32071,62 @@ const ( BlueprintTypeApp = "app" ) +// BlueprintType_Values returns all elements of the BlueprintType enum +func BlueprintType_Values() []string { + return []string{ + BlueprintTypeOs, + BlueprintTypeApp, + } +} + +const ( + // CertificateStatusPendingValidation is a CertificateStatus enum value + CertificateStatusPendingValidation = "PENDING_VALIDATION" + + // CertificateStatusIssued is a CertificateStatus enum value + CertificateStatusIssued = "ISSUED" + + // CertificateStatusInactive is a CertificateStatus enum value + CertificateStatusInactive = "INACTIVE" + + // CertificateStatusExpired is a CertificateStatus enum value + CertificateStatusExpired = "EXPIRED" + + // CertificateStatusValidationTimedOut is a CertificateStatus enum value + CertificateStatusValidationTimedOut = "VALIDATION_TIMED_OUT" + + 
// CertificateStatusRevoked is a CertificateStatus enum value + CertificateStatusRevoked = "REVOKED" + + // CertificateStatusFailed is a CertificateStatus enum value + CertificateStatusFailed = "FAILED" +) + +// CertificateStatus_Values returns all elements of the CertificateStatus enum +func CertificateStatus_Values() []string { + return []string{ + CertificateStatusPendingValidation, + CertificateStatusIssued, + CertificateStatusInactive, + CertificateStatusExpired, + CertificateStatusValidationTimedOut, + CertificateStatusRevoked, + CertificateStatusFailed, + } +} + const ( // CloudFormationStackRecordSourceTypeExportSnapshotRecord is a CloudFormationStackRecordSourceType enum value CloudFormationStackRecordSourceTypeExportSnapshotRecord = "ExportSnapshotRecord" ) +// CloudFormationStackRecordSourceType_Values returns all elements of the CloudFormationStackRecordSourceType enum +func CloudFormationStackRecordSourceType_Values() []string { + return []string{ + CloudFormationStackRecordSourceTypeExportSnapshotRecord, + } +} + const ( // ComparisonOperatorGreaterThanOrEqualToThreshold is a ComparisonOperator enum value ComparisonOperatorGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" @@ -27799,6 +32141,16 @@ const ( ComparisonOperatorLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" ) +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorGreaterThanOrEqualToThreshold, + ComparisonOperatorGreaterThanThreshold, + ComparisonOperatorLessThanThreshold, + ComparisonOperatorLessThanOrEqualToThreshold, + } +} + const ( // ContactMethodStatusPendingVerification is a ContactMethodStatus enum value ContactMethodStatusPendingVerification = "PendingVerification" @@ -27810,11 +32162,27 @@ const ( ContactMethodStatusInvalid = "Invalid" ) +// ContactMethodStatus_Values returns all elements of the ContactMethodStatus enum +func ContactMethodStatus_Values() []string { + return []string{ + ContactMethodStatusPendingVerification, + ContactMethodStatusValid, + ContactMethodStatusInvalid, + } +} + const ( // ContactMethodVerificationProtocolEmail is a ContactMethodVerificationProtocol enum value ContactMethodVerificationProtocolEmail = "Email" ) +// ContactMethodVerificationProtocol_Values returns all elements of the ContactMethodVerificationProtocol enum +func ContactMethodVerificationProtocol_Values() []string { + return []string{ + ContactMethodVerificationProtocolEmail, + } +} + const ( // ContactProtocolEmail is a ContactProtocol enum value ContactProtocolEmail = "Email" @@ -27823,6 +32191,14 @@ const ( ContactProtocolSms = "SMS" ) +// ContactProtocol_Values returns all elements of the ContactProtocol enum +func ContactProtocol_Values() []string { + return []string{ + ContactProtocolEmail, + ContactProtocolSms, + } +} + const ( // DiskSnapshotStatePending is a DiskSnapshotState enum value DiskSnapshotStatePending = "pending" @@ -27837,6 +32213,16 @@ const ( DiskSnapshotStateUnknown = "unknown" ) +// DiskSnapshotState_Values returns all elements of the DiskSnapshotState enum +func DiskSnapshotState_Values() []string { + return []string{ + DiskSnapshotStatePending, + DiskSnapshotStateCompleted, + DiskSnapshotStateError, + DiskSnapshotStateUnknown, + } +} + const ( // DiskStatePending is a DiskState enum value DiskStatePending = "pending" @@ -27854,6 +32240,49 @@ const ( DiskStateUnknown = "unknown" ) +// DiskState_Values returns all elements of the DiskState enum +func 
DiskState_Values() []string { + return []string{ + DiskStatePending, + DiskStateError, + DiskStateAvailable, + DiskStateInUse, + DiskStateUnknown, + } +} + +const ( + // DistributionMetricNameRequests is a DistributionMetricName enum value + DistributionMetricNameRequests = "Requests" + + // DistributionMetricNameBytesDownloaded is a DistributionMetricName enum value + DistributionMetricNameBytesDownloaded = "BytesDownloaded" + + // DistributionMetricNameBytesUploaded is a DistributionMetricName enum value + DistributionMetricNameBytesUploaded = "BytesUploaded" + + // DistributionMetricNameTotalErrorRate is a DistributionMetricName enum value + DistributionMetricNameTotalErrorRate = "TotalErrorRate" + + // DistributionMetricNameHttp4xxErrorRate is a DistributionMetricName enum value + DistributionMetricNameHttp4xxErrorRate = "Http4xxErrorRate" + + // DistributionMetricNameHttp5xxErrorRate is a DistributionMetricName enum value + DistributionMetricNameHttp5xxErrorRate = "Http5xxErrorRate" +) + +// DistributionMetricName_Values returns all elements of the DistributionMetricName enum +func DistributionMetricName_Values() []string { + return []string{ + DistributionMetricNameRequests, + DistributionMetricNameBytesDownloaded, + DistributionMetricNameBytesUploaded, + DistributionMetricNameTotalErrorRate, + DistributionMetricNameHttp4xxErrorRate, + DistributionMetricNameHttp5xxErrorRate, + } +} + const ( // ExportSnapshotRecordSourceTypeInstanceSnapshot is a ExportSnapshotRecordSourceType enum value ExportSnapshotRecordSourceTypeInstanceSnapshot = "InstanceSnapshot" @@ -27862,6 +32291,102 @@ const ( ExportSnapshotRecordSourceTypeDiskSnapshot = "DiskSnapshot" ) +// ExportSnapshotRecordSourceType_Values returns all elements of the ExportSnapshotRecordSourceType enum +func ExportSnapshotRecordSourceType_Values() []string { + return []string{ + ExportSnapshotRecordSourceTypeInstanceSnapshot, + ExportSnapshotRecordSourceTypeDiskSnapshot, + } +} + +const ( + // ForwardValuesNone is a ForwardValues enum value + ForwardValuesNone = "none" + + // ForwardValuesAllowList is a ForwardValues enum value + ForwardValuesAllowList = "allow-list" + + // ForwardValuesAll is a ForwardValues enum value + ForwardValuesAll = "all" +) + +// ForwardValues_Values returns all elements of the ForwardValues enum +func ForwardValues_Values() []string { + return []string{ + ForwardValuesNone, + ForwardValuesAllowList, + ForwardValuesAll, + } +} + +const ( + // HeaderEnumAccept is a HeaderEnum enum value + HeaderEnumAccept = "Accept" + + // HeaderEnumAcceptCharset is a HeaderEnum enum value + HeaderEnumAcceptCharset = "Accept-Charset" + + // HeaderEnumAcceptDatetime is a HeaderEnum enum value + HeaderEnumAcceptDatetime = "Accept-Datetime" + + // HeaderEnumAcceptEncoding is a HeaderEnum enum value + HeaderEnumAcceptEncoding = "Accept-Encoding" + + // HeaderEnumAcceptLanguage is a HeaderEnum enum value + HeaderEnumAcceptLanguage = "Accept-Language" + + // HeaderEnumAuthorization is a HeaderEnum enum value + HeaderEnumAuthorization = "Authorization" + + // HeaderEnumCloudFrontForwardedProto is a HeaderEnum enum value + HeaderEnumCloudFrontForwardedProto = "CloudFront-Forwarded-Proto" + + // HeaderEnumCloudFrontIsDesktopViewer is a HeaderEnum enum value + HeaderEnumCloudFrontIsDesktopViewer = "CloudFront-Is-Desktop-Viewer" + + // HeaderEnumCloudFrontIsMobileViewer is a HeaderEnum enum value + HeaderEnumCloudFrontIsMobileViewer = "CloudFront-Is-Mobile-Viewer" + + // HeaderEnumCloudFrontIsSmartTvViewer is a HeaderEnum enum value + 
HeaderEnumCloudFrontIsSmartTvViewer = "CloudFront-Is-SmartTV-Viewer" + + // HeaderEnumCloudFrontIsTabletViewer is a HeaderEnum enum value + HeaderEnumCloudFrontIsTabletViewer = "CloudFront-Is-Tablet-Viewer" + + // HeaderEnumCloudFrontViewerCountry is a HeaderEnum enum value + HeaderEnumCloudFrontViewerCountry = "CloudFront-Viewer-Country" + + // HeaderEnumHost is a HeaderEnum enum value + HeaderEnumHost = "Host" + + // HeaderEnumOrigin is a HeaderEnum enum value + HeaderEnumOrigin = "Origin" + + // HeaderEnumReferer is a HeaderEnum enum value + HeaderEnumReferer = "Referer" +) + +// HeaderEnum_Values returns all elements of the HeaderEnum enum +func HeaderEnum_Values() []string { + return []string{ + HeaderEnumAccept, + HeaderEnumAcceptCharset, + HeaderEnumAcceptDatetime, + HeaderEnumAcceptEncoding, + HeaderEnumAcceptLanguage, + HeaderEnumAuthorization, + HeaderEnumCloudFrontForwardedProto, + HeaderEnumCloudFrontIsDesktopViewer, + HeaderEnumCloudFrontIsMobileViewer, + HeaderEnumCloudFrontIsSmartTvViewer, + HeaderEnumCloudFrontIsTabletViewer, + HeaderEnumCloudFrontViewerCountry, + HeaderEnumHost, + HeaderEnumOrigin, + HeaderEnumReferer, + } +} + const ( // InstanceAccessProtocolSsh is a InstanceAccessProtocol enum value InstanceAccessProtocolSsh = "ssh" @@ -27870,6 +32395,14 @@ const ( InstanceAccessProtocolRdp = "rdp" ) +// InstanceAccessProtocol_Values returns all elements of the InstanceAccessProtocol enum +func InstanceAccessProtocol_Values() []string { + return []string{ + InstanceAccessProtocolSsh, + InstanceAccessProtocolRdp, + } +} + const ( // InstanceHealthReasonLbRegistrationInProgress is a InstanceHealthReason enum value InstanceHealthReasonLbRegistrationInProgress = "Lb.RegistrationInProgress" @@ -27905,6 +32438,23 @@ const ( InstanceHealthReasonInstanceIpUnusable = "Instance.IpUnusable" ) +// InstanceHealthReason_Values returns all elements of the InstanceHealthReason enum +func InstanceHealthReason_Values() []string { + return []string{ + InstanceHealthReasonLbRegistrationInProgress, + InstanceHealthReasonLbInitialHealthChecking, + InstanceHealthReasonLbInternalError, + InstanceHealthReasonInstanceResponseCodeMismatch, + InstanceHealthReasonInstanceTimeout, + InstanceHealthReasonInstanceFailedHealthChecks, + InstanceHealthReasonInstanceNotRegistered, + InstanceHealthReasonInstanceNotInUse, + InstanceHealthReasonInstanceDeregistrationInProgress, + InstanceHealthReasonInstanceInvalidState, + InstanceHealthReasonInstanceIpUnusable, + } +} + const ( // InstanceHealthStateInitial is a InstanceHealthState enum value InstanceHealthStateInitial = "initial" @@ -27925,6 +32475,18 @@ const ( InstanceHealthStateUnavailable = "unavailable" ) +// InstanceHealthState_Values returns all elements of the InstanceHealthState enum +func InstanceHealthState_Values() []string { + return []string{ + InstanceHealthStateInitial, + InstanceHealthStateHealthy, + InstanceHealthStateUnhealthy, + InstanceHealthStateUnused, + InstanceHealthStateDraining, + InstanceHealthStateUnavailable, + } +} + const ( // InstanceMetricNameCpuutilization is a InstanceMetricName enum value InstanceMetricNameCpuutilization = "CPUUtilization" @@ -27943,8 +32505,28 @@ const ( // InstanceMetricNameStatusCheckFailedSystem is a InstanceMetricName enum value InstanceMetricNameStatusCheckFailedSystem = "StatusCheckFailed_System" + + // InstanceMetricNameBurstCapacityTime is a InstanceMetricName enum value + InstanceMetricNameBurstCapacityTime = "BurstCapacityTime" + + // InstanceMetricNameBurstCapacityPercentage is a 
InstanceMetricName enum value + InstanceMetricNameBurstCapacityPercentage = "BurstCapacityPercentage" ) +// InstanceMetricName_Values returns all elements of the InstanceMetricName enum +func InstanceMetricName_Values() []string { + return []string{ + InstanceMetricNameCpuutilization, + InstanceMetricNameNetworkIn, + InstanceMetricNameNetworkOut, + InstanceMetricNameStatusCheckFailed, + InstanceMetricNameStatusCheckFailedInstance, + InstanceMetricNameStatusCheckFailedSystem, + InstanceMetricNameBurstCapacityTime, + InstanceMetricNameBurstCapacityPercentage, + } +} + const ( // InstancePlatformLinuxUnix is a InstancePlatform enum value InstancePlatformLinuxUnix = "LINUX_UNIX" @@ -27953,6 +32535,14 @@ const ( InstancePlatformWindows = "WINDOWS" ) +// InstancePlatform_Values returns all elements of the InstancePlatform enum +func InstancePlatform_Values() []string { + return []string{ + InstancePlatformLinuxUnix, + InstancePlatformWindows, + } +} + const ( // InstanceSnapshotStatePending is a InstanceSnapshotState enum value InstanceSnapshotStatePending = "pending" @@ -27964,6 +32554,15 @@ const ( InstanceSnapshotStateAvailable = "available" ) +// InstanceSnapshotState_Values returns all elements of the InstanceSnapshotState enum +func InstanceSnapshotState_Values() []string { + return []string{ + InstanceSnapshotStatePending, + InstanceSnapshotStateError, + InstanceSnapshotStateAvailable, + } +} + const ( // LoadBalancerAttributeNameHealthCheckPath is a LoadBalancerAttributeName enum value LoadBalancerAttributeNameHealthCheckPath = "HealthCheckPath" @@ -27975,6 +32574,15 @@ const ( LoadBalancerAttributeNameSessionStickinessLbCookieDurationSeconds = "SessionStickiness_LB_CookieDurationSeconds" ) +// LoadBalancerAttributeName_Values returns all elements of the LoadBalancerAttributeName enum +func LoadBalancerAttributeName_Values() []string { + return []string{ + LoadBalancerAttributeNameHealthCheckPath, + LoadBalancerAttributeNameSessionStickinessEnabled, + LoadBalancerAttributeNameSessionStickinessLbCookieDurationSeconds, + } +} + const ( // LoadBalancerMetricNameClientTlsnegotiationErrorCount is a LoadBalancerMetricName enum value LoadBalancerMetricNameClientTlsnegotiationErrorCount = "ClientTLSNegotiationErrorCount" @@ -28013,6 +32621,24 @@ const ( LoadBalancerMetricNameRequestCount = "RequestCount" ) +// LoadBalancerMetricName_Values returns all elements of the LoadBalancerMetricName enum +func LoadBalancerMetricName_Values() []string { + return []string{ + LoadBalancerMetricNameClientTlsnegotiationErrorCount, + LoadBalancerMetricNameHealthyHostCount, + LoadBalancerMetricNameUnhealthyHostCount, + LoadBalancerMetricNameHttpcodeLb4xxCount, + LoadBalancerMetricNameHttpcodeLb5xxCount, + LoadBalancerMetricNameHttpcodeInstance2xxCount, + LoadBalancerMetricNameHttpcodeInstance3xxCount, + LoadBalancerMetricNameHttpcodeInstance4xxCount, + LoadBalancerMetricNameHttpcodeInstance5xxCount, + LoadBalancerMetricNameInstanceResponseTime, + LoadBalancerMetricNameRejectedConnectionCount, + LoadBalancerMetricNameRequestCount, + } +} + const ( // LoadBalancerProtocolHttpHttps is a LoadBalancerProtocol enum value LoadBalancerProtocolHttpHttps = "HTTP_HTTPS" @@ -28021,6 +32647,14 @@ const ( LoadBalancerProtocolHttp = "HTTP" ) +// LoadBalancerProtocol_Values returns all elements of the LoadBalancerProtocol enum +func LoadBalancerProtocol_Values() []string { + return []string{ + LoadBalancerProtocolHttpHttps, + LoadBalancerProtocolHttp, + } +} + const ( // LoadBalancerStateActive is a LoadBalancerState enum value 
LoadBalancerStateActive = "active" @@ -28038,6 +32672,17 @@ const ( LoadBalancerStateUnknown = "unknown" ) +// LoadBalancerState_Values returns all elements of the LoadBalancerState enum +func LoadBalancerState_Values() []string { + return []string{ + LoadBalancerStateActive, + LoadBalancerStateProvisioning, + LoadBalancerStateActiveImpaired, + LoadBalancerStateFailed, + LoadBalancerStateUnknown, + } +} + const ( // LoadBalancerTlsCertificateDomainStatusPendingValidation is a LoadBalancerTlsCertificateDomainStatus enum value LoadBalancerTlsCertificateDomainStatusPendingValidation = "PENDING_VALIDATION" @@ -28049,6 +32694,15 @@ const ( LoadBalancerTlsCertificateDomainStatusSuccess = "SUCCESS" ) +// LoadBalancerTlsCertificateDomainStatus_Values returns all elements of the LoadBalancerTlsCertificateDomainStatus enum +func LoadBalancerTlsCertificateDomainStatus_Values() []string { + return []string{ + LoadBalancerTlsCertificateDomainStatusPendingValidation, + LoadBalancerTlsCertificateDomainStatusFailed, + LoadBalancerTlsCertificateDomainStatusSuccess, + } +} + const ( // LoadBalancerTlsCertificateFailureReasonNoAvailableContacts is a LoadBalancerTlsCertificateFailureReason enum value LoadBalancerTlsCertificateFailureReasonNoAvailableContacts = "NO_AVAILABLE_CONTACTS" @@ -28066,6 +32720,17 @@ const ( LoadBalancerTlsCertificateFailureReasonOther = "OTHER" ) +// LoadBalancerTlsCertificateFailureReason_Values returns all elements of the LoadBalancerTlsCertificateFailureReason enum +func LoadBalancerTlsCertificateFailureReason_Values() []string { + return []string{ + LoadBalancerTlsCertificateFailureReasonNoAvailableContacts, + LoadBalancerTlsCertificateFailureReasonAdditionalVerificationRequired, + LoadBalancerTlsCertificateFailureReasonDomainNotAllowed, + LoadBalancerTlsCertificateFailureReasonInvalidPublicDomain, + LoadBalancerTlsCertificateFailureReasonOther, + } +} + const ( // LoadBalancerTlsCertificateRenewalStatusPendingAutoRenewal is a LoadBalancerTlsCertificateRenewalStatus enum value LoadBalancerTlsCertificateRenewalStatusPendingAutoRenewal = "PENDING_AUTO_RENEWAL" @@ -28080,6 +32745,16 @@ const ( LoadBalancerTlsCertificateRenewalStatusFailed = "FAILED" ) +// LoadBalancerTlsCertificateRenewalStatus_Values returns all elements of the LoadBalancerTlsCertificateRenewalStatus enum +func LoadBalancerTlsCertificateRenewalStatus_Values() []string { + return []string{ + LoadBalancerTlsCertificateRenewalStatusPendingAutoRenewal, + LoadBalancerTlsCertificateRenewalStatusPendingValidation, + LoadBalancerTlsCertificateRenewalStatusSuccess, + LoadBalancerTlsCertificateRenewalStatusFailed, + } +} + const ( // LoadBalancerTlsCertificateRevocationReasonUnspecified is a LoadBalancerTlsCertificateRevocationReason enum value LoadBalancerTlsCertificateRevocationReasonUnspecified = "UNSPECIFIED" @@ -28112,6 +32787,22 @@ const ( LoadBalancerTlsCertificateRevocationReasonAACompromise = "A_A_COMPROMISE" ) +// LoadBalancerTlsCertificateRevocationReason_Values returns all elements of the LoadBalancerTlsCertificateRevocationReason enum +func LoadBalancerTlsCertificateRevocationReason_Values() []string { + return []string{ + LoadBalancerTlsCertificateRevocationReasonUnspecified, + LoadBalancerTlsCertificateRevocationReasonKeyCompromise, + LoadBalancerTlsCertificateRevocationReasonCaCompromise, + LoadBalancerTlsCertificateRevocationReasonAffiliationChanged, + LoadBalancerTlsCertificateRevocationReasonSuperceded, + LoadBalancerTlsCertificateRevocationReasonCessationOfOperation, + 
LoadBalancerTlsCertificateRevocationReasonCertificateHold, + LoadBalancerTlsCertificateRevocationReasonRemoveFromCrl, + LoadBalancerTlsCertificateRevocationReasonPrivilegeWithdrawn, + LoadBalancerTlsCertificateRevocationReasonAACompromise, + } +} + const ( // LoadBalancerTlsCertificateStatusPendingValidation is a LoadBalancerTlsCertificateStatus enum value LoadBalancerTlsCertificateStatusPendingValidation = "PENDING_VALIDATION" @@ -28138,6 +32829,20 @@ const ( LoadBalancerTlsCertificateStatusUnknown = "UNKNOWN" ) +// LoadBalancerTlsCertificateStatus_Values returns all elements of the LoadBalancerTlsCertificateStatus enum +func LoadBalancerTlsCertificateStatus_Values() []string { + return []string{ + LoadBalancerTlsCertificateStatusPendingValidation, + LoadBalancerTlsCertificateStatusIssued, + LoadBalancerTlsCertificateStatusInactive, + LoadBalancerTlsCertificateStatusExpired, + LoadBalancerTlsCertificateStatusValidationTimedOut, + LoadBalancerTlsCertificateStatusRevoked, + LoadBalancerTlsCertificateStatusFailed, + LoadBalancerTlsCertificateStatusUnknown, + } +} + const ( // MetricNameCpuutilization is a MetricName enum value MetricNameCpuutilization = "CPUUtilization" @@ -28207,8 +32912,45 @@ const ( // MetricNameNetworkTransmitThroughput is a MetricName enum value MetricNameNetworkTransmitThroughput = "NetworkTransmitThroughput" + + // MetricNameBurstCapacityTime is a MetricName enum value + MetricNameBurstCapacityTime = "BurstCapacityTime" + + // MetricNameBurstCapacityPercentage is a MetricName enum value + MetricNameBurstCapacityPercentage = "BurstCapacityPercentage" ) +// MetricName_Values returns all elements of the MetricName enum +func MetricName_Values() []string { + return []string{ + MetricNameCpuutilization, + MetricNameNetworkIn, + MetricNameNetworkOut, + MetricNameStatusCheckFailed, + MetricNameStatusCheckFailedInstance, + MetricNameStatusCheckFailedSystem, + MetricNameClientTlsnegotiationErrorCount, + MetricNameHealthyHostCount, + MetricNameUnhealthyHostCount, + MetricNameHttpcodeLb4xxCount, + MetricNameHttpcodeLb5xxCount, + MetricNameHttpcodeInstance2xxCount, + MetricNameHttpcodeInstance3xxCount, + MetricNameHttpcodeInstance4xxCount, + MetricNameHttpcodeInstance5xxCount, + MetricNameInstanceResponseTime, + MetricNameRejectedConnectionCount, + MetricNameRequestCount, + MetricNameDatabaseConnections, + MetricNameDiskQueueDepth, + MetricNameFreeStorageSpace, + MetricNameNetworkReceiveThroughput, + MetricNameNetworkTransmitThroughput, + MetricNameBurstCapacityTime, + MetricNameBurstCapacityPercentage, + } +} + const ( // MetricStatisticMinimum is a MetricStatistic enum value MetricStatisticMinimum = "Minimum" @@ -28226,6 +32968,17 @@ const ( MetricStatisticSampleCount = "SampleCount" ) +// MetricStatistic_Values returns all elements of the MetricStatistic enum +func MetricStatistic_Values() []string { + return []string{ + MetricStatisticMinimum, + MetricStatisticMaximum, + MetricStatisticSum, + MetricStatisticAverage, + MetricStatisticSampleCount, + } +} + const ( // MetricUnitSeconds is a MetricUnit enum value MetricUnitSeconds = "Seconds" @@ -28309,6 +33062,39 @@ const ( MetricUnitNone = "None" ) +// MetricUnit_Values returns all elements of the MetricUnit enum +func MetricUnit_Values() []string { + return []string{ + MetricUnitSeconds, + MetricUnitMicroseconds, + MetricUnitMilliseconds, + MetricUnitBytes, + MetricUnitKilobytes, + MetricUnitMegabytes, + MetricUnitGigabytes, + MetricUnitTerabytes, + MetricUnitBits, + MetricUnitKilobits, + MetricUnitMegabits, + 
MetricUnitGigabits, + MetricUnitTerabits, + MetricUnitPercent, + MetricUnitCount, + MetricUnitBytesSecond, + MetricUnitKilobytesSecond, + MetricUnitMegabytesSecond, + MetricUnitGigabytesSecond, + MetricUnitTerabytesSecond, + MetricUnitBitsSecond, + MetricUnitKilobitsSecond, + MetricUnitMegabitsSecond, + MetricUnitGigabitsSecond, + MetricUnitTerabitsSecond, + MetricUnitCountSecond, + MetricUnitNone, + } +} + const ( // NetworkProtocolTcp is a NetworkProtocol enum value NetworkProtocolTcp = "tcp" @@ -28318,8 +33104,21 @@ const ( // NetworkProtocolUdp is a NetworkProtocol enum value NetworkProtocolUdp = "udp" + + // NetworkProtocolIcmp is a NetworkProtocol enum value + NetworkProtocolIcmp = "icmp" ) +// NetworkProtocol_Values returns all elements of the NetworkProtocol enum +func NetworkProtocol_Values() []string { + return []string{ + NetworkProtocolTcp, + NetworkProtocolAll, + NetworkProtocolUdp, + NetworkProtocolIcmp, + } +} + const ( // OperationStatusNotStarted is a OperationStatus enum value OperationStatusNotStarted = "NotStarted" @@ -28337,6 +33136,17 @@ const ( OperationStatusSucceeded = "Succeeded" ) +// OperationStatus_Values returns all elements of the OperationStatus enum +func OperationStatus_Values() []string { + return []string{ + OperationStatusNotStarted, + OperationStatusStarted, + OperationStatusFailed, + OperationStatusCompleted, + OperationStatusSucceeded, + } +} + const ( // OperationTypeDeleteKnownHostKeys is a OperationType enum value OperationTypeDeleteKnownHostKeys = "DeleteKnownHostKeys" @@ -28502,8 +33312,121 @@ const ( // OperationTypeDeleteContactMethod is a OperationType enum value OperationTypeDeleteContactMethod = "DeleteContactMethod" + + // OperationTypeCreateDistribution is a OperationType enum value + OperationTypeCreateDistribution = "CreateDistribution" + + // OperationTypeUpdateDistribution is a OperationType enum value + OperationTypeUpdateDistribution = "UpdateDistribution" + + // OperationTypeDeleteDistribution is a OperationType enum value + OperationTypeDeleteDistribution = "DeleteDistribution" + + // OperationTypeResetDistributionCache is a OperationType enum value + OperationTypeResetDistributionCache = "ResetDistributionCache" + + // OperationTypeAttachCertificateToDistribution is a OperationType enum value + OperationTypeAttachCertificateToDistribution = "AttachCertificateToDistribution" + + // OperationTypeDetachCertificateFromDistribution is a OperationType enum value + OperationTypeDetachCertificateFromDistribution = "DetachCertificateFromDistribution" + + // OperationTypeUpdateDistributionBundle is a OperationType enum value + OperationTypeUpdateDistributionBundle = "UpdateDistributionBundle" + + // OperationTypeCreateCertificate is a OperationType enum value + OperationTypeCreateCertificate = "CreateCertificate" + + // OperationTypeDeleteCertificate is a OperationType enum value + OperationTypeDeleteCertificate = "DeleteCertificate" +) + +// OperationType_Values returns all elements of the OperationType enum +func OperationType_Values() []string { + return []string{ + OperationTypeDeleteKnownHostKeys, + OperationTypeDeleteInstance, + OperationTypeCreateInstance, + OperationTypeStopInstance, + OperationTypeStartInstance, + OperationTypeRebootInstance, + OperationTypeOpenInstancePublicPorts, + OperationTypePutInstancePublicPorts, + OperationTypeCloseInstancePublicPorts, + OperationTypeAllocateStaticIp, + OperationTypeReleaseStaticIp, + OperationTypeAttachStaticIp, + OperationTypeDetachStaticIp, + OperationTypeUpdateDomainEntry, + 
OperationTypeDeleteDomainEntry, + OperationTypeCreateDomain, + OperationTypeDeleteDomain, + OperationTypeCreateInstanceSnapshot, + OperationTypeDeleteInstanceSnapshot, + OperationTypeCreateInstancesFromSnapshot, + OperationTypeCreateLoadBalancer, + OperationTypeDeleteLoadBalancer, + OperationTypeAttachInstancesToLoadBalancer, + OperationTypeDetachInstancesFromLoadBalancer, + OperationTypeUpdateLoadBalancerAttribute, + OperationTypeCreateLoadBalancerTlsCertificate, + OperationTypeDeleteLoadBalancerTlsCertificate, + OperationTypeAttachLoadBalancerTlsCertificate, + OperationTypeCreateDisk, + OperationTypeDeleteDisk, + OperationTypeAttachDisk, + OperationTypeDetachDisk, + OperationTypeCreateDiskSnapshot, + OperationTypeDeleteDiskSnapshot, + OperationTypeCreateDiskFromSnapshot, + OperationTypeCreateRelationalDatabase, + OperationTypeUpdateRelationalDatabase, + OperationTypeDeleteRelationalDatabase, + OperationTypeCreateRelationalDatabaseFromSnapshot, + OperationTypeCreateRelationalDatabaseSnapshot, + OperationTypeDeleteRelationalDatabaseSnapshot, + OperationTypeUpdateRelationalDatabaseParameters, + OperationTypeStartRelationalDatabase, + OperationTypeRebootRelationalDatabase, + OperationTypeStopRelationalDatabase, + OperationTypeEnableAddOn, + OperationTypeDisableAddOn, + OperationTypePutAlarm, + OperationTypeGetAlarms, + OperationTypeDeleteAlarm, + OperationTypeTestAlarm, + OperationTypeCreateContactMethod, + OperationTypeGetContactMethods, + OperationTypeSendContactMethodVerification, + OperationTypeDeleteContactMethod, + OperationTypeCreateDistribution, + OperationTypeUpdateDistribution, + OperationTypeDeleteDistribution, + OperationTypeResetDistributionCache, + OperationTypeAttachCertificateToDistribution, + OperationTypeDetachCertificateFromDistribution, + OperationTypeUpdateDistributionBundle, + OperationTypeCreateCertificate, + OperationTypeDeleteCertificate, + } +} + +const ( + // OriginProtocolPolicyEnumHttpOnly is a OriginProtocolPolicyEnum enum value + OriginProtocolPolicyEnumHttpOnly = "http-only" + + // OriginProtocolPolicyEnumHttpsOnly is a OriginProtocolPolicyEnum enum value + OriginProtocolPolicyEnumHttpsOnly = "https-only" ) +// OriginProtocolPolicyEnum_Values returns all elements of the OriginProtocolPolicyEnum enum +func OriginProtocolPolicyEnum_Values() []string { + return []string{ + OriginProtocolPolicyEnumHttpOnly, + OriginProtocolPolicyEnumHttpsOnly, + } +} + const ( // PortAccessTypePublic is a PortAccessType enum value PortAccessTypePublic = "Public" @@ -28512,6 +33435,14 @@ const ( PortAccessTypePrivate = "Private" ) +// PortAccessType_Values returns all elements of the PortAccessType enum +func PortAccessType_Values() []string { + return []string{ + PortAccessTypePublic, + PortAccessTypePrivate, + } +} + const ( // PortInfoSourceTypeDefault is a PortInfoSourceType enum value PortInfoSourceTypeDefault = "DEFAULT" @@ -28526,6 +33457,16 @@ const ( PortInfoSourceTypeClosed = "CLOSED" ) +// PortInfoSourceType_Values returns all elements of the PortInfoSourceType enum +func PortInfoSourceType_Values() []string { + return []string{ + PortInfoSourceTypeDefault, + PortInfoSourceTypeInstance, + PortInfoSourceTypeNone, + PortInfoSourceTypeClosed, + } +} + const ( // PortStateOpen is a PortState enum value PortStateOpen = "open" @@ -28534,6 +33475,14 @@ const ( PortStateClosed = "closed" ) +// PortState_Values returns all elements of the PortState enum +func PortState_Values() []string { + return []string{ + PortStateOpen, + PortStateClosed, + } +} + const ( // 
RecordStateStarted is a RecordState enum value RecordStateStarted = "Started" @@ -28545,6 +33494,15 @@ const ( RecordStateFailed = "Failed" ) +// RecordState_Values returns all elements of the RecordState enum +func RecordState_Values() []string { + return []string{ + RecordStateStarted, + RecordStateSucceeded, + RecordStateFailed, + } +} + const ( // RegionNameUsEast1 is a RegionName enum value RegionNameUsEast1 = "us-east-1" @@ -28589,11 +33547,38 @@ const ( RegionNameApNortheast2 = "ap-northeast-2" ) +// RegionName_Values returns all elements of the RegionName enum +func RegionName_Values() []string { + return []string{ + RegionNameUsEast1, + RegionNameUsEast2, + RegionNameUsWest1, + RegionNameUsWest2, + RegionNameEuWest1, + RegionNameEuWest2, + RegionNameEuWest3, + RegionNameEuCentral1, + RegionNameCaCentral1, + RegionNameApSouth1, + RegionNameApSoutheast1, + RegionNameApSoutheast2, + RegionNameApNortheast1, + RegionNameApNortheast2, + } +} + const ( // RelationalDatabaseEngineMysql is a RelationalDatabaseEngine enum value RelationalDatabaseEngineMysql = "mysql" ) +// RelationalDatabaseEngine_Values returns all elements of the RelationalDatabaseEngine enum +func RelationalDatabaseEngine_Values() []string { + return []string{ + RelationalDatabaseEngineMysql, + } +} + const ( // RelationalDatabaseMetricNameCpuutilization is a RelationalDatabaseMetricName enum value RelationalDatabaseMetricNameCpuutilization = "CPUUtilization" @@ -28614,6 +33599,18 @@ const ( RelationalDatabaseMetricNameNetworkTransmitThroughput = "NetworkTransmitThroughput" ) +// RelationalDatabaseMetricName_Values returns all elements of the RelationalDatabaseMetricName enum +func RelationalDatabaseMetricName_Values() []string { + return []string{ + RelationalDatabaseMetricNameCpuutilization, + RelationalDatabaseMetricNameDatabaseConnections, + RelationalDatabaseMetricNameDiskQueueDepth, + RelationalDatabaseMetricNameFreeStorageSpace, + RelationalDatabaseMetricNameNetworkReceiveThroughput, + RelationalDatabaseMetricNameNetworkTransmitThroughput, + } +} + const ( // RelationalDatabasePasswordVersionCurrent is a RelationalDatabasePasswordVersion enum value RelationalDatabasePasswordVersionCurrent = "CURRENT" @@ -28625,6 +33622,39 @@ const ( RelationalDatabasePasswordVersionPending = "PENDING" ) +// RelationalDatabasePasswordVersion_Values returns all elements of the RelationalDatabasePasswordVersion enum +func RelationalDatabasePasswordVersion_Values() []string { + return []string{ + RelationalDatabasePasswordVersionCurrent, + RelationalDatabasePasswordVersionPrevious, + RelationalDatabasePasswordVersionPending, + } +} + +const ( + // RenewalStatusPendingAutoRenewal is a RenewalStatus enum value + RenewalStatusPendingAutoRenewal = "PendingAutoRenewal" + + // RenewalStatusPendingValidation is a RenewalStatus enum value + RenewalStatusPendingValidation = "PendingValidation" + + // RenewalStatusSuccess is a RenewalStatus enum value + RenewalStatusSuccess = "Success" + + // RenewalStatusFailed is a RenewalStatus enum value + RenewalStatusFailed = "Failed" +) + +// RenewalStatus_Values returns all elements of the RenewalStatus enum +func RenewalStatus_Values() []string { + return []string{ + RenewalStatusPendingAutoRenewal, + RenewalStatusPendingValidation, + RenewalStatusSuccess, + RenewalStatusFailed, + } +} + const ( // ResourceTypeInstance is a ResourceType enum value ResourceTypeInstance = "Instance" @@ -28673,8 +33703,38 @@ const ( // ResourceTypeContactMethod is a ResourceType enum value ResourceTypeContactMethod = 
"ContactMethod" + + // ResourceTypeDistribution is a ResourceType enum value + ResourceTypeDistribution = "Distribution" + + // ResourceTypeCertificate is a ResourceType enum value + ResourceTypeCertificate = "Certificate" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeInstance, + ResourceTypeStaticIp, + ResourceTypeKeyPair, + ResourceTypeInstanceSnapshot, + ResourceTypeDomain, + ResourceTypePeeredVpc, + ResourceTypeLoadBalancer, + ResourceTypeLoadBalancerTlsCertificate, + ResourceTypeDisk, + ResourceTypeDiskSnapshot, + ResourceTypeRelationalDatabase, + ResourceTypeRelationalDatabaseSnapshot, + ResourceTypeExportSnapshotRecord, + ResourceTypeCloudFormationStackRecord, + ResourceTypeAlarm, + ResourceTypeContactMethod, + ResourceTypeDistribution, + ResourceTypeCertificate, + } +} + const ( // TreatMissingDataBreaching is a TreatMissingData enum value TreatMissingDataBreaching = "breaching" @@ -28688,3 +33748,13 @@ const ( // TreatMissingDataMissing is a TreatMissingData enum value TreatMissingDataMissing = "missing" ) + +// TreatMissingData_Values returns all elements of the TreatMissingData enum +func TreatMissingData_Values() []string { + return []string{ + TreatMissingDataBreaching, + TreatMissingDataNotBreaching, + TreatMissingDataIgnore, + TreatMissingDataMissing, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/doc.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/doc.go index f806bbf09..5947ff0d9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lightsail/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/doc.go @@ -3,19 +3,23 @@ // Package lightsail provides the client and types for making API // requests to Amazon Lightsail. // -// Amazon Lightsail is the easiest way to get started with AWS for developers -// who just need virtual private servers. Lightsail includes everything you -// need to launch your project quickly - a virtual machine, a managed database, -// SSD-based storage, data transfer, DNS management, and a static IP - for a -// low, predictable price. You manage those Lightsail servers through the Lightsail -// console or by using the API or command-line interface (CLI). -// -// For more information about Lightsail concepts and tasks, see the Lightsail -// Dev Guide (https://lightsail.aws.amazon.com/ls/docs/all). -// -// To use the Lightsail API or the CLI, you will need to use AWS Identity and -// Access Management (IAM) to generate access keys. For details about how to -// set this up, see the Lightsail Dev Guide (http://lightsail.aws.amazon.com/ls/docs/how-to/article/lightsail-how-to-set-up-access-keys-to-use-sdk-api-cli). +// Amazon Lightsail is the easiest way to get started with Amazon Web Services +// (AWS) for developers who need to build websites or web applications. It includes +// everything you need to launch your project quickly – instances (virtual +// private servers), managed databases, SSD-based block storage, static IP addresses, +// load balancers, content delivery network (CDN) distributions, DNS management +// of registered domains, and snapshots (backups) – for a low, predictable +// monthly price. +// +// You can manage your Lightsail resources using the Lightsail console, Lightsail +// API, AWS Command Line Interface (AWS CLI), or SDKs. 
For more information +// about Lightsail concepts and tasks, see the Lightsail Dev Guide (http://lightsail.aws.amazon.com/ls/docs/how-to/article/lightsail-how-to-set-up-access-keys-to-use-sdk-api-cli). +// +// This API Reference provides detailed information about the actions, data +// types, parameters, and errors of the Lightsail service. For more information +// about the supported AWS Regions, endpoints, and service quotas for the Lightsail +// service, see Amazon Lightsail Endpoints and Quotas (https://docs.aws.amazon.com/general/latest/gr/lightsail.html) +// in the AWS General Reference. // // See https://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go index ae4a1427a..171dd9999 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/macie/api.go b/vendor/github.com/aws/aws-sdk-go/service/macie/api.go index 615361d04..b549dc3fb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/macie/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/macie/api.go @@ -57,7 +57,8 @@ func (c *Macie) AssociateMemberAccountRequest(input *AssociateMemberAccountInput // AssociateMemberAccount API operation for Amazon Macie. // -// Associates a specified AWS account with Amazon Macie as a member account. +// Associates a specified AWS account with Amazon Macie Classic as a member +// account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -144,11 +145,11 @@ func (c *Macie) AssociateS3ResourcesRequest(input *AssociateS3ResourcesInput) (r // AssociateS3Resources API operation for Amazon Macie. // -// Associates specified S3 resources with Amazon Macie for monitoring and data -// classification. If memberAccountId isn't specified, the action associates -// specified S3 resources with Macie for the current master account. If memberAccountId -// is specified, the action associates specified S3 resources with Macie for -// the specified member account. +// Associates specified S3 resources with Amazon Macie Classic for monitoring +// and data classification. If memberAccountId isn't specified, the action associates +// specified S3 resources with Macie Classic for the current master account. +// If memberAccountId is specified, the action associates specified S3 resources +// with Macie Classic for the specified member account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -239,7 +240,7 @@ func (c *Macie) DisassociateMemberAccountRequest(input *DisassociateMemberAccoun // DisassociateMemberAccount API operation for Amazon Macie. // -// Removes the specified member account from Amazon Macie. +// Removes the specified member account from Amazon Macie Classic. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -322,10 +323,11 @@ func (c *Macie) DisassociateS3ResourcesRequest(input *DisassociateS3ResourcesInp // DisassociateS3Resources API operation for Amazon Macie. // -// Removes specified S3 resources from being monitored by Amazon Macie. If memberAccountId -// isn't specified, the action removes specified S3 resources from Macie for -// the current master account. If memberAccountId is specified, the action removes -// specified S3 resources from Macie for the specified member account. +// Removes specified S3 resources from being monitored by Amazon Macie Classic. +// If memberAccountId isn't specified, the action removes specified S3 resources +// from Macie Classic for the current master account. If memberAccountId is +// specified, the action removes specified S3 resources from Macie Classic for +// the specified member account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -417,8 +419,8 @@ func (c *Macie) ListMemberAccountsRequest(input *ListMemberAccountsInput) (req * // ListMemberAccounts API operation for Amazon Macie. // -// Lists all Amazon Macie member accounts for the current Amazon Macie master -// account. +// Lists all Amazon Macie Classic member accounts for the current Amazon Macie +// Classic master account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -559,11 +561,11 @@ func (c *Macie) ListS3ResourcesRequest(input *ListS3ResourcesInput) (req *reques // ListS3Resources API operation for Amazon Macie. // -// Lists all the S3 resources associated with Amazon Macie. If memberAccountId +// Lists all the S3 resources associated with Amazon Macie Classic. If memberAccountId // isn't specified, the action lists the S3 resources associated with Amazon -// Macie for the current master account. If memberAccountId is specified, the -// action lists the S3 resources associated with Amazon Macie for the specified -// member account. +// Macie Classic for the current master account. If memberAccountId is specified, +// the action lists the S3 resources associated with Amazon Macie Classic for +// the specified member account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -703,9 +705,9 @@ func (c *Macie) UpdateS3ResourcesRequest(input *UpdateS3ResourcesInput) (req *re // // Updates the classification types for the specified S3 resources. If memberAccountId // isn't specified, the action updates the classification types of the S3 resources -// associated with Amazon Macie for the current master account. If memberAccountId +// associated with Amazon Macie Classic for the current master account. If memberAccountId // is specified, the action updates the classification types of the S3 resources -// associated with Amazon Macie for the specified member account. +// associated with Amazon Macie Classic for the specified member account. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -749,8 +751,8 @@ func (c *Macie) UpdateS3ResourcesWithContext(ctx aws.Context, input *UpdateS3Res // You do not have required permissions to access the requested resource. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -770,17 +772,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -788,29 +790,29 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } type AssociateMemberAccountInput struct { _ struct{} `type:"structure"` - // The ID of the AWS account that you want to associate with Amazon Macie as - // a member account. + // The ID of the AWS account that you want to associate with Amazon Macie Classic + // as a member account. // // MemberAccountId is a required field MemberAccountId *string `locationName:"memberAccountId" type:"string" required:"true"` @@ -862,12 +864,12 @@ func (s AssociateMemberAccountOutput) GoString() string { type AssociateS3ResourcesInput struct { _ struct{} `type:"structure"` - // The ID of the Amazon Macie member account whose resources you want to associate - // with Macie. + // The ID of the Amazon Macie Classic member account whose resources you want + // to associate with Macie Classic. MemberAccountId *string `locationName:"memberAccountId" type:"string"` - // The S3 resources that you want to associate with Amazon Macie for monitoring - // and data classification. + // The S3 resources that you want to associate with Amazon Macie Classic for + // monitoring and data classification. // // S3Resources is a required field S3Resources []*S3ResourceClassification `locationName:"s3Resources" type:"list" required:"true"` @@ -921,8 +923,8 @@ func (s *AssociateS3ResourcesInput) SetS3Resources(v []*S3ResourceClassification type AssociateS3ResourcesOutput struct { _ struct{} `type:"structure"` - // S3 resources that couldn't be associated with Amazon Macie. 
An error code - // and an error message are provided for each failed item. + // S3 resources that couldn't be associated with Amazon Macie Classic. An error + // code and an error message are provided for each failed item. FailedS3Resources []*FailedS3Resource `locationName:"failedS3Resources" type:"list"` } @@ -942,13 +944,14 @@ func (s *AssociateS3ResourcesOutput) SetFailedS3Resources(v []*FailedS3Resource) return s } -// The classification type that Amazon Macie applies to the associated S3 resources. +// The classification type that Amazon Macie Classic applies to the associated +// S3 resources. type ClassificationType struct { _ struct{} `type:"structure"` // A continuous classification of the objects that are added to a specified - // S3 bucket. Amazon Macie begins performing continuous classification after - // a bucket is successfully associated with Amazon Macie. + // S3 bucket. Amazon Macie Classic begins performing continuous classification + // after a bucket is successfully associated with Amazon Macie Classic. // // Continuous is a required field Continuous *string `locationName:"continuous" type:"string" required:"true" enum:"S3ContinuousClassificationType"` @@ -998,15 +1001,15 @@ func (s *ClassificationType) SetOneTime(v string) *ClassificationType { return s } -// The classification type that Amazon Macie applies to the associated S3 resources. -// At least one of the classification types (oneTime or continuous) must be -// specified. +// The classification type that Amazon Macie Classic applies to the associated +// S3 resources. At least one of the classification types (oneTime or continuous) +// must be specified. type ClassificationTypeUpdate struct { _ struct{} `type:"structure"` // A continuous classification of the objects that are added to a specified - // S3 bucket. Amazon Macie begins performing continuous classification after - // a bucket is successfully associated with Amazon Macie. + // S3 bucket. Amazon Macie Classic begins performing continuous classification + // after a bucket is successfully associated with Amazon Macie Classic. Continuous *string `locationName:"continuous" type:"string" enum:"S3ContinuousClassificationType"` // A one-time classification of all of the existing objects in a specified S3 @@ -1039,7 +1042,7 @@ func (s *ClassificationTypeUpdate) SetOneTime(v string) *ClassificationTypeUpdat type DisassociateMemberAccountInput struct { _ struct{} `type:"structure"` - // The ID of the member account that you want to remove from Amazon Macie. + // The ID of the member account that you want to remove from Amazon Macie Classic. // // MemberAccountId is a required field MemberAccountId *string `locationName:"memberAccountId" type:"string" required:"true"` @@ -1092,13 +1095,13 @@ type DisassociateS3ResourcesInput struct { _ struct{} `type:"structure"` // The S3 resources (buckets or prefixes) that you want to remove from being - // monitored and classified by Amazon Macie. + // monitored and classified by Amazon Macie Classic. // // AssociatedS3Resources is a required field AssociatedS3Resources []*S3Resource `locationName:"associatedS3Resources" type:"list" required:"true"` - // The ID of the Amazon Macie member account whose resources you want to remove - // from being monitored by Amazon Macie. + // The ID of the Amazon Macie Classic member account whose resources you want + // to remove from being monitored by Amazon Macie Classic. 
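
The Macie Classic hunks above document AssociateS3Resources and the ClassificationType shape it accepts. As a non-authoritative sketch only, the snippet below shows one way a caller could associate a bucket using the vendored package; the bucket name is a placeholder and the enum constants are the ones defined later in this same file.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/macie"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := macie.New(sess)

        // Associate a (hypothetical) bucket with Macie Classic using the
        // ClassificationType shape documented in the hunk above.
        _, err := svc.AssociateS3Resources(&macie.AssociateS3ResourcesInput{
            S3Resources: []*macie.S3ResourceClassification{{
                BucketName: aws.String("example-bucket"), // placeholder bucket name
                ClassificationType: &macie.ClassificationType{
                    Continuous: aws.String(macie.S3ContinuousClassificationTypeFull),
                    OneTime:    aws.String(macie.S3OneTimeClassificationTypeNone),
                },
            }},
        })
        if err != nil {
            log.Fatal(err)
        }
    }
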
MemberAccountId *string `locationName:"memberAccountId" type:"string"` } @@ -1151,8 +1154,8 @@ type DisassociateS3ResourcesOutput struct { _ struct{} `type:"structure"` // S3 resources that couldn't be removed from being monitored and classified - // by Amazon Macie. An error code and an error message are provided for each - // failed item. + // by Amazon Macie Classic. An error code and an error message are provided + // for each failed item. FailedS3Resources []*FailedS3Resource `locationName:"failedS3Resources" type:"list"` } @@ -1216,8 +1219,8 @@ func (s *FailedS3Resource) SetFailedItem(v *S3Resource) *FailedS3Resource { // Internal server error. type InternalException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Error code for the exception ErrorCode *string `locationName:"errorCode" type:"string"` @@ -1237,17 +1240,17 @@ func (s InternalException) GoString() string { func newErrorInternalException(v protocol.ResponseMetadata) error { return &InternalException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalException) Code() string { +func (s *InternalException) Code() string { return "InternalException" } // Message returns the exception's message. -func (s InternalException) Message() string { +func (s *InternalException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1255,29 +1258,29 @@ func (s InternalException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalException) OrigErr() error { +func (s *InternalException) OrigErr() error { return nil } -func (s InternalException) Error() string { +func (s *InternalException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because an invalid or out-of-range value was supplied // for an input parameter. type InvalidInputException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Error code for the exception ErrorCode *string `locationName:"errorCode" type:"string"` @@ -1300,17 +1303,17 @@ func (s InvalidInputException) GoString() string { func newErrorInvalidInputException(v protocol.ResponseMetadata) error { return &InvalidInputException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInputException) Code() string { +func (s *InvalidInputException) Code() string { return "InvalidInputException" } // Message returns the exception's message. -func (s InvalidInputException) Message() string { +func (s *InvalidInputException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1318,29 +1321,29 @@ func (s InvalidInputException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidInputException) OrigErr() error { +func (s *InvalidInputException) OrigErr() error { return nil } -func (s InvalidInputException) Error() string { +func (s *InvalidInputException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInputException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInputException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInputException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInputException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Error code for the exception ErrorCode *string `locationName:"errorCode" type:"string"` @@ -1363,17 +1366,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1381,22 +1384,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListMemberAccountsInput struct { @@ -1438,8 +1441,8 @@ func (s *ListMemberAccountsInput) SetNextToken(v string) *ListMemberAccountsInpu type ListMemberAccountsOutput struct { _ struct{} `type:"structure"` - // A list of the Amazon Macie member accounts returned by the action. The current - // master account is also included in this list. + // A list of the Amazon Macie Classic member accounts returned by the action. + // The current master account is also included in this list. MemberAccounts []*MemberAccount `locationName:"memberAccounts" type:"list"` // When a response is generated, if there is more data to be listed, this parameter @@ -1478,8 +1481,8 @@ type ListS3ResourcesInput struct { // in the response. The default value is 250. 
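
The exception hunks above switch the Macie Classic error types to pointer receivers with an exported RespMetadata field, and the generated operation docs repeatedly point at runtime type assertions on awserr.Error. The snippet below is a rough sketch of that pattern against the vendored macie package, with an arbitrary operation as the example; ErrCodeAccessDeniedException is assumed to be the package's generated error-code constant for the modeled exception shown above.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/macie"
    )

    func main() {
        svc := macie.New(session.Must(session.NewSession()))

        out, err := svc.ListMemberAccounts(&macie.ListMemberAccountsInput{})
        if err != nil {
            // Runtime type assertion on awserr.Error, as the generated docs suggest.
            if aerr, ok := err.(awserr.Error); ok && aerr.Code() == macie.ErrCodeAccessDeniedException {
                log.Fatalf("access denied: %s", aerr.Message())
            }
            log.Fatal(err)
        }
        fmt.Println(out.MemberAccounts)
    }
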
MaxResults *int64 `locationName:"maxResults" type:"integer"` - // The Amazon Macie member account ID whose associated S3 resources you want - // to list. + // The Amazon Macie Classic member account ID whose associated S3 resources + // you want to list. MemberAccountId *string `locationName:"memberAccountId" type:"string"` // Use this parameter when paginating results. Set its value to null on your @@ -1552,11 +1555,11 @@ func (s *ListS3ResourcesOutput) SetS3Resources(v []*S3ResourceClassification) *L return s } -// Contains information about the Amazon Macie member account. +// Contains information about the Amazon Macie Classic member account. type MemberAccount struct { _ struct{} `type:"structure"` - // The AWS account ID of the Amazon Macie member account. + // The AWS account ID of the Amazon Macie Classic member account. AccountId *string `locationName:"accountId" type:"string"` } @@ -1626,25 +1629,26 @@ func (s *S3Resource) SetPrefix(v string) *S3Resource { return s } -// The S3 resources that you want to associate with Amazon Macie for monitoring -// and data classification. This data type is used as a request parameter in -// the AssociateS3Resources action and a response parameter in the ListS3Resources +// The S3 resources that you want to associate with Amazon Macie Classic for +// monitoring and data classification. This data type is used as a request parameter +// in the AssociateS3Resources action and a response parameter in the ListS3Resources // action. type S3ResourceClassification struct { _ struct{} `type:"structure"` - // The name of the S3 bucket that you want to associate with Amazon Macie. + // The name of the S3 bucket that you want to associate with Amazon Macie Classic. // // BucketName is a required field BucketName *string `locationName:"bucketName" type:"string" required:"true"` // The classification type that you want to specify for the resource associated - // with Amazon Macie. + // with Amazon Macie Classic. // // ClassificationType is a required field ClassificationType *ClassificationType `locationName:"classificationType" type:"structure" required:"true"` - // The prefix of the S3 bucket that you want to associate with Amazon Macie. + // The prefix of the S3 bucket that you want to associate with Amazon Macie + // Classic. Prefix *string `locationName:"prefix" type:"string"` } @@ -1708,7 +1712,7 @@ type S3ResourceClassificationUpdate struct { BucketName *string `locationName:"bucketName" type:"string" required:"true"` // The classification type that you want to update for the resource associated - // with Amazon Macie. + // with Amazon Macie Classic. // // ClassificationTypeUpdate is a required field ClassificationTypeUpdate *ClassificationTypeUpdate `locationName:"classificationTypeUpdate" type:"structure" required:"true"` @@ -1764,8 +1768,8 @@ func (s *S3ResourceClassificationUpdate) SetPrefix(v string) *S3ResourceClassifi type UpdateS3ResourcesInput struct { _ struct{} `type:"structure"` - // The AWS ID of the Amazon Macie member account whose S3 resources' classification - // types you want to update. + // The AWS ID of the Amazon Macie Classic member account whose S3 resources' + // classification types you want to update. MemberAccountId *string `locationName:"memberAccountId" type:"string"` // The S3 resources whose classification types you want to update. 
@@ -1848,6 +1852,13 @@ const ( S3ContinuousClassificationTypeFull = "FULL" ) +// S3ContinuousClassificationType_Values returns all elements of the S3ContinuousClassificationType enum +func S3ContinuousClassificationType_Values() []string { + return []string{ + S3ContinuousClassificationTypeFull, + } +} + const ( // S3OneTimeClassificationTypeFull is a S3OneTimeClassificationType enum value S3OneTimeClassificationTypeFull = "FULL" @@ -1855,3 +1866,11 @@ const ( // S3OneTimeClassificationTypeNone is a S3OneTimeClassificationType enum value S3OneTimeClassificationTypeNone = "NONE" ) + +// S3OneTimeClassificationType_Values returns all elements of the S3OneTimeClassificationType enum +func S3OneTimeClassificationType_Values() []string { + return []string{ + S3OneTimeClassificationTypeFull, + S3OneTimeClassificationTypeNone, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/macie/doc.go b/vendor/github.com/aws/aws-sdk-go/service/macie/doc.go index 1b8f9632f..1b01c3563 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/macie/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/macie/doc.go @@ -3,12 +3,18 @@ // Package macie provides the client and types for making API // requests to Amazon Macie. // -// Amazon Macie is a security service that uses machine learning to automatically -// discover, classify, and protect sensitive data in AWS. Macie recognizes sensitive -// data such as personally identifiable information (PII) or intellectual property, -// and provides you with dashboards and alerts that give visibility into how -// this data is being accessed or moved. For more information, see the Macie -// User Guide (https://docs.aws.amazon.com/macie/latest/userguide/what-is-macie.html). +// Amazon Macie Classic is a security service that uses machine learning to +// automatically discover, classify, and protect sensitive data in AWS. Macie +// Classic recognizes sensitive data such as personally identifiable information +// (PII) or intellectual property, and provides you with dashboards and alerts +// that give visibility into how this data is being accessed or moved. For more +// information, see the Amazon Macie Classic User Guide (https://docs.aws.amazon.com/macie/latest/userguide/what-is-macie.html). +// +// A new Amazon Macie is now available with significant design improvements +// and additional features, at a lower price and in most AWS Regions. We encourage +// you to explore and use the new and improved features, and benefit from the +// reduced cost. To learn about features and pricing for the new Amazon Macie, +// see Amazon Macie (https://aws.amazon.com/macie/). // // See https://docs.aws.amazon.com/goto/WebAPI/macie-2017-12-19 for more information on this service. 
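// Illustrative sketch (editorial example, not part of the vendored SDK diff):
// the generated *_Values() helpers above are typically handed to provider
// schema validation (for example validation.StringInSlice); a dependency-free
// equivalent of that check looks like this. Assumes an import of
// "github.com/aws/aws-sdk-go/service/macie".
func isValidOneTimeClassificationType(v string) bool {
	for _, valid := range macie.S3OneTimeClassificationType_Values() {
		if v == valid {
			return true
		}
	}
	return false
}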
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/macie/service.go b/vendor/github.com/aws/aws-sdk-go/service/macie/service.go index 3f350df2c..e4eca163c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/macie/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/macie/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/api.go b/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/api.go index d2537bef0..7d9ad2f45 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/api.go @@ -1084,7 +1084,7 @@ func (c *ManagedBlockchain) ListInvitationsRequest(input *ListInvitationsInput) // ListInvitations API operation for Amazon Managed Blockchain. // -// Returns a listing of all invitations made on the specified network. +// Returns a listing of all invitations for the current AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2057,6 +2057,202 @@ func (c *ManagedBlockchain) RejectInvitationWithContext(ctx aws.Context, input * return out, req.Send() } +const opUpdateMember = "UpdateMember" + +// UpdateMemberRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMember operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateMember for more information on using the UpdateMember +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateMemberRequest method. +// req, resp := client.UpdateMemberRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-2018-09-24/UpdateMember +func (c *ManagedBlockchain) UpdateMemberRequest(input *UpdateMemberInput) (req *request.Request, output *UpdateMemberOutput) { + op := &request.Operation{ + Name: opUpdateMember, + HTTPMethod: "PATCH", + HTTPPath: "/networks/{networkId}/members/{memberId}", + } + + if input == nil { + input = &UpdateMemberInput{} + } + + output = &UpdateMemberOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateMember API operation for Amazon Managed Blockchain. +// +// Updates a member configuration with new parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Managed Blockchain's +// API operation UpdateMember for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The action or operation requested is invalid. Verify that the action is typed +// correctly. +// +// * AccessDeniedException +// You do not have sufficient access to perform this action. +// +// * ResourceNotFoundException +// A requested resource does not exist on the network. It may have been deleted +// or referenced inaccurately. +// +// * ThrottlingException +// The request or operation could not be performed because a service is throttling +// requests. The most common source of throttling errors is launching EC2 instances +// such that your service limit for EC2 instances is exceeded. Request a limit +// increase or delete unused resources if possible. +// +// * InternalServiceErrorException +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-2018-09-24/UpdateMember +func (c *ManagedBlockchain) UpdateMember(input *UpdateMemberInput) (*UpdateMemberOutput, error) { + req, out := c.UpdateMemberRequest(input) + return out, req.Send() +} + +// UpdateMemberWithContext is the same as UpdateMember with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateMember for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ManagedBlockchain) UpdateMemberWithContext(ctx aws.Context, input *UpdateMemberInput, opts ...request.Option) (*UpdateMemberOutput, error) { + req, out := c.UpdateMemberRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateNode = "UpdateNode" + +// UpdateNodeRequest generates a "aws/request.Request" representing the +// client's request for the UpdateNode operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateNode for more information on using the UpdateNode +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateNodeRequest method. 
+// req, resp := client.UpdateNodeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-2018-09-24/UpdateNode +func (c *ManagedBlockchain) UpdateNodeRequest(input *UpdateNodeInput) (req *request.Request, output *UpdateNodeOutput) { + op := &request.Operation{ + Name: opUpdateNode, + HTTPMethod: "PATCH", + HTTPPath: "/networks/{networkId}/members/{memberId}/nodes/{nodeId}", + } + + if input == nil { + input = &UpdateNodeInput{} + } + + output = &UpdateNodeOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateNode API operation for Amazon Managed Blockchain. +// +// Updates a node configuration with new parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Managed Blockchain's +// API operation UpdateNode for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The action or operation requested is invalid. Verify that the action is typed +// correctly. +// +// * AccessDeniedException +// You do not have sufficient access to perform this action. +// +// * ResourceNotFoundException +// A requested resource does not exist on the network. It may have been deleted +// or referenced inaccurately. +// +// * ThrottlingException +// The request or operation could not be performed because a service is throttling +// requests. The most common source of throttling errors is launching EC2 instances +// such that your service limit for EC2 instances is exceeded. Request a limit +// increase or delete unused resources if possible. +// +// * InternalServiceErrorException +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/managedblockchain-2018-09-24/UpdateNode +func (c *ManagedBlockchain) UpdateNode(input *UpdateNodeInput) (*UpdateNodeOutput, error) { + req, out := c.UpdateNodeRequest(input) + return out, req.Send() +} + +// UpdateNodeWithContext is the same as UpdateNode with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateNode for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ManagedBlockchain) UpdateNodeWithContext(ctx aws.Context, input *UpdateNodeInput, opts ...request.Option) (*UpdateNodeOutput, error) { + req, out := c.UpdateNodeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opVoteOnProposal = "VoteOnProposal" // VoteOnProposalRequest generates a "aws/request.Request" representing the @@ -2161,8 +2357,8 @@ func (c *ManagedBlockchain) VoteOnProposalWithContext(ctx aws.Context, input *Vo // You do not have sufficient access to perform this action. 
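// Illustrative sketch (editorial example, not part of the vendored SDK diff):
// the new UpdateNode operation combined with the Node*LogPublishingConfiguration
// types added later in this file to turn on CloudWatch peer and chaincode logs.
// Assumes imports of "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/managedblockchain".
func enableNodeLogsSketch(ctx aws.Context, conn *managedblockchain.ManagedBlockchain, networkID, memberID, nodeID string) error {
	_, err := conn.UpdateNodeWithContext(ctx, &managedblockchain.UpdateNodeInput{
		NetworkId: aws.String(networkID),
		MemberId:  aws.String(memberID),
		NodeId:    aws.String(nodeID),
		LogPublishingConfiguration: &managedblockchain.NodeLogPublishingConfiguration{
			Fabric: &managedblockchain.NodeFabricLogPublishingConfiguration{
				PeerLogs: &managedblockchain.LogConfigurations{
					Cloudwatch: &managedblockchain.LogConfiguration{Enabled: aws.Bool(true)},
				},
				ChaincodeLogs: &managedblockchain.LogConfigurations{
					Cloudwatch: &managedblockchain.LogConfiguration{Enabled: aws.Bool(true)},
				},
			},
		},
	})
	return err
}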
type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2179,17 +2375,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2197,22 +2393,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // A policy type that defines the voting rules for the network. The rules decide @@ -3313,8 +3509,8 @@ func (s *GetProposalOutput) SetProposal(v *Proposal) *GetProposalOutput { } type IllegalActionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3331,17 +3527,17 @@ func (s IllegalActionException) GoString() string { func newErrorIllegalActionException(v protocol.ResponseMetadata) error { return &IllegalActionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IllegalActionException) Code() string { +func (s *IllegalActionException) Code() string { return "IllegalActionException" } // Message returns the exception's message. -func (s IllegalActionException) Message() string { +func (s *IllegalActionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3349,29 +3545,29 @@ func (s IllegalActionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IllegalActionException) OrigErr() error { +func (s *IllegalActionException) OrigErr() error { return nil } -func (s IllegalActionException) Error() string { +func (s *IllegalActionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IllegalActionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IllegalActionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
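// Illustrative sketch (editorial example, not part of the vendored SDK diff):
// with the exception types switched to pointer receivers and an exported
// RespMetadata field, callers can match modeled errors with errors.As.
// Assumes imports of "errors" and
// "github.com/aws/aws-sdk-go/service/managedblockchain".
func isAccessDenied(err error) bool {
	var ade *managedblockchain.AccessDeniedException
	return errors.As(err, &ade)
}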
-func (s IllegalActionException) RequestID() string { - return s.respMetadata.RequestID +func (s *IllegalActionException) RequestID() string { + return s.RespMetadata.RequestID } // The request processing has failed because of an unknown error, exception // or failure. type InternalServiceErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3388,17 +3584,17 @@ func (s InternalServiceErrorException) GoString() string { func newErrorInternalServiceErrorException(v protocol.ResponseMetadata) error { return &InternalServiceErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceErrorException) Code() string { +func (s *InternalServiceErrorException) Code() string { return "InternalServiceErrorException" } // Message returns the exception's message. -func (s InternalServiceErrorException) Message() string { +func (s *InternalServiceErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3406,29 +3602,29 @@ func (s InternalServiceErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServiceErrorException) OrigErr() error { +func (s *InternalServiceErrorException) OrigErr() error { return nil } -func (s InternalServiceErrorException) Error() string { +func (s *InternalServiceErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceErrorException) RequestID() string { + return s.RespMetadata.RequestID } // The action or operation requested is invalid. Verify that the action is typed // correctly. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3445,17 +3641,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3463,22 +3659,22 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // An invitation to an AWS account to create a member and join the network. @@ -4234,6 +4430,54 @@ func (s *ListProposalsOutput) SetProposals(v []*ProposalSummary) *ListProposalsO return s } +// A configuration for logging events. +type LogConfiguration struct { + _ struct{} `type:"structure"` + + // Indicates whether logging is enabled. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s LogConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogConfiguration) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *LogConfiguration) SetEnabled(v bool) *LogConfiguration { + s.Enabled = &v + return s +} + +// A collection of log configurations. +type LogConfigurations struct { + _ struct{} `type:"structure"` + + // Parameters for publishing logs to Amazon CloudWatch Logs. + Cloudwatch *LogConfiguration `type:"structure"` +} + +// String returns the string representation +func (s LogConfigurations) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LogConfigurations) GoString() string { + return s.String() +} + +// SetCloudwatch sets the Cloudwatch field's value. +func (s *LogConfigurations) SetCloudwatch(v *LogConfiguration) *LogConfigurations { + s.Cloudwatch = v + return s +} + // Member configuration properties. type Member struct { _ struct{} `type:"structure"` @@ -4251,6 +4495,9 @@ type Member struct { // The unique identifier of the member. Id *string `min:"1" type:"string"` + // Configuration properties for logging events associated with a member. + LogPublishingConfiguration *MemberLogPublishingConfiguration `type:"structure"` + // The name of the member. Name *string `min:"1" type:"string"` @@ -4312,6 +4559,12 @@ func (s *Member) SetId(v string) *Member { return s } +// SetLogPublishingConfiguration sets the LogPublishingConfiguration field's value. +func (s *Member) SetLogPublishingConfiguration(v *MemberLogPublishingConfiguration) *Member { + s.LogPublishingConfiguration = v + return s +} + // SetName sets the Name field's value. func (s *Member) SetName(v string) *Member { s.Name = &v @@ -4342,6 +4595,10 @@ type MemberConfiguration struct { // FrameworkConfiguration is a required field FrameworkConfiguration *MemberFrameworkConfiguration `type:"structure" required:"true"` + // Configuration properties for logging events associated with a member of a + // Managed Blockchain network. + LogPublishingConfiguration *MemberLogPublishingConfiguration `type:"structure"` + // The name of the member. 
// // Name is a required field @@ -4394,6 +4651,12 @@ func (s *MemberConfiguration) SetFrameworkConfiguration(v *MemberFrameworkConfig return s } +// SetLogPublishingConfiguration sets the LogPublishingConfiguration field's value. +func (s *MemberConfiguration) SetLogPublishingConfiguration(v *MemberLogPublishingConfiguration) *MemberConfiguration { + s.LogPublishingConfiguration = v + return s +} + // SetName sets the Name field's value. func (s *MemberConfiguration) SetName(v string) *MemberConfiguration { s.Name = &v @@ -4498,6 +4761,33 @@ func (s *MemberFabricConfiguration) SetAdminUsername(v string) *MemberFabricConf return s } +// Configuration properties for logging events associated with a member of a +// Managed Blockchain network using the Hyperledger Fabric framework. +type MemberFabricLogPublishingConfiguration struct { + _ struct{} `type:"structure"` + + // Configuration properties for logging events associated with a member's Certificate + // Authority (CA). CA logs help you determine when a member in your account + // joins the network, or when new peers register with a member CA. + CaLogs *LogConfigurations `type:"structure"` +} + +// String returns the string representation +func (s MemberFabricLogPublishingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MemberFabricLogPublishingConfiguration) GoString() string { + return s.String() +} + +// SetCaLogs sets the CaLogs field's value. +func (s *MemberFabricLogPublishingConfiguration) SetCaLogs(v *LogConfigurations) *MemberFabricLogPublishingConfiguration { + s.CaLogs = v + return s +} + // Attributes relevant to a member for the blockchain framework that the Managed // Blockchain network uses. type MemberFrameworkAttributes struct { @@ -4565,6 +4855,32 @@ func (s *MemberFrameworkConfiguration) SetFabric(v *MemberFabricConfiguration) * return s } +// Configuration properties for logging events associated with a member of a +// Managed Blockchain network. +type MemberLogPublishingConfiguration struct { + _ struct{} `type:"structure"` + + // Configuration properties for logging events associated with a member of a + // Managed Blockchain network using the Hyperledger Fabric framework. + Fabric *MemberFabricLogPublishingConfiguration `type:"structure"` +} + +// String returns the string representation +func (s MemberLogPublishingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MemberLogPublishingConfiguration) GoString() string { + return s.String() +} + +// SetFabric sets the Fabric field's value. +func (s *MemberLogPublishingConfiguration) SetFabric(v *MemberFabricLogPublishingConfiguration) *MemberLogPublishingConfiguration { + s.Fabric = v + return s +} + // A summary of configuration properties for a member. type MemberSummary struct { _ struct{} `type:"structure"` @@ -4764,7 +5080,7 @@ type NetworkFabricAttributes struct { _ struct{} `type:"structure"` // The edition of Amazon Managed Blockchain that Hyperledger Fabric uses. For - // more information, see Amazon Managed Blockchain Pricing (https://aws.amazon.com/managed-blockchain/pricing/). + // more information, see Amazon Managed Blockchain Pricing (http://aws.amazon.com/managed-blockchain/pricing/). Edition *string `type:"string" enum:"Edition"` // The endpoint of the ordering service for the network. 
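// Illustrative sketch (editorial example, not part of the vendored SDK diff):
// enabling Certificate Authority (CA) log publishing for a member through the
// new UpdateMember operation, using the Member*LogPublishingConfiguration
// types defined above. Assumes imports of "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/managedblockchain".
func enableMemberCaLogsSketch(conn *managedblockchain.ManagedBlockchain, networkID, memberID string) error {
	_, err := conn.UpdateMember(&managedblockchain.UpdateMemberInput{
		NetworkId: aws.String(networkID),
		MemberId:  aws.String(memberID),
		LogPublishingConfiguration: &managedblockchain.MemberLogPublishingConfiguration{
			Fabric: &managedblockchain.MemberFabricLogPublishingConfiguration{
				CaLogs: &managedblockchain.LogConfigurations{
					Cloudwatch: &managedblockchain.LogConfiguration{Enabled: aws.Bool(true)},
				},
			},
		},
	})
	return err
}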
@@ -4798,7 +5114,7 @@ type NetworkFabricConfiguration struct { _ struct{} `type:"structure"` // The edition of Amazon Managed Blockchain that the network uses. For more - // information, see Amazon Managed Blockchain Pricing (https://aws.amazon.com/managed-blockchain/pricing/). + // information, see Amazon Managed Blockchain Pricing (http://aws.amazon.com/managed-blockchain/pricing/). // // Edition is a required field Edition *string `type:"string" required:"true" enum:"Edition"` @@ -4997,12 +5313,19 @@ type Node struct { // The instance type of the node. InstanceType *string `type:"string"` + // Configuration properties for logging events associated with a peer node owned + // by a member in a Managed Blockchain network. + LogPublishingConfiguration *NodeLogPublishingConfiguration `type:"structure"` + // The unique identifier of the member to which the node belongs. MemberId *string `min:"1" type:"string"` // The unique identifier of the network that the node is in. NetworkId *string `min:"1" type:"string"` + // The state database that the node uses. Values are LevelDB or CouchDB. + StateDB *string `type:"string" enum:"StateDBType"` + // The status of the node. Status *string `type:"string" enum:"NodeStatus"` } @@ -5047,6 +5370,12 @@ func (s *Node) SetInstanceType(v string) *Node { return s } +// SetLogPublishingConfiguration sets the LogPublishingConfiguration field's value. +func (s *Node) SetLogPublishingConfiguration(v *NodeLogPublishingConfiguration) *Node { + s.LogPublishingConfiguration = v + return s +} + // SetMemberId sets the MemberId field's value. func (s *Node) SetMemberId(v string) *Node { s.MemberId = &v @@ -5059,6 +5388,12 @@ func (s *Node) SetNetworkId(v string) *Node { return s } +// SetStateDB sets the StateDB field's value. +func (s *Node) SetStateDB(v string) *Node { + s.StateDB = &v + return s +} + // SetStatus sets the Status field's value. func (s *Node) SetStatus(v string) *Node { s.Status = &v @@ -5078,6 +5413,15 @@ type NodeConfiguration struct { // // InstanceType is a required field InstanceType *string `type:"string" required:"true"` + + // Configuration properties for logging events associated with a peer node owned + // by a member in a Managed Blockchain network. + LogPublishingConfiguration *NodeLogPublishingConfiguration `type:"structure"` + + // The state database that the node uses. Values are LevelDB or CouchDB. When + // using an Amazon Managed Blockchain network with Hyperledger Fabric version + // 1.4 or later, the default is CouchDB. + StateDB *string `type:"string" enum:"StateDBType"` } // String returns the string representation @@ -5118,6 +5462,18 @@ func (s *NodeConfiguration) SetInstanceType(v string) *NodeConfiguration { return s } +// SetLogPublishingConfiguration sets the LogPublishingConfiguration field's value. +func (s *NodeConfiguration) SetLogPublishingConfiguration(v *NodeLogPublishingConfiguration) *NodeConfiguration { + s.LogPublishingConfiguration = v + return s +} + +// SetStateDB sets the StateDB field's value. +func (s *NodeConfiguration) SetStateDB(v string) *NodeConfiguration { + s.StateDB = &v + return s +} + // Attributes of Hyperledger Fabric for a peer node on a Managed Blockchain // network that uses Hyperledger Fabric. type NodeFabricAttributes struct { @@ -5153,6 +5509,47 @@ func (s *NodeFabricAttributes) SetPeerEventEndpoint(v string) *NodeFabricAttribu return s } +// Configuration properties for logging events associated with a peer node owned +// by a member in a Managed Blockchain network. 
+type NodeFabricLogPublishingConfiguration struct { + _ struct{} `type:"structure"` + + // Configuration properties for logging events associated with chaincode execution + // on a peer node. Chaincode logs contain the results of instantiating, invoking, + // and querying the chaincode. A peer can run multiple instances of chaincode. + // When enabled, a log stream is created for all chaincodes, with an individual + // log stream for each chaincode. + ChaincodeLogs *LogConfigurations `type:"structure"` + + // Configuration properties for a peer node log. Peer node logs contain messages + // generated when your client submits transaction proposals to peer nodes, requests + // to join channels, enrolls an admin peer, and lists the chaincode instances + // on a peer node. + PeerLogs *LogConfigurations `type:"structure"` +} + +// String returns the string representation +func (s NodeFabricLogPublishingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeFabricLogPublishingConfiguration) GoString() string { + return s.String() +} + +// SetChaincodeLogs sets the ChaincodeLogs field's value. +func (s *NodeFabricLogPublishingConfiguration) SetChaincodeLogs(v *LogConfigurations) *NodeFabricLogPublishingConfiguration { + s.ChaincodeLogs = v + return s +} + +// SetPeerLogs sets the PeerLogs field's value. +func (s *NodeFabricLogPublishingConfiguration) SetPeerLogs(v *LogConfigurations) *NodeFabricLogPublishingConfiguration { + s.PeerLogs = v + return s +} + // Attributes relevant to a peer node on a Managed Blockchain network for the // blockchain framework that the network uses. type NodeFrameworkAttributes struct { @@ -5179,6 +5576,33 @@ func (s *NodeFrameworkAttributes) SetFabric(v *NodeFabricAttributes) *NodeFramew return s } +// Configuration properties for logging events associated with a peer node owned +// by a member in a Managed Blockchain network. +type NodeLogPublishingConfiguration struct { + _ struct{} `type:"structure"` + + // Configuration properties for logging events associated with a node that is + // owned by a member of a Managed Blockchain network using the Hyperledger Fabric + // framework. + Fabric *NodeFabricLogPublishingConfiguration `type:"structure"` +} + +// String returns the string representation +func (s NodeLogPublishingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NodeLogPublishingConfiguration) GoString() string { + return s.String() +} + +// SetFabric sets the Fabric field's value. +func (s *NodeLogPublishingConfiguration) SetFabric(v *NodeFabricLogPublishingConfiguration) *NodeLogPublishingConfiguration { + s.Fabric = v + return s +} + // A summary of configuration properties for a peer node. type NodeSummary struct { _ struct{} `type:"structure"` @@ -5295,7 +5719,9 @@ type Proposal struct { // are not carried out. // // * ACTION_FAILED - One or more of the specified ProposalActions in a proposal - // that was approved could not be completed because of an error. + // that was approved could not be completed because of an error. The ACTION_FAILED + // status occurs even if only one ProposalAction fails and other actions + // are successful. Status *string `type:"string" enum:"ProposalStatus"` // The current total of YES votes cast on the proposal by members. @@ -5650,8 +6076,8 @@ func (s *RemoveAction) SetMemberId(v string) *RemoveAction { // A resource request is issued for a resource that already exists. 
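// Illustrative sketch (editorial example, not part of the vendored SDK diff):
// a NodeConfiguration that opts into CouchDB as the state database and enables
// peer log publishing. InstanceType is a pre-existing field and the value used
// here is only an example; the required AvailabilityZone field from the
// upstream model is omitted for brevity. Assumes imports of
// "github.com/aws/aws-sdk-go/aws" and
// "github.com/aws/aws-sdk-go/service/managedblockchain".
func newNodeConfigurationSketch() *managedblockchain.NodeConfiguration {
	return &managedblockchain.NodeConfiguration{
		InstanceType: aws.String("bc.t3.small"),
		StateDB:      aws.String(managedblockchain.StateDBTypeCouchDb),
		LogPublishingConfiguration: &managedblockchain.NodeLogPublishingConfiguration{
			Fabric: &managedblockchain.NodeFabricLogPublishingConfiguration{
				PeerLogs: &managedblockchain.LogConfigurations{
					Cloudwatch: &managedblockchain.LogConfiguration{Enabled: aws.Bool(true)},
				},
			},
		},
	}
}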
type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5668,17 +6094,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5686,30 +6112,30 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The maximum number of resources of that type already exist. Ensure the resources // requested are within the boundaries of the service edition and your account // limits. type ResourceLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5726,17 +6152,17 @@ func (s ResourceLimitExceededException) GoString() string { func newErrorResourceLimitExceededException(v protocol.ResponseMetadata) error { return &ResourceLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceLimitExceededException) Code() string { +func (s *ResourceLimitExceededException) Code() string { return "ResourceLimitExceededException" } // Message returns the exception's message. -func (s ResourceLimitExceededException) Message() string { +func (s *ResourceLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5744,29 +6170,29 @@ func (s ResourceLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceLimitExceededException) OrigErr() error { +func (s *ResourceLimitExceededException) OrigErr() error { return nil } -func (s ResourceLimitExceededException) Error() string { +func (s *ResourceLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // A requested resource does not exist on the network. It may have been deleted // or referenced inaccurately. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5783,17 +6209,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5801,29 +6227,29 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The requested resource exists but is not in a status that can complete the // operation. type ResourceNotReadyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5840,17 +6266,17 @@ func (s ResourceNotReadyException) GoString() string { func newErrorResourceNotReadyException(v protocol.ResponseMetadata) error { return &ResourceNotReadyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotReadyException) Code() string { +func (s *ResourceNotReadyException) Code() string { return "ResourceNotReadyException" } // Message returns the exception's message. -func (s ResourceNotReadyException) Message() string { +func (s *ResourceNotReadyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5858,22 +6284,22 @@ func (s ResourceNotReadyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceNotReadyException) OrigErr() error { +func (s *ResourceNotReadyException) OrigErr() error { return nil } -func (s ResourceNotReadyException) Error() string { +func (s *ResourceNotReadyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotReadyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotReadyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotReadyException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotReadyException) RequestID() string { + return s.RespMetadata.RequestID } // The request or operation could not be performed because a service is throttling @@ -5881,8 +6307,8 @@ func (s ResourceNotReadyException) RequestID() string { // such that your service limit for EC2 instances is exceeded. Request a limit // increase or delete unused resources if possible. type ThrottlingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5899,17 +6325,17 @@ func (s ThrottlingException) GoString() string { func newErrorThrottlingException(v protocol.ResponseMetadata) error { return &ThrottlingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ThrottlingException) Code() string { +func (s *ThrottlingException) Code() string { return "ThrottlingException" } // Message returns the exception's message. -func (s ThrottlingException) Message() string { +func (s *ThrottlingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5917,22 +6343,201 @@ func (s ThrottlingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThrottlingException) OrigErr() error { +func (s *ThrottlingException) OrigErr() error { return nil } -func (s ThrottlingException) Error() string { +func (s *ThrottlingException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ThrottlingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ThrottlingException) RequestID() string { - return s.respMetadata.RequestID +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UpdateMemberInput struct { + _ struct{} `type:"structure"` + + // Configuration properties for publishing to Amazon CloudWatch Logs. + LogPublishingConfiguration *MemberLogPublishingConfiguration `type:"structure"` + + // The unique ID of the member. + // + // MemberId is a required field + MemberId *string `location:"uri" locationName:"memberId" min:"1" type:"string" required:"true"` + + // The unique ID of the Managed Blockchain network to which the member belongs. 
+ // + // NetworkId is a required field + NetworkId *string `location:"uri" locationName:"networkId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateMemberInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMemberInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateMemberInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateMemberInput"} + if s.MemberId == nil { + invalidParams.Add(request.NewErrParamRequired("MemberId")) + } + if s.MemberId != nil && len(*s.MemberId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MemberId", 1)) + } + if s.NetworkId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkId")) + } + if s.NetworkId != nil && len(*s.NetworkId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NetworkId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogPublishingConfiguration sets the LogPublishingConfiguration field's value. +func (s *UpdateMemberInput) SetLogPublishingConfiguration(v *MemberLogPublishingConfiguration) *UpdateMemberInput { + s.LogPublishingConfiguration = v + return s +} + +// SetMemberId sets the MemberId field's value. +func (s *UpdateMemberInput) SetMemberId(v string) *UpdateMemberInput { + s.MemberId = &v + return s +} + +// SetNetworkId sets the NetworkId field's value. +func (s *UpdateMemberInput) SetNetworkId(v string) *UpdateMemberInput { + s.NetworkId = &v + return s +} + +type UpdateMemberOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateMemberOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateMemberOutput) GoString() string { + return s.String() +} + +type UpdateNodeInput struct { + _ struct{} `type:"structure"` + + // Configuration properties for publishing to Amazon CloudWatch Logs. + LogPublishingConfiguration *NodeLogPublishingConfiguration `type:"structure"` + + // The unique ID of the member that owns the node. + // + // MemberId is a required field + MemberId *string `location:"uri" locationName:"memberId" min:"1" type:"string" required:"true"` + + // The unique ID of the Managed Blockchain network to which the node belongs. + // + // NetworkId is a required field + NetworkId *string `location:"uri" locationName:"networkId" min:"1" type:"string" required:"true"` + + // The unique ID of the node. + // + // NodeId is a required field + NodeId *string `location:"uri" locationName:"nodeId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateNodeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateNodeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateNodeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateNodeInput"} + if s.MemberId == nil { + invalidParams.Add(request.NewErrParamRequired("MemberId")) + } + if s.MemberId != nil && len(*s.MemberId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MemberId", 1)) + } + if s.NetworkId == nil { + invalidParams.Add(request.NewErrParamRequired("NetworkId")) + } + if s.NetworkId != nil && len(*s.NetworkId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NetworkId", 1)) + } + if s.NodeId == nil { + invalidParams.Add(request.NewErrParamRequired("NodeId")) + } + if s.NodeId != nil && len(*s.NodeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NodeId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLogPublishingConfiguration sets the LogPublishingConfiguration field's value. +func (s *UpdateNodeInput) SetLogPublishingConfiguration(v *NodeLogPublishingConfiguration) *UpdateNodeInput { + s.LogPublishingConfiguration = v + return s +} + +// SetMemberId sets the MemberId field's value. +func (s *UpdateNodeInput) SetMemberId(v string) *UpdateNodeInput { + s.MemberId = &v + return s +} + +// SetNetworkId sets the NetworkId field's value. +func (s *UpdateNodeInput) SetNetworkId(v string) *UpdateNodeInput { + s.NetworkId = &v + return s +} + +// SetNodeId sets the NodeId field's value. +func (s *UpdateNodeInput) SetNodeId(v string) *UpdateNodeInput { + s.NodeId = &v + return s +} + +type UpdateNodeOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateNodeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateNodeOutput) GoString() string { + return s.String() } type VoteOnProposalInput struct { @@ -6130,11 +6735,26 @@ const ( EditionStandard = "STANDARD" ) +// Edition_Values returns all elements of the Edition enum +func Edition_Values() []string { + return []string{ + EditionStarter, + EditionStandard, + } +} + const ( // FrameworkHyperledgerFabric is a Framework enum value FrameworkHyperledgerFabric = "HYPERLEDGER_FABRIC" ) +// Framework_Values returns all elements of the Framework enum +func Framework_Values() []string { + return []string{ + FrameworkHyperledgerFabric, + } +} + const ( // InvitationStatusPending is a InvitationStatus enum value InvitationStatusPending = "PENDING" @@ -6152,6 +6772,17 @@ const ( InvitationStatusExpired = "EXPIRED" ) +// InvitationStatus_Values returns all elements of the InvitationStatus enum +func InvitationStatus_Values() []string { + return []string{ + InvitationStatusPending, + InvitationStatusAccepted, + InvitationStatusAccepting, + InvitationStatusRejected, + InvitationStatusExpired, + } +} + const ( // MemberStatusCreating is a MemberStatus enum value MemberStatusCreating = "CREATING" @@ -6162,6 +6793,9 @@ const ( // MemberStatusCreateFailed is a MemberStatus enum value MemberStatusCreateFailed = "CREATE_FAILED" + // MemberStatusUpdating is a MemberStatus enum value + MemberStatusUpdating = "UPDATING" + // MemberStatusDeleting is a MemberStatus enum value MemberStatusDeleting = "DELETING" @@ -6169,6 +6803,18 @@ const ( MemberStatusDeleted = "DELETED" ) +// MemberStatus_Values returns all elements of the MemberStatus enum +func MemberStatus_Values() []string { + return []string{ + MemberStatusCreating, + MemberStatusAvailable, + MemberStatusCreateFailed, + MemberStatusUpdating, + MemberStatusDeleting, + MemberStatusDeleted, + } +} 
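// Illustrative sketch (editorial example, not part of the vendored SDK diff):
// the new MemberStatusUpdating value and the *_Values() helpers below are the
// kind of constants a provider-side waiter polls against. GetMember and its
// input/output shapes are assumed from the upstream API and are not shown in
// this hunk. Assumes imports of "fmt", "time", "github.com/aws/aws-sdk-go/aws"
// and "github.com/aws/aws-sdk-go/service/managedblockchain".
func waitMemberAvailableSketch(conn *managedblockchain.ManagedBlockchain, networkID, memberID string) error {
	for i := 0; i < 30; i++ {
		out, err := conn.GetMember(&managedblockchain.GetMemberInput{
			NetworkId: aws.String(networkID),
			MemberId:  aws.String(memberID),
		})
		if err != nil {
			return err
		}
		switch aws.StringValue(out.Member.Status) {
		case managedblockchain.MemberStatusAvailable:
			return nil
		case managedblockchain.MemberStatusCreating, managedblockchain.MemberStatusUpdating:
			time.Sleep(10 * time.Second)
		default:
			return fmt.Errorf("member entered unexpected status %q", aws.StringValue(out.Member.Status))
		}
	}
	return fmt.Errorf("timed out waiting for member %s to become available", memberID)
}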
+ const ( // NetworkStatusCreating is a NetworkStatus enum value NetworkStatusCreating = "CREATING" @@ -6186,6 +6832,17 @@ const ( NetworkStatusDeleted = "DELETED" ) +// NetworkStatus_Values returns all elements of the NetworkStatus enum +func NetworkStatus_Values() []string { + return []string{ + NetworkStatusCreating, + NetworkStatusAvailable, + NetworkStatusCreateFailed, + NetworkStatusDeleting, + NetworkStatusDeleted, + } +} + const ( // NodeStatusCreating is a NodeStatus enum value NodeStatusCreating = "CREATING" @@ -6196,6 +6853,9 @@ const ( // NodeStatusCreateFailed is a NodeStatus enum value NodeStatusCreateFailed = "CREATE_FAILED" + // NodeStatusUpdating is a NodeStatus enum value + NodeStatusUpdating = "UPDATING" + // NodeStatusDeleting is a NodeStatus enum value NodeStatusDeleting = "DELETING" @@ -6206,6 +6866,19 @@ const ( NodeStatusFailed = "FAILED" ) +// NodeStatus_Values returns all elements of the NodeStatus enum +func NodeStatus_Values() []string { + return []string{ + NodeStatusCreating, + NodeStatusAvailable, + NodeStatusCreateFailed, + NodeStatusUpdating, + NodeStatusDeleting, + NodeStatusDeleted, + NodeStatusFailed, + } +} + const ( // ProposalStatusInProgress is a ProposalStatus enum value ProposalStatusInProgress = "IN_PROGRESS" @@ -6223,6 +6896,33 @@ const ( ProposalStatusActionFailed = "ACTION_FAILED" ) +// ProposalStatus_Values returns all elements of the ProposalStatus enum +func ProposalStatus_Values() []string { + return []string{ + ProposalStatusInProgress, + ProposalStatusApproved, + ProposalStatusRejected, + ProposalStatusExpired, + ProposalStatusActionFailed, + } +} + +const ( + // StateDBTypeLevelDb is a StateDBType enum value + StateDBTypeLevelDb = "LevelDB" + + // StateDBTypeCouchDb is a StateDBType enum value + StateDBTypeCouchDb = "CouchDB" +) + +// StateDBType_Values returns all elements of the StateDBType enum +func StateDBType_Values() []string { + return []string{ + StateDBTypeLevelDb, + StateDBTypeCouchDb, + } +} + const ( // ThresholdComparatorGreaterThan is a ThresholdComparator enum value ThresholdComparatorGreaterThan = "GREATER_THAN" @@ -6231,6 +6931,14 @@ const ( ThresholdComparatorGreaterThanOrEqualTo = "GREATER_THAN_OR_EQUAL_TO" ) +// ThresholdComparator_Values returns all elements of the ThresholdComparator enum +func ThresholdComparator_Values() []string { + return []string{ + ThresholdComparatorGreaterThan, + ThresholdComparatorGreaterThanOrEqualTo, + } +} + const ( // VoteValueYes is a VoteValue enum value VoteValueYes = "YES" @@ -6238,3 +6946,11 @@ const ( // VoteValueNo is a VoteValue enum value VoteValueNo = "NO" ) + +// VoteValue_Values returns all elements of the VoteValue enum +func VoteValue_Values() []string { + return []string{ + VoteValueYes, + VoteValueNo, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go b/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go index 89f752384..ade725afa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/marketplacecatalog/api.go 
b/vendor/github.com/aws/aws-sdk-go/service/marketplacecatalog/api.go index 41c04b48e..9e837e96f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/marketplacecatalog/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/marketplacecatalog/api.go @@ -638,7 +638,17 @@ func (c *MarketplaceCatalog) StartChangeSetRequest(input *StartChangeSetInput) ( // StartChangeSet API operation for AWS Marketplace Catalog Service. // -// This operation allows you to request changes in your entities. +// This operation allows you to request changes for your entities. Within a +// single ChangeSet, you cannot start the same change type against the same +// entity multiple times. Additionally, when a ChangeSet is running, all the +// entities targeted by the different changes are locked until the ChangeSet +// has completed (either succeeded, cancelled, or failed). If you try to start +// a ChangeSet containing a change against an entity that is already locked, +// you will receive a ResourceInUseException. +// +// For example, you cannot start the ChangeSet described in the example (https://docs.aws.amazon.com/marketplace-catalog/latest/api-reference/API_StartChangeSet.html#API_StartChangeSet_Examples) +// below because it contains two changes to execute the same change type (AddRevisions) +// against the same entity (entity-id@1). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -693,8 +703,8 @@ func (c *MarketplaceCatalog) StartChangeSetWithContext(ctx aws.Context, input *S // Access is denied. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -711,17 +721,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -729,22 +739,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } type CancelChangeSetInput struct { @@ -943,6 +953,12 @@ type ChangeSetSummaryListItem struct { // one entity. 
EntityIdList []*string `type:"list"` + // Returned if the change set is in FAILED status. Can be either CLIENT_ERROR, + // which means that there are issues with the request (see the ErrorDetailList + // of DescribeChangeSet), or SERVER_FAULT, which means that there is a problem + // in the system, and you should retry your request. + FailureCode *string `type:"string" enum:"FailureCode"` + // The time, in ISO 8601 format (2018-02-27T13:45:22Z), when the change set // was started. StartTime *string `min:"20" type:"string"` @@ -991,6 +1007,12 @@ func (s *ChangeSetSummaryListItem) SetEntityIdList(v []*string) *ChangeSetSummar return s } +// SetFailureCode sets the FailureCode field's value. +func (s *ChangeSetSummaryListItem) SetFailureCode(v string) *ChangeSetSummaryListItem { + s.FailureCode = &v + return s +} + // SetStartTime sets the StartTime field's value. func (s *ChangeSetSummaryListItem) SetStartTime(v string) *ChangeSetSummaryListItem { s.StartTime = &v @@ -1011,6 +1033,10 @@ type ChangeSummary struct { // The type of the change. ChangeType *string `min:"1" type:"string"` + // This object contains details specific to the change type of the requested + // change. + Details *string `min:"2" type:"string"` + // The entity to be changed. Entity *Entity `type:"structure"` @@ -1034,6 +1060,12 @@ func (s *ChangeSummary) SetChangeType(v string) *ChangeSummary { return s } +// SetDetails sets the Details field's value. +func (s *ChangeSummary) SetDetails(v string) *ChangeSummary { + s.Details = &v + return s +} + // SetEntity sets the Entity field's value. func (s *ChangeSummary) SetEntity(v *Entity) *ChangeSummary { s.Entity = v @@ -1127,6 +1159,12 @@ type DescribeChangeSetOutput struct { // state. Null if the request is not in a terminal state. EndTime *string `min:"20" type:"string"` + // Returned if the change set is in FAILED status. Can be either CLIENT_ERROR, + // which means that there are issues with the request (see the ErrorDetailList), + // or SERVER_FAULT, which means that there is a problem in the system, and you + // should retry your request. + FailureCode *string `type:"string" enum:"FailureCode"` + // Returned if there is a failure on the change set, but that failure is not // related to any of the changes in the request. FailureDescription *string `type:"string"` @@ -1179,6 +1217,12 @@ func (s *DescribeChangeSetOutput) SetEndTime(v string) *DescribeChangeSetOutput return s } +// SetFailureCode sets the FailureCode field's value. +func (s *DescribeChangeSetOutput) SetFailureCode(v string) *DescribeChangeSetOutput { + s.FailureCode = &v + return s +} + // SetFailureDescription sets the FailureDescription field's value. func (s *DescribeChangeSetOutput) SetFailureDescription(v string) *DescribeChangeSetOutput { s.FailureDescription = &v @@ -1388,13 +1432,13 @@ type EntitySummary struct { // The last time the entity was published, using ISO 8601 format (2018-02-27T13:45:22Z). LastModifiedDate *string `type:"string"` - // The name for the entity. This value is not unique. It is defined by the provider. + // The name for the entity. This value is not unique. It is defined by the seller. Name *string `type:"string"` - // The visibility status of the entity to subscribers. This value can be Public - // (everyone can view the entity), Limited (the entity is visible to limited - // accounts only), or Restricted (the entity was published and then unpublished - // and only existing subscribers can view it). + // The visibility status of the entity to buyers. 
This value can be Public (everyone + // can view the entity), Limited (the entity is visible to limited accounts + // only), or Restricted (the entity was published and then unpublished and only + // existing buyers can view it). Visibility *string `type:"string"` } @@ -1554,8 +1598,8 @@ func (s *Filter) SetValueList(v []*string) *Filter { // There was an internal service exception. type InternalServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1572,17 +1616,17 @@ func (s InternalServiceException) GoString() string { func newErrorInternalServiceException(v protocol.ResponseMetadata) error { return &InternalServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceException) Code() string { +func (s *InternalServiceException) Code() string { return "InternalServiceException" } // Message returns the exception's message. -func (s InternalServiceException) Message() string { +func (s *InternalServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1590,22 +1634,22 @@ func (s InternalServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServiceException) OrigErr() error { +func (s *InternalServiceException) OrigErr() error { return nil } -func (s InternalServiceException) Error() string { +func (s *InternalServiceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceException) RequestID() string { + return s.RespMetadata.RequestID } type ListChangeSetsInput struct { @@ -1628,7 +1672,7 @@ type ListChangeSetsInput struct { // results. NextToken *string `min:"1" type:"string"` - // An object that contains two attributes, sortBy and sortOrder. + // An object that contains two attributes, SortBy and SortOrder. Sort *Sort `type:"structure"` } @@ -1768,7 +1812,7 @@ type ListEntitiesInput struct { // The value of the next token, if it exists. Null if there are no more results. NextToken *string `min:"1" type:"string"` - // An object that contains two attributes, sortBy and sortOrder. + // An object that contains two attributes, SortBy and SortOrder. Sort *Sort `type:"structure"` } @@ -1898,8 +1942,8 @@ func (s *ListEntitiesOutput) SetNextToken(v string) *ListEntitiesOutput { // The resource is currently in use. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1916,17 +1960,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1934,28 +1978,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource wasn't found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1972,17 +2016,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1990,28 +2034,28 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Currently, the specified resource is not supported. 
type ResourceNotSupportedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2028,17 +2072,17 @@ func (s ResourceNotSupportedException) GoString() string { func newErrorResourceNotSupportedException(v protocol.ResponseMetadata) error { return &ResourceNotSupportedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotSupportedException) Code() string { +func (s *ResourceNotSupportedException) Code() string { return "ResourceNotSupportedException" } // Message returns the exception's message. -func (s ResourceNotSupportedException) Message() string { +func (s *ResourceNotSupportedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2046,28 +2090,28 @@ func (s ResourceNotSupportedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotSupportedException) OrigErr() error { +func (s *ResourceNotSupportedException) OrigErr() error { return nil } -func (s ResourceNotSupportedException) Error() string { +func (s *ResourceNotSupportedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotSupportedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotSupportedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotSupportedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotSupportedException) RequestID() string { + return s.RespMetadata.RequestID } // The maximum number of open requests per account has been exceeded. type ServiceQuotaExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2084,17 +2128,17 @@ func (s ServiceQuotaExceededException) GoString() string { func newErrorServiceQuotaExceededException(v protocol.ResponseMetadata) error { return &ServiceQuotaExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceQuotaExceededException) Code() string { +func (s *ServiceQuotaExceededException) Code() string { return "ServiceQuotaExceededException" } // Message returns the exception's message. -func (s ServiceQuotaExceededException) Message() string { +func (s *ServiceQuotaExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2102,25 +2146,25 @@ func (s ServiceQuotaExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceQuotaExceededException) OrigErr() error { +func (s *ServiceQuotaExceededException) OrigErr() error { return nil } -func (s ServiceQuotaExceededException) Error() string { +func (s *ServiceQuotaExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ServiceQuotaExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceQuotaExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceQuotaExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceQuotaExceededException) RequestID() string { + return s.RespMetadata.RequestID } -// An object that contains two attributes, sortBy and sortOrder. +// An object that contains two attributes, SortBy and SortOrder. type Sort struct { _ struct{} `type:"structure"` @@ -2296,8 +2340,8 @@ func (s *StartChangeSetOutput) SetChangeSetId(v string) *StartChangeSetOutput { // Too many requests. type ThrottlingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2314,17 +2358,17 @@ func (s ThrottlingException) GoString() string { func newErrorThrottlingException(v protocol.ResponseMetadata) error { return &ThrottlingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ThrottlingException) Code() string { +func (s *ThrottlingException) Code() string { return "ThrottlingException" } // Message returns the exception's message. -func (s ThrottlingException) Message() string { +func (s *ThrottlingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2332,28 +2376,28 @@ func (s ThrottlingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThrottlingException) OrigErr() error { +func (s *ThrottlingException) OrigErr() error { return nil } -func (s ThrottlingException) Error() string { +func (s *ThrottlingException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ThrottlingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ThrottlingException) RequestID() string { - return s.respMetadata.RequestID +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID } // An error occurred during validation. type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2370,17 +2414,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. -func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2388,22 +2432,22 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } const ( @@ -2423,6 +2467,33 @@ const ( ChangeStatusFailed = "FAILED" ) +// ChangeStatus_Values returns all elements of the ChangeStatus enum +func ChangeStatus_Values() []string { + return []string{ + ChangeStatusPreparing, + ChangeStatusApplying, + ChangeStatusSucceeded, + ChangeStatusCancelled, + ChangeStatusFailed, + } +} + +const ( + // FailureCodeClientError is a FailureCode enum value + FailureCodeClientError = "CLIENT_ERROR" + + // FailureCodeServerFault is a FailureCode enum value + FailureCodeServerFault = "SERVER_FAULT" +) + +// FailureCode_Values returns all elements of the FailureCode enum +func FailureCode_Values() []string { + return []string{ + FailureCodeClientError, + FailureCodeServerFault, + } +} + const ( // SortOrderAscending is a SortOrder enum value SortOrderAscending = "ASCENDING" @@ -2430,3 +2501,11 @@ const ( // SortOrderDescending is a SortOrder enum value SortOrderDescending = "DESCENDING" ) + +// SortOrder_Values returns all elements of the SortOrder enum +func SortOrder_Values() []string { + return []string{ + SortOrderAscending, + SortOrderDescending, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/marketplacecatalog/doc.go b/vendor/github.com/aws/aws-sdk-go/service/marketplacecatalog/doc.go index debadb7b0..4b61f9070 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/marketplacecatalog/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/marketplacecatalog/doc.go @@ -3,8 +3,8 @@ // Package marketplacecatalog provides the client and types for making API // requests to AWS Marketplace Catalog Service. // -// Catalog API actions allow you to create, describe, list, and delete changes -// to your published entities. An entity is a product or an offer on AWS Marketplace. +// Catalog API actions allow you to manage your entities through list, describe, +// and update capabilities. An entity can be a product or an offer on AWS Marketplace. // // You can automate your entity update process by integrating the AWS Marketplace // Catalog API with your AWS Marketplace product build or deployment pipelines. 
diff --git a/vendor/github.com/aws/aws-sdk-go/service/marketplacecatalog/service.go b/vendor/github.com/aws/aws-sdk-go/service/marketplacecatalog/service.go index 8fc782449..ae4063ce9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/marketplacecatalog/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/marketplacecatalog/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/api.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/api.go index b34602a90..442dce39e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/api.go @@ -56,7 +56,7 @@ func (c *MediaConnect) AddFlowOutputsRequest(input *AddFlowOutputsInput) (req *r // AddFlowOutputs API operation for AWS MediaConnect. // -// Adds outputs to an existing flow. You can create up to 20 outputs per flow. +// Adds outputs to an existing flow. You can create up to 50 outputs per flow. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -123,6 +123,218 @@ func (c *MediaConnect) AddFlowOutputsWithContext(ctx aws.Context, input *AddFlow return out, req.Send() } +const opAddFlowSources = "AddFlowSources" + +// AddFlowSourcesRequest generates a "aws/request.Request" representing the +// client's request for the AddFlowSources operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AddFlowSources for more information on using the AddFlowSources +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AddFlowSourcesRequest method. +// req, resp := client.AddFlowSourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/AddFlowSources +func (c *MediaConnect) AddFlowSourcesRequest(input *AddFlowSourcesInput) (req *request.Request, output *AddFlowSourcesOutput) { + op := &request.Operation{ + Name: opAddFlowSources, + HTTPMethod: "POST", + HTTPPath: "/v1/flows/{flowArn}/source", + } + + if input == nil { + input = &AddFlowSourcesInput{} + } + + output = &AddFlowSourcesOutput{} + req = c.newRequest(op, input, output) + return +} + +// AddFlowSources API operation for AWS MediaConnect. +// +// Adds Sources to flow +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation AddFlowSources for usage and error information. 
+// +// Returned Error Types: +// * BadRequestException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * InternalServerErrorException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ForbiddenException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * NotFoundException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ServiceUnavailableException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * TooManyRequestsException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/AddFlowSources +func (c *MediaConnect) AddFlowSources(input *AddFlowSourcesInput) (*AddFlowSourcesOutput, error) { + req, out := c.AddFlowSourcesRequest(input) + return out, req.Send() +} + +// AddFlowSourcesWithContext is the same as AddFlowSources with the addition of +// the ability to pass a context and additional request options. +// +// See AddFlowSources for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) AddFlowSourcesWithContext(ctx aws.Context, input *AddFlowSourcesInput, opts ...request.Option) (*AddFlowSourcesOutput, error) { + req, out := c.AddFlowSourcesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAddFlowVpcInterfaces = "AddFlowVpcInterfaces" + +// AddFlowVpcInterfacesRequest generates a "aws/request.Request" representing the +// client's request for the AddFlowVpcInterfaces operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AddFlowVpcInterfaces for more information on using the AddFlowVpcInterfaces +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AddFlowVpcInterfacesRequest method. 
+// req, resp := client.AddFlowVpcInterfacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/AddFlowVpcInterfaces +func (c *MediaConnect) AddFlowVpcInterfacesRequest(input *AddFlowVpcInterfacesInput) (req *request.Request, output *AddFlowVpcInterfacesOutput) { + op := &request.Operation{ + Name: opAddFlowVpcInterfaces, + HTTPMethod: "POST", + HTTPPath: "/v1/flows/{flowArn}/vpcInterfaces", + } + + if input == nil { + input = &AddFlowVpcInterfacesInput{} + } + + output = &AddFlowVpcInterfacesOutput{} + req = c.newRequest(op, input, output) + return +} + +// AddFlowVpcInterfaces API operation for AWS MediaConnect. +// +// Adds VPC interfaces to flow +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation AddFlowVpcInterfaces for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * InternalServerErrorException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ForbiddenException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * NotFoundException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ServiceUnavailableException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * TooManyRequestsException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/AddFlowVpcInterfaces +func (c *MediaConnect) AddFlowVpcInterfaces(input *AddFlowVpcInterfacesInput) (*AddFlowVpcInterfacesOutput, error) { + req, out := c.AddFlowVpcInterfacesRequest(input) + return out, req.Send() +} + +// AddFlowVpcInterfacesWithContext is the same as AddFlowVpcInterfaces with the addition of +// the ability to pass a context and additional request options. +// +// See AddFlowVpcInterfaces for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) AddFlowVpcInterfacesWithContext(ctx aws.Context, input *AddFlowVpcInterfacesInput, opts ...request.Option) (*AddFlowVpcInterfacesOutput, error) { + req, out := c.AddFlowVpcInterfacesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCreateFlow = "CreateFlow" // CreateFlowRequest generates a "aws/request.Request" representing the @@ -168,7 +380,7 @@ func (c *MediaConnect) CreateFlowRequest(input *CreateFlowInput) (req *request.R // CreateFlow API operation for AWS MediaConnect. // // Creates a new flow. The request must include one source. The request optionally -// can include outputs (up to 20) and entitlements (up to 50). +// can include outputs (up to 50) and entitlements (up to 50). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -444,178 +656,169 @@ func (c *MediaConnect) DescribeFlowWithContext(ctx aws.Context, input *DescribeF return out, req.Send() } -const opGrantFlowEntitlements = "GrantFlowEntitlements" +const opDescribeOffering = "DescribeOffering" -// GrantFlowEntitlementsRequest generates a "aws/request.Request" representing the -// client's request for the GrantFlowEntitlements operation. The "output" return +// DescribeOfferingRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOffering operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GrantFlowEntitlements for more information on using the GrantFlowEntitlements +// See DescribeOffering for more information on using the DescribeOffering // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GrantFlowEntitlementsRequest method. -// req, resp := client.GrantFlowEntitlementsRequest(params) +// // Example sending a request using the DescribeOfferingRequest method. +// req, resp := client.DescribeOfferingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/GrantFlowEntitlements -func (c *MediaConnect) GrantFlowEntitlementsRequest(input *GrantFlowEntitlementsInput) (req *request.Request, output *GrantFlowEntitlementsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/DescribeOffering +func (c *MediaConnect) DescribeOfferingRequest(input *DescribeOfferingInput) (req *request.Request, output *DescribeOfferingOutput) { op := &request.Operation{ - Name: opGrantFlowEntitlements, - HTTPMethod: "POST", - HTTPPath: "/v1/flows/{flowArn}/entitlements", + Name: opDescribeOffering, + HTTPMethod: "GET", + HTTPPath: "/v1/offerings/{offeringArn}", } if input == nil { - input = &GrantFlowEntitlementsInput{} + input = &DescribeOfferingInput{} } - output = &GrantFlowEntitlementsOutput{} + output = &DescribeOfferingOutput{} req = c.newRequest(op, input, output) return } -// GrantFlowEntitlements API operation for AWS MediaConnect. +// DescribeOffering API operation for AWS MediaConnect. // -// Grants entitlements to an existing flow. +// Displays the details of an offering. The response includes the offering description, +// duration, outbound bandwidth, price, and Amazon Resource Name (ARN). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS MediaConnect's -// API operation GrantFlowEntitlements for usage and error information. +// API operation DescribeOffering for usage and error information. // // Returned Error Types: -// * GrantFlowEntitlements420Exception -// Exception raised by AWS Elemental MediaConnect. See the error message and -// documentation for the operation for more information on the cause of this -// exception. -// -// * BadRequestException -// Exception raised by AWS Elemental MediaConnect. See the error message and -// documentation for the operation for more information on the cause of this -// exception. -// -// * InternalServerErrorException +// * NotFoundException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// * ForbiddenException +// * ServiceUnavailableException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// * NotFoundException +// * TooManyRequestsException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// * ServiceUnavailableException +// * BadRequestException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// * TooManyRequestsException +// * InternalServerErrorException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/GrantFlowEntitlements -func (c *MediaConnect) GrantFlowEntitlements(input *GrantFlowEntitlementsInput) (*GrantFlowEntitlementsOutput, error) { - req, out := c.GrantFlowEntitlementsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/DescribeOffering +func (c *MediaConnect) DescribeOffering(input *DescribeOfferingInput) (*DescribeOfferingOutput, error) { + req, out := c.DescribeOfferingRequest(input) return out, req.Send() } -// GrantFlowEntitlementsWithContext is the same as GrantFlowEntitlements with the addition of +// DescribeOfferingWithContext is the same as DescribeOffering with the addition of // the ability to pass a context and additional request options. // -// See GrantFlowEntitlements for details on how to use this API operation. +// See DescribeOffering for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *MediaConnect) GrantFlowEntitlementsWithContext(ctx aws.Context, input *GrantFlowEntitlementsInput, opts ...request.Option) (*GrantFlowEntitlementsOutput, error) { - req, out := c.GrantFlowEntitlementsRequest(input) +func (c *MediaConnect) DescribeOfferingWithContext(ctx aws.Context, input *DescribeOfferingInput, opts ...request.Option) (*DescribeOfferingOutput, error) { + req, out := c.DescribeOfferingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListEntitlements = "ListEntitlements" +const opDescribeReservation = "DescribeReservation" -// ListEntitlementsRequest generates a "aws/request.Request" representing the -// client's request for the ListEntitlements operation. The "output" return +// DescribeReservationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservation operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListEntitlements for more information on using the ListEntitlements +// See DescribeReservation for more information on using the DescribeReservation // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListEntitlementsRequest method. -// req, resp := client.ListEntitlementsRequest(params) +// // Example sending a request using the DescribeReservationRequest method. +// req, resp := client.DescribeReservationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListEntitlements -func (c *MediaConnect) ListEntitlementsRequest(input *ListEntitlementsInput) (req *request.Request, output *ListEntitlementsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/DescribeReservation +func (c *MediaConnect) DescribeReservationRequest(input *DescribeReservationInput) (req *request.Request, output *DescribeReservationOutput) { op := &request.Operation{ - Name: opListEntitlements, + Name: opDescribeReservation, HTTPMethod: "GET", - HTTPPath: "/v1/entitlements", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/v1/reservations/{reservationArn}", } if input == nil { - input = &ListEntitlementsInput{} + input = &DescribeReservationInput{} } - output = &ListEntitlementsOutput{} + output = &DescribeReservationOutput{} req = c.newRequest(op, input, output) return } -// ListEntitlements API operation for AWS MediaConnect. +// DescribeReservation API operation for AWS MediaConnect. // -// Displays a list of all entitlements that have been granted to this account. -// This request returns 20 results per page. +// Displays the details of a reservation. The response includes the reservation +// name, state, start date and time, and the details of the offering that make +// up the rest of the reservation (such as price, duration, and outbound bandwidth). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS MediaConnect's -// API operation ListEntitlements for usage and error information. +// API operation DescribeReservation for usage and error information. // // Returned Error Types: +// * NotFoundException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// // * ServiceUnavailableException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this @@ -636,33 +839,247 @@ func (c *MediaConnect) ListEntitlementsRequest(input *ListEntitlementsInput) (re // documentation for the operation for more information on the cause of this // exception. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListEntitlements -func (c *MediaConnect) ListEntitlements(input *ListEntitlementsInput) (*ListEntitlementsOutput, error) { - req, out := c.ListEntitlementsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/DescribeReservation +func (c *MediaConnect) DescribeReservation(input *DescribeReservationInput) (*DescribeReservationOutput, error) { + req, out := c.DescribeReservationRequest(input) return out, req.Send() } -// ListEntitlementsWithContext is the same as ListEntitlements with the addition of +// DescribeReservationWithContext is the same as DescribeReservation with the addition of // the ability to pass a context and additional request options. // -// See ListEntitlements for details on how to use this API operation. +// See DescribeReservation for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaConnect) ListEntitlementsWithContext(ctx aws.Context, input *ListEntitlementsInput, opts ...request.Option) (*ListEntitlementsOutput, error) { - req, out := c.ListEntitlementsRequest(input) +func (c *MediaConnect) DescribeReservationWithContext(ctx aws.Context, input *DescribeReservationInput, opts ...request.Option) (*DescribeReservationOutput, error) { + req, out := c.DescribeReservationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListEntitlementsPages iterates over the pages of a ListEntitlements operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +const opGrantFlowEntitlements = "GrantFlowEntitlements" + +// GrantFlowEntitlementsRequest generates a "aws/request.Request" representing the +// client's request for the GrantFlowEntitlements operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See ListEntitlements method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GrantFlowEntitlements for more information on using the GrantFlowEntitlements +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GrantFlowEntitlementsRequest method. +// req, resp := client.GrantFlowEntitlementsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/GrantFlowEntitlements +func (c *MediaConnect) GrantFlowEntitlementsRequest(input *GrantFlowEntitlementsInput) (req *request.Request, output *GrantFlowEntitlementsOutput) { + op := &request.Operation{ + Name: opGrantFlowEntitlements, + HTTPMethod: "POST", + HTTPPath: "/v1/flows/{flowArn}/entitlements", + } + + if input == nil { + input = &GrantFlowEntitlementsInput{} + } + + output = &GrantFlowEntitlementsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GrantFlowEntitlements API operation for AWS MediaConnect. +// +// Grants entitlements to an existing flow. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation GrantFlowEntitlements for usage and error information. +// +// Returned Error Types: +// * GrantFlowEntitlements420Exception +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * BadRequestException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * InternalServerErrorException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ForbiddenException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * NotFoundException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ServiceUnavailableException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * TooManyRequestsException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/GrantFlowEntitlements +func (c *MediaConnect) GrantFlowEntitlements(input *GrantFlowEntitlementsInput) (*GrantFlowEntitlementsOutput, error) { + req, out := c.GrantFlowEntitlementsRequest(input) + return out, req.Send() +} + +// GrantFlowEntitlementsWithContext is the same as GrantFlowEntitlements with the addition of +// the ability to pass a context and additional request options. +// +// See GrantFlowEntitlements for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) GrantFlowEntitlementsWithContext(ctx aws.Context, input *GrantFlowEntitlementsInput, opts ...request.Option) (*GrantFlowEntitlementsOutput, error) { + req, out := c.GrantFlowEntitlementsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListEntitlements = "ListEntitlements" + +// ListEntitlementsRequest generates a "aws/request.Request" representing the +// client's request for the ListEntitlements operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListEntitlements for more information on using the ListEntitlements +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListEntitlementsRequest method. +// req, resp := client.ListEntitlementsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListEntitlements +func (c *MediaConnect) ListEntitlementsRequest(input *ListEntitlementsInput) (req *request.Request, output *ListEntitlementsOutput) { + op := &request.Operation{ + Name: opListEntitlements, + HTTPMethod: "GET", + HTTPPath: "/v1/entitlements", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListEntitlementsInput{} + } + + output = &ListEntitlementsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListEntitlements API operation for AWS MediaConnect. +// +// Displays a list of all entitlements that have been granted to this account. +// This request returns 20 results per page. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation ListEntitlements for usage and error information. +// +// Returned Error Types: +// * ServiceUnavailableException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * TooManyRequestsException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * BadRequestException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * InternalServerErrorException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListEntitlements +func (c *MediaConnect) ListEntitlements(input *ListEntitlementsInput) (*ListEntitlementsOutput, error) { + req, out := c.ListEntitlementsRequest(input) + return out, req.Send() +} + +// ListEntitlementsWithContext is the same as ListEntitlements with the addition of +// the ability to pass a context and additional request options. +// +// See ListEntitlements for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) ListEntitlementsWithContext(ctx aws.Context, input *ListEntitlementsInput, opts ...request.Option) (*ListEntitlementsOutput, error) { + req, out := c.ListEntitlementsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListEntitlementsPages iterates over the pages of a ListEntitlements operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEntitlements method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // @@ -865,61 +1282,75 @@ func (c *MediaConnect) ListFlowsPagesWithContext(ctx aws.Context, input *ListFlo return p.Err() } -const opListTagsForResource = "ListTagsForResource" +const opListOfferings = "ListOfferings" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// ListOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the ListOfferings operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See ListOfferings for more information on using the ListOfferings // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the ListOfferingsRequest method. 
+// req, resp := client.ListOfferingsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListTagsForResource -func (c *MediaConnect) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListOfferings +func (c *MediaConnect) ListOfferingsRequest(input *ListOfferingsInput) (req *request.Request, output *ListOfferingsOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opListOfferings, HTTPMethod: "GET", - HTTPPath: "/tags/{resourceArn}", + HTTPPath: "/v1/offerings", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &ListTagsForResourceInput{} + input = &ListOfferingsInput{} } - output = &ListTagsForResourceOutput{} + output = &ListOfferingsOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for AWS MediaConnect. +// ListOfferings API operation for AWS MediaConnect. // -// List all tags on an AWS Elemental MediaConnect resource +// Displays a list of all offerings that are available to this account in the +// current AWS Region. If you have an active reservation (which means you've +// purchased an offering that has already started and hasn't expired yet), your +// account isn't eligible for other offerings. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS MediaConnect's -// API operation ListTagsForResource for usage and error information. +// API operation ListOfferings for usage and error information. // // Returned Error Types: -// * NotFoundException +// * ServiceUnavailableException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * TooManyRequestsException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. @@ -934,297 +1365,380 @@ func (c *MediaConnect) ListTagsForResourceRequest(input *ListTagsForResourceInpu // documentation for the operation for more information on the cause of this // exception. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListTagsForResource -func (c *MediaConnect) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListOfferings +func (c *MediaConnect) ListOfferings(input *ListOfferingsInput) (*ListOfferingsOutput, error) { + req, out := c.ListOfferingsRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// ListOfferingsWithContext is the same as ListOfferings with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation. +// See ListOfferings for details on how to use this API operation. 
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaConnect) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *MediaConnect) ListOfferingsWithContext(ctx aws.Context, input *ListOfferingsInput, opts ...request.Option) (*ListOfferingsOutput, error) { + req, out := c.ListOfferingsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRemoveFlowOutput = "RemoveFlowOutput" - -// RemoveFlowOutputRequest generates a "aws/request.Request" representing the -// client's request for the RemoveFlowOutput operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. +// ListOfferingsPages iterates over the pages of a ListOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// Use "Send" method on the returned Request to send the API call to the service. +// See ListOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListOfferings operation. +// pageNum := 0 +// err := client.ListOfferingsPages(params, +// func(page *mediaconnect.ListOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *MediaConnect) ListOfferingsPages(input *ListOfferingsInput, fn func(*ListOfferingsOutput, bool) bool) error { + return c.ListOfferingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListOfferingsPagesWithContext same as ListOfferingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) ListOfferingsPagesWithContext(ctx aws.Context, input *ListOfferingsInput, fn func(*ListOfferingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListOfferingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListOfferingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListOfferingsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListReservations = "ListReservations" + +// ListReservationsRequest generates a "aws/request.Request" representing the +// client's request for the ListReservations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See RemoveFlowOutput for more information on using the RemoveFlowOutput +// See ListReservations for more information on using the ListReservations // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RemoveFlowOutputRequest method. -// req, resp := client.RemoveFlowOutputRequest(params) +// // Example sending a request using the ListReservationsRequest method. +// req, resp := client.ListReservationsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/RemoveFlowOutput -func (c *MediaConnect) RemoveFlowOutputRequest(input *RemoveFlowOutputInput) (req *request.Request, output *RemoveFlowOutputOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListReservations +func (c *MediaConnect) ListReservationsRequest(input *ListReservationsInput) (req *request.Request, output *ListReservationsOutput) { op := &request.Operation{ - Name: opRemoveFlowOutput, - HTTPMethod: "DELETE", - HTTPPath: "/v1/flows/{flowArn}/outputs/{outputArn}", + Name: opListReservations, + HTTPMethod: "GET", + HTTPPath: "/v1/reservations", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &RemoveFlowOutputInput{} + input = &ListReservationsInput{} } - output = &RemoveFlowOutputOutput{} + output = &ListReservationsOutput{} req = c.newRequest(op, input, output) return } -// RemoveFlowOutput API operation for AWS MediaConnect. +// ListReservations API operation for AWS MediaConnect. // -// Removes an output from an existing flow. This request can be made only on -// an output that does not have an entitlement associated with it. If the output -// has an entitlement, you must revoke the entitlement instead. When an entitlement -// is revoked from a flow, the service automatically removes the associated -// output. +// Displays a list of all reservations that have been purchased by this account +// in the current AWS Region. This list includes all reservations in all states +// (such as active and expired). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS MediaConnect's -// API operation RemoveFlowOutput for usage and error information. +// API operation ListReservations for usage and error information. // // Returned Error Types: -// * BadRequestException -// Exception raised by AWS Elemental MediaConnect. See the error message and -// documentation for the operation for more information on the cause of this -// exception. -// -// * InternalServerErrorException -// Exception raised by AWS Elemental MediaConnect. See the error message and -// documentation for the operation for more information on the cause of this -// exception. -// -// * ForbiddenException +// * ServiceUnavailableException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. 
// -// * NotFoundException +// * TooManyRequestsException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// * ServiceUnavailableException +// * BadRequestException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// * TooManyRequestsException +// * InternalServerErrorException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/RemoveFlowOutput -func (c *MediaConnect) RemoveFlowOutput(input *RemoveFlowOutputInput) (*RemoveFlowOutputOutput, error) { - req, out := c.RemoveFlowOutputRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListReservations +func (c *MediaConnect) ListReservations(input *ListReservationsInput) (*ListReservationsOutput, error) { + req, out := c.ListReservationsRequest(input) return out, req.Send() } -// RemoveFlowOutputWithContext is the same as RemoveFlowOutput with the addition of +// ListReservationsWithContext is the same as ListReservations with the addition of // the ability to pass a context and additional request options. // -// See RemoveFlowOutput for details on how to use this API operation. +// See ListReservations for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaConnect) RemoveFlowOutputWithContext(ctx aws.Context, input *RemoveFlowOutputInput, opts ...request.Option) (*RemoveFlowOutputOutput, error) { - req, out := c.RemoveFlowOutputRequest(input) +func (c *MediaConnect) ListReservationsWithContext(ctx aws.Context, input *ListReservationsInput, opts ...request.Option) (*ListReservationsOutput, error) { + req, out := c.ListReservationsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRevokeFlowEntitlement = "RevokeFlowEntitlement" +// ListReservationsPages iterates over the pages of a ListReservations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListReservations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListReservations operation. +// pageNum := 0 +// err := client.ListReservationsPages(params, +// func(page *mediaconnect.ListReservationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *MediaConnect) ListReservationsPages(input *ListReservationsInput, fn func(*ListReservationsOutput, bool) bool) error { + return c.ListReservationsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// RevokeFlowEntitlementRequest generates a "aws/request.Request" representing the -// client's request for the RevokeFlowEntitlement operation. 
The "output" return +// ListReservationsPagesWithContext same as ListReservationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) ListReservationsPagesWithContext(ctx aws.Context, input *ListReservationsInput, fn func(*ListReservationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListReservationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListReservationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListReservationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RevokeFlowEntitlement for more information on using the RevokeFlowEntitlement +// See ListTagsForResource for more information on using the ListTagsForResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RevokeFlowEntitlementRequest method. -// req, resp := client.RevokeFlowEntitlementRequest(params) +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/RevokeFlowEntitlement -func (c *MediaConnect) RevokeFlowEntitlementRequest(input *RevokeFlowEntitlementInput) (req *request.Request, output *RevokeFlowEntitlementOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListTagsForResource +func (c *MediaConnect) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ - Name: opRevokeFlowEntitlement, - HTTPMethod: "DELETE", - HTTPPath: "/v1/flows/{flowArn}/entitlements/{entitlementArn}", + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{resourceArn}", } if input == nil { - input = &RevokeFlowEntitlementInput{} + input = &ListTagsForResourceInput{} } - output = &RevokeFlowEntitlementOutput{} + output = &ListTagsForResourceOutput{} req = c.newRequest(op, input, output) return } -// RevokeFlowEntitlement API operation for AWS MediaConnect. +// ListTagsForResource API operation for AWS MediaConnect. // -// Revokes an entitlement from a flow. Once an entitlement is revoked, the content -// becomes unavailable to the subscriber and the associated output is removed. 
+// List all tags on an AWS Elemental MediaConnect resource // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS MediaConnect's -// API operation RevokeFlowEntitlement for usage and error information. +// API operation ListTagsForResource for usage and error information. // // Returned Error Types: -// * BadRequestException -// Exception raised by AWS Elemental MediaConnect. See the error message and -// documentation for the operation for more information on the cause of this -// exception. -// -// * InternalServerErrorException -// Exception raised by AWS Elemental MediaConnect. See the error message and -// documentation for the operation for more information on the cause of this -// exception. -// -// * ForbiddenException -// Exception raised by AWS Elemental MediaConnect. See the error message and -// documentation for the operation for more information on the cause of this -// exception. -// // * NotFoundException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// * ServiceUnavailableException +// * BadRequestException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// * TooManyRequestsException +// * InternalServerErrorException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/RevokeFlowEntitlement -func (c *MediaConnect) RevokeFlowEntitlement(input *RevokeFlowEntitlementInput) (*RevokeFlowEntitlementOutput, error) { - req, out := c.RevokeFlowEntitlementRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/ListTagsForResource +func (c *MediaConnect) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) return out, req.Send() } -// RevokeFlowEntitlementWithContext is the same as RevokeFlowEntitlement with the addition of +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of // the ability to pass a context and additional request options. // -// See RevokeFlowEntitlement for details on how to use this API operation. +// See ListTagsForResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaConnect) RevokeFlowEntitlementWithContext(ctx aws.Context, input *RevokeFlowEntitlementInput, opts ...request.Option) (*RevokeFlowEntitlementOutput, error) { - req, out := c.RevokeFlowEntitlementRequest(input) +func (c *MediaConnect) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opStartFlow = "StartFlow" +const opPurchaseOffering = "PurchaseOffering" -// StartFlowRequest generates a "aws/request.Request" representing the -// client's request for the StartFlow operation. The "output" return +// PurchaseOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseOffering operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartFlow for more information on using the StartFlow +// See PurchaseOffering for more information on using the PurchaseOffering // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartFlowRequest method. -// req, resp := client.StartFlowRequest(params) +// // Example sending a request using the PurchaseOfferingRequest method. +// req, resp := client.PurchaseOfferingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/StartFlow -func (c *MediaConnect) StartFlowRequest(input *StartFlowInput) (req *request.Request, output *StartFlowOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/PurchaseOffering +func (c *MediaConnect) PurchaseOfferingRequest(input *PurchaseOfferingInput) (req *request.Request, output *PurchaseOfferingOutput) { op := &request.Operation{ - Name: opStartFlow, + Name: opPurchaseOffering, HTTPMethod: "POST", - HTTPPath: "/v1/flows/start/{flowArn}", + HTTPPath: "/v1/offerings/{offeringArn}", } if input == nil { - input = &StartFlowInput{} + input = &PurchaseOfferingInput{} } - output = &StartFlowOutput{} + output = &PurchaseOfferingOutput{} req = c.newRequest(op, input, output) return } -// StartFlow API operation for AWS MediaConnect. +// PurchaseOffering API operation for AWS MediaConnect. // -// Starts a flow. +// Submits a request to purchase an offering. If you already have an active +// reservation, you can't purchase another offering. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS MediaConnect's -// API operation StartFlow for usage and error information. +// API operation PurchaseOffering for usage and error information. // // Returned Error Types: // * BadRequestException @@ -1257,80 +1771,84 @@ func (c *MediaConnect) StartFlowRequest(input *StartFlowInput) (req *request.Req // documentation for the operation for more information on the cause of this // exception. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/StartFlow -func (c *MediaConnect) StartFlow(input *StartFlowInput) (*StartFlowOutput, error) { - req, out := c.StartFlowRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/PurchaseOffering +func (c *MediaConnect) PurchaseOffering(input *PurchaseOfferingInput) (*PurchaseOfferingOutput, error) { + req, out := c.PurchaseOfferingRequest(input) return out, req.Send() } -// StartFlowWithContext is the same as StartFlow with the addition of +// PurchaseOfferingWithContext is the same as PurchaseOffering with the addition of // the ability to pass a context and additional request options. // -// See StartFlow for details on how to use this API operation. +// See PurchaseOffering for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaConnect) StartFlowWithContext(ctx aws.Context, input *StartFlowInput, opts ...request.Option) (*StartFlowOutput, error) { - req, out := c.StartFlowRequest(input) +func (c *MediaConnect) PurchaseOfferingWithContext(ctx aws.Context, input *PurchaseOfferingInput, opts ...request.Option) (*PurchaseOfferingOutput, error) { + req, out := c.PurchaseOfferingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opStopFlow = "StopFlow" +const opRemoveFlowOutput = "RemoveFlowOutput" -// StopFlowRequest generates a "aws/request.Request" representing the -// client's request for the StopFlow operation. The "output" return +// RemoveFlowOutputRequest generates a "aws/request.Request" representing the +// client's request for the RemoveFlowOutput operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StopFlow for more information on using the StopFlow +// See RemoveFlowOutput for more information on using the RemoveFlowOutput // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StopFlowRequest method. -// req, resp := client.StopFlowRequest(params) +// // Example sending a request using the RemoveFlowOutputRequest method. 
+// req, resp := client.RemoveFlowOutputRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/StopFlow -func (c *MediaConnect) StopFlowRequest(input *StopFlowInput) (req *request.Request, output *StopFlowOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/RemoveFlowOutput +func (c *MediaConnect) RemoveFlowOutputRequest(input *RemoveFlowOutputInput) (req *request.Request, output *RemoveFlowOutputOutput) { op := &request.Operation{ - Name: opStopFlow, - HTTPMethod: "POST", - HTTPPath: "/v1/flows/stop/{flowArn}", + Name: opRemoveFlowOutput, + HTTPMethod: "DELETE", + HTTPPath: "/v1/flows/{flowArn}/outputs/{outputArn}", } if input == nil { - input = &StopFlowInput{} + input = &RemoveFlowOutputInput{} } - output = &StopFlowOutput{} + output = &RemoveFlowOutputOutput{} req = c.newRequest(op, input, output) return } -// StopFlow API operation for AWS MediaConnect. +// RemoveFlowOutput API operation for AWS MediaConnect. // -// Stops a flow. +// Removes an output from an existing flow. This request can be made only on +// an output that does not have an entitlement associated with it. If the output +// has an entitlement, you must revoke the entitlement instead. When an entitlement +// is revoked from a flow, the service automatically removes the associated +// output. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS MediaConnect's -// API operation StopFlow for usage and error information. +// API operation RemoveFlowOutput for usage and error information. // // Returned Error Types: // * BadRequestException @@ -1363,269 +1881,297 @@ func (c *MediaConnect) StopFlowRequest(input *StopFlowInput) (req *request.Reque // documentation for the operation for more information on the cause of this // exception. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/StopFlow -func (c *MediaConnect) StopFlow(input *StopFlowInput) (*StopFlowOutput, error) { - req, out := c.StopFlowRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/RemoveFlowOutput +func (c *MediaConnect) RemoveFlowOutput(input *RemoveFlowOutputInput) (*RemoveFlowOutputOutput, error) { + req, out := c.RemoveFlowOutputRequest(input) return out, req.Send() } -// StopFlowWithContext is the same as StopFlow with the addition of +// RemoveFlowOutputWithContext is the same as RemoveFlowOutput with the addition of // the ability to pass a context and additional request options. // -// See StopFlow for details on how to use this API operation. +// See RemoveFlowOutput for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *MediaConnect) StopFlowWithContext(ctx aws.Context, input *StopFlowInput, opts ...request.Option) (*StopFlowOutput, error) { - req, out := c.StopFlowRequest(input) +func (c *MediaConnect) RemoveFlowOutputWithContext(ctx aws.Context, input *RemoveFlowOutputInput, opts ...request.Option) (*RemoveFlowOutputOutput, error) { + req, out := c.RemoveFlowOutputRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +const opRemoveFlowSource = "RemoveFlowSource" -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// RemoveFlowSourceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveFlowSource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See RemoveFlowSource for more information on using the RemoveFlowSource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the RemoveFlowSourceRequest method. +// req, resp := client.RemoveFlowSourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/TagResource -func (c *MediaConnect) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/RemoveFlowSource +func (c *MediaConnect) RemoveFlowSourceRequest(input *RemoveFlowSourceInput) (req *request.Request, output *RemoveFlowSourceOutput) { op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/tags/{resourceArn}", + Name: opRemoveFlowSource, + HTTPMethod: "DELETE", + HTTPPath: "/v1/flows/{flowArn}/source/{sourceArn}", } if input == nil { - input = &TagResourceInput{} + input = &RemoveFlowSourceInput{} } - output = &TagResourceOutput{} + output = &RemoveFlowSourceOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// TagResource API operation for AWS MediaConnect. +// RemoveFlowSource API operation for AWS MediaConnect. // -// Associates the specified tags to a resource with the specified resourceArn. -// If existing tags on a resource are not specified in the request parameters, -// they are not changed. When a resource is deleted, the tags associated with -// that resource are deleted as well. +// Removes a source from an existing flow. This request can be made only if +// there is more than one source on the flow. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for AWS MediaConnect's -// API operation TagResource for usage and error information. +// API operation RemoveFlowSource for usage and error information. // // Returned Error Types: +// * BadRequestException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * InternalServerErrorException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ForbiddenException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// // * NotFoundException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// * BadRequestException +// * ServiceUnavailableException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// * InternalServerErrorException +// * TooManyRequestsException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/TagResource -func (c *MediaConnect) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/RemoveFlowSource +func (c *MediaConnect) RemoveFlowSource(input *RemoveFlowSourceInput) (*RemoveFlowSourceOutput, error) { + req, out := c.RemoveFlowSourceRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// RemoveFlowSourceWithContext is the same as RemoveFlowSource with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See RemoveFlowSource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaConnect) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *MediaConnect) RemoveFlowSourceWithContext(ctx aws.Context, input *RemoveFlowSourceInput, opts ...request.Option) (*RemoveFlowSourceOutput, error) { + req, out := c.RemoveFlowSourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUntagResource = "UntagResource" +const opRemoveFlowVpcInterface = "RemoveFlowVpcInterface" -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// RemoveFlowVpcInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveFlowVpcInterface operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See RemoveFlowVpcInterface for more information on using the RemoveFlowVpcInterface // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the RemoveFlowVpcInterfaceRequest method. +// req, resp := client.RemoveFlowVpcInterfaceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UntagResource -func (c *MediaConnect) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/RemoveFlowVpcInterface +func (c *MediaConnect) RemoveFlowVpcInterfaceRequest(input *RemoveFlowVpcInterfaceInput) (req *request.Request, output *RemoveFlowVpcInterfaceOutput) { op := &request.Operation{ - Name: opUntagResource, + Name: opRemoveFlowVpcInterface, HTTPMethod: "DELETE", - HTTPPath: "/tags/{resourceArn}", + HTTPPath: "/v1/flows/{flowArn}/vpcInterfaces/{vpcInterfaceName}", } if input == nil { - input = &UntagResourceInput{} + input = &RemoveFlowVpcInterfaceInput{} } - output = &UntagResourceOutput{} + output = &RemoveFlowVpcInterfaceOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UntagResource API operation for AWS MediaConnect. +// RemoveFlowVpcInterface API operation for AWS MediaConnect. // -// Deletes specified tags from a resource. +// Removes a VPC Interface from an existing flow. This request can be made only +// on a VPC interface that does not have a Source or Output associated with +// it. If the VPC interface is referenced by a Source or Output, you must first +// delete or update the Source or Output to no longer reference the VPC interface. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS MediaConnect's -// API operation UntagResource for usage and error information. +// API operation RemoveFlowVpcInterface for usage and error information. // // Returned Error Types: +// * BadRequestException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * InternalServerErrorException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ForbiddenException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. 
+// // * NotFoundException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// * BadRequestException +// * ServiceUnavailableException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// * InternalServerErrorException +// * TooManyRequestsException // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UntagResource -func (c *MediaConnect) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/RemoveFlowVpcInterface +func (c *MediaConnect) RemoveFlowVpcInterface(input *RemoveFlowVpcInterfaceInput) (*RemoveFlowVpcInterfaceOutput, error) { + req, out := c.RemoveFlowVpcInterfaceRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// RemoveFlowVpcInterfaceWithContext is the same as RemoveFlowVpcInterface with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See RemoveFlowVpcInterface for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaConnect) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *MediaConnect) RemoveFlowVpcInterfaceWithContext(ctx aws.Context, input *RemoveFlowVpcInterfaceInput, opts ...request.Option) (*RemoveFlowVpcInterfaceOutput, error) { + req, out := c.RemoveFlowVpcInterfaceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateFlowEntitlement = "UpdateFlowEntitlement" +const opRevokeFlowEntitlement = "RevokeFlowEntitlement" -// UpdateFlowEntitlementRequest generates a "aws/request.Request" representing the -// client's request for the UpdateFlowEntitlement operation. The "output" return +// RevokeFlowEntitlementRequest generates a "aws/request.Request" representing the +// client's request for the RevokeFlowEntitlement operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateFlowEntitlement for more information on using the UpdateFlowEntitlement +// See RevokeFlowEntitlement for more information on using the RevokeFlowEntitlement // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateFlowEntitlementRequest method. 
-// req, resp := client.UpdateFlowEntitlementRequest(params) +// // Example sending a request using the RevokeFlowEntitlementRequest method. +// req, resp := client.RevokeFlowEntitlementRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlowEntitlement -func (c *MediaConnect) UpdateFlowEntitlementRequest(input *UpdateFlowEntitlementInput) (req *request.Request, output *UpdateFlowEntitlementOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/RevokeFlowEntitlement +func (c *MediaConnect) RevokeFlowEntitlementRequest(input *RevokeFlowEntitlementInput) (req *request.Request, output *RevokeFlowEntitlementOutput) { op := &request.Operation{ - Name: opUpdateFlowEntitlement, - HTTPMethod: "PUT", + Name: opRevokeFlowEntitlement, + HTTPMethod: "DELETE", HTTPPath: "/v1/flows/{flowArn}/entitlements/{entitlementArn}", } if input == nil { - input = &UpdateFlowEntitlementInput{} + input = &RevokeFlowEntitlementInput{} } - output = &UpdateFlowEntitlementOutput{} + output = &RevokeFlowEntitlementOutput{} req = c.newRequest(op, input, output) return } -// UpdateFlowEntitlement API operation for AWS MediaConnect. +// RevokeFlowEntitlement API operation for AWS MediaConnect. // -// You can change an entitlement's description, subscribers, and encryption. -// If you change the subscribers, the service will remove the outputs that are -// are used by the subscribers that are removed. +// Revokes an entitlement from a flow. Once an entitlement is revoked, the content +// becomes unavailable to the subscriber and the associated output is removed. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS MediaConnect's -// API operation UpdateFlowEntitlement for usage and error information. +// API operation RevokeFlowEntitlement for usage and error information. // // Returned Error Types: // * BadRequestException @@ -1658,80 +2204,80 @@ func (c *MediaConnect) UpdateFlowEntitlementRequest(input *UpdateFlowEntitlement // documentation for the operation for more information on the cause of this // exception. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlowEntitlement -func (c *MediaConnect) UpdateFlowEntitlement(input *UpdateFlowEntitlementInput) (*UpdateFlowEntitlementOutput, error) { - req, out := c.UpdateFlowEntitlementRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/RevokeFlowEntitlement +func (c *MediaConnect) RevokeFlowEntitlement(input *RevokeFlowEntitlementInput) (*RevokeFlowEntitlementOutput, error) { + req, out := c.RevokeFlowEntitlementRequest(input) return out, req.Send() } -// UpdateFlowEntitlementWithContext is the same as UpdateFlowEntitlement with the addition of +// RevokeFlowEntitlementWithContext is the same as RevokeFlowEntitlement with the addition of // the ability to pass a context and additional request options. // -// See UpdateFlowEntitlement for details on how to use this API operation. +// See RevokeFlowEntitlement for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaConnect) UpdateFlowEntitlementWithContext(ctx aws.Context, input *UpdateFlowEntitlementInput, opts ...request.Option) (*UpdateFlowEntitlementOutput, error) { - req, out := c.UpdateFlowEntitlementRequest(input) +func (c *MediaConnect) RevokeFlowEntitlementWithContext(ctx aws.Context, input *RevokeFlowEntitlementInput, opts ...request.Option) (*RevokeFlowEntitlementOutput, error) { + req, out := c.RevokeFlowEntitlementRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateFlowOutput = "UpdateFlowOutput" +const opStartFlow = "StartFlow" -// UpdateFlowOutputRequest generates a "aws/request.Request" representing the -// client's request for the UpdateFlowOutput operation. The "output" return +// StartFlowRequest generates a "aws/request.Request" representing the +// client's request for the StartFlow operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateFlowOutput for more information on using the UpdateFlowOutput +// See StartFlow for more information on using the StartFlow // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateFlowOutputRequest method. -// req, resp := client.UpdateFlowOutputRequest(params) +// // Example sending a request using the StartFlowRequest method. +// req, resp := client.StartFlowRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlowOutput -func (c *MediaConnect) UpdateFlowOutputRequest(input *UpdateFlowOutputInput) (req *request.Request, output *UpdateFlowOutputOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/StartFlow +func (c *MediaConnect) StartFlowRequest(input *StartFlowInput) (req *request.Request, output *StartFlowOutput) { op := &request.Operation{ - Name: opUpdateFlowOutput, - HTTPMethod: "PUT", - HTTPPath: "/v1/flows/{flowArn}/outputs/{outputArn}", + Name: opStartFlow, + HTTPMethod: "POST", + HTTPPath: "/v1/flows/start/{flowArn}", } if input == nil { - input = &UpdateFlowOutputInput{} + input = &StartFlowInput{} } - output = &UpdateFlowOutputOutput{} + output = &StartFlowOutput{} req = c.newRequest(op, input, output) return } -// UpdateFlowOutput API operation for AWS MediaConnect. +// StartFlow API operation for AWS MediaConnect. // -// Updates an existing flow output. +// Starts a flow. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS MediaConnect's -// API operation UpdateFlowOutput for usage and error information. +// API operation StartFlow for usage and error information. 
// // Returned Error Types: // * BadRequestException @@ -1764,80 +2310,80 @@ func (c *MediaConnect) UpdateFlowOutputRequest(input *UpdateFlowOutputInput) (re // documentation for the operation for more information on the cause of this // exception. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlowOutput -func (c *MediaConnect) UpdateFlowOutput(input *UpdateFlowOutputInput) (*UpdateFlowOutputOutput, error) { - req, out := c.UpdateFlowOutputRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/StartFlow +func (c *MediaConnect) StartFlow(input *StartFlowInput) (*StartFlowOutput, error) { + req, out := c.StartFlowRequest(input) return out, req.Send() } -// UpdateFlowOutputWithContext is the same as UpdateFlowOutput with the addition of +// StartFlowWithContext is the same as StartFlow with the addition of // the ability to pass a context and additional request options. // -// See UpdateFlowOutput for details on how to use this API operation. +// See StartFlow for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaConnect) UpdateFlowOutputWithContext(ctx aws.Context, input *UpdateFlowOutputInput, opts ...request.Option) (*UpdateFlowOutputOutput, error) { - req, out := c.UpdateFlowOutputRequest(input) +func (c *MediaConnect) StartFlowWithContext(ctx aws.Context, input *StartFlowInput, opts ...request.Option) (*StartFlowOutput, error) { + req, out := c.StartFlowRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateFlowSource = "UpdateFlowSource" +const opStopFlow = "StopFlow" -// UpdateFlowSourceRequest generates a "aws/request.Request" representing the -// client's request for the UpdateFlowSource operation. The "output" return +// StopFlowRequest generates a "aws/request.Request" representing the +// client's request for the StopFlow operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateFlowSource for more information on using the UpdateFlowSource +// See StopFlow for more information on using the StopFlow // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateFlowSourceRequest method. -// req, resp := client.UpdateFlowSourceRequest(params) +// // Example sending a request using the StopFlowRequest method. 
+// req, resp := client.StopFlowRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlowSource -func (c *MediaConnect) UpdateFlowSourceRequest(input *UpdateFlowSourceInput) (req *request.Request, output *UpdateFlowSourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/StopFlow +func (c *MediaConnect) StopFlowRequest(input *StopFlowInput) (req *request.Request, output *StopFlowOutput) { op := &request.Operation{ - Name: opUpdateFlowSource, - HTTPMethod: "PUT", - HTTPPath: "/v1/flows/{flowArn}/source/{sourceArn}", + Name: opStopFlow, + HTTPMethod: "POST", + HTTPPath: "/v1/flows/stop/{flowArn}", } if input == nil { - input = &UpdateFlowSourceInput{} + input = &StopFlowInput{} } - output = &UpdateFlowSourceOutput{} + output = &StopFlowOutput{} req = c.newRequest(op, input, output) return } -// UpdateFlowSource API operation for AWS MediaConnect. +// StopFlow API operation for AWS MediaConnect. // -// Updates the source of a flow. +// Stops a flow. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS MediaConnect's -// API operation UpdateFlowSource for usage and error information. +// API operation StopFlow for usage and error information. // // Returned Error Types: // * BadRequestException @@ -1870,244 +2416,2200 @@ func (c *MediaConnect) UpdateFlowSourceRequest(input *UpdateFlowSourceInput) (re // documentation for the operation for more information on the cause of this // exception. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlowSource -func (c *MediaConnect) UpdateFlowSource(input *UpdateFlowSourceInput) (*UpdateFlowSourceOutput, error) { - req, out := c.UpdateFlowSourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/StopFlow +func (c *MediaConnect) StopFlow(input *StopFlowInput) (*StopFlowOutput, error) { + req, out := c.StopFlowRequest(input) return out, req.Send() } -// UpdateFlowSourceWithContext is the same as UpdateFlowSource with the addition of +// StopFlowWithContext is the same as StopFlow with the addition of // the ability to pass a context and additional request options. // -// See UpdateFlowSource for details on how to use this API operation. +// See StopFlow for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaConnect) UpdateFlowSourceWithContext(ctx aws.Context, input *UpdateFlowSourceInput, opts ...request.Option) (*UpdateFlowSourceOutput, error) { - req, out := c.UpdateFlowSourceRequest(input) +func (c *MediaConnect) StopFlowWithContext(ctx aws.Context, input *StopFlowInput, opts ...request.Option) (*StopFlowOutput, error) { + req, out := c.StopFlowRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// Exception raised by AWS Elemental MediaConnect. See the error message and -// documentation for the operation for more information on the cause of this -// exception. 
-type AddFlowOutputs420Exception struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation -func (s AddFlowOutputs420Exception) String() string { - return awsutil.Prettify(s) -} +const opTagResource = "TagResource" -// GoString returns the string representation +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/TagResource +func (c *MediaConnect) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS MediaConnect. +// +// Associates the specified tags to a resource with the specified resourceArn. +// If existing tags on a resource are not specified in the request parameters, +// they are not changed. When a resource is deleted, the tags associated with +// that resource are deleted as well. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * NotFoundException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * BadRequestException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * InternalServerErrorException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/TagResource +func (c *MediaConnect) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. 
+// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UntagResource +func (c *MediaConnect) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{resourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS MediaConnect. +// +// Deletes specified tags from a resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * NotFoundException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * BadRequestException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * InternalServerErrorException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. 
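The WithContext variants described above accept any standard context.Context (which satisfies aws.Context), so a deadline or cancellation propagates to the underlying HTTP request. A small sketch, reusing the assumed TagResourceInput fields from the previous example; the ten-second timeout is arbitrary.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

// tagFlowWithDeadline bounds the TagResource call to ten seconds. The client
// is assumed to be constructed as in the previous sketch.
func tagFlowWithDeadline(client *mediaconnect.MediaConnect, flowARN string) error {
	// The context must be non-nil, as the documentation above notes.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	_, err := client.TagResourceWithContext(ctx, &mediaconnect.TagResourceInput{
		ResourceArn: aws.String(flowARN),
		Tags:        aws.StringMap(map[string]string{"Environment": "test"}), // assumed field name
	})
	if err != nil {
		return fmt.Errorf("TagResource: %w", err)
	}
	return nil
}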
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UntagResource +func (c *MediaConnect) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateFlow = "UpdateFlow" + +// UpdateFlowRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFlow operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateFlow for more information on using the UpdateFlow +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateFlowRequest method. +// req, resp := client.UpdateFlowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlow +func (c *MediaConnect) UpdateFlowRequest(input *UpdateFlowInput) (req *request.Request, output *UpdateFlowOutput) { + op := &request.Operation{ + Name: opUpdateFlow, + HTTPMethod: "PUT", + HTTPPath: "/v1/flows/{flowArn}", + } + + if input == nil { + input = &UpdateFlowInput{} + } + + output = &UpdateFlowOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateFlow API operation for AWS MediaConnect. +// +// Updates flow +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation UpdateFlow for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * InternalServerErrorException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ForbiddenException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * NotFoundException +// Exception raised by AWS Elemental MediaConnect. 
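A companion sketch for the UntagResource operation above, which removes specific tag keys via DELETE on /tags/{resourceArn}. The ResourceArn and TagKeys field names are assumptions; UntagResourceInput is defined elsewhere in this file and its fields do not appear in this hunk.

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

// untagFlow removes only the named tag keys; other tags on the resource are
// left in place. The client is assumed to be built as in the earlier sketches.
func untagFlow(client *mediaconnect.MediaConnect, flowARN string, keys []string) error {
	_, err := client.UntagResource(&mediaconnect.UntagResourceInput{
		ResourceArn: aws.String(flowARN),
		TagKeys:     aws.StringSlice(keys), // assumed field name
	})
	return err
}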
See the error message and
+// documentation for the operation for more information on the cause of this
+// exception.
+//
+// * ServiceUnavailableException
+// Exception raised by AWS Elemental MediaConnect. See the error message and
+// documentation for the operation for more information on the cause of this
+// exception.
+//
+// * TooManyRequestsException
+// Exception raised by AWS Elemental MediaConnect. See the error message and
+// documentation for the operation for more information on the cause of this
+// exception.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlow
+func (c *MediaConnect) UpdateFlow(input *UpdateFlowInput) (*UpdateFlowOutput, error) {
+	req, out := c.UpdateFlowRequest(input)
+	return out, req.Send()
+}
+
+// UpdateFlowWithContext is the same as UpdateFlow with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateFlow for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *MediaConnect) UpdateFlowWithContext(ctx aws.Context, input *UpdateFlowInput, opts ...request.Option) (*UpdateFlowOutput, error) {
+	req, out := c.UpdateFlowRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opUpdateFlowEntitlement = "UpdateFlowEntitlement"
+
+// UpdateFlowEntitlementRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateFlowEntitlement operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateFlowEntitlement for more information on using the UpdateFlowEntitlement
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateFlowEntitlementRequest method.
+// req, resp := client.UpdateFlowEntitlementRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlowEntitlement
+func (c *MediaConnect) UpdateFlowEntitlementRequest(input *UpdateFlowEntitlementInput) (req *request.Request, output *UpdateFlowEntitlementOutput) {
+	op := &request.Operation{
+		Name:       opUpdateFlowEntitlement,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/v1/flows/{flowArn}/entitlements/{entitlementArn}",
+	}
+
+	if input == nil {
+		input = &UpdateFlowEntitlementInput{}
+	}
+
+	output = &UpdateFlowEntitlementOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateFlowEntitlement API operation for AWS MediaConnect.
+//
+// You can change an entitlement's description, subscribers, and encryption.
+// If you change the subscribers, the service will remove the outputs that are
+// used by the subscribers that are removed.
+//
+// Returns awserr.Error for service API and SDK errors.
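The comment above says service and SDK failures come back as awserr.Error, so callers can branch on the error code at runtime. A hedged sketch for UpdateFlow: the FlowArn field is assumed from the PUT /v1/flows/{flowArn} path, the other mutable settings are omitted, and the codes matched below are the ones listed in the UpdateFlow documentation.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

// updateFlow assumes the client was constructed as in the earlier sketches.
func updateFlow(client *mediaconnect.MediaConnect, flowARN string) {
	_, err := client.UpdateFlow(&mediaconnect.UpdateFlowInput{
		FlowArn: aws.String(flowARN), // assumed field; other mutable settings omitted
	})
	if err == nil {
		fmt.Println("flow updated")
		return
	}

	// Service and SDK errors satisfy awserr.Error, so distinguish the error
	// types listed for UpdateFlow above by their codes.
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case "NotFoundException":
			fmt.Println("flow does not exist:", aerr.Message())
		case "TooManyRequestsException":
			fmt.Println("throttled, retry later:", aerr.Message())
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
		return
	}
	fmt.Println("non-AWS error:", err)
}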
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation UpdateFlowEntitlement for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * InternalServerErrorException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ForbiddenException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * NotFoundException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ServiceUnavailableException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * TooManyRequestsException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlowEntitlement +func (c *MediaConnect) UpdateFlowEntitlement(input *UpdateFlowEntitlementInput) (*UpdateFlowEntitlementOutput, error) { + req, out := c.UpdateFlowEntitlementRequest(input) + return out, req.Send() +} + +// UpdateFlowEntitlementWithContext is the same as UpdateFlowEntitlement with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateFlowEntitlement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) UpdateFlowEntitlementWithContext(ctx aws.Context, input *UpdateFlowEntitlementInput, opts ...request.Option) (*UpdateFlowEntitlementOutput, error) { + req, out := c.UpdateFlowEntitlementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateFlowOutput = "UpdateFlowOutput" + +// UpdateFlowOutputRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFlowOutput operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateFlowOutput for more information on using the UpdateFlowOutput +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateFlowOutputRequest method. 
+// req, resp := client.UpdateFlowOutputRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlowOutput +func (c *MediaConnect) UpdateFlowOutputRequest(input *UpdateFlowOutputInput) (req *request.Request, output *UpdateFlowOutputOutput) { + op := &request.Operation{ + Name: opUpdateFlowOutput, + HTTPMethod: "PUT", + HTTPPath: "/v1/flows/{flowArn}/outputs/{outputArn}", + } + + if input == nil { + input = &UpdateFlowOutputInput{} + } + + output = &UpdateFlowOutputOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateFlowOutput API operation for AWS MediaConnect. +// +// Updates an existing flow output. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation UpdateFlowOutput for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * InternalServerErrorException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ForbiddenException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * NotFoundException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ServiceUnavailableException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * TooManyRequestsException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlowOutput +func (c *MediaConnect) UpdateFlowOutput(input *UpdateFlowOutputInput) (*UpdateFlowOutputOutput, error) { + req, out := c.UpdateFlowOutputRequest(input) + return out, req.Send() +} + +// UpdateFlowOutputWithContext is the same as UpdateFlowOutput with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateFlowOutput for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) UpdateFlowOutputWithContext(ctx aws.Context, input *UpdateFlowOutputInput, opts ...request.Option) (*UpdateFlowOutputOutput, error) { + req, out := c.UpdateFlowOutputRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opUpdateFlowSource = "UpdateFlowSource" + +// UpdateFlowSourceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFlowSource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateFlowSource for more information on using the UpdateFlowSource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateFlowSourceRequest method. +// req, resp := client.UpdateFlowSourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlowSource +func (c *MediaConnect) UpdateFlowSourceRequest(input *UpdateFlowSourceInput) (req *request.Request, output *UpdateFlowSourceOutput) { + op := &request.Operation{ + Name: opUpdateFlowSource, + HTTPMethod: "PUT", + HTTPPath: "/v1/flows/{flowArn}/source/{sourceArn}", + } + + if input == nil { + input = &UpdateFlowSourceInput{} + } + + output = &UpdateFlowSourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateFlowSource API operation for AWS MediaConnect. +// +// Updates the source of a flow. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS MediaConnect's +// API operation UpdateFlowSource for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * InternalServerErrorException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ForbiddenException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * NotFoundException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * ServiceUnavailableException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +// +// * TooManyRequestsException +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. 
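The ...Request methods documented above exist for exactly the customization case the comments mention: the returned request.Request can be adjusted (headers, context, options) before Send is called, and the paired output struct is only valid after Send succeeds. A sketch using UpdateFlowSourceRequest; the FlowArn and SourceArn fields are assumptions read off the /v1/flows/{flowArn}/source/{sourceArn} path above, and the header name is illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

// updateFlowSourceWithHeader shows the Request/Send pattern: build the request,
// adjust it, then call Send. The client is assumed to be built as in the
// earlier sketches.
func updateFlowSourceWithHeader(client *mediaconnect.MediaConnect, flowARN, sourceARN string) error {
	req, out := client.UpdateFlowSourceRequest(&mediaconnect.UpdateFlowSourceInput{
		FlowArn:   aws.String(flowARN),   // assumed fields, taken from the
		SourceArn: aws.String(sourceARN), // URI template shown above
	})

	// Inject a custom header before the request is sent.
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo-123") // illustrative header name

	// out is not valid until Send returns without error.
	if err := req.Send(); err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}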
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediaconnect-2018-11-14/UpdateFlowSource +func (c *MediaConnect) UpdateFlowSource(input *UpdateFlowSourceInput) (*UpdateFlowSourceOutput, error) { + req, out := c.UpdateFlowSourceRequest(input) + return out, req.Send() +} + +// UpdateFlowSourceWithContext is the same as UpdateFlowSource with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateFlowSource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaConnect) UpdateFlowSourceWithContext(ctx aws.Context, input *UpdateFlowSourceInput, opts ...request.Option) (*UpdateFlowSourceOutput, error) { + req, out := c.UpdateFlowSourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +type AddFlowOutputs420Exception struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s AddFlowOutputs420Exception) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation func (s AddFlowOutputs420Exception) GoString() string { return s.String() } -func newErrorAddFlowOutputs420Exception(v protocol.ResponseMetadata) error { - return &AddFlowOutputs420Exception{ - respMetadata: v, +func newErrorAddFlowOutputs420Exception(v protocol.ResponseMetadata) error { + return &AddFlowOutputs420Exception{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AddFlowOutputs420Exception) Code() string { + return "AddFlowOutputs420Exception" +} + +// Message returns the exception's message. +func (s *AddFlowOutputs420Exception) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AddFlowOutputs420Exception) OrigErr() error { + return nil +} + +func (s *AddFlowOutputs420Exception) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AddFlowOutputs420Exception) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AddFlowOutputs420Exception) RequestID() string { + return s.RespMetadata.RequestID +} + +// Adds outputs to an existing flow. You can create up to 50 outputs per flow. +type AddFlowOutputsInput struct { + _ struct{} `type:"structure"` + + // FlowArn is a required field + FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` + + // A list of outputs that you want to add. 
+ // + // Outputs is a required field + Outputs []*AddOutputRequest `locationName:"outputs" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddFlowOutputsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddFlowOutputsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddFlowOutputsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddFlowOutputsInput"} + if s.FlowArn == nil { + invalidParams.Add(request.NewErrParamRequired("FlowArn")) + } + if s.FlowArn != nil && len(*s.FlowArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) + } + if s.Outputs == nil { + invalidParams.Add(request.NewErrParamRequired("Outputs")) + } + if s.Outputs != nil { + for i, v := range s.Outputs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Outputs", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFlowArn sets the FlowArn field's value. +func (s *AddFlowOutputsInput) SetFlowArn(v string) *AddFlowOutputsInput { + s.FlowArn = &v + return s +} + +// SetOutputs sets the Outputs field's value. +func (s *AddFlowOutputsInput) SetOutputs(v []*AddOutputRequest) *AddFlowOutputsInput { + s.Outputs = v + return s +} + +// The result of a successful AddOutput request. The response includes the details +// of the newly added outputs. +type AddFlowOutputsOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the flow that these outputs were added to. + FlowArn *string `locationName:"flowArn" type:"string"` + + // The details of the newly added outputs. + Outputs []*Output `locationName:"outputs" type:"list"` +} + +// String returns the string representation +func (s AddFlowOutputsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddFlowOutputsOutput) GoString() string { + return s.String() +} + +// SetFlowArn sets the FlowArn field's value. +func (s *AddFlowOutputsOutput) SetFlowArn(v string) *AddFlowOutputsOutput { + s.FlowArn = &v + return s +} + +// SetOutputs sets the Outputs field's value. +func (s *AddFlowOutputsOutput) SetOutputs(v []*Output) *AddFlowOutputsOutput { + s.Outputs = v + return s +} + +// Adds sources to an existing flow. +type AddFlowSourcesInput struct { + _ struct{} `type:"structure"` + + // FlowArn is a required field + FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` + + // A list of sources that you want to add. + // + // Sources is a required field + Sources []*SetSourceRequest `locationName:"sources" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddFlowSourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddFlowSourcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AddFlowSourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddFlowSourcesInput"} + if s.FlowArn == nil { + invalidParams.Add(request.NewErrParamRequired("FlowArn")) + } + if s.FlowArn != nil && len(*s.FlowArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) + } + if s.Sources == nil { + invalidParams.Add(request.NewErrParamRequired("Sources")) + } + if s.Sources != nil { + for i, v := range s.Sources { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Sources", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFlowArn sets the FlowArn field's value. +func (s *AddFlowSourcesInput) SetFlowArn(v string) *AddFlowSourcesInput { + s.FlowArn = &v + return s +} + +// SetSources sets the Sources field's value. +func (s *AddFlowSourcesInput) SetSources(v []*SetSourceRequest) *AddFlowSourcesInput { + s.Sources = v + return s +} + +// The result of a successful AddFlowSources request. The response includes +// the details of the newly added sources. +type AddFlowSourcesOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the flow that these sources were added to. + FlowArn *string `locationName:"flowArn" type:"string"` + + // The details of the newly added sources. + Sources []*Source `locationName:"sources" type:"list"` +} + +// String returns the string representation +func (s AddFlowSourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddFlowSourcesOutput) GoString() string { + return s.String() +} + +// SetFlowArn sets the FlowArn field's value. +func (s *AddFlowSourcesOutput) SetFlowArn(v string) *AddFlowSourcesOutput { + s.FlowArn = &v + return s +} + +// SetSources sets the Sources field's value. +func (s *AddFlowSourcesOutput) SetSources(v []*Source) *AddFlowSourcesOutput { + s.Sources = v + return s +} + +// Adds VPC interfaces to an existing flow. +type AddFlowVpcInterfacesInput struct { + _ struct{} `type:"structure"` + + // FlowArn is a required field + FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` + + // A list of VPC interfaces that you want to add. + // + // VpcInterfaces is a required field + VpcInterfaces []*VpcInterfaceRequest `locationName:"vpcInterfaces" type:"list" required:"true"` +} + +// String returns the string representation +func (s AddFlowVpcInterfacesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddFlowVpcInterfacesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AddFlowVpcInterfacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddFlowVpcInterfacesInput"} + if s.FlowArn == nil { + invalidParams.Add(request.NewErrParamRequired("FlowArn")) + } + if s.FlowArn != nil && len(*s.FlowArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) + } + if s.VpcInterfaces == nil { + invalidParams.Add(request.NewErrParamRequired("VpcInterfaces")) + } + if s.VpcInterfaces != nil { + for i, v := range s.VpcInterfaces { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "VpcInterfaces", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFlowArn sets the FlowArn field's value. +func (s *AddFlowVpcInterfacesInput) SetFlowArn(v string) *AddFlowVpcInterfacesInput { + s.FlowArn = &v + return s +} + +// SetVpcInterfaces sets the VpcInterfaces field's value. +func (s *AddFlowVpcInterfacesInput) SetVpcInterfaces(v []*VpcInterfaceRequest) *AddFlowVpcInterfacesInput { + s.VpcInterfaces = v + return s +} + +// The result of a successful AddFlowVpcInterfaces request. The response includes +// the details of the newly added VPC interfaces. +type AddFlowVpcInterfacesOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the flow that these VPC interfaces were added to. + FlowArn *string `locationName:"flowArn" type:"string"` + + // The details of the newly added VPC interfaces. + VpcInterfaces []*VpcInterface `locationName:"vpcInterfaces" type:"list"` +} + +// String returns the string representation +func (s AddFlowVpcInterfacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddFlowVpcInterfacesOutput) GoString() string { + return s.String() +} + +// SetFlowArn sets the FlowArn field's value. +func (s *AddFlowVpcInterfacesOutput) SetFlowArn(v string) *AddFlowVpcInterfacesOutput { + s.FlowArn = &v + return s +} + +// SetVpcInterfaces sets the VpcInterfaces field's value. +func (s *AddFlowVpcInterfacesOutput) SetVpcInterfaces(v []*VpcInterface) *AddFlowVpcInterfacesOutput { + s.VpcInterfaces = v + return s +} + +// The output that you want to add to this flow. +type AddOutputRequest struct { + _ struct{} `type:"structure"` + + // The range of IP addresses that should be allowed to initiate output requests + // to this flow. These IP addresses should be in the form of a Classless Inter-Domain + // Routing (CIDR) block; for example, 10.0.0.0/16. + CidrAllowList []*string `locationName:"cidrAllowList" type:"list"` + + // A description of the output. This description appears only on the AWS Elemental + // MediaConnect console and will not be seen by the end user. + Description *string `locationName:"description" type:"string"` + + // The IP address from which video will be sent to output destinations. + Destination *string `locationName:"destination" type:"string"` + + // The type of key used for the encryption. If no keyType is provided, the service + // will use the default setting (static-key). + Encryption *Encryption `locationName:"encryption" type:"structure"` + + // The maximum latency in milliseconds for Zixi-based streams. + MaxLatency *int64 `locationName:"maxLatency" type:"integer"` + + // The name of the output. This value must be unique within the current flow. + Name *string `locationName:"name" type:"string"` + + // The port to use when content is distributed to this output. 
+ Port *int64 `locationName:"port" type:"integer"` + + // The protocol to use for the output. + // + // Protocol is a required field + Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"Protocol"` + + // The remote ID for the Zixi-pull output stream. + RemoteId *string `locationName:"remoteId" type:"string"` + + // The smoothing latency in milliseconds for RIST, RTP, and RTP-FEC streams. + SmoothingLatency *int64 `locationName:"smoothingLatency" type:"integer"` + + // The stream ID that you want to use for this transport. This parameter applies + // only to Zixi-based streams. + StreamId *string `locationName:"streamId" type:"string"` + + // The name of the VPC interface attachment to use for this output. + VpcInterfaceAttachment *VpcInterfaceAttachment `locationName:"vpcInterfaceAttachment" type:"structure"` +} + +// String returns the string representation +func (s AddOutputRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddOutputRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddOutputRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddOutputRequest"} + if s.Protocol == nil { + invalidParams.Add(request.NewErrParamRequired("Protocol")) + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCidrAllowList sets the CidrAllowList field's value. +func (s *AddOutputRequest) SetCidrAllowList(v []*string) *AddOutputRequest { + s.CidrAllowList = v + return s +} + +// SetDescription sets the Description field's value. +func (s *AddOutputRequest) SetDescription(v string) *AddOutputRequest { + s.Description = &v + return s +} + +// SetDestination sets the Destination field's value. +func (s *AddOutputRequest) SetDestination(v string) *AddOutputRequest { + s.Destination = &v + return s +} + +// SetEncryption sets the Encryption field's value. +func (s *AddOutputRequest) SetEncryption(v *Encryption) *AddOutputRequest { + s.Encryption = v + return s +} + +// SetMaxLatency sets the MaxLatency field's value. +func (s *AddOutputRequest) SetMaxLatency(v int64) *AddOutputRequest { + s.MaxLatency = &v + return s +} + +// SetName sets the Name field's value. +func (s *AddOutputRequest) SetName(v string) *AddOutputRequest { + s.Name = &v + return s +} + +// SetPort sets the Port field's value. +func (s *AddOutputRequest) SetPort(v int64) *AddOutputRequest { + s.Port = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *AddOutputRequest) SetProtocol(v string) *AddOutputRequest { + s.Protocol = &v + return s +} + +// SetRemoteId sets the RemoteId field's value. +func (s *AddOutputRequest) SetRemoteId(v string) *AddOutputRequest { + s.RemoteId = &v + return s +} + +// SetSmoothingLatency sets the SmoothingLatency field's value. +func (s *AddOutputRequest) SetSmoothingLatency(v int64) *AddOutputRequest { + s.SmoothingLatency = &v + return s +} + +// SetStreamId sets the StreamId field's value. +func (s *AddOutputRequest) SetStreamId(v string) *AddOutputRequest { + s.StreamId = &v + return s +} + +// SetVpcInterfaceAttachment sets the VpcInterfaceAttachment field's value. 
+func (s *AddOutputRequest) SetVpcInterfaceAttachment(v *VpcInterfaceAttachment) *AddOutputRequest { + s.VpcInterfaceAttachment = v + return s +} + +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +type BadRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BadRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BadRequestException) GoString() string { + return s.String() +} + +func newErrorBadRequestException(v protocol.ResponseMetadata) error { + return &BadRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *BadRequestException) Code() string { + return "BadRequestException" +} + +// Message returns the exception's message. +func (s *BadRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BadRequestException) OrigErr() error { + return nil +} + +func (s *BadRequestException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +type CreateFlow420Exception struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s CreateFlow420Exception) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFlow420Exception) GoString() string { + return s.String() +} + +func newErrorCreateFlow420Exception(v protocol.ResponseMetadata) error { + return &CreateFlow420Exception{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *CreateFlow420Exception) Code() string { + return "CreateFlow420Exception" +} + +// Message returns the exception's message. +func (s *CreateFlow420Exception) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *CreateFlow420Exception) OrigErr() error { + return nil +} + +func (s *CreateFlow420Exception) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *CreateFlow420Exception) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *CreateFlow420Exception) RequestID() string { + return s.RespMetadata.RequestID +} + +// Creates a new flow. The request must include one source. The request optionally +// can include outputs (up to 50) and one entitlement. 
+type CreateFlowInput struct { + _ struct{} `type:"structure"` + + // The Availability Zone that you want to create the flow in. These options + // are limited to the Availability Zones within the current AWS Region. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The entitlements that you want to grant on a flow. + Entitlements []*GrantEntitlementRequest `locationName:"entitlements" type:"list"` + + // The name of the flow. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // The outputs that you want to add to this flow. + Outputs []*AddOutputRequest `locationName:"outputs" type:"list"` + + // The settings for the source of the flow. + Source *SetSourceRequest `locationName:"source" type:"structure"` + + // The settings for source failover + SourceFailoverConfig *FailoverConfig `locationName:"sourceFailoverConfig" type:"structure"` + + Sources []*SetSourceRequest `locationName:"sources" type:"list"` + + // The VPC interfaces you want on the flow. + VpcInterfaces []*VpcInterfaceRequest `locationName:"vpcInterfaces" type:"list"` +} + +// String returns the string representation +func (s CreateFlowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFlowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateFlowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFlowInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Entitlements != nil { + for i, v := range s.Entitlements { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entitlements", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Outputs != nil { + for i, v := range s.Outputs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Outputs", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Source != nil { + if err := s.Source.Validate(); err != nil { + invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) + } + } + if s.Sources != nil { + for i, v := range s.Sources { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Sources", i), err.(request.ErrInvalidParams)) + } + } + } + if s.VpcInterfaces != nil { + for i, v := range s.VpcInterfaces { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "VpcInterfaces", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *CreateFlowInput) SetAvailabilityZone(v string) *CreateFlowInput { + s.AvailabilityZone = &v + return s +} + +// SetEntitlements sets the Entitlements field's value. +func (s *CreateFlowInput) SetEntitlements(v []*GrantEntitlementRequest) *CreateFlowInput { + s.Entitlements = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateFlowInput) SetName(v string) *CreateFlowInput { + s.Name = &v + return s +} + +// SetOutputs sets the Outputs field's value. +func (s *CreateFlowInput) SetOutputs(v []*AddOutputRequest) *CreateFlowInput { + s.Outputs = v + return s +} + +// SetSource sets the Source field's value. 
+func (s *CreateFlowInput) SetSource(v *SetSourceRequest) *CreateFlowInput { + s.Source = v + return s +} + +// SetSourceFailoverConfig sets the SourceFailoverConfig field's value. +func (s *CreateFlowInput) SetSourceFailoverConfig(v *FailoverConfig) *CreateFlowInput { + s.SourceFailoverConfig = v + return s +} + +// SetSources sets the Sources field's value. +func (s *CreateFlowInput) SetSources(v []*SetSourceRequest) *CreateFlowInput { + s.Sources = v + return s +} + +// SetVpcInterfaces sets the VpcInterfaces field's value. +func (s *CreateFlowInput) SetVpcInterfaces(v []*VpcInterfaceRequest) *CreateFlowInput { + s.VpcInterfaces = v + return s +} + +// The result of a successful CreateFlow request. +type CreateFlowOutput struct { + _ struct{} `type:"structure"` + + // The settings for a flow, including its source, outputs, and entitlements. + Flow *Flow `locationName:"flow" type:"structure"` +} + +// String returns the string representation +func (s CreateFlowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFlowOutput) GoString() string { + return s.String() +} + +// SetFlow sets the Flow field's value. +func (s *CreateFlowOutput) SetFlow(v *Flow) *CreateFlowOutput { + s.Flow = v + return s +} + +type DeleteFlowInput struct { + _ struct{} `type:"structure"` + + // FlowArn is a required field + FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteFlowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFlowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFlowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFlowInput"} + if s.FlowArn == nil { + invalidParams.Add(request.NewErrParamRequired("FlowArn")) + } + if s.FlowArn != nil && len(*s.FlowArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFlowArn sets the FlowArn field's value. +func (s *DeleteFlowInput) SetFlowArn(v string) *DeleteFlowInput { + s.FlowArn = &v + return s +} + +// The result of a successful DeleteFlow request. +type DeleteFlowOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the flow that was deleted. + FlowArn *string `locationName:"flowArn" type:"string"` + + // The status of the flow when the DeleteFlow process begins. + Status *string `locationName:"status" type:"string" enum:"Status"` +} + +// String returns the string representation +func (s DeleteFlowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFlowOutput) GoString() string { + return s.String() +} + +// SetFlowArn sets the FlowArn field's value. +func (s *DeleteFlowOutput) SetFlowArn(v string) *DeleteFlowOutput { + s.FlowArn = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *DeleteFlowOutput) SetStatus(v string) *DeleteFlowOutput { + s.Status = &v + return s +} + +type DescribeFlowInput struct { + _ struct{} `type:"structure"` + + // FlowArn is a required field + FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeFlowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFlowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFlowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFlowInput"} + if s.FlowArn == nil { + invalidParams.Add(request.NewErrParamRequired("FlowArn")) + } + if s.FlowArn != nil && len(*s.FlowArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFlowArn sets the FlowArn field's value. +func (s *DescribeFlowInput) SetFlowArn(v string) *DescribeFlowInput { + s.FlowArn = &v + return s +} + +// The result of a successful DescribeFlow request. +type DescribeFlowOutput struct { + _ struct{} `type:"structure"` + + // The settings for a flow, including its source, outputs, and entitlements. + Flow *Flow `locationName:"flow" type:"structure"` + + // Messages that provide the state of the flow. + Messages *Messages `locationName:"messages" type:"structure"` +} + +// String returns the string representation +func (s DescribeFlowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFlowOutput) GoString() string { + return s.String() +} + +// SetFlow sets the Flow field's value. +func (s *DescribeFlowOutput) SetFlow(v *Flow) *DescribeFlowOutput { + s.Flow = v + return s +} + +// SetMessages sets the Messages field's value. +func (s *DescribeFlowOutput) SetMessages(v *Messages) *DescribeFlowOutput { + s.Messages = v + return s +} + +type DescribeOfferingInput struct { + _ struct{} `type:"structure"` + + // OfferingArn is a required field + OfferingArn *string `location:"uri" locationName:"offeringArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeOfferingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOfferingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeOfferingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeOfferingInput"} + if s.OfferingArn == nil { + invalidParams.Add(request.NewErrParamRequired("OfferingArn")) + } + if s.OfferingArn != nil && len(*s.OfferingArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OfferingArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOfferingArn sets the OfferingArn field's value. +func (s *DescribeOfferingInput) SetOfferingArn(v string) *DescribeOfferingInput { + s.OfferingArn = &v + return s +} + +// The result of a successful DescribeOffering request. +type DescribeOfferingOutput struct { + _ struct{} `type:"structure"` + + // A savings plan that reserves a certain amount of outbound bandwidth usage + // at a discounted rate each month over a period of time. 
+ Offering *Offering `locationName:"offering" type:"structure"` +} + +// String returns the string representation +func (s DescribeOfferingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeOfferingOutput) GoString() string { + return s.String() +} + +// SetOffering sets the Offering field's value. +func (s *DescribeOfferingOutput) SetOffering(v *Offering) *DescribeOfferingOutput { + s.Offering = v + return s +} + +type DescribeReservationInput struct { + _ struct{} `type:"structure"` + + // ReservationArn is a required field + ReservationArn *string `location:"uri" locationName:"reservationArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeReservationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReservationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReservationInput"} + if s.ReservationArn == nil { + invalidParams.Add(request.NewErrParamRequired("ReservationArn")) + } + if s.ReservationArn != nil && len(*s.ReservationArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReservationArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReservationArn sets the ReservationArn field's value. +func (s *DescribeReservationInput) SetReservationArn(v string) *DescribeReservationInput { + s.ReservationArn = &v + return s +} + +// The result of a successful DescribeReservation request. +type DescribeReservationOutput struct { + _ struct{} `type:"structure"` + + // A pricing agreement for a discounted rate for a specific outbound bandwidth + // that your MediaConnect account will use each month over a specific time period. + // The discounted rate in the reservation applies to outbound bandwidth for + // all flows from your account until your account reaches the amount of bandwidth + // in your reservation. If you use more outbound bandwidth than the agreed upon + // amount in a single month, the overage is charged at the on-demand rate. + Reservation *Reservation `locationName:"reservation" type:"structure"` +} + +// String returns the string representation +func (s DescribeReservationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReservationOutput) GoString() string { + return s.String() +} + +// SetReservation sets the Reservation field's value. +func (s *DescribeReservationOutput) SetReservation(v *Reservation) *DescribeReservationOutput { + s.Reservation = v + return s +} + +// Information about the encryption of the flow. +type Encryption struct { + _ struct{} `type:"structure"` + + // The type of algorithm that is used for the encryption (such as aes128, aes192, + // or aes256). + // + // Algorithm is a required field + Algorithm *string `locationName:"algorithm" type:"string" required:"true" enum:"Algorithm"` + + // A 128-bit, 16-byte hex value represented by a 32-character string, to be + // used with the key for encrypting content. This parameter is not valid for + // static key encryption. 
+ ConstantInitializationVector *string `locationName:"constantInitializationVector" type:"string"` + + // The value of one of the devices that you configured with your digital rights + // management (DRM) platform key provider. This parameter is required for SPEKE + // encryption and is not valid for static key encryption. + DeviceId *string `locationName:"deviceId" type:"string"` + + // The type of key that is used for the encryption. If no keyType is provided, + // the service will use the default setting (static-key). + KeyType *string `locationName:"keyType" type:"string" enum:"KeyType"` + + // The AWS Region that the API Gateway proxy endpoint was created in. This parameter + // is required for SPEKE encryption and is not valid for static key encryption. + Region *string `locationName:"region" type:"string"` + + // An identifier for the content. The service sends this value to the key server + // to identify the current endpoint. The resource ID is also known as the content + // ID. This parameter is required for SPEKE encryption and is not valid for + // static key encryption. + ResourceId *string `locationName:"resourceId" type:"string"` + + // The ARN of the role that you created during setup (when you set up AWS Elemental + // MediaConnect as a trusted entity). + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // The ARN of the secret that you created in AWS Secrets Manager to store the + // encryption key. This parameter is required for static key encryption and + // is not valid for SPEKE encryption. + SecretArn *string `locationName:"secretArn" type:"string"` + + // The URL from the API Gateway proxy that you set up to talk to your key server. + // This parameter is required for SPEKE encryption and is not valid for static + // key encryption. + Url *string `locationName:"url" type:"string"` +} + +// String returns the string representation +func (s Encryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Encryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Encryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Encryption"} + if s.Algorithm == nil { + invalidParams.Add(request.NewErrParamRequired("Algorithm")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams } + return nil } -// Code returns the exception type name. -func (s AddFlowOutputs420Exception) Code() string { - return "AddFlowOutputs420Exception" +// SetAlgorithm sets the Algorithm field's value. +func (s *Encryption) SetAlgorithm(v string) *Encryption { + s.Algorithm = &v + return s } -// Message returns the exception's message. -func (s AddFlowOutputs420Exception) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetConstantInitializationVector sets the ConstantInitializationVector field's value. +func (s *Encryption) SetConstantInitializationVector(v string) *Encryption { + s.ConstantInitializationVector = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s AddFlowOutputs420Exception) OrigErr() error { - return nil +// SetDeviceId sets the DeviceId field's value. 
+func (s *Encryption) SetDeviceId(v string) *Encryption { + s.DeviceId = &v + return s } -func (s AddFlowOutputs420Exception) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetKeyType sets the KeyType field's value. +func (s *Encryption) SetKeyType(v string) *Encryption { + s.KeyType = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s AddFlowOutputs420Exception) StatusCode() int { - return s.respMetadata.StatusCode +// SetRegion sets the Region field's value. +func (s *Encryption) SetRegion(v string) *Encryption { + s.Region = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *Encryption) SetResourceId(v string) *Encryption { + s.ResourceId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *Encryption) SetRoleArn(v string) *Encryption { + s.RoleArn = &v + return s +} + +// SetSecretArn sets the SecretArn field's value. +func (s *Encryption) SetSecretArn(v string) *Encryption { + s.SecretArn = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *Encryption) SetUrl(v string) *Encryption { + s.Url = &v + return s +} + +// The settings for a flow entitlement. +type Entitlement struct { + _ struct{} `type:"structure"` + + // Percentage from 0-100 of the data transfer cost to be billed to the subscriber. + DataTransferSubscriberFeePercent *int64 `locationName:"dataTransferSubscriberFeePercent" type:"integer"` + + // A description of the entitlement. + Description *string `locationName:"description" type:"string"` + + // The type of encryption that will be used on the output that is associated + // with this entitlement. + Encryption *Encryption `locationName:"encryption" type:"structure"` + + // The ARN of the entitlement. + // + // EntitlementArn is a required field + EntitlementArn *string `locationName:"entitlementArn" type:"string" required:"true"` + + // An indication of whether the entitlement is enabled. + EntitlementStatus *string `locationName:"entitlementStatus" type:"string" enum:"EntitlementStatus"` + + // The name of the entitlement. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // The AWS account IDs that you want to share your content with. The receiving + // accounts (subscribers) will be allowed to create their own flow using your + // content as the source. + // + // Subscribers is a required field + Subscribers []*string `locationName:"subscribers" type:"list" required:"true"` +} + +// String returns the string representation +func (s Entitlement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Entitlement) GoString() string { + return s.String() +} + +// SetDataTransferSubscriberFeePercent sets the DataTransferSubscriberFeePercent field's value. +func (s *Entitlement) SetDataTransferSubscriberFeePercent(v int64) *Entitlement { + s.DataTransferSubscriberFeePercent = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Entitlement) SetDescription(v string) *Entitlement { + s.Description = &v + return s +} + +// SetEncryption sets the Encryption field's value. +func (s *Entitlement) SetEncryption(v *Encryption) *Entitlement { + s.Encryption = v + return s +} + +// SetEntitlementArn sets the EntitlementArn field's value. 
+func (s *Entitlement) SetEntitlementArn(v string) *Entitlement { + s.EntitlementArn = &v + return s +} + +// SetEntitlementStatus sets the EntitlementStatus field's value. +func (s *Entitlement) SetEntitlementStatus(v string) *Entitlement { + s.EntitlementStatus = &v + return s +} + +// SetName sets the Name field's value. +func (s *Entitlement) SetName(v string) *Entitlement { + s.Name = &v + return s +} + +// SetSubscribers sets the Subscribers field's value. +func (s *Entitlement) SetSubscribers(v []*string) *Entitlement { + s.Subscribers = v + return s +} + +// The settings for source failover +type FailoverConfig struct { + _ struct{} `type:"structure"` + + // Search window time to look for dash-7 packets + RecoveryWindow *int64 `locationName:"recoveryWindow" type:"integer"` + + State *string `locationName:"state" type:"string" enum:"State"` +} + +// String returns the string representation +func (s FailoverConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailoverConfig) GoString() string { + return s.String() +} + +// SetRecoveryWindow sets the RecoveryWindow field's value. +func (s *FailoverConfig) SetRecoveryWindow(v int64) *FailoverConfig { + s.RecoveryWindow = &v + return s +} + +// SetState sets the State field's value. +func (s *FailoverConfig) SetState(v string) *FailoverConfig { + s.State = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s AddFlowOutputs420Exception) RequestID() string { - return s.respMetadata.RequestID -} +// The settings for a flow, including its source, outputs, and entitlements. +type Flow struct { + _ struct{} `type:"structure"` + + // The Availability Zone that you want to create the flow in. These options + // are limited to the Availability Zones within the current AWS. + // + // AvailabilityZone is a required field + AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` + + // A description of the flow. This value is not used or seen outside of the + // current AWS Elemental MediaConnect account. + Description *string `locationName:"description" type:"string"` + + // The IP address from which video will be sent to output destinations. + EgressIp *string `locationName:"egressIp" type:"string"` + + // The entitlements in this flow. + // + // Entitlements is a required field + Entitlements []*Entitlement `locationName:"entitlements" type:"list" required:"true"` + + // The Amazon Resource Name (ARN), a unique identifier for any AWS resource, + // of the flow. + // + // FlowArn is a required field + FlowArn *string `locationName:"flowArn" type:"string" required:"true"` + + // The name of the flow. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // The outputs in this flow. + // + // Outputs is a required field + Outputs []*Output `locationName:"outputs" type:"list" required:"true"` + + // The settings for the source of the flow. + // + // Source is a required field + Source *Source `locationName:"source" type:"structure" required:"true"` -// Adds outputs to an existing flow. You can create up to 20 outputs per flow. 
-type AddFlowOutputsInput struct { - _ struct{} `type:"structure"` + // The settings for source failover + SourceFailoverConfig *FailoverConfig `locationName:"sourceFailoverConfig" type:"structure"` - // FlowArn is a required field - FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` + Sources []*Source `locationName:"sources" type:"list"` - // A list of outputs that you want to add. + // The current status of the flow. // - // Outputs is a required field - Outputs []*AddOutputRequest `locationName:"outputs" type:"list" required:"true"` + // Status is a required field + Status *string `locationName:"status" type:"string" required:"true" enum:"Status"` + + // The VPC Interfaces for this flow. + VpcInterfaces []*VpcInterface `locationName:"vpcInterfaces" type:"list"` } // String returns the string representation -func (s AddFlowOutputsInput) String() string { +func (s Flow) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AddFlowOutputsInput) GoString() string { +func (s Flow) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *AddFlowOutputsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AddFlowOutputsInput"} - if s.FlowArn == nil { - invalidParams.Add(request.NewErrParamRequired("FlowArn")) - } - if s.FlowArn != nil && len(*s.FlowArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) - } - if s.Outputs == nil { - invalidParams.Add(request.NewErrParamRequired("Outputs")) - } - if s.Outputs != nil { - for i, v := range s.Outputs { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Outputs", i), err.(request.ErrInvalidParams)) - } - } - } +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *Flow) SetAvailabilityZone(v string) *Flow { + s.AvailabilityZone = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDescription sets the Description field's value. +func (s *Flow) SetDescription(v string) *Flow { + s.Description = &v + return s +} + +// SetEgressIp sets the EgressIp field's value. +func (s *Flow) SetEgressIp(v string) *Flow { + s.EgressIp = &v + return s +} + +// SetEntitlements sets the Entitlements field's value. +func (s *Flow) SetEntitlements(v []*Entitlement) *Flow { + s.Entitlements = v + return s } // SetFlowArn sets the FlowArn field's value. -func (s *AddFlowOutputsInput) SetFlowArn(v string) *AddFlowOutputsInput { +func (s *Flow) SetFlowArn(v string) *Flow { s.FlowArn = &v return s } +// SetName sets the Name field's value. +func (s *Flow) SetName(v string) *Flow { + s.Name = &v + return s +} + // SetOutputs sets the Outputs field's value. -func (s *AddFlowOutputsInput) SetOutputs(v []*AddOutputRequest) *AddFlowOutputsInput { +func (s *Flow) SetOutputs(v []*Output) *Flow { s.Outputs = v return s } -// The result of a successful AddOutput request. The response includes the details -// of the newly added outputs. -type AddFlowOutputsOutput struct { - _ struct{} `type:"structure"` +// SetSource sets the Source field's value. +func (s *Flow) SetSource(v *Source) *Flow { + s.Source = v + return s +} - // The ARN of the flow that these outputs were added to. - FlowArn *string `locationName:"flowArn" type:"string"` +// SetSourceFailoverConfig sets the SourceFailoverConfig field's value. 
+func (s *Flow) SetSourceFailoverConfig(v *FailoverConfig) *Flow { + s.SourceFailoverConfig = v + return s +} - // The details of the newly added outputs. - Outputs []*Output `locationName:"outputs" type:"list"` +// SetSources sets the Sources field's value. +func (s *Flow) SetSources(v []*Source) *Flow { + s.Sources = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Flow) SetStatus(v string) *Flow { + s.Status = &v + return s +} + +// SetVpcInterfaces sets the VpcInterfaces field's value. +func (s *Flow) SetVpcInterfaces(v []*VpcInterface) *Flow { + s.VpcInterfaces = v + return s +} + +// Exception raised by AWS Elemental MediaConnect. See the error message and +// documentation for the operation for more information on the cause of this +// exception. +type ForbiddenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` } // String returns the string representation -func (s AddFlowOutputsOutput) String() string { +func (s ForbiddenException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AddFlowOutputsOutput) GoString() string { +func (s ForbiddenException) GoString() string { return s.String() } -// SetFlowArn sets the FlowArn field's value. -func (s *AddFlowOutputsOutput) SetFlowArn(v string) *AddFlowOutputsOutput { - s.FlowArn = &v - return s +func newErrorForbiddenException(v protocol.ResponseMetadata) error { + return &ForbiddenException{ + RespMetadata: v, + } } -// SetOutputs sets the Outputs field's value. -func (s *AddFlowOutputsOutput) SetOutputs(v []*Output) *AddFlowOutputsOutput { - s.Outputs = v - return s +// Code returns the exception type name. +func (s *ForbiddenException) Code() string { + return "ForbiddenException" } -// The output that you want to add to this flow. -type AddOutputRequest struct { - _ struct{} `type:"structure"` +// Message returns the exception's message. +func (s *ForbiddenException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} - // The range of IP addresses that should be allowed to initiate output requests - // to this flow. These IP addresses should be in the form of a Classless Inter-Domain - // Routing (CIDR) block; for example, 10.0.0.0/16. - CidrAllowList []*string `locationName:"cidrAllowList" type:"list"` +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ForbiddenException) OrigErr() error { + return nil +} - // A description of the output. This description appears only on the AWS Elemental - // MediaConnect console and will not be seen by the end user. - Description *string `locationName:"description" type:"string"` +func (s *ForbiddenException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} - // The IP address from which video will be sent to output destinations. - Destination *string `locationName:"destination" type:"string"` +// Status code returns the HTTP status code for the request's response error. +func (s *ForbiddenException) StatusCode() int { + return s.RespMetadata.StatusCode +} - // The type of key used for the encryption. If no keyType is provided, the service - // will use the default setting (static-key). - Encryption *Encryption `locationName:"encryption" type:"structure"` +// RequestID returns the service's response RequestID for request. 
+func (s *ForbiddenException) RequestID() string { + return s.RespMetadata.RequestID +} - // The maximum latency in milliseconds for Zixi-based streams. - MaxLatency *int64 `locationName:"maxLatency" type:"integer"` +// The entitlements that you want to grant on a flow. +type GrantEntitlementRequest struct { + _ struct{} `type:"structure"` - // The name of the output. This value must be unique within the current flow. - Name *string `locationName:"name" type:"string"` + // Percentage from 0-100 of the data transfer cost to be billed to the subscriber. + DataTransferSubscriberFeePercent *int64 `locationName:"dataTransferSubscriberFeePercent" type:"integer"` - // The port to use when content is distributed to this output. - Port *int64 `locationName:"port" type:"integer"` + // A description of the entitlement. This description appears only on the AWS + // Elemental MediaConnect console and will not be seen by the subscriber or + // end user. + Description *string `locationName:"description" type:"string"` - // The protocol to use for the output. - // - // Protocol is a required field - Protocol *string `locationName:"protocol" type:"string" required:"true" enum:"Protocol"` + // The type of encryption that will be used on the output that is associated + // with this entitlement. + Encryption *Encryption `locationName:"encryption" type:"structure"` - // The remote ID for the Zixi-pull output stream. - RemoteId *string `locationName:"remoteId" type:"string"` + // An indication of whether the new entitlement should be enabled or disabled + // as soon as it is created. If you don’t specify the entitlementStatus field + // in your request, MediaConnect sets it to ENABLED. + EntitlementStatus *string `locationName:"entitlementStatus" type:"string" enum:"EntitlementStatus"` - // The smoothing latency in milliseconds for RIST, RTP, and RTP-FEC streams. - SmoothingLatency *int64 `locationName:"smoothingLatency" type:"integer"` + // The name of the entitlement. This value must be unique within the current + // flow. + Name *string `locationName:"name" type:"string"` - // The stream ID that you want to use for this transport. This parameter applies - // only to Zixi-based streams. - StreamId *string `locationName:"streamId" type:"string"` + // The AWS account IDs that you want to share your content with. The receiving + // accounts (subscribers) will be allowed to create their own flows using your + // content as the source. + // + // Subscribers is a required field + Subscribers []*string `locationName:"subscribers" type:"list" required:"true"` } // String returns the string representation -func (s AddOutputRequest) String() string { +func (s GrantEntitlementRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AddOutputRequest) GoString() string { +func (s GrantEntitlementRequest) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *AddOutputRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AddOutputRequest"} - if s.Protocol == nil { - invalidParams.Add(request.NewErrParamRequired("Protocol")) +func (s *GrantEntitlementRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrantEntitlementRequest"} + if s.Subscribers == nil { + invalidParams.Add(request.NewErrParamRequired("Subscribers")) } if s.Encryption != nil { if err := s.Encryption.Validate(); err != nil { @@ -2121,105 +4623,75 @@ func (s *AddOutputRequest) Validate() error { return nil } -// SetCidrAllowList sets the CidrAllowList field's value. -func (s *AddOutputRequest) SetCidrAllowList(v []*string) *AddOutputRequest { - s.CidrAllowList = v +// SetDataTransferSubscriberFeePercent sets the DataTransferSubscriberFeePercent field's value. +func (s *GrantEntitlementRequest) SetDataTransferSubscriberFeePercent(v int64) *GrantEntitlementRequest { + s.DataTransferSubscriberFeePercent = &v return s } // SetDescription sets the Description field's value. -func (s *AddOutputRequest) SetDescription(v string) *AddOutputRequest { +func (s *GrantEntitlementRequest) SetDescription(v string) *GrantEntitlementRequest { s.Description = &v return s } -// SetDestination sets the Destination field's value. -func (s *AddOutputRequest) SetDestination(v string) *AddOutputRequest { - s.Destination = &v - return s -} - // SetEncryption sets the Encryption field's value. -func (s *AddOutputRequest) SetEncryption(v *Encryption) *AddOutputRequest { +func (s *GrantEntitlementRequest) SetEncryption(v *Encryption) *GrantEntitlementRequest { s.Encryption = v return s } -// SetMaxLatency sets the MaxLatency field's value. -func (s *AddOutputRequest) SetMaxLatency(v int64) *AddOutputRequest { - s.MaxLatency = &v +// SetEntitlementStatus sets the EntitlementStatus field's value. +func (s *GrantEntitlementRequest) SetEntitlementStatus(v string) *GrantEntitlementRequest { + s.EntitlementStatus = &v return s } // SetName sets the Name field's value. -func (s *AddOutputRequest) SetName(v string) *AddOutputRequest { +func (s *GrantEntitlementRequest) SetName(v string) *GrantEntitlementRequest { s.Name = &v return s } -// SetPort sets the Port field's value. -func (s *AddOutputRequest) SetPort(v int64) *AddOutputRequest { - s.Port = &v - return s -} - -// SetProtocol sets the Protocol field's value. -func (s *AddOutputRequest) SetProtocol(v string) *AddOutputRequest { - s.Protocol = &v - return s -} - -// SetRemoteId sets the RemoteId field's value. -func (s *AddOutputRequest) SetRemoteId(v string) *AddOutputRequest { - s.RemoteId = &v - return s -} - -// SetSmoothingLatency sets the SmoothingLatency field's value. -func (s *AddOutputRequest) SetSmoothingLatency(v int64) *AddOutputRequest { - s.SmoothingLatency = &v - return s -} - -// SetStreamId sets the StreamId field's value. -func (s *AddOutputRequest) SetStreamId(v string) *AddOutputRequest { - s.StreamId = &v +// SetSubscribers sets the Subscribers field's value. +func (s *GrantEntitlementRequest) SetSubscribers(v []*string) *GrantEntitlementRequest { + s.Subscribers = v return s } // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. 
-type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +type GrantFlowEntitlements420Exception struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation -func (s BadRequestException) String() string { +func (s GrantFlowEntitlements420Exception) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BadRequestException) GoString() string { +func (s GrantFlowEntitlements420Exception) GoString() string { return s.String() } -func newErrorBadRequestException(v protocol.ResponseMetadata) error { - return &BadRequestException{ - respMetadata: v, +func newErrorGrantFlowEntitlements420Exception(v protocol.ResponseMetadata) error { + return &GrantFlowEntitlements420Exception{ + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { - return "BadRequestException" +func (s *GrantFlowEntitlements420Exception) Code() string { + return "GrantFlowEntitlements420Exception" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *GrantFlowEntitlements420Exception) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2227,57 +4699,154 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *GrantFlowEntitlements420Exception) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *GrantFlowEntitlements420Exception) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *GrantFlowEntitlements420Exception) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *GrantFlowEntitlements420Exception) RequestID() string { + return s.RespMetadata.RequestID +} + +// Grants an entitlement on a flow. +type GrantFlowEntitlementsInput struct { + _ struct{} `type:"structure"` + + // The list of entitlements that you want to grant. + // + // Entitlements is a required field + Entitlements []*GrantEntitlementRequest `locationName:"entitlements" type:"list" required:"true"` + + // FlowArn is a required field + FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s GrantFlowEntitlementsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrantFlowEntitlementsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GrantFlowEntitlementsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrantFlowEntitlementsInput"} + if s.Entitlements == nil { + invalidParams.Add(request.NewErrParamRequired("Entitlements")) + } + if s.FlowArn == nil { + invalidParams.Add(request.NewErrParamRequired("FlowArn")) + } + if s.FlowArn != nil && len(*s.FlowArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) + } + if s.Entitlements != nil { + for i, v := range s.Entitlements { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entitlements", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEntitlements sets the Entitlements field's value. +func (s *GrantFlowEntitlementsInput) SetEntitlements(v []*GrantEntitlementRequest) *GrantFlowEntitlementsInput { + s.Entitlements = v + return s +} + +// SetFlowArn sets the FlowArn field's value. +func (s *GrantFlowEntitlementsInput) SetFlowArn(v string) *GrantFlowEntitlementsInput { + s.FlowArn = &v + return s +} + +// The entitlements that were just granted. +type GrantFlowEntitlementsOutput struct { + _ struct{} `type:"structure"` + + // The entitlements that were just granted. + Entitlements []*Entitlement `locationName:"entitlements" type:"list"` + + // The ARN of the flow that these entitlements were granted to. + FlowArn *string `locationName:"flowArn" type:"string"` +} + +// String returns the string representation +func (s GrantFlowEntitlementsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrantFlowEntitlementsOutput) GoString() string { + return s.String() +} + +// SetEntitlements sets the Entitlements field's value. +func (s *GrantFlowEntitlementsOutput) SetEntitlements(v []*Entitlement) *GrantFlowEntitlementsOutput { + s.Entitlements = v + return s +} + +// SetFlowArn sets the FlowArn field's value. +func (s *GrantFlowEntitlementsOutput) SetFlowArn(v string) *GrantFlowEntitlementsOutput { + s.FlowArn = &v + return s } // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. -type CreateFlow420Exception struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +type InternalServerErrorException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation -func (s CreateFlow420Exception) String() string { +func (s InternalServerErrorException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateFlow420Exception) GoString() string { +func (s InternalServerErrorException) GoString() string { return s.String() } -func newErrorCreateFlow420Exception(v protocol.ResponseMetadata) error { - return &CreateFlow420Exception{ - respMetadata: v, +func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { + return &InternalServerErrorException{ + RespMetadata: v, } } // Code returns the exception type name. -func (s CreateFlow420Exception) Code() string { - return "CreateFlow420Exception" +func (s *InternalServerErrorException) Code() string { + return "InternalServerErrorException" } // Message returns the exception's message. 
-func (s CreateFlow420Exception) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2285,93 +4854,47 @@ func (s CreateFlow420Exception) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CreateFlow420Exception) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s CreateFlow420Exception) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CreateFlow420Exception) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CreateFlow420Exception) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID } -// Creates a new flow. The request must include one source. The request optionally -// can include outputs (up to 20) and one entitlement. -type CreateFlowInput struct { +type ListEntitlementsInput struct { _ struct{} `type:"structure"` - // The Availability Zone that you want to create the flow in. These options - // are limited to the Availability Zones within the current AWS Region. - AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - - // The entitlements that you want to grant on a flow. - Entitlements []*GrantEntitlementRequest `locationName:"entitlements" type:"list"` - - // The name of the flow. - // - // Name is a required field - Name *string `locationName:"name" type:"string" required:"true"` - - // The outputs that you want to add to this flow. - Outputs []*AddOutputRequest `locationName:"outputs" type:"list"` + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // The settings for the source of the flow. - // - // Source is a required field - Source *SetSourceRequest `locationName:"source" type:"structure" required:"true"` + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s CreateFlowInput) String() string { +func (s ListEntitlementsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateFlowInput) GoString() string { +func (s ListEntitlementsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateFlowInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateFlowInput"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Source == nil { - invalidParams.Add(request.NewErrParamRequired("Source")) - } - if s.Entitlements != nil { - for i, v := range s.Entitlements { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entitlements", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Outputs != nil { - for i, v := range s.Outputs { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Outputs", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Source != nil { - if err := s.Source.Validate(); err != nil { - invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) - } +func (s *ListEntitlementsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListEntitlementsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -2380,85 +4903,161 @@ func (s *CreateFlowInput) Validate() error { return nil } -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *CreateFlowInput) SetAvailabilityZone(v string) *CreateFlowInput { - s.AvailabilityZone = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListEntitlementsInput) SetMaxResults(v int64) *ListEntitlementsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListEntitlementsInput) SetNextToken(v string) *ListEntitlementsInput { + s.NextToken = &v return s } +// The result of a successful ListEntitlements request. The response includes +// the ARN of each entitlement, the name of the associated flow, and the NextToken +// to use in a subsequent ListEntitlements request. +type ListEntitlementsOutput struct { + _ struct{} `type:"structure"` + + // A list of entitlements that have been granted to you from other AWS accounts. + Entitlements []*ListedEntitlement `locationName:"entitlements" type:"list"` + + // The token that identifies which batch of results that you want to see. For + // example, you submit a ListEntitlements request with MaxResults set at 5. + // The service returns the first batch of results (up to 5) and a NextToken + // value. To see the next batch of results, you can submit the ListEntitlements + // request a second time and specify the NextToken value. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListEntitlementsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEntitlementsOutput) GoString() string { + return s.String() +} + // SetEntitlements sets the Entitlements field's value. -func (s *CreateFlowInput) SetEntitlements(v []*GrantEntitlementRequest) *CreateFlowInput { +func (s *ListEntitlementsOutput) SetEntitlements(v []*ListedEntitlement) *ListEntitlementsOutput { s.Entitlements = v return s } -// SetName sets the Name field's value. -func (s *CreateFlowInput) SetName(v string) *CreateFlowInput { - s.Name = &v - return s +// SetNextToken sets the NextToken field's value. 
+func (s *ListEntitlementsOutput) SetNextToken(v string) *ListEntitlementsOutput { + s.NextToken = &v + return s +} + +type ListFlowsInput struct { + _ struct{} `type:"structure"` + + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListFlowsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFlowsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListFlowsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListFlowsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetOutputs sets the Outputs field's value. -func (s *CreateFlowInput) SetOutputs(v []*AddOutputRequest) *CreateFlowInput { - s.Outputs = v +// SetMaxResults sets the MaxResults field's value. +func (s *ListFlowsInput) SetMaxResults(v int64) *ListFlowsInput { + s.MaxResults = &v return s } -// SetSource sets the Source field's value. -func (s *CreateFlowInput) SetSource(v *SetSourceRequest) *CreateFlowInput { - s.Source = v +// SetNextToken sets the NextToken field's value. +func (s *ListFlowsInput) SetNextToken(v string) *ListFlowsInput { + s.NextToken = &v return s } -// The result of a successful CreateFlow request. -type CreateFlowOutput struct { +// The result of a successful ListFlows request. The response includes flow +// summaries and the NextToken to use in a subsequent ListFlows request. +type ListFlowsOutput struct { _ struct{} `type:"structure"` - // The settings for a flow, including its source, outputs, and entitlements. - Flow *Flow `locationName:"flow" type:"structure"` + // A list of flow summaries. + Flows []*ListedFlow `locationName:"flows" type:"list"` + + // The token that identifies which batch of results that you want to see. For + // example, you submit a ListFlows request with MaxResults set at 5. The service + // returns the first batch of results (up to 5) and a NextToken value. To see + // the next batch of results, you can submit the ListFlows request a second + // time and specify the NextToken value. + NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s CreateFlowOutput) String() string { +func (s ListFlowsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateFlowOutput) GoString() string { +func (s ListFlowsOutput) GoString() string { return s.String() } -// SetFlow sets the Flow field's value. -func (s *CreateFlowOutput) SetFlow(v *Flow) *CreateFlowOutput { - s.Flow = v +// SetFlows sets the Flows field's value. +func (s *ListFlowsOutput) SetFlows(v []*ListedFlow) *ListFlowsOutput { + s.Flows = v return s } -type DeleteFlowInput struct { +// SetNextToken sets the NextToken field's value. 
+func (s *ListFlowsOutput) SetNextToken(v string) *ListFlowsOutput { + s.NextToken = &v + return s +} + +type ListOfferingsInput struct { _ struct{} `type:"structure"` - // FlowArn is a required field - FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s DeleteFlowInput) String() string { +func (s ListOfferingsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteFlowInput) GoString() string { +func (s ListOfferingsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteFlowInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteFlowInput"} - if s.FlowArn == nil { - invalidParams.Add(request.NewErrParamRequired("FlowArn")) - } - if s.FlowArn != nil && len(*s.FlowArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) +func (s *ListOfferingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListOfferingsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -2467,70 +5066,83 @@ func (s *DeleteFlowInput) Validate() error { return nil } -// SetFlowArn sets the FlowArn field's value. -func (s *DeleteFlowInput) SetFlowArn(v string) *DeleteFlowInput { - s.FlowArn = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListOfferingsInput) SetMaxResults(v int64) *ListOfferingsInput { + s.MaxResults = &v return s } -// The result of a successful DeleteFlow request. -type DeleteFlowOutput struct { +// SetNextToken sets the NextToken field's value. +func (s *ListOfferingsInput) SetNextToken(v string) *ListOfferingsInput { + s.NextToken = &v + return s +} + +// The result of a successful ListOfferings request. The response includes the +// details of each offering that your account is eligible for. The response +// includes the following information for each offering: description, duration, +// outbound bandwidth, price, Amazon Resource Name (ARN), and the NextToken +// to use in a subsequent ListOfferings request. +type ListOfferingsOutput struct { _ struct{} `type:"structure"` - // The ARN of the flow that was deleted. - FlowArn *string `locationName:"flowArn" type:"string"` + // The token that identifies which batch of results that you want to see. For + // example, you submit a ListOfferings request with MaxResults set at 5. The + // service returns the first batch of results (up to 5) and a NextToken value. + // To see the next batch of results, you can submit the ListOfferings request + // a second time and specify the NextToken value. + NextToken *string `locationName:"nextToken" type:"string"` - // The status of the flow when the DeleteFlow process begins. - Status *string `locationName:"status" type:"string" enum:"Status"` + // A list of offerings that are available to this account in the current AWS + // Region. 
+ Offerings []*Offering `locationName:"offerings" type:"list"` } // String returns the string representation -func (s DeleteFlowOutput) String() string { +func (s ListOfferingsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteFlowOutput) GoString() string { +func (s ListOfferingsOutput) GoString() string { return s.String() } -// SetFlowArn sets the FlowArn field's value. -func (s *DeleteFlowOutput) SetFlowArn(v string) *DeleteFlowOutput { - s.FlowArn = &v +// SetNextToken sets the NextToken field's value. +func (s *ListOfferingsOutput) SetNextToken(v string) *ListOfferingsOutput { + s.NextToken = &v return s } -// SetStatus sets the Status field's value. -func (s *DeleteFlowOutput) SetStatus(v string) *DeleteFlowOutput { - s.Status = &v +// SetOfferings sets the Offerings field's value. +func (s *ListOfferingsOutput) SetOfferings(v []*Offering) *ListOfferingsOutput { + s.Offerings = v return s } -type DescribeFlowInput struct { +type ListReservationsInput struct { _ struct{} `type:"structure"` - // FlowArn is a required field - FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } // String returns the string representation -func (s DescribeFlowInput) String() string { +func (s ListReservationsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeFlowInput) GoString() string { +func (s ListReservationsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeFlowInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeFlowInput"} - if s.FlowArn == nil { - invalidParams.Add(request.NewErrParamRequired("FlowArn")) - } - if s.FlowArn != nil && len(*s.FlowArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) +func (s *ListReservationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListReservationsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -2539,114 +5151,85 @@ func (s *DescribeFlowInput) Validate() error { return nil } -// SetFlowArn sets the FlowArn field's value. -func (s *DescribeFlowInput) SetFlowArn(v string) *DescribeFlowInput { - s.FlowArn = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListReservationsInput) SetMaxResults(v int64) *ListReservationsInput { + s.MaxResults = &v return s } -// The result of a successful DescribeFlow request. -type DescribeFlowOutput struct { +// SetNextToken sets the NextToken field's value. +func (s *ListReservationsInput) SetNextToken(v string) *ListReservationsInput { + s.NextToken = &v + return s +} + +// The result of a successful ListReservations request. The response includes +// the details of each offering that your account is eligible for. The response +// includes the following information for each offering: description, duration, +// outbound bandwidth, price, Amazon Resource Name (ARN), and the NextToken +// to use in a subsequent ListOfferings request. +type ListReservationsOutput struct { _ struct{} `type:"structure"` - // The settings for a flow, including its source, outputs, and entitlements. 
- Flow *Flow `locationName:"flow" type:"structure"` + // The token that identifies which batch of results that you want to see. For + // example, you submit a ListReservations request with MaxResults set at 5. + // The service returns the first batch of results (up to 5) and a NextToken + // value. To see the next batch of results, you can submit the ListReservations + // request a second time and specify the NextToken value. + NextToken *string `locationName:"nextToken" type:"string"` - // Messages that provide the state of the flow. - Messages *Messages `locationName:"messages" type:"structure"` + // A list of all reservations that have been purchased by this account in the + // current AWS Region. + Reservations []*Reservation `locationName:"reservations" type:"list"` } // String returns the string representation -func (s DescribeFlowOutput) String() string { +func (s ListReservationsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeFlowOutput) GoString() string { +func (s ListReservationsOutput) GoString() string { return s.String() } -// SetFlow sets the Flow field's value. -func (s *DescribeFlowOutput) SetFlow(v *Flow) *DescribeFlowOutput { - s.Flow = v +// SetNextToken sets the NextToken field's value. +func (s *ListReservationsOutput) SetNextToken(v string) *ListReservationsOutput { + s.NextToken = &v return s } -// SetMessages sets the Messages field's value. -func (s *DescribeFlowOutput) SetMessages(v *Messages) *DescribeFlowOutput { - s.Messages = v +// SetReservations sets the Reservations field's value. +func (s *ListReservationsOutput) SetReservations(v []*Reservation) *ListReservationsOutput { + s.Reservations = v return s } -// Information about the encryption of the flow. -type Encryption struct { +type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // The type of algorithm that is used for the encryption (such as aes128, aes192, - // or aes256). - // - // Algorithm is a required field - Algorithm *string `locationName:"algorithm" type:"string" required:"true" enum:"Algorithm"` - - // A 128-bit, 16-byte hex value represented by a 32-character string, to be - // used with the key for encrypting content. This parameter is not valid for - // static key encryption. - ConstantInitializationVector *string `locationName:"constantInitializationVector" type:"string"` - - // The value of one of the devices that you configured with your digital rights - // management (DRM) platform key provider. This parameter is required for SPEKE - // encryption and is not valid for static key encryption. - DeviceId *string `locationName:"deviceId" type:"string"` - - // The type of key that is used for the encryption. If no keyType is provided, - // the service will use the default setting (static-key). - KeyType *string `locationName:"keyType" type:"string" enum:"KeyType"` - - // The AWS Region that the API Gateway proxy endpoint was created in. This parameter - // is required for SPEKE encryption and is not valid for static key encryption. - Region *string `locationName:"region" type:"string"` - - // An identifier for the content. The service sends this value to the key server - // to identify the current endpoint. The resource ID is also known as the content - // ID. This parameter is required for SPEKE encryption and is not valid for - // static key encryption. 
- ResourceId *string `locationName:"resourceId" type:"string"` - - // The ARN of the role that you created during setup (when you set up AWS Elemental - // MediaConnect as a trusted entity). - // - // RoleArn is a required field - RoleArn *string `locationName:"roleArn" type:"string" required:"true"` - - // The ARN of the secret that you created in AWS Secrets Manager to store the - // encryption key. This parameter is required for static key encryption and - // is not valid for SPEKE encryption. - SecretArn *string `locationName:"secretArn" type:"string"` - - // The URL from the API Gateway proxy that you set up to talk to your key server. - // This parameter is required for SPEKE encryption and is not valid for static - // key encryption. - Url *string `locationName:"url" type:"string"` + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` } // String returns the string representation -func (s Encryption) String() string { +func (s ListTagsForResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Encryption) GoString() string { +func (s ListTagsForResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *Encryption) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Encryption"} - if s.Algorithm == nil { - invalidParams.Add(request.NewErrParamRequired("Algorithm")) +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) } if invalidParams.Len() > 0 { @@ -2655,74 +5238,44 @@ func (s *Encryption) Validate() error { return nil } -// SetAlgorithm sets the Algorithm field's value. -func (s *Encryption) SetAlgorithm(v string) *Encryption { - s.Algorithm = &v - return s -} - -// SetConstantInitializationVector sets the ConstantInitializationVector field's value. -func (s *Encryption) SetConstantInitializationVector(v string) *Encryption { - s.ConstantInitializationVector = &v - return s -} - -// SetDeviceId sets the DeviceId field's value. -func (s *Encryption) SetDeviceId(v string) *Encryption { - s.DeviceId = &v - return s -} - -// SetKeyType sets the KeyType field's value. -func (s *Encryption) SetKeyType(v string) *Encryption { - s.KeyType = &v +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v return s } -// SetRegion sets the Region field's value. -func (s *Encryption) SetRegion(v string) *Encryption { - s.Region = &v - return s -} +// The tags for the resource. +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` -// SetResourceId sets the ResourceId field's value. -func (s *Encryption) SetResourceId(v string) *Encryption { - s.ResourceId = &v - return s + // A map from tag keys to values. Tag keys can have a maximum character length + // of 128 characters, and tag values can have a maximum length of 256 characters. + Tags map[string]*string `locationName:"tags" type:"map"` } -// SetRoleArn sets the RoleArn field's value. 
-func (s *Encryption) SetRoleArn(v string) *Encryption { - s.RoleArn = &v - return s +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) } -// SetSecretArn sets the SecretArn field's value. -func (s *Encryption) SetSecretArn(v string) *Encryption { - s.SecretArn = &v - return s +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() } -// SetUrl sets the Url field's value. -func (s *Encryption) SetUrl(v string) *Encryption { - s.Url = &v +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v return s } -// The settings for a flow entitlement. -type Entitlement struct { +// An entitlement that has been granted to you from other AWS accounts. +type ListedEntitlement struct { _ struct{} `type:"structure"` // Percentage from 0-100 of the data transfer cost to be billed to the subscriber. DataTransferSubscriberFeePercent *int64 `locationName:"dataTransferSubscriberFeePercent" type:"integer"` - // A description of the entitlement. - Description *string `locationName:"description" type:"string"` - - // The type of encryption that will be used on the output that is associated - // with this entitlement. - Encryption *Encryption `locationName:"encryption" type:"structure"` - // The ARN of the entitlement. // // EntitlementArn is a required field @@ -2730,87 +5283,54 @@ type Entitlement struct { // The name of the entitlement. // - // Name is a required field - Name *string `locationName:"name" type:"string" required:"true"` - - // The AWS account IDs that you want to share your content with. The receiving - // accounts (subscribers) will be allowed to create their own flow using your - // content as the source. - // - // Subscribers is a required field - Subscribers []*string `locationName:"subscribers" type:"list" required:"true"` + // EntitlementName is a required field + EntitlementName *string `locationName:"entitlementName" type:"string" required:"true"` } // String returns the string representation -func (s Entitlement) String() string { +func (s ListedEntitlement) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Entitlement) GoString() string { +func (s ListedEntitlement) GoString() string { return s.String() } // SetDataTransferSubscriberFeePercent sets the DataTransferSubscriberFeePercent field's value. -func (s *Entitlement) SetDataTransferSubscriberFeePercent(v int64) *Entitlement { +func (s *ListedEntitlement) SetDataTransferSubscriberFeePercent(v int64) *ListedEntitlement { s.DataTransferSubscriberFeePercent = &v return s } -// SetDescription sets the Description field's value. -func (s *Entitlement) SetDescription(v string) *Entitlement { - s.Description = &v - return s -} - -// SetEncryption sets the Encryption field's value. -func (s *Entitlement) SetEncryption(v *Encryption) *Entitlement { - s.Encryption = v - return s -} - // SetEntitlementArn sets the EntitlementArn field's value. -func (s *Entitlement) SetEntitlementArn(v string) *Entitlement { +func (s *ListedEntitlement) SetEntitlementArn(v string) *ListedEntitlement { s.EntitlementArn = &v return s } -// SetName sets the Name field's value. -func (s *Entitlement) SetName(v string) *Entitlement { - s.Name = &v - return s -} - -// SetSubscribers sets the Subscribers field's value. 
-func (s *Entitlement) SetSubscribers(v []*string) *Entitlement { - s.Subscribers = v +// SetEntitlementName sets the EntitlementName field's value. +func (s *ListedEntitlement) SetEntitlementName(v string) *ListedEntitlement { + s.EntitlementName = &v return s } -// The settings for a flow, including its source, outputs, and entitlements. -type Flow struct { +// Provides a summary of a flow, including its ARN, Availability Zone, and source +// type. +type ListedFlow struct { _ struct{} `type:"structure"` - // The Availability Zone that you want to create the flow in. These options - // are limited to the Availability Zones within the current AWS. + // The Availability Zone that the flow was created in. // // AvailabilityZone is a required field AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` - // A description of the flow. This value is not used or seen outside of the - // current AWS Elemental MediaConnect account. - Description *string `locationName:"description" type:"string"` - - // The IP address from which video will be sent to output destinations. - EgressIp *string `locationName:"egressIp" type:"string"` - - // The entitlements in this flow. + // A description of the flow. // - // Entitlements is a required field - Entitlements []*Entitlement `locationName:"entitlements" type:"list" required:"true"` + // Description is a required field + Description *string `locationName:"description" type:"string" required:"true"` - // The Amazon Resource Name (ARN), a unique identifier for any AWS resource, - // of the flow. + // The ARN of the flow. // // FlowArn is a required field FlowArn *string `locationName:"flowArn" type:"string" required:"true"` @@ -2820,15 +5340,13 @@ type Flow struct { // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` - // The outputs in this flow. - // - // Outputs is a required field - Outputs []*Output `locationName:"outputs" type:"list" required:"true"` - - // The settings for the source of the flow. + // The type of source. This value is either owned (originated somewhere other + // than an AWS Elemental MediaConnect flow owned by another AWS account) or + // entitled (originated at an AWS Elemental MediaConnect flow owned by another + // AWS account). // - // Source is a required field - Source *Source `locationName:"source" type:"structure" required:"true"` + // SourceType is a required field + SourceType *string `locationName:"sourceType" type:"string" required:"true" enum:"SourceType"` // The current status of the flow. // @@ -2837,246 +5355,110 @@ type Flow struct { } // String returns the string representation -func (s Flow) String() string { +func (s ListedFlow) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Flow) GoString() string { +func (s ListedFlow) GoString() string { return s.String() } // SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *Flow) SetAvailabilityZone(v string) *Flow { +func (s *ListedFlow) SetAvailabilityZone(v string) *ListedFlow { s.AvailabilityZone = &v return s } // SetDescription sets the Description field's value. -func (s *Flow) SetDescription(v string) *Flow { +func (s *ListedFlow) SetDescription(v string) *ListedFlow { s.Description = &v return s } -// SetEgressIp sets the EgressIp field's value. -func (s *Flow) SetEgressIp(v string) *Flow { - s.EgressIp = &v - return s -} - -// SetEntitlements sets the Entitlements field's value. 
-func (s *Flow) SetEntitlements(v []*Entitlement) *Flow { - s.Entitlements = v - return s -} - // SetFlowArn sets the FlowArn field's value. -func (s *Flow) SetFlowArn(v string) *Flow { +func (s *ListedFlow) SetFlowArn(v string) *ListedFlow { s.FlowArn = &v return s } // SetName sets the Name field's value. -func (s *Flow) SetName(v string) *Flow { +func (s *ListedFlow) SetName(v string) *ListedFlow { s.Name = &v return s } -// SetOutputs sets the Outputs field's value. -func (s *Flow) SetOutputs(v []*Output) *Flow { - s.Outputs = v - return s -} - -// SetSource sets the Source field's value. -func (s *Flow) SetSource(v *Source) *Flow { - s.Source = v - return s -} - -// SetStatus sets the Status field's value. -func (s *Flow) SetStatus(v string) *Flow { - s.Status = &v +// SetSourceType sets the SourceType field's value. +func (s *ListedFlow) SetSourceType(v string) *ListedFlow { + s.SourceType = &v return s } -// Exception raised by AWS Elemental MediaConnect. See the error message and -// documentation for the operation for more information on the cause of this -// exception. -type ForbiddenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata - - Message_ *string `locationName:"message" type:"string"` -} - -// String returns the string representation -func (s ForbiddenException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ForbiddenException) GoString() string { - return s.String() -} - -func newErrorForbiddenException(v protocol.ResponseMetadata) error { - return &ForbiddenException{ - respMetadata: v, - } -} - -// Code returns the exception type name. -func (s ForbiddenException) Code() string { - return "ForbiddenException" -} - -// Message returns the exception's message. -func (s ForbiddenException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s ForbiddenException) OrigErr() error { - return nil -} - -func (s ForbiddenException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} - -// Status code returns the HTTP status code for the request's response error. -func (s ForbiddenException) StatusCode() int { - return s.respMetadata.StatusCode -} - -// RequestID returns the service's response RequestID for request. -func (s ForbiddenException) RequestID() string { - return s.respMetadata.RequestID -} - -// The entitlements that you want to grant on a flow. -type GrantEntitlementRequest struct { - _ struct{} `type:"structure"` - - // Percentage from 0-100 of the data transfer cost to be billed to the subscriber. - DataTransferSubscriberFeePercent *int64 `locationName:"dataTransferSubscriberFeePercent" type:"integer"` - - // A description of the entitlement. This description appears only on the AWS - // Elemental MediaConnect console and will not be seen by the subscriber or - // end user. - Description *string `locationName:"description" type:"string"` - - // The type of encryption that will be used on the output that is associated - // with this entitlement. - Encryption *Encryption `locationName:"encryption" type:"structure"` - - // The name of the entitlement. This value must be unique within the current - // flow. - Name *string `locationName:"name" type:"string"` - - // The AWS account IDs that you want to share your content with. 
The receiving - // accounts (subscribers) will be allowed to create their own flows using your - // content as the source. - // - // Subscribers is a required field - Subscribers []*string `locationName:"subscribers" type:"list" required:"true"` -} - -// String returns the string representation -func (s GrantEntitlementRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GrantEntitlementRequest) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GrantEntitlementRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GrantEntitlementRequest"} - if s.Subscribers == nil { - invalidParams.Add(request.NewErrParamRequired("Subscribers")) - } - if s.Encryption != nil { - if err := s.Encryption.Validate(); err != nil { - invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDataTransferSubscriberFeePercent sets the DataTransferSubscriberFeePercent field's value. -func (s *GrantEntitlementRequest) SetDataTransferSubscriberFeePercent(v int64) *GrantEntitlementRequest { - s.DataTransferSubscriberFeePercent = &v +// SetStatus sets the Status field's value. +func (s *ListedFlow) SetStatus(v string) *ListedFlow { + s.Status = &v return s } -// SetDescription sets the Description field's value. -func (s *GrantEntitlementRequest) SetDescription(v string) *GrantEntitlementRequest { - s.Description = &v - return s +// Messages that provide the state of the flow. +type Messages struct { + _ struct{} `type:"structure"` + + // A list of errors that might have been generated from processes on this flow. + // + // Errors is a required field + Errors []*string `locationName:"errors" type:"list" required:"true"` } -// SetEncryption sets the Encryption field's value. -func (s *GrantEntitlementRequest) SetEncryption(v *Encryption) *GrantEntitlementRequest { - s.Encryption = v - return s +// String returns the string representation +func (s Messages) String() string { + return awsutil.Prettify(s) } -// SetName sets the Name field's value. -func (s *GrantEntitlementRequest) SetName(v string) *GrantEntitlementRequest { - s.Name = &v - return s +// GoString returns the string representation +func (s Messages) GoString() string { + return s.String() } -// SetSubscribers sets the Subscribers field's value. -func (s *GrantEntitlementRequest) SetSubscribers(v []*string) *GrantEntitlementRequest { - s.Subscribers = v +// SetErrors sets the Errors field's value. +func (s *Messages) SetErrors(v []*string) *Messages { + s.Errors = v return s } // Exception raised by AWS Elemental MediaConnect. See the error message and // documentation for the operation for more information on the cause of this // exception. 
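GrantEntitlementRequest requires the Subscribers list and validates any nested Encryption block. A hedged sketch of granting an entitlement, assuming GrantEntitlementRequest and the GrantFlowEntitlements operation survive the regeneration (the removal above looks like diff reordering rather than a deletion) and using placeholder ARNs and account IDs:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

func main() {
	client := mediaconnect.New(session.Must(session.NewSession()))

	// Placeholder; use the ARN of an existing flow.
	flowArn := "arn:aws:mediaconnect:us-east-1:111122223333:flow:example"

	req := &mediaconnect.GrantEntitlementRequest{
		Name:        aws.String("example-entitlement"),
		Description: aws.String("Shared with a partner account"),
		// Subscribers is required: the AWS account IDs (placeholder here)
		// that may use this flow as the source for their own flows.
		Subscribers: []*string{aws.String("444455556666")},
	}

	out, err := client.GrantFlowEntitlements(&mediaconnect.GrantFlowEntitlementsInput{
		FlowArn:      aws.String(flowArn),
		Entitlements: []*mediaconnect.GrantEntitlementRequest{req},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range out.Entitlements {
		log.Printf("granted %s", aws.StringValue(e.EntitlementArn))
	}
}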
-type GrantFlowEntitlements420Exception struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +type NotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation -func (s GrantFlowEntitlements420Exception) String() string { +func (s NotFoundException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GrantFlowEntitlements420Exception) GoString() string { +func (s NotFoundException) GoString() string { return s.String() } -func newErrorGrantFlowEntitlements420Exception(v protocol.ResponseMetadata) error { - return &GrantFlowEntitlements420Exception{ - respMetadata: v, +func newErrorNotFoundException(v protocol.ResponseMetadata) error { + return &NotFoundException{ + RespMetadata: v, } } // Code returns the exception type name. -func (s GrantFlowEntitlements420Exception) Code() string { - return "GrantFlowEntitlements420Exception" +func (s *NotFoundException) Code() string { + return "NotFoundException" } // Message returns the exception's message. -func (s GrantFlowEntitlements420Exception) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3084,284 +5466,302 @@ func (s GrantFlowEntitlements420Exception) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s GrantFlowEntitlements420Exception) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s GrantFlowEntitlements420Exception) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s GrantFlowEntitlements420Exception) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s GrantFlowEntitlements420Exception) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } -// Grants an entitlement on a flow. -type GrantFlowEntitlementsInput struct { +// A savings plan that reserves a certain amount of outbound bandwidth usage +// at a discounted rate each month over a period of time. +type Offering struct { _ struct{} `type:"structure"` - // The list of entitlements that you want to grant. + // The type of currency that is used for billing. The currencyCode used for + // all reservations is US dollars. // - // Entitlements is a required field - Entitlements []*GrantEntitlementRequest `locationName:"entitlements" type:"list" required:"true"` + // CurrencyCode is a required field + CurrencyCode *string `locationName:"currencyCode" type:"string" required:"true"` - // FlowArn is a required field - FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` + // The length of time that your reservation would be active. + // + // Duration is a required field + Duration *int64 `locationName:"duration" type:"integer" required:"true"` + + // The unit of measurement for the duration of the offering. 
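Because the regenerated exception types use pointer receivers and an exported RespMetadata, callers can match them with errors.As. A sketch, assuming the DescribeFlow operation from the same package and a placeholder flow ARN:

package main

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

func main() {
	client := mediaconnect.New(session.Must(session.NewSession()))

	_, err := client.DescribeFlow(&mediaconnect.DescribeFlowInput{
		FlowArn: aws.String("arn:aws:mediaconnect:us-east-1:111122223333:flow:missing"),
	})

	var nfe *mediaconnect.NotFoundException
	if errors.As(err, &nfe) {
		// RequestID and StatusCode come from the exported RespMetadata.
		log.Printf("flow not found (request %s, HTTP %d): %s",
			nfe.RequestID(), nfe.StatusCode(), nfe.Message())
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}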
+ // + // DurationUnits is a required field + DurationUnits *string `locationName:"durationUnits" type:"string" required:"true" enum:"DurationUnits"` + + // The Amazon Resource Name (ARN) that MediaConnect assigns to the offering. + // + // OfferingArn is a required field + OfferingArn *string `locationName:"offeringArn" type:"string" required:"true"` + + // A description of the offering. + // + // OfferingDescription is a required field + OfferingDescription *string `locationName:"offeringDescription" type:"string" required:"true"` + + // The cost of a single unit. This value, in combination with priceUnits, makes + // up the rate. + // + // PricePerUnit is a required field + PricePerUnit *string `locationName:"pricePerUnit" type:"string" required:"true"` + + // The unit of measurement that is used for billing. This value, in combination + // with pricePerUnit, makes up the rate. + // + // PriceUnits is a required field + PriceUnits *string `locationName:"priceUnits" type:"string" required:"true" enum:"PriceUnits"` + + // A definition of the amount of outbound bandwidth that you would be reserving + // if you purchase the offering. + // + // ResourceSpecification is a required field + ResourceSpecification *ResourceSpecification `locationName:"resourceSpecification" type:"structure" required:"true"` } // String returns the string representation -func (s GrantFlowEntitlementsInput) String() string { +func (s Offering) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GrantFlowEntitlementsInput) GoString() string { +func (s Offering) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GrantFlowEntitlementsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GrantFlowEntitlementsInput"} - if s.Entitlements == nil { - invalidParams.Add(request.NewErrParamRequired("Entitlements")) - } - if s.FlowArn == nil { - invalidParams.Add(request.NewErrParamRequired("FlowArn")) - } - if s.FlowArn != nil && len(*s.FlowArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) - } - if s.Entitlements != nil { - for i, v := range s.Entitlements { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Entitlements", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEntitlements sets the Entitlements field's value. -func (s *GrantFlowEntitlementsInput) SetEntitlements(v []*GrantEntitlementRequest) *GrantFlowEntitlementsInput { - s.Entitlements = v +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *Offering) SetCurrencyCode(v string) *Offering { + s.CurrencyCode = &v return s } -// SetFlowArn sets the FlowArn field's value. -func (s *GrantFlowEntitlementsInput) SetFlowArn(v string) *GrantFlowEntitlementsInput { - s.FlowArn = &v +// SetDuration sets the Duration field's value. +func (s *Offering) SetDuration(v int64) *Offering { + s.Duration = &v return s } -// The entitlements that were just granted. -type GrantFlowEntitlementsOutput struct { - _ struct{} `type:"structure"` - - // The entitlements that were just granted. - Entitlements []*Entitlement `locationName:"entitlements" type:"list"` - - // The ARN of the flow that these entitlements were granted to. - FlowArn *string `locationName:"flowArn" type:"string"` +// SetDurationUnits sets the DurationUnits field's value. 
+func (s *Offering) SetDurationUnits(v string) *Offering { + s.DurationUnits = &v + return s } -// String returns the string representation -func (s GrantFlowEntitlementsOutput) String() string { - return awsutil.Prettify(s) +// SetOfferingArn sets the OfferingArn field's value. +func (s *Offering) SetOfferingArn(v string) *Offering { + s.OfferingArn = &v + return s } -// GoString returns the string representation -func (s GrantFlowEntitlementsOutput) GoString() string { - return s.String() +// SetOfferingDescription sets the OfferingDescription field's value. +func (s *Offering) SetOfferingDescription(v string) *Offering { + s.OfferingDescription = &v + return s } -// SetEntitlements sets the Entitlements field's value. -func (s *GrantFlowEntitlementsOutput) SetEntitlements(v []*Entitlement) *GrantFlowEntitlementsOutput { - s.Entitlements = v +// SetPricePerUnit sets the PricePerUnit field's value. +func (s *Offering) SetPricePerUnit(v string) *Offering { + s.PricePerUnit = &v return s } -// SetFlowArn sets the FlowArn field's value. -func (s *GrantFlowEntitlementsOutput) SetFlowArn(v string) *GrantFlowEntitlementsOutput { - s.FlowArn = &v +// SetPriceUnits sets the PriceUnits field's value. +func (s *Offering) SetPriceUnits(v string) *Offering { + s.PriceUnits = &v return s } -// Exception raised by AWS Elemental MediaConnect. See the error message and -// documentation for the operation for more information on the cause of this -// exception. -type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata - - Message_ *string `locationName:"message" type:"string"` +// SetResourceSpecification sets the ResourceSpecification field's value. +func (s *Offering) SetResourceSpecification(v *ResourceSpecification) *Offering { + s.ResourceSpecification = v + return s } -// String returns the string representation -func (s InternalServerErrorException) String() string { - return awsutil.Prettify(s) -} +// The settings for an output. +type Output struct { + _ struct{} `type:"structure"` -// GoString returns the string representation -func (s InternalServerErrorException) GoString() string { - return s.String() -} + // Percentage from 0-100 of the data transfer cost to be billed to the subscriber. + DataTransferSubscriberFeePercent *int64 `locationName:"dataTransferSubscriberFeePercent" type:"integer"` -func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { - return &InternalServerErrorException{ - respMetadata: v, - } -} + // A description of the output. + Description *string `locationName:"description" type:"string"` -// Code returns the exception type name. -func (s InternalServerErrorException) Code() string { - return "InternalServerErrorException" -} + // The address where you want to send the output. + Destination *string `locationName:"destination" type:"string"` -// Message returns the exception's message. -func (s InternalServerErrorException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} + // The type of key used for the encryption. If no keyType is provided, the service + // will use the default setting (static-key). + Encryption *Encryption `locationName:"encryption" type:"structure"` -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerErrorException) OrigErr() error { - return nil -} + // The ARN of the entitlement on the originator''s flow. This value is relevant + // only on entitled flows. 
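PricePerUnit and PriceUnits together make up an offering's rate, and Duration plus DurationUnits its term. A purely illustrative helper for rendering that information; describeOffering is a hypothetical name and the field values are placeholders (real offerings come back from the service):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

// describeOffering is a hypothetical helper that renders an offering's
// rate and term from the fields defined above.
func describeOffering(o *mediaconnect.Offering) string {
	return fmt.Sprintf("%s %s per %s for %d %s",
		aws.StringValue(o.PricePerUnit),
		aws.StringValue(o.CurrencyCode),
		aws.StringValue(o.PriceUnits),
		aws.Int64Value(o.Duration),
		aws.StringValue(o.DurationUnits))
}

func main() {
	// Placeholder values for illustration only.
	fmt.Println(describeOffering(&mediaconnect.Offering{
		CurrencyCode:  aws.String("USD"),
		Duration:      aws.Int64(12),
		DurationUnits: aws.String("MONTHS"),
		PricePerUnit:  aws.String("0.08"),
		PriceUnits:    aws.String("HOURLY"),
	}))
}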
+ EntitlementArn *string `locationName:"entitlementArn" type:"string"` -func (s InternalServerErrorException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) -} + // The input ARN of the AWS Elemental MediaLive channel. This parameter is relevant + // only for outputs that were added by creating a MediaLive input. + MediaLiveInputArn *string `locationName:"mediaLiveInputArn" type:"string"` -// Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode -} + // The name of the output. This value must be unique within the current flow. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` -// RequestID returns the service's response RequestID for request. -func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID -} + // The ARN of the output. + // + // OutputArn is a required field + OutputArn *string `locationName:"outputArn" type:"string" required:"true"` -type ListEntitlementsInput struct { - _ struct{} `type:"structure"` + // The port to use when content is distributed to this output. + Port *int64 `locationName:"port" type:"integer"` - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + // Attributes related to the transport stream that are used in the output. + Transport *Transport `locationName:"transport" type:"structure"` - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + // The name of the VPC interface attachment to use for this output. + VpcInterfaceAttachment *VpcInterfaceAttachment `locationName:"vpcInterfaceAttachment" type:"structure"` } // String returns the string representation -func (s ListEntitlementsInput) String() string { +func (s Output) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListEntitlementsInput) GoString() string { +func (s Output) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListEntitlementsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListEntitlementsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDataTransferSubscriberFeePercent sets the DataTransferSubscriberFeePercent field's value. +func (s *Output) SetDataTransferSubscriberFeePercent(v int64) *Output { + s.DataTransferSubscriberFeePercent = &v + return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListEntitlementsInput) SetMaxResults(v int64) *ListEntitlementsInput { - s.MaxResults = &v +// SetDescription sets the Description field's value. +func (s *Output) SetDescription(v string) *Output { + s.Description = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListEntitlementsInput) SetNextToken(v string) *ListEntitlementsInput { - s.NextToken = &v +// SetDestination sets the Destination field's value. +func (s *Output) SetDestination(v string) *Output { + s.Destination = &v return s } -// The result of a successful ListEntitlements request. The response includes -// the ARN of each entitlement, the name of the associated flow, and the NextToken -// to use in a subsequent ListEntitlements request. 
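ListEntitlementsInput pages with MaxResults and NextToken in the usual way. A sketch of draining every page by feeding NextToken back into the next request, assuming a client constructed as in the earlier sketches:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

func main() {
	client := mediaconnect.New(session.Must(session.NewSession()))

	input := &mediaconnect.ListEntitlementsInput{MaxResults: aws.Int64(5)}
	for {
		page, err := client.ListEntitlements(input)
		if err != nil {
			log.Fatal(err)
		}
		// Each entry is a *mediaconnect.ListedEntitlement summary.
		for _, e := range page.Entitlements {
			fmt.Println(aws.StringValue(e.EntitlementName), aws.StringValue(e.EntitlementArn))
		}
		if page.NextToken == nil {
			break
		}
		// Feed the token back in to fetch the next batch.
		input.NextToken = page.NextToken
	}
}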
-type ListEntitlementsOutput struct { - _ struct{} `type:"structure"` +// SetEncryption sets the Encryption field's value. +func (s *Output) SetEncryption(v *Encryption) *Output { + s.Encryption = v + return s +} - // A list of entitlements that have been granted to you from other AWS accounts. - Entitlements []*ListedEntitlement `locationName:"entitlements" type:"list"` +// SetEntitlementArn sets the EntitlementArn field's value. +func (s *Output) SetEntitlementArn(v string) *Output { + s.EntitlementArn = &v + return s +} - // The token that identifies which batch of results that you want to see. For - // example, you submit a ListEntitlements request with MaxResults set at 5. - // The service returns the first batch of results (up to 5) and a NextToken - // value. To see the next batch of results, you can submit the ListEntitlements - // request a second time and specify the NextToken value. - NextToken *string `locationName:"nextToken" type:"string"` +// SetMediaLiveInputArn sets the MediaLiveInputArn field's value. +func (s *Output) SetMediaLiveInputArn(v string) *Output { + s.MediaLiveInputArn = &v + return s } -// String returns the string representation -func (s ListEntitlementsOutput) String() string { - return awsutil.Prettify(s) +// SetName sets the Name field's value. +func (s *Output) SetName(v string) *Output { + s.Name = &v + return s } -// GoString returns the string representation -func (s ListEntitlementsOutput) GoString() string { - return s.String() +// SetOutputArn sets the OutputArn field's value. +func (s *Output) SetOutputArn(v string) *Output { + s.OutputArn = &v + return s } -// SetEntitlements sets the Entitlements field's value. -func (s *ListEntitlementsOutput) SetEntitlements(v []*ListedEntitlement) *ListEntitlementsOutput { - s.Entitlements = v +// SetPort sets the Port field's value. +func (s *Output) SetPort(v int64) *Output { + s.Port = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListEntitlementsOutput) SetNextToken(v string) *ListEntitlementsOutput { - s.NextToken = &v +// SetTransport sets the Transport field's value. +func (s *Output) SetTransport(v *Transport) *Output { + s.Transport = v return s } -type ListFlowsInput struct { +// SetVpcInterfaceAttachment sets the VpcInterfaceAttachment field's value. +func (s *Output) SetVpcInterfaceAttachment(v *VpcInterfaceAttachment) *Output { + s.VpcInterfaceAttachment = v + return s +} + +// Submits a request to purchase an offering, which creates a reservation in +// your AWS account. If you already have an active reservation, you can't purchase +// another offering. +type PurchaseOfferingInput struct { _ struct{} `type:"structure"` - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + // OfferingArn is a required field + OfferingArn *string `location:"uri" locationName:"offeringArn" type:"string" required:"true"` - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + // The name that you want to use for the reservation. + // + // ReservationName is a required field + ReservationName *string `locationName:"reservationName" type:"string" required:"true"` + + // The date and time that you want the reservation to begin, in Coordinated + // Universal Time (UTC). You can specify any date and time between 12:00am on + // the first day of the current month to the current time on today's date, inclusive. + // Specify the start in a 24-hour notation. 
Use the following format: YYYY-MM-DDTHH:mm:SSZ, + // where T and Z are literal characters. For example, to specify 11:30pm on + // March 5, 2020, enter 2020-03-05T23:30:00Z. + // + // Start is a required field + Start *string `locationName:"start" type:"string" required:"true"` } // String returns the string representation -func (s ListFlowsInput) String() string { +func (s PurchaseOfferingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListFlowsInput) GoString() string { +func (s PurchaseOfferingInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListFlowsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListFlowsInput"} - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) +func (s *PurchaseOfferingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PurchaseOfferingInput"} + if s.OfferingArn == nil { + invalidParams.Add(request.NewErrParamRequired("OfferingArn")) + } + if s.OfferingArn != nil && len(*s.OfferingArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OfferingArn", 1)) + } + if s.ReservationName == nil { + invalidParams.Add(request.NewErrParamRequired("ReservationName")) + } + if s.Start == nil { + invalidParams.Add(request.NewErrParamRequired("Start")) } if invalidParams.Len() > 0 { @@ -3370,81 +5770,87 @@ func (s *ListFlowsInput) Validate() error { return nil } -// SetMaxResults sets the MaxResults field's value. -func (s *ListFlowsInput) SetMaxResults(v int64) *ListFlowsInput { - s.MaxResults = &v +// SetOfferingArn sets the OfferingArn field's value. +func (s *PurchaseOfferingInput) SetOfferingArn(v string) *PurchaseOfferingInput { + s.OfferingArn = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListFlowsInput) SetNextToken(v string) *ListFlowsInput { - s.NextToken = &v +// SetReservationName sets the ReservationName field's value. +func (s *PurchaseOfferingInput) SetReservationName(v string) *PurchaseOfferingInput { + s.ReservationName = &v return s } -// The result of a successful ListFlows request. The response includes flow -// summaries and the NextToken to use in a subsequent ListFlows request. -type ListFlowsOutput struct { - _ struct{} `type:"structure"` +// SetStart sets the Start field's value. +func (s *PurchaseOfferingInput) SetStart(v string) *PurchaseOfferingInput { + s.Start = &v + return s +} - // A list of flow summaries. - Flows []*ListedFlow `locationName:"flows" type:"list"` +// The result of a successful PurchaseOffering request. +type PurchaseOfferingOutput struct { + _ struct{} `type:"structure"` - // The token that identifies which batch of results that you want to see. For - // example, you submit a ListFlows request with MaxResults set at 5. The service - // returns the first batch of results (up to 5) and a NextToken value. To see - // the next batch of results, you can submit the ListFlows request a second - // time and specify the NextToken value. - NextToken *string `locationName:"nextToken" type:"string"` + // A pricing agreement for a discounted rate for a specific outbound bandwidth + // that your MediaConnect account will use each month over a specific time period. + // The discounted rate in the reservation applies to outbound bandwidth for + // all flows from your account until your account reaches the amount of bandwidth + // in your reservation. 
If you use more outbound bandwidth than the agreed upon + // amount in a single month, the overage is charged at the on-demand rate. + Reservation *Reservation `locationName:"reservation" type:"structure"` } // String returns the string representation -func (s ListFlowsOutput) String() string { +func (s PurchaseOfferingOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListFlowsOutput) GoString() string { +func (s PurchaseOfferingOutput) GoString() string { return s.String() } -// SetFlows sets the Flows field's value. -func (s *ListFlowsOutput) SetFlows(v []*ListedFlow) *ListFlowsOutput { - s.Flows = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListFlowsOutput) SetNextToken(v string) *ListFlowsOutput { - s.NextToken = &v +// SetReservation sets the Reservation field's value. +func (s *PurchaseOfferingOutput) SetReservation(v *Reservation) *PurchaseOfferingOutput { + s.Reservation = v return s } -type ListTagsForResourceInput struct { +type RemoveFlowOutputInput struct { _ struct{} `type:"structure"` - // ResourceArn is a required field - ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` + // FlowArn is a required field + FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` + + // OutputArn is a required field + OutputArn *string `location:"uri" locationName:"outputArn" type:"string" required:"true"` } // String returns the string representation -func (s ListTagsForResourceInput) String() string { +func (s RemoveFlowOutputInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTagsForResourceInput) GoString() string { +func (s RemoveFlowOutputInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListTagsForResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) +func (s *RemoveFlowOutputInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveFlowOutputInput"} + if s.FlowArn == nil { + invalidParams.Add(request.NewErrParamRequired("FlowArn")) } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + if s.FlowArn != nil && len(*s.FlowArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) + } + if s.OutputArn == nil { + invalidParams.Add(request.NewErrParamRequired("OutputArn")) + } + if s.OutputArn != nil && len(*s.OutputArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OutputArn", 1)) } if invalidParams.Len() > 0 { @@ -3453,449 +5859,444 @@ func (s *ListTagsForResourceInput) Validate() error { return nil } -// SetResourceArn sets the ResourceArn field's value. -func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { - s.ResourceArn = &v +// SetFlowArn sets the FlowArn field's value. +func (s *RemoveFlowOutputInput) SetFlowArn(v string) *RemoveFlowOutputInput { + s.FlowArn = &v return s } -// The tags for the resource. -type ListTagsForResourceOutput struct { - _ struct{} `type:"structure"` - - // A map from tag keys to values. Tag keys can have a maximum character length - // of 128 characters, and tag values can have a maximum length of 256 characters. 
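PurchaseOfferingInput needs the offering ARN, a reservation name, and a start time in the YYYY-MM-DDTHH:mm:SSZ form described above. A sketch with a placeholder ARN; the Go time layout string below produces that documented format:

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

func main() {
	client := mediaconnect.New(session.Must(session.NewSession()))

	// Start must fall between the first day of the current month and now,
	// in UTC, formatted as YYYY-MM-DDTHH:mm:SSZ.
	start := time.Now().UTC().Format("2006-01-02T15:04:05Z")

	out, err := client.PurchaseOffering(&mediaconnect.PurchaseOfferingInput{
		// Placeholder ARN; real offering ARNs come from the service.
		OfferingArn:     aws.String("arn:aws:mediaconnect:us-east-1:111122223333:offering:example"),
		ReservationName: aws.String("example-reservation"),
		Start:           aws.String(start),
	})
	if err != nil {
		log.Fatal(err)
	}
	r := out.Reservation
	log.Printf("reservation %s is %s until %s",
		aws.StringValue(r.ReservationName),
		aws.StringValue(r.ReservationState),
		aws.StringValue(r.End))
}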
- Tags map[string]*string `locationName:"tags" type:"map"` -} - -// String returns the string representation -func (s ListTagsForResourceOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListTagsForResourceOutput) GoString() string { - return s.String() -} - -// SetTags sets the Tags field's value. -func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { - s.Tags = v +// SetOutputArn sets the OutputArn field's value. +func (s *RemoveFlowOutputInput) SetOutputArn(v string) *RemoveFlowOutputInput { + s.OutputArn = &v return s } -// An entitlement that has been granted to you from other AWS accounts. -type ListedEntitlement struct { +// The result of a successful RemoveFlowOutput request including the flow ARN +// and the output ARN that was removed. +type RemoveFlowOutputOutput struct { _ struct{} `type:"structure"` - // Percentage from 0-100 of the data transfer cost to be billed to the subscriber. - DataTransferSubscriberFeePercent *int64 `locationName:"dataTransferSubscriberFeePercent" type:"integer"` - - // The ARN of the entitlement. - // - // EntitlementArn is a required field - EntitlementArn *string `locationName:"entitlementArn" type:"string" required:"true"` + // The ARN of the flow that is associated with the output you removed. + FlowArn *string `locationName:"flowArn" type:"string"` - // The name of the entitlement. - // - // EntitlementName is a required field - EntitlementName *string `locationName:"entitlementName" type:"string" required:"true"` + // The ARN of the output that was removed. + OutputArn *string `locationName:"outputArn" type:"string"` } // String returns the string representation -func (s ListedEntitlement) String() string { +func (s RemoveFlowOutputOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListedEntitlement) GoString() string { +func (s RemoveFlowOutputOutput) GoString() string { return s.String() } -// SetDataTransferSubscriberFeePercent sets the DataTransferSubscriberFeePercent field's value. -func (s *ListedEntitlement) SetDataTransferSubscriberFeePercent(v int64) *ListedEntitlement { - s.DataTransferSubscriberFeePercent = &v - return s -} - -// SetEntitlementArn sets the EntitlementArn field's value. -func (s *ListedEntitlement) SetEntitlementArn(v string) *ListedEntitlement { - s.EntitlementArn = &v +// SetFlowArn sets the FlowArn field's value. +func (s *RemoveFlowOutputOutput) SetFlowArn(v string) *RemoveFlowOutputOutput { + s.FlowArn = &v return s } -// SetEntitlementName sets the EntitlementName field's value. -func (s *ListedEntitlement) SetEntitlementName(v string) *ListedEntitlement { - s.EntitlementName = &v +// SetOutputArn sets the OutputArn field's value. +func (s *RemoveFlowOutputOutput) SetOutputArn(v string) *RemoveFlowOutputOutput { + s.OutputArn = &v return s } -// Provides a summary of a flow, including its ARN, Availability Zone, and source -// type. -type ListedFlow struct { +type RemoveFlowSourceInput struct { _ struct{} `type:"structure"` - // The Availability Zone that the flow was created in. - // - // AvailabilityZone is a required field - AvailabilityZone *string `locationName:"availabilityZone" type:"string" required:"true"` - - // A description of the flow. - // - // Description is a required field - Description *string `locationName:"description" type:"string" required:"true"` - - // The ARN of the flow. 
- // // FlowArn is a required field - FlowArn *string `locationName:"flowArn" type:"string" required:"true"` - - // The name of the flow. - // - // Name is a required field - Name *string `locationName:"name" type:"string" required:"true"` - - // The type of source. This value is either owned (originated somewhere other - // than an AWS Elemental MediaConnect flow owned by another AWS account) or - // entitled (originated at an AWS Elemental MediaConnect flow owned by another - // AWS account). - // - // SourceType is a required field - SourceType *string `locationName:"sourceType" type:"string" required:"true" enum:"SourceType"` + FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` - // The current status of the flow. - // - // Status is a required field - Status *string `locationName:"status" type:"string" required:"true" enum:"Status"` + // SourceArn is a required field + SourceArn *string `location:"uri" locationName:"sourceArn" type:"string" required:"true"` } // String returns the string representation -func (s ListedFlow) String() string { +func (s RemoveFlowSourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListedFlow) GoString() string { +func (s RemoveFlowSourceInput) GoString() string { return s.String() } -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *ListedFlow) SetAvailabilityZone(v string) *ListedFlow { - s.AvailabilityZone = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *ListedFlow) SetDescription(v string) *ListedFlow { - s.Description = &v - return s -} - -// SetFlowArn sets the FlowArn field's value. -func (s *ListedFlow) SetFlowArn(v string) *ListedFlow { - s.FlowArn = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveFlowSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveFlowSourceInput"} + if s.FlowArn == nil { + invalidParams.Add(request.NewErrParamRequired("FlowArn")) + } + if s.FlowArn != nil && len(*s.FlowArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) + } + if s.SourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("SourceArn")) + } + if s.SourceArn != nil && len(*s.SourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SourceArn", 1)) + } -// SetName sets the Name field's value. -func (s *ListedFlow) SetName(v string) *ListedFlow { - s.Name = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSourceType sets the SourceType field's value. -func (s *ListedFlow) SetSourceType(v string) *ListedFlow { - s.SourceType = &v +// SetFlowArn sets the FlowArn field's value. +func (s *RemoveFlowSourceInput) SetFlowArn(v string) *RemoveFlowSourceInput { + s.FlowArn = &v return s } -// SetStatus sets the Status field's value. -func (s *ListedFlow) SetStatus(v string) *ListedFlow { - s.Status = &v +// SetSourceArn sets the SourceArn field's value. +func (s *RemoveFlowSourceInput) SetSourceArn(v string) *RemoveFlowSourceInput { + s.SourceArn = &v return s } -// Messages that provide the state of the flow. -type Messages struct { +// The result of a successful RemoveFlowSource request including the flow ARN +// and the source ARN that was removed. +type RemoveFlowSourceOutput struct { _ struct{} `type:"structure"` - // A list of errors that might have been generated from processes on this flow. 
- // - // Errors is a required field - Errors []*string `locationName:"errors" type:"list" required:"true"` + // The ARN of the flow that is associated with the source you removed. + FlowArn *string `locationName:"flowArn" type:"string"` + + // The ARN of the source that was removed. + SourceArn *string `locationName:"sourceArn" type:"string"` } // String returns the string representation -func (s Messages) String() string { +func (s RemoveFlowSourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Messages) GoString() string { +func (s RemoveFlowSourceOutput) GoString() string { return s.String() } -// SetErrors sets the Errors field's value. -func (s *Messages) SetErrors(v []*string) *Messages { - s.Errors = v +// SetFlowArn sets the FlowArn field's value. +func (s *RemoveFlowSourceOutput) SetFlowArn(v string) *RemoveFlowSourceOutput { + s.FlowArn = &v return s } -// Exception raised by AWS Elemental MediaConnect. See the error message and -// documentation for the operation for more information on the cause of this -// exception. -type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// SetSourceArn sets the SourceArn field's value. +func (s *RemoveFlowSourceOutput) SetSourceArn(v string) *RemoveFlowSourceOutput { + s.SourceArn = &v + return s +} - Message_ *string `locationName:"message" type:"string"` +type RemoveFlowVpcInterfaceInput struct { + _ struct{} `type:"structure"` + + // FlowArn is a required field + FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` + + // VpcInterfaceName is a required field + VpcInterfaceName *string `location:"uri" locationName:"vpcInterfaceName" type:"string" required:"true"` } // String returns the string representation -func (s NotFoundException) String() string { +func (s RemoveFlowVpcInterfaceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NotFoundException) GoString() string { +func (s RemoveFlowVpcInterfaceInput) GoString() string { return s.String() } -func newErrorNotFoundException(v protocol.ResponseMetadata) error { - return &NotFoundException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveFlowVpcInterfaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveFlowVpcInterfaceInput"} + if s.FlowArn == nil { + invalidParams.Add(request.NewErrParamRequired("FlowArn")) + } + if s.FlowArn != nil && len(*s.FlowArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) + } + if s.VpcInterfaceName == nil { + invalidParams.Add(request.NewErrParamRequired("VpcInterfaceName")) + } + if s.VpcInterfaceName != nil && len(*s.VpcInterfaceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VpcInterfaceName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams } + return nil } -// Code returns the exception type name. -func (s NotFoundException) Code() string { - return "NotFoundException" +// SetFlowArn sets the FlowArn field's value. +func (s *RemoveFlowVpcInterfaceInput) SetFlowArn(v string) *RemoveFlowVpcInterfaceInput { + s.FlowArn = &v + return s } -// Message returns the exception's message. -func (s NotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetVpcInterfaceName sets the VpcInterfaceName field's value. 
+func (s *RemoveFlowVpcInterfaceInput) SetVpcInterfaceName(v string) *RemoveFlowVpcInterfaceInput { + s.VpcInterfaceName = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { - return nil +// The result of a successful RemoveFlowVpcInterface request including the flow +// ARN and the VPC interface name that was removed. +type RemoveFlowVpcInterfaceOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the flow that is associated with the VPC interface you removed. + FlowArn *string `locationName:"flowArn" type:"string"` + + // IDs of network interfaces associated with the removed VPC interface that + // Media Connect was unable to remove. + NonDeletedNetworkInterfaceIds []*string `locationName:"nonDeletedNetworkInterfaceIds" type:"list"` + + // The name of the VPC interface that was removed. + VpcInterfaceName *string `locationName:"vpcInterfaceName" type:"string"` } -func (s NotFoundException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// String returns the string representation +func (s RemoveFlowVpcInterfaceOutput) String() string { + return awsutil.Prettify(s) } -// Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +// GoString returns the string representation +func (s RemoveFlowVpcInterfaceOutput) GoString() string { + return s.String() } -// RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +// SetFlowArn sets the FlowArn field's value. +func (s *RemoveFlowVpcInterfaceOutput) SetFlowArn(v string) *RemoveFlowVpcInterfaceOutput { + s.FlowArn = &v + return s } -// The settings for an output. -type Output struct { +// SetNonDeletedNetworkInterfaceIds sets the NonDeletedNetworkInterfaceIds field's value. +func (s *RemoveFlowVpcInterfaceOutput) SetNonDeletedNetworkInterfaceIds(v []*string) *RemoveFlowVpcInterfaceOutput { + s.NonDeletedNetworkInterfaceIds = v + return s +} + +// SetVpcInterfaceName sets the VpcInterfaceName field's value. +func (s *RemoveFlowVpcInterfaceOutput) SetVpcInterfaceName(v string) *RemoveFlowVpcInterfaceOutput { + s.VpcInterfaceName = &v + return s +} + +// A pricing agreement for a discounted rate for a specific outbound bandwidth +// that your MediaConnect account will use each month over a specific time period. +// The discounted rate in the reservation applies to outbound bandwidth for +// all flows from your account until your account reaches the amount of bandwidth +// in your reservation. If you use more outbound bandwidth than the agreed upon +// amount in a single month, the overage is charged at the on-demand rate. +type Reservation struct { _ struct{} `type:"structure"` - // Percentage from 0-100 of the data transfer cost to be billed to the subscriber. - DataTransferSubscriberFeePercent *int64 `locationName:"dataTransferSubscriberFeePercent" type:"integer"` + // The type of currency that is used for billing. The currencyCode used for + // your reservation is US dollars. + // + // CurrencyCode is a required field + CurrencyCode *string `locationName:"currencyCode" type:"string" required:"true"` - // A description of the output. - Description *string `locationName:"description" type:"string"` + // The length of time that this reservation is active. MediaConnect defines + // this value in the offering. 
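RemoveFlowVpcInterfaceOutput surfaces any network interfaces the service could not delete in NonDeletedNetworkInterfaceIds, so callers can clean them up later. A sketch with placeholder flow ARN and interface name:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

func main() {
	client := mediaconnect.New(session.Must(session.NewSession()))

	out, err := client.RemoveFlowVpcInterface(&mediaconnect.RemoveFlowVpcInterfaceInput{
		FlowArn:          aws.String("arn:aws:mediaconnect:us-east-1:111122223333:flow:example"),
		VpcInterfaceName: aws.String("vpc-interface-1"),
	})
	if err != nil {
		log.Fatal(err)
	}
	if len(out.NonDeletedNetworkInterfaceIds) > 0 {
		// These network interfaces still exist and may need to be removed
		// separately once they are no longer in use.
		log.Printf("left behind: %v", aws.StringValueSlice(out.NonDeletedNetworkInterfaceIds))
	}
}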
+ // + // Duration is a required field + Duration *int64 `locationName:"duration" type:"integer" required:"true"` - // The address where you want to send the output. - Destination *string `locationName:"destination" type:"string"` + // The unit of measurement for the duration of the reservation. MediaConnect + // defines this value in the offering. + // + // DurationUnits is a required field + DurationUnits *string `locationName:"durationUnits" type:"string" required:"true" enum:"DurationUnits"` - // The type of key used for the encryption. If no keyType is provided, the service - // will use the default setting (static-key). - Encryption *Encryption `locationName:"encryption" type:"structure"` + // The day and time that this reservation expires. This value is calculated + // based on the start date and time that you set and the offering's duration. + // + // End is a required field + End *string `locationName:"end" type:"string" required:"true"` - // The ARN of the entitlement on the originator''s flow. This value is relevant - // only on entitled flows. - EntitlementArn *string `locationName:"entitlementArn" type:"string"` + // The Amazon Resource Name (ARN) that MediaConnect assigns to the offering. + // + // OfferingArn is a required field + OfferingArn *string `locationName:"offeringArn" type:"string" required:"true"` - // The input ARN of the AWS Elemental MediaLive channel. This parameter is relevant - // only for outputs that were added by creating a MediaLive input. - MediaLiveInputArn *string `locationName:"mediaLiveInputArn" type:"string"` + // A description of the offering. MediaConnect defines this value in the offering. + // + // OfferingDescription is a required field + OfferingDescription *string `locationName:"offeringDescription" type:"string" required:"true"` - // The name of the output. This value must be unique within the current flow. + // The cost of a single unit. This value, in combination with priceUnits, makes + // up the rate. MediaConnect defines this value in the offering. // - // Name is a required field - Name *string `locationName:"name" type:"string" required:"true"` + // PricePerUnit is a required field + PricePerUnit *string `locationName:"pricePerUnit" type:"string" required:"true"` - // The ARN of the output. + // The unit of measurement that is used for billing. This value, in combination + // with pricePerUnit, makes up the rate. MediaConnect defines this value in + // the offering. // - // OutputArn is a required field - OutputArn *string `locationName:"outputArn" type:"string" required:"true"` + // PriceUnits is a required field + PriceUnits *string `locationName:"priceUnits" type:"string" required:"true" enum:"PriceUnits"` - // The port to use when content is distributed to this output. - Port *int64 `locationName:"port" type:"integer"` + // The Amazon Resource Name (ARN) that MediaConnect assigns to the reservation + // when you purchase an offering. + // + // ReservationArn is a required field + ReservationArn *string `locationName:"reservationArn" type:"string" required:"true"` - // Attributes related to the transport stream that are used in the output. - Transport *Transport `locationName:"transport" type:"structure"` + // The name that you assigned to the reservation when you purchased the offering. + // + // ReservationName is a required field + ReservationName *string `locationName:"reservationName" type:"string" required:"true"` + + // The status of your reservation. 
+ // + // ReservationState is a required field + ReservationState *string `locationName:"reservationState" type:"string" required:"true" enum:"ReservationState"` + + // A definition of the amount of outbound bandwidth that you would be reserving + // if you purchase the offering. MediaConnect defines the values that make up + // the resourceSpecification in the offering. + // + // ResourceSpecification is a required field + ResourceSpecification *ResourceSpecification `locationName:"resourceSpecification" type:"structure" required:"true"` + + // The day and time that the reservation becomes active. You set this value + // when you purchase the offering. + // + // Start is a required field + Start *string `locationName:"start" type:"string" required:"true"` } // String returns the string representation -func (s Output) String() string { +func (s Reservation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Output) GoString() string { +func (s Reservation) GoString() string { return s.String() } -// SetDataTransferSubscriberFeePercent sets the DataTransferSubscriberFeePercent field's value. -func (s *Output) SetDataTransferSubscriberFeePercent(v int64) *Output { - s.DataTransferSubscriberFeePercent = &v +// SetCurrencyCode sets the CurrencyCode field's value. +func (s *Reservation) SetCurrencyCode(v string) *Reservation { + s.CurrencyCode = &v return s } -// SetDescription sets the Description field's value. -func (s *Output) SetDescription(v string) *Output { - s.Description = &v +// SetDuration sets the Duration field's value. +func (s *Reservation) SetDuration(v int64) *Reservation { + s.Duration = &v return s } -// SetDestination sets the Destination field's value. -func (s *Output) SetDestination(v string) *Output { - s.Destination = &v +// SetDurationUnits sets the DurationUnits field's value. +func (s *Reservation) SetDurationUnits(v string) *Reservation { + s.DurationUnits = &v return s } -// SetEncryption sets the Encryption field's value. -func (s *Output) SetEncryption(v *Encryption) *Output { - s.Encryption = v +// SetEnd sets the End field's value. +func (s *Reservation) SetEnd(v string) *Reservation { + s.End = &v return s } -// SetEntitlementArn sets the EntitlementArn field's value. -func (s *Output) SetEntitlementArn(v string) *Output { - s.EntitlementArn = &v +// SetOfferingArn sets the OfferingArn field's value. +func (s *Reservation) SetOfferingArn(v string) *Reservation { + s.OfferingArn = &v return s } -// SetMediaLiveInputArn sets the MediaLiveInputArn field's value. -func (s *Output) SetMediaLiveInputArn(v string) *Output { - s.MediaLiveInputArn = &v +// SetOfferingDescription sets the OfferingDescription field's value. +func (s *Reservation) SetOfferingDescription(v string) *Reservation { + s.OfferingDescription = &v return s } -// SetName sets the Name field's value. -func (s *Output) SetName(v string) *Output { - s.Name = &v +// SetPricePerUnit sets the PricePerUnit field's value. +func (s *Reservation) SetPricePerUnit(v string) *Reservation { + s.PricePerUnit = &v return s } -// SetOutputArn sets the OutputArn field's value. -func (s *Output) SetOutputArn(v string) *Output { - s.OutputArn = &v +// SetPriceUnits sets the PriceUnits field's value. +func (s *Reservation) SetPriceUnits(v string) *Reservation { + s.PriceUnits = &v return s } -// SetPort sets the Port field's value. -func (s *Output) SetPort(v int64) *Output { - s.Port = &v +// SetReservationArn sets the ReservationArn field's value. 
+func (s *Reservation) SetReservationArn(v string) *Reservation { + s.ReservationArn = &v return s } -// SetTransport sets the Transport field's value. -func (s *Output) SetTransport(v *Transport) *Output { - s.Transport = v +// SetReservationName sets the ReservationName field's value. +func (s *Reservation) SetReservationName(v string) *Reservation { + s.ReservationName = &v return s } -type RemoveFlowOutputInput struct { - _ struct{} `type:"structure"` - - // FlowArn is a required field - FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` - - // OutputArn is a required field - OutputArn *string `location:"uri" locationName:"outputArn" type:"string" required:"true"` -} - -// String returns the string representation -func (s RemoveFlowOutputInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RemoveFlowOutputInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RemoveFlowOutputInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RemoveFlowOutputInput"} - if s.FlowArn == nil { - invalidParams.Add(request.NewErrParamRequired("FlowArn")) - } - if s.FlowArn != nil && len(*s.FlowArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) - } - if s.OutputArn == nil { - invalidParams.Add(request.NewErrParamRequired("OutputArn")) - } - if s.OutputArn != nil && len(*s.OutputArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("OutputArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetReservationState sets the ReservationState field's value. +func (s *Reservation) SetReservationState(v string) *Reservation { + s.ReservationState = &v + return s } -// SetFlowArn sets the FlowArn field's value. -func (s *RemoveFlowOutputInput) SetFlowArn(v string) *RemoveFlowOutputInput { - s.FlowArn = &v +// SetResourceSpecification sets the ResourceSpecification field's value. +func (s *Reservation) SetResourceSpecification(v *ResourceSpecification) *Reservation { + s.ResourceSpecification = v return s } -// SetOutputArn sets the OutputArn field's value. -func (s *RemoveFlowOutputInput) SetOutputArn(v string) *RemoveFlowOutputInput { - s.OutputArn = &v +// SetStart sets the Start field's value. +func (s *Reservation) SetStart(v string) *Reservation { + s.Start = &v return s } -// The result of a successful RemoveFlowOutput request including the flow ARN -// and the output ARN that was removed. -type RemoveFlowOutputOutput struct { +// A definition of what is being billed for, including the type and amount. +type ResourceSpecification struct { _ struct{} `type:"structure"` - // The ARN of the flow that is associated with the output you removed. - FlowArn *string `locationName:"flowArn" type:"string"` + // The amount of outbound bandwidth that is discounted in the offering. + ReservedBitrate *int64 `locationName:"reservedBitrate" type:"integer"` - // The ARN of the output that was removed. - OutputArn *string `locationName:"outputArn" type:"string"` + // The type of resource and the unit that is being billed for. 
+ // + // ResourceType is a required field + ResourceType *string `locationName:"resourceType" type:"string" required:"true" enum:"ResourceType"` } // String returns the string representation -func (s RemoveFlowOutputOutput) String() string { +func (s ResourceSpecification) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RemoveFlowOutputOutput) GoString() string { +func (s ResourceSpecification) GoString() string { return s.String() } -// SetFlowArn sets the FlowArn field's value. -func (s *RemoveFlowOutputOutput) SetFlowArn(v string) *RemoveFlowOutputOutput { - s.FlowArn = &v +// SetReservedBitrate sets the ReservedBitrate field's value. +func (s *ResourceSpecification) SetReservedBitrate(v int64) *ResourceSpecification { + s.ReservedBitrate = &v return s } -// SetOutputArn sets the OutputArn field's value. -func (s *RemoveFlowOutputOutput) SetOutputArn(v string) *RemoveFlowOutputOutput { - s.OutputArn = &v +// SetResourceType sets the ResourceType field's value. +func (s *ResourceSpecification) SetResourceType(v string) *ResourceSpecification { + s.ResourceType = &v return s } @@ -3992,8 +6393,8 @@ func (s *RevokeFlowEntitlementOutput) SetFlowArn(v string) *RevokeFlowEntitlemen // documentation for the operation for more information on the cause of this // exception. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4010,17 +6411,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4028,22 +6429,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // The settings for the source of the flow. @@ -4082,6 +6483,9 @@ type SetSourceRequest struct { // only to Zixi-based streams. StreamId *string `locationName:"streamId" type:"string"` + // The name of the VPC interface to use for this source. 
+ VpcInterfaceName *string `locationName:"vpcInterfaceName" type:"string"` + // The range of IP addresses that should be allowed to contribute content to // your source. These IP addresses should be in the form of a Classless Inter-Domain // Routing (CIDR) block; for example, 10.0.0.0/16. @@ -4167,6 +6571,12 @@ func (s *SetSourceRequest) SetStreamId(v string) *SetSourceRequest { return s } +// SetVpcInterfaceName sets the VpcInterfaceName field's value. +func (s *SetSourceRequest) SetVpcInterfaceName(v string) *SetSourceRequest { + s.VpcInterfaceName = &v + return s +} + // SetWhitelistCidr sets the WhitelistCidr field's value. func (s *SetSourceRequest) SetWhitelistCidr(v string) *SetSourceRequest { s.WhitelistCidr = &v @@ -4211,6 +6621,9 @@ type Source struct { // Attributes related to the transport stream that are used in the source. Transport *Transport `locationName:"transport" type:"structure"` + // The name of the VPC Interface this Source is configured with. + VpcInterfaceName *string `locationName:"vpcInterfaceName" type:"string"` + // The range of IP addresses that should be allowed to contribute content to // your source. These IP addresses should be in the form of a Classless Inter-Domain // Routing (CIDR) block; for example, 10.0.0.0/16. @@ -4281,6 +6694,12 @@ func (s *Source) SetTransport(v *Transport) *Source { return s } +// SetVpcInterfaceName sets the VpcInterfaceName field's value. +func (s *Source) SetVpcInterfaceName(v string) *Source { + s.VpcInterfaceName = &v + return s +} + // SetWhitelistCidr sets the WhitelistCidr field's value. func (s *Source) SetWhitelistCidr(v string) *Source { s.WhitelistCidr = &v @@ -4505,8 +6924,8 @@ func (s TagResourceOutput) GoString() string { // documentation for the operation for more information on the cause of this // exception. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4523,17 +6942,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4541,22 +6960,22 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
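The new VpcInterfaceName field on SetSourceRequest (mirrored read-only on Source) binds a source to a named VPC interface on the flow. A minimal sketch that exercises only the setters added or kept in this file; the resulting request would be handed to whichever create-flow or add-source call the caller already uses, and the names and CIDR are placeholders:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/mediaconnect"
)

func main() {
	// A source that ingests over the VPC interface named "ingest-a"
	// rather than over the public internet.
	src := (&mediaconnect.SetSourceRequest{}).
		SetWhitelistCidr("10.0.0.0/16").
		SetVpcInterfaceName("ingest-a")

	fmt.Println(src.String())
}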
-func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } // Attributes related to the transport stream that are used in a source or output. @@ -4801,21 +7220,53 @@ func (s *UpdateEncryption) SetResourceId(v string) *UpdateEncryption { return s } -// SetRoleArn sets the RoleArn field's value. -func (s *UpdateEncryption) SetRoleArn(v string) *UpdateEncryption { - s.RoleArn = &v - return s +// SetRoleArn sets the RoleArn field's value. +func (s *UpdateEncryption) SetRoleArn(v string) *UpdateEncryption { + s.RoleArn = &v + return s +} + +// SetSecretArn sets the SecretArn field's value. +func (s *UpdateEncryption) SetSecretArn(v string) *UpdateEncryption { + s.SecretArn = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *UpdateEncryption) SetUrl(v string) *UpdateEncryption { + s.Url = &v + return s +} + +// The settings for source failover +type UpdateFailoverConfig struct { + _ struct{} `type:"structure"` + + // Recovery window time to look for dash-7 packets + RecoveryWindow *int64 `locationName:"recoveryWindow" type:"integer"` + + State *string `locationName:"state" type:"string" enum:"State"` +} + +// String returns the string representation +func (s UpdateFailoverConfig) String() string { + return awsutil.Prettify(s) } -// SetSecretArn sets the SecretArn field's value. -func (s *UpdateEncryption) SetSecretArn(v string) *UpdateEncryption { - s.SecretArn = &v +// GoString returns the string representation +func (s UpdateFailoverConfig) GoString() string { + return s.String() +} + +// SetRecoveryWindow sets the RecoveryWindow field's value. +func (s *UpdateFailoverConfig) SetRecoveryWindow(v int64) *UpdateFailoverConfig { + s.RecoveryWindow = &v return s } -// SetUrl sets the Url field's value. -func (s *UpdateEncryption) SetUrl(v string) *UpdateEncryption { - s.Url = &v +// SetState sets the State field's value. +func (s *UpdateFailoverConfig) SetState(v string) *UpdateFailoverConfig { + s.State = &v return s } @@ -4835,6 +7286,12 @@ type UpdateFlowEntitlementInput struct { // EntitlementArn is a required field EntitlementArn *string `location:"uri" locationName:"entitlementArn" type:"string" required:"true"` + // An indication of whether you want to enable the entitlement to allow access, + // or disable it to stop streaming content to the subscriber’s flow temporarily. + // If you don’t specify the entitlementStatus field in your request, MediaConnect + // leaves the value unchanged. + EntitlementStatus *string `locationName:"entitlementStatus" type:"string" enum:"EntitlementStatus"` + // FlowArn is a required field FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` @@ -4894,6 +7351,12 @@ func (s *UpdateFlowEntitlementInput) SetEntitlementArn(v string) *UpdateFlowEnti return s } +// SetEntitlementStatus sets the EntitlementStatus field's value. +func (s *UpdateFlowEntitlementInput) SetEntitlementStatus(v string) *UpdateFlowEntitlementInput { + s.EntitlementStatus = &v + return s +} + // SetFlowArn sets the FlowArn field's value. func (s *UpdateFlowEntitlementInput) SetFlowArn(v string) *UpdateFlowEntitlementInput { s.FlowArn = &v @@ -4911,7 +7374,7 @@ func (s *UpdateFlowEntitlementInput) SetSubscribers(v []*string) *UpdateFlowEnti type UpdateFlowEntitlementOutput struct { _ struct{} `type:"structure"` - // The settings for a flow entitlement. + // The new configuration of the entitlement that you updated. 
Entitlement *Entitlement `locationName:"entitlement" type:"structure"` // The ARN of the flow that this entitlement was granted on. @@ -4940,6 +7403,79 @@ func (s *UpdateFlowEntitlementOutput) SetFlowArn(v string) *UpdateFlowEntitlemen return s } +// Updates an existing flow. +type UpdateFlowInput struct { + _ struct{} `type:"structure"` + + // FlowArn is a required field + FlowArn *string `location:"uri" locationName:"flowArn" type:"string" required:"true"` + + // The settings for source failover + SourceFailoverConfig *UpdateFailoverConfig `locationName:"sourceFailoverConfig" type:"structure"` +} + +// String returns the string representation +func (s UpdateFlowInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFlowInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateFlowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateFlowInput"} + if s.FlowArn == nil { + invalidParams.Add(request.NewErrParamRequired("FlowArn")) + } + if s.FlowArn != nil && len(*s.FlowArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FlowArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFlowArn sets the FlowArn field's value. +func (s *UpdateFlowInput) SetFlowArn(v string) *UpdateFlowInput { + s.FlowArn = &v + return s +} + +// SetSourceFailoverConfig sets the SourceFailoverConfig field's value. +func (s *UpdateFlowInput) SetSourceFailoverConfig(v *UpdateFailoverConfig) *UpdateFlowInput { + s.SourceFailoverConfig = v + return s +} + +// Updates an existing flow. +type UpdateFlowOutput struct { + _ struct{} `type:"structure"` + + // The settings for a flow, including its source, outputs, and entitlements. + Flow *Flow `locationName:"flow" type:"structure"` +} + +// String returns the string representation +func (s UpdateFlowOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateFlowOutput) GoString() string { + return s.String() +} + +// SetFlow sets the Flow field's value. +func (s *UpdateFlowOutput) SetFlow(v *Flow) *UpdateFlowOutput { + s.Flow = v + return s +} + // The updates that you want to make to an existing output of an existing flow. type UpdateFlowOutputInput struct { _ struct{} `type:"structure"` @@ -4984,6 +7520,9 @@ type UpdateFlowOutputInput struct { // The stream ID that you want to use for this transport. This parameter applies // only to Zixi-based streams. StreamId *string `locationName:"streamId" type:"string"` + + // The name of the VPC interface attachment to use for this output. + VpcInterfaceAttachment *VpcInterfaceAttachment `locationName:"vpcInterfaceAttachment" type:"structure"` } // String returns the string representation @@ -5090,6 +7629,12 @@ func (s *UpdateFlowOutputInput) SetStreamId(v string) *UpdateFlowOutputInput { return s } +// SetVpcInterfaceAttachment sets the VpcInterfaceAttachment field's value. +func (s *UpdateFlowOutputInput) SetVpcInterfaceAttachment(v *VpcInterfaceAttachment) *UpdateFlowOutputInput { + s.VpcInterfaceAttachment = v + return s +} + // The result of a successful UpdateFlowOutput request including the flow ARN // and the updated output. type UpdateFlowOutputOutput struct { @@ -5098,7 +7643,7 @@ type UpdateFlowOutputOutput struct { // The ARN of the flow that is associated with the updated output. 
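// Editor's sketch — not part of this patch. The new UpdateFlowInput and
// UpdateFailoverConfig shapes above are consumed by the package's UpdateFlow
// operation (not shown in this excerpt). Assuming a configured client named svc
// and an existing flow ARN, enabling source failover might look like:
//
//	out, err := svc.UpdateFlow(&mediaconnect.UpdateFlowInput{
//		FlowArn: aws.String(flowArn),
//		SourceFailoverConfig: &mediaconnect.UpdateFailoverConfig{
//			RecoveryWindow: aws.Int64(200), // window to look for dash-7 packets; 200 is an assumed example value
//			State:          aws.String(mediaconnect.StateEnabled),
//		},
//	})
//	if err != nil {
//		return err
//	}
//	_ = out.Flow // the updated flow, including its source, outputs, and entitlements
//
// svc, flowArn, and the RecoveryWindow value are assumptions for illustration;
// the field names and the State enum come from this file.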
FlowArn *string `locationName:"flowArn" type:"string"` - // The settings for an output. + // The new settings of the output that you updated. Output *Output `locationName:"output" type:"structure"` } @@ -5163,6 +7708,9 @@ type UpdateFlowSourceInput struct { // only to Zixi-based streams. StreamId *string `locationName:"streamId" type:"string"` + // The name of the VPC Interface to configure this Source with. + VpcInterfaceName *string `locationName:"vpcInterfaceName" type:"string"` + // The range of IP addresses that should be allowed to contribute content to // your source. These IP addresses should be in the form of a Classless Inter-Domain // Routing (CIDR) block; for example, 10.0.0.0/16. @@ -5261,6 +7809,12 @@ func (s *UpdateFlowSourceInput) SetStreamId(v string) *UpdateFlowSourceInput { return s } +// SetVpcInterfaceName sets the VpcInterfaceName field's value. +func (s *UpdateFlowSourceInput) SetVpcInterfaceName(v string) *UpdateFlowSourceInput { + s.VpcInterfaceName = &v + return s +} + // SetWhitelistCidr sets the WhitelistCidr field's value. func (s *UpdateFlowSourceInput) SetWhitelistCidr(v string) *UpdateFlowSourceInput { s.WhitelistCidr = &v @@ -5301,6 +7855,182 @@ func (s *UpdateFlowSourceOutput) SetSource(v *Source) *UpdateFlowSourceOutput { return s } +// The settings for a VPC Source. +type VpcInterface struct { + _ struct{} `type:"structure"` + + // Immutable and has to be a unique against other VpcInterfaces in this Flow + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // IDs of the network interfaces created in customer's account by MediaConnect. + // + // NetworkInterfaceIds is a required field + NetworkInterfaceIds []*string `locationName:"networkInterfaceIds" type:"list" required:"true"` + + // Role Arn MediaConnect can assumes to create ENIs in customer's account + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // Security Group IDs to be used on ENI. + // + // SecurityGroupIds is a required field + SecurityGroupIds []*string `locationName:"securityGroupIds" type:"list" required:"true"` + + // Subnet must be in the AZ of the Flow + // + // SubnetId is a required field + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s VpcInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcInterface) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *VpcInterface) SetName(v string) *VpcInterface { + s.Name = &v + return s +} + +// SetNetworkInterfaceIds sets the NetworkInterfaceIds field's value. +func (s *VpcInterface) SetNetworkInterfaceIds(v []*string) *VpcInterface { + s.NetworkInterfaceIds = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *VpcInterface) SetRoleArn(v string) *VpcInterface { + s.RoleArn = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *VpcInterface) SetSecurityGroupIds(v []*string) *VpcInterface { + s.SecurityGroupIds = v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *VpcInterface) SetSubnetId(v string) *VpcInterface { + s.SubnetId = &v + return s +} + +// The settings for attaching a VPC interface to an output. +type VpcInterfaceAttachment struct { + _ struct{} `type:"structure"` + + // The name of the VPC interface to use for this output. 
+ VpcInterfaceName *string `locationName:"vpcInterfaceName" type:"string"` +} + +// String returns the string representation +func (s VpcInterfaceAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcInterfaceAttachment) GoString() string { + return s.String() +} + +// SetVpcInterfaceName sets the VpcInterfaceName field's value. +func (s *VpcInterfaceAttachment) SetVpcInterfaceName(v string) *VpcInterfaceAttachment { + s.VpcInterfaceName = &v + return s +} + +// Desired VPC Interface for a Flow +type VpcInterfaceRequest struct { + _ struct{} `type:"structure"` + + // The name of the VPC Interface. This value must be unique within the current + // flow. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // Role Arn MediaConnect can assumes to create ENIs in customer's account + // + // RoleArn is a required field + RoleArn *string `locationName:"roleArn" type:"string" required:"true"` + + // Security Group IDs to be used on ENI. + // + // SecurityGroupIds is a required field + SecurityGroupIds []*string `locationName:"securityGroupIds" type:"list" required:"true"` + + // Subnet must be in the AZ of the Flow + // + // SubnetId is a required field + SubnetId *string `locationName:"subnetId" type:"string" required:"true"` +} + +// String returns the string representation +func (s VpcInterfaceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpcInterfaceRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VpcInterfaceRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VpcInterfaceRequest"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.SecurityGroupIds == nil { + invalidParams.Add(request.NewErrParamRequired("SecurityGroupIds")) + } + if s.SubnetId == nil { + invalidParams.Add(request.NewErrParamRequired("SubnetId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *VpcInterfaceRequest) SetName(v string) *VpcInterfaceRequest { + s.Name = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *VpcInterfaceRequest) SetRoleArn(v string) *VpcInterfaceRequest { + s.RoleArn = &v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *VpcInterfaceRequest) SetSecurityGroupIds(v []*string) *VpcInterfaceRequest { + s.SecurityGroupIds = v + return s +} + +// SetSubnetId sets the SubnetId field's value. 
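// Editor's sketch — not part of this patch. VpcInterfaceRequest is the shape a
// caller supplies when asking MediaConnect to create ENIs in their account, for
// example when adding VPC interfaces to a flow (the consuming operation is not
// shown in this excerpt). Building one with the setters defined here and checking
// it with Validate might look like:
//
//	req := (&mediaconnect.VpcInterfaceRequest{}).
//		SetName("ingest-vpc-interface").
//		SetRoleArn("arn:aws:iam::111122223333:role/MediaConnectENIRole").
//		SetSecurityGroupIds([]*string{aws.String("sg-0123456789abcdef0")}).
//		SetSubnetId("subnet-0123456789abcdef0")
//	if err := req.Validate(); err != nil {
//		return err // Name, RoleArn, SecurityGroupIds, and SubnetId are all required
//	}
//
// The name, ARN, and IDs are placeholders chosen for illustration only.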
+func (s *VpcInterfaceRequest) SetSubnetId(v string) *VpcInterfaceRequest { + s.SubnetId = &v + return s +} + const ( // AlgorithmAes128 is a Algorithm enum value AlgorithmAes128 = "aes128" @@ -5312,6 +8042,43 @@ const ( AlgorithmAes256 = "aes256" ) +// Algorithm_Values returns all elements of the Algorithm enum +func Algorithm_Values() []string { + return []string{ + AlgorithmAes128, + AlgorithmAes192, + AlgorithmAes256, + } +} + +const ( + // DurationUnitsMonths is a DurationUnits enum value + DurationUnitsMonths = "MONTHS" +) + +// DurationUnits_Values returns all elements of the DurationUnits enum +func DurationUnits_Values() []string { + return []string{ + DurationUnitsMonths, + } +} + +const ( + // EntitlementStatusEnabled is a EntitlementStatus enum value + EntitlementStatusEnabled = "ENABLED" + + // EntitlementStatusDisabled is a EntitlementStatus enum value + EntitlementStatusDisabled = "DISABLED" +) + +// EntitlementStatus_Values returns all elements of the EntitlementStatus enum +func EntitlementStatus_Values() []string { + return []string{ + EntitlementStatusEnabled, + EntitlementStatusDisabled, + } +} + const ( // KeyTypeSpeke is a KeyType enum value KeyTypeSpeke = "speke" @@ -5320,6 +8087,26 @@ const ( KeyTypeStaticKey = "static-key" ) +// KeyType_Values returns all elements of the KeyType enum +func KeyType_Values() []string { + return []string{ + KeyTypeSpeke, + KeyTypeStaticKey, + } +} + +const ( + // PriceUnitsHourly is a PriceUnits enum value + PriceUnitsHourly = "HOURLY" +) + +// PriceUnits_Values returns all elements of the PriceUnits enum +func PriceUnits_Values() []string { + return []string{ + PriceUnitsHourly, + } +} + const ( // ProtocolZixiPush is a Protocol enum value ProtocolZixiPush = "zixi-push" @@ -5337,6 +8124,53 @@ const ( ProtocolRist = "rist" ) +// Protocol_Values returns all elements of the Protocol enum +func Protocol_Values() []string { + return []string{ + ProtocolZixiPush, + ProtocolRtpFec, + ProtocolRtp, + ProtocolZixiPull, + ProtocolRist, + } +} + +const ( + // ReservationStateActive is a ReservationState enum value + ReservationStateActive = "ACTIVE" + + // ReservationStateExpired is a ReservationState enum value + ReservationStateExpired = "EXPIRED" + + // ReservationStateProcessing is a ReservationState enum value + ReservationStateProcessing = "PROCESSING" + + // ReservationStateCanceled is a ReservationState enum value + ReservationStateCanceled = "CANCELED" +) + +// ReservationState_Values returns all elements of the ReservationState enum +func ReservationState_Values() []string { + return []string{ + ReservationStateActive, + ReservationStateExpired, + ReservationStateProcessing, + ReservationStateCanceled, + } +} + +const ( + // ResourceTypeMbpsOutboundBandwidth is a ResourceType enum value + ResourceTypeMbpsOutboundBandwidth = "Mbps_Outbound_Bandwidth" +) + +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeMbpsOutboundBandwidth, + } +} + const ( // SourceTypeOwned is a SourceType enum value SourceTypeOwned = "OWNED" @@ -5345,6 +8179,30 @@ const ( SourceTypeEntitled = "ENTITLED" ) +// SourceType_Values returns all elements of the SourceType enum +func SourceType_Values() []string { + return []string{ + SourceTypeOwned, + SourceTypeEntitled, + } +} + +const ( + // StateEnabled is a State enum value + StateEnabled = "ENABLED" + + // StateDisabled is a State enum value + StateDisabled = "DISABLED" +) + +// State_Values returns all elements of the State 
enum +func State_Values() []string { + return []string{ + StateEnabled, + StateDisabled, + } +} + const ( // StatusStandby is a Status enum value StatusStandby = "STANDBY" @@ -5367,3 +8225,16 @@ const ( // StatusError is a Status enum value StatusError = "ERROR" ) + +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusStandby, + StatusActive, + StatusUpdating, + StatusDeleting, + StatusStarting, + StatusStopping, + StatusError, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go index e4676ef36..849797382 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/api.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/api.go index a4f1ea306..a84f36d30 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/api.go @@ -2785,9 +2785,7 @@ func (s *Ac3Settings) SetSampleRate(v int64) *Ac3Settings { } // Accelerated transcoding can significantly speed up jobs with long, visually -// complex content. Outputs that use this feature incur pro-tier pricing. For -// information about feature limitations, see the AWS Elemental MediaConvert -// User Guide. +// complex content. type AccelerationSettings struct { _ struct{} `type:"structure"` @@ -3010,13 +3008,47 @@ func (s AssociateCertificateOutput) GoString() string { return s.String() } +// When you mimic a multi-channel audio layout with multiple mono-channel tracks, +// you can tag each channel layout manually. For example, you would tag the +// tracks that contain your left, right, and center audio with Left (L), Right +// (R), and Center (C), respectively. When you don't specify a value, MediaConvert +// labels your track as Center (C) by default. To use audio layout tagging, +// your output must be in a QuickTime (.mov) container; your audio codec must +// be AAC, WAV, or AIFF; and you must set up your audio track to have only one +// channel. +type AudioChannelTaggingSettings struct { + _ struct{} `type:"structure"` + + // You can add a tag for this mono-channel audio track to mimic its placement + // in a multi-channel layout. For example, if this track is the left surround + // channel, choose Left surround (LS). + ChannelTag *string `locationName:"channelTag" type:"string" enum:"AudioChannelTag"` +} + +// String returns the string representation +func (s AudioChannelTaggingSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AudioChannelTaggingSettings) GoString() string { + return s.String() +} + +// SetChannelTag sets the ChannelTag field's value. +func (s *AudioChannelTaggingSettings) SetChannelTag(v string) *AudioChannelTaggingSettings { + s.ChannelTag = &v + return s +} + // Audio codec settings (CodecSettings) under (AudioDescriptions) contains the // group of settings related to audio encoding. 
The settings in this group vary // depending on the value that you choose for Audio codec (Codec). For each // codec enum that you choose, define the corresponding settings object. The // following lists the codec enum, settings object pairs. * AAC, AacSettings // * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings -// * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings +// * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings * +// VORBIS, VorbisSettings * OPUS, OpusSettings type AudioCodecSettings struct { _ struct{} `type:"structure"` @@ -3056,6 +3088,14 @@ type AudioCodecSettings struct { // value MP3. Mp3Settings *Mp3Settings `locationName:"mp3Settings" type:"structure"` + // Required when you set Codec, under AudioDescriptions>CodecSettings, to the + // value OPUS. + OpusSettings *OpusSettings `locationName:"opusSettings" type:"structure"` + + // Required when you set Codec, under AudioDescriptions>CodecSettings, to the + // value Vorbis. + VorbisSettings *VorbisSettings `locationName:"vorbisSettings" type:"structure"` + // Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to // the value WAV. WavSettings *WavSettings `locationName:"wavSettings" type:"structure"` @@ -3109,6 +3149,16 @@ func (s *AudioCodecSettings) Validate() error { invalidParams.AddNested("Mp3Settings", err.(request.ErrInvalidParams)) } } + if s.OpusSettings != nil { + if err := s.OpusSettings.Validate(); err != nil { + invalidParams.AddNested("OpusSettings", err.(request.ErrInvalidParams)) + } + } + if s.VorbisSettings != nil { + if err := s.VorbisSettings.Validate(); err != nil { + invalidParams.AddNested("VorbisSettings", err.(request.ErrInvalidParams)) + } + } if s.WavSettings != nil { if err := s.WavSettings.Validate(); err != nil { invalidParams.AddNested("WavSettings", err.(request.ErrInvalidParams)) @@ -3169,6 +3219,18 @@ func (s *AudioCodecSettings) SetMp3Settings(v *Mp3Settings) *AudioCodecSettings return s } +// SetOpusSettings sets the OpusSettings field's value. +func (s *AudioCodecSettings) SetOpusSettings(v *OpusSettings) *AudioCodecSettings { + s.OpusSettings = v + return s +} + +// SetVorbisSettings sets the VorbisSettings field's value. +func (s *AudioCodecSettings) SetVorbisSettings(v *VorbisSettings) *AudioCodecSettings { + s.VorbisSettings = v + return s +} + // SetWavSettings sets the WavSettings field's value. func (s *AudioCodecSettings) SetWavSettings(v *WavSettings) *AudioCodecSettings { s.WavSettings = v @@ -3179,6 +3241,16 @@ func (s *AudioCodecSettings) SetWavSettings(v *WavSettings) *AudioCodecSettings type AudioDescription struct { _ struct{} `type:"structure"` + // When you mimic a multi-channel audio layout with multiple mono-channel tracks, + // you can tag each channel layout manually. For example, you would tag the + // tracks that contain your left, right, and center audio with Left (L), Right + // (R), and Center (C), respectively. When you don't specify a value, MediaConvert + // labels your track as Center (C) by default. To use audio layout tagging, + // your output must be in a QuickTime (.mov) container; your audio codec must + // be AAC, WAV, or AIFF; and you must set up your audio track to have only one + // channel. + AudioChannelTaggingSettings *AudioChannelTaggingSettings `locationName:"audioChannelTaggingSettings" type:"structure"` + // Advanced audio normalization settings. Ignore these settings unless you need // to comply with a loudness standard. 
AudioNormalizationSettings *AudioNormalizationSettings `locationName:"audioNormalizationSettings" type:"structure"` @@ -3214,7 +3286,8 @@ type AudioDescription struct { // codec enum that you choose, define the corresponding settings object. The // following lists the codec enum, settings object pairs. * AAC, AacSettings // * MP2, Mp2Settings * MP3, Mp3Settings * WAV, WavSettings * AIFF, AiffSettings - // * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings + // * AC3, Ac3Settings * EAC3, Eac3Settings * EAC3_ATMOS, Eac3AtmosSettings * + // VORBIS, VorbisSettings * OPUS, OpusSettings CodecSettings *AudioCodecSettings `locationName:"codecSettings" type:"structure"` // Specify the language for this audio output track. The service puts this language @@ -3287,6 +3360,12 @@ func (s *AudioDescription) Validate() error { return nil } +// SetAudioChannelTaggingSettings sets the AudioChannelTaggingSettings field's value. +func (s *AudioDescription) SetAudioChannelTaggingSettings(v *AudioChannelTaggingSettings) *AudioDescription { + s.AudioChannelTaggingSettings = v + return s +} + // SetAudioNormalizationSettings sets the AudioNormalizationSettings field's value. func (s *AudioDescription) SetAudioNormalizationSettings(v *AudioNormalizationSettings) *AudioDescription { s.AudioNormalizationSettings = v @@ -3619,30 +3698,47 @@ func (s *AudioSelectorGroup) SetAudioSelectorNames(v []*string) *AudioSelectorGr return s } -// Settings for Avail Blanking -type AvailBlanking struct { +// Settings for quality-defined variable bitrate encoding with the AV1 codec. +// Required when you set Rate control mode to QVBR. Not valid when you set Rate +// control mode to a value other than QVBR, or when you don't define Rate control +// mode. +type Av1QvbrSettings struct { _ struct{} `type:"structure"` - // Blanking image to be used. Leave empty for solid black. Only bmp and png - // images are supported. - AvailBlankingImage *string `locationName:"availBlankingImage" min:"14" type:"string"` + // Required when you use QVBR rate control mode. That is, when you specify qvbrSettings + // within av1Settings. Specify the general target quality level for this output, + // from 1 to 10. Use higher numbers for greater quality. Level 10 results in + // nearly lossless compression. The quality level for most broadcast-quality + // transcodes is between 6 and 9. Optionally, to specify a value between whole + // numbers, also provide a value for the setting qvbrQualityLevelFineTune. For + // example, if you want your QVBR quality level to be 7.33, set qvbrQualityLevel + // to 7 and set qvbrQualityLevelFineTune to .33. + QvbrQualityLevel *int64 `locationName:"qvbrQualityLevel" min:"1" type:"integer"` + + // Optional. Specify a value here to set the QVBR quality to a level that is + // between whole numbers. For example, if you want your QVBR quality level to + // be 7.33, set qvbrQualityLevel to 7 and set qvbrQualityLevelFineTune to .33. + // MediaConvert rounds your QVBR quality level to the nearest third of a whole + // number. For example, if you set qvbrQualityLevel to 7 and you set qvbrQualityLevelFineTune + // to .25, your actual QVBR quality level is 7.33. 
+ QvbrQualityLevelFineTune *float64 `locationName:"qvbrQualityLevelFineTune" type:"double"` } // String returns the string representation -func (s AvailBlanking) String() string { +func (s Av1QvbrSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AvailBlanking) GoString() string { +func (s Av1QvbrSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AvailBlanking) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AvailBlanking"} - if s.AvailBlankingImage != nil && len(*s.AvailBlankingImage) < 14 { - invalidParams.Add(request.NewErrParamMinLen("AvailBlankingImage", 14)) +func (s *Av1QvbrSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Av1QvbrSettings"} + if s.QvbrQualityLevel != nil && *s.QvbrQualityLevel < 1 { + invalidParams.Add(request.NewErrParamMinValue("QvbrQualityLevel", 1)) } if invalidParams.Len() > 0 { @@ -3651,120 +3747,534 @@ func (s *AvailBlanking) Validate() error { return nil } -// SetAvailBlankingImage sets the AvailBlankingImage field's value. -func (s *AvailBlanking) SetAvailBlankingImage(v string) *AvailBlanking { - s.AvailBlankingImage = &v +// SetQvbrQualityLevel sets the QvbrQualityLevel field's value. +func (s *Av1QvbrSettings) SetQvbrQualityLevel(v int64) *Av1QvbrSettings { + s.QvbrQualityLevel = &v return s } -type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// SetQvbrQualityLevelFineTune sets the QvbrQualityLevelFineTune field's value. +func (s *Av1QvbrSettings) SetQvbrQualityLevelFineTune(v float64) *Av1QvbrSettings { + s.QvbrQualityLevelFineTune = &v + return s +} - Message_ *string `locationName:"message" type:"string"` +// Required when you set Codec, under VideoDescription>CodecSettings to the +// value AV1. +type Av1Settings struct { + _ struct{} `type:"structure"` + + // Specify the strength of any adaptive quantization filters that you enable. + // The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization). + AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"Av1AdaptiveQuantization"` + + // If you are using the console, use the Framerate setting to specify the frame + // rate for this output. If you want to keep the same frame rate as the input + // video, choose Follow source. If you want to do frame rate conversion, choose + // a frame rate from the dropdown list or choose Custom. The framerates shown + // in the dropdown list are decimal approximations of fractions. If you choose + // Custom, specify your frame rate as a fraction. If you are creating your transcoding + // job specification as a JSON file without the console, use FramerateControl + // to specify which value the service uses for the frame rate for this output. + // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate + // from the input. Choose SPECIFIED if you want the service to use the frame + // rate you specify in the settings FramerateNumerator and FramerateDenominator. + FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Av1FramerateControl"` + + // Choose the method that you want MediaConvert to use when increasing or decreasing + // the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically + // simple conversions, such as 60 fps to 30 fps. 
For numerically complex conversions, + // you can use interpolate (INTERPOLATE) to avoid stutter. This results in a + // smooth picture, but might introduce undesirable video artifacts. For complex + // frame rate conversions, especially if your source video has already been + // converted from its original cadence, use FrameFormer (FRAMEFORMER) to do + // motion-compensated interpolation. FrameFormer chooses the best conversion + // method frame by frame. Note that using FrameFormer increases the transcoding + // time and incurs a significant add-on cost. + FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Av1FramerateConversionAlgorithm"` + + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateDenominator to specify the denominator of this fraction. In this + // example, use 1001 for the value of FramerateDenominator. When you use the + // console for transcode jobs that use frame rate conversion, provide the value + // as a decimal number for Framerate. In this example, specify 23.976. + FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` + + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateNumerator to specify the numerator of this fraction. In this example, + // use 24000 for the value of FramerateNumerator. When you use the console for + // transcode jobs that use frame rate conversion, provide the value as a decimal + // number for Framerate. In this example, specify 23.976. + FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` + + // Specify the GOP length (keyframe interval) in frames. With AV1, MediaConvert + // doesn't support GOP length in seconds. This value must be greater than zero + // and preferably equal to 1 + ((numberBFrames + 1) * x), where x is an integer + // value. + GopSize *float64 `locationName:"gopSize" type:"double"` + + // Maximum bitrate in bits/second. For example, enter five megabits per second + // as 5000000. Required when Rate control mode is QVBR. + MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"` + + // Specify the number of B-frames. With AV1, MediaConvert supports only 7 or + // 15. + NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" min:"7" type:"integer"` + + // Settings for quality-defined variable bitrate encoding with the AV1 codec. + // Required when you set Rate control mode to QVBR. Not valid when you set Rate + // control mode to a value other than QVBR, or when you don't define Rate control + // mode. + QvbrSettings *Av1QvbrSettings `locationName:"qvbrSettings" type:"structure"` + + // 'With AV1 outputs, for rate control mode, MediaConvert supports only quality-defined + // variable bitrate (QVBR). You can''t use CBR or VBR.' + RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Av1RateControlMode"` + + // Specify the number of slices per picture. This value must be 1, 2, 4, 8, + // 16, or 32. For progressive pictures, this value must be less than or equal + // to the number of macroblock rows. For interlaced pictures, this value must + // be less than or equal to half the number of macroblock rows. 
+ Slices *int64 `locationName:"slices" min:"1" type:"integer"` + + // Keep the default value, Enabled (ENABLED), to adjust quantization within + // each frame based on spatial variation of content complexity. When you enable + // this feature, the encoder uses fewer bits on areas that can sustain more + // distortion with no noticeable visual degradation and uses more bits on areas + // where any small distortion will be noticeable. For example, complex textured + // blocks are encoded with fewer bits and smooth textured blocks are encoded + // with more bits. Enabling this feature will almost always improve your video + // quality. Note, though, that this feature doesn't take into account where + // the viewer's attention is likely to be. If viewers are likely to be focusing + // their attention on a part of the screen with a lot of complex texture, you + // might choose to disable this feature. Related setting: When you enable spatial + // adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) + // depending on your content. For homogeneous content, such as cartoons and + // video games, set it to Low. For content with a wider variety of textures, + // set it to High or Higher. + SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"Av1SpatialAdaptiveQuantization"` } // String returns the string representation -func (s BadRequestException) String() string { +func (s Av1Settings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s BadRequestException) GoString() string { +func (s Av1Settings) GoString() string { return s.String() } -func newErrorBadRequestException(v protocol.ResponseMetadata) error { - return &BadRequestException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *Av1Settings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Av1Settings"} + if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) + } + if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1)) + } + if s.MaxBitrate != nil && *s.MaxBitrate < 1000 { + invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000)) + } + if s.NumberBFramesBetweenReferenceFrames != nil && *s.NumberBFramesBetweenReferenceFrames < 7 { + invalidParams.Add(request.NewErrParamMinValue("NumberBFramesBetweenReferenceFrames", 7)) + } + if s.Slices != nil && *s.Slices < 1 { + invalidParams.Add(request.NewErrParamMinValue("Slices", 1)) + } + if s.QvbrSettings != nil { + if err := s.QvbrSettings.Validate(); err != nil { + invalidParams.AddNested("QvbrSettings", err.(request.ErrInvalidParams)) + } } -} -// Code returns the exception type name. -func (s BadRequestException) Code() string { - return "BadRequestException" + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// Message returns the exception's message. -func (s BadRequestException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetAdaptiveQuantization sets the AdaptiveQuantization field's value. +func (s *Av1Settings) SetAdaptiveQuantization(v string) *Av1Settings { + s.AdaptiveQuantization = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. 
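// Editor's sketch — not part of this patch. Putting the QVBR pieces above
// together: with AV1, QVBR is the only supported rate control mode, and a
// fractional quality level is expressed as a whole number plus a fine-tune value
// (7 and .33 give an effective level of 7.33, as described in the field docs).
// A minimal configuration, with the bitrate value assumed for illustration:
//
//	av1 := (&mediaconvert.Av1Settings{}).
//		SetRateControlMode("QVBR"). // only supported mode for AV1; the package also defines an enum constant for it
//		SetMaxBitrate(5000000). // required when rate control mode is QVBR; 5 Mb/s as an example
//		SetQvbrSettings((&mediaconvert.Av1QvbrSettings{}).
//			SetQvbrQualityLevel(7).
//			SetQvbrQualityLevelFineTune(0.33))
//	if err := av1.Validate(); err != nil {
//		return err
//	}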
-func (s BadRequestException) OrigErr() error { - return nil +// SetFramerateControl sets the FramerateControl field's value. +func (s *Av1Settings) SetFramerateControl(v string) *Av1Settings { + s.FramerateControl = &v + return s } -func (s BadRequestException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. +func (s *Av1Settings) SetFramerateConversionAlgorithm(v string) *Av1Settings { + s.FramerateConversionAlgorithm = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +// SetFramerateDenominator sets the FramerateDenominator field's value. +func (s *Av1Settings) SetFramerateDenominator(v int64) *Av1Settings { + s.FramerateDenominator = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +// SetFramerateNumerator sets the FramerateNumerator field's value. +func (s *Av1Settings) SetFramerateNumerator(v int64) *Av1Settings { + s.FramerateNumerator = &v + return s } -// Burn-In Destination Settings. -type BurninDestinationSettings struct { - _ struct{} `type:"structure"` +// SetGopSize sets the GopSize field's value. +func (s *Av1Settings) SetGopSize(v float64) *Av1Settings { + s.GopSize = &v + return s +} - // If no explicit x_position or y_position is provided, setting alignment to - // centered will place the captions at the bottom center of the output. Similarly, - // setting a left alignment will align captions to the bottom left of the output. - // If x and y positions are given in conjunction with the alignment parameter, - // the font will be justified (either left or centered) relative to those coordinates. - // This option is not valid for source captions that are STL, 608/embedded or - // teletext. These source settings are already pre-defined by the caption stream. - // All burn-in and DVB-Sub font settings must match. - Alignment *string `locationName:"alignment" type:"string" enum:"BurninSubtitleAlignment"` +// SetMaxBitrate sets the MaxBitrate field's value. +func (s *Av1Settings) SetMaxBitrate(v int64) *Av1Settings { + s.MaxBitrate = &v + return s +} - // Specifies the color of the rectangle behind the captions.All burn-in and - // DVB-Sub font settings must match. - BackgroundColor *string `locationName:"backgroundColor" type:"string" enum:"BurninSubtitleBackgroundColor"` +// SetNumberBFramesBetweenReferenceFrames sets the NumberBFramesBetweenReferenceFrames field's value. +func (s *Av1Settings) SetNumberBFramesBetweenReferenceFrames(v int64) *Av1Settings { + s.NumberBFramesBetweenReferenceFrames = &v + return s +} - // Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. - // Leaving this parameter blank is equivalent to setting it to 0 (transparent). - // All burn-in and DVB-Sub font settings must match. - BackgroundOpacity *int64 `locationName:"backgroundOpacity" type:"integer"` +// SetQvbrSettings sets the QvbrSettings field's value. +func (s *Av1Settings) SetQvbrSettings(v *Av1QvbrSettings) *Av1Settings { + s.QvbrSettings = v + return s +} - // Specifies the color of the burned-in captions. This option is not valid for - // source captions that are STL, 608/embedded or teletext. These source settings - // are already pre-defined by the caption stream. 
All burn-in and DVB-Sub font - // settings must match. - FontColor *string `locationName:"fontColor" type:"string" enum:"BurninSubtitleFontColor"` +// SetRateControlMode sets the RateControlMode field's value. +func (s *Av1Settings) SetRateControlMode(v string) *Av1Settings { + s.RateControlMode = &v + return s +} - // Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent.All - // burn-in and DVB-Sub font settings must match. - FontOpacity *int64 `locationName:"fontOpacity" type:"integer"` +// SetSlices sets the Slices field's value. +func (s *Av1Settings) SetSlices(v int64) *Av1Settings { + s.Slices = &v + return s +} - // Font resolution in DPI (dots per inch); default is 96 dpi.All burn-in and - // DVB-Sub font settings must match. - FontResolution *int64 `locationName:"fontResolution" min:"96" type:"integer"` +// SetSpatialAdaptiveQuantization sets the SpatialAdaptiveQuantization field's value. +func (s *Av1Settings) SetSpatialAdaptiveQuantization(v string) *Av1Settings { + s.SpatialAdaptiveQuantization = &v + return s +} - // Provide the font script, using an ISO 15924 script code, if the LanguageCode - // is not sufficient for determining the script type. Where LanguageCode or - // CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset. This is - // used to help determine the appropriate font for rendering burn-in captions. - FontScript *string `locationName:"fontScript" type:"string" enum:"FontScript"` +// Settings for Avail Blanking +type AvailBlanking struct { + _ struct{} `type:"structure"` - // A positive integer indicates the exact font size in points. Set to 0 for - // automatic font size selection. All burn-in and DVB-Sub font settings must - // match. - FontSize *int64 `locationName:"fontSize" type:"integer"` + // Blanking image to be used. Leave empty for solid black. Only bmp and png + // images are supported. + AvailBlankingImage *string `locationName:"availBlankingImage" min:"14" type:"string"` +} - // Specifies font outline color. This option is not valid for source captions - // that are either 608/embedded or teletext. These source settings are already - // pre-defined by the caption stream. All burn-in and DVB-Sub font settings - // must match. - OutlineColor *string `locationName:"outlineColor" type:"string" enum:"BurninSubtitleOutlineColor"` +// String returns the string representation +func (s AvailBlanking) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvailBlanking) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AvailBlanking) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AvailBlanking"} + if s.AvailBlankingImage != nil && len(*s.AvailBlankingImage) < 14 { + invalidParams.Add(request.NewErrParamMinLen("AvailBlankingImage", 14)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvailBlankingImage sets the AvailBlankingImage field's value. +func (s *AvailBlanking) SetAvailBlankingImage(v string) *AvailBlanking { + s.AvailBlankingImage = &v + return s +} + +// Required when you set your output video codec to AVC-Intra. For more information +// about the AVC-I settings, see the relevant specification. For detailed information +// about SD and HD in AVC-I, see https://ieeexplore.ieee.org/document/7290936. +type AvcIntraSettings struct { + _ struct{} `type:"structure"` + + // Specify the AVC-Intra class of your output. 
The AVC-Intra class selection + // determines the output video bit rate depending on the frame rate of the output. + // Outputs with higher class values have higher bitrates and improved image + // quality. + AvcIntraClass *string `locationName:"avcIntraClass" type:"string" enum:"AvcIntraClass"` + + // If you are using the console, use the Framerate setting to specify the frame + // rate for this output. If you want to keep the same frame rate as the input + // video, choose Follow source. If you want to do frame rate conversion, choose + // a frame rate from the dropdown list or choose Custom. The framerates shown + // in the dropdown list are decimal approximations of fractions. If you choose + // Custom, specify your frame rate as a fraction. If you are creating your transcoding + // job specification as a JSON file without the console, use FramerateControl + // to specify which value the service uses for the frame rate for this output. + // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate + // from the input. Choose SPECIFIED if you want the service to use the frame + // rate you specify in the settings FramerateNumerator and FramerateDenominator. + FramerateControl *string `locationName:"framerateControl" type:"string" enum:"AvcIntraFramerateControl"` + + // Choose the method that you want MediaConvert to use when increasing or decreasing + // the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically + // simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, + // you can use interpolate (INTERPOLATE) to avoid stutter. This results in a + // smooth picture, but might introduce undesirable video artifacts. For complex + // frame rate conversions, especially if your source video has already been + // converted from its original cadence, use FrameFormer (FRAMEFORMER) to do + // motion-compensated interpolation. FrameFormer chooses the best conversion + // method frame by frame. Note that using FrameFormer increases the transcoding + // time and incurs a significant add-on cost. + FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"AvcIntraFramerateConversionAlgorithm"` + + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateDenominator to specify the denominator of this fraction. In this + // example, use 1001 for the value of FramerateDenominator. When you use the + // console for transcode jobs that use frame rate conversion, provide the value + // as a decimal number for Framerate. In this example, specify 23.976. + FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` + + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateNumerator to specify the numerator of this fraction. In this example, + // use 24000 for the value of FramerateNumerator. When you use the console for + // transcode jobs that use frame rate conversion, provide the value as a decimal + // number for Framerate. In this example, specify 23.976. + FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"` + + // Choose the scan line type for the output. Keep the default value, Progressive + // (PROGRESSIVE) to create a progressive output, regardless of the scan type + // of your input. 
Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) + // to create an output that's interlaced with the same field polarity throughout. + // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) + // to produce outputs with the same field polarity as the source. For jobs that + // have multiple inputs, the output field polarity might change over the course + // of the output. Follow behavior depends on the input scan type. If the source + // is interlaced, the output will be interlaced with the same polarity as the + // source. If the source is progressive, the output will be interlaced with + // top field bottom field first, depending on which of the Follow options you + // choose. + InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"AvcIntraInterlaceMode"` + + // Ignore this setting unless your input frame rate is 23.976 or 24 frames per + // second (fps). Enable slow PAL to create a 25 fps output. When you enable + // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples + // your audio to keep it synchronized with the video. Note that enabling this + // setting will slightly reduce the duration of your video. Required settings: + // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) + // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to + // 1. + SlowPal *string `locationName:"slowPal" type:"string" enum:"AvcIntraSlowPal"` + + // When you do frame rate conversion from 23.976 frames per second (fps) to + // 29.97 fps, and your output scan type is interlaced, you can optionally enable + // hard telecine (HARD) to create a smoother picture. When you keep the default + // value, None (NONE), MediaConvert does a standard frame rate conversion to + // 29.97 without doing anything with the field polarity to create a smoother + // picture. + Telecine *string `locationName:"telecine" type:"string" enum:"AvcIntraTelecine"` +} + +// String returns the string representation +func (s AvcIntraSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AvcIntraSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AvcIntraSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AvcIntraSettings"} + if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) + } + if s.FramerateNumerator != nil && *s.FramerateNumerator < 24 { + invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 24)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAvcIntraClass sets the AvcIntraClass field's value. +func (s *AvcIntraSettings) SetAvcIntraClass(v string) *AvcIntraSettings { + s.AvcIntraClass = &v + return s +} + +// SetFramerateControl sets the FramerateControl field's value. +func (s *AvcIntraSettings) SetFramerateControl(v string) *AvcIntraSettings { + s.FramerateControl = &v + return s +} + +// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. +func (s *AvcIntraSettings) SetFramerateConversionAlgorithm(v string) *AvcIntraSettings { + s.FramerateConversionAlgorithm = &v + return s +} + +// SetFramerateDenominator sets the FramerateDenominator field's value. 
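// Editor's sketch — not part of this patch. The slow PAL note above translates to
// the following field combination when building AvcIntraSettings in code (enum
// string values are written literally here; the package also defines constants
// for them):
//
//	avcIntra := (&mediaconvert.AvcIntraSettings{}).
//		SetSlowPal("ENABLED"). // assumed enum string for "enable slow PAL"
//		SetFramerateControl("SPECIFIED").
//		SetFramerateNumerator(25).
//		SetFramerateDenominator(1)
//	if err := avcIntra.Validate(); err != nil {
//		return err
//	}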
+func (s *AvcIntraSettings) SetFramerateDenominator(v int64) *AvcIntraSettings { + s.FramerateDenominator = &v + return s +} + +// SetFramerateNumerator sets the FramerateNumerator field's value. +func (s *AvcIntraSettings) SetFramerateNumerator(v int64) *AvcIntraSettings { + s.FramerateNumerator = &v + return s +} + +// SetInterlaceMode sets the InterlaceMode field's value. +func (s *AvcIntraSettings) SetInterlaceMode(v string) *AvcIntraSettings { + s.InterlaceMode = &v + return s +} + +// SetSlowPal sets the SlowPal field's value. +func (s *AvcIntraSettings) SetSlowPal(v string) *AvcIntraSettings { + s.SlowPal = &v + return s +} + +// SetTelecine sets the Telecine field's value. +func (s *AvcIntraSettings) SetTelecine(v string) *AvcIntraSettings { + s.Telecine = &v + return s +} + +type BadRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BadRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BadRequestException) GoString() string { + return s.String() +} + +func newErrorBadRequestException(v protocol.ResponseMetadata) error { + return &BadRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *BadRequestException) Code() string { + return "BadRequestException" +} + +// Message returns the exception's message. +func (s *BadRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *BadRequestException) OrigErr() error { + return nil +} + +func (s *BadRequestException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Burn-In Destination Settings. +type BurninDestinationSettings struct { + _ struct{} `type:"structure"` + + // If no explicit x_position or y_position is provided, setting alignment to + // centered will place the captions at the bottom center of the output. Similarly, + // setting a left alignment will align captions to the bottom left of the output. + // If x and y positions are given in conjunction with the alignment parameter, + // the font will be justified (either left or centered) relative to those coordinates. + // This option is not valid for source captions that are STL, 608/embedded or + // teletext. These source settings are already pre-defined by the caption stream. + // All burn-in and DVB-Sub font settings must match. + Alignment *string `locationName:"alignment" type:"string" enum:"BurninSubtitleAlignment"` + + // Specifies the color of the rectangle behind the captions.All burn-in and + // DVB-Sub font settings must match. + BackgroundColor *string `locationName:"backgroundColor" type:"string" enum:"BurninSubtitleBackgroundColor"` + + // Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. + // Leaving this parameter blank is equivalent to setting it to 0 (transparent). + // All burn-in and DVB-Sub font settings must match. 
+ BackgroundOpacity *int64 `locationName:"backgroundOpacity" type:"integer"` + + // Specifies the color of the burned-in captions. This option is not valid for + // source captions that are STL, 608/embedded or teletext. These source settings + // are already pre-defined by the caption stream. All burn-in and DVB-Sub font + // settings must match. + FontColor *string `locationName:"fontColor" type:"string" enum:"BurninSubtitleFontColor"` + + // Specifies the opacity of the burned-in captions. 255 is opaque; 0 is transparent.All + // burn-in and DVB-Sub font settings must match. + FontOpacity *int64 `locationName:"fontOpacity" type:"integer"` + + // Font resolution in DPI (dots per inch); default is 96 dpi.All burn-in and + // DVB-Sub font settings must match. + FontResolution *int64 `locationName:"fontResolution" min:"96" type:"integer"` + + // Provide the font script, using an ISO 15924 script code, if the LanguageCode + // is not sufficient for determining the script type. Where LanguageCode or + // CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset. This is + // used to help determine the appropriate font for rendering burn-in captions. + FontScript *string `locationName:"fontScript" type:"string" enum:"FontScript"` + + // A positive integer indicates the exact font size in points. Set to 0 for + // automatic font size selection. All burn-in and DVB-Sub font settings must + // match. + FontSize *int64 `locationName:"fontSize" type:"integer"` + + // Specifies font outline color. This option is not valid for source captions + // that are either 608/embedded or teletext. These source settings are already + // pre-defined by the caption stream. All burn-in and DVB-Sub font settings + // must match. + OutlineColor *string `locationName:"outlineColor" type:"string" enum:"BurninSubtitleOutlineColor"` // Specifies font outline size in pixels. This option is not valid for source // captions that are either 608/embedded or teletext. These source settings @@ -4384,6 +4894,65 @@ func (s *CaptionSelector) SetSourceSettings(v *CaptionSourceSettings) *CaptionSe return s } +// Ignore this setting unless your input captions format is SCC. To have the +// service compensate for differing frame rates between your input captions +// and input video, specify the frame rate of the captions file. Specify this +// value as a fraction, using the settings Framerate numerator (framerateNumerator) +// and Framerate denominator (framerateDenominator). For example, you might +// specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, +// or 30000 / 1001 for 29.97 fps. +type CaptionSourceFramerate struct { + _ struct{} `type:"structure"` + + // Specify the denominator of the fraction that represents the frame rate for + // the setting Caption source frame rate (CaptionSourceFramerate). Use this + // setting along with the setting Framerate numerator (framerateNumerator). + FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` + + // Specify the numerator of the fraction that represents the frame rate for + // the setting Caption source frame rate (CaptionSourceFramerate). Use this + // setting along with the setting Framerate denominator (framerateDenominator). 
+ FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` +} + +// String returns the string representation +func (s CaptionSourceFramerate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CaptionSourceFramerate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CaptionSourceFramerate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CaptionSourceFramerate"} + if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) + } + if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFramerateDenominator sets the FramerateDenominator field's value. +func (s *CaptionSourceFramerate) SetFramerateDenominator(v int64) *CaptionSourceFramerate { + s.FramerateDenominator = &v + return s +} + +// SetFramerateNumerator sets the FramerateNumerator field's value. +func (s *CaptionSourceFramerate) SetFramerateNumerator(v int64) *CaptionSourceFramerate { + s.FramerateNumerator = &v + return s +} + // If your input captions are SCC, TTML, STL, SMI, SRT, or IMSC in an xml file, // specify the URI of the input captions source file. If your input captions // are IMSC in an IMF package, use TrackSourceSettings instead of FileSoureSettings. @@ -4998,10 +5567,11 @@ type ColorCorrector struct { Brightness *int64 `locationName:"brightness" min:"1" type:"integer"` // Specify the color space you want for this output. The service supports conversion - // between HDR formats, between SDR formats, and from SDR to HDR. The service - // doesn't support conversion from HDR to SDR. SDR to HDR conversion doesn't - // upgrade the dynamic range. The converted video has an HDR format, but visually - // appears the same as an unconverted output. + // between HDR formats, between SDR formats, from SDR to HDR, and from HDR to + // SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted + // video has an HDR format, but visually appears the same as an unconverted + // output. HDR to SDR conversion uses Elemental tone mapping technology to approximate + // the outcome of manually regrading from HDR to SDR. ColorSpaceConversion *string `locationName:"colorSpaceConversion" type:"string" enum:"ColorSpaceConversion"` // Contrast level. @@ -5097,8 +5667,8 @@ func (s *ColorCorrector) SetSaturation(v int64) *ColorCorrector { } type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5115,17 +5685,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. 
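+// Illustrative sketch, not part of the generated code: the HDR-to-SDR conversion
+// described above for ColorCorrector could be requested by forcing an SDR color
+// space; the enum string below is assumed from the ColorSpaceConversion values.
+//
+//    colorCorrector := (&ColorCorrector{}).SetColorSpaceConversion("FORCE_709")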
-func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5133,22 +5703,22 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } // Container specific settings. @@ -5189,6 +5759,9 @@ type ContainerSettings struct { // Settings for MP4 segments in DASH MpdSettings *MpdSettings `locationName:"mpdSettings" type:"structure"` + + // MXF settings + MxfSettings *MxfSettings `locationName:"mxfSettings" type:"structure"` } // String returns the string representation @@ -5269,14 +5842,20 @@ func (s *ContainerSettings) SetMpdSettings(v *MpdSettings) *ContainerSettings { return s } +// SetMxfSettings sets the MxfSettings field's value. +func (s *ContainerSettings) SetMxfSettings(v *MxfSettings) *ContainerSettings { + s.MxfSettings = v + return s +} + // Send your create job request with your job settings and IAM role. Optionally, // include user metadata and the ARN for the queue. type CreateJobInput struct { _ struct{} `type:"structure"` - // Accelerated transcoding can significantly speed up jobs with long, visually - // complex content. Outputs that use this feature incur pro-tier pricing. For - // information about feature limitations, see the AWS Elemental MediaConvert + // Optional. Accelerated transcoding can significantly speed up jobs with long, + // visually complex content. Outputs that use this feature incur pro-tier pricing. + // For information about feature limitations, see the AWS Elemental MediaConvert // User Guide. AccelerationSettings *AccelerationSettings `locationName:"accelerationSettings" type:"structure"` @@ -5287,27 +5866,34 @@ type CreateJobInput struct { // for this field, your job outputs will appear on the billing report unsorted. BillingTagsSource *string `locationName:"billingTagsSource" type:"string" enum:"BillingTagsSource"` - // Idempotency token for CreateJob operation. + // Optional. Idempotency token for CreateJob operation. ClientRequestToken *string `locationName:"clientRequestToken" type:"string" idempotencyToken:"true"` - // When you create a job, you can either specify a job template or specify the - // transcoding settings individually + // Optional. Use queue hopping to avoid overly long waits in the backlog of + // the queue that you submit your job to. Specify an alternate queue and the + // maximum time that your job will wait in the initial queue before hopping. + // For more information about this feature, see the AWS Elemental MediaConvert + // User Guide. + HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"` + + // Optional. When you create a job, you can either specify a job template or + // specify the transcoding settings individually. 
JobTemplate *string `locationName:"jobTemplate" type:"string"` - // Specify the relative priority for this job. In any given queue, the service - // begins processing the job with the highest value first. When more than one - // job has the same priority, the service begins processing the job that you - // submitted first. If you don't specify a priority, the service uses the default - // value 0. + // Optional. Specify the relative priority for this job. In any given queue, + // the service begins processing the job with the highest value first. When + // more than one job has the same priority, the service begins processing the + // job that you submitted first. If you don't specify a priority, the service + // uses the default value 0. Priority *int64 `locationName:"priority" type:"integer"` // Optional. When you create a job, you can specify a queue to send it to. If // you don't specify, the job will go to the default queue. For more about queues, - // see the User Guide topic at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html. + // see the User Guide topic at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html. Queue *string `locationName:"queue" type:"string"` // Required. The IAM role you use for creating this job. For details about permissions, - // see the User Guide topic at the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html. + // see the User Guide topic at the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html. // // Role is a required field Role *string `locationName:"role" type:"string" required:"true"` @@ -5317,24 +5903,25 @@ type CreateJobInput struct { // Settings is a required field Settings *JobSettings `locationName:"settings" type:"structure" required:"true"` - // Enable this setting when you run a test job to estimate how many reserved - // transcoding slots (RTS) you need. When this is enabled, MediaConvert runs - // your job from an on-demand queue with similar performance to what you will - // see with one RTS in a reserved queue. This setting is disabled by default. + // Optional. Enable this setting when you run a test job to estimate how many + // reserved transcoding slots (RTS) you need. When this is enabled, MediaConvert + // runs your job from an on-demand queue with similar performance to what you + // will see with one RTS in a reserved queue. This setting is disabled by default. SimulateReservedQueue *string `locationName:"simulateReservedQueue" type:"string" enum:"SimulateReservedQueue"` - // Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch - // Events. Set the interval, in seconds, between status updates. MediaConvert - // sends an update at this interval from the time the service begins processing - // your job to the time it completes the transcode or encounters an error. + // Optional. Specify how often MediaConvert sends STATUS_UPDATE events to Amazon + // CloudWatch Events. Set the interval, in seconds, between status updates. + // MediaConvert sends an update at this interval from the time the service begins + // processing your job to the time it completes the transcode or encounters + // an error. StatusUpdateInterval *string `locationName:"statusUpdateInterval" type:"string" enum:"StatusUpdateInterval"` - // The tags that you want to add to the resource. You can tag resources with - // a key-value pair or with only a key. + // Optional. The tags that you want to add to the resource. 
You can tag resources + // with a key-value pair or with only a key. Tags map[string]*string `locationName:"tags" type:"map"` - // User-defined metadata that you want to associate with an MediaConvert job. - // You specify metadata in key/value pairs. + // Optional. User-defined metadata that you want to associate with an MediaConvert + // job. You specify metadata in key/value pairs. UserMetadata map[string]*string `locationName:"userMetadata" type:"map"` } @@ -5365,6 +5952,16 @@ func (s *CreateJobInput) Validate() error { invalidParams.AddNested("AccelerationSettings", err.(request.ErrInvalidParams)) } } + if s.HopDestinations != nil { + for i, v := range s.HopDestinations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HopDestinations", i), err.(request.ErrInvalidParams)) + } + } + } if s.Settings != nil { if err := s.Settings.Validate(); err != nil { invalidParams.AddNested("Settings", err.(request.ErrInvalidParams)) @@ -5395,6 +5992,12 @@ func (s *CreateJobInput) SetClientRequestToken(v string) *CreateJobInput { return s } +// SetHopDestinations sets the HopDestinations field's value. +func (s *CreateJobInput) SetHopDestinations(v []*HopDestination) *CreateJobInput { + s.HopDestinations = v + return s +} + // SetJobTemplate sets the JobTemplate field's value. func (s *CreateJobInput) SetJobTemplate(v string) *CreateJobInput { s.JobTemplate = &v @@ -5454,7 +6057,7 @@ type CreateJobOutput struct { _ struct{} `type:"structure"` // Each job converts an input file into an output file or files. For more information, - // see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html + // see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html Job *Job `locationName:"job" type:"structure"` } @@ -5492,6 +6095,13 @@ type CreateJobTemplateInput struct { // Optional. A description of the job template you are creating. Description *string `locationName:"description" type:"string"` + // Optional. Use queue hopping to avoid overly long waits in the backlog of + // the queue that you submit your job to. Specify an alternate queue and the + // maximum time that your job will wait in the initial queue before hopping. + // For more information about this feature, see the AWS Elemental MediaConvert + // User Guide. + HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"` + // The name of the job template you are creating. // // Name is a required field @@ -5552,6 +6162,16 @@ func (s *CreateJobTemplateInput) Validate() error { invalidParams.AddNested("AccelerationSettings", err.(request.ErrInvalidParams)) } } + if s.HopDestinations != nil { + for i, v := range s.HopDestinations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HopDestinations", i), err.(request.ErrInvalidParams)) + } + } + } if s.Settings != nil { if err := s.Settings.Validate(); err != nil { invalidParams.AddNested("Settings", err.(request.ErrInvalidParams)) @@ -5582,6 +6202,12 @@ func (s *CreateJobTemplateInput) SetDescription(v string) *CreateJobTemplateInpu return s } +// SetHopDestinations sets the HopDestinations field's value. +func (s *CreateJobTemplateInput) SetHopDestinations(v []*HopDestination) *CreateJobTemplateInput { + s.HopDestinations = v + return s +} + // SetName sets the Name field's value. 
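+// Illustrative sketch, not part of the generated code: wiring the optional queue
+// hopping described above into a job request; the queue ARN and wait time here
+// are hypothetical.
+//
+//    hop := (&HopDestination{}).
+//        SetQueue("arn:aws:mediaconvert:us-east-1:111122223333:queues/Backup").
+//        SetWaitMinutes(10)
+//    jobInput := (&CreateJobInput{}).SetHopDestinations([]*HopDestination{hop})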
func (s *CreateJobTemplateInput) SetName(v string) *CreateJobTemplateInput { s.Name = &v @@ -7911,6 +8537,15 @@ type FileSourceSettings struct { // 608 data into 708. Convert608To708 *string `locationName:"convert608To708" type:"string" enum:"FileSourceConvert608To708"` + // Ignore this setting unless your input captions format is SCC. To have the + // service compensate for differing frame rates between your input captions + // and input video, specify the frame rate of the captions file. Specify this + // value as a fraction, using the settings Framerate numerator (framerateNumerator) + // and Framerate denominator (framerateDenominator). For example, you might + // specify 24 / 1 for 24 fps, 25 / 1 for 25 fps, 24000 / 1001 for 23.976 fps, + // or 30000 / 1001 for 29.97 fps. + Framerate *CaptionSourceFramerate `locationName:"framerate" type:"structure"` + // External caption file used for loading captions. Accepted file extensions // are 'scc', 'ttml', 'dfxp', 'stl', 'srt', 'xml', and 'smi'. SourceFile *string `locationName:"sourceFile" min:"14" type:"string"` @@ -7939,6 +8574,11 @@ func (s *FileSourceSettings) Validate() error { if s.TimeDelta != nil && *s.TimeDelta < -2.147483648e+09 { invalidParams.Add(request.NewErrParamMinValue("TimeDelta", -2.147483648e+09)) } + if s.Framerate != nil { + if err := s.Framerate.Validate(); err != nil { + invalidParams.AddNested("Framerate", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -7952,6 +8592,12 @@ func (s *FileSourceSettings) SetConvert608To708(v string) *FileSourceSettings { return s } +// SetFramerate sets the Framerate field's value. +func (s *FileSourceSettings) SetFramerate(v *CaptionSourceFramerate) *FileSourceSettings { + s.Framerate = v + return s +} + // SetSourceFile sets the SourceFile field's value. func (s *FileSourceSettings) SetSourceFile(v string) *FileSourceSettings { s.SourceFile = &v @@ -7965,8 +8611,8 @@ func (s *FileSourceSettings) SetTimeDelta(v int64) *FileSourceSettings { } type ForbiddenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7983,17 +8629,17 @@ func (s ForbiddenException) GoString() string { func newErrorForbiddenException(v protocol.ResponseMetadata) error { return &ForbiddenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ForbiddenException) Code() string { +func (s *ForbiddenException) Code() string { return "ForbiddenException" } // Message returns the exception's message. -func (s ForbiddenException) Message() string { +func (s *ForbiddenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8001,22 +8647,22 @@ func (s ForbiddenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ForbiddenException) OrigErr() error { +func (s *ForbiddenException) OrigErr() error { return nil } -func (s ForbiddenException) Error() string { +func (s *ForbiddenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ForbiddenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ForbiddenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
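+// Illustrative sketch, not part of the generated code: with the pointer receivers
+// introduced above, callers can match these modeled errors with the standard
+// errors package (svc and input here are hypothetical caller-side values):
+//
+//    if _, err := svc.CreateJob(input); err != nil {
+//        var forbidden *ForbiddenException
+//        if errors.As(err, &forbidden) {
+//            log.Printf("%s (HTTP %d)", forbidden.Message(), forbidden.StatusCode())
+//        }
+//    }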
-func (s ForbiddenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ForbiddenException) RequestID() string { + return s.RespMetadata.RequestID } // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to @@ -8150,7 +8796,7 @@ type GetJobOutput struct { _ struct{} `type:"structure"` // Each job converts an input file into an output file or files. For more information, - // see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html + // see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html Job *Job `locationName:"job" type:"structure"` } @@ -8458,8 +9104,10 @@ func (s *H264QvbrSettings) SetQvbrQualityLevelFineTune(v float64) *H264QvbrSetti type H264Settings struct { _ struct{} `type:"structure"` - // Adaptive quantization. Allows intra-frame quantizers to vary to improve visual - // quality. + // Specify the strength of any adaptive quantization filters that you enable. + // The value that you choose here applies to the following settings: Flicker + // adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization + // (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization). AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H264AdaptiveQuantization"` // Specify the average bitrate in bits per second. Required for VBR and CBR. @@ -8485,10 +9133,18 @@ type H264Settings struct { // Entropy encoding mode. Use CABAC (must be in Main or High profile) or CAVLC. EntropyEncoding *string `locationName:"entropyEncoding" type:"string" enum:"H264EntropyEncoding"` - // Choosing FORCE_FIELD disables PAFF encoding for interlaced outputs. + // Keep the default value, PAFF, to have MediaConvert use PAFF encoding for + // interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding + // and create separate interlaced fields. FieldEncoding *string `locationName:"fieldEncoding" type:"string" enum:"H264FieldEncoding"` - // Adjust quantization within each frame to reduce flicker or 'pop' on I-frames. + // Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears + // as a visual flicker that can arise when the encoder saves bits by copying + // some macroblocks many times from frame to frame, and then refreshes them + // at the I-frame. When you enable this setting, the encoder updates these macroblocks + // slightly more often to smooth out the flicker. This setting is disabled by + // default. Related setting: In addition to enabling this setting, you must + // also set adaptiveQuantization to a value other than Off (OFF). FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"H264FlickerAdaptiveQuantization"` // If you are using the console, use the Framerate setting to specify the frame @@ -8504,7 +9160,16 @@ type H264Settings struct { // rate you specify in the settings FramerateNumerator and FramerateDenominator. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"H264FramerateControl"` - // When set to INTERPOLATE, produces smoother motion during frame rate conversion. + // Choose the method that you want MediaConvert to use when increasing or decreasing + // the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically + // simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, + // you can use interpolate (INTERPOLATE) to avoid stutter. 
This results in a + // smooth picture, but might introduce undesirable video artifacts. For complex + // frame rate conversions, especially if your source video has already been + // converted from its original cadence, use FrameFormer (FRAMEFORMER) to do + // motion-compensated interpolation. FrameFormer chooses the best conversion + // method frame by frame. Note that using FrameFormer increases the transcoding + // time and incurs a significant add-on cost. FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"H264FramerateConversionAlgorithm"` // When you use the API for transcode jobs that use frame rate conversion, specify @@ -8515,8 +9180,12 @@ type H264Settings struct { // as a decimal number for Framerate. In this example, specify 23.976. FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` - // Frame rate numerator - frame rate is a fraction, e.g. 24000 / 1001 = 23.976 - // fps. + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateNumerator to specify the numerator of this fraction. In this example, + // use 24000 for the value of FramerateNumerator. When you use the console for + // transcode jobs that use frame rate conversion, provide the value as a decimal + // number for Framerate. In this example, specify 23.976. FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` // If enable, use reference B frames for GOP structures that have B frames > @@ -8543,17 +9212,18 @@ type H264Settings struct { // as 5000000. HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` - // Use Interlace mode (InterlaceMode) to choose the scan line type for the output. - // * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce - // interlaced output with the entire output having the same field polarity (top - // or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default - // Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, - // behavior depends on the input scan type, as follows. - If the source is interlaced, - // the output will be interlaced with the same polarity as the source (it will - // follow the source). The output could therefore be a mix of "top field first" - // and "bottom field first". - If the source is progressive, the output will - // be interlaced with "top field first" or "bottom field first" polarity, depending - // on which of the Follow options you chose. + // Choose the scan line type for the output. Keep the default value, Progressive + // (PROGRESSIVE) to create a progressive output, regardless of the scan type + // of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) + // to create an output that's interlaced with the same field polarity throughout. + // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) + // to produce outputs with the same field polarity as the source. For jobs that + // have multiple inputs, the output field polarity might change over the course + // of the output. Follow behavior depends on the input scan type. If the source + // is interlaced, the output will be interlaced with the same polarity as the + // source. 
If the source is progressive, the output will be interlaced with + // top field bottom field first, depending on which of the Follow options you + // choose. InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"H264InterlaceMode"` // Maximum bitrate in bits/second. For example, enter five megabits per second @@ -8576,20 +9246,34 @@ type H264Settings struct { // if using B-frames and/or interlaced encoding. NumberReferenceFrames *int64 `locationName:"numberReferenceFrames" min:"1" type:"integer"` - // Using the API, enable ParFollowSource if you want the service to use the - // pixel aspect ratio from the input. Using the console, do this by choosing - // Follow source for Pixel aspect ratio. + // Optional. Specify how the service determines the pixel aspect ratio (PAR) + // for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), + // uses the PAR from your input video for your output. To specify a different + // PAR in the console, choose any value other than Follow source. To specify + // a different PAR by editing the JSON job specification, choose SPECIFIED. + // When you choose SPECIFIED for this setting, you must also specify values + // for the parNumerator and parDenominator settings. ParControl *string `locationName:"parControl" type:"string" enum:"H264ParControl"` - // Pixel Aspect Ratio denominator. + // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the + // console, this corresponds to any value other than Follow source. When you + // specify an output pixel aspect ratio (PAR) that is different from your input + // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC + // widescreen, you would specify the ratio 40:33. In this example, the value + // for parDenominator is 33. ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` - // Pixel Aspect Ratio numerator. + // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the + // console, this corresponds to any value other than Follow source. When you + // specify an output pixel aspect ratio (PAR) that is different from your input + // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC + // widescreen, you would specify the ratio 40:33. In this example, the value + // for parNumerator is 40. ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` - // Use Quality tuning level (H264QualityTuningLevel) to specifiy whether to - // use fast single-pass, high-quality singlepass, or high-quality multipass - // video encoding. + // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you + // want to trade off encoding speed for output video quality. The default behavior + // is faster, lower quality, single-pass encoding. QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"H264QualityTuningLevel"` // Settings for quality-defined variable bitrate encoding with the H.264 codec. @@ -8617,32 +9301,71 @@ type H264Settings struct { // the number of macroblock rows for interlaced pictures. Slices *int64 `locationName:"slices" min:"1" type:"integer"` - // Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled - // as 25fps, and audio is sped up correspondingly. + // Ignore this setting unless your input frame rate is 23.976 or 24 frames per + // second (fps). Enable slow PAL to create a 25 fps output. 
When you enable + // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples + // your audio to keep it synchronized with the video. Note that enabling this + // setting will slightly reduce the duration of your video. Required settings: + // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) + // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to + // 1. SlowPal *string `locationName:"slowPal" type:"string" enum:"H264SlowPal"` - // Softness. Selects quantizer matrix, larger values reduce high-frequency content - // in the encoded image. + // Ignore this setting unless you need to comply with a specification that requires + // a specific value. If you don't have a specification requirement, we recommend + // that you adjust the softness of your output by using a lower value for the + // setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter). + // The Softness (softness) setting specifies the quantization matrices that + // the encoder uses. Keep the default value, 0, for flat quantization. Choose + // the value 1 or 16 to use the default JVT softening quantization matricies + // from the H.264 specification. Choose a value from 17 to 128 to use planar + // interpolation. Increasing values from 17 to 128 result in increasing reduction + // of high-frequency data. The value 128 results in the softest video. Softness *int64 `locationName:"softness" type:"integer"` - // Adjust quantization within each frame based on spatial variation of content - // complexity. + // Keep the default value, Enabled (ENABLED), to adjust quantization within + // each frame based on spatial variation of content complexity. When you enable + // this feature, the encoder uses fewer bits on areas that can sustain more + // distortion with no noticeable visual degradation and uses more bits on areas + // where any small distortion will be noticeable. For example, complex textured + // blocks are encoded with fewer bits and smooth textured blocks are encoded + // with more bits. Enabling this feature will almost always improve your video + // quality. Note, though, that this feature doesn't take into account where + // the viewer's attention is likely to be. If viewers are likely to be focusing + // their attention on a part of the screen with a lot of complex texture, you + // might choose to disable this feature. Related setting: When you enable spatial + // adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) + // depending on your content. For homogeneous content, such as cartoons and + // video games, set it to Low. For content with a wider variety of textures, + // set it to High or Higher. SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"H264SpatialAdaptiveQuantization"` // Produces a bitstream compliant with SMPTE RP-2027. Syntax *string `locationName:"syntax" type:"string" enum:"H264Syntax"` - // This field applies only if the Streams > Advanced > Framerate (framerate) - // field is set to 29.970. This field works with the Streams > Advanced > Preprocessors - // > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced - // Mode field (interlace_mode) to identify the scan type for the output: Progressive, - // Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output - // from 23.976 input. - Soft: produces 23.976; the player converts this output - // to 29.97i. 
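+	// Illustrative sketch, not part of the generated code: the companion settings
+	// that the slow PAL documentation above calls for might look like this (enum
+	// strings assumed from the H264SlowPal and H264FramerateControl values):
+	//
+	//    h264 := (&H264Settings{}).
+	//        SetSlowPal("ENABLED").
+	//        SetFramerateControl("SPECIFIED").
+	//        SetFramerateNumerator(25).
+	//        SetFramerateDenominator(1)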
+ // When you do frame rate conversion from 23.976 frames per second (fps) to + // 29.97 fps, and your output scan type is interlaced, you can optionally enable + // hard or soft telecine to create a smoother picture. Hard telecine (HARD) + // produces a 29.97i output. Soft telecine (SOFT) produces an output with a + // 23.976 output that signals to the video player device to do the conversion + // during play back. When you keep the default value, None (NONE), MediaConvert + // does a standard frame rate conversion to 29.97 without doing anything with + // the field polarity to create a smoother picture. Telecine *string `locationName:"telecine" type:"string" enum:"H264Telecine"` - // Adjust quantization within each frame based on temporal variation of content - // complexity. + // Keep the default value, Enabled (ENABLED), to adjust quantization within + // each frame based on temporal variation of content complexity. When you enable + // this feature, the encoder uses fewer bits on areas of the frame that aren't + // moving and uses more bits on complex objects with sharp edges that move a + // lot. For example, this feature improves the readability of text tickers on + // newscasts and scoreboards on sports matches. Enabling this feature will almost + // always improve your video quality. Note, though, that this feature doesn't + // take into account where the viewer's attention is likely to be. If viewers + // are likely to be focusing their attention on a part of the screen that doesn't + // have moving objects with sharp edges, such as sports athletes' faces, you + // might choose to disable this feature. Related setting: When you enable temporal + // quantization, adjust the strength of the filter with the setting Adaptive + // quantization (adaptiveQuantization). TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"H264TemporalAdaptiveQuantization"` // Inserts timecode for each frame as 4 bytes of an unregistered SEI message. @@ -9014,8 +9737,10 @@ func (s *H265QvbrSettings) SetQvbrQualityLevelFineTune(v float64) *H265QvbrSetti type H265Settings struct { _ struct{} `type:"structure"` - // Adaptive quantization. Allows intra-frame quantizers to vary to improve visual - // quality. + // Specify the strength of any adaptive quantization filters that you enable. + // The value that you choose here applies to the following settings: Flicker + // adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization + // (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization). AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"H265AdaptiveQuantization"` // Enables Alternate Transfer Function SEI message for outputs using Hybrid @@ -9042,7 +9767,13 @@ type H265Settings struct { // value you provide for the setting B frames between reference frames (numberBFramesBetweenReferenceFrames). DynamicSubGop *string `locationName:"dynamicSubGop" type:"string" enum:"H265DynamicSubGop"` - // Adjust quantization within each frame to reduce flicker or 'pop' on I-frames. + // Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears + // as a visual flicker that can arise when the encoder saves bits by copying + // some macroblocks many times from frame to frame, and then refreshes them + // at the I-frame. When you enable this setting, the encoder updates these macroblocks + // slightly more often to smooth out the flicker. 
This setting is disabled by + // default. Related setting: In addition to enabling this setting, you must + // also set adaptiveQuantization to a value other than Off (OFF). FlickerAdaptiveQuantization *string `locationName:"flickerAdaptiveQuantization" type:"string" enum:"H265FlickerAdaptiveQuantization"` // If you are using the console, use the Framerate setting to specify the frame @@ -9051,21 +9782,39 @@ type H265Settings struct { // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose // Custom, specify your frame rate as a fraction. If you are creating your transcoding - // job sepecification as a JSON file without the console, use FramerateControl + // job specification as a JSON file without the console, use FramerateControl // to specify which value the service uses for the frame rate for this output. // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate // from the input. Choose SPECIFIED if you want the service to use the frame // rate you specify in the settings FramerateNumerator and FramerateDenominator. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"H265FramerateControl"` - // When set to INTERPOLATE, produces smoother motion during frame rate conversion. + // Choose the method that you want MediaConvert to use when increasing or decreasing + // the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically + // simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, + // you can use interpolate (INTERPOLATE) to avoid stutter. This results in a + // smooth picture, but might introduce undesirable video artifacts. For complex + // frame rate conversions, especially if your source video has already been + // converted from its original cadence, use FrameFormer (FRAMEFORMER) to do + // motion-compensated interpolation. FrameFormer chooses the best conversion + // method frame by frame. Note that using FrameFormer increases the transcoding + // time and incurs a significant add-on cost. FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"H265FramerateConversionAlgorithm"` - // Frame rate denominator. + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateDenominator to specify the denominator of this fraction. In this + // example, use 1001 for the value of FramerateDenominator. When you use the + // console for transcode jobs that use frame rate conversion, provide the value + // as a decimal number for Framerate. In this example, specify 23.976. FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` - // Frame rate numerator - frame rate is a fraction, e.g. 24000 / 1001 = 23.976 - // fps. + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateNumerator to specify the numerator of this fraction. In this example, + // use 24000 for the value of FramerateNumerator. When you use the console for + // transcode jobs that use frame rate conversion, provide the value as a decimal + // number for Framerate. In this example, specify 23.976. 
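+	// Illustrative sketch, not part of the generated code: expressing 23.976 fps as
+	// the fraction described above when building H265Settings through the API (enum
+	// string assumed from the H265FramerateControl values):
+	//
+	//    h265 := (&H265Settings{}).
+	//        SetFramerateControl("SPECIFIED").
+	//        SetFramerateNumerator(24000).
+	//        SetFramerateDenominator(1001)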
FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` // If enable, use reference B frames for GOP structures that have B frames > @@ -9092,18 +9841,18 @@ type H265Settings struct { // as 5000000. HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` - // Choose the scan line type for the output. Choose Progressive (PROGRESSIVE) - // to create a progressive output, regardless of the scan type of your input. - // Choose Top Field First (TOP_FIELD) or Bottom Field First (BOTTOM_FIELD) to - // create an output that's interlaced with the same field polarity throughout. - // Choose Follow, Default Top (FOLLOW_TOP_FIELD) or Follow, Default Bottom (FOLLOW_BOTTOM_FIELD) - // to create an interlaced output with the same field polarity as the source. - // If the source is interlaced, the output will be interlaced with the same - // polarity as the source (it will follow the source). The output could therefore - // be a mix of "top field first" and "bottom field first". If the source is - // progressive, your output will be interlaced with "top field first" or "bottom - // field first" polarity, depending on which of the Follow options you chose. - // If you don't choose a value, the service will default to Progressive (PROGRESSIVE). + // Choose the scan line type for the output. Keep the default value, Progressive + // (PROGRESSIVE) to create a progressive output, regardless of the scan type + // of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) + // to create an output that's interlaced with the same field polarity throughout. + // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) + // to produce outputs with the same field polarity as the source. For jobs that + // have multiple inputs, the output field polarity might change over the course + // of the output. Follow behavior depends on the input scan type. If the source + // is interlaced, the output will be interlaced with the same polarity as the + // source. If the source is progressive, the output will be interlaced with + // top field bottom field first, depending on which of the Follow options you + // choose. InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"H265InterlaceMode"` // Maximum bitrate in bits/second. For example, enter five megabits per second @@ -9126,20 +9875,34 @@ type H265Settings struct { // if using B-frames and/or interlaced encoding. NumberReferenceFrames *int64 `locationName:"numberReferenceFrames" min:"1" type:"integer"` - // Using the API, enable ParFollowSource if you want the service to use the - // pixel aspect ratio from the input. Using the console, do this by choosing - // Follow source for Pixel aspect ratio. + // Optional. Specify how the service determines the pixel aspect ratio (PAR) + // for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), + // uses the PAR from your input video for your output. To specify a different + // PAR in the console, choose any value other than Follow source. To specify + // a different PAR by editing the JSON job specification, choose SPECIFIED. + // When you choose SPECIFIED for this setting, you must also specify values + // for the parNumerator and parDenominator settings. ParControl *string `locationName:"parControl" type:"string" enum:"H265ParControl"` - // Pixel Aspect Ratio denominator. + // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. 
On the + // console, this corresponds to any value other than Follow source. When you + // specify an output pixel aspect ratio (PAR) that is different from your input + // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC + // widescreen, you would specify the ratio 40:33. In this example, the value + // for parDenominator is 33. ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` - // Pixel Aspect Ratio numerator. + // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the + // console, this corresponds to any value other than Follow source. When you + // specify an output pixel aspect ratio (PAR) that is different from your input + // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC + // widescreen, you would specify the ratio 40:33. In this example, the value + // for parNumerator is 40. ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` - // Use Quality tuning level (H265QualityTuningLevel) to specifiy whether to - // use fast single-pass, high-quality singlepass, or high-quality multipass - // video encoding. + // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you + // want to trade off encoding speed for output video quality. The default behavior + // is faster, lower quality, single-pass encoding. QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"H265QualityTuningLevel"` // Settings for quality-defined variable bitrate encoding with the H.265 codec. @@ -9168,12 +9931,31 @@ type H265Settings struct { // the number of macroblock rows for interlaced pictures. Slices *int64 `locationName:"slices" min:"1" type:"integer"` - // Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled - // as 25fps, and audio is sped up correspondingly. + // Ignore this setting unless your input frame rate is 23.976 or 24 frames per + // second (fps). Enable slow PAL to create a 25 fps output. When you enable + // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples + // your audio to keep it synchronized with the video. Note that enabling this + // setting will slightly reduce the duration of your video. Required settings: + // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) + // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to + // 1. SlowPal *string `locationName:"slowPal" type:"string" enum:"H265SlowPal"` - // Adjust quantization within each frame based on spatial variation of content - // complexity. + // Keep the default value, Enabled (ENABLED), to adjust quantization within + // each frame based on spatial variation of content complexity. When you enable + // this feature, the encoder uses fewer bits on areas that can sustain more + // distortion with no noticeable visual degradation and uses more bits on areas + // where any small distortion will be noticeable. For example, complex textured + // blocks are encoded with fewer bits and smooth textured blocks are encoded + // with more bits. Enabling this feature will almost always improve your video + // quality. Note, though, that this feature doesn't take into account where + // the viewer's attention is likely to be. If viewers are likely to be focusing + // their attention on a part of the screen with a lot of complex texture, you + // might choose to disable this feature. 
Related setting: When you enable spatial + // adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) + // depending on your content. For homogeneous content, such as cartoons and + // video games, set it to Low. For content with a wider variety of textures, + // set it to High or Higher. SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"H265SpatialAdaptiveQuantization"` // This field applies only if the Streams > Advanced > Framerate (framerate) @@ -9185,8 +9967,19 @@ type H265Settings struct { // to 29.97i. Telecine *string `locationName:"telecine" type:"string" enum:"H265Telecine"` - // Adjust quantization within each frame based on temporal variation of content - // complexity. + // Keep the default value, Enabled (ENABLED), to adjust quantization within + // each frame based on temporal variation of content complexity. When you enable + // this feature, the encoder uses fewer bits on areas of the frame that aren't + // moving and uses more bits on complex objects with sharp edges that move a + // lot. For example, this feature improves the readability of text tickers on + // newscasts and scoreboards on sports matches. Enabling this feature will almost + // always improve your video quality. Note, though, that this feature doesn't + // take into account where the viewer's attention is likely to be. If viewers + // are likely to be focusing their attention on a part of the screen that doesn't + // have moving objects with sharp edges, such as sports athletes' faces, you + // might choose to disable this feature. Related setting: When you enable temporal + // quantization, adjust the strength of the filter with the setting Adaptive + // quantization (adaptiveQuantization). TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"H265TemporalAdaptiveQuantization"` // Enables temporal layer identifiers in the encoded bitstream. Up to 3 layers @@ -9891,6 +10684,12 @@ type HlsGroupSettings struct { // a subset of the outputs in the output group, specify a list of them here. AdditionalManifests []*HlsAdditionalManifest `locationName:"additionalManifests" type:"list"` + // Ignore this setting unless you are using FairPlay DRM with Verimatrix and + // you encounter playback issues. Keep the default value, Include (INCLUDE), + // to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only + // headers from your audio segments. + AudioOnlyHeader *string `locationName:"audioOnlyHeader" type:"string" enum:"HlsAudioOnlyHeader"` + // A partial URI prefix that will be prepended to each output in the media .m3u8 // file. Can be used if base manifest is delivered from a different URL than // the main .m3u8 file. @@ -10069,6 +10868,12 @@ func (s *HlsGroupSettings) SetAdditionalManifests(v []*HlsAdditionalManifest) *H return s } +// SetAudioOnlyHeader sets the AudioOnlyHeader field's value. +func (s *HlsGroupSettings) SetAudioOnlyHeader(v string) *HlsGroupSettings { + s.AudioOnlyHeader = &v + return s +} + // SetBaseUrl sets the BaseUrl field's value. func (s *HlsGroupSettings) SetBaseUrl(v string) *HlsGroupSettings { s.BaseUrl = &v @@ -10295,6 +11100,70 @@ func (s *HlsSettings) SetSegmentModifier(v string) *HlsSettings { return s } +// Optional. Configuration for a destination queue to which the job can hop +// once a customer-defined minimum wait time has passed. +type HopDestination struct { + _ struct{} `type:"structure"` + + // Optional. 
When you set up a job to use queue hopping, you can specify a different + // relative priority for the job in the destination queue. If you don't specify, + // the relative priority will remain the same as in the previous queue. + Priority *int64 `locationName:"priority" type:"integer"` + + // Optional unless the job is submitted on the default queue. When you set up + // a job to use queue hopping, you can specify a destination queue. This queue + // cannot be the original queue to which the job is submitted. If the original + // queue isn't the default queue and you don't specify the destination queue, + // the job will move to the default queue. + Queue *string `locationName:"queue" type:"string"` + + // Required for setting up a job to use queue hopping. Minimum wait time in + // minutes until the job can hop to the destination queue. Valid range is 1 + // to 1440 minutes, inclusive. + WaitMinutes *int64 `locationName:"waitMinutes" type:"integer"` +} + +// String returns the string representation +func (s HopDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HopDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HopDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HopDestination"} + if s.Priority != nil && *s.Priority < -50 { + invalidParams.Add(request.NewErrParamMinValue("Priority", -50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPriority sets the Priority field's value. +func (s *HopDestination) SetPriority(v int64) *HopDestination { + s.Priority = &v + return s +} + +// SetQueue sets the Queue field's value. +func (s *HopDestination) SetQueue(v string) *HopDestination { + s.Queue = &v + return s +} + +// SetWaitMinutes sets the WaitMinutes field's value. +func (s *HopDestination) SetWaitMinutes(v int64) *HopDestination { + s.WaitMinutes = &v + return s +} + // To insert ID3 tags in your output, specify two values. Use ID3 tag (Id3) // to specify the base 64 encoded string and use Timecode (TimeCode) to specify // the time when the tag should be inserted. To insert multiple ID3 tags in @@ -10384,8 +11253,8 @@ type ImscDestinationSettings struct { // Keep this setting enabled to have MediaConvert use the font style and position // information from the captions source in the output. This option is available - // only when your input captions are CFF-TT, IMSC, SMPTE-TT, or TTML. Disable - // this setting for simplified output captions. + // only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting + // for simplified output captions. StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"ImscStylePassthrough"` } @@ -10415,13 +11284,12 @@ type Input struct { AudioSelectorGroups map[string]*AudioSelectorGroup `locationName:"audioSelectorGroups" type:"map"` // Use Audio selectors (AudioSelectors) to specify a track or set of tracks - // from the input that you will use in your outputs. You can use mutiple Audio + // from the input that you will use in your outputs. You can use multiple Audio // selectors per input. AudioSelectors map[string]*AudioSelector `locationName:"audioSelectors" type:"map"` - // Use Captions selectors (CaptionSelectors) to specify the captions data from - // the input that you will use in your outputs. You can use mutiple captions - // selectors per input. 
+ // Use captions selectors to specify the captions data from your input that + // you use in your outputs. You can use up to 20 captions selectors per input. CaptionSelectors map[string]*CaptionSelector `locationName:"captionSelectors" type:"map"` // Use Cropping selection (crop) to specify the video area that the service @@ -10431,7 +11299,7 @@ type Input struct { Crop *Rectangle `locationName:"crop" type:"structure"` // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. - // Default is disabled. Only manaully controllable for MPEG2 and uncompressed + // Default is disabled. Only manually controllable for MPEG2 and uncompressed // video inputs. DeblockFilter *string `locationName:"deblockFilter" type:"string" enum:"InputDeblockFilter"` @@ -10455,13 +11323,13 @@ type Input struct { // that contain assets referenced by the CPL. FileInput *string `locationName:"fileInput" type:"string"` - // Use Filter enable (InputFilterEnable) to specify how the transcoding service - // applies the denoise and deblock filters. You must also enable the filters - // separately, with Denoise (InputDenoiseFilter) and Deblock (InputDeblockFilter). - // * Auto - The transcoding service determines whether to apply filtering, depending - // on input type and quality. * Disable - The input is not filtered. This is - // true even if you use the API to enable them in (InputDeblockFilter) and (InputDeblockFilter). - // * Force - The in put is filtered regardless of input type. + // Specify how the transcoding service applies the denoise and deblock filters. + // You must also enable the filters separately, with Denoise (InputDenoiseFilter) + // and Deblock (InputDeblockFilter). * Auto - The transcoding service determines + // whether to apply filtering, depending on input type and quality. * Disable + // - The input is not filtered. This is true even if you use the API to enable + // them in (InputDeblockFilter) and (InputDeblockFilter). * Force - The input + // is filtered regardless of input type. FilterEnable *string `locationName:"filterEnable" type:"string" enum:"InputFilterEnable"` // Use Filter strength (FilterStrength) to adjust the magnitude the input filter @@ -10481,6 +11349,15 @@ type Input struct { // job outputs by stringing the clips together in the order you specify them. InputClippings []*InputClipping `locationName:"inputClippings" type:"list"` + // When you have a progressive segmented frame (PsF) input, use this setting + // to flag the input as PsF. MediaConvert doesn't automatically detect PsF. + // Therefore, flagging your input as PsF results in better preservation of video + // quality when you do deinterlacing and frame rate conversion. If you don't + // specify, the default value is Auto (AUTO). Auto is the correct setting for + // all inputs that are not PsF. Don't set this value to PsF when your input + // is interlaced. Doing so creates horizontal interlacing artifacts. + InputScanType *string `locationName:"inputScanType" type:"string" enum:"InputScanType"` + // Use Selection placement (position) to define the video area in your output // frame. The area outside of the rectangle that you specify here is black. // If you specify a value here, it will override any value that you specify @@ -10678,6 +11555,12 @@ func (s *Input) SetInputClippings(v []*InputClipping) *Input { return s } +// SetInputScanType sets the InputScanType field's value. 
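+// Illustrative sketch, not part of the generated code: flagging a progressive
+// segmented frame source as described above; the "PSF" enum string is assumed
+// from the InputScanType values, and the default, AUTO, suits all other inputs.
+//
+//    in := (&Input{}).SetInputScanType("PSF")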
+func (s *Input) SetInputScanType(v string) *Input { + s.InputScanType = &v + return s +} + // SetPosition sets the Position field's value. func (s *Input) SetPosition(v *Rectangle) *Input { s.Position = v @@ -10863,13 +11746,12 @@ type InputTemplate struct { AudioSelectorGroups map[string]*AudioSelectorGroup `locationName:"audioSelectorGroups" type:"map"` // Use Audio selectors (AudioSelectors) to specify a track or set of tracks - // from the input that you will use in your outputs. You can use mutiple Audio + // from the input that you will use in your outputs. You can use multiple Audio // selectors per input. AudioSelectors map[string]*AudioSelector `locationName:"audioSelectors" type:"map"` - // Use Captions selectors (CaptionSelectors) to specify the captions data from - // the input that you will use in your outputs. You can use mutiple captions - // selectors per input. + // Use captions selectors to specify the captions data from your input that + // you use in your outputs. You can use up to 20 captions selectors per input. CaptionSelectors map[string]*CaptionSelector `locationName:"captionSelectors" type:"map"` // Use Cropping selection (crop) to specify the video area that the service @@ -10879,7 +11761,7 @@ type InputTemplate struct { Crop *Rectangle `locationName:"crop" type:"structure"` // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. - // Default is disabled. Only manaully controllable for MPEG2 and uncompressed + // Default is disabled. Only manually controllable for MPEG2 and uncompressed // video inputs. DeblockFilter *string `locationName:"deblockFilter" type:"string" enum:"InputDeblockFilter"` @@ -10888,13 +11770,13 @@ type InputTemplate struct { // inputs. DenoiseFilter *string `locationName:"denoiseFilter" type:"string" enum:"InputDenoiseFilter"` - // Use Filter enable (InputFilterEnable) to specify how the transcoding service - // applies the denoise and deblock filters. You must also enable the filters - // separately, with Denoise (InputDenoiseFilter) and Deblock (InputDeblockFilter). - // * Auto - The transcoding service determines whether to apply filtering, depending - // on input type and quality. * Disable - The input is not filtered. This is - // true even if you use the API to enable them in (InputDeblockFilter) and (InputDeblockFilter). - // * Force - The in put is filtered regardless of input type. + // Specify how the transcoding service applies the denoise and deblock filters. + // You must also enable the filters separately, with Denoise (InputDenoiseFilter) + // and Deblock (InputDeblockFilter). * Auto - The transcoding service determines + // whether to apply filtering, depending on input type and quality. * Disable + // - The input is not filtered. This is true even if you use the API to enable + // them in (InputDeblockFilter) and (InputDeblockFilter). * Force - The input + // is filtered regardless of input type. FilterEnable *string `locationName:"filterEnable" type:"string" enum:"InputFilterEnable"` // Use Filter strength (FilterStrength) to adjust the magnitude the input filter @@ -10914,6 +11796,15 @@ type InputTemplate struct { // job outputs by stringing the clips together in the order you specify them. InputClippings []*InputClipping `locationName:"inputClippings" type:"list"` + // When you have a progressive segmented frame (PsF) input, use this setting + // to flag the input as PsF. MediaConvert doesn't automatically detect PsF. 
+ // Therefore, flagging your input as PsF results in better preservation of video + // quality when you do deinterlacing and frame rate conversion. If you don't + // specify, the default value is Auto (AUTO). Auto is the correct setting for + // all inputs that are not PsF. Don't set this value to PsF when your input + // is interlaced. Doing so creates horizontal interlacing artifacts. + InputScanType *string `locationName:"inputScanType" type:"string" enum:"InputScanType"` + // Use Selection placement (position) to define the video area in your output // frame. The area outside of the rectangle that you specify here is black. // If you specify a value here, it will override any value that you specify @@ -11086,6 +11977,12 @@ func (s *InputTemplate) SetInputClippings(v []*InputClipping) *InputTemplate { return s } +// SetInputScanType sets the InputScanType field's value. +func (s *InputTemplate) SetInputScanType(v string) *InputTemplate { + s.InputScanType = &v + return s +} + // SetPosition sets the Position field's value. func (s *InputTemplate) SetPosition(v *Rectangle) *InputTemplate { s.Position = v @@ -11272,8 +12169,8 @@ func (s *InsertableImage) SetWidth(v int64) *InsertableImage { } type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11290,17 +12187,17 @@ func (s InternalServerErrorException) GoString() string { func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { return &InternalServerErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerErrorException) Code() string { +func (s *InternalServerErrorException) Code() string { return "InternalServerErrorException" } // Message returns the exception's message. -func (s InternalServerErrorException) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11308,26 +12205,26 @@ func (s InternalServerErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerErrorException) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s InternalServerErrorException) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID } // Each job converts an input file into an output file or files. For more information, -// see the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html +// see the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html type Job struct { _ struct{} `type:"structure"` @@ -11351,11 +12248,8 @@ type Job struct { // An identifier for this resource that is unique within all of AWS. 
Arn *string `locationName:"arn" type:"string"` - // Optional. Choose a tag type that AWS Billing and Cost Management will use - // to sort your AWS Elemental MediaConvert costs on any billing report that - // you set up. Any transcoding outputs that don't have an associated tag will - // appear in your billing report unsorted. If you don't choose a valid value - // for this field, your job outputs will appear on the billing report unsorted. + // The tag type that AWS Billing and Cost Management will use to sort your AWS + // Elemental MediaConvert costs on any billing report that you set up. BillingTagsSource *string `locationName:"billingTagsSource" type:"string" enum:"BillingTagsSource"` // The time, in Unix epoch format in seconds, when the job got created. @@ -11370,6 +12264,9 @@ type Job struct { // Error message of Job ErrorMessage *string `locationName:"errorMessage" type:"string"` + // Optional list of hop destinations. + HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"` + // A portion of the job's ARN, unique within your AWS Elemental MediaConvert // resources Id *string `locationName:"id" type:"string"` @@ -11398,17 +12295,20 @@ type Job struct { // Relative priority on the job. Priority *int64 `locationName:"priority" type:"integer"` - // Optional. When you create a job, you can specify a queue to send it to. If - // you don't specify, the job will go to the default queue. For more about queues, - // see the User Guide topic at http://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html + // When you create a job, you can specify a queue to send it to. If you don't + // specify, the job will go to the default queue. For more about queues, see + // the User Guide topic at https://docs.aws.amazon.com/mediaconvert/latest/ug/what-is.html Queue *string `locationName:"queue" type:"string"` + // The job's queue hopping history. + QueueTransitions []*QueueTransition `locationName:"queueTransitions" type:"list"` + // The number of times that the service automatically attempted to process your // job after encountering an error. RetryCount *int64 `locationName:"retryCount" type:"integer"` // The IAM role you use for creating this job. For details about permissions, - // see the User Guide topic at the User Guide at http://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html + // see the User Guide topic at the User Guide at https://docs.aws.amazon.com/mediaconvert/latest/ug/iam-role.html // // Role is a required field Role *string `locationName:"role" type:"string" required:"true"` @@ -11500,6 +12400,12 @@ func (s *Job) SetErrorMessage(v string) *Job { return s } +// SetHopDestinations sets the HopDestinations field's value. +func (s *Job) SetHopDestinations(v []*HopDestination) *Job { + s.HopDestinations = v + return s +} + // SetId sets the Id field's value. func (s *Job) SetId(v string) *Job { s.Id = &v @@ -11542,6 +12448,12 @@ func (s *Job) SetQueue(v string) *Job { return s } +// SetQueueTransitions sets the QueueTransitions field's value. +func (s *Job) SetQueueTransitions(v []*QueueTransition) *Job { + s.QueueTransitions = v + return s +} + // SetRetryCount sets the RetryCount field's value. func (s *Job) SetRetryCount(v int64) *Job { s.RetryCount = &v @@ -11659,6 +12571,15 @@ type JobSettings struct { // the setting. NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"` + // Ignore these settings unless you are using Nielsen non-linear watermarking. 
+ // Specify the values that MediaConvert uses to generate and place Nielsen watermarks + // in your output audio. In addition to specifying these values, you also need + // to set up your cloud TIC server. These settings apply to every output in + // your job. The MediaConvert implementation is currently with the following + // Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark + // Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0] + NielsenNonLinearWatermark *NielsenNonLinearWatermarkSettings `locationName:"nielsenNonLinearWatermark" type:"structure"` + // (OutputGroups) contains one group of settings for each set of outputs that // share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, // MXF, and no container) are grouped in a single output group as well. Required @@ -11674,8 +12595,8 @@ type JobSettings struct { TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"` // Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags - // in your job. To include timed metadata, you must enable it here, enable it - // in each output container, and specify tags and timecodes in ID3 insertion + // in any HLS outputs. To include timed metadata, you must enable it here, enable + // it in each output container, and specify tags and timecodes in ID3 insertion // (Id3Insertion) objects. TimedMetadataInsertion *TimedMetadataInsertion `locationName:"timedMetadataInsertion" type:"structure"` } @@ -11716,6 +12637,11 @@ func (s *JobSettings) Validate() error { invalidParams.AddNested("MotionImageInserter", err.(request.ErrInvalidParams)) } } + if s.NielsenNonLinearWatermark != nil { + if err := s.NielsenNonLinearWatermark.Validate(); err != nil { + invalidParams.AddNested("NielsenNonLinearWatermark", err.(request.ErrInvalidParams)) + } + } if s.OutputGroups != nil { for i, v := range s.OutputGroups { if v == nil { @@ -11769,6 +12695,12 @@ func (s *JobSettings) SetNielsenConfiguration(v *NielsenConfiguration) *JobSetti return s } +// SetNielsenNonLinearWatermark sets the NielsenNonLinearWatermark field's value. +func (s *JobSettings) SetNielsenNonLinearWatermark(v *NielsenNonLinearWatermarkSettings) *JobSettings { + s.NielsenNonLinearWatermark = v + return s +} + // SetOutputGroups sets the OutputGroups field's value. func (s *JobSettings) SetOutputGroups(v []*OutputGroup) *JobSettings { s.OutputGroups = v @@ -11808,6 +12740,9 @@ type JobTemplate struct { // An optional description you create for each job template. Description *string `locationName:"description" type:"string"` + // Optional list of hop destinations. + HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"` + // The timestamp in epoch seconds when the Job template was last updated. LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unixTimestamp"` @@ -11881,6 +12816,12 @@ func (s *JobTemplate) SetDescription(v string) *JobTemplate { return s } +// SetHopDestinations sets the HopDestinations field's value. +func (s *JobTemplate) SetHopDestinations(v []*HopDestination) *JobTemplate { + s.HopDestinations = v + return s +} + // SetLastUpdated sets the LastUpdated field's value. func (s *JobTemplate) SetLastUpdated(v time.Time) *JobTemplate { s.LastUpdated = &v @@ -11957,6 +12898,15 @@ type JobTemplateSettings struct { // the setting. 
NielsenConfiguration *NielsenConfiguration `locationName:"nielsenConfiguration" type:"structure"` + // Ignore these settings unless you are using Nielsen non-linear watermarking. + // Specify the values that MediaConvert uses to generate and place Nielsen watermarks + // in your output audio. In addition to specifying these values, you also need + // to set up your cloud TIC server. These settings apply to every output in + // your job. The MediaConvert implementation is currently with the following + // Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark + // Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0] + NielsenNonLinearWatermark *NielsenNonLinearWatermarkSettings `locationName:"nielsenNonLinearWatermark" type:"structure"` + // (OutputGroups) contains one group of settings for each set of outputs that // share a common package type. All unpackaged files (MPEG-4, MPEG-2 TS, Quicktime, // MXF, and no container) are grouped in a single output group as well. Required @@ -11972,8 +12922,8 @@ type JobTemplateSettings struct { TimecodeConfig *TimecodeConfig `locationName:"timecodeConfig" type:"structure"` // Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags - // in your job. To include timed metadata, you must enable it here, enable it - // in each output container, and specify tags and timecodes in ID3 insertion + // in any HLS outputs. To include timed metadata, you must enable it here, enable + // it in each output container, and specify tags and timecodes in ID3 insertion // (Id3Insertion) objects. TimedMetadataInsertion *TimedMetadataInsertion `locationName:"timedMetadataInsertion" type:"structure"` } @@ -12014,6 +12964,11 @@ func (s *JobTemplateSettings) Validate() error { invalidParams.AddNested("MotionImageInserter", err.(request.ErrInvalidParams)) } } + if s.NielsenNonLinearWatermark != nil { + if err := s.NielsenNonLinearWatermark.Validate(); err != nil { + invalidParams.AddNested("NielsenNonLinearWatermark", err.(request.ErrInvalidParams)) + } + } if s.OutputGroups != nil { for i, v := range s.OutputGroups { if v == nil { @@ -12067,6 +13022,12 @@ func (s *JobTemplateSettings) SetNielsenConfiguration(v *NielsenConfiguration) * return s } +// SetNielsenNonLinearWatermark sets the NielsenNonLinearWatermark field's value. +func (s *JobTemplateSettings) SetNielsenNonLinearWatermark(v *NielsenNonLinearWatermarkSettings) *JobTemplateSettings { + s.NielsenNonLinearWatermark = v + return s +} + // SetOutputGroups sets the OutputGroups field's value. func (s *JobTemplateSettings) SetOutputGroups(v []*OutputGroup) *JobTemplateSettings { s.OutputGroups = v @@ -12109,7 +13070,7 @@ type ListJobTemplatesInput struct { // the next batch of job templates. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - // When you request lists of resources, you can optionally specify whether they + // Optional. When you request lists of resources, you can specify whether they // are sorted in ASCENDING or DESCENDING order. Default varies by resource. Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"` } @@ -12212,18 +13173,19 @@ type ListJobsInput struct { // Optional. Number of jobs, up to twenty, that will be returned at one time. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // Use this string, provided with the response to a previous request, to request - // the next batch of jobs. + // Optional. 
Use this string, provided with the response to a previous request, + // to request the next batch of jobs. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - // When you request lists of resources, you can optionally specify whether they + // Optional. When you request lists of resources, you can specify whether they // are sorted in ASCENDING or DESCENDING order. Default varies by resource. Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"` - // Provide a queue name to get back only jobs from that queue. + // Optional. Provide a queue name to get back only jobs from that queue. Queue *string `location:"querystring" locationName:"queue" type:"string"` - // A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR. + // Optional. A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, + // or ERROR. Status *string `location:"querystring" locationName:"status" type:"string" enum:"JobStatus"` } @@ -12337,7 +13299,7 @@ type ListPresetsInput struct { // the next batch of presets. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - // When you request lists of resources, you can optionally specify whether they + // Optional. When you request lists of resources, you can specify whether they // are sorted in ASCENDING or DESCENDING order. Default varies by resource. Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"` } @@ -12446,7 +13408,7 @@ type ListQueuesInput struct { // the next batch of queues. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` - // When you request lists of resources, you can optionally specify whether they + // Optional. When you request lists of resources, you can specify whether they // are sorted in ASCENDING or DESCENDING order. Default varies by resource. Order *string `location:"querystring" locationName:"order" type:"string" enum:"Order"` } @@ -13547,7 +14509,11 @@ type MovSettings struct { // video codec is MPEG2. Mpeg2FourCCControl *string `locationName:"mpeg2FourCCControl" type:"string" enum:"MovMpeg2FourCCControl"` - // If set to OMNEON, inserts Omneon-compatible padding + // To make this output compatible with Omenon, keep the default value, OMNEON. + // Unless you need Omneon compatibility, set this value to NONE. When you keep + // the default value, OMNEON, MediaConvert increases the length of the edit + // list atom. This might cause file rejections when a recipient of the output + // file doesn't expct this extra padding. PaddingControl *string `locationName:"paddingControl" type:"string" enum:"MovPaddingControl"` // Always keep the default value (SELF_CONTAINED) for this setting. @@ -13873,8 +14839,10 @@ func (s *MpdSettings) SetScte35Source(v string) *MpdSettings { type Mpeg2Settings struct { _ struct{} `type:"structure"` - // Adaptive quantization. Allows intra-frame quantizers to vary to improve visual - // quality. + // Specify the strength of any adaptive quantization filters that you enable. + // The value that you choose here applies to the following settings: Spatial + // adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive + // quantization (temporalAdaptiveQuantization). AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"Mpeg2AdaptiveQuantization"` // Specify the average bitrate in bits per second. Required for VBR and CBR. 
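// ---------------------------------------------------------------------------
// Reviewer sketch (illustrative only; not part of the vendored, generated SDK
// file). The Mpeg2Settings documentation just above notes that the Adaptive
// quantization value sets the strength used by the Spatial and Temporal
// adaptive-quantization settings documented further down in this diff. A
// minimal example of how calling code might wire the three settings together;
// the enum strings "HIGH" and "ENABLED" are taken from those doc comments,
// everything else is hypothetical.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func exampleMpeg2AdaptiveQuantization() *mediaconvert.Mpeg2Settings {
	return &mediaconvert.Mpeg2Settings{
		AdaptiveQuantization:         aws.String("HIGH"),    // overall AQ filter strength
		SpatialAdaptiveQuantization:  aws.String("ENABLED"), // vary quantization with spatial complexity
		TemporalAdaptiveQuantization: aws.String("ENABLED"), // vary quantization with motion
	}
}
// ---------------------------------------------------------------------------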
@@ -13901,21 +14869,39 @@ type Mpeg2Settings struct { // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose // Custom, specify your frame rate as a fraction. If you are creating your transcoding - // job sepecification as a JSON file without the console, use FramerateControl + // job specification as a JSON file without the console, use FramerateControl // to specify which value the service uses for the frame rate for this output. // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate // from the input. Choose SPECIFIED if you want the service to use the frame // rate you specify in the settings FramerateNumerator and FramerateDenominator. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Mpeg2FramerateControl"` - // When set to INTERPOLATE, produces smoother motion during frame rate conversion. + // Choose the method that you want MediaConvert to use when increasing or decreasing + // the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically + // simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, + // you can use interpolate (INTERPOLATE) to avoid stutter. This results in a + // smooth picture, but might introduce undesirable video artifacts. For complex + // frame rate conversions, especially if your source video has already been + // converted from its original cadence, use FrameFormer (FRAMEFORMER) to do + // motion-compensated interpolation. FrameFormer chooses the best conversion + // method frame by frame. Note that using FrameFormer increases the transcoding + // time and incurs a significant add-on cost. FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Mpeg2FramerateConversionAlgorithm"` - // Frame rate denominator. + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateDenominator to specify the denominator of this fraction. In this + // example, use 1001 for the value of FramerateDenominator. When you use the + // console for transcode jobs that use frame rate conversion, provide the value + // as a decimal number for Framerate. In this example, specify 23.976. FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` - // Frame rate numerator - frame rate is a fraction, e.g. 24000 / 1001 = 23.976 - // fps. + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateNumerator to specify the numerator of this fraction. In this example, + // use 24000 for the value of FramerateNumerator. When you use the console for + // transcode jobs that use frame rate conversion, provide the value as a decimal + // number for Framerate. In this example, specify 23.976. FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"` // Frequency of closed GOPs. In streaming applications, it is recommended that @@ -13938,17 +14924,18 @@ type Mpeg2Settings struct { // as 5000000. HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` - // Use Interlace mode (InterlaceMode) to choose the scan line type for the output. 
- // * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce - // interlaced output with the entire output having the same field polarity (top - // or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default - // Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, - // behavior depends on the input scan type. - If the source is interlaced, the - // output will be interlaced with the same polarity as the source (it will follow - // the source). The output could therefore be a mix of "top field first" and - // "bottom field first". - If the source is progressive, the output will be - // interlaced with "top field first" or "bottom field first" polarity, depending - // on which of the Follow options you chose. + // Choose the scan line type for the output. Keep the default value, Progressive + // (PROGRESSIVE) to create a progressive output, regardless of the scan type + // of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) + // to create an output that's interlaced with the same field polarity throughout. + // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) + // to produce outputs with the same field polarity as the source. For jobs that + // have multiple inputs, the output field polarity might change over the course + // of the output. Follow behavior depends on the input scan type. If the source + // is interlaced, the output will be interlaced with the same polarity as the + // source. If the source is progressive, the output will be interlaced with + // top field bottom field first, depending on which of the Follow options you + // choose. InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"Mpeg2InterlaceMode"` // Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision @@ -13973,19 +14960,34 @@ type Mpeg2Settings struct { // Number of B-frames between reference frames. NumberBFramesBetweenReferenceFrames *int64 `locationName:"numberBFramesBetweenReferenceFrames" type:"integer"` - // Using the API, enable ParFollowSource if you want the service to use the - // pixel aspect ratio from the input. Using the console, do this by choosing - // Follow source for Pixel aspect ratio. + // Optional. Specify how the service determines the pixel aspect ratio (PAR) + // for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), + // uses the PAR from your input video for your output. To specify a different + // PAR in the console, choose any value other than Follow source. To specify + // a different PAR by editing the JSON job specification, choose SPECIFIED. + // When you choose SPECIFIED for this setting, you must also specify values + // for the parNumerator and parDenominator settings. ParControl *string `locationName:"parControl" type:"string" enum:"Mpeg2ParControl"` - // Pixel Aspect Ratio denominator. + // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the + // console, this corresponds to any value other than Follow source. When you + // specify an output pixel aspect ratio (PAR) that is different from your input + // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC + // widescreen, you would specify the ratio 40:33. In this example, the value + // for parDenominator is 33. ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` - // Pixel Aspect Ratio numerator. 
+ // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the + // console, this corresponds to any value other than Follow source. When you + // specify an output pixel aspect ratio (PAR) that is different from your input + // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC + // widescreen, you would specify the ratio 40:33. In this example, the value + // for parNumerator is 40. ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` - // Use Quality tuning level (Mpeg2QualityTuningLevel) to specifiy whether to - // use single-pass or multipass video encoding. + // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you + // want to trade off encoding speed for output video quality. The default behavior + // is faster, lower quality, single-pass encoding. QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Mpeg2QualityTuningLevel"` // Use Rate control mode (Mpeg2RateControlMode) to specifiy whether the bitrate @@ -13996,29 +14998,72 @@ type Mpeg2Settings struct { // automatically detects. This improves video quality and is enabled by default. SceneChangeDetect *string `locationName:"sceneChangeDetect" type:"string" enum:"Mpeg2SceneChangeDetect"` - // Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled - // as 25fps, and audio is sped up correspondingly. + // Ignore this setting unless your input frame rate is 23.976 or 24 frames per + // second (fps). Enable slow PAL to create a 25 fps output. When you enable + // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples + // your audio to keep it synchronized with the video. Note that enabling this + // setting will slightly reduce the duration of your video. Required settings: + // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) + // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to + // 1. SlowPal *string `locationName:"slowPal" type:"string" enum:"Mpeg2SlowPal"` - // Softness. Selects quantizer matrix, larger values reduce high-frequency content - // in the encoded image. + // Ignore this setting unless you need to comply with a specification that requires + // a specific value. If you don't have a specification requirement, we recommend + // that you adjust the softness of your output by using a lower value for the + // setting Sharpness (sharpness) or by enabling a noise reducer filter (noiseReducerFilter). + // The Softness (softness) setting specifies the quantization matrices that + // the encoder uses. Keep the default value, 0, to use the AWS Elemental default + // matrices. Choose a value from 17 to 128 to use planar interpolation. Increasing + // values from 17 to 128 result in increasing reduction of high-frequency data. + // The value 128 results in the softest video. Softness *int64 `locationName:"softness" type:"integer"` - // Adjust quantization within each frame based on spatial variation of content - // complexity. + // Keep the default value, Enabled (ENABLED), to adjust quantization within + // each frame based on spatial variation of content complexity. When you enable + // this feature, the encoder uses fewer bits on areas that can sustain more + // distortion with no noticeable visual degradation and uses more bits on areas + // where any small distortion will be noticeable. For example, complex textured + // blocks are encoded with fewer bits and smooth textured blocks are encoded + // with more bits. 
Enabling this feature will almost always improve your video + // quality. Note, though, that this feature doesn't take into account where + // the viewer's attention is likely to be. If viewers are likely to be focusing + // their attention on a part of the screen with a lot of complex texture, you + // might choose to disable this feature. Related setting: When you enable spatial + // adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) + // depending on your content. For homogeneous content, such as cartoons and + // video games, set it to Low. For content with a wider variety of textures, + // set it to High or Higher. SpatialAdaptiveQuantization *string `locationName:"spatialAdaptiveQuantization" type:"string" enum:"Mpeg2SpatialAdaptiveQuantization"` - // Produces a Type D-10 compatible bitstream (SMPTE 356M-2001). + // Specify whether this output's video uses the D10 syntax. Keep the default + // value to not use the syntax. Related settings: When you choose D10 (D_10) + // for your MXF profile (profile), you must also set this value to to D10 (D_10). Syntax *string `locationName:"syntax" type:"string" enum:"Mpeg2Syntax"` - // Only use Telecine (Mpeg2Telecine) when you set Framerate (Framerate) to 29.970. - // Set Telecine (Mpeg2Telecine) to Hard (hard) to produce a 29.97i output from - // a 23.976 input. Set it to Soft (soft) to produce 23.976 output and leave - // converstion to the player. + // When you do frame rate conversion from 23.976 frames per second (fps) to + // 29.97 fps, and your output scan type is interlaced, you can optionally enable + // hard or soft telecine to create a smoother picture. Hard telecine (HARD) + // produces a 29.97i output. Soft telecine (SOFT) produces an output with a + // 23.976 output that signals to the video player device to do the conversion + // during play back. When you keep the default value, None (NONE), MediaConvert + // does a standard frame rate conversion to 29.97 without doing anything with + // the field polarity to create a smoother picture. Telecine *string `locationName:"telecine" type:"string" enum:"Mpeg2Telecine"` - // Adjust quantization within each frame based on temporal variation of content - // complexity. + // Keep the default value, Enabled (ENABLED), to adjust quantization within + // each frame based on temporal variation of content complexity. When you enable + // this feature, the encoder uses fewer bits on areas of the frame that aren't + // moving and uses more bits on complex objects with sharp edges that move a + // lot. For example, this feature improves the readability of text tickers on + // newscasts and scoreboards on sports matches. Enabling this feature will almost + // always improve your video quality. Note, though, that this feature doesn't + // take into account where the viewer's attention is likely to be. If viewers + // are likely to be focusing their attention on a part of the screen that doesn't + // have moving objects with sharp edges, such as sports athletes' faces, you + // might choose to disable this feature. Related setting: When you enable temporal + // quantization, adjust the strength of the filter with the setting Adaptive + // quantization (adaptiveQuantization). 
TemporalAdaptiveQuantization *string `locationName:"temporalAdaptiveQuantization" type:"string" enum:"Mpeg2TemporalAdaptiveQuantization"` } @@ -14440,96 +15485,104 @@ func (s *MsSmoothGroupSettings) SetManifestEncoding(v string) *MsSmoothGroupSett return s } -// Settings for your Nielsen configuration. If you don't do Nielsen measurement -// and analytics, ignore these settings. When you enable Nielsen configuration -// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs -// in the job. To enable Nielsen configuration programmatically, include an -// instance of nielsenConfiguration in your JSON job specification. Even if -// you don't include any children of nielsenConfiguration, you still enable -// the setting. -type NielsenConfiguration struct { +// MXF settings +type MxfSettings struct { _ struct{} `type:"structure"` - // Nielsen has discontinued the use of breakout code functionality. If you must - // include this property, set the value to zero. - BreakoutCode *int64 `locationName:"breakoutCode" type:"integer"` + // Optional. When you have AFD signaling set up in your output video stream, + // use this setting to choose whether to also include it in the MXF wrapper. + // Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. + // Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from + // the video stream for this output to the MXF wrapper. Regardless of which + // option you choose, the AFD values remain in the video stream. Related settings: + // To set up your output to include or exclude AFD values, see AfdSignaling, + // under VideoDescription. On the console, find AFD signaling under the output's + // video encoding settings. + AfdSignaling *string `locationName:"afdSignaling" type:"string" enum:"MxfAfdSignaling"` - // Use Distributor ID (DistributorID) to specify the distributor ID that is - // assigned to your organization by Neilsen. - DistributorId *string `locationName:"distributorId" type:"string"` + // Specify the MXF profile, also called shim, for this output. When you choose + // Auto, MediaConvert chooses a profile based on the video codec and resolution. + // For a list of codecs supported with each MXF profile, see https://docs.aws.amazon.com/mediaconvert/latest/ug/codecs-supported-with-each-mxf-profile.html. + // For more information about the automatic selection behavior, see https://docs.aws.amazon.com/mediaconvert/latest/ug/default-automatic-selection-of-mxf-profiles.html. + Profile *string `locationName:"profile" type:"string" enum:"MxfProfile"` } // String returns the string representation -func (s NielsenConfiguration) String() string { +func (s MxfSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NielsenConfiguration) GoString() string { +func (s MxfSettings) GoString() string { return s.String() } -// SetBreakoutCode sets the BreakoutCode field's value. -func (s *NielsenConfiguration) SetBreakoutCode(v int64) *NielsenConfiguration { - s.BreakoutCode = &v +// SetAfdSignaling sets the AfdSignaling field's value. +func (s *MxfSettings) SetAfdSignaling(v string) *MxfSettings { + s.AfdSignaling = &v return s } -// SetDistributorId sets the DistributorId field's value. -func (s *NielsenConfiguration) SetDistributorId(v string) *NielsenConfiguration { - s.DistributorId = &v +// SetProfile sets the Profile field's value. 
+func (s *MxfSettings) SetProfile(v string) *MxfSettings { + s.Profile = &v return s } -// Enable the Noise reducer (NoiseReducer) feature to remove noise from your -// video output if necessary. Enable or disable this feature for each output -// individually. This setting is disabled by default. When you enable Noise -// reducer (NoiseReducer), you must also select a value for Noise reducer filter -// (NoiseReducerFilter). -type NoiseReducer struct { +// For forensic video watermarking, MediaConvert supports Nagra NexGuard File +// Marker watermarking. MediaConvert supports both PreRelease Content (NGPR/G2) +// and OTT Streaming workflows. +type NexGuardFileMarkerSettings struct { _ struct{} `type:"structure"` - // Use Noise reducer filter (NoiseReducerFilter) to select one of the following - // spatial image filtering functions. To use this setting, you must also enable - // Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing - // noise. * Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution - // filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain - // filtering based on JND principles. * Temporal optimizes video quality for - // complex motion. - Filter *string `locationName:"filter" type:"string" enum:"NoiseReducerFilter"` - - // Settings for a noise reducer filter - FilterSettings *NoiseReducerFilterSettings `locationName:"filterSettings" type:"structure"` - - // Noise reducer filter settings for spatial filter. - SpatialFilterSettings *NoiseReducerSpatialFilterSettings `locationName:"spatialFilterSettings" type:"structure"` - - // Noise reducer filter settings for temporal filter. - TemporalFilterSettings *NoiseReducerTemporalFilterSettings `locationName:"temporalFilterSettings" type:"structure"` + // Use the base64 license string that Nagra provides you. Enter it directly + // in your JSON job specification or in the console. Required when you include + // Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) in + // your job. + License *string `locationName:"license" min:"1" type:"string"` + + // Specify the payload ID that you want associated with this output. Valid values + // vary depending on your Nagra NexGuard forensic watermarking workflow. Required + // when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) + // in your job. For PreRelease Content (NGPR/G2), specify an integer from 1 + // through 4,194,303. You must generate a unique ID for each asset you watermark, + // and keep a record of which ID you have assigned to each asset. Neither Nagra + // nor MediaConvert keep track of the relationship between output files and + // your IDs. For OTT Streaming, create two adaptive bitrate (ABR) stacks for + // each asset. Do this by setting up two output groups. For one output group, + // set the value of Payload ID (payload) to 0 in every output. For the other + // output group, set Payload ID (payload) to 1 in every output. + Payload *int64 `locationName:"payload" type:"integer"` + + // Enter one of the watermarking preset strings that Nagra provides you. Required + // when you include Nagra NexGuard File Marker watermarking (NexGuardWatermarkingSettings) + // in your job. + Preset *string `locationName:"preset" min:"1" type:"string"` + + // Optional. Ignore this setting unless Nagra support directs you to specify + // a value. When you don't specify a value here, the Nagra NexGuard library + // uses its default value. 
+ Strength *string `locationName:"strength" type:"string" enum:"WatermarkingStrength"` } // String returns the string representation -func (s NoiseReducer) String() string { +func (s NexGuardFileMarkerSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NoiseReducer) GoString() string { +func (s NexGuardFileMarkerSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *NoiseReducer) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "NoiseReducer"} - if s.SpatialFilterSettings != nil { - if err := s.SpatialFilterSettings.Validate(); err != nil { - invalidParams.AddNested("SpatialFilterSettings", err.(request.ErrInvalidParams)) - } +func (s *NexGuardFileMarkerSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NexGuardFileMarkerSettings"} + if s.License != nil && len(*s.License) < 1 { + invalidParams.Add(request.NewErrParamMinLen("License", 1)) } - if s.TemporalFilterSettings != nil { - if err := s.TemporalFilterSettings.Validate(); err != nil { - invalidParams.AddNested("TemporalFilterSettings", err.(request.ErrInvalidParams)) - } + if s.Preset != nil && len(*s.Preset) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Preset", 1)) } if invalidParams.Len() > 0 { @@ -14538,26 +15591,321 @@ func (s *NoiseReducer) Validate() error { return nil } -// SetFilter sets the Filter field's value. -func (s *NoiseReducer) SetFilter(v string) *NoiseReducer { - s.Filter = &v +// SetLicense sets the License field's value. +func (s *NexGuardFileMarkerSettings) SetLicense(v string) *NexGuardFileMarkerSettings { + s.License = &v return s } -// SetFilterSettings sets the FilterSettings field's value. -func (s *NoiseReducer) SetFilterSettings(v *NoiseReducerFilterSettings) *NoiseReducer { - s.FilterSettings = v +// SetPayload sets the Payload field's value. +func (s *NexGuardFileMarkerSettings) SetPayload(v int64) *NexGuardFileMarkerSettings { + s.Payload = &v return s } -// SetSpatialFilterSettings sets the SpatialFilterSettings field's value. -func (s *NoiseReducer) SetSpatialFilterSettings(v *NoiseReducerSpatialFilterSettings) *NoiseReducer { - s.SpatialFilterSettings = v +// SetPreset sets the Preset field's value. +func (s *NexGuardFileMarkerSettings) SetPreset(v string) *NexGuardFileMarkerSettings { + s.Preset = &v return s } -// SetTemporalFilterSettings sets the TemporalFilterSettings field's value. -func (s *NoiseReducer) SetTemporalFilterSettings(v *NoiseReducerTemporalFilterSettings) *NoiseReducer { +// SetStrength sets the Strength field's value. +func (s *NexGuardFileMarkerSettings) SetStrength(v string) *NexGuardFileMarkerSettings { + s.Strength = &v + return s +} + +// Settings for your Nielsen configuration. If you don't do Nielsen measurement +// and analytics, ignore these settings. When you enable Nielsen configuration +// (nielsenConfiguration), MediaConvert enables PCM to ID3 tagging for all outputs +// in the job. To enable Nielsen configuration programmatically, include an +// instance of nielsenConfiguration in your JSON job specification. Even if +// you don't include any children of nielsenConfiguration, you still enable +// the setting. +type NielsenConfiguration struct { + _ struct{} `type:"structure"` + + // Nielsen has discontinued the use of breakout code functionality. If you must + // include this property, set the value to zero. 
+ BreakoutCode *int64 `locationName:"breakoutCode" type:"integer"` + + // Use Distributor ID (DistributorID) to specify the distributor ID that is + // assigned to your organization by Neilsen. + DistributorId *string `locationName:"distributorId" type:"string"` +} + +// String returns the string representation +func (s NielsenConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NielsenConfiguration) GoString() string { + return s.String() +} + +// SetBreakoutCode sets the BreakoutCode field's value. +func (s *NielsenConfiguration) SetBreakoutCode(v int64) *NielsenConfiguration { + s.BreakoutCode = &v + return s +} + +// SetDistributorId sets the DistributorId field's value. +func (s *NielsenConfiguration) SetDistributorId(v string) *NielsenConfiguration { + s.DistributorId = &v + return s +} + +// Ignore these settings unless you are using Nielsen non-linear watermarking. +// Specify the values that MediaConvert uses to generate and place Nielsen watermarks +// in your output audio. In addition to specifying these values, you also need +// to set up your cloud TIC server. These settings apply to every output in +// your job. The MediaConvert implementation is currently with the following +// Nielsen versions: Nielsen Watermark SDK Version 5.2.1 Nielsen NLM Watermark +// Engine Version 1.2.7 Nielsen Watermark Authenticator [SID_TIC] Version [5.0.0] +type NielsenNonLinearWatermarkSettings struct { + _ struct{} `type:"structure"` + + // Choose the type of Nielsen watermarks that you want in your outputs. When + // you choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the + // setting SID (sourceId). When you choose CBET (CBET), you must provide a value + // for the setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET + // (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings. + ActiveWatermarkProcess *string `locationName:"activeWatermarkProcess" type:"string" enum:"NielsenActiveWatermarkProcessType"` + + // Optional. Use this setting when you want the service to include an ADI file + // in the Nielsen metadata .zip file. To provide an ADI file, store it in Amazon + // S3 and provide a URL to it here. The URL should be in the following format: + // S3://bucket/path/ADI-file. For more information about the metadata .zip file, + // see the setting Metadata destination (metadataDestination). + AdiFilename *string `locationName:"adiFilename" type:"string"` + + // Use the asset ID that you provide to Nielsen to uniquely identify this asset. + // Required for all Nielsen non-linear watermarking. + AssetId *string `locationName:"assetId" min:"1" type:"string"` + + // Use the asset name that you provide to Nielsen for this asset. Required for + // all Nielsen non-linear watermarking. + AssetName *string `locationName:"assetName" min:"1" type:"string"` + + // Use the CSID that Nielsen provides to you. This CBET source ID should be + // unique to your Nielsen account but common to all of your output assets that + // have CBET watermarking. Required when you choose a value for the setting + // Watermark types (ActiveWatermarkProcess) that includes CBET. + CbetSourceId *string `locationName:"cbetSourceId" type:"string"` + + // Optional. If this asset uses an episode ID with Nielsen, provide it here. 
+ EpisodeId *string `locationName:"episodeId" min:"1" type:"string"` + + // Specify the Amazon S3 location where you want MediaConvert to save your Nielsen + // non-linear metadata .zip file. This Amazon S3 bucket must be in the same + // Region as the one where you do your MediaConvert transcoding. If you want + // to include an ADI file in this .zip file, use the setting ADI file (adiFilename) + // to specify it. MediaConvert delivers the Nielsen metadata .zip files only + // to your metadata destination Amazon S3 bucket. It doesn't deliver the .zip + // files to Nielsen. You are responsible for delivering the metadata .zip files + // to Nielsen. + MetadataDestination *string `locationName:"metadataDestination" type:"string"` + + // Use the SID that Nielsen provides to you. This source ID should be unique + // to your Nielsen account but common to all of your output assets. Required + // for all Nielsen non-linear watermarking. This ID should be unique to your + // Nielsen account but common to all of your output assets. Required for all + // Nielsen non-linear watermarking. + SourceId *int64 `locationName:"sourceId" type:"integer"` + + // Required. Specify whether your source content already contains Nielsen non-linear + // watermarks. When you set this value to Watermarked (WATERMARKED), the service + // fails the job. Nielsen requires that you add non-linear watermarking to only + // clean content that doesn't already have non-linear Nielsen watermarks. + SourceWatermarkStatus *string `locationName:"sourceWatermarkStatus" type:"string" enum:"NielsenSourceWatermarkStatusType"` + + // Specify the endpoint for the TIC server that you have deployed and configured + // in the AWS Cloud. Required for all Nielsen non-linear watermarking. MediaConvert + // can't connect directly to a TIC server. Instead, you must use API Gateway + // to provide a RESTful interface between MediaConvert and a TIC server that + // you deploy in your AWS account. For more information on deploying a TIC server + // in your AWS account and the required API Gateway, contact Nielsen support. + TicServerUrl *string `locationName:"ticServerUrl" type:"string"` + + // To create assets that have the same TIC values in each audio track, keep + // the default value Share TICs (SAME_TICS_PER_TRACK). To create assets that + // have unique TIC values for each audio track, choose Use unique TICs (RESERVE_UNIQUE_TICS_PER_TRACK). + UniqueTicPerAudioTrack *string `locationName:"uniqueTicPerAudioTrack" type:"string" enum:"NielsenUniqueTicPerAudioTrackType"` +} + +// String returns the string representation +func (s NielsenNonLinearWatermarkSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NielsenNonLinearWatermarkSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
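// ---------------------------------------------------------------------------
// Reviewer sketch (illustrative only; not part of the vendored, generated SDK
// file). The NielsenNonLinearWatermarkSettings type above carries the values
// MediaConvert needs for Nielsen non-linear watermarking. A minimal example of
// how calling code might populate the fields the doc comments describe as
// required for NAES 2 and NW watermarks; every concrete value (IDs, URL,
// bucket) is hypothetical, and the enum string comes from the doc comments.
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func exampleNielsenNonLinear() *mediaconvert.NielsenNonLinearWatermarkSettings {
	return &mediaconvert.NielsenNonLinearWatermarkSettings{
		ActiveWatermarkProcess: aws.String("NAES2_AND_NW"),                          // NAES 2 and NW; requires SourceId
		AssetId:                aws.String("asset-0001"),                            // hypothetical Nielsen asset ID
		AssetName:              aws.String("Example Episode"),                       // hypothetical Nielsen asset name
		SourceId:               aws.Int64(1234),                                     // hypothetical SID from Nielsen
		TicServerUrl:           aws.String("https://example.execute-api.us-east-1.amazonaws.com/prod"), // API Gateway front for your TIC server
		MetadataDestination:    aws.String("s3://example-bucket/nielsen-metadata/"), // where the metadata .zip files land
	}
}
// ---------------------------------------------------------------------------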
+func (s *NielsenNonLinearWatermarkSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NielsenNonLinearWatermarkSettings"} + if s.AssetId != nil && len(*s.AssetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssetId", 1)) + } + if s.AssetName != nil && len(*s.AssetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssetName", 1)) + } + if s.EpisodeId != nil && len(*s.EpisodeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EpisodeId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActiveWatermarkProcess sets the ActiveWatermarkProcess field's value. +func (s *NielsenNonLinearWatermarkSettings) SetActiveWatermarkProcess(v string) *NielsenNonLinearWatermarkSettings { + s.ActiveWatermarkProcess = &v + return s +} + +// SetAdiFilename sets the AdiFilename field's value. +func (s *NielsenNonLinearWatermarkSettings) SetAdiFilename(v string) *NielsenNonLinearWatermarkSettings { + s.AdiFilename = &v + return s +} + +// SetAssetId sets the AssetId field's value. +func (s *NielsenNonLinearWatermarkSettings) SetAssetId(v string) *NielsenNonLinearWatermarkSettings { + s.AssetId = &v + return s +} + +// SetAssetName sets the AssetName field's value. +func (s *NielsenNonLinearWatermarkSettings) SetAssetName(v string) *NielsenNonLinearWatermarkSettings { + s.AssetName = &v + return s +} + +// SetCbetSourceId sets the CbetSourceId field's value. +func (s *NielsenNonLinearWatermarkSettings) SetCbetSourceId(v string) *NielsenNonLinearWatermarkSettings { + s.CbetSourceId = &v + return s +} + +// SetEpisodeId sets the EpisodeId field's value. +func (s *NielsenNonLinearWatermarkSettings) SetEpisodeId(v string) *NielsenNonLinearWatermarkSettings { + s.EpisodeId = &v + return s +} + +// SetMetadataDestination sets the MetadataDestination field's value. +func (s *NielsenNonLinearWatermarkSettings) SetMetadataDestination(v string) *NielsenNonLinearWatermarkSettings { + s.MetadataDestination = &v + return s +} + +// SetSourceId sets the SourceId field's value. +func (s *NielsenNonLinearWatermarkSettings) SetSourceId(v int64) *NielsenNonLinearWatermarkSettings { + s.SourceId = &v + return s +} + +// SetSourceWatermarkStatus sets the SourceWatermarkStatus field's value. +func (s *NielsenNonLinearWatermarkSettings) SetSourceWatermarkStatus(v string) *NielsenNonLinearWatermarkSettings { + s.SourceWatermarkStatus = &v + return s +} + +// SetTicServerUrl sets the TicServerUrl field's value. +func (s *NielsenNonLinearWatermarkSettings) SetTicServerUrl(v string) *NielsenNonLinearWatermarkSettings { + s.TicServerUrl = &v + return s +} + +// SetUniqueTicPerAudioTrack sets the UniqueTicPerAudioTrack field's value. +func (s *NielsenNonLinearWatermarkSettings) SetUniqueTicPerAudioTrack(v string) *NielsenNonLinearWatermarkSettings { + s.UniqueTicPerAudioTrack = &v + return s +} + +// Enable the Noise reducer (NoiseReducer) feature to remove noise from your +// video output if necessary. Enable or disable this feature for each output +// individually. This setting is disabled by default. When you enable Noise +// reducer (NoiseReducer), you must also select a value for Noise reducer filter +// (NoiseReducerFilter). +type NoiseReducer struct { + _ struct{} `type:"structure"` + + // Use Noise reducer filter (NoiseReducerFilter) to select one of the following + // spatial image filtering functions. To use this setting, you must also enable + // Noise reducer (NoiseReducer). * Bilateral preserves edges while reducing + // noise. 
* Mean (softest), Gaussian, Lanczos, and Sharpen (sharpest) do convolution + // filtering. * Conserve does min/max noise reduction. * Spatial does frequency-domain + // filtering based on JND principles. * Temporal optimizes video quality for + // complex motion. + Filter *string `locationName:"filter" type:"string" enum:"NoiseReducerFilter"` + + // Settings for a noise reducer filter + FilterSettings *NoiseReducerFilterSettings `locationName:"filterSettings" type:"structure"` + + // Noise reducer filter settings for spatial filter. + SpatialFilterSettings *NoiseReducerSpatialFilterSettings `locationName:"spatialFilterSettings" type:"structure"` + + // Noise reducer filter settings for temporal filter. + TemporalFilterSettings *NoiseReducerTemporalFilterSettings `locationName:"temporalFilterSettings" type:"structure"` +} + +// String returns the string representation +func (s NoiseReducer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoiseReducer) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NoiseReducer) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NoiseReducer"} + if s.SpatialFilterSettings != nil { + if err := s.SpatialFilterSettings.Validate(); err != nil { + invalidParams.AddNested("SpatialFilterSettings", err.(request.ErrInvalidParams)) + } + } + if s.TemporalFilterSettings != nil { + if err := s.TemporalFilterSettings.Validate(); err != nil { + invalidParams.AddNested("TemporalFilterSettings", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *NoiseReducer) SetFilter(v string) *NoiseReducer { + s.Filter = &v + return s +} + +// SetFilterSettings sets the FilterSettings field's value. +func (s *NoiseReducer) SetFilterSettings(v *NoiseReducerFilterSettings) *NoiseReducer { + s.FilterSettings = v + return s +} + +// SetSpatialFilterSettings sets the SpatialFilterSettings field's value. +func (s *NoiseReducer) SetSpatialFilterSettings(v *NoiseReducerSpatialFilterSettings) *NoiseReducer { + s.SpatialFilterSettings = v + return s +} + +// SetTemporalFilterSettings sets the TemporalFilterSettings field's value. +func (s *NoiseReducer) SetTemporalFilterSettings(v *NoiseReducerTemporalFilterSettings) *NoiseReducer { s.TemporalFilterSettings = v return s } @@ -14654,6 +16002,14 @@ type NoiseReducerTemporalFilterSettings struct { // and creates better VQ for low bitrate outputs. AggressiveMode *int64 `locationName:"aggressiveMode" type:"integer"` + // Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), + // you can use this setting to apply sharpening. The default behavior, Auto + // (AUTO), allows the transcoder to determine whether to apply filtering, depending + // on input type and quality. When you set Noise reducer to Temporal, your output + // bandwidth is reduced. When Post temporal sharpening is also enabled, that + // bandwidth reduction is smaller. + PostTemporalSharpening *string `locationName:"postTemporalSharpening" type:"string" enum:"NoiseFilterPostTemporalSharpening"` + // The speed of the filter (higher number is faster). Low setting reduces bit // rate at the cost of transcode time, high setting improves transcode time // at the cost of bit rate. 
@@ -14696,6 +16052,12 @@ func (s *NoiseReducerTemporalFilterSettings) SetAggressiveMode(v int64) *NoiseRe return s } +// SetPostTemporalSharpening sets the PostTemporalSharpening field's value. +func (s *NoiseReducerTemporalFilterSettings) SetPostTemporalSharpening(v string) *NoiseReducerTemporalFilterSettings { + s.PostTemporalSharpening = &v + return s +} + // SetSpeed sets the Speed field's value. func (s *NoiseReducerTemporalFilterSettings) SetSpeed(v int64) *NoiseReducerTemporalFilterSettings { s.Speed = &v @@ -14709,8 +16071,8 @@ func (s *NoiseReducerTemporalFilterSettings) SetStrength(v int64) *NoiseReducerT } type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14727,17 +16089,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14745,22 +16107,89 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Required when you set Codec, under AudioDescriptions>CodecSettings, to the +// value OPUS. +type OpusSettings struct { + _ struct{} `type:"structure"` + + // Optional. Specify the average bitrate in bits per second. Valid values are + // multiples of 8000, from 32000 through 192000. The default value is 96000, + // which we recommend for quality and bandwidth. + Bitrate *int64 `locationName:"bitrate" min:"32000" type:"integer"` + + // Specify the number of channels in this output audio track. Choosing Mono + // on the console gives you 1 output channel; choosing Stereo gives you 2. In + // the API, valid values are 1 and 2. + Channels *int64 `locationName:"channels" min:"1" type:"integer"` + + // Optional. Sample rate in hz. Valid values are 16000, 24000, and 48000. The + // default value is 48000. + SampleRate *int64 `locationName:"sampleRate" min:"16000" type:"integer"` +} + +// String returns the string representation +func (s OpusSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpusSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
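// ---------------------------------------------------------------------------
// Reviewer sketch (illustrative only; not part of the vendored, generated SDK
// file). The new OpusSettings type above takes a bitrate, channel count, and
// sample rate, and its Validate method (below) enforces the documented
// minimums. A small example using the default-ish values called out in the
// doc comments (96 kb/s, stereo, 48 kHz); nothing here refers to a real job.
package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func exampleOpus() (*mediaconvert.OpusSettings, error) {
	o := &mediaconvert.OpusSettings{
		Bitrate:    aws.Int64(96000), // multiples of 8000 from 32000 to 192000; 96000 is the documented default
		Channels:   aws.Int64(2),     // stereo
		SampleRate: aws.Int64(48000), // 16000, 24000, or 48000
	}
	if err := o.Validate(); err != nil { // catches values below the documented minimums
		return nil, fmt.Errorf("opus settings: %w", err)
	}
	return o, nil
}
// ---------------------------------------------------------------------------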
+func (s *OpusSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OpusSettings"} + if s.Bitrate != nil && *s.Bitrate < 32000 { + invalidParams.Add(request.NewErrParamMinValue("Bitrate", 32000)) + } + if s.Channels != nil && *s.Channels < 1 { + invalidParams.Add(request.NewErrParamMinValue("Channels", 1)) + } + if s.SampleRate != nil && *s.SampleRate < 16000 { + invalidParams.Add(request.NewErrParamMinValue("SampleRate", 16000)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBitrate sets the Bitrate field's value. +func (s *OpusSettings) SetBitrate(v int64) *OpusSettings { + s.Bitrate = &v + return s +} + +// SetChannels sets the Channels field's value. +func (s *OpusSettings) SetChannels(v int64) *OpusSettings { + s.Channels = &v + return s +} + +// SetSampleRate sets the SampleRate field's value. +func (s *OpusSettings) SetSampleRate(v int64) *OpusSettings { + s.SampleRate = &v + return s } // An output object describes the settings for a single output file or stream @@ -14784,8 +16213,9 @@ type Output struct { // Use Extension (Extension) to specify the file extension for outputs in File // output groups. If you do not specify a value, the service will use default // extensions by container type as follows * MPEG-2 transport stream, m2ts * - // Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * No Container, - // the service will use codec extensions (e.g. AAC, H265, H265, AC3) + // Quicktime, mov * MXF container, mxf * MPEG-4 container, mp4 * WebM container, + // webm * No Container, the service will use codec extensions (e.g. AAC, H265, + // H265, AC3) Extension *string `locationName:"extension" type:"string"` // Use Name modifier (NameModifier) to have the service add a string to the @@ -15204,25 +16634,68 @@ func (s *OutputSettings) SetHlsSettings(v *HlsSettings) *OutputSettings { return s } -// A preset is a collection of preconfigured media conversion settings that -// you want MediaConvert to apply to the output during the conversion process. -type Preset struct { +// If you work with a third party video watermarking partner, use the group +// of settings that correspond with your watermarking partner to include watermarks +// in your output. +type PartnerWatermarking struct { _ struct{} `type:"structure"` - // An identifier for this resource that is unique within all of AWS. - Arn *string `locationName:"arn" type:"string"` + // For forensic video watermarking, MediaConvert supports Nagra NexGuard File + // Marker watermarking. MediaConvert supports both PreRelease Content (NGPR/G2) + // and OTT Streaming workflows. + NexguardFileMarkerSettings *NexGuardFileMarkerSettings `locationName:"nexguardFileMarkerSettings" type:"structure"` +} - // An optional category you create to organize your presets. - Category *string `locationName:"category" type:"string"` +// String returns the string representation +func (s PartnerWatermarking) String() string { + return awsutil.Prettify(s) +} - // The timestamp in epoch seconds for preset creation. - CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"` +// GoString returns the string representation +func (s PartnerWatermarking) GoString() string { + return s.String() +} - // An optional description you create for each preset. - Description *string `locationName:"description" type:"string"` +// Validate inspects the fields of the type to determine if they are valid. 
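// Editor's note: an illustrative sketch, not part of the vendored SDK, showing the new
// OpusSettings setters above in use. The values are the documented defaults (96 kbit/s,
// stereo, 48 kHz); the function name is hypothetical and the snippet assumes the
// aws-sdk-go mediaconvert package import.
func exampleOpusSettings() (*mediaconvert.OpusSettings, error) {
	opus := (&mediaconvert.OpusSettings{}).
		SetBitrate(96000).   // multiple of 8000, documented default
		SetChannels(2).      // stereo
		SetSampleRate(48000) // documented default sample rate in Hz
	if err := opus.Validate(); err != nil {
		return nil, err
	}
	return opus, nil
}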
+func (s *PartnerWatermarking) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PartnerWatermarking"} + if s.NexguardFileMarkerSettings != nil { + if err := s.NexguardFileMarkerSettings.Validate(); err != nil { + invalidParams.AddNested("NexguardFileMarkerSettings", err.(request.ErrInvalidParams)) + } + } - // The timestamp in epoch seconds when the preset was last updated. - LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unixTimestamp"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNexguardFileMarkerSettings sets the NexguardFileMarkerSettings field's value. +func (s *PartnerWatermarking) SetNexguardFileMarkerSettings(v *NexGuardFileMarkerSettings) *PartnerWatermarking { + s.NexguardFileMarkerSettings = v + return s +} + +// A preset is a collection of preconfigured media conversion settings that +// you want MediaConvert to apply to the output during the conversion process. +type Preset struct { + _ struct{} `type:"structure"` + + // An identifier for this resource that is unique within all of AWS. + Arn *string `locationName:"arn" type:"string"` + + // An optional category you create to organize your presets. + Category *string `locationName:"category" type:"string"` + + // The timestamp in epoch seconds for preset creation. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unixTimestamp"` + + // An optional description you create for each preset. + Description *string `locationName:"description" type:"string"` + + // The timestamp in epoch seconds when the preset was last updated. + LastUpdated *time.Time `locationName:"lastUpdated" type:"timestamp" timestampFormat:"unixTimestamp"` // A name you create for each preset. Each name must be unique within your account. // @@ -15409,59 +16882,96 @@ type ProresSettings struct { // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose // Custom, specify your frame rate as a fraction. If you are creating your transcoding - // job sepecification as a JSON file without the console, use FramerateControl + // job specification as a JSON file without the console, use FramerateControl // to specify which value the service uses for the frame rate for this output. // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate // from the input. Choose SPECIFIED if you want the service to use the frame // rate you specify in the settings FramerateNumerator and FramerateDenominator. FramerateControl *string `locationName:"framerateControl" type:"string" enum:"ProresFramerateControl"` - // When set to INTERPOLATE, produces smoother motion during frame rate conversion. + // Choose the method that you want MediaConvert to use when increasing or decreasing + // the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically + // simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, + // you can use interpolate (INTERPOLATE) to avoid stutter. This results in a + // smooth picture, but might introduce undesirable video artifacts. For complex + // frame rate conversions, especially if your source video has already been + // converted from its original cadence, use FrameFormer (FRAMEFORMER) to do + // motion-compensated interpolation. FrameFormer chooses the best conversion + // method frame by frame. 
Note that using FrameFormer increases the transcoding + // time and incurs a significant add-on cost. FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"ProresFramerateConversionAlgorithm"` - // Frame rate denominator. + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateDenominator to specify the denominator of this fraction. In this + // example, use 1001 for the value of FramerateDenominator. When you use the + // console for transcode jobs that use frame rate conversion, provide the value + // as a decimal number for Framerate. In this example, specify 23.976. FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` // When you use the API for transcode jobs that use frame rate conversion, specify // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use // FramerateNumerator to specify the numerator of this fraction. In this example, - // use 24000 for the value of FramerateNumerator. + // use 24000 for the value of FramerateNumerator. When you use the console for + // transcode jobs that use frame rate conversion, provide the value as a decimal + // number for Framerate. In this example, specify 23.976. FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` - // Use Interlace mode (InterlaceMode) to choose the scan line type for the output. - // * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce - // interlaced output with the entire output having the same field polarity (top - // or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default - // Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, - // behavior depends on the input scan type. - If the source is interlaced, the - // output will be interlaced with the same polarity as the source (it will follow - // the source). The output could therefore be a mix of "top field first" and - // "bottom field first". - If the source is progressive, the output will be - // interlaced with "top field first" or "bottom field first" polarity, depending - // on which of the Follow options you chose. + // Choose the scan line type for the output. Keep the default value, Progressive + // (PROGRESSIVE) to create a progressive output, regardless of the scan type + // of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) + // to create an output that's interlaced with the same field polarity throughout. + // Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) + // to produce outputs with the same field polarity as the source. For jobs that + // have multiple inputs, the output field polarity might change over the course + // of the output. Follow behavior depends on the input scan type. If the source + // is interlaced, the output will be interlaced with the same polarity as the + // source. If the source is progressive, the output will be interlaced with + // top field bottom field first, depending on which of the Follow options you + // choose. InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"ProresInterlaceMode"` - // Use (ProresParControl) to specify how the service determines the pixel aspect - // ratio. Set to Follow source (INITIALIZE_FROM_SOURCE) to use the pixel aspect - // ratio from the input. 
To specify a different pixel aspect ratio: Using the - // console, choose it from the dropdown menu. Using the API, set ProresParControl - // to (SPECIFIED) and provide for (ParNumerator) and (ParDenominator). + // Optional. Specify how the service determines the pixel aspect ratio (PAR) + // for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), + // uses the PAR from your input video for your output. To specify a different + // PAR in the console, choose any value other than Follow source. To specify + // a different PAR by editing the JSON job specification, choose SPECIFIED. + // When you choose SPECIFIED for this setting, you must also specify values + // for the parNumerator and parDenominator settings. ParControl *string `locationName:"parControl" type:"string" enum:"ProresParControl"` - // Pixel Aspect Ratio denominator. + // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the + // console, this corresponds to any value other than Follow source. When you + // specify an output pixel aspect ratio (PAR) that is different from your input + // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC + // widescreen, you would specify the ratio 40:33. In this example, the value + // for parDenominator is 33. ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` - // Pixel Aspect Ratio numerator. + // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the + // console, this corresponds to any value other than Follow source. When you + // specify an output pixel aspect ratio (PAR) that is different from your input + // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC + // widescreen, you would specify the ratio 40:33. In this example, the value + // for parNumerator is 40. ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` - // Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled - // as 25fps, and audio is sped up correspondingly. + // Ignore this setting unless your input frame rate is 23.976 or 24 frames per + // second (fps). Enable slow PAL to create a 25 fps output. When you enable + // slow PAL, MediaConvert relabels the video frames to 25 fps and resamples + // your audio to keep it synchronized with the video. Note that enabling this + // setting will slightly reduce the duration of your video. Required settings: + // You must also set Framerate to 25. In your JSON job specification, set (framerateControl) + // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to + // 1. SlowPal *string `locationName:"slowPal" type:"string" enum:"ProresSlowPal"` - // Only use Telecine (ProresTelecine) when you set Framerate (Framerate) to - // 29.970. Set Telecine (ProresTelecine) to Hard (hard) to produce a 29.97i - // output from a 23.976 input. Set it to Soft (soft) to produce 23.976 output - // and leave converstion to the player. + // When you do frame rate conversion from 23.976 frames per second (fps) to + // 29.97 fps, and your output scan type is interlaced, you can optionally enable + // hard telecine (HARD) to create a smoother picture. When you keep the default + // value, None (NONE), MediaConvert does a standard frame rate conversion to + // 29.97 without doing anything with the field polarity to create a smoother + // picture. 
Telecine *string `locationName:"telecine" type:"string" enum:"ProresTelecine"` } @@ -15692,6 +17202,50 @@ func (s *Queue) SetType(v string) *Queue { return s } +// Description of the source and destination queues between which the job has +// moved, along with the timestamp of the move +type QueueTransition struct { + _ struct{} `type:"structure"` + + // The queue that the job was on after the transition. + DestinationQueue *string `locationName:"destinationQueue" type:"string"` + + // The queue that the job was on before the transition. + SourceQueue *string `locationName:"sourceQueue" type:"string"` + + // The time, in Unix epoch format, that the job moved from the source queue + // to the destination queue. + Timestamp *time.Time `locationName:"timestamp" type:"timestamp" timestampFormat:"unixTimestamp"` +} + +// String returns the string representation +func (s QueueTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueTransition) GoString() string { + return s.String() +} + +// SetDestinationQueue sets the DestinationQueue field's value. +func (s *QueueTransition) SetDestinationQueue(v string) *QueueTransition { + s.DestinationQueue = &v + return s +} + +// SetSourceQueue sets the SourceQueue field's value. +func (s *QueueTransition) SetSourceQueue(v string) *QueueTransition { + s.SourceQueue = &v + return s +} + +// SetTimestamp sets the Timestamp field's value. +func (s *QueueTransition) SetTimestamp(v time.Time) *QueueTransition { + s.Timestamp = &v + return s +} + // Use Rectangle to identify a specific area of the video frame. type Rectangle struct { _ struct{} `type:"structure"` @@ -16649,8 +18203,8 @@ func (s *TimecodeConfig) SetTimestampOffset(v string) *TimecodeConfig { } // Enable Timed metadata insertion (TimedMetadataInsertion) to include ID3 tags -// in your job. To include timed metadata, you must enable it here, enable it -// in each output container, and specify tags and timecodes in ID3 insertion +// in any HLS outputs. To include timed metadata, you must enable it here, enable +// it in each output container, and specify tags and timecodes in ID3 insertion // (Id3Insertion) objects. type TimedMetadataInsertion struct { _ struct{} `type:"structure"` @@ -16719,8 +18273,8 @@ func (s *Timing) SetSubmitTime(v time.Time) *Timing { } type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16737,17 +18291,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16755,22 +18309,22 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
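// Editor's note: an illustrative sketch, not part of the vendored SDK. With this change
// the modeled exceptions (NotFoundException, TooManyRequestsException, and so on) use
// pointer receivers and export RespMetadata, so a caller can match them with errors.As
// from the standard library. "doErr" stands in for an error returned by any MediaConvert
// API call; the function name is hypothetical and the snippet assumes the standard
// library errors package plus the mediaconvert import.
func exampleHandleThrottling(doErr error) bool {
	var tooMany *mediaconvert.TooManyRequestsException
	if errors.As(doErr, &tooMany) {
		_ = tooMany.RespMetadata.RequestID // request ID is available for logging
		return true                        // caller should back off and retry
	}
	return false
}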
-func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } // Settings specific to caption sources that are specified by track number. @@ -16824,7 +18378,7 @@ type TtmlDestinationSettings struct { _ struct{} `type:"structure"` // Pass through style and position information from a TTML-like input source - // (TTML, SMPTE-TT, CFF-TT) to the CFF-TT output or TTML output. + // (TTML, SMPTE-TT) to the TTML output. StylePassthrough *string `locationName:"stylePassthrough" type:"string" enum:"TtmlStylePassthrough"` } @@ -16930,6 +18484,9 @@ type UpdateJobTemplateInput struct { // The new description for the job template, if you are changing it. Description *string `locationName:"description" type:"string"` + // Optional list of hop destinations. + HopDestinations []*HopDestination `locationName:"hopDestinations" type:"list"` + // The name of the job template you are modifying // // Name is a required field @@ -16983,6 +18540,16 @@ func (s *UpdateJobTemplateInput) Validate() error { invalidParams.AddNested("AccelerationSettings", err.(request.ErrInvalidParams)) } } + if s.HopDestinations != nil { + for i, v := range s.HopDestinations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HopDestinations", i), err.(request.ErrInvalidParams)) + } + } + } if s.Settings != nil { if err := s.Settings.Validate(); err != nil { invalidParams.AddNested("Settings", err.(request.ErrInvalidParams)) @@ -17013,6 +18580,12 @@ func (s *UpdateJobTemplateInput) SetDescription(v string) *UpdateJobTemplateInpu return s } +// SetHopDestinations sets the HopDestinations field's value. +func (s *UpdateJobTemplateInput) SetHopDestinations(v []*HopDestination) *UpdateJobTemplateInput { + s.HopDestinations = v + return s +} + // SetName sets the Name field's value. func (s *UpdateJobTemplateInput) SetName(v string) *UpdateJobTemplateInput { s.Name = &v @@ -17280,16 +18853,178 @@ func (s *UpdateQueueOutput) SetQueue(v *Queue) *UpdateQueueOutput { return s } +// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to +// the value VC3 +type Vc3Settings struct { + _ struct{} `type:"structure"` + + // If you are using the console, use the Framerate setting to specify the frame + // rate for this output. If you want to keep the same frame rate as the input + // video, choose Follow source. If you want to do frame rate conversion, choose + // a frame rate from the dropdown list or choose Custom. The framerates shown + // in the dropdown list are decimal approximations of fractions. If you choose + // Custom, specify your frame rate as a fraction. 
If you are creating your transcoding + // job specification as a JSON file without the console, use FramerateControl + // to specify which value the service uses for the frame rate for this output. + // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate + // from the input. Choose SPECIFIED if you want the service to use the frame + // rate you specify in the settings FramerateNumerator and FramerateDenominator. + FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Vc3FramerateControl"` + + // Choose the method that you want MediaConvert to use when increasing or decreasing + // the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically + // simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, + // you can use interpolate (INTERPOLATE) to avoid stutter. This results in a + // smooth picture, but might introduce undesirable video artifacts. For complex + // frame rate conversions, especially if your source video has already been + // converted from its original cadence, use FrameFormer (FRAMEFORMER) to do + // motion-compensated interpolation. FrameFormer chooses the best conversion + // method frame by frame. Note that using FrameFormer increases the transcoding + // time and incurs a significant add-on cost. + FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Vc3FramerateConversionAlgorithm"` + + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateDenominator to specify the denominator of this fraction. In this + // example, use 1001 for the value of FramerateDenominator. When you use the + // console for transcode jobs that use frame rate conversion, provide the value + // as a decimal number for Framerate. In this example, specify 23.976. + FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` + + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateNumerator to specify the numerator of this fraction. In this example, + // use 24000 for the value of FramerateNumerator. When you use the console for + // transcode jobs that use frame rate conversion, provide the value as a decimal + // number for Framerate. In this example, specify 23.976. + FramerateNumerator *int64 `locationName:"framerateNumerator" min:"24" type:"integer"` + + // Optional. Choose the scan line type for this output. If you don't specify + // a value, MediaConvert will create a progressive output. + InterlaceMode *string `locationName:"interlaceMode" type:"string" enum:"Vc3InterlaceMode"` + + // Ignore this setting unless your input frame rate is 23.976 or 24 frames per + // second (fps). Enable slow PAL to create a 25 fps output by relabeling the + // video frames and resampling your audio. Note that enabling this setting will + // slightly reduce the duration of your video. Related settings: You must also + // set Framerate to 25. In your JSON job specification, set (framerateControl) + // to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to + // 1. 
+ SlowPal *string `locationName:"slowPal" type:"string" enum:"Vc3SlowPal"` + + // When you do frame rate conversion from 23.976 frames per second (fps) to + // 29.97 fps, and your output scan type is interlaced, you can optionally enable + // hard telecine (HARD) to create a smoother picture. When you keep the default + // value, None (NONE), MediaConvert does a standard frame rate conversion to + // 29.97 without doing anything with the field polarity to create a smoother + // picture. + Telecine *string `locationName:"telecine" type:"string" enum:"Vc3Telecine"` + + // Specify the VC3 class to choose the quality characteristics for this output. + // VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator) + // and Resolution (height and width), determine your output bitrate. For example, + // say that your video resolution is 1920x1080 and your framerate is 29.97. + // Then Class 145 (CLASS_145) gives you an output with a bitrate of approximately + // 145 Mbps and Class 220 (CLASS_220) gives you and output with a bitrate of + // approximately 220 Mbps. VC3 class also specifies the color bit depth of your + // output. + Vc3Class *string `locationName:"vc3Class" type:"string" enum:"Vc3Class"` +} + +// String returns the string representation +func (s Vc3Settings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Vc3Settings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Vc3Settings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Vc3Settings"} + if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) + } + if s.FramerateNumerator != nil && *s.FramerateNumerator < 24 { + invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 24)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFramerateControl sets the FramerateControl field's value. +func (s *Vc3Settings) SetFramerateControl(v string) *Vc3Settings { + s.FramerateControl = &v + return s +} + +// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. +func (s *Vc3Settings) SetFramerateConversionAlgorithm(v string) *Vc3Settings { + s.FramerateConversionAlgorithm = &v + return s +} + +// SetFramerateDenominator sets the FramerateDenominator field's value. +func (s *Vc3Settings) SetFramerateDenominator(v int64) *Vc3Settings { + s.FramerateDenominator = &v + return s +} + +// SetFramerateNumerator sets the FramerateNumerator field's value. +func (s *Vc3Settings) SetFramerateNumerator(v int64) *Vc3Settings { + s.FramerateNumerator = &v + return s +} + +// SetInterlaceMode sets the InterlaceMode field's value. +func (s *Vc3Settings) SetInterlaceMode(v string) *Vc3Settings { + s.InterlaceMode = &v + return s +} + +// SetSlowPal sets the SlowPal field's value. +func (s *Vc3Settings) SetSlowPal(v string) *Vc3Settings { + s.SlowPal = &v + return s +} + +// SetTelecine sets the Telecine field's value. +func (s *Vc3Settings) SetTelecine(v string) *Vc3Settings { + s.Telecine = &v + return s +} + +// SetVc3Class sets the Vc3Class field's value. +func (s *Vc3Settings) SetVc3Class(v string) *Vc3Settings { + s.Vc3Class = &v + return s +} + // Video codec settings, (CodecSettings) under (VideoDescription), contains // the group of settings related to video encoding. 
The settings in this group // vary depending on the value that you choose for Video codec (Codec). For // each codec enum that you choose, define the corresponding settings object. -// The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, -// FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, -// Mpeg2Settings * PRORES, ProresSettings +// The following lists the codec enum, settings object pairs. * AV1, Av1Settings +// * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, +// H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings +// * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings type VideoCodecSettings struct { _ struct{} `type:"structure"` + // Required when you set Codec, under VideoDescription>CodecSettings to the + // value AV1. + Av1Settings *Av1Settings `locationName:"av1Settings" type:"structure"` + + // Required when you set your output video codec to AVC-Intra. For more information + // about the AVC-I settings, see the relevant specification. For detailed information + // about SD and HD in AVC-I, see https://ieeexplore.ieee.org/document/7290936. + AvcIntraSettings *AvcIntraSettings `locationName:"avcIntraSettings" type:"structure"` + // Specifies the video codec. This must be equal to one of the enum values defined // by the object VideoCodec. Codec *string `locationName:"codec" type:"string" enum:"VideoCodec"` @@ -17312,6 +19047,18 @@ type VideoCodecSettings struct { // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to // the value PRORES. ProresSettings *ProresSettings `locationName:"proresSettings" type:"structure"` + + // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to + // the value VC3 + Vc3Settings *Vc3Settings `locationName:"vc3Settings" type:"structure"` + + // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to + // the value VP8. + Vp8Settings *Vp8Settings `locationName:"vp8Settings" type:"structure"` + + // Required when you set (Codec) under (VideoDescription)>(CodecSettings) to + // the value VP9. + Vp9Settings *Vp9Settings `locationName:"vp9Settings" type:"structure"` } // String returns the string representation @@ -17327,6 +19074,16 @@ func (s VideoCodecSettings) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
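// Editor's note: an illustrative sketch, not part of the vendored SDK, showing how the
// new Vc3Settings above can be attached to VideoCodecSettings. The enum strings "VC3",
// "CLASS_145", and "SPECIFIED" and the 29.97 fps example come from the field
// documentation; the function name is hypothetical and the snippet assumes the
// mediaconvert import.
func exampleVc3Codec() *mediaconvert.VideoCodecSettings {
	vc3 := (&mediaconvert.Vc3Settings{}).
		SetVc3Class("CLASS_145"). // roughly 145 Mbps at 1920x1080 and 29.97 fps, per the doc
		SetFramerateControl("SPECIFIED").
		SetFramerateNumerator(30000). // 30000 / 1001 = 29.97 fps
		SetFramerateDenominator(1001)
	return (&mediaconvert.VideoCodecSettings{}).
		SetCodec("VC3").
		SetVc3Settings(vc3)
}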
func (s *VideoCodecSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "VideoCodecSettings"} + if s.Av1Settings != nil { + if err := s.Av1Settings.Validate(); err != nil { + invalidParams.AddNested("Av1Settings", err.(request.ErrInvalidParams)) + } + } + if s.AvcIntraSettings != nil { + if err := s.AvcIntraSettings.Validate(); err != nil { + invalidParams.AddNested("AvcIntraSettings", err.(request.ErrInvalidParams)) + } + } if s.FrameCaptureSettings != nil { if err := s.FrameCaptureSettings.Validate(); err != nil { invalidParams.AddNested("FrameCaptureSettings", err.(request.ErrInvalidParams)) @@ -17352,6 +19109,21 @@ func (s *VideoCodecSettings) Validate() error { invalidParams.AddNested("ProresSettings", err.(request.ErrInvalidParams)) } } + if s.Vc3Settings != nil { + if err := s.Vc3Settings.Validate(); err != nil { + invalidParams.AddNested("Vc3Settings", err.(request.ErrInvalidParams)) + } + } + if s.Vp8Settings != nil { + if err := s.Vp8Settings.Validate(); err != nil { + invalidParams.AddNested("Vp8Settings", err.(request.ErrInvalidParams)) + } + } + if s.Vp9Settings != nil { + if err := s.Vp9Settings.Validate(); err != nil { + invalidParams.AddNested("Vp9Settings", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -17359,6 +19131,18 @@ func (s *VideoCodecSettings) Validate() error { return nil } +// SetAv1Settings sets the Av1Settings field's value. +func (s *VideoCodecSettings) SetAv1Settings(v *Av1Settings) *VideoCodecSettings { + s.Av1Settings = v + return s +} + +// SetAvcIntraSettings sets the AvcIntraSettings field's value. +func (s *VideoCodecSettings) SetAvcIntraSettings(v *AvcIntraSettings) *VideoCodecSettings { + s.AvcIntraSettings = v + return s +} + // SetCodec sets the Codec field's value. func (s *VideoCodecSettings) SetCodec(v string) *VideoCodecSettings { s.Codec = &v @@ -17395,6 +19179,24 @@ func (s *VideoCodecSettings) SetProresSettings(v *ProresSettings) *VideoCodecSet return s } +// SetVc3Settings sets the Vc3Settings field's value. +func (s *VideoCodecSettings) SetVc3Settings(v *Vc3Settings) *VideoCodecSettings { + s.Vc3Settings = v + return s +} + +// SetVp8Settings sets the Vp8Settings field's value. +func (s *VideoCodecSettings) SetVp8Settings(v *Vp8Settings) *VideoCodecSettings { + s.Vp8Settings = v + return s +} + +// SetVp9Settings sets the Vp9Settings field's value. +func (s *VideoCodecSettings) SetVp9Settings(v *Vp9Settings) *VideoCodecSettings { + s.Vp9Settings = v + return s +} + // Settings for video outputs type VideoDescription struct { _ struct{} `type:"structure"` @@ -17416,9 +19218,10 @@ type VideoDescription struct { // the group of settings related to video encoding. The settings in this group // vary depending on the value that you choose for Video codec (Codec). For // each codec enum that you choose, define the corresponding settings object. - // The following lists the codec enum, settings object pairs. * FRAME_CAPTURE, - // FrameCaptureSettings * H_264, H264Settings * H_265, H265Settings * MPEG2, - // Mpeg2Settings * PRORES, ProresSettings + // The following lists the codec enum, settings object pairs. 
* AV1, Av1Settings + // * AVC_INTRA, AvcIntraSettings * FRAME_CAPTURE, FrameCaptureSettings * H_264, + // H264Settings * H_265, H265Settings * MPEG2, Mpeg2Settings * PRORES, ProresSettings + // * VC3, Vc3Settings * VP8, Vp8Settings * VP9, Vp9Settings CodecSettings *VideoCodecSettings `locationName:"codecSettings" type:"structure"` // Choose Insert (INSERT) for this setting to include color metadata in this @@ -17697,6 +19500,11 @@ type VideoPreprocessor struct { // individually. This setting is disabled by default. NoiseReducer *NoiseReducer `locationName:"noiseReducer" type:"structure"` + // If you work with a third party video watermarking partner, use the group + // of settings that correspond with your watermarking partner to include watermarks + // in your output. + PartnerWatermarking *PartnerWatermarking `locationName:"partnerWatermarking" type:"structure"` + // Timecode burn-in (TimecodeBurnIn)--Burns the output timecode and specified // prefix into the output. TimecodeBurnin *TimecodeBurnin `locationName:"timecodeBurnin" type:"structure"` @@ -17730,6 +19538,11 @@ func (s *VideoPreprocessor) Validate() error { invalidParams.AddNested("NoiseReducer", err.(request.ErrInvalidParams)) } } + if s.PartnerWatermarking != nil { + if err := s.PartnerWatermarking.Validate(); err != nil { + invalidParams.AddNested("PartnerWatermarking", err.(request.ErrInvalidParams)) + } + } if s.TimecodeBurnin != nil { if err := s.TimecodeBurnin.Validate(); err != nil { invalidParams.AddNested("TimecodeBurnin", err.(request.ErrInvalidParams)) @@ -17772,6 +19585,12 @@ func (s *VideoPreprocessor) SetNoiseReducer(v *NoiseReducer) *VideoPreprocessor return s } +// SetPartnerWatermarking sets the PartnerWatermarking field's value. +func (s *VideoPreprocessor) SetPartnerWatermarking(v *PartnerWatermarking) *VideoPreprocessor { + s.PartnerWatermarking = v + return s +} + // SetTimecodeBurnin sets the TimecodeBurnin field's value. func (s *VideoPreprocessor) SetTimecodeBurnin(v *TimecodeBurnin) *VideoPreprocessor { s.TimecodeBurnin = v @@ -17915,49 +19734,48 @@ func (s *VideoSelector) SetRotate(v string) *VideoSelector { return s } -// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to -// the value WAV. -type WavSettings struct { +// Required when you set Codec, under AudioDescriptions>CodecSettings, to the +// value Vorbis. +type VorbisSettings struct { _ struct{} `type:"structure"` - // Specify Bit depth (BitDepth), in bits per sample, to choose the encoding - // quality for this audio track. - BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"` - - // Specify the number of channels in this output audio track. Valid values are - // 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64. + // Optional. Specify the number of channels in this output audio track. Choosing + // Mono on the console gives you 1 output channel; choosing Stereo gives you + // 2. In the API, valid values are 1 and 2. The default value is 2. Channels *int64 `locationName:"channels" min:"1" type:"integer"` - // The service defaults to using RIFF for WAV outputs. If your output audio - // is likely to exceed 4 GB in file size, or if you otherwise need the extended - // support of the RF64 format, set your output WAV file format to RF64. - Format *string `locationName:"format" type:"string" enum:"WavFormat"` + // Optional. Specify the audio sample rate in Hz. Valid values are 22050, 32000, + // 44100, and 48000. The default value is 48000. 
+ SampleRate *int64 `locationName:"sampleRate" min:"22050" type:"integer"` - // Sample rate in Hz. - SampleRate *int64 `locationName:"sampleRate" min:"8000" type:"integer"` + // Optional. Specify the variable audio quality of this Vorbis output from -1 + // (lowest quality, ~45 kbit/s) to 10 (highest quality, ~500 kbit/s). The default + // value is 4 (~128 kbit/s). Values 5 and 6 are approximately 160 and 192 kbit/s, + // respectively. + VbrQuality *int64 `locationName:"vbrQuality" type:"integer"` } // String returns the string representation -func (s WavSettings) String() string { +func (s VorbisSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s WavSettings) GoString() string { +func (s VorbisSettings) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *WavSettings) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "WavSettings"} - if s.BitDepth != nil && *s.BitDepth < 16 { - invalidParams.Add(request.NewErrParamMinValue("BitDepth", 16)) - } +func (s *VorbisSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VorbisSettings"} if s.Channels != nil && *s.Channels < 1 { invalidParams.Add(request.NewErrParamMinValue("Channels", 1)) } - if s.SampleRate != nil && *s.SampleRate < 8000 { - invalidParams.Add(request.NewErrParamMinValue("SampleRate", 8000)) + if s.SampleRate != nil && *s.SampleRate < 22050 { + invalidParams.Add(request.NewErrParamMinValue("SampleRate", 22050)) + } + if s.VbrQuality != nil && *s.VbrQuality < -1 { + invalidParams.Add(request.NewErrParamMinValue("VbrQuality", -1)) } if invalidParams.Len() > 0 { @@ -17966,34 +19784,527 @@ func (s *WavSettings) Validate() error { return nil } -// SetBitDepth sets the BitDepth field's value. -func (s *WavSettings) SetBitDepth(v int64) *WavSettings { - s.BitDepth = &v - return s -} - // SetChannels sets the Channels field's value. -func (s *WavSettings) SetChannels(v int64) *WavSettings { +func (s *VorbisSettings) SetChannels(v int64) *VorbisSettings { s.Channels = &v return s } -// SetFormat sets the Format field's value. -func (s *WavSettings) SetFormat(v string) *WavSettings { - s.Format = &v +// SetSampleRate sets the SampleRate field's value. +func (s *VorbisSettings) SetSampleRate(v int64) *VorbisSettings { + s.SampleRate = &v return s } -// SetSampleRate sets the SampleRate field's value. -func (s *WavSettings) SetSampleRate(v int64) *WavSettings { - s.SampleRate = &v +// SetVbrQuality sets the VbrQuality field's value. +func (s *VorbisSettings) SetVbrQuality(v int64) *VorbisSettings { + s.VbrQuality = &v return s } -// Choose BROADCASTER_MIXED_AD when the input contains pre-mixed main audio -// + audio description (AD) as a stereo pair. The value for AudioType will be -// set to 3, which signals to downstream systems that this stream contains "broadcaster -// mixed AD". Note that the input received by the encoder must contain pre-mixed +// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to +// the value VP8. +type Vp8Settings struct { + _ struct{} `type:"structure"` + + // Target bitrate in bits/second. For example, enter five megabits per second + // as 5000000. + Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"` + + // If you are using the console, use the Framerate setting to specify the frame + // rate for this output. If you want to keep the same frame rate as the input + // video, choose Follow source. 
If you want to do frame rate conversion, choose + // a frame rate from the dropdown list or choose Custom. The framerates shown + // in the dropdown list are decimal approximations of fractions. If you choose + // Custom, specify your frame rate as a fraction. If you are creating your transcoding + // job specification as a JSON file without the console, use FramerateControl + // to specify which value the service uses for the frame rate for this output. + // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate + // from the input. Choose SPECIFIED if you want the service to use the frame + // rate you specify in the settings FramerateNumerator and FramerateDenominator. + FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Vp8FramerateControl"` + + // Choose the method that you want MediaConvert to use when increasing or decreasing + // the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically + // simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, + // you can use interpolate (INTERPOLATE) to avoid stutter. This results in a + // smooth picture, but might introduce undesirable video artifacts. For complex + // frame rate conversions, especially if your source video has already been + // converted from its original cadence, use FrameFormer (FRAMEFORMER) to do + // motion-compensated interpolation. FrameFormer chooses the best conversion + // method frame by frame. Note that using FrameFormer increases the transcoding + // time and incurs a significant add-on cost. + FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Vp8FramerateConversionAlgorithm"` + + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateDenominator to specify the denominator of this fraction. In this + // example, use 1001 for the value of FramerateDenominator. When you use the + // console for transcode jobs that use frame rate conversion, provide the value + // as a decimal number for Framerate. In this example, specify 23.976. + FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` + + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateNumerator to specify the numerator of this fraction. In this example, + // use 24000 for the value of FramerateNumerator. When you use the console for + // transcode jobs that use frame rate conversion, provide the value as a decimal + // number for Framerate. In this example, specify 23.976. + FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` + + // GOP Length (keyframe interval) in frames. Must be greater than zero. + GopSize *float64 `locationName:"gopSize" type:"double"` + + // Optional. Size of buffer (HRD buffer model) in bits. For example, enter five + // megabits as 5000000. + HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` + + // Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional. + // Specify the maximum bitrate in bits/second. For example, enter five megabits + // per second as 5000000. The default behavior uses twice the target bitrate + // as the maximum bitrate. + MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"` + + // Optional. 
Specify how the service determines the pixel aspect ratio (PAR) + // for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), + // uses the PAR from your input video for your output. To specify a different + // PAR in the console, choose any value other than Follow source. To specify + // a different PAR by editing the JSON job specification, choose SPECIFIED. + // When you choose SPECIFIED for this setting, you must also specify values + // for the parNumerator and parDenominator settings. + ParControl *string `locationName:"parControl" type:"string" enum:"Vp8ParControl"` + + // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the + // console, this corresponds to any value other than Follow source. When you + // specify an output pixel aspect ratio (PAR) that is different from your input + // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC + // widescreen, you would specify the ratio 40:33. In this example, the value + // for parDenominator is 33. + ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` + + // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the + // console, this corresponds to any value other than Follow source. When you + // specify an output pixel aspect ratio (PAR) that is different from your input + // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC + // widescreen, you would specify the ratio 40:33. In this example, the value + // for parNumerator is 40. + ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` + + // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you + // want to trade off encoding speed for output video quality. The default behavior + // is faster, lower quality, multi-pass encoding. + QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Vp8QualityTuningLevel"` + + // With the VP8 codec, you can use only the variable bitrate (VBR) rate control + // mode. + RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Vp8RateControlMode"` +} + +// String returns the string representation +func (s Vp8Settings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Vp8Settings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Vp8Settings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Vp8Settings"} + if s.Bitrate != nil && *s.Bitrate < 1000 { + invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000)) + } + if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) + } + if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1)) + } + if s.MaxBitrate != nil && *s.MaxBitrate < 1000 { + invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000)) + } + if s.ParDenominator != nil && *s.ParDenominator < 1 { + invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1)) + } + if s.ParNumerator != nil && *s.ParNumerator < 1 { + invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBitrate sets the Bitrate field's value. 
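// Editor's note: an illustrative sketch, not part of the vendored SDK, exercising the
// new Vp8Settings validation above. The strings "VBR" and "MULTI_PASS" are taken from
// the field documentation; the bitrates and GOP size are illustrative values only, and
// the function name is hypothetical. Assumes the mediaconvert import.
func exampleVp8Settings() (*mediaconvert.Vp8Settings, error) {
	vp8 := (&mediaconvert.Vp8Settings{}).
		SetRateControlMode("VBR").           // the only rate control mode available for VP8
		SetQualityTuningLevel("MULTI_PASS"). // required for maxBitrate to take effect, per the doc
		SetBitrate(5000000).                 // target of five megabits per second, per the doc
		SetMaxBitrate(10000000).             // illustrative ceiling
		SetGopSize(90)                       // keyframe interval in frames; must be greater than zero
	if err := vp8.Validate(); err != nil {
		return nil, err
	}
	return vp8, nil
}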
+func (s *Vp8Settings) SetBitrate(v int64) *Vp8Settings { + s.Bitrate = &v + return s +} + +// SetFramerateControl sets the FramerateControl field's value. +func (s *Vp8Settings) SetFramerateControl(v string) *Vp8Settings { + s.FramerateControl = &v + return s +} + +// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. +func (s *Vp8Settings) SetFramerateConversionAlgorithm(v string) *Vp8Settings { + s.FramerateConversionAlgorithm = &v + return s +} + +// SetFramerateDenominator sets the FramerateDenominator field's value. +func (s *Vp8Settings) SetFramerateDenominator(v int64) *Vp8Settings { + s.FramerateDenominator = &v + return s +} + +// SetFramerateNumerator sets the FramerateNumerator field's value. +func (s *Vp8Settings) SetFramerateNumerator(v int64) *Vp8Settings { + s.FramerateNumerator = &v + return s +} + +// SetGopSize sets the GopSize field's value. +func (s *Vp8Settings) SetGopSize(v float64) *Vp8Settings { + s.GopSize = &v + return s +} + +// SetHrdBufferSize sets the HrdBufferSize field's value. +func (s *Vp8Settings) SetHrdBufferSize(v int64) *Vp8Settings { + s.HrdBufferSize = &v + return s +} + +// SetMaxBitrate sets the MaxBitrate field's value. +func (s *Vp8Settings) SetMaxBitrate(v int64) *Vp8Settings { + s.MaxBitrate = &v + return s +} + +// SetParControl sets the ParControl field's value. +func (s *Vp8Settings) SetParControl(v string) *Vp8Settings { + s.ParControl = &v + return s +} + +// SetParDenominator sets the ParDenominator field's value. +func (s *Vp8Settings) SetParDenominator(v int64) *Vp8Settings { + s.ParDenominator = &v + return s +} + +// SetParNumerator sets the ParNumerator field's value. +func (s *Vp8Settings) SetParNumerator(v int64) *Vp8Settings { + s.ParNumerator = &v + return s +} + +// SetQualityTuningLevel sets the QualityTuningLevel field's value. +func (s *Vp8Settings) SetQualityTuningLevel(v string) *Vp8Settings { + s.QualityTuningLevel = &v + return s +} + +// SetRateControlMode sets the RateControlMode field's value. +func (s *Vp8Settings) SetRateControlMode(v string) *Vp8Settings { + s.RateControlMode = &v + return s +} + +// Required when you set (Codec) under (VideoDescription)>(CodecSettings) to +// the value VP9. +type Vp9Settings struct { + _ struct{} `type:"structure"` + + // Target bitrate in bits/second. For example, enter five megabits per second + // as 5000000. + Bitrate *int64 `locationName:"bitrate" min:"1000" type:"integer"` + + // If you are using the console, use the Framerate setting to specify the frame + // rate for this output. If you want to keep the same frame rate as the input + // video, choose Follow source. If you want to do frame rate conversion, choose + // a frame rate from the dropdown list or choose Custom. The framerates shown + // in the dropdown list are decimal approximations of fractions. If you choose + // Custom, specify your frame rate as a fraction. If you are creating your transcoding + // job specification as a JSON file without the console, use FramerateControl + // to specify which value the service uses for the frame rate for this output. + // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate + // from the input. Choose SPECIFIED if you want the service to use the frame + // rate you specify in the settings FramerateNumerator and FramerateDenominator. 
+ FramerateControl *string `locationName:"framerateControl" type:"string" enum:"Vp9FramerateControl"` + + // Choose the method that you want MediaConvert to use when increasing or decreasing + // the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically + // simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, + // you can use interpolate (INTERPOLATE) to avoid stutter. This results in a + // smooth picture, but might introduce undesirable video artifacts. For complex + // frame rate conversions, especially if your source video has already been + // converted from its original cadence, use FrameFormer (FRAMEFORMER) to do + // motion-compensated interpolation. FrameFormer chooses the best conversion + // method frame by frame. Note that using FrameFormer increases the transcoding + // time and incurs a significant add-on cost. + FramerateConversionAlgorithm *string `locationName:"framerateConversionAlgorithm" type:"string" enum:"Vp9FramerateConversionAlgorithm"` + + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateDenominator to specify the denominator of this fraction. In this + // example, use 1001 for the value of FramerateDenominator. When you use the + // console for transcode jobs that use frame rate conversion, provide the value + // as a decimal number for Framerate. In this example, specify 23.976. + FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer"` + + // When you use the API for transcode jobs that use frame rate conversion, specify + // the frame rate as a fraction. For example, 24000 / 1001 = 23.976 fps. Use + // FramerateNumerator to specify the numerator of this fraction. In this example, + // use 24000 for the value of FramerateNumerator. When you use the console for + // transcode jobs that use frame rate conversion, provide the value as a decimal + // number for Framerate. In this example, specify 23.976. + FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer"` + + // GOP Length (keyframe interval) in frames. Must be greater than zero. + GopSize *float64 `locationName:"gopSize" type:"double"` + + // Size of buffer (HRD buffer model) in bits. For example, enter five megabits + // as 5000000. + HrdBufferSize *int64 `locationName:"hrdBufferSize" type:"integer"` + + // Ignore this setting unless you set qualityTuningLevel to MULTI_PASS. Optional. + // Specify the maximum bitrate in bits/second. For example, enter five megabits + // per second as 5000000. The default behavior uses twice the target bitrate + // as the maximum bitrate. + MaxBitrate *int64 `locationName:"maxBitrate" min:"1000" type:"integer"` + + // Optional. Specify how the service determines the pixel aspect ratio for this + // output. The default behavior is to use the same pixel aspect ratio as your + // input video. + ParControl *string `locationName:"parControl" type:"string" enum:"Vp9ParControl"` + + // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the + // console, this corresponds to any value other than Follow source. When you + // specify an output pixel aspect ratio (PAR) that is different from your input + // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC + // widescreen, you would specify the ratio 40:33. In this example, the value + // for parDenominator is 33. 
+ ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` + + // Required when you set Pixel aspect ratio (parControl) to SPECIFIED. On the + // console, this corresponds to any value other than Follow source. When you + // specify an output pixel aspect ratio (PAR) that is different from your input + // video PAR, provide your output PAR as a ratio. For example, for D1/DV NTSC + // widescreen, you would specify the ratio 40:33. In this example, the value + // for parNumerator is 40. + ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` + + // Optional. Use Quality tuning level (qualityTuningLevel) to choose how you + // want to trade off encoding speed for output video quality. The default behavior + // is faster, lower quality, multi-pass encoding. + QualityTuningLevel *string `locationName:"qualityTuningLevel" type:"string" enum:"Vp9QualityTuningLevel"` + + // With the VP9 codec, you can use only the variable bitrate (VBR) rate control + // mode. + RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"Vp9RateControlMode"` +} + +// String returns the string representation +func (s Vp9Settings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Vp9Settings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Vp9Settings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Vp9Settings"} + if s.Bitrate != nil && *s.Bitrate < 1000 { + invalidParams.Add(request.NewErrParamMinValue("Bitrate", 1000)) + } + if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) + } + if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1)) + } + if s.MaxBitrate != nil && *s.MaxBitrate < 1000 { + invalidParams.Add(request.NewErrParamMinValue("MaxBitrate", 1000)) + } + if s.ParDenominator != nil && *s.ParDenominator < 1 { + invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1)) + } + if s.ParNumerator != nil && *s.ParNumerator < 1 { + invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBitrate sets the Bitrate field's value. +func (s *Vp9Settings) SetBitrate(v int64) *Vp9Settings { + s.Bitrate = &v + return s +} + +// SetFramerateControl sets the FramerateControl field's value. +func (s *Vp9Settings) SetFramerateControl(v string) *Vp9Settings { + s.FramerateControl = &v + return s +} + +// SetFramerateConversionAlgorithm sets the FramerateConversionAlgorithm field's value. +func (s *Vp9Settings) SetFramerateConversionAlgorithm(v string) *Vp9Settings { + s.FramerateConversionAlgorithm = &v + return s +} + +// SetFramerateDenominator sets the FramerateDenominator field's value. +func (s *Vp9Settings) SetFramerateDenominator(v int64) *Vp9Settings { + s.FramerateDenominator = &v + return s +} + +// SetFramerateNumerator sets the FramerateNumerator field's value. +func (s *Vp9Settings) SetFramerateNumerator(v int64) *Vp9Settings { + s.FramerateNumerator = &v + return s +} + +// SetGopSize sets the GopSize field's value. +func (s *Vp9Settings) SetGopSize(v float64) *Vp9Settings { + s.GopSize = &v + return s +} + +// SetHrdBufferSize sets the HrdBufferSize field's value. 
+func (s *Vp9Settings) SetHrdBufferSize(v int64) *Vp9Settings { + s.HrdBufferSize = &v + return s +} + +// SetMaxBitrate sets the MaxBitrate field's value. +func (s *Vp9Settings) SetMaxBitrate(v int64) *Vp9Settings { + s.MaxBitrate = &v + return s +} + +// SetParControl sets the ParControl field's value. +func (s *Vp9Settings) SetParControl(v string) *Vp9Settings { + s.ParControl = &v + return s +} + +// SetParDenominator sets the ParDenominator field's value. +func (s *Vp9Settings) SetParDenominator(v int64) *Vp9Settings { + s.ParDenominator = &v + return s +} + +// SetParNumerator sets the ParNumerator field's value. +func (s *Vp9Settings) SetParNumerator(v int64) *Vp9Settings { + s.ParNumerator = &v + return s +} + +// SetQualityTuningLevel sets the QualityTuningLevel field's value. +func (s *Vp9Settings) SetQualityTuningLevel(v string) *Vp9Settings { + s.QualityTuningLevel = &v + return s +} + +// SetRateControlMode sets the RateControlMode field's value. +func (s *Vp9Settings) SetRateControlMode(v string) *Vp9Settings { + s.RateControlMode = &v + return s +} + +// Required when you set (Codec) under (AudioDescriptions)>(CodecSettings) to +// the value WAV. +type WavSettings struct { + _ struct{} `type:"structure"` + + // Specify Bit depth (BitDepth), in bits per sample, to choose the encoding + // quality for this audio track. + BitDepth *int64 `locationName:"bitDepth" min:"16" type:"integer"` + + // Specify the number of channels in this output audio track. Valid values are + // 1 and even numbers up to 64. For example, 1, 2, 4, 6, and so on, up to 64. + Channels *int64 `locationName:"channels" min:"1" type:"integer"` + + // The service defaults to using RIFF for WAV outputs. If your output audio + // is likely to exceed 4 GB in file size, or if you otherwise need the extended + // support of the RF64 format, set your output WAV file format to RF64. + Format *string `locationName:"format" type:"string" enum:"WavFormat"` + + // Sample rate in Hz. + SampleRate *int64 `locationName:"sampleRate" min:"8000" type:"integer"` +} + +// String returns the string representation +func (s WavSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WavSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WavSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WavSettings"} + if s.BitDepth != nil && *s.BitDepth < 16 { + invalidParams.Add(request.NewErrParamMinValue("BitDepth", 16)) + } + if s.Channels != nil && *s.Channels < 1 { + invalidParams.Add(request.NewErrParamMinValue("Channels", 1)) + } + if s.SampleRate != nil && *s.SampleRate < 8000 { + invalidParams.Add(request.NewErrParamMinValue("SampleRate", 8000)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBitDepth sets the BitDepth field's value. +func (s *WavSettings) SetBitDepth(v int64) *WavSettings { + s.BitDepth = &v + return s +} + +// SetChannels sets the Channels field's value. +func (s *WavSettings) SetChannels(v int64) *WavSettings { + s.Channels = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *WavSettings) SetFormat(v string) *WavSettings { + s.Format = &v + return s +} + +// SetSampleRate sets the SampleRate field's value. 
+func (s *WavSettings) SetSampleRate(v int64) *WavSettings { + s.SampleRate = &v + return s +} + +// Choose BROADCASTER_MIXED_AD when the input contains pre-mixed main audio +// + audio description (AD) as a stereo pair. The value for AudioType will be +// set to 3, which signals to downstream systems that this stream contains "broadcaster +// mixed AD". Note that the input received by the encoder must contain pre-mixed // audio; the encoder does not perform the mixing. When you choose BROADCASTER_MIXED_AD, // the encoder ignores any values you provide in AudioType and FollowInputAudioType. // Choose NORMAL when the input does not contain pre-mixed audio + audio description @@ -18007,6 +20318,14 @@ const ( AacAudioDescriptionBroadcasterMixNormal = "NORMAL" ) +// AacAudioDescriptionBroadcasterMix_Values returns all elements of the AacAudioDescriptionBroadcasterMix enum +func AacAudioDescriptionBroadcasterMix_Values() []string { + return []string{ + AacAudioDescriptionBroadcasterMixBroadcasterMixedAd, + AacAudioDescriptionBroadcasterMixNormal, + } +} + // AAC Profile. const ( // AacCodecProfileLc is a AacCodecProfile enum value @@ -18019,6 +20338,15 @@ const ( AacCodecProfileHev2 = "HEV2" ) +// AacCodecProfile_Values returns all elements of the AacCodecProfile enum +func AacCodecProfile_Values() []string { + return []string{ + AacCodecProfileLc, + AacCodecProfileHev1, + AacCodecProfileHev2, + } +} + // Mono (Audio Description), Mono, Stereo, or 5.1 channel layout. Valid values // depend on rate control mode and profile. "1.0 - Audio Description (Receiver // Mix)" setting receives a stereo description plus control track and emits @@ -18041,6 +20369,17 @@ const ( AacCodingModeCodingMode51 = "CODING_MODE_5_1" ) +// AacCodingMode_Values returns all elements of the AacCodingMode enum +func AacCodingMode_Values() []string { + return []string{ + AacCodingModeAdReceiverMix, + AacCodingModeCodingMode10, + AacCodingModeCodingMode11, + AacCodingModeCodingMode20, + AacCodingModeCodingMode51, + } +} + // Rate Control Mode. const ( // AacRateControlModeCbr is a AacRateControlMode enum value @@ -18050,6 +20389,14 @@ const ( AacRateControlModeVbr = "VBR" ) +// AacRateControlMode_Values returns all elements of the AacRateControlMode enum +func AacRateControlMode_Values() []string { + return []string{ + AacRateControlModeCbr, + AacRateControlModeVbr, + } +} + // Enables LATM/LOAS AAC output. Note that if you use LATM/LOAS AAC in an output, // you must choose "No container" for the output container. const ( @@ -18060,6 +20407,14 @@ const ( AacRawFormatNone = "NONE" ) +// AacRawFormat_Values returns all elements of the AacRawFormat enum +func AacRawFormat_Values() []string { + return []string{ + AacRawFormatLatmLoas, + AacRawFormatNone, + } +} + // Use MPEG-2 AAC instead of MPEG-4 AAC audio for raw or MPEG-2 Transport Stream // containers. const ( @@ -18070,6 +20425,14 @@ const ( AacSpecificationMpeg4 = "MPEG4" ) +// AacSpecification_Values returns all elements of the AacSpecification enum +func AacSpecification_Values() []string { + return []string{ + AacSpecificationMpeg2, + AacSpecificationMpeg4, + } +} + // VBR Quality Level - Only used if rate_control_mode is VBR. 
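// Illustrative sketch, not part of the vendored diff: the generated settings
// types above follow the usual aws-sdk-go pattern of pointer fields, fluent
// Set* helpers that return the receiver, and a Validate method that enforces
// the min constraints encoded in the struct tags (for example Bitrate >= 1000
// and FramerateDenominator >= 1). A minimal caller might look like this; the
// 24000/1001 values mirror the 23.976 fps example in the doc comments.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	vp9 := (&mediaconvert.Vp9Settings{}).
		SetBitrate(2000000).          // target bitrate in bits/second
		SetMaxBitrate(5000000).       // ignored unless qualityTuningLevel is MULTI_PASS
		SetGopSize(90).               // keyframe interval in frames
		SetFramerateNumerator(24000). // 24000 / 1001 = 23.976 fps
		SetFramerateDenominator(1001)

	if err := vp9.Validate(); err != nil {
		fmt.Println("invalid VP9 settings:", err)
	}

	wav := (&mediaconvert.WavSettings{}).
		SetBitDepth(24).     // bits per sample, minimum 16
		SetChannels(2).      // stereo
		SetSampleRate(48000) // Hz, minimum 8000

	if err := wav.Validate(); err != nil {
		fmt.Println("invalid WAV settings:", err)
	}
}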
const ( // AacVbrQualityLow is a AacVbrQuality enum value @@ -18085,6 +20448,16 @@ const ( AacVbrQualityHigh = "HIGH" ) +// AacVbrQuality_Values returns all elements of the AacVbrQuality enum +func AacVbrQuality_Values() []string { + return []string{ + AacVbrQualityLow, + AacVbrQualityMediumLow, + AacVbrQualityMediumHigh, + AacVbrQualityHigh, + } +} + // Specify the bitstream mode for the AC-3 stream that the encoder emits. For // more information about the AC3 bitstream mode, see ATSC A/52-2012 (Annex // E). @@ -18114,6 +20487,20 @@ const ( Ac3BitstreamModeVoiceOver = "VOICE_OVER" ) +// Ac3BitstreamMode_Values returns all elements of the Ac3BitstreamMode enum +func Ac3BitstreamMode_Values() []string { + return []string{ + Ac3BitstreamModeCompleteMain, + Ac3BitstreamModeCommentary, + Ac3BitstreamModeDialogue, + Ac3BitstreamModeEmergency, + Ac3BitstreamModeHearingImpaired, + Ac3BitstreamModeMusicAndEffects, + Ac3BitstreamModeVisuallyImpaired, + Ac3BitstreamModeVoiceOver, + } +} + // Dolby Digital coding mode. Determines number of channels. const ( // Ac3CodingModeCodingMode10 is a Ac3CodingMode enum value @@ -18129,6 +20516,16 @@ const ( Ac3CodingModeCodingMode32Lfe = "CODING_MODE_3_2_LFE" ) +// Ac3CodingMode_Values returns all elements of the Ac3CodingMode enum +func Ac3CodingMode_Values() []string { + return []string{ + Ac3CodingModeCodingMode10, + Ac3CodingModeCodingMode11, + Ac3CodingModeCodingMode20, + Ac3CodingModeCodingMode32Lfe, + } +} + // If set to FILM_STANDARD, adds dynamic range compression signaling to the // output bitstream as defined in the Dolby Digital specification. const ( @@ -18139,6 +20536,14 @@ const ( Ac3DynamicRangeCompressionProfileNone = "NONE" ) +// Ac3DynamicRangeCompressionProfile_Values returns all elements of the Ac3DynamicRangeCompressionProfile enum +func Ac3DynamicRangeCompressionProfile_Values() []string { + return []string{ + Ac3DynamicRangeCompressionProfileFilmStandard, + Ac3DynamicRangeCompressionProfileNone, + } +} + // Applies a 120Hz lowpass filter to the LFE channel prior to encoding. Only // valid with 3_2_LFE coding mode. const ( @@ -18149,6 +20554,14 @@ const ( Ac3LfeFilterDisabled = "DISABLED" ) +// Ac3LfeFilter_Values returns all elements of the Ac3LfeFilter enum +func Ac3LfeFilter_Values() []string { + return []string{ + Ac3LfeFilterEnabled, + Ac3LfeFilterDisabled, + } +} + // When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, // or DolbyE decoder that supplied this audio data. If audio was not supplied // from one of these streams, then the static metadata settings will be used. @@ -18160,6 +20573,14 @@ const ( Ac3MetadataControlUseConfigured = "USE_CONFIGURED" ) +// Ac3MetadataControl_Values returns all elements of the Ac3MetadataControl enum +func Ac3MetadataControl_Values() []string { + return []string{ + Ac3MetadataControlFollowInput, + Ac3MetadataControlUseConfigured, + } +} + // Specify whether the service runs your job with accelerated transcoding. Choose // DISABLED if you don't want accelerated transcoding. Choose ENABLED if you // want your job to run with accelerated transcoding and to fail if your input @@ -18178,6 +20599,15 @@ const ( AccelerationModePreferred = "PREFERRED" ) +// AccelerationMode_Values returns all elements of the AccelerationMode enum +func AccelerationMode_Values() []string { + return []string{ + AccelerationModeDisabled, + AccelerationModeEnabled, + AccelerationModePreferred, + } +} + // Describes whether the current job is running with accelerated transcoding. 
// For jobs that have Acceleration (AccelerationMode) set to DISABLED, AccelerationStatus // is always NOT_APPLICABLE. For jobs that have Acceleration (AccelerationMode) @@ -18203,6 +20633,16 @@ const ( AccelerationStatusNotAccelerated = "NOT_ACCELERATED" ) +// AccelerationStatus_Values returns all elements of the AccelerationStatus enum +func AccelerationStatus_Values() []string { + return []string{ + AccelerationStatusNotApplicable, + AccelerationStatusInProgress, + AccelerationStatusAccelerated, + AccelerationStatusNotAccelerated, + } +} + // This setting only applies to H.264, H.265, and MPEG2 outputs. Use Insert // AFD signaling (AfdSignaling) to specify whether the service includes AFD // values in the output video data and what those values are. * Choose None @@ -18220,6 +20660,15 @@ const ( AfdSignalingFixed = "FIXED" ) +// AfdSignaling_Values returns all elements of the AfdSignaling enum +func AfdSignaling_Values() []string { + return []string{ + AfdSignalingNone, + AfdSignalingAuto, + AfdSignalingFixed, + } +} + // Ignore this setting unless this input is a QuickTime animation with an alpha // channel. Use this setting to create separate Key and Fill outputs. In each // output, specify which part of the input MediaConvert uses. Leave this setting @@ -18234,6 +20683,14 @@ const ( AlphaBehaviorRemapToLuma = "REMAP_TO_LUMA" ) +// AlphaBehavior_Values returns all elements of the AlphaBehavior enum +func AlphaBehavior_Values() []string { + return []string{ + AlphaBehaviorDiscard, + AlphaBehaviorRemapToLuma, + } +} + // Specify whether this set of input captions appears in your outputs in both // 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes // the captions data in two ways: it passes the 608 data through using the 608 @@ -18247,6 +20704,14 @@ const ( AncillaryConvert608To708Disabled = "DISABLED" ) +// AncillaryConvert608To708_Values returns all elements of the AncillaryConvert608To708 enum +func AncillaryConvert608To708_Values() []string { + return []string{ + AncillaryConvert608To708Upconvert, + AncillaryConvert608To708Disabled, + } +} + // By default, the service terminates any unterminated captions at the end of // each input. If you want the caption to continue onto your next input, disable // this setting. @@ -18258,6 +20723,14 @@ const ( AncillaryTerminateCaptionsDisabled = "DISABLED" ) +// AncillaryTerminateCaptions_Values returns all elements of the AncillaryTerminateCaptions enum +func AncillaryTerminateCaptions_Values() []string { + return []string{ + AncillaryTerminateCaptionsEndOfInput, + AncillaryTerminateCaptionsDisabled, + } +} + // The anti-alias filter is automatically applied to all outputs. The service // no longer accepts the value DISABLED for AntiAlias. If you specify that in // your job, the service will ignore the setting. @@ -18269,6 +20742,85 @@ const ( AntiAliasEnabled = "ENABLED" ) +// AntiAlias_Values returns all elements of the AntiAlias enum +func AntiAlias_Values() []string { + return []string{ + AntiAliasDisabled, + AntiAliasEnabled, + } +} + +// You can add a tag for this mono-channel audio track to mimic its placement +// in a multi-channel layout. For example, if this track is the left surround +// channel, choose Left surround (LS). 
+const ( + // AudioChannelTagL is a AudioChannelTag enum value + AudioChannelTagL = "L" + + // AudioChannelTagR is a AudioChannelTag enum value + AudioChannelTagR = "R" + + // AudioChannelTagC is a AudioChannelTag enum value + AudioChannelTagC = "C" + + // AudioChannelTagLfe is a AudioChannelTag enum value + AudioChannelTagLfe = "LFE" + + // AudioChannelTagLs is a AudioChannelTag enum value + AudioChannelTagLs = "LS" + + // AudioChannelTagRs is a AudioChannelTag enum value + AudioChannelTagRs = "RS" + + // AudioChannelTagLc is a AudioChannelTag enum value + AudioChannelTagLc = "LC" + + // AudioChannelTagRc is a AudioChannelTag enum value + AudioChannelTagRc = "RC" + + // AudioChannelTagCs is a AudioChannelTag enum value + AudioChannelTagCs = "CS" + + // AudioChannelTagLsd is a AudioChannelTag enum value + AudioChannelTagLsd = "LSD" + + // AudioChannelTagRsd is a AudioChannelTag enum value + AudioChannelTagRsd = "RSD" + + // AudioChannelTagTcs is a AudioChannelTag enum value + AudioChannelTagTcs = "TCS" + + // AudioChannelTagVhl is a AudioChannelTag enum value + AudioChannelTagVhl = "VHL" + + // AudioChannelTagVhc is a AudioChannelTag enum value + AudioChannelTagVhc = "VHC" + + // AudioChannelTagVhr is a AudioChannelTag enum value + AudioChannelTagVhr = "VHR" +) + +// AudioChannelTag_Values returns all elements of the AudioChannelTag enum +func AudioChannelTag_Values() []string { + return []string{ + AudioChannelTagL, + AudioChannelTagR, + AudioChannelTagC, + AudioChannelTagLfe, + AudioChannelTagLs, + AudioChannelTagRs, + AudioChannelTagLc, + AudioChannelTagRc, + AudioChannelTagCs, + AudioChannelTagLsd, + AudioChannelTagRsd, + AudioChannelTagTcs, + AudioChannelTagVhl, + AudioChannelTagVhc, + AudioChannelTagVhr, + } +} + // Type of Audio codec. const ( // AudioCodecAac is a AudioCodec enum value @@ -18295,10 +20847,33 @@ const ( // AudioCodecEac3Atmos is a AudioCodec enum value AudioCodecEac3Atmos = "EAC3_ATMOS" + // AudioCodecVorbis is a AudioCodec enum value + AudioCodecVorbis = "VORBIS" + + // AudioCodecOpus is a AudioCodec enum value + AudioCodecOpus = "OPUS" + // AudioCodecPassthrough is a AudioCodec enum value AudioCodecPassthrough = "PASSTHROUGH" ) +// AudioCodec_Values returns all elements of the AudioCodec enum +func AudioCodec_Values() []string { + return []string{ + AudioCodecAac, + AudioCodecMp2, + AudioCodecMp3, + AudioCodecWav, + AudioCodecAiff, + AudioCodecAc3, + AudioCodecEac3, + AudioCodecEac3Atmos, + AudioCodecVorbis, + AudioCodecOpus, + AudioCodecPassthrough, + } +} + // Enable this setting on one audio selector to set it as the default for the // job. The service uses this default for outputs where it can't find the specified // input audio. If you don't set a default, those outputs have no audio. @@ -18310,6 +20885,14 @@ const ( AudioDefaultSelectionNotDefault = "NOT_DEFAULT" ) +// AudioDefaultSelection_Values returns all elements of the AudioDefaultSelection enum +func AudioDefaultSelection_Values() []string { + return []string{ + AudioDefaultSelectionDefault, + AudioDefaultSelectionNotDefault, + } +} + // Specify which source for language code takes precedence for this audio track. // When you choose Follow input (FOLLOW_INPUT), the service uses the language // code from the input track if it's present. 
If there's no language code on @@ -18324,6 +20907,14 @@ AudioLanguageCodeControlUseConfigured = "USE_CONFIGURED" ) +// AudioLanguageCodeControl_Values returns all elements of the AudioLanguageCodeControl enum +func AudioLanguageCodeControl_Values() []string { + return []string{ + AudioLanguageCodeControlFollowInput, + AudioLanguageCodeControlUseConfigured, + } +} + // Choose one of the following audio normalization algorithms: ITU-R BS.1770-1: // Ungated loudness. A measurement of ungated average loudness for an entire // piece of content, suitable for measurement of short-form content under ATSC @@ -18344,69 +20935,420 @@ const ( // AudioNormalizationAlgorithmItuBs17703 is a AudioNormalizationAlgorithm enum value AudioNormalizationAlgorithmItuBs17703 = "ITU_BS_1770_3" - // AudioNormalizationAlgorithmItuBs17704 is a AudioNormalizationAlgorithm enum value - AudioNormalizationAlgorithmItuBs17704 = "ITU_BS_1770_4" + // AudioNormalizationAlgorithmItuBs17704 is a AudioNormalizationAlgorithm enum value + AudioNormalizationAlgorithmItuBs17704 = "ITU_BS_1770_4" +) + +// AudioNormalizationAlgorithm_Values returns all elements of the AudioNormalizationAlgorithm enum +func AudioNormalizationAlgorithm_Values() []string { + return []string{ + AudioNormalizationAlgorithmItuBs17701, + AudioNormalizationAlgorithmItuBs17702, + AudioNormalizationAlgorithmItuBs17703, + AudioNormalizationAlgorithmItuBs17704, + } +} + +// When enabled the output audio is corrected using the chosen algorithm. If +// disabled, the audio will be measured but not adjusted. +const ( + // AudioNormalizationAlgorithmControlCorrectAudio is a AudioNormalizationAlgorithmControl enum value + AudioNormalizationAlgorithmControlCorrectAudio = "CORRECT_AUDIO" + + // AudioNormalizationAlgorithmControlMeasureOnly is a AudioNormalizationAlgorithmControl enum value + AudioNormalizationAlgorithmControlMeasureOnly = "MEASURE_ONLY" +) + +// AudioNormalizationAlgorithmControl_Values returns all elements of the AudioNormalizationAlgorithmControl enum +func AudioNormalizationAlgorithmControl_Values() []string { + return []string{ + AudioNormalizationAlgorithmControlCorrectAudio, + AudioNormalizationAlgorithmControlMeasureOnly, + } +} + +// If set to LOG, log each output's audio track loudness to a CSV file. +const ( + // AudioNormalizationLoudnessLoggingLog is a AudioNormalizationLoudnessLogging enum value + AudioNormalizationLoudnessLoggingLog = "LOG" + + // AudioNormalizationLoudnessLoggingDontLog is a AudioNormalizationLoudnessLogging enum value + AudioNormalizationLoudnessLoggingDontLog = "DONT_LOG" +) + +// AudioNormalizationLoudnessLogging_Values returns all elements of the AudioNormalizationLoudnessLogging enum +func AudioNormalizationLoudnessLogging_Values() []string { + return []string{ + AudioNormalizationLoudnessLoggingLog, + AudioNormalizationLoudnessLoggingDontLog, + } +} + +// If set to TRUE_PEAK, calculate and log the TruePeak for each output's audio +// track loudness.
+const ( + // AudioNormalizationPeakCalculationTruePeak is a AudioNormalizationPeakCalculation enum value + AudioNormalizationPeakCalculationTruePeak = "TRUE_PEAK" + + // AudioNormalizationPeakCalculationNone is a AudioNormalizationPeakCalculation enum value + AudioNormalizationPeakCalculationNone = "NONE" +) + +// AudioNormalizationPeakCalculation_Values returns all elements of the AudioNormalizationPeakCalculation enum +func AudioNormalizationPeakCalculation_Values() []string { + return []string{ + AudioNormalizationPeakCalculationTruePeak, + AudioNormalizationPeakCalculationNone, + } +} + +// Specifies the type of the audio selector. +const ( + // AudioSelectorTypePid is a AudioSelectorType enum value + AudioSelectorTypePid = "PID" + + // AudioSelectorTypeTrack is a AudioSelectorType enum value + AudioSelectorTypeTrack = "TRACK" + + // AudioSelectorTypeLanguageCode is a AudioSelectorType enum value + AudioSelectorTypeLanguageCode = "LANGUAGE_CODE" +) + +// AudioSelectorType_Values returns all elements of the AudioSelectorType enum +func AudioSelectorType_Values() []string { + return []string{ + AudioSelectorTypePid, + AudioSelectorTypeTrack, + AudioSelectorTypeLanguageCode, + } +} + +// When set to FOLLOW_INPUT, if the input contains an ISO 639 audio_type, then +// that value is passed through to the output. If the input contains no ISO +// 639 audio_type, the value in Audio Type is included in the output. Otherwise +// the value in Audio Type is included in the output. Note that this field and +// audioType are both ignored if audioDescriptionBroadcasterMix is set to BROADCASTER_MIXED_AD. +const ( + // AudioTypeControlFollowInput is a AudioTypeControl enum value + AudioTypeControlFollowInput = "FOLLOW_INPUT" + + // AudioTypeControlUseConfigured is a AudioTypeControl enum value + AudioTypeControlUseConfigured = "USE_CONFIGURED" +) + +// AudioTypeControl_Values returns all elements of the AudioTypeControl enum +func AudioTypeControl_Values() []string { + return []string{ + AudioTypeControlFollowInput, + AudioTypeControlUseConfigured, + } +} + +// Specify the strength of any adaptive quantization filters that you enable. +// The value that you choose here applies to Spatial adaptive quantization (spatialAdaptiveQuantization). +const ( + // Av1AdaptiveQuantizationOff is a Av1AdaptiveQuantization enum value + Av1AdaptiveQuantizationOff = "OFF" + + // Av1AdaptiveQuantizationLow is a Av1AdaptiveQuantization enum value + Av1AdaptiveQuantizationLow = "LOW" + + // Av1AdaptiveQuantizationMedium is a Av1AdaptiveQuantization enum value + Av1AdaptiveQuantizationMedium = "MEDIUM" + + // Av1AdaptiveQuantizationHigh is a Av1AdaptiveQuantization enum value + Av1AdaptiveQuantizationHigh = "HIGH" + + // Av1AdaptiveQuantizationHigher is a Av1AdaptiveQuantization enum value + Av1AdaptiveQuantizationHigher = "HIGHER" + + // Av1AdaptiveQuantizationMax is a Av1AdaptiveQuantization enum value + Av1AdaptiveQuantizationMax = "MAX" +) + +// Av1AdaptiveQuantization_Values returns all elements of the Av1AdaptiveQuantization enum +func Av1AdaptiveQuantization_Values() []string { + return []string{ + Av1AdaptiveQuantizationOff, + Av1AdaptiveQuantizationLow, + Av1AdaptiveQuantizationMedium, + Av1AdaptiveQuantizationHigh, + Av1AdaptiveQuantizationHigher, + Av1AdaptiveQuantizationMax, + } +} + +// If you are using the console, use the Framerate setting to specify the frame +// rate for this output. If you want to keep the same frame rate as the input +// video, choose Follow source. 
If you want to do frame rate conversion, choose +// a frame rate from the dropdown list or choose Custom. The framerates shown +// in the dropdown list are decimal approximations of fractions. If you choose +// Custom, specify your frame rate as a fraction. If you are creating your transcoding +// job specification as a JSON file without the console, use FramerateControl +// to specify which value the service uses for the frame rate for this output. +// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate +// from the input. Choose SPECIFIED if you want the service to use the frame +// rate you specify in the settings FramerateNumerator and FramerateDenominator. +const ( + // Av1FramerateControlInitializeFromSource is a Av1FramerateControl enum value + Av1FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" + + // Av1FramerateControlSpecified is a Av1FramerateControl enum value + Av1FramerateControlSpecified = "SPECIFIED" +) + +// Av1FramerateControl_Values returns all elements of the Av1FramerateControl enum +func Av1FramerateControl_Values() []string { + return []string{ + Av1FramerateControlInitializeFromSource, + Av1FramerateControlSpecified, + } +} + +// Choose the method that you want MediaConvert to use when increasing or decreasing +// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically +// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, +// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a +// smooth picture, but might introduce undesirable video artifacts. For complex +// frame rate conversions, especially if your source video has already been +// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do +// motion-compensated interpolation. FrameFormer chooses the best conversion +// method frame by frame. Note that using FrameFormer increases the transcoding +// time and incurs a significant add-on cost. +const ( + // Av1FramerateConversionAlgorithmDuplicateDrop is a Av1FramerateConversionAlgorithm enum value + Av1FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" + + // Av1FramerateConversionAlgorithmInterpolate is a Av1FramerateConversionAlgorithm enum value + Av1FramerateConversionAlgorithmInterpolate = "INTERPOLATE" + + // Av1FramerateConversionAlgorithmFrameformer is a Av1FramerateConversionAlgorithm enum value + Av1FramerateConversionAlgorithmFrameformer = "FRAMEFORMER" +) + +// Av1FramerateConversionAlgorithm_Values returns all elements of the Av1FramerateConversionAlgorithm enum +func Av1FramerateConversionAlgorithm_Values() []string { + return []string{ + Av1FramerateConversionAlgorithmDuplicateDrop, + Av1FramerateConversionAlgorithmInterpolate, + Av1FramerateConversionAlgorithmFrameformer, + } +} + +// With AV1 outputs, for rate control mode, MediaConvert supports only quality-defined +// variable bitrate (QVBR). You can't use CBR or VBR. +const ( + // Av1RateControlModeQvbr is a Av1RateControlMode enum value + Av1RateControlModeQvbr = "QVBR" +) + +// Av1RateControlMode_Values returns all elements of the Av1RateControlMode enum +func Av1RateControlMode_Values() []string { + return []string{ + Av1RateControlModeQvbr, + } +} + +// Keep the default value, Enabled (ENABLED), to adjust quantization within +// each frame based on spatial variation of content complexity.
When you enable +// this feature, the encoder uses fewer bits on areas that can sustain more +// distortion with no noticeable visual degradation and uses more bits on areas +// where any small distortion will be noticeable. For example, complex textured +// blocks are encoded with fewer bits and smooth textured blocks are encoded +// with more bits. Enabling this feature will almost always improve your video +// quality. Note, though, that this feature doesn't take into account where +// the viewer's attention is likely to be. If viewers are likely to be focusing +// their attention on a part of the screen with a lot of complex texture, you +// might choose to disable this feature. Related setting: When you enable spatial +// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) +// depending on your content. For homogeneous content, such as cartoons and +// video games, set it to Low. For content with a wider variety of textures, +// set it to High or Higher. +const ( + // Av1SpatialAdaptiveQuantizationDisabled is a Av1SpatialAdaptiveQuantization enum value + Av1SpatialAdaptiveQuantizationDisabled = "DISABLED" + + // Av1SpatialAdaptiveQuantizationEnabled is a Av1SpatialAdaptiveQuantization enum value + Av1SpatialAdaptiveQuantizationEnabled = "ENABLED" +) + +// Av1SpatialAdaptiveQuantization_Values returns all elements of the Av1SpatialAdaptiveQuantization enum +func Av1SpatialAdaptiveQuantization_Values() []string { + return []string{ + Av1SpatialAdaptiveQuantizationDisabled, + Av1SpatialAdaptiveQuantizationEnabled, + } +} + +// Specify the AVC-Intra class of your output. The AVC-Intra class selection +// determines the output video bit rate depending on the frame rate of the output. +// Outputs with higher class values have higher bitrates and improved image +// quality. +const ( + // AvcIntraClassClass50 is a AvcIntraClass enum value + AvcIntraClassClass50 = "CLASS_50" + + // AvcIntraClassClass100 is a AvcIntraClass enum value + AvcIntraClassClass100 = "CLASS_100" + + // AvcIntraClassClass200 is a AvcIntraClass enum value + AvcIntraClassClass200 = "CLASS_200" ) -// When enabled the output audio is corrected using the chosen algorithm. If -// disabled, the audio will be measured but not adjusted. +// AvcIntraClass_Values returns all elements of the AvcIntraClass enum +func AvcIntraClass_Values() []string { + return []string{ + AvcIntraClassClass50, + AvcIntraClassClass100, + AvcIntraClassClass200, + } +} + +// If you are using the console, use the Framerate setting to specify the frame +// rate for this output. If you want to keep the same frame rate as the input +// video, choose Follow source. If you want to do frame rate conversion, choose +// a frame rate from the dropdown list or choose Custom. The framerates shown +// in the dropdown list are decimal approximations of fractions. If you choose +// Custom, specify your frame rate as a fraction. If you are creating your transcoding +// job specification as a JSON file without the console, use FramerateControl +// to specify which value the service uses for the frame rate for this output. +// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate +// from the input. Choose SPECIFIED if you want the service to use the frame +// rate you specify in the settings FramerateNumerator and FramerateDenominator. 
const ( - // AudioNormalizationAlgorithmControlCorrectAudio is a AudioNormalizationAlgorithmControl enum value - AudioNormalizationAlgorithmControlCorrectAudio = "CORRECT_AUDIO" + // AvcIntraFramerateControlInitializeFromSource is a AvcIntraFramerateControl enum value + AvcIntraFramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" - // AudioNormalizationAlgorithmControlMeasureOnly is a AudioNormalizationAlgorithmControl enum value - AudioNormalizationAlgorithmControlMeasureOnly = "MEASURE_ONLY" + // AvcIntraFramerateControlSpecified is a AvcIntraFramerateControl enum value + AvcIntraFramerateControlSpecified = "SPECIFIED" ) -// If set to LOG, log each output's audio track loudness to a CSV file. +// AvcIntraFramerateControl_Values returns all elements of the AvcIntraFramerateControl enum +func AvcIntraFramerateControl_Values() []string { + return []string{ + AvcIntraFramerateControlInitializeFromSource, + AvcIntraFramerateControlSpecified, + } +} + +// Choose the method that you want MediaConvert to use when increasing or decreasing +// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically +// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, +// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a +// smooth picture, but might introduce undesirable video artifacts. For complex +// frame rate conversions, especially if your source video has already been +// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do +// motion-compensated interpolation. FrameFormer chooses the best conversion +// method frame by frame. Note that using FrameFormer increases the transcoding +// time and incurs a significant add-on cost. const ( - // AudioNormalizationLoudnessLoggingLog is a AudioNormalizationLoudnessLogging enum value - AudioNormalizationLoudnessLoggingLog = "LOG" + // AvcIntraFramerateConversionAlgorithmDuplicateDrop is a AvcIntraFramerateConversionAlgorithm enum value + AvcIntraFramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" - // AudioNormalizationLoudnessLoggingDontLog is a AudioNormalizationLoudnessLogging enum value - AudioNormalizationLoudnessLoggingDontLog = "DONT_LOG" + // AvcIntraFramerateConversionAlgorithmInterpolate is a AvcIntraFramerateConversionAlgorithm enum value + AvcIntraFramerateConversionAlgorithmInterpolate = "INTERPOLATE" + + // AvcIntraFramerateConversionAlgorithmFrameformer is a AvcIntraFramerateConversionAlgorithm enum value + AvcIntraFramerateConversionAlgorithmFrameformer = "FRAMEFORMER" ) -// If set to TRUE_PEAK, calculate and log the TruePeak for each output's audio -// track loudness. +// AvcIntraFramerateConversionAlgorithm_Values returns all elements of the AvcIntraFramerateConversionAlgorithm enum +func AvcIntraFramerateConversionAlgorithm_Values() []string { + return []string{ + AvcIntraFramerateConversionAlgorithmDuplicateDrop, + AvcIntraFramerateConversionAlgorithmInterpolate, + AvcIntraFramerateConversionAlgorithmFrameformer, + } +} + +// Choose the scan line type for the output. Keep the default value, Progressive +// (PROGRESSIVE) to create a progressive output, regardless of the scan type +// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) +// to create an output that's interlaced with the same field polarity throughout. +// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) +// to produce outputs with the same field polarity as the source. 
For jobs that +// have multiple inputs, the output field polarity might change over the course +// of the output. Follow behavior depends on the input scan type. If the source +// is interlaced, the output will be interlaced with the same polarity as the +// source. If the source is progressive, the output will be interlaced with +// top field first or bottom field first, depending on which of the Follow options you +// choose. const ( - // AudioNormalizationPeakCalculationTruePeak is a AudioNormalizationPeakCalculation enum value - AudioNormalizationPeakCalculationTruePeak = "TRUE_PEAK" + // AvcIntraInterlaceModeProgressive is a AvcIntraInterlaceMode enum value + AvcIntraInterlaceModeProgressive = "PROGRESSIVE" - // AudioNormalizationPeakCalculationNone is a AudioNormalizationPeakCalculation enum value - AudioNormalizationPeakCalculationNone = "NONE" + // AvcIntraInterlaceModeTopField is a AvcIntraInterlaceMode enum value + AvcIntraInterlaceModeTopField = "TOP_FIELD" + + // AvcIntraInterlaceModeBottomField is a AvcIntraInterlaceMode enum value + AvcIntraInterlaceModeBottomField = "BOTTOM_FIELD" + + // AvcIntraInterlaceModeFollowTopField is a AvcIntraInterlaceMode enum value + AvcIntraInterlaceModeFollowTopField = "FOLLOW_TOP_FIELD" + + // AvcIntraInterlaceModeFollowBottomField is a AvcIntraInterlaceMode enum value + AvcIntraInterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD" ) -// Specifies the type of the audio selector. -const ( - // AudioSelectorTypePid is a AudioSelectorType enum value - AudioSelectorTypePid = "PID" +// AvcIntraInterlaceMode_Values returns all elements of the AvcIntraInterlaceMode enum +func AvcIntraInterlaceMode_Values() []string { + return []string{ + AvcIntraInterlaceModeProgressive, + AvcIntraInterlaceModeTopField, + AvcIntraInterlaceModeBottomField, + AvcIntraInterlaceModeFollowTopField, + AvcIntraInterlaceModeFollowBottomField, + } +} - // AudioSelectorTypeTrack is a AudioSelectorType enum value - AudioSelectorTypeTrack = "TRACK" +// Ignore this setting unless your input frame rate is 23.976 or 24 frames per +// second (fps). Enable slow PAL to create a 25 fps output. When you enable +// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples +// your audio to keep it synchronized with the video. Note that enabling this +// setting will slightly reduce the duration of your video. Required settings: +// You must also set Framerate to 25. In your JSON job specification, set (framerateControl) +// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to +// 1. +const ( + // AvcIntraSlowPalDisabled is a AvcIntraSlowPal enum value + AvcIntraSlowPalDisabled = "DISABLED" - // AudioSelectorTypeLanguageCode is a AudioSelectorType enum value - AudioSelectorTypeLanguageCode = "LANGUAGE_CODE" + // AvcIntraSlowPalEnabled is a AvcIntraSlowPal enum value + AvcIntraSlowPalEnabled = "ENABLED" ) -// When set to FOLLOW_INPUT, if the input contains an ISO 639 audio_type, then -// that value is passed through to the output. If the input contains no ISO -// 639 audio_type, the value in Audio Type is included in the output. Otherwise -// the value in Audio Type is included in the output. Note that this field and -// audioType are both ignored if audioDescriptionBroadcasterMix is set to BROADCASTER_MIXED_AD.
+// AvcIntraSlowPal_Values returns all elements of the AvcIntraSlowPal enum +func AvcIntraSlowPal_Values() []string { + return []string{ + AvcIntraSlowPalDisabled, + AvcIntraSlowPalEnabled, + } +} + +// When you do frame rate conversion from 23.976 frames per second (fps) to +// 29.97 fps, and your output scan type is interlaced, you can optionally enable +// hard telecine (HARD) to create a smoother picture. When you keep the default +// value, None (NONE), MediaConvert does a standard frame rate conversion to +// 29.97 without doing anything with the field polarity to create a smoother +// picture. const ( - // AudioTypeControlFollowInput is a AudioTypeControl enum value - AudioTypeControlFollowInput = "FOLLOW_INPUT" + // AvcIntraTelecineNone is a AvcIntraTelecine enum value + AvcIntraTelecineNone = "NONE" - // AudioTypeControlUseConfigured is a AudioTypeControl enum value - AudioTypeControlUseConfigured = "USE_CONFIGURED" + // AvcIntraTelecineHard is a AvcIntraTelecine enum value + AvcIntraTelecineHard = "HARD" ) -// Optional. Choose a tag type that AWS Billing and Cost Management will use -// to sort your AWS Elemental MediaConvert costs on any billing report that -// you set up. Any transcoding outputs that don't have an associated tag will -// appear in your billing report unsorted. If you don't choose a valid value -// for this field, your job outputs will appear on the billing report unsorted. +// AvcIntraTelecine_Values returns all elements of the AvcIntraTelecine enum +func AvcIntraTelecine_Values() []string { + return []string{ + AvcIntraTelecineNone, + AvcIntraTelecineHard, + } +} + +// The tag type that AWS Billing and Cost Management will use to sort your AWS +// Elemental MediaConvert costs on any billing report that you set up. const ( // BillingTagsSourceQueue is a BillingTagsSource enum value BillingTagsSourceQueue = "QUEUE" @@ -18421,6 +21363,16 @@ const ( BillingTagsSourceJob = "JOB" ) +// BillingTagsSource_Values returns all elements of the BillingTagsSource enum +func BillingTagsSource_Values() []string { + return []string{ + BillingTagsSourceQueue, + BillingTagsSourcePreset, + BillingTagsSourceJobTemplate, + BillingTagsSourceJob, + } +} + // If no explicit x_position or y_position is provided, setting alignment to // centered will place the captions at the bottom center of the output. Similarly, // setting a left alignment will align captions to the bottom left of the output. @@ -18437,6 +21389,14 @@ const ( BurninSubtitleAlignmentLeft = "LEFT" ) +// BurninSubtitleAlignment_Values returns all elements of the BurninSubtitleAlignment enum +func BurninSubtitleAlignment_Values() []string { + return []string{ + BurninSubtitleAlignmentCentered, + BurninSubtitleAlignmentLeft, + } +} + // Specifies the color of the rectangle behind the captions.All burn-in and // DVB-Sub font settings must match. const ( @@ -18450,6 +21410,15 @@ const ( BurninSubtitleBackgroundColorWhite = "WHITE" ) +// BurninSubtitleBackgroundColor_Values returns all elements of the BurninSubtitleBackgroundColor enum +func BurninSubtitleBackgroundColor_Values() []string { + return []string{ + BurninSubtitleBackgroundColorNone, + BurninSubtitleBackgroundColorBlack, + BurninSubtitleBackgroundColorWhite, + } +} + // Specifies the color of the burned-in captions. This option is not valid for // source captions that are STL, 608/embedded or teletext. These source settings // are already pre-defined by the caption stream. 
All burn-in and DVB-Sub font @@ -18474,6 +21443,18 @@ const ( BurninSubtitleFontColorBlue = "BLUE" ) +// BurninSubtitleFontColor_Values returns all elements of the BurninSubtitleFontColor enum +func BurninSubtitleFontColor_Values() []string { + return []string{ + BurninSubtitleFontColorWhite, + BurninSubtitleFontColorBlack, + BurninSubtitleFontColorYellow, + BurninSubtitleFontColorRed, + BurninSubtitleFontColorGreen, + BurninSubtitleFontColorBlue, + } +} + // Specifies font outline color. This option is not valid for source captions // that are either 608/embedded or teletext. These source settings are already // pre-defined by the caption stream. All burn-in and DVB-Sub font settings @@ -18498,6 +21479,18 @@ const ( BurninSubtitleOutlineColorBlue = "BLUE" ) +// BurninSubtitleOutlineColor_Values returns all elements of the BurninSubtitleOutlineColor enum +func BurninSubtitleOutlineColor_Values() []string { + return []string{ + BurninSubtitleOutlineColorBlack, + BurninSubtitleOutlineColorWhite, + BurninSubtitleOutlineColorYellow, + BurninSubtitleOutlineColorRed, + BurninSubtitleOutlineColorGreen, + BurninSubtitleOutlineColorBlue, + } +} + // Specifies the color of the shadow cast by the captions.All burn-in and DVB-Sub // font settings must match. const ( @@ -18511,6 +21504,15 @@ const ( BurninSubtitleShadowColorWhite = "WHITE" ) +// BurninSubtitleShadowColor_Values returns all elements of the BurninSubtitleShadowColor enum +func BurninSubtitleShadowColor_Values() []string { + return []string{ + BurninSubtitleShadowColorNone, + BurninSubtitleShadowColorBlack, + BurninSubtitleShadowColorWhite, + } +} + // Only applies to jobs with input captions in Teletext or STL formats. Specify // whether the spacing between letters in your captions is set by the captions // grid or varies depending on letter width. Choose fixed grid to conform to @@ -18524,6 +21526,14 @@ const ( BurninSubtitleTeletextSpacingProportional = "PROPORTIONAL" ) +// BurninSubtitleTeletextSpacing_Values returns all elements of the BurninSubtitleTeletextSpacing enum +func BurninSubtitleTeletextSpacing_Values() []string { + return []string{ + BurninSubtitleTeletextSpacingFixedGrid, + BurninSubtitleTeletextSpacingProportional, + } +} + // Specify the format for this set of captions on this output. The default format // is embedded without SCTE-20. Other options are embedded with SCTE-20, burn-in, // DVB-sub, IMSC, SCC, SRT, teletext, TTML, and web-VTT. If you are using SCTE-20, @@ -18568,6 +21578,24 @@ const ( CaptionDestinationTypeWebvtt = "WEBVTT" ) +// CaptionDestinationType_Values returns all elements of the CaptionDestinationType enum +func CaptionDestinationType_Values() []string { + return []string{ + CaptionDestinationTypeBurnIn, + CaptionDestinationTypeDvbSub, + CaptionDestinationTypeEmbedded, + CaptionDestinationTypeEmbeddedPlusScte20, + CaptionDestinationTypeImsc, + CaptionDestinationTypeScte20PlusEmbedded, + CaptionDestinationTypeScc, + CaptionDestinationTypeSrt, + CaptionDestinationTypeSmi, + CaptionDestinationTypeTeletext, + CaptionDestinationTypeTtml, + CaptionDestinationTypeWebvtt, + } +} + // Use Source (SourceType) to identify the format of your input captions. The // service cannot auto-detect caption format. 
const ( @@ -18608,6 +21636,24 @@ const ( CaptionSourceTypeImsc = "IMSC" ) +// CaptionSourceType_Values returns all elements of the CaptionSourceType enum +func CaptionSourceType_Values() []string { + return []string{ + CaptionSourceTypeAncillary, + CaptionSourceTypeDvbSub, + CaptionSourceTypeEmbedded, + CaptionSourceTypeScte20, + CaptionSourceTypeScc, + CaptionSourceTypeTtml, + CaptionSourceTypeStl, + CaptionSourceTypeSrt, + CaptionSourceTypeSmi, + CaptionSourceTypeTeletext, + CaptionSourceTypeNullSource, + CaptionSourceTypeImsc, + } +} + // When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client // from saving media segments for later replay. const ( @@ -18618,6 +21664,14 @@ const ( CmafClientCacheEnabled = "ENABLED" ) +// CmafClientCache_Values returns all elements of the CmafClientCache enum +func CmafClientCache_Values() []string { + return []string{ + CmafClientCacheDisabled, + CmafClientCacheEnabled, + } +} + // Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist // generation. const ( @@ -18628,6 +21682,14 @@ const ( CmafCodecSpecificationRfc4281 = "RFC_4281" ) +// CmafCodecSpecification_Values returns all elements of the CmafCodecSpecification enum +func CmafCodecSpecification_Values() []string { + return []string{ + CmafCodecSpecificationRfc6381, + CmafCodecSpecificationRfc4281, + } +} + // Specify the encryption scheme that you want the service to use when encrypting // your CMAF segments. Choose AES-CBC subsample (SAMPLE-AES) or AES_CTR (AES-CTR). const ( @@ -18638,6 +21700,14 @@ const ( CmafEncryptionTypeAesCtr = "AES_CTR" ) +// CmafEncryptionType_Values returns all elements of the CmafEncryptionType enum +func CmafEncryptionType_Values() []string { + return []string{ + CmafEncryptionTypeSampleAes, + CmafEncryptionTypeAesCtr, + } +} + // When you use DRM with CMAF outputs, choose whether the service writes the // 128-bit encryption initialization vector in the HLS and DASH manifests. const ( @@ -18648,6 +21718,14 @@ const ( CmafInitializationVectorInManifestExclude = "EXCLUDE" ) +// CmafInitializationVectorInManifest_Values returns all elements of the CmafInitializationVectorInManifest enum +func CmafInitializationVectorInManifest_Values() []string { + return []string{ + CmafInitializationVectorInManifestInclude, + CmafInitializationVectorInManifestExclude, + } +} + // Specify whether your DRM encryption key is static or from a key provider // that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html. const ( @@ -18658,6 +21736,14 @@ const ( CmafKeyProviderTypeStaticKey = "STATIC_KEY" ) +// CmafKeyProviderType_Values returns all elements of the CmafKeyProviderType enum +func CmafKeyProviderType_Values() []string { + return []string{ + CmafKeyProviderTypeSpeke, + CmafKeyProviderTypeStaticKey, + } +} + // When set to GZIP, compresses HLS playlist. const ( // CmafManifestCompressionGzip is a CmafManifestCompression enum value @@ -18667,6 +21753,14 @@ const ( CmafManifestCompressionNone = "NONE" ) +// CmafManifestCompression_Values returns all elements of the CmafManifestCompression enum +func CmafManifestCompression_Values() []string { + return []string{ + CmafManifestCompressionGzip, + CmafManifestCompressionNone, + } +} + // Indicates whether the output manifest should use floating point values for // segment duration. 
const ( @@ -18677,6 +21771,14 @@ const ( CmafManifestDurationFormatInteger = "INTEGER" ) +// CmafManifestDurationFormat_Values returns all elements of the CmafManifestDurationFormat enum +func CmafManifestDurationFormat_Values() []string { + return []string{ + CmafManifestDurationFormatFloatingPoint, + CmafManifestDurationFormatInteger, + } +} + // Specify whether your DASH profile is on-demand or main. When you choose Main // profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 // in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), @@ -18691,6 +21793,14 @@ const ( CmafMpdProfileOnDemandProfile = "ON_DEMAND_PROFILE" ) +// CmafMpdProfile_Values returns all elements of the CmafMpdProfile enum +func CmafMpdProfile_Values() []string { + return []string{ + CmafMpdProfileMainProfile, + CmafMpdProfileOnDemandProfile, + } +} + // When set to SINGLE_FILE, a single output file is generated, which is internally // segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, // separate segment files will be created. @@ -18702,6 +21812,14 @@ const ( CmafSegmentControlSegmentedFiles = "SEGMENTED_FILES" ) +// CmafSegmentControl_Values returns all elements of the CmafSegmentControl enum +func CmafSegmentControl_Values() []string { + return []string{ + CmafSegmentControlSingleFile, + CmafSegmentControlSegmentedFiles, + } +} + // Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag // of variant manifest. const ( @@ -18712,6 +21830,14 @@ const ( CmafStreamInfResolutionExclude = "EXCLUDE" ) +// CmafStreamInfResolution_Values returns all elements of the CmafStreamInfResolution enum +func CmafStreamInfResolution_Values() []string { + return []string{ + CmafStreamInfResolutionInclude, + CmafStreamInfResolutionExclude, + } +} + // When set to ENABLED, a DASH MPD manifest will be generated for this output. const ( // CmafWriteDASHManifestDisabled is a CmafWriteDASHManifest enum value @@ -18721,6 +21847,14 @@ const ( CmafWriteDASHManifestEnabled = "ENABLED" ) +// CmafWriteDASHManifest_Values returns all elements of the CmafWriteDASHManifest enum +func CmafWriteDASHManifest_Values() []string { + return []string{ + CmafWriteDASHManifestDisabled, + CmafWriteDASHManifestEnabled, + } +} + // When set to ENABLED, an Apple HLS manifest will be generated for this output. const ( // CmafWriteHLSManifestDisabled is a CmafWriteHLSManifest enum value @@ -18730,6 +21864,14 @@ const ( CmafWriteHLSManifestEnabled = "ENABLED" ) +// CmafWriteHLSManifest_Values returns all elements of the CmafWriteHLSManifest enum +func CmafWriteHLSManifest_Values() []string { + return []string{ + CmafWriteHLSManifestDisabled, + CmafWriteHLSManifestEnabled, + } +} + // When you enable Precise segment duration in DASH manifests (writeSegmentTimelineInRepresentation), // your DASH manifest shows precise segment durations. The segment duration // information appears inside the SegmentTimeline element, inside SegmentTemplate @@ -18744,6 +21886,14 @@ const ( CmafWriteSegmentTimelineInRepresentationDisabled = "DISABLED" ) +// CmafWriteSegmentTimelineInRepresentation_Values returns all elements of the CmafWriteSegmentTimelineInRepresentation enum +func CmafWriteSegmentTimelineInRepresentation_Values() []string { + return []string{ + CmafWriteSegmentTimelineInRepresentationEnabled, + CmafWriteSegmentTimelineInRepresentationDisabled, + } +} + // Use this setting only when you specify SCTE-35 markers from ESAM. 
Choose // INSERT to put SCTE-35 markers in this output at the insertion points that // you specify in an ESAM XML document. Provide the document in the setting @@ -18756,6 +21906,14 @@ const ( CmfcScte35EsamNone = "NONE" ) +// CmfcScte35Esam_Values returns all elements of the CmfcScte35Esam enum +func CmfcScte35Esam_Values() []string { + return []string{ + CmfcScte35EsamInsert, + CmfcScte35EsamNone, + } +} + // Ignore this setting unless you have SCTE-35 markers in your input video file. // Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear // in your input to also appear in this output. Choose None (NONE) if you don't @@ -18768,6 +21926,14 @@ const ( CmfcScte35SourceNone = "NONE" ) +// CmfcScte35Source_Values returns all elements of the CmfcScte35Source enum +func CmfcScte35Source_Values() []string { + return []string{ + CmfcScte35SourcePassthrough, + CmfcScte35SourceNone, + } +} + // Choose Insert (INSERT) for this setting to include color metadata in this // output. Choose Ignore (IGNORE) to exclude color metadata from this output. // If you don't specify a value, the service sets this to Insert by default. @@ -18779,6 +21945,14 @@ const ( ColorMetadataInsert = "INSERT" ) +// ColorMetadata_Values returns all elements of the ColorMetadata enum +func ColorMetadata_Values() []string { + return []string{ + ColorMetadataIgnore, + ColorMetadataInsert, + } +} + // If your input video has accurate color space metadata, or if you don't know // about color space, leave this set to the default value Follow (FOLLOW). The // service will automatically detect your input color space. If your input video @@ -18805,11 +21979,23 @@ const ( ColorSpaceHlg2020 = "HLG_2020" ) +// ColorSpace_Values returns all elements of the ColorSpace enum +func ColorSpace_Values() []string { + return []string{ + ColorSpaceFollow, + ColorSpaceRec601, + ColorSpaceRec709, + ColorSpaceHdr10, + ColorSpaceHlg2020, + } +} + // Specify the color space you want for this output. The service supports conversion -// between HDR formats, between SDR formats, and from SDR to HDR. The service -// doesn't support conversion from HDR to SDR. SDR to HDR conversion doesn't -// upgrade the dynamic range. The converted video has an HDR format, but visually -// appears the same as an unconverted output. +// between HDR formats, between SDR formats, from SDR to HDR, and from HDR to +// SDR. SDR to HDR conversion doesn't upgrade the dynamic range. The converted +// video has an HDR format, but visually appears the same as an unconverted +// output. HDR to SDR conversion uses Elemental tone mapping technology to approximate +// the outcome of manually regrading from HDR to SDR. const ( // ColorSpaceConversionNone is a ColorSpaceConversion enum value ColorSpaceConversionNone = "NONE" @@ -18827,6 +22013,17 @@ const ( ColorSpaceConversionForceHlg2020 = "FORCE_HLG_2020" ) +// ColorSpaceConversion_Values returns all elements of the ColorSpaceConversion enum +func ColorSpaceConversion_Values() []string { + return []string{ + ColorSpaceConversionNone, + ColorSpaceConversionForce601, + ColorSpaceConversionForce709, + ColorSpaceConversionForceHdr10, + ColorSpaceConversionForceHlg2020, + } +} + // There are two sources for color metadata, the input file and the job input // settings Color space (ColorSpace) and HDR master display information settings(Hdr10Metadata). // The Color space usage setting determines which takes precedence. 
Choose Force @@ -18843,12 +22040,27 @@ const ( ColorSpaceUsageFallback = "FALLBACK" ) +// ColorSpaceUsage_Values returns all elements of the ColorSpaceUsage enum +func ColorSpaceUsage_Values() []string { + return []string{ + ColorSpaceUsageForce, + ColorSpaceUsageFallback, + } +} + // The length of the term of your reserved queue pricing plan commitment. const ( // CommitmentOneYear is a Commitment enum value CommitmentOneYear = "ONE_YEAR" ) +// Commitment_Values returns all elements of the Commitment enum +func Commitment_Values() []string { + return []string{ + CommitmentOneYear, + } +} + // Container for this output. Some containers require a container settings object. // If not specified, the default object will be created. const ( @@ -18879,10 +22091,30 @@ const ( // ContainerTypeMxf is a ContainerType enum value ContainerTypeMxf = "MXF" + // ContainerTypeWebm is a ContainerType enum value + ContainerTypeWebm = "WEBM" + // ContainerTypeRaw is a ContainerType enum value ContainerTypeRaw = "RAW" ) +// ContainerType_Values returns all elements of the ContainerType enum +func ContainerType_Values() []string { + return []string{ + ContainerTypeF4v, + ContainerTypeIsmv, + ContainerTypeM2ts, + ContainerTypeM3u8, + ContainerTypeCmfc, + ContainerTypeMov, + ContainerTypeMp4, + ContainerTypeMpd, + ContainerTypeMxf, + ContainerTypeWebm, + ContainerTypeRaw, + } +} + // Supports HbbTV specification as indicated const ( // DashIsoHbbtvComplianceHbbtv15 is a DashIsoHbbtvCompliance enum value @@ -18892,6 +22124,14 @@ const ( DashIsoHbbtvComplianceNone = "NONE" ) +// DashIsoHbbtvCompliance_Values returns all elements of the DashIsoHbbtvCompliance enum +func DashIsoHbbtvCompliance_Values() []string { + return []string{ + DashIsoHbbtvComplianceHbbtv15, + DashIsoHbbtvComplianceNone, + } +} + // Specify whether your DASH profile is on-demand or main. When you choose Main // profile (MAIN_PROFILE), the service signals urn:mpeg:dash:profile:isoff-main:2011 // in your .mpd DASH manifest. When you choose On-demand (ON_DEMAND_PROFILE), @@ -18906,6 +22146,14 @@ const ( DashIsoMpdProfileOnDemandProfile = "ON_DEMAND_PROFILE" ) +// DashIsoMpdProfile_Values returns all elements of the DashIsoMpdProfile enum +func DashIsoMpdProfile_Values() []string { + return []string{ + DashIsoMpdProfileMainProfile, + DashIsoMpdProfileOnDemandProfile, + } +} + // This setting can improve the compatibility of your output with video players // on obsolete devices. It applies only to DASH H.264 outputs with DRM encryption. // Choose Unencrypted SEI (UNENCRYPTED_SEI) only to correct problems with playback @@ -18920,6 +22168,14 @@ const ( DashIsoPlaybackDeviceCompatibilityUnencryptedSei = "UNENCRYPTED_SEI" ) +// DashIsoPlaybackDeviceCompatibility_Values returns all elements of the DashIsoPlaybackDeviceCompatibility enum +func DashIsoPlaybackDeviceCompatibility_Values() []string { + return []string{ + DashIsoPlaybackDeviceCompatibilityCencV1, + DashIsoPlaybackDeviceCompatibilityUnencryptedSei, + } +} + // When set to SINGLE_FILE, a single output file is generated, which is internally // segmented using the Fragment Length and Segment Length. When set to SEGMENTED_FILES, // separate segment files will be created. 
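// Illustrative sketch, not part of the vendored diff: each generated *_Values
// helper simply returns the full []string of valid enum values, which makes
// membership checks against user-supplied configuration straightforward. In a
// Terraform provider schema these slices are typically handed to a helper such
// as validation.StringInSlice; containsString below is a hypothetical stand-in.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// containsString reports whether want is one of the allowed enum values.
func containsString(valid []string, want string) bool {
	for _, v := range valid {
		if v == want {
			return true
		}
	}
	return false
}

func main() {
	// WEBM and OPUS are among the values newly surfaced by this SDK update.
	fmt.Println(containsString(mediaconvert.ContainerType_Values(), mediaconvert.ContainerTypeWebm))
	fmt.Println(containsString(mediaconvert.AudioCodec_Values(), mediaconvert.AudioCodecOpus))
}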
@@ -18931,6 +22187,14 @@ const ( DashIsoSegmentControlSegmentedFiles = "SEGMENTED_FILES" ) +// DashIsoSegmentControl_Values returns all elements of the DashIsoSegmentControl enum +func DashIsoSegmentControl_Values() []string { + return []string{ + DashIsoSegmentControlSingleFile, + DashIsoSegmentControlSegmentedFiles, + } +} + // When you enable Precise segment duration in manifests (writeSegmentTimelineInRepresentation), // your DASH manifest shows precise segment durations. The segment duration // information appears inside the SegmentTimeline element, inside SegmentTemplate @@ -18945,6 +22209,14 @@ const ( DashIsoWriteSegmentTimelineInRepresentationDisabled = "DISABLED" ) +// DashIsoWriteSegmentTimelineInRepresentation_Values returns all elements of the DashIsoWriteSegmentTimelineInRepresentation enum +func DashIsoWriteSegmentTimelineInRepresentation_Values() []string { + return []string{ + DashIsoWriteSegmentTimelineInRepresentationEnabled, + DashIsoWriteSegmentTimelineInRepresentationDisabled, + } +} + // Specify the encryption mode that you used to encrypt your input files. const ( // DecryptionModeAesCtr is a DecryptionMode enum value @@ -18957,6 +22229,15 @@ const ( DecryptionModeAesGcm = "AES_GCM" ) +// DecryptionMode_Values returns all elements of the DecryptionMode enum +func DecryptionMode_Values() []string { + return []string{ + DecryptionModeAesCtr, + DecryptionModeAesCbc, + DecryptionModeAesGcm, + } +} + // Only applies when you set Deinterlacer (DeinterlaceMode) to Deinterlace (DEINTERLACE) // or Adaptive (ADAPTIVE). Motion adaptive interpolate (INTERPOLATE) produces // sharper pictures, while blend (BLEND) produces smoother motion. Use (INTERPOLATE_TICKER) @@ -18976,6 +22257,16 @@ const ( DeinterlaceAlgorithmBlendTicker = "BLEND_TICKER" ) +// DeinterlaceAlgorithm_Values returns all elements of the DeinterlaceAlgorithm enum +func DeinterlaceAlgorithm_Values() []string { + return []string{ + DeinterlaceAlgorithmInterpolate, + DeinterlaceAlgorithmInterpolateTicker, + DeinterlaceAlgorithmBlend, + DeinterlaceAlgorithmBlendTicker, + } +} + // - When set to NORMAL (default), the deinterlacer does not convert frames // that are tagged in metadata as progressive. It will only convert those that // are tagged as some other type. - When set to FORCE_ALL_FRAMES, the deinterlacer @@ -18992,6 +22283,14 @@ const ( DeinterlacerControlNormal = "NORMAL" ) +// DeinterlacerControl_Values returns all elements of the DeinterlacerControl enum +func DeinterlacerControl_Values() []string { + return []string{ + DeinterlacerControlForceAllFrames, + DeinterlacerControlNormal, + } +} + // Use Deinterlacer (DeinterlaceMode) to choose how the service will do deinterlacing. // Default is Deinterlace. - Deinterlace converts interlaced to progressive. // - Inverse telecine converts Hard Telecine 29.97i to progressive 23.976p. @@ -19007,6 +22306,15 @@ const ( DeinterlacerModeAdaptive = "ADAPTIVE" ) +// DeinterlacerMode_Values returns all elements of the DeinterlacerMode enum +func DeinterlacerMode_Values() []string { + return []string{ + DeinterlacerModeDeinterlace, + DeinterlacerModeInverseTelecine, + DeinterlacerModeAdaptive, + } +} + // Optional field, defaults to DEFAULT. Specify DEFAULT for this operation to // return your endpoints if any exist, or to create an endpoint for you and // return it if one doesn't already exist. 
Specify GET_ONLY to return your endpoints @@ -19019,6 +22327,14 @@ const ( DescribeEndpointsModeGetOnly = "GET_ONLY" ) +// DescribeEndpointsMode_Values returns all elements of the DescribeEndpointsMode enum +func DescribeEndpointsMode_Values() []string { + return []string{ + DescribeEndpointsModeDefault, + DescribeEndpointsModeGetOnly, + } +} + // Use Dolby Vision Mode to choose how the service will handle Dolby Vision // MaxCLL and MaxFALL properies. const ( @@ -19032,6 +22348,15 @@ const ( DolbyVisionLevel6ModeSpecify = "SPECIFY" ) +// DolbyVisionLevel6Mode_Values returns all elements of the DolbyVisionLevel6Mode enum +func DolbyVisionLevel6Mode_Values() []string { + return []string{ + DolbyVisionLevel6ModePassthrough, + DolbyVisionLevel6ModeRecalculate, + DolbyVisionLevel6ModeSpecify, + } +} + // In the current MediaConvert implementation, the Dolby Vision profile is always // 5 (PROFILE_5). Therefore, all of your inputs must contain Dolby Vision frame // interleaved data. @@ -19040,6 +22365,13 @@ const ( DolbyVisionProfileProfile5 = "PROFILE_5" ) +// DolbyVisionProfile_Values returns all elements of the DolbyVisionProfile enum +func DolbyVisionProfile_Values() []string { + return []string{ + DolbyVisionProfileProfile5, + } +} + // Applies only to 29.97 fps outputs. When this feature is enabled, the service // will use drop-frame timecode on outputs. If it is not possible to use drop-frame // timecode, the system will fall back to non-drop-frame. This setting is enabled @@ -19052,6 +22384,14 @@ const ( DropFrameTimecodeEnabled = "ENABLED" ) +// DropFrameTimecode_Values returns all elements of the DropFrameTimecode enum +func DropFrameTimecode_Values() []string { + return []string{ + DropFrameTimecodeDisabled, + DropFrameTimecodeEnabled, + } +} + // If no explicit x_position or y_position is provided, setting alignment to // centered will place the captions at the bottom center of the output. Similarly, // setting a left alignment will align captions to the bottom left of the output. @@ -19068,6 +22408,14 @@ const ( DvbSubtitleAlignmentLeft = "LEFT" ) +// DvbSubtitleAlignment_Values returns all elements of the DvbSubtitleAlignment enum +func DvbSubtitleAlignment_Values() []string { + return []string{ + DvbSubtitleAlignmentCentered, + DvbSubtitleAlignmentLeft, + } +} + // Specifies the color of the rectangle behind the captions.All burn-in and // DVB-Sub font settings must match. const ( @@ -19081,6 +22429,15 @@ const ( DvbSubtitleBackgroundColorWhite = "WHITE" ) +// DvbSubtitleBackgroundColor_Values returns all elements of the DvbSubtitleBackgroundColor enum +func DvbSubtitleBackgroundColor_Values() []string { + return []string{ + DvbSubtitleBackgroundColorNone, + DvbSubtitleBackgroundColorBlack, + DvbSubtitleBackgroundColorWhite, + } +} + // Specifies the color of the burned-in captions. This option is not valid for // source captions that are STL, 608/embedded or teletext. These source settings // are already pre-defined by the caption stream. All burn-in and DVB-Sub font @@ -19105,6 +22462,18 @@ const ( DvbSubtitleFontColorBlue = "BLUE" ) +// DvbSubtitleFontColor_Values returns all elements of the DvbSubtitleFontColor enum +func DvbSubtitleFontColor_Values() []string { + return []string{ + DvbSubtitleFontColorWhite, + DvbSubtitleFontColorBlack, + DvbSubtitleFontColorYellow, + DvbSubtitleFontColorRed, + DvbSubtitleFontColorGreen, + DvbSubtitleFontColorBlue, + } +} + // Specifies font outline color. 
This option is not valid for source captions // that are either 608/embedded or teletext. These source settings are already // pre-defined by the caption stream. All burn-in and DVB-Sub font settings @@ -19129,6 +22498,18 @@ const ( DvbSubtitleOutlineColorBlue = "BLUE" ) +// DvbSubtitleOutlineColor_Values returns all elements of the DvbSubtitleOutlineColor enum +func DvbSubtitleOutlineColor_Values() []string { + return []string{ + DvbSubtitleOutlineColorBlack, + DvbSubtitleOutlineColorWhite, + DvbSubtitleOutlineColorYellow, + DvbSubtitleOutlineColorRed, + DvbSubtitleOutlineColorGreen, + DvbSubtitleOutlineColorBlue, + } +} + // Specifies the color of the shadow cast by the captions.All burn-in and DVB-Sub // font settings must match. const ( @@ -19142,6 +22523,15 @@ const ( DvbSubtitleShadowColorWhite = "WHITE" ) +// DvbSubtitleShadowColor_Values returns all elements of the DvbSubtitleShadowColor enum +func DvbSubtitleShadowColor_Values() []string { + return []string{ + DvbSubtitleShadowColorNone, + DvbSubtitleShadowColorBlack, + DvbSubtitleShadowColorWhite, + } +} + // Only applies to jobs with input captions in Teletext or STL formats. Specify // whether the spacing between letters in your captions is set by the captions // grid or varies depending on letter width. Choose fixed grid to conform to @@ -19155,6 +22545,14 @@ const ( DvbSubtitleTeletextSpacingProportional = "PROPORTIONAL" ) +// DvbSubtitleTeletextSpacing_Values returns all elements of the DvbSubtitleTeletextSpacing enum +func DvbSubtitleTeletextSpacing_Values() []string { + return []string{ + DvbSubtitleTeletextSpacingFixedGrid, + DvbSubtitleTeletextSpacingProportional, + } +} + // Specify whether your DVB subtitles are standard or for hearing impaired. // Choose hearing impaired if your subtitles include audio descriptions and // dialogue. Choose standard if your subtitles include only dialogue. @@ -19166,6 +22564,14 @@ const ( DvbSubtitlingTypeStandard = "STANDARD" ) +// DvbSubtitlingType_Values returns all elements of the DvbSubtitlingType enum +func DvbSubtitlingType_Values() []string { + return []string{ + DvbSubtitlingTypeHearingImpaired, + DvbSubtitlingTypeStandard, + } +} + // Specify the bitstream mode for the E-AC-3 stream that the encoder emits. // For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex // E). @@ -19174,12 +22580,26 @@ const ( Eac3AtmosBitstreamModeCompleteMain = "COMPLETE_MAIN" ) +// Eac3AtmosBitstreamMode_Values returns all elements of the Eac3AtmosBitstreamMode enum +func Eac3AtmosBitstreamMode_Values() []string { + return []string{ + Eac3AtmosBitstreamModeCompleteMain, + } +} + // The coding mode for Dolby Digital Plus JOC (Atmos) is always 9.1.6 (CODING_MODE_9_1_6). const ( // Eac3AtmosCodingModeCodingMode916 is a Eac3AtmosCodingMode enum value Eac3AtmosCodingModeCodingMode916 = "CODING_MODE_9_1_6" ) +// Eac3AtmosCodingMode_Values returns all elements of the Eac3AtmosCodingMode enum +func Eac3AtmosCodingMode_Values() []string { + return []string{ + Eac3AtmosCodingModeCodingMode916, + } +} + // Enable Dolby Dialogue Intelligence to adjust loudness based on dialogue analysis. 
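The DVB-Sub comments above repeat that all burn-in and DVB-Sub font settings must match. A hedged sketch of one internally consistent combination, using only constants and struct fields from the vendored mediaconvert package; the function name and the particular colors chosen are illustrative:

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// exampleDvbSubSettings returns one consistent set of burn-in / DVB-Sub style
// values, since the comments above note that these settings must match.
func exampleDvbSubSettings() *mediaconvert.DvbSubDestinationSettings {
	return &mediaconvert.DvbSubDestinationSettings{
		Alignment:       aws.String(mediaconvert.DvbSubtitleAlignmentCentered),
		BackgroundColor: aws.String(mediaconvert.DvbSubtitleBackgroundColorNone),
		FontColor:       aws.String(mediaconvert.DvbSubtitleFontColorWhite),
		OutlineColor:    aws.String(mediaconvert.DvbSubtitleOutlineColorBlack),
		ShadowColor:     aws.String(mediaconvert.DvbSubtitleShadowColorNone),
		TeletextSpacing: aws.String(mediaconvert.DvbSubtitleTeletextSpacingProportional),
	}
}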
const ( // Eac3AtmosDialogueIntelligenceEnabled is a Eac3AtmosDialogueIntelligence enum value @@ -19189,6 +22609,14 @@ const ( Eac3AtmosDialogueIntelligenceDisabled = "DISABLED" ) +// Eac3AtmosDialogueIntelligence_Values returns all elements of the Eac3AtmosDialogueIntelligence enum +func Eac3AtmosDialogueIntelligence_Values() []string { + return []string{ + Eac3AtmosDialogueIntelligenceEnabled, + Eac3AtmosDialogueIntelligenceDisabled, + } +} + // Specify the absolute peak level for a signal with dynamic range compression. const ( // Eac3AtmosDynamicRangeCompressionLineNone is a Eac3AtmosDynamicRangeCompressionLine enum value @@ -19210,6 +22638,18 @@ const ( Eac3AtmosDynamicRangeCompressionLineSpeech = "SPEECH" ) +// Eac3AtmosDynamicRangeCompressionLine_Values returns all elements of the Eac3AtmosDynamicRangeCompressionLine enum +func Eac3AtmosDynamicRangeCompressionLine_Values() []string { + return []string{ + Eac3AtmosDynamicRangeCompressionLineNone, + Eac3AtmosDynamicRangeCompressionLineFilmStandard, + Eac3AtmosDynamicRangeCompressionLineFilmLight, + Eac3AtmosDynamicRangeCompressionLineMusicStandard, + Eac3AtmosDynamicRangeCompressionLineMusicLight, + Eac3AtmosDynamicRangeCompressionLineSpeech, + } +} + // Specify how the service limits the audio dynamic range when compressing the // audio. const ( @@ -19232,6 +22672,18 @@ const ( Eac3AtmosDynamicRangeCompressionRfSpeech = "SPEECH" ) +// Eac3AtmosDynamicRangeCompressionRf_Values returns all elements of the Eac3AtmosDynamicRangeCompressionRf enum +func Eac3AtmosDynamicRangeCompressionRf_Values() []string { + return []string{ + Eac3AtmosDynamicRangeCompressionRfNone, + Eac3AtmosDynamicRangeCompressionRfFilmStandard, + Eac3AtmosDynamicRangeCompressionRfFilmLight, + Eac3AtmosDynamicRangeCompressionRfMusicStandard, + Eac3AtmosDynamicRangeCompressionRfMusicLight, + Eac3AtmosDynamicRangeCompressionRfSpeech, + } +} + // Choose how the service meters the loudness of your audio. const ( // Eac3AtmosMeteringModeLeqA is a Eac3AtmosMeteringMode enum value @@ -19250,6 +22702,17 @@ const ( Eac3AtmosMeteringModeItuBs17704 = "ITU_BS_1770_4" ) +// Eac3AtmosMeteringMode_Values returns all elements of the Eac3AtmosMeteringMode enum +func Eac3AtmosMeteringMode_Values() []string { + return []string{ + Eac3AtmosMeteringModeLeqA, + Eac3AtmosMeteringModeItuBs17701, + Eac3AtmosMeteringModeItuBs17702, + Eac3AtmosMeteringModeItuBs17703, + Eac3AtmosMeteringModeItuBs17704, + } +} + // Choose how the service does stereo downmixing. const ( // Eac3AtmosStereoDownmixNotIndicated is a Eac3AtmosStereoDownmix enum value @@ -19265,6 +22728,16 @@ const ( Eac3AtmosStereoDownmixDpl2 = "DPL2" ) +// Eac3AtmosStereoDownmix_Values returns all elements of the Eac3AtmosStereoDownmix enum +func Eac3AtmosStereoDownmix_Values() []string { + return []string{ + Eac3AtmosStereoDownmixNotIndicated, + Eac3AtmosStereoDownmixStereo, + Eac3AtmosStereoDownmixSurround, + Eac3AtmosStereoDownmixDpl2, + } +} + // Specify whether your input audio has an additional center rear surround channel // matrix encoded into your left and right surround channels. const ( @@ -19278,6 +22751,15 @@ const ( Eac3AtmosSurroundExModeDisabled = "DISABLED" ) +// Eac3AtmosSurroundExMode_Values returns all elements of the Eac3AtmosSurroundExMode enum +func Eac3AtmosSurroundExMode_Values() []string { + return []string{ + Eac3AtmosSurroundExModeNotIndicated, + Eac3AtmosSurroundExModeEnabled, + Eac3AtmosSurroundExModeDisabled, + } +} + // If set to ATTENUATE_3_DB, applies a 3 dB attenuation to the surround channels. 
// Only used for 3/2 coding mode. const ( @@ -19288,6 +22770,14 @@ const ( Eac3AttenuationControlNone = "NONE" ) +// Eac3AttenuationControl_Values returns all elements of the Eac3AttenuationControl enum +func Eac3AttenuationControl_Values() []string { + return []string{ + Eac3AttenuationControlAttenuate3Db, + Eac3AttenuationControlNone, + } +} + // Specify the bitstream mode for the E-AC-3 stream that the encoder emits. // For more information about the EAC3 bitstream mode, see ATSC A/52-2012 (Annex // E). @@ -19308,6 +22798,17 @@ const ( Eac3BitstreamModeVisuallyImpaired = "VISUALLY_IMPAIRED" ) +// Eac3BitstreamMode_Values returns all elements of the Eac3BitstreamMode enum +func Eac3BitstreamMode_Values() []string { + return []string{ + Eac3BitstreamModeCompleteMain, + Eac3BitstreamModeCommentary, + Eac3BitstreamModeEmergency, + Eac3BitstreamModeHearingImpaired, + Eac3BitstreamModeVisuallyImpaired, + } +} + // Dolby Digital Plus coding mode. Determines number of channels. const ( // Eac3CodingModeCodingMode10 is a Eac3CodingMode enum value @@ -19320,6 +22821,15 @@ const ( Eac3CodingModeCodingMode32 = "CODING_MODE_3_2" ) +// Eac3CodingMode_Values returns all elements of the Eac3CodingMode enum +func Eac3CodingMode_Values() []string { + return []string{ + Eac3CodingModeCodingMode10, + Eac3CodingModeCodingMode20, + Eac3CodingModeCodingMode32, + } +} + // Activates a DC highpass filter for all input channels. const ( // Eac3DcFilterEnabled is a Eac3DcFilter enum value @@ -19329,6 +22839,14 @@ const ( Eac3DcFilterDisabled = "DISABLED" ) +// Eac3DcFilter_Values returns all elements of the Eac3DcFilter enum +func Eac3DcFilter_Values() []string { + return []string{ + Eac3DcFilterEnabled, + Eac3DcFilterDisabled, + } +} + // Specify the absolute peak level for a signal with dynamic range compression. const ( // Eac3DynamicRangeCompressionLineNone is a Eac3DynamicRangeCompressionLine enum value @@ -19350,6 +22868,18 @@ const ( Eac3DynamicRangeCompressionLineSpeech = "SPEECH" ) +// Eac3DynamicRangeCompressionLine_Values returns all elements of the Eac3DynamicRangeCompressionLine enum +func Eac3DynamicRangeCompressionLine_Values() []string { + return []string{ + Eac3DynamicRangeCompressionLineNone, + Eac3DynamicRangeCompressionLineFilmStandard, + Eac3DynamicRangeCompressionLineFilmLight, + Eac3DynamicRangeCompressionLineMusicStandard, + Eac3DynamicRangeCompressionLineMusicLight, + Eac3DynamicRangeCompressionLineSpeech, + } +} + // Specify how the service limits the audio dynamic range when compressing the // audio. const ( @@ -19372,6 +22902,18 @@ const ( Eac3DynamicRangeCompressionRfSpeech = "SPEECH" ) +// Eac3DynamicRangeCompressionRf_Values returns all elements of the Eac3DynamicRangeCompressionRf enum +func Eac3DynamicRangeCompressionRf_Values() []string { + return []string{ + Eac3DynamicRangeCompressionRfNone, + Eac3DynamicRangeCompressionRfFilmStandard, + Eac3DynamicRangeCompressionRfFilmLight, + Eac3DynamicRangeCompressionRfMusicStandard, + Eac3DynamicRangeCompressionRfMusicLight, + Eac3DynamicRangeCompressionRfSpeech, + } +} + // When encoding 3/2 audio, controls whether the LFE channel is enabled const ( // Eac3LfeControlLfe is a Eac3LfeControl enum value @@ -19381,6 +22923,14 @@ const ( Eac3LfeControlNoLfe = "NO_LFE" ) +// Eac3LfeControl_Values returns all elements of the Eac3LfeControl enum +func Eac3LfeControl_Values() []string { + return []string{ + Eac3LfeControlLfe, + Eac3LfeControlNoLfe, + } +} + // Applies a 120Hz lowpass filter to the LFE channel prior to encoding. 
Only // valid with 3_2_LFE coding mode. const ( @@ -19391,6 +22941,14 @@ const ( Eac3LfeFilterDisabled = "DISABLED" ) +// Eac3LfeFilter_Values returns all elements of the Eac3LfeFilter enum +func Eac3LfeFilter_Values() []string { + return []string{ + Eac3LfeFilterEnabled, + Eac3LfeFilterDisabled, + } +} + // When set to FOLLOW_INPUT, encoder metadata will be sourced from the DD, DD+, // or DolbyE decoder that supplied this audio data. If audio was not supplied // from one of these streams, then the static metadata settings will be used. @@ -19402,6 +22960,14 @@ const ( Eac3MetadataControlUseConfigured = "USE_CONFIGURED" ) +// Eac3MetadataControl_Values returns all elements of the Eac3MetadataControl enum +func Eac3MetadataControl_Values() []string { + return []string{ + Eac3MetadataControlFollowInput, + Eac3MetadataControlUseConfigured, + } +} + // When set to WHEN_POSSIBLE, input DD+ audio will be passed through if it is // present on the input. this detection is dynamic over the life of the transcode. // Inputs that alternate between DD+ and non-DD+ content will have a consistent @@ -19414,6 +22980,14 @@ const ( Eac3PassthroughControlNoPassthrough = "NO_PASSTHROUGH" ) +// Eac3PassthroughControl_Values returns all elements of the Eac3PassthroughControl enum +func Eac3PassthroughControl_Values() []string { + return []string{ + Eac3PassthroughControlWhenPossible, + Eac3PassthroughControlNoPassthrough, + } +} + // Controls the amount of phase-shift applied to the surround channels. Only // used for 3/2 coding mode. const ( @@ -19424,6 +22998,14 @@ const ( Eac3PhaseControlNoShift = "NO_SHIFT" ) +// Eac3PhaseControl_Values returns all elements of the Eac3PhaseControl enum +func Eac3PhaseControl_Values() []string { + return []string{ + Eac3PhaseControlShift90Degrees, + Eac3PhaseControlNoShift, + } +} + // Choose how the service does stereo downmixing. This setting only applies // if you keep the default value of 3/2 - L, R, C, Ls, Rs (CODING_MODE_3_2) // for the setting Coding mode (Eac3CodingMode). If you choose a different value @@ -19442,6 +23024,16 @@ const ( Eac3StereoDownmixDpl2 = "DPL2" ) +// Eac3StereoDownmix_Values returns all elements of the Eac3StereoDownmix enum +func Eac3StereoDownmix_Values() []string { + return []string{ + Eac3StereoDownmixNotIndicated, + Eac3StereoDownmixLoRo, + Eac3StereoDownmixLtRt, + Eac3StereoDownmixDpl2, + } +} + // When encoding 3/2 audio, sets whether an extra center back surround channel // is matrix encoded into the left and right surround channels. const ( @@ -19455,6 +23047,15 @@ const ( Eac3SurroundExModeDisabled = "DISABLED" ) +// Eac3SurroundExMode_Values returns all elements of the Eac3SurroundExMode enum +func Eac3SurroundExMode_Values() []string { + return []string{ + Eac3SurroundExModeNotIndicated, + Eac3SurroundExModeEnabled, + Eac3SurroundExModeDisabled, + } +} + // When encoding 2/0 audio, sets whether Dolby Surround is matrix encoded into // the two channels. const ( @@ -19468,6 +23069,15 @@ const ( Eac3SurroundModeDisabled = "DISABLED" ) +// Eac3SurroundMode_Values returns all elements of the Eac3SurroundMode enum +func Eac3SurroundMode_Values() []string { + return []string{ + Eac3SurroundModeNotIndicated, + Eac3SurroundModeEnabled, + Eac3SurroundModeDisabled, + } +} + // Specify whether this set of input captions appears in your outputs in both // 608 and 708 format. 
If you choose Upconvert (UPCONVERT), MediaConvert includes // the captions data in two ways: it passes the 608 data through using the 608 @@ -19481,6 +23091,14 @@ const ( EmbeddedConvert608To708Disabled = "DISABLED" ) +// EmbeddedConvert608To708_Values returns all elements of the EmbeddedConvert608To708 enum +func EmbeddedConvert608To708_Values() []string { + return []string{ + EmbeddedConvert608To708Upconvert, + EmbeddedConvert608To708Disabled, + } +} + // By default, the service terminates any unterminated captions at the end of // each input. If you want the caption to continue onto your next input, disable // this setting. @@ -19492,6 +23110,14 @@ const ( EmbeddedTerminateCaptionsDisabled = "DISABLED" ) +// EmbeddedTerminateCaptions_Values returns all elements of the EmbeddedTerminateCaptions enum +func EmbeddedTerminateCaptions_Values() []string { + return []string{ + EmbeddedTerminateCaptionsEndOfInput, + EmbeddedTerminateCaptionsDisabled, + } +} + // If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning // of the archive as required for progressive downloading. Otherwise it is placed // normally at the end. @@ -19503,6 +23129,14 @@ const ( F4vMoovPlacementNormal = "NORMAL" ) +// F4vMoovPlacement_Values returns all elements of the F4vMoovPlacement enum +func F4vMoovPlacement_Values() []string { + return []string{ + F4vMoovPlacementProgressiveDownload, + F4vMoovPlacementNormal, + } +} + // Specify whether this set of input captions appears in your outputs in both // 608 and 708 format. If you choose Upconvert (UPCONVERT), MediaConvert includes // the captions data in two ways: it passes the 608 data through using the 608 @@ -19516,6 +23150,14 @@ const ( FileSourceConvert608To708Disabled = "DISABLED" ) +// FileSourceConvert608To708_Values returns all elements of the FileSourceConvert608To708 enum +func FileSourceConvert608To708_Values() []string { + return []string{ + FileSourceConvert608To708Upconvert, + FileSourceConvert608To708Disabled, + } +} + // Provide the font script, using an ISO 15924 script code, if the LanguageCode // is not sufficient for determining the script type. Where LanguageCode or // CustomLanguageCode is sufficient, use "AUTOMATIC" or leave unset. @@ -19530,8 +23172,19 @@ const ( FontScriptHant = "HANT" ) -// Adaptive quantization. Allows intra-frame quantizers to vary to improve visual -// quality. +// FontScript_Values returns all elements of the FontScript enum +func FontScript_Values() []string { + return []string{ + FontScriptAutomatic, + FontScriptHans, + FontScriptHant, + } +} + +// Specify the strength of any adaptive quantization filters that you enable. +// The value that you choose here applies to the following settings: Flicker +// adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization +// (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization). 
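The rewritten adaptive quantization comment above describes a dependency that is easy to miss: the flicker, spatial, and temporal adaptive quantization switches only take effect when adaptiveQuantization itself is not OFF. A sketch of that relationship, using only constants and fields from the vendored mediaconvert package; the chosen strength is arbitrary:

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// exampleH264Quantization pairs an overall adaptive quantization strength with
// the per-type switches, which the documentation says require
// adaptiveQuantization to be something other than OFF.
func exampleH264Quantization() *mediaconvert.H264Settings {
	return &mediaconvert.H264Settings{
		AdaptiveQuantization:        aws.String(mediaconvert.H264AdaptiveQuantizationHigh),
		FlickerAdaptiveQuantization: aws.String(mediaconvert.H264FlickerAdaptiveQuantizationEnabled),
		SpatialAdaptiveQuantization: aws.String(mediaconvert.H264SpatialAdaptiveQuantizationEnabled),
	}
}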
const ( // H264AdaptiveQuantizationOff is a H264AdaptiveQuantization enum value H264AdaptiveQuantizationOff = "OFF" @@ -19552,6 +23205,18 @@ const ( H264AdaptiveQuantizationMax = "MAX" ) +// H264AdaptiveQuantization_Values returns all elements of the H264AdaptiveQuantization enum +func H264AdaptiveQuantization_Values() []string { + return []string{ + H264AdaptiveQuantizationOff, + H264AdaptiveQuantizationLow, + H264AdaptiveQuantizationMedium, + H264AdaptiveQuantizationHigh, + H264AdaptiveQuantizationHigher, + H264AdaptiveQuantizationMax, + } +} + // Specify an H.264 level that is consistent with your output video settings. // If you aren't sure what level to specify, choose Auto (AUTO). const ( @@ -19607,6 +23272,29 @@ const ( H264CodecLevelLevel52 = "LEVEL_5_2" ) +// H264CodecLevel_Values returns all elements of the H264CodecLevel enum +func H264CodecLevel_Values() []string { + return []string{ + H264CodecLevelAuto, + H264CodecLevelLevel1, + H264CodecLevelLevel11, + H264CodecLevelLevel12, + H264CodecLevelLevel13, + H264CodecLevelLevel2, + H264CodecLevelLevel21, + H264CodecLevelLevel22, + H264CodecLevelLevel3, + H264CodecLevelLevel31, + H264CodecLevelLevel32, + H264CodecLevelLevel4, + H264CodecLevelLevel41, + H264CodecLevelLevel42, + H264CodecLevelLevel5, + H264CodecLevelLevel51, + H264CodecLevelLevel52, + } +} + // H.264 Profile. High 4:2:2 and 10-bit profiles are only available with the // AVC-I License. const ( @@ -19629,6 +23317,18 @@ const ( H264CodecProfileMain = "MAIN" ) +// H264CodecProfile_Values returns all elements of the H264CodecProfile enum +func H264CodecProfile_Values() []string { + return []string{ + H264CodecProfileBaseline, + H264CodecProfileHigh, + H264CodecProfileHigh10bit, + H264CodecProfileHigh422, + H264CodecProfileHigh42210bit, + H264CodecProfileMain, + } +} + // Choose Adaptive to improve subjective video quality for high-motion content. // This will cause the service to use fewer B-frames (which infer information // based on other frames) for high-motion portions of the video and more B-frames @@ -19642,6 +23342,14 @@ const ( H264DynamicSubGopStatic = "STATIC" ) +// H264DynamicSubGop_Values returns all elements of the H264DynamicSubGop enum +func H264DynamicSubGop_Values() []string { + return []string{ + H264DynamicSubGopAdaptive, + H264DynamicSubGopStatic, + } +} + // Entropy encoding mode. Use CABAC (must be in Main or High profile) or CAVLC. const ( // H264EntropyEncodingCabac is a H264EntropyEncoding enum value @@ -19651,7 +23359,17 @@ const ( H264EntropyEncodingCavlc = "CAVLC" ) -// Choosing FORCE_FIELD disables PAFF encoding for interlaced outputs. +// H264EntropyEncoding_Values returns all elements of the H264EntropyEncoding enum +func H264EntropyEncoding_Values() []string { + return []string{ + H264EntropyEncodingCabac, + H264EntropyEncodingCavlc, + } +} + +// Keep the default value, PAFF, to have MediaConvert use PAFF encoding for +// interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding +// and create separate interlaced fields. const ( // H264FieldEncodingPaff is a H264FieldEncoding enum value H264FieldEncodingPaff = "PAFF" @@ -19660,7 +23378,21 @@ const ( H264FieldEncodingForceField = "FORCE_FIELD" ) -// Adjust quantization within each frame to reduce flicker or 'pop' on I-frames. 
+// H264FieldEncoding_Values returns all elements of the H264FieldEncoding enum +func H264FieldEncoding_Values() []string { + return []string{ + H264FieldEncodingPaff, + H264FieldEncodingForceField, + } +} + +// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears +// as a visual flicker that can arise when the encoder saves bits by copying +// some macroblocks many times from frame to frame, and then refreshes them +// at the I-frame. When you enable this setting, the encoder updates these macroblocks +// slightly more often to smooth out the flicker. This setting is disabled by +// default. Related setting: In addition to enabling this setting, you must +// also set adaptiveQuantization to a value other than Off (OFF). const ( // H264FlickerAdaptiveQuantizationDisabled is a H264FlickerAdaptiveQuantization enum value H264FlickerAdaptiveQuantizationDisabled = "DISABLED" @@ -19669,6 +23401,14 @@ const ( H264FlickerAdaptiveQuantizationEnabled = "ENABLED" ) +// H264FlickerAdaptiveQuantization_Values returns all elements of the H264FlickerAdaptiveQuantization enum +func H264FlickerAdaptiveQuantization_Values() []string { + return []string{ + H264FlickerAdaptiveQuantizationDisabled, + H264FlickerAdaptiveQuantizationEnabled, + } +} + // If you are using the console, use the Framerate setting to specify the frame // rate for this output. If you want to keep the same frame rate as the input // video, choose Follow source. If you want to do frame rate conversion, choose @@ -19688,15 +23428,44 @@ const ( H264FramerateControlSpecified = "SPECIFIED" ) -// When set to INTERPOLATE, produces smoother motion during frame rate conversion. +// H264FramerateControl_Values returns all elements of the H264FramerateControl enum +func H264FramerateControl_Values() []string { + return []string{ + H264FramerateControlInitializeFromSource, + H264FramerateControlSpecified, + } +} + +// Choose the method that you want MediaConvert to use when increasing or decreasing +// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically +// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, +// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a +// smooth picture, but might introduce undesirable video artifacts. For complex +// frame rate conversions, especially if your source video has already been +// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do +// motion-compensated interpolation. FrameFormer chooses the best conversion +// method frame by frame. Note that using FrameFormer increases the transcoding +// time and incurs a significant add-on cost. 
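FRAMEFORMER is a new value for the frame rate conversion algorithm enums in this SDK update. A sketch of opting into it for an H.264 output when doing a complex conversion; the 29.97 fps target and the omission of the surrounding output settings are illustrative, and the field names come from the vendored mediaconvert package:

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// exampleFrameFormerConversion requests motion-compensated frame rate
// conversion to 29.97 fps, as described in the comment above.
func exampleFrameFormerConversion() *mediaconvert.H264Settings {
	return &mediaconvert.H264Settings{
		FramerateControl:             aws.String(mediaconvert.H264FramerateControlSpecified),
		FramerateNumerator:           aws.Int64(30000),
		FramerateDenominator:         aws.Int64(1001),
		FramerateConversionAlgorithm: aws.String(mediaconvert.H264FramerateConversionAlgorithmFrameformer),
	}
}

As the comment notes, FrameFormer trades longer transcode times and an add-on cost for better results on complex conversions.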
const ( // H264FramerateConversionAlgorithmDuplicateDrop is a H264FramerateConversionAlgorithm enum value H264FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" // H264FramerateConversionAlgorithmInterpolate is a H264FramerateConversionAlgorithm enum value H264FramerateConversionAlgorithmInterpolate = "INTERPOLATE" + + // H264FramerateConversionAlgorithmFrameformer is a H264FramerateConversionAlgorithm enum value + H264FramerateConversionAlgorithmFrameformer = "FRAMEFORMER" ) +// H264FramerateConversionAlgorithm_Values returns all elements of the H264FramerateConversionAlgorithm enum +func H264FramerateConversionAlgorithm_Values() []string { + return []string{ + H264FramerateConversionAlgorithmDuplicateDrop, + H264FramerateConversionAlgorithmInterpolate, + H264FramerateConversionAlgorithmFrameformer, + } +} + // If enable, use reference B frames for GOP structures that have B frames > // 1. const ( @@ -19707,6 +23476,14 @@ const ( H264GopBReferenceEnabled = "ENABLED" ) +// H264GopBReference_Values returns all elements of the H264GopBReference enum +func H264GopBReference_Values() []string { + return []string{ + H264GopBReferenceDisabled, + H264GopBReferenceEnabled, + } +} + // Indicates if the GOP Size in H264 is specified in frames or seconds. If seconds // the system will convert the GOP Size into a frame count at run time. const ( @@ -19717,17 +23494,26 @@ const ( H264GopSizeUnitsSeconds = "SECONDS" ) -// Use Interlace mode (InterlaceMode) to choose the scan line type for the output. -// * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce -// interlaced output with the entire output having the same field polarity (top -// or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default -// Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, -// behavior depends on the input scan type, as follows. - If the source is interlaced, -// the output will be interlaced with the same polarity as the source (it will -// follow the source). The output could therefore be a mix of "top field first" -// and "bottom field first". - If the source is progressive, the output will -// be interlaced with "top field first" or "bottom field first" polarity, depending -// on which of the Follow options you chose. +// H264GopSizeUnits_Values returns all elements of the H264GopSizeUnits enum +func H264GopSizeUnits_Values() []string { + return []string{ + H264GopSizeUnitsFrames, + H264GopSizeUnitsSeconds, + } +} + +// Choose the scan line type for the output. Keep the default value, Progressive +// (PROGRESSIVE) to create a progressive output, regardless of the scan type +// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) +// to create an output that's interlaced with the same field polarity throughout. +// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) +// to produce outputs with the same field polarity as the source. For jobs that +// have multiple inputs, the output field polarity might change over the course +// of the output. Follow behavior depends on the input scan type. If the source +// is interlaced, the output will be interlaced with the same polarity as the +// source. If the source is progressive, the output will be interlaced with +// top field bottom field first, depending on which of the Follow options you +// choose. 
const ( // H264InterlaceModeProgressive is a H264InterlaceMode enum value H264InterlaceModeProgressive = "PROGRESSIVE" @@ -19745,9 +23531,24 @@ const ( H264InterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD" ) -// Using the API, enable ParFollowSource if you want the service to use the -// pixel aspect ratio from the input. Using the console, do this by choosing -// Follow source for Pixel aspect ratio. +// H264InterlaceMode_Values returns all elements of the H264InterlaceMode enum +func H264InterlaceMode_Values() []string { + return []string{ + H264InterlaceModeProgressive, + H264InterlaceModeTopField, + H264InterlaceModeBottomField, + H264InterlaceModeFollowTopField, + H264InterlaceModeFollowBottomField, + } +} + +// Optional. Specify how the service determines the pixel aspect ratio (PAR) +// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), +// uses the PAR from your input video for your output. To specify a different +// PAR in the console, choose any value other than Follow source. To specify +// a different PAR by editing the JSON job specification, choose SPECIFIED. +// When you choose SPECIFIED for this setting, you must also specify values +// for the parNumerator and parDenominator settings. const ( // H264ParControlInitializeFromSource is a H264ParControl enum value H264ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -19756,9 +23557,17 @@ const ( H264ParControlSpecified = "SPECIFIED" ) -// Use Quality tuning level (H264QualityTuningLevel) to specifiy whether to -// use fast single-pass, high-quality singlepass, or high-quality multipass -// video encoding. +// H264ParControl_Values returns all elements of the H264ParControl enum +func H264ParControl_Values() []string { + return []string{ + H264ParControlInitializeFromSource, + H264ParControlSpecified, + } +} + +// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you +// want to trade off encoding speed for output video quality. The default behavior +// is faster, lower quality, single-pass encoding. const ( // H264QualityTuningLevelSinglePass is a H264QualityTuningLevel enum value H264QualityTuningLevelSinglePass = "SINGLE_PASS" @@ -19770,6 +23579,15 @@ const ( H264QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ" ) +// H264QualityTuningLevel_Values returns all elements of the H264QualityTuningLevel enum +func H264QualityTuningLevel_Values() []string { + return []string{ + H264QualityTuningLevelSinglePass, + H264QualityTuningLevelSinglePassHq, + H264QualityTuningLevelMultiPassHq, + } +} + // Use this setting to specify whether this output has a variable bitrate (VBR), // constant bitrate (CBR) or quality-defined variable bitrate (QVBR). const ( @@ -19783,6 +23601,15 @@ const ( H264RateControlModeQvbr = "QVBR" ) +// H264RateControlMode_Values returns all elements of the H264RateControlMode enum +func H264RateControlMode_Values() []string { + return []string{ + H264RateControlModeVbr, + H264RateControlModeCbr, + H264RateControlModeQvbr, + } +} + // Places a PPS header on each encoded picture, even if repeated. const ( // H264RepeatPpsDisabled is a H264RepeatPps enum value @@ -19792,6 +23619,14 @@ const ( H264RepeatPpsEnabled = "ENABLED" ) +// H264RepeatPps_Values returns all elements of the H264RepeatPps enum +func H264RepeatPps_Values() []string { + return []string{ + H264RepeatPpsDisabled, + H264RepeatPpsEnabled, + } +} + // Enable this setting to insert I-frames at scene changes that the service // automatically detects. 
This improves video quality and is enabled by default. // If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) @@ -19808,8 +23643,23 @@ const ( H264SceneChangeDetectTransitionDetection = "TRANSITION_DETECTION" ) -// Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled -// as 25fps, and audio is sped up correspondingly. +// H264SceneChangeDetect_Values returns all elements of the H264SceneChangeDetect enum +func H264SceneChangeDetect_Values() []string { + return []string{ + H264SceneChangeDetectDisabled, + H264SceneChangeDetectEnabled, + H264SceneChangeDetectTransitionDetection, + } +} + +// Ignore this setting unless your input frame rate is 23.976 or 24 frames per +// second (fps). Enable slow PAL to create a 25 fps output. When you enable +// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples +// your audio to keep it synchronized with the video. Note that enabling this +// setting will slightly reduce the duration of your video. Required settings: +// You must also set Framerate to 25. In your JSON job specification, set (framerateControl) +// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to +// 1. const ( // H264SlowPalDisabled is a H264SlowPal enum value H264SlowPalDisabled = "DISABLED" @@ -19818,8 +23668,29 @@ const ( H264SlowPalEnabled = "ENABLED" ) -// Adjust quantization within each frame based on spatial variation of content -// complexity. +// H264SlowPal_Values returns all elements of the H264SlowPal enum +func H264SlowPal_Values() []string { + return []string{ + H264SlowPalDisabled, + H264SlowPalEnabled, + } +} + +// Keep the default value, Enabled (ENABLED), to adjust quantization within +// each frame based on spatial variation of content complexity. When you enable +// this feature, the encoder uses fewer bits on areas that can sustain more +// distortion with no noticeable visual degradation and uses more bits on areas +// where any small distortion will be noticeable. For example, complex textured +// blocks are encoded with fewer bits and smooth textured blocks are encoded +// with more bits. Enabling this feature will almost always improve your video +// quality. Note, though, that this feature doesn't take into account where +// the viewer's attention is likely to be. If viewers are likely to be focusing +// their attention on a part of the screen with a lot of complex texture, you +// might choose to disable this feature. Related setting: When you enable spatial +// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) +// depending on your content. For homogeneous content, such as cartoons and +// video games, set it to Low. For content with a wider variety of textures, +// set it to High or Higher. const ( // H264SpatialAdaptiveQuantizationDisabled is a H264SpatialAdaptiveQuantization enum value H264SpatialAdaptiveQuantizationDisabled = "DISABLED" @@ -19828,6 +23699,14 @@ const ( H264SpatialAdaptiveQuantizationEnabled = "ENABLED" ) +// H264SpatialAdaptiveQuantization_Values returns all elements of the H264SpatialAdaptiveQuantization enum +func H264SpatialAdaptiveQuantization_Values() []string { + return []string{ + H264SpatialAdaptiveQuantizationDisabled, + H264SpatialAdaptiveQuantizationEnabled, + } +} + // Produces a bitstream compliant with SMPTE RP-2027. 
const ( // H264SyntaxDefault is a H264Syntax enum value @@ -19837,13 +23716,22 @@ const ( H264SyntaxRp2027 = "RP2027" ) -// This field applies only if the Streams > Advanced > Framerate (framerate) -// field is set to 29.970. This field works with the Streams > Advanced > Preprocessors -// > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced -// Mode field (interlace_mode) to identify the scan type for the output: Progressive, -// Interlaced, Hard Telecine or Soft Telecine. - Hard: produces 29.97i output -// from 23.976 input. - Soft: produces 23.976; the player converts this output -// to 29.97i. +// H264Syntax_Values returns all elements of the H264Syntax enum +func H264Syntax_Values() []string { + return []string{ + H264SyntaxDefault, + H264SyntaxRp2027, + } +} + +// When you do frame rate conversion from 23.976 frames per second (fps) to +// 29.97 fps, and your output scan type is interlaced, you can optionally enable +// hard or soft telecine to create a smoother picture. Hard telecine (HARD) +// produces a 29.97i output. Soft telecine (SOFT) produces an output with a +// 23.976 output that signals to the video player device to do the conversion +// during play back. When you keep the default value, None (NONE), MediaConvert +// does a standard frame rate conversion to 29.97 without doing anything with +// the field polarity to create a smoother picture. const ( // H264TelecineNone is a H264Telecine enum value H264TelecineNone = "NONE" @@ -19855,8 +23743,28 @@ const ( H264TelecineHard = "HARD" ) -// Adjust quantization within each frame based on temporal variation of content -// complexity. +// H264Telecine_Values returns all elements of the H264Telecine enum +func H264Telecine_Values() []string { + return []string{ + H264TelecineNone, + H264TelecineSoft, + H264TelecineHard, + } +} + +// Keep the default value, Enabled (ENABLED), to adjust quantization within +// each frame based on temporal variation of content complexity. When you enable +// this feature, the encoder uses fewer bits on areas of the frame that aren't +// moving and uses more bits on complex objects with sharp edges that move a +// lot. For example, this feature improves the readability of text tickers on +// newscasts and scoreboards on sports matches. Enabling this feature will almost +// always improve your video quality. Note, though, that this feature doesn't +// take into account where the viewer's attention is likely to be. If viewers +// are likely to be focusing their attention on a part of the screen that doesn't +// have moving objects with sharp edges, such as sports athletes' faces, you +// might choose to disable this feature. Related setting: When you enable temporal +// quantization, adjust the strength of the filter with the setting Adaptive +// quantization (adaptiveQuantization). const ( // H264TemporalAdaptiveQuantizationDisabled is a H264TemporalAdaptiveQuantization enum value H264TemporalAdaptiveQuantizationDisabled = "DISABLED" @@ -19865,6 +23773,14 @@ const ( H264TemporalAdaptiveQuantizationEnabled = "ENABLED" ) +// H264TemporalAdaptiveQuantization_Values returns all elements of the H264TemporalAdaptiveQuantization enum +func H264TemporalAdaptiveQuantization_Values() []string { + return []string{ + H264TemporalAdaptiveQuantizationDisabled, + H264TemporalAdaptiveQuantizationEnabled, + } +} + // Inserts timecode for each frame as 4 bytes of an unregistered SEI message. 
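The reworked telecine comment above spells out the 23.976 fps to 29.97i workflow. A sketch of the hard-telecine variant; only the fields relevant to that description are set, the values are the ones the comment implies rather than anything this patch requires, and the function name is illustrative:

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// exampleHardTelecine produces a 29.97i output from 23.976 input by combining
// an interlaced scan type with hard telecine, per the comment above.
func exampleHardTelecine() *mediaconvert.H264Settings {
	return &mediaconvert.H264Settings{
		FramerateControl:     aws.String(mediaconvert.H264FramerateControlSpecified),
		FramerateNumerator:   aws.Int64(30000),
		FramerateDenominator: aws.Int64(1001),
		InterlaceMode:        aws.String(mediaconvert.H264InterlaceModeTopField),
		Telecine:             aws.String(mediaconvert.H264TelecineHard),
	}
}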
const ( // H264UnregisteredSeiTimecodeDisabled is a H264UnregisteredSeiTimecode enum value @@ -19874,8 +23790,18 @@ const ( H264UnregisteredSeiTimecodeEnabled = "ENABLED" ) -// Adaptive quantization. Allows intra-frame quantizers to vary to improve visual -// quality. +// H264UnregisteredSeiTimecode_Values returns all elements of the H264UnregisteredSeiTimecode enum +func H264UnregisteredSeiTimecode_Values() []string { + return []string{ + H264UnregisteredSeiTimecodeDisabled, + H264UnregisteredSeiTimecodeEnabled, + } +} + +// Specify the strength of any adaptive quantization filters that you enable. +// The value that you choose here applies to the following settings: Flicker +// adaptive quantization (flickerAdaptiveQuantization), Spatial adaptive quantization +// (spatialAdaptiveQuantization), and Temporal adaptive quantization (temporalAdaptiveQuantization). const ( // H265AdaptiveQuantizationOff is a H265AdaptiveQuantization enum value H265AdaptiveQuantizationOff = "OFF" @@ -19896,6 +23822,18 @@ const ( H265AdaptiveQuantizationMax = "MAX" ) +// H265AdaptiveQuantization_Values returns all elements of the H265AdaptiveQuantization enum +func H265AdaptiveQuantization_Values() []string { + return []string{ + H265AdaptiveQuantizationOff, + H265AdaptiveQuantizationLow, + H265AdaptiveQuantizationMedium, + H265AdaptiveQuantizationHigh, + H265AdaptiveQuantizationHigher, + H265AdaptiveQuantizationMax, + } +} + // Enables Alternate Transfer Function SEI message for outputs using Hybrid // Log Gamma (HLG) Electro-Optical Transfer Function (EOTF). const ( @@ -19906,6 +23844,14 @@ const ( H265AlternateTransferFunctionSeiEnabled = "ENABLED" ) +// H265AlternateTransferFunctionSei_Values returns all elements of the H265AlternateTransferFunctionSei enum +func H265AlternateTransferFunctionSei_Values() []string { + return []string{ + H265AlternateTransferFunctionSeiDisabled, + H265AlternateTransferFunctionSeiEnabled, + } +} + // H.265 Level. const ( // H265CodecLevelAuto is a H265CodecLevel enum value @@ -19951,6 +23897,26 @@ const ( H265CodecLevelLevel62 = "LEVEL_6_2" ) +// H265CodecLevel_Values returns all elements of the H265CodecLevel enum +func H265CodecLevel_Values() []string { + return []string{ + H265CodecLevelAuto, + H265CodecLevelLevel1, + H265CodecLevelLevel2, + H265CodecLevelLevel21, + H265CodecLevelLevel3, + H265CodecLevelLevel31, + H265CodecLevelLevel4, + H265CodecLevelLevel41, + H265CodecLevelLevel5, + H265CodecLevelLevel51, + H265CodecLevelLevel52, + H265CodecLevelLevel6, + H265CodecLevelLevel61, + H265CodecLevelLevel62, + } +} + // Represents the Profile and Tier, per the HEVC (H.265) specification. Selections // are grouped as [Profile] / [Tier], so "Main/High" represents Main Profile // with High Tier. 4:2:2 profiles are only available with the HEVC 4:2:2 License. @@ -19980,6 +23946,20 @@ const ( H265CodecProfileMain42210bitHigh = "MAIN_422_10BIT_HIGH" ) +// H265CodecProfile_Values returns all elements of the H265CodecProfile enum +func H265CodecProfile_Values() []string { + return []string{ + H265CodecProfileMainMain, + H265CodecProfileMainHigh, + H265CodecProfileMain10Main, + H265CodecProfileMain10High, + H265CodecProfileMain4228bitMain, + H265CodecProfileMain4228bitHigh, + H265CodecProfileMain42210bitMain, + H265CodecProfileMain42210bitHigh, + } +} + // Choose Adaptive to improve subjective video quality for high-motion content. 
// This will cause the service to use fewer B-frames (which infer information // based on other frames) for high-motion portions of the video and more B-frames @@ -19993,7 +23973,21 @@ const ( H265DynamicSubGopStatic = "STATIC" ) -// Adjust quantization within each frame to reduce flicker or 'pop' on I-frames. +// H265DynamicSubGop_Values returns all elements of the H265DynamicSubGop enum +func H265DynamicSubGop_Values() []string { + return []string{ + H265DynamicSubGopAdaptive, + H265DynamicSubGopStatic, + } +} + +// Enable this setting to have the encoder reduce I-frame pop. I-frame pop appears +// as a visual flicker that can arise when the encoder saves bits by copying +// some macroblocks many times from frame to frame, and then refreshes them +// at the I-frame. When you enable this setting, the encoder updates these macroblocks +// slightly more often to smooth out the flicker. This setting is disabled by +// default. Related setting: In addition to enabling this setting, you must +// also set adaptiveQuantization to a value other than Off (OFF). const ( // H265FlickerAdaptiveQuantizationDisabled is a H265FlickerAdaptiveQuantization enum value H265FlickerAdaptiveQuantizationDisabled = "DISABLED" @@ -20002,13 +23996,21 @@ const ( H265FlickerAdaptiveQuantizationEnabled = "ENABLED" ) +// H265FlickerAdaptiveQuantization_Values returns all elements of the H265FlickerAdaptiveQuantization enum +func H265FlickerAdaptiveQuantization_Values() []string { + return []string{ + H265FlickerAdaptiveQuantizationDisabled, + H265FlickerAdaptiveQuantizationEnabled, + } +} + // If you are using the console, use the Framerate setting to specify the frame // rate for this output. If you want to keep the same frame rate as the input // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose // Custom, specify your frame rate as a fraction. If you are creating your transcoding -// job sepecification as a JSON file without the console, use FramerateControl +// job specification as a JSON file without the console, use FramerateControl // to specify which value the service uses for the frame rate for this output. // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate // from the input. Choose SPECIFIED if you want the service to use the frame @@ -20021,15 +24023,44 @@ const ( H265FramerateControlSpecified = "SPECIFIED" ) -// When set to INTERPOLATE, produces smoother motion during frame rate conversion. +// H265FramerateControl_Values returns all elements of the H265FramerateControl enum +func H265FramerateControl_Values() []string { + return []string{ + H265FramerateControlInitializeFromSource, + H265FramerateControlSpecified, + } +} + +// Choose the method that you want MediaConvert to use when increasing or decreasing +// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically +// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, +// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a +// smooth picture, but might introduce undesirable video artifacts. For complex +// frame rate conversions, especially if your source video has already been +// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do +// motion-compensated interpolation. FrameFormer chooses the best conversion +// method frame by frame. 
Note that using FrameFormer increases the transcoding +// time and incurs a significant add-on cost. const ( // H265FramerateConversionAlgorithmDuplicateDrop is a H265FramerateConversionAlgorithm enum value H265FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" // H265FramerateConversionAlgorithmInterpolate is a H265FramerateConversionAlgorithm enum value H265FramerateConversionAlgorithmInterpolate = "INTERPOLATE" + + // H265FramerateConversionAlgorithmFrameformer is a H265FramerateConversionAlgorithm enum value + H265FramerateConversionAlgorithmFrameformer = "FRAMEFORMER" ) +// H265FramerateConversionAlgorithm_Values returns all elements of the H265FramerateConversionAlgorithm enum +func H265FramerateConversionAlgorithm_Values() []string { + return []string{ + H265FramerateConversionAlgorithmDuplicateDrop, + H265FramerateConversionAlgorithmInterpolate, + H265FramerateConversionAlgorithmFrameformer, + } +} + // If enable, use reference B frames for GOP structures that have B frames > // 1. const ( @@ -20040,6 +24071,14 @@ const ( H265GopBReferenceEnabled = "ENABLED" ) +// H265GopBReference_Values returns all elements of the H265GopBReference enum +func H265GopBReference_Values() []string { + return []string{ + H265GopBReferenceDisabled, + H265GopBReferenceEnabled, + } +} + // Indicates if the GOP Size in H265 is specified in frames or seconds. If seconds // the system will convert the GOP Size into a frame count at run time. const ( @@ -20050,18 +24089,26 @@ const ( H265GopSizeUnitsSeconds = "SECONDS" ) -// Choose the scan line type for the output. Choose Progressive (PROGRESSIVE) -// to create a progressive output, regardless of the scan type of your input. -// Choose Top Field First (TOP_FIELD) or Bottom Field First (BOTTOM_FIELD) to -// create an output that's interlaced with the same field polarity throughout. -// Choose Follow, Default Top (FOLLOW_TOP_FIELD) or Follow, Default Bottom (FOLLOW_BOTTOM_FIELD) -// to create an interlaced output with the same field polarity as the source. -// If the source is interlaced, the output will be interlaced with the same -// polarity as the source (it will follow the source). The output could therefore -// be a mix of "top field first" and "bottom field first". If the source is -// progressive, your output will be interlaced with "top field first" or "bottom -// field first" polarity, depending on which of the Follow options you chose. -// If you don't choose a value, the service will default to Progressive (PROGRESSIVE). +// H265GopSizeUnits_Values returns all elements of the H265GopSizeUnits enum +func H265GopSizeUnits_Values() []string { + return []string{ + H265GopSizeUnitsFrames, + H265GopSizeUnitsSeconds, + } +} + +// Choose the scan line type for the output. Keep the default value, Progressive +// (PROGRESSIVE) to create a progressive output, regardless of the scan type +// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) +// to create an output that's interlaced with the same field polarity throughout. +// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) +// to produce outputs with the same field polarity as the source. For jobs that +// have multiple inputs, the output field polarity might change over the course +// of the output. Follow behavior depends on the input scan type. If the source +// is interlaced, the output will be interlaced with the same polarity as the +// source. 
If the source is progressive, the output will be interlaced with +// top field bottom field first, depending on which of the Follow options you +// choose. const ( // H265InterlaceModeProgressive is a H265InterlaceMode enum value H265InterlaceModeProgressive = "PROGRESSIVE" @@ -20079,9 +24126,24 @@ const ( H265InterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD" ) -// Using the API, enable ParFollowSource if you want the service to use the -// pixel aspect ratio from the input. Using the console, do this by choosing -// Follow source for Pixel aspect ratio. +// H265InterlaceMode_Values returns all elements of the H265InterlaceMode enum +func H265InterlaceMode_Values() []string { + return []string{ + H265InterlaceModeProgressive, + H265InterlaceModeTopField, + H265InterlaceModeBottomField, + H265InterlaceModeFollowTopField, + H265InterlaceModeFollowBottomField, + } +} + +// Optional. Specify how the service determines the pixel aspect ratio (PAR) +// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), +// uses the PAR from your input video for your output. To specify a different +// PAR in the console, choose any value other than Follow source. To specify +// a different PAR by editing the JSON job specification, choose SPECIFIED. +// When you choose SPECIFIED for this setting, you must also specify values +// for the parNumerator and parDenominator settings. const ( // H265ParControlInitializeFromSource is a H265ParControl enum value H265ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -20090,9 +24152,17 @@ const ( H265ParControlSpecified = "SPECIFIED" ) -// Use Quality tuning level (H265QualityTuningLevel) to specifiy whether to -// use fast single-pass, high-quality singlepass, or high-quality multipass -// video encoding. +// H265ParControl_Values returns all elements of the H265ParControl enum +func H265ParControl_Values() []string { + return []string{ + H265ParControlInitializeFromSource, + H265ParControlSpecified, + } +} + +// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you +// want to trade off encoding speed for output video quality. The default behavior +// is faster, lower quality, single-pass encoding. const ( // H265QualityTuningLevelSinglePass is a H265QualityTuningLevel enum value H265QualityTuningLevelSinglePass = "SINGLE_PASS" @@ -20104,6 +24174,15 @@ const ( H265QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ" ) +// H265QualityTuningLevel_Values returns all elements of the H265QualityTuningLevel enum +func H265QualityTuningLevel_Values() []string { + return []string{ + H265QualityTuningLevelSinglePass, + H265QualityTuningLevelSinglePassHq, + H265QualityTuningLevelMultiPassHq, + } +} + // Use this setting to specify whether this output has a variable bitrate (VBR), // constant bitrate (CBR) or quality-defined variable bitrate (QVBR). const ( @@ -20117,6 +24196,15 @@ const ( H265RateControlModeQvbr = "QVBR" ) +// H265RateControlMode_Values returns all elements of the H265RateControlMode enum +func H265RateControlMode_Values() []string { + return []string{ + H265RateControlModeVbr, + H265RateControlModeCbr, + H265RateControlModeQvbr, + } +} + // Specify Sample Adaptive Offset (SAO) filter strength. 
Adaptive mode dynamically // selects best strength based on content const ( @@ -20130,6 +24218,15 @@ const ( H265SampleAdaptiveOffsetFilterModeOff = "OFF" ) +// H265SampleAdaptiveOffsetFilterMode_Values returns all elements of the H265SampleAdaptiveOffsetFilterMode enum +func H265SampleAdaptiveOffsetFilterMode_Values() []string { + return []string{ + H265SampleAdaptiveOffsetFilterModeDefault, + H265SampleAdaptiveOffsetFilterModeAdaptive, + H265SampleAdaptiveOffsetFilterModeOff, + } +} + // Enable this setting to insert I-frames at scene changes that the service // automatically detects. This improves video quality and is enabled by default. // If this output uses QVBR, choose Transition detection (TRANSITION_DETECTION) @@ -20146,8 +24243,23 @@ const ( H265SceneChangeDetectTransitionDetection = "TRANSITION_DETECTION" ) -// Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled -// as 25fps, and audio is sped up correspondingly. +// H265SceneChangeDetect_Values returns all elements of the H265SceneChangeDetect enum +func H265SceneChangeDetect_Values() []string { + return []string{ + H265SceneChangeDetectDisabled, + H265SceneChangeDetectEnabled, + H265SceneChangeDetectTransitionDetection, + } +} + +// Ignore this setting unless your input frame rate is 23.976 or 24 frames per +// second (fps). Enable slow PAL to create a 25 fps output. When you enable +// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples +// your audio to keep it synchronized with the video. Note that enabling this +// setting will slightly reduce the duration of your video. Required settings: +// You must also set Framerate to 25. In your JSON job specification, set (framerateControl) +// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to +// 1. const ( // H265SlowPalDisabled is a H265SlowPal enum value H265SlowPalDisabled = "DISABLED" @@ -20156,8 +24268,29 @@ const ( H265SlowPalEnabled = "ENABLED" ) -// Adjust quantization within each frame based on spatial variation of content -// complexity. +// H265SlowPal_Values returns all elements of the H265SlowPal enum +func H265SlowPal_Values() []string { + return []string{ + H265SlowPalDisabled, + H265SlowPalEnabled, + } +} + +// Keep the default value, Enabled (ENABLED), to adjust quantization within +// each frame based on spatial variation of content complexity. When you enable +// this feature, the encoder uses fewer bits on areas that can sustain more +// distortion with no noticeable visual degradation and uses more bits on areas +// where any small distortion will be noticeable. For example, complex textured +// blocks are encoded with fewer bits and smooth textured blocks are encoded +// with more bits. Enabling this feature will almost always improve your video +// quality. Note, though, that this feature doesn't take into account where +// the viewer's attention is likely to be. If viewers are likely to be focusing +// their attention on a part of the screen with a lot of complex texture, you +// might choose to disable this feature. Related setting: When you enable spatial +// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) +// depending on your content. For homogeneous content, such as cartoons and +// video games, set it to Low. For content with a wider variety of textures, +// set it to High or Higher. 
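Two of the H.265 comments above interact: scene change detection offers a TRANSITION_DETECTION mode that is specifically recommended for QVBR outputs. A sketch combining them, assuming the H265QvbrSettings struct from the vendored SDK; the quality level is arbitrary:

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// exampleH265Qvbr enables quality-defined variable bitrate together with
// transition detection, the scene change mode the comments recommend for QVBR.
func exampleH265Qvbr() *mediaconvert.H265Settings {
	return &mediaconvert.H265Settings{
		RateControlMode:   aws.String(mediaconvert.H265RateControlModeQvbr),
		QvbrSettings:      &mediaconvert.H265QvbrSettings{QvbrQualityLevel: aws.Int64(7)},
		SceneChangeDetect: aws.String(mediaconvert.H265SceneChangeDetectTransitionDetection),
	}
}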
const ( // H265SpatialAdaptiveQuantizationDisabled is a H265SpatialAdaptiveQuantization enum value H265SpatialAdaptiveQuantizationDisabled = "DISABLED" @@ -20166,6 +24299,14 @@ const ( H265SpatialAdaptiveQuantizationEnabled = "ENABLED" ) +// H265SpatialAdaptiveQuantization_Values returns all elements of the H265SpatialAdaptiveQuantization enum +func H265SpatialAdaptiveQuantization_Values() []string { + return []string{ + H265SpatialAdaptiveQuantizationDisabled, + H265SpatialAdaptiveQuantizationEnabled, + } +} + // This field applies only if the Streams > Advanced > Framerate (framerate) // field is set to 29.970. This field works with the Streams > Advanced > Preprocessors // > Deinterlacer field (deinterlace_mode) and the Streams > Advanced > Interlaced @@ -20184,8 +24325,28 @@ const ( H265TelecineHard = "HARD" ) -// Adjust quantization within each frame based on temporal variation of content -// complexity. +// H265Telecine_Values returns all elements of the H265Telecine enum +func H265Telecine_Values() []string { + return []string{ + H265TelecineNone, + H265TelecineSoft, + H265TelecineHard, + } +} + +// Keep the default value, Enabled (ENABLED), to adjust quantization within +// each frame based on temporal variation of content complexity. When you enable +// this feature, the encoder uses fewer bits on areas of the frame that aren't +// moving and uses more bits on complex objects with sharp edges that move a +// lot. For example, this feature improves the readability of text tickers on +// newscasts and scoreboards on sports matches. Enabling this feature will almost +// always improve your video quality. Note, though, that this feature doesn't +// take into account where the viewer's attention is likely to be. If viewers +// are likely to be focusing their attention on a part of the screen that doesn't +// have moving objects with sharp edges, such as sports athletes' faces, you +// might choose to disable this feature. Related setting: When you enable temporal +// quantization, adjust the strength of the filter with the setting Adaptive +// quantization (adaptiveQuantization). const ( // H265TemporalAdaptiveQuantizationDisabled is a H265TemporalAdaptiveQuantization enum value H265TemporalAdaptiveQuantizationDisabled = "DISABLED" @@ -20194,6 +24355,14 @@ const ( H265TemporalAdaptiveQuantizationEnabled = "ENABLED" ) +// H265TemporalAdaptiveQuantization_Values returns all elements of the H265TemporalAdaptiveQuantization enum +func H265TemporalAdaptiveQuantization_Values() []string { + return []string{ + H265TemporalAdaptiveQuantizationDisabled, + H265TemporalAdaptiveQuantizationEnabled, + } +} + // Enables temporal layer identifiers in the encoded bitstream. Up to 3 layers // are supported depending on GOP structure: I- and P-frames form one layer, // reference B-frames can form a second layer and non-reference b-frames can @@ -20210,6 +24379,14 @@ const ( H265TemporalIdsEnabled = "ENABLED" ) +// H265TemporalIds_Values returns all elements of the H265TemporalIds enum +func H265TemporalIds_Values() []string { + return []string{ + H265TemporalIdsDisabled, + H265TemporalIdsEnabled, + } +} + // Enable use of tiles, allowing horizontal as well as vertical subdivision // of the encoded pictures. 
const ( @@ -20220,6 +24397,14 @@ const ( H265TilesEnabled = "ENABLED" ) +// H265Tiles_Values returns all elements of the H265Tiles enum +func H265Tiles_Values() []string { + return []string{ + H265TilesDisabled, + H265TilesEnabled, + } +} + // Inserts timecode for each frame as 4 bytes of an unregistered SEI message. const ( // H265UnregisteredSeiTimecodeDisabled is a H265UnregisteredSeiTimecode enum value @@ -20229,6 +24414,14 @@ const ( H265UnregisteredSeiTimecodeEnabled = "ENABLED" ) +// H265UnregisteredSeiTimecode_Values returns all elements of the H265UnregisteredSeiTimecode enum +func H265UnregisteredSeiTimecode_Values() []string { + return []string{ + H265UnregisteredSeiTimecodeDisabled, + H265UnregisteredSeiTimecodeEnabled, + } +} + // If the location of parameter set NAL units doesn't matter in your workflow, // ignore this setting. Use this setting only with CMAF or DASH outputs, or // with standalone file outputs in an MPEG-4 container (MP4 outputs). Choose @@ -20248,6 +24441,14 @@ const ( H265WriteMp4PackagingTypeHev1 = "HEV1" ) +// H265WriteMp4PackagingType_Values returns all elements of the H265WriteMp4PackagingType enum +func H265WriteMp4PackagingType_Values() []string { + return []string{ + H265WriteMp4PackagingTypeHvc1, + H265WriteMp4PackagingTypeHev1, + } +} + const ( // HlsAdMarkersElemental is a HlsAdMarkers enum value HlsAdMarkersElemental = "ELEMENTAL" @@ -20256,6 +24457,14 @@ const ( HlsAdMarkersElementalScte35 = "ELEMENTAL_SCTE35" ) +// HlsAdMarkers_Values returns all elements of the HlsAdMarkers enum +func HlsAdMarkers_Values() []string { + return []string{ + HlsAdMarkersElemental, + HlsAdMarkersElementalScte35, + } +} + // Use this setting only in audio-only outputs. Choose MPEG-2 Transport Stream // (M2TS) to create a file in an MPEG2-TS container. Keep the default value // Automatic (AUTOMATIC) to create a raw audio-only file with no container. @@ -20269,6 +24478,34 @@ const ( HlsAudioOnlyContainerM2ts = "M2TS" ) +// HlsAudioOnlyContainer_Values returns all elements of the HlsAudioOnlyContainer enum +func HlsAudioOnlyContainer_Values() []string { + return []string{ + HlsAudioOnlyContainerAutomatic, + HlsAudioOnlyContainerM2ts, + } +} + +// Ignore this setting unless you are using FairPlay DRM with Verimatrix and +// you encounter playback issues. Keep the default value, Include (INCLUDE), +// to output audio-only headers. Choose Exclude (EXCLUDE) to remove the audio-only +// headers from your audio segments. +const ( + // HlsAudioOnlyHeaderInclude is a HlsAudioOnlyHeader enum value + HlsAudioOnlyHeaderInclude = "INCLUDE" + + // HlsAudioOnlyHeaderExclude is a HlsAudioOnlyHeader enum value + HlsAudioOnlyHeaderExclude = "EXCLUDE" +) + +// HlsAudioOnlyHeader_Values returns all elements of the HlsAudioOnlyHeader enum +func HlsAudioOnlyHeader_Values() []string { + return []string{ + HlsAudioOnlyHeaderInclude, + HlsAudioOnlyHeaderExclude, + } +} + // Four types of audio-only tracks are supported: Audio-Only Variant Stream // The client can play back this audio-only stream instead of video in low-bandwidth // scenarios. Represented as an EXT-X-STREAM-INF in the HLS manifest. 
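The packaging-type comment above is the one to check when a downstream CMAF or DASH packager insists on 'hvc1' sample descriptions. A small illustrative sketch, assuming the WriteMp4PackagingType field on H265Settings that corresponds to this enum:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	h265 := &mediaconvert.H265Settings{
		// HVC1: parameter set NAL units live in the sample headers,
		// not in the samples themselves.
		WriteMp4PackagingType: aws.String(mediaconvert.H265WriteMp4PackagingTypeHvc1),
	}
	fmt.Println("packaging type:", aws.StringValue(h265.WriteMp4PackagingType))
}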
Alternate @@ -20294,6 +24531,16 @@ const ( HlsAudioTrackTypeAudioOnlyVariantStream = "AUDIO_ONLY_VARIANT_STREAM" ) +// HlsAudioTrackType_Values returns all elements of the HlsAudioTrackType enum +func HlsAudioTrackType_Values() []string { + return []string{ + HlsAudioTrackTypeAlternateAudioAutoSelectDefault, + HlsAudioTrackTypeAlternateAudioAutoSelect, + HlsAudioTrackTypeAlternateAudioNotAutoSelect, + HlsAudioTrackTypeAudioOnlyVariantStream, + } +} + // Applies only to 608 Embedded output captions. Insert: Include CLOSED-CAPTIONS // lines in the manifest. Specify at least one language in the CC1 Language // Code field. One CLOSED-CAPTION line is added for each Language Code you specify. @@ -20314,6 +24561,15 @@ const ( HlsCaptionLanguageSettingNone = "NONE" ) +// HlsCaptionLanguageSetting_Values returns all elements of the HlsCaptionLanguageSetting enum +func HlsCaptionLanguageSetting_Values() []string { + return []string{ + HlsCaptionLanguageSettingInsert, + HlsCaptionLanguageSettingOmit, + HlsCaptionLanguageSettingNone, + } +} + // When set to ENABLED, sets #EXT-X-ALLOW-CACHE:no tag, which prevents client // from saving media segments for later replay. const ( @@ -20324,6 +24580,14 @@ const ( HlsClientCacheEnabled = "ENABLED" ) +// HlsClientCache_Values returns all elements of the HlsClientCache enum +func HlsClientCache_Values() []string { + return []string{ + HlsClientCacheDisabled, + HlsClientCacheEnabled, + } +} + // Specification to use (RFC-6381 or the default RFC-4281) during m3u8 playlist // generation. const ( @@ -20334,6 +24598,14 @@ const ( HlsCodecSpecificationRfc4281 = "RFC_4281" ) +// HlsCodecSpecification_Values returns all elements of the HlsCodecSpecification enum +func HlsCodecSpecification_Values() []string { + return []string{ + HlsCodecSpecificationRfc6381, + HlsCodecSpecificationRfc4281, + } +} + // Indicates whether segments should be placed in subdirectories. const ( // HlsDirectoryStructureSingleDirectory is a HlsDirectoryStructure enum value @@ -20343,6 +24615,14 @@ const ( HlsDirectoryStructureSubdirectoryPerStream = "SUBDIRECTORY_PER_STREAM" ) +// HlsDirectoryStructure_Values returns all elements of the HlsDirectoryStructure enum +func HlsDirectoryStructure_Values() []string { + return []string{ + HlsDirectoryStructureSingleDirectory, + HlsDirectoryStructureSubdirectoryPerStream, + } +} + // Encrypts the segments with the given encryption scheme. Leave blank to disable. // Selecting 'Disabled' in the web interface also disables encryption. const ( @@ -20353,6 +24633,14 @@ const ( HlsEncryptionTypeSampleAes = "SAMPLE_AES" ) +// HlsEncryptionType_Values returns all elements of the HlsEncryptionType enum +func HlsEncryptionType_Values() []string { + return []string{ + HlsEncryptionTypeAes128, + HlsEncryptionTypeSampleAes, + } +} + // When set to INCLUDE, writes I-Frame Only Manifest in addition to the HLS // manifest const ( @@ -20363,6 +24651,14 @@ const ( HlsIFrameOnlyManifestExclude = "EXCLUDE" ) +// HlsIFrameOnlyManifest_Values returns all elements of the HlsIFrameOnlyManifest enum +func HlsIFrameOnlyManifest_Values() []string { + return []string{ + HlsIFrameOnlyManifestInclude, + HlsIFrameOnlyManifestExclude, + } +} + // The Initialization Vector is a 128-bit number used in conjunction with the // key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed // in the manifest. Otherwise Initialization Vector is not in the manifest. 
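The HLS encryption enums in this stretch (encryption type, initialization vector placement, key provider type) are all consumed through a single HlsEncryptionSettings struct in this package. A hedged sketch of AES-128 segment encryption with a static key and the IV written into the manifest; the field names mirror the enum names but sit outside this hunk:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// AES-128 segment encryption, static key provider, IV in the manifest.
	enc := &mediaconvert.HlsEncryptionSettings{
		EncryptionMethod:               aws.String(mediaconvert.HlsEncryptionTypeAes128),
		InitializationVectorInManifest: aws.String(mediaconvert.HlsInitializationVectorInManifestInclude),
		Type:                           aws.String(mediaconvert.HlsKeyProviderTypeStaticKey),
	}

	out, _ := json.MarshalIndent(enc, "", "  ")
	fmt.Println(string(out))
}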
@@ -20374,6 +24670,14 @@ const ( HlsInitializationVectorInManifestExclude = "EXCLUDE" ) +// HlsInitializationVectorInManifest_Values returns all elements of the HlsInitializationVectorInManifest enum +func HlsInitializationVectorInManifest_Values() []string { + return []string{ + HlsInitializationVectorInManifestInclude, + HlsInitializationVectorInManifestExclude, + } +} + // Specify whether your DRM encryption key is static or from a key provider // that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html. const ( @@ -20384,6 +24688,14 @@ const ( HlsKeyProviderTypeStaticKey = "STATIC_KEY" ) +// HlsKeyProviderType_Values returns all elements of the HlsKeyProviderType enum +func HlsKeyProviderType_Values() []string { + return []string{ + HlsKeyProviderTypeSpeke, + HlsKeyProviderTypeStaticKey, + } +} + // When set to GZIP, compresses HLS playlist. const ( // HlsManifestCompressionGzip is a HlsManifestCompression enum value @@ -20393,6 +24705,14 @@ const ( HlsManifestCompressionNone = "NONE" ) +// HlsManifestCompression_Values returns all elements of the HlsManifestCompression enum +func HlsManifestCompression_Values() []string { + return []string{ + HlsManifestCompressionGzip, + HlsManifestCompressionNone, + } +} + // Indicates whether the output manifest should use floating point values for // segment duration. const ( @@ -20403,6 +24723,14 @@ const ( HlsManifestDurationFormatInteger = "INTEGER" ) +// HlsManifestDurationFormat_Values returns all elements of the HlsManifestDurationFormat enum +func HlsManifestDurationFormat_Values() []string { + return []string{ + HlsManifestDurationFormatFloatingPoint, + HlsManifestDurationFormatInteger, + } +} + // Enable this setting to insert the EXT-X-SESSION-KEY element into the master // playlist. This allows for offline Apple HLS FairPlay content protection. const ( @@ -20413,6 +24741,14 @@ const ( HlsOfflineEncryptedDisabled = "DISABLED" ) +// HlsOfflineEncrypted_Values returns all elements of the HlsOfflineEncrypted enum +func HlsOfflineEncrypted_Values() []string { + return []string{ + HlsOfflineEncryptedEnabled, + HlsOfflineEncryptedDisabled, + } +} + // Indicates whether the .m3u8 manifest file should be generated for this HLS // output group. const ( @@ -20423,6 +24759,14 @@ const ( HlsOutputSelectionSegmentsOnly = "SEGMENTS_ONLY" ) +// HlsOutputSelection_Values returns all elements of the HlsOutputSelection enum +func HlsOutputSelection_Values() []string { + return []string{ + HlsOutputSelectionManifestsAndSegments, + HlsOutputSelectionSegmentsOnly, + } +} + // Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. // The value is calculated as follows: either the program date and time are // initialized using the input timecode source, or the time is initialized using @@ -20435,6 +24779,14 @@ const ( HlsProgramDateTimeExclude = "EXCLUDE" ) +// HlsProgramDateTime_Values returns all elements of the HlsProgramDateTime enum +func HlsProgramDateTime_Values() []string { + return []string{ + HlsProgramDateTimeInclude, + HlsProgramDateTimeExclude, + } +} + // When set to SINGLE_FILE, emits program as a single media resource (.ts) file, // uses #EXT-X-BYTERANGE tags to index segment for playback. 
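In the Terraform provider this SDK is being vendored for, these Hls* value helpers are typically wired straight into schema validation rather than used only at job-build time. A sketch of that pattern, assuming terraform-plugin-sdk v2 import paths (adjust to whichever plugin SDK version this repository actually vendors):

package aws

import (
	"github.com/aws/aws-sdk-go/service/mediaconvert"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// hlsSegmentControlSchema returns a schema attribute whose allowed values
// track the SDK's HlsSegmentControl enum instead of a hand-written list.
func hlsSegmentControlSchema() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Optional:     true,
		Default:      mediaconvert.HlsSegmentControlSegmentedFiles,
		ValidateFunc: validation.StringInSlice(mediaconvert.HlsSegmentControl_Values(), false),
	}
}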
const ( @@ -20445,6 +24797,14 @@ const ( HlsSegmentControlSegmentedFiles = "SEGMENTED_FILES" ) +// HlsSegmentControl_Values returns all elements of the HlsSegmentControl enum +func HlsSegmentControl_Values() []string { + return []string{ + HlsSegmentControlSingleFile, + HlsSegmentControlSegmentedFiles, + } +} + // Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag // of variant manifest. const ( @@ -20455,6 +24815,14 @@ const ( HlsStreamInfResolutionExclude = "EXCLUDE" ) +// HlsStreamInfResolution_Values returns all elements of the HlsStreamInfResolution enum +func HlsStreamInfResolution_Values() []string { + return []string{ + HlsStreamInfResolutionInclude, + HlsStreamInfResolutionExclude, + } +} + // Indicates ID3 frame that has the timecode. const ( // HlsTimedMetadataId3FrameNone is a HlsTimedMetadataId3Frame enum value @@ -20467,10 +24835,19 @@ const ( HlsTimedMetadataId3FrameTdrl = "TDRL" ) +// HlsTimedMetadataId3Frame_Values returns all elements of the HlsTimedMetadataId3Frame enum +func HlsTimedMetadataId3Frame_Values() []string { + return []string{ + HlsTimedMetadataId3FrameNone, + HlsTimedMetadataId3FramePriv, + HlsTimedMetadataId3FrameTdrl, + } +} + // Keep this setting enabled to have MediaConvert use the font style and position // information from the captions source in the output. This option is available -// only when your input captions are CFF-TT, IMSC, SMPTE-TT, or TTML. Disable -// this setting for simplified output captions. +// only when your input captions are IMSC, SMPTE-TT, or TTML. Disable this setting +// for simplified output captions. const ( // ImscStylePassthroughEnabled is a ImscStylePassthrough enum value ImscStylePassthroughEnabled = "ENABLED" @@ -20479,8 +24856,16 @@ const ( ImscStylePassthroughDisabled = "DISABLED" ) +// ImscStylePassthrough_Values returns all elements of the ImscStylePassthrough enum +func ImscStylePassthrough_Values() []string { + return []string{ + ImscStylePassthroughEnabled, + ImscStylePassthroughDisabled, + } +} + // Enable Deblock (InputDeblockFilter) to produce smoother motion in the output. -// Default is disabled. Only manaully controllable for MPEG2 and uncompressed +// Default is disabled. Only manually controllable for MPEG2 and uncompressed // video inputs. const ( // InputDeblockFilterEnabled is a InputDeblockFilter enum value @@ -20490,6 +24875,14 @@ const ( InputDeblockFilterDisabled = "DISABLED" ) +// InputDeblockFilter_Values returns all elements of the InputDeblockFilter enum +func InputDeblockFilter_Values() []string { + return []string{ + InputDeblockFilterEnabled, + InputDeblockFilterDisabled, + } +} + // Enable Denoise (InputDenoiseFilter) to filter noise from the input. Default // is disabled. Only applicable to MPEG2, H.264, H.265, and uncompressed video // inputs. @@ -20501,13 +24894,21 @@ const ( InputDenoiseFilterDisabled = "DISABLED" ) -// Use Filter enable (InputFilterEnable) to specify how the transcoding service -// applies the denoise and deblock filters. You must also enable the filters -// separately, with Denoise (InputDenoiseFilter) and Deblock (InputDeblockFilter). -// * Auto - The transcoding service determines whether to apply filtering, depending -// on input type and quality. * Disable - The input is not filtered. This is -// true even if you use the API to enable them in (InputDeblockFilter) and (InputDeblockFilter). -// * Force - The in put is filtered regardless of input type. 
+// InputDenoiseFilter_Values returns all elements of the InputDenoiseFilter enum +func InputDenoiseFilter_Values() []string { + return []string{ + InputDenoiseFilterEnabled, + InputDenoiseFilterDisabled, + } +} + +// Specify how the transcoding service applies the denoise and deblock filters. +// You must also enable the filters separately, with Denoise (InputDenoiseFilter) +// and Deblock (InputDeblockFilter). * Auto - The transcoding service determines +// whether to apply filtering, depending on input type and quality. * Disable +// - The input is not filtered. This is true even if you use the API to enable +// them in (InputDeblockFilter) and (InputDeblockFilter). * Force - The input +// is filtered regardless of input type. const ( // InputFilterEnableAuto is a InputFilterEnable enum value InputFilterEnableAuto = "AUTO" @@ -20519,6 +24920,15 @@ const ( InputFilterEnableForce = "FORCE" ) +// InputFilterEnable_Values returns all elements of the InputFilterEnable enum +func InputFilterEnable_Values() []string { + return []string{ + InputFilterEnableAuto, + InputFilterEnableDisable, + InputFilterEnableForce, + } +} + // Set PSI control (InputPsiControl) for transport stream inputs to specify // which data the demux process to scans. * Ignore PSI - Scan all PIDs for audio // and video. * Use PSI - Scan only PSI data. @@ -20530,6 +24940,14 @@ const ( InputPsiControlUsePsi = "USE_PSI" ) +// InputPsiControl_Values returns all elements of the InputPsiControl enum +func InputPsiControl_Values() []string { + return []string{ + InputPsiControlIgnorePsi, + InputPsiControlUsePsi, + } +} + // Use Rotate (InputRotate) to specify how the service rotates your video. You // can choose automatic rotation or specify a rotation. You can specify a clockwise // rotation of 0, 90, 180, or 270 degrees. If your input video container is @@ -20557,6 +24975,40 @@ const ( InputRotateAuto = "AUTO" ) +// InputRotate_Values returns all elements of the InputRotate enum +func InputRotate_Values() []string { + return []string{ + InputRotateDegree0, + InputRotateDegrees90, + InputRotateDegrees180, + InputRotateDegrees270, + InputRotateAuto, + } +} + +// When you have a progressive segmented frame (PsF) input, use this setting +// to flag the input as PsF. MediaConvert doesn't automatically detect PsF. +// Therefore, flagging your input as PsF results in better preservation of video +// quality when you do deinterlacing and frame rate conversion. If you don't +// specify, the default value is Auto (AUTO). Auto is the correct setting for +// all inputs that are not PsF. Don't set this value to PsF when your input +// is interlaced. Doing so creates horizontal interlacing artifacts. +const ( + // InputScanTypeAuto is a InputScanType enum value + InputScanTypeAuto = "AUTO" + + // InputScanTypePsf is a InputScanType enum value + InputScanTypePsf = "PSF" +) + +// InputScanType_Values returns all elements of the InputScanType enum +func InputScanType_Values() []string { + return []string{ + InputScanTypeAuto, + InputScanTypePsf, + } +} + // Use this Timecode source setting, located under the input settings (InputTimecodeSource), // to specify how the service counts input video frames. 
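The input-side enums above (deblock, denoise, filter enable, PSI control, rotation, scan type) all land on the Input struct of a job's settings; note that, per the comment, FORCE only has an effect if the individual filters are also enabled. A hedged sketch, assuming Input field names that mirror the enum names; the S3 path is a placeholder:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	in := &mediaconvert.Input{
		FileInput:     aws.String("s3://example-bucket/source.mov"), // placeholder
		FilterEnable:  aws.String(mediaconvert.InputFilterEnableForce),
		DeblockFilter: aws.String(mediaconvert.InputDeblockFilterEnabled),
		DenoiseFilter: aws.String(mediaconvert.InputDenoiseFilterEnabled),
		PsiControl:    aws.String(mediaconvert.InputPsiControlUsePsi),
		InputScanType: aws.String(mediaconvert.InputScanTypeAuto),
	}

	out, _ := json.MarshalIndent(in, "", "  ")
	fmt.Println(string(out))
}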
This input frame count // affects only the behavior of features that apply to a single input at a time, @@ -20578,6 +25030,15 @@ const ( InputTimecodeSourceSpecifiedstart = "SPECIFIEDSTART" ) +// InputTimecodeSource_Values returns all elements of the InputTimecodeSource enum +func InputTimecodeSource_Values() []string { + return []string{ + InputTimecodeSourceEmbedded, + InputTimecodeSourceZerobased, + InputTimecodeSourceSpecifiedstart, + } +} + // A job's phase can be PROBING, TRANSCODING OR UPLOADING const ( // JobPhaseProbing is a JobPhase enum value @@ -20590,6 +25051,15 @@ const ( JobPhaseUploading = "UPLOADING" ) +// JobPhase_Values returns all elements of the JobPhase enum +func JobPhase_Values() []string { + return []string{ + JobPhaseProbing, + JobPhaseTranscoding, + JobPhaseUploading, + } +} + // A job's status can be SUBMITTED, PROGRESSING, COMPLETE, CANCELED, or ERROR. const ( // JobStatusSubmitted is a JobStatus enum value @@ -20608,6 +25078,17 @@ const ( JobStatusError = "ERROR" ) +// JobStatus_Values returns all elements of the JobStatus enum +func JobStatus_Values() []string { + return []string{ + JobStatusSubmitted, + JobStatusProgressing, + JobStatusComplete, + JobStatusCanceled, + JobStatusError, + } +} + // Optional. When you request a list of job templates, you can choose to list // them alphabetically by NAME or chronologically by CREATION_DATE. If you don't // specify, the service will list them by name. @@ -20622,6 +25103,15 @@ const ( JobTemplateListBySystem = "SYSTEM" ) +// JobTemplateListBy_Values returns all elements of the JobTemplateListBy enum +func JobTemplateListBy_Values() []string { + return []string{ + JobTemplateListByName, + JobTemplateListByCreationDate, + JobTemplateListBySystem, + } +} + // Specify the language, using the ISO 639-2 three-letter code listed at https://www.loc.gov/standards/iso639-2/php/code_list.php. 
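The JobStatus values above are what callers switch on when polling a submitted job. A sketch using this package's GetJob API; the region and job ID are placeholders, and error handling is minimal:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := mediaconvert.New(sess)

	resp, err := client.GetJob(&mediaconvert.GetJobInput{
		Id: aws.String("1234567890123-abc123"), // placeholder job ID
	})
	if err != nil {
		log.Fatal(err)
	}

	switch aws.StringValue(resp.Job.Status) {
	case mediaconvert.JobStatusComplete:
		fmt.Println("job finished")
	case mediaconvert.JobStatusError, mediaconvert.JobStatusCanceled:
		fmt.Println("job did not complete")
	default:
		fmt.Println("job still in progress:", aws.StringValue(resp.Job.Status))
	}
}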
const ( // LanguageCodeEng is a LanguageCode enum value @@ -21198,6 +25688,203 @@ const ( LanguageCodeTng = "TNG" ) +// LanguageCode_Values returns all elements of the LanguageCode enum +func LanguageCode_Values() []string { + return []string{ + LanguageCodeEng, + LanguageCodeSpa, + LanguageCodeFra, + LanguageCodeDeu, + LanguageCodeGer, + LanguageCodeZho, + LanguageCodeAra, + LanguageCodeHin, + LanguageCodeJpn, + LanguageCodeRus, + LanguageCodePor, + LanguageCodeIta, + LanguageCodeUrd, + LanguageCodeVie, + LanguageCodeKor, + LanguageCodePan, + LanguageCodeAbk, + LanguageCodeAar, + LanguageCodeAfr, + LanguageCodeAka, + LanguageCodeSqi, + LanguageCodeAmh, + LanguageCodeArg, + LanguageCodeHye, + LanguageCodeAsm, + LanguageCodeAva, + LanguageCodeAve, + LanguageCodeAym, + LanguageCodeAze, + LanguageCodeBam, + LanguageCodeBak, + LanguageCodeEus, + LanguageCodeBel, + LanguageCodeBen, + LanguageCodeBih, + LanguageCodeBis, + LanguageCodeBos, + LanguageCodeBre, + LanguageCodeBul, + LanguageCodeMya, + LanguageCodeCat, + LanguageCodeKhm, + LanguageCodeCha, + LanguageCodeChe, + LanguageCodeNya, + LanguageCodeChu, + LanguageCodeChv, + LanguageCodeCor, + LanguageCodeCos, + LanguageCodeCre, + LanguageCodeHrv, + LanguageCodeCes, + LanguageCodeDan, + LanguageCodeDiv, + LanguageCodeNld, + LanguageCodeDzo, + LanguageCodeEnm, + LanguageCodeEpo, + LanguageCodeEst, + LanguageCodeEwe, + LanguageCodeFao, + LanguageCodeFij, + LanguageCodeFin, + LanguageCodeFrm, + LanguageCodeFul, + LanguageCodeGla, + LanguageCodeGlg, + LanguageCodeLug, + LanguageCodeKat, + LanguageCodeEll, + LanguageCodeGrn, + LanguageCodeGuj, + LanguageCodeHat, + LanguageCodeHau, + LanguageCodeHeb, + LanguageCodeHer, + LanguageCodeHmo, + LanguageCodeHun, + LanguageCodeIsl, + LanguageCodeIdo, + LanguageCodeIbo, + LanguageCodeInd, + LanguageCodeIna, + LanguageCodeIle, + LanguageCodeIku, + LanguageCodeIpk, + LanguageCodeGle, + LanguageCodeJav, + LanguageCodeKal, + LanguageCodeKan, + LanguageCodeKau, + LanguageCodeKas, + LanguageCodeKaz, + LanguageCodeKik, + LanguageCodeKin, + LanguageCodeKir, + LanguageCodeKom, + LanguageCodeKon, + LanguageCodeKua, + LanguageCodeKur, + LanguageCodeLao, + LanguageCodeLat, + LanguageCodeLav, + LanguageCodeLim, + LanguageCodeLin, + LanguageCodeLit, + LanguageCodeLub, + LanguageCodeLtz, + LanguageCodeMkd, + LanguageCodeMlg, + LanguageCodeMsa, + LanguageCodeMal, + LanguageCodeMlt, + LanguageCodeGlv, + LanguageCodeMri, + LanguageCodeMar, + LanguageCodeMah, + LanguageCodeMon, + LanguageCodeNau, + LanguageCodeNav, + LanguageCodeNde, + LanguageCodeNbl, + LanguageCodeNdo, + LanguageCodeNep, + LanguageCodeSme, + LanguageCodeNor, + LanguageCodeNob, + LanguageCodeNno, + LanguageCodeOci, + LanguageCodeOji, + LanguageCodeOri, + LanguageCodeOrm, + LanguageCodeOss, + LanguageCodePli, + LanguageCodeFas, + LanguageCodePol, + LanguageCodePus, + LanguageCodeQue, + LanguageCodeQaa, + LanguageCodeRon, + LanguageCodeRoh, + LanguageCodeRun, + LanguageCodeSmo, + LanguageCodeSag, + LanguageCodeSan, + LanguageCodeSrd, + LanguageCodeSrb, + LanguageCodeSna, + LanguageCodeIii, + LanguageCodeSnd, + LanguageCodeSin, + LanguageCodeSlk, + LanguageCodeSlv, + LanguageCodeSom, + LanguageCodeSot, + LanguageCodeSun, + LanguageCodeSwa, + LanguageCodeSsw, + LanguageCodeSwe, + LanguageCodeTgl, + LanguageCodeTah, + LanguageCodeTgk, + LanguageCodeTam, + LanguageCodeTat, + LanguageCodeTel, + LanguageCodeTha, + LanguageCodeBod, + LanguageCodeTir, + LanguageCodeTon, + LanguageCodeTso, + LanguageCodeTsn, + LanguageCodeTur, + LanguageCodeTuk, + LanguageCodeTwi, + 
LanguageCodeUig, + LanguageCodeUkr, + LanguageCodeUzb, + LanguageCodeVen, + LanguageCodeVol, + LanguageCodeWln, + LanguageCodeCym, + LanguageCodeFry, + LanguageCodeWol, + LanguageCodeXho, + LanguageCodeYid, + LanguageCodeYor, + LanguageCodeZha, + LanguageCodeZul, + LanguageCodeOrj, + LanguageCodeQpc, + LanguageCodeTng, + } +} + // Selects between the DVB and ATSC buffer models for Dolby Digital audio. const ( // M2tsAudioBufferModelDvb is a M2tsAudioBufferModel enum value @@ -21207,6 +25894,14 @@ const ( M2tsAudioBufferModelAtsc = "ATSC" ) +// M2tsAudioBufferModel_Values returns all elements of the M2tsAudioBufferModel enum +func M2tsAudioBufferModel_Values() []string { + return []string{ + M2tsAudioBufferModelDvb, + M2tsAudioBufferModelAtsc, + } +} + // Controls what buffer model to use for accurate interleaving. If set to MULTIPLEX, // use multiplex buffer model. If set to NONE, this can lead to lower latency, // but low-memory devices may not be able to play back the stream without interruptions. @@ -21218,6 +25913,14 @@ const ( M2tsBufferModelNone = "NONE" ) +// M2tsBufferModel_Values returns all elements of the M2tsBufferModel enum +func M2tsBufferModel_Values() []string { + return []string{ + M2tsBufferModelMultiplex, + M2tsBufferModelNone, + } +} + // When set to VIDEO_AND_FIXED_INTERVALS, audio EBP markers will be added to // partitions 3 and 4. The interval between these additional markers will be // fixed, and will be slightly shorter than the video EBP marker interval. When @@ -21232,6 +25935,14 @@ const ( M2tsEbpAudioIntervalVideoInterval = "VIDEO_INTERVAL" ) +// M2tsEbpAudioInterval_Values returns all elements of the M2tsEbpAudioInterval enum +func M2tsEbpAudioInterval_Values() []string { + return []string{ + M2tsEbpAudioIntervalVideoAndFixedIntervals, + M2tsEbpAudioIntervalVideoInterval, + } +} + // Selects which PIDs to place EBP markers on. They can either be placed only // on the video PID, or on both the video PID and all audio PIDs. Only applicable // when EBP segmentation markers are is selected (segmentationMarkers is EBP @@ -21244,6 +25955,14 @@ const ( M2tsEbpPlacementVideoPid = "VIDEO_PID" ) +// M2tsEbpPlacement_Values returns all elements of the M2tsEbpPlacement enum +func M2tsEbpPlacement_Values() []string { + return []string{ + M2tsEbpPlacementVideoAndAudioPids, + M2tsEbpPlacementVideoPid, + } +} + // Controls whether to include the ES Rate field in the PES header. const ( // M2tsEsRateInPesInclude is a M2tsEsRateInPes enum value @@ -21253,6 +25972,14 @@ const ( M2tsEsRateInPesExclude = "EXCLUDE" ) +// M2tsEsRateInPes_Values returns all elements of the M2tsEsRateInPes enum +func M2tsEsRateInPes_Values() []string { + return []string{ + M2tsEsRateInPesInclude, + M2tsEsRateInPesExclude, + } +} + // Keep the default value (DEFAULT) unless you know that your audio EBP markers // are incorrectly appearing before your video EBP markers. To correct this // problem, set this value to Force (FORCE). @@ -21264,6 +25991,14 @@ const ( M2tsForceTsVideoEbpOrderDefault = "DEFAULT" ) +// M2tsForceTsVideoEbpOrder_Values returns all elements of the M2tsForceTsVideoEbpOrder enum +func M2tsForceTsVideoEbpOrder_Values() []string { + return []string{ + M2tsForceTsVideoEbpOrderForce, + M2tsForceTsVideoEbpOrderDefault, + } +} + // If INSERT, Nielsen inaudible tones for media tracking will be detected in // the input audio and an equivalent ID3 tag will be inserted in the output. 
const ( @@ -21274,6 +26009,14 @@ const ( M2tsNielsenId3None = "NONE" ) +// M2tsNielsenId3_Values returns all elements of the M2tsNielsenId3 enum +func M2tsNielsenId3_Values() []string { + return []string{ + M2tsNielsenId3Insert, + M2tsNielsenId3None, + } +} + // When set to PCR_EVERY_PES_PACKET, a Program Clock Reference value is inserted // for every Packetized Elementary Stream (PES) header. This is effective only // when the PCR PID is the same as the video or audio elementary stream. @@ -21285,6 +26028,14 @@ const ( M2tsPcrControlConfiguredPcrPeriod = "CONFIGURED_PCR_PERIOD" ) +// M2tsPcrControl_Values returns all elements of the M2tsPcrControl enum +func M2tsPcrControl_Values() []string { + return []string{ + M2tsPcrControlPcrEveryPesPacket, + M2tsPcrControlConfiguredPcrPeriod, + } +} + // When set to CBR, inserts null packets into transport stream to fill specified // bitrate. When set to VBR, the bitrate setting acts as the maximum bitrate, // but the output will not be padded up to that bitrate. @@ -21296,6 +26047,14 @@ const ( M2tsRateModeCbr = "CBR" ) +// M2tsRateMode_Values returns all elements of the M2tsRateMode enum +func M2tsRateMode_Values() []string { + return []string{ + M2tsRateModeVbr, + M2tsRateModeCbr, + } +} + // For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if // you want SCTE-35 markers that appear in your input to also appear in this // output. Choose None (NONE) if you don't want SCTE-35 markers in this output. @@ -21310,6 +26069,14 @@ const ( M2tsScte35SourceNone = "NONE" ) +// M2tsScte35Source_Values returns all elements of the M2tsScte35Source enum +func M2tsScte35Source_Values() []string { + return []string{ + M2tsScte35SourcePassthrough, + M2tsScte35SourceNone, + } +} + // Inserts segmentation markers at each segmentation_time period. rai_segstart // sets the Random Access Indicator bit in the adaptation field. rai_adapt sets // the RAI bit and adds the current timecode in the private data bytes. psi_segstart @@ -21337,6 +26104,18 @@ const ( M2tsSegmentationMarkersEbpLegacy = "EBP_LEGACY" ) +// M2tsSegmentationMarkers_Values returns all elements of the M2tsSegmentationMarkers enum +func M2tsSegmentationMarkers_Values() []string { + return []string{ + M2tsSegmentationMarkersNone, + M2tsSegmentationMarkersRaiSegstart, + M2tsSegmentationMarkersRaiAdapt, + M2tsSegmentationMarkersPsiSegstart, + M2tsSegmentationMarkersEbp, + M2tsSegmentationMarkersEbpLegacy, + } +} + // The segmentation style parameter controls how segmentation markers are inserted // into the transport stream. With avails, it is possible that segments may // be truncated, which can influence where future segmentation markers are inserted. @@ -21356,6 +26135,14 @@ const ( M2tsSegmentationStyleResetCadence = "RESET_CADENCE" ) +// M2tsSegmentationStyle_Values returns all elements of the M2tsSegmentationStyle enum +func M2tsSegmentationStyle_Values() []string { + return []string{ + M2tsSegmentationStyleMaintainCadence, + M2tsSegmentationStyleResetCadence, + } +} + // If INSERT, Nielsen inaudible tones for media tracking will be detected in // the input audio and an equivalent ID3 tag will be inserted in the output. 
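The M2ts* enums in this block are all fields of M2tsSettings on a transport-stream output container. A hedged sketch shaped like the provider's expand-style helpers, assuming field names that mirror the enum names:

package aws

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

// exampleM2tsSettings returns a CBR transport stream that passes SCTE-35
// markers through and uses EBP segmentation markers.
func exampleM2tsSettings() *mediaconvert.M2tsSettings {
	return &mediaconvert.M2tsSettings{
		RateMode:            aws.String(mediaconvert.M2tsRateModeCbr),
		PcrControl:          aws.String(mediaconvert.M2tsPcrControlPcrEveryPesPacket),
		Scte35Source:        aws.String(mediaconvert.M2tsScte35SourcePassthrough),
		SegmentationMarkers: aws.String(mediaconvert.M2tsSegmentationMarkersEbp),
		NielsenId3:          aws.String(mediaconvert.M2tsNielsenId3None),
	}
}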
const ( @@ -21366,6 +26153,14 @@ const ( M3u8NielsenId3None = "NONE" ) +// M3u8NielsenId3_Values returns all elements of the M3u8NielsenId3 enum +func M3u8NielsenId3_Values() []string { + return []string{ + M3u8NielsenId3Insert, + M3u8NielsenId3None, + } +} + // When set to PCR_EVERY_PES_PACKET a Program Clock Reference value is inserted // for every Packetized Elementary Stream (PES) header. This parameter is effective // only when the PCR PID is the same as the video or audio elementary stream. @@ -21377,6 +26172,14 @@ const ( M3u8PcrControlConfiguredPcrPeriod = "CONFIGURED_PCR_PERIOD" ) +// M3u8PcrControl_Values returns all elements of the M3u8PcrControl enum +func M3u8PcrControl_Values() []string { + return []string{ + M3u8PcrControlPcrEveryPesPacket, + M3u8PcrControlConfiguredPcrPeriod, + } +} + // For SCTE-35 markers from your input-- Choose Passthrough (PASSTHROUGH) if // you want SCTE-35 markers that appear in your input to also appear in this // output. Choose None (NONE) if you don't want SCTE-35 markers in this output. @@ -21393,6 +26196,14 @@ const ( M3u8Scte35SourceNone = "NONE" ) +// M3u8Scte35Source_Values returns all elements of the M3u8Scte35Source enum +func M3u8Scte35Source_Values() []string { + return []string{ + M3u8Scte35SourcePassthrough, + M3u8Scte35SourceNone, + } +} + // Choose the type of motion graphic asset that you are providing for your overlay. // You can choose either a .mov file or a series of .png files. const ( @@ -21403,6 +26214,14 @@ const ( MotionImageInsertionModePng = "PNG" ) +// MotionImageInsertionMode_Values returns all elements of the MotionImageInsertionMode enum +func MotionImageInsertionMode_Values() []string { + return []string{ + MotionImageInsertionModeMov, + MotionImageInsertionModePng, + } +} + // Specify whether your motion graphic overlay repeats on a loop or plays only // once. const ( @@ -21413,6 +26232,14 @@ const ( MotionImagePlaybackRepeat = "REPEAT" ) +// MotionImagePlayback_Values returns all elements of the MotionImagePlayback enum +func MotionImagePlayback_Values() []string { + return []string{ + MotionImagePlaybackOnce, + MotionImagePlaybackRepeat, + } +} + // When enabled, include 'clap' atom if appropriate for the video output settings. const ( // MovClapAtomInclude is a MovClapAtom enum value @@ -21422,6 +26249,14 @@ const ( MovClapAtomExclude = "EXCLUDE" ) +// MovClapAtom_Values returns all elements of the MovClapAtom enum +func MovClapAtom_Values() []string { + return []string{ + MovClapAtomInclude, + MovClapAtomExclude, + } +} + // When enabled, file composition times will start at zero, composition times // in the 'ctts' (composition time to sample) box for B-frames will be negative, // and a 'cslg' (composition shift least greatest) box will be included per @@ -21434,6 +26269,14 @@ const ( MovCslgAtomExclude = "EXCLUDE" ) +// MovCslgAtom_Values returns all elements of the MovCslgAtom enum +func MovCslgAtom_Values() []string { + return []string{ + MovCslgAtomInclude, + MovCslgAtomExclude, + } +} + // When set to XDCAM, writes MPEG2 video streams into the QuickTime file using // XDCAM fourcc codes. This increases compatibility with Apple editors and players, // but may decrease compatibility with other players. 
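MotionImageInsertionMode and MotionImagePlayback above belong to the motion graphic overlay feature, configured through MotionImageInserter on the job settings. A hedged sketch for a looping PNG sequence overlay; the field names and the S3 object key are assumptions:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	overlay := &mediaconvert.MotionImageInserter{
		Input:         aws.String("s3://example-bucket/overlay_000.png"), // placeholder asset
		InsertionMode: aws.String(mediaconvert.MotionImageInsertionModePng),
		Playback:      aws.String(mediaconvert.MotionImagePlaybackRepeat),
	}

	out, _ := json.MarshalIndent(overlay, "", "  ")
	fmt.Println(string(out))
}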
Only applicable when the @@ -21446,7 +26289,19 @@ const ( MovMpeg2FourCCControlMpeg = "MPEG" ) -// If set to OMNEON, inserts Omneon-compatible padding +// MovMpeg2FourCCControl_Values returns all elements of the MovMpeg2FourCCControl enum +func MovMpeg2FourCCControl_Values() []string { + return []string{ + MovMpeg2FourCCControlXdcam, + MovMpeg2FourCCControlMpeg, + } +} + +// To make this output compatible with Omenon, keep the default value, OMNEON. +// Unless you need Omneon compatibility, set this value to NONE. When you keep +// the default value, OMNEON, MediaConvert increases the length of the edit +// list atom. This might cause file rejections when a recipient of the output +// file doesn't expct this extra padding. const ( // MovPaddingControlOmneon is a MovPaddingControl enum value MovPaddingControlOmneon = "OMNEON" @@ -21455,6 +26310,14 @@ const ( MovPaddingControlNone = "NONE" ) +// MovPaddingControl_Values returns all elements of the MovPaddingControl enum +func MovPaddingControl_Values() []string { + return []string{ + MovPaddingControlOmneon, + MovPaddingControlNone, + } +} + // Always keep the default value (SELF_CONTAINED) for this setting. const ( // MovReferenceSelfContained is a MovReference enum value @@ -21464,6 +26327,14 @@ const ( MovReferenceExternal = "EXTERNAL" ) +// MovReference_Values returns all elements of the MovReference enum +func MovReference_Values() []string { + return []string{ + MovReferenceSelfContained, + MovReferenceExternal, + } +} + // Specify whether the service encodes this MP3 audio output with a constant // bitrate (CBR) or a variable bitrate (VBR). const ( @@ -21474,6 +26345,14 @@ const ( Mp3RateControlModeVbr = "VBR" ) +// Mp3RateControlMode_Values returns all elements of the Mp3RateControlMode enum +func Mp3RateControlMode_Values() []string { + return []string{ + Mp3RateControlModeCbr, + Mp3RateControlModeVbr, + } +} + // When enabled, file composition times will start at zero, composition times // in the 'ctts' (composition time to sample) box for B-frames will be negative, // and a 'cslg' (composition shift least greatest) box will be included per @@ -21486,6 +26365,14 @@ const ( Mp4CslgAtomExclude = "EXCLUDE" ) +// Mp4CslgAtom_Values returns all elements of the Mp4CslgAtom enum +func Mp4CslgAtom_Values() []string { + return []string{ + Mp4CslgAtomInclude, + Mp4CslgAtomExclude, + } +} + // Inserts a free-space box immediately after the moov box. const ( // Mp4FreeSpaceBoxInclude is a Mp4FreeSpaceBox enum value @@ -21495,6 +26382,14 @@ const ( Mp4FreeSpaceBoxExclude = "EXCLUDE" ) +// Mp4FreeSpaceBox_Values returns all elements of the Mp4FreeSpaceBox enum +func Mp4FreeSpaceBox_Values() []string { + return []string{ + Mp4FreeSpaceBoxInclude, + Mp4FreeSpaceBoxExclude, + } +} + // If set to PROGRESSIVE_DOWNLOAD, the MOOV atom is relocated to the beginning // of the archive as required for progressive downloading. Otherwise it is placed // normally at the end. @@ -21506,6 +26401,14 @@ const ( Mp4MoovPlacementNormal = "NORMAL" ) +// Mp4MoovPlacement_Values returns all elements of the Mp4MoovPlacement enum +func Mp4MoovPlacement_Values() []string { + return []string{ + Mp4MoovPlacementProgressiveDownload, + Mp4MoovPlacementNormal, + } +} + // Use this setting only in DASH output groups that include sidecar TTML or // IMSC captions. You specify sidecar captions in a separate output from your // audio and video. 
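Mp4CslgAtom, Mp4FreeSpaceBox, and Mp4MoovPlacement above are fields of Mp4Settings on an MP4 container. A hedged sketch of a progressive-download-friendly configuration, assuming field names that mirror the enums:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	mp4 := &mediaconvert.Mp4Settings{
		// Relocate the moov atom to the front so playback can start
		// before the whole file has downloaded.
		MoovPlacement: aws.String(mediaconvert.Mp4MoovPlacementProgressiveDownload),
		CslgAtom:      aws.String(mediaconvert.Mp4CslgAtomInclude),
		FreeSpaceBox:  aws.String(mediaconvert.Mp4FreeSpaceBoxExclude),
	}
	fmt.Println("moov placement:", aws.StringValue(mp4.MoovPlacement))
}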
Choose Raw (RAW) for captions in a single XML file in a @@ -21520,6 +26423,14 @@ const ( MpdCaptionContainerTypeFragmentedMp4 = "FRAGMENTED_MP4" ) +// MpdCaptionContainerType_Values returns all elements of the MpdCaptionContainerType enum +func MpdCaptionContainerType_Values() []string { + return []string{ + MpdCaptionContainerTypeRaw, + MpdCaptionContainerTypeFragmentedMp4, + } +} + // Use this setting only when you specify SCTE-35 markers from ESAM. Choose // INSERT to put SCTE-35 markers in this output at the insertion points that // you specify in an ESAM XML document. Provide the document in the setting @@ -21532,6 +26443,14 @@ const ( MpdScte35EsamNone = "NONE" ) +// MpdScte35Esam_Values returns all elements of the MpdScte35Esam enum +func MpdScte35Esam_Values() []string { + return []string{ + MpdScte35EsamInsert, + MpdScte35EsamNone, + } +} + // Ignore this setting unless you have SCTE-35 markers in your input video file. // Choose Passthrough (PASSTHROUGH) if you want SCTE-35 markers that appear // in your input to also appear in this output. Choose None (NONE) if you don't @@ -21544,8 +26463,18 @@ const ( MpdScte35SourceNone = "NONE" ) -// Adaptive quantization. Allows intra-frame quantizers to vary to improve visual -// quality. +// MpdScte35Source_Values returns all elements of the MpdScte35Source enum +func MpdScte35Source_Values() []string { + return []string{ + MpdScte35SourcePassthrough, + MpdScte35SourceNone, + } +} + +// Specify the strength of any adaptive quantization filters that you enable. +// The value that you choose here applies to the following settings: Spatial +// adaptive quantization (spatialAdaptiveQuantization), and Temporal adaptive +// quantization (temporalAdaptiveQuantization). const ( // Mpeg2AdaptiveQuantizationOff is a Mpeg2AdaptiveQuantization enum value Mpeg2AdaptiveQuantizationOff = "OFF" @@ -21560,6 +26489,16 @@ const ( Mpeg2AdaptiveQuantizationHigh = "HIGH" ) +// Mpeg2AdaptiveQuantization_Values returns all elements of the Mpeg2AdaptiveQuantization enum +func Mpeg2AdaptiveQuantization_Values() []string { + return []string{ + Mpeg2AdaptiveQuantizationOff, + Mpeg2AdaptiveQuantizationLow, + Mpeg2AdaptiveQuantizationMedium, + Mpeg2AdaptiveQuantizationHigh, + } +} + // Use Level (Mpeg2CodecLevel) to set the MPEG-2 level for the video output. const ( // Mpeg2CodecLevelAuto is a Mpeg2CodecLevel enum value @@ -21578,6 +26517,17 @@ const ( Mpeg2CodecLevelHigh = "HIGH" ) +// Mpeg2CodecLevel_Values returns all elements of the Mpeg2CodecLevel enum +func Mpeg2CodecLevel_Values() []string { + return []string{ + Mpeg2CodecLevelAuto, + Mpeg2CodecLevelLow, + Mpeg2CodecLevelMain, + Mpeg2CodecLevelHigh1440, + Mpeg2CodecLevelHigh, + } +} + // Use Profile (Mpeg2CodecProfile) to set the MPEG-2 profile for the video output. const ( // Mpeg2CodecProfileMain is a Mpeg2CodecProfile enum value @@ -21587,6 +26537,14 @@ const ( Mpeg2CodecProfileProfile422 = "PROFILE_422" ) +// Mpeg2CodecProfile_Values returns all elements of the Mpeg2CodecProfile enum +func Mpeg2CodecProfile_Values() []string { + return []string{ + Mpeg2CodecProfileMain, + Mpeg2CodecProfileProfile422, + } +} + // Choose Adaptive to improve subjective video quality for high-motion content. 
// This will cause the service to use fewer B-frames (which infer information // based on other frames) for high-motion portions of the video and more B-frames @@ -21600,13 +26558,21 @@ const ( Mpeg2DynamicSubGopStatic = "STATIC" ) +// Mpeg2DynamicSubGop_Values returns all elements of the Mpeg2DynamicSubGop enum +func Mpeg2DynamicSubGop_Values() []string { + return []string{ + Mpeg2DynamicSubGopAdaptive, + Mpeg2DynamicSubGopStatic, + } +} + // If you are using the console, use the Framerate setting to specify the frame // rate for this output. If you want to keep the same frame rate as the input // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose // Custom, specify your frame rate as a fraction. If you are creating your transcoding -// job sepecification as a JSON file without the console, use FramerateControl +// job specification as a JSON file without the console, use FramerateControl // to specify which value the service uses for the frame rate for this output. // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate // from the input. Choose SPECIFIED if you want the service to use the frame @@ -21619,15 +26585,44 @@ const ( Mpeg2FramerateControlSpecified = "SPECIFIED" ) -// When set to INTERPOLATE, produces smoother motion during frame rate conversion. +// Mpeg2FramerateControl_Values returns all elements of the Mpeg2FramerateControl enum +func Mpeg2FramerateControl_Values() []string { + return []string{ + Mpeg2FramerateControlInitializeFromSource, + Mpeg2FramerateControlSpecified, + } +} + +// Choose the method that you want MediaConvert to use when increasing or decreasing +// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically +// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, +// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a +// smooth picture, but might introduce undesirable video artifacts. For complex +// frame rate conversions, especially if your source video has already been +// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do +// motion-compensated interpolation. FrameFormer chooses the best conversion +// method frame by frame. Note that using FrameFormer increases the transcoding +// time and incurs a significant add-on cost. const ( // Mpeg2FramerateConversionAlgorithmDuplicateDrop is a Mpeg2FramerateConversionAlgorithm enum value Mpeg2FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" // Mpeg2FramerateConversionAlgorithmInterpolate is a Mpeg2FramerateConversionAlgorithm enum value Mpeg2FramerateConversionAlgorithmInterpolate = "INTERPOLATE" + + // Mpeg2FramerateConversionAlgorithmFrameformer is a Mpeg2FramerateConversionAlgorithm enum value + Mpeg2FramerateConversionAlgorithmFrameformer = "FRAMEFORMER" ) +// Mpeg2FramerateConversionAlgorithm_Values returns all elements of the Mpeg2FramerateConversionAlgorithm enum +func Mpeg2FramerateConversionAlgorithm_Values() []string { + return []string{ + Mpeg2FramerateConversionAlgorithmDuplicateDrop, + Mpeg2FramerateConversionAlgorithmInterpolate, + Mpeg2FramerateConversionAlgorithmFrameformer, + } +} + // Indicates if the GOP Size in MPEG2 is specified in frames or seconds. If // seconds the system will convert the GOP Size into a frame count at run time. 
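The new FRAMEFORMER value above extends the MPEG-2 frame rate conversion choices. A hedged sketch of an explicit 23.976-to-29.97 conversion using it, assuming Mpeg2Settings field names that mirror the enums:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Convert to 29.97 fps using motion-compensated interpolation.
	// Per the comment above, FrameFormer adds transcode time and cost.
	mpeg2 := &mediaconvert.Mpeg2Settings{
		FramerateControl:             aws.String(mediaconvert.Mpeg2FramerateControlSpecified),
		FramerateConversionAlgorithm: aws.String(mediaconvert.Mpeg2FramerateConversionAlgorithmFrameformer),
		FramerateNumerator:           aws.Int64(30000),
		FramerateDenominator:         aws.Int64(1001),
	}

	out, _ := json.MarshalIndent(mpeg2, "", "  ")
	fmt.Println(string(out))
}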
const ( @@ -21638,17 +26633,26 @@ const ( Mpeg2GopSizeUnitsSeconds = "SECONDS" ) -// Use Interlace mode (InterlaceMode) to choose the scan line type for the output. -// * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce -// interlaced output with the entire output having the same field polarity (top -// or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default -// Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, -// behavior depends on the input scan type. - If the source is interlaced, the -// output will be interlaced with the same polarity as the source (it will follow -// the source). The output could therefore be a mix of "top field first" and -// "bottom field first". - If the source is progressive, the output will be -// interlaced with "top field first" or "bottom field first" polarity, depending -// on which of the Follow options you chose. +// Mpeg2GopSizeUnits_Values returns all elements of the Mpeg2GopSizeUnits enum +func Mpeg2GopSizeUnits_Values() []string { + return []string{ + Mpeg2GopSizeUnitsFrames, + Mpeg2GopSizeUnitsSeconds, + } +} + +// Choose the scan line type for the output. Keep the default value, Progressive +// (PROGRESSIVE) to create a progressive output, regardless of the scan type +// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) +// to create an output that's interlaced with the same field polarity throughout. +// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) +// to produce outputs with the same field polarity as the source. For jobs that +// have multiple inputs, the output field polarity might change over the course +// of the output. Follow behavior depends on the input scan type. If the source +// is interlaced, the output will be interlaced with the same polarity as the +// source. If the source is progressive, the output will be interlaced with +// top field bottom field first, depending on which of the Follow options you +// choose. const ( // Mpeg2InterlaceModeProgressive is a Mpeg2InterlaceMode enum value Mpeg2InterlaceModeProgressive = "PROGRESSIVE" @@ -21666,6 +26670,17 @@ const ( Mpeg2InterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD" ) +// Mpeg2InterlaceMode_Values returns all elements of the Mpeg2InterlaceMode enum +func Mpeg2InterlaceMode_Values() []string { + return []string{ + Mpeg2InterlaceModeProgressive, + Mpeg2InterlaceModeTopField, + Mpeg2InterlaceModeBottomField, + Mpeg2InterlaceModeFollowTopField, + Mpeg2InterlaceModeFollowBottomField, + } +} + // Use Intra DC precision (Mpeg2IntraDcPrecision) to set quantization precision // for intra-block DC coefficients. If you choose the value auto, the service // will automatically select the precision based on the per-frame compression @@ -21687,9 +26702,24 @@ const ( Mpeg2IntraDcPrecisionIntraDcPrecision11 = "INTRA_DC_PRECISION_11" ) -// Using the API, enable ParFollowSource if you want the service to use the -// pixel aspect ratio from the input. Using the console, do this by choosing -// Follow source for Pixel aspect ratio. +// Mpeg2IntraDcPrecision_Values returns all elements of the Mpeg2IntraDcPrecision enum +func Mpeg2IntraDcPrecision_Values() []string { + return []string{ + Mpeg2IntraDcPrecisionAuto, + Mpeg2IntraDcPrecisionIntraDcPrecision8, + Mpeg2IntraDcPrecisionIntraDcPrecision9, + Mpeg2IntraDcPrecisionIntraDcPrecision10, + Mpeg2IntraDcPrecisionIntraDcPrecision11, + } +} + +// Optional. 
Specify how the service determines the pixel aspect ratio (PAR) +// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), +// uses the PAR from your input video for your output. To specify a different +// PAR in the console, choose any value other than Follow source. To specify +// a different PAR by editing the JSON job specification, choose SPECIFIED. +// When you choose SPECIFIED for this setting, you must also specify values +// for the parNumerator and parDenominator settings. const ( // Mpeg2ParControlInitializeFromSource is a Mpeg2ParControl enum value Mpeg2ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -21698,8 +26728,17 @@ const ( Mpeg2ParControlSpecified = "SPECIFIED" ) -// Use Quality tuning level (Mpeg2QualityTuningLevel) to specifiy whether to -// use single-pass or multipass video encoding. +// Mpeg2ParControl_Values returns all elements of the Mpeg2ParControl enum +func Mpeg2ParControl_Values() []string { + return []string{ + Mpeg2ParControlInitializeFromSource, + Mpeg2ParControlSpecified, + } +} + +// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you +// want to trade off encoding speed for output video quality. The default behavior +// is faster, lower quality, single-pass encoding. const ( // Mpeg2QualityTuningLevelSinglePass is a Mpeg2QualityTuningLevel enum value Mpeg2QualityTuningLevelSinglePass = "SINGLE_PASS" @@ -21708,6 +26747,14 @@ const ( Mpeg2QualityTuningLevelMultiPass = "MULTI_PASS" ) +// Mpeg2QualityTuningLevel_Values returns all elements of the Mpeg2QualityTuningLevel enum +func Mpeg2QualityTuningLevel_Values() []string { + return []string{ + Mpeg2QualityTuningLevelSinglePass, + Mpeg2QualityTuningLevelMultiPass, + } +} + // Use Rate control mode (Mpeg2RateControlMode) to specifiy whether the bitrate // is variable (vbr) or constant (cbr). const ( @@ -21718,6 +26765,14 @@ const ( Mpeg2RateControlModeCbr = "CBR" ) +// Mpeg2RateControlMode_Values returns all elements of the Mpeg2RateControlMode enum +func Mpeg2RateControlMode_Values() []string { + return []string{ + Mpeg2RateControlModeVbr, + Mpeg2RateControlModeCbr, + } +} + // Enable this setting to insert I-frames at scene changes that the service // automatically detects. This improves video quality and is enabled by default. const ( @@ -21728,8 +26783,22 @@ const ( Mpeg2SceneChangeDetectEnabled = "ENABLED" ) -// Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled -// as 25fps, and audio is sped up correspondingly. +// Mpeg2SceneChangeDetect_Values returns all elements of the Mpeg2SceneChangeDetect enum +func Mpeg2SceneChangeDetect_Values() []string { + return []string{ + Mpeg2SceneChangeDetectDisabled, + Mpeg2SceneChangeDetectEnabled, + } +} + +// Ignore this setting unless your input frame rate is 23.976 or 24 frames per +// second (fps). Enable slow PAL to create a 25 fps output. When you enable +// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples +// your audio to keep it synchronized with the video. Note that enabling this +// setting will slightly reduce the duration of your video. Required settings: +// You must also set Framerate to 25. In your JSON job specification, set (framerateControl) +// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to +// 1. 
const ( // Mpeg2SlowPalDisabled is a Mpeg2SlowPal enum value Mpeg2SlowPalDisabled = "DISABLED" @@ -21738,8 +26807,29 @@ const ( Mpeg2SlowPalEnabled = "ENABLED" ) -// Adjust quantization within each frame based on spatial variation of content -// complexity. +// Mpeg2SlowPal_Values returns all elements of the Mpeg2SlowPal enum +func Mpeg2SlowPal_Values() []string { + return []string{ + Mpeg2SlowPalDisabled, + Mpeg2SlowPalEnabled, + } +} + +// Keep the default value, Enabled (ENABLED), to adjust quantization within +// each frame based on spatial variation of content complexity. When you enable +// this feature, the encoder uses fewer bits on areas that can sustain more +// distortion with no noticeable visual degradation and uses more bits on areas +// where any small distortion will be noticeable. For example, complex textured +// blocks are encoded with fewer bits and smooth textured blocks are encoded +// with more bits. Enabling this feature will almost always improve your video +// quality. Note, though, that this feature doesn't take into account where +// the viewer's attention is likely to be. If viewers are likely to be focusing +// their attention on a part of the screen with a lot of complex texture, you +// might choose to disable this feature. Related setting: When you enable spatial +// adaptive quantization, set the value for Adaptive quantization (adaptiveQuantization) +// depending on your content. For homogeneous content, such as cartoons and +// video games, set it to Low. For content with a wider variety of textures, +// set it to High or Higher. const ( // Mpeg2SpatialAdaptiveQuantizationDisabled is a Mpeg2SpatialAdaptiveQuantization enum value Mpeg2SpatialAdaptiveQuantizationDisabled = "DISABLED" @@ -21748,7 +26838,17 @@ const ( Mpeg2SpatialAdaptiveQuantizationEnabled = "ENABLED" ) -// Produces a Type D-10 compatible bitstream (SMPTE 356M-2001). +// Mpeg2SpatialAdaptiveQuantization_Values returns all elements of the Mpeg2SpatialAdaptiveQuantization enum +func Mpeg2SpatialAdaptiveQuantization_Values() []string { + return []string{ + Mpeg2SpatialAdaptiveQuantizationDisabled, + Mpeg2SpatialAdaptiveQuantizationEnabled, + } +} + +// Specify whether this output's video uses the D10 syntax. Keep the default +// value to not use the syntax. Related settings: When you choose D10 (D_10) +// for your MXF profile (profile), you must also set this value to to D10 (D_10). const ( // Mpeg2SyntaxDefault is a Mpeg2Syntax enum value Mpeg2SyntaxDefault = "DEFAULT" @@ -21757,10 +26857,22 @@ const ( Mpeg2SyntaxD10 = "D_10" ) -// Only use Telecine (Mpeg2Telecine) when you set Framerate (Framerate) to 29.970. -// Set Telecine (Mpeg2Telecine) to Hard (hard) to produce a 29.97i output from -// a 23.976 input. Set it to Soft (soft) to produce 23.976 output and leave -// converstion to the player. +// Mpeg2Syntax_Values returns all elements of the Mpeg2Syntax enum +func Mpeg2Syntax_Values() []string { + return []string{ + Mpeg2SyntaxDefault, + Mpeg2SyntaxD10, + } +} + +// When you do frame rate conversion from 23.976 frames per second (fps) to +// 29.97 fps, and your output scan type is interlaced, you can optionally enable +// hard or soft telecine to create a smoother picture. Hard telecine (HARD) +// produces a 29.97i output. Soft telecine (SOFT) produces an output with a +// 23.976 output that signals to the video player device to do the conversion +// during play back. 
When you keep the default value, None (NONE), MediaConvert +// does a standard frame rate conversion to 29.97 without doing anything with +// the field polarity to create a smoother picture. const ( // Mpeg2TelecineNone is a Mpeg2Telecine enum value Mpeg2TelecineNone = "NONE" @@ -21772,8 +26884,28 @@ const ( Mpeg2TelecineHard = "HARD" ) -// Adjust quantization within each frame based on temporal variation of content -// complexity. +// Mpeg2Telecine_Values returns all elements of the Mpeg2Telecine enum +func Mpeg2Telecine_Values() []string { + return []string{ + Mpeg2TelecineNone, + Mpeg2TelecineSoft, + Mpeg2TelecineHard, + } +} + +// Keep the default value, Enabled (ENABLED), to adjust quantization within +// each frame based on temporal variation of content complexity. When you enable +// this feature, the encoder uses fewer bits on areas of the frame that aren't +// moving and uses more bits on complex objects with sharp edges that move a +// lot. For example, this feature improves the readability of text tickers on +// newscasts and scoreboards on sports matches. Enabling this feature will almost +// always improve your video quality. Note, though, that this feature doesn't +// take into account where the viewer's attention is likely to be. If viewers +// are likely to be focusing their attention on a part of the screen that doesn't +// have moving objects with sharp edges, such as sports athletes' faces, you +// might choose to disable this feature. Related setting: When you enable temporal +// quantization, adjust the strength of the filter with the setting Adaptive +// quantization (adaptiveQuantization). const ( // Mpeg2TemporalAdaptiveQuantizationDisabled is a Mpeg2TemporalAdaptiveQuantization enum value Mpeg2TemporalAdaptiveQuantizationDisabled = "DISABLED" @@ -21782,6 +26914,14 @@ const ( Mpeg2TemporalAdaptiveQuantizationEnabled = "ENABLED" ) +// Mpeg2TemporalAdaptiveQuantization_Values returns all elements of the Mpeg2TemporalAdaptiveQuantization enum +func Mpeg2TemporalAdaptiveQuantization_Values() []string { + return []string{ + Mpeg2TemporalAdaptiveQuantizationDisabled, + Mpeg2TemporalAdaptiveQuantizationEnabled, + } +} + // COMBINE_DUPLICATE_STREAMS combines identical audio encoding settings across // a Microsoft Smooth output group into a single audio stream. const ( @@ -21792,6 +26932,14 @@ const ( MsSmoothAudioDeduplicationNone = "NONE" ) +// MsSmoothAudioDeduplication_Values returns all elements of the MsSmoothAudioDeduplication enum +func MsSmoothAudioDeduplication_Values() []string { + return []string{ + MsSmoothAudioDeduplicationCombineDuplicateStreams, + MsSmoothAudioDeduplicationNone, + } +} + // Use Manifest encoding (MsSmoothManifestEncoding) to specify the encoding // format for the server and client manifest. Valid options are utf8 and utf16. const ( @@ -21802,6 +26950,153 @@ const ( MsSmoothManifestEncodingUtf16 = "UTF16" ) +// MsSmoothManifestEncoding_Values returns all elements of the MsSmoothManifestEncoding enum +func MsSmoothManifestEncoding_Values() []string { + return []string{ + MsSmoothManifestEncodingUtf8, + MsSmoothManifestEncodingUtf16, + } +} + +// Optional. When you have AFD signaling set up in your output video stream, +// use this setting to choose whether to also include it in the MXF wrapper. +// Choose Don't copy (NO_COPY) to exclude AFD signaling from the MXF wrapper. +// Choose Copy from video stream (COPY_FROM_VIDEO) to copy the AFD values from +// the video stream for this output to the MXF wrapper. 
Regardless of which +// option you choose, the AFD values remain in the video stream. Related settings: +// To set up your output to include or exclude AFD values, see AfdSignaling, +// under VideoDescription. On the console, find AFD signaling under the output's +// video encoding settings. +const ( + // MxfAfdSignalingNoCopy is a MxfAfdSignaling enum value + MxfAfdSignalingNoCopy = "NO_COPY" + + // MxfAfdSignalingCopyFromVideo is a MxfAfdSignaling enum value + MxfAfdSignalingCopyFromVideo = "COPY_FROM_VIDEO" +) + +// MxfAfdSignaling_Values returns all elements of the MxfAfdSignaling enum +func MxfAfdSignaling_Values() []string { + return []string{ + MxfAfdSignalingNoCopy, + MxfAfdSignalingCopyFromVideo, + } +} + +// Specify the MXF profile, also called shim, for this output. When you choose +// Auto, MediaConvert chooses a profile based on the video codec and resolution. +// For a list of codecs supported with each MXF profile, see https://docs.aws.amazon.com/mediaconvert/latest/ug/codecs-supported-with-each-mxf-profile.html. +// For more information about the automatic selection behavior, see https://docs.aws.amazon.com/mediaconvert/latest/ug/default-automatic-selection-of-mxf-profiles.html. +const ( + // MxfProfileD10 is a MxfProfile enum value + MxfProfileD10 = "D_10" + + // MxfProfileXdcam is a MxfProfile enum value + MxfProfileXdcam = "XDCAM" + + // MxfProfileOp1a is a MxfProfile enum value + MxfProfileOp1a = "OP1A" +) + +// MxfProfile_Values returns all elements of the MxfProfile enum +func MxfProfile_Values() []string { + return []string{ + MxfProfileD10, + MxfProfileXdcam, + MxfProfileOp1a, + } +} + +// Choose the type of Nielsen watermarks that you want in your outputs. When +// you choose NAES 2 and NW (NAES2_AND_NW), you must provide a value for the +// setting SID (sourceId). When you choose CBET (CBET), you must provide a value +// for the setting CSID (cbetSourceId). When you choose NAES 2, NW, and CBET +// (NAES2_AND_NW_AND_CBET), you must provide values for both of these settings. +const ( + // NielsenActiveWatermarkProcessTypeNaes2AndNw is a NielsenActiveWatermarkProcessType enum value + NielsenActiveWatermarkProcessTypeNaes2AndNw = "NAES2_AND_NW" + + // NielsenActiveWatermarkProcessTypeCbet is a NielsenActiveWatermarkProcessType enum value + NielsenActiveWatermarkProcessTypeCbet = "CBET" + + // NielsenActiveWatermarkProcessTypeNaes2AndNwAndCbet is a NielsenActiveWatermarkProcessType enum value + NielsenActiveWatermarkProcessTypeNaes2AndNwAndCbet = "NAES2_AND_NW_AND_CBET" +) + +// NielsenActiveWatermarkProcessType_Values returns all elements of the NielsenActiveWatermarkProcessType enum +func NielsenActiveWatermarkProcessType_Values() []string { + return []string{ + NielsenActiveWatermarkProcessTypeNaes2AndNw, + NielsenActiveWatermarkProcessTypeCbet, + NielsenActiveWatermarkProcessTypeNaes2AndNwAndCbet, + } +} + +// Required. Specify whether your source content already contains Nielsen non-linear +// watermarks. When you set this value to Watermarked (WATERMARKED), the service +// fails the job. Nielsen requires that you add non-linear watermarking to only +// clean content that doesn't already have non-linear Nielsen watermarks. 
+const ( + // NielsenSourceWatermarkStatusTypeClean is a NielsenSourceWatermarkStatusType enum value + NielsenSourceWatermarkStatusTypeClean = "CLEAN" + + // NielsenSourceWatermarkStatusTypeWatermarked is a NielsenSourceWatermarkStatusType enum value + NielsenSourceWatermarkStatusTypeWatermarked = "WATERMARKED" +) + +// NielsenSourceWatermarkStatusType_Values returns all elements of the NielsenSourceWatermarkStatusType enum +func NielsenSourceWatermarkStatusType_Values() []string { + return []string{ + NielsenSourceWatermarkStatusTypeClean, + NielsenSourceWatermarkStatusTypeWatermarked, + } +} + +// To create assets that have the same TIC values in each audio track, keep +// the default value Share TICs (SAME_TICS_PER_TRACK). To create assets that +// have unique TIC values for each audio track, choose Use unique TICs (RESERVE_UNIQUE_TICS_PER_TRACK). +const ( + // NielsenUniqueTicPerAudioTrackTypeReserveUniqueTicsPerTrack is a NielsenUniqueTicPerAudioTrackType enum value + NielsenUniqueTicPerAudioTrackTypeReserveUniqueTicsPerTrack = "RESERVE_UNIQUE_TICS_PER_TRACK" + + // NielsenUniqueTicPerAudioTrackTypeSameTicsPerTrack is a NielsenUniqueTicPerAudioTrackType enum value + NielsenUniqueTicPerAudioTrackTypeSameTicsPerTrack = "SAME_TICS_PER_TRACK" +) + +// NielsenUniqueTicPerAudioTrackType_Values returns all elements of the NielsenUniqueTicPerAudioTrackType enum +func NielsenUniqueTicPerAudioTrackType_Values() []string { + return []string{ + NielsenUniqueTicPerAudioTrackTypeReserveUniqueTicsPerTrack, + NielsenUniqueTicPerAudioTrackTypeSameTicsPerTrack, + } +} + +// Optional. When you set Noise reducer (noiseReducer) to Temporal (TEMPORAL), +// you can use this setting to apply sharpening. The default behavior, Auto +// (AUTO), allows the transcoder to determine whether to apply filtering, depending +// on input type and quality. When you set Noise reducer to Temporal, your output +// bandwidth is reduced. When Post temporal sharpening is also enabled, that +// bandwidth reduction is smaller. +const ( + // NoiseFilterPostTemporalSharpeningDisabled is a NoiseFilterPostTemporalSharpening enum value + NoiseFilterPostTemporalSharpeningDisabled = "DISABLED" + + // NoiseFilterPostTemporalSharpeningEnabled is a NoiseFilterPostTemporalSharpening enum value + NoiseFilterPostTemporalSharpeningEnabled = "ENABLED" + + // NoiseFilterPostTemporalSharpeningAuto is a NoiseFilterPostTemporalSharpening enum value + NoiseFilterPostTemporalSharpeningAuto = "AUTO" +) + +// NoiseFilterPostTemporalSharpening_Values returns all elements of the NoiseFilterPostTemporalSharpening enum +func NoiseFilterPostTemporalSharpening_Values() []string { + return []string{ + NoiseFilterPostTemporalSharpeningDisabled, + NoiseFilterPostTemporalSharpeningEnabled, + NoiseFilterPostTemporalSharpeningAuto, + } +} + // Use Noise reducer filter (NoiseReducerFilter) to select one of the following // spatial image filtering functions. To use this setting, you must also enable // Noise reducer (NoiseReducer). 
* Bilateral preserves edges while reducing @@ -21835,7 +27130,21 @@ const ( NoiseReducerFilterTemporal = "TEMPORAL" ) -// When you request lists of resources, you can optionally specify whether they +// NoiseReducerFilter_Values returns all elements of the NoiseReducerFilter enum +func NoiseReducerFilter_Values() []string { + return []string{ + NoiseReducerFilterBilateral, + NoiseReducerFilterMean, + NoiseReducerFilterGaussian, + NoiseReducerFilterLanczos, + NoiseReducerFilterSharpen, + NoiseReducerFilterConserve, + NoiseReducerFilterSpatial, + NoiseReducerFilterTemporal, + } +} + +// Optional. When you request lists of resources, you can specify whether they // are sorted in ASCENDING or DESCENDING order. Default varies by resource. const ( // OrderAscending is a Order enum value @@ -21845,6 +27154,14 @@ const ( OrderDescending = "DESCENDING" ) +// Order_Values returns all elements of the Order enum +func Order_Values() []string { + return []string{ + OrderAscending, + OrderDescending, + } +} + // Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming, // CMAF) const ( @@ -21864,6 +27181,17 @@ const ( OutputGroupTypeCmafGroupSettings = "CMAF_GROUP_SETTINGS" ) +// OutputGroupType_Values returns all elements of the OutputGroupType enum +func OutputGroupType_Values() []string { + return []string{ + OutputGroupTypeHlsGroupSettings, + OutputGroupTypeDashIsoGroupSettings, + OutputGroupTypeFileGroupSettings, + OutputGroupTypeMsSmoothGroupSettings, + OutputGroupTypeCmafGroupSettings, + } +} + // Selects method of inserting SDT information into output stream. "Follow input // SDT" copies SDT information from input stream to output stream. "Follow input // SDT if present" copies SDT information from input stream to output stream @@ -21884,6 +27212,16 @@ const ( OutputSdtSdtNone = "SDT_NONE" ) +// OutputSdt_Values returns all elements of the OutputSdt enum +func OutputSdt_Values() []string { + return []string{ + OutputSdtSdtFollow, + OutputSdtSdtFollowIfPresent, + OutputSdtSdtManual, + OutputSdtSdtNone, + } +} + // Optional. When you request a list of presets, you can choose to list them // alphabetically by NAME or chronologically by CREATION_DATE. If you don't // specify, the service will list them by name. @@ -21898,6 +27236,15 @@ const ( PresetListBySystem = "SYSTEM" ) +// PresetListBy_Values returns all elements of the PresetListBy enum +func PresetListBy_Values() []string { + return []string{ + PresetListByName, + PresetListByCreationDate, + PresetListBySystem, + } +} + // Specifies whether the pricing plan for the queue is on-demand or reserved. // For on-demand, you pay per minute, billed in increments of .01 minute. For // reserved, you pay for the transcoding capacity of the entire queue, regardless @@ -21911,6 +27258,14 @@ const ( PricingPlanReserved = "RESERVED" ) +// PricingPlan_Values returns all elements of the PricingPlan enum +func PricingPlan_Values() []string { + return []string{ + PricingPlanOnDemand, + PricingPlanReserved, + } +} + // Use Profile (ProResCodecProfile) to specifiy the type of Apple ProRes codec // to use for this output. 
const ( @@ -21927,13 +27282,23 @@ const ( ProresCodecProfileAppleProres422Proxy = "APPLE_PRORES_422_PROXY" ) +// ProresCodecProfile_Values returns all elements of the ProresCodecProfile enum +func ProresCodecProfile_Values() []string { + return []string{ + ProresCodecProfileAppleProres422, + ProresCodecProfileAppleProres422Hq, + ProresCodecProfileAppleProres422Lt, + ProresCodecProfileAppleProres422Proxy, + } +} + // If you are using the console, use the Framerate setting to specify the frame // rate for this output. If you want to keep the same frame rate as the input // video, choose Follow source. If you want to do frame rate conversion, choose // a frame rate from the dropdown list or choose Custom. The framerates shown // in the dropdown list are decimal approximations of fractions. If you choose // Custom, specify your frame rate as a fraction. If you are creating your transcoding -// job sepecification as a JSON file without the console, use FramerateControl +// job specification as a JSON file without the console, use FramerateControl // to specify which value the service uses for the frame rate for this output. // Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate // from the input. Choose SPECIFIED if you want the service to use the frame @@ -21946,26 +27311,56 @@ const ( ProresFramerateControlSpecified = "SPECIFIED" ) -// When set to INTERPOLATE, produces smoother motion during frame rate conversion. +// ProresFramerateControl_Values returns all elements of the ProresFramerateControl enum +func ProresFramerateControl_Values() []string { + return []string{ + ProresFramerateControlInitializeFromSource, + ProresFramerateControlSpecified, + } +} + +// Choose the method that you want MediaConvert to use when increasing or decreasing +// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically +// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, +// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a +// smooth picture, but might introduce undesirable video artifacts. For complex +// frame rate conversions, especially if your source video has already been +// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do +// motion-compensated interpolation. FrameFormer chooses the best conversion +// method frame by frame. Note that using FrameFormer increases the transcoding +// time and incurs a significant add-on cost. const ( // ProresFramerateConversionAlgorithmDuplicateDrop is a ProresFramerateConversionAlgorithm enum value ProresFramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" // ProresFramerateConversionAlgorithmInterpolate is a ProresFramerateConversionAlgorithm enum value ProresFramerateConversionAlgorithmInterpolate = "INTERPOLATE" + + // ProresFramerateConversionAlgorithmFrameformer is a ProresFramerateConversionAlgorithm enum value + ProresFramerateConversionAlgorithmFrameformer = "FRAMEFORMER" ) -// Use Interlace mode (InterlaceMode) to choose the scan line type for the output. -// * Top Field First (TOP_FIELD) and Bottom Field First (BOTTOM_FIELD) produce -// interlaced output with the entire output having the same field polarity (top -// or bottom first). * Follow, Default Top (FOLLOW_TOP_FIELD) and Follow, Default -// Bottom (FOLLOW_BOTTOM_FIELD) use the same field polarity as the source. Therefore, -// behavior depends on the input scan type. 
- If the source is interlaced, the -// output will be interlaced with the same polarity as the source (it will follow -// the source). The output could therefore be a mix of "top field first" and -// "bottom field first". - If the source is progressive, the output will be -// interlaced with "top field first" or "bottom field first" polarity, depending -// on which of the Follow options you chose. +// ProresFramerateConversionAlgorithm_Values returns all elements of the ProresFramerateConversionAlgorithm enum +func ProresFramerateConversionAlgorithm_Values() []string { + return []string{ + ProresFramerateConversionAlgorithmDuplicateDrop, + ProresFramerateConversionAlgorithmInterpolate, + ProresFramerateConversionAlgorithmFrameformer, + } +} + +// Choose the scan line type for the output. Keep the default value, Progressive +// (PROGRESSIVE) to create a progressive output, regardless of the scan type +// of your input. Use Top field first (TOP_FIELD) or Bottom field first (BOTTOM_FIELD) +// to create an output that's interlaced with the same field polarity throughout. +// Use Follow, default top (FOLLOW_TOP_FIELD) or Follow, default bottom (FOLLOW_BOTTOM_FIELD) +// to produce outputs with the same field polarity as the source. For jobs that +// have multiple inputs, the output field polarity might change over the course +// of the output. Follow behavior depends on the input scan type. If the source +// is interlaced, the output will be interlaced with the same polarity as the +// source. If the source is progressive, the output will be interlaced with +// top field bottom field first, depending on which of the Follow options you +// choose. const ( // ProresInterlaceModeProgressive is a ProresInterlaceMode enum value ProresInterlaceModeProgressive = "PROGRESSIVE" @@ -21983,11 +27378,24 @@ const ( ProresInterlaceModeFollowBottomField = "FOLLOW_BOTTOM_FIELD" ) -// Use (ProresParControl) to specify how the service determines the pixel aspect -// ratio. Set to Follow source (INITIALIZE_FROM_SOURCE) to use the pixel aspect -// ratio from the input. To specify a different pixel aspect ratio: Using the -// console, choose it from the dropdown menu. Using the API, set ProresParControl -// to (SPECIFIED) and provide for (ParNumerator) and (ParDenominator). +// ProresInterlaceMode_Values returns all elements of the ProresInterlaceMode enum +func ProresInterlaceMode_Values() []string { + return []string{ + ProresInterlaceModeProgressive, + ProresInterlaceModeTopField, + ProresInterlaceModeBottomField, + ProresInterlaceModeFollowTopField, + ProresInterlaceModeFollowBottomField, + } +} + +// Optional. Specify how the service determines the pixel aspect ratio (PAR) +// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), +// uses the PAR from your input video for your output. To specify a different +// PAR in the console, choose any value other than Follow source. To specify +// a different PAR by editing the JSON job specification, choose SPECIFIED. +// When you choose SPECIFIED for this setting, you must also specify values +// for the parNumerator and parDenominator settings. const ( // ProresParControlInitializeFromSource is a ProresParControl enum value ProresParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" @@ -21996,8 +27404,22 @@ const ( ProresParControlSpecified = "SPECIFIED" ) -// Enables Slow PAL rate conversion. 23.976fps and 24fps input is relabeled -// as 25fps, and audio is sped up correspondingly. 
+// ProresParControl_Values returns all elements of the ProresParControl enum +func ProresParControl_Values() []string { + return []string{ + ProresParControlInitializeFromSource, + ProresParControlSpecified, + } +} + +// Ignore this setting unless your input frame rate is 23.976 or 24 frames per +// second (fps). Enable slow PAL to create a 25 fps output. When you enable +// slow PAL, MediaConvert relabels the video frames to 25 fps and resamples +// your audio to keep it synchronized with the video. Note that enabling this +// setting will slightly reduce the duration of your video. Required settings: +// You must also set Framerate to 25. In your JSON job specification, set (framerateControl) +// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to +// 1. const ( // ProresSlowPalDisabled is a ProresSlowPal enum value ProresSlowPalDisabled = "DISABLED" @@ -22006,10 +27428,20 @@ const ( ProresSlowPalEnabled = "ENABLED" ) -// Only use Telecine (ProresTelecine) when you set Framerate (Framerate) to -// 29.970. Set Telecine (ProresTelecine) to Hard (hard) to produce a 29.97i -// output from a 23.976 input. Set it to Soft (soft) to produce 23.976 output -// and leave converstion to the player. +// ProresSlowPal_Values returns all elements of the ProresSlowPal enum +func ProresSlowPal_Values() []string { + return []string{ + ProresSlowPalDisabled, + ProresSlowPalEnabled, + } +} + +// When you do frame rate conversion from 23.976 frames per second (fps) to +// 29.97 fps, and your output scan type is interlaced, you can optionally enable +// hard telecine (HARD) to create a smoother picture. When you keep the default +// value, None (NONE), MediaConvert does a standard frame rate conversion to +// 29.97 without doing anything with the field polarity to create a smoother +// picture. const ( // ProresTelecineNone is a ProresTelecine enum value ProresTelecineNone = "NONE" @@ -22018,6 +27450,14 @@ const ( ProresTelecineHard = "HARD" ) +// ProresTelecine_Values returns all elements of the ProresTelecine enum +func ProresTelecine_Values() []string { + return []string{ + ProresTelecineNone, + ProresTelecineHard, + } +} + // Optional. When you request a list of queues, you can choose to list them // alphabetically by NAME or chronologically by CREATION_DATE. If you don't // specify, the service will list them by creation date. @@ -22029,6 +27469,14 @@ const ( QueueListByCreationDate = "CREATION_DATE" ) +// QueueListBy_Values returns all elements of the QueueListBy enum +func QueueListBy_Values() []string { + return []string{ + QueueListByName, + QueueListByCreationDate, + } +} + // Queues can be ACTIVE or PAUSED. If you pause a queue, jobs in that queue // won't begin. Jobs that are running when you pause a queue continue to run // until they finish or result in an error. @@ -22040,6 +27488,14 @@ const ( QueueStatusPaused = "PAUSED" ) +// QueueStatus_Values returns all elements of the QueueStatus enum +func QueueStatus_Values() []string { + return []string{ + QueueStatusActive, + QueueStatusPaused, + } +} + // Specifies whether the term of your reserved queue pricing plan is automatically // extended (AUTO_RENEW) or expires (EXPIRE) at the end of the term. 
const ( @@ -22050,6 +27506,14 @@ const ( RenewalTypeExpire = "EXPIRE" ) +// RenewalType_Values returns all elements of the RenewalType enum +func RenewalType_Values() []string { + return []string{ + RenewalTypeAutoRenew, + RenewalTypeExpire, + } +} + // Specifies whether the pricing plan for your reserved queue is ACTIVE or EXPIRED. const ( // ReservationPlanStatusActive is a ReservationPlanStatus enum value @@ -22059,6 +27523,14 @@ const ( ReservationPlanStatusExpired = "EXPIRED" ) +// ReservationPlanStatus_Values returns all elements of the ReservationPlanStatus enum +func ReservationPlanStatus_Values() []string { + return []string{ + ReservationPlanStatusActive, + ReservationPlanStatusExpired, + } +} + // Use Respond to AFD (RespondToAfd) to specify how the service changes the // video itself in response to AFD values in the input. * Choose Respond to // clip the input video frame according to the AFD value, input display aspect @@ -22078,6 +27550,15 @@ const ( RespondToAfdPassthrough = "PASSTHROUGH" ) +// RespondToAfd_Values returns all elements of the RespondToAfd enum +func RespondToAfd_Values() []string { + return []string{ + RespondToAfdNone, + RespondToAfdRespond, + RespondToAfdPassthrough, + } +} + // Choose an Amazon S3 canned ACL for MediaConvert to apply to this output. const ( // S3ObjectCannedAclPublicRead is a S3ObjectCannedAcl enum value @@ -22093,6 +27574,16 @@ const ( S3ObjectCannedAclBucketOwnerFullControl = "BUCKET_OWNER_FULL_CONTROL" ) +// S3ObjectCannedAcl_Values returns all elements of the S3ObjectCannedAcl enum +func S3ObjectCannedAcl_Values() []string { + return []string{ + S3ObjectCannedAclPublicRead, + S3ObjectCannedAclAuthenticatedRead, + S3ObjectCannedAclBucketOwnerRead, + S3ObjectCannedAclBucketOwnerFullControl, + } +} + // Specify how you want your data keys managed. AWS uses data keys to encrypt // your content. AWS also encrypts the data keys themselves, using a customer // master key (CMK), and then stores the encrypted data keys alongside your @@ -22112,6 +27603,14 @@ const ( S3ServerSideEncryptionTypeServerSideEncryptionKms = "SERVER_SIDE_ENCRYPTION_KMS" ) +// S3ServerSideEncryptionType_Values returns all elements of the S3ServerSideEncryptionType enum +func S3ServerSideEncryptionType_Values() []string { + return []string{ + S3ServerSideEncryptionTypeServerSideEncryptionS3, + S3ServerSideEncryptionTypeServerSideEncryptionKms, + } +} + // Specify how the service handles outputs that have a different aspect ratio // from the input aspect ratio. Choose Stretch to output (STRETCH_TO_OUTPUT) // to have the service stretch your video image to fit. Keep the setting Default @@ -22126,6 +27625,14 @@ const ( ScalingBehaviorStretchToOutput = "STRETCH_TO_OUTPUT" ) +// ScalingBehavior_Values returns all elements of the ScalingBehavior enum +func ScalingBehavior_Values() []string { + return []string{ + ScalingBehaviorDefault, + ScalingBehaviorStretchToOutput, + } +} + // Set Framerate (SccDestinationFramerate) to make sure that the captions and // the video are synchronized in the output. Specify a frame rate that matches // the frame rate of the associated video. 
If the video frame rate is 29.97, @@ -22149,6 +27656,17 @@ const ( SccDestinationFramerateFramerate2997NonDropframe = "FRAMERATE_29_97_NON_DROPFRAME" ) +// SccDestinationFramerate_Values returns all elements of the SccDestinationFramerate enum +func SccDestinationFramerate_Values() []string { + return []string{ + SccDestinationFramerateFramerate2397, + SccDestinationFramerateFramerate24, + SccDestinationFramerateFramerate25, + SccDestinationFramerateFramerate2997Dropframe, + SccDestinationFramerateFramerate2997NonDropframe, + } +} + // Enable this setting when you run a test job to estimate how many reserved // transcoding slots (RTS) you need. When this is enabled, MediaConvert runs // your job from an on-demand queue with similar performance to what you will @@ -22161,6 +27679,14 @@ const ( SimulateReservedQueueEnabled = "ENABLED" ) +// SimulateReservedQueue_Values returns all elements of the SimulateReservedQueue enum +func SimulateReservedQueue_Values() []string { + return []string{ + SimulateReservedQueueDisabled, + SimulateReservedQueueEnabled, + } +} + // Specify how often MediaConvert sends STATUS_UPDATE events to Amazon CloudWatch // Events. Set the interval, in seconds, between status updates. MediaConvert // sends an update at this interval from the time the service begins processing @@ -22212,6 +27738,27 @@ const ( StatusUpdateIntervalSeconds600 = "SECONDS_600" ) +// StatusUpdateInterval_Values returns all elements of the StatusUpdateInterval enum +func StatusUpdateInterval_Values() []string { + return []string{ + StatusUpdateIntervalSeconds10, + StatusUpdateIntervalSeconds12, + StatusUpdateIntervalSeconds15, + StatusUpdateIntervalSeconds20, + StatusUpdateIntervalSeconds30, + StatusUpdateIntervalSeconds60, + StatusUpdateIntervalSeconds120, + StatusUpdateIntervalSeconds180, + StatusUpdateIntervalSeconds240, + StatusUpdateIntervalSeconds300, + StatusUpdateIntervalSeconds360, + StatusUpdateIntervalSeconds420, + StatusUpdateIntervalSeconds480, + StatusUpdateIntervalSeconds540, + StatusUpdateIntervalSeconds600, + } +} + // A page type as defined in the standard ETSI EN 300 468, Table 94 const ( // TeletextPageTypePageTypeInitial is a TeletextPageType enum value @@ -22230,6 +27777,17 @@ const ( TeletextPageTypePageTypeHearingImpairedSubtitle = "PAGE_TYPE_HEARING_IMPAIRED_SUBTITLE" ) +// TeletextPageType_Values returns all elements of the TeletextPageType enum +func TeletextPageType_Values() []string { + return []string{ + TeletextPageTypePageTypeInitial, + TeletextPageTypePageTypeSubtitle, + TeletextPageTypePageTypeAddlInfo, + TeletextPageTypePageTypeProgramSchedule, + TeletextPageTypePageTypeHearingImpairedSubtitle, + } +} + // Use Position (Position) under under Timecode burn-in (TimecodeBurnIn) to // specify the location the burned-in timecode on output video. const ( @@ -22261,6 +27819,21 @@ const ( TimecodeBurninPositionBottomRight = "BOTTOM_RIGHT" ) +// TimecodeBurninPosition_Values returns all elements of the TimecodeBurninPosition enum +func TimecodeBurninPosition_Values() []string { + return []string{ + TimecodeBurninPositionTopCenter, + TimecodeBurninPositionTopLeft, + TimecodeBurninPositionTopRight, + TimecodeBurninPositionMiddleLeft, + TimecodeBurninPositionMiddleCenter, + TimecodeBurninPositionMiddleRight, + TimecodeBurninPositionBottomLeft, + TimecodeBurninPositionBottomCenter, + TimecodeBurninPositionBottomRight, + } +} + // Use Source (TimecodeSource) to set how timecodes are handled within this // job. 
To make sure that your video, audio, captions, and markers are synchronized // and that time-based features, such as image inserter, work correctly, choose @@ -22283,6 +27856,15 @@ const ( TimecodeSourceSpecifiedstart = "SPECIFIEDSTART" ) +// TimecodeSource_Values returns all elements of the TimecodeSource enum +func TimecodeSource_Values() []string { + return []string{ + TimecodeSourceEmbedded, + TimecodeSourceZerobased, + TimecodeSourceSpecifiedstart, + } +} + // Applies only to HLS outputs. Use this setting to specify whether the service // inserts the ID3 timed metadata from the input in this output. const ( @@ -22293,8 +27875,16 @@ const ( TimedMetadataNone = "NONE" ) +// TimedMetadata_Values returns all elements of the TimedMetadata enum +func TimedMetadata_Values() []string { + return []string{ + TimedMetadataPassthrough, + TimedMetadataNone, + } +} + // Pass through style and position information from a TTML-like input source -// (TTML, SMPTE-TT, CFF-TT) to the CFF-TT output or TTML output. +// (TTML, SMPTE-TT) to the TTML output. const ( // TtmlStylePassthroughEnabled is a TtmlStylePassthrough enum value TtmlStylePassthroughEnabled = "ENABLED" @@ -22303,6 +27893,14 @@ const ( TtmlStylePassthroughDisabled = "DISABLED" ) +// TtmlStylePassthrough_Values returns all elements of the TtmlStylePassthrough enum +func TtmlStylePassthrough_Values() []string { + return []string{ + TtmlStylePassthroughEnabled, + TtmlStylePassthroughDisabled, + } +} + const ( // TypeSystem is a Type enum value TypeSystem = "SYSTEM" @@ -22311,8 +27909,170 @@ const ( TypeCustom = "CUSTOM" ) +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeSystem, + TypeCustom, + } +} + +// Specify the VC3 class to choose the quality characteristics for this output. +// VC3 class, together with the settings Framerate (framerateNumerator and framerateDenominator) +// and Resolution (height and width), determine your output bitrate. For example, +// say that your video resolution is 1920x1080 and your framerate is 29.97. +// Then Class 145 (CLASS_145) gives you an output with a bitrate of approximately +// 145 Mbps and Class 220 (CLASS_220) gives you and output with a bitrate of +// approximately 220 Mbps. VC3 class also specifies the color bit depth of your +// output. +const ( + // Vc3ClassClass1458bit is a Vc3Class enum value + Vc3ClassClass1458bit = "CLASS_145_8BIT" + + // Vc3ClassClass2208bit is a Vc3Class enum value + Vc3ClassClass2208bit = "CLASS_220_8BIT" + + // Vc3ClassClass22010bit is a Vc3Class enum value + Vc3ClassClass22010bit = "CLASS_220_10BIT" +) + +// Vc3Class_Values returns all elements of the Vc3Class enum +func Vc3Class_Values() []string { + return []string{ + Vc3ClassClass1458bit, + Vc3ClassClass2208bit, + Vc3ClassClass22010bit, + } +} + +// If you are using the console, use the Framerate setting to specify the frame +// rate for this output. If you want to keep the same frame rate as the input +// video, choose Follow source. If you want to do frame rate conversion, choose +// a frame rate from the dropdown list or choose Custom. The framerates shown +// in the dropdown list are decimal approximations of fractions. If you choose +// Custom, specify your frame rate as a fraction. If you are creating your transcoding +// job specification as a JSON file without the console, use FramerateControl +// to specify which value the service uses for the frame rate for this output. 
+// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate +// from the input. Choose SPECIFIED if you want the service to use the frame +// rate you specify in the settings FramerateNumerator and FramerateDenominator. +const ( + // Vc3FramerateControlInitializeFromSource is a Vc3FramerateControl enum value + Vc3FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" + + // Vc3FramerateControlSpecified is a Vc3FramerateControl enum value + Vc3FramerateControlSpecified = "SPECIFIED" +) + +// Vc3FramerateControl_Values returns all elements of the Vc3FramerateControl enum +func Vc3FramerateControl_Values() []string { + return []string{ + Vc3FramerateControlInitializeFromSource, + Vc3FramerateControlSpecified, + } +} + +// Choose the method that you want MediaConvert to use when increasing or decreasing +// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically +// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, +// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a +// smooth picture, but might introduce undesirable video artifacts. For complex +// frame rate conversions, especially if your source video has already been +// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do +// motion-compensated interpolation. FrameFormer chooses the best conversion +// method frame by frame. Note that using FrameFormer increases the transcoding +// time and incurs a significant add-on cost. +const ( + // Vc3FramerateConversionAlgorithmDuplicateDrop is a Vc3FramerateConversionAlgorithm enum value + Vc3FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" + + // Vc3FramerateConversionAlgorithmInterpolate is a Vc3FramerateConversionAlgorithm enum value + Vc3FramerateConversionAlgorithmInterpolate = "INTERPOLATE" + + // Vc3FramerateConversionAlgorithmFrameformer is a Vc3FramerateConversionAlgorithm enum value + Vc3FramerateConversionAlgorithmFrameformer = "FRAMEFORMER" +) + +// Vc3FramerateConversionAlgorithm_Values returns all elements of the Vc3FramerateConversionAlgorithm enum +func Vc3FramerateConversionAlgorithm_Values() []string { + return []string{ + Vc3FramerateConversionAlgorithmDuplicateDrop, + Vc3FramerateConversionAlgorithmInterpolate, + Vc3FramerateConversionAlgorithmFrameformer, + } +} + +// Optional. Choose the scan line type for this output. If you don't specify +// a value, MediaConvert will create a progressive output. +const ( + // Vc3InterlaceModeInterlaced is a Vc3InterlaceMode enum value + Vc3InterlaceModeInterlaced = "INTERLACED" + + // Vc3InterlaceModeProgressive is a Vc3InterlaceMode enum value + Vc3InterlaceModeProgressive = "PROGRESSIVE" +) + +// Vc3InterlaceMode_Values returns all elements of the Vc3InterlaceMode enum +func Vc3InterlaceMode_Values() []string { + return []string{ + Vc3InterlaceModeInterlaced, + Vc3InterlaceModeProgressive, + } +} + +// Ignore this setting unless your input frame rate is 23.976 or 24 frames per +// second (fps). Enable slow PAL to create a 25 fps output by relabeling the +// video frames and resampling your audio. Note that enabling this setting will +// slightly reduce the duration of your video. Related settings: You must also +// set Framerate to 25. In your JSON job specification, set (framerateControl) +// to (SPECIFIED), (framerateNumerator) to 25 and (framerateDenominator) to +// 1. 
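The slow PAL note above pairs the flag with an explicit 25/1 frame rate. Below is a minimal sketch of what that pairing looks like when building a Vc3Settings block against this SDK; the Vc3Settings field names used here (SlowPal, FramerateControl, FramerateNumerator, FramerateDenominator) are assumed from the shapes added in this patch rather than quoted from it.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediaconvert"
)

func main() {
	// Slow PAL relabels 23.976/24 fps frames as 25 fps, so the frame rate is
	// pinned to exactly 25/1 alongside enabling the flag.
	vc3 := &mediaconvert.Vc3Settings{
		SlowPal:              aws.String(mediaconvert.Vc3SlowPalEnabled),
		FramerateControl:     aws.String(mediaconvert.Vc3FramerateControlSpecified),
		FramerateNumerator:   aws.Int64(25),
		FramerateDenominator: aws.Int64(1),
	}
	fmt.Println(vc3)
}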
+const ( + // Vc3SlowPalDisabled is a Vc3SlowPal enum value + Vc3SlowPalDisabled = "DISABLED" + + // Vc3SlowPalEnabled is a Vc3SlowPal enum value + Vc3SlowPalEnabled = "ENABLED" +) + +// Vc3SlowPal_Values returns all elements of the Vc3SlowPal enum +func Vc3SlowPal_Values() []string { + return []string{ + Vc3SlowPalDisabled, + Vc3SlowPalEnabled, + } +} + +// When you do frame rate conversion from 23.976 frames per second (fps) to +// 29.97 fps, and your output scan type is interlaced, you can optionally enable +// hard telecine (HARD) to create a smoother picture. When you keep the default +// value, None (NONE), MediaConvert does a standard frame rate conversion to +// 29.97 without doing anything with the field polarity to create a smoother +// picture. +const ( + // Vc3TelecineNone is a Vc3Telecine enum value + Vc3TelecineNone = "NONE" + + // Vc3TelecineHard is a Vc3Telecine enum value + Vc3TelecineHard = "HARD" +) + +// Vc3Telecine_Values returns all elements of the Vc3Telecine enum +func Vc3Telecine_Values() []string { + return []string{ + Vc3TelecineNone, + Vc3TelecineHard, + } +} + // Type of video codec const ( + // VideoCodecAv1 is a VideoCodec enum value + VideoCodecAv1 = "AV1" + + // VideoCodecAvcIntra is a VideoCodec enum value + VideoCodecAvcIntra = "AVC_INTRA" + // VideoCodecFrameCapture is a VideoCodec enum value VideoCodecFrameCapture = "FRAME_CAPTURE" @@ -22327,8 +28087,33 @@ const ( // VideoCodecProres is a VideoCodec enum value VideoCodecProres = "PRORES" + + // VideoCodecVc3 is a VideoCodec enum value + VideoCodecVc3 = "VC3" + + // VideoCodecVp8 is a VideoCodec enum value + VideoCodecVp8 = "VP8" + + // VideoCodecVp9 is a VideoCodec enum value + VideoCodecVp9 = "VP9" ) +// VideoCodec_Values returns all elements of the VideoCodec enum +func VideoCodec_Values() []string { + return []string{ + VideoCodecAv1, + VideoCodecAvcIntra, + VideoCodecFrameCapture, + VideoCodecH264, + VideoCodecH265, + VideoCodecMpeg2, + VideoCodecProres, + VideoCodecVc3, + VideoCodecVp8, + VideoCodecVp9, + } +} + // Applies only to H.264, H.265, MPEG2, and ProRes outputs. Only enable Timecode // insertion when the input frame rate is identical to the output frame rate. // To include timecodes in this output, set Timecode insertion (VideoTimecodeInsertion) @@ -22349,6 +28134,271 @@ const ( VideoTimecodeInsertionPicTimingSei = "PIC_TIMING_SEI" ) +// VideoTimecodeInsertion_Values returns all elements of the VideoTimecodeInsertion enum +func VideoTimecodeInsertion_Values() []string { + return []string{ + VideoTimecodeInsertionDisabled, + VideoTimecodeInsertionPicTimingSei, + } +} + +// If you are using the console, use the Framerate setting to specify the frame +// rate for this output. If you want to keep the same frame rate as the input +// video, choose Follow source. If you want to do frame rate conversion, choose +// a frame rate from the dropdown list or choose Custom. The framerates shown +// in the dropdown list are decimal approximations of fractions. If you choose +// Custom, specify your frame rate as a fraction. If you are creating your transcoding +// job specification as a JSON file without the console, use FramerateControl +// to specify which value the service uses for the frame rate for this output. +// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate +// from the input. Choose SPECIFIED if you want the service to use the frame +// rate you specify in the settings FramerateNumerator and FramerateDenominator. 
+const ( + // Vp8FramerateControlInitializeFromSource is a Vp8FramerateControl enum value + Vp8FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" + + // Vp8FramerateControlSpecified is a Vp8FramerateControl enum value + Vp8FramerateControlSpecified = "SPECIFIED" +) + +// Vp8FramerateControl_Values returns all elements of the Vp8FramerateControl enum +func Vp8FramerateControl_Values() []string { + return []string{ + Vp8FramerateControlInitializeFromSource, + Vp8FramerateControlSpecified, + } +} + +// Choose the method that you want MediaConvert to use when increasing or decreasing +// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically +// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, +// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a +// smooth picture, but might introduce undesirable video artifacts. For complex +// frame rate conversions, especially if your source video has already been +// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do +// motion-compensated interpolation. FrameFormer chooses the best conversion +// method frame by frame. Note that using FrameFormer increases the transcoding +// time and incurs a significant add-on cost. +const ( + // Vp8FramerateConversionAlgorithmDuplicateDrop is a Vp8FramerateConversionAlgorithm enum value + Vp8FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" + + // Vp8FramerateConversionAlgorithmInterpolate is a Vp8FramerateConversionAlgorithm enum value + Vp8FramerateConversionAlgorithmInterpolate = "INTERPOLATE" + + // Vp8FramerateConversionAlgorithmFrameformer is a Vp8FramerateConversionAlgorithm enum value + Vp8FramerateConversionAlgorithmFrameformer = "FRAMEFORMER" +) + +// Vp8FramerateConversionAlgorithm_Values returns all elements of the Vp8FramerateConversionAlgorithm enum +func Vp8FramerateConversionAlgorithm_Values() []string { + return []string{ + Vp8FramerateConversionAlgorithmDuplicateDrop, + Vp8FramerateConversionAlgorithmInterpolate, + Vp8FramerateConversionAlgorithmFrameformer, + } +} + +// Optional. Specify how the service determines the pixel aspect ratio (PAR) +// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), +// uses the PAR from your input video for your output. To specify a different +// PAR in the console, choose any value other than Follow source. To specify +// a different PAR by editing the JSON job specification, choose SPECIFIED. +// When you choose SPECIFIED for this setting, you must also specify values +// for the parNumerator and parDenominator settings. +const ( + // Vp8ParControlInitializeFromSource is a Vp8ParControl enum value + Vp8ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" + + // Vp8ParControlSpecified is a Vp8ParControl enum value + Vp8ParControlSpecified = "SPECIFIED" +) + +// Vp8ParControl_Values returns all elements of the Vp8ParControl enum +func Vp8ParControl_Values() []string { + return []string{ + Vp8ParControlInitializeFromSource, + Vp8ParControlSpecified, + } +} + +// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you +// want to trade off encoding speed for output video quality. The default behavior +// is faster, lower quality, multi-pass encoding. 
+const ( + // Vp8QualityTuningLevelMultiPass is a Vp8QualityTuningLevel enum value + Vp8QualityTuningLevelMultiPass = "MULTI_PASS" + + // Vp8QualityTuningLevelMultiPassHq is a Vp8QualityTuningLevel enum value + Vp8QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ" +) + +// Vp8QualityTuningLevel_Values returns all elements of the Vp8QualityTuningLevel enum +func Vp8QualityTuningLevel_Values() []string { + return []string{ + Vp8QualityTuningLevelMultiPass, + Vp8QualityTuningLevelMultiPassHq, + } +} + +// With the VP8 codec, you can use only the variable bitrate (VBR) rate control +// mode. +const ( + // Vp8RateControlModeVbr is a Vp8RateControlMode enum value + Vp8RateControlModeVbr = "VBR" +) + +// Vp8RateControlMode_Values returns all elements of the Vp8RateControlMode enum +func Vp8RateControlMode_Values() []string { + return []string{ + Vp8RateControlModeVbr, + } +} + +// If you are using the console, use the Framerate setting to specify the frame +// rate for this output. If you want to keep the same frame rate as the input +// video, choose Follow source. If you want to do frame rate conversion, choose +// a frame rate from the dropdown list or choose Custom. The framerates shown +// in the dropdown list are decimal approximations of fractions. If you choose +// Custom, specify your frame rate as a fraction. If you are creating your transcoding +// job specification as a JSON file without the console, use FramerateControl +// to specify which value the service uses for the frame rate for this output. +// Choose INITIALIZE_FROM_SOURCE if you want the service to use the frame rate +// from the input. Choose SPECIFIED if you want the service to use the frame +// rate you specify in the settings FramerateNumerator and FramerateDenominator. +const ( + // Vp9FramerateControlInitializeFromSource is a Vp9FramerateControl enum value + Vp9FramerateControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" + + // Vp9FramerateControlSpecified is a Vp9FramerateControl enum value + Vp9FramerateControlSpecified = "SPECIFIED" +) + +// Vp9FramerateControl_Values returns all elements of the Vp9FramerateControl enum +func Vp9FramerateControl_Values() []string { + return []string{ + Vp9FramerateControlInitializeFromSource, + Vp9FramerateControlSpecified, + } +} + +// Choose the method that you want MediaConvert to use when increasing or decreasing +// the frame rate. We recommend using drop duplicate (DUPLICATE_DROP) for numerically +// simple conversions, such as 60 fps to 30 fps. For numerically complex conversions, +// you can use interpolate (INTERPOLATE) to avoid stutter. This results in a +// smooth picture, but might introduce undesirable video artifacts. For complex +// frame rate conversions, especially if your source video has already been +// converted from its original cadence, use FrameFormer (FRAMEFORMER) to do +// motion-compensated interpolation. FrameFormer chooses the best conversion +// method frame by frame. Note that using FrameFormer increases the transcoding +// time and incurs a significant add-on cost. 
+const ( + // Vp9FramerateConversionAlgorithmDuplicateDrop is a Vp9FramerateConversionAlgorithm enum value + Vp9FramerateConversionAlgorithmDuplicateDrop = "DUPLICATE_DROP" + + // Vp9FramerateConversionAlgorithmInterpolate is a Vp9FramerateConversionAlgorithm enum value + Vp9FramerateConversionAlgorithmInterpolate = "INTERPOLATE" + + // Vp9FramerateConversionAlgorithmFrameformer is a Vp9FramerateConversionAlgorithm enum value + Vp9FramerateConversionAlgorithmFrameformer = "FRAMEFORMER" +) + +// Vp9FramerateConversionAlgorithm_Values returns all elements of the Vp9FramerateConversionAlgorithm enum +func Vp9FramerateConversionAlgorithm_Values() []string { + return []string{ + Vp9FramerateConversionAlgorithmDuplicateDrop, + Vp9FramerateConversionAlgorithmInterpolate, + Vp9FramerateConversionAlgorithmFrameformer, + } +} + +// Optional. Specify how the service determines the pixel aspect ratio (PAR) +// for this output. The default behavior, Follow source (INITIALIZE_FROM_SOURCE), +// uses the PAR from your input video for your output. To specify a different +// PAR in the console, choose any value other than Follow source. To specify +// a different PAR by editing the JSON job specification, choose SPECIFIED. +// When you choose SPECIFIED for this setting, you must also specify values +// for the parNumerator and parDenominator settings. +const ( + // Vp9ParControlInitializeFromSource is a Vp9ParControl enum value + Vp9ParControlInitializeFromSource = "INITIALIZE_FROM_SOURCE" + + // Vp9ParControlSpecified is a Vp9ParControl enum value + Vp9ParControlSpecified = "SPECIFIED" +) + +// Vp9ParControl_Values returns all elements of the Vp9ParControl enum +func Vp9ParControl_Values() []string { + return []string{ + Vp9ParControlInitializeFromSource, + Vp9ParControlSpecified, + } +} + +// Optional. Use Quality tuning level (qualityTuningLevel) to choose how you +// want to trade off encoding speed for output video quality. The default behavior +// is faster, lower quality, multi-pass encoding. +const ( + // Vp9QualityTuningLevelMultiPass is a Vp9QualityTuningLevel enum value + Vp9QualityTuningLevelMultiPass = "MULTI_PASS" + + // Vp9QualityTuningLevelMultiPassHq is a Vp9QualityTuningLevel enum value + Vp9QualityTuningLevelMultiPassHq = "MULTI_PASS_HQ" +) + +// Vp9QualityTuningLevel_Values returns all elements of the Vp9QualityTuningLevel enum +func Vp9QualityTuningLevel_Values() []string { + return []string{ + Vp9QualityTuningLevelMultiPass, + Vp9QualityTuningLevelMultiPassHq, + } +} + +// With the VP9 codec, you can use only the variable bitrate (VBR) rate control +// mode. +const ( + // Vp9RateControlModeVbr is a Vp9RateControlMode enum value + Vp9RateControlModeVbr = "VBR" +) + +// Vp9RateControlMode_Values returns all elements of the Vp9RateControlMode enum +func Vp9RateControlMode_Values() []string { + return []string{ + Vp9RateControlModeVbr, + } +} + +// Optional. Ignore this setting unless Nagra support directs you to specify +// a value. When you don't specify a value here, the Nagra NexGuard library +// uses its default value. 
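Each enum in this hunk, including the WatermarkingStrength values that follow, now ships with a generated *_Values() helper, and in the provider these helpers are typically wired straight into schema validation so the accepted strings track the SDK. A minimal sketch, assuming v2 terraform-plugin-sdk import paths (adjust if a different plugin SDK version is vendored); the attribute itself is hypothetical.

package aws

import (
	"github.com/aws/aws-sdk-go/service/mediaconvert"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// watermarkingStrengthSchema sketches an attribute whose accepted values come
// from the generated enum helper instead of a hand-maintained string list.
func watermarkingStrengthSchema() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Optional:     true,
		Default:      mediaconvert.WatermarkingStrengthDefault,
		ValidateFunc: validation.StringInSlice(mediaconvert.WatermarkingStrength_Values(), false),
	}
}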
+const ( + // WatermarkingStrengthLightest is a WatermarkingStrength enum value + WatermarkingStrengthLightest = "LIGHTEST" + + // WatermarkingStrengthLighter is a WatermarkingStrength enum value + WatermarkingStrengthLighter = "LIGHTER" + + // WatermarkingStrengthDefault is a WatermarkingStrength enum value + WatermarkingStrengthDefault = "DEFAULT" + + // WatermarkingStrengthStronger is a WatermarkingStrength enum value + WatermarkingStrengthStronger = "STRONGER" + + // WatermarkingStrengthStrongest is a WatermarkingStrength enum value + WatermarkingStrengthStrongest = "STRONGEST" +) + +// WatermarkingStrength_Values returns all elements of the WatermarkingStrength enum +func WatermarkingStrength_Values() []string { + return []string{ + WatermarkingStrengthLightest, + WatermarkingStrengthLighter, + WatermarkingStrengthDefault, + WatermarkingStrengthStronger, + WatermarkingStrengthStrongest, + } +} + // The service defaults to using RIFF for WAV outputs. If your output audio // is likely to exceed 4 GB in file size, or if you otherwise need the extended // support of the RF64 format, set your output WAV file format to RF64. @@ -22359,3 +28409,11 @@ const ( // WavFormatRf64 is a WavFormat enum value WavFormatRf64 = "RF64" ) + +// WavFormat_Values returns all elements of the WavFormat enum +func WavFormat_Values() []string { + return []string{ + WavFormatRiff, + WavFormatRf64, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go index f091a86d4..12694bc38 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go b/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go index 2fa18e29d..26cdadf29 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/medialive/api.go @@ -4,6 +4,8 @@ package medialive import ( "fmt" + "io" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" @@ -12,6 +14,282 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restjson" ) +const opBatchDelete = "BatchDelete" + +// BatchDeleteRequest generates a "aws/request.Request" representing the +// client's request for the BatchDelete operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchDelete for more information on using the BatchDelete +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchDeleteRequest method. 
+// req, resp := client.BatchDeleteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/BatchDelete +func (c *MediaLive) BatchDeleteRequest(input *BatchDeleteInput) (req *request.Request, output *BatchDeleteOutput) { + op := &request.Operation{ + Name: opBatchDelete, + HTTPMethod: "POST", + HTTPPath: "/prod/batch/delete", + } + + if input == nil { + input = &BatchDeleteInput{} + } + + output = &BatchDeleteOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchDelete API operation for AWS Elemental MediaLive. +// +// Starts delete of resources. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaLive's +// API operation BatchDelete for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// +// * InternalServerErrorException +// +// * ForbiddenException +// +// * BadGatewayException +// +// * NotFoundException +// +// * GatewayTimeoutException +// +// * TooManyRequestsException +// +// * ConflictException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/BatchDelete +func (c *MediaLive) BatchDelete(input *BatchDeleteInput) (*BatchDeleteOutput, error) { + req, out := c.BatchDeleteRequest(input) + return out, req.Send() +} + +// BatchDeleteWithContext is the same as BatchDelete with the addition of +// the ability to pass a context and additional request options. +// +// See BatchDelete for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) BatchDeleteWithContext(ctx aws.Context, input *BatchDeleteInput, opts ...request.Option) (*BatchDeleteOutput, error) { + req, out := c.BatchDeleteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBatchStart = "BatchStart" + +// BatchStartRequest generates a "aws/request.Request" representing the +// client's request for the BatchStart operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchStart for more information on using the BatchStart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchStartRequest method. 
+// req, resp := client.BatchStartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/BatchStart +func (c *MediaLive) BatchStartRequest(input *BatchStartInput) (req *request.Request, output *BatchStartOutput) { + op := &request.Operation{ + Name: opBatchStart, + HTTPMethod: "POST", + HTTPPath: "/prod/batch/start", + } + + if input == nil { + input = &BatchStartInput{} + } + + output = &BatchStartOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchStart API operation for AWS Elemental MediaLive. +// +// Starts existing resources +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaLive's +// API operation BatchStart for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// +// * InternalServerErrorException +// +// * ForbiddenException +// +// * BadGatewayException +// +// * NotFoundException +// +// * GatewayTimeoutException +// +// * TooManyRequestsException +// +// * ConflictException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/BatchStart +func (c *MediaLive) BatchStart(input *BatchStartInput) (*BatchStartOutput, error) { + req, out := c.BatchStartRequest(input) + return out, req.Send() +} + +// BatchStartWithContext is the same as BatchStart with the addition of +// the ability to pass a context and additional request options. +// +// See BatchStart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) BatchStartWithContext(ctx aws.Context, input *BatchStartInput, opts ...request.Option) (*BatchStartOutput, error) { + req, out := c.BatchStartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opBatchStop = "BatchStop" + +// BatchStopRequest generates a "aws/request.Request" representing the +// client's request for the BatchStop operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchStop for more information on using the BatchStop +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchStopRequest method. 
+// req, resp := client.BatchStopRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/BatchStop +func (c *MediaLive) BatchStopRequest(input *BatchStopInput) (req *request.Request, output *BatchStopOutput) { + op := &request.Operation{ + Name: opBatchStop, + HTTPMethod: "POST", + HTTPPath: "/prod/batch/stop", + } + + if input == nil { + input = &BatchStopInput{} + } + + output = &BatchStopOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchStop API operation for AWS Elemental MediaLive. +// +// Stops running resources +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaLive's +// API operation BatchStop for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// +// * InternalServerErrorException +// +// * ForbiddenException +// +// * BadGatewayException +// +// * NotFoundException +// +// * GatewayTimeoutException +// +// * TooManyRequestsException +// +// * ConflictException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/BatchStop +func (c *MediaLive) BatchStop(input *BatchStopInput) (*BatchStopOutput, error) { + req, out := c.BatchStopRequest(input) + return out, req.Send() +} + +// BatchStopWithContext is the same as BatchStop with the addition of +// the ability to pass a context and additional request options. +// +// See BatchStop for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) BatchStopWithContext(ctx aws.Context, input *BatchStopInput, opts ...request.Option) (*BatchStopOutput, error) { + req, out := c.BatchStopRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opBatchUpdateSchedule = "BatchUpdateSchedule" // BatchUpdateScheduleRequest generates a "aws/request.Request" representing the @@ -1549,58 +1827,58 @@ func (c *MediaLive) DescribeInputWithContext(ctx aws.Context, input *DescribeInp return out, req.Send() } -const opDescribeInputSecurityGroup = "DescribeInputSecurityGroup" +const opDescribeInputDevice = "DescribeInputDevice" -// DescribeInputSecurityGroupRequest generates a "aws/request.Request" representing the -// client's request for the DescribeInputSecurityGroup operation. The "output" return +// DescribeInputDeviceRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInputDevice operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeInputSecurityGroup for more information on using the DescribeInputSecurityGroup +// See DescribeInputDevice for more information on using the DescribeInputDevice // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeInputSecurityGroupRequest method. -// req, resp := client.DescribeInputSecurityGroupRequest(params) +// // Example sending a request using the DescribeInputDeviceRequest method. +// req, resp := client.DescribeInputDeviceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputSecurityGroup -func (c *MediaLive) DescribeInputSecurityGroupRequest(input *DescribeInputSecurityGroupInput) (req *request.Request, output *DescribeInputSecurityGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputDevice +func (c *MediaLive) DescribeInputDeviceRequest(input *DescribeInputDeviceInput) (req *request.Request, output *DescribeInputDeviceOutput) { op := &request.Operation{ - Name: opDescribeInputSecurityGroup, + Name: opDescribeInputDevice, HTTPMethod: "GET", - HTTPPath: "/prod/inputSecurityGroups/{inputSecurityGroupId}", + HTTPPath: "/prod/inputDevices/{inputDeviceId}", } if input == nil { - input = &DescribeInputSecurityGroupInput{} + input = &DescribeInputDeviceInput{} } - output = &DescribeInputSecurityGroupOutput{} + output = &DescribeInputDeviceOutput{} req = c.newRequest(op, input, output) return } -// DescribeInputSecurityGroup API operation for AWS Elemental MediaLive. +// DescribeInputDevice API operation for AWS Elemental MediaLive. // -// Produces a summary of an Input Security Group +// Gets the details for the input device // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Elemental MediaLive's -// API operation DescribeInputSecurityGroup for usage and error information. +// API operation DescribeInputDevice for usage and error information. // // Returned Error Types: // * BadRequestException @@ -1617,80 +1895,80 @@ func (c *MediaLive) DescribeInputSecurityGroupRequest(input *DescribeInputSecuri // // * TooManyRequestsException // -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputSecurityGroup -func (c *MediaLive) DescribeInputSecurityGroup(input *DescribeInputSecurityGroupInput) (*DescribeInputSecurityGroupOutput, error) { - req, out := c.DescribeInputSecurityGroupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputDevice +func (c *MediaLive) DescribeInputDevice(input *DescribeInputDeviceInput) (*DescribeInputDeviceOutput, error) { + req, out := c.DescribeInputDeviceRequest(input) return out, req.Send() } -// DescribeInputSecurityGroupWithContext is the same as DescribeInputSecurityGroup with the addition of +// DescribeInputDeviceWithContext is the same as DescribeInputDevice with the addition of // the ability to pass a context and additional request options. // -// See DescribeInputSecurityGroup for details on how to use this API operation. +// See DescribeInputDevice for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaLive) DescribeInputSecurityGroupWithContext(ctx aws.Context, input *DescribeInputSecurityGroupInput, opts ...request.Option) (*DescribeInputSecurityGroupOutput, error) { - req, out := c.DescribeInputSecurityGroupRequest(input) +func (c *MediaLive) DescribeInputDeviceWithContext(ctx aws.Context, input *DescribeInputDeviceInput, opts ...request.Option) (*DescribeInputDeviceOutput, error) { + req, out := c.DescribeInputDeviceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeMultiplex = "DescribeMultiplex" +const opDescribeInputDeviceThumbnail = "DescribeInputDeviceThumbnail" -// DescribeMultiplexRequest generates a "aws/request.Request" representing the -// client's request for the DescribeMultiplex operation. The "output" return +// DescribeInputDeviceThumbnailRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInputDeviceThumbnail operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeMultiplex for more information on using the DescribeMultiplex +// See DescribeInputDeviceThumbnail for more information on using the DescribeInputDeviceThumbnail // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeMultiplexRequest method. -// req, resp := client.DescribeMultiplexRequest(params) +// // Example sending a request using the DescribeInputDeviceThumbnailRequest method. +// req, resp := client.DescribeInputDeviceThumbnailRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeMultiplex -func (c *MediaLive) DescribeMultiplexRequest(input *DescribeMultiplexInput) (req *request.Request, output *DescribeMultiplexOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputDeviceThumbnail +func (c *MediaLive) DescribeInputDeviceThumbnailRequest(input *DescribeInputDeviceThumbnailInput) (req *request.Request, output *DescribeInputDeviceThumbnailOutput) { op := &request.Operation{ - Name: opDescribeMultiplex, + Name: opDescribeInputDeviceThumbnail, HTTPMethod: "GET", - HTTPPath: "/prod/multiplexes/{multiplexId}", + HTTPPath: "/prod/inputDevices/{inputDeviceId}/thumbnailData", } if input == nil { - input = &DescribeMultiplexInput{} + input = &DescribeInputDeviceThumbnailInput{} } - output = &DescribeMultiplexOutput{} + output = &DescribeInputDeviceThumbnailOutput{} req = c.newRequest(op, input, output) return } -// DescribeMultiplex API operation for AWS Elemental MediaLive. +// DescribeInputDeviceThumbnail API operation for AWS Elemental MediaLive. // -// Gets details about a multiplex. +// Get the latest thumbnail data for the input device. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for AWS Elemental MediaLive's -// API operation DescribeMultiplex for usage and error information. +// API operation DescribeInputDeviceThumbnail for usage and error information. // // Returned Error Types: // * BadRequestException @@ -1707,29 +1985,209 @@ func (c *MediaLive) DescribeMultiplexRequest(input *DescribeMultiplexInput) (req // // * TooManyRequestsException // -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeMultiplex -func (c *MediaLive) DescribeMultiplex(input *DescribeMultiplexInput) (*DescribeMultiplexOutput, error) { - req, out := c.DescribeMultiplexRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputDeviceThumbnail +func (c *MediaLive) DescribeInputDeviceThumbnail(input *DescribeInputDeviceThumbnailInput) (*DescribeInputDeviceThumbnailOutput, error) { + req, out := c.DescribeInputDeviceThumbnailRequest(input) return out, req.Send() } -// DescribeMultiplexWithContext is the same as DescribeMultiplex with the addition of +// DescribeInputDeviceThumbnailWithContext is the same as DescribeInputDeviceThumbnail with the addition of // the ability to pass a context and additional request options. // -// See DescribeMultiplex for details on how to use this API operation. +// See DescribeInputDeviceThumbnail for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaLive) DescribeMultiplexWithContext(ctx aws.Context, input *DescribeMultiplexInput, opts ...request.Option) (*DescribeMultiplexOutput, error) { - req, out := c.DescribeMultiplexRequest(input) +func (c *MediaLive) DescribeInputDeviceThumbnailWithContext(ctx aws.Context, input *DescribeInputDeviceThumbnailInput, opts ...request.Option) (*DescribeInputDeviceThumbnailOutput, error) { + req, out := c.DescribeInputDeviceThumbnailRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeMultiplexProgram = "DescribeMultiplexProgram" +const opDescribeInputSecurityGroup = "DescribeInputSecurityGroup" + +// DescribeInputSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInputSecurityGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInputSecurityGroup for more information on using the DescribeInputSecurityGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInputSecurityGroupRequest method. 
+// req, resp := client.DescribeInputSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputSecurityGroup +func (c *MediaLive) DescribeInputSecurityGroupRequest(input *DescribeInputSecurityGroupInput) (req *request.Request, output *DescribeInputSecurityGroupOutput) { + op := &request.Operation{ + Name: opDescribeInputSecurityGroup, + HTTPMethod: "GET", + HTTPPath: "/prod/inputSecurityGroups/{inputSecurityGroupId}", + } + + if input == nil { + input = &DescribeInputSecurityGroupInput{} + } + + output = &DescribeInputSecurityGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInputSecurityGroup API operation for AWS Elemental MediaLive. +// +// Produces a summary of an Input Security Group +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaLive's +// API operation DescribeInputSecurityGroup for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// +// * InternalServerErrorException +// +// * ForbiddenException +// +// * BadGatewayException +// +// * NotFoundException +// +// * GatewayTimeoutException +// +// * TooManyRequestsException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeInputSecurityGroup +func (c *MediaLive) DescribeInputSecurityGroup(input *DescribeInputSecurityGroupInput) (*DescribeInputSecurityGroupOutput, error) { + req, out := c.DescribeInputSecurityGroupRequest(input) + return out, req.Send() +} + +// DescribeInputSecurityGroupWithContext is the same as DescribeInputSecurityGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInputSecurityGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) DescribeInputSecurityGroupWithContext(ctx aws.Context, input *DescribeInputSecurityGroupInput, opts ...request.Option) (*DescribeInputSecurityGroupOutput, error) { + req, out := c.DescribeInputSecurityGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeMultiplex = "DescribeMultiplex" + +// DescribeMultiplexRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMultiplex operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeMultiplex for more information on using the DescribeMultiplex +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeMultiplexRequest method. 
+// req, resp := client.DescribeMultiplexRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeMultiplex +func (c *MediaLive) DescribeMultiplexRequest(input *DescribeMultiplexInput) (req *request.Request, output *DescribeMultiplexOutput) { + op := &request.Operation{ + Name: opDescribeMultiplex, + HTTPMethod: "GET", + HTTPPath: "/prod/multiplexes/{multiplexId}", + } + + if input == nil { + input = &DescribeMultiplexInput{} + } + + output = &DescribeMultiplexOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeMultiplex API operation for AWS Elemental MediaLive. +// +// Gets details about a multiplex. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaLive's +// API operation DescribeMultiplex for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// +// * InternalServerErrorException +// +// * ForbiddenException +// +// * BadGatewayException +// +// * NotFoundException +// +// * GatewayTimeoutException +// +// * TooManyRequestsException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/DescribeMultiplex +func (c *MediaLive) DescribeMultiplex(input *DescribeMultiplexInput) (*DescribeMultiplexOutput, error) { + req, out := c.DescribeMultiplexRequest(input) + return out, req.Send() +} + +// DescribeMultiplexWithContext is the same as DescribeMultiplex with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeMultiplex for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) DescribeMultiplexWithContext(ctx aws.Context, input *DescribeMultiplexInput, opts ...request.Option) (*DescribeMultiplexOutput, error) { + req, out := c.DescribeMultiplexRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeMultiplexProgram = "DescribeMultiplexProgram" // DescribeMultiplexProgramRequest generates a "aws/request.Request" representing the // client's request for the DescribeMultiplexProgram operation. The "output" return @@ -2293,37 +2751,37 @@ func (c *MediaLive) ListChannelsPagesWithContext(ctx aws.Context, input *ListCha return p.Err() } -const opListInputSecurityGroups = "ListInputSecurityGroups" +const opListInputDevices = "ListInputDevices" -// ListInputSecurityGroupsRequest generates a "aws/request.Request" representing the -// client's request for the ListInputSecurityGroups operation. The "output" return +// ListInputDevicesRequest generates a "aws/request.Request" representing the +// client's request for the ListInputDevices operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See ListInputSecurityGroups for more information on using the ListInputSecurityGroups +// See ListInputDevices for more information on using the ListInputDevices // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListInputSecurityGroupsRequest method. -// req, resp := client.ListInputSecurityGroupsRequest(params) +// // Example sending a request using the ListInputDevicesRequest method. +// req, resp := client.ListInputDevicesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputSecurityGroups -func (c *MediaLive) ListInputSecurityGroupsRequest(input *ListInputSecurityGroupsInput) (req *request.Request, output *ListInputSecurityGroupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputDevices +func (c *MediaLive) ListInputDevicesRequest(input *ListInputDevicesInput) (req *request.Request, output *ListInputDevicesOutput) { op := &request.Operation{ - Name: opListInputSecurityGroups, + Name: opListInputDevices, HTTPMethod: "GET", - HTTPPath: "/prod/inputSecurityGroups", + HTTPPath: "/prod/inputDevices", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, @@ -2333,24 +2791,24 @@ func (c *MediaLive) ListInputSecurityGroupsRequest(input *ListInputSecurityGroup } if input == nil { - input = &ListInputSecurityGroupsInput{} + input = &ListInputDevicesInput{} } - output = &ListInputSecurityGroupsOutput{} + output = &ListInputDevicesOutput{} req = c.newRequest(op, input, output) return } -// ListInputSecurityGroups API operation for AWS Elemental MediaLive. +// ListInputDevices API operation for AWS Elemental MediaLive. // -// Produces a list of Input Security Groups for an account +// List input devices // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Elemental MediaLive's -// API operation ListInputSecurityGroups for usage and error information. +// API operation ListInputDevices for usage and error information. // // Returned Error Types: // * BadRequestException @@ -2365,65 +2823,65 @@ func (c *MediaLive) ListInputSecurityGroupsRequest(input *ListInputSecurityGroup // // * TooManyRequestsException // -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputSecurityGroups -func (c *MediaLive) ListInputSecurityGroups(input *ListInputSecurityGroupsInput) (*ListInputSecurityGroupsOutput, error) { - req, out := c.ListInputSecurityGroupsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputDevices +func (c *MediaLive) ListInputDevices(input *ListInputDevicesInput) (*ListInputDevicesOutput, error) { + req, out := c.ListInputDevicesRequest(input) return out, req.Send() } -// ListInputSecurityGroupsWithContext is the same as ListInputSecurityGroups with the addition of +// ListInputDevicesWithContext is the same as ListInputDevices with the addition of // the ability to pass a context and additional request options. // -// See ListInputSecurityGroups for details on how to use this API operation. 
+// See ListInputDevices for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *MediaLive) ListInputSecurityGroupsWithContext(ctx aws.Context, input *ListInputSecurityGroupsInput, opts ...request.Option) (*ListInputSecurityGroupsOutput, error) { - req, out := c.ListInputSecurityGroupsRequest(input) +func (c *MediaLive) ListInputDevicesWithContext(ctx aws.Context, input *ListInputDevicesInput, opts ...request.Option) (*ListInputDevicesOutput, error) { + req, out := c.ListInputDevicesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListInputSecurityGroupsPages iterates over the pages of a ListInputSecurityGroups operation, +// ListInputDevicesPages iterates over the pages of a ListInputDevices operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListInputSecurityGroups method for more information on how to use this operation. +// See ListInputDevices method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListInputSecurityGroups operation. +// // Example iterating over at most 3 pages of a ListInputDevices operation. // pageNum := 0 -// err := client.ListInputSecurityGroupsPages(params, -// func(page *medialive.ListInputSecurityGroupsOutput, lastPage bool) bool { +// err := client.ListInputDevicesPages(params, +// func(page *medialive.ListInputDevicesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *MediaLive) ListInputSecurityGroupsPages(input *ListInputSecurityGroupsInput, fn func(*ListInputSecurityGroupsOutput, bool) bool) error { - return c.ListInputSecurityGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *MediaLive) ListInputDevicesPages(input *ListInputDevicesInput, fn func(*ListInputDevicesOutput, bool) bool) error { + return c.ListInputDevicesPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListInputSecurityGroupsPagesWithContext same as ListInputSecurityGroupsPages except +// ListInputDevicesPagesWithContext same as ListInputDevicesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
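The pagination helpers above are easiest to follow with a concrete call; a minimal sketch of driving the new ListInputDevices paginator under a deadline (the svc client, region, and 30-second timeout are assumptions, not part of the generated code) could look like:

    // Illustrative sketch only, not part of the vendored SDK; assumes the usual
    // imports (context, fmt, time, aws, session, medialive).
    svc := medialive.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

    // Bound the whole pagination loop; per the comment above, a nil context panics.
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    err := svc.ListInputDevicesPagesWithContext(ctx, &medialive.ListInputDevicesInput{},
        func(page *medialive.ListInputDevicesOutput, lastPage bool) bool {
            fmt.Println(page)
            return !lastPage // returning false on the last page ends iteration cleanly
        })
    if err != nil {
        fmt.Println("listing input devices failed:", err)
    }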
-func (c *MediaLive) ListInputSecurityGroupsPagesWithContext(ctx aws.Context, input *ListInputSecurityGroupsInput, fn func(*ListInputSecurityGroupsOutput, bool) bool, opts ...request.Option) error { +func (c *MediaLive) ListInputDevicesPagesWithContext(ctx aws.Context, input *ListInputDevicesInput, fn func(*ListInputDevicesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListInputSecurityGroupsInput + var inCpy *ListInputDevicesInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListInputSecurityGroupsRequest(inCpy) + req, _ := c.ListInputDevicesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -2431,7 +2889,7 @@ func (c *MediaLive) ListInputSecurityGroupsPagesWithContext(ctx aws.Context, inp } for p.Next() { - if !fn(p.Page().(*ListInputSecurityGroupsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListInputDevicesOutput), !p.HasNextPage()) { break } } @@ -2439,37 +2897,37 @@ func (c *MediaLive) ListInputSecurityGroupsPagesWithContext(ctx aws.Context, inp return p.Err() } -const opListInputs = "ListInputs" +const opListInputSecurityGroups = "ListInputSecurityGroups" -// ListInputsRequest generates a "aws/request.Request" representing the -// client's request for the ListInputs operation. The "output" return +// ListInputSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListInputSecurityGroups operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListInputs for more information on using the ListInputs +// See ListInputSecurityGroups for more information on using the ListInputSecurityGroups // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListInputsRequest method. -// req, resp := client.ListInputsRequest(params) +// // Example sending a request using the ListInputSecurityGroupsRequest method. 
+// req, resp := client.ListInputSecurityGroupsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputs -func (c *MediaLive) ListInputsRequest(input *ListInputsInput) (req *request.Request, output *ListInputsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputSecurityGroups +func (c *MediaLive) ListInputSecurityGroupsRequest(input *ListInputSecurityGroupsInput) (req *request.Request, output *ListInputSecurityGroupsOutput) { op := &request.Operation{ - Name: opListInputs, + Name: opListInputSecurityGroups, HTTPMethod: "GET", - HTTPPath: "/prod/inputs", + HTTPPath: "/prod/inputSecurityGroups", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, @@ -2479,24 +2937,24 @@ func (c *MediaLive) ListInputsRequest(input *ListInputsInput) (req *request.Requ } if input == nil { - input = &ListInputsInput{} + input = &ListInputSecurityGroupsInput{} } - output = &ListInputsOutput{} + output = &ListInputSecurityGroupsOutput{} req = c.newRequest(op, input, output) return } -// ListInputs API operation for AWS Elemental MediaLive. +// ListInputSecurityGroups API operation for AWS Elemental MediaLive. // -// Produces list of inputs that have been created +// Produces a list of Input Security Groups for an account // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Elemental MediaLive's -// API operation ListInputs for usage and error information. +// API operation ListInputSecurityGroups for usage and error information. // // Returned Error Types: // * BadRequestException @@ -2511,23 +2969,169 @@ func (c *MediaLive) ListInputsRequest(input *ListInputsInput) (req *request.Requ // // * TooManyRequestsException // -// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputs -func (c *MediaLive) ListInputs(input *ListInputsInput) (*ListInputsOutput, error) { - req, out := c.ListInputsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputSecurityGroups +func (c *MediaLive) ListInputSecurityGroups(input *ListInputSecurityGroupsInput) (*ListInputSecurityGroupsOutput, error) { + req, out := c.ListInputSecurityGroupsRequest(input) return out, req.Send() } -// ListInputsWithContext is the same as ListInputs with the addition of +// ListInputSecurityGroupsWithContext is the same as ListInputSecurityGroups with the addition of // the ability to pass a context and additional request options. // -// See ListInputs for details on how to use this API operation. +// See ListInputSecurityGroups for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *MediaLive) ListInputsWithContext(ctx aws.Context, input *ListInputsInput, opts ...request.Option) (*ListInputsOutput, error) { - req, out := c.ListInputsRequest(input) +func (c *MediaLive) ListInputSecurityGroupsWithContext(ctx aws.Context, input *ListInputSecurityGroupsInput, opts ...request.Option) (*ListInputSecurityGroupsOutput, error) { + req, out := c.ListInputSecurityGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListInputSecurityGroupsPages iterates over the pages of a ListInputSecurityGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInputSecurityGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInputSecurityGroups operation. +// pageNum := 0 +// err := client.ListInputSecurityGroupsPages(params, +// func(page *medialive.ListInputSecurityGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *MediaLive) ListInputSecurityGroupsPages(input *ListInputSecurityGroupsInput, fn func(*ListInputSecurityGroupsOutput, bool) bool) error { + return c.ListInputSecurityGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListInputSecurityGroupsPagesWithContext same as ListInputSecurityGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) ListInputSecurityGroupsPagesWithContext(ctx aws.Context, input *ListInputSecurityGroupsInput, fn func(*ListInputSecurityGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListInputSecurityGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListInputSecurityGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListInputSecurityGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListInputs = "ListInputs" + +// ListInputsRequest generates a "aws/request.Request" representing the +// client's request for the ListInputs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListInputs for more information on using the ListInputs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListInputsRequest method. 
+// req, resp := client.ListInputsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputs +func (c *MediaLive) ListInputsRequest(input *ListInputsInput) (req *request.Request, output *ListInputsOutput) { + op := &request.Operation{ + Name: opListInputs, + HTTPMethod: "GET", + HTTPPath: "/prod/inputs", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListInputsInput{} + } + + output = &ListInputsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListInputs API operation for AWS Elemental MediaLive. +// +// Produces list of inputs that have been created +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaLive's +// API operation ListInputs for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// +// * InternalServerErrorException +// +// * ForbiddenException +// +// * BadGatewayException +// +// * GatewayTimeoutException +// +// * TooManyRequestsException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/ListInputs +func (c *MediaLive) ListInputs(input *ListInputsInput) (*ListInputsOutput, error) { + req, out := c.ListInputsRequest(input) + return out, req.Send() +} + +// ListInputsWithContext is the same as ListInputs with the addition of +// the ability to pass a context and additional request options. +// +// See ListInputs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) ListInputsWithContext(ctx aws.Context, input *ListInputsInput, opts ...request.Option) (*ListInputsOutput, error) { + req, out := c.ListInputsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -3991,6 +4595,98 @@ func (c *MediaLive) UpdateInputWithContext(ctx aws.Context, input *UpdateInputIn return out, req.Send() } +const opUpdateInputDevice = "UpdateInputDevice" + +// UpdateInputDeviceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateInputDevice operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateInputDevice for more information on using the UpdateInputDevice +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateInputDeviceRequest method. 
+// req, resp := client.UpdateInputDeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/UpdateInputDevice +func (c *MediaLive) UpdateInputDeviceRequest(input *UpdateInputDeviceInput) (req *request.Request, output *UpdateInputDeviceOutput) { + op := &request.Operation{ + Name: opUpdateInputDevice, + HTTPMethod: "PUT", + HTTPPath: "/prod/inputDevices/{inputDeviceId}", + } + + if input == nil { + input = &UpdateInputDeviceInput{} + } + + output = &UpdateInputDeviceOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateInputDevice API operation for AWS Elemental MediaLive. +// +// Updates the parameters for the input device. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaLive's +// API operation UpdateInputDevice for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// +// * UnprocessableEntityException +// +// * InternalServerErrorException +// +// * ForbiddenException +// +// * BadGatewayException +// +// * NotFoundException +// +// * GatewayTimeoutException +// +// * TooManyRequestsException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/medialive-2017-10-14/UpdateInputDevice +func (c *MediaLive) UpdateInputDevice(input *UpdateInputDeviceInput) (*UpdateInputDeviceOutput, error) { + req, out := c.UpdateInputDeviceRequest(input) + return out, req.Send() +} + +// UpdateInputDeviceWithContext is the same as UpdateInputDevice with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateInputDevice for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) UpdateInputDeviceWithContext(ctx aws.Context, input *UpdateInputDeviceInput, opts ...request.Option) (*UpdateInputDeviceOutput, error) { + req, out := c.UpdateInputDeviceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateInputSecurityGroup = "UpdateInputSecurityGroup" // UpdateInputSecurityGroupRequest generates a "aws/request.Request" representing the @@ -4562,12 +5258,57 @@ func (s *Ac3Settings) SetMetadataControl(v string) *Ac3Settings { return s } +// Ancillary Source Settings +type AncillarySourceSettings struct { + _ struct{} `type:"structure"` + + // Specifies the number (1 to 4) of the captions channel you want to extract + // from the ancillary captions. If you plan to convert the ancillary captions + // to another format, complete this field. If you plan to choose Embedded as + // the captions destination in the output (to pass through all the channels + // in the ancillary captions), leave this field blank because MediaLive ignores + // the field. 
+ SourceAncillaryChannelNumber *int64 `locationName:"sourceAncillaryChannelNumber" min:"1" type:"integer"` +} + +// String returns the string representation +func (s AncillarySourceSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AncillarySourceSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AncillarySourceSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AncillarySourceSettings"} + if s.SourceAncillaryChannelNumber != nil && *s.SourceAncillaryChannelNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("SourceAncillaryChannelNumber", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSourceAncillaryChannelNumber sets the SourceAncillaryChannelNumber field's value. +func (s *AncillarySourceSettings) SetSourceAncillaryChannelNumber(v int64) *AncillarySourceSettings { + s.SourceAncillaryChannelNumber = &v + return s +} + // Archive Container Settings type ArchiveContainerSettings struct { _ struct{} `type:"structure"` // M2ts Settings M2tsSettings *M2tsSettings `locationName:"m2tsSettings" type:"structure"` + + // Raw Settings + RawSettings *RawSettings `locationName:"rawSettings" type:"structure"` } // String returns the string representation @@ -4601,6 +5342,12 @@ func (s *ArchiveContainerSettings) SetM2tsSettings(v *M2tsSettings) *ArchiveCont return s } +// SetRawSettings sets the RawSettings field's value. +func (s *ArchiveContainerSettings) SetRawSettings(v *RawSettings) *ArchiveContainerSettings { + s.RawSettings = v + return s +} + // Archive Group Settings type ArchiveGroupSettings struct { _ struct{} `type:"structure"` @@ -4829,6 +5576,9 @@ type AudioCodecSettings struct { // Pass Through Settings PassThroughSettings *PassThroughSettings `locationName:"passThroughSettings" type:"structure"` + + // Wav Settings + WavSettings *WavSettings `locationName:"wavSettings" type:"structure"` } // String returns the string representation @@ -4891,6 +5641,12 @@ func (s *AudioCodecSettings) SetPassThroughSettings(v *PassThroughSettings) *Aud return s } +// SetWavSettings sets the WavSettings field's value. +func (s *AudioCodecSettings) SetWavSettings(v *WavSettings) *AudioCodecSettings { + s.WavSettings = v + return s +} + // Audio Description type AudioDescription struct { _ struct{} `type:"structure"` @@ -4918,9 +5674,10 @@ type AudioDescription struct { // Audio codec settings. CodecSettings *AudioCodecSettings `locationName:"codecSettings" type:"structure"` - // Indicates the language of the audio output track. Only used if languageControlMode - // is useConfigured, or there is no ISO 639 language code specified in the input. - LanguageCode *string `locationName:"languageCode" min:"3" type:"string"` + // RFC 5646 language code representing the language of the audio output track. + // Only used if languageControlMode is useConfigured, or there is no ISO 639 + // language code specified in the input. + LanguageCode *string `locationName:"languageCode" min:"1" type:"string"` // Choosing followInput will cause the ISO 639 language code of the output to // follow the ISO 639 language code of the input. 
The languageCode will be used @@ -4960,8 +5717,8 @@ func (s *AudioDescription) Validate() error { if s.AudioSelectorName == nil { invalidParams.Add(request.NewErrParamRequired("AudioSelectorName")) } - if s.LanguageCode != nil && len(*s.LanguageCode) < 3 { - invalidParams.Add(request.NewErrParamMinLen("LanguageCode", 3)) + if s.LanguageCode != nil && len(*s.LanguageCode) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LanguageCode", 1)) } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) @@ -5328,6 +6085,9 @@ type AudioSelectorSettings struct { // Audio Pid Selection AudioPidSelection *AudioPidSelection `locationName:"audioPidSelection" type:"structure"` + + // Audio Track Selection + AudioTrackSelection *AudioTrackSelection `locationName:"audioTrackSelection" type:"structure"` } // String returns the string representation @@ -5353,6 +6113,11 @@ func (s *AudioSelectorSettings) Validate() error { invalidParams.AddNested("AudioPidSelection", err.(request.ErrInvalidParams)) } } + if s.AudioTrackSelection != nil { + if err := s.AudioTrackSelection.Validate(); err != nil { + invalidParams.AddNested("AudioTrackSelection", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -5372,7 +6137,153 @@ func (s *AudioSelectorSettings) SetAudioPidSelection(v *AudioPidSelection) *Audi return s } -// Avail Blanking +// SetAudioTrackSelection sets the AudioTrackSelection field's value. +func (s *AudioSelectorSettings) SetAudioTrackSelection(v *AudioTrackSelection) *AudioSelectorSettings { + s.AudioTrackSelection = v + return s +} + +// Audio Track +type AudioTrack struct { + _ struct{} `type:"structure"` + + // 1-based integer value that maps to a specific audio track + // + // Track is a required field + Track *int64 `locationName:"track" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s AudioTrack) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AudioTrack) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AudioTrack) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AudioTrack"} + if s.Track == nil { + invalidParams.Add(request.NewErrParamRequired("Track")) + } + if s.Track != nil && *s.Track < 1 { + invalidParams.Add(request.NewErrParamMinValue("Track", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTrack sets the Track field's value. +func (s *AudioTrack) SetTrack(v int64) *AudioTrack { + s.Track = &v + return s +} + +// Audio Track Selection +type AudioTrackSelection struct { + _ struct{} `type:"structure"` + + // Selects one or more unique audio tracks from within a source. + // + // Tracks is a required field + Tracks []*AudioTrack `locationName:"tracks" type:"list" required:"true"` +} + +// String returns the string representation +func (s AudioTrackSelection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AudioTrackSelection) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
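To show how the new audio-track types compose, here is a small sketch that selects tracks 1 and 2 and validates the result; the track numbers and the surrounding aws/fmt/medialive imports are assumptions:

    // Illustrative sketch only, not part of the vendored SDK.
    selection := &medialive.AudioTrackSelection{
        Tracks: []*medialive.AudioTrack{
            {Track: aws.Int64(1)}, // track numbers are 1-based
            {Track: aws.Int64(2)},
        },
    }

    // Validate enforces the required Tracks list and the minimum value of 1 per track.
    if err := selection.Validate(); err != nil {
        fmt.Println("invalid audio track selection:", err)
    }

    // The selection is attached to an audio selector through AudioSelectorSettings.
    settings := &medialive.AudioSelectorSettings{AudioTrackSelection: selection}
    _ = settings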
+func (s *AudioTrackSelection) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AudioTrackSelection"} + if s.Tracks == nil { + invalidParams.Add(request.NewErrParamRequired("Tracks")) + } + if s.Tracks != nil { + for i, v := range s.Tracks { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tracks", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTracks sets the Tracks field's value. +func (s *AudioTrackSelection) SetTracks(v []*AudioTrack) *AudioTrackSelection { + s.Tracks = v + return s +} + +// The settings for Automatic Input Failover. +type AutomaticInputFailoverSettings struct { + _ struct{} `type:"structure"` + + // Input preference when deciding which input to make active when a previously + // failed input has recovered. + InputPreference *string `locationName:"inputPreference" type:"string" enum:"InputPreference"` + + // The input ID of the secondary input in the automatic input failover pair. + // + // SecondaryInputId is a required field + SecondaryInputId *string `locationName:"secondaryInputId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AutomaticInputFailoverSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutomaticInputFailoverSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AutomaticInputFailoverSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutomaticInputFailoverSettings"} + if s.SecondaryInputId == nil { + invalidParams.Add(request.NewErrParamRequired("SecondaryInputId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputPreference sets the InputPreference field's value. +func (s *AutomaticInputFailoverSettings) SetInputPreference(v string) *AutomaticInputFailoverSettings { + s.InputPreference = &v + return s +} + +// SetSecondaryInputId sets the SecondaryInputId field's value. +func (s *AutomaticInputFailoverSettings) SetSecondaryInputId(v string) *AutomaticInputFailoverSettings { + s.SecondaryInputId = &v + return s +} + +// Avail Blanking type AvailBlanking struct { _ struct{} `type:"structure"` @@ -5515,8 +6426,8 @@ func (s *AvailSettings) SetScte35TimeSignalApos(v *Scte35TimeSignalApos) *AvailS } type BadGatewayException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5533,17 +6444,17 @@ func (s BadGatewayException) GoString() string { func newErrorBadGatewayException(v protocol.ResponseMetadata) error { return &BadGatewayException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadGatewayException) Code() string { +func (s *BadGatewayException) Code() string { return "BadGatewayException" } // Message returns the exception's message. -func (s BadGatewayException) Message() string { +func (s *BadGatewayException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5551,27 +6462,27 @@ func (s BadGatewayException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s BadGatewayException) OrigErr() error { +func (s *BadGatewayException) OrigErr() error { return nil } -func (s BadGatewayException) Error() string { +func (s *BadGatewayException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadGatewayException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadGatewayException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadGatewayException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadGatewayException) RequestID() string { + return s.RespMetadata.RequestID } type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5588,17 +6499,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5606,22 +6517,149 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type BatchDeleteInput struct { + _ struct{} `type:"structure"` + + ChannelIds []*string `locationName:"channelIds" type:"list"` + + InputIds []*string `locationName:"inputIds" type:"list"` + + InputSecurityGroupIds []*string `locationName:"inputSecurityGroupIds" type:"list"` + + MultiplexIds []*string `locationName:"multiplexIds" type:"list"` +} + +// String returns the string representation +func (s BatchDeleteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteInput) GoString() string { + return s.String() +} + +// SetChannelIds sets the ChannelIds field's value. +func (s *BatchDeleteInput) SetChannelIds(v []*string) *BatchDeleteInput { + s.ChannelIds = v + return s +} + +// SetInputIds sets the InputIds field's value. +func (s *BatchDeleteInput) SetInputIds(v []*string) *BatchDeleteInput { + s.InputIds = v + return s +} + +// SetInputSecurityGroupIds sets the InputSecurityGroupIds field's value. 
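The operation comments above direct callers to runtime type assertions on awserr.Error; a hedged sketch of that idiom around one of the new batch operations (the svc client and the channel ID are placeholders) might read:

    // Illustrative sketch only, not part of the vendored SDK; "svc" is an existing
    // *medialive.MediaLive client.
    _, err := svc.BatchStop(&medialive.BatchStopInput{
        ChannelIds: []*string{aws.String("1111111")},
    })
    if err != nil {
        if aerr, ok := err.(awserr.Error); ok {
            // Code() yields names such as "NotFoundException" or "TooManyRequestsException",
            // matching the Returned Error Types listed for the operation.
            switch aerr.Code() {
            case "NotFoundException":
                fmt.Println("no such channel:", aerr.Message())
            case "TooManyRequestsException":
                fmt.Println("throttled, retry later:", aerr.Message())
            default:
                fmt.Println(aerr.Code(), aerr.Message())
            }
        } else {
            fmt.Println(err)
        }
    }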
+func (s *BatchDeleteInput) SetInputSecurityGroupIds(v []*string) *BatchDeleteInput { + s.InputSecurityGroupIds = v + return s +} + +// SetMultiplexIds sets the MultiplexIds field's value. +func (s *BatchDeleteInput) SetMultiplexIds(v []*string) *BatchDeleteInput { + s.MultiplexIds = v + return s +} + +type BatchDeleteOutput struct { + _ struct{} `type:"structure"` + + Failed []*BatchFailedResultModel `locationName:"failed" type:"list"` + + Successful []*BatchSuccessfulResultModel `locationName:"successful" type:"list"` +} + +// String returns the string representation +func (s BatchDeleteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteOutput) GoString() string { + return s.String() +} + +// SetFailed sets the Failed field's value. +func (s *BatchDeleteOutput) SetFailed(v []*BatchFailedResultModel) *BatchDeleteOutput { + s.Failed = v + return s +} + +// SetSuccessful sets the Successful field's value. +func (s *BatchDeleteOutput) SetSuccessful(v []*BatchSuccessfulResultModel) *BatchDeleteOutput { + s.Successful = v + return s +} + +// Details from a failed operation +type BatchFailedResultModel struct { + _ struct{} `type:"structure"` + + // ARN of the resource + Arn *string `locationName:"arn" type:"string"` + + // Error code for the failed operation + Code *string `locationName:"code" type:"string"` + + // ID of the resource + Id *string `locationName:"id" type:"string"` + + // Error message for the failed operation + Message *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s BatchFailedResultModel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchFailedResultModel) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *BatchFailedResultModel) SetArn(v string) *BatchFailedResultModel { + s.Arn = &v + return s +} + +// SetCode sets the Code field's value. +func (s *BatchFailedResultModel) SetCode(v string) *BatchFailedResultModel { + s.Code = &v + return s +} + +// SetId sets the Id field's value. +func (s *BatchFailedResultModel) SetId(v string) *BatchFailedResultModel { + s.Id = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *BatchFailedResultModel) SetMessage(v string) *BatchFailedResultModel { + s.Message = &v + return s } // A list of schedule actions to create (in a request) or that have been created @@ -5765,6 +6803,168 @@ func (s *BatchScheduleActionDeleteResult) SetScheduleActions(v []*ScheduleAction return s } +type BatchStartInput struct { + _ struct{} `type:"structure"` + + ChannelIds []*string `locationName:"channelIds" type:"list"` + + MultiplexIds []*string `locationName:"multiplexIds" type:"list"` +} + +// String returns the string representation +func (s BatchStartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchStartInput) GoString() string { + return s.String() +} + +// SetChannelIds sets the ChannelIds field's value. +func (s *BatchStartInput) SetChannelIds(v []*string) *BatchStartInput { + s.ChannelIds = v + return s +} + +// SetMultiplexIds sets the MultiplexIds field's value. 
+func (s *BatchStartInput) SetMultiplexIds(v []*string) *BatchStartInput { + s.MultiplexIds = v + return s +} + +type BatchStartOutput struct { + _ struct{} `type:"structure"` + + Failed []*BatchFailedResultModel `locationName:"failed" type:"list"` + + Successful []*BatchSuccessfulResultModel `locationName:"successful" type:"list"` +} + +// String returns the string representation +func (s BatchStartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchStartOutput) GoString() string { + return s.String() +} + +// SetFailed sets the Failed field's value. +func (s *BatchStartOutput) SetFailed(v []*BatchFailedResultModel) *BatchStartOutput { + s.Failed = v + return s +} + +// SetSuccessful sets the Successful field's value. +func (s *BatchStartOutput) SetSuccessful(v []*BatchSuccessfulResultModel) *BatchStartOutput { + s.Successful = v + return s +} + +type BatchStopInput struct { + _ struct{} `type:"structure"` + + ChannelIds []*string `locationName:"channelIds" type:"list"` + + MultiplexIds []*string `locationName:"multiplexIds" type:"list"` +} + +// String returns the string representation +func (s BatchStopInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchStopInput) GoString() string { + return s.String() +} + +// SetChannelIds sets the ChannelIds field's value. +func (s *BatchStopInput) SetChannelIds(v []*string) *BatchStopInput { + s.ChannelIds = v + return s +} + +// SetMultiplexIds sets the MultiplexIds field's value. +func (s *BatchStopInput) SetMultiplexIds(v []*string) *BatchStopInput { + s.MultiplexIds = v + return s +} + +type BatchStopOutput struct { + _ struct{} `type:"structure"` + + Failed []*BatchFailedResultModel `locationName:"failed" type:"list"` + + Successful []*BatchSuccessfulResultModel `locationName:"successful" type:"list"` +} + +// String returns the string representation +func (s BatchStopOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchStopOutput) GoString() string { + return s.String() +} + +// SetFailed sets the Failed field's value. +func (s *BatchStopOutput) SetFailed(v []*BatchFailedResultModel) *BatchStopOutput { + s.Failed = v + return s +} + +// SetSuccessful sets the Successful field's value. +func (s *BatchStopOutput) SetSuccessful(v []*BatchSuccessfulResultModel) *BatchStopOutput { + s.Successful = v + return s +} + +// Details from a successful operation +type BatchSuccessfulResultModel struct { + _ struct{} `type:"structure"` + + // ARN of the resource + Arn *string `locationName:"arn" type:"string"` + + // ID of the resource + Id *string `locationName:"id" type:"string"` + + // Current state of the resource + State *string `locationName:"state" type:"string"` +} + +// String returns the string representation +func (s BatchSuccessfulResultModel) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchSuccessfulResultModel) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *BatchSuccessfulResultModel) SetArn(v string) *BatchSuccessfulResultModel { + s.Arn = &v + return s +} + +// SetId sets the Id field's value. +func (s *BatchSuccessfulResultModel) SetId(v string) *BatchSuccessfulResultModel { + s.Id = &v + return s +} + +// SetState sets the State field's value. 
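Since the batch responses split results into Successful and Failed lists, a short sketch of checking both after BatchStart (svc and the channel IDs are again placeholders, not values from this change) may help reviewers:

    // Illustrative sketch only, not part of the vendored SDK; "svc" is an existing
    // *medialive.MediaLive client.
    out, err := svc.BatchStart(&medialive.BatchStartInput{
        ChannelIds: []*string{aws.String("1111111"), aws.String("2222222")},
    })
    if err != nil {
        fmt.Println("batch start failed:", err)
        return
    }

    // The response carries separate Successful and Failed lists, so inspect both.
    for _, okRes := range out.Successful {
        fmt.Printf("started %s (state %s)\n", aws.StringValue(okRes.Id), aws.StringValue(okRes.State))
    }
    for _, bad := range out.Failed {
        fmt.Printf("could not start %s: %s %s\n",
            aws.StringValue(bad.Id), aws.StringValue(bad.Code), aws.StringValue(bad.Message))
    }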
+func (s *BatchSuccessfulResultModel) SetState(v string) *BatchSuccessfulResultModel { + s.State = &v + return s +} + // A request to create actions (add actions to the schedule), delete actions // (remove actions from the schedule), or both create and delete actions. type BatchUpdateScheduleInput struct { @@ -6287,6 +7487,9 @@ type CaptionDestinationSettings struct { // Dvb Sub Destination Settings DvbSubDestinationSettings *DvbSubDestinationSettings `locationName:"dvbSubDestinationSettings" type:"structure"` + // Ebu Tt DDestination Settings + EbuTtDDestinationSettings *EbuTtDDestinationSettings `locationName:"ebuTtDDestinationSettings" type:"structure"` + // Embedded Destination Settings EmbeddedDestinationSettings *EmbeddedDestinationSettings `locationName:"embeddedDestinationSettings" type:"structure"` @@ -6363,6 +7566,12 @@ func (s *CaptionDestinationSettings) SetDvbSubDestinationSettings(v *DvbSubDesti return s } +// SetEbuTtDDestinationSettings sets the EbuTtDDestinationSettings field's value. +func (s *CaptionDestinationSettings) SetEbuTtDDestinationSettings(v *EbuTtDDestinationSettings) *CaptionDestinationSettings { + s.EbuTtDDestinationSettings = v + return s +} + // SetEmbeddedDestinationSettings sets the EmbeddedDestinationSettings field's value. func (s *CaptionDestinationSettings) SetEmbeddedDestinationSettings(v *EmbeddedDestinationSettings) *CaptionDestinationSettings { s.EmbeddedDestinationSettings = v @@ -6568,6 +7777,9 @@ func (s *CaptionSelector) SetSelectorSettings(v *CaptionSelectorSettings) *Capti type CaptionSelectorSettings struct { _ struct{} `type:"structure"` + // Ancillary Source Settings + AncillarySourceSettings *AncillarySourceSettings `locationName:"ancillarySourceSettings" type:"structure"` + // Arib Source Settings AribSourceSettings *AribSourceSettings `locationName:"aribSourceSettings" type:"structure"` @@ -6600,6 +7812,11 @@ func (s CaptionSelectorSettings) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CaptionSelectorSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CaptionSelectorSettings"} + if s.AncillarySourceSettings != nil { + if err := s.AncillarySourceSettings.Validate(); err != nil { + invalidParams.AddNested("AncillarySourceSettings", err.(request.ErrInvalidParams)) + } + } if s.DvbSubSourceSettings != nil { if err := s.DvbSubSourceSettings.Validate(); err != nil { invalidParams.AddNested("DvbSubSourceSettings", err.(request.ErrInvalidParams)) @@ -6627,6 +7844,12 @@ func (s *CaptionSelectorSettings) Validate() error { return nil } +// SetAncillarySourceSettings sets the AncillarySourceSettings field's value. +func (s *CaptionSelectorSettings) SetAncillarySourceSettings(v *AncillarySourceSettings) *CaptionSelectorSettings { + s.AncillarySourceSettings = v + return s +} + // SetAribSourceSettings sets the AribSourceSettings field's value. 
func (s *CaptionSelectorSettings) SetAribSourceSettings(v *AribSourceSettings) *CaptionSelectorSettings { s.AribSourceSettings = v @@ -6663,12 +7886,38 @@ func (s *CaptionSelectorSettings) SetTeletextSourceSettings(v *TeletextSourceSet return s } +type CdiInputSpecification struct { + _ struct{} `type:"structure"` + + // Maximum CDI input resolution + Resolution *string `locationName:"resolution" type:"string" enum:"CdiInputResolution"` +} + +// String returns the string representation +func (s CdiInputSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CdiInputSpecification) GoString() string { + return s.String() +} + +// SetResolution sets the Resolution field's value. +func (s *CdiInputSpecification) SetResolution(v string) *CdiInputSpecification { + s.Resolution = &v + return s +} + type Channel struct { _ struct{} `type:"structure"` // The unique arn of the channel. Arn *string `locationName:"arn" type:"string"` + // Specification of CDI inputs for this channel + CdiInputSpecification *CdiInputSpecification `locationName:"cdiInputSpecification" type:"structure"` + // The class for this channel. STANDARD for a channel with two pipelines or // SINGLE_PIPELINE for a channel with one pipeline. ChannelClass *string `locationName:"channelClass" type:"string" enum:"ChannelClass"` @@ -6690,6 +7939,7 @@ type Channel struct { // List of input attachments for channel. InputAttachments []*InputAttachment `locationName:"inputAttachments" type:"list"` + // Specification of network and file inputs for this channel InputSpecification *InputSpecification `locationName:"inputSpecification" type:"structure"` // The log level being written to CloudWatch Logs. @@ -6729,6 +7979,12 @@ func (s *Channel) SetArn(v string) *Channel { return s } +// SetCdiInputSpecification sets the CdiInputSpecification field's value. +func (s *Channel) SetCdiInputSpecification(v *CdiInputSpecification) *Channel { + s.CdiInputSpecification = v + return s +} + // SetChannelClass sets the ChannelClass field's value. func (s *Channel) SetChannelClass(v string) *Channel { s.ChannelClass = &v @@ -6842,6 +8098,9 @@ type ChannelSummary struct { // The unique arn of the channel. Arn *string `locationName:"arn" type:"string"` + // Specification of CDI inputs for this channel + CdiInputSpecification *CdiInputSpecification `locationName:"cdiInputSpecification" type:"structure"` + // The class for this channel. STANDARD for a channel with two pipelines or // SINGLE_PIPELINE for a channel with one pipeline. ChannelClass *string `locationName:"channelClass" type:"string" enum:"ChannelClass"` @@ -6860,6 +8119,7 @@ type ChannelSummary struct { // List of input attachments for channel. InputAttachments []*InputAttachment `locationName:"inputAttachments" type:"list"` + // Specification of network and file inputs for this channel InputSpecification *InputSpecification `locationName:"inputSpecification" type:"structure"` // The log level being written to CloudWatch Logs. @@ -6896,6 +8156,12 @@ func (s *ChannelSummary) SetArn(v string) *ChannelSummary { return s } +// SetCdiInputSpecification sets the CdiInputSpecification field's value. +func (s *ChannelSummary) SetCdiInputSpecification(v *CdiInputSpecification) *ChannelSummary { + s.CdiInputSpecification = v + return s +} + // SetChannelClass sets the ChannelClass field's value. 
func (s *ChannelSummary) SetChannelClass(v string) *ChannelSummary { s.ChannelClass = &v @@ -6984,8 +8250,8 @@ func (s ColorSpacePassthroughSettings) GoString() string { } type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7002,17 +8268,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7020,27 +8286,29 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } type CreateChannelInput struct { _ struct{} `type:"structure"` + CdiInputSpecification *CdiInputSpecification `locationName:"cdiInputSpecification" type:"structure"` + // A standard channel has two encoding pipelines and a single pipeline channel // only has one. ChannelClass *string `locationName:"channelClass" type:"string" enum:"ChannelClass"` @@ -7113,6 +8381,12 @@ func (s *CreateChannelInput) Validate() error { return nil } +// SetCdiInputSpecification sets the CdiInputSpecification field's value. +func (s *CreateChannelInput) SetCdiInputSpecification(v *CdiInputSpecification) *CreateChannelInput { + s.CdiInputSpecification = v + return s +} + // SetChannelClass sets the ChannelClass field's value. func (s *CreateChannelInput) SetChannelClass(v string) *CreateChannelInput { s.ChannelClass = &v @@ -7206,6 +8480,8 @@ type CreateInputInput struct { Destinations []*InputDestinationRequest `locationName:"destinations" type:"list"` + InputDevices []*InputDeviceSettings `locationName:"inputDevices" type:"list"` + InputSecurityGroups []*string `locationName:"inputSecurityGroups" type:"list"` MediaConnectFlows []*MediaConnectFlowRequest `locationName:"mediaConnectFlows" type:"list"` @@ -7260,6 +8536,12 @@ func (s *CreateInputInput) SetDestinations(v []*InputDestinationRequest) *Create return s } +// SetInputDevices sets the InputDevices field's value. +func (s *CreateInputInput) SetInputDevices(v []*InputDeviceSettings) *CreateInputInput { + s.InputDevices = v + return s +} + // SetInputSecurityGroups sets the InputSecurityGroups field's value. 
func (s *CreateInputInput) SetInputSecurityGroups(v []*string) *CreateInputInput { s.InputSecurityGroups = v @@ -7701,6 +8983,8 @@ type DeleteChannelOutput struct { Arn *string `locationName:"arn" type:"string"` + CdiInputSpecification *CdiInputSpecification `locationName:"cdiInputSpecification" type:"structure"` + // A standard channel has two encoding pipelines and a single pipeline channel // only has one. ChannelClass *string `locationName:"channelClass" type:"string" enum:"ChannelClass"` @@ -7750,6 +9034,12 @@ func (s *DeleteChannelOutput) SetArn(v string) *DeleteChannelOutput { return s } +// SetCdiInputSpecification sets the CdiInputSpecification field's value. +func (s *DeleteChannelOutput) SetCdiInputSpecification(v *CdiInputSpecification) *DeleteChannelOutput { + s.CdiInputSpecification = v + return s +} + // SetChannelClass sets the ChannelClass field's value. func (s *DeleteChannelOutput) SetChannelClass(v string) *DeleteChannelOutput { s.ChannelClass = &v @@ -8140,6 +9430,8 @@ type DeleteMultiplexProgramOutput struct { // Packet identifiers map for a given Multiplex program. PacketIdentifiersMap *MultiplexProgramPacketIdentifiersMap `locationName:"packetIdentifiersMap" type:"structure"` + PipelineDetails []*MultiplexProgramPipelineDetail `locationName:"pipelineDetails" type:"list"` + ProgramName *string `locationName:"programName" type:"string"` } @@ -8171,6 +9463,12 @@ func (s *DeleteMultiplexProgramOutput) SetPacketIdentifiersMap(v *MultiplexProgr return s } +// SetPipelineDetails sets the PipelineDetails field's value. +func (s *DeleteMultiplexProgramOutput) SetPipelineDetails(v []*MultiplexProgramPipelineDetail) *DeleteMultiplexProgramOutput { + s.PipelineDetails = v + return s +} + // SetProgramName sets the ProgramName field's value. func (s *DeleteMultiplexProgramOutput) SetProgramName(v string) *DeleteMultiplexProgramOutput { s.ProgramName = &v @@ -8491,36 +9789,221 @@ func (s DeleteTagsOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation -func (s DeleteTagsOutput) GoString() string { - return s.String() +// GoString returns the string representation +func (s DeleteTagsOutput) GoString() string { + return s.String() +} + +type DescribeChannelInput struct { + _ struct{} `type:"structure"` + + // ChannelId is a required field + ChannelId *string `location:"uri" locationName:"channelId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeChannelInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeChannelInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeChannelInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeChannelInput"} + if s.ChannelId == nil { + invalidParams.Add(request.NewErrParamRequired("ChannelId")) + } + if s.ChannelId != nil && len(*s.ChannelId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ChannelId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChannelId sets the ChannelId field's value. 
+func (s *DescribeChannelInput) SetChannelId(v string) *DescribeChannelInput { + s.ChannelId = &v + return s +} + +type DescribeChannelOutput struct { + _ struct{} `type:"structure"` + + Arn *string `locationName:"arn" type:"string"` + + CdiInputSpecification *CdiInputSpecification `locationName:"cdiInputSpecification" type:"structure"` + + // A standard channel has two encoding pipelines and a single pipeline channel + // only has one. + ChannelClass *string `locationName:"channelClass" type:"string" enum:"ChannelClass"` + + Destinations []*OutputDestination `locationName:"destinations" type:"list"` + + EgressEndpoints []*ChannelEgressEndpoint `locationName:"egressEndpoints" type:"list"` + + // Encoder Settings + EncoderSettings *EncoderSettings `locationName:"encoderSettings" type:"structure"` + + Id *string `locationName:"id" type:"string"` + + InputAttachments []*InputAttachment `locationName:"inputAttachments" type:"list"` + + InputSpecification *InputSpecification `locationName:"inputSpecification" type:"structure"` + + // The log level the user wants for their channel. + LogLevel *string `locationName:"logLevel" type:"string" enum:"LogLevel"` + + Name *string `locationName:"name" type:"string"` + + PipelineDetails []*PipelineDetail `locationName:"pipelineDetails" type:"list"` + + PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` + + RoleArn *string `locationName:"roleArn" type:"string"` + + State *string `locationName:"state" type:"string" enum:"ChannelState"` + + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s DescribeChannelOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeChannelOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DescribeChannelOutput) SetArn(v string) *DescribeChannelOutput { + s.Arn = &v + return s +} + +// SetCdiInputSpecification sets the CdiInputSpecification field's value. +func (s *DescribeChannelOutput) SetCdiInputSpecification(v *CdiInputSpecification) *DescribeChannelOutput { + s.CdiInputSpecification = v + return s +} + +// SetChannelClass sets the ChannelClass field's value. +func (s *DescribeChannelOutput) SetChannelClass(v string) *DescribeChannelOutput { + s.ChannelClass = &v + return s +} + +// SetDestinations sets the Destinations field's value. +func (s *DescribeChannelOutput) SetDestinations(v []*OutputDestination) *DescribeChannelOutput { + s.Destinations = v + return s +} + +// SetEgressEndpoints sets the EgressEndpoints field's value. +func (s *DescribeChannelOutput) SetEgressEndpoints(v []*ChannelEgressEndpoint) *DescribeChannelOutput { + s.EgressEndpoints = v + return s +} + +// SetEncoderSettings sets the EncoderSettings field's value. +func (s *DescribeChannelOutput) SetEncoderSettings(v *EncoderSettings) *DescribeChannelOutput { + s.EncoderSettings = v + return s +} + +// SetId sets the Id field's value. +func (s *DescribeChannelOutput) SetId(v string) *DescribeChannelOutput { + s.Id = &v + return s +} + +// SetInputAttachments sets the InputAttachments field's value. +func (s *DescribeChannelOutput) SetInputAttachments(v []*InputAttachment) *DescribeChannelOutput { + s.InputAttachments = v + return s +} + +// SetInputSpecification sets the InputSpecification field's value. 
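// Usage sketch (illustrative, not part of the generated API): reading the new
// CdiInputSpecification off a DescribeChannel response. Assumes an initialized
// *medialive.MediaLive client named svc, as in the earlier sketch; the channel
// ID is a placeholder.
//
//	out, err := svc.DescribeChannel(&medialive.DescribeChannelInput{
//		ChannelId: aws.String("1234567"), // placeholder channel ID
//	})
//	if err != nil {
//		return err
//	}
//	if spec := out.CdiInputSpecification; spec != nil {
//		fmt.Println("CDI input resolution:", aws.StringValue(spec.Resolution))
//	}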
+func (s *DescribeChannelOutput) SetInputSpecification(v *InputSpecification) *DescribeChannelOutput { + s.InputSpecification = v + return s +} + +// SetLogLevel sets the LogLevel field's value. +func (s *DescribeChannelOutput) SetLogLevel(v string) *DescribeChannelOutput { + s.LogLevel = &v + return s +} + +// SetName sets the Name field's value. +func (s *DescribeChannelOutput) SetName(v string) *DescribeChannelOutput { + s.Name = &v + return s +} + +// SetPipelineDetails sets the PipelineDetails field's value. +func (s *DescribeChannelOutput) SetPipelineDetails(v []*PipelineDetail) *DescribeChannelOutput { + s.PipelineDetails = v + return s +} + +// SetPipelinesRunningCount sets the PipelinesRunningCount field's value. +func (s *DescribeChannelOutput) SetPipelinesRunningCount(v int64) *DescribeChannelOutput { + s.PipelinesRunningCount = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DescribeChannelOutput) SetRoleArn(v string) *DescribeChannelOutput { + s.RoleArn = &v + return s +} + +// SetState sets the State field's value. +func (s *DescribeChannelOutput) SetState(v string) *DescribeChannelOutput { + s.State = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *DescribeChannelOutput) SetTags(v map[string]*string) *DescribeChannelOutput { + s.Tags = v + return s } -type DescribeChannelInput struct { +type DescribeInputDeviceInput struct { _ struct{} `type:"structure"` - // ChannelId is a required field - ChannelId *string `location:"uri" locationName:"channelId" type:"string" required:"true"` + // InputDeviceId is a required field + InputDeviceId *string `location:"uri" locationName:"inputDeviceId" type:"string" required:"true"` } // String returns the string representation -func (s DescribeChannelInput) String() string { +func (s DescribeInputDeviceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeChannelInput) GoString() string { +func (s DescribeInputDeviceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeChannelInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeChannelInput"} - if s.ChannelId == nil { - invalidParams.Add(request.NewErrParamRequired("ChannelId")) +func (s *DescribeInputDeviceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInputDeviceInput"} + if s.InputDeviceId == nil { + invalidParams.Add(request.NewErrParamRequired("InputDeviceId")) } - if s.ChannelId != nil && len(*s.ChannelId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ChannelId", 1)) + if s.InputDeviceId != nil && len(*s.InputDeviceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputDeviceId", 1)) } if invalidParams.Len() > 0 { @@ -8529,147 +10012,219 @@ func (s *DescribeChannelInput) Validate() error { return nil } -// SetChannelId sets the ChannelId field's value. -func (s *DescribeChannelInput) SetChannelId(v string) *DescribeChannelInput { - s.ChannelId = &v +// SetInputDeviceId sets the InputDeviceId field's value. +func (s *DescribeInputDeviceInput) SetInputDeviceId(v string) *DescribeInputDeviceInput { + s.InputDeviceId = &v return s } -type DescribeChannelOutput struct { +type DescribeInputDeviceOutput struct { _ struct{} `type:"structure"` Arn *string `locationName:"arn" type:"string"` - // A standard channel has two encoding pipelines and a single pipeline channel - // only has one. 
- ChannelClass *string `locationName:"channelClass" type:"string" enum:"ChannelClass"` - - Destinations []*OutputDestination `locationName:"destinations" type:"list"` + // The state of the connection between the input device and AWS. + ConnectionState *string `locationName:"connectionState" type:"string" enum:"InputDeviceConnectionState"` - EgressEndpoints []*ChannelEgressEndpoint `locationName:"egressEndpoints" type:"list"` + // The status of the action to synchronize the device configuration. If you + // change the configuration of the input device (for example, the maximum bitrate), + // MediaLive sends the new data to the device. The device might not update itself + // immediately. SYNCED means the device has updated its configuration. SYNCING + // means that it has not updated its configuration. + DeviceSettingsSyncState *string `locationName:"deviceSettingsSyncState" type:"string" enum:"DeviceSettingsSyncState"` - // Encoder Settings - EncoderSettings *EncoderSettings `locationName:"encoderSettings" type:"structure"` + // Settings that describe the active source from the input device, and the video + // characteristics of that source. + HdDeviceSettings *InputDeviceHdSettings `locationName:"hdDeviceSettings" type:"structure"` Id *string `locationName:"id" type:"string"` - InputAttachments []*InputAttachment `locationName:"inputAttachments" type:"list"` - - InputSpecification *InputSpecification `locationName:"inputSpecification" type:"structure"` - - // The log level the user wants for their channel. - LogLevel *string `locationName:"logLevel" type:"string" enum:"LogLevel"` + MacAddress *string `locationName:"macAddress" type:"string"` Name *string `locationName:"name" type:"string"` - PipelineDetails []*PipelineDetail `locationName:"pipelineDetails" type:"list"` - - PipelinesRunningCount *int64 `locationName:"pipelinesRunningCount" type:"integer"` - - RoleArn *string `locationName:"roleArn" type:"string"` + // The network settings for the input device. + NetworkSettings *InputDeviceNetworkSettings `locationName:"networkSettings" type:"structure"` - State *string `locationName:"state" type:"string" enum:"ChannelState"` + SerialNumber *string `locationName:"serialNumber" type:"string"` - Tags map[string]*string `locationName:"tags" type:"map"` + // The type of the input device. For an AWS Elemental Link device that outputs + // resolutions up to 1080, choose "HD". + Type *string `locationName:"type" type:"string" enum:"InputDeviceType"` } // String returns the string representation -func (s DescribeChannelOutput) String() string { +func (s DescribeInputDeviceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeChannelOutput) GoString() string { +func (s DescribeInputDeviceOutput) GoString() string { return s.String() } // SetArn sets the Arn field's value. -func (s *DescribeChannelOutput) SetArn(v string) *DescribeChannelOutput { +func (s *DescribeInputDeviceOutput) SetArn(v string) *DescribeInputDeviceOutput { s.Arn = &v return s } -// SetChannelClass sets the ChannelClass field's value. -func (s *DescribeChannelOutput) SetChannelClass(v string) *DescribeChannelOutput { - s.ChannelClass = &v +// SetConnectionState sets the ConnectionState field's value. +func (s *DescribeInputDeviceOutput) SetConnectionState(v string) *DescribeInputDeviceOutput { + s.ConnectionState = &v return s } -// SetDestinations sets the Destinations field's value. 
-func (s *DescribeChannelOutput) SetDestinations(v []*OutputDestination) *DescribeChannelOutput { - s.Destinations = v +// SetDeviceSettingsSyncState sets the DeviceSettingsSyncState field's value. +func (s *DescribeInputDeviceOutput) SetDeviceSettingsSyncState(v string) *DescribeInputDeviceOutput { + s.DeviceSettingsSyncState = &v return s } -// SetEgressEndpoints sets the EgressEndpoints field's value. -func (s *DescribeChannelOutput) SetEgressEndpoints(v []*ChannelEgressEndpoint) *DescribeChannelOutput { - s.EgressEndpoints = v +// SetHdDeviceSettings sets the HdDeviceSettings field's value. +func (s *DescribeInputDeviceOutput) SetHdDeviceSettings(v *InputDeviceHdSettings) *DescribeInputDeviceOutput { + s.HdDeviceSettings = v return s } -// SetEncoderSettings sets the EncoderSettings field's value. -func (s *DescribeChannelOutput) SetEncoderSettings(v *EncoderSettings) *DescribeChannelOutput { - s.EncoderSettings = v +// SetId sets the Id field's value. +func (s *DescribeInputDeviceOutput) SetId(v string) *DescribeInputDeviceOutput { + s.Id = &v return s } -// SetId sets the Id field's value. -func (s *DescribeChannelOutput) SetId(v string) *DescribeChannelOutput { - s.Id = &v +// SetMacAddress sets the MacAddress field's value. +func (s *DescribeInputDeviceOutput) SetMacAddress(v string) *DescribeInputDeviceOutput { + s.MacAddress = &v return s } -// SetInputAttachments sets the InputAttachments field's value. -func (s *DescribeChannelOutput) SetInputAttachments(v []*InputAttachment) *DescribeChannelOutput { - s.InputAttachments = v +// SetName sets the Name field's value. +func (s *DescribeInputDeviceOutput) SetName(v string) *DescribeInputDeviceOutput { + s.Name = &v return s } -// SetInputSpecification sets the InputSpecification field's value. -func (s *DescribeChannelOutput) SetInputSpecification(v *InputSpecification) *DescribeChannelOutput { - s.InputSpecification = v +// SetNetworkSettings sets the NetworkSettings field's value. +func (s *DescribeInputDeviceOutput) SetNetworkSettings(v *InputDeviceNetworkSettings) *DescribeInputDeviceOutput { + s.NetworkSettings = v return s } -// SetLogLevel sets the LogLevel field's value. -func (s *DescribeChannelOutput) SetLogLevel(v string) *DescribeChannelOutput { - s.LogLevel = &v +// SetSerialNumber sets the SerialNumber field's value. +func (s *DescribeInputDeviceOutput) SetSerialNumber(v string) *DescribeInputDeviceOutput { + s.SerialNumber = &v return s } -// SetName sets the Name field's value. -func (s *DescribeChannelOutput) SetName(v string) *DescribeChannelOutput { - s.Name = &v +// SetType sets the Type field's value. +func (s *DescribeInputDeviceOutput) SetType(v string) *DescribeInputDeviceOutput { + s.Type = &v return s } -// SetPipelineDetails sets the PipelineDetails field's value. 
-func (s *DescribeChannelOutput) SetPipelineDetails(v []*PipelineDetail) *DescribeChannelOutput { - s.PipelineDetails = v +type DescribeInputDeviceThumbnailInput struct { + _ struct{} `type:"structure"` + + // Accept is a required field + Accept *string `location:"header" locationName:"accept" type:"string" required:"true" enum:"AcceptHeader"` + + // InputDeviceId is a required field + InputDeviceId *string `location:"uri" locationName:"inputDeviceId" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeInputDeviceThumbnailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInputDeviceThumbnailInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInputDeviceThumbnailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInputDeviceThumbnailInput"} + if s.Accept == nil { + invalidParams.Add(request.NewErrParamRequired("Accept")) + } + if s.InputDeviceId == nil { + invalidParams.Add(request.NewErrParamRequired("InputDeviceId")) + } + if s.InputDeviceId != nil && len(*s.InputDeviceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputDeviceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccept sets the Accept field's value. +func (s *DescribeInputDeviceThumbnailInput) SetAccept(v string) *DescribeInputDeviceThumbnailInput { + s.Accept = &v return s } -// SetPipelinesRunningCount sets the PipelinesRunningCount field's value. -func (s *DescribeChannelOutput) SetPipelinesRunningCount(v int64) *DescribeChannelOutput { - s.PipelinesRunningCount = &v +// SetInputDeviceId sets the InputDeviceId field's value. +func (s *DescribeInputDeviceThumbnailInput) SetInputDeviceId(v string) *DescribeInputDeviceThumbnailInput { + s.InputDeviceId = &v return s } -// SetRoleArn sets the RoleArn field's value. -func (s *DescribeChannelOutput) SetRoleArn(v string) *DescribeChannelOutput { - s.RoleArn = &v +type DescribeInputDeviceThumbnailOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadCloser `locationName:"body" type:"blob"` + + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + ContentType *string `location:"header" locationName:"Content-Type" type:"string" enum:"ContentType"` + + ETag *string `location:"header" locationName:"ETag" type:"string"` + + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` +} + +// String returns the string representation +func (s DescribeInputDeviceThumbnailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInputDeviceThumbnailOutput) GoString() string { + return s.String() +} + +// SetBody sets the Body field's value. +func (s *DescribeInputDeviceThumbnailOutput) SetBody(v io.ReadCloser) *DescribeInputDeviceThumbnailOutput { + s.Body = v return s } -// SetState sets the State field's value. -func (s *DescribeChannelOutput) SetState(v string) *DescribeChannelOutput { - s.State = &v +// SetContentLength sets the ContentLength field's value. +func (s *DescribeInputDeviceThumbnailOutput) SetContentLength(v int64) *DescribeInputDeviceThumbnailOutput { + s.ContentLength = &v return s } -// SetTags sets the Tags field's value. 
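// Usage sketch (illustrative, not part of the generated API): consuming the
// thumbnail payload returned by DescribeInputDeviceThumbnail above. Assumes an
// initialized client svc plus the io and os imports; the device ID, the Accept
// header value ("image/jpeg"), and the output file name are placeholders.
//
//	thumb, err := svc.DescribeInputDeviceThumbnail(&medialive.DescribeInputDeviceThumbnailInput{
//		InputDeviceId: aws.String("hd-0000000000000000"), // placeholder device ID
//		Accept:        aws.String("image/jpeg"),
//	})
//	if err != nil {
//		return err
//	}
//	defer thumb.Body.Close()
//
//	f, err := os.Create("thumbnail.jpg")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	_, err = io.Copy(f, thumb.Body)
//	return err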
-func (s *DescribeChannelOutput) SetTags(v map[string]*string) *DescribeChannelOutput { - s.Tags = v +// SetContentType sets the ContentType field's value. +func (s *DescribeInputDeviceThumbnailOutput) SetContentType(v string) *DescribeInputDeviceThumbnailOutput { + s.ContentType = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *DescribeInputDeviceThumbnailOutput) SetETag(v string) *DescribeInputDeviceThumbnailOutput { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *DescribeInputDeviceThumbnailOutput) SetLastModified(v time.Time) *DescribeInputDeviceThumbnailOutput { + s.LastModified = &v return s } @@ -8726,6 +10281,8 @@ type DescribeInputOutput struct { // A standard input has two sources and a single pipeline input only has one. InputClass *string `locationName:"inputClass" type:"string" enum:"InputClass"` + InputDevices []*InputDeviceSettings `locationName:"inputDevices" type:"list"` + // There are two types of input sources, static and dynamic. If an input source // is dynamic you canchange the source url of the input dynamically using an // input switch action. However, the only input typeto support a dynamic url @@ -8789,6 +10346,12 @@ func (s *DescribeInputOutput) SetInputClass(v string) *DescribeInputOutput { return s } +// SetInputDevices sets the InputDevices field's value. +func (s *DescribeInputOutput) SetInputDevices(v []*InputDeviceSettings) *DescribeInputOutput { + s.InputDevices = v + return s +} + // SetInputSourceType sets the InputSourceType field's value. func (s *DescribeInputOutput) SetInputSourceType(v string) *DescribeInputOutput { s.InputSourceType = &v @@ -9144,6 +10707,8 @@ type DescribeMultiplexProgramOutput struct { // Packet identifiers map for a given Multiplex program. PacketIdentifiersMap *MultiplexProgramPacketIdentifiersMap `locationName:"packetIdentifiersMap" type:"structure"` + PipelineDetails []*MultiplexProgramPipelineDetail `locationName:"pipelineDetails" type:"list"` + ProgramName *string `locationName:"programName" type:"string"` } @@ -9175,6 +10740,12 @@ func (s *DescribeMultiplexProgramOutput) SetPacketIdentifiersMap(v *MultiplexPro return s } +// SetPipelineDetails sets the PipelineDetails field's value. +func (s *DescribeMultiplexProgramOutput) SetPipelineDetails(v []*MultiplexProgramPipelineDetail) *DescribeMultiplexProgramOutput { + s.PipelineDetails = v + return s +} + // SetProgramName sets the ProgramName field's value. func (s *DescribeMultiplexProgramOutput) SetProgramName(v string) *DescribeMultiplexProgramOutput { s.ProgramName = &v @@ -10294,6 +11865,66 @@ func (s *Eac3Settings) SetSurroundMode(v string) *Eac3Settings { return s } +// Ebu Tt DDestination Settings +type EbuTtDDestinationSettings struct { + _ struct{} `type:"structure"` + + // Specifies how to handle the gap between the lines (in multi-line captions).- + // enabled: Fill with the captions background color (as specified in the input + // captions).- disabled: Leave the gap unfilled. + FillLineGap *string `locationName:"fillLineGap" type:"string" enum:"EbuTtDFillLineGapControl"` + + // Specifies the font family to include in the font data attached to the EBU-TT + // captions. Valid only if styleControl is set to include. If you leave this + // field empty, the font family is set to "monospaced". (If styleControl is + // set to exclude, the font family is always set to "monospaced".)You specify + // only the font family. 
All other style information (color, bold, position + // and so on) is copied from the input captions. The size is always set to 100% + // to allow the downstream player to choose the size.- Enter a list of font + // families, as a comma-separated list of font names, in order of preference. + // The name can be a font family (such as “Arial”), or a generic font family + // (such as “serif”), or “default” (to let the downstream player choose + // the font).- Leave blank to set the family to “monospace”. + FontFamily *string `locationName:"fontFamily" type:"string"` + + // Specifies the style information (font color, font position, and so on) to + // include in the font data that is attached to the EBU-TT captions.- include: + // Take the style information (font color, font position, and so on) from the + // source captions and include that information in the font data attached to + // the EBU-TT captions. This option is valid only if the source captions are + // Embedded or Teletext.- exclude: In the font data attached to the EBU-TT captions, + // set the font family to "monospaced". Do not include any other style information. + StyleControl *string `locationName:"styleControl" type:"string" enum:"EbuTtDDestinationStyleControl"` +} + +// String returns the string representation +func (s EbuTtDDestinationSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EbuTtDDestinationSettings) GoString() string { + return s.String() +} + +// SetFillLineGap sets the FillLineGap field's value. +func (s *EbuTtDDestinationSettings) SetFillLineGap(v string) *EbuTtDDestinationSettings { + s.FillLineGap = &v + return s +} + +// SetFontFamily sets the FontFamily field's value. +func (s *EbuTtDDestinationSettings) SetFontFamily(v string) *EbuTtDDestinationSettings { + s.FontFamily = &v + return s +} + +// SetStyleControl sets the StyleControl field's value. +func (s *EbuTtDDestinationSettings) SetStyleControl(v string) *EbuTtDDestinationSettings { + s.StyleControl = &v + return s +} + // Embedded Destination Settings type EmbeddedDestinationSettings struct { _ struct{} `type:"structure"` @@ -10414,6 +12045,9 @@ type EncoderSettings struct { // Settings for caption decriptions CaptionDescriptions []*CaptionDescription `locationName:"captionDescriptions" type:"list"` + // Feature Activations + FeatureActivations *FeatureActivations `locationName:"featureActivations" type:"structure"` + // Configuration settings that apply to the event as a whole. GlobalConfiguration *GlobalConfiguration `locationName:"globalConfiguration" type:"structure"` @@ -10559,6 +12193,12 @@ func (s *EncoderSettings) SetCaptionDescriptions(v []*CaptionDescription) *Encod return s } +// SetFeatureActivations sets the FeatureActivations field's value. +func (s *EncoderSettings) SetFeatureActivations(v *FeatureActivations) *EncoderSettings { + s.FeatureActivations = v + return s +} + // SetGlobalConfiguration sets the GlobalConfiguration field's value. func (s *EncoderSettings) SetGlobalConfiguration(v *GlobalConfiguration) *EncoderSettings { s.GlobalConfiguration = v @@ -10589,6 +12229,33 @@ func (s *EncoderSettings) SetVideoDescriptions(v []*VideoDescription) *EncoderSe return s } +// Feature Activations +type FeatureActivations struct { + _ struct{} `type:"structure"` + + // Enables the Input Prepare feature. 
You can create Input Prepare actions in + // the schedule only if this feature is enabled.If you disable the feature on + // an existing schedule, make sure that you first delete all input prepare actions + // from the schedule. + InputPrepareScheduleActions *string `locationName:"inputPrepareScheduleActions" type:"string" enum:"FeatureActivationsInputPrepareScheduleActions"` +} + +// String returns the string representation +func (s FeatureActivations) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FeatureActivations) GoString() string { + return s.String() +} + +// SetInputPrepareScheduleActions sets the InputPrepareScheduleActions field's value. +func (s *FeatureActivations) SetInputPrepareScheduleActions(v string) *FeatureActivations { + s.InputPrepareScheduleActions = &v + return s +} + // Fec Output Settings type FecOutputSettings struct { _ struct{} `type:"structure"` @@ -10704,6 +12371,14 @@ type Fmp4HlsSettings struct { // List all the audio groups that are used with the video output stream. Input // all the audio GROUP-IDs that are associated to the video, separate by ','. AudioRenditionSets *string `locationName:"audioRenditionSets" type:"string"` + + // If set to passthrough, Nielsen inaudible tones for media tracking will be + // detected in the input audio and an equivalent ID3 tag will be inserted in + // the output. + NielsenId3Behavior *string `locationName:"nielsenId3Behavior" type:"string" enum:"Fmp4NielsenId3Behavior"` + + // When set to passthrough, timed metadata is passed through from input to output. + TimedMetadataBehavior *string `locationName:"timedMetadataBehavior" type:"string" enum:"Fmp4TimedMetadataBehavior"` } // String returns the string representation @@ -10722,6 +12397,18 @@ func (s *Fmp4HlsSettings) SetAudioRenditionSets(v string) *Fmp4HlsSettings { return s } +// SetNielsenId3Behavior sets the NielsenId3Behavior field's value. +func (s *Fmp4HlsSettings) SetNielsenId3Behavior(v string) *Fmp4HlsSettings { + s.NielsenId3Behavior = &v + return s +} + +// SetTimedMetadataBehavior sets the TimedMetadataBehavior field's value. +func (s *Fmp4HlsSettings) SetTimedMetadataBehavior(v string) *Fmp4HlsSettings { + s.TimedMetadataBehavior = &v + return s +} + // Settings to specify if an action follows another. type FollowModeScheduleActionStartSettings struct { _ struct{} `type:"structure"` @@ -10777,8 +12464,8 @@ func (s *FollowModeScheduleActionStartSettings) SetReferenceActionName(v string) } type ForbiddenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10795,17 +12482,17 @@ func (s ForbiddenException) GoString() string { func newErrorForbiddenException(v protocol.ResponseMetadata) error { return &ForbiddenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ForbiddenException) Code() string { +func (s *ForbiddenException) Code() string { return "ForbiddenException" } // Message returns the exception's message. -func (s ForbiddenException) Message() string { +func (s *ForbiddenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10813,22 +12500,22 @@ func (s ForbiddenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ForbiddenException) OrigErr() error { +func (s *ForbiddenException) OrigErr() error { return nil } -func (s ForbiddenException) Error() string { +func (s *ForbiddenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ForbiddenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ForbiddenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ForbiddenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ForbiddenException) RequestID() string { + return s.RespMetadata.RequestID } // Frame Capture Group Settings @@ -10836,12 +12523,12 @@ type FrameCaptureGroupSettings struct { _ struct{} `type:"structure"` // The destination for the frame capture files. Either the URI for an Amazon - // S3 bucket and object, plus a file name prefix (for example, s3ssl://sportsDelivery/highlights/20180820/curling_) + // S3 bucket and object, plus a file name prefix (for example, s3ssl://sportsDelivery/highlights/20180820/curling-) // or the URI for a MediaStore container, plus a file name prefix (for example, - // mediastoressl://sportsDelivery/20180820/curling_). The final file names consist - // of the prefix from the destination field (for example, "curling_") + name + // mediastoressl://sportsDelivery/20180820/curling-). The final file names consist + // of the prefix from the destination field (for example, "curling-") + name // modifier + the counter (5 digits, starting from 00001) + extension (which - // is always .jpg). For example, curlingLow.00001.jpg + // is always .jpg). For example, curling-low.00001.jpg // // Destination is a required field Destination *OutputLocationRef `locationName:"destination" type:"structure" required:"true"` @@ -10954,8 +12641,8 @@ func (s *FrameCaptureSettings) SetCaptureIntervalUnits(v string) *FrameCaptureSe } type GatewayTimeoutException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10972,17 +12659,17 @@ func (s GatewayTimeoutException) GoString() string { func newErrorGatewayTimeoutException(v protocol.ResponseMetadata) error { return &GatewayTimeoutException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s GatewayTimeoutException) Code() string { +func (s *GatewayTimeoutException) Code() string { return "GatewayTimeoutException" } // Message returns the exception's message. -func (s GatewayTimeoutException) Message() string { +func (s *GatewayTimeoutException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10990,22 +12677,22 @@ func (s GatewayTimeoutException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s GatewayTimeoutException) OrigErr() error { +func (s *GatewayTimeoutException) OrigErr() error { return nil } -func (s GatewayTimeoutException) Error() string { +func (s *GatewayTimeoutException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
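// Usage sketch (illustrative, not part of the generated API): the exception types
// in this file now use pointer receivers and an exported RespMetadata field, which
// lets callers match them with Go 1.13 error wrapping. A minimal sketch, assuming
// the errors and fmt imports and the client used in the earlier sketches.
//
//	func startOrExplain(svc *medialive.MediaLive, channelID string) error {
//		_, err := svc.BatchStart(&medialive.BatchStartInput{
//			ChannelIds: aws.StringSlice([]string{channelID}),
//		})
//		var forbidden *medialive.ForbiddenException
//		if errors.As(err, &forbidden) {
//			return fmt.Errorf("forbidden (HTTP %d): %s", forbidden.StatusCode(), forbidden.Message())
//		}
//		return err
//	}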
-func (s GatewayTimeoutException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *GatewayTimeoutException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s GatewayTimeoutException) RequestID() string { - return s.respMetadata.RequestID +func (s *GatewayTimeoutException) RequestID() string { + return s.RespMetadata.RequestID } // Global Configuration @@ -11026,8 +12713,8 @@ type GlobalConfiguration struct { // Settings for system actions when input is lost. InputLossBehavior *InputLossBehavior `locationName:"inputLossBehavior" type:"structure"` - // Indicates how MediaLive pipelines are synchronized.PIPELINELOCKING - MediaLive - // will attempt to synchronize the output of each pipeline to the other.EPOCHLOCKING + // Indicates how MediaLive pipelines are synchronized.PIPELINE_LOCKING - MediaLive + // will attempt to synchronize the output of each pipeline to the other.EPOCH_LOCKING // - MediaLive will attempt to synchronize the output of each pipeline to the // Unix epoch. OutputLockingMode *string `locationName:"outputLockingMode" type:"string" enum:"GlobalConfigurationOutputLockingMode"` @@ -11150,6 +12837,30 @@ func (s *H264ColorSpaceSettings) SetRec709Settings(v *Rec709Settings) *H264Color return s } +// H264 Filter Settings +type H264FilterSettings struct { + _ struct{} `type:"structure"` + + // Temporal Filter Settings + TemporalFilterSettings *TemporalFilterSettings `locationName:"temporalFilterSettings" type:"structure"` +} + +// String returns the string representation +func (s H264FilterSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s H264FilterSettings) GoString() string { + return s.String() +} + +// SetTemporalFilterSettings sets the TemporalFilterSettings field's value. +func (s *H264FilterSettings) SetTemporalFilterSettings(v *TemporalFilterSettings) *H264FilterSettings { + s.TemporalFilterSettings = v + return s +} + // H264 Settings type H264Settings struct { _ struct{} `type:"structure"` @@ -11185,6 +12896,9 @@ type H264Settings struct { // Entropy encoding mode. Use cabac (must be in Main or High profile) or cavlc. EntropyEncoding *string `locationName:"entropyEncoding" type:"string" enum:"H264EntropyEncoding"` + // Optional filters that you can apply to an encode. + FilterSettings *H264FilterSettings `locationName:"filterSettings" type:"structure"` + // Four bit AFD value to write on all frames of video in the output stream. // Only valid when afdSignaling is set to 'Fixed'. FixedAfd *string `locationName:"fixedAfd" type:"string" enum:"FixedAfd"` @@ -11193,6 +12907,15 @@ type H264Settings struct { // or 'pop' on I-frames. FlickerAq *string `locationName:"flickerAq" type:"string" enum:"H264FlickerAq"` + // This setting applies only when scan type is "interlaced." It controls whether + // coding is performed on a field basis or on a frame basis. (When the video + // is progressive, the coding is always performed on a frame basis.)enabled: + // Force MediaLive to code on a field basis, so that odd and even sets of fields + // are coded separately.disabled: Code the two sets of fields separately (on + // a field basis) or together (on a frame basis using PAFF), depending on what + // is most appropriate for the content. + ForceFieldPictures *string `locationName:"forceFieldPictures" type:"string" enum:"H264ForceFieldPictures"` + // This field indicates how the output video frame rate is specified. 
If "specified" // is selected then the output video frame rate is determined by framerateNumerator // and framerateDenominator, else if "initializeFromSource" is selected then @@ -11264,11 +12987,19 @@ type H264Settings struct { ParDenominator *int64 `locationName:"parDenominator" min:"1" type:"integer"` // Pixel Aspect Ratio numerator. - ParNumerator *int64 `locationName:"parNumerator" type:"integer"` + ParNumerator *int64 `locationName:"parNumerator" min:"1" type:"integer"` // H.264 Profile. Profile *string `locationName:"profile" type:"string" enum:"H264Profile"` + // Leave as STANDARD_QUALITY or choose a different value (which might result + // in additional costs to run the channel).- ENHANCED_QUALITY: Produces a slightly + // better video quality without an increase in the bitrate. Has an effect only + // when the Rate control mode is QVBR or CBR. If this channel is in a MediaLive + // multiplex, the value must be ENHANCED_QUALITY.- STANDARD_QUALITY: Valid for + // any Rate control mode. + QualityLevel *string `locationName:"qualityLevel" type:"string" enum:"H264QualityLevel"` + // Controls the target quality for the video encode. Applies only when the rate // control mode is QVBR. Set values for the QVBR quality level field and Max // bitrate field that suit your most important viewing devices. Recommended @@ -11359,6 +13090,9 @@ func (s *H264Settings) Validate() error { if s.ParDenominator != nil && *s.ParDenominator < 1 { invalidParams.Add(request.NewErrParamMinValue("ParDenominator", 1)) } + if s.ParNumerator != nil && *s.ParNumerator < 1 { + invalidParams.Add(request.NewErrParamMinValue("ParNumerator", 1)) + } if s.QvbrQualityLevel != nil && *s.QvbrQualityLevel < 1 { invalidParams.Add(request.NewErrParamMinValue("QvbrQualityLevel", 1)) } @@ -11420,6 +13154,12 @@ func (s *H264Settings) SetEntropyEncoding(v string) *H264Settings { return s } +// SetFilterSettings sets the FilterSettings field's value. +func (s *H264Settings) SetFilterSettings(v *H264FilterSettings) *H264Settings { + s.FilterSettings = v + return s +} + // SetFixedAfd sets the FixedAfd field's value. func (s *H264Settings) SetFixedAfd(v string) *H264Settings { s.FixedAfd = &v @@ -11432,6 +13172,12 @@ func (s *H264Settings) SetFlickerAq(v string) *H264Settings { return s } +// SetForceFieldPictures sets the ForceFieldPictures field's value. +func (s *H264Settings) SetForceFieldPictures(v string) *H264Settings { + s.ForceFieldPictures = &v + return s +} + // SetFramerateControl sets the FramerateControl field's value. func (s *H264Settings) SetFramerateControl(v string) *H264Settings { s.FramerateControl = &v @@ -11534,6 +13280,12 @@ func (s *H264Settings) SetProfile(v string) *H264Settings { return s } +// SetQualityLevel sets the QualityLevel field's value. +func (s *H264Settings) SetQualityLevel(v string) *H264Settings { + s.QualityLevel = &v + return s +} + // SetQvbrQualityLevel sets the QvbrQualityLevel field's value. func (s *H264Settings) SetQvbrQualityLevel(v int64) *H264Settings { s.QvbrQualityLevel = &v @@ -11645,9 +13397,33 @@ func (s *H265ColorSpaceSettings) SetRec601Settings(v *Rec601Settings) *H265Color return s } -// SetRec709Settings sets the Rec709Settings field's value. -func (s *H265ColorSpaceSettings) SetRec709Settings(v *Rec709Settings) *H265ColorSpaceSettings { - s.Rec709Settings = v +// SetRec709Settings sets the Rec709Settings field's value. 
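// Usage sketch (illustrative, not part of the generated API): exercising the new
// H264Settings fields above with the generated fluent setters. The enum literal
// "ENHANCED_QUALITY" follows the QualityLevel field documentation and is an
// assumption about the exact constant value.
//
//	h264 := &medialive.H264Settings{}
//	h264.SetQualityLevel("ENHANCED_QUALITY").
//		SetParNumerator(1). // ParNumerator now carries a minimum of 1, enforced in Validate()
//		SetParDenominator(1)
//	if err := h264.Validate(); err != nil {
//		return err
//	}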
+func (s *H265ColorSpaceSettings) SetRec709Settings(v *Rec709Settings) *H265ColorSpaceSettings { + s.Rec709Settings = v + return s +} + +// H265 Filter Settings +type H265FilterSettings struct { + _ struct{} `type:"structure"` + + // Temporal Filter Settings + TemporalFilterSettings *TemporalFilterSettings `locationName:"temporalFilterSettings" type:"structure"` +} + +// String returns the string representation +func (s H265FilterSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s H265FilterSettings) GoString() string { + return s.String() +} + +// SetTemporalFilterSettings sets the TemporalFilterSettings field's value. +func (s *H265FilterSettings) SetTemporalFilterSettings(v *TemporalFilterSettings) *H265FilterSettings { + s.TemporalFilterSettings = v return s } @@ -11684,6 +13460,9 @@ type H265Settings struct { // Color Space settings ColorSpaceSettings *H265ColorSpaceSettings `locationName:"colorSpaceSettings" type:"structure"` + // Optional filters that you can apply to an encode. + FilterSettings *H265FilterSettings `locationName:"filterSettings" type:"structure"` + // Four bit AFD value to write on all frames of video in the output stream. // Only valid when afdSignaling is set to 'Fixed'. FixedAfd *string `locationName:"fixedAfd" type:"string" enum:"FixedAfd"` @@ -11758,7 +13537,10 @@ type H265Settings struct { // when it is constrained by themaximum bitrate. Recommended if you or your // viewers pay for bandwidth.CBR: Quality varies, depending on the video complexity. // Recommended only if you distributeyour assets to devices that cannot handle - // variable bitrates. + // variable bitrates.Multiplex: This rate control mode is only supported (and + // is required) when the video is beingdelivered to a MediaLive Multiplex in + // which case the rate control configuration is controlledby the properties + // within the Multiplex Program. RateControlMode *string `locationName:"rateControlMode" type:"string" enum:"H265RateControlMode"` // Sets the scan type of the output to progressive or top-field-first interlaced. @@ -11878,6 +13660,12 @@ func (s *H265Settings) SetColorSpaceSettings(v *H265ColorSpaceSettings) *H265Set return s } +// SetFilterSettings sets the FilterSettings field's value. +func (s *H265Settings) SetFilterSettings(v *H265FilterSettings) *H265Settings { + s.FilterSettings = v + return s +} + // SetFixedAfd sets the FixedAfd field's value. func (s *H265Settings) SetFixedAfd(v string) *H265Settings { s.FixedAfd = &v @@ -12314,10 +14102,10 @@ type HlsGroupSettings struct { // For example, #EXT-X-BYTERANGE:160364@1461888" IFrameOnlyPlaylists *string `locationName:"iFrameOnlyPlaylists" type:"string" enum:"IFrameOnlyPlaylistType"` - // Applies only if Mode field is LIVE. Specifies the maximum number of segments + // Applies only if Mode field is LIVE.Specifies the maximum number of segments // in the media manifest file. After this maximum, older segments are removed - // from the media manifest. This number must be less than or equal to the Keep - // Segments field. + // from the media manifest. This number must be smaller than the number in the + // Keep Segments field. IndexNSegments *int64 `locationName:"indexNSegments" min:"3" type:"integer"` // Parameter that control output group behavior on input loss. @@ -12335,8 +14123,14 @@ type HlsGroupSettings struct { // constantIv value. IvSource *string `locationName:"ivSource" type:"string" enum:"HlsIvSource"` - // Applies only if Mode field is LIVE. 
Specifies the number of media segments - // (.ts files) to retain in the destination directory. + // Applies only if Mode field is LIVE.Specifies the number of media segments + // to retain in the destination directory. This number should be bigger than + // indexNSegments (Num segments). We recommend (value = (2 x indexNsegments) + // + 1).If this "keep segments" number is too low, the following might happen: + // the player is still reading a media manifest file that lists this segment, + // but that segment has been removed from the destination directory (as directed + // by indexNSegments). This situation would result in a 404 HTTP error on the + // player. KeepSegments *int64 `locationName:"keepSegments" min:"1" type:"integer"` // The value specifies how the key is represented in the resource identified @@ -12371,9 +14165,10 @@ type HlsGroupSettings struct { // converting it to a "VOD" type manifest on completion of the stream. Mode *string `locationName:"mode" type:"string" enum:"HlsMode"` - // MANIFESTSANDSEGMENTS: Generates manifests (master manifest, if applicable, - // and media manifests) for this output group.SEGMENTSONLY: Does not generate - // any manifests for this output group. + // MANIFESTS_AND_SEGMENTS: Generates manifests (master manifest, if applicable, + // and media manifests) for this output group.VARIANT_MANIFESTS_AND_SEGMENTS: + // Generates media manifests for this output group, but not a master manifest.SEGMENTS_ONLY: + // Does not generate any manifests for this output group. OutputSelection *string `locationName:"outputSelection" type:"string" enum:"HlsOutputSelection"` // Includes or excludes EXT-X-PROGRAM-DATE-TIME tag in .m3u8 manifest files. @@ -12425,7 +14220,7 @@ type HlsGroupSettings struct { // Provides an extra millisecond delta offset to fine tune the timestamps. TimestampDeltaMilliseconds *int64 `locationName:"timestampDeltaMilliseconds" type:"integer"` - // SEGMENTEDFILES: Emit the program as segments - multiple .ts media files.SINGLEFILE: + // SEGMENTED_FILES: Emit the program as segments - multiple .ts media files.SINGLE_FILE: // Applies only if Mode field is VOD. Emit the program as a single .ts media // file. The media manifest includes #EXT-X-BYTERANGE tags to index segments // for playback. A typical use for this value is when sending the output to @@ -13131,8 +14926,7 @@ func (s *HlsWebdavSettings) SetRestartDelay(v int64) *HlsWebdavSettings { return s } -// Settings to configure an action so that it occurs immediately. This is only -// supported for input switch actions currently. +// Settings to configure an action so that it occurs as soon as possible. type ImmediateModeScheduleActionStartSettings struct { _ struct{} `type:"structure"` } @@ -13172,6 +14966,9 @@ type Input struct { // value is not valid because the channel requires two sources in the input. InputClass *string `locationName:"inputClass" type:"string" enum:"InputClass"` + // Settings for the input devices. + InputDevices []*InputDeviceSettings `locationName:"inputDevices" type:"list"` + // Certain pull input sources can be dynamic, meaning that they can have their // URL's dynamically changesduring input switch actions. Presently, this functionality // only works with MP4_FILE inputs. @@ -13241,6 +15038,12 @@ func (s *Input) SetInputClass(v string) *Input { return s } +// SetInputDevices sets the InputDevices field's value. 
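// Usage sketch (illustrative, not part of the generated API): the IndexNSegments /
// KeepSegments relationship described above, with KeepSegments larger than
// IndexNSegments (the field docs suggest keepSegments = 2 * indexNSegments + 1).
//
//	const indexN = 10
//	hls := &medialive.HlsGroupSettings{
//		IndexNSegments: aws.Int64(indexN),
//		KeepSegments:   aws.Int64(2*indexN + 1),
//	}
//	_ = hls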
+func (s *Input) SetInputDevices(v []*InputDeviceSettings) *Input { + s.InputDevices = v + return s +} + // SetInputSourceType sets the InputSourceType field's value. func (s *Input) SetInputSourceType(v string) *Input { s.InputSourceType = &v @@ -13298,6 +15101,10 @@ func (s *Input) SetType(v string) *Input { type InputAttachment struct { _ struct{} `type:"structure"` + // User-specified settings for defining what the conditions are for declaring + // the input unhealthy and failing over to a different input. + AutomaticInputFailoverSettings *AutomaticInputFailoverSettings `locationName:"automaticInputFailoverSettings" type:"structure"` + // User-specified name for the attachment. This is required if the user wants // to use this input in an input switch action. InputAttachmentName *string `locationName:"inputAttachmentName" type:"string"` @@ -13322,6 +15129,11 @@ func (s InputAttachment) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *InputAttachment) Validate() error { invalidParams := request.ErrInvalidParams{Context: "InputAttachment"} + if s.AutomaticInputFailoverSettings != nil { + if err := s.AutomaticInputFailoverSettings.Validate(); err != nil { + invalidParams.AddNested("AutomaticInputFailoverSettings", err.(request.ErrInvalidParams)) + } + } if s.InputSettings != nil { if err := s.InputSettings.Validate(); err != nil { invalidParams.AddNested("InputSettings", err.(request.ErrInvalidParams)) @@ -13334,6 +15146,12 @@ func (s *InputAttachment) Validate() error { return nil } +// SetAutomaticInputFailoverSettings sets the AutomaticInputFailoverSettings field's value. +func (s *InputAttachment) SetAutomaticInputFailoverSettings(v *AutomaticInputFailoverSettings) *InputAttachment { + s.AutomaticInputFailoverSettings = v + return s +} + // SetInputAttachmentName sets the InputAttachmentName field's value. func (s *InputAttachment) SetInputAttachmentName(v string) *InputAttachment { s.InputAttachmentName = &v @@ -13419,160 +15237,506 @@ type InputClippingSettings struct { // InputTimecodeSource is a required field InputTimecodeSource *string `locationName:"inputTimecodeSource" type:"string" required:"true" enum:"InputTimecodeSource"` - // Settings to identify the start of the clip. - StartTimecode *StartTimecode `locationName:"startTimecode" type:"structure"` + // Settings to identify the start of the clip. + StartTimecode *StartTimecode `locationName:"startTimecode" type:"structure"` + + // Settings to identify the end of the clip. + StopTimecode *StopTimecode `locationName:"stopTimecode" type:"structure"` +} + +// String returns the string representation +func (s InputClippingSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputClippingSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputClippingSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputClippingSettings"} + if s.InputTimecodeSource == nil { + invalidParams.Add(request.NewErrParamRequired("InputTimecodeSource")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputTimecodeSource sets the InputTimecodeSource field's value. +func (s *InputClippingSettings) SetInputTimecodeSource(v string) *InputClippingSettings { + s.InputTimecodeSource = &v + return s +} + +// SetStartTimecode sets the StartTimecode field's value. 
+func (s *InputClippingSettings) SetStartTimecode(v *StartTimecode) *InputClippingSettings { + s.StartTimecode = v + return s +} + +// SetStopTimecode sets the StopTimecode field's value. +func (s *InputClippingSettings) SetStopTimecode(v *StopTimecode) *InputClippingSettings { + s.StopTimecode = v + return s +} + +// The settings for a PUSH type input. +type InputDestination struct { + _ struct{} `type:"structure"` + + // The system-generated static IP address of endpoint.It remains fixed for the + // lifetime of the input. + Ip *string `locationName:"ip" type:"string"` + + // The port number for the input. + Port *string `locationName:"port" type:"string"` + + // This represents the endpoint that the customer stream will bepushed to. + Url *string `locationName:"url" type:"string"` + + // The properties for a VPC type input destination. + Vpc *InputDestinationVpc `locationName:"vpc" type:"structure"` +} + +// String returns the string representation +func (s InputDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputDestination) GoString() string { + return s.String() +} + +// SetIp sets the Ip field's value. +func (s *InputDestination) SetIp(v string) *InputDestination { + s.Ip = &v + return s +} + +// SetPort sets the Port field's value. +func (s *InputDestination) SetPort(v string) *InputDestination { + s.Port = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *InputDestination) SetUrl(v string) *InputDestination { + s.Url = &v + return s +} + +// SetVpc sets the Vpc field's value. +func (s *InputDestination) SetVpc(v *InputDestinationVpc) *InputDestination { + s.Vpc = v + return s +} + +// Endpoint settings for a PUSH type input. +type InputDestinationRequest struct { + _ struct{} `type:"structure"` + + // A unique name for the location the RTMP stream is being pushedto. + StreamName *string `locationName:"streamName" type:"string"` +} + +// String returns the string representation +func (s InputDestinationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputDestinationRequest) GoString() string { + return s.String() +} + +// SetStreamName sets the StreamName field's value. +func (s *InputDestinationRequest) SetStreamName(v string) *InputDestinationRequest { + s.StreamName = &v + return s +} + +// The properties for a VPC type input destination. +type InputDestinationVpc struct { + _ struct{} `type:"structure"` + + // The availability zone of the Input destination. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The network interface ID of the Input destination in the VPC. + NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` +} + +// String returns the string representation +func (s InputDestinationVpc) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputDestinationVpc) GoString() string { + return s.String() +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *InputDestinationVpc) SetAvailabilityZone(v string) *InputDestinationVpc { + s.AvailabilityZone = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *InputDestinationVpc) SetNetworkInterfaceId(v string) *InputDestinationVpc { + s.NetworkInterfaceId = &v + return s +} + +// Configurable settings for the input device. 
+type InputDeviceConfigurableSettings struct { + _ struct{} `type:"structure"` + + // The input source that you want to use. If the device has a source connected + // to only one of its input ports, or if you don't care which source the device + // sends, specify Auto. If the device has sources connected to both its input + // ports, and you want to use a specific source, specify the source. + ConfiguredInput *string `locationName:"configuredInput" type:"string" enum:"InputDeviceConfiguredInput"` + + // The maximum bitrate in bits per second. Set a value here to throttle the + // bitrate of the source video. + MaxBitrate *int64 `locationName:"maxBitrate" type:"integer"` +} + +// String returns the string representation +func (s InputDeviceConfigurableSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputDeviceConfigurableSettings) GoString() string { + return s.String() +} + +// SetConfiguredInput sets the ConfiguredInput field's value. +func (s *InputDeviceConfigurableSettings) SetConfiguredInput(v string) *InputDeviceConfigurableSettings { + s.ConfiguredInput = &v + return s +} + +// SetMaxBitrate sets the MaxBitrate field's value. +func (s *InputDeviceConfigurableSettings) SetMaxBitrate(v int64) *InputDeviceConfigurableSettings { + s.MaxBitrate = &v + return s +} + +// Settings that describe the active source from the input device, and the video +// characteristics of that source. +type InputDeviceHdSettings struct { + _ struct{} `type:"structure"` + + // If you specified Auto as the configured input, specifies which of the sources + // is currently active (SDI or HDMI). + ActiveInput *string `locationName:"activeInput" type:"string" enum:"InputDeviceActiveInput"` + + // The source at the input device that is currently active. You can specify + // this source. + ConfiguredInput *string `locationName:"configuredInput" type:"string" enum:"InputDeviceConfiguredInput"` + + // The state of the input device. + DeviceState *string `locationName:"deviceState" type:"string" enum:"InputDeviceState"` + + // The frame rate of the video source. + Framerate *float64 `locationName:"framerate" type:"double"` + + // The height of the video source, in pixels. + Height *int64 `locationName:"height" type:"integer"` + + // The current maximum bitrate for ingesting this source, in bits per second. + // You can specify this maximum. + MaxBitrate *int64 `locationName:"maxBitrate" type:"integer"` + + // The scan type of the video source. + ScanType *string `locationName:"scanType" type:"string" enum:"InputDeviceScanType"` + + // The width of the video source, in pixels. + Width *int64 `locationName:"width" type:"integer"` +} + +// String returns the string representation +func (s InputDeviceHdSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputDeviceHdSettings) GoString() string { + return s.String() +} + +// SetActiveInput sets the ActiveInput field's value. +func (s *InputDeviceHdSettings) SetActiveInput(v string) *InputDeviceHdSettings { + s.ActiveInput = &v + return s +} + +// SetConfiguredInput sets the ConfiguredInput field's value. +func (s *InputDeviceHdSettings) SetConfiguredInput(v string) *InputDeviceHdSettings { + s.ConfiguredInput = &v + return s +} + +// SetDeviceState sets the DeviceState field's value. 
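// --- Illustrative sketch (editor's addition, not part of the generated SDK
// code): InputDeviceConfigurableSettings is what a caller sends to change a
// device, for example to pin the source selection and throttle ingest.
// Assumes the vendored aws and medialive packages are imported; the "AUTO"
// enum string (from the field documentation's "Auto") and the bitrate are
// example values.
func exampleDeviceConfig() *medialive.InputDeviceConfigurableSettings {
	return &medialive.InputDeviceConfigurableSettings{
		ConfiguredInput: aws.String("AUTO"), // let the device choose the connected source
		MaxBitrate:      aws.Int64(5000000), // throttle the source video to 5 Mbps
	}
}
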
+func (s *InputDeviceHdSettings) SetDeviceState(v string) *InputDeviceHdSettings { + s.DeviceState = &v + return s +} + +// SetFramerate sets the Framerate field's value. +func (s *InputDeviceHdSettings) SetFramerate(v float64) *InputDeviceHdSettings { + s.Framerate = &v + return s +} + +// SetHeight sets the Height field's value. +func (s *InputDeviceHdSettings) SetHeight(v int64) *InputDeviceHdSettings { + s.Height = &v + return s +} + +// SetMaxBitrate sets the MaxBitrate field's value. +func (s *InputDeviceHdSettings) SetMaxBitrate(v int64) *InputDeviceHdSettings { + s.MaxBitrate = &v + return s +} + +// SetScanType sets the ScanType field's value. +func (s *InputDeviceHdSettings) SetScanType(v string) *InputDeviceHdSettings { + s.ScanType = &v + return s +} + +// SetWidth sets the Width field's value. +func (s *InputDeviceHdSettings) SetWidth(v int64) *InputDeviceHdSettings { + s.Width = &v + return s +} + +// The network settings for the input device. +type InputDeviceNetworkSettings struct { + _ struct{} `type:"structure"` + + // The DNS addresses of the input device. + DnsAddresses []*string `locationName:"dnsAddresses" type:"list"` + + // The network gateway IP address. + Gateway *string `locationName:"gateway" type:"string"` - // Settings to identify the end of the clip. - StopTimecode *StopTimecode `locationName:"stopTimecode" type:"structure"` + // The IP address of the input device. + IpAddress *string `locationName:"ipAddress" type:"string"` + + // Specifies whether the input device has been configured (outside of MediaLive) + // to use a dynamic IP address assignment (DHCP) or a static IP address. + IpScheme *string `locationName:"ipScheme" type:"string" enum:"InputDeviceIpScheme"` + + // The subnet mask of the input device. + SubnetMask *string `locationName:"subnetMask" type:"string"` } // String returns the string representation -func (s InputClippingSettings) String() string { +func (s InputDeviceNetworkSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InputClippingSettings) GoString() string { +func (s InputDeviceNetworkSettings) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *InputClippingSettings) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InputClippingSettings"} - if s.InputTimecodeSource == nil { - invalidParams.Add(request.NewErrParamRequired("InputTimecodeSource")) - } +// SetDnsAddresses sets the DnsAddresses field's value. +func (s *InputDeviceNetworkSettings) SetDnsAddresses(v []*string) *InputDeviceNetworkSettings { + s.DnsAddresses = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetGateway sets the Gateway field's value. +func (s *InputDeviceNetworkSettings) SetGateway(v string) *InputDeviceNetworkSettings { + s.Gateway = &v + return s } -// SetInputTimecodeSource sets the InputTimecodeSource field's value. -func (s *InputClippingSettings) SetInputTimecodeSource(v string) *InputClippingSettings { - s.InputTimecodeSource = &v +// SetIpAddress sets the IpAddress field's value. +func (s *InputDeviceNetworkSettings) SetIpAddress(v string) *InputDeviceNetworkSettings { + s.IpAddress = &v return s } -// SetStartTimecode sets the StartTimecode field's value. -func (s *InputClippingSettings) SetStartTimecode(v *StartTimecode) *InputClippingSettings { - s.StartTimecode = v +// SetIpScheme sets the IpScheme field's value. 
+func (s *InputDeviceNetworkSettings) SetIpScheme(v string) *InputDeviceNetworkSettings { + s.IpScheme = &v return s } -// SetStopTimecode sets the StopTimecode field's value. -func (s *InputClippingSettings) SetStopTimecode(v *StopTimecode) *InputClippingSettings { - s.StopTimecode = v +// SetSubnetMask sets the SubnetMask field's value. +func (s *InputDeviceNetworkSettings) SetSubnetMask(v string) *InputDeviceNetworkSettings { + s.SubnetMask = &v return s } -// The settings for a PUSH type input. -type InputDestination struct { +// Settings for an input device. +type InputDeviceRequest struct { _ struct{} `type:"structure"` - // The system-generated static IP address of endpoint.It remains fixed for the - // lifetime of the input. - Ip *string `locationName:"ip" type:"string"` - - // The port number for the input. - Port *string `locationName:"port" type:"string"` - - // This represents the endpoint that the customer stream will bepushed to. - Url *string `locationName:"url" type:"string"` - - // The properties for a VPC type input destination. - Vpc *InputDestinationVpc `locationName:"vpc" type:"structure"` + // The unique ID for the device. + Id *string `locationName:"id" type:"string"` } // String returns the string representation -func (s InputDestination) String() string { +func (s InputDeviceRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InputDestination) GoString() string { +func (s InputDeviceRequest) GoString() string { return s.String() } -// SetIp sets the Ip field's value. -func (s *InputDestination) SetIp(v string) *InputDestination { - s.Ip = &v +// SetId sets the Id field's value. +func (s *InputDeviceRequest) SetId(v string) *InputDeviceRequest { + s.Id = &v return s } -// SetPort sets the Port field's value. -func (s *InputDestination) SetPort(v string) *InputDestination { - s.Port = &v - return s +// Settings for an input device. +type InputDeviceSettings struct { + _ struct{} `type:"structure"` + + // The unique ID for the device. + Id *string `locationName:"id" type:"string"` } -// SetUrl sets the Url field's value. -func (s *InputDestination) SetUrl(v string) *InputDestination { - s.Url = &v - return s +// String returns the string representation +func (s InputDeviceSettings) String() string { + return awsutil.Prettify(s) } -// SetVpc sets the Vpc field's value. -func (s *InputDestination) SetVpc(v *InputDestinationVpc) *InputDestination { - s.Vpc = v +// GoString returns the string representation +func (s InputDeviceSettings) GoString() string { + return s.String() +} + +// SetId sets the Id field's value. +func (s *InputDeviceSettings) SetId(v string) *InputDeviceSettings { + s.Id = &v return s } -// Endpoint settings for a PUSH type input. -type InputDestinationRequest struct { +// Details of the input device. +type InputDeviceSummary struct { _ struct{} `type:"structure"` - // A unique name for the location the RTMP stream is being pushedto. - StreamName *string `locationName:"streamName" type:"string"` + // The unique ARN of the input device. + Arn *string `locationName:"arn" type:"string"` + + // The state of the connection between the input device and AWS. + ConnectionState *string `locationName:"connectionState" type:"string" enum:"InputDeviceConnectionState"` + + // The status of the action to synchronize the device configuration. If you + // change the configuration of the input device (for example, the maximum bitrate), + // MediaLive sends the new data to the device. 
The device might not update itself + // immediately. SYNCED means the device has updated its configuration. SYNCING + // means that it has not updated its configuration. + DeviceSettingsSyncState *string `locationName:"deviceSettingsSyncState" type:"string" enum:"DeviceSettingsSyncState"` + + // Settings that describe an input device that is type HD. + HdDeviceSettings *InputDeviceHdSettings `locationName:"hdDeviceSettings" type:"structure"` + + // The unique ID of the input device. + Id *string `locationName:"id" type:"string"` + + // The network MAC address of the input device. + MacAddress *string `locationName:"macAddress" type:"string"` + + // A name that you specify for the input device. + Name *string `locationName:"name" type:"string"` + + // Network settings for the input device. + NetworkSettings *InputDeviceNetworkSettings `locationName:"networkSettings" type:"structure"` + + // The unique serial number of the input device. + SerialNumber *string `locationName:"serialNumber" type:"string"` + + // The type of the input device. + Type *string `locationName:"type" type:"string" enum:"InputDeviceType"` } // String returns the string representation -func (s InputDestinationRequest) String() string { +func (s InputDeviceSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InputDestinationRequest) GoString() string { +func (s InputDeviceSummary) GoString() string { return s.String() } -// SetStreamName sets the StreamName field's value. -func (s *InputDestinationRequest) SetStreamName(v string) *InputDestinationRequest { - s.StreamName = &v +// SetArn sets the Arn field's value. +func (s *InputDeviceSummary) SetArn(v string) *InputDeviceSummary { + s.Arn = &v return s } -// The properties for a VPC type input destination. -type InputDestinationVpc struct { - _ struct{} `type:"structure"` +// SetConnectionState sets the ConnectionState field's value. +func (s *InputDeviceSummary) SetConnectionState(v string) *InputDeviceSummary { + s.ConnectionState = &v + return s +} - // The availability zone of the Input destination. - AvailabilityZone *string `locationName:"availabilityZone" type:"string"` +// SetDeviceSettingsSyncState sets the DeviceSettingsSyncState field's value. +func (s *InputDeviceSummary) SetDeviceSettingsSyncState(v string) *InputDeviceSummary { + s.DeviceSettingsSyncState = &v + return s +} - // The network interface ID of the Input destination in the VPC. - NetworkInterfaceId *string `locationName:"networkInterfaceId" type:"string"` +// SetHdDeviceSettings sets the HdDeviceSettings field's value. +func (s *InputDeviceSummary) SetHdDeviceSettings(v *InputDeviceHdSettings) *InputDeviceSummary { + s.HdDeviceSettings = v + return s } -// String returns the string representation -func (s InputDestinationVpc) String() string { - return awsutil.Prettify(s) +// SetId sets the Id field's value. +func (s *InputDeviceSummary) SetId(v string) *InputDeviceSummary { + s.Id = &v + return s } -// GoString returns the string representation -func (s InputDestinationVpc) GoString() string { - return s.String() +// SetMacAddress sets the MacAddress field's value. +func (s *InputDeviceSummary) SetMacAddress(v string) *InputDeviceSummary { + s.MacAddress = &v + return s } -// SetAvailabilityZone sets the AvailabilityZone field's value. -func (s *InputDestinationVpc) SetAvailabilityZone(v string) *InputDestinationVpc { - s.AvailabilityZone = &v +// SetName sets the Name field's value. 
+func (s *InputDeviceSummary) SetName(v string) *InputDeviceSummary { + s.Name = &v return s } -// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. -func (s *InputDestinationVpc) SetNetworkInterfaceId(v string) *InputDestinationVpc { - s.NetworkInterfaceId = &v +// SetNetworkSettings sets the NetworkSettings field's value. +func (s *InputDeviceSummary) SetNetworkSettings(v *InputDeviceNetworkSettings) *InputDeviceSummary { + s.NetworkSettings = v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *InputDeviceSummary) SetSerialNumber(v string) *InputDeviceSummary { + s.SerialNumber = &v + return s +} + +// SetType sets the Type field's value. +func (s *InputDeviceSummary) SetType(v string) *InputDeviceSummary { + s.Type = &v return s } @@ -13724,6 +15888,69 @@ func (s *InputLossBehavior) SetRepeatFrameMsec(v int64) *InputLossBehavior { return s } +// Action to prepare an input for a future immediate input switch. +type InputPrepareScheduleActionSettings struct { + _ struct{} `type:"structure"` + + // The name of the input attachment that should be prepared by this action. + // If no name is provided, the action will stop the most recent prepare (if + // any) when activated. + InputAttachmentNameReference *string `locationName:"inputAttachmentNameReference" type:"string"` + + // Settings to let you create a clip of the file input, in order to set up the + // input to ingest only a portion of the file. + InputClippingSettings *InputClippingSettings `locationName:"inputClippingSettings" type:"structure"` + + // The value for the variable portion of the URL for the dynamic input, for + // this instance of the input. Each time you use the same dynamic input in an + // input switch action, you can provide a different value, in order to connect + // the input to a different content source. + UrlPath []*string `locationName:"urlPath" type:"list"` +} + +// String returns the string representation +func (s InputPrepareScheduleActionSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputPrepareScheduleActionSettings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InputPrepareScheduleActionSettings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputPrepareScheduleActionSettings"} + if s.InputClippingSettings != nil { + if err := s.InputClippingSettings.Validate(); err != nil { + invalidParams.AddNested("InputClippingSettings", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInputAttachmentNameReference sets the InputAttachmentNameReference field's value. +func (s *InputPrepareScheduleActionSettings) SetInputAttachmentNameReference(v string) *InputPrepareScheduleActionSettings { + s.InputAttachmentNameReference = &v + return s +} + +// SetInputClippingSettings sets the InputClippingSettings field's value. +func (s *InputPrepareScheduleActionSettings) SetInputClippingSettings(v *InputClippingSettings) *InputPrepareScheduleActionSettings { + s.InputClippingSettings = v + return s +} + +// SetUrlPath sets the UrlPath field's value. 
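// --- Illustrative sketch (editor's addition, not part of the generated SDK
// code): an input prepare action names the attachment to warm up and can carry
// clipping settings and dynamic URL values, as described above. The attachment
// name, the "ZEROBASED" timecode-source string, and the URL path are assumed
// example values; the vendored aws and medialive packages are assumed imported.
func exampleInputPrepare() *medialive.InputPrepareScheduleActionSettings {
	return &medialive.InputPrepareScheduleActionSettings{
		InputAttachmentNameReference: aws.String("backup-attachment"),
		InputClippingSettings: &medialive.InputClippingSettings{
			InputTimecodeSource: aws.String("ZEROBASED"), // required by Validate
		},
		UrlPath: []*string{aws.String("assets/promo-1.mp4")},
	}
}
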
+func (s *InputPrepareScheduleActionSettings) SetUrlPath(v []*string) *InputPrepareScheduleActionSettings { + s.UrlPath = v + return s +} + // An Input Security Group type InputSecurityGroup struct { _ struct{} `type:"structure"` @@ -13822,6 +16049,13 @@ type InputSettings struct { // Input settings. NetworkInputSettings *NetworkInputSettings `locationName:"networkInputSettings" type:"structure"` + // Specifies whether to extract applicable ancillary data from a SMPTE-2038 + // source in this input. Applicable data types are captions, timecode, AFD, + // and SCTE-104 messages.- PREFER: Extract from SMPTE-2038 if present in this + // input, otherwise extract from another source (if any).- IGNORE: Never extract + // any ancillary data from SMPTE-2038. + Smpte2038DataPreference *string `locationName:"smpte2038DataPreference" type:"string" enum:"Smpte2038DataPreference"` + // Loop input if it is a file. This allows a file input to be streamed indefinitely. SourceEndBehavior *string `locationName:"sourceEndBehavior" type:"string" enum:"InputSourceEndBehavior"` @@ -13915,6 +16149,12 @@ func (s *InputSettings) SetNetworkInputSettings(v *NetworkInputSettings) *InputS return s } +// SetSmpte2038DataPreference sets the Smpte2038DataPreference field's value. +func (s *InputSettings) SetSmpte2038DataPreference(v string) *InputSettings { + s.Smpte2038DataPreference = &v + return s +} + // SetSourceEndBehavior sets the SourceEndBehavior field's value. func (s *InputSettings) SetSourceEndBehavior(v string) *InputSettings { s.SourceEndBehavior = &v @@ -14223,8 +16463,8 @@ func (s *InputWhitelistRuleCidr) SetCidr(v string) *InputWhitelistRuleCidr { } type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14241,17 +16481,17 @@ func (s InternalServerErrorException) GoString() string { func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { return &InternalServerErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerErrorException) Code() string { +func (s *InternalServerErrorException) Code() string { return "InternalServerErrorException" } // Message returns the exception's message. -func (s InternalServerErrorException) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14259,22 +16499,22 @@ func (s InternalServerErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerErrorException) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s InternalServerErrorException) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
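// --- Illustrative sketch (editor's addition, not part of the generated SDK
// code): the new Smpte2038DataPreference field is set on the InputSettings of
// an input attachment; "PREFER" and "IGNORE" are the two values named in the
// field documentation above. Vendored aws and medialive imports are assumed.
func exampleSmpte2038Preference() *medialive.InputSettings {
	return &medialive.InputSettings{
		Smpte2038DataPreference: aws.String("PREFER"), // extract from SMPTE-2038 when present
	}
}
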
-func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID } // Key Provider Settings @@ -14304,19 +16544,92 @@ func (s *KeyProviderSettings) Validate() error { } } - if invalidParams.Len() > 0 { - return invalidParams - } - return nil + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStaticKeySettings sets the StaticKeySettings field's value. +func (s *KeyProviderSettings) SetStaticKeySettings(v *StaticKeySettings) *KeyProviderSettings { + s.StaticKeySettings = v + return s +} + +type ListChannelsInput struct { + _ struct{} `type:"structure"` + + MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + + NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListChannelsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChannelsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListChannelsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListChannelsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListChannelsInput) SetMaxResults(v int64) *ListChannelsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListChannelsInput) SetNextToken(v string) *ListChannelsInput { + s.NextToken = &v + return s +} + +type ListChannelsOutput struct { + _ struct{} `type:"structure"` + + Channels []*ChannelSummary `locationName:"channels" type:"list"` + + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListChannelsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChannelsOutput) GoString() string { + return s.String() } -// SetStaticKeySettings sets the StaticKeySettings field's value. -func (s *KeyProviderSettings) SetStaticKeySettings(v *StaticKeySettings) *KeyProviderSettings { - s.StaticKeySettings = v +// SetChannels sets the Channels field's value. +func (s *ListChannelsOutput) SetChannels(v []*ChannelSummary) *ListChannelsOutput { + s.Channels = v return s } -type ListChannelsInput struct { +// SetNextToken sets the NextToken field's value. +func (s *ListChannelsOutput) SetNextToken(v string) *ListChannelsOutput { + s.NextToken = &v + return s +} + +type ListInputDevicesInput struct { _ struct{} `type:"structure"` MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` @@ -14325,18 +16638,18 @@ type ListChannelsInput struct { } // String returns the string representation -func (s ListChannelsInput) String() string { +func (s ListInputDevicesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListChannelsInput) GoString() string { +func (s ListInputDevicesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
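// --- Illustrative sketch (editor's addition, not part of the generated SDK
// code): ListChannels pages with MaxResults and NextToken, and the new
// ListInputDevices request/response pair introduced here follows the same
// shape. Assumes the generated MediaLive client and the vendored aws package;
// MaxResults of 20 is an example value.
func exampleListChannels(svc *medialive.MediaLive) ([]*medialive.ChannelSummary, error) {
	var all []*medialive.ChannelSummary
	in := &medialive.ListChannelsInput{MaxResults: aws.Int64(20)}
	for {
		out, err := svc.ListChannels(in)
		if err != nil {
			return nil, err
		}
		all = append(all, out.Channels...)
		if out.NextToken == nil {
			return all, nil
		}
		in.NextToken = out.NextToken // fetch the next page
	}
}
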
-func (s *ListChannelsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListChannelsInput"} +func (s *ListInputDevicesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListInputDevicesInput"} if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } @@ -14348,43 +16661,43 @@ func (s *ListChannelsInput) Validate() error { } // SetMaxResults sets the MaxResults field's value. -func (s *ListChannelsInput) SetMaxResults(v int64) *ListChannelsInput { +func (s *ListInputDevicesInput) SetMaxResults(v int64) *ListInputDevicesInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListChannelsInput) SetNextToken(v string) *ListChannelsInput { +func (s *ListInputDevicesInput) SetNextToken(v string) *ListInputDevicesInput { s.NextToken = &v return s } -type ListChannelsOutput struct { +type ListInputDevicesOutput struct { _ struct{} `type:"structure"` - Channels []*ChannelSummary `locationName:"channels" type:"list"` + InputDevices []*InputDeviceSummary `locationName:"inputDevices" type:"list"` NextToken *string `locationName:"nextToken" type:"string"` } // String returns the string representation -func (s ListChannelsOutput) String() string { +func (s ListInputDevicesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListChannelsOutput) GoString() string { +func (s ListInputDevicesOutput) GoString() string { return s.String() } -// SetChannels sets the Channels field's value. -func (s *ListChannelsOutput) SetChannels(v []*ChannelSummary) *ListChannelsOutput { - s.Channels = v +// SetInputDevices sets the InputDevices field's value. +func (s *ListInputDevicesOutput) SetInputDevices(v []*InputDeviceSummary) *ListInputDevicesOutput { + s.InputDevices = v return s } // SetNextToken sets the NextToken field's value. -func (s *ListChannelsOutput) SetNextToken(v string) *ListChannelsOutput { +func (s *ListInputDevicesOutput) SetNextToken(v string) *ListInputDevicesOutput { s.NextToken = &v return s } @@ -15092,9 +17405,11 @@ type M2tsSettings struct { // 0 lets the muxer automatically determine the appropriate bitrate. Bitrate *int64 `locationName:"bitrate" type:"integer"` - // If set to multiplex, use multiplex buffer model for accurate interleaving. - // Setting to bufferModel to none can lead to lower latency, but low-memory - // devices may not be able to play back the stream without interruptions. + // Controls the timing accuracy for output network traffic. Leave as MULTIPLEX + // to ensure accurate network packet timing. Or set to NONE, which might result + // in lower latency but will result in more variability in output network packet + // timing. This variability might cause interruptions, jitter, or bursty behavior + // in your playback or receiving devices. BufferModel *string `locationName:"bufferModel" type:"string" enum:"M2tsBufferModel"` // When set to enabled, generates captionServiceDescriptor in PMT. @@ -15257,7 +17572,7 @@ type M2tsSettings struct { SegmentationStyle *string `locationName:"segmentationStyle" type:"string" enum:"M2tsSegmentationStyle"` // The length in seconds of each segment. Required unless markers is set to - // None_. + // _none_. 
SegmentationTime *float64 `locationName:"segmentationTime" type:"double"` // When set to passthrough, timed metadata will be passed through from input @@ -15966,12 +18281,262 @@ func (s *Mp2Settings) SetSampleRate(v float64) *Mp2Settings { return s } +// Mpeg2 Filter Settings +type Mpeg2FilterSettings struct { + _ struct{} `type:"structure"` + + // Temporal Filter Settings + TemporalFilterSettings *TemporalFilterSettings `locationName:"temporalFilterSettings" type:"structure"` +} + +// String returns the string representation +func (s Mpeg2FilterSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Mpeg2FilterSettings) GoString() string { + return s.String() +} + +// SetTemporalFilterSettings sets the TemporalFilterSettings field's value. +func (s *Mpeg2FilterSettings) SetTemporalFilterSettings(v *TemporalFilterSettings) *Mpeg2FilterSettings { + s.TemporalFilterSettings = v + return s +} + +// Mpeg2 Settings +type Mpeg2Settings struct { + _ struct{} `type:"structure"` + + // Choose Off to disable adaptive quantization. Or choose another value to enable + // the quantizer and set its strength. The strengths are: Auto, Off, Low, Medium, + // High. When you enable this field, MediaLive allows intra-frame quantizers + // to vary, which might improve visual quality. + AdaptiveQuantization *string `locationName:"adaptiveQuantization" type:"string" enum:"Mpeg2AdaptiveQuantization"` + + // Indicates the AFD values that MediaLive will write into the video encode. + // If you do not know what AFD signaling is, or if your downstream system has + // not given you guidance, choose AUTO.AUTO: MediaLive will try to preserve + // the input AFD value (in cases where multiple AFD values are valid).FIXED: + // MediaLive will use the value you specify in fixedAFD. + AfdSignaling *string `locationName:"afdSignaling" type:"string" enum:"AfdSignaling"` + + // Specifies whether to include the color space metadata. The metadata describes + // the color space that applies to the video (the colorSpace field). We recommend + // that you insert the metadata. + ColorMetadata *string `locationName:"colorMetadata" type:"string" enum:"Mpeg2ColorMetadata"` + + // Choose the type of color space conversion to apply to the output. For detailed + // information on setting up both the input and the output to obtain the desired + // color space in the output, see the section on \"MediaLive Features - Video + // - color space\" in the MediaLive User Guide.PASSTHROUGH: Keep the color space + // of the input content - do not convert it.AUTO:Convert all content that is + // SD to rec 601, and convert all content that is HD to rec 709. + ColorSpace *string `locationName:"colorSpace" type:"string" enum:"Mpeg2ColorSpace"` + + // Sets the pixel aspect ratio for the encode. + DisplayAspectRatio *string `locationName:"displayAspectRatio" type:"string" enum:"Mpeg2DisplayRatio"` + + // Optionally specify a noise reduction filter, which can improve quality of + // compressed content. If you do not choose a filter, no filter will be applied.TEMPORAL: + // This filter is useful for both source content that is noisy (when it has + // excessive digital artifacts) and source content that is clean.When the content + // is noisy, the filter cleans up the source content before the encoding phase, + // with these two effects: First, it improves the output video quality because + // the content has been cleaned up. 
Secondly, it decreases the bandwidth because + // MediaLive does not waste bits on encoding noise.When the content is reasonably + // clean, the filter tends to decrease the bitrate. + FilterSettings *Mpeg2FilterSettings `locationName:"filterSettings" type:"structure"` + + // Complete this field only when afdSignaling is set to FIXED. Enter the AFD + // value (4 bits) to write on all frames of the video encode. + FixedAfd *string `locationName:"fixedAfd" type:"string" enum:"FixedAfd"` + + // description": "The framerate denominator. For example, 1001. The framerate + // is the numerator divided by the denominator. For example, 24000 / 1001 = + // 23.976 FPS. + // + // FramerateDenominator is a required field + FramerateDenominator *int64 `locationName:"framerateDenominator" min:"1" type:"integer" required:"true"` + + // The framerate numerator. For example, 24000. The framerate is the numerator + // divided by the denominator. For example, 24000 / 1001 = 23.976 FPS. + // + // FramerateNumerator is a required field + FramerateNumerator *int64 `locationName:"framerateNumerator" min:"1" type:"integer" required:"true"` + + // MPEG2: default is open GOP. + GopClosedCadence *int64 `locationName:"gopClosedCadence" type:"integer"` + + // Relates to the GOP structure. The number of B-frames between reference frames. + // If you do not know what a B-frame is, use the default. + GopNumBFrames *int64 `locationName:"gopNumBFrames" type:"integer"` + + // Relates to the GOP structure. The GOP size (keyframe interval) in the units + // specified in gopSizeUnits. If you do not know what GOP is, use the default.If + // gopSizeUnits is frames, then the gopSize must be an integer and must be greater + // than or equal to 1.If gopSizeUnits is seconds, the gopSize must be greater + // than 0, but does not need to be an integer. + GopSize *float64 `locationName:"gopSize" type:"double"` + + // Relates to the GOP structure. Specifies whether the gopSize is specified + // in frames or seconds. If you do not plan to change the default gopSize, leave + // the default. If you specify SECONDS, MediaLive will internally convert the + // gop size to a frame count. + GopSizeUnits *string `locationName:"gopSizeUnits" type:"string" enum:"Mpeg2GopSizeUnits"` + + // Set the scan type of the output to PROGRESSIVE or INTERLACED (top field first). + ScanType *string `locationName:"scanType" type:"string" enum:"Mpeg2ScanType"` + + // Relates to the GOP structure. If you do not know what GOP is, use the default.FIXED: + // Set the number of B-frames in each sub-GOP to the value in gopNumBFrames.DYNAMIC: + // Let MediaLive optimize the number of B-frames in each sub-GOP, to improve + // visual quality. + SubgopLength *string `locationName:"subgopLength" type:"string" enum:"Mpeg2SubGopLength"` + + // Determines how MediaLive inserts timecodes in the output video. For detailed + // information about setting up the input and the output for a timecode, see + // the section on \"MediaLive Features - Timecode configuration\" in the MediaLive + // User Guide.DISABLED: do not include timecodes.GOP_TIMECODE: Include timecode + // metadata in the GOP header. 
+ TimecodeInsertion *string `locationName:"timecodeInsertion" type:"string" enum:"Mpeg2TimecodeInsertionBehavior"` +} + +// String returns the string representation +func (s Mpeg2Settings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Mpeg2Settings) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Mpeg2Settings) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Mpeg2Settings"} + if s.FramerateDenominator == nil { + invalidParams.Add(request.NewErrParamRequired("FramerateDenominator")) + } + if s.FramerateDenominator != nil && *s.FramerateDenominator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateDenominator", 1)) + } + if s.FramerateNumerator == nil { + invalidParams.Add(request.NewErrParamRequired("FramerateNumerator")) + } + if s.FramerateNumerator != nil && *s.FramerateNumerator < 1 { + invalidParams.Add(request.NewErrParamMinValue("FramerateNumerator", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAdaptiveQuantization sets the AdaptiveQuantization field's value. +func (s *Mpeg2Settings) SetAdaptiveQuantization(v string) *Mpeg2Settings { + s.AdaptiveQuantization = &v + return s +} + +// SetAfdSignaling sets the AfdSignaling field's value. +func (s *Mpeg2Settings) SetAfdSignaling(v string) *Mpeg2Settings { + s.AfdSignaling = &v + return s +} + +// SetColorMetadata sets the ColorMetadata field's value. +func (s *Mpeg2Settings) SetColorMetadata(v string) *Mpeg2Settings { + s.ColorMetadata = &v + return s +} + +// SetColorSpace sets the ColorSpace field's value. +func (s *Mpeg2Settings) SetColorSpace(v string) *Mpeg2Settings { + s.ColorSpace = &v + return s +} + +// SetDisplayAspectRatio sets the DisplayAspectRatio field's value. +func (s *Mpeg2Settings) SetDisplayAspectRatio(v string) *Mpeg2Settings { + s.DisplayAspectRatio = &v + return s +} + +// SetFilterSettings sets the FilterSettings field's value. +func (s *Mpeg2Settings) SetFilterSettings(v *Mpeg2FilterSettings) *Mpeg2Settings { + s.FilterSettings = v + return s +} + +// SetFixedAfd sets the FixedAfd field's value. +func (s *Mpeg2Settings) SetFixedAfd(v string) *Mpeg2Settings { + s.FixedAfd = &v + return s +} + +// SetFramerateDenominator sets the FramerateDenominator field's value. +func (s *Mpeg2Settings) SetFramerateDenominator(v int64) *Mpeg2Settings { + s.FramerateDenominator = &v + return s +} + +// SetFramerateNumerator sets the FramerateNumerator field's value. +func (s *Mpeg2Settings) SetFramerateNumerator(v int64) *Mpeg2Settings { + s.FramerateNumerator = &v + return s +} + +// SetGopClosedCadence sets the GopClosedCadence field's value. +func (s *Mpeg2Settings) SetGopClosedCadence(v int64) *Mpeg2Settings { + s.GopClosedCadence = &v + return s +} + +// SetGopNumBFrames sets the GopNumBFrames field's value. +func (s *Mpeg2Settings) SetGopNumBFrames(v int64) *Mpeg2Settings { + s.GopNumBFrames = &v + return s +} + +// SetGopSize sets the GopSize field's value. +func (s *Mpeg2Settings) SetGopSize(v float64) *Mpeg2Settings { + s.GopSize = &v + return s +} + +// SetGopSizeUnits sets the GopSizeUnits field's value. +func (s *Mpeg2Settings) SetGopSizeUnits(v string) *Mpeg2Settings { + s.GopSizeUnits = &v + return s +} + +// SetScanType sets the ScanType field's value. 
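// --- Illustrative sketch (editor's addition, not part of the generated SDK
// code): per the Validate method above, an MPEG-2 encode requires both
// framerate parts; the new FilterSettings field wraps the shared
// TemporalFilterSettings structure. The 24000/1001 (23.976 fps) pair comes
// from the field documentation; vendored aws and medialive imports are assumed.
func exampleMpeg2() *medialive.Mpeg2Settings {
	return &medialive.Mpeg2Settings{
		FramerateNumerator:   aws.Int64(24000), // required
		FramerateDenominator: aws.Int64(1001),  // required; 24000/1001 = 23.976 fps
		FilterSettings: &medialive.Mpeg2FilterSettings{
			TemporalFilterSettings: &medialive.TemporalFilterSettings{},
		},
	}
}
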
+func (s *Mpeg2Settings) SetScanType(v string) *Mpeg2Settings { + s.ScanType = &v + return s +} + +// SetSubgopLength sets the SubgopLength field's value. +func (s *Mpeg2Settings) SetSubgopLength(v string) *Mpeg2Settings { + s.SubgopLength = &v + return s +} + +// SetTimecodeInsertion sets the TimecodeInsertion field's value. +func (s *Mpeg2Settings) SetTimecodeInsertion(v string) *Mpeg2Settings { + s.TimecodeInsertion = &v + return s +} + // Ms Smooth Group Settings type MsSmoothGroupSettings struct { _ struct{} `type:"structure"` - // The value of the "Acquisition Point Identity" element used in each message - // placed in the sparse track. Only enabled if sparseTrackType is not "none". + // The ID to include in each message in the sparse track. Ignored if sparseTrackType + // is NONE. AcquisitionPointId *string `locationName:"acquisitionPointId" type:"string"` // If set to passthrough for an audio-only MS Smooth output, the fragment absolute @@ -16034,8 +18599,12 @@ type MsSmoothGroupSettings struct { // Number of milliseconds to delay the output from the second pipeline. SendDelayMs *int64 `locationName:"sendDelayMs" type:"integer"` - // If set to scte35, use incoming SCTE-35 messages to generate a sparse track - // in this group of MS-Smooth outputs. + // Identifies the type of data to place in the sparse track:- SCTE35: Insert + // SCTE-35 messages from the source content. With each message, insert an IDR + // frame to start a new segment.- SCTE35_WITHOUT_SEGMENTATION: Insert SCTE-35 + // messages from the source content. With each message, insert an IDR frame + // but don't start a new segment.- NONE: Don't generate a sparse track for any + // outputs in this output group. SparseTrackType *string `locationName:"sparseTrackType" type:"string" enum:"SmoothGroupSparseTrackType"` // When set to send, send stream manifest so publishing point doesn't start @@ -16447,6 +19016,12 @@ type MultiplexProgram struct { // The packet identifier map for this multiplex program. PacketIdentifiersMap *MultiplexProgramPacketIdentifiersMap `locationName:"packetIdentifiersMap" type:"structure"` + // Contains information about the current sources for the specified program + // in the specified multiplex. Keep in mind that each multiplex pipeline connects + // to both pipelines in a given source channel (the channel identified by the + // program). But only one of those channel pipelines is ever active at one time. + PipelineDetails []*MultiplexProgramPipelineDetail `locationName:"pipelineDetails" type:"list"` + // The name of the multiplex program. ProgramName *string `locationName:"programName" type:"string"` } @@ -16479,6 +19054,12 @@ func (s *MultiplexProgram) SetPacketIdentifiersMap(v *MultiplexProgramPacketIden return s } +// SetPipelineDetails sets the PipelineDetails field's value. +func (s *MultiplexProgram) SetPipelineDetails(v []*MultiplexProgramPipelineDetail) *MultiplexProgram { + s.PipelineDetails = v + return s +} + // SetProgramName sets the ProgramName field's value. func (s *MultiplexProgram) SetProgramName(v string) *MultiplexProgram { s.ProgramName = &v @@ -16658,6 +19239,40 @@ func (s *MultiplexProgramPacketIdentifiersMap) SetVideoPid(v int64) *MultiplexPr return s } +// The current source for one of the pipelines in the multiplex. +type MultiplexProgramPipelineDetail struct { + _ struct{} `type:"structure"` + + // Identifies the channel pipeline that is currently active for the pipeline + // (identified by PipelineId) in the multiplex. 
+ ActiveChannelPipeline *string `locationName:"activeChannelPipeline" type:"string"` + + // Identifies a specific pipeline in the multiplex. + PipelineId *string `locationName:"pipelineId" type:"string"` +} + +// String returns the string representation +func (s MultiplexProgramPipelineDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultiplexProgramPipelineDetail) GoString() string { + return s.String() +} + +// SetActiveChannelPipeline sets the ActiveChannelPipeline field's value. +func (s *MultiplexProgramPipelineDetail) SetActiveChannelPipeline(v string) *MultiplexProgramPipelineDetail { + s.ActiveChannelPipeline = &v + return s +} + +// SetPipelineId sets the PipelineId field's value. +func (s *MultiplexProgramPipelineDetail) SetPipelineId(v string) *MultiplexProgramPipelineDetail { + s.PipelineId = &v + return s +} + // Transport stream service descriptor configuration for the Multiplex program. type MultiplexProgramServiceDescriptor struct { _ struct{} `type:"structure"` @@ -16824,7 +19439,7 @@ type MultiplexSettings struct { _ struct{} `type:"structure"` // Maximum video buffer delay in milliseconds. - MaximumVideoBufferDelayMilliseconds *int64 `locationName:"maximumVideoBufferDelayMilliseconds" min:"1000" type:"integer"` + MaximumVideoBufferDelayMilliseconds *int64 `locationName:"maximumVideoBufferDelayMilliseconds" min:"800" type:"integer"` // Transport stream bit rate. // @@ -16853,8 +19468,8 @@ func (s MultiplexSettings) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *MultiplexSettings) Validate() error { invalidParams := request.ErrInvalidParams{Context: "MultiplexSettings"} - if s.MaximumVideoBufferDelayMilliseconds != nil && *s.MaximumVideoBufferDelayMilliseconds < 1000 { - invalidParams.Add(request.NewErrParamMinValue("MaximumVideoBufferDelayMilliseconds", 1000)) + if s.MaximumVideoBufferDelayMilliseconds != nil && *s.MaximumVideoBufferDelayMilliseconds < 800 { + invalidParams.Add(request.NewErrParamMinValue("MaximumVideoBufferDelayMilliseconds", 800)) } if s.TransportStreamBitrate == nil { invalidParams.Add(request.NewErrParamRequired("TransportStreamBitrate")) @@ -16929,6 +19544,13 @@ type MultiplexStatmuxVideoSettings struct { // Minimum statmux bitrate. MinimumBitrate *int64 `locationName:"minimumBitrate" min:"100000" type:"integer"` + + // The purpose of the priority is to use a combination of the\nmultiplex rate + // control algorithm and the QVBR capability of the\nencoder to prioritize the + // video quality of some channels in a\nmultiplex over others. Channels that + // have a higher priority will\nget higher video quality at the expense of the + // video quality of\nother channels in the multiplex with lower priority. + Priority *int64 `locationName:"priority" type:"integer"` } // String returns the string representation @@ -16950,6 +19572,9 @@ func (s *MultiplexStatmuxVideoSettings) Validate() error { if s.MinimumBitrate != nil && *s.MinimumBitrate < 100000 { invalidParams.Add(request.NewErrParamMinValue("MinimumBitrate", 100000)) } + if s.Priority != nil && *s.Priority < -5 { + invalidParams.Add(request.NewErrParamMinValue("Priority", -5)) + } if invalidParams.Len() > 0 { return invalidParams @@ -16969,6 +19594,12 @@ func (s *MultiplexStatmuxVideoSettings) SetMinimumBitrate(v int64) *MultiplexSta return s } +// SetPriority sets the Priority field's value. 
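// --- Illustrative sketch (editor's addition, not part of the generated SDK
// code): the new Priority field biases the multiplex statmux rate control
// toward this program, and Validate above accepts values down to -5. The
// numbers are example values; vendored aws and medialive imports are assumed.
func exampleStatmux() *medialive.MultiplexStatmuxVideoSettings {
	return &medialive.MultiplexStatmuxVideoSettings{
		MinimumBitrate: aws.Int64(1000000), // must be at least 100000 per Validate
		Priority:       aws.Int64(2),       // favor this program's video quality
	}
}
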
+func (s *MultiplexStatmuxVideoSettings) SetPriority(v int64) *MultiplexStatmuxVideoSettings { + s.Priority = &v + return s +} + type MultiplexSummary struct { _ struct{} `type:"structure"` @@ -17190,8 +19821,8 @@ func (s *NielsenConfiguration) SetNielsenPcmToId3Tagging(v string) *NielsenConfi } type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17208,17 +19839,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17226,22 +19857,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
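// --- Illustrative sketch (editor's addition, not part of the generated SDK
// code): with the exception types now satisfying the error interfaces on
// pointer receivers, callers can match them with the standard library's
// errors.As. DescribeChannel is an existing medialive operation used only for
// illustration; "errors", the vendored aws package, and the generated
// MediaLive client are assumed imported.
func exampleNotFound(svc *medialive.MediaLive, channelID string) error {
	_, err := svc.DescribeChannel(&medialive.DescribeChannelInput{
		ChannelId: aws.String(channelID),
	})
	var nfe *medialive.NotFoundException
	if errors.As(err, &nfe) {
		return nil // the channel does not exist; treat as a clean miss
	}
	return err
}
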
-func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Reserved resources available for purchase @@ -18169,6 +20800,21 @@ func (s *PurchaseOfferingOutput) SetReservation(v *Reservation) *PurchaseOfferin return s } +// Raw Settings +type RawSettings struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RawSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RawSettings) GoString() string { + return s.String() +} + // Rec601 Settings type Rec601Settings struct { _ struct{} `type:"structure"` @@ -18812,6 +21458,9 @@ type ScheduleActionSettings struct { // Action to insert HLS metadata HlsTimedMetadataSettings *HlsTimedMetadataScheduleActionSettings `locationName:"hlsTimedMetadataSettings" type:"structure"` + // Action to prepare an input for a future immediate input switch + InputPrepareSettings *InputPrepareScheduleActionSettings `locationName:"inputPrepareSettings" type:"structure"` + // Action to switch the input InputSwitchSettings *InputSwitchScheduleActionSettings `locationName:"inputSwitchSettings" type:"structure"` @@ -18857,6 +21506,11 @@ func (s *ScheduleActionSettings) Validate() error { invalidParams.AddNested("HlsTimedMetadataSettings", err.(request.ErrInvalidParams)) } } + if s.InputPrepareSettings != nil { + if err := s.InputPrepareSettings.Validate(); err != nil { + invalidParams.AddNested("InputPrepareSettings", err.(request.ErrInvalidParams)) + } + } if s.InputSwitchSettings != nil { if err := s.InputSwitchSettings.Validate(); err != nil { invalidParams.AddNested("InputSwitchSettings", err.(request.ErrInvalidParams)) @@ -18906,6 +21560,12 @@ func (s *ScheduleActionSettings) SetHlsTimedMetadataSettings(v *HlsTimedMetadata return s } +// SetInputPrepareSettings sets the InputPrepareSettings field's value. +func (s *ScheduleActionSettings) SetInputPrepareSettings(v *InputPrepareScheduleActionSettings) *ScheduleActionSettings { + s.InputPrepareSettings = v + return s +} + // SetInputSwitchSettings sets the InputSwitchSettings field's value. func (s *ScheduleActionSettings) SetInputSwitchSettings(v *InputSwitchScheduleActionSettings) *ScheduleActionSettings { s.InputSwitchSettings = v @@ -19829,6 +22489,8 @@ type StartChannelOutput struct { Arn *string `locationName:"arn" type:"string"` + CdiInputSpecification *CdiInputSpecification `locationName:"cdiInputSpecification" type:"structure"` + // A standard channel has two encoding pipelines and a single pipeline channel // only has one. ChannelClass *string `locationName:"channelClass" type:"string" enum:"ChannelClass"` @@ -19878,6 +22540,12 @@ func (s *StartChannelOutput) SetArn(v string) *StartChannelOutput { return s } +// SetCdiInputSpecification sets the CdiInputSpecification field's value. +func (s *StartChannelOutput) SetCdiInputSpecification(v *CdiInputSpecification) *StartChannelOutput { + s.CdiInputSpecification = v + return s +} + // SetChannelClass sets the ChannelClass field's value. func (s *StartChannelOutput) SetChannelClass(v string) *StartChannelOutput { s.ChannelClass = &v @@ -20408,6 +23076,8 @@ type StopChannelOutput struct { Arn *string `locationName:"arn" type:"string"` + CdiInputSpecification *CdiInputSpecification `locationName:"cdiInputSpecification" type:"structure"` + // A standard channel has two encoding pipelines and a single pipeline channel // only has one. 
ChannelClass *string `locationName:"channelClass" type:"string" enum:"ChannelClass"` @@ -20457,6 +23127,12 @@ func (s *StopChannelOutput) SetArn(v string) *StopChannelOutput { return s } +// SetCdiInputSpecification sets the CdiInputSpecification field's value. +func (s *StopChannelOutput) SetCdiInputSpecification(v *CdiInputSpecification) *StopChannelOutput { + s.CdiInputSpecification = v + return s +} + // SetChannelClass sets the ChannelClass field's value. func (s *StopChannelOutput) SetChannelClass(v string) *StopChannelOutput { s.ChannelClass = &v @@ -20739,18 +23415,55 @@ type TeletextSourceSettings struct { } // String returns the string representation -func (s TeletextSourceSettings) String() string { +func (s TeletextSourceSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TeletextSourceSettings) GoString() string { + return s.String() +} + +// SetPageNumber sets the PageNumber field's value. +func (s *TeletextSourceSettings) SetPageNumber(v string) *TeletextSourceSettings { + s.PageNumber = &v + return s +} + +// Temporal Filter Settings +type TemporalFilterSettings struct { + _ struct{} `type:"structure"` + + // If you enable this filter, the results are the following:- If the source + // content is noisy (it contains excessive digital artifacts), the filter cleans + // up the source.- If the source content is already clean, the filter tends + // to decrease the bitrate, especially when the rate control mode is QVBR. + PostFilterSharpening *string `locationName:"postFilterSharpening" type:"string" enum:"TemporalFilterPostFilterSharpening"` + + // Choose a filter strength. We recommend a strength of 1 or 2. A higher strength + // might take out good information, resulting in an image that is overly soft. + Strength *string `locationName:"strength" type:"string" enum:"TemporalFilterStrength"` +} + +// String returns the string representation +func (s TemporalFilterSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TeletextSourceSettings) GoString() string { +func (s TemporalFilterSettings) GoString() string { return s.String() } -// SetPageNumber sets the PageNumber field's value. -func (s *TeletextSourceSettings) SetPageNumber(v string) *TeletextSourceSettings { - s.PageNumber = &v +// SetPostFilterSharpening sets the PostFilterSharpening field's value. +func (s *TemporalFilterSettings) SetPostFilterSharpening(v string) *TemporalFilterSettings { + s.PostFilterSharpening = &v + return s +} + +// SetStrength sets the Strength field's value. +func (s *TemporalFilterSettings) SetStrength(v string) *TemporalFilterSettings { + s.Strength = &v return s } @@ -20814,8 +23527,8 @@ func (s *TimecodeConfig) SetSyncThreshold(v int64) *TimecodeConfig { } type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -20832,17 +23545,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. 
-func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20850,22 +23563,22 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } // Ttml Destination Settings @@ -21067,8 +23780,8 @@ func (s *UdpOutputSettings) SetFecOutputSettings(v *FecOutputSettings) *UdpOutpu } type UnprocessableEntityException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -21087,17 +23800,17 @@ func (s UnprocessableEntityException) GoString() string { func newErrorUnprocessableEntityException(v protocol.ResponseMetadata) error { return &UnprocessableEntityException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnprocessableEntityException) Code() string { +func (s *UnprocessableEntityException) Code() string { return "UnprocessableEntityException" } // Message returns the exception's message. -func (s UnprocessableEntityException) Message() string { +func (s *UnprocessableEntityException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -21105,22 +23818,22 @@ func (s UnprocessableEntityException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnprocessableEntityException) OrigErr() error { +func (s *UnprocessableEntityException) OrigErr() error { return nil } -func (s UnprocessableEntityException) Error() string { +func (s *UnprocessableEntityException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UnprocessableEntityException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnprocessableEntityException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s UnprocessableEntityException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnprocessableEntityException) RequestID() string { + return s.RespMetadata.RequestID } type UpdateChannelClassInput struct { @@ -21220,6 +23933,8 @@ func (s *UpdateChannelClassOutput) SetChannel(v *Channel) *UpdateChannelClassOut type UpdateChannelInput struct { _ struct{} `type:"structure"` + CdiInputSpecification *CdiInputSpecification `locationName:"cdiInputSpecification" type:"structure"` + // ChannelId is a required field ChannelId *string `location:"uri" locationName:"channelId" type:"string" required:"true"` @@ -21291,6 +24006,12 @@ func (s *UpdateChannelInput) Validate() error { return nil } +// SetCdiInputSpecification sets the CdiInputSpecification field's value. +func (s *UpdateChannelInput) SetCdiInputSpecification(v *CdiInputSpecification) *UpdateChannelInput { + s.CdiInputSpecification = v + return s +} + // SetChannelId sets the ChannelId field's value. func (s *UpdateChannelInput) SetChannelId(v string) *UpdateChannelInput { s.ChannelId = &v @@ -21361,11 +24082,174 @@ func (s *UpdateChannelOutput) SetChannel(v *Channel) *UpdateChannelOutput { return s } +type UpdateInputDeviceInput struct { + _ struct{} `type:"structure"` + + // Configurable settings for the input device. + HdDeviceSettings *InputDeviceConfigurableSettings `locationName:"hdDeviceSettings" type:"structure"` + + // InputDeviceId is a required field + InputDeviceId *string `location:"uri" locationName:"inputDeviceId" type:"string" required:"true"` + + Name *string `locationName:"name" type:"string"` +} + +// String returns the string representation +func (s UpdateInputDeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInputDeviceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateInputDeviceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateInputDeviceInput"} + if s.InputDeviceId == nil { + invalidParams.Add(request.NewErrParamRequired("InputDeviceId")) + } + if s.InputDeviceId != nil && len(*s.InputDeviceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputDeviceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHdDeviceSettings sets the HdDeviceSettings field's value. +func (s *UpdateInputDeviceInput) SetHdDeviceSettings(v *InputDeviceConfigurableSettings) *UpdateInputDeviceInput { + s.HdDeviceSettings = v + return s +} + +// SetInputDeviceId sets the InputDeviceId field's value. +func (s *UpdateInputDeviceInput) SetInputDeviceId(v string) *UpdateInputDeviceInput { + s.InputDeviceId = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateInputDeviceInput) SetName(v string) *UpdateInputDeviceInput { + s.Name = &v + return s +} + +type UpdateInputDeviceOutput struct { + _ struct{} `type:"structure"` + + Arn *string `locationName:"arn" type:"string"` + + // The state of the connection between the input device and AWS. + ConnectionState *string `locationName:"connectionState" type:"string" enum:"InputDeviceConnectionState"` + + // The status of the action to synchronize the device configuration. If you + // change the configuration of the input device (for example, the maximum bitrate), + // MediaLive sends the new data to the device. The device might not update itself + // immediately. 
SYNCED means the device has updated its configuration. SYNCING + // means that it has not updated its configuration. + DeviceSettingsSyncState *string `locationName:"deviceSettingsSyncState" type:"string" enum:"DeviceSettingsSyncState"` + + // Settings that describe the active source from the input device, and the video + // characteristics of that source. + HdDeviceSettings *InputDeviceHdSettings `locationName:"hdDeviceSettings" type:"structure"` + + Id *string `locationName:"id" type:"string"` + + MacAddress *string `locationName:"macAddress" type:"string"` + + Name *string `locationName:"name" type:"string"` + + // The network settings for the input device. + NetworkSettings *InputDeviceNetworkSettings `locationName:"networkSettings" type:"structure"` + + SerialNumber *string `locationName:"serialNumber" type:"string"` + + // The type of the input device. For an AWS Elemental Link device that outputs + // resolutions up to 1080, choose "HD". + Type *string `locationName:"type" type:"string" enum:"InputDeviceType"` +} + +// String returns the string representation +func (s UpdateInputDeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateInputDeviceOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *UpdateInputDeviceOutput) SetArn(v string) *UpdateInputDeviceOutput { + s.Arn = &v + return s +} + +// SetConnectionState sets the ConnectionState field's value. +func (s *UpdateInputDeviceOutput) SetConnectionState(v string) *UpdateInputDeviceOutput { + s.ConnectionState = &v + return s +} + +// SetDeviceSettingsSyncState sets the DeviceSettingsSyncState field's value. +func (s *UpdateInputDeviceOutput) SetDeviceSettingsSyncState(v string) *UpdateInputDeviceOutput { + s.DeviceSettingsSyncState = &v + return s +} + +// SetHdDeviceSettings sets the HdDeviceSettings field's value. +func (s *UpdateInputDeviceOutput) SetHdDeviceSettings(v *InputDeviceHdSettings) *UpdateInputDeviceOutput { + s.HdDeviceSettings = v + return s +} + +// SetId sets the Id field's value. +func (s *UpdateInputDeviceOutput) SetId(v string) *UpdateInputDeviceOutput { + s.Id = &v + return s +} + +// SetMacAddress sets the MacAddress field's value. +func (s *UpdateInputDeviceOutput) SetMacAddress(v string) *UpdateInputDeviceOutput { + s.MacAddress = &v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateInputDeviceOutput) SetName(v string) *UpdateInputDeviceOutput { + s.Name = &v + return s +} + +// SetNetworkSettings sets the NetworkSettings field's value. +func (s *UpdateInputDeviceOutput) SetNetworkSettings(v *InputDeviceNetworkSettings) *UpdateInputDeviceOutput { + s.NetworkSettings = v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *UpdateInputDeviceOutput) SetSerialNumber(v string) *UpdateInputDeviceOutput { + s.SerialNumber = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *UpdateInputDeviceOutput) SetType(v string) *UpdateInputDeviceOutput { + s.Type = &v + return s +} + type UpdateInputInput struct { _ struct{} `type:"structure"` Destinations []*InputDestinationRequest `locationName:"destinations" type:"list"` + InputDevices []*InputDeviceRequest `locationName:"inputDevices" type:"list"` + // InputId is a required field InputId *string `location:"uri" locationName:"inputId" type:"string" required:"true"` @@ -21412,6 +24296,12 @@ func (s *UpdateInputInput) SetDestinations(v []*InputDestinationRequest) *Update return s } +// SetInputDevices sets the InputDevices field's value. +func (s *UpdateInputInput) SetInputDevices(v []*InputDeviceRequest) *UpdateInputInput { + s.InputDevices = v + return s +} + // SetInputId sets the InputId field's value. func (s *UpdateInputInput) SetInputId(v string) *UpdateInputInput { s.InputId = &v @@ -21796,8 +24686,10 @@ func (s *UpdateReservationOutput) SetReservation(v *Reservation) *UpdateReservat type ValidationError struct { _ struct{} `type:"structure"` + // Path to the source of the error. ElementPath *string `locationName:"elementPath" type:"string"` + // The error message. ErrorMessage *string `locationName:"errorMessage" type:"string"` } @@ -21835,6 +24727,9 @@ type VideoCodecSettings struct { // H265 Settings H265Settings *H265Settings `locationName:"h265Settings" type:"structure"` + + // Mpeg2 Settings + Mpeg2Settings *Mpeg2Settings `locationName:"mpeg2Settings" type:"structure"` } // String returns the string representation @@ -21865,6 +24760,11 @@ func (s *VideoCodecSettings) Validate() error { invalidParams.AddNested("H265Settings", err.(request.ErrInvalidParams)) } } + if s.Mpeg2Settings != nil { + if err := s.Mpeg2Settings.Validate(); err != nil { + invalidParams.AddNested("Mpeg2Settings", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -21890,6 +24790,12 @@ func (s *VideoCodecSettings) SetH265Settings(v *H265Settings) *VideoCodecSetting return s } +// SetMpeg2Settings sets the Mpeg2Settings field's value. +func (s *VideoCodecSettings) SetMpeg2Settings(v *Mpeg2Settings) *VideoCodecSettings { + s.Mpeg2Settings = v + return s +} + // Video settings for this stream. type VideoDescription struct { _ struct{} `type:"structure"` @@ -21910,17 +24816,20 @@ type VideoDescription struct { // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` - // Indicates how to respond to the AFD values in the input stream. RESPOND causes - // input video to be clipped, depending on the AFD value, input display aspect - // ratio, and output display aspect ratio, and (except for FRAMECAPTURE codec) - // includes the values in the output. PASSTHROUGH (does not apply to FRAMECAPTURE - // codec) ignores the AFD values and includes the values in the output, so input - // video is not clipped. NONE ignores the AFD values and does not include the - // values through to the output, so input video is not clipped. + // Indicates how MediaLive will respond to the AFD values that might be in the + // input video. If you do not know what AFD signaling is, or if your downstream + // system has not given you guidance, choose PASSTHROUGH.RESPOND: MediaLive + // clips the input video using a formula that uses the AFD values (configured + // in afdSignaling ), the input display aspect ratio, and the output display + // aspect ratio. 
MediaLive also includes the AFD values in the output, unless + // the codec for this encode is FRAME_CAPTURE.PASSTHROUGH: MediaLive ignores + // the AFD values and does not clip the video. But MediaLive does include the + // values in the output.NONE: MediaLive does not clip the input video and does + // not include the AFD values in the output RespondToAfd *string `locationName:"respondToAfd" type:"string" enum:"VideoDescriptionRespondToAfd"` - // STRETCHTOOUTPUT configures the output position to stretch the video to the - // specified output resolution (height and width). This option will override + // STRETCH_TO_OUTPUT configures the output position to stretch the video to + // the specified output resolution (height and width). This option will override // any position value. DEFAULT may insert black boxes (pillar boxes or letter // boxes) around the video to provide the specified output resolution. ScalingBehavior *string `locationName:"scalingBehavior" type:"string" enum:"VideoDescriptionScalingBehavior"` @@ -22142,6 +25051,49 @@ func (s *VideoSelectorSettings) SetVideoSelectorProgramId(v *VideoSelectorProgra return s } +// Wav Settings +type WavSettings struct { + _ struct{} `type:"structure"` + + // Bits per sample. + BitDepth *float64 `locationName:"bitDepth" type:"double"` + + // The audio coding mode for the WAV audio. The mode determines the number of + // channels in the audio. + CodingMode *string `locationName:"codingMode" type:"string" enum:"WavCodingMode"` + + // Sample rate in Hz. + SampleRate *float64 `locationName:"sampleRate" type:"double"` +} + +// String returns the string representation +func (s WavSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WavSettings) GoString() string { + return s.String() +} + +// SetBitDepth sets the BitDepth field's value. +func (s *WavSettings) SetBitDepth(v float64) *WavSettings { + s.BitDepth = &v + return s +} + +// SetCodingMode sets the CodingMode field's value. +func (s *WavSettings) SetCodingMode(v string) *WavSettings { + s.CodingMode = &v + return s +} + +// SetSampleRate sets the SampleRate field's value. 
+func (s *WavSettings) SetSampleRate(v float64) *WavSettings { + s.SampleRate = &v + return s +} + // Webvtt Destination Settings type WebvttDestinationSettings struct { _ struct{} `type:"structure"` @@ -22175,6 +25127,17 @@ const ( AacCodingModeCodingMode51 = "CODING_MODE_5_1" ) +// AacCodingMode_Values returns all elements of the AacCodingMode enum +func AacCodingMode_Values() []string { + return []string{ + AacCodingModeAdReceiverMix, + AacCodingModeCodingMode10, + AacCodingModeCodingMode11, + AacCodingModeCodingMode20, + AacCodingModeCodingMode51, + } +} + // Aac Input Type const ( // AacInputTypeBroadcasterMixedAd is a AacInputType enum value @@ -22184,6 +25147,14 @@ const ( AacInputTypeNormal = "NORMAL" ) +// AacInputType_Values returns all elements of the AacInputType enum +func AacInputType_Values() []string { + return []string{ + AacInputTypeBroadcasterMixedAd, + AacInputTypeNormal, + } +} + // Aac Profile const ( // AacProfileHev1 is a AacProfile enum value @@ -22196,6 +25167,15 @@ const ( AacProfileLc = "LC" ) +// AacProfile_Values returns all elements of the AacProfile enum +func AacProfile_Values() []string { + return []string{ + AacProfileHev1, + AacProfileHev2, + AacProfileLc, + } +} + // Aac Rate Control Mode const ( // AacRateControlModeCbr is a AacRateControlMode enum value @@ -22205,6 +25185,14 @@ const ( AacRateControlModeVbr = "VBR" ) +// AacRateControlMode_Values returns all elements of the AacRateControlMode enum +func AacRateControlMode_Values() []string { + return []string{ + AacRateControlModeCbr, + AacRateControlModeVbr, + } +} + // Aac Raw Format const ( // AacRawFormatLatmLoas is a AacRawFormat enum value @@ -22214,6 +25202,14 @@ const ( AacRawFormatNone = "NONE" ) +// AacRawFormat_Values returns all elements of the AacRawFormat enum +func AacRawFormat_Values() []string { + return []string{ + AacRawFormatLatmLoas, + AacRawFormatNone, + } +} + // Aac Spec const ( // AacSpecMpeg2 is a AacSpec enum value @@ -22223,6 +25219,14 @@ const ( AacSpecMpeg4 = "MPEG4" ) +// AacSpec_Values returns all elements of the AacSpec enum +func AacSpec_Values() []string { + return []string{ + AacSpecMpeg2, + AacSpecMpeg4, + } +} + // Aac Vbr Quality const ( // AacVbrQualityHigh is a AacVbrQuality enum value @@ -22238,6 +25242,16 @@ const ( AacVbrQualityMediumLow = "MEDIUM_LOW" ) +// AacVbrQuality_Values returns all elements of the AacVbrQuality enum +func AacVbrQuality_Values() []string { + return []string{ + AacVbrQualityHigh, + AacVbrQualityLow, + AacVbrQualityMediumHigh, + AacVbrQualityMediumLow, + } +} + // Ac3 Bitstream Mode const ( // Ac3BitstreamModeCommentary is a Ac3BitstreamMode enum value @@ -22265,6 +25279,20 @@ const ( Ac3BitstreamModeVoiceOver = "VOICE_OVER" ) +// Ac3BitstreamMode_Values returns all elements of the Ac3BitstreamMode enum +func Ac3BitstreamMode_Values() []string { + return []string{ + Ac3BitstreamModeCommentary, + Ac3BitstreamModeCompleteMain, + Ac3BitstreamModeDialogue, + Ac3BitstreamModeEmergency, + Ac3BitstreamModeHearingImpaired, + Ac3BitstreamModeMusicAndEffects, + Ac3BitstreamModeVisuallyImpaired, + Ac3BitstreamModeVoiceOver, + } +} + // Ac3 Coding Mode const ( // Ac3CodingModeCodingMode10 is a Ac3CodingMode enum value @@ -22280,6 +25308,16 @@ const ( Ac3CodingModeCodingMode32Lfe = "CODING_MODE_3_2_LFE" ) +// Ac3CodingMode_Values returns all elements of the Ac3CodingMode enum +func Ac3CodingMode_Values() []string { + return []string{ + Ac3CodingModeCodingMode10, + Ac3CodingModeCodingMode11, + Ac3CodingModeCodingMode20, + 
Ac3CodingModeCodingMode32Lfe, + } +} + // Ac3 Drc Profile const ( // Ac3DrcProfileFilmStandard is a Ac3DrcProfile enum value @@ -22289,6 +25327,14 @@ const ( Ac3DrcProfileNone = "NONE" ) +// Ac3DrcProfile_Values returns all elements of the Ac3DrcProfile enum +func Ac3DrcProfile_Values() []string { + return []string{ + Ac3DrcProfileFilmStandard, + Ac3DrcProfileNone, + } +} + // Ac3 Lfe Filter const ( // Ac3LfeFilterDisabled is a Ac3LfeFilter enum value @@ -22298,6 +25344,14 @@ const ( Ac3LfeFilterEnabled = "ENABLED" ) +// Ac3LfeFilter_Values returns all elements of the Ac3LfeFilter enum +func Ac3LfeFilter_Values() []string { + return []string{ + Ac3LfeFilterDisabled, + Ac3LfeFilterEnabled, + } +} + // Ac3 Metadata Control const ( // Ac3MetadataControlFollowInput is a Ac3MetadataControl enum value @@ -22307,6 +25361,26 @@ const ( Ac3MetadataControlUseConfigured = "USE_CONFIGURED" ) +// Ac3MetadataControl_Values returns all elements of the Ac3MetadataControl enum +func Ac3MetadataControl_Values() []string { + return []string{ + Ac3MetadataControlFollowInput, + Ac3MetadataControlUseConfigured, + } +} + +const ( + // AcceptHeaderImageJpeg is a AcceptHeader enum value + AcceptHeaderImageJpeg = "image/jpeg" +) + +// AcceptHeader_Values returns all elements of the AcceptHeader enum +func AcceptHeader_Values() []string { + return []string{ + AcceptHeaderImageJpeg, + } +} + // Afd Signaling const ( // AfdSignalingAuto is a AfdSignaling enum value @@ -22319,6 +25393,15 @@ const ( AfdSignalingNone = "NONE" ) +// AfdSignaling_Values returns all elements of the AfdSignaling enum +func AfdSignaling_Values() []string { + return []string{ + AfdSignalingAuto, + AfdSignalingFixed, + AfdSignalingNone, + } +} + // Audio Description Audio Type Control const ( // AudioDescriptionAudioTypeControlFollowInput is a AudioDescriptionAudioTypeControl enum value @@ -22328,6 +25411,14 @@ const ( AudioDescriptionAudioTypeControlUseConfigured = "USE_CONFIGURED" ) +// AudioDescriptionAudioTypeControl_Values returns all elements of the AudioDescriptionAudioTypeControl enum +func AudioDescriptionAudioTypeControl_Values() []string { + return []string{ + AudioDescriptionAudioTypeControlFollowInput, + AudioDescriptionAudioTypeControlUseConfigured, + } +} + // Audio Description Language Code Control const ( // AudioDescriptionLanguageCodeControlFollowInput is a AudioDescriptionLanguageCodeControl enum value @@ -22337,6 +25428,14 @@ const ( AudioDescriptionLanguageCodeControlUseConfigured = "USE_CONFIGURED" ) +// AudioDescriptionLanguageCodeControl_Values returns all elements of the AudioDescriptionLanguageCodeControl enum +func AudioDescriptionLanguageCodeControl_Values() []string { + return []string{ + AudioDescriptionLanguageCodeControlFollowInput, + AudioDescriptionLanguageCodeControlUseConfigured, + } +} + // Audio Language Selection Policy const ( // AudioLanguageSelectionPolicyLoose is a AudioLanguageSelectionPolicy enum value @@ -22346,6 +25445,14 @@ const ( AudioLanguageSelectionPolicyStrict = "STRICT" ) +// AudioLanguageSelectionPolicy_Values returns all elements of the AudioLanguageSelectionPolicy enum +func AudioLanguageSelectionPolicy_Values() []string { + return []string{ + AudioLanguageSelectionPolicyLoose, + AudioLanguageSelectionPolicyStrict, + } +} + // Audio Normalization Algorithm const ( // AudioNormalizationAlgorithmItu17701 is a AudioNormalizationAlgorithm enum value @@ -22355,12 +25462,27 @@ const ( AudioNormalizationAlgorithmItu17702 = "ITU_1770_2" ) +// AudioNormalizationAlgorithm_Values returns all 
elements of the AudioNormalizationAlgorithm enum +func AudioNormalizationAlgorithm_Values() []string { + return []string{ + AudioNormalizationAlgorithmItu17701, + AudioNormalizationAlgorithmItu17702, + } +} + // Audio Normalization Algorithm Control const ( // AudioNormalizationAlgorithmControlCorrectAudio is a AudioNormalizationAlgorithmControl enum value AudioNormalizationAlgorithmControlCorrectAudio = "CORRECT_AUDIO" ) +// AudioNormalizationAlgorithmControl_Values returns all elements of the AudioNormalizationAlgorithmControl enum +func AudioNormalizationAlgorithmControl_Values() []string { + return []string{ + AudioNormalizationAlgorithmControlCorrectAudio, + } +} + // Audio Only Hls Segment Type const ( // AudioOnlyHlsSegmentTypeAac is a AudioOnlyHlsSegmentType enum value @@ -22370,6 +25492,14 @@ const ( AudioOnlyHlsSegmentTypeFmp4 = "FMP4" ) +// AudioOnlyHlsSegmentType_Values returns all elements of the AudioOnlyHlsSegmentType enum +func AudioOnlyHlsSegmentType_Values() []string { + return []string{ + AudioOnlyHlsSegmentTypeAac, + AudioOnlyHlsSegmentTypeFmp4, + } +} + // Audio Only Hls Track Type const ( // AudioOnlyHlsTrackTypeAlternateAudioAutoSelect is a AudioOnlyHlsTrackType enum value @@ -22385,6 +25515,16 @@ const ( AudioOnlyHlsTrackTypeAudioOnlyVariantStream = "AUDIO_ONLY_VARIANT_STREAM" ) +// AudioOnlyHlsTrackType_Values returns all elements of the AudioOnlyHlsTrackType enum +func AudioOnlyHlsTrackType_Values() []string { + return []string{ + AudioOnlyHlsTrackTypeAlternateAudioAutoSelect, + AudioOnlyHlsTrackTypeAlternateAudioAutoSelectDefault, + AudioOnlyHlsTrackTypeAlternateAudioNotAutoSelect, + AudioOnlyHlsTrackTypeAudioOnlyVariantStream, + } +} + // Audio Type const ( // AudioTypeCleanEffects is a AudioType enum value @@ -22400,6 +25540,16 @@ const ( AudioTypeVisualImpairedCommentary = "VISUAL_IMPAIRED_COMMENTARY" ) +// AudioType_Values returns all elements of the AudioType enum +func AudioType_Values() []string { + return []string{ + AudioTypeCleanEffects, + AudioTypeHearingImpaired, + AudioTypeUndefined, + AudioTypeVisualImpairedCommentary, + } +} + // Authentication Scheme const ( // AuthenticationSchemeAkamai is a AuthenticationScheme enum value @@ -22409,6 +25559,14 @@ const ( AuthenticationSchemeCommon = "COMMON" ) +// AuthenticationScheme_Values returns all elements of the AuthenticationScheme enum +func AuthenticationScheme_Values() []string { + return []string{ + AuthenticationSchemeAkamai, + AuthenticationSchemeCommon, + } +} + // Avail Blanking State const ( // AvailBlankingStateDisabled is a AvailBlankingState enum value @@ -22418,6 +25576,14 @@ const ( AvailBlankingStateEnabled = "ENABLED" ) +// AvailBlankingState_Values returns all elements of the AvailBlankingState enum +func AvailBlankingState_Values() []string { + return []string{ + AvailBlankingStateDisabled, + AvailBlankingStateEnabled, + } +} + // Blackout Slate Network End Blackout const ( // BlackoutSlateNetworkEndBlackoutDisabled is a BlackoutSlateNetworkEndBlackout enum value @@ -22427,6 +25593,14 @@ const ( BlackoutSlateNetworkEndBlackoutEnabled = "ENABLED" ) +// BlackoutSlateNetworkEndBlackout_Values returns all elements of the BlackoutSlateNetworkEndBlackout enum +func BlackoutSlateNetworkEndBlackout_Values() []string { + return []string{ + BlackoutSlateNetworkEndBlackoutDisabled, + BlackoutSlateNetworkEndBlackoutEnabled, + } +} + // Blackout Slate State const ( // BlackoutSlateStateDisabled is a BlackoutSlateState enum value @@ -22436,6 +25610,14 @@ const ( BlackoutSlateStateEnabled = "ENABLED" 
) +// BlackoutSlateState_Values returns all elements of the BlackoutSlateState enum +func BlackoutSlateState_Values() []string { + return []string{ + BlackoutSlateStateDisabled, + BlackoutSlateStateEnabled, + } +} + // Burn In Alignment const ( // BurnInAlignmentCentered is a BurnInAlignment enum value @@ -22448,6 +25630,15 @@ const ( BurnInAlignmentSmart = "SMART" ) +// BurnInAlignment_Values returns all elements of the BurnInAlignment enum +func BurnInAlignment_Values() []string { + return []string{ + BurnInAlignmentCentered, + BurnInAlignmentLeft, + BurnInAlignmentSmart, + } +} + // Burn In Background Color const ( // BurnInBackgroundColorBlack is a BurnInBackgroundColor enum value @@ -22460,6 +25651,15 @@ const ( BurnInBackgroundColorWhite = "WHITE" ) +// BurnInBackgroundColor_Values returns all elements of the BurnInBackgroundColor enum +func BurnInBackgroundColor_Values() []string { + return []string{ + BurnInBackgroundColorBlack, + BurnInBackgroundColorNone, + BurnInBackgroundColorWhite, + } +} + // Burn In Font Color const ( // BurnInFontColorBlack is a BurnInFontColor enum value @@ -22481,6 +25681,18 @@ const ( BurnInFontColorYellow = "YELLOW" ) +// BurnInFontColor_Values returns all elements of the BurnInFontColor enum +func BurnInFontColor_Values() []string { + return []string{ + BurnInFontColorBlack, + BurnInFontColorBlue, + BurnInFontColorGreen, + BurnInFontColorRed, + BurnInFontColorWhite, + BurnInFontColorYellow, + } +} + // Burn In Outline Color const ( // BurnInOutlineColorBlack is a BurnInOutlineColor enum value @@ -22502,6 +25714,18 @@ const ( BurnInOutlineColorYellow = "YELLOW" ) +// BurnInOutlineColor_Values returns all elements of the BurnInOutlineColor enum +func BurnInOutlineColor_Values() []string { + return []string{ + BurnInOutlineColorBlack, + BurnInOutlineColorBlue, + BurnInOutlineColorGreen, + BurnInOutlineColorRed, + BurnInOutlineColorWhite, + BurnInOutlineColorYellow, + } +} + // Burn In Shadow Color const ( // BurnInShadowColorBlack is a BurnInShadowColor enum value @@ -22514,15 +25738,59 @@ const ( BurnInShadowColorWhite = "WHITE" ) +// BurnInShadowColor_Values returns all elements of the BurnInShadowColor enum +func BurnInShadowColor_Values() []string { + return []string{ + BurnInShadowColorBlack, + BurnInShadowColorNone, + BurnInShadowColorWhite, + } +} + // Burn In Teletext Grid Control const ( // BurnInTeletextGridControlFixed is a BurnInTeletextGridControl enum value BurnInTeletextGridControlFixed = "FIXED" - // BurnInTeletextGridControlScaled is a BurnInTeletextGridControl enum value - BurnInTeletextGridControlScaled = "SCALED" + // BurnInTeletextGridControlScaled is a BurnInTeletextGridControl enum value + BurnInTeletextGridControlScaled = "SCALED" +) + +// BurnInTeletextGridControl_Values returns all elements of the BurnInTeletextGridControl enum +func BurnInTeletextGridControl_Values() []string { + return []string{ + BurnInTeletextGridControlFixed, + BurnInTeletextGridControlScaled, + } +} + +// Maximum CDI input resolution; SD is 480i and 576i up to 30 frames-per-second +// (fps), HD is 720p up to 60 fps / 1080i up to 30 fps, FHD is 1080p up to 60 +// fps, UHD is 2160p up to 60 fps +const ( + // CdiInputResolutionSd is a CdiInputResolution enum value + CdiInputResolutionSd = "SD" + + // CdiInputResolutionHd is a CdiInputResolution enum value + CdiInputResolutionHd = "HD" + + // CdiInputResolutionFhd is a CdiInputResolution enum value + CdiInputResolutionFhd = "FHD" + + // CdiInputResolutionUhd is a CdiInputResolution enum value + 
CdiInputResolutionUhd = "UHD" ) +// CdiInputResolution_Values returns all elements of the CdiInputResolution enum +func CdiInputResolution_Values() []string { + return []string{ + CdiInputResolutionSd, + CdiInputResolutionHd, + CdiInputResolutionFhd, + CdiInputResolutionUhd, + } +} + // A standard channel has two encoding pipelines and a single pipeline channel // only has one. const ( @@ -22533,6 +25801,14 @@ const ( ChannelClassSinglePipeline = "SINGLE_PIPELINE" ) +// ChannelClass_Values returns all elements of the ChannelClass enum +func ChannelClass_Values() []string { + return []string{ + ChannelClassStandard, + ChannelClassSinglePipeline, + } +} + const ( // ChannelStateCreating is a ChannelState enum value ChannelStateCreating = "CREATING" @@ -22568,6 +25844,56 @@ const ( ChannelStateUpdateFailed = "UPDATE_FAILED" ) +// ChannelState_Values returns all elements of the ChannelState enum +func ChannelState_Values() []string { + return []string{ + ChannelStateCreating, + ChannelStateCreateFailed, + ChannelStateIdle, + ChannelStateStarting, + ChannelStateRunning, + ChannelStateRecovering, + ChannelStateStopping, + ChannelStateDeleting, + ChannelStateDeleted, + ChannelStateUpdating, + ChannelStateUpdateFailed, + } +} + +const ( + // ContentTypeImageJpeg is a ContentType enum value + ContentTypeImageJpeg = "image/jpeg" +) + +// ContentType_Values returns all elements of the ContentType enum +func ContentType_Values() []string { + return []string{ + ContentTypeImageJpeg, + } +} + +// The status of the action to synchronize the device configuration. If you +// change the configuration of the input device (for example, the maximum bitrate), +// MediaLive sends the new data to the device. The device might not update itself +// immediately. SYNCED means the device has updated its configuration. SYNCING +// means that it has not updated its configuration. 
+const ( + // DeviceSettingsSyncStateSynced is a DeviceSettingsSyncState enum value + DeviceSettingsSyncStateSynced = "SYNCED" + + // DeviceSettingsSyncStateSyncing is a DeviceSettingsSyncState enum value + DeviceSettingsSyncStateSyncing = "SYNCING" +) + +// DeviceSettingsSyncState_Values returns all elements of the DeviceSettingsSyncState enum +func DeviceSettingsSyncState_Values() []string { + return []string{ + DeviceSettingsSyncStateSynced, + DeviceSettingsSyncStateSyncing, + } +} + // Dvb Sdt Output Sdt const ( // DvbSdtOutputSdtSdtFollow is a DvbSdtOutputSdt enum value @@ -22583,6 +25909,16 @@ const ( DvbSdtOutputSdtSdtNone = "SDT_NONE" ) +// DvbSdtOutputSdt_Values returns all elements of the DvbSdtOutputSdt enum +func DvbSdtOutputSdt_Values() []string { + return []string{ + DvbSdtOutputSdtSdtFollow, + DvbSdtOutputSdtSdtFollowIfPresent, + DvbSdtOutputSdtSdtManual, + DvbSdtOutputSdtSdtNone, + } +} + // Dvb Sub Destination Alignment const ( // DvbSubDestinationAlignmentCentered is a DvbSubDestinationAlignment enum value @@ -22595,6 +25931,15 @@ const ( DvbSubDestinationAlignmentSmart = "SMART" ) +// DvbSubDestinationAlignment_Values returns all elements of the DvbSubDestinationAlignment enum +func DvbSubDestinationAlignment_Values() []string { + return []string{ + DvbSubDestinationAlignmentCentered, + DvbSubDestinationAlignmentLeft, + DvbSubDestinationAlignmentSmart, + } +} + // Dvb Sub Destination Background Color const ( // DvbSubDestinationBackgroundColorBlack is a DvbSubDestinationBackgroundColor enum value @@ -22607,6 +25952,15 @@ const ( DvbSubDestinationBackgroundColorWhite = "WHITE" ) +// DvbSubDestinationBackgroundColor_Values returns all elements of the DvbSubDestinationBackgroundColor enum +func DvbSubDestinationBackgroundColor_Values() []string { + return []string{ + DvbSubDestinationBackgroundColorBlack, + DvbSubDestinationBackgroundColorNone, + DvbSubDestinationBackgroundColorWhite, + } +} + // Dvb Sub Destination Font Color const ( // DvbSubDestinationFontColorBlack is a DvbSubDestinationFontColor enum value @@ -22628,6 +25982,18 @@ const ( DvbSubDestinationFontColorYellow = "YELLOW" ) +// DvbSubDestinationFontColor_Values returns all elements of the DvbSubDestinationFontColor enum +func DvbSubDestinationFontColor_Values() []string { + return []string{ + DvbSubDestinationFontColorBlack, + DvbSubDestinationFontColorBlue, + DvbSubDestinationFontColorGreen, + DvbSubDestinationFontColorRed, + DvbSubDestinationFontColorWhite, + DvbSubDestinationFontColorYellow, + } +} + // Dvb Sub Destination Outline Color const ( // DvbSubDestinationOutlineColorBlack is a DvbSubDestinationOutlineColor enum value @@ -22649,6 +26015,18 @@ const ( DvbSubDestinationOutlineColorYellow = "YELLOW" ) +// DvbSubDestinationOutlineColor_Values returns all elements of the DvbSubDestinationOutlineColor enum +func DvbSubDestinationOutlineColor_Values() []string { + return []string{ + DvbSubDestinationOutlineColorBlack, + DvbSubDestinationOutlineColorBlue, + DvbSubDestinationOutlineColorGreen, + DvbSubDestinationOutlineColorRed, + DvbSubDestinationOutlineColorWhite, + DvbSubDestinationOutlineColorYellow, + } +} + // Dvb Sub Destination Shadow Color const ( // DvbSubDestinationShadowColorBlack is a DvbSubDestinationShadowColor enum value @@ -22661,6 +26039,15 @@ const ( DvbSubDestinationShadowColorWhite = "WHITE" ) +// DvbSubDestinationShadowColor_Values returns all elements of the DvbSubDestinationShadowColor enum +func DvbSubDestinationShadowColor_Values() []string { + return []string{ + 
DvbSubDestinationShadowColorBlack, + DvbSubDestinationShadowColorNone, + DvbSubDestinationShadowColorWhite, + } +} + // Dvb Sub Destination Teletext Grid Control const ( // DvbSubDestinationTeletextGridControlFixed is a DvbSubDestinationTeletextGridControl enum value @@ -22670,6 +26057,14 @@ const ( DvbSubDestinationTeletextGridControlScaled = "SCALED" ) +// DvbSubDestinationTeletextGridControl_Values returns all elements of the DvbSubDestinationTeletextGridControl enum +func DvbSubDestinationTeletextGridControl_Values() []string { + return []string{ + DvbSubDestinationTeletextGridControlFixed, + DvbSubDestinationTeletextGridControlScaled, + } +} + // Eac3 Attenuation Control const ( // Eac3AttenuationControlAttenuate3Db is a Eac3AttenuationControl enum value @@ -22679,6 +26074,14 @@ const ( Eac3AttenuationControlNone = "NONE" ) +// Eac3AttenuationControl_Values returns all elements of the Eac3AttenuationControl enum +func Eac3AttenuationControl_Values() []string { + return []string{ + Eac3AttenuationControlAttenuate3Db, + Eac3AttenuationControlNone, + } +} + // Eac3 Bitstream Mode const ( // Eac3BitstreamModeCommentary is a Eac3BitstreamMode enum value @@ -22697,6 +26100,17 @@ const ( Eac3BitstreamModeVisuallyImpaired = "VISUALLY_IMPAIRED" ) +// Eac3BitstreamMode_Values returns all elements of the Eac3BitstreamMode enum +func Eac3BitstreamMode_Values() []string { + return []string{ + Eac3BitstreamModeCommentary, + Eac3BitstreamModeCompleteMain, + Eac3BitstreamModeEmergency, + Eac3BitstreamModeHearingImpaired, + Eac3BitstreamModeVisuallyImpaired, + } +} + // Eac3 Coding Mode const ( // Eac3CodingModeCodingMode10 is a Eac3CodingMode enum value @@ -22709,6 +26123,15 @@ const ( Eac3CodingModeCodingMode32 = "CODING_MODE_3_2" ) +// Eac3CodingMode_Values returns all elements of the Eac3CodingMode enum +func Eac3CodingMode_Values() []string { + return []string{ + Eac3CodingModeCodingMode10, + Eac3CodingModeCodingMode20, + Eac3CodingModeCodingMode32, + } +} + // Eac3 Dc Filter const ( // Eac3DcFilterDisabled is a Eac3DcFilter enum value @@ -22718,6 +26141,14 @@ const ( Eac3DcFilterEnabled = "ENABLED" ) +// Eac3DcFilter_Values returns all elements of the Eac3DcFilter enum +func Eac3DcFilter_Values() []string { + return []string{ + Eac3DcFilterDisabled, + Eac3DcFilterEnabled, + } +} + // Eac3 Drc Line const ( // Eac3DrcLineFilmLight is a Eac3DrcLine enum value @@ -22739,6 +26170,18 @@ const ( Eac3DrcLineSpeech = "SPEECH" ) +// Eac3DrcLine_Values returns all elements of the Eac3DrcLine enum +func Eac3DrcLine_Values() []string { + return []string{ + Eac3DrcLineFilmLight, + Eac3DrcLineFilmStandard, + Eac3DrcLineMusicLight, + Eac3DrcLineMusicStandard, + Eac3DrcLineNone, + Eac3DrcLineSpeech, + } +} + // Eac3 Drc Rf const ( // Eac3DrcRfFilmLight is a Eac3DrcRf enum value @@ -22760,6 +26203,18 @@ const ( Eac3DrcRfSpeech = "SPEECH" ) +// Eac3DrcRf_Values returns all elements of the Eac3DrcRf enum +func Eac3DrcRf_Values() []string { + return []string{ + Eac3DrcRfFilmLight, + Eac3DrcRfFilmStandard, + Eac3DrcRfMusicLight, + Eac3DrcRfMusicStandard, + Eac3DrcRfNone, + Eac3DrcRfSpeech, + } +} + // Eac3 Lfe Control const ( // Eac3LfeControlLfe is a Eac3LfeControl enum value @@ -22769,6 +26224,14 @@ const ( Eac3LfeControlNoLfe = "NO_LFE" ) +// Eac3LfeControl_Values returns all elements of the Eac3LfeControl enum +func Eac3LfeControl_Values() []string { + return []string{ + Eac3LfeControlLfe, + Eac3LfeControlNoLfe, + } +} + // Eac3 Lfe Filter const ( // Eac3LfeFilterDisabled is a Eac3LfeFilter enum value @@ -22778,6 
+26241,14 @@ const ( Eac3LfeFilterEnabled = "ENABLED" ) +// Eac3LfeFilter_Values returns all elements of the Eac3LfeFilter enum +func Eac3LfeFilter_Values() []string { + return []string{ + Eac3LfeFilterDisabled, + Eac3LfeFilterEnabled, + } +} + // Eac3 Metadata Control const ( // Eac3MetadataControlFollowInput is a Eac3MetadataControl enum value @@ -22787,6 +26258,14 @@ const ( Eac3MetadataControlUseConfigured = "USE_CONFIGURED" ) +// Eac3MetadataControl_Values returns all elements of the Eac3MetadataControl enum +func Eac3MetadataControl_Values() []string { + return []string{ + Eac3MetadataControlFollowInput, + Eac3MetadataControlUseConfigured, + } +} + // Eac3 Passthrough Control const ( // Eac3PassthroughControlNoPassthrough is a Eac3PassthroughControl enum value @@ -22796,6 +26275,14 @@ const ( Eac3PassthroughControlWhenPossible = "WHEN_POSSIBLE" ) +// Eac3PassthroughControl_Values returns all elements of the Eac3PassthroughControl enum +func Eac3PassthroughControl_Values() []string { + return []string{ + Eac3PassthroughControlNoPassthrough, + Eac3PassthroughControlWhenPossible, + } +} + // Eac3 Phase Control const ( // Eac3PhaseControlNoShift is a Eac3PhaseControl enum value @@ -22805,6 +26292,14 @@ const ( Eac3PhaseControlShift90Degrees = "SHIFT_90_DEGREES" ) +// Eac3PhaseControl_Values returns all elements of the Eac3PhaseControl enum +func Eac3PhaseControl_Values() []string { + return []string{ + Eac3PhaseControlNoShift, + Eac3PhaseControlShift90Degrees, + } +} + // Eac3 Stereo Downmix const ( // Eac3StereoDownmixDpl2 is a Eac3StereoDownmix enum value @@ -22820,6 +26315,16 @@ const ( Eac3StereoDownmixNotIndicated = "NOT_INDICATED" ) +// Eac3StereoDownmix_Values returns all elements of the Eac3StereoDownmix enum +func Eac3StereoDownmix_Values() []string { + return []string{ + Eac3StereoDownmixDpl2, + Eac3StereoDownmixLoRo, + Eac3StereoDownmixLtRt, + Eac3StereoDownmixNotIndicated, + } +} + // Eac3 Surround Ex Mode const ( // Eac3SurroundExModeDisabled is a Eac3SurroundExMode enum value @@ -22832,6 +26337,15 @@ const ( Eac3SurroundExModeNotIndicated = "NOT_INDICATED" ) +// Eac3SurroundExMode_Values returns all elements of the Eac3SurroundExMode enum +func Eac3SurroundExMode_Values() []string { + return []string{ + Eac3SurroundExModeDisabled, + Eac3SurroundExModeEnabled, + Eac3SurroundExModeNotIndicated, + } +} + // Eac3 Surround Mode const ( // Eac3SurroundModeDisabled is a Eac3SurroundMode enum value @@ -22844,6 +26358,49 @@ const ( Eac3SurroundModeNotIndicated = "NOT_INDICATED" ) +// Eac3SurroundMode_Values returns all elements of the Eac3SurroundMode enum +func Eac3SurroundMode_Values() []string { + return []string{ + Eac3SurroundModeDisabled, + Eac3SurroundModeEnabled, + Eac3SurroundModeNotIndicated, + } +} + +// Ebu Tt DDestination Style Control +const ( + // EbuTtDDestinationStyleControlExclude is a EbuTtDDestinationStyleControl enum value + EbuTtDDestinationStyleControlExclude = "EXCLUDE" + + // EbuTtDDestinationStyleControlInclude is a EbuTtDDestinationStyleControl enum value + EbuTtDDestinationStyleControlInclude = "INCLUDE" +) + +// EbuTtDDestinationStyleControl_Values returns all elements of the EbuTtDDestinationStyleControl enum +func EbuTtDDestinationStyleControl_Values() []string { + return []string{ + EbuTtDDestinationStyleControlExclude, + EbuTtDDestinationStyleControlInclude, + } +} + +// Ebu Tt DFill Line Gap Control +const ( + // EbuTtDFillLineGapControlDisabled is a EbuTtDFillLineGapControl enum value + EbuTtDFillLineGapControlDisabled = "DISABLED" + + // 
EbuTtDFillLineGapControlEnabled is a EbuTtDFillLineGapControl enum value + EbuTtDFillLineGapControlEnabled = "ENABLED" +) + +// EbuTtDFillLineGapControl_Values returns all elements of the EbuTtDFillLineGapControl enum +func EbuTtDFillLineGapControl_Values() []string { + return []string{ + EbuTtDFillLineGapControlDisabled, + EbuTtDFillLineGapControlEnabled, + } +} + // Embedded Convert608 To708 const ( // EmbeddedConvert608To708Disabled is a EmbeddedConvert608To708 enum value @@ -22853,6 +26410,14 @@ const ( EmbeddedConvert608To708Upconvert = "UPCONVERT" ) +// EmbeddedConvert608To708_Values returns all elements of the EmbeddedConvert608To708 enum +func EmbeddedConvert608To708_Values() []string { + return []string{ + EmbeddedConvert608To708Disabled, + EmbeddedConvert608To708Upconvert, + } +} + // Embedded Scte20 Detection const ( // EmbeddedScte20DetectionAuto is a EmbeddedScte20Detection enum value @@ -22862,6 +26427,31 @@ const ( EmbeddedScte20DetectionOff = "OFF" ) +// EmbeddedScte20Detection_Values returns all elements of the EmbeddedScte20Detection enum +func EmbeddedScte20Detection_Values() []string { + return []string{ + EmbeddedScte20DetectionAuto, + EmbeddedScte20DetectionOff, + } +} + +// Feature Activations Input Prepare Schedule Actions +const ( + // FeatureActivationsInputPrepareScheduleActionsDisabled is a FeatureActivationsInputPrepareScheduleActions enum value + FeatureActivationsInputPrepareScheduleActionsDisabled = "DISABLED" + + // FeatureActivationsInputPrepareScheduleActionsEnabled is a FeatureActivationsInputPrepareScheduleActions enum value + FeatureActivationsInputPrepareScheduleActionsEnabled = "ENABLED" +) + +// FeatureActivationsInputPrepareScheduleActions_Values returns all elements of the FeatureActivationsInputPrepareScheduleActions enum +func FeatureActivationsInputPrepareScheduleActions_Values() []string { + return []string{ + FeatureActivationsInputPrepareScheduleActionsDisabled, + FeatureActivationsInputPrepareScheduleActionsEnabled, + } +} + // Fec Output Include Fec const ( // FecOutputIncludeFecColumn is a FecOutputIncludeFec enum value @@ -22871,6 +26461,14 @@ const ( FecOutputIncludeFecColumnAndRow = "COLUMN_AND_ROW" ) +// FecOutputIncludeFec_Values returns all elements of the FecOutputIncludeFec enum +func FecOutputIncludeFec_Values() []string { + return []string{ + FecOutputIncludeFecColumn, + FecOutputIncludeFecColumnAndRow, + } +} + // Fixed Afd const ( // FixedAfdAfd0000 is a FixedAfd enum value @@ -22907,6 +26505,57 @@ const ( FixedAfdAfd1111 = "AFD_1111" ) +// FixedAfd_Values returns all elements of the FixedAfd enum +func FixedAfd_Values() []string { + return []string{ + FixedAfdAfd0000, + FixedAfdAfd0010, + FixedAfdAfd0011, + FixedAfdAfd0100, + FixedAfdAfd1000, + FixedAfdAfd1001, + FixedAfdAfd1010, + FixedAfdAfd1011, + FixedAfdAfd1101, + FixedAfdAfd1110, + FixedAfdAfd1111, + } +} + +// Fmp4 Nielsen Id3 Behavior +const ( + // Fmp4NielsenId3BehaviorNoPassthrough is a Fmp4NielsenId3Behavior enum value + Fmp4NielsenId3BehaviorNoPassthrough = "NO_PASSTHROUGH" + + // Fmp4NielsenId3BehaviorPassthrough is a Fmp4NielsenId3Behavior enum value + Fmp4NielsenId3BehaviorPassthrough = "PASSTHROUGH" +) + +// Fmp4NielsenId3Behavior_Values returns all elements of the Fmp4NielsenId3Behavior enum +func Fmp4NielsenId3Behavior_Values() []string { + return []string{ + Fmp4NielsenId3BehaviorNoPassthrough, + Fmp4NielsenId3BehaviorPassthrough, + } +} + +// Fmp4 Timed Metadata Behavior +const ( + // Fmp4TimedMetadataBehaviorNoPassthrough is a Fmp4TimedMetadataBehavior 
enum value + Fmp4TimedMetadataBehaviorNoPassthrough = "NO_PASSTHROUGH" + + // Fmp4TimedMetadataBehaviorPassthrough is a Fmp4TimedMetadataBehavior enum value + Fmp4TimedMetadataBehaviorPassthrough = "PASSTHROUGH" +) + +// Fmp4TimedMetadataBehavior_Values returns all elements of the Fmp4TimedMetadataBehavior enum +func Fmp4TimedMetadataBehavior_Values() []string { + return []string{ + Fmp4TimedMetadataBehaviorNoPassthrough, + Fmp4TimedMetadataBehaviorPassthrough, + } +} + // Follow reference point. const ( // FollowPointEnd is a FollowPoint enum value @@ -22916,6 +26565,14 @@ const ( FollowPointStart = "START" ) +// FollowPoint_Values returns all elements of the FollowPoint enum +func FollowPoint_Values() []string { + return []string{ + FollowPointEnd, + FollowPointStart, + } +} + // Frame Capture Interval Unit const ( // FrameCaptureIntervalUnitMilliseconds is a FrameCaptureIntervalUnit enum value @@ -22925,6 +26582,14 @@ const ( FrameCaptureIntervalUnitSeconds = "SECONDS" ) +// FrameCaptureIntervalUnit_Values returns all elements of the FrameCaptureIntervalUnit enum +func FrameCaptureIntervalUnit_Values() []string { + return []string{ + FrameCaptureIntervalUnitMilliseconds, + FrameCaptureIntervalUnitSeconds, + } +} + // Global Configuration Input End Action const ( // GlobalConfigurationInputEndActionNone is a GlobalConfigurationInputEndAction enum value @@ -22934,6 +26599,14 @@ const ( GlobalConfigurationInputEndActionSwitchAndLoopInputs = "SWITCH_AND_LOOP_INPUTS" ) +// GlobalConfigurationInputEndAction_Values returns all elements of the GlobalConfigurationInputEndAction enum +func GlobalConfigurationInputEndAction_Values() []string { + return []string{ + GlobalConfigurationInputEndActionNone, + GlobalConfigurationInputEndActionSwitchAndLoopInputs, + } +} + // Global Configuration Low Framerate Inputs const ( // GlobalConfigurationLowFramerateInputsDisabled is a GlobalConfigurationLowFramerateInputs enum value @@ -22943,6 +26616,14 @@ const ( GlobalConfigurationLowFramerateInputsEnabled = "ENABLED" ) +// GlobalConfigurationLowFramerateInputs_Values returns all elements of the GlobalConfigurationLowFramerateInputs enum +func GlobalConfigurationLowFramerateInputs_Values() []string { + return []string{ + GlobalConfigurationLowFramerateInputsDisabled, + GlobalConfigurationLowFramerateInputsEnabled, + } +} + // Global Configuration Output Locking Mode const ( // GlobalConfigurationOutputLockingModeEpochLocking is a GlobalConfigurationOutputLockingMode enum value @@ -22952,6 +26633,14 @@ const ( GlobalConfigurationOutputLockingModePipelineLocking = "PIPELINE_LOCKING" ) +// GlobalConfigurationOutputLockingMode_Values returns all elements of the GlobalConfigurationOutputLockingMode enum +func GlobalConfigurationOutputLockingMode_Values() []string { + return []string{ + GlobalConfigurationOutputLockingModeEpochLocking, + GlobalConfigurationOutputLockingModePipelineLocking, + } +} + // Global Configuration Output Timing Source const ( // GlobalConfigurationOutputTimingSourceInputClock is a GlobalConfigurationOutputTimingSource enum value @@ -22961,6 +26650,14 @@ const ( GlobalConfigurationOutputTimingSourceSystemClock = "SYSTEM_CLOCK" ) +// GlobalConfigurationOutputTimingSource_Values returns all elements of the GlobalConfigurationOutputTimingSource enum +func GlobalConfigurationOutputTimingSource_Values() []string { + return []string{ + GlobalConfigurationOutputTimingSourceInputClock, + GlobalConfigurationOutputTimingSourceSystemClock, + } +} + // H264 Adaptive Quantization const ( // 
H264AdaptiveQuantizationHigh is a H264AdaptiveQuantization enum value @@ -22982,6 +26679,18 @@ const ( H264AdaptiveQuantizationOff = "OFF" ) +// H264AdaptiveQuantization_Values returns all elements of the H264AdaptiveQuantization enum +func H264AdaptiveQuantization_Values() []string { + return []string{ + H264AdaptiveQuantizationHigh, + H264AdaptiveQuantizationHigher, + H264AdaptiveQuantizationLow, + H264AdaptiveQuantizationMax, + H264AdaptiveQuantizationMedium, + H264AdaptiveQuantizationOff, + } +} + // H264 Color Metadata const ( // H264ColorMetadataIgnore is a H264ColorMetadata enum value @@ -22991,6 +26700,14 @@ const ( H264ColorMetadataInsert = "INSERT" ) +// H264ColorMetadata_Values returns all elements of the H264ColorMetadata enum +func H264ColorMetadata_Values() []string { + return []string{ + H264ColorMetadataIgnore, + H264ColorMetadataInsert, + } +} + // H264 Entropy Encoding const ( // H264EntropyEncodingCabac is a H264EntropyEncoding enum value @@ -23000,6 +26717,14 @@ const ( H264EntropyEncodingCavlc = "CAVLC" ) +// H264EntropyEncoding_Values returns all elements of the H264EntropyEncoding enum +func H264EntropyEncoding_Values() []string { + return []string{ + H264EntropyEncodingCabac, + H264EntropyEncodingCavlc, + } +} + // H264 Flicker Aq const ( // H264FlickerAqDisabled is a H264FlickerAq enum value @@ -23009,6 +26734,31 @@ const ( H264FlickerAqEnabled = "ENABLED" ) +// H264FlickerAq_Values returns all elements of the H264FlickerAq enum +func H264FlickerAq_Values() []string { + return []string{ + H264FlickerAqDisabled, + H264FlickerAqEnabled, + } +} + +// H264 Force Field Pictures +const ( + // H264ForceFieldPicturesDisabled is a H264ForceFieldPictures enum value + H264ForceFieldPicturesDisabled = "DISABLED" + + // H264ForceFieldPicturesEnabled is a H264ForceFieldPictures enum value + H264ForceFieldPicturesEnabled = "ENABLED" +) + +// H264ForceFieldPictures_Values returns all elements of the H264ForceFieldPictures enum +func H264ForceFieldPictures_Values() []string { + return []string{ + H264ForceFieldPicturesDisabled, + H264ForceFieldPicturesEnabled, + } +} + // H264 Framerate Control const ( // H264FramerateControlInitializeFromSource is a H264FramerateControl enum value @@ -23018,6 +26768,14 @@ const ( H264FramerateControlSpecified = "SPECIFIED" ) +// H264FramerateControl_Values returns all elements of the H264FramerateControl enum +func H264FramerateControl_Values() []string { + return []string{ + H264FramerateControlInitializeFromSource, + H264FramerateControlSpecified, + } +} + // H264 Gop BReference const ( // H264GopBReferenceDisabled is a H264GopBReference enum value @@ -23027,6 +26785,14 @@ const ( H264GopBReferenceEnabled = "ENABLED" ) +// H264GopBReference_Values returns all elements of the H264GopBReference enum +func H264GopBReference_Values() []string { + return []string{ + H264GopBReferenceDisabled, + H264GopBReferenceEnabled, + } +} + // H264 Gop Size Units const ( // H264GopSizeUnitsFrames is a H264GopSizeUnits enum value @@ -23036,6 +26802,14 @@ const ( H264GopSizeUnitsSeconds = "SECONDS" ) +// H264GopSizeUnits_Values returns all elements of the H264GopSizeUnits enum +func H264GopSizeUnits_Values() []string { + return []string{ + H264GopSizeUnitsFrames, + H264GopSizeUnitsSeconds, + } +} + // H264 Level const ( // H264LevelH264Level1 is a H264Level enum value @@ -23090,6 +26864,29 @@ const ( H264LevelH264LevelAuto = "H264_LEVEL_AUTO" ) +// H264Level_Values returns all elements of the H264Level enum +func H264Level_Values() []string { + return []string{ + 
H264LevelH264Level1, + H264LevelH264Level11, + H264LevelH264Level12, + H264LevelH264Level13, + H264LevelH264Level2, + H264LevelH264Level21, + H264LevelH264Level22, + H264LevelH264Level3, + H264LevelH264Level31, + H264LevelH264Level32, + H264LevelH264Level4, + H264LevelH264Level41, + H264LevelH264Level42, + H264LevelH264Level5, + H264LevelH264Level51, + H264LevelH264Level52, + H264LevelH264LevelAuto, + } +} + // H264 Look Ahead Rate Control const ( // H264LookAheadRateControlHigh is a H264LookAheadRateControl enum value @@ -23102,6 +26899,15 @@ const ( H264LookAheadRateControlMedium = "MEDIUM" ) +// H264LookAheadRateControl_Values returns all elements of the H264LookAheadRateControl enum +func H264LookAheadRateControl_Values() []string { + return []string{ + H264LookAheadRateControlHigh, + H264LookAheadRateControlLow, + H264LookAheadRateControlMedium, + } +} + // H264 Par Control const ( // H264ParControlInitializeFromSource is a H264ParControl enum value @@ -23111,6 +26917,14 @@ const ( H264ParControlSpecified = "SPECIFIED" ) +// H264ParControl_Values returns all elements of the H264ParControl enum +func H264ParControl_Values() []string { + return []string{ + H264ParControlInitializeFromSource, + H264ParControlSpecified, + } +} + // H264 Profile const ( // H264ProfileBaseline is a H264Profile enum value @@ -23132,6 +26946,35 @@ const ( H264ProfileMain = "MAIN" ) +// H264Profile_Values returns all elements of the H264Profile enum +func H264Profile_Values() []string { + return []string{ + H264ProfileBaseline, + H264ProfileHigh, + H264ProfileHigh10bit, + H264ProfileHigh422, + H264ProfileHigh42210bit, + H264ProfileMain, + } +} + +// H264 Quality Level +const ( + // H264QualityLevelEnhancedQuality is a H264QualityLevel enum value + H264QualityLevelEnhancedQuality = "ENHANCED_QUALITY" + + // H264QualityLevelStandardQuality is a H264QualityLevel enum value + H264QualityLevelStandardQuality = "STANDARD_QUALITY" +) + +// H264QualityLevel_Values returns all elements of the H264QualityLevel enum +func H264QualityLevel_Values() []string { + return []string{ + H264QualityLevelEnhancedQuality, + H264QualityLevelStandardQuality, + } +} + // H264 Rate Control Mode const ( // H264RateControlModeCbr is a H264RateControlMode enum value @@ -23147,6 +26990,16 @@ const ( H264RateControlModeVbr = "VBR" ) +// H264RateControlMode_Values returns all elements of the H264RateControlMode enum +func H264RateControlMode_Values() []string { + return []string{ + H264RateControlModeCbr, + H264RateControlModeMultiplex, + H264RateControlModeQvbr, + H264RateControlModeVbr, + } +} + // H264 Scan Type const ( // H264ScanTypeInterlaced is a H264ScanType enum value @@ -23156,6 +27009,14 @@ const ( H264ScanTypeProgressive = "PROGRESSIVE" ) +// H264ScanType_Values returns all elements of the H264ScanType enum +func H264ScanType_Values() []string { + return []string{ + H264ScanTypeInterlaced, + H264ScanTypeProgressive, + } +} + // H264 Scene Change Detect const ( // H264SceneChangeDetectDisabled is a H264SceneChangeDetect enum value @@ -23165,6 +27026,14 @@ const ( H264SceneChangeDetectEnabled = "ENABLED" ) +// H264SceneChangeDetect_Values returns all elements of the H264SceneChangeDetect enum +func H264SceneChangeDetect_Values() []string { + return []string{ + H264SceneChangeDetectDisabled, + H264SceneChangeDetectEnabled, + } +} + // H264 Spatial Aq const ( // H264SpatialAqDisabled is a H264SpatialAq enum value @@ -23174,6 +27043,14 @@ const ( H264SpatialAqEnabled = "ENABLED" ) +// H264SpatialAq_Values returns all elements of the 
H264SpatialAq enum +func H264SpatialAq_Values() []string { + return []string{ + H264SpatialAqDisabled, + H264SpatialAqEnabled, + } +} + // H264 Sub Gop Length const ( // H264SubGopLengthDynamic is a H264SubGopLength enum value @@ -23183,6 +27060,14 @@ const ( H264SubGopLengthFixed = "FIXED" ) +// H264SubGopLength_Values returns all elements of the H264SubGopLength enum +func H264SubGopLength_Values() []string { + return []string{ + H264SubGopLengthDynamic, + H264SubGopLengthFixed, + } +} + // H264 Syntax const ( // H264SyntaxDefault is a H264Syntax enum value @@ -23192,6 +27077,14 @@ const ( H264SyntaxRp2027 = "RP2027" ) +// H264Syntax_Values returns all elements of the H264Syntax enum +func H264Syntax_Values() []string { + return []string{ + H264SyntaxDefault, + H264SyntaxRp2027, + } +} + // H264 Temporal Aq const ( // H264TemporalAqDisabled is a H264TemporalAq enum value @@ -23201,6 +27094,14 @@ const ( H264TemporalAqEnabled = "ENABLED" ) +// H264TemporalAq_Values returns all elements of the H264TemporalAq enum +func H264TemporalAq_Values() []string { + return []string{ + H264TemporalAqDisabled, + H264TemporalAqEnabled, + } +} + // H264 Timecode Insertion Behavior const ( // H264TimecodeInsertionBehaviorDisabled is a H264TimecodeInsertionBehavior enum value @@ -23210,6 +27111,14 @@ const ( H264TimecodeInsertionBehaviorPicTimingSei = "PIC_TIMING_SEI" ) +// H264TimecodeInsertionBehavior_Values returns all elements of the H264TimecodeInsertionBehavior enum +func H264TimecodeInsertionBehavior_Values() []string { + return []string{ + H264TimecodeInsertionBehaviorDisabled, + H264TimecodeInsertionBehaviorPicTimingSei, + } +} + // H265 Adaptive Quantization const ( // H265AdaptiveQuantizationHigh is a H265AdaptiveQuantization enum value @@ -23231,6 +27140,18 @@ const ( H265AdaptiveQuantizationOff = "OFF" ) +// H265AdaptiveQuantization_Values returns all elements of the H265AdaptiveQuantization enum +func H265AdaptiveQuantization_Values() []string { + return []string{ + H265AdaptiveQuantizationHigh, + H265AdaptiveQuantizationHigher, + H265AdaptiveQuantizationLow, + H265AdaptiveQuantizationMax, + H265AdaptiveQuantizationMedium, + H265AdaptiveQuantizationOff, + } +} + // H265 Alternative Transfer Function const ( // H265AlternativeTransferFunctionInsert is a H265AlternativeTransferFunction enum value @@ -23240,6 +27161,14 @@ const ( H265AlternativeTransferFunctionOmit = "OMIT" ) +// H265AlternativeTransferFunction_Values returns all elements of the H265AlternativeTransferFunction enum +func H265AlternativeTransferFunction_Values() []string { + return []string{ + H265AlternativeTransferFunctionInsert, + H265AlternativeTransferFunctionOmit, + } +} + // H265 Color Metadata const ( // H265ColorMetadataIgnore is a H265ColorMetadata enum value @@ -23249,6 +27178,14 @@ const ( H265ColorMetadataInsert = "INSERT" ) +// H265ColorMetadata_Values returns all elements of the H265ColorMetadata enum +func H265ColorMetadata_Values() []string { + return []string{ + H265ColorMetadataIgnore, + H265ColorMetadataInsert, + } +} + // H265 Flicker Aq const ( // H265FlickerAqDisabled is a H265FlickerAq enum value @@ -23258,6 +27195,14 @@ const ( H265FlickerAqEnabled = "ENABLED" ) +// H265FlickerAq_Values returns all elements of the H265FlickerAq enum +func H265FlickerAq_Values() []string { + return []string{ + H265FlickerAqDisabled, + H265FlickerAqEnabled, + } +} + // H265 Gop Size Units const ( // H265GopSizeUnitsFrames is a H265GopSizeUnits enum value @@ -23267,6 +27212,14 @@ const ( H265GopSizeUnitsSeconds = "SECONDS" ) 
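The generated *_Values() helpers added throughout these hunks return every constant declared for their enum, which makes them convenient for validating a user-supplied string before it is placed into request settings. A minimal sketch, assuming these hunks belong to the vendored github.com/aws/aws-sdk-go/service/medialive package:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/medialive"
)

// contains reports whether want is one of the values in vals.
func contains(vals []string, want string) bool {
	for _, v := range vals {
		if v == want {
			return true
		}
	}
	return false
}

func main() {
	// Validate a caller-supplied rate control mode against the generated enum list.
	mode := "QVBR"
	if !contains(medialive.H264RateControlMode_Values(), mode) {
		fmt.Printf("unsupported H264 rate control mode %q; valid values: %v\n",
			mode, medialive.H264RateControlMode_Values())
		return
	}
	fmt.Printf("%q is a valid H264 rate control mode\n", mode)
}

Because each list is generated alongside its constants, it stays in sync with the enum whenever the SDK is revendored.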
+// H265GopSizeUnits_Values returns all elements of the H265GopSizeUnits enum +func H265GopSizeUnits_Values() []string { + return []string{ + H265GopSizeUnitsFrames, + H265GopSizeUnitsSeconds, + } +} + // H265 Level const ( // H265LevelH265Level1 is a H265Level enum value @@ -23312,6 +27265,26 @@ const ( H265LevelH265LevelAuto = "H265_LEVEL_AUTO" ) +// H265Level_Values returns all elements of the H265Level enum +func H265Level_Values() []string { + return []string{ + H265LevelH265Level1, + H265LevelH265Level2, + H265LevelH265Level21, + H265LevelH265Level3, + H265LevelH265Level31, + H265LevelH265Level4, + H265LevelH265Level41, + H265LevelH265Level5, + H265LevelH265Level51, + H265LevelH265Level52, + H265LevelH265Level6, + H265LevelH265Level61, + H265LevelH265Level62, + H265LevelH265LevelAuto, + } +} + // H265 Look Ahead Rate Control const ( // H265LookAheadRateControlHigh is a H265LookAheadRateControl enum value @@ -23324,6 +27297,15 @@ const ( H265LookAheadRateControlMedium = "MEDIUM" ) +// H265LookAheadRateControl_Values returns all elements of the H265LookAheadRateControl enum +func H265LookAheadRateControl_Values() []string { + return []string{ + H265LookAheadRateControlHigh, + H265LookAheadRateControlLow, + H265LookAheadRateControlMedium, + } +} + // H265 Profile const ( // H265ProfileMain is a H265Profile enum value @@ -23333,6 +27315,14 @@ const ( H265ProfileMain10bit = "MAIN_10BIT" ) +// H265Profile_Values returns all elements of the H265Profile enum +func H265Profile_Values() []string { + return []string{ + H265ProfileMain, + H265ProfileMain10bit, + } +} + // H265 Rate Control Mode const ( // H265RateControlModeCbr is a H265RateControlMode enum value @@ -23345,12 +27335,32 @@ const ( H265RateControlModeQvbr = "QVBR" ) +// H265RateControlMode_Values returns all elements of the H265RateControlMode enum +func H265RateControlMode_Values() []string { + return []string{ + H265RateControlModeCbr, + H265RateControlModeMultiplex, + H265RateControlModeQvbr, + } +} + // H265 Scan Type const ( + // H265ScanTypeInterlaced is a H265ScanType enum value + H265ScanTypeInterlaced = "INTERLACED" + // H265ScanTypeProgressive is a H265ScanType enum value H265ScanTypeProgressive = "PROGRESSIVE" ) +// H265ScanType_Values returns all elements of the H265ScanType enum +func H265ScanType_Values() []string { + return []string{ + H265ScanTypeInterlaced, + H265ScanTypeProgressive, + } +} + // H265 Scene Change Detect const ( // H265SceneChangeDetectDisabled is a H265SceneChangeDetect enum value @@ -23360,6 +27370,14 @@ const ( H265SceneChangeDetectEnabled = "ENABLED" ) +// H265SceneChangeDetect_Values returns all elements of the H265SceneChangeDetect enum +func H265SceneChangeDetect_Values() []string { + return []string{ + H265SceneChangeDetectDisabled, + H265SceneChangeDetectEnabled, + } +} + // H265 Tier const ( // H265TierHigh is a H265Tier enum value @@ -23369,6 +27387,14 @@ const ( H265TierMain = "MAIN" ) +// H265Tier_Values returns all elements of the H265Tier enum +func H265Tier_Values() []string { + return []string{ + H265TierHigh, + H265TierMain, + } +} + // H265 Timecode Insertion Behavior const ( // H265TimecodeInsertionBehaviorDisabled is a H265TimecodeInsertionBehavior enum value @@ -23378,6 +27404,14 @@ const ( H265TimecodeInsertionBehaviorPicTimingSei = "PIC_TIMING_SEI" ) +// H265TimecodeInsertionBehavior_Values returns all elements of the H265TimecodeInsertionBehavior enum +func H265TimecodeInsertionBehavior_Values() []string { + return []string{ + H265TimecodeInsertionBehaviorDisabled, + 
H265TimecodeInsertionBehaviorPicTimingSei, + } +} + // Hls Ad Markers const ( // HlsAdMarkersAdobe is a HlsAdMarkers enum value @@ -23390,6 +27424,15 @@ const ( HlsAdMarkersElementalScte35 = "ELEMENTAL_SCTE35" ) +// HlsAdMarkers_Values returns all elements of the HlsAdMarkers enum +func HlsAdMarkers_Values() []string { + return []string{ + HlsAdMarkersAdobe, + HlsAdMarkersElemental, + HlsAdMarkersElementalScte35, + } +} + // Hls Akamai Http Transfer Mode const ( // HlsAkamaiHttpTransferModeChunked is a HlsAkamaiHttpTransferMode enum value @@ -23399,6 +27442,14 @@ const ( HlsAkamaiHttpTransferModeNonChunked = "NON_CHUNKED" ) +// HlsAkamaiHttpTransferMode_Values returns all elements of the HlsAkamaiHttpTransferMode enum +func HlsAkamaiHttpTransferMode_Values() []string { + return []string{ + HlsAkamaiHttpTransferModeChunked, + HlsAkamaiHttpTransferModeNonChunked, + } +} + // Hls Caption Language Setting const ( // HlsCaptionLanguageSettingInsert is a HlsCaptionLanguageSetting enum value @@ -23411,6 +27462,15 @@ const ( HlsCaptionLanguageSettingOmit = "OMIT" ) +// HlsCaptionLanguageSetting_Values returns all elements of the HlsCaptionLanguageSetting enum +func HlsCaptionLanguageSetting_Values() []string { + return []string{ + HlsCaptionLanguageSettingInsert, + HlsCaptionLanguageSettingNone, + HlsCaptionLanguageSettingOmit, + } +} + // Hls Client Cache const ( // HlsClientCacheDisabled is a HlsClientCache enum value @@ -23420,6 +27480,14 @@ const ( HlsClientCacheEnabled = "ENABLED" ) +// HlsClientCache_Values returns all elements of the HlsClientCache enum +func HlsClientCache_Values() []string { + return []string{ + HlsClientCacheDisabled, + HlsClientCacheEnabled, + } +} + // Hls Codec Specification const ( // HlsCodecSpecificationRfc4281 is a HlsCodecSpecification enum value @@ -23429,6 +27497,14 @@ const ( HlsCodecSpecificationRfc6381 = "RFC_6381" ) +// HlsCodecSpecification_Values returns all elements of the HlsCodecSpecification enum +func HlsCodecSpecification_Values() []string { + return []string{ + HlsCodecSpecificationRfc4281, + HlsCodecSpecificationRfc6381, + } +} + // Hls Directory Structure const ( // HlsDirectoryStructureSingleDirectory is a HlsDirectoryStructure enum value @@ -23438,6 +27514,14 @@ const ( HlsDirectoryStructureSubdirectoryPerStream = "SUBDIRECTORY_PER_STREAM" ) +// HlsDirectoryStructure_Values returns all elements of the HlsDirectoryStructure enum +func HlsDirectoryStructure_Values() []string { + return []string{ + HlsDirectoryStructureSingleDirectory, + HlsDirectoryStructureSubdirectoryPerStream, + } +} + // Hls Encryption Type const ( // HlsEncryptionTypeAes128 is a HlsEncryptionType enum value @@ -23447,6 +27531,14 @@ const ( HlsEncryptionTypeSampleAes = "SAMPLE_AES" ) +// HlsEncryptionType_Values returns all elements of the HlsEncryptionType enum +func HlsEncryptionType_Values() []string { + return []string{ + HlsEncryptionTypeAes128, + HlsEncryptionTypeSampleAes, + } +} + // Hls H265 Packaging Type const ( // HlsH265PackagingTypeHev1 is a HlsH265PackagingType enum value @@ -23456,6 +27548,14 @@ const ( HlsH265PackagingTypeHvc1 = "HVC1" ) +// HlsH265PackagingType_Values returns all elements of the HlsH265PackagingType enum +func HlsH265PackagingType_Values() []string { + return []string{ + HlsH265PackagingTypeHev1, + HlsH265PackagingTypeHvc1, + } +} + // State of HLS ID3 Segment Tagging const ( // HlsId3SegmentTaggingStateDisabled is a HlsId3SegmentTaggingState enum value @@ -23465,6 +27565,14 @@ const ( HlsId3SegmentTaggingStateEnabled = "ENABLED" ) +// 
HlsId3SegmentTaggingState_Values returns all elements of the HlsId3SegmentTaggingState enum +func HlsId3SegmentTaggingState_Values() []string { + return []string{ + HlsId3SegmentTaggingStateDisabled, + HlsId3SegmentTaggingStateEnabled, + } +} + // Hls Iv In Manifest const ( // HlsIvInManifestExclude is a HlsIvInManifest enum value @@ -23474,6 +27582,14 @@ const ( HlsIvInManifestInclude = "INCLUDE" ) +// HlsIvInManifest_Values returns all elements of the HlsIvInManifest enum +func HlsIvInManifest_Values() []string { + return []string{ + HlsIvInManifestExclude, + HlsIvInManifestInclude, + } +} + // Hls Iv Source const ( // HlsIvSourceExplicit is a HlsIvSource enum value @@ -23483,6 +27599,14 @@ const ( HlsIvSourceFollowsSegmentNumber = "FOLLOWS_SEGMENT_NUMBER" ) +// HlsIvSource_Values returns all elements of the HlsIvSource enum +func HlsIvSource_Values() []string { + return []string{ + HlsIvSourceExplicit, + HlsIvSourceFollowsSegmentNumber, + } +} + // Hls Manifest Compression const ( // HlsManifestCompressionGzip is a HlsManifestCompression enum value @@ -23492,6 +27616,14 @@ const ( HlsManifestCompressionNone = "NONE" ) +// HlsManifestCompression_Values returns all elements of the HlsManifestCompression enum +func HlsManifestCompression_Values() []string { + return []string{ + HlsManifestCompressionGzip, + HlsManifestCompressionNone, + } +} + // Hls Manifest Duration Format const ( // HlsManifestDurationFormatFloatingPoint is a HlsManifestDurationFormat enum value @@ -23501,12 +27633,27 @@ const ( HlsManifestDurationFormatInteger = "INTEGER" ) +// HlsManifestDurationFormat_Values returns all elements of the HlsManifestDurationFormat enum +func HlsManifestDurationFormat_Values() []string { + return []string{ + HlsManifestDurationFormatFloatingPoint, + HlsManifestDurationFormatInteger, + } +} + // Hls Media Store Storage Class const ( // HlsMediaStoreStorageClassTemporal is a HlsMediaStoreStorageClass enum value HlsMediaStoreStorageClassTemporal = "TEMPORAL" ) +// HlsMediaStoreStorageClass_Values returns all elements of the HlsMediaStoreStorageClass enum +func HlsMediaStoreStorageClass_Values() []string { + return []string{ + HlsMediaStoreStorageClassTemporal, + } +} + // Hls Mode const ( // HlsModeLive is a HlsMode enum value @@ -23516,6 +27663,14 @@ const ( HlsModeVod = "VOD" ) +// HlsMode_Values returns all elements of the HlsMode enum +func HlsMode_Values() []string { + return []string{ + HlsModeLive, + HlsModeVod, + } +} + // Hls Output Selection const ( // HlsOutputSelectionManifestsAndSegments is a HlsOutputSelection enum value @@ -23523,8 +27678,20 @@ const ( // HlsOutputSelectionSegmentsOnly is a HlsOutputSelection enum value HlsOutputSelectionSegmentsOnly = "SEGMENTS_ONLY" + + // HlsOutputSelectionVariantManifestsAndSegments is a HlsOutputSelection enum value + HlsOutputSelectionVariantManifestsAndSegments = "VARIANT_MANIFESTS_AND_SEGMENTS" ) +// HlsOutputSelection_Values returns all elements of the HlsOutputSelection enum +func HlsOutputSelection_Values() []string { + return []string{ + HlsOutputSelectionManifestsAndSegments, + HlsOutputSelectionSegmentsOnly, + HlsOutputSelectionVariantManifestsAndSegments, + } +} + // Hls Program Date Time const ( // HlsProgramDateTimeExclude is a HlsProgramDateTime enum value @@ -23534,6 +27701,14 @@ const ( HlsProgramDateTimeInclude = "INCLUDE" ) +// HlsProgramDateTime_Values returns all elements of the HlsProgramDateTime enum +func HlsProgramDateTime_Values() []string { + return []string{ + HlsProgramDateTimeExclude, + 
HlsProgramDateTimeInclude, + } +} + // Hls Redundant Manifest const ( // HlsRedundantManifestDisabled is a HlsRedundantManifest enum value @@ -23543,6 +27718,14 @@ const ( HlsRedundantManifestEnabled = "ENABLED" ) +// HlsRedundantManifest_Values returns all elements of the HlsRedundantManifest enum +func HlsRedundantManifest_Values() []string { + return []string{ + HlsRedundantManifestDisabled, + HlsRedundantManifestEnabled, + } +} + // Hls Segmentation Mode const ( // HlsSegmentationModeUseInputSegmentation is a HlsSegmentationMode enum value @@ -23552,6 +27735,14 @@ const ( HlsSegmentationModeUseSegmentDuration = "USE_SEGMENT_DURATION" ) +// HlsSegmentationMode_Values returns all elements of the HlsSegmentationMode enum +func HlsSegmentationMode_Values() []string { + return []string{ + HlsSegmentationModeUseInputSegmentation, + HlsSegmentationModeUseSegmentDuration, + } +} + // Hls Stream Inf Resolution const ( // HlsStreamInfResolutionExclude is a HlsStreamInfResolution enum value @@ -23561,6 +27752,14 @@ const ( HlsStreamInfResolutionInclude = "INCLUDE" ) +// HlsStreamInfResolution_Values returns all elements of the HlsStreamInfResolution enum +func HlsStreamInfResolution_Values() []string { + return []string{ + HlsStreamInfResolutionExclude, + HlsStreamInfResolutionInclude, + } +} + // Hls Timed Metadata Id3 Frame const ( // HlsTimedMetadataId3FrameNone is a HlsTimedMetadataId3Frame enum value @@ -23573,6 +27772,15 @@ const ( HlsTimedMetadataId3FrameTdrl = "TDRL" ) +// HlsTimedMetadataId3Frame_Values returns all elements of the HlsTimedMetadataId3Frame enum +func HlsTimedMetadataId3Frame_Values() []string { + return []string{ + HlsTimedMetadataId3FrameNone, + HlsTimedMetadataId3FramePriv, + HlsTimedMetadataId3FrameTdrl, + } +} + // Hls Ts File Mode const ( // HlsTsFileModeSegmentedFiles is a HlsTsFileMode enum value @@ -23582,6 +27790,14 @@ const ( HlsTsFileModeSingleFile = "SINGLE_FILE" ) +// HlsTsFileMode_Values returns all elements of the HlsTsFileMode enum +func HlsTsFileMode_Values() []string { + return []string{ + HlsTsFileModeSegmentedFiles, + HlsTsFileModeSingleFile, + } +} + // Hls Webdav Http Transfer Mode const ( // HlsWebdavHttpTransferModeChunked is a HlsWebdavHttpTransferMode enum value @@ -23591,6 +27807,14 @@ const ( HlsWebdavHttpTransferModeNonChunked = "NON_CHUNKED" ) +// HlsWebdavHttpTransferMode_Values returns all elements of the HlsWebdavHttpTransferMode enum +func HlsWebdavHttpTransferMode_Values() []string { + return []string{ + HlsWebdavHttpTransferModeChunked, + HlsWebdavHttpTransferModeNonChunked, + } +} + // When set to "standard", an I-Frame only playlist will be written out for // each video output in the output group. This I-Frame only playlist will contain // byte range offsets pointing to the I-frame(s) in each segment. @@ -23602,6 +27826,14 @@ const ( IFrameOnlyPlaylistTypeStandard = "STANDARD" ) +// IFrameOnlyPlaylistType_Values returns all elements of the IFrameOnlyPlaylistType enum +func IFrameOnlyPlaylistType_Values() []string { + return []string{ + IFrameOnlyPlaylistTypeDisabled, + IFrameOnlyPlaylistTypeStandard, + } +} + // A standard input has two sources and a single pipeline input only has one. 
const ( // InputClassStandard is a InputClass enum value @@ -23611,6 +27843,14 @@ const ( InputClassSinglePipeline = "SINGLE_PIPELINE" ) +// InputClass_Values returns all elements of the InputClass enum +func InputClass_Values() []string { + return []string{ + InputClassStandard, + InputClassSinglePipeline, + } +} + // codec in increasing order of complexity const ( // InputCodecMpeg2 is a InputCodec enum value @@ -23623,6 +27863,15 @@ const ( InputCodecHevc = "HEVC" ) +// InputCodec_Values returns all elements of the InputCodec enum +func InputCodec_Values() []string { + return []string{ + InputCodecMpeg2, + InputCodecAvc, + InputCodecHevc, + } +} + // Input Deblock Filter const ( // InputDeblockFilterDisabled is a InputDeblockFilter enum value @@ -23632,6 +27881,14 @@ const ( InputDeblockFilterEnabled = "ENABLED" ) +// InputDeblockFilter_Values returns all elements of the InputDeblockFilter enum +func InputDeblockFilter_Values() []string { + return []string{ + InputDeblockFilterDisabled, + InputDeblockFilterEnabled, + } +} + // Input Denoise Filter const ( // InputDenoiseFilterDisabled is a InputDenoiseFilter enum value @@ -23641,6 +27898,135 @@ const ( InputDenoiseFilterEnabled = "ENABLED" ) +// InputDenoiseFilter_Values returns all elements of the InputDenoiseFilter enum +func InputDenoiseFilter_Values() []string { + return []string{ + InputDenoiseFilterDisabled, + InputDenoiseFilterEnabled, + } +} + +// The source at the input device that is currently active. +const ( + // InputDeviceActiveInputHdmi is a InputDeviceActiveInput enum value + InputDeviceActiveInputHdmi = "HDMI" + + // InputDeviceActiveInputSdi is a InputDeviceActiveInput enum value + InputDeviceActiveInputSdi = "SDI" +) + +// InputDeviceActiveInput_Values returns all elements of the InputDeviceActiveInput enum +func InputDeviceActiveInput_Values() []string { + return []string{ + InputDeviceActiveInputHdmi, + InputDeviceActiveInputSdi, + } +} + +// The source to activate (use) from the input device. +const ( + // InputDeviceConfiguredInputAuto is a InputDeviceConfiguredInput enum value + InputDeviceConfiguredInputAuto = "AUTO" + + // InputDeviceConfiguredInputHdmi is a InputDeviceConfiguredInput enum value + InputDeviceConfiguredInputHdmi = "HDMI" + + // InputDeviceConfiguredInputSdi is a InputDeviceConfiguredInput enum value + InputDeviceConfiguredInputSdi = "SDI" +) + +// InputDeviceConfiguredInput_Values returns all elements of the InputDeviceConfiguredInput enum +func InputDeviceConfiguredInput_Values() []string { + return []string{ + InputDeviceConfiguredInputAuto, + InputDeviceConfiguredInputHdmi, + InputDeviceConfiguredInputSdi, + } +} + +// The state of the connection between the input device and AWS. +const ( + // InputDeviceConnectionStateDisconnected is a InputDeviceConnectionState enum value + InputDeviceConnectionStateDisconnected = "DISCONNECTED" + + // InputDeviceConnectionStateConnected is a InputDeviceConnectionState enum value + InputDeviceConnectionStateConnected = "CONNECTED" +) + +// InputDeviceConnectionState_Values returns all elements of the InputDeviceConnectionState enum +func InputDeviceConnectionState_Values() []string { + return []string{ + InputDeviceConnectionStateDisconnected, + InputDeviceConnectionStateConnected, + } +} + +// Specifies whether the input device has been configured (outside of MediaLive) +// to use a dynamic IP address assignment (DHCP) or a static IP address. 
+const ( + // InputDeviceIpSchemeStatic is a InputDeviceIpScheme enum value + InputDeviceIpSchemeStatic = "STATIC" + + // InputDeviceIpSchemeDhcp is a InputDeviceIpScheme enum value + InputDeviceIpSchemeDhcp = "DHCP" +) + +// InputDeviceIpScheme_Values returns all elements of the InputDeviceIpScheme enum +func InputDeviceIpScheme_Values() []string { + return []string{ + InputDeviceIpSchemeStatic, + InputDeviceIpSchemeDhcp, + } +} + +// The scan type of the video source. +const ( + // InputDeviceScanTypeInterlaced is a InputDeviceScanType enum value + InputDeviceScanTypeInterlaced = "INTERLACED" + + // InputDeviceScanTypeProgressive is a InputDeviceScanType enum value + InputDeviceScanTypeProgressive = "PROGRESSIVE" +) + +// InputDeviceScanType_Values returns all elements of the InputDeviceScanType enum +func InputDeviceScanType_Values() []string { + return []string{ + InputDeviceScanTypeInterlaced, + InputDeviceScanTypeProgressive, + } +} + +// The state of the input device. +const ( + // InputDeviceStateIdle is a InputDeviceState enum value + InputDeviceStateIdle = "IDLE" + + // InputDeviceStateStreaming is a InputDeviceState enum value + InputDeviceStateStreaming = "STREAMING" +) + +// InputDeviceState_Values returns all elements of the InputDeviceState enum +func InputDeviceState_Values() []string { + return []string{ + InputDeviceStateIdle, + InputDeviceStateStreaming, + } +} + +// The type of the input device. For an AWS Elemental Link device that outputs +// resolutions up to 1080, choose "HD". +const ( + // InputDeviceTypeHd is a InputDeviceType enum value + InputDeviceTypeHd = "HD" +) + +// InputDeviceType_Values returns all elements of the InputDeviceType enum +func InputDeviceType_Values() []string { + return []string{ + InputDeviceTypeHd, + } +} + // Input Filter const ( // InputFilterAuto is a InputFilter enum value @@ -23653,6 +28039,15 @@ const ( InputFilterForced = "FORCED" ) +// InputFilter_Values returns all elements of the InputFilter enum +func InputFilter_Values() []string { + return []string{ + InputFilterAuto, + InputFilterDisabled, + InputFilterForced, + } +} + // Input Loss Action For Hls Out const ( // InputLossActionForHlsOutEmitOutput is a InputLossActionForHlsOut enum value @@ -23662,6 +28057,14 @@ const ( InputLossActionForHlsOutPauseOutput = "PAUSE_OUTPUT" ) +// InputLossActionForHlsOut_Values returns all elements of the InputLossActionForHlsOut enum +func InputLossActionForHlsOut_Values() []string { + return []string{ + InputLossActionForHlsOutEmitOutput, + InputLossActionForHlsOutPauseOutput, + } +} + // Input Loss Action For Ms Smooth Out const ( // InputLossActionForMsSmoothOutEmitOutput is a InputLossActionForMsSmoothOut enum value @@ -23671,6 +28074,14 @@ const ( InputLossActionForMsSmoothOutPauseOutput = "PAUSE_OUTPUT" ) +// InputLossActionForMsSmoothOut_Values returns all elements of the InputLossActionForMsSmoothOut enum +func InputLossActionForMsSmoothOut_Values() []string { + return []string{ + InputLossActionForMsSmoothOutEmitOutput, + InputLossActionForMsSmoothOutPauseOutput, + } +} + // Input Loss Action For Rtmp Out const ( // InputLossActionForRtmpOutEmitOutput is a InputLossActionForRtmpOut enum value @@ -23680,6 +28091,14 @@ const ( InputLossActionForRtmpOutPauseOutput = "PAUSE_OUTPUT" ) +// InputLossActionForRtmpOut_Values returns all elements of the InputLossActionForRtmpOut enum +func InputLossActionForRtmpOut_Values() []string { + return []string{ + InputLossActionForRtmpOutEmitOutput, + InputLossActionForRtmpOutPauseOutput, + } +} + // 
Input Loss Action For Udp Out const ( // InputLossActionForUdpOutDropProgram is a InputLossActionForUdpOut enum value @@ -23692,6 +28111,15 @@ const ( InputLossActionForUdpOutEmitProgram = "EMIT_PROGRAM" ) +// InputLossActionForUdpOut_Values returns all elements of the InputLossActionForUdpOut enum +func InputLossActionForUdpOut_Values() []string { + return []string{ + InputLossActionForUdpOutDropProgram, + InputLossActionForUdpOutDropTs, + InputLossActionForUdpOutEmitProgram, + } +} + // Input Loss Image Type const ( // InputLossImageTypeColor is a InputLossImageType enum value @@ -23701,6 +28129,14 @@ const ( InputLossImageTypeSlate = "SLATE" ) +// InputLossImageType_Values returns all elements of the InputLossImageType enum +func InputLossImageType_Values() []string { + return []string{ + InputLossImageTypeColor, + InputLossImageTypeSlate, + } +} + // Maximum input bitrate in megabits per second. Bitrates up to 50 Mbps are // supported currently. const ( @@ -23714,6 +28150,35 @@ const ( InputMaximumBitrateMax50Mbps = "MAX_50_MBPS" ) +// InputMaximumBitrate_Values returns all elements of the InputMaximumBitrate enum +func InputMaximumBitrate_Values() []string { + return []string{ + InputMaximumBitrateMax10Mbps, + InputMaximumBitrateMax20Mbps, + InputMaximumBitrateMax50Mbps, + } +} + +// Input preference when deciding which input to make active when a previously +// failed input has recovered.If \"EQUAL_INPUT_PREFERENCE\", then the active +// input will stay active as long as it is healthy.If \"PRIMARY_INPUT_PREFERRED\", +// then always switch back to the primary input when it is healthy. +const ( + // InputPreferenceEqualInputPreference is a InputPreference enum value + InputPreferenceEqualInputPreference = "EQUAL_INPUT_PREFERENCE" + + // InputPreferencePrimaryInputPreferred is a InputPreference enum value + InputPreferencePrimaryInputPreferred = "PRIMARY_INPUT_PREFERRED" +) + +// InputPreference_Values returns all elements of the InputPreference enum +func InputPreference_Values() []string { + return []string{ + InputPreferenceEqualInputPreference, + InputPreferencePrimaryInputPreferred, + } +} + // Input resolution based on lines of vertical resolution in the input; SD is // less than 720 lines, HD is 720 to 1080 lines, UHD is greater than 1080 lines const ( @@ -23727,6 +28192,15 @@ const ( InputResolutionUhd = "UHD" ) +// InputResolution_Values returns all elements of the InputResolution enum +func InputResolution_Values() []string { + return []string{ + InputResolutionSd, + InputResolutionHd, + InputResolutionUhd, + } +} + const ( // InputSecurityGroupStateIdle is a InputSecurityGroupState enum value InputSecurityGroupStateIdle = "IDLE" @@ -23741,6 +28215,16 @@ const ( InputSecurityGroupStateDeleted = "DELETED" ) +// InputSecurityGroupState_Values returns all elements of the InputSecurityGroupState enum +func InputSecurityGroupState_Values() []string { + return []string{ + InputSecurityGroupStateIdle, + InputSecurityGroupStateInUse, + InputSecurityGroupStateUpdating, + InputSecurityGroupStateDeleted, + } +} + // Input Source End Behavior const ( // InputSourceEndBehaviorContinue is a InputSourceEndBehavior enum value @@ -23750,6 +28234,14 @@ const ( InputSourceEndBehaviorLoop = "LOOP" ) +// InputSourceEndBehavior_Values returns all elements of the InputSourceEndBehavior enum +func InputSourceEndBehavior_Values() []string { + return []string{ + InputSourceEndBehaviorContinue, + InputSourceEndBehaviorLoop, + } +} + // There are two types of input sources, static and dynamic. 
If an input source // is dynamic you canchange the source url of the input dynamically using an // input switch action. However, the only input typeto support a dynamic url @@ -23762,6 +28254,14 @@ const ( InputSourceTypeDynamic = "DYNAMIC" ) +// InputSourceType_Values returns all elements of the InputSourceType enum +func InputSourceType_Values() []string { + return []string{ + InputSourceTypeStatic, + InputSourceTypeDynamic, + } +} + const ( // InputStateCreating is a InputState enum value InputStateCreating = "CREATING" @@ -23779,6 +28279,17 @@ const ( InputStateDeleted = "DELETED" ) +// InputState_Values returns all elements of the InputState enum +func InputState_Values() []string { + return []string{ + InputStateCreating, + InputStateDetached, + InputStateAttached, + InputStateDeleting, + InputStateDeleted, + } +} + // To clip the file, you must specify the timecode for the start and end of // the clip. Specify EMBEDDED to use the timecode embedded in the source content. // The embedded timecode must exist in the source content, otherwise MediaLive @@ -23794,6 +28305,14 @@ const ( InputTimecodeSourceEmbedded = "EMBEDDED" ) +// InputTimecodeSource_Values returns all elements of the InputTimecodeSource enum +func InputTimecodeSource_Values() []string { + return []string{ + InputTimecodeSourceZerobased, + InputTimecodeSourceEmbedded, + } +} + const ( // InputTypeUdpPush is a InputType enum value InputTypeUdpPush = "UDP_PUSH" @@ -23815,8 +28334,29 @@ const ( // InputTypeMediaconnect is a InputType enum value InputTypeMediaconnect = "MEDIACONNECT" + + // InputTypeInputDevice is a InputType enum value + InputTypeInputDevice = "INPUT_DEVICE" + + // InputTypeAwsCdi is a InputType enum value + InputTypeAwsCdi = "AWS_CDI" ) +// InputType_Values returns all elements of the InputType enum +func InputType_Values() []string { + return []string{ + InputTypeUdpPush, + InputTypeRtpPush, + InputTypeRtmpPush, + InputTypeRtmpPull, + InputTypeUrlPull, + InputTypeMp4File, + InputTypeMediaconnect, + InputTypeInputDevice, + InputTypeAwsCdi, + } +} + // If you specify a StopTimecode in an input (in order to clip the file), you // can specify if you want the clip to exclude (the default) or include the // frame specified by the timecode. @@ -23828,6 +28368,14 @@ const ( LastFrameClippingBehaviorIncludeLastFrame = "INCLUDE_LAST_FRAME" ) +// LastFrameClippingBehavior_Values returns all elements of the LastFrameClippingBehavior enum +func LastFrameClippingBehavior_Values() []string { + return []string{ + LastFrameClippingBehaviorExcludeLastFrame, + LastFrameClippingBehaviorIncludeLastFrame, + } +} + // The log level the user wants for their channel. 
const ( // LogLevelError is a LogLevel enum value @@ -23846,6 +28394,17 @@ const ( LogLevelDisabled = "DISABLED" ) +// LogLevel_Values returns all elements of the LogLevel enum +func LogLevel_Values() []string { + return []string{ + LogLevelError, + LogLevelWarning, + LogLevelInfo, + LogLevelDebug, + LogLevelDisabled, + } +} + // M2ts Absent Input Audio Behavior const ( // M2tsAbsentInputAudioBehaviorDrop is a M2tsAbsentInputAudioBehavior enum value @@ -23855,6 +28414,14 @@ const ( M2tsAbsentInputAudioBehaviorEncodeSilence = "ENCODE_SILENCE" ) +// M2tsAbsentInputAudioBehavior_Values returns all elements of the M2tsAbsentInputAudioBehavior enum +func M2tsAbsentInputAudioBehavior_Values() []string { + return []string{ + M2tsAbsentInputAudioBehaviorDrop, + M2tsAbsentInputAudioBehaviorEncodeSilence, + } +} + // M2ts Arib const ( // M2tsAribDisabled is a M2tsArib enum value @@ -23864,6 +28431,14 @@ const ( M2tsAribEnabled = "ENABLED" ) +// M2tsArib_Values returns all elements of the M2tsArib enum +func M2tsArib_Values() []string { + return []string{ + M2tsAribDisabled, + M2tsAribEnabled, + } +} + // M2ts Arib Captions Pid Control const ( // M2tsAribCaptionsPidControlAuto is a M2tsAribCaptionsPidControl enum value @@ -23873,6 +28448,14 @@ const ( M2tsAribCaptionsPidControlUseConfigured = "USE_CONFIGURED" ) +// M2tsAribCaptionsPidControl_Values returns all elements of the M2tsAribCaptionsPidControl enum +func M2tsAribCaptionsPidControl_Values() []string { + return []string{ + M2tsAribCaptionsPidControlAuto, + M2tsAribCaptionsPidControlUseConfigured, + } +} + // M2ts Audio Buffer Model const ( // M2tsAudioBufferModelAtsc is a M2tsAudioBufferModel enum value @@ -23882,6 +28465,14 @@ const ( M2tsAudioBufferModelDvb = "DVB" ) +// M2tsAudioBufferModel_Values returns all elements of the M2tsAudioBufferModel enum +func M2tsAudioBufferModel_Values() []string { + return []string{ + M2tsAudioBufferModelAtsc, + M2tsAudioBufferModelDvb, + } +} + // M2ts Audio Interval const ( // M2tsAudioIntervalVideoAndFixedIntervals is a M2tsAudioInterval enum value @@ -23891,6 +28482,14 @@ const ( M2tsAudioIntervalVideoInterval = "VIDEO_INTERVAL" ) +// M2tsAudioInterval_Values returns all elements of the M2tsAudioInterval enum +func M2tsAudioInterval_Values() []string { + return []string{ + M2tsAudioIntervalVideoAndFixedIntervals, + M2tsAudioIntervalVideoInterval, + } +} + // M2ts Audio Stream Type const ( // M2tsAudioStreamTypeAtsc is a M2tsAudioStreamType enum value @@ -23900,6 +28499,14 @@ const ( M2tsAudioStreamTypeDvb = "DVB" ) +// M2tsAudioStreamType_Values returns all elements of the M2tsAudioStreamType enum +func M2tsAudioStreamType_Values() []string { + return []string{ + M2tsAudioStreamTypeAtsc, + M2tsAudioStreamTypeDvb, + } +} + // M2ts Buffer Model const ( // M2tsBufferModelMultiplex is a M2tsBufferModel enum value @@ -23909,6 +28516,14 @@ const ( M2tsBufferModelNone = "NONE" ) +// M2tsBufferModel_Values returns all elements of the M2tsBufferModel enum +func M2tsBufferModel_Values() []string { + return []string{ + M2tsBufferModelMultiplex, + M2tsBufferModelNone, + } +} + // M2ts Cc Descriptor const ( // M2tsCcDescriptorDisabled is a M2tsCcDescriptor enum value @@ -23918,6 +28533,14 @@ const ( M2tsCcDescriptorEnabled = "ENABLED" ) +// M2tsCcDescriptor_Values returns all elements of the M2tsCcDescriptor enum +func M2tsCcDescriptor_Values() []string { + return []string{ + M2tsCcDescriptorDisabled, + M2tsCcDescriptorEnabled, + } +} + // M2ts Ebif Control const ( // M2tsEbifControlNone is a M2tsEbifControl enum 
value @@ -23927,6 +28550,14 @@ const ( M2tsEbifControlPassthrough = "PASSTHROUGH" ) +// M2tsEbifControl_Values returns all elements of the M2tsEbifControl enum +func M2tsEbifControl_Values() []string { + return []string{ + M2tsEbifControlNone, + M2tsEbifControlPassthrough, + } +} + // M2ts Ebp Placement const ( // M2tsEbpPlacementVideoAndAudioPids is a M2tsEbpPlacement enum value @@ -23936,6 +28567,14 @@ const ( M2tsEbpPlacementVideoPid = "VIDEO_PID" ) +// M2tsEbpPlacement_Values returns all elements of the M2tsEbpPlacement enum +func M2tsEbpPlacement_Values() []string { + return []string{ + M2tsEbpPlacementVideoAndAudioPids, + M2tsEbpPlacementVideoPid, + } +} + // M2ts Es Rate In Pes const ( // M2tsEsRateInPesExclude is a M2tsEsRateInPes enum value @@ -23945,6 +28584,14 @@ const ( M2tsEsRateInPesInclude = "INCLUDE" ) +// M2tsEsRateInPes_Values returns all elements of the M2tsEsRateInPes enum +func M2tsEsRateInPes_Values() []string { + return []string{ + M2tsEsRateInPesExclude, + M2tsEsRateInPesInclude, + } +} + // M2ts Klv const ( // M2tsKlvNone is a M2tsKlv enum value @@ -23954,6 +28601,14 @@ const ( M2tsKlvPassthrough = "PASSTHROUGH" ) +// M2tsKlv_Values returns all elements of the M2tsKlv enum +func M2tsKlv_Values() []string { + return []string{ + M2tsKlvNone, + M2tsKlvPassthrough, + } +} + // M2ts Nielsen Id3 Behavior const ( // M2tsNielsenId3BehaviorNoPassthrough is a M2tsNielsenId3Behavior enum value @@ -23963,6 +28618,14 @@ const ( M2tsNielsenId3BehaviorPassthrough = "PASSTHROUGH" ) +// M2tsNielsenId3Behavior_Values returns all elements of the M2tsNielsenId3Behavior enum +func M2tsNielsenId3Behavior_Values() []string { + return []string{ + M2tsNielsenId3BehaviorNoPassthrough, + M2tsNielsenId3BehaviorPassthrough, + } +} + // M2ts Pcr Control const ( // M2tsPcrControlConfiguredPcrPeriod is a M2tsPcrControl enum value @@ -23972,6 +28635,14 @@ const ( M2tsPcrControlPcrEveryPesPacket = "PCR_EVERY_PES_PACKET" ) +// M2tsPcrControl_Values returns all elements of the M2tsPcrControl enum +func M2tsPcrControl_Values() []string { + return []string{ + M2tsPcrControlConfiguredPcrPeriod, + M2tsPcrControlPcrEveryPesPacket, + } +} + // M2ts Rate Mode const ( // M2tsRateModeCbr is a M2tsRateMode enum value @@ -23981,6 +28652,14 @@ const ( M2tsRateModeVbr = "VBR" ) +// M2tsRateMode_Values returns all elements of the M2tsRateMode enum +func M2tsRateMode_Values() []string { + return []string{ + M2tsRateModeCbr, + M2tsRateModeVbr, + } +} + // M2ts Scte35 Control const ( // M2tsScte35ControlNone is a M2tsScte35Control enum value @@ -23990,6 +28669,14 @@ const ( M2tsScte35ControlPassthrough = "PASSTHROUGH" ) +// M2tsScte35Control_Values returns all elements of the M2tsScte35Control enum +func M2tsScte35Control_Values() []string { + return []string{ + M2tsScte35ControlNone, + M2tsScte35ControlPassthrough, + } +} + // M2ts Segmentation Markers const ( // M2tsSegmentationMarkersEbp is a M2tsSegmentationMarkers enum value @@ -24011,6 +28698,18 @@ const ( M2tsSegmentationMarkersRaiSegstart = "RAI_SEGSTART" ) +// M2tsSegmentationMarkers_Values returns all elements of the M2tsSegmentationMarkers enum +func M2tsSegmentationMarkers_Values() []string { + return []string{ + M2tsSegmentationMarkersEbp, + M2tsSegmentationMarkersEbpLegacy, + M2tsSegmentationMarkersNone, + M2tsSegmentationMarkersPsiSegstart, + M2tsSegmentationMarkersRaiAdapt, + M2tsSegmentationMarkersRaiSegstart, + } +} + // M2ts Segmentation Style const ( // M2tsSegmentationStyleMaintainCadence is a M2tsSegmentationStyle enum value @@ -24020,6 
+28719,14 @@ const ( M2tsSegmentationStyleResetCadence = "RESET_CADENCE" ) +// M2tsSegmentationStyle_Values returns all elements of the M2tsSegmentationStyle enum +func M2tsSegmentationStyle_Values() []string { + return []string{ + M2tsSegmentationStyleMaintainCadence, + M2tsSegmentationStyleResetCadence, + } +} + // M2ts Timed Metadata Behavior const ( // M2tsTimedMetadataBehaviorNoPassthrough is a M2tsTimedMetadataBehavior enum value @@ -24029,6 +28736,14 @@ const ( M2tsTimedMetadataBehaviorPassthrough = "PASSTHROUGH" ) +// M2tsTimedMetadataBehavior_Values returns all elements of the M2tsTimedMetadataBehavior enum +func M2tsTimedMetadataBehavior_Values() []string { + return []string{ + M2tsTimedMetadataBehaviorNoPassthrough, + M2tsTimedMetadataBehaviorPassthrough, + } +} + // M3u8 Nielsen Id3 Behavior const ( // M3u8NielsenId3BehaviorNoPassthrough is a M3u8NielsenId3Behavior enum value @@ -24038,6 +28753,14 @@ const ( M3u8NielsenId3BehaviorPassthrough = "PASSTHROUGH" ) +// M3u8NielsenId3Behavior_Values returns all elements of the M3u8NielsenId3Behavior enum +func M3u8NielsenId3Behavior_Values() []string { + return []string{ + M3u8NielsenId3BehaviorNoPassthrough, + M3u8NielsenId3BehaviorPassthrough, + } +} + // M3u8 Pcr Control const ( // M3u8PcrControlConfiguredPcrPeriod is a M3u8PcrControl enum value @@ -24047,6 +28770,14 @@ const ( M3u8PcrControlPcrEveryPesPacket = "PCR_EVERY_PES_PACKET" ) +// M3u8PcrControl_Values returns all elements of the M3u8PcrControl enum +func M3u8PcrControl_Values() []string { + return []string{ + M3u8PcrControlConfiguredPcrPeriod, + M3u8PcrControlPcrEveryPesPacket, + } +} + // M3u8 Scte35 Behavior const ( // M3u8Scte35BehaviorNoPassthrough is a M3u8Scte35Behavior enum value @@ -24056,6 +28787,14 @@ const ( M3u8Scte35BehaviorPassthrough = "PASSTHROUGH" ) +// M3u8Scte35Behavior_Values returns all elements of the M3u8Scte35Behavior enum +func M3u8Scte35Behavior_Values() []string { + return []string{ + M3u8Scte35BehaviorNoPassthrough, + M3u8Scte35BehaviorPassthrough, + } +} + // M3u8 Timed Metadata Behavior const ( // M3u8TimedMetadataBehaviorNoPassthrough is a M3u8TimedMetadataBehavior enum value @@ -24065,6 +28804,14 @@ const ( M3u8TimedMetadataBehaviorPassthrough = "PASSTHROUGH" ) +// M3u8TimedMetadataBehavior_Values returns all elements of the M3u8TimedMetadataBehavior enum +func M3u8TimedMetadataBehavior_Values() []string { + return []string{ + M3u8TimedMetadataBehaviorNoPassthrough, + M3u8TimedMetadataBehaviorPassthrough, + } +} + // Mp2 Coding Mode const ( // Mp2CodingModeCodingMode10 is a Mp2CodingMode enum value @@ -24074,6 +28821,162 @@ const ( Mp2CodingModeCodingMode20 = "CODING_MODE_2_0" ) +// Mp2CodingMode_Values returns all elements of the Mp2CodingMode enum +func Mp2CodingMode_Values() []string { + return []string{ + Mp2CodingModeCodingMode10, + Mp2CodingModeCodingMode20, + } +} + +// Mpeg2 Adaptive Quantization +const ( + // Mpeg2AdaptiveQuantizationAuto is a Mpeg2AdaptiveQuantization enum value + Mpeg2AdaptiveQuantizationAuto = "AUTO" + + // Mpeg2AdaptiveQuantizationHigh is a Mpeg2AdaptiveQuantization enum value + Mpeg2AdaptiveQuantizationHigh = "HIGH" + + // Mpeg2AdaptiveQuantizationLow is a Mpeg2AdaptiveQuantization enum value + Mpeg2AdaptiveQuantizationLow = "LOW" + + // Mpeg2AdaptiveQuantizationMedium is a Mpeg2AdaptiveQuantization enum value + Mpeg2AdaptiveQuantizationMedium = "MEDIUM" + + // Mpeg2AdaptiveQuantizationOff is a Mpeg2AdaptiveQuantization enum value + Mpeg2AdaptiveQuantizationOff = "OFF" +) + +// Mpeg2AdaptiveQuantization_Values 
returns all elements of the Mpeg2AdaptiveQuantization enum +func Mpeg2AdaptiveQuantization_Values() []string { + return []string{ + Mpeg2AdaptiveQuantizationAuto, + Mpeg2AdaptiveQuantizationHigh, + Mpeg2AdaptiveQuantizationLow, + Mpeg2AdaptiveQuantizationMedium, + Mpeg2AdaptiveQuantizationOff, + } +} + +// Mpeg2 Color Metadata +const ( + // Mpeg2ColorMetadataIgnore is a Mpeg2ColorMetadata enum value + Mpeg2ColorMetadataIgnore = "IGNORE" + + // Mpeg2ColorMetadataInsert is a Mpeg2ColorMetadata enum value + Mpeg2ColorMetadataInsert = "INSERT" +) + +// Mpeg2ColorMetadata_Values returns all elements of the Mpeg2ColorMetadata enum +func Mpeg2ColorMetadata_Values() []string { + return []string{ + Mpeg2ColorMetadataIgnore, + Mpeg2ColorMetadataInsert, + } +} + +// Mpeg2 Color Space +const ( + // Mpeg2ColorSpaceAuto is a Mpeg2ColorSpace enum value + Mpeg2ColorSpaceAuto = "AUTO" + + // Mpeg2ColorSpacePassthrough is a Mpeg2ColorSpace enum value + Mpeg2ColorSpacePassthrough = "PASSTHROUGH" +) + +// Mpeg2ColorSpace_Values returns all elements of the Mpeg2ColorSpace enum +func Mpeg2ColorSpace_Values() []string { + return []string{ + Mpeg2ColorSpaceAuto, + Mpeg2ColorSpacePassthrough, + } +} + +// Mpeg2 Display Ratio +const ( + // Mpeg2DisplayRatioDisplayratio16x9 is a Mpeg2DisplayRatio enum value + Mpeg2DisplayRatioDisplayratio16x9 = "DISPLAYRATIO16X9" + + // Mpeg2DisplayRatioDisplayratio4x3 is a Mpeg2DisplayRatio enum value + Mpeg2DisplayRatioDisplayratio4x3 = "DISPLAYRATIO4X3" +) + +// Mpeg2DisplayRatio_Values returns all elements of the Mpeg2DisplayRatio enum +func Mpeg2DisplayRatio_Values() []string { + return []string{ + Mpeg2DisplayRatioDisplayratio16x9, + Mpeg2DisplayRatioDisplayratio4x3, + } +} + +// Mpeg2 Gop Size Units +const ( + // Mpeg2GopSizeUnitsFrames is a Mpeg2GopSizeUnits enum value + Mpeg2GopSizeUnitsFrames = "FRAMES" + + // Mpeg2GopSizeUnitsSeconds is a Mpeg2GopSizeUnits enum value + Mpeg2GopSizeUnitsSeconds = "SECONDS" +) + +// Mpeg2GopSizeUnits_Values returns all elements of the Mpeg2GopSizeUnits enum +func Mpeg2GopSizeUnits_Values() []string { + return []string{ + Mpeg2GopSizeUnitsFrames, + Mpeg2GopSizeUnitsSeconds, + } +} + +// Mpeg2 Scan Type +const ( + // Mpeg2ScanTypeInterlaced is a Mpeg2ScanType enum value + Mpeg2ScanTypeInterlaced = "INTERLACED" + + // Mpeg2ScanTypeProgressive is a Mpeg2ScanType enum value + Mpeg2ScanTypeProgressive = "PROGRESSIVE" +) + +// Mpeg2ScanType_Values returns all elements of the Mpeg2ScanType enum +func Mpeg2ScanType_Values() []string { + return []string{ + Mpeg2ScanTypeInterlaced, + Mpeg2ScanTypeProgressive, + } +} + +// Mpeg2 Sub Gop Length +const ( + // Mpeg2SubGopLengthDynamic is a Mpeg2SubGopLength enum value + Mpeg2SubGopLengthDynamic = "DYNAMIC" + + // Mpeg2SubGopLengthFixed is a Mpeg2SubGopLength enum value + Mpeg2SubGopLengthFixed = "FIXED" +) + +// Mpeg2SubGopLength_Values returns all elements of the Mpeg2SubGopLength enum +func Mpeg2SubGopLength_Values() []string { + return []string{ + Mpeg2SubGopLengthDynamic, + Mpeg2SubGopLengthFixed, + } +} + +// Mpeg2 Timecode Insertion Behavior +const ( + // Mpeg2TimecodeInsertionBehaviorDisabled is a Mpeg2TimecodeInsertionBehavior enum value + Mpeg2TimecodeInsertionBehaviorDisabled = "DISABLED" + + // Mpeg2TimecodeInsertionBehaviorGopTimecode is a Mpeg2TimecodeInsertionBehavior enum value + Mpeg2TimecodeInsertionBehaviorGopTimecode = "GOP_TIMECODE" +) + +// Mpeg2TimecodeInsertionBehavior_Values returns all elements of the Mpeg2TimecodeInsertionBehavior enum +func 
Mpeg2TimecodeInsertionBehavior_Values() []string { + return []string{ + Mpeg2TimecodeInsertionBehaviorDisabled, + Mpeg2TimecodeInsertionBehaviorGopTimecode, + } +} + // Ms Smooth H265 Packaging Type const ( // MsSmoothH265PackagingTypeHev1 is a MsSmoothH265PackagingType enum value @@ -24083,6 +28986,14 @@ const ( MsSmoothH265PackagingTypeHvc1 = "HVC1" ) +// MsSmoothH265PackagingType_Values returns all elements of the MsSmoothH265PackagingType enum +func MsSmoothH265PackagingType_Values() []string { + return []string{ + MsSmoothH265PackagingTypeHev1, + MsSmoothH265PackagingTypeHvc1, + } +} + // The current state of the multiplex. const ( // MultiplexStateCreating is a MultiplexState enum value @@ -24113,6 +29024,21 @@ const ( MultiplexStateDeleted = "DELETED" ) +// MultiplexState_Values returns all elements of the MultiplexState enum +func MultiplexState_Values() []string { + return []string{ + MultiplexStateCreating, + MultiplexStateCreateFailed, + MultiplexStateIdle, + MultiplexStateStarting, + MultiplexStateRunning, + MultiplexStateRecovering, + MultiplexStateStopping, + MultiplexStateDeleting, + MultiplexStateDeleted, + } +} + // Network Input Server Validation const ( // NetworkInputServerValidationCheckCryptographyAndValidateName is a NetworkInputServerValidation enum value @@ -24122,6 +29048,14 @@ const ( NetworkInputServerValidationCheckCryptographyOnly = "CHECK_CRYPTOGRAPHY_ONLY" ) +// NetworkInputServerValidation_Values returns all elements of the NetworkInputServerValidation enum +func NetworkInputServerValidation_Values() []string { + return []string{ + NetworkInputServerValidationCheckCryptographyAndValidateName, + NetworkInputServerValidationCheckCryptographyOnly, + } +} + // State of Nielsen PCM to ID3 tagging const ( // NielsenPcmToId3TaggingStateDisabled is a NielsenPcmToId3TaggingState enum value @@ -24131,18 +29065,40 @@ const ( NielsenPcmToId3TaggingStateEnabled = "ENABLED" ) +// NielsenPcmToId3TaggingState_Values returns all elements of the NielsenPcmToId3TaggingState enum +func NielsenPcmToId3TaggingState_Values() []string { + return []string{ + NielsenPcmToId3TaggingStateDisabled, + NielsenPcmToId3TaggingStateEnabled, + } +} + // Units for duration, e.g. 'MONTHS' const ( // OfferingDurationUnitsMonths is a OfferingDurationUnits enum value OfferingDurationUnitsMonths = "MONTHS" ) +// OfferingDurationUnits_Values returns all elements of the OfferingDurationUnits enum +func OfferingDurationUnits_Values() []string { + return []string{ + OfferingDurationUnitsMonths, + } +} + // Offering type, e.g. 
'NO_UPFRONT' const ( // OfferingTypeNoUpfront is a OfferingType enum value OfferingTypeNoUpfront = "NO_UPFRONT" ) +// OfferingType_Values returns all elements of the OfferingType enum +func OfferingType_Values() []string { + return []string{ + OfferingTypeNoUpfront, + } +} + // Pipeline ID const ( // PipelineIdPipeline0 is a PipelineId enum value @@ -24152,6 +29108,14 @@ const ( PipelineIdPipeline1 = "PIPELINE_1" ) +// PipelineId_Values returns all elements of the PipelineId enum +func PipelineId_Values() []string { + return []string{ + PipelineIdPipeline0, + PipelineIdPipeline1, + } +} + // Indicates which pipeline is preferred by the multiplex for program ingest.If // set to \"PIPELINE_0\" or \"PIPELINE_1\" and an unhealthy ingest causes the // multiplex to switch to the non-preferred pipeline,it will switch back once @@ -24169,6 +29133,15 @@ const ( PreferredChannelPipelinePipeline1 = "PIPELINE_1" ) +// PreferredChannelPipeline_Values returns all elements of the PreferredChannelPipeline enum +func PreferredChannelPipeline_Values() []string { + return []string{ + PreferredChannelPipelineCurrentlyActive, + PreferredChannelPipelinePipeline0, + PreferredChannelPipelinePipeline1, + } +} + // Codec, 'MPEG2', 'AVC', 'HEVC', or 'AUDIO' const ( // ReservationCodecMpeg2 is a ReservationCodec enum value @@ -24182,8 +29155,22 @@ const ( // ReservationCodecAudio is a ReservationCodec enum value ReservationCodecAudio = "AUDIO" + + // ReservationCodecLink is a ReservationCodec enum value + ReservationCodecLink = "LINK" ) +// ReservationCodec_Values returns all elements of the ReservationCodec enum +func ReservationCodec_Values() []string { + return []string{ + ReservationCodecMpeg2, + ReservationCodecAvc, + ReservationCodecHevc, + ReservationCodecAudio, + ReservationCodecLink, + } +} + // Maximum bitrate in megabits per second const ( // ReservationMaximumBitrateMax10Mbps is a ReservationMaximumBitrate enum value @@ -24196,6 +29183,15 @@ const ( ReservationMaximumBitrateMax50Mbps = "MAX_50_MBPS" ) +// ReservationMaximumBitrate_Values returns all elements of the ReservationMaximumBitrate enum +func ReservationMaximumBitrate_Values() []string { + return []string{ + ReservationMaximumBitrateMax10Mbps, + ReservationMaximumBitrateMax20Mbps, + ReservationMaximumBitrateMax50Mbps, + } +} + // Maximum framerate in frames per second (Outputs only) const ( // ReservationMaximumFramerateMax30Fps is a ReservationMaximumFramerate enum value @@ -24205,6 +29201,14 @@ const ( ReservationMaximumFramerateMax60Fps = "MAX_60_FPS" ) +// ReservationMaximumFramerate_Values returns all elements of the ReservationMaximumFramerate enum +func ReservationMaximumFramerate_Values() []string { + return []string{ + ReservationMaximumFramerateMax30Fps, + ReservationMaximumFramerateMax60Fps, + } +} + // Resolution based on lines of vertical resolution; SD is less than 720 lines, // HD is 720 to 1080 lines, FHD is 1080 lines, UHD is greater than 1080 lines const ( @@ -24221,6 +29225,16 @@ const ( ReservationResolutionUhd = "UHD" ) +// ReservationResolution_Values returns all elements of the ReservationResolution enum +func ReservationResolution_Values() []string { + return []string{ + ReservationResolutionSd, + ReservationResolutionHd, + ReservationResolutionFhd, + ReservationResolutionUhd, + } +} + // Resource type, 'INPUT', 'OUTPUT', 'MULTIPLEX', or 'CHANNEL' const ( // ReservationResourceTypeInput is a ReservationResourceType enum value @@ -24236,6 +29250,16 @@ const ( ReservationResourceTypeChannel = "CHANNEL" ) +// 
ReservationResourceType_Values returns all elements of the ReservationResourceType enum +func ReservationResourceType_Values() []string { + return []string{ + ReservationResourceTypeInput, + ReservationResourceTypeOutput, + ReservationResourceTypeMultiplex, + ReservationResourceTypeChannel, + } +} + // Special features, 'ADVANCED_AUDIO' or 'AUDIO_NORMALIZATION' const ( // ReservationSpecialFeatureAdvancedAudio is a ReservationSpecialFeature enum value @@ -24245,6 +29269,14 @@ const ( ReservationSpecialFeatureAudioNormalization = "AUDIO_NORMALIZATION" ) +// ReservationSpecialFeature_Values returns all elements of the ReservationSpecialFeature enum +func ReservationSpecialFeature_Values() []string { + return []string{ + ReservationSpecialFeatureAdvancedAudio, + ReservationSpecialFeatureAudioNormalization, + } +} + // Current reservation state const ( // ReservationStateActive is a ReservationState enum value @@ -24260,6 +29292,16 @@ const ( ReservationStateDeleted = "DELETED" ) +// ReservationState_Values returns all elements of the ReservationState enum +func ReservationState_Values() []string { + return []string{ + ReservationStateActive, + ReservationStateExpired, + ReservationStateCanceled, + ReservationStateDeleted, + } +} + // Video quality, e.g. 'STANDARD' (Outputs only) const ( // ReservationVideoQualityStandard is a ReservationVideoQuality enum value @@ -24272,6 +29314,15 @@ const ( ReservationVideoQualityPremium = "PREMIUM" ) +// ReservationVideoQuality_Values returns all elements of the ReservationVideoQuality enum +func ReservationVideoQuality_Values() []string { + return []string{ + ReservationVideoQualityStandard, + ReservationVideoQualityEnhanced, + ReservationVideoQualityPremium, + } +} + // Rtmp Cache Full Behavior const ( // RtmpCacheFullBehaviorDisconnectImmediately is a RtmpCacheFullBehavior enum value @@ -24281,6 +29332,14 @@ const ( RtmpCacheFullBehaviorWaitForServer = "WAIT_FOR_SERVER" ) +// RtmpCacheFullBehavior_Values returns all elements of the RtmpCacheFullBehavior enum +func RtmpCacheFullBehavior_Values() []string { + return []string{ + RtmpCacheFullBehaviorDisconnectImmediately, + RtmpCacheFullBehaviorWaitForServer, + } +} + // Rtmp Caption Data const ( // RtmpCaptionDataAll is a RtmpCaptionData enum value @@ -24293,6 +29352,15 @@ const ( RtmpCaptionDataField1AndField2608 = "FIELD1_AND_FIELD2_608" ) +// RtmpCaptionData_Values returns all elements of the RtmpCaptionData enum +func RtmpCaptionData_Values() []string { + return []string{ + RtmpCaptionDataAll, + RtmpCaptionDataField1608, + RtmpCaptionDataField1AndField2608, + } +} + // Rtmp Output Certificate Mode const ( // RtmpOutputCertificateModeSelfSigned is a RtmpOutputCertificateMode enum value @@ -24302,6 +29370,14 @@ const ( RtmpOutputCertificateModeVerifyAuthenticity = "VERIFY_AUTHENTICITY" ) +// RtmpOutputCertificateMode_Values returns all elements of the RtmpOutputCertificateMode enum +func RtmpOutputCertificateMode_Values() []string { + return []string{ + RtmpOutputCertificateModeSelfSigned, + RtmpOutputCertificateModeVerifyAuthenticity, + } +} + // Scte20 Convert608 To708 const ( // Scte20Convert608To708Disabled is a Scte20Convert608To708 enum value @@ -24311,6 +29387,14 @@ const ( Scte20Convert608To708Upconvert = "UPCONVERT" ) +// Scte20Convert608To708_Values returns all elements of the Scte20Convert608To708 enum +func Scte20Convert608To708_Values() []string { + return []string{ + Scte20Convert608To708Disabled, + Scte20Convert608To708Upconvert, + } +} + // Scte35 Apos No Regional Blackout Behavior const 
( // Scte35AposNoRegionalBlackoutBehaviorFollow is a Scte35AposNoRegionalBlackoutBehavior enum value @@ -24320,6 +29404,14 @@ const ( Scte35AposNoRegionalBlackoutBehaviorIgnore = "IGNORE" ) +// Scte35AposNoRegionalBlackoutBehavior_Values returns all elements of the Scte35AposNoRegionalBlackoutBehavior enum +func Scte35AposNoRegionalBlackoutBehavior_Values() []string { + return []string{ + Scte35AposNoRegionalBlackoutBehaviorFollow, + Scte35AposNoRegionalBlackoutBehaviorIgnore, + } +} + // Scte35 Apos Web Delivery Allowed Behavior const ( // Scte35AposWebDeliveryAllowedBehaviorFollow is a Scte35AposWebDeliveryAllowedBehavior enum value @@ -24329,6 +29421,14 @@ const ( Scte35AposWebDeliveryAllowedBehaviorIgnore = "IGNORE" ) +// Scte35AposWebDeliveryAllowedBehavior_Values returns all elements of the Scte35AposWebDeliveryAllowedBehavior enum +func Scte35AposWebDeliveryAllowedBehavior_Values() []string { + return []string{ + Scte35AposWebDeliveryAllowedBehaviorFollow, + Scte35AposWebDeliveryAllowedBehaviorIgnore, + } +} + // Corresponds to the archive_allowed parameter. A value of ARCHIVE_NOT_ALLOWED // corresponds to 0 (false) in the SCTE-35 specification. If you include one // of the "restriction" flags then you must include all four of them. @@ -24340,6 +29440,14 @@ const ( Scte35ArchiveAllowedFlagArchiveAllowed = "ARCHIVE_ALLOWED" ) +// Scte35ArchiveAllowedFlag_Values returns all elements of the Scte35ArchiveAllowedFlag enum +func Scte35ArchiveAllowedFlag_Values() []string { + return []string{ + Scte35ArchiveAllowedFlagArchiveNotAllowed, + Scte35ArchiveAllowedFlagArchiveAllowed, + } +} + // Corresponds to the device_restrictions parameter in a segmentation_descriptor. // If you include one of the "restriction" flags then you must include all four // of them. @@ -24357,6 +29465,16 @@ const ( Scte35DeviceRestrictionsRestrictGroup2 = "RESTRICT_GROUP2" ) +// Scte35DeviceRestrictions_Values returns all elements of the Scte35DeviceRestrictions enum +func Scte35DeviceRestrictions_Values() []string { + return []string{ + Scte35DeviceRestrictionsNone, + Scte35DeviceRestrictionsRestrictGroup0, + Scte35DeviceRestrictionsRestrictGroup1, + Scte35DeviceRestrictionsRestrictGroup2, + } +} + // Corresponds to the no_regional_blackout_flag parameter. A value of REGIONAL_BLACKOUT // corresponds to 0 (false) in the SCTE-35 specification. If you include one // of the "restriction" flags then you must include all four of them. @@ -24368,6 +29486,14 @@ const ( Scte35NoRegionalBlackoutFlagNoRegionalBlackout = "NO_REGIONAL_BLACKOUT" ) +// Scte35NoRegionalBlackoutFlag_Values returns all elements of the Scte35NoRegionalBlackoutFlag enum +func Scte35NoRegionalBlackoutFlag_Values() []string { + return []string{ + Scte35NoRegionalBlackoutFlagRegionalBlackout, + Scte35NoRegionalBlackoutFlagNoRegionalBlackout, + } +} + // Corresponds to SCTE-35 segmentation_event_cancel_indicator. SEGMENTATION_EVENT_NOT_CANCELED // corresponds to 0 in the SCTE-35 specification and indicates that this is // an insertion request. 
SEGMENTATION_EVENT_CANCELED corresponds to 1 in the @@ -24381,6 +29507,14 @@ const ( Scte35SegmentationCancelIndicatorSegmentationEventCanceled = "SEGMENTATION_EVENT_CANCELED" ) +// Scte35SegmentationCancelIndicator_Values returns all elements of the Scte35SegmentationCancelIndicator enum +func Scte35SegmentationCancelIndicator_Values() []string { + return []string{ + Scte35SegmentationCancelIndicatorSegmentationEventNotCanceled, + Scte35SegmentationCancelIndicatorSegmentationEventCanceled, + } +} + // Scte35 Splice Insert No Regional Blackout Behavior const ( // Scte35SpliceInsertNoRegionalBlackoutBehaviorFollow is a Scte35SpliceInsertNoRegionalBlackoutBehavior enum value @@ -24390,6 +29524,14 @@ const ( Scte35SpliceInsertNoRegionalBlackoutBehaviorIgnore = "IGNORE" ) +// Scte35SpliceInsertNoRegionalBlackoutBehavior_Values returns all elements of the Scte35SpliceInsertNoRegionalBlackoutBehavior enum +func Scte35SpliceInsertNoRegionalBlackoutBehavior_Values() []string { + return []string{ + Scte35SpliceInsertNoRegionalBlackoutBehaviorFollow, + Scte35SpliceInsertNoRegionalBlackoutBehaviorIgnore, + } +} + // Scte35 Splice Insert Web Delivery Allowed Behavior const ( // Scte35SpliceInsertWebDeliveryAllowedBehaviorFollow is a Scte35SpliceInsertWebDeliveryAllowedBehavior enum value @@ -24399,6 +29541,14 @@ const ( Scte35SpliceInsertWebDeliveryAllowedBehaviorIgnore = "IGNORE" ) +// Scte35SpliceInsertWebDeliveryAllowedBehavior_Values returns all elements of the Scte35SpliceInsertWebDeliveryAllowedBehavior enum +func Scte35SpliceInsertWebDeliveryAllowedBehavior_Values() []string { + return []string{ + Scte35SpliceInsertWebDeliveryAllowedBehaviorFollow, + Scte35SpliceInsertWebDeliveryAllowedBehaviorIgnore, + } +} + // Corresponds to the web_delivery_allowed_flag parameter. A value of WEB_DELIVERY_NOT_ALLOWED // corresponds to 0 (false) in the SCTE-35 specification. If you include one // of the "restriction" flags then you must include all four of them. 
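The SCTE-35 "restriction" enums above all carry the same caveat: if one of the four flags is included, all four must be included. As an illustrative sketch only (the helper name and error wording are hypothetical, not code from this change set), the generated *_Values() slices give a simple membership check for user-supplied flag strings before they are passed to the API:

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/service/medialive"
    )

    // validateEnumValue is a hypothetical helper that reports whether v is one of
    // the strings returned by a generated *_Values() function.
    func validateEnumValue(v string, valid []string) error {
        for _, s := range valid {
            if v == s {
                return nil
            }
        }
        return fmt.Errorf("expected one of %v, got %q", valid, v)
    }

    // Usage, e.g. while expanding SCTE-35 delivery restrictions:
    //   err := validateEnumValue(archiveAllowed, medialive.Scte35ArchiveAllowedFlag_Values())
    //   err = validateEnumValue(deviceRestrictions, medialive.Scte35DeviceRestrictions_Values())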
@@ -24410,6 +29560,14 @@ const ( Scte35WebDeliveryAllowedFlagWebDeliveryAllowed = "WEB_DELIVERY_ALLOWED" ) +// Scte35WebDeliveryAllowedFlag_Values returns all elements of the Scte35WebDeliveryAllowedFlag enum +func Scte35WebDeliveryAllowedFlag_Values() []string { + return []string{ + Scte35WebDeliveryAllowedFlagWebDeliveryNotAllowed, + Scte35WebDeliveryAllowedFlagWebDeliveryAllowed, + } +} + // Smooth Group Audio Only Timecode Control const ( // SmoothGroupAudioOnlyTimecodeControlPassthrough is a SmoothGroupAudioOnlyTimecodeControl enum value @@ -24419,6 +29577,14 @@ const ( SmoothGroupAudioOnlyTimecodeControlUseConfiguredClock = "USE_CONFIGURED_CLOCK" ) +// SmoothGroupAudioOnlyTimecodeControl_Values returns all elements of the SmoothGroupAudioOnlyTimecodeControl enum +func SmoothGroupAudioOnlyTimecodeControl_Values() []string { + return []string{ + SmoothGroupAudioOnlyTimecodeControlPassthrough, + SmoothGroupAudioOnlyTimecodeControlUseConfiguredClock, + } +} + // Smooth Group Certificate Mode const ( // SmoothGroupCertificateModeSelfSigned is a SmoothGroupCertificateMode enum value @@ -24428,6 +29594,14 @@ const ( SmoothGroupCertificateModeVerifyAuthenticity = "VERIFY_AUTHENTICITY" ) +// SmoothGroupCertificateMode_Values returns all elements of the SmoothGroupCertificateMode enum +func SmoothGroupCertificateMode_Values() []string { + return []string{ + SmoothGroupCertificateModeSelfSigned, + SmoothGroupCertificateModeVerifyAuthenticity, + } +} + // Smooth Group Event Id Mode const ( // SmoothGroupEventIdModeNoEventId is a SmoothGroupEventIdMode enum value @@ -24440,6 +29614,15 @@ const ( SmoothGroupEventIdModeUseTimestamp = "USE_TIMESTAMP" ) +// SmoothGroupEventIdMode_Values returns all elements of the SmoothGroupEventIdMode enum +func SmoothGroupEventIdMode_Values() []string { + return []string{ + SmoothGroupEventIdModeNoEventId, + SmoothGroupEventIdModeUseConfigured, + SmoothGroupEventIdModeUseTimestamp, + } +} + // Smooth Group Event Stop Behavior const ( // SmoothGroupEventStopBehaviorNone is a SmoothGroupEventStopBehavior enum value @@ -24449,6 +29632,14 @@ const ( SmoothGroupEventStopBehaviorSendEos = "SEND_EOS" ) +// SmoothGroupEventStopBehavior_Values returns all elements of the SmoothGroupEventStopBehavior enum +func SmoothGroupEventStopBehavior_Values() []string { + return []string{ + SmoothGroupEventStopBehaviorNone, + SmoothGroupEventStopBehaviorSendEos, + } +} + // Smooth Group Segmentation Mode const ( // SmoothGroupSegmentationModeUseInputSegmentation is a SmoothGroupSegmentationMode enum value @@ -24458,6 +29649,14 @@ const ( SmoothGroupSegmentationModeUseSegmentDuration = "USE_SEGMENT_DURATION" ) +// SmoothGroupSegmentationMode_Values returns all elements of the SmoothGroupSegmentationMode enum +func SmoothGroupSegmentationMode_Values() []string { + return []string{ + SmoothGroupSegmentationModeUseInputSegmentation, + SmoothGroupSegmentationModeUseSegmentDuration, + } +} + // Smooth Group Sparse Track Type const ( // SmoothGroupSparseTrackTypeNone is a SmoothGroupSparseTrackType enum value @@ -24465,8 +29664,20 @@ const ( // SmoothGroupSparseTrackTypeScte35 is a SmoothGroupSparseTrackType enum value SmoothGroupSparseTrackTypeScte35 = "SCTE_35" + + // SmoothGroupSparseTrackTypeScte35WithoutSegmentation is a SmoothGroupSparseTrackType enum value + SmoothGroupSparseTrackTypeScte35WithoutSegmentation = "SCTE_35_WITHOUT_SEGMENTATION" ) +// SmoothGroupSparseTrackType_Values returns all elements of the SmoothGroupSparseTrackType enum +func SmoothGroupSparseTrackType_Values() 
[]string { + return []string{ + SmoothGroupSparseTrackTypeNone, + SmoothGroupSparseTrackTypeScte35, + SmoothGroupSparseTrackTypeScte35WithoutSegmentation, + } +} + // Smooth Group Stream Manifest Behavior const ( // SmoothGroupStreamManifestBehaviorDoNotSend is a SmoothGroupStreamManifestBehavior enum value @@ -24476,6 +29687,14 @@ const ( SmoothGroupStreamManifestBehaviorSend = "SEND" ) +// SmoothGroupStreamManifestBehavior_Values returns all elements of the SmoothGroupStreamManifestBehavior enum +func SmoothGroupStreamManifestBehavior_Values() []string { + return []string{ + SmoothGroupStreamManifestBehaviorDoNotSend, + SmoothGroupStreamManifestBehaviorSend, + } +} + // Smooth Group Timestamp Offset Mode const ( // SmoothGroupTimestampOffsetModeUseConfiguredOffset is a SmoothGroupTimestampOffsetMode enum value @@ -24485,6 +29704,129 @@ const ( SmoothGroupTimestampOffsetModeUseEventStartDate = "USE_EVENT_START_DATE" ) +// SmoothGroupTimestampOffsetMode_Values returns all elements of the SmoothGroupTimestampOffsetMode enum +func SmoothGroupTimestampOffsetMode_Values() []string { + return []string{ + SmoothGroupTimestampOffsetModeUseConfiguredOffset, + SmoothGroupTimestampOffsetModeUseEventStartDate, + } +} + +// Smpte2038 Data Preference +const ( + // Smpte2038DataPreferenceIgnore is a Smpte2038DataPreference enum value + Smpte2038DataPreferenceIgnore = "IGNORE" + + // Smpte2038DataPreferencePrefer is a Smpte2038DataPreference enum value + Smpte2038DataPreferencePrefer = "PREFER" +) + +// Smpte2038DataPreference_Values returns all elements of the Smpte2038DataPreference enum +func Smpte2038DataPreference_Values() []string { + return []string{ + Smpte2038DataPreferenceIgnore, + Smpte2038DataPreferencePrefer, + } +} + +// Temporal Filter Post Filter Sharpening +const ( + // TemporalFilterPostFilterSharpeningAuto is a TemporalFilterPostFilterSharpening enum value + TemporalFilterPostFilterSharpeningAuto = "AUTO" + + // TemporalFilterPostFilterSharpeningDisabled is a TemporalFilterPostFilterSharpening enum value + TemporalFilterPostFilterSharpeningDisabled = "DISABLED" + + // TemporalFilterPostFilterSharpeningEnabled is a TemporalFilterPostFilterSharpening enum value + TemporalFilterPostFilterSharpeningEnabled = "ENABLED" +) + +// TemporalFilterPostFilterSharpening_Values returns all elements of the TemporalFilterPostFilterSharpening enum +func TemporalFilterPostFilterSharpening_Values() []string { + return []string{ + TemporalFilterPostFilterSharpeningAuto, + TemporalFilterPostFilterSharpeningDisabled, + TemporalFilterPostFilterSharpeningEnabled, + } +} + +// Temporal Filter Strength +const ( + // TemporalFilterStrengthAuto is a TemporalFilterStrength enum value + TemporalFilterStrengthAuto = "AUTO" + + // TemporalFilterStrengthStrength1 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength1 = "STRENGTH_1" + + // TemporalFilterStrengthStrength2 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength2 = "STRENGTH_2" + + // TemporalFilterStrengthStrength3 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength3 = "STRENGTH_3" + + // TemporalFilterStrengthStrength4 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength4 = "STRENGTH_4" + + // TemporalFilterStrengthStrength5 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength5 = "STRENGTH_5" + + // TemporalFilterStrengthStrength6 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength6 = "STRENGTH_6" + + // TemporalFilterStrengthStrength7 is a 
TemporalFilterStrength enum value + TemporalFilterStrengthStrength7 = "STRENGTH_7" + + // TemporalFilterStrengthStrength8 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength8 = "STRENGTH_8" + + // TemporalFilterStrengthStrength9 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength9 = "STRENGTH_9" + + // TemporalFilterStrengthStrength10 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength10 = "STRENGTH_10" + + // TemporalFilterStrengthStrength11 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength11 = "STRENGTH_11" + + // TemporalFilterStrengthStrength12 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength12 = "STRENGTH_12" + + // TemporalFilterStrengthStrength13 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength13 = "STRENGTH_13" + + // TemporalFilterStrengthStrength14 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength14 = "STRENGTH_14" + + // TemporalFilterStrengthStrength15 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength15 = "STRENGTH_15" + + // TemporalFilterStrengthStrength16 is a TemporalFilterStrength enum value + TemporalFilterStrengthStrength16 = "STRENGTH_16" +) + +// TemporalFilterStrength_Values returns all elements of the TemporalFilterStrength enum +func TemporalFilterStrength_Values() []string { + return []string{ + TemporalFilterStrengthAuto, + TemporalFilterStrengthStrength1, + TemporalFilterStrengthStrength2, + TemporalFilterStrengthStrength3, + TemporalFilterStrengthStrength4, + TemporalFilterStrengthStrength5, + TemporalFilterStrengthStrength6, + TemporalFilterStrengthStrength7, + TemporalFilterStrengthStrength8, + TemporalFilterStrengthStrength9, + TemporalFilterStrengthStrength10, + TemporalFilterStrengthStrength11, + TemporalFilterStrengthStrength12, + TemporalFilterStrengthStrength13, + TemporalFilterStrengthStrength14, + TemporalFilterStrengthStrength15, + TemporalFilterStrengthStrength16, + } +} + // Timecode Config Source const ( // TimecodeConfigSourceEmbedded is a TimecodeConfigSource enum value @@ -24497,6 +29839,15 @@ const ( TimecodeConfigSourceZerobased = "ZEROBASED" ) +// TimecodeConfigSource_Values returns all elements of the TimecodeConfigSource enum +func TimecodeConfigSource_Values() []string { + return []string{ + TimecodeConfigSourceEmbedded, + TimecodeConfigSourceSystemclock, + TimecodeConfigSourceZerobased, + } +} + // Ttml Destination Style Control const ( // TtmlDestinationStyleControlPassthrough is a TtmlDestinationStyleControl enum value @@ -24506,6 +29857,14 @@ const ( TtmlDestinationStyleControlUseConfigured = "USE_CONFIGURED" ) +// TtmlDestinationStyleControl_Values returns all elements of the TtmlDestinationStyleControl enum +func TtmlDestinationStyleControl_Values() []string { + return []string{ + TtmlDestinationStyleControlPassthrough, + TtmlDestinationStyleControlUseConfigured, + } +} + // Udp Timed Metadata Id3 Frame const ( // UdpTimedMetadataId3FrameNone is a UdpTimedMetadataId3Frame enum value @@ -24518,6 +29877,15 @@ const ( UdpTimedMetadataId3FrameTdrl = "TDRL" ) +// UdpTimedMetadataId3Frame_Values returns all elements of the UdpTimedMetadataId3Frame enum +func UdpTimedMetadataId3Frame_Values() []string { + return []string{ + UdpTimedMetadataId3FrameNone, + UdpTimedMetadataId3FramePriv, + UdpTimedMetadataId3FrameTdrl, + } +} + // Video Description Respond To Afd const ( // VideoDescriptionRespondToAfdNone is a VideoDescriptionRespondToAfd enum value @@ -24530,6 +29898,15 @@ 
const ( VideoDescriptionRespondToAfdRespond = "RESPOND" ) +// VideoDescriptionRespondToAfd_Values returns all elements of the VideoDescriptionRespondToAfd enum +func VideoDescriptionRespondToAfd_Values() []string { + return []string{ + VideoDescriptionRespondToAfdNone, + VideoDescriptionRespondToAfdPassthrough, + VideoDescriptionRespondToAfdRespond, + } +} + // Video Description Scaling Behavior const ( // VideoDescriptionScalingBehaviorDefault is a VideoDescriptionScalingBehavior enum value @@ -24539,6 +29916,14 @@ const ( VideoDescriptionScalingBehaviorStretchToOutput = "STRETCH_TO_OUTPUT" ) +// VideoDescriptionScalingBehavior_Values returns all elements of the VideoDescriptionScalingBehavior enum +func VideoDescriptionScalingBehavior_Values() []string { + return []string{ + VideoDescriptionScalingBehaviorDefault, + VideoDescriptionScalingBehaviorStretchToOutput, + } +} + // Video Selector Color Space const ( // VideoSelectorColorSpaceFollow is a VideoSelectorColorSpace enum value @@ -24551,6 +29936,15 @@ const ( VideoSelectorColorSpaceRec709 = "REC_709" ) +// VideoSelectorColorSpace_Values returns all elements of the VideoSelectorColorSpace enum +func VideoSelectorColorSpace_Values() []string { + return []string{ + VideoSelectorColorSpaceFollow, + VideoSelectorColorSpaceRec601, + VideoSelectorColorSpaceRec709, + } +} + // Video Selector Color Space Usage const ( // VideoSelectorColorSpaceUsageFallback is a VideoSelectorColorSpaceUsage enum value @@ -24559,3 +29953,36 @@ const ( // VideoSelectorColorSpaceUsageForce is a VideoSelectorColorSpaceUsage enum value VideoSelectorColorSpaceUsageForce = "FORCE" ) + +// VideoSelectorColorSpaceUsage_Values returns all elements of the VideoSelectorColorSpaceUsage enum +func VideoSelectorColorSpaceUsage_Values() []string { + return []string{ + VideoSelectorColorSpaceUsageFallback, + VideoSelectorColorSpaceUsageForce, + } +} + +// Wav Coding Mode +const ( + // WavCodingModeCodingMode10 is a WavCodingMode enum value + WavCodingModeCodingMode10 = "CODING_MODE_1_0" + + // WavCodingModeCodingMode20 is a WavCodingMode enum value + WavCodingModeCodingMode20 = "CODING_MODE_2_0" + + // WavCodingModeCodingMode40 is a WavCodingMode enum value + WavCodingModeCodingMode40 = "CODING_MODE_4_0" + + // WavCodingModeCodingMode80 is a WavCodingMode enum value + WavCodingModeCodingMode80 = "CODING_MODE_8_0" +) + +// WavCodingMode_Values returns all elements of the WavCodingMode enum +func WavCodingMode_Values() []string { + return []string{ + WavCodingModeCodingMode10, + WavCodingModeCodingMode20, + WavCodingModeCodingMode40, + WavCodingModeCodingMode80, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go b/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go index 6b7c594f4..56eec0de5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/medialive/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/medialive/waiters.go index 36fb19d08..88b51e2ab 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/medialive/waiters.go +++ 
b/vendor/github.com/aws/aws-sdk-go/service/medialive/waiters.go @@ -89,7 +89,7 @@ func (c *MediaLive) WaitUntilChannelDeleted(input *DescribeChannelInput) error { func (c *MediaLive) WaitUntilChannelDeletedWithContext(ctx aws.Context, input *DescribeChannelInput, opts ...request.WaiterOption) error { w := request.Waiter{ Name: "WaitUntilChannelDeleted", - MaxAttempts: 20, + MaxAttempts: 84, Delay: request.ConstantWaiterDelay(5 * time.Second), Acceptors: []request.WaiterAcceptor{ { @@ -201,7 +201,7 @@ func (c *MediaLive) WaitUntilChannelStopped(input *DescribeChannelInput) error { func (c *MediaLive) WaitUntilChannelStoppedWithContext(ctx aws.Context, input *DescribeChannelInput, opts ...request.WaiterOption) error { w := request.Waiter{ Name: "WaitUntilChannelStopped", - MaxAttempts: 28, + MaxAttempts: 60, Delay: request.ConstantWaiterDelay(5 * time.Second), Acceptors: []request.WaiterAcceptor{ { @@ -238,6 +238,179 @@ func (c *MediaLive) WaitUntilChannelStoppedWithContext(ctx aws.Context, input *D return w.WaitWithContext(ctx) } +// WaitUntilInputAttached uses the MediaLive API operation +// DescribeInput to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *MediaLive) WaitUntilInputAttached(input *DescribeInputInput) error { + return c.WaitUntilInputAttachedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInputAttachedWithContext is an extended version of WaitUntilInputAttached. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) WaitUntilInputAttachedWithContext(ctx aws.Context, input *DescribeInputInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInputAttached", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "ATTACHED", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "DETACHED", + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 500, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInputInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInputRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilInputDeleted uses the MediaLive API operation +// DescribeInput to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *MediaLive) WaitUntilInputDeleted(input *DescribeInputInput) error { + return c.WaitUntilInputDeletedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInputDeletedWithContext is an extended version of WaitUntilInputDeleted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) WaitUntilInputDeletedWithContext(ctx aws.Context, input *DescribeInputInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInputDeleted", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "DELETED", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "DELETING", + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 500, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInputInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInputRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilInputDetached uses the MediaLive API operation +// DescribeInput to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *MediaLive) WaitUntilInputDetached(input *DescribeInputInput) error { + return c.WaitUntilInputDetachedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInputDetachedWithContext is an extended version of WaitUntilInputDetached. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaLive) WaitUntilInputDetachedWithContext(ctx aws.Context, input *DescribeInputInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInputDetached", + MaxAttempts: 84, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "DETACHED", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "CREATING", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "State", + Expected: "ATTACHED", + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 500, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeInputInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInputRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + // WaitUntilMultiplexCreated uses the MediaLive API operation // DescribeMultiplex to wait for a condition to be met before returning. 
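The DescribeInput waiters added above all follow the same shape: poll every 5 seconds, succeed on the terminal State, retry on transitional states or HTTP 500, and give up after MaxAttempts. A minimal sketch of how provider code might block on detachment before deleting an input (DescribeInputInput's InputId field is assumed from the DescribeInput API; it is not shown in this hunk):

package example

import (
	"context"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/medialive"
)

// waitForInputDetached blocks until the MediaLive input reaches DETACHED,
// using the waiter added above; the caller bounds the wait via ctx.
func waitForInputDetached(ctx context.Context, conn *medialive.MediaLive, inputID string) error {
	return conn.WaitUntilInputDetachedWithContext(ctx, &medialive.DescribeInputInput{
		InputId: aws.String(inputID),
	})
}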
// If the condition is not met within the max attempt window, an error will diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/api.go b/vendor/github.com/aws/aws-sdk-go/service/mediapackage/api.go index 2f4685813..bb818fd16 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediapackage/api.go @@ -12,6 +12,94 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restjson" ) +const opConfigureLogs = "ConfigureLogs" + +// ConfigureLogsRequest generates a "aws/request.Request" representing the +// client's request for the ConfigureLogs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ConfigureLogs for more information on using the ConfigureLogs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ConfigureLogsRequest method. +// req, resp := client.ConfigureLogsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/ConfigureLogs +func (c *MediaPackage) ConfigureLogsRequest(input *ConfigureLogsInput) (req *request.Request, output *ConfigureLogsOutput) { + op := &request.Operation{ + Name: opConfigureLogs, + HTTPMethod: "PUT", + HTTPPath: "/channels/{id}/configure_logs", + } + + if input == nil { + input = &ConfigureLogsInput{} + } + + output = &ConfigureLogsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ConfigureLogs API operation for AWS Elemental MediaPackage. +// +// Changes the Channel's properities to configure log subscription +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaPackage's +// API operation ConfigureLogs for usage and error information. +// +// Returned Error Types: +// * UnprocessableEntityException +// +// * InternalServerErrorException +// +// * ForbiddenException +// +// * NotFoundException +// +// * ServiceUnavailableException +// +// * TooManyRequestsException +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediapackage-2017-10-12/ConfigureLogs +func (c *MediaPackage) ConfigureLogs(input *ConfigureLogsInput) (*ConfigureLogsOutput, error) { + req, out := c.ConfigureLogsRequest(input) + return out, req.Send() +} + +// ConfigureLogsWithContext is the same as ConfigureLogs with the addition of +// the ability to pass a context and additional request options. +// +// See ConfigureLogs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *MediaPackage) ConfigureLogsWithContext(ctx aws.Context, input *ConfigureLogsInput, opts ...request.Option) (*ConfigureLogsOutput, error) { + req, out := c.ConfigureLogsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateChannel = "CreateChannel" // CreateChannelRequest generates a "aws/request.Request" representing the @@ -1803,12 +1891,18 @@ type Channel struct { // A short text description of the Channel. Description *string `locationName:"description" type:"string"` + // Configure egress access logging. + EgressAccessLogs *EgressAccessLogs `locationName:"egressAccessLogs" type:"structure"` + // An HTTP Live Streaming (HLS) ingest resource configuration. HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` // The ID of the Channel. Id *string `locationName:"id" type:"string"` + // Configure ingress access logging. + IngressAccessLogs *IngressAccessLogs `locationName:"ingressAccessLogs" type:"structure"` + // A collection of tags associated with a resource Tags map[string]*string `locationName:"tags" type:"map"` } @@ -1835,6 +1929,12 @@ func (s *Channel) SetDescription(v string) *Channel { return s } +// SetEgressAccessLogs sets the EgressAccessLogs field's value. +func (s *Channel) SetEgressAccessLogs(v *EgressAccessLogs) *Channel { + s.EgressAccessLogs = v + return s +} + // SetHlsIngest sets the HlsIngest field's value. func (s *Channel) SetHlsIngest(v *HlsIngest) *Channel { s.HlsIngest = v @@ -1847,6 +1947,12 @@ func (s *Channel) SetId(v string) *Channel { return s } +// SetIngressAccessLogs sets the IngressAccessLogs field's value. +func (s *Channel) SetIngressAccessLogs(v *IngressAccessLogs) *Channel { + s.IngressAccessLogs = v + return s +} + // SetTags sets the Tags field's value. func (s *Channel) SetTags(v map[string]*string) *Channel { s.Tags = v @@ -2056,6 +2162,137 @@ func (s *CmafPackageCreateOrUpdateParameters) SetStreamSelection(v *StreamSelect return s } +type ConfigureLogsInput struct { + _ struct{} `type:"structure"` + + // Configure egress access logging. + EgressAccessLogs *EgressAccessLogs `locationName:"egressAccessLogs" type:"structure"` + + // Id is a required field + Id *string `location:"uri" locationName:"id" type:"string" required:"true"` + + // Configure ingress access logging. + IngressAccessLogs *IngressAccessLogs `locationName:"ingressAccessLogs" type:"structure"` +} + +// String returns the string representation +func (s ConfigureLogsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigureLogsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConfigureLogsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConfigureLogsInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEgressAccessLogs sets the EgressAccessLogs field's value. +func (s *ConfigureLogsInput) SetEgressAccessLogs(v *EgressAccessLogs) *ConfigureLogsInput { + s.EgressAccessLogs = v + return s +} + +// SetId sets the Id field's value. +func (s *ConfigureLogsInput) SetId(v string) *ConfigureLogsInput { + s.Id = &v + return s +} + +// SetIngressAccessLogs sets the IngressAccessLogs field's value. 
+func (s *ConfigureLogsInput) SetIngressAccessLogs(v *IngressAccessLogs) *ConfigureLogsInput { + s.IngressAccessLogs = v + return s +} + +type ConfigureLogsOutput struct { + _ struct{} `type:"structure"` + + Arn *string `locationName:"arn" type:"string"` + + Description *string `locationName:"description" type:"string"` + + // Configure egress access logging. + EgressAccessLogs *EgressAccessLogs `locationName:"egressAccessLogs" type:"structure"` + + // An HTTP Live Streaming (HLS) ingest resource configuration. + HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` + + Id *string `locationName:"id" type:"string"` + + // Configure ingress access logging. + IngressAccessLogs *IngressAccessLogs `locationName:"ingressAccessLogs" type:"structure"` + + // A collection of tags associated with a resource + Tags map[string]*string `locationName:"tags" type:"map"` +} + +// String returns the string representation +func (s ConfigureLogsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConfigureLogsOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *ConfigureLogsOutput) SetArn(v string) *ConfigureLogsOutput { + s.Arn = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ConfigureLogsOutput) SetDescription(v string) *ConfigureLogsOutput { + s.Description = &v + return s +} + +// SetEgressAccessLogs sets the EgressAccessLogs field's value. +func (s *ConfigureLogsOutput) SetEgressAccessLogs(v *EgressAccessLogs) *ConfigureLogsOutput { + s.EgressAccessLogs = v + return s +} + +// SetHlsIngest sets the HlsIngest field's value. +func (s *ConfigureLogsOutput) SetHlsIngest(v *HlsIngest) *ConfigureLogsOutput { + s.HlsIngest = v + return s +} + +// SetId sets the Id field's value. +func (s *ConfigureLogsOutput) SetId(v string) *ConfigureLogsOutput { + s.Id = &v + return s +} + +// SetIngressAccessLogs sets the IngressAccessLogs field's value. +func (s *ConfigureLogsOutput) SetIngressAccessLogs(v *IngressAccessLogs) *ConfigureLogsOutput { + s.IngressAccessLogs = v + return s +} + +// SetTags sets the Tags field's value. +func (s *ConfigureLogsOutput) SetTags(v map[string]*string) *ConfigureLogsOutput { + s.Tags = v + return s +} + type CreateChannelInput struct { _ struct{} `type:"structure"` @@ -2116,11 +2353,17 @@ type CreateChannelOutput struct { Description *string `locationName:"description" type:"string"` + // Configure egress access logging. + EgressAccessLogs *EgressAccessLogs `locationName:"egressAccessLogs" type:"structure"` + // An HTTP Live Streaming (HLS) ingest resource configuration. HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` Id *string `locationName:"id" type:"string"` + // Configure ingress access logging. + IngressAccessLogs *IngressAccessLogs `locationName:"ingressAccessLogs" type:"structure"` + // A collection of tags associated with a resource Tags map[string]*string `locationName:"tags" type:"map"` } @@ -2147,6 +2390,12 @@ func (s *CreateChannelOutput) SetDescription(v string) *CreateChannelOutput { return s } +// SetEgressAccessLogs sets the EgressAccessLogs field's value. +func (s *CreateChannelOutput) SetEgressAccessLogs(v *EgressAccessLogs) *CreateChannelOutput { + s.EgressAccessLogs = v + return s +} + // SetHlsIngest sets the HlsIngest field's value. 
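ConfigureLogs takes the same EgressAccessLogs and IngressAccessLogs structures that now appear on the Channel shapes, keyed by the channel Id. A minimal sketch of enabling both log groups on an existing channel (the helper name and the log group names are placeholders):

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediapackage"
)

// configureChannelLogs enables both CloudWatch access-log groups for a
// MediaPackage channel via the new ConfigureLogs operation.
func configureChannelLogs(ctx aws.Context, conn *mediapackage.MediaPackage, channelID string) (*mediapackage.ConfigureLogsOutput, error) {
	return conn.ConfigureLogsWithContext(ctx, &mediapackage.ConfigureLogsInput{
		Id: aws.String(channelID),
		EgressAccessLogs: &mediapackage.EgressAccessLogs{
			LogGroupName: aws.String("/aws/MediaPackage/example-egress"), // placeholder group name
		},
		IngressAccessLogs: &mediapackage.IngressAccessLogs{
			LogGroupName: aws.String("/aws/MediaPackage/example-ingress"), // placeholder group name
		},
	})
}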
func (s *CreateChannelOutput) SetHlsIngest(v *HlsIngest) *CreateChannelOutput { s.HlsIngest = v @@ -2159,6 +2408,12 @@ func (s *CreateChannelOutput) SetId(v string) *CreateChannelOutput { return s } +// SetIngressAccessLogs sets the IngressAccessLogs field's value. +func (s *CreateChannelOutput) SetIngressAccessLogs(v *IngressAccessLogs) *CreateChannelOutput { + s.IngressAccessLogs = v + return s +} + // SetTags sets the Tags field's value. func (s *CreateChannelOutput) SetTags(v map[string]*string) *CreateChannelOutput { s.Tags = v @@ -2791,6 +3046,14 @@ type DashPackage struct { // Duration (in seconds) to delay live content before presentation. SuggestedPresentationDelaySeconds *int64 `locationName:"suggestedPresentationDelaySeconds" type:"integer"` + + // Determines the type of UTCTiming included in the Media Presentation Description + // (MPD) + UtcTiming *string `locationName:"utcTiming" type:"string" enum:"UtcTiming"` + + // Specifies the value attribute of the UTCTiming field when utcTiming is set + // to HTTP-ISO or HTTP-HEAD + UtcTimingUri *string `locationName:"utcTimingUri" type:"string"` } // String returns the string representation @@ -2896,6 +3159,18 @@ func (s *DashPackage) SetSuggestedPresentationDelaySeconds(v int64) *DashPackage return s } +// SetUtcTiming sets the UtcTiming field's value. +func (s *DashPackage) SetUtcTiming(v string) *DashPackage { + s.UtcTiming = &v + return s +} + +// SetUtcTimingUri sets the UtcTimingUri field's value. +func (s *DashPackage) SetUtcTimingUri(v string) *DashPackage { + s.UtcTimingUri = &v + return s +} + type DeleteChannelInput struct { _ struct{} `type:"structure"` @@ -3048,11 +3323,17 @@ type DescribeChannelOutput struct { Description *string `locationName:"description" type:"string"` + // Configure egress access logging. + EgressAccessLogs *EgressAccessLogs `locationName:"egressAccessLogs" type:"structure"` + // An HTTP Live Streaming (HLS) ingest resource configuration. HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` Id *string `locationName:"id" type:"string"` + // Configure ingress access logging. + IngressAccessLogs *IngressAccessLogs `locationName:"ingressAccessLogs" type:"structure"` + // A collection of tags associated with a resource Tags map[string]*string `locationName:"tags" type:"map"` } @@ -3079,6 +3360,12 @@ func (s *DescribeChannelOutput) SetDescription(v string) *DescribeChannelOutput return s } +// SetEgressAccessLogs sets the EgressAccessLogs field's value. +func (s *DescribeChannelOutput) SetEgressAccessLogs(v *EgressAccessLogs) *DescribeChannelOutput { + s.EgressAccessLogs = v + return s +} + // SetHlsIngest sets the HlsIngest field's value. func (s *DescribeChannelOutput) SetHlsIngest(v *HlsIngest) *DescribeChannelOutput { s.HlsIngest = v @@ -3091,6 +3378,12 @@ func (s *DescribeChannelOutput) SetId(v string) *DescribeChannelOutput { return s } +// SetIngressAccessLogs sets the IngressAccessLogs field's value. +func (s *DescribeChannelOutput) SetIngressAccessLogs(v *IngressAccessLogs) *DescribeChannelOutput { + s.IngressAccessLogs = v + return s +} + // SetTags sets the Tags field's value. func (s *DescribeChannelOutput) SetTags(v map[string]*string) *DescribeChannelOutput { s.Tags = v @@ -3411,9 +3704,33 @@ func (s *DescribeOriginEndpointOutput) SetWhitelist(v []*string) *DescribeOrigin return s } +// Configure egress access logging. +type EgressAccessLogs struct { + _ struct{} `type:"structure"` + + // Customize the log group name. 
+ LogGroupName *string `locationName:"logGroupName" type:"string"` +} + +// String returns the string representation +func (s EgressAccessLogs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EgressAccessLogs) GoString() string { + return s.String() +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *EgressAccessLogs) SetLogGroupName(v string) *EgressAccessLogs { + s.LogGroupName = &v + return s +} + type ForbiddenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3430,17 +3747,17 @@ func (s ForbiddenException) GoString() string { func newErrorForbiddenException(v protocol.ResponseMetadata) error { return &ForbiddenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ForbiddenException) Code() string { +func (s *ForbiddenException) Code() string { return "ForbiddenException" } // Message returns the exception's message. -func (s ForbiddenException) Message() string { +func (s *ForbiddenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3448,22 +3765,22 @@ func (s ForbiddenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ForbiddenException) OrigErr() error { +func (s *ForbiddenException) OrigErr() error { return nil } -func (s ForbiddenException) Error() string { +func (s *ForbiddenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ForbiddenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ForbiddenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ForbiddenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ForbiddenException) RequestID() string { + return s.RespMetadata.RequestID } // A HarvestJob resource configuration @@ -3681,7 +3998,10 @@ type HlsManifest struct { // will omit all SCTE-35 ad markers from the output."PASSTHROUGH" causes the // manifest to contain a copy of the SCTE-35 admarkers (comments) taken directly // from the input HTTP Live Streaming (HLS) manifest."SCTE35_ENHANCED" generates - // ad markers and blackout tags based on SCTE-35messages in the input source. + // ad markers and blackout tags based on SCTE-35messages in the input source."DATERANGE" + // inserts EXT-X-DATERANGE tags to signal ad and program transition events in + // HLS and CMAF manifests. For this option, you must set a programDateTimeIntervalSeconds + // value that is greater than 0. AdMarkers *string `locationName:"adMarkers" type:"string" enum:"AdMarkers"` // The ID of the manifest. The ID must be unique within the OriginEndpoint and @@ -3785,7 +4105,10 @@ type HlsManifestCreateOrUpdateParameters struct { // will omit all SCTE-35 ad markers from the output."PASSTHROUGH" causes the // manifest to contain a copy of the SCTE-35 admarkers (comments) taken directly // from the input HTTP Live Streaming (HLS) manifest."SCTE35_ENHANCED" generates - // ad markers and blackout tags based on SCTE-35messages in the input source. 
+ // ad markers and blackout tags based on SCTE-35messages in the input source."DATERANGE" + // inserts EXT-X-DATERANGE tags to signal ad and program transition events in + // HLS and CMAF manifests. For this option, you must set a programDateTimeIntervalSeconds + // value that is greater than 0. AdMarkers *string `locationName:"adMarkers" type:"string" enum:"AdMarkers"` // A list of SCTE-35 message types that are treated as ad markers in the output. @@ -3922,7 +4245,10 @@ type HlsPackage struct { // will omit all SCTE-35 ad markers from the output."PASSTHROUGH" causes the // manifest to contain a copy of the SCTE-35 admarkers (comments) taken directly // from the input HTTP Live Streaming (HLS) manifest."SCTE35_ENHANCED" generates - // ad markers and blackout tags based on SCTE-35messages in the input source. + // ad markers and blackout tags based on SCTE-35messages in the input source."DATERANGE" + // inserts EXT-X-DATERANGE tags to signal ad and program transition events in + // HLS and CMAF manifests. For this option, you must set a programDateTimeIntervalSeconds + // value that is greater than 0. AdMarkers *string `locationName:"adMarkers" type:"string" enum:"AdMarkers"` // A list of SCTE-35 message types that are treated as ad markers in the output. @@ -4119,9 +4445,33 @@ func (s *IngestEndpoint) SetUsername(v string) *IngestEndpoint { return s } +// Configure ingress access logging. +type IngressAccessLogs struct { + _ struct{} `type:"structure"` + + // Customize the log group name. + LogGroupName *string `locationName:"logGroupName" type:"string"` +} + +// String returns the string representation +func (s IngressAccessLogs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IngressAccessLogs) GoString() string { + return s.String() +} + +// SetLogGroupName sets the LogGroupName field's value. +func (s *IngressAccessLogs) SetLogGroupName(v string) *IngressAccessLogs { + s.LogGroupName = &v + return s +} + type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4138,17 +4488,17 @@ func (s InternalServerErrorException) GoString() string { func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { return &InternalServerErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerErrorException) Code() string { +func (s *InternalServerErrorException) Code() string { return "InternalServerErrorException" } // Message returns the exception's message. -func (s InternalServerErrorException) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4156,22 +4506,22 @@ func (s InternalServerErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerErrorException) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s InternalServerErrorException) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
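The AdMarkers documentation above now covers the DATERANGE option, which requires a programDateTimeIntervalSeconds greater than 0; the matching AdMarkersDaterange constant is added further down in this file. A sketch of requesting it on an HlsPackage (the ProgramDateTimeIntervalSeconds field name is assumed from the service model, as it is not shown in this hunk):

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediapackage"
)

// Requesting DATERANGE ad markers on an HLS package; per the updated
// documentation above, the program date time interval must be greater than 0
// when this option is selected.
var hlsPackageWithDaterange = &mediapackage.HlsPackage{
	AdMarkers:                      aws.String(mediapackage.AdMarkersDaterange),
	ProgramDateTimeIntervalSeconds: aws.Int64(60), // assumed field name; any value > 0
}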
-func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID } type ListChannelsInput struct { @@ -4590,8 +4940,8 @@ func (s *MssPackage) SetStreamSelection(v *StreamSelection) *MssPackage { } type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4608,17 +4958,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4626,22 +4976,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // An OriginEndpoint resource configuration. @@ -4857,11 +5207,17 @@ type RotateChannelCredentialsOutput struct { Description *string `locationName:"description" type:"string"` + // Configure egress access logging. + EgressAccessLogs *EgressAccessLogs `locationName:"egressAccessLogs" type:"structure"` + // An HTTP Live Streaming (HLS) ingest resource configuration. HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` Id *string `locationName:"id" type:"string"` + // Configure ingress access logging. + IngressAccessLogs *IngressAccessLogs `locationName:"ingressAccessLogs" type:"structure"` + // A collection of tags associated with a resource Tags map[string]*string `locationName:"tags" type:"map"` } @@ -4888,6 +5244,12 @@ func (s *RotateChannelCredentialsOutput) SetDescription(v string) *RotateChannel return s } +// SetEgressAccessLogs sets the EgressAccessLogs field's value. +func (s *RotateChannelCredentialsOutput) SetEgressAccessLogs(v *EgressAccessLogs) *RotateChannelCredentialsOutput { + s.EgressAccessLogs = v + return s +} + // SetHlsIngest sets the HlsIngest field's value. 
func (s *RotateChannelCredentialsOutput) SetHlsIngest(v *HlsIngest) *RotateChannelCredentialsOutput { s.HlsIngest = v @@ -4900,6 +5262,12 @@ func (s *RotateChannelCredentialsOutput) SetId(v string) *RotateChannelCredentia return s } +// SetIngressAccessLogs sets the IngressAccessLogs field's value. +func (s *RotateChannelCredentialsOutput) SetIngressAccessLogs(v *IngressAccessLogs) *RotateChannelCredentialsOutput { + s.IngressAccessLogs = v + return s +} + // SetTags sets the Tags field's value. func (s *RotateChannelCredentialsOutput) SetTags(v map[string]*string) *RotateChannelCredentialsOutput { s.Tags = v @@ -4967,11 +5335,17 @@ type RotateIngestEndpointCredentialsOutput struct { Description *string `locationName:"description" type:"string"` + // Configure egress access logging. + EgressAccessLogs *EgressAccessLogs `locationName:"egressAccessLogs" type:"structure"` + // An HTTP Live Streaming (HLS) ingest resource configuration. HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` Id *string `locationName:"id" type:"string"` + // Configure ingress access logging. + IngressAccessLogs *IngressAccessLogs `locationName:"ingressAccessLogs" type:"structure"` + // A collection of tags associated with a resource Tags map[string]*string `locationName:"tags" type:"map"` } @@ -4998,6 +5372,12 @@ func (s *RotateIngestEndpointCredentialsOutput) SetDescription(v string) *Rotate return s } +// SetEgressAccessLogs sets the EgressAccessLogs field's value. +func (s *RotateIngestEndpointCredentialsOutput) SetEgressAccessLogs(v *EgressAccessLogs) *RotateIngestEndpointCredentialsOutput { + s.EgressAccessLogs = v + return s +} + // SetHlsIngest sets the HlsIngest field's value. func (s *RotateIngestEndpointCredentialsOutput) SetHlsIngest(v *HlsIngest) *RotateIngestEndpointCredentialsOutput { s.HlsIngest = v @@ -5010,6 +5390,12 @@ func (s *RotateIngestEndpointCredentialsOutput) SetId(v string) *RotateIngestEnd return s } +// SetIngressAccessLogs sets the IngressAccessLogs field's value. +func (s *RotateIngestEndpointCredentialsOutput) SetIngressAccessLogs(v *IngressAccessLogs) *RotateIngestEndpointCredentialsOutput { + s.IngressAccessLogs = v + return s +} + // SetTags sets the Tags field's value. func (s *RotateIngestEndpointCredentialsOutput) SetTags(v map[string]*string) *RotateIngestEndpointCredentialsOutput { s.Tags = v @@ -5086,8 +5472,8 @@ func (s *S3Destination) SetRoleArn(v string) *S3Destination { } type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5104,17 +5490,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5122,22 +5508,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // A configuration for accessing an external Secure Packager and Encoder Key @@ -5342,8 +5728,8 @@ func (s TagResourceOutput) GoString() string { } type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5360,17 +5746,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5378,27 +5764,27 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } type UnprocessableEntityException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5415,17 +5801,17 @@ func (s UnprocessableEntityException) GoString() string { func newErrorUnprocessableEntityException(v protocol.ResponseMetadata) error { return &UnprocessableEntityException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnprocessableEntityException) Code() string { +func (s *UnprocessableEntityException) Code() string { return "UnprocessableEntityException" } // Message returns the exception's message. 
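The exception rewrites in this file move the modeled error types to pointer receivers and export RespMetadata, so a caller can match them with errors.As instead of value type assertions. A minimal sketch of that pattern (the DescribeChannel call and its Id field are assumed from the service API and serve only as a concrete operation to demonstrate against):

package example

import (
	"errors"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/mediapackage"
)

// channelExists matches the modeled NotFoundException with errors.As, which
// works now that the exception types use pointer receivers.
func channelExists(ctx aws.Context, conn *mediapackage.MediaPackage, id string) (bool, error) {
	_, err := conn.DescribeChannelWithContext(ctx, &mediapackage.DescribeChannelInput{
		Id: aws.String(id),
	})
	var nfe *mediapackage.NotFoundException
	if errors.As(err, &nfe) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}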
-func (s UnprocessableEntityException) Message() string { +func (s *UnprocessableEntityException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5433,22 +5819,22 @@ func (s UnprocessableEntityException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnprocessableEntityException) OrigErr() error { +func (s *UnprocessableEntityException) OrigErr() error { return nil } -func (s UnprocessableEntityException) Error() string { +func (s *UnprocessableEntityException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnprocessableEntityException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnprocessableEntityException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnprocessableEntityException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnprocessableEntityException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -5570,11 +5956,17 @@ type UpdateChannelOutput struct { Description *string `locationName:"description" type:"string"` + // Configure egress access logging. + EgressAccessLogs *EgressAccessLogs `locationName:"egressAccessLogs" type:"structure"` + // An HTTP Live Streaming (HLS) ingest resource configuration. HlsIngest *HlsIngest `locationName:"hlsIngest" type:"structure"` Id *string `locationName:"id" type:"string"` + // Configure ingress access logging. + IngressAccessLogs *IngressAccessLogs `locationName:"ingressAccessLogs" type:"structure"` + // A collection of tags associated with a resource Tags map[string]*string `locationName:"tags" type:"map"` } @@ -5601,6 +5993,12 @@ func (s *UpdateChannelOutput) SetDescription(v string) *UpdateChannelOutput { return s } +// SetEgressAccessLogs sets the EgressAccessLogs field's value. +func (s *UpdateChannelOutput) SetEgressAccessLogs(v *EgressAccessLogs) *UpdateChannelOutput { + s.EgressAccessLogs = v + return s +} + // SetHlsIngest sets the HlsIngest field's value. func (s *UpdateChannelOutput) SetHlsIngest(v *HlsIngest) *UpdateChannelOutput { s.HlsIngest = v @@ -5613,6 +6011,12 @@ func (s *UpdateChannelOutput) SetId(v string) *UpdateChannelOutput { return s } +// SetIngressAccessLogs sets the IngressAccessLogs field's value. +func (s *UpdateChannelOutput) SetIngressAccessLogs(v *IngressAccessLogs) *UpdateChannelOutput { + s.IngressAccessLogs = v + return s +} + // SetTags sets the Tags field's value. func (s *UpdateChannelOutput) SetTags(v map[string]*string) *UpdateChannelOutput { s.Tags = v @@ -5933,8 +6337,21 @@ const ( // AdMarkersPassthrough is a AdMarkers enum value AdMarkersPassthrough = "PASSTHROUGH" + + // AdMarkersDaterange is a AdMarkers enum value + AdMarkersDaterange = "DATERANGE" ) +// AdMarkers_Values returns all elements of the AdMarkers enum +func AdMarkers_Values() []string { + return []string{ + AdMarkersNone, + AdMarkersScte35Enhanced, + AdMarkersPassthrough, + AdMarkersDaterange, + } +} + // This setting allows the delivery restriction flags on SCTE-35 segmentation // descriptors todetermine whether a message signals an ad. Choosing "NONE" // means no SCTE-35 messages becomeads. 
Choosing "RESTRICTED" means SCTE-35 @@ -5959,6 +6376,16 @@ const ( AdsOnDeliveryRestrictionsBoth = "BOTH" ) +// AdsOnDeliveryRestrictions_Values returns all elements of the AdsOnDeliveryRestrictions enum +func AdsOnDeliveryRestrictions_Values() []string { + return []string{ + AdsOnDeliveryRestrictionsNone, + AdsOnDeliveryRestrictionsRestricted, + AdsOnDeliveryRestrictionsUnrestricted, + AdsOnDeliveryRestrictionsBoth, + } +} + const ( // EncryptionMethodAes128 is a EncryptionMethod enum value EncryptionMethodAes128 = "AES_128" @@ -5967,6 +6394,14 @@ const ( EncryptionMethodSampleAes = "SAMPLE_AES" ) +// EncryptionMethod_Values returns all elements of the EncryptionMethod enum +func EncryptionMethod_Values() []string { + return []string{ + EncryptionMethodAes128, + EncryptionMethodSampleAes, + } +} + const ( // ManifestLayoutFull is a ManifestLayout enum value ManifestLayoutFull = "FULL" @@ -5975,6 +6410,14 @@ const ( ManifestLayoutCompact = "COMPACT" ) +// ManifestLayout_Values returns all elements of the ManifestLayout enum +func ManifestLayout_Values() []string { + return []string{ + ManifestLayoutFull, + ManifestLayoutCompact, + } +} + const ( // OriginationAllow is a Origination enum value OriginationAllow = "ALLOW" @@ -5983,6 +6426,14 @@ const ( OriginationDeny = "DENY" ) +// Origination_Values returns all elements of the Origination enum +func Origination_Values() []string { + return []string{ + OriginationAllow, + OriginationDeny, + } +} + const ( // PlaylistTypeNone is a PlaylistType enum value PlaylistTypeNone = "NONE" @@ -5994,6 +6445,15 @@ const ( PlaylistTypeVod = "VOD" ) +// PlaylistType_Values returns all elements of the PlaylistType enum +func PlaylistType_Values() []string { + return []string{ + PlaylistTypeNone, + PlaylistTypeEvent, + PlaylistTypeVod, + } +} + const ( // ProfileNone is a Profile enum value ProfileNone = "NONE" @@ -6002,6 +6462,14 @@ const ( ProfileHbbtv15 = "HBBTV_1_5" ) +// Profile_Values returns all elements of the Profile enum +func Profile_Values() []string { + return []string{ + ProfileNone, + ProfileHbbtv15, + } +} + const ( // SegmentTemplateFormatNumberWithTimeline is a SegmentTemplateFormat enum value SegmentTemplateFormatNumberWithTimeline = "NUMBER_WITH_TIMELINE" @@ -6013,6 +6481,15 @@ const ( SegmentTemplateFormatNumberWithDuration = "NUMBER_WITH_DURATION" ) +// SegmentTemplateFormat_Values returns all elements of the SegmentTemplateFormat enum +func SegmentTemplateFormat_Values() []string { + return []string{ + SegmentTemplateFormatNumberWithTimeline, + SegmentTemplateFormatTimeWithTimeline, + SegmentTemplateFormatNumberWithDuration, + } +} + const ( // StatusInProgress is a Status enum value StatusInProgress = "IN_PROGRESS" @@ -6024,6 +6501,15 @@ const ( StatusFailed = "FAILED" ) +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusInProgress, + StatusSucceeded, + StatusFailed, + } +} + const ( // StreamOrderOriginal is a StreamOrder enum value StreamOrderOriginal = "ORIGINAL" @@ -6035,6 +6521,35 @@ const ( StreamOrderVideoBitrateDescending = "VIDEO_BITRATE_DESCENDING" ) +// StreamOrder_Values returns all elements of the StreamOrder enum +func StreamOrder_Values() []string { + return []string{ + StreamOrderOriginal, + StreamOrderVideoBitrateAscending, + StreamOrderVideoBitrateDescending, + } +} + +const ( + // UtcTimingNone is a UtcTiming enum value + UtcTimingNone = "NONE" + + // UtcTimingHttpHead is a UtcTiming enum value + UtcTimingHttpHead = "HTTP-HEAD" + + // 
UtcTimingHttpIso is a UtcTiming enum value + UtcTimingHttpIso = "HTTP-ISO" +) + +// UtcTiming_Values returns all elements of the UtcTiming enum +func UtcTiming_Values() []string { + return []string{ + UtcTimingNone, + UtcTimingHttpHead, + UtcTimingHttpIso, + } +} + const ( // __AdTriggersElementSpliceInsert is a __AdTriggersElement enum value __AdTriggersElementSpliceInsert = "SPLICE_INSERT" @@ -6061,7 +6576,28 @@ const ( __AdTriggersElementDistributorOverlayPlacementOpportunity = "DISTRIBUTOR_OVERLAY_PLACEMENT_OPPORTUNITY" ) +// __AdTriggersElement_Values returns all elements of the __AdTriggersElement enum +func __AdTriggersElement_Values() []string { + return []string{ + __AdTriggersElementSpliceInsert, + __AdTriggersElementBreak, + __AdTriggersElementProviderAdvertisement, + __AdTriggersElementDistributorAdvertisement, + __AdTriggersElementProviderPlacementOpportunity, + __AdTriggersElementDistributorPlacementOpportunity, + __AdTriggersElementProviderOverlayPlacementOpportunity, + __AdTriggersElementDistributorOverlayPlacementOpportunity, + } +} + const ( // __PeriodTriggersElementAds is a __PeriodTriggersElement enum value __PeriodTriggersElementAds = "ADS" ) + +// __PeriodTriggersElement_Values returns all elements of the __PeriodTriggersElement enum +func __PeriodTriggersElement_Values() []string { + return []string{ + __PeriodTriggersElementAds, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go index 06b65cc8b..ea0ee0a92 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastore/api.go b/vendor/github.com/aws/aws-sdk-go/service/mediastore/api.go index daa0851e7..edee10620 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastore/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediastore/api.go @@ -465,6 +465,98 @@ func (c *MediaStore) DeleteLifecyclePolicyWithContext(ctx aws.Context, input *De return out, req.Send() } +const opDeleteMetricPolicy = "DeleteMetricPolicy" + +// DeleteMetricPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMetricPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteMetricPolicy for more information on using the DeleteMetricPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteMetricPolicyRequest method. 
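Together with the UtcTiming and UtcTimingUri fields added to DashPackage earlier in this file, the new UtcTiming constants let callers pin the MPD's UTCTiming element. A small sketch using the generated setters (the URI is a placeholder and only applies when utcTiming is HTTP-ISO or HTTP-HEAD):

package example

import (
	"github.com/aws/aws-sdk-go/service/mediapackage"
)

// Pinning the MPD UTCTiming element to an HTTP-ISO reference clock.
var dashPackageWithUtcTiming = (&mediapackage.DashPackage{}).
	SetUtcTiming(mediapackage.UtcTimingHttpIso).
	SetUtcTimingUri("https://example.com/time?iso") // placeholder reference clock URI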
+// req, resp := client.DeleteMetricPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-2017-09-01/DeleteMetricPolicy +func (c *MediaStore) DeleteMetricPolicyRequest(input *DeleteMetricPolicyInput) (req *request.Request, output *DeleteMetricPolicyOutput) { + op := &request.Operation{ + Name: opDeleteMetricPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteMetricPolicyInput{} + } + + output = &DeleteMetricPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteMetricPolicy API operation for AWS Elemental MediaStore. +// +// Deletes the metric policy that is associated with the specified container. +// If there is no metric policy associated with the container, MediaStore doesn't +// send metrics to CloudWatch. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaStore's +// API operation DeleteMetricPolicy for usage and error information. +// +// Returned Error Types: +// * ContainerInUseException +// The container that you specified in the request already exists or is being +// updated. +// +// * ContainerNotFoundException +// The container that you specified in the request does not exist. +// +// * PolicyNotFoundException +// The policy that you specified in the request does not exist. +// +// * InternalServerError +// The service is temporarily unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-2017-09-01/DeleteMetricPolicy +func (c *MediaStore) DeleteMetricPolicy(input *DeleteMetricPolicyInput) (*DeleteMetricPolicyOutput, error) { + req, out := c.DeleteMetricPolicyRequest(input) + return out, req.Send() +} + +// DeleteMetricPolicyWithContext is the same as DeleteMetricPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteMetricPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaStore) DeleteMetricPolicyWithContext(ctx aws.Context, input *DeleteMetricPolicyInput, opts ...request.Option) (*DeleteMetricPolicyOutput, error) { + req, out := c.DeleteMetricPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeContainer = "DescribeContainer" // DescribeContainerRequest generates a "aws/request.Request" representing the @@ -826,6 +918,95 @@ func (c *MediaStore) GetLifecyclePolicyWithContext(ctx aws.Context, input *GetLi return out, req.Send() } +const opGetMetricPolicy = "GetMetricPolicy" + +// GetMetricPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetMetricPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See GetMetricPolicy for more information on using the GetMetricPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetMetricPolicyRequest method. +// req, resp := client.GetMetricPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-2017-09-01/GetMetricPolicy +func (c *MediaStore) GetMetricPolicyRequest(input *GetMetricPolicyInput) (req *request.Request, output *GetMetricPolicyOutput) { + op := &request.Operation{ + Name: opGetMetricPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetMetricPolicyInput{} + } + + output = &GetMetricPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetMetricPolicy API operation for AWS Elemental MediaStore. +// +// Returns the metric policy for the specified container. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaStore's +// API operation GetMetricPolicy for usage and error information. +// +// Returned Error Types: +// * ContainerNotFoundException +// The container that you specified in the request does not exist. +// +// * PolicyNotFoundException +// The policy that you specified in the request does not exist. +// +// * ContainerInUseException +// The container that you specified in the request already exists or is being +// updated. +// +// * InternalServerError +// The service is temporarily unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-2017-09-01/GetMetricPolicy +func (c *MediaStore) GetMetricPolicy(input *GetMetricPolicyInput) (*GetMetricPolicyOutput, error) { + req, out := c.GetMetricPolicyRequest(input) + return out, req.Send() +} + +// GetMetricPolicyWithContext is the same as GetMetricPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetMetricPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaStore) GetMetricPolicyWithContext(ctx aws.Context, input *GetMetricPolicyInput, opts ...request.Option) (*GetMetricPolicyOutput, error) { + req, out := c.GetMetricPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListContainers = "ListContainers" // ListContainersRequest generates a "aws/request.Request" representing the @@ -1344,6 +1525,95 @@ func (c *MediaStore) PutLifecyclePolicyWithContext(ctx aws.Context, input *PutLi return out, req.Send() } +const opPutMetricPolicy = "PutMetricPolicy" + +// PutMetricPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutMetricPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutMetricPolicy for more information on using the PutMetricPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutMetricPolicyRequest method. +// req, resp := client.PutMetricPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-2017-09-01/PutMetricPolicy +func (c *MediaStore) PutMetricPolicyRequest(input *PutMetricPolicyInput) (req *request.Request, output *PutMetricPolicyOutput) { + op := &request.Operation{ + Name: opPutMetricPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutMetricPolicyInput{} + } + + output = &PutMetricPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutMetricPolicy API operation for AWS Elemental MediaStore. +// +// The metric policy that you want to add to the container. A metric policy +// allows AWS Elemental MediaStore to send metrics to Amazon CloudWatch. It +// takes up to 20 minutes for the new policy to take effect. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Elemental MediaStore's +// API operation PutMetricPolicy for usage and error information. +// +// Returned Error Types: +// * ContainerInUseException +// The container that you specified in the request already exists or is being +// updated. +// +// * ContainerNotFoundException +// The container that you specified in the request does not exist. +// +// * InternalServerError +// The service is temporarily unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/mediastore-2017-09-01/PutMetricPolicy +func (c *MediaStore) PutMetricPolicy(input *PutMetricPolicyInput) (*PutMetricPolicyOutput, error) { + req, out := c.PutMetricPolicyRequest(input) + return out, req.Send() +} + +// PutMetricPolicyWithContext is the same as PutMetricPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutMetricPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MediaStore) PutMetricPolicyWithContext(ctx aws.Context, input *PutMetricPolicyInput, opts ...request.Option) (*PutMetricPolicyOutput, error) { + req, out := c.PutMetricPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opStartAccessLogging = "StartAccessLogging" // StartAccessLoggingRequest generates a "aws/request.Request" representing the @@ -1790,8 +2060,8 @@ func (s *Container) SetStatus(v string) *Container { // The container that you specified in the request already exists or is being // updated. 
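// A minimal usage sketch for the PutMetricPolicy operation documented above,
// assuming a default session and credentials; the container name and rule values
// are placeholders. It enables container-level metrics plus one object-level rule.
package example

import (
    "context"
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/mediastore"
)

func putExampleMetricPolicy() error {
    svc := mediastore.New(session.Must(session.NewSession()))

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    input := &mediastore.PutMetricPolicyInput{
        ContainerName: aws.String("example-container"),
        MetricPolicy: &mediastore.MetricPolicy{
            ContainerLevelMetrics: aws.String(mediastore.ContainerLevelMetricsEnabled),
            MetricPolicyRules: []*mediastore.MetricPolicyRule{
                {
                    ObjectGroup:     aws.String("/videos/*"),
                    ObjectGroupName: aws.String("videos"),
                },
            },
        },
    }

    // Per the operation doc above, the new policy can take up to 20 minutes to take effect.
    if _, err := svc.PutMetricPolicyWithContext(ctx, input); err != nil {
        return fmt.Errorf("putting metric policy: %w", err)
    }
    return nil
}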
type ContainerInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -1808,17 +2078,17 @@ func (s ContainerInUseException) GoString() string { func newErrorContainerInUseException(v protocol.ResponseMetadata) error { return &ContainerInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ContainerInUseException) Code() string { +func (s *ContainerInUseException) Code() string { return "ContainerInUseException" } // Message returns the exception's message. -func (s ContainerInUseException) Message() string { +func (s *ContainerInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1826,28 +2096,28 @@ func (s ContainerInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ContainerInUseException) OrigErr() error { +func (s *ContainerInUseException) OrigErr() error { return nil } -func (s ContainerInUseException) Error() string { +func (s *ContainerInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ContainerInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ContainerInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ContainerInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ContainerInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The container that you specified in the request does not exist. type ContainerNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -1864,17 +2134,17 @@ func (s ContainerNotFoundException) GoString() string { func newErrorContainerNotFoundException(v protocol.ResponseMetadata) error { return &ContainerNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ContainerNotFoundException) Code() string { +func (s *ContainerNotFoundException) Code() string { return "ContainerNotFoundException" } // Message returns the exception's message. -func (s ContainerNotFoundException) Message() string { +func (s *ContainerNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1882,28 +2152,28 @@ func (s ContainerNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ContainerNotFoundException) OrigErr() error { +func (s *ContainerNotFoundException) OrigErr() error { return nil } -func (s ContainerNotFoundException) Error() string { +func (s *ContainerNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ContainerNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ContainerNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ContainerNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ContainerNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The CORS policy that you specified in the request does not exist. type CorsPolicyNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -1920,17 +2190,17 @@ func (s CorsPolicyNotFoundException) GoString() string { func newErrorCorsPolicyNotFoundException(v protocol.ResponseMetadata) error { return &CorsPolicyNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CorsPolicyNotFoundException) Code() string { +func (s *CorsPolicyNotFoundException) Code() string { return "CorsPolicyNotFoundException" } // Message returns the exception's message. -func (s CorsPolicyNotFoundException) Message() string { +func (s *CorsPolicyNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1938,22 +2208,22 @@ func (s CorsPolicyNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CorsPolicyNotFoundException) OrigErr() error { +func (s *CorsPolicyNotFoundException) OrigErr() error { return nil } -func (s CorsPolicyNotFoundException) Error() string { +func (s *CorsPolicyNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CorsPolicyNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CorsPolicyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CorsPolicyNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *CorsPolicyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // A rule for a CORS policy. You can add up to 100 rules to a CORS policy. If @@ -2081,7 +2351,7 @@ type CreateContainerInput struct { // as "test," "development," or "production"). You can add up to 50 tags to // each container. For more information about tagging, including naming and // usage conventions, see Tagging Resources in MediaStore (https://docs.aws.amazon.com/mediastore/latest/ug/tagging.html). - Tags []*Tag `type:"list"` + Tags []*Tag `min:"1" type:"list"` } // String returns the string representation @@ -2103,6 +2373,9 @@ func (s *CreateContainerInput) Validate() error { if s.ContainerName != nil && len(*s.ContainerName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ContainerName", 1)) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -2391,6 +2664,62 @@ func (s DeleteLifecyclePolicyOutput) GoString() string { return s.String() } +type DeleteMetricPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the container that is associated with the metric policy that + // you want to delete. 
+ // + // ContainerName is a required field + ContainerName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteMetricPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMetricPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteMetricPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteMetricPolicyInput"} + if s.ContainerName == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerName")) + } + if s.ContainerName != nil && len(*s.ContainerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ContainerName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerName sets the ContainerName field's value. +func (s *DeleteMetricPolicyInput) SetContainerName(v string) *DeleteMetricPolicyInput { + s.ContainerName = &v + return s +} + +type DeleteMetricPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMetricPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMetricPolicyOutput) GoString() string { + return s.String() +} + type DescribeContainerInput struct { _ struct{} `type:"structure"` @@ -2648,10 +2977,76 @@ func (s *GetLifecyclePolicyOutput) SetLifecyclePolicy(v string) *GetLifecyclePol return s } +type GetMetricPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the container that is associated with the metric policy. + // + // ContainerName is a required field + ContainerName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetMetricPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMetricPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetMetricPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetMetricPolicyInput"} + if s.ContainerName == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerName")) + } + if s.ContainerName != nil && len(*s.ContainerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ContainerName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerName sets the ContainerName field's value. +func (s *GetMetricPolicyInput) SetContainerName(v string) *GetMetricPolicyInput { + s.ContainerName = &v + return s +} + +type GetMetricPolicyOutput struct { + _ struct{} `type:"structure"` + + // The metric policy that is associated with the specific container. + // + // MetricPolicy is a required field + MetricPolicy *MetricPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetMetricPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetMetricPolicyOutput) GoString() string { + return s.String() +} + +// SetMetricPolicy sets the MetricPolicy field's value. +func (s *GetMetricPolicyOutput) SetMetricPolicy(v *MetricPolicy) *GetMetricPolicyOutput { + s.MetricPolicy = v + return s +} + // The service is temporarily unavailable. 
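// A short sketch that reads a container's metric policy back using the new
// GetMetricPolicy types defined above; svc and containerName are assumed to be
// supplied by the caller.
package example

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/mediastore"
)

func printMetricPolicy(svc *mediastore.MediaStore, containerName string) error {
    out, err := svc.GetMetricPolicy(&mediastore.GetMetricPolicyInput{
        ContainerName: aws.String(containerName),
    })
    if err != nil {
        return err
    }
    // ContainerLevelMetrics is "ENABLED" or "DISABLED"; MetricPolicyRules may be nil.
    fmt.Println("container-level metrics:", aws.StringValue(out.MetricPolicy.ContainerLevelMetrics))
    for _, rule := range out.MetricPolicy.MetricPolicyRules {
        fmt.Printf("rule %q matches %q\n",
            aws.StringValue(rule.ObjectGroupName), aws.StringValue(rule.ObjectGroup))
    }
    return nil
}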
type InternalServerError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -2668,17 +3063,17 @@ func (s InternalServerError) GoString() string { func newErrorInternalServerError(v protocol.ResponseMetadata) error { return &InternalServerError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerError) Code() string { +func (s *InternalServerError) Code() string { return "InternalServerError" } // Message returns the exception's message. -func (s InternalServerError) Message() string { +func (s *InternalServerError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2686,28 +3081,28 @@ func (s InternalServerError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerError) OrigErr() error { +func (s *InternalServerError) OrigErr() error { return nil } -func (s InternalServerError) Error() string { +func (s *InternalServerError) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerError) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerError) RequestID() string { + return s.RespMetadata.RequestID } // A service limit has been exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -2724,17 +3119,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2742,22 +3137,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
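// A sketch of handling the modeled exception types above. The generated ErrCode*
// constants (e.g. ErrCodeContainerNotFoundException) are assumed to exist in this
// package's errors.go, as they do for other aws-sdk-go services.
package example

import (
    "log"

    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/service/mediastore"
)

func logMetricPolicyError(err error) {
    if err == nil {
        return
    }
    // The exception types still satisfy awserr.Error, so code-based switching works.
    if aerr, ok := err.(awserr.Error); ok {
        switch aerr.Code() {
        case mediastore.ErrCodeContainerNotFoundException:
            log.Printf("container does not exist: %s", aerr.Message())
        case mediastore.ErrCodePolicyNotFoundException:
            log.Printf("no metric policy attached: %s", aerr.Message())
        case mediastore.ErrCodeContainerInUseException:
            log.Printf("container is busy, retry later: %s", aerr.Message())
        default:
            log.Printf("MediaStore error %s: %s", aerr.Code(), aerr.Message())
        }
        return
    }
    log.Printf("non-API error: %v", err)
}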
-func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListContainersInput struct { @@ -2893,7 +3288,7 @@ type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` // An array of key:value pairs that are assigned to the container. - Tags []*Tag `type:"list"` + Tags []*Tag `min:"1" type:"list"` } // String returns the string representation @@ -2912,10 +3307,146 @@ func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput return s } +// The metric policy that is associated with the container. A metric policy +// allows AWS Elemental MediaStore to send metrics to Amazon CloudWatch. In +// the policy, you must indicate whether you want MediaStore to send container-level +// metrics. You can also include rules to define groups of objects that you +// want MediaStore to send object-level metrics for. +// +// To view examples of how to construct a metric policy for your use case, see +// Example Metric Policies (https://docs.aws.amazon.com/mediastore/latest/ug/policies-metric-examples.html). +type MetricPolicy struct { + _ struct{} `type:"structure"` + + // A setting to enable or disable metrics at the container level. + // + // ContainerLevelMetrics is a required field + ContainerLevelMetrics *string `type:"string" required:"true" enum:"ContainerLevelMetrics"` + + // A parameter that holds an array of rules that enable metrics at the object + // level. This parameter is optional, but if you choose to include it, you must + // also include at least one rule. By default, you can include up to five rules. + // You can also request a quota increase (https://console.aws.amazon.com/servicequotas/home?region=us-east-1#!/services/mediastore/quotas) + // to allow up to 300 rules per policy. + MetricPolicyRules []*MetricPolicyRule `min:"1" type:"list"` +} + +// String returns the string representation +func (s MetricPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricPolicy"} + if s.ContainerLevelMetrics == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerLevelMetrics")) + } + if s.MetricPolicyRules != nil && len(s.MetricPolicyRules) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricPolicyRules", 1)) + } + if s.MetricPolicyRules != nil { + for i, v := range s.MetricPolicyRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricPolicyRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerLevelMetrics sets the ContainerLevelMetrics field's value. +func (s *MetricPolicy) SetContainerLevelMetrics(v string) *MetricPolicy { + s.ContainerLevelMetrics = &v + return s +} + +// SetMetricPolicyRules sets the MetricPolicyRules field's value. +func (s *MetricPolicy) SetMetricPolicyRules(v []*MetricPolicyRule) *MetricPolicy { + s.MetricPolicyRules = v + return s +} + +// A setting that enables metrics at the object level. Each rule contains an +// object group and an object group name. 
If the policy includes the MetricPolicyRules +// parameter, you must include at least one rule. Each metric policy can include +// up to five rules by default. You can also request a quota increase (https://console.aws.amazon.com/servicequotas/home?region=us-east-1#!/services/mediastore/quotas) +// to allow up to 300 rules per policy. +type MetricPolicyRule struct { + _ struct{} `type:"structure"` + + // A path or file name that defines which objects to include in the group. Wildcards + // (*) are acceptable. + // + // ObjectGroup is a required field + ObjectGroup *string `min:"1" type:"string" required:"true"` + + // A name that allows you to refer to the object group. + // + // ObjectGroupName is a required field + ObjectGroupName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MetricPolicyRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricPolicyRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricPolicyRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricPolicyRule"} + if s.ObjectGroup == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectGroup")) + } + if s.ObjectGroup != nil && len(*s.ObjectGroup) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ObjectGroup", 1)) + } + if s.ObjectGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectGroupName")) + } + if s.ObjectGroupName != nil && len(*s.ObjectGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ObjectGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjectGroup sets the ObjectGroup field's value. +func (s *MetricPolicyRule) SetObjectGroup(v string) *MetricPolicyRule { + s.ObjectGroup = &v + return s +} + +// SetObjectGroupName sets the ObjectGroupName field's value. +func (s *MetricPolicyRule) SetObjectGroupName(v string) *MetricPolicyRule { + s.ObjectGroupName = &v + return s +} + // The policy that you specified in the request does not exist. type PolicyNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -2932,17 +3463,17 @@ func (s PolicyNotFoundException) GoString() string { func newErrorPolicyNotFoundException(v protocol.ResponseMetadata) error { return &PolicyNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PolicyNotFoundException) Code() string { +func (s *PolicyNotFoundException) Code() string { return "PolicyNotFoundException" } // Message returns the exception's message. -func (s PolicyNotFoundException) Message() string { +func (s *PolicyNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2950,22 +3481,22 @@ func (s PolicyNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PolicyNotFoundException) OrigErr() error { +func (s *PolicyNotFoundException) OrigErr() error { return nil } -func (s PolicyNotFoundException) Error() string { +func (s *PolicyNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s PolicyNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PolicyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PolicyNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *PolicyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type PutContainerPolicyInput struct { @@ -3196,6 +3727,94 @@ func (s PutLifecyclePolicyOutput) GoString() string { return s.String() } +type PutMetricPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the container that you want to add the metric policy to. + // + // ContainerName is a required field + ContainerName *string `min:"1" type:"string" required:"true"` + + // The metric policy that you want to associate with the container. In the policy, + // you must indicate whether you want MediaStore to send container-level metrics. + // You can also include up to five rules to define groups of objects that you + // want MediaStore to send object-level metrics for. If you include rules in + // the policy, construct each rule with both of the following: + // + // * An object group that defines which objects to include in the group. + // The definition can be a path or a file name, but it can't have more than + // 900 characters. Valid characters are: a-z, A-Z, 0-9, _ (underscore), = + // (equal), : (colon), . (period), - (hyphen), ~ (tilde), / (forward slash), + // and * (asterisk). Wildcards (*) are acceptable. + // + // * An object group name that allows you to refer to the object group. The + // name can't have more than 30 characters. Valid characters are: a-z, A-Z, + // 0-9, and _ (underscore). + // + // MetricPolicy is a required field + MetricPolicy *MetricPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutMetricPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutMetricPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutMetricPolicyInput"} + if s.ContainerName == nil { + invalidParams.Add(request.NewErrParamRequired("ContainerName")) + } + if s.ContainerName != nil && len(*s.ContainerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ContainerName", 1)) + } + if s.MetricPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("MetricPolicy")) + } + if s.MetricPolicy != nil { + if err := s.MetricPolicy.Validate(); err != nil { + invalidParams.AddNested("MetricPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContainerName sets the ContainerName field's value. +func (s *PutMetricPolicyInput) SetContainerName(v string) *PutMetricPolicyInput { + s.ContainerName = &v + return s +} + +// SetMetricPolicy sets the MetricPolicy field's value. 
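// A sketch of building the same input with the fluent setters and the client-side
// Validate() shown above; Validate surfaces the required-field and min:"1"
// constraints locally before any request is sent. Paths and names are placeholders.
package example

import (
    "fmt"

    "github.com/aws/aws-sdk-go/service/mediastore"
)

func buildPutMetricPolicyInput(containerName string) (*mediastore.PutMetricPolicyInput, error) {
    rule := (&mediastore.MetricPolicyRule{}).
        SetObjectGroup("/live/*").
        SetObjectGroupName("live")

    policy := (&mediastore.MetricPolicy{}).
        SetContainerLevelMetrics(mediastore.ContainerLevelMetricsEnabled).
        SetMetricPolicyRules([]*mediastore.MetricPolicyRule{rule})

    input := (&mediastore.PutMetricPolicyInput{}).
        SetContainerName(containerName).
        SetMetricPolicy(policy)

    if err := input.Validate(); err != nil {
        return nil, fmt.Errorf("invalid PutMetricPolicy input: %w", err)
    }
    return input, nil
}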
+func (s *PutMetricPolicyInput) SetMetricPolicy(v *MetricPolicy) *PutMetricPolicyInput { + s.MetricPolicy = v + return s +} + +type PutMetricPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutMetricPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutMetricPolicyOutput) GoString() string { + return s.String() +} + type StartAccessLoggingInput struct { _ struct{} `type:"structure"` @@ -3383,7 +4002,7 @@ type TagResourceInput struct { // and type:Contract. // // Tags is a required field - Tags []*Tag `type:"list" required:"true"` + Tags []*Tag `min:"1" type:"list" required:"true"` } // String returns the string representation @@ -3408,6 +4027,9 @@ func (s *TagResourceInput) Validate() error { if s.Tags == nil { invalidParams.Add(request.NewErrParamRequired("Tags")) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -3523,6 +4145,22 @@ func (s UntagResourceOutput) GoString() string { return s.String() } +const ( + // ContainerLevelMetricsEnabled is a ContainerLevelMetrics enum value + ContainerLevelMetricsEnabled = "ENABLED" + + // ContainerLevelMetricsDisabled is a ContainerLevelMetrics enum value + ContainerLevelMetricsDisabled = "DISABLED" +) + +// ContainerLevelMetrics_Values returns all elements of the ContainerLevelMetrics enum +func ContainerLevelMetrics_Values() []string { + return []string{ + ContainerLevelMetricsEnabled, + ContainerLevelMetricsDisabled, + } +} + const ( // ContainerStatusActive is a ContainerStatus enum value ContainerStatusActive = "ACTIVE" @@ -3534,6 +4172,15 @@ const ( ContainerStatusDeleting = "DELETING" ) +// ContainerStatus_Values returns all elements of the ContainerStatus enum +func ContainerStatus_Values() []string { + return []string{ + ContainerStatusActive, + ContainerStatusCreating, + ContainerStatusDeleting, + } +} + const ( // MethodNamePut is a MethodName enum value MethodNamePut = "PUT" @@ -3547,3 +4194,13 @@ const ( // MethodNameHead is a MethodName enum value MethodNameHead = "HEAD" ) + +// MethodName_Values returns all elements of the MethodName enum +func MethodName_Values() []string { + return []string{ + MethodNamePut, + MethodNameGet, + MethodNameDelete, + MethodNameHead, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go index 99f51b4c5..98d1be28c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go index 0435f0ff6..e7f45d809 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/api.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" - v4 
"github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) @@ -505,8 +505,8 @@ func (c *MediaStoreData) PutObjectWithContext(ctx aws.Context, input *PutObjectI // The specified container was not found for the specified account. type ContainerNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -523,17 +523,17 @@ func (s ContainerNotFoundException) GoString() string { func newErrorContainerNotFoundException(v protocol.ResponseMetadata) error { return &ContainerNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ContainerNotFoundException) Code() string { +func (s *ContainerNotFoundException) Code() string { return "ContainerNotFoundException" } // Message returns the exception's message. -func (s ContainerNotFoundException) Message() string { +func (s *ContainerNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -541,22 +541,22 @@ func (s ContainerNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ContainerNotFoundException) OrigErr() error { +func (s *ContainerNotFoundException) OrigErr() error { return nil } -func (s ContainerNotFoundException) Error() string { +func (s *ContainerNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ContainerNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ContainerNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ContainerNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ContainerNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type DeleteObjectInput struct { @@ -892,8 +892,8 @@ func (s *GetObjectOutput) SetStatusCode(v int64) *GetObjectOutput { // The service is temporarily unavailable. type InternalServerError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -910,17 +910,17 @@ func (s InternalServerError) GoString() string { func newErrorInternalServerError(v protocol.ResponseMetadata) error { return &InternalServerError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerError) Code() string { +func (s *InternalServerError) Code() string { return "InternalServerError" } // Message returns the exception's message. -func (s InternalServerError) Message() string { +func (s *InternalServerError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -928,22 +928,22 @@ func (s InternalServerError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InternalServerError) OrigErr() error { +func (s *InternalServerError) OrigErr() error { return nil } -func (s InternalServerError) Error() string { +func (s *InternalServerError) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerError) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerError) RequestID() string { + return s.RespMetadata.RequestID } // A metadata entry for a folder or object. @@ -1122,8 +1122,8 @@ func (s *ListItemsOutput) SetNextToken(v string) *ListItemsOutput { // Could not perform an operation on an object that does not exist. type ObjectNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -1140,17 +1140,17 @@ func (s ObjectNotFoundException) GoString() string { func newErrorObjectNotFoundException(v protocol.ResponseMetadata) error { return &ObjectNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ObjectNotFoundException) Code() string { +func (s *ObjectNotFoundException) Code() string { return "ObjectNotFoundException" } // Message returns the exception's message. -func (s ObjectNotFoundException) Message() string { +func (s *ObjectNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1158,22 +1158,22 @@ func (s ObjectNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ObjectNotFoundException) OrigErr() error { +func (s *ObjectNotFoundException) OrigErr() error { return nil } -func (s ObjectNotFoundException) Error() string { +func (s *ObjectNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ObjectNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ObjectNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ObjectNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ObjectNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type PutObjectInput struct { @@ -1356,8 +1356,8 @@ func (s *PutObjectOutput) SetStorageClass(v string) *PutObjectOutput { // The requested content range is not valid. type RequestedRangeNotSatisfiableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -1374,17 +1374,17 @@ func (s RequestedRangeNotSatisfiableException) GoString() string { func newErrorRequestedRangeNotSatisfiableException(v protocol.ResponseMetadata) error { return &RequestedRangeNotSatisfiableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s RequestedRangeNotSatisfiableException) Code() string { +func (s *RequestedRangeNotSatisfiableException) Code() string { return "RequestedRangeNotSatisfiableException" } // Message returns the exception's message. -func (s RequestedRangeNotSatisfiableException) Message() string { +func (s *RequestedRangeNotSatisfiableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1392,22 +1392,22 @@ func (s RequestedRangeNotSatisfiableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RequestedRangeNotSatisfiableException) OrigErr() error { +func (s *RequestedRangeNotSatisfiableException) OrigErr() error { return nil } -func (s RequestedRangeNotSatisfiableException) Error() string { +func (s *RequestedRangeNotSatisfiableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RequestedRangeNotSatisfiableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RequestedRangeNotSatisfiableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RequestedRangeNotSatisfiableException) RequestID() string { - return s.respMetadata.RequestID +func (s *RequestedRangeNotSatisfiableException) RequestID() string { + return s.RespMetadata.RequestID } const ( @@ -1418,11 +1418,26 @@ const ( ItemTypeFolder = "FOLDER" ) +// ItemType_Values returns all elements of the ItemType enum +func ItemType_Values() []string { + return []string{ + ItemTypeObject, + ItemTypeFolder, + } +} + const ( // StorageClassTemporal is a StorageClass enum value StorageClassTemporal = "TEMPORAL" ) +// StorageClass_Values returns all elements of the StorageClass enum +func StorageClass_Values() []string { + return []string{ + StorageClassTemporal, + } +} + const ( // UploadAvailabilityStandard is a UploadAvailability enum value UploadAvailabilityStandard = "STANDARD" @@ -1430,3 +1445,11 @@ const ( // UploadAvailabilityStreaming is a UploadAvailability enum value UploadAvailabilityStreaming = "STREAMING" ) + +// UploadAvailability_Values returns all elements of the UploadAvailability enum +func UploadAvailability_Values() []string { + return []string{ + UploadAvailabilityStandard, + UploadAvailabilityStreaming, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go index ef2f9e00c..48d7a9cbc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/mq/api.go b/vendor/github.com/aws/aws-sdk-go/service/mq/api.go index 9aabceea0..1b6463086 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mq/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mq/api.go @@ -1193,6 +1193,12 @@ func (c *MQ) ListBrokersRequest(input *ListBrokersInput) (req *request.Request, Name: opListBrokers, HTTPMethod: "GET", HTTPPath: "/v1/brokers", + Paginator: &request.Paginator{ + 
InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -1247,6 +1253,58 @@ func (c *MQ) ListBrokersWithContext(ctx aws.Context, input *ListBrokersInput, op return out, req.Send() } +// ListBrokersPages iterates over the pages of a ListBrokers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBrokers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBrokers operation. +// pageNum := 0 +// err := client.ListBrokersPages(params, +// func(page *mq.ListBrokersResponse, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *MQ) ListBrokersPages(input *ListBrokersInput, fn func(*ListBrokersResponse, bool) bool) error { + return c.ListBrokersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListBrokersPagesWithContext same as ListBrokersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *MQ) ListBrokersPagesWithContext(ctx aws.Context, input *ListBrokersInput, fn func(*ListBrokersResponse, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListBrokersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListBrokersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListBrokersResponse), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListConfigurationRevisions = "ListConfigurationRevisions" // ListConfigurationRevisionsRequest generates a "aws/request.Request" representing the @@ -1985,8 +2043,8 @@ func (s *AvailabilityZone) SetName(v string) *AvailabilityZone { // Returns information about an error. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` ErrorAttribute *string `locationName:"errorAttribute" type:"string"` @@ -2005,17 +2063,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2023,22 +2081,22 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
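// A brief usage sketch for the ListBrokersPages paginator added above, assuming a
// default session; it counts brokers across all pages. The BrokerSummaries field
// on ListBrokersResponse is assumed from the rest of this package.
package example

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/mq"
)

func countBrokers() (int, error) {
    svc := mq.New(session.Must(session.NewSession()))

    total := 0
    err := svc.ListBrokersPages(&mq.ListBrokersInput{MaxResults: aws.Int64(100)},
        func(page *mq.ListBrokersResponse, lastPage bool) bool {
            total += len(page.BrokerSummaries)
            return true // keep paging; the SDK stops after the last page
        })
    return total, err
}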
-func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Types of broker engines. @@ -2273,6 +2331,9 @@ type Configuration struct { // Required. The ARN of the configuration. Arn *string `locationName:"arn" type:"string"` + // The authentication strategy associated with the configuration. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + // Required. The date and time of the configuration revision. Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"iso8601"` @@ -2318,6 +2379,12 @@ func (s *Configuration) SetArn(v string) *Configuration { return s } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *Configuration) SetAuthenticationStrategy(v string) *Configuration { + s.AuthenticationStrategy = &v + return s +} + // SetCreated sets the Created field's value. func (s *Configuration) SetCreated(v time.Time) *Configuration { s.Created = &v @@ -2485,8 +2552,8 @@ func (s *Configurations) SetPending(v *ConfigurationId) *Configurations { // Returns information about an error. type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` ErrorAttribute *string `locationName:"errorAttribute" type:"string"` @@ -2505,17 +2572,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2523,27 +2590,30 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } type CreateBrokerRequest struct { _ struct{} `type:"structure"` + // The authentication strategy used to secure the broker. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + AutoMinorVersionUpgrade *bool `locationName:"autoMinorVersionUpgrade" type:"boolean"` BrokerName *string `locationName:"brokerName" type:"string"` @@ -2566,6 +2636,10 @@ type CreateBrokerRequest struct { HostInstanceType *string `locationName:"hostInstanceType" type:"string"` + // The metadata of the LDAP server used to authenticate and authorize connections + // to the broker. + LdapServerMetadata *LdapServerMetadataInput `locationName:"ldapServerMetadata" type:"structure"` + // The list of information about logs to be enabled for the specified broker. Logs *Logs `locationName:"logs" type:"structure"` @@ -2612,6 +2686,12 @@ func (s *CreateBrokerRequest) Validate() error { return nil } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *CreateBrokerRequest) SetAuthenticationStrategy(v string) *CreateBrokerRequest { + s.AuthenticationStrategy = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *CreateBrokerRequest) SetAutoMinorVersionUpgrade(v bool) *CreateBrokerRequest { s.AutoMinorVersionUpgrade = &v @@ -2666,6 +2746,12 @@ func (s *CreateBrokerRequest) SetHostInstanceType(v string) *CreateBrokerRequest return s } +// SetLdapServerMetadata sets the LdapServerMetadata field's value. +func (s *CreateBrokerRequest) SetLdapServerMetadata(v *LdapServerMetadataInput) *CreateBrokerRequest { + s.LdapServerMetadata = v + return s +} + // SetLogs sets the Logs field's value. func (s *CreateBrokerRequest) SetLogs(v *Logs) *CreateBrokerRequest { s.Logs = v @@ -2747,6 +2833,9 @@ func (s *CreateBrokerResponse) SetBrokerId(v string) *CreateBrokerResponse { type CreateConfigurationRequest struct { _ struct{} `type:"structure"` + // The authentication strategy used to secure the broker. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + // The type of broker engine. Note: Currently, Amazon MQ supports only ActiveMQ. EngineType *string `locationName:"engineType" type:"string" enum:"EngineType"` @@ -2767,6 +2856,12 @@ func (s CreateConfigurationRequest) GoString() string { return s.String() } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *CreateConfigurationRequest) SetAuthenticationStrategy(v string) *CreateConfigurationRequest { + s.AuthenticationStrategy = &v + return s +} + // SetEngineType sets the EngineType field's value. func (s *CreateConfigurationRequest) SetEngineType(v string) *CreateConfigurationRequest { s.EngineType = &v @@ -2796,6 +2891,9 @@ type CreateConfigurationResponse struct { Arn *string `locationName:"arn" type:"string"` + // The authentication strategy used to secure the broker. 
+ AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"iso8601"` Id *string `locationName:"id" type:"string"` @@ -2822,6 +2920,12 @@ func (s *CreateConfigurationResponse) SetArn(v string) *CreateConfigurationRespo return s } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *CreateConfigurationResponse) SetAuthenticationStrategy(v string) *CreateConfigurationResponse { + s.AuthenticationStrategy = &v + return s +} + // SetCreated sets the Created field's value. func (s *CreateConfigurationResponse) SetCreated(v time.Time) *CreateConfigurationResponse { s.Created = &v @@ -3429,6 +3533,9 @@ func (s *DescribeBrokerInstanceOptionsOutput) SetNextToken(v string) *DescribeBr type DescribeBrokerResponse struct { _ struct{} `type:"structure"` + // The authentication strategy used to secure the broker. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + AutoMinorVersionUpgrade *bool `locationName:"autoMinorVersionUpgrade" type:"boolean"` BrokerArn *string `locationName:"brokerArn" type:"string"` @@ -3460,6 +3567,10 @@ type DescribeBrokerResponse struct { HostInstanceType *string `locationName:"hostInstanceType" type:"string"` + // The metadata of the LDAP server used to authenticate and authorize connections + // to the broker. + LdapServerMetadata *LdapServerMetadataOutput `locationName:"ldapServerMetadata" type:"structure"` + // The list of information about logs currently enabled and pending to be deployed // for the specified broker. Logs *LogsSummary `locationName:"logs" type:"structure"` @@ -3468,10 +3579,17 @@ type DescribeBrokerResponse struct { // apply pending updates or patches to the broker. MaintenanceWindowStartTime *WeeklyStartTime `locationName:"maintenanceWindowStartTime" type:"structure"` + // The authentication strategy used to secure the broker. + PendingAuthenticationStrategy *string `locationName:"pendingAuthenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + PendingEngineVersion *string `locationName:"pendingEngineVersion" type:"string"` PendingHostInstanceType *string `locationName:"pendingHostInstanceType" type:"string"` + // The metadata of the LDAP server used to authenticate and authorize connections + // to the broker. + PendingLdapServerMetadata *LdapServerMetadataOutput `locationName:"pendingLdapServerMetadata" type:"structure"` + PendingSecurityGroups []*string `locationName:"pendingSecurityGroups" type:"list"` PubliclyAccessible *bool `locationName:"publiclyAccessible" type:"boolean"` @@ -3498,6 +3616,12 @@ func (s DescribeBrokerResponse) GoString() string { return s.String() } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *DescribeBrokerResponse) SetAuthenticationStrategy(v string) *DescribeBrokerResponse { + s.AuthenticationStrategy = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *DescribeBrokerResponse) SetAutoMinorVersionUpgrade(v bool) *DescribeBrokerResponse { s.AutoMinorVersionUpgrade = &v @@ -3576,6 +3700,12 @@ func (s *DescribeBrokerResponse) SetHostInstanceType(v string) *DescribeBrokerRe return s } +// SetLdapServerMetadata sets the LdapServerMetadata field's value. 
+func (s *DescribeBrokerResponse) SetLdapServerMetadata(v *LdapServerMetadataOutput) *DescribeBrokerResponse { + s.LdapServerMetadata = v + return s +} + // SetLogs sets the Logs field's value. func (s *DescribeBrokerResponse) SetLogs(v *LogsSummary) *DescribeBrokerResponse { s.Logs = v @@ -3588,6 +3718,12 @@ func (s *DescribeBrokerResponse) SetMaintenanceWindowStartTime(v *WeeklyStartTim return s } +// SetPendingAuthenticationStrategy sets the PendingAuthenticationStrategy field's value. +func (s *DescribeBrokerResponse) SetPendingAuthenticationStrategy(v string) *DescribeBrokerResponse { + s.PendingAuthenticationStrategy = &v + return s +} + // SetPendingEngineVersion sets the PendingEngineVersion field's value. func (s *DescribeBrokerResponse) SetPendingEngineVersion(v string) *DescribeBrokerResponse { s.PendingEngineVersion = &v @@ -3600,6 +3736,12 @@ func (s *DescribeBrokerResponse) SetPendingHostInstanceType(v string) *DescribeB return s } +// SetPendingLdapServerMetadata sets the PendingLdapServerMetadata field's value. +func (s *DescribeBrokerResponse) SetPendingLdapServerMetadata(v *LdapServerMetadataOutput) *DescribeBrokerResponse { + s.PendingLdapServerMetadata = v + return s +} + // SetPendingSecurityGroups sets the PendingSecurityGroups field's value. func (s *DescribeBrokerResponse) SetPendingSecurityGroups(v []*string) *DescribeBrokerResponse { s.PendingSecurityGroups = v @@ -3686,6 +3828,9 @@ type DescribeConfigurationOutput struct { Arn *string `locationName:"arn" type:"string"` + // The authentication strategy used to secure the broker. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + Created *time.Time `locationName:"created" type:"timestamp" timestampFormat:"iso8601"` Description *string `locationName:"description" type:"string"` @@ -3721,6 +3866,12 @@ func (s *DescribeConfigurationOutput) SetArn(v string) *DescribeConfigurationOut return s } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *DescribeConfigurationOutput) SetAuthenticationStrategy(v string) *DescribeConfigurationOutput { + s.AuthenticationStrategy = &v + return s +} + // SetCreated sets the Created field's value. func (s *DescribeConfigurationOutput) SetCreated(v time.Time) *DescribeConfigurationOutput { s.Created = &v @@ -3983,9 +4134,9 @@ func (s *DescribeUserResponse) SetUsername(v string) *DescribeUserResponse { type EncryptionOptions struct { _ struct{} `type:"structure"` - // The customer master key (CMK) to use for the AWS Key Management Service (KMS). - // This key is used to encrypt your data at rest. If not provided, Amazon MQ - // will use a default CMK to encrypt your data. + // The symmetric customer master key (CMK) to use for the AWS Key Management + // Service (KMS). This key is used to encrypt your data at rest. If not provided, + // Amazon MQ will use a default CMK to encrypt your data. KmsKeyId *string `locationName:"kmsKeyId" type:"string"` // Enables the use of an AWS owned CMK using AWS Key Management Service (KMS). @@ -4055,8 +4206,8 @@ func (s *EngineVersion) SetName(v string) *EngineVersion { // Returns information about an error. 
type ForbiddenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` ErrorAttribute *string `locationName:"errorAttribute" type:"string"` @@ -4075,17 +4226,17 @@ func (s ForbiddenException) GoString() string { func newErrorForbiddenException(v protocol.ResponseMetadata) error { return &ForbiddenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ForbiddenException) Code() string { +func (s *ForbiddenException) Code() string { return "ForbiddenException" } // Message returns the exception's message. -func (s ForbiddenException) Message() string { +func (s *ForbiddenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4093,28 +4244,28 @@ func (s ForbiddenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ForbiddenException) OrigErr() error { +func (s *ForbiddenException) OrigErr() error { return nil } -func (s ForbiddenException) Error() string { +func (s *ForbiddenException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ForbiddenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ForbiddenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ForbiddenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ForbiddenException) RequestID() string { + return s.RespMetadata.RequestID } // Returns information about an error. type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` ErrorAttribute *string `locationName:"errorAttribute" type:"string"` @@ -4133,17 +4284,17 @@ func (s InternalServerErrorException) GoString() string { func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { return &InternalServerErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerErrorException) Code() string { +func (s *InternalServerErrorException) Code() string { return "InternalServerErrorException" } // Message returns the exception's message. -func (s InternalServerErrorException) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4151,22 +4302,249 @@ func (s InternalServerErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerErrorException) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s InternalServerErrorException) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The metadata of the LDAP server used to authenticate and authorize connections +// to the broker. +type LdapServerMetadataInput struct { + _ struct{} `type:"structure"` + + // Fully qualified domain name of the LDAP server. Optional failover server. + Hosts []*string `locationName:"hosts" type:"list"` + + // Fully qualified name of the directory to search for a user’s groups. + RoleBase *string `locationName:"roleBase" type:"string"` + + // Specifies the LDAP attribute that identifies the group name attribute in + // the object returned from the group membership query. + RoleName *string `locationName:"roleName" type:"string"` + + // The search criteria for groups. + RoleSearchMatching *string `locationName:"roleSearchMatching" type:"string"` + + // The directory search scope for the role. If set to true, scope is to search + // the entire sub-tree. + RoleSearchSubtree *bool `locationName:"roleSearchSubtree" type:"boolean"` + + // Service account password. + ServiceAccountPassword *string `locationName:"serviceAccountPassword" type:"string"` + + // Service account username. + ServiceAccountUsername *string `locationName:"serviceAccountUsername" type:"string"` + + // Fully qualified name of the directory where you want to search for users. + UserBase *string `locationName:"userBase" type:"string"` + + // Specifies the name of the LDAP attribute for the user group membership. + UserRoleName *string `locationName:"userRoleName" type:"string"` + + // The search criteria for users. + UserSearchMatching *string `locationName:"userSearchMatching" type:"string"` + + // The directory search scope for the user. If set to true, scope is to search + // the entire sub-tree. + UserSearchSubtree *bool `locationName:"userSearchSubtree" type:"boolean"` +} + +// String returns the string representation +func (s LdapServerMetadataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LdapServerMetadataInput) GoString() string { + return s.String() +} + +// SetHosts sets the Hosts field's value. +func (s *LdapServerMetadataInput) SetHosts(v []*string) *LdapServerMetadataInput { + s.Hosts = v + return s +} + +// SetRoleBase sets the RoleBase field's value. +func (s *LdapServerMetadataInput) SetRoleBase(v string) *LdapServerMetadataInput { + s.RoleBase = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *LdapServerMetadataInput) SetRoleName(v string) *LdapServerMetadataInput { + s.RoleName = &v + return s +} + +// SetRoleSearchMatching sets the RoleSearchMatching field's value. +func (s *LdapServerMetadataInput) SetRoleSearchMatching(v string) *LdapServerMetadataInput { + s.RoleSearchMatching = &v + return s +} + +// SetRoleSearchSubtree sets the RoleSearchSubtree field's value. +func (s *LdapServerMetadataInput) SetRoleSearchSubtree(v bool) *LdapServerMetadataInput { + s.RoleSearchSubtree = &v + return s +} + +// SetServiceAccountPassword sets the ServiceAccountPassword field's value. +func (s *LdapServerMetadataInput) SetServiceAccountPassword(v string) *LdapServerMetadataInput { + s.ServiceAccountPassword = &v + return s +} + +// SetServiceAccountUsername sets the ServiceAccountUsername field's value. 
+func (s *LdapServerMetadataInput) SetServiceAccountUsername(v string) *LdapServerMetadataInput { + s.ServiceAccountUsername = &v + return s +} + +// SetUserBase sets the UserBase field's value. +func (s *LdapServerMetadataInput) SetUserBase(v string) *LdapServerMetadataInput { + s.UserBase = &v + return s +} + +// SetUserRoleName sets the UserRoleName field's value. +func (s *LdapServerMetadataInput) SetUserRoleName(v string) *LdapServerMetadataInput { + s.UserRoleName = &v + return s +} + +// SetUserSearchMatching sets the UserSearchMatching field's value. +func (s *LdapServerMetadataInput) SetUserSearchMatching(v string) *LdapServerMetadataInput { + s.UserSearchMatching = &v + return s +} + +// SetUserSearchSubtree sets the UserSearchSubtree field's value. +func (s *LdapServerMetadataInput) SetUserSearchSubtree(v bool) *LdapServerMetadataInput { + s.UserSearchSubtree = &v + return s +} + +// The metadata of the LDAP server used to authenticate and authorize connections +// to the broker. +type LdapServerMetadataOutput struct { + _ struct{} `type:"structure"` + + // Fully qualified domain name of the LDAP server. Optional failover server. + Hosts []*string `locationName:"hosts" type:"list"` + + // Fully qualified name of the directory to search for a user’s groups. + RoleBase *string `locationName:"roleBase" type:"string"` + + // Specifies the LDAP attribute that identifies the group name attribute in + // the object returned from the group membership query. + RoleName *string `locationName:"roleName" type:"string"` + + // The search criteria for groups. + RoleSearchMatching *string `locationName:"roleSearchMatching" type:"string"` + + // The directory search scope for the role. If set to true, scope is to search + // the entire sub-tree. + RoleSearchSubtree *bool `locationName:"roleSearchSubtree" type:"boolean"` + + // Service account username. + ServiceAccountUsername *string `locationName:"serviceAccountUsername" type:"string"` + + // Fully qualified name of the directory where you want to search for users. + UserBase *string `locationName:"userBase" type:"string"` + + // Specifies the name of the LDAP attribute for the user group membership. + UserRoleName *string `locationName:"userRoleName" type:"string"` + + // The search criteria for users. + UserSearchMatching *string `locationName:"userSearchMatching" type:"string"` + + // The directory search scope for the user. If set to true, scope is to search + // the entire sub-tree. + UserSearchSubtree *bool `locationName:"userSearchSubtree" type:"boolean"` +} + +// String returns the string representation +func (s LdapServerMetadataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LdapServerMetadataOutput) GoString() string { + return s.String() +} + +// SetHosts sets the Hosts field's value. +func (s *LdapServerMetadataOutput) SetHosts(v []*string) *LdapServerMetadataOutput { + s.Hosts = v + return s +} + +// SetRoleBase sets the RoleBase field's value. +func (s *LdapServerMetadataOutput) SetRoleBase(v string) *LdapServerMetadataOutput { + s.RoleBase = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *LdapServerMetadataOutput) SetRoleName(v string) *LdapServerMetadataOutput { + s.RoleName = &v + return s +} + +// SetRoleSearchMatching sets the RoleSearchMatching field's value. 
+func (s *LdapServerMetadataOutput) SetRoleSearchMatching(v string) *LdapServerMetadataOutput { + s.RoleSearchMatching = &v + return s +} + +// SetRoleSearchSubtree sets the RoleSearchSubtree field's value. +func (s *LdapServerMetadataOutput) SetRoleSearchSubtree(v bool) *LdapServerMetadataOutput { + s.RoleSearchSubtree = &v + return s +} + +// SetServiceAccountUsername sets the ServiceAccountUsername field's value. +func (s *LdapServerMetadataOutput) SetServiceAccountUsername(v string) *LdapServerMetadataOutput { + s.ServiceAccountUsername = &v + return s +} + +// SetUserBase sets the UserBase field's value. +func (s *LdapServerMetadataOutput) SetUserBase(v string) *LdapServerMetadataOutput { + s.UserBase = &v + return s +} + +// SetUserRoleName sets the UserRoleName field's value. +func (s *LdapServerMetadataOutput) SetUserRoleName(v string) *LdapServerMetadataOutput { + s.UserRoleName = &v + return s +} + +// SetUserSearchMatching sets the UserSearchMatching field's value. +func (s *LdapServerMetadataOutput) SetUserSearchMatching(v string) *LdapServerMetadataOutput { + s.UserSearchMatching = &v + return s +} + +// SetUserSearchSubtree sets the UserSearchSubtree field's value. +func (s *LdapServerMetadataOutput) SetUserSearchSubtree(v bool) *LdapServerMetadataOutput { + s.UserSearchSubtree = &v + return s } type ListBrokersInput struct { @@ -4691,8 +5069,8 @@ func (s *LogsSummary) SetPending(v *PendingLogs) *LogsSummary { // Returns information about an error. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` ErrorAttribute *string `locationName:"errorAttribute" type:"string"` @@ -4711,17 +5089,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4729,22 +5107,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The list of information about logs to be enabled for the specified broker. @@ -4879,8 +5257,8 @@ func (s *SanitizationWarning) SetReason(v string) *SanitizationWarning { // Returns information about an error. 
type UnauthorizedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` ErrorAttribute *string `locationName:"errorAttribute" type:"string"` @@ -4899,17 +5277,17 @@ func (s UnauthorizedException) GoString() string { func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { return &UnauthorizedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnauthorizedException) Code() string { +func (s *UnauthorizedException) Code() string { return "UnauthorizedException" } // Message returns the exception's message. -func (s UnauthorizedException) Message() string { +func (s *UnauthorizedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4917,27 +5295,30 @@ func (s UnauthorizedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnauthorizedException) OrigErr() error { +func (s *UnauthorizedException) OrigErr() error { return nil } -func (s UnauthorizedException) Error() string { +func (s *UnauthorizedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UnauthorizedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnauthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnauthorizedException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnauthorizedException) RequestID() string { + return s.RespMetadata.RequestID } type UpdateBrokerRequest struct { _ struct{} `type:"structure"` + // The authentication strategy used to secure the broker. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + AutoMinorVersionUpgrade *bool `locationName:"autoMinorVersionUpgrade" type:"boolean"` // BrokerId is a required field @@ -4950,6 +5331,10 @@ type UpdateBrokerRequest struct { HostInstanceType *string `locationName:"hostInstanceType" type:"string"` + // The metadata of the LDAP server used to authenticate and authorize connections + // to the broker. + LdapServerMetadata *LdapServerMetadataInput `locationName:"ldapServerMetadata" type:"structure"` + // The list of information about logs to be enabled for the specified broker. Logs *Logs `locationName:"logs" type:"structure"` @@ -4982,6 +5367,12 @@ func (s *UpdateBrokerRequest) Validate() error { return nil } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *UpdateBrokerRequest) SetAuthenticationStrategy(v string) *UpdateBrokerRequest { + s.AuthenticationStrategy = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *UpdateBrokerRequest) SetAutoMinorVersionUpgrade(v bool) *UpdateBrokerRequest { s.AutoMinorVersionUpgrade = &v @@ -5012,6 +5403,12 @@ func (s *UpdateBrokerRequest) SetHostInstanceType(v string) *UpdateBrokerRequest return s } +// SetLdapServerMetadata sets the LdapServerMetadata field's value. +func (s *UpdateBrokerRequest) SetLdapServerMetadata(v *LdapServerMetadataInput) *UpdateBrokerRequest { + s.LdapServerMetadata = v + return s +} + // SetLogs sets the Logs field's value. 
func (s *UpdateBrokerRequest) SetLogs(v *Logs) *UpdateBrokerRequest { s.Logs = v @@ -5027,6 +5424,9 @@ func (s *UpdateBrokerRequest) SetSecurityGroups(v []*string) *UpdateBrokerReques type UpdateBrokerResponse struct { _ struct{} `type:"structure"` + // The authentication strategy used to secure the broker. + AuthenticationStrategy *string `locationName:"authenticationStrategy" type:"string" enum:"AuthenticationStrategy"` + AutoMinorVersionUpgrade *bool `locationName:"autoMinorVersionUpgrade" type:"boolean"` BrokerId *string `locationName:"brokerId" type:"string"` @@ -5038,6 +5438,10 @@ type UpdateBrokerResponse struct { HostInstanceType *string `locationName:"hostInstanceType" type:"string"` + // The metadata of the LDAP server used to authenticate and authorize connections + // to the broker. + LdapServerMetadata *LdapServerMetadataOutput `locationName:"ldapServerMetadata" type:"structure"` + // The list of information about logs to be enabled for the specified broker. Logs *Logs `locationName:"logs" type:"structure"` @@ -5054,6 +5458,12 @@ func (s UpdateBrokerResponse) GoString() string { return s.String() } +// SetAuthenticationStrategy sets the AuthenticationStrategy field's value. +func (s *UpdateBrokerResponse) SetAuthenticationStrategy(v string) *UpdateBrokerResponse { + s.AuthenticationStrategy = &v + return s +} + // SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. func (s *UpdateBrokerResponse) SetAutoMinorVersionUpgrade(v bool) *UpdateBrokerResponse { s.AutoMinorVersionUpgrade = &v @@ -5084,6 +5494,12 @@ func (s *UpdateBrokerResponse) SetHostInstanceType(v string) *UpdateBrokerRespon return s } +// SetLdapServerMetadata sets the LdapServerMetadata field's value. +func (s *UpdateBrokerResponse) SetLdapServerMetadata(v *LdapServerMetadataOutput) *UpdateBrokerResponse { + s.LdapServerMetadata = v + return s +} + // SetLogs sets the Logs field's value. func (s *UpdateBrokerResponse) SetLogs(v *Logs) *UpdateBrokerResponse { s.Logs = v @@ -5487,6 +5903,23 @@ func (s *WeeklyStartTime) SetTimeZone(v string) *WeeklyStartTime { return s } +// The authentication strategy used to secure the broker. +const ( + // AuthenticationStrategySimple is a AuthenticationStrategy enum value + AuthenticationStrategySimple = "SIMPLE" + + // AuthenticationStrategyLdap is a AuthenticationStrategy enum value + AuthenticationStrategyLdap = "LDAP" +) + +// AuthenticationStrategy_Values returns all elements of the AuthenticationStrategy enum +func AuthenticationStrategy_Values() []string { + return []string{ + AuthenticationStrategySimple, + AuthenticationStrategyLdap, + } +} + // The status of the broker. const ( // BrokerStateCreationInProgress is a BrokerState enum value @@ -5505,6 +5938,17 @@ const ( BrokerStateRebootInProgress = "REBOOT_IN_PROGRESS" ) +// BrokerState_Values returns all elements of the BrokerState enum +func BrokerState_Values() []string { + return []string{ + BrokerStateCreationInProgress, + BrokerStateCreationFailed, + BrokerStateDeletionInProgress, + BrokerStateRunning, + BrokerStateRebootInProgress, + } +} + // The storage type of the broker. const ( // BrokerStorageTypeEbs is a BrokerStorageType enum value @@ -5514,6 +5958,14 @@ const ( BrokerStorageTypeEfs = "EFS" ) +// BrokerStorageType_Values returns all elements of the BrokerStorageType enum +func BrokerStorageType_Values() []string { + return []string{ + BrokerStorageTypeEbs, + BrokerStorageTypeEfs, + } +} + // The type of change pending for the ActiveMQ user. 
const ( // ChangeTypeCreate is a ChangeType enum value @@ -5526,6 +5978,15 @@ const ( ChangeTypeDelete = "DELETE" ) +// ChangeType_Values returns all elements of the ChangeType enum +func ChangeType_Values() []string { + return []string{ + ChangeTypeCreate, + ChangeTypeUpdate, + ChangeTypeDelete, + } +} + const ( // DayOfWeekMonday is a DayOfWeek enum value DayOfWeekMonday = "MONDAY" @@ -5549,6 +6010,19 @@ const ( DayOfWeekSunday = "SUNDAY" ) +// DayOfWeek_Values returns all elements of the DayOfWeek enum +func DayOfWeek_Values() []string { + return []string{ + DayOfWeekMonday, + DayOfWeekTuesday, + DayOfWeekWednesday, + DayOfWeekThursday, + DayOfWeekFriday, + DayOfWeekSaturday, + DayOfWeekSunday, + } +} + // The deployment mode of the broker. const ( // DeploymentModeSingleInstance is a DeploymentMode enum value @@ -5558,12 +6032,27 @@ const ( DeploymentModeActiveStandbyMultiAz = "ACTIVE_STANDBY_MULTI_AZ" ) +// DeploymentMode_Values returns all elements of the DeploymentMode enum +func DeploymentMode_Values() []string { + return []string{ + DeploymentModeSingleInstance, + DeploymentModeActiveStandbyMultiAz, + } +} + // The type of broker engine. Note: Currently, Amazon MQ supports only ActiveMQ. const ( // EngineTypeActivemq is a EngineType enum value EngineTypeActivemq = "ACTIVEMQ" ) +// EngineType_Values returns all elements of the EngineType enum +func EngineType_Values() []string { + return []string{ + EngineTypeActivemq, + } +} + // The reason for which the XML elements or attributes were sanitized. const ( // SanitizationWarningReasonDisallowedElementRemoved is a SanitizationWarningReason enum value @@ -5575,3 +6064,12 @@ const ( // SanitizationWarningReasonInvalidAttributeValueRemoved is a SanitizationWarningReason enum value SanitizationWarningReasonInvalidAttributeValueRemoved = "INVALID_ATTRIBUTE_VALUE_REMOVED" ) + +// SanitizationWarningReason_Values returns all elements of the SanitizationWarningReason enum +func SanitizationWarningReason_Values() []string { + return []string{ + SanitizationWarningReasonDisallowedElementRemoved, + SanitizationWarningReasonDisallowedAttributeRemoved, + SanitizationWarningReasonInvalidAttributeValueRemoved, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/mq/service.go b/vendor/github.com/aws/aws-sdk-go/service/mq/service.go index 05d784ddf..d6823d360 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mq/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mq/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/neptune/api.go b/vendor/github.com/aws/aws-sdk-go/service/neptune/api.go index 938839b0c..b764c8295 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/neptune/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/neptune/api.go @@ -16953,6 +16953,14 @@ const ( ApplyMethodPendingReboot = "pending-reboot" ) +// ApplyMethod_Values returns all elements of the ApplyMethod enum +func ApplyMethod_Values() []string { + return []string{ + ApplyMethodImmediate, + ApplyMethodPendingReboot, + } +} + const ( // SourceTypeDbInstance is a SourceType enum value SourceTypeDbInstance = "db-instance" @@ -16972,3 +16980,15 @@ const ( // SourceTypeDbClusterSnapshot is a SourceType enum 
value SourceTypeDbClusterSnapshot = "db-cluster-snapshot" ) + +// SourceType_Values returns all elements of the SourceType enum +func SourceType_Values() []string { + return []string{ + SourceTypeDbInstance, + SourceTypeDbParameterGroup, + SourceTypeDbSecurityGroup, + SourceTypeDbSnapshot, + SourceTypeDbCluster, + SourceTypeDbClusterSnapshot, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go b/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go index 451891d9e..5c542c8d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go index 0acf64add..c16c00128 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go @@ -14324,8 +14324,8 @@ func (s *ReportedOs) SetVersion(v string) *ReportedOs { // Indicates that a resource was not found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The exception message. Message_ *string `locationName:"message" type:"string"` @@ -14343,17 +14343,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14361,22 +14361,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Describes a user's SSH information. @@ -17172,8 +17172,8 @@ func (s *UserProfile) SetSshUsername(v string) *UserProfile { // Indicates that a request was not valid. 
type ValidationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The exception message. Message_ *string `locationName:"message" type:"string"` @@ -17191,17 +17191,17 @@ func (s ValidationException) GoString() string { func newErrorValidationException(v protocol.ResponseMetadata) error { return &ValidationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ValidationException) Code() string { +func (s *ValidationException) Code() string { return "ValidationException" } // Message returns the exception's message. -func (s ValidationException) Message() string { +func (s *ValidationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17209,22 +17209,22 @@ func (s ValidationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ValidationException) OrigErr() error { +func (s *ValidationException) OrigErr() error { return nil } -func (s ValidationException) Error() string { +func (s *ValidationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ValidationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ValidationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ValidationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ValidationException) RequestID() string { + return s.RespMetadata.RequestID } // Describes an instance's Amazon EBS volume. @@ -17616,6 +17616,16 @@ const ( AppAttributesKeysAwsFlowRubySettings = "AwsFlowRubySettings" ) +// AppAttributesKeys_Values returns all elements of the AppAttributesKeys enum +func AppAttributesKeys_Values() []string { + return []string{ + AppAttributesKeysDocumentRoot, + AppAttributesKeysRailsEnv, + AppAttributesKeysAutoBundleOnDeploy, + AppAttributesKeysAwsFlowRubySettings, + } +} + const ( // AppTypeAwsFlowRuby is a AppType enum value AppTypeAwsFlowRuby = "aws-flow-ruby" @@ -17639,6 +17649,19 @@ const ( AppTypeOther = "other" ) +// AppType_Values returns all elements of the AppType enum +func AppType_Values() []string { + return []string{ + AppTypeAwsFlowRuby, + AppTypeJava, + AppTypeRails, + AppTypePhp, + AppTypeNodejs, + AppTypeStatic, + AppTypeOther, + } +} + const ( // ArchitectureX8664 is a Architecture enum value ArchitectureX8664 = "x86_64" @@ -17647,6 +17670,14 @@ const ( ArchitectureI386 = "i386" ) +// Architecture_Values returns all elements of the Architecture enum +func Architecture_Values() []string { + return []string{ + ArchitectureX8664, + ArchitectureI386, + } +} + const ( // AutoScalingTypeLoad is a AutoScalingType enum value AutoScalingTypeLoad = "load" @@ -17655,6 +17686,14 @@ const ( AutoScalingTypeTimer = "timer" ) +// AutoScalingType_Values returns all elements of the AutoScalingType enum +func AutoScalingType_Values() []string { + return []string{ + AutoScalingTypeLoad, + AutoScalingTypeTimer, + } +} + // Specifies the encoding of the log file so that the file can be read correctly. // The default is utf_8. Encodings supported by Python codecs.decode() can be // used here. 
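The hunks above switch the OpsWorks modeled exceptions (ResourceNotFoundException, ValidationException) to pointer receivers and export RespMetadata, so callers now type-assert against the pointer type. A minimal sketch of that consuming pattern, using only identifiers visible in this diff; the error constructed in main is a stand-in rather than one returned by a real API call:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/opsworks"
)

// reportOpsWorksError demonstrates the post-change error handling: the
// assertion targets the pointer type, and the exported RespMetadata is
// reached through the StatusCode() and RequestID() accessors.
func reportOpsWorksError(err error) {
	switch aerr := err.(type) {
	case *opsworks.ResourceNotFoundException:
		fmt.Printf("not found (HTTP %d, request %s): %s\n",
			aerr.StatusCode(), aerr.RequestID(), aerr.Message())
	case *opsworks.ValidationException:
		fmt.Printf("invalid request (HTTP %d): %s\n", aerr.StatusCode(), aerr.Message())
	default:
		fmt.Println(err)
	}
}

func main() {
	// Stand-in value only; in practice the error comes back from an API call.
	reportOpsWorksError(&opsworks.ResourceNotFoundException{})
}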
@@ -17936,6 +17975,104 @@ const ( CloudWatchLogsEncodingUtf8Sig = "utf_8_sig" ) +// CloudWatchLogsEncoding_Values returns all elements of the CloudWatchLogsEncoding enum +func CloudWatchLogsEncoding_Values() []string { + return []string{ + CloudWatchLogsEncodingAscii, + CloudWatchLogsEncodingBig5, + CloudWatchLogsEncodingBig5hkscs, + CloudWatchLogsEncodingCp037, + CloudWatchLogsEncodingCp424, + CloudWatchLogsEncodingCp437, + CloudWatchLogsEncodingCp500, + CloudWatchLogsEncodingCp720, + CloudWatchLogsEncodingCp737, + CloudWatchLogsEncodingCp775, + CloudWatchLogsEncodingCp850, + CloudWatchLogsEncodingCp852, + CloudWatchLogsEncodingCp855, + CloudWatchLogsEncodingCp856, + CloudWatchLogsEncodingCp857, + CloudWatchLogsEncodingCp858, + CloudWatchLogsEncodingCp860, + CloudWatchLogsEncodingCp861, + CloudWatchLogsEncodingCp862, + CloudWatchLogsEncodingCp863, + CloudWatchLogsEncodingCp864, + CloudWatchLogsEncodingCp865, + CloudWatchLogsEncodingCp866, + CloudWatchLogsEncodingCp869, + CloudWatchLogsEncodingCp874, + CloudWatchLogsEncodingCp875, + CloudWatchLogsEncodingCp932, + CloudWatchLogsEncodingCp949, + CloudWatchLogsEncodingCp950, + CloudWatchLogsEncodingCp1006, + CloudWatchLogsEncodingCp1026, + CloudWatchLogsEncodingCp1140, + CloudWatchLogsEncodingCp1250, + CloudWatchLogsEncodingCp1251, + CloudWatchLogsEncodingCp1252, + CloudWatchLogsEncodingCp1253, + CloudWatchLogsEncodingCp1254, + CloudWatchLogsEncodingCp1255, + CloudWatchLogsEncodingCp1256, + CloudWatchLogsEncodingCp1257, + CloudWatchLogsEncodingCp1258, + CloudWatchLogsEncodingEucJp, + CloudWatchLogsEncodingEucJis2004, + CloudWatchLogsEncodingEucJisx0213, + CloudWatchLogsEncodingEucKr, + CloudWatchLogsEncodingGb2312, + CloudWatchLogsEncodingGbk, + CloudWatchLogsEncodingGb18030, + CloudWatchLogsEncodingHz, + CloudWatchLogsEncodingIso2022Jp, + CloudWatchLogsEncodingIso2022Jp1, + CloudWatchLogsEncodingIso2022Jp2, + CloudWatchLogsEncodingIso2022Jp2004, + CloudWatchLogsEncodingIso2022Jp3, + CloudWatchLogsEncodingIso2022JpExt, + CloudWatchLogsEncodingIso2022Kr, + CloudWatchLogsEncodingLatin1, + CloudWatchLogsEncodingIso88592, + CloudWatchLogsEncodingIso88593, + CloudWatchLogsEncodingIso88594, + CloudWatchLogsEncodingIso88595, + CloudWatchLogsEncodingIso88596, + CloudWatchLogsEncodingIso88597, + CloudWatchLogsEncodingIso88598, + CloudWatchLogsEncodingIso88599, + CloudWatchLogsEncodingIso885910, + CloudWatchLogsEncodingIso885913, + CloudWatchLogsEncodingIso885914, + CloudWatchLogsEncodingIso885915, + CloudWatchLogsEncodingIso885916, + CloudWatchLogsEncodingJohab, + CloudWatchLogsEncodingKoi8R, + CloudWatchLogsEncodingKoi8U, + CloudWatchLogsEncodingMacCyrillic, + CloudWatchLogsEncodingMacGreek, + CloudWatchLogsEncodingMacIceland, + CloudWatchLogsEncodingMacLatin2, + CloudWatchLogsEncodingMacRoman, + CloudWatchLogsEncodingMacTurkish, + CloudWatchLogsEncodingPtcp154, + CloudWatchLogsEncodingShiftJis, + CloudWatchLogsEncodingShiftJis2004, + CloudWatchLogsEncodingShiftJisx0213, + CloudWatchLogsEncodingUtf32, + CloudWatchLogsEncodingUtf32Be, + CloudWatchLogsEncodingUtf32Le, + CloudWatchLogsEncodingUtf16, + CloudWatchLogsEncodingUtf16Be, + CloudWatchLogsEncodingUtf16Le, + CloudWatchLogsEncodingUtf7, + CloudWatchLogsEncodingUtf8, + CloudWatchLogsEncodingUtf8Sig, + } +} + // Specifies where to start to read data (start_of_file or end_of_file). The // default is start_of_file. It's only used if there is no state persisted for // that log stream. 
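The generated CloudWatchLogsEncoding_Values helper above removes the need for a hand-maintained copy of the supported encodings. An illustrative sketch of using it for input validation, assuming nothing beyond the generated opsworks package:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/opsworks"
)

// isValidEncoding checks a candidate value against the generated enum list
// instead of duplicating the long list of supported encodings by hand.
func isValidEncoding(v string) bool {
	for _, enc := range opsworks.CloudWatchLogsEncoding_Values() {
		if enc == v {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isValidEncoding(opsworks.CloudWatchLogsEncodingUtf8)) // true
	fmt.Println(isValidEncoding("utf-9"))                             // false
}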
@@ -17947,6 +18084,14 @@ const ( CloudWatchLogsInitialPositionEndOfFile = "end_of_file" ) +// CloudWatchLogsInitialPosition_Values returns all elements of the CloudWatchLogsInitialPosition enum +func CloudWatchLogsInitialPosition_Values() []string { + return []string{ + CloudWatchLogsInitialPositionStartOfFile, + CloudWatchLogsInitialPositionEndOfFile, + } +} + // The preferred time zone for logs streamed to CloudWatch Logs. Valid values // are LOCAL and UTC, for Coordinated Universal Time. const ( @@ -17957,6 +18102,14 @@ const ( CloudWatchLogsTimeZoneUtc = "UTC" ) +// CloudWatchLogsTimeZone_Values returns all elements of the CloudWatchLogsTimeZone enum +func CloudWatchLogsTimeZone_Values() []string { + return []string{ + CloudWatchLogsTimeZoneLocal, + CloudWatchLogsTimeZoneUtc, + } +} + const ( // DeploymentCommandNameInstallDependencies is a DeploymentCommandName enum value DeploymentCommandNameInstallDependencies = "install_dependencies" @@ -17995,6 +18148,24 @@ const ( DeploymentCommandNameUndeploy = "undeploy" ) +// DeploymentCommandName_Values returns all elements of the DeploymentCommandName enum +func DeploymentCommandName_Values() []string { + return []string{ + DeploymentCommandNameInstallDependencies, + DeploymentCommandNameUpdateDependencies, + DeploymentCommandNameUpdateCustomCookbooks, + DeploymentCommandNameExecuteRecipes, + DeploymentCommandNameConfigure, + DeploymentCommandNameSetup, + DeploymentCommandNameDeploy, + DeploymentCommandNameRollback, + DeploymentCommandNameStart, + DeploymentCommandNameStop, + DeploymentCommandNameRestart, + DeploymentCommandNameUndeploy, + } +} + const ( // LayerAttributesKeysEcsClusterArn is a LayerAttributesKeys enum value LayerAttributesKeysEcsClusterArn = "EcsClusterArn" @@ -18072,6 +18243,37 @@ const ( LayerAttributesKeysJavaAppServerVersion = "JavaAppServerVersion" ) +// LayerAttributesKeys_Values returns all elements of the LayerAttributesKeys enum +func LayerAttributesKeys_Values() []string { + return []string{ + LayerAttributesKeysEcsClusterArn, + LayerAttributesKeysEnableHaproxyStats, + LayerAttributesKeysHaproxyStatsUrl, + LayerAttributesKeysHaproxyStatsUser, + LayerAttributesKeysHaproxyStatsPassword, + LayerAttributesKeysHaproxyHealthCheckUrl, + LayerAttributesKeysHaproxyHealthCheckMethod, + LayerAttributesKeysMysqlRootPassword, + LayerAttributesKeysMysqlRootPasswordUbiquitous, + LayerAttributesKeysGangliaUrl, + LayerAttributesKeysGangliaUser, + LayerAttributesKeysGangliaPassword, + LayerAttributesKeysMemcachedMemory, + LayerAttributesKeysNodejsVersion, + LayerAttributesKeysRubyVersion, + LayerAttributesKeysRubygemsVersion, + LayerAttributesKeysManageBundler, + LayerAttributesKeysBundlerVersion, + LayerAttributesKeysRailsStack, + LayerAttributesKeysPassengerVersion, + LayerAttributesKeysJvm, + LayerAttributesKeysJvmVersion, + LayerAttributesKeysJvmOptions, + LayerAttributesKeysJavaAppServer, + LayerAttributesKeysJavaAppServerVersion, + } +} + const ( // LayerTypeAwsFlowRuby is a LayerType enum value LayerTypeAwsFlowRuby = "aws-flow-ruby" @@ -18110,6 +18312,24 @@ const ( LayerTypeCustom = "custom" ) +// LayerType_Values returns all elements of the LayerType enum +func LayerType_Values() []string { + return []string{ + LayerTypeAwsFlowRuby, + LayerTypeEcsCluster, + LayerTypeJavaApp, + LayerTypeLb, + LayerTypeWeb, + LayerTypePhpApp, + LayerTypeRailsApp, + LayerTypeNodejsApp, + LayerTypeMemcached, + LayerTypeDbMaster, + LayerTypeMonitoringMaster, + LayerTypeCustom, + } +} + const ( // RootDeviceTypeEbs is a RootDeviceType enum value 
RootDeviceTypeEbs = "ebs" @@ -18118,6 +18338,14 @@ const ( RootDeviceTypeInstanceStore = "instance-store" ) +// RootDeviceType_Values returns all elements of the RootDeviceType enum +func RootDeviceType_Values() []string { + return []string{ + RootDeviceTypeEbs, + RootDeviceTypeInstanceStore, + } +} + const ( // SourceTypeGit is a SourceType enum value SourceTypeGit = "git" @@ -18132,11 +18360,28 @@ const ( SourceTypeS3 = "s3" ) +// SourceType_Values returns all elements of the SourceType enum +func SourceType_Values() []string { + return []string{ + SourceTypeGit, + SourceTypeSvn, + SourceTypeArchive, + SourceTypeS3, + } +} + const ( // StackAttributesKeysColor is a StackAttributesKeys enum value StackAttributesKeysColor = "Color" ) +// StackAttributesKeys_Values returns all elements of the StackAttributesKeys enum +func StackAttributesKeys_Values() []string { + return []string{ + StackAttributesKeysColor, + } +} + const ( // VirtualizationTypeParavirtual is a VirtualizationType enum value VirtualizationTypeParavirtual = "paravirtual" @@ -18145,6 +18390,14 @@ const ( VirtualizationTypeHvm = "hvm" ) +// VirtualizationType_Values returns all elements of the VirtualizationType enum +func VirtualizationType_Values() []string { + return []string{ + VirtualizationTypeParavirtual, + VirtualizationTypeHvm, + } +} + const ( // VolumeTypeGp2 is a VolumeType enum value VolumeTypeGp2 = "gp2" @@ -18155,3 +18408,12 @@ const ( // VolumeTypeStandard is a VolumeType enum value VolumeTypeStandard = "standard" ) + +// VolumeType_Values returns all elements of the VolumeType enum +func VolumeType_Values() []string { + return []string{ + VolumeTypeGp2, + VolumeTypeIo1, + VolumeTypeStandard, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go index 70f156c16..f319e6520 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/organizations/api.go b/vendor/github.com/aws/aws-sdk-go/service/organizations/api.go index 9c061bfea..d1975b6e4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/organizations/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/organizations/api.go @@ -160,7 +160,10 @@ func (c *Organizations) AcceptHandshakeRequest(input *AcceptHandshakeInput) (req // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -169,7 +172,7 @@ func (c *Organizations) AcceptHandshakeRequest(input *AcceptHandshakeInput) (req // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. 
// // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -220,6 +223,12 @@ func (c *Organizations) AcceptHandshakeRequest(input *AcceptHandshakeInput) (req // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ConcurrentModificationException // The target of the operation is currently being modified by a different request. // Try again later. @@ -229,12 +238,12 @@ func (c *Organizations) AcceptHandshakeRequest(input *AcceptHandshakeInput) (req // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * AccessDeniedForDependencyException // The operation that you attempted requires you to have the iam:CreateServiceLinkedRole @@ -309,16 +318,16 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // AttachPolicy API operation for AWS Organizations. // // Attaches a policy to a root, an organizational unit (OU), or an individual -// account. +// account. How the policy affects accounts depends on the type of policy. Refer +// to the AWS Organizations User Guide for information about each policy type: // -// How the policy affects accounts depends on the type of policy: +// * AISERVICES_OPT_OUT_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) // -// * For more information about attaching SCPs, see How SCPs Work (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_about-scps.html) -// in the AWS Organizations User Guide. +// * BACKUP_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) // -// * For information about attaching tag policies, see How Policy Inheritance -// Works (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies-inheritance.html) -// in the AWS Organizations User Guide. +// * SERVICE_CONTROL_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) +// +// * TAG_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // This operation can be called only from the organization's master account. // @@ -346,27 +355,32 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. 
This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -379,11 +393,33 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). 
// +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -394,8 +430,12 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -410,6 +450,10 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -425,27 +469,29 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. 
// An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * DuplicatePolicyAttachmentException // The selected policy is already attached to the specified target. @@ -456,7 +502,10 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -465,7 +514,7 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -516,6 +565,12 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * PolicyNotFoundException // We can't find a policy with the PolicyId that you specified. // @@ -531,18 +586,18 @@ func (c *Organizations) AttachPolicyRequest(input *AttachPolicyInput) (req *requ // error. Try again later. // // * TargetNotFoundException -// We can't find a root, OU, or account with the TargetId that you specified. +// We can't find a root, OU, account, or policy with the TargetId that you specified. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. 
+// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. // // * PolicyChangesInProgressException // Changes to the effective policy are in progress, and its contents can't be @@ -661,7 +716,10 @@ func (c *Organizations) CancelHandshakeRequest(input *CancelHandshakeInput) (req // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -670,7 +728,7 @@ func (c *Organizations) CancelHandshakeRequest(input *CancelHandshakeInput) (req // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -721,17 +779,23 @@ func (c *Organizations) CancelHandshakeRequest(input *CancelHandshakeInput) (req // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/CancelHandshake func (c *Organizations) CancelHandshake(input *CancelHandshakeInput) (*CancelHandshakeOutput, error) { @@ -821,6 +885,9 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // For more information, see AWS Organizations and Service-Linked Roles (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html#orgs_integrate_services-using_slrs) // in the AWS Organizations User Guide. // +// If the request includes tags, then the requester must have the organizations:TagResource +// permission. +// // AWS Organizations preconfigures the new member account with a role (named // OrganizationAccountAccessRole by default) that grants users in the master // account administrator permissions in the new member account. 
Principals in @@ -834,13 +901,13 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // in Your Organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html) // in the AWS Organizations User Guide. // -// * When you create an account in an organization, the information required -// for the account to operate as a standalone account is not automatically -// collected. For example, information about the payment method and signing -// the end user license agreement (EULA) is not collected. If you must remove -// an account from your organization later, you can do so only after you -// provide the missing information. Follow the steps at To leave an organization -// as a member account (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// * When you create an account in an organization using the AWS Organizations +// console, API, or CLI commands, the information required for the account +// to operate as a standalone account, such as a payment method and signing +// the end user license agreement (EULA) is not automatically collected. +// If you must remove an account from your organization later, you can do +// so only after you provide the missing information. Follow the steps at +// To leave an organization as a member account (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // // * If you get an exception that indicates that you exceeded your account @@ -888,27 +955,32 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. 
Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -921,11 +993,33 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -936,8 +1030,12 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. 
// +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -952,6 +1050,10 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -967,27 +1069,29 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -995,7 +1099,10 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. 
// // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -1004,7 +1111,7 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -1055,6 +1162,12 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * FinalizingOrganizationException // AWS Organizations couldn't perform the operation because your organization // hasn't finished initializing. This can take up to an hour. Try again later. @@ -1066,15 +1179,15 @@ func (c *Organizations) CreateAccountRequest(input *CreateAccountInput) (req *re // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/CreateAccount func (c *Organizations) CreateAccount(input *CreateAccountInput) (*CreateAccountOutput, error) { @@ -1154,10 +1267,12 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // * You call this action from the master account of your organization in // the commercial Region. // -// * You have the organizations:CreateGovCloudAccount permission. AWS Organizations -// creates the required service-linked role named AWSServiceRoleForOrganizations. -// For more information, see AWS Organizations and Service-Linked Roles (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html#orgs_integrate_services-using_slrs) -// in the AWS Organizations User Guide. +// * You have the organizations:CreateGovCloudAccount permission. +// +// AWS Organizations automatically creates the required service-linked role +// named AWSServiceRoleForOrganizations. For more information, see AWS Organizations +// and Service-Linked Roles (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html#orgs_integrate_services-using_slrs) +// in the AWS Organizations User Guide. 
// // AWS automatically enables AWS CloudTrail for AWS GovCloud (US) accounts, // but you should also do the following: @@ -1168,6 +1283,12 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // see Verifying AWS CloudTrail Is Enabled (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/verifying-cloudtrail.html) // in the AWS GovCloud User Guide. // +// If the request includes tags, then the requester must have the organizations:TagResource +// permission. The tags are attached to the commercial account associated with +// the GovCloud account, rather than the GovCloud account itself. To add tags +// to the GovCloud account, call the TagResource operation in the GovCloud Region +// after the new GovCloud account exists. +// // You call this action from the master account of your organization in the // commercial Region to create a standalone AWS account in the AWS GovCloud // (US) Region. After the account is created, the master account of an organization @@ -1201,9 +1322,9 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // A role is created in the new account in the commercial Region that allows // the master account in the organization in the commercial Region to assume // it. An AWS GovCloud (US) account is then created and associated with the -// commercial account that you just created. A role is created in the new AWS -// GovCloud (US) account. This role can be assumed by the AWS GovCloud (US) -// account that is associated with the master account of the commercial organization. +// commercial account that you just created. A role is also created in the new +// AWS GovCloud (US) account that can be assumed by the AWS GovCloud (US) account +// that is associated with the master account of the commercial organization. // For more information and to view a diagram that explains how account access // works, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) // in the AWS GovCloud User Guide. @@ -1212,12 +1333,13 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // in Your Organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_create.html) // in the AWS Organizations User Guide. // -// * You can create an account in an organization using the AWS Organizations -// console, API, or CLI commands. When you do, the information required for -// the account to operate as a standalone account, such as a payment method, -// is not automatically collected. If you must remove an account from your -// organization later, you can do so only after you provide the missing information. -// Follow the steps at To leave an organization as a member account (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// * When you create an account in an organization using the AWS Organizations +// console, API, or CLI commands, the information required for the account +// to operate as a standalone account is not automatically collected. This +// includes a payment method and signing the end user license agreement (EULA). +// If you must remove an account from your organization later, you can do +// so only after you provide the missing information. 
Follow the steps at +// To leave an organization as a member account (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // // * If you get an exception that indicates that you exceeded your account @@ -1266,27 +1388,32 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -1299,11 +1426,33 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. 
Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -1314,8 +1463,12 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -1330,6 +1483,10 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -1345,27 +1502,29 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // in the AWS Organizations User Guide. 
// // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -1373,7 +1532,10 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -1382,7 +1544,7 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -1433,6 +1595,12 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * FinalizingOrganizationException // AWS Organizations couldn't perform the operation because your organization // hasn't finished initializing. This can take up to an hour. Try again later. @@ -1444,15 +1612,15 @@ func (c *Organizations) CreateGovCloudAccountRequest(input *CreateGovCloudAccoun // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. 
The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/CreateGovCloudAccount func (c *Organizations) CreateGovCloudAccount(input *CreateGovCloudAccountInput) (*CreateGovCloudAccountOutput, error) { @@ -1521,7 +1689,7 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // CreateOrganization API operation for AWS Organizations. // // Creates an AWS organization. The account whose user is calling the CreateOrganization -// operation automatically becomes the master account (https://docs.aws.amazon.com/IAM/latest/UserGuide/orgs_getting-started_concepts.html#account) +// operation automatically becomes the master account (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#account) // of the new organization. // // This operation must be called using credentials from the account that is @@ -1529,10 +1697,11 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // have the relevant IAM permissions. // // By default (or if you set the FeatureSet parameter to ALL), the new organization -// is created with all features enabled. In addition, service control policies -// are automatically enabled in the root. If you instead create the organization -// supporting only the consolidated billing features, no policy types are enabled -// by default, and you can't use organization policies. +// is created with all features enabled and service control policies automatically +// enabled in the root. If you instead choose to create the organization supporting +// only the consolidated billing features by setting the FeatureSet parameter +// to CONSOLIDATED_BILLING", no policy types are enabled by default, and you +// can't use organization policies // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1558,27 +1727,32 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. 
This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -1591,11 +1765,33 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. 
To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -1606,8 +1802,12 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -1622,6 +1822,10 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -1637,27 +1841,29 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. 
+// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -1665,7 +1871,10 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -1674,7 +1883,7 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -1725,17 +1934,23 @@ func (c *Organizations) CreateOrganizationRequest(input *CreateOrganizationInput // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * AccessDeniedForDependencyException // The operation that you attempted requires you to have the iam:CreateServiceLinkedRole @@ -1817,6 +2032,9 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // For more information about OUs, see Managing Organizational Units (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_ous.html) // in the AWS Organizations User Guide. // +// If the request includes tags, then the requester must have the organizations:TagResource +// permission. +// // This operation can be called only from the organization's master account. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1843,27 +2061,32 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -1876,11 +2099,33 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. 
If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -1891,8 +2136,12 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -1907,6 +2156,10 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -1922,27 +2175,29 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. 
-// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * DuplicateOrganizationalUnitException // An OU with the same name already exists. @@ -1953,7 +2208,10 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -1962,7 +2220,7 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -2013,6 +2271,12 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ParentNotFoundException // We can't find a root or OU with the ParentId that you specified. // @@ -2021,12 +2285,12 @@ func (c *Organizations) CreateOrganizationalUnitRequest(input *CreateOrganizatio // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. 
// -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/CreateOrganizationalUnit func (c *Organizations) CreateOrganizationalUnit(input *CreateOrganizationalUnitInput) (*CreateOrganizationalUnitOutput, error) { @@ -2100,6 +2364,9 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // For more information about policies and their use, see Managing Organization // Policies (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies.html). // +// If the request includes tags, then the requester must have the organizations:TagResource +// permission. +// // This operation can be called only from the organization's master account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2126,27 +2393,32 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. 
This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -2159,11 +2431,33 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -2174,8 +2468,12 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. 
Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -2190,6 +2488,10 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -2205,27 +2507,29 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * DuplicatePolicyException // A policy with the same name already exists. @@ -2236,7 +2540,10 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -2245,7 +2552,7 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. 
@@ -2296,6 +2603,12 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * MalformedPolicyDocumentException // The provided policy document doesn't meet the requirements of the specified // policy type. For example, the syntax might be incorrect. For details about @@ -2305,24 +2618,24 @@ func (c *Organizations) CreatePolicyRequest(input *CreatePolicyInput) (req *requ // * PolicyTypeNotAvailableForOrganizationException // You can't use the specified policy type with the feature set currently enabled // for this organization. For example, you can enable SCPs only after you enable -// all features in the organization. For more information, see Enabling and -// Disabling a Policy Type on a Root (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies.html#enable_policies_on_root) -// in the AWS Organizations User Guide. +// all features in the organization. For more information, see Managing AWS +// Organizations Policies (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies.html#enable_policies_on_root)in +// the AWS Organizations User Guide. // // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/CreatePolicy func (c *Organizations) CreatePolicy(input *CreatePolicyInput) (*CreatePolicyOutput, error) { @@ -2399,7 +2712,7 @@ func (c *Organizations) DeclineHandshakeRequest(input *DeclineHandshakeInput) (r // a new handshake request. // // After you decline a handshake, it continues to appear in the results of relevant -// API operations for only 30 days. After that, it's deleted. +// APIs for only 30 days. After that, it's deleted. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2438,7 +2751,10 @@ func (c *Organizations) DeclineHandshakeRequest(input *DeclineHandshakeInput) (r // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. 
+// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -2447,7 +2763,7 @@ func (c *Organizations) DeclineHandshakeRequest(input *DeclineHandshakeInput) (r // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -2498,17 +2814,23 @@ func (c *Organizations) DeclineHandshakeRequest(input *DeclineHandshakeInput) (r // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DeclineHandshake func (c *Organizations) DeclineHandshake(input *DeclineHandshakeInput) (*DeclineHandshakeOutput, error) { @@ -2609,7 +2931,10 @@ func (c *Organizations) DeleteOrganizationRequest(input *DeleteOrganizationInput // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -2618,7 +2943,7 @@ func (c *Organizations) DeleteOrganizationRequest(input *DeleteOrganizationInput // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -2669,6 +2994,12 @@ func (c *Organizations) DeleteOrganizationRequest(input *DeleteOrganizationInput // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * OrganizationNotEmptyException // The organization isn't empty. 
To delete an organization, you must first remove // all accounts except the master account, delete all OUs, and delete all policies. @@ -2678,12 +3009,12 @@ func (c *Organizations) DeleteOrganizationRequest(input *DeleteOrganizationInput // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DeleteOrganization func (c *Organizations) DeleteOrganization(input *DeleteOrganizationInput) (*DeleteOrganizationOutput, error) { @@ -2786,7 +3117,10 @@ func (c *Organizations) DeleteOrganizationalUnitRequest(input *DeleteOrganizatio // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -2795,7 +3129,7 @@ func (c *Organizations) DeleteOrganizationalUnitRequest(input *DeleteOrganizatio // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -2846,6 +3180,12 @@ func (c *Organizations) DeleteOrganizationalUnitRequest(input *DeleteOrganizatio // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * OrganizationalUnitNotEmptyException // The specified OU is not empty. Move all accounts to another root or to other // OUs, remove all child OUs, and try the operation again. @@ -2858,12 +3198,12 @@ func (c *Organizations) DeleteOrganizationalUnitRequest(input *DeleteOrganizatio // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. 
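A minimal sketch of invoking the DeleteOrganizationalUnit operation whose error cases are documented here, assuming default-chain credentials for the organization's master account; the OU ID is a placeholder and the snippet is illustrative only, not part of the vendored SDK source.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	// Credentials and region are assumed to come from the default chain.
	sess := session.Must(session.NewSession())
	svc := organizations.New(sess)

	// The OU must already be empty, per the OrganizationalUnitNotEmptyException
	// documented above; the ID shown is a placeholder.
	_, err := svc.DeleteOrganizationalUnit(&organizations.DeleteOrganizationalUnitInput{
		OrganizationalUnitId: aws.String("ou-examplerootid111-exampleouid111"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("organizational unit deleted")
}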
// // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DeleteOrganizationalUnit func (c *Organizations) DeleteOrganizationalUnit(input *DeleteOrganizationalUnitInput) (*DeleteOrganizationalUnitOutput, error) { @@ -2967,7 +3307,10 @@ func (c *Organizations) DeletePolicyRequest(input *DeletePolicyInput) (req *requ // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -2976,7 +3319,7 @@ func (c *Organizations) DeletePolicyRequest(input *DeletePolicyInput) (req *requ // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -3027,6 +3370,12 @@ func (c *Organizations) DeletePolicyRequest(input *DeletePolicyInput) (req *requ // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * PolicyInUseException // The policy is attached to one or more entities. You must detach it from all // roots, OUs, and accounts before performing this operation. @@ -3039,15 +3388,15 @@ func (c *Organizations) DeletePolicyRequest(input *DeletePolicyInput) (req *requ // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DeletePolicy func (c *Organizations) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { @@ -3071,51 +3420,64 @@ func (c *Organizations) DeletePolicyWithContext(ctx aws.Context, input *DeletePo return out, req.Send() } -const opDescribeAccount = "DescribeAccount" +const opDeregisterDelegatedAdministrator = "DeregisterDelegatedAdministrator" -// DescribeAccountRequest generates a "aws/request.Request" representing the -// client's request for the DescribeAccount operation. The "output" return +// DeregisterDelegatedAdministratorRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterDelegatedAdministrator operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeAccount for more information on using the DescribeAccount +// See DeregisterDelegatedAdministrator for more information on using the DeregisterDelegatedAdministrator // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeAccountRequest method. -// req, resp := client.DescribeAccountRequest(params) +// // Example sending a request using the DeregisterDelegatedAdministratorRequest method. +// req, resp := client.DeregisterDelegatedAdministratorRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeAccount -func (c *Organizations) DescribeAccountRequest(input *DescribeAccountInput) (req *request.Request, output *DescribeAccountOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DeregisterDelegatedAdministrator +func (c *Organizations) DeregisterDelegatedAdministratorRequest(input *DeregisterDelegatedAdministratorInput) (req *request.Request, output *DeregisterDelegatedAdministratorOutput) { op := &request.Operation{ - Name: opDescribeAccount, + Name: opDeregisterDelegatedAdministrator, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DescribeAccountInput{} + input = &DeregisterDelegatedAdministratorInput{} } - output = &DescribeAccountOutput{} + output = &DeregisterDelegatedAdministratorOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DescribeAccount API operation for AWS Organizations. +// DeregisterDelegatedAdministrator API operation for AWS Organizations. +// +// Removes the specified member AWS account as a delegated administrator for +// the specified AWS service. // -// Retrieves AWS Organizations related information about the specified account. +// Deregistering a delegated administrator can have unintended impacts on the +// functionality of the enabled AWS service. See the documentation for the enabled +// service before you deregister a delegated administrator so that you understand +// any potential impacts. +// +// You can run this action only for AWS services that support this feature. +// For a current list of services that support it, see the column Supports Delegated +// Administrator in the table at AWS Services that you can use with AWS Organizations +// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrated-services-list.html) +// in the AWS Organizations User Guide. // // This operation can be called only from the organization's master account. // @@ -3124,7 +3486,7 @@ func (c *Organizations) DescribeAccountRequest(input *DescribeAccountInput) (req // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation DescribeAccount for usage and error information. +// API operation DeregisterDelegatedAdministrator for usage and error information. 
// // Returned Error Types: // * AccessDeniedException @@ -3135,59 +3497,208 @@ func (c *Organizations) DescribeAccountRequest(input *DescribeAccountInput) (req // in the IAM User Guide. // // * AccountNotFoundException -// We can't find an AWS account with the AccountId that you specified. Or the +// We can't find an AWS account with the AccountId that you specified, or the // account whose credentials you used to make this request isn't a member of // an organization. // +// * AccountNotRegisteredException +// The specified account is not a delegated administrator for this AWS service. +// // * AWSOrganizationsNotInUseException // Your account isn't a member of an organization. To make this request, you // must use the credentials of an account that belongs to an organization. // -// * InvalidInputException -// The requested operation failed because you provided invalid values for one -// or more of the request parameters. This exception includes a reason that -// contains additional information about the violated limit: +// * ConcurrentModificationException +// The target of the operation is currently being modified by a different request. +// Try again later. +// +// * ConstraintViolationException +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. // -// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and -// can't be modified. +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // -// * INPUT_REQUIRED: You must include a value for all required parameters. +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account +// from the organization that doesn't yet have enough information to exist +// as a standalone account. This account requires you to first agree to the +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // -// * INVALID_ENUM: You specified an invalid value. +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove +// an account from the organization that doesn't yet have enough information +// to exist as a standalone account. This account requires you to first complete +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) +// in the AWS Organizations User Guide. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number +// of accounts that you can create in one day. // -// * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid -// characters. 
+// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). // -// * INVALID_LIST_MEMBER: You provided a list to a parameter that contains -// at least one invalid value. +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. // -// * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter -// from the response to a previous call of the operation. +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. // -// * INVALID_PATTERN: You provided a value that doesn't match the required -// pattern. +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. // -// * INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't -// match the required pattern. +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. // -// * INVALID_ROLE_NAME: You provided a role name that isn't valid. A role -// name can't begin with the reserved prefix AWSServiceRoleFor. +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. // -// * INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource -// Name (ARN) for the organization. +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. +// For example, accounts with India addresses must be associated with the +// AISPL marketplace. All accounts in an organization must be associated +// with the same marketplace. // -// * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. 
+// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. // -// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system -// tag. You can’t add, edit, or delete system tag keys because they're +// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you +// must first provide a valid contact address and phone number for the master +// account. Then try the operation again. +// +// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the +// master account must have an associated account in the AWS GovCloud (US-West) +// Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) +// in the AWS GovCloud User Guide. +// +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of +// policies that you can have in an organization. +// +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. 
+// +// * InvalidInputException +// The requested operation failed because you provided invalid values for one +// or more of the request parameters. This exception includes a reason that +// contains additional information about the violated limit: +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. +// +// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and +// can't be modified. +// +// * INPUT_REQUIRED: You must include a value for all required parameters. +// +// * INVALID_ENUM: You specified an invalid value. +// +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. +// +// * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid +// characters. +// +// * INVALID_LIST_MEMBER: You provided a list to a parameter that contains +// at least one invalid value. +// +// * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter +// from the response to a previous call of the operation. +// +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// +// * INVALID_PATTERN: You provided a value that doesn't match the required +// pattern. +// +// * INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't +// match the required pattern. +// +// * INVALID_ROLE_NAME: You provided a role name that isn't valid. A role +// name can't begin with the reserved prefix AWSServiceRoleFor. +// +// * INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource +// Name (ARN) for the organization. +// +// * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. +// +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're // reserved for AWS use. System tags don’t count against your tags per // resource limit. // @@ -3209,94 +3720,104 @@ func (c *Organizations) DescribeAccountRequest(input *DescribeAccountInput) (req // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // -// * ServiceException -// AWS Organizations can't complete your request because of an internal service -// error. Try again later. +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. 
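The error documentation above repeatedly points to runtime type assertions on awserr.Error. A minimal sketch of that pattern follows, assuming the ErrCode constants generated alongside these operations (for example ErrCodeAccountNotRegisteredException and ErrCodeTooManyRequestsException); the helper name is illustrative.

package orgsexample

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/organizations"
)

// handleOrgError shows the Code/Message assertion pattern described in the
// comments above; it is an illustrative helper, not part of the SDK.
func handleOrgError(err error) {
	if err == nil {
		return
	}
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case organizations.ErrCodeTooManyRequestsException:
			// Throttled; the quota protects against denial-of-service attacks.
			log.Printf("throttled, retry later: %s", aerr.Message())
		case organizations.ErrCodeAccountNotRegisteredException:
			log.Printf("account is not a delegated administrator: %s", aerr.Message())
		default:
			log.Printf("%s: %s", aerr.Code(), aerr.Message())
		}
		return
	}
	log.Printf("non-API error: %v", err)
}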
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeAccount -func (c *Organizations) DescribeAccount(input *DescribeAccountInput) (*DescribeAccountOutput, error) { - req, out := c.DescribeAccountRequest(input) +// * ServiceException +// AWS Organizations can't complete your request because of an internal service +// error. Try again later. +// +// * UnsupportedAPIEndpointException +// This action isn't available in the current AWS Region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DeregisterDelegatedAdministrator +func (c *Organizations) DeregisterDelegatedAdministrator(input *DeregisterDelegatedAdministratorInput) (*DeregisterDelegatedAdministratorOutput, error) { + req, out := c.DeregisterDelegatedAdministratorRequest(input) return out, req.Send() } -// DescribeAccountWithContext is the same as DescribeAccount with the addition of +// DeregisterDelegatedAdministratorWithContext is the same as DeregisterDelegatedAdministrator with the addition of // the ability to pass a context and additional request options. // -// See DescribeAccount for details on how to use this API operation. +// See DeregisterDelegatedAdministrator for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) DescribeAccountWithContext(ctx aws.Context, input *DescribeAccountInput, opts ...request.Option) (*DescribeAccountOutput, error) { - req, out := c.DescribeAccountRequest(input) +func (c *Organizations) DeregisterDelegatedAdministratorWithContext(ctx aws.Context, input *DeregisterDelegatedAdministratorInput, opts ...request.Option) (*DeregisterDelegatedAdministratorOutput, error) { + req, out := c.DeregisterDelegatedAdministratorRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeCreateAccountStatus = "DescribeCreateAccountStatus" +const opDescribeAccount = "DescribeAccount" -// DescribeCreateAccountStatusRequest generates a "aws/request.Request" representing the -// client's request for the DescribeCreateAccountStatus operation. The "output" return +// DescribeAccountRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccount operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeCreateAccountStatus for more information on using the DescribeCreateAccountStatus +// See DescribeAccount for more information on using the DescribeAccount // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeCreateAccountStatusRequest method. -// req, resp := client.DescribeCreateAccountStatusRequest(params) +// // Example sending a request using the DescribeAccountRequest method. 
+// req, resp := client.DescribeAccountRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeCreateAccountStatus -func (c *Organizations) DescribeCreateAccountStatusRequest(input *DescribeCreateAccountStatusInput) (req *request.Request, output *DescribeCreateAccountStatusOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeAccount +func (c *Organizations) DescribeAccountRequest(input *DescribeAccountInput) (req *request.Request, output *DescribeAccountOutput) { op := &request.Operation{ - Name: opDescribeCreateAccountStatus, + Name: opDescribeAccount, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DescribeCreateAccountStatusInput{} + input = &DescribeAccountInput{} } - output = &DescribeCreateAccountStatusOutput{} + output = &DescribeAccountOutput{} req = c.newRequest(op, input, output) return } -// DescribeCreateAccountStatus API operation for AWS Organizations. +// DescribeAccount API operation for AWS Organizations. // -// Retrieves the current status of an asynchronous request to create an account. +// Retrieves AWS Organizations-related information about the specified account. // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation DescribeCreateAccountStatus for usage and error information. +// API operation DescribeAccount for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -3306,21 +3827,25 @@ func (c *Organizations) DescribeCreateAccountStatusRequest(input *DescribeCreate // Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) // in the IAM User Guide. // +// * AccountNotFoundException +// We can't find an AWS account with the AccountId that you specified, or the +// account whose credentials you used to make this request isn't a member of +// an organization. +// // * AWSOrganizationsNotInUseException // Your account isn't a member of an organization. To make this request, you // must use the credentials of an account that belongs to an organization. // -// * CreateAccountStatusNotFoundException -// We can't find a create account request with the CreateAccountRequestId that -// you specified. -// // * InvalidInputException // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -3329,7 +3854,7 @@ func (c *Organizations) DescribeCreateAccountStatusRequest(input *DescribeCreate // // * INVALID_ENUM: You specified an invalid value. 
// -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -3380,105 +3905,101 @@ func (c *Organizations) DescribeCreateAccountStatusRequest(input *DescribeCreate // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. -// -// * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeCreateAccountStatus -func (c *Organizations) DescribeCreateAccountStatus(input *DescribeCreateAccountStatusInput) (*DescribeCreateAccountStatusOutput, error) { - req, out := c.DescribeCreateAccountStatusRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeAccount +func (c *Organizations) DescribeAccount(input *DescribeAccountInput) (*DescribeAccountOutput, error) { + req, out := c.DescribeAccountRequest(input) return out, req.Send() } -// DescribeCreateAccountStatusWithContext is the same as DescribeCreateAccountStatus with the addition of +// DescribeAccountWithContext is the same as DescribeAccount with the addition of // the ability to pass a context and additional request options. // -// See DescribeCreateAccountStatus for details on how to use this API operation. +// See DescribeAccount for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) DescribeCreateAccountStatusWithContext(ctx aws.Context, input *DescribeCreateAccountStatusInput, opts ...request.Option) (*DescribeCreateAccountStatusOutput, error) { - req, out := c.DescribeCreateAccountStatusRequest(input) +func (c *Organizations) DescribeAccountWithContext(ctx aws.Context, input *DescribeAccountInput, opts ...request.Option) (*DescribeAccountOutput, error) { + req, out := c.DescribeAccountRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opDescribeEffectivePolicy = "DescribeEffectivePolicy" +const opDescribeCreateAccountStatus = "DescribeCreateAccountStatus" -// DescribeEffectivePolicyRequest generates a "aws/request.Request" representing the -// client's request for the DescribeEffectivePolicy operation. The "output" return +// DescribeCreateAccountStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCreateAccountStatus operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeEffectivePolicy for more information on using the DescribeEffectivePolicy +// See DescribeCreateAccountStatus for more information on using the DescribeCreateAccountStatus // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeEffectivePolicyRequest method. -// req, resp := client.DescribeEffectivePolicyRequest(params) +// // Example sending a request using the DescribeCreateAccountStatusRequest method. +// req, resp := client.DescribeCreateAccountStatusRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeEffectivePolicy -func (c *Organizations) DescribeEffectivePolicyRequest(input *DescribeEffectivePolicyInput) (req *request.Request, output *DescribeEffectivePolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeCreateAccountStatus +func (c *Organizations) DescribeCreateAccountStatusRequest(input *DescribeCreateAccountStatusInput) (req *request.Request, output *DescribeCreateAccountStatusOutput) { op := &request.Operation{ - Name: opDescribeEffectivePolicy, + Name: opDescribeCreateAccountStatus, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DescribeEffectivePolicyInput{} + input = &DescribeCreateAccountStatusInput{} } - output = &DescribeEffectivePolicyOutput{} + output = &DescribeCreateAccountStatusOutput{} req = c.newRequest(op, input, output) return } -// DescribeEffectivePolicy API operation for AWS Organizations. -// -// Returns the contents of the effective tag policy for the account. The effective -// tag policy is the aggregation of any tag policies the account inherits, plus -// any policy directly that is attached to the account. -// -// This action returns information on tag policies only. +// DescribeCreateAccountStatus API operation for AWS Organizations. // -// For more information on policy inheritance, see How Policy Inheritance Works -// (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies-inheritance.html) -// in the AWS Organizations User Guide. +// Retrieves the current status of an asynchronous request to create an account. // -// This operation can be called from any account in the organization. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation DescribeEffectivePolicy for usage and error information. +// API operation DescribeCreateAccountStatus for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -3492,70 +4013,299 @@ func (c *Organizations) DescribeEffectivePolicyRequest(input *DescribeEffectiveP // Your account isn't a member of an organization. To make this request, you // must use the credentials of an account that belongs to an organization. // -// * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// * CreateAccountStatusNotFoundException +// We can't find an create account request with the CreateAccountRequestId that +// you specified. +// +// * InvalidInputException +// The requested operation failed because you provided invalid values for one +// or more of the request parameters. This exception includes a reason that +// contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. // -// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account -// from the organization that doesn't yet have enough information to exist -// as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // -// * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove -// an account from the organization that doesn't yet have enough information -// to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and +// can't be modified. // -// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number -// of accounts that you can create in one day. +// * INPUT_REQUIRED: You must include a value for all required parameters. // -// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on -// the number of accounts in an organization. If you need more accounts, -// contact AWS Support (https://console.aws.amazon.com/support/home#/) to -// request an increase in your limit. Or the number of invitations that you -// tried to send would cause you to exceed the limit of accounts in your -// organization. Send fewer invitations or contact AWS Support to request -// an increase in the number of accounts. 
Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact -// AWS Support (https://console.aws.amazon.com/support/home#/). +// * INVALID_ENUM: You specified an invalid value. // -// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of -// handshakes that you can send in one day. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // -// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account -// in this organization, you first must migrate the organization's master -// account to the marketplace that corresponds to the master account's address. -// For example, accounts with India addresses must be associated with the -// AISPL marketplace. All accounts in an organization must be associated -// with the same marketplace. +// * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid +// characters. // -// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master -// account. Then try the operation again. +// * INVALID_LIST_MEMBER: You provided a list to a parameter that contains +// at least one invalid value. // -// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the -// master account must have an associated account in the AWS GovCloud (US-West) -// Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) -// in the AWS GovCloud User Guide. +// * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter +// from the response to a previous call of the operation. // -// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization -// with this master account, you first must associate a valid payment instrument, -// such as a credit card, with the account. Follow the steps at To leave -// an organization when all required account information has not yet been -// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// +// * INVALID_PATTERN: You provided a value that doesn't match the required +// pattern. +// +// * INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't +// match the required pattern. +// +// * INVALID_ROLE_NAME: You provided a role name that isn't valid. A role +// name can't begin with the reserved prefix AWSServiceRoleFor. +// +// * INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource +// Name (ARN) for the organization. +// +// * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. +// +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// +// * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter +// for the operation. +// +// * MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer +// than allowed. 
+// +// * MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger +// value than allowed. +// +// * MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter +// than allowed. +// +// * MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller +// value than allowed. +// +// * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only +// between entities in the same root. +// +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// +// * ServiceException +// AWS Organizations can't complete your request because of an internal service +// error. Try again later. +// +// * TooManyRequestsException +// You have sent too many requests in too short a period of time. The quota +// helps protect against denial-of-service attacks. Try again later. +// +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. +// +// * UnsupportedAPIEndpointException +// This action isn't available in the current AWS Region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeCreateAccountStatus +func (c *Organizations) DescribeCreateAccountStatus(input *DescribeCreateAccountStatusInput) (*DescribeCreateAccountStatusOutput, error) { + req, out := c.DescribeCreateAccountStatusRequest(input) + return out, req.Send() +} + +// DescribeCreateAccountStatusWithContext is the same as DescribeCreateAccountStatus with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCreateAccountStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Organizations) DescribeCreateAccountStatusWithContext(ctx aws.Context, input *DescribeCreateAccountStatusInput, opts ...request.Option) (*DescribeCreateAccountStatusOutput, error) { + req, out := c.DescribeCreateAccountStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeEffectivePolicy = "DescribeEffectivePolicy" + +// DescribeEffectivePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEffectivePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEffectivePolicy for more information on using the DescribeEffectivePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeEffectivePolicyRequest method. 
+// req, resp := client.DescribeEffectivePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeEffectivePolicy +func (c *Organizations) DescribeEffectivePolicyRequest(input *DescribeEffectivePolicyInput) (req *request.Request, output *DescribeEffectivePolicyOutput) { + op := &request.Operation{ + Name: opDescribeEffectivePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEffectivePolicyInput{} + } + + output = &DescribeEffectivePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEffectivePolicy API operation for AWS Organizations. +// +// Returns the contents of the effective policy for specified policy type and +// account. The effective policy is the aggregation of any policies of the specified +// type that the account inherits, plus any policy of that type that is directly +// attached to the account. +// +// This operation applies only to policy types other than service control policies +// (SCPs). +// +// For more information about policy inheritance, see How Policy Inheritance +// Works (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies-inheritance.html) +// in the AWS Organizations User Guide. +// +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Organizations's +// API operation DescribeEffectivePolicy for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have permissions to perform the requested operation. The user or +// role that is making the request must have at least one IAM permissions policy +// attached that grants the required permissions. For more information, see +// Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) +// in the IAM User Guide. +// +// * AWSOrganizationsNotInUseException +// Your account isn't a member of an organization. To make this request, you +// must use the credentials of an account that belongs to an organization. +// +// * ConstraintViolationException +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. +// +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account +// from the organization that doesn't yet have enough information to exist +// as a standalone account. This account requires you to first agree to the +// AWS Customer Agreement. 
Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. +// +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove +// an account from the organization that doesn't yet have enough information +// to exist as a standalone account. This account requires you to first complete +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) +// in the AWS Organizations User Guide. +// +// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number +// of accounts that you can create in one day. +// +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. +// For example, accounts with India addresses must be associated with the +// AISPL marketplace. All accounts in an organization must be associated +// with the same marketplace. 
+// +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// +// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you +// must first provide a valid contact address and phone number for the master +// account. Then try the operation again. +// +// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the +// master account must have an associated account in the AWS GovCloud (US-West) +// Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) +// in the AWS GovCloud User Guide. +// +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. // // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity @@ -3572,42 +4322,44 @@ func (c *Organizations) DescribeEffectivePolicyRequest(input *DescribeEffectiveP // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. 
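As a rough illustration of the DescribeEffectivePolicy operation documented above, the sketch below fetches the effective tag policy for one member account; the caller-supplied client and account ID, and the assumption that tag policies are enabled for the organization, are placeholders rather than anything mandated by the SDK.

package orgexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/organizations"
)

// showEffectiveTagPolicy prints the aggregated tag policy that applies to the
// given account ID (for example "123456789012"). The client is supplied by the caller.
func showEffectiveTagPolicy(svc *organizations.Organizations, accountID string) error {
	out, err := svc.DescribeEffectivePolicy(&organizations.DescribeEffectivePolicyInput{
		PolicyType: aws.String("TAG_POLICY"), // SCPs are not supported by this operation
		TargetId:   aws.String(accountID),
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.EffectivePolicy.PolicyContent))
	return nil
}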
// // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * TargetNotFoundException -// We can't find a root, OU, or account with the TargetId that you specified. +// We can't find a root, OU, account, or policy with the TargetId that you specified. // // * EffectivePolicyNotFoundException // If you ran this action on the master account, this policy type is not enabled. @@ -3621,7 +4373,10 @@ func (c *Organizations) DescribeEffectivePolicyRequest(input *DescribeEffectiveP // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -3630,7 +4385,7 @@ func (c *Organizations) DescribeEffectivePolicyRequest(input *DescribeEffectiveP // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -3681,8 +4436,14 @@ func (c *Organizations) DescribeEffectivePolicyRequest(input *DescribeEffectiveP // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeEffectivePolicy func (c *Organizations) DescribeEffectivePolicy(input *DescribeEffectivePolicyInput) (*DescribeEffectivePolicyOutput, error) { @@ -3788,7 +4549,10 @@ func (c *Organizations) DescribeHandshakeRequest(input *DescribeHandshakeInput) // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -3797,7 +4561,7 @@ func (c *Organizations) DescribeHandshakeRequest(input *DescribeHandshakeInput) // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. 
+// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -3848,17 +4612,23 @@ func (c *Organizations) DescribeHandshakeRequest(input *DescribeHandshakeInput) // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeHandshake func (c *Organizations) DescribeHandshake(input *DescribeHandshakeInput) (*DescribeHandshakeOutput, error) { @@ -3963,12 +4733,12 @@ func (c *Organizations) DescribeOrganizationRequest(input *DescribeOrganizationI // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeOrganization func (c *Organizations) DescribeOrganization(input *DescribeOrganizationInput) (*DescribeOrganizationOutput, error) { @@ -4038,7 +4808,8 @@ func (c *Organizations) DescribeOrganizationalUnitRequest(input *DescribeOrganiz // // Retrieves information about an organizational unit (OU). // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. 
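A small sketch of the DescribeOrganizationalUnit call described above; the "ou-..." identifier format is a placeholder, and in practice the ID would come from ListRoots or ListOrganizationalUnitsForParent.

package orgexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/organizations"
)

// describeOU looks up a single organizational unit by ID and prints its name and ARN.
func describeOU(svc *organizations.Organizations, ouID string) error {
	out, err := svc.DescribeOrganizationalUnit(&organizations.DescribeOrganizationalUnitInput{
		OrganizationalUnitId: aws.String(ouID),
	})
	if err != nil {
		return err
	}
	fmt.Printf("%s (%s)\n", aws.StringValue(out.OrganizationalUnit.Name),
		aws.StringValue(out.OrganizationalUnit.Arn))
	return nil
}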
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4065,7 +4836,10 @@ func (c *Organizations) DescribeOrganizationalUnitRequest(input *DescribeOrganiz // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -4074,7 +4848,7 @@ func (c *Organizations) DescribeOrganizationalUnitRequest(input *DescribeOrganiz // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -4125,6 +4899,12 @@ func (c *Organizations) DescribeOrganizationalUnitRequest(input *DescribeOrganiz // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * OrganizationalUnitNotFoundException // We can't find an OU with the OrganizationalUnitId that you specified. // @@ -4133,12 +4913,12 @@ func (c *Organizations) DescribeOrganizationalUnitRequest(input *DescribeOrganiz // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribeOrganizationalUnit func (c *Organizations) DescribeOrganizationalUnit(input *DescribeOrganizationalUnitInput) (*DescribeOrganizationalUnitOutput, error) { @@ -4208,7 +4988,8 @@ func (c *Organizations) DescribePolicyRequest(input *DescribePolicyInput) (req * // // Retrieves information about a policy. // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4235,7 +5016,10 @@ func (c *Organizations) DescribePolicyRequest(input *DescribePolicyInput) (req * // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. 
+// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -4244,7 +5028,7 @@ func (c *Organizations) DescribePolicyRequest(input *DescribePolicyInput) (req * // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -4295,6 +5079,12 @@ func (c *Organizations) DescribePolicyRequest(input *DescribePolicyInput) (req * // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * PolicyNotFoundException // We can't find a policy with the PolicyId that you specified. // @@ -4303,15 +5093,15 @@ func (c *Organizations) DescribePolicyRequest(input *DescribePolicyInput) (req * // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DescribePolicy func (c *Organizations) DescribePolicy(input *DescribePolicyInput) (*DescribePolicyOutput, error) { @@ -4381,19 +5171,20 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // DetachPolicy API operation for AWS Organizations. // // Detaches a policy from a target root, organizational unit (OU), or account. +// // If the policy being detached is a service control policy (SCP), the changes -// to permissions for IAM users and roles in affected accounts are immediate. -// -// Note: Every root, OU, and account must have at least one SCP attached. You -// can replace the default FullAWSAccess policy with one that limits the permissions -// that can be delegated. To do that, you must attach the replacement policy -// before you can remove the default one. This is the authorization strategy -// of using an allow list (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_about-scps.html#orgs_policies_whitelist). -// You could instead attach a second SCP and leave the FullAWSAccess SCP still -// attached. You could then specify "Effect": "Deny" in the second SCP to override -// the "Effect": "Allow" in the FullAWSAccess policy (or any other attached -// SCP). 
If you take these steps, you're using the authorization strategy of -// a deny list (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_about-scps.html#orgs_policies_blacklist). +// to permissions for AWS Identity and Access Management (IAM) users and roles +// in affected accounts are immediate. +// +// Every root, OU, and account must have at least one SCP attached. If you want +// to replace the default FullAWSAccess policy with an SCP that limits the permissions +// that can be delegated, you must attach the replacement SCP before you can +// remove the default SCP. This is the authorization strategy of an "allow list +// (https://docs.aws.amazon.com/organizations/latest/userguide/SCP_strategies.html#orgs_policies_allowlist)". +// If you instead attach a second SCP and leave the FullAWSAccess SCP still +// attached, and specify "Effect": "Deny" in the second SCP to override the +// "Effect": "Allow" in the FullAWSAccess policy (or any other attached SCP), +// you're using the authorization strategy of a "deny list (https://docs.aws.amazon.com/organizations/latest/userguide/SCP_strategies.html#orgs_policies_denylist)". // // This operation can be called only from the organization's master account. // @@ -4421,27 +5212,32 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. 
This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -4454,11 +5250,33 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -4469,8 +5287,12 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. 
Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -4485,6 +5307,10 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -4500,27 +5326,29 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -4528,7 +5356,10 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -4537,7 +5368,7 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. 
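To make the allow-list sequencing that the DetachPolicy documentation above requires concrete, here is a rough sketch that attaches a replacement SCP to a root before detaching the default FullAWSAccess policy. The replacement policy ID and root ID are caller-supplied placeholders; "p-FullAWSAccess" is assumed to be the ID of the AWS-managed default SCP.

package orgexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/organizations"
)

// replaceFullAWSAccess attaches a more restrictive SCP to a root and only then
// detaches the default FullAWSAccess policy, matching the order the documentation
// requires so that the root is never left without an attached SCP.
func replaceFullAWSAccess(svc *organizations.Organizations, replacementPolicyID, rootID string) error {
	if _, err := svc.AttachPolicy(&organizations.AttachPolicyInput{
		PolicyId: aws.String(replacementPolicyID),
		TargetId: aws.String(rootID),
	}); err != nil {
		return err
	}
	_, err := svc.DetachPolicy(&organizations.DetachPolicyInput{
		PolicyId: aws.String("p-FullAWSAccess"), // assumed ID of the AWS-managed default SCP
		TargetId: aws.String(rootID),
	})
	return err
}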
@@ -4588,6 +5419,12 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * PolicyNotAttachedException // The policy isn't attached to the specified target in the specified root. // @@ -4599,18 +5436,18 @@ func (c *Organizations) DetachPolicyRequest(input *DetachPolicyInput) (req *requ // error. Try again later. // // * TargetNotFoundException -// We can't find a root, OU, or account with the TargetId that you specified. +// We can't find a root, OU, account, or policy with the TargetId that you specified. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. // // * PolicyChangesInProgressException // Changes to the effective policy are in progress, and its contents can't be @@ -4700,9 +5537,9 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // AWS service. // // After you perform the DisableAWSServiceAccess operation, the specified service -// can no longer perform operations in your organization's accounts. The only -// exception is when the operations are explicitly permitted by IAM policies -// that are attached to your roles. +// can no longer perform operations in your organization's accounts unless the +// operations are explicitly permitted by the IAM policies that are attached +// to your roles. // // For more information about integrating other services with AWS Organizations, // including the list of services that work with Organizations, see Integrating @@ -4735,27 +5572,32 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. 
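A minimal sketch of the DisableAWSServiceAccess call described above, including handling of the throttling error the documentation lists; the service principal "config.amazonaws.com" is only an example value supplied by the caller.

package orgexamples

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/organizations"
)

// disableServiceAccess revokes an integrated service's ability to operate in
// member accounts, e.g. disableServiceAccess(svc, "config.amazonaws.com").
func disableServiceAccess(svc *organizations.Organizations, principal string) {
	_, err := svc.DisableAWSServiceAccess(&organizations.DisableAWSServiceAccessInput{
		ServicePrincipal: aws.String(principal),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == organizations.ErrCodeTooManyRequestsException {
		log.Println("throttled, retry later:", aerr.Message())
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}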
This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -4768,11 +5610,33 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. 
To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -4783,8 +5647,12 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -4799,6 +5667,10 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -4814,27 +5686,29 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. 
+// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -4842,7 +5716,10 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -4851,7 +5728,7 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -4902,17 +5779,26 @@ func (c *Organizations) DisableAWSServiceAccessRequest(input *DisableAWSServiceA // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. +// +// * UnsupportedAPIEndpointException +// This action isn't available in the current AWS Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/DisableAWSServiceAccess func (c *Organizations) DisableAWSServiceAccess(input *DisableAWSServiceAccessInput) (*DisableAWSServiceAccessOutput, error) { @@ -4980,16 +5866,14 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // DisablePolicyType API operation for AWS Organizations. // -// Disables an organizational control policy type in a root and detaches all -// policies of that type from the organization root, OUs, and accounts. 
A policy -// of a certain type can be attached to entities in a root only if that type -// is enabled in the root. After you perform this operation, you no longer can -// attach policies of the specified type to that root or to any organizational -// unit (OU) or account in that root. You can undo this by using the EnablePolicyType -// operation. +// Disables an organizational policy type in a root. A policy of a certain type +// can be attached to entities in a root only if that type is enabled in the +// root. After you perform this operation, you no longer can attach policies +// of the specified type to that root or to any organizational unit (OU) or +// account in that root. You can undo this by using the EnablePolicyType operation. // // This is an asynchronous request that AWS performs in the background. If you -// disable a policy for a root, it still appears enabled for the organization +// disable a policy type for a root, it still appears enabled for the organization // if all features (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html) // are enabled for the organization. AWS recommends that you first use ListRoots // to see the status of policy types for a specified root, and then use this @@ -5023,27 +5907,32 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. 
This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -5056,11 +5945,33 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -5071,8 +5982,12 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. 
Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -5087,6 +6002,10 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -5102,27 +6021,29 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -5130,7 +6051,10 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -5139,7 +6063,7 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. 
@@ -5190,6 +6114,12 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * PolicyTypeNotEnabledException // The specified policy type isn't currently enabled in this root. You can't // attach policies of the specified type to entities in a root until you enable @@ -5205,15 +6135,15 @@ func (c *Organizations) DisablePolicyTypeRequest(input *DisablePolicyTypeInput) // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. // // * PolicyChangesInProgressException // Changes to the effective policy are in progress, and its contents can't be @@ -5331,27 +6261,32 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. 
+// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -5364,11 +6299,33 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -5379,8 +6336,12 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. 
// +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -5395,6 +6356,10 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -5410,27 +6375,29 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -5438,7 +6405,10 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. 
// // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -5447,7 +6417,7 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -5498,17 +6468,26 @@ func (c *Organizations) EnableAWSServiceAccessRequest(input *EnableAWSServiceAcc // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. +// +// * UnsupportedAPIEndpointException +// This action isn't available in the current AWS Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/EnableAWSServiceAccess func (c *Organizations) EnableAWSServiceAccess(input *EnableAWSServiceAccessInput) (*EnableAWSServiceAccessOutput, error) { @@ -5579,7 +6558,7 @@ func (c *Organizations) EnableAllFeaturesRequest(input *EnableAllFeaturesInput) // Enables all features in an organization. This enables the use of organization // policies that can restrict the services and actions that can be called in // each account. Until you enable all features, you have access only to consolidated -// billing. You can't use any of the advanced account administration features +// billing, and you can't use any of the advanced account administration features // that AWS Organizations supports. For more information, see Enabling All Features // in Your Organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html) // in the AWS Organizations User Guide. @@ -5588,8 +6567,8 @@ func (c *Organizations) EnableAllFeaturesRequest(input *EnableAllFeaturesInput) // with only the consolidated billing features enabled. Calling this operation // sends a handshake to every invited account in the organization. The feature // set change can be finalized and the additional features enabled only after -// all administrators in the invited accounts approve the change. Accepting -// the handshake approves the change. +// all administrators in the invited accounts approve the change by accepting +// the handshake. 
// // After you enable all features, you can separately enable or disable individual // policy types in a root using EnablePolicyType and DisablePolicyType. To see @@ -5676,7 +6655,10 @@ func (c *Organizations) EnableAllFeaturesRequest(input *EnableAllFeaturesInput) // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -5685,7 +6667,7 @@ func (c *Organizations) EnableAllFeaturesRequest(input *EnableAllFeaturesInput) // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -5736,17 +6718,23 @@ func (c *Organizations) EnableAllFeaturesRequest(input *EnableAllFeaturesInput) // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/EnableAllFeatures func (c *Organizations) EnableAllFeatures(input *EnableAllFeaturesInput) (*EnableAllFeaturesOutput, error) { @@ -5853,27 +6841,32 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. 
This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -5886,11 +6879,33 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. 
To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -5901,8 +6916,12 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -5917,6 +6936,10 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -5932,27 +6955,29 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. 
+// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -5960,7 +6985,10 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -5969,7 +6997,7 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -6020,6 +7048,12 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * PolicyTypeAlreadyEnabledException // The specified policy type is already enabled in the specified root. // @@ -6031,22 +7065,22 @@ func (c *Organizations) EnablePolicyTypeRequest(input *EnablePolicyTypeInput) (r // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * PolicyTypeNotAvailableForOrganizationException // You can't use the specified policy type with the feature set currently enabled // for this organization. For example, you can enable SCPs only after you enable -// all features in the organization. For more information, see Enabling and -// Disabling a Policy Type on a Root (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies.html#enable_policies_on_root) -// in the AWS Organizations User Guide. +// all features in the organization. 
For more information, see Managing AWS +// Organizations Policies (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies.html#enable_policies_on_root)in +// the AWS Organizations User Guide. // // * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. // // * PolicyChangesInProgressException // Changes to the effective policy are in progress, and its contents can't be @@ -6124,16 +7158,19 @@ func (c *Organizations) InviteAccountToOrganizationRequest(input *InviteAccountT // as a Handshake whose details are in the response. // // * You can invite AWS accounts only from the same seller as the master -// account. For example, assume that your organization's master account was -// created by Amazon Internet Services Pvt. Ltd (AISPL), an AWS seller in -// India. You can invite only other AISPL accounts to your organization. -// You can't combine accounts from AISPL and AWS or from any other AWS seller. -// For more information, see Consolidated Billing in India (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/useconsolidatedbilliing-India.html). +// account. For example, if your organization's master account was created +// by Amazon Internet Services Pvt. Ltd (AISPL), an AWS seller in India, +// you can invite only other AISPL accounts to your organization. You can't +// combine accounts from AISPL and AWS or from any other AWS seller. For +// more information, see Consolidated Billing in India (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/useconsolidatedbilliing-India.html). // -// * You might receive an exception that indicates that you exceeded your -// account limits for the organization or that the operation failed because -// your organization is still initializing. If so, wait one hour and then -// try again. If the error persists after an hour, contact AWS Support (https://console.aws.amazon.com/support/home#/). +// * If you receive an exception that indicates that you exceeded your account +// limits for the organization or that the operation failed because your +// organization is still initializing, wait one hour and then try again. +// If the error persists after an hour, contact AWS Support (https://console.aws.amazon.com/support/home#/). +// +// If the request includes tags, then the requester must have the organizations:TagResource +// permission. // // This operation can be called only from the organization's master account. // @@ -6213,13 +7250,155 @@ func (c *Organizations) InviteAccountToOrganizationRequest(input *InviteAccountT // to resend an invitation to an account, ensure that existing handshakes that // might be considered duplicates are canceled or declined. // +// * ConstraintViolationException +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. 
Instead, +// after you remove all member accounts, delete the organization itself. +// +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account +// from the organization that doesn't yet have enough information to exist +// as a standalone account. This account requires you to first agree to the +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. +// +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove +// an account from the organization that doesn't yet have enough information +// to exist as a standalone account. This account requires you to first complete +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) +// in the AWS Organizations User Guide. +// +// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number +// of accounts that you can create in one day. +// +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. 
+// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. +// For example, accounts with India addresses must be associated with the +// AISPL marketplace. All accounts in an organization must be associated +// with the same marketplace. +// +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// +// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you +// must first provide a valid contact address and phone number for the master +// account. Then try the operation again. +// +// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the +// master account must have an associated account in the AWS GovCloud (US-West) +// Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) +// in the AWS GovCloud User Guide. +// +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of +// policies that you can have in an organization. 
+// +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. +// // * InvalidInputException // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -6228,7 +7407,7 @@ func (c *Organizations) InviteAccountToOrganizationRequest(input *InviteAccountT // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -6279,6 +7458,12 @@ func (c *Organizations) InviteAccountToOrganizationRequest(input *InviteAccountT // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * FinalizingOrganizationException // AWS Organizations couldn't perform the operation because your organization // hasn't finished initializing. This can take up to an hour. Try again later. @@ -6290,12 +7475,12 @@ func (c *Organizations) InviteAccountToOrganizationRequest(input *InviteAccountT // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/InviteAccountToOrganization func (c *Organizations) InviteAccountToOrganization(input *InviteAccountToOrganizationInput) (*InviteAccountToOrganizationOutput, error) { @@ -6373,21 +7558,21 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // // * The master account in an organization with all features enabled can // set service control policies (SCPs) that can restrict what administrators -// of member accounts can do. These restrictions can include preventing member -// accounts from successfully calling LeaveOrganization. +// of member accounts can do. This includes preventing them from successfully +// calling LeaveOrganization and leaving the organization. 
// // * You can leave an organization as a member account only if the account // is configured with the information required to operate as a standalone // account. When you create an account in an organization using the AWS Organizations -// console, API, or CLI, the information required of standalone accounts -// is not automatically collected. For each account that you want to make -// standalone, you must accept the end user license agreement (EULA). You -// must also choose a support plan, provide and verify the required contact -// information, and provide a current payment method. AWS uses the payment -// method to charge for any billable (not free tier) AWS activity that occurs -// while the account isn't attached to an organization. Follow the steps -// at To leave an organization when all required account information has -// not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// console, API, or CLI commands, the information required of standalone +// accounts is not automatically collected. For each account that you want +// to make standalone, you must perform the following steps. If any of the +// steps are already completed for this account, that step doesn't appear. +// Choose a support plan Provide and verify the required contact information +// Provide a current payment method AWS uses the payment method to charge +// for any billable (not free tier) AWS activity that occurs while the account +// isn't attached to an organization. Follow the steps at To leave an organization +// when all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // // * You can leave an organization only after you enable IAM user access @@ -6395,6 +7580,10 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // to the Billing and Cost Management Console (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate) // in the AWS Billing and Cost Management User Guide. // +// * After the account leaves the organization, all tags that were attached +// to the account object in the organization are deleted. AWS accounts outside +// of an organization do not support tags. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6411,7 +7600,7 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // in the IAM User Guide. // // * AccountNotFoundException -// We can't find an AWS account with the AccountId that you specified. Or the +// We can't find an AWS account with the AccountId that you specified, or the // account whose credentials you used to make this request isn't a member of // an organization. // @@ -6424,27 +7613,32 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. 
+// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -6457,11 +7651,33 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. 
You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -6472,8 +7688,12 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -6488,6 +7708,10 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -6503,27 +7727,29 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. 
+// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -6531,7 +7757,10 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -6540,7 +7769,7 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -6591,6 +7820,12 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * MasterCannotLeaveOrganizationException // You can't remove a master account from an organization. If you want the master // account to become a member account in another organization, you must first @@ -6601,12 +7836,12 @@ func (c *Organizations) LeaveOrganizationRequest(input *LeaveOrganizationInput) // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. 
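The error catalog above surfaces at runtime through the SDK's awserr.Error interface, with the specific reason carried in the exception detail. The following is a minimal sketch of calling LeaveOrganization from a member account and branching on the documented error codes; the session setup, us-east-1 region, and log messages are illustrative assumptions, not part of the generated SDK comments.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	// Organizations is a global service; us-east-1 is the conventional endpoint region.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := organizations.New(sess)

	// Must be called with the credentials of a member account, not the master account.
	_, err := svc.LeaveOrganization(&organizations.LeaveOrganizationInput{})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case organizations.ErrCodeConstraintViolationException:
				// The message explains which documented reason applied,
				// e.g. ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA.
				log.Printf("constraint violation: %s", aerr.Message())
			case organizations.ErrCodeMasterCannotLeaveOrganizationException:
				log.Print("the master account cannot leave; delete the organization instead")
			case organizations.ErrCodeTooManyRequestsException:
				log.Print("request was throttled; retry later")
			default:
				log.Printf("%s: %s", aerr.Code(), aerr.Message())
			}
		}
		return
	}
	fmt.Println("account left the organization")
}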
// // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/LeaveOrganization func (c *Organizations) LeaveOrganization(input *LeaveOrganizationInput) (*LeaveOrganizationOutput, error) { @@ -6690,7 +7925,8 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // Integrating AWS Organizations with Other AWS Services (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrate_services.html) // in the AWS Organizations User Guide. // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6712,27 +7948,32 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // must use the credentials of an account that belongs to an organization. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. 
Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -6745,11 +7986,33 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -6760,8 +8023,12 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. 
// // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -6776,6 +8043,10 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -6791,27 +8062,29 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -6819,7 +8092,10 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -6828,7 +8104,7 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. 
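As documented above, ListAWSServiceAccessForOrganization can now be called from the master account or from a delegated administrator. A short sketch of paging through the enabled service principals with the generated ListAWSServiceAccessForOrganizationPages helper; the session and region setup are assumed.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// The Pages helper follows NextToken internally until the last page is reached.
	err := svc.ListAWSServiceAccessForOrganizationPages(
		&organizations.ListAWSServiceAccessForOrganizationInput{},
		func(page *organizations.ListAWSServiceAccessForOrganizationOutput, lastPage bool) bool {
			for _, sp := range page.EnabledServicePrincipals {
				fmt.Printf("%s enabled on %v\n",
					aws.StringValue(sp.ServicePrincipal), aws.TimeValue(sp.DateEnabled))
			}
			return true // keep paging
		})
	if err != nil {
		fmt.Println("error:", err)
	}
}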
@@ -6879,17 +8155,26 @@ func (c *Organizations) ListAWSServiceAccessForOrganizationRequest(input *ListAW // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. +// +// * UnsupportedAPIEndpointException +// This action isn't available in the current AWS Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListAWSServiceAccessForOrganization func (c *Organizations) ListAWSServiceAccessForOrganization(input *ListAWSServiceAccessForOrganizationInput) (*ListAWSServiceAccessForOrganizationOutput, error) { @@ -7024,7 +8309,8 @@ func (c *Organizations) ListAccountsRequest(input *ListAccountsInput) (req *requ // of results even when there are more results available. The NextToken response // parameter value is null only when there are no more results to display. // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7051,7 +8337,10 @@ func (c *Organizations) ListAccountsRequest(input *ListAccountsInput) (req *requ // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -7060,7 +8349,7 @@ func (c *Organizations) ListAccountsRequest(input *ListAccountsInput) (req *requ // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -7111,17 +8400,23 @@ func (c *Organizations) ListAccountsRequest(input *ListAccountsInput) (req *requ // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. 
// -// * ServiceException +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// +// * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListAccounts func (c *Organizations) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { @@ -7258,7 +8553,8 @@ func (c *Organizations) ListAccountsForParentRequest(input *ListAccountsForParen // of results even when there are more results available. The NextToken response // parameter value is null only when there are no more results to display. // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7285,7 +8581,10 @@ func (c *Organizations) ListAccountsForParentRequest(input *ListAccountsForParen // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -7294,7 +8593,7 @@ func (c *Organizations) ListAccountsForParentRequest(input *ListAccountsForParen // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -7345,6 +8644,12 @@ func (c *Organizations) ListAccountsForParentRequest(input *ListAccountsForParen // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ParentNotFoundException // We can't find a root or OU with the ParentId that you specified. // @@ -7353,12 +8658,12 @@ func (c *Organizations) ListAccountsForParentRequest(input *ListAccountsForParen // error. Try again later. 
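The NextToken caveat repeated in the List* documentation above is why the generated Pages helpers are usually preferable to hand-rolled loops: an empty page does not mean the listing is finished. Below is a sketch that collects the accounts directly under one OU with ListAccountsForParentPages; the OU ID is a made-up placeholder and the session setup is assumed.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	var accounts []*organizations.Account
	err := svc.ListAccountsForParentPages(
		&organizations.ListAccountsForParentInput{
			ParentId: aws.String("ou-exam-ple12345"), // hypothetical OU ID
		},
		func(page *organizations.ListAccountsForParentOutput, lastPage bool) bool {
			// Individual pages can be empty even when more results exist;
			// the helper keeps requesting until NextToken is nil.
			accounts = append(accounts, page.Accounts...)
			return true
		})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, a := range accounts {
		fmt.Printf("%s\t%s\t%s\n",
			aws.StringValue(a.Id), aws.StringValue(a.Name), aws.StringValue(a.Status))
	}
}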
// // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListAccountsForParent func (c *Organizations) ListAccountsForParent(input *ListAccountsForParentInput) (*ListAccountsForParentOutput, error) { @@ -7493,7 +8798,8 @@ func (c *Organizations) ListChildrenRequest(input *ListChildrenInput) (req *requ // of results even when there are more results available. The NextToken response // parameter value is null only when there are no more results to display. // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7520,7 +8826,10 @@ func (c *Organizations) ListChildrenRequest(input *ListChildrenInput) (req *requ // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -7529,7 +8838,7 @@ func (c *Organizations) ListChildrenRequest(input *ListChildrenInput) (req *requ // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -7580,6 +8889,12 @@ func (c *Organizations) ListChildrenRequest(input *ListChildrenInput) (req *requ // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ParentNotFoundException // We can't find a root or OU with the ParentId that you specified. // @@ -7588,12 +8903,12 @@ func (c *Organizations) ListChildrenRequest(input *ListChildrenInput) (req *requ // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. 
// -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListChildren func (c *Organizations) ListChildren(input *ListChildrenInput) (*ListChildrenOutput, error) { @@ -7727,7 +9042,8 @@ func (c *Organizations) ListCreateAccountStatusRequest(input *ListCreateAccountS // of results even when there are more results available. The NextToken response // parameter value is null only when there are no more results to display. // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7754,7 +9070,10 @@ func (c *Organizations) ListCreateAccountStatusRequest(input *ListCreateAccountS // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -7763,7 +9082,7 @@ func (c *Organizations) ListCreateAccountStatusRequest(input *ListCreateAccountS // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -7814,20 +9133,26 @@ func (c *Organizations) ListCreateAccountStatusRequest(input *ListCreateAccountS // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. 
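ListCreateAccountStatus, documented above, is the polling side of CreateAccount: it reports the asynchronous account-creation requests and their outcomes. A hedged sketch that lists only the requests still in progress or failed, filtering with the CreateAccountState constants from this package; the client setup is assumed.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.ListCreateAccountStatus(&organizations.ListCreateAccountStatusInput{
		// Restrict the listing to requests that have not succeeded yet.
		States: aws.StringSlice([]string{
			organizations.CreateAccountStateInProgress,
			organizations.CreateAccountStateFailed,
		}),
	})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, s := range out.CreateAccountStatuses {
		fmt.Printf("%s\t%s\t%s\n",
			aws.StringValue(s.AccountName),
			aws.StringValue(s.State),
			aws.StringValue(s.FailureReason)) // empty unless the request failed
	}
}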
// // * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListCreateAccountStatus func (c *Organizations) ListCreateAccountStatus(input *ListCreateAccountStatusInput) (*ListCreateAccountStatusOutput, error) { @@ -7903,35 +9228,35 @@ func (c *Organizations) ListCreateAccountStatusPagesWithContext(ctx aws.Context, return p.Err() } -const opListHandshakesForAccount = "ListHandshakesForAccount" +const opListDelegatedAdministrators = "ListDelegatedAdministrators" -// ListHandshakesForAccountRequest generates a "aws/request.Request" representing the -// client's request for the ListHandshakesForAccount operation. The "output" return +// ListDelegatedAdministratorsRequest generates a "aws/request.Request" representing the +// client's request for the ListDelegatedAdministrators operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListHandshakesForAccount for more information on using the ListHandshakesForAccount +// See ListDelegatedAdministrators for more information on using the ListDelegatedAdministrators // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListHandshakesForAccountRequest method. -// req, resp := client.ListHandshakesForAccountRequest(params) +// // Example sending a request using the ListDelegatedAdministratorsRequest method. +// req, resp := client.ListDelegatedAdministratorsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForAccount -func (c *Organizations) ListHandshakesForAccountRequest(input *ListHandshakesForAccountInput) (req *request.Request, output *ListHandshakesForAccountOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListDelegatedAdministrators +func (c *Organizations) ListDelegatedAdministratorsRequest(input *ListDelegatedAdministratorsInput) (req *request.Request, output *ListDelegatedAdministratorsOutput) { op := &request.Operation{ - Name: opListHandshakesForAccount, + Name: opListDelegatedAdministrators, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -7943,36 +9268,28 @@ func (c *Organizations) ListHandshakesForAccountRequest(input *ListHandshakesFor } if input == nil { - input = &ListHandshakesForAccountInput{} + input = &ListDelegatedAdministratorsInput{} } - output = &ListHandshakesForAccountOutput{} + output = &ListDelegatedAdministratorsOutput{} req = c.newRequest(op, input, output) return } -// ListHandshakesForAccount API operation for AWS Organizations. -// -// Lists the current handshakes that are associated with the account of the -// requesting user. +// ListDelegatedAdministrators API operation for AWS Organizations. // -// Handshakes that are ACCEPTED, DECLINED, or CANCELED appear in the results -// of this API for only 30 days after changing to that state. After that, they're -// deleted and no longer accessible. 
-// -// Always check the NextToken response parameter for a null value when calling -// a List* operation. These operations can occasionally return an empty set -// of results even when there are more results available. The NextToken response -// parameter value is null only when there are no more results to display. +// Lists the AWS accounts that are designated as delegated administrators in +// this organization. // -// This operation can be called from any account in the organization. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation ListHandshakesForAccount for usage and error information. +// API operation ListDelegatedAdministrators for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -7982,9 +9299,148 @@ func (c *Organizations) ListHandshakesForAccountRequest(input *ListHandshakesFor // Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) // in the IAM User Guide. // -// * ConcurrentModificationException -// The target of the operation is currently being modified by a different request. -// Try again later. +// * AWSOrganizationsNotInUseException +// Your account isn't a member of an organization. To make this request, you +// must use the credentials of an account that belongs to an organization. +// +// * ConstraintViolationException +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. +// +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account +// from the organization that doesn't yet have enough information to exist +// as a standalone account. This account requires you to first agree to the +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. +// +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove +// an account from the organization that doesn't yet have enough information +// to exist as a standalone account. This account requires you to first complete +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) +// in the AWS Organizations User Guide. 
+// +// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number +// of accounts that you can create in one day. +// +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. +// For example, accounts with India addresses must be associated with the +// AISPL marketplace. All accounts in an organization must be associated +// with the same marketplace. +// +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// +// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you +// must first provide a valid contact address and phone number for the master +// account. Then try the operation again. +// +// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the +// master account must have an associated account in the AWS GovCloud (US-West) +// Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) +// in the AWS GovCloud User Guide. 
+// +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of +// policies that you can have in an organization. +// +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -7992,7 +9448,10 @@ func (c *Organizations) ListHandshakesForAccountRequest(input *ListHandshakesFor // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -8001,7 +9460,7 @@ func (c *Organizations) ListHandshakesForAccountRequest(input *ListHandshakesFor // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. 
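ListDelegatedAdministrators is one of the operations introduced by this SDK update: it returns the member accounts registered as delegated administrators, optionally filtered by service principal. A minimal sketch follows; the service principal shown is only an example value, and the session setup is assumed.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.ListDelegatedAdministrators(&organizations.ListDelegatedAdministratorsInput{
		// Optional filter; omit ServicePrincipal to list administrators for all services.
		ServicePrincipal: aws.String("config.amazonaws.com"),
	})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, admin := range out.DelegatedAdministrators {
		fmt.Printf("%s (%s) delegated since %v\n",
			aws.StringValue(admin.Name),
			aws.StringValue(admin.Id),
			aws.TimeValue(admin.DelegationEnabledDate))
	}
}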
@@ -8052,77 +9511,86 @@ func (c *Organizations) ListHandshakesForAccountRequest(input *ListHandshakesFor // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // -// * ServiceException -// AWS Organizations can't complete your request because of an internal service -// error. Try again later. +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForAccount -func (c *Organizations) ListHandshakesForAccount(input *ListHandshakesForAccountInput) (*ListHandshakesForAccountOutput, error) { - req, out := c.ListHandshakesForAccountRequest(input) +// * ServiceException +// AWS Organizations can't complete your request because of an internal service +// error. Try again later. +// +// * UnsupportedAPIEndpointException +// This action isn't available in the current AWS Region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListDelegatedAdministrators +func (c *Organizations) ListDelegatedAdministrators(input *ListDelegatedAdministratorsInput) (*ListDelegatedAdministratorsOutput, error) { + req, out := c.ListDelegatedAdministratorsRequest(input) return out, req.Send() } -// ListHandshakesForAccountWithContext is the same as ListHandshakesForAccount with the addition of +// ListDelegatedAdministratorsWithContext is the same as ListDelegatedAdministrators with the addition of // the ability to pass a context and additional request options. // -// See ListHandshakesForAccount for details on how to use this API operation. +// See ListDelegatedAdministrators for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListHandshakesForAccountWithContext(ctx aws.Context, input *ListHandshakesForAccountInput, opts ...request.Option) (*ListHandshakesForAccountOutput, error) { - req, out := c.ListHandshakesForAccountRequest(input) +func (c *Organizations) ListDelegatedAdministratorsWithContext(ctx aws.Context, input *ListDelegatedAdministratorsInput, opts ...request.Option) (*ListDelegatedAdministratorsOutput, error) { + req, out := c.ListDelegatedAdministratorsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -// ListHandshakesForAccountPages iterates over the pages of a ListHandshakesForAccount operation, +// ListDelegatedAdministratorsPages iterates over the pages of a ListDelegatedAdministrators operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListHandshakesForAccount method for more information on how to use this operation. +// See ListDelegatedAdministrators method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListHandshakesForAccount operation. +// // Example iterating over at most 3 pages of a ListDelegatedAdministrators operation. // pageNum := 0 -// err := client.ListHandshakesForAccountPages(params, -// func(page *organizations.ListHandshakesForAccountOutput, lastPage bool) bool { +// err := client.ListDelegatedAdministratorsPages(params, +// func(page *organizations.ListDelegatedAdministratorsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Organizations) ListHandshakesForAccountPages(input *ListHandshakesForAccountInput, fn func(*ListHandshakesForAccountOutput, bool) bool) error { - return c.ListHandshakesForAccountPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Organizations) ListDelegatedAdministratorsPages(input *ListDelegatedAdministratorsInput, fn func(*ListDelegatedAdministratorsOutput, bool) bool) error { + return c.ListDelegatedAdministratorsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListHandshakesForAccountPagesWithContext same as ListHandshakesForAccountPages except +// ListDelegatedAdministratorsPagesWithContext same as ListDelegatedAdministratorsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListHandshakesForAccountPagesWithContext(ctx aws.Context, input *ListHandshakesForAccountInput, fn func(*ListHandshakesForAccountOutput, bool) bool, opts ...request.Option) error { +func (c *Organizations) ListDelegatedAdministratorsPagesWithContext(ctx aws.Context, input *ListDelegatedAdministratorsInput, fn func(*ListDelegatedAdministratorsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListHandshakesForAccountInput + var inCpy *ListDelegatedAdministratorsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListHandshakesForAccountRequest(inCpy) + req, _ := c.ListDelegatedAdministratorsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) 
return req, nil @@ -8130,7 +9598,7 @@ func (c *Organizations) ListHandshakesForAccountPagesWithContext(ctx aws.Context } for p.Next() { - if !fn(p.Page().(*ListHandshakesForAccountOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListDelegatedAdministratorsOutput), !p.HasNextPage()) { break } } @@ -8138,35 +9606,35 @@ func (c *Organizations) ListHandshakesForAccountPagesWithContext(ctx aws.Context return p.Err() } -const opListHandshakesForOrganization = "ListHandshakesForOrganization" +const opListDelegatedServicesForAccount = "ListDelegatedServicesForAccount" -// ListHandshakesForOrganizationRequest generates a "aws/request.Request" representing the -// client's request for the ListHandshakesForOrganization operation. The "output" return +// ListDelegatedServicesForAccountRequest generates a "aws/request.Request" representing the +// client's request for the ListDelegatedServicesForAccount operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListHandshakesForOrganization for more information on using the ListHandshakesForOrganization +// See ListDelegatedServicesForAccount for more information on using the ListDelegatedServicesForAccount // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListHandshakesForOrganizationRequest method. -// req, resp := client.ListHandshakesForOrganizationRequest(params) +// // Example sending a request using the ListDelegatedServicesForAccountRequest method. +// req, resp := client.ListDelegatedServicesForAccountRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForOrganization -func (c *Organizations) ListHandshakesForOrganizationRequest(input *ListHandshakesForOrganizationInput) (req *request.Request, output *ListHandshakesForOrganizationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListDelegatedServicesForAccount +func (c *Organizations) ListDelegatedServicesForAccountRequest(input *ListDelegatedServicesForAccountInput) (req *request.Request, output *ListDelegatedServicesForAccountOutput) { op := &request.Operation{ - Name: opListHandshakesForOrganization, + Name: opListDelegatedServicesForAccount, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -8178,38 +9646,27 @@ func (c *Organizations) ListHandshakesForOrganizationRequest(input *ListHandshak } if input == nil { - input = &ListHandshakesForOrganizationInput{} + input = &ListDelegatedServicesForAccountInput{} } - output = &ListHandshakesForOrganizationOutput{} + output = &ListDelegatedServicesForAccountOutput{} req = c.newRequest(op, input, output) return } -// ListHandshakesForOrganization API operation for AWS Organizations. -// -// Lists the handshakes that are associated with the organization that the requesting -// user is part of. The ListHandshakesForOrganization operation returns a list -// of handshake structures. Each structure contains details and status about -// a handshake. 
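The Pages and PagesWithContext wrappers above follow the SDK's standard paginator shape: NewRequest builds a fresh request per page, p.Next() drives NextToken, and the callback decides whether to continue. A short usage sketch for the new ListDelegatedAdministratorsPages helper, stopping after an assumed page budget of three in the spirit of the generated example comments; the MaxResults value and client setup are assumptions.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	pageNum := 0
	err := svc.ListDelegatedAdministratorsPages(
		&organizations.ListDelegatedAdministratorsInput{MaxResults: aws.Int64(10)},
		func(page *organizations.ListDelegatedAdministratorsOutput, lastPage bool) bool {
			pageNum++
			for _, admin := range page.DelegatedAdministrators {
				fmt.Println(aws.StringValue(admin.Email))
			}
			// Returning false stops the paginator; here we cap at three pages.
			return pageNum < 3
		})
	if err != nil {
		fmt.Println("error:", err)
	}
}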
-// -// Handshakes that are ACCEPTED, DECLINED, or CANCELED appear in the results -// of this API for only 30 days after changing to that state. After that, they're -// deleted and no longer accessible. +// ListDelegatedServicesForAccount API operation for AWS Organizations. // -// Always check the NextToken response parameter for a null value when calling -// a List* operation. These operations can occasionally return an empty set -// of results even when there are more results available. The NextToken response -// parameter value is null only when there are no more results to display. +// List the AWS services for which the specified account is a delegated administrator. // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation ListHandshakesForOrganization for usage and error information. +// API operation ListDelegatedServicesForAccount for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -8219,50 +9676,196 @@ func (c *Organizations) ListHandshakesForOrganizationRequest(input *ListHandshak // Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) // in the IAM User Guide. // +// * AccountNotFoundException +// We can't find an AWS account with the AccountId that you specified, or the +// account whose credentials you used to make this request isn't a member of +// an organization. +// +// * AccountNotRegisteredException +// The specified account is not a delegated administrator for this AWS service. +// // * AWSOrganizationsNotInUseException // Your account isn't a member of an organization. To make this request, you // must use the credentials of an account that belongs to an organization. // -// * ConcurrentModificationException -// The target of the operation is currently being modified by a different request. -// Try again later. -// -// * InvalidInputException -// The requested operation failed because you provided invalid values for one -// or more of the request parameters. This exception includes a reason that -// contains additional information about the violated limit: +// * ConstraintViolationException +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. // -// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and -// can't be modified. +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // -// * INPUT_REQUIRED: You must include a value for all required parameters. 
+// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account +// from the organization that doesn't yet have enough information to exist +// as a standalone account. This account requires you to first agree to the +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // -// * INVALID_ENUM: You specified an invalid value. +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove +// an account from the organization that doesn't yet have enough information +// to exist as a standalone account. This account requires you to first complete +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) +// in the AWS Organizations User Guide. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number +// of accounts that you can create in one day. // -// * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid -// characters. +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). // -// * INVALID_LIST_MEMBER: You provided a list to a parameter that contains -// at least one invalid value. +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. // -// * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter -// from the response to a previous call of the operation. +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. // -// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, -// organization, or email) as a party. +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. // -// * INVALID_PATTERN: You provided a value that doesn't match the required -// pattern. 
+// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. // -// * INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't -// match the required pattern. +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. // -// * INVALID_ROLE_NAME: You provided a role name that isn't valid. A role +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. +// For example, accounts with India addresses must be associated with the +// AISPL marketplace. All accounts in an organization must be associated +// with the same marketplace. +// +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// +// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you +// must first provide a valid contact address and phone number for the master +// account. Then try the operation again. +// +// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the +// master account must have an associated account in the AWS GovCloud (US-West) +// Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) +// in the AWS GovCloud User Guide. +// +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. 
+// +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. +// +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. +// An organization that supports only consolidated billing features can't +// perform this operation. +// +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. +// +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of +// policies that you can have in an organization. +// +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. +// +// * InvalidInputException +// The requested operation failed because you provided invalid values for one +// or more of the request parameters. This exception includes a reason that +// contains additional information about the violated limit: +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. +// +// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and +// can't be modified. +// +// * INPUT_REQUIRED: You must include a value for all required parameters. +// +// * INVALID_ENUM: You specified an invalid value. +// +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. +// +// * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid +// characters. +// +// * INVALID_LIST_MEMBER: You provided a list to a parameter that contains +// at least one invalid value. +// +// * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter +// from the response to a previous call of the operation. +// +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// +// * INVALID_PATTERN: You provided a value that doesn't match the required +// pattern. +// +// * INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't +// match the required pattern. +// +// * INVALID_ROLE_NAME: You provided a role name that isn't valid. A role // name can't begin with the reserved prefix AWSServiceRoleFor. // // * INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource @@ -8293,77 +9896,86 @@ func (c *Organizations) ListHandshakesForOrganizationRequest(input *ListHandshak // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // -// * ServiceException -// AWS Organizations can't complete your request because of an internal service -// error. Try again later. +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. 
The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForOrganization -func (c *Organizations) ListHandshakesForOrganization(input *ListHandshakesForOrganizationInput) (*ListHandshakesForOrganizationOutput, error) { - req, out := c.ListHandshakesForOrganizationRequest(input) +// * ServiceException +// AWS Organizations can't complete your request because of an internal service +// error. Try again later. +// +// * UnsupportedAPIEndpointException +// This action isn't available in the current AWS Region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListDelegatedServicesForAccount +func (c *Organizations) ListDelegatedServicesForAccount(input *ListDelegatedServicesForAccountInput) (*ListDelegatedServicesForAccountOutput, error) { + req, out := c.ListDelegatedServicesForAccountRequest(input) return out, req.Send() } -// ListHandshakesForOrganizationWithContext is the same as ListHandshakesForOrganization with the addition of +// ListDelegatedServicesForAccountWithContext is the same as ListDelegatedServicesForAccount with the addition of // the ability to pass a context and additional request options. // -// See ListHandshakesForOrganization for details on how to use this API operation. +// See ListDelegatedServicesForAccount for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListHandshakesForOrganizationWithContext(ctx aws.Context, input *ListHandshakesForOrganizationInput, opts ...request.Option) (*ListHandshakesForOrganizationOutput, error) { - req, out := c.ListHandshakesForOrganizationRequest(input) +func (c *Organizations) ListDelegatedServicesForAccountWithContext(ctx aws.Context, input *ListDelegatedServicesForAccountInput, opts ...request.Option) (*ListDelegatedServicesForAccountOutput, error) { + req, out := c.ListDelegatedServicesForAccountRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListHandshakesForOrganizationPages iterates over the pages of a ListHandshakesForOrganization operation, +// ListDelegatedServicesForAccountPages iterates over the pages of a ListDelegatedServicesForAccount operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListHandshakesForOrganization method for more information on how to use this operation. +// See ListDelegatedServicesForAccount method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListHandshakesForOrganization operation. 
+// // Example iterating over at most 3 pages of a ListDelegatedServicesForAccount operation. // pageNum := 0 -// err := client.ListHandshakesForOrganizationPages(params, -// func(page *organizations.ListHandshakesForOrganizationOutput, lastPage bool) bool { +// err := client.ListDelegatedServicesForAccountPages(params, +// func(page *organizations.ListDelegatedServicesForAccountOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Organizations) ListHandshakesForOrganizationPages(input *ListHandshakesForOrganizationInput, fn func(*ListHandshakesForOrganizationOutput, bool) bool) error { - return c.ListHandshakesForOrganizationPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Organizations) ListDelegatedServicesForAccountPages(input *ListDelegatedServicesForAccountInput, fn func(*ListDelegatedServicesForAccountOutput, bool) bool) error { + return c.ListDelegatedServicesForAccountPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListHandshakesForOrganizationPagesWithContext same as ListHandshakesForOrganizationPages except +// ListDelegatedServicesForAccountPagesWithContext same as ListDelegatedServicesForAccountPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListHandshakesForOrganizationPagesWithContext(ctx aws.Context, input *ListHandshakesForOrganizationInput, fn func(*ListHandshakesForOrganizationOutput, bool) bool, opts ...request.Option) error { +func (c *Organizations) ListDelegatedServicesForAccountPagesWithContext(ctx aws.Context, input *ListDelegatedServicesForAccountInput, fn func(*ListDelegatedServicesForAccountOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListHandshakesForOrganizationInput + var inCpy *ListDelegatedServicesForAccountInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListHandshakesForOrganizationRequest(inCpy) + req, _ := c.ListDelegatedServicesForAccountRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -8371,7 +9983,7 @@ func (c *Organizations) ListHandshakesForOrganizationPagesWithContext(ctx aws.Co } for p.Next() { - if !fn(p.Page().(*ListHandshakesForOrganizationOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListDelegatedServicesForAccountOutput), !p.HasNextPage()) { break } } @@ -8379,35 +9991,35 @@ func (c *Organizations) ListHandshakesForOrganizationPagesWithContext(ctx aws.Co return p.Err() } -const opListOrganizationalUnitsForParent = "ListOrganizationalUnitsForParent" +const opListHandshakesForAccount = "ListHandshakesForAccount" -// ListOrganizationalUnitsForParentRequest generates a "aws/request.Request" representing the -// client's request for the ListOrganizationalUnitsForParent operation. The "output" return +// ListHandshakesForAccountRequest generates a "aws/request.Request" representing the +// client's request for the ListHandshakesForAccount operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See ListOrganizationalUnitsForParent for more information on using the ListOrganizationalUnitsForParent +// See ListHandshakesForAccount for more information on using the ListHandshakesForAccount // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListOrganizationalUnitsForParentRequest method. -// req, resp := client.ListOrganizationalUnitsForParentRequest(params) +// // Example sending a request using the ListHandshakesForAccountRequest method. +// req, resp := client.ListHandshakesForAccountRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListOrganizationalUnitsForParent -func (c *Organizations) ListOrganizationalUnitsForParentRequest(input *ListOrganizationalUnitsForParentInput) (req *request.Request, output *ListOrganizationalUnitsForParentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForAccount +func (c *Organizations) ListHandshakesForAccountRequest(input *ListHandshakesForAccountInput) (req *request.Request, output *ListHandshakesForAccountOutput) { op := &request.Operation{ - Name: opListOrganizationalUnitsForParent, + Name: opListHandshakesForAccount, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -8419,31 +10031,36 @@ func (c *Organizations) ListOrganizationalUnitsForParentRequest(input *ListOrgan } if input == nil { - input = &ListOrganizationalUnitsForParentInput{} + input = &ListHandshakesForAccountInput{} } - output = &ListOrganizationalUnitsForParentOutput{} + output = &ListHandshakesForAccountOutput{} req = c.newRequest(op, input, output) return } -// ListOrganizationalUnitsForParent API operation for AWS Organizations. +// ListHandshakesForAccount API operation for AWS Organizations. // -// Lists the organizational units (OUs) in a parent organizational unit or root. +// Lists the current handshakes that are associated with the account of the +// requesting user. +// +// Handshakes that are ACCEPTED, DECLINED, or CANCELED appear in the results +// of this API for only 30 days after changing to that state. After that, they're +// deleted and no longer accessible. // // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response // parameter value is null only when there are no more results to display. // -// This operation can be called only from the organization's master account. +// This operation can be called from any account in the organization. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation ListOrganizationalUnitsForParent for usage and error information. +// API operation ListHandshakesForAccount for usage and error information. 
// // Returned Error Types: // * AccessDeniedException @@ -8453,9 +10070,9 @@ func (c *Organizations) ListOrganizationalUnitsForParentRequest(input *ListOrgan // Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) // in the IAM User Guide. // -// * AWSOrganizationsNotInUseException -// Your account isn't a member of an organization. To make this request, you -// must use the credentials of an account that belongs to an organization. +// * ConcurrentModificationException +// The target of the operation is currently being modified by a different request. +// Try again later. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -8463,7 +10080,10 @@ func (c *Organizations) ListOrganizationalUnitsForParentRequest(input *ListOrgan // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -8472,7 +10092,7 @@ func (c *Organizations) ListOrganizationalUnitsForParentRequest(input *ListOrgan // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -8523,80 +10143,83 @@ func (c *Organizations) ListOrganizationalUnitsForParentRequest(input *ListOrgan // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // -// * ParentNotFoundException -// We can't find a root or OU with the ParentId that you specified. +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. // // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListOrganizationalUnitsForParent -func (c *Organizations) ListOrganizationalUnitsForParent(input *ListOrganizationalUnitsForParentInput) (*ListOrganizationalUnitsForParentOutput, error) { - req, out := c.ListOrganizationalUnitsForParentRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForAccount +func (c *Organizations) ListHandshakesForAccount(input *ListHandshakesForAccountInput) (*ListHandshakesForAccountOutput, error) { + req, out := c.ListHandshakesForAccountRequest(input) return out, req.Send() } -// ListOrganizationalUnitsForParentWithContext is the same as ListOrganizationalUnitsForParent with the addition of +// ListHandshakesForAccountWithContext is the same as ListHandshakesForAccount with the addition of // the ability to pass a context and additional request options. // -// See ListOrganizationalUnitsForParent for details on how to use this API operation. +// See ListHandshakesForAccount for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListOrganizationalUnitsForParentWithContext(ctx aws.Context, input *ListOrganizationalUnitsForParentInput, opts ...request.Option) (*ListOrganizationalUnitsForParentOutput, error) { - req, out := c.ListOrganizationalUnitsForParentRequest(input) +func (c *Organizations) ListHandshakesForAccountWithContext(ctx aws.Context, input *ListHandshakesForAccountInput, opts ...request.Option) (*ListHandshakesForAccountOutput, error) { + req, out := c.ListHandshakesForAccountRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListOrganizationalUnitsForParentPages iterates over the pages of a ListOrganizationalUnitsForParent operation, +// ListHandshakesForAccountPages iterates over the pages of a ListHandshakesForAccount operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListOrganizationalUnitsForParent method for more information on how to use this operation. +// See ListHandshakesForAccount method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListOrganizationalUnitsForParent operation. +// // Example iterating over at most 3 pages of a ListHandshakesForAccount operation. 
// pageNum := 0 -// err := client.ListOrganizationalUnitsForParentPages(params, -// func(page *organizations.ListOrganizationalUnitsForParentOutput, lastPage bool) bool { +// err := client.ListHandshakesForAccountPages(params, +// func(page *organizations.ListHandshakesForAccountOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Organizations) ListOrganizationalUnitsForParentPages(input *ListOrganizationalUnitsForParentInput, fn func(*ListOrganizationalUnitsForParentOutput, bool) bool) error { - return c.ListOrganizationalUnitsForParentPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Organizations) ListHandshakesForAccountPages(input *ListHandshakesForAccountInput, fn func(*ListHandshakesForAccountOutput, bool) bool) error { + return c.ListHandshakesForAccountPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListOrganizationalUnitsForParentPagesWithContext same as ListOrganizationalUnitsForParentPages except +// ListHandshakesForAccountPagesWithContext same as ListHandshakesForAccountPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListOrganizationalUnitsForParentPagesWithContext(ctx aws.Context, input *ListOrganizationalUnitsForParentInput, fn func(*ListOrganizationalUnitsForParentOutput, bool) bool, opts ...request.Option) error { +func (c *Organizations) ListHandshakesForAccountPagesWithContext(ctx aws.Context, input *ListHandshakesForAccountInput, fn func(*ListHandshakesForAccountOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListOrganizationalUnitsForParentInput + var inCpy *ListHandshakesForAccountInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListOrganizationalUnitsForParentRequest(inCpy) + req, _ := c.ListHandshakesForAccountRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -8604,7 +10227,7 @@ func (c *Organizations) ListOrganizationalUnitsForParentPagesWithContext(ctx aws } for p.Next() { - if !fn(p.Page().(*ListOrganizationalUnitsForParentOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListHandshakesForAccountOutput), !p.HasNextPage()) { break } } @@ -8612,35 +10235,35 @@ func (c *Organizations) ListOrganizationalUnitsForParentPagesWithContext(ctx aws return p.Err() } -const opListParents = "ListParents" +const opListHandshakesForOrganization = "ListHandshakesForOrganization" -// ListParentsRequest generates a "aws/request.Request" representing the -// client's request for the ListParents operation. The "output" return +// ListHandshakesForOrganizationRequest generates a "aws/request.Request" representing the +// client's request for the ListHandshakesForOrganization operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See ListParents for more information on using the ListParents +// See ListHandshakesForOrganization for more information on using the ListHandshakesForOrganization // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListParentsRequest method. -// req, resp := client.ListParentsRequest(params) +// // Example sending a request using the ListHandshakesForOrganizationRequest method. +// req, resp := client.ListHandshakesForOrganizationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListParents -func (c *Organizations) ListParentsRequest(input *ListParentsInput) (req *request.Request, output *ListParentsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForOrganization +func (c *Organizations) ListHandshakesForOrganizationRequest(input *ListHandshakesForOrganizationInput) (req *request.Request, output *ListHandshakesForOrganizationOutput) { op := &request.Operation{ - Name: opListParents, + Name: opListHandshakesForOrganization, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -8652,35 +10275,39 @@ func (c *Organizations) ListParentsRequest(input *ListParentsInput) (req *reques } if input == nil { - input = &ListParentsInput{} + input = &ListHandshakesForOrganizationInput{} } - output = &ListParentsOutput{} + output = &ListHandshakesForOrganizationOutput{} req = c.newRequest(op, input, output) return } -// ListParents API operation for AWS Organizations. +// ListHandshakesForOrganization API operation for AWS Organizations. // -// Lists the root or organizational units (OUs) that serve as the immediate -// parent of the specified child OU or account. This operation, along with ListChildren -// enables you to traverse the tree structure that makes up this root. +// Lists the handshakes that are associated with the organization that the requesting +// user is part of. The ListHandshakesForOrganization operation returns a list +// of handshake structures. Each structure contains details and status about +// a handshake. +// +// Handshakes that are ACCEPTED, DECLINED, or CANCELED appear in the results +// of this API for only 30 days after changing to that state. After that, they're +// deleted and no longer accessible. // // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response // parameter value is null only when there are no more results to display. // -// This operation can be called only from the organization's master account. -// -// In the current release, a child can have only a single parent. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation ListParents for usage and error information. 
+// API operation ListHandshakesForOrganization for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -8694,9 +10321,9 @@ func (c *Organizations) ListParentsRequest(input *ListParentsInput) (req *reques // Your account isn't a member of an organization. To make this request, you // must use the credentials of an account that belongs to an organization. // -// * ChildNotFoundException -// We can't find an organizational unit (OU) or AWS account with the ChildId -// that you specified. +// * ConcurrentModificationException +// The target of the operation is currently being modified by a different request. +// Try again later. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -8704,7 +10331,10 @@ func (c *Organizations) ListParentsRequest(input *ListParentsInput) (req *reques // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -8713,7 +10343,7 @@ func (c *Organizations) ListParentsRequest(input *ListParentsInput) (req *reques // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -8764,77 +10394,83 @@ func (c *Organizations) ListParentsRequest(input *ListParentsInput) (req *reques // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListParents -func (c *Organizations) ListParents(input *ListParentsInput) (*ListParentsOutput, error) { - req, out := c.ListParentsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForOrganization +func (c *Organizations) ListHandshakesForOrganization(input *ListHandshakesForOrganizationInput) (*ListHandshakesForOrganizationOutput, error) { + req, out := c.ListHandshakesForOrganizationRequest(input) return out, req.Send() } -// ListParentsWithContext is the same as ListParents with the addition of +// ListHandshakesForOrganizationWithContext is the same as ListHandshakesForOrganization with the addition of // the ability to pass a context and additional request options. // -// See ListParents for details on how to use this API operation. +// See ListHandshakesForOrganization for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListParentsWithContext(ctx aws.Context, input *ListParentsInput, opts ...request.Option) (*ListParentsOutput, error) { - req, out := c.ListParentsRequest(input) +func (c *Organizations) ListHandshakesForOrganizationWithContext(ctx aws.Context, input *ListHandshakesForOrganizationInput, opts ...request.Option) (*ListHandshakesForOrganizationOutput, error) { + req, out := c.ListHandshakesForOrganizationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListParentsPages iterates over the pages of a ListParents operation, +// ListHandshakesForOrganizationPages iterates over the pages of a ListHandshakesForOrganization operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListParents method for more information on how to use this operation. +// See ListHandshakesForOrganization method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListParents operation. +// // Example iterating over at most 3 pages of a ListHandshakesForOrganization operation. // pageNum := 0 -// err := client.ListParentsPages(params, -// func(page *organizations.ListParentsOutput, lastPage bool) bool { +// err := client.ListHandshakesForOrganizationPages(params, +// func(page *organizations.ListHandshakesForOrganizationOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Organizations) ListParentsPages(input *ListParentsInput, fn func(*ListParentsOutput, bool) bool) error { - return c.ListParentsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Organizations) ListHandshakesForOrganizationPages(input *ListHandshakesForOrganizationInput, fn func(*ListHandshakesForOrganizationOutput, bool) bool) error { + return c.ListHandshakesForOrganizationPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListParentsPagesWithContext same as ListParentsPages except +// ListHandshakesForOrganizationPagesWithContext same as ListHandshakesForOrganizationPages except // it takes a Context and allows setting request options on the pages. 
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListParentsPagesWithContext(ctx aws.Context, input *ListParentsInput, fn func(*ListParentsOutput, bool) bool, opts ...request.Option) error { +func (c *Organizations) ListHandshakesForOrganizationPagesWithContext(ctx aws.Context, input *ListHandshakesForOrganizationInput, fn func(*ListHandshakesForOrganizationOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListParentsInput + var inCpy *ListHandshakesForOrganizationInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListParentsRequest(inCpy) + req, _ := c.ListHandshakesForOrganizationRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -8842,7 +10478,7 @@ func (c *Organizations) ListParentsPagesWithContext(ctx aws.Context, input *List } for p.Next() { - if !fn(p.Page().(*ListParentsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListHandshakesForOrganizationOutput), !p.HasNextPage()) { break } } @@ -8850,35 +10486,35 @@ func (c *Organizations) ListParentsPagesWithContext(ctx aws.Context, input *List return p.Err() } -const opListPolicies = "ListPolicies" +const opListOrganizationalUnitsForParent = "ListOrganizationalUnitsForParent" -// ListPoliciesRequest generates a "aws/request.Request" representing the -// client's request for the ListPolicies operation. The "output" return +// ListOrganizationalUnitsForParentRequest generates a "aws/request.Request" representing the +// client's request for the ListOrganizationalUnitsForParent operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListPolicies for more information on using the ListPolicies +// See ListOrganizationalUnitsForParent for more information on using the ListOrganizationalUnitsForParent // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListPoliciesRequest method. -// req, resp := client.ListPoliciesRequest(params) +// // Example sending a request using the ListOrganizationalUnitsForParentRequest method. 
+// req, resp := client.ListOrganizationalUnitsForParentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPolicies -func (c *Organizations) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Request, output *ListPoliciesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListOrganizationalUnitsForParent +func (c *Organizations) ListOrganizationalUnitsForParentRequest(input *ListOrganizationalUnitsForParentInput) (req *request.Request, output *ListOrganizationalUnitsForParentOutput) { op := &request.Operation{ - Name: opListPolicies, + Name: opListOrganizationalUnitsForParent, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -8890,31 +10526,32 @@ func (c *Organizations) ListPoliciesRequest(input *ListPoliciesInput) (req *requ } if input == nil { - input = &ListPoliciesInput{} + input = &ListOrganizationalUnitsForParentInput{} } - output = &ListPoliciesOutput{} + output = &ListOrganizationalUnitsForParentOutput{} req = c.newRequest(op, input, output) return } -// ListPolicies API operation for AWS Organizations. +// ListOrganizationalUnitsForParent API operation for AWS Organizations. // -// Retrieves the list of all policies in an organization of a specified type. +// Lists the organizational units (OUs) in a parent organizational unit or root. // // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response // parameter value is null only when there are no more results to display. // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation ListPolicies for usage and error information. +// API operation ListOrganizationalUnitsForParent for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -8934,7 +10571,10 @@ func (c *Organizations) ListPoliciesRequest(input *ListPoliciesInput) (req *requ // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -8943,7 +10583,7 @@ func (c *Organizations) ListPoliciesRequest(input *ListPoliciesInput) (req *requ // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. 
@@ -8994,80 +10634,86 @@ func (c *Organizations) ListPoliciesRequest(input *ListPoliciesInput) (req *requ // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// +// * ParentNotFoundException +// We can't find a root or OU with the ParentId that you specified. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. -// -// * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPolicies -func (c *Organizations) ListPolicies(input *ListPoliciesInput) (*ListPoliciesOutput, error) { - req, out := c.ListPoliciesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListOrganizationalUnitsForParent +func (c *Organizations) ListOrganizationalUnitsForParent(input *ListOrganizationalUnitsForParentInput) (*ListOrganizationalUnitsForParentOutput, error) { + req, out := c.ListOrganizationalUnitsForParentRequest(input) return out, req.Send() } -// ListPoliciesWithContext is the same as ListPolicies with the addition of +// ListOrganizationalUnitsForParentWithContext is the same as ListOrganizationalUnitsForParent with the addition of // the ability to pass a context and additional request options. // -// See ListPolicies for details on how to use this API operation. +// See ListOrganizationalUnitsForParent for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListPoliciesWithContext(ctx aws.Context, input *ListPoliciesInput, opts ...request.Option) (*ListPoliciesOutput, error) { - req, out := c.ListPoliciesRequest(input) +func (c *Organizations) ListOrganizationalUnitsForParentWithContext(ctx aws.Context, input *ListOrganizationalUnitsForParentInput, opts ...request.Option) (*ListOrganizationalUnitsForParentOutput, error) { + req, out := c.ListOrganizationalUnitsForParentRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListPoliciesPages iterates over the pages of a ListPolicies operation, +// ListOrganizationalUnitsForParentPages iterates over the pages of a ListOrganizationalUnitsForParent operation, // calling the "fn" function with the response data for each page. 
To stop // iterating, return false from the fn function. // -// See ListPolicies method for more information on how to use this operation. +// See ListOrganizationalUnitsForParent method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListPolicies operation. +// // Example iterating over at most 3 pages of a ListOrganizationalUnitsForParent operation. // pageNum := 0 -// err := client.ListPoliciesPages(params, -// func(page *organizations.ListPoliciesOutput, lastPage bool) bool { +// err := client.ListOrganizationalUnitsForParentPages(params, +// func(page *organizations.ListOrganizationalUnitsForParentOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Organizations) ListPoliciesPages(input *ListPoliciesInput, fn func(*ListPoliciesOutput, bool) bool) error { - return c.ListPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Organizations) ListOrganizationalUnitsForParentPages(input *ListOrganizationalUnitsForParentInput, fn func(*ListOrganizationalUnitsForParentOutput, bool) bool) error { + return c.ListOrganizationalUnitsForParentPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListPoliciesPagesWithContext same as ListPoliciesPages except +// ListOrganizationalUnitsForParentPagesWithContext same as ListOrganizationalUnitsForParentPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListPoliciesPagesWithContext(ctx aws.Context, input *ListPoliciesInput, fn func(*ListPoliciesOutput, bool) bool, opts ...request.Option) error { +func (c *Organizations) ListOrganizationalUnitsForParentPagesWithContext(ctx aws.Context, input *ListOrganizationalUnitsForParentInput, fn func(*ListOrganizationalUnitsForParentOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListPoliciesInput + var inCpy *ListOrganizationalUnitsForParentInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListPoliciesRequest(inCpy) + req, _ := c.ListOrganizationalUnitsForParentRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -9075,7 +10721,7 @@ func (c *Organizations) ListPoliciesPagesWithContext(ctx aws.Context, input *Lis } for p.Next() { - if !fn(p.Page().(*ListPoliciesOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListOrganizationalUnitsForParentOutput), !p.HasNextPage()) { break } } @@ -9083,35 +10729,35 @@ func (c *Organizations) ListPoliciesPagesWithContext(ctx aws.Context, input *Lis return p.Err() } -const opListPoliciesForTarget = "ListPoliciesForTarget" +const opListParents = "ListParents" -// ListPoliciesForTargetRequest generates a "aws/request.Request" representing the -// client's request for the ListPoliciesForTarget operation. The "output" return +// ListParentsRequest generates a "aws/request.Request" representing the +// client's request for the ListParents operation. The "output" return // value will be populated with the request's response once the request completes // successfully. 
// // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListPoliciesForTarget for more information on using the ListPoliciesForTarget +// See ListParents for more information on using the ListParents // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListPoliciesForTargetRequest method. -// req, resp := client.ListPoliciesForTargetRequest(params) +// // Example sending a request using the ListParentsRequest method. +// req, resp := client.ListParentsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPoliciesForTarget -func (c *Organizations) ListPoliciesForTargetRequest(input *ListPoliciesForTargetInput) (req *request.Request, output *ListPoliciesForTargetOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListParents +func (c *Organizations) ListParentsRequest(input *ListParentsInput) (req *request.Request, output *ListParentsOutput) { op := &request.Operation{ - Name: opListPoliciesForTarget, + Name: opListParents, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -9123,33 +10769,36 @@ func (c *Organizations) ListPoliciesForTargetRequest(input *ListPoliciesForTarge } if input == nil { - input = &ListPoliciesForTargetInput{} + input = &ListParentsInput{} } - output = &ListPoliciesForTargetOutput{} + output = &ListParentsOutput{} req = c.newRequest(op, input, output) return } -// ListPoliciesForTarget API operation for AWS Organizations. +// ListParents API operation for AWS Organizations. // -// Lists the policies that are directly attached to the specified target root, -// organizational unit (OU), or account. You must specify the policy type that -// you want included in the returned list. +// Lists the root or organizational units (OUs) that serve as the immediate +// parent of the specified child OU or account. This operation, along with ListChildren +// enables you to traverse the tree structure that makes up this root. // // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response // parameter value is null only when there are no more results to display. // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. +// +// In the current release, a child can have only a single parent. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation ListPoliciesForTarget for usage and error information. +// API operation ListParents for usage and error information. 
// // Returned Error Types: // * AccessDeniedException @@ -9163,13 +10812,20 @@ func (c *Organizations) ListPoliciesForTargetRequest(input *ListPoliciesForTarge // Your account isn't a member of an organization. To make this request, you // must use the credentials of an account that belongs to an organization. // +// * ChildNotFoundException +// We can't find an organizational unit (OU) or AWS account with the ChildId +// that you specified. +// // * InvalidInputException // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -9178,7 +10834,7 @@ func (c *Organizations) ListPoliciesForTargetRequest(input *ListPoliciesForTarge // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -9229,83 +10885,83 @@ func (c *Organizations) ListPoliciesForTargetRequest(input *ListPoliciesForTarge // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // -// * TargetNotFoundException -// We can't find a root, OU, or account with the TargetId that you specified. -// // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. -// -// * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPoliciesForTarget -func (c *Organizations) ListPoliciesForTarget(input *ListPoliciesForTargetInput) (*ListPoliciesForTargetOutput, error) { - req, out := c.ListPoliciesForTargetRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListParents +func (c *Organizations) ListParents(input *ListParentsInput) (*ListParentsOutput, error) { + req, out := c.ListParentsRequest(input) return out, req.Send() } -// ListPoliciesForTargetWithContext is the same as ListPoliciesForTarget with the addition of +// ListParentsWithContext is the same as ListParents with the addition of // the ability to pass a context and additional request options. // -// See ListPoliciesForTarget for details on how to use this API operation. +// See ListParents for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListPoliciesForTargetWithContext(ctx aws.Context, input *ListPoliciesForTargetInput, opts ...request.Option) (*ListPoliciesForTargetOutput, error) { - req, out := c.ListPoliciesForTargetRequest(input) +func (c *Organizations) ListParentsWithContext(ctx aws.Context, input *ListParentsInput, opts ...request.Option) (*ListParentsOutput, error) { + req, out := c.ListParentsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListPoliciesForTargetPages iterates over the pages of a ListPoliciesForTarget operation, +// ListParentsPages iterates over the pages of a ListParents operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListPoliciesForTarget method for more information on how to use this operation. +// See ListParents method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListPoliciesForTarget operation. +// // Example iterating over at most 3 pages of a ListParents operation. // pageNum := 0 -// err := client.ListPoliciesForTargetPages(params, -// func(page *organizations.ListPoliciesForTargetOutput, lastPage bool) bool { +// err := client.ListParentsPages(params, +// func(page *organizations.ListParentsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Organizations) ListPoliciesForTargetPages(input *ListPoliciesForTargetInput, fn func(*ListPoliciesForTargetOutput, bool) bool) error { - return c.ListPoliciesForTargetPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Organizations) ListParentsPages(input *ListParentsInput, fn func(*ListParentsOutput, bool) bool) error { + return c.ListParentsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListPoliciesForTargetPagesWithContext same as ListPoliciesForTargetPages except +// ListParentsPagesWithContext same as ListParentsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListPoliciesForTargetPagesWithContext(ctx aws.Context, input *ListPoliciesForTargetInput, fn func(*ListPoliciesForTargetOutput, bool) bool, opts ...request.Option) error { +func (c *Organizations) ListParentsPagesWithContext(ctx aws.Context, input *ListParentsInput, fn func(*ListParentsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListPoliciesForTargetInput + var inCpy *ListParentsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListPoliciesForTargetRequest(inCpy) + req, _ := c.ListParentsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -9313,7 +10969,7 @@ func (c *Organizations) ListPoliciesForTargetPagesWithContext(ctx aws.Context, i } for p.Next() { - if !fn(p.Page().(*ListPoliciesForTargetOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListParentsOutput), !p.HasNextPage()) { break } } @@ -9321,35 +10977,35 @@ func (c *Organizations) ListPoliciesForTargetPagesWithContext(ctx aws.Context, i return p.Err() } -const opListRoots = "ListRoots" +const opListPolicies = "ListPolicies" -// ListRootsRequest generates a "aws/request.Request" representing the -// client's request for the ListRoots operation. The "output" return +// ListPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListPolicies operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListRoots for more information on using the ListRoots +// See ListPolicies for more information on using the ListPolicies // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListRootsRequest method. -// req, resp := client.ListRootsRequest(params) +// // Example sending a request using the ListPoliciesRequest method. +// req, resp := client.ListPoliciesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListRoots -func (c *Organizations) ListRootsRequest(input *ListRootsInput) (req *request.Request, output *ListRootsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPolicies +func (c *Organizations) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Request, output *ListPoliciesOutput) { op := &request.Operation{ - Name: opListRoots, + Name: opListPolicies, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -9361,37 +11017,32 @@ func (c *Organizations) ListRootsRequest(input *ListRootsInput) (req *request.Re } if input == nil { - input = &ListRootsInput{} + input = &ListPoliciesInput{} } - output = &ListRootsOutput{} + output = &ListPoliciesOutput{} req = c.newRequest(op, input, output) return } -// ListRoots API operation for AWS Organizations. +// ListPolicies API operation for AWS Organizations. // -// Lists the roots that are defined in the current organization. 
+// Retrieves the list of all policies in an organization of a specified type. // // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response // parameter value is null only when there are no more results to display. // -// This operation can be called only from the organization's master account. -// -// Policy types can be enabled and disabled in roots. This is distinct from -// whether they're available in the organization. When you enable all features, -// you make policy types available for use in that organization. Individual -// policy types can then be enabled and disabled in a root. To see the availability -// of a policy type in an organization, use DescribeOrganization. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation ListRoots for usage and error information. +// API operation ListPolicies for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -9411,7 +11062,10 @@ func (c *Organizations) ListRootsRequest(input *ListRootsInput) (req *request.Re // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -9420,7 +11074,7 @@ func (c *Organizations) ListRootsRequest(input *ListRootsInput) (req *request.Re // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -9471,77 +11125,86 @@ func (c *Organizations) ListRootsRequest(input *ListRootsInput) (req *request.Re // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. 
+// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListRoots -func (c *Organizations) ListRoots(input *ListRootsInput) (*ListRootsOutput, error) { - req, out := c.ListRootsRequest(input) - return out, req.Send() -} - -// ListRootsWithContext is the same as ListRoots with the addition of +// * UnsupportedAPIEndpointException +// This action isn't available in the current AWS Region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPolicies +func (c *Organizations) ListPolicies(input *ListPoliciesInput) (*ListPoliciesOutput, error) { + req, out := c.ListPoliciesRequest(input) + return out, req.Send() +} + +// ListPoliciesWithContext is the same as ListPolicies with the addition of // the ability to pass a context and additional request options. // -// See ListRoots for details on how to use this API operation. +// See ListPolicies for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListRootsWithContext(ctx aws.Context, input *ListRootsInput, opts ...request.Option) (*ListRootsOutput, error) { - req, out := c.ListRootsRequest(input) +func (c *Organizations) ListPoliciesWithContext(ctx aws.Context, input *ListPoliciesInput, opts ...request.Option) (*ListPoliciesOutput, error) { + req, out := c.ListPoliciesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListRootsPages iterates over the pages of a ListRoots operation, +// ListPoliciesPages iterates over the pages of a ListPolicies operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListRoots method for more information on how to use this operation. +// See ListPolicies method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListRoots operation. +// // Example iterating over at most 3 pages of a ListPolicies operation. // pageNum := 0 -// err := client.ListRootsPages(params, -// func(page *organizations.ListRootsOutput, lastPage bool) bool { +// err := client.ListPoliciesPages(params, +// func(page *organizations.ListPoliciesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Organizations) ListRootsPages(input *ListRootsInput, fn func(*ListRootsOutput, bool) bool) error { - return c.ListRootsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Organizations) ListPoliciesPages(input *ListPoliciesInput, fn func(*ListPoliciesOutput, bool) bool) error { + return c.ListPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListRootsPagesWithContext same as ListRootsPages except +// ListPoliciesPagesWithContext same as ListPoliciesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListRootsPagesWithContext(ctx aws.Context, input *ListRootsInput, fn func(*ListRootsOutput, bool) bool, opts ...request.Option) error { +func (c *Organizations) ListPoliciesPagesWithContext(ctx aws.Context, input *ListPoliciesInput, fn func(*ListPoliciesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListRootsInput + var inCpy *ListPoliciesInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListRootsRequest(inCpy) + req, _ := c.ListPoliciesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -9549,7 +11212,7 @@ func (c *Organizations) ListRootsPagesWithContext(ctx aws.Context, input *ListRo } for p.Next() { - if !fn(p.Page().(*ListRootsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListPoliciesOutput), !p.HasNextPage()) { break } } @@ -9557,68 +11220,74 @@ func (c *Organizations) ListRootsPagesWithContext(ctx aws.Context, input *ListRo return p.Err() } -const opListTagsForResource = "ListTagsForResource" +const opListPoliciesForTarget = "ListPoliciesForTarget" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// ListPoliciesForTargetRequest generates a "aws/request.Request" representing the +// client's request for the ListPoliciesForTarget operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See ListPoliciesForTarget for more information on using the ListPoliciesForTarget // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the ListPoliciesForTargetRequest method. 
+// req, resp := client.ListPoliciesForTargetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTagsForResource -func (c *Organizations) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPoliciesForTarget +func (c *Organizations) ListPoliciesForTargetRequest(input *ListPoliciesForTargetInput) (req *request.Request, output *ListPoliciesForTargetOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opListPoliciesForTarget, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, - LimitToken: "", + LimitToken: "MaxResults", TruncationToken: "", }, } if input == nil { - input = &ListTagsForResourceInput{} + input = &ListPoliciesForTargetInput{} } - output = &ListTagsForResourceOutput{} + output = &ListPoliciesForTargetOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for AWS Organizations. +// ListPoliciesForTarget API operation for AWS Organizations. // -// Lists tags for the specified resource. +// Lists the policies that are directly attached to the specified target root, +// organizational unit (OU), or account. You must specify the policy type that +// you want included in the returned list. // -// Currently, you can list tags on an account in AWS Organizations. +// Always check the NextToken response parameter for a null value when calling +// a List* operation. These operations can occasionally return an empty set +// of results even when there are more results available. The NextToken response +// parameter value is null only when there are no more results to display. // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation ListTagsForResource for usage and error information. +// API operation ListPoliciesForTarget for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -9632,16 +11301,16 @@ func (c *Organizations) ListTagsForResourceRequest(input *ListTagsForResourceInp // Your account isn't a member of an organization. To make this request, you // must use the credentials of an account that belongs to an organization. // -// * TargetNotFoundException -// We can't find a root, OU, or account with the TargetId that you specified. -// // * InvalidInputException // The requested operation failed because you provided invalid values for one // or more of the request parameters. This exception includes a reason that // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. 
// // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -9650,7 +11319,7 @@ func (c *Organizations) ListTagsForResourceRequest(input *ListTagsForResourceInp // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -9701,77 +11370,89 @@ func (c *Organizations) ListTagsForResourceRequest(input *ListTagsForResourceInp // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // +// * TargetNotFoundException +// We can't find a root, OU, account, or policy with the TargetId that you specified. +// // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTagsForResource -func (c *Organizations) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// * UnsupportedAPIEndpointException +// This action isn't available in the current AWS Region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPoliciesForTarget +func (c *Organizations) ListPoliciesForTarget(input *ListPoliciesForTargetInput) (*ListPoliciesForTargetOutput, error) { + req, out := c.ListPoliciesForTargetRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// ListPoliciesForTargetWithContext is the same as ListPoliciesForTarget with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation. +// See ListPoliciesForTarget for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
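[Editor's note, not part of the vendored file] The WithContext doc comment above explains that the context must be non-nil and drives request cancellation. A minimal caller-side sketch of that pattern, assuming a default session and a placeholder OU ID (the 10-second timeout is arbitrary):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession()))

	// A standard context.Context satisfies aws.Context, so a timeout context works here.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.ListPoliciesForTargetWithContext(ctx, &organizations.ListPoliciesForTargetInput{
		TargetId: aws.String("ou-examplerootid111-exampleouid111"), // placeholder ID
		Filter:   aws.String(organizations.PolicyTypeServiceControlPolicy),
	})
	if err != nil {
		// A nil context would panic, per the comment above; a timeout surfaces as err.
		log.Fatalf("ListPoliciesForTargetWithContext: %v", err)
	}
	fmt.Printf("%d policies attached\n", len(out.Policies))
}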
-func (c *Organizations) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *Organizations) ListPoliciesForTargetWithContext(ctx aws.Context, input *ListPoliciesForTargetInput, opts ...request.Option) (*ListPoliciesForTargetOutput, error) { + req, out := c.ListPoliciesForTargetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListTagsForResourcePages iterates over the pages of a ListTagsForResource operation, +// ListPoliciesForTargetPages iterates over the pages of a ListPoliciesForTarget operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListTagsForResource method for more information on how to use this operation. +// See ListPoliciesForTarget method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListTagsForResource operation. +// // Example iterating over at most 3 pages of a ListPoliciesForTarget operation. // pageNum := 0 -// err := client.ListTagsForResourcePages(params, -// func(page *organizations.ListTagsForResourceOutput, lastPage bool) bool { +// err := client.ListPoliciesForTargetPages(params, +// func(page *organizations.ListPoliciesForTargetOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Organizations) ListTagsForResourcePages(input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool) error { - return c.ListTagsForResourcePagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Organizations) ListPoliciesForTargetPages(input *ListPoliciesForTargetInput, fn func(*ListPoliciesForTargetOutput, bool) bool) error { + return c.ListPoliciesForTargetPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListTagsForResourcePagesWithContext same as ListTagsForResourcePages except +// ListPoliciesForTargetPagesWithContext same as ListPoliciesForTargetPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListTagsForResourcePagesWithContext(ctx aws.Context, input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool, opts ...request.Option) error { +func (c *Organizations) ListPoliciesForTargetPagesWithContext(ctx aws.Context, input *ListPoliciesForTargetInput, fn func(*ListPoliciesForTargetOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListTagsForResourceInput + var inCpy *ListPoliciesForTargetInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListTagsForResourceRequest(inCpy) + req, _ := c.ListPoliciesForTargetRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) 
return req, nil @@ -9779,7 +11460,7 @@ func (c *Organizations) ListTagsForResourcePagesWithContext(ctx aws.Context, inp } for p.Next() { - if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListPoliciesForTargetOutput), !p.HasNextPage()) { break } } @@ -9787,35 +11468,35 @@ func (c *Organizations) ListTagsForResourcePagesWithContext(ctx aws.Context, inp return p.Err() } -const opListTargetsForPolicy = "ListTargetsForPolicy" +const opListRoots = "ListRoots" -// ListTargetsForPolicyRequest generates a "aws/request.Request" representing the -// client's request for the ListTargetsForPolicy operation. The "output" return +// ListRootsRequest generates a "aws/request.Request" representing the +// client's request for the ListRoots operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTargetsForPolicy for more information on using the ListTargetsForPolicy +// See ListRoots for more information on using the ListRoots // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTargetsForPolicyRequest method. -// req, resp := client.ListTargetsForPolicyRequest(params) +// // Example sending a request using the ListRootsRequest method. +// req, resp := client.ListRootsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTargetsForPolicy -func (c *Organizations) ListTargetsForPolicyRequest(input *ListTargetsForPolicyInput) (req *request.Request, output *ListTargetsForPolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListRoots +func (c *Organizations) ListRootsRequest(input *ListRootsInput) (req *request.Request, output *ListRootsOutput) { op := &request.Operation{ - Name: opListTargetsForPolicy, + Name: opListRoots, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -9827,32 +11508,38 @@ func (c *Organizations) ListTargetsForPolicyRequest(input *ListTargetsForPolicyI } if input == nil { - input = &ListTargetsForPolicyInput{} + input = &ListRootsInput{} } - output = &ListTargetsForPolicyOutput{} + output = &ListRootsOutput{} req = c.newRequest(op, input, output) return } -// ListTargetsForPolicy API operation for AWS Organizations. +// ListRoots API operation for AWS Organizations. // -// Lists all the roots, organizational units (OUs), and accounts that the specified -// policy is attached to. +// Lists the roots that are defined in the current organization. // // Always check the NextToken response parameter for a null value when calling // a List* operation. These operations can occasionally return an empty set // of results even when there are more results available. The NextToken response // parameter value is null only when there are no more results to display. // -// This operation can be called only from the organization's master account. +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. 
+// +// Policy types can be enabled and disabled in roots. This is distinct from +// whether they're available in the organization. When you enable all features, +// you make policy types available for use in that organization. Individual +// policy types can then be enabled and disabled in a root. To see the availability +// of a policy type in an organization, use DescribeOrganization. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Organizations's -// API operation ListTargetsForPolicy for usage and error information. +// API operation ListRoots for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -9872,7 +11559,10 @@ func (c *Organizations) ListTargetsForPolicyRequest(input *ListTargetsForPolicyI // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -9881,7 +11571,7 @@ func (c *Organizations) ListTargetsForPolicyRequest(input *ListTargetsForPolicyI // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -9932,83 +11622,83 @@ func (c *Organizations) ListTargetsForPolicyRequest(input *ListTargetsForPolicyI // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // -// * PolicyNotFoundException -// We can't find a policy with the PolicyId that you specified. +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. // // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. -// -// * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTargetsForPolicy -func (c *Organizations) ListTargetsForPolicy(input *ListTargetsForPolicyInput) (*ListTargetsForPolicyOutput, error) { - req, out := c.ListTargetsForPolicyRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListRoots +func (c *Organizations) ListRoots(input *ListRootsInput) (*ListRootsOutput, error) { + req, out := c.ListRootsRequest(input) return out, req.Send() } -// ListTargetsForPolicyWithContext is the same as ListTargetsForPolicy with the addition of +// ListRootsWithContext is the same as ListRoots with the addition of // the ability to pass a context and additional request options. // -// See ListTargetsForPolicy for details on how to use this API operation. +// See ListRoots for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) ListTargetsForPolicyWithContext(ctx aws.Context, input *ListTargetsForPolicyInput, opts ...request.Option) (*ListTargetsForPolicyOutput, error) { - req, out := c.ListTargetsForPolicyRequest(input) +func (c *Organizations) ListRootsWithContext(ctx aws.Context, input *ListRootsInput, opts ...request.Option) (*ListRootsOutput, error) { + req, out := c.ListRootsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListTargetsForPolicyPages iterates over the pages of a ListTargetsForPolicy operation, +// ListRootsPages iterates over the pages of a ListRoots operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListTargetsForPolicy method for more information on how to use this operation. +// See ListRoots method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListTargetsForPolicy operation. +// // Example iterating over at most 3 pages of a ListRoots operation. // pageNum := 0 -// err := client.ListTargetsForPolicyPages(params, -// func(page *organizations.ListTargetsForPolicyOutput, lastPage bool) bool { +// err := client.ListRootsPages(params, +// func(page *organizations.ListRootsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Organizations) ListTargetsForPolicyPages(input *ListTargetsForPolicyInput, fn func(*ListTargetsForPolicyOutput, bool) bool) error { - return c.ListTargetsForPolicyPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Organizations) ListRootsPages(input *ListRootsInput, fn func(*ListRootsOutput, bool) bool) error { + return c.ListRootsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListTargetsForPolicyPagesWithContext same as ListTargetsForPolicyPages except +// ListRootsPagesWithContext same as ListRootsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
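[Editor's note, not part of the vendored file] The Pages helpers documented above stop iterating as soon as the callback returns false, which is easy to get wrong. A short caller-side sketch, assuming a default session, that walks every page of ListRoots and collects the root IDs (variable names are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession()))

	// Returning true from the callback requests the next page; returning
	// false (or reaching the last page) ends the iteration.
	var rootIDs []string
	err := svc.ListRootsPages(&organizations.ListRootsInput{},
		func(page *organizations.ListRootsOutput, lastPage bool) bool {
			for _, r := range page.Roots {
				rootIDs = append(rootIDs, aws.StringValue(r.Id))
			}
			return !lastPage
		})
	if err != nil {
		log.Fatalf("ListRootsPages: %v", err)
	}
	fmt.Println("roots:", rootIDs)
}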
-func (c *Organizations) ListTargetsForPolicyPagesWithContext(ctx aws.Context, input *ListTargetsForPolicyInput, fn func(*ListTargetsForPolicyOutput, bool) bool, opts ...request.Option) error { +func (c *Organizations) ListRootsPagesWithContext(ctx aws.Context, input *ListRootsInput, fn func(*ListRootsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListTargetsForPolicyInput + var inCpy *ListRootsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListTargetsForPolicyRequest(inCpy) + req, _ := c.ListRootsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -10016,7 +11706,7 @@ func (c *Organizations) ListTargetsForPolicyPagesWithContext(ctx aws.Context, in } for p.Next() { - if !fn(p.Page().(*ListTargetsForPolicyOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListRootsOutput), !p.HasNextPage()) { break } } @@ -10024,70 +11714,925 @@ func (c *Organizations) ListTargetsForPolicyPagesWithContext(ctx aws.Context, in return p.Err() } -const opMoveAccount = "MoveAccount" +const opListTagsForResource = "ListTagsForResource" -// MoveAccountRequest generates a "aws/request.Request" representing the -// client's request for the MoveAccount operation. The "output" return +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See MoveAccount for more information on using the MoveAccount +// See ListTagsForResource for more information on using the ListTagsForResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the MoveAccountRequest method. -// req, resp := client.MoveAccountRequest(params) +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/MoveAccount -func (c *Organizations) MoveAccountRequest(input *MoveAccountInput) (req *request.Request, output *MoveAccountOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTagsForResource +func (c *Organizations) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ - Name: opMoveAccount, + Name: opListTagsForResource, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, } if input == nil { - input = &MoveAccountInput{} + input = &ListTagsForResourceInput{} } - output = &MoveAccountOutput{} + output = &ListTagsForResourceOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// MoveAccount API operation for AWS Organizations. 
+// ListTagsForResource API operation for AWS Organizations. +// +// Lists tags that are attached to the specified resource. +// +// You can attach tags to the following resources in AWS Organizations. +// +// * AWS account +// +// * Organization root +// +// * Organizational unit (OU) +// +// * Policy (any type) +// +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Organizations's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have permissions to perform the requested operation. The user or +// role that is making the request must have at least one IAM permissions policy +// attached that grants the required permissions. For more information, see +// Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) +// in the IAM User Guide. +// +// * AWSOrganizationsNotInUseException +// Your account isn't a member of an organization. To make this request, you +// must use the credentials of an account that belongs to an organization. +// +// * TargetNotFoundException +// We can't find a root, OU, account, or policy with the TargetId that you specified. +// +// * InvalidInputException +// The requested operation failed because you provided invalid values for one +// or more of the request parameters. This exception includes a reason that +// contains additional information about the violated limit: +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. +// +// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and +// can't be modified. +// +// * INPUT_REQUIRED: You must include a value for all required parameters. +// +// * INVALID_ENUM: You specified an invalid value. +// +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. +// +// * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid +// characters. +// +// * INVALID_LIST_MEMBER: You provided a list to a parameter that contains +// at least one invalid value. +// +// * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter +// from the response to a previous call of the operation. +// +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// +// * INVALID_PATTERN: You provided a value that doesn't match the required +// pattern. +// +// * INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't +// match the required pattern. +// +// * INVALID_ROLE_NAME: You provided a role name that isn't valid. A role +// name can't begin with the reserved prefix AWSServiceRoleFor. +// +// * INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource +// Name (ARN) for the organization. +// +// * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. +// +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. 
System tags don’t count against your tags per +// resource limit. +// +// * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter +// for the operation. +// +// * MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer +// than allowed. +// +// * MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger +// value than allowed. +// +// * MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter +// than allowed. +// +// * MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller +// value than allowed. +// +// * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only +// between entities in the same root. +// +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// +// * ServiceException +// AWS Organizations can't complete your request because of an internal service +// error. Try again later. +// +// * TooManyRequestsException +// You have sent too many requests in too short a period of time. The quota +// helps protect against denial-of-service attacks. Try again later. +// +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTagsForResource +func (c *Organizations) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Organizations) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTagsForResourcePages iterates over the pages of a ListTagsForResource operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTagsForResource method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTagsForResource operation. 
+// pageNum := 0 +// err := client.ListTagsForResourcePages(params, +// func(page *organizations.ListTagsForResourceOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Organizations) ListTagsForResourcePages(input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool) error { + return c.ListTagsForResourcePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTagsForResourcePagesWithContext same as ListTagsForResourcePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Organizations) ListTagsForResourcePagesWithContext(ctx aws.Context, input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTagsForResourceInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTagsForResourceRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTargetsForPolicy = "ListTargetsForPolicy" + +// ListTargetsForPolicyRequest generates a "aws/request.Request" representing the +// client's request for the ListTargetsForPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTargetsForPolicy for more information on using the ListTargetsForPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTargetsForPolicyRequest method. +// req, resp := client.ListTargetsForPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTargetsForPolicy +func (c *Organizations) ListTargetsForPolicyRequest(input *ListTargetsForPolicyInput) (req *request.Request, output *ListTargetsForPolicyOutput) { + op := &request.Operation{ + Name: opListTargetsForPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTargetsForPolicyInput{} + } + + output = &ListTargetsForPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTargetsForPolicy API operation for AWS Organizations. +// +// Lists all the roots, organizational units (OUs), and accounts that the specified +// policy is attached to. +// +// Always check the NextToken response parameter for a null value when calling +// a List* operation. 
These operations can occasionally return an empty set +// of results even when there are more results available. The NextToken response +// parameter value is null only when there are no more results to display. +// +// This operation can be called only from the organization's master account +// or by a member account that is a delegated administrator for an AWS service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Organizations's +// API operation ListTargetsForPolicy for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have permissions to perform the requested operation. The user or +// role that is making the request must have at least one IAM permissions policy +// attached that grants the required permissions. For more information, see +// Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) +// in the IAM User Guide. +// +// * AWSOrganizationsNotInUseException +// Your account isn't a member of an organization. To make this request, you +// must use the credentials of an account that belongs to an organization. +// +// * InvalidInputException +// The requested operation failed because you provided invalid values for one +// or more of the request parameters. This exception includes a reason that +// contains additional information about the violated limit: +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. +// +// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and +// can't be modified. +// +// * INPUT_REQUIRED: You must include a value for all required parameters. +// +// * INVALID_ENUM: You specified an invalid value. +// +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. +// +// * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid +// characters. +// +// * INVALID_LIST_MEMBER: You provided a list to a parameter that contains +// at least one invalid value. +// +// * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter +// from the response to a previous call of the operation. +// +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// +// * INVALID_PATTERN: You provided a value that doesn't match the required +// pattern. +// +// * INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't +// match the required pattern. +// +// * INVALID_ROLE_NAME: You provided a role name that isn't valid. A role +// name can't begin with the reserved prefix AWSServiceRoleFor. +// +// * INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource +// Name (ARN) for the organization. +// +// * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. +// +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// +// * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter +// for the operation. +// +// * MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer +// than allowed. 
+// +// * MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger +// value than allowed. +// +// * MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter +// than allowed. +// +// * MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller +// value than allowed. +// +// * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only +// between entities in the same root. +// +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// +// * PolicyNotFoundException +// We can't find a policy with the PolicyId that you specified. +// +// * ServiceException +// AWS Organizations can't complete your request because of an internal service +// error. Try again later. +// +// * TooManyRequestsException +// You have sent too many requests in too short a period of time. The quota +// helps protect against denial-of-service attacks. Try again later. +// +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. +// +// * UnsupportedAPIEndpointException +// This action isn't available in the current AWS Region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTargetsForPolicy +func (c *Organizations) ListTargetsForPolicy(input *ListTargetsForPolicyInput) (*ListTargetsForPolicyOutput, error) { + req, out := c.ListTargetsForPolicyRequest(input) + return out, req.Send() +} + +// ListTargetsForPolicyWithContext is the same as ListTargetsForPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See ListTargetsForPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Organizations) ListTargetsForPolicyWithContext(ctx aws.Context, input *ListTargetsForPolicyInput, opts ...request.Option) (*ListTargetsForPolicyOutput, error) { + req, out := c.ListTargetsForPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTargetsForPolicyPages iterates over the pages of a ListTargetsForPolicy operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTargetsForPolicy method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTargetsForPolicy operation. 
+// pageNum := 0 +// err := client.ListTargetsForPolicyPages(params, +// func(page *organizations.ListTargetsForPolicyOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Organizations) ListTargetsForPolicyPages(input *ListTargetsForPolicyInput, fn func(*ListTargetsForPolicyOutput, bool) bool) error { + return c.ListTargetsForPolicyPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTargetsForPolicyPagesWithContext same as ListTargetsForPolicyPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Organizations) ListTargetsForPolicyPagesWithContext(ctx aws.Context, input *ListTargetsForPolicyInput, fn func(*ListTargetsForPolicyOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTargetsForPolicyInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTargetsForPolicyRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTargetsForPolicyOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opMoveAccount = "MoveAccount" + +// MoveAccountRequest generates a "aws/request.Request" representing the +// client's request for the MoveAccount operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See MoveAccount for more information on using the MoveAccount +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the MoveAccountRequest method. +// req, resp := client.MoveAccountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/MoveAccount +func (c *Organizations) MoveAccountRequest(input *MoveAccountInput) (req *request.Request, output *MoveAccountOutput) { + op := &request.Operation{ + Name: opMoveAccount, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &MoveAccountInput{} + } + + output = &MoveAccountOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// MoveAccount API operation for AWS Organizations. +// +// Moves an account from its current source parent root or organizational unit +// (OU) to the specified destination parent root or OU. +// +// This operation can be called only from the organization's master account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
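[Editor's note, not part of the vendored file] The MoveAccount description above only covers the call's semantics. As a hedged illustration of how a caller might invoke it, with every ID below being a placeholder:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession()))

	// MoveAccount needs the account ID plus the current (source) and new
	// (destination) parent root or OU IDs; all three here are placeholders.
	_, err := svc.MoveAccount(&organizations.MoveAccountInput{
		AccountId:           aws.String("111111111111"),
		SourceParentId:      aws.String("r-examplerootid111"),
		DestinationParentId: aws.String("ou-examplerootid111-exampleouid111"),
	})
	if err != nil {
		log.Fatalf("MoveAccount: %v", err)
	}
	log.Println("account moved")
}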
+// +// See the AWS API reference guide for AWS Organizations's +// API operation MoveAccount for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have permissions to perform the requested operation. The user or +// role that is making the request must have at least one IAM permissions policy +// attached that grants the required permissions. For more information, see +// Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) +// in the IAM User Guide. +// +// * InvalidInputException +// The requested operation failed because you provided invalid values for one +// or more of the request parameters. This exception includes a reason that +// contains additional information about the violated limit: +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. +// +// * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and +// can't be modified. +// +// * INPUT_REQUIRED: You must include a value for all required parameters. +// +// * INVALID_ENUM: You specified an invalid value. +// +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. +// +// * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid +// characters. +// +// * INVALID_LIST_MEMBER: You provided a list to a parameter that contains +// at least one invalid value. +// +// * INVALID_PAGINATION_TOKEN: Get the value for the NextToken parameter +// from the response to a previous call of the operation. +// +// * INVALID_PARTY_TYPE_TARGET: You specified the wrong type of entity (account, +// organization, or email) as a party. +// +// * INVALID_PATTERN: You provided a value that doesn't match the required +// pattern. +// +// * INVALID_PATTERN_TARGET_ID: You specified a policy target ID that doesn't +// match the required pattern. +// +// * INVALID_ROLE_NAME: You provided a role name that isn't valid. A role +// name can't begin with the reserved prefix AWSServiceRoleFor. +// +// * INVALID_SYNTAX_ORGANIZATION_ARN: You specified an invalid Amazon Resource +// Name (ARN) for the organization. +// +// * INVALID_SYNTAX_POLICY_ID: You specified an invalid policy ID. +// +// * INVALID_SYSTEM_TAGS_PARAMETER: You specified a tag key that is a system +// tag. You can’t add, edit, or delete system tag keys because they're +// reserved for AWS use. System tags don’t count against your tags per +// resource limit. +// +// * MAX_FILTER_LIMIT_EXCEEDED: You can specify only one filter parameter +// for the operation. +// +// * MAX_LENGTH_EXCEEDED: You provided a string parameter that is longer +// than allowed. +// +// * MAX_VALUE_EXCEEDED: You provided a numeric parameter that has a larger +// value than allowed. +// +// * MIN_LENGTH_EXCEEDED: You provided a string parameter that is shorter +// than allowed. +// +// * MIN_VALUE_EXCEEDED: You provided a numeric parameter that has a smaller +// value than allowed. +// +// * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only +// between entities in the same root. +// +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// +// * SourceParentNotFoundException +// We can't find a source root or OU with the ParentId that you specified. 
+// +// * DestinationParentNotFoundException +// We can't find the destination container (a root or OU) with the ParentId +// that you specified. +// +// * DuplicateAccountException +// That account is already present in the specified destination. +// +// * AccountNotFoundException +// We can't find an AWS account with the AccountId that you specified, or the +// account whose credentials you used to make this request isn't a member of +// an organization. +// +// * TooManyRequestsException +// You have sent too many requests in too short a period of time. The quota +// helps protect against denial-of-service attacks. Try again later. +// +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. +// +// * ConcurrentModificationException +// The target of the operation is currently being modified by a different request. +// Try again later. +// +// * AWSOrganizationsNotInUseException +// Your account isn't a member of an organization. To make this request, you +// must use the credentials of an account that belongs to an organization. +// +// * ServiceException +// AWS Organizations can't complete your request because of an internal service +// error. Try again later. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/MoveAccount +func (c *Organizations) MoveAccount(input *MoveAccountInput) (*MoveAccountOutput, error) { + req, out := c.MoveAccountRequest(input) + return out, req.Send() +} + +// MoveAccountWithContext is the same as MoveAccount with the addition of +// the ability to pass a context and additional request options. +// +// See MoveAccount for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Organizations) MoveAccountWithContext(ctx aws.Context, input *MoveAccountInput, opts ...request.Option) (*MoveAccountOutput, error) { + req, out := c.MoveAccountRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterDelegatedAdministrator = "RegisterDelegatedAdministrator" + +// RegisterDelegatedAdministratorRequest generates a "aws/request.Request" representing the +// client's request for the RegisterDelegatedAdministrator operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterDelegatedAdministrator for more information on using the RegisterDelegatedAdministrator +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterDelegatedAdministratorRequest method. 
+// req, resp := client.RegisterDelegatedAdministratorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/RegisterDelegatedAdministrator +func (c *Organizations) RegisterDelegatedAdministratorRequest(input *RegisterDelegatedAdministratorInput) (req *request.Request, output *RegisterDelegatedAdministratorOutput) { + op := &request.Operation{ + Name: opRegisterDelegatedAdministrator, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RegisterDelegatedAdministratorInput{} + } + + output = &RegisterDelegatedAdministratorOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RegisterDelegatedAdministrator API operation for AWS Organizations. +// +// Enables the specified member account to administer the Organizations features +// of the specified AWS service. It grants read-only access to AWS Organizations +// service data. The account still requires IAM permissions to access and administer +// the AWS service. +// +// You can run this action only for AWS services that support this feature. +// For a current list of services that support it, see the column Supports Delegated +// Administrator in the table at AWS Services that you can use with AWS Organizations +// (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_integrated-services-list.html) +// in the AWS Organizations User Guide. +// +// This operation can be called only from the organization's master account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Organizations's +// API operation RegisterDelegatedAdministrator for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have permissions to perform the requested operation. The user or +// role that is making the request must have at least one IAM permissions policy +// attached that grants the required permissions. For more information, see +// Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) +// in the IAM User Guide. +// +// * AccountAlreadyRegisteredException +// The specified account is already a delegated administrator for this AWS service. +// +// * AccountNotFoundException +// We can't find an AWS account with the AccountId that you specified, or the +// account whose credentials you used to make this request isn't a member of +// an organization. +// +// * AWSOrganizationsNotInUseException +// Your account isn't a member of an organization. To make this request, you +// must use the credentials of an account that belongs to an organization. +// +// * ConcurrentModificationException +// The target of the operation is currently being modified by a different request. +// Try again later. +// +// * ConstraintViolationException +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. 
This exception +// includes a reason that contains additional information about the violated +// limit: +// +// Some of the reasons in the following list might not be applicable to this +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. +// +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account +// from the organization that doesn't yet have enough information to exist +// as a standalone account. This account requires you to first agree to the +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. +// +// * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove +// an account from the organization that doesn't yet have enough information +// to exist as a standalone account. This account requires you to first complete +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) +// in the AWS Organizations User Guide. +// +// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number +// of accounts that you can create in one day. +// +// * ACCOUNT_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the limit on +// the number of accounts in an organization. If you need more accounts, +// contact AWS Support (https://console.aws.amazon.com/support/home#/) to +// request an increase in your limit. Or the number of invitations that you +// tried to send would cause you to exceed the limit of accounts in your +// organization. Send fewer invitations or contact AWS Support to request +// an increase in the number of accounts. Deleted and closed accounts still +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact +// AWS Support (https://console.aws.amazon.com/support/home#/). +// +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. 
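The constraint-violation reasons for RegisterDelegatedAdministrator continue below. As a quick illustration of the operation itself, here is a hedged sketch of delegating one integrated service to a member account; the account ID and service principal are assumptions, not values from this patch.

```go
package example

import (
	"context"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/organizations"
)

// registerDelegatedAdmin delegates administration of a single integrated
// AWS service to a member account, as described above. The member account
// still needs its own IAM permissions for that service.
func registerDelegatedAdmin(ctx context.Context, client *organizations.Organizations) error {
	_, err := client.RegisterDelegatedAdministratorWithContext(ctx, &organizations.RegisterDelegatedAdministratorInput{
		AccountId:        aws.String("111122223333"),          // placeholder member account
		ServicePrincipal: aws.String("config.amazonaws.com"),   // example integrated service
	})
	return err
}
```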
+// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// +// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of +// handshakes that you can send in one day. +// +// * MASTER_ACCOUNT_ADDRESS_DOES_NOT_MATCH_MARKETPLACE: To create an account +// in this organization, you first must migrate the organization's master +// account to the marketplace that corresponds to the master account's address. +// For example, accounts with India addresses must be associated with the +// AISPL marketplace. All accounts in an organization must be associated +// with the same marketplace. +// +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// +// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you +// must first provide a valid contact address and phone number for the master +// account. Then try the operation again. +// +// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the +// master account must have an associated account in the AWS GovCloud (US-West) +// Region. For more information, see AWS Organizations (http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-organizations.html) +// in the AWS GovCloud User Guide. +// +// * MASTER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To create an organization +// with this master account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// +// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the +// number of policies of a certain type that can be attached to an entity +// at one time. +// +// * MAX_TAG_LIMIT_EXCEEDED: You have exceeded the number of tags allowed +// on this resource. +// +// * MEMBER_ACCOUNT_PAYMENT_INSTRUMENT_REQUIRED: To complete this operation +// with this member account, you first must associate a valid payment instrument, +// such as a credit card, with the account. Follow the steps at To leave +// an organization when all required account information has not yet been +// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. // -// Moves an account from its current source parent root or organizational unit -// (OU) to the specified destination parent root or OU. +// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // -// This operation can be called only from the organization's master account. +// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation +// that requires the organization to be configured to support all features. 
+// An organization that supports only consolidated billing features can't +// perform this operation. // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. // -// See the AWS API reference guide for AWS Organizations's -// API operation MoveAccount for usage and error information. +// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs +// that you can have in an organization. // -// Returned Error Types: -// * AccessDeniedException -// You don't have permissions to perform the requested operation. The user or -// role that is making the request must have at least one IAM permissions policy -// attached that grants the required permissions. For more information, see -// Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) -// in the IAM User Guide. +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// +// * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of +// policies that you can have in an organization. +// +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -10095,7 +12640,10 @@ func (c *Organizations) MoveAccountRequest(input *MoveAccountInput) (req *reques // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -10104,7 +12652,7 @@ func (c *Organizations) MoveAccountRequest(input *MoveAccountInput) (req *reques // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -10155,58 +12703,44 @@ func (c *Organizations) MoveAccountRequest(input *MoveAccountInput) (req *reques // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // -// * SourceParentNotFoundException -// We can't find a source root or OU with the ParentId that you specified. -// -// * DestinationParentNotFoundException -// We can't find the destination container (a root or OU) with the ParentId -// that you specified. -// -// * DuplicateAccountException -// That account is already present in the specified destination. +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. // -// * AccountNotFoundException -// We can't find an AWS account with the AccountId that you specified. Or the -// account whose credentials you used to make this request isn't a member of -// an organization. +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. 
// // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. -// -// * ConcurrentModificationException -// The target of the operation is currently being modified by a different request. -// Try again later. -// -// * AWSOrganizationsNotInUseException -// Your account isn't a member of an organization. To make this request, you -// must use the credentials of an account that belongs to an organization. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/MoveAccount -func (c *Organizations) MoveAccount(input *MoveAccountInput) (*MoveAccountOutput, error) { - req, out := c.MoveAccountRequest(input) +// * UnsupportedAPIEndpointException +// This action isn't available in the current AWS Region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/RegisterDelegatedAdministrator +func (c *Organizations) RegisterDelegatedAdministrator(input *RegisterDelegatedAdministratorInput) (*RegisterDelegatedAdministratorOutput, error) { + req, out := c.RegisterDelegatedAdministratorRequest(input) return out, req.Send() } -// MoveAccountWithContext is the same as MoveAccount with the addition of +// RegisterDelegatedAdministratorWithContext is the same as RegisterDelegatedAdministrator with the addition of // the ability to pass a context and additional request options. // -// See MoveAccount for details on how to use this API operation. +// See RegisterDelegatedAdministrator for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Organizations) MoveAccountWithContext(ctx aws.Context, input *MoveAccountInput, opts ...request.Option) (*MoveAccountOutput, error) { - req, out := c.MoveAccountRequest(input) +func (c *Organizations) RegisterDelegatedAdministratorWithContext(ctx aws.Context, input *RegisterDelegatedAdministratorInput, opts ...request.Option) (*RegisterDelegatedAdministratorOutput, error) { + req, out := c.RegisterDelegatedAdministratorRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -10268,20 +12802,23 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // This operation can be called only from the organization's master account. // Member accounts can remove themselves with LeaveOrganization instead. // -// You can remove an account from your organization only if the account is configured -// with the information required to operate as a standalone account. 
When you -// create an account in an organization using the AWS Organizations console, -// API, or CLI, the information required of standalone accounts is not automatically -// collected. For an account that you want to make standalone, you must accept -// the end user license agreement (EULA). You must also choose a support plan, -// provide and verify the required contact information, and provide a current -// payment method. AWS uses the payment method to charge for any billable (not -// free tier) AWS activity that occurs while the account isn't attached to an -// organization. To remove an account that doesn't yet have this information, -// you must sign in as the member account. Then follow the steps at To leave -// an organization when all required account information has not yet been provided -// (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// * You can remove an account from your organization only if the account +// is configured with the information required to operate as a standalone +// account. When you create an account in an organization using the AWS Organizations +// console, API, or CLI commands, the information required of standalone +// accounts is not automatically collected. For an account that you want +// to make standalone, you must choose a support plan, provide and verify +// the required contact information, and provide a current payment method. +// AWS uses the payment method to charge for any billable (not free tier) +// AWS activity that occurs while the account isn't attached to an organization. +// To remove an account that doesn't yet have this information, you must +// sign in as the member account and follow the steps at To leave an organization +// when all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// in the AWS Organizations User Guide. +// +// * After the account leaves the organization, all tags that were attached +// to the account object in the organization are deleted. AWS accounts outside +// of an organization do not support tags. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10299,7 +12836,7 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // in the IAM User Guide. // // * AccountNotFoundException -// We can't find an AWS account with the AccountId that you specified. Or the +// We can't find an AWS account with the AccountId that you specified, or the // account whose credentials you used to make this request isn't a member of // an organization. // @@ -10312,27 +12849,32 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. 
For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -10345,11 +12887,33 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. 
+// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -10360,8 +12924,12 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -10376,6 +12944,10 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -10391,27 +12963,29 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. 
+// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -10419,7 +12993,10 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -10428,7 +13005,7 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -10479,6 +13056,12 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * MasterCannotLeaveOrganizationException // You can't remove a master account from an organization. If you want the master // account to become a member account in another organization, you must first @@ -10489,12 +13072,12 @@ func (c *Organizations) RemoveAccountFromOrganizationRequest(input *RemoveAccoun // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/RemoveAccountFromOrganization func (c *Organizations) RemoveAccountFromOrganization(input *RemoveAccountFromOrganizationInput) (*RemoveAccountFromOrganizationOutput, error) { @@ -10565,7 +13148,15 @@ func (c *Organizations) TagResourceRequest(input *TagResourceInput) (req *reques // // Adds one or more tags to the specified resource. // -// Currently, you can tag and untag accounts in AWS Organizations. +// Currently, you can attach tags to the following resources in AWS Organizations. +// +// * AWS account +// +// * Organization root +// +// * Organizational unit (OU) +// +// * Policy (any type) // // This operation can be called only from the organization's master account. // @@ -10593,30 +13184,35 @@ func (c *Organizations) TagResourceRequest(input *TagResourceInput) (req *reques // must use the credentials of an account that belongs to an organization. // // * TargetNotFoundException -// We can't find a root, OU, or account with the TargetId that you specified. +// We can't find a root, OU, account, or policy with the TargetId that you specified. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. 
Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -10629,11 +13225,33 @@ func (c *Organizations) TagResourceRequest(input *TagResourceInput) (req *reques // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -10644,8 +13262,12 @@ func (c *Organizations) TagResourceRequest(input *TagResourceInput) (req *reques // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. 
// // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -10660,6 +13282,10 @@ func (c *Organizations) TagResourceRequest(input *TagResourceInput) (req *reques // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -10675,27 +13301,29 @@ func (c *Organizations) TagResourceRequest(input *TagResourceInput) (req *reques // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. +// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -10703,7 +13331,10 @@ func (c *Organizations) TagResourceRequest(input *TagResourceInput) (req *reques // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -10712,7 +13343,7 @@ func (c *Organizations) TagResourceRequest(input *TagResourceInput) (req *reques // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. 
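The TagResource documentation above now lists accounts, roots, OUs, and policies as taggable resources. A minimal sketch of tagging one of them follows; the resource ID and tag values are placeholders.

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/organizations"
)

// tagAccount attaches a tag to an organization resource (here an account),
// matching the expanded resource list in the TagResource docs above.
func tagAccount(client *organizations.Organizations) error {
	_, err := client.TagResource(&organizations.TagResourceInput{
		ResourceId: aws.String("111122223333"), // account, root, OU, or policy ID
		Tags: []*organizations.Tag{
			{Key: aws.String("CostCenter"), Value: aws.String("1234")},
		},
	})
	return err
}
```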
@@ -10763,17 +13394,23 @@ func (c *Organizations) TagResourceRequest(input *TagResourceInput) (req *reques // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/TagResource func (c *Organizations) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { @@ -10842,9 +13479,17 @@ func (c *Organizations) UntagResourceRequest(input *UntagResourceInput) (req *re // UntagResource API operation for AWS Organizations. // -// Removes a tag from the specified resource. +// Removes any tags with the specified keys from the specified resource. +// +// You can attach tags to the following resources in AWS Organizations. +// +// * AWS account +// +// * Organization root // -// Currently, you can tag and untag accounts in AWS Organizations. +// * Organizational unit (OU) +// +// * Policy (any type) // // This operation can be called only from the organization's master account. // @@ -10872,30 +13517,35 @@ func (c *Organizations) UntagResourceRequest(input *UntagResourceInput) (req *re // must use the credentials of an account that belongs to an organization. // // * TargetNotFoundException -// We can't find a root, OU, or account with the TargetId that you specified. +// We can't find a root, OU, account, or policy with the TargetId that you specified. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. 
Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. +// AWS Customer Agreement. Follow the steps at Removing a member account +// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in +// the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete -// phone verification. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) +// phone verification. Follow the steps at Removing a member account from +// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. // // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -10908,11 +13558,33 @@ func (c *Organizations) UntagResourceRequest(input *UntagResourceInput) (req *re // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still -// count toward your limit. If you get receive this exception when running -// a command immediately after creating the organization, wait one hour and -// try again. If after an hour it continues to fail with this error, contact +// count toward your limit. If you get this exception when running a command +// immediately after creating the organization, wait one hour and try again. +// After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // +// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to +// register the master account of the organization as a delegated administrator +// for an AWS service integrated with Organizations. You can designate only +// a member account as a delegated administrator. +// +// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove +// an account that is registered as a delegated administrator for a service +// integrated with your organization. To complete this operation, you must +// first deregister this account as a delegated administrator. +// +// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an +// organization in the specified region, you must enable all features mode. +// +// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register +// an AWS account as a delegated administrator for an AWS service that already +// has a delegated administrator. 
To complete this operation, you must first +// deregister any existing delegated administrators for this service. +// +// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only +// valid for a limited period of time. You must resubmit the request and +// generate a new verfication code. +// // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -10923,8 +13595,12 @@ func (c *Organizations) UntagResourceRequest(input *UntagResourceInput) (req *re // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // +// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions +// in China. To create an organization, the master must have an valid business +// license. For more information, contact customer support. +// // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you -// must first provide contact a valid address and phone number for the master +// must first provide a valid contact address and phone number for the master // account. Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -10939,6 +13615,10 @@ func (c *Organizations) UntagResourceRequest(input *UntagResourceInput) (req *re // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // +// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted +// to register more delegated administrators than allowed for the service +// principal. +// // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. @@ -10954,27 +13634,29 @@ func (c *Organizations) UntagResourceRequest(input *UntagResourceInput) (req *re // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a -// policy from an entity, which would cause the entity to have fewer than -// the minimum number of policies of the required type. -// -// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is -// too many levels deep. +// policy from an entity that would cause the entity to have fewer than the +// minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // +// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is +// too many levels deep. +// // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // +// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that +// is larger than the maximum size. +// // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // -// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant -// with the tag policy that’s in effect for the account. For more information, -// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) -// in the AWS Organizations User Guide. 
+// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. // // * InvalidInputException // The requested operation failed because you provided invalid values for one @@ -10982,7 +13664,10 @@ func (c *Organizations) UntagResourceRequest(input *UntagResourceInput) (req *re // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -10991,7 +13676,7 @@ func (c *Organizations) UntagResourceRequest(input *UntagResourceInput) (req *re // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -11042,17 +13727,23 @@ func (c *Organizations) UntagResourceRequest(input *UntagResourceInput) (req *re // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * ServiceException // AWS Organizations can't complete your request because of an internal service // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/UntagResource func (c *Organizations) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { @@ -11158,7 +13849,10 @@ func (c *Organizations) UpdateOrganizationalUnitRequest(input *UpdateOrganizatio // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -11167,7 +13861,7 @@ func (c *Organizations) UpdateOrganizationalUnitRequest(input *UpdateOrganizatio // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. 
// // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -11218,6 +13912,12 @@ func (c *Organizations) UpdateOrganizationalUnitRequest(input *UpdateOrganizatio // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * OrganizationalUnitNotFoundException // We can't find an OU with the OrganizationalUnitId that you specified. // @@ -11226,12 +13926,12 @@ func (c *Organizations) UpdateOrganizationalUnitRequest(input *UpdateOrganizatio // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/UpdateOrganizationalUnit func (c *Organizations) UpdateOrganizationalUnit(input *UpdateOrganizationalUnitInput) (*UpdateOrganizationalUnitOutput, error) { @@ -11329,27 +14029,32 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // Try again later. // // * ConstraintViolationException -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. Instead, +// after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the -// AWS Customer Agreement. Follow the steps at To leave an organization when -// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) -// in the AWS Organizations User Guide. 
+// AWS Customer Agreement. Follow the steps at Removing a member account
+// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)
+// in the AWS Organizations User Guide.
//
// * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove
// an account from the organization that doesn't yet have enough information
// to exist as a standalone account. This account requires you to first complete
-// phone verification. Follow the steps at To leave an organization when
-// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
+// phone verification. Follow the steps at Removing a member account from
+// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)
// in the AWS Organizations User Guide.
//
// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number
@@ -11362,11 +14067,33 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ
// tried to send would cause you to exceed the limit of accounts in your
// organization. Send fewer invitations or contact AWS Support to request
// an increase in the number of accounts. Deleted and closed accounts still
-// count toward your limit. If you get receive this exception when running
-// a command immediately after creating the organization, wait one hour and
-// try again. If after an hour it continues to fail with this error, contact
+// count toward your limit. If you get this exception when running a command
+// immediately after creating the organization, wait one hour and try again.
+// After an hour, if the command continues to fail with this error, contact
// AWS Support (https://console.aws.amazon.com/support/home#/).
//
+// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to
+// register the master account of the organization as a delegated administrator
+// for an AWS service integrated with Organizations. You can designate only
+// a member account as a delegated administrator.
+//
+// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove
+// an account that is registered as a delegated administrator for a service
+// integrated with your organization. To complete this operation, you must
+// first deregister this account as a delegated administrator.
+//
+// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an
+// organization in the specified region, you must enable all features mode.
+//
+// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register
+// an AWS account as a delegated administrator for an AWS service that already
+// has a delegated administrator. To complete this operation, you must first
+// deregister any existing delegated administrators for this service.
+//
+// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only
+// valid for a limited period of time. You must resubmit the request and
+// generate a new verification code.
+//
// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of
// handshakes that you can send in one day.
//
@@ -11377,8 +14104,12 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ
// AISPL marketplace. All accounts in an organization must be associated
// with the same marketplace.
//
+// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions
+// in China. To create an organization, the master must have a valid business
+// license. For more information, contact customer support.
+//
// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you
-// must first provide contact a valid address and phone number for the master
+// must first provide a valid contact address and phone number for the master
// account. Then try the operation again.
//
// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the
@@ -11393,6 +14124,10 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ
// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
// in the AWS Organizations User Guide.
//
+// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted
+// to register more delegated administrators than allowed for the service
+// principal.
+//
// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the
// number of policies of a certain type that can be attached to an entity
// at one time.
@@ -11408,27 +14143,29 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ
// in the AWS Organizations User Guide.
//
// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a
-// policy from an entity, which would cause the entity to have fewer than
-// the minimum number of policies of the required type.
-//
-// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is
-// too many levels deep.
+// policy from an entity that would cause the entity to have fewer than the
+// minimum number of policies of a certain type required.
//
// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation
// that requires the organization to be configured to support all features.
// An organization that supports only consolidated billing features can't
// perform this operation.
//
+// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is
+// too many levels deep.
+//
// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs
// that you can have in an organization.
//
+// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that
+// is larger than the maximum size.
+//
// * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of
// policies that you can have in an organization.
//
-// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant
-// with the tag policy that’s in effect for the account. For more information,
-// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html)
-// in the AWS Organizations User Guide.
+// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with
+// tags that are not compliant with the tag policy requirements for this
+// account.
//
// * DuplicatePolicyException
// A policy with the same name already exists.
@@ -11439,7 +14176,10 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ
// contains additional information about the violated limit:
//
// Some of the reasons in the following list might not be applicable to this
-// specific API or operation:
+// specific API or operation.
+//
+// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to
+// the same entity.
// // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -11448,7 +14188,7 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -11499,6 +14239,12 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. // +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. +// // * MalformedPolicyDocumentException // The provided policy document doesn't meet the requirements of the specified // policy type. For example, the syntax might be incorrect. For details about @@ -11513,15 +14259,15 @@ func (c *Organizations) UpdatePolicyRequest(input *UpdatePolicyInput) (req *requ // error. Try again later. // // * TooManyRequestsException -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. // // * UnsupportedAPIEndpointException -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. // // * PolicyChangesInProgressException // Changes to the effective policy are in progress, and its contents can't be @@ -11552,8 +14298,8 @@ func (c *Organizations) UpdatePolicyWithContext(ctx aws.Context, input *UpdatePo // Your account isn't a member of an organization. To make this request, you // must use the credentials of an account that belongs to an organization. type AWSOrganizationsNotInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -11570,17 +14316,17 @@ func (s AWSOrganizationsNotInUseException) GoString() string { func newErrorAWSOrganizationsNotInUseException(v protocol.ResponseMetadata) error { return &AWSOrganizationsNotInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AWSOrganizationsNotInUseException) Code() string { +func (s *AWSOrganizationsNotInUseException) Code() string { return "AWSOrganizationsNotInUseException" } // Message returns the exception's message. 
-func (s AWSOrganizationsNotInUseException) Message() string { +func (s *AWSOrganizationsNotInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11588,22 +14334,22 @@ func (s AWSOrganizationsNotInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AWSOrganizationsNotInUseException) OrigErr() error { +func (s *AWSOrganizationsNotInUseException) OrigErr() error { return nil } -func (s AWSOrganizationsNotInUseException) Error() string { +func (s *AWSOrganizationsNotInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AWSOrganizationsNotInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AWSOrganizationsNotInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AWSOrganizationsNotInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *AWSOrganizationsNotInUseException) RequestID() string { + return s.RespMetadata.RequestID } type AcceptHandshakeInput struct { @@ -11676,8 +14422,8 @@ func (s *AcceptHandshakeOutput) SetHandshake(v *Handshake) *AcceptHandshakeOutpu // Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) // in the IAM User Guide. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -11694,17 +14440,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11712,30 +14458,30 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // The operation that you attempted requires you to have the iam:CreateServiceLinkedRole // for organizations.amazonaws.com permission so that AWS Organizations can // create the required service-linked role. You don't have that permission. 
type AccessDeniedForDependencyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -11754,17 +14500,17 @@ func (s AccessDeniedForDependencyException) GoString() string { func newErrorAccessDeniedForDependencyException(v protocol.ResponseMetadata) error { return &AccessDeniedForDependencyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedForDependencyException) Code() string { +func (s *AccessDeniedForDependencyException) Code() string { return "AccessDeniedForDependencyException" } // Message returns the exception's message. -func (s AccessDeniedForDependencyException) Message() string { +func (s *AccessDeniedForDependencyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11772,22 +14518,22 @@ func (s AccessDeniedForDependencyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedForDependencyException) OrigErr() error { +func (s *AccessDeniedForDependencyException) OrigErr() error { return nil } -func (s AccessDeniedForDependencyException) Error() string { +func (s *AccessDeniedForDependencyException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedForDependencyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedForDependencyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedForDependencyException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedForDependencyException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about an AWS account that is a member of an organization. @@ -11882,39 +14628,151 @@ func (s *Account) SetStatus(v string) *Account { return s } -// We can't find an AWS account with the AccountId that you specified. Or the +// The specified account is already a delegated administrator for this AWS service. +type AccountAlreadyRegisteredException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AccountAlreadyRegisteredException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountAlreadyRegisteredException) GoString() string { + return s.String() +} + +func newErrorAccountAlreadyRegisteredException(v protocol.ResponseMetadata) error { + return &AccountAlreadyRegisteredException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccountAlreadyRegisteredException) Code() string { + return "AccountAlreadyRegisteredException" +} + +// Message returns the exception's message. +func (s *AccountAlreadyRegisteredException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *AccountAlreadyRegisteredException) OrigErr() error { + return nil +} + +func (s *AccountAlreadyRegisteredException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccountAlreadyRegisteredException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccountAlreadyRegisteredException) RequestID() string { + return s.RespMetadata.RequestID +} + +// We can't find an AWS account with the AccountId that you specified, or the // account whose credentials you used to make this request isn't a member of // an organization. type AccountNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AccountNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountNotFoundException) GoString() string { + return s.String() +} + +func newErrorAccountNotFoundException(v protocol.ResponseMetadata) error { + return &AccountNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccountNotFoundException) Code() string { + return "AccountNotFoundException" +} + +// Message returns the exception's message. +func (s *AccountNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccountNotFoundException) OrigErr() error { + return nil +} + +func (s *AccountNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccountNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccountNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The specified account is not a delegated administrator for this AWS service. +type AccountNotRegisteredException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } // String returns the string representation -func (s AccountNotFoundException) String() string { +func (s AccountNotRegisteredException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AccountNotFoundException) GoString() string { +func (s AccountNotRegisteredException) GoString() string { return s.String() } -func newErrorAccountNotFoundException(v protocol.ResponseMetadata) error { - return &AccountNotFoundException{ - respMetadata: v, +func newErrorAccountNotRegisteredException(v protocol.ResponseMetadata) error { + return &AccountNotRegisteredException{ + RespMetadata: v, } } // Code returns the exception type name. -func (s AccountNotFoundException) Code() string { - return "AccountNotFoundException" +func (s *AccountNotRegisteredException) Code() string { + return "AccountNotRegisteredException" } // Message returns the exception's message. 
-func (s AccountNotFoundException) Message() string { +func (s *AccountNotRegisteredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11922,22 +14780,22 @@ func (s AccountNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccountNotFoundException) OrigErr() error { +func (s *AccountNotRegisteredException) OrigErr() error { return nil } -func (s AccountNotFoundException) Error() string { +func (s *AccountNotRegisteredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccountNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccountNotRegisteredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccountNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccountNotRegisteredException) RequestID() string { + return s.RespMetadata.RequestID } // You can't invite an existing account to your organization until you verify @@ -11945,8 +14803,8 @@ func (s AccountNotFoundException) RequestID() string { // information, see Email Address Verification (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_create.html#about-email-verification) // in the AWS Organizations User Guide. type AccountOwnerNotVerifiedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -11963,17 +14821,17 @@ func (s AccountOwnerNotVerifiedException) GoString() string { func newErrorAccountOwnerNotVerifiedException(v protocol.ResponseMetadata) error { return &AccountOwnerNotVerifiedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccountOwnerNotVerifiedException) Code() string { +func (s *AccountOwnerNotVerifiedException) Code() string { return "AccountOwnerNotVerifiedException" } // Message returns the exception's message. -func (s AccountOwnerNotVerifiedException) Message() string { +func (s *AccountOwnerNotVerifiedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11981,29 +14839,29 @@ func (s AccountOwnerNotVerifiedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccountOwnerNotVerifiedException) OrigErr() error { +func (s *AccountOwnerNotVerifiedException) OrigErr() error { return nil } -func (s AccountOwnerNotVerifiedException) Error() string { +func (s *AccountOwnerNotVerifiedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccountOwnerNotVerifiedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccountOwnerNotVerifiedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccountOwnerNotVerifiedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccountOwnerNotVerifiedException) RequestID() string { + return s.RespMetadata.RequestID } // This account is already a member of an organization. An account can belong // to only one organization at a time. 
type AlreadyInOrganizationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12020,17 +14878,17 @@ func (s AlreadyInOrganizationException) GoString() string { func newErrorAlreadyInOrganizationException(v protocol.ResponseMetadata) error { return &AlreadyInOrganizationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AlreadyInOrganizationException) Code() string { +func (s *AlreadyInOrganizationException) Code() string { return "AlreadyInOrganizationException" } // Message returns the exception's message. -func (s AlreadyInOrganizationException) Message() string { +func (s *AlreadyInOrganizationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12038,22 +14896,22 @@ func (s AlreadyInOrganizationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AlreadyInOrganizationException) OrigErr() error { +func (s *AlreadyInOrganizationException) OrigErr() error { return nil } -func (s AlreadyInOrganizationException) Error() string { +func (s *AlreadyInOrganizationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AlreadyInOrganizationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AlreadyInOrganizationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AlreadyInOrganizationException) RequestID() string { - return s.respMetadata.RequestID +func (s *AlreadyInOrganizationException) RequestID() string { + return s.RespMetadata.RequestID } type AttachPolicyInput struct { @@ -12253,8 +15111,8 @@ func (s *Child) SetType(v string) *Child { // We can't find an organizational unit (OU) or AWS account with the ChildId // that you specified. type ChildNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12271,17 +15129,17 @@ func (s ChildNotFoundException) GoString() string { func newErrorChildNotFoundException(v protocol.ResponseMetadata) error { return &ChildNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ChildNotFoundException) Code() string { +func (s *ChildNotFoundException) Code() string { return "ChildNotFoundException" } // Message returns the exception's message. -func (s ChildNotFoundException) Message() string { +func (s *ChildNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12289,29 +15147,29 @@ func (s ChildNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ChildNotFoundException) OrigErr() error { +func (s *ChildNotFoundException) OrigErr() error { return nil } -func (s ChildNotFoundException) Error() string { +func (s *ChildNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ChildNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ChildNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ChildNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ChildNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The target of the operation is currently being modified by a different request. // Try again later. type ConcurrentModificationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12328,17 +15186,17 @@ func (s ConcurrentModificationException) GoString() string { func newErrorConcurrentModificationException(v protocol.ResponseMetadata) error { return &ConcurrentModificationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConcurrentModificationException) Code() string { +func (s *ConcurrentModificationException) Code() string { return "ConcurrentModificationException" } // Message returns the exception's message. -func (s ConcurrentModificationException) Message() string { +func (s *ConcurrentModificationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12346,45 +15204,50 @@ func (s ConcurrentModificationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentModificationException) OrigErr() error { +func (s *ConcurrentModificationException) OrigErr() error { return nil } -func (s ConcurrentModificationException) Error() string { +func (s *ConcurrentModificationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConcurrentModificationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConcurrentModificationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConcurrentModificationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConcurrentModificationException) RequestID() string { + return s.RespMetadata.RequestID } -// Performing this operation violates a minimum or maximum value limit. Examples -// include attempting to remove the last service control policy (SCP) from an -// OU or root, or attaching too many policies to an account, OU, or root. This -// exception includes a reason that contains additional information about the -// violated limit. +// Performing this operation violates a minimum or maximum value limit. For +// example, attempting to remove the last service control policy (SCP) from +// an OU or root, inviting or creating too many accounts to the organization, +// or attaching too many policies to an account, OU, or root. This exception +// includes a reason that contains additional information about the violated +// limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master +// account from the organization. You can't remove the master account. 
Instead,
+// after you remove all member accounts, delete the organization itself.
//
// * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account
// from the organization that doesn't yet have enough information to exist
// as a standalone account. This account requires you to first agree to the
-// AWS Customer Agreement. Follow the steps at To leave an organization when
-// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
-// in the AWS Organizations User Guide.
+// AWS Customer Agreement. Follow the steps at Removing a member account
+// from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)
+// in the AWS Organizations User Guide.
//
// * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove
// an account from the organization that doesn't yet have enough information
// to exist as a standalone account. This account requires you to first complete
-// phone verification. Follow the steps at To leave an organization when
-// all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
+// phone verification. Follow the steps at Removing a member account from
+// your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)
// in the AWS Organizations User Guide.
//
// * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number
@@ -12397,11 +15260,33 @@ func (s ConcurrentModificationException) RequestID() string {
// tried to send would cause you to exceed the limit of accounts in your
// organization. Send fewer invitations or contact AWS Support to request
// an increase in the number of accounts. Deleted and closed accounts still
-// count toward your limit. If you get receive this exception when running
-// a command immediately after creating the organization, wait one hour and
-// try again. If after an hour it continues to fail with this error, contact
+// count toward your limit. If you get this exception when running a command
+// immediately after creating the organization, wait one hour and try again.
+// After an hour, if the command continues to fail with this error, contact
// AWS Support (https://console.aws.amazon.com/support/home#/).
//
+// * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to
+// register the master account of the organization as a delegated administrator
+// for an AWS service integrated with Organizations. You can designate only
+// a member account as a delegated administrator.
+//
+// * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove
+// an account that is registered as a delegated administrator for a service
+// integrated with your organization. To complete this operation, you must
+// first deregister this account as a delegated administrator.
+//
+// * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an
+// organization in the specified region, you must enable all features mode.
+//
+// * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register
+// an AWS account as a delegated administrator for an AWS service that already
+// has a delegated administrator. To complete this operation, you must first
+// deregister any existing delegated administrators for this service.
+//
+// * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only
+// valid for a limited period of time. You must resubmit the request and
+// generate a new verification code.
+//
// * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of
// handshakes that you can send in one day.
//
@@ -12412,8 +15297,12 @@ func (s ConcurrentModificationException) RequestID() string {
// AISPL marketplace. All accounts in an organization must be associated
// with the same marketplace.
//
+// * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions
+// in China. To create an organization, the master must have a valid business
+// license. For more information, contact customer support.
+//
// * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you
-// must first provide contact a valid address and phone number for the master
+// must first provide a valid contact address and phone number for the master
// account. Then try the operation again.
//
// * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the
@@ -12428,6 +15317,10 @@ func (s ConcurrentModificationException) RequestID() string {
// provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info)
// in the AWS Organizations User Guide.
//
+// * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted
+// to register more delegated administrators than allowed for the service
+// principal.
+//
// * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the
// number of policies of a certain type that can be attached to an entity
// at one time.
@@ -12443,30 +15336,32 @@ func (s ConcurrentModificationException) RequestID() string {
// in the AWS Organizations User Guide.
//
// * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a
-// policy from an entity, which would cause the entity to have fewer than
-// the minimum number of policies of the required type.
-//
-// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is
-// too many levels deep.
+// policy from an entity that would cause the entity to have fewer than the
+// minimum number of policies of a certain type required.
//
// * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation
// that requires the organization to be configured to support all features.
// An organization that supports only consolidated billing features can't
// perform this operation.
//
+// * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is
+// too many levels deep.
+//
// * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs
// that you can have in an organization.
//
+// * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that
+// is larger than the maximum size.
+//
// * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of
// policies that you can have in an organization.
//
-// * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant
-// with the tag policy that’s in effect for the account. For more information,
-// see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html)
-// in the AWS Organizations User Guide.
+// * TAG_POLICY_VIOLATION: You attempted to create or update a resource with +// tags that are not compliant with the tag policy requirements for this +// account. type ConstraintViolationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -12485,17 +15380,17 @@ func (s ConstraintViolationException) GoString() string { func newErrorConstraintViolationException(v protocol.ResponseMetadata) error { return &ConstraintViolationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConstraintViolationException) Code() string { +func (s *ConstraintViolationException) Code() string { return "ConstraintViolationException" } // Message returns the exception's message. -func (s ConstraintViolationException) Message() string { +func (s *ConstraintViolationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12503,22 +15398,22 @@ func (s ConstraintViolationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConstraintViolationException) OrigErr() error { +func (s *ConstraintViolationException) OrigErr() error { return nil } -func (s ConstraintViolationException) Error() string { +func (s *ConstraintViolationException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ConstraintViolationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConstraintViolationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConstraintViolationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConstraintViolationException) RequestID() string { + return s.RespMetadata.RequestID } type CreateAccountInput struct { @@ -12545,9 +15440,9 @@ type CreateAccountInput struct { // Console (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/grantaccess.html#ControllingAccessWebsite-Activate) // in the AWS Billing and Cost Management User Guide. // - // If you don't specify this parameter, the value defaults to ALLOW. This value - // allows IAM users and roles with the required permissions to access billing - // information for the new account. + // If you don't specify this parameter, the value defaults to ALLOW, and IAM + // users and roles with the required permissions can access billing information + // for the new account. IamUserAccessToBilling *string `type:"string" enum:"IAMUserAccessToBilling"` // (Optional) @@ -12561,16 +15456,31 @@ type CreateAccountInput struct { // If you don't specify this parameter, the role name defaults to OrganizationAccountAccessRole. // // For more information about how to use this role to access the member account, - // see Accessing and Administering the Member Accounts in Your Organization - // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_access.html#orgs_manage_accounts_create-cross-account-role) - // in the AWS Organizations User Guide. Also see steps 2 and 3 in Tutorial: - // Delegate Access Across AWS Accounts Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_cross-account-with-roles.html) - // in the IAM User Guide. 
+ // see the following links: + // + // * Accessing and Administering the Member Accounts in Your Organization + // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_access.html#orgs_manage_accounts_create-cross-account-role) + // in the AWS Organizations User Guide + // + // * Steps 2 and 3 in Tutorial: Delegate Access Across AWS Accounts Using + // IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_cross-account-with-roles.html) + // in the IAM User Guide // // The regex pattern (http://wikipedia.org/wiki/regex) that is used to validate // this parameter. The pattern can include uppercase letters, lowercase letters, // digits with no spaces, and any of the following characters: =,.@- RoleName *string `type:"string"` + + // A list of tags that you want to attach to the newly created account. For + // each tag in the list, you must specify both a tag key and a value. You can + // set the value to an empty string, but you can't set it to null. For more + // information about tagging, see Tagging AWS Organizations resources (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tagging.html) + // in the AWS Organizations User Guide. + // + // If any one of the tags is invalid or if you exceed the allowed number of + // tags for an account, then the entire request fails and the account is not + // created. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -12598,6 +15508,16 @@ func (s *CreateAccountInput) Validate() error { if s.Email != nil && len(*s.Email) < 6 { invalidParams.Add(request.NewErrParamMinLen("Email", 6)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -12629,6 +15549,12 @@ func (s *CreateAccountInput) SetRoleName(v string) *CreateAccountInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateAccountInput) SetTags(v []*Tag) *CreateAccountInput { + s.Tags = v + return s +} + type CreateAccountOutput struct { _ struct{} `type:"structure"` @@ -12682,6 +15608,9 @@ type CreateAccountStatus struct { // * ACCOUNT_LIMIT_EXCEEDED: The account could not be created because you // have reached the limit on the number of accounts in your organization. // + // * CONCURRENT_ACCOUNT_MODIFICATION: You already submitted a request with + // the same information. + // // * EMAIL_ALREADY_EXISTS: The account could not be created because another // AWS account with that email address already exists. // @@ -12696,7 +15625,13 @@ type CreateAccountStatus struct { // you provided is not valid. // // * INTERNAL_FAILURE: The account could not be created because of an internal - // failure. Try again later. If the problem persists, contact AWS Support. + // failure. Try again later. If the problem persists, contact Customer Support. + // + // * MISSING_BUSINESS_VALIDATION: The AWS account that owns your organization + // has not received Business Validation. + // + // * MISSING_PAYMENT_INSTRUMENT: You must configure the master account with + // a valid payment method, such as a credit card. 
FailureReason *string `type:"string" enum:"CreateAccountFailureReason"` // If the account was created successfully, the unique identifier (ID) of the @@ -12776,11 +15711,11 @@ func (s *CreateAccountStatus) SetState(v string) *CreateAccountStatus { return s } -// We can't find a create account request with the CreateAccountRequestId that +// We can't find an create account request with the CreateAccountRequestId that // you specified. type CreateAccountStatusNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12797,17 +15732,17 @@ func (s CreateAccountStatusNotFoundException) GoString() string { func newErrorCreateAccountStatusNotFoundException(v protocol.ResponseMetadata) error { return &CreateAccountStatusNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CreateAccountStatusNotFoundException) Code() string { +func (s *CreateAccountStatusNotFoundException) Code() string { return "CreateAccountStatusNotFoundException" } // Message returns the exception's message. -func (s CreateAccountStatusNotFoundException) Message() string { +func (s *CreateAccountStatusNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12815,22 +15750,22 @@ func (s CreateAccountStatusNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CreateAccountStatusNotFoundException) OrigErr() error { +func (s *CreateAccountStatusNotFoundException) OrigErr() error { return nil } -func (s CreateAccountStatusNotFoundException) Error() string { +func (s *CreateAccountStatusNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CreateAccountStatusNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CreateAccountStatusNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CreateAccountStatusNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *CreateAccountStatusNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type CreateGovCloudAccountInput struct { @@ -12847,8 +15782,8 @@ type CreateGovCloudAccountInput struct { // creation. You can't access the root user of the account or remove an account // that was created with an invalid email address. Like all request parameters // for CreateGovCloudAccount, the request for the email address for the AWS - // GovCloud (US) account originates from the commercial Region. It does not - // come from the AWS GovCloud (US) Region. + // GovCloud (US) account originates from the commercial Region, not from the + // AWS GovCloud (US) Region. // // Email is a required field Email *string `min:"6" type:"string" required:"true" sensitive:"true"` @@ -12878,14 +15813,30 @@ type CreateGovCloudAccountInput struct { // For more information about how to use this role to access the member account, // see Accessing and Administering the Member Accounts in Your Organization // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_access.html#orgs_manage_accounts_create-cross-account-role) - // in the AWS Organizations User Guide. 
See also steps 2 and 3 in Tutorial: - // Delegate Access Across AWS Accounts Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_cross-account-with-roles.html) + // in the AWS Organizations User Guide and steps 2 and 3 in Tutorial: Delegate + // Access Across AWS Accounts Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_cross-account-with-roles.html) // in the IAM User Guide. // // The regex pattern (http://wikipedia.org/wiki/regex) that is used to validate // this parameter. The pattern can include uppercase letters, lowercase letters, // digits with no spaces, and any of the following characters: =,.@- RoleName *string `type:"string"` + + // A list of tags that you want to attach to the newly created account. These + // tags are attached to the commercial account associated with the GovCloud + // account, and not to the GovCloud account itself. To add tags to the actual + // GovCloud account, call the TagResource operation in the GovCloud region after + // the new GovCloud account exists. + // + // For each tag in the list, you must specify both a tag key and a value. You + // can set the value to an empty string, but you can't set it to null. For more + // information about tagging, see Tagging AWS Organizations resources (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tagging.html) + // in the AWS Organizations User Guide. + // + // If any one of the tags is invalid or if you exceed the allowed number of + // tags for an account, then the entire request fails and the account is not + // created. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -12913,6 +15864,16 @@ func (s *CreateGovCloudAccountInput) Validate() error { if s.Email != nil && len(*s.Email) < 6 { invalidParams.Add(request.NewErrParamMinLen("Email", 6)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -12944,6 +15905,12 @@ func (s *CreateGovCloudAccountInput) SetRoleName(v string) *CreateGovCloudAccoun return s } +// SetTags sets the Tags field's value. +func (s *CreateGovCloudAccountInput) SetTags(v []*Tag) *CreateGovCloudAccountInput { + s.Tags = v + return s +} + type CreateGovCloudAccountOutput struct { _ struct{} `type:"structure"` @@ -12980,8 +15947,8 @@ type CreateOrganizationInput struct { // in the AWS Organizations User Guide. The consolidated billing feature // subset isn't available for organizations in the AWS GovCloud (US) Region. // - // * ALL: In addition to all the features that consolidated billing feature - // set supports, the master account can also apply any policy type to any + // * ALL: In addition to all the features supported by the consolidated billing + // feature set, the master account can also apply any policy type to any // member account in the organization. For more information, see All features // (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_getting-started_concepts.html#feature-set-all) // in the AWS Organizations User Guide. @@ -13051,6 +16018,16 @@ type CreateOrganizationalUnitInput struct { // // ParentId is a required field ParentId *string `type:"string" required:"true"` + + // A list of tags that you want to attach to the newly created OU. For each + // tag in the list, you must specify both a tag key and a value. 
You can set + // the value to an empty string, but you can't set it to null. For more information + // about tagging, see Tagging AWS Organizations resources (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tagging.html) + // in the AWS Organizations User Guide. + // + // If any one of the tags is invalid or if you exceed the allowed number of + // tags for an OU, then the entire request fails and the OU is not created. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -13075,6 +16052,16 @@ func (s *CreateOrganizationalUnitInput) Validate() error { if s.ParentId == nil { invalidParams.Add(request.NewErrParamRequired("ParentId")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -13094,6 +16081,12 @@ func (s *CreateOrganizationalUnitInput) SetParentId(v string) *CreateOrganizatio return s } +// SetTags sets the Tags field's value. +func (s *CreateOrganizationalUnitInput) SetTags(v []*Tag) *CreateOrganizationalUnitInput { + s.Tags = v + return s +} + type CreateOrganizationalUnitOutput struct { _ struct{} `type:"structure"` @@ -13120,13 +16113,8 @@ func (s *CreateOrganizationalUnitOutput) SetOrganizationalUnit(v *Organizational type CreatePolicyInput struct { _ struct{} `type:"structure"` - // The policy content to add to the new policy. For example, you could create - // a service control policy (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) - // (SCP) that specifies the permissions that administrators in attached accounts - // can delegate to their users, groups, and roles. The string for this SCP must - // be JSON text. For more information about the SCP syntax, see Service Control - // Policy Syntax (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_scp-syntax.html) - // in the AWS Organizations User Guide. + // The policy text content to add to the new policy. The text that you supply + // must adhere to the rules of the policy type you specify in the Type parameter. // // Content is a required field Content *string `min:"1" type:"string" required:"true"` @@ -13145,7 +16133,25 @@ type CreatePolicyInput struct { // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // The type of policy to create. + // A list of tags that you want to attach to the newly created policy. For each + // tag in the list, you must specify both a tag key and a value. You can set + // the value to an empty string, but you can't set it to null. For more information + // about tagging, see Tagging AWS Organizations resources (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tagging.html) + // in the AWS Organizations User Guide. + // + // If any one of the tags is invalid or if you exceed the allowed number of + // tags for a policy, then the entire request fails and the policy is not created. + Tags []*Tag `type:"list"` + + // The type of policy to create. 
You can specify one of the following values: + // + // * AISERVICES_OPT_OUT_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) + // + // * BACKUP_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) + // + // * SERVICE_CONTROL_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) + // + // * TAG_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // Type is a required field Type *string `type:"string" required:"true" enum:"PolicyType"` @@ -13182,6 +16188,16 @@ func (s *CreatePolicyInput) Validate() error { if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -13207,6 +16223,12 @@ func (s *CreatePolicyInput) SetName(v string) *CreatePolicyInput { return s } +// SetTags sets the Tags field's value. +func (s *CreatePolicyInput) SetTags(v []*Tag) *CreatePolicyInput { + s.Tags = v + return s +} + // SetType sets the Type field's value. func (s *CreatePolicyInput) SetType(v string) *CreatePolicyInput { s.Type = &v @@ -13302,6 +16324,130 @@ func (s *DeclineHandshakeOutput) SetHandshake(v *Handshake) *DeclineHandshakeOut return s } +// Contains information about the delegated administrator. +type DelegatedAdministrator struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the delegated administrator's account. + Arn *string `type:"string"` + + // The date when the account was made a delegated administrator. + DelegationEnabledDate *time.Time `type:"timestamp"` + + // The email address that is associated with the delegated administrator's AWS + // account. + Email *string `min:"6" type:"string" sensitive:"true"` + + // The unique identifier (ID) of the delegated administrator's account. + Id *string `type:"string"` + + // The method by which the delegated administrator's account joined the organization. + JoinedMethod *string `type:"string" enum:"AccountJoinedMethod"` + + // The date when the delegated administrator's account became a part of the + // organization. + JoinedTimestamp *time.Time `type:"timestamp"` + + // The friendly name of the delegated administrator's account. + Name *string `min:"1" type:"string" sensitive:"true"` + + // The status of the delegated administrator's account in the organization. + Status *string `type:"string" enum:"AccountStatus"` +} + +// String returns the string representation +func (s DelegatedAdministrator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DelegatedAdministrator) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DelegatedAdministrator) SetArn(v string) *DelegatedAdministrator { + s.Arn = &v + return s +} + +// SetDelegationEnabledDate sets the DelegationEnabledDate field's value. +func (s *DelegatedAdministrator) SetDelegationEnabledDate(v time.Time) *DelegatedAdministrator { + s.DelegationEnabledDate = &v + return s +} + +// SetEmail sets the Email field's value. +func (s *DelegatedAdministrator) SetEmail(v string) *DelegatedAdministrator { + s.Email = &v + return s +} + +// SetId sets the Id field's value. 
+func (s *DelegatedAdministrator) SetId(v string) *DelegatedAdministrator { + s.Id = &v + return s +} + +// SetJoinedMethod sets the JoinedMethod field's value. +func (s *DelegatedAdministrator) SetJoinedMethod(v string) *DelegatedAdministrator { + s.JoinedMethod = &v + return s +} + +// SetJoinedTimestamp sets the JoinedTimestamp field's value. +func (s *DelegatedAdministrator) SetJoinedTimestamp(v time.Time) *DelegatedAdministrator { + s.JoinedTimestamp = &v + return s +} + +// SetName sets the Name field's value. +func (s *DelegatedAdministrator) SetName(v string) *DelegatedAdministrator { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DelegatedAdministrator) SetStatus(v string) *DelegatedAdministrator { + s.Status = &v + return s +} + +// Contains information about the AWS service for which the account is a delegated +// administrator. +type DelegatedService struct { + _ struct{} `type:"structure"` + + // The date that the account became a delegated administrator for this service. + DelegationEnabledDate *time.Time `type:"timestamp"` + + // The name of a service that can request an operation for the specified service. + // This is typically in the form of a URL, such as: servicename.amazonaws.com. + ServicePrincipal *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DelegatedService) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DelegatedService) GoString() string { + return s.String() +} + +// SetDelegationEnabledDate sets the DelegationEnabledDate field's value. +func (s *DelegatedService) SetDelegationEnabledDate(v time.Time) *DelegatedService { + s.DelegationEnabledDate = &v + return s +} + +// SetServicePrincipal sets the ServicePrincipal field's value. +func (s *DelegatedService) SetServicePrincipal(v string) *DelegatedService { + s.ServicePrincipal = &v + return s +} + type DeleteOrganizationInput struct { _ struct{} `type:"structure"` } @@ -13403,20 +16549,90 @@ type DeletePolicyInput struct { } // String returns the string representation -func (s DeletePolicyInput) String() string { +func (s DeletePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePolicyInput"} + if s.PolicyId == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyId sets the PolicyId field's value. +func (s *DeletePolicyInput) SetPolicyId(v string) *DeletePolicyInput { + s.PolicyId = &v + return s +} + +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyOutput) GoString() string { + return s.String() +} + +type DeregisterDelegatedAdministratorInput struct { + _ struct{} `type:"structure"` + + // The account ID number of the member account in the organization that you + // want to deregister as a delegated administrator. 
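// Illustrative only (not generated SDK code): a minimal sketch of the Tags
// parameter added above to CreateGovCloudAccountInput, CreateOrganizationalUnitInput,
// and CreatePolicyInput. The client setup, OU name, parent ID, and tag values are
// assumed placeholders, not values taken from this patch.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession()))

	// Every tag needs both a key and a value; the value may be an empty string
	// but not nil, otherwise the input validation shown above rejects the request.
	out, err := svc.CreateOrganizationalUnit(&organizations.CreateOrganizationalUnitInput{
		Name:     aws.String("workloads"),          // placeholder OU name
		ParentId: aws.String("r-examplerootid111"), // placeholder root ID
		Tags: []*organizations.Tag{
			{Key: aws.String("team"), Value: aws.String("platform")},
			{Key: aws.String("cost-center"), Value: aws.String("")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.OrganizationalUnit.Arn))
}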
+ // + // AccountId is a required field + AccountId *string `type:"string" required:"true"` + + // The service principal name of an AWS service for which the account is a delegated + // administrator. + // + // Delegated administrator privileges are revoked for only the specified AWS + // service from the member account. If the specified service is the only service + // for which the member account is a delegated administrator, the operation + // also revokes Organizations read action permissions. + // + // ServicePrincipal is a required field + ServicePrincipal *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeregisterDelegatedAdministratorInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeletePolicyInput) GoString() string { +func (s DeregisterDelegatedAdministratorInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeletePolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeletePolicyInput"} - if s.PolicyId == nil { - invalidParams.Add(request.NewErrParamRequired("PolicyId")) +func (s *DeregisterDelegatedAdministratorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterDelegatedAdministratorInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.ServicePrincipal == nil { + invalidParams.Add(request.NewErrParamRequired("ServicePrincipal")) + } + if s.ServicePrincipal != nil && len(*s.ServicePrincipal) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServicePrincipal", 1)) } if invalidParams.Len() > 0 { @@ -13425,23 +16641,29 @@ func (s *DeletePolicyInput) Validate() error { return nil } -// SetPolicyId sets the PolicyId field's value. -func (s *DeletePolicyInput) SetPolicyId(v string) *DeletePolicyInput { - s.PolicyId = &v +// SetAccountId sets the AccountId field's value. +func (s *DeregisterDelegatedAdministratorInput) SetAccountId(v string) *DeregisterDelegatedAdministratorInput { + s.AccountId = &v return s } -type DeletePolicyOutput struct { +// SetServicePrincipal sets the ServicePrincipal field's value. +func (s *DeregisterDelegatedAdministratorInput) SetServicePrincipal(v string) *DeregisterDelegatedAdministratorInput { + s.ServicePrincipal = &v + return s +} + +type DeregisterDelegatedAdministratorOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s DeletePolicyOutput) String() string { +func (s DeregisterDelegatedAdministratorOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeletePolicyOutput) GoString() string { +func (s DeregisterDelegatedAdministratorOutput) GoString() string { return s.String() } @@ -13580,14 +16802,21 @@ func (s *DescribeCreateAccountStatusOutput) SetCreateAccountStatus(v *CreateAcco type DescribeEffectivePolicyInput struct { _ struct{} `type:"structure"` - // The type of policy that you want information about. + // The type of policy that you want information about. 
You can specify one of + // the following values: + // + // * AISERVICES_OPT_OUT_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) + // + // * BACKUP_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) + // + // * TAG_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // PolicyType is a required field PolicyType *string `type:"string" required:"true" enum:"EffectivePolicyType"` // When you're signed in as the master account, specify the ID of the account - // that you want details about. Specifying an organization root or OU as the - // target is not supported. + // that you want details about. Specifying an organization root or organizational + // unit (OU) as the target is not supported. TargetId *string `type:"string"` } @@ -13733,6 +16962,11 @@ type DescribeOrganizationOutput struct { _ struct{} `type:"structure"` // A structure that contains information about the organization. + // + // The AvailablePolicyTypes part of the response is deprecated, and you shouldn't + // use it in your apps. It doesn't include any policy type supported by Organizations + // other than SCPs. To determine which policy types are enabled in your organization, + // use the ListRoots operation. Organization *Organization `type:"structure"` } @@ -13888,8 +17122,8 @@ func (s *DescribePolicyOutput) SetPolicy(v *Policy) *DescribePolicyOutput { // We can't find the destination container (a root or OU) with the ParentId // that you specified. type DestinationParentNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -13906,17 +17140,17 @@ func (s DestinationParentNotFoundException) GoString() string { func newErrorDestinationParentNotFoundException(v protocol.ResponseMetadata) error { return &DestinationParentNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DestinationParentNotFoundException) Code() string { +func (s *DestinationParentNotFoundException) Code() string { return "DestinationParentNotFoundException" } // Message returns the exception's message. -func (s DestinationParentNotFoundException) Message() string { +func (s *DestinationParentNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13924,22 +17158,22 @@ func (s DestinationParentNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DestinationParentNotFoundException) OrigErr() error { +func (s *DestinationParentNotFoundException) OrigErr() error { return nil } -func (s DestinationParentNotFoundException) Error() string { +func (s *DestinationParentNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DestinationParentNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DestinationParentNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
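// Illustrative only (not generated SDK code): a minimal sketch of calling the
// DeregisterDelegatedAdministrator API documented above. AccountId and
// ServicePrincipal are both required; the values here are assumed placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession()))

	// Revokes delegated-administrator access for the one named service only;
	// other delegations held by the account are left in place.
	_, err := svc.DeregisterDelegatedAdministrator(&organizations.DeregisterDelegatedAdministratorInput{
		AccountId:        aws.String("111111111111"),         // placeholder member account ID
		ServicePrincipal: aws.String("config.amazonaws.com"), // placeholder service principal
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("delegated administrator deregistered")
}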
-func (s DestinationParentNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *DestinationParentNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type DetachPolicyInput struct { @@ -14088,7 +17322,16 @@ func (s DisableAWSServiceAccessOutput) GoString() string { type DisablePolicyTypeInput struct { _ struct{} `type:"structure"` - // The policy type that you want to disable in this root. + // The policy type that you want to disable in this root. You can specify one + // of the following values: + // + // * AISERVICES_OPT_OUT_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) + // + // * BACKUP_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) + // + // * SERVICE_CONTROL_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) + // + // * TAG_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // PolicyType is a required field PolicyType *string `type:"string" required:"true" enum:"PolicyType"` @@ -14166,8 +17409,8 @@ func (s *DisablePolicyTypeOutput) SetRoot(v *Root) *DisablePolicyTypeOutput { // That account is already present in the specified destination. type DuplicateAccountException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -14184,17 +17427,17 @@ func (s DuplicateAccountException) GoString() string { func newErrorDuplicateAccountException(v protocol.ResponseMetadata) error { return &DuplicateAccountException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicateAccountException) Code() string { +func (s *DuplicateAccountException) Code() string { return "DuplicateAccountException" } // Message returns the exception's message. -func (s DuplicateAccountException) Message() string { +func (s *DuplicateAccountException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14202,22 +17445,22 @@ func (s DuplicateAccountException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicateAccountException) OrigErr() error { +func (s *DuplicateAccountException) OrigErr() error { return nil } -func (s DuplicateAccountException) Error() string { +func (s *DuplicateAccountException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicateAccountException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicateAccountException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DuplicateAccountException) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicateAccountException) RequestID() string { + return s.RespMetadata.RequestID } // A handshake with the same action and target already exists. For example, @@ -14226,8 +17469,8 @@ func (s DuplicateAccountException) RequestID() string { // to resend an invitation to an account, ensure that existing handshakes that // might be considered duplicates are canceled or declined. 
type DuplicateHandshakeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -14244,17 +17487,17 @@ func (s DuplicateHandshakeException) GoString() string { func newErrorDuplicateHandshakeException(v protocol.ResponseMetadata) error { return &DuplicateHandshakeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicateHandshakeException) Code() string { +func (s *DuplicateHandshakeException) Code() string { return "DuplicateHandshakeException" } // Message returns the exception's message. -func (s DuplicateHandshakeException) Message() string { +func (s *DuplicateHandshakeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14262,28 +17505,28 @@ func (s DuplicateHandshakeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicateHandshakeException) OrigErr() error { +func (s *DuplicateHandshakeException) OrigErr() error { return nil } -func (s DuplicateHandshakeException) Error() string { +func (s *DuplicateHandshakeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicateHandshakeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicateHandshakeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DuplicateHandshakeException) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicateHandshakeException) RequestID() string { + return s.RespMetadata.RequestID } // An OU with the same name already exists. type DuplicateOrganizationalUnitException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -14300,17 +17543,17 @@ func (s DuplicateOrganizationalUnitException) GoString() string { func newErrorDuplicateOrganizationalUnitException(v protocol.ResponseMetadata) error { return &DuplicateOrganizationalUnitException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicateOrganizationalUnitException) Code() string { +func (s *DuplicateOrganizationalUnitException) Code() string { return "DuplicateOrganizationalUnitException" } // Message returns the exception's message. -func (s DuplicateOrganizationalUnitException) Message() string { +func (s *DuplicateOrganizationalUnitException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14318,28 +17561,28 @@ func (s DuplicateOrganizationalUnitException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicateOrganizationalUnitException) OrigErr() error { +func (s *DuplicateOrganizationalUnitException) OrigErr() error { return nil } -func (s DuplicateOrganizationalUnitException) Error() string { +func (s *DuplicateOrganizationalUnitException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s DuplicateOrganizationalUnitException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicateOrganizationalUnitException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DuplicateOrganizationalUnitException) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicateOrganizationalUnitException) RequestID() string { + return s.RespMetadata.RequestID } // The selected policy is already attached to the specified target. type DuplicatePolicyAttachmentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -14356,17 +17599,17 @@ func (s DuplicatePolicyAttachmentException) GoString() string { func newErrorDuplicatePolicyAttachmentException(v protocol.ResponseMetadata) error { return &DuplicatePolicyAttachmentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicatePolicyAttachmentException) Code() string { +func (s *DuplicatePolicyAttachmentException) Code() string { return "DuplicatePolicyAttachmentException" } // Message returns the exception's message. -func (s DuplicatePolicyAttachmentException) Message() string { +func (s *DuplicatePolicyAttachmentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14374,28 +17617,28 @@ func (s DuplicatePolicyAttachmentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicatePolicyAttachmentException) OrigErr() error { +func (s *DuplicatePolicyAttachmentException) OrigErr() error { return nil } -func (s DuplicatePolicyAttachmentException) Error() string { +func (s *DuplicatePolicyAttachmentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicatePolicyAttachmentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicatePolicyAttachmentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DuplicatePolicyAttachmentException) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicatePolicyAttachmentException) RequestID() string { + return s.RespMetadata.RequestID } // A policy with the same name already exists. type DuplicatePolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -14412,17 +17655,17 @@ func (s DuplicatePolicyException) GoString() string { func newErrorDuplicatePolicyException(v protocol.ResponseMetadata) error { return &DuplicatePolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicatePolicyException) Code() string { +func (s *DuplicatePolicyException) Code() string { return "DuplicatePolicyException" } // Message returns the exception's message. 
-func (s DuplicatePolicyException) Message() string { +func (s *DuplicatePolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14430,22 +17673,22 @@ func (s DuplicatePolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicatePolicyException) OrigErr() error { +func (s *DuplicatePolicyException) OrigErr() error { return nil } -func (s DuplicatePolicyException) Error() string { +func (s *DuplicatePolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicatePolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicatePolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DuplicatePolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicatePolicyException) RequestID() string { + return s.RespMetadata.RequestID } // Contains rules to be applied to the affected accounts. The effective policy @@ -14506,8 +17749,8 @@ func (s *EffectivePolicy) SetTargetId(v string) *EffectivePolicy { // policy of this type. Contact the administrator of your organization about // attaching a policy of this type to the account. type EffectivePolicyNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -14524,17 +17767,17 @@ func (s EffectivePolicyNotFoundException) GoString() string { func newErrorEffectivePolicyNotFoundException(v protocol.ResponseMetadata) error { return &EffectivePolicyNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EffectivePolicyNotFoundException) Code() string { +func (s *EffectivePolicyNotFoundException) Code() string { return "EffectivePolicyNotFoundException" } // Message returns the exception's message. -func (s EffectivePolicyNotFoundException) Message() string { +func (s *EffectivePolicyNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14542,22 +17785,22 @@ func (s EffectivePolicyNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EffectivePolicyNotFoundException) OrigErr() error { +func (s *EffectivePolicyNotFoundException) OrigErr() error { return nil } -func (s EffectivePolicyNotFoundException) Error() string { +func (s *EffectivePolicyNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EffectivePolicyNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EffectivePolicyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s EffectivePolicyNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *EffectivePolicyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type EnableAWSServiceAccessInput struct { @@ -14658,7 +17901,16 @@ func (s *EnableAllFeaturesOutput) SetHandshake(v *Handshake) *EnableAllFeaturesO type EnablePolicyTypeInput struct { _ struct{} `type:"structure"` - // The policy type that you want to enable. + // The policy type that you want to enable. You can specify one of the following + // values: + // + // * AISERVICES_OPT_OUT_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) + // + // * BACKUP_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) + // + // * SERVICE_CONTROL_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) + // + // * TAG_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // PolicyType is a required field PolicyType *string `type:"string" required:"true" enum:"PolicyType"` @@ -14734,8 +17986,8 @@ func (s *EnablePolicyTypeOutput) SetRoot(v *Root) *EnablePolicyTypeOutput { return s } -// A structure that contains details of a service principal that is enabled -// to integrate with AWS Organizations. +// A structure that contains details of a service principal that represents +// an AWS service that is enabled to integrate with AWS Organizations. type EnabledServicePrincipal struct { _ struct{} `type:"structure"` @@ -14775,8 +18027,8 @@ func (s *EnabledServicePrincipal) SetServicePrincipal(v string) *EnabledServiceP // If after one hour you continue to receive this error, contact AWS Support // (https://console.aws.amazon.com/support/home#/). type FinalizingOrganizationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -14793,17 +18045,17 @@ func (s FinalizingOrganizationException) GoString() string { func newErrorFinalizingOrganizationException(v protocol.ResponseMetadata) error { return &FinalizingOrganizationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FinalizingOrganizationException) Code() string { +func (s *FinalizingOrganizationException) Code() string { return "FinalizingOrganizationException" } // Message returns the exception's message. -func (s FinalizingOrganizationException) Message() string { +func (s *FinalizingOrganizationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14811,32 +18063,32 @@ func (s FinalizingOrganizationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FinalizingOrganizationException) OrigErr() error { +func (s *FinalizingOrganizationException) OrigErr() error { return nil } -func (s FinalizingOrganizationException) Error() string { +func (s *FinalizingOrganizationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s FinalizingOrganizationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FinalizingOrganizationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FinalizingOrganizationException) RequestID() string { - return s.respMetadata.RequestID +func (s *FinalizingOrganizationException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information that must be exchanged to securely establish a relationship -// between two accounts (an originator and a recipient). For example, assume -// that a master account (the originator) invites another account (the recipient) -// to join its organization. In that case, the two accounts exchange information -// as a series of handshake requests and responses. +// between two accounts (an originator and a recipient). For example, when a +// master account (the originator) invites another account (the recipient) to +// join its organization, the two accounts exchange information as a series +// of handshake requests and responses. // // Note: Handshakes that are CANCELED, ACCEPTED, or DECLINED show up in lists -// for only 30 days after entering that state. After that, they are deleted. +// for only 30 days after entering that state After that they are deleted. type Handshake struct { _ struct{} `type:"structure"` @@ -14974,8 +18226,8 @@ func (s *Handshake) SetState(v string) *Handshake { // The specified handshake is already in the requested state. For example, you // can't accept a handshake that was already accepted. type HandshakeAlreadyInStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -14992,17 +18244,17 @@ func (s HandshakeAlreadyInStateException) GoString() string { func newErrorHandshakeAlreadyInStateException(v protocol.ResponseMetadata) error { return &HandshakeAlreadyInStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s HandshakeAlreadyInStateException) Code() string { +func (s *HandshakeAlreadyInStateException) Code() string { return "HandshakeAlreadyInStateException" } // Message returns the exception's message. -func (s HandshakeAlreadyInStateException) Message() string { +func (s *HandshakeAlreadyInStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15010,22 +18262,22 @@ func (s HandshakeAlreadyInStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s HandshakeAlreadyInStateException) OrigErr() error { +func (s *HandshakeAlreadyInStateException) OrigErr() error { return nil } -func (s HandshakeAlreadyInStateException) Error() string { +func (s *HandshakeAlreadyInStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s HandshakeAlreadyInStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *HandshakeAlreadyInStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s HandshakeAlreadyInStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *HandshakeAlreadyInStateException) RequestID() string { + return s.RespMetadata.RequestID } // The requested operation would violate the constraint identified in the reason @@ -15067,8 +18319,8 @@ func (s HandshakeAlreadyInStateException) RequestID() string { // account that doesn't have a payment instrument, such as a credit card, // associated with it. type HandshakeConstraintViolationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -15087,17 +18339,17 @@ func (s HandshakeConstraintViolationException) GoString() string { func newErrorHandshakeConstraintViolationException(v protocol.ResponseMetadata) error { return &HandshakeConstraintViolationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s HandshakeConstraintViolationException) Code() string { +func (s *HandshakeConstraintViolationException) Code() string { return "HandshakeConstraintViolationException" } // Message returns the exception's message. -func (s HandshakeConstraintViolationException) Message() string { +func (s *HandshakeConstraintViolationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15105,22 +18357,22 @@ func (s HandshakeConstraintViolationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s HandshakeConstraintViolationException) OrigErr() error { +func (s *HandshakeConstraintViolationException) OrigErr() error { return nil } -func (s HandshakeConstraintViolationException) Error() string { +func (s *HandshakeConstraintViolationException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s HandshakeConstraintViolationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *HandshakeConstraintViolationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s HandshakeConstraintViolationException) RequestID() string { - return s.respMetadata.RequestID +func (s *HandshakeConstraintViolationException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the criteria that are used to select the handshakes for the operation. @@ -15166,8 +18418,8 @@ func (s *HandshakeFilter) SetParentHandshakeId(v string) *HandshakeFilter { // We can't find a handshake with the HandshakeId that you specified. type HandshakeNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -15184,17 +18436,17 @@ func (s HandshakeNotFoundException) GoString() string { func newErrorHandshakeNotFoundException(v protocol.ResponseMetadata) error { return &HandshakeNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s HandshakeNotFoundException) Code() string { +func (s *HandshakeNotFoundException) Code() string { return "HandshakeNotFoundException" } // Message returns the exception's message. 
-func (s HandshakeNotFoundException) Message() string { +func (s *HandshakeNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15202,22 +18454,22 @@ func (s HandshakeNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s HandshakeNotFoundException) OrigErr() error { +func (s *HandshakeNotFoundException) OrigErr() error { return nil } -func (s HandshakeNotFoundException) Error() string { +func (s *HandshakeNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s HandshakeNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *HandshakeNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s HandshakeNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *HandshakeNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Identifies a participant in a handshake. @@ -15343,8 +18595,8 @@ func (s *HandshakeResource) SetValue(v string) *HandshakeResource { // example, you can't cancel a handshake that was already accepted or accept // a handshake that was already declined. type InvalidHandshakeTransitionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -15361,17 +18613,17 @@ func (s InvalidHandshakeTransitionException) GoString() string { func newErrorInvalidHandshakeTransitionException(v protocol.ResponseMetadata) error { return &InvalidHandshakeTransitionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidHandshakeTransitionException) Code() string { +func (s *InvalidHandshakeTransitionException) Code() string { return "InvalidHandshakeTransitionException" } // Message returns the exception's message. -func (s InvalidHandshakeTransitionException) Message() string { +func (s *InvalidHandshakeTransitionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15379,22 +18631,22 @@ func (s InvalidHandshakeTransitionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidHandshakeTransitionException) OrigErr() error { +func (s *InvalidHandshakeTransitionException) OrigErr() error { return nil } -func (s InvalidHandshakeTransitionException) Error() string { +func (s *InvalidHandshakeTransitionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidHandshakeTransitionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidHandshakeTransitionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
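// Illustrative only (not generated SDK code): the hunks in this region export
// RespMetadata and move the awserr.Error methods to pointer receivers on the
// modeled exception types. Callers can keep branching on the generated error
// code constants; a minimal sketch, with placeholder policy and target IDs:
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	svc := organizations.New(session.Must(session.NewSession()))

	_, err := svc.AttachPolicy(&organizations.AttachPolicyInput{
		PolicyId: aws.String("p-examplepolicyid"),         // placeholder policy ID
		TargetId: aws.String("ou-examplerootid-1234abcd"), // placeholder OU ID
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case organizations.ErrCodeDuplicatePolicyAttachmentException:
			// The policy is already attached; safe to treat as a no-op.
			fmt.Println("policy already attached")
		default:
			log.Fatal(aerr)
		}
	} else if err != nil {
		log.Fatal(err)
	}
}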
-func (s InvalidHandshakeTransitionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidHandshakeTransitionException) RequestID() string { + return s.RespMetadata.RequestID } // The requested operation failed because you provided invalid values for one @@ -15402,7 +18654,10 @@ func (s InvalidHandshakeTransitionException) RequestID() string { // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this -// specific API or operation: +// specific API or operation. +// +// * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to +// the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -15411,7 +18666,7 @@ func (s InvalidHandshakeTransitionException) RequestID() string { // // * INVALID_ENUM: You specified an invalid value. // -// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. +// * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -15461,9 +18716,15 @@ func (s InvalidHandshakeTransitionException) RequestID() string { // // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. +// +// * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that +// target entity. +// +// * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that +// isn't recognized. type InvalidInputException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -15482,17 +18743,17 @@ func (s InvalidInputException) GoString() string { func newErrorInvalidInputException(v protocol.ResponseMetadata) error { return &InvalidInputException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInputException) Code() string { +func (s *InvalidInputException) Code() string { return "InvalidInputException" } // Message returns the exception's message. -func (s InvalidInputException) Message() string { +func (s *InvalidInputException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15500,22 +18761,22 @@ func (s InvalidInputException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInputException) OrigErr() error { +func (s *InvalidInputException) OrigErr() error { return nil } -func (s InvalidInputException) Error() string { +func (s *InvalidInputException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInputException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInputException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInputException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInputException) RequestID() string { + return s.RespMetadata.RequestID } type InviteAccountToOrganizationInput struct { @@ -15525,6 +18786,26 @@ type InviteAccountToOrganizationInput struct { // the recipient account owner. 
Notes *string `type:"string" sensitive:"true"` + // A list of tags that you want to attach to the account when it becomes a member + // of the organization. For each tag in the list, you must specify both a tag + // key and a value. You can set the value to an empty string, but you can't + // set it to null. For more information about tagging, see Tagging AWS Organizations + // resources (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_tagging.html) + // in the AWS Organizations User Guide. + // + // Any tags in the request are checked for compliance with any applicable tag + // policies when the request is made. The request is rejected if the tags in + // the request don't match the requirements of the policy at that time. Tag + // policy compliance is not checked again when the invitation is accepted and + // the tags are actually attached to the account. That means that if the tag + // policy changes between the invitation and the acceptance, then the tags + // could potentially be non-compliant. + // + // If any one of the tags is invalid or if you exceed the allowed number of + // tags for an account, then the entire request fails and invitations are not + // sent. + Tags []*Tag `type:"list"` + // The identifier (ID) of the AWS account that you want to invite to join your // organization. This is a JSON object that contains the following elements: // @@ -15561,6 +18842,16 @@ func (s *InviteAccountToOrganizationInput) Validate() error { if s.Target == nil { invalidParams.Add(request.NewErrParamRequired("Target")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if s.Target != nil { if err := s.Target.Validate(); err != nil { invalidParams.AddNested("Target", err.(request.ErrInvalidParams)) @@ -15579,6 +18870,12 @@ func (s *InviteAccountToOrganizationInput) SetNotes(v string) *InviteAccountToOr return s } +// SetTags sets the Tags field's value. +func (s *InviteAccountToOrganizationInput) SetTags(v []*Tag) *InviteAccountToOrganizationInput { + s.Tags = v + return s +} + // SetTarget sets the Target field's value. func (s *InviteAccountToOrganizationInput) SetTarget(v *HandshakeParty) *InviteAccountToOrganizationInput { s.Target = v @@ -15640,21 +18937,21 @@ func (s LeaveOrganizationOutput) GoString() string { type ListAWSServiceAccessForOrganizationInput struct { _ struct{} `type:"structure"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation.
If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` } @@ -15701,11 +18998,10 @@ type ListAWSServiceAccessForOrganizationOutput struct { // and the date that it was enabled for integration with AWS Organizations. EnabledServicePrincipals []*EnabledServicePrincipal `type:"list"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` } @@ -15734,21 +19030,21 @@ func (s *ListAWSServiceAccessForOrganizationOutput) SetNextToken(v string) *List type ListAccountsForParentInput struct { _ struct{} `type:"structure"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. 
You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` // The unique identifier (ID) for the parent root or organization unit (OU) @@ -15808,11 +19104,10 @@ type ListAccountsForParentOutput struct { // A list of the accounts in the specified root or OU. Accounts []*Account `type:"list"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` } @@ -15841,21 +19136,21 @@ func (s *ListAccountsForParentOutput) SetNextToken(v string) *ListAccountsForPar type ListAccountsInput struct { _ struct{} `type:"structure"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. 
+ // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` } @@ -15900,11 +19195,10 @@ type ListAccountsOutput struct { // A list of objects in the organization. Accounts []*Account `type:"list"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` } @@ -15938,62 +19232,278 @@ type ListChildrenInput struct { // ChildType is a required field ChildType *string `type:"string" required:"true" enum:"ChildType"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. + MaxResults *int64 `min:"1" type:"integer"` + + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. + NextToken *string `type:"string"` + + // The unique identifier (ID) for the parent root or OU whose children you want + // to list. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for a parent ID string + // requires one of the following: + // + // * Root - A string that begins with "r-" followed by from 4 to 32 lowercase + // letters or digits. 
+ // + // * Organizational unit (OU) - A string that begins with "ou-" followed + // by from 4 to 32 lowercase letters or digits (the ID of the root that the + // OU is in). This string is followed by a second "-" dash and from 8 to + // 32 additional lowercase letters or digits. + // + // ParentId is a required field + ParentId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListChildrenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChildrenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListChildrenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListChildrenInput"} + if s.ChildType == nil { + invalidParams.Add(request.NewErrParamRequired("ChildType")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ParentId == nil { + invalidParams.Add(request.NewErrParamRequired("ParentId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetChildType sets the ChildType field's value. +func (s *ListChildrenInput) SetChildType(v string) *ListChildrenInput { + s.ChildType = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListChildrenInput) SetMaxResults(v int64) *ListChildrenInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListChildrenInput) SetNextToken(v string) *ListChildrenInput { + s.NextToken = &v + return s +} + +// SetParentId sets the ParentId field's value. +func (s *ListChildrenInput) SetParentId(v string) *ListChildrenInput { + s.ParentId = &v + return s +} + +type ListChildrenOutput struct { + _ struct{} `type:"structure"` + + // The list of children of the specified parent container. + Children []*Child `type:"list"` + + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListChildrenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListChildrenOutput) GoString() string { + return s.String() +} + +// SetChildren sets the Children field's value. +func (s *ListChildrenOutput) SetChildren(v []*Child) *ListChildrenOutput { + s.Children = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListChildrenOutput) SetNextToken(v string) *ListChildrenOutput { + s.NextToken = &v + return s +} + +type ListCreateAccountStatusInput struct { + _ struct{} `type:"structure"` + + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. 
Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. + MaxResults *int64 `min:"1" type:"integer"` + + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. + NextToken *string `type:"string"` + + // A list of one or more states that you want included in the response. If this + // parameter isn't present, all requests are included in the response. + States []*string `type:"list"` +} + +// String returns the string representation +func (s ListCreateAccountStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCreateAccountStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListCreateAccountStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListCreateAccountStatusInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListCreateAccountStatusInput) SetMaxResults(v int64) *ListCreateAccountStatusInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListCreateAccountStatusInput) SetNextToken(v string) *ListCreateAccountStatusInput { + s.NextToken = &v + return s +} + +// SetStates sets the States field's value. +func (s *ListCreateAccountStatusInput) SetStates(v []*string) *ListCreateAccountStatusInput { + s.States = v + return s +} + +type ListCreateAccountStatusOutput struct { + _ struct{} `type:"structure"` + + // A list of objects with details about the requests. Certain elements, such + // as the accountId number, are present in the output only after the account + // has been successfully created. + CreateAccountStatuses []*CreateAccountStatus `type:"list"` + + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListCreateAccountStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListCreateAccountStatusOutput) GoString() string { + return s.String() +} + +// SetCreateAccountStatuses sets the CreateAccountStatuses field's value. +func (s *ListCreateAccountStatusOutput) SetCreateAccountStatuses(v []*CreateAccountStatus) *ListCreateAccountStatusOutput { + s.CreateAccountStatuses = v + return s +} + +// SetNextToken sets the NextToken field's value. 
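// Illustrative sketch, not part of the vendored SDK: ListCreateAccountStatus,
// documented above, accepts an optional States filter. A short example that
// reports unfinished or failed CreateAccount requests through the generated
// Pages helper; the client is assumed to be built as in the earlier
// ListAccounts sketch.

package orgexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func reportCreateAccountProgress(svc *organizations.Organizations) error {
	input := &organizations.ListCreateAccountStatusInput{
		States: []*string{
			aws.String(organizations.CreateAccountStateInProgress),
			aws.String(organizations.CreateAccountStateFailed),
		},
	}
	return svc.ListCreateAccountStatusPages(input,
		func(page *organizations.ListCreateAccountStatusOutput, lastPage bool) bool {
			for _, status := range page.CreateAccountStatuses {
				fmt.Printf("%s: %s %s\n",
					aws.StringValue(status.AccountName),
					aws.StringValue(status.State),
					aws.StringValue(status.FailureReason))
			}
			return true // keep paging until NextToken is exhausted
		})
}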
+func (s *ListCreateAccountStatusOutput) SetNextToken(v string) *ListCreateAccountStatusOutput { + s.NextToken = &v + return s +} + +type ListDelegatedAdministratorsInput struct { + _ struct{} `type:"structure"` + + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` - // The unique identifier (ID) for the parent root or OU whose children you want - // to list. - // - // The regex pattern (http://wikipedia.org/wiki/regex) for a parent ID string - // requires one of the following: - // - // * Root - A string that begins with "r-" followed by from 4 to 32 lowercase - // letters or digits. - // - // * Organizational unit (OU) - A string that begins with "ou-" followed - // by from 4 to 32 lowercase letters or digits (the ID of the root that the - // OU is in). This string is followed by a second "-" dash and from 8 to - // 32 additional lowercase letters or digits. + // Specifies a service principal name. If specified, then the operation lists + // the delegated administrators only for the specified service. // - // ParentId is a required field - ParentId *string `type:"string" required:"true"` + // If you don't specify a service principal, the operation lists all delegated + // administrators for all services in your organization. + ServicePrincipal *string `min:"1" type:"string"` } // String returns the string representation -func (s ListChildrenInput) String() string { +func (s ListDelegatedAdministratorsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListChildrenInput) GoString() string { +func (s ListDelegatedAdministratorsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListChildrenInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListChildrenInput"} - if s.ChildType == nil { - invalidParams.Add(request.NewErrParamRequired("ChildType")) - } +func (s *ListDelegatedAdministratorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDelegatedAdministratorsInput"} if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.ParentId == nil { - invalidParams.Add(request.NewErrParamRequired("ParentId")) + if s.ServicePrincipal != nil && len(*s.ServicePrincipal) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServicePrincipal", 1)) } if invalidParams.Len() > 0 { @@ -16002,104 +19512,101 @@ func (s *ListChildrenInput) Validate() error { return nil } -// SetChildType sets the ChildType field's value. -func (s *ListChildrenInput) SetChildType(v string) *ListChildrenInput { - s.ChildType = &v - return s -} - // SetMaxResults sets the MaxResults field's value. -func (s *ListChildrenInput) SetMaxResults(v int64) *ListChildrenInput { +func (s *ListDelegatedAdministratorsInput) SetMaxResults(v int64) *ListDelegatedAdministratorsInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListChildrenInput) SetNextToken(v string) *ListChildrenInput { +func (s *ListDelegatedAdministratorsInput) SetNextToken(v string) *ListDelegatedAdministratorsInput { s.NextToken = &v return s } -// SetParentId sets the ParentId field's value. -func (s *ListChildrenInput) SetParentId(v string) *ListChildrenInput { - s.ParentId = &v +// SetServicePrincipal sets the ServicePrincipal field's value. +func (s *ListDelegatedAdministratorsInput) SetServicePrincipal(v string) *ListDelegatedAdministratorsInput { + s.ServicePrincipal = &v return s } -type ListChildrenOutput struct { +type ListDelegatedAdministratorsOutput struct { _ struct{} `type:"structure"` - // The list of children of the specified parent container. - Children []*Child `type:"list"` + // The list of delegated administrators in your organization. + DelegatedAdministrators []*DelegatedAdministrator `type:"list"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` } // String returns the string representation -func (s ListChildrenOutput) String() string { +func (s ListDelegatedAdministratorsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListChildrenOutput) GoString() string { +func (s ListDelegatedAdministratorsOutput) GoString() string { return s.String() } -// SetChildren sets the Children field's value. -func (s *ListChildrenOutput) SetChildren(v []*Child) *ListChildrenOutput { - s.Children = v +// SetDelegatedAdministrators sets the DelegatedAdministrators field's value. 
+func (s *ListDelegatedAdministratorsOutput) SetDelegatedAdministrators(v []*DelegatedAdministrator) *ListDelegatedAdministratorsOutput { + s.DelegatedAdministrators = v return s } // SetNextToken sets the NextToken field's value. -func (s *ListChildrenOutput) SetNextToken(v string) *ListChildrenOutput { +func (s *ListDelegatedAdministratorsOutput) SetNextToken(v string) *ListDelegatedAdministratorsOutput { s.NextToken = &v return s } -type ListCreateAccountStatusInput struct { +type ListDelegatedServicesForAccountInput struct { _ struct{} `type:"structure"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The account ID number of a delegated administrator account in the organization. + // + // AccountId is a required field + AccountId *string `type:"string" required:"true"` + + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` - - // A list of one or more states that you want included in the response. If this - // parameter isn't present, all requests are included in the response. - States []*string `type:"list"` } // String returns the string representation -func (s ListCreateAccountStatusInput) String() string { +func (s ListDelegatedServicesForAccountInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListCreateAccountStatusInput) GoString() string { +func (s ListDelegatedServicesForAccountInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListCreateAccountStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListCreateAccountStatusInput"} +func (s *ListDelegatedServicesForAccountInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDelegatedServicesForAccountInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } @@ -16110,58 +19617,55 @@ func (s *ListCreateAccountStatusInput) Validate() error { return nil } +// SetAccountId sets the AccountId field's value. +func (s *ListDelegatedServicesForAccountInput) SetAccountId(v string) *ListDelegatedServicesForAccountInput { + s.AccountId = &v + return s +} + // SetMaxResults sets the MaxResults field's value. -func (s *ListCreateAccountStatusInput) SetMaxResults(v int64) *ListCreateAccountStatusInput { +func (s *ListDelegatedServicesForAccountInput) SetMaxResults(v int64) *ListDelegatedServicesForAccountInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListCreateAccountStatusInput) SetNextToken(v string) *ListCreateAccountStatusInput { +func (s *ListDelegatedServicesForAccountInput) SetNextToken(v string) *ListDelegatedServicesForAccountInput { s.NextToken = &v return s } -// SetStates sets the States field's value. -func (s *ListCreateAccountStatusInput) SetStates(v []*string) *ListCreateAccountStatusInput { - s.States = v - return s -} - -type ListCreateAccountStatusOutput struct { +type ListDelegatedServicesForAccountOutput struct { _ struct{} `type:"structure"` - // A list of objects with details about the requests. Certain elements, such - // as the accountId number, are present in the output only after the account - // has been successfully created. - CreateAccountStatuses []*CreateAccountStatus `type:"list"` + // The services for which the account is a delegated administrator. + DelegatedServices []*DelegatedService `type:"list"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` } // String returns the string representation -func (s ListCreateAccountStatusOutput) String() string { +func (s ListDelegatedServicesForAccountOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListCreateAccountStatusOutput) GoString() string { +func (s ListDelegatedServicesForAccountOutput) GoString() string { return s.String() } -// SetCreateAccountStatuses sets the CreateAccountStatuses field's value. -func (s *ListCreateAccountStatusOutput) SetCreateAccountStatuses(v []*CreateAccountStatus) *ListCreateAccountStatusOutput { - s.CreateAccountStatuses = v +// SetDelegatedServices sets the DelegatedServices field's value. 
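// Illustrative sketch, not part of the vendored SDK: the
// ListDelegatedAdministrators and ListDelegatedServicesForAccount shapes added
// above are new in this SDK version. The example below walks every delegated
// administrator and prints the services it is delegated for; NextToken paging
// is omitted for brevity (see the earlier ListAccounts sketch).

package orgexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func printDelegatedAdministrators(svc *organizations.Organizations) error {
	admins, err := svc.ListDelegatedAdministrators(&organizations.ListDelegatedAdministratorsInput{})
	if err != nil {
		return err
	}
	for _, admin := range admins.DelegatedAdministrators {
		services, err := svc.ListDelegatedServicesForAccount(
			&organizations.ListDelegatedServicesForAccountInput{AccountId: admin.Id})
		if err != nil {
			return err
		}
		for _, ds := range services.DelegatedServices {
			fmt.Printf("%s is a delegated administrator for %s\n",
				aws.StringValue(admin.Id), aws.StringValue(ds.ServicePrincipal))
		}
	}
	return nil
}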
+func (s *ListDelegatedServicesForAccountOutput) SetDelegatedServices(v []*DelegatedService) *ListDelegatedServicesForAccountOutput { + s.DelegatedServices = v return s } // SetNextToken sets the NextToken field's value. -func (s *ListCreateAccountStatusOutput) SetNextToken(v string) *ListCreateAccountStatusOutput { +func (s *ListDelegatedServicesForAccountOutput) SetNextToken(v string) *ListDelegatedServicesForAccountOutput { s.NextToken = &v return s } @@ -16172,26 +19676,26 @@ type ListHandshakesForAccountInput struct { // Filters the handshakes that you want included in the response. The default // is all types. Use the ActionType element to limit the output to only a specified // type, such as INVITE, ENABLE_ALL_FEATURES, or APPROVE_ALL_FEATURES. Alternatively, - // you can specify the ENABLE_ALL_FEATURES handshake, which generates a separate - // child handshake for each member account. When you do specify ParentHandshakeId - // to see only the handshakes that were generated by that parent request. + // for the ENABLE_ALL_FEATURES handshake that generates a separate child handshake + // for each member account, you can specify ParentHandshakeId to see only the + // handshakes that were generated by that parent request. Filter *HandshakeFilter `type:"structure"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` } @@ -16243,11 +19747,10 @@ type ListHandshakesForAccountOutput struct { // is associated with the specified account. 
Handshakes []*Handshake `type:"list"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` } @@ -16279,26 +19782,26 @@ type ListHandshakesForOrganizationInput struct { // A filter of the handshakes that you want included in the response. The default // is all types. Use the ActionType element to limit the output to only a specified // type, such as INVITE, ENABLE-ALL-FEATURES, or APPROVE-ALL-FEATURES. Alternatively, - // you can specify the ENABLE-ALL-FEATURES handshake, which generates a separate - // child handshake for each member account. When you do, specify the ParentHandshakeId - // to see only the handshakes that were generated by that parent request. + // for the ENABLE-ALL-FEATURES handshake that generates a separate child handshake + // for each member account, you can specify the ParentHandshakeId to see only + // the handshakes that were generated by that parent request. Filter *HandshakeFilter `type:"structure"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. 
Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` } @@ -16350,11 +19853,10 @@ type ListHandshakesForOrganizationOutput struct { // are associated with an organization. Handshakes []*Handshake `type:"list"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` } @@ -16383,21 +19885,21 @@ func (s *ListHandshakesForOrganizationOutput) SetNextToken(v string) *ListHandsh type ListOrganizationalUnitsForParentInput struct { _ struct{} `type:"structure"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. 
NextToken *string `type:"string"` // The unique identifier (ID) of the root or OU whose child OUs you want to @@ -16465,11 +19967,10 @@ func (s *ListOrganizationalUnitsForParentInput) SetParentId(v string) *ListOrgan type ListOrganizationalUnitsForParentOutput struct { _ struct{} `type:"structure"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` // A list of the OUs in the specified root or parent OU. @@ -16517,21 +20018,21 @@ type ListParentsInput struct { // ChildId is a required field ChildId *string `type:"string" required:"true"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` } @@ -16582,11 +20083,10 @@ func (s *ListParentsInput) SetNextToken(v string) *ListParentsInput { type ListParentsOutput struct { _ struct{} `type:"structure"` - // If present, this value indicates that there is more output available than - // is included in the current response. 
Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` // A list of parents for the specified child account or OU. @@ -16618,26 +20118,35 @@ func (s *ListParentsOutput) SetParents(v []*Parent) *ListParentsOutput { type ListPoliciesForTargetInput struct { _ struct{} `type:"structure"` - // The type of policy that you want to include in the returned list. + // The type of policy that you want to include in the returned list. You must + // specify one of the following values: + // + // * AISERVICES_OPT_OUT_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) + // + // * BACKUP_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) + // + // * SERVICE_CONTROL_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) + // + // * TAG_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // Filter is a required field Filter *string `type:"string" required:"true" enum:"PolicyType"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. 
Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` // The unique identifier (ID) of the root, organizational unit, or account whose @@ -16716,11 +20225,10 @@ func (s *ListPoliciesForTargetInput) SetTargetId(v string) *ListPoliciesForTarge type ListPoliciesForTargetOutput struct { _ struct{} `type:"structure"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` // The list of policies that match the criteria in the request. @@ -16752,26 +20260,35 @@ func (s *ListPoliciesForTargetOutput) SetPolicies(v []*PolicySummary) *ListPolic type ListPoliciesInput struct { _ struct{} `type:"structure"` - // Specifies the type of policy that you want to include in the response. + // Specifies the type of policy that you want to include in the response. You + // must specify one of the following values: + // + // * AISERVICES_OPT_OUT_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html) + // + // * BACKUP_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_backup.html) + // + // * SERVICE_CONTROL_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scp.html) + // + // * TAG_POLICY (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) // // Filter is a required field Filter *string `type:"string" required:"true" enum:"PolicyType"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. 
MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` } @@ -16822,11 +20339,10 @@ func (s *ListPoliciesInput) SetNextToken(v string) *ListPoliciesInput { type ListPoliciesOutput struct { _ struct{} `type:"structure"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` // A list of policies that match the filter criteria in the request. The output @@ -16860,21 +20376,21 @@ func (s *ListPoliciesOutput) SetPolicies(v []*PolicySummary) *ListPoliciesOutput type ListRootsInput struct { _ struct{} `type:"structure"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. 
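// Illustrative sketch, not part of the vendored SDK: the expanded Filter
// documentation above lists the policy types accepted by ListPolicies and
// ListPoliciesForTarget. The example below lists the service control policies
// attached to one target; the OU ID is a made-up placeholder.

package orgexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func printSCPsForTarget(svc *organizations.Organizations) error {
	input := &organizations.ListPoliciesForTargetInput{
		TargetId: aws.String("ou-1a2b-34uvwxyz"), // placeholder target ID
		Filter:   aws.String(organizations.PolicyTypeServiceControlPolicy),
	}
	return svc.ListPoliciesForTargetPages(input,
		func(page *organizations.ListPoliciesForTargetOutput, lastPage bool) bool {
			for _, p := range page.Policies {
				fmt.Println(aws.StringValue(p.Id), aws.StringValue(p.Name))
			}
			return true // keep paging until NextToken is exhausted
		})
}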
+ // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` } @@ -16916,11 +20432,10 @@ func (s *ListRootsInput) SetNextToken(v string) *ListRootsInput { type ListRootsOutput struct { _ struct{} `type:"structure"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` // A list of roots that are defined in an organization. @@ -16952,13 +20467,26 @@ func (s *ListRootsOutput) SetRoots(v []*Root) *ListRootsOutput { type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` - // The ID of the resource that you want to retrieve tags for. + // The ID of the resource with the tags to list. + // + // You can specify any of the following taggable resources. + // + // * AWS account – specify the account ID number. + // + // * Organizational unit – specify the OU ID that begins with ou- and looks + // similar to: ou-1a2b-34uvwxyz + // + // * Root – specify the root ID that begins with r- and looks similar to: + // r-1a2b + // + // * Policy – specify the policy ID that begins with p- andlooks similar + // to: p-12abcdefg3 // // ResourceId is a required field ResourceId *string `type:"string" required:"true"` @@ -17002,11 +20530,10 @@ func (s *ListTagsForResourceInput) SetResourceId(v string) *ListTagsForResourceI type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. 
NextToken *string `type:"string"` // The tags that are assigned to the resource. @@ -17038,21 +20565,21 @@ func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput type ListTargetsForPolicyInput struct { _ struct{} `type:"structure"` - // (Optional) Use this to limit the number of results you want included per - // page in the response. If you do not include this parameter, it defaults to - // a value that is specific to the operation. If additional items exist beyond - // the maximum you specify, the NextToken response element is present and has - // a value (is not null). Include that value as the NextToken request parameter - // in the next call to the operation to get the next part of the results. Note - // that Organizations might return fewer results than the maximum even when - // there are more results available. You should check NextToken after every - // operation to ensure that you receive all of the results. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that Organizations + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // Use this parameter if you receive a NextToken response in a previous request - // that indicates that there is more output available. Set it to the value of - // the previous call's NextToken response to indicate where the output should - // continue from. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value of the previous call's + // NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` // The unique identifier (ID) of the policy whose attachments you want to know. @@ -17112,11 +20639,10 @@ func (s *ListTargetsForPolicyInput) SetPolicyId(v string) *ListTargetsForPolicyI type ListTargetsForPolicyOutput struct { _ struct{} `type:"structure"` - // If present, this value indicates that there is more output available than - // is included in the current response. Use this value in the NextToken request - // parameter in a subsequent call to the operation to get the next part of the - // output. You should repeat this until the NextToken response element comes - // back as null. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. 
NextToken *string `type:"string"` // A list of structures, each of which contains details about one of the entities @@ -17151,8 +20677,8 @@ func (s *ListTargetsForPolicyOutput) SetTargets(v []*PolicyTargetSummary) *ListT // service control policy syntax, see Service Control Policy Syntax (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_scp-syntax.html) // in the AWS Organizations User Guide. type MalformedPolicyDocumentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -17169,17 +20695,17 @@ func (s MalformedPolicyDocumentException) GoString() string { func newErrorMalformedPolicyDocumentException(v protocol.ResponseMetadata) error { return &MalformedPolicyDocumentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MalformedPolicyDocumentException) Code() string { +func (s *MalformedPolicyDocumentException) Code() string { return "MalformedPolicyDocumentException" } // Message returns the exception's message. -func (s MalformedPolicyDocumentException) Message() string { +func (s *MalformedPolicyDocumentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17187,30 +20713,30 @@ func (s MalformedPolicyDocumentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MalformedPolicyDocumentException) OrigErr() error { +func (s *MalformedPolicyDocumentException) OrigErr() error { return nil } -func (s MalformedPolicyDocumentException) Error() string { +func (s *MalformedPolicyDocumentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MalformedPolicyDocumentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MalformedPolicyDocumentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MalformedPolicyDocumentException) RequestID() string { - return s.respMetadata.RequestID +func (s *MalformedPolicyDocumentException) RequestID() string { + return s.RespMetadata.RequestID } // You can't remove a master account from an organization. If you want the master // account to become a member account in another organization, you must first // delete the current organization of the master account. type MasterCannotLeaveOrganizationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -17227,17 +20753,17 @@ func (s MasterCannotLeaveOrganizationException) GoString() string { func newErrorMasterCannotLeaveOrganizationException(v protocol.ResponseMetadata) error { return &MasterCannotLeaveOrganizationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MasterCannotLeaveOrganizationException) Code() string { +func (s *MasterCannotLeaveOrganizationException) Code() string { return "MasterCannotLeaveOrganizationException" } // Message returns the exception's message. 
-func (s MasterCannotLeaveOrganizationException) Message() string { +func (s *MasterCannotLeaveOrganizationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17245,22 +20771,22 @@ func (s MasterCannotLeaveOrganizationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MasterCannotLeaveOrganizationException) OrigErr() error { +func (s *MasterCannotLeaveOrganizationException) OrigErr() error { return nil } -func (s MasterCannotLeaveOrganizationException) Error() string { +func (s *MasterCannotLeaveOrganizationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MasterCannotLeaveOrganizationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MasterCannotLeaveOrganizationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MasterCannotLeaveOrganizationException) RequestID() string { - return s.respMetadata.RequestID +func (s *MasterCannotLeaveOrganizationException) RequestID() string { + return s.RespMetadata.RequestID } type MoveAccountInput struct { @@ -17373,7 +20899,7 @@ func (s MoveAccountOutput) GoString() string { // Contains details about an organization. An organization is a collection of // accounts that are centrally managed together using consolidated billing, // organized hierarchically with organizational units (OUs), and controlled -// with policies. +// with policies . type Organization struct { _ struct{} `type:"structure"` @@ -17384,14 +20910,12 @@ type Organization struct { // in the AWS Organizations User Guide. Arn *string `type:"string"` - // A list of policy types that are enabled for this organization. For example, - // if your organization has all features enabled, then service control policies - // (SCPs) are included in the list. // - // Even if a policy type is shown as available in the organization, you can - // separately enable and disable them at the root level by using EnablePolicyType - // and DisablePolicyType. Use ListRoots to see the status of a policy type in - // that root. + // Do not use. This field is deprecated and doesn't provide complete information + // about the policies in your organization. + // + // To determine the policies that are enabled and available for use in your + // organization, use the ListRoots operation instead. AvailablePolicyTypes []*PolicyTypeSummary `type:"list"` // Specifies the functionality that currently is available to the organization. @@ -17482,8 +21006,8 @@ func (s *Organization) SetMasterAccountId(v string) *Organization { // The organization isn't empty. To delete an organization, you must first remove // all accounts except the master account, delete all OUs, and delete all policies. type OrganizationNotEmptyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -17500,17 +21024,17 @@ func (s OrganizationNotEmptyException) GoString() string { func newErrorOrganizationNotEmptyException(v protocol.ResponseMetadata) error { return &OrganizationNotEmptyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s OrganizationNotEmptyException) Code() string { +func (s *OrganizationNotEmptyException) Code() string { return "OrganizationNotEmptyException" } // Message returns the exception's message. -func (s OrganizationNotEmptyException) Message() string { +func (s *OrganizationNotEmptyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17518,22 +21042,22 @@ func (s OrganizationNotEmptyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OrganizationNotEmptyException) OrigErr() error { +func (s *OrganizationNotEmptyException) OrigErr() error { return nil } -func (s OrganizationNotEmptyException) Error() string { +func (s *OrganizationNotEmptyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OrganizationNotEmptyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OrganizationNotEmptyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OrganizationNotEmptyException) RequestID() string { - return s.respMetadata.RequestID +func (s *OrganizationNotEmptyException) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about an organizational unit (OU). An OU is a container @@ -17596,8 +21120,8 @@ func (s *OrganizationalUnit) SetName(v string) *OrganizationalUnit { // The specified OU is not empty. Move all accounts to another root or to other // OUs, remove all child OUs, and try the operation again. type OrganizationalUnitNotEmptyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -17614,17 +21138,17 @@ func (s OrganizationalUnitNotEmptyException) GoString() string { func newErrorOrganizationalUnitNotEmptyException(v protocol.ResponseMetadata) error { return &OrganizationalUnitNotEmptyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OrganizationalUnitNotEmptyException) Code() string { +func (s *OrganizationalUnitNotEmptyException) Code() string { return "OrganizationalUnitNotEmptyException" } // Message returns the exception's message. -func (s OrganizationalUnitNotEmptyException) Message() string { +func (s *OrganizationalUnitNotEmptyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17632,28 +21156,28 @@ func (s OrganizationalUnitNotEmptyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OrganizationalUnitNotEmptyException) OrigErr() error { +func (s *OrganizationalUnitNotEmptyException) OrigErr() error { return nil } -func (s OrganizationalUnitNotEmptyException) Error() string { +func (s *OrganizationalUnitNotEmptyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OrganizationalUnitNotEmptyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OrganizationalUnitNotEmptyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s OrganizationalUnitNotEmptyException) RequestID() string { - return s.respMetadata.RequestID +func (s *OrganizationalUnitNotEmptyException) RequestID() string { + return s.RespMetadata.RequestID } // We can't find an OU with the OrganizationalUnitId that you specified. type OrganizationalUnitNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -17670,17 +21194,17 @@ func (s OrganizationalUnitNotFoundException) GoString() string { func newErrorOrganizationalUnitNotFoundException(v protocol.ResponseMetadata) error { return &OrganizationalUnitNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OrganizationalUnitNotFoundException) Code() string { +func (s *OrganizationalUnitNotFoundException) Code() string { return "OrganizationalUnitNotFoundException" } // Message returns the exception's message. -func (s OrganizationalUnitNotFoundException) Message() string { +func (s *OrganizationalUnitNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17688,22 +21212,22 @@ func (s OrganizationalUnitNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OrganizationalUnitNotFoundException) OrigErr() error { +func (s *OrganizationalUnitNotFoundException) OrigErr() error { return nil } -func (s OrganizationalUnitNotFoundException) Error() string { +func (s *OrganizationalUnitNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OrganizationalUnitNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OrganizationalUnitNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OrganizationalUnitNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *OrganizationalUnitNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about either a root or an organizational unit (OU) that @@ -17753,8 +21277,8 @@ func (s *Parent) SetType(v string) *Parent { // We can't find a root or OU with the ParentId that you specified. type ParentNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -17771,17 +21295,17 @@ func (s ParentNotFoundException) GoString() string { func newErrorParentNotFoundException(v protocol.ResponseMetadata) error { return &ParentNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParentNotFoundException) Code() string { +func (s *ParentNotFoundException) Code() string { return "ParentNotFoundException" } // Message returns the exception's message. -func (s ParentNotFoundException) Message() string { +func (s *ParentNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17789,22 +21313,22 @@ func (s ParentNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ParentNotFoundException) OrigErr() error { +func (s *ParentNotFoundException) OrigErr() error { return nil } -func (s ParentNotFoundException) Error() string { +func (s *ParentNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ParentNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParentNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParentNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ParentNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains rules to be applied to the affected accounts. Policies can be attached @@ -17845,8 +21369,8 @@ func (s *Policy) SetPolicySummary(v *PolicySummary) *Policy { // Changes to the effective policy are in progress, and its contents can't be // returned. Try the operation again later. type PolicyChangesInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -17863,17 +21387,17 @@ func (s PolicyChangesInProgressException) GoString() string { func newErrorPolicyChangesInProgressException(v protocol.ResponseMetadata) error { return &PolicyChangesInProgressException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PolicyChangesInProgressException) Code() string { +func (s *PolicyChangesInProgressException) Code() string { return "PolicyChangesInProgressException" } // Message returns the exception's message. -func (s PolicyChangesInProgressException) Message() string { +func (s *PolicyChangesInProgressException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17881,29 +21405,29 @@ func (s PolicyChangesInProgressException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PolicyChangesInProgressException) OrigErr() error { +func (s *PolicyChangesInProgressException) OrigErr() error { return nil } -func (s PolicyChangesInProgressException) Error() string { +func (s *PolicyChangesInProgressException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PolicyChangesInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PolicyChangesInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PolicyChangesInProgressException) RequestID() string { - return s.respMetadata.RequestID +func (s *PolicyChangesInProgressException) RequestID() string { + return s.RespMetadata.RequestID } // The policy is attached to one or more entities. You must detach it from all // roots, OUs, and accounts before performing this operation. 
type PolicyInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -17920,17 +21444,17 @@ func (s PolicyInUseException) GoString() string { func newErrorPolicyInUseException(v protocol.ResponseMetadata) error { return &PolicyInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PolicyInUseException) Code() string { +func (s *PolicyInUseException) Code() string { return "PolicyInUseException" } // Message returns the exception's message. -func (s PolicyInUseException) Message() string { +func (s *PolicyInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17938,28 +21462,28 @@ func (s PolicyInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PolicyInUseException) OrigErr() error { +func (s *PolicyInUseException) OrigErr() error { return nil } -func (s PolicyInUseException) Error() string { +func (s *PolicyInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PolicyInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PolicyInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PolicyInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *PolicyInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The policy isn't attached to the specified target in the specified root. type PolicyNotAttachedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -17976,17 +21500,17 @@ func (s PolicyNotAttachedException) GoString() string { func newErrorPolicyNotAttachedException(v protocol.ResponseMetadata) error { return &PolicyNotAttachedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PolicyNotAttachedException) Code() string { +func (s *PolicyNotAttachedException) Code() string { return "PolicyNotAttachedException" } // Message returns the exception's message. -func (s PolicyNotAttachedException) Message() string { +func (s *PolicyNotAttachedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17994,28 +21518,28 @@ func (s PolicyNotAttachedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PolicyNotAttachedException) OrigErr() error { +func (s *PolicyNotAttachedException) OrigErr() error { return nil } -func (s PolicyNotAttachedException) Error() string { +func (s *PolicyNotAttachedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PolicyNotAttachedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PolicyNotAttachedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s PolicyNotAttachedException) RequestID() string { - return s.respMetadata.RequestID +func (s *PolicyNotAttachedException) RequestID() string { + return s.RespMetadata.RequestID } // We can't find a policy with the PolicyId that you specified. type PolicyNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -18032,17 +21556,17 @@ func (s PolicyNotFoundException) GoString() string { func newErrorPolicyNotFoundException(v protocol.ResponseMetadata) error { return &PolicyNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PolicyNotFoundException) Code() string { +func (s *PolicyNotFoundException) Code() string { return "PolicyNotFoundException" } // Message returns the exception's message. -func (s PolicyNotFoundException) Message() string { +func (s *PolicyNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18050,22 +21574,22 @@ func (s PolicyNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PolicyNotFoundException) OrigErr() error { +func (s *PolicyNotFoundException) OrigErr() error { return nil } -func (s PolicyNotFoundException) Error() string { +func (s *PolicyNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PolicyNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PolicyNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PolicyNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *PolicyNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about a policy, but does not include the content. To @@ -18080,7 +21604,7 @@ type PolicySummary struct { // in the AWS Organizations User Guide. Arn *string `type:"string"` - // A Boolean value that indicates whether the specified policy is an AWS managed + // A boolean value that indicates whether the specified policy is an AWS managed // policy. If true, then you can attach the policy to roots, OUs, or accounts, // but you cannot edit it. AwsManaged *bool `type:"boolean"` @@ -18226,8 +21750,8 @@ func (s *PolicyTargetSummary) SetType(v string) *PolicyTargetSummary { // The specified policy type is already enabled in the specified root. type PolicyTypeAlreadyEnabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -18244,17 +21768,17 @@ func (s PolicyTypeAlreadyEnabledException) GoString() string { func newErrorPolicyTypeAlreadyEnabledException(v protocol.ResponseMetadata) error { return &PolicyTypeAlreadyEnabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PolicyTypeAlreadyEnabledException) Code() string { +func (s *PolicyTypeAlreadyEnabledException) Code() string { return "PolicyTypeAlreadyEnabledException" } // Message returns the exception's message. 
-func (s PolicyTypeAlreadyEnabledException) Message() string { +func (s *PolicyTypeAlreadyEnabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18262,32 +21786,32 @@ func (s PolicyTypeAlreadyEnabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PolicyTypeAlreadyEnabledException) OrigErr() error { +func (s *PolicyTypeAlreadyEnabledException) OrigErr() error { return nil } -func (s PolicyTypeAlreadyEnabledException) Error() string { +func (s *PolicyTypeAlreadyEnabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PolicyTypeAlreadyEnabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PolicyTypeAlreadyEnabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PolicyTypeAlreadyEnabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *PolicyTypeAlreadyEnabledException) RequestID() string { + return s.RespMetadata.RequestID } // You can't use the specified policy type with the feature set currently enabled // for this organization. For example, you can enable SCPs only after you enable -// all features in the organization. For more information, see Enabling and -// Disabling a Policy Type on a Root (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies.html#enable_policies_on_root) -// in the AWS Organizations User Guide. +// all features in the organization. For more information, see Managing AWS +// Organizations Policies (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies.html#enable_policies_on_root)in +// the AWS Organizations User Guide. type PolicyTypeNotAvailableForOrganizationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -18304,17 +21828,17 @@ func (s PolicyTypeNotAvailableForOrganizationException) GoString() string { func newErrorPolicyTypeNotAvailableForOrganizationException(v protocol.ResponseMetadata) error { return &PolicyTypeNotAvailableForOrganizationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PolicyTypeNotAvailableForOrganizationException) Code() string { +func (s *PolicyTypeNotAvailableForOrganizationException) Code() string { return "PolicyTypeNotAvailableForOrganizationException" } // Message returns the exception's message. -func (s PolicyTypeNotAvailableForOrganizationException) Message() string { +func (s *PolicyTypeNotAvailableForOrganizationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18322,22 +21846,22 @@ func (s PolicyTypeNotAvailableForOrganizationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PolicyTypeNotAvailableForOrganizationException) OrigErr() error { +func (s *PolicyTypeNotAvailableForOrganizationException) OrigErr() error { return nil } -func (s PolicyTypeNotAvailableForOrganizationException) Error() string { +func (s *PolicyTypeNotAvailableForOrganizationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s PolicyTypeNotAvailableForOrganizationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PolicyTypeNotAvailableForOrganizationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PolicyTypeNotAvailableForOrganizationException) RequestID() string { - return s.respMetadata.RequestID +func (s *PolicyTypeNotAvailableForOrganizationException) RequestID() string { + return s.RespMetadata.RequestID } // The specified policy type isn't currently enabled in this root. You can't @@ -18346,8 +21870,8 @@ func (s PolicyTypeNotAvailableForOrganizationException) RequestID() string { // Your Organization (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_org_support-all-features.html) // in the AWS Organizations User Guide. type PolicyTypeNotEnabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -18364,17 +21888,17 @@ func (s PolicyTypeNotEnabledException) GoString() string { func newErrorPolicyTypeNotEnabledException(v protocol.ResponseMetadata) error { return &PolicyTypeNotEnabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PolicyTypeNotEnabledException) Code() string { +func (s *PolicyTypeNotEnabledException) Code() string { return "PolicyTypeNotEnabledException" } // Message returns the exception's message. -func (s PolicyTypeNotEnabledException) Message() string { +func (s *PolicyTypeNotEnabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18382,22 +21906,22 @@ func (s PolicyTypeNotEnabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PolicyTypeNotEnabledException) OrigErr() error { +func (s *PolicyTypeNotEnabledException) OrigErr() error { return nil } -func (s PolicyTypeNotEnabledException) Error() string { +func (s *PolicyTypeNotEnabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PolicyTypeNotEnabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PolicyTypeNotEnabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PolicyTypeNotEnabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *PolicyTypeNotEnabledException) RequestID() string { + return s.RespMetadata.RequestID } // Contains information about a policy type and its status in the associated @@ -18405,10 +21929,9 @@ func (s PolicyTypeNotEnabledException) RequestID() string { type PolicyTypeSummary struct { _ struct{} `type:"structure"` - // The status of the policy type as it relates to the associated root. You can - // attach a policy of the specified type to a root or to an OU or account in - // that root. To do so, the policy must be available in the organization and - // enabled for that root. + // The status of the policy type as it relates to the associated root. To attach + // a policy of the specified type to a root or to an OU or account in that root, + // it must be available in the organization and enabled for that root. 
Status *string `type:"string" enum:"PolicyTypeStatus"` // The name of the policy type. @@ -18437,6 +21960,77 @@ func (s *PolicyTypeSummary) SetType(v string) *PolicyTypeSummary { return s } +type RegisterDelegatedAdministratorInput struct { + _ struct{} `type:"structure"` + + // The account ID number of the member account in the organization to register + // as a delegated administrator. + // + // AccountId is a required field + AccountId *string `type:"string" required:"true"` + + // The service principal of the AWS service for which you want to make the member + // account a delegated administrator. + // + // ServicePrincipal is a required field + ServicePrincipal *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RegisterDelegatedAdministratorInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterDelegatedAdministratorInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterDelegatedAdministratorInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterDelegatedAdministratorInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.ServicePrincipal == nil { + invalidParams.Add(request.NewErrParamRequired("ServicePrincipal")) + } + if s.ServicePrincipal != nil && len(*s.ServicePrincipal) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServicePrincipal", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *RegisterDelegatedAdministratorInput) SetAccountId(v string) *RegisterDelegatedAdministratorInput { + s.AccountId = &v + return s +} + +// SetServicePrincipal sets the ServicePrincipal field's value. +func (s *RegisterDelegatedAdministratorInput) SetServicePrincipal(v string) *RegisterDelegatedAdministratorInput { + s.ServicePrincipal = &v + return s +} + +type RegisterDelegatedAdministratorOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterDelegatedAdministratorOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterDelegatedAdministratorOutput) GoString() string { + return s.String() +} + type RemoveAccountFromOrganizationInput struct { _ struct{} `type:"structure"` @@ -18567,8 +22161,8 @@ func (s *Root) SetPolicyTypes(v []*PolicyTypeSummary) *Root { // We can't find a root with the RootId that you specified. type RootNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -18585,17 +22179,17 @@ func (s RootNotFoundException) GoString() string { func newErrorRootNotFoundException(v protocol.ResponseMetadata) error { return &RootNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RootNotFoundException) Code() string { +func (s *RootNotFoundException) Code() string { return "RootNotFoundException" } // Message returns the exception's message. 
-func (s RootNotFoundException) Message() string { +func (s *RootNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18603,29 +22197,29 @@ func (s RootNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RootNotFoundException) OrigErr() error { +func (s *RootNotFoundException) OrigErr() error { return nil } -func (s RootNotFoundException) Error() string { +func (s *RootNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s RootNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RootNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RootNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *RootNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // AWS Organizations can't complete your request because of an internal service // error. Try again later. type ServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -18642,17 +22236,17 @@ func (s ServiceException) GoString() string { func newErrorServiceException(v protocol.ResponseMetadata) error { return &ServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceException) Code() string { +func (s *ServiceException) Code() string { return "ServiceException" } // Message returns the exception's message. -func (s ServiceException) Message() string { +func (s *ServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18660,28 +22254,28 @@ func (s ServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceException) OrigErr() error { +func (s *ServiceException) OrigErr() error { return nil } -func (s ServiceException) Error() string { +func (s *ServiceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceException) RequestID() string { + return s.RespMetadata.RequestID } // We can't find a source root or OU with the ParentId that you specified. type SourceParentNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -18698,17 +22292,17 @@ func (s SourceParentNotFoundException) GoString() string { func newErrorSourceParentNotFoundException(v protocol.ResponseMetadata) error { return &SourceParentNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s SourceParentNotFoundException) Code() string { +func (s *SourceParentNotFoundException) Code() string { return "SourceParentNotFoundException" } // Message returns the exception's message. -func (s SourceParentNotFoundException) Message() string { +func (s *SourceParentNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18716,26 +22310,35 @@ func (s SourceParentNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SourceParentNotFoundException) OrigErr() error { +func (s *SourceParentNotFoundException) OrigErr() error { return nil } -func (s SourceParentNotFoundException) Error() string { +func (s *SourceParentNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SourceParentNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SourceParentNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SourceParentNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *SourceParentNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } -// A custom key-value pair associated with a resource such as an account within -// your organization. +// A custom key-value pair associated with a resource within your organization. +// +// You can attach tags to any of the following organization resources. +// +// * AWS account +// +// * Organizational unit (OU) +// +// * Organization root +// +// * Policy type Tag struct { _ struct{} `type:"structure"` @@ -18801,9 +22404,27 @@ type TagResourceInput struct { // ResourceId is a required field ResourceId *string `type:"string" required:"true"` - // The tag to add to the specified resource. Specifying the tag key is required. - // You can set the value of a tag to an empty string, but you can't set the - // value of a tag to null. + // A list of tags to add to the specified resource. + // + // You can specify any of the following taggable resources. + // + // * AWS account – specify the account ID number. + // + // * Organizational unit – specify the OU ID that begins with ou- and looks + // similar to: ou-1a2b-34uvwxyz + // + // * Root – specify the root ID that begins with r- and looks similar to: + // r-1a2b + // + // * Policy – specify the policy ID that begins with p- andlooks similar + // to: p-12abcdefg3 + // + // For each tag in the list, you must specify both a tag key and a value. You + // can set the value to an empty string, but you can't set it to null. + // + // If any one of the tags is invalid or if you exceed the allowed number of + // tags for an account user, then the entire request fails and the account is + // not created. // // Tags is a required field Tags []*Tag `type:"list" required:"true"` @@ -18871,10 +22492,10 @@ func (s TagResourceOutput) GoString() string { return s.String() } -// We can't find a root, OU, or account with the TargetId that you specified. +// We can't find a root, OU, account, or policy with the TargetId that you specified. 
type TargetNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -18891,17 +22512,17 @@ func (s TargetNotFoundException) GoString() string { func newErrorTargetNotFoundException(v protocol.ResponseMetadata) error { return &TargetNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TargetNotFoundException) Code() string { +func (s *TargetNotFoundException) Code() string { return "TargetNotFoundException" } // Message returns the exception's message. -func (s TargetNotFoundException) Message() string { +func (s *TargetNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18909,33 +22530,33 @@ func (s TargetNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TargetNotFoundException) OrigErr() error { +func (s *TargetNotFoundException) OrigErr() error { return nil } -func (s TargetNotFoundException) Error() string { +func (s *TargetNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TargetNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TargetNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TargetNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *TargetNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } -// You have sent too many requests in too short a period of time. The limit +// You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // -// For information on limits that affect AWS Organizations, see Limits of AWS -// Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) -// in the AWS Organizations User Guide. +// For information about quotas that affect AWS Organizations, see Quotas for +// AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in +// the AWS Organizations User Guide. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -18954,17 +22575,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18972,28 +22593,28 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } -// This action isn't available in the current Region. +// This action isn't available in the current AWS Region. type UnsupportedAPIEndpointException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -19010,17 +22631,17 @@ func (s UnsupportedAPIEndpointException) GoString() string { func newErrorUnsupportedAPIEndpointException(v protocol.ResponseMetadata) error { return &UnsupportedAPIEndpointException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedAPIEndpointException) Code() string { +func (s *UnsupportedAPIEndpointException) Code() string { return "UnsupportedAPIEndpointException" } // Message returns the exception's message. -func (s UnsupportedAPIEndpointException) Message() string { +func (s *UnsupportedAPIEndpointException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19028,33 +22649,46 @@ func (s UnsupportedAPIEndpointException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedAPIEndpointException) OrigErr() error { +func (s *UnsupportedAPIEndpointException) OrigErr() error { return nil } -func (s UnsupportedAPIEndpointException) Error() string { +func (s *UnsupportedAPIEndpointException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedAPIEndpointException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedAPIEndpointException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedAPIEndpointException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedAPIEndpointException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { _ struct{} `type:"structure"` - // The ID of the resource to remove the tag from. + // The ID of the resource to remove a tag from. + // + // You can specify any of the following taggable resources. + // + // * AWS account – specify the account ID number. 
+ // + // * Organizational unit – specify the OU ID that begins with ou- and looks + // similar to: ou-1a2b-34uvwxyz + // + // * Root – specify the root ID that begins with r- and looks similar to: + // r-1a2b + // + // * Policy – specify the policy ID that begins with p- andlooks similar + // to: p-12abcdefg3 // // ResourceId is a required field ResourceId *string `type:"string" required:"true"` - // The tag to remove from the specified resource. + // The list of keys for tags to remove from the specified resource. // // TagKeys is a required field TagKeys []*string `type:"list" required:"true"` @@ -19307,6 +22941,13 @@ const ( AccessDeniedForDependencyExceptionReasonAccessDeniedDuringCreateServiceLinkedRole = "ACCESS_DENIED_DURING_CREATE_SERVICE_LINKED_ROLE" ) +// AccessDeniedForDependencyExceptionReason_Values returns all elements of the AccessDeniedForDependencyExceptionReason enum +func AccessDeniedForDependencyExceptionReason_Values() []string { + return []string{ + AccessDeniedForDependencyExceptionReasonAccessDeniedDuringCreateServiceLinkedRole, + } +} + const ( // AccountJoinedMethodInvited is a AccountJoinedMethod enum value AccountJoinedMethodInvited = "INVITED" @@ -19315,6 +22956,14 @@ const ( AccountJoinedMethodCreated = "CREATED" ) +// AccountJoinedMethod_Values returns all elements of the AccountJoinedMethod enum +func AccountJoinedMethod_Values() []string { + return []string{ + AccountJoinedMethodInvited, + AccountJoinedMethodCreated, + } +} + const ( // AccountStatusActive is a AccountStatus enum value AccountStatusActive = "ACTIVE" @@ -19323,6 +22972,14 @@ const ( AccountStatusSuspended = "SUSPENDED" ) +// AccountStatus_Values returns all elements of the AccountStatus enum +func AccountStatus_Values() []string { + return []string{ + AccountStatusActive, + AccountStatusSuspended, + } +} + const ( // ActionTypeInvite is a ActionType enum value ActionTypeInvite = "INVITE" @@ -19337,6 +22994,16 @@ const ( ActionTypeAddOrganizationsServiceLinkedRole = "ADD_ORGANIZATIONS_SERVICE_LINKED_ROLE" ) +// ActionType_Values returns all elements of the ActionType enum +func ActionType_Values() []string { + return []string{ + ActionTypeInvite, + ActionTypeEnableAllFeatures, + ActionTypeApproveAllFeatures, + ActionTypeAddOrganizationsServiceLinkedRole, + } +} + const ( // ChildTypeAccount is a ChildType enum value ChildTypeAccount = "ACCOUNT" @@ -19345,6 +23012,14 @@ const ( ChildTypeOrganizationalUnit = "ORGANIZATIONAL_UNIT" ) +// ChildType_Values returns all elements of the ChildType enum +func ChildType_Values() []string { + return []string{ + ChildTypeAccount, + ChildTypeOrganizationalUnit, + } +} + const ( // ConstraintViolationExceptionReasonAccountNumberLimitExceeded is a ConstraintViolationExceptionReason enum value ConstraintViolationExceptionReasonAccountNumberLimitExceeded = "ACCOUNT_NUMBER_LIMIT_EXCEEDED" @@ -19414,8 +23089,57 @@ const ( // ConstraintViolationExceptionReasonTagPolicyViolation is a ConstraintViolationExceptionReason enum value ConstraintViolationExceptionReasonTagPolicyViolation = "TAG_POLICY_VIOLATION" + + // ConstraintViolationExceptionReasonMaxDelegatedAdministratorsForServiceLimitExceeded is a ConstraintViolationExceptionReason enum value + ConstraintViolationExceptionReasonMaxDelegatedAdministratorsForServiceLimitExceeded = "MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED" + + // ConstraintViolationExceptionReasonCannotRegisterMasterAsDelegatedAdministrator is a ConstraintViolationExceptionReason enum value + 
ConstraintViolationExceptionReasonCannotRegisterMasterAsDelegatedAdministrator = "CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR" + + // ConstraintViolationExceptionReasonCannotRemoveDelegatedAdministratorFromOrg is a ConstraintViolationExceptionReason enum value + ConstraintViolationExceptionReasonCannotRemoveDelegatedAdministratorFromOrg = "CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG" + + // ConstraintViolationExceptionReasonDelegatedAdministratorExistsForThisService is a ConstraintViolationExceptionReason enum value + ConstraintViolationExceptionReasonDelegatedAdministratorExistsForThisService = "DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE" + + // ConstraintViolationExceptionReasonMasterAccountMissingBusinessLicense is a ConstraintViolationExceptionReason enum value + ConstraintViolationExceptionReasonMasterAccountMissingBusinessLicense = "MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE" ) +// ConstraintViolationExceptionReason_Values returns all elements of the ConstraintViolationExceptionReason enum +func ConstraintViolationExceptionReason_Values() []string { + return []string{ + ConstraintViolationExceptionReasonAccountNumberLimitExceeded, + ConstraintViolationExceptionReasonHandshakeRateLimitExceeded, + ConstraintViolationExceptionReasonOuNumberLimitExceeded, + ConstraintViolationExceptionReasonOuDepthLimitExceeded, + ConstraintViolationExceptionReasonPolicyNumberLimitExceeded, + ConstraintViolationExceptionReasonPolicyContentLimitExceeded, + ConstraintViolationExceptionReasonMaxPolicyTypeAttachmentLimitExceeded, + ConstraintViolationExceptionReasonMinPolicyTypeAttachmentLimitExceeded, + ConstraintViolationExceptionReasonAccountCannotLeaveOrganization, + ConstraintViolationExceptionReasonAccountCannotLeaveWithoutEula, + ConstraintViolationExceptionReasonAccountCannotLeaveWithoutPhoneVerification, + ConstraintViolationExceptionReasonMasterAccountPaymentInstrumentRequired, + ConstraintViolationExceptionReasonMemberAccountPaymentInstrumentRequired, + ConstraintViolationExceptionReasonAccountCreationRateLimitExceeded, + ConstraintViolationExceptionReasonMasterAccountAddressDoesNotMatchMarketplace, + ConstraintViolationExceptionReasonMasterAccountMissingContactInfo, + ConstraintViolationExceptionReasonMasterAccountNotGovcloudEnabled, + ConstraintViolationExceptionReasonOrganizationNotInAllFeaturesMode, + ConstraintViolationExceptionReasonCreateOrganizationInBillingModeUnsupportedRegion, + ConstraintViolationExceptionReasonEmailVerificationCodeExpired, + ConstraintViolationExceptionReasonWaitPeriodActive, + ConstraintViolationExceptionReasonMaxTagLimitExceeded, + ConstraintViolationExceptionReasonTagPolicyViolation, + ConstraintViolationExceptionReasonMaxDelegatedAdministratorsForServiceLimitExceeded, + ConstraintViolationExceptionReasonCannotRegisterMasterAsDelegatedAdministrator, + ConstraintViolationExceptionReasonCannotRemoveDelegatedAdministratorFromOrg, + ConstraintViolationExceptionReasonDelegatedAdministratorExistsForThisService, + ConstraintViolationExceptionReasonMasterAccountMissingBusinessLicense, + } +} + const ( // CreateAccountFailureReasonAccountLimitExceeded is a CreateAccountFailureReason enum value CreateAccountFailureReasonAccountLimitExceeded = "ACCOUNT_LIMIT_EXCEEDED" @@ -19437,8 +23161,29 @@ const ( // CreateAccountFailureReasonGovcloudAccountAlreadyExists is a CreateAccountFailureReason enum value CreateAccountFailureReasonGovcloudAccountAlreadyExists = "GOVCLOUD_ACCOUNT_ALREADY_EXISTS" + + // CreateAccountFailureReasonMissingBusinessValidation is a 
CreateAccountFailureReason enum value + CreateAccountFailureReasonMissingBusinessValidation = "MISSING_BUSINESS_VALIDATION" + + // CreateAccountFailureReasonMissingPaymentInstrument is a CreateAccountFailureReason enum value + CreateAccountFailureReasonMissingPaymentInstrument = "MISSING_PAYMENT_INSTRUMENT" ) +// CreateAccountFailureReason_Values returns all elements of the CreateAccountFailureReason enum +func CreateAccountFailureReason_Values() []string { + return []string{ + CreateAccountFailureReasonAccountLimitExceeded, + CreateAccountFailureReasonEmailAlreadyExists, + CreateAccountFailureReasonInvalidAddress, + CreateAccountFailureReasonInvalidEmail, + CreateAccountFailureReasonConcurrentAccountModification, + CreateAccountFailureReasonInternalFailure, + CreateAccountFailureReasonGovcloudAccountAlreadyExists, + CreateAccountFailureReasonMissingBusinessValidation, + CreateAccountFailureReasonMissingPaymentInstrument, + } +} + const ( // CreateAccountStateInProgress is a CreateAccountState enum value CreateAccountStateInProgress = "IN_PROGRESS" @@ -19450,11 +23195,35 @@ const ( CreateAccountStateFailed = "FAILED" ) +// CreateAccountState_Values returns all elements of the CreateAccountState enum +func CreateAccountState_Values() []string { + return []string{ + CreateAccountStateInProgress, + CreateAccountStateSucceeded, + CreateAccountStateFailed, + } +} + const ( // EffectivePolicyTypeTagPolicy is a EffectivePolicyType enum value EffectivePolicyTypeTagPolicy = "TAG_POLICY" + + // EffectivePolicyTypeBackupPolicy is a EffectivePolicyType enum value + EffectivePolicyTypeBackupPolicy = "BACKUP_POLICY" + + // EffectivePolicyTypeAiservicesOptOutPolicy is a EffectivePolicyType enum value + EffectivePolicyTypeAiservicesOptOutPolicy = "AISERVICES_OPT_OUT_POLICY" ) +// EffectivePolicyType_Values returns all elements of the EffectivePolicyType enum +func EffectivePolicyType_Values() []string { + return []string{ + EffectivePolicyTypeTagPolicy, + EffectivePolicyTypeBackupPolicy, + EffectivePolicyTypeAiservicesOptOutPolicy, + } +} + const ( // HandshakeConstraintViolationExceptionReasonAccountNumberLimitExceeded is a HandshakeConstraintViolationExceptionReason enum value HandshakeConstraintViolationExceptionReasonAccountNumberLimitExceeded = "ACCOUNT_NUMBER_LIMIT_EXCEEDED" @@ -19481,6 +23250,20 @@ const ( HandshakeConstraintViolationExceptionReasonOrganizationMembershipChangeRateLimitExceeded = "ORGANIZATION_MEMBERSHIP_CHANGE_RATE_LIMIT_EXCEEDED" ) +// HandshakeConstraintViolationExceptionReason_Values returns all elements of the HandshakeConstraintViolationExceptionReason enum +func HandshakeConstraintViolationExceptionReason_Values() []string { + return []string{ + HandshakeConstraintViolationExceptionReasonAccountNumberLimitExceeded, + HandshakeConstraintViolationExceptionReasonHandshakeRateLimitExceeded, + HandshakeConstraintViolationExceptionReasonAlreadyInAnOrganization, + HandshakeConstraintViolationExceptionReasonOrganizationAlreadyHasAllFeatures, + HandshakeConstraintViolationExceptionReasonInviteDisabledDuringEnableAllFeatures, + HandshakeConstraintViolationExceptionReasonPaymentInstrumentRequired, + HandshakeConstraintViolationExceptionReasonOrganizationFromDifferentSellerOfRecord, + HandshakeConstraintViolationExceptionReasonOrganizationMembershipChangeRateLimitExceeded, + } +} + const ( // HandshakePartyTypeAccount is a HandshakePartyType enum value HandshakePartyTypeAccount = "ACCOUNT" @@ -19492,6 +23275,15 @@ const ( HandshakePartyTypeEmail = "EMAIL" ) +// HandshakePartyType_Values 
returns all elements of the HandshakePartyType enum +func HandshakePartyType_Values() []string { + return []string{ + HandshakePartyTypeAccount, + HandshakePartyTypeOrganization, + HandshakePartyTypeEmail, + } +} + const ( // HandshakeResourceTypeAccount is a HandshakeResourceType enum value HandshakeResourceTypeAccount = "ACCOUNT" @@ -19518,6 +23310,20 @@ const ( HandshakeResourceTypeParentHandshake = "PARENT_HANDSHAKE" ) +// HandshakeResourceType_Values returns all elements of the HandshakeResourceType enum +func HandshakeResourceType_Values() []string { + return []string{ + HandshakeResourceTypeAccount, + HandshakeResourceTypeOrganization, + HandshakeResourceTypeOrganizationFeatureSet, + HandshakeResourceTypeEmail, + HandshakeResourceTypeMasterEmail, + HandshakeResourceTypeMasterName, + HandshakeResourceTypeNotes, + HandshakeResourceTypeParentHandshake, + } +} + const ( // HandshakeStateRequested is a HandshakeState enum value HandshakeStateRequested = "REQUESTED" @@ -19538,6 +23344,18 @@ const ( HandshakeStateExpired = "EXPIRED" ) +// HandshakeState_Values returns all elements of the HandshakeState enum +func HandshakeState_Values() []string { + return []string{ + HandshakeStateRequested, + HandshakeStateOpen, + HandshakeStateCanceled, + HandshakeStateAccepted, + HandshakeStateDeclined, + HandshakeStateExpired, + } +} + const ( // IAMUserAccessToBillingAllow is a IAMUserAccessToBilling enum value IAMUserAccessToBillingAllow = "ALLOW" @@ -19546,6 +23364,14 @@ const ( IAMUserAccessToBillingDeny = "DENY" ) +// IAMUserAccessToBilling_Values returns all elements of the IAMUserAccessToBilling enum +func IAMUserAccessToBilling_Values() []string { + return []string{ + IAMUserAccessToBillingAllow, + IAMUserAccessToBillingDeny, + } +} + const ( // InvalidInputExceptionReasonInvalidPartyTypeTarget is a InvalidInputExceptionReason enum value InvalidInputExceptionReasonInvalidPartyTypeTarget = "INVALID_PARTY_TYPE_TARGET" @@ -19610,10 +23436,42 @@ const ( // InvalidInputExceptionReasonInvalidSystemTagsParameter is a InvalidInputExceptionReason enum value InvalidInputExceptionReasonInvalidSystemTagsParameter = "INVALID_SYSTEM_TAGS_PARAMETER" + // InvalidInputExceptionReasonDuplicateTagKey is a InvalidInputExceptionReason enum value + InvalidInputExceptionReasonDuplicateTagKey = "DUPLICATE_TAG_KEY" + // InvalidInputExceptionReasonTargetNotSupported is a InvalidInputExceptionReason enum value InvalidInputExceptionReasonTargetNotSupported = "TARGET_NOT_SUPPORTED" ) +// InvalidInputExceptionReason_Values returns all elements of the InvalidInputExceptionReason enum +func InvalidInputExceptionReason_Values() []string { + return []string{ + InvalidInputExceptionReasonInvalidPartyTypeTarget, + InvalidInputExceptionReasonInvalidSyntaxOrganizationArn, + InvalidInputExceptionReasonInvalidSyntaxPolicyId, + InvalidInputExceptionReasonInvalidEnum, + InvalidInputExceptionReasonInvalidEnumPolicyType, + InvalidInputExceptionReasonInvalidListMember, + InvalidInputExceptionReasonMaxLengthExceeded, + InvalidInputExceptionReasonMaxValueExceeded, + InvalidInputExceptionReasonMinLengthExceeded, + InvalidInputExceptionReasonMinValueExceeded, + InvalidInputExceptionReasonImmutablePolicy, + InvalidInputExceptionReasonInvalidPattern, + InvalidInputExceptionReasonInvalidPatternTargetId, + InvalidInputExceptionReasonInputRequired, + InvalidInputExceptionReasonInvalidNextToken, + InvalidInputExceptionReasonMaxLimitExceededFilter, + InvalidInputExceptionReasonMovingAccountBetweenDifferentRoots, + 
InvalidInputExceptionReasonInvalidFullNameTarget, + InvalidInputExceptionReasonUnrecognizedServicePrincipal, + InvalidInputExceptionReasonInvalidRoleName, + InvalidInputExceptionReasonInvalidSystemTagsParameter, + InvalidInputExceptionReasonDuplicateTagKey, + InvalidInputExceptionReasonTargetNotSupported, + } +} + const ( // OrganizationFeatureSetAll is a OrganizationFeatureSet enum value OrganizationFeatureSetAll = "ALL" @@ -19622,6 +23480,14 @@ const ( OrganizationFeatureSetConsolidatedBilling = "CONSOLIDATED_BILLING" ) +// OrganizationFeatureSet_Values returns all elements of the OrganizationFeatureSet enum +func OrganizationFeatureSet_Values() []string { + return []string{ + OrganizationFeatureSetAll, + OrganizationFeatureSetConsolidatedBilling, + } +} + const ( // ParentTypeRoot is a ParentType enum value ParentTypeRoot = "ROOT" @@ -19630,14 +23496,38 @@ const ( ParentTypeOrganizationalUnit = "ORGANIZATIONAL_UNIT" ) +// ParentType_Values returns all elements of the ParentType enum +func ParentType_Values() []string { + return []string{ + ParentTypeRoot, + ParentTypeOrganizationalUnit, + } +} + const ( // PolicyTypeServiceControlPolicy is a PolicyType enum value PolicyTypeServiceControlPolicy = "SERVICE_CONTROL_POLICY" // PolicyTypeTagPolicy is a PolicyType enum value PolicyTypeTagPolicy = "TAG_POLICY" + + // PolicyTypeBackupPolicy is a PolicyType enum value + PolicyTypeBackupPolicy = "BACKUP_POLICY" + + // PolicyTypeAiservicesOptOutPolicy is a PolicyType enum value + PolicyTypeAiservicesOptOutPolicy = "AISERVICES_OPT_OUT_POLICY" ) +// PolicyType_Values returns all elements of the PolicyType enum +func PolicyType_Values() []string { + return []string{ + PolicyTypeServiceControlPolicy, + PolicyTypeTagPolicy, + PolicyTypeBackupPolicy, + PolicyTypeAiservicesOptOutPolicy, + } +} + const ( // PolicyTypeStatusEnabled is a PolicyTypeStatus enum value PolicyTypeStatusEnabled = "ENABLED" @@ -19649,6 +23539,15 @@ const ( PolicyTypeStatusPendingDisable = "PENDING_DISABLE" ) +// PolicyTypeStatus_Values returns all elements of the PolicyTypeStatus enum +func PolicyTypeStatus_Values() []string { + return []string{ + PolicyTypeStatusEnabled, + PolicyTypeStatusPendingEnable, + PolicyTypeStatusPendingDisable, + } +} + const ( // TargetTypeAccount is a TargetType enum value TargetTypeAccount = "ACCOUNT" @@ -19659,3 +23558,12 @@ const ( // TargetTypeRoot is a TargetType enum value TargetTypeRoot = "ROOT" ) + +// TargetType_Values returns all elements of the TargetType enum +func TargetType_Values() []string { + return []string{ + TargetTypeAccount, + TargetTypeOrganizationalUnit, + TargetTypeRoot, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/organizations/errors.go b/vendor/github.com/aws/aws-sdk-go/service/organizations/errors.go index af309e1f1..948c21f07 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/organizations/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/organizations/errors.go @@ -33,14 +33,26 @@ const ( // create the required service-linked role. You don't have that permission. ErrCodeAccessDeniedForDependencyException = "AccessDeniedForDependencyException" + // ErrCodeAccountAlreadyRegisteredException for service response error code + // "AccountAlreadyRegisteredException". + // + // The specified account is already a delegated administrator for this AWS service. + ErrCodeAccountAlreadyRegisteredException = "AccountAlreadyRegisteredException" + // ErrCodeAccountNotFoundException for service response error code // "AccountNotFoundException". 
// - // We can't find an AWS account with the AccountId that you specified. Or the + // We can't find an AWS account with the AccountId that you specified, or the // account whose credentials you used to make this request isn't a member of // an organization. ErrCodeAccountNotFoundException = "AccountNotFoundException" + // ErrCodeAccountNotRegisteredException for service response error code + // "AccountNotRegisteredException". + // + // The specified account is not a delegated administrator for this AWS service. + ErrCodeAccountNotRegisteredException = "AccountNotRegisteredException" + // ErrCodeAccountOwnerNotVerifiedException for service response error code // "AccountOwnerNotVerifiedException". // @@ -74,27 +86,32 @@ const ( // ErrCodeConstraintViolationException for service response error code // "ConstraintViolationException". // - // Performing this operation violates a minimum or maximum value limit. Examples - // include attempting to remove the last service control policy (SCP) from an - // OU or root, or attaching too many policies to an account, OU, or root. This - // exception includes a reason that contains additional information about the - // violated limit. + // Performing this operation violates a minimum or maximum value limit. For + // example, attempting to remove the last service control policy (SCP) from + // an OU or root, inviting or creating too many accounts to the organization, + // or attaching too many policies to an account, OU, or root. This exception + // includes a reason that contains additional information about the violated + // limit: // // Some of the reasons in the following list might not be applicable to this - // specific API or operation: + // specific API or operation. + // + // * ACCOUNT_CANNOT_LEAVE_ORGANIZAION: You attempted to remove the master + // account from the organization. You can't remove the master account. Instead, + // after you remove all member accounts, delete the organization itself. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_EULA: You attempted to remove an account // from the organization that doesn't yet have enough information to exist // as a standalone account. This account requires you to first agree to the - // AWS Customer Agreement. Follow the steps at To leave an organization when - // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) - // in the AWS Organizations User Guide. + // AWS Customer Agreement. Follow the steps at Removing a member account + // from your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master)in + // the AWS Organizations User Guide. // // * ACCOUNT_CANNOT_LEAVE_WITHOUT_PHONE_VERIFICATION: You attempted to remove // an account from the organization that doesn't yet have enough information // to exist as a standalone account. This account requires you to first complete - // phone verification. Follow the steps at To leave an organization when - // all required account information has not yet been provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) + // phone verification. Follow the steps at Removing a member account from + // your organization (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#orgs_manage_accounts_remove-from-master) // in the AWS Organizations User Guide. 
// // * ACCOUNT_CREATION_RATE_LIMIT_EXCEEDED: You attempted to exceed the number @@ -107,11 +124,33 @@ const ( // tried to send would cause you to exceed the limit of accounts in your // organization. Send fewer invitations or contact AWS Support to request // an increase in the number of accounts. Deleted and closed accounts still - // count toward your limit. If you get receive this exception when running - // a command immediately after creating the organization, wait one hour and - // try again. If after an hour it continues to fail with this error, contact + // count toward your limit. If you get this exception when running a command + // immediately after creating the organization, wait one hour and try again. + // After an hour, if the command continues to fail with this error, contact // AWS Support (https://console.aws.amazon.com/support/home#/). // + // * CANNOT_REGISTER_MASTER_AS_DELEGATED_ADMINISTRATOR: You attempted to + // register the master account of the organization as a delegated administrator + // for an AWS service integrated with Organizations. You can designate only + // a member account as a delegated administrator. + // + // * CANNOT_REMOVE_DELEGATED_ADMINISTRATOR_FROM_ORG: You attempted to remove + // an account that is registered as a delegated administrator for a service + // integrated with your organization. To complete this operation, you must + // first deregister this account as a delegated administrator. + // + // * CREATE_ORGANIZATION_IN_BILLING_MODE_UNSUPPORTED_REGION: To create an + // organization in the specified region, you must enable all features mode. + // + // * DELEGATED_ADMINISTRATOR_EXISTS_FOR_THIS_SERVICE: You attempted to register + // an AWS account as a delegated administrator for an AWS service that already + // has a delegated administrator. To complete this operation, you must first + // deregister any existing delegated administrators for this service. + // + // * EMAIL_VERIFICATION_CODE_EXPIRED: The email verification code is only + // valid for a limited period of time. You must resubmit the request and + // generate a new verfication code. + // // * HANDSHAKE_RATE_LIMIT_EXCEEDED: You attempted to exceed the number of // handshakes that you can send in one day. // @@ -122,8 +161,12 @@ const ( // AISPL marketplace. All accounts in an organization must be associated // with the same marketplace. // + // * MASTER_ACCOUNT_MISSING_BUSINESS_LICENSE: Applies only to the AWS Regions + // in China. To create an organization, the master must have an valid business + // license. For more information, contact customer support. + // // * MASTER_ACCOUNT_MISSING_CONTACT_INFO: To complete this operation, you - // must first provide contact a valid address and phone number for the master + // must first provide a valid contact address and phone number for the master // account. Then try the operation again. // // * MASTER_ACCOUNT_NOT_GOVCLOUD_ENABLED: To complete this operation, the @@ -138,6 +181,10 @@ const ( // provided (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_accounts_remove.html#leave-without-all-info) // in the AWS Organizations User Guide. // + // * MAX_DELEGATED_ADMINISTRATORS_FOR_SERVICE_LIMIT_EXCEEDED: You attempted + // to register more delegated administrators than allowed for the service + // principal. + // // * MAX_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to exceed the // number of policies of a certain type that can be attached to an entity // at one time. 
@@ -153,33 +200,35 @@ const ( // in the AWS Organizations User Guide. // // * MIN_POLICY_TYPE_ATTACHMENT_LIMIT_EXCEEDED: You attempted to detach a - // policy from an entity, which would cause the entity to have fewer than - // the minimum number of policies of the required type. - // - // * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is - // too many levels deep. + // policy from an entity that would cause the entity to have fewer than the + // minimum number of policies of a certain type required. // // * ORGANIZATION_NOT_IN_ALL_FEATURES_MODE: You attempted to perform an operation // that requires the organization to be configured to support all features. // An organization that supports only consolidated billing features can't // perform this operation. // + // * OU_DEPTH_LIMIT_EXCEEDED: You attempted to create an OU tree that is + // too many levels deep. + // // * OU_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of OUs // that you can have in an organization. // + // * POLICY_CONTENT_LIMIT_EXCEEDED: You attempted to create a policy that + // is larger than the maximum size. + // // * POLICY_NUMBER_LIMIT_EXCEEDED: You attempted to exceed the number of // policies that you can have in an organization. // - // * TAG_POLICY_VIOLATION: Tags associated with the resource must be compliant - // with the tag policy that’s in effect for the account. For more information, - // see Tag Policies (http://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_tag-policies.html) - // in the AWS Organizations User Guide. + // * TAG_POLICY_VIOLATION: You attempted to create or update a resource with + // tags that are not compliant with the tag policy requirements for this + // account. ErrCodeConstraintViolationException = "ConstraintViolationException" // ErrCodeCreateAccountStatusNotFoundException for service response error code // "CreateAccountStatusNotFoundException". // - // We can't find a create account request with the CreateAccountRequestId that + // We can't find an create account request with the CreateAccountRequestId that // you specified. ErrCodeCreateAccountStatusNotFoundException = "CreateAccountStatusNotFoundException" @@ -314,7 +363,10 @@ const ( // contains additional information about the violated limit: // // Some of the reasons in the following list might not be applicable to this - // specific API or operation: + // specific API or operation. + // + // * DUPLICATE_TAG_KEY: Tag keys must be unique among the tags attached to + // the same entity. // // * IMMUTABLE_POLICY: You specified a policy that is managed by AWS and // can't be modified. @@ -323,7 +375,7 @@ const ( // // * INVALID_ENUM: You specified an invalid value. // - // * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type. + // * INVALID_ENUM_POLICY_TYPE: You specified an invalid policy type string. // // * INVALID_FULL_NAME_TARGET: You specified a full name that contains invalid // characters. @@ -373,6 +425,12 @@ const ( // // * MOVING_ACCOUNT_BETWEEN_DIFFERENT_ROOTS: You can move an account only // between entities in the same root. + // + // * TARGET_NOT_SUPPORTED: You can't perform the specified operation on that + // target entity. + // + // * UNRECOGNIZED_SERVICE_PRINCIPAL: You specified a service principal that + // isn't recognized. 
ErrCodeInvalidInputException = "InvalidInputException" // ErrCodeMalformedPolicyDocumentException for service response error code @@ -455,9 +513,9 @@ const ( // // You can't use the specified policy type with the feature set currently enabled // for this organization. For example, you can enable SCPs only after you enable - // all features in the organization. For more information, see Enabling and - // Disabling a Policy Type on a Root (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies.html#enable_policies_on_root) - // in the AWS Organizations User Guide. + // all features in the organization. For more information, see Managing AWS + // Organizations Policies (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies.html#enable_policies_on_root)in + // the AWS Organizations User Guide. ErrCodePolicyTypeNotAvailableForOrganizationException = "PolicyTypeNotAvailableForOrganizationException" // ErrCodePolicyTypeNotEnabledException for service response error code @@ -492,24 +550,24 @@ const ( // ErrCodeTargetNotFoundException for service response error code // "TargetNotFoundException". // - // We can't find a root, OU, or account with the TargetId that you specified. + // We can't find a root, OU, account, or policy with the TargetId that you specified. ErrCodeTargetNotFoundException = "TargetNotFoundException" // ErrCodeTooManyRequestsException for service response error code // "TooManyRequestsException". // - // You have sent too many requests in too short a period of time. The limit + // You have sent too many requests in too short a period of time. The quota // helps protect against denial-of-service attacks. Try again later. // - // For information on limits that affect AWS Organizations, see Limits of AWS - // Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html) - // in the AWS Organizations User Guide. + // For information about quotas that affect AWS Organizations, see Quotas for + // AWS Organizations (https://docs.aws.amazon.com/organizations/latest/userguide/orgs_reference_limits.html)in + // the AWS Organizations User Guide. ErrCodeTooManyRequestsException = "TooManyRequestsException" // ErrCodeUnsupportedAPIEndpointException for service response error code // "UnsupportedAPIEndpointException". // - // This action isn't available in the current Region. + // This action isn't available in the current AWS Region. 
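// Usage sketch (illustrative only, not part of the generated SDK source):
// callers typically branch on the error codes documented above through the
// awserr.Error interface; the function names below are hypothetical.
import (
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/organizations"
)

// alreadyDelegatedAdmin reports whether err is the new
// AccountAlreadyRegisteredException, which a caller registering a delegated
// administrator may choose to treat as success.
func alreadyDelegatedAdmin(err error) bool {
	if aerr, ok := err.(awserr.Error); ok {
		return aerr.Code() == organizations.ErrCodeAccountAlreadyRegisteredException
	}
	return false
}

// isThrottled reports whether err is the TooManyRequestsException described
// above and should therefore be retried after a delay.
func isThrottled(err error) bool {
	if aerr, ok := err.(awserr.Error); ok {
		return aerr.Code() == organizations.ErrCodeTooManyRequestsException
	}
	return false
}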
ErrCodeUnsupportedAPIEndpointException = "UnsupportedAPIEndpointException" ) @@ -517,7 +575,9 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "AWSOrganizationsNotInUseException": newErrorAWSOrganizationsNotInUseException, "AccessDeniedException": newErrorAccessDeniedException, "AccessDeniedForDependencyException": newErrorAccessDeniedForDependencyException, + "AccountAlreadyRegisteredException": newErrorAccountAlreadyRegisteredException, "AccountNotFoundException": newErrorAccountNotFoundException, + "AccountNotRegisteredException": newErrorAccountNotRegisteredException, "AccountOwnerNotVerifiedException": newErrorAccountOwnerNotVerifiedException, "AlreadyInOrganizationException": newErrorAlreadyInOrganizationException, "ChildNotFoundException": newErrorChildNotFoundException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go b/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go index 92777c50a..7a87e3ba3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go b/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go index 9a01bcc84..1ad36ef70 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go @@ -534,7 +534,9 @@ func (c *Personalize) CreateDatasetImportJobRequest(input *CreateDatasetImportJo // Creates a job that imports training data from your data source (an Amazon // S3 bucket) to an Amazon Personalize dataset. To allow Amazon Personalize // to import the training data, you must specify an AWS Identity and Access -// Management (IAM) role that has permission to read from the data source. +// Management (IAM) role that has permission to read from the data source, as +// Amazon Personalize makes a copy of your data and processes it in an internal +// AWS system. // // The dataset import job replaces any previous data in the dataset. // @@ -727,6 +729,95 @@ func (c *Personalize) CreateEventTrackerWithContext(ctx aws.Context, input *Crea return out, req.Send() } +const opCreateFilter = "CreateFilter" + +// CreateFilterRequest generates a "aws/request.Request" representing the +// client's request for the CreateFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateFilter for more information on using the CreateFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateFilterRequest method. 
+// req, resp := client.CreateFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateFilter +func (c *Personalize) CreateFilterRequest(input *CreateFilterInput) (req *request.Request, output *CreateFilterOutput) { + op := &request.Operation{ + Name: opCreateFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateFilterInput{} + } + + output = &CreateFilterOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateFilter API operation for Amazon Personalize. +// +// Creates a recommendation filter. For more information, see Using Filters +// with Amazon Personalize (https://docs.aws.amazon.com/personalize/latest/dg/filters.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation CreateFilter for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// Provide a valid value for the field or parameter. +// +// * ResourceAlreadyExistsException +// The specified resource already exists. +// +// * ResourceNotFoundException +// Could not find the specified resource. +// +// * LimitExceededException +// The limit on the number of requests per second has been exceeded. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/CreateFilter +func (c *Personalize) CreateFilter(input *CreateFilterInput) (*CreateFilterOutput, error) { + req, out := c.CreateFilterRequest(input) + return out, req.Send() +} + +// CreateFilterWithContext is the same as CreateFilter with the addition of +// the ability to pass a context and additional request options. +// +// See CreateFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) CreateFilterWithContext(ctx aws.Context, input *CreateFilterInput, opts ...request.Option) (*CreateFilterOutput, error) { + req, out := c.CreateFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateSchema = "CreateSchema" // CreateSchemaRequest generates a "aws/request.Request" representing the @@ -1046,6 +1137,9 @@ func (c *Personalize) CreateSolutionVersionRequest(input *CreateSolutionVersionI // * ResourceNotFoundException // Could not find the specified resource. // +// * LimitExceededException +// The limit on the number of requests per second has been exceeded. +// // * ResourceInUseException // The specified resource is in use. // @@ -1429,6 +1523,92 @@ func (c *Personalize) DeleteEventTrackerWithContext(ctx aws.Context, input *Dele return out, req.Send() } +const opDeleteFilter = "DeleteFilter" + +// DeleteFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See DeleteFilter for more information on using the DeleteFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteFilterRequest method. +// req, resp := client.DeleteFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteFilter +func (c *Personalize) DeleteFilterRequest(input *DeleteFilterInput) (req *request.Request, output *DeleteFilterOutput) { + op := &request.Operation{ + Name: opDeleteFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFilterInput{} + } + + output = &DeleteFilterOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteFilter API operation for Amazon Personalize. +// +// Deletes a filter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DeleteFilter for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// Provide a valid value for the field or parameter. +// +// * ResourceNotFoundException +// Could not find the specified resource. +// +// * ResourceInUseException +// The specified resource is in use. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DeleteFilter +func (c *Personalize) DeleteFilter(input *DeleteFilterInput) (*DeleteFilterOutput, error) { + req, out := c.DeleteFilterRequest(input) + return out, req.Send() +} + +// DeleteFilterWithContext is the same as DeleteFilter with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DeleteFilterWithContext(ctx aws.Context, input *DeleteFilterInput, opts ...request.Option) (*DeleteFilterOutput, error) { + req, out := c.DeleteFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteSchema = "DeleteSchema" // DeleteSchemaRequest generates a "aws/request.Request" representing the @@ -2279,6 +2459,88 @@ func (c *Personalize) DescribeFeatureTransformationWithContext(ctx aws.Context, return out, req.Send() } +const opDescribeFilter = "DescribeFilter" + +// DescribeFilterRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFilter operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeFilter for more information on using the DescribeFilter +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeFilterRequest method. +// req, resp := client.DescribeFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeFilter +func (c *Personalize) DescribeFilterRequest(input *DescribeFilterInput) (req *request.Request, output *DescribeFilterOutput) { + op := &request.Operation{ + Name: opDescribeFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeFilterInput{} + } + + output = &DescribeFilterOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeFilter API operation for Amazon Personalize. +// +// Describes a filter's properties. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation DescribeFilter for usage and error information. +// +// Returned Error Types: +// * InvalidInputException +// Provide a valid value for the field or parameter. +// +// * ResourceNotFoundException +// Could not find the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/DescribeFilter +func (c *Personalize) DescribeFilter(input *DescribeFilterInput) (*DescribeFilterOutput, error) { + req, out := c.DescribeFilterRequest(input) + return out, req.Send() +} + +// DescribeFilterWithContext is the same as DescribeFilter with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeFilter for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) DescribeFilterWithContext(ctx aws.Context, input *DescribeFilterInput, opts ...request.Option) (*DescribeFilterOutput, error) { + req, out := c.DescribeFilterRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeRecipe = "DescribeRecipe" // DescribeRecipeRequest generates a "aws/request.Request" representing the @@ -3561,113 +3823,195 @@ func (c *Personalize) ListEventTrackersPagesWithContext(ctx aws.Context, input * return p.Err() } -const opListRecipes = "ListRecipes" +const opListFilters = "ListFilters" -// ListRecipesRequest generates a "aws/request.Request" representing the -// client's request for the ListRecipes operation. The "output" return +// ListFiltersRequest generates a "aws/request.Request" representing the +// client's request for the ListFilters operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See ListRecipes for more information on using the ListRecipes +// See ListFilters for more information on using the ListFilters // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListRecipesRequest method. -// req, resp := client.ListRecipesRequest(params) +// // Example sending a request using the ListFiltersRequest method. +// req, resp := client.ListFiltersRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListRecipes -func (c *Personalize) ListRecipesRequest(input *ListRecipesInput) (req *request.Request, output *ListRecipesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListFilters +func (c *Personalize) ListFiltersRequest(input *ListFiltersInput) (req *request.Request, output *ListFiltersOutput) { op := &request.Operation{ - Name: opListRecipes, + Name: opListFilters, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "maxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListRecipesInput{} + input = &ListFiltersInput{} } - output = &ListRecipesOutput{} + output = &ListFiltersOutput{} req = c.newRequest(op, input, output) return } -// ListRecipes API operation for Amazon Personalize. +// ListFilters API operation for Amazon Personalize. // -// Returns a list of available recipes. The response provides the properties -// for each recipe, including the recipe's Amazon Resource Name (ARN). +// Lists all filters that belong to a given dataset group. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Personalize's -// API operation ListRecipes for usage and error information. +// API operation ListFilters for usage and error information. // // Returned Error Types: +// * InvalidInputException +// Provide a valid value for the field or parameter. +// // * InvalidNextTokenException // The token is not valid. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListRecipes -func (c *Personalize) ListRecipes(input *ListRecipesInput) (*ListRecipesOutput, error) { - req, out := c.ListRecipesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListFilters +func (c *Personalize) ListFilters(input *ListFiltersInput) (*ListFiltersOutput, error) { + req, out := c.ListFiltersRequest(input) return out, req.Send() } -// ListRecipesWithContext is the same as ListRecipes with the addition of +// ListFiltersWithContext is the same as ListFilters with the addition of // the ability to pass a context and additional request options. // -// See ListRecipes for details on how to use this API operation. +// See ListFilters for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Personalize) ListRecipesWithContext(ctx aws.Context, input *ListRecipesInput, opts ...request.Option) (*ListRecipesOutput, error) { - req, out := c.ListRecipesRequest(input) +func (c *Personalize) ListFiltersWithContext(ctx aws.Context, input *ListFiltersInput, opts ...request.Option) (*ListFiltersOutput, error) { + req, out := c.ListFiltersRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListRecipesPages iterates over the pages of a ListRecipes operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +const opListRecipes = "ListRecipes" + +// ListRecipesRequest generates a "aws/request.Request" representing the +// client's request for the ListRecipes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See ListRecipes method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: This operation can generate multiple requests to a service. +// See ListRecipes for more information on using the ListRecipes +// API call, and error handling. // -// // Example iterating over at most 3 pages of a ListRecipes operation. -// pageNum := 0 -// err := client.ListRecipesPages(params, -// func(page *personalize.ListRecipesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -func (c *Personalize) ListRecipesPages(input *ListRecipesInput, fn func(*ListRecipesOutput, bool) bool) error { - return c.ListRecipesPagesWithContext(aws.BackgroundContext(), input, fn) -} - +// +// // Example sending a request using the ListRecipesRequest method. +// req, resp := client.ListRecipesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListRecipes +func (c *Personalize) ListRecipesRequest(input *ListRecipesInput) (req *request.Request, output *ListRecipesOutput) { + op := &request.Operation{ + Name: opListRecipes, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRecipesInput{} + } + + output = &ListRecipesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRecipes API operation for Amazon Personalize. +// +// Returns a list of available recipes. The response provides the properties +// for each recipe, including the recipe's Amazon Resource Name (ARN). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Personalize's +// API operation ListRecipes for usage and error information. +// +// Returned Error Types: +// * InvalidNextTokenException +// The token is not valid. 
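// Usage sketch (illustrative only, not part of the generated SDK source):
// checking a filter's status with DescribeFilterWithContext. The timeout
// value and the idea of polling until the status settles are assumptions.
import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/personalize"
)

func filterStatus(filterArn string) (string, error) {
	svc := personalize.New(session.Must(session.NewSession()))

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := svc.DescribeFilterWithContext(ctx, &personalize.DescribeFilterInput{
		FilterArn: aws.String(filterArn),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.Filter.Status), nil
}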
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/personalize-2018-05-22/ListRecipes +func (c *Personalize) ListRecipes(input *ListRecipesInput) (*ListRecipesOutput, error) { + req, out := c.ListRecipesRequest(input) + return out, req.Send() +} + +// ListRecipesWithContext is the same as ListRecipes with the addition of +// the ability to pass a context and additional request options. +// +// See ListRecipes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Personalize) ListRecipesWithContext(ctx aws.Context, input *ListRecipesInput, opts ...request.Option) (*ListRecipesOutput, error) { + req, out := c.ListRecipesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListRecipesPages iterates over the pages of a ListRecipes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRecipes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRecipes operation. +// pageNum := 0 +// err := client.ListRecipesPages(params, +// func(page *personalize.ListRecipesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Personalize) ListRecipesPages(input *ListRecipesInput, fn func(*ListRecipesOutput, bool) bool) error { + return c.ListRecipesPagesWithContext(aws.BackgroundContext(), input, fn) +} + // ListRecipesPagesWithContext same as ListRecipesPages except // it takes a Context and allows setting request options on the pages. // @@ -4431,12 +4775,19 @@ type BatchInferenceJob struct { // The Amazon Resource Name (ARN) of the batch inference job. BatchInferenceJobArn *string `locationName:"batchInferenceJobArn" type:"string"` + // A string to string map of the configuration details of a batch inference + // job. + BatchInferenceJobConfig *BatchInferenceJobConfig `locationName:"batchInferenceJobConfig" type:"structure"` + // The time at which the batch inference job was created. CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` // If the batch inference job failed, the reason for the failure. FailureReason *string `locationName:"failureReason" type:"string"` + // The ARN of the filter used on the batch inference job. + FilterArn *string `locationName:"filterArn" type:"string"` + // The Amazon S3 path that leads to the input data used to generate the batch // inference job. JobInput *BatchInferenceJobInput `locationName:"jobInput" type:"structure"` @@ -4492,6 +4843,12 @@ func (s *BatchInferenceJob) SetBatchInferenceJobArn(v string) *BatchInferenceJob return s } +// SetBatchInferenceJobConfig sets the BatchInferenceJobConfig field's value. +func (s *BatchInferenceJob) SetBatchInferenceJobConfig(v *BatchInferenceJobConfig) *BatchInferenceJob { + s.BatchInferenceJobConfig = v + return s +} + // SetCreationDateTime sets the CreationDateTime field's value. 
func (s *BatchInferenceJob) SetCreationDateTime(v time.Time) *BatchInferenceJob { s.CreationDateTime = &v @@ -4504,6 +4861,12 @@ func (s *BatchInferenceJob) SetFailureReason(v string) *BatchInferenceJob { return s } +// SetFilterArn sets the FilterArn field's value. +func (s *BatchInferenceJob) SetFilterArn(v string) *BatchInferenceJob { + s.FilterArn = &v + return s +} + // SetJobInput sets the JobInput field's value. func (s *BatchInferenceJob) SetJobInput(v *BatchInferenceJobInput) *BatchInferenceJob { s.JobInput = v @@ -4552,6 +4915,31 @@ func (s *BatchInferenceJob) SetStatus(v string) *BatchInferenceJob { return s } +// The configuration details of a batch inference job. +type BatchInferenceJobConfig struct { + _ struct{} `type:"structure"` + + // A string to string map specifying the inference hyperparameters you wish + // to use for hyperparameter optimization. See customizing-solution-config-hpo. + ItemExplorationConfig map[string]*string `locationName:"itemExplorationConfig" type:"map"` +} + +// String returns the string representation +func (s BatchInferenceJobConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchInferenceJobConfig) GoString() string { + return s.String() +} + +// SetItemExplorationConfig sets the ItemExplorationConfig field's value. +func (s *BatchInferenceJobConfig) SetItemExplorationConfig(v map[string]*string) *BatchInferenceJobConfig { + s.ItemExplorationConfig = v + return s +} + // The input configuration of a batch inference job. type BatchInferenceJobInput struct { _ struct{} `type:"structure"` @@ -4662,6 +5050,9 @@ type BatchInferenceJobSummary struct { // The time at which the batch inference job was last updated. LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + // The ARN of the solution version used by the batch inference job. + SolutionVersionArn *string `locationName:"solutionVersionArn" type:"string"` + // The status of the batch inference job. The status is one of the following // values: // @@ -4715,6 +5106,12 @@ func (s *BatchInferenceJobSummary) SetLastUpdatedDateTime(v time.Time) *BatchInf return s } +// SetSolutionVersionArn sets the SolutionVersionArn field's value. +func (s *BatchInferenceJobSummary) SetSolutionVersionArn(v string) *BatchInferenceJobSummary { + s.SolutionVersionArn = &v + return s +} + // SetStatus sets the Status field's value. func (s *BatchInferenceJobSummary) SetStatus(v string) *BatchInferenceJobSummary { s.Status = &v @@ -4729,6 +5126,9 @@ type Campaign struct { // The Amazon Resource Name (ARN) of the campaign. CampaignArn *string `locationName:"campaignArn" type:"string"` + // The configuration details of a campaign. + CampaignConfig *CampaignConfig `locationName:"campaignConfig" type:"structure"` + // The date and time (in Unix format) that the campaign was created. CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` @@ -4778,6 +5178,12 @@ func (s *Campaign) SetCampaignArn(v string) *Campaign { return s } +// SetCampaignConfig sets the CampaignConfig field's value. +func (s *Campaign) SetCampaignConfig(v *CampaignConfig) *Campaign { + s.CampaignConfig = v + return s +} + // SetCreationDateTime sets the CreationDateTime field's value. func (s *Campaign) SetCreationDateTime(v time.Time) *Campaign { s.CreationDateTime = &v @@ -4826,6 +5232,31 @@ func (s *Campaign) SetStatus(v string) *Campaign { return s } +// The configuration details of a campaign. 
+type CampaignConfig struct { + _ struct{} `type:"structure"` + + // A string to string map specifying the inference hyperparameters you wish + // to use for hyperparameter optimization. See customizing-solution-config-hpo. + ItemExplorationConfig map[string]*string `locationName:"itemExplorationConfig" type:"map"` +} + +// String returns the string representation +func (s CampaignConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CampaignConfig) GoString() string { + return s.String() +} + +// SetItemExplorationConfig sets the ItemExplorationConfig field's value. +func (s *CampaignConfig) SetItemExplorationConfig(v map[string]*string) *CampaignConfig { + s.ItemExplorationConfig = v + return s +} + // Provides a summary of the properties of a campaign. For a complete listing, // call the DescribeCampaign API. type CampaignSummary struct { @@ -4907,6 +5338,9 @@ func (s *CampaignSummary) SetStatus(v string) *CampaignSummary { type CampaignUpdateSummary struct { _ struct{} `type:"structure"` + // The configuration details of a campaign. + CampaignConfig *CampaignConfig `locationName:"campaignConfig" type:"structure"` + // The date and time (in Unix time) that the campaign update was created. CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` @@ -4943,6 +5377,12 @@ func (s CampaignUpdateSummary) GoString() string { return s.String() } +// SetCampaignConfig sets the CampaignConfig field's value. +func (s *CampaignUpdateSummary) SetCampaignConfig(v *CampaignConfig) *CampaignUpdateSummary { + s.CampaignConfig = v + return s +} + // SetCreationDateTime sets the CreationDateTime field's value. func (s *CampaignUpdateSummary) SetCreationDateTime(v time.Time) *CampaignUpdateSummary { s.CreationDateTime = &v @@ -5073,6 +5513,13 @@ func (s *ContinuousHyperParameterRange) SetName(v string) *ContinuousHyperParame type CreateBatchInferenceJobInput struct { _ struct{} `type:"structure"` + // The configuration details of a batch inference job. + BatchInferenceJobConfig *BatchInferenceJobConfig `locationName:"batchInferenceJobConfig" type:"structure"` + + // The ARN of the filter to apply to the batch inference job. For more information + // on using filters, see Using Filters with Amazon Personalize. + FilterArn *string `locationName:"filterArn" type:"string"` + // The Amazon S3 path that leads to the input file to base your recommendations // on. The input material must be in JSON format. // @@ -5153,6 +5600,18 @@ func (s *CreateBatchInferenceJobInput) Validate() error { return nil } +// SetBatchInferenceJobConfig sets the BatchInferenceJobConfig field's value. +func (s *CreateBatchInferenceJobInput) SetBatchInferenceJobConfig(v *BatchInferenceJobConfig) *CreateBatchInferenceJobInput { + s.BatchInferenceJobConfig = v + return s +} + +// SetFilterArn sets the FilterArn field's value. +func (s *CreateBatchInferenceJobInput) SetFilterArn(v string) *CreateBatchInferenceJobInput { + s.FilterArn = &v + return s +} + // SetJobInput sets the JobInput field's value. func (s *CreateBatchInferenceJobInput) SetJobInput(v *BatchInferenceJobInput) *CreateBatchInferenceJobInput { s.JobInput = v @@ -5215,6 +5674,9 @@ func (s *CreateBatchInferenceJobOutput) SetBatchInferenceJobArn(v string) *Creat type CreateCampaignInput struct { _ struct{} `type:"structure"` + // The configuration details of a campaign. 
+ CampaignConfig *CampaignConfig `locationName:"campaignConfig" type:"structure"` + // Specifies the requested minimum provisioned transactions (recommendations) // per second that Amazon Personalize will support. // @@ -5268,6 +5730,12 @@ func (s *CreateCampaignInput) Validate() error { return nil } +// SetCampaignConfig sets the CampaignConfig field's value. +func (s *CreateCampaignInput) SetCampaignConfig(v *CampaignConfig) *CreateCampaignInput { + s.CampaignConfig = v + return s +} + // SetMinProvisionedTPS sets the MinProvisionedTPS field's value. func (s *CreateCampaignInput) SetMinProvisionedTPS(v int64) *CreateCampaignInput { s.MinProvisionedTPS = &v @@ -5704,6 +6172,108 @@ func (s *CreateEventTrackerOutput) SetTrackingId(v string) *CreateEventTrackerOu return s } +type CreateFilterInput struct { + _ struct{} `type:"structure"` + + // The ARN of the dataset group that the filter will belong to. + // + // DatasetGroupArn is a required field + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string" required:"true"` + + // The filter expression that designates the interaction types that the filter + // will filter out. A filter expression must follow the following format: + // + // EXCLUDE itemId WHERE INTERACTIONS.event_type in ("EVENT_TYPE") + // + // Where "EVENT_TYPE" is the type of event to filter out. To filter out all + // items with any interactions history, set "*" as the EVENT_TYPE. For more + // information, see Using Filters with Amazon Personalize (https://docs.aws.amazon.com/personalize/latest/dg/filters.html). + // + // FilterExpression is a required field + FilterExpression *string `locationName:"filterExpression" min:"1" type:"string" required:"true" sensitive:"true"` + + // The name of the filter to create. + // + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateFilterInput"} + if s.DatasetGroupArn == nil { + invalidParams.Add(request.NewErrParamRequired("DatasetGroupArn")) + } + if s.FilterExpression == nil { + invalidParams.Add(request.NewErrParamRequired("FilterExpression")) + } + if s.FilterExpression != nil && len(*s.FilterExpression) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FilterExpression", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *CreateFilterInput) SetDatasetGroupArn(v string) *CreateFilterInput { + s.DatasetGroupArn = &v + return s +} + +// SetFilterExpression sets the FilterExpression field's value. +func (s *CreateFilterInput) SetFilterExpression(v string) *CreateFilterInput { + s.FilterExpression = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateFilterInput) SetName(v string) *CreateFilterInput { + s.Name = &v + return s +} + +type CreateFilterOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the new filter. 
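// Usage sketch (illustrative only, not part of the generated SDK source):
// wiring the new BatchInferenceJobConfig/CampaignConfig structures and the
// FilterArn field into request inputs. The "explorationWeight" key and its
// value are placeholders; see customizing-solution-config-hpo.
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/personalize"
)

func withExplorationAndFilter(in *personalize.CreateBatchInferenceJobInput, filterArn string) *personalize.CreateBatchInferenceJobInput {
	cfg := &personalize.BatchInferenceJobConfig{
		ItemExplorationConfig: map[string]*string{
			"explorationWeight": aws.String("0.3"),
		},
	}
	return in.SetFilterArn(filterArn).SetBatchInferenceJobConfig(cfg)
}

func campaignConfig() *personalize.CampaignConfig {
	return &personalize.CampaignConfig{
		ItemExplorationConfig: map[string]*string{
			"explorationWeight": aws.String("0.3"),
		},
	}
}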
+ FilterArn *string `locationName:"filterArn" type:"string"` +} + +// String returns the string representation +func (s CreateFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateFilterOutput) GoString() string { + return s.String() +} + +// SetFilterArn sets the FilterArn field's value. +func (s *CreateFilterOutput) SetFilterArn(v string) *CreateFilterOutput { + s.FilterArn = &v + return s +} + type CreateSchemaInput struct { _ struct{} `type:"structure"` @@ -7088,30 +7658,30 @@ func (s DeleteEventTrackerOutput) GoString() string { return s.String() } -type DeleteSchemaInput struct { +type DeleteFilterInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the schema to delete. + // The ARN of the filter to delete. // - // SchemaArn is a required field - SchemaArn *string `locationName:"schemaArn" type:"string" required:"true"` + // FilterArn is a required field + FilterArn *string `locationName:"filterArn" type:"string" required:"true"` } // String returns the string representation -func (s DeleteSchemaInput) String() string { +func (s DeleteFilterInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSchemaInput) GoString() string { +func (s DeleteFilterInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteSchemaInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSchemaInput"} - if s.SchemaArn == nil { - invalidParams.Add(request.NewErrParamRequired("SchemaArn")) +func (s *DeleteFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFilterInput"} + if s.FilterArn == nil { + invalidParams.Add(request.NewErrParamRequired("FilterArn")) } if invalidParams.Len() > 0 { @@ -7120,48 +7690,100 @@ func (s *DeleteSchemaInput) Validate() error { return nil } -// SetSchemaArn sets the SchemaArn field's value. -func (s *DeleteSchemaInput) SetSchemaArn(v string) *DeleteSchemaInput { - s.SchemaArn = &v +// SetFilterArn sets the FilterArn field's value. +func (s *DeleteFilterInput) SetFilterArn(v string) *DeleteFilterInput { + s.FilterArn = &v return s } -type DeleteSchemaOutput struct { +type DeleteFilterOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s DeleteSchemaOutput) String() string { +func (s DeleteFilterOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSchemaOutput) GoString() string { +func (s DeleteFilterOutput) GoString() string { return s.String() } -type DeleteSolutionInput struct { +type DeleteSchemaInput struct { _ struct{} `type:"structure"` - // The ARN of the solution to delete. + // The Amazon Resource Name (ARN) of the schema to delete. 
// - // SolutionArn is a required field - SolutionArn *string `locationName:"solutionArn" type:"string" required:"true"` + // SchemaArn is a required field + SchemaArn *string `locationName:"schemaArn" type:"string" required:"true"` } // String returns the string representation -func (s DeleteSolutionInput) String() string { +func (s DeleteSchemaInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteSolutionInput) GoString() string { +func (s DeleteSchemaInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteSolutionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSolutionInput"} +func (s *DeleteSchemaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSchemaInput"} + if s.SchemaArn == nil { + invalidParams.Add(request.NewErrParamRequired("SchemaArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSchemaArn sets the SchemaArn field's value. +func (s *DeleteSchemaInput) SetSchemaArn(v string) *DeleteSchemaInput { + s.SchemaArn = &v + return s +} + +type DeleteSchemaOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSchemaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSchemaOutput) GoString() string { + return s.String() +} + +type DeleteSolutionInput struct { + _ struct{} `type:"structure"` + + // The ARN of the solution to delete. + // + // SolutionArn is a required field + SolutionArn *string `locationName:"solutionArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSolutionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSolutionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSolutionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSolutionInput"} if s.SolutionArn == nil { invalidParams.Add(request.NewErrParamRequired("SolutionArn")) } @@ -7690,6 +8312,67 @@ func (s *DescribeFeatureTransformationOutput) SetFeatureTransformation(v *Featur return s } +type DescribeFilterInput struct { + _ struct{} `type:"structure"` + + // The ARN of the filter to describe. + // + // FilterArn is a required field + FilterArn *string `locationName:"filterArn" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFilterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeFilterInput"} + if s.FilterArn == nil { + invalidParams.Add(request.NewErrParamRequired("FilterArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilterArn sets the FilterArn field's value. +func (s *DescribeFilterInput) SetFilterArn(v string) *DescribeFilterInput { + s.FilterArn = &v + return s +} + +type DescribeFilterOutput struct { + _ struct{} `type:"structure"` + + // The filter's details. 
+ Filter *Filter `locationName:"filter" type:"structure"` +} + +// String returns the string representation +func (s DescribeFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeFilterOutput) GoString() string { + return s.String() +} + +// SetFilter sets the Filter field's value. +func (s *DescribeFilterOutput) SetFilter(v *Filter) *DescribeFilterOutput { + s.Filter = v + return s +} + type DescribeRecipeInput struct { _ struct{} `type:"structure"` @@ -8171,6 +8854,178 @@ func (s *FeatureTransformation) SetStatus(v string) *FeatureTransformation { return s } +// Contains information on a recommendation filter, including its ARN, status, +// and filter expression. +type Filter struct { + _ struct{} `type:"structure"` + + // The time at which the filter was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The ARN of the dataset group to which the filter belongs. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // If the filter failed, the reason for its failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The ARN of the filter. + FilterArn *string `locationName:"filterArn" type:"string"` + + // Specifies the type of item interactions to filter out of recommendation results. + // The filter expression must follow the following format: + // + // EXCLUDE itemId WHERE INTERACTIONS.event_type in ("EVENT_TYPE") + // + // Where "EVENT_TYPE" is the type of event to filter out. For more information, + // see Using Filters with Amazon Personalize (https://docs.aws.amazon.com/personalize/latest/dg/filters.html). + FilterExpression *string `locationName:"filterExpression" min:"1" type:"string" sensitive:"true"` + + // The time at which the filter was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the filter. + Name *string `locationName:"name" min:"1" type:"string"` + + // The status of the filter. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *Filter) SetCreationDateTime(v time.Time) *Filter { + s.CreationDateTime = &v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *Filter) SetDatasetGroupArn(v string) *Filter { + s.DatasetGroupArn = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *Filter) SetFailureReason(v string) *Filter { + s.FailureReason = &v + return s +} + +// SetFilterArn sets the FilterArn field's value. +func (s *Filter) SetFilterArn(v string) *Filter { + s.FilterArn = &v + return s +} + +// SetFilterExpression sets the FilterExpression field's value. +func (s *Filter) SetFilterExpression(v string) *Filter { + s.FilterExpression = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *Filter) SetLastUpdatedDateTime(v time.Time) *Filter { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *Filter) SetName(v string) *Filter { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *Filter) SetStatus(v string) *Filter { + s.Status = &v + return s +} + +// A short summary of a filter's attributes. +type FilterSummary struct { + _ struct{} `type:"structure"` + + // The time at which the filter was created. + CreationDateTime *time.Time `locationName:"creationDateTime" type:"timestamp"` + + // The ARN of the dataset group to which the filter belongs. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // If the filter failed, the reason for the failure. + FailureReason *string `locationName:"failureReason" type:"string"` + + // The ARN of the filter. + FilterArn *string `locationName:"filterArn" type:"string"` + + // The time at which the filter was last updated. + LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` + + // The name of the filter. + Name *string `locationName:"name" min:"1" type:"string"` + + // The status of the filter. + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s FilterSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterSummary) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *FilterSummary) SetCreationDateTime(v time.Time) *FilterSummary { + s.CreationDateTime = &v + return s +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *FilterSummary) SetDatasetGroupArn(v string) *FilterSummary { + s.DatasetGroupArn = &v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *FilterSummary) SetFailureReason(v string) *FilterSummary { + s.FailureReason = &v + return s +} + +// SetFilterArn sets the FilterArn field's value. +func (s *FilterSummary) SetFilterArn(v string) *FilterSummary { + s.FilterArn = &v + return s +} + +// SetLastUpdatedDateTime sets the LastUpdatedDateTime field's value. +func (s *FilterSummary) SetLastUpdatedDateTime(v time.Time) *FilterSummary { + s.LastUpdatedDateTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *FilterSummary) SetName(v string) *FilterSummary { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *FilterSummary) SetStatus(v string) *FilterSummary { + s.Status = &v + return s +} + type GetSolutionMetricsInput struct { _ struct{} `type:"structure"` @@ -8310,7 +9165,7 @@ type HPOObjective struct { // A regular expression for finding the metric in the training job logs. MetricRegex *string `locationName:"metricRegex" type:"string"` - // The data type of the metric. + // The type of the metric. Valid values are Maximize and Minimize. Type *string `locationName:"type" type:"string"` } @@ -8507,8 +9362,8 @@ func (s *IntegerHyperParameterRange) SetName(v string) *IntegerHyperParameterRan // Provide a valid value for the field or parameter. type InvalidInputException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8525,17 +9380,17 @@ func (s InvalidInputException) GoString() string { func newErrorInvalidInputException(v protocol.ResponseMetadata) error { return &InvalidInputException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidInputException) Code() string { +func (s *InvalidInputException) Code() string { return "InvalidInputException" } // Message returns the exception's message. -func (s InvalidInputException) Message() string { +func (s *InvalidInputException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8543,28 +9398,28 @@ func (s InvalidInputException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInputException) OrigErr() error { +func (s *InvalidInputException) OrigErr() error { return nil } -func (s InvalidInputException) Error() string { +func (s *InvalidInputException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInputException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInputException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInputException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInputException) RequestID() string { + return s.RespMetadata.RequestID } // The token is not valid. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8581,17 +9436,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8599,28 +9454,28 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The limit on the number of requests per second has been exceeded. 
type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8637,17 +9492,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8655,22 +9510,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListBatchInferenceJobsInput struct { @@ -9192,6 +10047,93 @@ func (s *ListEventTrackersOutput) SetNextToken(v string) *ListEventTrackersOutpu return s } +type ListFiltersInput struct { + _ struct{} `type:"structure"` + + // The ARN of the dataset group that contains the filters. + DatasetGroupArn *string `locationName:"datasetGroupArn" type:"string"` + + // The maximum number of filters to return. + MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // A token returned from the previous call to ListFilters for getting the next + // set of filters (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFiltersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListFiltersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListFiltersInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatasetGroupArn sets the DatasetGroupArn field's value. +func (s *ListFiltersInput) SetDatasetGroupArn(v string) *ListFiltersInput { + s.DatasetGroupArn = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListFiltersInput) SetMaxResults(v int64) *ListFiltersInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListFiltersInput) SetNextToken(v string) *ListFiltersInput { + s.NextToken = &v + return s +} + +type ListFiltersOutput struct { + _ struct{} `type:"structure"` + + // A list of returned filters. + Filters []*FilterSummary `type:"list"` + + // A token for getting the next set of filters (if they exist). + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListFiltersOutput) GoString() string { + return s.String() +} + +// SetFilters sets the Filters field's value. +func (s *ListFiltersOutput) SetFilters(v []*FilterSummary) *ListFiltersOutput { + s.Filters = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListFiltersOutput) SetNextToken(v string) *ListFiltersOutput { + s.NextToken = &v + return s +} + type ListRecipesInput struct { _ struct{} `type:"structure"` @@ -9699,8 +10641,8 @@ func (s *RecipeSummary) SetStatus(v string) *RecipeSummary { // The specified resource already exists. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9717,17 +10659,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9735,28 +10677,28 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource is in use. 
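ListFiltersInput and ListFiltersOutput above carry a NextToken for paging. A minimal paging loop over the new operation could look like the sketch below; the helper name is hypothetical and the dataset group ARN is supplied by the caller:

    // listAllFilters pages through ListFilters for one dataset group.
    // Assumes "github.com/aws/aws-sdk-go/aws" and
    // "github.com/aws/aws-sdk-go/service/personalize" are imported.
    func listAllFilters(svc *personalize.Personalize, datasetGroupArn string) ([]*personalize.FilterSummary, error) {
        var all []*personalize.FilterSummary
        input := &personalize.ListFiltersInput{
            DatasetGroupArn: aws.String(datasetGroupArn),
            MaxResults:      aws.Int64(25),
        }
        for {
            out, err := svc.ListFilters(input)
            if err != nil {
                return nil, err
            }
            all = append(all, out.Filters...)
            if aws.StringValue(out.NextToken) == "" {
                return all, nil
            }
            input.NextToken = out.NextToken
        }
    }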
type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9773,17 +10715,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9791,28 +10733,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // Could not find the specified resource. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9829,17 +10771,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9847,22 +10789,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
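The exception shapes in these hunks switch to pointer receivers and an exported RespMetadata field, which is how the SDK now exposes modeled errors such as ResourceNotFoundException. Callers usually branch on the error code; a small, illustrative sketch of that pattern (the helper name is hypothetical):

    // isNotFound reports whether err is a Personalize ResourceNotFoundException.
    // Assumes "github.com/aws/aws-sdk-go/aws/awserr" and
    // "github.com/aws/aws-sdk-go/service/personalize" are imported.
    func isNotFound(err error) bool {
        if aerr, ok := err.(awserr.Error); ok {
            return aerr.Code() == personalize.ErrCodeResourceNotFoundException
        }
        return false
    }

Because the exception types are now returned as pointers, matching on the concrete type with errors.As (for example, a *personalize.ResourceNotFoundException target) should also work with this SDK version.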
-func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The configuration details of an Amazon S3 input or output bucket. @@ -10275,6 +11217,10 @@ type SolutionVersion struct { // The UPDATE option can only be used after you've created a solution version // with the FULL option and the training solution uses the native-recipe-hrnn-coldstart. TrainingMode *string `locationName:"trainingMode" type:"string" enum:"TrainingMode"` + + // If hyperparameter optimization was performed, contains the hyperparameter + // values of the best performing model. + TunedHPOParams *TunedHPOParams `locationName:"tunedHPOParams" type:"structure"` } // String returns the string representation @@ -10371,6 +11317,12 @@ func (s *SolutionVersion) SetTrainingMode(v string) *SolutionVersion { return s } +// SetTunedHPOParams sets the TunedHPOParams field's value. +func (s *SolutionVersion) SetTunedHPOParams(v *TunedHPOParams) *SolutionVersion { + s.TunedHPOParams = v + return s +} + // Provides a summary of the properties of a solution version. For a complete // listing, call the DescribeSolutionVersion API. type SolutionVersionSummary struct { @@ -10436,6 +11388,31 @@ func (s *SolutionVersionSummary) SetStatus(v string) *SolutionVersionSummary { return s } +// If hyperparameter optimization (HPO) was performed, contains the hyperparameter +// values of the best performing model. +type TunedHPOParams struct { + _ struct{} `type:"structure"` + + // A list of the hyperparameter values of the best performing model. + AlgorithmHyperParameters map[string]*string `locationName:"algorithmHyperParameters" type:"map"` +} + +// String returns the string representation +func (s TunedHPOParams) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TunedHPOParams) GoString() string { + return s.String() +} + +// SetAlgorithmHyperParameters sets the AlgorithmHyperParameters field's value. +func (s *TunedHPOParams) SetAlgorithmHyperParameters(v map[string]*string) *TunedHPOParams { + s.AlgorithmHyperParameters = v + return s +} + type UpdateCampaignInput struct { _ struct{} `type:"structure"` @@ -10444,6 +11421,9 @@ type UpdateCampaignInput struct { // CampaignArn is a required field CampaignArn *string `locationName:"campaignArn" type:"string" required:"true"` + // The configuration details of a campaign. + CampaignConfig *CampaignConfig `locationName:"campaignConfig" type:"structure"` + // Specifies the requested minimum provisioned transactions (recommendations) // per second that Amazon Personalize will support. MinProvisionedTPS *int64 `locationName:"minProvisionedTPS" min:"1" type:"integer"` @@ -10484,6 +11464,12 @@ func (s *UpdateCampaignInput) SetCampaignArn(v string) *UpdateCampaignInput { return s } +// SetCampaignConfig sets the CampaignConfig field's value. +func (s *UpdateCampaignInput) SetCampaignConfig(v *CampaignConfig) *UpdateCampaignInput { + s.CampaignConfig = v + return s +} + // SetMinProvisionedTPS sets the MinProvisionedTPS field's value. 
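The SolutionVersion shape above gains a TunedHPOParams field holding the winning hyperparameter values from HPO, and UpdateCampaignInput gains a CampaignConfig field. A rough sketch of reading the tuned values back, assuming the DescribeSolutionVersion operation defined earlier in this file (the helper name is hypothetical):

    // printTunedHPOParams dumps the best hyperparameter values found by HPO, if any.
    // Assumes "fmt", "github.com/aws/aws-sdk-go/aws" and
    // "github.com/aws/aws-sdk-go/service/personalize" are imported.
    func printTunedHPOParams(svc *personalize.Personalize, solutionVersionArn string) error {
        out, err := svc.DescribeSolutionVersion(&personalize.DescribeSolutionVersionInput{
            SolutionVersionArn: aws.String(solutionVersionArn),
        })
        if err != nil {
            return err
        }
        if params := out.SolutionVersion.TunedHPOParams; params != nil {
            for name, value := range params.AlgorithmHyperParameters {
                fmt.Printf("%s = %s\n", name, aws.StringValue(value))
            }
        }
        return nil
    }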
func (s *UpdateCampaignInput) SetMinProvisionedTPS(v int64) *UpdateCampaignInput { s.MinProvisionedTPS = &v @@ -10524,6 +11510,13 @@ const ( RecipeProviderService = "SERVICE" ) +// RecipeProvider_Values returns all elements of the RecipeProvider enum +func RecipeProvider_Values() []string { + return []string{ + RecipeProviderService, + } +} + const ( // TrainingModeFull is a TrainingMode enum value TrainingModeFull = "FULL" @@ -10531,3 +11524,11 @@ const ( // TrainingModeUpdate is a TrainingMode enum value TrainingModeUpdate = "UPDATE" ) + +// TrainingMode_Values returns all elements of the TrainingMode enum +func TrainingMode_Values() []string { + return []string{ + TrainingModeFull, + TrainingModeUpdate, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go b/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go index 78f55dbe8..e7febdb08 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/pinpoint/api.go b/vendor/github.com/aws/aws-sdk-go/service/pinpoint/api.go index 76144551b..242834aef 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/pinpoint/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/pinpoint/api.go @@ -9637,8 +9637,9 @@ func (c *Pinpoint) UpdateEndpointRequest(input *UpdateEndpointInput) (req *reque // // Creates a new endpoint for an application or updates the settings and attributes // of an existing endpoint for an application. You can also use this operation -// to define custom attributes (Attributes, Metrics, and UserAttributes properties) -// for an endpoint. +// to define custom attributes for an endpoint. If an update includes one or +// more values for a custom attribute, Amazon Pinpoint replaces (overwrites) +// any existing values with the new values. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9737,8 +9738,9 @@ func (c *Pinpoint) UpdateEndpointsBatchRequest(input *UpdateEndpointsBatchInput) // // Creates a new batch of endpoints for an application or updates the settings // and attributes of a batch of existing endpoints for an application. You can -// also use this operation to define custom attributes (Attributes, Metrics, -// and UserAttributes properties) for a batch of endpoints. +// also use this operation to define custom attributes for a batch of endpoints. +// If an update includes one or more values for a custom attribute, Amazon Pinpoint +// replaces (overwrites) any existing values with the new values. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9964,6 +9966,9 @@ func (c *Pinpoint) UpdateJourneyRequest(input *UpdateJourneyInput) (req *request // * TooManyRequestsException // Provides information about an API request or response. // +// * ConflictException +// Provides information about an API request or response. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/pinpoint-2016-12-01/UpdateJourney func (c *Pinpoint) UpdateJourney(input *UpdateJourneyInput) (*UpdateJourneyOutput, error) { req, out := c.UpdateJourneyRequest(input) @@ -12567,6 +12572,10 @@ func (s *ActivitiesResponse) SetNextToken(v string) *ActivitiesResponse { type Activity struct { _ struct{} `type:"structure"` + // The settings for a custom message activity. This type of activity calls an + // AWS Lambda function or web hook that sends messages to participants. + CUSTOM *CustomMessageActivity `type:"structure"` + // The settings for a yes/no split activity. This type of activity sends participants // down one of two paths in a journey, based on conditions that you specify. ConditionalSplit *ConditionalSplitActivity `type:"structure"` @@ -12587,11 +12596,19 @@ type Activity struct { // path) in a journey, based on conditions that you specify. MultiCondition *MultiConditionalSplitActivity `type:"structure"` + // The settings for a push notification activity. This type of activity sends + // a push notification to participants. + PUSH *PushMessageActivity `type:"structure"` + // The settings for a random split activity. This type of activity randomly // sends specified percentages of participants down one of as many as five paths // in a journey, based on conditions that you specify. RandomSplit *RandomSplitActivity `type:"structure"` + // The settings for an SMS activity. This type of activity sends a text message + // to participants. + SMS *SMSMessageActivity `type:"structure"` + // The settings for a wait activity. This type of activity waits for a certain // amount of time or until a specific date and time before moving participants // to the next activity in a journey. @@ -12633,6 +12650,12 @@ func (s *Activity) Validate() error { return nil } +// SetCUSTOM sets the CUSTOM field's value. +func (s *Activity) SetCUSTOM(v *CustomMessageActivity) *Activity { + s.CUSTOM = v + return s +} + // SetConditionalSplit sets the ConditionalSplit field's value. func (s *Activity) SetConditionalSplit(v *ConditionalSplitActivity) *Activity { s.ConditionalSplit = v @@ -12663,12 +12686,24 @@ func (s *Activity) SetMultiCondition(v *MultiConditionalSplitActivity) *Activity return s } +// SetPUSH sets the PUSH field's value. +func (s *Activity) SetPUSH(v *PushMessageActivity) *Activity { + s.PUSH = v + return s +} + // SetRandomSplit sets the RandomSplit field's value. func (s *Activity) SetRandomSplit(v *RandomSplitActivity) *Activity { s.RandomSplit = v return s } +// SetSMS sets the SMS field's value. +func (s *Activity) SetSMS(v *SMSMessageActivity) *Activity { + s.SMS = v + return s +} + // SetWait sets the Wait field's value. func (s *Activity) SetWait(v *WaitActivity) *Activity { s.Wait = v @@ -13039,7 +13074,7 @@ type ApplicationDateRangeKpiResponse struct { // that the data was retrieved for. This value describes the associated metric // and consists of two or more terms, which are comprised of lowercase alphanumeric // characters, separated by a hyphen. For a list of possible values, see the - // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). + // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/analytics-standard-metrics.html). 
// // KpiName is a required field KpiName *string `type:"string" required:"true"` @@ -13177,8 +13212,9 @@ type ApplicationSettingsResource struct { // ApplicationId is a required field ApplicationId *string `type:"string" required:"true"` - // The settings for the AWS Lambda function to use by default as a code hook - // for campaigns in the application. + // The settings for the AWS Lambda function to invoke by default as a code hook + // for campaigns in the application. You can use this hook to customize segments + // that are used by campaigns in the application. CampaignHook *CampaignHook `type:"structure"` // The date and time, in ISO 8601 format, when the application's settings were @@ -13188,9 +13224,9 @@ type ApplicationSettingsResource struct { // The default sending limits for campaigns in the application. Limits *CampaignLimits `type:"structure"` - // The default quiet time for campaigns and journeys in the application. Quiet - // time is a specific time range when messages aren't sent to endpoints, if - // all the following conditions are met: + // The default quiet time for campaigns in the application. Quiet time is a + // specific time range when messages aren't sent to endpoints, if all the following + // conditions are met: // // * The EndpointDemographic.Timezone property of the endpoint is set to // a valid value. @@ -13393,8 +13429,8 @@ func (s *AttributesResource) SetAttributes(v []*string) *AttributesResource { // Provides information about an API request or response. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -13413,17 +13449,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13431,22 +13467,22 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the status and settings of the Baidu (Baidu Cloud Push) channel @@ -13833,6 +13869,32 @@ func (s *BaseKpiResult) SetRows(v []*ResultRow) *BaseKpiResult { return s } +// Specifies the contents of a message that's sent through a custom channel +// to recipients of a campaign. 
+type CampaignCustomMessage struct { + _ struct{} `type:"structure"` + + // The raw, JSON-formatted string to use as the payload for the message. The + // maximum size is 5 KB. + Data *string `type:"string"` +} + +// String returns the string representation +func (s CampaignCustomMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CampaignCustomMessage) GoString() string { + return s.String() +} + +// SetData sets the Data field's value. +func (s *CampaignCustomMessage) SetData(v string) *CampaignCustomMessage { + s.Data = &v + return s +} + // Provides the results of a query that retrieved the data for a standard metric // that applies to a campaign, and provides information about that query. type CampaignDateRangeKpiResponse struct { @@ -13855,7 +13917,7 @@ type CampaignDateRangeKpiResponse struct { // that the data was retrieved for. This value describes the associated metric // and consists of two or more terms, which are comprised of lowercase alphanumeric // characters, separated by a hyphen. For a list of possible values, see the - // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). + // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/analytics-standard-metrics.html). // // KpiName is a required field KpiName *string `type:"string" required:"true"` @@ -14042,15 +14104,25 @@ func (s *CampaignEventFilter) SetFilterType(v string) *CampaignEventFilter { return s } -// Specifies the AWS Lambda function to use as a code hook for a campaign. +// Specifies settings for invoking an AWS Lambda function that customizes a +// segment for a campaign. type CampaignHook struct { _ struct{} `type:"structure"` // The name or Amazon Resource Name (ARN) of the AWS Lambda function that Amazon - // Pinpoint invokes to send messages for a campaign. + // Pinpoint invokes to customize a segment for a campaign. LambdaFunctionName *string `type:"string"` - // Specifies which Lambda mode to use when invoking the AWS Lambda function. + // The mode that Amazon Pinpoint uses to invoke the AWS Lambda function. Possible + // values are: + // + // * FILTER - Invoke the function to customize the segment that's used by + // a campaign. + // + // * DELIVERY - (Deprecated) Previously, invoked the function to send a campaign + // through a custom channel. This functionality is not supported anymore. + // To send a campaign through a custom channel, use the CustomDeliveryConfiguration + // and CampaignCustomMessage objects of the campaign. Mode *string `type:"string" enum:"Mode"` // The web URL that Amazon Pinpoint calls to invoke the AWS Lambda function @@ -14086,12 +14158,16 @@ func (s *CampaignHook) SetWebUrl(v string) *CampaignHook { return s } -// Specifies limits on the messages that a campaign can send. +// For a campaign, specifies limits on the messages that the campaign can send. +// For an application, specifies the default limits for messages that campaigns +// in the application can send. type CampaignLimits struct { _ struct{} `type:"structure"` // The maximum number of messages that a campaign can send to a single endpoint - // during a 24-hour period. The maximum value is 100. + // during a 24-hour period. For an application, this value specifies the default + // limit for the number of messages that campaigns and journeys can send to + // a single endpoint during a 24-hour period. The maximum value is 100. 
Daily *int64 `type:"integer"` // The maximum amount of time, in seconds, that a campaign can attempt to deliver @@ -14099,12 +14175,15 @@ type CampaignLimits struct { // is 60 seconds. MaximumDuration *int64 `type:"integer"` - // The maximum number of messages that a campaign can send each second. The - // minimum value is 50. The maximum value is 20,000. + // The maximum number of messages that a campaign can send each second. For + // an application, this value specifies the default limit for the number of + // messages that campaigns can send each second. The minimum value is 50. The + // maximum value is 20,000. MessagesPerSecond *int64 `type:"integer"` // The maximum number of messages that a campaign can send to a single endpoint - // during the course of the campaign. The maximum value is 100. + // during the course of the campaign. If a campaign recurs, this setting applies + // to all runs of the campaign. The maximum value is 100. Total *int64 `type:"integer"` } @@ -14166,8 +14245,12 @@ type CampaignResponse struct { // CreationDate is a required field CreationDate *string `type:"string" required:"true"` + // The delivery configuration settings for sending the campaign through a custom + // channel. + CustomDeliveryConfiguration *CustomDeliveryConfiguration `type:"structure"` + // The current status of the campaign's default treatment. This value exists - // only for campaigns that have more than one treatment, to support A/B testing. + // only for campaigns that have more than one treatment. DefaultState *CampaignState `type:"structure"` // The custom description of the campaign. @@ -14178,6 +14261,7 @@ type CampaignResponse struct { HoldoutPercent *int64 `type:"integer"` // The settings for the AWS Lambda function to use as a code hook for the campaign. + // You can use this hook to customize the segment that's used by the campaign. Hook *CampaignHook `type:"structure"` // The unique identifier for the campaign. @@ -14227,11 +14311,12 @@ type CampaignResponse struct { // The message template that’s used for the campaign. TemplateConfiguration *TemplateConfiguration `type:"structure"` - // The custom description of a variation of the campaign that's used for A/B - // testing. + // The custom description of the default treatment for the campaign. TreatmentDescription *string `type:"string"` - // The custom name of a variation of the campaign that's used for A/B testing. + // The custom name of the default treatment for the campaign, if the campaign + // has multiple treatments. A treatment is a variation of a campaign that's + // used for A/B testing. TreatmentName *string `type:"string"` // The version number of the campaign. @@ -14272,6 +14357,12 @@ func (s *CampaignResponse) SetCreationDate(v string) *CampaignResponse { return s } +// SetCustomDeliveryConfiguration sets the CustomDeliveryConfiguration field's value. +func (s *CampaignResponse) SetCustomDeliveryConfiguration(v *CustomDeliveryConfiguration) *CampaignResponse { + s.CustomDeliveryConfiguration = v + return s +} + // SetDefaultState sets the DefaultState field's value. func (s *CampaignResponse) SetDefaultState(v *CampaignState) *CampaignResponse { s.DefaultState = v @@ -14394,10 +14485,10 @@ type CampaignSmsMessage struct { // The body of the SMS message. Body *string `type:"string"` - // The type of SMS message. 
Valid values are: TRANSACTIONAL, the message is - // critical or time-sensitive, such as a one-time password that supports a customer - // transaction; and, PROMOTIONAL, the message isn't critical or time-sensitive, - // such as a marketing message. + // The SMS message type. Valid values are TRANSACTIONAL (for messages that are + // critical or time-sensitive, such as a one-time passwords) and PROMOTIONAL + // (for messsages that aren't critical or time-sensitive, such as marketing + // messages). MessageType *string `type:"string" enum:"MessageType"` // The sender ID to display on recipients' devices when they receive the SMS @@ -14438,9 +14529,12 @@ type CampaignState struct { _ struct{} `type:"structure"` // The current status of the campaign, or the current status of a treatment - // that belongs to an A/B test campaign. If a campaign uses A/B testing, the - // campaign has a status of COMPLETED only if all campaign treatments have a - // status of COMPLETED. + // that belongs to an A/B test campaign. + // + // If a campaign uses A/B testing, the campaign has a status of COMPLETED only + // if all campaign treatments have a status of COMPLETED. If you delete the + // segment that's associated with a campaign, the campaign fails and has a status + // of DELETED. CampaignStatus *string `type:"string" enum:"CampaignStatus"` } @@ -14682,6 +14776,12 @@ func (s *Condition) SetOperator(v string) *Condition { // Specifies the settings for a yes/no split activity in a journey. This type // of activity sends participants down one of two paths in a journey, based // on conditions that you specify. +// +// To create yes/no split activities that send participants down different paths +// based on push notification events (such as Open or Received events), your +// mobile app has to specify the User ID and Endpoint ID values. For more information, +// see Integrating Amazon Pinpoint with your application (https://docs.aws.amazon.com/pinpoint/latest/developerguide/integrate.html) +// in the Amazon Pinpoint Developer Guide. type ConditionalSplitActivity struct { _ struct{} `type:"structure"` @@ -14751,6 +14851,64 @@ func (s *ConditionalSplitActivity) SetTrueActivity(v string) *ConditionalSplitAc return s } +// Provides information about an API request or response. +type ConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + RequestID_ *string `locationName:"RequestID" type:"string"` +} + +// String returns the string representation +func (s ConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConflictException) GoString() string { + return s.String() +} + +func newErrorConflictException(v protocol.ResponseMetadata) error { + return &ConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ConflictException) Code() string { + return "ConflictException" +} + +// Message returns the exception's message. +func (s *ConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ConflictException) OrigErr() error { + return nil +} + +func (s *ConflictException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + type CreateAppInput struct { _ struct{} `type:"structure" payload:"CreateApplicationRequest"` @@ -15378,14 +15536,15 @@ type CreateRecommenderConfiguration struct { _ struct{} `type:"structure"` // A map of key-value pairs that defines 1-10 custom endpoint or user attributes, - // depending on the value for the RecommenderUserIdType property. Each of these - // attributes temporarily stores a recommended item that's retrieved from the - // recommender model and sent to an AWS Lambda function for additional processing. - // Each attribute can be used as a message variable in a message template. + // depending on the value for the RecommendationProviderIdType property. Each + // of these attributes temporarily stores a recommended item that's retrieved + // from the recommender model and sent to an AWS Lambda function for additional + // processing. Each attribute can be used as a message variable in a message + // template. // // In the map, the key is the name of a custom attribute and the value is a // custom display name for that attribute. The display name appears in the Attribute - // finder pane of the template editor on the Amazon Pinpoint console. The following + // finder of the template editor on the Amazon Pinpoint console. The following // restrictions apply to these names: // // * An attribute name must start with a letter or number and it can contain @@ -15397,12 +15556,13 @@ type CreateRecommenderConfiguration struct { // spaces, underscores (_), or hyphens (-). // // This object is required if the configuration invokes an AWS Lambda function - // (LambdaFunctionArn) to process recommendation data. Otherwise, don't include - // this object in your request. + // (RecommendationTransformerUri) to process recommendation data. Otherwise, + // don't include this object in your request. Attributes map[string]*string `type:"map"` // A custom description of the configuration for the recommender model. The - // description can contain up to 128 characters. + // description can contain up to 128 characters. The characters can be letters, + // numbers, spaces, or the following symbols: _ ; () , ‐. Description *string `type:"string"` // A custom name of the configuration for the recommender model. The name must @@ -15422,7 +15582,7 @@ type CreateRecommenderConfiguration struct { // * PINPOINT_USER_ID - Associate each user in the model with a particular // user and endpoint in Amazon Pinpoint. The data is correlated based on // user IDs in Amazon Pinpoint. If you specify this value, an endpoint definition - // in Amazon Pinpoint has to specify a both a user ID (UserId) and an endpoint + // in Amazon Pinpoint has to specify both a user ID (UserId) and an endpoint // ID. Otherwise, messages won’t be sent to the user's endpoint. RecommendationProviderIdType *string `type:"string"` @@ -15445,26 +15605,26 @@ type CreateRecommenderConfiguration struct { RecommendationTransformerUri *string `type:"string"` // A custom display name for the standard endpoint or user attribute (RecommendationItems) - // that temporarily stores a recommended item for each endpoint or user, depending - // on the value for the RecommenderUserIdType property. 
This value is required - // if the configuration doesn't invoke an AWS Lambda function (LambdaFunctionArn) + // that temporarily stores recommended items for each endpoint or user, depending + // on the value for the RecommendationProviderIdType property. This value is + // required if the configuration doesn't invoke an AWS Lambda function (RecommendationTransformerUri) // to perform additional processing of recommendation data. // - // This name appears in the Attribute finder pane of the template editor on - // the Amazon Pinpoint console. The name can contain up to 25 characters. The - // characters can be letters, numbers, spaces, underscores (_), or hyphens (-). - // These restrictions don't apply to attribute values. + // This name appears in the Attribute finder of the template editor on the Amazon + // Pinpoint console. The name can contain up to 25 characters. The characters + // can be letters, numbers, spaces, underscores (_), or hyphens (-). These restrictions + // don't apply to attribute values. RecommendationsDisplayName *string `type:"string"` // The number of recommended items to retrieve from the model for each endpoint - // or user, depending on the value for the RecommenderUserIdType property. This - // number determines how many recommended attributes are available for use as - // message variables in message templates. The minimum value is 1. The maximum - // value is 5. The default value is 5. + // or user, depending on the value for the RecommendationProviderIdType property. + // This number determines how many recommended items are available for use in + // message variables. The minimum value is 1. The maximum value is 5. The default + // value is 5. // // To use multiple recommended items and custom attributes with message variables, - // you have to use an AWS Lambda function (LambdaFunctionArn) to perform additional - // processing of recommendation data. + // you have to use an AWS Lambda function (RecommendationTransformerUri) to + // perform additional processing of recommendation data. RecommendationsPerMessage *int64 `type:"integer"` } @@ -15905,6 +16065,158 @@ func (s *CreateVoiceTemplateOutput) SetCreateTemplateMessageBody(v *CreateTempla return s } +// Specifies the delivery configuration settings for sending a campaign or campaign +// treatment through a custom channel. This object is required if you use the +// CampaignCustomMessage object to define the message to send for the campaign +// or campaign treatment. +type CustomDeliveryConfiguration struct { + _ struct{} `type:"structure"` + + // The destination to send the campaign or treatment to. This value can be one + // of the following: + // + // * The name or Amazon Resource Name (ARN) of an AWS Lambda function to + // invoke to handle delivery of the campaign or treatment. + // + // * The URL for a web application or service that supports HTTPS and can + // receive the message. The URL has to be a full URL, including the HTTPS + // protocol. + // + // DeliveryUri is a required field + DeliveryUri *string `type:"string" required:"true"` + + // The types of endpoints to send the campaign or treatment to. Each valid value + // maps to a type of channel that you can associate with an endpoint by using + // the ChannelType property of an endpoint. 
+ EndpointTypes []*string `type:"list"` +} + +// String returns the string representation +func (s CustomDeliveryConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomDeliveryConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomDeliveryConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomDeliveryConfiguration"} + if s.DeliveryUri == nil { + invalidParams.Add(request.NewErrParamRequired("DeliveryUri")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeliveryUri sets the DeliveryUri field's value. +func (s *CustomDeliveryConfiguration) SetDeliveryUri(v string) *CustomDeliveryConfiguration { + s.DeliveryUri = &v + return s +} + +// SetEndpointTypes sets the EndpointTypes field's value. +func (s *CustomDeliveryConfiguration) SetEndpointTypes(v []*string) *CustomDeliveryConfiguration { + s.EndpointTypes = v + return s +} + +// The settings for a custom message activity. This type of activity calls an +// AWS Lambda function or web hook that sends messages to participants. +type CustomMessageActivity struct { + _ struct{} `type:"structure"` + + // The destination to send the campaign or treatment to. This value can be one + // of the following: + // + // * The name or Amazon Resource Name (ARN) of an AWS Lambda function to + // invoke to handle delivery of the campaign or treatment. + // + // * The URL for a web application or service that supports HTTPS and can + // receive the message. The URL has to be a full URL, including the HTTPS + // protocol. + DeliveryUri *string `type:"string"` + + // The types of endpoints to send the custom message to. Each valid value maps + // to a type of channel that you can associate with an endpoint by using the + // ChannelType property of an endpoint. + EndpointTypes []*string `type:"list"` + + // Specifies the message data included in a custom channel message that's sent + // to participants in a journey. + MessageConfig *JourneyCustomMessage `type:"structure"` + + // The unique identifier for the next activity to perform, after Amazon Pinpoint + // calls the AWS Lambda function or web hook. + NextActivity *string `type:"string"` + + // The name of the custom message template to use for the message. If specified, + // this value must match the name of an existing message template. + TemplateName *string `type:"string"` + + // The unique identifier for the version of the message template to use for + // the message. If specified, this value must match the identifier for an existing + // template version. To retrieve a list of versions and version identifiers + // for a template, use the Template Versions resource. + // + // If you don't specify a value for this property, Amazon Pinpoint uses the + // active version of the template. The active version is typically the version + // of a template that's been most recently reviewed and approved for use, depending + // on your workflow. It isn't necessarily the latest version of a template. + TemplateVersion *string `type:"string"` +} + +// String returns the string representation +func (s CustomMessageActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomMessageActivity) GoString() string { + return s.String() +} + +// SetDeliveryUri sets the DeliveryUri field's value. 
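CustomDeliveryConfiguration and CustomMessageActivity above, together with CampaignCustomMessage earlier in this hunk, back Pinpoint's custom-channel support. As a rough sketch of how the pieces compose for a campaign, assuming the corresponding WriteCampaignRequest and MessageConfiguration fields added elsewhere in this patch (the Lambda ARN, endpoint type, and payload are placeholders):

    // customChannelCampaign builds the custom-channel portion of a campaign write request.
    // Assumes "github.com/aws/aws-sdk-go/aws" and
    // "github.com/aws/aws-sdk-go/service/pinpoint" are imported.
    func customChannelCampaign(lambdaArn string) *pinpoint.WriteCampaignRequest {
        return &pinpoint.WriteCampaignRequest{
            CustomDeliveryConfiguration: &pinpoint.CustomDeliveryConfiguration{
                DeliveryUri:   aws.String(lambdaArn),
                EndpointTypes: []*string{aws.String("CUSTOM")},
            },
            MessageConfiguration: &pinpoint.MessageConfiguration{
                CustomMessage: &pinpoint.CampaignCustomMessage{
                    Data: aws.String(`{"greeting":"hello"}`), // raw JSON payload, max 5 KB
                },
            },
        }
    }

The remaining campaign fields (segment, schedule, and so on) would be filled in as usual; only the custom-channel wiring is shown here.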
+func (s *CustomMessageActivity) SetDeliveryUri(v string) *CustomMessageActivity { + s.DeliveryUri = &v + return s +} + +// SetEndpointTypes sets the EndpointTypes field's value. +func (s *CustomMessageActivity) SetEndpointTypes(v []*string) *CustomMessageActivity { + s.EndpointTypes = v + return s +} + +// SetMessageConfig sets the MessageConfig field's value. +func (s *CustomMessageActivity) SetMessageConfig(v *JourneyCustomMessage) *CustomMessageActivity { + s.MessageConfig = v + return s +} + +// SetNextActivity sets the NextActivity field's value. +func (s *CustomMessageActivity) SetNextActivity(v string) *CustomMessageActivity { + s.NextActivity = &v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *CustomMessageActivity) SetTemplateName(v string) *CustomMessageActivity { + s.TemplateName = &v + return s +} + +// SetTemplateVersion sets the TemplateVersion field's value. +func (s *CustomMessageActivity) SetTemplateVersion(v string) *CustomMessageActivity { + s.TemplateVersion = &v + return s +} + // Specifies the default message for all channels. type DefaultMessage struct { _ struct{} `type:"structure"` @@ -17765,8 +18077,8 @@ func (s *DirectMessageConfiguration) SetVoiceMessage(v *VoiceMessage) *DirectMes type EmailChannelRequest struct { _ struct{} `type:"structure"` - // The configuration set that you want to apply to email that you send through - // the channel by using the Amazon Pinpoint Email API (emailAPIreference.html). + // The Amazon SES configuration set (https://docs.aws.amazon.com/ses/latest/APIReference/API_ConfigurationSet.html) + // that you want to apply to messages that you send through the channel. ConfigurationSet *string `type:"string"` // Specifies whether to enable the email channel for the application. @@ -17855,8 +18167,8 @@ type EmailChannelResponse struct { // to. ApplicationId *string `type:"string"` - // The configuration set that's applied to email that's sent through the channel - // by using the Amazon Pinpoint Email API (emailAPIreference.html). + // The Amazon SES configuration set (https://docs.aws.amazon.com/ses/latest/APIReference/API_ConfigurationSet.html) + // that's applied to messages that are sent through the channel. ConfigurationSet *string `type:"string"` // The date and time, in ISO 8601 format, when the email channel was enabled. @@ -17865,7 +18177,7 @@ type EmailChannelResponse struct { // Specifies whether the email channel is enabled for the application. Enabled *bool `type:"boolean"` - // The verified email address that you send email from when you send email through + // The verified email address that email is sent from when you send email through // the channel. FromAddress *string `type:"string"` @@ -17877,8 +18189,7 @@ type EmailChannelResponse struct { Id *string `type:"string"` // The Amazon Resource Name (ARN) of the identity, verified with Amazon Simple - // Email Service (Amazon SES), that you use when you send email through the - // channel. + // Email Service (Amazon SES), that's used when you send email through the channel. Identity *string `type:"string"` // Specifies whether the email channel is archived. @@ -17890,7 +18201,7 @@ type EmailChannelResponse struct { // The date and time, in ISO 8601 format, when the email channel was last modified. LastModifiedDate *string `type:"string"` - // The maximum number of emails that you can send through the channel each second. + // The maximum number of emails that can be sent through the channel each second. 
MessagesPerSecond *int64 `type:"integer"` // The type of messaging or notification platform for the channel. For the email @@ -18095,14 +18406,16 @@ func (s *EmailMessage) SetSubstitutions(v map[string][]*string) *EmailMessage { type EmailMessageActivity struct { _ struct{} `type:"structure"` - // The "From" address to use for the message. + // Specifies the sender address for an email message that's sent to participants + // in the journey. MessageConfig *JourneyEmailMessage `type:"structure"` // The unique identifier for the next activity to perform, after the message // is sent. NextActivity *string `type:"string"` - // The name of the email template to use for the message. + // The name of the email message template to use for the message. If specified, + // this value must match the name of an existing message template. TemplateName *string `type:"string"` // The unique identifier for the version of the email template to use for the @@ -18466,8 +18779,8 @@ type EndpointBatchItem struct { // The unique identifier for the request to create or update the endpoint. RequestId *string `type:"string"` - // One or more custom user attributes that describe the user who's associated - // with the endpoint. + // One or more custom attributes that describe the user who's associated with + // the endpoint. User *EndpointUser `type:"structure"` } @@ -18954,8 +19267,8 @@ type EndpointRequest struct { // The unique identifier for the most recent request to update the endpoint. RequestId *string `type:"string"` - // One or more custom user attributes that describe the user who's associated - // with the endpoint. + // One or more custom attributes that describe the user who's associated with + // the endpoint. User *EndpointUser `type:"structure"` } @@ -19492,9 +19805,7 @@ type EventCondition struct { _ struct{} `type:"structure"` // The dimensions for the event filter to use for the activity. - // - // Dimensions is a required field - Dimensions *EventDimensions `type:"structure" required:"true"` + Dimensions *EventDimensions `type:"structure"` // The message identifier (message_id) for the message to use when determining // whether message events meet the condition. @@ -19514,9 +19825,6 @@ func (s EventCondition) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *EventCondition) Validate() error { invalidParams := request.ErrInvalidParams{Context: "EventCondition"} - if s.Dimensions == nil { - invalidParams.Add(request.NewErrParamRequired("Dimensions")) - } if s.Dimensions != nil { if err := s.Dimensions.Validate(); err != nil { invalidParams.AddNested("Dimensions", err.(request.ErrInvalidParams)) @@ -19628,6 +19936,69 @@ func (s *EventDimensions) SetMetrics(v map[string]*MetricDimension) *EventDimens return s } +// Specifies the settings for an event that causes a campaign to be sent or +// a journey activity to be performed. +type EventFilter struct { + _ struct{} `type:"structure"` + + // The dimensions for the event filter to use for the campaign or the journey + // activity. + // + // Dimensions is a required field + Dimensions *EventDimensions `type:"structure" required:"true"` + + // The type of event that causes the campaign to be sent or the journey activity + // to be performed. Valid values are: SYSTEM, sends the campaign or performs + // the activity when a system event occurs; and, ENDPOINT, sends the campaign + // or performs the activity when an endpoint event (Events resource) occurs. 
+ // + // FilterType is a required field + FilterType *string `type:"string" required:"true" enum:"FilterType"` +} + +// String returns the string representation +func (s EventFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EventFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EventFilter"} + if s.Dimensions == nil { + invalidParams.Add(request.NewErrParamRequired("Dimensions")) + } + if s.FilterType == nil { + invalidParams.Add(request.NewErrParamRequired("FilterType")) + } + if s.Dimensions != nil { + if err := s.Dimensions.Validate(); err != nil { + invalidParams.AddNested("Dimensions", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDimensions sets the Dimensions field's value. +func (s *EventFilter) SetDimensions(v *EventDimensions) *EventFilter { + s.Dimensions = v + return s +} + +// SetFilterType sets the FilterType field's value. +func (s *EventFilter) SetFilterType(v string) *EventFilter { + s.FilterType = &v + return s +} + // Provides the status code and message that result from processing an event. type EventItemResponse struct { _ struct{} `type:"structure"` @@ -19664,32 +20035,80 @@ func (s *EventItemResponse) SetStatusCode(v int64) *EventItemResponse { return s } -// Specifies settings for publishing event data to an Amazon Kinesis data stream -// or an Amazon Kinesis Data Firehose delivery stream. -type EventStream struct { +// Specifies the settings for an event that causes a journey activity to start. +type EventStartCondition struct { _ struct{} `type:"structure"` - // The unique identifier for the application to publish event data for. - // - // ApplicationId is a required field - ApplicationId *string `type:"string" required:"true"` + // Specifies the settings for an event that causes a campaign to be sent or + // a journey activity to be performed. + EventFilter *EventFilter `type:"structure"` - // The Amazon Resource Name (ARN) of the Amazon Kinesis data stream or Amazon - // Kinesis Data Firehose delivery stream to publish event data to. - // - // For a Kinesis data stream, the ARN format is: arn:aws:kinesis:region:account-id:stream/stream_name - // - // For a Kinesis Data Firehose delivery stream, the ARN format is: arn:aws:firehose:region:account-id:deliverystream/stream_name - // - // DestinationStreamArn is a required field - DestinationStreamArn *string `type:"string" required:"true"` + SegmentId *string `type:"string"` +} - // (Deprecated) Your AWS account ID, which you assigned to an external ID key - // in an IAM trust policy. Amazon Pinpoint previously used this value to assume - // an IAM role when publishing event data, but we removed this requirement. - // We don't recommend use of external IDs for IAM roles that are assumed by - // Amazon Pinpoint. - ExternalId *string `type:"string"` +// String returns the string representation +func (s EventStartCondition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EventStartCondition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *EventStartCondition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EventStartCondition"} + if s.EventFilter != nil { + if err := s.EventFilter.Validate(); err != nil { + invalidParams.AddNested("EventFilter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventFilter sets the EventFilter field's value. +func (s *EventStartCondition) SetEventFilter(v *EventFilter) *EventStartCondition { + s.EventFilter = v + return s +} + +// SetSegmentId sets the SegmentId field's value. +func (s *EventStartCondition) SetSegmentId(v string) *EventStartCondition { + s.SegmentId = &v + return s +} + +// Specifies settings for publishing event data to an Amazon Kinesis data stream +// or an Amazon Kinesis Data Firehose delivery stream. +type EventStream struct { + _ struct{} `type:"structure"` + + // The unique identifier for the application to publish event data for. + // + // ApplicationId is a required field + ApplicationId *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon Kinesis data stream or Amazon + // Kinesis Data Firehose delivery stream to publish event data to. + // + // For a Kinesis data stream, the ARN format is: arn:aws:kinesis:region:account-id:stream/stream_name + // + // For a Kinesis Data Firehose delivery stream, the ARN format is: arn:aws:firehose:region:account-id:deliverystream/stream_name + // + // DestinationStreamArn is a required field + DestinationStreamArn *string `type:"string" required:"true"` + + // (Deprecated) Your AWS account ID, which you assigned to an external ID key + // in an IAM trust policy. Amazon Pinpoint previously used this value to assume + // an IAM role when publishing event data, but we removed this requirement. + // We don't recommend use of external IDs for IAM roles that are assumed by + // Amazon Pinpoint. + ExternalId *string `type:"string"` // The date, in ISO 8601 format, when the event stream was last modified. LastModifiedDate *string `type:"string"` @@ -20234,8 +20653,8 @@ func (s *ExportJobsResponse) SetNextToken(v string) *ExportJobsResponse { // Provides information about an API request or response. type ForbiddenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -20254,17 +20673,17 @@ func (s ForbiddenException) GoString() string { func newErrorForbiddenException(v protocol.ResponseMetadata) error { return &ForbiddenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ForbiddenException) Code() string { +func (s *ForbiddenException) Code() string { return "ForbiddenException" } // Message returns the exception's message. -func (s ForbiddenException) Message() string { +func (s *ForbiddenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -20272,22 +20691,22 @@ func (s ForbiddenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ForbiddenException) OrigErr() error { +func (s *ForbiddenException) OrigErr() error { return nil } -func (s ForbiddenException) Error() string { +func (s *ForbiddenException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ForbiddenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ForbiddenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ForbiddenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ForbiddenException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the status and settings of the GCM channel for an application. @@ -24810,8 +25229,8 @@ func (s *ImportJobsResponse) SetNextToken(v string) *ImportJobsResponse { // Provides information about an API request or response. type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -24830,17 +25249,17 @@ func (s InternalServerErrorException) GoString() string { func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { return &InternalServerErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerErrorException) Code() string { +func (s *InternalServerErrorException) Code() string { return "InternalServerErrorException" } // Message returns the exception's message. -func (s InternalServerErrorException) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -24848,22 +25267,22 @@ func (s InternalServerErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerErrorException) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s InternalServerErrorException) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID } // Provides information about the results of a request to create or update an @@ -24902,6 +25321,31 @@ func (s *ItemResponse) SetEventsItemResponse(v map[string]*EventItemResponse) *I return s } +// Specifies the message content for a custom channel message that's sent to +// participants in a journey. +type JourneyCustomMessage struct { + _ struct{} `type:"structure"` + + // The message content that's passed to an AWS Lambda function or to a web hook. + Data *string `type:"string"` +} + +// String returns the string representation +func (s JourneyCustomMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JourneyCustomMessage) GoString() string { + return s.String() +} + +// SetData sets the Data field's value. 
+func (s *JourneyCustomMessage) SetData(v string) *JourneyCustomMessage { + s.Data = &v + return s +} + // Provides the results of a query that retrieved the data for a standard engagement // metric that applies to a journey, and provides information about that query. type JourneyDateRangeKpiResponse struct { @@ -24924,7 +25368,7 @@ type JourneyDateRangeKpiResponse struct { // that the data was retrieved for. This value describes the associated metric // and consists of two or more terms, which are comprised of lowercase alphanumeric // characters, separated by a hyphen. For a list of possible values, see the - // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). + // Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/analytics-standard-metrics.html). // // KpiName is a required field KpiName *string `type:"string" required:"true"` @@ -25077,7 +25521,7 @@ type JourneyExecutionActivityMetricsResponse struct { // A JSON object that contains the results of the query. The results vary depending // on the type of activity (ActivityType). For information about the structure - // and contents of the results, see the Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). + // and contents of the results, see the Amazon Pinpoint Developer Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/analytics-standard-metrics.html). // // Metrics is a required field Metrics map[string]*string `type:"map" required:"true"` @@ -25152,7 +25596,7 @@ type JourneyExecutionMetricsResponse struct { // A JSON object that contains the results of the query. For information about // the structure and contents of the results, see the Amazon Pinpoint Developer - // Guide (https://docs.aws.amazon.com/pinpoint/latest/developerguide/welcome.html). + // Guide (https://docs.aws.amazon.com//pinpoint/latest/developerguide/analytics-standard-metrics.html). // // Metrics is a required field Metrics map[string]*string `type:"map" required:"true"` @@ -25238,6 +25682,39 @@ func (s *JourneyLimits) SetMessagesPerSecond(v int64) *JourneyLimits { return s } +// Specifies the message configuration for a push notification that's sent to +// participants in a journey. +type JourneyPushMessage struct { + _ struct{} `type:"structure"` + + // The number of seconds that the push notification service should keep the + // message, if the service is unable to deliver the notification the first time. + // This value is converted to an expiration value when it's sent to a push-notification + // service. If this value is 0, the service treats the notification as if it + // expires immediately and the service doesn't store or try to deliver the notification + // again. + // + // This value doesn't apply to messages that are sent through the Amazon Device + // Messaging (ADM) service. + TimeToLive *string `type:"string"` +} + +// String returns the string representation +func (s JourneyPushMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JourneyPushMessage) GoString() string { + return s.String() +} + +// SetTimeToLive sets the TimeToLive field's value. +func (s *JourneyPushMessage) SetTimeToLive(v string) *JourneyPushMessage { + s.TimeToLive = &v + return s +} + // Provides information about the status, configuration, and other settings // for a journey. 
type JourneyResponse struct { @@ -25436,6 +25913,46 @@ func (s *JourneyResponse) SetTags(v map[string]*string) *JourneyResponse { return s } +// Specifies the sender ID and message type for an SMS message that's sent to +// participants in a journey. +type JourneySMSMessage struct { + _ struct{} `type:"structure"` + + // The SMS message type. Valid values are TRANSACTIONAL (for messages that are + // critical or time-sensitive, such as a one-time passwords) and PROMOTIONAL + // (for messsages that aren't critical or time-sensitive, such as marketing + // messages). + MessageType *string `type:"string" enum:"MessageType"` + + // The sender ID to display as the sender of the message on a recipient's device. + // Support for sender IDs varies by country or region. For more information, + // see Supported Countries and Regions (https://docs.aws.amazon.com/pinpoint/latest/userguide/channels-sms-countries.html) + // in the Amazon Pinpoint User Guide. + SenderId *string `type:"string"` +} + +// String returns the string representation +func (s JourneySMSMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JourneySMSMessage) GoString() string { + return s.String() +} + +// SetMessageType sets the MessageType field's value. +func (s *JourneySMSMessage) SetMessageType(v string) *JourneySMSMessage { + s.MessageType = &v + return s +} + +// SetSenderId sets the SenderId field's value. +func (s *JourneySMSMessage) SetSenderId(v string) *JourneySMSMessage { + s.SenderId = &v + return s +} + // Specifies the schedule settings for a journey. type JourneySchedule struct { _ struct{} `type:"structure"` @@ -26092,31 +26609,38 @@ type MessageConfiguration struct { _ struct{} `type:"structure"` // The message that the campaign sends through the ADM (Amazon Device Messaging) - // channel. This message overrides the default message. + // channel. If specified, this message overrides the default message. ADMMessage *Message `type:"structure"` // The message that the campaign sends through the APNs (Apple Push Notification - // service) channel. This message overrides the default message. + // service) channel. If specified, this message overrides the default message. APNSMessage *Message `type:"structure"` // The message that the campaign sends through the Baidu (Baidu Cloud Push) - // channel. This message overrides the default message. + // channel. If specified, this message overrides the default message. BaiduMessage *Message `type:"structure"` + // The message that the campaign sends through a custom channel, as specified + // by the delivery configuration (CustomDeliveryConfiguration) settings for + // the campaign. If specified, this message overrides the default message. + CustomMessage *CampaignCustomMessage `type:"structure"` + // The default message that the campaign sends through all the channels that // are configured for the campaign. DefaultMessage *Message `type:"structure"` - // The message that the campaign sends through the email channel. + // The message that the campaign sends through the email channel. If specified, + // this message overrides the default message. EmailMessage *CampaignEmailMessage `type:"structure"` // The message that the campaign sends through the GCM channel, which enables // Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging - // (FCM), formerly Google Cloud Messaging (GCM), service. This message overrides - // the default message. 
+ // (FCM), formerly Google Cloud Messaging (GCM), service. If specified, this + // message overrides the default message. GCMMessage *Message `type:"structure"` - // The message that the campaign sends through the SMS channel. + // The message that the campaign sends through the SMS channel. If specified, + // this message overrides the default message. SMSMessage *CampaignSmsMessage `type:"structure"` } @@ -26148,6 +26672,12 @@ func (s *MessageConfiguration) SetBaiduMessage(v *Message) *MessageConfiguration return s } +// SetCustomMessage sets the CustomMessage field's value. +func (s *MessageConfiguration) SetCustomMessage(v *CampaignCustomMessage) *MessageConfiguration { + s.CustomMessage = v + return s +} + // SetDefaultMessage sets the DefaultMessage field's value. func (s *MessageConfiguration) SetDefaultMessage(v *Message) *MessageConfiguration { s.DefaultMessage = v @@ -26177,8 +26707,9 @@ type MessageRequest struct { _ struct{} `type:"structure"` // A map of key-value pairs, where each key is an address and each value is - // an AddressConfiguration object. An address can be a push notification token, - // a phone number, or an email address. You can use an AddressConfiguration + // an AddressConfiguration (https://docs.aws.amazon.com/pinpoint/latest/apireference/apps-application-id-messages.html#apps-application-id-messages-model-addressconfiguration) + // object. An address can be a push notification token, a phone number, or an + // email address. You can use an AddressConfiguration (https://docs.aws.amazon.com/pinpoint/latest/apireference/apps-application-id-messages.html#apps-application-id-messages-model-addressconfiguration) // object to tailor the message for an address by specifying settings such as // content overrides and message variables. Addresses map[string]*AddressConfiguration `type:"map"` @@ -26189,7 +26720,8 @@ type MessageRequest struct { Context map[string]*string `type:"map"` // A map of key-value pairs, where each key is an endpoint ID and each value - // is an EndpointSendConfiguration object. You can use an EndpointSendConfiguration + // is an EndpointSendConfiguration (https://docs.aws.amazon.com/pinpoint/latest/apireference/apps-application-id-messages.html#apps-application-id-messages-model-endpointsendconfiguration) + // object. You can use an EndpointSendConfiguration (https://docs.aws.amazon.com/pinpoint/latest/apireference/apps-application-id-messages.html#apps-application-id-messages-model-endpointsendconfiguration) // object to tailor the message for an endpoint by specifying settings such // as content overrides and message variables. Endpoints map[string]*EndpointSendConfiguration `type:"map"` @@ -26418,8 +26950,8 @@ func (s *MessageResult) SetUpdatedToken(v string) *MessageResult { // Provides information about an API request or response. type MethodNotAllowedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -26438,17 +26970,17 @@ func (s MethodNotAllowedException) GoString() string { func newErrorMethodNotAllowedException(v protocol.ResponseMetadata) error { return &MethodNotAllowedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s MethodNotAllowedException) Code() string { +func (s *MethodNotAllowedException) Code() string { return "MethodNotAllowedException" } // Message returns the exception's message. -func (s MethodNotAllowedException) Message() string { +func (s *MethodNotAllowedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26456,22 +26988,22 @@ func (s MethodNotAllowedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MethodNotAllowedException) OrigErr() error { +func (s *MethodNotAllowedException) OrigErr() error { return nil } -func (s MethodNotAllowedException) Error() string { +func (s *MethodNotAllowedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s MethodNotAllowedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MethodNotAllowedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MethodNotAllowedException) RequestID() string { - return s.respMetadata.RequestID +func (s *MethodNotAllowedException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies metric-based criteria for including or excluding endpoints from @@ -26582,6 +27114,12 @@ func (s *MultiConditionalBranch) SetNextActivity(v string) *MultiConditionalBran // Specifies the settings for a multivariate split activity in a journey. This // type of activity sends participants down one of as many as five paths (including // a default Else path) in a journey, based on conditions that you specify. +// +// To create multivariate split activities that send participants down different +// paths based on push notification events (such as Open or Received events), +// your mobile app has to specify the User ID and Endpoint ID values. For more +// information, see Integrating Amazon Pinpoint with your application (https://docs.aws.amazon.com/pinpoint/latest/developerguide/integrate.html) +// in the Amazon Pinpoint Developer Guide. type MultiConditionalSplitActivity struct { _ struct{} `type:"structure"` @@ -26648,8 +27186,8 @@ func (s *MultiConditionalSplitActivity) SetEvaluationWaitTime(v *WaitTime) *Mult // Provides information about an API request or response. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -26668,17 +27206,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26686,22 +27224,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies a phone number to validate and retrieve information about. @@ -26893,8 +27431,8 @@ func (s *NumberValidateResponse) SetZipCode(v string) *NumberValidateResponse { // Provides information about an API request or response. type PayloadTooLargeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -26913,17 +27451,17 @@ func (s PayloadTooLargeException) GoString() string { func newErrorPayloadTooLargeException(v protocol.ResponseMetadata) error { return &PayloadTooLargeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PayloadTooLargeException) Code() string { +func (s *PayloadTooLargeException) Code() string { return "PayloadTooLargeException" } // Message returns the exception's message. -func (s PayloadTooLargeException) Message() string { +func (s *PayloadTooLargeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26931,22 +27469,22 @@ func (s PayloadTooLargeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PayloadTooLargeException) OrigErr() error { +func (s *PayloadTooLargeException) OrigErr() error { return nil } -func (s PayloadTooLargeException) Error() string { +func (s *PayloadTooLargeException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s PayloadTooLargeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PayloadTooLargeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PayloadTooLargeException) RequestID() string { - return s.respMetadata.RequestID +func (s *PayloadTooLargeException) RequestID() string { + return s.RespMetadata.RequestID } type PhoneNumberValidateInput struct { @@ -27144,6 +27682,69 @@ func (s *PublicEndpoint) SetUser(v *EndpointUser) *PublicEndpoint { return s } +// Specifies the settings for a push notification activity in a journey. This +// type of activity sends a push notification to participants. +type PushMessageActivity struct { + _ struct{} `type:"structure"` + + // Specifies the time to live (TTL) value for push notifications that are sent + // to participants in a journey. + MessageConfig *JourneyPushMessage `type:"structure"` + + // The unique identifier for the next activity to perform, after the message + // is sent. + NextActivity *string `type:"string"` + + // The name of the push notification template to use for the message. 
If specified, + // this value must match the name of an existing message template. + TemplateName *string `type:"string"` + + // The unique identifier for the version of the push notification template to + // use for the message. If specified, this value must match the identifier for + // an existing template version. To retrieve a list of versions and version + // identifiers for a template, use the Template Versions resource. + // + // If you don't specify a value for this property, Amazon Pinpoint uses the + // active version of the template. The active version is typically the version + // of a template that's been most recently reviewed and approved for use, depending + // on your workflow. It isn't necessarily the latest version of a template. + TemplateVersion *string `type:"string"` +} + +// String returns the string representation +func (s PushMessageActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PushMessageActivity) GoString() string { + return s.String() +} + +// SetMessageConfig sets the MessageConfig field's value. +func (s *PushMessageActivity) SetMessageConfig(v *JourneyPushMessage) *PushMessageActivity { + s.MessageConfig = v + return s +} + +// SetNextActivity sets the NextActivity field's value. +func (s *PushMessageActivity) SetNextActivity(v string) *PushMessageActivity { + s.NextActivity = &v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *PushMessageActivity) SetTemplateName(v string) *PushMessageActivity { + s.TemplateName = &v + return s +} + +// SetTemplateVersion sets the TemplateVersion field's value. +func (s *PushMessageActivity) SetTemplateVersion(v string) *PushMessageActivity { + s.TemplateVersion = &v + return s +} + // Specifies the content and settings for a message template that can be used // in messages that are sent through a push notification channel. type PushNotificationTemplateRequest struct { @@ -27800,13 +28401,14 @@ type RecommenderConfigurationResponse struct { _ struct{} `type:"structure"` // A map that defines 1-10 custom endpoint or user attributes, depending on - // the value for the RecommenderUserIdType property. Each of these attributes + // the value for the RecommendationProviderIdType property. Each of these attributes // temporarily stores a recommended item that's retrieved from the recommender // model and sent to an AWS Lambda function for additional processing. Each // attribute can be used as a message variable in a message template. // // This value is null if the configuration doesn't invoke an AWS Lambda function - // (LambdaFunctionArn) to perform additional processing of recommendation data. + // (RecommendationTransformerUri) to perform additional processing of recommendation + // data. Attributes map[string]*string `type:"map"` // The date, in extended ISO 8601 format, when the configuration was created @@ -27868,18 +28470,19 @@ type RecommenderConfigurationResponse struct { RecommendationTransformerUri *string `type:"string"` // The custom display name for the standard endpoint or user attribute (RecommendationItems) - // that temporarily stores a recommended item for each endpoint or user, depending - // on the value for the RecommenderUserIdType property. This name appears in - // the Attribute finder pane of the template editor on the Amazon Pinpoint console. + // that temporarily stores recommended items for each endpoint or user, depending + // on the value for the RecommendationProviderIdType property. 
This name appears + // in the Attribute finder of the template editor on the Amazon Pinpoint console. // // This value is null if the configuration doesn't invoke an AWS Lambda function - // (LambdaFunctionArn) to perform additional processing of recommendation data. + // (RecommendationTransformerUri) to perform additional processing of recommendation + // data. RecommendationsDisplayName *string `type:"string"` // The number of recommended items that are retrieved from the model for each - // endpoint or user, depending on the value for the RecommenderUserIdType property. - // This number determines how many recommended attributes are available for - // use as message variables in message templates. + // endpoint or user, depending on the value for the RecommendationProviderIdType + // property. This number determines how many recommended items are available + // for use in message variables. RecommendationsPerMessage *int64 `type:"integer"` } @@ -28358,10 +28961,13 @@ type SMSMessage struct { // your dedicated number. Keyword *string `type:"string"` - // The SMS message type. Valid values are: TRANSACTIONAL, the message is critical - // or time-sensitive, such as a one-time password that supports a customer transaction; - // and, PROMOTIONAL, the message is not critical or time-sensitive, such as - // a marketing message. + // This field is reserved for future use. + MediaUrl *string `type:"string"` + + // The SMS message type. Valid values are TRANSACTIONAL (for messages that are + // critical or time-sensitive, such as a one-time passwords) and PROMOTIONAL + // (for messsages that aren't critical or time-sensitive, such as marketing + // messages). MessageType *string `type:"string" enum:"MessageType"` // The number to send the SMS message from. This value should be one of the @@ -28401,6 +29007,12 @@ func (s *SMSMessage) SetKeyword(v string) *SMSMessage { return s } +// SetMediaUrl sets the MediaUrl field's value. +func (s *SMSMessage) SetMediaUrl(v string) *SMSMessage { + s.MediaUrl = &v + return s +} + // SetMessageType sets the MessageType field's value. func (s *SMSMessage) SetMessageType(v string) *SMSMessage { s.MessageType = &v @@ -28425,6 +29037,69 @@ func (s *SMSMessage) SetSubstitutions(v map[string][]*string) *SMSMessage { return s } +// Specifies the settings for an SMS activity in a journey. This type of activity +// sends a text message to participants. +type SMSMessageActivity struct { + _ struct{} `type:"structure"` + + // Specifies the sender ID and message type for an SMS message that's sent to + // participants in a journey. + MessageConfig *JourneySMSMessage `type:"structure"` + + // The unique identifier for the next activity to perform, after the message + // is sent. + NextActivity *string `type:"string"` + + // The name of the SMS message template to use for the message. If specified, + // this value must match the name of an existing message template. + TemplateName *string `type:"string"` + + // The unique identifier for the version of the SMS template to use for the + // message. If specified, this value must match the identifier for an existing + // template version. To retrieve a list of versions and version identifiers + // for a template, use the Template Versions resource. + // + // If you don't specify a value for this property, Amazon Pinpoint uses the + // active version of the template. The active version is typically the version + // of a template that's been most recently reviewed and approved for use, depending + // on your workflow. 
It isn't necessarily the latest version of a template. + TemplateVersion *string `type:"string"` +} + +// String returns the string representation +func (s SMSMessageActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SMSMessageActivity) GoString() string { + return s.String() +} + +// SetMessageConfig sets the MessageConfig field's value. +func (s *SMSMessageActivity) SetMessageConfig(v *JourneySMSMessage) *SMSMessageActivity { + s.MessageConfig = v + return s +} + +// SetNextActivity sets the NextActivity field's value. +func (s *SMSMessageActivity) SetNextActivity(v string) *SMSMessageActivity { + s.NextActivity = &v + return s +} + +// SetTemplateName sets the TemplateName field's value. +func (s *SMSMessageActivity) SetTemplateName(v string) *SMSMessageActivity { + s.TemplateName = &v + return s +} + +// SetTemplateVersion sets the TemplateVersion field's value. +func (s *SMSMessageActivity) SetTemplateVersion(v string) *SMSMessageActivity { + s.TemplateVersion = &v + return s +} + // Specifies the content and settings for a message template that can be used // in text messages that are sent through the SMS channel. type SMSTemplateRequest struct { @@ -29702,9 +30377,10 @@ type SendUsersMessageRequest struct { // to message recipients. TraceId *string `type:"string"` - // A map that associates user IDs with EndpointSendConfiguration objects. You - // can use an EndpointSendConfiguration object to tailor the message for a user - // by specifying settings such as content overrides and message variables. + // A map that associates user IDs with EndpointSendConfiguration (https://docs.aws.amazon.com/pinpoint/latest/apireference/apps-application-id-messages.html#apps-application-id-messages-model-endpointsendconfiguration) + // objects. You can use an EndpointSendConfiguration (https://docs.aws.amazon.com/pinpoint/latest/apireference/apps-application-id-messages.html#apps-application-id-messages-model-endpointsendconfiguration) + // object to tailor the message for a user by specifying settings such as content + // overrides and message variables. // // Users is a required field Users map[string]*EndpointSendConfiguration `type:"map" required:"true"` @@ -30176,6 +30852,9 @@ type StartCondition struct { // The custom description of the condition. Description *string `type:"string"` + // Specifies the settings for an event that causes a journey activity to start. + EventStartCondition *EventStartCondition `type:"structure"` + // The segment that's associated with the first activity in the journey. This // segment determines which users are participants in the journey. SegmentStartCondition *SegmentCondition `type:"structure"` @@ -30194,6 +30873,11 @@ func (s StartCondition) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *StartCondition) Validate() error { invalidParams := request.ErrInvalidParams{Context: "StartCondition"} + if s.EventStartCondition != nil { + if err := s.EventStartCondition.Validate(); err != nil { + invalidParams.AddNested("EventStartCondition", err.(request.ErrInvalidParams)) + } + } if s.SegmentStartCondition != nil { if err := s.SegmentStartCondition.Validate(); err != nil { invalidParams.AddNested("SegmentStartCondition", err.(request.ErrInvalidParams)) @@ -30212,6 +30896,12 @@ func (s *StartCondition) SetDescription(v string) *StartCondition { return s } +// SetEventStartCondition sets the EventStartCondition field's value. 
+func (s *StartCondition) SetEventStartCondition(v *EventStartCondition) *StartCondition { + s.EventStartCondition = v + return s +} + // SetSegmentStartCondition sets the SegmentStartCondition field's value. func (s *StartCondition) SetSegmentStartCondition(v *SegmentCondition) *StartCondition { s.SegmentStartCondition = v @@ -30775,8 +31465,8 @@ func (s *TemplatesResponse) SetNextToken(v string) *TemplatesResponse { // Provides information about an API request or response. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -30795,17 +31485,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30813,22 +31503,22 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the settings for a campaign treatment. A treatment is a variation @@ -30836,6 +31526,11 @@ func (s TooManyRequestsException) RequestID() string { type TreatmentResource struct { _ struct{} `type:"structure"` + // The delivery configuration settings for sending the treatment through a custom + // channel. This object is required if the MessageConfiguration object for the + // treatment specifies a CustomMessage object. + CustomDeliveryConfiguration *CustomDeliveryConfiguration `type:"structure"` + // The unique identifier for the treatment. // // Id is a required field @@ -30862,8 +31557,7 @@ type TreatmentResource struct { // The custom description of the treatment. TreatmentDescription *string `type:"string"` - // The custom name of the treatment. A treatment is a variation of a campaign - // that's used for A/B testing of a campaign. + // The custom name of the treatment. TreatmentName *string `type:"string"` } @@ -30877,6 +31571,12 @@ func (s TreatmentResource) GoString() string { return s.String() } +// SetCustomDeliveryConfiguration sets the CustomDeliveryConfiguration field's value. 
+func (s *TreatmentResource) SetCustomDeliveryConfiguration(v *CustomDeliveryConfiguration) *TreatmentResource { + s.CustomDeliveryConfiguration = v + return s +} + // SetId sets the Id field's value. func (s *TreatmentResource) SetId(v string) *TreatmentResource { s.Id = &v @@ -32424,14 +33124,15 @@ type UpdateRecommenderConfiguration struct { _ struct{} `type:"structure"` // A map of key-value pairs that defines 1-10 custom endpoint or user attributes, - // depending on the value for the RecommenderUserIdType property. Each of these - // attributes temporarily stores a recommended item that's retrieved from the - // recommender model and sent to an AWS Lambda function for additional processing. - // Each attribute can be used as a message variable in a message template. + // depending on the value for the RecommendationProviderIdType property. Each + // of these attributes temporarily stores a recommended item that's retrieved + // from the recommender model and sent to an AWS Lambda function for additional + // processing. Each attribute can be used as a message variable in a message + // template. // // In the map, the key is the name of a custom attribute and the value is a // custom display name for that attribute. The display name appears in the Attribute - // finder pane of the template editor on the Amazon Pinpoint console. The following + // finder of the template editor on the Amazon Pinpoint console. The following // restrictions apply to these names: // // * An attribute name must start with a letter or number and it can contain @@ -32443,12 +33144,13 @@ type UpdateRecommenderConfiguration struct { // spaces, underscores (_), or hyphens (-). // // This object is required if the configuration invokes an AWS Lambda function - // (LambdaFunctionArn) to process recommendation data. Otherwise, don't include - // this object in your request. + // (RecommendationTransformerUri) to process recommendation data. Otherwise, + // don't include this object in your request. Attributes map[string]*string `type:"map"` // A custom description of the configuration for the recommender model. The - // description can contain up to 128 characters. + // description can contain up to 128 characters. The characters can be letters, + // numbers, spaces, or the following symbols: _ ; () , ‐. Description *string `type:"string"` // A custom name of the configuration for the recommender model. The name must @@ -32468,7 +33170,7 @@ type UpdateRecommenderConfiguration struct { // * PINPOINT_USER_ID - Associate each user in the model with a particular // user and endpoint in Amazon Pinpoint. The data is correlated based on // user IDs in Amazon Pinpoint. If you specify this value, an endpoint definition - // in Amazon Pinpoint has to specify a both a user ID (UserId) and an endpoint + // in Amazon Pinpoint has to specify both a user ID (UserId) and an endpoint // ID. Otherwise, messages won’t be sent to the user's endpoint. RecommendationProviderIdType *string `type:"string"` @@ -32491,26 +33193,26 @@ type UpdateRecommenderConfiguration struct { RecommendationTransformerUri *string `type:"string"` // A custom display name for the standard endpoint or user attribute (RecommendationItems) - // that temporarily stores a recommended item for each endpoint or user, depending - // on the value for the RecommenderUserIdType property. 
This value is required - // if the configuration doesn't invoke an AWS Lambda function (LambdaFunctionArn) + // that temporarily stores recommended items for each endpoint or user, depending + // on the value for the RecommendationProviderIdType property. This value is + // required if the configuration doesn't invoke an AWS Lambda function (RecommendationTransformerUri) // to perform additional processing of recommendation data. // - // This name appears in the Attribute finder pane of the template editor on - // the Amazon Pinpoint console. The name can contain up to 25 characters. The - // characters can be letters, numbers, spaces, underscores (_), or hyphens (-). - // These restrictions don't apply to attribute values. + // This name appears in the Attribute finder of the template editor on the Amazon + // Pinpoint console. The name can contain up to 25 characters. The characters + // can be letters, numbers, spaces, underscores (_), or hyphens (-). These restrictions + // don't apply to attribute values. RecommendationsDisplayName *string `type:"string"` // The number of recommended items to retrieve from the model for each endpoint - // or user, depending on the value for the RecommenderUserIdType property. This - // number determines how many recommended attributes are available for use as - // message variables in message templates. The minimum value is 1. The maximum - // value is 5. The default value is 5. + // or user, depending on the value for the RecommendationProviderIdType property. + // This number determines how many recommended items are available for use in + // message variables. The minimum value is 1. The maximum value is 5. The default + // value is 5. // // To use multiple recommended items and custom attributes with message variables, - // you have to use an AWS Lambda function (LambdaFunctionArn) to perform additional - // processing of recommendation data. + // you have to use an AWS Lambda function (RecommendationTransformerUri) to + // perform additional processing of recommendation data. RecommendationsPerMessage *int64 `type:"integer"` } @@ -33734,23 +34436,25 @@ func (s *WaitTime) SetWaitUntil(v string) *WaitTime { type WriteApplicationSettingsRequest struct { _ struct{} `type:"structure"` - // The settings for the AWS Lambda function to use by default as a code hook - // for campaigns in the application. To override these settings for a specific - // campaign, use the Campaign resource to define custom Lambda function settings - // for the campaign. + // The settings for the AWS Lambda function to invoke by default as a code hook + // for campaigns in the application. You can use this hook to customize segments + // that are used by campaigns in the application. + // + // To override these settings and define custom settings for a specific campaign, + // use the CampaignHook object of the Campaign resource. CampaignHook *CampaignHook `type:"structure"` // Specifies whether to enable application-related alarms in Amazon CloudWatch. CloudWatchMetricsEnabled *bool `type:"boolean"` // The default sending limits for campaigns in the application. To override - // these limits for a specific campaign, use the Campaign resource to define - // custom limits for the campaign. + // these limits and define custom limits for a specific campaign or journey, + // use the Campaign resource or the Journey resource, respectively. Limits *CampaignLimits `type:"structure"` - // The default quiet time for campaigns and journeys in the application. 
Quiet - // time is a specific time range when messages aren't sent to endpoints, if - // all the following conditions are met: + // The default quiet time for campaigns in the application. Quiet time is a + // specific time range when messages aren't sent to endpoints, if all the following + // conditions are met: // // * The EndpointDemographic.Timezone property of the endpoint is set to // a valid value. @@ -33814,6 +34518,11 @@ type WriteCampaignRequest struct { // in addition to the default treatment for the campaign. AdditionalTreatments []*WriteTreatmentResource `type:"list"` + // The delivery configuration settings for sending the campaign through a custom + // channel. This object is required if the MessageConfiguration object for the + // campaign specifies a CustomMessage object. + CustomDeliveryConfiguration *CustomDeliveryConfiguration `type:"structure"` + // A custom description of the campaign. Description *string `type:"string"` @@ -33821,11 +34530,13 @@ type WriteCampaignRequest struct { // messages from the campaign. HoldoutPercent *int64 `type:"integer"` - // The settings for the AWS Lambda function to use as a code hook for the campaign. + // The settings for the AWS Lambda function to invoke as a code hook for the + // campaign. You can use this hook to customize the segment that's used by the + // campaign. Hook *CampaignHook `type:"structure"` // Specifies whether to pause the campaign. A paused campaign doesn't run unless - // you resume it by setting this value to false. + // you resume it by changing this value to false. IsPaused *bool `type:"boolean"` // The messaging limits for the campaign. @@ -33854,10 +34565,12 @@ type WriteCampaignRequest struct { // The message template to use for the campaign. TemplateConfiguration *TemplateConfiguration `type:"structure"` - // A custom description of a variation of the campaign to use for A/B testing. + // A custom description of the default treatment for the campaign. TreatmentDescription *string `type:"string"` - // A custom name for a variation of the campaign to use for A/B testing. + // A custom name of the default treatment for the campaign, if the campaign + // has multiple treatments. A treatment is a variation of a campaign that's + // used for A/B testing. TreatmentName *string `type:"string"` } @@ -33884,6 +34597,11 @@ func (s *WriteCampaignRequest) Validate() error { } } } + if s.CustomDeliveryConfiguration != nil { + if err := s.CustomDeliveryConfiguration.Validate(); err != nil { + invalidParams.AddNested("CustomDeliveryConfiguration", err.(request.ErrInvalidParams)) + } + } if s.Schedule != nil { if err := s.Schedule.Validate(); err != nil { invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) @@ -33902,6 +34620,12 @@ func (s *WriteCampaignRequest) SetAdditionalTreatments(v []*WriteTreatmentResour return s } +// SetCustomDeliveryConfiguration sets the CustomDeliveryConfiguration field's value. +func (s *WriteCampaignRequest) SetCustomDeliveryConfiguration(v *CustomDeliveryConfiguration) *WriteCampaignRequest { + s.CustomDeliveryConfiguration = v + return s +} + // SetDescription sets the Description field's value. func (s *WriteCampaignRequest) SetDescription(v string) *WriteCampaignRequest { s.Description = &v @@ -34319,6 +35043,11 @@ func (s *WriteSegmentRequest) SetTags(v map[string]*string) *WriteSegmentRequest type WriteTreatmentResource struct { _ struct{} `type:"structure"` + // The delivery configuration settings for sending the treatment through a custom + // channel. 
This object is required if the MessageConfiguration object for the + // treatment specifies a CustomMessage object. + CustomDeliveryConfiguration *CustomDeliveryConfiguration `type:"structure"` + // The message configuration settings for the treatment. MessageConfiguration *MessageConfiguration `type:"structure"` @@ -34337,8 +35066,7 @@ type WriteTreatmentResource struct { // A custom description of the treatment. TreatmentDescription *string `type:"string"` - // A custom name for the treatment. A treatment is a variation of a campaign - // that's used for A/B testing of a campaign. + // A custom name for the treatment. TreatmentName *string `type:"string"` } @@ -34358,6 +35086,11 @@ func (s *WriteTreatmentResource) Validate() error { if s.SizePercent == nil { invalidParams.Add(request.NewErrParamRequired("SizePercent")) } + if s.CustomDeliveryConfiguration != nil { + if err := s.CustomDeliveryConfiguration.Validate(); err != nil { + invalidParams.AddNested("CustomDeliveryConfiguration", err.(request.ErrInvalidParams)) + } + } if s.Schedule != nil { if err := s.Schedule.Validate(); err != nil { invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) @@ -34370,6 +35103,12 @@ func (s *WriteTreatmentResource) Validate() error { return nil } +// SetCustomDeliveryConfiguration sets the CustomDeliveryConfiguration field's value. +func (s *WriteTreatmentResource) SetCustomDeliveryConfiguration(v *CustomDeliveryConfiguration) *WriteTreatmentResource { + s.CustomDeliveryConfiguration = v + return s +} + // SetMessageConfiguration sets the MessageConfiguration field's value. func (s *WriteTreatmentResource) SetMessageConfiguration(v *MessageConfiguration) *WriteTreatmentResource { s.MessageConfiguration = v @@ -34417,6 +35156,15 @@ const ( ActionUrl = "URL" ) +// Action_Values returns all elements of the Action enum +func Action_Values() []string { + return []string{ + ActionOpenApp, + ActionDeepLink, + ActionUrl, + } +} + const ( // AttributeTypeInclusive is a AttributeType enum value AttributeTypeInclusive = "INCLUSIVE" @@ -34425,6 +35173,14 @@ const ( AttributeTypeExclusive = "EXCLUSIVE" ) +// AttributeType_Values returns all elements of the AttributeType enum +func AttributeType_Values() []string { + return []string{ + AttributeTypeInclusive, + AttributeTypeExclusive, + } +} + const ( // CampaignStatusScheduled is a CampaignStatus enum value CampaignStatusScheduled = "SCHEDULED" @@ -34443,9 +35199,28 @@ const ( // CampaignStatusDeleted is a CampaignStatus enum value CampaignStatusDeleted = "DELETED" + + // CampaignStatusInvalid is a CampaignStatus enum value + CampaignStatusInvalid = "INVALID" ) +// CampaignStatus_Values returns all elements of the CampaignStatus enum +func CampaignStatus_Values() []string { + return []string{ + CampaignStatusScheduled, + CampaignStatusExecuting, + CampaignStatusPendingNextRun, + CampaignStatusCompleted, + CampaignStatusPaused, + CampaignStatusDeleted, + CampaignStatusInvalid, + } +} + const ( + // ChannelTypePush is a ChannelType enum value + ChannelTypePush = "PUSH" + // ChannelTypeGcm is a ChannelType enum value ChannelTypeGcm = "GCM" @@ -34480,6 +35255,24 @@ const ( ChannelTypeCustom = "CUSTOM" ) +// ChannelType_Values returns all elements of the ChannelType enum +func ChannelType_Values() []string { + return []string{ + ChannelTypePush, + ChannelTypeGcm, + ChannelTypeApns, + ChannelTypeApnsSandbox, + ChannelTypeApnsVoip, + ChannelTypeApnsVoipSandbox, + ChannelTypeAdm, + ChannelTypeSms, + ChannelTypeVoice, + ChannelTypeEmail, + ChannelTypeBaidu, 
+ ChannelTypeCustom, + } +} + const ( // DeliveryStatusSuccessful is a DeliveryStatus enum value DeliveryStatusSuccessful = "SUCCESSFUL" @@ -34503,6 +35296,19 @@ const ( DeliveryStatusDuplicate = "DUPLICATE" ) +// DeliveryStatus_Values returns all elements of the DeliveryStatus enum +func DeliveryStatus_Values() []string { + return []string{ + DeliveryStatusSuccessful, + DeliveryStatusThrottled, + DeliveryStatusTemporaryFailure, + DeliveryStatusPermanentFailure, + DeliveryStatusUnknownFailure, + DeliveryStatusOptOut, + DeliveryStatusDuplicate, + } +} + const ( // DimensionTypeInclusive is a DimensionType enum value DimensionTypeInclusive = "INCLUSIVE" @@ -34511,6 +35317,14 @@ const ( DimensionTypeExclusive = "EXCLUSIVE" ) +// DimensionType_Values returns all elements of the DimensionType enum +func DimensionType_Values() []string { + return []string{ + DimensionTypeInclusive, + DimensionTypeExclusive, + } +} + const ( // DurationHr24 is a Duration enum value DurationHr24 = "HR_24" @@ -34525,6 +35339,72 @@ const ( DurationDay30 = "DAY_30" ) +// Duration_Values returns all elements of the Duration enum +func Duration_Values() []string { + return []string{ + DurationHr24, + DurationDay7, + DurationDay14, + DurationDay30, + } +} + +const ( + // EndpointTypesElementPush is a EndpointTypesElement enum value + EndpointTypesElementPush = "PUSH" + + // EndpointTypesElementGcm is a EndpointTypesElement enum value + EndpointTypesElementGcm = "GCM" + + // EndpointTypesElementApns is a EndpointTypesElement enum value + EndpointTypesElementApns = "APNS" + + // EndpointTypesElementApnsSandbox is a EndpointTypesElement enum value + EndpointTypesElementApnsSandbox = "APNS_SANDBOX" + + // EndpointTypesElementApnsVoip is a EndpointTypesElement enum value + EndpointTypesElementApnsVoip = "APNS_VOIP" + + // EndpointTypesElementApnsVoipSandbox is a EndpointTypesElement enum value + EndpointTypesElementApnsVoipSandbox = "APNS_VOIP_SANDBOX" + + // EndpointTypesElementAdm is a EndpointTypesElement enum value + EndpointTypesElementAdm = "ADM" + + // EndpointTypesElementSms is a EndpointTypesElement enum value + EndpointTypesElementSms = "SMS" + + // EndpointTypesElementVoice is a EndpointTypesElement enum value + EndpointTypesElementVoice = "VOICE" + + // EndpointTypesElementEmail is a EndpointTypesElement enum value + EndpointTypesElementEmail = "EMAIL" + + // EndpointTypesElementBaidu is a EndpointTypesElement enum value + EndpointTypesElementBaidu = "BAIDU" + + // EndpointTypesElementCustom is a EndpointTypesElement enum value + EndpointTypesElementCustom = "CUSTOM" +) + +// EndpointTypesElement_Values returns all elements of the EndpointTypesElement enum +func EndpointTypesElement_Values() []string { + return []string{ + EndpointTypesElementPush, + EndpointTypesElementGcm, + EndpointTypesElementApns, + EndpointTypesElementApnsSandbox, + EndpointTypesElementApnsVoip, + EndpointTypesElementApnsVoipSandbox, + EndpointTypesElementAdm, + EndpointTypesElementSms, + EndpointTypesElementVoice, + EndpointTypesElementEmail, + EndpointTypesElementBaidu, + EndpointTypesElementCustom, + } +} + const ( // FilterTypeSystem is a FilterType enum value FilterTypeSystem = "SYSTEM" @@ -34533,6 +35413,14 @@ const ( FilterTypeEndpoint = "ENDPOINT" ) +// FilterType_Values returns all elements of the FilterType enum +func FilterType_Values() []string { + return []string{ + FilterTypeSystem, + FilterTypeEndpoint, + } +} + const ( // FormatCsv is a Format enum value FormatCsv = "CSV" @@ -34541,6 +35429,14 @@ const ( FormatJson = "JSON" 
) +// Format_Values returns all elements of the Format enum +func Format_Values() []string { + return []string{ + FormatCsv, + FormatJson, + } +} + const ( // FrequencyOnce is a Frequency enum value FrequencyOnce = "ONCE" @@ -34561,6 +35457,18 @@ const ( FrequencyEvent = "EVENT" ) +// Frequency_Values returns all elements of the Frequency enum +func Frequency_Values() []string { + return []string{ + FrequencyOnce, + FrequencyHourly, + FrequencyDaily, + FrequencyWeekly, + FrequencyMonthly, + FrequencyEvent, + } +} + const ( // IncludeAll is a Include enum value IncludeAll = "ALL" @@ -34572,6 +35480,15 @@ const ( IncludeNone = "NONE" ) +// Include_Values returns all elements of the Include enum +func Include_Values() []string { + return []string{ + IncludeAll, + IncludeAny, + IncludeNone, + } +} + const ( // JobStatusCreated is a JobStatus enum value JobStatusCreated = "CREATED" @@ -34601,6 +35518,21 @@ const ( JobStatusFailed = "FAILED" ) +// JobStatus_Values returns all elements of the JobStatus enum +func JobStatus_Values() []string { + return []string{ + JobStatusCreated, + JobStatusPreparingForInitialization, + JobStatusInitializing, + JobStatusProcessing, + JobStatusPendingJob, + JobStatusCompleting, + JobStatusCompleted, + JobStatusFailing, + JobStatusFailed, + } +} + const ( // MessageTypeTransactional is a MessageType enum value MessageTypeTransactional = "TRANSACTIONAL" @@ -34609,6 +35541,14 @@ const ( MessageTypePromotional = "PROMOTIONAL" ) +// MessageType_Values returns all elements of the MessageType enum +func MessageType_Values() []string { + return []string{ + MessageTypeTransactional, + MessageTypePromotional, + } +} + const ( // ModeDelivery is a Mode enum value ModeDelivery = "DELIVERY" @@ -34617,6 +35557,14 @@ const ( ModeFilter = "FILTER" ) +// Mode_Values returns all elements of the Mode enum +func Mode_Values() []string { + return []string{ + ModeDelivery, + ModeFilter, + } +} + const ( // OperatorAll is a Operator enum value OperatorAll = "ALL" @@ -34625,6 +35573,14 @@ const ( OperatorAny = "ANY" ) +// Operator_Values returns all elements of the Operator enum +func Operator_Values() []string { + return []string{ + OperatorAll, + OperatorAny, + } +} + const ( // RecencyTypeActive is a RecencyType enum value RecencyTypeActive = "ACTIVE" @@ -34633,6 +35589,14 @@ const ( RecencyTypeInactive = "INACTIVE" ) +// RecencyType_Values returns all elements of the RecencyType enum +func RecencyType_Values() []string { + return []string{ + RecencyTypeActive, + RecencyTypeInactive, + } +} + const ( // SegmentTypeDimensional is a SegmentType enum value SegmentTypeDimensional = "DIMENSIONAL" @@ -34641,6 +35605,14 @@ const ( SegmentTypeImport = "IMPORT" ) +// SegmentType_Values returns all elements of the SegmentType enum +func SegmentType_Values() []string { + return []string{ + SegmentTypeDimensional, + SegmentTypeImport, + } +} + const ( // SourceTypeAll is a SourceType enum value SourceTypeAll = "ALL" @@ -34652,6 +35624,15 @@ const ( SourceTypeNone = "NONE" ) +// SourceType_Values returns all elements of the SourceType enum +func SourceType_Values() []string { + return []string{ + SourceTypeAll, + SourceTypeAny, + SourceTypeNone, + } +} + const ( // StateDraft is a State enum value StateDraft = "DRAFT" @@ -34669,6 +35650,17 @@ const ( StateClosed = "CLOSED" ) +// State_Values returns all elements of the State enum +func State_Values() []string { + return []string{ + StateDraft, + StateActive, + StateCompleted, + StateCancelled, + StateClosed, + } +} + const ( // TemplateTypeEmail 
is a TemplateType enum value TemplateTypeEmail = "EMAIL" @@ -34683,6 +35675,16 @@ const ( TemplateTypePush = "PUSH" ) +// TemplateType_Values returns all elements of the TemplateType enum +func TemplateType_Values() []string { + return []string{ + TemplateTypeEmail, + TemplateTypeSms, + TemplateTypeVoice, + TemplateTypePush, + } +} + const ( // TypeAll is a Type enum value TypeAll = "ALL" @@ -34693,3 +35695,12 @@ const ( // TypeNone is a Type enum value TypeNone = "NONE" ) + +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeAll, + TypeAny, + TypeNone, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/pinpoint/errors.go b/vendor/github.com/aws/aws-sdk-go/service/pinpoint/errors.go index 569147fd7..25f548678 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/pinpoint/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/pinpoint/errors.go @@ -14,6 +14,12 @@ const ( // Provides information about an API request or response. ErrCodeBadRequestException = "BadRequestException" + // ErrCodeConflictException for service response error code + // "ConflictException". + // + // Provides information about an API request or response. + ErrCodeConflictException = "ConflictException" + // ErrCodeForbiddenException for service response error code // "ForbiddenException". // @@ -53,6 +59,7 @@ const ( var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "BadRequestException": newErrorBadRequestException, + "ConflictException": newErrorConflictException, "ForbiddenException": newErrorForbiddenException, "InternalServerErrorException": newErrorInternalServerErrorException, "MethodNotAllowedException": newErrorMethodNotAllowedException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go b/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go index 80e29eeda..31529ff9c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/pricing/api.go b/vendor/github.com/aws/aws-sdk-go/service/pricing/api.go index eddb51929..af3787c5d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/pricing/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/pricing/api.go @@ -605,8 +605,8 @@ func (s *DescribeServicesOutput) SetServices(v []*Service) *DescribeServicesOutp // The pagination token expired. Try again without a pagination token. type ExpiredNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -623,17 +623,17 @@ func (s ExpiredNextTokenException) GoString() string { func newErrorExpiredNextTokenException(v protocol.ResponseMetadata) error { return &ExpiredNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
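For context: the *_Values() helpers added above return every member of a pinpoint enum as a []string. A minimal sketch of how such a helper might be consumed (illustrative only, not part of the vendored file; the validateFrequency helper name and the sample value are hypothetical):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/pinpoint"
)

// validateFrequency reports whether v is one of the Frequency enum values
// exposed by the new pinpoint.Frequency_Values() helper.
func validateFrequency(v string) error {
	for _, valid := range pinpoint.Frequency_Values() {
		if v == valid {
			return nil
		}
	}
	return fmt.Errorf("invalid frequency %q, expected one of %v", v, pinpoint.Frequency_Values())
}

func main() {
	// "BIWEEKLY" is not a Frequency member, so this prints an error.
	if err := validateFrequency("BIWEEKLY"); err != nil {
		fmt.Println(err)
	}
}

In the provider these helpers are typically fed to schema validators such as validation.StringInSlice(pinpoint.Frequency_Values(), false), which avoids hand-maintained lists of enum strings.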
-func (s ExpiredNextTokenException) Code() string { +func (s *ExpiredNextTokenException) Code() string { return "ExpiredNextTokenException" } // Message returns the exception's message. -func (s ExpiredNextTokenException) Message() string { +func (s *ExpiredNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -641,22 +641,22 @@ func (s ExpiredNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ExpiredNextTokenException) OrigErr() error { +func (s *ExpiredNextTokenException) OrigErr() error { return nil } -func (s ExpiredNextTokenException) Error() string { +func (s *ExpiredNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ExpiredNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ExpiredNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ExpiredNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ExpiredNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The constraints that you want all returned products to match. @@ -981,8 +981,8 @@ func (s *GetProductsOutput) SetPriceList(v []aws.JSONValue) *GetProductsOutput { // An error on the server occurred during the processing of your request. Try // again later. type InternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -999,17 +999,17 @@ func (s InternalErrorException) GoString() string { func newErrorInternalErrorException(v protocol.ResponseMetadata) error { return &InternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalErrorException) Code() string { +func (s *InternalErrorException) Code() string { return "InternalErrorException" } // Message returns the exception's message. -func (s InternalErrorException) Message() string { +func (s *InternalErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1017,28 +1017,28 @@ func (s InternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalErrorException) OrigErr() error { +func (s *InternalErrorException) OrigErr() error { return nil } -func (s InternalErrorException) Error() string { +func (s *InternalErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalErrorException) RequestID() string { + return s.RespMetadata.RequestID } // The pagination token is invalid. Try again without a pagination token. 
type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1055,17 +1055,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1073,28 +1073,28 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // One or more parameters had an invalid value. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1111,17 +1111,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1129,28 +1129,28 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The requested resource can't be found. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1167,17 +1167,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1185,22 +1185,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The metadata for a service, such as the service code and available attribute @@ -1241,3 +1241,10 @@ const ( // FilterTypeTermMatch is a FilterType enum value FilterTypeTermMatch = "TERM_MATCH" ) + +// FilterType_Values returns all elements of the FilterType enum +func FilterType_Values() []string { + return []string{ + FilterTypeTermMatch, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go b/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go index f63fe66af..c657fb606 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/qldb/api.go b/vendor/github.com/aws/aws-sdk-go/service/qldb/api.go index 0fcb1f127..4a56b87d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/qldb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/qldb/api.go @@ -13,6 +13,96 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restjson" ) +const opCancelJournalKinesisStream = "CancelJournalKinesisStream" + +// CancelJournalKinesisStreamRequest generates a "aws/request.Request" representing the +// client's request for the CancelJournalKinesisStream operation. 
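For context: the pricing hunks above switch the exception types to pointer receivers and export RespMetadata. A hedged sketch of how a caller might match one of these typed errors with errors.As; the region, service code, and handling below are illustrative assumptions, not part of the vendored file:

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/pricing"
)

func main() {
	sess, err := session.NewSession(aws.NewConfig().WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	client := pricing.New(sess)

	_, err = client.DescribeServices(&pricing.DescribeServicesInput{
		ServiceCode: aws.String("AmazonEC2"),
	})
	if err != nil {
		// Because Code, Message, and Error are now pointer-receiver methods,
		// the concrete error value is a *pricing.InvalidParameterException,
		// so errors.As can match it and StatusCode() reads RespMetadata.
		var ipe *pricing.InvalidParameterException
		if errors.As(err, &ipe) {
			fmt.Printf("bad parameter (HTTP %d): %s\n", ipe.StatusCode(), ipe.Message())
			return
		}
		log.Fatal(err)
	}
}

The same pattern applies to the new pinpoint ConflictException and the other regenerated exception types in this patch.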
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CancelJournalKinesisStream for more information on using the CancelJournalKinesisStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelJournalKinesisStreamRequest method. +// req, resp := client.CancelJournalKinesisStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/CancelJournalKinesisStream +func (c *QLDB) CancelJournalKinesisStreamRequest(input *CancelJournalKinesisStreamInput) (req *request.Request, output *CancelJournalKinesisStreamOutput) { + op := &request.Operation{ + Name: opCancelJournalKinesisStream, + HTTPMethod: "DELETE", + HTTPPath: "/ledgers/{name}/journal-kinesis-streams/{streamId}", + } + + if input == nil { + input = &CancelJournalKinesisStreamInput{} + } + + output = &CancelJournalKinesisStreamOutput{} + req = c.newRequest(op, input, output) + return +} + +// CancelJournalKinesisStream API operation for Amazon QLDB. +// +// Ends a given Amazon QLDB journal stream. Before a stream can be canceled, +// its current status must be ACTIVE. +// +// You can't restart a stream after you cancel it. Canceled QLDB stream resources +// are subject to a 7-day retention period, so they are automatically deleted +// after this limit expires. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation CancelJournalKinesisStream for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more parameters in the request aren't valid. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// * ResourcePreconditionNotMetException +// The operation failed because a condition wasn't satisfied in advance. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/CancelJournalKinesisStream +func (c *QLDB) CancelJournalKinesisStream(input *CancelJournalKinesisStreamInput) (*CancelJournalKinesisStreamOutput, error) { + req, out := c.CancelJournalKinesisStreamRequest(input) + return out, req.Send() +} + +// CancelJournalKinesisStreamWithContext is the same as CancelJournalKinesisStream with the addition of +// the ability to pass a context and additional request options. +// +// See CancelJournalKinesisStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *QLDB) CancelJournalKinesisStreamWithContext(ctx aws.Context, input *CancelJournalKinesisStreamInput, opts ...request.Option) (*CancelJournalKinesisStreamOutput, error) { + req, out := c.CancelJournalKinesisStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateLedger = "CreateLedger" // CreateLedgerRequest generates a "aws/request.Request" representing the @@ -196,6 +286,93 @@ func (c *QLDB) DeleteLedgerWithContext(ctx aws.Context, input *DeleteLedgerInput return out, req.Send() } +const opDescribeJournalKinesisStream = "DescribeJournalKinesisStream" + +// DescribeJournalKinesisStreamRequest generates a "aws/request.Request" representing the +// client's request for the DescribeJournalKinesisStream operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeJournalKinesisStream for more information on using the DescribeJournalKinesisStream +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeJournalKinesisStreamRequest method. +// req, resp := client.DescribeJournalKinesisStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/DescribeJournalKinesisStream +func (c *QLDB) DescribeJournalKinesisStreamRequest(input *DescribeJournalKinesisStreamInput) (req *request.Request, output *DescribeJournalKinesisStreamOutput) { + op := &request.Operation{ + Name: opDescribeJournalKinesisStream, + HTTPMethod: "GET", + HTTPPath: "/ledgers/{name}/journal-kinesis-streams/{streamId}", + } + + if input == nil { + input = &DescribeJournalKinesisStreamInput{} + } + + output = &DescribeJournalKinesisStreamOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeJournalKinesisStream API operation for Amazon QLDB. +// +// Returns detailed information about a given Amazon QLDB journal stream. The +// output includes the Amazon Resource Name (ARN), stream name, current status, +// creation time, and the parameters of your original stream creation request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation DescribeJournalKinesisStream for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more parameters in the request aren't valid. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// * ResourcePreconditionNotMetException +// The operation failed because a condition wasn't satisfied in advance. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/DescribeJournalKinesisStream +func (c *QLDB) DescribeJournalKinesisStream(input *DescribeJournalKinesisStreamInput) (*DescribeJournalKinesisStreamOutput, error) { + req, out := c.DescribeJournalKinesisStreamRequest(input) + return out, req.Send() +} + +// DescribeJournalKinesisStreamWithContext is the same as DescribeJournalKinesisStream with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeJournalKinesisStream for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) DescribeJournalKinesisStreamWithContext(ctx aws.Context, input *DescribeJournalKinesisStreamInput, opts ...request.Option) (*DescribeJournalKinesisStreamOutput, error) { + req, out := c.DescribeJournalKinesisStreamRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeJournalS3Export = "DescribeJournalS3Export" // DescribeJournalS3ExportRequest generates a "aws/request.Request" representing the @@ -244,6 +421,10 @@ func (c *QLDB) DescribeJournalS3ExportRequest(input *DescribeJournalS3ExportInpu // export ID, when it was created, current status, and its start and end time // export parameters. // +// This action does not return any expired export jobs. For more information, +// see Export Job Expiration (https://docs.aws.amazon.com/qldb/latest/developerguide/export-journal.request.html#export-journal.request.expiration) +// in the Amazon QLDB Developer Guide. +// // If the export job with the given ExportId doesn't exist, then throws ResourceNotFoundException. // // If the ledger with the given Name doesn't exist, then throws ResourceNotFoundException. @@ -498,9 +679,13 @@ func (c *QLDB) GetBlockRequest(input *GetBlockInput) (req *request.Request, outp // GetBlock API operation for Amazon QLDB. // -// Returns a journal block object at a specified address in a ledger. Also returns +// Returns a block object at a specified address in a journal. Also returns // a proof of the specified block for verification if DigestTipAddress is provided. // +// For information about the data contents in a block, see Journal contents +// (https://docs.aws.amazon.com/qldb/latest/developerguide/journal-contents.html) +// in the Amazon QLDB Developer Guide. +// // If the specified ledger doesn't exist or is in DELETING status, then throws // ResourceNotFoundException. // @@ -720,6 +905,155 @@ func (c *QLDB) GetRevisionWithContext(ctx aws.Context, input *GetRevisionInput, return out, req.Send() } +const opListJournalKinesisStreamsForLedger = "ListJournalKinesisStreamsForLedger" + +// ListJournalKinesisStreamsForLedgerRequest generates a "aws/request.Request" representing the +// client's request for the ListJournalKinesisStreamsForLedger operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListJournalKinesisStreamsForLedger for more information on using the ListJournalKinesisStreamsForLedger +// API call, and error handling. 
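For context: a hedged sketch of calling the new DescribeJournalKinesisStream and CancelJournalKinesisStream operations defined above (illustrative only; the ledger name, stream ID, and region are placeholders, and a real stream ID must be at least 22 characters):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/qldb"
)

func main() {
	sess, err := session.NewSession(aws.NewConfig().WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	client := qldb.New(sess)
	ctx := context.Background()

	// Look up the stream before deciding whether to cancel it.
	desc, err := client.DescribeJournalKinesisStreamWithContext(ctx, &qldb.DescribeJournalKinesisStreamInput{
		LedgerName: aws.String("example-ledger"),        // placeholder
		StreamId:   aws.String("7ISCkqwe4y25YyHLzYUFAf"), // placeholder, >= 22 chars
	})
	if err != nil {
		log.Fatal(err)
	}
	if desc.Stream == nil {
		log.Fatal("no stream description returned")
	}
	fmt.Println("stream status:", aws.StringValue(desc.Stream.Status))

	// Per the API documentation above, only streams that are still ACTIVE can be canceled.
	if aws.StringValue(desc.Stream.Status) == "ACTIVE" {
		out, err := client.CancelJournalKinesisStreamWithContext(ctx, &qldb.CancelJournalKinesisStreamInput{
			LedgerName: aws.String("example-ledger"),
			StreamId:   desc.Stream.StreamId,
		})
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("canceled stream:", aws.StringValue(out.StreamId))
	}
}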
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListJournalKinesisStreamsForLedgerRequest method. +// req, resp := client.ListJournalKinesisStreamsForLedgerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/ListJournalKinesisStreamsForLedger +func (c *QLDB) ListJournalKinesisStreamsForLedgerRequest(input *ListJournalKinesisStreamsForLedgerInput) (req *request.Request, output *ListJournalKinesisStreamsForLedgerOutput) { + op := &request.Operation{ + Name: opListJournalKinesisStreamsForLedger, + HTTPMethod: "GET", + HTTPPath: "/ledgers/{name}/journal-kinesis-streams", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListJournalKinesisStreamsForLedgerInput{} + } + + output = &ListJournalKinesisStreamsForLedgerOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListJournalKinesisStreamsForLedger API operation for Amazon QLDB. +// +// Returns an array of all Amazon QLDB journal stream descriptors for a given +// ledger. The output of each stream descriptor includes the same details that +// are returned by DescribeJournalKinesisStream. +// +// This action returns a maximum of MaxResults items. It is paginated so that +// you can retrieve all the items by calling ListJournalKinesisStreamsForLedger +// multiple times. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation ListJournalKinesisStreamsForLedger for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more parameters in the request aren't valid. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// * ResourcePreconditionNotMetException +// The operation failed because a condition wasn't satisfied in advance. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/ListJournalKinesisStreamsForLedger +func (c *QLDB) ListJournalKinesisStreamsForLedger(input *ListJournalKinesisStreamsForLedgerInput) (*ListJournalKinesisStreamsForLedgerOutput, error) { + req, out := c.ListJournalKinesisStreamsForLedgerRequest(input) + return out, req.Send() +} + +// ListJournalKinesisStreamsForLedgerWithContext is the same as ListJournalKinesisStreamsForLedger with the addition of +// the ability to pass a context and additional request options. +// +// See ListJournalKinesisStreamsForLedger for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *QLDB) ListJournalKinesisStreamsForLedgerWithContext(ctx aws.Context, input *ListJournalKinesisStreamsForLedgerInput, opts ...request.Option) (*ListJournalKinesisStreamsForLedgerOutput, error) { + req, out := c.ListJournalKinesisStreamsForLedgerRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListJournalKinesisStreamsForLedgerPages iterates over the pages of a ListJournalKinesisStreamsForLedger operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJournalKinesisStreamsForLedger method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJournalKinesisStreamsForLedger operation. +// pageNum := 0 +// err := client.ListJournalKinesisStreamsForLedgerPages(params, +// func(page *qldb.ListJournalKinesisStreamsForLedgerOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QLDB) ListJournalKinesisStreamsForLedgerPages(input *ListJournalKinesisStreamsForLedgerInput, fn func(*ListJournalKinesisStreamsForLedgerOutput, bool) bool) error { + return c.ListJournalKinesisStreamsForLedgerPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListJournalKinesisStreamsForLedgerPagesWithContext same as ListJournalKinesisStreamsForLedgerPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) ListJournalKinesisStreamsForLedgerPagesWithContext(ctx aws.Context, input *ListJournalKinesisStreamsForLedgerInput, fn func(*ListJournalKinesisStreamsForLedgerOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListJournalKinesisStreamsForLedgerInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListJournalKinesisStreamsForLedgerRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListJournalKinesisStreamsForLedgerOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListJournalS3Exports = "ListJournalS3Exports" // ListJournalS3ExportsRequest generates a "aws/request.Request" representing the @@ -776,6 +1110,10 @@ func (c *QLDB) ListJournalS3ExportsRequest(input *ListJournalS3ExportsInput) (re // This action returns a maximum of MaxResults items, and is paginated so that // you can retrieve all the items by calling ListJournalS3Exports multiple times. // +// This action does not return any expired export jobs. For more information, +// see Export Job Expiration (https://docs.aws.amazon.com/qldb/latest/developerguide/export-journal.request.html#export-journal.request.expiration) +// in the Amazon QLDB Developer Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
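For context: the ListJournalKinesisStreamsForLedger operation above is paginated, and the generated Pages helper drives the NextToken loop. A minimal sketch of iterating all journal streams for a ledger (illustrative only; the ledger name and region are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/qldb"
)

func main() {
	sess, err := session.NewSession(aws.NewConfig().WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	client := qldb.New(sess)

	input := &qldb.ListJournalKinesisStreamsForLedgerInput{
		LedgerName: aws.String("example-ledger"), // placeholder
		MaxResults: aws.Int64(10),
	}

	// Returning true from the callback requests the next page; returning false stops early.
	err = client.ListJournalKinesisStreamsForLedgerPages(input,
		func(page *qldb.ListJournalKinesisStreamsForLedgerOutput, lastPage bool) bool {
			for _, s := range page.Streams {
				fmt.Printf("%s (%s)\n", aws.StringValue(s.StreamName), aws.StringValue(s.Status))
			}
			return true
		})
	if err != nil {
		log.Fatal(err)
	}
}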
@@ -912,6 +1250,10 @@ func (c *QLDB) ListJournalS3ExportsForLedgerRequest(input *ListJournalS3ExportsF // you can retrieve all the items by calling ListJournalS3ExportsForLedger multiple // times. // +// This action does not return any expired export jobs. For more information, +// see Export Job Expiration (https://docs.aws.amazon.com/qldb/latest/developerguide/export-journal.request.html#export-journal.request.expiration) +// in the Amazon QLDB Developer Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1210,6 +1552,93 @@ func (c *QLDB) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsFo return out, req.Send() } +const opStreamJournalToKinesis = "StreamJournalToKinesis" + +// StreamJournalToKinesisRequest generates a "aws/request.Request" representing the +// client's request for the StreamJournalToKinesis operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StreamJournalToKinesis for more information on using the StreamJournalToKinesis +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StreamJournalToKinesisRequest method. +// req, resp := client.StreamJournalToKinesisRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/StreamJournalToKinesis +func (c *QLDB) StreamJournalToKinesisRequest(input *StreamJournalToKinesisInput) (req *request.Request, output *StreamJournalToKinesisOutput) { + op := &request.Operation{ + Name: opStreamJournalToKinesis, + HTTPMethod: "POST", + HTTPPath: "/ledgers/{name}/journal-kinesis-streams", + } + + if input == nil { + input = &StreamJournalToKinesisInput{} + } + + output = &StreamJournalToKinesisOutput{} + req = c.newRequest(op, input, output) + return +} + +// StreamJournalToKinesis API operation for Amazon QLDB. +// +// Creates a journal stream for a given Amazon QLDB ledger. The stream captures +// every document revision that is committed to the ledger's journal and delivers +// the data to a specified Amazon Kinesis Data Streams resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QLDB's +// API operation StreamJournalToKinesis for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more parameters in the request aren't valid. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// * ResourcePreconditionNotMetException +// The operation failed because a condition wasn't satisfied in advance. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/qldb-2019-01-02/StreamJournalToKinesis +func (c *QLDB) StreamJournalToKinesis(input *StreamJournalToKinesisInput) (*StreamJournalToKinesisOutput, error) { + req, out := c.StreamJournalToKinesisRequest(input) + return out, req.Send() +} + +// StreamJournalToKinesisWithContext is the same as StreamJournalToKinesis with the addition of +// the ability to pass a context and additional request options. +// +// See StreamJournalToKinesis for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QLDB) StreamJournalToKinesisWithContext(ctx aws.Context, input *StreamJournalToKinesisInput, opts ...request.Option) (*StreamJournalToKinesisOutput, error) { + req, out := c.StreamJournalToKinesisRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTagResource = "TagResource" // TagResourceRequest generates a "aws/request.Request" representing the @@ -1462,6 +1891,87 @@ func (c *QLDB) UpdateLedgerWithContext(ctx aws.Context, input *UpdateLedgerInput return out, req.Send() } +type CancelJournalKinesisStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the ledger. + // + // LedgerName is a required field + LedgerName *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` + + // The unique ID that QLDB assigns to each QLDB journal stream. + // + // StreamId is a required field + StreamId *string `location:"uri" locationName:"streamId" min:"22" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelJournalKinesisStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelJournalKinesisStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelJournalKinesisStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelJournalKinesisStreamInput"} + if s.LedgerName == nil { + invalidParams.Add(request.NewErrParamRequired("LedgerName")) + } + if s.LedgerName != nil && len(*s.LedgerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LedgerName", 1)) + } + if s.StreamId == nil { + invalidParams.Add(request.NewErrParamRequired("StreamId")) + } + if s.StreamId != nil && len(*s.StreamId) < 22 { + invalidParams.Add(request.NewErrParamMinLen("StreamId", 22)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLedgerName sets the LedgerName field's value. +func (s *CancelJournalKinesisStreamInput) SetLedgerName(v string) *CancelJournalKinesisStreamInput { + s.LedgerName = &v + return s +} + +// SetStreamId sets the StreamId field's value. +func (s *CancelJournalKinesisStreamInput) SetStreamId(v string) *CancelJournalKinesisStreamInput { + s.StreamId = &v + return s +} + +type CancelJournalKinesisStreamOutput struct { + _ struct{} `type:"structure"` + + // The unique ID that QLDB assigns to each QLDB journal stream. 
+ StreamId *string `min:"22" type:"string"` +} + +// String returns the string representation +func (s CancelJournalKinesisStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelJournalKinesisStreamOutput) GoString() string { + return s.String() +} + +// SetStreamId sets the StreamId field's value. +func (s *CancelJournalKinesisStreamOutput) SetStreamId(v string) *CancelJournalKinesisStreamOutput { + s.StreamId = &v + return s +} + type CreateLedgerInput struct { _ struct{} `type:"structure"` @@ -1478,6 +1988,10 @@ type CreateLedgerInput struct { // The name of the ledger that you want to create. The name must be unique among // all of your ledgers in the current AWS Region. // + // Naming constraints for ledger names are defined in Quotas in Amazon QLDB + // (https://docs.aws.amazon.com/qldb/latest/developerguide/limits.html#limits.naming) + // in the Amazon QLDB Developer Guide. + // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -1667,6 +2181,88 @@ func (s DeleteLedgerOutput) GoString() string { return s.String() } +type DescribeJournalKinesisStreamInput struct { + _ struct{} `type:"structure"` + + // The name of the ledger. + // + // LedgerName is a required field + LedgerName *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` + + // The unique ID that QLDB assigns to each QLDB journal stream. + // + // StreamId is a required field + StreamId *string `location:"uri" locationName:"streamId" min:"22" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeJournalKinesisStreamInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeJournalKinesisStreamInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeJournalKinesisStreamInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeJournalKinesisStreamInput"} + if s.LedgerName == nil { + invalidParams.Add(request.NewErrParamRequired("LedgerName")) + } + if s.LedgerName != nil && len(*s.LedgerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LedgerName", 1)) + } + if s.StreamId == nil { + invalidParams.Add(request.NewErrParamRequired("StreamId")) + } + if s.StreamId != nil && len(*s.StreamId) < 22 { + invalidParams.Add(request.NewErrParamMinLen("StreamId", 22)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLedgerName sets the LedgerName field's value. +func (s *DescribeJournalKinesisStreamInput) SetLedgerName(v string) *DescribeJournalKinesisStreamInput { + s.LedgerName = &v + return s +} + +// SetStreamId sets the StreamId field's value. +func (s *DescribeJournalKinesisStreamInput) SetStreamId(v string) *DescribeJournalKinesisStreamInput { + s.StreamId = &v + return s +} + +type DescribeJournalKinesisStreamOutput struct { + _ struct{} `type:"structure"` + + // Information about the QLDB journal stream returned by a DescribeJournalS3Export + // request. 
+ Stream *JournalKinesisStreamDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeJournalKinesisStreamOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeJournalKinesisStreamOutput) GoString() string { + return s.String() +} + +// SetStream sets the Stream field's value. +func (s *DescribeJournalKinesisStreamOutput) SetStream(v *JournalKinesisStreamDescription) *DescribeJournalKinesisStreamOutput { + s.Stream = v + return s +} + type DescribeJournalS3ExportInput struct { _ struct{} `type:"structure"` @@ -2348,8 +2944,8 @@ func (s *GetRevisionOutput) SetRevision(v *ValueHolder) *GetRevisionOutput { // One or more parameters in the request aren't valid. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -2369,17 +2965,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2387,22 +2983,157 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +func (s *InvalidParameterException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The information about an Amazon QLDB journal stream, including the Amazon +// Resource Name (ARN), stream name, creation time, current status, and the +// parameters of your original stream creation request. +type JournalKinesisStreamDescription struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the QLDB journal stream. + Arn *string `min:"20" type:"string"` + + // The date and time, in epoch time format, when the QLDB journal stream was + // created. (Epoch time format is the number of seconds elapsed since 12:00:00 + // AM January 1, 1970 UTC.) + CreationTime *time.Time `type:"timestamp"` + + // The error message that describes the reason that a stream has a status of + // IMPAIRED or FAILED. This is not applicable to streams that have other status + // values. + ErrorCause *string `type:"string" enum:"ErrorCause"` + + // The exclusive date and time that specifies when the stream ends. If this + // parameter is blank, the stream runs indefinitely until you cancel it. 
+ ExclusiveEndTime *time.Time `type:"timestamp"` + + // The inclusive start date and time from which to start streaming journal data. + InclusiveStartTime *time.Time `type:"timestamp"` + + // The configuration settings of the Amazon Kinesis Data Streams destination + // for your QLDB journal stream. + // + // KinesisConfiguration is a required field + KinesisConfiguration *KinesisConfiguration `type:"structure" required:"true"` + + // The name of the ledger. + // + // LedgerName is a required field + LedgerName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions + // for a journal stream to write data records to a Kinesis Data Streams resource. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The current state of the QLDB journal stream. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"StreamStatus"` + + // The unique ID that QLDB assigns to each QLDB journal stream. + // + // StreamId is a required field + StreamId *string `min:"22" type:"string" required:"true"` + + // The user-defined name of the QLDB journal stream. + // + // StreamName is a required field + StreamName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s JournalKinesisStreamDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JournalKinesisStreamDescription) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *JournalKinesisStreamDescription) SetArn(v string) *JournalKinesisStreamDescription { + s.Arn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *JournalKinesisStreamDescription) SetCreationTime(v time.Time) *JournalKinesisStreamDescription { + s.CreationTime = &v + return s +} + +// SetErrorCause sets the ErrorCause field's value. +func (s *JournalKinesisStreamDescription) SetErrorCause(v string) *JournalKinesisStreamDescription { + s.ErrorCause = &v + return s +} + +// SetExclusiveEndTime sets the ExclusiveEndTime field's value. +func (s *JournalKinesisStreamDescription) SetExclusiveEndTime(v time.Time) *JournalKinesisStreamDescription { + s.ExclusiveEndTime = &v + return s +} + +// SetInclusiveStartTime sets the InclusiveStartTime field's value. +func (s *JournalKinesisStreamDescription) SetInclusiveStartTime(v time.Time) *JournalKinesisStreamDescription { + s.InclusiveStartTime = &v + return s +} + +// SetKinesisConfiguration sets the KinesisConfiguration field's value. +func (s *JournalKinesisStreamDescription) SetKinesisConfiguration(v *KinesisConfiguration) *JournalKinesisStreamDescription { + s.KinesisConfiguration = v + return s +} + +// SetLedgerName sets the LedgerName field's value. +func (s *JournalKinesisStreamDescription) SetLedgerName(v string) *JournalKinesisStreamDescription { + s.LedgerName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *JournalKinesisStreamDescription) SetRoleArn(v string) *JournalKinesisStreamDescription { + s.RoleArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *JournalKinesisStreamDescription) SetStatus(v string) *JournalKinesisStreamDescription { + s.Status = &v + return s } -// Status code returns the HTTP status code for the request's response error. 
-func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +// SetStreamId sets the StreamId field's value. +func (s *JournalKinesisStreamDescription) SetStreamId(v string) *JournalKinesisStreamDescription { + s.StreamId = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +// SetStreamName sets the StreamName field's value. +func (s *JournalKinesisStreamDescription) SetStreamName(v string) *JournalKinesisStreamDescription { + s.StreamName = &v + return s } // The information about a journal export job, including the ledger name, export @@ -2521,6 +3252,60 @@ func (s *JournalS3ExportDescription) SetStatus(v string) *JournalS3ExportDescrip return s } +// The configuration settings of the Amazon Kinesis Data Streams destination +// for your Amazon QLDB journal stream. +type KinesisConfiguration struct { + _ struct{} `type:"structure"` + + // Enables QLDB to publish multiple data records in a single Kinesis Data Streams + // record. To learn more, see KPL Key Concepts (https://docs.aws.amazon.com/streams/latest/dev/kinesis-kpl-concepts.html) + // in the Amazon Kinesis Data Streams Developer Guide. + AggregationEnabled *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of the Kinesis data stream resource. + // + // StreamArn is a required field + StreamArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s KinesisConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KinesisConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *KinesisConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KinesisConfiguration"} + if s.StreamArn == nil { + invalidParams.Add(request.NewErrParamRequired("StreamArn")) + } + if s.StreamArn != nil && len(*s.StreamArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("StreamArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAggregationEnabled sets the AggregationEnabled field's value. +func (s *KinesisConfiguration) SetAggregationEnabled(v bool) *KinesisConfiguration { + s.AggregationEnabled = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *KinesisConfiguration) SetStreamArn(v string) *KinesisConfiguration { + s.StreamArn = &v + return s +} + // Information about a ledger, including its name, state, and when it was created. type LedgerSummary struct { _ struct{} `type:"structure"` @@ -2567,8 +3352,8 @@ func (s *LedgerSummary) SetState(v string) *LedgerSummary { // You have reached the limit on the maximum number of resources allowed. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -2588,17 +3373,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
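For context: the KinesisConfiguration type above is the destination setting that StreamJournalToKinesis requires. A hedged sketch of starting a journal stream; the StreamJournalToKinesisInput field names follow the QLDB API model rather than this hunk, and every ARN, name, and timestamp below is a placeholder assumption:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/qldb"
)

func main() {
	sess, err := session.NewSession(aws.NewConfig().WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}
	client := qldb.New(sess)

	// Placeholder values; the role must allow QLDB to write to the Kinesis stream.
	out, err := client.StreamJournalToKinesis(&qldb.StreamJournalToKinesisInput{
		LedgerName:         aws.String("example-ledger"),
		StreamName:         aws.String("example-journal-stream"),
		RoleArn:            aws.String("arn:aws:iam::123456789012:role/example-qldb-stream-role"),
		InclusiveStartTime: aws.Time(time.Now().Add(-24 * time.Hour)),
		KinesisConfiguration: &qldb.KinesisConfiguration{
			StreamArn:          aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example"),
			AggregationEnabled: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started stream:", aws.StringValue(out.StreamId))
}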
-func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2606,22 +3391,129 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListJournalKinesisStreamsForLedgerInput struct { + _ struct{} `type:"structure"` + + // The name of the ledger. + // + // LedgerName is a required field + LedgerName *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` + + // The maximum number of results to return in a single ListJournalKinesisStreamsForLedger + // request. (The actual number of results returned might be fewer.) + MaxResults *int64 `location:"querystring" locationName:"max_results" min:"1" type:"integer"` + + // A pagination token, indicating that you want to retrieve the next page of + // results. If you received a value for NextToken in the response from a previous + // ListJournalKinesisStreamsForLedger call, you should use that value as input + // here. + NextToken *string `location:"querystring" locationName:"next_token" min:"4" type:"string"` +} + +// String returns the string representation +func (s ListJournalKinesisStreamsForLedgerInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJournalKinesisStreamsForLedgerInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListJournalKinesisStreamsForLedgerInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListJournalKinesisStreamsForLedgerInput"} + if s.LedgerName == nil { + invalidParams.Add(request.NewErrParamRequired("LedgerName")) + } + if s.LedgerName != nil && len(*s.LedgerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LedgerName", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLedgerName sets the LedgerName field's value. +func (s *ListJournalKinesisStreamsForLedgerInput) SetLedgerName(v string) *ListJournalKinesisStreamsForLedgerInput { + s.LedgerName = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. 
+func (s *ListJournalKinesisStreamsForLedgerInput) SetMaxResults(v int64) *ListJournalKinesisStreamsForLedgerInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListJournalKinesisStreamsForLedgerInput) SetNextToken(v string) *ListJournalKinesisStreamsForLedgerInput { + s.NextToken = &v + return s +} + +type ListJournalKinesisStreamsForLedgerOutput struct { + _ struct{} `type:"structure"` + + // * If NextToken is empty, the last page of results has been processed and + // there are no more results to be retrieved. + // + // * If NextToken is not empty, more results are available. To retrieve the + // next page of results, use the value of NextToken in a subsequent ListJournalKinesisStreamsForLedger + // call. + NextToken *string `min:"4" type:"string"` + + // The array of QLDB journal stream descriptors that are associated with the + // given ledger. + Streams []*JournalKinesisStreamDescription `type:"list"` +} + +// String returns the string representation +func (s ListJournalKinesisStreamsForLedgerOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListJournalKinesisStreamsForLedgerOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListJournalKinesisStreamsForLedgerOutput) SetNextToken(v string) *ListJournalKinesisStreamsForLedgerOutput { + s.NextToken = &v + return s +} + +// SetStreams sets the Streams field's value. +func (s *ListJournalKinesisStreamsForLedgerOutput) SetStreams(v []*JournalKinesisStreamDescription) *ListJournalKinesisStreamsForLedgerOutput { + s.Streams = v + return s } type ListJournalS3ExportsForLedgerInput struct { @@ -2979,8 +3871,8 @@ func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForRe // The specified resource already exists. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -3003,17 +3895,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3021,28 +3913,28 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource can't be modified at this time. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -3065,17 +3957,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3083,28 +3975,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource doesn't exist. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -3127,17 +4019,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3145,28 +4037,28 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because a condition wasn't satisfied in advance. type ResourcePreconditionNotMetException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -3189,17 +4081,17 @@ func (s ResourcePreconditionNotMetException) GoString() string { func newErrorResourcePreconditionNotMetException(v protocol.ResponseMetadata) error { return &ResourcePreconditionNotMetException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourcePreconditionNotMetException) Code() string { +func (s *ResourcePreconditionNotMetException) Code() string { return "ResourcePreconditionNotMetException" } // Message returns the exception's message. -func (s ResourcePreconditionNotMetException) Message() string { +func (s *ResourcePreconditionNotMetException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3207,22 +4099,22 @@ func (s ResourcePreconditionNotMetException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourcePreconditionNotMetException) OrigErr() error { +func (s *ResourcePreconditionNotMetException) OrigErr() error { return nil } -func (s ResourcePreconditionNotMetException) Error() string { +func (s *ResourcePreconditionNotMetException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourcePreconditionNotMetException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourcePreconditionNotMetException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourcePreconditionNotMetException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourcePreconditionNotMetException) RequestID() string { + return s.RespMetadata.RequestID } // The encryption settings that are used by a journal export job to write data @@ -3230,8 +4122,9 @@ func (s ResourcePreconditionNotMetException) RequestID() string { type S3EncryptionConfiguration struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) for a customer master key (CMK) in AWS Key - // Management Service (AWS KMS). + // The Amazon Resource Name (ARN) for a symmetric customer master key (CMK) + // in AWS Key Management Service (AWS KMS). Amazon QLDB does not support asymmetric + // CMKs. // // You must provide a KmsKeyArn if you specify SSE_KMS as the ObjectEncryptionType. 
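A minimal usage sketch (editorial illustration, not part of the vendored diff): it builds the journal-export destination that the doc comment above describes, using SSE_KMS encryption. The bucket name and KMS key ARN are placeholders, and the S3ExportConfiguration field names (Bucket, Prefix, EncryptionConfiguration) plus its generated Validate method are assumed from the SDK's usual codegen rather than shown in this hunk.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/qldb"
)

func main() {
	// SSE_KMS requires KmsKeyArn, per the doc comment above; the ARN is a placeholder.
	enc := &qldb.S3EncryptionConfiguration{
		ObjectEncryptionType: aws.String(qldb.S3ObjectEncryptionTypeSseKms),
		KmsKeyArn:            aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"),
	}

	// Export destination for the journal export job; bucket and prefix are placeholders.
	cfg := &qldb.S3ExportConfiguration{
		Bucket:                  aws.String("my-qldb-exports"),
		Prefix:                  aws.String("ledger/journal/"),
		EncryptionConfiguration: enc,
	}

	// Validate mirrors the generated parameter checks before the configuration is sent.
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid export configuration:", err)
		return
	}
	fmt.Println(cfg)
}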
// @@ -3381,6 +4274,175 @@ func (s *S3ExportConfiguration) SetPrefix(v string) *S3ExportConfiguration { return s } +type StreamJournalToKinesisInput struct { + _ struct{} `type:"structure"` + + // The exclusive date and time that specifies when the stream ends. If you don't + // define this parameter, the stream runs indefinitely until you cancel it. + // + // The ExclusiveEndTime must be in ISO 8601 date and time format and in Universal + // Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z + ExclusiveEndTime *time.Time `type:"timestamp"` + + // The inclusive start date and time from which to start streaming journal data. + // This parameter must be in ISO 8601 date and time format and in Universal + // Coordinated Time (UTC). For example: 2019-06-13T21:36:34Z + // + // The InclusiveStartTime cannot be in the future and must be before ExclusiveEndTime. + // + // If you provide an InclusiveStartTime that is before the ledger's CreationDateTime, + // QLDB effectively defaults it to the ledger's CreationDateTime. + // + // InclusiveStartTime is a required field + InclusiveStartTime *time.Time `type:"timestamp" required:"true"` + + // The configuration settings of the Kinesis Data Streams destination for your + // stream request. + // + // KinesisConfiguration is a required field + KinesisConfiguration *KinesisConfiguration `type:"structure" required:"true"` + + // The name of the ledger. + // + // LedgerName is a required field + LedgerName *string `location:"uri" locationName:"name" min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM role that grants QLDB permissions + // for a journal stream to write data records to a Kinesis Data Streams resource. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The name that you want to assign to the QLDB journal stream. User-defined + // names can help identify and indicate the purpose of a stream. + // + // Your stream name must be unique among other active streams for a given ledger. + // Stream names have the same naming constraints as ledger names, as defined + // in Quotas in Amazon QLDB (https://docs.aws.amazon.com/qldb/latest/developerguide/limits.html#limits.naming) + // in the Amazon QLDB Developer Guide. + // + // StreamName is a required field + StreamName *string `min:"1" type:"string" required:"true"` + + // The key-value pairs to add as tags to the stream that you want to create. + // Tag keys are case sensitive. Tag values are case sensitive and can be null. + Tags map[string]*string `type:"map"` +} + +// String returns the string representation +func (s StreamJournalToKinesisInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamJournalToKinesisInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StreamJournalToKinesisInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StreamJournalToKinesisInput"} + if s.InclusiveStartTime == nil { + invalidParams.Add(request.NewErrParamRequired("InclusiveStartTime")) + } + if s.KinesisConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("KinesisConfiguration")) + } + if s.LedgerName == nil { + invalidParams.Add(request.NewErrParamRequired("LedgerName")) + } + if s.LedgerName != nil && len(*s.LedgerName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LedgerName", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.StreamName == nil { + invalidParams.Add(request.NewErrParamRequired("StreamName")) + } + if s.StreamName != nil && len(*s.StreamName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("StreamName", 1)) + } + if s.KinesisConfiguration != nil { + if err := s.KinesisConfiguration.Validate(); err != nil { + invalidParams.AddNested("KinesisConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExclusiveEndTime sets the ExclusiveEndTime field's value. +func (s *StreamJournalToKinesisInput) SetExclusiveEndTime(v time.Time) *StreamJournalToKinesisInput { + s.ExclusiveEndTime = &v + return s +} + +// SetInclusiveStartTime sets the InclusiveStartTime field's value. +func (s *StreamJournalToKinesisInput) SetInclusiveStartTime(v time.Time) *StreamJournalToKinesisInput { + s.InclusiveStartTime = &v + return s +} + +// SetKinesisConfiguration sets the KinesisConfiguration field's value. +func (s *StreamJournalToKinesisInput) SetKinesisConfiguration(v *KinesisConfiguration) *StreamJournalToKinesisInput { + s.KinesisConfiguration = v + return s +} + +// SetLedgerName sets the LedgerName field's value. +func (s *StreamJournalToKinesisInput) SetLedgerName(v string) *StreamJournalToKinesisInput { + s.LedgerName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *StreamJournalToKinesisInput) SetRoleArn(v string) *StreamJournalToKinesisInput { + s.RoleArn = &v + return s +} + +// SetStreamName sets the StreamName field's value. +func (s *StreamJournalToKinesisInput) SetStreamName(v string) *StreamJournalToKinesisInput { + s.StreamName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *StreamJournalToKinesisInput) SetTags(v map[string]*string) *StreamJournalToKinesisInput { + s.Tags = v + return s +} + +type StreamJournalToKinesisOutput struct { + _ struct{} `type:"structure"` + + // The unique ID that QLDB assigns to each QLDB journal stream. + StreamId *string `min:"22" type:"string"` +} + +// String returns the string representation +func (s StreamJournalToKinesisOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamJournalToKinesisOutput) GoString() string { + return s.String() +} + +// SetStreamId sets the StreamId field's value. +func (s *StreamJournalToKinesisOutput) SetStreamId(v string) *StreamJournalToKinesisOutput { + s.StreamId = &v + return s +} + type TagResourceInput struct { _ struct{} `type:"structure"` @@ -3652,7 +4714,7 @@ func (s *UpdateLedgerOutput) SetState(v string) *UpdateLedgerOutput { return s } -// A structure that can contain an Amazon Ion value in multiple encoding formats. 
+// A structure that can contain a value in multiple encoding formats. type ValueHolder struct { _ struct{} `type:"structure" sensitive:"true"` @@ -3689,6 +4751,22 @@ func (s *ValueHolder) SetIonText(v string) *ValueHolder { return s } +const ( + // ErrorCauseKinesisStreamNotFound is a ErrorCause enum value + ErrorCauseKinesisStreamNotFound = "KINESIS_STREAM_NOT_FOUND" + + // ErrorCauseIamPermissionRevoked is a ErrorCause enum value + ErrorCauseIamPermissionRevoked = "IAM_PERMISSION_REVOKED" +) + +// ErrorCause_Values returns all elements of the ErrorCause enum +func ErrorCause_Values() []string { + return []string{ + ErrorCauseKinesisStreamNotFound, + ErrorCauseIamPermissionRevoked, + } +} + const ( // ExportStatusInProgress is a ExportStatus enum value ExportStatusInProgress = "IN_PROGRESS" @@ -3700,6 +4778,15 @@ const ( ExportStatusCancelled = "CANCELLED" ) +// ExportStatus_Values returns all elements of the ExportStatus enum +func ExportStatus_Values() []string { + return []string{ + ExportStatusInProgress, + ExportStatusCompleted, + ExportStatusCancelled, + } +} + const ( // LedgerStateCreating is a LedgerState enum value LedgerStateCreating = "CREATING" @@ -3714,11 +4801,28 @@ const ( LedgerStateDeleted = "DELETED" ) +// LedgerState_Values returns all elements of the LedgerState enum +func LedgerState_Values() []string { + return []string{ + LedgerStateCreating, + LedgerStateActive, + LedgerStateDeleting, + LedgerStateDeleted, + } +} + const ( // PermissionsModeAllowAll is a PermissionsMode enum value PermissionsModeAllowAll = "ALLOW_ALL" ) +// PermissionsMode_Values returns all elements of the PermissionsMode enum +func PermissionsMode_Values() []string { + return []string{ + PermissionsModeAllowAll, + } +} + const ( // S3ObjectEncryptionTypeSseKms is a S3ObjectEncryptionType enum value S3ObjectEncryptionTypeSseKms = "SSE_KMS" @@ -3729,3 +4833,40 @@ const ( // S3ObjectEncryptionTypeNoEncryption is a S3ObjectEncryptionType enum value S3ObjectEncryptionTypeNoEncryption = "NO_ENCRYPTION" ) + +// S3ObjectEncryptionType_Values returns all elements of the S3ObjectEncryptionType enum +func S3ObjectEncryptionType_Values() []string { + return []string{ + S3ObjectEncryptionTypeSseKms, + S3ObjectEncryptionTypeSseS3, + S3ObjectEncryptionTypeNoEncryption, + } +} + +const ( + // StreamStatusActive is a StreamStatus enum value + StreamStatusActive = "ACTIVE" + + // StreamStatusCompleted is a StreamStatus enum value + StreamStatusCompleted = "COMPLETED" + + // StreamStatusCanceled is a StreamStatus enum value + StreamStatusCanceled = "CANCELED" + + // StreamStatusFailed is a StreamStatus enum value + StreamStatusFailed = "FAILED" + + // StreamStatusImpaired is a StreamStatus enum value + StreamStatusImpaired = "IMPAIRED" +) + +// StreamStatus_Values returns all elements of the StreamStatus enum +func StreamStatus_Values() []string { + return []string{ + StreamStatusActive, + StreamStatusCompleted, + StreamStatusCanceled, + StreamStatusFailed, + StreamStatusImpaired, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go b/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go index 45cf1d5eb..5860a9e10 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" 
"github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/quicksight/api.go b/vendor/github.com/aws/aws-sdk-go/service/quicksight/api.go index e4cfc186f..db98b1971 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/quicksight/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/quicksight/api.go @@ -110,6 +110,225 @@ func (c *QuickSight) CancelIngestionWithContext(ctx aws.Context, input *CancelIn return out, req.Send() } +const opCreateAccountCustomization = "CreateAccountCustomization" + +// CreateAccountCustomizationRequest generates a "aws/request.Request" representing the +// client's request for the CreateAccountCustomization operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateAccountCustomization for more information on using the CreateAccountCustomization +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateAccountCustomizationRequest method. +// req, resp := client.CreateAccountCustomizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateAccountCustomization +func (c *QuickSight) CreateAccountCustomizationRequest(input *CreateAccountCustomizationInput) (req *request.Request, output *CreateAccountCustomizationOutput) { + op := &request.Operation{ + Name: opCreateAccountCustomization, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/customizations", + } + + if input == nil { + input = &CreateAccountCustomizationInput{} + } + + output = &CreateAccountCustomizationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateAccountCustomization API operation for Amazon QuickSight. +// +// Creates Amazon QuickSight customizations the current AWS Region. Currently, +// you can add a custom default theme by using the CreateAccountCustomization +// or UpdateAccountCustomization API operation. To further customize QuickSight +// by removing QuickSight sample assets and videos for all new users, see Customizing +// QuickSight (https://docs.aws.amazon.com/quicksight/latest/user/customizing-quicksight.html) +// in the Amazon QuickSight User Guide. +// +// You can create customizations for your AWS account or, if you specify a namespace, +// for a QuickSight namespace instead. Customizations that apply to a namespace +// always override customizations that apply to an AWS account. To find out +// which customizations apply, use the DescribeAccountCustomization API operation. +// +// Before you use the CreateAccountCustomization API operation to add a theme +// as the namespace default, make sure that you first share the theme with the +// namespace. If you don't share it with the namespace, the theme isn't visible +// to your users even if you make it the default theme. To check if the theme +// is shared, view the current permissions by using the DescribeThemePermissions +// API operation. 
To share the theme, grant permissions by using the UpdateThemePermissions +// API operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation CreateAccountCustomization for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceExistsException +// The resource specified already exists. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * InternalFailureException +// An internal failure occurred. +// +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateAccountCustomization +func (c *QuickSight) CreateAccountCustomization(input *CreateAccountCustomizationInput) (*CreateAccountCustomizationOutput, error) { + req, out := c.CreateAccountCustomizationRequest(input) + return out, req.Send() +} + +// CreateAccountCustomizationWithContext is the same as CreateAccountCustomization with the addition of +// the ability to pass a context and additional request options. +// +// See CreateAccountCustomization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) CreateAccountCustomizationWithContext(ctx aws.Context, input *CreateAccountCustomizationInput, opts ...request.Option) (*CreateAccountCustomizationOutput, error) { + req, out := c.CreateAccountCustomizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateAnalysis = "CreateAnalysis" + +// CreateAnalysisRequest generates a "aws/request.Request" representing the +// client's request for the CreateAnalysis operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateAnalysis for more information on using the CreateAnalysis +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateAnalysisRequest method. 
+// req, resp := client.CreateAnalysisRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateAnalysis +func (c *QuickSight) CreateAnalysisRequest(input *CreateAnalysisInput) (req *request.Request, output *CreateAnalysisOutput) { + op := &request.Operation{ + Name: opCreateAnalysis, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/analyses/{AnalysisId}", + } + + if input == nil { + input = &CreateAnalysisInput{} + } + + output = &CreateAnalysisOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateAnalysis API operation for Amazon QuickSight. +// +// Creates an analysis in Amazon QuickSight. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation CreateAnalysis for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ThrottlingException +// Access is throttled. +// +// * ResourceExistsException +// The resource specified already exists. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateAnalysis +func (c *QuickSight) CreateAnalysis(input *CreateAnalysisInput) (*CreateAnalysisOutput, error) { + req, out := c.CreateAnalysisRequest(input) + return out, req.Send() +} + +// CreateAnalysisWithContext is the same as CreateAnalysis with the addition of +// the ability to pass a context and additional request options. +// +// See CreateAnalysis for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) CreateAnalysisWithContext(ctx aws.Context, input *CreateAnalysisInput, opts ...request.Option) (*CreateAnalysisOutput, error) { + req, out := c.CreateAnalysisRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateDashboard = "CreateDashboard" // CreateDashboardRequest generates a "aws/request.Request" representing the @@ -159,10 +378,9 @@ func (c *QuickSight) CreateDashboardRequest(input *CreateDashboardInput) (req *r // // A dashboard is an entity in QuickSight that identifies QuickSight reports, // created from analyses. You can share QuickSight dashboards. With the right -// permissions, you can create scheduled email reports from them. The CreateDashboard, -// DescribeDashboard, and ListDashboardsByUser API operations act on the dashboard -// entity. 
If you have the correct permissions, you can create a dashboard from -// a template that exists in a different AWS account. +// permissions, you can create scheduled email reports from them. If you have +// the correct permissions, you can create a dashboard from a template that +// exists in a different AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -720,8 +938,8 @@ func (c *QuickSight) CreateIAMPolicyAssignmentRequest(input *CreateIAMPolicyAssi // Access is throttled. // // * ConcurrentUpdatingException -// A resource is already in a state that indicates an action is happening that -// must complete before a new update can be applied. +// A resource is already in a state that indicates an operation is happening +// that must complete before a new update can be applied. // // * InternalFailureException // An internal failure occurred. @@ -796,7 +1014,7 @@ func (c *QuickSight) CreateIngestionRequest(input *CreateIngestionInput) (req *r // // Any ingestions operating on tagged datasets inherit the same tags automatically // for use in access control. For an example, see How do I create an IAM policy -// to control access to Amazon EC2 resources using tags? (https://aws.example.com/premiumsupport/knowledge-center/iam-ec2-resource-tags/) +// to control access to Amazon EC2 resources using tags? (https://aws.amazon.com/premiumsupport/knowledge-center/iam-ec2-resource-tags/) // in the AWS Knowledge Center. Tags are visible on the tagged dataset, but // not on the ingestion resource. // @@ -855,6 +1073,125 @@ func (c *QuickSight) CreateIngestionWithContext(ctx aws.Context, input *CreateIn return out, req.Send() } +const opCreateNamespace = "CreateNamespace" + +// CreateNamespaceRequest generates a "aws/request.Request" representing the +// client's request for the CreateNamespace operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateNamespace for more information on using the CreateNamespace +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateNamespaceRequest method. +// req, resp := client.CreateNamespaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateNamespace +func (c *QuickSight) CreateNamespaceRequest(input *CreateNamespaceInput) (req *request.Request, output *CreateNamespaceOutput) { + op := &request.Operation{ + Name: opCreateNamespace, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}", + } + + if input == nil { + input = &CreateNamespaceInput{} + } + + output = &CreateNamespaceOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateNamespace API operation for Amazon QuickSight. +// +// (Enterprise edition only) Creates a new namespace for you to use with Amazon +// QuickSight. +// +// A namespace allows you to isolate the QuickSight users and groups that are +// registered for that namespace. 
Users that access the namespace can share +// assets only with other users or groups in the same namespace. They can't +// see users and groups in other namespaces. You can create a namespace after +// your AWS account is subscribed to QuickSight. The namespace must be unique +// within the AWS account. By default, there is a limit of 100 namespaces per +// AWS account. To increase your limit, create a ticket with AWS Support. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation CreateNamespace for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * LimitExceededException +// A limit is exceeded. +// +// * ResourceExistsException +// The resource specified already exists. +// +// * PreconditionNotMetException +// One or more preconditions aren't met. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * InternalFailureException +// An internal failure occurred. +// +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateNamespace +func (c *QuickSight) CreateNamespace(input *CreateNamespaceInput) (*CreateNamespaceOutput, error) { + req, out := c.CreateNamespaceRequest(input) + return out, req.Send() +} + +// CreateNamespaceWithContext is the same as CreateNamespace with the addition of +// the ability to pass a context and additional request options. +// +// See CreateNamespace for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) CreateNamespaceWithContext(ctx aws.Context, input *CreateNamespaceInput, opts ...request.Option) (*CreateNamespaceOutput, error) { + req, out := c.CreateNamespaceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateTemplate = "CreateTemplate" // CreateTemplateRequest generates a "aws/request.Request" representing the @@ -945,6 +1282,9 @@ func (c *QuickSight) CreateTemplateRequest(input *CreateTemplateInput) (req *req // Amazon QuickSight currently has Standard Edition and Enterprise Edition. // Not every operation and capability is available in every edition. // +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// // * InternalFailureException // An internal failure occurred. 
// @@ -1045,6 +1385,9 @@ func (c *QuickSight) CreateTemplateAliasRequest(input *CreateTemplateAliasInput) // Amazon QuickSight currently has Standard Edition and Enterprise Edition. // Not every operation and capability is available in every edition. // +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// // * InternalFailureException // An internal failure occurred. // @@ -1070,251 +1413,274 @@ func (c *QuickSight) CreateTemplateAliasWithContext(ctx aws.Context, input *Crea return out, req.Send() } -const opDeleteDashboard = "DeleteDashboard" +const opCreateTheme = "CreateTheme" -// DeleteDashboardRequest generates a "aws/request.Request" representing the -// client's request for the DeleteDashboard operation. The "output" return +// CreateThemeRequest generates a "aws/request.Request" representing the +// client's request for the CreateTheme operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteDashboard for more information on using the DeleteDashboard +// See CreateTheme for more information on using the CreateTheme // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteDashboardRequest method. -// req, resp := client.DeleteDashboardRequest(params) +// // Example sending a request using the CreateThemeRequest method. +// req, resp := client.CreateThemeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDashboard -func (c *QuickSight) DeleteDashboardRequest(input *DeleteDashboardInput) (req *request.Request, output *DeleteDashboardOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateTheme +func (c *QuickSight) CreateThemeRequest(input *CreateThemeInput) (req *request.Request, output *CreateThemeOutput) { op := &request.Operation{ - Name: opDeleteDashboard, - HTTPMethod: "DELETE", - HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}", + Name: opCreateTheme, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/themes/{ThemeId}", } if input == nil { - input = &DeleteDashboardInput{} + input = &CreateThemeInput{} } - output = &DeleteDashboardOutput{} + output = &CreateThemeOutput{} req = c.newRequest(op, input, output) return } -// DeleteDashboard API operation for Amazon QuickSight. +// CreateTheme API operation for Amazon QuickSight. // -// Deletes a dashboard. +// Creates a theme. +// +// A theme is set of configuration options for color and layout. Themes apply +// to analyses and dashboards. For more information, see Using Themes in Amazon +// QuickSight (https://docs.aws.amazon.com/quicksight/latest/user/themes-in-quicksight.html) +// in the Amazon QuickSight User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DeleteDashboard for usage and error information. 
+// API operation CreateTheme for usage and error information. // // Returned Error Types: -// * ThrottlingException -// Access is throttled. +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. // // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. +// * ResourceExistsException +// The resource specified already exists. // // * ResourceNotFoundException // One or more resources can't be found. // +// * ThrottlingException +// Access is throttled. +// // * UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight // subscription where the edition doesn't include support for that operation. // Amazon QuickSight currently has Standard Edition and Enterprise Edition. // Not every operation and capability is available in every edition. // +// * LimitExceededException +// A limit is exceeded. +// // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDashboard -func (c *QuickSight) DeleteDashboard(input *DeleteDashboardInput) (*DeleteDashboardOutput, error) { - req, out := c.DeleteDashboardRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateTheme +func (c *QuickSight) CreateTheme(input *CreateThemeInput) (*CreateThemeOutput, error) { + req, out := c.CreateThemeRequest(input) return out, req.Send() } -// DeleteDashboardWithContext is the same as DeleteDashboard with the addition of +// CreateThemeWithContext is the same as CreateTheme with the addition of // the ability to pass a context and additional request options. // -// See DeleteDashboard for details on how to use this API operation. +// See CreateTheme for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DeleteDashboardWithContext(ctx aws.Context, input *DeleteDashboardInput, opts ...request.Option) (*DeleteDashboardOutput, error) { - req, out := c.DeleteDashboardRequest(input) +func (c *QuickSight) CreateThemeWithContext(ctx aws.Context, input *CreateThemeInput, opts ...request.Option) (*CreateThemeOutput, error) { + req, out := c.CreateThemeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteDataSet = "DeleteDataSet" +const opCreateThemeAlias = "CreateThemeAlias" -// DeleteDataSetRequest generates a "aws/request.Request" representing the -// client's request for the DeleteDataSet operation. The "output" return +// CreateThemeAliasRequest generates a "aws/request.Request" representing the +// client's request for the CreateThemeAlias operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See DeleteDataSet for more information on using the DeleteDataSet +// See CreateThemeAlias for more information on using the CreateThemeAlias // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteDataSetRequest method. -// req, resp := client.DeleteDataSetRequest(params) +// // Example sending a request using the CreateThemeAliasRequest method. +// req, resp := client.CreateThemeAliasRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDataSet -func (c *QuickSight) DeleteDataSetRequest(input *DeleteDataSetInput) (req *request.Request, output *DeleteDataSetOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateThemeAlias +func (c *QuickSight) CreateThemeAliasRequest(input *CreateThemeAliasInput) (req *request.Request, output *CreateThemeAliasOutput) { op := &request.Operation{ - Name: opDeleteDataSet, - HTTPMethod: "DELETE", - HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}", + Name: opCreateThemeAlias, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/themes/{ThemeId}/aliases/{AliasName}", } if input == nil { - input = &DeleteDataSetInput{} + input = &CreateThemeAliasInput{} } - output = &DeleteDataSetOutput{} + output = &CreateThemeAliasOutput{} req = c.newRequest(op, input, output) return } -// DeleteDataSet API operation for Amazon QuickSight. +// CreateThemeAlias API operation for Amazon QuickSight. // -// Deletes a dataset. +// Creates a theme alias for a theme. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DeleteDataSet for usage and error information. +// API operation CreateThemeAlias for usage and error information. // // Returned Error Types: -// * AccessDeniedException -// You don't have access to this item. The provided credentials couldn't be -// validated. You might not be authorized to carry out the request. Make sure -// that your account is authorized to use the Amazon QuickSight service, that -// your policies have the correct permissions, and that you are using the correct -// access keys. +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. // // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ThrottlingException -// Access is throttled. +// * LimitExceededException +// A limit is exceeded. +// +// * ResourceExistsException +// The resource specified already exists. // // * ResourceNotFoundException // One or more resources can't be found. // +// * ThrottlingException +// Access is throttled. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. 
+// // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDataSet -func (c *QuickSight) DeleteDataSet(input *DeleteDataSetInput) (*DeleteDataSetOutput, error) { - req, out := c.DeleteDataSetRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/CreateThemeAlias +func (c *QuickSight) CreateThemeAlias(input *CreateThemeAliasInput) (*CreateThemeAliasOutput, error) { + req, out := c.CreateThemeAliasRequest(input) return out, req.Send() } -// DeleteDataSetWithContext is the same as DeleteDataSet with the addition of +// CreateThemeAliasWithContext is the same as CreateThemeAlias with the addition of // the ability to pass a context and additional request options. // -// See DeleteDataSet for details on how to use this API operation. +// See CreateThemeAlias for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DeleteDataSetWithContext(ctx aws.Context, input *DeleteDataSetInput, opts ...request.Option) (*DeleteDataSetOutput, error) { - req, out := c.DeleteDataSetRequest(input) +func (c *QuickSight) CreateThemeAliasWithContext(ctx aws.Context, input *CreateThemeAliasInput, opts ...request.Option) (*CreateThemeAliasOutput, error) { + req, out := c.CreateThemeAliasRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteDataSource = "DeleteDataSource" +const opDeleteAccountCustomization = "DeleteAccountCustomization" -// DeleteDataSourceRequest generates a "aws/request.Request" representing the -// client's request for the DeleteDataSource operation. The "output" return +// DeleteAccountCustomizationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAccountCustomization operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteDataSource for more information on using the DeleteDataSource +// See DeleteAccountCustomization for more information on using the DeleteAccountCustomization // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteDataSourceRequest method. -// req, resp := client.DeleteDataSourceRequest(params) +// // Example sending a request using the DeleteAccountCustomizationRequest method. 
+// req, resp := client.DeleteAccountCustomizationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDataSource -func (c *QuickSight) DeleteDataSourceRequest(input *DeleteDataSourceInput) (req *request.Request, output *DeleteDataSourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteAccountCustomization +func (c *QuickSight) DeleteAccountCustomizationRequest(input *DeleteAccountCustomizationInput) (req *request.Request, output *DeleteAccountCustomizationOutput) { op := &request.Operation{ - Name: opDeleteDataSource, + Name: opDeleteAccountCustomization, HTTPMethod: "DELETE", - HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}", + HTTPPath: "/accounts/{AwsAccountId}/customizations", } if input == nil { - input = &DeleteDataSourceInput{} + input = &DeleteAccountCustomizationInput{} } - output = &DeleteDataSourceOutput{} + output = &DeleteAccountCustomizationOutput{} req = c.newRequest(op, input, output) return } -// DeleteDataSource API operation for Amazon QuickSight. +// DeleteAccountCustomization API operation for Amazon QuickSight. // -// Deletes the data source permanently. This action breaks all the datasets -// that reference the deleted data source. +// Deletes all Amazon QuickSight customizations in this AWS Region for the specified +// AWS account and QuickSight namespace. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DeleteDataSource for usage and error information. +// API operation DeleteAccountCustomization for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -1327,292 +1693,300 @@ func (c *QuickSight) DeleteDataSourceRequest(input *DeleteDataSourceInput) (req // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ThrottlingException -// Access is throttled. -// // * ResourceNotFoundException // One or more resources can't be found. // +// * ThrottlingException +// Access is throttled. +// // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDataSource -func (c *QuickSight) DeleteDataSource(input *DeleteDataSourceInput) (*DeleteDataSourceOutput, error) { - req, out := c.DeleteDataSourceRequest(input) +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteAccountCustomization +func (c *QuickSight) DeleteAccountCustomization(input *DeleteAccountCustomizationInput) (*DeleteAccountCustomizationOutput, error) { + req, out := c.DeleteAccountCustomizationRequest(input) return out, req.Send() } -// DeleteDataSourceWithContext is the same as DeleteDataSource with the addition of +// DeleteAccountCustomizationWithContext is the same as DeleteAccountCustomization with the addition of // the ability to pass a context and additional request options. // -// See DeleteDataSource for details on how to use this API operation. +// See DeleteAccountCustomization for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DeleteDataSourceWithContext(ctx aws.Context, input *DeleteDataSourceInput, opts ...request.Option) (*DeleteDataSourceOutput, error) { - req, out := c.DeleteDataSourceRequest(input) +func (c *QuickSight) DeleteAccountCustomizationWithContext(ctx aws.Context, input *DeleteAccountCustomizationInput, opts ...request.Option) (*DeleteAccountCustomizationOutput, error) { + req, out := c.DeleteAccountCustomizationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteGroup = "DeleteGroup" +const opDeleteAnalysis = "DeleteAnalysis" -// DeleteGroupRequest generates a "aws/request.Request" representing the -// client's request for the DeleteGroup operation. The "output" return +// DeleteAnalysisRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAnalysis operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteGroup for more information on using the DeleteGroup +// See DeleteAnalysis for more information on using the DeleteAnalysis // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteGroupRequest method. -// req, resp := client.DeleteGroupRequest(params) +// // Example sending a request using the DeleteAnalysisRequest method. +// req, resp := client.DeleteAnalysisRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteGroup -func (c *QuickSight) DeleteGroupRequest(input *DeleteGroupInput) (req *request.Request, output *DeleteGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteAnalysis +func (c *QuickSight) DeleteAnalysisRequest(input *DeleteAnalysisInput) (req *request.Request, output *DeleteAnalysisOutput) { op := &request.Operation{ - Name: opDeleteGroup, + Name: opDeleteAnalysis, HTTPMethod: "DELETE", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}", + HTTPPath: "/accounts/{AwsAccountId}/analyses/{AnalysisId}", } if input == nil { - input = &DeleteGroupInput{} + input = &DeleteAnalysisInput{} } - output = &DeleteGroupOutput{} + output = &DeleteAnalysisOutput{} req = c.newRequest(op, input, output) return } -// DeleteGroup API operation for Amazon QuickSight. +// DeleteAnalysis API operation for Amazon QuickSight. // -// Removes a user group from Amazon QuickSight. +// Deletes an analysis from Amazon QuickSight. You can optionally include a +// recovery window during which you can restore the analysis. If you don't specify +// a recovery window value, the operation defaults to 30 days. QuickSight attaches +// a DeletionTime stamp to the response that specifies the end of the recovery +// window. At the end of the recovery window, QuickSight deletes the analysis +// permanently. 
+// +// At any time before recovery window ends, you can use the RestoreAnalysis +// API operation to remove the DeletionTime stamp and cancel the deletion of +// the analysis. The analysis remains visible in the API until it's deleted, +// so you can describe it but you can't make a template from it. +// +// An analysis that's scheduled for deletion isn't accessible in the QuickSight +// console. To access it in the console, restore it. Deleting an analysis doesn't +// delete the dashboards that you publish from it. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DeleteGroup for usage and error information. +// API operation DeleteAnalysis for usage and error information. // // Returned Error Types: -// * AccessDeniedException -// You don't have access to this item. The provided credentials couldn't be -// validated. You might not be authorized to carry out the request. Make sure -// that your account is authorized to use the Amazon QuickSight service, that -// your policies have the correct permissions, and that you are using the correct -// access keys. +// * ThrottlingException +// Access is throttled. // // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// // * ResourceNotFoundException // One or more resources can't be found. // -// * ThrottlingException -// Access is throttled. -// -// * PreconditionNotMetException -// One or more preconditions aren't met. +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. // // * InternalFailureException // An internal failure occurred. // -// * ResourceUnavailableException -// This resource is currently unavailable. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteGroup -func (c *QuickSight) DeleteGroup(input *DeleteGroupInput) (*DeleteGroupOutput, error) { - req, out := c.DeleteGroupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteAnalysis +func (c *QuickSight) DeleteAnalysis(input *DeleteAnalysisInput) (*DeleteAnalysisOutput, error) { + req, out := c.DeleteAnalysisRequest(input) return out, req.Send() } -// DeleteGroupWithContext is the same as DeleteGroup with the addition of +// DeleteAnalysisWithContext is the same as DeleteAnalysis with the addition of // the ability to pass a context and additional request options. // -// See DeleteGroup for details on how to use this API operation. +// See DeleteAnalysis for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
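// A sketch of the recovery-window behavior described above, assuming this SDK
// version's DeleteAnalysisInput exposes a RecoveryWindowInDays field (and a
// ForceDeleteWithoutRecovery flag for skipping the window entirely). IDs are
// placeholders; imports and client setup are as in the earlier sketch.
svc := quicksight.New(session.Must(session.NewSession()))

out, err := svc.DeleteAnalysis(&quicksight.DeleteAnalysisInput{
    AwsAccountId:         aws.String("111122223333"),   // placeholder account ID
    AnalysisId:           aws.String("sales-analysis"), // placeholder analysis ID
    RecoveryWindowInDays: aws.Int64(14),                // restorable via RestoreAnalysis until then
})
if err != nil {
    log.Fatal(err)
}
// DeletionTime on the response marks the end of the recovery window.
fmt.Println(out)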
-func (c *QuickSight) DeleteGroupWithContext(ctx aws.Context, input *DeleteGroupInput, opts ...request.Option) (*DeleteGroupOutput, error) { - req, out := c.DeleteGroupRequest(input) +func (c *QuickSight) DeleteAnalysisWithContext(ctx aws.Context, input *DeleteAnalysisInput, opts ...request.Option) (*DeleteAnalysisOutput, error) { + req, out := c.DeleteAnalysisRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteGroupMembership = "DeleteGroupMembership" +const opDeleteDashboard = "DeleteDashboard" -// DeleteGroupMembershipRequest generates a "aws/request.Request" representing the -// client's request for the DeleteGroupMembership operation. The "output" return +// DeleteDashboardRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDashboard operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteGroupMembership for more information on using the DeleteGroupMembership +// See DeleteDashboard for more information on using the DeleteDashboard // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteGroupMembershipRequest method. -// req, resp := client.DeleteGroupMembershipRequest(params) +// // Example sending a request using the DeleteDashboardRequest method. +// req, resp := client.DeleteDashboardRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteGroupMembership -func (c *QuickSight) DeleteGroupMembershipRequest(input *DeleteGroupMembershipInput) (req *request.Request, output *DeleteGroupMembershipOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDashboard +func (c *QuickSight) DeleteDashboardRequest(input *DeleteDashboardInput) (req *request.Request, output *DeleteDashboardOutput) { op := &request.Operation{ - Name: opDeleteGroupMembership, + Name: opDeleteDashboard, HTTPMethod: "DELETE", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}/members/{MemberName}", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}", } if input == nil { - input = &DeleteGroupMembershipInput{} + input = &DeleteDashboardInput{} } - output = &DeleteGroupMembershipOutput{} + output = &DeleteDashboardOutput{} req = c.newRequest(op, input, output) return } -// DeleteGroupMembership API operation for Amazon QuickSight. +// DeleteDashboard API operation for Amazon QuickSight. // -// Removes a user from a group so that the user is no longer a member of the -// group. +// Deletes a dashboard. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DeleteGroupMembership for usage and error information. +// API operation DeleteDashboard for usage and error information. // // Returned Error Types: -// * AccessDeniedException -// You don't have access to this item. 
The provided credentials couldn't be -// validated. You might not be authorized to carry out the request. Make sure -// that your account is authorized to use the Amazon QuickSight service, that -// your policies have the correct permissions, and that you are using the correct -// access keys. +// * ThrottlingException +// Access is throttled. // // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// // * ResourceNotFoundException // One or more resources can't be found. // -// * ThrottlingException -// Access is throttled. -// -// * PreconditionNotMetException -// One or more preconditions aren't met. +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. // // * InternalFailureException // An internal failure occurred. // -// * ResourceUnavailableException -// This resource is currently unavailable. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteGroupMembership -func (c *QuickSight) DeleteGroupMembership(input *DeleteGroupMembershipInput) (*DeleteGroupMembershipOutput, error) { - req, out := c.DeleteGroupMembershipRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDashboard +func (c *QuickSight) DeleteDashboard(input *DeleteDashboardInput) (*DeleteDashboardOutput, error) { + req, out := c.DeleteDashboardRequest(input) return out, req.Send() } -// DeleteGroupMembershipWithContext is the same as DeleteGroupMembership with the addition of +// DeleteDashboardWithContext is the same as DeleteDashboard with the addition of // the ability to pass a context and additional request options. // -// See DeleteGroupMembership for details on how to use this API operation. +// See DeleteDashboard for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DeleteGroupMembershipWithContext(ctx aws.Context, input *DeleteGroupMembershipInput, opts ...request.Option) (*DeleteGroupMembershipOutput, error) { - req, out := c.DeleteGroupMembershipRequest(input) +func (c *QuickSight) DeleteDashboardWithContext(ctx aws.Context, input *DeleteDashboardInput, opts ...request.Option) (*DeleteDashboardOutput, error) { + req, out := c.DeleteDashboardRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteIAMPolicyAssignment = "DeleteIAMPolicyAssignment" +const opDeleteDataSet = "DeleteDataSet" -// DeleteIAMPolicyAssignmentRequest generates a "aws/request.Request" representing the -// client's request for the DeleteIAMPolicyAssignment operation. The "output" return +// DeleteDataSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDataSet operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
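// For example, a sketch of that request/Send lifecycle with DeleteDataSetRequest;
// the IDs are placeholders and the client setup is as in the earlier sketch.
svc := quicksight.New(session.Must(session.NewSession()))

req, resp := svc.DeleteDataSetRequest(&quicksight.DeleteDataSetInput{
    AwsAccountId: aws.String("111122223333"), // placeholder account ID
    DataSetId:    aws.String("orders-ds"),    // placeholder dataset ID
})
if err := req.Send(); err != nil {
    log.Fatal(err)
}
fmt.Println(resp) // only populated once Send has returned without error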
// the "output" return value is not valid until after Send returns without error. // -// See DeleteIAMPolicyAssignment for more information on using the DeleteIAMPolicyAssignment +// See DeleteDataSet for more information on using the DeleteDataSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteIAMPolicyAssignmentRequest method. -// req, resp := client.DeleteIAMPolicyAssignmentRequest(params) +// // Example sending a request using the DeleteDataSetRequest method. +// req, resp := client.DeleteDataSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteIAMPolicyAssignment -func (c *QuickSight) DeleteIAMPolicyAssignmentRequest(input *DeleteIAMPolicyAssignmentInput) (req *request.Request, output *DeleteIAMPolicyAssignmentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDataSet +func (c *QuickSight) DeleteDataSetRequest(input *DeleteDataSetInput) (req *request.Request, output *DeleteDataSetOutput) { op := &request.Operation{ - Name: opDeleteIAMPolicyAssignment, + Name: opDeleteDataSet, HTTPMethod: "DELETE", - HTTPPath: "/accounts/{AwsAccountId}/namespace/{Namespace}/iam-policy-assignments/{AssignmentName}", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}", } if input == nil { - input = &DeleteIAMPolicyAssignmentInput{} + input = &DeleteDataSetInput{} } - output = &DeleteIAMPolicyAssignmentOutput{} + output = &DeleteDataSetOutput{} req = c.newRequest(op, input, output) return } -// DeleteIAMPolicyAssignment API operation for Amazon QuickSight. +// DeleteDataSet API operation for Amazon QuickSight. // -// Deletes an existing IAM policy assignment. +// Deletes a dataset. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DeleteIAMPolicyAssignment for usage and error information. +// API operation DeleteDataSet for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -1625,291 +1999,287 @@ func (c *QuickSight) DeleteIAMPolicyAssignmentRequest(input *DeleteIAMPolicyAssi // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ResourceExistsException -// The resource specified already exists. -// -// * ResourceNotFoundException -// One or more resources can't be found. -// // * ThrottlingException // Access is throttled. // -// * ConcurrentUpdatingException -// A resource is already in a state that indicates an action is happening that -// must complete before a new update can be applied. +// * ResourceNotFoundException +// One or more resources can't be found. // // * InternalFailureException // An internal failure occurred. 
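// When one of the error types listed above is returned, its code can be recovered
// with the runtime type assertion the comments describe. A sketch, assuming
// "github.com/aws/aws-sdk-go/aws/awserr" is imported and that
// ErrCodeResourceNotFoundException is the generated constant for that error type;
// IDs are placeholders and client setup is as in the earlier sketch.
svc := quicksight.New(session.Must(session.NewSession()))

_, err := svc.DeleteDataSet(&quicksight.DeleteDataSetInput{
    AwsAccountId: aws.String("111122223333"), // placeholder account ID
    DataSetId:    aws.String("orders-ds"),    // placeholder dataset ID
})
if aerr, ok := err.(awserr.Error); ok && aerr.Code() == quicksight.ErrCodeResourceNotFoundException {
    log.Printf("dataset already deleted: %s", aerr.Message()) // treat a missing dataset as success
} else if err != nil {
    log.Fatal(err)
}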
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteIAMPolicyAssignment -func (c *QuickSight) DeleteIAMPolicyAssignment(input *DeleteIAMPolicyAssignmentInput) (*DeleteIAMPolicyAssignmentOutput, error) { - req, out := c.DeleteIAMPolicyAssignmentRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDataSet +func (c *QuickSight) DeleteDataSet(input *DeleteDataSetInput) (*DeleteDataSetOutput, error) { + req, out := c.DeleteDataSetRequest(input) return out, req.Send() } -// DeleteIAMPolicyAssignmentWithContext is the same as DeleteIAMPolicyAssignment with the addition of +// DeleteDataSetWithContext is the same as DeleteDataSet with the addition of // the ability to pass a context and additional request options. // -// See DeleteIAMPolicyAssignment for details on how to use this API operation. +// See DeleteDataSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DeleteIAMPolicyAssignmentWithContext(ctx aws.Context, input *DeleteIAMPolicyAssignmentInput, opts ...request.Option) (*DeleteIAMPolicyAssignmentOutput, error) { - req, out := c.DeleteIAMPolicyAssignmentRequest(input) +func (c *QuickSight) DeleteDataSetWithContext(ctx aws.Context, input *DeleteDataSetInput, opts ...request.Option) (*DeleteDataSetOutput, error) { + req, out := c.DeleteDataSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteTemplate = "DeleteTemplate" +const opDeleteDataSource = "DeleteDataSource" -// DeleteTemplateRequest generates a "aws/request.Request" representing the -// client's request for the DeleteTemplate operation. The "output" return +// DeleteDataSourceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDataSource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteTemplate for more information on using the DeleteTemplate +// See DeleteDataSource for more information on using the DeleteDataSource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteTemplateRequest method. -// req, resp := client.DeleteTemplateRequest(params) +// // Example sending a request using the DeleteDataSourceRequest method. 
+// req, resp := client.DeleteDataSourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteTemplate -func (c *QuickSight) DeleteTemplateRequest(input *DeleteTemplateInput) (req *request.Request, output *DeleteTemplateOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDataSource +func (c *QuickSight) DeleteDataSourceRequest(input *DeleteDataSourceInput) (req *request.Request, output *DeleteDataSourceOutput) { op := &request.Operation{ - Name: opDeleteTemplate, + Name: opDeleteDataSource, HTTPMethod: "DELETE", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}", + HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}", } if input == nil { - input = &DeleteTemplateInput{} + input = &DeleteDataSourceInput{} } - output = &DeleteTemplateOutput{} + output = &DeleteDataSourceOutput{} req = c.newRequest(op, input, output) return } -// DeleteTemplate API operation for Amazon QuickSight. +// DeleteDataSource API operation for Amazon QuickSight. // -// Deletes a template. +// Deletes the data source permanently. This operation breaks all the datasets +// that reference the deleted data source. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DeleteTemplate for usage and error information. +// API operation DeleteDataSource for usage and error information. // // Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ResourceNotFoundException -// One or more resources can't be found. -// // * ThrottlingException // Access is throttled. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. -// -// * LimitExceededException -// A limit is exceeded. -// -// * UnsupportedUserEditionException -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. -// Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. +// * ResourceNotFoundException +// One or more resources can't be found. // // * InternalFailureException // An internal failure occurred. 
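// Because the deletion is permanent and breaks every dataset that references the
// data source, a sketch like the following would typically sit behind an explicit
// confirmation; IDs are placeholders and client setup is as in the earlier sketch.
svc := quicksight.New(session.Must(session.NewSession()))

if _, err := svc.DeleteDataSource(&quicksight.DeleteDataSourceInput{
    AwsAccountId: aws.String("111122223333"),  // placeholder account ID
    DataSourceId: aws.String("redshift-main"), // placeholder data source ID
}); err != nil {
    log.Fatal(err)
}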
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteTemplate -func (c *QuickSight) DeleteTemplate(input *DeleteTemplateInput) (*DeleteTemplateOutput, error) { - req, out := c.DeleteTemplateRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteDataSource +func (c *QuickSight) DeleteDataSource(input *DeleteDataSourceInput) (*DeleteDataSourceOutput, error) { + req, out := c.DeleteDataSourceRequest(input) return out, req.Send() } -// DeleteTemplateWithContext is the same as DeleteTemplate with the addition of +// DeleteDataSourceWithContext is the same as DeleteDataSource with the addition of // the ability to pass a context and additional request options. // -// See DeleteTemplate for details on how to use this API operation. +// See DeleteDataSource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DeleteTemplateWithContext(ctx aws.Context, input *DeleteTemplateInput, opts ...request.Option) (*DeleteTemplateOutput, error) { - req, out := c.DeleteTemplateRequest(input) +func (c *QuickSight) DeleteDataSourceWithContext(ctx aws.Context, input *DeleteDataSourceInput, opts ...request.Option) (*DeleteDataSourceOutput, error) { + req, out := c.DeleteDataSourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteTemplateAlias = "DeleteTemplateAlias" +const opDeleteGroup = "DeleteGroup" -// DeleteTemplateAliasRequest generates a "aws/request.Request" representing the -// client's request for the DeleteTemplateAlias operation. The "output" return +// DeleteGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGroup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteTemplateAlias for more information on using the DeleteTemplateAlias +// See DeleteGroup for more information on using the DeleteGroup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteTemplateAliasRequest method. -// req, resp := client.DeleteTemplateAliasRequest(params) +// // Example sending a request using the DeleteGroupRequest method. 
+// req, resp := client.DeleteGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteTemplateAlias -func (c *QuickSight) DeleteTemplateAliasRequest(input *DeleteTemplateAliasInput) (req *request.Request, output *DeleteTemplateAliasOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteGroup +func (c *QuickSight) DeleteGroupRequest(input *DeleteGroupInput) (req *request.Request, output *DeleteGroupOutput) { op := &request.Operation{ - Name: opDeleteTemplateAlias, + Name: opDeleteGroup, HTTPMethod: "DELETE", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}", } if input == nil { - input = &DeleteTemplateAliasInput{} + input = &DeleteGroupInput{} } - output = &DeleteTemplateAliasOutput{} + output = &DeleteGroupOutput{} req = c.newRequest(op, input, output) return } -// DeleteTemplateAlias API operation for Amazon QuickSight. +// DeleteGroup API operation for Amazon QuickSight. // -// Deletes the item that the specified template alias points to. If you provide -// a specific alias, you delete the version of the template that the alias points -// to. +// Removes a user group from Amazon QuickSight. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DeleteTemplateAlias for usage and error information. +// API operation DeleteGroup for usage and error information. // // Returned Error Types: -// * ThrottlingException -// Access is throttled. +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. // // * ResourceNotFoundException // One or more resources can't be found. // -// * UnsupportedUserEditionException -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. -// Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. +// * ThrottlingException +// Access is throttled. +// +// * PreconditionNotMetException +// One or more preconditions aren't met. // // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteTemplateAlias -func (c *QuickSight) DeleteTemplateAlias(input *DeleteTemplateAliasInput) (*DeleteTemplateAliasOutput, error) { - req, out := c.DeleteTemplateAliasRequest(input) +// * ResourceUnavailableException +// This resource is currently unavailable. 
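// A sketch of DeleteGroup; as the HTTPPath above shows, a group is addressed by
// account ID, namespace, and group name. Values are placeholders and client setup
// is as in the earlier sketch.
svc := quicksight.New(session.Must(session.NewSession()))

if _, err := svc.DeleteGroup(&quicksight.DeleteGroupInput{
    AwsAccountId: aws.String("111122223333"), // placeholder account ID
    Namespace:    aws.String("default"),      // QuickSight namespace
    GroupName:    aws.String("analysts"),     // placeholder group name
}); err != nil {
    log.Fatal(err)
}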
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteGroup +func (c *QuickSight) DeleteGroup(input *DeleteGroupInput) (*DeleteGroupOutput, error) { + req, out := c.DeleteGroupRequest(input) return out, req.Send() } -// DeleteTemplateAliasWithContext is the same as DeleteTemplateAlias with the addition of +// DeleteGroupWithContext is the same as DeleteGroup with the addition of // the ability to pass a context and additional request options. // -// See DeleteTemplateAlias for details on how to use this API operation. +// See DeleteGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DeleteTemplateAliasWithContext(ctx aws.Context, input *DeleteTemplateAliasInput, opts ...request.Option) (*DeleteTemplateAliasOutput, error) { - req, out := c.DeleteTemplateAliasRequest(input) +func (c *QuickSight) DeleteGroupWithContext(ctx aws.Context, input *DeleteGroupInput, opts ...request.Option) (*DeleteGroupOutput, error) { + req, out := c.DeleteGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteUser = "DeleteUser" +const opDeleteGroupMembership = "DeleteGroupMembership" -// DeleteUserRequest generates a "aws/request.Request" representing the -// client's request for the DeleteUser operation. The "output" return +// DeleteGroupMembershipRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGroupMembership operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteUser for more information on using the DeleteUser +// See DeleteGroupMembership for more information on using the DeleteGroupMembership // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteUserRequest method. -// req, resp := client.DeleteUserRequest(params) +// // Example sending a request using the DeleteGroupMembershipRequest method. 
+// req, resp := client.DeleteGroupMembershipRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteUser -func (c *QuickSight) DeleteUserRequest(input *DeleteUserInput) (req *request.Request, output *DeleteUserOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteGroupMembership +func (c *QuickSight) DeleteGroupMembershipRequest(input *DeleteGroupMembershipInput) (req *request.Request, output *DeleteGroupMembershipOutput) { op := &request.Operation{ - Name: opDeleteUser, + Name: opDeleteGroupMembership, HTTPMethod: "DELETE", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}/members/{MemberName}", } if input == nil { - input = &DeleteUserInput{} + input = &DeleteGroupMembershipInput{} } - output = &DeleteUserOutput{} + output = &DeleteGroupMembershipOutput{} req = c.newRequest(op, input, output) return } -// DeleteUser API operation for Amazon QuickSight. +// DeleteGroupMembership API operation for Amazon QuickSight. // -// Deletes the Amazon QuickSight user that is associated with the identity of -// the AWS Identity and Access Management (IAM) user or role that's making the -// call. The IAM user isn't deleted as a result of this call. +// Removes a user from a group so that the user is no longer a member of the +// group. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DeleteUser for usage and error information. +// API operation DeleteGroupMembership for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -1928,86 +2298,89 @@ func (c *QuickSight) DeleteUserRequest(input *DeleteUserInput) (req *request.Req // * ThrottlingException // Access is throttled. // +// * PreconditionNotMetException +// One or more preconditions aren't met. +// // * InternalFailureException // An internal failure occurred. // // * ResourceUnavailableException // This resource is currently unavailable. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteUser -func (c *QuickSight) DeleteUser(input *DeleteUserInput) (*DeleteUserOutput, error) { - req, out := c.DeleteUserRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteGroupMembership +func (c *QuickSight) DeleteGroupMembership(input *DeleteGroupMembershipInput) (*DeleteGroupMembershipOutput, error) { + req, out := c.DeleteGroupMembershipRequest(input) return out, req.Send() } -// DeleteUserWithContext is the same as DeleteUser with the addition of +// DeleteGroupMembershipWithContext is the same as DeleteGroupMembership with the addition of // the ability to pass a context and additional request options. // -// See DeleteUser for details on how to use this API operation. +// See DeleteGroupMembership for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
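// A sketch of the context-aware variant described above, assuming "context" and
// "time" are imported alongside the packages in the earlier sketch; the request is
// cancelled if it has not completed within the timeout. Values are placeholders.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

svc := quicksight.New(session.Must(session.NewSession()))
if _, err := svc.DeleteGroupMembershipWithContext(ctx, &quicksight.DeleteGroupMembershipInput{
    AwsAccountId: aws.String("111122223333"), // placeholder account ID
    Namespace:    aws.String("default"),      // QuickSight namespace
    GroupName:    aws.String("analysts"),     // placeholder group name
    MemberName:   aws.String("jdoe"),         // placeholder member (user) name
}); err != nil {
    log.Fatal(err)
}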
-func (c *QuickSight) DeleteUserWithContext(ctx aws.Context, input *DeleteUserInput, opts ...request.Option) (*DeleteUserOutput, error) { - req, out := c.DeleteUserRequest(input) +func (c *QuickSight) DeleteGroupMembershipWithContext(ctx aws.Context, input *DeleteGroupMembershipInput, opts ...request.Option) (*DeleteGroupMembershipOutput, error) { + req, out := c.DeleteGroupMembershipRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDeleteUserByPrincipalId = "DeleteUserByPrincipalId" +const opDeleteIAMPolicyAssignment = "DeleteIAMPolicyAssignment" -// DeleteUserByPrincipalIdRequest generates a "aws/request.Request" representing the -// client's request for the DeleteUserByPrincipalId operation. The "output" return +// DeleteIAMPolicyAssignmentRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIAMPolicyAssignment operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeleteUserByPrincipalId for more information on using the DeleteUserByPrincipalId +// See DeleteIAMPolicyAssignment for more information on using the DeleteIAMPolicyAssignment // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeleteUserByPrincipalIdRequest method. -// req, resp := client.DeleteUserByPrincipalIdRequest(params) +// // Example sending a request using the DeleteIAMPolicyAssignmentRequest method. +// req, resp := client.DeleteIAMPolicyAssignmentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteUserByPrincipalId -func (c *QuickSight) DeleteUserByPrincipalIdRequest(input *DeleteUserByPrincipalIdInput) (req *request.Request, output *DeleteUserByPrincipalIdOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteIAMPolicyAssignment +func (c *QuickSight) DeleteIAMPolicyAssignmentRequest(input *DeleteIAMPolicyAssignmentInput) (req *request.Request, output *DeleteIAMPolicyAssignmentOutput) { op := &request.Operation{ - Name: opDeleteUserByPrincipalId, + Name: opDeleteIAMPolicyAssignment, HTTPMethod: "DELETE", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/user-principals/{PrincipalId}", + HTTPPath: "/accounts/{AwsAccountId}/namespace/{Namespace}/iam-policy-assignments/{AssignmentName}", } if input == nil { - input = &DeleteUserByPrincipalIdInput{} + input = &DeleteIAMPolicyAssignmentInput{} } - output = &DeleteUserByPrincipalIdOutput{} + output = &DeleteIAMPolicyAssignmentOutput{} req = c.newRequest(op, input, output) return } -// DeleteUserByPrincipalId API operation for Amazon QuickSight. +// DeleteIAMPolicyAssignment API operation for Amazon QuickSight. // -// Deletes a user identified by its principal ID. +// Deletes an existing IAM policy assignment. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
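// A sketch of DeleteIAMPolicyAssignment itself; failures come back as awserr.Error
// values as described above, handled here with a plain log.Fatal for brevity.
// Values are placeholders and client setup is as in the earlier sketch.
svc := quicksight.New(session.Must(session.NewSession()))

if _, err := svc.DeleteIAMPolicyAssignment(&quicksight.DeleteIAMPolicyAssignmentInput{
    AwsAccountId:   aws.String("111122223333"),  // placeholder account ID
    Namespace:      aws.String("default"),       // QuickSight namespace
    AssignmentName: aws.String("reader-policy"), // placeholder assignment name
}); err != nil {
    log.Fatal(err)
}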
// // See the AWS API reference guide for Amazon QuickSight's -// API operation DeleteUserByPrincipalId for usage and error information. +// API operation DeleteIAMPolicyAssignment for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -2020,100 +2393,101 @@ func (c *QuickSight) DeleteUserByPrincipalIdRequest(input *DeleteUserByPrincipal // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// * ResourceExistsException +// The resource specified already exists. +// // * ResourceNotFoundException // One or more resources can't be found. // // * ThrottlingException // Access is throttled. // +// * ConcurrentUpdatingException +// A resource is already in a state that indicates an operation is happening +// that must complete before a new update can be applied. +// // * InternalFailureException // An internal failure occurred. // -// * ResourceUnavailableException -// This resource is currently unavailable. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteUserByPrincipalId -func (c *QuickSight) DeleteUserByPrincipalId(input *DeleteUserByPrincipalIdInput) (*DeleteUserByPrincipalIdOutput, error) { - req, out := c.DeleteUserByPrincipalIdRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteIAMPolicyAssignment +func (c *QuickSight) DeleteIAMPolicyAssignment(input *DeleteIAMPolicyAssignmentInput) (*DeleteIAMPolicyAssignmentOutput, error) { + req, out := c.DeleteIAMPolicyAssignmentRequest(input) return out, req.Send() } -// DeleteUserByPrincipalIdWithContext is the same as DeleteUserByPrincipalId with the addition of +// DeleteIAMPolicyAssignmentWithContext is the same as DeleteIAMPolicyAssignment with the addition of // the ability to pass a context and additional request options. // -// See DeleteUserByPrincipalId for details on how to use this API operation. +// See DeleteIAMPolicyAssignment for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DeleteUserByPrincipalIdWithContext(ctx aws.Context, input *DeleteUserByPrincipalIdInput, opts ...request.Option) (*DeleteUserByPrincipalIdOutput, error) { - req, out := c.DeleteUserByPrincipalIdRequest(input) +func (c *QuickSight) DeleteIAMPolicyAssignmentWithContext(ctx aws.Context, input *DeleteIAMPolicyAssignmentInput, opts ...request.Option) (*DeleteIAMPolicyAssignmentOutput, error) { + req, out := c.DeleteIAMPolicyAssignmentRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeDashboard = "DescribeDashboard" +const opDeleteNamespace = "DeleteNamespace" -// DescribeDashboardRequest generates a "aws/request.Request" representing the -// client's request for the DescribeDashboard operation. The "output" return +// DeleteNamespaceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNamespace operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See DescribeDashboard for more information on using the DescribeDashboard +// See DeleteNamespace for more information on using the DeleteNamespace // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeDashboardRequest method. -// req, resp := client.DescribeDashboardRequest(params) +// // Example sending a request using the DeleteNamespaceRequest method. +// req, resp := client.DeleteNamespaceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDashboard -func (c *QuickSight) DescribeDashboardRequest(input *DescribeDashboardInput) (req *request.Request, output *DescribeDashboardOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteNamespace +func (c *QuickSight) DeleteNamespaceRequest(input *DeleteNamespaceInput) (req *request.Request, output *DeleteNamespaceOutput) { op := &request.Operation{ - Name: opDescribeDashboard, - HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}", + Name: opDeleteNamespace, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}", } if input == nil { - input = &DescribeDashboardInput{} + input = &DeleteNamespaceInput{} } - output = &DescribeDashboardOutput{} + output = &DeleteNamespaceOutput{} req = c.newRequest(op, input, output) return } -// DescribeDashboard API operation for Amazon QuickSight. +// DeleteNamespace API operation for Amazon QuickSight. // -// Provides a summary for a dashboard. +// Deletes a namespace and the users and groups that are associated with the +// namespace. This is an asynchronous process. Assets including dashboards, +// analyses, datasets and data sources are not deleted. To delete these assets, +// you use the API operations for the relevant asset. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeDashboard for usage and error information. +// API operation DeleteNamespace for usage and error information. // // Returned Error Types: -// * InvalidParameterValueException -// One or more parameters has a value that isn't valid. -// -// * ResourceNotFoundException -// One or more resources can't be found. -// // * AccessDeniedException // You don't have access to this item. The provided credentials couldn't be // validated. You might not be authorized to carry out the request. Make sure @@ -2121,92 +2495,98 @@ func (c *QuickSight) DescribeDashboardRequest(input *DescribeDashboardInput) (re // your policies have the correct permissions, and that you are using the correct // access keys. // +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// // * ThrottlingException // Access is throttled. // -// * UnsupportedUserEditionException -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. 
-// Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. +// * PreconditionNotMetException +// One or more preconditions aren't met. // // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDashboard -func (c *QuickSight) DescribeDashboard(input *DescribeDashboardInput) (*DescribeDashboardOutput, error) { - req, out := c.DescribeDashboardRequest(input) +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteNamespace +func (c *QuickSight) DeleteNamespace(input *DeleteNamespaceInput) (*DeleteNamespaceOutput, error) { + req, out := c.DeleteNamespaceRequest(input) return out, req.Send() } -// DescribeDashboardWithContext is the same as DescribeDashboard with the addition of +// DeleteNamespaceWithContext is the same as DeleteNamespace with the addition of // the ability to pass a context and additional request options. // -// See DescribeDashboard for details on how to use this API operation. +// See DeleteNamespace for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeDashboardWithContext(ctx aws.Context, input *DescribeDashboardInput, opts ...request.Option) (*DescribeDashboardOutput, error) { - req, out := c.DescribeDashboardRequest(input) +func (c *QuickSight) DeleteNamespaceWithContext(ctx aws.Context, input *DeleteNamespaceInput, opts ...request.Option) (*DeleteNamespaceOutput, error) { + req, out := c.DeleteNamespaceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeDashboardPermissions = "DescribeDashboardPermissions" +const opDeleteTemplate = "DeleteTemplate" -// DescribeDashboardPermissionsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeDashboardPermissions operation. The "output" return +// DeleteTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTemplate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeDashboardPermissions for more information on using the DescribeDashboardPermissions +// See DeleteTemplate for more information on using the DeleteTemplate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeDashboardPermissionsRequest method. -// req, resp := client.DescribeDashboardPermissionsRequest(params) +// // Example sending a request using the DeleteTemplateRequest method. 
+// req, resp := client.DeleteTemplateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDashboardPermissions -func (c *QuickSight) DescribeDashboardPermissionsRequest(input *DescribeDashboardPermissionsInput) (req *request.Request, output *DescribeDashboardPermissionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteTemplate +func (c *QuickSight) DeleteTemplateRequest(input *DeleteTemplateInput) (req *request.Request, output *DeleteTemplateOutput) { op := &request.Operation{ - Name: opDescribeDashboardPermissions, - HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/permissions", + Name: opDeleteTemplate, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}", } if input == nil { - input = &DescribeDashboardPermissionsInput{} + input = &DeleteTemplateInput{} } - output = &DescribeDashboardPermissionsOutput{} + output = &DeleteTemplateOutput{} req = c.newRequest(op, input, output) return } -// DescribeDashboardPermissions API operation for Amazon QuickSight. +// DeleteTemplate API operation for Amazon QuickSight. // -// Describes read and write permissions for a dashboard. +// Deletes a template. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeDashboardPermissions for usage and error information. +// API operation DeleteTemplate for usage and error information. // // Returned Error Types: // * InvalidParameterValueException @@ -2218,6 +2598,12 @@ func (c *QuickSight) DescribeDashboardPermissionsRequest(input *DescribeDashboar // * ThrottlingException // Access is throttled. // +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * LimitExceededException +// A limit is exceeded. +// // * UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight // subscription where the edition doesn't include support for that operation. @@ -2227,177 +2613,176 @@ func (c *QuickSight) DescribeDashboardPermissionsRequest(input *DescribeDashboar // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDashboardPermissions -func (c *QuickSight) DescribeDashboardPermissions(input *DescribeDashboardPermissionsInput) (*DescribeDashboardPermissionsOutput, error) { - req, out := c.DescribeDashboardPermissionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteTemplate +func (c *QuickSight) DeleteTemplate(input *DeleteTemplateInput) (*DeleteTemplateOutput, error) { + req, out := c.DeleteTemplateRequest(input) return out, req.Send() } -// DescribeDashboardPermissionsWithContext is the same as DescribeDashboardPermissions with the addition of +// DeleteTemplateWithContext is the same as DeleteTemplate with the addition of // the ability to pass a context and additional request options. // -// See DescribeDashboardPermissions for details on how to use this API operation. +// See DeleteTemplate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
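// A sketch of the DeleteNamespace call described a little earlier: it only starts an
// asynchronous deletion of the namespace and of the users and groups in it, so any
// dashboards, analyses, datasets, and data sources must be removed with their own
// delete operations. Values are placeholders; client setup is as in the earlier sketch.
svc := quicksight.New(session.Must(session.NewSession()))

if _, err := svc.DeleteNamespace(&quicksight.DeleteNamespaceInput{
    AwsAccountId: aws.String("111122223333"), // placeholder account ID
    Namespace:    aws.String("marketing"),    // placeholder namespace to remove
}); err != nil {
    log.Fatal(err)
}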
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeDashboardPermissionsWithContext(ctx aws.Context, input *DescribeDashboardPermissionsInput, opts ...request.Option) (*DescribeDashboardPermissionsOutput, error) { - req, out := c.DescribeDashboardPermissionsRequest(input) +func (c *QuickSight) DeleteTemplateWithContext(ctx aws.Context, input *DeleteTemplateInput, opts ...request.Option) (*DeleteTemplateOutput, error) { + req, out := c.DeleteTemplateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeDataSet = "DescribeDataSet" +const opDeleteTemplateAlias = "DeleteTemplateAlias" -// DescribeDataSetRequest generates a "aws/request.Request" representing the -// client's request for the DescribeDataSet operation. The "output" return +// DeleteTemplateAliasRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTemplateAlias operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeDataSet for more information on using the DescribeDataSet +// See DeleteTemplateAlias for more information on using the DeleteTemplateAlias // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeDataSetRequest method. -// req, resp := client.DescribeDataSetRequest(params) +// // Example sending a request using the DeleteTemplateAliasRequest method. +// req, resp := client.DeleteTemplateAliasRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSet -func (c *QuickSight) DescribeDataSetRequest(input *DescribeDataSetInput) (req *request.Request, output *DescribeDataSetOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteTemplateAlias +func (c *QuickSight) DeleteTemplateAliasRequest(input *DeleteTemplateAliasInput) (req *request.Request, output *DeleteTemplateAliasOutput) { op := &request.Operation{ - Name: opDescribeDataSet, - HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}", + Name: opDeleteTemplateAlias, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}", } if input == nil { - input = &DescribeDataSetInput{} + input = &DeleteTemplateAliasInput{} } - output = &DescribeDataSetOutput{} + output = &DeleteTemplateAliasOutput{} req = c.newRequest(op, input, output) return } -// DescribeDataSet API operation for Amazon QuickSight. +// DeleteTemplateAlias API operation for Amazon QuickSight. // -// Describes a dataset. +// Deletes the item that the specified template alias points to. If you provide +// a specific alias, you delete the version of the template that the alias points +// to. // // Returns awserr.Error for service API and SDK errors. 
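// A sketch of the template deletions described above: removing a single alias (which
// deletes the template version the alias points to) and then the template itself.
// Values are placeholders; client setup is as in the earlier sketch.
svc := quicksight.New(session.Must(session.NewSession()))

if _, err := svc.DeleteTemplateAlias(&quicksight.DeleteTemplateAliasInput{
    AwsAccountId: aws.String("111122223333"),   // placeholder account ID
    TemplateId:   aws.String("sales-template"), // placeholder template ID
    AliasName:    aws.String("production"),     // placeholder alias name
}); err != nil {
    log.Fatal(err)
}

if _, err := svc.DeleteTemplate(&quicksight.DeleteTemplateInput{
    AwsAccountId: aws.String("111122223333"),   // placeholder account ID
    TemplateId:   aws.String("sales-template"), // placeholder template ID
}); err != nil {
    log.Fatal(err)
}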
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeDataSet for usage and error information. +// API operation DeleteTemplateAlias for usage and error information. // // Returned Error Types: -// * AccessDeniedException -// You don't have access to this item. The provided credentials couldn't be -// validated. You might not be authorized to carry out the request. Make sure -// that your account is authorized to use the Amazon QuickSight service, that -// your policies have the correct permissions, and that you are using the correct -// access keys. -// -// * InvalidParameterValueException -// One or more parameters has a value that isn't valid. -// // * ThrottlingException // Access is throttled. // // * ResourceNotFoundException // One or more resources can't be found. // +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSet -func (c *QuickSight) DescribeDataSet(input *DescribeDataSetInput) (*DescribeDataSetOutput, error) { - req, out := c.DescribeDataSetRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteTemplateAlias +func (c *QuickSight) DeleteTemplateAlias(input *DeleteTemplateAliasInput) (*DeleteTemplateAliasOutput, error) { + req, out := c.DeleteTemplateAliasRequest(input) return out, req.Send() } -// DescribeDataSetWithContext is the same as DescribeDataSet with the addition of +// DeleteTemplateAliasWithContext is the same as DeleteTemplateAlias with the addition of // the ability to pass a context and additional request options. // -// See DescribeDataSet for details on how to use this API operation. +// See DeleteTemplateAlias for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeDataSetWithContext(ctx aws.Context, input *DescribeDataSetInput, opts ...request.Option) (*DescribeDataSetOutput, error) { - req, out := c.DescribeDataSetRequest(input) +func (c *QuickSight) DeleteTemplateAliasWithContext(ctx aws.Context, input *DeleteTemplateAliasInput, opts ...request.Option) (*DeleteTemplateAliasOutput, error) { + req, out := c.DeleteTemplateAliasRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeDataSetPermissions = "DescribeDataSetPermissions" +const opDeleteTheme = "DeleteTheme" -// DescribeDataSetPermissionsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeDataSetPermissions operation. The "output" return +// DeleteThemeRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTheme operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeDataSetPermissions for more information on using the DescribeDataSetPermissions +// See DeleteTheme for more information on using the DeleteTheme // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeDataSetPermissionsRequest method. -// req, resp := client.DescribeDataSetPermissionsRequest(params) +// // Example sending a request using the DeleteThemeRequest method. +// req, resp := client.DeleteThemeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSetPermissions -func (c *QuickSight) DescribeDataSetPermissionsRequest(input *DescribeDataSetPermissionsInput) (req *request.Request, output *DescribeDataSetPermissionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteTheme +func (c *QuickSight) DeleteThemeRequest(input *DeleteThemeInput) (req *request.Request, output *DeleteThemeOutput) { op := &request.Operation{ - Name: opDescribeDataSetPermissions, - HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/permissions", + Name: opDeleteTheme, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/themes/{ThemeId}", } if input == nil { - input = &DescribeDataSetPermissionsInput{} + input = &DeleteThemeInput{} } - output = &DescribeDataSetPermissionsOutput{} + output = &DeleteThemeOutput{} req = c.newRequest(op, input, output) return } -// DescribeDataSetPermissions API operation for Amazon QuickSight. -// -// Describes the permissions on a dataset. +// DeleteTheme API operation for Amazon QuickSight. // -// The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/data-set-id. +// Deletes a theme. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeDataSetPermissions for usage and error information. +// API operation DeleteTheme for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -2407,187 +2792,202 @@ func (c *QuickSight) DescribeDataSetPermissionsRequest(input *DescribeDataSetPer // your policies have the correct permissions, and that you are using the correct // access keys. // +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// * ResourceNotFoundException +// One or more resources can't be found. +// // * ThrottlingException // Access is throttled. // -// * ResourceNotFoundException -// One or more resources can't be found. +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. 
+// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. // // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSetPermissions -func (c *QuickSight) DescribeDataSetPermissions(input *DescribeDataSetPermissionsInput) (*DescribeDataSetPermissionsOutput, error) { - req, out := c.DescribeDataSetPermissionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteTheme +func (c *QuickSight) DeleteTheme(input *DeleteThemeInput) (*DeleteThemeOutput, error) { + req, out := c.DeleteThemeRequest(input) return out, req.Send() } -// DescribeDataSetPermissionsWithContext is the same as DescribeDataSetPermissions with the addition of +// DeleteThemeWithContext is the same as DeleteTheme with the addition of // the ability to pass a context and additional request options. // -// See DescribeDataSetPermissions for details on how to use this API operation. +// See DeleteTheme for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeDataSetPermissionsWithContext(ctx aws.Context, input *DescribeDataSetPermissionsInput, opts ...request.Option) (*DescribeDataSetPermissionsOutput, error) { - req, out := c.DescribeDataSetPermissionsRequest(input) +func (c *QuickSight) DeleteThemeWithContext(ctx aws.Context, input *DeleteThemeInput, opts ...request.Option) (*DeleteThemeOutput, error) { + req, out := c.DeleteThemeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeDataSource = "DescribeDataSource" +const opDeleteThemeAlias = "DeleteThemeAlias" -// DescribeDataSourceRequest generates a "aws/request.Request" representing the -// client's request for the DescribeDataSource operation. The "output" return +// DeleteThemeAliasRequest generates a "aws/request.Request" representing the +// client's request for the DeleteThemeAlias operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeDataSource for more information on using the DescribeDataSource +// See DeleteThemeAlias for more information on using the DeleteThemeAlias // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeDataSourceRequest method. -// req, resp := client.DescribeDataSourceRequest(params) +// // Example sending a request using the DeleteThemeAliasRequest method. 
+// req, resp := client.DeleteThemeAliasRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSource -func (c *QuickSight) DescribeDataSourceRequest(input *DescribeDataSourceInput) (req *request.Request, output *DescribeDataSourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteThemeAlias +func (c *QuickSight) DeleteThemeAliasRequest(input *DeleteThemeAliasInput) (req *request.Request, output *DeleteThemeAliasOutput) { op := &request.Operation{ - Name: opDescribeDataSource, - HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}", + Name: opDeleteThemeAlias, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/themes/{ThemeId}/aliases/{AliasName}", } if input == nil { - input = &DescribeDataSourceInput{} + input = &DeleteThemeAliasInput{} } - output = &DescribeDataSourceOutput{} + output = &DeleteThemeAliasOutput{} req = c.newRequest(op, input, output) return } -// DescribeDataSource API operation for Amazon QuickSight. +// DeleteThemeAlias API operation for Amazon QuickSight. // -// Describes a data source. +// Deletes the version of the theme that the specified theme alias points to. +// If you provide a specific alias, you delete the version of the theme that +// the alias points to. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeDataSource for usage and error information. +// API operation DeleteThemeAlias for usage and error information. // // Returned Error Types: -// * AccessDeniedException -// You don't have access to this item. The provided credentials couldn't be -// validated. You might not be authorized to carry out the request. Make sure -// that your account is authorized to use the Amazon QuickSight service, that -// your policies have the correct permissions, and that you are using the correct -// access keys. +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. // // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// * ResourceNotFoundException +// One or more resources can't be found. +// // * ThrottlingException // Access is throttled. // -// * ResourceNotFoundException -// One or more resources can't be found. +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. // // * InternalFailureException // An internal failure occurred. 
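//
// Illustrative usage sketch (editor-added; not part of the generated SDK source).
// The account ID, theme ID, and alias name below are placeholders, and sess is
// assumed to be a valid *session.Session:
//
//    svc := quicksight.New(sess)
//    _, err := svc.DeleteThemeAlias(&quicksight.DeleteThemeAliasInput{
//        AwsAccountId: aws.String("111122223333"),
//        ThemeId:      aws.String("example-theme-id"),
//        AliasName:    aws.String("example-alias"),
//    })
//    if err != nil {
//        // Inspect err with awserr.Error type assertions as described above.
//    }
//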
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSource -func (c *QuickSight) DescribeDataSource(input *DescribeDataSourceInput) (*DescribeDataSourceOutput, error) { - req, out := c.DescribeDataSourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteThemeAlias +func (c *QuickSight) DeleteThemeAlias(input *DeleteThemeAliasInput) (*DeleteThemeAliasOutput, error) { + req, out := c.DeleteThemeAliasRequest(input) return out, req.Send() } -// DescribeDataSourceWithContext is the same as DescribeDataSource with the addition of +// DeleteThemeAliasWithContext is the same as DeleteThemeAlias with the addition of // the ability to pass a context and additional request options. // -// See DescribeDataSource for details on how to use this API operation. +// See DeleteThemeAlias for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeDataSourceWithContext(ctx aws.Context, input *DescribeDataSourceInput, opts ...request.Option) (*DescribeDataSourceOutput, error) { - req, out := c.DescribeDataSourceRequest(input) +func (c *QuickSight) DeleteThemeAliasWithContext(ctx aws.Context, input *DeleteThemeAliasInput, opts ...request.Option) (*DeleteThemeAliasOutput, error) { + req, out := c.DeleteThemeAliasRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeDataSourcePermissions = "DescribeDataSourcePermissions" +const opDeleteUser = "DeleteUser" -// DescribeDataSourcePermissionsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeDataSourcePermissions operation. The "output" return +// DeleteUserRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUser operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeDataSourcePermissions for more information on using the DescribeDataSourcePermissions +// See DeleteUser for more information on using the DeleteUser // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeDataSourcePermissionsRequest method. -// req, resp := client.DescribeDataSourcePermissionsRequest(params) +// // Example sending a request using the DeleteUserRequest method. 
+// req, resp := client.DeleteUserRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSourcePermissions -func (c *QuickSight) DescribeDataSourcePermissionsRequest(input *DescribeDataSourcePermissionsInput) (req *request.Request, output *DescribeDataSourcePermissionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteUser +func (c *QuickSight) DeleteUserRequest(input *DeleteUserInput) (req *request.Request, output *DeleteUserOutput) { op := &request.Operation{ - Name: opDescribeDataSourcePermissions, - HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}/permissions", + Name: opDeleteUser, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}", } if input == nil { - input = &DescribeDataSourcePermissionsInput{} + input = &DeleteUserInput{} } - output = &DescribeDataSourcePermissionsOutput{} + output = &DeleteUserOutput{} req = c.newRequest(op, input, output) return } -// DescribeDataSourcePermissions API operation for Amazon QuickSight. +// DeleteUser API operation for Amazon QuickSight. // -// Describes the resource permissions for a data source. +// Deletes the Amazon QuickSight user that is associated with the identity of +// the AWS Identity and Access Management (IAM) user or role that's making the +// call. The IAM user isn't deleted as a result of this call. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeDataSourcePermissions for usage and error information. +// API operation DeleteUser for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -2600,90 +3000,95 @@ func (c *QuickSight) DescribeDataSourcePermissionsRequest(input *DescribeDataSou // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// * ResourceNotFoundException +// One or more resources can't be found. +// // * ThrottlingException // Access is throttled. // -// * ResourceNotFoundException -// One or more resources can't be found. +// * PreconditionNotMetException +// One or more preconditions aren't met. // // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSourcePermissions -func (c *QuickSight) DescribeDataSourcePermissions(input *DescribeDataSourcePermissionsInput) (*DescribeDataSourcePermissionsOutput, error) { - req, out := c.DescribeDataSourcePermissionsRequest(input) +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteUser +func (c *QuickSight) DeleteUser(input *DeleteUserInput) (*DeleteUserOutput, error) { + req, out := c.DeleteUserRequest(input) return out, req.Send() } -// DescribeDataSourcePermissionsWithContext is the same as DescribeDataSourcePermissions with the addition of +// DeleteUserWithContext is the same as DeleteUser with the addition of // the ability to pass a context and additional request options. // -// See DescribeDataSourcePermissions for details on how to use this API operation. 
+// See DeleteUser for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeDataSourcePermissionsWithContext(ctx aws.Context, input *DescribeDataSourcePermissionsInput, opts ...request.Option) (*DescribeDataSourcePermissionsOutput, error) { - req, out := c.DescribeDataSourcePermissionsRequest(input) +func (c *QuickSight) DeleteUserWithContext(ctx aws.Context, input *DeleteUserInput, opts ...request.Option) (*DeleteUserOutput, error) { + req, out := c.DeleteUserRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeGroup = "DescribeGroup" +const opDeleteUserByPrincipalId = "DeleteUserByPrincipalId" -// DescribeGroupRequest generates a "aws/request.Request" representing the -// client's request for the DescribeGroup operation. The "output" return +// DeleteUserByPrincipalIdRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUserByPrincipalId operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeGroup for more information on using the DescribeGroup +// See DeleteUserByPrincipalId for more information on using the DeleteUserByPrincipalId // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeGroupRequest method. -// req, resp := client.DescribeGroupRequest(params) +// // Example sending a request using the DeleteUserByPrincipalIdRequest method. +// req, resp := client.DeleteUserByPrincipalIdRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeGroup -func (c *QuickSight) DescribeGroupRequest(input *DescribeGroupInput) (req *request.Request, output *DescribeGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteUserByPrincipalId +func (c *QuickSight) DeleteUserByPrincipalIdRequest(input *DeleteUserByPrincipalIdInput) (req *request.Request, output *DeleteUserByPrincipalIdOutput) { op := &request.Operation{ - Name: opDescribeGroup, - HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}", + Name: opDeleteUserByPrincipalId, + HTTPMethod: "DELETE", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/user-principals/{PrincipalId}", } if input == nil { - input = &DescribeGroupInput{} + input = &DeleteUserByPrincipalIdInput{} } - output = &DescribeGroupOutput{} + output = &DeleteUserByPrincipalIdOutput{} req = c.newRequest(op, input, output) return } -// DescribeGroup API operation for Amazon QuickSight. +// DeleteUserByPrincipalId API operation for Amazon QuickSight. // -// Returns an Amazon QuickSight group's description and Amazon Resource Name -// (ARN). +// Deletes a user identified by its principal ID. 
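//
// Illustrative usage sketch (editor-added; not part of the generated SDK source).
// Assumes svc is a *quicksight.QuickSight client; the identifiers are placeholders:
//
//    _, err := svc.DeleteUserByPrincipalId(&quicksight.DeleteUserByPrincipalIdInput{
//        AwsAccountId: aws.String("111122223333"),
//        Namespace:    aws.String("default"),
//        PrincipalId:  aws.String("example-principal-id"),
//    })
//    if err != nil {
//        // Handle the error types listed below.
//    }
//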
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeGroup for usage and error information. +// API operation DeleteUserByPrincipalId for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -2711,81 +3116,119 @@ func (c *QuickSight) DescribeGroupRequest(input *DescribeGroupInput) (req *reque // * ResourceUnavailableException // This resource is currently unavailable. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeGroup -func (c *QuickSight) DescribeGroup(input *DescribeGroupInput) (*DescribeGroupOutput, error) { - req, out := c.DescribeGroupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DeleteUserByPrincipalId +func (c *QuickSight) DeleteUserByPrincipalId(input *DeleteUserByPrincipalIdInput) (*DeleteUserByPrincipalIdOutput, error) { + req, out := c.DeleteUserByPrincipalIdRequest(input) return out, req.Send() } -// DescribeGroupWithContext is the same as DescribeGroup with the addition of +// DeleteUserByPrincipalIdWithContext is the same as DeleteUserByPrincipalId with the addition of // the ability to pass a context and additional request options. // -// See DescribeGroup for details on how to use this API operation. +// See DeleteUserByPrincipalId for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeGroupWithContext(ctx aws.Context, input *DescribeGroupInput, opts ...request.Option) (*DescribeGroupOutput, error) { - req, out := c.DescribeGroupRequest(input) +func (c *QuickSight) DeleteUserByPrincipalIdWithContext(ctx aws.Context, input *DeleteUserByPrincipalIdInput, opts ...request.Option) (*DeleteUserByPrincipalIdOutput, error) { + req, out := c.DeleteUserByPrincipalIdRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeIAMPolicyAssignment = "DescribeIAMPolicyAssignment" +const opDescribeAccountCustomization = "DescribeAccountCustomization" -// DescribeIAMPolicyAssignmentRequest generates a "aws/request.Request" representing the -// client's request for the DescribeIAMPolicyAssignment operation. The "output" return +// DescribeAccountCustomizationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountCustomization operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeIAMPolicyAssignment for more information on using the DescribeIAMPolicyAssignment +// See DescribeAccountCustomization for more information on using the DescribeAccountCustomization // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // -// // Example sending a request using the DescribeIAMPolicyAssignmentRequest method. -// req, resp := client.DescribeIAMPolicyAssignmentRequest(params) +// // Example sending a request using the DescribeAccountCustomizationRequest method. +// req, resp := client.DescribeAccountCustomizationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeIAMPolicyAssignment -func (c *QuickSight) DescribeIAMPolicyAssignmentRequest(input *DescribeIAMPolicyAssignmentInput) (req *request.Request, output *DescribeIAMPolicyAssignmentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeAccountCustomization +func (c *QuickSight) DescribeAccountCustomizationRequest(input *DescribeAccountCustomizationInput) (req *request.Request, output *DescribeAccountCustomizationOutput) { op := &request.Operation{ - Name: opDescribeIAMPolicyAssignment, + Name: opDescribeAccountCustomization, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/{AssignmentName}", + HTTPPath: "/accounts/{AwsAccountId}/customizations", } if input == nil { - input = &DescribeIAMPolicyAssignmentInput{} + input = &DescribeAccountCustomizationInput{} } - output = &DescribeIAMPolicyAssignmentOutput{} + output = &DescribeAccountCustomizationOutput{} req = c.newRequest(op, input, output) return } -// DescribeIAMPolicyAssignment API operation for Amazon QuickSight. -// -// Describes an existing IAM policy assignment, as specified by the assignment -// name. +// DescribeAccountCustomization API operation for Amazon QuickSight. +// +// Describes the customizations associated with the provided AWS account and +// Amazon QuickSight namespace in an AWS Region. The QuickSight console evaluates +// which customizations to apply by running this API operation with the Resolved +// flag included. +// +// To determine what customizations display when you run this command, it can +// help to visualize the relationship of the entities involved. +// +// * AWS Account - The AWS account exists at the top of the hierarchy. It +// has the potential to use all of the AWS Regions and AWS Services. When +// you subscribe to QuickSight, you choose one AWS Region to use as your +// home Region. That's where your free SPICE capacity is located. You can +// use QuickSight in any supported AWS Region. +// +// * AWS Region - In each AWS Region where you sign in to QuickSight at least +// once, QuickSight acts as a separate instance of the same service. If you +// have a user directory, it resides in us-east-1, which is the US East (N. +// Virginia). Generally speaking, these users have access to QuickSight in +// any AWS Region, unless they are constrained to a namespace. To run the +// command in a different AWS Region, you change your Region settings. If +// you're using the AWS CLI, you can use one of the following options: Use +// command line options (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-options.html). +// Use named profiles (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html). +// Run aws configure to change your default AWS Region. Use Enter to key +// the same settings for your keys. For more information, see Configuring +// the AWS CLI (https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html). 
+// +// * Namespace - A QuickSight namespace is a partition that contains users +// and assets (data sources, datasets, dashboards, and so on). To access +// assets that are in a specific namespace, users and groups must also be +// part of the same namespace. People who share a namespace are completely +// isolated from users and assets in other namespaces, even if they are in +// the same AWS account and AWS Region. +// +// * Applied customizations - Within an AWS Region, a set of QuickSight customizations +// can apply to an AWS account or to a namespace. Settings that you apply +// to a namespace override settings that you apply to an AWS account. All +// settings are isolated to a single AWS Region. To apply them in other AWS +// Regions, run the CreateAccountCustomization command in each AWS Region +// where you want to apply the same customizations. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeIAMPolicyAssignment for usage and error information. +// API operation DescribeAccountCustomization for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -2804,86 +3247,87 @@ func (c *QuickSight) DescribeIAMPolicyAssignmentRequest(input *DescribeIAMPolicy // * ThrottlingException // Access is throttled. // -// * InvalidNextTokenException -// The NextToken value isn't valid. -// // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeIAMPolicyAssignment -func (c *QuickSight) DescribeIAMPolicyAssignment(input *DescribeIAMPolicyAssignmentInput) (*DescribeIAMPolicyAssignmentOutput, error) { - req, out := c.DescribeIAMPolicyAssignmentRequest(input) +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeAccountCustomization +func (c *QuickSight) DescribeAccountCustomization(input *DescribeAccountCustomizationInput) (*DescribeAccountCustomizationOutput, error) { + req, out := c.DescribeAccountCustomizationRequest(input) return out, req.Send() } -// DescribeIAMPolicyAssignmentWithContext is the same as DescribeIAMPolicyAssignment with the addition of +// DescribeAccountCustomizationWithContext is the same as DescribeAccountCustomization with the addition of // the ability to pass a context and additional request options. // -// See DescribeIAMPolicyAssignment for details on how to use this API operation. +// See DescribeAccountCustomization for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
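//
// Illustrative usage sketch (editor-added; not part of the generated SDK source).
// Assumes svc is a *quicksight.QuickSight client; the identifiers and the use of
// the Resolved flag shown here are placeholders:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    out, err := svc.DescribeAccountCustomizationWithContext(ctx, &quicksight.DescribeAccountCustomizationInput{
//        AwsAccountId: aws.String("111122223333"),
//        Namespace:    aws.String("default"),
//        Resolved:     aws.Bool(true), // ask for the customizations that are actually in effect
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//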
-func (c *QuickSight) DescribeIAMPolicyAssignmentWithContext(ctx aws.Context, input *DescribeIAMPolicyAssignmentInput, opts ...request.Option) (*DescribeIAMPolicyAssignmentOutput, error) { - req, out := c.DescribeIAMPolicyAssignmentRequest(input) +func (c *QuickSight) DescribeAccountCustomizationWithContext(ctx aws.Context, input *DescribeAccountCustomizationInput, opts ...request.Option) (*DescribeAccountCustomizationOutput, error) { + req, out := c.DescribeAccountCustomizationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeIngestion = "DescribeIngestion" +const opDescribeAccountSettings = "DescribeAccountSettings" -// DescribeIngestionRequest generates a "aws/request.Request" representing the -// client's request for the DescribeIngestion operation. The "output" return +// DescribeAccountSettingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountSettings operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeIngestion for more information on using the DescribeIngestion +// See DescribeAccountSettings for more information on using the DescribeAccountSettings // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeIngestionRequest method. -// req, resp := client.DescribeIngestionRequest(params) +// // Example sending a request using the DescribeAccountSettingsRequest method. +// req, resp := client.DescribeAccountSettingsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeIngestion -func (c *QuickSight) DescribeIngestionRequest(input *DescribeIngestionInput) (req *request.Request, output *DescribeIngestionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeAccountSettings +func (c *QuickSight) DescribeAccountSettingsRequest(input *DescribeAccountSettingsInput) (req *request.Request, output *DescribeAccountSettingsOutput) { op := &request.Operation{ - Name: opDescribeIngestion, + Name: opDescribeAccountSettings, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions/{IngestionId}", + HTTPPath: "/accounts/{AwsAccountId}/settings", } if input == nil { - input = &DescribeIngestionInput{} + input = &DescribeAccountSettingsInput{} } - output = &DescribeIngestionOutput{} + output = &DescribeAccountSettingsOutput{} req = c.newRequest(op, input, output) return } -// DescribeIngestion API operation for Amazon QuickSight. +// DescribeAccountSettings API operation for Amazon QuickSight. // -// Describes a SPICE ingestion. +// Describes the settings that were used when your QuickSight subscription was +// first created in this AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeIngestion for usage and error information. +// API operation DescribeAccountSettings for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -2902,91 +3346,94 @@ func (c *QuickSight) DescribeIngestionRequest(input *DescribeIngestionInput) (re // * ThrottlingException // Access is throttled. // -// * ResourceExistsException -// The resource specified already exists. -// // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeIngestion -func (c *QuickSight) DescribeIngestion(input *DescribeIngestionInput) (*DescribeIngestionOutput, error) { - req, out := c.DescribeIngestionRequest(input) +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeAccountSettings +func (c *QuickSight) DescribeAccountSettings(input *DescribeAccountSettingsInput) (*DescribeAccountSettingsOutput, error) { + req, out := c.DescribeAccountSettingsRequest(input) return out, req.Send() } -// DescribeIngestionWithContext is the same as DescribeIngestion with the addition of +// DescribeAccountSettingsWithContext is the same as DescribeAccountSettings with the addition of // the ability to pass a context and additional request options. // -// See DescribeIngestion for details on how to use this API operation. +// See DescribeAccountSettings for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeIngestionWithContext(ctx aws.Context, input *DescribeIngestionInput, opts ...request.Option) (*DescribeIngestionOutput, error) { - req, out := c.DescribeIngestionRequest(input) +func (c *QuickSight) DescribeAccountSettingsWithContext(ctx aws.Context, input *DescribeAccountSettingsInput, opts ...request.Option) (*DescribeAccountSettingsOutput, error) { + req, out := c.DescribeAccountSettingsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeTemplate = "DescribeTemplate" +const opDescribeAnalysis = "DescribeAnalysis" -// DescribeTemplateRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTemplate operation. The "output" return +// DescribeAnalysisRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAnalysis operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeTemplate for more information on using the DescribeTemplate +// See DescribeAnalysis for more information on using the DescribeAnalysis // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeTemplateRequest method. 
-// req, resp := client.DescribeTemplateRequest(params) +// // Example sending a request using the DescribeAnalysisRequest method. +// req, resp := client.DescribeAnalysisRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplate -func (c *QuickSight) DescribeTemplateRequest(input *DescribeTemplateInput) (req *request.Request, output *DescribeTemplateOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeAnalysis +func (c *QuickSight) DescribeAnalysisRequest(input *DescribeAnalysisInput) (req *request.Request, output *DescribeAnalysisOutput) { op := &request.Operation{ - Name: opDescribeTemplate, + Name: opDescribeAnalysis, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}", + HTTPPath: "/accounts/{AwsAccountId}/analyses/{AnalysisId}", } if input == nil { - input = &DescribeTemplateInput{} + input = &DescribeAnalysisInput{} } - output = &DescribeTemplateOutput{} + output = &DescribeAnalysisOutput{} req = c.newRequest(op, input, output) return } -// DescribeTemplate API operation for Amazon QuickSight. +// DescribeAnalysis API operation for Amazon QuickSight. // -// Describes a template's metadata. +// Provides a summary of the metadata for an analysis. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeTemplate for usage and error information. +// API operation DescribeAnalysis for usage and error information. // // Returned Error Types: // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// * ResourceNotFoundException +// One or more resources can't be found. +// // * AccessDeniedException // You don't have access to this item. The provided credentials couldn't be // validated. You might not be authorized to carry out the request. Make sure @@ -2994,18 +3441,9 @@ func (c *QuickSight) DescribeTemplateRequest(input *DescribeTemplateInput) (req // your policies have the correct permissions, and that you are using the correct // access keys. // -// * ResourceExistsException -// The resource specified already exists. -// -// * ResourceNotFoundException -// One or more resources can't be found. -// // * ThrottlingException // Access is throttled. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. -// // * UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight // subscription where the edition doesn't include support for that operation. @@ -3015,88 +3453,91 @@ func (c *QuickSight) DescribeTemplateRequest(input *DescribeTemplateInput) (req // * InternalFailureException // An internal failure occurred. 
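//
// Illustrative usage sketch (editor-added; not part of the generated SDK source).
// Assumes svc is a *quicksight.QuickSight client and the IDs are placeholders:
//
//    out, err := svc.DescribeAnalysis(&quicksight.DescribeAnalysisInput{
//        AwsAccountId: aws.String("111122223333"),
//        AnalysisId:   aws.String("example-analysis-id"),
//    })
//    if err == nil {
//        fmt.Println(out) // summary metadata for the analysis
//    }
//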
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplate -func (c *QuickSight) DescribeTemplate(input *DescribeTemplateInput) (*DescribeTemplateOutput, error) { - req, out := c.DescribeTemplateRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeAnalysis +func (c *QuickSight) DescribeAnalysis(input *DescribeAnalysisInput) (*DescribeAnalysisOutput, error) { + req, out := c.DescribeAnalysisRequest(input) return out, req.Send() } -// DescribeTemplateWithContext is the same as DescribeTemplate with the addition of +// DescribeAnalysisWithContext is the same as DescribeAnalysis with the addition of // the ability to pass a context and additional request options. // -// See DescribeTemplate for details on how to use this API operation. +// See DescribeAnalysis for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeTemplateWithContext(ctx aws.Context, input *DescribeTemplateInput, opts ...request.Option) (*DescribeTemplateOutput, error) { - req, out := c.DescribeTemplateRequest(input) +func (c *QuickSight) DescribeAnalysisWithContext(ctx aws.Context, input *DescribeAnalysisInput, opts ...request.Option) (*DescribeAnalysisOutput, error) { + req, out := c.DescribeAnalysisRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeTemplateAlias = "DescribeTemplateAlias" +const opDescribeAnalysisPermissions = "DescribeAnalysisPermissions" -// DescribeTemplateAliasRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTemplateAlias operation. The "output" return +// DescribeAnalysisPermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAnalysisPermissions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeTemplateAlias for more information on using the DescribeTemplateAlias +// See DescribeAnalysisPermissions for more information on using the DescribeAnalysisPermissions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeTemplateAliasRequest method. -// req, resp := client.DescribeTemplateAliasRequest(params) +// // Example sending a request using the DescribeAnalysisPermissionsRequest method. 
+// req, resp := client.DescribeAnalysisPermissionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplateAlias -func (c *QuickSight) DescribeTemplateAliasRequest(input *DescribeTemplateAliasInput) (req *request.Request, output *DescribeTemplateAliasOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeAnalysisPermissions +func (c *QuickSight) DescribeAnalysisPermissionsRequest(input *DescribeAnalysisPermissionsInput) (req *request.Request, output *DescribeAnalysisPermissionsOutput) { op := &request.Operation{ - Name: opDescribeTemplateAlias, + Name: opDescribeAnalysisPermissions, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}", + HTTPPath: "/accounts/{AwsAccountId}/analyses/{AnalysisId}/permissions", } if input == nil { - input = &DescribeTemplateAliasInput{} + input = &DescribeAnalysisPermissionsInput{} } - output = &DescribeTemplateAliasOutput{} + output = &DescribeAnalysisPermissionsOutput{} req = c.newRequest(op, input, output) return } -// DescribeTemplateAlias API operation for Amazon QuickSight. +// DescribeAnalysisPermissions API operation for Amazon QuickSight. // -// Describes the template alias for a template. +// Provides the read and write permissions for an analysis. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeTemplateAlias for usage and error information. +// API operation DescribeAnalysisPermissions for usage and error information. // // Returned Error Types: -// * ThrottlingException -// Access is throttled. +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. // // * ResourceNotFoundException // One or more resources can't be found. // +// * ThrottlingException +// Access is throttled. +// // * UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight // subscription where the edition doesn't include support for that operation. @@ -3106,94 +3547,98 @@ func (c *QuickSight) DescribeTemplateAliasRequest(input *DescribeTemplateAliasIn // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplateAlias -func (c *QuickSight) DescribeTemplateAlias(input *DescribeTemplateAliasInput) (*DescribeTemplateAliasOutput, error) { - req, out := c.DescribeTemplateAliasRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeAnalysisPermissions +func (c *QuickSight) DescribeAnalysisPermissions(input *DescribeAnalysisPermissionsInput) (*DescribeAnalysisPermissionsOutput, error) { + req, out := c.DescribeAnalysisPermissionsRequest(input) return out, req.Send() } -// DescribeTemplateAliasWithContext is the same as DescribeTemplateAlias with the addition of +// DescribeAnalysisPermissionsWithContext is the same as DescribeAnalysisPermissions with the addition of // the ability to pass a context and additional request options. // -// See DescribeTemplateAlias for details on how to use this API operation. +// See DescribeAnalysisPermissions for details on how to use this API operation. 
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeTemplateAliasWithContext(ctx aws.Context, input *DescribeTemplateAliasInput, opts ...request.Option) (*DescribeTemplateAliasOutput, error) { - req, out := c.DescribeTemplateAliasRequest(input) +func (c *QuickSight) DescribeAnalysisPermissionsWithContext(ctx aws.Context, input *DescribeAnalysisPermissionsInput, opts ...request.Option) (*DescribeAnalysisPermissionsOutput, error) { + req, out := c.DescribeAnalysisPermissionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeTemplatePermissions = "DescribeTemplatePermissions" +const opDescribeDashboard = "DescribeDashboard" -// DescribeTemplatePermissionsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeTemplatePermissions operation. The "output" return +// DescribeDashboardRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDashboard operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeTemplatePermissions for more information on using the DescribeTemplatePermissions +// See DescribeDashboard for more information on using the DescribeDashboard // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeTemplatePermissionsRequest method. -// req, resp := client.DescribeTemplatePermissionsRequest(params) +// // Example sending a request using the DescribeDashboardRequest method. +// req, resp := client.DescribeDashboardRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplatePermissions -func (c *QuickSight) DescribeTemplatePermissionsRequest(input *DescribeTemplatePermissionsInput) (req *request.Request, output *DescribeTemplatePermissionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDashboard +func (c *QuickSight) DescribeDashboardRequest(input *DescribeDashboardInput) (req *request.Request, output *DescribeDashboardOutput) { op := &request.Operation{ - Name: opDescribeTemplatePermissions, + Name: opDescribeDashboard, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/permissions", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}", } if input == nil { - input = &DescribeTemplatePermissionsInput{} + input = &DescribeDashboardInput{} } - output = &DescribeTemplatePermissionsOutput{} + output = &DescribeDashboardOutput{} req = c.newRequest(op, input, output) return } -// DescribeTemplatePermissions API operation for Amazon QuickSight. +// DescribeDashboard API operation for Amazon QuickSight. // -// Describes read and write permissions on a template. +// Provides a summary for a dashboard. 
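//
// Illustrative usage sketch (editor-added; not part of the generated SDK source).
// Assumes svc is a *quicksight.QuickSight client; the IDs are placeholders, and the
// optional VersionNumber/AliasName fields are omitted so the latest version is described:
//
//    out, err := svc.DescribeDashboard(&quicksight.DescribeDashboardInput{
//        AwsAccountId: aws.String("111122223333"),
//        DashboardId:  aws.String("example-dashboard-id"),
//    })
//    if err == nil {
//        fmt.Println(out)
//    }
//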
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeTemplatePermissions for usage and error information. +// API operation DescribeDashboard for usage and error information. // // Returned Error Types: -// * ThrottlingException -// Access is throttled. -// // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. -// // * ResourceNotFoundException // One or more resources can't be found. // +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * ThrottlingException +// Access is throttled. +// // * UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight // subscription where the edition doesn't include support for that operation. @@ -3203,89 +3648,82 @@ func (c *QuickSight) DescribeTemplatePermissionsRequest(input *DescribeTemplateP // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplatePermissions -func (c *QuickSight) DescribeTemplatePermissions(input *DescribeTemplatePermissionsInput) (*DescribeTemplatePermissionsOutput, error) { - req, out := c.DescribeTemplatePermissionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDashboard +func (c *QuickSight) DescribeDashboard(input *DescribeDashboardInput) (*DescribeDashboardOutput, error) { + req, out := c.DescribeDashboardRequest(input) return out, req.Send() } -// DescribeTemplatePermissionsWithContext is the same as DescribeTemplatePermissions with the addition of +// DescribeDashboardWithContext is the same as DescribeDashboard with the addition of // the ability to pass a context and additional request options. // -// See DescribeTemplatePermissions for details on how to use this API operation. +// See DescribeDashboard for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeTemplatePermissionsWithContext(ctx aws.Context, input *DescribeTemplatePermissionsInput, opts ...request.Option) (*DescribeTemplatePermissionsOutput, error) { - req, out := c.DescribeTemplatePermissionsRequest(input) +func (c *QuickSight) DescribeDashboardWithContext(ctx aws.Context, input *DescribeDashboardInput, opts ...request.Option) (*DescribeDashboardOutput, error) { + req, out := c.DescribeDashboardRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeUser = "DescribeUser" +const opDescribeDashboardPermissions = "DescribeDashboardPermissions" -// DescribeUserRequest generates a "aws/request.Request" representing the -// client's request for the DescribeUser operation. 
The "output" return +// DescribeDashboardPermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDashboardPermissions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeUser for more information on using the DescribeUser +// See DescribeDashboardPermissions for more information on using the DescribeDashboardPermissions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeUserRequest method. -// req, resp := client.DescribeUserRequest(params) +// // Example sending a request using the DescribeDashboardPermissionsRequest method. +// req, resp := client.DescribeDashboardPermissionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeUser -func (c *QuickSight) DescribeUserRequest(input *DescribeUserInput) (req *request.Request, output *DescribeUserOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDashboardPermissions +func (c *QuickSight) DescribeDashboardPermissionsRequest(input *DescribeDashboardPermissionsInput) (req *request.Request, output *DescribeDashboardPermissionsOutput) { op := &request.Operation{ - Name: opDescribeUser, + Name: opDescribeDashboardPermissions, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/permissions", } if input == nil { - input = &DescribeUserInput{} + input = &DescribeDashboardPermissionsInput{} } - output = &DescribeUserOutput{} + output = &DescribeDashboardPermissionsOutput{} req = c.newRequest(op, input, output) return } -// DescribeUser API operation for Amazon QuickSight. +// DescribeDashboardPermissions API operation for Amazon QuickSight. // -// Returns information about a user, given the user name. +// Describes read and write permissions for a dashboard. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation DescribeUser for usage and error information. +// API operation DescribeDashboardPermissions for usage and error information. // // Returned Error Types: -// * AccessDeniedException -// You don't have access to this item. The provided credentials couldn't be -// validated. You might not be authorized to carry out the request. Make sure -// that your account is authorized to use the Amazon QuickSight service, that -// your policies have the correct permissions, and that you are using the correct -// access keys. -// // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // @@ -3295,94 +3733,89 @@ func (c *QuickSight) DescribeUserRequest(input *DescribeUserInput) (req *request // * ThrottlingException // Access is throttled. 
// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// // * InternalFailureException // An internal failure occurred. // -// * ResourceUnavailableException -// This resource is currently unavailable. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeUser -func (c *QuickSight) DescribeUser(input *DescribeUserInput) (*DescribeUserOutput, error) { - req, out := c.DescribeUserRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDashboardPermissions +func (c *QuickSight) DescribeDashboardPermissions(input *DescribeDashboardPermissionsInput) (*DescribeDashboardPermissionsOutput, error) { + req, out := c.DescribeDashboardPermissionsRequest(input) return out, req.Send() } -// DescribeUserWithContext is the same as DescribeUser with the addition of +// DescribeDashboardPermissionsWithContext is the same as DescribeDashboardPermissions with the addition of // the ability to pass a context and additional request options. // -// See DescribeUser for details on how to use this API operation. +// See DescribeDashboardPermissions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) DescribeUserWithContext(ctx aws.Context, input *DescribeUserInput, opts ...request.Option) (*DescribeUserOutput, error) { - req, out := c.DescribeUserRequest(input) +func (c *QuickSight) DescribeDashboardPermissionsWithContext(ctx aws.Context, input *DescribeDashboardPermissionsInput, opts ...request.Option) (*DescribeDashboardPermissionsOutput, error) { + req, out := c.DescribeDashboardPermissionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetDashboardEmbedUrl = "GetDashboardEmbedUrl" +const opDescribeDataSet = "DescribeDataSet" -// GetDashboardEmbedUrlRequest generates a "aws/request.Request" representing the -// client's request for the GetDashboardEmbedUrl operation. The "output" return +// DescribeDataSetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataSet operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetDashboardEmbedUrl for more information on using the GetDashboardEmbedUrl +// See DescribeDataSet for more information on using the DescribeDataSet // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetDashboardEmbedUrlRequest method. -// req, resp := client.GetDashboardEmbedUrlRequest(params) +// // Example sending a request using the DescribeDataSetRequest method. 
+// req, resp := client.DescribeDataSetRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/GetDashboardEmbedUrl -func (c *QuickSight) GetDashboardEmbedUrlRequest(input *GetDashboardEmbedUrlInput) (req *request.Request, output *GetDashboardEmbedUrlOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSet +func (c *QuickSight) DescribeDataSetRequest(input *DescribeDataSetInput) (req *request.Request, output *DescribeDataSetOutput) { op := &request.Operation{ - Name: opGetDashboardEmbedUrl, + Name: opDescribeDataSet, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/embed-url", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}", } if input == nil { - input = &GetDashboardEmbedUrlInput{} + input = &DescribeDataSetInput{} } - output = &GetDashboardEmbedUrlOutput{} + output = &DescribeDataSetOutput{} req = c.newRequest(op, input, output) return } -// GetDashboardEmbedUrl API operation for Amazon QuickSight. -// -// Generates a server-side embeddable URL and authorization code. For this process -// to work properly, first configure the dashboards and user permissions. For -// more information, see Embedding Amazon QuickSight Dashboards (https://docs.aws.amazon.com/quicksight/latest/user/embedding-dashboards.html) -// in the Amazon QuickSight User Guide or Embedding Amazon QuickSight Dashboards -// (https://docs.aws.amazon.com/quicksight/latest/APIReference/qs-dev-embedded-dashboards.html) -// in the Amazon QuickSight API Reference. +// DescribeDataSet API operation for Amazon QuickSight. // -// Currently, you can use GetDashboardEmbedURL only from the server, not from -// the user’s browser. +// Describes a dataset. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation GetDashboardEmbedUrl for usage and error information. +// API operation DescribeDataSet for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -3395,428 +3828,281 @@ func (c *QuickSight) GetDashboardEmbedUrlRequest(input *GetDashboardEmbedUrlInpu // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ResourceExistsException -// The resource specified already exists. -// -// * ResourceNotFoundException -// One or more resources can't be found. -// // * ThrottlingException // Access is throttled. // -// * DomainNotWhitelistedException -// The domain specified isn't on the allow list. All domains for embedded dashboards -// must be added to the approved list by an Amazon QuickSight admin. -// -// * UserNotFoundException -// The user with the provided name isn't found. This error can happen in any -// operation that requires finding a user based on a provided user name, such -// as DeleteUser, DescribeUser, and so on. -// -// * IdentityTypeNotSupportedException -// The identity type specified isn't supported. Supported identity types include -// IAM and QUICKSIGHT. -// -// * SessionLifetimeInMinutesInvalidException -// The number of minutes specified for the lifetime of a session isn't valid. -// The session lifetime must be 15-600 minutes. 
-// -// * UnsupportedUserEditionException -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. -// Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. +// * ResourceNotFoundException +// One or more resources can't be found. // // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/GetDashboardEmbedUrl -func (c *QuickSight) GetDashboardEmbedUrl(input *GetDashboardEmbedUrlInput) (*GetDashboardEmbedUrlOutput, error) { - req, out := c.GetDashboardEmbedUrlRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSet +func (c *QuickSight) DescribeDataSet(input *DescribeDataSetInput) (*DescribeDataSetOutput, error) { + req, out := c.DescribeDataSetRequest(input) return out, req.Send() } -// GetDashboardEmbedUrlWithContext is the same as GetDashboardEmbedUrl with the addition of +// DescribeDataSetWithContext is the same as DescribeDataSet with the addition of // the ability to pass a context and additional request options. // -// See GetDashboardEmbedUrl for details on how to use this API operation. +// See DescribeDataSet for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) GetDashboardEmbedUrlWithContext(ctx aws.Context, input *GetDashboardEmbedUrlInput, opts ...request.Option) (*GetDashboardEmbedUrlOutput, error) { - req, out := c.GetDashboardEmbedUrlRequest(input) +func (c *QuickSight) DescribeDataSetWithContext(ctx aws.Context, input *DescribeDataSetInput, opts ...request.Option) (*DescribeDataSetOutput, error) { + req, out := c.DescribeDataSetRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListDashboardVersions = "ListDashboardVersions" +const opDescribeDataSetPermissions = "DescribeDataSetPermissions" -// ListDashboardVersionsRequest generates a "aws/request.Request" representing the -// client's request for the ListDashboardVersions operation. The "output" return +// DescribeDataSetPermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataSetPermissions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListDashboardVersions for more information on using the ListDashboardVersions +// See DescribeDataSetPermissions for more information on using the DescribeDataSetPermissions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListDashboardVersionsRequest method. -// req, resp := client.ListDashboardVersionsRequest(params) +// // Example sending a request using the DescribeDataSetPermissionsRequest method. 
+// req, resp := client.DescribeDataSetPermissionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDashboardVersions -func (c *QuickSight) ListDashboardVersionsRequest(input *ListDashboardVersionsInput) (req *request.Request, output *ListDashboardVersionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSetPermissions +func (c *QuickSight) DescribeDataSetPermissionsRequest(input *DescribeDataSetPermissionsInput) (req *request.Request, output *DescribeDataSetPermissionsOutput) { op := &request.Operation{ - Name: opListDashboardVersions, + Name: opDescribeDataSetPermissions, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/versions", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/permissions", } if input == nil { - input = &ListDashboardVersionsInput{} + input = &DescribeDataSetPermissionsInput{} } - output = &ListDashboardVersionsOutput{} + output = &DescribeDataSetPermissionsOutput{} req = c.newRequest(op, input, output) return } -// ListDashboardVersions API operation for Amazon QuickSight. +// DescribeDataSetPermissions API operation for Amazon QuickSight. // -// Lists all the versions of the dashboards in the QuickSight subscription. +// Describes the permissions on a dataset. +// +// The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/data-set-id. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListDashboardVersions for usage and error information. +// API operation DescribeDataSetPermissions for usage and error information. // // Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ResourceNotFoundException -// One or more resources can't be found. -// // * ThrottlingException // Access is throttled. // -// * InvalidNextTokenException -// The NextToken value isn't valid. -// -// * UnsupportedUserEditionException -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. -// Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. +// * ResourceNotFoundException +// One or more resources can't be found. // // * InternalFailureException // An internal failure occurred. 
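//
// A minimal, hedged sketch of the runtime type assertion mentioned above,
// assuming a configured *quicksight.QuickSight client ("client") and a
// populated *quicksight.DescribeDataSetPermissionsInput ("params"), both
// hypothetical; the ErrCode* names are the error-code string constants
// generated for this package.
//
//    out, err := client.DescribeDataSetPermissions(params)
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case quicksight.ErrCodeResourceNotFoundException:
//            // the data set or its permissions could not be found
//        case quicksight.ErrCodeThrottlingException:
//            // access is throttled; retry with backoff
//        default:
//            fmt.Println(aerr.Code(), aerr.Message())
//        }
//    } else if err == nil {
//        fmt.Println(out.Permissions)
//    }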
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDashboardVersions -func (c *QuickSight) ListDashboardVersions(input *ListDashboardVersionsInput) (*ListDashboardVersionsOutput, error) { - req, out := c.ListDashboardVersionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSetPermissions +func (c *QuickSight) DescribeDataSetPermissions(input *DescribeDataSetPermissionsInput) (*DescribeDataSetPermissionsOutput, error) { + req, out := c.DescribeDataSetPermissionsRequest(input) return out, req.Send() } -// ListDashboardVersionsWithContext is the same as ListDashboardVersions with the addition of +// DescribeDataSetPermissionsWithContext is the same as DescribeDataSetPermissions with the addition of // the ability to pass a context and additional request options. // -// See ListDashboardVersions for details on how to use this API operation. +// See DescribeDataSetPermissions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) ListDashboardVersionsWithContext(ctx aws.Context, input *ListDashboardVersionsInput, opts ...request.Option) (*ListDashboardVersionsOutput, error) { - req, out := c.ListDashboardVersionsRequest(input) +func (c *QuickSight) DescribeDataSetPermissionsWithContext(ctx aws.Context, input *DescribeDataSetPermissionsInput, opts ...request.Option) (*DescribeDataSetPermissionsOutput, error) { + req, out := c.DescribeDataSetPermissionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListDashboardVersionsPages iterates over the pages of a ListDashboardVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +const opDescribeDataSource = "DescribeDataSource" + +// DescribeDataSourceRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataSource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See ListDashboardVersions method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListDashboardVersions operation. -// pageNum := 0 -// err := client.ListDashboardVersionsPages(params, -// func(page *quicksight.ListDashboardVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *QuickSight) ListDashboardVersionsPages(input *ListDashboardVersionsInput, fn func(*ListDashboardVersionsOutput, bool) bool) error { - return c.ListDashboardVersionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListDashboardVersionsPagesWithContext same as ListDashboardVersionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. 
In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *QuickSight) ListDashboardVersionsPagesWithContext(ctx aws.Context, input *ListDashboardVersionsInput, fn func(*ListDashboardVersionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListDashboardVersionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListDashboardVersionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListDashboardVersionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListDashboards = "ListDashboards" - -// ListDashboardsRequest generates a "aws/request.Request" representing the -// client's request for the ListDashboards operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListDashboards for more information on using the ListDashboards +// See DescribeDataSource for more information on using the DescribeDataSource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListDashboardsRequest method. -// req, resp := client.ListDashboardsRequest(params) +// // Example sending a request using the DescribeDataSourceRequest method. +// req, resp := client.DescribeDataSourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDashboards -func (c *QuickSight) ListDashboardsRequest(input *ListDashboardsInput) (req *request.Request, output *ListDashboardsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSource +func (c *QuickSight) DescribeDataSourceRequest(input *DescribeDataSourceInput) (req *request.Request, output *DescribeDataSourceOutput) { op := &request.Operation{ - Name: opListDashboards, + Name: opDescribeDataSource, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/dashboards", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}", } if input == nil { - input = &ListDashboardsInput{} + input = &DescribeDataSourceInput{} } - output = &ListDashboardsOutput{} + output = &DescribeDataSourceOutput{} req = c.newRequest(op, input, output) return } -// ListDashboards API operation for Amazon QuickSight. +// DescribeDataSource API operation for Amazon QuickSight. // -// Lists dashboards in an AWS account. +// Describes a data source. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListDashboards for usage and error information. 
+// API operation DescribeDataSource for usage and error information. // // Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// // * ThrottlingException // Access is throttled. // -// * InvalidNextTokenException -// The NextToken value isn't valid. -// -// * UnsupportedUserEditionException -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. -// Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. +// * ResourceNotFoundException +// One or more resources can't be found. // // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDashboards -func (c *QuickSight) ListDashboards(input *ListDashboardsInput) (*ListDashboardsOutput, error) { - req, out := c.ListDashboardsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSource +func (c *QuickSight) DescribeDataSource(input *DescribeDataSourceInput) (*DescribeDataSourceOutput, error) { + req, out := c.DescribeDataSourceRequest(input) return out, req.Send() } -// ListDashboardsWithContext is the same as ListDashboards with the addition of +// DescribeDataSourceWithContext is the same as DescribeDataSource with the addition of // the ability to pass a context and additional request options. // -// See ListDashboards for details on how to use this API operation. +// See DescribeDataSource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) ListDashboardsWithContext(ctx aws.Context, input *ListDashboardsInput, opts ...request.Option) (*ListDashboardsOutput, error) { - req, out := c.ListDashboardsRequest(input) +func (c *QuickSight) DescribeDataSourceWithContext(ctx aws.Context, input *DescribeDataSourceInput, opts ...request.Option) (*DescribeDataSourceOutput, error) { + req, out := c.DescribeDataSourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListDashboardsPages iterates over the pages of a ListDashboards operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListDashboards method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListDashboards operation. 
-// pageNum := 0 -// err := client.ListDashboardsPages(params, -// func(page *quicksight.ListDashboardsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *QuickSight) ListDashboardsPages(input *ListDashboardsInput, fn func(*ListDashboardsOutput, bool) bool) error { - return c.ListDashboardsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListDashboardsPagesWithContext same as ListDashboardsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *QuickSight) ListDashboardsPagesWithContext(ctx aws.Context, input *ListDashboardsInput, fn func(*ListDashboardsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListDashboardsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListDashboardsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListDashboardsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListDataSets = "ListDataSets" +const opDescribeDataSourcePermissions = "DescribeDataSourcePermissions" -// ListDataSetsRequest generates a "aws/request.Request" representing the -// client's request for the ListDataSets operation. The "output" return +// DescribeDataSourcePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDataSourcePermissions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListDataSets for more information on using the ListDataSets +// See DescribeDataSourcePermissions for more information on using the DescribeDataSourcePermissions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListDataSetsRequest method. -// req, resp := client.ListDataSetsRequest(params) +// // Example sending a request using the DescribeDataSourcePermissionsRequest method. 
+// req, resp := client.DescribeDataSourcePermissionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDataSets -func (c *QuickSight) ListDataSetsRequest(input *ListDataSetsInput) (req *request.Request, output *ListDataSetsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSourcePermissions +func (c *QuickSight) DescribeDataSourcePermissionsRequest(input *DescribeDataSourcePermissionsInput) (req *request.Request, output *DescribeDataSourcePermissionsOutput) { op := &request.Operation{ - Name: opListDataSets, + Name: opDescribeDataSourcePermissions, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/data-sets", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}/permissions", } if input == nil { - input = &ListDataSetsInput{} + input = &DescribeDataSourcePermissionsInput{} } - output = &ListDataSetsOutput{} + output = &DescribeDataSourcePermissionsOutput{} req = c.newRequest(op, input, output) return } -// ListDataSets API operation for Amazon QuickSight. -// -// Lists all of the datasets belonging to the current AWS account in an AWS -// Region. +// DescribeDataSourcePermissions API operation for Amazon QuickSight. // -// The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/*. +// Describes the resource permissions for a data source. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListDataSets for usage and error information. +// API operation DescribeDataSourcePermissions for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -3832,144 +4118,87 @@ func (c *QuickSight) ListDataSetsRequest(input *ListDataSetsInput) (req *request // * ThrottlingException // Access is throttled. // -// * InvalidNextTokenException -// The NextToken value isn't valid. +// * ResourceNotFoundException +// One or more resources can't be found. // // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDataSets -func (c *QuickSight) ListDataSets(input *ListDataSetsInput) (*ListDataSetsOutput, error) { - req, out := c.ListDataSetsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeDataSourcePermissions +func (c *QuickSight) DescribeDataSourcePermissions(input *DescribeDataSourcePermissionsInput) (*DescribeDataSourcePermissionsOutput, error) { + req, out := c.DescribeDataSourcePermissionsRequest(input) return out, req.Send() } -// ListDataSetsWithContext is the same as ListDataSets with the addition of +// DescribeDataSourcePermissionsWithContext is the same as DescribeDataSourcePermissions with the addition of // the ability to pass a context and additional request options. // -// See ListDataSets for details on how to use this API operation. +// See DescribeDataSourcePermissions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) ListDataSetsWithContext(ctx aws.Context, input *ListDataSetsInput, opts ...request.Option) (*ListDataSetsOutput, error) { - req, out := c.ListDataSetsRequest(input) +func (c *QuickSight) DescribeDataSourcePermissionsWithContext(ctx aws.Context, input *DescribeDataSourcePermissionsInput, opts ...request.Option) (*DescribeDataSourcePermissionsOutput, error) { + req, out := c.DescribeDataSourcePermissionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListDataSetsPages iterates over the pages of a ListDataSets operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +const opDescribeGroup = "DescribeGroup" + +// DescribeGroupRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See ListDataSets method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: This operation can generate multiple requests to a service. +// See DescribeGroup for more information on using the DescribeGroup +// API call, and error handling. // -// // Example iterating over at most 3 pages of a ListDataSets operation. -// pageNum := 0 -// err := client.ListDataSetsPages(params, -// func(page *quicksight.ListDataSetsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -func (c *QuickSight) ListDataSetsPages(input *ListDataSetsInput, fn func(*ListDataSetsOutput, bool) bool) error { - return c.ListDataSetsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListDataSetsPagesWithContext same as ListDataSetsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *QuickSight) ListDataSetsPagesWithContext(ctx aws.Context, input *ListDataSetsInput, fn func(*ListDataSetsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListDataSetsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListDataSetsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListDataSetsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListDataSources = "ListDataSources" - -// ListDataSourcesRequest generates a "aws/request.Request" representing the -// client's request for the ListDataSources operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See ListDataSources for more information on using the ListDataSources -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// -// // Example sending a request using the ListDataSourcesRequest method. -// req, resp := client.ListDataSourcesRequest(params) +// // Example sending a request using the DescribeGroupRequest method. +// req, resp := client.DescribeGroupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDataSources -func (c *QuickSight) ListDataSourcesRequest(input *ListDataSourcesInput) (req *request.Request, output *ListDataSourcesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeGroup +func (c *QuickSight) DescribeGroupRequest(input *DescribeGroupInput) (req *request.Request, output *DescribeGroupOutput) { op := &request.Operation{ - Name: opListDataSources, + Name: opDescribeGroup, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/data-sources", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}", } if input == nil { - input = &ListDataSourcesInput{} + input = &DescribeGroupInput{} } - output = &ListDataSourcesOutput{} + output = &DescribeGroupOutput{} req = c.newRequest(op, input, output) return } -// ListDataSources API operation for Amazon QuickSight. +// DescribeGroup API operation for Amazon QuickSight. // -// Lists data sources in current AWS Region that belong to this AWS account. +// Returns an Amazon QuickSight group's description and Amazon Resource Name +// (ARN). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListDataSources for usage and error information. +// API operation DescribeGroup for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -3982,141 +4211,96 @@ func (c *QuickSight) ListDataSourcesRequest(input *ListDataSourcesInput) (req *r // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// * ResourceNotFoundException +// One or more resources can't be found. +// // * ThrottlingException // Access is throttled. // -// * InvalidNextTokenException -// The NextToken value isn't valid. +// * PreconditionNotMetException +// One or more preconditions aren't met. // // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDataSources -func (c *QuickSight) ListDataSources(input *ListDataSourcesInput) (*ListDataSourcesOutput, error) { - req, out := c.ListDataSourcesRequest(input) +// * ResourceUnavailableException +// This resource is currently unavailable. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeGroup +func (c *QuickSight) DescribeGroup(input *DescribeGroupInput) (*DescribeGroupOutput, error) { + req, out := c.DescribeGroupRequest(input) return out, req.Send() } -// ListDataSourcesWithContext is the same as ListDataSources with the addition of +// DescribeGroupWithContext is the same as DescribeGroup with the addition of // the ability to pass a context and additional request options. // -// See ListDataSources for details on how to use this API operation. +// See DescribeGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) ListDataSourcesWithContext(ctx aws.Context, input *ListDataSourcesInput, opts ...request.Option) (*ListDataSourcesOutput, error) { - req, out := c.ListDataSourcesRequest(input) +func (c *QuickSight) DescribeGroupWithContext(ctx aws.Context, input *DescribeGroupInput, opts ...request.Option) (*DescribeGroupOutput, error) { + req, out := c.DescribeGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListDataSourcesPages iterates over the pages of a ListDataSources operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListDataSources method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListDataSources operation. -// pageNum := 0 -// err := client.ListDataSourcesPages(params, -// func(page *quicksight.ListDataSourcesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *QuickSight) ListDataSourcesPages(input *ListDataSourcesInput, fn func(*ListDataSourcesOutput, bool) bool) error { - return c.ListDataSourcesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListDataSourcesPagesWithContext same as ListDataSourcesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *QuickSight) ListDataSourcesPagesWithContext(ctx aws.Context, input *ListDataSourcesInput, fn func(*ListDataSourcesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListDataSourcesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListDataSourcesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListDataSourcesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListGroupMemberships = "ListGroupMemberships" +const opDescribeIAMPolicyAssignment = "DescribeIAMPolicyAssignment" -// ListGroupMembershipsRequest generates a "aws/request.Request" representing the -// client's request for the ListGroupMemberships operation. 
The "output" return +// DescribeIAMPolicyAssignmentRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIAMPolicyAssignment operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListGroupMemberships for more information on using the ListGroupMemberships +// See DescribeIAMPolicyAssignment for more information on using the DescribeIAMPolicyAssignment // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListGroupMembershipsRequest method. -// req, resp := client.ListGroupMembershipsRequest(params) +// // Example sending a request using the DescribeIAMPolicyAssignmentRequest method. +// req, resp := client.DescribeIAMPolicyAssignmentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListGroupMemberships -func (c *QuickSight) ListGroupMembershipsRequest(input *ListGroupMembershipsInput) (req *request.Request, output *ListGroupMembershipsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeIAMPolicyAssignment +func (c *QuickSight) DescribeIAMPolicyAssignmentRequest(input *DescribeIAMPolicyAssignmentInput) (req *request.Request, output *DescribeIAMPolicyAssignmentOutput) { op := &request.Operation{ - Name: opListGroupMemberships, + Name: opDescribeIAMPolicyAssignment, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}/members", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/{AssignmentName}", } if input == nil { - input = &ListGroupMembershipsInput{} + input = &DescribeIAMPolicyAssignmentInput{} } - output = &ListGroupMembershipsOutput{} + output = &DescribeIAMPolicyAssignmentOutput{} req = c.newRequest(op, input, output) return } -// ListGroupMemberships API operation for Amazon QuickSight. +// DescribeIAMPolicyAssignment API operation for Amazon QuickSight. // -// Lists member users in a group. +// Describes an existing IAM policy assignment, as specified by the assignment +// name. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListGroupMemberships for usage and error information. +// API operation DescribeIAMPolicyAssignment for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -4138,89 +4322,83 @@ func (c *QuickSight) ListGroupMembershipsRequest(input *ListGroupMembershipsInpu // * InvalidNextTokenException // The NextToken value isn't valid. // -// * PreconditionNotMetException -// One or more preconditions aren't met. -// // * InternalFailureException // An internal failure occurred. // -// * ResourceUnavailableException -// This resource is currently unavailable. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListGroupMemberships -func (c *QuickSight) ListGroupMemberships(input *ListGroupMembershipsInput) (*ListGroupMembershipsOutput, error) { - req, out := c.ListGroupMembershipsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeIAMPolicyAssignment +func (c *QuickSight) DescribeIAMPolicyAssignment(input *DescribeIAMPolicyAssignmentInput) (*DescribeIAMPolicyAssignmentOutput, error) { + req, out := c.DescribeIAMPolicyAssignmentRequest(input) return out, req.Send() } -// ListGroupMembershipsWithContext is the same as ListGroupMemberships with the addition of +// DescribeIAMPolicyAssignmentWithContext is the same as DescribeIAMPolicyAssignment with the addition of // the ability to pass a context and additional request options. // -// See ListGroupMemberships for details on how to use this API operation. +// See DescribeIAMPolicyAssignment for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) ListGroupMembershipsWithContext(ctx aws.Context, input *ListGroupMembershipsInput, opts ...request.Option) (*ListGroupMembershipsOutput, error) { - req, out := c.ListGroupMembershipsRequest(input) +func (c *QuickSight) DescribeIAMPolicyAssignmentWithContext(ctx aws.Context, input *DescribeIAMPolicyAssignmentInput, opts ...request.Option) (*DescribeIAMPolicyAssignmentOutput, error) { + req, out := c.DescribeIAMPolicyAssignmentRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListGroups = "ListGroups" +const opDescribeIngestion = "DescribeIngestion" -// ListGroupsRequest generates a "aws/request.Request" representing the -// client's request for the ListGroups operation. The "output" return +// DescribeIngestionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIngestion operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListGroups for more information on using the ListGroups +// See DescribeIngestion for more information on using the DescribeIngestion // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListGroupsRequest method. -// req, resp := client.ListGroupsRequest(params) +// // Example sending a request using the DescribeIngestionRequest method. 
+// req, resp := client.DescribeIngestionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListGroups -func (c *QuickSight) ListGroupsRequest(input *ListGroupsInput) (req *request.Request, output *ListGroupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeIngestion +func (c *QuickSight) DescribeIngestionRequest(input *DescribeIngestionInput) (req *request.Request, output *DescribeIngestionOutput) { op := &request.Operation{ - Name: opListGroups, + Name: opDescribeIngestion, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/groups", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions/{IngestionId}", } if input == nil { - input = &ListGroupsInput{} + input = &DescribeIngestionInput{} } - output = &ListGroupsOutput{} + output = &DescribeIngestionOutput{} req = c.newRequest(op, input, output) return } -// ListGroups API operation for Amazon QuickSight. +// DescribeIngestion API operation for Amazon QuickSight. // -// Lists all user groups in Amazon QuickSight. +// Describes a SPICE ingestion. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListGroups for usage and error information. +// API operation DescribeIngestion for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -4239,92 +4417,86 @@ func (c *QuickSight) ListGroupsRequest(input *ListGroupsInput) (req *request.Req // * ThrottlingException // Access is throttled. // -// * InvalidNextTokenException -// The NextToken value isn't valid. -// -// * PreconditionNotMetException -// One or more preconditions aren't met. +// * ResourceExistsException +// The resource specified already exists. // // * InternalFailureException // An internal failure occurred. // -// * ResourceUnavailableException -// This resource is currently unavailable. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListGroups -func (c *QuickSight) ListGroups(input *ListGroupsInput) (*ListGroupsOutput, error) { - req, out := c.ListGroupsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeIngestion +func (c *QuickSight) DescribeIngestion(input *DescribeIngestionInput) (*DescribeIngestionOutput, error) { + req, out := c.DescribeIngestionRequest(input) return out, req.Send() } -// ListGroupsWithContext is the same as ListGroups with the addition of +// DescribeIngestionWithContext is the same as DescribeIngestion with the addition of // the ability to pass a context and additional request options. // -// See ListGroups for details on how to use this API operation. +// See DescribeIngestion for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
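//
// A minimal, hedged sketch of request cancellation via a context, assuming a
// configured *quicksight.QuickSight client ("client") and a populated
// *quicksight.DescribeIngestionInput ("params"), both hypothetical:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//
//    out, err := client.DescribeIngestionWithContext(ctx, params)
//    if err != nil {
//        // if the timeout elapsed first, the SDK typically returns an awserr
//        // whose code is request.CanceledErrorCode
//        fmt.Println(err)
//    } else {
//        fmt.Println(aws.StringValue(out.Ingestion.IngestionStatus))
//    }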
-func (c *QuickSight) ListGroupsWithContext(ctx aws.Context, input *ListGroupsInput, opts ...request.Option) (*ListGroupsOutput, error) { - req, out := c.ListGroupsRequest(input) +func (c *QuickSight) DescribeIngestionWithContext(ctx aws.Context, input *DescribeIngestionInput, opts ...request.Option) (*DescribeIngestionOutput, error) { + req, out := c.DescribeIngestionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListIAMPolicyAssignments = "ListIAMPolicyAssignments" +const opDescribeNamespace = "DescribeNamespace" -// ListIAMPolicyAssignmentsRequest generates a "aws/request.Request" representing the -// client's request for the ListIAMPolicyAssignments operation. The "output" return +// DescribeNamespaceRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNamespace operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListIAMPolicyAssignments for more information on using the ListIAMPolicyAssignments +// See DescribeNamespace for more information on using the DescribeNamespace // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListIAMPolicyAssignmentsRequest method. -// req, resp := client.ListIAMPolicyAssignmentsRequest(params) +// // Example sending a request using the DescribeNamespaceRequest method. +// req, resp := client.DescribeNamespaceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIAMPolicyAssignments -func (c *QuickSight) ListIAMPolicyAssignmentsRequest(input *ListIAMPolicyAssignmentsInput) (req *request.Request, output *ListIAMPolicyAssignmentsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeNamespace +func (c *QuickSight) DescribeNamespaceRequest(input *DescribeNamespaceInput) (req *request.Request, output *DescribeNamespaceOutput) { op := &request.Operation{ - Name: opListIAMPolicyAssignments, + Name: opDescribeNamespace, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}", } if input == nil { - input = &ListIAMPolicyAssignmentsInput{} + input = &DescribeNamespaceInput{} } - output = &ListIAMPolicyAssignmentsOutput{} + output = &DescribeNamespaceOutput{} req = c.newRequest(op, input, output) return } -// ListIAMPolicyAssignments API operation for Amazon QuickSight. +// DescribeNamespace API operation for Amazon QuickSight. // -// Lists IAM policy assignments in the current Amazon QuickSight account. +// Describes the current namespace. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListIAMPolicyAssignments for usage and error information. +// API operation DescribeNamespace for usage and error information. 
// // Returned Error Types: // * AccessDeniedException @@ -4343,90 +4515,91 @@ func (c *QuickSight) ListIAMPolicyAssignmentsRequest(input *ListIAMPolicyAssignm // * ThrottlingException // Access is throttled. // -// * InvalidNextTokenException -// The NextToken value isn't valid. -// // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIAMPolicyAssignments -func (c *QuickSight) ListIAMPolicyAssignments(input *ListIAMPolicyAssignmentsInput) (*ListIAMPolicyAssignmentsOutput, error) { - req, out := c.ListIAMPolicyAssignmentsRequest(input) +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeNamespace +func (c *QuickSight) DescribeNamespace(input *DescribeNamespaceInput) (*DescribeNamespaceOutput, error) { + req, out := c.DescribeNamespaceRequest(input) return out, req.Send() } -// ListIAMPolicyAssignmentsWithContext is the same as ListIAMPolicyAssignments with the addition of +// DescribeNamespaceWithContext is the same as DescribeNamespace with the addition of // the ability to pass a context and additional request options. // -// See ListIAMPolicyAssignments for details on how to use this API operation. +// See DescribeNamespace for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) ListIAMPolicyAssignmentsWithContext(ctx aws.Context, input *ListIAMPolicyAssignmentsInput, opts ...request.Option) (*ListIAMPolicyAssignmentsOutput, error) { - req, out := c.ListIAMPolicyAssignmentsRequest(input) +func (c *QuickSight) DescribeNamespaceWithContext(ctx aws.Context, input *DescribeNamespaceInput, opts ...request.Option) (*DescribeNamespaceOutput, error) { + req, out := c.DescribeNamespaceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListIAMPolicyAssignmentsForUser = "ListIAMPolicyAssignmentsForUser" +const opDescribeTemplate = "DescribeTemplate" -// ListIAMPolicyAssignmentsForUserRequest generates a "aws/request.Request" representing the -// client's request for the ListIAMPolicyAssignmentsForUser operation. The "output" return +// DescribeTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTemplate operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListIAMPolicyAssignmentsForUser for more information on using the ListIAMPolicyAssignmentsForUser +// See DescribeTemplate for more information on using the DescribeTemplate // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListIAMPolicyAssignmentsForUserRequest method. -// req, resp := client.ListIAMPolicyAssignmentsForUserRequest(params) +// // Example sending a request using the DescribeTemplateRequest method. 
+// req, resp := client.DescribeTemplateRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIAMPolicyAssignmentsForUser -func (c *QuickSight) ListIAMPolicyAssignmentsForUserRequest(input *ListIAMPolicyAssignmentsForUserInput) (req *request.Request, output *ListIAMPolicyAssignmentsForUserOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplate +func (c *QuickSight) DescribeTemplateRequest(input *DescribeTemplateInput) (req *request.Request, output *DescribeTemplateOutput) { op := &request.Operation{ - Name: opListIAMPolicyAssignmentsForUser, + Name: opDescribeTemplate, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}/iam-policy-assignments", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}", } if input == nil { - input = &ListIAMPolicyAssignmentsForUserInput{} + input = &DescribeTemplateInput{} } - output = &ListIAMPolicyAssignmentsForUserOutput{} + output = &DescribeTemplateOutput{} req = c.newRequest(op, input, output) return } -// ListIAMPolicyAssignmentsForUser API operation for Amazon QuickSight. +// DescribeTemplate API operation for Amazon QuickSight. // -// Lists all the IAM policy assignments, including the Amazon Resource Names -// (ARNs) for the IAM policies assigned to the specified user and group or groups -// that the user belongs to. +// Describes a template's metadata. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListIAMPolicyAssignmentsForUser for usage and error information. +// API operation DescribeTemplate for usage and error information. // // Returned Error Types: +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// // * AccessDeniedException // You don't have access to this item. The provided credentials couldn't be // validated. You might not be authorized to carry out the request. Make sure @@ -4434,9 +4607,6 @@ func (c *QuickSight) ListIAMPolicyAssignmentsForUserRequest(input *ListIAMPolicy // your policies have the correct permissions, and that you are using the correct // access keys. // -// * InvalidParameterValueException -// One or more parameters has a value that isn't valid. -// // * ResourceExistsException // The resource specified already exists. // @@ -4446,355 +4616,301 @@ func (c *QuickSight) ListIAMPolicyAssignmentsForUserRequest(input *ListIAMPolicy // * ThrottlingException // Access is throttled. // -// * ConcurrentUpdatingException -// A resource is already in a state that indicates an action is happening that -// must complete before a new update can be applied. +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. // // * InternalFailureException // An internal failure occurred. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIAMPolicyAssignmentsForUser -func (c *QuickSight) ListIAMPolicyAssignmentsForUser(input *ListIAMPolicyAssignmentsForUserInput) (*ListIAMPolicyAssignmentsForUserOutput, error) { - req, out := c.ListIAMPolicyAssignmentsForUserRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplate +func (c *QuickSight) DescribeTemplate(input *DescribeTemplateInput) (*DescribeTemplateOutput, error) { + req, out := c.DescribeTemplateRequest(input) return out, req.Send() } -// ListIAMPolicyAssignmentsForUserWithContext is the same as ListIAMPolicyAssignmentsForUser with the addition of +// DescribeTemplateWithContext is the same as DescribeTemplate with the addition of // the ability to pass a context and additional request options. // -// See ListIAMPolicyAssignmentsForUser for details on how to use this API operation. +// See DescribeTemplate for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) ListIAMPolicyAssignmentsForUserWithContext(ctx aws.Context, input *ListIAMPolicyAssignmentsForUserInput, opts ...request.Option) (*ListIAMPolicyAssignmentsForUserOutput, error) { - req, out := c.ListIAMPolicyAssignmentsForUserRequest(input) +func (c *QuickSight) DescribeTemplateWithContext(ctx aws.Context, input *DescribeTemplateInput, opts ...request.Option) (*DescribeTemplateOutput, error) { + req, out := c.DescribeTemplateRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListIngestions = "ListIngestions" +const opDescribeTemplateAlias = "DescribeTemplateAlias" -// ListIngestionsRequest generates a "aws/request.Request" representing the -// client's request for the ListIngestions operation. The "output" return +// DescribeTemplateAliasRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTemplateAlias operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListIngestions for more information on using the ListIngestions +// See DescribeTemplateAlias for more information on using the DescribeTemplateAlias // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListIngestionsRequest method. -// req, resp := client.ListIngestionsRequest(params) +// // Example sending a request using the DescribeTemplateAliasRequest method. 
+// req, resp := client.DescribeTemplateAliasRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIngestions -func (c *QuickSight) ListIngestionsRequest(input *ListIngestionsInput) (req *request.Request, output *ListIngestionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplateAlias +func (c *QuickSight) DescribeTemplateAliasRequest(input *DescribeTemplateAliasInput) (req *request.Request, output *DescribeTemplateAliasOutput) { op := &request.Operation{ - Name: opListIngestions, + Name: opDescribeTemplateAlias, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}", } if input == nil { - input = &ListIngestionsInput{} + input = &DescribeTemplateAliasInput{} } - output = &ListIngestionsOutput{} + output = &DescribeTemplateAliasOutput{} req = c.newRequest(op, input, output) return } -// ListIngestions API operation for Amazon QuickSight. +// DescribeTemplateAlias API operation for Amazon QuickSight. // -// Lists the history of SPICE ingestions for a dataset. +// Describes the template alias for a template. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListIngestions for usage and error information. +// API operation DescribeTemplateAlias for usage and error information. // // Returned Error Types: -// * AccessDeniedException -// You don't have access to this item. The provided credentials couldn't be -// validated. You might not be authorized to carry out the request. Make sure -// that your account is authorized to use the Amazon QuickSight service, that -// your policies have the correct permissions, and that you are using the correct -// access keys. -// -// * InvalidParameterValueException -// One or more parameters has a value that isn't valid. -// -// * ResourceNotFoundException -// One or more resources can't be found. -// // * ThrottlingException // Access is throttled. // -// * ResourceExistsException -// The resource specified already exists. +// * ResourceNotFoundException +// One or more resources can't be found. // -// * InvalidNextTokenException -// The NextToken value isn't valid. +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. // // * InternalFailureException // An internal failure occurred. 
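As a small illustration of the alias operation documented above, the helper below resolves a template alias to the version number it points at. It is a sketch only: the client is assumed to be a *quicksight.QuickSight built with quicksight.New, and every identifier passed in is a placeholder.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

// templateVersionForAlias resolves a template alias name to the version
// number the alias currently points at. All identifiers are placeholders.
func templateVersionForAlias(client *quicksight.QuickSight, account, templateID, alias string) (int64, error) {
	out, err := client.DescribeTemplateAlias(&quicksight.DescribeTemplateAliasInput{
		AwsAccountId: aws.String(account),
		TemplateId:   aws.String(templateID),
		AliasName:    aws.String(alias),
	})
	if err != nil {
		return 0, err
	}
	return aws.Int64Value(out.TemplateAlias.TemplateVersionNumber), nil
}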
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIngestions -func (c *QuickSight) ListIngestions(input *ListIngestionsInput) (*ListIngestionsOutput, error) { - req, out := c.ListIngestionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplateAlias +func (c *QuickSight) DescribeTemplateAlias(input *DescribeTemplateAliasInput) (*DescribeTemplateAliasOutput, error) { + req, out := c.DescribeTemplateAliasRequest(input) return out, req.Send() } -// ListIngestionsWithContext is the same as ListIngestions with the addition of +// DescribeTemplateAliasWithContext is the same as DescribeTemplateAlias with the addition of // the ability to pass a context and additional request options. // -// See ListIngestions for details on how to use this API operation. +// See DescribeTemplateAlias for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) ListIngestionsWithContext(ctx aws.Context, input *ListIngestionsInput, opts ...request.Option) (*ListIngestionsOutput, error) { - req, out := c.ListIngestionsRequest(input) +func (c *QuickSight) DescribeTemplateAliasWithContext(ctx aws.Context, input *DescribeTemplateAliasInput, opts ...request.Option) (*DescribeTemplateAliasOutput, error) { + req, out := c.DescribeTemplateAliasRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListIngestionsPages iterates over the pages of a ListIngestions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListIngestions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListIngestions operation. -// pageNum := 0 -// err := client.ListIngestionsPages(params, -// func(page *quicksight.ListIngestionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *QuickSight) ListIngestionsPages(input *ListIngestionsInput, fn func(*ListIngestionsOutput, bool) bool) error { - return c.ListIngestionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListIngestionsPagesWithContext same as ListIngestionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *QuickSight) ListIngestionsPagesWithContext(ctx aws.Context, input *ListIngestionsInput, fn func(*ListIngestionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListIngestionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListIngestionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListIngestionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListTagsForResource = "ListTagsForResource" +const opDescribeTemplatePermissions = "DescribeTemplatePermissions" -// ListTagsForResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsForResource operation. The "output" return +// DescribeTemplatePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTemplatePermissions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsForResource for more information on using the ListTagsForResource +// See DescribeTemplatePermissions for more information on using the DescribeTemplatePermissions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTagsForResourceRequest method. -// req, resp := client.ListTagsForResourceRequest(params) +// // Example sending a request using the DescribeTemplatePermissionsRequest method. +// req, resp := client.DescribeTemplatePermissionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTagsForResource -func (c *QuickSight) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplatePermissions +func (c *QuickSight) DescribeTemplatePermissionsRequest(input *DescribeTemplatePermissionsInput) (req *request.Request, output *DescribeTemplatePermissionsOutput) { op := &request.Operation{ - Name: opListTagsForResource, + Name: opDescribeTemplatePermissions, HTTPMethod: "GET", - HTTPPath: "/resources/{ResourceArn}/tags", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/permissions", } if input == nil { - input = &ListTagsForResourceInput{} + input = &DescribeTemplatePermissionsInput{} } - output = &ListTagsForResourceOutput{} + output = &DescribeTemplatePermissionsOutput{} req = c.newRequest(op, input, output) return } -// ListTagsForResource API operation for Amazon QuickSight. +// DescribeTemplatePermissions API operation for Amazon QuickSight. // -// Lists the tags assigned to a resource. +// Describes read and write permissions on a template. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListTagsForResource for usage and error information. +// API operation DescribeTemplatePermissions for usage and error information. // // Returned Error Types: -// * AccessDeniedException -// You don't have access to this item. The provided credentials couldn't be -// validated. You might not be authorized to carry out the request. 
Make sure -// that your account is authorized to use the Amazon QuickSight service, that -// your policies have the correct permissions, and that you are using the correct -// access keys. +// * ThrottlingException +// Access is throttled. // // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// // * ResourceNotFoundException // One or more resources can't be found. // -// * ThrottlingException -// Access is throttled. +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. // // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTagsForResource -func (c *QuickSight) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTemplatePermissions +func (c *QuickSight) DescribeTemplatePermissions(input *DescribeTemplatePermissionsInput) (*DescribeTemplatePermissionsOutput, error) { + req, out := c.DescribeTemplatePermissionsRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// DescribeTemplatePermissionsWithContext is the same as DescribeTemplatePermissions with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation. +// See DescribeTemplatePermissions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *QuickSight) DescribeTemplatePermissionsWithContext(ctx aws.Context, input *DescribeTemplatePermissionsInput, opts ...request.Option) (*DescribeTemplatePermissionsOutput, error) { + req, out := c.DescribeTemplatePermissionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListTemplateAliases = "ListTemplateAliases" +const opDescribeTheme = "DescribeTheme" -// ListTemplateAliasesRequest generates a "aws/request.Request" representing the -// client's request for the ListTemplateAliases operation. The "output" return +// DescribeThemeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTheme operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
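DescribeTemplatePermissions, documented just above, returns the template's ResourcePermission entries. A hedged sketch of walking that list follows; the helper name and all identifiers are illustrative, not part of the SDK.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

// printTemplatePermissions prints each principal and the actions it is
// granted on a template. Account and template IDs are placeholders.
func printTemplatePermissions(client *quicksight.QuickSight, account, templateID string) error {
	out, err := client.DescribeTemplatePermissions(&quicksight.DescribeTemplatePermissionsInput{
		AwsAccountId: aws.String(account),
		TemplateId:   aws.String(templateID),
	})
	if err != nil {
		return err
	}
	for _, p := range out.Permissions {
		fmt.Printf("%s -> %v\n", aws.StringValue(p.Principal), aws.StringValueSlice(p.Actions))
	}
	return nil
}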
// -// See ListTemplateAliases for more information on using the ListTemplateAliases +// See DescribeTheme for more information on using the DescribeTheme // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTemplateAliasesRequest method. -// req, resp := client.ListTemplateAliasesRequest(params) +// // Example sending a request using the DescribeThemeRequest method. +// req, resp := client.DescribeThemeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplateAliases -func (c *QuickSight) ListTemplateAliasesRequest(input *ListTemplateAliasesInput) (req *request.Request, output *ListTemplateAliasesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTheme +func (c *QuickSight) DescribeThemeRequest(input *DescribeThemeInput) (req *request.Request, output *DescribeThemeOutput) { op := &request.Operation{ - Name: opListTemplateAliases, + Name: opDescribeTheme, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/accounts/{AwsAccountId}/themes/{ThemeId}", } if input == nil { - input = &ListTemplateAliasesInput{} + input = &DescribeThemeInput{} } - output = &ListTemplateAliasesOutput{} + output = &DescribeThemeOutput{} req = c.newRequest(op, input, output) return } -// ListTemplateAliases API operation for Amazon QuickSight. +// DescribeTheme API operation for Amazon QuickSight. // -// Lists all the aliases of a template. +// Describes a theme. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListTemplateAliases for usage and error information. +// API operation DescribeTheme for usage and error information. // // Returned Error Types: -// * ThrottlingException -// Access is throttled. +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceExistsException +// The resource specified already exists. // // * ResourceNotFoundException // One or more resources can't be found. // +// * ThrottlingException +// Access is throttled. +// // * UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight // subscription where the edition doesn't include support for that operation. @@ -4804,143 +4920,84 @@ func (c *QuickSight) ListTemplateAliasesRequest(input *ListTemplateAliasesInput) // * InternalFailureException // An internal failure occurred. 
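Each operation also has a WithContext variant that accepts a context for request cancellation, as the doc comments note. Below is a sketch that calls DescribeThemeWithContext under a five-second deadline; the function name and identifiers are assumptions chosen for illustration.

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

// describeThemeName fetches a theme's display name, giving up after five
// seconds via the context passed to the WithContext variant. IDs are placeholders.
func describeThemeName(client *quicksight.QuickSight, account, themeID string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	out, err := client.DescribeThemeWithContext(ctx, &quicksight.DescribeThemeInput{
		AwsAccountId: aws.String(account),
		ThemeId:      aws.String(themeID),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.Theme.Name), nil
}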
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplateAliases -func (c *QuickSight) ListTemplateAliases(input *ListTemplateAliasesInput) (*ListTemplateAliasesOutput, error) { - req, out := c.ListTemplateAliasesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeTheme +func (c *QuickSight) DescribeTheme(input *DescribeThemeInput) (*DescribeThemeOutput, error) { + req, out := c.DescribeThemeRequest(input) return out, req.Send() } -// ListTemplateAliasesWithContext is the same as ListTemplateAliases with the addition of +// DescribeThemeWithContext is the same as DescribeTheme with the addition of // the ability to pass a context and additional request options. // -// See ListTemplateAliases for details on how to use this API operation. +// See DescribeTheme for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) ListTemplateAliasesWithContext(ctx aws.Context, input *ListTemplateAliasesInput, opts ...request.Option) (*ListTemplateAliasesOutput, error) { - req, out := c.ListTemplateAliasesRequest(input) +func (c *QuickSight) DescribeThemeWithContext(ctx aws.Context, input *DescribeThemeInput, opts ...request.Option) (*DescribeThemeOutput, error) { + req, out := c.DescribeThemeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListTemplateAliasesPages iterates over the pages of a ListTemplateAliases operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListTemplateAliases method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListTemplateAliases operation. -// pageNum := 0 -// err := client.ListTemplateAliasesPages(params, -// func(page *quicksight.ListTemplateAliasesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *QuickSight) ListTemplateAliasesPages(input *ListTemplateAliasesInput, fn func(*ListTemplateAliasesOutput, bool) bool) error { - return c.ListTemplateAliasesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListTemplateAliasesPagesWithContext same as ListTemplateAliasesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *QuickSight) ListTemplateAliasesPagesWithContext(ctx aws.Context, input *ListTemplateAliasesInput, fn func(*ListTemplateAliasesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListTemplateAliasesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListTemplateAliasesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) 
- return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListTemplateAliasesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListTemplateVersions = "ListTemplateVersions" +const opDescribeThemeAlias = "DescribeThemeAlias" -// ListTemplateVersionsRequest generates a "aws/request.Request" representing the -// client's request for the ListTemplateVersions operation. The "output" return +// DescribeThemeAliasRequest generates a "aws/request.Request" representing the +// client's request for the DescribeThemeAlias operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTemplateVersions for more information on using the ListTemplateVersions +// See DescribeThemeAlias for more information on using the DescribeThemeAlias // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTemplateVersionsRequest method. -// req, resp := client.ListTemplateVersionsRequest(params) +// // Example sending a request using the DescribeThemeAliasRequest method. +// req, resp := client.DescribeThemeAliasRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplateVersions -func (c *QuickSight) ListTemplateVersionsRequest(input *ListTemplateVersionsInput) (req *request.Request, output *ListTemplateVersionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeThemeAlias +func (c *QuickSight) DescribeThemeAliasRequest(input *DescribeThemeAliasInput) (req *request.Request, output *DescribeThemeAliasOutput) { op := &request.Operation{ - Name: opListTemplateVersions, + Name: opDescribeThemeAlias, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/versions", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/accounts/{AwsAccountId}/themes/{ThemeId}/aliases/{AliasName}", } if input == nil { - input = &ListTemplateVersionsInput{} + input = &DescribeThemeAliasInput{} } - output = &ListTemplateVersionsOutput{} + output = &DescribeThemeAliasOutput{} req = c.newRequest(op, input, output) return } -// ListTemplateVersions API operation for Amazon QuickSight. +// DescribeThemeAlias API operation for Amazon QuickSight. // -// Lists all the versions of the templates in the current Amazon QuickSight -// account. +// Describes the alias for a theme. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListTemplateVersions for usage and error information. +// API operation DescribeThemeAlias for usage and error information. // // Returned Error Types: -// * ThrottlingException -// Access is throttled. +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. 
// // * InvalidParameterValueException // One or more parameters has a value that isn't valid. @@ -4948,8 +5005,8 @@ func (c *QuickSight) ListTemplateVersionsRequest(input *ListTemplateVersionsInpu // * ResourceNotFoundException // One or more resources can't be found. // -// * InvalidNextTokenException -// The NextToken value isn't valid. +// * ThrottlingException +// Access is throttled. // // * UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight @@ -4960,142 +5017,88 @@ func (c *QuickSight) ListTemplateVersionsRequest(input *ListTemplateVersionsInpu // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplateVersions -func (c *QuickSight) ListTemplateVersions(input *ListTemplateVersionsInput) (*ListTemplateVersionsOutput, error) { - req, out := c.ListTemplateVersionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeThemeAlias +func (c *QuickSight) DescribeThemeAlias(input *DescribeThemeAliasInput) (*DescribeThemeAliasOutput, error) { + req, out := c.DescribeThemeAliasRequest(input) return out, req.Send() } -// ListTemplateVersionsWithContext is the same as ListTemplateVersions with the addition of +// DescribeThemeAliasWithContext is the same as DescribeThemeAlias with the addition of // the ability to pass a context and additional request options. // -// See ListTemplateVersions for details on how to use this API operation. +// See DescribeThemeAlias for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) ListTemplateVersionsWithContext(ctx aws.Context, input *ListTemplateVersionsInput, opts ...request.Option) (*ListTemplateVersionsOutput, error) { - req, out := c.ListTemplateVersionsRequest(input) +func (c *QuickSight) DescribeThemeAliasWithContext(ctx aws.Context, input *DescribeThemeAliasInput, opts ...request.Option) (*DescribeThemeAliasOutput, error) { + req, out := c.DescribeThemeAliasRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListTemplateVersionsPages iterates over the pages of a ListTemplateVersions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListTemplateVersions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListTemplateVersions operation. -// pageNum := 0 -// err := client.ListTemplateVersionsPages(params, -// func(page *quicksight.ListTemplateVersionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *QuickSight) ListTemplateVersionsPages(input *ListTemplateVersionsInput, fn func(*ListTemplateVersionsOutput, bool) bool) error { - return c.ListTemplateVersionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListTemplateVersionsPagesWithContext same as ListTemplateVersionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. 
If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *QuickSight) ListTemplateVersionsPagesWithContext(ctx aws.Context, input *ListTemplateVersionsInput, fn func(*ListTemplateVersionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListTemplateVersionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListTemplateVersionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListTemplateVersionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListTemplates = "ListTemplates" +const opDescribeThemePermissions = "DescribeThemePermissions" -// ListTemplatesRequest generates a "aws/request.Request" representing the -// client's request for the ListTemplates operation. The "output" return +// DescribeThemePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeThemePermissions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTemplates for more information on using the ListTemplates +// See DescribeThemePermissions for more information on using the DescribeThemePermissions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTemplatesRequest method. -// req, resp := client.ListTemplatesRequest(params) +// // Example sending a request using the DescribeThemePermissionsRequest method. +// req, resp := client.DescribeThemePermissionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplates -func (c *QuickSight) ListTemplatesRequest(input *ListTemplatesInput) (req *request.Request, output *ListTemplatesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeThemePermissions +func (c *QuickSight) DescribeThemePermissionsRequest(input *DescribeThemePermissionsInput) (req *request.Request, output *DescribeThemePermissionsOutput) { op := &request.Operation{ - Name: opListTemplates, + Name: opDescribeThemePermissions, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/templates", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/accounts/{AwsAccountId}/themes/{ThemeId}/permissions", } if input == nil { - input = &ListTemplatesInput{} + input = &DescribeThemePermissionsInput{} } - output = &ListTemplatesOutput{} + output = &DescribeThemePermissionsOutput{} req = c.newRequest(op, input, output) return } -// ListTemplates API operation for Amazon QuickSight. +// DescribeThemePermissions API operation for Amazon QuickSight. // -// Lists all the templates in the current Amazon QuickSight account. 
+// Describes the read and write permissions for a theme. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListTemplates for usage and error information. +// API operation DescribeThemePermissions for usage and error information. // // Returned Error Types: -// * ThrottlingException -// Access is throttled. +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. // // * InvalidParameterValueException // One or more parameters has a value that isn't valid. @@ -5103,8 +5106,8 @@ func (c *QuickSight) ListTemplatesRequest(input *ListTemplatesInput) (req *reque // * ResourceNotFoundException // One or more resources can't be found. // -// * InvalidNextTokenException -// The NextToken value isn't valid. +// * ThrottlingException +// Access is throttled. // // * UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight @@ -5115,133 +5118,80 @@ func (c *QuickSight) ListTemplatesRequest(input *ListTemplatesInput) (req *reque // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplates -func (c *QuickSight) ListTemplates(input *ListTemplatesInput) (*ListTemplatesOutput, error) { - req, out := c.ListTemplatesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeThemePermissions +func (c *QuickSight) DescribeThemePermissions(input *DescribeThemePermissionsInput) (*DescribeThemePermissionsOutput, error) { + req, out := c.DescribeThemePermissionsRequest(input) return out, req.Send() } -// ListTemplatesWithContext is the same as ListTemplates with the addition of +// DescribeThemePermissionsWithContext is the same as DescribeThemePermissions with the addition of // the ability to pass a context and additional request options. // -// See ListTemplates for details on how to use this API operation. +// See DescribeThemePermissions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) ListTemplatesWithContext(ctx aws.Context, input *ListTemplatesInput, opts ...request.Option) (*ListTemplatesOutput, error) { - req, out := c.ListTemplatesRequest(input) +func (c *QuickSight) DescribeThemePermissionsWithContext(ctx aws.Context, input *DescribeThemePermissionsInput, opts ...request.Option) (*DescribeThemePermissionsOutput, error) { + req, out := c.DescribeThemePermissionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListTemplatesPages iterates over the pages of a ListTemplates operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. 
-// -// See ListTemplates method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListTemplates operation. -// pageNum := 0 -// err := client.ListTemplatesPages(params, -// func(page *quicksight.ListTemplatesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *QuickSight) ListTemplatesPages(input *ListTemplatesInput, fn func(*ListTemplatesOutput, bool) bool) error { - return c.ListTemplatesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListTemplatesPagesWithContext same as ListTemplatesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *QuickSight) ListTemplatesPagesWithContext(ctx aws.Context, input *ListTemplatesInput, fn func(*ListTemplatesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListTemplatesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListTemplatesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListTemplatesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListUserGroups = "ListUserGroups" +const opDescribeUser = "DescribeUser" -// ListUserGroupsRequest generates a "aws/request.Request" representing the -// client's request for the ListUserGroups operation. The "output" return +// DescribeUserRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUser operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListUserGroups for more information on using the ListUserGroups +// See DescribeUser for more information on using the DescribeUser // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListUserGroupsRequest method. -// req, resp := client.ListUserGroupsRequest(params) +// // Example sending a request using the DescribeUserRequest method. 
+// req, resp := client.DescribeUserRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListUserGroups -func (c *QuickSight) ListUserGroupsRequest(input *ListUserGroupsInput) (req *request.Request, output *ListUserGroupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeUser +func (c *QuickSight) DescribeUserRequest(input *DescribeUserInput) (req *request.Request, output *DescribeUserOutput) { op := &request.Operation{ - Name: opListUserGroups, + Name: opDescribeUser, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}/groups", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}", } if input == nil { - input = &ListUserGroupsInput{} + input = &DescribeUserInput{} } - output = &ListUserGroupsOutput{} + output = &DescribeUserOutput{} req = c.newRequest(op, input, output) return } -// ListUserGroups API operation for Amazon QuickSight. +// DescribeUser API operation for Amazon QuickSight. // -// Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member -// of. +// Returns information about a user, given the user name. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListUserGroups for usage and error information. +// API operation DescribeUser for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -5260,86 +5210,107 @@ func (c *QuickSight) ListUserGroupsRequest(input *ListUserGroupsInput) (req *req // * ThrottlingException // Access is throttled. // +// * PreconditionNotMetException +// One or more preconditions aren't met. +// // * InternalFailureException // An internal failure occurred. // // * ResourceUnavailableException // This resource is currently unavailable. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListUserGroups -func (c *QuickSight) ListUserGroups(input *ListUserGroupsInput) (*ListUserGroupsOutput, error) { - req, out := c.ListUserGroupsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/DescribeUser +func (c *QuickSight) DescribeUser(input *DescribeUserInput) (*DescribeUserOutput, error) { + req, out := c.DescribeUserRequest(input) return out, req.Send() } -// ListUserGroupsWithContext is the same as ListUserGroups with the addition of +// DescribeUserWithContext is the same as DescribeUser with the addition of // the ability to pass a context and additional request options. // -// See ListUserGroups for details on how to use this API operation. +// See DescribeUser for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
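DescribeUser, documented above, returns the user record, including the ARN that the embed-URL operations later in this file accept in their UserArn parameter. The lookup sketch below assumes the standard "default" namespace; the helper name and identifiers are placeholders.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

// userArn looks up a QuickSight user by name and returns its ARN.
// The namespace, account ID, and user name are placeholders.
func userArn(client *quicksight.QuickSight, account, userName string) (string, error) {
	out, err := client.DescribeUser(&quicksight.DescribeUserInput{
		AwsAccountId: aws.String(account),
		Namespace:    aws.String("default"),
		UserName:     aws.String(userName),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.User.Arn), nil
}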
-func (c *QuickSight) ListUserGroupsWithContext(ctx aws.Context, input *ListUserGroupsInput, opts ...request.Option) (*ListUserGroupsOutput, error) { - req, out := c.ListUserGroupsRequest(input) +func (c *QuickSight) DescribeUserWithContext(ctx aws.Context, input *DescribeUserInput, opts ...request.Option) (*DescribeUserOutput, error) { + req, out := c.DescribeUserRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListUsers = "ListUsers" +const opGetDashboardEmbedUrl = "GetDashboardEmbedUrl" -// ListUsersRequest generates a "aws/request.Request" representing the -// client's request for the ListUsers operation. The "output" return +// GetDashboardEmbedUrlRequest generates a "aws/request.Request" representing the +// client's request for the GetDashboardEmbedUrl operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListUsers for more information on using the ListUsers +// See GetDashboardEmbedUrl for more information on using the GetDashboardEmbedUrl // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListUsersRequest method. -// req, resp := client.ListUsersRequest(params) +// // Example sending a request using the GetDashboardEmbedUrlRequest method. +// req, resp := client.GetDashboardEmbedUrlRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListUsers -func (c *QuickSight) ListUsersRequest(input *ListUsersInput) (req *request.Request, output *ListUsersOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/GetDashboardEmbedUrl +func (c *QuickSight) GetDashboardEmbedUrlRequest(input *GetDashboardEmbedUrlInput) (req *request.Request, output *GetDashboardEmbedUrlOutput) { op := &request.Operation{ - Name: opListUsers, + Name: opGetDashboardEmbedUrl, HTTPMethod: "GET", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/embed-url", } if input == nil { - input = &ListUsersInput{} + input = &GetDashboardEmbedUrlInput{} } - output = &ListUsersOutput{} + output = &GetDashboardEmbedUrlOutput{} req = c.newRequest(op, input, output) return } -// ListUsers API operation for Amazon QuickSight. +// GetDashboardEmbedUrl API operation for Amazon QuickSight. // -// Returns a list of all of the Amazon QuickSight users belonging to this account. +// Generates a session URL and authorization code that you can use to embed +// an Amazon QuickSight read-only dashboard in your web server code. Before +// you use this command, make sure that you have configured the dashboards and +// permissions. +// +// Currently, you can use GetDashboardEmbedURL only from the server, not from +// the user's browser. The following rules apply to the combination of URL and +// authorization code: +// +// * They must be used together. +// +// * They can be used one time only. +// +// * They are valid for 5 minutes after you run this command. +// +// * The resulting user session is valid for 10 hours. 
+// +// For more information, see Embedding Amazon QuickSight (https://docs.aws.amazon.com/quicksight/latest/user/embedding-dashboards.html) +// in the Amazon QuickSight User Guide . // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation ListUsers for usage and error information. +// API operation GetDashboardEmbedUrl for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -5352,97 +5323,128 @@ func (c *QuickSight) ListUsersRequest(input *ListUsersInput) (req *request.Reque // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// * ResourceExistsException +// The resource specified already exists. +// // * ResourceNotFoundException // One or more resources can't be found. // // * ThrottlingException // Access is throttled. // -// * InvalidNextTokenException -// The NextToken value isn't valid. +// * DomainNotWhitelistedException +// The domain specified isn't on the allow list. All domains for embedded dashboards +// must be added to the approved list by an Amazon QuickSight admin. +// +// * UserNotFoundException +// The user with the provided name isn't found. This error can happen in any +// operation that requires finding a user based on a provided user name, such +// as DeleteUser, DescribeUser, and so on. +// +// * IdentityTypeNotSupportedException +// The identity type specified isn't supported. Supported identity types include +// IAM and QUICKSIGHT. +// +// * SessionLifetimeInMinutesInvalidException +// The number of minutes specified for the lifetime of a session isn't valid. +// The session lifetime must be 15-600 minutes. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. // // * InternalFailureException // An internal failure occurred. // -// * ResourceUnavailableException -// This resource is currently unavailable. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListUsers -func (c *QuickSight) ListUsers(input *ListUsersInput) (*ListUsersOutput, error) { - req, out := c.ListUsersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/GetDashboardEmbedUrl +func (c *QuickSight) GetDashboardEmbedUrl(input *GetDashboardEmbedUrlInput) (*GetDashboardEmbedUrlOutput, error) { + req, out := c.GetDashboardEmbedUrlRequest(input) return out, req.Send() } -// ListUsersWithContext is the same as ListUsers with the addition of +// GetDashboardEmbedUrlWithContext is the same as GetDashboardEmbedUrl with the addition of // the ability to pass a context and additional request options. // -// See ListUsers for details on how to use this API operation. +// See GetDashboardEmbedUrl for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
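Tying the embedding rules above together, the sketch below requests a dashboard embed URL from server-side code, with the session lifetime set to the 10-hour maximum. The identity type, account ID, dashboard ID, and user ARN are illustrative assumptions, not values from this patch.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

// dashboardEmbedURL requests a single-use embed URL for a read-only dashboard.
// Per the doc comment above, the URL is valid for 5 minutes and must be
// requested from the server, not the user's browser.
func dashboardEmbedURL(client *quicksight.QuickSight, account, dashboardID, userARN string) (string, error) {
	out, err := client.GetDashboardEmbedUrl(&quicksight.GetDashboardEmbedUrlInput{
		AwsAccountId:             aws.String(account),
		DashboardId:              aws.String(dashboardID),
		IdentityType:             aws.String("QUICKSIGHT"), // or "IAM"
		UserArn:                  aws.String(userARN),      // needed for QUICKSIGHT identities
		SessionLifetimeInMinutes: aws.Int64(600),           // 15-600 minutes; 600 = the 10-hour session
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.EmbedUrl), nil
}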
-func (c *QuickSight) ListUsersWithContext(ctx aws.Context, input *ListUsersInput, opts ...request.Option) (*ListUsersOutput, error) { - req, out := c.ListUsersRequest(input) +func (c *QuickSight) GetDashboardEmbedUrlWithContext(ctx aws.Context, input *GetDashboardEmbedUrlInput, opts ...request.Option) (*GetDashboardEmbedUrlOutput, error) { + req, out := c.GetDashboardEmbedUrlRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRegisterUser = "RegisterUser" +const opGetSessionEmbedUrl = "GetSessionEmbedUrl" -// RegisterUserRequest generates a "aws/request.Request" representing the -// client's request for the RegisterUser operation. The "output" return +// GetSessionEmbedUrlRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionEmbedUrl operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RegisterUser for more information on using the RegisterUser +// See GetSessionEmbedUrl for more information on using the GetSessionEmbedUrl // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RegisterUserRequest method. -// req, resp := client.RegisterUserRequest(params) +// // Example sending a request using the GetSessionEmbedUrlRequest method. +// req, resp := client.GetSessionEmbedUrlRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/RegisterUser -func (c *QuickSight) RegisterUserRequest(input *RegisterUserInput) (req *request.Request, output *RegisterUserOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/GetSessionEmbedUrl +func (c *QuickSight) GetSessionEmbedUrlRequest(input *GetSessionEmbedUrlInput) (req *request.Request, output *GetSessionEmbedUrlOutput) { op := &request.Operation{ - Name: opRegisterUser, - HTTPMethod: "POST", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users", + Name: opGetSessionEmbedUrl, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/session-embed-url", } if input == nil { - input = &RegisterUserInput{} + input = &GetSessionEmbedUrlInput{} } - output = &RegisterUserOutput{} + output = &GetSessionEmbedUrlOutput{} req = c.newRequest(op, input, output) return } -// RegisterUser API operation for Amazon QuickSight. +// GetSessionEmbedUrl API operation for Amazon QuickSight. // -// Creates an Amazon QuickSight user, whose identity is associated with the -// AWS Identity and Access Management (IAM) identity or role specified in the -// request. +// Generates a session URL and authorization code that you can use to embed +// the Amazon QuickSight console in your web server code. Use GetSessionEmbedUrl +// where you want to provide an authoring portal that allows users to create +// data sources, datasets, analyses, and dashboards. The users who access an +// embedded QuickSight console need belong to the author or admin security cohort. 
+// If you want to restrict permissions to some of these features, add a custom +// permissions profile to the user with the UpdateUser API operation. Use RegisterUser +// API operation to add a new user with a custom permission profile attached. +// For more information, see the following sections in the Amazon QuickSight +// User Guide: +// +// * Embedding the Amazon QuickSight Console (https://docs.aws.amazon.com/quicksight/latest/user/embedding-the-quicksight-console.html) +// +// * Customizing Access to the Amazon QuickSight Console (https://docs.aws.amazon.com/quicksight/latest/user/customizing-permissions-to-the-quicksight-console.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation RegisterUser for usage and error information. +// API operation GetSessionEmbedUrl for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -5455,80 +5457,86 @@ func (c *QuickSight) RegisterUserRequest(input *RegisterUserInput) (req *request // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// * ResourceExistsException +// The resource specified already exists. +// // * ResourceNotFoundException // One or more resources can't be found. // // * ThrottlingException // Access is throttled. // -// * LimitExceededException -// A limit is exceeded. +// * UserNotFoundException +// The user with the provided name isn't found. This error can happen in any +// operation that requires finding a user based on a provided user name, such +// as DeleteUser, DescribeUser, and so on. // -// * ResourceExistsException -// The resource specified already exists. +// * SessionLifetimeInMinutesInvalidException +// The number of minutes specified for the lifetime of a session isn't valid. +// The session lifetime must be 15-600 minutes. // -// * PreconditionNotMetException -// One or more preconditions aren't met. +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. // // * InternalFailureException // An internal failure occurred. // -// * ResourceUnavailableException -// This resource is currently unavailable. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/RegisterUser -func (c *QuickSight) RegisterUser(input *RegisterUserInput) (*RegisterUserOutput, error) { - req, out := c.RegisterUserRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/GetSessionEmbedUrl +func (c *QuickSight) GetSessionEmbedUrl(input *GetSessionEmbedUrlInput) (*GetSessionEmbedUrlOutput, error) { + req, out := c.GetSessionEmbedUrlRequest(input) return out, req.Send() } -// RegisterUserWithContext is the same as RegisterUser with the addition of +// GetSessionEmbedUrlWithContext is the same as GetSessionEmbedUrl with the addition of // the ability to pass a context and additional request options. // -// See RegisterUser for details on how to use this API operation. +// See GetSessionEmbedUrl for details on how to use this API operation. 
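For the console-embedding flow described above, a corresponding sketch using GetSessionEmbedUrl is shown below; the "/start" entry point and the identifiers are placeholders chosen for illustration.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

// consoleEmbedURL requests an embed URL for the QuickSight authoring console
// for a registered author or admin user. Entry point and IDs are placeholders.
func consoleEmbedURL(client *quicksight.QuickSight, account, userARN string) (string, error) {
	out, err := client.GetSessionEmbedUrl(&quicksight.GetSessionEmbedUrlInput{
		AwsAccountId:             aws.String(account),
		EntryPoint:               aws.String("/start"),
		UserArn:                  aws.String(userARN),
		SessionLifetimeInMinutes: aws.Int64(600),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.EmbedUrl), nil
}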
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) RegisterUserWithContext(ctx aws.Context, input *RegisterUserInput, opts ...request.Option) (*RegisterUserOutput, error) { - req, out := c.RegisterUserRequest(input) +func (c *QuickSight) GetSessionEmbedUrlWithContext(ctx aws.Context, input *GetSessionEmbedUrlInput, opts ...request.Option) (*GetSessionEmbedUrlOutput, error) { + req, out := c.GetSessionEmbedUrlRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opSearchDashboards = "SearchDashboards" +const opListAnalyses = "ListAnalyses" -// SearchDashboardsRequest generates a "aws/request.Request" representing the -// client's request for the SearchDashboards operation. The "output" return +// ListAnalysesRequest generates a "aws/request.Request" representing the +// client's request for the ListAnalyses operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See SearchDashboards for more information on using the SearchDashboards +// See ListAnalyses for more information on using the ListAnalyses // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the SearchDashboardsRequest method. -// req, resp := client.SearchDashboardsRequest(params) +// // Example sending a request using the ListAnalysesRequest method. +// req, resp := client.ListAnalysesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/SearchDashboards -func (c *QuickSight) SearchDashboardsRequest(input *SearchDashboardsInput) (req *request.Request, output *SearchDashboardsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListAnalyses +func (c *QuickSight) ListAnalysesRequest(input *ListAnalysesInput) (req *request.Request, output *ListAnalysesOutput) { op := &request.Operation{ - Name: opSearchDashboards, - HTTPMethod: "POST", - HTTPPath: "/accounts/{AwsAccountId}/search/dashboards", + Name: opListAnalyses, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/analyses", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, @@ -5538,34 +5546,31 @@ func (c *QuickSight) SearchDashboardsRequest(input *SearchDashboardsInput) (req } if input == nil { - input = &SearchDashboardsInput{} + input = &ListAnalysesInput{} } - output = &SearchDashboardsOutput{} + output = &ListAnalysesOutput{} req = c.newRequest(op, input, output) return } -// SearchDashboards API operation for Amazon QuickSight. +// ListAnalyses API operation for Amazon QuickSight. // -// Searchs for dashboards that belong to a user. +// Lists Amazon QuickSight analyses that exist in the specified AWS account. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation SearchDashboards for usage and error information. +// API operation ListAnalyses for usage and error information. // // Returned Error Types: // * ThrottlingException // Access is throttled. // -// * ResourceNotFoundException -// One or more resources can't be found. -// -// * InvalidParameterValueException -// One or more parameters has a value that isn't valid. +// * InvalidNextTokenException +// The NextToken value isn't valid. // // * UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight @@ -5573,71 +5578,68 @@ func (c *QuickSight) SearchDashboardsRequest(input *SearchDashboardsInput) (req // Amazon QuickSight currently has Standard Edition and Enterprise Edition. // Not every operation and capability is available in every edition. // -// * InvalidNextTokenException -// The NextToken value isn't valid. -// // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/SearchDashboards -func (c *QuickSight) SearchDashboards(input *SearchDashboardsInput) (*SearchDashboardsOutput, error) { - req, out := c.SearchDashboardsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListAnalyses +func (c *QuickSight) ListAnalyses(input *ListAnalysesInput) (*ListAnalysesOutput, error) { + req, out := c.ListAnalysesRequest(input) return out, req.Send() } -// SearchDashboardsWithContext is the same as SearchDashboards with the addition of +// ListAnalysesWithContext is the same as ListAnalyses with the addition of // the ability to pass a context and additional request options. // -// See SearchDashboards for details on how to use this API operation. +// See ListAnalyses for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) SearchDashboardsWithContext(ctx aws.Context, input *SearchDashboardsInput, opts ...request.Option) (*SearchDashboardsOutput, error) { - req, out := c.SearchDashboardsRequest(input) +func (c *QuickSight) ListAnalysesWithContext(ctx aws.Context, input *ListAnalysesInput, opts ...request.Option) (*ListAnalysesOutput, error) { + req, out := c.ListAnalysesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// SearchDashboardsPages iterates over the pages of a SearchDashboards operation, +// ListAnalysesPages iterates over the pages of a ListAnalyses operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See SearchDashboards method for more information on how to use this operation. +// See ListAnalyses method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a SearchDashboards operation. +// // Example iterating over at most 3 pages of a ListAnalyses operation. 
// pageNum := 0 -// err := client.SearchDashboardsPages(params, -// func(page *quicksight.SearchDashboardsOutput, lastPage bool) bool { +// err := client.ListAnalysesPages(params, +// func(page *quicksight.ListAnalysesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *QuickSight) SearchDashboardsPages(input *SearchDashboardsInput, fn func(*SearchDashboardsOutput, bool) bool) error { - return c.SearchDashboardsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *QuickSight) ListAnalysesPages(input *ListAnalysesInput, fn func(*ListAnalysesOutput, bool) bool) error { + return c.ListAnalysesPagesWithContext(aws.BackgroundContext(), input, fn) } -// SearchDashboardsPagesWithContext same as SearchDashboardsPages except +// ListAnalysesPagesWithContext same as ListAnalysesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) SearchDashboardsPagesWithContext(ctx aws.Context, input *SearchDashboardsInput, fn func(*SearchDashboardsOutput, bool) bool, opts ...request.Option) error { +func (c *QuickSight) ListAnalysesPagesWithContext(ctx aws.Context, input *ListAnalysesInput, fn func(*ListAnalysesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *SearchDashboardsInput + var inCpy *ListAnalysesInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.SearchDashboardsRequest(inCpy) + req, _ := c.ListAnalysesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -5645,7 +5647,7 @@ func (c *QuickSight) SearchDashboardsPagesWithContext(ctx aws.Context, input *Se } for p.Next() { - if !fn(p.Page().(*SearchDashboardsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListAnalysesOutput), !p.HasNextPage()) { break } } @@ -5653,92 +5655,66 @@ func (c *QuickSight) SearchDashboardsPagesWithContext(ctx aws.Context, input *Se return p.Err() } -const opTagResource = "TagResource" +const opListDashboardVersions = "ListDashboardVersions" -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// ListDashboardVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListDashboardVersions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See ListDashboardVersions for more information on using the ListDashboardVersions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the ListDashboardVersionsRequest method. 
+// req, resp := client.ListDashboardVersionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/TagResource -func (c *QuickSight) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDashboardVersions +func (c *QuickSight) ListDashboardVersionsRequest(input *ListDashboardVersionsInput) (req *request.Request, output *ListDashboardVersionsOutput) { op := &request.Operation{ - Name: opTagResource, - HTTPMethod: "POST", - HTTPPath: "/resources/{ResourceArn}/tags", + Name: opListDashboardVersions, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/versions", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &TagResourceInput{} + input = &ListDashboardVersionsInput{} } - output = &TagResourceOutput{} + output = &ListDashboardVersionsOutput{} req = c.newRequest(op, input, output) return } -// TagResource API operation for Amazon QuickSight. -// -// Assigns one or more tags (key-value pairs) to the specified QuickSight resource. -// -// Tags can help you organize and categorize your resources. You can also use -// them to scope user permissions, by granting a user permission to access or -// change only resources with certain tag values. You can use the TagResource -// operation with a resource that already has tags. If you specify a new tag -// key for the resource, this tag is appended to the list of tags associated -// with the resource. If you specify a tag key that is already associated with -// the resource, the new tag value that you specify replaces the previous value -// for that tag. -// -// You can associate as many as 50 tags with a resource. QuickSight supports -// tagging on data set, data source, dashboard, and template. -// -// Tagging for QuickSight works in a similar way to tagging for other AWS services, -// except for the following: -// -// * You can't use tags to track AWS costs for QuickSight. This restriction -// is because QuickSight costs are based on users and SPICE capacity, which -// aren't taggable resources. +// ListDashboardVersions API operation for Amazon QuickSight. // -// * QuickSight doesn't currently support the Tag Editor for AWS Resource -// Groups. +// Lists all the versions of the dashboards in the QuickSight subscription. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation TagResource for usage and error information. +// API operation ListDashboardVersions for usage and error information. // // Returned Error Types: -// * LimitExceededException -// A limit is exceeded. -// -// * AccessDeniedException -// You don't have access to this item. The provided credentials couldn't be -// validated. You might not be authorized to carry out the request. Make sure -// that your account is authorized to use the Amazon QuickSight service, that -// your policies have the correct permissions, and that you are using the correct -// access keys. 
-// // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // @@ -5748,472 +5724,706 @@ func (c *QuickSight) TagResourceRequest(input *TagResourceInput) (req *request.R // * ThrottlingException // Access is throttled. // +// * InvalidNextTokenException +// The NextToken value isn't valid. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/TagResource -func (c *QuickSight) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDashboardVersions +func (c *QuickSight) ListDashboardVersions(input *ListDashboardVersionsInput) (*ListDashboardVersionsOutput, error) { + req, out := c.ListDashboardVersionsRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// ListDashboardVersionsWithContext is the same as ListDashboardVersions with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See ListDashboardVersions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *QuickSight) ListDashboardVersionsWithContext(ctx aws.Context, input *ListDashboardVersionsInput, opts ...request.Option) (*ListDashboardVersionsOutput, error) { + req, out := c.ListDashboardVersionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUntagResource = "UntagResource" +// ListDashboardVersionsPages iterates over the pages of a ListDashboardVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDashboardVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDashboardVersions operation. +// pageNum := 0 +// err := client.ListDashboardVersionsPages(params, +// func(page *quicksight.ListDashboardVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QuickSight) ListDashboardVersionsPages(input *ListDashboardVersionsInput, fn func(*ListDashboardVersionsOutput, bool) bool) error { + return c.ListDashboardVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. 
The "output" return +// ListDashboardVersionsPagesWithContext same as ListDashboardVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListDashboardVersionsPagesWithContext(ctx aws.Context, input *ListDashboardVersionsInput, fn func(*ListDashboardVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDashboardVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDashboardVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDashboardVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDashboards = "ListDashboards" + +// ListDashboardsRequest generates a "aws/request.Request" representing the +// client's request for the ListDashboards operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See ListDashboards for more information on using the ListDashboards // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the ListDashboardsRequest method. +// req, resp := client.ListDashboardsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UntagResource -func (c *QuickSight) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDashboards +func (c *QuickSight) ListDashboardsRequest(input *ListDashboardsInput) (req *request.Request, output *ListDashboardsOutput) { op := &request.Operation{ - Name: opUntagResource, - HTTPMethod: "DELETE", - HTTPPath: "/resources/{ResourceArn}/tags", + Name: opListDashboards, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/dashboards", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UntagResourceInput{} + input = &ListDashboardsInput{} } - output = &UntagResourceOutput{} + output = &ListDashboardsOutput{} req = c.newRequest(op, input, output) return } -// UntagResource API operation for Amazon QuickSight. +// ListDashboards API operation for Amazon QuickSight. // -// Removes a tag or tags from a resource. +// Lists dashboards in an AWS account. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UntagResource for usage and error information. +// API operation ListDashboards for usage and error information. // // Returned Error Types: -// * AccessDeniedException -// You don't have access to this item. The provided credentials couldn't be -// validated. You might not be authorized to carry out the request. Make sure -// that your account is authorized to use the Amazon QuickSight service, that -// your policies have the correct permissions, and that you are using the correct -// access keys. -// -// * InvalidParameterValueException -// One or more parameters has a value that isn't valid. -// -// * ResourceNotFoundException -// One or more resources can't be found. -// // * ThrottlingException // Access is throttled. // +// * InvalidNextTokenException +// The NextToken value isn't valid. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UntagResource -func (c *QuickSight) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDashboards +func (c *QuickSight) ListDashboards(input *ListDashboardsInput) (*ListDashboardsOutput, error) { + req, out := c.ListDashboardsRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// ListDashboardsWithContext is the same as ListDashboards with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See ListDashboards for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *QuickSight) ListDashboardsWithContext(ctx aws.Context, input *ListDashboardsInput, opts ...request.Option) (*ListDashboardsOutput, error) { + req, out := c.ListDashboardsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDashboard = "UpdateDashboard" - -// UpdateDashboardRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDashboard operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. 
-// -// See UpdateDashboard for more information on using the UpdateDashboard -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// ListDashboardsPages iterates over the pages of a ListDashboards operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // +// See ListDashboards method for more information on how to use this operation. // -// // Example sending a request using the UpdateDashboardRequest method. -// req, resp := client.UpdateDashboardRequest(params) +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDashboards operation. +// pageNum := 0 +// err := client.ListDashboardsPages(params, +// func(page *quicksight.ListDashboardsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QuickSight) ListDashboardsPages(input *ListDashboardsInput, fn func(*ListDashboardsOutput, bool) bool) error { + return c.ListDashboardsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDashboardsPagesWithContext same as ListDashboardsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListDashboardsPagesWithContext(ctx aws.Context, input *ListDashboardsInput, fn func(*ListDashboardsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDashboardsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDashboardsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDashboardsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDataSets = "ListDataSets" + +// ListDataSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListDataSets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDataSets for more information on using the ListDataSets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListDataSetsRequest method. 
+// req, resp := client.ListDataSetsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboard -func (c *QuickSight) UpdateDashboardRequest(input *UpdateDashboardInput) (req *request.Request, output *UpdateDashboardOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDataSets +func (c *QuickSight) ListDataSetsRequest(input *ListDataSetsInput) (req *request.Request, output *ListDataSetsOutput) { op := &request.Operation{ - Name: opUpdateDashboard, - HTTPMethod: "PUT", - HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}", + Name: opListDataSets, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sets", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UpdateDashboardInput{} + input = &ListDataSetsInput{} } - output = &UpdateDashboardOutput{} + output = &ListDataSetsOutput{} req = c.newRequest(op, input, output) return } -// UpdateDashboard API operation for Amazon QuickSight. +// ListDataSets API operation for Amazon QuickSight. // -// Updates a dashboard in an AWS account. +// Lists all of the datasets belonging to the current AWS account in an AWS +// Region. +// +// The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/*. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateDashboard for usage and error information. +// API operation ListDataSets for usage and error information. // // Returned Error Types: -// * ThrottlingException -// Access is throttled. +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. // // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ResourceNotFoundException -// One or more resources can't be found. -// -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. -// -// * LimitExceededException -// A limit is exceeded. +// * ThrottlingException +// Access is throttled. // -// * UnsupportedUserEditionException -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. -// Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. +// * InvalidNextTokenException +// The NextToken value isn't valid. // // * InternalFailureException // An internal failure occurred. 
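// Editor's illustrative sketch, not part of the vendored diff: paging through
// all datasets in an account with the ListDataSetsPages helper generated below.
// DataSetSummaries/DataSetId are assumed from the ListDataSetsOutput shape;
// the aws and quicksight imports match the earlier sketch.
func listAllDataSetIds(client *quicksight.QuickSight, accountID string) ([]string, error) {
	var ids []string
	err := client.ListDataSetsPages(&quicksight.ListDataSetsInput{
		AwsAccountId: aws.String(accountID),
	}, func(page *quicksight.ListDataSetsOutput, lastPage bool) bool {
		for _, ds := range page.DataSetSummaries {
			ids = append(ids, aws.StringValue(ds.DataSetId))
		}
		return true // keep iterating until the paginator runs out of NextToken values
	})
	return ids, err
}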
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboard -func (c *QuickSight) UpdateDashboard(input *UpdateDashboardInput) (*UpdateDashboardOutput, error) { - req, out := c.UpdateDashboardRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDataSets +func (c *QuickSight) ListDataSets(input *ListDataSetsInput) (*ListDataSetsOutput, error) { + req, out := c.ListDataSetsRequest(input) return out, req.Send() } -// UpdateDashboardWithContext is the same as UpdateDashboard with the addition of +// ListDataSetsWithContext is the same as ListDataSets with the addition of // the ability to pass a context and additional request options. // -// See UpdateDashboard for details on how to use this API operation. +// See ListDataSets for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateDashboardWithContext(ctx aws.Context, input *UpdateDashboardInput, opts ...request.Option) (*UpdateDashboardOutput, error) { - req, out := c.UpdateDashboardRequest(input) +func (c *QuickSight) ListDataSetsWithContext(ctx aws.Context, input *ListDataSetsInput, opts ...request.Option) (*ListDataSetsOutput, error) { + req, out := c.ListDataSetsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDashboardPermissions = "UpdateDashboardPermissions" +// ListDataSetsPages iterates over the pages of a ListDataSets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDataSets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDataSets operation. +// pageNum := 0 +// err := client.ListDataSetsPages(params, +// func(page *quicksight.ListDataSetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QuickSight) ListDataSetsPages(input *ListDataSetsInput, fn func(*ListDataSetsOutput, bool) bool) error { + return c.ListDataSetsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UpdateDashboardPermissionsRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDashboardPermissions operation. The "output" return +// ListDataSetsPagesWithContext same as ListDataSetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListDataSetsPagesWithContext(ctx aws.Context, input *ListDataSetsInput, fn func(*ListDataSetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDataSetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDataSetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDataSetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListDataSources = "ListDataSources" + +// ListDataSourcesRequest generates a "aws/request.Request" representing the +// client's request for the ListDataSources operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDashboardPermissions for more information on using the UpdateDashboardPermissions +// See ListDataSources for more information on using the ListDataSources // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateDashboardPermissionsRequest method. -// req, resp := client.UpdateDashboardPermissionsRequest(params) +// // Example sending a request using the ListDataSourcesRequest method. +// req, resp := client.ListDataSourcesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboardPermissions -func (c *QuickSight) UpdateDashboardPermissionsRequest(input *UpdateDashboardPermissionsInput) (req *request.Request, output *UpdateDashboardPermissionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDataSources +func (c *QuickSight) ListDataSourcesRequest(input *ListDataSourcesInput) (req *request.Request, output *ListDataSourcesOutput) { op := &request.Operation{ - Name: opUpdateDashboardPermissions, - HTTPMethod: "PUT", - HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/permissions", + Name: opListDataSources, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sources", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UpdateDashboardPermissionsInput{} + input = &ListDataSourcesInput{} } - output = &UpdateDashboardPermissionsOutput{} + output = &ListDataSourcesOutput{} req = c.newRequest(op, input, output) return } -// UpdateDashboardPermissions API operation for Amazon QuickSight. +// ListDataSources API operation for Amazon QuickSight. // -// Updates read and write permissions on a dashboard. +// Lists data sources in current AWS Region that belong to this AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateDashboardPermissions for usage and error information. +// API operation ListDataSources for usage and error information. // // Returned Error Types: -// * ThrottlingException -// Access is throttled. +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. 
Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. // // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ResourceNotFoundException -// One or more resources can't be found. -// -// * UnsupportedUserEditionException -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. -// Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. +// * ThrottlingException +// Access is throttled. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. +// * InvalidNextTokenException +// The NextToken value isn't valid. // // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboardPermissions -func (c *QuickSight) UpdateDashboardPermissions(input *UpdateDashboardPermissionsInput) (*UpdateDashboardPermissionsOutput, error) { - req, out := c.UpdateDashboardPermissionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListDataSources +func (c *QuickSight) ListDataSources(input *ListDataSourcesInput) (*ListDataSourcesOutput, error) { + req, out := c.ListDataSourcesRequest(input) return out, req.Send() } -// UpdateDashboardPermissionsWithContext is the same as UpdateDashboardPermissions with the addition of +// ListDataSourcesWithContext is the same as ListDataSources with the addition of // the ability to pass a context and additional request options. // -// See UpdateDashboardPermissions for details on how to use this API operation. +// See ListDataSources for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateDashboardPermissionsWithContext(ctx aws.Context, input *UpdateDashboardPermissionsInput, opts ...request.Option) (*UpdateDashboardPermissionsOutput, error) { - req, out := c.UpdateDashboardPermissionsRequest(input) +func (c *QuickSight) ListDataSourcesWithContext(ctx aws.Context, input *ListDataSourcesInput, opts ...request.Option) (*ListDataSourcesOutput, error) { + req, out := c.ListDataSourcesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDashboardPublishedVersion = "UpdateDashboardPublishedVersion" +// ListDataSourcesPages iterates over the pages of a ListDataSources operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDataSources method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDataSources operation. 
+// pageNum := 0 +// err := client.ListDataSourcesPages(params, +// func(page *quicksight.ListDataSourcesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QuickSight) ListDataSourcesPages(input *ListDataSourcesInput, fn func(*ListDataSourcesOutput, bool) bool) error { + return c.ListDataSourcesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UpdateDashboardPublishedVersionRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDashboardPublishedVersion operation. The "output" return +// ListDataSourcesPagesWithContext same as ListDataSourcesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListDataSourcesPagesWithContext(ctx aws.Context, input *ListDataSourcesInput, fn func(*ListDataSourcesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDataSourcesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDataSourcesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDataSourcesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListGroupMemberships = "ListGroupMemberships" + +// ListGroupMembershipsRequest generates a "aws/request.Request" representing the +// client's request for the ListGroupMemberships operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDashboardPublishedVersion for more information on using the UpdateDashboardPublishedVersion +// See ListGroupMemberships for more information on using the ListGroupMemberships // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateDashboardPublishedVersionRequest method. -// req, resp := client.UpdateDashboardPublishedVersionRequest(params) +// // Example sending a request using the ListGroupMembershipsRequest method. 
+// req, resp := client.ListGroupMembershipsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboardPublishedVersion -func (c *QuickSight) UpdateDashboardPublishedVersionRequest(input *UpdateDashboardPublishedVersionInput) (req *request.Request, output *UpdateDashboardPublishedVersionOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListGroupMemberships +func (c *QuickSight) ListGroupMembershipsRequest(input *ListGroupMembershipsInput) (req *request.Request, output *ListGroupMembershipsOutput) { op := &request.Operation{ - Name: opUpdateDashboardPublishedVersion, - HTTPMethod: "PUT", - HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/versions/{VersionNumber}", + Name: opListGroupMemberships, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}/members", } if input == nil { - input = &UpdateDashboardPublishedVersionInput{} + input = &ListGroupMembershipsInput{} } - output = &UpdateDashboardPublishedVersionOutput{} + output = &ListGroupMembershipsOutput{} req = c.newRequest(op, input, output) return } -// UpdateDashboardPublishedVersion API operation for Amazon QuickSight. +// ListGroupMemberships API operation for Amazon QuickSight. // -// Updates the published version of a dashboard. +// Lists member users in a group. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateDashboardPublishedVersion for usage and error information. +// API operation ListGroupMemberships for usage and error information. // // Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // +// * ResourceNotFoundException +// One or more resources can't be found. +// // * ThrottlingException // Access is throttled. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. -// -// * ResourceNotFoundException -// One or more resources can't be found. +// * InvalidNextTokenException +// The NextToken value isn't valid. // -// * UnsupportedUserEditionException -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. -// Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. +// * PreconditionNotMetException +// One or more preconditions aren't met. // // * InternalFailureException // An internal failure occurred. 
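// Editor's illustrative sketch, not part of the vendored diff: reading the
// members of a QuickSight group via ListGroupMemberships, the kind of call the
// new aws_quicksight_group_membership resource in this patch depends on.
// GroupMemberList/MemberName are assumed from the GroupMember shape; imports
// match the earlier sketch.
func groupMemberNames(client *quicksight.QuickSight, accountID, namespace, groupName string) ([]string, error) {
	out, err := client.ListGroupMemberships(&quicksight.ListGroupMembershipsInput{
		AwsAccountId: aws.String(accountID),
		Namespace:    aws.String(namespace), // usually "default"
		GroupName:    aws.String(groupName),
	})
	if err != nil {
		return nil, err
	}
	var names []string
	for _, m := range out.GroupMemberList {
		names = append(names, aws.StringValue(m.MemberName))
	}
	return names, nil
}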
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboardPublishedVersion -func (c *QuickSight) UpdateDashboardPublishedVersion(input *UpdateDashboardPublishedVersionInput) (*UpdateDashboardPublishedVersionOutput, error) { - req, out := c.UpdateDashboardPublishedVersionRequest(input) +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListGroupMemberships +func (c *QuickSight) ListGroupMemberships(input *ListGroupMembershipsInput) (*ListGroupMembershipsOutput, error) { + req, out := c.ListGroupMembershipsRequest(input) return out, req.Send() } -// UpdateDashboardPublishedVersionWithContext is the same as UpdateDashboardPublishedVersion with the addition of +// ListGroupMembershipsWithContext is the same as ListGroupMemberships with the addition of // the ability to pass a context and additional request options. // -// See UpdateDashboardPublishedVersion for details on how to use this API operation. +// See ListGroupMemberships for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateDashboardPublishedVersionWithContext(ctx aws.Context, input *UpdateDashboardPublishedVersionInput, opts ...request.Option) (*UpdateDashboardPublishedVersionOutput, error) { - req, out := c.UpdateDashboardPublishedVersionRequest(input) +func (c *QuickSight) ListGroupMembershipsWithContext(ctx aws.Context, input *ListGroupMembershipsInput, opts ...request.Option) (*ListGroupMembershipsOutput, error) { + req, out := c.ListGroupMembershipsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDataSet = "UpdateDataSet" +const opListGroups = "ListGroups" -// UpdateDataSetRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDataSet operation. The "output" return +// ListGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListGroups operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDataSet for more information on using the UpdateDataSet +// See ListGroups for more information on using the ListGroups // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateDataSetRequest method. -// req, resp := client.UpdateDataSetRequest(params) +// // Example sending a request using the ListGroupsRequest method. 
+// req, resp := client.ListGroupsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSet -func (c *QuickSight) UpdateDataSetRequest(input *UpdateDataSetInput) (req *request.Request, output *UpdateDataSetOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListGroups +func (c *QuickSight) ListGroupsRequest(input *ListGroupsInput) (req *request.Request, output *ListGroupsOutput) { op := &request.Operation{ - Name: opUpdateDataSet, - HTTPMethod: "PUT", - HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}", + Name: opListGroups, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/groups", } if input == nil { - input = &UpdateDataSetInput{} + input = &ListGroupsInput{} } - output = &UpdateDataSetOutput{} + output = &ListGroupsOutput{} req = c.newRequest(op, input, output) return } -// UpdateDataSet API operation for Amazon QuickSight. +// ListGroups API operation for Amazon QuickSight. // -// Updates a dataset. +// Lists all user groups in Amazon QuickSight. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateDataSet for usage and error information. +// API operation ListGroups for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -6223,106 +6433,101 @@ func (c *QuickSight) UpdateDataSetRequest(input *UpdateDataSetInput) (req *reque // your policies have the correct permissions, and that you are using the correct // access keys. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. -// // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * LimitExceededException -// A limit is exceeded. +// * ResourceNotFoundException +// One or more resources can't be found. // // * ThrottlingException // Access is throttled. // -// * ResourceNotFoundException -// One or more resources can't be found. +// * InvalidNextTokenException +// The NextToken value isn't valid. // -// * UnsupportedUserEditionException -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. -// Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. +// * PreconditionNotMetException +// One or more preconditions aren't met. // // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSet -func (c *QuickSight) UpdateDataSet(input *UpdateDataSetInput) (*UpdateDataSetOutput, error) { - req, out := c.UpdateDataSetRequest(input) +// * ResourceUnavailableException +// This resource is currently unavailable. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListGroups +func (c *QuickSight) ListGroups(input *ListGroupsInput) (*ListGroupsOutput, error) { + req, out := c.ListGroupsRequest(input) return out, req.Send() } -// UpdateDataSetWithContext is the same as UpdateDataSet with the addition of +// ListGroupsWithContext is the same as ListGroups with the addition of // the ability to pass a context and additional request options. // -// See UpdateDataSet for details on how to use this API operation. +// See ListGroups for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateDataSetWithContext(ctx aws.Context, input *UpdateDataSetInput, opts ...request.Option) (*UpdateDataSetOutput, error) { - req, out := c.UpdateDataSetRequest(input) +func (c *QuickSight) ListGroupsWithContext(ctx aws.Context, input *ListGroupsInput, opts ...request.Option) (*ListGroupsOutput, error) { + req, out := c.ListGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDataSetPermissions = "UpdateDataSetPermissions" +const opListIAMPolicyAssignments = "ListIAMPolicyAssignments" -// UpdateDataSetPermissionsRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDataSetPermissions operation. The "output" return +// ListIAMPolicyAssignmentsRequest generates a "aws/request.Request" representing the +// client's request for the ListIAMPolicyAssignments operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDataSetPermissions for more information on using the UpdateDataSetPermissions +// See ListIAMPolicyAssignments for more information on using the ListIAMPolicyAssignments // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateDataSetPermissionsRequest method. -// req, resp := client.UpdateDataSetPermissionsRequest(params) +// // Example sending a request using the ListIAMPolicyAssignmentsRequest method. 
+// req, resp := client.ListIAMPolicyAssignmentsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSetPermissions -func (c *QuickSight) UpdateDataSetPermissionsRequest(input *UpdateDataSetPermissionsInput) (req *request.Request, output *UpdateDataSetPermissionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIAMPolicyAssignments +func (c *QuickSight) ListIAMPolicyAssignmentsRequest(input *ListIAMPolicyAssignmentsInput) (req *request.Request, output *ListIAMPolicyAssignmentsOutput) { op := &request.Operation{ - Name: opUpdateDataSetPermissions, - HTTPMethod: "POST", - HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/permissions", + Name: opListIAMPolicyAssignments, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments", } if input == nil { - input = &UpdateDataSetPermissionsInput{} + input = &ListIAMPolicyAssignmentsInput{} } - output = &UpdateDataSetPermissionsOutput{} + output = &ListIAMPolicyAssignmentsOutput{} req = c.newRequest(op, input, output) return } -// UpdateDataSetPermissions API operation for Amazon QuickSight. -// -// Updates the permissions on a dataset. +// ListIAMPolicyAssignments API operation for Amazon QuickSight. // -// The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/data-set-id. +// Lists IAM policy assignments in the current Amazon QuickSight account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateDataSetPermissions for usage and error information. +// API operation ListIAMPolicyAssignments for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -6332,9 +6537,6 @@ func (c *QuickSight) UpdateDataSetPermissionsRequest(input *UpdateDataSetPermiss // your policies have the correct permissions, and that you are using the correct // access keys. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. -// // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // @@ -6344,83 +6546,88 @@ func (c *QuickSight) UpdateDataSetPermissionsRequest(input *UpdateDataSetPermiss // * ThrottlingException // Access is throttled. // +// * InvalidNextTokenException +// The NextToken value isn't valid. +// // * InternalFailureException // An internal failure occurred. 
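// Editor's illustrative sketch, not part of the vendored diff: listing the IAM
// policy assignments in a namespace, relevant to the
// aws_quicksight_iam_policy_assignment resource added by this patch.
// IAMPolicyAssignments/AssignmentName are assumed from the output shape;
// imports match the earlier sketch.
func assignmentNames(client *quicksight.QuickSight, accountID, namespace string) ([]string, error) {
	out, err := client.ListIAMPolicyAssignments(&quicksight.ListIAMPolicyAssignmentsInput{
		AwsAccountId: aws.String(accountID),
		Namespace:    aws.String(namespace),
	})
	if err != nil {
		return nil, err
	}
	var names []string
	for _, a := range out.IAMPolicyAssignments {
		names = append(names, aws.StringValue(a.AssignmentName))
	}
	return names, nil
}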
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSetPermissions -func (c *QuickSight) UpdateDataSetPermissions(input *UpdateDataSetPermissionsInput) (*UpdateDataSetPermissionsOutput, error) { - req, out := c.UpdateDataSetPermissionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIAMPolicyAssignments +func (c *QuickSight) ListIAMPolicyAssignments(input *ListIAMPolicyAssignmentsInput) (*ListIAMPolicyAssignmentsOutput, error) { + req, out := c.ListIAMPolicyAssignmentsRequest(input) return out, req.Send() } -// UpdateDataSetPermissionsWithContext is the same as UpdateDataSetPermissions with the addition of +// ListIAMPolicyAssignmentsWithContext is the same as ListIAMPolicyAssignments with the addition of // the ability to pass a context and additional request options. // -// See UpdateDataSetPermissions for details on how to use this API operation. +// See ListIAMPolicyAssignments for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateDataSetPermissionsWithContext(ctx aws.Context, input *UpdateDataSetPermissionsInput, opts ...request.Option) (*UpdateDataSetPermissionsOutput, error) { - req, out := c.UpdateDataSetPermissionsRequest(input) +func (c *QuickSight) ListIAMPolicyAssignmentsWithContext(ctx aws.Context, input *ListIAMPolicyAssignmentsInput, opts ...request.Option) (*ListIAMPolicyAssignmentsOutput, error) { + req, out := c.ListIAMPolicyAssignmentsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDataSource = "UpdateDataSource" +const opListIAMPolicyAssignmentsForUser = "ListIAMPolicyAssignmentsForUser" -// UpdateDataSourceRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDataSource operation. The "output" return +// ListIAMPolicyAssignmentsForUserRequest generates a "aws/request.Request" representing the +// client's request for the ListIAMPolicyAssignmentsForUser operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDataSource for more information on using the UpdateDataSource +// See ListIAMPolicyAssignmentsForUser for more information on using the ListIAMPolicyAssignmentsForUser // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateDataSourceRequest method. -// req, resp := client.UpdateDataSourceRequest(params) +// // Example sending a request using the ListIAMPolicyAssignmentsForUserRequest method. 
+// req, resp := client.ListIAMPolicyAssignmentsForUserRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSource -func (c *QuickSight) UpdateDataSourceRequest(input *UpdateDataSourceInput) (req *request.Request, output *UpdateDataSourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIAMPolicyAssignmentsForUser +func (c *QuickSight) ListIAMPolicyAssignmentsForUserRequest(input *ListIAMPolicyAssignmentsForUserInput) (req *request.Request, output *ListIAMPolicyAssignmentsForUserOutput) { op := &request.Operation{ - Name: opUpdateDataSource, - HTTPMethod: "PUT", - HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}", + Name: opListIAMPolicyAssignmentsForUser, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}/iam-policy-assignments", } if input == nil { - input = &UpdateDataSourceInput{} + input = &ListIAMPolicyAssignmentsForUserInput{} } - output = &UpdateDataSourceOutput{} + output = &ListIAMPolicyAssignmentsForUserOutput{} req = c.newRequest(op, input, output) return } -// UpdateDataSource API operation for Amazon QuickSight. +// ListIAMPolicyAssignmentsForUser API operation for Amazon QuickSight. // -// Updates a data source. +// Lists all the IAM policy assignments, including the Amazon Resource Names +// (ARNs) for the IAM policies assigned to the specified user and group or groups +// that the user belongs to. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateDataSource for usage and error information. +// API operation ListIAMPolicyAssignmentsForUser for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -6430,95 +6637,105 @@ func (c *QuickSight) UpdateDataSourceRequest(input *UpdateDataSourceInput) (req // your policies have the correct permissions, and that you are using the correct // access keys. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. -// // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ThrottlingException -// Access is throttled. +// * ResourceExistsException +// The resource specified already exists. // // * ResourceNotFoundException // One or more resources can't be found. // +// * ThrottlingException +// Access is throttled. +// +// * ConcurrentUpdatingException +// A resource is already in a state that indicates an operation is happening +// that must complete before a new update can be applied. +// // * InternalFailureException // An internal failure occurred. 
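// Editor's note: illustrative sketch only, not part of the vendored SDK. It demonstrates
// the WithContext variant documented above, using aws.BackgroundContext() as the SDK's
// own pagination helpers do. The AwsAccountId, Namespace, and UserName fields are assumed
// from the HTTP path
// /accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}/iam-policy-assignments;
// all values shown are placeholders.
// Assumed imports: "fmt", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/service/quicksight".
func exampleListIAMPolicyAssignmentsForUser(svc *quicksight.QuickSight) error {
	ctx := aws.BackgroundContext() // any non-nil aws.Context works; a nil context panics per the docs above

	out, err := svc.ListIAMPolicyAssignmentsForUserWithContext(ctx, &quicksight.ListIAMPolicyAssignmentsForUserInput{
		AwsAccountId: aws.String("111122223333"), // placeholder AWS account ID
		Namespace:    aws.String("default"),      // placeholder namespace
		UserName:     aws.String("example-user"), // placeholder user name
	})
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}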
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSource -func (c *QuickSight) UpdateDataSource(input *UpdateDataSourceInput) (*UpdateDataSourceOutput, error) { - req, out := c.UpdateDataSourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIAMPolicyAssignmentsForUser +func (c *QuickSight) ListIAMPolicyAssignmentsForUser(input *ListIAMPolicyAssignmentsForUserInput) (*ListIAMPolicyAssignmentsForUserOutput, error) { + req, out := c.ListIAMPolicyAssignmentsForUserRequest(input) return out, req.Send() } -// UpdateDataSourceWithContext is the same as UpdateDataSource with the addition of +// ListIAMPolicyAssignmentsForUserWithContext is the same as ListIAMPolicyAssignmentsForUser with the addition of // the ability to pass a context and additional request options. // -// See UpdateDataSource for details on how to use this API operation. +// See ListIAMPolicyAssignmentsForUser for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateDataSourceWithContext(ctx aws.Context, input *UpdateDataSourceInput, opts ...request.Option) (*UpdateDataSourceOutput, error) { - req, out := c.UpdateDataSourceRequest(input) +func (c *QuickSight) ListIAMPolicyAssignmentsForUserWithContext(ctx aws.Context, input *ListIAMPolicyAssignmentsForUserInput, opts ...request.Option) (*ListIAMPolicyAssignmentsForUserOutput, error) { + req, out := c.ListIAMPolicyAssignmentsForUserRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateDataSourcePermissions = "UpdateDataSourcePermissions" +const opListIngestions = "ListIngestions" -// UpdateDataSourcePermissionsRequest generates a "aws/request.Request" representing the -// client's request for the UpdateDataSourcePermissions operation. The "output" return +// ListIngestionsRequest generates a "aws/request.Request" representing the +// client's request for the ListIngestions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateDataSourcePermissions for more information on using the UpdateDataSourcePermissions +// See ListIngestions for more information on using the ListIngestions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateDataSourcePermissionsRequest method. -// req, resp := client.UpdateDataSourcePermissionsRequest(params) +// // Example sending a request using the ListIngestionsRequest method. 
+// req, resp := client.ListIngestionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSourcePermissions -func (c *QuickSight) UpdateDataSourcePermissionsRequest(input *UpdateDataSourcePermissionsInput) (req *request.Request, output *UpdateDataSourcePermissionsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIngestions +func (c *QuickSight) ListIngestionsRequest(input *ListIngestionsInput) (req *request.Request, output *ListIngestionsOutput) { op := &request.Operation{ - Name: opUpdateDataSourcePermissions, - HTTPMethod: "POST", - HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}/permissions", + Name: opListIngestions, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UpdateDataSourcePermissionsInput{} + input = &ListIngestionsInput{} } - output = &UpdateDataSourcePermissionsOutput{} + output = &ListIngestionsOutput{} req = c.newRequest(op, input, output) return } -// UpdateDataSourcePermissions API operation for Amazon QuickSight. +// ListIngestions API operation for Amazon QuickSight. // -// Updates the permissions to a data source. +// Lists the history of SPICE ingestions for a dataset. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateDataSourcePermissions for usage and error information. +// API operation ListIngestions for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -6528,9 +6745,6 @@ func (c *QuickSight) UpdateDataSourcePermissionsRequest(input *UpdateDataSourceP // your policies have the correct permissions, and that you are using the correct // access keys. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. -// // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // @@ -6540,83 +6754,147 @@ func (c *QuickSight) UpdateDataSourcePermissionsRequest(input *UpdateDataSourceP // * ThrottlingException // Access is throttled. // +// * ResourceExistsException +// The resource specified already exists. +// +// * InvalidNextTokenException +// The NextToken value isn't valid. +// // * InternalFailureException // An internal failure occurred. 
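// Editor's note: illustrative sketch only, not part of the vendored SDK. It shows how the
// ListIngestionsPages helper defined below can be used to walk the SPICE ingestion history
// for a dataset page by page. The AwsAccountId and DataSetId fields are assumed from the
// HTTP path /accounts/{AwsAccountId}/data-sets/{DataSetId}/ingestions, and MaxResults from
// the paginator's LimitToken; the values are placeholders.
// Assumed imports: "fmt", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/service/quicksight".
func exampleListIngestions(svc *quicksight.QuickSight) error {
	input := &quicksight.ListIngestionsInput{
		AwsAccountId: aws.String("111122223333"),     // placeholder AWS account ID
		DataSetId:    aws.String("example-data-set"), // placeholder dataset ID
		MaxResults:   aws.Int64(25),                  // page size; NextToken handling is done by the paginator
	}

	// The callback runs once per page; returning false stops iteration early.
	return svc.ListIngestionsPages(input, func(page *quicksight.ListIngestionsOutput, lastPage bool) bool {
		fmt.Println(page)
		return true // continue until lastPage
	})
}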
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSourcePermissions -func (c *QuickSight) UpdateDataSourcePermissions(input *UpdateDataSourcePermissionsInput) (*UpdateDataSourcePermissionsOutput, error) { - req, out := c.UpdateDataSourcePermissionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListIngestions +func (c *QuickSight) ListIngestions(input *ListIngestionsInput) (*ListIngestionsOutput, error) { + req, out := c.ListIngestionsRequest(input) return out, req.Send() } -// UpdateDataSourcePermissionsWithContext is the same as UpdateDataSourcePermissions with the addition of +// ListIngestionsWithContext is the same as ListIngestions with the addition of // the ability to pass a context and additional request options. // -// See UpdateDataSourcePermissions for details on how to use this API operation. +// See ListIngestions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateDataSourcePermissionsWithContext(ctx aws.Context, input *UpdateDataSourcePermissionsInput, opts ...request.Option) (*UpdateDataSourcePermissionsOutput, error) { - req, out := c.UpdateDataSourcePermissionsRequest(input) +func (c *QuickSight) ListIngestionsWithContext(ctx aws.Context, input *ListIngestionsInput, opts ...request.Option) (*ListIngestionsOutput, error) { + req, out := c.ListIngestionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateGroup = "UpdateGroup" +// ListIngestionsPages iterates over the pages of a ListIngestions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListIngestions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListIngestions operation. +// pageNum := 0 +// err := client.ListIngestionsPages(params, +// func(page *quicksight.ListIngestionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QuickSight) ListIngestionsPages(input *ListIngestionsInput, fn func(*ListIngestionsOutput, bool) bool) error { + return c.ListIngestionsPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UpdateGroupRequest generates a "aws/request.Request" representing the -// client's request for the UpdateGroup operation. The "output" return +// ListIngestionsPagesWithContext same as ListIngestionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *QuickSight) ListIngestionsPagesWithContext(ctx aws.Context, input *ListIngestionsInput, fn func(*ListIngestionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListIngestionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListIngestionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListIngestionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListNamespaces = "ListNamespaces" + +// ListNamespacesRequest generates a "aws/request.Request" representing the +// client's request for the ListNamespaces operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateGroup for more information on using the UpdateGroup +// See ListNamespaces for more information on using the ListNamespaces // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateGroupRequest method. -// req, resp := client.UpdateGroupRequest(params) +// // Example sending a request using the ListNamespacesRequest method. +// req, resp := client.ListNamespacesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateGroup -func (c *QuickSight) UpdateGroupRequest(input *UpdateGroupInput) (req *request.Request, output *UpdateGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListNamespaces +func (c *QuickSight) ListNamespacesRequest(input *ListNamespacesInput) (req *request.Request, output *ListNamespacesOutput) { op := &request.Operation{ - Name: opUpdateGroup, - HTTPMethod: "PUT", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}", + Name: opListNamespaces, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/namespaces", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UpdateGroupInput{} + input = &ListNamespacesInput{} } - output = &UpdateGroupOutput{} + output = &ListNamespacesOutput{} req = c.newRequest(op, input, output) return } -// UpdateGroup API operation for Amazon QuickSight. +// ListNamespaces API operation for Amazon QuickSight. // -// Changes a group description. +// Lists the namespaces for the specified AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateGroup for usage and error information. +// API operation ListNamespaces for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -6635,6 +6913,9 @@ func (c *QuickSight) UpdateGroupRequest(input *UpdateGroupInput) (req *request.R // * ThrottlingException // Access is throttled. 
// +// * InvalidNextTokenException +// The NextToken value isn't valid. +// // * PreconditionNotMetException // One or more preconditions aren't met. // @@ -6644,81 +6925,132 @@ func (c *QuickSight) UpdateGroupRequest(input *UpdateGroupInput) (req *request.R // * ResourceUnavailableException // This resource is currently unavailable. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateGroup -func (c *QuickSight) UpdateGroup(input *UpdateGroupInput) (*UpdateGroupOutput, error) { - req, out := c.UpdateGroupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListNamespaces +func (c *QuickSight) ListNamespaces(input *ListNamespacesInput) (*ListNamespacesOutput, error) { + req, out := c.ListNamespacesRequest(input) return out, req.Send() } -// UpdateGroupWithContext is the same as UpdateGroup with the addition of +// ListNamespacesWithContext is the same as ListNamespaces with the addition of // the ability to pass a context and additional request options. // -// See UpdateGroup for details on how to use this API operation. +// See ListNamespaces for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateGroupWithContext(ctx aws.Context, input *UpdateGroupInput, opts ...request.Option) (*UpdateGroupOutput, error) { - req, out := c.UpdateGroupRequest(input) +func (c *QuickSight) ListNamespacesWithContext(ctx aws.Context, input *ListNamespacesInput, opts ...request.Option) (*ListNamespacesOutput, error) { + req, out := c.ListNamespacesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateIAMPolicyAssignment = "UpdateIAMPolicyAssignment" +// ListNamespacesPages iterates over the pages of a ListNamespaces operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListNamespaces method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListNamespaces operation. +// pageNum := 0 +// err := client.ListNamespacesPages(params, +// func(page *quicksight.ListNamespacesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QuickSight) ListNamespacesPages(input *ListNamespacesInput, fn func(*ListNamespacesOutput, bool) bool) error { + return c.ListNamespacesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UpdateIAMPolicyAssignmentRequest generates a "aws/request.Request" representing the -// client's request for the UpdateIAMPolicyAssignment operation. The "output" return +// ListNamespacesPagesWithContext same as ListNamespacesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
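// Editor's note: illustrative sketch only, not part of the vendored SDK. It shows how the
// ListNamespacesPagesWithContext helper (implemented immediately below) might be used to
// collect every page of namespaces for an account. The AwsAccountId field is assumed from
// the HTTP path /accounts/{AwsAccountId}/namespaces; the value is a placeholder.
// Assumed imports: "github.com/aws/aws-sdk-go/aws", "github.com/aws/aws-sdk-go/service/quicksight".
func exampleListNamespaces(ctx aws.Context, svc *quicksight.QuickSight) ([]*quicksight.ListNamespacesOutput, error) {
	var pages []*quicksight.ListNamespacesOutput

	err := svc.ListNamespacesPagesWithContext(ctx, &quicksight.ListNamespacesInput{
		AwsAccountId: aws.String("111122223333"), // placeholder AWS account ID
	}, func(page *quicksight.ListNamespacesOutput, lastPage bool) bool {
		pages = append(pages, page) // keep each page as-is; output field names are not assumed here
		return true                 // continue until lastPage
	})
	return pages, err
}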
+func (c *QuickSight) ListNamespacesPagesWithContext(ctx aws.Context, input *ListNamespacesInput, fn func(*ListNamespacesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListNamespacesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListNamespacesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListNamespacesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateIAMPolicyAssignment for more information on using the UpdateIAMPolicyAssignment +// See ListTagsForResource for more information on using the ListTagsForResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateIAMPolicyAssignmentRequest method. -// req, resp := client.UpdateIAMPolicyAssignmentRequest(params) +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateIAMPolicyAssignment -func (c *QuickSight) UpdateIAMPolicyAssignmentRequest(input *UpdateIAMPolicyAssignmentInput) (req *request.Request, output *UpdateIAMPolicyAssignmentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTagsForResource +func (c *QuickSight) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ - Name: opUpdateIAMPolicyAssignment, - HTTPMethod: "PUT", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/{AssignmentName}", + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/resources/{ResourceArn}/tags", } if input == nil { - input = &UpdateIAMPolicyAssignmentInput{} + input = &ListTagsForResourceInput{} } - output = &UpdateIAMPolicyAssignmentOutput{} + output = &ListTagsForResourceOutput{} req = c.newRequest(op, input, output) return } -// UpdateIAMPolicyAssignment API operation for Amazon QuickSight. +// ListTagsForResource API operation for Amazon QuickSight. // -// Updates an existing IAM policy assignment. This operation updates only the -// optional parameter or parameters that are specified in the request. +// Lists the tags assigned to a resource. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateIAMPolicyAssignment for usage and error information. 
+// API operation ListTagsForResource for usage and error information. // // Returned Error Types: // * AccessDeniedException @@ -6731,116 +7063,105 @@ func (c *QuickSight) UpdateIAMPolicyAssignmentRequest(input *UpdateIAMPolicyAssi // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ResourceExistsException -// The resource specified already exists. -// // * ResourceNotFoundException // One or more resources can't be found. // // * ThrottlingException // Access is throttled. // -// * ConcurrentUpdatingException -// A resource is already in a state that indicates an action is happening that -// must complete before a new update can be applied. -// // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateIAMPolicyAssignment -func (c *QuickSight) UpdateIAMPolicyAssignment(input *UpdateIAMPolicyAssignmentInput) (*UpdateIAMPolicyAssignmentOutput, error) { - req, out := c.UpdateIAMPolicyAssignmentRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTagsForResource +func (c *QuickSight) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) return out, req.Send() } -// UpdateIAMPolicyAssignmentWithContext is the same as UpdateIAMPolicyAssignment with the addition of +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of // the ability to pass a context and additional request options. // -// See UpdateIAMPolicyAssignment for details on how to use this API operation. +// See ListTagsForResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateIAMPolicyAssignmentWithContext(ctx aws.Context, input *UpdateIAMPolicyAssignmentInput, opts ...request.Option) (*UpdateIAMPolicyAssignmentOutput, error) { - req, out := c.UpdateIAMPolicyAssignmentRequest(input) +func (c *QuickSight) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateTemplate = "UpdateTemplate" +const opListTemplateAliases = "ListTemplateAliases" -// UpdateTemplateRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTemplate operation. The "output" return +// ListTemplateAliasesRequest generates a "aws/request.Request" representing the +// client's request for the ListTemplateAliases operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateTemplate for more information on using the UpdateTemplate +// See ListTemplateAliases for more information on using the ListTemplateAliases // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateTemplateRequest method. -// req, resp := client.UpdateTemplateRequest(params) +// // Example sending a request using the ListTemplateAliasesRequest method. +// req, resp := client.ListTemplateAliasesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplate -func (c *QuickSight) UpdateTemplateRequest(input *UpdateTemplateInput) (req *request.Request, output *UpdateTemplateOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplateAliases +func (c *QuickSight) ListTemplateAliasesRequest(input *ListTemplateAliasesInput) (req *request.Request, output *ListTemplateAliasesOutput) { op := &request.Operation{ - Name: opUpdateTemplate, - HTTPMethod: "PUT", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}", + Name: opListTemplateAliases, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UpdateTemplateInput{} + input = &ListTemplateAliasesInput{} } - output = &UpdateTemplateOutput{} + output = &ListTemplateAliasesOutput{} req = c.newRequest(op, input, output) return } -// UpdateTemplate API operation for Amazon QuickSight. +// ListTemplateAliases API operation for Amazon QuickSight. // -// Updates a template from an existing Amazon QuickSight analysis or another -// template. +// Lists all the aliases of a template. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateTemplate for usage and error information. +// API operation ListTemplateAliases for usage and error information. // // Returned Error Types: -// * InvalidParameterValueException -// One or more parameters has a value that isn't valid. -// -// * ResourceExistsException -// The resource specified already exists. -// -// * ResourceNotFoundException -// One or more resources can't be found. +// * InvalidNextTokenException +// The NextToken value isn't valid. // // * ThrottlingException // Access is throttled. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. -// -// * LimitExceededException -// A limit is exceeded. +// * ResourceNotFoundException +// One or more resources can't be found. // // * UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight @@ -6851,90 +7172,152 @@ func (c *QuickSight) UpdateTemplateRequest(input *UpdateTemplateInput) (req *req // * InternalFailureException // An internal failure occurred. 
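// Editor's note: illustrative sketch only, not part of the vendored SDK. It shows a plain,
// non-paginated call to the ListTemplateAliases operation documented above. The
// AwsAccountId and TemplateId fields are assumed from the HTTP path
// /accounts/{AwsAccountId}/templates/{TemplateId}/aliases; the values are placeholders.
// Assumed imports: "fmt", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/service/quicksight".
func exampleListTemplateAliases(svc *quicksight.QuickSight) error {
	out, err := svc.ListTemplateAliases(&quicksight.ListTemplateAliasesInput{
		AwsAccountId: aws.String("111122223333"),     // placeholder AWS account ID
		TemplateId:   aws.String("example-template"), // placeholder template ID
	})
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}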
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplate -func (c *QuickSight) UpdateTemplate(input *UpdateTemplateInput) (*UpdateTemplateOutput, error) { - req, out := c.UpdateTemplateRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplateAliases +func (c *QuickSight) ListTemplateAliases(input *ListTemplateAliasesInput) (*ListTemplateAliasesOutput, error) { + req, out := c.ListTemplateAliasesRequest(input) return out, req.Send() } -// UpdateTemplateWithContext is the same as UpdateTemplate with the addition of +// ListTemplateAliasesWithContext is the same as ListTemplateAliases with the addition of // the ability to pass a context and additional request options. // -// See UpdateTemplate for details on how to use this API operation. +// See ListTemplateAliases for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateTemplateWithContext(ctx aws.Context, input *UpdateTemplateInput, opts ...request.Option) (*UpdateTemplateOutput, error) { - req, out := c.UpdateTemplateRequest(input) +func (c *QuickSight) ListTemplateAliasesWithContext(ctx aws.Context, input *ListTemplateAliasesInput, opts ...request.Option) (*ListTemplateAliasesOutput, error) { + req, out := c.ListTemplateAliasesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateTemplateAlias = "UpdateTemplateAlias" +// ListTemplateAliasesPages iterates over the pages of a ListTemplateAliases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTemplateAliases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTemplateAliases operation. +// pageNum := 0 +// err := client.ListTemplateAliasesPages(params, +// func(page *quicksight.ListTemplateAliasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QuickSight) ListTemplateAliasesPages(input *ListTemplateAliasesInput, fn func(*ListTemplateAliasesOutput, bool) bool) error { + return c.ListTemplateAliasesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UpdateTemplateAliasRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTemplateAlias operation. The "output" return +// ListTemplateAliasesPagesWithContext same as ListTemplateAliasesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *QuickSight) ListTemplateAliasesPagesWithContext(ctx aws.Context, input *ListTemplateAliasesInput, fn func(*ListTemplateAliasesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTemplateAliasesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTemplateAliasesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTemplateAliasesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTemplateVersions = "ListTemplateVersions" + +// ListTemplateVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListTemplateVersions operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateTemplateAlias for more information on using the UpdateTemplateAlias +// See ListTemplateVersions for more information on using the ListTemplateVersions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateTemplateAliasRequest method. -// req, resp := client.UpdateTemplateAliasRequest(params) +// // Example sending a request using the ListTemplateVersionsRequest method. +// req, resp := client.ListTemplateVersionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplateAlias -func (c *QuickSight) UpdateTemplateAliasRequest(input *UpdateTemplateAliasInput) (req *request.Request, output *UpdateTemplateAliasOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplateVersions +func (c *QuickSight) ListTemplateVersionsRequest(input *ListTemplateVersionsInput) (req *request.Request, output *ListTemplateVersionsOutput) { op := &request.Operation{ - Name: opUpdateTemplateAlias, - HTTPMethod: "PUT", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}", + Name: opListTemplateVersions, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/versions", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UpdateTemplateAliasInput{} + input = &ListTemplateVersionsInput{} } - output = &UpdateTemplateAliasOutput{} + output = &ListTemplateVersionsOutput{} req = c.newRequest(op, input, output) return } -// UpdateTemplateAlias API operation for Amazon QuickSight. +// ListTemplateVersions API operation for Amazon QuickSight. // -// Updates the template alias of a template. +// Lists all the versions of the templates in the current Amazon QuickSight +// account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateTemplateAlias for usage and error information. +// API operation ListTemplateVersions for usage and error information. // // Returned Error Types: // * ThrottlingException // Access is throttled. // +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// // * ResourceNotFoundException // One or more resources can't be found. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. +// * InvalidNextTokenException +// The NextToken value isn't valid. // // * UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight @@ -6945,80 +7328,138 @@ func (c *QuickSight) UpdateTemplateAliasRequest(input *UpdateTemplateAliasInput) // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplateAlias -func (c *QuickSight) UpdateTemplateAlias(input *UpdateTemplateAliasInput) (*UpdateTemplateAliasOutput, error) { - req, out := c.UpdateTemplateAliasRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplateVersions +func (c *QuickSight) ListTemplateVersions(input *ListTemplateVersionsInput) (*ListTemplateVersionsOutput, error) { + req, out := c.ListTemplateVersionsRequest(input) return out, req.Send() } -// UpdateTemplateAliasWithContext is the same as UpdateTemplateAlias with the addition of +// ListTemplateVersionsWithContext is the same as ListTemplateVersions with the addition of // the ability to pass a context and additional request options. // -// See UpdateTemplateAlias for details on how to use this API operation. +// See ListTemplateVersions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateTemplateAliasWithContext(ctx aws.Context, input *UpdateTemplateAliasInput, opts ...request.Option) (*UpdateTemplateAliasOutput, error) { - req, out := c.UpdateTemplateAliasRequest(input) +func (c *QuickSight) ListTemplateVersionsWithContext(ctx aws.Context, input *ListTemplateVersionsInput, opts ...request.Option) (*ListTemplateVersionsOutput, error) { + req, out := c.ListTemplateVersionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateTemplatePermissions = "UpdateTemplatePermissions" - -// UpdateTemplatePermissionsRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTemplatePermissions operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See UpdateTemplatePermissions for more information on using the UpdateTemplatePermissions -// API call, and error handling. +// ListTemplateVersionsPages iterates over the pages of a ListTemplateVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// See ListTemplateVersions method for more information on how to use this operation. // +// Note: This operation can generate multiple requests to a service. // -// // Example sending a request using the UpdateTemplatePermissionsRequest method. -// req, resp := client.UpdateTemplatePermissionsRequest(params) +// // Example iterating over at most 3 pages of a ListTemplateVersions operation. +// pageNum := 0 +// err := client.ListTemplateVersionsPages(params, +// func(page *quicksight.ListTemplateVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +func (c *QuickSight) ListTemplateVersionsPages(input *ListTemplateVersionsInput, fn func(*ListTemplateVersionsOutput, bool) bool) error { + return c.ListTemplateVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTemplateVersionsPagesWithContext same as ListTemplateVersionsPages except +// it takes a Context and allows setting request options on the pages. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplatePermissions -func (c *QuickSight) UpdateTemplatePermissionsRequest(input *UpdateTemplatePermissionsInput) (req *request.Request, output *UpdateTemplatePermissionsOutput) { +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListTemplateVersionsPagesWithContext(ctx aws.Context, input *ListTemplateVersionsInput, fn func(*ListTemplateVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTemplateVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTemplateVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTemplateVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListTemplates = "ListTemplates" + +// ListTemplatesRequest generates a "aws/request.Request" representing the +// client's request for the ListTemplates operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTemplates for more information on using the ListTemplates +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTemplatesRequest method. 
+// req, resp := client.ListTemplatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplates +func (c *QuickSight) ListTemplatesRequest(input *ListTemplatesInput) (req *request.Request, output *ListTemplatesOutput) { op := &request.Operation{ - Name: opUpdateTemplatePermissions, - HTTPMethod: "PUT", - HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/permissions", + Name: opListTemplates, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/templates", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { - input = &UpdateTemplatePermissionsInput{} + input = &ListTemplatesInput{} } - output = &UpdateTemplatePermissionsOutput{} + output = &ListTemplatesOutput{} req = c.newRequest(op, input, output) return } -// UpdateTemplatePermissions API operation for Amazon QuickSight. +// ListTemplates API operation for Amazon QuickSight. // -// Updates the resource permissions for a template. +// Lists all the templates in the current Amazon QuickSight account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateTemplatePermissions for usage and error information. +// API operation ListTemplates for usage and error information. // // Returned Error Types: // * ThrottlingException @@ -7027,12 +7468,12 @@ func (c *QuickSight) UpdateTemplatePermissionsRequest(input *UpdateTemplatePermi // * InvalidParameterValueException // One or more parameters has a value that isn't valid. // -// * ConflictException -// Updating or deleting a resource can cause an inconsistent state. -// // * ResourceNotFoundException // One or more resources can't be found. // +// * InvalidNextTokenException +// The NextToken value isn't valid. +// // * UnsupportedUserEditionException // This error indicates that you are calling an operation on an Amazon QuickSight // subscription where the edition doesn't include support for that operation. @@ -7042,88 +7483,139 @@ func (c *QuickSight) UpdateTemplatePermissionsRequest(input *UpdateTemplatePermi // * InternalFailureException // An internal failure occurred. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplatePermissions -func (c *QuickSight) UpdateTemplatePermissions(input *UpdateTemplatePermissionsInput) (*UpdateTemplatePermissionsOutput, error) { - req, out := c.UpdateTemplatePermissionsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListTemplates +func (c *QuickSight) ListTemplates(input *ListTemplatesInput) (*ListTemplatesOutput, error) { + req, out := c.ListTemplatesRequest(input) return out, req.Send() } -// UpdateTemplatePermissionsWithContext is the same as UpdateTemplatePermissions with the addition of +// ListTemplatesWithContext is the same as ListTemplates with the addition of // the ability to pass a context and additional request options. // -// See UpdateTemplatePermissions for details on how to use this API operation. +// See ListTemplates for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateTemplatePermissionsWithContext(ctx aws.Context, input *UpdateTemplatePermissionsInput, opts ...request.Option) (*UpdateTemplatePermissionsOutput, error) { - req, out := c.UpdateTemplatePermissionsRequest(input) +func (c *QuickSight) ListTemplatesWithContext(ctx aws.Context, input *ListTemplatesInput, opts ...request.Option) (*ListTemplatesOutput, error) { + req, out := c.ListTemplatesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateUser = "UpdateUser" +// ListTemplatesPages iterates over the pages of a ListTemplates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTemplates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTemplates operation. +// pageNum := 0 +// err := client.ListTemplatesPages(params, +// func(page *quicksight.ListTemplatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QuickSight) ListTemplatesPages(input *ListTemplatesInput, fn func(*ListTemplatesOutput, bool) bool) error { + return c.ListTemplatesPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UpdateUserRequest generates a "aws/request.Request" representing the -// client's request for the UpdateUser operation. The "output" return +// ListTemplatesPagesWithContext same as ListTemplatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListTemplatesPagesWithContext(ctx aws.Context, input *ListTemplatesInput, fn func(*ListTemplatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTemplatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTemplatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTemplatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListThemeAliases = "ListThemeAliases" + +// ListThemeAliasesRequest generates a "aws/request.Request" representing the +// client's request for the ListThemeAliases operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateUser for more information on using the UpdateUser +// See ListThemeAliases for more information on using the ListThemeAliases // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // -// // Example sending a request using the UpdateUserRequest method. -// req, resp := client.UpdateUserRequest(params) +// // Example sending a request using the ListThemeAliasesRequest method. +// req, resp := client.ListThemeAliasesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateUser -func (c *QuickSight) UpdateUserRequest(input *UpdateUserInput) (req *request.Request, output *UpdateUserOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListThemeAliases +func (c *QuickSight) ListThemeAliasesRequest(input *ListThemeAliasesInput) (req *request.Request, output *ListThemeAliasesOutput) { op := &request.Operation{ - Name: opUpdateUser, - HTTPMethod: "PUT", - HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}", + Name: opListThemeAliases, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/themes/{ThemeId}/aliases", } if input == nil { - input = &UpdateUserInput{} + input = &ListThemeAliasesInput{} } - output = &UpdateUserOutput{} + output = &ListThemeAliasesOutput{} req = c.newRequest(op, input, output) return } -// UpdateUser API operation for Amazon QuickSight. +// ListThemeAliases API operation for Amazon QuickSight. // -// Updates an Amazon QuickSight user. +// Lists all the aliases of a theme. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon QuickSight's -// API operation UpdateUser for usage and error information. +// API operation ListThemeAliases for usage and error information. // // Returned Error Types: -// * AccessDeniedException -// You don't have access to this item. The provided credentials couldn't be -// validated. You might not be authorized to carry out the request. Make sure -// that your account is authorized to use the Amazon QuickSight service, that -// your policies have the correct permissions, and that you are using the correct -// access keys. +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * InvalidNextTokenException +// The NextToken value isn't valid. // // * InvalidParameterValueException // One or more parameters has a value that isn't valid. @@ -7134,182 +7626,9123 @@ func (c *QuickSight) UpdateUserRequest(input *UpdateUserInput) (req *request.Req // * ThrottlingException // Access is throttled. // +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// // * InternalFailureException // An internal failure occurred. // -// * ResourceUnavailableException -// This resource is currently unavailable. 
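// Editor's note: illustrative sketch only, not part of the vendored SDK. It shows a call to
// the ListThemeAliases operation documented above. The AwsAccountId and ThemeId fields are
// assumed from the HTTP path /accounts/{AwsAccountId}/themes/{ThemeId}/aliases; the values
// are placeholders.
// Assumed imports: "fmt", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/service/quicksight".
func exampleListThemeAliases(svc *quicksight.QuickSight) error {
	out, err := svc.ListThemeAliases(&quicksight.ListThemeAliasesInput{
		AwsAccountId: aws.String("111122223333"),  // placeholder AWS account ID
		ThemeId:      aws.String("example-theme"), // placeholder theme ID
	})
	if err != nil {
		return err
	}
	fmt.Println(out)
	return nil
}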
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateUser -func (c *QuickSight) UpdateUser(input *UpdateUserInput) (*UpdateUserOutput, error) { - req, out := c.UpdateUserRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListThemeAliases +func (c *QuickSight) ListThemeAliases(input *ListThemeAliasesInput) (*ListThemeAliasesOutput, error) { + req, out := c.ListThemeAliasesRequest(input) return out, req.Send() } -// UpdateUserWithContext is the same as UpdateUser with the addition of +// ListThemeAliasesWithContext is the same as ListThemeAliases with the addition of // the ability to pass a context and additional request options. // -// See UpdateUser for details on how to use this API operation. +// See ListThemeAliases for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *QuickSight) UpdateUserWithContext(ctx aws.Context, input *UpdateUserInput, opts ...request.Option) (*UpdateUserOutput, error) { - req, out := c.UpdateUserRequest(input) +func (c *QuickSight) ListThemeAliasesWithContext(ctx aws.Context, input *ListThemeAliasesInput, opts ...request.Option) (*ListThemeAliasesOutput, error) { + req, out := c.ListThemeAliasesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// You don't have access to this item. The provided credentials couldn't be -// validated. You might not be authorized to carry out the request. Make sure -// that your account is authorized to use the Amazon QuickSight service, that -// your policies have the correct permissions, and that you are using the correct -// access keys. -type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata - - Message_ *string `locationName:"Message" type:"string"` - - // The AWS request ID for this request. - RequestId *string `type:"string"` -} - -// String returns the string representation -func (s AccessDeniedException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AccessDeniedException) GoString() string { - return s.String() -} +const opListThemeVersions = "ListThemeVersions" -func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { - return &AccessDeniedException{ - respMetadata: v, +// ListThemeVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListThemeVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListThemeVersions for more information on using the ListThemeVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListThemeVersionsRequest method. 
+// req, resp := client.ListThemeVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListThemeVersions +func (c *QuickSight) ListThemeVersionsRequest(input *ListThemeVersionsInput) (req *request.Request, output *ListThemeVersionsOutput) { + op := &request.Operation{ + Name: opListThemeVersions, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/themes/{ThemeId}/versions", } -} - -// Code returns the exception type name. -func (s AccessDeniedException) Code() string { - return "AccessDeniedException" -} -// Message returns the exception's message. -func (s AccessDeniedException) Message() string { - if s.Message_ != nil { - return *s.Message_ + if input == nil { + input = &ListThemeVersionsInput{} } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { - return nil -} -func (s AccessDeniedException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) + output = &ListThemeVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListThemeVersions API operation for Amazon QuickSight. +// +// Lists all the versions of the themes in the current AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation ListThemeVersions for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidNextTokenException +// The NextToken value isn't valid. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListThemeVersions +func (c *QuickSight) ListThemeVersions(input *ListThemeVersionsInput) (*ListThemeVersionsOutput, error) { + req, out := c.ListThemeVersionsRequest(input) + return out, req.Send() +} + +// ListThemeVersionsWithContext is the same as ListThemeVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListThemeVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListThemeVersionsWithContext(ctx aws.Context, input *ListThemeVersionsInput, opts ...request.Option) (*ListThemeVersionsOutput, error) { + req, out := c.ListThemeVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListThemes = "ListThemes" + +// ListThemesRequest generates a "aws/request.Request" representing the +// client's request for the ListThemes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListThemes for more information on using the ListThemes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListThemesRequest method. +// req, resp := client.ListThemesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListThemes +func (c *QuickSight) ListThemesRequest(input *ListThemesInput) (req *request.Request, output *ListThemesOutput) { + op := &request.Operation{ + Name: opListThemes, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/themes", + } + + if input == nil { + input = &ListThemesInput{} + } + + output = &ListThemesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListThemes API operation for Amazon QuickSight. +// +// Lists all the themes in the current AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation ListThemes for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * InvalidNextTokenException +// The NextToken value isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. 
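+//
+// A minimal usage sketch (illustrative only, not generated documentation). It
+// assumes the standard imports github.com/aws/aws-sdk-go/aws,
+// github.com/aws/aws-sdk-go/aws/session and
+// github.com/aws/aws-sdk-go/service/quicksight, plus a placeholder account ID:
+//
+//    sess := session.Must(session.NewSession())
+//    client := quicksight.New(sess)
+//
+//    out, err := client.ListThemes(&quicksight.ListThemesInput{
+//        AwsAccountId: aws.String("111122223333"), // placeholder account ID
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    for _, theme := range out.ThemeSummaryList {
+//        fmt.Println(aws.StringValue(theme.ThemeId), aws.StringValue(theme.Name))
+//    }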
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListThemes +func (c *QuickSight) ListThemes(input *ListThemesInput) (*ListThemesOutput, error) { + req, out := c.ListThemesRequest(input) + return out, req.Send() +} + +// ListThemesWithContext is the same as ListThemes with the addition of +// the ability to pass a context and additional request options. +// +// See ListThemes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListThemesWithContext(ctx aws.Context, input *ListThemesInput, opts ...request.Option) (*ListThemesOutput, error) { + req, out := c.ListThemesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListUserGroups = "ListUserGroups" + +// ListUserGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListUserGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListUserGroups for more information on using the ListUserGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListUserGroupsRequest method. +// req, resp := client.ListUserGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListUserGroups +func (c *QuickSight) ListUserGroupsRequest(input *ListUserGroupsInput) (req *request.Request, output *ListUserGroupsOutput) { + op := &request.Operation{ + Name: opListUserGroups, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}/groups", + } + + if input == nil { + input = &ListUserGroupsInput{} + } + + output = &ListUserGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListUserGroups API operation for Amazon QuickSight. +// +// Lists the Amazon QuickSight groups that an Amazon QuickSight user is a member +// of. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation ListUserGroups for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. 
+// +// * ThrottlingException +// Access is throttled. +// +// * PreconditionNotMetException +// One or more preconditions aren't met. +// +// * InternalFailureException +// An internal failure occurred. +// +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListUserGroups +func (c *QuickSight) ListUserGroups(input *ListUserGroupsInput) (*ListUserGroupsOutput, error) { + req, out := c.ListUserGroupsRequest(input) + return out, req.Send() +} + +// ListUserGroupsWithContext is the same as ListUserGroups with the addition of +// the ability to pass a context and additional request options. +// +// See ListUserGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListUserGroupsWithContext(ctx aws.Context, input *ListUserGroupsInput, opts ...request.Option) (*ListUserGroupsOutput, error) { + req, out := c.ListUserGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListUsers = "ListUsers" + +// ListUsersRequest generates a "aws/request.Request" representing the +// client's request for the ListUsers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListUsers for more information on using the ListUsers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListUsersRequest method. +// req, resp := client.ListUsersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListUsers +func (c *QuickSight) ListUsersRequest(input *ListUsersInput) (req *request.Request, output *ListUsersOutput) { + op := &request.Operation{ + Name: opListUsers, + HTTPMethod: "GET", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users", + } + + if input == nil { + input = &ListUsersInput{} + } + + output = &ListUsersOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListUsers API operation for Amazon QuickSight. +// +// Returns a list of all of the Amazon QuickSight users belonging to this account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation ListUsers for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. 
Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * InvalidNextTokenException +// The NextToken value isn't valid. +// +// * PreconditionNotMetException +// One or more preconditions aren't met. +// +// * InternalFailureException +// An internal failure occurred. +// +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/ListUsers +func (c *QuickSight) ListUsers(input *ListUsersInput) (*ListUsersOutput, error) { + req, out := c.ListUsersRequest(input) + return out, req.Send() +} + +// ListUsersWithContext is the same as ListUsers with the addition of +// the ability to pass a context and additional request options. +// +// See ListUsers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) ListUsersWithContext(ctx aws.Context, input *ListUsersInput, opts ...request.Option) (*ListUsersOutput, error) { + req, out := c.ListUsersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterUser = "RegisterUser" + +// RegisterUserRequest generates a "aws/request.Request" representing the +// client's request for the RegisterUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterUser for more information on using the RegisterUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RegisterUserRequest method. +// req, resp := client.RegisterUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/RegisterUser +func (c *QuickSight) RegisterUserRequest(input *RegisterUserInput) (req *request.Request, output *RegisterUserOutput) { + op := &request.Operation{ + Name: opRegisterUser, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users", + } + + if input == nil { + input = &RegisterUserInput{} + } + + output = &RegisterUserOutput{} + req = c.newRequest(op, input, output) + return +} + +// RegisterUser API operation for Amazon QuickSight. +// +// Creates an Amazon QuickSight user, whose identity is associated with the +// AWS Identity and Access Management (IAM) identity or role specified in the +// request. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation RegisterUser for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * LimitExceededException +// A limit is exceeded. +// +// * ResourceExistsException +// The resource specified already exists. +// +// * PreconditionNotMetException +// One or more preconditions aren't met. +// +// * InternalFailureException +// An internal failure occurred. +// +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/RegisterUser +func (c *QuickSight) RegisterUser(input *RegisterUserInput) (*RegisterUserOutput, error) { + req, out := c.RegisterUserRequest(input) + return out, req.Send() +} + +// RegisterUserWithContext is the same as RegisterUser with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) RegisterUserWithContext(ctx aws.Context, input *RegisterUserInput, opts ...request.Option) (*RegisterUserOutput, error) { + req, out := c.RegisterUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreAnalysis = "RestoreAnalysis" + +// RestoreAnalysisRequest generates a "aws/request.Request" representing the +// client's request for the RestoreAnalysis operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreAnalysis for more information on using the RestoreAnalysis +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreAnalysisRequest method. 
+// req, resp := client.RestoreAnalysisRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/RestoreAnalysis +func (c *QuickSight) RestoreAnalysisRequest(input *RestoreAnalysisInput) (req *request.Request, output *RestoreAnalysisOutput) { + op := &request.Operation{ + Name: opRestoreAnalysis, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/restore/analyses/{AnalysisId}", + } + + if input == nil { + input = &RestoreAnalysisInput{} + } + + output = &RestoreAnalysisOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreAnalysis API operation for Amazon QuickSight. +// +// Restores an analysis. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation RestoreAnalysis for usage and error information. +// +// Returned Error Types: +// * ThrottlingException +// Access is throttled. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/RestoreAnalysis +func (c *QuickSight) RestoreAnalysis(input *RestoreAnalysisInput) (*RestoreAnalysisOutput, error) { + req, out := c.RestoreAnalysisRequest(input) + return out, req.Send() +} + +// RestoreAnalysisWithContext is the same as RestoreAnalysis with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreAnalysis for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) RestoreAnalysisWithContext(ctx aws.Context, input *RestoreAnalysisInput, opts ...request.Option) (*RestoreAnalysisOutput, error) { + req, out := c.RestoreAnalysisRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSearchAnalyses = "SearchAnalyses" + +// SearchAnalysesRequest generates a "aws/request.Request" representing the +// client's request for the SearchAnalyses operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SearchAnalyses for more information on using the SearchAnalyses +// API call, and error handling. 
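+//
+// A hedged usage sketch (illustrative only): filtering analyses by their owner,
+// assuming a client built with quicksight.New and placeholder account and user
+// ARNs. The QUICKSIGHT_USER / StringEquals filter follows the service's
+// search-filter convention; verify the exact values against the API reference:
+//
+//    out, err := client.SearchAnalyses(&quicksight.SearchAnalysesInput{
+//        AwsAccountId: aws.String("111122223333"),
+//        Filters: []*quicksight.AnalysisSearchFilter{{
+//            Name:     aws.String("QUICKSIGHT_USER"),
+//            Operator: aws.String("StringEquals"),
+//            Value:    aws.String("arn:aws:quicksight:us-east-1:111122223333:user/default/example-user"),
+//        }},
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    for _, a := range out.AnalysisSummaryList {
+//        fmt.Println(aws.StringValue(a.AnalysisId), aws.StringValue(a.Name))
+//    }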
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SearchAnalysesRequest method. +// req, resp := client.SearchAnalysesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/SearchAnalyses +func (c *QuickSight) SearchAnalysesRequest(input *SearchAnalysesInput) (req *request.Request, output *SearchAnalysesOutput) { + op := &request.Operation{ + Name: opSearchAnalyses, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/search/analyses", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &SearchAnalysesInput{} + } + + output = &SearchAnalysesOutput{} + req = c.newRequest(op, input, output) + return +} + +// SearchAnalyses API operation for Amazon QuickSight. +// +// Searches for analyses that belong to the user specified in the filter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation SearchAnalyses for usage and error information. +// +// Returned Error Types: +// * ThrottlingException +// Access is throttled. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InvalidNextTokenException +// The NextToken value isn't valid. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/SearchAnalyses +func (c *QuickSight) SearchAnalyses(input *SearchAnalysesInput) (*SearchAnalysesOutput, error) { + req, out := c.SearchAnalysesRequest(input) + return out, req.Send() +} + +// SearchAnalysesWithContext is the same as SearchAnalyses with the addition of +// the ability to pass a context and additional request options. +// +// See SearchAnalyses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) SearchAnalysesWithContext(ctx aws.Context, input *SearchAnalysesInput, opts ...request.Option) (*SearchAnalysesOutput, error) { + req, out := c.SearchAnalysesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// SearchAnalysesPages iterates over the pages of a SearchAnalyses operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See SearchAnalyses method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a SearchAnalyses operation. +// pageNum := 0 +// err := client.SearchAnalysesPages(params, +// func(page *quicksight.SearchAnalysesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QuickSight) SearchAnalysesPages(input *SearchAnalysesInput, fn func(*SearchAnalysesOutput, bool) bool) error { + return c.SearchAnalysesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// SearchAnalysesPagesWithContext same as SearchAnalysesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) SearchAnalysesPagesWithContext(ctx aws.Context, input *SearchAnalysesInput, fn func(*SearchAnalysesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *SearchAnalysesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.SearchAnalysesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*SearchAnalysesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opSearchDashboards = "SearchDashboards" + +// SearchDashboardsRequest generates a "aws/request.Request" representing the +// client's request for the SearchDashboards operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SearchDashboards for more information on using the SearchDashboards +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SearchDashboardsRequest method. +// req, resp := client.SearchDashboardsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/SearchDashboards +func (c *QuickSight) SearchDashboardsRequest(input *SearchDashboardsInput) (req *request.Request, output *SearchDashboardsOutput) { + op := &request.Operation{ + Name: opSearchDashboards, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/search/dashboards", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &SearchDashboardsInput{} + } + + output = &SearchDashboardsOutput{} + req = c.newRequest(op, input, output) + return +} + +// SearchDashboards API operation for Amazon QuickSight. +// +// Searches for dashboards that belong to a user. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation SearchDashboards for usage and error information. +// +// Returned Error Types: +// * ThrottlingException +// Access is throttled. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InvalidNextTokenException +// The NextToken value isn't valid. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/SearchDashboards +func (c *QuickSight) SearchDashboards(input *SearchDashboardsInput) (*SearchDashboardsOutput, error) { + req, out := c.SearchDashboardsRequest(input) + return out, req.Send() +} + +// SearchDashboardsWithContext is the same as SearchDashboards with the addition of +// the ability to pass a context and additional request options. +// +// See SearchDashboards for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) SearchDashboardsWithContext(ctx aws.Context, input *SearchDashboardsInput, opts ...request.Option) (*SearchDashboardsOutput, error) { + req, out := c.SearchDashboardsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// SearchDashboardsPages iterates over the pages of a SearchDashboards operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See SearchDashboards method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a SearchDashboards operation. +// pageNum := 0 +// err := client.SearchDashboardsPages(params, +// func(page *quicksight.SearchDashboardsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *QuickSight) SearchDashboardsPages(input *SearchDashboardsInput, fn func(*SearchDashboardsOutput, bool) bool) error { + return c.SearchDashboardsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// SearchDashboardsPagesWithContext same as SearchDashboardsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
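+//
+// A sketch of paging with a cancellable context (illustrative only; assumes the
+// standard context and time imports, a client built with quicksight.New, and
+// placeholder identifiers):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//
+//    err := client.SearchDashboardsPagesWithContext(ctx,
+//        &quicksight.SearchDashboardsInput{
+//            AwsAccountId: aws.String("111122223333"),
+//            Filters: []*quicksight.DashboardSearchFilter{{
+//                Name:     aws.String("QUICKSIGHT_USER"),
+//                Operator: aws.String("StringEquals"),
+//                Value:    aws.String("arn:aws:quicksight:us-east-1:111122223333:user/default/example-user"),
+//            }},
+//        },
+//        func(page *quicksight.SearchDashboardsOutput, lastPage bool) bool {
+//            for _, d := range page.DashboardSummaryList {
+//                fmt.Println(aws.StringValue(d.DashboardId), aws.StringValue(d.Name))
+//            }
+//            return true // keep requesting pages until lastPage
+//        })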
+func (c *QuickSight) SearchDashboardsPagesWithContext(ctx aws.Context, input *SearchDashboardsInput, fn func(*SearchDashboardsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *SearchDashboardsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.SearchDashboardsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*SearchDashboardsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/TagResource +func (c *QuickSight) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/resources/{ResourceArn}/tags", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// TagResource API operation for Amazon QuickSight. +// +// Assigns one or more tags (key-value pairs) to the specified QuickSight resource. +// +// Tags can help you organize and categorize your resources. You can also use +// them to scope user permissions, by granting a user permission to access or +// change only resources with certain tag values. You can use the TagResource +// operation with a resource that already has tags. If you specify a new tag +// key for the resource, this tag is appended to the list of tags associated +// with the resource. If you specify a tag key that is already associated with +// the resource, the new tag value that you specify replaces the previous value +// for that tag. +// +// You can associate as many as 50 tags with a resource. QuickSight supports +// tagging on data set, data source, dashboard, and template. +// +// Tagging for QuickSight works in a similar way to tagging for other AWS services, +// except for the following: +// +// * You can't use tags to track AWS costs for QuickSight. This restriction +// is because QuickSight costs are based on users and SPICE capacity, which +// aren't taggable resources. +// +// * QuickSight doesn't currently support the Tag Editor for AWS Resource +// Groups. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
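+//
+// A minimal usage sketch (illustrative only; the resource ARN and tag values
+// below are placeholders, and the client is assumed to be built with
+// quicksight.New):
+//
+//    _, err := client.TagResource(&quicksight.TagResourceInput{
+//        ResourceArn: aws.String("arn:aws:quicksight:us-east-1:111122223333:dashboard/example-dashboard-id"),
+//        Tags: []*quicksight.Tag{
+//            {Key: aws.String("team"), Value: aws.String("analytics")},
+//            {Key: aws.String("env"), Value: aws.String("prod")},
+//        },
+//    })
+//    if err != nil {
+//        return err
+//    }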
+// +// See the AWS API reference guide for Amazon QuickSight's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * LimitExceededException +// A limit is exceeded. +// +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/TagResource +func (c *QuickSight) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UntagResource +func (c *QuickSight) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/resources/{ResourceArn}/tags", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// UntagResource API operation for Amazon QuickSight. +// +// Removes a tag or tags from a resource. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UntagResource +func (c *QuickSight) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateAccountCustomization = "UpdateAccountCustomization" + +// UpdateAccountCustomizationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAccountCustomization operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAccountCustomization for more information on using the UpdateAccountCustomization +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAccountCustomizationRequest method. 
+//    req, resp := client.UpdateAccountCustomizationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateAccountCustomization
+func (c *QuickSight) UpdateAccountCustomizationRequest(input *UpdateAccountCustomizationInput) (req *request.Request, output *UpdateAccountCustomizationOutput) {
+	op := &request.Operation{
+		Name:       opUpdateAccountCustomization,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/accounts/{AwsAccountId}/customizations",
+	}
+
+	if input == nil {
+		input = &UpdateAccountCustomizationInput{}
+	}
+
+	output = &UpdateAccountCustomizationOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UpdateAccountCustomization API operation for Amazon QuickSight.
+//
+// Updates Amazon QuickSight customizations for the current AWS Region. Currently,
+// the only customization you can use is a theme.
+//
+// You can use customizations for your AWS account or, if you specify a namespace,
+// for a QuickSight namespace instead. Customizations that apply to a namespace
+// override customizations that apply to an AWS account. To find out which customizations
+// apply, use the DescribeAccountCustomization API operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon QuickSight's
+// API operation UpdateAccountCustomization for usage and error information.
+//
+// Returned Error Types:
+// * AccessDeniedException
+// You don't have access to this item. The provided credentials couldn't be
+// validated. You might not be authorized to carry out the request. Make sure
+// that your account is authorized to use the Amazon QuickSight service, that
+// your policies have the correct permissions, and that you are using the correct
+// access keys.
+//
+// * InvalidParameterValueException
+// One or more parameters has a value that isn't valid.
+//
+// * ResourceNotFoundException
+// One or more resources can't be found.
+//
+// * ThrottlingException
+// Access is throttled.
+//
+// * InternalFailureException
+// An internal failure occurred.
+//
+// * ResourceUnavailableException
+// This resource is currently unavailable.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateAccountCustomization
+func (c *QuickSight) UpdateAccountCustomization(input *UpdateAccountCustomizationInput) (*UpdateAccountCustomizationOutput, error) {
+	req, out := c.UpdateAccountCustomizationRequest(input)
+	return out, req.Send()
+}
+
+// UpdateAccountCustomizationWithContext is the same as UpdateAccountCustomization with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateAccountCustomization for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *QuickSight) UpdateAccountCustomizationWithContext(ctx aws.Context, input *UpdateAccountCustomizationInput, opts ...request.Option) (*UpdateAccountCustomizationOutput, error) {
+	req, out := c.UpdateAccountCustomizationRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+ return out, req.Send() +} + +const opUpdateAccountSettings = "UpdateAccountSettings" + +// UpdateAccountSettingsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAccountSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAccountSettings for more information on using the UpdateAccountSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAccountSettingsRequest method. +// req, resp := client.UpdateAccountSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateAccountSettings +func (c *QuickSight) UpdateAccountSettingsRequest(input *UpdateAccountSettingsInput) (req *request.Request, output *UpdateAccountSettingsOutput) { + op := &request.Operation{ + Name: opUpdateAccountSettings, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/settings", + } + + if input == nil { + input = &UpdateAccountSettingsInput{} + } + + output = &UpdateAccountSettingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateAccountSettings API operation for Amazon QuickSight. +// +// Updates the Amazon QuickSight settings in your AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateAccountSettings for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * InternalFailureException +// An internal failure occurred. +// +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateAccountSettings +func (c *QuickSight) UpdateAccountSettings(input *UpdateAccountSettingsInput) (*UpdateAccountSettingsOutput, error) { + req, out := c.UpdateAccountSettingsRequest(input) + return out, req.Send() +} + +// UpdateAccountSettingsWithContext is the same as UpdateAccountSettings with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAccountSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateAccountSettingsWithContext(ctx aws.Context, input *UpdateAccountSettingsInput, opts ...request.Option) (*UpdateAccountSettingsOutput, error) { + req, out := c.UpdateAccountSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateAnalysis = "UpdateAnalysis" + +// UpdateAnalysisRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAnalysis operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAnalysis for more information on using the UpdateAnalysis +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAnalysisRequest method. +// req, resp := client.UpdateAnalysisRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateAnalysis +func (c *QuickSight) UpdateAnalysisRequest(input *UpdateAnalysisInput) (req *request.Request, output *UpdateAnalysisOutput) { + op := &request.Operation{ + Name: opUpdateAnalysis, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/analyses/{AnalysisId}", + } + + if input == nil { + input = &UpdateAnalysisInput{} + } + + output = &UpdateAnalysisOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateAnalysis API operation for Amazon QuickSight. +// +// Updates an analysis in Amazon QuickSight +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateAnalysis for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ThrottlingException +// Access is throttled. +// +// * ResourceExistsException +// The resource specified already exists. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. 
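+//
+// A sketch of inspecting these error types with awserr (illustrative only;
+// assumes the github.com/aws/aws-sdk-go/aws/awserr import and an input value
+// prepared by the caller, here named input):
+//
+//    _, err := client.UpdateAnalysis(input)
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            switch aerr.Code() {
+//            case quicksight.ErrCodeResourceNotFoundException:
+//                // the analysis or AWS account was not found
+//            case quicksight.ErrCodeThrottlingException:
+//                // back off and retry later
+//            default:
+//                fmt.Println(aerr.Code(), aerr.Message())
+//            }
+//        }
+//        return err
+//    }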
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateAnalysis +func (c *QuickSight) UpdateAnalysis(input *UpdateAnalysisInput) (*UpdateAnalysisOutput, error) { + req, out := c.UpdateAnalysisRequest(input) + return out, req.Send() +} + +// UpdateAnalysisWithContext is the same as UpdateAnalysis with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAnalysis for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateAnalysisWithContext(ctx aws.Context, input *UpdateAnalysisInput, opts ...request.Option) (*UpdateAnalysisOutput, error) { + req, out := c.UpdateAnalysisRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateAnalysisPermissions = "UpdateAnalysisPermissions" + +// UpdateAnalysisPermissionsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAnalysisPermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAnalysisPermissions for more information on using the UpdateAnalysisPermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAnalysisPermissionsRequest method. +// req, resp := client.UpdateAnalysisPermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateAnalysisPermissions +func (c *QuickSight) UpdateAnalysisPermissionsRequest(input *UpdateAnalysisPermissionsInput) (req *request.Request, output *UpdateAnalysisPermissionsOutput) { + op := &request.Operation{ + Name: opUpdateAnalysisPermissions, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/analyses/{AnalysisId}/permissions", + } + + if input == nil { + input = &UpdateAnalysisPermissionsInput{} + } + + output = &UpdateAnalysisPermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateAnalysisPermissions API operation for Amazon QuickSight. +// +// Updates the read and write permissions for an analysis. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateAnalysisPermissions for usage and error information. +// +// Returned Error Types: +// * ThrottlingException +// Access is throttled. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. 
+// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateAnalysisPermissions +func (c *QuickSight) UpdateAnalysisPermissions(input *UpdateAnalysisPermissionsInput) (*UpdateAnalysisPermissionsOutput, error) { + req, out := c.UpdateAnalysisPermissionsRequest(input) + return out, req.Send() +} + +// UpdateAnalysisPermissionsWithContext is the same as UpdateAnalysisPermissions with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAnalysisPermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateAnalysisPermissionsWithContext(ctx aws.Context, input *UpdateAnalysisPermissionsInput, opts ...request.Option) (*UpdateAnalysisPermissionsOutput, error) { + req, out := c.UpdateAnalysisPermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDashboard = "UpdateDashboard" + +// UpdateDashboardRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDashboard operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDashboard for more information on using the UpdateDashboard +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDashboardRequest method. +// req, resp := client.UpdateDashboardRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboard +func (c *QuickSight) UpdateDashboardRequest(input *UpdateDashboardInput) (req *request.Request, output *UpdateDashboardOutput) { + op := &request.Operation{ + Name: opUpdateDashboard, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}", + } + + if input == nil { + input = &UpdateDashboardInput{} + } + + output = &UpdateDashboardOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDashboard API operation for Amazon QuickSight. +// +// Updates a dashboard in an AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
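+//
+// A hedged usage sketch (illustrative only; all ARNs and IDs are placeholders,
+// and the source-template layout simply mirrors the request shapes used by the
+// dashboard APIs):
+//
+//    _, err := client.UpdateDashboard(&quicksight.UpdateDashboardInput{
+//        AwsAccountId: aws.String("111122223333"),
+//        DashboardId:  aws.String("example-dashboard-id"),
+//        Name:         aws.String("Sales dashboard v2"),
+//        SourceEntity: &quicksight.DashboardSourceEntity{
+//            SourceTemplate: &quicksight.DashboardSourceTemplate{
+//                Arn: aws.String("arn:aws:quicksight:us-east-1:111122223333:template/example-template"),
+//                DataSetReferences: []*quicksight.DataSetReference{{
+//                    DataSetArn:         aws.String("arn:aws:quicksight:us-east-1:111122223333:dataset/example-dataset"),
+//                    DataSetPlaceholder: aws.String("ds1"),
+//                }},
+//            },
+//        },
+//    })
+//    if err != nil {
+//        return err
+//    }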
+// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateDashboard for usage and error information. +// +// Returned Error Types: +// * ThrottlingException +// Access is throttled. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * LimitExceededException +// A limit is exceeded. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboard +func (c *QuickSight) UpdateDashboard(input *UpdateDashboardInput) (*UpdateDashboardOutput, error) { + req, out := c.UpdateDashboardRequest(input) + return out, req.Send() +} + +// UpdateDashboardWithContext is the same as UpdateDashboard with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDashboard for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateDashboardWithContext(ctx aws.Context, input *UpdateDashboardInput, opts ...request.Option) (*UpdateDashboardOutput, error) { + req, out := c.UpdateDashboardRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDashboardPermissions = "UpdateDashboardPermissions" + +// UpdateDashboardPermissionsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDashboardPermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDashboardPermissions for more information on using the UpdateDashboardPermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDashboardPermissionsRequest method. 
+// req, resp := client.UpdateDashboardPermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboardPermissions +func (c *QuickSight) UpdateDashboardPermissionsRequest(input *UpdateDashboardPermissionsInput) (req *request.Request, output *UpdateDashboardPermissionsOutput) { + op := &request.Operation{ + Name: opUpdateDashboardPermissions, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/permissions", + } + + if input == nil { + input = &UpdateDashboardPermissionsInput{} + } + + output = &UpdateDashboardPermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDashboardPermissions API operation for Amazon QuickSight. +// +// Updates read and write permissions on a dashboard. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateDashboardPermissions for usage and error information. +// +// Returned Error Types: +// * ThrottlingException +// Access is throttled. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboardPermissions +func (c *QuickSight) UpdateDashboardPermissions(input *UpdateDashboardPermissionsInput) (*UpdateDashboardPermissionsOutput, error) { + req, out := c.UpdateDashboardPermissionsRequest(input) + return out, req.Send() +} + +// UpdateDashboardPermissionsWithContext is the same as UpdateDashboardPermissions with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDashboardPermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateDashboardPermissionsWithContext(ctx aws.Context, input *UpdateDashboardPermissionsInput, opts ...request.Option) (*UpdateDashboardPermissionsOutput, error) { + req, out := c.UpdateDashboardPermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDashboardPublishedVersion = "UpdateDashboardPublishedVersion" + +// UpdateDashboardPublishedVersionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDashboardPublishedVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDashboardPublishedVersion for more information on using the UpdateDashboardPublishedVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDashboardPublishedVersionRequest method. +// req, resp := client.UpdateDashboardPublishedVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboardPublishedVersion +func (c *QuickSight) UpdateDashboardPublishedVersionRequest(input *UpdateDashboardPublishedVersionInput) (req *request.Request, output *UpdateDashboardPublishedVersionOutput) { + op := &request.Operation{ + Name: opUpdateDashboardPublishedVersion, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/dashboards/{DashboardId}/versions/{VersionNumber}", + } + + if input == nil { + input = &UpdateDashboardPublishedVersionInput{} + } + + output = &UpdateDashboardPublishedVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDashboardPublishedVersion API operation for Amazon QuickSight. +// +// Updates the published version of a dashboard. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateDashboardPublishedVersion for usage and error information. +// +// Returned Error Types: +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ThrottlingException +// Access is throttled. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDashboardPublishedVersion +func (c *QuickSight) UpdateDashboardPublishedVersion(input *UpdateDashboardPublishedVersionInput) (*UpdateDashboardPublishedVersionOutput, error) { + req, out := c.UpdateDashboardPublishedVersionRequest(input) + return out, req.Send() +} + +// UpdateDashboardPublishedVersionWithContext is the same as UpdateDashboardPublishedVersion with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDashboardPublishedVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
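+//
+// // A hypothetical sketch of supplying a deadline; "client" and "params" are
+// // assumed to be defined as in the example above, and the standard context
+// // and time packages are assumed to be imported.
+// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+// defer cancel()
+// out, err := client.UpdateDashboardPublishedVersionWithContext(ctx, params)
+// fmt.Println(out, err) // err should reflect the cancellation if the deadline passes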
+func (c *QuickSight) UpdateDashboardPublishedVersionWithContext(ctx aws.Context, input *UpdateDashboardPublishedVersionInput, opts ...request.Option) (*UpdateDashboardPublishedVersionOutput, error) { + req, out := c.UpdateDashboardPublishedVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDataSet = "UpdateDataSet" + +// UpdateDataSetRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDataSet operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDataSet for more information on using the UpdateDataSet +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDataSetRequest method. +// req, resp := client.UpdateDataSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSet +func (c *QuickSight) UpdateDataSetRequest(input *UpdateDataSetInput) (req *request.Request, output *UpdateDataSetOutput) { + op := &request.Operation{ + Name: opUpdateDataSet, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}", + } + + if input == nil { + input = &UpdateDataSetInput{} + } + + output = &UpdateDataSetOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDataSet API operation for Amazon QuickSight. +// +// Updates a dataset. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateDataSet for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * LimitExceededException +// A limit is exceeded. +// +// * ThrottlingException +// Access is throttled. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. 
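+//
+// // A hypothetical sketch of branching on several of the codes above; "client"
+// // and "params" are assumed to be defined as in the example above.
+// if _, err := client.UpdateDataSet(params); err != nil {
+// if aerr, ok := err.(awserr.Error); ok {
+// switch aerr.Code() {
+// case quicksight.ErrCodeThrottlingException, quicksight.ErrCodeLimitExceededException:
+// // back off and retry later
+// case quicksight.ErrCodeConflictException:
+// // resolve the conflicting update before retrying
+// default:
+// fmt.Println(aerr.Code(), aerr.Message())
+// }
+// }
+// }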
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSet +func (c *QuickSight) UpdateDataSet(input *UpdateDataSetInput) (*UpdateDataSetOutput, error) { + req, out := c.UpdateDataSetRequest(input) + return out, req.Send() +} + +// UpdateDataSetWithContext is the same as UpdateDataSet with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDataSet for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateDataSetWithContext(ctx aws.Context, input *UpdateDataSetInput, opts ...request.Option) (*UpdateDataSetOutput, error) { + req, out := c.UpdateDataSetRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDataSetPermissions = "UpdateDataSetPermissions" + +// UpdateDataSetPermissionsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDataSetPermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDataSetPermissions for more information on using the UpdateDataSetPermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDataSetPermissionsRequest method. +// req, resp := client.UpdateDataSetPermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSetPermissions +func (c *QuickSight) UpdateDataSetPermissionsRequest(input *UpdateDataSetPermissionsInput) (req *request.Request, output *UpdateDataSetPermissionsOutput) { + op := &request.Operation{ + Name: opUpdateDataSetPermissions, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/data-sets/{DataSetId}/permissions", + } + + if input == nil { + input = &UpdateDataSetPermissionsInput{} + } + + output = &UpdateDataSetPermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDataSetPermissions API operation for Amazon QuickSight. +// +// Updates the permissions on a dataset. +// +// The permissions resource is arn:aws:quicksight:region:aws-account-id:dataset/data-set-id. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateDataSetPermissions for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. 
+// Make sure
+// that your account is authorized to use the Amazon QuickSight service, that
+// your policies have the correct permissions, and that you are using the correct
+// access keys.
+//
+// * ConflictException
+// Updating or deleting a resource can cause an inconsistent state.
+//
+// * InvalidParameterValueException
+// One or more parameters has a value that isn't valid.
+//
+// * ResourceNotFoundException
+// One or more resources can't be found.
+//
+// * ThrottlingException
+// Access is throttled.
+//
+// * InternalFailureException
+// An internal failure occurred.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSetPermissions
+func (c *QuickSight) UpdateDataSetPermissions(input *UpdateDataSetPermissionsInput) (*UpdateDataSetPermissionsOutput, error) {
+ req, out := c.UpdateDataSetPermissionsRequest(input)
+ return out, req.Send()
+}
+
+// UpdateDataSetPermissionsWithContext is the same as UpdateDataSetPermissions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateDataSetPermissions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *QuickSight) UpdateDataSetPermissionsWithContext(ctx aws.Context, input *UpdateDataSetPermissionsInput, opts ...request.Option) (*UpdateDataSetPermissionsOutput, error) {
+ req, out := c.UpdateDataSetPermissionsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateDataSource = "UpdateDataSource"
+
+// UpdateDataSourceRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateDataSource operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateDataSource for more information on using the UpdateDataSource
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateDataSourceRequest method.
+// req, resp := client.UpdateDataSourceRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSource
+func (c *QuickSight) UpdateDataSourceRequest(input *UpdateDataSourceInput) (req *request.Request, output *UpdateDataSourceOutput) {
+ op := &request.Operation{
+ Name: opUpdateDataSource,
+ HTTPMethod: "PUT",
+ HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}",
+ }
+
+ if input == nil {
+ input = &UpdateDataSourceInput{}
+ }
+
+ output = &UpdateDataSourceOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// UpdateDataSource API operation for Amazon QuickSight.
+//
+// Updates a data source.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
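+//
+// // A hypothetical sketch of surfacing an access problem specifically; "client"
+// // and "params" are assumed to be defined as in the example above.
+// if _, err := client.UpdateDataSource(params); err != nil {
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == quicksight.ErrCodeAccessDeniedException {
+// fmt.Println("check QuickSight permissions:", aerr.Message())
+// }
+// }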
+// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateDataSource for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ThrottlingException +// Access is throttled. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSource +func (c *QuickSight) UpdateDataSource(input *UpdateDataSourceInput) (*UpdateDataSourceOutput, error) { + req, out := c.UpdateDataSourceRequest(input) + return out, req.Send() +} + +// UpdateDataSourceWithContext is the same as UpdateDataSource with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDataSource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateDataSourceWithContext(ctx aws.Context, input *UpdateDataSourceInput, opts ...request.Option) (*UpdateDataSourceOutput, error) { + req, out := c.UpdateDataSourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateDataSourcePermissions = "UpdateDataSourcePermissions" + +// UpdateDataSourcePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDataSourcePermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateDataSourcePermissions for more information on using the UpdateDataSourcePermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateDataSourcePermissionsRequest method. 
+// req, resp := client.UpdateDataSourcePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSourcePermissions +func (c *QuickSight) UpdateDataSourcePermissionsRequest(input *UpdateDataSourcePermissionsInput) (req *request.Request, output *UpdateDataSourcePermissionsOutput) { + op := &request.Operation{ + Name: opUpdateDataSourcePermissions, + HTTPMethod: "POST", + HTTPPath: "/accounts/{AwsAccountId}/data-sources/{DataSourceId}/permissions", + } + + if input == nil { + input = &UpdateDataSourcePermissionsInput{} + } + + output = &UpdateDataSourcePermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateDataSourcePermissions API operation for Amazon QuickSight. +// +// Updates the permissions to a data source. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateDataSourcePermissions for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateDataSourcePermissions +func (c *QuickSight) UpdateDataSourcePermissions(input *UpdateDataSourcePermissionsInput) (*UpdateDataSourcePermissionsOutput, error) { + req, out := c.UpdateDataSourcePermissionsRequest(input) + return out, req.Send() +} + +// UpdateDataSourcePermissionsWithContext is the same as UpdateDataSourcePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateDataSourcePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateDataSourcePermissionsWithContext(ctx aws.Context, input *UpdateDataSourcePermissionsInput, opts ...request.Option) (*UpdateDataSourcePermissionsOutput, error) { + req, out := c.UpdateDataSourcePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateGroup = "UpdateGroup" + +// UpdateGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGroup for more information on using the UpdateGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateGroupRequest method. +// req, resp := client.UpdateGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateGroup +func (c *QuickSight) UpdateGroupRequest(input *UpdateGroupInput) (req *request.Request, output *UpdateGroupOutput) { + op := &request.Operation{ + Name: opUpdateGroup, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/groups/{GroupName}", + } + + if input == nil { + input = &UpdateGroupInput{} + } + + output = &UpdateGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateGroup API operation for Amazon QuickSight. +// +// Changes a group description. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateGroup for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * PreconditionNotMetException +// One or more preconditions aren't met. +// +// * InternalFailureException +// An internal failure occurred. +// +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateGroup +func (c *QuickSight) UpdateGroup(input *UpdateGroupInput) (*UpdateGroupOutput, error) { + req, out := c.UpdateGroupRequest(input) + return out, req.Send() +} + +// UpdateGroupWithContext is the same as UpdateGroup with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateGroupWithContext(ctx aws.Context, input *UpdateGroupInput, opts ...request.Option) (*UpdateGroupOutput, error) { + req, out := c.UpdateGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
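+ // Send the request using the context and options applied above; out is only valid when the returned error is nil.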
+ return out, req.Send() +} + +const opUpdateIAMPolicyAssignment = "UpdateIAMPolicyAssignment" + +// UpdateIAMPolicyAssignmentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateIAMPolicyAssignment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateIAMPolicyAssignment for more information on using the UpdateIAMPolicyAssignment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateIAMPolicyAssignmentRequest method. +// req, resp := client.UpdateIAMPolicyAssignmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateIAMPolicyAssignment +func (c *QuickSight) UpdateIAMPolicyAssignmentRequest(input *UpdateIAMPolicyAssignmentInput) (req *request.Request, output *UpdateIAMPolicyAssignmentOutput) { + op := &request.Operation{ + Name: opUpdateIAMPolicyAssignment, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/iam-policy-assignments/{AssignmentName}", + } + + if input == nil { + input = &UpdateIAMPolicyAssignmentInput{} + } + + output = &UpdateIAMPolicyAssignmentOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateIAMPolicyAssignment API operation for Amazon QuickSight. +// +// Updates an existing IAM policy assignment. This operation updates only the +// optional parameter or parameters that are specified in the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateIAMPolicyAssignment for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceExistsException +// The resource specified already exists. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * ConcurrentUpdatingException +// A resource is already in a state that indicates an operation is happening +// that must complete before a new update can be applied. +// +// * InternalFailureException +// An internal failure occurred. 
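+//
+// // A hypothetical sketch of detecting the concurrent-update case above;
+// // "client" and "params" are assumed to be defined as in the example above.
+// if _, err := client.UpdateIAMPolicyAssignment(params); err != nil {
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == quicksight.ErrCodeConcurrentUpdatingException {
+// fmt.Println(aerr.Message()) // another update to this assignment is still in progress; try again later
+// }
+// }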
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateIAMPolicyAssignment +func (c *QuickSight) UpdateIAMPolicyAssignment(input *UpdateIAMPolicyAssignmentInput) (*UpdateIAMPolicyAssignmentOutput, error) { + req, out := c.UpdateIAMPolicyAssignmentRequest(input) + return out, req.Send() +} + +// UpdateIAMPolicyAssignmentWithContext is the same as UpdateIAMPolicyAssignment with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateIAMPolicyAssignment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateIAMPolicyAssignmentWithContext(ctx aws.Context, input *UpdateIAMPolicyAssignmentInput, opts ...request.Option) (*UpdateIAMPolicyAssignmentOutput, error) { + req, out := c.UpdateIAMPolicyAssignmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTemplate = "UpdateTemplate" + +// UpdateTemplateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTemplate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTemplate for more information on using the UpdateTemplate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateTemplateRequest method. +// req, resp := client.UpdateTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplate +func (c *QuickSight) UpdateTemplateRequest(input *UpdateTemplateInput) (req *request.Request, output *UpdateTemplateOutput) { + op := &request.Operation{ + Name: opUpdateTemplate, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}", + } + + if input == nil { + input = &UpdateTemplateInput{} + } + + output = &UpdateTemplateOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateTemplate API operation for Amazon QuickSight. +// +// Updates a template from an existing Amazon QuickSight analysis or another +// template. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateTemplate for usage and error information. +// +// Returned Error Types: +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceExistsException +// The resource specified already exists. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. 
+// +// * LimitExceededException +// A limit is exceeded. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplate +func (c *QuickSight) UpdateTemplate(input *UpdateTemplateInput) (*UpdateTemplateOutput, error) { + req, out := c.UpdateTemplateRequest(input) + return out, req.Send() +} + +// UpdateTemplateWithContext is the same as UpdateTemplate with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTemplate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateTemplateWithContext(ctx aws.Context, input *UpdateTemplateInput, opts ...request.Option) (*UpdateTemplateOutput, error) { + req, out := c.UpdateTemplateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTemplateAlias = "UpdateTemplateAlias" + +// UpdateTemplateAliasRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTemplateAlias operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTemplateAlias for more information on using the UpdateTemplateAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateTemplateAliasRequest method. +// req, resp := client.UpdateTemplateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplateAlias +func (c *QuickSight) UpdateTemplateAliasRequest(input *UpdateTemplateAliasInput) (req *request.Request, output *UpdateTemplateAliasOutput) { + op := &request.Operation{ + Name: opUpdateTemplateAlias, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/aliases/{AliasName}", + } + + if input == nil { + input = &UpdateTemplateAliasInput{} + } + + output = &UpdateTemplateAliasOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateTemplateAlias API operation for Amazon QuickSight. +// +// Updates the template alias of a template. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateTemplateAlias for usage and error information. 
+//
+// Returned Error Types:
+// * ThrottlingException
+// Access is throttled.
+//
+// * ResourceNotFoundException
+// One or more resources can't be found.
+//
+// * ConflictException
+// Updating or deleting a resource can cause an inconsistent state.
+//
+// * UnsupportedUserEditionException
+// This error indicates that you are calling an operation on an Amazon QuickSight
+// subscription where the edition doesn't include support for that operation.
+// Amazon QuickSight currently has Standard Edition and Enterprise Edition.
+// Not every operation and capability is available in every edition.
+//
+// * InternalFailureException
+// An internal failure occurred.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplateAlias
+func (c *QuickSight) UpdateTemplateAlias(input *UpdateTemplateAliasInput) (*UpdateTemplateAliasOutput, error) {
+ req, out := c.UpdateTemplateAliasRequest(input)
+ return out, req.Send()
+}
+
+// UpdateTemplateAliasWithContext is the same as UpdateTemplateAlias with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateTemplateAlias for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *QuickSight) UpdateTemplateAliasWithContext(ctx aws.Context, input *UpdateTemplateAliasInput, opts ...request.Option) (*UpdateTemplateAliasOutput, error) {
+ req, out := c.UpdateTemplateAliasRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateTemplatePermissions = "UpdateTemplatePermissions"
+
+// UpdateTemplatePermissionsRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateTemplatePermissions operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateTemplatePermissions for more information on using the UpdateTemplatePermissions
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateTemplatePermissionsRequest method.
+// req, resp := client.UpdateTemplatePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplatePermissions +func (c *QuickSight) UpdateTemplatePermissionsRequest(input *UpdateTemplatePermissionsInput) (req *request.Request, output *UpdateTemplatePermissionsOutput) { + op := &request.Operation{ + Name: opUpdateTemplatePermissions, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/templates/{TemplateId}/permissions", + } + + if input == nil { + input = &UpdateTemplatePermissionsInput{} + } + + output = &UpdateTemplatePermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateTemplatePermissions API operation for Amazon QuickSight. +// +// Updates the resource permissions for a template. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateTemplatePermissions for usage and error information. +// +// Returned Error Types: +// * ThrottlingException +// Access is throttled. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTemplatePermissions +func (c *QuickSight) UpdateTemplatePermissions(input *UpdateTemplatePermissionsInput) (*UpdateTemplatePermissionsOutput, error) { + req, out := c.UpdateTemplatePermissionsRequest(input) + return out, req.Send() +} + +// UpdateTemplatePermissionsWithContext is the same as UpdateTemplatePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTemplatePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateTemplatePermissionsWithContext(ctx aws.Context, input *UpdateTemplatePermissionsInput, opts ...request.Option) (*UpdateTemplatePermissionsOutput, error) { + req, out := c.UpdateTemplatePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTheme = "UpdateTheme" + +// UpdateThemeRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTheme operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTheme for more information on using the UpdateTheme +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateThemeRequest method. +// req, resp := client.UpdateThemeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTheme +func (c *QuickSight) UpdateThemeRequest(input *UpdateThemeInput) (req *request.Request, output *UpdateThemeOutput) { + op := &request.Operation{ + Name: opUpdateTheme, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/themes/{ThemeId}", + } + + if input == nil { + input = &UpdateThemeInput{} + } + + output = &UpdateThemeOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateTheme API operation for Amazon QuickSight. +// +// Updates a theme. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateTheme for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * LimitExceededException +// A limit is exceeded. +// +// * ResourceExistsException +// The resource specified already exists. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateTheme +func (c *QuickSight) UpdateTheme(input *UpdateThemeInput) (*UpdateThemeOutput, error) { + req, out := c.UpdateThemeRequest(input) + return out, req.Send() +} + +// UpdateThemeWithContext is the same as UpdateTheme with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTheme for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
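+//
+// // A hypothetical sketch; "client" and "params" are assumed to be defined as
+// // in the example above. When no deadline is needed, pass a non-nil background
+// // context rather than nil, since a nil context panics:
+// out, err := client.UpdateThemeWithContext(aws.BackgroundContext(), params)
+// fmt.Println(out, err)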
+func (c *QuickSight) UpdateThemeWithContext(ctx aws.Context, input *UpdateThemeInput, opts ...request.Option) (*UpdateThemeOutput, error) { + req, out := c.UpdateThemeRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateThemeAlias = "UpdateThemeAlias" + +// UpdateThemeAliasRequest generates a "aws/request.Request" representing the +// client's request for the UpdateThemeAlias operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateThemeAlias for more information on using the UpdateThemeAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateThemeAliasRequest method. +// req, resp := client.UpdateThemeAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateThemeAlias +func (c *QuickSight) UpdateThemeAliasRequest(input *UpdateThemeAliasInput) (req *request.Request, output *UpdateThemeAliasOutput) { + op := &request.Operation{ + Name: opUpdateThemeAlias, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/themes/{ThemeId}/aliases/{AliasName}", + } + + if input == nil { + input = &UpdateThemeAliasInput{} + } + + output = &UpdateThemeAliasOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateThemeAlias API operation for Amazon QuickSight. +// +// Updates an alias of a theme. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateThemeAlias for usage and error information. +// +// Returned Error Types: +// * ConflictException +// Updating or deleting a resource can cause an inconsistent state. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceExistsException +// The resource specified already exists. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateThemeAlias +func (c *QuickSight) UpdateThemeAlias(input *UpdateThemeAliasInput) (*UpdateThemeAliasOutput, error) { + req, out := c.UpdateThemeAliasRequest(input) + return out, req.Send() +} + +// UpdateThemeAliasWithContext is the same as UpdateThemeAlias with the addition of +// the ability to pass a context and additional request options. 
+// +// See UpdateThemeAlias for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateThemeAliasWithContext(ctx aws.Context, input *UpdateThemeAliasInput, opts ...request.Option) (*UpdateThemeAliasOutput, error) { + req, out := c.UpdateThemeAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateThemePermissions = "UpdateThemePermissions" + +// UpdateThemePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateThemePermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateThemePermissions for more information on using the UpdateThemePermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateThemePermissionsRequest method. +// req, resp := client.UpdateThemePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateThemePermissions +func (c *QuickSight) UpdateThemePermissionsRequest(input *UpdateThemePermissionsInput) (req *request.Request, output *UpdateThemePermissionsOutput) { + op := &request.Operation{ + Name: opUpdateThemePermissions, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/themes/{ThemeId}/permissions", + } + + if input == nil { + input = &UpdateThemePermissionsInput{} + } + + output = &UpdateThemePermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateThemePermissions API operation for Amazon QuickSight. +// +// Updates the resource permissions for a theme. Permissions apply to the action +// to grant or revoke permissions on, for example "quicksight:DescribeTheme". +// +// Theme permissions apply in groupings. Valid groupings include the following +// for the three levels of permissions, which are user, owner, or no permissions: +// +// * User "quicksight:DescribeTheme" "quicksight:DescribeThemeAlias" "quicksight:ListThemeAliases" +// "quicksight:ListThemeVersions" +// +// * Owner "quicksight:DescribeTheme" "quicksight:DescribeThemeAlias" "quicksight:ListThemeAliases" +// "quicksight:ListThemeVersions" "quicksight:DeleteTheme" "quicksight:UpdateTheme" +// "quicksight:CreateThemeAlias" "quicksight:DeleteThemeAlias" "quicksight:UpdateThemeAlias" +// "quicksight:UpdateThemePermissions" "quicksight:DescribeThemePermissions" +// +// * To specify no permissions, omit the permissions list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateThemePermissions for usage and error information. 
+// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * UnsupportedUserEditionException +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +// +// * InternalFailureException +// An internal failure occurred. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateThemePermissions +func (c *QuickSight) UpdateThemePermissions(input *UpdateThemePermissionsInput) (*UpdateThemePermissionsOutput, error) { + req, out := c.UpdateThemePermissionsRequest(input) + return out, req.Send() +} + +// UpdateThemePermissionsWithContext is the same as UpdateThemePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateThemePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateThemePermissionsWithContext(ctx aws.Context, input *UpdateThemePermissionsInput, opts ...request.Option) (*UpdateThemePermissionsOutput, error) { + req, out := c.UpdateThemePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateUser = "UpdateUser" + +// UpdateUserRequest generates a "aws/request.Request" representing the +// client's request for the UpdateUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateUser for more information on using the UpdateUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateUserRequest method. 
+// req, resp := client.UpdateUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateUser +func (c *QuickSight) UpdateUserRequest(input *UpdateUserInput) (req *request.Request, output *UpdateUserOutput) { + op := &request.Operation{ + Name: opUpdateUser, + HTTPMethod: "PUT", + HTTPPath: "/accounts/{AwsAccountId}/namespaces/{Namespace}/users/{UserName}", + } + + if input == nil { + input = &UpdateUserInput{} + } + + output = &UpdateUserOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateUser API operation for Amazon QuickSight. +// +// Updates an Amazon QuickSight user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon QuickSight's +// API operation UpdateUser for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +// +// * InvalidParameterValueException +// One or more parameters has a value that isn't valid. +// +// * ResourceNotFoundException +// One or more resources can't be found. +// +// * ThrottlingException +// Access is throttled. +// +// * PreconditionNotMetException +// One or more preconditions aren't met. +// +// * InternalFailureException +// An internal failure occurred. +// +// * ResourceUnavailableException +// This resource is currently unavailable. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/quicksight-2018-04-01/UpdateUser +func (c *QuickSight) UpdateUser(input *UpdateUserInput) (*UpdateUserOutput, error) { + req, out := c.UpdateUserRequest(input) + return out, req.Send() +} + +// UpdateUserWithContext is the same as UpdateUser with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *QuickSight) UpdateUserWithContext(ctx aws.Context, input *UpdateUserInput, opts ...request.Option) (*UpdateUserOutput, error) { + req, out := c.UpdateUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// You don't have access to this item. The provided credentials couldn't be +// validated. You might not be authorized to carry out the request. Make sure +// that your account is authorized to use the Amazon QuickSight service, that +// your policies have the correct permissions, and that you are using the correct +// access keys. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + // The AWS request ID for this request. 
+ RequestId *string `type:"string"` +} + +// String returns the string representation +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The Amazon QuickSight customizations associated with your AWS account or +// a QuickSight namespace in a specific AWS Region. +type AccountCustomization struct { + _ struct{} `type:"structure"` + + // The default theme for this QuickSight subscription. + DefaultTheme *string `type:"string"` +} + +// String returns the string representation +func (s AccountCustomization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountCustomization) GoString() string { + return s.String() +} + +// SetDefaultTheme sets the DefaultTheme field's value. +func (s *AccountCustomization) SetDefaultTheme(v string) *AccountCustomization { + s.DefaultTheme = &v + return s +} + +// The QuickSight settings associated with your AWS account. +type AccountSettings struct { + _ struct{} `type:"structure"` + + // The "account name" you provided for the QuickSight subscription in your AWS + // account. You create this name when you sign up for QuickSight. It is unique + // in all of AWS and it appears only in the console when users sign in. + AccountName *string `type:"string"` + + // The default QuickSight namespace for your AWS account. + DefaultNamespace *string `type:"string"` + + // The edition of QuickSight that you're currently subscribed to: Enterprise + // edition or Standard edition. + Edition *string `type:"string" enum:"Edition"` + + // The main notification email for your QuickSight subscription. + NotificationEmail *string `type:"string"` +} + +// String returns the string representation +func (s AccountSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccountSettings) GoString() string { + return s.String() +} + +// SetAccountName sets the AccountName field's value. +func (s *AccountSettings) SetAccountName(v string) *AccountSettings { + s.AccountName = &v + return s +} + +// SetDefaultNamespace sets the DefaultNamespace field's value. +func (s *AccountSettings) SetDefaultNamespace(v string) *AccountSettings { + s.DefaultNamespace = &v + return s +} + +// SetEdition sets the Edition field's value. 
+func (s *AccountSettings) SetEdition(v string) *AccountSettings { + s.Edition = &v + return s +} + +// SetNotificationEmail sets the NotificationEmail field's value. +func (s *AccountSettings) SetNotificationEmail(v string) *AccountSettings { + s.NotificationEmail = &v + return s +} + +// The active AWS Identity and Access Management (IAM) policy assignment. +type ActiveIAMPolicyAssignment struct { + _ struct{} `type:"structure"` + + // A name for the IAM policy assignment. + AssignmentName *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the resource. + PolicyArn *string `type:"string"` +} + +// String returns the string representation +func (s ActiveIAMPolicyAssignment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ActiveIAMPolicyAssignment) GoString() string { + return s.String() +} + +// SetAssignmentName sets the AssignmentName field's value. +func (s *ActiveIAMPolicyAssignment) SetAssignmentName(v string) *ActiveIAMPolicyAssignment { + s.AssignmentName = &v + return s +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *ActiveIAMPolicyAssignment) SetPolicyArn(v string) *ActiveIAMPolicyAssignment { + s.PolicyArn = &v + return s +} + +// Ad hoc (one-time) filtering option. +type AdHocFilteringOption struct { + _ struct{} `type:"structure"` + + // Availability status. + AvailabilityStatus *string `type:"string" enum:"DashboardBehavior"` +} + +// String returns the string representation +func (s AdHocFilteringOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AdHocFilteringOption) GoString() string { + return s.String() +} + +// SetAvailabilityStatus sets the AvailabilityStatus field's value. +func (s *AdHocFilteringOption) SetAvailabilityStatus(v string) *AdHocFilteringOption { + s.AvailabilityStatus = &v + return s +} + +// Amazon Elasticsearch Service parameters. +type AmazonElasticsearchParameters struct { + _ struct{} `type:"structure"` + + // The Amazon Elasticsearch Service domain. + // + // Domain is a required field + Domain *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AmazonElasticsearchParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AmazonElasticsearchParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AmazonElasticsearchParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AmazonElasticsearchParameters"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + if s.Domain != nil && len(*s.Domain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomain sets the Domain field's value. +func (s *AmazonElasticsearchParameters) SetDomain(v string) *AmazonElasticsearchParameters { + s.Domain = &v + return s +} + +// Metadata structure for an analysis in Amazon QuickSight +type Analysis struct { + _ struct{} `type:"structure"` + + // The ID of the analysis. + AnalysisId *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the analysis. + Arn *string `type:"string"` + + // The time that the analysis was created. + CreatedTime *time.Time `type:"timestamp"` + + // The ARNs of the datasets of the analysis. 
+ DataSetArns []*string `type:"list"` + + // Errors associated with the analysis. + Errors []*AnalysisError `min:"1" type:"list"` + + // The time that the analysis was last updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // The descriptive name of the analysis. + Name *string `min:"1" type:"string"` + + // A list of the associated sheets with the unique identifier and name of each + // sheet. + Sheets []*Sheet `type:"list"` + + // Status associated with the analysis. + Status *string `type:"string" enum:"ResourceStatus"` + + // The ARN of the theme of the analysis. + ThemeArn *string `type:"string"` +} + +// String returns the string representation +func (s Analysis) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Analysis) GoString() string { + return s.String() +} + +// SetAnalysisId sets the AnalysisId field's value. +func (s *Analysis) SetAnalysisId(v string) *Analysis { + s.AnalysisId = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *Analysis) SetArn(v string) *Analysis { + s.Arn = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *Analysis) SetCreatedTime(v time.Time) *Analysis { + s.CreatedTime = &v + return s +} + +// SetDataSetArns sets the DataSetArns field's value. +func (s *Analysis) SetDataSetArns(v []*string) *Analysis { + s.DataSetArns = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *Analysis) SetErrors(v []*AnalysisError) *Analysis { + s.Errors = v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *Analysis) SetLastUpdatedTime(v time.Time) *Analysis { + s.LastUpdatedTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *Analysis) SetName(v string) *Analysis { + s.Name = &v + return s +} + +// SetSheets sets the Sheets field's value. +func (s *Analysis) SetSheets(v []*Sheet) *Analysis { + s.Sheets = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Analysis) SetStatus(v string) *Analysis { + s.Status = &v + return s +} + +// SetThemeArn sets the ThemeArn field's value. +func (s *Analysis) SetThemeArn(v string) *Analysis { + s.ThemeArn = &v + return s +} + +// A metadata error structure for an analysis. +type AnalysisError struct { + _ struct{} `type:"structure"` + + // The message associated with the analysis error. + Message *string `type:"string"` + + // The type of the analysis error. + Type *string `type:"string" enum:"AnalysisErrorType"` +} + +// String returns the string representation +func (s AnalysisError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalysisError) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *AnalysisError) SetMessage(v string) *AnalysisError { + s.Message = &v + return s +} + +// SetType sets the Type field's value. +func (s *AnalysisError) SetType(v string) *AnalysisError { + s.Type = &v + return s +} + +// A filter that you apply when searching for one or more analyses. +type AnalysisSearchFilter struct { + _ struct{} `type:"structure"` + + // The name of the value that you want to use as a filter, for example "Name": + // "QUICKSIGHT_USER". + Name *string `type:"string" enum:"AnalysisFilterAttribute"` + + // The comparison operator that you want to use as a filter, for example "Operator": + // "StringEquals". 
+ Operator *string `type:"string" enum:"FilterOperator"` + + // The value of the named item, in this case QUICKSIGHT_USER, that you want + // to use as a filter, for example "Value". An example is "arn:aws:quicksight:us-east-1:1:user/default/UserName1". + Value *string `type:"string"` +} + +// String returns the string representation +func (s AnalysisSearchFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalysisSearchFilter) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *AnalysisSearchFilter) SetName(v string) *AnalysisSearchFilter { + s.Name = &v + return s +} + +// SetOperator sets the Operator field's value. +func (s *AnalysisSearchFilter) SetOperator(v string) *AnalysisSearchFilter { + s.Operator = &v + return s +} + +// SetValue sets the Value field's value. +func (s *AnalysisSearchFilter) SetValue(v string) *AnalysisSearchFilter { + s.Value = &v + return s +} + +// The source entity of an analysis. +type AnalysisSourceEntity struct { + _ struct{} `type:"structure"` + + // The source template for the source entity of the analysis. + SourceTemplate *AnalysisSourceTemplate `type:"structure"` +} + +// String returns the string representation +func (s AnalysisSourceEntity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalysisSourceEntity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalysisSourceEntity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalysisSourceEntity"} + if s.SourceTemplate != nil { + if err := s.SourceTemplate.Validate(); err != nil { + invalidParams.AddNested("SourceTemplate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSourceTemplate sets the SourceTemplate field's value. +func (s *AnalysisSourceEntity) SetSourceTemplate(v *AnalysisSourceTemplate) *AnalysisSourceEntity { + s.SourceTemplate = v + return s +} + +// The source template of an analysis. +type AnalysisSourceTemplate struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the source template of an analysis. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // The dataset references of the source template of an analysis. + // + // DataSetReferences is a required field + DataSetReferences []*DataSetReference `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s AnalysisSourceTemplate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalysisSourceTemplate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AnalysisSourceTemplate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalysisSourceTemplate"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.DataSetReferences == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetReferences")) + } + if s.DataSetReferences != nil && len(s.DataSetReferences) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetReferences", 1)) + } + if s.DataSetReferences != nil { + for i, v := range s.DataSetReferences { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DataSetReferences", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *AnalysisSourceTemplate) SetArn(v string) *AnalysisSourceTemplate { + s.Arn = &v + return s +} + +// SetDataSetReferences sets the DataSetReferences field's value. +func (s *AnalysisSourceTemplate) SetDataSetReferences(v []*DataSetReference) *AnalysisSourceTemplate { + s.DataSetReferences = v + return s +} + +// The summary metadata that describes an analysis. +type AnalysisSummary struct { + _ struct{} `type:"structure"` + + // The ID of the analysis. This ID displays in the URL. + AnalysisId *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) for the analysis. + Arn *string `type:"string"` + + // The time that the analysis was created. + CreatedTime *time.Time `type:"timestamp"` + + // The time that the analysis was last updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // The name of the analysis. This name is displayed in the QuickSight console. + Name *string `min:"1" type:"string"` + + // The last known status for the analysis. + Status *string `type:"string" enum:"ResourceStatus"` +} + +// String returns the string representation +func (s AnalysisSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalysisSummary) GoString() string { + return s.String() +} + +// SetAnalysisId sets the AnalysisId field's value. +func (s *AnalysisSummary) SetAnalysisId(v string) *AnalysisSummary { + s.AnalysisId = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *AnalysisSummary) SetArn(v string) *AnalysisSummary { + s.Arn = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *AnalysisSummary) SetCreatedTime(v time.Time) *AnalysisSummary { + s.CreatedTime = &v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *AnalysisSummary) SetLastUpdatedTime(v time.Time) *AnalysisSummary { + s.LastUpdatedTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *AnalysisSummary) SetName(v string) *AnalysisSummary { + s.Name = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AnalysisSummary) SetStatus(v string) *AnalysisSummary { + s.Status = &v + return s +} + +// Amazon Athena parameters. +type AthenaParameters struct { + _ struct{} `type:"structure"` + + // The workgroup that Amazon Athena uses. + WorkGroup *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AthenaParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AthenaParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AthenaParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AthenaParameters"} + if s.WorkGroup != nil && len(*s.WorkGroup) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkGroup", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkGroup sets the WorkGroup field's value. +func (s *AthenaParameters) SetWorkGroup(v string) *AthenaParameters { + s.WorkGroup = &v + return s +} + +// Amazon Aurora parameters. +type AuroraParameters struct { + _ struct{} `type:"structure"` + + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s AuroraParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuroraParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AuroraParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AuroraParameters"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) + } + if s.Host == nil { + invalidParams.Add(request.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Host", 1)) + } + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database field's value. +func (s *AuroraParameters) SetDatabase(v string) *AuroraParameters { + s.Database = &v + return s +} + +// SetHost sets the Host field's value. +func (s *AuroraParameters) SetHost(v string) *AuroraParameters { + s.Host = &v + return s +} + +// SetPort sets the Port field's value. +func (s *AuroraParameters) SetPort(v int64) *AuroraParameters { + s.Port = &v + return s +} + +// Amazon Aurora with PostgreSQL compatibility parameters. +type AuroraPostgreSqlParameters struct { + _ struct{} `type:"structure"` + + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` + + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s AuroraPostgreSqlParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AuroraPostgreSqlParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AuroraPostgreSqlParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AuroraPostgreSqlParameters"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) + } + if s.Host == nil { + invalidParams.Add(request.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Host", 1)) + } + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDatabase sets the Database field's value. +func (s *AuroraPostgreSqlParameters) SetDatabase(v string) *AuroraPostgreSqlParameters { + s.Database = &v + return s +} + +// SetHost sets the Host field's value. +func (s *AuroraPostgreSqlParameters) SetHost(v string) *AuroraPostgreSqlParameters { + s.Host = &v + return s +} + +// SetPort sets the Port field's value. +func (s *AuroraPostgreSqlParameters) SetPort(v int64) *AuroraPostgreSqlParameters { + s.Port = &v + return s +} + +// AWS IoT Analytics parameters. +type AwsIotAnalyticsParameters struct { + _ struct{} `type:"structure"` + + // Dataset name. + // + // DataSetName is a required field + DataSetName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AwsIotAnalyticsParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIotAnalyticsParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AwsIotAnalyticsParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsIotAnalyticsParameters"} + if s.DataSetName == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetName")) + } + if s.DataSetName != nil && len(*s.DataSetName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataSetName sets the DataSetName field's value. +func (s *AwsIotAnalyticsParameters) SetDataSetName(v string) *AwsIotAnalyticsParameters { + s.DataSetName = &v + return s +} + +// The display options for tile borders for visuals. +type BorderStyle struct { + _ struct{} `type:"structure"` + + // The option to enable display of borders for visuals. + Show *bool `type:"boolean"` +} + +// String returns the string representation +func (s BorderStyle) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BorderStyle) GoString() string { + return s.String() +} + +// SetShow sets the Show field's value. +func (s *BorderStyle) SetShow(v bool) *BorderStyle { + s.Show = &v + return s +} + +// A calculated column for a dataset. +type CalculatedColumn struct { + _ struct{} `type:"structure"` + + // A unique ID to identify a calculated column. During a dataset update, if + // the column ID of a calculated column matches that of an existing calculated + // column, Amazon QuickSight preserves the existing calculated column. + // + // ColumnId is a required field + ColumnId *string `min:"1" type:"string" required:"true"` + + // Column name. 
+ // + // ColumnName is a required field + ColumnName *string `min:"1" type:"string" required:"true"` + + // An expression that defines the calculated column. + // + // Expression is a required field + Expression *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CalculatedColumn) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CalculatedColumn) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CalculatedColumn) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CalculatedColumn"} + if s.ColumnId == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnId")) + } + if s.ColumnId != nil && len(*s.ColumnId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ColumnId", 1)) + } + if s.ColumnName == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnName")) + } + if s.ColumnName != nil && len(*s.ColumnName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ColumnName", 1)) + } + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.Expression != nil && len(*s.Expression) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Expression", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetColumnId sets the ColumnId field's value. +func (s *CalculatedColumn) SetColumnId(v string) *CalculatedColumn { + s.ColumnId = &v + return s +} + +// SetColumnName sets the ColumnName field's value. +func (s *CalculatedColumn) SetColumnName(v string) *CalculatedColumn { + s.ColumnName = &v + return s +} + +// SetExpression sets the Expression field's value. +func (s *CalculatedColumn) SetExpression(v string) *CalculatedColumn { + s.Expression = &v + return s +} + +type CancelIngestionInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset used in the ingestion. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // An ID for the ingestion. + // + // IngestionId is a required field + IngestionId *string `location:"uri" locationName:"IngestionId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelIngestionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelIngestionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CancelIngestionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelIngestionInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + } + if s.IngestionId == nil { + invalidParams.Add(request.NewErrParamRequired("IngestionId")) + } + if s.IngestionId != nil && len(*s.IngestionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IngestionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CancelIngestionInput) SetAwsAccountId(v string) *CancelIngestionInput { + s.AwsAccountId = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. +func (s *CancelIngestionInput) SetDataSetId(v string) *CancelIngestionInput { + s.DataSetId = &v + return s +} + +// SetIngestionId sets the IngestionId field's value. +func (s *CancelIngestionInput) SetIngestionId(v string) *CancelIngestionInput { + s.IngestionId = &v + return s +} + +type CancelIngestionOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the data ingestion. + Arn *string `type:"string"` + + // An ID for the ingestion. + IngestionId *string `min:"1" type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CancelIngestionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelIngestionOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CancelIngestionOutput) SetArn(v string) *CancelIngestionOutput { + s.Arn = &v + return s +} + +// SetIngestionId sets the IngestionId field's value. +func (s *CancelIngestionOutput) SetIngestionId(v string) *CancelIngestionOutput { + s.IngestionId = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *CancelIngestionOutput) SetRequestId(v string) *CancelIngestionOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CancelIngestionOutput) SetStatus(v int64) *CancelIngestionOutput { + s.Status = &v + return s +} + +// A transform operation that casts a column to a different type. +type CastColumnTypeOperation struct { + _ struct{} `type:"structure"` + + // Column name. + // + // ColumnName is a required field + ColumnName *string `min:"1" type:"string" required:"true"` + + // When casting a column from string to datetime type, you can supply a string + // in a format supported by Amazon QuickSight to denote the source data format. + Format *string `type:"string"` + + // New column data type. 
+ // + // NewColumnType is a required field + NewColumnType *string `type:"string" required:"true" enum:"ColumnDataType"` +} + +// String returns the string representation +func (s CastColumnTypeOperation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CastColumnTypeOperation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CastColumnTypeOperation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CastColumnTypeOperation"} + if s.ColumnName == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnName")) + } + if s.ColumnName != nil && len(*s.ColumnName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ColumnName", 1)) + } + if s.NewColumnType == nil { + invalidParams.Add(request.NewErrParamRequired("NewColumnType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetColumnName sets the ColumnName field's value. +func (s *CastColumnTypeOperation) SetColumnName(v string) *CastColumnTypeOperation { + s.ColumnName = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *CastColumnTypeOperation) SetFormat(v string) *CastColumnTypeOperation { + s.Format = &v + return s +} + +// SetNewColumnType sets the NewColumnType field's value. +func (s *CastColumnTypeOperation) SetNewColumnType(v string) *CastColumnTypeOperation { + s.NewColumnType = &v + return s +} + +// Groupings of columns that work together in certain Amazon QuickSight features. +// This is a variant type structure. For this structure to be valid, only one +// of the attributes can be non-null. +type ColumnGroup struct { + _ struct{} `type:"structure"` + + // Geospatial column group that denotes a hierarchy. + GeoSpatialColumnGroup *GeoSpatialColumnGroup `type:"structure"` +} + +// String returns the string representation +func (s ColumnGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ColumnGroup) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ColumnGroup) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ColumnGroup"} + if s.GeoSpatialColumnGroup != nil { + if err := s.GeoSpatialColumnGroup.Validate(); err != nil { + invalidParams.AddNested("GeoSpatialColumnGroup", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGeoSpatialColumnGroup sets the GeoSpatialColumnGroup field's value. +func (s *ColumnGroup) SetGeoSpatialColumnGroup(v *GeoSpatialColumnGroup) *ColumnGroup { + s.GeoSpatialColumnGroup = v + return s +} + +// A structure describing the name, data type, and geographic role of the columns. +type ColumnGroupColumnSchema struct { + _ struct{} `type:"structure"` + + // The name of the column group's column schema. + Name *string `type:"string"` +} + +// String returns the string representation +func (s ColumnGroupColumnSchema) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ColumnGroupColumnSchema) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *ColumnGroupColumnSchema) SetName(v string) *ColumnGroupColumnSchema { + s.Name = &v + return s +} + +// The column group schema. 
+type ColumnGroupSchema struct { + _ struct{} `type:"structure"` + + // A structure containing the list of schemas for column group columns. + ColumnGroupColumnSchemaList []*ColumnGroupColumnSchema `type:"list"` + + // The name of the column group schema. + Name *string `type:"string"` +} + +// String returns the string representation +func (s ColumnGroupSchema) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ColumnGroupSchema) GoString() string { + return s.String() +} + +// SetColumnGroupColumnSchemaList sets the ColumnGroupColumnSchemaList field's value. +func (s *ColumnGroupSchema) SetColumnGroupColumnSchemaList(v []*ColumnGroupColumnSchema) *ColumnGroupSchema { + s.ColumnGroupColumnSchemaList = v + return s +} + +// SetName sets the Name field's value. +func (s *ColumnGroupSchema) SetName(v string) *ColumnGroupSchema { + s.Name = &v + return s +} + +// The column schema. +type ColumnSchema struct { + _ struct{} `type:"structure"` + + // The data type of the column schema. + DataType *string `type:"string"` + + // The geographic role of the column schema. + GeographicRole *string `type:"string"` + + // The name of the column schema. + Name *string `type:"string"` +} + +// String returns the string representation +func (s ColumnSchema) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ColumnSchema) GoString() string { + return s.String() +} + +// SetDataType sets the DataType field's value. +func (s *ColumnSchema) SetDataType(v string) *ColumnSchema { + s.DataType = &v + return s +} + +// SetGeographicRole sets the GeographicRole field's value. +func (s *ColumnSchema) SetGeographicRole(v string) *ColumnSchema { + s.GeographicRole = &v + return s +} + +// SetName sets the Name field's value. +func (s *ColumnSchema) SetName(v string) *ColumnSchema { + s.Name = &v + return s +} + +// A tag for a column in a TagColumnOperation structure. This is a variant type +// structure. For this structure to be valid, only one of the attributes can +// be non-null. +type ColumnTag struct { + _ struct{} `type:"structure"` + + // A geospatial role for a column. + ColumnGeographicRole *string `type:"string" enum:"GeoSpatialDataRole"` +} + +// String returns the string representation +func (s ColumnTag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ColumnTag) GoString() string { + return s.String() +} + +// SetColumnGeographicRole sets the ColumnGeographicRole field's value. +func (s *ColumnTag) SetColumnGeographicRole(v string) *ColumnTag { + s.ColumnGeographicRole = &v + return s +} + +// A resource is already in a state that indicates an operation is happening +// that must complete before a new update can be applied. +type ConcurrentUpdatingException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + RequestId *string `type:"string"` +} + +// String returns the string representation +func (s ConcurrentUpdatingException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConcurrentUpdatingException) GoString() string { + return s.String() +} + +func newErrorConcurrentUpdatingException(v protocol.ResponseMetadata) error { + return &ConcurrentUpdatingException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *ConcurrentUpdatingException) Code() string { + return "ConcurrentUpdatingException" +} + +// Message returns the exception's message. +func (s *ConcurrentUpdatingException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ConcurrentUpdatingException) OrigErr() error { + return nil +} + +func (s *ConcurrentUpdatingException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ConcurrentUpdatingException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ConcurrentUpdatingException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Updating or deleting a resource can cause an inconsistent state. +type ConflictException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + // The AWS request ID for this request. + RequestId *string `type:"string"` +} + +// String returns the string representation +func (s ConflictException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConflictException) GoString() string { + return s.String() +} + +func newErrorConflictException(v protocol.ResponseMetadata) error { + return &ConflictException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ConflictException) Code() string { + return "ConflictException" +} + +// Message returns the exception's message. +func (s *ConflictException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ConflictException) OrigErr() error { + return nil +} + +func (s *ConflictException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateAccountCustomizationInput struct { + _ struct{} `type:"structure"` + + // The QuickSight customizations you're adding in the current AWS Region. You + // can add these to an AWS account and a QuickSight namespace. + // + // For example, you can add a default theme by setting AccountCustomization + // to the midnight theme: "AccountCustomization": { "DefaultTheme": "arn:aws:quicksight::aws:theme/MIDNIGHT" + // }. Or, you can add a custom theme by specifying "AccountCustomization": { + // "DefaultTheme": "arn:aws:quicksight:us-west-2:111122223333:theme/bdb844d0-0fe9-4d9d-b520-0fe602d93639" + // }. + // + // AccountCustomization is a required field + AccountCustomization *AccountCustomization `type:"structure" required:"true"` + + // The ID for the AWS account that you want to customize QuickSight for. 
+ // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The QuickSight namespace that you want to add customizations to. + Namespace *string `location:"querystring" locationName:"namespace" type:"string"` + + // A list of the tags that you want to attach to this resource. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s CreateAccountCustomizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccountCustomizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAccountCustomizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAccountCustomizationInput"} + if s.AccountCustomization == nil { + invalidParams.Add(request.NewErrParamRequired("AccountCustomization")) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountCustomization sets the AccountCustomization field's value. +func (s *CreateAccountCustomizationInput) SetAccountCustomization(v *AccountCustomization) *CreateAccountCustomizationInput { + s.AccountCustomization = v + return s +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateAccountCustomizationInput) SetAwsAccountId(v string) *CreateAccountCustomizationInput { + s.AwsAccountId = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *CreateAccountCustomizationInput) SetNamespace(v string) *CreateAccountCustomizationInput { + s.Namespace = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateAccountCustomizationInput) SetTags(v []*Tag) *CreateAccountCustomizationInput { + s.Tags = v + return s +} + +type CreateAccountCustomizationOutput struct { + _ struct{} `type:"structure"` + + // The QuickSight customizations you're adding in the current AWS Region. + AccountCustomization *AccountCustomization `type:"structure"` + + // The Amazon Resource Name (ARN) for the customization that you created for + // this AWS account. + Arn *string `type:"string"` + + // The ID for the AWS account that you want to customize QuickSight for. + AwsAccountId *string `min:"12" type:"string"` + + // The namespace associated with the customization you're creating. + Namespace *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. 
+ Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CreateAccountCustomizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccountCustomizationOutput) GoString() string { + return s.String() +} + +// SetAccountCustomization sets the AccountCustomization field's value. +func (s *CreateAccountCustomizationOutput) SetAccountCustomization(v *AccountCustomization) *CreateAccountCustomizationOutput { + s.AccountCustomization = v + return s +} + +// SetArn sets the Arn field's value. +func (s *CreateAccountCustomizationOutput) SetArn(v string) *CreateAccountCustomizationOutput { + s.Arn = &v + return s +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateAccountCustomizationOutput) SetAwsAccountId(v string) *CreateAccountCustomizationOutput { + s.AwsAccountId = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *CreateAccountCustomizationOutput) SetNamespace(v string) *CreateAccountCustomizationOutput { + s.Namespace = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateAccountCustomizationOutput) SetRequestId(v string) *CreateAccountCustomizationOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateAccountCustomizationOutput) SetStatus(v int64) *CreateAccountCustomizationOutput { + s.Status = &v + return s +} + +type CreateAnalysisInput struct { + _ struct{} `type:"structure"` + + // The ID for the analysis that you're creating. This ID displays in the URL + // of the analysis. + // + // AnalysisId is a required field + AnalysisId *string `location:"uri" locationName:"AnalysisId" min:"1" type:"string" required:"true"` + + // The ID of the AWS account where you are creating an analysis. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // A descriptive name for the analysis that you're creating. This name displays + // for the analysis in the QuickSight console. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The parameter names and override values that you want to use. An analysis + // can have any parameter type, and some parameters might accept multiple values. + Parameters *Parameters `type:"structure"` + + // A structure that describes the principals and the resource-level permissions + // on an analysis. You can use the Permissions structure to grant permissions + // by providing a list of AWS Identity and Access Management (IAM) action information + // for each principal listed by Amazon Resource Name (ARN). + // + // To specify no permissions, omit Permissions. + Permissions []*ResourcePermission `min:"1" type:"list"` + + // A source entity to use for the analysis that you're creating. This metadata + // structure contains details that describe a source template and one or more + // datasets. + // + // SourceEntity is a required field + SourceEntity *AnalysisSourceEntity `type:"structure" required:"true"` + + // Contains a map of the key-value pairs for the resource tag or tags assigned + // to the analysis. + Tags []*Tag `min:"1" type:"list"` + + // The ARN for the theme to apply to the analysis that you're creating. To see + // the theme in the QuickSight console, make sure that you have access to it. 
+ ThemeArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateAnalysisInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAnalysisInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAnalysisInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAnalysisInput"} + if s.AnalysisId == nil { + invalidParams.Add(request.NewErrParamRequired("AnalysisId")) + } + if s.AnalysisId != nil && len(*s.AnalysisId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnalysisId", 1)) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Permissions != nil && len(s.Permissions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Permissions", 1)) + } + if s.SourceEntity == nil { + invalidParams.Add(request.NewErrParamRequired("SourceEntity")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Parameters != nil { + if err := s.Parameters.Validate(); err != nil { + invalidParams.AddNested("Parameters", err.(request.ErrInvalidParams)) + } + } + if s.Permissions != nil { + for i, v := range s.Permissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SourceEntity != nil { + if err := s.SourceEntity.Validate(); err != nil { + invalidParams.AddNested("SourceEntity", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnalysisId sets the AnalysisId field's value. +func (s *CreateAnalysisInput) SetAnalysisId(v string) *CreateAnalysisInput { + s.AnalysisId = &v + return s +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateAnalysisInput) SetAwsAccountId(v string) *CreateAnalysisInput { + s.AwsAccountId = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateAnalysisInput) SetName(v string) *CreateAnalysisInput { + s.Name = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *CreateAnalysisInput) SetParameters(v *Parameters) *CreateAnalysisInput { + s.Parameters = v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *CreateAnalysisInput) SetPermissions(v []*ResourcePermission) *CreateAnalysisInput { + s.Permissions = v + return s +} + +// SetSourceEntity sets the SourceEntity field's value. +func (s *CreateAnalysisInput) SetSourceEntity(v *AnalysisSourceEntity) *CreateAnalysisInput { + s.SourceEntity = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateAnalysisInput) SetTags(v []*Tag) *CreateAnalysisInput { + s.Tags = v + return s +} + +// SetThemeArn sets the ThemeArn field's value. 
+func (s *CreateAnalysisInput) SetThemeArn(v string) *CreateAnalysisInput { + s.ThemeArn = &v + return s +} + +type CreateAnalysisOutput struct { + _ struct{} `type:"structure"` + + // The ID of the analysis. + AnalysisId *string `min:"1" type:"string"` + + // The ARN for the analysis. + Arn *string `type:"string"` + + // The status of the creation of the analysis. + CreationStatus *string `type:"string" enum:"ResourceStatus"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CreateAnalysisOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAnalysisOutput) GoString() string { + return s.String() +} + +// SetAnalysisId sets the AnalysisId field's value. +func (s *CreateAnalysisOutput) SetAnalysisId(v string) *CreateAnalysisOutput { + s.AnalysisId = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *CreateAnalysisOutput) SetArn(v string) *CreateAnalysisOutput { + s.Arn = &v + return s +} + +// SetCreationStatus sets the CreationStatus field's value. +func (s *CreateAnalysisOutput) SetCreationStatus(v string) *CreateAnalysisOutput { + s.CreationStatus = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateAnalysisOutput) SetRequestId(v string) *CreateAnalysisOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateAnalysisOutput) SetStatus(v int64) *CreateAnalysisOutput { + s.Status = &v + return s +} + +// A transform operation that creates calculated columns. Columns created in +// one such operation form a lexical closure. +type CreateColumnsOperation struct { + _ struct{} `type:"structure"` + + // Calculated columns to create. + // + // Columns is a required field + Columns []*CalculatedColumn `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateColumnsOperation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateColumnsOperation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateColumnsOperation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateColumnsOperation"} + if s.Columns == nil { + invalidParams.Add(request.NewErrParamRequired("Columns")) + } + if s.Columns != nil && len(s.Columns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Columns", 1)) + } + if s.Columns != nil { + for i, v := range s.Columns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Columns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetColumns sets the Columns field's value. +func (s *CreateColumnsOperation) SetColumns(v []*CalculatedColumn) *CreateColumnsOperation { + s.Columns = v + return s +} + +type CreateDashboardInput struct { + _ struct{} `type:"structure"` + + // The ID of the AWS account where you want to create the dashboard. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dashboard, also added to the IAM policy. 
+ // + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + + // Options for publishing the dashboard when you create it: + // + // * AvailabilityStatus for AdHocFilteringOption - This status can be either + // ENABLED or DISABLED. When this is set to DISABLED, QuickSight disables + // the left filter pane on the published dashboard, which can be used for + // ad hoc (one-time) filtering. This option is ENABLED by default. + // + // * AvailabilityStatus for ExportToCSVOption - This status can be either + // ENABLED or DISABLED. The visual option to export data to .CSV format isn't + // enabled when this is set to DISABLED. This option is ENABLED by default. + // + // * VisibilityState for SheetControlsOption - This visibility state can + // be either COLLAPSED or EXPANDED. This option is COLLAPSED by default. + DashboardPublishOptions *DashboardPublishOptions `type:"structure"` + + // The display name of the dashboard. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The parameters for the creation of the dashboard, which you want to use to + // override the default settings. A dashboard can have any type of parameters, + // and some parameters might accept multiple values. + Parameters *Parameters `type:"structure"` + + // A structure that contains the permissions of the dashboard. You can use this + // structure for granting permissions by providing a list of IAM action information + // for each principal ARN. + // + // To specify no permissions, omit the permissions list. + Permissions []*ResourcePermission `min:"1" type:"list"` + + // The entity that you are using as a source when you create the dashboard. + // In SourceEntity, you specify the type of object you're using as source. You + // can only create a dashboard from a template, so you use a SourceTemplate + // entity. If you need to create a dashboard from an analysis, first convert + // the analysis to a template by using the CreateTemplate API operation. For + // SourceTemplate, specify the Amazon Resource Name (ARN) of the source template. + // The SourceTemplateARN can contain any AWS Account and any QuickSight-supported + // AWS Region. + // + // Use the DataSetReferences entity within SourceTemplate to list the replacement + // datasets for the placeholders listed in the original. The schema in each + // dataset must match its placeholder. + // + // SourceEntity is a required field + SourceEntity *DashboardSourceEntity `type:"structure" required:"true"` + + // Contains a map of the key-value pairs for the resource tag or tags assigned + // to the dashboard. + Tags []*Tag `min:"1" type:"list"` + + // The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. + // If you add a value for this field, it overrides the value that is used in + // the source entity. The theme ARN must exist in the same AWS account where + // you create the dashboard. + ThemeArn *string `type:"string"` + + // A description for the first version of the dashboard being created. + VersionDescription *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateDashboardInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDashboardInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
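+//
+// Usage sketch (illustrative, not part of the generated SDK): a dashboard can
+// only be created from a template, so SourceEntity must carry a
+// DashboardSourceEntity that points at the source template ARN; that nested
+// type is assumed to expose the same kind of Set* builders as this input.
+//
+//	input := (&CreateDashboardInput{}).
+//		SetAwsAccountId("111122223333").
+//		SetDashboardId("sales-dashboard").
+//		SetName("Sales dashboard").
+//		SetVersionDescription("initial version")
+//	// input.SetSourceEntity(...) is required and checked by Validate below.
+//	if err := input.Validate(); err != nil {
+//		// AwsAccountId, DashboardId, Name and SourceEntity are all verified here
+//	}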
+func (s *CreateDashboardInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDashboardInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DashboardId == nil { + invalidParams.Add(request.NewErrParamRequired("DashboardId")) + } + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Permissions != nil && len(s.Permissions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Permissions", 1)) + } + if s.SourceEntity == nil { + invalidParams.Add(request.NewErrParamRequired("SourceEntity")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VersionDescription", 1)) + } + if s.Parameters != nil { + if err := s.Parameters.Validate(); err != nil { + invalidParams.AddNested("Parameters", err.(request.ErrInvalidParams)) + } + } + if s.Permissions != nil { + for i, v := range s.Permissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SourceEntity != nil { + if err := s.SourceEntity.Validate(); err != nil { + invalidParams.AddNested("SourceEntity", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateDashboardInput) SetAwsAccountId(v string) *CreateDashboardInput { + s.AwsAccountId = &v + return s +} + +// SetDashboardId sets the DashboardId field's value. +func (s *CreateDashboardInput) SetDashboardId(v string) *CreateDashboardInput { + s.DashboardId = &v + return s +} + +// SetDashboardPublishOptions sets the DashboardPublishOptions field's value. +func (s *CreateDashboardInput) SetDashboardPublishOptions(v *DashboardPublishOptions) *CreateDashboardInput { + s.DashboardPublishOptions = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateDashboardInput) SetName(v string) *CreateDashboardInput { + s.Name = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *CreateDashboardInput) SetParameters(v *Parameters) *CreateDashboardInput { + s.Parameters = v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *CreateDashboardInput) SetPermissions(v []*ResourcePermission) *CreateDashboardInput { + s.Permissions = v + return s +} + +// SetSourceEntity sets the SourceEntity field's value. +func (s *CreateDashboardInput) SetSourceEntity(v *DashboardSourceEntity) *CreateDashboardInput { + s.SourceEntity = v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *CreateDashboardInput) SetTags(v []*Tag) *CreateDashboardInput { + s.Tags = v + return s +} + +// SetThemeArn sets the ThemeArn field's value. +func (s *CreateDashboardInput) SetThemeArn(v string) *CreateDashboardInput { + s.ThemeArn = &v + return s +} + +// SetVersionDescription sets the VersionDescription field's value. +func (s *CreateDashboardInput) SetVersionDescription(v string) *CreateDashboardInput { + s.VersionDescription = &v + return s +} + +type CreateDashboardOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the dashboard. + Arn *string `type:"string"` + + // The status of the dashboard creation request. + CreationStatus *string `type:"string" enum:"ResourceStatus"` + + // The ID for the dashboard. + DashboardId *string `min:"1" type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The ARN of the dashboard, including the version number of the first version + // that is created. + VersionArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateDashboardOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDashboardOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateDashboardOutput) SetArn(v string) *CreateDashboardOutput { + s.Arn = &v + return s +} + +// SetCreationStatus sets the CreationStatus field's value. +func (s *CreateDashboardOutput) SetCreationStatus(v string) *CreateDashboardOutput { + s.CreationStatus = &v + return s +} + +// SetDashboardId sets the DashboardId field's value. +func (s *CreateDashboardOutput) SetDashboardId(v string) *CreateDashboardOutput { + s.DashboardId = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateDashboardOutput) SetRequestId(v string) *CreateDashboardOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateDashboardOutput) SetStatus(v int64) *CreateDashboardOutput { + s.Status = &v + return s +} + +// SetVersionArn sets the VersionArn field's value. +func (s *CreateDashboardOutput) SetVersionArn(v string) *CreateDashboardOutput { + s.VersionArn = &v + return s +} + +type CreateDataSetInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // Groupings of columns that work together in certain QuickSight features. Currently, + // only geospatial hierarchy is supported. + ColumnGroups []*ColumnGroup `min:"1" type:"list"` + + // An ID for the dataset that you want to create. This ID is unique per AWS + // Region for each AWS account. + // + // DataSetId is a required field + DataSetId *string `type:"string" required:"true"` + + // Indicates whether you want to import the data into SPICE. + // + // ImportMode is a required field + ImportMode *string `type:"string" required:"true" enum:"DataSetImportMode"` + + // Configures the combination and transformation of the data from the physical + // tables. + LogicalTableMap map[string]*LogicalTable `min:"1" type:"map"` + + // The display name for the dataset. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A list of resource permissions on the dataset. 
+ Permissions []*ResourcePermission `min:"1" type:"list"` + + // Declares the physical tables that are available in the underlying data sources. + // + // PhysicalTableMap is a required field + PhysicalTableMap map[string]*PhysicalTable `min:"1" type:"map" required:"true"` + + // The row-level security configuration for the data that you want to create. + RowLevelPermissionDataSet *RowLevelPermissionDataSet `type:"structure"` + + // Contains a map of the key-value pairs for the resource tag or tags assigned + // to the dataset. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s CreateDataSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataSetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDataSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDataSetInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.ColumnGroups != nil && len(s.ColumnGroups) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ColumnGroups", 1)) + } + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.ImportMode == nil { + invalidParams.Add(request.NewErrParamRequired("ImportMode")) + } + if s.LogicalTableMap != nil && len(s.LogicalTableMap) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogicalTableMap", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Permissions != nil && len(s.Permissions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Permissions", 1)) + } + if s.PhysicalTableMap == nil { + invalidParams.Add(request.NewErrParamRequired("PhysicalTableMap")) + } + if s.PhysicalTableMap != nil && len(s.PhysicalTableMap) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PhysicalTableMap", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.ColumnGroups != nil { + for i, v := range s.ColumnGroups { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ColumnGroups", i), err.(request.ErrInvalidParams)) + } + } + } + if s.LogicalTableMap != nil { + for i, v := range s.LogicalTableMap { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LogicalTableMap", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Permissions != nil { + for i, v := range s.Permissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.PhysicalTableMap != nil { + for i, v := range s.PhysicalTableMap { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PhysicalTableMap", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RowLevelPermissionDataSet != nil { + if err := s.RowLevelPermissionDataSet.Validate(); err != nil { + invalidParams.AddNested("RowLevelPermissionDataSet", err.(request.ErrInvalidParams)) 
+ } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateDataSetInput) SetAwsAccountId(v string) *CreateDataSetInput { + s.AwsAccountId = &v + return s +} + +// SetColumnGroups sets the ColumnGroups field's value. +func (s *CreateDataSetInput) SetColumnGroups(v []*ColumnGroup) *CreateDataSetInput { + s.ColumnGroups = v + return s +} + +// SetDataSetId sets the DataSetId field's value. +func (s *CreateDataSetInput) SetDataSetId(v string) *CreateDataSetInput { + s.DataSetId = &v + return s +} + +// SetImportMode sets the ImportMode field's value. +func (s *CreateDataSetInput) SetImportMode(v string) *CreateDataSetInput { + s.ImportMode = &v + return s +} + +// SetLogicalTableMap sets the LogicalTableMap field's value. +func (s *CreateDataSetInput) SetLogicalTableMap(v map[string]*LogicalTable) *CreateDataSetInput { + s.LogicalTableMap = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateDataSetInput) SetName(v string) *CreateDataSetInput { + s.Name = &v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *CreateDataSetInput) SetPermissions(v []*ResourcePermission) *CreateDataSetInput { + s.Permissions = v + return s +} + +// SetPhysicalTableMap sets the PhysicalTableMap field's value. +func (s *CreateDataSetInput) SetPhysicalTableMap(v map[string]*PhysicalTable) *CreateDataSetInput { + s.PhysicalTableMap = v + return s +} + +// SetRowLevelPermissionDataSet sets the RowLevelPermissionDataSet field's value. +func (s *CreateDataSetInput) SetRowLevelPermissionDataSet(v *RowLevelPermissionDataSet) *CreateDataSetInput { + s.RowLevelPermissionDataSet = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDataSetInput) SetTags(v []*Tag) *CreateDataSetInput { + s.Tags = v + return s +} + +type CreateDataSetOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset. + Arn *string `type:"string"` + + // The ID for the dataset that you want to create. This ID is unique per AWS + // Region for each AWS account. + DataSetId *string `type:"string"` + + // The ARN for the ingestion, which is triggered as a result of dataset creation + // if the import mode is SPICE. + IngestionArn *string `type:"string"` + + // The ID of the ingestion, which is triggered as a result of dataset creation + // if the import mode is SPICE. + IngestionId *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CreateDataSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataSetOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateDataSetOutput) SetArn(v string) *CreateDataSetOutput { + s.Arn = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. +func (s *CreateDataSetOutput) SetDataSetId(v string) *CreateDataSetOutput { + s.DataSetId = &v + return s +} + +// SetIngestionArn sets the IngestionArn field's value. 
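+//
+// Usage sketch (illustrative, not part of the generated SDK): when the dataset
+// is created with ImportMode set to SPICE, the output also identifies the
+// ingestion that QuickSight starts automatically, so a caller can track it.
+//
+//	out := &CreateDataSetOutput{} // would normally come from the CreateDataSet call
+//	if out.IngestionId != nil {
+//		// poll the ingestion identified by *out.IngestionId / *out.IngestionArn
+//	}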
+func (s *CreateDataSetOutput) SetIngestionArn(v string) *CreateDataSetOutput {
+	s.IngestionArn = &v
+	return s
+}
+
+// SetIngestionId sets the IngestionId field's value.
+func (s *CreateDataSetOutput) SetIngestionId(v string) *CreateDataSetOutput {
+	s.IngestionId = &v
+	return s
+}
+
+// SetRequestId sets the RequestId field's value.
+func (s *CreateDataSetOutput) SetRequestId(v string) *CreateDataSetOutput {
+	s.RequestId = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *CreateDataSetOutput) SetStatus(v int64) *CreateDataSetOutput {
+	s.Status = &v
+	return s
+}
+
+type CreateDataSourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The AWS account ID.
+	//
+	// AwsAccountId is a required field
+	AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"`
+
+	// The credentials that QuickSight uses to connect to your underlying source.
+	// Currently, only credentials based on user name and password are supported.
+	Credentials *DataSourceCredentials `type:"structure" sensitive:"true"`
+
+	// An ID for the data source. This ID is unique per AWS Region for each AWS
+	// account.
+	//
+	// DataSourceId is a required field
+	DataSourceId *string `type:"string" required:"true"`
+
+	// The parameters that QuickSight uses to connect to your underlying source.
+	DataSourceParameters *DataSourceParameters `type:"structure"`
+
+	// A display name for the data source.
+	//
+	// Name is a required field
+	Name *string `min:"1" type:"string" required:"true"`
+
+	// A list of resource permissions on the data source.
+	Permissions []*ResourcePermission `min:"1" type:"list"`
+
+	// Secure Socket Layer (SSL) properties that apply when QuickSight connects
+	// to your underlying source.
+	SslProperties *SslProperties `type:"structure"`
+
+	// Contains a map of the key-value pairs for the resource tag or tags assigned
+	// to the data source.
+	Tags []*Tag `min:"1" type:"list"`
+
+	// The type of the data source. Currently, the supported types for this operation
+	// are: ATHENA, AURORA, AURORA_POSTGRESQL, MARIADB, MYSQL, POSTGRESQL, PRESTO,
+	// REDSHIFT, S3, SNOWFLAKE, SPARK, SQLSERVER, TERADATA. Use ListDataSources
+	// to return a list of all data sources.
+	//
+	// Type is a required field
+	Type *string `type:"string" required:"true" enum:"DataSourceType"`
+
+	// Use this parameter only when you want QuickSight to use a VPC connection
+	// when connecting to your underlying source.
+	VpcConnectionProperties *VpcConnectionProperties `type:"structure"`
+}
+
+// String returns the string representation
+func (s CreateDataSourceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateDataSourceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateDataSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDataSourceInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSourceId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceId")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Permissions != nil && len(s.Permissions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Permissions", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Credentials != nil { + if err := s.Credentials.Validate(); err != nil { + invalidParams.AddNested("Credentials", err.(request.ErrInvalidParams)) + } + } + if s.DataSourceParameters != nil { + if err := s.DataSourceParameters.Validate(); err != nil { + invalidParams.AddNested("DataSourceParameters", err.(request.ErrInvalidParams)) + } + } + if s.Permissions != nil { + for i, v := range s.Permissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + if s.VpcConnectionProperties != nil { + if err := s.VpcConnectionProperties.Validate(); err != nil { + invalidParams.AddNested("VpcConnectionProperties", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateDataSourceInput) SetAwsAccountId(v string) *CreateDataSourceInput { + s.AwsAccountId = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *CreateDataSourceInput) SetCredentials(v *DataSourceCredentials) *CreateDataSourceInput { + s.Credentials = v + return s +} + +// SetDataSourceId sets the DataSourceId field's value. +func (s *CreateDataSourceInput) SetDataSourceId(v string) *CreateDataSourceInput { + s.DataSourceId = &v + return s +} + +// SetDataSourceParameters sets the DataSourceParameters field's value. +func (s *CreateDataSourceInput) SetDataSourceParameters(v *DataSourceParameters) *CreateDataSourceInput { + s.DataSourceParameters = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateDataSourceInput) SetName(v string) *CreateDataSourceInput { + s.Name = &v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *CreateDataSourceInput) SetPermissions(v []*ResourcePermission) *CreateDataSourceInput { + s.Permissions = v + return s +} + +// SetSslProperties sets the SslProperties field's value. +func (s *CreateDataSourceInput) SetSslProperties(v *SslProperties) *CreateDataSourceInput { + s.SslProperties = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateDataSourceInput) SetTags(v []*Tag) *CreateDataSourceInput { + s.Tags = v + return s +} + +// SetType sets the Type field's value. 
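+//
+// Usage sketch (illustrative, not part of the generated SDK): Type takes one
+// of the DataSourceType enum values listed above (for example ATHENA or S3);
+// Credentials and DataSourceParameters are assumed to be built from their own
+// types defined elsewhere in this package.
+//
+//	input := (&CreateDataSourceInput{}).
+//		SetAwsAccountId("111122223333").
+//		SetDataSourceId("athena-source").
+//		SetName("Athena source").
+//		SetType("ATHENA")
+//	if err := input.Validate(); err != nil {
+//		// AwsAccountId, DataSourceId, Name and Type are required
+//	}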
+func (s *CreateDataSourceInput) SetType(v string) *CreateDataSourceInput { + s.Type = &v + return s +} + +// SetVpcConnectionProperties sets the VpcConnectionProperties field's value. +func (s *CreateDataSourceInput) SetVpcConnectionProperties(v *VpcConnectionProperties) *CreateDataSourceInput { + s.VpcConnectionProperties = v + return s +} + +type CreateDataSourceOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the data source. + Arn *string `type:"string"` + + // The status of creating the data source. + CreationStatus *string `type:"string" enum:"ResourceStatus"` + + // The ID of the data source. This ID is unique per AWS Region for each AWS + // account. + DataSourceId *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CreateDataSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDataSourceOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateDataSourceOutput) SetArn(v string) *CreateDataSourceOutput { + s.Arn = &v + return s +} + +// SetCreationStatus sets the CreationStatus field's value. +func (s *CreateDataSourceOutput) SetCreationStatus(v string) *CreateDataSourceOutput { + s.CreationStatus = &v + return s +} + +// SetDataSourceId sets the DataSourceId field's value. +func (s *CreateDataSourceOutput) SetDataSourceId(v string) *CreateDataSourceOutput { + s.DataSourceId = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateDataSourceOutput) SetRequestId(v string) *CreateDataSourceOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateDataSourceOutput) SetStatus(v int64) *CreateDataSourceOutput { + s.Status = &v + return s +} + +// The request object for this operation. +type CreateGroupInput struct { + _ struct{} `type:"structure"` + + // The ID for the AWS account that the group is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // A description for the group that you want to create. + Description *string `min:"1" type:"string"` + + // A name for the group that you want to create. + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The namespace. Currently, you should set this to default. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
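+//
+// Usage sketch (illustrative, not part of the generated SDK): per the field
+// documentation above, the namespace is currently always "default".
+//
+//	input := (&CreateGroupInput{}).
+//		SetAwsAccountId("111122223333").
+//		SetNamespace("default").
+//		SetGroupName("quicksight-readers").
+//		SetDescription("Read-only QuickSight users")
+//	if err := input.Validate(); err != nil {
+//		// AwsAccountId, GroupName and Namespace are required
+//	}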
+func (s *CreateGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGroupInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateGroupInput) SetAwsAccountId(v string) *CreateGroupInput { + s.AwsAccountId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateGroupInput) SetDescription(v string) *CreateGroupInput { + s.Description = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *CreateGroupInput) SetGroupName(v string) *CreateGroupInput { + s.GroupName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *CreateGroupInput) SetNamespace(v string) *CreateGroupInput { + s.Namespace = &v + return s +} + +type CreateGroupMembershipInput struct { + _ struct{} `type:"structure"` + + // The ID for the AWS account that the group is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The name of the group that you want to add the user to. + // + // GroupName is a required field + GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + + // The name of the user that you want to add to the group membership. + // + // MemberName is a required field + MemberName *string `location:"uri" locationName:"MemberName" min:"1" type:"string" required:"true"` + + // The namespace. Currently, you should set this to default. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateGroupMembershipInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGroupMembershipInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
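+//
+// Usage sketch (illustrative, not part of the generated SDK): all four fields
+// are URI parameters, so adding an existing user to an existing group only
+// needs the identifiers.
+//
+//	input := (&CreateGroupMembershipInput{}).
+//		SetAwsAccountId("111122223333").
+//		SetNamespace("default").
+//		SetGroupName("quicksight-readers").
+//		SetMemberName("jdoe")
+//	if err := input.Validate(); err != nil {
+//		// all four fields are required
+//	}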
+func (s *CreateGroupMembershipInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGroupMembershipInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.MemberName == nil { + invalidParams.Add(request.NewErrParamRequired("MemberName")) + } + if s.MemberName != nil && len(*s.MemberName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MemberName", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateGroupMembershipInput) SetAwsAccountId(v string) *CreateGroupMembershipInput { + s.AwsAccountId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *CreateGroupMembershipInput) SetGroupName(v string) *CreateGroupMembershipInput { + s.GroupName = &v + return s +} + +// SetMemberName sets the MemberName field's value. +func (s *CreateGroupMembershipInput) SetMemberName(v string) *CreateGroupMembershipInput { + s.MemberName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *CreateGroupMembershipInput) SetNamespace(v string) *CreateGroupMembershipInput { + s.Namespace = &v + return s +} + +type CreateGroupMembershipOutput struct { + _ struct{} `type:"structure"` + + // The group member. + GroupMember *GroupMember `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CreateGroupMembershipOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGroupMembershipOutput) GoString() string { + return s.String() +} + +// SetGroupMember sets the GroupMember field's value. +func (s *CreateGroupMembershipOutput) SetGroupMember(v *GroupMember) *CreateGroupMembershipOutput { + s.GroupMember = v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateGroupMembershipOutput) SetRequestId(v string) *CreateGroupMembershipOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateGroupMembershipOutput) SetStatus(v int64) *CreateGroupMembershipOutput { + s.Status = &v + return s +} + +// The response object for this operation. +type CreateGroupOutput struct { + _ struct{} `type:"structure"` + + // The name of the group. + Group *Group `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. 
+ Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CreateGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGroupOutput) GoString() string { + return s.String() +} + +// SetGroup sets the Group field's value. +func (s *CreateGroupOutput) SetGroup(v *Group) *CreateGroupOutput { + s.Group = v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateGroupOutput) SetRequestId(v string) *CreateGroupOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateGroupOutput) SetStatus(v int64) *CreateGroupOutput { + s.Status = &v + return s +} + +type CreateIAMPolicyAssignmentInput struct { + _ struct{} `type:"structure"` + + // The name of the assignment. It must be unique within an AWS account. + // + // AssignmentName is a required field + AssignmentName *string `min:"1" type:"string" required:"true"` + + // The status of the assignment. Possible values are as follows: + // + // * ENABLED - Anything specified in this assignment is used when creating + // the data source. + // + // * DISABLED - This assignment isn't used when creating the data source. + // + // * DRAFT - This assignment is an unfinished draft and isn't used when creating + // the data source. + // + // AssignmentStatus is a required field + AssignmentStatus *string `type:"string" required:"true" enum:"AssignmentStatus"` + + // The ID of the AWS account where you want to assign an IAM policy to QuickSight + // users or groups. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The QuickSight users, groups, or both that you want to assign the policy + // to. + Identities map[string][]*string `type:"map"` + + // The namespace that contains the assignment. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + + // The ARN for the IAM policy to apply to the QuickSight users and groups specified + // in this assignment. + PolicyArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateIAMPolicyAssignmentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateIAMPolicyAssignmentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
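+//
+// Usage sketch (illustrative, not part of the generated SDK): Identities maps
+// an identity type to the users or groups that the policy applies to; the key
+// names used here ("group", "user") follow the QuickSight API documentation
+// and are an assumption, not something enforced by this struct.
+//
+//	group := "quicksight-readers"
+//	input := (&CreateIAMPolicyAssignmentInput{}).
+//		SetAwsAccountId("111122223333").
+//		SetNamespace("default").
+//		SetAssignmentName("reader-policy-assignment").
+//		SetAssignmentStatus("ENABLED").
+//		SetPolicyArn("arn:aws:iam::111122223333:policy/QuickSightReaders").
+//		SetIdentities(map[string][]*string{"group": {&group}})
+//	if err := input.Validate(); err != nil {
+//		// AssignmentName, AssignmentStatus, AwsAccountId and Namespace are required
+//	}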
+func (s *CreateIAMPolicyAssignmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateIAMPolicyAssignmentInput"} + if s.AssignmentName == nil { + invalidParams.Add(request.NewErrParamRequired("AssignmentName")) + } + if s.AssignmentName != nil && len(*s.AssignmentName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssignmentName", 1)) + } + if s.AssignmentStatus == nil { + invalidParams.Add(request.NewErrParamRequired("AssignmentStatus")) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssignmentName sets the AssignmentName field's value. +func (s *CreateIAMPolicyAssignmentInput) SetAssignmentName(v string) *CreateIAMPolicyAssignmentInput { + s.AssignmentName = &v + return s +} + +// SetAssignmentStatus sets the AssignmentStatus field's value. +func (s *CreateIAMPolicyAssignmentInput) SetAssignmentStatus(v string) *CreateIAMPolicyAssignmentInput { + s.AssignmentStatus = &v + return s +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateIAMPolicyAssignmentInput) SetAwsAccountId(v string) *CreateIAMPolicyAssignmentInput { + s.AwsAccountId = &v + return s +} + +// SetIdentities sets the Identities field's value. +func (s *CreateIAMPolicyAssignmentInput) SetIdentities(v map[string][]*string) *CreateIAMPolicyAssignmentInput { + s.Identities = v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *CreateIAMPolicyAssignmentInput) SetNamespace(v string) *CreateIAMPolicyAssignmentInput { + s.Namespace = &v + return s +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *CreateIAMPolicyAssignmentInput) SetPolicyArn(v string) *CreateIAMPolicyAssignmentInput { + s.PolicyArn = &v + return s +} + +type CreateIAMPolicyAssignmentOutput struct { + _ struct{} `type:"structure"` + + // The ID for the assignment. + AssignmentId *string `type:"string"` + + // The name of the assignment. This name must be unique within the AWS account. + AssignmentName *string `min:"1" type:"string"` + + // The status of the assignment. Possible values are as follows: + // + // * ENABLED - Anything specified in this assignment is used when creating + // the data source. + // + // * DISABLED - This assignment isn't used when creating the data source. + // + // * DRAFT - This assignment is an unfinished draft and isn't used when creating + // the data source. + AssignmentStatus *string `type:"string" enum:"AssignmentStatus"` + + // The QuickSight users, groups, or both that the IAM policy is assigned to. + Identities map[string][]*string `type:"map"` + + // The ARN for the IAM policy that is applied to the QuickSight users and groups + // specified in this assignment. + PolicyArn *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. 
+ Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CreateIAMPolicyAssignmentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateIAMPolicyAssignmentOutput) GoString() string { + return s.String() +} + +// SetAssignmentId sets the AssignmentId field's value. +func (s *CreateIAMPolicyAssignmentOutput) SetAssignmentId(v string) *CreateIAMPolicyAssignmentOutput { + s.AssignmentId = &v + return s +} + +// SetAssignmentName sets the AssignmentName field's value. +func (s *CreateIAMPolicyAssignmentOutput) SetAssignmentName(v string) *CreateIAMPolicyAssignmentOutput { + s.AssignmentName = &v + return s +} + +// SetAssignmentStatus sets the AssignmentStatus field's value. +func (s *CreateIAMPolicyAssignmentOutput) SetAssignmentStatus(v string) *CreateIAMPolicyAssignmentOutput { + s.AssignmentStatus = &v + return s +} + +// SetIdentities sets the Identities field's value. +func (s *CreateIAMPolicyAssignmentOutput) SetIdentities(v map[string][]*string) *CreateIAMPolicyAssignmentOutput { + s.Identities = v + return s +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *CreateIAMPolicyAssignmentOutput) SetPolicyArn(v string) *CreateIAMPolicyAssignmentOutput { + s.PolicyArn = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateIAMPolicyAssignmentOutput) SetRequestId(v string) *CreateIAMPolicyAssignmentOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateIAMPolicyAssignmentOutput) SetStatus(v int64) *CreateIAMPolicyAssignmentOutput { + s.Status = &v + return s +} + +type CreateIngestionInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the dataset used in the ingestion. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // An ID for the ingestion. + // + // IngestionId is a required field + IngestionId *string `location:"uri" locationName:"IngestionId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateIngestionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateIngestionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
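+//
+// Usage sketch (illustrative, not part of the generated SDK): an ingestion is
+// started against an existing dataset under a caller-chosen IngestionId.
+//
+//	input := (&CreateIngestionInput{}).
+//		SetAwsAccountId("111122223333").
+//		SetDataSetId("sales-dataset").
+//		SetIngestionId("refresh-2021-07-20")
+//	if err := input.Validate(); err != nil {
+//		// all three fields are required
+//	}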
+func (s *CreateIngestionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateIngestionInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + } + if s.IngestionId == nil { + invalidParams.Add(request.NewErrParamRequired("IngestionId")) + } + if s.IngestionId != nil && len(*s.IngestionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IngestionId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateIngestionInput) SetAwsAccountId(v string) *CreateIngestionInput { + s.AwsAccountId = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. +func (s *CreateIngestionInput) SetDataSetId(v string) *CreateIngestionInput { + s.DataSetId = &v + return s +} + +// SetIngestionId sets the IngestionId field's value. +func (s *CreateIngestionInput) SetIngestionId(v string) *CreateIngestionInput { + s.IngestionId = &v + return s +} + +type CreateIngestionOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the data ingestion. + Arn *string `type:"string"` + + // An ID for the ingestion. + IngestionId *string `min:"1" type:"string"` + + // The ingestion status. + IngestionStatus *string `type:"string" enum:"IngestionStatus"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s CreateIngestionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateIngestionOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateIngestionOutput) SetArn(v string) *CreateIngestionOutput { + s.Arn = &v + return s +} + +// SetIngestionId sets the IngestionId field's value. +func (s *CreateIngestionOutput) SetIngestionId(v string) *CreateIngestionOutput { + s.IngestionId = &v + return s +} + +// SetIngestionStatus sets the IngestionStatus field's value. +func (s *CreateIngestionOutput) SetIngestionStatus(v string) *CreateIngestionOutput { + s.IngestionStatus = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateIngestionOutput) SetRequestId(v string) *CreateIngestionOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateIngestionOutput) SetStatus(v int64) *CreateIngestionOutput { + s.Status = &v + return s +} + +type CreateNamespaceInput struct { + _ struct{} `type:"structure"` + + // The ID for the AWS account that you want to create the QuickSight namespace + // in. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // Specifies the type of your user identity directory. Currently, this supports + // users with an identity type of QUICKSIGHT. 
+ // + // IdentityStore is a required field + IdentityStore *string `type:"string" required:"true" enum:"IdentityStore"` + + // The name that you want to use to describe the new namespace. + // + // Namespace is a required field + Namespace *string `type:"string" required:"true"` + + // The tags that you want to associate with the namespace that you're creating. + Tags []*Tag `min:"1" type:"list"` +} + +// String returns the string representation +func (s CreateNamespaceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateNamespaceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateNamespaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateNamespaceInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.IdentityStore == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityStore")) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateNamespaceInput) SetAwsAccountId(v string) *CreateNamespaceInput { + s.AwsAccountId = &v + return s +} + +// SetIdentityStore sets the IdentityStore field's value. +func (s *CreateNamespaceInput) SetIdentityStore(v string) *CreateNamespaceInput { + s.IdentityStore = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *CreateNamespaceInput) SetNamespace(v string) *CreateNamespaceInput { + s.Namespace = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateNamespaceInput) SetTags(v []*Tag) *CreateNamespaceInput { + s.Tags = v + return s +} + +type CreateNamespaceOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the QuickSight namespace you created. + Arn *string `type:"string"` + + // The AWS Region that you want to use for the free SPICE capacity for the new + // namespace. This is set to the region that you run CreateNamespace in. + CapacityRegion *string `type:"string"` + + // The status of the creation of the namespace. This is an asynchronous process. + // A status of CREATED means that your namespace is ready to use. If an error + // occurs, it indicates if the process is retryable or non-retryable. In the + // case of a non-retryable error, refer to the error message for follow-up tasks. + CreationStatus *string `type:"string" enum:"NamespaceStatus"` + + // Specifies the type of your user identity directory. Currently, this supports + // users with an identity type of QUICKSIGHT. + IdentityStore *string `type:"string" enum:"IdentityStore"` + + // The name of the new namespace that you created. + Name *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. 
+	Status *int64 `location:"statusCode" type:"integer"`
+}
+
+// String returns the string representation
+func (s CreateNamespaceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateNamespaceOutput) GoString() string {
+	return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *CreateNamespaceOutput) SetArn(v string) *CreateNamespaceOutput {
+	s.Arn = &v
+	return s
+}
+
+// SetCapacityRegion sets the CapacityRegion field's value.
+func (s *CreateNamespaceOutput) SetCapacityRegion(v string) *CreateNamespaceOutput {
+	s.CapacityRegion = &v
+	return s
+}
+
+// SetCreationStatus sets the CreationStatus field's value.
+func (s *CreateNamespaceOutput) SetCreationStatus(v string) *CreateNamespaceOutput {
+	s.CreationStatus = &v
+	return s
+}
+
+// SetIdentityStore sets the IdentityStore field's value.
+func (s *CreateNamespaceOutput) SetIdentityStore(v string) *CreateNamespaceOutput {
+	s.IdentityStore = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *CreateNamespaceOutput) SetName(v string) *CreateNamespaceOutput {
+	s.Name = &v
+	return s
+}
+
+// SetRequestId sets the RequestId field's value.
+func (s *CreateNamespaceOutput) SetRequestId(v string) *CreateNamespaceOutput {
+	s.RequestId = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *CreateNamespaceOutput) SetStatus(v int64) *CreateNamespaceOutput {
+	s.Status = &v
+	return s
+}
+
+type CreateTemplateAliasInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name that you want to give to the template alias that you're creating.
+	// Don't start the alias name with the $ character. Alias names that start with
+	// $ are reserved by QuickSight.
+	//
+	// AliasName is a required field
+	AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"`
+
+	// The ID of the AWS account that contains the template that you're creating an
+	// alias for.
+	//
+	// AwsAccountId is a required field
+	AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"`
+
+	// An ID for the template.
+	//
+	// TemplateId is a required field
+	TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"`
+
+	// The version number of the template.
+	//
+	// TemplateVersionNumber is a required field
+	TemplateVersionNumber *int64 `min:"1" type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s CreateTemplateAliasInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateTemplateAliasInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
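+//
+// Usage sketch (illustrative, not part of the generated SDK): the alias binds
+// a stable name to an existing template version; names beginning with $ are
+// reserved by QuickSight.
+//
+//	input := (&CreateTemplateAliasInput{}).
+//		SetAwsAccountId("111122223333").
+//		SetTemplateId("sales-template").
+//		SetAliasName("production").
+//		SetTemplateVersionNumber(1)
+//	if err := input.Validate(); err != nil {
+//		// all four fields are required
+//	}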
+func (s *CreateTemplateAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTemplateAliasInput"} + if s.AliasName == nil { + invalidParams.Add(request.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.TemplateId == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + } + if s.TemplateVersionNumber == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateVersionNumber")) + } + if s.TemplateVersionNumber != nil && *s.TemplateVersionNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("TemplateVersionNumber", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasName sets the AliasName field's value. +func (s *CreateTemplateAliasInput) SetAliasName(v string) *CreateTemplateAliasInput { + s.AliasName = &v + return s +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateTemplateAliasInput) SetAwsAccountId(v string) *CreateTemplateAliasInput { + s.AwsAccountId = &v + return s +} + +// SetTemplateId sets the TemplateId field's value. +func (s *CreateTemplateAliasInput) SetTemplateId(v string) *CreateTemplateAliasInput { + s.TemplateId = &v + return s +} + +// SetTemplateVersionNumber sets the TemplateVersionNumber field's value. +func (s *CreateTemplateAliasInput) SetTemplateVersionNumber(v int64) *CreateTemplateAliasInput { + s.TemplateVersionNumber = &v + return s +} + +type CreateTemplateAliasOutput struct { + _ struct{} `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // Information about the template alias. + TemplateAlias *TemplateAlias `type:"structure"` +} + +// String returns the string representation +func (s CreateTemplateAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTemplateAliasOutput) GoString() string { + return s.String() +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateTemplateAliasOutput) SetRequestId(v string) *CreateTemplateAliasOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateTemplateAliasOutput) SetStatus(v int64) *CreateTemplateAliasOutput { + s.Status = &v + return s +} + +// SetTemplateAlias sets the TemplateAlias field's value. +func (s *CreateTemplateAliasOutput) SetTemplateAlias(v *TemplateAlias) *CreateTemplateAliasOutput { + s.TemplateAlias = v + return s +} + +type CreateTemplateInput struct { + _ struct{} `type:"structure"` + + // The ID for the AWS account that the group is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // A display name for the template. + Name *string `min:"1" type:"string"` + + // A list of resource permissions to be set on the template. 
+ Permissions []*ResourcePermission `min:"1" type:"list"` + + // The entity that you are using as a source when you create the template. In + // SourceEntity, you specify the type of object you're using as source: SourceTemplate + // for a template or SourceAnalysis for an analysis. Both of these require an + // Amazon Resource Name (ARN). For SourceTemplate, specify the ARN of the source + // template. For SourceAnalysis, specify the ARN of the source analysis. The + // SourceTemplate ARN can contain any AWS Account and any QuickSight-supported + // AWS Region. + // + // Use the DataSetReferences entity within SourceTemplate or SourceAnalysis + // to list the replacement datasets for the placeholders listed in the original. + // The schema in each dataset must match its placeholder. + // + // SourceEntity is a required field + SourceEntity *TemplateSourceEntity `type:"structure" required:"true"` + + // Contains a map of the key-value pairs for the resource tag or tags assigned + // to the resource. + Tags []*Tag `min:"1" type:"list"` + + // An ID for the template that you want to create. This template is unique per + // AWS Region in each AWS account. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + + // A description of the current template version being created. This API operation + // creates the first version of the template. Every time UpdateTemplate is called, + // a new version is created. Each version of the template maintains a description + // of the version in the VersionDescription field. + VersionDescription *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateTemplateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTemplateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
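+//
+// Usage sketch (illustrative, not part of the generated SDK): a template is
+// created from either a source analysis or a source template, so SourceEntity
+// must carry a TemplateSourceEntity with the corresponding ARN; that nested
+// type is assumed to follow the same builder pattern as this input.
+//
+//	input := (&CreateTemplateInput{}).
+//		SetAwsAccountId("111122223333").
+//		SetTemplateId("sales-template").
+//		SetName("Sales template").
+//		SetVersionDescription("first version")
+//	// input.SetSourceEntity(...) is required and checked by Validate below.
+//	if err := input.Validate(); err != nil {
+//		// AwsAccountId, TemplateId and SourceEntity are required
+//	}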
+func (s *CreateTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTemplateInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Permissions != nil && len(s.Permissions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Permissions", 1)) + } + if s.SourceEntity == nil { + invalidParams.Add(request.NewErrParamRequired("SourceEntity")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.TemplateId == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + } + if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VersionDescription", 1)) + } + if s.Permissions != nil { + for i, v := range s.Permissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.SourceEntity != nil { + if err := s.SourceEntity.Validate(); err != nil { + invalidParams.AddNested("SourceEntity", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateTemplateInput) SetAwsAccountId(v string) *CreateTemplateInput { + s.AwsAccountId = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateTemplateInput) SetName(v string) *CreateTemplateInput { + s.Name = &v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *CreateTemplateInput) SetPermissions(v []*ResourcePermission) *CreateTemplateInput { + s.Permissions = v + return s +} + +// SetSourceEntity sets the SourceEntity field's value. +func (s *CreateTemplateInput) SetSourceEntity(v *TemplateSourceEntity) *CreateTemplateInput { + s.SourceEntity = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateTemplateInput) SetTags(v []*Tag) *CreateTemplateInput { + s.Tags = v + return s +} + +// SetTemplateId sets the TemplateId field's value. +func (s *CreateTemplateInput) SetTemplateId(v string) *CreateTemplateInput { + s.TemplateId = &v + return s +} + +// SetVersionDescription sets the VersionDescription field's value. +func (s *CreateTemplateInput) SetVersionDescription(v string) *CreateTemplateInput { + s.VersionDescription = &v + return s +} + +type CreateTemplateOutput struct { + _ struct{} `type:"structure"` + + // The ARN for the template. + Arn *string `type:"string"` + + // The template creation status. + CreationStatus *string `type:"string" enum:"ResourceStatus"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The ID of the template. 
+ TemplateId *string `min:"1" type:"string"` + + // The ARN for the template, including the version information of the first + // version. + VersionArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTemplateOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateTemplateOutput) SetArn(v string) *CreateTemplateOutput { + s.Arn = &v + return s +} + +// SetCreationStatus sets the CreationStatus field's value. +func (s *CreateTemplateOutput) SetCreationStatus(v string) *CreateTemplateOutput { + s.CreationStatus = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateTemplateOutput) SetRequestId(v string) *CreateTemplateOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateTemplateOutput) SetStatus(v int64) *CreateTemplateOutput { + s.Status = &v + return s +} + +// SetTemplateId sets the TemplateId field's value. +func (s *CreateTemplateOutput) SetTemplateId(v string) *CreateTemplateOutput { + s.TemplateId = &v + return s +} + +// SetVersionArn sets the VersionArn field's value. +func (s *CreateTemplateOutput) SetVersionArn(v string) *CreateTemplateOutput { + s.VersionArn = &v + return s +} + +type CreateThemeAliasInput struct { + _ struct{} `type:"structure"` + + // The name that you want to give to the theme alias that you are creating. + // The alias name can't begin with a $. Alias names that start with $ are reserved + // by Amazon QuickSight. + // + // AliasName is a required field + AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` + + // The ID of the AWS account that contains the theme for the new theme alias. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // An ID for the theme alias. + // + // ThemeId is a required field + ThemeId *string `location:"uri" locationName:"ThemeId" min:"1" type:"string" required:"true"` + + // The version number of the theme. + // + // ThemeVersionNumber is a required field + ThemeVersionNumber *int64 `min:"1" type:"long" required:"true"` +} + +// String returns the string representation +func (s CreateThemeAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateThemeAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
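As a usage sketch for the alias types above: the generated Set* helpers chain, and Validate catches missing or too-short parameters before any request is made. This assumes a configured *quicksight.QuickSight client, the ThemeAlias shape defined elsewhere in this package, and illustrative IDs.

func createStagingThemeAlias(svc *quicksight.QuickSight) error {
	// Alias names beginning with $ are reserved by QuickSight, per the
	// AliasName documentation above.
	input := new(quicksight.CreateThemeAliasInput).
		SetAwsAccountId("111122223333").
		SetThemeId("corporate-theme").
		SetAliasName("STAGING").
		SetThemeVersionNumber(1)
	if err := input.Validate(); err != nil {
		return err
	}
	out, err := svc.CreateThemeAlias(input)
	if err != nil {
		return err
	}
	fmt.Println("theme alias ARN:", aws.StringValue(out.ThemeAlias.Arn))
	return nil
}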
+func (s *CreateThemeAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateThemeAliasInput"} + if s.AliasName == nil { + invalidParams.Add(request.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.ThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeId")) + } + if s.ThemeId != nil && len(*s.ThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThemeId", 1)) + } + if s.ThemeVersionNumber == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeVersionNumber")) + } + if s.ThemeVersionNumber != nil && *s.ThemeVersionNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("ThemeVersionNumber", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasName sets the AliasName field's value. +func (s *CreateThemeAliasInput) SetAliasName(v string) *CreateThemeAliasInput { + s.AliasName = &v + return s +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateThemeAliasInput) SetAwsAccountId(v string) *CreateThemeAliasInput { + s.AwsAccountId = &v + return s +} + +// SetThemeId sets the ThemeId field's value. +func (s *CreateThemeAliasInput) SetThemeId(v string) *CreateThemeAliasInput { + s.ThemeId = &v + return s +} + +// SetThemeVersionNumber sets the ThemeVersionNumber field's value. +func (s *CreateThemeAliasInput) SetThemeVersionNumber(v int64) *CreateThemeAliasInput { + s.ThemeVersionNumber = &v + return s +} + +type CreateThemeAliasOutput struct { + _ struct{} `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // Information about the theme alias. + ThemeAlias *ThemeAlias `type:"structure"` +} + +// String returns the string representation +func (s CreateThemeAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateThemeAliasOutput) GoString() string { + return s.String() +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateThemeAliasOutput) SetRequestId(v string) *CreateThemeAliasOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateThemeAliasOutput) SetStatus(v int64) *CreateThemeAliasOutput { + s.Status = &v + return s +} + +// SetThemeAlias sets the ThemeAlias field's value. +func (s *CreateThemeAliasOutput) SetThemeAlias(v *ThemeAlias) *CreateThemeAliasOutput { + s.ThemeAlias = v + return s +} + +type CreateThemeInput struct { + _ struct{} `type:"structure"` + + // The ID of the AWS account where you want to store the new theme. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID of the theme that a custom theme will inherit from. All themes inherit + // from one of the starting themes defined by Amazon QuickSight. For a list + // of the starting themes, use ListThemes or choose Themes from within a QuickSight + // analysis. 
+ // + // BaseThemeId is a required field + BaseThemeId *string `min:"1" type:"string" required:"true"` + + // The theme configuration, which contains the theme display properties. + // + // Configuration is a required field + Configuration *ThemeConfiguration `type:"structure" required:"true"` + + // A display name for the theme. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A valid grouping of resource permissions to apply to the new theme. + Permissions []*ResourcePermission `min:"1" type:"list"` + + // A map of the key-value pairs for the resource tag or tags that you want to + // add to the resource. + Tags []*Tag `min:"1" type:"list"` + + // An ID for the theme that you want to create. The theme ID is unique per AWS + // Region in each AWS account. + // + // ThemeId is a required field + ThemeId *string `location:"uri" locationName:"ThemeId" min:"1" type:"string" required:"true"` + + // A description of the first version of the theme that you're creating. Every + // time UpdateTheme is called, a new version is created. Each version of the + // theme has a description of the version in the VersionDescription field. + VersionDescription *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateThemeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateThemeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateThemeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateThemeInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.BaseThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("BaseThemeId")) + } + if s.BaseThemeId != nil && len(*s.BaseThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BaseThemeId", 1)) + } + if s.Configuration == nil { + invalidParams.Add(request.NewErrParamRequired("Configuration")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Permissions != nil && len(s.Permissions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Permissions", 1)) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.ThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeId")) + } + if s.ThemeId != nil && len(*s.ThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThemeId", 1)) + } + if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VersionDescription", 1)) + } + if s.Permissions != nil { + for i, v := range s.Permissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// 
SetAwsAccountId sets the AwsAccountId field's value. +func (s *CreateThemeInput) SetAwsAccountId(v string) *CreateThemeInput { + s.AwsAccountId = &v + return s +} + +// SetBaseThemeId sets the BaseThemeId field's value. +func (s *CreateThemeInput) SetBaseThemeId(v string) *CreateThemeInput { + s.BaseThemeId = &v + return s +} + +// SetConfiguration sets the Configuration field's value. +func (s *CreateThemeInput) SetConfiguration(v *ThemeConfiguration) *CreateThemeInput { + s.Configuration = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateThemeInput) SetName(v string) *CreateThemeInput { + s.Name = &v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *CreateThemeInput) SetPermissions(v []*ResourcePermission) *CreateThemeInput { + s.Permissions = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateThemeInput) SetTags(v []*Tag) *CreateThemeInput { + s.Tags = v + return s +} + +// SetThemeId sets the ThemeId field's value. +func (s *CreateThemeInput) SetThemeId(v string) *CreateThemeInput { + s.ThemeId = &v + return s +} + +// SetVersionDescription sets the VersionDescription field's value. +func (s *CreateThemeInput) SetVersionDescription(v string) *CreateThemeInput { + s.VersionDescription = &v + return s +} + +type CreateThemeOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the theme. + Arn *string `type:"string"` + + // The theme creation status. + CreationStatus *string `type:"string" enum:"ResourceStatus"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The ID of the theme. + ThemeId *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) for the new theme. + VersionArn *string `type:"string"` +} + +// String returns the string representation +func (s CreateThemeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateThemeOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateThemeOutput) SetArn(v string) *CreateThemeOutput { + s.Arn = &v + return s +} + +// SetCreationStatus sets the CreationStatus field's value. +func (s *CreateThemeOutput) SetCreationStatus(v string) *CreateThemeOutput { + s.CreationStatus = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *CreateThemeOutput) SetRequestId(v string) *CreateThemeOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *CreateThemeOutput) SetStatus(v int64) *CreateThemeOutput { + s.Status = &v + return s +} + +// SetThemeId sets the ThemeId field's value. +func (s *CreateThemeOutput) SetThemeId(v string) *CreateThemeOutput { + s.ThemeId = &v + return s +} + +// SetVersionArn sets the VersionArn field's value. +func (s *CreateThemeOutput) SetVersionArn(v string) *CreateThemeOutput { + s.VersionArn = &v + return s +} + +// The combination of user name and password that are used as credentials. +type CredentialPair struct { + _ struct{} `type:"structure"` + + // A set of alternate data source parameters that you want to share for these + // credentials. The credentials are applied in tandem with the data source parameters + // when you copy a data source by using a create or update request. 
The API + // operation compares the DataSourceParameters structure that's in the request + // with the structures in the AlternateDataSourceParameters allowlist. If the + // structures are an exact match, the request is allowed to use the new data + // source with the existing credentials. If the AlternateDataSourceParameters + // list is null, the DataSourceParameters originally used with these Credentials + // is automatically allowed. + AlternateDataSourceParameters []*DataSourceParameters `min:"1" type:"list"` + + // Password. + // + // Password is a required field + Password *string `min:"1" type:"string" required:"true"` + + // User name. + // + // Username is a required field + Username *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CredentialPair) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CredentialPair) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CredentialPair) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CredentialPair"} + if s.AlternateDataSourceParameters != nil && len(s.AlternateDataSourceParameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AlternateDataSourceParameters", 1)) + } + if s.Password == nil { + invalidParams.Add(request.NewErrParamRequired("Password")) + } + if s.Password != nil && len(*s.Password) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Password", 1)) + } + if s.Username == nil { + invalidParams.Add(request.NewErrParamRequired("Username")) + } + if s.Username != nil && len(*s.Username) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + } + if s.AlternateDataSourceParameters != nil { + for i, v := range s.AlternateDataSourceParameters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AlternateDataSourceParameters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAlternateDataSourceParameters sets the AlternateDataSourceParameters field's value. +func (s *CredentialPair) SetAlternateDataSourceParameters(v []*DataSourceParameters) *CredentialPair { + s.AlternateDataSourceParameters = v + return s +} + +// SetPassword sets the Password field's value. +func (s *CredentialPair) SetPassword(v string) *CredentialPair { + s.Password = &v + return s +} + +// SetUsername sets the Username field's value. +func (s *CredentialPair) SetUsername(v string) *CredentialPair { + s.Username = &v + return s +} + +// A physical table type built from the results of the custom SQL query. +type CustomSql struct { + _ struct{} `type:"structure"` + + // The column schema from the SQL query result set. + Columns []*InputColumn `min:"1" type:"list"` + + // The Amazon Resource Name (ARN) of the data source. + // + // DataSourceArn is a required field + DataSourceArn *string `type:"string" required:"true"` + + // A display name for the SQL query result. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The SQL query. 
+ // + // SqlQuery is a required field + SqlQuery *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CustomSql) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomSql) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CustomSql) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CustomSql"} + if s.Columns != nil && len(s.Columns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Columns", 1)) + } + if s.DataSourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceArn")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.SqlQuery == nil { + invalidParams.Add(request.NewErrParamRequired("SqlQuery")) + } + if s.SqlQuery != nil && len(*s.SqlQuery) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SqlQuery", 1)) + } + if s.Columns != nil { + for i, v := range s.Columns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Columns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetColumns sets the Columns field's value. +func (s *CustomSql) SetColumns(v []*InputColumn) *CustomSql { + s.Columns = v + return s +} + +// SetDataSourceArn sets the DataSourceArn field's value. +func (s *CustomSql) SetDataSourceArn(v string) *CustomSql { + s.DataSourceArn = &v + return s +} + +// SetName sets the Name field's value. +func (s *CustomSql) SetName(v string) *CustomSql { + s.Name = &v + return s +} + +// SetSqlQuery sets the SqlQuery field's value. +func (s *CustomSql) SetSqlQuery(v string) *CustomSql { + s.SqlQuery = &v + return s +} + +// Dashboard. +type Dashboard struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + Arn *string `type:"string"` + + // The time that this dataset was created. + CreatedTime *time.Time `type:"timestamp"` + + // Dashboard ID. + DashboardId *string `min:"1" type:"string"` + + // The last time that this dataset was published. + LastPublishedTime *time.Time `type:"timestamp"` + + // The last time that this dataset was updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // A display name for the dashboard. + Name *string `min:"1" type:"string"` + + // Version. + Version *DashboardVersion `type:"structure"` +} + +// String returns the string representation +func (s Dashboard) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Dashboard) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *Dashboard) SetArn(v string) *Dashboard { + s.Arn = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *Dashboard) SetCreatedTime(v time.Time) *Dashboard { + s.CreatedTime = &v + return s +} + +// SetDashboardId sets the DashboardId field's value. +func (s *Dashboard) SetDashboardId(v string) *Dashboard { + s.DashboardId = &v + return s +} + +// SetLastPublishedTime sets the LastPublishedTime field's value. +func (s *Dashboard) SetLastPublishedTime(v time.Time) *Dashboard { + s.LastPublishedTime = &v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. 
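Stepping back to the CustomSql type defined above, a short sketch of declaring a custom-SQL physical table and running the generated validation. The InputColumn shape (Name plus a Type drawn from the InputColumnDataType enum) is defined elsewhere in this package and is assumed here; the ARN is a placeholder.

func monthlyTotalsTable() (*quicksight.CustomSql, error) {
	table := &quicksight.CustomSql{
		DataSourceArn: aws.String("arn:aws:quicksight:us-east-1:111122223333:datasource/pg-sales"),
		Name:          aws.String("monthly_totals"),
		SqlQuery:      aws.String("SELECT region, SUM(amount) AS total FROM sales GROUP BY region"),
		Columns: []*quicksight.InputColumn{
			{Name: aws.String("region"), Type: aws.String("STRING")},
			{Name: aws.String("total"), Type: aws.String("DECIMAL")},
		},
	}
	// Validate also walks the nested Columns entries, as generated above.
	if err := table.Validate(); err != nil {
		return nil, err
	}
	return table, nil
}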
+func (s *Dashboard) SetLastUpdatedTime(v time.Time) *Dashboard { + s.LastUpdatedTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *Dashboard) SetName(v string) *Dashboard { + s.Name = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *Dashboard) SetVersion(v *DashboardVersion) *Dashboard { + s.Version = v + return s +} + +// Dashboard error. +type DashboardError struct { + _ struct{} `type:"structure"` + + // Message. + Message *string `type:"string"` + + // Type. + Type *string `type:"string" enum:"DashboardErrorType"` +} + +// String returns the string representation +func (s DashboardError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DashboardError) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *DashboardError) SetMessage(v string) *DashboardError { + s.Message = &v + return s +} + +// SetType sets the Type field's value. +func (s *DashboardError) SetType(v string) *DashboardError { + s.Type = &v + return s +} + +// Dashboard publish options. +type DashboardPublishOptions struct { + _ struct{} `type:"structure"` + + // Ad hoc (one-time) filtering option. + AdHocFilteringOption *AdHocFilteringOption `type:"structure"` + + // Export to .csv option. + ExportToCSVOption *ExportToCSVOption `type:"structure"` + + // Sheet controls option. + SheetControlsOption *SheetControlsOption `type:"structure"` +} + +// String returns the string representation +func (s DashboardPublishOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DashboardPublishOptions) GoString() string { + return s.String() +} + +// SetAdHocFilteringOption sets the AdHocFilteringOption field's value. +func (s *DashboardPublishOptions) SetAdHocFilteringOption(v *AdHocFilteringOption) *DashboardPublishOptions { + s.AdHocFilteringOption = v + return s +} + +// SetExportToCSVOption sets the ExportToCSVOption field's value. +func (s *DashboardPublishOptions) SetExportToCSVOption(v *ExportToCSVOption) *DashboardPublishOptions { + s.ExportToCSVOption = v + return s +} + +// SetSheetControlsOption sets the SheetControlsOption field's value. +func (s *DashboardPublishOptions) SetSheetControlsOption(v *SheetControlsOption) *DashboardPublishOptions { + s.SheetControlsOption = v + return s +} + +// A filter that you apply when searching for dashboards. +type DashboardSearchFilter struct { + _ struct{} `type:"structure"` + + // The name of the value that you want to use as a filter, for example, "Name": + // "QUICKSIGHT_USER". + Name *string `type:"string" enum:"DashboardFilterAttribute"` + + // The comparison operator that you want to use as a filter, for example, "Operator": + // "StringEquals". + // + // Operator is a required field + Operator *string `type:"string" required:"true" enum:"FilterOperator"` + + // The value of the named item, in this case QUICKSIGHT_USER, that you want + // to use as a filter, for example, "Value": "arn:aws:quicksight:us-east-1:1:user/default/UserName1". + Value *string `type:"string"` +} + +// String returns the string representation +func (s DashboardSearchFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DashboardSearchFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
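To make the DashboardSearchFilter documentation above concrete, a sketch that builds the QUICKSIGHT_USER filter it describes. The DashboardFilterAttribute and FilterOperator enum constants are generated elsewhere in this package and are assumed here; the resulting slice would typically be passed as the Filters of a SearchDashboards request.

func dashboardsForUserFilter(userArn string) []*quicksight.DashboardSearchFilter {
	// Matches dashboards that the given QuickSight user owns or can view,
	// mirroring the "Name": "QUICKSIGHT_USER" example in the field docs.
	return []*quicksight.DashboardSearchFilter{{
		Name:     aws.String(quicksight.DashboardFilterAttributeQuicksightUser),
		Operator: aws.String(quicksight.FilterOperatorStringEquals),
		Value:    aws.String(userArn),
	}}
}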
+func (s *DashboardSearchFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DashboardSearchFilter"} + if s.Operator == nil { + invalidParams.Add(request.NewErrParamRequired("Operator")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DashboardSearchFilter) SetName(v string) *DashboardSearchFilter { + s.Name = &v + return s +} + +// SetOperator sets the Operator field's value. +func (s *DashboardSearchFilter) SetOperator(v string) *DashboardSearchFilter { + s.Operator = &v + return s +} + +// SetValue sets the Value field's value. +func (s *DashboardSearchFilter) SetValue(v string) *DashboardSearchFilter { + s.Value = &v + return s +} + +// Dashboard source entity. +type DashboardSourceEntity struct { + _ struct{} `type:"structure"` + + // Source template. + SourceTemplate *DashboardSourceTemplate `type:"structure"` +} + +// String returns the string representation +func (s DashboardSourceEntity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DashboardSourceEntity) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DashboardSourceEntity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DashboardSourceEntity"} + if s.SourceTemplate != nil { + if err := s.SourceTemplate.Validate(); err != nil { + invalidParams.AddNested("SourceTemplate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSourceTemplate sets the SourceTemplate field's value. +func (s *DashboardSourceEntity) SetSourceTemplate(v *DashboardSourceTemplate) *DashboardSourceEntity { + s.SourceTemplate = v + return s +} + +// Dashboard source template. +type DashboardSourceTemplate struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // Dataset references. + // + // DataSetReferences is a required field + DataSetReferences []*DataSetReference `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s DashboardSourceTemplate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DashboardSourceTemplate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DashboardSourceTemplate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DashboardSourceTemplate"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.DataSetReferences == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetReferences")) + } + if s.DataSetReferences != nil && len(s.DataSetReferences) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetReferences", 1)) + } + if s.DataSetReferences != nil { + for i, v := range s.DataSetReferences { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DataSetReferences", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. 
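A small sketch of wiring the source-entity types above together: a DashboardSourceTemplate wrapped in a DashboardSourceEntity, with each DataSetReference placeholder matching a placeholder declared in the source template. The ARNs are placeholders.

func dashboardSourceFromTemplate(templateArn string) *quicksight.DashboardSourceEntity {
	return &quicksight.DashboardSourceEntity{
		SourceTemplate: &quicksight.DashboardSourceTemplate{
			Arn: aws.String(templateArn),
			DataSetReferences: []*quicksight.DataSetReference{{
				DataSetArn:         aws.String("arn:aws:quicksight:us-east-1:111122223333:dataset/sales"),
				DataSetPlaceholder: aws.String("sales"),
			}},
		},
	}
}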
+func (s *DashboardSourceTemplate) SetArn(v string) *DashboardSourceTemplate { + s.Arn = &v + return s +} + +// SetDataSetReferences sets the DataSetReferences field's value. +func (s *DashboardSourceTemplate) SetDataSetReferences(v []*DataSetReference) *DashboardSourceTemplate { + s.DataSetReferences = v + return s +} + +// Dashboard summary. +type DashboardSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + Arn *string `type:"string"` + + // The time that this dashboard was created. + CreatedTime *time.Time `type:"timestamp"` + + // Dashboard ID. + DashboardId *string `min:"1" type:"string"` + + // The last time that this dashboard was published. + LastPublishedTime *time.Time `type:"timestamp"` + + // The last time that this dashboard was updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // A display name for the dashboard. + Name *string `min:"1" type:"string"` + + // Published version number. + PublishedVersionNumber *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s DashboardSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DashboardSummary) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DashboardSummary) SetArn(v string) *DashboardSummary { + s.Arn = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *DashboardSummary) SetCreatedTime(v time.Time) *DashboardSummary { + s.CreatedTime = &v + return s +} + +// SetDashboardId sets the DashboardId field's value. +func (s *DashboardSummary) SetDashboardId(v string) *DashboardSummary { + s.DashboardId = &v + return s +} + +// SetLastPublishedTime sets the LastPublishedTime field's value. +func (s *DashboardSummary) SetLastPublishedTime(v time.Time) *DashboardSummary { + s.LastPublishedTime = &v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *DashboardSummary) SetLastUpdatedTime(v time.Time) *DashboardSummary { + s.LastUpdatedTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *DashboardSummary) SetName(v string) *DashboardSummary { + s.Name = &v + return s +} + +// SetPublishedVersionNumber sets the PublishedVersionNumber field's value. +func (s *DashboardSummary) SetPublishedVersionNumber(v int64) *DashboardSummary { + s.PublishedVersionNumber = &v + return s +} + +// Dashboard version. +type DashboardVersion struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + Arn *string `type:"string"` + + // The time that this dashboard version was created. + CreatedTime *time.Time `type:"timestamp"` + + // The Amazon Resource Numbers (ARNs) for the datasets that are associated with + // this version of the dashboard. + DataSetArns []*string `type:"list"` + + // Description. + Description *string `min:"1" type:"string"` + + // Errors associated with this dashboard version. + Errors []*DashboardError `min:"1" type:"list"` + + // A list of the associated sheets with the unique identifier and name of each + // sheet. + Sheets []*Sheet `type:"list"` + + // Source entity ARN. + SourceEntityArn *string `type:"string"` + + // The HTTP status of the request. + Status *string `type:"string" enum:"ResourceStatus"` + + // The ARN of the theme associated with a version of the dashboard. + ThemeArn *string `type:"string"` + + // Version number for this version of the dashboard. 
+ VersionNumber *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s DashboardVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DashboardVersion) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DashboardVersion) SetArn(v string) *DashboardVersion { + s.Arn = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *DashboardVersion) SetCreatedTime(v time.Time) *DashboardVersion { + s.CreatedTime = &v + return s +} + +// SetDataSetArns sets the DataSetArns field's value. +func (s *DashboardVersion) SetDataSetArns(v []*string) *DashboardVersion { + s.DataSetArns = v + return s +} + +// SetDescription sets the Description field's value. +func (s *DashboardVersion) SetDescription(v string) *DashboardVersion { + s.Description = &v + return s +} + +// SetErrors sets the Errors field's value. +func (s *DashboardVersion) SetErrors(v []*DashboardError) *DashboardVersion { + s.Errors = v + return s +} + +// SetSheets sets the Sheets field's value. +func (s *DashboardVersion) SetSheets(v []*Sheet) *DashboardVersion { + s.Sheets = v + return s +} + +// SetSourceEntityArn sets the SourceEntityArn field's value. +func (s *DashboardVersion) SetSourceEntityArn(v string) *DashboardVersion { + s.SourceEntityArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DashboardVersion) SetStatus(v string) *DashboardVersion { + s.Status = &v + return s +} + +// SetThemeArn sets the ThemeArn field's value. +func (s *DashboardVersion) SetThemeArn(v string) *DashboardVersion { + s.ThemeArn = &v + return s +} + +// SetVersionNumber sets the VersionNumber field's value. +func (s *DashboardVersion) SetVersionNumber(v int64) *DashboardVersion { + s.VersionNumber = &v + return s +} + +// Dashboard version summary. +type DashboardVersionSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + Arn *string `type:"string"` + + // The time that this dashboard version was created. + CreatedTime *time.Time `type:"timestamp"` + + // Description. + Description *string `min:"1" type:"string"` + + // Source entity ARN. + SourceEntityArn *string `type:"string"` + + // The HTTP status of the request. + Status *string `type:"string" enum:"ResourceStatus"` + + // Version number. + VersionNumber *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s DashboardVersionSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DashboardVersionSummary) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DashboardVersionSummary) SetArn(v string) *DashboardVersionSummary { + s.Arn = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *DashboardVersionSummary) SetCreatedTime(v time.Time) *DashboardVersionSummary { + s.CreatedTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *DashboardVersionSummary) SetDescription(v string) *DashboardVersionSummary { + s.Description = &v + return s +} + +// SetSourceEntityArn sets the SourceEntityArn field's value. +func (s *DashboardVersionSummary) SetSourceEntityArn(v string) *DashboardVersionSummary { + s.SourceEntityArn = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *DashboardVersionSummary) SetStatus(v string) *DashboardVersionSummary { + s.Status = &v + return s +} + +// SetVersionNumber sets the VersionNumber field's value. +func (s *DashboardVersionSummary) SetVersionNumber(v int64) *DashboardVersionSummary { + s.VersionNumber = &v + return s +} + +// The theme colors that are used for data colors in charts. The colors description +// is a hexidecimal color code that consists of six alphanumerical characters, +// prefixed with #, for example #37BFF5. +type DataColorPalette struct { + _ struct{} `type:"structure"` + + // The hexadecimal codes for the colors. + Colors []*string `type:"list"` + + // The hexadecimal code of a color that applies to charts where a lack of data + // is highlighted. + EmptyFillColor *string `type:"string"` + + // The minimum and maximum hexadecimal codes that describe a color gradient. + MinMaxGradient []*string `type:"list"` +} + +// String returns the string representation +func (s DataColorPalette) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataColorPalette) GoString() string { + return s.String() +} + +// SetColors sets the Colors field's value. +func (s *DataColorPalette) SetColors(v []*string) *DataColorPalette { + s.Colors = v + return s +} + +// SetEmptyFillColor sets the EmptyFillColor field's value. +func (s *DataColorPalette) SetEmptyFillColor(v string) *DataColorPalette { + s.EmptyFillColor = &v + return s +} + +// SetMinMaxGradient sets the MinMaxGradient field's value. +func (s *DataColorPalette) SetMinMaxGradient(v []*string) *DataColorPalette { + s.MinMaxGradient = v + return s +} + +// Dataset. +type DataSet struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + Arn *string `type:"string"` + + // Groupings of columns that work together in certain Amazon QuickSight features. + // Currently, only geospatial hierarchy is supported. + ColumnGroups []*ColumnGroup `min:"1" type:"list"` + + // The amount of SPICE capacity used by this dataset. This is 0 if the dataset + // isn't imported into SPICE. + ConsumedSpiceCapacityInBytes *int64 `type:"long"` + + // The time that this dataset was created. + CreatedTime *time.Time `type:"timestamp"` + + // The ID of the dataset. + DataSetId *string `type:"string"` + + // Indicates whether you want to import the data into SPICE. + ImportMode *string `type:"string" enum:"DataSetImportMode"` + + // The last time that this dataset was updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // Configures the combination and transformation of the data from the physical + // tables. + LogicalTableMap map[string]*LogicalTable `min:"1" type:"map"` + + // A display name for the dataset. + Name *string `min:"1" type:"string"` + + // The list of columns after all transforms. These columns are available in + // templates, analyses, and dashboards. + OutputColumns []*OutputColumn `type:"list"` + + // Declares the physical tables that are available in the underlying data sources. + PhysicalTableMap map[string]*PhysicalTable `min:"1" type:"map"` + + // The row-level security configuration for the dataset. + RowLevelPermissionDataSet *RowLevelPermissionDataSet `type:"structure"` +} + +// String returns the string representation +func (s DataSet) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSet) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. 
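Returning to the DataColorPalette type above, a sketch that builds a palette from hexadecimal color codes such as the #37BFF5 example in its documentation; aws.StringSlice converts a []string into the []*string the generated struct expects. The specific colors are illustrative.

func bluePalette() *quicksight.DataColorPalette {
	return &quicksight.DataColorPalette{
		Colors:         aws.StringSlice([]string{"#37BFF5", "#1D7FB8", "#0B3D66"}),
		EmptyFillColor: aws.String("#EEEEEE"),
		// Low and high ends of the gradient used for continuous measures.
		MinMaxGradient: aws.StringSlice([]string{"#E6F4FB", "#0B3D66"}),
	}
}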
+func (s *DataSet) SetArn(v string) *DataSet { + s.Arn = &v + return s +} + +// SetColumnGroups sets the ColumnGroups field's value. +func (s *DataSet) SetColumnGroups(v []*ColumnGroup) *DataSet { + s.ColumnGroups = v + return s +} + +// SetConsumedSpiceCapacityInBytes sets the ConsumedSpiceCapacityInBytes field's value. +func (s *DataSet) SetConsumedSpiceCapacityInBytes(v int64) *DataSet { + s.ConsumedSpiceCapacityInBytes = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *DataSet) SetCreatedTime(v time.Time) *DataSet { + s.CreatedTime = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. +func (s *DataSet) SetDataSetId(v string) *DataSet { + s.DataSetId = &v + return s +} + +// SetImportMode sets the ImportMode field's value. +func (s *DataSet) SetImportMode(v string) *DataSet { + s.ImportMode = &v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *DataSet) SetLastUpdatedTime(v time.Time) *DataSet { + s.LastUpdatedTime = &v + return s +} + +// SetLogicalTableMap sets the LogicalTableMap field's value. +func (s *DataSet) SetLogicalTableMap(v map[string]*LogicalTable) *DataSet { + s.LogicalTableMap = v + return s +} + +// SetName sets the Name field's value. +func (s *DataSet) SetName(v string) *DataSet { + s.Name = &v + return s +} + +// SetOutputColumns sets the OutputColumns field's value. +func (s *DataSet) SetOutputColumns(v []*OutputColumn) *DataSet { + s.OutputColumns = v + return s +} + +// SetPhysicalTableMap sets the PhysicalTableMap field's value. +func (s *DataSet) SetPhysicalTableMap(v map[string]*PhysicalTable) *DataSet { + s.PhysicalTableMap = v + return s +} + +// SetRowLevelPermissionDataSet sets the RowLevelPermissionDataSet field's value. +func (s *DataSet) SetRowLevelPermissionDataSet(v *RowLevelPermissionDataSet) *DataSet { + s.RowLevelPermissionDataSet = v + return s +} + +// Dataset configuration. +type DataSetConfiguration struct { + _ struct{} `type:"structure"` + + // A structure containing the list of column group schemas. + ColumnGroupSchemaList []*ColumnGroupSchema `type:"list"` + + // Dataset schema. + DataSetSchema *DataSetSchema `type:"structure"` + + // Placeholder. + Placeholder *string `type:"string"` +} + +// String returns the string representation +func (s DataSetConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSetConfiguration) GoString() string { + return s.String() +} + +// SetColumnGroupSchemaList sets the ColumnGroupSchemaList field's value. +func (s *DataSetConfiguration) SetColumnGroupSchemaList(v []*ColumnGroupSchema) *DataSetConfiguration { + s.ColumnGroupSchemaList = v + return s +} + +// SetDataSetSchema sets the DataSetSchema field's value. +func (s *DataSetConfiguration) SetDataSetSchema(v *DataSetSchema) *DataSetConfiguration { + s.DataSetSchema = v + return s +} + +// SetPlaceholder sets the Placeholder field's value. +func (s *DataSetConfiguration) SetPlaceholder(v string) *DataSetConfiguration { + s.Placeholder = &v + return s +} + +// Dataset reference. +type DataSetReference struct { + _ struct{} `type:"structure"` + + // Dataset Amazon Resource Name (ARN). + // + // DataSetArn is a required field + DataSetArn *string `type:"string" required:"true"` + + // Dataset placeholder. 
+ // + // DataSetPlaceholder is a required field + DataSetPlaceholder *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DataSetReference) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSetReference) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DataSetReference) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DataSetReference"} + if s.DataSetArn == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetArn")) + } + if s.DataSetPlaceholder == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetPlaceholder")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataSetArn sets the DataSetArn field's value. +func (s *DataSetReference) SetDataSetArn(v string) *DataSetReference { + s.DataSetArn = &v + return s +} + +// SetDataSetPlaceholder sets the DataSetPlaceholder field's value. +func (s *DataSetReference) SetDataSetPlaceholder(v string) *DataSetReference { + s.DataSetPlaceholder = &v + return s +} + +// Dataset schema. +type DataSetSchema struct { + _ struct{} `type:"structure"` + + // A structure containing the list of column schemas. + ColumnSchemaList []*ColumnSchema `type:"list"` +} + +// String returns the string representation +func (s DataSetSchema) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSetSchema) GoString() string { + return s.String() +} + +// SetColumnSchemaList sets the ColumnSchemaList field's value. +func (s *DataSetSchema) SetColumnSchemaList(v []*ColumnSchema) *DataSetSchema { + s.ColumnSchemaList = v + return s +} + +// Dataset summary. +type DataSetSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dataset. + Arn *string `type:"string"` + + // The time that this dataset was created. + CreatedTime *time.Time `type:"timestamp"` + + // The ID of the dataset. + DataSetId *string `type:"string"` + + // Indicates whether you want to import the data into SPICE. + ImportMode *string `type:"string" enum:"DataSetImportMode"` + + // The last time that this dataset was updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // A display name for the dataset. + Name *string `min:"1" type:"string"` + + // The row-level security configuration for the dataset. + RowLevelPermissionDataSet *RowLevelPermissionDataSet `type:"structure"` +} + +// String returns the string representation +func (s DataSetSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSetSummary) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DataSetSummary) SetArn(v string) *DataSetSummary { + s.Arn = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *DataSetSummary) SetCreatedTime(v time.Time) *DataSetSummary { + s.CreatedTime = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. +func (s *DataSetSummary) SetDataSetId(v string) *DataSetSummary { + s.DataSetId = &v + return s +} + +// SetImportMode sets the ImportMode field's value. +func (s *DataSetSummary) SetImportMode(v string) *DataSetSummary { + s.ImportMode = &v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. 
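As a sketch of how the generated validation above reports problems, an intentionally incomplete DataSetReference is validated and the resulting error is inspected. The awserr assertion assumes the github.com/aws/aws-sdk-go/aws/awserr import; the ErrInvalidParams value returned by Validate satisfies that interface.

func showValidationFailure() {
	ref := &quicksight.DataSetReference{
		DataSetArn: aws.String("arn:aws:quicksight:us-east-1:111122223333:dataset/sales"),
		// DataSetPlaceholder is deliberately left nil to trigger a
		// "missing required field" validation error.
	}
	if err := ref.Validate(); err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			// Prints the InvalidParameter code and a summary of the failures.
			fmt.Println(aerr.Code(), aerr.Message())
		}
	}
}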
+func (s *DataSetSummary) SetLastUpdatedTime(v time.Time) *DataSetSummary { + s.LastUpdatedTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *DataSetSummary) SetName(v string) *DataSetSummary { + s.Name = &v + return s +} + +// SetRowLevelPermissionDataSet sets the RowLevelPermissionDataSet field's value. +func (s *DataSetSummary) SetRowLevelPermissionDataSet(v *RowLevelPermissionDataSet) *DataSetSummary { + s.RowLevelPermissionDataSet = v + return s +} + +// The structure of a data source. +type DataSource struct { + _ struct{} `type:"structure"` + + // A set of alternate data source parameters that you want to share for the + // credentials stored with this data source. The credentials are applied in + // tandem with the data source parameters when you copy a data source by using + // a create or update request. The API operation compares the DataSourceParameters + // structure that's in the request with the structures in the AlternateDataSourceParameters + // allowlist. If the structures are an exact match, the request is allowed to + // use the credentials from this existing data source. If the AlternateDataSourceParameters + // list is null, the Credentials originally used with this DataSourceParameters + // are automatically allowed. + AlternateDataSourceParameters []*DataSourceParameters `min:"1" type:"list"` + + // The Amazon Resource Name (ARN) of the data source. + Arn *string `type:"string"` + + // The time that this data source was created. + CreatedTime *time.Time `type:"timestamp"` + + // The ID of the data source. This ID is unique per AWS Region for each AWS + // account. + DataSourceId *string `type:"string"` + + // The parameters that Amazon QuickSight uses to connect to your underlying + // source. This is a variant type structure. For this structure to be valid, + // only one of the attributes can be non-null. + DataSourceParameters *DataSourceParameters `type:"structure"` + + // Error information from the last update or the creation of the data source. + ErrorInfo *DataSourceErrorInfo `type:"structure"` + + // The last time that this data source was updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // A display name for the data source. + Name *string `min:"1" type:"string"` + + // Secure Socket Layer (SSL) properties that apply when QuickSight connects + // to your underlying source. + SslProperties *SslProperties `type:"structure"` + + // The HTTP status of the request. + Status *string `type:"string" enum:"ResourceStatus"` + + // The type of the data source. This type indicates which database engine the + // data source connects to. + Type *string `type:"string" enum:"DataSourceType"` + + // The VPC connection information. You need to use this parameter only when + // you want QuickSight to use a VPC connection when connecting to your underlying + // source. + VpcConnectionProperties *VpcConnectionProperties `type:"structure"` +} + +// String returns the string representation +func (s DataSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSource) GoString() string { + return s.String() +} + +// SetAlternateDataSourceParameters sets the AlternateDataSourceParameters field's value. +func (s *DataSource) SetAlternateDataSourceParameters(v []*DataSourceParameters) *DataSource { + s.AlternateDataSourceParameters = v + return s +} + +// SetArn sets the Arn field's value. 
+func (s *DataSource) SetArn(v string) *DataSource { + s.Arn = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *DataSource) SetCreatedTime(v time.Time) *DataSource { + s.CreatedTime = &v + return s +} + +// SetDataSourceId sets the DataSourceId field's value. +func (s *DataSource) SetDataSourceId(v string) *DataSource { + s.DataSourceId = &v + return s +} + +// SetDataSourceParameters sets the DataSourceParameters field's value. +func (s *DataSource) SetDataSourceParameters(v *DataSourceParameters) *DataSource { + s.DataSourceParameters = v + return s +} + +// SetErrorInfo sets the ErrorInfo field's value. +func (s *DataSource) SetErrorInfo(v *DataSourceErrorInfo) *DataSource { + s.ErrorInfo = v + return s +} + +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *DataSource) SetLastUpdatedTime(v time.Time) *DataSource { + s.LastUpdatedTime = &v + return s +} + +// SetName sets the Name field's value. +func (s *DataSource) SetName(v string) *DataSource { + s.Name = &v + return s +} + +// SetSslProperties sets the SslProperties field's value. +func (s *DataSource) SetSslProperties(v *SslProperties) *DataSource { + s.SslProperties = v + return s +} + +// SetStatus sets the Status field's value. +func (s *DataSource) SetStatus(v string) *DataSource { + s.Status = &v + return s +} + +// SetType sets the Type field's value. +func (s *DataSource) SetType(v string) *DataSource { + s.Type = &v + return s +} + +// SetVpcConnectionProperties sets the VpcConnectionProperties field's value. +func (s *DataSource) SetVpcConnectionProperties(v *VpcConnectionProperties) *DataSource { + s.VpcConnectionProperties = v + return s +} + +// Data source credentials. This is a variant type structure. For this structure +// to be valid, only one of the attributes can be non-null. +type DataSourceCredentials struct { + _ struct{} `type:"structure" sensitive:"true"` + + // The Amazon Resource Name (ARN) of a data source that has the credential pair + // that you want to use. When CopySourceArn is not null, the credential pair + // from the data source in the ARN is used as the credentials for the DataSourceCredentials + // structure. + CopySourceArn *string `type:"string"` + + // Credential pair. For more information, see CredentialPair. + CredentialPair *CredentialPair `type:"structure"` +} + +// String returns the string representation +func (s DataSourceCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSourceCredentials) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DataSourceCredentials) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DataSourceCredentials"} + if s.CredentialPair != nil { + if err := s.CredentialPair.Validate(); err != nil { + invalidParams.AddNested("CredentialPair", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCopySourceArn sets the CopySourceArn field's value. +func (s *DataSourceCredentials) SetCopySourceArn(v string) *DataSourceCredentials { + s.CopySourceArn = &v + return s +} + +// SetCredentialPair sets the CredentialPair field's value. +func (s *DataSourceCredentials) SetCredentialPair(v *CredentialPair) *DataSourceCredentials { + s.CredentialPair = v + return s +} + +// Error information for the data source creation or update. 
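The DataSourceCredentials type above is a variant: exactly one of CopySourceArn or CredentialPair should be non-null. A short sketch of both forms, with placeholder values:

// Reuse the credential pair already stored on another data source.
func copiedCredentials(sourceDataSourceArn string) *quicksight.DataSourceCredentials {
	return &quicksight.DataSourceCredentials{
		CopySourceArn: aws.String(sourceDataSourceArn),
	}
}

// Or supply an explicit username/password pair directly.
func explicitCredentials(user, password string) *quicksight.DataSourceCredentials {
	return &quicksight.DataSourceCredentials{
		CredentialPair: &quicksight.CredentialPair{
			Username: aws.String(user),
			Password: aws.String(password),
		},
	}
}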
+type DataSourceErrorInfo struct { + _ struct{} `type:"structure"` + + // Error message. + Message *string `type:"string"` + + // Error type. + Type *string `type:"string" enum:"DataSourceErrorInfoType"` +} + +// String returns the string representation +func (s DataSourceErrorInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSourceErrorInfo) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. +func (s *DataSourceErrorInfo) SetMessage(v string) *DataSourceErrorInfo { + s.Message = &v + return s +} + +// SetType sets the Type field's value. +func (s *DataSourceErrorInfo) SetType(v string) *DataSourceErrorInfo { + s.Type = &v + return s +} + +// The parameters that Amazon QuickSight uses to connect to your underlying +// data source. This is a variant type structure. For this structure to be valid, +// only one of the attributes can be non-null. +type DataSourceParameters struct { + _ struct{} `type:"structure"` + + // Amazon Elasticsearch Service parameters. + AmazonElasticsearchParameters *AmazonElasticsearchParameters `type:"structure"` + + // Amazon Athena parameters. + AthenaParameters *AthenaParameters `type:"structure"` + + // Amazon Aurora MySQL parameters. + AuroraParameters *AuroraParameters `type:"structure"` + + // Aurora PostgreSQL parameters. + AuroraPostgreSqlParameters *AuroraPostgreSqlParameters `type:"structure"` + + // AWS IoT Analytics parameters. + AwsIotAnalyticsParameters *AwsIotAnalyticsParameters `type:"structure"` + + // Jira parameters. + JiraParameters *JiraParameters `type:"structure"` + + // MariaDB parameters. + MariaDbParameters *MariaDbParameters `type:"structure"` + + // MySQL parameters. + MySqlParameters *MySqlParameters `type:"structure"` + + // PostgreSQL parameters. + PostgreSqlParameters *PostgreSqlParameters `type:"structure"` + + // Presto parameters. + PrestoParameters *PrestoParameters `type:"structure"` + + // Amazon RDS parameters. + RdsParameters *RdsParameters `type:"structure"` + + // Amazon Redshift parameters. + RedshiftParameters *RedshiftParameters `type:"structure"` + + // S3 parameters. + S3Parameters *S3Parameters `type:"structure"` + + // ServiceNow parameters. + ServiceNowParameters *ServiceNowParameters `type:"structure"` + + // Snowflake parameters. + SnowflakeParameters *SnowflakeParameters `type:"structure"` + + // Spark parameters. + SparkParameters *SparkParameters `type:"structure"` + + // SQL Server parameters. + SqlServerParameters *SqlServerParameters `type:"structure"` + + // Teradata parameters. + TeradataParameters *TeradataParameters `type:"structure"` + + // Twitter parameters. + TwitterParameters *TwitterParameters `type:"structure"` +} + +// String returns the string representation +func (s DataSourceParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DataSourceParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
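Because DataSourceParameters is a variant type where only one member may be non-null, a typical caller sets exactly one of the engine-specific structures. A sketch using PostgreSqlParameters, whose Host, Port, and Database fields are defined elsewhere in this package and are assumed here; the connection values are placeholders.

func postgresParameters() *quicksight.DataSourceParameters {
	// Only the PostgreSqlParameters member is populated; setting more than
	// one member makes the structure invalid per the doc comment above.
	return &quicksight.DataSourceParameters{
		PostgreSqlParameters: &quicksight.PostgreSqlParameters{
			Host:     aws.String("db.example.internal"),
			Port:     aws.Int64(5432),
			Database: aws.String("sales"),
		},
	}
}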
+func (s *DataSourceParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DataSourceParameters"} + if s.AmazonElasticsearchParameters != nil { + if err := s.AmazonElasticsearchParameters.Validate(); err != nil { + invalidParams.AddNested("AmazonElasticsearchParameters", err.(request.ErrInvalidParams)) + } + } + if s.AthenaParameters != nil { + if err := s.AthenaParameters.Validate(); err != nil { + invalidParams.AddNested("AthenaParameters", err.(request.ErrInvalidParams)) + } + } + if s.AuroraParameters != nil { + if err := s.AuroraParameters.Validate(); err != nil { + invalidParams.AddNested("AuroraParameters", err.(request.ErrInvalidParams)) + } + } + if s.AuroraPostgreSqlParameters != nil { + if err := s.AuroraPostgreSqlParameters.Validate(); err != nil { + invalidParams.AddNested("AuroraPostgreSqlParameters", err.(request.ErrInvalidParams)) + } + } + if s.AwsIotAnalyticsParameters != nil { + if err := s.AwsIotAnalyticsParameters.Validate(); err != nil { + invalidParams.AddNested("AwsIotAnalyticsParameters", err.(request.ErrInvalidParams)) + } + } + if s.JiraParameters != nil { + if err := s.JiraParameters.Validate(); err != nil { + invalidParams.AddNested("JiraParameters", err.(request.ErrInvalidParams)) + } + } + if s.MariaDbParameters != nil { + if err := s.MariaDbParameters.Validate(); err != nil { + invalidParams.AddNested("MariaDbParameters", err.(request.ErrInvalidParams)) + } + } + if s.MySqlParameters != nil { + if err := s.MySqlParameters.Validate(); err != nil { + invalidParams.AddNested("MySqlParameters", err.(request.ErrInvalidParams)) + } + } + if s.PostgreSqlParameters != nil { + if err := s.PostgreSqlParameters.Validate(); err != nil { + invalidParams.AddNested("PostgreSqlParameters", err.(request.ErrInvalidParams)) + } + } + if s.PrestoParameters != nil { + if err := s.PrestoParameters.Validate(); err != nil { + invalidParams.AddNested("PrestoParameters", err.(request.ErrInvalidParams)) + } + } + if s.RdsParameters != nil { + if err := s.RdsParameters.Validate(); err != nil { + invalidParams.AddNested("RdsParameters", err.(request.ErrInvalidParams)) + } + } + if s.RedshiftParameters != nil { + if err := s.RedshiftParameters.Validate(); err != nil { + invalidParams.AddNested("RedshiftParameters", err.(request.ErrInvalidParams)) + } + } + if s.S3Parameters != nil { + if err := s.S3Parameters.Validate(); err != nil { + invalidParams.AddNested("S3Parameters", err.(request.ErrInvalidParams)) + } + } + if s.ServiceNowParameters != nil { + if err := s.ServiceNowParameters.Validate(); err != nil { + invalidParams.AddNested("ServiceNowParameters", err.(request.ErrInvalidParams)) + } + } + if s.SnowflakeParameters != nil { + if err := s.SnowflakeParameters.Validate(); err != nil { + invalidParams.AddNested("SnowflakeParameters", err.(request.ErrInvalidParams)) + } + } + if s.SparkParameters != nil { + if err := s.SparkParameters.Validate(); err != nil { + invalidParams.AddNested("SparkParameters", err.(request.ErrInvalidParams)) + } + } + if s.SqlServerParameters != nil { + if err := s.SqlServerParameters.Validate(); err != nil { + invalidParams.AddNested("SqlServerParameters", err.(request.ErrInvalidParams)) + } + } + if s.TeradataParameters != nil { + if err := s.TeradataParameters.Validate(); err != nil { + invalidParams.AddNested("TeradataParameters", err.(request.ErrInvalidParams)) + } + } + if s.TwitterParameters != nil { + if err := s.TwitterParameters.Validate(); err != nil { + invalidParams.AddNested("TwitterParameters", 
err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +// SetAmazonElasticsearchParameters sets the AmazonElasticsearchParameters field's value. +func (s *DataSourceParameters) SetAmazonElasticsearchParameters(v *AmazonElasticsearchParameters) *DataSourceParameters { + s.AmazonElasticsearchParameters = v + return s } -// The active AWS Identity and Access Management (IAM) policy assignment. -type ActiveIAMPolicyAssignment struct { - _ struct{} `type:"structure"` +// SetAthenaParameters sets the AthenaParameters field's value. +func (s *DataSourceParameters) SetAthenaParameters(v *AthenaParameters) *DataSourceParameters { + s.AthenaParameters = v + return s +} - // A name for the IAM policy assignment. - AssignmentName *string `min:"1" type:"string"` +// SetAuroraParameters sets the AuroraParameters field's value. +func (s *DataSourceParameters) SetAuroraParameters(v *AuroraParameters) *DataSourceParameters { + s.AuroraParameters = v + return s +} - // The Amazon Resource Name (ARN) of the resource. - PolicyArn *string `type:"string"` +// SetAuroraPostgreSqlParameters sets the AuroraPostgreSqlParameters field's value. +func (s *DataSourceParameters) SetAuroraPostgreSqlParameters(v *AuroraPostgreSqlParameters) *DataSourceParameters { + s.AuroraPostgreSqlParameters = v + return s } -// String returns the string representation -func (s ActiveIAMPolicyAssignment) String() string { - return awsutil.Prettify(s) +// SetAwsIotAnalyticsParameters sets the AwsIotAnalyticsParameters field's value. +func (s *DataSourceParameters) SetAwsIotAnalyticsParameters(v *AwsIotAnalyticsParameters) *DataSourceParameters { + s.AwsIotAnalyticsParameters = v + return s } -// GoString returns the string representation -func (s ActiveIAMPolicyAssignment) GoString() string { - return s.String() +// SetJiraParameters sets the JiraParameters field's value. +func (s *DataSourceParameters) SetJiraParameters(v *JiraParameters) *DataSourceParameters { + s.JiraParameters = v + return s } -// SetAssignmentName sets the AssignmentName field's value. -func (s *ActiveIAMPolicyAssignment) SetAssignmentName(v string) *ActiveIAMPolicyAssignment { - s.AssignmentName = &v +// SetMariaDbParameters sets the MariaDbParameters field's value. +func (s *DataSourceParameters) SetMariaDbParameters(v *MariaDbParameters) *DataSourceParameters { + s.MariaDbParameters = v return s } -// SetPolicyArn sets the PolicyArn field's value. -func (s *ActiveIAMPolicyAssignment) SetPolicyArn(v string) *ActiveIAMPolicyAssignment { - s.PolicyArn = &v +// SetMySqlParameters sets the MySqlParameters field's value. +func (s *DataSourceParameters) SetMySqlParameters(v *MySqlParameters) *DataSourceParameters { + s.MySqlParameters = v return s } -// Ad hoc (one-time) filtering option. -type AdHocFilteringOption struct { +// SetPostgreSqlParameters sets the PostgreSqlParameters field's value. +func (s *DataSourceParameters) SetPostgreSqlParameters(v *PostgreSqlParameters) *DataSourceParameters { + s.PostgreSqlParameters = v + return s +} + +// SetPrestoParameters sets the PrestoParameters field's value. +func (s *DataSourceParameters) SetPrestoParameters(v *PrestoParameters) *DataSourceParameters { + s.PrestoParameters = v + return s +} + +// SetRdsParameters sets the RdsParameters field's value. 
+func (s *DataSourceParameters) SetRdsParameters(v *RdsParameters) *DataSourceParameters { + s.RdsParameters = v + return s +} + +// SetRedshiftParameters sets the RedshiftParameters field's value. +func (s *DataSourceParameters) SetRedshiftParameters(v *RedshiftParameters) *DataSourceParameters { + s.RedshiftParameters = v + return s +} + +// SetS3Parameters sets the S3Parameters field's value. +func (s *DataSourceParameters) SetS3Parameters(v *S3Parameters) *DataSourceParameters { + s.S3Parameters = v + return s +} + +// SetServiceNowParameters sets the ServiceNowParameters field's value. +func (s *DataSourceParameters) SetServiceNowParameters(v *ServiceNowParameters) *DataSourceParameters { + s.ServiceNowParameters = v + return s +} + +// SetSnowflakeParameters sets the SnowflakeParameters field's value. +func (s *DataSourceParameters) SetSnowflakeParameters(v *SnowflakeParameters) *DataSourceParameters { + s.SnowflakeParameters = v + return s +} + +// SetSparkParameters sets the SparkParameters field's value. +func (s *DataSourceParameters) SetSparkParameters(v *SparkParameters) *DataSourceParameters { + s.SparkParameters = v + return s +} + +// SetSqlServerParameters sets the SqlServerParameters field's value. +func (s *DataSourceParameters) SetSqlServerParameters(v *SqlServerParameters) *DataSourceParameters { + s.SqlServerParameters = v + return s +} + +// SetTeradataParameters sets the TeradataParameters field's value. +func (s *DataSourceParameters) SetTeradataParameters(v *TeradataParameters) *DataSourceParameters { + s.TeradataParameters = v + return s +} + +// SetTwitterParameters sets the TwitterParameters field's value. +func (s *DataSourceParameters) SetTwitterParameters(v *TwitterParameters) *DataSourceParameters { + s.TwitterParameters = v + return s +} + +// A date-time parameter. +type DateTimeParameter struct { _ struct{} `type:"structure"` - // Availability status. - AvailabilityStatus *string `type:"string" enum:"DashboardBehavior"` + // A display name for the date-time parameter. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The values for the date-time parameter. + // + // Values is a required field + Values []*time.Time `type:"list" required:"true"` } // String returns the string representation -func (s AdHocFilteringOption) String() string { +func (s DateTimeParameter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AdHocFilteringOption) GoString() string { +func (s DateTimeParameter) GoString() string { return s.String() } -// SetAvailabilityStatus sets the AvailabilityStatus field's value. -func (s *AdHocFilteringOption) SetAvailabilityStatus(v string) *AdHocFilteringOption { - s.AvailabilityStatus = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *DateTimeParameter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DateTimeParameter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *DateTimeParameter) SetName(v string) *DateTimeParameter { + s.Name = &v return s } -// Amazon Elasticsearch Service parameters. -type AmazonElasticsearchParameters struct { +// SetValues sets the Values field's value. 
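// A short sketch of filling the two required fields of DateTimeParameter
// (Name and Values). The parameter name and timestamp are placeholders; the
// aws.Time helper from the SDK's aws package builds the *time.Time slice.
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	start := time.Date(2021, time.July, 1, 0, 0, 0, 0, time.UTC)

	p := (&quicksight.DateTimeParameter{}).
		SetName("ReportStart").
		SetValues([]*time.Time{aws.Time(start)})

	if err := p.Validate(); err != nil {
		fmt.Println("invalid DateTimeParameter:", err)
	}
}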
+func (s *DateTimeParameter) SetValues(v []*time.Time) *DateTimeParameter { + s.Values = v + return s +} + +// A decimal parameter. +type DecimalParameter struct { _ struct{} `type:"structure"` - // The Amazon Elasticsearch Service domain. + // A display name for the decimal parameter. // - // Domain is a required field - Domain *string `min:"1" type:"string" required:"true"` + // Name is a required field + Name *string `type:"string" required:"true"` + + // The values for the decimal parameter. + // + // Values is a required field + Values []*float64 `type:"list" required:"true"` } // String returns the string representation -func (s AmazonElasticsearchParameters) String() string { +func (s DecimalParameter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AmazonElasticsearchParameters) GoString() string { +func (s DecimalParameter) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AmazonElasticsearchParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AmazonElasticsearchParameters"} - if s.Domain == nil { - invalidParams.Add(request.NewErrParamRequired("Domain")) +func (s *DecimalParameter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecimalParameter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Domain != nil && len(*s.Domain) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Domain", 1)) + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) } if invalidParams.Len() > 0 { @@ -7318,35 +16751,49 @@ func (s *AmazonElasticsearchParameters) Validate() error { return nil } -// SetDomain sets the Domain field's value. -func (s *AmazonElasticsearchParameters) SetDomain(v string) *AmazonElasticsearchParameters { - s.Domain = &v +// SetName sets the Name field's value. +func (s *DecimalParameter) SetName(v string) *DecimalParameter { + s.Name = &v return s } -// Amazon Athena parameters. -type AthenaParameters struct { +// SetValues sets the Values field's value. +func (s *DecimalParameter) SetValues(v []*float64) *DecimalParameter { + s.Values = v + return s +} + +type DeleteAccountCustomizationInput struct { _ struct{} `type:"structure"` - // The workgroup that Amazon Athena uses. - WorkGroup *string `min:"1" type:"string"` + // The ID for the AWS account that you want to delete QuickSight customizations + // from in this AWS Region. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The QuickSight namespace that you're deleting the customizations from. + Namespace *string `location:"querystring" locationName:"namespace" type:"string"` } // String returns the string representation -func (s AthenaParameters) String() string { +func (s DeleteAccountCustomizationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AthenaParameters) GoString() string { +func (s DeleteAccountCustomizationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
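// A usage sketch for DeleteAccountCustomizationInput: AwsAccountId is required,
// while Namespace is an optional query-string parameter that scopes the delete
// to a single namespace. The account ID and namespace are placeholders, and the
// call assumes the QuickSight client constructor (quicksight.New) from this SDK.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	svc := quicksight.New(session.Must(session.NewSession()))

	out, err := svc.DeleteAccountCustomization(&quicksight.DeleteAccountCustomizationInput{
		AwsAccountId: aws.String("111122223333"),
		Namespace:    aws.String("default"), // optional; scopes the delete to one namespace
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("status:", aws.Int64Value(out.Status), "request:", aws.StringValue(out.RequestId))
}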
-func (s *AthenaParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AthenaParameters"} - if s.WorkGroup != nil && len(*s.WorkGroup) < 1 { - invalidParams.Add(request.NewErrParamMinLen("WorkGroup", 1)) +func (s *DeleteAccountCustomizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAccountCustomizationInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } if invalidParams.Len() > 0 { @@ -7355,62 +16802,101 @@ func (s *AthenaParameters) Validate() error { return nil } -// SetWorkGroup sets the WorkGroup field's value. -func (s *AthenaParameters) SetWorkGroup(v string) *AthenaParameters { - s.WorkGroup = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DeleteAccountCustomizationInput) SetAwsAccountId(v string) *DeleteAccountCustomizationInput { + s.AwsAccountId = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *DeleteAccountCustomizationInput) SetNamespace(v string) *DeleteAccountCustomizationInput { + s.Namespace = &v + return s +} + +type DeleteAccountCustomizationOutput struct { + _ struct{} `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DeleteAccountCustomizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountCustomizationOutput) GoString() string { + return s.String() +} + +// SetRequestId sets the RequestId field's value. +func (s *DeleteAccountCustomizationOutput) SetRequestId(v string) *DeleteAccountCustomizationOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DeleteAccountCustomizationOutput) SetStatus(v int64) *DeleteAccountCustomizationOutput { + s.Status = &v return s } -// Amazon Aurora parameters. -type AuroraParameters struct { +type DeleteAnalysisInput struct { _ struct{} `type:"structure"` - // Database. + // The ID of the analysis that you're deleting. // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` + // AnalysisId is a required field + AnalysisId *string `location:"uri" locationName:"AnalysisId" min:"1" type:"string" required:"true"` - // Host. + // The ID of the AWS account where you want to delete an analysis. // - // Host is a required field - Host *string `min:"1" type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Port. - // - // Port is a required field - Port *int64 `min:"1" type:"integer" required:"true"` + // This option defaults to the value NoForceDeleteWithoutRecovery. To immediately + // delete the analysis, add the ForceDeleteWithoutRecovery option. You can't + // restore an analysis after it's deleted. + ForceDeleteWithoutRecovery *bool `location:"querystring" locationName:"force-delete-without-recovery" type:"boolean"` + + // A value that specifies the number of days that QuickSight waits before it + // deletes the analysis. You can't use this parameter with the ForceDeleteWithoutRecovery + // option in the same API call. The default value is 30. 
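// A sketch of the two deletion modes described for DeleteAnalysisInput: either
// let QuickSight keep the analysis recoverable for RecoveryWindowInDays (at
// least 7 days, default 30), or pass ForceDeleteWithoutRecovery for an
// immediate, unrecoverable delete. Per the doc above, the two options cannot
// be combined in one call. IDs are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	svc := quicksight.New(session.Must(session.NewSession()))

	out, err := svc.DeleteAnalysis(&quicksight.DeleteAnalysisInput{
		AwsAccountId:         aws.String("111122223333"),
		AnalysisId:           aws.String("example-analysis-id"),
		RecoveryWindowInDays: aws.Int64(14),
		// ForceDeleteWithoutRecovery: aws.Bool(true), // mutually exclusive with the recovery window
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("analysis scheduled for deletion at:", aws.TimeValue(out.DeletionTime))
}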
+ RecoveryWindowInDays *int64 `location:"querystring" locationName:"recovery-window-in-days" min:"7" type:"long"` } // String returns the string representation -func (s AuroraParameters) String() string { +func (s DeleteAnalysisInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AuroraParameters) GoString() string { +func (s DeleteAnalysisInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AuroraParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AuroraParameters"} - if s.Database == nil { - invalidParams.Add(request.NewErrParamRequired("Database")) +func (s *DeleteAnalysisInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAnalysisInput"} + if s.AnalysisId == nil { + invalidParams.Add(request.NewErrParamRequired("AnalysisId")) } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) - } - if s.Host == nil { - invalidParams.Add(request.NewErrParamRequired("Host")) + if s.AnalysisId != nil && len(*s.AnalysisId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnalysisId", 1)) } - if s.Host != nil && len(*s.Host) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Host", 1)) + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.Port == nil { - invalidParams.Add(request.NewErrParamRequired("Port")) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Port != nil && *s.Port < 1 { - invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + if s.RecoveryWindowInDays != nil && *s.RecoveryWindowInDays < 7 { + invalidParams.Add(request.NewErrParamMinValue("RecoveryWindowInDays", 7)) } if invalidParams.Len() > 0 { @@ -7419,128 +16905,134 @@ func (s *AuroraParameters) Validate() error { return nil } -// SetDatabase sets the Database field's value. -func (s *AuroraParameters) SetDatabase(v string) *AuroraParameters { - s.Database = &v +// SetAnalysisId sets the AnalysisId field's value. +func (s *DeleteAnalysisInput) SetAnalysisId(v string) *DeleteAnalysisInput { + s.AnalysisId = &v return s } -// SetHost sets the Host field's value. -func (s *AuroraParameters) SetHost(v string) *AuroraParameters { - s.Host = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DeleteAnalysisInput) SetAwsAccountId(v string) *DeleteAnalysisInput { + s.AwsAccountId = &v return s } -// SetPort sets the Port field's value. -func (s *AuroraParameters) SetPort(v int64) *AuroraParameters { - s.Port = &v +// SetForceDeleteWithoutRecovery sets the ForceDeleteWithoutRecovery field's value. +func (s *DeleteAnalysisInput) SetForceDeleteWithoutRecovery(v bool) *DeleteAnalysisInput { + s.ForceDeleteWithoutRecovery = &v return s } -// Amazon Aurora with PostgreSQL compatibility parameters. -type AuroraPostgreSqlParameters struct { +// SetRecoveryWindowInDays sets the RecoveryWindowInDays field's value. +func (s *DeleteAnalysisInput) SetRecoveryWindowInDays(v int64) *DeleteAnalysisInput { + s.RecoveryWindowInDays = &v + return s +} + +type DeleteAnalysisOutput struct { _ struct{} `type:"structure"` - // Database. - // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` + // The ID of the deleted analysis. + AnalysisId *string `min:"1" type:"string"` - // Host. 
- // - // Host is a required field - Host *string `min:"1" type:"string" required:"true"` + // The Amazon Resource Name (ARN) of the deleted analysis. + Arn *string `type:"string"` - // Port. - // - // Port is a required field - Port *int64 `min:"1" type:"integer" required:"true"` + // The date and time that the analysis is scheduled to be deleted. + DeletionTime *time.Time `type:"timestamp"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s AuroraPostgreSqlParameters) String() string { +func (s DeleteAnalysisOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AuroraPostgreSqlParameters) GoString() string { +func (s DeleteAnalysisOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *AuroraPostgreSqlParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AuroraPostgreSqlParameters"} - if s.Database == nil { - invalidParams.Add(request.NewErrParamRequired("Database")) - } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) - } - if s.Host == nil { - invalidParams.Add(request.NewErrParamRequired("Host")) - } - if s.Host != nil && len(*s.Host) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Host", 1)) - } - if s.Port == nil { - invalidParams.Add(request.NewErrParamRequired("Port")) - } - if s.Port != nil && *s.Port < 1 { - invalidParams.Add(request.NewErrParamMinValue("Port", 1)) - } +// SetAnalysisId sets the AnalysisId field's value. +func (s *DeleteAnalysisOutput) SetAnalysisId(v string) *DeleteAnalysisOutput { + s.AnalysisId = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetArn sets the Arn field's value. +func (s *DeleteAnalysisOutput) SetArn(v string) *DeleteAnalysisOutput { + s.Arn = &v + return s } -// SetDatabase sets the Database field's value. -func (s *AuroraPostgreSqlParameters) SetDatabase(v string) *AuroraPostgreSqlParameters { - s.Database = &v +// SetDeletionTime sets the DeletionTime field's value. +func (s *DeleteAnalysisOutput) SetDeletionTime(v time.Time) *DeleteAnalysisOutput { + s.DeletionTime = &v return s } -// SetHost sets the Host field's value. -func (s *AuroraPostgreSqlParameters) SetHost(v string) *AuroraPostgreSqlParameters { - s.Host = &v +// SetRequestId sets the RequestId field's value. +func (s *DeleteAnalysisOutput) SetRequestId(v string) *DeleteAnalysisOutput { + s.RequestId = &v return s } -// SetPort sets the Port field's value. -func (s *AuroraPostgreSqlParameters) SetPort(v int64) *AuroraPostgreSqlParameters { - s.Port = &v +// SetStatus sets the Status field's value. +func (s *DeleteAnalysisOutput) SetStatus(v int64) *DeleteAnalysisOutput { + s.Status = &v return s } -// AWS IoT Analytics parameters. -type AwsIotAnalyticsParameters struct { +type DeleteDashboardInput struct { _ struct{} `type:"structure"` - // Dataset name. + // The ID of the AWS account that contains the dashboard that you're deleting. // - // DataSetName is a required field - DataSetName *string `min:"1" type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the dashboard. 
+ // + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + + // The version number of the dashboard. If the version number property is provided, + // only the specified version of the dashboard is deleted. + VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` } // String returns the string representation -func (s AwsIotAnalyticsParameters) String() string { +func (s DeleteDashboardInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsIotAnalyticsParameters) GoString() string { +func (s DeleteDashboardInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *AwsIotAnalyticsParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AwsIotAnalyticsParameters"} - if s.DataSetName == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetName")) +func (s *DeleteDashboardInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDashboardInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.DataSetName != nil && len(*s.DataSetName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSetName", 1)) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DashboardId == nil { + invalidParams.Add(request.NewErrParamRequired("DashboardId")) + } + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) + } + if s.VersionNumber != nil && *s.VersionNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("VersionNumber", 1)) } if invalidParams.Len() > 0 { @@ -7549,91 +17041,75 @@ func (s *AwsIotAnalyticsParameters) Validate() error { return nil } -// SetDataSetName sets the DataSetName field's value. -func (s *AwsIotAnalyticsParameters) SetDataSetName(v string) *AwsIotAnalyticsParameters { - s.DataSetName = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DeleteDashboardInput) SetAwsAccountId(v string) *DeleteDashboardInput { + s.AwsAccountId = &v return s } -// A calculated column for a dataset. -type CalculatedColumn struct { +// SetDashboardId sets the DashboardId field's value. +func (s *DeleteDashboardInput) SetDashboardId(v string) *DeleteDashboardInput { + s.DashboardId = &v + return s +} + +// SetVersionNumber sets the VersionNumber field's value. +func (s *DeleteDashboardInput) SetVersionNumber(v int64) *DeleteDashboardInput { + s.VersionNumber = &v + return s +} + +type DeleteDashboardOutput struct { _ struct{} `type:"structure"` - // A unique ID to identify a calculated column. During a dataset update, if - // the column ID of a calculated column matches that of an existing calculated - // column, Amazon QuickSight preserves the existing calculated column. - // - // ColumnId is a required field - ColumnId *string `min:"1" type:"string" required:"true"` + // The Secure Socket Layer (SSL) properties that apply for the resource. + Arn *string `type:"string"` - // Column name. - // - // ColumnName is a required field - ColumnName *string `min:"1" type:"string" required:"true"` + // The ID of the dashboard. + DashboardId *string `min:"1" type:"string"` - // An expression that defines the calculated column. 
- // - // Expression is a required field - Expression *string `min:"1" type:"string" required:"true"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s CalculatedColumn) String() string { +func (s DeleteDashboardOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CalculatedColumn) GoString() string { +func (s DeleteDashboardOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CalculatedColumn) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CalculatedColumn"} - if s.ColumnId == nil { - invalidParams.Add(request.NewErrParamRequired("ColumnId")) - } - if s.ColumnId != nil && len(*s.ColumnId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ColumnId", 1)) - } - if s.ColumnName == nil { - invalidParams.Add(request.NewErrParamRequired("ColumnName")) - } - if s.ColumnName != nil && len(*s.ColumnName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ColumnName", 1)) - } - if s.Expression == nil { - invalidParams.Add(request.NewErrParamRequired("Expression")) - } - if s.Expression != nil && len(*s.Expression) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Expression", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetArn sets the Arn field's value. +func (s *DeleteDashboardOutput) SetArn(v string) *DeleteDashboardOutput { + s.Arn = &v + return s } -// SetColumnId sets the ColumnId field's value. -func (s *CalculatedColumn) SetColumnId(v string) *CalculatedColumn { - s.ColumnId = &v +// SetDashboardId sets the DashboardId field's value. +func (s *DeleteDashboardOutput) SetDashboardId(v string) *DeleteDashboardOutput { + s.DashboardId = &v return s } -// SetColumnName sets the ColumnName field's value. -func (s *CalculatedColumn) SetColumnName(v string) *CalculatedColumn { - s.ColumnName = &v +// SetRequestId sets the RequestId field's value. +func (s *DeleteDashboardOutput) SetRequestId(v string) *DeleteDashboardOutput { + s.RequestId = &v return s } -// SetExpression sets the Expression field's value. -func (s *CalculatedColumn) SetExpression(v string) *CalculatedColumn { - s.Expression = &v +// SetStatus sets the Status field's value. +func (s *DeleteDashboardOutput) SetStatus(v int64) *DeleteDashboardOutput { + s.Status = &v return s } -type CancelIngestionInput struct { +type DeleteDataSetInput struct { _ struct{} `type:"structure"` // The AWS account ID. @@ -7641,30 +17117,26 @@ type CancelIngestionInput struct { // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID of the dataset used in the ingestion. + // The ID for the dataset that you want to create. This ID is unique per AWS + // Region for each AWS account. // // DataSetId is a required field DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` - - // An ID for the ingestion. 
- // - // IngestionId is a required field - IngestionId *string `location:"uri" locationName:"IngestionId" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CancelIngestionInput) String() string { +func (s DeleteDataSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CancelIngestionInput) GoString() string { +func (s DeleteDataSetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CancelIngestionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CancelIngestionInput"} +func (s *DeleteDataSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDataSetInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } @@ -7677,12 +17149,6 @@ func (s *CancelIngestionInput) Validate() error { if s.DataSetId != nil && len(*s.DataSetId) < 1 { invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) } - if s.IngestionId == nil { - invalidParams.Add(request.NewErrParamRequired("IngestionId")) - } - if s.IngestionId != nil && len(*s.IngestionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("IngestionId", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -7691,31 +17157,26 @@ func (s *CancelIngestionInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *CancelIngestionInput) SetAwsAccountId(v string) *CancelIngestionInput { +func (s *DeleteDataSetInput) SetAwsAccountId(v string) *DeleteDataSetInput { s.AwsAccountId = &v return s } // SetDataSetId sets the DataSetId field's value. -func (s *CancelIngestionInput) SetDataSetId(v string) *CancelIngestionInput { +func (s *DeleteDataSetInput) SetDataSetId(v string) *DeleteDataSetInput { s.DataSetId = &v return s } -// SetIngestionId sets the IngestionId field's value. -func (s *CancelIngestionInput) SetIngestionId(v string) *CancelIngestionInput { - s.IngestionId = &v - return s -} - -type CancelIngestionOutput struct { +type DeleteDataSetOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) for the data ingestion. + // The Amazon Resource Name (ARN) of the dataset. Arn *string `type:"string"` - // An ID for the ingestion. - IngestionId *string `min:"1" type:"string"` + // The ID for the dataset that you want to create. This ID is unique per AWS + // Region for each AWS account. + DataSetId *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -7725,79 +17186,78 @@ type CancelIngestionOutput struct { } // String returns the string representation -func (s CancelIngestionOutput) String() string { +func (s DeleteDataSetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CancelIngestionOutput) GoString() string { +func (s DeleteDataSetOutput) GoString() string { return s.String() } // SetArn sets the Arn field's value. -func (s *CancelIngestionOutput) SetArn(v string) *CancelIngestionOutput { +func (s *DeleteDataSetOutput) SetArn(v string) *DeleteDataSetOutput { s.Arn = &v return s } -// SetIngestionId sets the IngestionId field's value. -func (s *CancelIngestionOutput) SetIngestionId(v string) *CancelIngestionOutput { - s.IngestionId = &v +// SetDataSetId sets the DataSetId field's value. 
+func (s *DeleteDataSetOutput) SetDataSetId(v string) *DeleteDataSetOutput { + s.DataSetId = &v return s } // SetRequestId sets the RequestId field's value. -func (s *CancelIngestionOutput) SetRequestId(v string) *CancelIngestionOutput { +func (s *DeleteDataSetOutput) SetRequestId(v string) *DeleteDataSetOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *CancelIngestionOutput) SetStatus(v int64) *CancelIngestionOutput { +func (s *DeleteDataSetOutput) SetStatus(v int64) *DeleteDataSetOutput { s.Status = &v return s } -// A transform operation that casts a column to a different type. -type CastColumnTypeOperation struct { +type DeleteDataSourceInput struct { _ struct{} `type:"structure"` - // Column name. + // The AWS account ID. // - // ColumnName is a required field - ColumnName *string `min:"1" type:"string" required:"true"` - - // When casting a column from string to datetime type, you can supply a string - // in a format supported by Amazon QuickSight to denote the source data format. - Format *string `type:"string"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // New column data type. + // The ID of the data source. This ID is unique per AWS Region for each AWS + // account. // - // NewColumnType is a required field - NewColumnType *string `type:"string" required:"true" enum:"ColumnDataType"` + // DataSourceId is a required field + DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` } // String returns the string representation -func (s CastColumnTypeOperation) String() string { +func (s DeleteDataSourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CastColumnTypeOperation) GoString() string { +func (s DeleteDataSourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CastColumnTypeOperation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CastColumnTypeOperation"} - if s.ColumnName == nil { - invalidParams.Add(request.NewErrParamRequired("ColumnName")) +func (s *DeleteDataSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDataSourceInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.ColumnName != nil && len(*s.ColumnName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ColumnName", 1)) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.NewColumnType == nil { - invalidParams.Add(request.NewErrParamRequired("NewColumnType")) + if s.DataSourceId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceId")) + } + if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) } if invalidParams.Len() > 0 { @@ -7806,51 +17266,119 @@ func (s *CastColumnTypeOperation) Validate() error { return nil } -// SetColumnName sets the ColumnName field's value. -func (s *CastColumnTypeOperation) SetColumnName(v string) *CastColumnTypeOperation { - s.ColumnName = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DeleteDataSourceInput) SetAwsAccountId(v string) *DeleteDataSourceInput { + s.AwsAccountId = &v return s } -// SetFormat sets the Format field's value. 
-func (s *CastColumnTypeOperation) SetFormat(v string) *CastColumnTypeOperation { - s.Format = &v +// SetDataSourceId sets the DataSourceId field's value. +func (s *DeleteDataSourceInput) SetDataSourceId(v string) *DeleteDataSourceInput { + s.DataSourceId = &v return s } -// SetNewColumnType sets the NewColumnType field's value. -func (s *CastColumnTypeOperation) SetNewColumnType(v string) *CastColumnTypeOperation { - s.NewColumnType = &v +type DeleteDataSourceOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the data source that you deleted. + Arn *string `type:"string"` + + // The ID of the data source. This ID is unique per AWS Region for each AWS + // account. + DataSourceId *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DeleteDataSourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDataSourceOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DeleteDataSourceOutput) SetArn(v string) *DeleteDataSourceOutput { + s.Arn = &v return s } -// Groupings of columns that work together in certain Amazon QuickSight features. -// This is a variant type structure. For this structure to be valid, only one -// of the attributes can be non-null. -type ColumnGroup struct { +// SetDataSourceId sets the DataSourceId field's value. +func (s *DeleteDataSourceOutput) SetDataSourceId(v string) *DeleteDataSourceOutput { + s.DataSourceId = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *DeleteDataSourceOutput) SetRequestId(v string) *DeleteDataSourceOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DeleteDataSourceOutput) SetStatus(v int64) *DeleteDataSourceOutput { + s.Status = &v + return s +} + +type DeleteGroupInput struct { _ struct{} `type:"structure"` - // Geospatial column group that denotes a hierarchy. - GeoSpatialColumnGroup *GeoSpatialColumnGroup `type:"structure"` + // The ID for the AWS account that the group is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The name of the group that you want to delete. + // + // GroupName is a required field + GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + + // The namespace. Currently, you should set this to default. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` } // String returns the string representation -func (s ColumnGroup) String() string { +func (s DeleteGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ColumnGroup) GoString() string { +func (s DeleteGroupInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
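// A usage sketch for DeleteGroupInput: all three fields are required and, per
// the comment above, Namespace currently has to be "default". The account ID
// and group name are placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	svc := quicksight.New(session.Must(session.NewSession()))

	if _, err := svc.DeleteGroup(&quicksight.DeleteGroupInput{
		AwsAccountId: aws.String("111122223333"),
		GroupName:    aws.String("finance-readers"),
		Namespace:    aws.String("default"),
	}); err != nil {
		log.Fatal(err)
	}
}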
-func (s *ColumnGroup) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ColumnGroup"} - if s.GeoSpatialColumnGroup != nil { - if err := s.GeoSpatialColumnGroup.Validate(); err != nil { - invalidParams.AddNested("GeoSpatialColumnGroup", err.(request.ErrInvalidParams)) - } +func (s *DeleteGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGroupInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } if invalidParams.Len() > 0 { @@ -7859,294 +17387,336 @@ func (s *ColumnGroup) Validate() error { return nil } -// SetGeoSpatialColumnGroup sets the GeoSpatialColumnGroup field's value. -func (s *ColumnGroup) SetGeoSpatialColumnGroup(v *GeoSpatialColumnGroup) *ColumnGroup { - s.GeoSpatialColumnGroup = v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DeleteGroupInput) SetAwsAccountId(v string) *DeleteGroupInput { + s.AwsAccountId = &v return s } -// A structure describing the name, data type, and geographic role of the columns. -type ColumnGroupColumnSchema struct { +// SetGroupName sets the GroupName field's value. +func (s *DeleteGroupInput) SetGroupName(v string) *DeleteGroupInput { + s.GroupName = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *DeleteGroupInput) SetNamespace(v string) *DeleteGroupInput { + s.Namespace = &v + return s +} + +type DeleteGroupMembershipInput struct { _ struct{} `type:"structure"` - // The name of the column group's column schema. - Name *string `type:"string"` + // The ID for the AWS account that the group is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The name of the group that you want to delete the user from. + // + // GroupName is a required field + GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + + // The name of the user that you want to delete from the group membership. + // + // MemberName is a required field + MemberName *string `location:"uri" locationName:"MemberName" min:"1" type:"string" required:"true"` + + // The namespace. Currently, you should set this to default. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` } // String returns the string representation -func (s ColumnGroupColumnSchema) String() string { +func (s DeleteGroupMembershipInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ColumnGroupColumnSchema) GoString() string { +func (s DeleteGroupMembershipInput) GoString() string { return s.String() } -// SetName sets the Name field's value. 
-func (s *ColumnGroupColumnSchema) SetName(v string) *ColumnGroupColumnSchema { - s.Name = &v - return s -} - -// The column group schema. -type ColumnGroupSchema struct { - _ struct{} `type:"structure"` - - // A structure containing the list of schemas for column group columns. - ColumnGroupColumnSchemaList []*ColumnGroupColumnSchema `type:"list"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteGroupMembershipInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGroupMembershipInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.MemberName == nil { + invalidParams.Add(request.NewErrParamRequired("MemberName")) + } + if s.MemberName != nil && len(*s.MemberName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MemberName", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } - // The name of the column group schema. - Name *string `type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// String returns the string representation -func (s ColumnGroupSchema) String() string { - return awsutil.Prettify(s) +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DeleteGroupMembershipInput) SetAwsAccountId(v string) *DeleteGroupMembershipInput { + s.AwsAccountId = &v + return s } -// GoString returns the string representation -func (s ColumnGroupSchema) GoString() string { - return s.String() +// SetGroupName sets the GroupName field's value. +func (s *DeleteGroupMembershipInput) SetGroupName(v string) *DeleteGroupMembershipInput { + s.GroupName = &v + return s } -// SetColumnGroupColumnSchemaList sets the ColumnGroupColumnSchemaList field's value. -func (s *ColumnGroupSchema) SetColumnGroupColumnSchemaList(v []*ColumnGroupColumnSchema) *ColumnGroupSchema { - s.ColumnGroupColumnSchemaList = v +// SetMemberName sets the MemberName field's value. +func (s *DeleteGroupMembershipInput) SetMemberName(v string) *DeleteGroupMembershipInput { + s.MemberName = &v return s } -// SetName sets the Name field's value. -func (s *ColumnGroupSchema) SetName(v string) *ColumnGroupSchema { - s.Name = &v +// SetNamespace sets the Namespace field's value. +func (s *DeleteGroupMembershipInput) SetNamespace(v string) *DeleteGroupMembershipInput { + s.Namespace = &v return s } -// The column schema. -type ColumnSchema struct { +type DeleteGroupMembershipOutput struct { _ struct{} `type:"structure"` - // The data type of the column schema. - DataType *string `type:"string"` - - // The geographic role of the column schema. - GeographicRole *string `type:"string"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // The name of the column schema. - Name *string `type:"string"` + // The HTTP status of the request. 
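// A sketch of removing a user from a group with DeleteGroupMembershipInput,
// the operation the new aws_quicksight_group_membership resource presumably
// drives on destroy. Identifiers are placeholders; Validate is called first so
// missing required fields surface before any request is sent.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	input := &quicksight.DeleteGroupMembershipInput{
		AwsAccountId: aws.String("111122223333"),
		GroupName:    aws.String("finance-readers"),
		MemberName:   aws.String("jdoe"),
		Namespace:    aws.String("default"),
	}
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}

	svc := quicksight.New(session.Must(session.NewSession()))
	if _, err := svc.DeleteGroupMembership(input); err != nil {
		log.Fatal(err)
	}
}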
+ Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s ColumnSchema) String() string { +func (s DeleteGroupMembershipOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ColumnSchema) GoString() string { +func (s DeleteGroupMembershipOutput) GoString() string { return s.String() } -// SetDataType sets the DataType field's value. -func (s *ColumnSchema) SetDataType(v string) *ColumnSchema { - s.DataType = &v - return s -} - -// SetGeographicRole sets the GeographicRole field's value. -func (s *ColumnSchema) SetGeographicRole(v string) *ColumnSchema { - s.GeographicRole = &v +// SetRequestId sets the RequestId field's value. +func (s *DeleteGroupMembershipOutput) SetRequestId(v string) *DeleteGroupMembershipOutput { + s.RequestId = &v return s } -// SetName sets the Name field's value. -func (s *ColumnSchema) SetName(v string) *ColumnSchema { - s.Name = &v +// SetStatus sets the Status field's value. +func (s *DeleteGroupMembershipOutput) SetStatus(v int64) *DeleteGroupMembershipOutput { + s.Status = &v return s } -// A tag for a column in a TagColumnOperation structure. This is a variant type -// structure. For this structure to be valid, only one of the attributes can -// be non-null. -type ColumnTag struct { +type DeleteGroupOutput struct { _ struct{} `type:"structure"` - // A geospatial role for a column. - ColumnGeographicRole *string `type:"string" enum:"GeoSpatialDataRole"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s ColumnTag) String() string { +func (s DeleteGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ColumnTag) GoString() string { +func (s DeleteGroupOutput) GoString() string { return s.String() } -// SetColumnGeographicRole sets the ColumnGeographicRole field's value. -func (s *ColumnTag) SetColumnGeographicRole(v string) *ColumnTag { - s.ColumnGeographicRole = &v +// SetRequestId sets the RequestId field's value. +func (s *DeleteGroupOutput) SetRequestId(v string) *DeleteGroupOutput { + s.RequestId = &v return s } -// A resource is already in a state that indicates an action is happening that -// must complete before a new update can be applied. -type ConcurrentUpdatingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// SetStatus sets the Status field's value. +func (s *DeleteGroupOutput) SetStatus(v int64) *DeleteGroupOutput { + s.Status = &v + return s +} - Message_ *string `locationName:"Message" type:"string"` +type DeleteIAMPolicyAssignmentInput struct { + _ struct{} `type:"structure"` - RequestId *string `type:"string"` + // The name of the assignment. + // + // AssignmentName is a required field + AssignmentName *string `location:"uri" locationName:"AssignmentName" min:"1" type:"string" required:"true"` + + // The AWS account ID where you want to delete the IAM policy assignment. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The namespace that contains the assignment. 
+ // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` } // String returns the string representation -func (s ConcurrentUpdatingException) String() string { +func (s DeleteIAMPolicyAssignmentInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ConcurrentUpdatingException) GoString() string { +func (s DeleteIAMPolicyAssignmentInput) GoString() string { return s.String() } -func newErrorConcurrentUpdatingException(v protocol.ResponseMetadata) error { - return &ConcurrentUpdatingException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteIAMPolicyAssignmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIAMPolicyAssignmentInput"} + if s.AssignmentName == nil { + invalidParams.Add(request.NewErrParamRequired("AssignmentName")) } -} - -// Code returns the exception type name. -func (s ConcurrentUpdatingException) Code() string { - return "ConcurrentUpdatingException" -} - -// Message returns the exception's message. -func (s ConcurrentUpdatingException) Message() string { - if s.Message_ != nil { - return *s.Message_ + if s.AssignmentName != nil && len(*s.AssignmentName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssignmentName", 1)) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } - return "" -} -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConcurrentUpdatingException) OrigErr() error { + if invalidParams.Len() > 0 { + return invalidParams + } return nil } -func (s ConcurrentUpdatingException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetAssignmentName sets the AssignmentName field's value. +func (s *DeleteIAMPolicyAssignmentInput) SetAssignmentName(v string) *DeleteIAMPolicyAssignmentInput { + s.AssignmentName = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s ConcurrentUpdatingException) StatusCode() int { - return s.respMetadata.StatusCode +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DeleteIAMPolicyAssignmentInput) SetAwsAccountId(v string) *DeleteIAMPolicyAssignmentInput { + s.AwsAccountId = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s ConcurrentUpdatingException) RequestID() string { - return s.respMetadata.RequestID +// SetNamespace sets the Namespace field's value. +func (s *DeleteIAMPolicyAssignmentInput) SetNamespace(v string) *DeleteIAMPolicyAssignmentInput { + s.Namespace = &v + return s } -// Updating or deleting a resource can cause an inconsistent state. -type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +type DeleteIAMPolicyAssignmentOutput struct { + _ struct{} `type:"structure"` - Message_ *string `locationName:"Message" type:"string"` + // The name of the assignment. + AssignmentName *string `min:"1" type:"string"` - // The AWS request ID for this request. + // The AWS request ID for this operation. 
RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s ConflictException) String() string { +func (s DeleteIAMPolicyAssignmentOutput) String() string { return awsutil.Prettify(s) } -// GoString returns the string representation -func (s ConflictException) GoString() string { - return s.String() -} - -func newErrorConflictException(v protocol.ResponseMetadata) error { - return &ConflictException{ - respMetadata: v, - } -} - -// Code returns the exception type name. -func (s ConflictException) Code() string { - return "ConflictException" -} - -// Message returns the exception's message. -func (s ConflictException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { - return nil +// GoString returns the string representation +func (s DeleteIAMPolicyAssignmentOutput) GoString() string { + return s.String() } -func (s ConflictException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetAssignmentName sets the AssignmentName field's value. +func (s *DeleteIAMPolicyAssignmentOutput) SetAssignmentName(v string) *DeleteIAMPolicyAssignmentOutput { + s.AssignmentName = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +// SetRequestId sets the RequestId field's value. +func (s *DeleteIAMPolicyAssignmentOutput) SetRequestId(v string) *DeleteIAMPolicyAssignmentOutput { + s.RequestId = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +// SetStatus sets the Status field's value. +func (s *DeleteIAMPolicyAssignmentOutput) SetStatus(v int64) *DeleteIAMPolicyAssignmentOutput { + s.Status = &v + return s } -// A transform operation that creates calculated columns. Columns created in -// one such operation form a lexical closure. -type CreateColumnsOperation struct { +type DeleteNamespaceInput struct { _ struct{} `type:"structure"` - // Calculated columns to create. + // The ID for the AWS account that you want to delete the QuickSight namespace + // from. // - // Columns is a required field - Columns []*CalculatedColumn `min:"1" type:"list" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The namespace that you want to delete. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` } // String returns the string representation -func (s CreateColumnsOperation) String() string { +func (s DeleteNamespaceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateColumnsOperation) GoString() string { +func (s DeleteNamespaceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
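// A sketch of deleting an IAM policy assignment by name; DeleteNamespaceInput,
// whose Validate follows below, takes the same AwsAccountId/Namespace shape
// without an assignment name. Identifiers are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	svc := quicksight.New(session.Must(session.NewSession()))

	out, err := svc.DeleteIAMPolicyAssignment(&quicksight.DeleteIAMPolicyAssignmentInput{
		AssignmentName: aws.String("example-assignment"),
		AwsAccountId:   aws.String("111122223333"),
		Namespace:      aws.String("default"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted assignment:", aws.StringValue(out.AssignmentName))
}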
-func (s *CreateColumnsOperation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateColumnsOperation"} - if s.Columns == nil { - invalidParams.Add(request.NewErrParamRequired("Columns")) +func (s *DeleteNamespaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteNamespaceInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.Columns != nil && len(s.Columns) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Columns", 1)) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Columns != nil { - for i, v := range s.Columns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Columns", i), err.(request.ErrInvalidParams)) - } - } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } if invalidParams.Len() > 0 { @@ -8155,149 +17725,102 @@ func (s *CreateColumnsOperation) Validate() error { return nil } -// SetColumns sets the Columns field's value. -func (s *CreateColumnsOperation) SetColumns(v []*CalculatedColumn) *CreateColumnsOperation { - s.Columns = v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DeleteNamespaceInput) SetAwsAccountId(v string) *DeleteNamespaceInput { + s.AwsAccountId = &v return s } -type CreateDashboardInput struct { +// SetNamespace sets the Namespace field's value. +func (s *DeleteNamespaceInput) SetNamespace(v string) *DeleteNamespaceInput { + s.Namespace = &v + return s +} + +type DeleteNamespaceOutput struct { _ struct{} `type:"structure"` - // The ID of the AWS account where you want to create the dashboard. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // The ID for the dashboard, also added to the IAM policy. - // - // DashboardId is a required field - DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} - // Options for publishing the dashboard when you create it: - // - // * AvailabilityStatus for AdHocFilteringOption - This status can be either - // ENABLED or DISABLED. When this is set to DISABLED, QuickSight disables - // the left filter pane on the published dashboard, which can be used for - // ad hoc (one-time) filtering. This option is ENABLED by default. - // - // * AvailabilityStatus for ExportToCSVOption - This status can be either - // ENABLED or DISABLED. The visual option to export data to .csv format isn't - // enabled when this is set to DISABLED. This option is ENABLED by default. - // - // * VisibilityState for SheetControlsOption - This visibility state can - // be either COLLAPSED or EXPANDED. The sheet controls pane is collapsed - // by default when set to true. This option is COLLAPSED by default. - DashboardPublishOptions *DashboardPublishOptions `type:"structure"` +// String returns the string representation +func (s DeleteNamespaceOutput) String() string { + return awsutil.Prettify(s) +} - // The display name of the dashboard. 
- // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` +// GoString returns the string representation +func (s DeleteNamespaceOutput) GoString() string { + return s.String() +} - // A structure that contains the parameters of the dashboard. These are parameter - // overrides for a dashboard. A dashboard can have any type of parameters, and - // some parameters might accept multiple values. You can use the dashboard permissions - // structure described following to override two string parameters that accept - // multiple values. - Parameters *Parameters `type:"structure"` +// SetRequestId sets the RequestId field's value. +func (s *DeleteNamespaceOutput) SetRequestId(v string) *DeleteNamespaceOutput { + s.RequestId = &v + return s +} - // A structure that contains the permissions of the dashboard. You can use this - // structure for granting permissions with principal and action information. - Permissions []*ResourcePermission `min:"1" type:"list"` +// SetStatus sets the Status field's value. +func (s *DeleteNamespaceOutput) SetStatus(v int64) *DeleteNamespaceOutput { + s.Status = &v + return s +} - // The source entity from which the dashboard is created. The source entity - // accepts the Amazon Resource Name (ARN) of the source template or analysis - // and also references the replacement datasets for the placeholders set when - // creating the template. The replacement datasets need to follow the same schema - // as the datasets for which placeholders were created when creating the template. - // - // If you are creating a dashboard from a source entity in a different AWS account, - // use the ARN of the source template. +type DeleteTemplateAliasInput struct { + _ struct{} `type:"structure"` + + // The name for the template alias. To delete a specific alias, you delete the + // version that the alias points to. You can specify the alias name, or specify + // the latest version of the template by providing the keyword $LATEST in the + // AliasName parameter. // - // SourceEntity is a required field - SourceEntity *DashboardSourceEntity `type:"structure" required:"true"` + // AliasName is a required field + AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` - // Contains a map of the key-value pairs for the resource tag or tags assigned - // to the dashboard. - Tags []*Tag `min:"1" type:"list"` + // The ID of the AWS account that contains the item to delete. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // A description for the first version of the dashboard being created. - VersionDescription *string `min:"1" type:"string"` + // The ID for the template that the specified alias is for. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateDashboardInput) String() string { +func (s DeleteTemplateAliasInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDashboardInput) GoString() string { +func (s DeleteTemplateAliasInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateDashboardInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDashboardInput"} +func (s *DeleteTemplateAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTemplateAliasInput"} + if s.AliasName == nil { + invalidParams.Add(request.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) + } if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DashboardId == nil { - invalidParams.Add(request.NewErrParamRequired("DashboardId")) - } - if s.DashboardId != nil && len(*s.DashboardId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Permissions != nil && len(s.Permissions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Permissions", 1)) - } - if s.SourceEntity == nil { - invalidParams.Add(request.NewErrParamRequired("SourceEntity")) - } - if s.Tags != nil && len(s.Tags) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) - } - if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VersionDescription", 1)) - } - if s.Parameters != nil { - if err := s.Parameters.Validate(); err != nil { - invalidParams.AddNested("Parameters", err.(request.ErrInvalidParams)) - } - } - if s.Permissions != nil { - for i, v := range s.Permissions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.SourceEntity != nil { - if err := s.SourceEntity.Validate(); err != nil { - invalidParams.AddNested("SourceEntity", err.(request.ErrInvalidParams)) - } + if s.TemplateId == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateId")) } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) } if invalidParams.Len() > 0 { @@ -8306,71 +17829,32 @@ func (s *CreateDashboardInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *CreateDashboardInput) SetAwsAccountId(v string) *CreateDashboardInput { - s.AwsAccountId = &v - return s -} - -// SetDashboardId sets the DashboardId field's value. -func (s *CreateDashboardInput) SetDashboardId(v string) *CreateDashboardInput { - s.DashboardId = &v - return s -} - -// SetDashboardPublishOptions sets the DashboardPublishOptions field's value. -func (s *CreateDashboardInput) SetDashboardPublishOptions(v *DashboardPublishOptions) *CreateDashboardInput { - s.DashboardPublishOptions = v - return s -} - -// SetName sets the Name field's value. -func (s *CreateDashboardInput) SetName(v string) *CreateDashboardInput { - s.Name = &v - return s -} - -// SetParameters sets the Parameters field's value. 
-func (s *CreateDashboardInput) SetParameters(v *Parameters) *CreateDashboardInput { - s.Parameters = v - return s -} - -// SetPermissions sets the Permissions field's value. -func (s *CreateDashboardInput) SetPermissions(v []*ResourcePermission) *CreateDashboardInput { - s.Permissions = v - return s -} - -// SetSourceEntity sets the SourceEntity field's value. -func (s *CreateDashboardInput) SetSourceEntity(v *DashboardSourceEntity) *CreateDashboardInput { - s.SourceEntity = v +// SetAliasName sets the AliasName field's value. +func (s *DeleteTemplateAliasInput) SetAliasName(v string) *DeleteTemplateAliasInput { + s.AliasName = &v return s } -// SetTags sets the Tags field's value. -func (s *CreateDashboardInput) SetTags(v []*Tag) *CreateDashboardInput { - s.Tags = v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DeleteTemplateAliasInput) SetAwsAccountId(v string) *DeleteTemplateAliasInput { + s.AwsAccountId = &v return s } -// SetVersionDescription sets the VersionDescription field's value. -func (s *CreateDashboardInput) SetVersionDescription(v string) *CreateDashboardInput { - s.VersionDescription = &v +// SetTemplateId sets the TemplateId field's value. +func (s *DeleteTemplateAliasInput) SetTemplateId(v string) *DeleteTemplateAliasInput { + s.TemplateId = &v return s } -type CreateDashboardOutput struct { +type DeleteTemplateAliasOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the dashboard. - Arn *string `type:"string"` - - // The status of the dashboard creation request. - CreationStatus *string `type:"string" enum:"ResourceStatus"` + // The name for the template alias. + AliasName *string `min:"1" type:"string"` - // The ID for the dashboard. - DashboardId *string `min:"1" type:"string"` + // The Amazon Resource Name (ARN) of the template you want to delete. + Arn *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -8378,208 +17862,95 @@ type CreateDashboardOutput struct { // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` - // The ARN of the dashboard, including the version number of the first version - // that is created. - VersionArn *string `type:"string"` + // An ID for the template associated with the deletion. + TemplateId *string `min:"1" type:"string"` } // String returns the string representation -func (s CreateDashboardOutput) String() string { +func (s DeleteTemplateAliasOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDashboardOutput) GoString() string { +func (s DeleteTemplateAliasOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *CreateDashboardOutput) SetArn(v string) *CreateDashboardOutput { - s.Arn = &v - return s -} - -// SetCreationStatus sets the CreationStatus field's value. -func (s *CreateDashboardOutput) SetCreationStatus(v string) *CreateDashboardOutput { - s.CreationStatus = &v +// SetAliasName sets the AliasName field's value. +func (s *DeleteTemplateAliasOutput) SetAliasName(v string) *DeleteTemplateAliasOutput { + s.AliasName = &v return s } -// SetDashboardId sets the DashboardId field's value. -func (s *CreateDashboardOutput) SetDashboardId(v string) *CreateDashboardOutput { - s.DashboardId = &v +// SetArn sets the Arn field's value. +func (s *DeleteTemplateAliasOutput) SetArn(v string) *DeleteTemplateAliasOutput { + s.Arn = &v return s } // SetRequestId sets the RequestId field's value. 
-func (s *CreateDashboardOutput) SetRequestId(v string) *CreateDashboardOutput { +func (s *DeleteTemplateAliasOutput) SetRequestId(v string) *DeleteTemplateAliasOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *CreateDashboardOutput) SetStatus(v int64) *CreateDashboardOutput { +func (s *DeleteTemplateAliasOutput) SetStatus(v int64) *DeleteTemplateAliasOutput { s.Status = &v return s } -// SetVersionArn sets the VersionArn field's value. -func (s *CreateDashboardOutput) SetVersionArn(v string) *CreateDashboardOutput { - s.VersionArn = &v +// SetTemplateId sets the TemplateId field's value. +func (s *DeleteTemplateAliasOutput) SetTemplateId(v string) *DeleteTemplateAliasOutput { + s.TemplateId = &v return s } -type CreateDataSetInput struct { +type DeleteTemplateInput struct { _ struct{} `type:"structure"` - // The AWS account ID. + // The ID of the AWS account that contains the template that you're deleting. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Groupings of columns that work together in certain QuickSight features. Currently, - // only geospatial hierarchy is supported. - ColumnGroups []*ColumnGroup `min:"1" type:"list"` - - // An ID for the dataset that you want to create. This ID is unique per AWS - // Region for each AWS account. - // - // DataSetId is a required field - DataSetId *string `type:"string" required:"true"` - - // Indicates whether you want to import the data into SPICE. - // - // ImportMode is a required field - ImportMode *string `type:"string" required:"true" enum:"DataSetImportMode"` - - // Configures the combination and transformation of the data from the physical - // tables. - LogicalTableMap map[string]*LogicalTable `min:"1" type:"map"` - - // The display name for the dataset. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // A list of resource permissions on the dataset. - Permissions []*ResourcePermission `min:"1" type:"list"` - - // Declares the physical tables that are available in the underlying data sources. + // An ID for the template you want to delete. // - // PhysicalTableMap is a required field - PhysicalTableMap map[string]*PhysicalTable `min:"1" type:"map" required:"true"` - - // The row-level security configuration for the data that you want to create. - RowLevelPermissionDataSet *RowLevelPermissionDataSet `type:"structure"` + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` - // Contains a map of the key-value pairs for the resource tag or tags assigned - // to the dataset. - Tags []*Tag `min:"1" type:"list"` + // Specifies the version of the template that you want to delete. If you don't + // provide a version number, DeleteTemplate deletes all versions of the template. + VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` } // String returns the string representation -func (s CreateDataSetInput) String() string { +func (s DeleteTemplateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDataSetInput) GoString() string { +func (s DeleteTemplateInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateDataSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDataSetInput"} +func (s *DeleteTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTemplateInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.ColumnGroups != nil && len(s.ColumnGroups) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ColumnGroups", 1)) - } - if s.DataSetId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetId")) - } - if s.ImportMode == nil { - invalidParams.Add(request.NewErrParamRequired("ImportMode")) - } - if s.LogicalTableMap != nil && len(s.LogicalTableMap) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogicalTableMap", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Permissions != nil && len(s.Permissions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Permissions", 1)) - } - if s.PhysicalTableMap == nil { - invalidParams.Add(request.NewErrParamRequired("PhysicalTableMap")) - } - if s.PhysicalTableMap != nil && len(s.PhysicalTableMap) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PhysicalTableMap", 1)) - } - if s.Tags != nil && len(s.Tags) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) - } - if s.ColumnGroups != nil { - for i, v := range s.ColumnGroups { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ColumnGroups", i), err.(request.ErrInvalidParams)) - } - } - } - if s.LogicalTableMap != nil { - for i, v := range s.LogicalTableMap { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LogicalTableMap", i), err.(request.ErrInvalidParams)) - } - } - } - if s.Permissions != nil { - for i, v := range s.Permissions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.PhysicalTableMap != nil { - for i, v := range s.PhysicalTableMap { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PhysicalTableMap", i), err.(request.ErrInvalidParams)) - } - } + if s.TemplateId == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateId")) } - if s.RowLevelPermissionDataSet != nil { - if err := s.RowLevelPermissionDataSet.Validate(); err != nil { - invalidParams.AddNested("RowLevelPermissionDataSet", err.(request.ErrInvalidParams)) - } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } + if s.VersionNumber != nil && *s.VersionNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("VersionNumber", 1)) } if invalidParams.Len() > 0 { @@ -8589,257 +17960,254 @@ func (s *CreateDataSetInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. 
-func (s *CreateDataSetInput) SetAwsAccountId(v string) *CreateDataSetInput { +func (s *DeleteTemplateInput) SetAwsAccountId(v string) *DeleteTemplateInput { s.AwsAccountId = &v return s } -// SetColumnGroups sets the ColumnGroups field's value. -func (s *CreateDataSetInput) SetColumnGroups(v []*ColumnGroup) *CreateDataSetInput { - s.ColumnGroups = v +// SetTemplateId sets the TemplateId field's value. +func (s *DeleteTemplateInput) SetTemplateId(v string) *DeleteTemplateInput { + s.TemplateId = &v + return s +} + +// SetVersionNumber sets the VersionNumber field's value. +func (s *DeleteTemplateInput) SetVersionNumber(v int64) *DeleteTemplateInput { + s.VersionNumber = &v + return s +} + +type DeleteTemplateOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource. + Arn *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // An ID for the template. + TemplateId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteTemplateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTemplateOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *DeleteTemplateOutput) SetArn(v string) *DeleteTemplateOutput { + s.Arn = &v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *DeleteTemplateOutput) SetRequestId(v string) *DeleteTemplateOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DeleteTemplateOutput) SetStatus(v int64) *DeleteTemplateOutput { + s.Status = &v return s } -// SetDataSetId sets the DataSetId field's value. -func (s *CreateDataSetInput) SetDataSetId(v string) *CreateDataSetInput { - s.DataSetId = &v +// SetTemplateId sets the TemplateId field's value. +func (s *DeleteTemplateOutput) SetTemplateId(v string) *DeleteTemplateOutput { + s.TemplateId = &v return s } -// SetImportMode sets the ImportMode field's value. -func (s *CreateDataSetInput) SetImportMode(v string) *CreateDataSetInput { - s.ImportMode = &v - return s +type DeleteThemeAliasInput struct { + _ struct{} `type:"structure"` + + // The unique name for the theme alias to delete. + // + // AliasName is a required field + AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` + + // The ID of the AWS account that contains the theme alias to delete. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the theme that the specified alias is for. + // + // ThemeId is a required field + ThemeId *string `location:"uri" locationName:"ThemeId" min:"1" type:"string" required:"true"` } -// SetLogicalTableMap sets the LogicalTableMap field's value. -func (s *CreateDataSetInput) SetLogicalTableMap(v map[string]*LogicalTable) *CreateDataSetInput { - s.LogicalTableMap = v - return s +// String returns the string representation +func (s DeleteThemeAliasInput) String() string { + return awsutil.Prettify(s) } -// SetName sets the Name field's value. 
-func (s *CreateDataSetInput) SetName(v string) *CreateDataSetInput { - s.Name = &v - return s +// GoString returns the string representation +func (s DeleteThemeAliasInput) GoString() string { + return s.String() } -// SetPermissions sets the Permissions field's value. -func (s *CreateDataSetInput) SetPermissions(v []*ResourcePermission) *CreateDataSetInput { - s.Permissions = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteThemeAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteThemeAliasInput"} + if s.AliasName == nil { + invalidParams.Add(request.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.ThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeId")) + } + if s.ThemeId != nil && len(*s.ThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThemeId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetPhysicalTableMap sets the PhysicalTableMap field's value. -func (s *CreateDataSetInput) SetPhysicalTableMap(v map[string]*PhysicalTable) *CreateDataSetInput { - s.PhysicalTableMap = v +// SetAliasName sets the AliasName field's value. +func (s *DeleteThemeAliasInput) SetAliasName(v string) *DeleteThemeAliasInput { + s.AliasName = &v return s } -// SetRowLevelPermissionDataSet sets the RowLevelPermissionDataSet field's value. -func (s *CreateDataSetInput) SetRowLevelPermissionDataSet(v *RowLevelPermissionDataSet) *CreateDataSetInput { - s.RowLevelPermissionDataSet = v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DeleteThemeAliasInput) SetAwsAccountId(v string) *DeleteThemeAliasInput { + s.AwsAccountId = &v return s } -// SetTags sets the Tags field's value. -func (s *CreateDataSetInput) SetTags(v []*Tag) *CreateDataSetInput { - s.Tags = v +// SetThemeId sets the ThemeId field's value. +func (s *DeleteThemeAliasInput) SetThemeId(v string) *DeleteThemeAliasInput { + s.ThemeId = &v return s } -type CreateDataSetOutput struct { +type DeleteThemeAliasOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the dataset. - Arn *string `type:"string"` - - // The ID for the dataset that you want to create. This ID is unique per AWS - // Region for each AWS account. - DataSetId *string `type:"string"` - - // The ARN for the ingestion, which is triggered as a result of dataset creation - // if the import mode is SPICE. - IngestionArn *string `type:"string"` + // The name for the theme alias. + AliasName *string `min:"1" type:"string"` - // The ID of the ingestion, which is triggered as a result of dataset creation - // if the import mode is SPICE. - IngestionId *string `type:"string"` + // The Amazon Resource Name (ARN) of the theme resource using the deleted alias. + Arn *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` + + // An ID for the theme associated with the deletion. 
+ ThemeId *string `min:"1" type:"string"` } // String returns the string representation -func (s CreateDataSetOutput) String() string { +func (s DeleteThemeAliasOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDataSetOutput) GoString() string { +func (s DeleteThemeAliasOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *CreateDataSetOutput) SetArn(v string) *CreateDataSetOutput { - s.Arn = &v - return s -} - -// SetDataSetId sets the DataSetId field's value. -func (s *CreateDataSetOutput) SetDataSetId(v string) *CreateDataSetOutput { - s.DataSetId = &v - return s -} - -// SetIngestionArn sets the IngestionArn field's value. -func (s *CreateDataSetOutput) SetIngestionArn(v string) *CreateDataSetOutput { - s.IngestionArn = &v +// SetAliasName sets the AliasName field's value. +func (s *DeleteThemeAliasOutput) SetAliasName(v string) *DeleteThemeAliasOutput { + s.AliasName = &v return s } -// SetIngestionId sets the IngestionId field's value. -func (s *CreateDataSetOutput) SetIngestionId(v string) *CreateDataSetOutput { - s.IngestionId = &v +// SetArn sets the Arn field's value. +func (s *DeleteThemeAliasOutput) SetArn(v string) *DeleteThemeAliasOutput { + s.Arn = &v return s } // SetRequestId sets the RequestId field's value. -func (s *CreateDataSetOutput) SetRequestId(v string) *CreateDataSetOutput { +func (s *DeleteThemeAliasOutput) SetRequestId(v string) *DeleteThemeAliasOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *CreateDataSetOutput) SetStatus(v int64) *CreateDataSetOutput { +func (s *DeleteThemeAliasOutput) SetStatus(v int64) *DeleteThemeAliasOutput { s.Status = &v return s } -type CreateDataSourceInput struct { +// SetThemeId sets the ThemeId field's value. +func (s *DeleteThemeAliasOutput) SetThemeId(v string) *DeleteThemeAliasOutput { + s.ThemeId = &v + return s +} + +type DeleteThemeInput struct { _ struct{} `type:"structure"` - // The AWS account ID. + // The ID of the AWS account that contains the theme that you're deleting. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The credentials QuickSight that uses to connect to your underlying source. - // Currently, only credentials based on user name and password are supported. - Credentials *DataSourceCredentials `type:"structure" sensitive:"true"` - - // An ID for the data source. This ID is unique per AWS Region for each AWS - // account. - // - // DataSourceId is a required field - DataSourceId *string `type:"string" required:"true"` - - // The parameters that QuickSight uses to connect to your underlying source. - DataSourceParameters *DataSourceParameters `type:"structure"` - - // A display name for the data source. + // An ID for the theme that you want to delete. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // A list of resource permissions on the data source. - Permissions []*ResourcePermission `min:"1" type:"list"` - - // Secure Socket Layer (SSL) properties that apply when QuickSight connects - // to your underlying source. - SslProperties *SslProperties `type:"structure"` - - // Contains a map of the key-value pairs for the resource tag or tags assigned - // to the data source. 
- Tags []*Tag `min:"1" type:"list"` + // ThemeId is a required field + ThemeId *string `location:"uri" locationName:"ThemeId" min:"1" type:"string" required:"true"` - // The type of the data source. Currently, the supported types for this operation - // are: ATHENA, AURORA, AURORA_POSTGRESQL, MARIADB, MYSQL, POSTGRESQL, PRESTO, - // REDSHIFT, S3, SNOWFLAKE, SPARK, SQLSERVER, TERADATA. Use ListDataSources - // to return a list of all data sources. + // The version of the theme that you want to delete. // - // Type is a required field - Type *string `type:"string" required:"true" enum:"DataSourceType"` - - // Use this parameter only when you want QuickSight to use a VPC connection - // when connecting to your underlying source. - VpcConnectionProperties *VpcConnectionProperties `type:"structure"` + // Note: If you don't provide a version number, you're using this call to DeleteTheme + // to delete all versions of the theme. + VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` } // String returns the string representation -func (s CreateDataSourceInput) String() string { +func (s DeleteThemeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDataSourceInput) GoString() string { +func (s DeleteThemeInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateDataSourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateDataSourceInput"} +func (s *DeleteThemeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteThemeInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSourceId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSourceId")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Permissions != nil && len(s.Permissions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Permissions", 1)) - } - if s.Tags != nil && len(s.Tags) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) - } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } - if s.Credentials != nil { - if err := s.Credentials.Validate(); err != nil { - invalidParams.AddNested("Credentials", err.(request.ErrInvalidParams)) - } - } - if s.DataSourceParameters != nil { - if err := s.DataSourceParameters.Validate(); err != nil { - invalidParams.AddNested("DataSourceParameters", err.(request.ErrInvalidParams)) - } - } - if s.Permissions != nil { - for i, v := range s.Permissions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(request.ErrInvalidParams)) - } - } + if s.ThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeId")) } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } + if s.ThemeId != nil && len(*s.ThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThemeId", 1)) } - if s.VpcConnectionProperties != nil { - if err 
:= s.VpcConnectionProperties.Validate(); err != nil { - invalidParams.AddNested("VpcConnectionProperties", err.(request.ErrInvalidParams)) - } + if s.VersionNumber != nil && *s.VersionNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("VersionNumber", 1)) } if invalidParams.Len() > 0 { @@ -8849,183 +18217,124 @@ func (s *CreateDataSourceInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *CreateDataSourceInput) SetAwsAccountId(v string) *CreateDataSourceInput { - s.AwsAccountId = &v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *CreateDataSourceInput) SetCredentials(v *DataSourceCredentials) *CreateDataSourceInput { - s.Credentials = v - return s -} - -// SetDataSourceId sets the DataSourceId field's value. -func (s *CreateDataSourceInput) SetDataSourceId(v string) *CreateDataSourceInput { - s.DataSourceId = &v - return s -} - -// SetDataSourceParameters sets the DataSourceParameters field's value. -func (s *CreateDataSourceInput) SetDataSourceParameters(v *DataSourceParameters) *CreateDataSourceInput { - s.DataSourceParameters = v - return s -} - -// SetName sets the Name field's value. -func (s *CreateDataSourceInput) SetName(v string) *CreateDataSourceInput { - s.Name = &v - return s -} - -// SetPermissions sets the Permissions field's value. -func (s *CreateDataSourceInput) SetPermissions(v []*ResourcePermission) *CreateDataSourceInput { - s.Permissions = v - return s -} - -// SetSslProperties sets the SslProperties field's value. -func (s *CreateDataSourceInput) SetSslProperties(v *SslProperties) *CreateDataSourceInput { - s.SslProperties = v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateDataSourceInput) SetTags(v []*Tag) *CreateDataSourceInput { - s.Tags = v +func (s *DeleteThemeInput) SetAwsAccountId(v string) *DeleteThemeInput { + s.AwsAccountId = &v return s } -// SetType sets the Type field's value. -func (s *CreateDataSourceInput) SetType(v string) *CreateDataSourceInput { - s.Type = &v +// SetThemeId sets the ThemeId field's value. +func (s *DeleteThemeInput) SetThemeId(v string) *DeleteThemeInput { + s.ThemeId = &v return s } -// SetVpcConnectionProperties sets the VpcConnectionProperties field's value. -func (s *CreateDataSourceInput) SetVpcConnectionProperties(v *VpcConnectionProperties) *CreateDataSourceInput { - s.VpcConnectionProperties = v +// SetVersionNumber sets the VersionNumber field's value. +func (s *DeleteThemeInput) SetVersionNumber(v int64) *DeleteThemeInput { + s.VersionNumber = &v return s } -type CreateDataSourceOutput struct { +type DeleteThemeOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the data source. + // The Amazon Resource Name (ARN) of the resource. Arn *string `type:"string"` - // The status of creating the data source. - CreationStatus *string `type:"string" enum:"ResourceStatus"` - - // The ID of the data source. This ID is unique per AWS Region for each AWS - // account. - DataSourceId *string `type:"string"` - // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` + + // An ID for the theme. 
+ ThemeId *string `min:"1" type:"string"` } // String returns the string representation -func (s CreateDataSourceOutput) String() string { +func (s DeleteThemeOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateDataSourceOutput) GoString() string { +func (s DeleteThemeOutput) GoString() string { return s.String() } // SetArn sets the Arn field's value. -func (s *CreateDataSourceOutput) SetArn(v string) *CreateDataSourceOutput { +func (s *DeleteThemeOutput) SetArn(v string) *DeleteThemeOutput { s.Arn = &v return s } -// SetCreationStatus sets the CreationStatus field's value. -func (s *CreateDataSourceOutput) SetCreationStatus(v string) *CreateDataSourceOutput { - s.CreationStatus = &v - return s -} - -// SetDataSourceId sets the DataSourceId field's value. -func (s *CreateDataSourceOutput) SetDataSourceId(v string) *CreateDataSourceOutput { - s.DataSourceId = &v - return s -} - // SetRequestId sets the RequestId field's value. -func (s *CreateDataSourceOutput) SetRequestId(v string) *CreateDataSourceOutput { +func (s *DeleteThemeOutput) SetRequestId(v string) *DeleteThemeOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *CreateDataSourceOutput) SetStatus(v int64) *CreateDataSourceOutput { +func (s *DeleteThemeOutput) SetStatus(v int64) *DeleteThemeOutput { s.Status = &v return s } -// The request object for this operation. -type CreateGroupInput struct { +// SetThemeId sets the ThemeId field's value. +func (s *DeleteThemeOutput) SetThemeId(v string) *DeleteThemeOutput { + s.ThemeId = &v + return s +} + +type DeleteUserByPrincipalIdInput struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the group is in. Currently, you use the ID + // The ID for the AWS account that the user is in. Currently, you use the ID // for the AWS account that contains your Amazon QuickSight account. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // A description for the group that you want to create. - Description *string `min:"1" type:"string"` - - // A name for the group that you want to create. - // - // GroupName is a required field - GroupName *string `min:"1" type:"string" required:"true"` - // The namespace. Currently, you should set this to default. // // Namespace is a required field Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + + // The principal ID of the user. + // + // PrincipalId is a required field + PrincipalId *string `location:"uri" locationName:"PrincipalId" type:"string" required:"true"` } // String returns the string representation -func (s CreateGroupInput) String() string { +func (s DeleteUserByPrincipalIdInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateGroupInput) GoString() string { +func (s DeleteUserByPrincipalIdInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateGroupInput"} +func (s *DeleteUserByPrincipalIdInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserByPrincipalIdInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) - } - if s.GroupName != nil && len(*s.GroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) - } if s.Namespace == nil { invalidParams.Add(request.NewErrParamRequired("Namespace")) } if s.Namespace != nil && len(*s.Namespace) < 1 { invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } + if s.PrincipalId == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalId")) + } + if s.PrincipalId != nil && len(*s.PrincipalId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalId", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9034,91 +18343,106 @@ func (s *CreateGroupInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *CreateGroupInput) SetAwsAccountId(v string) *CreateGroupInput { +func (s *DeleteUserByPrincipalIdInput) SetAwsAccountId(v string) *DeleteUserByPrincipalIdInput { s.AwsAccountId = &v return s } -// SetDescription sets the Description field's value. -func (s *CreateGroupInput) SetDescription(v string) *CreateGroupInput { - s.Description = &v +// SetNamespace sets the Namespace field's value. +func (s *DeleteUserByPrincipalIdInput) SetNamespace(v string) *DeleteUserByPrincipalIdInput { + s.Namespace = &v return s } -// SetGroupName sets the GroupName field's value. -func (s *CreateGroupInput) SetGroupName(v string) *CreateGroupInput { - s.GroupName = &v +// SetPrincipalId sets the PrincipalId field's value. +func (s *DeleteUserByPrincipalIdInput) SetPrincipalId(v string) *DeleteUserByPrincipalIdInput { + s.PrincipalId = &v return s } -// SetNamespace sets the Namespace field's value. -func (s *CreateGroupInput) SetNamespace(v string) *CreateGroupInput { - s.Namespace = &v +type DeleteUserByPrincipalIdOutput struct { + _ struct{} `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DeleteUserByPrincipalIdOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserByPrincipalIdOutput) GoString() string { + return s.String() +} + +// SetRequestId sets the RequestId field's value. +func (s *DeleteUserByPrincipalIdOutput) SetRequestId(v string) *DeleteUserByPrincipalIdOutput { + s.RequestId = &v return s } -type CreateGroupMembershipInput struct { +// SetStatus sets the Status field's value. +func (s *DeleteUserByPrincipalIdOutput) SetStatus(v int64) *DeleteUserByPrincipalIdOutput { + s.Status = &v + return s +} + +type DeleteUserInput struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the group is in. Currently, you use the ID + // The ID for the AWS account that the user is in. 
Currently, you use the ID // for the AWS account that contains your Amazon QuickSight account. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The name of the group that you want to add the user to. - // - // GroupName is a required field - GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` - - // The name of the user that you want to add to the group membership. - // - // MemberName is a required field - MemberName *string `location:"uri" locationName:"MemberName" min:"1" type:"string" required:"true"` - // The namespace. Currently, you should set this to default. // // Namespace is a required field Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + + // The name of the user that you want to delete. + // + // UserName is a required field + UserName *string `location:"uri" locationName:"UserName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateGroupMembershipInput) String() string { +func (s DeleteUserInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateGroupMembershipInput) GoString() string { +func (s DeleteUserInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateGroupMembershipInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateGroupMembershipInput"} +func (s *DeleteUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) - } - if s.GroupName != nil && len(*s.GroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) - } - if s.MemberName == nil { - invalidParams.Add(request.NewErrParamRequired("MemberName")) - } - if s.MemberName != nil && len(*s.MemberName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MemberName", 1)) - } if s.Namespace == nil { invalidParams.Add(request.NewErrParamRequired("Namespace")) } if s.Namespace != nil && len(*s.Namespace) < 1 { invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9127,34 +18451,135 @@ func (s *CreateGroupMembershipInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *CreateGroupMembershipInput) SetAwsAccountId(v string) *CreateGroupMembershipInput { +func (s *DeleteUserInput) SetAwsAccountId(v string) *DeleteUserInput { s.AwsAccountId = &v return s } -// SetGroupName sets the GroupName field's value. -func (s *CreateGroupMembershipInput) SetGroupName(v string) *CreateGroupMembershipInput { - s.GroupName = &v +// SetNamespace sets the Namespace field's value. +func (s *DeleteUserInput) SetNamespace(v string) *DeleteUserInput { + s.Namespace = &v + return s +} + +// SetUserName sets the UserName field's value. 
+func (s *DeleteUserInput) SetUserName(v string) *DeleteUserInput { + s.UserName = &v + return s +} + +type DeleteUserOutput struct { + _ struct{} `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DeleteUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserOutput) GoString() string { + return s.String() +} + +// SetRequestId sets the RequestId field's value. +func (s *DeleteUserOutput) SetRequestId(v string) *DeleteUserOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DeleteUserOutput) SetStatus(v int64) *DeleteUserOutput { + s.Status = &v + return s +} + +type DescribeAccountCustomizationInput struct { + _ struct{} `type:"structure"` + + // The ID for the AWS account that you want to describe QuickSight customizations + // for. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The QuickSight namespace that you want to describe QuickSight customizations + // for. + Namespace *string `location:"querystring" locationName:"namespace" type:"string"` + + // The Resolved flag works with the other parameters to determine which view + // of QuickSight customizations is returned. You can add this flag to your command + // to use the same view that QuickSight uses to identify which customizations + // to apply to the console. Omit this flag, or set it to no-resolved, to reveal + // customizations that are configured at different levels. + Resolved *bool `location:"querystring" locationName:"resolved" type:"boolean"` +} + +// String returns the string representation +func (s DescribeAccountCustomizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountCustomizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAccountCustomizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAccountCustomizationInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeAccountCustomizationInput) SetAwsAccountId(v string) *DescribeAccountCustomizationInput { + s.AwsAccountId = &v return s } -// SetMemberName sets the MemberName field's value. -func (s *CreateGroupMembershipInput) SetMemberName(v string) *CreateGroupMembershipInput { - s.MemberName = &v +// SetNamespace sets the Namespace field's value. +func (s *DescribeAccountCustomizationInput) SetNamespace(v string) *DescribeAccountCustomizationInput { + s.Namespace = &v return s } -// SetNamespace sets the Namespace field's value. -func (s *CreateGroupMembershipInput) SetNamespace(v string) *CreateGroupMembershipInput { - s.Namespace = &v +// SetResolved sets the Resolved field's value. 
+func (s *DescribeAccountCustomizationInput) SetResolved(v bool) *DescribeAccountCustomizationInput { + s.Resolved = &v return s } -type CreateGroupMembershipOutput struct { +type DescribeAccountCustomizationOutput struct { _ struct{} `type:"structure"` - // The group member. - GroupMember *GroupMember `type:"structure"` + // The QuickSight customizations that exist in the current AWS Region. + AccountCustomization *AccountCustomization `type:"structure"` + + // The Amazon Resource Name (ARN) of the customization that's associated with + // this AWS account. + Arn *string `type:"string"` + + // The ID for the AWS account that you're describing. + AwsAccountId *string `min:"12" type:"string"` + + // The QuickSight namespace that you're describing. + Namespace *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -9164,39 +18589,104 @@ type CreateGroupMembershipOutput struct { } // String returns the string representation -func (s CreateGroupMembershipOutput) String() string { +func (s DescribeAccountCustomizationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateGroupMembershipOutput) GoString() string { +func (s DescribeAccountCustomizationOutput) GoString() string { return s.String() } -// SetGroupMember sets the GroupMember field's value. -func (s *CreateGroupMembershipOutput) SetGroupMember(v *GroupMember) *CreateGroupMembershipOutput { - s.GroupMember = v +// SetAccountCustomization sets the AccountCustomization field's value. +func (s *DescribeAccountCustomizationOutput) SetAccountCustomization(v *AccountCustomization) *DescribeAccountCustomizationOutput { + s.AccountCustomization = v + return s +} + +// SetArn sets the Arn field's value. +func (s *DescribeAccountCustomizationOutput) SetArn(v string) *DescribeAccountCustomizationOutput { + s.Arn = &v + return s +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeAccountCustomizationOutput) SetAwsAccountId(v string) *DescribeAccountCustomizationOutput { + s.AwsAccountId = &v + return s +} + +// SetNamespace sets the Namespace field's value. +func (s *DescribeAccountCustomizationOutput) SetNamespace(v string) *DescribeAccountCustomizationOutput { + s.Namespace = &v return s } // SetRequestId sets the RequestId field's value. -func (s *CreateGroupMembershipOutput) SetRequestId(v string) *CreateGroupMembershipOutput { +func (s *DescribeAccountCustomizationOutput) SetRequestId(v string) *DescribeAccountCustomizationOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *CreateGroupMembershipOutput) SetStatus(v int64) *CreateGroupMembershipOutput { +func (s *DescribeAccountCustomizationOutput) SetStatus(v int64) *DescribeAccountCustomizationOutput { s.Status = &v return s } -// The response object for this operation. -type CreateGroupOutput struct { +type DescribeAccountSettingsInput struct { _ struct{} `type:"structure"` - // The name of the group. - Group *Group `type:"structure"` + // The ID for the AWS account that contains the settings that you want to list. 
+ // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeAccountSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeAccountSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeAccountSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAccountSettingsInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeAccountSettingsInput) SetAwsAccountId(v string) *DescribeAccountSettingsInput { + s.AwsAccountId = &v + return s +} + +type DescribeAccountSettingsOutput struct { + _ struct{} `type:"structure"` + + // The QuickSight settings for this AWS account. This information includes the + // edition of Amazon QuickSight that you subscribed to (Standard or Enterprise) + // and the notification email for the QuickSight subscription. In the QuickSight + // console, the QuickSight subscription is sometimes referred to as a QuickSight + // "account" even though it's technically not an account by itself. Instead, + // it's a subscription to the QuickSight service for your AWS account. The edition + // that you subscribe to applies to QuickSight in every AWS Region where you + // use it. + AccountSettings *AccountSettings `type:"structure"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -9206,95 +18696,67 @@ type CreateGroupOutput struct { } // String returns the string representation -func (s CreateGroupOutput) String() string { +func (s DescribeAccountSettingsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateGroupOutput) GoString() string { +func (s DescribeAccountSettingsOutput) GoString() string { return s.String() } -// SetGroup sets the Group field's value. -func (s *CreateGroupOutput) SetGroup(v *Group) *CreateGroupOutput { - s.Group = v +// SetAccountSettings sets the AccountSettings field's value. +func (s *DescribeAccountSettingsOutput) SetAccountSettings(v *AccountSettings) *DescribeAccountSettingsOutput { + s.AccountSettings = v return s } // SetRequestId sets the RequestId field's value. -func (s *CreateGroupOutput) SetRequestId(v string) *CreateGroupOutput { +func (s *DescribeAccountSettingsOutput) SetRequestId(v string) *DescribeAccountSettingsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *CreateGroupOutput) SetStatus(v int64) *CreateGroupOutput { +func (s *DescribeAccountSettingsOutput) SetStatus(v int64) *DescribeAccountSettingsOutput { s.Status = &v return s } -type CreateIAMPolicyAssignmentInput struct { +type DescribeAnalysisInput struct { _ struct{} `type:"structure"` - // The name of the assignment. It must be unique within an AWS account. - // - // AssignmentName is a required field - AssignmentName *string `min:"1" type:"string" required:"true"` - - // The status of the assignment. 
Possible values are as follows: - // - // * ENABLED - Anything specified in this assignment is used when creating - // the data source. - // - // * DISABLED - This assignment isn't used when creating the data source. - // - // * DRAFT - This assignment is an unfinished draft and isn't used when creating - // the data source. + // The ID of the analysis that you're describing. The ID is part of the URL + // of the analysis. // - // AssignmentStatus is a required field - AssignmentStatus *string `type:"string" required:"true" enum:"AssignmentStatus"` + // AnalysisId is a required field + AnalysisId *string `location:"uri" locationName:"AnalysisId" min:"1" type:"string" required:"true"` - // The ID of the AWS account where you want to assign an IAM policy to QuickSight - // users or groups. + // The ID of the AWS account that contains the analysis. You must be using the + // AWS account that the analysis is in. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - - // The QuickSight users, groups, or both that you want to assign the policy - // to. - Identities map[string][]*string `type:"map"` - - // The namespace that contains the assignment. - // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` - - // The ARN for the IAM policy to apply to the QuickSight users and groups specified - // in this assignment. - PolicyArn *string `type:"string"` } // String returns the string representation -func (s CreateIAMPolicyAssignmentInput) String() string { +func (s DescribeAnalysisInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateIAMPolicyAssignmentInput) GoString() string { +func (s DescribeAnalysisInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateIAMPolicyAssignmentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateIAMPolicyAssignmentInput"} - if s.AssignmentName == nil { - invalidParams.Add(request.NewErrParamRequired("AssignmentName")) - } - if s.AssignmentName != nil && len(*s.AssignmentName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AssignmentName", 1)) +func (s *DescribeAnalysisInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAnalysisInput"} + if s.AnalysisId == nil { + invalidParams.Add(request.NewErrParamRequired("AnalysisId")) } - if s.AssignmentStatus == nil { - invalidParams.Add(request.NewErrParamRequired("AssignmentStatus")) + if s.AnalysisId != nil && len(*s.AnalysisId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnalysisId", 1)) } if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) @@ -9302,12 +18764,6 @@ func (s *CreateIAMPolicyAssignmentInput) Validate() error { if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) - } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -9315,68 +18771,24 @@ func (s *CreateIAMPolicyAssignmentInput) Validate() error { return nil } -// SetAssignmentName sets the AssignmentName field's value. 
-func (s *CreateIAMPolicyAssignmentInput) SetAssignmentName(v string) *CreateIAMPolicyAssignmentInput { - s.AssignmentName = &v - return s -} - -// SetAssignmentStatus sets the AssignmentStatus field's value. -func (s *CreateIAMPolicyAssignmentInput) SetAssignmentStatus(v string) *CreateIAMPolicyAssignmentInput { - s.AssignmentStatus = &v +// SetAnalysisId sets the AnalysisId field's value. +func (s *DescribeAnalysisInput) SetAnalysisId(v string) *DescribeAnalysisInput { + s.AnalysisId = &v return s } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *CreateIAMPolicyAssignmentInput) SetAwsAccountId(v string) *CreateIAMPolicyAssignmentInput { +func (s *DescribeAnalysisInput) SetAwsAccountId(v string) *DescribeAnalysisInput { s.AwsAccountId = &v return s } -// SetIdentities sets the Identities field's value. -func (s *CreateIAMPolicyAssignmentInput) SetIdentities(v map[string][]*string) *CreateIAMPolicyAssignmentInput { - s.Identities = v - return s -} - -// SetNamespace sets the Namespace field's value. -func (s *CreateIAMPolicyAssignmentInput) SetNamespace(v string) *CreateIAMPolicyAssignmentInput { - s.Namespace = &v - return s -} - -// SetPolicyArn sets the PolicyArn field's value. -func (s *CreateIAMPolicyAssignmentInput) SetPolicyArn(v string) *CreateIAMPolicyAssignmentInput { - s.PolicyArn = &v - return s -} - -type CreateIAMPolicyAssignmentOutput struct { +type DescribeAnalysisOutput struct { _ struct{} `type:"structure"` - // The ID for the assignment. - AssignmentId *string `type:"string"` - - // The name of the assignment. This name must be unique within the AWS account. - AssignmentName *string `min:"1" type:"string"` - - // The status of the assignment. Possible values are as follows: - // - // * ENABLED - Anything specified in this assignment is used when creating - // the data source. - // - // * DISABLED - This assignment isn't used when creating the data source. - // - // * DRAFT - This assignment is an unfinished draft and isn't used when creating - // the data source. - AssignmentStatus *string `type:"string" enum:"AssignmentStatus"` - - // The QuickSight users, groups, or both that the IAM policy is assigned to. - Identities map[string][]*string `type:"map"` - - // The ARN for the IAM policy that is applied to the QuickSight users and groups - // specified in this assignment. - PolicyArn *string `type:"string"` + // A metadata structure that contains summary information for the analysis that + // you're describing. + Analysis *Analysis `type:"structure"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -9386,107 +18798,74 @@ type CreateIAMPolicyAssignmentOutput struct { } // String returns the string representation -func (s CreateIAMPolicyAssignmentOutput) String() string { +func (s DescribeAnalysisOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateIAMPolicyAssignmentOutput) GoString() string { +func (s DescribeAnalysisOutput) GoString() string { return s.String() } -// SetAssignmentId sets the AssignmentId field's value. -func (s *CreateIAMPolicyAssignmentOutput) SetAssignmentId(v string) *CreateIAMPolicyAssignmentOutput { - s.AssignmentId = &v - return s -} - -// SetAssignmentName sets the AssignmentName field's value. -func (s *CreateIAMPolicyAssignmentOutput) SetAssignmentName(v string) *CreateIAMPolicyAssignmentOutput { - s.AssignmentName = &v - return s -} - -// SetAssignmentStatus sets the AssignmentStatus field's value. 
-func (s *CreateIAMPolicyAssignmentOutput) SetAssignmentStatus(v string) *CreateIAMPolicyAssignmentOutput { - s.AssignmentStatus = &v - return s -} - -// SetIdentities sets the Identities field's value. -func (s *CreateIAMPolicyAssignmentOutput) SetIdentities(v map[string][]*string) *CreateIAMPolicyAssignmentOutput { - s.Identities = v - return s -} - -// SetPolicyArn sets the PolicyArn field's value. -func (s *CreateIAMPolicyAssignmentOutput) SetPolicyArn(v string) *CreateIAMPolicyAssignmentOutput { - s.PolicyArn = &v +// SetAnalysis sets the Analysis field's value. +func (s *DescribeAnalysisOutput) SetAnalysis(v *Analysis) *DescribeAnalysisOutput { + s.Analysis = v return s } // SetRequestId sets the RequestId field's value. -func (s *CreateIAMPolicyAssignmentOutput) SetRequestId(v string) *CreateIAMPolicyAssignmentOutput { +func (s *DescribeAnalysisOutput) SetRequestId(v string) *DescribeAnalysisOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *CreateIAMPolicyAssignmentOutput) SetStatus(v int64) *CreateIAMPolicyAssignmentOutput { +func (s *DescribeAnalysisOutput) SetStatus(v int64) *DescribeAnalysisOutput { s.Status = &v return s } -type CreateIngestionInput struct { +type DescribeAnalysisPermissionsInput struct { _ struct{} `type:"structure"` - // The AWS account ID. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - - // The ID of the dataset used in the ingestion. + // The ID of the analysis whose permissions you're describing. The ID is part + // of the analysis URL. // - // DataSetId is a required field - DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + // AnalysisId is a required field + AnalysisId *string `location:"uri" locationName:"AnalysisId" min:"1" type:"string" required:"true"` - // An ID for the ingestion. + // The ID of the AWS account that contains the analysis whose permissions you're + // describing. You must be using the AWS account that the analysis is in. // - // IngestionId is a required field - IngestionId *string `location:"uri" locationName:"IngestionId" min:"1" type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` } // String returns the string representation -func (s CreateIngestionInput) String() string { +func (s DescribeAnalysisPermissionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateIngestionInput) GoString() string { +func (s DescribeAnalysisPermissionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateIngestionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateIngestionInput"} +func (s *DescribeAnalysisPermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeAnalysisPermissionsInput"} + if s.AnalysisId == nil { + invalidParams.Add(request.NewErrParamRequired("AnalysisId")) + } + if s.AnalysisId != nil && len(*s.AnalysisId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnalysisId", 1)) + } if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSetId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetId")) - } - if s.DataSetId != nil && len(*s.DataSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) - } - if s.IngestionId == nil { - invalidParams.Add(request.NewErrParamRequired("IngestionId")) - } - if s.IngestionId != nil && len(*s.IngestionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("IngestionId", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -9494,35 +18873,30 @@ func (s *CreateIngestionInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *CreateIngestionInput) SetAwsAccountId(v string) *CreateIngestionInput { - s.AwsAccountId = &v - return s -} - -// SetDataSetId sets the DataSetId field's value. -func (s *CreateIngestionInput) SetDataSetId(v string) *CreateIngestionInput { - s.DataSetId = &v +// SetAnalysisId sets the AnalysisId field's value. +func (s *DescribeAnalysisPermissionsInput) SetAnalysisId(v string) *DescribeAnalysisPermissionsInput { + s.AnalysisId = &v return s } -// SetIngestionId sets the IngestionId field's value. -func (s *CreateIngestionInput) SetIngestionId(v string) *CreateIngestionInput { - s.IngestionId = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeAnalysisPermissionsInput) SetAwsAccountId(v string) *DescribeAnalysisPermissionsInput { + s.AwsAccountId = &v return s } -type CreateIngestionOutput struct { +type DescribeAnalysisPermissionsOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) for the data ingestion. - Arn *string `type:"string"` + // The Amazon Resource Name (ARN) of the analysis whose permissions you're describing. + AnalysisArn *string `type:"string"` - // An ID for the ingestion. - IngestionId *string `min:"1" type:"string"` + // The ID of the analysis whose permissions you're describing. + AnalysisId *string `min:"1" type:"string"` - // The ingestion status. - IngestionStatus *string `type:"string" enum:"IngestionStatus"` + // A structure that describes the principals and the resource-level permissions + // on an analysis. + Permissions []*ResourcePermission `min:"1" type:"list"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -9532,88 +18906,79 @@ type CreateIngestionOutput struct { } // String returns the string representation -func (s CreateIngestionOutput) String() string { +func (s DescribeAnalysisPermissionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateIngestionOutput) GoString() string { +func (s DescribeAnalysisPermissionsOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. 
-func (s *CreateIngestionOutput) SetArn(v string) *CreateIngestionOutput { - s.Arn = &v +// SetAnalysisArn sets the AnalysisArn field's value. +func (s *DescribeAnalysisPermissionsOutput) SetAnalysisArn(v string) *DescribeAnalysisPermissionsOutput { + s.AnalysisArn = &v return s } -// SetIngestionId sets the IngestionId field's value. -func (s *CreateIngestionOutput) SetIngestionId(v string) *CreateIngestionOutput { - s.IngestionId = &v +// SetAnalysisId sets the AnalysisId field's value. +func (s *DescribeAnalysisPermissionsOutput) SetAnalysisId(v string) *DescribeAnalysisPermissionsOutput { + s.AnalysisId = &v return s } -// SetIngestionStatus sets the IngestionStatus field's value. -func (s *CreateIngestionOutput) SetIngestionStatus(v string) *CreateIngestionOutput { - s.IngestionStatus = &v +// SetPermissions sets the Permissions field's value. +func (s *DescribeAnalysisPermissionsOutput) SetPermissions(v []*ResourcePermission) *DescribeAnalysisPermissionsOutput { + s.Permissions = v return s } // SetRequestId sets the RequestId field's value. -func (s *CreateIngestionOutput) SetRequestId(v string) *CreateIngestionOutput { +func (s *DescribeAnalysisPermissionsOutput) SetRequestId(v string) *DescribeAnalysisPermissionsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *CreateIngestionOutput) SetStatus(v int64) *CreateIngestionOutput { +func (s *DescribeAnalysisPermissionsOutput) SetStatus(v int64) *DescribeAnalysisPermissionsOutput { s.Status = &v return s } -type CreateTemplateAliasInput struct { +type DescribeDashboardInput struct { _ struct{} `type:"structure"` - // The name that you want to give to the template alias that you're creating. - // Don't start the alias name with the $ character. Alias names that start with - // $ are reserved by QuickSight. - // - // AliasName is a required field - AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` + // The alias name. + AliasName *string `location:"querystring" locationName:"alias-name" min:"1" type:"string"` - // The ID of the AWS account that contains the template that you creating an - // alias for. + // The ID of the AWS account that contains the dashboard that you're describing. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // An ID for the template. + // The ID for the dashboard. // - // TemplateId is a required field - TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` - // The version number of the template. - // - // TemplateVersionNumber is a required field - TemplateVersionNumber *int64 `min:"1" type:"long" required:"true"` + // The version number for the dashboard. If a version number isn't passed, the + // latest published dashboard version is described. 
+ VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` } // String returns the string representation -func (s CreateTemplateAliasInput) String() string { +func (s DescribeDashboardInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateTemplateAliasInput) GoString() string { +func (s DescribeDashboardInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateTemplateAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateTemplateAliasInput"} - if s.AliasName == nil { - invalidParams.Add(request.NewErrParamRequired("AliasName")) - } +func (s *DescribeDashboardInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDashboardInput"} if s.AliasName != nil && len(*s.AliasName) < 1 { invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) } @@ -9623,17 +18988,14 @@ func (s *CreateTemplateAliasInput) Validate() error { if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.TemplateId == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateId")) - } - if s.TemplateId != nil && len(*s.TemplateId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + if s.DashboardId == nil { + invalidParams.Add(request.NewErrParamRequired("DashboardId")) } - if s.TemplateVersionNumber == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateVersionNumber")) + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) } - if s.TemplateVersionNumber != nil && *s.TemplateVersionNumber < 1 { - invalidParams.Add(request.NewErrParamMinValue("TemplateVersionNumber", 1)) + if s.VersionNumber != nil && *s.VersionNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("VersionNumber", 1)) } if invalidParams.Len() > 0 { @@ -9643,173 +19005,109 @@ func (s *CreateTemplateAliasInput) Validate() error { } // SetAliasName sets the AliasName field's value. -func (s *CreateTemplateAliasInput) SetAliasName(v string) *CreateTemplateAliasInput { +func (s *DescribeDashboardInput) SetAliasName(v string) *DescribeDashboardInput { s.AliasName = &v return s } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *CreateTemplateAliasInput) SetAwsAccountId(v string) *CreateTemplateAliasInput { +func (s *DescribeDashboardInput) SetAwsAccountId(v string) *DescribeDashboardInput { s.AwsAccountId = &v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *CreateTemplateAliasInput) SetTemplateId(v string) *CreateTemplateAliasInput { - s.TemplateId = &v +// SetDashboardId sets the DashboardId field's value. +func (s *DescribeDashboardInput) SetDashboardId(v string) *DescribeDashboardInput { + s.DashboardId = &v return s } -// SetTemplateVersionNumber sets the TemplateVersionNumber field's value. -func (s *CreateTemplateAliasInput) SetTemplateVersionNumber(v int64) *CreateTemplateAliasInput { - s.TemplateVersionNumber = &v +// SetVersionNumber sets the VersionNumber field's value. +func (s *DescribeDashboardInput) SetVersionNumber(v int64) *DescribeDashboardInput { + s.VersionNumber = &v return s } -type CreateTemplateAliasOutput struct { +type DescribeDashboardOutput struct { _ struct{} `type:"structure"` + // Information about the dashboard. 
+ Dashboard *Dashboard `type:"structure"` + // The AWS request ID for this operation. RequestId *string `type:"string"` - // The HTTP status of the request. + // The HTTP status of this request. Status *int64 `location:"statusCode" type:"integer"` - - // Information about the template alias. - TemplateAlias *TemplateAlias `type:"structure"` } // String returns the string representation -func (s CreateTemplateAliasOutput) String() string { +func (s DescribeDashboardOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateTemplateAliasOutput) GoString() string { +func (s DescribeDashboardOutput) GoString() string { return s.String() } +// SetDashboard sets the Dashboard field's value. +func (s *DescribeDashboardOutput) SetDashboard(v *Dashboard) *DescribeDashboardOutput { + s.Dashboard = v + return s +} + // SetRequestId sets the RequestId field's value. -func (s *CreateTemplateAliasOutput) SetRequestId(v string) *CreateTemplateAliasOutput { +func (s *DescribeDashboardOutput) SetRequestId(v string) *DescribeDashboardOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *CreateTemplateAliasOutput) SetStatus(v int64) *CreateTemplateAliasOutput { +func (s *DescribeDashboardOutput) SetStatus(v int64) *DescribeDashboardOutput { s.Status = &v return s } -// SetTemplateAlias sets the TemplateAlias field's value. -func (s *CreateTemplateAliasOutput) SetTemplateAlias(v *TemplateAlias) *CreateTemplateAliasOutput { - s.TemplateAlias = v - return s -} - -type CreateTemplateInput struct { +type DescribeDashboardPermissionsInput struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the group is in. Currently, you use the ID - // for the AWS account that contains your Amazon QuickSight account. + // The ID of the AWS account that contains the dashboard that you're describing + // permissions for. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // A display name for the template. - Name *string `min:"1" type:"string"` - - // A list of resource permissions to be set on the template. - Permissions []*ResourcePermission `min:"1" type:"list"` - - // The Amazon Resource Name (ARN) of the source entity from which this template - // is being created. Currently, you can create a template from an analysis or - // another template. If the ARN is for an analysis, include its dataset references. - // - // SourceEntity is a required field - SourceEntity *TemplateSourceEntity `type:"structure" required:"true"` - - // Contains a map of the key-value pairs for the resource tag or tags assigned - // to the resource. - Tags []*Tag `min:"1" type:"list"` - - // An ID for the template that you want to create. This template is unique per - // AWS Region in each AWS account. + // The ID for the dashboard, also added to the IAM policy. // - // TemplateId is a required field - TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` - - // A description of the current template version being created. This API operation - // creates the first version of the template. Every time UpdateTemplate is called, - // a new version is created. Each version of the template maintains a description - // of the version in the VersionDescription field. 
- VersionDescription *string `min:"1" type:"string"` + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s CreateTemplateInput) String() string { +func (s DescribeDashboardPermissionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateTemplateInput) GoString() string { +func (s DescribeDashboardPermissionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CreateTemplateInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateTemplateInput"} +func (s *DescribeDashboardPermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDashboardPermissionsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Permissions != nil && len(s.Permissions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Permissions", 1)) - } - if s.SourceEntity == nil { - invalidParams.Add(request.NewErrParamRequired("SourceEntity")) - } - if s.Tags != nil && len(s.Tags) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) - } - if s.TemplateId == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateId")) - } - if s.TemplateId != nil && len(*s.TemplateId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) - } - if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VersionDescription", 1)) - } - if s.Permissions != nil { - for i, v := range s.Permissions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Permissions", i), err.(request.ErrInvalidParams)) - } - } - } - if s.SourceEntity != nil { - if err := s.SourceEntity.Validate(); err != nil { - invalidParams.AddNested("SourceEntity", err.(request.ErrInvalidParams)) - } + if s.DashboardId == nil { + invalidParams.Add(request.NewErrParamRequired("DashboardId")) } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) } if invalidParams.Len() > 0 { @@ -9819,155 +19117,115 @@ func (s *CreateTemplateInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *CreateTemplateInput) SetAwsAccountId(v string) *CreateTemplateInput { +func (s *DescribeDashboardPermissionsInput) SetAwsAccountId(v string) *DescribeDashboardPermissionsInput { s.AwsAccountId = &v return s } -// SetName sets the Name field's value. -func (s *CreateTemplateInput) SetName(v string) *CreateTemplateInput { - s.Name = &v - return s -} - -// SetPermissions sets the Permissions field's value. -func (s *CreateTemplateInput) SetPermissions(v []*ResourcePermission) *CreateTemplateInput { - s.Permissions = v - return s -} - -// SetSourceEntity sets the SourceEntity field's value. 
-func (s *CreateTemplateInput) SetSourceEntity(v *TemplateSourceEntity) *CreateTemplateInput { - s.SourceEntity = v - return s -} - -// SetTags sets the Tags field's value. -func (s *CreateTemplateInput) SetTags(v []*Tag) *CreateTemplateInput { - s.Tags = v - return s -} - -// SetTemplateId sets the TemplateId field's value. -func (s *CreateTemplateInput) SetTemplateId(v string) *CreateTemplateInput { - s.TemplateId = &v - return s -} - -// SetVersionDescription sets the VersionDescription field's value. -func (s *CreateTemplateInput) SetVersionDescription(v string) *CreateTemplateInput { - s.VersionDescription = &v +// SetDashboardId sets the DashboardId field's value. +func (s *DescribeDashboardPermissionsInput) SetDashboardId(v string) *DescribeDashboardPermissionsInput { + s.DashboardId = &v return s } -type CreateTemplateOutput struct { +type DescribeDashboardPermissionsOutput struct { _ struct{} `type:"structure"` - // The ARN for the template. - Arn *string `type:"string"` + // The Amazon Resource Name (ARN) of the dashboard. + DashboardArn *string `type:"string"` + + // The ID for the dashboard. + DashboardId *string `min:"1" type:"string"` - // The template creation status. - CreationStatus *string `type:"string" enum:"ResourceStatus"` + // A structure that contains the permissions for the dashboard. + Permissions []*ResourcePermission `min:"1" type:"list"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` - - // The ID of the template. - TemplateId *string `min:"1" type:"string"` - - // The ARN for the template, including the version information of the first - // version. - VersionArn *string `type:"string"` } // String returns the string representation -func (s CreateTemplateOutput) String() string { +func (s DescribeDashboardPermissionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateTemplateOutput) GoString() string { +func (s DescribeDashboardPermissionsOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *CreateTemplateOutput) SetArn(v string) *CreateTemplateOutput { - s.Arn = &v +// SetDashboardArn sets the DashboardArn field's value. +func (s *DescribeDashboardPermissionsOutput) SetDashboardArn(v string) *DescribeDashboardPermissionsOutput { + s.DashboardArn = &v return s } -// SetCreationStatus sets the CreationStatus field's value. -func (s *CreateTemplateOutput) SetCreationStatus(v string) *CreateTemplateOutput { - s.CreationStatus = &v +// SetDashboardId sets the DashboardId field's value. +func (s *DescribeDashboardPermissionsOutput) SetDashboardId(v string) *DescribeDashboardPermissionsOutput { + s.DashboardId = &v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *DescribeDashboardPermissionsOutput) SetPermissions(v []*ResourcePermission) *DescribeDashboardPermissionsOutput { + s.Permissions = v return s } // SetRequestId sets the RequestId field's value. -func (s *CreateTemplateOutput) SetRequestId(v string) *CreateTemplateOutput { +func (s *DescribeDashboardPermissionsOutput) SetRequestId(v string) *DescribeDashboardPermissionsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. 
-func (s *CreateTemplateOutput) SetStatus(v int64) *CreateTemplateOutput { +func (s *DescribeDashboardPermissionsOutput) SetStatus(v int64) *DescribeDashboardPermissionsOutput { s.Status = &v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *CreateTemplateOutput) SetTemplateId(v string) *CreateTemplateOutput { - s.TemplateId = &v - return s -} - -// SetVersionArn sets the VersionArn field's value. -func (s *CreateTemplateOutput) SetVersionArn(v string) *CreateTemplateOutput { - s.VersionArn = &v - return s -} - -// The combination of user name and password that are used as credentials. -type CredentialPair struct { +type DescribeDataSetInput struct { _ struct{} `type:"structure"` - // Password. + // The AWS account ID. // - // Password is a required field - Password *string `min:"1" type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // User name. + // The ID for the dataset that you want to create. This ID is unique per AWS + // Region for each AWS account. // - // Username is a required field - Username *string `min:"1" type:"string" required:"true"` + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` } // String returns the string representation -func (s CredentialPair) String() string { +func (s DescribeDataSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CredentialPair) GoString() string { +func (s DescribeDataSetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *CredentialPair) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CredentialPair"} - if s.Password == nil { - invalidParams.Add(request.NewErrParamRequired("Password")) +func (s *DescribeDataSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDataSetInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.Password != nil && len(*s.Password) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Password", 1)) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Username == nil { - invalidParams.Add(request.NewErrParamRequired("Username")) + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) } - if s.Username != nil && len(*s.Username) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Username", 1)) + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) } if invalidParams.Len() > 0 { @@ -9976,81 +19234,98 @@ func (s *CredentialPair) Validate() error { return nil } -// SetPassword sets the Password field's value. -func (s *CredentialPair) SetPassword(v string) *CredentialPair { - s.Password = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeDataSetInput) SetAwsAccountId(v string) *DescribeDataSetInput { + s.AwsAccountId = &v return s } -// SetUsername sets the Username field's value. -func (s *CredentialPair) SetUsername(v string) *CredentialPair { - s.Username = &v +// SetDataSetId sets the DataSetId field's value. 
+func (s *DescribeDataSetInput) SetDataSetId(v string) *DescribeDataSetInput { + s.DataSetId = &v return s } -// A physical table type built from the results of the custom SQL query. -type CustomSql struct { +type DescribeDataSetOutput struct { _ struct{} `type:"structure"` - // The column schema from the SQL query result set. - Columns []*InputColumn `min:"1" type:"list"` + // Information on the dataset. + DataSet *DataSet `type:"structure"` - // The Amazon Resource Name (ARN) of the data source. - // - // DataSourceArn is a required field - DataSourceArn *string `type:"string" required:"true"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // A display name for the SQL query result. + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s DescribeDataSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDataSetOutput) GoString() string { + return s.String() +} + +// SetDataSet sets the DataSet field's value. +func (s *DescribeDataSetOutput) SetDataSet(v *DataSet) *DescribeDataSetOutput { + s.DataSet = v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *DescribeDataSetOutput) SetRequestId(v string) *DescribeDataSetOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeDataSetOutput) SetStatus(v int64) *DescribeDataSetOutput { + s.Status = &v + return s +} + +type DescribeDataSetPermissionsInput struct { + _ struct{} `type:"structure"` + + // The AWS account ID. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The SQL query. + // The ID for the dataset that you want to create. This ID is unique per AWS + // Region for each AWS account. // - // SqlQuery is a required field - SqlQuery *string `min:"1" type:"string" required:"true"` + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` } // String returns the string representation -func (s CustomSql) String() string { +func (s DescribeDataSetPermissionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CustomSql) GoString() string { +func (s DescribeDataSetPermissionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CustomSql) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CustomSql"} - if s.Columns != nil && len(s.Columns) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Columns", 1)) - } - if s.DataSourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("DataSourceArn")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) +func (s *DescribeDataSetPermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDataSetPermissionsInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.SqlQuery == nil { - invalidParams.Add(request.NewErrParamRequired("SqlQuery")) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.SqlQuery != nil && len(*s.SqlQuery) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SqlQuery", 1)) + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) } - if s.Columns != nil { - for i, v := range s.Columns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Columns", i), err.(request.ErrInvalidParams)) - } - } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) } if invalidParams.Len() > 0 { @@ -10059,217 +19334,217 @@ func (s *CustomSql) Validate() error { return nil } -// SetColumns sets the Columns field's value. -func (s *CustomSql) SetColumns(v []*InputColumn) *CustomSql { - s.Columns = v - return s -} - -// SetDataSourceArn sets the DataSourceArn field's value. -func (s *CustomSql) SetDataSourceArn(v string) *CustomSql { - s.DataSourceArn = &v - return s -} - -// SetName sets the Name field's value. -func (s *CustomSql) SetName(v string) *CustomSql { - s.Name = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeDataSetPermissionsInput) SetAwsAccountId(v string) *DescribeDataSetPermissionsInput { + s.AwsAccountId = &v return s } -// SetSqlQuery sets the SqlQuery field's value. -func (s *CustomSql) SetSqlQuery(v string) *CustomSql { - s.SqlQuery = &v +// SetDataSetId sets the DataSetId field's value. +func (s *DescribeDataSetPermissionsInput) SetDataSetId(v string) *DescribeDataSetPermissionsInput { + s.DataSetId = &v return s } -// Dashboard. -type Dashboard struct { +type DescribeDataSetPermissionsOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource. - Arn *string `type:"string"` - - // The time that this dataset was created. - CreatedTime *time.Time `type:"timestamp"` - - // Dashboard ID. - DashboardId *string `min:"1" type:"string"` + // The Amazon Resource Name (ARN) of the dataset. + DataSetArn *string `type:"string"` - // The last time that this dataset was published. - LastPublishedTime *time.Time `type:"timestamp"` + // The ID for the dataset that you want to create. This ID is unique per AWS + // Region for each AWS account. + DataSetId *string `type:"string"` - // The last time that this dataset was updated. - LastUpdatedTime *time.Time `type:"timestamp"` + // A list of resource permissions on the dataset. + Permissions []*ResourcePermission `min:"1" type:"list"` - // A display name for the dataset. - Name *string `min:"1" type:"string"` + // The AWS request ID for this operation. 
+ RequestId *string `type:"string"` - // Version. - Version *DashboardVersion `type:"structure"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s Dashboard) String() string { +func (s DescribeDataSetPermissionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Dashboard) GoString() string { +func (s DescribeDataSetPermissionsOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *Dashboard) SetArn(v string) *Dashboard { - s.Arn = &v - return s -} - -// SetCreatedTime sets the CreatedTime field's value. -func (s *Dashboard) SetCreatedTime(v time.Time) *Dashboard { - s.CreatedTime = &v - return s -} - -// SetDashboardId sets the DashboardId field's value. -func (s *Dashboard) SetDashboardId(v string) *Dashboard { - s.DashboardId = &v +// SetDataSetArn sets the DataSetArn field's value. +func (s *DescribeDataSetPermissionsOutput) SetDataSetArn(v string) *DescribeDataSetPermissionsOutput { + s.DataSetArn = &v return s } -// SetLastPublishedTime sets the LastPublishedTime field's value. -func (s *Dashboard) SetLastPublishedTime(v time.Time) *Dashboard { - s.LastPublishedTime = &v +// SetDataSetId sets the DataSetId field's value. +func (s *DescribeDataSetPermissionsOutput) SetDataSetId(v string) *DescribeDataSetPermissionsOutput { + s.DataSetId = &v return s } -// SetLastUpdatedTime sets the LastUpdatedTime field's value. -func (s *Dashboard) SetLastUpdatedTime(v time.Time) *Dashboard { - s.LastUpdatedTime = &v +// SetPermissions sets the Permissions field's value. +func (s *DescribeDataSetPermissionsOutput) SetPermissions(v []*ResourcePermission) *DescribeDataSetPermissionsOutput { + s.Permissions = v return s } -// SetName sets the Name field's value. -func (s *Dashboard) SetName(v string) *Dashboard { - s.Name = &v +// SetRequestId sets the RequestId field's value. +func (s *DescribeDataSetPermissionsOutput) SetRequestId(v string) *DescribeDataSetPermissionsOutput { + s.RequestId = &v return s } -// SetVersion sets the Version field's value. -func (s *Dashboard) SetVersion(v *DashboardVersion) *Dashboard { - s.Version = v +// SetStatus sets the Status field's value. +func (s *DescribeDataSetPermissionsOutput) SetStatus(v int64) *DescribeDataSetPermissionsOutput { + s.Status = &v return s } -// Dashboard error. -type DashboardError struct { +type DescribeDataSourceInput struct { _ struct{} `type:"structure"` - // Message. - Message *string `type:"string"` + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Type. - Type *string `type:"string" enum:"DashboardErrorType"` + // The ID of the data source. This ID is unique per AWS Region for each AWS + // account. + // + // DataSourceId is a required field + DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` } // String returns the string representation -func (s DashboardError) String() string { +func (s DescribeDataSourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DashboardError) GoString() string { +func (s DescribeDataSourceInput) GoString() string { return s.String() } -// SetMessage sets the Message field's value. 
-func (s *DashboardError) SetMessage(v string) *DashboardError { - s.Message = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDataSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDataSourceInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSourceId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceId")) + } + if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeDataSourceInput) SetAwsAccountId(v string) *DescribeDataSourceInput { + s.AwsAccountId = &v return s } -// SetType sets the Type field's value. -func (s *DashboardError) SetType(v string) *DashboardError { - s.Type = &v +// SetDataSourceId sets the DataSourceId field's value. +func (s *DescribeDataSourceInput) SetDataSourceId(v string) *DescribeDataSourceInput { + s.DataSourceId = &v return s } -// Dashboard publish options. -type DashboardPublishOptions struct { +type DescribeDataSourceOutput struct { _ struct{} `type:"structure"` - // Ad hoc (one-time) filtering option. - AdHocFilteringOption *AdHocFilteringOption `type:"structure"` + // The information on the data source. + DataSource *DataSource `type:"structure"` - // Export to .csv option. - ExportToCSVOption *ExportToCSVOption `type:"structure"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // Sheet controls option. - SheetControlsOption *SheetControlsOption `type:"structure"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s DashboardPublishOptions) String() string { +func (s DescribeDataSourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DashboardPublishOptions) GoString() string { +func (s DescribeDataSourceOutput) GoString() string { return s.String() } -// SetAdHocFilteringOption sets the AdHocFilteringOption field's value. -func (s *DashboardPublishOptions) SetAdHocFilteringOption(v *AdHocFilteringOption) *DashboardPublishOptions { - s.AdHocFilteringOption = v +// SetDataSource sets the DataSource field's value. +func (s *DescribeDataSourceOutput) SetDataSource(v *DataSource) *DescribeDataSourceOutput { + s.DataSource = v return s } -// SetExportToCSVOption sets the ExportToCSVOption field's value. -func (s *DashboardPublishOptions) SetExportToCSVOption(v *ExportToCSVOption) *DashboardPublishOptions { - s.ExportToCSVOption = v +// SetRequestId sets the RequestId field's value. +func (s *DescribeDataSourceOutput) SetRequestId(v string) *DescribeDataSourceOutput { + s.RequestId = &v return s } -// SetSheetControlsOption sets the SheetControlsOption field's value. -func (s *DashboardPublishOptions) SetSheetControlsOption(v *SheetControlsOption) *DashboardPublishOptions { - s.SheetControlsOption = v +// SetStatus sets the Status field's value. +func (s *DescribeDataSourceOutput) SetStatus(v int64) *DescribeDataSourceOutput { + s.Status = &v return s } -// A filter that you apply when searching for dashboards. 
-type DashboardSearchFilter struct { +type DescribeDataSourcePermissionsInput struct { _ struct{} `type:"structure"` - // The name of the value that you want to use as a filter. For example, "Name": - // "QUICKSIGHT_USER". - Name *string `type:"string" enum:"DashboardFilterAttribute"` - - // The comparison operator that you want to use as a filter. For example, "Operator": - // "StringEquals". + // The AWS account ID. // - // Operator is a required field - Operator *string `type:"string" required:"true" enum:"FilterOperator"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The value of the named item, in this case QUICKSIGHT_USER, that you want - // to use as a filter. For example, "Value": "arn:aws:quicksight:us-east-1:1:user/default/UserName1". - Value *string `type:"string"` + // The ID of the data source. This ID is unique per AWS Region for each AWS + // account. + // + // DataSourceId is a required field + DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` } // String returns the string representation -func (s DashboardSearchFilter) String() string { +func (s DescribeDataSourcePermissionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DashboardSearchFilter) GoString() string { +func (s DescribeDataSourcePermissionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DashboardSearchFilter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DashboardSearchFilter"} - if s.Operator == nil { - invalidParams.Add(request.NewErrParamRequired("Operator")) +func (s *DescribeDataSourcePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDataSourcePermissionsInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSourceId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceId")) + } + if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) } if invalidParams.Len() > 0 { @@ -10278,109 +19553,128 @@ func (s *DashboardSearchFilter) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *DashboardSearchFilter) SetName(v string) *DashboardSearchFilter { - s.Name = &v - return s -} - -// SetOperator sets the Operator field's value. -func (s *DashboardSearchFilter) SetOperator(v string) *DashboardSearchFilter { - s.Operator = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeDataSourcePermissionsInput) SetAwsAccountId(v string) *DescribeDataSourcePermissionsInput { + s.AwsAccountId = &v return s } -// SetValue sets the Value field's value. -func (s *DashboardSearchFilter) SetValue(v string) *DashboardSearchFilter { - s.Value = &v +// SetDataSourceId sets the DataSourceId field's value. +func (s *DescribeDataSourcePermissionsInput) SetDataSourceId(v string) *DescribeDataSourcePermissionsInput { + s.DataSourceId = &v return s } -// Dashboard source entity. -type DashboardSourceEntity struct { +type DescribeDataSourcePermissionsOutput struct { _ struct{} `type:"structure"` - // Source template. 
- SourceTemplate *DashboardSourceTemplate `type:"structure"` + // The Amazon Resource Name (ARN) of the data source. + DataSourceArn *string `type:"string"` + + // The ID of the data source. This ID is unique per AWS Region for each AWS + // account. + DataSourceId *string `type:"string"` + + // A list of resource permissions on the data source. + Permissions []*ResourcePermission `min:"1" type:"list"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s DashboardSourceEntity) String() string { +func (s DescribeDataSourcePermissionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DashboardSourceEntity) GoString() string { +func (s DescribeDataSourcePermissionsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DashboardSourceEntity) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DashboardSourceEntity"} - if s.SourceTemplate != nil { - if err := s.SourceTemplate.Validate(); err != nil { - invalidParams.AddNested("SourceTemplate", err.(request.ErrInvalidParams)) - } - } +// SetDataSourceArn sets the DataSourceArn field's value. +func (s *DescribeDataSourcePermissionsOutput) SetDataSourceArn(v string) *DescribeDataSourcePermissionsOutput { + s.DataSourceArn = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDataSourceId sets the DataSourceId field's value. +func (s *DescribeDataSourcePermissionsOutput) SetDataSourceId(v string) *DescribeDataSourcePermissionsOutput { + s.DataSourceId = &v + return s +} + +// SetPermissions sets the Permissions field's value. +func (s *DescribeDataSourcePermissionsOutput) SetPermissions(v []*ResourcePermission) *DescribeDataSourcePermissionsOutput { + s.Permissions = v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *DescribeDataSourcePermissionsOutput) SetRequestId(v string) *DescribeDataSourcePermissionsOutput { + s.RequestId = &v + return s } -// SetSourceTemplate sets the SourceTemplate field's value. -func (s *DashboardSourceEntity) SetSourceTemplate(v *DashboardSourceTemplate) *DashboardSourceEntity { - s.SourceTemplate = v +// SetStatus sets the Status field's value. +func (s *DescribeDataSourcePermissionsOutput) SetStatus(v int64) *DescribeDataSourcePermissionsOutput { + s.Status = &v return s } -// Dashboard source template. -type DashboardSourceTemplate struct { +type DescribeGroupInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource. + // The ID for the AWS account that the group is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. // - // Arn is a required field - Arn *string `type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Dataset references. + // The name of the group that you want to describe. // - // DataSetReferences is a required field - DataSetReferences []*DataSetReference `min:"1" type:"list" required:"true"` + // GroupName is a required field + GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + + // The namespace. 
Currently, you should set this to default. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` } // String returns the string representation -func (s DashboardSourceTemplate) String() string { +func (s DescribeGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DashboardSourceTemplate) GoString() string { +func (s DescribeGroupInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DashboardSourceTemplate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DashboardSourceTemplate"} - if s.Arn == nil { - invalidParams.Add(request.NewErrParamRequired("Arn")) +func (s *DescribeGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGroupInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.DataSetReferences == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetReferences")) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSetReferences != nil && len(s.DataSetReferences) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSetReferences", 1)) + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) } - if s.DataSetReferences != nil { - for i, v := range s.DataSetReferences { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DataSetReferences", i), err.(request.ErrInvalidParams)) - } - } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } if invalidParams.Len() > 0 { @@ -10389,445 +19683,336 @@ func (s *DashboardSourceTemplate) Validate() error { return nil } -// SetArn sets the Arn field's value. -func (s *DashboardSourceTemplate) SetArn(v string) *DashboardSourceTemplate { - s.Arn = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeGroupInput) SetAwsAccountId(v string) *DescribeGroupInput { + s.AwsAccountId = &v return s } -// SetDataSetReferences sets the DataSetReferences field's value. -func (s *DashboardSourceTemplate) SetDataSetReferences(v []*DataSetReference) *DashboardSourceTemplate { - s.DataSetReferences = v +// SetGroupName sets the GroupName field's value. +func (s *DescribeGroupInput) SetGroupName(v string) *DescribeGroupInput { + s.GroupName = &v return s } -// Dashboard summary. -type DashboardSummary struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the resource. - Arn *string `type:"string"` - - // The time that this dashboard was created. - CreatedTime *time.Time `type:"timestamp"` - - // Dashboard ID. - DashboardId *string `min:"1" type:"string"` +// SetNamespace sets the Namespace field's value. +func (s *DescribeGroupInput) SetNamespace(v string) *DescribeGroupInput { + s.Namespace = &v + return s +} - // The last time that this dashboard was published. 
- LastPublishedTime *time.Time `type:"timestamp"` +type DescribeGroupOutput struct { + _ struct{} `type:"structure"` - // The last time that this dashboard was updated. - LastUpdatedTime *time.Time `type:"timestamp"` + // The name of the group. + Group *Group `type:"structure"` - // A display name for the dashboard. - Name *string `min:"1" type:"string"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // Published version number. - PublishedVersionNumber *int64 `min:"1" type:"long"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s DashboardSummary) String() string { +func (s DescribeGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DashboardSummary) GoString() string { +func (s DescribeGroupOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *DashboardSummary) SetArn(v string) *DashboardSummary { - s.Arn = &v - return s -} - -// SetCreatedTime sets the CreatedTime field's value. -func (s *DashboardSummary) SetCreatedTime(v time.Time) *DashboardSummary { - s.CreatedTime = &v - return s -} - -// SetDashboardId sets the DashboardId field's value. -func (s *DashboardSummary) SetDashboardId(v string) *DashboardSummary { - s.DashboardId = &v - return s -} - -// SetLastPublishedTime sets the LastPublishedTime field's value. -func (s *DashboardSummary) SetLastPublishedTime(v time.Time) *DashboardSummary { - s.LastPublishedTime = &v - return s -} - -// SetLastUpdatedTime sets the LastUpdatedTime field's value. -func (s *DashboardSummary) SetLastUpdatedTime(v time.Time) *DashboardSummary { - s.LastUpdatedTime = &v +// SetGroup sets the Group field's value. +func (s *DescribeGroupOutput) SetGroup(v *Group) *DescribeGroupOutput { + s.Group = v return s } -// SetName sets the Name field's value. -func (s *DashboardSummary) SetName(v string) *DashboardSummary { - s.Name = &v +// SetRequestId sets the RequestId field's value. +func (s *DescribeGroupOutput) SetRequestId(v string) *DescribeGroupOutput { + s.RequestId = &v return s } -// SetPublishedVersionNumber sets the PublishedVersionNumber field's value. -func (s *DashboardSummary) SetPublishedVersionNumber(v int64) *DashboardSummary { - s.PublishedVersionNumber = &v +// SetStatus sets the Status field's value. +func (s *DescribeGroupOutput) SetStatus(v int64) *DescribeGroupOutput { + s.Status = &v return s } -// Dashboard version. -type DashboardVersion struct { +type DescribeIAMPolicyAssignmentInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource. - Arn *string `type:"string"` - - // The time that this dashboard version was created. - CreatedTime *time.Time `type:"timestamp"` - - // Description. - Description *string `min:"1" type:"string"` - - // Errors. - Errors []*DashboardError `min:"1" type:"list"` - - // Source entity ARN. - SourceEntityArn *string `type:"string"` + // The name of the assignment. + // + // AssignmentName is a required field + AssignmentName *string `location:"uri" locationName:"AssignmentName" min:"1" type:"string" required:"true"` - // The HTTP status of the request. - Status *string `type:"string" enum:"ResourceStatus"` + // The ID of the AWS account that contains the assignment that you want to describe. 
+ // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Version number. - VersionNumber *int64 `min:"1" type:"long"` + // The namespace that contains the assignment. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` } // String returns the string representation -func (s DashboardVersion) String() string { +func (s DescribeIAMPolicyAssignmentInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DashboardVersion) GoString() string { +func (s DescribeIAMPolicyAssignmentInput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *DashboardVersion) SetArn(v string) *DashboardVersion { - s.Arn = &v - return s -} - -// SetCreatedTime sets the CreatedTime field's value. -func (s *DashboardVersion) SetCreatedTime(v time.Time) *DashboardVersion { - s.CreatedTime = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *DashboardVersion) SetDescription(v string) *DashboardVersion { - s.Description = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIAMPolicyAssignmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIAMPolicyAssignmentInput"} + if s.AssignmentName == nil { + invalidParams.Add(request.NewErrParamRequired("AssignmentName")) + } + if s.AssignmentName != nil && len(*s.AssignmentName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssignmentName", 1)) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } -// SetErrors sets the Errors field's value. -func (s *DashboardVersion) SetErrors(v []*DashboardError) *DashboardVersion { - s.Errors = v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetSourceEntityArn sets the SourceEntityArn field's value. -func (s *DashboardVersion) SetSourceEntityArn(v string) *DashboardVersion { - s.SourceEntityArn = &v +// SetAssignmentName sets the AssignmentName field's value. +func (s *DescribeIAMPolicyAssignmentInput) SetAssignmentName(v string) *DescribeIAMPolicyAssignmentInput { + s.AssignmentName = &v return s } -// SetStatus sets the Status field's value. -func (s *DashboardVersion) SetStatus(v string) *DashboardVersion { - s.Status = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeIAMPolicyAssignmentInput) SetAwsAccountId(v string) *DescribeIAMPolicyAssignmentInput { + s.AwsAccountId = &v return s -} - -// SetVersionNumber sets the VersionNumber field's value. -func (s *DashboardVersion) SetVersionNumber(v int64) *DashboardVersion { - s.VersionNumber = &v +} + +// SetNamespace sets the Namespace field's value. +func (s *DescribeIAMPolicyAssignmentInput) SetNamespace(v string) *DescribeIAMPolicyAssignmentInput { + s.Namespace = &v return s } -// Dashboard version summary. 
-type DashboardVersionSummary struct { +type DescribeIAMPolicyAssignmentOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource. - Arn *string `type:"string"` - - // The time that this dashboard version was created. - CreatedTime *time.Time `type:"timestamp"` - - // Description. - Description *string `min:"1" type:"string"` + // Information describing the IAM policy assignment. + IAMPolicyAssignment *IAMPolicyAssignment `type:"structure"` - // Source entity ARN. - SourceEntityArn *string `type:"string"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` // The HTTP status of the request. - Status *string `type:"string" enum:"ResourceStatus"` - - // Version number. - VersionNumber *int64 `min:"1" type:"long"` + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s DashboardVersionSummary) String() string { +func (s DescribeIAMPolicyAssignmentOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DashboardVersionSummary) GoString() string { +func (s DescribeIAMPolicyAssignmentOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *DashboardVersionSummary) SetArn(v string) *DashboardVersionSummary { - s.Arn = &v - return s -} - -// SetCreatedTime sets the CreatedTime field's value. -func (s *DashboardVersionSummary) SetCreatedTime(v time.Time) *DashboardVersionSummary { - s.CreatedTime = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *DashboardVersionSummary) SetDescription(v string) *DashboardVersionSummary { - s.Description = &v +// SetIAMPolicyAssignment sets the IAMPolicyAssignment field's value. +func (s *DescribeIAMPolicyAssignmentOutput) SetIAMPolicyAssignment(v *IAMPolicyAssignment) *DescribeIAMPolicyAssignmentOutput { + s.IAMPolicyAssignment = v return s } -// SetSourceEntityArn sets the SourceEntityArn field's value. -func (s *DashboardVersionSummary) SetSourceEntityArn(v string) *DashboardVersionSummary { - s.SourceEntityArn = &v +// SetRequestId sets the RequestId field's value. +func (s *DescribeIAMPolicyAssignmentOutput) SetRequestId(v string) *DescribeIAMPolicyAssignmentOutput { + s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DashboardVersionSummary) SetStatus(v string) *DashboardVersionSummary { +func (s *DescribeIAMPolicyAssignmentOutput) SetStatus(v int64) *DescribeIAMPolicyAssignmentOutput { s.Status = &v return s } -// SetVersionNumber sets the VersionNumber field's value. -func (s *DashboardVersionSummary) SetVersionNumber(v int64) *DashboardVersionSummary { - s.VersionNumber = &v - return s -} - -// Dataset. -type DataSet struct { +type DescribeIngestionInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource. - Arn *string `type:"string"` - - // Groupings of columns that work together in certain Amazon QuickSight features. - // Currently, only geospatial hierarchy is supported. - ColumnGroups []*ColumnGroup `min:"1" type:"list"` - - // The amount of SPICE capacity used by this dataset. This is 0 if the dataset - // isn't imported into SPICE. - ConsumedSpiceCapacityInBytes *int64 `type:"long"` - - // The time that this dataset was created. - CreatedTime *time.Time `type:"timestamp"` - - // The ID of the dataset. - DataSetId *string `type:"string"` - - // Indicates whether you want to import the data into SPICE. 
- ImportMode *string `type:"string" enum:"DataSetImportMode"` - - // The last time that this dataset was updated. - LastUpdatedTime *time.Time `type:"timestamp"` - - // Configures the combination and transformation of the data from the physical - // tables. - LogicalTableMap map[string]*LogicalTable `min:"1" type:"map"` - - // A display name for the dataset. - Name *string `min:"1" type:"string"` - - // The list of columns after all transforms. These columns are available in - // templates, analyses, and dashboards. - OutputColumns []*OutputColumn `type:"list"` + // The AWS account ID. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Declares the physical tables that are available in the underlying data sources. - PhysicalTableMap map[string]*PhysicalTable `min:"1" type:"map"` + // The ID of the dataset used in the ingestion. + // + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` - // The row-level security configuration for the dataset. - RowLevelPermissionDataSet *RowLevelPermissionDataSet `type:"structure"` + // An ID for the ingestion. + // + // IngestionId is a required field + IngestionId *string `location:"uri" locationName:"IngestionId" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DataSet) String() string { +func (s DescribeIngestionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DataSet) GoString() string { +func (s DescribeIngestionInput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *DataSet) SetArn(v string) *DataSet { - s.Arn = &v - return s -} - -// SetColumnGroups sets the ColumnGroups field's value. -func (s *DataSet) SetColumnGroups(v []*ColumnGroup) *DataSet { - s.ColumnGroups = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIngestionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIngestionInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + } + if s.IngestionId == nil { + invalidParams.Add(request.NewErrParamRequired("IngestionId")) + } + if s.IngestionId != nil && len(*s.IngestionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("IngestionId", 1)) + } -// SetConsumedSpiceCapacityInBytes sets the ConsumedSpiceCapacityInBytes field's value. -func (s *DataSet) SetConsumedSpiceCapacityInBytes(v int64) *DataSet { - s.ConsumedSpiceCapacityInBytes = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetCreatedTime sets the CreatedTime field's value. -func (s *DataSet) SetCreatedTime(v time.Time) *DataSet { - s.CreatedTime = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeIngestionInput) SetAwsAccountId(v string) *DescribeIngestionInput { + s.AwsAccountId = &v return s } // SetDataSetId sets the DataSetId field's value. 
-func (s *DataSet) SetDataSetId(v string) *DataSet { +func (s *DescribeIngestionInput) SetDataSetId(v string) *DescribeIngestionInput { s.DataSetId = &v return s } -// SetImportMode sets the ImportMode field's value. -func (s *DataSet) SetImportMode(v string) *DataSet { - s.ImportMode = &v - return s -} - -// SetLastUpdatedTime sets the LastUpdatedTime field's value. -func (s *DataSet) SetLastUpdatedTime(v time.Time) *DataSet { - s.LastUpdatedTime = &v - return s -} - -// SetLogicalTableMap sets the LogicalTableMap field's value. -func (s *DataSet) SetLogicalTableMap(v map[string]*LogicalTable) *DataSet { - s.LogicalTableMap = v - return s -} - -// SetName sets the Name field's value. -func (s *DataSet) SetName(v string) *DataSet { - s.Name = &v - return s -} - -// SetOutputColumns sets the OutputColumns field's value. -func (s *DataSet) SetOutputColumns(v []*OutputColumn) *DataSet { - s.OutputColumns = v - return s -} - -// SetPhysicalTableMap sets the PhysicalTableMap field's value. -func (s *DataSet) SetPhysicalTableMap(v map[string]*PhysicalTable) *DataSet { - s.PhysicalTableMap = v - return s -} - -// SetRowLevelPermissionDataSet sets the RowLevelPermissionDataSet field's value. -func (s *DataSet) SetRowLevelPermissionDataSet(v *RowLevelPermissionDataSet) *DataSet { - s.RowLevelPermissionDataSet = v +// SetIngestionId sets the IngestionId field's value. +func (s *DescribeIngestionInput) SetIngestionId(v string) *DescribeIngestionInput { + s.IngestionId = &v return s } -// Dataset configuration. -type DataSetConfiguration struct { +type DescribeIngestionOutput struct { _ struct{} `type:"structure"` - // A structure containing the list of column group schemas. - ColumnGroupSchemaList []*ColumnGroupSchema `type:"list"` + // Information about the ingestion. + Ingestion *Ingestion `type:"structure"` - // Dataset schema. - DataSetSchema *DataSetSchema `type:"structure"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // Placeholder. - Placeholder *string `type:"string"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s DataSetConfiguration) String() string { +func (s DescribeIngestionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DataSetConfiguration) GoString() string { +func (s DescribeIngestionOutput) GoString() string { return s.String() } -// SetColumnGroupSchemaList sets the ColumnGroupSchemaList field's value. -func (s *DataSetConfiguration) SetColumnGroupSchemaList(v []*ColumnGroupSchema) *DataSetConfiguration { - s.ColumnGroupSchemaList = v +// SetIngestion sets the Ingestion field's value. +func (s *DescribeIngestionOutput) SetIngestion(v *Ingestion) *DescribeIngestionOutput { + s.Ingestion = v return s } -// SetDataSetSchema sets the DataSetSchema field's value. -func (s *DataSetConfiguration) SetDataSetSchema(v *DataSetSchema) *DataSetConfiguration { - s.DataSetSchema = v +// SetRequestId sets the RequestId field's value. +func (s *DescribeIngestionOutput) SetRequestId(v string) *DescribeIngestionOutput { + s.RequestId = &v return s } -// SetPlaceholder sets the Placeholder field's value. -func (s *DataSetConfiguration) SetPlaceholder(v string) *DataSetConfiguration { - s.Placeholder = &v +// SetStatus sets the Status field's value. +func (s *DescribeIngestionOutput) SetStatus(v int64) *DescribeIngestionOutput { + s.Status = &v return s } -// Dataset reference. 
-type DataSetReference struct { +type DescribeNamespaceInput struct { _ struct{} `type:"structure"` - // Dataset Amazon Resource Name (ARN). + // The ID for the AWS account that contains the QuickSight namespace that you + // want to describe. // - // DataSetArn is a required field - DataSetArn *string `type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Dataset placeholder. + // The namespace that you want to describe. // - // DataSetPlaceholder is a required field - DataSetPlaceholder *string `type:"string" required:"true"` + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` } // String returns the string representation -func (s DataSetReference) String() string { +func (s DescribeNamespaceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DataSetReference) GoString() string { +func (s DescribeNamespaceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DataSetReference) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DataSetReference"} - if s.DataSetArn == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetArn")) +func (s *DescribeNamespaceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeNamespaceInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.DataSetPlaceholder == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetPlaceholder")) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } if invalidParams.Len() > 0 { @@ -10836,266 +20021,236 @@ func (s *DataSetReference) Validate() error { return nil } -// SetDataSetArn sets the DataSetArn field's value. -func (s *DataSetReference) SetDataSetArn(v string) *DataSetReference { - s.DataSetArn = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeNamespaceInput) SetAwsAccountId(v string) *DescribeNamespaceInput { + s.AwsAccountId = &v return s } -// SetDataSetPlaceholder sets the DataSetPlaceholder field's value. -func (s *DataSetReference) SetDataSetPlaceholder(v string) *DataSetReference { - s.DataSetPlaceholder = &v +// SetNamespace sets the Namespace field's value. +func (s *DescribeNamespaceInput) SetNamespace(v string) *DescribeNamespaceInput { + s.Namespace = &v return s } -// Dataset schema. -type DataSetSchema struct { +type DescribeNamespaceOutput struct { _ struct{} `type:"structure"` - // A structure containing the list of column schemas. - ColumnSchemaList []*ColumnSchema `type:"list"` + // The information about the namespace that you're describing. The response + // includes the namespace ARN, name, AWS Region, creation status, and identity + // store. DescribeNamespace also works for namespaces that are in the process + // of being created. For incomplete namespaces, this API operation lists the + // namespace error types and messages associated with the creation process. 
+ Namespace *NamespaceInfoV2 `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s DataSetSchema) String() string { +func (s DescribeNamespaceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DataSetSchema) GoString() string { +func (s DescribeNamespaceOutput) GoString() string { return s.String() } -// SetColumnSchemaList sets the ColumnSchemaList field's value. -func (s *DataSetSchema) SetColumnSchemaList(v []*ColumnSchema) *DataSetSchema { - s.ColumnSchemaList = v +// SetNamespace sets the Namespace field's value. +func (s *DescribeNamespaceOutput) SetNamespace(v *NamespaceInfoV2) *DescribeNamespaceOutput { + s.Namespace = v return s } -// Dataset summary. -type DataSetSummary struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the dataset. - Arn *string `type:"string"` - - // The time that this dataset was created. - CreatedTime *time.Time `type:"timestamp"` +// SetRequestId sets the RequestId field's value. +func (s *DescribeNamespaceOutput) SetRequestId(v string) *DescribeNamespaceOutput { + s.RequestId = &v + return s +} - // The ID of the dataset. - DataSetId *string `type:"string"` +// SetStatus sets the Status field's value. +func (s *DescribeNamespaceOutput) SetStatus(v int64) *DescribeNamespaceOutput { + s.Status = &v + return s +} - // Indicates whether you want to import the data into SPICE. - ImportMode *string `type:"string" enum:"DataSetImportMode"` +type DescribeTemplateAliasInput struct { + _ struct{} `type:"structure"` - // The last time that this dataset was updated. - LastUpdatedTime *time.Time `type:"timestamp"` + // The name of the template alias that you want to describe. If you name a specific + // alias, you describe the version that the alias points to. You can specify + // the latest version of the template by providing the keyword $LATEST in the + // AliasName parameter. The keyword $PUBLISHED doesn't apply to templates. + // + // AliasName is a required field + AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` - // A display name for the dataset. - Name *string `min:"1" type:"string"` + // The ID of the AWS account that contains the template alias that you're describing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The row-level security configuration for the dataset. - RowLevelPermissionDataSet *RowLevelPermissionDataSet `type:"structure"` + // The ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DataSetSummary) String() string { +func (s DescribeTemplateAliasInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DataSetSummary) GoString() string { +func (s DescribeTemplateAliasInput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *DataSetSummary) SetArn(v string) *DataSetSummary { - s.Arn = &v - return s -} - -// SetCreatedTime sets the CreatedTime field's value. 
-func (s *DataSetSummary) SetCreatedTime(v time.Time) *DataSetSummary { - s.CreatedTime = &v - return s -} - -// SetDataSetId sets the DataSetId field's value. -func (s *DataSetSummary) SetDataSetId(v string) *DataSetSummary { - s.DataSetId = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTemplateAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTemplateAliasInput"} + if s.AliasName == nil { + invalidParams.Add(request.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.TemplateId == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + } -// SetImportMode sets the ImportMode field's value. -func (s *DataSetSummary) SetImportMode(v string) *DataSetSummary { - s.ImportMode = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetLastUpdatedTime sets the LastUpdatedTime field's value. -func (s *DataSetSummary) SetLastUpdatedTime(v time.Time) *DataSetSummary { - s.LastUpdatedTime = &v +// SetAliasName sets the AliasName field's value. +func (s *DescribeTemplateAliasInput) SetAliasName(v string) *DescribeTemplateAliasInput { + s.AliasName = &v return s } -// SetName sets the Name field's value. -func (s *DataSetSummary) SetName(v string) *DataSetSummary { - s.Name = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeTemplateAliasInput) SetAwsAccountId(v string) *DescribeTemplateAliasInput { + s.AwsAccountId = &v return s } -// SetRowLevelPermissionDataSet sets the RowLevelPermissionDataSet field's value. -func (s *DataSetSummary) SetRowLevelPermissionDataSet(v *RowLevelPermissionDataSet) *DataSetSummary { - s.RowLevelPermissionDataSet = v +// SetTemplateId sets the TemplateId field's value. +func (s *DescribeTemplateAliasInput) SetTemplateId(v string) *DescribeTemplateAliasInput { + s.TemplateId = &v return s } -// The structure of a data source. -type DataSource struct { +type DescribeTemplateAliasOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the data source. - Arn *string `type:"string"` - - // The time that this data source was created. - CreatedTime *time.Time `type:"timestamp"` - - // The ID of the data source. This ID is unique per AWS Region for each AWS - // account. - DataSourceId *string `type:"string"` - - // The parameters that Amazon QuickSight uses to connect to your underlying - // source. This is a variant type structure. For this structure to be valid, - // only one of the attributes can be non-null. - DataSourceParameters *DataSourceParameters `type:"structure"` - - // Error information from the last update or the creation of the data source. - ErrorInfo *DataSourceErrorInfo `type:"structure"` - - // The last time that this data source was updated. - LastUpdatedTime *time.Time `type:"timestamp"` - - // A display name for the data source. - Name *string `min:"1" type:"string"` - - // Secure Socket Layer (SSL) properties that apply when QuickSight connects - // to your underlying source. 
- SslProperties *SslProperties `type:"structure"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` // The HTTP status of the request. - Status *string `type:"string" enum:"ResourceStatus"` - - // The type of the data source. This type indicates which database engine the - // data source connects to. - Type *string `type:"string" enum:"DataSourceType"` + Status *int64 `location:"statusCode" type:"integer"` - // The VPC connection information. You need to use this parameter only when - // you want QuickSight to use a VPC connection when connecting to your underlying - // source. - VpcConnectionProperties *VpcConnectionProperties `type:"structure"` + // Information about the template alias. + TemplateAlias *TemplateAlias `type:"structure"` } // String returns the string representation -func (s DataSource) String() string { +func (s DescribeTemplateAliasOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DataSource) GoString() string { +func (s DescribeTemplateAliasOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *DataSource) SetArn(v string) *DataSource { - s.Arn = &v - return s -} - -// SetCreatedTime sets the CreatedTime field's value. -func (s *DataSource) SetCreatedTime(v time.Time) *DataSource { - s.CreatedTime = &v - return s -} - -// SetDataSourceId sets the DataSourceId field's value. -func (s *DataSource) SetDataSourceId(v string) *DataSource { - s.DataSourceId = &v - return s -} - -// SetDataSourceParameters sets the DataSourceParameters field's value. -func (s *DataSource) SetDataSourceParameters(v *DataSourceParameters) *DataSource { - s.DataSourceParameters = v - return s -} - -// SetErrorInfo sets the ErrorInfo field's value. -func (s *DataSource) SetErrorInfo(v *DataSourceErrorInfo) *DataSource { - s.ErrorInfo = v - return s -} - -// SetLastUpdatedTime sets the LastUpdatedTime field's value. -func (s *DataSource) SetLastUpdatedTime(v time.Time) *DataSource { - s.LastUpdatedTime = &v - return s -} - -// SetName sets the Name field's value. -func (s *DataSource) SetName(v string) *DataSource { - s.Name = &v - return s -} - -// SetSslProperties sets the SslProperties field's value. -func (s *DataSource) SetSslProperties(v *SslProperties) *DataSource { - s.SslProperties = v +// SetRequestId sets the RequestId field's value. +func (s *DescribeTemplateAliasOutput) SetRequestId(v string) *DescribeTemplateAliasOutput { + s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DataSource) SetStatus(v string) *DataSource { +func (s *DescribeTemplateAliasOutput) SetStatus(v int64) *DescribeTemplateAliasOutput { s.Status = &v return s } -// SetType sets the Type field's value. -func (s *DataSource) SetType(v string) *DataSource { - s.Type = &v +// SetTemplateAlias sets the TemplateAlias field's value. +func (s *DescribeTemplateAliasOutput) SetTemplateAlias(v *TemplateAlias) *DescribeTemplateAliasOutput { + s.TemplateAlias = v return s } -// SetVpcConnectionProperties sets the VpcConnectionProperties field's value. -func (s *DataSource) SetVpcConnectionProperties(v *VpcConnectionProperties) *DataSource { - s.VpcConnectionProperties = v - return s -} +type DescribeTemplateInput struct { + _ struct{} `type:"structure"` -// Data source credentials. -type DataSourceCredentials struct { - _ struct{} `type:"structure" sensitive:"true"` + // The alias of the template that you want to describe. 
If you name a specific + // alias, you describe the version that the alias points to. You can specify + // the latest version of the template by providing the keyword $LATEST in the + // AliasName parameter. The keyword $PUBLISHED doesn't apply to templates. + AliasName *string `location:"querystring" locationName:"alias-name" min:"1" type:"string"` - // Credential pair. - CredentialPair *CredentialPair `type:"structure"` + // The ID of the AWS account that contains the template that you're describing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + + // (Optional) The number for the version to describe. If a VersionNumber parameter + // value isn't provided, the latest version of the template is described. + VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` } // String returns the string representation -func (s DataSourceCredentials) String() string { +func (s DescribeTemplateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DataSourceCredentials) GoString() string { +func (s DescribeTemplateInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DataSourceCredentials) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DataSourceCredentials"} - if s.CredentialPair != nil { - if err := s.CredentialPair.Validate(); err != nil { - invalidParams.AddNested("CredentialPair", err.(request.ErrInvalidParams)) - } +func (s *DescribeTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTemplateInput"} + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.TemplateId == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + } + if s.VersionNumber != nil && *s.VersionNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("VersionNumber", 1)) } if invalidParams.Len() > 0 { @@ -11104,216 +20259,109 @@ func (s *DataSourceCredentials) Validate() error { return nil } -// SetCredentialPair sets the CredentialPair field's value. -func (s *DataSourceCredentials) SetCredentialPair(v *CredentialPair) *DataSourceCredentials { - s.CredentialPair = v +// SetAliasName sets the AliasName field's value. +func (s *DescribeTemplateInput) SetAliasName(v string) *DescribeTemplateInput { + s.AliasName = &v return s } -// Error information for the data source creation or update. -type DataSourceErrorInfo struct { - _ struct{} `type:"structure"` - - // Error message. - Message *string `type:"string"` - - // Error type. 
- Type *string `type:"string" enum:"DataSourceErrorInfoType"` -} - -// String returns the string representation -func (s DataSourceErrorInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DataSourceErrorInfo) GoString() string { - return s.String() +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeTemplateInput) SetAwsAccountId(v string) *DescribeTemplateInput { + s.AwsAccountId = &v + return s } -// SetMessage sets the Message field's value. -func (s *DataSourceErrorInfo) SetMessage(v string) *DataSourceErrorInfo { - s.Message = &v +// SetTemplateId sets the TemplateId field's value. +func (s *DescribeTemplateInput) SetTemplateId(v string) *DescribeTemplateInput { + s.TemplateId = &v return s } -// SetType sets the Type field's value. -func (s *DataSourceErrorInfo) SetType(v string) *DataSourceErrorInfo { - s.Type = &v +// SetVersionNumber sets the VersionNumber field's value. +func (s *DescribeTemplateInput) SetVersionNumber(v int64) *DescribeTemplateInput { + s.VersionNumber = &v return s } -// The parameters that Amazon QuickSight uses to connect to your underlying -// data source. This is a variant type structure. For this structure to be valid, -// only one of the attributes can be non-null. -type DataSourceParameters struct { +type DescribeTemplateOutput struct { _ struct{} `type:"structure"` - // Amazon Elasticsearch Service parameters. - AmazonElasticsearchParameters *AmazonElasticsearchParameters `type:"structure"` - - // Amazon Athena parameters. - AthenaParameters *AthenaParameters `type:"structure"` - - // Amazon Aurora MySQL parameters. - AuroraParameters *AuroraParameters `type:"structure"` - - // Aurora PostgreSQL parameters. - AuroraPostgreSqlParameters *AuroraPostgreSqlParameters `type:"structure"` - - // AWS IoT Analytics parameters. - AwsIotAnalyticsParameters *AwsIotAnalyticsParameters `type:"structure"` - - // Jira parameters. - JiraParameters *JiraParameters `type:"structure"` - - // MariaDB parameters. - MariaDbParameters *MariaDbParameters `type:"structure"` - - // MySQL parameters. - MySqlParameters *MySqlParameters `type:"structure"` - - // PostgreSQL parameters. - PostgreSqlParameters *PostgreSqlParameters `type:"structure"` - - // Presto parameters. - PrestoParameters *PrestoParameters `type:"structure"` - - // Amazon RDS parameters. - RdsParameters *RdsParameters `type:"structure"` - - // Amazon Redshift parameters. - RedshiftParameters *RedshiftParameters `type:"structure"` - - // S3 parameters. - S3Parameters *S3Parameters `type:"structure"` - - // ServiceNow parameters. - ServiceNowParameters *ServiceNowParameters `type:"structure"` - - // Snowflake parameters. - SnowflakeParameters *SnowflakeParameters `type:"structure"` - - // Spark parameters. - SparkParameters *SparkParameters `type:"structure"` - - // SQL Server parameters. - SqlServerParameters *SqlServerParameters `type:"structure"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // Teradata parameters. - TeradataParameters *TeradataParameters `type:"structure"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` - // Twitter parameters. - TwitterParameters *TwitterParameters `type:"structure"` + // The template structure for the object you want to describe. 
+ Template *Template `type:"structure"` } // String returns the string representation -func (s DataSourceParameters) String() string { +func (s DescribeTemplateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DataSourceParameters) GoString() string { +func (s DescribeTemplateOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DataSourceParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DataSourceParameters"} - if s.AmazonElasticsearchParameters != nil { - if err := s.AmazonElasticsearchParameters.Validate(); err != nil { - invalidParams.AddNested("AmazonElasticsearchParameters", err.(request.ErrInvalidParams)) - } - } - if s.AthenaParameters != nil { - if err := s.AthenaParameters.Validate(); err != nil { - invalidParams.AddNested("AthenaParameters", err.(request.ErrInvalidParams)) - } - } - if s.AuroraParameters != nil { - if err := s.AuroraParameters.Validate(); err != nil { - invalidParams.AddNested("AuroraParameters", err.(request.ErrInvalidParams)) - } - } - if s.AuroraPostgreSqlParameters != nil { - if err := s.AuroraPostgreSqlParameters.Validate(); err != nil { - invalidParams.AddNested("AuroraPostgreSqlParameters", err.(request.ErrInvalidParams)) - } - } - if s.AwsIotAnalyticsParameters != nil { - if err := s.AwsIotAnalyticsParameters.Validate(); err != nil { - invalidParams.AddNested("AwsIotAnalyticsParameters", err.(request.ErrInvalidParams)) - } - } - if s.JiraParameters != nil { - if err := s.JiraParameters.Validate(); err != nil { - invalidParams.AddNested("JiraParameters", err.(request.ErrInvalidParams)) - } - } - if s.MariaDbParameters != nil { - if err := s.MariaDbParameters.Validate(); err != nil { - invalidParams.AddNested("MariaDbParameters", err.(request.ErrInvalidParams)) - } - } - if s.MySqlParameters != nil { - if err := s.MySqlParameters.Validate(); err != nil { - invalidParams.AddNested("MySqlParameters", err.(request.ErrInvalidParams)) - } - } - if s.PostgreSqlParameters != nil { - if err := s.PostgreSqlParameters.Validate(); err != nil { - invalidParams.AddNested("PostgreSqlParameters", err.(request.ErrInvalidParams)) - } - } - if s.PrestoParameters != nil { - if err := s.PrestoParameters.Validate(); err != nil { - invalidParams.AddNested("PrestoParameters", err.(request.ErrInvalidParams)) - } - } - if s.RdsParameters != nil { - if err := s.RdsParameters.Validate(); err != nil { - invalidParams.AddNested("RdsParameters", err.(request.ErrInvalidParams)) - } - } - if s.RedshiftParameters != nil { - if err := s.RedshiftParameters.Validate(); err != nil { - invalidParams.AddNested("RedshiftParameters", err.(request.ErrInvalidParams)) - } - } - if s.S3Parameters != nil { - if err := s.S3Parameters.Validate(); err != nil { - invalidParams.AddNested("S3Parameters", err.(request.ErrInvalidParams)) - } - } - if s.ServiceNowParameters != nil { - if err := s.ServiceNowParameters.Validate(); err != nil { - invalidParams.AddNested("ServiceNowParameters", err.(request.ErrInvalidParams)) - } - } - if s.SnowflakeParameters != nil { - if err := s.SnowflakeParameters.Validate(); err != nil { - invalidParams.AddNested("SnowflakeParameters", err.(request.ErrInvalidParams)) - } - } - if s.SparkParameters != nil { - if err := s.SparkParameters.Validate(); err != nil { - invalidParams.AddNested("SparkParameters", err.(request.ErrInvalidParams)) - } +// SetRequestId sets the RequestId field's value. 
+func (s *DescribeTemplateOutput) SetRequestId(v string) *DescribeTemplateOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DescribeTemplateOutput) SetStatus(v int64) *DescribeTemplateOutput { + s.Status = &v + return s +} + +// SetTemplate sets the Template field's value. +func (s *DescribeTemplateOutput) SetTemplate(v *Template) *DescribeTemplateOutput { + s.Template = v + return s +} + +type DescribeTemplatePermissionsInput struct { + _ struct{} `type:"structure"` + + // The ID of the AWS account that contains the template that you're describing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTemplatePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTemplatePermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTemplatePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTemplatePermissionsInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.SqlServerParameters != nil { - if err := s.SqlServerParameters.Validate(); err != nil { - invalidParams.AddNested("SqlServerParameters", err.(request.ErrInvalidParams)) - } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.TeradataParameters != nil { - if err := s.TeradataParameters.Validate(); err != nil { - invalidParams.AddNested("TeradataParameters", err.(request.ErrInvalidParams)) - } + if s.TemplateId == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateId")) } - if s.TwitterParameters != nil { - if err := s.TwitterParameters.Validate(); err != nil { - invalidParams.AddNested("TwitterParameters", err.(request.ErrInvalidParams)) - } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) } if invalidParams.Len() > 0 { @@ -11322,153 +20370,247 @@ func (s *DataSourceParameters) Validate() error { return nil } -// SetAmazonElasticsearchParameters sets the AmazonElasticsearchParameters field's value. -func (s *DataSourceParameters) SetAmazonElasticsearchParameters(v *AmazonElasticsearchParameters) *DataSourceParameters { - s.AmazonElasticsearchParameters = v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeTemplatePermissionsInput) SetAwsAccountId(v string) *DescribeTemplatePermissionsInput { + s.AwsAccountId = &v return s } -// SetAthenaParameters sets the AthenaParameters field's value. -func (s *DataSourceParameters) SetAthenaParameters(v *AthenaParameters) *DataSourceParameters { - s.AthenaParameters = v +// SetTemplateId sets the TemplateId field's value. +func (s *DescribeTemplatePermissionsInput) SetTemplateId(v string) *DescribeTemplatePermissionsInput { + s.TemplateId = &v return s } -// SetAuroraParameters sets the AuroraParameters field's value. 
-func (s *DataSourceParameters) SetAuroraParameters(v *AuroraParameters) *DataSourceParameters { - s.AuroraParameters = v - return s +type DescribeTemplatePermissionsOutput struct { + _ struct{} `type:"structure"` + + // A list of resource permissions to be set on the template. + Permissions []*ResourcePermission `min:"1" type:"list"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The Amazon Resource Name (ARN) of the template. + TemplateArn *string `type:"string"` + + // The ID for the template. + TemplateId *string `min:"1" type:"string"` } -// SetAuroraPostgreSqlParameters sets the AuroraPostgreSqlParameters field's value. -func (s *DataSourceParameters) SetAuroraPostgreSqlParameters(v *AuroraPostgreSqlParameters) *DataSourceParameters { - s.AuroraPostgreSqlParameters = v - return s +// String returns the string representation +func (s DescribeTemplatePermissionsOutput) String() string { + return awsutil.Prettify(s) } -// SetAwsIotAnalyticsParameters sets the AwsIotAnalyticsParameters field's value. -func (s *DataSourceParameters) SetAwsIotAnalyticsParameters(v *AwsIotAnalyticsParameters) *DataSourceParameters { - s.AwsIotAnalyticsParameters = v - return s +// GoString returns the string representation +func (s DescribeTemplatePermissionsOutput) GoString() string { + return s.String() } -// SetJiraParameters sets the JiraParameters field's value. -func (s *DataSourceParameters) SetJiraParameters(v *JiraParameters) *DataSourceParameters { - s.JiraParameters = v +// SetPermissions sets the Permissions field's value. +func (s *DescribeTemplatePermissionsOutput) SetPermissions(v []*ResourcePermission) *DescribeTemplatePermissionsOutput { + s.Permissions = v return s } -// SetMariaDbParameters sets the MariaDbParameters field's value. -func (s *DataSourceParameters) SetMariaDbParameters(v *MariaDbParameters) *DataSourceParameters { - s.MariaDbParameters = v +// SetRequestId sets the RequestId field's value. +func (s *DescribeTemplatePermissionsOutput) SetRequestId(v string) *DescribeTemplatePermissionsOutput { + s.RequestId = &v return s } -// SetMySqlParameters sets the MySqlParameters field's value. -func (s *DataSourceParameters) SetMySqlParameters(v *MySqlParameters) *DataSourceParameters { - s.MySqlParameters = v +// SetStatus sets the Status field's value. +func (s *DescribeTemplatePermissionsOutput) SetStatus(v int64) *DescribeTemplatePermissionsOutput { + s.Status = &v return s } -// SetPostgreSqlParameters sets the PostgreSqlParameters field's value. -func (s *DataSourceParameters) SetPostgreSqlParameters(v *PostgreSqlParameters) *DataSourceParameters { - s.PostgreSqlParameters = v +// SetTemplateArn sets the TemplateArn field's value. +func (s *DescribeTemplatePermissionsOutput) SetTemplateArn(v string) *DescribeTemplatePermissionsOutput { + s.TemplateArn = &v return s } -// SetPrestoParameters sets the PrestoParameters field's value. -func (s *DataSourceParameters) SetPrestoParameters(v *PrestoParameters) *DataSourceParameters { - s.PrestoParameters = v +// SetTemplateId sets the TemplateId field's value. +func (s *DescribeTemplatePermissionsOutput) SetTemplateId(v string) *DescribeTemplatePermissionsOutput { + s.TemplateId = &v return s } -// SetRdsParameters sets the RdsParameters field's value. 
-func (s *DataSourceParameters) SetRdsParameters(v *RdsParameters) *DataSourceParameters { - s.RdsParameters = v - return s +type DescribeThemeAliasInput struct { + _ struct{} `type:"structure"` + + // The name of the theme alias that you want to describe. + // + // AliasName is a required field + AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` + + // The ID of the AWS account that contains the theme alias that you're describing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // The ID for the theme. + // + // ThemeId is a required field + ThemeId *string `location:"uri" locationName:"ThemeId" min:"1" type:"string" required:"true"` } -// SetRedshiftParameters sets the RedshiftParameters field's value. -func (s *DataSourceParameters) SetRedshiftParameters(v *RedshiftParameters) *DataSourceParameters { - s.RedshiftParameters = v - return s +// String returns the string representation +func (s DescribeThemeAliasInput) String() string { + return awsutil.Prettify(s) } -// SetS3Parameters sets the S3Parameters field's value. -func (s *DataSourceParameters) SetS3Parameters(v *S3Parameters) *DataSourceParameters { - s.S3Parameters = v - return s +// GoString returns the string representation +func (s DescribeThemeAliasInput) GoString() string { + return s.String() } -// SetServiceNowParameters sets the ServiceNowParameters field's value. -func (s *DataSourceParameters) SetServiceNowParameters(v *ServiceNowParameters) *DataSourceParameters { - s.ServiceNowParameters = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeThemeAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeThemeAliasInput"} + if s.AliasName == nil { + invalidParams.Add(request.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.ThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeId")) + } + if s.ThemeId != nil && len(*s.ThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThemeId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasName sets the AliasName field's value. +func (s *DescribeThemeAliasInput) SetAliasName(v string) *DescribeThemeAliasInput { + s.AliasName = &v return s } -// SetSnowflakeParameters sets the SnowflakeParameters field's value. -func (s *DataSourceParameters) SetSnowflakeParameters(v *SnowflakeParameters) *DataSourceParameters { - s.SnowflakeParameters = v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeThemeAliasInput) SetAwsAccountId(v string) *DescribeThemeAliasInput { + s.AwsAccountId = &v return s } -// SetSparkParameters sets the SparkParameters field's value. -func (s *DataSourceParameters) SetSparkParameters(v *SparkParameters) *DataSourceParameters { - s.SparkParameters = v +// SetThemeId sets the ThemeId field's value. +func (s *DescribeThemeAliasInput) SetThemeId(v string) *DescribeThemeAliasInput { + s.ThemeId = &v return s } -// SetSqlServerParameters sets the SqlServerParameters field's value. 
-func (s *DataSourceParameters) SetSqlServerParameters(v *SqlServerParameters) *DataSourceParameters { - s.SqlServerParameters = v +type DescribeThemeAliasOutput struct { + _ struct{} `type:"structure"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // Information about the theme alias. + ThemeAlias *ThemeAlias `type:"structure"` +} + +// String returns the string representation +func (s DescribeThemeAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeThemeAliasOutput) GoString() string { + return s.String() +} + +// SetRequestId sets the RequestId field's value. +func (s *DescribeThemeAliasOutput) SetRequestId(v string) *DescribeThemeAliasOutput { + s.RequestId = &v return s } -// SetTeradataParameters sets the TeradataParameters field's value. -func (s *DataSourceParameters) SetTeradataParameters(v *TeradataParameters) *DataSourceParameters { - s.TeradataParameters = v +// SetStatus sets the Status field's value. +func (s *DescribeThemeAliasOutput) SetStatus(v int64) *DescribeThemeAliasOutput { + s.Status = &v return s } -// SetTwitterParameters sets the TwitterParameters field's value. -func (s *DataSourceParameters) SetTwitterParameters(v *TwitterParameters) *DataSourceParameters { - s.TwitterParameters = v +// SetThemeAlias sets the ThemeAlias field's value. +func (s *DescribeThemeAliasOutput) SetThemeAlias(v *ThemeAlias) *DescribeThemeAliasOutput { + s.ThemeAlias = v return s } -// Date time parameter. -type DateTimeParameter struct { +type DescribeThemeInput struct { _ struct{} `type:"structure"` - // A display name for the dataset. + // The alias of the theme that you want to describe. If you name a specific + // alias, you describe the version that the alias points to. You can specify + // the latest version of the theme by providing the keyword $LATEST in the AliasName + // parameter. The keyword $PUBLISHED doesn't apply to themes. + AliasName *string `location:"querystring" locationName:"alias-name" min:"1" type:"string"` + + // The ID of the AWS account that contains the theme that you're describing. // - // Name is a required field - Name *string `type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" type:"string" required:"true"` - // Values. + // The ID for the theme. // - // Values is a required field - Values []*time.Time `type:"list" required:"true"` + // ThemeId is a required field + ThemeId *string `location:"uri" locationName:"ThemeId" min:"1" type:"string" required:"true"` + + // The version number for the version to describe. If a VersionNumber parameter + // value isn't provided, the latest version of the theme is described. + VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` } // String returns the string representation -func (s DateTimeParameter) String() string { +func (s DescribeThemeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DateTimeParameter) GoString() string { +func (s DescribeThemeInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DateTimeParameter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DateTimeParameter"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *DescribeThemeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeThemeInput"} + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.Values == nil { - invalidParams.Add(request.NewErrParamRequired("Values")) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 1)) + } + if s.ThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeId")) + } + if s.ThemeId != nil && len(*s.ThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThemeId", 1)) + } + if s.VersionNumber != nil && *s.VersionNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("VersionNumber", 1)) } if invalidParams.Len() > 0 { @@ -11477,116 +20619,109 @@ func (s *DateTimeParameter) Validate() error { return nil } -// SetName sets the Name field's value. -func (s *DateTimeParameter) SetName(v string) *DateTimeParameter { - s.Name = &v +// SetAliasName sets the AliasName field's value. +func (s *DescribeThemeInput) SetAliasName(v string) *DescribeThemeInput { + s.AliasName = &v return s } -// SetValues sets the Values field's value. -func (s *DateTimeParameter) SetValues(v []*time.Time) *DateTimeParameter { - s.Values = v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *DescribeThemeInput) SetAwsAccountId(v string) *DescribeThemeInput { + s.AwsAccountId = &v return s } -// Decimal parameter. -type DecimalParameter struct { +// SetThemeId sets the ThemeId field's value. +func (s *DescribeThemeInput) SetThemeId(v string) *DescribeThemeInput { + s.ThemeId = &v + return s +} + +// SetVersionNumber sets the VersionNumber field's value. +func (s *DescribeThemeInput) SetVersionNumber(v int64) *DescribeThemeInput { + s.VersionNumber = &v + return s +} + +type DescribeThemeOutput struct { _ struct{} `type:"structure"` - // A display name for the dataset. - // - // Name is a required field - Name *string `type:"string" required:"true"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // Values. - // - // Values is a required field - Values []*float64 `type:"list" required:"true"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The information about the theme that you are describing. + Theme *Theme `type:"structure"` } // String returns the string representation -func (s DecimalParameter) String() string { +func (s DescribeThemeOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DecimalParameter) GoString() string { +func (s DescribeThemeOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DecimalParameter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DecimalParameter"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Values == nil { - invalidParams.Add(request.NewErrParamRequired("Values")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetRequestId sets the RequestId field's value. 
+func (s *DescribeThemeOutput) SetRequestId(v string) *DescribeThemeOutput { + s.RequestId = &v + return s } -// SetName sets the Name field's value. -func (s *DecimalParameter) SetName(v string) *DecimalParameter { - s.Name = &v +// SetStatus sets the Status field's value. +func (s *DescribeThemeOutput) SetStatus(v int64) *DescribeThemeOutput { + s.Status = &v return s } -// SetValues sets the Values field's value. -func (s *DecimalParameter) SetValues(v []*float64) *DecimalParameter { - s.Values = v +// SetTheme sets the Theme field's value. +func (s *DescribeThemeOutput) SetTheme(v *Theme) *DescribeThemeOutput { + s.Theme = v return s } -type DeleteDashboardInput struct { +type DescribeThemePermissionsInput struct { _ struct{} `type:"structure"` - // The ID of the AWS account that contains the dashboard that you're deleting. + // The ID of the AWS account that contains the theme that you're describing. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID for the dashboard. + // The ID for the theme that you want to describe permissions for. // - // DashboardId is a required field - DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` - - // The version number of the dashboard. If the version number property is provided, - // only the specified version of the dashboard is deleted. - VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` + // ThemeId is a required field + ThemeId *string `location:"uri" locationName:"ThemeId" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteDashboardInput) String() string { +func (s DescribeThemePermissionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDashboardInput) GoString() string { +func (s DescribeThemePermissionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDashboardInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDashboardInput"} +func (s *DescribeThemePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeThemePermissionsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DashboardId == nil { - invalidParams.Add(request.NewErrParamRequired("DashboardId")) - } - if s.DashboardId != nil && len(*s.DashboardId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) + if s.ThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeId")) } - if s.VersionNumber != nil && *s.VersionNumber < 1 { - invalidParams.Add(request.NewErrParamMinValue("VersionNumber", 1)) + if s.ThemeId != nil && len(*s.ThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThemeId", 1)) } if invalidParams.Len() > 0 { @@ -11596,112 +20731,126 @@ func (s *DeleteDashboardInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. 
-func (s *DeleteDashboardInput) SetAwsAccountId(v string) *DeleteDashboardInput { +func (s *DescribeThemePermissionsInput) SetAwsAccountId(v string) *DescribeThemePermissionsInput { s.AwsAccountId = &v return s } -// SetDashboardId sets the DashboardId field's value. -func (s *DeleteDashboardInput) SetDashboardId(v string) *DeleteDashboardInput { - s.DashboardId = &v - return s -} - -// SetVersionNumber sets the VersionNumber field's value. -func (s *DeleteDashboardInput) SetVersionNumber(v int64) *DeleteDashboardInput { - s.VersionNumber = &v +// SetThemeId sets the ThemeId field's value. +func (s *DescribeThemePermissionsInput) SetThemeId(v string) *DescribeThemePermissionsInput { + s.ThemeId = &v return s } -type DeleteDashboardOutput struct { +type DescribeThemePermissionsOutput struct { _ struct{} `type:"structure"` - // The Secure Socket Layer (SSL) properties that apply for the resource. - Arn *string `type:"string"` - - // The ID of the dashboard. - DashboardId *string `min:"1" type:"string"` + // A list of resource permissions set on the theme. + Permissions []*ResourcePermission `min:"1" type:"list"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` + + // The Amazon Resource Name (ARN) of the theme. + ThemeArn *string `type:"string"` + + // The ID for the theme. + ThemeId *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteDashboardOutput) String() string { +func (s DescribeThemePermissionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDashboardOutput) GoString() string { +func (s DescribeThemePermissionsOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *DeleteDashboardOutput) SetArn(v string) *DeleteDashboardOutput { - s.Arn = &v - return s -} - -// SetDashboardId sets the DashboardId field's value. -func (s *DeleteDashboardOutput) SetDashboardId(v string) *DeleteDashboardOutput { - s.DashboardId = &v +// SetPermissions sets the Permissions field's value. +func (s *DescribeThemePermissionsOutput) SetPermissions(v []*ResourcePermission) *DescribeThemePermissionsOutput { + s.Permissions = v return s } // SetRequestId sets the RequestId field's value. -func (s *DeleteDashboardOutput) SetRequestId(v string) *DeleteDashboardOutput { +func (s *DescribeThemePermissionsOutput) SetRequestId(v string) *DescribeThemePermissionsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DeleteDashboardOutput) SetStatus(v int64) *DeleteDashboardOutput { +func (s *DescribeThemePermissionsOutput) SetStatus(v int64) *DescribeThemePermissionsOutput { s.Status = &v return s } -type DeleteDataSetInput struct { +// SetThemeArn sets the ThemeArn field's value. +func (s *DescribeThemePermissionsOutput) SetThemeArn(v string) *DescribeThemePermissionsOutput { + s.ThemeArn = &v + return s +} + +// SetThemeId sets the ThemeId field's value. +func (s *DescribeThemePermissionsOutput) SetThemeId(v string) *DescribeThemePermissionsOutput { + s.ThemeId = &v + return s +} + +type DescribeUserInput struct { _ struct{} `type:"structure"` - // The AWS account ID. + // The ID for the AWS account that the user is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. 
// // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID for the dataset that you want to create. This ID is unique per AWS - // Region for each AWS account. + // The namespace. Currently, you should set this to default. // - // DataSetId is a required field - DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + + // The name of the user that you want to describe. + // + // UserName is a required field + UserName *string `location:"uri" locationName:"UserName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteDataSetInput) String() string { +func (s DescribeUserInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDataSetInput) GoString() string { +func (s DescribeUserInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDataSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDataSetInput"} +func (s *DescribeUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeUserInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSetId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetId")) + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) } - if s.DataSetId != nil && len(*s.DataSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) } if invalidParams.Len() > 0 { @@ -11711,228 +20860,271 @@ func (s *DeleteDataSetInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *DeleteDataSetInput) SetAwsAccountId(v string) *DeleteDataSetInput { +func (s *DescribeUserInput) SetAwsAccountId(v string) *DescribeUserInput { s.AwsAccountId = &v return s } -// SetDataSetId sets the DataSetId field's value. -func (s *DeleteDataSetInput) SetDataSetId(v string) *DeleteDataSetInput { - s.DataSetId = &v +// SetNamespace sets the Namespace field's value. +func (s *DescribeUserInput) SetNamespace(v string) *DescribeUserInput { + s.Namespace = &v return s } -type DeleteDataSetOutput struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the dataset. - Arn *string `type:"string"` +// SetUserName sets the UserName field's value. +func (s *DescribeUserInput) SetUserName(v string) *DescribeUserInput { + s.UserName = &v + return s +} - // The ID for the dataset that you want to create. This ID is unique per AWS - // Region for each AWS account. - DataSetId *string `type:"string"` +type DescribeUserOutput struct { + _ struct{} `type:"structure"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. 
Status *int64 `location:"statusCode" type:"integer"` + + // The user name. + User *User `type:"structure"` } // String returns the string representation -func (s DeleteDataSetOutput) String() string { +func (s DescribeUserOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDataSetOutput) GoString() string { +func (s DescribeUserOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *DeleteDataSetOutput) SetArn(v string) *DeleteDataSetOutput { - s.Arn = &v +// SetRequestId sets the RequestId field's value. +func (s *DescribeUserOutput) SetRequestId(v string) *DescribeUserOutput { + s.RequestId = &v return s } -// SetDataSetId sets the DataSetId field's value. -func (s *DeleteDataSetOutput) SetDataSetId(v string) *DeleteDataSetOutput { - s.DataSetId = &v +// SetStatus sets the Status field's value. +func (s *DescribeUserOutput) SetStatus(v int64) *DescribeUserOutput { + s.Status = &v return s } -// SetRequestId sets the RequestId field's value. -func (s *DeleteDataSetOutput) SetRequestId(v string) *DeleteDataSetOutput { - s.RequestId = &v +// SetUser sets the User field's value. +func (s *DescribeUserOutput) SetUser(v *User) *DescribeUserOutput { + s.User = v return s } -// SetStatus sets the Status field's value. -func (s *DeleteDataSetOutput) SetStatus(v int64) *DeleteDataSetOutput { - s.Status = &v - return s +// The domain specified isn't on the allow list. All domains for embedded dashboards +// must be added to the approved list by an Amazon QuickSight admin. +type DomainNotWhitelistedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + // The AWS request ID for this request. + RequestId *string `type:"string"` +} + +// String returns the string representation +func (s DomainNotWhitelistedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainNotWhitelistedException) GoString() string { + return s.String() +} + +func newErrorDomainNotWhitelistedException(v protocol.ResponseMetadata) error { + return &DomainNotWhitelistedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DomainNotWhitelistedException) Code() string { + return "DomainNotWhitelistedException" +} + +// Message returns the exception's message. +func (s *DomainNotWhitelistedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DomainNotWhitelistedException) OrigErr() error { + return nil +} + +func (s *DomainNotWhitelistedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DomainNotWhitelistedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DomainNotWhitelistedException) RequestID() string { + return s.RespMetadata.RequestID } -type DeleteDataSourceInput struct { +// Error information for the SPICE ingestion of a dataset. +type ErrorInfo struct { _ struct{} `type:"structure"` - // The AWS account ID. 
- // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + // Error message. + Message *string `type:"string"` - // The ID of the data source. This ID is unique per AWS Region for each AWS - // account. - // - // DataSourceId is a required field - DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` + // Error type. + Type *string `type:"string" enum:"IngestionErrorType"` } // String returns the string representation -func (s DeleteDataSourceInput) String() string { +func (s ErrorInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDataSourceInput) GoString() string { +func (s ErrorInfo) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDataSourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDataSourceInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.DataSourceId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSourceId")) - } - if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DeleteDataSourceInput) SetAwsAccountId(v string) *DeleteDataSourceInput { - s.AwsAccountId = &v +// SetMessage sets the Message field's value. +func (s *ErrorInfo) SetMessage(v string) *ErrorInfo { + s.Message = &v return s } -// SetDataSourceId sets the DataSourceId field's value. -func (s *DeleteDataSourceInput) SetDataSourceId(v string) *DeleteDataSourceInput { - s.DataSourceId = &v +// SetType sets the Type field's value. +func (s *ErrorInfo) SetType(v string) *ErrorInfo { + s.Type = &v return s } -type DeleteDataSourceOutput struct { +// Export to .csv option. +type ExportToCSVOption struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the data source that you deleted. - Arn *string `type:"string"` - - // The ID of the data source. This ID is unique per AWS Region for each AWS - // account. - DataSourceId *string `type:"string"` - - // The AWS request ID for this operation. - RequestId *string `type:"string"` - - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // Availability status. + AvailabilityStatus *string `type:"string" enum:"DashboardBehavior"` } // String returns the string representation -func (s DeleteDataSourceOutput) String() string { +func (s ExportToCSVOption) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteDataSourceOutput) GoString() string { +func (s ExportToCSVOption) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *DeleteDataSourceOutput) SetArn(v string) *DeleteDataSourceOutput { - s.Arn = &v +// SetAvailabilityStatus sets the AvailabilityStatus field's value. +func (s *ExportToCSVOption) SetAvailabilityStatus(v string) *ExportToCSVOption { + s.AvailabilityStatus = &v return s } -// SetDataSourceId sets the DataSourceId field's value. 
-func (s *DeleteDataSourceOutput) SetDataSourceId(v string) *DeleteDataSourceOutput { - s.DataSourceId = &v - return s +// A transform operation that filters rows based on a condition. +type FilterOperation struct { + _ struct{} `type:"structure"` + + // An expression that must evaluate to a Boolean value. Rows for which the expression + // evaluates to true are kept in the dataset. + // + // ConditionExpression is a required field + ConditionExpression *string `min:"1" type:"string" required:"true"` } -// SetRequestId sets the RequestId field's value. -func (s *DeleteDataSourceOutput) SetRequestId(v string) *DeleteDataSourceOutput { - s.RequestId = &v - return s +// String returns the string representation +func (s FilterOperation) String() string { + return awsutil.Prettify(s) } -// SetStatus sets the Status field's value. -func (s *DeleteDataSourceOutput) SetStatus(v int64) *DeleteDataSourceOutput { - s.Status = &v +// GoString returns the string representation +func (s FilterOperation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FilterOperation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FilterOperation"} + if s.ConditionExpression == nil { + invalidParams.Add(request.NewErrParamRequired("ConditionExpression")) + } + if s.ConditionExpression != nil && len(*s.ConditionExpression) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConditionExpression", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *FilterOperation) SetConditionExpression(v string) *FilterOperation { + s.ConditionExpression = &v return s } -type DeleteGroupInput struct { +// Geospatial column group that denotes a hierarchy. +type GeoSpatialColumnGroup struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the group is in. Currently, you use the ID - // for the AWS account that contains your Amazon QuickSight account. + // Columns in this hierarchy. // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + // Columns is a required field + Columns []*string `min:"1" type:"list" required:"true"` - // The name of the group that you want to delete. + // Country code. // - // GroupName is a required field - GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + // CountryCode is a required field + CountryCode *string `type:"string" required:"true" enum:"GeoSpatialCountryCode"` - // The namespace. Currently, you should set this to default. + // A display name for the hierarchy. // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DeleteGroupInput) String() string { +func (s GeoSpatialColumnGroup) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteGroupInput) GoString() string { +func (s GeoSpatialColumnGroup) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteGroupInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) +func (s *GeoSpatialColumnGroup) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GeoSpatialColumnGroup"} + if s.Columns == nil { + invalidParams.Add(request.NewErrParamRequired("Columns")) } - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) + if s.Columns != nil && len(s.Columns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Columns", 1)) } - if s.GroupName != nil && len(*s.GroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + if s.CountryCode == nil { + invalidParams.Add(request.NewErrParamRequired("CountryCode")) } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if invalidParams.Len() > 0 { @@ -11941,85 +21133,100 @@ func (s *DeleteGroupInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DeleteGroupInput) SetAwsAccountId(v string) *DeleteGroupInput { - s.AwsAccountId = &v +// SetColumns sets the Columns field's value. +func (s *GeoSpatialColumnGroup) SetColumns(v []*string) *GeoSpatialColumnGroup { + s.Columns = v return s } -// SetGroupName sets the GroupName field's value. -func (s *DeleteGroupInput) SetGroupName(v string) *DeleteGroupInput { - s.GroupName = &v +// SetCountryCode sets the CountryCode field's value. +func (s *GeoSpatialColumnGroup) SetCountryCode(v string) *GeoSpatialColumnGroup { + s.CountryCode = &v return s } -// SetNamespace sets the Namespace field's value. -func (s *DeleteGroupInput) SetNamespace(v string) *DeleteGroupInput { - s.Namespace = &v +// SetName sets the Name field's value. +func (s *GeoSpatialColumnGroup) SetName(v string) *GeoSpatialColumnGroup { + s.Name = &v return s } -type DeleteGroupMembershipInput struct { +type GetDashboardEmbedUrlInput struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the group is in. Currently, you use the ID - // for the AWS account that contains your Amazon QuickSight account. + // The ID for the AWS account that contains the dashboard that you're embedding. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The name of the group that you want to delete the user from. + // The ID for the dashboard, also added to the IAM policy. // - // GroupName is a required field - GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` - // The name of the user that you want to delete from the group membership. + // The authentication method that the user uses to sign in. 
// - // MemberName is a required field - MemberName *string `location:"uri" locationName:"MemberName" min:"1" type:"string" required:"true"` + // IdentityType is a required field + IdentityType *string `location:"querystring" locationName:"creds-type" type:"string" required:"true" enum:"IdentityType"` - // The namespace. Currently, you should set this to default. + // Remove the reset button on the embedded dashboard. The default is FALSE, + // which enables the reset button. + ResetDisabled *bool `location:"querystring" locationName:"reset-disabled" type:"boolean"` + + // How many minutes the session is valid. The session lifetime must be 15-600 + // minutes. + SessionLifetimeInMinutes *int64 `location:"querystring" locationName:"session-lifetime" min:"15" type:"long"` + + // Remove the undo/redo button on the embedded dashboard. The default is FALSE, + // which enables the undo/redo button. + UndoRedoDisabled *bool `location:"querystring" locationName:"undo-redo-disabled" type:"boolean"` + + // The Amazon QuickSight user's Amazon Resource Name (ARN), for use with QUICKSIGHT + // identity type. You can use this for any Amazon QuickSight users in your account + // (readers, authors, or admins) authenticated as one of the following: // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // * Active Directory (AD) users or group members + // + // * Invited nonfederated users + // + // * IAM users and IAM role-based sessions authenticated through Federated + // Single Sign-On using SAML, OpenID Connect, or IAM federation. + // + // Omit this parameter for users in the third group – IAM users and IAM role-based + // sessions. + UserArn *string `location:"querystring" locationName:"user-arn" type:"string"` } // String returns the string representation -func (s DeleteGroupMembershipInput) String() string { +func (s GetDashboardEmbedUrlInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteGroupMembershipInput) GoString() string { +func (s GetDashboardEmbedUrlInput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteGroupMembershipInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteGroupMembershipInput"} +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetDashboardEmbedUrlInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDashboardEmbedUrlInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) - } - if s.GroupName != nil && len(*s.GroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) - } - if s.MemberName == nil { - invalidParams.Add(request.NewErrParamRequired("MemberName")) + if s.DashboardId == nil { + invalidParams.Add(request.NewErrParamRequired("DashboardId")) } - if s.MemberName != nil && len(*s.MemberName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MemberName", 1)) + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) + if s.IdentityType == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityType")) } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + if s.SessionLifetimeInMinutes != nil && *s.SessionLifetimeInMinutes < 15 { + invalidParams.Add(request.NewErrParamMinValue("SessionLifetimeInMinutes", 15)) } if invalidParams.Len() > 0 { @@ -12029,64 +21236,56 @@ func (s *DeleteGroupMembershipInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *DeleteGroupMembershipInput) SetAwsAccountId(v string) *DeleteGroupMembershipInput { +func (s *GetDashboardEmbedUrlInput) SetAwsAccountId(v string) *GetDashboardEmbedUrlInput { s.AwsAccountId = &v return s } -// SetGroupName sets the GroupName field's value. -func (s *DeleteGroupMembershipInput) SetGroupName(v string) *DeleteGroupMembershipInput { - s.GroupName = &v +// SetDashboardId sets the DashboardId field's value. +func (s *GetDashboardEmbedUrlInput) SetDashboardId(v string) *GetDashboardEmbedUrlInput { + s.DashboardId = &v return s } -// SetMemberName sets the MemberName field's value. -func (s *DeleteGroupMembershipInput) SetMemberName(v string) *DeleteGroupMembershipInput { - s.MemberName = &v +// SetIdentityType sets the IdentityType field's value. +func (s *GetDashboardEmbedUrlInput) SetIdentityType(v string) *GetDashboardEmbedUrlInput { + s.IdentityType = &v return s } -// SetNamespace sets the Namespace field's value. -func (s *DeleteGroupMembershipInput) SetNamespace(v string) *DeleteGroupMembershipInput { - s.Namespace = &v +// SetResetDisabled sets the ResetDisabled field's value. +func (s *GetDashboardEmbedUrlInput) SetResetDisabled(v bool) *GetDashboardEmbedUrlInput { + s.ResetDisabled = &v return s } -type DeleteGroupMembershipOutput struct { - _ struct{} `type:"structure"` - - // The AWS request ID for this operation. - RequestId *string `type:"string"` - - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` -} - -// String returns the string representation -func (s DeleteGroupMembershipOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteGroupMembershipOutput) GoString() string { - return s.String() +// SetSessionLifetimeInMinutes sets the SessionLifetimeInMinutes field's value. 
+func (s *GetDashboardEmbedUrlInput) SetSessionLifetimeInMinutes(v int64) *GetDashboardEmbedUrlInput { + s.SessionLifetimeInMinutes = &v + return s } -// SetRequestId sets the RequestId field's value. -func (s *DeleteGroupMembershipOutput) SetRequestId(v string) *DeleteGroupMembershipOutput { - s.RequestId = &v +// SetUndoRedoDisabled sets the UndoRedoDisabled field's value. +func (s *GetDashboardEmbedUrlInput) SetUndoRedoDisabled(v bool) *GetDashboardEmbedUrlInput { + s.UndoRedoDisabled = &v return s } -// SetStatus sets the Status field's value. -func (s *DeleteGroupMembershipOutput) SetStatus(v int64) *DeleteGroupMembershipOutput { - s.Status = &v +// SetUserArn sets the UserArn field's value. +func (s *GetDashboardEmbedUrlInput) SetUserArn(v string) *GetDashboardEmbedUrlInput { + s.UserArn = &v return s } -type DeleteGroupOutput struct { +type GetDashboardEmbedUrlOutput struct { _ struct{} `type:"structure"` + // A single-use URL that you can put into your server-side webpage to embed + // your dashboard. This URL is valid for 5 minutes. The API operation provides + // the URL with an auth_code value that enables one (and only one) sign-on to + // a user session that is valid for 10 hours. + EmbedUrl *string `type:"string" sensitive:"true"` + // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -12095,76 +21294,104 @@ type DeleteGroupOutput struct { } // String returns the string representation -func (s DeleteGroupOutput) String() string { +func (s GetDashboardEmbedUrlOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteGroupOutput) GoString() string { +func (s GetDashboardEmbedUrlOutput) GoString() string { return s.String() } +// SetEmbedUrl sets the EmbedUrl field's value. +func (s *GetDashboardEmbedUrlOutput) SetEmbedUrl(v string) *GetDashboardEmbedUrlOutput { + s.EmbedUrl = &v + return s +} + // SetRequestId sets the RequestId field's value. -func (s *DeleteGroupOutput) SetRequestId(v string) *DeleteGroupOutput { +func (s *GetDashboardEmbedUrlOutput) SetRequestId(v string) *GetDashboardEmbedUrlOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DeleteGroupOutput) SetStatus(v int64) *DeleteGroupOutput { +func (s *GetDashboardEmbedUrlOutput) SetStatus(v int64) *GetDashboardEmbedUrlOutput { s.Status = &v return s } -type DeleteIAMPolicyAssignmentInput struct { +type GetSessionEmbedUrlInput struct { _ struct{} `type:"structure"` - // The name of the assignment. - // - // AssignmentName is a required field - AssignmentName *string `location:"uri" locationName:"AssignmentName" min:"1" type:"string" required:"true"` - - // The AWS account ID where you want to delete the IAM policy assignment. + // The ID for the AWS account associated with your QuickSight subscription. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The namespace that contains the assignment. + // The URL you use to access the embedded session. 
The entry point URL is constrained + // to the following paths: // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // * /start + // + // * /start/analyses + // + // * /start/dashboards + // + // * /start/favorites + // + // * /dashboards/DashboardId - where DashboardId is the actual ID key from + // the QuickSight console URL of the dashboard + // + // * /analyses/AnalysisId - where AnalysisId is the actual ID key from the + // QuickSight console URL of the analysis + EntryPoint *string `location:"querystring" locationName:"entry-point" min:"1" type:"string"` + + // How many minutes the session is valid. The session lifetime must be 15-600 + // minutes. + SessionLifetimeInMinutes *int64 `location:"querystring" locationName:"session-lifetime" min:"15" type:"long"` + + // The Amazon QuickSight user's Amazon Resource Name (ARN), for use with QUICKSIGHT + // identity type. You can use this for any type of Amazon QuickSight users in + // your account (readers, authors, or admins). They need to be authenticated + // as one of the following: + // + // Active Directory (AD) users or group members + // + // Invited nonfederated users + // + // IAM users and IAM role-based sessions authenticated through Federated Single + // Sign-On using SAML, OpenID Connect, or IAM federation + // + // Omit this parameter for users in the third group – IAM users and IAM role-based + // sessions. + UserArn *string `location:"querystring" locationName:"user-arn" type:"string"` } // String returns the string representation -func (s DeleteIAMPolicyAssignmentInput) String() string { +func (s GetSessionEmbedUrlInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteIAMPolicyAssignmentInput) GoString() string { +func (s GetSessionEmbedUrlInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteIAMPolicyAssignmentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteIAMPolicyAssignmentInput"} - if s.AssignmentName == nil { - invalidParams.Add(request.NewErrParamRequired("AssignmentName")) - } - if s.AssignmentName != nil && len(*s.AssignmentName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AssignmentName", 1)) - } +func (s *GetSessionEmbedUrlInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionEmbedUrlInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) + if s.EntryPoint != nil && len(*s.EntryPoint) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EntryPoint", 1)) } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + if s.SessionLifetimeInMinutes != nil && *s.SessionLifetimeInMinutes < 15 { + invalidParams.Add(request.NewErrParamMinValue("SessionLifetimeInMinutes", 15)) } if invalidParams.Len() > 0 { @@ -12173,29 +21400,38 @@ func (s *DeleteIAMPolicyAssignmentInput) Validate() error { return nil } -// SetAssignmentName sets the AssignmentName field's value. 
-func (s *DeleteIAMPolicyAssignmentInput) SetAssignmentName(v string) *DeleteIAMPolicyAssignmentInput { - s.AssignmentName = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *GetSessionEmbedUrlInput) SetAwsAccountId(v string) *GetSessionEmbedUrlInput { + s.AwsAccountId = &v return s } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DeleteIAMPolicyAssignmentInput) SetAwsAccountId(v string) *DeleteIAMPolicyAssignmentInput { - s.AwsAccountId = &v +// SetEntryPoint sets the EntryPoint field's value. +func (s *GetSessionEmbedUrlInput) SetEntryPoint(v string) *GetSessionEmbedUrlInput { + s.EntryPoint = &v return s } -// SetNamespace sets the Namespace field's value. -func (s *DeleteIAMPolicyAssignmentInput) SetNamespace(v string) *DeleteIAMPolicyAssignmentInput { - s.Namespace = &v +// SetSessionLifetimeInMinutes sets the SessionLifetimeInMinutes field's value. +func (s *GetSessionEmbedUrlInput) SetSessionLifetimeInMinutes(v int64) *GetSessionEmbedUrlInput { + s.SessionLifetimeInMinutes = &v return s } -type DeleteIAMPolicyAssignmentOutput struct { +// SetUserArn sets the UserArn field's value. +func (s *GetSessionEmbedUrlInput) SetUserArn(v string) *GetSessionEmbedUrlInput { + s.UserArn = &v + return s +} + +type GetSessionEmbedUrlOutput struct { _ struct{} `type:"structure"` - // The name of the assignment. - AssignmentName *string `min:"1" type:"string"` + // A single-use URL that you can put into your server-side web page to embed + // your QuickSight session. This URL is valid for 5 minutes. The API operation + // provides the URL with an auth_code value that enables one (and only one) + // sign-on to a user session that is valid for 10 hours. + EmbedUrl *string `type:"string" sensitive:"true"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -12205,448 +21441,462 @@ type DeleteIAMPolicyAssignmentOutput struct { } // String returns the string representation -func (s DeleteIAMPolicyAssignmentOutput) String() string { +func (s GetSessionEmbedUrlOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteIAMPolicyAssignmentOutput) GoString() string { +func (s GetSessionEmbedUrlOutput) GoString() string { return s.String() } -// SetAssignmentName sets the AssignmentName field's value. -func (s *DeleteIAMPolicyAssignmentOutput) SetAssignmentName(v string) *DeleteIAMPolicyAssignmentOutput { - s.AssignmentName = &v +// SetEmbedUrl sets the EmbedUrl field's value. +func (s *GetSessionEmbedUrlOutput) SetEmbedUrl(v string) *GetSessionEmbedUrlOutput { + s.EmbedUrl = &v return s } // SetRequestId sets the RequestId field's value. -func (s *DeleteIAMPolicyAssignmentOutput) SetRequestId(v string) *DeleteIAMPolicyAssignmentOutput { +func (s *GetSessionEmbedUrlOutput) SetRequestId(v string) *GetSessionEmbedUrlOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DeleteIAMPolicyAssignmentOutput) SetStatus(v int64) *DeleteIAMPolicyAssignmentOutput { +func (s *GetSessionEmbedUrlOutput) SetStatus(v int64) *GetSessionEmbedUrlOutput { s.Status = &v return s } -type DeleteTemplateAliasInput struct { +// A group in Amazon QuickSight consists of a set of users. You can use groups +// to make it easier to manage access and security. +type Group struct { _ struct{} `type:"structure"` - // The name for the template alias. If you name a specific alias, you delete - // the version that the alias points to. 
You can specify the latest version - // of the template by providing the keyword $LATEST in the AliasName parameter. - // - // AliasName is a required field - AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` + // The Amazon Resource Name (ARN) for the group. + Arn *string `type:"string"` - // The ID of the AWS account that contains the item to delete. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + // The group description. + Description *string `min:"1" type:"string"` - // The ID for the template that the specified alias is for. - // - // TemplateId is a required field - TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + // The name of the group. + GroupName *string `min:"1" type:"string"` + + // The principal ID of the group. + PrincipalId *string `type:"string"` } // String returns the string representation -func (s DeleteTemplateAliasInput) String() string { +func (s Group) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteTemplateAliasInput) GoString() string { +func (s Group) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteTemplateAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTemplateAliasInput"} - if s.AliasName == nil { - invalidParams.Add(request.NewErrParamRequired("AliasName")) - } - if s.AliasName != nil && len(*s.AliasName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) - } - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.TemplateId == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateId")) - } - if s.TemplateId != nil && len(*s.TemplateId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) - } +// SetArn sets the Arn field's value. +func (s *Group) SetArn(v string) *Group { + s.Arn = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Group) SetDescription(v string) *Group { + s.Description = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *Group) SetGroupName(v string) *Group { + s.GroupName = &v + return s +} + +// SetPrincipalId sets the PrincipalId field's value. +func (s *Group) SetPrincipalId(v string) *Group { + s.PrincipalId = &v + return s +} + +// A member of an Amazon QuickSight group. Currently, group members must be +// users. Groups can't be members of another group. . +type GroupMember struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) for the group member (user). + Arn *string `type:"string"` + + // The name of the group member (user). + MemberName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GroupMember) String() string { + return awsutil.Prettify(s) +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// GoString returns the string representation +func (s GroupMember) GoString() string { + return s.String() } -// SetAliasName sets the AliasName field's value. 
-func (s *DeleteTemplateAliasInput) SetAliasName(v string) *DeleteTemplateAliasInput { - s.AliasName = &v +// SetArn sets the Arn field's value. +func (s *GroupMember) SetArn(v string) *GroupMember { + s.Arn = &v return s } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DeleteTemplateAliasInput) SetAwsAccountId(v string) *DeleteTemplateAliasInput { - s.AwsAccountId = &v +// SetMemberName sets the MemberName field's value. +func (s *GroupMember) SetMemberName(v string) *GroupMember { + s.MemberName = &v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *DeleteTemplateAliasInput) SetTemplateId(v string) *DeleteTemplateAliasInput { - s.TemplateId = &v +// The display options for gutter spacing between tiles on a sheet. +type GutterStyle struct { + _ struct{} `type:"structure"` + + // This Boolean value controls whether to display a gutter space between sheet + // tiles. + Show *bool `type:"boolean"` +} + +// String returns the string representation +func (s GutterStyle) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GutterStyle) GoString() string { + return s.String() +} + +// SetShow sets the Show field's value. +func (s *GutterStyle) SetShow(v bool) *GutterStyle { + s.Show = &v return s } -type DeleteTemplateAliasOutput struct { +// An AWS Identity and Access Management (IAM) policy assignment. +type IAMPolicyAssignment struct { _ struct{} `type:"structure"` - // The name for the template alias. - AliasName *string `min:"1" type:"string"` + // Assignment ID. + AssignmentId *string `type:"string"` - // The Amazon Resource Name (ARN) of the resource. - Arn *string `type:"string"` + // Assignment name. + AssignmentName *string `min:"1" type:"string"` - // The AWS request ID for this operation. - RequestId *string `type:"string"` + // Assignment status. + AssignmentStatus *string `type:"string" enum:"AssignmentStatus"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // The AWS account ID. + AwsAccountId *string `min:"12" type:"string"` - // An ID for the template associated with the deletion. - TemplateId *string `min:"1" type:"string"` + // Identities. + Identities map[string][]*string `type:"map"` + + // The Amazon Resource Name (ARN) for the IAM policy. + PolicyArn *string `type:"string"` } // String returns the string representation -func (s DeleteTemplateAliasOutput) String() string { +func (s IAMPolicyAssignment) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteTemplateAliasOutput) GoString() string { +func (s IAMPolicyAssignment) GoString() string { return s.String() } -// SetAliasName sets the AliasName field's value. -func (s *DeleteTemplateAliasOutput) SetAliasName(v string) *DeleteTemplateAliasOutput { - s.AliasName = &v +// SetAssignmentId sets the AssignmentId field's value. +func (s *IAMPolicyAssignment) SetAssignmentId(v string) *IAMPolicyAssignment { + s.AssignmentId = &v return s } -// SetArn sets the Arn field's value. -func (s *DeleteTemplateAliasOutput) SetArn(v string) *DeleteTemplateAliasOutput { - s.Arn = &v +// SetAssignmentName sets the AssignmentName field's value. +func (s *IAMPolicyAssignment) SetAssignmentName(v string) *IAMPolicyAssignment { + s.AssignmentName = &v return s } -// SetRequestId sets the RequestId field's value. 
-func (s *DeleteTemplateAliasOutput) SetRequestId(v string) *DeleteTemplateAliasOutput { - s.RequestId = &v +// SetAssignmentStatus sets the AssignmentStatus field's value. +func (s *IAMPolicyAssignment) SetAssignmentStatus(v string) *IAMPolicyAssignment { + s.AssignmentStatus = &v return s } -// SetStatus sets the Status field's value. -func (s *DeleteTemplateAliasOutput) SetStatus(v int64) *DeleteTemplateAliasOutput { - s.Status = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *IAMPolicyAssignment) SetAwsAccountId(v string) *IAMPolicyAssignment { + s.AwsAccountId = &v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *DeleteTemplateAliasOutput) SetTemplateId(v string) *DeleteTemplateAliasOutput { - s.TemplateId = &v +// SetIdentities sets the Identities field's value. +func (s *IAMPolicyAssignment) SetIdentities(v map[string][]*string) *IAMPolicyAssignment { + s.Identities = v return s } -type DeleteTemplateInput struct { - _ struct{} `type:"structure"` +// SetPolicyArn sets the PolicyArn field's value. +func (s *IAMPolicyAssignment) SetPolicyArn(v string) *IAMPolicyAssignment { + s.PolicyArn = &v + return s +} - // The ID of the AWS account that contains the template that you're deleting. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` +// IAM policy assignment summary. +type IAMPolicyAssignmentSummary struct { + _ struct{} `type:"structure"` - // An ID for the template you want to delete. - // - // TemplateId is a required field - TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + // Assignment name. + AssignmentName *string `min:"1" type:"string"` - // Specifies the version of the template that you want to delete. If you don't - // provide a version number, DeleteTemplate deletes all versions of the template. - VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` + // Assignment status. + AssignmentStatus *string `type:"string" enum:"AssignmentStatus"` } // String returns the string representation -func (s DeleteTemplateInput) String() string { +func (s IAMPolicyAssignmentSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteTemplateInput) GoString() string { +func (s IAMPolicyAssignmentSummary) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteTemplateInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTemplateInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.TemplateId == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateId")) - } - if s.TemplateId != nil && len(*s.TemplateId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) - } - if s.VersionNumber != nil && *s.VersionNumber < 1 { - invalidParams.Add(request.NewErrParamMinValue("VersionNumber", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAwsAccountId sets the AwsAccountId field's value. 
-func (s *DeleteTemplateInput) SetAwsAccountId(v string) *DeleteTemplateInput { - s.AwsAccountId = &v - return s -} - -// SetTemplateId sets the TemplateId field's value. -func (s *DeleteTemplateInput) SetTemplateId(v string) *DeleteTemplateInput { - s.TemplateId = &v +// SetAssignmentName sets the AssignmentName field's value. +func (s *IAMPolicyAssignmentSummary) SetAssignmentName(v string) *IAMPolicyAssignmentSummary { + s.AssignmentName = &v return s } -// SetVersionNumber sets the VersionNumber field's value. -func (s *DeleteTemplateInput) SetVersionNumber(v int64) *DeleteTemplateInput { - s.VersionNumber = &v +// SetAssignmentStatus sets the AssignmentStatus field's value. +func (s *IAMPolicyAssignmentSummary) SetAssignmentStatus(v string) *IAMPolicyAssignmentSummary { + s.AssignmentStatus = &v return s } -type DeleteTemplateOutput struct { - _ struct{} `type:"structure"` +// The identity type specified isn't supported. Supported identity types include +// IAM and QUICKSIGHT. +type IdentityTypeNotSupportedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The Amazon Resource Name (ARN) of the resource. - Arn *string `type:"string"` + Message_ *string `locationName:"Message" type:"string"` - // The AWS request ID for this operation. + // The AWS request ID for this request. RequestId *string `type:"string"` - - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` - - // An ID for the template. - TemplateId *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteTemplateOutput) String() string { +func (s IdentityTypeNotSupportedException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteTemplateOutput) GoString() string { +func (s IdentityTypeNotSupportedException) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *DeleteTemplateOutput) SetArn(v string) *DeleteTemplateOutput { - s.Arn = &v - return s +func newErrorIdentityTypeNotSupportedException(v protocol.ResponseMetadata) error { + return &IdentityTypeNotSupportedException{ + RespMetadata: v, + } } -// SetRequestId sets the RequestId field's value. -func (s *DeleteTemplateOutput) SetRequestId(v string) *DeleteTemplateOutput { - s.RequestId = &v - return s +// Code returns the exception type name. +func (s *IdentityTypeNotSupportedException) Code() string { + return "IdentityTypeNotSupportedException" } -// SetStatus sets the Status field's value. -func (s *DeleteTemplateOutput) SetStatus(v int64) *DeleteTemplateOutput { - s.Status = &v - return s +// Message returns the exception's message. +func (s *IdentityTypeNotSupportedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -// SetTemplateId sets the TemplateId field's value. -func (s *DeleteTemplateOutput) SetTemplateId(v string) *DeleteTemplateOutput { - s.TemplateId = &v - return s +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *IdentityTypeNotSupportedException) OrigErr() error { + return nil } -type DeleteUserByPrincipalIdInput struct { +func (s *IdentityTypeNotSupportedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. 
+func (s *IdentityTypeNotSupportedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *IdentityTypeNotSupportedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Information about the SPICE ingestion for a dataset. +type Ingestion struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the user is in. Currently, you use the ID - // for the AWS account that contains your Amazon QuickSight account. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + // The Amazon Resource Name (ARN) of the resource. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` + + // The time that this ingestion started. + // + // CreatedTime is a required field + CreatedTime *time.Time `type:"timestamp" required:"true"` + + // Error information for this ingestion. + ErrorInfo *ErrorInfo `type:"structure"` + + // Ingestion ID. + IngestionId *string `min:"1" type:"string"` + + // The size of the data ingested, in bytes. + IngestionSizeInBytes *int64 `type:"long"` + + // Ingestion status. + // + // IngestionStatus is a required field + IngestionStatus *string `type:"string" required:"true" enum:"IngestionStatus"` + + // The time that this ingestion took, measured in seconds. + IngestionTimeInSeconds *int64 `type:"long"` + + // Information about a queued dataset SPICE ingestion. + QueueInfo *QueueInfo `type:"structure"` + + // Event source for this ingestion. + RequestSource *string `type:"string" enum:"IngestionRequestSource"` - // The namespace. Currently, you should set this to default. - // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // Type of this ingestion. + RequestType *string `type:"string" enum:"IngestionRequestType"` - // The principal ID of the user. - // - // PrincipalId is a required field - PrincipalId *string `location:"uri" locationName:"PrincipalId" type:"string" required:"true"` + // Information about rows for a data set SPICE ingestion. + RowInfo *RowInfo `type:"structure"` } // String returns the string representation -func (s DeleteUserByPrincipalIdInput) String() string { +func (s Ingestion) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteUserByPrincipalIdInput) GoString() string { +func (s Ingestion) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteUserByPrincipalIdInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteUserByPrincipalIdInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) - } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) - } - if s.PrincipalId == nil { - invalidParams.Add(request.NewErrParamRequired("PrincipalId")) - } - if s.PrincipalId != nil && len(*s.PrincipalId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PrincipalId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetArn sets the Arn field's value. +func (s *Ingestion) SetArn(v string) *Ingestion { + s.Arn = &v + return s } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DeleteUserByPrincipalIdInput) SetAwsAccountId(v string) *DeleteUserByPrincipalIdInput { - s.AwsAccountId = &v +// SetCreatedTime sets the CreatedTime field's value. +func (s *Ingestion) SetCreatedTime(v time.Time) *Ingestion { + s.CreatedTime = &v return s } -// SetNamespace sets the Namespace field's value. -func (s *DeleteUserByPrincipalIdInput) SetNamespace(v string) *DeleteUserByPrincipalIdInput { - s.Namespace = &v +// SetErrorInfo sets the ErrorInfo field's value. +func (s *Ingestion) SetErrorInfo(v *ErrorInfo) *Ingestion { + s.ErrorInfo = v return s } -// SetPrincipalId sets the PrincipalId field's value. -func (s *DeleteUserByPrincipalIdInput) SetPrincipalId(v string) *DeleteUserByPrincipalIdInput { - s.PrincipalId = &v +// SetIngestionId sets the IngestionId field's value. +func (s *Ingestion) SetIngestionId(v string) *Ingestion { + s.IngestionId = &v return s } -type DeleteUserByPrincipalIdOutput struct { - _ struct{} `type:"structure"` +// SetIngestionSizeInBytes sets the IngestionSizeInBytes field's value. +func (s *Ingestion) SetIngestionSizeInBytes(v int64) *Ingestion { + s.IngestionSizeInBytes = &v + return s +} - // The AWS request ID for this operation. - RequestId *string `type:"string"` +// SetIngestionStatus sets the IngestionStatus field's value. +func (s *Ingestion) SetIngestionStatus(v string) *Ingestion { + s.IngestionStatus = &v + return s +} - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` +// SetIngestionTimeInSeconds sets the IngestionTimeInSeconds field's value. +func (s *Ingestion) SetIngestionTimeInSeconds(v int64) *Ingestion { + s.IngestionTimeInSeconds = &v + return s } -// String returns the string representation -func (s DeleteUserByPrincipalIdOutput) String() string { - return awsutil.Prettify(s) +// SetQueueInfo sets the QueueInfo field's value. +func (s *Ingestion) SetQueueInfo(v *QueueInfo) *Ingestion { + s.QueueInfo = v + return s } -// GoString returns the string representation -func (s DeleteUserByPrincipalIdOutput) GoString() string { - return s.String() +// SetRequestSource sets the RequestSource field's value. +func (s *Ingestion) SetRequestSource(v string) *Ingestion { + s.RequestSource = &v + return s } -// SetRequestId sets the RequestId field's value. -func (s *DeleteUserByPrincipalIdOutput) SetRequestId(v string) *DeleteUserByPrincipalIdOutput { - s.RequestId = &v +// SetRequestType sets the RequestType field's value. 
+func (s *Ingestion) SetRequestType(v string) *Ingestion { + s.RequestType = &v return s } -// SetStatus sets the Status field's value. -func (s *DeleteUserByPrincipalIdOutput) SetStatus(v int64) *DeleteUserByPrincipalIdOutput { - s.Status = &v +// SetRowInfo sets the RowInfo field's value. +func (s *Ingestion) SetRowInfo(v *RowInfo) *Ingestion { + s.RowInfo = v return s } -type DeleteUserInput struct { +// Metadata for a column that is used as the input of a transform operation. +type InputColumn struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the user is in. Currently, you use the ID - // for the AWS account that contains your Amazon QuickSight account. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - - // The namespace. Currently, you should set this to default. + // The name of this column in the underlying data source. // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` - // The name of the user that you want to delete. + // The data type of the column. // - // UserName is a required field - UserName *string `location:"uri" locationName:"UserName" min:"1" type:"string" required:"true"` + // Type is a required field + Type *string `type:"string" required:"true" enum:"InputColumnDataType"` } // String returns the string representation -func (s DeleteUserInput) String() string { +func (s InputColumn) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteUserInput) GoString() string { +func (s InputColumn) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteUserInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteUserInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) - } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) +func (s *InputColumn) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InputColumn"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.UserName == nil { - invalidParams.Add(request.NewErrParamRequired("UserName")) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.UserName != nil && len(*s.UserName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) } if invalidParams.Len() > 0 { @@ -12655,337 +21905,348 @@ func (s *DeleteUserInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DeleteUserInput) SetAwsAccountId(v string) *DeleteUserInput { - s.AwsAccountId = &v - return s -} - -// SetNamespace sets the Namespace field's value. -func (s *DeleteUserInput) SetNamespace(v string) *DeleteUserInput { - s.Namespace = &v +// SetName sets the Name field's value. 
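The Ingestion shape above describes one SPICE refresh of a dataset. A minimal sketch of reading it, assuming the DescribeIngestion operation wrapper and its input/output shapes generated elsewhere in this file, with placeholder account, dataset, and ingestion IDs:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	svc := quicksight.New(session.Must(session.NewSession()))

	// Placeholder identifiers; substitute real values.
	out, err := svc.DescribeIngestion(&quicksight.DescribeIngestionInput{
		AwsAccountId: aws.String("111122223333"),
		DataSetId:    aws.String("example-data-set-id"),
		IngestionId:  aws.String("example-ingestion-id"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Ingestion carries the status, timing, and size of the SPICE refresh.
	ing := out.Ingestion
	fmt.Println("status:", aws.StringValue(ing.IngestionStatus))
	fmt.Println("bytes ingested:", aws.Int64Value(ing.IngestionSizeInBytes))
	if ing.ErrorInfo != nil {
		fmt.Println("error info:", ing.ErrorInfo)
	}
}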
+func (s *InputColumn) SetName(v string) *InputColumn { + s.Name = &v return s } -// SetUserName sets the UserName field's value. -func (s *DeleteUserInput) SetUserName(v string) *DeleteUserInput { - s.UserName = &v +// SetType sets the Type field's value. +func (s *InputColumn) SetType(v string) *InputColumn { + s.Type = &v return s } -type DeleteUserOutput struct { +// An integer parameter. +type IntegerParameter struct { _ struct{} `type:"structure"` - // The AWS request ID for this operation. - RequestId *string `type:"string"` + // The name of the integer parameter. + // + // Name is a required field + Name *string `type:"string" required:"true"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // The values for the integer parameter. + // + // Values is a required field + Values []*int64 `type:"list" required:"true"` } // String returns the string representation -func (s DeleteUserOutput) String() string { +func (s IntegerParameter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteUserOutput) GoString() string { +func (s IntegerParameter) GoString() string { return s.String() } -// SetRequestId sets the RequestId field's value. -func (s *DeleteUserOutput) SetRequestId(v string) *DeleteUserOutput { - s.RequestId = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntegerParameter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntegerParameter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetStatus sets the Status field's value. -func (s *DeleteUserOutput) SetStatus(v int64) *DeleteUserOutput { - s.Status = &v +// SetName sets the Name field's value. +func (s *IntegerParameter) SetName(v string) *IntegerParameter { + s.Name = &v return s } -type DescribeDashboardInput struct { - _ struct{} `type:"structure"` - - // The alias name. - AliasName *string `location:"querystring" locationName:"alias-name" min:"1" type:"string"` +// SetValues sets the Values field's value. +func (s *IntegerParameter) SetValues(v []*int64) *IntegerParameter { + s.Values = v + return s +} - // The ID of the AWS account that contains the dashboard that you're describing. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` +// An internal failure occurred. +type InternalFailureException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The ID for the dashboard. - // - // DashboardId is a required field - DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + Message_ *string `locationName:"Message" type:"string"` - // The version number for the dashboard. If a version number isn't passed, the - // latest published dashboard version is described. - VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` + // The AWS request ID for this request. 
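InputColumn and IntegerParameter are plain request shapes: populate the required fields with the generated setters and call Validate before sending them. A minimal sketch, assuming "STRING" is one of the accepted InputColumnDataType enum values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	// A column as it appears in the underlying data source; "STRING" is
	// assumed to be a valid InputColumnDataType value.
	col := (&quicksight.InputColumn{}).
		SetName("customer_id").
		SetType("STRING")
	if err := col.Validate(); err != nil {
		fmt.Println("invalid column:", err)
		return
	}

	// An integer parameter with its required Name and Values.
	param := (&quicksight.IntegerParameter{}).
		SetName("year").
		SetValues([]*int64{aws.Int64(2021)})
	if err := param.Validate(); err != nil {
		fmt.Println("invalid parameter:", err)
		return
	}

	fmt.Println(col, param)
}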
+ RequestId *string `type:"string"` } // String returns the string representation -func (s DescribeDashboardInput) String() string { +func (s InternalFailureException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDashboardInput) GoString() string { +func (s InternalFailureException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeDashboardInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeDashboardInput"} - if s.AliasName != nil && len(*s.AliasName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) - } - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.DashboardId == nil { - invalidParams.Add(request.NewErrParamRequired("DashboardId")) - } - if s.DashboardId != nil && len(*s.DashboardId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) - } - if s.VersionNumber != nil && *s.VersionNumber < 1 { - invalidParams.Add(request.NewErrParamMinValue("VersionNumber", 1)) +func newErrorInternalFailureException(v protocol.ResponseMetadata) error { + return &InternalFailureException{ + RespMetadata: v, } +} - if invalidParams.Len() > 0 { - return invalidParams +// Code returns the exception type name. +func (s *InternalFailureException) Code() string { + return "InternalFailureException" +} + +// Message returns the exception's message. +func (s *InternalFailureException) Message() string { + if s.Message_ != nil { + return *s.Message_ } - return nil + return "" } -// SetAliasName sets the AliasName field's value. -func (s *DescribeDashboardInput) SetAliasName(v string) *DescribeDashboardInput { - s.AliasName = &v - return s +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalFailureException) OrigErr() error { + return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DescribeDashboardInput) SetAwsAccountId(v string) *DescribeDashboardInput { - s.AwsAccountId = &v - return s +func (s *InternalFailureException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } -// SetDashboardId sets the DashboardId field's value. -func (s *DescribeDashboardInput) SetDashboardId(v string) *DescribeDashboardInput { - s.DashboardId = &v - return s +// Status code returns the HTTP status code for the request's response error. +func (s *InternalFailureException) StatusCode() int { + return s.RespMetadata.StatusCode } -// SetVersionNumber sets the VersionNumber field's value. -func (s *DescribeDashboardInput) SetVersionNumber(v int64) *DescribeDashboardInput { - s.VersionNumber = &v - return s +// RequestID returns the service's response RequestID for request. +func (s *InternalFailureException) RequestID() string { + return s.RespMetadata.RequestID } -type DescribeDashboardOutput struct { - _ struct{} `type:"structure"` +// The NextToken value isn't valid. +type InvalidNextTokenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // Information about the dashboard. - Dashboard *Dashboard `type:"structure"` + Message_ *string `locationName:"Message" type:"string"` - // The AWS request ID for this operation. + // The AWS request ID for this request. 
RequestId *string `type:"string"` - - // The HTTP status of this request. - Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s DescribeDashboardOutput) String() string { +func (s InvalidNextTokenException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDashboardOutput) GoString() string { +func (s InvalidNextTokenException) GoString() string { return s.String() } -// SetDashboard sets the Dashboard field's value. -func (s *DescribeDashboardOutput) SetDashboard(v *Dashboard) *DescribeDashboardOutput { - s.Dashboard = v - return s +func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { + return &InvalidNextTokenException{ + RespMetadata: v, + } } -// SetRequestId sets the RequestId field's value. -func (s *DescribeDashboardOutput) SetRequestId(v string) *DescribeDashboardOutput { - s.RequestId = &v - return s +// Code returns the exception type name. +func (s *InvalidNextTokenException) Code() string { + return "InvalidNextTokenException" } -// SetStatus sets the Status field's value. -func (s *DescribeDashboardOutput) SetStatus(v int64) *DescribeDashboardOutput { - s.Status = &v - return s +// Message returns the exception's message. +func (s *InvalidNextTokenException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -type DescribeDashboardPermissionsInput struct { - _ struct{} `type:"structure"` +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidNextTokenException) OrigErr() error { + return nil +} - // The ID of the AWS account that contains the dashboard that you're describing - // permissions for. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` +func (s *InvalidNextTokenException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} - // The ID for the dashboard, also added to the IAM policy. - // - // DashboardId is a required field - DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID +} + +// One or more parameters has a value that isn't valid. +type InvalidParameterValueException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + // The AWS request ID for this request. + RequestId *string `type:"string"` } // String returns the string representation -func (s DescribeDashboardPermissionsInput) String() string { +func (s InvalidParameterValueException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDashboardPermissionsInput) GoString() string { +func (s InvalidParameterValueException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeDashboardPermissionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeDashboardPermissionsInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.DashboardId == nil { - invalidParams.Add(request.NewErrParamRequired("DashboardId")) - } - if s.DashboardId != nil && len(*s.DashboardId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams +func newErrorInvalidParameterValueException(v protocol.ResponseMetadata) error { + return &InvalidParameterValueException{ + RespMetadata: v, } - return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DescribeDashboardPermissionsInput) SetAwsAccountId(v string) *DescribeDashboardPermissionsInput { - s.AwsAccountId = &v - return s +// Code returns the exception type name. +func (s *InvalidParameterValueException) Code() string { + return "InvalidParameterValueException" } -// SetDashboardId sets the DashboardId field's value. -func (s *DescribeDashboardPermissionsInput) SetDashboardId(v string) *DescribeDashboardPermissionsInput { - s.DashboardId = &v - return s +// Message returns the exception's message. +func (s *InvalidParameterValueException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -type DescribeDashboardPermissionsOutput struct { - _ struct{} `type:"structure"` +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidParameterValueException) OrigErr() error { + return nil +} - // The Amazon Resource Name (ARN) of the dashboard. - DashboardArn *string `type:"string"` +func (s *InvalidParameterValueException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} - // The ID for the dashboard. - DashboardId *string `min:"1" type:"string"` +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidParameterValueException) StatusCode() int { + return s.RespMetadata.StatusCode +} - // A structure that contains the permissions for the dashboard. - Permissions []*ResourcePermission `min:"1" type:"list"` +// RequestID returns the service's response RequestID for request. +func (s *InvalidParameterValueException) RequestID() string { + return s.RespMetadata.RequestID +} - // The AWS request ID for this operation. - RequestId *string `type:"string"` +// Jira parameters. +type JiraParameters struct { + _ struct{} `type:"structure"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // The base URL of the Jira site. + // + // SiteBaseUrl is a required field + SiteBaseUrl *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DescribeDashboardPermissionsOutput) String() string { +func (s JiraParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDashboardPermissionsOutput) GoString() string { +func (s JiraParameters) GoString() string { return s.String() } -// SetDashboardArn sets the DashboardArn field's value. -func (s *DescribeDashboardPermissionsOutput) SetDashboardArn(v string) *DescribeDashboardPermissionsOutput { - s.DashboardArn = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. 
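The exception types above (InternalFailureException, InvalidNextTokenException, InvalidParameterValueException) expose Code, Message, OrigErr, and Error, so they satisfy the awserr.Error interface and can be matched by their Code values. A minimal sketch of branching on them, assuming the ListDashboards operation wrapper and input shape defined later in this file, with a placeholder account ID and a deliberately stale pagination token:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	svc := quicksight.New(session.Must(session.NewSession()))

	_, err := svc.ListDashboards(&quicksight.ListDashboardsInput{
		AwsAccountId: aws.String("111122223333"), // placeholder account ID
		NextToken:    aws.String("possibly-stale-token"),
	})
	if err != nil {
		// The generated exception types implement Code/Message/OrigErr/Error,
		// so they can be distinguished through the awserr.Error interface.
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case "InvalidNextTokenException":
				fmt.Println("pagination token is no longer valid; restart the listing")
			case "InvalidParameterValueException":
				fmt.Println("a request parameter was rejected:", aerr.Message())
			case "InternalFailureException":
				fmt.Println("internal service failure; retry later")
			default:
				fmt.Println("unhandled QuickSight error:", aerr.Code())
			}
			return
		}
		log.Fatal(err)
	}
}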
+func (s *JiraParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JiraParameters"} + if s.SiteBaseUrl == nil { + invalidParams.Add(request.NewErrParamRequired("SiteBaseUrl")) + } + if s.SiteBaseUrl != nil && len(*s.SiteBaseUrl) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SiteBaseUrl", 1)) + } -// SetDashboardId sets the DashboardId field's value. -func (s *DescribeDashboardPermissionsOutput) SetDashboardId(v string) *DescribeDashboardPermissionsOutput { - s.DashboardId = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetPermissions sets the Permissions field's value. -func (s *DescribeDashboardPermissionsOutput) SetPermissions(v []*ResourcePermission) *DescribeDashboardPermissionsOutput { - s.Permissions = v +// SetSiteBaseUrl sets the SiteBaseUrl field's value. +func (s *JiraParameters) SetSiteBaseUrl(v string) *JiraParameters { + s.SiteBaseUrl = &v return s } -// SetRequestId sets the RequestId field's value. -func (s *DescribeDashboardPermissionsOutput) SetRequestId(v string) *DescribeDashboardPermissionsOutput { - s.RequestId = &v - return s -} +// Join instruction. +type JoinInstruction struct { + _ struct{} `type:"structure"` -// SetStatus sets the Status field's value. -func (s *DescribeDashboardPermissionsOutput) SetStatus(v int64) *DescribeDashboardPermissionsOutput { - s.Status = &v - return s -} + // Left operand. + // + // LeftOperand is a required field + LeftOperand *string `min:"1" type:"string" required:"true"` -type DescribeDataSetInput struct { - _ struct{} `type:"structure"` + // On Clause. + // + // OnClause is a required field + OnClause *string `min:"1" type:"string" required:"true"` - // The AWS account ID. + // Right operand. // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + // RightOperand is a required field + RightOperand *string `min:"1" type:"string" required:"true"` - // The ID for the dataset that you want to create. This ID is unique per AWS - // Region for each AWS account. + // Type. // - // DataSetId is a required field - DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + // Type is a required field + Type *string `type:"string" required:"true" enum:"JoinType"` } // String returns the string representation -func (s DescribeDataSetInput) String() string { +func (s JoinInstruction) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDataSetInput) GoString() string { +func (s JoinInstruction) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeDataSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeDataSetInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) +func (s *JoinInstruction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JoinInstruction"} + if s.LeftOperand == nil { + invalidParams.Add(request.NewErrParamRequired("LeftOperand")) } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + if s.LeftOperand != nil && len(*s.LeftOperand) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LeftOperand", 1)) } - if s.DataSetId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetId")) + if s.OnClause == nil { + invalidParams.Add(request.NewErrParamRequired("OnClause")) } - if s.DataSetId != nil && len(*s.DataSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + if s.OnClause != nil && len(*s.OnClause) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OnClause", 1)) + } + if s.RightOperand == nil { + invalidParams.Add(request.NewErrParamRequired("RightOperand")) + } + if s.RightOperand != nil && len(*s.RightOperand) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RightOperand", 1)) + } + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) } if invalidParams.Len() > 0 { @@ -12994,98 +22255,128 @@ func (s *DescribeDataSetInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DescribeDataSetInput) SetAwsAccountId(v string) *DescribeDataSetInput { - s.AwsAccountId = &v +// SetLeftOperand sets the LeftOperand field's value. +func (s *JoinInstruction) SetLeftOperand(v string) *JoinInstruction { + s.LeftOperand = &v return s } -// SetDataSetId sets the DataSetId field's value. -func (s *DescribeDataSetInput) SetDataSetId(v string) *DescribeDataSetInput { - s.DataSetId = &v +// SetOnClause sets the OnClause field's value. +func (s *JoinInstruction) SetOnClause(v string) *JoinInstruction { + s.OnClause = &v return s } -type DescribeDataSetOutput struct { - _ struct{} `type:"structure"` +// SetRightOperand sets the RightOperand field's value. +func (s *JoinInstruction) SetRightOperand(v string) *JoinInstruction { + s.RightOperand = &v + return s +} - // Information on the dataset. - DataSet *DataSet `type:"structure"` +// SetType sets the Type field's value. +func (s *JoinInstruction) SetType(v string) *JoinInstruction { + s.Type = &v + return s +} - // The AWS request ID for this operation. +// A limit is exceeded. +type LimitExceededException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + // The AWS request ID for this request. RequestId *string `type:"string"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // Limit exceeded. + ResourceType *string `type:"string" enum:"ExceptionResourceType"` } // String returns the string representation -func (s DescribeDataSetOutput) String() string { +func (s LimitExceededException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDataSetOutput) GoString() string { +func (s LimitExceededException) GoString() string { return s.String() } -// SetDataSet sets the DataSet field's value. 
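JiraParameters and JoinInstruction follow the same build-then-Validate pattern as the other request shapes. A minimal sketch, assuming "INNER" is one of the accepted JoinType enum values and using placeholder logical table operands:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	// Connection parameters for a Jira data source.
	jira := (&quicksight.JiraParameters{}).
		SetSiteBaseUrl("https://example.atlassian.net")
	if err := jira.Validate(); err != nil {
		log.Fatal(err)
	}

	// A join between two logical tables; the operands are placeholder logical
	// table IDs and "INNER" is assumed to be a valid JoinType value.
	join := (&quicksight.JoinInstruction{}).
		SetLeftOperand("orders").
		SetOnClause("orders.customer_id = customers.customer_id").
		SetRightOperand("customers").
		SetType("INNER")
	if err := join.Validate(); err != nil {
		log.Fatal(err)
	}

	fmt.Println(jira, join)
}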
-func (s *DescribeDataSetOutput) SetDataSet(v *DataSet) *DescribeDataSetOutput { - s.DataSet = v - return s +func newErrorLimitExceededException(v protocol.ResponseMetadata) error { + return &LimitExceededException{ + RespMetadata: v, + } } -// SetRequestId sets the RequestId field's value. -func (s *DescribeDataSetOutput) SetRequestId(v string) *DescribeDataSetOutput { - s.RequestId = &v - return s +// Code returns the exception type name. +func (s *LimitExceededException) Code() string { + return "LimitExceededException" } -// SetStatus sets the Status field's value. -func (s *DescribeDataSetOutput) SetStatus(v int64) *DescribeDataSetOutput { - s.Status = &v - return s +// Message returns the exception's message. +func (s *LimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -type DescribeDataSetPermissionsInput struct { +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LimitExceededException) OrigErr() error { + return nil +} + +func (s *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListAnalysesInput struct { _ struct{} `type:"structure"` - // The AWS account ID. + // The ID of the AWS account that contains the analyses. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID for the dataset that you want to create. This ID is unique per AWS - // Region for each AWS account. - // - // DataSetId is a required field - DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + // The maximum number of results to return. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // A pagination token that can be used in a subsequent request. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s DescribeDataSetPermissionsInput) String() string { +func (s ListAnalysesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDataSetPermissionsInput) GoString() string { +func (s ListAnalysesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeDataSetPermissionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeDataSetPermissionsInput"} +func (s *ListAnalysesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAnalysesInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSetId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetId")) - } - if s.DataSetId != nil && len(*s.DataSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -13095,29 +22386,31 @@ func (s *DescribeDataSetPermissionsInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *DescribeDataSetPermissionsInput) SetAwsAccountId(v string) *DescribeDataSetPermissionsInput { +func (s *ListAnalysesInput) SetAwsAccountId(v string) *ListAnalysesInput { s.AwsAccountId = &v return s } -// SetDataSetId sets the DataSetId field's value. -func (s *DescribeDataSetPermissionsInput) SetDataSetId(v string) *DescribeDataSetPermissionsInput { - s.DataSetId = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListAnalysesInput) SetMaxResults(v int64) *ListAnalysesInput { + s.MaxResults = &v return s } -type DescribeDataSetPermissionsOutput struct { - _ struct{} `type:"structure"` +// SetNextToken sets the NextToken field's value. +func (s *ListAnalysesInput) SetNextToken(v string) *ListAnalysesInput { + s.NextToken = &v + return s +} - // The Amazon Resource Name (ARN) of the dataset. - DataSetArn *string `type:"string"` +type ListAnalysesOutput struct { + _ struct{} `type:"structure"` - // The ID for the dataset that you want to create. This ID is unique per AWS - // Region for each AWS account. - DataSetId *string `type:"string"` + // Metadata describing each of the analyses that are listed. + AnalysisSummaryList []*AnalysisSummary `type:"list"` - // A list of resource permissions on the dataset. - Permissions []*ResourcePermission `min:"1" type:"list"` + // A pagination token that can be used in a subsequent request. + NextToken *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -13127,84 +22420,87 @@ type DescribeDataSetPermissionsOutput struct { } // String returns the string representation -func (s DescribeDataSetPermissionsOutput) String() string { +func (s ListAnalysesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDataSetPermissionsOutput) GoString() string { +func (s ListAnalysesOutput) GoString() string { return s.String() } -// SetDataSetArn sets the DataSetArn field's value. -func (s *DescribeDataSetPermissionsOutput) SetDataSetArn(v string) *DescribeDataSetPermissionsOutput { - s.DataSetArn = &v - return s -} - -// SetDataSetId sets the DataSetId field's value. -func (s *DescribeDataSetPermissionsOutput) SetDataSetId(v string) *DescribeDataSetPermissionsOutput { - s.DataSetId = &v +// SetAnalysisSummaryList sets the AnalysisSummaryList field's value. +func (s *ListAnalysesOutput) SetAnalysisSummaryList(v []*AnalysisSummary) *ListAnalysesOutput { + s.AnalysisSummaryList = v return s } -// SetPermissions sets the Permissions field's value. 
-func (s *DescribeDataSetPermissionsOutput) SetPermissions(v []*ResourcePermission) *DescribeDataSetPermissionsOutput { - s.Permissions = v +// SetNextToken sets the NextToken field's value. +func (s *ListAnalysesOutput) SetNextToken(v string) *ListAnalysesOutput { + s.NextToken = &v return s } // SetRequestId sets the RequestId field's value. -func (s *DescribeDataSetPermissionsOutput) SetRequestId(v string) *DescribeDataSetPermissionsOutput { +func (s *ListAnalysesOutput) SetRequestId(v string) *ListAnalysesOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DescribeDataSetPermissionsOutput) SetStatus(v int64) *DescribeDataSetPermissionsOutput { +func (s *ListAnalysesOutput) SetStatus(v int64) *ListAnalysesOutput { s.Status = &v return s } -type DescribeDataSourceInput struct { +type ListDashboardVersionsInput struct { _ struct{} `type:"structure"` - // The AWS account ID. + // The ID of the AWS account that contains the dashboard that you're listing + // versions for. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID of the data source. This ID is unique per AWS Region for each AWS - // account. + // The ID for the dashboard. // - // DataSourceId is a required field - DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s DescribeDataSourceInput) String() string { +func (s ListDashboardVersionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDataSourceInput) GoString() string { +func (s ListDashboardVersionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeDataSourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeDataSourceInput"} +func (s *ListDashboardVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDashboardVersionsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSourceId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSourceId")) + if s.DashboardId == nil { + invalidParams.Add(request.NewErrParamRequired("DashboardId")) } - if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -13214,22 +22510,37 @@ func (s *DescribeDataSourceInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. 
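ListAnalysesInput and ListAnalysesOutput are paginated through MaxResults and NextToken: resubmit the request with the returned token until the service stops returning one. A minimal sketch, assuming the ListAnalyses operation wrapper generated earlier in this file and a placeholder account ID; the generated client normally also exposes a ListAnalysesPages helper that wraps the same loop:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	svc := quicksight.New(session.Must(session.NewSession()))

	input := &quicksight.ListAnalysesInput{
		AwsAccountId: aws.String("111122223333"), // placeholder account ID
		MaxResults:   aws.Int64(50),
	}

	// Keep requesting pages until the service stops returning a NextToken.
	for {
		out, err := svc.ListAnalyses(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, summary := range out.AnalysisSummaryList {
			fmt.Println(summary) // each entry is an *AnalysisSummary
		}
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}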
-func (s *DescribeDataSourceInput) SetAwsAccountId(v string) *DescribeDataSourceInput { +func (s *ListDashboardVersionsInput) SetAwsAccountId(v string) *ListDashboardVersionsInput { s.AwsAccountId = &v return s } -// SetDataSourceId sets the DataSourceId field's value. -func (s *DescribeDataSourceInput) SetDataSourceId(v string) *DescribeDataSourceInput { - s.DataSourceId = &v +// SetDashboardId sets the DashboardId field's value. +func (s *ListDashboardVersionsInput) SetDashboardId(v string) *ListDashboardVersionsInput { + s.DashboardId = &v return s } -type DescribeDataSourceOutput struct { +// SetMaxResults sets the MaxResults field's value. +func (s *ListDashboardVersionsInput) SetMaxResults(v int64) *ListDashboardVersionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDashboardVersionsInput) SetNextToken(v string) *ListDashboardVersionsInput { + s.NextToken = &v + return s +} + +type ListDashboardVersionsOutput struct { _ struct{} `type:"structure"` - // The information on the data source. - DataSource *DataSource `type:"structure"` + // A structure that contains information about each version of the dashboard. + DashboardVersionSummaryList []*DashboardVersionSummary `type:"list"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -13239,72 +22550,75 @@ type DescribeDataSourceOutput struct { } // String returns the string representation -func (s DescribeDataSourceOutput) String() string { +func (s ListDashboardVersionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDataSourceOutput) GoString() string { +func (s ListDashboardVersionsOutput) GoString() string { return s.String() } -// SetDataSource sets the DataSource field's value. -func (s *DescribeDataSourceOutput) SetDataSource(v *DataSource) *DescribeDataSourceOutput { - s.DataSource = v +// SetDashboardVersionSummaryList sets the DashboardVersionSummaryList field's value. +func (s *ListDashboardVersionsOutput) SetDashboardVersionSummaryList(v []*DashboardVersionSummary) *ListDashboardVersionsOutput { + s.DashboardVersionSummaryList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDashboardVersionsOutput) SetNextToken(v string) *ListDashboardVersionsOutput { + s.NextToken = &v return s } // SetRequestId sets the RequestId field's value. -func (s *DescribeDataSourceOutput) SetRequestId(v string) *DescribeDataSourceOutput { +func (s *ListDashboardVersionsOutput) SetRequestId(v string) *ListDashboardVersionsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DescribeDataSourceOutput) SetStatus(v int64) *DescribeDataSourceOutput { +func (s *ListDashboardVersionsOutput) SetStatus(v int64) *ListDashboardVersionsOutput { s.Status = &v return s } -type DescribeDataSourcePermissionsInput struct { +type ListDashboardsInput struct { _ struct{} `type:"structure"` - // The AWS account ID. + // The ID of the AWS account that contains the dashboards that you're listing. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID of the data source. This ID is unique per AWS Region for each AWS - // account. 
- // - // DataSourceId is a required field - DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s DescribeDataSourcePermissionsInput) String() string { +func (s ListDashboardsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDataSourcePermissionsInput) GoString() string { +func (s ListDashboardsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeDataSourcePermissionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeDataSourcePermissionsInput"} +func (s *ListDashboardsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDashboardsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSourceId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSourceId")) - } - if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -13314,29 +22628,32 @@ func (s *DescribeDataSourcePermissionsInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *DescribeDataSourcePermissionsInput) SetAwsAccountId(v string) *DescribeDataSourcePermissionsInput { +func (s *ListDashboardsInput) SetAwsAccountId(v string) *ListDashboardsInput { s.AwsAccountId = &v return s } -// SetDataSourceId sets the DataSourceId field's value. -func (s *DescribeDataSourcePermissionsInput) SetDataSourceId(v string) *DescribeDataSourcePermissionsInput { - s.DataSourceId = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListDashboardsInput) SetMaxResults(v int64) *ListDashboardsInput { + s.MaxResults = &v return s } -type DescribeDataSourcePermissionsOutput struct { - _ struct{} `type:"structure"` +// SetNextToken sets the NextToken field's value. +func (s *ListDashboardsInput) SetNextToken(v string) *ListDashboardsInput { + s.NextToken = &v + return s +} - // The Amazon Resource Name (ARN) of the data source. - DataSourceArn *string `type:"string"` +type ListDashboardsOutput struct { + _ struct{} `type:"structure"` - // The ID of the data source. This ID is unique per AWS Region for each AWS - // account. - DataSourceId *string `type:"string"` + // A structure that contains all of the dashboards in your AWS account. This + // structure provides basic information about the dashboards. + DashboardSummaryList []*DashboardSummary `type:"list"` - // A list of resource permissions on the data source. - Permissions []*ResourcePermission `min:"1" type:"list"` + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` // The AWS request ID for this operation. 
RequestId *string `type:"string"` @@ -13346,95 +22663,75 @@ type DescribeDataSourcePermissionsOutput struct { } // String returns the string representation -func (s DescribeDataSourcePermissionsOutput) String() string { +func (s ListDashboardsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeDataSourcePermissionsOutput) GoString() string { +func (s ListDashboardsOutput) GoString() string { return s.String() } -// SetDataSourceArn sets the DataSourceArn field's value. -func (s *DescribeDataSourcePermissionsOutput) SetDataSourceArn(v string) *DescribeDataSourcePermissionsOutput { - s.DataSourceArn = &v - return s -} - -// SetDataSourceId sets the DataSourceId field's value. -func (s *DescribeDataSourcePermissionsOutput) SetDataSourceId(v string) *DescribeDataSourcePermissionsOutput { - s.DataSourceId = &v +// SetDashboardSummaryList sets the DashboardSummaryList field's value. +func (s *ListDashboardsOutput) SetDashboardSummaryList(v []*DashboardSummary) *ListDashboardsOutput { + s.DashboardSummaryList = v return s } -// SetPermissions sets the Permissions field's value. -func (s *DescribeDataSourcePermissionsOutput) SetPermissions(v []*ResourcePermission) *DescribeDataSourcePermissionsOutput { - s.Permissions = v +// SetNextToken sets the NextToken field's value. +func (s *ListDashboardsOutput) SetNextToken(v string) *ListDashboardsOutput { + s.NextToken = &v return s } // SetRequestId sets the RequestId field's value. -func (s *DescribeDataSourcePermissionsOutput) SetRequestId(v string) *DescribeDataSourcePermissionsOutput { +func (s *ListDashboardsOutput) SetRequestId(v string) *ListDashboardsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DescribeDataSourcePermissionsOutput) SetStatus(v int64) *DescribeDataSourcePermissionsOutput { +func (s *ListDashboardsOutput) SetStatus(v int64) *ListDashboardsOutput { s.Status = &v return s } -type DescribeGroupInput struct { +type ListDataSetsInput struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the group is in. Currently, you use the ID - // for the AWS account that contains your Amazon QuickSight account. + // The AWS account ID. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The name of the group that you want to describe. - // - // GroupName is a required field - GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` - // The namespace. Currently, you should set this to default. - // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s DescribeGroupInput) String() string { +func (s ListDataSetsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeGroupInput) GoString() string { +func (s ListDataSetsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeGroupInput"} +func (s *ListDataSetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDataSetsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) - } - if s.GroupName != nil && len(*s.GroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) - } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) - } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -13444,28 +22741,31 @@ func (s *DescribeGroupInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *DescribeGroupInput) SetAwsAccountId(v string) *DescribeGroupInput { +func (s *ListDataSetsInput) SetAwsAccountId(v string) *ListDataSetsInput { s.AwsAccountId = &v return s } -// SetGroupName sets the GroupName field's value. -func (s *DescribeGroupInput) SetGroupName(v string) *DescribeGroupInput { - s.GroupName = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListDataSetsInput) SetMaxResults(v int64) *ListDataSetsInput { + s.MaxResults = &v return s } -// SetNamespace sets the Namespace field's value. -func (s *DescribeGroupInput) SetNamespace(v string) *DescribeGroupInput { - s.Namespace = &v +// SetNextToken sets the NextToken field's value. +func (s *ListDataSetsInput) SetNextToken(v string) *ListDataSetsInput { + s.NextToken = &v return s } -type DescribeGroupOutput struct { +type ListDataSetsOutput struct { _ struct{} `type:"structure"` - // The name of the group. - Group *Group `type:"structure"` + // The list of dataset summaries. + DataSetSummaries []*DataSetSummary `type:"list"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -13475,82 +22775,75 @@ type DescribeGroupOutput struct { } // String returns the string representation -func (s DescribeGroupOutput) String() string { +func (s ListDataSetsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeGroupOutput) GoString() string { +func (s ListDataSetsOutput) GoString() string { return s.String() } -// SetGroup sets the Group field's value. -func (s *DescribeGroupOutput) SetGroup(v *Group) *DescribeGroupOutput { - s.Group = v +// SetDataSetSummaries sets the DataSetSummaries field's value. +func (s *ListDataSetsOutput) SetDataSetSummaries(v []*DataSetSummary) *ListDataSetsOutput { + s.DataSetSummaries = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDataSetsOutput) SetNextToken(v string) *ListDataSetsOutput { + s.NextToken = &v return s } // SetRequestId sets the RequestId field's value. -func (s *DescribeGroupOutput) SetRequestId(v string) *DescribeGroupOutput { +func (s *ListDataSetsOutput) SetRequestId(v string) *ListDataSetsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. 
-func (s *DescribeGroupOutput) SetStatus(v int64) *DescribeGroupOutput { +func (s *ListDataSetsOutput) SetStatus(v int64) *ListDataSetsOutput { s.Status = &v return s } -type DescribeIAMPolicyAssignmentInput struct { +type ListDataSourcesInput struct { _ struct{} `type:"structure"` - // The name of the assignment. - // - // AssignmentName is a required field - AssignmentName *string `location:"uri" locationName:"AssignmentName" min:"1" type:"string" required:"true"` - - // The ID of the AWS account that contains the assignment that you want to describe. + // The AWS account ID. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The namespace that contains the assignment. - // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s DescribeIAMPolicyAssignmentInput) String() string { +func (s ListDataSourcesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeIAMPolicyAssignmentInput) GoString() string { +func (s ListDataSourcesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeIAMPolicyAssignmentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeIAMPolicyAssignmentInput"} - if s.AssignmentName == nil { - invalidParams.Add(request.NewErrParamRequired("AssignmentName")) - } - if s.AssignmentName != nil && len(*s.AssignmentName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AssignmentName", 1)) - } +func (s *ListDataSourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListDataSourcesInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) - } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -13559,29 +22852,32 @@ func (s *DescribeIAMPolicyAssignmentInput) Validate() error { return nil } -// SetAssignmentName sets the AssignmentName field's value. -func (s *DescribeIAMPolicyAssignmentInput) SetAssignmentName(v string) *DescribeIAMPolicyAssignmentInput { - s.AssignmentName = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListDataSourcesInput) SetAwsAccountId(v string) *ListDataSourcesInput { + s.AwsAccountId = &v return s } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DescribeIAMPolicyAssignmentInput) SetAwsAccountId(v string) *DescribeIAMPolicyAssignmentInput { - s.AwsAccountId = &v +// SetMaxResults sets the MaxResults field's value. 
+func (s *ListDataSourcesInput) SetMaxResults(v int64) *ListDataSourcesInput { + s.MaxResults = &v return s } -// SetNamespace sets the Namespace field's value. -func (s *DescribeIAMPolicyAssignmentInput) SetNamespace(v string) *DescribeIAMPolicyAssignmentInput { - s.Namespace = &v +// SetNextToken sets the NextToken field's value. +func (s *ListDataSourcesInput) SetNextToken(v string) *ListDataSourcesInput { + s.NextToken = &v return s } -type DescribeIAMPolicyAssignmentOutput struct { +type ListDataSourcesOutput struct { _ struct{} `type:"structure"` - // Information describing the IAM policy assignment. - IAMPolicyAssignment *IAMPolicyAssignment `type:"structure"` + // A list of data sources. + DataSources []*DataSource `type:"list"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -13591,82 +22887,98 @@ type DescribeIAMPolicyAssignmentOutput struct { } // String returns the string representation -func (s DescribeIAMPolicyAssignmentOutput) String() string { +func (s ListDataSourcesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeIAMPolicyAssignmentOutput) GoString() string { +func (s ListDataSourcesOutput) GoString() string { return s.String() } -// SetIAMPolicyAssignment sets the IAMPolicyAssignment field's value. -func (s *DescribeIAMPolicyAssignmentOutput) SetIAMPolicyAssignment(v *IAMPolicyAssignment) *DescribeIAMPolicyAssignmentOutput { - s.IAMPolicyAssignment = v +// SetDataSources sets the DataSources field's value. +func (s *ListDataSourcesOutput) SetDataSources(v []*DataSource) *ListDataSourcesOutput { + s.DataSources = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDataSourcesOutput) SetNextToken(v string) *ListDataSourcesOutput { + s.NextToken = &v return s } // SetRequestId sets the RequestId field's value. -func (s *DescribeIAMPolicyAssignmentOutput) SetRequestId(v string) *DescribeIAMPolicyAssignmentOutput { +func (s *ListDataSourcesOutput) SetRequestId(v string) *ListDataSourcesOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DescribeIAMPolicyAssignmentOutput) SetStatus(v int64) *DescribeIAMPolicyAssignmentOutput { +func (s *ListDataSourcesOutput) SetStatus(v int64) *ListDataSourcesOutput { s.Status = &v return s } -type DescribeIngestionInput struct { +type ListGroupMembershipsInput struct { _ struct{} `type:"structure"` - // The AWS account ID. + // The ID for the AWS account that the group is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID of the dataset used in the ingestion. + // The name of the group that you want to see a membership list of. // - // DataSetId is a required field - DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + // GroupName is a required field + GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` - // An ID for the ingestion. + // The maximum number of results to return from this request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The namespace. Currently, you should set this to default. 
// - // IngestionId is a required field - IngestionId *string `location:"uri" locationName:"IngestionId" min:"1" type:"string" required:"true"` + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + + // A pagination token that can be used in a subsequent request. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s DescribeIngestionInput) String() string { +func (s ListGroupMembershipsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeIngestionInput) GoString() string { +func (s ListGroupMembershipsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeIngestionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeIngestionInput"} +func (s *ListGroupMembershipsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGroupMembershipsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSetId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetId")) + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) } - if s.DataSetId != nil && len(*s.DataSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) } - if s.IngestionId == nil { - invalidParams.Add(request.NewErrParamRequired("IngestionId")) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.IngestionId != nil && len(*s.IngestionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("IngestionId", 1)) + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } if invalidParams.Len() > 0 { @@ -13676,28 +22988,43 @@ func (s *DescribeIngestionInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *DescribeIngestionInput) SetAwsAccountId(v string) *DescribeIngestionInput { +func (s *ListGroupMembershipsInput) SetAwsAccountId(v string) *ListGroupMembershipsInput { s.AwsAccountId = &v return s } -// SetDataSetId sets the DataSetId field's value. -func (s *DescribeIngestionInput) SetDataSetId(v string) *DescribeIngestionInput { - s.DataSetId = &v +// SetGroupName sets the GroupName field's value. +func (s *ListGroupMembershipsInput) SetGroupName(v string) *ListGroupMembershipsInput { + s.GroupName = &v return s } -// SetIngestionId sets the IngestionId field's value. -func (s *DescribeIngestionInput) SetIngestionId(v string) *DescribeIngestionInput { - s.IngestionId = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListGroupMembershipsInput) SetMaxResults(v int64) *ListGroupMembershipsInput { + s.MaxResults = &v return s } -type DescribeIngestionOutput struct { +// SetNamespace sets the Namespace field's value. 
+func (s *ListGroupMembershipsInput) SetNamespace(v string) *ListGroupMembershipsInput { + s.Namespace = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListGroupMembershipsInput) SetNextToken(v string) *ListGroupMembershipsInput { + s.NextToken = &v + return s +} + +type ListGroupMembershipsOutput struct { _ struct{} `type:"structure"` - // Information about the ingestion. - Ingestion *Ingestion `type:"structure"` + // The list of the members of the group. + GroupMemberList []*GroupMember `type:"list"` + + // A pagination token that can be used in a subsequent request. + NextToken *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -13707,85 +23034,87 @@ type DescribeIngestionOutput struct { } // String returns the string representation -func (s DescribeIngestionOutput) String() string { +func (s ListGroupMembershipsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeIngestionOutput) GoString() string { +func (s ListGroupMembershipsOutput) GoString() string { return s.String() } -// SetIngestion sets the Ingestion field's value. -func (s *DescribeIngestionOutput) SetIngestion(v *Ingestion) *DescribeIngestionOutput { - s.Ingestion = v +// SetGroupMemberList sets the GroupMemberList field's value. +func (s *ListGroupMembershipsOutput) SetGroupMemberList(v []*GroupMember) *ListGroupMembershipsOutput { + s.GroupMemberList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListGroupMembershipsOutput) SetNextToken(v string) *ListGroupMembershipsOutput { + s.NextToken = &v return s } // SetRequestId sets the RequestId field's value. -func (s *DescribeIngestionOutput) SetRequestId(v string) *DescribeIngestionOutput { +func (s *ListGroupMembershipsOutput) SetRequestId(v string) *ListGroupMembershipsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DescribeIngestionOutput) SetStatus(v int64) *DescribeIngestionOutput { +func (s *ListGroupMembershipsOutput) SetStatus(v int64) *ListGroupMembershipsOutput { s.Status = &v return s } -type DescribeTemplateAliasInput struct { +type ListGroupsInput struct { _ struct{} `type:"structure"` - // The name of the template alias that you want to describe. If you name a specific - // alias, you describe the version that the alias points to. You can specify - // the latest version of the template by providing the keyword $LATEST in the - // AliasName parameter. The keyword $PUBLISHED doesn't apply to templates. - // - // AliasName is a required field - AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` - - // The ID of the AWS account that contains the template alias that you're describing. + // The ID for the AWS account that the group is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID for the template. + // The maximum number of results to return. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The namespace. Currently, you should set this to default. 
// - // TemplateId is a required field - TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + + // A pagination token that can be used in a subsequent request. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s DescribeTemplateAliasInput) String() string { +func (s ListGroupsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTemplateAliasInput) GoString() string { +func (s ListGroupsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeTemplateAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeTemplateAliasInput"} - if s.AliasName == nil { - invalidParams.Add(request.NewErrParamRequired("AliasName")) - } - if s.AliasName != nil && len(*s.AliasName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) - } +func (s *ListGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGroupsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.TemplateId == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateId")) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.TemplateId != nil && len(*s.TemplateId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } if invalidParams.Len() > 0 { @@ -13794,119 +23123,138 @@ func (s *DescribeTemplateAliasInput) Validate() error { return nil } -// SetAliasName sets the AliasName field's value. -func (s *DescribeTemplateAliasInput) SetAliasName(v string) *DescribeTemplateAliasInput { - s.AliasName = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListGroupsInput) SetAwsAccountId(v string) *ListGroupsInput { + s.AwsAccountId = &v return s } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DescribeTemplateAliasInput) SetAwsAccountId(v string) *DescribeTemplateAliasInput { - s.AwsAccountId = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListGroupsInput) SetMaxResults(v int64) *ListGroupsInput { + s.MaxResults = &v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *DescribeTemplateAliasInput) SetTemplateId(v string) *DescribeTemplateAliasInput { - s.TemplateId = &v +// SetNamespace sets the Namespace field's value. +func (s *ListGroupsInput) SetNamespace(v string) *ListGroupsInput { + s.Namespace = &v return s } -type DescribeTemplateAliasOutput struct { +// SetNextToken sets the NextToken field's value. +func (s *ListGroupsInput) SetNextToken(v string) *ListGroupsInput { + s.NextToken = &v + return s +} + +type ListGroupsOutput struct { _ struct{} `type:"structure"` + // The list of the groups. + GroupList []*Group `type:"list"` + + // A pagination token that can be used in a subsequent request. 
+ NextToken *string `type:"string"` + // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` - - // Information about the template alias. - TemplateAlias *TemplateAlias `type:"structure"` } // String returns the string representation -func (s DescribeTemplateAliasOutput) String() string { +func (s ListGroupsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTemplateAliasOutput) GoString() string { +func (s ListGroupsOutput) GoString() string { return s.String() } +// SetGroupList sets the GroupList field's value. +func (s *ListGroupsOutput) SetGroupList(v []*Group) *ListGroupsOutput { + s.GroupList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListGroupsOutput) SetNextToken(v string) *ListGroupsOutput { + s.NextToken = &v + return s +} + // SetRequestId sets the RequestId field's value. -func (s *DescribeTemplateAliasOutput) SetRequestId(v string) *DescribeTemplateAliasOutput { +func (s *ListGroupsOutput) SetRequestId(v string) *ListGroupsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DescribeTemplateAliasOutput) SetStatus(v int64) *DescribeTemplateAliasOutput { +func (s *ListGroupsOutput) SetStatus(v int64) *ListGroupsOutput { s.Status = &v return s } -// SetTemplateAlias sets the TemplateAlias field's value. -func (s *DescribeTemplateAliasOutput) SetTemplateAlias(v *TemplateAlias) *DescribeTemplateAliasOutput { - s.TemplateAlias = v - return s -} - -type DescribeTemplateInput struct { +type ListIAMPolicyAssignmentsForUserInput struct { _ struct{} `type:"structure"` - // The alias of the template that you want to describe. If you name a specific - // alias, you describe the version that the alias points to. You can specify - // the latest version of the template by providing the keyword $LATEST in the - // AliasName parameter. The keyword $PUBLISHED doesn't apply to templates. - AliasName *string `location:"querystring" locationName:"alias-name" min:"1" type:"string"` - - // The ID of the AWS account that contains the template that you're describing. + // The ID of the AWS account that contains the assignments. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID for the template. + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The namespace of the assignment. // - // TemplateId is a required field - TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` - // (Optional) The number for the version to describe. If a VersionNumber parameter - // value isn't provided, the latest version of the template is described. - VersionNumber *int64 `location:"querystring" locationName:"version-number" min:"1" type:"long"` + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + + // The name of the user. 
+ // + // UserName is a required field + UserName *string `location:"uri" locationName:"UserName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DescribeTemplateInput) String() string { +func (s ListIAMPolicyAssignmentsForUserInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTemplateInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeTemplateInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeTemplateInput"} - if s.AliasName != nil && len(*s.AliasName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) - } +func (s ListIAMPolicyAssignmentsForUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListIAMPolicyAssignmentsForUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListIAMPolicyAssignmentsForUserInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.TemplateId == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateId")) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.TemplateId != nil && len(*s.TemplateId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) } - if s.VersionNumber != nil && *s.VersionNumber < 1 { - invalidParams.Add(request.NewErrParamMinValue("VersionNumber", 1)) + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) } if invalidParams.Len() > 0 { @@ -13915,100 +23263,136 @@ func (s *DescribeTemplateInput) Validate() error { return nil } -// SetAliasName sets the AliasName field's value. -func (s *DescribeTemplateInput) SetAliasName(v string) *DescribeTemplateInput { - s.AliasName = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListIAMPolicyAssignmentsForUserInput) SetAwsAccountId(v string) *ListIAMPolicyAssignmentsForUserInput { + s.AwsAccountId = &v return s } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DescribeTemplateInput) SetAwsAccountId(v string) *DescribeTemplateInput { - s.AwsAccountId = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListIAMPolicyAssignmentsForUserInput) SetMaxResults(v int64) *ListIAMPolicyAssignmentsForUserInput { + s.MaxResults = &v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *DescribeTemplateInput) SetTemplateId(v string) *DescribeTemplateInput { - s.TemplateId = &v +// SetNamespace sets the Namespace field's value. +func (s *ListIAMPolicyAssignmentsForUserInput) SetNamespace(v string) *ListIAMPolicyAssignmentsForUserInput { + s.Namespace = &v return s } -// SetVersionNumber sets the VersionNumber field's value. 
-func (s *DescribeTemplateInput) SetVersionNumber(v int64) *DescribeTemplateInput { - s.VersionNumber = &v +// SetNextToken sets the NextToken field's value. +func (s *ListIAMPolicyAssignmentsForUserInput) SetNextToken(v string) *ListIAMPolicyAssignmentsForUserInput { + s.NextToken = &v return s } -type DescribeTemplateOutput struct { +// SetUserName sets the UserName field's value. +func (s *ListIAMPolicyAssignmentsForUserInput) SetUserName(v string) *ListIAMPolicyAssignmentsForUserInput { + s.UserName = &v + return s +} + +type ListIAMPolicyAssignmentsForUserOutput struct { _ struct{} `type:"structure"` + // The active assignments for this user. + ActiveAssignments []*ActiveIAMPolicyAssignment `type:"list"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` - - // The template structure for the object you want to describe. - Template *Template `type:"structure"` } // String returns the string representation -func (s DescribeTemplateOutput) String() string { +func (s ListIAMPolicyAssignmentsForUserOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTemplateOutput) GoString() string { +func (s ListIAMPolicyAssignmentsForUserOutput) GoString() string { return s.String() } -// SetStatus sets the Status field's value. -func (s *DescribeTemplateOutput) SetStatus(v int64) *DescribeTemplateOutput { - s.Status = &v +// SetActiveAssignments sets the ActiveAssignments field's value. +func (s *ListIAMPolicyAssignmentsForUserOutput) SetActiveAssignments(v []*ActiveIAMPolicyAssignment) *ListIAMPolicyAssignmentsForUserOutput { + s.ActiveAssignments = v return s } -// SetTemplate sets the Template field's value. -func (s *DescribeTemplateOutput) SetTemplate(v *Template) *DescribeTemplateOutput { - s.Template = v +// SetNextToken sets the NextToken field's value. +func (s *ListIAMPolicyAssignmentsForUserOutput) SetNextToken(v string) *ListIAMPolicyAssignmentsForUserOutput { + s.NextToken = &v return s } -type DescribeTemplatePermissionsInput struct { +// SetRequestId sets the RequestId field's value. +func (s *ListIAMPolicyAssignmentsForUserOutput) SetRequestId(v string) *ListIAMPolicyAssignmentsForUserOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ListIAMPolicyAssignmentsForUserOutput) SetStatus(v int64) *ListIAMPolicyAssignmentsForUserOutput { + s.Status = &v + return s +} + +type ListIAMPolicyAssignmentsInput struct { _ struct{} `type:"structure"` - // The ID of the AWS account that contains the template that you're describing. + // The status of the assignments. + AssignmentStatus *string `type:"string" enum:"AssignmentStatus"` + + // The ID of the AWS account that contains these IAM policy assignments. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID for the template. + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The namespace for the assignments. 
// - // TemplateId is a required field - TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s DescribeTemplatePermissionsInput) String() string { +func (s ListIAMPolicyAssignmentsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTemplatePermissionsInput) GoString() string { +func (s ListIAMPolicyAssignmentsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeTemplatePermissionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeTemplatePermissionsInput"} +func (s *ListIAMPolicyAssignmentsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListIAMPolicyAssignmentsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.TemplateId == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateId")) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.TemplateId != nil && len(*s.TemplateId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } if invalidParams.Len() > 0 { @@ -14017,127 +23401,133 @@ func (s *DescribeTemplatePermissionsInput) Validate() error { return nil } +// SetAssignmentStatus sets the AssignmentStatus field's value. +func (s *ListIAMPolicyAssignmentsInput) SetAssignmentStatus(v string) *ListIAMPolicyAssignmentsInput { + s.AssignmentStatus = &v + return s +} + // SetAwsAccountId sets the AwsAccountId field's value. -func (s *DescribeTemplatePermissionsInput) SetAwsAccountId(v string) *DescribeTemplatePermissionsInput { +func (s *ListIAMPolicyAssignmentsInput) SetAwsAccountId(v string) *ListIAMPolicyAssignmentsInput { s.AwsAccountId = &v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *DescribeTemplatePermissionsInput) SetTemplateId(v string) *DescribeTemplatePermissionsInput { - s.TemplateId = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListIAMPolicyAssignmentsInput) SetMaxResults(v int64) *ListIAMPolicyAssignmentsInput { + s.MaxResults = &v return s } -type DescribeTemplatePermissionsOutput struct { +// SetNamespace sets the Namespace field's value. +func (s *ListIAMPolicyAssignmentsInput) SetNamespace(v string) *ListIAMPolicyAssignmentsInput { + s.Namespace = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListIAMPolicyAssignmentsInput) SetNextToken(v string) *ListIAMPolicyAssignmentsInput { + s.NextToken = &v + return s +} + +type ListIAMPolicyAssignmentsOutput struct { _ struct{} `type:"structure"` - // A list of resource permissions to be set on the template. 
- Permissions []*ResourcePermission `min:"1" type:"list"` + // Information describing the IAM policy assignments. + IAMPolicyAssignments []*IAMPolicyAssignmentSummary `type:"list"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` - - // The Amazon Resource Name (ARN) of the template. - TemplateArn *string `type:"string"` - - // The ID for the template. - TemplateId *string `min:"1" type:"string"` } // String returns the string representation -func (s DescribeTemplatePermissionsOutput) String() string { +func (s ListIAMPolicyAssignmentsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTemplatePermissionsOutput) GoString() string { +func (s ListIAMPolicyAssignmentsOutput) GoString() string { return s.String() } -// SetPermissions sets the Permissions field's value. -func (s *DescribeTemplatePermissionsOutput) SetPermissions(v []*ResourcePermission) *DescribeTemplatePermissionsOutput { - s.Permissions = v +// SetIAMPolicyAssignments sets the IAMPolicyAssignments field's value. +func (s *ListIAMPolicyAssignmentsOutput) SetIAMPolicyAssignments(v []*IAMPolicyAssignmentSummary) *ListIAMPolicyAssignmentsOutput { + s.IAMPolicyAssignments = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListIAMPolicyAssignmentsOutput) SetNextToken(v string) *ListIAMPolicyAssignmentsOutput { + s.NextToken = &v return s } // SetRequestId sets the RequestId field's value. -func (s *DescribeTemplatePermissionsOutput) SetRequestId(v string) *DescribeTemplatePermissionsOutput { +func (s *ListIAMPolicyAssignmentsOutput) SetRequestId(v string) *ListIAMPolicyAssignmentsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DescribeTemplatePermissionsOutput) SetStatus(v int64) *DescribeTemplatePermissionsOutput { +func (s *ListIAMPolicyAssignmentsOutput) SetStatus(v int64) *ListIAMPolicyAssignmentsOutput { s.Status = &v return s } -// SetTemplateArn sets the TemplateArn field's value. -func (s *DescribeTemplatePermissionsOutput) SetTemplateArn(v string) *DescribeTemplatePermissionsOutput { - s.TemplateArn = &v - return s -} - -// SetTemplateId sets the TemplateId field's value. -func (s *DescribeTemplatePermissionsOutput) SetTemplateId(v string) *DescribeTemplatePermissionsOutput { - s.TemplateId = &v - return s -} - -type DescribeUserInput struct { +type ListIngestionsInput struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the user is in. Currently, you use the ID - // for the AWS account that contains your Amazon QuickSight account. + // The AWS account ID. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The namespace. Currently, you should set this to default. + // The ID of the dataset used in the ingestion. // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` - // The name of the user that you want to describe. 
- // - // UserName is a required field - UserName *string `location:"uri" locationName:"UserName" min:"1" type:"string" required:"true"` + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s DescribeUserInput) String() string { +func (s ListIngestionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeUserInput) GoString() string { +func (s ListIngestionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeUserInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeUserInput"} +func (s *ListIngestionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListIngestionsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) - } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) } - if s.UserName == nil { - invalidParams.Add(request.NewErrParamRequired("UserName")) + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) } - if s.UserName != nil && len(*s.UserName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } if invalidParams.Len() > 0 { @@ -14146,211 +23536,223 @@ func (s *DescribeUserInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *DescribeUserInput) SetAwsAccountId(v string) *DescribeUserInput { - s.AwsAccountId = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListIngestionsInput) SetAwsAccountId(v string) *ListIngestionsInput { + s.AwsAccountId = &v + return s +} + +// SetDataSetId sets the DataSetId field's value. +func (s *ListIngestionsInput) SetDataSetId(v string) *ListIngestionsInput { + s.DataSetId = &v return s } -// SetNamespace sets the Namespace field's value. -func (s *DescribeUserInput) SetNamespace(v string) *DescribeUserInput { - s.Namespace = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListIngestionsInput) SetMaxResults(v int64) *ListIngestionsInput { + s.MaxResults = &v return s } -// SetUserName sets the UserName field's value. -func (s *DescribeUserInput) SetUserName(v string) *DescribeUserInput { - s.UserName = &v +// SetNextToken sets the NextToken field's value. +func (s *ListIngestionsInput) SetNextToken(v string) *ListIngestionsInput { + s.NextToken = &v return s } -type DescribeUserOutput struct { +type ListIngestionsOutput struct { _ struct{} `type:"structure"` + // A list of the ingestions. + Ingestions []*Ingestion `type:"list"` + + // The token for the next set of results, or null if there are no more results. 
+ NextToken *string `type:"string"` + // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` - - // The user name. - User *User `type:"structure"` } // String returns the string representation -func (s DescribeUserOutput) String() string { +func (s ListIngestionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeUserOutput) GoString() string { +func (s ListIngestionsOutput) GoString() string { return s.String() } +// SetIngestions sets the Ingestions field's value. +func (s *ListIngestionsOutput) SetIngestions(v []*Ingestion) *ListIngestionsOutput { + s.Ingestions = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListIngestionsOutput) SetNextToken(v string) *ListIngestionsOutput { + s.NextToken = &v + return s +} + // SetRequestId sets the RequestId field's value. -func (s *DescribeUserOutput) SetRequestId(v string) *DescribeUserOutput { +func (s *ListIngestionsOutput) SetRequestId(v string) *ListIngestionsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *DescribeUserOutput) SetStatus(v int64) *DescribeUserOutput { +func (s *ListIngestionsOutput) SetStatus(v int64) *ListIngestionsOutput { s.Status = &v return s } -// SetUser sets the User field's value. -func (s *DescribeUserOutput) SetUser(v *User) *DescribeUserOutput { - s.User = v - return s -} +type ListNamespacesInput struct { + _ struct{} `type:"structure"` -// The domain specified isn't on the allow list. All domains for embedded dashboards -// must be added to the approved list by an Amazon QuickSight admin. -type DomainNotWhitelistedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + // The ID for the AWS account that contains the QuickSight namespaces that you + // want to list. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - Message_ *string `locationName:"Message" type:"string"` + // The maximum number of results to return. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` - // The AWS request ID for this request. - RequestId *string `type:"string"` + // A pagination token that can be used in a subsequent request. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s DomainNotWhitelistedException) String() string { +func (s ListNamespacesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DomainNotWhitelistedException) GoString() string { +func (s ListNamespacesInput) GoString() string { return s.String() } -func newErrorDomainNotWhitelistedException(v protocol.ResponseMetadata) error { - return &DomainNotWhitelistedException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListNamespacesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListNamespacesInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } -} - -// Code returns the exception type name. -func (s DomainNotWhitelistedException) Code() string { - return "DomainNotWhitelistedException" -} - -// Message returns the exception's message. 
-func (s DomainNotWhitelistedException) Message() string { - if s.Message_ != nil { - return *s.Message_ + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - return "" -} -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s DomainNotWhitelistedException) OrigErr() error { + if invalidParams.Len() > 0 { + return invalidParams + } return nil } -func (s DomainNotWhitelistedException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListNamespacesInput) SetAwsAccountId(v string) *ListNamespacesInput { + s.AwsAccountId = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s DomainNotWhitelistedException) StatusCode() int { - return s.respMetadata.StatusCode +// SetMaxResults sets the MaxResults field's value. +func (s *ListNamespacesInput) SetMaxResults(v int64) *ListNamespacesInput { + s.MaxResults = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s DomainNotWhitelistedException) RequestID() string { - return s.respMetadata.RequestID +// SetNextToken sets the NextToken field's value. +func (s *ListNamespacesInput) SetNextToken(v string) *ListNamespacesInput { + s.NextToken = &v + return s } -// Error information for the SPICE ingestion of a dataset. -type ErrorInfo struct { +type ListNamespacesOutput struct { _ struct{} `type:"structure"` - // Error message. - Message *string `type:"string"` + // The information about the namespaces in this AWS account. The response includes + // the namespace ARN, name, AWS Region, notification email address, creation + // status, and identity store. + Namespaces []*NamespaceInfoV2 `type:"list"` - // Error type. - Type *string `type:"string" enum:"IngestionErrorType"` + // A pagination token that can be used in a subsequent request. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s ErrorInfo) String() string { +func (s ListNamespacesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ErrorInfo) GoString() string { +func (s ListNamespacesOutput) GoString() string { return s.String() } -// SetMessage sets the Message field's value. -func (s *ErrorInfo) SetMessage(v string) *ErrorInfo { - s.Message = &v +// SetNamespaces sets the Namespaces field's value. +func (s *ListNamespacesOutput) SetNamespaces(v []*NamespaceInfoV2) *ListNamespacesOutput { + s.Namespaces = v return s } -// SetType sets the Type field's value. -func (s *ErrorInfo) SetType(v string) *ErrorInfo { - s.Type = &v +// SetNextToken sets the NextToken field's value. +func (s *ListNamespacesOutput) SetNextToken(v string) *ListNamespacesOutput { + s.NextToken = &v return s } -// Export to .csv option. -type ExportToCSVOption struct { - _ struct{} `type:"structure"` - - // Availability status. 
- AvailabilityStatus *string `type:"string" enum:"DashboardBehavior"` -} - -// String returns the string representation -func (s ExportToCSVOption) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ExportToCSVOption) GoString() string { - return s.String() +// SetRequestId sets the RequestId field's value. +func (s *ListNamespacesOutput) SetRequestId(v string) *ListNamespacesOutput { + s.RequestId = &v + return s } -// SetAvailabilityStatus sets the AvailabilityStatus field's value. -func (s *ExportToCSVOption) SetAvailabilityStatus(v string) *ExportToCSVOption { - s.AvailabilityStatus = &v +// SetStatus sets the Status field's value. +func (s *ListNamespacesOutput) SetStatus(v int64) *ListNamespacesOutput { + s.Status = &v return s } -// A transform operation that filters rows based on a condition. -type FilterOperation struct { +type ListTagsForResourceInput struct { _ struct{} `type:"structure"` - // An expression that must evaluate to a Boolean value. Rows for which the expression - // evaluates to true are kept in the dataset. + // The Amazon Resource Name (ARN) of the resource that you want a list of tags + // for. // - // ConditionExpression is a required field - ConditionExpression *string `min:"1" type:"string" required:"true"` + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" type:"string" required:"true"` } // String returns the string representation -func (s FilterOperation) String() string { +func (s ListTagsForResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s FilterOperation) GoString() string { +func (s ListTagsForResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *FilterOperation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "FilterOperation"} - if s.ConditionExpression == nil { - invalidParams.Add(request.NewErrParamRequired("ConditionExpression")) +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) } - if s.ConditionExpression != nil && len(*s.ConditionExpression) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ConditionExpression", 1)) + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) } if invalidParams.Len() > 0 { @@ -14359,158 +23761,102 @@ func (s *FilterOperation) Validate() error { return nil } -// SetConditionExpression sets the ConditionExpression field's value. -func (s *FilterOperation) SetConditionExpression(v string) *FilterOperation { - s.ConditionExpression = &v +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v return s } -// Geospatial column group that denotes a hierarchy. -type GeoSpatialColumnGroup struct { +type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // Columns in this hierarchy. - // - // Columns is a required field - Columns []*string `min:"1" type:"list" required:"true"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // Country code. 
- // - // CountryCode is a required field - CountryCode *string `type:"string" required:"true" enum:"GeoSpatialCountryCode"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` - // A display name for the hierarchy. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // Contains a map of the key-value pairs for the resource tag or tags assigned + // to the resource. + Tags []*Tag `min:"1" type:"list"` } // String returns the string representation -func (s GeoSpatialColumnGroup) String() string { +func (s ListTagsForResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GeoSpatialColumnGroup) GoString() string { +func (s ListTagsForResourceOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *GeoSpatialColumnGroup) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GeoSpatialColumnGroup"} - if s.Columns == nil { - invalidParams.Add(request.NewErrParamRequired("Columns")) - } - if s.Columns != nil && len(s.Columns) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Columns", 1)) - } - if s.CountryCode == nil { - invalidParams.Add(request.NewErrParamRequired("CountryCode")) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetColumns sets the Columns field's value. -func (s *GeoSpatialColumnGroup) SetColumns(v []*string) *GeoSpatialColumnGroup { - s.Columns = v +// SetRequestId sets the RequestId field's value. +func (s *ListTagsForResourceOutput) SetRequestId(v string) *ListTagsForResourceOutput { + s.RequestId = &v return s } -// SetCountryCode sets the CountryCode field's value. -func (s *GeoSpatialColumnGroup) SetCountryCode(v string) *GeoSpatialColumnGroup { - s.CountryCode = &v +// SetStatus sets the Status field's value. +func (s *ListTagsForResourceOutput) SetStatus(v int64) *ListTagsForResourceOutput { + s.Status = &v return s -} - -// SetName sets the Name field's value. -func (s *GeoSpatialColumnGroup) SetName(v string) *GeoSpatialColumnGroup { - s.Name = &v +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v return s } -type GetDashboardEmbedUrlInput struct { +type ListTemplateAliasesInput struct { _ struct{} `type:"structure"` - // The ID for the AWS account that contains the dashboard that you're embedding. + // The ID of the AWS account that contains the template aliases that you're + // listing. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID for the dashboard, also added to the IAM policy. - // - // DashboardId is a required field - DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` - - // The authentication method that the user uses to sign in. - // - // IdentityType is a required field - IdentityType *string `location:"querystring" locationName:"creds-type" type:"string" required:"true" enum:"IdentityType"` - - // Remove the reset button on the embedded dashboard. The default is FALSE, - // which enables the reset button. 
- ResetDisabled *bool `location:"querystring" locationName:"reset-disabled" type:"boolean"` - - // How many minutes the session is valid. The session lifetime must be 15-600 - // minutes. - SessionLifetimeInMinutes *int64 `location:"querystring" locationName:"session-lifetime" min:"15" type:"long"` + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-result" min:"1" type:"integer"` - // Remove the undo/redo button on the embedded dashboard. The default is FALSE, - // which enables the undo/redo button. - UndoRedoDisabled *bool `location:"querystring" locationName:"undo-redo-disabled" type:"boolean"` + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` - // The Amazon QuickSight user's Amazon Resource Name (ARN), for use with QUICKSIGHT - // identity type. You can use this for any Amazon QuickSight users in your account - // (readers, authors, or admins) authenticated as one of the following: - // - // * Active Directory (AD) users or group members - // - // * Invited nonfederated users + // The ID for the template. // - // * IAM users and IAM role-based sessions authenticated through Federated - // Single Sign-On using SAML, OpenID Connect, or IAM federation. - UserArn *string `location:"querystring" locationName:"user-arn" type:"string"` + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s GetDashboardEmbedUrlInput) String() string { +func (s ListTemplateAliasesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDashboardEmbedUrlInput) GoString() string { +func (s ListTemplateAliasesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetDashboardEmbedUrlInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetDashboardEmbedUrlInput"} +func (s *ListTemplateAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTemplateAliasesInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DashboardId == nil { - invalidParams.Add(request.NewErrParamRequired("DashboardId")) - } - if s.DashboardId != nil && len(*s.DashboardId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.IdentityType == nil { - invalidParams.Add(request.NewErrParamRequired("IdentityType")) + if s.TemplateId == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateId")) } - if s.SessionLifetimeInMinutes != nil && *s.SessionLifetimeInMinutes < 15 { - invalidParams.Add(request.NewErrParamMinValue("SessionLifetimeInMinutes", 15)) + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) } if invalidParams.Len() > 0 { @@ -14520,496 +23866,496 @@ func (s *GetDashboardEmbedUrlInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. 
-func (s *GetDashboardEmbedUrlInput) SetAwsAccountId(v string) *GetDashboardEmbedUrlInput { +func (s *ListTemplateAliasesInput) SetAwsAccountId(v string) *ListTemplateAliasesInput { s.AwsAccountId = &v return s } -// SetDashboardId sets the DashboardId field's value. -func (s *GetDashboardEmbedUrlInput) SetDashboardId(v string) *GetDashboardEmbedUrlInput { - s.DashboardId = &v - return s -} - -// SetIdentityType sets the IdentityType field's value. -func (s *GetDashboardEmbedUrlInput) SetIdentityType(v string) *GetDashboardEmbedUrlInput { - s.IdentityType = &v - return s -} - -// SetResetDisabled sets the ResetDisabled field's value. -func (s *GetDashboardEmbedUrlInput) SetResetDisabled(v bool) *GetDashboardEmbedUrlInput { - s.ResetDisabled = &v - return s -} - -// SetSessionLifetimeInMinutes sets the SessionLifetimeInMinutes field's value. -func (s *GetDashboardEmbedUrlInput) SetSessionLifetimeInMinutes(v int64) *GetDashboardEmbedUrlInput { - s.SessionLifetimeInMinutes = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListTemplateAliasesInput) SetMaxResults(v int64) *ListTemplateAliasesInput { + s.MaxResults = &v return s } -// SetUndoRedoDisabled sets the UndoRedoDisabled field's value. -func (s *GetDashboardEmbedUrlInput) SetUndoRedoDisabled(v bool) *GetDashboardEmbedUrlInput { - s.UndoRedoDisabled = &v +// SetNextToken sets the NextToken field's value. +func (s *ListTemplateAliasesInput) SetNextToken(v string) *ListTemplateAliasesInput { + s.NextToken = &v return s } -// SetUserArn sets the UserArn field's value. -func (s *GetDashboardEmbedUrlInput) SetUserArn(v string) *GetDashboardEmbedUrlInput { - s.UserArn = &v +// SetTemplateId sets the TemplateId field's value. +func (s *ListTemplateAliasesInput) SetTemplateId(v string) *ListTemplateAliasesInput { + s.TemplateId = &v return s } -type GetDashboardEmbedUrlOutput struct { +type ListTemplateAliasesOutput struct { _ struct{} `type:"structure"` - // An URL that you can put into your server-side webpage to embed your dashboard. - // This URL is valid for 5 minutes, and the resulting session is valid for 10 - // hours. The API provides the URL with an auth_code value that enables a single - // sign-on session. - EmbedUrl *string `type:"string" sensitive:"true"` + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` + + // A structure containing the list of the template's aliases. + TemplateAliasList []*TemplateAlias `type:"list"` } // String returns the string representation -func (s GetDashboardEmbedUrlOutput) String() string { +func (s ListTemplateAliasesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetDashboardEmbedUrlOutput) GoString() string { +func (s ListTemplateAliasesOutput) GoString() string { return s.String() } -// SetEmbedUrl sets the EmbedUrl field's value. -func (s *GetDashboardEmbedUrlOutput) SetEmbedUrl(v string) *GetDashboardEmbedUrlOutput { - s.EmbedUrl = &v +// SetNextToken sets the NextToken field's value. +func (s *ListTemplateAliasesOutput) SetNextToken(v string) *ListTemplateAliasesOutput { + s.NextToken = &v return s } // SetRequestId sets the RequestId field's value. 
-func (s *GetDashboardEmbedUrlOutput) SetRequestId(v string) *GetDashboardEmbedUrlOutput { +func (s *ListTemplateAliasesOutput) SetRequestId(v string) *ListTemplateAliasesOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *GetDashboardEmbedUrlOutput) SetStatus(v int64) *GetDashboardEmbedUrlOutput { +func (s *ListTemplateAliasesOutput) SetStatus(v int64) *ListTemplateAliasesOutput { s.Status = &v return s } -// A group in Amazon QuickSight consists of a set of users. You can use groups -// to make it easier to manage access and security. Currently, an Amazon QuickSight -// subscription can't contain more than 500 Amazon QuickSight groups. -type Group struct { +// SetTemplateAliasList sets the TemplateAliasList field's value. +func (s *ListTemplateAliasesOutput) SetTemplateAliasList(v []*TemplateAlias) *ListTemplateAliasesOutput { + s.TemplateAliasList = v + return s +} + +type ListTemplateVersionsInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) for the group. - Arn *string `type:"string"` + // The ID of the AWS account that contains the templates that you're listing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The group description. - Description *string `min:"1" type:"string"` + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` - // The name of the group. - GroupName *string `min:"1" type:"string"` + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` - // The principal ID of the group. - PrincipalId *string `type:"string"` + // The ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s Group) String() string { +func (s ListTemplateVersionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Group) GoString() string { +func (s ListTemplateVersionsInput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *Group) SetArn(v string) *Group { - s.Arn = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTemplateVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTemplateVersionsInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.TemplateId == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateId")) + } + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + } -// SetDescription sets the Description field's value. -func (s *Group) SetDescription(v string) *Group { - s.Description = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetGroupName sets the GroupName field's value. 
-func (s *Group) SetGroupName(v string) *Group { - s.GroupName = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListTemplateVersionsInput) SetAwsAccountId(v string) *ListTemplateVersionsInput { + s.AwsAccountId = &v return s } -// SetPrincipalId sets the PrincipalId field's value. -func (s *Group) SetPrincipalId(v string) *Group { - s.PrincipalId = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListTemplateVersionsInput) SetMaxResults(v int64) *ListTemplateVersionsInput { + s.MaxResults = &v return s } -// A member of an Amazon QuickSight group. Currently, group members must be -// users. Groups can't be members of another group. . -type GroupMember struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) for the group member (user). - Arn *string `type:"string"` - - // The name of the group member (user). - MemberName *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s GroupMember) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GroupMember) GoString() string { - return s.String() -} - -// SetArn sets the Arn field's value. -func (s *GroupMember) SetArn(v string) *GroupMember { - s.Arn = &v +// SetNextToken sets the NextToken field's value. +func (s *ListTemplateVersionsInput) SetNextToken(v string) *ListTemplateVersionsInput { + s.NextToken = &v return s } -// SetMemberName sets the MemberName field's value. -func (s *GroupMember) SetMemberName(v string) *GroupMember { - s.MemberName = &v +// SetTemplateId sets the TemplateId field's value. +func (s *ListTemplateVersionsInput) SetTemplateId(v string) *ListTemplateVersionsInput { + s.TemplateId = &v return s } -// An IAM policy assignment. -type IAMPolicyAssignment struct { +type ListTemplateVersionsOutput struct { _ struct{} `type:"structure"` - // Assignment ID. - AssignmentId *string `type:"string"` - - // Assignment name. - AssignmentName *string `min:"1" type:"string"` - - // Assignment status. - AssignmentStatus *string `type:"string" enum:"AssignmentStatus"` + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` - // The AWS account ID. - AwsAccountId *string `min:"12" type:"string"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // Identities. - Identities map[string][]*string `type:"map"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` - // The Amazon Resource Name (ARN) for the IAM policy. - PolicyArn *string `type:"string"` + // A structure containing a list of all the versions of the specified template. + TemplateVersionSummaryList []*TemplateVersionSummary `type:"list"` } // String returns the string representation -func (s IAMPolicyAssignment) String() string { +func (s ListTemplateVersionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s IAMPolicyAssignment) GoString() string { +func (s ListTemplateVersionsOutput) GoString() string { return s.String() } -// SetAssignmentId sets the AssignmentId field's value. -func (s *IAMPolicyAssignment) SetAssignmentId(v string) *IAMPolicyAssignment { - s.AssignmentId = &v - return s -} - -// SetAssignmentName sets the AssignmentName field's value. 
-func (s *IAMPolicyAssignment) SetAssignmentName(v string) *IAMPolicyAssignment { - s.AssignmentName = &v - return s -} - -// SetAssignmentStatus sets the AssignmentStatus field's value. -func (s *IAMPolicyAssignment) SetAssignmentStatus(v string) *IAMPolicyAssignment { - s.AssignmentStatus = &v +// SetNextToken sets the NextToken field's value. +func (s *ListTemplateVersionsOutput) SetNextToken(v string) *ListTemplateVersionsOutput { + s.NextToken = &v return s } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *IAMPolicyAssignment) SetAwsAccountId(v string) *IAMPolicyAssignment { - s.AwsAccountId = &v +// SetRequestId sets the RequestId field's value. +func (s *ListTemplateVersionsOutput) SetRequestId(v string) *ListTemplateVersionsOutput { + s.RequestId = &v return s } -// SetIdentities sets the Identities field's value. -func (s *IAMPolicyAssignment) SetIdentities(v map[string][]*string) *IAMPolicyAssignment { - s.Identities = v +// SetStatus sets the Status field's value. +func (s *ListTemplateVersionsOutput) SetStatus(v int64) *ListTemplateVersionsOutput { + s.Status = &v return s } -// SetPolicyArn sets the PolicyArn field's value. -func (s *IAMPolicyAssignment) SetPolicyArn(v string) *IAMPolicyAssignment { - s.PolicyArn = &v +// SetTemplateVersionSummaryList sets the TemplateVersionSummaryList field's value. +func (s *ListTemplateVersionsOutput) SetTemplateVersionSummaryList(v []*TemplateVersionSummary) *ListTemplateVersionsOutput { + s.TemplateVersionSummaryList = v return s } -// IAM policy assignment summary. -type IAMPolicyAssignmentSummary struct { +type ListTemplatesInput struct { _ struct{} `type:"structure"` - // Assignment name. - AssignmentName *string `min:"1" type:"string"` + // The ID of the AWS account that contains the templates that you're listing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Assignment status. - AssignmentStatus *string `type:"string" enum:"AssignmentStatus"` + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-result" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s IAMPolicyAssignmentSummary) String() string { +func (s ListTemplatesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s IAMPolicyAssignmentSummary) GoString() string { +func (s ListTemplatesInput) GoString() string { return s.String() } -// SetAssignmentName sets the AssignmentName field's value. -func (s *IAMPolicyAssignmentSummary) SetAssignmentName(v string) *IAMPolicyAssignmentSummary { - s.AssignmentName = &v +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTemplatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTemplatesInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListTemplatesInput) SetAwsAccountId(v string) *ListTemplatesInput { + s.AwsAccountId = &v return s } -// SetAssignmentStatus sets the AssignmentStatus field's value. -func (s *IAMPolicyAssignmentSummary) SetAssignmentStatus(v string) *IAMPolicyAssignmentSummary { - s.AssignmentStatus = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListTemplatesInput) SetMaxResults(v int64) *ListTemplatesInput { + s.MaxResults = &v return s } -// The identity type specified isn't supported. Supported identity types include -// IAM and QUICKSIGHT. -type IdentityTypeNotSupportedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// SetNextToken sets the NextToken field's value. +func (s *ListTemplatesInput) SetNextToken(v string) *ListTemplatesInput { + s.NextToken = &v + return s +} - Message_ *string `locationName:"Message" type:"string"` +type ListTemplatesOutput struct { + _ struct{} `type:"structure"` - // The AWS request ID for this request. + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // A structure containing information about the templates in the list. + TemplateSummaryList []*TemplateSummary `type:"list"` } // String returns the string representation -func (s IdentityTypeNotSupportedException) String() string { +func (s ListTemplatesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s IdentityTypeNotSupportedException) GoString() string { +func (s ListTemplatesOutput) GoString() string { return s.String() } -func newErrorIdentityTypeNotSupportedException(v protocol.ResponseMetadata) error { - return &IdentityTypeNotSupportedException{ - respMetadata: v, - } -} - -// Code returns the exception type name. -func (s IdentityTypeNotSupportedException) Code() string { - return "IdentityTypeNotSupportedException" -} - -// Message returns the exception's message. -func (s IdentityTypeNotSupportedException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s IdentityTypeNotSupportedException) OrigErr() error { - return nil +// SetNextToken sets the NextToken field's value. +func (s *ListTemplatesOutput) SetNextToken(v string) *ListTemplatesOutput { + s.NextToken = &v + return s } -func (s IdentityTypeNotSupportedException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetRequestId sets the RequestId field's value. 
+func (s *ListTemplatesOutput) SetRequestId(v string) *ListTemplatesOutput { + s.RequestId = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s IdentityTypeNotSupportedException) StatusCode() int { - return s.respMetadata.StatusCode +// SetStatus sets the Status field's value. +func (s *ListTemplatesOutput) SetStatus(v int64) *ListTemplatesOutput { + s.Status = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s IdentityTypeNotSupportedException) RequestID() string { - return s.respMetadata.RequestID +// SetTemplateSummaryList sets the TemplateSummaryList field's value. +func (s *ListTemplatesOutput) SetTemplateSummaryList(v []*TemplateSummary) *ListTemplatesOutput { + s.TemplateSummaryList = v + return s } -// Information about the SPICE ingestion for a dataset. -type Ingestion struct { +type ListThemeAliasesInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource. - // - // Arn is a required field - Arn *string `type:"string" required:"true"` - - // The time that this ingestion started. + // The ID of the AWS account that contains the theme aliases that you're listing. // - // CreatedTime is a required field - CreatedTime *time.Time `type:"timestamp" required:"true"` - - // Error information for this ingestion. - ErrorInfo *ErrorInfo `type:"structure"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Ingestion ID. - IngestionId *string `min:"1" type:"string"` + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-result" min:"1" type:"integer"` - // The size of the data ingested, in bytes. - IngestionSizeInBytes *int64 `type:"long"` + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` - // Ingestion status. + // The ID for the theme. // - // IngestionStatus is a required field - IngestionStatus *string `type:"string" required:"true" enum:"IngestionStatus"` - - // The time that this ingestion took, measured in seconds. - IngestionTimeInSeconds *int64 `type:"long"` - - // Information about a queued dataset SPICE ingestion. - QueueInfo *QueueInfo `type:"structure"` - - // Event source for this ingestion. - RequestSource *string `type:"string" enum:"IngestionRequestSource"` - - // Type of this ingestion. - RequestType *string `type:"string" enum:"IngestionRequestType"` - - // Information about rows for a data set SPICE ingestion. - RowInfo *RowInfo `type:"structure"` + // ThemeId is a required field + ThemeId *string `location:"uri" locationName:"ThemeId" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s Ingestion) String() string { +func (s ListThemeAliasesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Ingestion) GoString() string { +func (s ListThemeAliasesInput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *Ingestion) SetArn(v string) *Ingestion { - s.Arn = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListThemeAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListThemeAliasesInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeId")) + } + if s.ThemeId != nil && len(*s.ThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThemeId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetCreatedTime sets the CreatedTime field's value. -func (s *Ingestion) SetCreatedTime(v time.Time) *Ingestion { - s.CreatedTime = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListThemeAliasesInput) SetAwsAccountId(v string) *ListThemeAliasesInput { + s.AwsAccountId = &v return s } -// SetErrorInfo sets the ErrorInfo field's value. -func (s *Ingestion) SetErrorInfo(v *ErrorInfo) *Ingestion { - s.ErrorInfo = v +// SetMaxResults sets the MaxResults field's value. +func (s *ListThemeAliasesInput) SetMaxResults(v int64) *ListThemeAliasesInput { + s.MaxResults = &v return s } -// SetIngestionId sets the IngestionId field's value. -func (s *Ingestion) SetIngestionId(v string) *Ingestion { - s.IngestionId = &v +// SetNextToken sets the NextToken field's value. +func (s *ListThemeAliasesInput) SetNextToken(v string) *ListThemeAliasesInput { + s.NextToken = &v return s } -// SetIngestionSizeInBytes sets the IngestionSizeInBytes field's value. -func (s *Ingestion) SetIngestionSizeInBytes(v int64) *Ingestion { - s.IngestionSizeInBytes = &v +// SetThemeId sets the ThemeId field's value. +func (s *ListThemeAliasesInput) SetThemeId(v string) *ListThemeAliasesInput { + s.ThemeId = &v return s } -// SetIngestionStatus sets the IngestionStatus field's value. -func (s *Ingestion) SetIngestionStatus(v string) *Ingestion { - s.IngestionStatus = &v - return s +type ListThemeAliasesOutput struct { + _ struct{} `type:"structure"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // A structure containing the list of the theme's aliases. + ThemeAliasList []*ThemeAlias `type:"list"` +} + +// String returns the string representation +func (s ListThemeAliasesOutput) String() string { + return awsutil.Prettify(s) } -// SetIngestionTimeInSeconds sets the IngestionTimeInSeconds field's value. -func (s *Ingestion) SetIngestionTimeInSeconds(v int64) *Ingestion { - s.IngestionTimeInSeconds = &v - return s +// GoString returns the string representation +func (s ListThemeAliasesOutput) GoString() string { + return s.String() } -// SetQueueInfo sets the QueueInfo field's value. -func (s *Ingestion) SetQueueInfo(v *QueueInfo) *Ingestion { - s.QueueInfo = v +// SetNextToken sets the NextToken field's value. +func (s *ListThemeAliasesOutput) SetNextToken(v string) *ListThemeAliasesOutput { + s.NextToken = &v return s } -// SetRequestSource sets the RequestSource field's value. -func (s *Ingestion) SetRequestSource(v string) *Ingestion { - s.RequestSource = &v +// SetRequestId sets the RequestId field's value. 
+func (s *ListThemeAliasesOutput) SetRequestId(v string) *ListThemeAliasesOutput { + s.RequestId = &v return s } -// SetRequestType sets the RequestType field's value. -func (s *Ingestion) SetRequestType(v string) *Ingestion { - s.RequestType = &v +// SetStatus sets the Status field's value. +func (s *ListThemeAliasesOutput) SetStatus(v int64) *ListThemeAliasesOutput { + s.Status = &v return s } -// SetRowInfo sets the RowInfo field's value. -func (s *Ingestion) SetRowInfo(v *RowInfo) *Ingestion { - s.RowInfo = v +// SetThemeAliasList sets the ThemeAliasList field's value. +func (s *ListThemeAliasesOutput) SetThemeAliasList(v []*ThemeAlias) *ListThemeAliasesOutput { + s.ThemeAliasList = v return s } -// Metadata for a column that is used as the input of a transform operation. -type InputColumn struct { +type ListThemeVersionsInput struct { _ struct{} `type:"structure"` - // The name of this column in the underlying data source. + // The ID of the AWS account that contains the themes that you're listing. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The data type of the column. + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + + // The ID for the theme. // - // Type is a required field - Type *string `type:"string" required:"true" enum:"InputColumnDataType"` + // ThemeId is a required field + ThemeId *string `location:"uri" locationName:"ThemeId" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s InputColumn) String() string { +func (s ListThemeVersionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InputColumn) GoString() string { +func (s ListThemeVersionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *InputColumn) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InputColumn"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) +func (s *ListThemeVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListThemeVersionsInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeId")) + } + if s.ThemeId != nil && len(*s.ThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThemeId", 1)) } if invalidParams.Len() > 0 { @@ -15018,348 +24364,402 @@ func (s *InputColumn) Validate() error { return nil } -// SetName sets the Name field's value. 
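// Editor's note (illustrative sketch, not part of this patch or the vendored SDK):
// the generated Validate methods shown above can be called to catch parameter
// problems client-side before a request is sent. The account ID, theme ID, and
// page size below are placeholder assumptions.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	input := &quicksight.ListThemeVersionsInput{
		AwsAccountId: aws.String("111122223333"), // must be at least 12 characters
		ThemeId:      aws.String("example-theme"),
		MaxResults:   aws.Int64(10), // must be >= 1
	}
	if err := input.Validate(); err != nil {
		// err is a request.ErrInvalidParams listing every failed constraint.
		fmt.Println("invalid input:", err)
		return
	}
	fmt.Println("input passed client-side validation")
}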
-func (s *InputColumn) SetName(v string) *InputColumn { - s.Name = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListThemeVersionsInput) SetAwsAccountId(v string) *ListThemeVersionsInput { + s.AwsAccountId = &v return s } -// SetType sets the Type field's value. -func (s *InputColumn) SetType(v string) *InputColumn { - s.Type = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListThemeVersionsInput) SetMaxResults(v int64) *ListThemeVersionsInput { + s.MaxResults = &v return s } -// Integer parameter. -type IntegerParameter struct { +// SetNextToken sets the NextToken field's value. +func (s *ListThemeVersionsInput) SetNextToken(v string) *ListThemeVersionsInput { + s.NextToken = &v + return s +} + +// SetThemeId sets the ThemeId field's value. +func (s *ListThemeVersionsInput) SetThemeId(v string) *ListThemeVersionsInput { + s.ThemeId = &v + return s +} + +type ListThemeVersionsOutput struct { _ struct{} `type:"structure"` - // A display name for the dataset. - // - // Name is a required field - Name *string `type:"string" required:"true"` + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` - // Values. - // - // Values is a required field - Values []*int64 `type:"list" required:"true"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // A structure containing a list of all the versions of the specified theme. + ThemeVersionSummaryList []*ThemeVersionSummary `type:"list"` } // String returns the string representation -func (s IntegerParameter) String() string { +func (s ListThemeVersionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s IntegerParameter) GoString() string { +func (s ListThemeVersionsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *IntegerParameter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "IntegerParameter"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Values == nil { - invalidParams.Add(request.NewErrParamRequired("Values")) - } +// SetNextToken sets the NextToken field's value. +func (s *ListThemeVersionsOutput) SetNextToken(v string) *ListThemeVersionsOutput { + s.NextToken = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetRequestId sets the RequestId field's value. +func (s *ListThemeVersionsOutput) SetRequestId(v string) *ListThemeVersionsOutput { + s.RequestId = &v + return s } -// SetName sets the Name field's value. -func (s *IntegerParameter) SetName(v string) *IntegerParameter { - s.Name = &v +// SetStatus sets the Status field's value. +func (s *ListThemeVersionsOutput) SetStatus(v int64) *ListThemeVersionsOutput { + s.Status = &v return s } -// SetValues sets the Values field's value. -func (s *IntegerParameter) SetValues(v []*int64) *IntegerParameter { - s.Values = v +// SetThemeVersionSummaryList sets the ThemeVersionSummaryList field's value. +func (s *ListThemeVersionsOutput) SetThemeVersionSummaryList(v []*ThemeVersionSummary) *ListThemeVersionsOutput { + s.ThemeVersionSummaryList = v return s } -// An internal failure occurred. 
-type InternalFailureException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +type ListThemesInput struct { + _ struct{} `type:"structure"` - Message_ *string `locationName:"Message" type:"string"` + // The ID of the AWS account that contains the themes that you're listing. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The AWS request ID for this request. - RequestId *string `type:"string"` + // The maximum number of results to be returned per request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + + // The type of themes that you want to list. Valid options include the following: + // + // * ALL (default)- Display all existing themes. + // + // * CUSTOM - Display only the themes created by people using Amazon QuickSight. + // + // * QUICKSIGHT - Display only the starting themes defined by QuickSight. + Type *string `location:"querystring" locationName:"type" type:"string" enum:"ThemeType"` } // String returns the string representation -func (s InternalFailureException) String() string { +func (s ListThemesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InternalFailureException) GoString() string { +func (s ListThemesInput) GoString() string { return s.String() } -func newErrorInternalFailureException(v protocol.ResponseMetadata) error { - return &InternalFailureException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListThemesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListThemesInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } -} - -// Code returns the exception type name. -func (s InternalFailureException) Code() string { - return "InternalFailureException" -} -// Message returns the exception's message. -func (s InternalFailureException) Message() string { - if s.Message_ != nil { - return *s.Message_ + if invalidParams.Len() > 0 { + return invalidParams } - return "" + return nil } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalFailureException) OrigErr() error { - return nil +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListThemesInput) SetAwsAccountId(v string) *ListThemesInput { + s.AwsAccountId = &v + return s } -func (s InternalFailureException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetMaxResults sets the MaxResults field's value. +func (s *ListThemesInput) SetMaxResults(v int64) *ListThemesInput { + s.MaxResults = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s InternalFailureException) StatusCode() int { - return s.respMetadata.StatusCode +// SetNextToken sets the NextToken field's value. 
+func (s *ListThemesInput) SetNextToken(v string) *ListThemesInput { + s.NextToken = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s InternalFailureException) RequestID() string { - return s.respMetadata.RequestID +// SetType sets the Type field's value. +func (s *ListThemesInput) SetType(v string) *ListThemesInput { + s.Type = &v + return s } -// The NextToken value isn't valid. -type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +type ListThemesOutput struct { + _ struct{} `type:"structure"` - Message_ *string `locationName:"Message" type:"string"` + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` - // The AWS request ID for this request. + // The AWS request ID for this operation. RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // Information about the themes in the list. + ThemeSummaryList []*ThemeSummary `type:"list"` } // String returns the string representation -func (s InvalidNextTokenException) String() string { +func (s ListThemesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InvalidNextTokenException) GoString() string { +func (s ListThemesOutput) GoString() string { return s.String() } -func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { - return &InvalidNextTokenException{ - respMetadata: v, - } +// SetNextToken sets the NextToken field's value. +func (s *ListThemesOutput) SetNextToken(v string) *ListThemesOutput { + s.NextToken = &v + return s } -// Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { - return "InvalidNextTokenException" +// SetRequestId sets the RequestId field's value. +func (s *ListThemesOutput) SetRequestId(v string) *ListThemesOutput { + s.RequestId = &v + return s } -// Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetStatus sets the Status field's value. +func (s *ListThemesOutput) SetStatus(v int64) *ListThemesOutput { + s.Status = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { - return nil +// SetThemeSummaryList sets the ThemeSummaryList field's value. +func (s *ListThemesOutput) SetThemeSummaryList(v []*ThemeSummary) *ListThemesOutput { + s.ThemeSummaryList = v + return s } -func (s InvalidNextTokenException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) -} +type ListUserGroupsInput struct { + _ struct{} `type:"structure"` -// Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode -} + // The AWS account ID that the user is in. Currently, you use the ID for the + // AWS account that contains your Amazon QuickSight account. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` -// RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID -} + // The maximum number of results to return from this request. 
+ MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` -// One or more parameters has a value that isn't valid. -type InvalidParameterValueException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + // The namespace. Currently, you should set this to default. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` - Message_ *string `locationName:"Message" type:"string"` + // A pagination token that can be used in a subsequent request. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` - // The AWS request ID for this request. - RequestId *string `type:"string"` + // The Amazon QuickSight user name that you want to list group memberships for. + // + // UserName is a required field + UserName *string `location:"uri" locationName:"UserName" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s InvalidParameterValueException) String() string { +func (s ListUserGroupsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s InvalidParameterValueException) GoString() string { +func (s ListUserGroupsInput) GoString() string { return s.String() } -func newErrorInvalidParameterValueException(v protocol.ResponseMetadata) error { - return &InvalidParameterValueException{ - respMetadata: v, +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListUserGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListUserGroupsInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) } -} -// Code returns the exception type name. -func (s InvalidParameterValueException) Code() string { - return "InvalidParameterValueException" + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// Message returns the exception's message. -func (s InvalidParameterValueException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListUserGroupsInput) SetAwsAccountId(v string) *ListUserGroupsInput { + s.AwsAccountId = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterValueException) OrigErr() error { - return nil +// SetMaxResults sets the MaxResults field's value. +func (s *ListUserGroupsInput) SetMaxResults(v int64) *ListUserGroupsInput { + s.MaxResults = &v + return s } -func (s InvalidParameterValueException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetNamespace sets the Namespace field's value. 
+func (s *ListUserGroupsInput) SetNamespace(v string) *ListUserGroupsInput { + s.Namespace = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterValueException) StatusCode() int { - return s.respMetadata.StatusCode +// SetNextToken sets the NextToken field's value. +func (s *ListUserGroupsInput) SetNextToken(v string) *ListUserGroupsInput { + s.NextToken = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s InvalidParameterValueException) RequestID() string { - return s.respMetadata.RequestID +// SetUserName sets the UserName field's value. +func (s *ListUserGroupsInput) SetUserName(v string) *ListUserGroupsInput { + s.UserName = &v + return s } -// Jira parameters. -type JiraParameters struct { +type ListUserGroupsOutput struct { _ struct{} `type:"structure"` - // The base URL of the Jira site. - // - // SiteBaseUrl is a required field - SiteBaseUrl *string `min:"1" type:"string" required:"true"` + // The list of groups the user is a member of. + GroupList []*Group `type:"list"` + + // A pagination token that can be used in a subsequent request. + NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s JiraParameters) String() string { +func (s ListUserGroupsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JiraParameters) GoString() string { +func (s ListUserGroupsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *JiraParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "JiraParameters"} - if s.SiteBaseUrl == nil { - invalidParams.Add(request.NewErrParamRequired("SiteBaseUrl")) - } - if s.SiteBaseUrl != nil && len(*s.SiteBaseUrl) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SiteBaseUrl", 1)) - } +// SetGroupList sets the GroupList field's value. +func (s *ListUserGroupsOutput) SetGroupList(v []*Group) *ListUserGroupsOutput { + s.GroupList = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetNextToken sets the NextToken field's value. +func (s *ListUserGroupsOutput) SetNextToken(v string) *ListUserGroupsOutput { + s.NextToken = &v + return s } -// SetSiteBaseUrl sets the SiteBaseUrl field's value. -func (s *JiraParameters) SetSiteBaseUrl(v string) *JiraParameters { - s.SiteBaseUrl = &v +// SetRequestId sets the RequestId field's value. +func (s *ListUserGroupsOutput) SetRequestId(v string) *ListUserGroupsOutput { + s.RequestId = &v return s } -// Join instruction. -type JoinInstruction struct { +// SetStatus sets the Status field's value. +func (s *ListUserGroupsOutput) SetStatus(v int64) *ListUserGroupsOutput { + s.Status = &v + return s +} + +type ListUsersInput struct { _ struct{} `type:"structure"` - // Left operand. + // The ID for the AWS account that the user is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. // - // LeftOperand is a required field - LeftOperand *string `min:"1" type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // On Clause. 
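// Editor's note (illustrative sketch, not part of this patch or the vendored SDK):
// one way the ListUserGroupsInput/Output shapes defined above might be used with
// the generated client, paginating manually via NextToken. The session setup,
// account ID, namespace, and user name are placeholder assumptions.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := quicksight.New(sess)

	input := &quicksight.ListUserGroupsInput{
		AwsAccountId: aws.String("111122223333"),
		Namespace:    aws.String("default"),
		UserName:     aws.String("example-user"),
	}
	for {
		out, err := svc.ListUserGroups(input)
		if err != nil {
			fmt.Println("ListUserGroups failed:", err)
			return
		}
		for _, g := range out.GroupList {
			fmt.Println(aws.StringValue(g.GroupName))
		}
		if out.NextToken == nil {
			break // no more pages
		}
		input.NextToken = out.NextToken
	}
}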
- // - // OnClause is a required field - OnClause *string `min:"1" type:"string" required:"true"` + // The maximum number of results to return from this request. + MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` - // Right operand. + // The namespace. Currently, you should set this to default. // - // RightOperand is a required field - RightOperand *string `min:"1" type:"string" required:"true"` + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` - // Type. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"JoinType"` + // A pagination token that can be used in a subsequent request. + NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s JoinInstruction) String() string { +func (s ListUsersInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JoinInstruction) GoString() string { +func (s ListUsersInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *JoinInstruction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "JoinInstruction"} - if s.LeftOperand == nil { - invalidParams.Add(request.NewErrParamRequired("LeftOperand")) - } - if s.LeftOperand != nil && len(*s.LeftOperand) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LeftOperand", 1)) - } - if s.OnClause == nil { - invalidParams.Add(request.NewErrParamRequired("OnClause")) +func (s *ListUsersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListUsersInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.OnClause != nil && len(*s.OnClause) < 1 { - invalidParams.Add(request.NewErrParamMinLen("OnClause", 1)) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.RightOperand == nil { - invalidParams.Add(request.NewErrParamRequired("RightOperand")) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.RightOperand != nil && len(*s.RightOperand) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RightOperand", 1)) + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } if invalidParams.Len() > 0 { @@ -15368,140 +24768,140 @@ func (s *JoinInstruction) Validate() error { return nil } -// SetLeftOperand sets the LeftOperand field's value. -func (s *JoinInstruction) SetLeftOperand(v string) *JoinInstruction { - s.LeftOperand = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *ListUsersInput) SetAwsAccountId(v string) *ListUsersInput { + s.AwsAccountId = &v return s } -// SetOnClause sets the OnClause field's value. -func (s *JoinInstruction) SetOnClause(v string) *JoinInstruction { - s.OnClause = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListUsersInput) SetMaxResults(v int64) *ListUsersInput { + s.MaxResults = &v return s } -// SetRightOperand sets the RightOperand field's value. 
-func (s *JoinInstruction) SetRightOperand(v string) *JoinInstruction { - s.RightOperand = &v +// SetNamespace sets the Namespace field's value. +func (s *ListUsersInput) SetNamespace(v string) *ListUsersInput { + s.Namespace = &v return s } -// SetType sets the Type field's value. -func (s *JoinInstruction) SetType(v string) *JoinInstruction { - s.Type = &v +// SetNextToken sets the NextToken field's value. +func (s *ListUsersInput) SetNextToken(v string) *ListUsersInput { + s.NextToken = &v return s } -// A limit is exceeded. -type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +type ListUsersOutput struct { + _ struct{} `type:"structure"` - Message_ *string `locationName:"Message" type:"string"` + // A pagination token that can be used in a subsequent request. + NextToken *string `type:"string"` - // The AWS request ID for this request. + // The AWS request ID for this operation. RequestId *string `type:"string"` - // Limit exceeded. - ResourceType *string `type:"string" enum:"ExceptionResourceType"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` + + // The list of users. + UserList []*User `type:"list"` } // String returns the string representation -func (s LimitExceededException) String() string { +func (s ListUsersOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LimitExceededException) GoString() string { +func (s ListUsersOutput) GoString() string { return s.String() } -func newErrorLimitExceededException(v protocol.ResponseMetadata) error { - return &LimitExceededException{ - respMetadata: v, - } -} - -// Code returns the exception type name. -func (s LimitExceededException) Code() string { - return "LimitExceededException" -} - -// Message returns the exception's message. -func (s LimitExceededException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { - return nil +// SetNextToken sets the NextToken field's value. +func (s *ListUsersOutput) SetNextToken(v string) *ListUsersOutput { + s.NextToken = &v + return s } -func (s LimitExceededException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetRequestId sets the RequestId field's value. +func (s *ListUsersOutput) SetRequestId(v string) *ListUsersOutput { + s.RequestId = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +// SetStatus sets the Status field's value. +func (s *ListUsersOutput) SetStatus(v int64) *ListUsersOutput { + s.Status = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +// SetUserList sets the UserList field's value. +func (s *ListUsersOutput) SetUserList(v []*User) *ListUsersOutput { + s.UserList = v + return s } -type ListDashboardVersionsInput struct { +// A logical table is a unit that joins and that data transformations operate +// on. A logical table has a source, which can be either a physical table or +// result of a join. When a logical table points to a physical table, the logical +// table acts as a mutable copy of that physical table through transform operations. 
+type LogicalTable struct { _ struct{} `type:"structure"` - // The ID of the AWS account that contains the dashboard that you're listing - // versions for. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - - // The ID for the dashboard. + // A display name for the logical table. // - // DashboardId is a required field - DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + // Alias is a required field + Alias *string `min:"1" type:"string" required:"true"` - // The maximum number of results to be returned per request. - MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + // Transform operations that act on this logical table. + DataTransforms []*TransformOperation `min:"1" type:"list"` - // The token for the next set of results, or null if there are no more results. - NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + // Source of this logical table. + // + // Source is a required field + Source *LogicalTableSource `type:"structure" required:"true"` } // String returns the string representation -func (s ListDashboardVersionsInput) String() string { +func (s LogicalTable) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDashboardVersionsInput) GoString() string { +func (s LogicalTable) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListDashboardVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDashboardVersionsInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) +func (s *LogicalTable) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LogicalTable"} + if s.Alias == nil { + invalidParams.Add(request.NewErrParamRequired("Alias")) } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + if s.Alias != nil && len(*s.Alias) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Alias", 1)) } - if s.DashboardId == nil { - invalidParams.Add(request.NewErrParamRequired("DashboardId")) + if s.DataTransforms != nil && len(s.DataTransforms) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataTransforms", 1)) } - if s.DashboardId != nil && len(*s.DashboardId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.DataTransforms != nil { + for i, v := range s.DataTransforms { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DataTransforms", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Source != nil { + if err := s.Source.Validate(); err != nil { + invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -15510,116 +24910,115 @@ func (s *ListDashboardVersionsInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. 
-func (s *ListDashboardVersionsInput) SetAwsAccountId(v string) *ListDashboardVersionsInput { - s.AwsAccountId = &v - return s -} - -// SetDashboardId sets the DashboardId field's value. -func (s *ListDashboardVersionsInput) SetDashboardId(v string) *ListDashboardVersionsInput { - s.DashboardId = &v +// SetAlias sets the Alias field's value. +func (s *LogicalTable) SetAlias(v string) *LogicalTable { + s.Alias = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListDashboardVersionsInput) SetMaxResults(v int64) *ListDashboardVersionsInput { - s.MaxResults = &v +// SetDataTransforms sets the DataTransforms field's value. +func (s *LogicalTable) SetDataTransforms(v []*TransformOperation) *LogicalTable { + s.DataTransforms = v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListDashboardVersionsInput) SetNextToken(v string) *ListDashboardVersionsInput { - s.NextToken = &v +// SetSource sets the Source field's value. +func (s *LogicalTable) SetSource(v *LogicalTableSource) *LogicalTable { + s.Source = v return s } -type ListDashboardVersionsOutput struct { +// Information about the source of a logical table. This is a variant type structure. +// For this structure to be valid, only one of the attributes can be non-null. +type LogicalTableSource struct { _ struct{} `type:"structure"` - // A structure that contains information about each version of the dashboard. - DashboardVersionSummaryList []*DashboardVersionSummary `type:"list"` - - // The token for the next set of results, or null if there are no more results. - NextToken *string `type:"string"` - - // The AWS request ID for this operation. - RequestId *string `type:"string"` + // Specifies the result of a join of two logical tables. + JoinInstruction *JoinInstruction `type:"structure"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // Physical table ID. + PhysicalTableId *string `min:"1" type:"string"` } // String returns the string representation -func (s ListDashboardVersionsOutput) String() string { +func (s LogicalTableSource) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDashboardVersionsOutput) GoString() string { +func (s LogicalTableSource) GoString() string { return s.String() } -// SetDashboardVersionSummaryList sets the DashboardVersionSummaryList field's value. -func (s *ListDashboardVersionsOutput) SetDashboardVersionSummaryList(v []*DashboardVersionSummary) *ListDashboardVersionsOutput { - s.DashboardVersionSummaryList = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *LogicalTableSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LogicalTableSource"} + if s.PhysicalTableId != nil && len(*s.PhysicalTableId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PhysicalTableId", 1)) + } + if s.JoinInstruction != nil { + if err := s.JoinInstruction.Validate(); err != nil { + invalidParams.AddNested("JoinInstruction", err.(request.ErrInvalidParams)) + } + } -// SetNextToken sets the NextToken field's value. -func (s *ListDashboardVersionsOutput) SetNextToken(v string) *ListDashboardVersionsOutput { - s.NextToken = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetRequestId sets the RequestId field's value. 
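// Editor's note (illustrative sketch, not part of this patch or the vendored SDK):
// building the LogicalTable/LogicalTableSource structures defined above, pointing
// a logical table at a physical table by ID and running the generated validation.
// The alias and physical table ID are placeholder assumptions.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	table := &quicksight.LogicalTable{
		Alias: aws.String("orders"),
		// LogicalTableSource is a variant type: set only one of its attributes.
		Source: &quicksight.LogicalTableSource{
			PhysicalTableId: aws.String("physical-table-1"),
		},
	}
	if err := table.Validate(); err != nil {
		fmt.Println("invalid logical table:", err)
		return
	}
	fmt.Println("logical table is structurally valid")
}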
-func (s *ListDashboardVersionsOutput) SetRequestId(v string) *ListDashboardVersionsOutput { - s.RequestId = &v +// SetJoinInstruction sets the JoinInstruction field's value. +func (s *LogicalTableSource) SetJoinInstruction(v *JoinInstruction) *LogicalTableSource { + s.JoinInstruction = v return s } -// SetStatus sets the Status field's value. -func (s *ListDashboardVersionsOutput) SetStatus(v int64) *ListDashboardVersionsOutput { - s.Status = &v +// SetPhysicalTableId sets the PhysicalTableId field's value. +func (s *LogicalTableSource) SetPhysicalTableId(v string) *LogicalTableSource { + s.PhysicalTableId = &v return s } -type ListDashboardsInput struct { +// Amazon S3 manifest file location. +type ManifestFileLocation struct { _ struct{} `type:"structure"` - // The ID of the AWS account that contains the dashboards that you're listing. + // Amazon S3 bucket. // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - - // The maximum number of results to be returned per request. - MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + // Bucket is a required field + Bucket *string `min:"1" type:"string" required:"true"` - // The token for the next set of results, or null if there are no more results. - NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + // Amazon S3 key that identifies an object. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ListDashboardsInput) String() string { +func (s ManifestFileLocation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDashboardsInput) GoString() string { +func (s ManifestFileLocation) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListDashboardsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDashboardsInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) +func (s *ManifestFileLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ManifestFileLocation"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } if invalidParams.Len() > 0 { @@ -15628,111 +25027,168 @@ func (s *ListDashboardsInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListDashboardsInput) SetAwsAccountId(v string) *ListDashboardsInput { - s.AwsAccountId = &v +// SetBucket sets the Bucket field's value. +func (s *ManifestFileLocation) SetBucket(v string) *ManifestFileLocation { + s.Bucket = &v return s } -// SetMaxResults sets the MaxResults field's value. 
-func (s *ListDashboardsInput) SetMaxResults(v int64) *ListDashboardsInput { - s.MaxResults = &v +// SetKey sets the Key field's value. +func (s *ManifestFileLocation) SetKey(v string) *ManifestFileLocation { + s.Key = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListDashboardsInput) SetNextToken(v string) *ListDashboardsInput { - s.NextToken = &v +// The display options for margins around the outside edge of sheets. +type MarginStyle struct { + _ struct{} `type:"structure"` + + // This Boolean value controls whether to display sheet margins. + Show *bool `type:"boolean"` +} + +// String returns the string representation +func (s MarginStyle) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MarginStyle) GoString() string { + return s.String() +} + +// SetShow sets the Show field's value. +func (s *MarginStyle) SetShow(v bool) *MarginStyle { + s.Show = &v return s } -type ListDashboardsOutput struct { +// MariaDB parameters. +type MariaDbParameters struct { _ struct{} `type:"structure"` - // A structure that contains all of the dashboards shared with the user. This - // structure provides basic information about the dashboards. - DashboardSummaryList []*DashboardSummary `type:"list"` - - // The token for the next set of results, or null if there are no more results. - NextToken *string `type:"string"` + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` - // The AWS request ID for this operation. - RequestId *string `type:"string"` + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` } // String returns the string representation -func (s ListDashboardsOutput) String() string { +func (s MariaDbParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDashboardsOutput) GoString() string { +func (s MariaDbParameters) GoString() string { return s.String() } -// SetDashboardSummaryList sets the DashboardSummaryList field's value. -func (s *ListDashboardsOutput) SetDashboardSummaryList(v []*DashboardSummary) *ListDashboardsOutput { - s.DashboardSummaryList = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *MariaDbParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MariaDbParameters"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) + } + if s.Host == nil { + invalidParams.Add(request.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Host", 1)) + } + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetNextToken sets the NextToken field's value. -func (s *ListDashboardsOutput) SetNextToken(v string) *ListDashboardsOutput { - s.NextToken = &v +// SetDatabase sets the Database field's value. 
+func (s *MariaDbParameters) SetDatabase(v string) *MariaDbParameters { + s.Database = &v return s } -// SetRequestId sets the RequestId field's value. -func (s *ListDashboardsOutput) SetRequestId(v string) *ListDashboardsOutput { - s.RequestId = &v +// SetHost sets the Host field's value. +func (s *MariaDbParameters) SetHost(v string) *MariaDbParameters { + s.Host = &v return s } -// SetStatus sets the Status field's value. -func (s *ListDashboardsOutput) SetStatus(v int64) *ListDashboardsOutput { - s.Status = &v +// SetPort sets the Port field's value. +func (s *MariaDbParameters) SetPort(v int64) *MariaDbParameters { + s.Port = &v return s } -type ListDataSetsInput struct { +// MySQL parameters. +type MySqlParameters struct { _ struct{} `type:"structure"` - // The AWS account ID. + // Database. // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` - // The maximum number of results to be returned per request. - MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` - // The token for the next set of results, or null if there are no more results. - NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` } // String returns the string representation -func (s ListDataSetsInput) String() string { +func (s MySqlParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDataSetsInput) GoString() string { +func (s MySqlParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListDataSetsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDataSetsInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) +func (s *MySqlParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MySqlParameters"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.Host == nil { + invalidParams.Add(request.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Host", 1)) + } + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) } if invalidParams.Len() > 0 { @@ -15741,245 +25197,228 @@ func (s *ListDataSetsInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListDataSetsInput) SetAwsAccountId(v string) *ListDataSetsInput { - s.AwsAccountId = &v +// SetDatabase sets the Database field's value. 
+func (s *MySqlParameters) SetDatabase(v string) *MySqlParameters { + s.Database = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListDataSetsInput) SetMaxResults(v int64) *ListDataSetsInput { - s.MaxResults = &v +// SetHost sets the Host field's value. +func (s *MySqlParameters) SetHost(v string) *MySqlParameters { + s.Host = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListDataSetsInput) SetNextToken(v string) *ListDataSetsInput { - s.NextToken = &v +// SetPort sets the Port field's value. +func (s *MySqlParameters) SetPort(v int64) *MySqlParameters { + s.Port = &v return s } -type ListDataSetsOutput struct { +// Errors that occur during namespace creation. +type NamespaceError struct { _ struct{} `type:"structure"` - // The list of dataset summaries. - DataSetSummaries []*DataSetSummary `type:"list"` - - // The token for the next set of results, or null if there are no more results. - NextToken *string `type:"string"` - - // The AWS request ID for this operation. - RequestId *string `type:"string"` + // The message for the error. + Message *string `type:"string"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // The error type. + Type *string `type:"string" enum:"NamespaceErrorType"` } // String returns the string representation -func (s ListDataSetsOutput) String() string { +func (s NamespaceError) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDataSetsOutput) GoString() string { +func (s NamespaceError) GoString() string { return s.String() } -// SetDataSetSummaries sets the DataSetSummaries field's value. -func (s *ListDataSetsOutput) SetDataSetSummaries(v []*DataSetSummary) *ListDataSetsOutput { - s.DataSetSummaries = v +// SetMessage sets the Message field's value. +func (s *NamespaceError) SetMessage(v string) *NamespaceError { + s.Message = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListDataSetsOutput) SetNextToken(v string) *ListDataSetsOutput { - s.NextToken = &v +// SetType sets the Type field's value. +func (s *NamespaceError) SetType(v string) *NamespaceError { + s.Type = &v return s } -// SetRequestId sets the RequestId field's value. -func (s *ListDataSetsOutput) SetRequestId(v string) *ListDataSetsOutput { - s.RequestId = &v - return s -} +// The error type. +type NamespaceInfoV2 struct { + _ struct{} `type:"structure"` -// SetStatus sets the Status field's value. -func (s *ListDataSetsOutput) SetStatus(v int64) *ListDataSetsOutput { - s.Status = &v - return s -} + // The namespace ARN. + Arn *string `type:"string"` -type ListDataSourcesInput struct { - _ struct{} `type:"structure"` + // The namespace AWS Region. + CapacityRegion *string `type:"string"` - // The AWS account ID. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + // The creation status of a namespace that is not yet completely created. + CreationStatus *string `type:"string" enum:"NamespaceStatus"` - // The maximum number of results to be returned per request. - MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + // The identity store used for the namespace. + IdentityStore *string `type:"string" enum:"IdentityStore"` - // The token for the next set of results, or null if there are no more results. 
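// Editor's note (illustrative sketch, not part of this patch or the vendored SDK):
// the MySqlParameters structure defined above with its required Database/Host/Port
// fields, checked with the generated validation. The connection values are
// placeholder assumptions.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	params := &quicksight.MySqlParameters{
		Database: aws.String("reporting"),
		Host:     aws.String("db.example.com"),
		Port:     aws.Int64(3306), // must be >= 1
	}
	if err := params.Validate(); err != nil {
		fmt.Println("invalid MySQL parameters:", err)
		return
	}
	fmt.Println("MySQL data source parameters are valid")
}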
- NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + // The name of the error. + Name *string `type:"string"` + + // An error that occurred when the namespace was created. + NamespaceError *NamespaceError `type:"structure"` } // String returns the string representation -func (s ListDataSourcesInput) String() string { +func (s NamespaceInfoV2) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDataSourcesInput) GoString() string { +func (s NamespaceInfoV2) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListDataSourcesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListDataSourcesInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetArn sets the Arn field's value. +func (s *NamespaceInfoV2) SetArn(v string) *NamespaceInfoV2 { + s.Arn = &v + return s } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListDataSourcesInput) SetAwsAccountId(v string) *ListDataSourcesInput { - s.AwsAccountId = &v +// SetCapacityRegion sets the CapacityRegion field's value. +func (s *NamespaceInfoV2) SetCapacityRegion(v string) *NamespaceInfoV2 { + s.CapacityRegion = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListDataSourcesInput) SetMaxResults(v int64) *ListDataSourcesInput { - s.MaxResults = &v +// SetCreationStatus sets the CreationStatus field's value. +func (s *NamespaceInfoV2) SetCreationStatus(v string) *NamespaceInfoV2 { + s.CreationStatus = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListDataSourcesInput) SetNextToken(v string) *ListDataSourcesInput { - s.NextToken = &v +// SetIdentityStore sets the IdentityStore field's value. +func (s *NamespaceInfoV2) SetIdentityStore(v string) *NamespaceInfoV2 { + s.IdentityStore = &v return s } -type ListDataSourcesOutput struct { - _ struct{} `type:"structure"` +// SetName sets the Name field's value. +func (s *NamespaceInfoV2) SetName(v string) *NamespaceInfoV2 { + s.Name = &v + return s +} - // A list of data sources. - DataSources []*DataSource `type:"list"` +// SetNamespaceError sets the NamespaceError field's value. +func (s *NamespaceInfoV2) SetNamespaceError(v *NamespaceError) *NamespaceInfoV2 { + s.NamespaceError = v + return s +} - // The token for the next set of results, or null if there are no more results. - NextToken *string `type:"string"` +// Output column. +type OutputColumn struct { + _ struct{} `type:"structure"` - // The AWS request ID for this operation. - RequestId *string `type:"string"` + // A display name for the dataset. + Name *string `min:"1" type:"string"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // Type. 
+ Type *string `type:"string" enum:"ColumnDataType"` } // String returns the string representation -func (s ListDataSourcesOutput) String() string { +func (s OutputColumn) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListDataSourcesOutput) GoString() string { +func (s OutputColumn) GoString() string { return s.String() } -// SetDataSources sets the DataSources field's value. -func (s *ListDataSourcesOutput) SetDataSources(v []*DataSource) *ListDataSourcesOutput { - s.DataSources = v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListDataSourcesOutput) SetNextToken(v string) *ListDataSourcesOutput { - s.NextToken = &v - return s -} - -// SetRequestId sets the RequestId field's value. -func (s *ListDataSourcesOutput) SetRequestId(v string) *ListDataSourcesOutput { - s.RequestId = &v +// SetName sets the Name field's value. +func (s *OutputColumn) SetName(v string) *OutputColumn { + s.Name = &v return s } -// SetStatus sets the Status field's value. -func (s *ListDataSourcesOutput) SetStatus(v int64) *ListDataSourcesOutput { - s.Status = &v +// SetType sets the Type field's value. +func (s *OutputColumn) SetType(v string) *OutputColumn { + s.Type = &v return s -} - -type ListGroupMembershipsInput struct { - _ struct{} `type:"structure"` - - // The ID for the AWS account that the group is in. Currently, you use the ID - // for the AWS account that contains your Amazon QuickSight account. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` +} - // The name of the group that you want to see a membership list of. - // - // GroupName is a required field - GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` +// A list of QuickSight parameters and the list's override values. +type Parameters struct { + _ struct{} `type:"structure"` - // The maximum number of results to return from this request. - MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + // Date-time parameters. + DateTimeParameters []*DateTimeParameter `type:"list"` - // The namespace. Currently, you should set this to default. - // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // Decimal parameters. + DecimalParameters []*DecimalParameter `type:"list"` - // A pagination token that can be used in a subsequent request. - NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + // Integer parameters. + IntegerParameters []*IntegerParameter `type:"list"` + + // String parameters. + StringParameters []*StringParameter `type:"list"` } // String returns the string representation -func (s ListGroupMembershipsInput) String() string { +func (s Parameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListGroupMembershipsInput) GoString() string { +func (s Parameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListGroupMembershipsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListGroupMembershipsInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) - } - if s.GroupName != nil && len(*s.GroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) +func (s *Parameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Parameters"} + if s.DateTimeParameters != nil { + for i, v := range s.DateTimeParameters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DateTimeParameters", i), err.(request.ErrInvalidParams)) + } + } } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.DecimalParameters != nil { + for i, v := range s.DecimalParameters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DecimalParameters", i), err.(request.ErrInvalidParams)) + } + } } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) + if s.IntegerParameters != nil { + for i, v := range s.IntegerParameters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "IntegerParameters", i), err.(request.ErrInvalidParams)) + } + } } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + if s.StringParameters != nil { + for i, v := range s.StringParameters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "StringParameters", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -15988,134 +25427,149 @@ func (s *ListGroupMembershipsInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListGroupMembershipsInput) SetAwsAccountId(v string) *ListGroupMembershipsInput { - s.AwsAccountId = &v - return s -} - -// SetGroupName sets the GroupName field's value. -func (s *ListGroupMembershipsInput) SetGroupName(v string) *ListGroupMembershipsInput { - s.GroupName = &v +// SetDateTimeParameters sets the DateTimeParameters field's value. +func (s *Parameters) SetDateTimeParameters(v []*DateTimeParameter) *Parameters { + s.DateTimeParameters = v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListGroupMembershipsInput) SetMaxResults(v int64) *ListGroupMembershipsInput { - s.MaxResults = &v +// SetDecimalParameters sets the DecimalParameters field's value. +func (s *Parameters) SetDecimalParameters(v []*DecimalParameter) *Parameters { + s.DecimalParameters = v return s } -// SetNamespace sets the Namespace field's value. -func (s *ListGroupMembershipsInput) SetNamespace(v string) *ListGroupMembershipsInput { - s.Namespace = &v +// SetIntegerParameters sets the IntegerParameters field's value. +func (s *Parameters) SetIntegerParameters(v []*IntegerParameter) *Parameters { + s.IntegerParameters = v return s } -// SetNextToken sets the NextToken field's value. 
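// Editor's sketch (not part of the vendored file): Parameters.Validate above fans out
// to each nested parameter slice and reports nested failures with an indexed context
// such as "StringParameters[1]". This assumes StringParameter carries the Name/Values
// fields used elsewhere in this generated file.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	p := &quicksight.Parameters{
		StringParameters: []*quicksight.StringParameter{
			{
				Name:   aws.String("Region"),
				Values: aws.StringSlice([]string{"us-east-1"}),
			},
			{
				// Missing Name/Values: Validate reports this entry as StringParameters[1].
			},
		},
	}
	if err := p.Validate(); err != nil {
		fmt.Println(err)
	}
}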
-func (s *ListGroupMembershipsInput) SetNextToken(v string) *ListGroupMembershipsInput { - s.NextToken = &v +// SetStringParameters sets the StringParameters field's value. +func (s *Parameters) SetStringParameters(v []*StringParameter) *Parameters { + s.StringParameters = v return s } -type ListGroupMembershipsOutput struct { +// A view of a data source that contains information about the shape of the +// data in the underlying source. This is a variant type structure. For this +// structure to be valid, only one of the attributes can be non-null. +type PhysicalTable struct { _ struct{} `type:"structure"` - // The list of the members of the group. - GroupMemberList []*GroupMember `type:"list"` - - // A pagination token that can be used in a subsequent request. - NextToken *string `type:"string"` + // A physical table type built from the results of the custom SQL query. + CustomSql *CustomSql `type:"structure"` - // The AWS request ID for this operation. - RequestId *string `type:"string"` + // A physical table type for relational data sources. + RelationalTable *RelationalTable `type:"structure"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // A physical table type for as S3 data source. + S3Source *S3Source `type:"structure"` } // String returns the string representation -func (s ListGroupMembershipsOutput) String() string { +func (s PhysicalTable) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListGroupMembershipsOutput) GoString() string { +func (s PhysicalTable) GoString() string { return s.String() } -// SetGroupMemberList sets the GroupMemberList field's value. -func (s *ListGroupMembershipsOutput) SetGroupMemberList(v []*GroupMember) *ListGroupMembershipsOutput { - s.GroupMemberList = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *PhysicalTable) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PhysicalTable"} + if s.CustomSql != nil { + if err := s.CustomSql.Validate(); err != nil { + invalidParams.AddNested("CustomSql", err.(request.ErrInvalidParams)) + } + } + if s.RelationalTable != nil { + if err := s.RelationalTable.Validate(); err != nil { + invalidParams.AddNested("RelationalTable", err.(request.ErrInvalidParams)) + } + } + if s.S3Source != nil { + if err := s.S3Source.Validate(); err != nil { + invalidParams.AddNested("S3Source", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetNextToken sets the NextToken field's value. -func (s *ListGroupMembershipsOutput) SetNextToken(v string) *ListGroupMembershipsOutput { - s.NextToken = &v +// SetCustomSql sets the CustomSql field's value. +func (s *PhysicalTable) SetCustomSql(v *CustomSql) *PhysicalTable { + s.CustomSql = v return s } -// SetRequestId sets the RequestId field's value. -func (s *ListGroupMembershipsOutput) SetRequestId(v string) *ListGroupMembershipsOutput { - s.RequestId = &v +// SetRelationalTable sets the RelationalTable field's value. +func (s *PhysicalTable) SetRelationalTable(v *RelationalTable) *PhysicalTable { + s.RelationalTable = v return s } -// SetStatus sets the Status field's value. -func (s *ListGroupMembershipsOutput) SetStatus(v int64) *ListGroupMembershipsOutput { - s.Status = &v +// SetS3Source sets the S3Source field's value. 
+func (s *PhysicalTable) SetS3Source(v *S3Source) *PhysicalTable { + s.S3Source = v return s } -type ListGroupsInput struct { +// PostgreSQL parameters. +type PostgreSqlParameters struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the group is in. Currently, you use the ID - // for the AWS account that contains your Amazon QuickSight account. + // Database. // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - - // The maximum number of results to return. - MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` - // The namespace. Currently, you should set this to default. + // Host. // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` - // A pagination token that can be used in a subsequent request. - NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` } // String returns the string representation -func (s ListGroupsInput) String() string { +func (s PostgreSqlParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListGroupsInput) GoString() string { +func (s PostgreSqlParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListGroupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListGroupsInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) +func (s *PostgreSqlParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PostgreSqlParameters"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.Host == nil { + invalidParams.Add(request.NewErrParamRequired("Host")) } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Host", 1)) } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) } if invalidParams.Len() > 0 { @@ -16124,138 +25578,130 @@ func (s *ListGroupsInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListGroupsInput) SetAwsAccountId(v string) *ListGroupsInput { - s.AwsAccountId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListGroupsInput) SetMaxResults(v int64) *ListGroupsInput { - s.MaxResults = &v - return s -} - -// SetNamespace sets the Namespace field's value. 
-func (s *ListGroupsInput) SetNamespace(v string) *ListGroupsInput { - s.Namespace = &v +// SetDatabase sets the Database field's value. +func (s *PostgreSqlParameters) SetDatabase(v string) *PostgreSqlParameters { + s.Database = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListGroupsInput) SetNextToken(v string) *ListGroupsInput { - s.NextToken = &v +// SetHost sets the Host field's value. +func (s *PostgreSqlParameters) SetHost(v string) *PostgreSqlParameters { + s.Host = &v return s } -type ListGroupsOutput struct { - _ struct{} `type:"structure"` +// SetPort sets the Port field's value. +func (s *PostgreSqlParameters) SetPort(v int64) *PostgreSqlParameters { + s.Port = &v + return s +} - // The list of the groups. - GroupList []*Group `type:"list"` +// One or more preconditions aren't met. +type PreconditionNotMetException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // A pagination token that can be used in a subsequent request. - NextToken *string `type:"string"` + Message_ *string `locationName:"Message" type:"string"` - // The AWS request ID for this operation. + // The AWS request ID for this request. RequestId *string `type:"string"` - - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s ListGroupsOutput) String() string { +func (s PreconditionNotMetException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListGroupsOutput) GoString() string { +func (s PreconditionNotMetException) GoString() string { return s.String() } -// SetGroupList sets the GroupList field's value. -func (s *ListGroupsOutput) SetGroupList(v []*Group) *ListGroupsOutput { - s.GroupList = v - return s +func newErrorPreconditionNotMetException(v protocol.ResponseMetadata) error { + return &PreconditionNotMetException{ + RespMetadata: v, + } } -// SetNextToken sets the NextToken field's value. -func (s *ListGroupsOutput) SetNextToken(v string) *ListGroupsOutput { - s.NextToken = &v - return s +// Code returns the exception type name. +func (s *PreconditionNotMetException) Code() string { + return "PreconditionNotMetException" } -// SetRequestId sets the RequestId field's value. -func (s *ListGroupsOutput) SetRequestId(v string) *ListGroupsOutput { - s.RequestId = &v - return s +// Message returns the exception's message. +func (s *PreconditionNotMetException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -// SetStatus sets the Status field's value. -func (s *ListGroupsOutput) SetStatus(v int64) *ListGroupsOutput { - s.Status = &v - return s +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *PreconditionNotMetException) OrigErr() error { + return nil } -type ListIAMPolicyAssignmentsForUserInput struct { - _ struct{} `type:"structure"` +func (s *PreconditionNotMetException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} - // The ID of the AWS account that contains the assignments. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` +// Status code returns the HTTP status code for the request's response error. +func (s *PreconditionNotMetException) StatusCode() int { + return s.RespMetadata.StatusCode +} - // The maximum number of results to be returned per request. 
- MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` +// RequestID returns the service's response RequestID for request. +func (s *PreconditionNotMetException) RequestID() string { + return s.RespMetadata.RequestID +} - // The namespace of the assignment. +// Presto parameters. +type PrestoParameters struct { + _ struct{} `type:"structure"` + + // Catalog. // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // Catalog is a required field + Catalog *string `type:"string" required:"true"` - // The token for the next set of results, or null if there are no more results. - NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` - // The name of the user. + // Port. // - // UserName is a required field - UserName *string `location:"uri" locationName:"UserName" min:"1" type:"string" required:"true"` + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` } // String returns the string representation -func (s ListIAMPolicyAssignmentsForUserInput) String() string { +func (s PrestoParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListIAMPolicyAssignmentsForUserInput) GoString() string { +func (s PrestoParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListIAMPolicyAssignmentsForUserInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListIAMPolicyAssignmentsForUserInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) +func (s *PrestoParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PrestoParameters"} + if s.Catalog == nil { + invalidParams.Add(request.NewErrParamRequired("Catalog")) } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) + if s.Host == nil { + invalidParams.Add(request.NewErrParamRequired("Host")) } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Host", 1)) } - if s.UserName == nil { - invalidParams.Add(request.NewErrParamRequired("UserName")) + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) } - if s.UserName != nil && len(*s.UserName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) } if invalidParams.Len() > 0 { @@ -16264,136 +25710,144 @@ func (s *ListIAMPolicyAssignmentsForUserInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListIAMPolicyAssignmentsForUserInput) SetAwsAccountId(v string) *ListIAMPolicyAssignmentsForUserInput { - s.AwsAccountId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. 
-func (s *ListIAMPolicyAssignmentsForUserInput) SetMaxResults(v int64) *ListIAMPolicyAssignmentsForUserInput { - s.MaxResults = &v - return s -} - -// SetNamespace sets the Namespace field's value. -func (s *ListIAMPolicyAssignmentsForUserInput) SetNamespace(v string) *ListIAMPolicyAssignmentsForUserInput { - s.Namespace = &v +// SetCatalog sets the Catalog field's value. +func (s *PrestoParameters) SetCatalog(v string) *PrestoParameters { + s.Catalog = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListIAMPolicyAssignmentsForUserInput) SetNextToken(v string) *ListIAMPolicyAssignmentsForUserInput { - s.NextToken = &v +// SetHost sets the Host field's value. +func (s *PrestoParameters) SetHost(v string) *PrestoParameters { + s.Host = &v return s } -// SetUserName sets the UserName field's value. -func (s *ListIAMPolicyAssignmentsForUserInput) SetUserName(v string) *ListIAMPolicyAssignmentsForUserInput { - s.UserName = &v +// SetPort sets the Port field's value. +func (s *PrestoParameters) SetPort(v int64) *PrestoParameters { + s.Port = &v return s } -type ListIAMPolicyAssignmentsForUserOutput struct { +// A transform operation that projects columns. Operations that come after a +// projection can only refer to projected columns. +type ProjectOperation struct { _ struct{} `type:"structure"` - // The active assignments for this user. - ActiveAssignments []*ActiveIAMPolicyAssignment `type:"list"` - - // The token for the next set of results, or null if there are no more results. - NextToken *string `type:"string"` - - // The AWS request ID for this operation. - RequestId *string `type:"string"` - - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // Projected columns. + // + // ProjectedColumns is a required field + ProjectedColumns []*string `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s ListIAMPolicyAssignmentsForUserOutput) String() string { +func (s ProjectOperation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListIAMPolicyAssignmentsForUserOutput) GoString() string { +func (s ProjectOperation) GoString() string { return s.String() } -// SetActiveAssignments sets the ActiveAssignments field's value. -func (s *ListIAMPolicyAssignmentsForUserOutput) SetActiveAssignments(v []*ActiveIAMPolicyAssignment) *ListIAMPolicyAssignmentsForUserOutput { - s.ActiveAssignments = v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProjectOperation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProjectOperation"} + if s.ProjectedColumns == nil { + invalidParams.Add(request.NewErrParamRequired("ProjectedColumns")) + } + if s.ProjectedColumns != nil && len(s.ProjectedColumns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProjectedColumns", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetNextToken sets the NextToken field's value. -func (s *ListIAMPolicyAssignmentsForUserOutput) SetNextToken(v string) *ListIAMPolicyAssignmentsForUserOutput { - s.NextToken = &v +// SetProjectedColumns sets the ProjectedColumns field's value. +func (s *ProjectOperation) SetProjectedColumns(v []*string) *ProjectOperation { + s.ProjectedColumns = v return s } -// SetRequestId sets the RequestId field's value. 
-func (s *ListIAMPolicyAssignmentsForUserOutput) SetRequestId(v string) *ListIAMPolicyAssignmentsForUserOutput { - s.RequestId = &v - return s +// Information about a queued dataset SPICE ingestion. +type QueueInfo struct { + _ struct{} `type:"structure"` + + // The ID of the ongoing ingestion. The queued ingestion is waiting for the + // ongoing ingestion to complete. + // + // QueuedIngestion is a required field + QueuedIngestion *string `type:"string" required:"true"` + + // The ID of the queued ingestion. + // + // WaitingOnIngestion is a required field + WaitingOnIngestion *string `type:"string" required:"true"` } -// SetStatus sets the Status field's value. -func (s *ListIAMPolicyAssignmentsForUserOutput) SetStatus(v int64) *ListIAMPolicyAssignmentsForUserOutput { - s.Status = &v - return s +// String returns the string representation +func (s QueueInfo) String() string { + return awsutil.Prettify(s) } -type ListIAMPolicyAssignmentsInput struct { - _ struct{} `type:"structure"` +// GoString returns the string representation +func (s QueueInfo) GoString() string { + return s.String() +} - // The status of the assignments. - AssignmentStatus *string `type:"string" enum:"AssignmentStatus"` +// SetQueuedIngestion sets the QueuedIngestion field's value. +func (s *QueueInfo) SetQueuedIngestion(v string) *QueueInfo { + s.QueuedIngestion = &v + return s +} - // The ID of the AWS account that contains these IAM policy assignments. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` +// SetWaitingOnIngestion sets the WaitingOnIngestion field's value. +func (s *QueueInfo) SetWaitingOnIngestion(v string) *QueueInfo { + s.WaitingOnIngestion = &v + return s +} - // The maximum number of results to be returned per request. - MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` +// Amazon RDS parameters. +type RdsParameters struct { + _ struct{} `type:"structure"` - // The namespace for the assignments. + // Database. // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` - // The token for the next set of results, or null if there are no more results. - NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + // Instance ID. + // + // InstanceId is a required field + InstanceId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ListIAMPolicyAssignmentsInput) String() string { +func (s RdsParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListIAMPolicyAssignmentsInput) GoString() string { +func (s RdsParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListIAMPolicyAssignmentsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListIAMPolicyAssignmentsInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) +func (s *RdsParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RdsParameters"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + if s.InstanceId != nil && len(*s.InstanceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) } if invalidParams.Len() > 0 { @@ -16402,133 +25856,229 @@ func (s *ListIAMPolicyAssignmentsInput) Validate() error { return nil } -// SetAssignmentStatus sets the AssignmentStatus field's value. -func (s *ListIAMPolicyAssignmentsInput) SetAssignmentStatus(v string) *ListIAMPolicyAssignmentsInput { - s.AssignmentStatus = &v - return s -} - -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListIAMPolicyAssignmentsInput) SetAwsAccountId(v string) *ListIAMPolicyAssignmentsInput { - s.AwsAccountId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListIAMPolicyAssignmentsInput) SetMaxResults(v int64) *ListIAMPolicyAssignmentsInput { - s.MaxResults = &v - return s -} - -// SetNamespace sets the Namespace field's value. -func (s *ListIAMPolicyAssignmentsInput) SetNamespace(v string) *ListIAMPolicyAssignmentsInput { - s.Namespace = &v +// SetDatabase sets the Database field's value. +func (s *RdsParameters) SetDatabase(v string) *RdsParameters { + s.Database = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListIAMPolicyAssignmentsInput) SetNextToken(v string) *ListIAMPolicyAssignmentsInput { - s.NextToken = &v +// SetInstanceId sets the InstanceId field's value. +func (s *RdsParameters) SetInstanceId(v string) *RdsParameters { + s.InstanceId = &v return s } -type ListIAMPolicyAssignmentsOutput struct { +// Amazon Redshift parameters. The ClusterId field can be blank if Host and +// Port are both set. The Host and Port fields can be blank if the ClusterId +// field is set. +type RedshiftParameters struct { _ struct{} `type:"structure"` - // Information describing the IAM policy assignments. - IAMPolicyAssignments []*IAMPolicyAssignmentSummary `type:"list"` + // Cluster ID. This field can be blank if the Host and Port are provided. + ClusterId *string `min:"1" type:"string"` - // The token for the next set of results, or null if there are no more results. - NextToken *string `type:"string"` + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` - // The AWS request ID for this operation. - RequestId *string `type:"string"` + // Host. This field can be blank if ClusterId is provided. + Host *string `min:"1" type:"string"` - // The HTTP status of the request. 
- Status *int64 `location:"statusCode" type:"integer"` + // Port. This field can be blank if the ClusterId is provided. + Port *int64 `type:"integer"` } // String returns the string representation -func (s ListIAMPolicyAssignmentsOutput) String() string { +func (s RedshiftParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListIAMPolicyAssignmentsOutput) GoString() string { +func (s RedshiftParameters) GoString() string { return s.String() } -// SetIAMPolicyAssignments sets the IAMPolicyAssignments field's value. -func (s *ListIAMPolicyAssignmentsOutput) SetIAMPolicyAssignments(v []*IAMPolicyAssignmentSummary) *ListIAMPolicyAssignmentsOutput { - s.IAMPolicyAssignments = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *RedshiftParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedshiftParameters"} + if s.ClusterId != nil && len(*s.ClusterId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterId", 1)) + } + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) + } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Host", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterId sets the ClusterId field's value. +func (s *RedshiftParameters) SetClusterId(v string) *RedshiftParameters { + s.ClusterId = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListIAMPolicyAssignmentsOutput) SetNextToken(v string) *ListIAMPolicyAssignmentsOutput { - s.NextToken = &v +// SetDatabase sets the Database field's value. +func (s *RedshiftParameters) SetDatabase(v string) *RedshiftParameters { + s.Database = &v return s } -// SetRequestId sets the RequestId field's value. -func (s *ListIAMPolicyAssignmentsOutput) SetRequestId(v string) *ListIAMPolicyAssignmentsOutput { - s.RequestId = &v +// SetHost sets the Host field's value. +func (s *RedshiftParameters) SetHost(v string) *RedshiftParameters { + s.Host = &v return s } -// SetStatus sets the Status field's value. -func (s *ListIAMPolicyAssignmentsOutput) SetStatus(v int64) *ListIAMPolicyAssignmentsOutput { - s.Status = &v +// SetPort sets the Port field's value. +func (s *RedshiftParameters) SetPort(v int64) *RedshiftParameters { + s.Port = &v return s } -type ListIngestionsInput struct { +type RegisterUserInput struct { _ struct{} `type:"structure"` - // The AWS account ID. + // The ID for the AWS account that the user is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID of the dataset used in the ingestion. + // (Enterprise edition only) The name of the custom permissions profile that + // you want to assign to this user. 
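// Editor's sketch (not part of the vendored file): RedshiftParameters above is the one
// connection shape in this hunk with a conditional rule -- supply either ClusterId or an
// explicit Host/Port pair alongside the required Database -- while RdsParameters keys off
// an instance ID. Identifiers below are illustrative placeholders; Validate checks only
// the per-field constraints, not the either/or rule.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	// Form 1: let QuickSight resolve the endpoint from the cluster ID.
	byCluster := (&quicksight.RedshiftParameters{}).
		SetClusterId("analytics-cluster").
		SetDatabase("dev")

	// Form 2: explicit host and port, no cluster ID.
	byHost := (&quicksight.RedshiftParameters{}).
		SetDatabase("dev").
		SetHost("redshift.internal.example.com").
		SetPort(5439)

	// RDS connections are addressed by instance ID rather than host/port.
	rds := (&quicksight.RdsParameters{}).
		SetDatabase("app").
		SetInstanceId("app-db-1")

	for _, v := range []interface{ Validate() error }{byCluster, byHost, rds} {
		if err := v.Validate(); err != nil {
			fmt.Println(err)
		}
	}
}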
Customized permissions allows you to control + // a user's access by restricting access the following operations: // - // DataSetId is a required field - DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + // * Create and update data sources + // + // * Create and update datasets + // + // * Create and update email reports + // + // * Subscribe to email reports + // + // To add custom permissions to an existing user, use UpdateUser instead. + // + // A set of custom permissions includes any combination of these restrictions. + // Currently, you need to create the profile names for custom permission sets + // by using the QuickSight console. Then, you use the RegisterUser API operation + // to assign the named set of permissions to a QuickSight user. + // + // QuickSight custom permissions are applied through IAM policies. Therefore, + // they override the permissions typically granted by assigning QuickSight users + // to one of the default security cohorts in QuickSight (admin, author, reader). + // + // This feature is available only to QuickSight Enterprise edition subscriptions + // that use SAML 2.0-Based Federation for Single Sign-On (SSO). + CustomPermissionsName *string `min:"1" type:"string"` - // The maximum number of results to be returned per request. - MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` + // The email address of the user that you want to register. + // + // Email is a required field + Email *string `type:"string" required:"true"` - // The token for the next set of results, or null if there are no more results. - NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + // The ARN of the IAM user or role that you are registering with Amazon QuickSight. + IamArn *string `type:"string"` + + // Amazon QuickSight supports several ways of managing the identity of users. + // This parameter accepts two values: + // + // * IAM: A user whose identity maps to an existing IAM user or role. + // + // * QUICKSIGHT: A user whose identity is owned and managed internally by + // Amazon QuickSight. + // + // IdentityType is a required field + IdentityType *string `type:"string" required:"true" enum:"IdentityType"` + + // The namespace. Currently, you should set this to default. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + + // You need to use this parameter only when you register one or more users using + // an assumed IAM role. You don't need to provide the session name for other + // scenarios, for example when you are registering an IAM user or an Amazon + // QuickSight user. You can register multiple users using the same IAM role + // if each user has a different session name. For more information on assuming + // IAM roles, see assume-role (https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sts/assume-role.html) + // in the AWS CLI Reference. + SessionName *string `min:"2" type:"string"` + + // The Amazon QuickSight user name that you want to create for the user you + // are registering. + UserName *string `min:"1" type:"string"` + + // The Amazon QuickSight role for the user. The user role can be one of the + // following: + // + // * READER: A user who has read-only access to dashboards. + // + // * AUTHOR: A user who can create data sources, datasets, analyses, and + // dashboards. 
+ // + // * ADMIN: A user who is an author, who can also manage Amazon QuickSight + // settings. + // + // * RESTRICTED_READER: This role isn't currently available for use. + // + // * RESTRICTED_AUTHOR: This role isn't currently available for use. + // + // UserRole is a required field + UserRole *string `type:"string" required:"true" enum:"UserRole"` } // String returns the string representation -func (s ListIngestionsInput) String() string { +func (s RegisterUserInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListIngestionsInput) GoString() string { +func (s RegisterUserInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListIngestionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListIngestionsInput"} +func (s *RegisterUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterUserInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSetId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetId")) + if s.CustomPermissionsName != nil && len(*s.CustomPermissionsName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CustomPermissionsName", 1)) } - if s.DataSetId != nil && len(*s.DataSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + if s.Email == nil { + invalidParams.Add(request.NewErrParamRequired("Email")) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.IdentityType == nil { + invalidParams.Add(request.NewErrParamRequired("IdentityType")) + } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) + } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + } + if s.SessionName != nil && len(*s.SessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("SessionName", 2)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + if s.UserRole == nil { + invalidParams.Add(request.NewErrParamRequired("UserRole")) } if invalidParams.Len() > 0 { @@ -16537,108 +26087,171 @@ func (s *ListIngestionsInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListIngestionsInput) SetAwsAccountId(v string) *ListIngestionsInput { - s.AwsAccountId = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *RegisterUserInput) SetAwsAccountId(v string) *RegisterUserInput { + s.AwsAccountId = &v + return s +} + +// SetCustomPermissionsName sets the CustomPermissionsName field's value. +func (s *RegisterUserInput) SetCustomPermissionsName(v string) *RegisterUserInput { + s.CustomPermissionsName = &v + return s +} + +// SetEmail sets the Email field's value. +func (s *RegisterUserInput) SetEmail(v string) *RegisterUserInput { + s.Email = &v return s } -// SetDataSetId sets the DataSetId field's value. -func (s *ListIngestionsInput) SetDataSetId(v string) *ListIngestionsInput { - s.DataSetId = &v +// SetIamArn sets the IamArn field's value. +func (s *RegisterUserInput) SetIamArn(v string) *RegisterUserInput { + s.IamArn = &v return s } -// SetMaxResults sets the MaxResults field's value. 
-func (s *ListIngestionsInput) SetMaxResults(v int64) *ListIngestionsInput { - s.MaxResults = &v +// SetIdentityType sets the IdentityType field's value. +func (s *RegisterUserInput) SetIdentityType(v string) *RegisterUserInput { + s.IdentityType = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListIngestionsInput) SetNextToken(v string) *ListIngestionsInput { - s.NextToken = &v +// SetNamespace sets the Namespace field's value. +func (s *RegisterUserInput) SetNamespace(v string) *RegisterUserInput { + s.Namespace = &v return s } -type ListIngestionsOutput struct { - _ struct{} `type:"structure"` +// SetSessionName sets the SessionName field's value. +func (s *RegisterUserInput) SetSessionName(v string) *RegisterUserInput { + s.SessionName = &v + return s +} - // A list of the ingestions. - Ingestions []*Ingestion `type:"list"` +// SetUserName sets the UserName field's value. +func (s *RegisterUserInput) SetUserName(v string) *RegisterUserInput { + s.UserName = &v + return s +} - // The token for the next set of results, or null if there are no more results. - NextToken *string `type:"string"` +// SetUserRole sets the UserRole field's value. +func (s *RegisterUserInput) SetUserRole(v string) *RegisterUserInput { + s.UserRole = &v + return s +} + +type RegisterUserOutput struct { + _ struct{} `type:"structure"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` + + // The user's user name. + User *User `type:"structure"` + + // The URL the user visits to complete registration and provide a password. + // This is returned only for users with an identity type of QUICKSIGHT. + UserInvitationUrl *string `type:"string"` } // String returns the string representation -func (s ListIngestionsOutput) String() string { +func (s RegisterUserOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListIngestionsOutput) GoString() string { +func (s RegisterUserOutput) GoString() string { return s.String() } -// SetIngestions sets the Ingestions field's value. -func (s *ListIngestionsOutput) SetIngestions(v []*Ingestion) *ListIngestionsOutput { - s.Ingestions = v +// SetRequestId sets the RequestId field's value. +func (s *RegisterUserOutput) SetRequestId(v string) *RegisterUserOutput { + s.RequestId = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListIngestionsOutput) SetNextToken(v string) *ListIngestionsOutput { - s.NextToken = &v +// SetStatus sets the Status field's value. +func (s *RegisterUserOutput) SetStatus(v int64) *RegisterUserOutput { + s.Status = &v return s } -// SetRequestId sets the RequestId field's value. -func (s *ListIngestionsOutput) SetRequestId(v string) *ListIngestionsOutput { - s.RequestId = &v +// SetUser sets the User field's value. +func (s *RegisterUserOutput) SetUser(v *User) *RegisterUserOutput { + s.User = v return s } -// SetStatus sets the Status field's value. -func (s *ListIngestionsOutput) SetStatus(v int64) *ListIngestionsOutput { - s.Status = &v +// SetUserInvitationUrl sets the UserInvitationUrl field's value. +func (s *RegisterUserOutput) SetUserInvitationUrl(v string) *RegisterUserOutput { + s.UserInvitationUrl = &v return s } -type ListTagsForResourceInput struct { +// A physical table type for relational data sources. 
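// Editor's sketch (not part of the vendored file): RegisterUserInput/RegisterUserOutput
// above branch on identity type -- QUICKSIGHT-managed users need an Email and UserName
// and get back a UserInvitationUrl, while IAM-backed users are addressed by IamArn and,
// for assumed roles, a SessionName. The account ID, namespace, and email below are
// placeholders; the client constructor and enum/error-code constants come from the same
// vendored quicksight package.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := quicksight.New(sess)

	out, err := svc.RegisterUser(&quicksight.RegisterUserInput{
		AwsAccountId: aws.String("111122223333"),
		Namespace:    aws.String("default"),
		IdentityType: aws.String(quicksight.IdentityTypeQuicksight),
		UserRole:     aws.String(quicksight.UserRoleReader),
		Email:        aws.String("reader@example.com"),
		UserName:     aws.String("reader-1"),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == quicksight.ErrCodeResourceExistsException {
			fmt.Println("user already registered")
			return
		}
		fmt.Println("register failed:", err)
		return
	}
	// For QUICKSIGHT identities the invitation URL completes sign-up.
	fmt.Println(aws.StringValue(out.UserInvitationUrl))
}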
+type RelationalTable struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource that you want a list of tags - // for. + // The Amazon Resource Name (ARN) for the data source. // - // ResourceArn is a required field - ResourceArn *string `location:"uri" locationName:"ResourceArn" type:"string" required:"true"` + // DataSourceArn is a required field + DataSourceArn *string `type:"string" required:"true"` + + // The column schema of the table. + // + // InputColumns is a required field + InputColumns []*InputColumn `min:"1" type:"list" required:"true"` + + // The name of the relational table. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The schema name. This name applies to certain relational database engines. + Schema *string `type:"string"` } // String returns the string representation -func (s ListTagsForResourceInput) String() string { +func (s RelationalTable) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTagsForResourceInput) GoString() string { +func (s RelationalTable) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListTagsForResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) +func (s *RelationalTable) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RelationalTable"} + if s.DataSourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceArn")) } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + if s.InputColumns == nil { + invalidParams.Add(request.NewErrParamRequired("InputColumns")) + } + if s.InputColumns != nil && len(s.InputColumns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputColumns", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.InputColumns != nil { + for i, v := range s.InputColumns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InputColumns", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -16647,231 +26260,259 @@ func (s *ListTagsForResourceInput) Validate() error { return nil } -// SetResourceArn sets the ResourceArn field's value. -func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { - s.ResourceArn = &v +// SetDataSourceArn sets the DataSourceArn field's value. +func (s *RelationalTable) SetDataSourceArn(v string) *RelationalTable { + s.DataSourceArn = &v return s } -type ListTagsForResourceOutput struct { - _ struct{} `type:"structure"` +// SetInputColumns sets the InputColumns field's value. +func (s *RelationalTable) SetInputColumns(v []*InputColumn) *RelationalTable { + s.InputColumns = v + return s +} - // The AWS request ID for this operation. - RequestId *string `type:"string"` +// SetName sets the Name field's value. +func (s *RelationalTable) SetName(v string) *RelationalTable { + s.Name = &v + return s +} - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` +// SetSchema sets the Schema field's value. 
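// Editor's sketch (not part of the vendored file): PhysicalTable, earlier in this hunk,
// is a variant type -- only one of CustomSql, RelationalTable, or S3Source may be set,
// and its Validate cascades only into whichever member is non-nil. RelationalTable above
// supplies the relational branch. The data source ARN and column names below are
// placeholders; InputColumn's Name/Type fields follow the same generated pattern as the
// other shapes in this file.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	table := &quicksight.PhysicalTable{
		RelationalTable: &quicksight.RelationalTable{
			DataSourceArn: aws.String("arn:aws:quicksight:us-east-1:111122223333:datasource/example"),
			Schema:        aws.String("public"),
			Name:          aws.String("orders"),
			InputColumns: []*quicksight.InputColumn{
				{Name: aws.String("order_id"), Type: aws.String("STRING")},
				{Name: aws.String("amount"), Type: aws.String("DECIMAL")},
			},
		},
	}
	// Setting CustomSql or S3Source as well would make the variant ambiguous,
	// even though Validate itself does not enforce the one-member rule.
	if err := table.Validate(); err != nil {
		fmt.Println(err)
	}
}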
+func (s *RelationalTable) SetSchema(v string) *RelationalTable { + s.Schema = &v + return s +} - // Contains a map of the key-value pairs for the resource tag or tags assigned - // to the resource. - Tags []*Tag `min:"1" type:"list"` +// A transform operation that renames a column. +type RenameColumnOperation struct { + _ struct{} `type:"structure"` + + // The name of the column to be renamed. + // + // ColumnName is a required field + ColumnName *string `min:"1" type:"string" required:"true"` + + // The new name for the column. + // + // NewColumnName is a required field + NewColumnName *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ListTagsForResourceOutput) String() string { +func (s RenameColumnOperation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTagsForResourceOutput) GoString() string { +func (s RenameColumnOperation) GoString() string { return s.String() } -// SetRequestId sets the RequestId field's value. -func (s *ListTagsForResourceOutput) SetRequestId(v string) *ListTagsForResourceOutput { - s.RequestId = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *RenameColumnOperation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RenameColumnOperation"} + if s.ColumnName == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnName")) + } + if s.ColumnName != nil && len(*s.ColumnName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ColumnName", 1)) + } + if s.NewColumnName == nil { + invalidParams.Add(request.NewErrParamRequired("NewColumnName")) + } + if s.NewColumnName != nil && len(*s.NewColumnName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewColumnName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetStatus sets the Status field's value. -func (s *ListTagsForResourceOutput) SetStatus(v int64) *ListTagsForResourceOutput { - s.Status = &v +// SetColumnName sets the ColumnName field's value. +func (s *RenameColumnOperation) SetColumnName(v string) *RenameColumnOperation { + s.ColumnName = &v return s } -// SetTags sets the Tags field's value. -func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { - s.Tags = v +// SetNewColumnName sets the NewColumnName field's value. +func (s *RenameColumnOperation) SetNewColumnName(v string) *RenameColumnOperation { + s.NewColumnName = &v return s } -type ListTemplateAliasesInput struct { - _ struct{} `type:"structure"` - - // The ID of the AWS account that contains the template aliases that you're - // listing. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` +// The resource specified already exists. +type ResourceExistsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The maximum number of results to be returned per request. - MaxResults *int64 `location:"querystring" locationName:"max-result" min:"1" type:"integer"` + Message_ *string `locationName:"Message" type:"string"` - // The token for the next set of results, or null if there are no more results. - NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + // The AWS request ID for this request. + RequestId *string `type:"string"` - // The ID for the template. 
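// Editor's sketch (not part of the vendored file): ProjectOperation (earlier in this
// hunk) and RenameColumnOperation (above) are individual dataset transform steps; in
// the wider API they are carried on a logical table's transform list, which falls
// outside this hunk. Column names below are illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	rename := (&quicksight.RenameColumnOperation{}).
		SetColumnName("order_ts").
		SetNewColumnName("ordered_at")

	project := &quicksight.ProjectOperation{
		// Operations after a projection may only refer to these columns.
		ProjectedColumns: aws.StringSlice([]string{"order_id", "ordered_at", "amount"}),
	}

	for _, op := range []interface{ Validate() error }{rename, project} {
		if err := op.Validate(); err != nil {
			fmt.Println(err)
		}
	}
}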
- // - // TemplateId is a required field - TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + // The resource type for this request. + ResourceType *string `type:"string" enum:"ExceptionResourceType"` } // String returns the string representation -func (s ListTemplateAliasesInput) String() string { +func (s ResourceExistsException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTemplateAliasesInput) GoString() string { +func (s ResourceExistsException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ListTemplateAliasesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTemplateAliasesInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.TemplateId == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateId")) - } - if s.TemplateId != nil && len(*s.TemplateId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) - } +func newErrorResourceExistsException(v protocol.ResponseMetadata) error { + return &ResourceExistsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceExistsException) Code() string { + return "ResourceExistsException" +} - if invalidParams.Len() > 0 { - return invalidParams +// Message returns the exception's message. +func (s *ResourceExistsException) Message() string { + if s.Message_ != nil { + return *s.Message_ } - return nil + return "" } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListTemplateAliasesInput) SetAwsAccountId(v string) *ListTemplateAliasesInput { - s.AwsAccountId = &v - return s +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceExistsException) OrigErr() error { + return nil } -// SetMaxResults sets the MaxResults field's value. -func (s *ListTemplateAliasesInput) SetMaxResults(v int64) *ListTemplateAliasesInput { - s.MaxResults = &v - return s +func (s *ResourceExistsException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } -// SetNextToken sets the NextToken field's value. -func (s *ListTemplateAliasesInput) SetNextToken(v string) *ListTemplateAliasesInput { - s.NextToken = &v - return s +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } -// SetTemplateId sets the TemplateId field's value. -func (s *ListTemplateAliasesInput) SetTemplateId(v string) *ListTemplateAliasesInput { - s.TemplateId = &v - return s +// RequestID returns the service's response RequestID for request. +func (s *ResourceExistsException) RequestID() string { + return s.RespMetadata.RequestID } -type ListTemplateAliasesOutput struct { - _ struct{} `type:"structure"` +// One or more resources can't be found. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The token for the next set of results, or null if there are no more results. 
- NextToken *string `type:"string"` + Message_ *string `locationName:"Message" type:"string"` - // The AWS request ID for this operation. + // The AWS request ID for this request. RequestId *string `type:"string"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` - - // A structure containing the list of the template's aliases. - TemplateAliasList []*TemplateAlias `type:"list"` + // The resource type for this request. + ResourceType *string `type:"string" enum:"ExceptionResourceType"` } // String returns the string representation -func (s ListTemplateAliasesOutput) String() string { +func (s ResourceNotFoundException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTemplateAliasesOutput) GoString() string { +func (s ResourceNotFoundException) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListTemplateAliasesOutput) SetNextToken(v string) *ListTemplateAliasesOutput { - s.NextToken = &v - return s +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } } -// SetRequestId sets the RequestId field's value. -func (s *ListTemplateAliasesOutput) SetRequestId(v string) *ListTemplateAliasesOutput { - s.RequestId = &v - return s +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" } -// SetStatus sets the Status field's value. -func (s *ListTemplateAliasesOutput) SetStatus(v int64) *ListTemplateAliasesOutput { - s.Status = &v - return s +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -// SetTemplateAliasList sets the TemplateAliasList field's value. -func (s *ListTemplateAliasesOutput) SetTemplateAliasList(v []*TemplateAlias) *ListTemplateAliasesOutput { - s.TemplateAliasList = v - return s +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil } -type ListTemplateVersionsInput struct { - _ struct{} `type:"structure"` +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} - // The ID of the AWS account that contains the templates that you're listing. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} - // The maximum number of results to be returned per request. - MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} - // The token for the next set of results, or null if there are no more results. - NextToken *string `location:"querystring" locationName:"next-token" type:"string"` +// Permission for the resource. +type ResourcePermission struct { + _ struct{} `type:"structure"` - // The ID for the template. + // The IAM action to grant or revoke permissions on, for example "quicksight:DescribeDashboard". 
// - // TemplateId is a required field - TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + // Actions is a required field + Actions []*string `min:"1" type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the principal. This can be one of the following: + // + // * The ARN of an Amazon QuickSight user, group, or namespace. (This is + // most common.) + // + // * The ARN of an AWS account root: This is an IAM ARN rather than a QuickSight + // ARN. Use this option only to share resources (templates) across AWS accounts. + // (This is less common.) + // + // Principal is a required field + Principal *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ListTemplateVersionsInput) String() string { +func (s ResourcePermission) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTemplateVersionsInput) GoString() string { +func (s ResourcePermission) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListTemplateVersionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTemplateVersionsInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) +func (s *ResourcePermission) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourcePermission"} + if s.Actions == nil { + invalidParams.Add(request.NewErrParamRequired("Actions")) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.Actions != nil && len(s.Actions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Actions", 1)) } - if s.TemplateId == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateId")) + if s.Principal == nil { + invalidParams.Add(request.NewErrParamRequired("Principal")) } - if s.TemplateId != nil && len(*s.TemplateId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + if s.Principal != nil && len(*s.Principal) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Principal", 1)) } if invalidParams.Len() > 0 { @@ -16880,116 +26521,118 @@ func (s *ListTemplateVersionsInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListTemplateVersionsInput) SetAwsAccountId(v string) *ListTemplateVersionsInput { - s.AwsAccountId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListTemplateVersionsInput) SetMaxResults(v int64) *ListTemplateVersionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListTemplateVersionsInput) SetNextToken(v string) *ListTemplateVersionsInput { - s.NextToken = &v +// SetActions sets the Actions field's value. +func (s *ResourcePermission) SetActions(v []*string) *ResourcePermission { + s.Actions = v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *ListTemplateVersionsInput) SetTemplateId(v string) *ListTemplateVersionsInput { - s.TemplateId = &v +// SetPrincipal sets the Principal field's value. 
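+//
+// The example below is an editor's illustration rather than generated SDK
+// documentation. It assumes the aws.String helper from the aws package; the
+// account ID and user name in the principal ARN are hypothetical, and the
+// action string is taken from the Actions field documentation above.
+//
+//	perm := (&ResourcePermission{}).
+//		SetActions([]*string{aws.String("quicksight:DescribeDashboard")}).
+//		SetPrincipal("arn:aws:quicksight:us-east-1:111122223333:user/default/UserName1")
+//	if err := perm.Validate(); err != nil {
+//		// Validate reports missing or too-short Actions and Principal values.
+//	}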
+func (s *ResourcePermission) SetPrincipal(v string) *ResourcePermission { + s.Principal = &v return s } -type ListTemplateVersionsOutput struct { - _ struct{} `type:"structure"` +// This resource is currently unavailable. +type ResourceUnavailableException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The token for the next set of results, or null if there are no more results. - NextToken *string `type:"string"` + Message_ *string `locationName:"Message" type:"string"` - // The AWS request ID for this operation. + // The AWS request ID for this request. RequestId *string `type:"string"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` - - // A structure containing a list of all the versions of the specified template. - TemplateVersionSummaryList []*TemplateVersionSummary `type:"list"` + // The resource type for this request. + ResourceType *string `type:"string" enum:"ExceptionResourceType"` } // String returns the string representation -func (s ListTemplateVersionsOutput) String() string { +func (s ResourceUnavailableException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTemplateVersionsOutput) GoString() string { +func (s ResourceUnavailableException) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListTemplateVersionsOutput) SetNextToken(v string) *ListTemplateVersionsOutput { - s.NextToken = &v - return s +func newErrorResourceUnavailableException(v protocol.ResponseMetadata) error { + return &ResourceUnavailableException{ + RespMetadata: v, + } } -// SetRequestId sets the RequestId field's value. -func (s *ListTemplateVersionsOutput) SetRequestId(v string) *ListTemplateVersionsOutput { - s.RequestId = &v - return s +// Code returns the exception type name. +func (s *ResourceUnavailableException) Code() string { + return "ResourceUnavailableException" } -// SetStatus sets the Status field's value. -func (s *ListTemplateVersionsOutput) SetStatus(v int64) *ListTemplateVersionsOutput { - s.Status = &v - return s +// Message returns the exception's message. +func (s *ResourceUnavailableException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" } -// SetTemplateVersionSummaryList sets the TemplateVersionSummaryList field's value. -func (s *ListTemplateVersionsOutput) SetTemplateVersionSummaryList(v []*TemplateVersionSummary) *ListTemplateVersionsOutput { - s.TemplateVersionSummaryList = v - return s +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceUnavailableException) OrigErr() error { + return nil } -type ListTemplatesInput struct { +func (s *ResourceUnavailableException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID +} + +type RestoreAnalysisInput struct { _ struct{} `type:"structure"` - // The ID of the AWS account that contains the templates that you're listing. + // The ID of the analysis that you're restoring. 
+ // + // AnalysisId is a required field + AnalysisId *string `location:"uri" locationName:"AnalysisId" min:"1" type:"string" required:"true"` + + // The ID of the AWS account that contains the analysis. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - - // The maximum number of results to be returned per request. - MaxResults *int64 `location:"querystring" locationName:"max-result" min:"1" type:"integer"` - - // The token for the next set of results, or null if there are no more results. - NextToken *string `location:"querystring" locationName:"next-token" type:"string"` } // String returns the string representation -func (s ListTemplatesInput) String() string { +func (s RestoreAnalysisInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTemplatesInput) GoString() string { +func (s RestoreAnalysisInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListTemplatesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListTemplatesInput"} +func (s *RestoreAnalysisInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreAnalysisInput"} + if s.AnalysisId == nil { + invalidParams.Add(request.NewErrParamRequired("AnalysisId")) + } + if s.AnalysisId != nil && len(*s.AnalysisId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnalysisId", 1)) + } if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } if invalidParams.Len() > 0 { @@ -16998,133 +26641,137 @@ func (s *ListTemplatesInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListTemplatesInput) SetAwsAccountId(v string) *ListTemplatesInput { - s.AwsAccountId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListTemplatesInput) SetMaxResults(v int64) *ListTemplatesInput { - s.MaxResults = &v +// SetAnalysisId sets the AnalysisId field's value. +func (s *RestoreAnalysisInput) SetAnalysisId(v string) *RestoreAnalysisInput { + s.AnalysisId = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListTemplatesInput) SetNextToken(v string) *ListTemplatesInput { - s.NextToken = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *RestoreAnalysisInput) SetAwsAccountId(v string) *RestoreAnalysisInput { + s.AwsAccountId = &v return s } -type ListTemplatesOutput struct { +type RestoreAnalysisOutput struct { _ struct{} `type:"structure"` - // The token for the next set of results, or null if there are no more results. - NextToken *string `type:"string"` + // The ID of the analysis that you're restoring. + AnalysisId *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the analysis that you're restoring. + Arn *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. 
Status *int64 `location:"statusCode" type:"integer"` - - // A structure containing information about the templates in the list. - TemplateSummaryList []*TemplateSummary `type:"list"` } // String returns the string representation -func (s ListTemplatesOutput) String() string { +func (s RestoreAnalysisOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTemplatesOutput) GoString() string { +func (s RestoreAnalysisOutput) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListTemplatesOutput) SetNextToken(v string) *ListTemplatesOutput { - s.NextToken = &v +// SetAnalysisId sets the AnalysisId field's value. +func (s *RestoreAnalysisOutput) SetAnalysisId(v string) *RestoreAnalysisOutput { + s.AnalysisId = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *RestoreAnalysisOutput) SetArn(v string) *RestoreAnalysisOutput { + s.Arn = &v return s } // SetRequestId sets the RequestId field's value. -func (s *ListTemplatesOutput) SetRequestId(v string) *ListTemplatesOutput { +func (s *RestoreAnalysisOutput) SetRequestId(v string) *RestoreAnalysisOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *ListTemplatesOutput) SetStatus(v int64) *ListTemplatesOutput { +func (s *RestoreAnalysisOutput) SetStatus(v int64) *RestoreAnalysisOutput { s.Status = &v return s } -// SetTemplateSummaryList sets the TemplateSummaryList field's value. -func (s *ListTemplatesOutput) SetTemplateSummaryList(v []*TemplateSummary) *ListTemplatesOutput { - s.TemplateSummaryList = v - return s +// Information about rows for a data set SPICE ingestion. +type RowInfo struct { + _ struct{} `type:"structure"` + + // The number of rows that were not ingested. + RowsDropped *int64 `type:"long"` + + // The number of rows that were ingested. + RowsIngested *int64 `type:"long"` } -type ListUserGroupsInput struct { - _ struct{} `type:"structure"` +// String returns the string representation +func (s RowInfo) String() string { + return awsutil.Prettify(s) +} - // The AWS account ID that the user is in. Currently, you use the ID for the - // AWS account that contains your Amazon QuickSight account. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` +// GoString returns the string representation +func (s RowInfo) GoString() string { + return s.String() +} - // The maximum number of results to return from this request. - MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` +// SetRowsDropped sets the RowsDropped field's value. +func (s *RowInfo) SetRowsDropped(v int64) *RowInfo { + s.RowsDropped = &v + return s +} - // The namespace. Currently, you should set this to default. +// SetRowsIngested sets the RowsIngested field's value. +func (s *RowInfo) SetRowsIngested(v int64) *RowInfo { + s.RowsIngested = &v + return s +} + +// The row-level security configuration for the dataset. +type RowLevelPermissionDataSet struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the permission dataset. // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // Arn is a required field + Arn *string `type:"string" required:"true"` - // A pagination token that can be used in a subsequent request. 
- NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + // The namespace associated with the row-level permissions dataset. + Namespace *string `type:"string"` - // The Amazon QuickSight user name that you want to list group memberships for. + // Permission policy. // - // UserName is a required field - UserName *string `location:"uri" locationName:"UserName" min:"1" type:"string" required:"true"` + // PermissionPolicy is a required field + PermissionPolicy *string `type:"string" required:"true" enum:"RowLevelPermissionPolicy"` } // String returns the string representation -func (s ListUserGroupsInput) String() string { +func (s RowLevelPermissionDataSet) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListUserGroupsInput) GoString() string { +func (s RowLevelPermissionDataSet) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListUserGroupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListUserGroupsInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) - } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) - } - if s.UserName == nil { - invalidParams.Add(request.NewErrParamRequired("UserName")) +func (s *RowLevelPermissionDataSet) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RowLevelPermissionDataSet"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) } - if s.UserName != nil && len(*s.UserName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + if s.PermissionPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("PermissionPolicy")) } if invalidParams.Len() > 0 { @@ -17133,135 +26780,195 @@ func (s *ListUserGroupsInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListUserGroupsInput) SetAwsAccountId(v string) *ListUserGroupsInput { - s.AwsAccountId = &v - return s -} - -// SetMaxResults sets the MaxResults field's value. -func (s *ListUserGroupsInput) SetMaxResults(v int64) *ListUserGroupsInput { - s.MaxResults = &v +// SetArn sets the Arn field's value. +func (s *RowLevelPermissionDataSet) SetArn(v string) *RowLevelPermissionDataSet { + s.Arn = &v return s } // SetNamespace sets the Namespace field's value. -func (s *ListUserGroupsInput) SetNamespace(v string) *ListUserGroupsInput { +func (s *RowLevelPermissionDataSet) SetNamespace(v string) *RowLevelPermissionDataSet { s.Namespace = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListUserGroupsInput) SetNextToken(v string) *ListUserGroupsInput { - s.NextToken = &v +// SetPermissionPolicy sets the PermissionPolicy field's value. +func (s *RowLevelPermissionDataSet) SetPermissionPolicy(v string) *RowLevelPermissionDataSet { + s.PermissionPolicy = &v return s } -// SetUserName sets the UserName field's value. -func (s *ListUserGroupsInput) SetUserName(v string) *ListUserGroupsInput { - s.UserName = &v +// S3 parameters. 
+type S3Parameters struct { + _ struct{} `type:"structure"` + + // Location of the Amazon S3 manifest file. This is NULL if the manifest file + // was uploaded in the console. + // + // ManifestFileLocation is a required field + ManifestFileLocation *ManifestFileLocation `type:"structure" required:"true"` +} + +// String returns the string representation +func (s S3Parameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Parameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3Parameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3Parameters"} + if s.ManifestFileLocation == nil { + invalidParams.Add(request.NewErrParamRequired("ManifestFileLocation")) + } + if s.ManifestFileLocation != nil { + if err := s.ManifestFileLocation.Validate(); err != nil { + invalidParams.AddNested("ManifestFileLocation", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetManifestFileLocation sets the ManifestFileLocation field's value. +func (s *S3Parameters) SetManifestFileLocation(v *ManifestFileLocation) *S3Parameters { + s.ManifestFileLocation = v return s } -type ListUserGroupsOutput struct { +// A physical table type for as S3 data source. +type S3Source struct { _ struct{} `type:"structure"` - // The list of groups the user is a member of. - GroupList []*Group `type:"list"` - - // A pagination token that can be used in a subsequent request. - NextToken *string `type:"string"` + // The amazon Resource Name (ARN) for the data source. + // + // DataSourceArn is a required field + DataSourceArn *string `type:"string" required:"true"` - // The AWS request ID for this operation. - RequestId *string `type:"string"` + // A physical table type for as S3 data source. + // + // InputColumns is a required field + InputColumns []*InputColumn `min:"1" type:"list" required:"true"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // Information about the format for the S3 source file or files. + UploadSettings *UploadSettings `type:"structure"` } // String returns the string representation -func (s ListUserGroupsOutput) String() string { +func (s S3Source) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListUserGroupsOutput) GoString() string { +func (s S3Source) GoString() string { return s.String() } -// SetGroupList sets the GroupList field's value. -func (s *ListUserGroupsOutput) SetGroupList(v []*Group) *ListUserGroupsOutput { - s.GroupList = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. 
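+//
+// The example below is an editor's illustration rather than generated SDK
+// documentation. Both DataSourceArn and InputColumns are required, so a value
+// that sets only a (hypothetical) data source ARN fails validation.
+//
+//	src := &S3Source{DataSourceArn: aws.String("arn:aws:quicksight:us-east-1:111122223333:datasource/example-id")}
+//	if err := src.Validate(); err != nil {
+//		// err is a request.ErrInvalidParams naming InputColumns as missing.
+//	}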
+func (s *S3Source) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3Source"} + if s.DataSourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceArn")) + } + if s.InputColumns == nil { + invalidParams.Add(request.NewErrParamRequired("InputColumns")) + } + if s.InputColumns != nil && len(s.InputColumns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InputColumns", 1)) + } + if s.InputColumns != nil { + for i, v := range s.InputColumns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InputColumns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.UploadSettings != nil { + if err := s.UploadSettings.Validate(); err != nil { + invalidParams.AddNested("UploadSettings", err.(request.ErrInvalidParams)) + } + } -// SetNextToken sets the NextToken field's value. -func (s *ListUserGroupsOutput) SetNextToken(v string) *ListUserGroupsOutput { - s.NextToken = &v + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataSourceArn sets the DataSourceArn field's value. +func (s *S3Source) SetDataSourceArn(v string) *S3Source { + s.DataSourceArn = &v return s } -// SetRequestId sets the RequestId field's value. -func (s *ListUserGroupsOutput) SetRequestId(v string) *ListUserGroupsOutput { - s.RequestId = &v +// SetInputColumns sets the InputColumns field's value. +func (s *S3Source) SetInputColumns(v []*InputColumn) *S3Source { + s.InputColumns = v return s } -// SetStatus sets the Status field's value. -func (s *ListUserGroupsOutput) SetStatus(v int64) *ListUserGroupsOutput { - s.Status = &v +// SetUploadSettings sets the UploadSettings field's value. +func (s *S3Source) SetUploadSettings(v *UploadSettings) *S3Source { + s.UploadSettings = v return s } -type ListUsersInput struct { +type SearchAnalysesInput struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the user is in. Currently, you use the ID - // for the AWS account that contains your Amazon QuickSight account. + // The ID of the AWS account that contains the analyses that you're searching + // for. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The maximum number of results to return from this request. - MaxResults *int64 `location:"querystring" locationName:"max-results" min:"1" type:"integer"` - - // The namespace. Currently, you should set this to default. + // The structure for the search filters that you want to apply to your search. // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // Filters is a required field + Filters []*AnalysisSearchFilter `type:"list" required:"true"` + + // The maximum number of results to return. + MaxResults *int64 `min:"1" type:"integer"` // A pagination token that can be used in a subsequent request. - NextToken *string `location:"querystring" locationName:"next-token" type:"string"` + NextToken *string `type:"string"` } // String returns the string representation -func (s ListUsersInput) String() string { +func (s SearchAnalysesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListUsersInput) GoString() string { +func (s SearchAnalysesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListUsersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListUsersInput"} +func (s *SearchAnalysesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SearchAnalysesInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } + if s.Filters == nil { + invalidParams.Add(request.NewErrParamRequired("Filters")) + } if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) - } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) - } if invalidParams.Len() > 0 { return invalidParams @@ -17270,32 +26977,35 @@ func (s *ListUsersInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *ListUsersInput) SetAwsAccountId(v string) *ListUsersInput { +func (s *SearchAnalysesInput) SetAwsAccountId(v string) *SearchAnalysesInput { s.AwsAccountId = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListUsersInput) SetMaxResults(v int64) *ListUsersInput { - s.MaxResults = &v +// SetFilters sets the Filters field's value. +func (s *SearchAnalysesInput) SetFilters(v []*AnalysisSearchFilter) *SearchAnalysesInput { + s.Filters = v return s } -// SetNamespace sets the Namespace field's value. -func (s *ListUsersInput) SetNamespace(v string) *ListUsersInput { - s.Namespace = &v +// SetMaxResults sets the MaxResults field's value. +func (s *SearchAnalysesInput) SetMaxResults(v int64) *SearchAnalysesInput { + s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListUsersInput) SetNextToken(v string) *ListUsersInput { +func (s *SearchAnalysesInput) SetNextToken(v string) *SearchAnalysesInput { s.NextToken = &v return s } -type ListUsersOutput struct { +type SearchAnalysesOutput struct { _ struct{} `type:"structure"` + // Metadata describing the analyses that you searched for. + AnalysisSummaryList []*AnalysisSummary `type:"list"` + // A pagination token that can be used in a subsequent request. NextToken *string `type:"string"` @@ -17304,106 +27014,101 @@ type ListUsersOutput struct { // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` - - // The list of users. - UserList []*User `type:"list"` } // String returns the string representation -func (s ListUsersOutput) String() string { +func (s SearchAnalysesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListUsersOutput) GoString() string { +func (s SearchAnalysesOutput) GoString() string { return s.String() } +// SetAnalysisSummaryList sets the AnalysisSummaryList field's value. +func (s *SearchAnalysesOutput) SetAnalysisSummaryList(v []*AnalysisSummary) *SearchAnalysesOutput { + s.AnalysisSummaryList = v + return s +} + // SetNextToken sets the NextToken field's value. -func (s *ListUsersOutput) SetNextToken(v string) *ListUsersOutput { +func (s *SearchAnalysesOutput) SetNextToken(v string) *SearchAnalysesOutput { s.NextToken = &v return s } // SetRequestId sets the RequestId field's value. 
-func (s *ListUsersOutput) SetRequestId(v string) *ListUsersOutput { +func (s *SearchAnalysesOutput) SetRequestId(v string) *SearchAnalysesOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *ListUsersOutput) SetStatus(v int64) *ListUsersOutput { +func (s *SearchAnalysesOutput) SetStatus(v int64) *SearchAnalysesOutput { s.Status = &v return s } -// SetUserList sets the UserList field's value. -func (s *ListUsersOutput) SetUserList(v []*User) *ListUsersOutput { - s.UserList = v - return s -} - -// A logical table is a unit that joins and that data transformations operate -// on. A logical table has a source, which can be either a physical table or -// result of a join. When a logical table points to a physical table, the logical -// table acts as a mutable copy of that physical table through transform operations. -type LogicalTable struct { +type SearchDashboardsInput struct { _ struct{} `type:"structure"` - // A display name for the logical table. + // The ID of the AWS account that contains the user whose dashboards you're + // searching for. // - // Alias is a required field - Alias *string `min:"1" type:"string" required:"true"` - - // Transform operations that act on this logical table. - DataTransforms []*TransformOperation `min:"1" type:"list"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Source of this logical table. + // The filters to apply to the search. Currently, you can search only by user + // name, for example, "Filters": [ { "Name": "QUICKSIGHT_USER", "Operator": + // "StringEquals", "Value": "arn:aws:quicksight:us-east-1:1:user/default/UserName1" + // } ] // - // Source is a required field - Source *LogicalTableSource `type:"structure" required:"true"` + // Filters is a required field + Filters []*DashboardSearchFilter `type:"list" required:"true"` + + // The maximum number of results to be returned per request. + MaxResults *int64 `min:"1" type:"integer"` + + // The token for the next set of results, or null if there are no more results. + NextToken *string `type:"string"` } // String returns the string representation -func (s LogicalTable) String() string { +func (s SearchDashboardsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LogicalTable) GoString() string { +func (s SearchDashboardsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *LogicalTable) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LogicalTable"} - if s.Alias == nil { - invalidParams.Add(request.NewErrParamRequired("Alias")) +func (s *SearchDashboardsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SearchDashboardsInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.Alias != nil && len(*s.Alias) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Alias", 1)) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataTransforms != nil && len(s.DataTransforms) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataTransforms", 1)) + if s.Filters == nil { + invalidParams.Add(request.NewErrParamRequired("Filters")) } - if s.Source == nil { - invalidParams.Add(request.NewErrParamRequired("Source")) + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.DataTransforms != nil { - for i, v := range s.DataTransforms { + if s.Filters != nil { + for i, v := range s.Filters { if v == nil { continue } if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DataTransforms", i), err.(request.ErrInvalidParams)) + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) } } } - if s.Source != nil { - if err := s.Source.Validate(); err != nil { - invalidParams.AddNested("Source", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -17411,115 +27116,108 @@ func (s *LogicalTable) Validate() error { return nil } -// SetAlias sets the Alias field's value. -func (s *LogicalTable) SetAlias(v string) *LogicalTable { - s.Alias = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *SearchDashboardsInput) SetAwsAccountId(v string) *SearchDashboardsInput { + s.AwsAccountId = &v return s } -// SetDataTransforms sets the DataTransforms field's value. -func (s *LogicalTable) SetDataTransforms(v []*TransformOperation) *LogicalTable { - s.DataTransforms = v +// SetFilters sets the Filters field's value. +func (s *SearchDashboardsInput) SetFilters(v []*DashboardSearchFilter) *SearchDashboardsInput { + s.Filters = v return s } -// SetSource sets the Source field's value. -func (s *LogicalTable) SetSource(v *LogicalTableSource) *LogicalTable { - s.Source = v +// SetMaxResults sets the MaxResults field's value. +func (s *SearchDashboardsInput) SetMaxResults(v int64) *SearchDashboardsInput { + s.MaxResults = &v return s } -// Information about the source of a logical table. This is a variant type structure. -// For this structure to be valid, only one of the attributes can be non-null. -type LogicalTableSource struct { +// SetNextToken sets the NextToken field's value. +func (s *SearchDashboardsInput) SetNextToken(v string) *SearchDashboardsInput { + s.NextToken = &v + return s +} + +type SearchDashboardsOutput struct { _ struct{} `type:"structure"` - // Specifies the result of a join of two logical tables. - JoinInstruction *JoinInstruction `type:"structure"` + // The list of dashboards owned by the user specified in Filters in your request. + DashboardSummaryList []*DashboardSummary `type:"list"` - // Physical table ID. - PhysicalTableId *string `min:"1" type:"string"` + // The token for the next set of results, or null if there are no more results. 
+ NextToken *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s LogicalTableSource) String() string { +func (s SearchDashboardsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LogicalTableSource) GoString() string { +func (s SearchDashboardsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *LogicalTableSource) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LogicalTableSource"} - if s.PhysicalTableId != nil && len(*s.PhysicalTableId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PhysicalTableId", 1)) - } - if s.JoinInstruction != nil { - if err := s.JoinInstruction.Validate(); err != nil { - invalidParams.AddNested("JoinInstruction", err.(request.ErrInvalidParams)) - } - } +// SetDashboardSummaryList sets the DashboardSummaryList field's value. +func (s *SearchDashboardsOutput) SetDashboardSummaryList(v []*DashboardSummary) *SearchDashboardsOutput { + s.DashboardSummaryList = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetNextToken sets the NextToken field's value. +func (s *SearchDashboardsOutput) SetNextToken(v string) *SearchDashboardsOutput { + s.NextToken = &v + return s } -// SetJoinInstruction sets the JoinInstruction field's value. -func (s *LogicalTableSource) SetJoinInstruction(v *JoinInstruction) *LogicalTableSource { - s.JoinInstruction = v +// SetRequestId sets the RequestId field's value. +func (s *SearchDashboardsOutput) SetRequestId(v string) *SearchDashboardsOutput { + s.RequestId = &v return s } -// SetPhysicalTableId sets the PhysicalTableId field's value. -func (s *LogicalTableSource) SetPhysicalTableId(v string) *LogicalTableSource { - s.PhysicalTableId = &v +// SetStatus sets the Status field's value. +func (s *SearchDashboardsOutput) SetStatus(v int64) *SearchDashboardsOutput { + s.Status = &v return s } -// Amazon S3 manifest file location. -type ManifestFileLocation struct { +// ServiceNow parameters. +type ServiceNowParameters struct { _ struct{} `type:"structure"` - // Amazon S3 bucket. - // - // Bucket is a required field - Bucket *string `min:"1" type:"string" required:"true"` - - // Amazon S3 key that identifies an object. + // URL of the base site. // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` + // SiteBaseUrl is a required field + SiteBaseUrl *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s ManifestFileLocation) String() string { +func (s ServiceNowParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ManifestFileLocation) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *ManifestFileLocation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ManifestFileLocation"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) +func (s ServiceNowParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServiceNowParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServiceNowParameters"} + if s.SiteBaseUrl == nil { + invalidParams.Add(request.NewErrParamRequired("SiteBaseUrl")) } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + if s.SiteBaseUrl != nil && len(*s.SiteBaseUrl) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SiteBaseUrl", 1)) } if invalidParams.Len() > 0 { @@ -17528,272 +27226,214 @@ func (s *ManifestFileLocation) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *ManifestFileLocation) SetBucket(v string) *ManifestFileLocation { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *ManifestFileLocation) SetKey(v string) *ManifestFileLocation { - s.Key = &v +// SetSiteBaseUrl sets the SiteBaseUrl field's value. +func (s *ServiceNowParameters) SetSiteBaseUrl(v string) *ServiceNowParameters { + s.SiteBaseUrl = &v return s } -// MariaDB parameters. -type MariaDbParameters struct { - _ struct{} `type:"structure"` - - // Database. - // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` +// The number of minutes specified for the lifetime of a session isn't valid. +// The session lifetime must be 15-600 minutes. +type SessionLifetimeInMinutesInvalidException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // Host. - // - // Host is a required field - Host *string `min:"1" type:"string" required:"true"` + Message_ *string `locationName:"Message" type:"string"` - // Port. - // - // Port is a required field - Port *int64 `min:"1" type:"integer" required:"true"` + // The AWS request ID for this request. + RequestId *string `type:"string"` } // String returns the string representation -func (s MariaDbParameters) String() string { +func (s SessionLifetimeInMinutesInvalidException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MariaDbParameters) GoString() string { +func (s SessionLifetimeInMinutesInvalidException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *MariaDbParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MariaDbParameters"} - if s.Database == nil { - invalidParams.Add(request.NewErrParamRequired("Database")) - } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) - } - if s.Host == nil { - invalidParams.Add(request.NewErrParamRequired("Host")) - } - if s.Host != nil && len(*s.Host) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Host", 1)) - } - if s.Port == nil { - invalidParams.Add(request.NewErrParamRequired("Port")) - } - if s.Port != nil && *s.Port < 1 { - invalidParams.Add(request.NewErrParamMinValue("Port", 1)) +func newErrorSessionLifetimeInMinutesInvalidException(v protocol.ResponseMetadata) error { + return &SessionLifetimeInMinutesInvalidException{ + RespMetadata: v, } +} - if invalidParams.Len() > 0 { - return invalidParams +// Code returns the exception type name. +func (s *SessionLifetimeInMinutesInvalidException) Code() string { + return "SessionLifetimeInMinutesInvalidException" +} + +// Message returns the exception's message. +func (s *SessionLifetimeInMinutesInvalidException) Message() string { + if s.Message_ != nil { + return *s.Message_ } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *SessionLifetimeInMinutesInvalidException) OrigErr() error { return nil } -// SetDatabase sets the Database field's value. -func (s *MariaDbParameters) SetDatabase(v string) *MariaDbParameters { - s.Database = &v - return s +func (s *SessionLifetimeInMinutesInvalidException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } -// SetHost sets the Host field's value. -func (s *MariaDbParameters) SetHost(v string) *MariaDbParameters { - s.Host = &v - return s +// Status code returns the HTTP status code for the request's response error. +func (s *SessionLifetimeInMinutesInvalidException) StatusCode() int { + return s.RespMetadata.StatusCode } -// SetPort sets the Port field's value. -func (s *MariaDbParameters) SetPort(v int64) *MariaDbParameters { - s.Port = &v - return s +// RequestID returns the service's response RequestID for request. +func (s *SessionLifetimeInMinutesInvalidException) RequestID() string { + return s.RespMetadata.RequestID } -// MySQL parameters. -type MySqlParameters struct { +// A sheet is an object that contains a set of visuals that are viewed together +// on one page in the Amazon QuickSight console. Every analysis and dashboard +// contains at least one sheet. Each sheet contains at least one visualization +// widget, for example a chart, pivot table, or narrative insight. Sheets can +// be associated with other components, such as controls, filters, and so on. +type Sheet struct { _ struct{} `type:"structure"` - // Database. - // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` - - // Host. - // - // Host is a required field - Host *string `min:"1" type:"string" required:"true"` + // The name of a sheet. This is displayed on the sheet's tab in the QuickSight + // console. + Name *string `type:"string"` - // Port. - // - // Port is a required field - Port *int64 `min:"1" type:"integer" required:"true"` + // The unique identifier associated with a sheet. 
+ SheetId *string `min:"1" type:"string"` } // String returns the string representation -func (s MySqlParameters) String() string { +func (s Sheet) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s MySqlParameters) GoString() string { +func (s Sheet) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *MySqlParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MySqlParameters"} - if s.Database == nil { - invalidParams.Add(request.NewErrParamRequired("Database")) - } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) - } - if s.Host == nil { - invalidParams.Add(request.NewErrParamRequired("Host")) - } - if s.Host != nil && len(*s.Host) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Host", 1)) - } - if s.Port == nil { - invalidParams.Add(request.NewErrParamRequired("Port")) - } - if s.Port != nil && *s.Port < 1 { - invalidParams.Add(request.NewErrParamMinValue("Port", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetName sets the Name field's value. +func (s *Sheet) SetName(v string) *Sheet { + s.Name = &v + return s } -// SetDatabase sets the Database field's value. -func (s *MySqlParameters) SetDatabase(v string) *MySqlParameters { - s.Database = &v +// SetSheetId sets the SheetId field's value. +func (s *Sheet) SetSheetId(v string) *Sheet { + s.SheetId = &v return s } -// SetHost sets the Host field's value. -func (s *MySqlParameters) SetHost(v string) *MySqlParameters { - s.Host = &v - return s +// Sheet controls option. +type SheetControlsOption struct { + _ struct{} `type:"structure"` + + // Visibility state. + VisibilityState *string `type:"string" enum:"DashboardUIState"` } -// SetPort sets the Port field's value. -func (s *MySqlParameters) SetPort(v int64) *MySqlParameters { - s.Port = &v +// String returns the string representation +func (s SheetControlsOption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SheetControlsOption) GoString() string { + return s.String() +} + +// SetVisibilityState sets the VisibilityState field's value. +func (s *SheetControlsOption) SetVisibilityState(v string) *SheetControlsOption { + s.VisibilityState = &v return s } -// Output column. -type OutputColumn struct { +// The theme display options for sheets. +type SheetStyle struct { _ struct{} `type:"structure"` - // A display name for the dataset. - Name *string `min:"1" type:"string"` + // The display options for tiles. + Tile *TileStyle `type:"structure"` - // Type. - Type *string `type:"string" enum:"ColumnDataType"` + // The layout options for tiles. + TileLayout *TileLayoutStyle `type:"structure"` } // String returns the string representation -func (s OutputColumn) String() string { +func (s SheetStyle) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s OutputColumn) GoString() string { +func (s SheetStyle) GoString() string { return s.String() } -// SetName sets the Name field's value. -func (s *OutputColumn) SetName(v string) *OutputColumn { - s.Name = &v +// SetTile sets the Tile field's value. +func (s *SheetStyle) SetTile(v *TileStyle) *SheetStyle { + s.Tile = v return s } -// SetType sets the Type field's value. 
-func (s *OutputColumn) SetType(v string) *OutputColumn { - s.Type = &v +// SetTileLayout sets the TileLayout field's value. +func (s *SheetStyle) SetTileLayout(v *TileLayoutStyle) *SheetStyle { + s.TileLayout = v return s } -// Parameters. -type Parameters struct { +// Snowflake parameters. +type SnowflakeParameters struct { _ struct{} `type:"structure"` - // DateTime parameters. - DateTimeParameters []*DateTimeParameter `type:"list"` - - // Decimal parameters. - DecimalParameters []*DecimalParameter `type:"list"` + // Database. + // + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` - // Integer parameters. - IntegerParameters []*IntegerParameter `type:"list"` + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` - // String parameters. - StringParameters []*StringParameter `type:"list"` + // Warehouse. + // + // Warehouse is a required field + Warehouse *string `type:"string" required:"true"` } // String returns the string representation -func (s Parameters) String() string { +func (s SnowflakeParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Parameters) GoString() string { +func (s SnowflakeParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *Parameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Parameters"} - if s.DateTimeParameters != nil { - for i, v := range s.DateTimeParameters { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DateTimeParameters", i), err.(request.ErrInvalidParams)) - } - } +func (s *SnowflakeParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SnowflakeParameters"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) } - if s.DecimalParameters != nil { - for i, v := range s.DecimalParameters { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DecimalParameters", i), err.(request.ErrInvalidParams)) - } - } + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) } - if s.IntegerParameters != nil { - for i, v := range s.IntegerParameters { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "IntegerParameters", i), err.(request.ErrInvalidParams)) - } - } + if s.Host == nil { + invalidParams.Add(request.NewErrParamRequired("Host")) + } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Host", 1)) } - if s.StringParameters != nil { - for i, v := range s.StringParameters { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "StringParameters", i), err.(request.ErrInvalidParams)) - } - } + if s.Warehouse == nil { + invalidParams.Add(request.NewErrParamRequired("Warehouse")) } if invalidParams.Len() > 0 { @@ -17802,73 +27442,63 @@ func (s *Parameters) Validate() error { return nil } -// SetDateTimeParameters sets the DateTimeParameters field's value. -func (s *Parameters) SetDateTimeParameters(v []*DateTimeParameter) *Parameters { - s.DateTimeParameters = v - return s -} - -// SetDecimalParameters sets the DecimalParameters field's value. 
-func (s *Parameters) SetDecimalParameters(v []*DecimalParameter) *Parameters { - s.DecimalParameters = v +// SetDatabase sets the Database field's value. +func (s *SnowflakeParameters) SetDatabase(v string) *SnowflakeParameters { + s.Database = &v return s } -// SetIntegerParameters sets the IntegerParameters field's value. -func (s *Parameters) SetIntegerParameters(v []*IntegerParameter) *Parameters { - s.IntegerParameters = v +// SetHost sets the Host field's value. +func (s *SnowflakeParameters) SetHost(v string) *SnowflakeParameters { + s.Host = &v return s } -// SetStringParameters sets the StringParameters field's value. -func (s *Parameters) SetStringParameters(v []*StringParameter) *Parameters { - s.StringParameters = v +// SetWarehouse sets the Warehouse field's value. +func (s *SnowflakeParameters) SetWarehouse(v string) *SnowflakeParameters { + s.Warehouse = &v return s } -// A view of a data source that contains information about the shape of the -// data in the underlying source. This is a variant type structure. For this -// structure to be valid, only one of the attributes can be non-null. -type PhysicalTable struct { +// Spark parameters. +type SparkParameters struct { _ struct{} `type:"structure"` - // A physical table type built from the results of the custom SQL query. - CustomSql *CustomSql `type:"structure"` - - // A physical table type for relational data sources. - RelationalTable *RelationalTable `type:"structure"` + // Host. + // + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` - // A physical table type for as S3 data source. - S3Source *S3Source `type:"structure"` + // Port. + // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` } // String returns the string representation -func (s PhysicalTable) String() string { +func (s SparkParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PhysicalTable) GoString() string { +func (s SparkParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PhysicalTable) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PhysicalTable"} - if s.CustomSql != nil { - if err := s.CustomSql.Validate(); err != nil { - invalidParams.AddNested("CustomSql", err.(request.ErrInvalidParams)) - } +func (s *SparkParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SparkParameters"} + if s.Host == nil { + invalidParams.Add(request.NewErrParamRequired("Host")) } - if s.RelationalTable != nil { - if err := s.RelationalTable.Validate(); err != nil { - invalidParams.AddNested("RelationalTable", err.(request.ErrInvalidParams)) - } + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Host", 1)) } - if s.S3Source != nil { - if err := s.S3Source.Validate(); err != nil { - invalidParams.AddNested("S3Source", err.(request.ErrInvalidParams)) - } + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) } if invalidParams.Len() > 0 { @@ -17877,26 +27507,20 @@ func (s *PhysicalTable) Validate() error { return nil } -// SetCustomSql sets the CustomSql field's value. -func (s *PhysicalTable) SetCustomSql(v *CustomSql) *PhysicalTable { - s.CustomSql = v - return s -} - -// SetRelationalTable sets the RelationalTable field's value. 
-func (s *PhysicalTable) SetRelationalTable(v *RelationalTable) *PhysicalTable { - s.RelationalTable = v +// SetHost sets the Host field's value. +func (s *SparkParameters) SetHost(v string) *SparkParameters { + s.Host = &v return s } -// SetS3Source sets the S3Source field's value. -func (s *PhysicalTable) SetS3Source(v *S3Source) *PhysicalTable { - s.S3Source = v +// SetPort sets the Port field's value. +func (s *SparkParameters) SetPort(v int64) *SparkParameters { + s.Port = &v return s } -// PostgreSQL parameters. -type PostgreSqlParameters struct { +// SQL Server parameters. +type SqlServerParameters struct { _ struct{} `type:"structure"` // Database. @@ -17916,18 +27540,18 @@ type PostgreSqlParameters struct { } // String returns the string representation -func (s PostgreSqlParameters) String() string { +func (s SqlServerParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PostgreSqlParameters) GoString() string { +func (s SqlServerParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PostgreSqlParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PostgreSqlParameters"} +func (s *SqlServerParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SqlServerParameters"} if s.Database == nil { invalidParams.Add(request.NewErrParamRequired("Database")) } @@ -17954,184 +27578,202 @@ func (s *PostgreSqlParameters) Validate() error { } // SetDatabase sets the Database field's value. -func (s *PostgreSqlParameters) SetDatabase(v string) *PostgreSqlParameters { +func (s *SqlServerParameters) SetDatabase(v string) *SqlServerParameters { s.Database = &v return s } // SetHost sets the Host field's value. -func (s *PostgreSqlParameters) SetHost(v string) *PostgreSqlParameters { +func (s *SqlServerParameters) SetHost(v string) *SqlServerParameters { s.Host = &v return s } // SetPort sets the Port field's value. -func (s *PostgreSqlParameters) SetPort(v int64) *PostgreSqlParameters { +func (s *SqlServerParameters) SetPort(v int64) *SqlServerParameters { s.Port = &v return s } -// One or more preconditions aren't met. -type PreconditionNotMetException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata - - Message_ *string `locationName:"Message" type:"string"` +// Secure Socket Layer (SSL) properties that apply when QuickSight connects +// to your underlying data source. +type SslProperties struct { + _ struct{} `type:"structure"` - // The AWS request ID for this request. - RequestId *string `type:"string"` + // A Boolean option to control whether SSL should be disabled. + DisableSsl *bool `type:"boolean"` } // String returns the string representation -func (s PreconditionNotMetException) String() string { +func (s SslProperties) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PreconditionNotMetException) GoString() string { +func (s SslProperties) GoString() string { return s.String() } -func newErrorPreconditionNotMetException(v protocol.ResponseMetadata) error { - return &PreconditionNotMetException{ - respMetadata: v, - } +// SetDisableSsl sets the DisableSsl field's value. +func (s *SslProperties) SetDisableSsl(v bool) *SslProperties { + s.DisableSsl = &v + return s } -// Code returns the exception type name. 
-func (s PreconditionNotMetException) Code() string { - return "PreconditionNotMetException" +// A string parameter. +type StringParameter struct { + _ struct{} `type:"structure"` + + // A display name for a string parameter. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The values of a string parameter. + // + // Values is a required field + Values []*string `type:"list" required:"true"` } -// Message returns the exception's message. -func (s PreconditionNotMetException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// String returns the string representation +func (s StringParameter) String() string { + return awsutil.Prettify(s) } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s PreconditionNotMetException) OrigErr() error { - return nil +// GoString returns the string representation +func (s StringParameter) GoString() string { + return s.String() } -func (s PreconditionNotMetException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// Validate inspects the fields of the type to determine if they are valid. +func (s *StringParameter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StringParameter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Values == nil { + invalidParams.Add(request.NewErrParamRequired("Values")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// Status code returns the HTTP status code for the request's response error. -func (s PreconditionNotMetException) StatusCode() int { - return s.respMetadata.StatusCode +// SetName sets the Name field's value. +func (s *StringParameter) SetName(v string) *StringParameter { + s.Name = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s PreconditionNotMetException) RequestID() string { - return s.respMetadata.RequestID +// SetValues sets the Values field's value. +func (s *StringParameter) SetValues(v []*string) *StringParameter { + s.Values = v + return s } -// Presto parameters. -type PrestoParameters struct { +// The key or keys of the key-value pairs for the resource tag or tags assigned +// to the resource. +type Tag struct { _ struct{} `type:"structure"` - // Catalog. - // - // Catalog is a required field - Catalog *string `type:"string" required:"true"` - - // Host. + // Tag key. // - // Host is a required field - Host *string `min:"1" type:"string" required:"true"` + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` - // Port. + // Tag value. // - // Port is a required field - Port *int64 `min:"1" type:"integer" required:"true"` + // Value is a required field + Value *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s PrestoParameters) String() string { +func (s Tag) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PrestoParameters) GoString() string { +func (s Tag) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
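// --- Illustrative usage sketch; not part of the vendored SDK diff. ---
// Builds a StringParameter and wraps it in the Parameters shape whose
// SetStringParameters setter appears elsewhere in this file (its old location
// is visible in the removed hunk further up). Names and values are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	region := (&quicksight.StringParameter{}).
		SetName("Region").
		SetValues(aws.StringSlice([]string{"us-east-1", "us-west-2"}))

	// Both Name and Values are required; Validate reports whichever is missing.
	if err := region.Validate(); err != nil {
		fmt.Println("invalid string parameter:", err)
		return
	}

	params := (&quicksight.Parameters{}).
		SetStringParameters([]*quicksight.StringParameter{region})

	fmt.Println(params.String())
}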
-func (s *PrestoParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PrestoParameters"} - if s.Catalog == nil { - invalidParams.Add(request.NewErrParamRequired("Catalog")) - } - if s.Host == nil { - invalidParams.Add(request.NewErrParamRequired("Host")) +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) } - if s.Host != nil && len(*s.Host) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Host", 1)) + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } - if s.Port == nil { - invalidParams.Add(request.NewErrParamRequired("Port")) + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) } - if s.Port != nil && *s.Port < 1 { - invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil -} - -// SetCatalog sets the Catalog field's value. -func (s *PrestoParameters) SetCatalog(v string) *PrestoParameters { - s.Catalog = &v - return s -} - -// SetHost sets the Host field's value. -func (s *PrestoParameters) SetHost(v string) *PrestoParameters { - s.Host = &v +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v return s } -// SetPort sets the Port field's value. -func (s *PrestoParameters) SetPort(v int64) *PrestoParameters { - s.Port = &v +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v return s } -// A transform operation that projects columns. Operations that come after a -// projection can only refer to projected columns. -type ProjectOperation struct { +// A transform operation that tags a column with additional information. +type TagColumnOperation struct { _ struct{} `type:"structure"` - // Projected columns. + // The column that this operation acts on. // - // ProjectedColumns is a required field - ProjectedColumns []*string `min:"1" type:"list" required:"true"` + // ColumnName is a required field + ColumnName *string `min:"1" type:"string" required:"true"` + + // The dataset column tag, currently only used for geospatial type tagging. . + // + // This is not tags for the AWS tagging feature. . + // + // Tags is a required field + Tags []*ColumnTag `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s ProjectOperation) String() string { +func (s TagColumnOperation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ProjectOperation) GoString() string { +func (s TagColumnOperation) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ProjectOperation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ProjectOperation"} - if s.ProjectedColumns == nil { - invalidParams.Add(request.NewErrParamRequired("ProjectedColumns")) +func (s *TagColumnOperation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagColumnOperation"} + if s.ColumnName == nil { + invalidParams.Add(request.NewErrParamRequired("ColumnName")) } - if s.ProjectedColumns != nil && len(s.ProjectedColumns) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ProjectedColumns", 1)) + if s.ColumnName != nil && len(*s.ColumnName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ColumnName", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } if invalidParams.Len() > 0 { @@ -18140,456 +27782,370 @@ func (s *ProjectOperation) Validate() error { return nil } -// SetProjectedColumns sets the ProjectedColumns field's value. -func (s *ProjectOperation) SetProjectedColumns(v []*string) *ProjectOperation { - s.ProjectedColumns = v +// SetColumnName sets the ColumnName field's value. +func (s *TagColumnOperation) SetColumnName(v string) *TagColumnOperation { + s.ColumnName = &v return s } -// Information about a queued dataset SPICE ingestion. -type QueueInfo struct { +// SetTags sets the Tags field's value. +func (s *TagColumnOperation) SetTags(v []*ColumnTag) *TagColumnOperation { + s.Tags = v + return s +} + +type TagResourceInput struct { _ struct{} `type:"structure"` - // The ID of the ongoing ingestion. The queued ingestion is waiting for the - // ongoing ingestion to complete. + // The Amazon Resource Name (ARN) of the resource that you want to tag. // - // QueuedIngestion is a required field - QueuedIngestion *string `type:"string" required:"true"` + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" type:"string" required:"true"` - // The ID of the queued ingestion. + // Contains a map of the key-value pairs for the resource tag or tags assigned + // to the resource. // - // WaitingOnIngestion is a required field - WaitingOnIngestion *string `type:"string" required:"true"` + // Tags is a required field + Tags []*Tag `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s QueueInfo) String() string { +func (s TagResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s QueueInfo) GoString() string { +func (s TagResourceInput) GoString() string { return s.String() } -// SetQueuedIngestion sets the QueuedIngestion field's value. -func (s *QueueInfo) SetQueuedIngestion(v string) *QueueInfo { - s.QueuedIngestion = &v +// Validate inspects the fields of the type to determine if they are valid. 
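// --- Illustrative usage sketch; not part of the vendored SDK diff. ---
// Contrasts the two unrelated "tag" shapes above: Tag (an AWS tagging-feature
// key/value pair) and TagColumnOperation (a dataset column tag such as a
// geospatial role). The ColumnTag geographic-role setter is assumed from the
// ColumnTag shape defined elsewhere in this file; values are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	// A resource tag for the AWS tagging feature.
	costCenter := (&quicksight.Tag{}).SetKey("CostCenter").SetValue("analytics")
	if err := costCenter.Validate(); err != nil {
		fmt.Println("invalid tag:", err)
		return
	}

	// A column tag, which annotates a dataset column rather than a resource.
	tagColumn := (&quicksight.TagColumnOperation{}).
		SetColumnName("customer_city").
		SetTags([]*quicksight.ColumnTag{
			(&quicksight.ColumnTag{}).SetColumnGeographicRole("CITY"), // assumed setter
		})
	if err := tagColumn.Validate(); err != nil {
		fmt.Println("invalid column tag operation:", err)
		return
	}

	fmt.Println(costCenter.String(), tagColumn.String())
}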
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v return s } -// SetWaitingOnIngestion sets the WaitingOnIngestion field's value. -func (s *QueueInfo) SetWaitingOnIngestion(v string) *QueueInfo { - s.WaitingOnIngestion = &v +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v return s } -// Amazon RDS parameters. -type RdsParameters struct { +type TagResourceOutput struct { _ struct{} `type:"structure"` - // Database. - // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // Instance ID. - // - // InstanceId is a required field - InstanceId *string `min:"1" type:"string" required:"true"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s RdsParameters) String() string { +func (s TagResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RdsParameters) GoString() string { +func (s TagResourceOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *RdsParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RdsParameters"} - if s.Database == nil { - invalidParams.Add(request.NewErrParamRequired("Database")) - } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) - } - if s.InstanceId == nil { - invalidParams.Add(request.NewErrParamRequired("InstanceId")) - } - if s.InstanceId != nil && len(*s.InstanceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("InstanceId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDatabase sets the Database field's value. -func (s *RdsParameters) SetDatabase(v string) *RdsParameters { - s.Database = &v +// SetRequestId sets the RequestId field's value. +func (s *TagResourceOutput) SetRequestId(v string) *TagResourceOutput { + s.RequestId = &v return s } -// SetInstanceId sets the InstanceId field's value. -func (s *RdsParameters) SetInstanceId(v string) *RdsParameters { - s.InstanceId = &v +// SetStatus sets the Status field's value. +func (s *TagResourceOutput) SetStatus(v int64) *TagResourceOutput { + s.Status = &v return s } -// Amazon Redshift parameters. The ClusterId field can be blank if Host and -// Port are both set. The Host and Port fields can be blank if the ClusterId -// field is set. 
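// --- Illustrative usage sketch; not part of the vendored SDK diff. ---
// Wires TagResourceInput/TagResourceOutput to the client's TagResource
// operation defined earlier in this file. The ARN and tag values are
// placeholders; a real call needs valid credentials and an existing resource.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	input := (&quicksight.TagResourceInput{}).
		SetResourceArn("arn:aws:quicksight:us-east-1:111122223333:dashboard/sales"). // placeholder ARN
		SetTags([]*quicksight.Tag{
			(&quicksight.Tag{}).SetKey("Team").SetValue("bi"),
		})

	// Validate checks the input locally and recurses into each Tag element.
	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}

	svc := quicksight.New(session.Must(session.NewSession()))
	out, err := svc.TagResource(input)
	if err != nil {
		fmt.Println("TagResource failed:", err)
		return
	}
	fmt.Println("HTTP status:", aws.Int64Value(out.Status), "request ID:", aws.StringValue(out.RequestId))
}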
-type RedshiftParameters struct { +// A template object. A template is an entity in QuickSight that encapsulates +// the metadata required to create an analysis and that you can use to create +// a dashboard. A template adds a layer of abstraction by using placeholders +// to replace the dataset associated with an analysis. You can use templates +// to create dashboards by replacing dataset placeholders with datasets that +// follow the same schema that was used to create the source analysis and template. +// +// You can share templates across AWS accounts by allowing users in other AWS +// accounts to create a template or a dashboard from an existing template. +type Template struct { _ struct{} `type:"structure"` - // Cluster ID. This field can be blank if the Host and Port are provided. - ClusterId *string `min:"1" type:"string"` + // The Amazon Resource Name (ARN) of the template. + Arn *string `type:"string"` - // Database. - // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` + // Time when this was created. + CreatedTime *time.Time `type:"timestamp"` - // Host. This field can be blank if ClusterId is provided. - Host *string `min:"1" type:"string"` + // Time when this was last updated. + LastUpdatedTime *time.Time `type:"timestamp"` - // Port. This field can be blank if the ClusterId is provided. - Port *int64 `type:"integer"` + // The display name of the template. + Name *string `min:"1" type:"string"` + + // The ID for the template. This is unique per AWS Region for each AWS account. + TemplateId *string `min:"1" type:"string"` + + // A structure describing the versions of the template. + Version *TemplateVersion `type:"structure"` } // String returns the string representation -func (s RedshiftParameters) String() string { +func (s Template) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RedshiftParameters) GoString() string { +func (s Template) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *RedshiftParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RedshiftParameters"} - if s.ClusterId != nil && len(*s.ClusterId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClusterId", 1)) - } - if s.Database == nil { - invalidParams.Add(request.NewErrParamRequired("Database")) - } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) - } - if s.Host != nil && len(*s.Host) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Host", 1)) - } +// SetArn sets the Arn field's value. +func (s *Template) SetArn(v string) *Template { + s.Arn = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCreatedTime sets the CreatedTime field's value. +func (s *Template) SetCreatedTime(v time.Time) *Template { + s.CreatedTime = &v + return s } -// SetClusterId sets the ClusterId field's value. -func (s *RedshiftParameters) SetClusterId(v string) *RedshiftParameters { - s.ClusterId = &v +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *Template) SetLastUpdatedTime(v time.Time) *Template { + s.LastUpdatedTime = &v return s } -// SetDatabase sets the Database field's value. -func (s *RedshiftParameters) SetDatabase(v string) *RedshiftParameters { - s.Database = &v +// SetName sets the Name field's value. 
+func (s *Template) SetName(v string) *Template { + s.Name = &v return s } -// SetHost sets the Host field's value. -func (s *RedshiftParameters) SetHost(v string) *RedshiftParameters { - s.Host = &v +// SetTemplateId sets the TemplateId field's value. +func (s *Template) SetTemplateId(v string) *Template { + s.TemplateId = &v return s } -// SetPort sets the Port field's value. -func (s *RedshiftParameters) SetPort(v int64) *RedshiftParameters { - s.Port = &v +// SetVersion sets the Version field's value. +func (s *Template) SetVersion(v *TemplateVersion) *Template { + s.Version = v return s } -type RegisterUserInput struct { +// The template alias. +type TemplateAlias struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the user is in. Currently, you use the ID - // for the AWS account that contains your Amazon QuickSight account. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - - // The email address of the user that you want to register. - // - // Email is a required field - Email *string `type:"string" required:"true"` - - // The ARN of the IAM user or role that you are registering with Amazon QuickSight. - IamArn *string `type:"string"` - - // Amazon QuickSight supports several ways of managing the identity of users. - // This parameter accepts two values: - // - // * IAM: A user whose identity maps to an existing IAM user or role. - // - // * QUICKSIGHT: A user whose identity is owned and managed internally by - // Amazon QuickSight. - // - // IdentityType is a required field - IdentityType *string `type:"string" required:"true" enum:"IdentityType"` - - // The namespace. Currently, you should set this to default. - // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` - - // You need to use this parameter only when you register one or more users using - // an assumed IAM role. You don't need to provide the session name for other - // scenarios, for example when you are registering an IAM user or an Amazon - // QuickSight user. You can register multiple users using the same IAM role - // if each user has a different session name. For more information on assuming - // IAM roles, see assume-role (https://docs.aws.example.com/cli/latest/reference/sts/assume-role.html) - // in the AWS CLI Reference. - SessionName *string `min:"2" type:"string"` + // The display name of the template alias. + AliasName *string `min:"1" type:"string"` - // The Amazon QuickSight user name that you want to create for the user you - // are registering. - UserName *string `min:"1" type:"string"` + // The Amazon Resource Name (ARN) of the template alias. + Arn *string `type:"string"` - // The Amazon QuickSight role for the user. The user role can be one of the - // following: - // - // * READER: A user who has read-only access to dashboards. - // - // * AUTHOR: A user who can create data sources, datasets, analyses, and - // dashboards. - // - // * ADMIN: A user who is an author, who can also manage Amazon QuickSight - // settings. - // - // * RESTRICTED_READER: This role isn't currently available for use. - // - // * RESTRICTED_AUTHOR: This role isn't currently available for use. - // - // UserRole is a required field - UserRole *string `type:"string" required:"true" enum:"UserRole"` + // The version number of the template alias. 
+ TemplateVersionNumber *int64 `min:"1" type:"long"` } // String returns the string representation -func (s RegisterUserInput) String() string { +func (s TemplateAlias) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RegisterUserInput) GoString() string { +func (s TemplateAlias) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *RegisterUserInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RegisterUserInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.Email == nil { - invalidParams.Add(request.NewErrParamRequired("Email")) - } - if s.IdentityType == nil { - invalidParams.Add(request.NewErrParamRequired("IdentityType")) - } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) - } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) - } - if s.SessionName != nil && len(*s.SessionName) < 2 { - invalidParams.Add(request.NewErrParamMinLen("SessionName", 2)) - } - if s.UserName != nil && len(*s.UserName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) - } - if s.UserRole == nil { - invalidParams.Add(request.NewErrParamRequired("UserRole")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *RegisterUserInput) SetAwsAccountId(v string) *RegisterUserInput { - s.AwsAccountId = &v +// SetAliasName sets the AliasName field's value. +func (s *TemplateAlias) SetAliasName(v string) *TemplateAlias { + s.AliasName = &v return s } -// SetEmail sets the Email field's value. -func (s *RegisterUserInput) SetEmail(v string) *RegisterUserInput { - s.Email = &v +// SetArn sets the Arn field's value. +func (s *TemplateAlias) SetArn(v string) *TemplateAlias { + s.Arn = &v return s } -// SetIamArn sets the IamArn field's value. -func (s *RegisterUserInput) SetIamArn(v string) *RegisterUserInput { - s.IamArn = &v +// SetTemplateVersionNumber sets the TemplateVersionNumber field's value. +func (s *TemplateAlias) SetTemplateVersionNumber(v int64) *TemplateAlias { + s.TemplateVersionNumber = &v return s } -// SetIdentityType sets the IdentityType field's value. -func (s *RegisterUserInput) SetIdentityType(v string) *RegisterUserInput { - s.IdentityType = &v - return s +// List of errors that occurred when the template version creation failed. +type TemplateError struct { + _ struct{} `type:"structure"` + + // Description of the error type. + Message *string `type:"string"` + + // Type of error. + Type *string `type:"string" enum:"TemplateErrorType"` } -// SetNamespace sets the Namespace field's value. -func (s *RegisterUserInput) SetNamespace(v string) *RegisterUserInput { - s.Namespace = &v - return s +// String returns the string representation +func (s TemplateError) String() string { + return awsutil.Prettify(s) } -// SetSessionName sets the SessionName field's value. -func (s *RegisterUserInput) SetSessionName(v string) *RegisterUserInput { - s.SessionName = &v - return s +// GoString returns the string representation +func (s TemplateError) GoString() string { + return s.String() } -// SetUserName sets the UserName field's value. 
-func (s *RegisterUserInput) SetUserName(v string) *RegisterUserInput { - s.UserName = &v +// SetMessage sets the Message field's value. +func (s *TemplateError) SetMessage(v string) *TemplateError { + s.Message = &v return s } -// SetUserRole sets the UserRole field's value. -func (s *RegisterUserInput) SetUserRole(v string) *RegisterUserInput { - s.UserRole = &v +// SetType sets the Type field's value. +func (s *TemplateError) SetType(v string) *TemplateError { + s.Type = &v return s } -type RegisterUserOutput struct { +// The source analysis of the template. +type TemplateSourceAnalysis struct { _ struct{} `type:"structure"` - // The AWS request ID for this operation. - RequestId *string `type:"string"` - - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` - - // The user name. - User *User `type:"structure"` + // The Amazon Resource Name (ARN) of the resource. + // + // Arn is a required field + Arn *string `type:"string" required:"true"` - // The URL the user visits to complete registration and provide a password. - // This is returned only for users with an identity type of QUICKSIGHT. - UserInvitationUrl *string `type:"string"` + // A structure containing information about the dataset references used as placeholders + // in the template. + // + // DataSetReferences is a required field + DataSetReferences []*DataSetReference `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s RegisterUserOutput) String() string { +func (s TemplateSourceAnalysis) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RegisterUserOutput) GoString() string { +func (s TemplateSourceAnalysis) GoString() string { return s.String() } -// SetRequestId sets the RequestId field's value. -func (s *RegisterUserOutput) SetRequestId(v string) *RegisterUserOutput { - s.RequestId = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *TemplateSourceAnalysis) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TemplateSourceAnalysis"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.DataSetReferences == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetReferences")) + } + if s.DataSetReferences != nil && len(s.DataSetReferences) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetReferences", 1)) + } + if s.DataSetReferences != nil { + for i, v := range s.DataSetReferences { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DataSetReferences", i), err.(request.ErrInvalidParams)) + } + } + } -// SetStatus sets the Status field's value. -func (s *RegisterUserOutput) SetStatus(v int64) *RegisterUserOutput { - s.Status = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetUser sets the User field's value. -func (s *RegisterUserOutput) SetUser(v *User) *RegisterUserOutput { - s.User = v +// SetArn sets the Arn field's value. +func (s *TemplateSourceAnalysis) SetArn(v string) *TemplateSourceAnalysis { + s.Arn = &v return s } -// SetUserInvitationUrl sets the UserInvitationUrl field's value. -func (s *RegisterUserOutput) SetUserInvitationUrl(v string) *RegisterUserOutput { - s.UserInvitationUrl = &v +// SetDataSetReferences sets the DataSetReferences field's value. 
+func (s *TemplateSourceAnalysis) SetDataSetReferences(v []*DataSetReference) *TemplateSourceAnalysis { + s.DataSetReferences = v return s } -// A physical table type for relational data sources. -type RelationalTable struct { +// The source entity of the template. +type TemplateSourceEntity struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) for the data source. - // - // DataSourceArn is a required field - DataSourceArn *string `type:"string" required:"true"` - - // The column schema of the table. - // - // InputColumns is a required field - InputColumns []*InputColumn `min:"1" type:"list" required:"true"` - - // The name of the relational table. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // The source analysis, if it is based on an analysis. + SourceAnalysis *TemplateSourceAnalysis `type:"structure"` - // The schema name. This name applies to certain relational database engines. - Schema *string `type:"string"` + // The source template, if it is based on an template. + SourceTemplate *TemplateSourceTemplate `type:"structure"` } // String returns the string representation -func (s RelationalTable) String() string { +func (s TemplateSourceEntity) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RelationalTable) GoString() string { +func (s TemplateSourceEntity) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RelationalTable) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RelationalTable"} - if s.DataSourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("DataSourceArn")) - } - if s.InputColumns == nil { - invalidParams.Add(request.NewErrParamRequired("InputColumns")) - } - if s.InputColumns != nil && len(s.InputColumns) < 1 { - invalidParams.Add(request.NewErrParamMinLen("InputColumns", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) +func (s *TemplateSourceEntity) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TemplateSourceEntity"} + if s.SourceAnalysis != nil { + if err := s.SourceAnalysis.Validate(); err != nil { + invalidParams.AddNested("SourceAnalysis", err.(request.ErrInvalidParams)) + } } - if s.InputColumns != nil { - for i, v := range s.InputColumns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InputColumns", i), err.(request.ErrInvalidParams)) - } + if s.SourceTemplate != nil { + if err := s.SourceTemplate.Validate(); err != nil { + invalidParams.AddNested("SourceTemplate", err.(request.ErrInvalidParams)) } } @@ -18599,69 +28155,43 @@ func (s *RelationalTable) Validate() error { return nil } -// SetDataSourceArn sets the DataSourceArn field's value. -func (s *RelationalTable) SetDataSourceArn(v string) *RelationalTable { - s.DataSourceArn = &v - return s -} - -// SetInputColumns sets the InputColumns field's value. -func (s *RelationalTable) SetInputColumns(v []*InputColumn) *RelationalTable { - s.InputColumns = v - return s -} - -// SetName sets the Name field's value. -func (s *RelationalTable) SetName(v string) *RelationalTable { - s.Name = &v +// SetSourceAnalysis sets the SourceAnalysis field's value. 
+func (s *TemplateSourceEntity) SetSourceAnalysis(v *TemplateSourceAnalysis) *TemplateSourceEntity { + s.SourceAnalysis = v return s } -// SetSchema sets the Schema field's value. -func (s *RelationalTable) SetSchema(v string) *RelationalTable { - s.Schema = &v +// SetSourceTemplate sets the SourceTemplate field's value. +func (s *TemplateSourceEntity) SetSourceTemplate(v *TemplateSourceTemplate) *TemplateSourceEntity { + s.SourceTemplate = v return s } -// A transform operation that renames a column. -type RenameColumnOperation struct { +// The source template of the template. +type TemplateSourceTemplate struct { _ struct{} `type:"structure"` - // The name of the column to be renamed. - // - // ColumnName is a required field - ColumnName *string `min:"1" type:"string" required:"true"` - - // The new name for the column. + // The Amazon Resource Name (ARN) of the resource. // - // NewColumnName is a required field - NewColumnName *string `min:"1" type:"string" required:"true"` + // Arn is a required field + Arn *string `type:"string" required:"true"` } // String returns the string representation -func (s RenameColumnOperation) String() string { +func (s TemplateSourceTemplate) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RenameColumnOperation) GoString() string { +func (s TemplateSourceTemplate) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RenameColumnOperation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RenameColumnOperation"} - if s.ColumnName == nil { - invalidParams.Add(request.NewErrParamRequired("ColumnName")) - } - if s.ColumnName != nil && len(*s.ColumnName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ColumnName", 1)) - } - if s.NewColumnName == nil { - invalidParams.Add(request.NewErrParamRequired("NewColumnName")) - } - if s.NewColumnName != nil && len(*s.NewColumnName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NewColumnName", 1)) +func (s *TemplateSourceTemplate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TemplateSourceTemplate"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) } if invalidParams.Len() > 0 { @@ -18670,184 +28200,291 @@ func (s *RenameColumnOperation) Validate() error { return nil } -// SetColumnName sets the ColumnName field's value. -func (s *RenameColumnOperation) SetColumnName(v string) *RenameColumnOperation { - s.ColumnName = &v +// SetArn sets the Arn field's value. +func (s *TemplateSourceTemplate) SetArn(v string) *TemplateSourceTemplate { + s.Arn = &v return s } -// SetNewColumnName sets the NewColumnName field's value. -func (s *RenameColumnOperation) SetNewColumnName(v string) *RenameColumnOperation { - s.NewColumnName = &v +// The template summary. +type TemplateSummary struct { + _ struct{} `type:"structure"` + + // A summary of a template. + Arn *string `type:"string"` + + // The last time that this template was created. + CreatedTime *time.Time `type:"timestamp"` + + // The last time that this template was updated. + LastUpdatedTime *time.Time `type:"timestamp"` + + // A structure containing a list of version numbers for the template summary. + LatestVersionNumber *int64 `min:"1" type:"long"` + + // A display name for the template. + Name *string `min:"1" type:"string"` + + // The ID of the template. This ID is unique per AWS Region for each AWS account. 
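// --- Illustrative usage sketch; not part of the vendored SDK diff. ---
// Builds a TemplateSourceEntity from a source analysis plus the dataset
// placeholders it maps, the shape a CreateTemplate request would reference.
// ARNs and placeholder names are made up, and the DataSetReference setters are
// assumed from its definition elsewhere in this file.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	source := (&quicksight.TemplateSourceEntity{}).
		SetSourceAnalysis((&quicksight.TemplateSourceAnalysis{}).
			SetArn("arn:aws:quicksight:us-east-1:111122223333:analysis/sales-analysis"). // placeholder
			SetDataSetReferences([]*quicksight.DataSetReference{
				(&quicksight.DataSetReference{}).
					SetDataSetPlaceholder("sales_ds").                                        // assumed setter
					SetDataSetArn("arn:aws:quicksight:us-east-1:111122223333:dataset/sales"), // assumed setter
			}))

	// Set only one of SourceAnalysis/SourceTemplate; Validate recurses into
	// whichever member is present.
	if err := source.Validate(); err != nil {
		fmt.Println("invalid source entity:", err)
		return
	}
	fmt.Println(source.String())
}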
+ TemplateId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TemplateSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TemplateSummary) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *TemplateSummary) SetArn(v string) *TemplateSummary { + s.Arn = &v return s } -// The resource specified already exists. -type ResourceExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// SetCreatedTime sets the CreatedTime field's value. +func (s *TemplateSummary) SetCreatedTime(v time.Time) *TemplateSummary { + s.CreatedTime = &v + return s +} - Message_ *string `locationName:"Message" type:"string"` +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *TemplateSummary) SetLastUpdatedTime(v time.Time) *TemplateSummary { + s.LastUpdatedTime = &v + return s +} - // The AWS request ID for this request. - RequestId *string `type:"string"` +// SetLatestVersionNumber sets the LatestVersionNumber field's value. +func (s *TemplateSummary) SetLatestVersionNumber(v int64) *TemplateSummary { + s.LatestVersionNumber = &v + return s +} - // The AWS request ID for this request. - ResourceType *string `type:"string" enum:"ExceptionResourceType"` +// SetName sets the Name field's value. +func (s *TemplateSummary) SetName(v string) *TemplateSummary { + s.Name = &v + return s +} + +// SetTemplateId sets the TemplateId field's value. +func (s *TemplateSummary) SetTemplateId(v string) *TemplateSummary { + s.TemplateId = &v + return s +} + +// A version of a template. +type TemplateVersion struct { + _ struct{} `type:"structure"` + + // The time that this template version was created. + CreatedTime *time.Time `type:"timestamp"` + + // Schema of the dataset identified by the placeholder. Any dashboard created + // from this template should be bound to new datasets matching the same schema + // described through this API operation. + DataSetConfigurations []*DataSetConfiguration `type:"list"` + + // The description of the template. + Description *string `min:"1" type:"string"` + + // Errors associated with this template version. + Errors []*TemplateError `min:"1" type:"list"` + + // A list of the associated sheets with the unique identifier and name of each + // sheet. + Sheets []*Sheet `type:"list"` + + // The Amazon Resource Name (ARN) of an analysis or template that was used to + // create this template. + SourceEntityArn *string `type:"string"` + + // The HTTP status of the request. + Status *string `type:"string" enum:"ResourceStatus"` + + // The ARN of the theme associated with this version of the template. + ThemeArn *string `type:"string"` + + // The version number of the template version. + VersionNumber *int64 `min:"1" type:"long"` } // String returns the string representation -func (s ResourceExistsException) String() string { +func (s TemplateVersion) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ResourceExistsException) GoString() string { +func (s TemplateVersion) GoString() string { return s.String() } -func newErrorResourceExistsException(v protocol.ResponseMetadata) error { - return &ResourceExistsException{ - respMetadata: v, - } +// SetCreatedTime sets the CreatedTime field's value. +func (s *TemplateVersion) SetCreatedTime(v time.Time) *TemplateVersion { + s.CreatedTime = &v + return s } -// Code returns the exception type name. 
-func (s ResourceExistsException) Code() string { - return "ResourceExistsException" +// SetDataSetConfigurations sets the DataSetConfigurations field's value. +func (s *TemplateVersion) SetDataSetConfigurations(v []*DataSetConfiguration) *TemplateVersion { + s.DataSetConfigurations = v + return s } -// Message returns the exception's message. -func (s ResourceExistsException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetDescription sets the Description field's value. +func (s *TemplateVersion) SetDescription(v string) *TemplateVersion { + s.Description = &v + return s +} + +// SetErrors sets the Errors field's value. +func (s *TemplateVersion) SetErrors(v []*TemplateError) *TemplateVersion { + s.Errors = v + return s +} + +// SetSheets sets the Sheets field's value. +func (s *TemplateVersion) SetSheets(v []*Sheet) *TemplateVersion { + s.Sheets = v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceExistsException) OrigErr() error { - return nil +// SetSourceEntityArn sets the SourceEntityArn field's value. +func (s *TemplateVersion) SetSourceEntityArn(v string) *TemplateVersion { + s.SourceEntityArn = &v + return s } -func (s ResourceExistsException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetStatus sets the Status field's value. +func (s *TemplateVersion) SetStatus(v string) *TemplateVersion { + s.Status = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s ResourceExistsException) StatusCode() int { - return s.respMetadata.StatusCode +// SetThemeArn sets the ThemeArn field's value. +func (s *TemplateVersion) SetThemeArn(v string) *TemplateVersion { + s.ThemeArn = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s ResourceExistsException) RequestID() string { - return s.respMetadata.RequestID +// SetVersionNumber sets the VersionNumber field's value. +func (s *TemplateVersion) SetVersionNumber(v int64) *TemplateVersion { + s.VersionNumber = &v + return s } -// One or more resources can't be found. -type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// The template version. +type TemplateVersionSummary struct { + _ struct{} `type:"structure"` - Message_ *string `locationName:"Message" type:"string"` + // The Amazon Resource Name (ARN) of the template version. + Arn *string `type:"string"` - // The AWS request ID for this request. - RequestId *string `type:"string"` + // The time that this template version was created. + CreatedTime *time.Time `type:"timestamp"` - // The AWS request ID for this request. - ResourceType *string `type:"string" enum:"ExceptionResourceType"` + // The description of the template version. + Description *string `min:"1" type:"string"` + + // The status of the template version. + Status *string `type:"string" enum:"ResourceStatus"` + + // The version number of the template version. 
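// --- Illustrative usage sketch; not part of the vendored SDK diff. ---
// Reads the Template/TemplateVersion output shapes above through the
// DescribeTemplate operation defined earlier in this file (assumed input
// members: AwsAccountId, TemplateId). The account and template IDs are
// placeholders, and the call only succeeds with real credentials.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	svc := quicksight.New(session.Must(session.NewSession()))

	out, err := svc.DescribeTemplate((&quicksight.DescribeTemplateInput{}).
		SetAwsAccountId("111122223333"). // placeholder account ID
		SetTemplateId("sales-template")) // placeholder template ID
	if err != nil {
		fmt.Println("DescribeTemplate failed:", err)
		return
	}

	if tmpl := out.Template; tmpl != nil {
		fmt.Println("template:", aws.StringValue(tmpl.Name), aws.StringValue(tmpl.Arn))
		if v := tmpl.Version; v != nil {
			fmt.Println("version", aws.Int64Value(v.VersionNumber), "status", aws.StringValue(v.Status))
			// Errors is populated only when creating this version failed.
			for _, e := range v.Errors {
				fmt.Println("  error:", aws.StringValue(e.Type), aws.StringValue(e.Message))
			}
		}
	}
}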
+ VersionNumber *int64 `min:"1" type:"long"` } // String returns the string representation -func (s ResourceNotFoundException) String() string { +func (s TemplateVersionSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ResourceNotFoundException) GoString() string { +func (s TemplateVersionSummary) GoString() string { return s.String() } -func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { - return &ResourceNotFoundException{ - respMetadata: v, - } -} - -// Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { - return "ResourceNotFoundException" -} - -// Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetArn sets the Arn field's value. +func (s *TemplateVersionSummary) SetArn(v string) *TemplateVersionSummary { + s.Arn = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { - return nil +// SetCreatedTime sets the CreatedTime field's value. +func (s *TemplateVersionSummary) SetCreatedTime(v time.Time) *TemplateVersionSummary { + s.CreatedTime = &v + return s } -func (s ResourceNotFoundException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetDescription sets the Description field's value. +func (s *TemplateVersionSummary) SetDescription(v string) *TemplateVersionSummary { + s.Description = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +// SetStatus sets the Status field's value. +func (s *TemplateVersionSummary) SetStatus(v string) *TemplateVersionSummary { + s.Status = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +// SetVersionNumber sets the VersionNumber field's value. +func (s *TemplateVersionSummary) SetVersionNumber(v int64) *TemplateVersionSummary { + s.VersionNumber = &v + return s } -// Permission for the resource. -type ResourcePermission struct { +// Teradata parameters. +type TeradataParameters struct { _ struct{} `type:"structure"` - // The action to grant or revoke permissions on, for example "quicksight:DescribeDashboard". + // Database. // - // Actions is a required field - Actions []*string `min:"1" type:"list" required:"true"` + // Database is a required field + Database *string `min:"1" type:"string" required:"true"` - // The Amazon Resource Name (ARN) of an Amazon QuickSight user or group, or - // an IAM ARN. If you are using cross-account resource sharing, this is the - // IAM ARN of an account root. Otherwise, it is the ARN of a QuickSight user - // or group. . + // Host. // - // Principal is a required field - Principal *string `min:"1" type:"string" required:"true"` + // Host is a required field + Host *string `min:"1" type:"string" required:"true"` + + // Port. 
+ // + // Port is a required field + Port *int64 `min:"1" type:"integer" required:"true"` } // String returns the string representation -func (s ResourcePermission) String() string { +func (s TeradataParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ResourcePermission) GoString() string { +func (s TeradataParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ResourcePermission) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ResourcePermission"} - if s.Actions == nil { - invalidParams.Add(request.NewErrParamRequired("Actions")) +func (s *TeradataParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TeradataParameters"} + if s.Database == nil { + invalidParams.Add(request.NewErrParamRequired("Database")) } - if s.Actions != nil && len(s.Actions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Actions", 1)) + if s.Database != nil && len(*s.Database) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Database", 1)) } - if s.Principal == nil { - invalidParams.Add(request.NewErrParamRequired("Principal")) + if s.Host == nil { + invalidParams.Add(request.NewErrParamRequired("Host")) } - if s.Principal != nil && len(*s.Principal) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Principal", 1)) + if s.Host != nil && len(*s.Host) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Host", 1)) + } + if s.Port == nil { + invalidParams.Add(request.NewErrParamRequired("Port")) + } + if s.Port != nil && *s.Port < 1 { + invalidParams.Add(request.NewErrParamMinValue("Port", 1)) } if invalidParams.Len() > 0 { @@ -18856,478 +28493,443 @@ func (s *ResourcePermission) Validate() error { return nil } -// SetActions sets the Actions field's value. -func (s *ResourcePermission) SetActions(v []*string) *ResourcePermission { - s.Actions = v +// SetDatabase sets the Database field's value. +func (s *TeradataParameters) SetDatabase(v string) *TeradataParameters { + s.Database = &v return s } -// SetPrincipal sets the Principal field's value. -func (s *ResourcePermission) SetPrincipal(v string) *ResourcePermission { - s.Principal = &v +// SetHost sets the Host field's value. +func (s *TeradataParameters) SetHost(v string) *TeradataParameters { + s.Host = &v return s } -// This resource is currently unavailable. -type ResourceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// SetPort sets the Port field's value. +func (s *TeradataParameters) SetPort(v int64) *TeradataParameters { + s.Port = &v + return s +} - Message_ *string `locationName:"Message" type:"string"` +// Summary information about a theme. +type Theme struct { + _ struct{} `type:"structure"` - // The AWS request ID for this request. - RequestId *string `type:"string"` + // The Amazon Resource Name (ARN) of the theme. + Arn *string `type:"string"` - // The resource type for this request. - ResourceType *string `type:"string" enum:"ExceptionResourceType"` -} + // The date and time that the theme was created. + CreatedTime *time.Time `type:"timestamp"` -// String returns the string representation -func (s ResourceUnavailableException) String() string { - return awsutil.Prettify(s) -} + // The date and time that the theme was last updated. 
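// --- Illustrative usage sketch; not part of the vendored SDK diff. ---
// Validates TeradataParameters and slots it into the DataSourceParameters
// union-style wrapper assumed to be defined elsewhere in this file (only one
// member of that wrapper should be set per data source). Connection values
// are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	teradata := (&quicksight.TeradataParameters{}).
		SetHost("teradata.example.internal").
		SetPort(1025).
		SetDatabase("warehouse")

	// Database, Host, and Port are all required, with min length/value checks.
	if err := teradata.Validate(); err != nil {
		fmt.Println("invalid Teradata parameters:", err)
		return
	}

	dsParams := (&quicksight.DataSourceParameters{}).
		SetTeradataParameters(teradata) // assumed wrapper setter

	fmt.Println(dsParams.String())
}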
+ LastUpdatedTime *time.Time `type:"timestamp"` -// GoString returns the string representation -func (s ResourceUnavailableException) GoString() string { - return s.String() -} + // The name that the user gives to the theme. + Name *string `min:"1" type:"string"` -func newErrorResourceUnavailableException(v protocol.ResponseMetadata) error { - return &ResourceUnavailableException{ - respMetadata: v, - } -} + // The identifier that the user gives to the theme. + ThemeId *string `min:"1" type:"string"` -// Code returns the exception type name. -func (s ResourceUnavailableException) Code() string { - return "ResourceUnavailableException" -} + // The type of theme, based on how it was created. Valid values include: QUICKSIGHT + // and CUSTOM. + Type *string `type:"string" enum:"ThemeType"` -// Message returns the exception's message. -func (s ResourceUnavailableException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" + // A version of a theme. + Version *ThemeVersion `type:"structure"` } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceUnavailableException) OrigErr() error { - return nil +// String returns the string representation +func (s Theme) String() string { + return awsutil.Prettify(s) } -func (s ResourceUnavailableException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// GoString returns the string representation +func (s Theme) GoString() string { + return s.String() } -// Status code returns the HTTP status code for the request's response error. -func (s ResourceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +// SetArn sets the Arn field's value. +func (s *Theme) SetArn(v string) *Theme { + s.Arn = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s ResourceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +// SetCreatedTime sets the CreatedTime field's value. +func (s *Theme) SetCreatedTime(v time.Time) *Theme { + s.CreatedTime = &v + return s } -// Information about rows for a data set SPICE ingestion. -type RowInfo struct { - _ struct{} `type:"structure"` - - // The number of rows that were not ingested. - RowsDropped *int64 `type:"long"` - - // The number of rows that were ingested. - RowsIngested *int64 `type:"long"` +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *Theme) SetLastUpdatedTime(v time.Time) *Theme { + s.LastUpdatedTime = &v + return s } -// String returns the string representation -func (s RowInfo) String() string { - return awsutil.Prettify(s) +// SetName sets the Name field's value. +func (s *Theme) SetName(v string) *Theme { + s.Name = &v + return s } -// GoString returns the string representation -func (s RowInfo) GoString() string { - return s.String() +// SetThemeId sets the ThemeId field's value. +func (s *Theme) SetThemeId(v string) *Theme { + s.ThemeId = &v + return s } -// SetRowsDropped sets the RowsDropped field's value. -func (s *RowInfo) SetRowsDropped(v int64) *RowInfo { - s.RowsDropped = &v +// SetType sets the Type field's value. +func (s *Theme) SetType(v string) *Theme { + s.Type = &v return s } -// SetRowsIngested sets the RowsIngested field's value. -func (s *RowInfo) SetRowsIngested(v int64) *RowInfo { - s.RowsIngested = &v +// SetVersion sets the Version field's value. +func (s *Theme) SetVersion(v *ThemeVersion) *Theme { + s.Version = v return s } -// The row-level security configuration for the dataset. 
-type RowLevelPermissionDataSet struct { +// An alias for a theme. +type ThemeAlias struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the permission dataset. - // - // Arn is a required field - Arn *string `type:"string" required:"true"` + // The display name of the theme alias. + AliasName *string `min:"1" type:"string"` - // Permission policy. - // - // PermissionPolicy is a required field - PermissionPolicy *string `type:"string" required:"true" enum:"RowLevelPermissionPolicy"` + // The Amazon Resource Name (ARN) of the theme alias. + Arn *string `type:"string"` + + // The version number of the theme alias. + ThemeVersionNumber *int64 `min:"1" type:"long"` } // String returns the string representation -func (s RowLevelPermissionDataSet) String() string { +func (s ThemeAlias) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RowLevelPermissionDataSet) GoString() string { +func (s ThemeAlias) GoString() string { return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RowLevelPermissionDataSet) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RowLevelPermissionDataSet"} - if s.Arn == nil { - invalidParams.Add(request.NewErrParamRequired("Arn")) - } - if s.PermissionPolicy == nil { - invalidParams.Add(request.NewErrParamRequired("PermissionPolicy")) - } +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetAliasName sets the AliasName field's value. +func (s *ThemeAlias) SetAliasName(v string) *ThemeAlias { + s.AliasName = &v + return s } // SetArn sets the Arn field's value. -func (s *RowLevelPermissionDataSet) SetArn(v string) *RowLevelPermissionDataSet { +func (s *ThemeAlias) SetArn(v string) *ThemeAlias { s.Arn = &v return s } -// SetPermissionPolicy sets the PermissionPolicy field's value. -func (s *RowLevelPermissionDataSet) SetPermissionPolicy(v string) *RowLevelPermissionDataSet { - s.PermissionPolicy = &v +// SetThemeVersionNumber sets the ThemeVersionNumber field's value. +func (s *ThemeAlias) SetThemeVersionNumber(v int64) *ThemeAlias { + s.ThemeVersionNumber = &v return s } -// S3 parameters. -type S3Parameters struct { +// The theme configuration. This configuration contains all of the display properties +// for a theme. +type ThemeConfiguration struct { _ struct{} `type:"structure"` - // Location of the Amazon S3 manifest file. This is NULL if the manifest file - // was uploaded in the console. - // - // ManifestFileLocation is a required field - ManifestFileLocation *ManifestFileLocation `type:"structure" required:"true"` + // Color properties that apply to chart data colors. + DataColorPalette *DataColorPalette `type:"structure"` + + // Display options related to sheets. + Sheet *SheetStyle `type:"structure"` + + // Color properties that apply to the UI and to charts, excluding the colors + // that apply to data. + UIColorPalette *UIColorPalette `type:"structure"` } // String returns the string representation -func (s S3Parameters) String() string { +func (s ThemeConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s S3Parameters) GoString() string { +func (s ThemeConfiguration) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *S3Parameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "S3Parameters"} - if s.ManifestFileLocation == nil { - invalidParams.Add(request.NewErrParamRequired("ManifestFileLocation")) - } - if s.ManifestFileLocation != nil { - if err := s.ManifestFileLocation.Validate(); err != nil { - invalidParams.AddNested("ManifestFileLocation", err.(request.ErrInvalidParams)) - } - } +// SetDataColorPalette sets the DataColorPalette field's value. +func (s *ThemeConfiguration) SetDataColorPalette(v *DataColorPalette) *ThemeConfiguration { + s.DataColorPalette = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSheet sets the Sheet field's value. +func (s *ThemeConfiguration) SetSheet(v *SheetStyle) *ThemeConfiguration { + s.Sheet = v + return s } -// SetManifestFileLocation sets the ManifestFileLocation field's value. -func (s *S3Parameters) SetManifestFileLocation(v *ManifestFileLocation) *S3Parameters { - s.ManifestFileLocation = v +// SetUIColorPalette sets the UIColorPalette field's value. +func (s *ThemeConfiguration) SetUIColorPalette(v *UIColorPalette) *ThemeConfiguration { + s.UIColorPalette = v return s } -// A physical table type for as S3 data source. -type S3Source struct { +// Theme error. +type ThemeError struct { _ struct{} `type:"structure"` - // The amazon Resource Name (ARN) for the data source. - // - // DataSourceArn is a required field - DataSourceArn *string `type:"string" required:"true"` - - // A physical table type for as S3 data source. - // - // InputColumns is a required field - InputColumns []*InputColumn `min:"1" type:"list" required:"true"` + // The error message. + Message *string `type:"string"` - // Information about the format for the S3 source file or files. - UploadSettings *UploadSettings `type:"structure"` + // The type of error. + Type *string `type:"string" enum:"ThemeErrorType"` } // String returns the string representation -func (s S3Source) String() string { +func (s ThemeError) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s S3Source) GoString() string { +func (s ThemeError) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *S3Source) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "S3Source"} - if s.DataSourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("DataSourceArn")) - } - if s.InputColumns == nil { - invalidParams.Add(request.NewErrParamRequired("InputColumns")) - } - if s.InputColumns != nil && len(s.InputColumns) < 1 { - invalidParams.Add(request.NewErrParamMinLen("InputColumns", 1)) - } - if s.InputColumns != nil { - for i, v := range s.InputColumns { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InputColumns", i), err.(request.ErrInvalidParams)) - } - } - } - if s.UploadSettings != nil { - if err := s.UploadSettings.Validate(); err != nil { - invalidParams.AddNested("UploadSettings", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDataSourceArn sets the DataSourceArn field's value. -func (s *S3Source) SetDataSourceArn(v string) *S3Source { - s.DataSourceArn = &v - return s -} - -// SetInputColumns sets the InputColumns field's value. 
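// --- Illustrative usage sketch; not part of the vendored SDK diff. ---
// Assembles a ThemeConfiguration from the palette shapes it references. The
// DataColorPalette and UIColorPalette setters are assumed from their
// definitions elsewhere in this file, and the hex colors are arbitrary.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	cfg := (&quicksight.ThemeConfiguration{}).
		SetDataColorPalette((&quicksight.DataColorPalette{}).
			SetColors(aws.StringSlice([]string{"#1f77b4", "#ff7f0e", "#2ca02c"})). // assumed setter
			SetEmptyFillColor("#eeeeee")).                                         // assumed setter
		SetUIColorPalette((&quicksight.UIColorPalette{}).
			SetPrimaryBackground("#ffffff"). // assumed setter
			SetPrimaryForeground("#222222")) // assumed setter

	// No Validate method is generated for ThemeConfiguration in this file, so
	// the shape is only checked server-side when attached to a theme request.
	fmt.Println(cfg.String())
}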
-func (s *S3Source) SetInputColumns(v []*InputColumn) *S3Source { - s.InputColumns = v +// SetMessage sets the Message field's value. +func (s *ThemeError) SetMessage(v string) *ThemeError { + s.Message = &v return s } -// SetUploadSettings sets the UploadSettings field's value. -func (s *S3Source) SetUploadSettings(v *UploadSettings) *S3Source { - s.UploadSettings = v +// SetType sets the Type field's value. +func (s *ThemeError) SetType(v string) *ThemeError { + s.Type = &v return s } -type SearchDashboardsInput struct { +// The theme summary. +type ThemeSummary struct { _ struct{} `type:"structure"` - // The ID of the AWS account that contains the user whose dashboards you're - // searching for. - // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + // The Amazon Resource Name (ARN) of the resource. + Arn *string `type:"string"` - // The filters to apply to the search. Currently, you can search only by user - // name. For example, "Filters": [ { "Name": "QUICKSIGHT_USER", "Operator": - // "StringEquals", "Value": "arn:aws:quicksight:us-east-1:1:user/default/UserName1" - // } ] - // - // Filters is a required field - Filters []*DashboardSearchFilter `type:"list" required:"true"` + // The date and time that this theme was created. + CreatedTime *time.Time `type:"timestamp"` - // The maximum number of results to be returned per request. - MaxResults *int64 `min:"1" type:"integer"` + // The last date and time that this theme was updated. + LastUpdatedTime *time.Time `type:"timestamp"` - // The token for the next set of results, or null if there are no more results. - NextToken *string `type:"string"` + // The latest version number for the theme. + LatestVersionNumber *int64 `min:"1" type:"long"` + + // the display name for the theme. + Name *string `min:"1" type:"string"` + + // The ID of the theme. This ID is unique per AWS Region for each AWS account. + ThemeId *string `min:"1" type:"string"` } // String returns the string representation -func (s SearchDashboardsInput) String() string { +func (s ThemeSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SearchDashboardsInput) GoString() string { +func (s ThemeSummary) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *SearchDashboardsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SearchDashboardsInput"} - if s.AwsAccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) - } - if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { - invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) - } - if s.Filters == nil { - invalidParams.Add(request.NewErrParamRequired("Filters")) - } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.Filters != nil { - for i, v := range s.Filters { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) - } - } - } +// SetArn sets the Arn field's value. +func (s *ThemeSummary) SetArn(v string) *ThemeSummary { + s.Arn = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCreatedTime sets the CreatedTime field's value. 
+func (s *ThemeSummary) SetCreatedTime(v time.Time) *ThemeSummary { + s.CreatedTime = &v + return s } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *SearchDashboardsInput) SetAwsAccountId(v string) *SearchDashboardsInput { - s.AwsAccountId = &v +// SetLastUpdatedTime sets the LastUpdatedTime field's value. +func (s *ThemeSummary) SetLastUpdatedTime(v time.Time) *ThemeSummary { + s.LastUpdatedTime = &v return s } -// SetFilters sets the Filters field's value. -func (s *SearchDashboardsInput) SetFilters(v []*DashboardSearchFilter) *SearchDashboardsInput { - s.Filters = v +// SetLatestVersionNumber sets the LatestVersionNumber field's value. +func (s *ThemeSummary) SetLatestVersionNumber(v int64) *ThemeSummary { + s.LatestVersionNumber = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *SearchDashboardsInput) SetMaxResults(v int64) *SearchDashboardsInput { - s.MaxResults = &v +// SetName sets the Name field's value. +func (s *ThemeSummary) SetName(v string) *ThemeSummary { + s.Name = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *SearchDashboardsInput) SetNextToken(v string) *SearchDashboardsInput { - s.NextToken = &v +// SetThemeId sets the ThemeId field's value. +func (s *ThemeSummary) SetThemeId(v string) *ThemeSummary { + s.ThemeId = &v return s } -type SearchDashboardsOutput struct { +// A version of a theme. +type ThemeVersion struct { _ struct{} `type:"structure"` - // The list of dashboards owned by the user specified in Filters in your request. - DashboardSummaryList []*DashboardSummary `type:"list"` + // The Amazon Resource Name (ARN) of the resource. + Arn *string `type:"string"` - // The token for the next set of results, or null if there are no more results. - NextToken *string `type:"string"` + // The Amazon QuickSight-defined ID of the theme that a custom theme inherits + // from. All themes initially inherit from a default QuickSight theme. + BaseThemeId *string `min:"1" type:"string"` - // The AWS request ID for this operation. - RequestId *string `type:"string"` + // The theme configuration, which contains all the theme display properties. + Configuration *ThemeConfiguration `type:"structure"` - // The HTTP status of the request. - Status *int64 `location:"statusCode" type:"integer"` + // The date and time that this theme version was created. + CreatedTime *time.Time `type:"timestamp"` + + // The description of the theme. + Description *string `min:"1" type:"string"` + + // Errors associated with the theme. + Errors []*ThemeError `min:"1" type:"list"` + + // The status of the theme version. + Status *string `type:"string" enum:"ResourceStatus"` + + // The version number of the theme. + VersionNumber *int64 `min:"1" type:"long"` } // String returns the string representation -func (s SearchDashboardsOutput) String() string { +func (s ThemeVersion) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SearchDashboardsOutput) GoString() string { +func (s ThemeVersion) GoString() string { return s.String() } -// SetDashboardSummaryList sets the DashboardSummaryList field's value. -func (s *SearchDashboardsOutput) SetDashboardSummaryList(v []*DashboardSummary) *SearchDashboardsOutput { - s.DashboardSummaryList = v +// SetArn sets the Arn field's value. +func (s *ThemeVersion) SetArn(v string) *ThemeVersion { + s.Arn = &v return s } -// SetNextToken sets the NextToken field's value. 
-func (s *SearchDashboardsOutput) SetNextToken(v string) *SearchDashboardsOutput { - s.NextToken = &v +// SetBaseThemeId sets the BaseThemeId field's value. +func (s *ThemeVersion) SetBaseThemeId(v string) *ThemeVersion { + s.BaseThemeId = &v return s } -// SetRequestId sets the RequestId field's value. -func (s *SearchDashboardsOutput) SetRequestId(v string) *SearchDashboardsOutput { - s.RequestId = &v +// SetConfiguration sets the Configuration field's value. +func (s *ThemeVersion) SetConfiguration(v *ThemeConfiguration) *ThemeVersion { + s.Configuration = v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *ThemeVersion) SetCreatedTime(v time.Time) *ThemeVersion { + s.CreatedTime = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ThemeVersion) SetDescription(v string) *ThemeVersion { + s.Description = &v + return s +} + +// SetErrors sets the Errors field's value. +func (s *ThemeVersion) SetErrors(v []*ThemeError) *ThemeVersion { + s.Errors = v return s } // SetStatus sets the Status field's value. -func (s *SearchDashboardsOutput) SetStatus(v int64) *SearchDashboardsOutput { +func (s *ThemeVersion) SetStatus(v string) *ThemeVersion { s.Status = &v return s } -// ServiceNow parameters. -type ServiceNowParameters struct { +// SetVersionNumber sets the VersionNumber field's value. +func (s *ThemeVersion) SetVersionNumber(v int64) *ThemeVersion { + s.VersionNumber = &v + return s +} + +// The theme version. +type ThemeVersionSummary struct { _ struct{} `type:"structure"` - // URL of the base site. - // - // SiteBaseUrl is a required field - SiteBaseUrl *string `min:"1" type:"string" required:"true"` + // The Amazon Resource Name (ARN) of the theme version. + Arn *string `type:"string"` + + // The date and time that this theme version was created. + CreatedTime *time.Time `type:"timestamp"` + + // The description of the theme version. + Description *string `min:"1" type:"string"` + + // The status of the theme version. + Status *string `type:"string" enum:"ResourceStatus"` + + // The version number of the theme version. + VersionNumber *int64 `min:"1" type:"long"` } // String returns the string representation -func (s ServiceNowParameters) String() string { +func (s ThemeVersionSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ServiceNowParameters) GoString() string { +func (s ThemeVersionSummary) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ServiceNowParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ServiceNowParameters"} - if s.SiteBaseUrl == nil { - invalidParams.Add(request.NewErrParamRequired("SiteBaseUrl")) - } - if s.SiteBaseUrl != nil && len(*s.SiteBaseUrl) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SiteBaseUrl", 1)) - } +// SetArn sets the Arn field's value. +func (s *ThemeVersionSummary) SetArn(v string) *ThemeVersionSummary { + s.Arn = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCreatedTime sets the CreatedTime field's value. +func (s *ThemeVersionSummary) SetCreatedTime(v time.Time) *ThemeVersionSummary { + s.CreatedTime = &v + return s } -// SetSiteBaseUrl sets the SiteBaseUrl field's value. -func (s *ServiceNowParameters) SetSiteBaseUrl(v string) *ServiceNowParameters { - s.SiteBaseUrl = &v +// SetDescription sets the Description field's value. 
+func (s *ThemeVersionSummary) SetDescription(v string) *ThemeVersionSummary { + s.Description = &v return s } -// The number of minutes specified for the lifetime of a session isn't valid. -// The session lifetime must be 15-600 minutes. -type SessionLifetimeInMinutesInvalidException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// SetStatus sets the Status field's value. +func (s *ThemeVersionSummary) SetStatus(v string) *ThemeVersionSummary { + s.Status = &v + return s +} + +// SetVersionNumber sets the VersionNumber field's value. +func (s *ThemeVersionSummary) SetVersionNumber(v int64) *ThemeVersionSummary { + s.VersionNumber = &v + return s +} + +// Access is throttled. +type ThrottlingException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -19336,28 +28938,28 @@ type SessionLifetimeInMinutesInvalidException struct { } // String returns the string representation -func (s SessionLifetimeInMinutesInvalidException) String() string { +func (s ThrottlingException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SessionLifetimeInMinutesInvalidException) GoString() string { +func (s ThrottlingException) GoString() string { return s.String() } -func newErrorSessionLifetimeInMinutesInvalidException(v protocol.ResponseMetadata) error { - return &SessionLifetimeInMinutesInvalidException{ - respMetadata: v, +func newErrorThrottlingException(v protocol.ResponseMetadata) error { + return &ThrottlingException{ + RespMetadata: v, } } // Code returns the exception type name. -func (s SessionLifetimeInMinutesInvalidException) Code() string { - return "SessionLifetimeInMinutesInvalidException" +func (s *ThrottlingException) Code() string { + return "ThrottlingException" } // Message returns the exception's message. -func (s SessionLifetimeInMinutesInvalidException) Message() string { +func (s *ThrottlingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19365,160 +28967,149 @@ func (s SessionLifetimeInMinutesInvalidException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SessionLifetimeInMinutesInvalidException) OrigErr() error { +func (s *ThrottlingException) OrigErr() error { return nil } -func (s SessionLifetimeInMinutesInvalidException) Error() string { +func (s *ThrottlingException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s SessionLifetimeInMinutesInvalidException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SessionLifetimeInMinutesInvalidException) RequestID() string { - return s.respMetadata.RequestID +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID } -// Sheet controls option. -type SheetControlsOption struct { +// The display options for the layout of tiles on a sheet. +type TileLayoutStyle struct { _ struct{} `type:"structure"` - // Visibility state. - VisibilityState *string `type:"string" enum:"DashboardUIState"` + // The gutter settings that apply between tiles. + Gutter *GutterStyle `type:"structure"` + + // The margin settings that apply around the outside edge of sheets. 
+ Margin *MarginStyle `type:"structure"` } // String returns the string representation -func (s SheetControlsOption) String() string { +func (s TileLayoutStyle) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SheetControlsOption) GoString() string { +func (s TileLayoutStyle) GoString() string { return s.String() } -// SetVisibilityState sets the VisibilityState field's value. -func (s *SheetControlsOption) SetVisibilityState(v string) *SheetControlsOption { - s.VisibilityState = &v +// SetGutter sets the Gutter field's value. +func (s *TileLayoutStyle) SetGutter(v *GutterStyle) *TileLayoutStyle { + s.Gutter = v return s } -// Snowflake parameters. -type SnowflakeParameters struct { - _ struct{} `type:"structure"` - - // Database. - // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` +// SetMargin sets the Margin field's value. +func (s *TileLayoutStyle) SetMargin(v *MarginStyle) *TileLayoutStyle { + s.Margin = v + return s +} - // Host. - // - // Host is a required field - Host *string `min:"1" type:"string" required:"true"` +// Display options related to tiles on a sheet. +type TileStyle struct { + _ struct{} `type:"structure"` - // Warehouse. - // - // Warehouse is a required field - Warehouse *string `type:"string" required:"true"` + // The border around a tile. + Border *BorderStyle `type:"structure"` } // String returns the string representation -func (s SnowflakeParameters) String() string { +func (s TileStyle) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SnowflakeParameters) GoString() string { +func (s TileStyle) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *SnowflakeParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SnowflakeParameters"} - if s.Database == nil { - invalidParams.Add(request.NewErrParamRequired("Database")) - } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) - } - if s.Host == nil { - invalidParams.Add(request.NewErrParamRequired("Host")) - } - if s.Host != nil && len(*s.Host) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Host", 1)) - } - if s.Warehouse == nil { - invalidParams.Add(request.NewErrParamRequired("Warehouse")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDatabase sets the Database field's value. -func (s *SnowflakeParameters) SetDatabase(v string) *SnowflakeParameters { - s.Database = &v +// SetBorder sets the Border field's value. +func (s *TileStyle) SetBorder(v *BorderStyle) *TileStyle { + s.Border = v return s } -// SetHost sets the Host field's value. -func (s *SnowflakeParameters) SetHost(v string) *SnowflakeParameters { - s.Host = &v - return s -} +// A data transformation on a logical table. This is a variant type structure. +// For this structure to be valid, only one of the attributes can be non-null. +type TransformOperation struct { + _ struct{} `type:"structure"` -// SetWarehouse sets the Warehouse field's value. -func (s *SnowflakeParameters) SetWarehouse(v string) *SnowflakeParameters { - s.Warehouse = &v - return s -} + // A transform operation that casts a column to a different type. + CastColumnTypeOperation *CastColumnTypeOperation `type:"structure"` -// Spark parameters. 
-type SparkParameters struct { - _ struct{} `type:"structure"` + // An operation that creates calculated columns. Columns created in one such + // operation form a lexical closure. + CreateColumnsOperation *CreateColumnsOperation `type:"structure"` - // Host. - // - // Host is a required field - Host *string `min:"1" type:"string" required:"true"` + // An operation that filters rows based on some condition. + FilterOperation *FilterOperation `type:"structure"` - // Port. - // - // Port is a required field - Port *int64 `min:"1" type:"integer" required:"true"` + // An operation that projects columns. Operations that come after a projection + // can only refer to projected columns. + ProjectOperation *ProjectOperation `type:"structure"` + + // An operation that renames a column. + RenameColumnOperation *RenameColumnOperation `type:"structure"` + + // An operation that tags a column with additional information. + TagColumnOperation *TagColumnOperation `type:"structure"` } // String returns the string representation -func (s SparkParameters) String() string { +func (s TransformOperation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SparkParameters) GoString() string { +func (s TransformOperation) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *SparkParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SparkParameters"} - if s.Host == nil { - invalidParams.Add(request.NewErrParamRequired("Host")) +func (s *TransformOperation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransformOperation"} + if s.CastColumnTypeOperation != nil { + if err := s.CastColumnTypeOperation.Validate(); err != nil { + invalidParams.AddNested("CastColumnTypeOperation", err.(request.ErrInvalidParams)) + } } - if s.Host != nil && len(*s.Host) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Host", 1)) + if s.CreateColumnsOperation != nil { + if err := s.CreateColumnsOperation.Validate(); err != nil { + invalidParams.AddNested("CreateColumnsOperation", err.(request.ErrInvalidParams)) + } } - if s.Port == nil { - invalidParams.Add(request.NewErrParamRequired("Port")) + if s.FilterOperation != nil { + if err := s.FilterOperation.Validate(); err != nil { + invalidParams.AddNested("FilterOperation", err.(request.ErrInvalidParams)) + } } - if s.Port != nil && *s.Port < 1 { - invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + if s.ProjectOperation != nil { + if err := s.ProjectOperation.Validate(); err != nil { + invalidParams.AddNested("ProjectOperation", err.(request.ErrInvalidParams)) + } + } + if s.RenameColumnOperation != nil { + if err := s.RenameColumnOperation.Validate(); err != nil { + invalidParams.AddNested("RenameColumnOperation", err.(request.ErrInvalidParams)) + } + } + if s.TagColumnOperation != nil { + if err := s.TagColumnOperation.Validate(); err != nil { + invalidParams.AddNested("TagColumnOperation", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -19527,68 +29118,81 @@ func (s *SparkParameters) Validate() error { return nil } -// SetHost sets the Host field's value. -func (s *SparkParameters) SetHost(v string) *SparkParameters { - s.Host = &v +// SetCastColumnTypeOperation sets the CastColumnTypeOperation field's value. 
+func (s *TransformOperation) SetCastColumnTypeOperation(v *CastColumnTypeOperation) *TransformOperation { + s.CastColumnTypeOperation = v return s } -// SetPort sets the Port field's value. -func (s *SparkParameters) SetPort(v int64) *SparkParameters { - s.Port = &v +// SetCreateColumnsOperation sets the CreateColumnsOperation field's value. +func (s *TransformOperation) SetCreateColumnsOperation(v *CreateColumnsOperation) *TransformOperation { + s.CreateColumnsOperation = v return s } -// SQL Server parameters. -type SqlServerParameters struct { - _ struct{} `type:"structure"` +// SetFilterOperation sets the FilterOperation field's value. +func (s *TransformOperation) SetFilterOperation(v *FilterOperation) *TransformOperation { + s.FilterOperation = v + return s +} - // Database. - // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` +// SetProjectOperation sets the ProjectOperation field's value. +func (s *TransformOperation) SetProjectOperation(v *ProjectOperation) *TransformOperation { + s.ProjectOperation = v + return s +} - // Host. +// SetRenameColumnOperation sets the RenameColumnOperation field's value. +func (s *TransformOperation) SetRenameColumnOperation(v *RenameColumnOperation) *TransformOperation { + s.RenameColumnOperation = v + return s +} + +// SetTagColumnOperation sets the TagColumnOperation field's value. +func (s *TransformOperation) SetTagColumnOperation(v *TagColumnOperation) *TransformOperation { + s.TagColumnOperation = v + return s +} + +// Twitter parameters. +type TwitterParameters struct { + _ struct{} `type:"structure"` + + // Maximum number of rows to query Twitter. // - // Host is a required field - Host *string `min:"1" type:"string" required:"true"` + // MaxRows is a required field + MaxRows *int64 `min:"1" type:"integer" required:"true"` - // Port. + // Twitter query string. // - // Port is a required field - Port *int64 `min:"1" type:"integer" required:"true"` + // Query is a required field + Query *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s SqlServerParameters) String() string { +func (s TwitterParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SqlServerParameters) GoString() string { +func (s TwitterParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
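As a rough, illustrative sketch only (not part of the vendored SDK code above): TransformOperation is a variant type, so only one of its attributes may be non-null in any given value. The snippet below builds two separate operations against hypothetical column names (customer_id, amount, order_amount are placeholders) and runs the generated Validate checks on each.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	// One operation that projects a subset of (hypothetical) columns.
	project := &quicksight.TransformOperation{
		ProjectOperation: &quicksight.ProjectOperation{
			ProjectedColumns: aws.StringSlice([]string{"customer_id", "amount"}),
		},
	}

	// A second, separate operation that renames a column. It is kept out of
	// the value above because only one attribute may be non-null per value.
	rename := &quicksight.TransformOperation{
		RenameColumnOperation: &quicksight.RenameColumnOperation{
			ColumnName:    aws.String("amount"),
			NewColumnName: aws.String("order_amount"),
		},
	}

	for _, op := range []*quicksight.TransformOperation{project, rename} {
		if err := op.Validate(); err != nil {
			fmt.Println("invalid operation:", err)
		}
	}
}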
-func (s *SqlServerParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "SqlServerParameters"} - if s.Database == nil { - invalidParams.Add(request.NewErrParamRequired("Database")) - } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) - } - if s.Host == nil { - invalidParams.Add(request.NewErrParamRequired("Host")) +func (s *TwitterParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TwitterParameters"} + if s.MaxRows == nil { + invalidParams.Add(request.NewErrParamRequired("MaxRows")) } - if s.Host != nil && len(*s.Host) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Host", 1)) + if s.MaxRows != nil && *s.MaxRows < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxRows", 1)) } - if s.Port == nil { - invalidParams.Add(request.NewErrParamRequired("Port")) + if s.Query == nil { + invalidParams.Add(request.NewErrParamRequired("Query")) } - if s.Port != nil && *s.Port < 1 { - invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + if s.Query != nil && len(*s.Query) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Query", 1)) } if invalidParams.Len() > 0 { @@ -19597,272 +29201,292 @@ func (s *SqlServerParameters) Validate() error { return nil } -// SetDatabase sets the Database field's value. -func (s *SqlServerParameters) SetDatabase(v string) *SqlServerParameters { - s.Database = &v - return s -} - -// SetHost sets the Host field's value. -func (s *SqlServerParameters) SetHost(v string) *SqlServerParameters { - s.Host = &v +// SetMaxRows sets the MaxRows field's value. +func (s *TwitterParameters) SetMaxRows(v int64) *TwitterParameters { + s.MaxRows = &v return s } -// SetPort sets the Port field's value. -func (s *SqlServerParameters) SetPort(v int64) *SqlServerParameters { - s.Port = &v +// SetQuery sets the Query field's value. +func (s *TwitterParameters) SetQuery(v string) *TwitterParameters { + s.Query = &v return s } -// Secure Socket Layer (SSL) properties that apply when QuickSight connects -// to your underlying data source. -type SslProperties struct { +// The theme colors that apply to UI and to charts, excluding data colors. The +// colors description is a hexidecimal color code that consists of six alphanumerical +// characters, prefixed with #, for example #37BFF5. For more information, see +// Using Themes in Amazon QuickSight (https://docs.aws.amazon.com/quicksight/latest/user/themes-in-quicksight.html) +// in the Amazon QuickSight User Guide. +type UIColorPalette struct { _ struct{} `type:"structure"` - // A Boolean option to control whether SSL should be disabled. - DisableSsl *bool `type:"boolean"` + // This color is that applies to selected states and buttons. + Accent *string `type:"string"` + + // The foreground color that applies to any text or other elements that appear + // over the accent color. + AccentForeground *string `type:"string"` + + // The color that applies to error messages. + Danger *string `type:"string"` + + // The foreground color that applies to any text or other elements that appear + // over the error color. + DangerForeground *string `type:"string"` + + // The color that applies to the names of fields that are identified as dimensions. + Dimension *string `type:"string"` + + // The foreground color that applies to any text or other elements that appear + // over the dimension color. 
+ DimensionForeground *string `type:"string"` + + // The color that applies to the names of fields that are identified as measures. + Measure *string `type:"string"` + + // The foreground color that applies to any text or other elements that appear + // over the measure color. + MeasureForeground *string `type:"string"` + + // The background color that applies to visuals and other high emphasis UI. + PrimaryBackground *string `type:"string"` + + // The color of text and other foreground elements that appear over the primary + // background regions, such as grid lines, borders, table banding, icons, and + // so on. + PrimaryForeground *string `type:"string"` + + // The background color that applies to the sheet background and sheet controls. + SecondaryBackground *string `type:"string"` + + // The foreground color that applies to any sheet title, sheet control text, + // or UI that appears over the secondary background. + SecondaryForeground *string `type:"string"` + + // The color that applies to success messages, for example the check mark for + // a successful download. + Success *string `type:"string"` + + // The foreground color that applies to any text or other elements that appear + // over the success color. + SuccessForeground *string `type:"string"` + + // This color that applies to warning and informational messages. + Warning *string `type:"string"` + + // The foreground color that applies to any text or other elements that appear + // over the warning color. + WarningForeground *string `type:"string"` } // String returns the string representation -func (s SslProperties) String() string { +func (s UIColorPalette) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s SslProperties) GoString() string { +func (s UIColorPalette) GoString() string { return s.String() } -// SetDisableSsl sets the DisableSsl field's value. -func (s *SslProperties) SetDisableSsl(v bool) *SslProperties { - s.DisableSsl = &v +// SetAccent sets the Accent field's value. +func (s *UIColorPalette) SetAccent(v string) *UIColorPalette { + s.Accent = &v return s } -// String parameter. -type StringParameter struct { - _ struct{} `type:"structure"` - - // A display name for the dataset. - // - // Name is a required field - Name *string `type:"string" required:"true"` - - // Values. - // - // Values is a required field - Values []*string `type:"list" required:"true"` +// SetAccentForeground sets the AccentForeground field's value. +func (s *UIColorPalette) SetAccentForeground(v string) *UIColorPalette { + s.AccentForeground = &v + return s } -// String returns the string representation -func (s StringParameter) String() string { - return awsutil.Prettify(s) +// SetDanger sets the Danger field's value. +func (s *UIColorPalette) SetDanger(v string) *UIColorPalette { + s.Danger = &v + return s } -// GoString returns the string representation -func (s StringParameter) GoString() string { - return s.String() +// SetDangerForeground sets the DangerForeground field's value. +func (s *UIColorPalette) SetDangerForeground(v string) *UIColorPalette { + s.DangerForeground = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *StringParameter) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StringParameter"} - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Values == nil { - invalidParams.Add(request.NewErrParamRequired("Values")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDimension sets the Dimension field's value. +func (s *UIColorPalette) SetDimension(v string) *UIColorPalette { + s.Dimension = &v + return s } -// SetName sets the Name field's value. -func (s *StringParameter) SetName(v string) *StringParameter { - s.Name = &v +// SetDimensionForeground sets the DimensionForeground field's value. +func (s *UIColorPalette) SetDimensionForeground(v string) *UIColorPalette { + s.DimensionForeground = &v return s } -// SetValues sets the Values field's value. -func (s *StringParameter) SetValues(v []*string) *StringParameter { - s.Values = v +// SetMeasure sets the Measure field's value. +func (s *UIColorPalette) SetMeasure(v string) *UIColorPalette { + s.Measure = &v return s } -// The key or keys of the key-value pairs for the resource tag or tags assigned -// to the resource. -type Tag struct { - _ struct{} `type:"structure"` +// SetMeasureForeground sets the MeasureForeground field's value. +func (s *UIColorPalette) SetMeasureForeground(v string) *UIColorPalette { + s.MeasureForeground = &v + return s +} - // Tag key. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` +// SetPrimaryBackground sets the PrimaryBackground field's value. +func (s *UIColorPalette) SetPrimaryBackground(v string) *UIColorPalette { + s.PrimaryBackground = &v + return s +} - // Tag value. - // - // Value is a required field - Value *string `min:"1" type:"string" required:"true"` +// SetPrimaryForeground sets the PrimaryForeground field's value. +func (s *UIColorPalette) SetPrimaryForeground(v string) *UIColorPalette { + s.PrimaryForeground = &v + return s } -// String returns the string representation -func (s Tag) String() string { - return awsutil.Prettify(s) +// SetSecondaryBackground sets the SecondaryBackground field's value. +func (s *UIColorPalette) SetSecondaryBackground(v string) *UIColorPalette { + s.SecondaryBackground = &v + return s } -// GoString returns the string representation -func (s Tag) GoString() string { - return s.String() +// SetSecondaryForeground sets the SecondaryForeground field's value. +func (s *UIColorPalette) SetSecondaryForeground(v string) *UIColorPalette { + s.SecondaryForeground = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *Tag) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Tag"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Value == nil { - invalidParams.Add(request.NewErrParamRequired("Value")) - } - if s.Value != nil && len(*s.Value) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Value", 1)) - } +// SetSuccess sets the Success field's value. +func (s *UIColorPalette) SetSuccess(v string) *UIColorPalette { + s.Success = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSuccessForeground sets the SuccessForeground field's value. 
+func (s *UIColorPalette) SetSuccessForeground(v string) *UIColorPalette { + s.SuccessForeground = &v + return s } -// SetKey sets the Key field's value. -func (s *Tag) SetKey(v string) *Tag { - s.Key = &v +// SetWarning sets the Warning field's value. +func (s *UIColorPalette) SetWarning(v string) *UIColorPalette { + s.Warning = &v return s } -// SetValue sets the Value field's value. -func (s *Tag) SetValue(v string) *Tag { - s.Value = &v +// SetWarningForeground sets the WarningForeground field's value. +func (s *UIColorPalette) SetWarningForeground(v string) *UIColorPalette { + s.WarningForeground = &v return s } -// A transform operation that tags a column with additional information. -type TagColumnOperation struct { - _ struct{} `type:"structure"` +// This error indicates that you are calling an operation on an Amazon QuickSight +// subscription where the edition doesn't include support for that operation. +// Amazon QuickSight currently has Standard Edition and Enterprise Edition. +// Not every operation and capability is available in every edition. +type UnsupportedUserEditionException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` - // The column that this operation acts on. - // - // ColumnName is a required field - ColumnName *string `min:"1" type:"string" required:"true"` + Message_ *string `locationName:"Message" type:"string"` - // The dataset column tag, currently only used for geospatial type tagging. . - // - // This is not tags for the AWS tagging feature. . - // - // Tags is a required field - Tags []*ColumnTag `min:"1" type:"list" required:"true"` + // The AWS request ID for this request. + RequestId *string `type:"string"` } // String returns the string representation -func (s TagColumnOperation) String() string { +func (s UnsupportedUserEditionException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TagColumnOperation) GoString() string { +func (s UnsupportedUserEditionException) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *TagColumnOperation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagColumnOperation"} - if s.ColumnName == nil { - invalidParams.Add(request.NewErrParamRequired("ColumnName")) - } - if s.ColumnName != nil && len(*s.ColumnName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ColumnName", 1)) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) - } - if s.Tags != nil && len(s.Tags) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) +func newErrorUnsupportedUserEditionException(v protocol.ResponseMetadata) error { + return &UnsupportedUserEditionException{ + RespMetadata: v, } +} - if invalidParams.Len() > 0 { - return invalidParams +// Code returns the exception type name. +func (s *UnsupportedUserEditionException) Code() string { + return "UnsupportedUserEditionException" +} + +// Message returns the exception's message. +func (s *UnsupportedUserEditionException) Message() string { + if s.Message_ != nil { + return *s.Message_ } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnsupportedUserEditionException) OrigErr() error { return nil } -// SetColumnName sets the ColumnName field's value. 
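A minimal sketch of how these theme shapes nest together, assuming arbitrary placeholder hex colors and sheet/tile settings chosen only to show the structure of ThemeConfiguration, UIColorPalette, SheetStyle, TileStyle, and BorderStyle; it is not taken from this patch.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	// Placeholder colors; each value is a six-character hex code prefixed with "#".
	palette := &quicksight.UIColorPalette{
		Accent:            aws.String("#37BFF5"),
		PrimaryBackground: aws.String("#FFFFFF"),
		PrimaryForeground: aws.String("#2A2A2A"),
	}

	cfg := &quicksight.ThemeConfiguration{
		UIColorPalette: palette,
		Sheet: &quicksight.SheetStyle{
			Tile: &quicksight.TileStyle{
				Border: &quicksight.BorderStyle{Show: aws.Bool(true)},
			},
		},
	}

	// String() pretty-prints the structure via awsutil.Prettify.
	fmt.Println(cfg)
}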
-func (s *TagColumnOperation) SetColumnName(v string) *TagColumnOperation { - s.ColumnName = &v - return s +func (s *UnsupportedUserEditionException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } -// SetTags sets the Tags field's value. -func (s *TagColumnOperation) SetTags(v []*ColumnTag) *TagColumnOperation { - s.Tags = v - return s +// Status code returns the HTTP status code for the request's response error. +func (s *UnsupportedUserEditionException) StatusCode() int { + return s.RespMetadata.StatusCode } -type TagResourceInput struct { +// RequestID returns the service's response RequestID for request. +func (s *UnsupportedUserEditionException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UntagResourceInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource that you want to tag. + // The Amazon Resource Name (ARN) of the resource that you want to untag. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"ResourceArn" type:"string" required:"true"` - // Contains a map of the key-value pairs for the resource tag or tags assigned - // to the resource. + // The keys of the key-value pairs for the resource tag or tags assigned to + // the resource. // - // Tags is a required field - Tags []*Tag `min:"1" type:"list" required:"true"` + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"keys" min:"1" type:"list" required:"true"` } // String returns the string representation -func (s TagResourceInput) String() string { +func (s UntagResourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TagResourceInput) GoString() string { +func (s UntagResourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *TagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} if s.ResourceArn == nil { invalidParams.Add(request.NewErrParamRequired("ResourceArn")) } if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) - } - if s.Tags == nil { - invalidParams.Add(request.NewErrParamRequired("Tags")) + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) } - if s.Tags != nil && len(s.Tags) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) } - if s.Tags != nil { - for i, v := range s.Tags { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) - } - } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) } if invalidParams.Len() > 0 { @@ -19872,18 +29496,18 @@ func (s *TagResourceInput) Validate() error { } // SetResourceArn sets the ResourceArn field's value. -func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { s.ResourceArn = &v return s } -// SetTags sets the Tags field's value. -func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { - s.Tags = v +// SetTagKeys sets the TagKeys field's value. 
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v return s } -type TagResourceOutput struct { +type UntagResourceOutput struct { _ struct{} `type:"structure"` // The AWS request ID for this operation. @@ -19894,226 +29518,205 @@ type TagResourceOutput struct { } // String returns the string representation -func (s TagResourceOutput) String() string { +func (s UntagResourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TagResourceOutput) GoString() string { +func (s UntagResourceOutput) GoString() string { return s.String() } // SetRequestId sets the RequestId field's value. -func (s *TagResourceOutput) SetRequestId(v string) *TagResourceOutput { +func (s *UntagResourceOutput) SetRequestId(v string) *UntagResourceOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *TagResourceOutput) SetStatus(v int64) *TagResourceOutput { +func (s *UntagResourceOutput) SetStatus(v int64) *UntagResourceOutput { s.Status = &v return s } -// A template object. A template is an entity in QuickSight that encapsulates -// the metadata required to create an analysis and that you can use to create -// a dashboard. A template adds a layer of abstraction by using placeholders -// to replace the dataset associated with the analysis. You can use templates -// to create dashboards by replacing dataset placeholders with datasets that -// follow the same schema that was used to create the source analysis and template. -// -// You can share templates across AWS accounts by allowing users in other AWS -// accounts to create a template or a dashboard from an existing template. -type Template struct { +type UpdateAccountCustomizationInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the template. - Arn *string `type:"string"` - - // Time when this was created. - CreatedTime *time.Time `type:"timestamp"` - - // Time when this was last updated. - LastUpdatedTime *time.Time `type:"timestamp"` - - // The display name of the template. - Name *string `min:"1" type:"string"` + // The QuickSight customizations you're updating in the current AWS Region. + // + // AccountCustomization is a required field + AccountCustomization *AccountCustomization `type:"structure" required:"true"` - // The ID for the template. This is unique per AWS Region for each AWS account. - TemplateId *string `min:"1" type:"string"` + // The ID for the AWS account that you want to update QuickSight customizations + // for. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // A structure describing the versions of the template. - Version *TemplateVersion `type:"structure"` + // The namespace that you want to update QuickSight customizations for. + Namespace *string `location:"querystring" locationName:"namespace" type:"string"` } // String returns the string representation -func (s Template) String() string { +func (s UpdateAccountCustomizationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Template) GoString() string { +func (s UpdateAccountCustomizationInput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *Template) SetArn(v string) *Template { - s.Arn = &v - return s -} - -// SetCreatedTime sets the CreatedTime field's value. 
-func (s *Template) SetCreatedTime(v time.Time) *Template { - s.CreatedTime = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAccountCustomizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAccountCustomizationInput"} + if s.AccountCustomization == nil { + invalidParams.Add(request.NewErrParamRequired("AccountCustomization")) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } -// SetLastUpdatedTime sets the LastUpdatedTime field's value. -func (s *Template) SetLastUpdatedTime(v time.Time) *Template { - s.LastUpdatedTime = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetName sets the Name field's value. -func (s *Template) SetName(v string) *Template { - s.Name = &v +// SetAccountCustomization sets the AccountCustomization field's value. +func (s *UpdateAccountCustomizationInput) SetAccountCustomization(v *AccountCustomization) *UpdateAccountCustomizationInput { + s.AccountCustomization = v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *Template) SetTemplateId(v string) *Template { - s.TemplateId = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *UpdateAccountCustomizationInput) SetAwsAccountId(v string) *UpdateAccountCustomizationInput { + s.AwsAccountId = &v return s } -// SetVersion sets the Version field's value. -func (s *Template) SetVersion(v *TemplateVersion) *Template { - s.Version = v +// SetNamespace sets the Namespace field's value. +func (s *UpdateAccountCustomizationInput) SetNamespace(v string) *UpdateAccountCustomizationInput { + s.Namespace = &v return s } -// The template alias. -type TemplateAlias struct { +type UpdateAccountCustomizationOutput struct { _ struct{} `type:"structure"` - // The display name of the template alias. - AliasName *string `min:"1" type:"string"` + // The QuickSight customizations you're updating in the current AWS Region. + AccountCustomization *AccountCustomization `type:"structure"` - // The Amazon Resource Name (ARN) of the template alias. + // The Amazon Resource Name (ARN) for the updated customization for this AWS + // account. Arn *string `type:"string"` - // The version number of the template alias. - TemplateVersionNumber *int64 `min:"1" type:"long"` + // The ID for the AWS account that you want to update QuickSight customizations + // for. + AwsAccountId *string `min:"12" type:"string"` + + // The namespace associated with the customization that you're updating. + Namespace *string `type:"string"` + + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s TemplateAlias) String() string { +func (s UpdateAccountCustomizationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TemplateAlias) GoString() string { +func (s UpdateAccountCustomizationOutput) GoString() string { return s.String() } -// SetAliasName sets the AliasName field's value. -func (s *TemplateAlias) SetAliasName(v string) *TemplateAlias { - s.AliasName = &v +// SetAccountCustomization sets the AccountCustomization field's value. 
+func (s *UpdateAccountCustomizationOutput) SetAccountCustomization(v *AccountCustomization) *UpdateAccountCustomizationOutput { + s.AccountCustomization = v return s } // SetArn sets the Arn field's value. -func (s *TemplateAlias) SetArn(v string) *TemplateAlias { +func (s *UpdateAccountCustomizationOutput) SetArn(v string) *UpdateAccountCustomizationOutput { s.Arn = &v return s } -// SetTemplateVersionNumber sets the TemplateVersionNumber field's value. -func (s *TemplateAlias) SetTemplateVersionNumber(v int64) *TemplateAlias { - s.TemplateVersionNumber = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *UpdateAccountCustomizationOutput) SetAwsAccountId(v string) *UpdateAccountCustomizationOutput { + s.AwsAccountId = &v return s } -// List of errors that occurred when the template version creation failed. -type TemplateError struct { - _ struct{} `type:"structure"` - - // Description of the error type. - Message *string `type:"string"` - - // Type of error. - Type *string `type:"string" enum:"TemplateErrorType"` -} - -// String returns the string representation -func (s TemplateError) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TemplateError) GoString() string { - return s.String() +// SetNamespace sets the Namespace field's value. +func (s *UpdateAccountCustomizationOutput) SetNamespace(v string) *UpdateAccountCustomizationOutput { + s.Namespace = &v + return s } -// SetMessage sets the Message field's value. -func (s *TemplateError) SetMessage(v string) *TemplateError { - s.Message = &v +// SetRequestId sets the RequestId field's value. +func (s *UpdateAccountCustomizationOutput) SetRequestId(v string) *UpdateAccountCustomizationOutput { + s.RequestId = &v return s } -// SetType sets the Type field's value. -func (s *TemplateError) SetType(v string) *TemplateError { - s.Type = &v +// SetStatus sets the Status field's value. +func (s *UpdateAccountCustomizationOutput) SetStatus(v int64) *UpdateAccountCustomizationOutput { + s.Status = &v return s } -// The source analysis of the template. -type TemplateSourceAnalysis struct { +type UpdateAccountSettingsInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource. + // The ID for the AWS account that contains the QuickSight settings that you + // want to list. // - // Arn is a required field - Arn *string `type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // A structure containing information about the dataset references used as placeholders - // in the template. + // The default namespace for this AWS account. Currently, the default is default. + // AWS Identity and Access Management (IAM) users that register for the first + // time with QuickSight provide an email that becomes associated with the default + // namespace. // - // DataSetReferences is a required field - DataSetReferences []*DataSetReference `min:"1" type:"list" required:"true"` + // DefaultNamespace is a required field + DefaultNamespace *string `type:"string" required:"true"` + + // The email address that you want QuickSight to send notifications to regarding + // your AWS account or QuickSight subscription. 
+ NotificationEmail *string `type:"string"` } // String returns the string representation -func (s TemplateSourceAnalysis) String() string { +func (s UpdateAccountSettingsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TemplateSourceAnalysis) GoString() string { +func (s UpdateAccountSettingsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *TemplateSourceAnalysis) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TemplateSourceAnalysis"} - if s.Arn == nil { - invalidParams.Add(request.NewErrParamRequired("Arn")) - } - if s.DataSetReferences == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetReferences")) +func (s *UpdateAccountSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAccountSettingsInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.DataSetReferences != nil && len(s.DataSetReferences) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSetReferences", 1)) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSetReferences != nil { - for i, v := range s.DataSetReferences { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "DataSetReferences", i), err.(request.ErrInvalidParams)) - } - } + if s.DefaultNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("DefaultNamespace")) } if invalidParams.Len() > 0 { @@ -20122,50 +29725,135 @@ func (s *TemplateSourceAnalysis) Validate() error { return nil } -// SetArn sets the Arn field's value. -func (s *TemplateSourceAnalysis) SetArn(v string) *TemplateSourceAnalysis { - s.Arn = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *UpdateAccountSettingsInput) SetAwsAccountId(v string) *UpdateAccountSettingsInput { + s.AwsAccountId = &v return s } -// SetDataSetReferences sets the DataSetReferences field's value. -func (s *TemplateSourceAnalysis) SetDataSetReferences(v []*DataSetReference) *TemplateSourceAnalysis { - s.DataSetReferences = v +// SetDefaultNamespace sets the DefaultNamespace field's value. +func (s *UpdateAccountSettingsInput) SetDefaultNamespace(v string) *UpdateAccountSettingsInput { + s.DefaultNamespace = &v return s } -// The source entity of the template. -type TemplateSourceEntity struct { +// SetNotificationEmail sets the NotificationEmail field's value. +func (s *UpdateAccountSettingsInput) SetNotificationEmail(v string) *UpdateAccountSettingsInput { + s.NotificationEmail = &v + return s +} + +type UpdateAccountSettingsOutput struct { _ struct{} `type:"structure"` - // The source analysis, if it is based on an analysis. - SourceAnalysis *TemplateSourceAnalysis `type:"structure"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` +} + +// String returns the string representation +func (s UpdateAccountSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountSettingsOutput) GoString() string { + return s.String() +} + +// SetRequestId sets the RequestId field's value. 
+func (s *UpdateAccountSettingsOutput) SetRequestId(v string) *UpdateAccountSettingsOutput { + s.RequestId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *UpdateAccountSettingsOutput) SetStatus(v int64) *UpdateAccountSettingsOutput { + s.Status = &v + return s +} + +type UpdateAnalysisInput struct { + _ struct{} `type:"structure"` + + // The ID for the analysis that you're updating. This ID displays in the URL + // of the analysis. + // + // AnalysisId is a required field + AnalysisId *string `location:"uri" locationName:"AnalysisId" min:"1" type:"string" required:"true"` + + // The ID of the AWS account that contains the analysis that you're updating. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + + // A descriptive name for the analysis that you're updating. This name displays + // for the analysis in the QuickSight console. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The parameter names and override values that you want to use. An analysis + // can have any parameter type, and some parameters might accept multiple values. + Parameters *Parameters `type:"structure"` + + // A source entity to use for the analysis that you're updating. This metadata + // structure contains details that describe a source template and one or more + // datasets. + // + // SourceEntity is a required field + SourceEntity *AnalysisSourceEntity `type:"structure" required:"true"` - // The source template, if it is based on an template. - SourceTemplate *TemplateSourceTemplate `type:"structure"` + // The Amazon Resource Name (ARN) for the theme to apply to the analysis that + // you're creating. To see the theme in the QuickSight console, make sure that + // you have access to it. + ThemeArn *string `type:"string"` } // String returns the string representation -func (s TemplateSourceEntity) String() string { +func (s UpdateAnalysisInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TemplateSourceEntity) GoString() string { +func (s UpdateAnalysisInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *TemplateSourceEntity) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TemplateSourceEntity"} - if s.SourceAnalysis != nil { - if err := s.SourceAnalysis.Validate(); err != nil { - invalidParams.AddNested("SourceAnalysis", err.(request.ErrInvalidParams)) +func (s *UpdateAnalysisInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAnalysisInput"} + if s.AnalysisId == nil { + invalidParams.Add(request.NewErrParamRequired("AnalysisId")) + } + if s.AnalysisId != nil && len(*s.AnalysisId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnalysisId", 1)) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.SourceEntity == nil { + invalidParams.Add(request.NewErrParamRequired("SourceEntity")) + } + if s.Parameters != nil { + if err := s.Parameters.Validate(); err != nil { + invalidParams.AddNested("Parameters", err.(request.ErrInvalidParams)) } } - if s.SourceTemplate != nil { - if err := s.SourceTemplate.Validate(); err != nil { - invalidParams.AddNested("SourceTemplate", err.(request.ErrInvalidParams)) + if s.SourceEntity != nil { + if err := s.SourceEntity.Validate(); err != nil { + invalidParams.AddNested("SourceEntity", err.(request.ErrInvalidParams)) } } @@ -20175,317 +29863,371 @@ func (s *TemplateSourceEntity) Validate() error { return nil } -// SetSourceAnalysis sets the SourceAnalysis field's value. -func (s *TemplateSourceEntity) SetSourceAnalysis(v *TemplateSourceAnalysis) *TemplateSourceEntity { - s.SourceAnalysis = v +// SetAnalysisId sets the AnalysisId field's value. +func (s *UpdateAnalysisInput) SetAnalysisId(v string) *UpdateAnalysisInput { + s.AnalysisId = &v return s } -// SetSourceTemplate sets the SourceTemplate field's value. -func (s *TemplateSourceEntity) SetSourceTemplate(v *TemplateSourceTemplate) *TemplateSourceEntity { - s.SourceTemplate = v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *UpdateAnalysisInput) SetAwsAccountId(v string) *UpdateAnalysisInput { + s.AwsAccountId = &v return s } -// The source template of the template. -type TemplateSourceTemplate struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the resource. - // - // Arn is a required field - Arn *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s TemplateSourceTemplate) String() string { - return awsutil.Prettify(s) +// SetName sets the Name field's value. +func (s *UpdateAnalysisInput) SetName(v string) *UpdateAnalysisInput { + s.Name = &v + return s } -// GoString returns the string representation -func (s TemplateSourceTemplate) GoString() string { - return s.String() +// SetParameters sets the Parameters field's value. +func (s *UpdateAnalysisInput) SetParameters(v *Parameters) *UpdateAnalysisInput { + s.Parameters = v + return s } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *TemplateSourceTemplate) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TemplateSourceTemplate"} - if s.Arn == nil { - invalidParams.Add(request.NewErrParamRequired("Arn")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetSourceEntity sets the SourceEntity field's value. +func (s *UpdateAnalysisInput) SetSourceEntity(v *AnalysisSourceEntity) *UpdateAnalysisInput { + s.SourceEntity = v + return s } -// SetArn sets the Arn field's value. -func (s *TemplateSourceTemplate) SetArn(v string) *TemplateSourceTemplate { - s.Arn = &v +// SetThemeArn sets the ThemeArn field's value. +func (s *UpdateAnalysisInput) SetThemeArn(v string) *UpdateAnalysisInput { + s.ThemeArn = &v return s } -// The template summary. -type TemplateSummary struct { +type UpdateAnalysisOutput struct { _ struct{} `type:"structure"` - // A summary of a template. - Arn *string `type:"string"` - - // The last time that this template was created. - CreatedTime *time.Time `type:"timestamp"` + // The ID of the analysis. + AnalysisId *string `min:"1" type:"string"` - // The last time that this template was updated. - LastUpdatedTime *time.Time `type:"timestamp"` + // The ARN of the analysis that you're updating. + Arn *string `type:"string"` - // A structure containing a list of version numbers for the template summary. - LatestVersionNumber *int64 `min:"1" type:"long"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // A display name for the template. - Name *string `min:"1" type:"string"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` - // The ID of the template. This ID is unique per AWS Region for each AWS account. - TemplateId *string `min:"1" type:"string"` + // The update status of the last update that was made to the analysis. + UpdateStatus *string `type:"string" enum:"ResourceStatus"` } // String returns the string representation -func (s TemplateSummary) String() string { +func (s UpdateAnalysisOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TemplateSummary) GoString() string { +func (s UpdateAnalysisOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *TemplateSummary) SetArn(v string) *TemplateSummary { - s.Arn = &v - return s -} - -// SetCreatedTime sets the CreatedTime field's value. -func (s *TemplateSummary) SetCreatedTime(v time.Time) *TemplateSummary { - s.CreatedTime = &v +// SetAnalysisId sets the AnalysisId field's value. +func (s *UpdateAnalysisOutput) SetAnalysisId(v string) *UpdateAnalysisOutput { + s.AnalysisId = &v return s } -// SetLastUpdatedTime sets the LastUpdatedTime field's value. -func (s *TemplateSummary) SetLastUpdatedTime(v time.Time) *TemplateSummary { - s.LastUpdatedTime = &v +// SetArn sets the Arn field's value. +func (s *UpdateAnalysisOutput) SetArn(v string) *UpdateAnalysisOutput { + s.Arn = &v return s } -// SetLatestVersionNumber sets the LatestVersionNumber field's value. -func (s *TemplateSummary) SetLatestVersionNumber(v int64) *TemplateSummary { - s.LatestVersionNumber = &v +// SetRequestId sets the RequestId field's value. +func (s *UpdateAnalysisOutput) SetRequestId(v string) *UpdateAnalysisOutput { + s.RequestId = &v return s } -// SetName sets the Name field's value. -func (s *TemplateSummary) SetName(v string) *TemplateSummary { - s.Name = &v +// SetStatus sets the Status field's value. 
+func (s *UpdateAnalysisOutput) SetStatus(v int64) *UpdateAnalysisOutput { + s.Status = &v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *TemplateSummary) SetTemplateId(v string) *TemplateSummary { - s.TemplateId = &v +// SetUpdateStatus sets the UpdateStatus field's value. +func (s *UpdateAnalysisOutput) SetUpdateStatus(v string) *UpdateAnalysisOutput { + s.UpdateStatus = &v return s } -// A version of a template. -type TemplateVersion struct { +type UpdateAnalysisPermissionsInput struct { _ struct{} `type:"structure"` - // The time that this template version was created. - CreatedTime *time.Time `type:"timestamp"` - - // Schema of the dataset identified by the placeholder. The idea is that any - // dashboard created from the template should be bound to new datasets matching - // the same schema described through this API. . - DataSetConfigurations []*DataSetConfiguration `type:"list"` - - // The description of the template. - Description *string `min:"1" type:"string"` - - // Errors associated with the template. - Errors []*TemplateError `min:"1" type:"list"` + // The ID of the analysis whose permissions you're updating. The ID is part + // of the analysis URL. + // + // AnalysisId is a required field + AnalysisId *string `location:"uri" locationName:"AnalysisId" min:"1" type:"string" required:"true"` - // The Amazon Resource Name (ARN) of the analysis or template which was used - // to create this template. - SourceEntityArn *string `type:"string"` + // The ID of the AWS account that contains the analysis whose permissions you're + // updating. You must be using the AWS account that the analysis is in. + // + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The HTTP status of the request. - Status *string `type:"string" enum:"ResourceStatus"` + // A structure that describes the permissions to add and the principal to add + // them to. + GrantPermissions []*ResourcePermission `type:"list"` - // The version number of the template. - VersionNumber *int64 `min:"1" type:"long"` + // A structure that describes the permissions to remove and the principal to + // remove them from. + RevokePermissions []*ResourcePermission `type:"list"` } // String returns the string representation -func (s TemplateVersion) String() string { +func (s UpdateAnalysisPermissionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TemplateVersion) GoString() string { +func (s UpdateAnalysisPermissionsInput) GoString() string { return s.String() } -// SetCreatedTime sets the CreatedTime field's value. -func (s *TemplateVersion) SetCreatedTime(v time.Time) *TemplateVersion { - s.CreatedTime = &v - return s -} - -// SetDataSetConfigurations sets the DataSetConfigurations field's value. -func (s *TemplateVersion) SetDataSetConfigurations(v []*DataSetConfiguration) *TemplateVersion { - s.DataSetConfigurations = v - return s -} +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateAnalysisPermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAnalysisPermissionsInput"} + if s.AnalysisId == nil { + invalidParams.Add(request.NewErrParamRequired("AnalysisId")) + } + if s.AnalysisId != nil && len(*s.AnalysisId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AnalysisId", 1)) + } + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) + } + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) + } + if s.GrantPermissions != nil { + for i, v := range s.GrantPermissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GrantPermissions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RevokePermissions != nil { + for i, v := range s.RevokePermissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RevokePermissions", i), err.(request.ErrInvalidParams)) + } + } + } -// SetDescription sets the Description field's value. -func (s *TemplateVersion) SetDescription(v string) *TemplateVersion { - s.Description = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetErrors sets the Errors field's value. -func (s *TemplateVersion) SetErrors(v []*TemplateError) *TemplateVersion { - s.Errors = v +// SetAnalysisId sets the AnalysisId field's value. +func (s *UpdateAnalysisPermissionsInput) SetAnalysisId(v string) *UpdateAnalysisPermissionsInput { + s.AnalysisId = &v return s } -// SetSourceEntityArn sets the SourceEntityArn field's value. -func (s *TemplateVersion) SetSourceEntityArn(v string) *TemplateVersion { - s.SourceEntityArn = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *UpdateAnalysisPermissionsInput) SetAwsAccountId(v string) *UpdateAnalysisPermissionsInput { + s.AwsAccountId = &v return s } -// SetStatus sets the Status field's value. -func (s *TemplateVersion) SetStatus(v string) *TemplateVersion { - s.Status = &v +// SetGrantPermissions sets the GrantPermissions field's value. +func (s *UpdateAnalysisPermissionsInput) SetGrantPermissions(v []*ResourcePermission) *UpdateAnalysisPermissionsInput { + s.GrantPermissions = v return s } -// SetVersionNumber sets the VersionNumber field's value. -func (s *TemplateVersion) SetVersionNumber(v int64) *TemplateVersion { - s.VersionNumber = &v +// SetRevokePermissions sets the RevokePermissions field's value. +func (s *UpdateAnalysisPermissionsInput) SetRevokePermissions(v []*ResourcePermission) *UpdateAnalysisPermissionsInput { + s.RevokePermissions = v return s } -// The template version. -type TemplateVersionSummary struct { +type UpdateAnalysisPermissionsOutput struct { _ struct{} `type:"structure"` - // The ARN of the template version. - Arn *string `type:"string"` + // The Amazon Resource Name (ARN) of the analysis that you updated. + AnalysisArn *string `type:"string"` - // The time that this template version was created. - CreatedTime *time.Time `type:"timestamp"` + // The ID of the analysis that you updated permissions for. + AnalysisId *string `min:"1" type:"string"` - // The description of the template version. - Description *string `min:"1" type:"string"` + // A structure that describes the principals and the resource-level permissions + // on an analysis. 
+ Permissions []*ResourcePermission `min:"1" type:"list"` - // The status of the template version. - Status *string `type:"string" enum:"ResourceStatus"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // The version number of the template version. - VersionNumber *int64 `min:"1" type:"long"` + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s TemplateVersionSummary) String() string { +func (s UpdateAnalysisPermissionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TemplateVersionSummary) GoString() string { +func (s UpdateAnalysisPermissionsOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *TemplateVersionSummary) SetArn(v string) *TemplateVersionSummary { - s.Arn = &v +// SetAnalysisArn sets the AnalysisArn field's value. +func (s *UpdateAnalysisPermissionsOutput) SetAnalysisArn(v string) *UpdateAnalysisPermissionsOutput { + s.AnalysisArn = &v return s } -// SetCreatedTime sets the CreatedTime field's value. -func (s *TemplateVersionSummary) SetCreatedTime(v time.Time) *TemplateVersionSummary { - s.CreatedTime = &v +// SetAnalysisId sets the AnalysisId field's value. +func (s *UpdateAnalysisPermissionsOutput) SetAnalysisId(v string) *UpdateAnalysisPermissionsOutput { + s.AnalysisId = &v return s } -// SetDescription sets the Description field's value. -func (s *TemplateVersionSummary) SetDescription(v string) *TemplateVersionSummary { - s.Description = &v +// SetPermissions sets the Permissions field's value. +func (s *UpdateAnalysisPermissionsOutput) SetPermissions(v []*ResourcePermission) *UpdateAnalysisPermissionsOutput { + s.Permissions = v + return s +} + +// SetRequestId sets the RequestId field's value. +func (s *UpdateAnalysisPermissionsOutput) SetRequestId(v string) *UpdateAnalysisPermissionsOutput { + s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *TemplateVersionSummary) SetStatus(v string) *TemplateVersionSummary { +func (s *UpdateAnalysisPermissionsOutput) SetStatus(v int64) *UpdateAnalysisPermissionsOutput { s.Status = &v return s } -// SetVersionNumber sets the VersionNumber field's value. -func (s *TemplateVersionSummary) SetVersionNumber(v int64) *TemplateVersionSummary { - s.VersionNumber = &v - return s -} - -// Teradata parameters. -type TeradataParameters struct { +type UpdateDashboardInput struct { _ struct{} `type:"structure"` - // Database. + // The ID of the AWS account that contains the dashboard that you're updating. // - // Database is a required field - Database *string `min:"1" type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Host. + // The ID for the dashboard. // - // Host is a required field - Host *string `min:"1" type:"string" required:"true"` + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` - // Port. + // Options for publishing the dashboard when you create it: // - // Port is a required field - Port *int64 `min:"1" type:"integer" required:"true"` + // * AvailabilityStatus for AdHocFilteringOption - This status can be either + // ENABLED or DISABLED. 
When this is set to DISABLED, QuickSight disables + // the left filter pane on the published dashboard, which can be used for + // ad hoc (one-time) filtering. This option is ENABLED by default. + // + // * AvailabilityStatus for ExportToCSVOption - This status can be either + // ENABLED or DISABLED. The visual option to export data to .CSV format isn't + // enabled when this is set to DISABLED. This option is ENABLED by default. + // + // * VisibilityState for SheetControlsOption - This visibility state can + // be either COLLAPSED or EXPANDED. This option is COLLAPSED by default. + DashboardPublishOptions *DashboardPublishOptions `type:"structure"` + + // The display name of the dashboard. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A structure that contains the parameters of the dashboard. These are parameter + // overrides for a dashboard. A dashboard can have any type of parameters, and + // some parameters might accept multiple values. + Parameters *Parameters `type:"structure"` + + // The entity that you are using as a source when you update the dashboard. + // In SourceEntity, you specify the type of object you're using as source. You + // can only update a dashboard from a template, so you use a SourceTemplate + // entity. If you need to update a dashboard from an analysis, first convert + // the analysis to a template by using the CreateTemplate API operation. For + // SourceTemplate, specify the Amazon Resource Name (ARN) of the source template. + // The SourceTemplate ARN can contain any AWS Account and any QuickSight-supported + // AWS Region. + // + // Use the DataSetReferences entity within SourceTemplate to list the replacement + // datasets for the placeholders listed in the original. The schema in each + // dataset must match its placeholder. + // + // SourceEntity is a required field + SourceEntity *DashboardSourceEntity `type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) of the theme that is being used for this dashboard. + // If you add a value for this field, it overrides the value that was originally + // associated with the entity. The theme ARN must exist in the same AWS account + // where you create the dashboard. + ThemeArn *string `type:"string"` + + // A description for the first version of the dashboard being created. + VersionDescription *string `min:"1" type:"string"` } // String returns the string representation -func (s TeradataParameters) String() string { +func (s UpdateDashboardInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TeradataParameters) GoString() string { +func (s UpdateDashboardInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *TeradataParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TeradataParameters"} - if s.Database == nil { - invalidParams.Add(request.NewErrParamRequired("Database")) +func (s *UpdateDashboardInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDashboardInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.Database != nil && len(*s.Database) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Database", 1)) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Host == nil { - invalidParams.Add(request.NewErrParamRequired("Host")) + if s.DashboardId == nil { + invalidParams.Add(request.NewErrParamRequired("DashboardId")) } - if s.Host != nil && len(*s.Host) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Host", 1)) + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) } - if s.Port == nil { - invalidParams.Add(request.NewErrParamRequired("Port")) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.Port != nil && *s.Port < 1 { - invalidParams.Add(request.NewErrParamMinValue("Port", 1)) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.SourceEntity == nil { + invalidParams.Add(request.NewErrParamRequired("SourceEntity")) + } + if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VersionDescription", 1)) + } + if s.Parameters != nil { + if err := s.Parameters.Validate(); err != nil { + invalidParams.AddNested("Parameters", err.(request.ErrInvalidParams)) + } + } + if s.SourceEntity != nil { + if err := s.SourceEntity.Validate(); err != nil { + invalidParams.AddNested("SourceEntity", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -20494,234 +30236,187 @@ func (s *TeradataParameters) Validate() error { return nil } -// SetDatabase sets the Database field's value. -func (s *TeradataParameters) SetDatabase(v string) *TeradataParameters { - s.Database = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *UpdateDashboardInput) SetAwsAccountId(v string) *UpdateDashboardInput { + s.AwsAccountId = &v return s } -// SetHost sets the Host field's value. -func (s *TeradataParameters) SetHost(v string) *TeradataParameters { - s.Host = &v +// SetDashboardId sets the DashboardId field's value. +func (s *UpdateDashboardInput) SetDashboardId(v string) *UpdateDashboardInput { + s.DashboardId = &v return s } -// SetPort sets the Port field's value. -func (s *TeradataParameters) SetPort(v int64) *TeradataParameters { - s.Port = &v +// SetDashboardPublishOptions sets the DashboardPublishOptions field's value. +func (s *UpdateDashboardInput) SetDashboardPublishOptions(v *DashboardPublishOptions) *UpdateDashboardInput { + s.DashboardPublishOptions = v return s } -// Access is throttled. -type ThrottlingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata - - Message_ *string `locationName:"Message" type:"string"` - - // The AWS request ID for this request. 
- RequestId *string `type:"string"` -} - -// String returns the string representation -func (s ThrottlingException) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ThrottlingException) GoString() string { - return s.String() -} - -func newErrorThrottlingException(v protocol.ResponseMetadata) error { - return &ThrottlingException{ - respMetadata: v, - } -} - -// Code returns the exception type name. -func (s ThrottlingException) Code() string { - return "ThrottlingException" -} - -// Message returns the exception's message. -func (s ThrottlingException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetName sets the Name field's value. +func (s *UpdateDashboardInput) SetName(v string) *UpdateDashboardInput { + s.Name = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThrottlingException) OrigErr() error { - return nil +// SetParameters sets the Parameters field's value. +func (s *UpdateDashboardInput) SetParameters(v *Parameters) *UpdateDashboardInput { + s.Parameters = v + return s } -func (s ThrottlingException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetSourceEntity sets the SourceEntity field's value. +func (s *UpdateDashboardInput) SetSourceEntity(v *DashboardSourceEntity) *UpdateDashboardInput { + s.SourceEntity = v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s ThrottlingException) StatusCode() int { - return s.respMetadata.StatusCode +// SetThemeArn sets the ThemeArn field's value. +func (s *UpdateDashboardInput) SetThemeArn(v string) *UpdateDashboardInput { + s.ThemeArn = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s ThrottlingException) RequestID() string { - return s.respMetadata.RequestID +// SetVersionDescription sets the VersionDescription field's value. +func (s *UpdateDashboardInput) SetVersionDescription(v string) *UpdateDashboardInput { + s.VersionDescription = &v + return s } -// A data transformation on a logical table. This is a variant type structure. -// For this structure to be valid, only one of the attributes can be non-null. -type TransformOperation struct { +type UpdateDashboardOutput struct { _ struct{} `type:"structure"` - // A transform operation that casts a column to a different type. - CastColumnTypeOperation *CastColumnTypeOperation `type:"structure"` + // The Amazon Resource Name (ARN) of the resource. + Arn *string `type:"string"` - // An operation that creates calculated columns. Columns created in one such - // operation form a lexical closure. - CreateColumnsOperation *CreateColumnsOperation `type:"structure"` + // The creation status of the request. + CreationStatus *string `type:"string" enum:"ResourceStatus"` - // An operation that filters rows based on some condition. - FilterOperation *FilterOperation `type:"structure"` + // The ID for the dashboard. + DashboardId *string `min:"1" type:"string"` - // An operation that projects columns. Operations that come after a projection - // can only refer to projected columns. - ProjectOperation *ProjectOperation `type:"structure"` + // The AWS request ID for this operation. + RequestId *string `type:"string"` - // An operation that renames a column. - RenameColumnOperation *RenameColumnOperation `type:"structure"` + // The HTTP status of the request. 
+ Status *int64 `type:"integer"` - // An operation that tags a column with additional information. - TagColumnOperation *TagColumnOperation `type:"structure"` + // The ARN of the dashboard, including the version number. + VersionArn *string `type:"string"` } // String returns the string representation -func (s TransformOperation) String() string { +func (s UpdateDashboardOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TransformOperation) GoString() string { +func (s UpdateDashboardOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *TransformOperation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TransformOperation"} - if s.CastColumnTypeOperation != nil { - if err := s.CastColumnTypeOperation.Validate(); err != nil { - invalidParams.AddNested("CastColumnTypeOperation", err.(request.ErrInvalidParams)) - } - } - if s.CreateColumnsOperation != nil { - if err := s.CreateColumnsOperation.Validate(); err != nil { - invalidParams.AddNested("CreateColumnsOperation", err.(request.ErrInvalidParams)) - } - } - if s.FilterOperation != nil { - if err := s.FilterOperation.Validate(); err != nil { - invalidParams.AddNested("FilterOperation", err.(request.ErrInvalidParams)) - } - } - if s.ProjectOperation != nil { - if err := s.ProjectOperation.Validate(); err != nil { - invalidParams.AddNested("ProjectOperation", err.(request.ErrInvalidParams)) - } - } - if s.RenameColumnOperation != nil { - if err := s.RenameColumnOperation.Validate(); err != nil { - invalidParams.AddNested("RenameColumnOperation", err.(request.ErrInvalidParams)) - } - } - if s.TagColumnOperation != nil { - if err := s.TagColumnOperation.Validate(); err != nil { - invalidParams.AddNested("TagColumnOperation", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCastColumnTypeOperation sets the CastColumnTypeOperation field's value. -func (s *TransformOperation) SetCastColumnTypeOperation(v *CastColumnTypeOperation) *TransformOperation { - s.CastColumnTypeOperation = v +// SetArn sets the Arn field's value. +func (s *UpdateDashboardOutput) SetArn(v string) *UpdateDashboardOutput { + s.Arn = &v return s } -// SetCreateColumnsOperation sets the CreateColumnsOperation field's value. -func (s *TransformOperation) SetCreateColumnsOperation(v *CreateColumnsOperation) *TransformOperation { - s.CreateColumnsOperation = v +// SetCreationStatus sets the CreationStatus field's value. +func (s *UpdateDashboardOutput) SetCreationStatus(v string) *UpdateDashboardOutput { + s.CreationStatus = &v return s } -// SetFilterOperation sets the FilterOperation field's value. -func (s *TransformOperation) SetFilterOperation(v *FilterOperation) *TransformOperation { - s.FilterOperation = v +// SetDashboardId sets the DashboardId field's value. +func (s *UpdateDashboardOutput) SetDashboardId(v string) *UpdateDashboardOutput { + s.DashboardId = &v return s } -// SetProjectOperation sets the ProjectOperation field's value. -func (s *TransformOperation) SetProjectOperation(v *ProjectOperation) *TransformOperation { - s.ProjectOperation = v +// SetRequestId sets the RequestId field's value. +func (s *UpdateDashboardOutput) SetRequestId(v string) *UpdateDashboardOutput { + s.RequestId = &v return s } -// SetRenameColumnOperation sets the RenameColumnOperation field's value. 
-func (s *TransformOperation) SetRenameColumnOperation(v *RenameColumnOperation) *TransformOperation { - s.RenameColumnOperation = v +// SetStatus sets the Status field's value. +func (s *UpdateDashboardOutput) SetStatus(v int64) *UpdateDashboardOutput { + s.Status = &v return s } -// SetTagColumnOperation sets the TagColumnOperation field's value. -func (s *TransformOperation) SetTagColumnOperation(v *TagColumnOperation) *TransformOperation { - s.TagColumnOperation = v +// SetVersionArn sets the VersionArn field's value. +func (s *UpdateDashboardOutput) SetVersionArn(v string) *UpdateDashboardOutput { + s.VersionArn = &v return s } -// Twitter parameters. -type TwitterParameters struct { +type UpdateDashboardPermissionsInput struct { _ struct{} `type:"structure"` - // Maximum number of rows to query Twitter. + // The ID of the AWS account that contains the dashboard whose permissions you're + // updating. // - // MaxRows is a required field - MaxRows *int64 `min:"1" type:"integer" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Twitter query string. + // The ID for the dashboard. // - // Query is a required field - Query *string `min:"1" type:"string" required:"true"` + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + + // The permissions that you want to grant on this resource. + GrantPermissions []*ResourcePermission `type:"list"` + + // The permissions that you want to revoke from this resource. + RevokePermissions []*ResourcePermission `type:"list"` } // String returns the string representation -func (s TwitterParameters) String() string { +func (s UpdateDashboardPermissionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s TwitterParameters) GoString() string { +func (s UpdateDashboardPermissionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *TwitterParameters) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TwitterParameters"} - if s.MaxRows == nil { - invalidParams.Add(request.NewErrParamRequired("MaxRows")) +func (s *UpdateDashboardPermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDashboardPermissionsInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.MaxRows != nil && *s.MaxRows < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxRows", 1)) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Query == nil { - invalidParams.Add(request.NewErrParamRequired("Query")) + if s.DashboardId == nil { + invalidParams.Add(request.NewErrParamRequired("DashboardId")) } - if s.Query != nil && len(*s.Query) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Query", 1)) + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) + } + if s.GrantPermissions != nil { + for i, v := range s.GrantPermissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GrantPermissions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RevokePermissions != nil { + for i, v := range s.RevokePermissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RevokePermissions", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -20730,119 +30425,138 @@ func (s *TwitterParameters) Validate() error { return nil } -// SetMaxRows sets the MaxRows field's value. -func (s *TwitterParameters) SetMaxRows(v int64) *TwitterParameters { - s.MaxRows = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *UpdateDashboardPermissionsInput) SetAwsAccountId(v string) *UpdateDashboardPermissionsInput { + s.AwsAccountId = &v return s } -// SetQuery sets the Query field's value. -func (s *TwitterParameters) SetQuery(v string) *TwitterParameters { - s.Query = &v +// SetDashboardId sets the DashboardId field's value. +func (s *UpdateDashboardPermissionsInput) SetDashboardId(v string) *UpdateDashboardPermissionsInput { + s.DashboardId = &v return s } -// This error indicates that you are calling an operation on an Amazon QuickSight -// subscription where the edition doesn't include support for that operation. -// Amazon QuickSight currently has Standard Edition and Enterprise Edition. -// Not every operation and capability is available in every edition. -type UnsupportedUserEditionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// SetGrantPermissions sets the GrantPermissions field's value. +func (s *UpdateDashboardPermissionsInput) SetGrantPermissions(v []*ResourcePermission) *UpdateDashboardPermissionsInput { + s.GrantPermissions = v + return s +} - Message_ *string `locationName:"Message" type:"string"` +// SetRevokePermissions sets the RevokePermissions field's value. +func (s *UpdateDashboardPermissionsInput) SetRevokePermissions(v []*ResourcePermission) *UpdateDashboardPermissionsInput { + s.RevokePermissions = v + return s +} - // The AWS request ID for this request. +type UpdateDashboardPermissionsOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dashboard. 
+ DashboardArn *string `type:"string"` + + // The ID for the dashboard. + DashboardId *string `min:"1" type:"string"` + + // Information about the permissions on the dashboard. + Permissions []*ResourcePermission `min:"1" type:"list"` + + // The AWS request ID for this operation. RequestId *string `type:"string"` + + // The HTTP status of the request. + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s UnsupportedUserEditionException) String() string { +func (s UpdateDashboardPermissionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UnsupportedUserEditionException) GoString() string { +func (s UpdateDashboardPermissionsOutput) GoString() string { return s.String() } -func newErrorUnsupportedUserEditionException(v protocol.ResponseMetadata) error { - return &UnsupportedUserEditionException{ - respMetadata: v, - } -} - -// Code returns the exception type name. -func (s UnsupportedUserEditionException) Code() string { - return "UnsupportedUserEditionException" -} - -// Message returns the exception's message. -func (s UnsupportedUserEditionException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" +// SetDashboardArn sets the DashboardArn field's value. +func (s *UpdateDashboardPermissionsOutput) SetDashboardArn(v string) *UpdateDashboardPermissionsOutput { + s.DashboardArn = &v + return s } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedUserEditionException) OrigErr() error { - return nil +// SetDashboardId sets the DashboardId field's value. +func (s *UpdateDashboardPermissionsOutput) SetDashboardId(v string) *UpdateDashboardPermissionsOutput { + s.DashboardId = &v + return s } -func (s UnsupportedUserEditionException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetPermissions sets the Permissions field's value. +func (s *UpdateDashboardPermissionsOutput) SetPermissions(v []*ResourcePermission) *UpdateDashboardPermissionsOutput { + s.Permissions = v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s UnsupportedUserEditionException) StatusCode() int { - return s.respMetadata.StatusCode +// SetRequestId sets the RequestId field's value. +func (s *UpdateDashboardPermissionsOutput) SetRequestId(v string) *UpdateDashboardPermissionsOutput { + s.RequestId = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s UnsupportedUserEditionException) RequestID() string { - return s.respMetadata.RequestID +// SetStatus sets the Status field's value. +func (s *UpdateDashboardPermissionsOutput) SetStatus(v int64) *UpdateDashboardPermissionsOutput { + s.Status = &v + return s } -type UntagResourceInput struct { +type UpdateDashboardPublishedVersionInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource that you want to untag. + // The ID of the AWS account that contains the dashboard that you're updating. // - // ResourceArn is a required field - ResourceArn *string `location:"uri" locationName:"ResourceArn" type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The keys of the key-value pairs for the resource tag or tags assigned to - // the resource. + // The ID for the dashboard. 
// - // TagKeys is a required field - TagKeys []*string `location:"querystring" locationName:"keys" min:"1" type:"list" required:"true"` + // DashboardId is a required field + DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + + // The version number of the dashboard. + // + // VersionNumber is a required field + VersionNumber *int64 `location:"uri" locationName:"VersionNumber" min:"1" type:"long" required:"true"` } // String returns the string representation -func (s UntagResourceInput) String() string { +func (s UpdateDashboardPublishedVersionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UntagResourceInput) GoString() string { +func (s UpdateDashboardPublishedVersionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *UntagResourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} - if s.ResourceArn == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceArn")) +func (s *UpdateDashboardPublishedVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDashboardPublishedVersionInput"} + if s.AwsAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } - if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.TagKeys == nil { - invalidParams.Add(request.NewErrParamRequired("TagKeys")) + if s.DashboardId == nil { + invalidParams.Add(request.NewErrParamRequired("DashboardId")) } - if s.TagKeys != nil && len(s.TagKeys) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + if s.DashboardId != nil && len(*s.DashboardId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) + } + if s.VersionNumber == nil { + invalidParams.Add(request.NewErrParamRequired("VersionNumber")) + } + if s.VersionNumber != nil && *s.VersionNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("VersionNumber", 1)) } if invalidParams.Len() > 0 { @@ -20851,20 +30565,32 @@ func (s *UntagResourceInput) Validate() error { return nil } -// SetResourceArn sets the ResourceArn field's value. -func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { - s.ResourceArn = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *UpdateDashboardPublishedVersionInput) SetAwsAccountId(v string) *UpdateDashboardPublishedVersionInput { + s.AwsAccountId = &v return s } -// SetTagKeys sets the TagKeys field's value. -func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { - s.TagKeys = v +// SetDashboardId sets the DashboardId field's value. +func (s *UpdateDashboardPublishedVersionInput) SetDashboardId(v string) *UpdateDashboardPublishedVersionInput { + s.DashboardId = &v + return s +} + +// SetVersionNumber sets the VersionNumber field's value. +func (s *UpdateDashboardPublishedVersionInput) SetVersionNumber(v int64) *UpdateDashboardPublishedVersionInput { + s.VersionNumber = &v return s } -type UntagResourceOutput struct { - _ struct{} `type:"structure"` +type UpdateDashboardPublishedVersionOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the dashboard. 
+ DashboardArn *string `type:"string"` + + // The ID for the dashboard. + DashboardId *string `min:"1" type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -20874,101 +30600,113 @@ type UntagResourceOutput struct { } // String returns the string representation -func (s UntagResourceOutput) String() string { +func (s UpdateDashboardPublishedVersionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UntagResourceOutput) GoString() string { +func (s UpdateDashboardPublishedVersionOutput) GoString() string { return s.String() } +// SetDashboardArn sets the DashboardArn field's value. +func (s *UpdateDashboardPublishedVersionOutput) SetDashboardArn(v string) *UpdateDashboardPublishedVersionOutput { + s.DashboardArn = &v + return s +} + +// SetDashboardId sets the DashboardId field's value. +func (s *UpdateDashboardPublishedVersionOutput) SetDashboardId(v string) *UpdateDashboardPublishedVersionOutput { + s.DashboardId = &v + return s +} + // SetRequestId sets the RequestId field's value. -func (s *UntagResourceOutput) SetRequestId(v string) *UntagResourceOutput { +func (s *UpdateDashboardPublishedVersionOutput) SetRequestId(v string) *UpdateDashboardPublishedVersionOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *UntagResourceOutput) SetStatus(v int64) *UntagResourceOutput { +func (s *UpdateDashboardPublishedVersionOutput) SetStatus(v int64) *UpdateDashboardPublishedVersionOutput { s.Status = &v return s } -type UpdateDashboardInput struct { +type UpdateDataSetInput struct { _ struct{} `type:"structure"` - // The ID of the AWS account that contains the dashboard that you're updating. + // The AWS account ID. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID for the dashboard. - // - // DashboardId is a required field - DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + // Groupings of columns that work together in certain QuickSight features. Currently, + // only geospatial hierarchy is supported. + ColumnGroups []*ColumnGroup `min:"1" type:"list"` - // Options for publishing the dashboard when you create it: - // - // * AvailabilityStatus for AdHocFilteringOption - This status can be either - // ENABLED or DISABLED. When this is set to DISABLED, QuickSight disables - // the left filter pane on the published dashboard, which can be used for - // ad hoc (one-time) filtering. This option is ENABLED by default. + // The ID for the dataset that you want to update. This ID is unique per AWS + // Region for each AWS account. // - // * AvailabilityStatus for ExportToCSVOption - This status can be either - // ENABLED or DISABLED. The visual option to export data to .csv format isn't - // enabled when this is set to DISABLED. This option is ENABLED by default. + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + + // Indicates whether you want to import the data into SPICE. // - // * VisibilityState for SheetControlsOption - This visibility state can - // be either COLLAPSED or EXPANDED. The sheet controls pane is collapsed - // by default when set to true. This option is COLLAPSED by default. 
- DashboardPublishOptions *DashboardPublishOptions `type:"structure"` + // ImportMode is a required field + ImportMode *string `type:"string" required:"true" enum:"DataSetImportMode"` - // The display name of the dashboard. + // Configures the combination and transformation of the data from the physical + // tables. + LogicalTableMap map[string]*LogicalTable `min:"1" type:"map"` + + // The display name for the dataset. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // A structure that contains the parameters of the dashboard. - Parameters *Parameters `type:"structure"` - - // The template or analysis from which the dashboard is created. The SouceTemplate - // entity accepts the Amazon Resource Name (ARN) of the template and also references - // to replacement datasets for the placeholders set when creating the template. - // The replacement datasets need to follow the same schema as the datasets for - // which placeholders were created when creating the template. + // Declares the physical tables that are available in the underlying data sources. // - // SourceEntity is a required field - SourceEntity *DashboardSourceEntity `type:"structure" required:"true"` + // PhysicalTableMap is a required field + PhysicalTableMap map[string]*PhysicalTable `min:"1" type:"map" required:"true"` - // A description for the first version of the dashboard being created. - VersionDescription *string `min:"1" type:"string"` + // The row-level security configuration for the data you want to create. + RowLevelPermissionDataSet *RowLevelPermissionDataSet `type:"structure"` } // String returns the string representation -func (s UpdateDashboardInput) String() string { +func (s UpdateDataSetInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDashboardInput) GoString() string { +func (s UpdateDataSetInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateDashboardInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDashboardInput"} +func (s *UpdateDataSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDataSetInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DashboardId == nil { - invalidParams.Add(request.NewErrParamRequired("DashboardId")) + if s.ColumnGroups != nil && len(s.ColumnGroups) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ColumnGroups", 1)) } - if s.DashboardId != nil && len(*s.DashboardId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) + } + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + } + if s.ImportMode == nil { + invalidParams.Add(request.NewErrParamRequired("ImportMode")) + } + if s.LogicalTableMap != nil && len(s.LogicalTableMap) < 1 { + invalidParams.Add(request.NewErrParamMinLen("LogicalTableMap", 1)) } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) @@ -20976,20 +30714,45 @@ func (s *UpdateDashboardInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.SourceEntity == nil { - invalidParams.Add(request.NewErrParamRequired("SourceEntity")) + if s.PhysicalTableMap == nil { + invalidParams.Add(request.NewErrParamRequired("PhysicalTableMap")) } - if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { - invalidParams.Add(request.NewErrParamMinLen("VersionDescription", 1)) + if s.PhysicalTableMap != nil && len(s.PhysicalTableMap) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PhysicalTableMap", 1)) } - if s.Parameters != nil { - if err := s.Parameters.Validate(); err != nil { - invalidParams.AddNested("Parameters", err.(request.ErrInvalidParams)) + if s.ColumnGroups != nil { + for i, v := range s.ColumnGroups { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ColumnGroups", i), err.(request.ErrInvalidParams)) + } } } - if s.SourceEntity != nil { - if err := s.SourceEntity.Validate(); err != nil { - invalidParams.AddNested("SourceEntity", err.(request.ErrInvalidParams)) + if s.LogicalTableMap != nil { + for i, v := range s.LogicalTableMap { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LogicalTableMap", i), err.(request.ErrInvalidParams)) + } + } + } + if s.PhysicalTableMap != nil { + for i, v := range s.PhysicalTableMap { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PhysicalTableMap", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RowLevelPermissionDataSet != nil { + if err := s.RowLevelPermissionDataSet.Validate(); err != nil { + invalidParams.AddNested("RowLevelPermissionDataSet", err.(request.ErrInvalidParams)) } } @@ -21000,160 +30763,169 @@ func (s *UpdateDashboardInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. 
-func (s *UpdateDashboardInput) SetAwsAccountId(v string) *UpdateDashboardInput { +func (s *UpdateDataSetInput) SetAwsAccountId(v string) *UpdateDataSetInput { s.AwsAccountId = &v return s } -// SetDashboardId sets the DashboardId field's value. -func (s *UpdateDashboardInput) SetDashboardId(v string) *UpdateDashboardInput { - s.DashboardId = &v +// SetColumnGroups sets the ColumnGroups field's value. +func (s *UpdateDataSetInput) SetColumnGroups(v []*ColumnGroup) *UpdateDataSetInput { + s.ColumnGroups = v return s } -// SetDashboardPublishOptions sets the DashboardPublishOptions field's value. -func (s *UpdateDashboardInput) SetDashboardPublishOptions(v *DashboardPublishOptions) *UpdateDashboardInput { - s.DashboardPublishOptions = v +// SetDataSetId sets the DataSetId field's value. +func (s *UpdateDataSetInput) SetDataSetId(v string) *UpdateDataSetInput { + s.DataSetId = &v return s } -// SetName sets the Name field's value. -func (s *UpdateDashboardInput) SetName(v string) *UpdateDashboardInput { - s.Name = &v +// SetImportMode sets the ImportMode field's value. +func (s *UpdateDataSetInput) SetImportMode(v string) *UpdateDataSetInput { + s.ImportMode = &v return s } -// SetParameters sets the Parameters field's value. -func (s *UpdateDashboardInput) SetParameters(v *Parameters) *UpdateDashboardInput { - s.Parameters = v +// SetLogicalTableMap sets the LogicalTableMap field's value. +func (s *UpdateDataSetInput) SetLogicalTableMap(v map[string]*LogicalTable) *UpdateDataSetInput { + s.LogicalTableMap = v return s } -// SetSourceEntity sets the SourceEntity field's value. -func (s *UpdateDashboardInput) SetSourceEntity(v *DashboardSourceEntity) *UpdateDashboardInput { - s.SourceEntity = v +// SetName sets the Name field's value. +func (s *UpdateDataSetInput) SetName(v string) *UpdateDataSetInput { + s.Name = &v return s } -// SetVersionDescription sets the VersionDescription field's value. -func (s *UpdateDashboardInput) SetVersionDescription(v string) *UpdateDashboardInput { - s.VersionDescription = &v +// SetPhysicalTableMap sets the PhysicalTableMap field's value. +func (s *UpdateDataSetInput) SetPhysicalTableMap(v map[string]*PhysicalTable) *UpdateDataSetInput { + s.PhysicalTableMap = v return s } -type UpdateDashboardOutput struct { +// SetRowLevelPermissionDataSet sets the RowLevelPermissionDataSet field's value. +func (s *UpdateDataSetInput) SetRowLevelPermissionDataSet(v *RowLevelPermissionDataSet) *UpdateDataSetInput { + s.RowLevelPermissionDataSet = v + return s +} + +type UpdateDataSetOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the resource. + // The Amazon Resource Name (ARN) of the dataset. Arn *string `type:"string"` - // The creation status of the request. - CreationStatus *string `type:"string" enum:"ResourceStatus"` + // The ID for the dataset that you want to create. This ID is unique per AWS + // Region for each AWS account. + DataSetId *string `type:"string"` - // The ID for the dashboard. - DashboardId *string `min:"1" type:"string"` + // The ARN for the ingestion, which is triggered as a result of dataset creation + // if the import mode is SPICE. + IngestionArn *string `type:"string"` + + // The ID of the ingestion, which is triggered as a result of dataset creation + // if the import mode is SPICE. + IngestionId *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. 
- Status *int64 `type:"integer"` - - // The ARN of the dashboard, including the version number. - VersionArn *string `type:"string"` + Status *int64 `location:"statusCode" type:"integer"` } // String returns the string representation -func (s UpdateDashboardOutput) String() string { +func (s UpdateDataSetOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDashboardOutput) GoString() string { +func (s UpdateDataSetOutput) GoString() string { return s.String() } // SetArn sets the Arn field's value. -func (s *UpdateDashboardOutput) SetArn(v string) *UpdateDashboardOutput { +func (s *UpdateDataSetOutput) SetArn(v string) *UpdateDataSetOutput { s.Arn = &v return s } -// SetCreationStatus sets the CreationStatus field's value. -func (s *UpdateDashboardOutput) SetCreationStatus(v string) *UpdateDashboardOutput { - s.CreationStatus = &v +// SetDataSetId sets the DataSetId field's value. +func (s *UpdateDataSetOutput) SetDataSetId(v string) *UpdateDataSetOutput { + s.DataSetId = &v return s } -// SetDashboardId sets the DashboardId field's value. -func (s *UpdateDashboardOutput) SetDashboardId(v string) *UpdateDashboardOutput { - s.DashboardId = &v +// SetIngestionArn sets the IngestionArn field's value. +func (s *UpdateDataSetOutput) SetIngestionArn(v string) *UpdateDataSetOutput { + s.IngestionArn = &v + return s +} + +// SetIngestionId sets the IngestionId field's value. +func (s *UpdateDataSetOutput) SetIngestionId(v string) *UpdateDataSetOutput { + s.IngestionId = &v return s } // SetRequestId sets the RequestId field's value. -func (s *UpdateDashboardOutput) SetRequestId(v string) *UpdateDashboardOutput { +func (s *UpdateDataSetOutput) SetRequestId(v string) *UpdateDataSetOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *UpdateDashboardOutput) SetStatus(v int64) *UpdateDashboardOutput { +func (s *UpdateDataSetOutput) SetStatus(v int64) *UpdateDataSetOutput { s.Status = &v return s } -// SetVersionArn sets the VersionArn field's value. -func (s *UpdateDashboardOutput) SetVersionArn(v string) *UpdateDashboardOutput { - s.VersionArn = &v - return s -} - -type UpdateDashboardPermissionsInput struct { +type UpdateDataSetPermissionsInput struct { _ struct{} `type:"structure"` - // The ID of the AWS account that contains the dashboard whose permissions you're - // updating. + // The AWS account ID. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID for the dashboard. + // The ID for the dataset whose permissions you want to update. This ID is unique + // per AWS Region for each AWS account. // - // DashboardId is a required field - DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + // DataSetId is a required field + DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` - // The permissions that you want to grant on this resource. + // The resource permissions that you want to grant to the dataset. GrantPermissions []*ResourcePermission `min:"1" type:"list"` - // The permissions that you want to revoke from this resource. + // The resource permissions that you want to revoke from the dataset. 
RevokePermissions []*ResourcePermission `min:"1" type:"list"` } // String returns the string representation -func (s UpdateDashboardPermissionsInput) String() string { +func (s UpdateDataSetPermissionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDashboardPermissionsInput) GoString() string { +func (s UpdateDataSetPermissionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateDashboardPermissionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDashboardPermissionsInput"} +func (s *UpdateDataSetPermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDataSetPermissionsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DashboardId == nil { - invalidParams.Add(request.NewErrParamRequired("DashboardId")) + if s.DataSetId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSetId")) } - if s.DashboardId != nil && len(*s.DashboardId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) + if s.DataSetId != nil && len(*s.DataSetId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) } if s.GrantPermissions != nil && len(s.GrantPermissions) < 1 { invalidParams.Add(request.NewErrParamMinLen("GrantPermissions", 1)) @@ -21189,40 +30961,38 @@ func (s *UpdateDashboardPermissionsInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *UpdateDashboardPermissionsInput) SetAwsAccountId(v string) *UpdateDashboardPermissionsInput { +func (s *UpdateDataSetPermissionsInput) SetAwsAccountId(v string) *UpdateDataSetPermissionsInput { s.AwsAccountId = &v return s } -// SetDashboardId sets the DashboardId field's value. -func (s *UpdateDashboardPermissionsInput) SetDashboardId(v string) *UpdateDashboardPermissionsInput { - s.DashboardId = &v +// SetDataSetId sets the DataSetId field's value. +func (s *UpdateDataSetPermissionsInput) SetDataSetId(v string) *UpdateDataSetPermissionsInput { + s.DataSetId = &v return s } // SetGrantPermissions sets the GrantPermissions field's value. -func (s *UpdateDashboardPermissionsInput) SetGrantPermissions(v []*ResourcePermission) *UpdateDashboardPermissionsInput { +func (s *UpdateDataSetPermissionsInput) SetGrantPermissions(v []*ResourcePermission) *UpdateDataSetPermissionsInput { s.GrantPermissions = v return s } // SetRevokePermissions sets the RevokePermissions field's value. -func (s *UpdateDashboardPermissionsInput) SetRevokePermissions(v []*ResourcePermission) *UpdateDashboardPermissionsInput { +func (s *UpdateDataSetPermissionsInput) SetRevokePermissions(v []*ResourcePermission) *UpdateDataSetPermissionsInput { s.RevokePermissions = v return s } -type UpdateDashboardPermissionsOutput struct { +type UpdateDataSetPermissionsOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the dashboard. - DashboardArn *string `type:"string"` - - // The ID for the dashboard. - DashboardId *string `min:"1" type:"string"` + // The Amazon Resource Name (ARN) of the dataset. + DataSetArn *string `type:"string"` - // Information about the permissions on the dashboard. 
- Permissions []*ResourcePermission `min:"1" type:"list"` + // The ID for the dataset whose permissions you want to update. This ID is unique + // per AWS Region for each AWS account. + DataSetId *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -21232,94 +31002,119 @@ type UpdateDashboardPermissionsOutput struct { } // String returns the string representation -func (s UpdateDashboardPermissionsOutput) String() string { +func (s UpdateDataSetPermissionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDashboardPermissionsOutput) GoString() string { +func (s UpdateDataSetPermissionsOutput) GoString() string { return s.String() } -// SetDashboardArn sets the DashboardArn field's value. -func (s *UpdateDashboardPermissionsOutput) SetDashboardArn(v string) *UpdateDashboardPermissionsOutput { - s.DashboardArn = &v - return s -} - -// SetDashboardId sets the DashboardId field's value. -func (s *UpdateDashboardPermissionsOutput) SetDashboardId(v string) *UpdateDashboardPermissionsOutput { - s.DashboardId = &v +// SetDataSetArn sets the DataSetArn field's value. +func (s *UpdateDataSetPermissionsOutput) SetDataSetArn(v string) *UpdateDataSetPermissionsOutput { + s.DataSetArn = &v return s } -// SetPermissions sets the Permissions field's value. -func (s *UpdateDashboardPermissionsOutput) SetPermissions(v []*ResourcePermission) *UpdateDashboardPermissionsOutput { - s.Permissions = v +// SetDataSetId sets the DataSetId field's value. +func (s *UpdateDataSetPermissionsOutput) SetDataSetId(v string) *UpdateDataSetPermissionsOutput { + s.DataSetId = &v return s } // SetRequestId sets the RequestId field's value. -func (s *UpdateDashboardPermissionsOutput) SetRequestId(v string) *UpdateDashboardPermissionsOutput { +func (s *UpdateDataSetPermissionsOutput) SetRequestId(v string) *UpdateDataSetPermissionsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *UpdateDashboardPermissionsOutput) SetStatus(v int64) *UpdateDashboardPermissionsOutput { +func (s *UpdateDataSetPermissionsOutput) SetStatus(v int64) *UpdateDataSetPermissionsOutput { s.Status = &v return s } -type UpdateDashboardPublishedVersionInput struct { +type UpdateDataSourceInput struct { _ struct{} `type:"structure"` - // The ID of the AWS account that contains the dashboard that you're updating. + // The AWS account ID. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID for the dashboard. + // The credentials that QuickSight that uses to connect to your underlying source. + // Currently, only credentials based on user name and password are supported. + Credentials *DataSourceCredentials `type:"structure" sensitive:"true"` + + // The ID of the data source. This ID is unique per AWS Region for each AWS + // account. // - // DashboardId is a required field - DashboardId *string `location:"uri" locationName:"DashboardId" min:"1" type:"string" required:"true"` + // DataSourceId is a required field + DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` - // The version number of the dashboard. + // The parameters that QuickSight uses to connect to your underlying source. + DataSourceParameters *DataSourceParameters `type:"structure"` + + // A display name for the data source. 
// - // VersionNumber is a required field - VersionNumber *int64 `location:"uri" locationName:"VersionNumber" min:"1" type:"long" required:"true"` + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Secure Socket Layer (SSL) properties that apply when QuickSight connects + // to your underlying source. + SslProperties *SslProperties `type:"structure"` + + // Use this parameter only when you want QuickSight to use a VPC connection + // when connecting to your underlying source. + VpcConnectionProperties *VpcConnectionProperties `type:"structure"` } // String returns the string representation -func (s UpdateDashboardPublishedVersionInput) String() string { +func (s UpdateDataSourceInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDashboardPublishedVersionInput) GoString() string { +func (s UpdateDataSourceInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateDashboardPublishedVersionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDashboardPublishedVersionInput"} +func (s *UpdateDataSourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDataSourceInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DashboardId == nil { - invalidParams.Add(request.NewErrParamRequired("DashboardId")) + if s.DataSourceId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceId")) } - if s.DashboardId != nil && len(*s.DashboardId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DashboardId", 1)) + if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) } - if s.VersionNumber == nil { - invalidParams.Add(request.NewErrParamRequired("VersionNumber")) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) } - if s.VersionNumber != nil && *s.VersionNumber < 1 { - invalidParams.Add(request.NewErrParamMinValue("VersionNumber", 1)) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Credentials != nil { + if err := s.Credentials.Validate(); err != nil { + invalidParams.AddNested("Credentials", err.(request.ErrInvalidParams)) + } + } + if s.DataSourceParameters != nil { + if err := s.DataSourceParameters.Validate(); err != nil { + invalidParams.AddNested("DataSourceParameters", err.(request.ErrInvalidParams)) + } + } + if s.VpcConnectionProperties != nil { + if err := s.VpcConnectionProperties.Validate(); err != nil { + invalidParams.AddNested("VpcConnectionProperties", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -21329,74 +31124,108 @@ func (s *UpdateDashboardPublishedVersionInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *UpdateDashboardPublishedVersionInput) SetAwsAccountId(v string) *UpdateDashboardPublishedVersionInput { +func (s *UpdateDataSourceInput) SetAwsAccountId(v string) *UpdateDataSourceInput { s.AwsAccountId = &v return s } -// SetDashboardId sets the DashboardId field's value. 
-func (s *UpdateDashboardPublishedVersionInput) SetDashboardId(v string) *UpdateDashboardPublishedVersionInput { - s.DashboardId = &v +// SetCredentials sets the Credentials field's value. +func (s *UpdateDataSourceInput) SetCredentials(v *DataSourceCredentials) *UpdateDataSourceInput { + s.Credentials = v return s } -// SetVersionNumber sets the VersionNumber field's value. -func (s *UpdateDashboardPublishedVersionInput) SetVersionNumber(v int64) *UpdateDashboardPublishedVersionInput { - s.VersionNumber = &v +// SetDataSourceId sets the DataSourceId field's value. +func (s *UpdateDataSourceInput) SetDataSourceId(v string) *UpdateDataSourceInput { + s.DataSourceId = &v return s } -type UpdateDashboardPublishedVersionOutput struct { +// SetDataSourceParameters sets the DataSourceParameters field's value. +func (s *UpdateDataSourceInput) SetDataSourceParameters(v *DataSourceParameters) *UpdateDataSourceInput { + s.DataSourceParameters = v + return s +} + +// SetName sets the Name field's value. +func (s *UpdateDataSourceInput) SetName(v string) *UpdateDataSourceInput { + s.Name = &v + return s +} + +// SetSslProperties sets the SslProperties field's value. +func (s *UpdateDataSourceInput) SetSslProperties(v *SslProperties) *UpdateDataSourceInput { + s.SslProperties = v + return s +} + +// SetVpcConnectionProperties sets the VpcConnectionProperties field's value. +func (s *UpdateDataSourceInput) SetVpcConnectionProperties(v *VpcConnectionProperties) *UpdateDataSourceInput { + s.VpcConnectionProperties = v + return s +} + +type UpdateDataSourceOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the dashboard. - DashboardArn *string `type:"string"` + // The Amazon Resource Name (ARN) of the data source. + Arn *string `type:"string"` - // The ID for the dashboard. - DashboardId *string `min:"1" type:"string"` + // The ID of the data source. This ID is unique per AWS Region for each AWS + // account. + DataSourceId *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` + + // The update status of the data source's last update. + UpdateStatus *string `type:"string" enum:"ResourceStatus"` } // String returns the string representation -func (s UpdateDashboardPublishedVersionOutput) String() string { +func (s UpdateDataSourceOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDashboardPublishedVersionOutput) GoString() string { +func (s UpdateDataSourceOutput) GoString() string { return s.String() } -// SetDashboardArn sets the DashboardArn field's value. -func (s *UpdateDashboardPublishedVersionOutput) SetDashboardArn(v string) *UpdateDashboardPublishedVersionOutput { - s.DashboardArn = &v +// SetArn sets the Arn field's value. +func (s *UpdateDataSourceOutput) SetArn(v string) *UpdateDataSourceOutput { + s.Arn = &v return s } -// SetDashboardId sets the DashboardId field's value. -func (s *UpdateDashboardPublishedVersionOutput) SetDashboardId(v string) *UpdateDashboardPublishedVersionOutput { - s.DashboardId = &v +// SetDataSourceId sets the DataSourceId field's value. +func (s *UpdateDataSourceOutput) SetDataSourceId(v string) *UpdateDataSourceOutput { + s.DataSourceId = &v return s } // SetRequestId sets the RequestId field's value. 
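// A minimal usage sketch for the UpdateDataSource shapes above, assuming an
// Athena-backed source; the region, account ID, data source ID, and workgroup
// values are placeholders, not anything defined elsewhere in this change.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := quicksight.New(sess)

	// Each generated setter returns the input, so calls can be chained.
	input := &quicksight.UpdateDataSourceInput{}
	input.SetAwsAccountId("111122223333").
		SetDataSourceId("example-data-source").
		SetName("example").
		SetDataSourceParameters(&quicksight.DataSourceParameters{
			AthenaParameters: &quicksight.AthenaParameters{
				WorkGroup: aws.String("primary"),
			},
		})

	// Validate performs the same client-side checks the SDK runs before sending.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}

	out, err := svc.UpdateDataSource(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Arn), aws.StringValue(out.UpdateStatus))
}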
-func (s *UpdateDashboardPublishedVersionOutput) SetRequestId(v string) *UpdateDashboardPublishedVersionOutput { +func (s *UpdateDataSourceOutput) SetRequestId(v string) *UpdateDataSourceOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *UpdateDashboardPublishedVersionOutput) SetStatus(v int64) *UpdateDashboardPublishedVersionOutput { +func (s *UpdateDataSourceOutput) SetStatus(v int64) *UpdateDataSourceOutput { s.Status = &v return s } -type UpdateDataSetInput struct { +// SetUpdateStatus sets the UpdateStatus field's value. +func (s *UpdateDataSourceOutput) SetUpdateStatus(v string) *UpdateDataSourceOutput { + s.UpdateStatus = &v + return s +} + +type UpdateDataSourcePermissionsInput struct { _ struct{} `type:"structure"` // The AWS account ID. @@ -21404,120 +31233,70 @@ type UpdateDataSetInput struct { // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // Groupings of columns that work together in certain QuickSight features. Currently, - // only geospatial hierarchy is supported. - ColumnGroups []*ColumnGroup `min:"1" type:"list"` - - // The ID for the dataset that you want to update. This ID is unique per AWS - // Region for each AWS account. - // - // DataSetId is a required field - DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` - - // Indicates whether you want to import the data into SPICE. - // - // ImportMode is a required field - ImportMode *string `type:"string" required:"true" enum:"DataSetImportMode"` - - // Configures the combination and transformation of the data from the physical - // tables. - LogicalTableMap map[string]*LogicalTable `min:"1" type:"map"` - - // The display name for the dataset. + // The ID of the data source. This ID is unique per AWS Region for each AWS + // account. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` + // DataSourceId is a required field + DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` - // Declares the physical tables that are available in the underlying data sources. - // - // PhysicalTableMap is a required field - PhysicalTableMap map[string]*PhysicalTable `min:"1" type:"map" required:"true"` + // A list of resource permissions that you want to grant on the data source. + GrantPermissions []*ResourcePermission `min:"1" type:"list"` - // The row-level security configuration for the data you want to create. - RowLevelPermissionDataSet *RowLevelPermissionDataSet `type:"structure"` + // A list of resource permissions that you want to revoke on the data source. + RevokePermissions []*ResourcePermission `min:"1" type:"list"` } // String returns the string representation -func (s UpdateDataSetInput) String() string { +func (s UpdateDataSourcePermissionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDataSetInput) GoString() string { +func (s UpdateDataSourcePermissionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateDataSetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDataSetInput"} +func (s *UpdateDataSourcePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateDataSourcePermissionsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.ColumnGroups != nil && len(s.ColumnGroups) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ColumnGroups", 1)) - } - if s.DataSetId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetId")) - } - if s.DataSetId != nil && len(*s.DataSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) - } - if s.ImportMode == nil { - invalidParams.Add(request.NewErrParamRequired("ImportMode")) - } - if s.LogicalTableMap != nil && len(s.LogicalTableMap) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogicalTableMap", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + if s.DataSourceId == nil { + invalidParams.Add(request.NewErrParamRequired("DataSourceId")) } - if s.PhysicalTableMap == nil { - invalidParams.Add(request.NewErrParamRequired("PhysicalTableMap")) + if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) } - if s.PhysicalTableMap != nil && len(s.PhysicalTableMap) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PhysicalTableMap", 1)) + if s.GrantPermissions != nil && len(s.GrantPermissions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GrantPermissions", 1)) } - if s.ColumnGroups != nil { - for i, v := range s.ColumnGroups { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ColumnGroups", i), err.(request.ErrInvalidParams)) - } - } + if s.RevokePermissions != nil && len(s.RevokePermissions) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RevokePermissions", 1)) } - if s.LogicalTableMap != nil { - for i, v := range s.LogicalTableMap { + if s.GrantPermissions != nil { + for i, v := range s.GrantPermissions { if v == nil { continue } if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LogicalTableMap", i), err.(request.ErrInvalidParams)) + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GrantPermissions", i), err.(request.ErrInvalidParams)) } } } - if s.PhysicalTableMap != nil { - for i, v := range s.PhysicalTableMap { + if s.RevokePermissions != nil { + for i, v := range s.RevokePermissions { if v == nil { continue } if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PhysicalTableMap", i), err.(request.ErrInvalidParams)) + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RevokePermissions", i), err.(request.ErrInvalidParams)) } } } - if s.RowLevelPermissionDataSet != nil { - if err := s.RowLevelPermissionDataSet.Validate(); err != nil { - invalidParams.AddNested("RowLevelPermissionDataSet", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -21526,70 +31305,38 @@ func (s *UpdateDataSetInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. 
-func (s *UpdateDataSetInput) SetAwsAccountId(v string) *UpdateDataSetInput { +func (s *UpdateDataSourcePermissionsInput) SetAwsAccountId(v string) *UpdateDataSourcePermissionsInput { s.AwsAccountId = &v return s } -// SetColumnGroups sets the ColumnGroups field's value. -func (s *UpdateDataSetInput) SetColumnGroups(v []*ColumnGroup) *UpdateDataSetInput { - s.ColumnGroups = v - return s -} - -// SetDataSetId sets the DataSetId field's value. -func (s *UpdateDataSetInput) SetDataSetId(v string) *UpdateDataSetInput { - s.DataSetId = &v - return s -} - -// SetImportMode sets the ImportMode field's value. -func (s *UpdateDataSetInput) SetImportMode(v string) *UpdateDataSetInput { - s.ImportMode = &v - return s -} - -// SetLogicalTableMap sets the LogicalTableMap field's value. -func (s *UpdateDataSetInput) SetLogicalTableMap(v map[string]*LogicalTable) *UpdateDataSetInput { - s.LogicalTableMap = v - return s -} - -// SetName sets the Name field's value. -func (s *UpdateDataSetInput) SetName(v string) *UpdateDataSetInput { - s.Name = &v +// SetDataSourceId sets the DataSourceId field's value. +func (s *UpdateDataSourcePermissionsInput) SetDataSourceId(v string) *UpdateDataSourcePermissionsInput { + s.DataSourceId = &v return s } -// SetPhysicalTableMap sets the PhysicalTableMap field's value. -func (s *UpdateDataSetInput) SetPhysicalTableMap(v map[string]*PhysicalTable) *UpdateDataSetInput { - s.PhysicalTableMap = v +// SetGrantPermissions sets the GrantPermissions field's value. +func (s *UpdateDataSourcePermissionsInput) SetGrantPermissions(v []*ResourcePermission) *UpdateDataSourcePermissionsInput { + s.GrantPermissions = v return s } -// SetRowLevelPermissionDataSet sets the RowLevelPermissionDataSet field's value. -func (s *UpdateDataSetInput) SetRowLevelPermissionDataSet(v *RowLevelPermissionDataSet) *UpdateDataSetInput { - s.RowLevelPermissionDataSet = v +// SetRevokePermissions sets the RevokePermissions field's value. +func (s *UpdateDataSourcePermissionsInput) SetRevokePermissions(v []*ResourcePermission) *UpdateDataSourcePermissionsInput { + s.RevokePermissions = v return s } -type UpdateDataSetOutput struct { +type UpdateDataSourcePermissionsOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the dataset. - Arn *string `type:"string"` - - // The ID for the dataset that you want to create. This ID is unique per AWS - // Region for each AWS account. - DataSetId *string `type:"string"` - - // The ARN for the ingestion, which is triggered as a result of dataset creation - // if the import mode is SPICE. - IngestionArn *string `type:"string"` + // The Amazon Resource Name (ARN) of the data source. + DataSourceArn *string `type:"string"` - // The ID of the ingestion, which is triggered as a result of dataset creation - // if the import mode is SPICE. - IngestionId *string `type:"string"` + // The ID of the data source. This ID is unique per AWS Region for each AWS + // account. + DataSourceId *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -21599,122 +31346,95 @@ type UpdateDataSetOutput struct { } // String returns the string representation -func (s UpdateDataSetOutput) String() string { +func (s UpdateDataSourcePermissionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDataSetOutput) GoString() string { +func (s UpdateDataSourcePermissionsOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. 
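// A sketch of the permissions variant above: granting read-only access on a
// data source to one QuickSight user. The principal ARN, action names, and
// IDs are illustrative placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	// Region and credentials come from the environment or shared AWS config.
	svc := quicksight.New(session.Must(session.NewSession()))

	input := &quicksight.UpdateDataSourcePermissionsInput{}
	input.SetAwsAccountId("111122223333").
		SetDataSourceId("example-data-source").
		SetGrantPermissions([]*quicksight.ResourcePermission{{
			Principal: aws.String("arn:aws:quicksight:us-east-1:111122223333:user/default/example"),
			Actions: aws.StringSlice([]string{
				"quicksight:DescribeDataSource",
				"quicksight:DescribeDataSourcePermissions",
				"quicksight:PassDataSource",
			}),
		}})

	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	out, err := svc.UpdateDataSourcePermissions(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.DataSourceArn))
}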
-func (s *UpdateDataSetOutput) SetArn(v string) *UpdateDataSetOutput { - s.Arn = &v - return s -} - -// SetDataSetId sets the DataSetId field's value. -func (s *UpdateDataSetOutput) SetDataSetId(v string) *UpdateDataSetOutput { - s.DataSetId = &v - return s -} - -// SetIngestionArn sets the IngestionArn field's value. -func (s *UpdateDataSetOutput) SetIngestionArn(v string) *UpdateDataSetOutput { - s.IngestionArn = &v +// SetDataSourceArn sets the DataSourceArn field's value. +func (s *UpdateDataSourcePermissionsOutput) SetDataSourceArn(v string) *UpdateDataSourcePermissionsOutput { + s.DataSourceArn = &v return s } -// SetIngestionId sets the IngestionId field's value. -func (s *UpdateDataSetOutput) SetIngestionId(v string) *UpdateDataSetOutput { - s.IngestionId = &v +// SetDataSourceId sets the DataSourceId field's value. +func (s *UpdateDataSourcePermissionsOutput) SetDataSourceId(v string) *UpdateDataSourcePermissionsOutput { + s.DataSourceId = &v return s } // SetRequestId sets the RequestId field's value. -func (s *UpdateDataSetOutput) SetRequestId(v string) *UpdateDataSetOutput { +func (s *UpdateDataSourcePermissionsOutput) SetRequestId(v string) *UpdateDataSourcePermissionsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *UpdateDataSetOutput) SetStatus(v int64) *UpdateDataSetOutput { +func (s *UpdateDataSourcePermissionsOutput) SetStatus(v int64) *UpdateDataSourcePermissionsOutput { s.Status = &v return s } -type UpdateDataSetPermissionsInput struct { +type UpdateGroupInput struct { _ struct{} `type:"structure"` - // The AWS account ID. + // The ID for the AWS account that the group is in. Currently, you use the ID + // for the AWS account that contains your Amazon QuickSight account. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID for the dataset whose permissions you want to update. This ID is unique - // per AWS Region for each AWS account. - // - // DataSetId is a required field - DataSetId *string `location:"uri" locationName:"DataSetId" type:"string" required:"true"` + // The description for the group that you want to update. + Description *string `min:"1" type:"string"` - // The resource permissions that you want to grant to the dataset. - GrantPermissions []*ResourcePermission `min:"1" type:"list"` + // The name of the group that you want to update. + // + // GroupName is a required field + GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` - // The resource permissions that you want to revoke from the dataset. - RevokePermissions []*ResourcePermission `min:"1" type:"list"` + // The namespace. Currently, you should set this to default. + // + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` } // String returns the string representation -func (s UpdateDataSetPermissionsInput) String() string { +func (s UpdateGroupInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDataSetPermissionsInput) GoString() string { +func (s UpdateGroupInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateDataSetPermissionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDataSetPermissionsInput"} +func (s *UpdateGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGroupInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSetId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSetId")) - } - if s.DataSetId != nil && len(*s.DataSetId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSetId", 1)) + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) } - if s.GrantPermissions != nil && len(s.GrantPermissions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrantPermissions", 1)) + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) } - if s.RevokePermissions != nil && len(s.RevokePermissions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RevokePermissions", 1)) + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) } - if s.GrantPermissions != nil { - for i, v := range s.GrantPermissions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GrantPermissions", i), err.(request.ErrInvalidParams)) - } - } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) } - if s.RevokePermissions != nil { - for i, v := range s.RevokePermissions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RevokePermissions", i), err.(request.ErrInvalidParams)) - } - } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } if invalidParams.Len() > 0 { @@ -21724,38 +31444,34 @@ func (s *UpdateDataSetPermissionsInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *UpdateDataSetPermissionsInput) SetAwsAccountId(v string) *UpdateDataSetPermissionsInput { +func (s *UpdateGroupInput) SetAwsAccountId(v string) *UpdateGroupInput { s.AwsAccountId = &v return s } -// SetDataSetId sets the DataSetId field's value. -func (s *UpdateDataSetPermissionsInput) SetDataSetId(v string) *UpdateDataSetPermissionsInput { - s.DataSetId = &v +// SetDescription sets the Description field's value. +func (s *UpdateGroupInput) SetDescription(v string) *UpdateGroupInput { + s.Description = &v return s } -// SetGrantPermissions sets the GrantPermissions field's value. -func (s *UpdateDataSetPermissionsInput) SetGrantPermissions(v []*ResourcePermission) *UpdateDataSetPermissionsInput { - s.GrantPermissions = v +// SetGroupName sets the GroupName field's value. +func (s *UpdateGroupInput) SetGroupName(v string) *UpdateGroupInput { + s.GroupName = &v return s } -// SetRevokePermissions sets the RevokePermissions field's value. -func (s *UpdateDataSetPermissionsInput) SetRevokePermissions(v []*ResourcePermission) *UpdateDataSetPermissionsInput { - s.RevokePermissions = v +// SetNamespace sets the Namespace field's value. 
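// A sketch of updating a group's description with the UpdateGroupInput shape
// above. The account ID and group name are placeholders; the namespace is
// "default", per the field documentation.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	svc := quicksight.New(session.Must(session.NewSession()))

	input := &quicksight.UpdateGroupInput{}
	input.SetAwsAccountId("111122223333").
		SetGroupName("example-group").
		SetNamespace("default").
		SetDescription("Updated description")

	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	out, err := svc.UpdateGroup(input)
	if err != nil {
		log.Fatal(err)
	}
	// The response carries the updated group alongside the request metadata.
	if out.Group != nil {
		fmt.Println(aws.StringValue(out.Group.Arn))
	}
}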
+func (s *UpdateGroupInput) SetNamespace(v string) *UpdateGroupInput { + s.Namespace = &v return s } -type UpdateDataSetPermissionsOutput struct { +type UpdateGroupOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the dataset. - DataSetArn *string `type:"string"` - - // The ID for the dataset whose permissions you want to update. This ID is unique - // per AWS Region for each AWS account. - DataSetId *string `type:"string"` + // The name of the group. + Group *Group `type:"structure"` // The AWS request ID for this operation. RequestId *string `type:"string"` @@ -21765,119 +31481,101 @@ type UpdateDataSetPermissionsOutput struct { } // String returns the string representation -func (s UpdateDataSetPermissionsOutput) String() string { +func (s UpdateGroupOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDataSetPermissionsOutput) GoString() string { +func (s UpdateGroupOutput) GoString() string { return s.String() } -// SetDataSetArn sets the DataSetArn field's value. -func (s *UpdateDataSetPermissionsOutput) SetDataSetArn(v string) *UpdateDataSetPermissionsOutput { - s.DataSetArn = &v - return s -} - -// SetDataSetId sets the DataSetId field's value. -func (s *UpdateDataSetPermissionsOutput) SetDataSetId(v string) *UpdateDataSetPermissionsOutput { - s.DataSetId = &v +// SetGroup sets the Group field's value. +func (s *UpdateGroupOutput) SetGroup(v *Group) *UpdateGroupOutput { + s.Group = v return s } // SetRequestId sets the RequestId field's value. -func (s *UpdateDataSetPermissionsOutput) SetRequestId(v string) *UpdateDataSetPermissionsOutput { +func (s *UpdateGroupOutput) SetRequestId(v string) *UpdateGroupOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *UpdateDataSetPermissionsOutput) SetStatus(v int64) *UpdateDataSetPermissionsOutput { +func (s *UpdateGroupOutput) SetStatus(v int64) *UpdateGroupOutput { s.Status = &v return s } -type UpdateDataSourceInput struct { +type UpdateIAMPolicyAssignmentInput struct { _ struct{} `type:"structure"` - // The AWS account ID. + // The name of the assignment. This name must be unique within an AWS account. // - // AwsAccountId is a required field - AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + // AssignmentName is a required field + AssignmentName *string `location:"uri" locationName:"AssignmentName" min:"1" type:"string" required:"true"` - // The credentials that QuickSight that uses to connect to your underlying source. - // Currently, only credentials based on user name and password are supported. - Credentials *DataSourceCredentials `type:"structure" sensitive:"true"` + // The status of the assignment. Possible values are as follows: + // + // * ENABLED - Anything specified in this assignment is used when creating + // the data source. + // + // * DISABLED - This assignment isn't used when creating the data source. + // + // * DRAFT - This assignment is an unfinished draft and isn't used when creating + // the data source. + AssignmentStatus *string `type:"string" enum:"AssignmentStatus"` - // The ID of the data source. This ID is unique per AWS Region for each AWS - // account. + // The ID of the AWS account that contains the IAM policy assignment. 
// - // DataSourceId is a required field - DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` + // AwsAccountId is a required field + AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The parameters that QuickSight uses to connect to your underlying source. - DataSourceParameters *DataSourceParameters `type:"structure"` + // The QuickSight users, groups, or both that you want to assign the policy + // to. + Identities map[string][]*string `type:"map"` - // A display name for the data source. + // The namespace of the assignment. // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` - - // Secure Socket Layer (SSL) properties that apply when QuickSight connects - // to your underlying source. - SslProperties *SslProperties `type:"structure"` + // Namespace is a required field + Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` - // Use this parameter only when you want QuickSight to use a VPC connection - // when connecting to your underlying source. - VpcConnectionProperties *VpcConnectionProperties `type:"structure"` + // The ARN for the IAM policy to apply to the QuickSight users and groups specified + // in this assignment. + PolicyArn *string `type:"string"` } // String returns the string representation -func (s UpdateDataSourceInput) String() string { +func (s UpdateIAMPolicyAssignmentInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDataSourceInput) GoString() string { +func (s UpdateIAMPolicyAssignmentInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateDataSourceInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDataSourceInput"} +func (s *UpdateIAMPolicyAssignmentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateIAMPolicyAssignmentInput"} + if s.AssignmentName == nil { + invalidParams.Add(request.NewErrParamRequired("AssignmentName")) + } + if s.AssignmentName != nil && len(*s.AssignmentName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssignmentName", 1)) + } if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSourceId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSourceId")) - } - if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Name", 1)) - } - if s.Credentials != nil { - if err := s.Credentials.Validate(); err != nil { - invalidParams.AddNested("Credentials", err.(request.ErrInvalidParams)) - } - } - if s.DataSourceParameters != nil { - if err := s.DataSourceParameters.Validate(); err != nil { - invalidParams.AddNested("DataSourceParameters", err.(request.ErrInvalidParams)) - } + if s.Namespace == nil { + invalidParams.Add(request.NewErrParamRequired("Namespace")) } - if s.VpcConnectionProperties != nil { - if err := s.VpcConnectionProperties.Validate(); err != nil { - invalidParams.AddNested("VpcConnectionProperties", err.(request.ErrInvalidParams)) - } + if s.Namespace != nil && len(*s.Namespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) } if invalidParams.Len() > 0 { @@ -21886,179 +31584,191 @@ func (s *UpdateDataSourceInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *UpdateDataSourceInput) SetAwsAccountId(v string) *UpdateDataSourceInput { - s.AwsAccountId = &v - return s -} - -// SetCredentials sets the Credentials field's value. -func (s *UpdateDataSourceInput) SetCredentials(v *DataSourceCredentials) *UpdateDataSourceInput { - s.Credentials = v +// SetAssignmentName sets the AssignmentName field's value. +func (s *UpdateIAMPolicyAssignmentInput) SetAssignmentName(v string) *UpdateIAMPolicyAssignmentInput { + s.AssignmentName = &v return s } -// SetDataSourceId sets the DataSourceId field's value. -func (s *UpdateDataSourceInput) SetDataSourceId(v string) *UpdateDataSourceInput { - s.DataSourceId = &v +// SetAssignmentStatus sets the AssignmentStatus field's value. +func (s *UpdateIAMPolicyAssignmentInput) SetAssignmentStatus(v string) *UpdateIAMPolicyAssignmentInput { + s.AssignmentStatus = &v return s } -// SetDataSourceParameters sets the DataSourceParameters field's value. -func (s *UpdateDataSourceInput) SetDataSourceParameters(v *DataSourceParameters) *UpdateDataSourceInput { - s.DataSourceParameters = v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *UpdateIAMPolicyAssignmentInput) SetAwsAccountId(v string) *UpdateIAMPolicyAssignmentInput { + s.AwsAccountId = &v return s } -// SetName sets the Name field's value. -func (s *UpdateDataSourceInput) SetName(v string) *UpdateDataSourceInput { - s.Name = &v +// SetIdentities sets the Identities field's value. 
+func (s *UpdateIAMPolicyAssignmentInput) SetIdentities(v map[string][]*string) *UpdateIAMPolicyAssignmentInput { + s.Identities = v return s } -// SetSslProperties sets the SslProperties field's value. -func (s *UpdateDataSourceInput) SetSslProperties(v *SslProperties) *UpdateDataSourceInput { - s.SslProperties = v +// SetNamespace sets the Namespace field's value. +func (s *UpdateIAMPolicyAssignmentInput) SetNamespace(v string) *UpdateIAMPolicyAssignmentInput { + s.Namespace = &v return s } -// SetVpcConnectionProperties sets the VpcConnectionProperties field's value. -func (s *UpdateDataSourceInput) SetVpcConnectionProperties(v *VpcConnectionProperties) *UpdateDataSourceInput { - s.VpcConnectionProperties = v +// SetPolicyArn sets the PolicyArn field's value. +func (s *UpdateIAMPolicyAssignmentInput) SetPolicyArn(v string) *UpdateIAMPolicyAssignmentInput { + s.PolicyArn = &v return s } -type UpdateDataSourceOutput struct { +type UpdateIAMPolicyAssignmentOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the data source. - Arn *string `type:"string"` + // The ID of the assignment. + AssignmentId *string `type:"string"` - // The ID of the data source. This ID is unique per AWS Region for each AWS - // account. - DataSourceId *string `type:"string"` + // The name of the assignment. + AssignmentName *string `min:"1" type:"string"` + + // The status of the assignment. Possible values are as follows: + // + // * ENABLED - Anything specified in this assignment is used when creating + // the data source. + // + // * DISABLED - This assignment isn't used when creating the data source. + // + // * DRAFT - This assignment is an unfinished draft and isn't used when creating + // the data source. + AssignmentStatus *string `type:"string" enum:"AssignmentStatus"` + + // The QuickSight users, groups, or both that the IAM policy is assigned to. + Identities map[string][]*string `type:"map"` + + // The ARN for the IAM policy applied to the QuickSight users and groups specified + // in this assignment. + PolicyArn *string `type:"string"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` - - // The update status of the data source's last update. - UpdateStatus *string `type:"string" enum:"ResourceStatus"` } // String returns the string representation -func (s UpdateDataSourceOutput) String() string { +func (s UpdateIAMPolicyAssignmentOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDataSourceOutput) GoString() string { +func (s UpdateIAMPolicyAssignmentOutput) GoString() string { return s.String() } -// SetArn sets the Arn field's value. -func (s *UpdateDataSourceOutput) SetArn(v string) *UpdateDataSourceOutput { - s.Arn = &v +// SetAssignmentId sets the AssignmentId field's value. +func (s *UpdateIAMPolicyAssignmentOutput) SetAssignmentId(v string) *UpdateIAMPolicyAssignmentOutput { + s.AssignmentId = &v return s } -// SetDataSourceId sets the DataSourceId field's value. -func (s *UpdateDataSourceOutput) SetDataSourceId(v string) *UpdateDataSourceOutput { - s.DataSourceId = &v +// SetAssignmentName sets the AssignmentName field's value. +func (s *UpdateIAMPolicyAssignmentOutput) SetAssignmentName(v string) *UpdateIAMPolicyAssignmentOutput { + s.AssignmentName = &v + return s +} + +// SetAssignmentStatus sets the AssignmentStatus field's value. 
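// A sketch of the UpdateIAMPolicyAssignment call, roughly the shape of request
// the new QuickSight IAM policy assignment resource in this change issues on
// update. The assignment name, policy ARN, and identity names are placeholders;
// the identity map keys are identity types such as "user".
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	svc := quicksight.New(session.Must(session.NewSession()))

	input := &quicksight.UpdateIAMPolicyAssignmentInput{}
	input.SetAwsAccountId("111122223333").
		SetNamespace("default").
		SetAssignmentName("example-assignment").
		SetAssignmentStatus("ENABLED"). // or DISABLED / DRAFT, per the enum documented above
		SetPolicyArn("arn:aws:iam::111122223333:policy/example").
		SetIdentities(map[string][]*string{
			"user": aws.StringSlice([]string{"example-user"}),
		})

	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	out, err := svc.UpdateIAMPolicyAssignment(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.AssignmentId), aws.StringValue(out.AssignmentStatus))
}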
+func (s *UpdateIAMPolicyAssignmentOutput) SetAssignmentStatus(v string) *UpdateIAMPolicyAssignmentOutput { + s.AssignmentStatus = &v + return s +} + +// SetIdentities sets the Identities field's value. +func (s *UpdateIAMPolicyAssignmentOutput) SetIdentities(v map[string][]*string) *UpdateIAMPolicyAssignmentOutput { + s.Identities = v + return s +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *UpdateIAMPolicyAssignmentOutput) SetPolicyArn(v string) *UpdateIAMPolicyAssignmentOutput { + s.PolicyArn = &v return s } // SetRequestId sets the RequestId field's value. -func (s *UpdateDataSourceOutput) SetRequestId(v string) *UpdateDataSourceOutput { +func (s *UpdateIAMPolicyAssignmentOutput) SetRequestId(v string) *UpdateIAMPolicyAssignmentOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *UpdateDataSourceOutput) SetStatus(v int64) *UpdateDataSourceOutput { +func (s *UpdateIAMPolicyAssignmentOutput) SetStatus(v int64) *UpdateIAMPolicyAssignmentOutput { s.Status = &v return s } -// SetUpdateStatus sets the UpdateStatus field's value. -func (s *UpdateDataSourceOutput) SetUpdateStatus(v string) *UpdateDataSourceOutput { - s.UpdateStatus = &v - return s -} - -type UpdateDataSourcePermissionsInput struct { +type UpdateTemplateAliasInput struct { _ struct{} `type:"structure"` - // The AWS account ID. + // The alias of the template that you want to update. If you name a specific + // alias, you update the version that the alias points to. You can specify the + // latest version of the template by providing the keyword $LATEST in the AliasName + // parameter. The keyword $PUBLISHED doesn't apply to templates. + // + // AliasName is a required field + AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` + + // The ID of the AWS account that contains the template alias that you're updating. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID of the data source. This ID is unique per AWS Region for each AWS - // account. + // The ID for the template. // - // DataSourceId is a required field - DataSourceId *string `location:"uri" locationName:"DataSourceId" type:"string" required:"true"` - - // A list of resource permissions that you want to grant on the data source. - GrantPermissions []*ResourcePermission `min:"1" type:"list"` + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` - // A list of resource permissions that you want to revoke on the data source. - RevokePermissions []*ResourcePermission `min:"1" type:"list"` + // The version number of the template. + // + // TemplateVersionNumber is a required field + TemplateVersionNumber *int64 `min:"1" type:"long" required:"true"` } // String returns the string representation -func (s UpdateDataSourcePermissionsInput) String() string { +func (s UpdateTemplateAliasInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDataSourcePermissionsInput) GoString() string { +func (s UpdateTemplateAliasInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateDataSourcePermissionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateDataSourcePermissionsInput"} +func (s *UpdateTemplateAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTemplateAliasInput"} + if s.AliasName == nil { + invalidParams.Add(request.NewErrParamRequired("AliasName")) + } + if s.AliasName != nil && len(*s.AliasName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasName", 1)) + } if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.DataSourceId == nil { - invalidParams.Add(request.NewErrParamRequired("DataSourceId")) - } - if s.DataSourceId != nil && len(*s.DataSourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DataSourceId", 1)) - } - if s.GrantPermissions != nil && len(s.GrantPermissions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrantPermissions", 1)) + if s.TemplateId == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateId")) } - if s.RevokePermissions != nil && len(s.RevokePermissions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RevokePermissions", 1)) + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) } - if s.GrantPermissions != nil { - for i, v := range s.GrantPermissions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GrantPermissions", i), err.(request.ErrInvalidParams)) - } - } + if s.TemplateVersionNumber == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateVersionNumber")) } - if s.RevokePermissions != nil { - for i, v := range s.RevokePermissions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RevokePermissions", i), err.(request.ErrInvalidParams)) - } - } + if s.TemplateVersionNumber != nil && *s.TemplateVersionNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("TemplateVersionNumber", 1)) } if invalidParams.Len() > 0 { @@ -22067,137 +31777,147 @@ func (s *UpdateDataSourcePermissionsInput) Validate() error { return nil } -// SetAwsAccountId sets the AwsAccountId field's value. -func (s *UpdateDataSourcePermissionsInput) SetAwsAccountId(v string) *UpdateDataSourcePermissionsInput { - s.AwsAccountId = &v +// SetAliasName sets the AliasName field's value. +func (s *UpdateTemplateAliasInput) SetAliasName(v string) *UpdateTemplateAliasInput { + s.AliasName = &v return s } -// SetDataSourceId sets the DataSourceId field's value. -func (s *UpdateDataSourcePermissionsInput) SetDataSourceId(v string) *UpdateDataSourcePermissionsInput { - s.DataSourceId = &v +// SetAwsAccountId sets the AwsAccountId field's value. +func (s *UpdateTemplateAliasInput) SetAwsAccountId(v string) *UpdateTemplateAliasInput { + s.AwsAccountId = &v return s } -// SetGrantPermissions sets the GrantPermissions field's value. -func (s *UpdateDataSourcePermissionsInput) SetGrantPermissions(v []*ResourcePermission) *UpdateDataSourcePermissionsInput { - s.GrantPermissions = v +// SetTemplateId sets the TemplateId field's value. +func (s *UpdateTemplateAliasInput) SetTemplateId(v string) *UpdateTemplateAliasInput { + s.TemplateId = &v return s } -// SetRevokePermissions sets the RevokePermissions field's value. 
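// A sketch of repointing a template alias at a different version using the
// UpdateTemplateAliasInput shape above. The IDs, alias name, and version number
// are placeholders; per the docs above, a named alias or $LATEST can be updated,
// while $PUBLISHED does not apply to templates.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	svc := quicksight.New(session.Must(session.NewSession()))

	input := &quicksight.UpdateTemplateAliasInput{}
	input.SetAwsAccountId("111122223333").
		SetTemplateId("example-template").
		SetAliasName("production").
		SetTemplateVersionNumber(2)

	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	out, err := svc.UpdateTemplateAlias(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.RequestId), aws.Int64Value(out.Status))
}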
-func (s *UpdateDataSourcePermissionsInput) SetRevokePermissions(v []*ResourcePermission) *UpdateDataSourcePermissionsInput { - s.RevokePermissions = v +// SetTemplateVersionNumber sets the TemplateVersionNumber field's value. +func (s *UpdateTemplateAliasInput) SetTemplateVersionNumber(v int64) *UpdateTemplateAliasInput { + s.TemplateVersionNumber = &v return s } -type UpdateDataSourcePermissionsOutput struct { +type UpdateTemplateAliasOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the data source. - DataSourceArn *string `type:"string"` - - // The ID of the data source. This ID is unique per AWS Region for each AWS - // account. - DataSourceId *string `type:"string"` - // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` + + // The template alias. + TemplateAlias *TemplateAlias `type:"structure"` } // String returns the string representation -func (s UpdateDataSourcePermissionsOutput) String() string { +func (s UpdateTemplateAliasOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateDataSourcePermissionsOutput) GoString() string { +func (s UpdateTemplateAliasOutput) GoString() string { return s.String() } -// SetDataSourceArn sets the DataSourceArn field's value. -func (s *UpdateDataSourcePermissionsOutput) SetDataSourceArn(v string) *UpdateDataSourcePermissionsOutput { - s.DataSourceArn = &v - return s -} - -// SetDataSourceId sets the DataSourceId field's value. -func (s *UpdateDataSourcePermissionsOutput) SetDataSourceId(v string) *UpdateDataSourcePermissionsOutput { - s.DataSourceId = &v - return s -} - // SetRequestId sets the RequestId field's value. -func (s *UpdateDataSourcePermissionsOutput) SetRequestId(v string) *UpdateDataSourcePermissionsOutput { +func (s *UpdateTemplateAliasOutput) SetRequestId(v string) *UpdateTemplateAliasOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *UpdateDataSourcePermissionsOutput) SetStatus(v int64) *UpdateDataSourcePermissionsOutput { +func (s *UpdateTemplateAliasOutput) SetStatus(v int64) *UpdateTemplateAliasOutput { s.Status = &v return s } -type UpdateGroupInput struct { +// SetTemplateAlias sets the TemplateAlias field's value. +func (s *UpdateTemplateAliasOutput) SetTemplateAlias(v *TemplateAlias) *UpdateTemplateAliasOutput { + s.TemplateAlias = v + return s +} + +type UpdateTemplateInput struct { _ struct{} `type:"structure"` - // The ID for the AWS account that the group is in. Currently, you use the ID - // for the AWS account that contains your Amazon QuickSight account. + // The ID of the AWS account that contains the template that you're updating. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The description for the group that you want to update. - Description *string `min:"1" type:"string"` + // The name for the template. + Name *string `min:"1" type:"string"` - // The name of the group that you want to update. + // The entity that you are using as a source when you update the template. In + // SourceEntity, you specify the type of object you're using as source: SourceTemplate + // for a template or SourceAnalysis for an analysis. Both of these require an + // Amazon Resource Name (ARN). For SourceTemplate, specify the ARN of the source + // template. 
For SourceAnalysis, specify the ARN of the source analysis. The + // SourceTemplate ARN can contain any AWS Account and any QuickSight-supported + // AWS Region. // - // GroupName is a required field - GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + // Use the DataSetReferences entity within SourceTemplate or SourceAnalysis + // to list the replacement datasets for the placeholders listed in the original. + // The schema in each dataset must match its placeholder. + // + // SourceEntity is a required field + SourceEntity *TemplateSourceEntity `type:"structure" required:"true"` - // The namespace. Currently, you should set this to default. + // The ID for the template. // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + + // A description of the current template version that is being updated. Every + // time you call UpdateTemplate, you create a new version of the template. Each + // version of the template maintains a description of the version in the VersionDescription + // field. + VersionDescription *string `min:"1" type:"string"` } // String returns the string representation -func (s UpdateGroupInput) String() string { +func (s UpdateTemplateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateGroupInput) GoString() string { +func (s UpdateTemplateInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateGroupInput"} +func (s *UpdateTemplateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTemplateInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) + if s.SourceEntity == nil { + invalidParams.Add(request.NewErrParamRequired("SourceEntity")) } - if s.GroupName != nil && len(*s.GroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + if s.TemplateId == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateId")) } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VersionDescription", 1)) + } + if s.SourceEntity != nil { + if err := s.SourceEntity.Validate(); err != nil { + invalidParams.AddNested("SourceEntity", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -22207,138 +31927,168 @@ func (s *UpdateGroupInput) Validate() error { } // SetAwsAccountId sets 
the AwsAccountId field's value. -func (s *UpdateGroupInput) SetAwsAccountId(v string) *UpdateGroupInput { +func (s *UpdateTemplateInput) SetAwsAccountId(v string) *UpdateTemplateInput { s.AwsAccountId = &v return s } -// SetDescription sets the Description field's value. -func (s *UpdateGroupInput) SetDescription(v string) *UpdateGroupInput { - s.Description = &v +// SetName sets the Name field's value. +func (s *UpdateTemplateInput) SetName(v string) *UpdateTemplateInput { + s.Name = &v return s } -// SetGroupName sets the GroupName field's value. -func (s *UpdateGroupInput) SetGroupName(v string) *UpdateGroupInput { - s.GroupName = &v +// SetSourceEntity sets the SourceEntity field's value. +func (s *UpdateTemplateInput) SetSourceEntity(v *TemplateSourceEntity) *UpdateTemplateInput { + s.SourceEntity = v return s } -// SetNamespace sets the Namespace field's value. -func (s *UpdateGroupInput) SetNamespace(v string) *UpdateGroupInput { - s.Namespace = &v +// SetTemplateId sets the TemplateId field's value. +func (s *UpdateTemplateInput) SetTemplateId(v string) *UpdateTemplateInput { + s.TemplateId = &v return s } -type UpdateGroupOutput struct { +// SetVersionDescription sets the VersionDescription field's value. +func (s *UpdateTemplateInput) SetVersionDescription(v string) *UpdateTemplateInput { + s.VersionDescription = &v + return s +} + +type UpdateTemplateOutput struct { _ struct{} `type:"structure"` - // The name of the group. - Group *Group `type:"structure"` + // The Amazon Resource Name (ARN) for the template. + Arn *string `type:"string"` + + // The creation status of the template. + CreationStatus *string `type:"string" enum:"ResourceStatus"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` + + // The ID for the template. + TemplateId *string `min:"1" type:"string"` + + // The ARN for the template, including the version information of the first + // version. + VersionArn *string `type:"string"` } // String returns the string representation -func (s UpdateGroupOutput) String() string { +func (s UpdateTemplateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateGroupOutput) GoString() string { +func (s UpdateTemplateOutput) GoString() string { return s.String() } -// SetGroup sets the Group field's value. -func (s *UpdateGroupOutput) SetGroup(v *Group) *UpdateGroupOutput { - s.Group = v +// SetArn sets the Arn field's value. +func (s *UpdateTemplateOutput) SetArn(v string) *UpdateTemplateOutput { + s.Arn = &v + return s +} + +// SetCreationStatus sets the CreationStatus field's value. +func (s *UpdateTemplateOutput) SetCreationStatus(v string) *UpdateTemplateOutput { + s.CreationStatus = &v return s } // SetRequestId sets the RequestId field's value. -func (s *UpdateGroupOutput) SetRequestId(v string) *UpdateGroupOutput { +func (s *UpdateTemplateOutput) SetRequestId(v string) *UpdateTemplateOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *UpdateGroupOutput) SetStatus(v int64) *UpdateGroupOutput { +func (s *UpdateTemplateOutput) SetStatus(v int64) *UpdateTemplateOutput { s.Status = &v return s } -type UpdateIAMPolicyAssignmentInput struct { - _ struct{} `type:"structure"` +// SetTemplateId sets the TemplateId field's value. 
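// A sketch of publishing a new template version from a source analysis with
// the UpdateTemplateInput shape above. The analysis ARN, dataset placeholder
// name, and dataset ARN are illustrative assumptions; DataSetReferences maps
// each placeholder in the source to a concrete dataset, as described in the
// SourceEntity documentation.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	svc := quicksight.New(session.Must(session.NewSession()))

	source := &quicksight.TemplateSourceEntity{
		SourceAnalysis: &quicksight.TemplateSourceAnalysis{
			Arn: aws.String("arn:aws:quicksight:us-east-1:111122223333:analysis/example"),
			DataSetReferences: []*quicksight.DataSetReference{{
				DataSetPlaceholder: aws.String("sales"),
				DataSetArn:         aws.String("arn:aws:quicksight:us-east-1:111122223333:dataset/example"),
			}},
		},
	}

	input := &quicksight.UpdateTemplateInput{}
	input.SetAwsAccountId("111122223333").
		SetTemplateId("example-template").
		SetName("example").
		SetVersionDescription("refreshed from analysis").
		SetSourceEntity(source)

	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}
	out, err := svc.UpdateTemplate(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.VersionArn), aws.StringValue(out.CreationStatus))
}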
+func (s *UpdateTemplateOutput) SetTemplateId(v string) *UpdateTemplateOutput { + s.TemplateId = &v + return s +} - // The name of the assignment. This name must be unique within an AWS account. - // - // AssignmentName is a required field - AssignmentName *string `location:"uri" locationName:"AssignmentName" min:"1" type:"string" required:"true"` +// SetVersionArn sets the VersionArn field's value. +func (s *UpdateTemplateOutput) SetVersionArn(v string) *UpdateTemplateOutput { + s.VersionArn = &v + return s +} - // The status of the assignment. Possible values are as follows: - // - // * ENABLED - Anything specified in this assignment is used when creating - // the data source. - // - // * DISABLED - This assignment isn't used when creating the data source. - // - // * DRAFT - This assignment is an unfinished draft and isn't used when creating - // the data source. - AssignmentStatus *string `type:"string" enum:"AssignmentStatus"` +type UpdateTemplatePermissionsInput struct { + _ struct{} `type:"structure"` - // The ID of the AWS account that contains the IAM policy assignment. + // The ID of the AWS account that contains the template. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The QuickSight users, groups, or both that you want to assign the policy - // to. - Identities map[string][]*string `type:"map"` + // A list of resource permissions to be granted on the template. + GrantPermissions []*ResourcePermission `type:"list"` - // The namespace of the assignment. - // - // Namespace is a required field - Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` + // A list of resource permissions to be revoked from the template. + RevokePermissions []*ResourcePermission `type:"list"` - // The ARN for the IAM policy to apply to the QuickSight users and groups specified - // in this assignment. - PolicyArn *string `type:"string"` + // The ID for the template. + // + // TemplateId is a required field + TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s UpdateIAMPolicyAssignmentInput) String() string { +func (s UpdateTemplatePermissionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateIAMPolicyAssignmentInput) GoString() string { +func (s UpdateTemplatePermissionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateIAMPolicyAssignmentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateIAMPolicyAssignmentInput"} - if s.AssignmentName == nil { - invalidParams.Add(request.NewErrParamRequired("AssignmentName")) - } - if s.AssignmentName != nil && len(*s.AssignmentName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AssignmentName", 1)) - } +func (s *UpdateTemplatePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTemplatePermissionsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) + if s.TemplateId == nil { + invalidParams.Add(request.NewErrParamRequired("TemplateId")) } - if s.Namespace != nil && len(*s.Namespace) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Namespace", 1)) + if s.TemplateId != nil && len(*s.TemplateId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + } + if s.GrantPermissions != nil { + for i, v := range s.GrantPermissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GrantPermissions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.RevokePermissions != nil { + for i, v := range s.RevokePermissions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RevokePermissions", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -22347,168 +32097,126 @@ func (s *UpdateIAMPolicyAssignmentInput) Validate() error { return nil } -// SetAssignmentName sets the AssignmentName field's value. -func (s *UpdateIAMPolicyAssignmentInput) SetAssignmentName(v string) *UpdateIAMPolicyAssignmentInput { - s.AssignmentName = &v - return s -} - -// SetAssignmentStatus sets the AssignmentStatus field's value. -func (s *UpdateIAMPolicyAssignmentInput) SetAssignmentStatus(v string) *UpdateIAMPolicyAssignmentInput { - s.AssignmentStatus = &v - return s -} - // SetAwsAccountId sets the AwsAccountId field's value. -func (s *UpdateIAMPolicyAssignmentInput) SetAwsAccountId(v string) *UpdateIAMPolicyAssignmentInput { +func (s *UpdateTemplatePermissionsInput) SetAwsAccountId(v string) *UpdateTemplatePermissionsInput { s.AwsAccountId = &v return s } -// SetIdentities sets the Identities field's value. -func (s *UpdateIAMPolicyAssignmentInput) SetIdentities(v map[string][]*string) *UpdateIAMPolicyAssignmentInput { - s.Identities = v +// SetGrantPermissions sets the GrantPermissions field's value. +func (s *UpdateTemplatePermissionsInput) SetGrantPermissions(v []*ResourcePermission) *UpdateTemplatePermissionsInput { + s.GrantPermissions = v return s } -// SetNamespace sets the Namespace field's value. -func (s *UpdateIAMPolicyAssignmentInput) SetNamespace(v string) *UpdateIAMPolicyAssignmentInput { - s.Namespace = &v +// SetRevokePermissions sets the RevokePermissions field's value. +func (s *UpdateTemplatePermissionsInput) SetRevokePermissions(v []*ResourcePermission) *UpdateTemplatePermissionsInput { + s.RevokePermissions = v return s } -// SetPolicyArn sets the PolicyArn field's value. -func (s *UpdateIAMPolicyAssignmentInput) SetPolicyArn(v string) *UpdateIAMPolicyAssignmentInput { - s.PolicyArn = &v +// SetTemplateId sets the TemplateId field's value. 
+func (s *UpdateTemplatePermissionsInput) SetTemplateId(v string) *UpdateTemplatePermissionsInput { + s.TemplateId = &v return s } -type UpdateIAMPolicyAssignmentOutput struct { +type UpdateTemplatePermissionsOutput struct { _ struct{} `type:"structure"` - // The ID of the assignment. - AssignmentId *string `type:"string"` - - // The name of the assignment. - AssignmentName *string `min:"1" type:"string"` - - // The status of the assignment. Possible values are as follows: - // - // * ENABLED - Anything specified in this assignment is used when creating - // the data source. - // - // * DISABLED - This assignment isn't used when creating the data source. - // - // * DRAFT - This assignment is an unfinished draft and isn't used when creating - // the data source. - AssignmentStatus *string `type:"string" enum:"AssignmentStatus"` - - // The QuickSight users, groups, or both that the IAM policy is assigned to. - Identities map[string][]*string `type:"map"` - - // The ARN for the IAM policy applied to the QuickSight users and groups specified - // in this assignment. - PolicyArn *string `type:"string"` + // A list of resource permissions to be set on the template. + Permissions []*ResourcePermission `min:"1" type:"list"` // The AWS request ID for this operation. RequestId *string `type:"string"` // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` + + // The Amazon Resource Name (ARN) of the template. + TemplateArn *string `type:"string"` + + // The ID for the template. + TemplateId *string `min:"1" type:"string"` } // String returns the string representation -func (s UpdateIAMPolicyAssignmentOutput) String() string { +func (s UpdateTemplatePermissionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateIAMPolicyAssignmentOutput) GoString() string { +func (s UpdateTemplatePermissionsOutput) GoString() string { return s.String() } -// SetAssignmentId sets the AssignmentId field's value. -func (s *UpdateIAMPolicyAssignmentOutput) SetAssignmentId(v string) *UpdateIAMPolicyAssignmentOutput { - s.AssignmentId = &v - return s -} - -// SetAssignmentName sets the AssignmentName field's value. -func (s *UpdateIAMPolicyAssignmentOutput) SetAssignmentName(v string) *UpdateIAMPolicyAssignmentOutput { - s.AssignmentName = &v - return s -} - -// SetAssignmentStatus sets the AssignmentStatus field's value. -func (s *UpdateIAMPolicyAssignmentOutput) SetAssignmentStatus(v string) *UpdateIAMPolicyAssignmentOutput { - s.AssignmentStatus = &v +// SetPermissions sets the Permissions field's value. +func (s *UpdateTemplatePermissionsOutput) SetPermissions(v []*ResourcePermission) *UpdateTemplatePermissionsOutput { + s.Permissions = v return s } -// SetIdentities sets the Identities field's value. -func (s *UpdateIAMPolicyAssignmentOutput) SetIdentities(v map[string][]*string) *UpdateIAMPolicyAssignmentOutput { - s.Identities = v +// SetRequestId sets the RequestId field's value. +func (s *UpdateTemplatePermissionsOutput) SetRequestId(v string) *UpdateTemplatePermissionsOutput { + s.RequestId = &v return s } -// SetPolicyArn sets the PolicyArn field's value. -func (s *UpdateIAMPolicyAssignmentOutput) SetPolicyArn(v string) *UpdateIAMPolicyAssignmentOutput { - s.PolicyArn = &v +// SetStatus sets the Status field's value. +func (s *UpdateTemplatePermissionsOutput) SetStatus(v int64) *UpdateTemplatePermissionsOutput { + s.Status = &v return s } -// SetRequestId sets the RequestId field's value. 
-func (s *UpdateIAMPolicyAssignmentOutput) SetRequestId(v string) *UpdateIAMPolicyAssignmentOutput { - s.RequestId = &v +// SetTemplateArn sets the TemplateArn field's value. +func (s *UpdateTemplatePermissionsOutput) SetTemplateArn(v string) *UpdateTemplatePermissionsOutput { + s.TemplateArn = &v return s } -// SetStatus sets the Status field's value. -func (s *UpdateIAMPolicyAssignmentOutput) SetStatus(v int64) *UpdateIAMPolicyAssignmentOutput { - s.Status = &v +// SetTemplateId sets the TemplateId field's value. +func (s *UpdateTemplatePermissionsOutput) SetTemplateId(v string) *UpdateTemplatePermissionsOutput { + s.TemplateId = &v return s } -type UpdateTemplateAliasInput struct { +type UpdateThemeAliasInput struct { _ struct{} `type:"structure"` - // The alias of the template that you want to update. If you name a specific - // alias, you update the version that the alias points to. You can specify the - // latest version of the template by providing the keyword $LATEST in the AliasName - // parameter. The keyword $PUBLISHED doesn't apply to templates. + // The name of the theme alias that you want to update. // // AliasName is a required field AliasName *string `location:"uri" locationName:"AliasName" min:"1" type:"string" required:"true"` - // The ID of the AWS account that contains the template alias that you're updating. + // The ID of the AWS account that contains the theme alias that you're updating. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The ID for the template. + // The ID for the theme. // - // TemplateId is a required field - TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + // ThemeId is a required field + ThemeId *string `location:"uri" locationName:"ThemeId" min:"1" type:"string" required:"true"` - // The version number of the template. + // The version number of the theme that the alias should reference. // - // TemplateVersionNumber is a required field - TemplateVersionNumber *int64 `min:"1" type:"long" required:"true"` + // ThemeVersionNumber is a required field + ThemeVersionNumber *int64 `min:"1" type:"long" required:"true"` } // String returns the string representation -func (s UpdateTemplateAliasInput) String() string { +func (s UpdateThemeAliasInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateTemplateAliasInput) GoString() string { +func (s UpdateThemeAliasInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *UpdateTemplateAliasInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateTemplateAliasInput"} +func (s *UpdateThemeAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateThemeAliasInput"} if s.AliasName == nil { invalidParams.Add(request.NewErrParamRequired("AliasName")) } @@ -22521,17 +32229,17 @@ func (s *UpdateTemplateAliasInput) Validate() error { if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.TemplateId == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateId")) + if s.ThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeId")) } - if s.TemplateId != nil && len(*s.TemplateId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + if s.ThemeId != nil && len(*s.ThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThemeId", 1)) } - if s.TemplateVersionNumber == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateVersionNumber")) + if s.ThemeVersionNumber == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeVersionNumber")) } - if s.TemplateVersionNumber != nil && *s.TemplateVersionNumber < 1 { - invalidParams.Add(request.NewErrParamMinValue("TemplateVersionNumber", 1)) + if s.ThemeVersionNumber != nil && *s.ThemeVersionNumber < 1 { + invalidParams.Add(request.NewErrParamMinValue("ThemeVersionNumber", 1)) } if invalidParams.Len() > 0 { @@ -22541,30 +32249,30 @@ func (s *UpdateTemplateAliasInput) Validate() error { } // SetAliasName sets the AliasName field's value. -func (s *UpdateTemplateAliasInput) SetAliasName(v string) *UpdateTemplateAliasInput { +func (s *UpdateThemeAliasInput) SetAliasName(v string) *UpdateThemeAliasInput { s.AliasName = &v return s } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *UpdateTemplateAliasInput) SetAwsAccountId(v string) *UpdateTemplateAliasInput { +func (s *UpdateThemeAliasInput) SetAwsAccountId(v string) *UpdateThemeAliasInput { s.AwsAccountId = &v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *UpdateTemplateAliasInput) SetTemplateId(v string) *UpdateTemplateAliasInput { - s.TemplateId = &v +// SetThemeId sets the ThemeId field's value. +func (s *UpdateThemeAliasInput) SetThemeId(v string) *UpdateThemeAliasInput { + s.ThemeId = &v return s } -// SetTemplateVersionNumber sets the TemplateVersionNumber field's value. -func (s *UpdateTemplateAliasInput) SetTemplateVersionNumber(v int64) *UpdateTemplateAliasInput { - s.TemplateVersionNumber = &v +// SetThemeVersionNumber sets the ThemeVersionNumber field's value. +func (s *UpdateThemeAliasInput) SetThemeVersionNumber(v int64) *UpdateThemeAliasInput { + s.ThemeVersionNumber = &v return s } -type UpdateTemplateAliasOutput struct { +type UpdateThemeAliasOutput struct { _ struct{} `type:"structure"` // The AWS request ID for this operation. @@ -22573,106 +32281,106 @@ type UpdateTemplateAliasOutput struct { // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` - // The template alias. - TemplateAlias *TemplateAlias `type:"structure"` + // Information about the theme alias. 
+ ThemeAlias *ThemeAlias `type:"structure"` } // String returns the string representation -func (s UpdateTemplateAliasOutput) String() string { +func (s UpdateThemeAliasOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateTemplateAliasOutput) GoString() string { +func (s UpdateThemeAliasOutput) GoString() string { return s.String() } // SetRequestId sets the RequestId field's value. -func (s *UpdateTemplateAliasOutput) SetRequestId(v string) *UpdateTemplateAliasOutput { +func (s *UpdateThemeAliasOutput) SetRequestId(v string) *UpdateThemeAliasOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *UpdateTemplateAliasOutput) SetStatus(v int64) *UpdateTemplateAliasOutput { +func (s *UpdateThemeAliasOutput) SetStatus(v int64) *UpdateThemeAliasOutput { s.Status = &v return s } -// SetTemplateAlias sets the TemplateAlias field's value. -func (s *UpdateTemplateAliasOutput) SetTemplateAlias(v *TemplateAlias) *UpdateTemplateAliasOutput { - s.TemplateAlias = v +// SetThemeAlias sets the ThemeAlias field's value. +func (s *UpdateThemeAliasOutput) SetThemeAlias(v *ThemeAlias) *UpdateThemeAliasOutput { + s.ThemeAlias = v return s } -type UpdateTemplateInput struct { +type UpdateThemeInput struct { _ struct{} `type:"structure"` - // The ID of the AWS account that contains the template that you're updating. + // The ID of the AWS account that contains the theme that you're updating. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // The name for the template. - Name *string `min:"1" type:"string"` - - // The source QuickSight entity from which this template is being updated. You - // can currently update templates from an Analysis or another template. + // The theme ID, defined by Amazon QuickSight, that a custom theme inherits + // from. All themes initially inherit from a default QuickSight theme. // - // SourceEntity is a required field - SourceEntity *TemplateSourceEntity `type:"structure" required:"true"` + // BaseThemeId is a required field + BaseThemeId *string `min:"1" type:"string" required:"true"` - // The ID for the template. + // The theme configuration, which contains the theme display properties. + Configuration *ThemeConfiguration `type:"structure"` + + // The name for the theme. + Name *string `min:"1" type:"string"` + + // The ID for the theme. // - // TemplateId is a required field - TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + // ThemeId is a required field + ThemeId *string `location:"uri" locationName:"ThemeId" min:"1" type:"string" required:"true"` - // A description of the current template version that is being updated. Every - // time you call UpdateTemplate, you create a new version of the template. Each - // version of the template maintains a description of the version in the VersionDescription - // field. + // A description of the theme version that you're updating Every time that you + // call UpdateTheme, you create a new version of the theme. Each version of + // the theme maintains a description of the version in VersionDescription. 
VersionDescription *string `min:"1" type:"string"` } // String returns the string representation -func (s UpdateTemplateInput) String() string { +func (s UpdateThemeInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateTemplateInput) GoString() string { +func (s UpdateThemeInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateTemplateInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateTemplateInput"} +func (s *UpdateThemeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateThemeInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } + if s.BaseThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("BaseThemeId")) + } + if s.BaseThemeId != nil && len(*s.BaseThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("BaseThemeId", 1)) + } if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.SourceEntity == nil { - invalidParams.Add(request.NewErrParamRequired("SourceEntity")) - } - if s.TemplateId == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateId")) + if s.ThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeId")) } - if s.TemplateId != nil && len(*s.TemplateId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + if s.ThemeId != nil && len(*s.ThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThemeId", 1)) } if s.VersionDescription != nil && len(*s.VersionDescription) < 1 { invalidParams.Add(request.NewErrParamMinLen("VersionDescription", 1)) } - if s.SourceEntity != nil { - if err := s.SourceEntity.Validate(); err != nil { - invalidParams.AddNested("SourceEntity", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -22681,42 +32389,48 @@ func (s *UpdateTemplateInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *UpdateTemplateInput) SetAwsAccountId(v string) *UpdateTemplateInput { +func (s *UpdateThemeInput) SetAwsAccountId(v string) *UpdateThemeInput { s.AwsAccountId = &v return s } -// SetName sets the Name field's value. -func (s *UpdateTemplateInput) SetName(v string) *UpdateTemplateInput { - s.Name = &v +// SetBaseThemeId sets the BaseThemeId field's value. +func (s *UpdateThemeInput) SetBaseThemeId(v string) *UpdateThemeInput { + s.BaseThemeId = &v return s } -// SetSourceEntity sets the SourceEntity field's value. -func (s *UpdateTemplateInput) SetSourceEntity(v *TemplateSourceEntity) *UpdateTemplateInput { - s.SourceEntity = v +// SetConfiguration sets the Configuration field's value. +func (s *UpdateThemeInput) SetConfiguration(v *ThemeConfiguration) *UpdateThemeInput { + s.Configuration = v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *UpdateTemplateInput) SetTemplateId(v string) *UpdateTemplateInput { - s.TemplateId = &v +// SetName sets the Name field's value. +func (s *UpdateThemeInput) SetName(v string) *UpdateThemeInput { + s.Name = &v + return s +} + +// SetThemeId sets the ThemeId field's value. +func (s *UpdateThemeInput) SetThemeId(v string) *UpdateThemeInput { + s.ThemeId = &v return s } // SetVersionDescription sets the VersionDescription field's value. 
-func (s *UpdateTemplateInput) SetVersionDescription(v string) *UpdateTemplateInput { +func (s *UpdateThemeInput) SetVersionDescription(v string) *UpdateThemeInput { s.VersionDescription = &v return s } -type UpdateTemplateOutput struct { +type UpdateThemeOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) for the template. + // The Amazon Resource Name (ARN) for the theme. Arn *string `type:"string"` - // The creation status of the template. + // The creation status of the theme. CreationStatus *string `type:"string" enum:"ResourceStatus"` // The AWS request ID for this operation. @@ -22725,110 +32439,103 @@ type UpdateTemplateOutput struct { // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` - // The ID for the template. - TemplateId *string `min:"1" type:"string"` + // The ID for the theme. + ThemeId *string `min:"1" type:"string"` - // The ARN for the template, including the version information of the first - // version. + // The Amazon Resource Name (ARN) for the new version of the theme. VersionArn *string `type:"string"` } // String returns the string representation -func (s UpdateTemplateOutput) String() string { +func (s UpdateThemeOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateTemplateOutput) GoString() string { +func (s UpdateThemeOutput) GoString() string { return s.String() } // SetArn sets the Arn field's value. -func (s *UpdateTemplateOutput) SetArn(v string) *UpdateTemplateOutput { +func (s *UpdateThemeOutput) SetArn(v string) *UpdateThemeOutput { s.Arn = &v return s } // SetCreationStatus sets the CreationStatus field's value. -func (s *UpdateTemplateOutput) SetCreationStatus(v string) *UpdateTemplateOutput { +func (s *UpdateThemeOutput) SetCreationStatus(v string) *UpdateThemeOutput { s.CreationStatus = &v return s } // SetRequestId sets the RequestId field's value. -func (s *UpdateTemplateOutput) SetRequestId(v string) *UpdateTemplateOutput { +func (s *UpdateThemeOutput) SetRequestId(v string) *UpdateThemeOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *UpdateTemplateOutput) SetStatus(v int64) *UpdateTemplateOutput { +func (s *UpdateThemeOutput) SetStatus(v int64) *UpdateThemeOutput { s.Status = &v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *UpdateTemplateOutput) SetTemplateId(v string) *UpdateTemplateOutput { - s.TemplateId = &v +// SetThemeId sets the ThemeId field's value. +func (s *UpdateThemeOutput) SetThemeId(v string) *UpdateThemeOutput { + s.ThemeId = &v return s } // SetVersionArn sets the VersionArn field's value. -func (s *UpdateTemplateOutput) SetVersionArn(v string) *UpdateTemplateOutput { +func (s *UpdateThemeOutput) SetVersionArn(v string) *UpdateThemeOutput { s.VersionArn = &v return s } -type UpdateTemplatePermissionsInput struct { +type UpdateThemePermissionsInput struct { _ struct{} `type:"structure"` - // The ID of the AWS account that contains the template. + // The ID of the AWS account that contains the theme. // // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` - // A list of resource permissions to be granted on the template. - GrantPermissions []*ResourcePermission `min:"1" type:"list"` + // A list of resource permissions to be granted for the theme. 
+ GrantPermissions []*ResourcePermission `type:"list"` - // A list of resource permissions to be revoked from the template. - RevokePermissions []*ResourcePermission `min:"1" type:"list"` + // A list of resource permissions to be revoked from the theme. + RevokePermissions []*ResourcePermission `type:"list"` - // The ID for the template. + // The ID for the theme. // - // TemplateId is a required field - TemplateId *string `location:"uri" locationName:"TemplateId" min:"1" type:"string" required:"true"` + // ThemeId is a required field + ThemeId *string `location:"uri" locationName:"ThemeId" min:"1" type:"string" required:"true"` } // String returns the string representation -func (s UpdateTemplatePermissionsInput) String() string { +func (s UpdateThemePermissionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateTemplatePermissionsInput) GoString() string { +func (s UpdateThemePermissionsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *UpdateTemplatePermissionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UpdateTemplatePermissionsInput"} +func (s *UpdateThemePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateThemePermissionsInput"} if s.AwsAccountId == nil { invalidParams.Add(request.NewErrParamRequired("AwsAccountId")) } if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } - if s.GrantPermissions != nil && len(s.GrantPermissions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("GrantPermissions", 1)) - } - if s.RevokePermissions != nil && len(s.RevokePermissions) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RevokePermissions", 1)) - } - if s.TemplateId == nil { - invalidParams.Add(request.NewErrParamRequired("TemplateId")) + if s.ThemeId == nil { + invalidParams.Add(request.NewErrParamRequired("ThemeId")) } - if s.TemplateId != nil && len(*s.TemplateId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TemplateId", 1)) + if s.ThemeId != nil && len(*s.ThemeId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ThemeId", 1)) } if s.GrantPermissions != nil { for i, v := range s.GrantPermissions { @@ -22858,33 +32565,33 @@ func (s *UpdateTemplatePermissionsInput) Validate() error { } // SetAwsAccountId sets the AwsAccountId field's value. -func (s *UpdateTemplatePermissionsInput) SetAwsAccountId(v string) *UpdateTemplatePermissionsInput { +func (s *UpdateThemePermissionsInput) SetAwsAccountId(v string) *UpdateThemePermissionsInput { s.AwsAccountId = &v return s } // SetGrantPermissions sets the GrantPermissions field's value. -func (s *UpdateTemplatePermissionsInput) SetGrantPermissions(v []*ResourcePermission) *UpdateTemplatePermissionsInput { +func (s *UpdateThemePermissionsInput) SetGrantPermissions(v []*ResourcePermission) *UpdateThemePermissionsInput { s.GrantPermissions = v return s } // SetRevokePermissions sets the RevokePermissions field's value. -func (s *UpdateTemplatePermissionsInput) SetRevokePermissions(v []*ResourcePermission) *UpdateTemplatePermissionsInput { +func (s *UpdateThemePermissionsInput) SetRevokePermissions(v []*ResourcePermission) *UpdateThemePermissionsInput { s.RevokePermissions = v return s } -// SetTemplateId sets the TemplateId field's value. 
-func (s *UpdateTemplatePermissionsInput) SetTemplateId(v string) *UpdateTemplatePermissionsInput { - s.TemplateId = &v +// SetThemeId sets the ThemeId field's value. +func (s *UpdateThemePermissionsInput) SetThemeId(v string) *UpdateThemePermissionsInput { + s.ThemeId = &v return s } -type UpdateTemplatePermissionsOutput struct { +type UpdateThemePermissionsOutput struct { _ struct{} `type:"structure"` - // A list of resource permissions to be set on the template. + // The resulting list of resource permissions for the theme. Permissions []*ResourcePermission `min:"1" type:"list"` // The AWS request ID for this operation. @@ -22893,50 +32600,50 @@ type UpdateTemplatePermissionsOutput struct { // The HTTP status of the request. Status *int64 `location:"statusCode" type:"integer"` - // The Amazon Resource Name (ARN) of the template. - TemplateArn *string `type:"string"` + // The Amazon Resource Name (ARN) of the theme. + ThemeArn *string `type:"string"` - // The ID for the template. - TemplateId *string `min:"1" type:"string"` + // The ID for the theme. + ThemeId *string `min:"1" type:"string"` } // String returns the string representation -func (s UpdateTemplatePermissionsOutput) String() string { +func (s UpdateThemePermissionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s UpdateTemplatePermissionsOutput) GoString() string { +func (s UpdateThemePermissionsOutput) GoString() string { return s.String() } // SetPermissions sets the Permissions field's value. -func (s *UpdateTemplatePermissionsOutput) SetPermissions(v []*ResourcePermission) *UpdateTemplatePermissionsOutput { +func (s *UpdateThemePermissionsOutput) SetPermissions(v []*ResourcePermission) *UpdateThemePermissionsOutput { s.Permissions = v return s } // SetRequestId sets the RequestId field's value. -func (s *UpdateTemplatePermissionsOutput) SetRequestId(v string) *UpdateTemplatePermissionsOutput { +func (s *UpdateThemePermissionsOutput) SetRequestId(v string) *UpdateThemePermissionsOutput { s.RequestId = &v return s } // SetStatus sets the Status field's value. -func (s *UpdateTemplatePermissionsOutput) SetStatus(v int64) *UpdateTemplatePermissionsOutput { +func (s *UpdateThemePermissionsOutput) SetStatus(v int64) *UpdateThemePermissionsOutput { s.Status = &v return s } -// SetTemplateArn sets the TemplateArn field's value. -func (s *UpdateTemplatePermissionsOutput) SetTemplateArn(v string) *UpdateTemplatePermissionsOutput { - s.TemplateArn = &v +// SetThemeArn sets the ThemeArn field's value. +func (s *UpdateThemePermissionsOutput) SetThemeArn(v string) *UpdateThemePermissionsOutput { + s.ThemeArn = &v return s } -// SetTemplateId sets the TemplateId field's value. -func (s *UpdateTemplatePermissionsOutput) SetTemplateId(v string) *UpdateTemplatePermissionsOutput { - s.TemplateId = &v +// SetThemeId sets the ThemeId field's value. +func (s *UpdateThemePermissionsOutput) SetThemeId(v string) *UpdateThemePermissionsOutput { + s.ThemeId = &v return s } @@ -22949,6 +32656,31 @@ type UpdateUserInput struct { // AwsAccountId is a required field AwsAccountId *string `location:"uri" locationName:"AwsAccountId" min:"12" type:"string" required:"true"` + // (Enterprise edition only) The name of the custom permissions profile that + // you want to assign to this user. 
Customized permissions allows you to control + // a user's access by restricting access the following operations: + // + // * Create and update data sources + // + // * Create and update datasets + // + // * Create and update email reports + // + // * Subscribe to email reports + // + // A set of custom permissions includes any combination of these restrictions. + // Currently, you need to create the profile names for custom permission sets + // by using the QuickSight console. Then, you use the RegisterUser API operation + // to assign the named set of permissions to a QuickSight user. + // + // QuickSight custom permissions are applied through IAM policies. Therefore, + // they override the permissions typically granted by assigning QuickSight users + // to one of the default security cohorts in QuickSight (admin, author, reader). + // + // This feature is available only to QuickSight Enterprise edition subscriptions + // that use SAML 2.0-Based Federation for Single Sign-On (SSO). + CustomPermissionsName *string `min:"1" type:"string"` + // The email address of the user that you want to update. // // Email is a required field @@ -22959,7 +32691,8 @@ type UpdateUserInput struct { // Namespace is a required field Namespace *string `location:"uri" locationName:"Namespace" type:"string" required:"true"` - // The Amazon QuickSight role of the user. The user role can be one of the following: + // The Amazon QuickSight role of the user. The role can be one of the following + // default security cohorts: // // * READER: A user who has read-only access to dashboards. // @@ -22969,9 +32702,18 @@ type UpdateUserInput struct { // * ADMIN: A user who is an author, who can also manage Amazon QuickSight // settings. // + // The name of the QuickSight role is invisible to the user except for the console + // screens dealing with permissions. + // // Role is a required field Role *string `type:"string" required:"true" enum:"UserRole"` + // A flag that you use to indicate that you want to remove all custom permissions + // from this user. Using this parameter resets the user to the state it was + // in before a custom permissions profile was applied. This parameter defaults + // to NULL and it doesn't accept any other value. + UnapplyCustomPermissions *bool `type:"boolean"` + // The Amazon QuickSight user name that you want to update. // // UserName is a required field @@ -22997,6 +32739,9 @@ func (s *UpdateUserInput) Validate() error { if s.AwsAccountId != nil && len(*s.AwsAccountId) < 12 { invalidParams.Add(request.NewErrParamMinLen("AwsAccountId", 12)) } + if s.CustomPermissionsName != nil && len(*s.CustomPermissionsName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CustomPermissionsName", 1)) + } if s.Email == nil { invalidParams.Add(request.NewErrParamRequired("Email")) } @@ -23028,6 +32773,12 @@ func (s *UpdateUserInput) SetAwsAccountId(v string) *UpdateUserInput { return s } +// SetCustomPermissionsName sets the CustomPermissionsName field's value. +func (s *UpdateUserInput) SetCustomPermissionsName(v string) *UpdateUserInput { + s.CustomPermissionsName = &v + return s +} + // SetEmail sets the Email field's value. func (s *UpdateUserInput) SetEmail(v string) *UpdateUserInput { s.Email = &v @@ -23046,6 +32797,12 @@ func (s *UpdateUserInput) SetRole(v string) *UpdateUserInput { return s } +// SetUnapplyCustomPermissions sets the UnapplyCustomPermissions field's value. 
+func (s *UpdateUserInput) SetUnapplyCustomPermissions(v bool) *UpdateUserInput { + s.UnapplyCustomPermissions = &v + return s +} + // SetUserName sets the UserName field's value. func (s *UpdateUserInput) SetUserName(v string) *UpdateUserInput { s.UserName = &v @@ -23169,8 +32926,7 @@ func (s *UploadSettings) SetTextQualifier(v string) *UploadSettings { return s } -// A registered user of Amazon QuickSight. Currently, an Amazon QuickSight subscription -// can't contain more than 20 million users. +// A registered user of Amazon QuickSight. type User struct { _ struct{} `type:"structure"` @@ -23182,6 +32938,9 @@ type User struct { // The Amazon Resource Name (ARN) for the user. Arn *string `type:"string"` + // The custom permissions profile associated with this user. + CustomPermissionsName *string `min:"1" type:"string"` + // The user's email address. Email *string `type:"string"` @@ -23233,6 +32992,12 @@ func (s *User) SetArn(v string) *User { return s } +// SetCustomPermissionsName sets the CustomPermissionsName field's value. +func (s *User) SetCustomPermissionsName(v string) *User { + s.CustomPermissionsName = &v + return s +} + // SetEmail sets the Email field's value. func (s *User) SetEmail(v string) *User { s.Email = &v @@ -23267,8 +33032,8 @@ func (s *User) SetUserName(v string) *User { // operation that requires finding a user based on a provided user name, such // as DeleteUser, DescribeUser, and so on. type UserNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -23288,17 +33053,17 @@ func (s UserNotFoundException) GoString() string { func newErrorUserNotFoundException(v protocol.ResponseMetadata) error { return &UserNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UserNotFoundException) Code() string { +func (s *UserNotFoundException) Code() string { return "QuickSightUserNotFoundException" } // Message returns the exception's message. -func (s UserNotFoundException) Message() string { +func (s *UserNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23306,22 +33071,22 @@ func (s UserNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UserNotFoundException) OrigErr() error { +func (s *UserNotFoundException) OrigErr() error { return nil } -func (s UserNotFoundException) Error() string { +func (s *UserNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UserNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UserNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UserNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *UserNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // VPC connection properties. 
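The hunks above vendor the new custom-permissions surface on UpdateUserInput (CustomPermissionsName, UnapplyCustomPermissions). As a rough sketch of how provider code might exercise that call once this SDK version is vendored, the snippet below assigns a custom permissions profile to an existing user. It assumes an Enterprise-edition account where such a profile has already been created in the QuickSight console; the account ID, namespace, user name, email, and profile name are placeholder values, not anything defined in this patch.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/quicksight"
)

func main() {
	// Build a client from the default credential/config chain.
	sess := session.Must(session.NewSession())
	svc := quicksight.New(sess)

	// Placeholder identifiers for illustration only.
	out, err := svc.UpdateUser(&quicksight.UpdateUserInput{
		AwsAccountId:          aws.String("111122223333"),
		Namespace:             aws.String("default"),
		UserName:              aws.String("example-author"),
		Email:                 aws.String("author@example.com"),
		Role:                  aws.String(quicksight.UserRoleAuthor),
		CustomPermissionsName: aws.String("example-restricted-authors"), // assumed profile created beforehand in the QuickSight console
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.User.Arn))
}

Passing UnapplyCustomPermissions set to true in the same input (in place of CustomPermissionsName) would reset the user to the state it was in before a profile was applied, per the field documentation vendored above.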
@@ -23363,6 +33128,66 @@ func (s *VpcConnectionProperties) SetVpcConnectionArn(v string) *VpcConnectionPr return s } +const ( + // AnalysisErrorTypeAccessDenied is a AnalysisErrorType enum value + AnalysisErrorTypeAccessDenied = "ACCESS_DENIED" + + // AnalysisErrorTypeSourceNotFound is a AnalysisErrorType enum value + AnalysisErrorTypeSourceNotFound = "SOURCE_NOT_FOUND" + + // AnalysisErrorTypeDataSetNotFound is a AnalysisErrorType enum value + AnalysisErrorTypeDataSetNotFound = "DATA_SET_NOT_FOUND" + + // AnalysisErrorTypeInternalFailure is a AnalysisErrorType enum value + AnalysisErrorTypeInternalFailure = "INTERNAL_FAILURE" + + // AnalysisErrorTypeParameterValueIncompatible is a AnalysisErrorType enum value + AnalysisErrorTypeParameterValueIncompatible = "PARAMETER_VALUE_INCOMPATIBLE" + + // AnalysisErrorTypeParameterTypeInvalid is a AnalysisErrorType enum value + AnalysisErrorTypeParameterTypeInvalid = "PARAMETER_TYPE_INVALID" + + // AnalysisErrorTypeParameterNotFound is a AnalysisErrorType enum value + AnalysisErrorTypeParameterNotFound = "PARAMETER_NOT_FOUND" + + // AnalysisErrorTypeColumnTypeMismatch is a AnalysisErrorType enum value + AnalysisErrorTypeColumnTypeMismatch = "COLUMN_TYPE_MISMATCH" + + // AnalysisErrorTypeColumnGeographicRoleMismatch is a AnalysisErrorType enum value + AnalysisErrorTypeColumnGeographicRoleMismatch = "COLUMN_GEOGRAPHIC_ROLE_MISMATCH" + + // AnalysisErrorTypeColumnReplacementMissing is a AnalysisErrorType enum value + AnalysisErrorTypeColumnReplacementMissing = "COLUMN_REPLACEMENT_MISSING" +) + +// AnalysisErrorType_Values returns all elements of the AnalysisErrorType enum +func AnalysisErrorType_Values() []string { + return []string{ + AnalysisErrorTypeAccessDenied, + AnalysisErrorTypeSourceNotFound, + AnalysisErrorTypeDataSetNotFound, + AnalysisErrorTypeInternalFailure, + AnalysisErrorTypeParameterValueIncompatible, + AnalysisErrorTypeParameterTypeInvalid, + AnalysisErrorTypeParameterNotFound, + AnalysisErrorTypeColumnTypeMismatch, + AnalysisErrorTypeColumnGeographicRoleMismatch, + AnalysisErrorTypeColumnReplacementMissing, + } +} + +const ( + // AnalysisFilterAttributeQuicksightUser is a AnalysisFilterAttribute enum value + AnalysisFilterAttributeQuicksightUser = "QUICKSIGHT_USER" +) + +// AnalysisFilterAttribute_Values returns all elements of the AnalysisFilterAttribute enum +func AnalysisFilterAttribute_Values() []string { + return []string{ + AnalysisFilterAttributeQuicksightUser, + } +} + const ( // AssignmentStatusEnabled is a AssignmentStatus enum value AssignmentStatusEnabled = "ENABLED" @@ -23374,6 +33199,15 @@ const ( AssignmentStatusDisabled = "DISABLED" ) +// AssignmentStatus_Values returns all elements of the AssignmentStatus enum +func AssignmentStatus_Values() []string { + return []string{ + AssignmentStatusEnabled, + AssignmentStatusDraft, + AssignmentStatusDisabled, + } +} + const ( // ColumnDataTypeString is a ColumnDataType enum value ColumnDataTypeString = "STRING" @@ -23388,6 +33222,16 @@ const ( ColumnDataTypeDatetime = "DATETIME" ) +// ColumnDataType_Values returns all elements of the ColumnDataType enum +func ColumnDataType_Values() []string { + return []string{ + ColumnDataTypeString, + ColumnDataTypeInteger, + ColumnDataTypeDecimal, + ColumnDataTypeDatetime, + } +} + const ( // DashboardBehaviorEnabled is a DashboardBehavior enum value DashboardBehaviorEnabled = "ENABLED" @@ -23396,7 +33240,21 @@ const ( DashboardBehaviorDisabled = "DISABLED" ) +// DashboardBehavior_Values returns all elements of the DashboardBehavior enum 
+func DashboardBehavior_Values() []string { + return []string{ + DashboardBehaviorEnabled, + DashboardBehaviorDisabled, + } +} + const ( + // DashboardErrorTypeAccessDenied is a DashboardErrorType enum value + DashboardErrorTypeAccessDenied = "ACCESS_DENIED" + + // DashboardErrorTypeSourceNotFound is a DashboardErrorType enum value + DashboardErrorTypeSourceNotFound = "SOURCE_NOT_FOUND" + // DashboardErrorTypeDataSetNotFound is a DashboardErrorType enum value DashboardErrorTypeDataSetNotFound = "DATA_SET_NOT_FOUND" @@ -23422,11 +33280,34 @@ const ( DashboardErrorTypeColumnReplacementMissing = "COLUMN_REPLACEMENT_MISSING" ) +// DashboardErrorType_Values returns all elements of the DashboardErrorType enum +func DashboardErrorType_Values() []string { + return []string{ + DashboardErrorTypeAccessDenied, + DashboardErrorTypeSourceNotFound, + DashboardErrorTypeDataSetNotFound, + DashboardErrorTypeInternalFailure, + DashboardErrorTypeParameterValueIncompatible, + DashboardErrorTypeParameterTypeInvalid, + DashboardErrorTypeParameterNotFound, + DashboardErrorTypeColumnTypeMismatch, + DashboardErrorTypeColumnGeographicRoleMismatch, + DashboardErrorTypeColumnReplacementMissing, + } +} + const ( // DashboardFilterAttributeQuicksightUser is a DashboardFilterAttribute enum value DashboardFilterAttributeQuicksightUser = "QUICKSIGHT_USER" ) +// DashboardFilterAttribute_Values returns all elements of the DashboardFilterAttribute enum +func DashboardFilterAttribute_Values() []string { + return []string{ + DashboardFilterAttributeQuicksightUser, + } +} + const ( // DashboardUIStateExpanded is a DashboardUIState enum value DashboardUIStateExpanded = "EXPANDED" @@ -23435,6 +33316,14 @@ const ( DashboardUIStateCollapsed = "COLLAPSED" ) +// DashboardUIState_Values returns all elements of the DashboardUIState enum +func DashboardUIState_Values() []string { + return []string{ + DashboardUIStateExpanded, + DashboardUIStateCollapsed, + } +} + const ( // DataSetImportModeSpice is a DataSetImportMode enum value DataSetImportModeSpice = "SPICE" @@ -23443,7 +33332,21 @@ const ( DataSetImportModeDirectQuery = "DIRECT_QUERY" ) +// DataSetImportMode_Values returns all elements of the DataSetImportMode enum +func DataSetImportMode_Values() []string { + return []string{ + DataSetImportModeSpice, + DataSetImportModeDirectQuery, + } +} + const ( + // DataSourceErrorInfoTypeAccessDenied is a DataSourceErrorInfoType enum value + DataSourceErrorInfoTypeAccessDenied = "ACCESS_DENIED" + + // DataSourceErrorInfoTypeCopySourceNotFound is a DataSourceErrorInfoType enum value + DataSourceErrorInfoTypeCopySourceNotFound = "COPY_SOURCE_NOT_FOUND" + // DataSourceErrorInfoTypeTimeout is a DataSourceErrorInfoType enum value DataSourceErrorInfoTypeTimeout = "TIMEOUT" @@ -23463,6 +33366,20 @@ const ( DataSourceErrorInfoTypeUnknown = "UNKNOWN" ) +// DataSourceErrorInfoType_Values returns all elements of the DataSourceErrorInfoType enum +func DataSourceErrorInfoType_Values() []string { + return []string{ + DataSourceErrorInfoTypeAccessDenied, + DataSourceErrorInfoTypeCopySourceNotFound, + DataSourceErrorInfoTypeTimeout, + DataSourceErrorInfoTypeEngineVersionNotSupported, + DataSourceErrorInfoTypeUnknownHost, + DataSourceErrorInfoTypeGenericSqlFailure, + DataSourceErrorInfoTypeConflict, + DataSourceErrorInfoTypeUnknown, + } +} + const ( // DataSourceTypeAdobeAnalytics is a DataSourceType enum value DataSourceTypeAdobeAnalytics = "ADOBE_ANALYTICS" @@ -23526,8 +33443,55 @@ const ( // DataSourceTypeTwitter is a DataSourceType enum value 
DataSourceTypeTwitter = "TWITTER" + + // DataSourceTypeTimestream is a DataSourceType enum value + DataSourceTypeTimestream = "TIMESTREAM" +) + +// DataSourceType_Values returns all elements of the DataSourceType enum +func DataSourceType_Values() []string { + return []string{ + DataSourceTypeAdobeAnalytics, + DataSourceTypeAmazonElasticsearch, + DataSourceTypeAthena, + DataSourceTypeAurora, + DataSourceTypeAuroraPostgresql, + DataSourceTypeAwsIotAnalytics, + DataSourceTypeGithub, + DataSourceTypeJira, + DataSourceTypeMariadb, + DataSourceTypeMysql, + DataSourceTypePostgresql, + DataSourceTypePresto, + DataSourceTypeRedshift, + DataSourceTypeS3, + DataSourceTypeSalesforce, + DataSourceTypeServicenow, + DataSourceTypeSnowflake, + DataSourceTypeSpark, + DataSourceTypeSqlserver, + DataSourceTypeTeradata, + DataSourceTypeTwitter, + DataSourceTypeTimestream, + } +} + +const ( + // EditionStandard is a Edition enum value + EditionStandard = "STANDARD" + + // EditionEnterprise is a Edition enum value + EditionEnterprise = "ENTERPRISE" ) +// Edition_Values returns all elements of the Edition enum +func Edition_Values() []string { + return []string{ + EditionStandard, + EditionEnterprise, + } +} + const ( // ExceptionResourceTypeUser is a ExceptionResourceType enum value ExceptionResourceTypeUser = "USER" @@ -23557,6 +33521,21 @@ const ( ExceptionResourceTypeIngestion = "INGESTION" ) +// ExceptionResourceType_Values returns all elements of the ExceptionResourceType enum +func ExceptionResourceType_Values() []string { + return []string{ + ExceptionResourceTypeUser, + ExceptionResourceTypeGroup, + ExceptionResourceTypeNamespace, + ExceptionResourceTypeAccountSettings, + ExceptionResourceTypeIampolicyAssignment, + ExceptionResourceTypeDataSource, + ExceptionResourceTypeDataSet, + ExceptionResourceTypeVpcConnection, + ExceptionResourceTypeIngestion, + } +} + const ( // FileFormatCsv is a FileFormat enum value FileFormatCsv = "CSV" @@ -23577,16 +33556,42 @@ const ( FileFormatJson = "JSON" ) +// FileFormat_Values returns all elements of the FileFormat enum +func FileFormat_Values() []string { + return []string{ + FileFormatCsv, + FileFormatTsv, + FileFormatClf, + FileFormatElf, + FileFormatXlsx, + FileFormatJson, + } +} + const ( // FilterOperatorStringEquals is a FilterOperator enum value FilterOperatorStringEquals = "StringEquals" ) +// FilterOperator_Values returns all elements of the FilterOperator enum +func FilterOperator_Values() []string { + return []string{ + FilterOperatorStringEquals, + } +} + const ( // GeoSpatialCountryCodeUs is a GeoSpatialCountryCode enum value GeoSpatialCountryCodeUs = "US" ) +// GeoSpatialCountryCode_Values returns all elements of the GeoSpatialCountryCode enum +func GeoSpatialCountryCode_Values() []string { + return []string{ + GeoSpatialCountryCodeUs, + } +} + const ( // GeoSpatialDataRoleCountry is a GeoSpatialDataRole enum value GeoSpatialDataRoleCountry = "COUNTRY" @@ -23610,6 +33615,31 @@ const ( GeoSpatialDataRoleLatitude = "LATITUDE" ) +// GeoSpatialDataRole_Values returns all elements of the GeoSpatialDataRole enum +func GeoSpatialDataRole_Values() []string { + return []string{ + GeoSpatialDataRoleCountry, + GeoSpatialDataRoleState, + GeoSpatialDataRoleCounty, + GeoSpatialDataRoleCity, + GeoSpatialDataRolePostcode, + GeoSpatialDataRoleLongitude, + GeoSpatialDataRoleLatitude, + } +} + +const ( + // IdentityStoreQuicksight is a IdentityStore enum value + IdentityStoreQuicksight = "QUICKSIGHT" +) + +// IdentityStore_Values returns all elements of the IdentityStore 
enum +func IdentityStore_Values() []string { + return []string{ + IdentityStoreQuicksight, + } +} + const ( // IdentityTypeIam is a IdentityType enum value IdentityTypeIam = "IAM" @@ -23618,6 +33648,14 @@ const ( IdentityTypeQuicksight = "QUICKSIGHT" ) +// IdentityType_Values returns all elements of the IdentityType enum +func IdentityType_Values() []string { + return []string{ + IdentityTypeIam, + IdentityTypeQuicksight, + } +} + const ( // IngestionErrorTypeFailureToAssumeRole is a IngestionErrorType enum value IngestionErrorTypeFailureToAssumeRole = "FAILURE_TO_ASSUME_ROLE" @@ -23740,6 +33778,52 @@ const ( IngestionErrorTypeInternalServiceError = "INTERNAL_SERVICE_ERROR" ) +// IngestionErrorType_Values returns all elements of the IngestionErrorType enum +func IngestionErrorType_Values() []string { + return []string{ + IngestionErrorTypeFailureToAssumeRole, + IngestionErrorTypeIngestionSuperseded, + IngestionErrorTypeIngestionCanceled, + IngestionErrorTypeDataSetDeleted, + IngestionErrorTypeDataSetNotSpice, + IngestionErrorTypeS3UploadedFileDeleted, + IngestionErrorTypeS3ManifestError, + IngestionErrorTypeDataToleranceException, + IngestionErrorTypeSpiceTableNotFound, + IngestionErrorTypeDataSetSizeLimitExceeded, + IngestionErrorTypeRowSizeLimitExceeded, + IngestionErrorTypeAccountCapacityLimitExceeded, + IngestionErrorTypeCustomerError, + IngestionErrorTypeDataSourceNotFound, + IngestionErrorTypeIamRoleNotAvailable, + IngestionErrorTypeConnectionFailure, + IngestionErrorTypeSqlTableNotFound, + IngestionErrorTypePermissionDenied, + IngestionErrorTypeSslCertificateValidationFailure, + IngestionErrorTypeOauthTokenFailure, + IngestionErrorTypeSourceApiLimitExceededFailure, + IngestionErrorTypePasswordAuthenticationFailure, + IngestionErrorTypeSqlSchemaMismatchError, + IngestionErrorTypeInvalidDateFormat, + IngestionErrorTypeInvalidDataprepSyntax, + IngestionErrorTypeSourceResourceLimitExceeded, + IngestionErrorTypeSqlInvalidParameterValue, + IngestionErrorTypeQueryTimeout, + IngestionErrorTypeSqlNumericOverflow, + IngestionErrorTypeUnresolvableHost, + IngestionErrorTypeUnroutableHost, + IngestionErrorTypeSqlException, + IngestionErrorTypeS3FileInaccessible, + IngestionErrorTypeIotFileNotFound, + IngestionErrorTypeIotDataSetFileEmpty, + IngestionErrorTypeInvalidDataSourceConfig, + IngestionErrorTypeDataSourceAuthFailed, + IngestionErrorTypeDataSourceConnectionFailed, + IngestionErrorTypeFailureToProcessJsonFile, + IngestionErrorTypeInternalServiceError, + } +} + const ( // IngestionRequestSourceManual is a IngestionRequestSource enum value IngestionRequestSourceManual = "MANUAL" @@ -23748,6 +33832,14 @@ const ( IngestionRequestSourceScheduled = "SCHEDULED" ) +// IngestionRequestSource_Values returns all elements of the IngestionRequestSource enum +func IngestionRequestSource_Values() []string { + return []string{ + IngestionRequestSourceManual, + IngestionRequestSourceScheduled, + } +} + const ( // IngestionRequestTypeInitialIngestion is a IngestionRequestType enum value IngestionRequestTypeInitialIngestion = "INITIAL_INGESTION" @@ -23762,6 +33854,16 @@ const ( IngestionRequestTypeFullRefresh = "FULL_REFRESH" ) +// IngestionRequestType_Values returns all elements of the IngestionRequestType enum +func IngestionRequestType_Values() []string { + return []string{ + IngestionRequestTypeInitialIngestion, + IngestionRequestTypeEdit, + IngestionRequestTypeIncrementalRefresh, + IngestionRequestTypeFullRefresh, + } +} + const ( // IngestionStatusInitialized is a IngestionStatus enum value 
IngestionStatusInitialized = "INITIALIZED" @@ -23782,6 +33884,18 @@ const ( IngestionStatusCancelled = "CANCELLED" ) +// IngestionStatus_Values returns all elements of the IngestionStatus enum +func IngestionStatus_Values() []string { + return []string{ + IngestionStatusInitialized, + IngestionStatusQueued, + IngestionStatusRunning, + IngestionStatusFailed, + IngestionStatusCompleted, + IngestionStatusCancelled, + } +} + const ( // InputColumnDataTypeString is a InputColumnDataType enum value InputColumnDataTypeString = "STRING" @@ -23805,6 +33919,19 @@ const ( InputColumnDataTypeJson = "JSON" ) +// InputColumnDataType_Values returns all elements of the InputColumnDataType enum +func InputColumnDataType_Values() []string { + return []string{ + InputColumnDataTypeString, + InputColumnDataTypeInteger, + InputColumnDataTypeDecimal, + InputColumnDataTypeDatetime, + InputColumnDataTypeBit, + InputColumnDataTypeBoolean, + InputColumnDataTypeJson, + } +} + const ( // JoinTypeInner is a JoinType enum value JoinTypeInner = "INNER" @@ -23819,6 +33946,60 @@ const ( JoinTypeRight = "RIGHT" ) +// JoinType_Values returns all elements of the JoinType enum +func JoinType_Values() []string { + return []string{ + JoinTypeInner, + JoinTypeOuter, + JoinTypeLeft, + JoinTypeRight, + } +} + +const ( + // NamespaceErrorTypePermissionDenied is a NamespaceErrorType enum value + NamespaceErrorTypePermissionDenied = "PERMISSION_DENIED" + + // NamespaceErrorTypeInternalServiceError is a NamespaceErrorType enum value + NamespaceErrorTypeInternalServiceError = "INTERNAL_SERVICE_ERROR" +) + +// NamespaceErrorType_Values returns all elements of the NamespaceErrorType enum +func NamespaceErrorType_Values() []string { + return []string{ + NamespaceErrorTypePermissionDenied, + NamespaceErrorTypeInternalServiceError, + } +} + +const ( + // NamespaceStatusCreated is a NamespaceStatus enum value + NamespaceStatusCreated = "CREATED" + + // NamespaceStatusCreating is a NamespaceStatus enum value + NamespaceStatusCreating = "CREATING" + + // NamespaceStatusDeleting is a NamespaceStatus enum value + NamespaceStatusDeleting = "DELETING" + + // NamespaceStatusRetryableFailure is a NamespaceStatus enum value + NamespaceStatusRetryableFailure = "RETRYABLE_FAILURE" + + // NamespaceStatusNonRetryableFailure is a NamespaceStatus enum value + NamespaceStatusNonRetryableFailure = "NON_RETRYABLE_FAILURE" +) + +// NamespaceStatus_Values returns all elements of the NamespaceStatus enum +func NamespaceStatus_Values() []string { + return []string{ + NamespaceStatusCreated, + NamespaceStatusCreating, + NamespaceStatusDeleting, + NamespaceStatusRetryableFailure, + NamespaceStatusNonRetryableFailure, + } +} + const ( // ResourceStatusCreationInProgress is a ResourceStatus enum value ResourceStatusCreationInProgress = "CREATION_IN_PROGRESS" @@ -23837,8 +34018,24 @@ const ( // ResourceStatusUpdateFailed is a ResourceStatus enum value ResourceStatusUpdateFailed = "UPDATE_FAILED" + + // ResourceStatusDeleted is a ResourceStatus enum value + ResourceStatusDeleted = "DELETED" ) +// ResourceStatus_Values returns all elements of the ResourceStatus enum +func ResourceStatus_Values() []string { + return []string{ + ResourceStatusCreationInProgress, + ResourceStatusCreationSuccessful, + ResourceStatusCreationFailed, + ResourceStatusUpdateInProgress, + ResourceStatusUpdateSuccessful, + ResourceStatusUpdateFailed, + ResourceStatusDeleted, + } +} + const ( // RowLevelPermissionPolicyGrantAccess is a RowLevelPermissionPolicy enum value 
RowLevelPermissionPolicyGrantAccess = "GRANT_ACCESS" @@ -23847,14 +34044,38 @@ const ( RowLevelPermissionPolicyDenyAccess = "DENY_ACCESS" ) +// RowLevelPermissionPolicy_Values returns all elements of the RowLevelPermissionPolicy enum +func RowLevelPermissionPolicy_Values() []string { + return []string{ + RowLevelPermissionPolicyGrantAccess, + RowLevelPermissionPolicyDenyAccess, + } +} + const ( + // TemplateErrorTypeSourceNotFound is a TemplateErrorType enum value + TemplateErrorTypeSourceNotFound = "SOURCE_NOT_FOUND" + // TemplateErrorTypeDataSetNotFound is a TemplateErrorType enum value TemplateErrorTypeDataSetNotFound = "DATA_SET_NOT_FOUND" // TemplateErrorTypeInternalFailure is a TemplateErrorType enum value TemplateErrorTypeInternalFailure = "INTERNAL_FAILURE" + + // TemplateErrorTypeAccessDenied is a TemplateErrorType enum value + TemplateErrorTypeAccessDenied = "ACCESS_DENIED" ) +// TemplateErrorType_Values returns all elements of the TemplateErrorType enum +func TemplateErrorType_Values() []string { + return []string{ + TemplateErrorTypeSourceNotFound, + TemplateErrorTypeDataSetNotFound, + TemplateErrorTypeInternalFailure, + TemplateErrorTypeAccessDenied, + } +} + const ( // TextQualifierDoubleQuote is a TextQualifier enum value TextQualifierDoubleQuote = "DOUBLE_QUOTE" @@ -23863,6 +34084,46 @@ const ( TextQualifierSingleQuote = "SINGLE_QUOTE" ) +// TextQualifier_Values returns all elements of the TextQualifier enum +func TextQualifier_Values() []string { + return []string{ + TextQualifierDoubleQuote, + TextQualifierSingleQuote, + } +} + +const ( + // ThemeErrorTypeInternalFailure is a ThemeErrorType enum value + ThemeErrorTypeInternalFailure = "INTERNAL_FAILURE" +) + +// ThemeErrorType_Values returns all elements of the ThemeErrorType enum +func ThemeErrorType_Values() []string { + return []string{ + ThemeErrorTypeInternalFailure, + } +} + +const ( + // ThemeTypeQuicksight is a ThemeType enum value + ThemeTypeQuicksight = "QUICKSIGHT" + + // ThemeTypeCustom is a ThemeType enum value + ThemeTypeCustom = "CUSTOM" + + // ThemeTypeAll is a ThemeType enum value + ThemeTypeAll = "ALL" +) + +// ThemeType_Values returns all elements of the ThemeType enum +func ThemeType_Values() []string { + return []string{ + ThemeTypeQuicksight, + ThemeTypeCustom, + ThemeTypeAll, + } +} + const ( // UserRoleAdmin is a UserRole enum value UserRoleAdmin = "ADMIN" @@ -23879,3 +34140,14 @@ const ( // UserRoleRestrictedReader is a UserRole enum value UserRoleRestrictedReader = "RESTRICTED_READER" ) + +// UserRole_Values returns all elements of the UserRole enum +func UserRole_Values() []string { + return []string{ + UserRoleAdmin, + UserRoleAuthor, + UserRoleReader, + UserRoleRestrictedAuthor, + UserRoleRestrictedReader, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/quicksight/errors.go b/vendor/github.com/aws/aws-sdk-go/service/quicksight/errors.go index 2f6b3e713..4f8ac36a9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/quicksight/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/quicksight/errors.go @@ -21,8 +21,8 @@ const ( // ErrCodeConcurrentUpdatingException for service response error code // "ConcurrentUpdatingException". // - // A resource is already in a state that indicates an action is happening that - // must complete before a new update can be applied. + // A resource is already in a state that indicates an operation is happening + // that must complete before a new update can be applied. 
ErrCodeConcurrentUpdatingException = "ConcurrentUpdatingException" // ErrCodeConflictException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go b/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go index d41bf27ba..9b0a6902e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ram/api.go b/vendor/github.com/aws/aws-sdk-go/service/ram/api.go index af4ed72da..9edc288d2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ram/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ram/api.go @@ -1005,6 +1005,9 @@ func (c *RAM) GetResourcePoliciesRequest(input *GetResourcePoliciesInput) (req * // * InvalidParameterException // A parameter is not valid. // +// * ResourceArnNotFoundException +// An Amazon Resource Name (ARN) was not found. +// // * ServerInternalException // The service could not respond to the request due to an internal problem. // @@ -1309,6 +1312,9 @@ func (c *RAM) GetResourceShareInvitationsRequest(input *GetResourceShareInvitati // * MalformedArnException // The format of an Amazon Resource Name (ARN) is not valid. // +// * UnknownResourceException +// A specified resource was not found. +// // * InvalidNextTokenException // The specified value for NextToken is not valid. // @@ -2051,6 +2057,94 @@ func (c *RAM) ListResourceSharePermissionsWithContext(ctx aws.Context, input *Li return out, req.Send() } +const opListResourceTypes = "ListResourceTypes" + +// ListResourceTypesRequest generates a "aws/request.Request" representing the +// client's request for the ListResourceTypes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListResourceTypes for more information on using the ListResourceTypes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListResourceTypesRequest method. +// req, resp := client.ListResourceTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ram-2018-01-04/ListResourceTypes +func (c *RAM) ListResourceTypesRequest(input *ListResourceTypesInput) (req *request.Request, output *ListResourceTypesOutput) { + op := &request.Operation{ + Name: opListResourceTypes, + HTTPMethod: "POST", + HTTPPath: "/listresourcetypes", + } + + if input == nil { + input = &ListResourceTypesInput{} + } + + output = &ListResourceTypesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListResourceTypes API operation for AWS Resource Access Manager. +// +// Lists the shareable resource types supported by AWS RAM. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Resource Access Manager's +// API operation ListResourceTypes for usage and error information. +// +// Returned Error Types: +// * InvalidNextTokenException +// The specified value for NextToken is not valid. +// +// * InvalidParameterException +// A parameter is not valid. +// +// * ServerInternalException +// The service could not respond to the request due to an internal problem. +// +// * ServiceUnavailableException +// The service is not available. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ram-2018-01-04/ListResourceTypes +func (c *RAM) ListResourceTypes(input *ListResourceTypesInput) (*ListResourceTypesOutput, error) { + req, out := c.ListResourceTypesRequest(input) + return out, req.Send() +} + +// ListResourceTypesWithContext is the same as ListResourceTypes with the addition of +// the ability to pass a context and additional request options. +// +// See ListResourceTypes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RAM) ListResourceTypesWithContext(ctx aws.Context, input *ListResourceTypesInput, opts ...request.Option) (*ListResourceTypesOutput, error) { + req, out := c.ListResourceTypesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListResources = "ListResources" // ListResourcesRequest generates a "aws/request.Request" representing the @@ -2288,6 +2382,9 @@ func (c *RAM) PromoteResourceShareCreatedFromPolicyRequest(input *PromoteResourc // * ServiceUnavailableException // The service is not available. // +// * UnknownResourceException +// A specified resource was not found. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/ram-2018-01-04/PromoteResourceShareCreatedFromPolicy func (c *RAM) PromoteResourceShareCreatedFromPolicy(input *PromoteResourceShareCreatedFromPolicyInput) (*PromoteResourceShareCreatedFromPolicyOutput, error) { req, out := c.PromoteResourceShareCreatedFromPolicyRequest(input) @@ -3971,8 +4068,8 @@ func (s *GetResourceSharesOutput) SetResourceShares(v []*ResourceShare) *GetReso // one of the other input parameters is different from the previous call to // the operation. type IdempotentParameterMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3989,17 +4086,17 @@ func (s IdempotentParameterMismatchException) GoString() string { func newErrorIdempotentParameterMismatchException(v protocol.ResponseMetadata) error { return &IdempotentParameterMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IdempotentParameterMismatchException) Code() string { +func (s *IdempotentParameterMismatchException) Code() string { return "IdempotentParameterMismatchException" } // Message returns the exception's message. 
-func (s IdempotentParameterMismatchException) Message() string { +func (s *IdempotentParameterMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4007,28 +4104,28 @@ func (s IdempotentParameterMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IdempotentParameterMismatchException) OrigErr() error { +func (s *IdempotentParameterMismatchException) OrigErr() error { return nil } -func (s IdempotentParameterMismatchException) Error() string { +func (s *IdempotentParameterMismatchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IdempotentParameterMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IdempotentParameterMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IdempotentParameterMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *IdempotentParameterMismatchException) RequestID() string { + return s.RespMetadata.RequestID } // A client token is not valid. type InvalidClientTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4045,17 +4142,17 @@ func (s InvalidClientTokenException) GoString() string { func newErrorInvalidClientTokenException(v protocol.ResponseMetadata) error { return &InvalidClientTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidClientTokenException) Code() string { +func (s *InvalidClientTokenException) Code() string { return "InvalidClientTokenException" } // Message returns the exception's message. -func (s InvalidClientTokenException) Message() string { +func (s *InvalidClientTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4063,28 +4160,28 @@ func (s InvalidClientTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidClientTokenException) OrigErr() error { +func (s *InvalidClientTokenException) OrigErr() error { return nil } -func (s InvalidClientTokenException) Error() string { +func (s *InvalidClientTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidClientTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidClientTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidClientTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidClientTokenException) RequestID() string { + return s.RespMetadata.RequestID } // The specified value for MaxResults is not valid. 
type InvalidMaxResultsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4101,17 +4198,17 @@ func (s InvalidMaxResultsException) GoString() string { func newErrorInvalidMaxResultsException(v protocol.ResponseMetadata) error { return &InvalidMaxResultsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidMaxResultsException) Code() string { +func (s *InvalidMaxResultsException) Code() string { return "InvalidMaxResultsException" } // Message returns the exception's message. -func (s InvalidMaxResultsException) Message() string { +func (s *InvalidMaxResultsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4119,28 +4216,28 @@ func (s InvalidMaxResultsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidMaxResultsException) OrigErr() error { +func (s *InvalidMaxResultsException) OrigErr() error { return nil } -func (s InvalidMaxResultsException) Error() string { +func (s *InvalidMaxResultsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidMaxResultsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidMaxResultsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidMaxResultsException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidMaxResultsException) RequestID() string { + return s.RespMetadata.RequestID } // The specified value for NextToken is not valid. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4157,17 +4254,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4175,28 +4272,28 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // A parameter is not valid. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4213,17 +4310,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4231,28 +4328,28 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource type is not valid. type InvalidResourceTypeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4269,17 +4366,17 @@ func (s InvalidResourceTypeException) GoString() string { func newErrorInvalidResourceTypeException(v protocol.ResponseMetadata) error { return &InvalidResourceTypeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidResourceTypeException) Code() string { +func (s *InvalidResourceTypeException) Code() string { return "InvalidResourceTypeException" } // Message returns the exception's message. -func (s InvalidResourceTypeException) Message() string { +func (s *InvalidResourceTypeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4287,28 +4384,28 @@ func (s InvalidResourceTypeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResourceTypeException) OrigErr() error { +func (s *InvalidResourceTypeException) OrigErr() error { return nil } -func (s InvalidResourceTypeException) Error() string { +func (s *InvalidResourceTypeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
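Because Code, Message, OrigErr, StatusCode, and RequestID now hang off pointer receivers and RespMetadata is exported, the modeled RAM error types can be matched directly with errors.As. A hedged sketch of that pattern; the call and the token value are illustrative only:

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ram"
)

func main() {
	client := ram.New(session.Must(session.NewSession()))

	// Deliberately pass a bogus pagination token so the service can return
	// the modeled error type.
	_, err := client.ListResourceTypes(&ram.ListResourceTypesInput{
		NextToken: aws.String("not-a-real-token"), // illustrative value
	})

	var invalidToken *ram.InvalidNextTokenException
	if errors.As(err, &invalidToken) {
		// RespMetadata is exported now, so the generated accessors expose the
		// HTTP status code and request ID of the failed call.
		fmt.Println(invalidToken.Code(), invalidToken.StatusCode(), invalidToken.RequestID())
	}
}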
-func (s InvalidResourceTypeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResourceTypeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidResourceTypeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResourceTypeException) RequestID() string { + return s.RespMetadata.RequestID } // The requested state transition is not valid. type InvalidStateTransitionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4325,17 +4422,17 @@ func (s InvalidStateTransitionException) GoString() string { func newErrorInvalidStateTransitionException(v protocol.ResponseMetadata) error { return &InvalidStateTransitionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidStateTransitionException) Code() string { +func (s *InvalidStateTransitionException) Code() string { return "InvalidStateTransitionException" } // Message returns the exception's message. -func (s InvalidStateTransitionException) Message() string { +func (s *InvalidStateTransitionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4343,22 +4440,22 @@ func (s InvalidStateTransitionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidStateTransitionException) OrigErr() error { +func (s *InvalidStateTransitionException) OrigErr() error { return nil } -func (s InvalidStateTransitionException) Error() string { +func (s *InvalidStateTransitionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidStateTransitionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidStateTransitionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidStateTransitionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidStateTransitionException) RequestID() string { + return s.RespMetadata.RequestID } type ListPendingInvitationResourcesInput struct { @@ -4569,9 +4666,11 @@ type ListPrincipalsInput struct { // The resource type. // - // Valid values: ec2:CapacityReservation | ec2:Subnet | ec2:TrafficMirrorTarget - // | ec2:TransitGateway | license-manager:LicenseConfiguration | rds:Cluster - // | route53resolver:ResolverRule I resource-groups:Group + // Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation + // | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway + // | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe + // | license-manager:LicenseConfiguration I resource-groups:Group | rds:Cluster + // | route53resolver:ResolverRule ResourceType *string `locationName:"resourceType" type:"string"` } @@ -4769,6 +4868,85 @@ func (s *ListResourceSharePermissionsOutput) SetPermissions(v []*ResourceSharePe return s } +type ListResourceTypesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return with a single call. To retrieve the + // remaining results, make another call with the returned nextToken value. 
+ MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The token for the next page of results. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListResourceTypesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceTypesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListResourceTypesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListResourceTypesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListResourceTypesInput) SetMaxResults(v int64) *ListResourceTypesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListResourceTypesInput) SetNextToken(v string) *ListResourceTypesInput { + s.NextToken = &v + return s +} + +type ListResourceTypesOutput struct { + _ struct{} `type:"structure"` + + // The token to use to retrieve the next page of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The shareable resource types supported by AWS RAM. + ResourceTypes []*ServiceNameAndResourceType `locationName:"resourceTypes" type:"list"` +} + +// String returns the string representation +func (s ListResourceTypesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResourceTypesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListResourceTypesOutput) SetNextToken(v string) *ListResourceTypesOutput { + s.NextToken = &v + return s +} + +// SetResourceTypes sets the ResourceTypes field's value. +func (s *ListResourceTypesOutput) SetResourceTypes(v []*ServiceNameAndResourceType) *ListResourceTypesOutput { + s.ResourceTypes = v + return s +} + type ListResourcesInput struct { _ struct{} `type:"structure"` @@ -4795,9 +4973,11 @@ type ListResourcesInput struct { // The resource type. // - // Valid values: ec2:CapacityReservation | ec2:Subnet | ec2:TrafficMirrorTarget - // | ec2:TransitGateway | license-manager:LicenseConfiguration | rds:Cluster - // | route53resolver:ResolverRule | resource-groups:Group + // Valid values: codebuild:Project | codebuild:ReportGroup | ec2:CapacityReservation + // | ec2:DedicatedHost | ec2:Subnet | ec2:TrafficMirrorTarget | ec2:TransitGateway + // | imagebuilder:Component | imagebuilder:Image | imagebuilder:ImageRecipe + // | license-manager:LicenseConfiguration I resource-groups:Group | rds:Cluster + // | route53resolver:ResolverRule ResourceType *string `locationName:"resourceType" type:"string"` } @@ -4904,8 +5084,8 @@ func (s *ListResourcesOutput) SetResources(v []*Resource) *ListResourcesOutput { // The format of an Amazon Resource Name (ARN) is not valid. 
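A minimal pagination sketch for the new ListResourceTypes operation defined above; this hunk adds no *Pages helper, so NextToken is threaded by hand (ServiceNameAndResourceType is defined further down in this diff, and credentials/region are assumed to come from the environment):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ram"
)

func main() {
	client := ram.New(session.Must(session.NewSession()))

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	input := &ram.ListResourceTypesInput{MaxResults: aws.Int64(50)}
	for {
		out, err := client.ListResourceTypesWithContext(ctx, input)
		if err != nil {
			fmt.Println("ListResourceTypes failed:", err)
			return
		}
		for _, t := range out.ResourceTypes {
			fmt.Printf("%s: %s\n", aws.StringValue(t.ServiceName), aws.StringValue(t.ResourceType))
		}
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}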
type MalformedArnException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4922,17 +5102,17 @@ func (s MalformedArnException) GoString() string { func newErrorMalformedArnException(v protocol.ResponseMetadata) error { return &MalformedArnException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MalformedArnException) Code() string { +func (s *MalformedArnException) Code() string { return "MalformedArnException" } // Message returns the exception's message. -func (s MalformedArnException) Message() string { +func (s *MalformedArnException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4940,28 +5120,28 @@ func (s MalformedArnException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MalformedArnException) OrigErr() error { +func (s *MalformedArnException) OrigErr() error { return nil } -func (s MalformedArnException) Error() string { +func (s *MalformedArnException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MalformedArnException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MalformedArnException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MalformedArnException) RequestID() string { - return s.respMetadata.RequestID +func (s *MalformedArnException) RequestID() string { + return s.RespMetadata.RequestID } // A required input parameter is missing. type MissingRequiredParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4978,17 +5158,17 @@ func (s MissingRequiredParameterException) GoString() string { func newErrorMissingRequiredParameterException(v protocol.ResponseMetadata) error { return &MissingRequiredParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MissingRequiredParameterException) Code() string { +func (s *MissingRequiredParameterException) Code() string { return "MissingRequiredParameterException" } // Message returns the exception's message. -func (s MissingRequiredParameterException) Message() string { +func (s *MissingRequiredParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4996,28 +5176,28 @@ func (s MissingRequiredParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MissingRequiredParameterException) OrigErr() error { +func (s *MissingRequiredParameterException) OrigErr() error { return nil } -func (s MissingRequiredParameterException) Error() string { +func (s *MissingRequiredParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MissingRequiredParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MissingRequiredParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s MissingRequiredParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *MissingRequiredParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The requested operation is not permitted. type OperationNotPermittedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5034,17 +5214,17 @@ func (s OperationNotPermittedException) GoString() string { func newErrorOperationNotPermittedException(v protocol.ResponseMetadata) error { return &OperationNotPermittedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationNotPermittedException) Code() string { +func (s *OperationNotPermittedException) Code() string { return "OperationNotPermittedException" } // Message returns the exception's message. -func (s OperationNotPermittedException) Message() string { +func (s *OperationNotPermittedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5052,22 +5232,22 @@ func (s OperationNotPermittedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OperationNotPermittedException) OrigErr() error { +func (s *OperationNotPermittedException) OrigErr() error { return nil } -func (s OperationNotPermittedException) Error() string { +func (s *OperationNotPermittedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OperationNotPermittedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OperationNotPermittedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationNotPermittedException) RequestID() string { - return s.respMetadata.RequestID +func (s *OperationNotPermittedException) RequestID() string { + return s.RespMetadata.RequestID } // Describes a principal for use with AWS Resource Access Manager. @@ -5363,8 +5543,8 @@ func (s *Resource) SetType(v string) *Resource { // An Amazon Resource Name (ARN) was not found. type ResourceArnNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5381,17 +5561,17 @@ func (s ResourceArnNotFoundException) GoString() string { func newErrorResourceArnNotFoundException(v protocol.ResponseMetadata) error { return &ResourceArnNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceArnNotFoundException) Code() string { +func (s *ResourceArnNotFoundException) Code() string { return "ResourceArnNotFoundException" } // Message returns the exception's message. -func (s ResourceArnNotFoundException) Message() string { +func (s *ResourceArnNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5399,22 +5579,22 @@ func (s ResourceArnNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceArnNotFoundException) OrigErr() error { +func (s *ResourceArnNotFoundException) OrigErr() error { return nil } -func (s ResourceArnNotFoundException) Error() string { +func (s *ResourceArnNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceArnNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceArnNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceArnNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceArnNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Describes a resource share. @@ -5726,8 +5906,8 @@ func (s *ResourceShareInvitation) SetStatus(v string) *ResourceShareInvitation { // The invitation was already accepted. type ResourceShareInvitationAlreadyAcceptedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5744,17 +5924,17 @@ func (s ResourceShareInvitationAlreadyAcceptedException) GoString() string { func newErrorResourceShareInvitationAlreadyAcceptedException(v protocol.ResponseMetadata) error { return &ResourceShareInvitationAlreadyAcceptedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceShareInvitationAlreadyAcceptedException) Code() string { +func (s *ResourceShareInvitationAlreadyAcceptedException) Code() string { return "ResourceShareInvitationAlreadyAcceptedException" } // Message returns the exception's message. -func (s ResourceShareInvitationAlreadyAcceptedException) Message() string { +func (s *ResourceShareInvitationAlreadyAcceptedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5762,28 +5942,28 @@ func (s ResourceShareInvitationAlreadyAcceptedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceShareInvitationAlreadyAcceptedException) OrigErr() error { +func (s *ResourceShareInvitationAlreadyAcceptedException) OrigErr() error { return nil } -func (s ResourceShareInvitationAlreadyAcceptedException) Error() string { +func (s *ResourceShareInvitationAlreadyAcceptedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceShareInvitationAlreadyAcceptedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceShareInvitationAlreadyAcceptedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceShareInvitationAlreadyAcceptedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceShareInvitationAlreadyAcceptedException) RequestID() string { + return s.RespMetadata.RequestID } // The invitation was already rejected. 
type ResourceShareInvitationAlreadyRejectedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5800,17 +5980,17 @@ func (s ResourceShareInvitationAlreadyRejectedException) GoString() string { func newErrorResourceShareInvitationAlreadyRejectedException(v protocol.ResponseMetadata) error { return &ResourceShareInvitationAlreadyRejectedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceShareInvitationAlreadyRejectedException) Code() string { +func (s *ResourceShareInvitationAlreadyRejectedException) Code() string { return "ResourceShareInvitationAlreadyRejectedException" } // Message returns the exception's message. -func (s ResourceShareInvitationAlreadyRejectedException) Message() string { +func (s *ResourceShareInvitationAlreadyRejectedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5818,28 +5998,28 @@ func (s ResourceShareInvitationAlreadyRejectedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceShareInvitationAlreadyRejectedException) OrigErr() error { +func (s *ResourceShareInvitationAlreadyRejectedException) OrigErr() error { return nil } -func (s ResourceShareInvitationAlreadyRejectedException) Error() string { +func (s *ResourceShareInvitationAlreadyRejectedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceShareInvitationAlreadyRejectedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceShareInvitationAlreadyRejectedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceShareInvitationAlreadyRejectedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceShareInvitationAlreadyRejectedException) RequestID() string { + return s.RespMetadata.RequestID } // The Amazon Resource Name (ARN) for an invitation was not found. type ResourceShareInvitationArnNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5856,17 +6036,17 @@ func (s ResourceShareInvitationArnNotFoundException) GoString() string { func newErrorResourceShareInvitationArnNotFoundException(v protocol.ResponseMetadata) error { return &ResourceShareInvitationArnNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceShareInvitationArnNotFoundException) Code() string { +func (s *ResourceShareInvitationArnNotFoundException) Code() string { return "ResourceShareInvitationArnNotFoundException" } // Message returns the exception's message. -func (s ResourceShareInvitationArnNotFoundException) Message() string { +func (s *ResourceShareInvitationArnNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5874,28 +6054,28 @@ func (s ResourceShareInvitationArnNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceShareInvitationArnNotFoundException) OrigErr() error { +func (s *ResourceShareInvitationArnNotFoundException) OrigErr() error { return nil } -func (s ResourceShareInvitationArnNotFoundException) Error() string { +func (s *ResourceShareInvitationArnNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceShareInvitationArnNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceShareInvitationArnNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceShareInvitationArnNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceShareInvitationArnNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The invitation is expired. type ResourceShareInvitationExpiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5912,17 +6092,17 @@ func (s ResourceShareInvitationExpiredException) GoString() string { func newErrorResourceShareInvitationExpiredException(v protocol.ResponseMetadata) error { return &ResourceShareInvitationExpiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceShareInvitationExpiredException) Code() string { +func (s *ResourceShareInvitationExpiredException) Code() string { return "ResourceShareInvitationExpiredException" } // Message returns the exception's message. -func (s ResourceShareInvitationExpiredException) Message() string { +func (s *ResourceShareInvitationExpiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5930,28 +6110,28 @@ func (s ResourceShareInvitationExpiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceShareInvitationExpiredException) OrigErr() error { +func (s *ResourceShareInvitationExpiredException) OrigErr() error { return nil } -func (s ResourceShareInvitationExpiredException) Error() string { +func (s *ResourceShareInvitationExpiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceShareInvitationExpiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceShareInvitationExpiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceShareInvitationExpiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceShareInvitationExpiredException) RequestID() string { + return s.RespMetadata.RequestID } // The requested resource share exceeds the limit for your account. 
type ResourceShareLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5968,17 +6148,17 @@ func (s ResourceShareLimitExceededException) GoString() string { func newErrorResourceShareLimitExceededException(v protocol.ResponseMetadata) error { return &ResourceShareLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceShareLimitExceededException) Code() string { +func (s *ResourceShareLimitExceededException) Code() string { return "ResourceShareLimitExceededException" } // Message returns the exception's message. -func (s ResourceShareLimitExceededException) Message() string { +func (s *ResourceShareLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5986,22 +6166,22 @@ func (s ResourceShareLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceShareLimitExceededException) OrigErr() error { +func (s *ResourceShareLimitExceededException) OrigErr() error { return nil } -func (s ResourceShareLimitExceededException) Error() string { +func (s *ResourceShareLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceShareLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceShareLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceShareLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceShareLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Information about an AWS RAM permission. @@ -6184,8 +6364,8 @@ func (s *ResourceSharePermissionSummary) SetVersion(v string) *ResourceSharePerm // The service could not respond to the request due to an internal problem. type ServerInternalException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6202,17 +6382,17 @@ func (s ServerInternalException) GoString() string { func newErrorServerInternalException(v protocol.ResponseMetadata) error { return &ServerInternalException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServerInternalException) Code() string { +func (s *ServerInternalException) Code() string { return "ServerInternalException" } // Message returns the exception's message. -func (s ServerInternalException) Message() string { +func (s *ServerInternalException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6220,28 +6400,62 @@ func (s ServerInternalException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ServerInternalException) OrigErr() error { +func (s *ServerInternalException) OrigErr() error { return nil } -func (s ServerInternalException) Error() string { +func (s *ServerInternalException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServerInternalException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServerInternalException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServerInternalException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServerInternalException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Information about the shareable resource types and the AWS services to which +// they belong. +type ServiceNameAndResourceType struct { + _ struct{} `type:"structure"` + + // The shareable resource types. + ResourceType *string `locationName:"resourceType" type:"string"` + + // The name of the AWS services to which the resources belong. + ServiceName *string `locationName:"serviceName" type:"string"` +} + +// String returns the string representation +func (s ServiceNameAndResourceType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceNameAndResourceType) GoString() string { + return s.String() +} + +// SetResourceType sets the ResourceType field's value. +func (s *ServiceNameAndResourceType) SetResourceType(v string) *ServiceNameAndResourceType { + s.ResourceType = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *ServiceNameAndResourceType) SetServiceName(v string) *ServiceNameAndResourceType { + s.ServiceName = &v + return s } // The service is not available. type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6258,17 +6472,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6276,22 +6490,22 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
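A short sketch of the chainable setters generated for ServiceNameAndResourceType above, for example when stubbing a ListResourceTypes page in a test; the service and resource type names are illustrative:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ram"
)

func main() {
	// Build a stubbed response page with the generated fluent setters.
	stub := &ram.ListResourceTypesOutput{
		ResourceTypes: []*ram.ServiceNameAndResourceType{
			(&ram.ServiceNameAndResourceType{}).
				SetServiceName("ec2").
				SetResourceType("ec2:Subnet"),
		},
	}
	fmt.Println(stub.GoString())
}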
-func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a tag. @@ -6362,8 +6576,8 @@ func (s *TagFilter) SetTagValues(v []*string) *TagFilter { // The requested tags exceed the limit for your account. type TagLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6380,17 +6594,17 @@ func (s TagLimitExceededException) GoString() string { func newErrorTagLimitExceededException(v protocol.ResponseMetadata) error { return &TagLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagLimitExceededException) Code() string { +func (s *TagLimitExceededException) Code() string { return "TagLimitExceededException" } // Message returns the exception's message. -func (s TagLimitExceededException) Message() string { +func (s *TagLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6398,28 +6612,28 @@ func (s TagLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagLimitExceededException) OrigErr() error { +func (s *TagLimitExceededException) OrigErr() error { return nil } -func (s TagLimitExceededException) Error() string { +func (s *TagLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The specified tag is a reserved word and cannot be used. type TagPolicyViolationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6436,17 +6650,17 @@ func (s TagPolicyViolationException) GoString() string { func newErrorTagPolicyViolationException(v protocol.ResponseMetadata) error { return &TagPolicyViolationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagPolicyViolationException) Code() string { +func (s *TagPolicyViolationException) Code() string { return "TagPolicyViolationException" } // Message returns the exception's message. -func (s TagPolicyViolationException) Message() string { +func (s *TagPolicyViolationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6454,22 +6668,22 @@ func (s TagPolicyViolationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s TagPolicyViolationException) OrigErr() error { +func (s *TagPolicyViolationException) OrigErr() error { return nil } -func (s TagPolicyViolationException) Error() string { +func (s *TagPolicyViolationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagPolicyViolationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagPolicyViolationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagPolicyViolationException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagPolicyViolationException) RequestID() string { + return s.RespMetadata.RequestID } type TagResourceInput struct { @@ -6540,8 +6754,8 @@ func (s TagResourceOutput) GoString() string { // A specified resource was not found. type UnknownResourceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6558,17 +6772,17 @@ func (s UnknownResourceException) GoString() string { func newErrorUnknownResourceException(v protocol.ResponseMetadata) error { return &UnknownResourceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnknownResourceException) Code() string { +func (s *UnknownResourceException) Code() string { return "UnknownResourceException" } // Message returns the exception's message. -func (s UnknownResourceException) Message() string { +func (s *UnknownResourceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6576,22 +6790,22 @@ func (s UnknownResourceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnknownResourceException) OrigErr() error { +func (s *UnknownResourceException) OrigErr() error { return nil } -func (s UnknownResourceException) Error() string { +func (s *UnknownResourceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnknownResourceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnknownResourceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s UnknownResourceException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnknownResourceException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -6768,6 +6982,14 @@ const ( ResourceOwnerOtherAccounts = "OTHER-ACCOUNTS" ) +// ResourceOwner_Values returns all elements of the ResourceOwner enum +func ResourceOwner_Values() []string { + return []string{ + ResourceOwnerSelf, + ResourceOwnerOtherAccounts, + } +} + const ( // ResourceShareAssociationStatusAssociating is a ResourceShareAssociationStatus enum value ResourceShareAssociationStatusAssociating = "ASSOCIATING" @@ -6785,6 +7007,17 @@ const ( ResourceShareAssociationStatusDisassociated = "DISASSOCIATED" ) +// ResourceShareAssociationStatus_Values returns all elements of the ResourceShareAssociationStatus enum +func ResourceShareAssociationStatus_Values() []string { + return []string{ + ResourceShareAssociationStatusAssociating, + ResourceShareAssociationStatusAssociated, + ResourceShareAssociationStatusFailed, + ResourceShareAssociationStatusDisassociating, + ResourceShareAssociationStatusDisassociated, + } +} + const ( // ResourceShareAssociationTypePrincipal is a ResourceShareAssociationType enum value ResourceShareAssociationTypePrincipal = "PRINCIPAL" @@ -6793,6 +7026,14 @@ const ( ResourceShareAssociationTypeResource = "RESOURCE" ) +// ResourceShareAssociationType_Values returns all elements of the ResourceShareAssociationType enum +func ResourceShareAssociationType_Values() []string { + return []string{ + ResourceShareAssociationTypePrincipal, + ResourceShareAssociationTypeResource, + } +} + const ( // ResourceShareFeatureSetCreatedFromPolicy is a ResourceShareFeatureSet enum value ResourceShareFeatureSetCreatedFromPolicy = "CREATED_FROM_POLICY" @@ -6804,6 +7045,15 @@ const ( ResourceShareFeatureSetStandard = "STANDARD" ) +// ResourceShareFeatureSet_Values returns all elements of the ResourceShareFeatureSet enum +func ResourceShareFeatureSet_Values() []string { + return []string{ + ResourceShareFeatureSetCreatedFromPolicy, + ResourceShareFeatureSetPromotingToStandard, + ResourceShareFeatureSetStandard, + } +} + const ( // ResourceShareInvitationStatusPending is a ResourceShareInvitationStatus enum value ResourceShareInvitationStatusPending = "PENDING" @@ -6818,6 +7068,16 @@ const ( ResourceShareInvitationStatusExpired = "EXPIRED" ) +// ResourceShareInvitationStatus_Values returns all elements of the ResourceShareInvitationStatus enum +func ResourceShareInvitationStatus_Values() []string { + return []string{ + ResourceShareInvitationStatusPending, + ResourceShareInvitationStatusAccepted, + ResourceShareInvitationStatusRejected, + ResourceShareInvitationStatusExpired, + } +} + const ( // ResourceShareStatusPending is a ResourceShareStatus enum value ResourceShareStatusPending = "PENDING" @@ -6835,6 +7095,17 @@ const ( ResourceShareStatusDeleted = "DELETED" ) +// ResourceShareStatus_Values returns all elements of the ResourceShareStatus enum +func ResourceShareStatus_Values() []string { + return []string{ + ResourceShareStatusPending, + ResourceShareStatusActive, + ResourceShareStatusFailed, + ResourceShareStatusDeleting, + ResourceShareStatusDeleted, + } +} + const ( // ResourceStatusAvailable is a ResourceStatus enum value ResourceStatusAvailable = "AVAILABLE" @@ -6851,3 +7122,14 @@ const ( // ResourceStatusPending is a ResourceStatus enum value ResourceStatusPending = "PENDING" ) + +// ResourceStatus_Values returns all elements of the ResourceStatus enum +func 
ResourceStatus_Values() []string { + return []string{ + ResourceStatusAvailable, + ResourceStatusZonalResourceInaccessible, + ResourceStatusLimitExceeded, + ResourceStatusUnavailable, + ResourceStatusPending, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ram/service.go b/vendor/github.com/aws/aws-sdk-go/service/ram/service.go index 44686717b..42713b898 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ram/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ram/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go index b9c5f9c6b..491541a7e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go @@ -347,11 +347,19 @@ func (c *RDS) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *requ // * ErrCodeDBInstanceNotFoundFault "DBInstanceNotFound" // DBInstanceIdentifier doesn't refer to an existing DB instance. // +// * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault" +// DBClusterIdentifier doesn't refer to an existing DB cluster. +// // * ErrCodeDBSnapshotNotFoundFault "DBSnapshotNotFound" // DBSnapshotIdentifier doesn't refer to an existing DB snapshot. // -// * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault" -// DBClusterIdentifier doesn't refer to an existing DB cluster. +// * ErrCodeDBProxyNotFoundFault "DBProxyNotFoundFault" +// The specified proxy name doesn't correspond to a proxy owned by your AWS +// accoutn in the specified AWS Region. +// +// * ErrCodeDBProxyTargetGroupNotFoundFault "DBProxyTargetGroupNotFoundFault" +// The specified target group isn't available for a proxy owned by your AWS +// account in the specified AWS Region. // // See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/AddTagsToResource func (c *RDS) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { @@ -612,7 +620,7 @@ func (c *RDS) BacktrackDBClusterRequest(input *BacktrackDBClusterInput) (req *re // (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Managing.Backtrack.html) // in the Amazon Aurora User Guide. // -// This action only applies to Aurora DB clusters. +// This action only applies to Aurora MySQL DB clusters. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1105,7 +1113,7 @@ func (c *RDS) CopyDBSnapshotRequest(input *CopyDBSnapshotInput) (req *request.Re // CopyDBSnapshot API operation for Amazon Relational Database Service. // -// Copies the specified DB snapshot. The source DB snapshot must be in the "available" +// Copies the specified DB snapshot. The source DB snapshot must be in the available // state. // // You can copy a snapshot from one AWS Region to another. In that case, the @@ -1384,7 +1392,7 @@ func (c *RDS) CreateDBClusterRequest(input *CreateDBClusterInput) (req *request. // Creates a new Amazon Aurora DB cluster. 
// // You can use the ReplicationSourceIdentifier parameter to create the DB cluster -// as a Read Replica of another DB cluster or Amazon RDS MySQL DB instance. +// as a read replica of another DB cluster or Amazon RDS MySQL DB instance. // For cross-region replication where the DB cluster identified by ReplicationSourceIdentifier // is encrypted, you must also specify the PreSignedUrl parameter. // @@ -1972,18 +1980,18 @@ func (c *RDS) CreateDBInstanceReadReplicaRequest(input *CreateDBInstanceReadRepl // CreateDBInstanceReadReplica API operation for Amazon Relational Database Service. // -// Creates a new DB instance that acts as a Read Replica for an existing source -// DB instance. You can create a Read Replica for a DB instance running MySQL, -// MariaDB, Oracle, or PostgreSQL. For more information, see Working with Read -// Replicas (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html) +// Creates a new DB instance that acts as a read replica for an existing source +// DB instance. You can create a read replica for a DB instance running MySQL, +// MariaDB, Oracle, PostgreSQL, or SQL Server. For more information, see Working +// with Read Replicas (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html) // in the Amazon RDS User Guide. // -// Amazon Aurora doesn't support this action. You must call the CreateDBInstance -// action to create a DB instance for an Aurora DB cluster. +// Amazon Aurora doesn't support this action. Call the CreateDBInstance action +// to create a DB instance for an Aurora DB cluster. // -// All Read Replica DB instances are created with backups disabled. All other +// All read replica DB instances are created with backups disabled. All other // DB instance attributes (including DB security groups and DB parameter groups) -// are inherited from the source DB instance, except as specified following. +// are inherited from the source DB instance, except as specified. // // Your source DB instance must have backup retention enabled. // @@ -2228,10 +2236,6 @@ func (c *RDS) CreateDBProxyRequest(input *CreateDBProxyInput) (req *request.Requ // CreateDBProxy API operation for Amazon Relational Database Service. // -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Creates a new DB proxy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2411,7 +2415,8 @@ func (c *RDS) CreateDBSnapshotRequest(input *CreateDBSnapshotInput) (req *reques // CreateDBSnapshot API operation for Amazon Relational Database Service. // -// Creates a DBSnapshot. The source DBInstance must be in "available" state. +// Creates a snapshot of a DB instance. The source DB instance must be in the +// available or storage-optimizationstate. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2600,11 +2605,12 @@ func (c *RDS) CreateEventSubscriptionRequest(input *CreateEventSubscriptionInput // or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon // SNS and subscribe to the topic. The ARN is displayed in the SNS console. // -// You can specify the type of source (SourceType) you want to be notified of, -// provide a list of RDS sources (SourceIds) that triggers the events, and provide -// a list of event categories (EventCategories) for events you want to be notified -// of. 
For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, -// mydbinstance2 and EventCategories = Availability, Backup. +// You can specify the type of source (SourceType) that you want to be notified +// of and provide a list of RDS sources (SourceIds) that triggers the events. +// You can also provide a list of event categories (EventCategories) for events +// that you want to be notified of. For example, you can specify SourceType +// = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories +// = Availability, Backup. // // If you specify both the SourceType and SourceIds, such as SourceType = db-instance // and SourceIdentifier = myDBInstance1, you are notified of all the db-instance @@ -2712,10 +2718,9 @@ func (c *RDS) CreateGlobalClusterRequest(input *CreateGlobalClusterInput) (req * // CreateGlobalCluster API operation for Amazon Relational Database Service. // -// -// Creates an Aurora global database spread across multiple regions. The global -// database contains a single primary cluster with read-write capability, and -// a read-only secondary cluster that receives data from the primary cluster +// Creates an Aurora global database spread across multiple AWS Regions. The +// global database contains a single primary cluster with read-write capability, +// and a read-only secondary cluster that receives data from the primary cluster // through high-speed replication performed by the Aurora storage subsystem. // // You can create a global database that is initially empty, and then add a @@ -3365,12 +3370,12 @@ func (c *RDS) DeleteDBInstanceRequest(input *DeleteDBInstanceInput) (req *reques // If the specified DB instance is part of an Amazon Aurora DB cluster, you // can't delete the DB instance if both of the following conditions are true: // -// * The DB cluster is a Read Replica of another Amazon Aurora DB cluster. +// * The DB cluster is a read replica of another Amazon Aurora DB cluster. // // * The DB instance is the only instance in the DB cluster. // // To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster -// API action to promote the DB cluster so it's no longer a Read Replica. After +// API action to promote the DB cluster so it's no longer a read replica. After // the promotion completes, then call the DeleteDBInstance API action to delete // the final instance in the DB cluster. // @@ -3638,10 +3643,6 @@ func (c *RDS) DeleteDBProxyRequest(input *DeleteDBProxyInput) (req *request.Requ // DeleteDBProxy API operation for Amazon Relational Database Service. // -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Deletes an existing proxy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4313,10 +4314,6 @@ func (c *RDS) DeregisterDBProxyTargetsRequest(input *DeregisterDBProxyTargetsInp // DeregisterDBProxyTargets API operation for Amazon Relational Database Service. // -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Remove the association between one or more DBProxyTarget data structures // and a DBProxyTargetGroup. 
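The revised CreateEventSubscription text above describes the SourceType / SourceIds / EventCategories combination in prose; a hedged sketch of the corresponding request follows. The subscription name, SNS topic ARN, and instance identifiers are placeholders, not values from this patch.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	// Subscribe to availability and backup events for two DB instances.
	_, err := svc.CreateEventSubscription(&rds.CreateEventSubscriptionInput{
		SubscriptionName: aws.String("example-subscription"),
		SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"),
		SourceType:       aws.String("db-instance"),
		SourceIds:        aws.StringSlice([]string{"mydbinstance1", "mydbinstance2"}),
		EventCategories:  aws.StringSlice([]string{"availability", "backup"}),
	})
	if err != nil {
		log.Fatal(err)
	}
}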
// @@ -4475,6 +4472,12 @@ func (c *RDS) DescribeCertificatesRequest(input *DescribeCertificatesInput) (req Name: opDescribeCertificates, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -4523,6 +4526,58 @@ func (c *RDS) DescribeCertificatesWithContext(ctx aws.Context, input *DescribeCe return out, req.Send() } +// DescribeCertificatesPages iterates over the pages of a DescribeCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCertificates operation. +// pageNum := 0 +// err := client.DescribeCertificatesPages(params, +// func(page *rds.DescribeCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeCertificatesPages(input *DescribeCertificatesInput, fn func(*DescribeCertificatesOutput, bool) bool) error { + return c.DescribeCertificatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeCertificatesPagesWithContext same as DescribeCertificatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeCertificatesPagesWithContext(ctx aws.Context, input *DescribeCertificatesInput, fn func(*DescribeCertificatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeCertificatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCertificatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeCertificatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeCustomAvailabilityZones = "DescribeCustomAvailabilityZones" // DescribeCustomAvailabilityZonesRequest generates a "aws/request.Request" representing the @@ -4698,6 +4753,12 @@ func (c *RDS) DescribeDBClusterBacktracksRequest(input *DescribeDBClusterBacktra Name: opDescribeDBClusterBacktracks, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -4716,7 +4777,7 @@ func (c *RDS) DescribeDBClusterBacktracksRequest(input *DescribeDBClusterBacktra // For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // -// This action only applies to Aurora DB clusters. +// This action only applies to Aurora MySQL DB clusters. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4754,6 +4815,58 @@ func (c *RDS) DescribeDBClusterBacktracksWithContext(ctx aws.Context, input *Des return out, req.Send() } +// DescribeDBClusterBacktracksPages iterates over the pages of a DescribeDBClusterBacktracks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBClusterBacktracks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBClusterBacktracks operation. +// pageNum := 0 +// err := client.DescribeDBClusterBacktracksPages(params, +// func(page *rds.DescribeDBClusterBacktracksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeDBClusterBacktracksPages(input *DescribeDBClusterBacktracksInput, fn func(*DescribeDBClusterBacktracksOutput, bool) bool) error { + return c.DescribeDBClusterBacktracksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDBClusterBacktracksPagesWithContext same as DescribeDBClusterBacktracksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeDBClusterBacktracksPagesWithContext(ctx aws.Context, input *DescribeDBClusterBacktracksInput, fn func(*DescribeDBClusterBacktracksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDBClusterBacktracksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBClusterBacktracksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDBClusterBacktracksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDBClusterEndpoints = "DescribeDBClusterEndpoints" // DescribeDBClusterEndpointsRequest generates a "aws/request.Request" representing the @@ -4785,6 +4898,12 @@ func (c *RDS) DescribeDBClusterEndpointsRequest(input *DescribeDBClusterEndpoint Name: opDescribeDBClusterEndpoints, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -4835,6 +4954,58 @@ func (c *RDS) DescribeDBClusterEndpointsWithContext(ctx aws.Context, input *Desc return out, req.Send() } +// DescribeDBClusterEndpointsPages iterates over the pages of a DescribeDBClusterEndpoints operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBClusterEndpoints method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBClusterEndpoints operation. 
+// pageNum := 0 +// err := client.DescribeDBClusterEndpointsPages(params, +// func(page *rds.DescribeDBClusterEndpointsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeDBClusterEndpointsPages(input *DescribeDBClusterEndpointsInput, fn func(*DescribeDBClusterEndpointsOutput, bool) bool) error { + return c.DescribeDBClusterEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDBClusterEndpointsPagesWithContext same as DescribeDBClusterEndpointsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeDBClusterEndpointsPagesWithContext(ctx aws.Context, input *DescribeDBClusterEndpointsInput, fn func(*DescribeDBClusterEndpointsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDBClusterEndpointsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBClusterEndpointsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDBClusterEndpointsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDBClusterParameterGroups = "DescribeDBClusterParameterGroups" // DescribeDBClusterParameterGroupsRequest generates a "aws/request.Request" representing the @@ -4866,6 +5037,12 @@ func (c *RDS) DescribeDBClusterParameterGroupsRequest(input *DescribeDBClusterPa Name: opDescribeDBClusterParameterGroups, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -4921,6 +5098,58 @@ func (c *RDS) DescribeDBClusterParameterGroupsWithContext(ctx aws.Context, input return out, req.Send() } +// DescribeDBClusterParameterGroupsPages iterates over the pages of a DescribeDBClusterParameterGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBClusterParameterGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBClusterParameterGroups operation. +// pageNum := 0 +// err := client.DescribeDBClusterParameterGroupsPages(params, +// func(page *rds.DescribeDBClusterParameterGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeDBClusterParameterGroupsPages(input *DescribeDBClusterParameterGroupsInput, fn func(*DescribeDBClusterParameterGroupsOutput, bool) bool) error { + return c.DescribeDBClusterParameterGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDBClusterParameterGroupsPagesWithContext same as DescribeDBClusterParameterGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
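All of the Pages helpers introduced in this hunk share the same shape; as a sketch, collecting every DB cluster endpoint across pages could look like the following, assuming default credential and region resolution through the shared session.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	var endpoints []*rds.DBClusterEndpoint
	err := svc.DescribeDBClusterEndpointsPages(&rds.DescribeDBClusterEndpointsInput{},
		func(page *rds.DescribeDBClusterEndpointsOutput, lastPage bool) bool {
			endpoints = append(endpoints, page.DBClusterEndpoints...)
			return !lastPage // keep requesting pages until the Marker is exhausted
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d cluster endpoints\n", len(endpoints))
}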
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeDBClusterParameterGroupsPagesWithContext(ctx aws.Context, input *DescribeDBClusterParameterGroupsInput, fn func(*DescribeDBClusterParameterGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDBClusterParameterGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBClusterParameterGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDBClusterParameterGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDBClusterParameters = "DescribeDBClusterParameters" // DescribeDBClusterParametersRequest generates a "aws/request.Request" representing the @@ -4952,6 +5181,12 @@ func (c *RDS) DescribeDBClusterParametersRequest(input *DescribeDBClusterParamet Name: opDescribeDBClusterParameters, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -5006,6 +5241,58 @@ func (c *RDS) DescribeDBClusterParametersWithContext(ctx aws.Context, input *Des return out, req.Send() } +// DescribeDBClusterParametersPages iterates over the pages of a DescribeDBClusterParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBClusterParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBClusterParameters operation. +// pageNum := 0 +// err := client.DescribeDBClusterParametersPages(params, +// func(page *rds.DescribeDBClusterParametersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeDBClusterParametersPages(input *DescribeDBClusterParametersInput, fn func(*DescribeDBClusterParametersOutput, bool) bool) error { + return c.DescribeDBClusterParametersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDBClusterParametersPagesWithContext same as DescribeDBClusterParametersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeDBClusterParametersPagesWithContext(ctx aws.Context, input *DescribeDBClusterParametersInput, fn func(*DescribeDBClusterParametersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDBClusterParametersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBClusterParametersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDBClusterParametersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDBClusterSnapshotAttributes = "DescribeDBClusterSnapshotAttributes" // DescribeDBClusterSnapshotAttributesRequest generates a "aws/request.Request" representing the @@ -5129,6 +5416,12 @@ func (c *RDS) DescribeDBClusterSnapshotsRequest(input *DescribeDBClusterSnapshot Name: opDescribeDBClusterSnapshots, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -5183,6 +5476,58 @@ func (c *RDS) DescribeDBClusterSnapshotsWithContext(ctx aws.Context, input *Desc return out, req.Send() } +// DescribeDBClusterSnapshotsPages iterates over the pages of a DescribeDBClusterSnapshots operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBClusterSnapshots method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBClusterSnapshots operation. +// pageNum := 0 +// err := client.DescribeDBClusterSnapshotsPages(params, +// func(page *rds.DescribeDBClusterSnapshotsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeDBClusterSnapshotsPages(input *DescribeDBClusterSnapshotsInput, fn func(*DescribeDBClusterSnapshotsOutput, bool) bool) error { + return c.DescribeDBClusterSnapshotsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeDBClusterSnapshotsPagesWithContext same as DescribeDBClusterSnapshotsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeDBClusterSnapshotsPagesWithContext(ctx aws.Context, input *DescribeDBClusterSnapshotsInput, fn func(*DescribeDBClusterSnapshotsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeDBClusterSnapshotsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeDBClusterSnapshotsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeDBClusterSnapshotsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDBClusters = "DescribeDBClusters" // DescribeDBClustersRequest generates a "aws/request.Request" representing the @@ -6205,10 +6550,6 @@ func (c *RDS) DescribeDBProxiesRequest(input *DescribeDBProxiesInput) (req *requ // DescribeDBProxies API operation for Amazon Relational Database Service. // -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Returns information about DB proxies. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -6347,10 +6688,6 @@ func (c *RDS) DescribeDBProxyTargetGroupsRequest(input *DescribeDBProxyTargetGro // DescribeDBProxyTargetGroups API operation for Amazon Relational Database Service. // -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Returns information about DB proxy target groups, represented by DBProxyTargetGroup // data structures. // @@ -6497,10 +6834,6 @@ func (c *RDS) DescribeDBProxyTargetsRequest(input *DescribeDBProxyTargetsInput) // DescribeDBProxyTargets API operation for Amazon Relational Database Service. // -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Returns information about DBProxyTarget objects. This API supports pagination. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -7363,8 +7696,8 @@ func (c *RDS) DescribeEventCategoriesRequest(input *DescribeEventCategoriesInput // // Displays a list of categories for all event source types, or, if specified, // for a specified source type. You can see a list of the event categories and -// source types in the Events (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) -// topic in the Amazon RDS User Guide. +// source types in Events (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) +// in the Amazon RDS User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7585,11 +7918,13 @@ func (c *RDS) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Re // DescribeEvents API operation for Amazon Relational Database Service. // -// Returns events related to DB instances, DB security groups, DB snapshots, -// and DB parameter groups for the past 14 days. Events specific to a particular -// DB instance, DB security group, database snapshot, or DB parameter group -// can be obtained by providing the name as a parameter. By default, the past -// hour of events are returned. +// Returns events related to DB instances, DB clusters, DB parameter groups, +// DB security groups, DB snapshots, and DB cluster snapshots for the past 14 +// days. Events specific to a particular DB instances, DB clusters, DB parameter +// groups, DB security groups, DB snapshots, and DB cluster snapshots group +// can be obtained by providing the name as a parameter. +// +// By default, the past hour of events are returned. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8521,6 +8856,12 @@ func (c *RDS) DescribePendingMaintenanceActionsRequest(input *DescribePendingMai Name: opDescribePendingMaintenanceActions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -8570,6 +8911,58 @@ func (c *RDS) DescribePendingMaintenanceActionsWithContext(ctx aws.Context, inpu return out, req.Send() } +// DescribePendingMaintenanceActionsPages iterates over the pages of a DescribePendingMaintenanceActions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
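As the revised DescribeEvents documentation notes, only the past hour of events is returned unless a window is supplied. A hedged sketch requesting the last 24 hours of events for a single DB instance (the identifier is a placeholder):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	out, err := svc.DescribeEvents(&rds.DescribeEventsInput{
		SourceType:       aws.String("db-instance"),
		SourceIdentifier: aws.String("mydbinstance1"), // placeholder
		Duration:         aws.Int64(1440),             // minutes; last 24 hours instead of the default hour
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range out.Events {
		fmt.Println(aws.TimeValue(e.Date), aws.StringValue(e.Message))
	}
}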
+// +// See DescribePendingMaintenanceActions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePendingMaintenanceActions operation. +// pageNum := 0 +// err := client.DescribePendingMaintenanceActionsPages(params, +// func(page *rds.DescribePendingMaintenanceActionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribePendingMaintenanceActionsPages(input *DescribePendingMaintenanceActionsInput, fn func(*DescribePendingMaintenanceActionsOutput, bool) bool) error { + return c.DescribePendingMaintenanceActionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePendingMaintenanceActionsPagesWithContext same as DescribePendingMaintenanceActionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribePendingMaintenanceActionsPagesWithContext(ctx aws.Context, input *DescribePendingMaintenanceActionsInput, fn func(*DescribePendingMaintenanceActionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePendingMaintenanceActionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePendingMaintenanceActionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribePendingMaintenanceActionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeReservedDBInstances = "DescribeReservedDBInstances" // DescribeReservedDBInstancesRequest generates a "aws/request.Request" representing the @@ -8876,6 +9269,12 @@ func (c *RDS) DescribeSourceRegionsRequest(input *DescribeSourceRegionsInput) (r Name: opDescribeSourceRegions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, } if input == nil { @@ -8890,7 +9289,7 @@ func (c *RDS) DescribeSourceRegionsRequest(input *DescribeSourceRegionsInput) (r // DescribeSourceRegions API operation for Amazon Relational Database Service. // // Returns a list of the source AWS Regions where the current AWS Region can -// create a Read Replica or copy a DB snapshot from. This API action supports +// create a read replica or copy a DB snapshot from. This API action supports // pagination. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -8921,6 +9320,58 @@ func (c *RDS) DescribeSourceRegionsWithContext(ctx aws.Context, input *DescribeS return out, req.Send() } +// DescribeSourceRegionsPages iterates over the pages of a DescribeSourceRegions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSourceRegions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeSourceRegions operation. +// pageNum := 0 +// err := client.DescribeSourceRegionsPages(params, +// func(page *rds.DescribeSourceRegionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeSourceRegionsPages(input *DescribeSourceRegionsInput, fn func(*DescribeSourceRegionsOutput, bool) bool) error { + return c.DescribeSourceRegionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSourceRegionsPagesWithContext same as DescribeSourceRegionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeSourceRegionsPagesWithContext(ctx aws.Context, input *DescribeSourceRegionsInput, fn func(*DescribeSourceRegionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSourceRegionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSourceRegionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSourceRegionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeValidDBInstanceModifications = "DescribeValidDBInstanceModifications" // DescribeValidDBInstanceModificationsRequest generates a "aws/request.Request" representing the @@ -9396,6 +9847,14 @@ func (c *RDS) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req * // * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault" // DBClusterIdentifier doesn't refer to an existing DB cluster. // +// * ErrCodeDBProxyNotFoundFault "DBProxyNotFoundFault" +// The specified proxy name doesn't correspond to a proxy owned by your AWS +// accoutn in the specified AWS Region. +// +// * ErrCodeDBProxyTargetGroupNotFoundFault "DBProxyTargetGroupNotFoundFault" +// The specified target group isn't available for a proxy owned by your AWS +// account in the specified AWS Region. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ListTagsForResource func (c *RDS) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { req, out := c.ListTagsForResourceRequest(input) @@ -10009,16 +10468,19 @@ func (c *RDS) ModifyDBClusterSnapshotAttributeRequest(input *ModifyDBClusterSnap // as the AttributeName and use the ValuesToAdd parameter to add a list of IDs // of the AWS accounts that are authorized to restore the manual DB cluster // snapshot. Use the value all to make the manual DB cluster snapshot public, -// which means that it can be copied or restored by all AWS accounts. Do not -// add the all value for any manual DB cluster snapshots that contain private -// information that you don't want available to all AWS accounts. If a manual -// DB cluster snapshot is encrypted, it can be shared, but only by specifying -// a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't -// use all as a value for that parameter in this case. +// which means that it can be copied or restored by all AWS accounts. 
+// +// Don't add the all value for any manual DB cluster snapshots that contain +// private information that you don't want available to all AWS accounts. +// +// If a manual DB cluster snapshot is encrypted, it can be shared, but only +// by specifying a list of authorized AWS account IDs for the ValuesToAdd parameter. +// You can't use all as a value for that parameter in this case. // // To view which AWS accounts have access to copy or restore a manual DB cluster -// snapshot, or whether a manual DB cluster snapshot public or private, use -// the DescribeDBClusterSnapshotAttributes API action. +// snapshot, or whether a manual DB cluster snapshot is public or private, use +// the DescribeDBClusterSnapshotAttributes API action. The accounts are returned +// as values for the restore attribute. // // This action only applies to Aurora DB clusters. // @@ -10176,6 +10638,12 @@ func (c *RDS) ModifyDBInstanceRequest(input *ModifyDBInstanceInput) (req *reques // // * ErrCodeBackupPolicyNotFoundFault "BackupPolicyNotFoundFault" // +// * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" +// An error occurred accessing an AWS KMS key. +// +// * ErrCodeInvalidDBClusterStateFault "InvalidDBClusterStateFault" +// The requested operation can't be performed while the cluster is in this state. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ModifyDBInstance func (c *RDS) ModifyDBInstance(input *ModifyDBInstanceInput) (*ModifyDBInstanceOutput, error) { req, out := c.ModifyDBInstanceRequest(input) @@ -10343,10 +10811,6 @@ func (c *RDS) ModifyDBProxyRequest(input *ModifyDBProxyInput) (req *request.Requ // ModifyDBProxy API operation for Amazon Relational Database Service. // -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Changes the settings for an existing DB proxy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -10434,10 +10898,6 @@ func (c *RDS) ModifyDBProxyTargetGroupRequest(input *ModifyDBProxyTargetGroupInp // ModifyDBProxyTargetGroup API operation for Amazon Relational Database Service. // -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Modifies the properties of a DBProxyTargetGroup. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -10525,8 +10985,8 @@ func (c *RDS) ModifyDBSnapshotRequest(input *ModifyDBSnapshotInput) (req *reques // ModifyDBSnapshot API operation for Amazon Relational Database Service. // -// Updates a manual DB snapshot, which can be encrypted or not encrypted, with -// a new engine version. +// Updates a manual DB snapshot with a new engine version. The snapshot can +// be encrypted or unencrypted, but not shared or public. // // Amazon RDS supports upgrading DB snapshots for MySQL, Oracle, and PostgreSQL. // @@ -10614,16 +11074,18 @@ func (c *RDS) ModifyDBSnapshotAttributeRequest(input *ModifyDBSnapshotAttributeI // the AttributeName and use the ValuesToAdd parameter to add a list of IDs // of the AWS accounts that are authorized to restore the manual DB snapshot. // Uses the value all to make the manual DB snapshot public, which means it -// can be copied or restored by all AWS accounts. Do not add the all value for -// any manual DB snapshots that contain private information that you don't want -// available to all AWS accounts. 
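The ModifyDBClusterSnapshotAttribute description above explains sharing through the restore attribute; a minimal sketch of granting one other account access to a manual DB cluster snapshot (snapshot name and account ID are placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	// Add a single account ID to the "restore" attribute of a manual DB cluster snapshot.
	_, err := svc.ModifyDBClusterSnapshotAttribute(&rds.ModifyDBClusterSnapshotAttributeInput{
		DBClusterSnapshotIdentifier: aws.String("my-manual-cluster-snapshot"),
		AttributeName:               aws.String("restore"),
		ValuesToAdd:                 aws.StringSlice([]string{"123456789012"}),
	})
	if err != nil {
		log.Fatal(err)
	}
}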
If the manual DB snapshot is encrypted, it -// can be shared, but only by specifying a list of authorized AWS account IDs -// for the ValuesToAdd parameter. You can't use all as a value for that parameter -// in this case. +// can be copied or restored by all AWS accounts. +// +// Don't add the all value for any manual DB snapshots that contain private +// information that you don't want available to all AWS accounts. +// +// If the manual DB snapshot is encrypted, it can be shared, but only by specifying +// a list of authorized AWS account IDs for the ValuesToAdd parameter. You can't +// use all as a value for that parameter in this case. // // To view which AWS accounts have access to copy or restore a manual DB snapshot, // or whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes -// API action. +// API action. The accounts are returned as values for the restore attribute. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10809,10 +11271,9 @@ func (c *RDS) ModifyEventSubscriptionRequest(input *ModifyEventSubscriptionInput // a subscription, use the AddSourceIdentifierToSubscription and RemoveSourceIdentifierFromSubscription // calls. // -// You can see a list of the event categories for a given SourceType in the -// Events (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) -// topic in the Amazon RDS User Guide or by using the DescribeEventCategories -// action. +// You can see a list of the event categories for a given source type (SourceType) +// in Events (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) +// in the Amazon RDS User Guide or by using the DescribeEventCategories operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11074,15 +11535,15 @@ func (c *RDS) PromoteReadReplicaRequest(input *PromoteReadReplicaInput) (req *re // PromoteReadReplica API operation for Amazon Relational Database Service. // -// Promotes a Read Replica DB instance to a standalone DB instance. +// Promotes a read replica DB instance to a standalone DB instance. // // * Backup duration is a function of the amount of changes to the database -// since the previous backup. If you plan to promote a Read Replica to a +// since the previous backup. If you plan to promote a read replica to a // standalone instance, we recommend that you enable backups and complete -// at least one backup prior to promotion. In addition, a Read Replica cannot +// at least one backup prior to promotion. In addition, a read replica cannot // be promoted to a standalone instance when it is in the backing-up status. -// If you have enabled backups on your Read Replica, configure the automated -// backup window so that daily backups do not interfere with Read Replica +// If you have enabled backups on your read replica, configure the automated +// backup window so that daily backups do not interfere with read replica // promotion. // // * This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. @@ -11167,7 +11628,7 @@ func (c *RDS) PromoteReadReplicaDBClusterRequest(input *PromoteReadReplicaDBClus // PromoteReadReplicaDBCluster API operation for Amazon Relational Database Service. // -// Promotes a Read Replica DB cluster to a standalone DB cluster. 
+// Promotes a read replica DB cluster to a standalone DB cluster. // // This action only applies to Aurora DB clusters. // @@ -11428,10 +11889,6 @@ func (c *RDS) RegisterDBProxyTargetsRequest(input *RegisterDBProxyTargetsInput) // RegisterDBProxyTargets API operation for Amazon Relational Database Service. // -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Associate one or more DBProxyTarget data structures with a DBProxyTargetGroup. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -11469,6 +11926,11 @@ func (c *RDS) RegisterDBProxyTargetsRequest(input *RegisterDBProxyTargetsInput) // * ErrCodeInvalidDBProxyStateFault "InvalidDBProxyStateFault" // The requested operation can't be performed while the proxy is in this state. // +// * ErrCodeInsufficientAvailableIPsInSubnetFault "InsufficientAvailableIPsInSubnetFault" +// The requested operation can't be performed because there aren't enough available +// IP addresses in the proxy's subnets. Add more CIDR blocks to the VPC or remove +// IP address that aren't required from the subnets. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/RegisterDBProxyTargets func (c *RDS) RegisterDBProxyTargets(input *RegisterDBProxyTargetsInput) (*RegisterDBProxyTargetsOutput, error) { req, out := c.RegisterDBProxyTargetsRequest(input) @@ -11909,6 +12371,14 @@ func (c *RDS) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) // * ErrCodeDBClusterNotFoundFault "DBClusterNotFoundFault" // DBClusterIdentifier doesn't refer to an existing DB cluster. // +// * ErrCodeDBProxyNotFoundFault "DBProxyNotFoundFault" +// The specified proxy name doesn't correspond to a proxy owned by your AWS +// accoutn in the specified AWS Region. +// +// * ErrCodeDBProxyTargetGroupNotFoundFault "DBProxyTargetGroupNotFoundFault" +// The specified target group isn't available for a proxy owned by your AWS +// account in the specified AWS Region. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/RemoveTagsFromResource func (c *RDS) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) { req, out := c.RemoveTagsFromResourceRequest(input) @@ -12163,13 +12633,23 @@ func (c *RDS) RestoreDBClusterFromS3Request(input *RestoreDBClusterFromS3Input) // RestoreDBClusterFromS3 API operation for Amazon Relational Database Service. // -// Creates an Amazon Aurora DB cluster from data stored in an Amazon S3 bucket. -// Amazon RDS must be authorized to access the Amazon S3 bucket and the data -// must be created using the Percona XtraBackup utility as described in Migrating -// Data to an Amazon Aurora MySQL DB Cluster (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Migrating.html) +// Creates an Amazon Aurora DB cluster from MySQL data stored in an Amazon S3 +// bucket. Amazon RDS must be authorized to access the Amazon S3 bucket and +// the data must be created using the Percona XtraBackup utility as described +// in Migrating Data from MySQL by Using an Amazon S3 Bucket (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Migrating.ExtMySQL.html#AuroraMySQL.Migrating.ExtMySQL.S3) // in the Amazon Aurora User Guide. // -// This action only applies to Aurora DB clusters. +// This action only restores the DB cluster, not the DB instances for that DB +// cluster. 
You must invoke the CreateDBInstance action to create DB instances +// for the restored DB cluster, specifying the identifier of the restored DB +// cluster in DBClusterIdentifier. You can create DB instances only after the +// RestoreDBClusterFromS3 action has completed and the DB cluster is available. +// +// For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) +// in the Amazon Aurora User Guide. +// +// This action only applies to Aurora DB clusters. The source DB engine must +// be MySQL. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -12296,16 +12776,19 @@ func (c *RDS) RestoreDBClusterFromSnapshotRequest(input *RestoreDBClusterFromSna // RestoreDBClusterFromSnapshot API operation for Amazon Relational Database Service. // -// Creates a new DB cluster from a DB snapshot or DB cluster snapshot. -// -// If a DB snapshot is specified, the target DB cluster is created from the -// source DB snapshot with a default configuration and default security group. +// Creates a new DB cluster from a DB snapshot or DB cluster snapshot. This +// action only applies to Aurora DB clusters. // -// If a DB cluster snapshot is specified, the target DB cluster is created from -// the source DB cluster restore point with the same configuration as the original -// source DB cluster. If you don't specify a security group, the new DB cluster +// The target DB cluster is created from the source snapshot with a default +// configuration. If you don't specify a security group, the new DB cluster // is associated with the default security group. // +// This action only restores the DB cluster, not the DB instances for that DB +// cluster. You must invoke the CreateDBInstance action to create DB instances +// for the restored DB cluster, specifying the identifier of the restored DB +// cluster in DBClusterIdentifier. You can create DB instances only after the +// RestoreDBClusterFromSnapshot action has completed and the DB cluster is available. +// // For more information on Amazon Aurora, see What Is Amazon Aurora? (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/CHAP_AuroraOverview.html) // in the Amazon Aurora User Guide. // @@ -13885,17 +14368,22 @@ func (c *RDS) StopDBInstanceWithContext(ctx aws.Context, input *StopDBInstanceIn // * EventSubscriptions - The number of event subscriptions per account. // The used value is the count of the event subscriptions in the account. // -// * ManualSnapshots - The number of manual DB snapshots per account. The -// used value is the count of the manual DB snapshots in the account. +// * ManualClusterSnapshots - The number of manual DB cluster snapshots per +// account. The used value is the count of the manual DB cluster snapshots +// in the account. +// +// * ManualSnapshots - The number of manual DB instance snapshots per account. +// The used value is the count of the manual DB instance snapshots in the +// account. // // * OptionGroups - The number of DB option groups per account, excluding // default option groups. The used value is the count of nondefault DB option // groups in the account. // -// * ReadReplicasPerMaster - The number of Read Replicas per DB instance. -// The used value is the highest number of Read Replicas for a DB instance +// * ReadReplicasPerMaster - The number of read replicas per DB instance. 
+// The used value is the highest number of read replicas for a DB instance // in the account. Other DB instances in the account might have a lower number -// of Read Replicas. +// of read replicas. // // * ReservedDBInstances - The number of reserved DB instances per account. // The used value is the count of the active reserved DB instances in the @@ -14116,17 +14604,23 @@ type AddSourceIdentifierToSubscriptionInput struct { // // Constraints: // - // * If the source type is a DB instance, then a DBInstanceIdentifier must + // * If the source type is a DB instance, a DBInstanceIdentifier value must // be supplied. // - // * If the source type is a DB security group, a DBSecurityGroupName must + // * If the source type is a DB cluster, a DBClusterIdentifier value must // be supplied. // - // * If the source type is a DB parameter group, a DBParameterGroupName must + // * If the source type is a DB parameter group, a DBParameterGroupName value + // must be supplied. + // + // * If the source type is a DB security group, a DBSecurityGroupName value + // must be supplied. + // + // * If the source type is a DB snapshot, a DBSnapshotIdentifier value must // be supplied. // - // * If the source type is a DB snapshot, a DBSnapshotIdentifier must be - // supplied. + // * If the source type is a DB cluster snapshot, a DBClusterSnapshotIdentifier + // value must be supplied. // // SourceIdentifier is a required field SourceIdentifier *string `type:"string" required:"true"` @@ -14782,16 +15276,16 @@ type CancelExportTaskOutput struct { // The data exported from the snapshot. Valid values are the following: // - // * database - Export all the data of the snapshot. + // * database - Export all the data from a specified database. // - // * database.table [table-name] - Export a table of the snapshot. + // * database.table table-name - Export a table of the snapshot. This format + // is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL. // - // * database.schema [schema-name] - Export a database schema of the snapshot. - // This value isn't valid for RDS for MySQL, RDS for MariaDB, or Aurora MySQL. + // * database.schema schema-name - Export a database schema of the snapshot. + // This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. // - // * database.schema.table [table-name] - Export a table of the database - // schema. This value isn't valid for RDS for MySQL, RDS for MariaDB, or - // Aurora MySQL. + // * database.schema.table table-name - Export a table of the database schema. + // This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. ExportOnly []*string `type:"list"` // A unique identifier for the snapshot export task. This ID isn't an identifier @@ -15070,9 +15564,15 @@ func (s *CharacterSet) SetCharacterSetName(v string) *CharacterSet { // // The EnableLogTypes and DisableLogTypes arrays determine which logs will be // exported (or not exported) to CloudWatch Logs. The values within these arrays -// depend on the DB engine being used. For more information, see Publishing -// Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) +// depend on the DB engine being used. 
+// +// For more information about exporting CloudWatch Logs for Amazon RDS DB instances, +// see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) // in the Amazon RDS User Guide. +// +// For more information about exporting CloudWatch Logs for Amazon Aurora DB +// clusters, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) +// in the Amazon Aurora User Guide. type CloudwatchLogsExportConfiguration struct { _ struct{} `type:"structure"` @@ -15105,10 +15605,6 @@ func (s *CloudwatchLogsExportConfiguration) SetEnableLogTypes(v []*string) *Clou return s } -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Specifies the settings that control the size and behavior of the connection // pool associated with a DBProxyTargetGroup. type ConnectionPoolConfiguration struct { @@ -15202,10 +15698,6 @@ func (s *ConnectionPoolConfiguration) SetSessionPinningFilters(v []*string) *Con return s } -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Displays the settings that control the size and behavior of the connection // pool associated with a DBProxyTarget. type ConnectionPoolConfigurationInfo struct { @@ -15451,7 +15943,7 @@ type CopyDBClusterSnapshotInput struct { // DB cluster snapshot from another AWS Region. Don't specify PreSignedUrl when // you are copying an encrypted DB cluster snapshot in the same AWS Region. // - // The pre-signed URL must be a valid request for the CopyDBSClusterSnapshot + // The pre-signed URL must be a valid request for the CopyDBClusterSnapshot // API action that can be executed in the source AWS Region that contains the // encrypted DB cluster snapshot to be copied. The pre-signed URL request must // contain the following parameter values: @@ -15998,21 +16490,12 @@ func (s *CopyDBSnapshotOutput) SetDBSnapshot(v *DBSnapshot) *CopyDBSnapshotOutpu type CopyOptionGroupInput struct { _ struct{} `type:"structure"` - // The identifier or ARN for the source option group. For information about - // creating an ARN, see Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) - // in the Amazon RDS User Guide. + // The identifier for the source option group. // // Constraints: // // * Must specify a valid option group. // - // * If the source option group is in the same AWS Region as the copy, specify - // a valid option group identifier, for example my-option-group, or a valid - // ARN. - // - // * If the source option group is in a different AWS Region than the copy, - // specify a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options. - // // SourceOptionGroupIdentifier is a required field SourceOptionGroupIdentifier *string `type:"string" required:"true"` @@ -16364,7 +16847,9 @@ type CreateDBClusterEndpointOutput struct { StaticMembers []*string `type:"list"` // The current status of the endpoint. One of: creating, available, deleting, - // modifying. + // inactive, modifying. 
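The CloudwatchLogsExportConfiguration documentation above says the EnableLogTypes and DisableLogTypes arrays control which logs are exported; a hedged sketch of applying such a configuration to a MySQL instance through ModifyDBInstance (the instance identifier is a placeholder):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	// Start exporting the error and slow-query logs and stop exporting the general log.
	_, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
		DBInstanceIdentifier: aws.String("mydbinstance1"),
		CloudwatchLogsExportConfiguration: &rds.CloudwatchLogsExportConfiguration{
			EnableLogTypes:  aws.StringSlice([]string{"error", "slowquery"}),
			DisableLogTypes: aws.StringSlice([]string{"general"}),
		},
		ApplyImmediately: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}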
The inactive state applies to an endpoint that can't + // be used for a certain kind of cluster, such as a writer endpoint for a read-only + // secondary cluster in a global database. Status *string `type:"string"` } @@ -16450,6 +16935,8 @@ type CreateDBClusterInput struct { // The target backtrack window, in seconds. To disable backtracking, set this // value to 0. // + // Currently, Backtrack is only supported for Aurora MySQL DB clusters. + // // Default: 0 // // Constraints: @@ -16525,7 +17012,7 @@ type CreateDBClusterInput struct { // // For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication // to authenticate users that connect to the DB cluster. For more information, - // see Using Kerberos Authentication for Aurora MySQL (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurmysql-kerberos.html) + // see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) // in the Amazon Aurora User Guide. Domain *string `type:"string"` @@ -16537,8 +17024,23 @@ type CreateDBClusterInput struct { // Logs. The values in the list depend on the DB engine being used. For more // information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) // in the Amazon Aurora User Guide. + // + // Aurora MySQL + // + // Possible values are audit, error, general, and slowquery. + // + // Aurora PostgreSQL + // + // Possible values are postgresql and upgrade. EnableCloudwatchLogsExports []*string `type:"list"` + // A value that indicates whether to enable write operations to be forwarded + // from this cluster to the primary cluster in an Aurora global database. The + // resulting changes are replicated back to this cluster. This parameter only + // applies to DB clusters that are secondary clusters in an Aurora global database. + // By default, Aurora disallows write operations for secondary clusters. + EnableGlobalWriteForwarding *bool `type:"boolean"` + // A value that indicates whether to enable the HTTP endpoint for an Aurora // Serverless DB cluster. By default, the HTTP endpoint is disabled. // @@ -16565,9 +17067,21 @@ type CreateDBClusterInput struct { // Engine is a required field Engine *string `type:"string" required:"true"` - // The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, + // The DB engine mode of the DB cluster, either provisioned serverless, parallelquery, // global, or multimaster. // + // The parallelquery engine mode isn't required for Aurora MySQL version 1.23 + // and higher 1.x versions, and version 2.09 and higher 2.x versions. + // + // The global engine mode isn't required for Aurora MySQL version 1.22 and higher + // 1.x versions, and global engine mode isn't required for any 2.x versions. + // + // The multimaster engine mode only applies for DB clusters created with Aurora + // MySQL version 5.6.10a. + // + // For Aurora PostgreSQL, the global engine mode isn't required, and both the + // parallelquery and the multimaster engine modes currently aren't supported. + // // Limitations and requirements apply to some DB engine modes. 
For more information, // see the following sections in the Amazon Aurora User Guide: // @@ -16575,7 +17089,7 @@ type CreateDBClusterInput struct { // // * Limitations of Parallel Query (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-parallel-query.html#aurora-mysql-parallel-query-limitations) // - // * Requirements for Aurora Global Databases (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations) + // * Limitations of Aurora Global Databases (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html#aurora-global-database.limitations) // // * Limitations of Multi-Master Clusters (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-multi-master.html#aurora-multi-master-limitations) EngineMode *string `type:"string"` @@ -16629,9 +17143,9 @@ type CreateDBClusterInput struct { // AWS KMS creates the default encryption key for your AWS account. Your AWS // account has a different default encryption key for each AWS Region. // - // If you create a Read Replica of an encrypted DB cluster in another AWS Region, + // If you create a read replica of an encrypted DB cluster in another AWS Region, // you must set KmsKeyId to a KMS key ID that is valid in the destination AWS - // Region. This key is used to encrypt the Read Replica in that AWS Region. + // Region. This key is used to encrypt the read replica in that AWS Region. KmsKeyId *string `type:"string"` // The password for the master database user. This password can contain any @@ -16680,7 +17194,7 @@ type CreateDBClusterInput struct { // called in the destination AWS Region, and the action contained in the // pre-signed URL. // - // * DestinationRegion - The name of the AWS Region that Aurora Read Replica + // * DestinationRegion - The name of the AWS Region that Aurora read replica // will be created in. // // * ReplicationSourceIdentifier - The DB cluster identifier for the encrypted @@ -16735,7 +17249,7 @@ type CreateDBClusterInput struct { PreferredMaintenanceWindow *string `type:"string"` // The Amazon Resource Name (ARN) of the source DB instance or DB cluster if - // this DB cluster is created as a Read Replica. + // this DB cluster is created as a read replica. ReplicationSourceIdentifier *string `type:"string"` // For DB clusters in serverless DB engine mode, the scaling properties of the @@ -16867,6 +17381,12 @@ func (s *CreateDBClusterInput) SetEnableCloudwatchLogsExports(v []*string) *Crea return s } +// SetEnableGlobalWriteForwarding sets the EnableGlobalWriteForwarding field's value. +func (s *CreateDBClusterInput) SetEnableGlobalWriteForwarding(v bool) *CreateDBClusterInput { + s.EnableGlobalWriteForwarding = &v + return s +} + // SetEnableHttpEndpoint sets the EnableHttpEndpoint field's value. func (s *CreateDBClusterInput) SetEnableHttpEndpoint(v bool) *CreateDBClusterInput { s.EnableHttpEndpoint = &v @@ -17347,7 +17867,7 @@ type CreateDBInstanceInput struct { // // * Must be a value from 0 to 35 // - // * Can't be set to 0 if the DB instance is a source to Read Replicas + // * Can't be set to 0 if the DB instance is a source to read replicas BackupRetentionPeriod *int64 `type:"integer"` // For supported engines, indicates that the DB instance should be associated @@ -17407,6 +17927,9 @@ type CreateDBInstanceInput struct { // // * Must contain 1 to 64 letters or numbers. // + // * Must begin with a letter. Subsequent characters can be letters, underscores, + // or digits (0-9). 
+ // // * Can't be a word reserved by the specified database engine // // MariaDB @@ -17418,6 +17941,9 @@ type CreateDBInstanceInput struct { // // * Must contain 1 to 64 letters or numbers. // + // * Must begin with a letter. Subsequent characters can be letters, underscores, + // or digits (0-9). + // // * Can't be a word reserved by the specified database engine // // PostgreSQL @@ -17430,8 +17956,8 @@ type CreateDBInstanceInput struct { // // * Must contain 1 to 63 letters, numbers, or underscores. // - // * Must begin with a letter or an underscore. Subsequent characters can - // be letters, underscores, or digits (0-9). + // * Must begin with a letter. Subsequent characters can be letters, underscores, + // or digits (0-9). // // * Can't be a word reserved by the specified database engine // @@ -17501,18 +18027,10 @@ type CreateDBInstanceInput struct { DeletionProtection *bool `type:"boolean"` // The Active Directory directory ID to create the DB instance in. Currently, - // only Microsoft SQL Server and Oracle DB instances can be created in an Active - // Directory Domain. + // only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can + // be created in an Active Directory Domain. // - // For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication - // to authenticate users that connect to the DB instance. For more information, - // see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft - // SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_SQLServerWinAuth.html) - // in the Amazon RDS User Guide. - // - // For Oracle DB instance, Amazon RDS can use Kerberos Authentication to authenticate - // users that connect to the DB instance. For more information, see Using Kerberos - // Authentication with Amazon RDS for Oracle (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-kerberos.html) + // For more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) // in the Amazon RDS User Guide. Domain *string `type:"string"` @@ -17524,6 +18042,30 @@ type CreateDBInstanceInput struct { // Logs. The values in the list depend on the DB engine being used. For more // information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) // in the Amazon Relational Database Service User Guide. + // + // Amazon Aurora + // + // Not applicable. CloudWatch Logs exports are managed by the DB cluster. + // + // MariaDB + // + // Possible values are audit, error, general, and slowquery. + // + // Microsoft SQL Server + // + // Possible values are agent and error. + // + // MySQL + // + // Possible values are audit, error, general, and slowquery. + // + // Oracle + // + // Possible values are alert, audit, listener, and trace. + // + // PostgreSQL + // + // Possible values are postgresql and upgrade. 
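// Usage sketch for CreateDBInstanceInput: creating a PostgreSQL instance with the
// log types listed above exported to CloudWatch Logs. The identifier, instance
// class, and credentials are placeholders, not values taken from this change.
//
//   import (
//       "github.com/aws/aws-sdk-go/aws"
//       "github.com/aws/aws-sdk-go/service/rds"
//   )
//
//   func createPostgresInstance(client *rds.RDS) error {
//       _, err := client.CreateDBInstance(&rds.CreateDBInstanceInput{
//           DBInstanceIdentifier: aws.String("example-postgres"),
//           Engine:               aws.String("postgres"),
//           DBInstanceClass:      aws.String("db.m5.large"),
//           AllocatedStorage:     aws.Int64(100),
//           MasterUsername:       aws.String("exampleadmin"),
//           MasterUserPassword:   aws.String("replace-with-a-strong-password"),
//           // Log types valid for PostgreSQL, per the field documentation above.
//           EnableCloudwatchLogsExports: aws.StringSlice([]string{"postgresql", "upgrade"}),
//       })
//       return err
//   }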
EnableCloudwatchLogsExports []*string `type:"list"` // A value that indicates whether to enable mapping of AWS Identity and Access @@ -17621,7 +18163,7 @@ type CreateDBInstanceInput struct { // // Microsoft SQL Server // - // See Version and Feature Support on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.FeatureSupport) + // See Microsoft SQL Server Versions on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.VersionSupport) // in the Amazon RDS User Guide. // // MySQL @@ -17796,6 +18338,9 @@ type CreateDBInstanceInput struct { // deployment. MultiAZ *bool `type:"boolean"` + // The name of the NCHAR character set for the Oracle DB instance. + NcharCharacterSetName *string `type:"string"` + // Indicates that the DB instance should be associated with the specified option // group. // @@ -17824,7 +18369,7 @@ type CreateDBInstanceInput struct { // // Default: 3306 // - // Valid Values: 1150-65535 + // Valid values: 1150-65535 // // Type: Integer // @@ -17832,7 +18377,7 @@ type CreateDBInstanceInput struct { // // Default: 3306 // - // Valid Values: 1150-65535 + // Valid values: 1150-65535 // // Type: Integer // @@ -17840,7 +18385,7 @@ type CreateDBInstanceInput struct { // // Default: 5432 // - // Valid Values: 1150-65535 + // Valid values: 1150-65535 // // Type: Integer // @@ -17848,20 +18393,20 @@ type CreateDBInstanceInput struct { // // Default: 1521 // - // Valid Values: 1150-65535 + // Valid values: 1150-65535 // // SQL Server // // Default: 1433 // - // Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 through - // 49156. + // Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and + // 49152-49156. // // Amazon Aurora // // Default: 3306 // - // Valid Values: 1150-65535 + // Valid values: 1150-65535 // // Type: Integer Port *int64 `type:"integer"` @@ -17920,10 +18465,16 @@ type CreateDBInstanceInput struct { // Valid Values: 0 - 15 PromotionTier *int64 `type:"integer"` - // A value that indicates whether the DB instance is publicly accessible. When - // the DB instance is publicly accessible, it is an Internet-facing instance - // with a publicly resolvable DNS name, which resolves to a public IP address. - // When the DB instance isn't publicly accessible, it is an internal instance + // A value that indicates whether the DB instance is publicly accessible. + // + // When the DB instance is publicly accessible, its DNS endpoint resolves to + // the private IP address from within the DB instance's VPC, and to the public + // IP address from outside of the DB instance's VPC. Access to the DB instance + // is ultimately controlled by the security group it uses, and that public access + // is not permitted if the security group assigned to the DB instance doesn't + // permit it. + // + // When the DB instance isn't publicly accessible, it is an internal DB instance // with a DNS name that resolves to a private IP address. // // Default: The default behavior varies depending on whether DBSubnetGroupName @@ -18199,6 +18750,12 @@ func (s *CreateDBInstanceInput) SetMultiAZ(v bool) *CreateDBInstanceInput { return s } +// SetNcharCharacterSetName sets the NcharCharacterSetName field's value. +func (s *CreateDBInstanceInput) SetNcharCharacterSetName(v string) *CreateDBInstanceInput { + s.NcharCharacterSetName = &v + return s +} + // SetOptionGroupName sets the OptionGroupName field's value. 
func (s *CreateDBInstanceInput) SetOptionGroupName(v string) *CreateDBInstanceInput { s.OptionGroupName = &v @@ -18324,12 +18881,12 @@ type CreateDBInstanceReadReplicaInput struct { _ struct{} `type:"structure"` // A value that indicates whether minor engine upgrades are applied automatically - // to the Read Replica during the maintenance window. + // to the read replica during the maintenance window. // // Default: Inherits from the source DB instance AutoMinorVersionUpgrade *bool `type:"boolean"` - // The Availability Zone (AZ) where the Read Replica will be created. + // The Availability Zone (AZ) where the read replica will be created. // // Default: A random, system-chosen Availability Zone in the endpoint's AWS // Region. @@ -18337,11 +18894,11 @@ type CreateDBInstanceReadReplicaInput struct { // Example: us-east-1d AvailabilityZone *string `type:"string"` - // A value that indicates whether to copy all tags from the Read Replica to - // snapshots of the Read Replica. By default, tags are not copied. + // A value that indicates whether to copy all tags from the read replica to + // snapshots of the read replica. By default, tags are not copied. CopyTagsToSnapshot *bool `type:"boolean"` - // The compute and memory capacity of the Read Replica, for example, db.m4.large. + // The compute and memory capacity of the read replica, for example, db.m4.large. // Not all DB instance classes are available in all AWS Regions, or for all // database engines. For the full list of DB instance classes, and availability // for your engine, see DB Instance Class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) @@ -18350,7 +18907,7 @@ type CreateDBInstanceReadReplicaInput struct { // Default: Inherits from the source DB instance. DBInstanceClass *string `type:"string"` - // The DB instance identifier of the Read Replica. This identifier is the unique + // The DB instance identifier of the read replica. This identifier is the unique // key that identifies a DB instance. This parameter is stored as a lowercase // string. // @@ -18360,9 +18917,9 @@ type CreateDBInstanceReadReplicaInput struct { // The name of the DB parameter group to associate with this DB instance. // // If you do not specify a value for DBParameterGroupName, then Amazon RDS uses - // the DBParameterGroup of source DB instance for a same region Read Replica, + // the DBParameterGroup of source DB instance for a same region read replica, // or the default DBParameterGroup for the specified DB engine for a cross region - // Read Replica. + // read replica. // // Currently, specifying a parameter group for this operation is only supported // for Oracle DB instances. @@ -18390,10 +18947,10 @@ type CreateDBInstanceReadReplicaInput struct { // * The specified DB subnet group must be in the same AWS Region in which // the operation is running. // - // * All Read Replicas in one AWS Region that are created from the same source + // * All read replicas in one AWS Region that are created from the same source // DB instance must either:> Specify DB subnet groups from the same VPC. - // All these Read Replicas are created in the same VPC. Not specify a DB - // subnet group. All these Read Replicas are created outside of any VPC. + // All these read replicas are created in the same VPC. Not specify a DB + // subnet group. All these read replicas are created outside of any VPC. 
// // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` @@ -18407,11 +18964,11 @@ type CreateDBInstanceReadReplicaInput struct { // DestinationRegion is used for presigning the request to a given region. DestinationRegion *string `type:"string"` - // The Active Directory directory ID to create the DB instance in. + // The Active Directory directory ID to create the DB instance in. Currently, + // only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can + // be created in an Active Directory Domain. // - // For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate - // users that connect to the DB instance. For more information, see Using Kerberos - // Authentication with Amazon RDS for Oracle (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-kerberos.html) + // For more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) // in the Amazon RDS User Guide. Domain *string `type:"string"` @@ -18434,8 +18991,8 @@ type CreateDBInstanceReadReplicaInput struct { // in the Amazon RDS User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` - // A value that indicates whether to enable Performance Insights for the Read - // Replica. + // A value that indicates whether to enable Performance Insights for the read + // replica. // // For more information, see Using Amazon Performance Insights (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html) // in the Amazon RDS User Guide. @@ -18445,24 +19002,24 @@ type CreateDBInstanceReadReplicaInput struct { // initially allocated for the DB instance. Iops *int64 `type:"integer"` - // The AWS KMS key ID for an encrypted Read Replica. The KMS key ID is the Amazon + // The AWS KMS key ID for an encrypted read replica. The KMS key ID is the Amazon // Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS // encryption key. // - // If you create an encrypted Read Replica in the same AWS Region as the source + // If you create an encrypted read replica in the same AWS Region as the source // DB instance, then you do not have to specify a value for this parameter. - // The Read Replica is encrypted with the same KMS key as the source DB instance. + // The read replica is encrypted with the same KMS key as the source DB instance. // - // If you create an encrypted Read Replica in a different AWS Region, then you + // If you create an encrypted read replica in a different AWS Region, then you // must specify a KMS key for the destination AWS Region. KMS encryption keys // are specific to the AWS Region that they are created in, and you can't use // encryption keys from one AWS Region in another AWS Region. // - // You can't create an encrypted Read Replica from an unencrypted DB instance. + // You can't create an encrypted read replica from an unencrypted DB instance. KmsKeyId *string `type:"string"` // The interval, in seconds, between points when Enhanced Monitoring metrics - // are collected for the Read Replica. To disable collecting Enhanced Monitoring + // are collected for the read replica. To disable collecting Enhanced Monitoring // metrics, specify 0. The default is 0. // // If MonitoringRoleArn is specified, then you must also set MonitoringInterval @@ -18481,16 +19038,19 @@ type CreateDBInstanceReadReplicaInput struct { // a MonitoringRoleArn value. 
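// Usage sketch for CreateDBInstanceReadReplicaInput: a same-region read replica with
// Performance Insights and Enhanced Monitoring enabled. The identifiers and the
// monitoring role ARN are placeholders; an existing IAM role is assumed.
//
//   import (
//       "github.com/aws/aws-sdk-go/aws"
//       "github.com/aws/aws-sdk-go/service/rds"
//   )
//
//   func createReadReplica(client *rds.RDS) error {
//       _, err := client.CreateDBInstanceReadReplica(&rds.CreateDBInstanceReadReplicaInput{
//           DBInstanceIdentifier:       aws.String("example-replica"),
//           SourceDBInstanceIdentifier: aws.String("example-primary"),
//           DBInstanceClass:            aws.String("db.m5.large"),
//           CopyTagsToSnapshot:         aws.Bool(true),
//           EnablePerformanceInsights:  aws.Bool(true),
//           // Enhanced Monitoring requires both an interval and a role ARN.
//           MonitoringInterval: aws.Int64(60),
//           MonitoringRoleArn:  aws.String("arn:aws:iam::123456789012:role/example-rds-monitoring"),
//       })
//       return err
//   }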
MonitoringRoleArn *string `type:"string"` - // A value that indicates whether the Read Replica is in a Multi-AZ deployment. + // A value that indicates whether the read replica is in a Multi-AZ deployment. // - // You can create a Read Replica as a Multi-AZ DB instance. RDS creates a standby + // You can create a read replica as a Multi-AZ DB instance. RDS creates a standby // of your replica in another Availability Zone for failover support for the - // replica. Creating your Read Replica as a Multi-AZ DB instance is independent + // replica. Creating your read replica as a Multi-AZ DB instance is independent // of whether the source database is a Multi-AZ DB instance. MultiAZ *bool `type:"boolean"` // The option group the DB instance is associated with. If omitted, the option // group associated with the source instance is used. + // + // For SQL Server, you must use the option group associated with the source + // instance. OptionGroupName *string `type:"string"` // The AWS KMS key identifier for encryption of Performance Insights data. The @@ -18517,16 +19077,16 @@ type CreateDBInstanceReadReplicaInput struct { // The URL that contains a Signature Version 4 signed request for the CreateDBInstanceReadReplica // API action in the source AWS Region that contains the source DB instance. // - // You must specify this parameter when you create an encrypted Read Replica + // You must specify this parameter when you create an encrypted read replica // from another AWS Region by using the Amazon RDS API. Don't specify PreSignedUrl - // when you are creating an encrypted Read Replica in the same AWS Region. + // when you are creating an encrypted read replica in the same AWS Region. // // The presigned URL must be a valid request for the CreateDBInstanceReadReplica // API action that can be executed in the source AWS Region that contains the // encrypted source DB instance. The presigned URL request must contain the // following parameter values: // - // * DestinationRegion - The AWS Region that the encrypted Read Replica is + // * DestinationRegion - The AWS Region that the encrypted read replica is // created in. This AWS Region is the same one where the CreateDBInstanceReadReplica // action is called that contains this presigned URL. For example, if you // create an encrypted DB instance in the us-west-1 AWS Region, from a source @@ -18537,14 +19097,14 @@ type CreateDBInstanceReadReplicaInput struct { // be set to the us-east-1 AWS Region. // // * KmsKeyId - The AWS KMS key identifier for the key to use to encrypt - // the Read Replica in the destination AWS Region. This is the same identifier + // the read replica in the destination AWS Region. This is the same identifier // for both the CreateDBInstanceReadReplica action that is called in the // destination AWS Region, and the action contained in the presigned URL. // // * SourceDBInstanceIdentifier - The DB instance identifier for the encrypted // DB instance to be replicated. This identifier must be in the Amazon Resource // Name (ARN) format for the source AWS Region. For example, if you are creating - // an encrypted Read Replica from a DB instance in the us-west-2 AWS Region, + // an encrypted read replica from a DB instance in the us-west-2 AWS Region, // then your SourceDBInstanceIdentifier looks like the following example: // arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115. 
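// Usage sketch for the cross-region case described above: an encrypted read replica
// created in a destination region by specifying SourceRegion so the SDK presigns the
// request. The ARNs, account number, and key ID are placeholders; the client is
// assumed to be configured for the destination region (us-west-2 here).
//
//   import (
//       "github.com/aws/aws-sdk-go/aws"
//       "github.com/aws/aws-sdk-go/service/rds"
//   )
//
//   func createCrossRegionReplica(client *rds.RDS) error {
//       _, err := client.CreateDBInstanceReadReplica(&rds.CreateDBInstanceReadReplicaInput{
//           DBInstanceIdentifier:       aws.String("example-replica-west"),
//           SourceDBInstanceIdentifier: aws.String("arn:aws:rds:us-east-1:123456789012:db:example-primary"),
//           // KMS key in the destination region, required for an encrypted cross-region replica.
//           KmsKeyId: aws.String("arn:aws:kms:us-west-2:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
//           // Setting SourceRegion lets the SDK generate the presigned URL instead of PreSignedUrl.
//           SourceRegion: aws.String("us-east-1"),
//       })
//       return err
//   }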
// @@ -18554,51 +19114,81 @@ type CreateDBInstanceReadReplicaInput struct { // // If you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion // (or --source-region for the AWS CLI) instead of specifying PreSignedUrl manually. - // Specifying SourceRegion autogenerates a pre-signed URL that is a valid request + // Specifying SourceRegion autogenerates a presigned URL that is a valid request // for the operation that can be executed in the source AWS Region. + // + // SourceRegion isn't supported for SQL Server, because SQL Server on Amazon + // RDS doesn't support cross-region read replicas. PreSignedUrl *string `type:"string"` // The number of CPU cores and the number of threads per core for the DB instance // class of the DB instance. ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // A value that indicates whether the DB instance is publicly accessible. When - // the DB instance is publicly accessible, it is an Internet-facing instance - // with a publicly resolvable DNS name, which resolves to a public IP address. - // When the DB instance isn't publicly accessible, it is an internal instance - // with a DNS name that resolves to a private IP address. For more information, - // see CreateDBInstance. + // A value that indicates whether the DB instance is publicly accessible. + // + // When the DB instance is publicly accessible, its DNS endpoint resolves to + // the private IP address from within the DB instance's VPC, and to the public + // IP address from outside of the DB instance's VPC. Access to the DB instance + // is ultimately controlled by the security group it uses, and that public access + // is not permitted if the security group assigned to the DB instance doesn't + // permit it. + // + // When the DB instance isn't publicly accessible, it is an internal DB instance + // with a DNS name that resolves to a private IP address. + // + // For more information, see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` - // The identifier of the DB instance that will act as the source for the Read - // Replica. Each DB instance can have up to five Read Replicas. + // The open mode of the replica database: mounted or read-only. + // + // This parameter is only supported for Oracle DB instances. + // + // Mounted DB replicas are included in Oracle Enterprise Edition. The main use + // case for mounted replicas is cross-Region disaster recovery. The primary + // database doesn't use Active Data Guard to transmit information to the mounted + // replica. Because it doesn't accept user connections, a mounted replica can't + // serve a read-only workload. + // + // You can create a combination of mounted and read-only DB replicas for the + // same primary DB instance. For more information, see Working with Oracle Read + // Replicas for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html) + // in the Amazon RDS User Guide. + ReplicaMode *string `type:"string" enum:"ReplicaMode"` + + // The identifier of the DB instance that will act as the source for the read + // replica. Each DB instance can have up to five read replicas. // // Constraints: // - // * Must be the identifier of an existing MySQL, MariaDB, Oracle, or PostgreSQL - // DB instance. + // * Must be the identifier of an existing MySQL, MariaDB, Oracle, PostgreSQL, + // or SQL Server DB instance. 
// - // * Can specify a DB instance that is a MySQL Read Replica only if the source + // * Can specify a DB instance that is a MySQL read replica only if the source // is running MySQL 5.6 or later. // - // * For the limitations of Oracle Read Replicas, see Read Replica Limitations + // * For the limitations of Oracle read replicas, see Read Replica Limitations // with Oracle (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html) // in the Amazon RDS User Guide. // - // * Can specify a DB instance that is a PostgreSQL DB instance only if the - // source is running PostgreSQL 9.3.5 or later (9.4.7 and higher for cross-region - // replication). + // * For the limitations of SQL Server read replicas, see Read Replica Limitations + // with Microsoft SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/SQLServer.ReadReplicas.Limitations.html) + // in the Amazon RDS User Guide. + // + // * Can specify a PostgreSQL DB instance only if the source is running PostgreSQL + // 9.3.5 or later (9.4.7 and higher for cross-region replication). // - // * The specified DB instance must have automatic backups enabled, its backup - // retention period must be greater than 0. + // * The specified DB instance must have automatic backups enabled, that + // is, its backup retention period must be greater than 0. // - // * If the source DB instance is in the same AWS Region as the Read Replica, + // * If the source DB instance is in the same AWS Region as the read replica, // specify a valid DB instance identifier. // - // * If the source DB instance is in a different AWS Region than the Read - // Replica, specify a valid DB instance ARN. For more information, go to - // Constructing an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) - // in the Amazon RDS User Guide. + // * If the source DB instance is in a different AWS Region from the read + // replica, specify a valid DB instance ARN. For more information, see Constructing + // an ARN for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing) + // in the Amazon RDS User Guide. This doesn't apply to SQL Server, which + // doesn't support cross-region replicas. // // SourceDBInstanceIdentifier is a required field SourceDBInstanceIdentifier *string `type:"string" required:"true"` @@ -18608,7 +19198,7 @@ type CreateDBInstanceReadReplicaInput struct { // have the same region as the source ARN. SourceRegion *string `type:"string" ignore:"true"` - // Specifies the storage type to be associated with the Read Replica. + // Specifies the storage type to be associated with the read replica. // // Valid values: standard | gp2 | io1 // @@ -18625,7 +19215,7 @@ type CreateDBInstanceReadReplicaInput struct { // its default processor features. UseDefaultProcessorFeatures *bool `type:"boolean"` - // A list of EC2 VPC security groups to associate with the Read Replica. + // A list of EC2 VPC security groups to associate with the read replica. // // Default: The default EC2 VPC security group for the DB subnet group's VPC. VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` @@ -18813,6 +19403,12 @@ func (s *CreateDBInstanceReadReplicaInput) SetPubliclyAccessible(v bool) *Create return s } +// SetReplicaMode sets the ReplicaMode field's value. 
+func (s *CreateDBInstanceReadReplicaInput) SetReplicaMode(v string) *CreateDBInstanceReadReplicaInput { + s.ReplicaMode = &v + return s +} + // SetSourceDBInstanceIdentifier sets the SourceDBInstanceIdentifier field's value. func (s *CreateDBInstanceReadReplicaInput) SetSourceDBInstanceIdentifier(v string) *CreateDBInstanceReadReplicaInput { s.SourceDBInstanceIdentifier = &v @@ -19022,8 +19618,8 @@ type CreateDBProxyInput struct { // The kinds of databases that the proxy can connect to. This value determines // which database network protocol the proxy recognizes when it interprets network - // traffic to and from the database. Currently, this value is always MYSQL. - // The engine family applies to both RDS MySQL and Aurora MySQL. + // traffic to and from the database. The engine family applies to MySQL and + // PostgreSQL for both RDS and Aurora. // // EngineFamily is a required field EngineFamily *string `type:"string" required:"true" enum:"EngineFamily"` @@ -19491,11 +20087,10 @@ type CreateEventSubscriptionInput struct { // not active. Enabled *bool `type:"boolean"` - // A list of event categories for a SourceType that you want to subscribe to. - // You can see a list of the categories for a given SourceType in the Events - // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) - // topic in the Amazon RDS User Guide or by using the DescribeEventCategories - // action. + // A list of event categories for a particular source type (SourceType) that + // you want to subscribe to. You can see a list of the categories for a given + // source type in Events (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) + // in the Amazon RDS User Guide or by using the DescribeEventCategories operation. EventCategories []*string `locationNameList:"EventCategory" type:"list"` // The Amazon Resource Name (ARN) of the SNS topic created for event notification. @@ -19512,24 +20107,30 @@ type CreateEventSubscriptionInput struct { // // Constraints: // - // * If SourceIds are supplied, SourceType must also be provided. + // * If a SourceIds value is supplied, SourceType must also be provided. // - // * If the source type is a DB instance, then a DBInstanceIdentifier must + // * If the source type is a DB instance, a DBInstanceIdentifier value must // be supplied. // - // * If the source type is a DB security group, a DBSecurityGroupName must + // * If the source type is a DB cluster, a DBClusterIdentifier value must // be supplied. // - // * If the source type is a DB parameter group, a DBParameterGroupName must + // * If the source type is a DB parameter group, a DBParameterGroupName value + // must be supplied. + // + // * If the source type is a DB security group, a DBSecurityGroupName value + // must be supplied. + // + // * If the source type is a DB snapshot, a DBSnapshotIdentifier value must // be supplied. // - // * If the source type is a DB snapshot, a DBSnapshotIdentifier must be - // supplied. + // * If the source type is a DB cluster snapshot, a DBClusterSnapshotIdentifier + // value must be supplied. SourceIds []*string `locationNameList:"SourceId" type:"list"` // The type of source that is generating the events. For example, if you want - // to be notified of events generated by a DB instance, you would set this parameter - // to db-instance. if this value isn't specified, all events are returned. + // to be notified of events generated by a DB instance, you set this parameter + // to db-instance. 
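// Usage sketch for CreateEventSubscriptionInput: subscribing an SNS topic to events
// from a single DB instance. The subscription name, topic ARN, instance identifier,
// and category names are placeholders; DescribeEventCategories returns the valid
// categories for each source type.
//
//   import (
//       "github.com/aws/aws-sdk-go/aws"
//       "github.com/aws/aws-sdk-go/service/rds"
//   )
//
//   func subscribeToInstanceEvents(client *rds.RDS) error {
//       _, err := client.CreateEventSubscription(&rds.CreateEventSubscriptionInput{
//           SubscriptionName: aws.String("example-instance-events"),
//           SnsTopicArn:      aws.String("arn:aws:sns:us-east-1:123456789012:example-rds-events"),
//           SourceType:       aws.String("db-instance"),
//           // Because SourceIds is supplied, SourceType must also be provided.
//           SourceIds:       aws.StringSlice([]string{"example-primary"}),
//           EventCategories: aws.StringSlice([]string{"availability", "failure"}),
//           Enabled:         aws.Bool(true),
//       })
//       return err
//   }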
If this value isn't specified, all events are returned. // // Valid values: db-instance | db-cluster | db-parameter-group | db-security-group // | db-snapshot | db-cluster-snapshot @@ -19651,7 +20252,7 @@ type CreateGlobalClusterInput struct { // can't be deleted when deletion protection is enabled. DeletionProtection *bool `type:"boolean"` - // Provides the name of the database engine to be used for this DB cluster. + // The name of the database engine to be used for this DB cluster. Engine *string `type:"string"` // The engine version of the Aurora global database. @@ -20058,16 +20659,29 @@ type DBCluster struct { // Specifies the connection endpoint for the primary instance of the DB cluster. Endpoint *string `type:"string"` - // Provides the name of the database engine to be used for this DB cluster. + // The name of the database engine to be used for this DB cluster. Engine *string `type:"string"` // The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, // global, or multimaster. + // + // For more information, see CreateDBCluster (https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBCluster.html). EngineMode *string `type:"string"` // Indicates the database engine version. EngineVersion *string `type:"string"` + // Specifies whether you have requested to enable write forwarding for a secondary + // cluster in an Aurora global database. Because write forwarding takes time + // to enable, check the value of GlobalWriteForwardingStatus to confirm that + // the request has completed before using the write forwarding feature for this + // cluster. + GlobalWriteForwardingRequested *bool `type:"boolean"` + + // Specifies whether a secondary cluster in an Aurora global database has write + // forwarding enabled, not enabled, or is in the process of enabling it. + GlobalWriteForwardingStatus *string `type:"string" enum:"WriteForwardingStatus"` + // Specifies the ID that Amazon Route 53 assigns when you create a hosted zone. HostedZoneId *string `type:"string"` @@ -20114,7 +20728,7 @@ type DBCluster struct { // in Universal Coordinated Time (UTC). PreferredMaintenanceWindow *string `type:"string"` - // Contains one or more identifiers of the Read Replicas associated with this + // Contains one or more identifiers of the read replicas associated with this // DB cluster. ReadReplicaIdentifiers []*string `locationNameList:"ReadReplicaIdentifier" type:"list"` @@ -20132,7 +20746,7 @@ type DBCluster struct { ReaderEndpoint *string `type:"string"` // Contains the identifier of the source DB cluster if this DB cluster is a - // Read Replica. + // read replica. ReplicationSourceIdentifier *string `type:"string"` // Shows the scaling configuration for an Aurora DB cluster in serverless DB @@ -20148,6 +20762,10 @@ type DBCluster struct { // Specifies whether the DB cluster is encrypted. StorageEncrypted *bool `type:"boolean"` + // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) + // in the Amazon RDS User Guide. + TagList []*Tag `locationNameList:"Tag" type:"list"` + // Provides a list of VPC security groups that the DB cluster belongs to. VpcSecurityGroups []*VpcSecurityGroupMembership `locationNameList:"VpcSecurityGroupMembership" type:"list"` } @@ -20366,6 +20984,18 @@ func (s *DBCluster) SetEngineVersion(v string) *DBCluster { return s } +// SetGlobalWriteForwardingRequested sets the GlobalWriteForwardingRequested field's value. 
+func (s *DBCluster) SetGlobalWriteForwardingRequested(v bool) *DBCluster { + s.GlobalWriteForwardingRequested = &v + return s +} + +// SetGlobalWriteForwardingStatus sets the GlobalWriteForwardingStatus field's value. +func (s *DBCluster) SetGlobalWriteForwardingStatus(v string) *DBCluster { + s.GlobalWriteForwardingStatus = &v + return s +} + // SetHostedZoneId sets the HostedZoneId field's value. func (s *DBCluster) SetHostedZoneId(v string) *DBCluster { s.HostedZoneId = &v @@ -20468,6 +21098,12 @@ func (s *DBCluster) SetStorageEncrypted(v bool) *DBCluster { return s } +// SetTagList sets the TagList field's value. +func (s *DBCluster) SetTagList(v []*Tag) *DBCluster { + s.TagList = v + return s +} + // SetVpcSecurityGroups sets the VpcSecurityGroups field's value. func (s *DBCluster) SetVpcSecurityGroups(v []*VpcSecurityGroupMembership) *DBCluster { s.VpcSecurityGroups = v @@ -20524,7 +21160,9 @@ type DBClusterEndpoint struct { StaticMembers []*string `type:"list"` // The current status of the endpoint. One of: creating, available, deleting, - // modifying. + // inactive, modifying. The inactive state applies to an endpoint that can't + // be used for a certain kind of cluster, such as a writer endpoint for a read-only + // secondary cluster in a global database. Status *string `type:"string"` } @@ -20697,10 +21335,10 @@ type DBClusterParameterGroup struct { // The Amazon Resource Name (ARN) for the DB cluster parameter group. DBClusterParameterGroupArn *string `type:"string"` - // Provides the name of the DB cluster parameter group. + // The name of the DB cluster parameter group. DBClusterParameterGroupName *string `type:"string"` - // Provides the name of the DB parameter group family that this DB cluster parameter + // The name of the DB parameter group family that this DB cluster parameter // group is compatible with. DBParameterGroupFamily *string `type:"string"` @@ -20904,6 +21542,10 @@ type DBClusterSnapshot struct { // Specifies whether the DB cluster snapshot is encrypted. StorageEncrypted *bool `type:"boolean"` + // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) + // in the Amazon RDS User Guide. + TagList []*Tag `locationNameList:"Tag" type:"list"` + // Provides the VPC ID associated with the DB cluster snapshot. VpcId *string `type:"string"` } @@ -21032,6 +21674,12 @@ func (s *DBClusterSnapshot) SetStorageEncrypted(v bool) *DBClusterSnapshot { return s } +// SetTagList sets the TagList field's value. +func (s *DBClusterSnapshot) SetTagList(v []*Tag) *DBClusterSnapshot { + s.TagList = v + return s +} + // SetVpcId sets the VpcId field's value. func (s *DBClusterSnapshot) SetVpcId(v string) *DBClusterSnapshot { s.VpcId = &v @@ -21155,7 +21803,7 @@ type DBEngineVersion struct { Status *string `type:"string"` // A list of the character sets supported by this engine for the CharacterSetName - // parameter of the CreateDBInstance action. + // parameter of the CreateDBInstance operation. SupportedCharacterSets []*CharacterSet `locationNameList:"CharacterSet" type:"list"` // A list of the supported DB engine modes. @@ -21167,15 +21815,27 @@ type DBEngineVersion struct { // * s3Import SupportedFeatureNames []*string `type:"list"` + // A list of the character sets supported by the Oracle DB engine for the NcharCharacterSetName + // parameter of the CreateDBInstance operation. 
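// Usage sketch for the DBEngineVersion fields added here: listing Aurora MySQL engine
// versions and checking whether each one supports global databases and parallel query.
// For Oracle engines, SupportedNcharCharacterSets would list the valid values for the
// NcharCharacterSetName parameter of CreateDBInstance. The engine name is a placeholder.
//
//   import (
//       "fmt"
//
//       "github.com/aws/aws-sdk-go/aws"
//       "github.com/aws/aws-sdk-go/service/rds"
//   )
//
//   func inspectEngineVersions(client *rds.RDS) error {
//       out, err := client.DescribeDBEngineVersions(&rds.DescribeDBEngineVersionsInput{
//           Engine: aws.String("aurora-mysql"),
//       })
//       if err != nil {
//           return err
//       }
//       for _, v := range out.DBEngineVersions {
//           fmt.Printf("%s: global databases=%t, parallel query=%t\n",
//               aws.StringValue(v.EngineVersion),
//               aws.BoolValue(v.SupportsGlobalDatabases),
//               aws.BoolValue(v.SupportsParallelQuery))
//       }
//       return nil
//   }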
+ SupportedNcharCharacterSets []*CharacterSet `locationNameList:"CharacterSet" type:"list"` + // A list of the time zones supported by this engine for the Timezone parameter // of the CreateDBInstance action. SupportedTimezones []*Timezone `locationNameList:"Timezone" type:"list"` + // A value that indicates whether you can use Aurora global databases with a + // specific DB engine version. + SupportsGlobalDatabases *bool `type:"boolean"` + // A value that indicates whether the engine version supports exporting the // log types specified by ExportableLogTypes to CloudWatch Logs. SupportsLogExportsToCloudwatchLogs *bool `type:"boolean"` - // Indicates whether the database engine version supports Read Replicas. + // A value that indicates whether you can use Aurora parallel query with a specific + // DB engine version. + SupportsParallelQuery *bool `type:"boolean"` + + // Indicates whether the database engine version supports read replicas. SupportsReadReplica *bool `type:"boolean"` // A list of engine versions that this database engine version can be upgraded @@ -21259,18 +21919,36 @@ func (s *DBEngineVersion) SetSupportedFeatureNames(v []*string) *DBEngineVersion return s } +// SetSupportedNcharCharacterSets sets the SupportedNcharCharacterSets field's value. +func (s *DBEngineVersion) SetSupportedNcharCharacterSets(v []*CharacterSet) *DBEngineVersion { + s.SupportedNcharCharacterSets = v + return s +} + // SetSupportedTimezones sets the SupportedTimezones field's value. func (s *DBEngineVersion) SetSupportedTimezones(v []*Timezone) *DBEngineVersion { s.SupportedTimezones = v return s } +// SetSupportsGlobalDatabases sets the SupportsGlobalDatabases field's value. +func (s *DBEngineVersion) SetSupportsGlobalDatabases(v bool) *DBEngineVersion { + s.SupportsGlobalDatabases = &v + return s +} + // SetSupportsLogExportsToCloudwatchLogs sets the SupportsLogExportsToCloudwatchLogs field's value. func (s *DBEngineVersion) SetSupportsLogExportsToCloudwatchLogs(v bool) *DBEngineVersion { s.SupportsLogExportsToCloudwatchLogs = &v return s } +// SetSupportsParallelQuery sets the SupportsParallelQuery field's value. +func (s *DBEngineVersion) SetSupportsParallelQuery(v bool) *DBEngineVersion { + s.SupportsParallelQuery = &v + return s +} + // SetSupportsReadReplica sets the SupportsReadReplica field's value. func (s *DBEngineVersion) SetSupportsReadReplica(v bool) *DBEngineVersion { s.SupportsReadReplica = &v @@ -21337,6 +22015,9 @@ type DBInstance struct { DBInstanceIdentifier *string `type:"string"` // Specifies the current state of this database. + // + // For information about DB instance statuses, see DB Instance Status (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Status.html) + // in the Amazon RDS User Guide. DBInstanceStatus *string `type:"string"` // The meaning of this parameter differs according to the database engine you @@ -21395,7 +22076,7 @@ type DBInstance struct { // Specifies the connection endpoint. Endpoint *Endpoint `type:"structure"` - // Provides the name of the database engine to be used for this DB instance. + // The name of the database engine to be used for this DB instance. Engine *string `type:"string"` // Indicates the database engine version. @@ -21456,6 +22137,11 @@ type DBInstance struct { // Specifies if the DB instance is a Multi-AZ deployment. MultiAZ *bool `type:"boolean"` + // The name of the NCHAR character set for the Oracle DB instance. 
This character + // set specifies the Unicode encoding for data stored in table columns of type + // NCHAR, NCLOB, or NVARCHAR2. + NcharCharacterSetName *string `type:"string"` + // Provides the list of option group memberships for this DB instance. OptionGroupMemberships []*OptionGroupMembership `locationNameList:"OptionGroupMembership" type:"list"` @@ -21494,34 +22180,50 @@ type DBInstance struct { // in the Amazon Aurora User Guide. PromotionTier *int64 `type:"integer"` - // Specifies the accessibility options for the DB instance. A value of true - // specifies an Internet-facing instance with a publicly resolvable DNS name, - // which resolves to a public IP address. A value of false specifies an internal - // instance with a DNS name that resolves to a private IP address. + // Specifies the accessibility options for the DB instance. + // + // When the DB instance is publicly accessible, its DNS endpoint resolves to + // the private IP address from within the DB instance's VPC, and to the public + // IP address from outside of the DB instance's VPC. Access to the DB instance + // is ultimately controlled by the security group it uses, and that public access + // is not permitted if the security group assigned to the DB instance doesn't + // permit it. + // + // When the DB instance isn't publicly accessible, it is an internal DB instance + // with a DNS name that resolves to a private IP address. + // + // For more information, see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` // Contains one or more identifiers of Aurora DB clusters to which the RDS DB - // instance is replicated as a Read Replica. For example, when you create an - // Aurora Read Replica of an RDS MySQL DB instance, the Aurora MySQL DB cluster - // for the Aurora Read Replica is shown. This output does not contain information - // about cross region Aurora Read Replicas. + // instance is replicated as a read replica. For example, when you create an + // Aurora read replica of an RDS MySQL DB instance, the Aurora MySQL DB cluster + // for the Aurora read replica is shown. This output does not contain information + // about cross region Aurora read replicas. // - // Currently, each RDS DB instance can have only one Aurora Read Replica. + // Currently, each RDS DB instance can have only one Aurora read replica. ReadReplicaDBClusterIdentifiers []*string `locationNameList:"ReadReplicaDBClusterIdentifier" type:"list"` - // Contains one or more identifiers of the Read Replicas associated with this + // Contains one or more identifiers of the read replicas associated with this // DB instance. ReadReplicaDBInstanceIdentifiers []*string `locationNameList:"ReadReplicaDBInstanceIdentifier" type:"list"` // Contains the identifier of the source DB instance if this DB instance is - // a Read Replica. + // a read replica. ReadReplicaSourceDBInstanceIdentifier *string `type:"string"` + // The open mode of an Oracle read replica. The default is open-read-only. For + // more information, see Working with Oracle Read Replicas for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html) + // in the Amazon RDS User Guide. + // + // This attribute is only supported in RDS for Oracle. + ReplicaMode *string `type:"string" enum:"ReplicaMode"` + // If present, specifies the name of the secondary Availability Zone for a DB // instance with multi-AZ support. SecondaryAvailabilityZone *string `type:"string"` - // The status of a Read Replica. 
If the instance isn't a Read Replica, this + // The status of a read replica. If the instance isn't a read replica, this // is blank. StatusInfos []*DBInstanceStatusInfo `locationNameList:"DBInstanceStatusInfo" type:"list"` @@ -21531,6 +22233,10 @@ type DBInstance struct { // Specifies the storage type associated with DB instance. StorageType *string `type:"string"` + // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) + // in the Amazon RDS User Guide. + TagList []*Tag `locationNameList:"Tag" type:"list"` + // The ARN from the key store with which the instance is associated for TDE // encryption. TdeCredentialArn *string `type:"string"` @@ -21783,6 +22489,12 @@ func (s *DBInstance) SetMultiAZ(v bool) *DBInstance { return s } +// SetNcharCharacterSetName sets the NcharCharacterSetName field's value. +func (s *DBInstance) SetNcharCharacterSetName(v string) *DBInstance { + s.NcharCharacterSetName = &v + return s +} + // SetOptionGroupMemberships sets the OptionGroupMemberships field's value. func (s *DBInstance) SetOptionGroupMemberships(v []*OptionGroupMembership) *DBInstance { s.OptionGroupMemberships = v @@ -21861,6 +22573,12 @@ func (s *DBInstance) SetReadReplicaSourceDBInstanceIdentifier(v string) *DBInsta return s } +// SetReplicaMode sets the ReplicaMode field's value. +func (s *DBInstance) SetReplicaMode(v string) *DBInstance { + s.ReplicaMode = &v + return s +} + // SetSecondaryAvailabilityZone sets the SecondaryAvailabilityZone field's value. func (s *DBInstance) SetSecondaryAvailabilityZone(v string) *DBInstance { s.SecondaryAvailabilityZone = &v @@ -21885,6 +22603,12 @@ func (s *DBInstance) SetStorageType(v string) *DBInstance { return s } +// SetTagList sets the TagList field's value. +func (s *DBInstance) SetTagList(v []*Tag) *DBInstance { + s.TagList = v + return s +} + // SetTdeCredentialArn sets the TdeCredentialArn field's value. func (s *DBInstance) SetTdeCredentialArn(v string) *DBInstance { s.TdeCredentialArn = &v @@ -22216,7 +22940,7 @@ type DBInstanceStatusInfo struct { // if the instance is in an error state. Normal *bool `type:"boolean"` - // Status of the DB instance. For a StatusType of Read Replica, the values can + // Status of the DB instance. For a StatusType of read replica, the values can // be replicating, replication stop point set, replication stop point reached, // error, stopped, or terminated. Status *string `type:"string"` @@ -22269,11 +22993,11 @@ type DBParameterGroup struct { // The Amazon Resource Name (ARN) for the DB parameter group. DBParameterGroupArn *string `type:"string"` - // Provides the name of the DB parameter group family that this DB parameter - // group is compatible with. + // The name of the DB parameter group family that this DB parameter group is + // compatible with. DBParameterGroupFamily *string `type:"string"` - // Provides the name of the DB parameter group. + // The name of the DB parameter group. DBParameterGroupName *string `type:"string"` // Provides the customer-specified description for this DB parameter group. @@ -22319,7 +23043,7 @@ func (s *DBParameterGroup) SetDescription(v string) *DBParameterGroup { type DBParameterGroupNameMessage struct { _ struct{} `type:"structure"` - // Provides the name of the DB parameter group. + // The name of the DB parameter group. 
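// Usage sketch for the DBInstance fields added here: reading ReplicaMode and TagList
// from a DescribeDBInstances response. The instance identifier is a placeholder.
//
//   import (
//       "fmt"
//
//       "github.com/aws/aws-sdk-go/aws"
//       "github.com/aws/aws-sdk-go/service/rds"
//   )
//
//   func showReplicaDetails(client *rds.RDS) error {
//       out, err := client.DescribeDBInstances(&rds.DescribeDBInstancesInput{
//           DBInstanceIdentifier: aws.String("example-replica"),
//       })
//       if err != nil {
//           return err
//       }
//       for _, db := range out.DBInstances {
//           // ReplicaMode is only populated for Oracle read replicas.
//           fmt.Println("status:", aws.StringValue(db.DBInstanceStatus),
//               "replica mode:", aws.StringValue(db.ReplicaMode))
//           for _, tag := range db.TagList {
//               fmt.Println("tag:", aws.StringValue(tag.Key), "=", aws.StringValue(tag.Value))
//           }
//       }
//       return nil
//   }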
DBParameterGroupName *string `type:"string"` } @@ -22386,10 +23110,6 @@ func (s *DBParameterGroupStatus) SetParameterApplyStatus(v string) *DBParameterG return s } -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // The data structure representing a proxy managed by the RDS Proxy. // // This data type is used as a response element in the DescribeDBProxies action. @@ -22423,8 +23143,7 @@ type DBProxy struct { // value in the connection string for a database client application. Endpoint *string `type:"string"` - // Currently, this value is always MYSQL. The engine family applies to both - // RDS MySQL and Aurora MySQL. + // The engine family applies to MySQL and PostgreSQL for both RDS and Aurora. EngineFamily *string `type:"string"` // The number of seconds a connection to the proxy can have no activity before @@ -22554,10 +23273,6 @@ func (s *DBProxy) SetVpcSubnetIds(v []*string) *DBProxy { return s } -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Contains the details for an RDS Proxy target. It represents an RDS DB instance // or Aurora DB cluster that the proxy can connect to. One or more targets are // associated with an RDS Proxy target group. @@ -22581,6 +23296,9 @@ type DBProxyTarget struct { // The Amazon Resource Name (ARN) for the RDS DB instance or Aurora DB cluster. TargetArn *string `type:"string"` + // Information about the connection health of the RDS Proxy target. + TargetHealth *TargetHealth `type:"structure"` + // The DB cluster identifier when the target represents an Aurora DB cluster. // This field is blank when the target represents an RDS DB instance. TrackedClusterId *string `type:"string"` @@ -22624,6 +23342,12 @@ func (s *DBProxyTarget) SetTargetArn(v string) *DBProxyTarget { return s } +// SetTargetHealth sets the TargetHealth field's value. +func (s *DBProxyTarget) SetTargetHealth(v *TargetHealth) *DBProxyTarget { + s.TargetHealth = v + return s +} + // SetTrackedClusterId sets the TrackedClusterId field's value. func (s *DBProxyTarget) SetTrackedClusterId(v string) *DBProxyTarget { s.TrackedClusterId = &v @@ -22636,10 +23360,6 @@ func (s *DBProxyTarget) SetType(v string) *DBProxyTarget { return s } -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Represents a set of RDS DB instances, Aurora DB clusters, or both that a // proxy can connect to. Currently, each target group is associated with exactly // one RDS DB instance or Aurora DB cluster. @@ -22901,8 +23621,8 @@ type DBSnapshot struct { // accounts is enabled, and otherwise false. IAMDatabaseAuthenticationEnabled *bool `type:"boolean"` - // Specifies the time when the snapshot was taken, in Universal Coordinated - // Time (UTC). + // Specifies the time in Coordinated Universal Time (UTC) when the DB instance, + // from which the snapshot was taken, was created. InstanceCreateTime *time.Time `type:"timestamp"` // Specifies the Provisioned IOPS (I/O operations per second) value of the DB @@ -22932,8 +23652,7 @@ type DBSnapshot struct { // class of the DB instance when the DB snapshot was created. ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // Provides the time when the snapshot was taken, in Universal Coordinated Time - // (UTC). + // Specifies when the snapshot was taken in Coodinated Universal Time (UTC). 
SnapshotCreateTime *time.Time `type:"timestamp"` // Provides the type of the DB snapshot. @@ -22952,6 +23671,10 @@ type DBSnapshot struct { // Specifies the storage type associated with DB snapshot. StorageType *string `type:"string"` + // A list of tags. For more information, see Tagging Amazon RDS Resources (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) + // in the Amazon RDS User Guide. + TagList []*Tag `locationNameList:"Tag" type:"list"` + // The ARN from the key store with which to associate the instance for TDE encryption. TdeCredentialArn *string `type:"string"` @@ -23124,6 +23847,12 @@ func (s *DBSnapshot) SetStorageType(v string) *DBSnapshot { return s } +// SetTagList sets the TagList field's value. +func (s *DBSnapshot) SetTagList(v []*Tag) *DBSnapshot { + s.TagList = v + return s +} + // SetTdeCredentialArn sets the TdeCredentialArn field's value. func (s *DBSnapshot) SetTdeCredentialArn(v string) *DBSnapshot { s.TdeCredentialArn = &v @@ -23452,7 +24181,9 @@ type DeleteDBClusterEndpointOutput struct { StaticMembers []*string `type:"list"` // The current status of the endpoint. One of: creating, available, deleting, - // modifying. + // inactive, modifying. The inactive state applies to an endpoint that can't + // be used for a certain kind of cluster, such as a writer endpoint for a read-only + // secondary cluster in a global database. Status *string `type:"string"` } @@ -23856,7 +24587,7 @@ type DeleteDBInstanceInput struct { // // * Can't end with a hyphen or contain two consecutive hyphens. // - // * Can't be specified when deleting a Read Replica. + // * Can't be specified when deleting a read replica. FinalDBSnapshotIdentifier *string `type:"string"` // A value that indicates whether to skip the creation of a final DB snapshot @@ -23868,7 +24599,7 @@ type DeleteDBInstanceInput struct { // When a DB instance is in a failure state and has a status of 'failed', 'incompatible-restore', // or 'incompatible-network', it can only be deleted when skip is specified. // - // Specify skip when deleting a Read Replica. + // Specify skip when deleting a read replica. // // The FinalDBSnapshotIdentifier parameter must be specified if skip isn't specified. SkipFinalSnapshot *bool `type:"boolean"` @@ -25085,7 +25816,7 @@ type DescribeDBClusterEndpointsInput struct { // db-cluster-endpoint-id, db-cluster-endpoint-status. Values for the db-cluster-endpoint-type // filter can be one or more of: reader, writer, custom. Values for the db-cluster-endpoint-custom-type // filter can be one or more of: reader, any. Values for the db-cluster-endpoint-status - // filter can be one or more of: available, creating, deleting, modifying. + // filter can be one or more of: available, creating, deleting, inactive, modifying. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeDBClusterEndpoints @@ -27882,7 +28613,8 @@ type DescribeEventCategoriesInput struct { // The type of source that is generating the events. // - // Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot + // Valid values: db-instance | db-cluster | db-parameter-group | db-security-group + // | db-snapshot | db-cluster-snapshot SourceType *string `type:"string"` } @@ -27928,7 +28660,7 @@ func (s *DescribeEventCategoriesInput) SetSourceType(v string) *DescribeEventCat return s } -// Data returned from the DescribeEventCategories action. +// Data returned from the DescribeEventCategories operation. 
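// Usage sketch for DescribeEventCategories with the expanded source types: listing the
// event categories available for DB clusters. The source type value comes from the
// valid values documented above; the function name is a placeholder.
//
//   import (
//       "fmt"
//
//       "github.com/aws/aws-sdk-go/aws"
//       "github.com/aws/aws-sdk-go/service/rds"
//   )
//
//   func listClusterEventCategories(client *rds.RDS) error {
//       out, err := client.DescribeEventCategories(&rds.DescribeEventCategoriesInput{
//           SourceType: aws.String("db-cluster"),
//       })
//       if err != nil {
//           return err
//       }
//       for _, m := range out.EventCategoriesMapList {
//           fmt.Println(aws.StringValue(m.SourceType), aws.StringValueSlice(m.EventCategories))
//       }
//       return nil
//   }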
type DescribeEventCategoriesOutput struct { _ struct{} `type:"structure"` @@ -28108,16 +28840,23 @@ type DescribeEventsInput struct { // // * If SourceIdentifier is supplied, SourceType must also be provided. // - // * If the source type is DBInstance, then a DBInstanceIdentifier must be - // supplied. + // * If the source type is a DB instance, a DBInstanceIdentifier value must + // be supplied. // - // * If the source type is DBSecurityGroup, a DBSecurityGroupName must be - // supplied. + // * If the source type is a DB cluster, a DBClusterIdentifier value must + // be supplied. // - // * If the source type is DBParameterGroup, a DBParameterGroupName must + // * If the source type is a DB parameter group, a DBParameterGroupName value + // must be supplied. + // + // * If the source type is a DB security group, a DBSecurityGroupName value + // must be supplied. + // + // * If the source type is a DB snapshot, a DBSnapshotIdentifier value must // be supplied. // - // * If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied. + // * If the source type is a DB cluster snapshot, a DBClusterSnapshotIdentifier + // value must be supplied. // // * Can't end with a hyphen or contain two consecutive hyphens. SourceIdentifier *string `type:"string"` @@ -28261,6 +29000,7 @@ type DescribeExportTasksInput struct { // Filters specify one or more snapshot exports to describe. The filters are // specified as name-value pairs that define what to include in the output. + // Filter names and values are case-sensitive. // // Supported filters include the following: // @@ -28271,7 +29011,8 @@ type DescribeExportTasksInput struct { // * source-arn - The Amazon Resource Name (ARN) of the snapshot exported // to Amazon S3 // - // * status - The status of the export task. + // * status - The status of the export task. Must be lowercase, for example, + // complete. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribeExportTasks request. @@ -28287,7 +29028,7 @@ type DescribeExportTasksInput struct { // Default: 100 // // Constraints: Minimum 20, maximum 100. - MaxRecords *string `type:"string"` + MaxRecords *int64 `min:"20" type:"integer"` // The Amazon Resource Name (ARN) of the snapshot exported to Amazon S3. SourceArn *string `type:"string"` @@ -28306,6 +29047,9 @@ func (s DescribeExportTasksInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeExportTasksInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeExportTasksInput"} + if s.MaxRecords != nil && *s.MaxRecords < 20 { + invalidParams.Add(request.NewErrParamMinValue("MaxRecords", 20)) + } if s.Filters != nil { for i, v := range s.Filters { if v == nil { @@ -28342,7 +29086,7 @@ func (s *DescribeExportTasksInput) SetMarker(v string) *DescribeExportTasksInput } // SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeExportTasksInput) SetMaxRecords(v string) *DescribeExportTasksInput { +func (s *DescribeExportTasksInput) SetMaxRecords(v int64) *DescribeExportTasksInput { s.MaxRecords = &v return s } @@ -28893,6 +29637,13 @@ func (s *DescribeOptionGroupsOutput) SetOptionGroupsList(v []*OptionGroup) *Desc type DescribeOrderableDBInstanceOptionsInput struct { _ struct{} `type:"structure"` + // The Availability Zone group associated with a Local Zone. Specify this parameter + // to retrieve available offerings for the Local Zones in the group. 
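// Usage sketch for DescribeExportTasksInput with the case-sensitive status filter and
// the MaxRecords parameter, which is now an integer with a minimum of 20. The filter
// value shown is an example status; other identifiers are placeholders.
//
//   import (
//       "fmt"
//
//       "github.com/aws/aws-sdk-go/aws"
//       "github.com/aws/aws-sdk-go/service/rds"
//   )
//
//   func listCompletedExports(client *rds.RDS) error {
//       out, err := client.DescribeExportTasks(&rds.DescribeExportTasksInput{
//           Filters: []*rds.Filter{{
//               Name: aws.String("status"),
//               // Filter values are case-sensitive; the status must be lowercase.
//               Values: aws.StringSlice([]string{"complete"}),
//           }},
//           MaxRecords: aws.Int64(20),
//       })
//       if err != nil {
//           return err
//       }
//       for _, task := range out.ExportTasks {
//           fmt.Println(aws.StringValue(task.ExportTaskIdentifier), aws.StringValue(task.Status))
//       }
//       return nil
//   }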
+ // + // Omit this parameter to show the available offerings in the specified AWS + // Region. + AvailabilityZoneGroup *string `type:"string"` + // The DB instance class filter value. Specify this parameter to show only the // available offerings matching the specified DB instance class. DBInstanceClass *string `type:"string"` @@ -28964,6 +29715,12 @@ func (s *DescribeOrderableDBInstanceOptionsInput) Validate() error { return nil } +// SetAvailabilityZoneGroup sets the AvailabilityZoneGroup field's value. +func (s *DescribeOrderableDBInstanceOptionsInput) SetAvailabilityZoneGroup(v string) *DescribeOrderableDBInstanceOptionsInput { + s.AvailabilityZoneGroup = &v + return s +} + // SetDBInstanceClass sets the DBInstanceClass field's value. func (s *DescribeOrderableDBInstanceOptionsInput) SetDBInstanceClass(v string) *DescribeOrderableDBInstanceOptionsInput { s.DBInstanceClass = &v @@ -29631,7 +30388,7 @@ type DescribeSourceRegionsOutput struct { Marker *string `type:"string"` // A list of SourceRegion instances that contains each source AWS Region that - // the current AWS Region can get a Read Replica or a DB snapshot from. + // the current AWS Region can get a read replica or a DB snapshot from. SourceRegions []*SourceRegion `locationNameList:"SourceRegion" type:"list"` } @@ -30175,7 +30932,7 @@ func (s *Event) SetSourceType(v string) *Event { } // Contains the results of a successful invocation of the DescribeEventCategories -// action. +// operation. type EventCategoriesMap struct { _ struct{} `type:"structure"` @@ -30332,16 +31089,16 @@ type ExportTask struct { // The data exported from the snapshot. Valid values are the following: // - // * database - Export all the data of the snapshot. + // * database - Export all the data from a specified database. // - // * database.table [table-name] - Export a table of the snapshot. + // * database.table table-name - Export a table of the snapshot. This format + // is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL. // - // * database.schema [schema-name] - Export a database schema of the snapshot. - // This value isn't valid for RDS for MySQL, RDS for MariaDB, or Aurora MySQL. + // * database.schema schema-name - Export a database schema of the snapshot. + // This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. // - // * database.schema.table [table-name] - Export a table of the database - // schema. This value isn't valid for RDS for MySQL, RDS for MariaDB, or - // Aurora MySQL. + // * database.schema.table table-name - Export a table of the database schema. + // This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. ExportOnly []*string `type:"list"` // A unique identifier for the snapshot export task. This ID isn't an identifier @@ -30761,6 +31518,10 @@ type GlobalClusterMember struct { // The Amazon Resource Name (ARN) for each Aurora cluster. DBClusterArn *string `type:"string"` + // Specifies whether a secondary cluster in an Aurora global database has write + // forwarding enabled, not enabled, or is in the process of enabling it. + GlobalWriteForwardingStatus *string `type:"string" enum:"WriteForwardingStatus"` + // Specifies whether the Aurora cluster is the primary cluster (that is, has // read-write capability) for the Aurora global database with which it is associated. 
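The new AvailabilityZoneGroup parameter scopes DescribeOrderableDBInstanceOptions to a Local Zone group. A hedged sketch; the Engine and DBInstanceClass fields and the Local Zone group name are assumptions from the wider API, while AvailabilityZoneGroup and OutpostCapable come from this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	in := &rds.DescribeOrderableDBInstanceOptionsInput{}
	in.SetEngine("mysql")                          // required field, assumed from the wider API
	in.SetAvailabilityZoneGroup("us-west-2-lax-1") // Local Zone group filter added in this hunk

	out, err := svc.DescribeOrderableDBInstanceOptions(in)
	if err != nil {
		log.Fatal(err)
	}
	for _, opt := range out.OrderableDBInstanceOptions {
		fmt.Printf("%s outpostCapable=%v\n",
			aws.StringValue(opt.DBInstanceClass), // assumed existing field
			aws.BoolValue(opt.OutpostCapable))    // field added later in this hunk
	}
}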
IsWriter *bool `type:"boolean"` @@ -30786,6 +31547,12 @@ func (s *GlobalClusterMember) SetDBClusterArn(v string) *GlobalClusterMember { return s } +// SetGlobalWriteForwardingStatus sets the GlobalWriteForwardingStatus field's value. +func (s *GlobalClusterMember) SetGlobalWriteForwardingStatus(v string) *GlobalClusterMember { + s.GlobalWriteForwardingStatus = &v + return s +} + // SetIsWriter sets the IsWriter field's value. func (s *GlobalClusterMember) SetIsWriter(v bool) *GlobalClusterMember { s.IsWriter = &v @@ -30877,7 +31644,7 @@ type ImportInstallationMediaInput struct { // // Microsoft SQL Server // - // See Version and Feature Support on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.FeatureSupport) + // See Microsoft SQL Server Versions on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.VersionSupport) // in the Amazon RDS User Guide. // // EngineVersion is a required field @@ -31609,7 +32376,9 @@ type ModifyDBClusterEndpointOutput struct { StaticMembers []*string `type:"list"` // The current status of the endpoint. One of: creating, available, deleting, - // modifying. + // inactive, modifying. The inactive state applies to an endpoint that can't + // be used for a certain kind of cluster, such as a writer endpoint for a read-only + // secondary cluster in a global database. Status *string `type:"string"` } @@ -31712,6 +32481,8 @@ type ModifyDBClusterInput struct { // The target backtrack window, in seconds. To disable backtracking, set this // value to 0. // + // Currently, Backtrack is only supported for Aurora MySQL DB clusters. + // // Default: 0 // // Constraints: @@ -31775,12 +32546,22 @@ type ModifyDBClusterInput struct { // The Active Directory directory ID to move the DB cluster to. Specify none // to remove the cluster from its current domain. The domain must be created // prior to this operation. + // + // For more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) + // in the Amazon Aurora User Guide. Domain *string `type:"string"` // Specify the name of the IAM role to be used when making API calls to the // Directory Service. DomainIAMRoleName *string `type:"string"` + // A value that indicates whether to enable write operations to be forwarded + // from this cluster to the primary cluster in an Aurora global database. The + // resulting changes are replicated back to this cluster. This parameter only + // applies to DB clusters that are secondary clusters in an Aurora global database. + // By default, Aurora disallows write operations for secondary clusters. + EnableGlobalWriteForwarding *bool `type:"boolean"` + // A value that indicates whether to enable the HTTP endpoint for an Aurora // Serverless DB cluster. By default, the HTTP endpoint is disabled. // @@ -31996,6 +32777,12 @@ func (s *ModifyDBClusterInput) SetDomainIAMRoleName(v string) *ModifyDBClusterIn return s } +// SetEnableGlobalWriteForwarding sets the EnableGlobalWriteForwarding field's value. +func (s *ModifyDBClusterInput) SetEnableGlobalWriteForwarding(v bool) *ModifyDBClusterInput { + s.EnableGlobalWriteForwarding = &v + return s +} + // SetEnableHttpEndpoint sets the EnableHttpEndpoint field's value. 
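EnableGlobalWriteForwarding lets a secondary cluster in an Aurora global database accept forwarded writes. A hedged sketch of setting it through ModifyDBCluster; the DBClusterIdentifier field and the session setup are assumed from the wider API:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	in := &rds.ModifyDBClusterInput{}
	in.SetDBClusterIdentifier("my-secondary-cluster") // assumed existing identifier field
	in.SetEnableGlobalWriteForwarding(true)           // new field from this hunk

	out, err := svc.ModifyDBCluster(in)
	if err != nil {
		log.Fatal(err)
	}
	// The member status should move through "enabling" to "enabled"
	// (see the WriteForwardingStatus enum at the end of this file).
	fmt.Println(out)
}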
func (s *ModifyDBClusterInput) SetEnableHttpEndpoint(v bool) *ModifyDBClusterInput { s.EnableHttpEndpoint = &v @@ -32148,6 +32935,9 @@ type ModifyDBClusterSnapshotAttributeInput struct { // To manage authorization for other AWS accounts to copy or restore a manual // DB cluster snapshot, set this value to restore. // + // To view the list of attributes available to modify, use the DescribeDBClusterSnapshotAttributes + // API action. + // // AttributeName is a required field AttributeName *string `type:"string" required:"true"` @@ -32321,13 +33111,13 @@ type ModifyDBInstanceInput struct { // // * Must be a value from 0 to 35 // - // * Can be specified for a MySQL Read Replica only if the source is running + // * Can be specified for a MySQL read replica only if the source is running // MySQL 5.6 or later // - // * Can be specified for a PostgreSQL Read Replica only if the source is + // * Can be specified for a PostgreSQL read replica only if the source is // running PostgreSQL 9.3.5 // - // * Can't be set to 0 if the DB instance is a source to Read Replicas + // * Can't be set to 0 if the DB instance is a source to read replicas BackupRetentionPeriod *int64 `type:"integer"` // Indicates the certificate that needs to be associated with the instance. @@ -32419,19 +33209,19 @@ type ModifyDBInstanceInput struct { // // Default: 3306 // - // Valid Values: 1150-65535 + // Valid values: 1150-65535 // // MariaDB // // Default: 3306 // - // Valid Values: 1150-65535 + // Valid values: 1150-65535 // // PostgreSQL // // Default: 5432 // - // Valid Values: 1150-65535 + // Valid values: 1150-65535 // // Type: Integer // @@ -32439,20 +33229,20 @@ type ModifyDBInstanceInput struct { // // Default: 1521 // - // Valid Values: 1150-65535 + // Valid values: 1150-65535 // // SQL Server // // Default: 1433 // - // Valid Values: 1150-65535 except for 1434, 3389, 47001, 49152, and 49152 through - // 49156. + // Valid values: 1150-65535 except 1234, 1434, 3260, 3343, 3389, 47001, and + // 49152-49156. // // Amazon Aurora // // Default: 3306 // - // Valid Values: 1150-65535 + // Valid values: 1150-65535 DBPortNumber *int64 `type:"integer"` // A list of DB security groups to authorize on this DB instance. Changing this @@ -32486,18 +33276,10 @@ type ModifyDBInstanceInput struct { // The Active Directory directory ID to move the DB instance to. Specify none // to remove the instance from its current domain. The domain must be created - // prior to this operation. Currently, only Microsoft SQL Server and Oracle - // DB instances can be created in an Active Directory Domain. - // - // For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication - // to authenticate users that connect to the DB instance. For more information, - // see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft - // SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_SQLServerWinAuth.html) - // in the Amazon RDS User Guide. + // prior to this operation. Currently, only MySQL, Microsoft SQL Server, Oracle, + // and PostgreSQL DB instances can be created in an Active Directory Domain. // - // For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate - // users that connect to the DB instance. 
For more information, see Using Kerberos - // Authentication with Amazon RDS for Oracle (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-kerberos.html) + // For more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) // in the Amazon RDS User Guide. Domain *string `type:"string"` @@ -32552,7 +33334,7 @@ type ModifyDBInstanceInput struct { // While the migration takes place, nightly backups for the instance are suspended. // No other Amazon RDS operations can take place for the instance, including // modifying the instance, rebooting the instance, deleting the instance, creating - // a Read Replica for the instance, and creating a DB snapshot of the instance. + // a read replica for the instance, and creating a DB snapshot of the instance. // // Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied // must be at least 10% greater than the current value. Values that are not @@ -32734,10 +33516,16 @@ type ModifyDBInstanceInput struct { // Valid Values: 0 - 15 PromotionTier *int64 `type:"integer"` - // A value that indicates whether the DB instance is publicly accessible. When - // the DB instance is publicly accessible, it is an Internet-facing instance - // with a publicly resolvable DNS name, which resolves to a public IP address. - // When the DB instance isn't publicly accessible, it is an internal instance + // A value that indicates whether the DB instance is publicly accessible. + // + // When the DB instance is publicly accessible, its DNS endpoint resolves to + // the private IP address from within the DB instance's VPC, and to the public + // IP address from outside of the DB instance's VPC. Access to the DB instance + // is ultimately controlled by the security group it uses, and that public access + // is not permitted if the security group assigned to the DB instance doesn't + // permit it. + // + // When the DB instance isn't publicly accessible, it is an internal DB instance // with a DNS name that resolves to a private IP address. // // PubliclyAccessible only applies to DB instances in a VPC. The DB instance @@ -32748,6 +33536,20 @@ type ModifyDBInstanceInput struct { // of the value of the ApplyImmediately parameter. PubliclyAccessible *bool `type:"boolean"` + // A value that sets the open mode of a replica database to either mounted or + // read-only. + // + // Currently, this parameter is only supported for Oracle DB instances. + // + // Mounted DB replicas are included in Oracle Enterprise Edition. The main use + // case for mounted replicas is cross-Region disaster recovery. The primary + // database doesn't use Active Data Guard to transmit information to the mounted + // replica. Because it doesn't accept user connections, a mounted replica can't + // serve a read-only workload. For more information, see Working with Oracle + // Read Replicas for Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html) + // in the Amazon RDS User Guide. + ReplicaMode *string `type:"string" enum:"ReplicaMode"` + // Specifies the storage type to be associated with the DB instance. // // If you specify Provisioned IOPS (io1), you must also include a value for @@ -32764,7 +33566,7 @@ type ModifyDBInstanceInput struct { // While the migration takes place, nightly backups for the instance are suspended. 
// No other Amazon RDS operations can take place for the instance, including // modifying the instance, rebooting the instance, deleting the instance, creating - // a Read Replica for the instance, and creating a DB snapshot of the instance. + // a read replica for the instance, and creating a DB snapshot of the instance. // // Valid values: standard | gp2 | io1 // @@ -33041,6 +33843,12 @@ func (s *ModifyDBInstanceInput) SetPubliclyAccessible(v bool) *ModifyDBInstanceI return s } +// SetReplicaMode sets the ReplicaMode field's value. +func (s *ModifyDBInstanceInput) SetReplicaMode(v string) *ModifyDBInstanceInput { + s.ReplicaMode = &v + return s +} + // SetStorageType sets the StorageType field's value. func (s *ModifyDBInstanceInput) SetStorageType(v string) *ModifyDBInstanceInput { s.StorageType = &v @@ -33402,6 +34210,9 @@ type ModifyDBSnapshotAttributeInput struct { // To manage authorization for other AWS accounts to copy or restore a manual // DB snapshot, set this value to restore. // + // To view the list of attributes available to modify, use the DescribeDBSnapshotAttributes + // API action. + // // AttributeName is a required field AttributeName *string `type:"string" required:"true"` @@ -33714,11 +34525,10 @@ type ModifyEventSubscriptionInput struct { // A value that indicates whether to activate the subscription. Enabled *bool `type:"boolean"` - // A list of event categories for a SourceType that you want to subscribe to. - // You can see a list of the categories for a given SourceType in the Events - // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) - // topic in the Amazon RDS User Guide or by using the DescribeEventCategories - // action. + // A list of event categories for a source type (SourceType) that you want to + // subscribe to. You can see a list of the categories for a given source type + // in Events (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Events.html) + // in the Amazon RDS User Guide or by using the DescribeEventCategories operation. EventCategories []*string `locationNameList:"EventCategory" type:"list"` // The Amazon Resource Name (ARN) of the SNS topic created for event notification. @@ -33730,7 +34540,8 @@ type ModifyEventSubscriptionInput struct { // to be notified of events generated by a DB instance, you would set this parameter // to db-instance. If this value isn't specified, all events are returned. // - // Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot + // Valid values: db-instance | db-cluster | db-parameter-group | db-security-group + // | db-snapshot | db-cluster-snapshot SourceType *string `type:"string"` // The name of the RDS event notification subscription. @@ -34712,6 +35523,9 @@ func (s *OptionVersion) SetVersion(v string) *OptionVersion { type OrderableDBInstanceOption struct { _ struct{} `type:"structure"` + // The Availability Zone group for a DB instance. + AvailabilityZoneGroup *string `type:"string"` + // A list of Availability Zones for a DB instance. AvailabilityZones []*AvailabilityZone `locationNameList:"AvailabilityZone" type:"list"` @@ -34752,7 +35566,14 @@ type OrderableDBInstanceOption struct { // Indicates whether a DB instance is Multi-AZ capable. MultiAZCapable *bool `type:"boolean"` - // Indicates whether a DB instance can have a Read Replica. + // Whether a DB instance supports RDS on Outposts. 
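ReplicaMode switches an Oracle read replica between mounted and open-read-only. A hedged sketch using the new setter and the ReplicaModeMounted constant defined later in this file; DBInstanceIdentifier and ApplyImmediately are assumed from the wider API:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	in := &rds.ModifyDBInstanceInput{}
	in.SetDBInstanceIdentifier("my-oracle-replica") // assumed existing identifier field
	in.SetApplyImmediately(true)                    // assumed existing field
	in.SetReplicaMode(rds.ReplicaModeMounted)       // new field; constant defined later in this file

	if _, err := svc.ModifyDBInstance(in); err != nil {
		log.Fatal(err)
	}
	fmt.Println("replica open mode change requested")
}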
+ // + // For more information about RDS on Outposts, see Amazon RDS on AWS Outposts + // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) + // in the Amazon RDS User Guide. + OutpostCapable *bool `type:"boolean"` + + // Indicates whether a DB instance can have a read replica. ReadReplicaCapable *bool `type:"boolean"` // Indicates the storage type for a DB instance. @@ -34765,6 +35586,10 @@ type OrderableDBInstanceOption struct { // from 1 to 60 seconds. SupportsEnhancedMonitoring *bool `type:"boolean"` + // A value that indicates whether you can use Aurora global databases with a + // specific combination of other DB engine attributes. + SupportsGlobalDatabases *bool `type:"boolean"` + // Indicates whether a DB instance supports IAM database authentication. SupportsIAMDatabaseAuthentication *bool `type:"boolean"` @@ -34777,8 +35602,8 @@ type OrderableDBInstanceOption struct { // True if a DB instance supports Performance Insights, otherwise false. SupportsPerformanceInsights *bool `type:"boolean"` - // Whether or not Amazon RDS can automatically scale storage for DB instances - // that use the specified instance class. + // Whether Amazon RDS can automatically scale storage for DB instances that + // use the specified DB instance class. SupportsStorageAutoscaling *bool `type:"boolean"` // Indicates whether a DB instance supports encrypted storage. @@ -34798,6 +35623,12 @@ func (s OrderableDBInstanceOption) GoString() string { return s.String() } +// SetAvailabilityZoneGroup sets the AvailabilityZoneGroup field's value. +func (s *OrderableDBInstanceOption) SetAvailabilityZoneGroup(v string) *OrderableDBInstanceOption { + s.AvailabilityZoneGroup = &v + return s +} + // SetAvailabilityZones sets the AvailabilityZones field's value. func (s *OrderableDBInstanceOption) SetAvailabilityZones(v []*AvailabilityZone) *OrderableDBInstanceOption { s.AvailabilityZones = v @@ -34876,6 +35707,12 @@ func (s *OrderableDBInstanceOption) SetMultiAZCapable(v bool) *OrderableDBInstan return s } +// SetOutpostCapable sets the OutpostCapable field's value. +func (s *OrderableDBInstanceOption) SetOutpostCapable(v bool) *OrderableDBInstanceOption { + s.OutpostCapable = &v + return s +} + // SetReadReplicaCapable sets the ReadReplicaCapable field's value. func (s *OrderableDBInstanceOption) SetReadReplicaCapable(v bool) *OrderableDBInstanceOption { s.ReadReplicaCapable = &v @@ -34900,6 +35737,12 @@ func (s *OrderableDBInstanceOption) SetSupportsEnhancedMonitoring(v bool) *Order return s } +// SetSupportsGlobalDatabases sets the SupportsGlobalDatabases field's value. +func (s *OrderableDBInstanceOption) SetSupportsGlobalDatabases(v bool) *OrderableDBInstanceOption { + s.SupportsGlobalDatabases = &v + return s +} + // SetSupportsIAMDatabaseAuthentication sets the SupportsIAMDatabaseAuthentication field's value. func (s *OrderableDBInstanceOption) SetSupportsIAMDatabaseAuthentication(v bool) *OrderableDBInstanceOption { s.SupportsIAMDatabaseAuthentication = &v @@ -34942,6 +35785,34 @@ func (s *OrderableDBInstanceOption) SetVpc(v bool) *OrderableDBInstanceOption { return s } +// A data type that represents an Outpost. +// +// For more information about RDS on Outposts, see Amazon RDS on AWS Outposts +// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) +// in the Amazon RDS User Guide. +type Outpost struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Outpost. 
+ Arn *string `type:"string"` +} + +// String returns the string representation +func (s Outpost) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Outpost) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *Outpost) SetArn(v string) *Outpost { + s.Arn = &v + return s +} + // This data type is used as a request parameter in the ModifyDBParameterGroup // and ResetDBParameterGroup actions. // @@ -35121,9 +35992,12 @@ type PendingMaintenanceAction struct { // A description providing more detail about the maintenance action. Description *string `type:"string"` - // The date when the maintenance action is automatically applied. The maintenance - // action is applied to the resource on this date regardless of the maintenance - // window for the resource. + // The date when the maintenance action is automatically applied. + // + // On this date, the maintenance action is applied to the resource as soon as + // possible, regardless of the maintenance window for the resource. There might + // be a delay of one or more days from this date before the maintenance action + // is applied. ForcedApplyDate *time.Time `type:"timestamp"` // Indicates the type of opt-in request that has been received for the resource. @@ -35367,6 +36241,16 @@ func (s *PendingModifiedValues) SetStorageType(v string) *PendingModifiedValues // // * DescribeValidDBInstanceModifications // +// If you call DescribeDBInstances, ProcessorFeature returns non-null values +// only if the following conditions are met: +// +// * You are accessing an Oracle DB instance. +// +// * Your Oracle DB instance class supports configuring the number of CPU +// cores and threads per core. +// +// * The current number CPU cores and threads is set to a non-default value. +// // For more information, see Configuring the Processor of the DB Instance Class // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#USER_ConfigureProcessor) // in the Amazon RDS User Guide. @@ -35405,12 +36289,12 @@ func (s *ProcessorFeature) SetValue(v string) *ProcessorFeature { type PromoteReadReplicaDBClusterInput struct { _ struct{} `type:"structure"` - // The identifier of the DB cluster Read Replica to promote. This parameter + // The identifier of the DB cluster read replica to promote. This parameter // isn't case-sensitive. // // Constraints: // - // * Must match the identifier of an existing DBCluster Read Replica. + // * Must match the identifier of an existing DB cluster read replica. // // Example: my-cluster-replica1 // @@ -35486,14 +36370,14 @@ type PromoteReadReplicaInput struct { // // * Must be a value from 0 to 35. // - // * Can't be set to 0 if the DB instance is a source to Read Replicas. + // * Can't be set to 0 if the DB instance is a source to read replicas. BackupRetentionPeriod *int64 `type:"integer"` // The DB instance identifier. This value is stored as a lowercase string. // // Constraints: // - // * Must match the identifier of an existing Read Replica DB instance. + // * Must match the identifier of an existing read replica DB instance. // // Example: mydbinstance // @@ -36750,6 +37634,8 @@ type RestoreDBClusterFromS3Input struct { // The target backtrack window, in seconds. To disable backtracking, set this // value to 0. // + // Currently, Backtrack is only supported for Aurora MySQL DB clusters. 
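The Outpost type above is surfaced later in this hunk as Subnet.SubnetOutpost, so subnet-group listings can show which subnets live on an Outpost. A hedged sketch; DescribeDBSubnetGroups and its output shape are assumed from the wider API:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	out, err := svc.DescribeDBSubnetGroups(&rds.DescribeDBSubnetGroupsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, group := range out.DBSubnetGroups {
		for _, subnet := range group.Subnets {
			// SubnetOutpost is the new field of type Outpost on Subnet.
			if subnet.SubnetOutpost != nil && subnet.SubnetOutpost.Arn != nil {
				fmt.Printf("%s is on Outpost %s\n",
					aws.StringValue(subnet.SubnetIdentifier),
					aws.StringValue(subnet.SubnetOutpost.Arn))
			}
		}
	}
}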
+ // // Default: 0 // // Constraints: @@ -36777,7 +37663,7 @@ type RestoreDBClusterFromS3Input struct { CopyTagsToSnapshot *bool `type:"boolean"` // The name of the DB cluster to create from the source data in the Amazon S3 - // bucket. This parameter is isn't case-sensitive. + // bucket. This parameter isn't case-sensitive. // // Constraints: // @@ -36820,7 +37706,7 @@ type RestoreDBClusterFromS3Input struct { // // For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication // to authenticate users that connect to the DB cluster. For more information, - // see Using Kerberos Authentication for Aurora MySQL (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurmysql-kerberos.html) + // see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) // in the Amazon Aurora User Guide. Domain *string `type:"string"` @@ -36841,9 +37727,10 @@ type RestoreDBClusterFromS3Input struct { // in the Amazon Aurora User Guide. EnableIAMDatabaseAuthentication *bool `type:"boolean"` - // The name of the database engine to be used for the restored DB cluster. + // The name of the database engine to be used for this DB cluster. // - // Valid Values: aurora, aurora-postgresql + // Valid Values: aurora (for MySQL 5.6-compatible Aurora), aurora-mysql (for + // MySQL 5.7-compatible Aurora), and aurora-postgresql // // Engine is a required field Engine *string `type:"string" required:"true"` @@ -36985,9 +37872,9 @@ type RestoreDBClusterFromS3Input struct { // The version of the database that the backup files were created from. // - // MySQL version 5.5 and 5.6 are supported. + // MySQL versions 5.5, 5.6, and 5.7 are supported. // - // Example: 5.6.22 + // Example: 5.6.40, 5.7.28 // // SourceEngineVersion is a required field SourceEngineVersion *string `type:"string" required:"true"` @@ -37269,6 +38156,8 @@ type RestoreDBClusterFromSnapshotInput struct { // The target backtrack window, in seconds. To disable backtracking, set this // value to 0. // + // Currently, Backtrack is only supported for Aurora MySQL DB clusters. + // // Default: 0 // // Constraints: @@ -37329,7 +38218,12 @@ type RestoreDBClusterFromSnapshotInput struct { DeletionProtection *bool `type:"boolean"` // Specify the Active Directory directory ID to restore the DB cluster in. The - // domain must be created prior to this operation. + // domain must be created prior to this operation. Currently, only MySQL, Microsoft + // SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active + // Directory Domain. + // + // For more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) + // in the Amazon RDS User Guide. Domain *string `type:"string"` // Specify the name of the IAM role to be used when making API calls to the @@ -37360,6 +38254,8 @@ type RestoreDBClusterFromSnapshotInput struct { // The DB engine mode of the DB cluster, either provisioned, serverless, parallelquery, // global, or multimaster. + // + // For more information, see CreateDBCluster (https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBCluster.html). EngineMode *string `type:"string"` // The version of the database engine to use for the new DB cluster. @@ -37637,6 +38533,8 @@ type RestoreDBClusterToPointInTimeInput struct { // The target backtrack window, in seconds. To disable backtracking, set this // value to 0. // + // Currently, Backtrack is only supported for Aurora MySQL DB clusters. 
+ // // Default: 0 // // Constraints: @@ -37695,7 +38593,7 @@ type RestoreDBClusterToPointInTimeInput struct { // // For Amazon Aurora DB clusters, Amazon RDS can use Kerberos Authentication // to authenticate users that connect to the DB cluster. For more information, - // see Using Kerberos Authentication for Aurora MySQL (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurmysql-kerberos.html) + // see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/kerberos-authentication.html) // in the Amazon Aurora User Guide. Domain *string `type:"string"` @@ -38068,19 +38966,11 @@ type RestoreDBInstanceFromDBSnapshotInput struct { DeletionProtection *bool `type:"boolean"` // Specify the Active Directory directory ID to restore the DB instance in. - // The domain must be created prior to this operation. Currently, only Microsoft - // SQL Server and Oracle DB instances can be created in an Active Directory - // Domain. - // - // For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication - // to authenticate users that connect to the DB instance. For more information, - // see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft - // SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_SQLServerWinAuth.html) - // in the Amazon RDS User Guide. + // The domain must be created prior to this operation. Currently, only MySQL, + // Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created + // in an Active Directory Domain. // - // For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate - // users that connect to the DB instance. For more information, see Using Kerberos - // Authentication with Amazon RDS for Oracle (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-kerberos.html) + // For more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) // in the Amazon RDS User Guide. Domain *string `type:"string"` @@ -38090,8 +38980,8 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // The list of logs that the restored DB instance is to export to CloudWatch // Logs. The values in the list depend on the DB engine being used. For more - // information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) - // in the Amazon Aurora User Guide. + // information, see Publishing Database Logs to Amazon CloudWatch Logs (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch) + // in the Amazon RDS User Guide. EnableCloudwatchLogsExports []*string `type:"list"` // A value that indicates whether to enable mapping of AWS Identity and Access @@ -38181,12 +39071,19 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // class of the DB instance. ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // A value that indicates whether the DB instance is publicly accessible. When - // the DB instance is publicly accessible, it is an Internet-facing instance - // with a publicly resolvable DNS name, which resolves to a public IP address. - // When the DB instance isn't publicly accessible, it is an internal instance - // with a DNS name that resolves to a private IP address. For more information, - // see CreateDBInstance. 
+ // A value that indicates whether the DB instance is publicly accessible. + // + // When the DB instance is publicly accessible, its DNS endpoint resolves to + // the private IP address from within the DB instance's VPC, and to the public + // IP address from outside of the DB instance's VPC. Access to the DB instance + // is ultimately controlled by the security group it uses, and that public access + // is not permitted if the security group assigned to the DB instance doesn't + // permit it. + // + // When the DB instance isn't publicly accessible, it is an internal DB instance + // with a DNS name that resolves to a private IP address. + // + // For more information, see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` // Specifies the storage type to be associated with the DB instance. @@ -38601,6 +39498,10 @@ type RestoreDBInstanceFromS3Input struct { // * Can't be a reserved word for the chosen database engine. MasterUsername *string `type:"string"` + // The upper limit to which Amazon RDS can automatically scale the storage of + // the DB instance. + MaxAllocatedStorage *int64 `type:"integer"` + // The interval, in seconds, between points when Enhanced Monitoring metrics // are collected for the DB instance. To disable collecting Enhanced Monitoring // metrics, specify 0. @@ -38693,12 +39594,19 @@ type RestoreDBInstanceFromS3Input struct { // class of the DB instance. ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // A value that indicates whether the DB instance is publicly accessible. When - // the DB instance is publicly accessible, it is an Internet-facing instance - // with a publicly resolvable DNS name, which resolves to a public IP address. - // When the DB instance isn't publicly accessible, it is an internal instance - // with a DNS name that resolves to a private IP address. For more information, - // see CreateDBInstance. + // A value that indicates whether the DB instance is publicly accessible. + // + // When the DB instance is publicly accessible, its DNS endpoint resolves to + // the private IP address from within the DB instance's VPC, and to the public + // IP address from outside of the DB instance's VPC. Access to the DB instance + // is ultimately controlled by the security group it uses, and that public access + // is not permitted if the security group assigned to the DB instance doesn't + // permit it. + // + // When the DB instance isn't publicly accessible, it is an internal DB instance + // with a DNS name that resolves to a private IP address. + // + // For more information, see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` // The name of your Amazon S3 bucket that contains your database backup file. @@ -38722,9 +39630,11 @@ type RestoreDBInstanceFromS3Input struct { // SourceEngine is a required field SourceEngine *string `type:"string" required:"true"` - // The engine version of your source database. + // The version of the database that the backup files were created from. + // + // MySQL versions 5.6 and 5.7 are supported. // - // Valid Values: 5.6 + // Example: 5.6.40 // // SourceEngineVersion is a required field SourceEngineVersion *string `type:"string" required:"true"` @@ -38927,6 +39837,12 @@ func (s *RestoreDBInstanceFromS3Input) SetMasterUsername(v string) *RestoreDBIns return s } +// SetMaxAllocatedStorage sets the MaxAllocatedStorage field's value. 
+func (s *RestoreDBInstanceFromS3Input) SetMaxAllocatedStorage(v int64) *RestoreDBInstanceFromS3Input { + s.MaxAllocatedStorage = &v + return s +} + // SetMonitoringInterval sets the MonitoringInterval field's value. func (s *RestoreDBInstanceFromS3Input) SetMonitoringInterval(v int64) *RestoreDBInstanceFromS3Input { s.MonitoringInterval = &v @@ -39143,19 +40059,11 @@ type RestoreDBInstanceToPointInTimeInput struct { DeletionProtection *bool `type:"boolean"` // Specify the Active Directory directory ID to restore the DB instance in. - // The domain must be created prior to this operation. Currently, only Microsoft - // SQL Server and Oracle DB instances can be created in an Active Directory - // Domain. - // - // For Microsoft SQL Server DB instances, Amazon RDS can use Windows Authentication - // to authenticate users that connect to the DB instance. For more information, - // see Using Windows Authentication with an Amazon RDS DB Instance Running Microsoft - // SQL Server (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_SQLServerWinAuth.html) - // in the Amazon RDS User Guide. + // The domain must be created prior to this operation. Currently, only MySQL, + // Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created + // in an Active Directory Domain. // - // For Oracle DB instances, Amazon RDS can use Kerberos Authentication to authenticate - // users that connect to the DB instance. For more information, see Using Kerberos - // Authentication with Amazon RDS for Oracle (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-kerberos.html) + // For more information, see Kerberos Authentication (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/kerberos-authentication.html) // in the Amazon RDS User Guide. Domain *string `type:"string"` @@ -39226,6 +40134,10 @@ type RestoreDBInstanceToPointInTimeInput struct { // Valid values: license-included | bring-your-own-license | general-public-license LicenseModel *string `type:"string"` + // The upper limit to which Amazon RDS can automatically scale the storage of + // the DB instance. + MaxAllocatedStorage *int64 `type:"integer"` + // A value that indicates whether the DB instance is a Multi-AZ deployment. // // Constraint: You can't specify the AvailabilityZone parameter if the DB instance @@ -39250,12 +40162,19 @@ type RestoreDBInstanceToPointInTimeInput struct { // class of the DB instance. ProcessorFeatures []*ProcessorFeature `locationNameList:"ProcessorFeature" type:"list"` - // A value that indicates whether the DB instance is publicly accessible. When - // the DB instance is publicly accessible, it is an Internet-facing instance - // with a publicly resolvable DNS name, which resolves to a public IP address. - // When the DB instance isn't publicly accessible, it is an internal instance - // with a DNS name that resolves to a private IP address. For more information, - // see CreateDBInstance. + // A value that indicates whether the DB instance is publicly accessible. + // + // When the DB instance is publicly accessible, its DNS endpoint resolves to + // the private IP address from within the DB instance's VPC, and to the public + // IP address from outside of the DB instance's VPC. Access to the DB instance + // is ultimately controlled by the security group it uses, and that public access + // is not permitted if the security group assigned to the DB instance doesn't + // permit it. 
+ // + // When the DB instance isn't publicly accessible, it is an internal DB instance + // with a DNS name that resolves to a private IP address. + // + // For more information, see CreateDBInstance. PubliclyAccessible *bool `type:"boolean"` // The date and time to restore from. @@ -39444,6 +40363,12 @@ func (s *RestoreDBInstanceToPointInTimeInput) SetLicenseModel(v string) *Restore return s } +// SetMaxAllocatedStorage sets the MaxAllocatedStorage field's value. +func (s *RestoreDBInstanceToPointInTimeInput) SetMaxAllocatedStorage(v int64) *RestoreDBInstanceToPointInTimeInput { + s.MaxAllocatedStorage = &v + return s +} + // SetMultiAZ sets the MultiAZ field's value. func (s *RestoreDBInstanceToPointInTimeInput) SetMultiAZ(v bool) *RestoreDBInstanceToPointInTimeInput { s.MultiAZ = &v @@ -40195,16 +41120,16 @@ type StartExportTaskInput struct { // The data to be exported from the snapshot. If this parameter is not provided, // all the snapshot data is exported. Valid values are the following: // - // * database - Export all the data of the snapshot. + // * database - Export all the data from a specified database. // - // * database.table [table-name] - Export a table of the snapshot. + // * database.table table-name - Export a table of the snapshot. This format + // is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL. // - // * database.schema [schema-name] - Export a database schema of the snapshot. - // This value isn't valid for RDS for MySQL, RDS for MariaDB, or Aurora MySQL. + // * database.schema schema-name - Export a database schema of the snapshot. + // This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. // - // * database.schema.table [table-name] - Export a table of the database - // schema. This value isn't valid for RDS for MySQL, RDS for MariaDB, or - // Aurora MySQL. + // * database.schema.table table-name - Export a table of the database schema. + // This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. ExportOnly []*string `type:"list"` // A unique identifier for the snapshot export task. This ID isn't an identifier @@ -40221,9 +41146,27 @@ type StartExportTaskInput struct { // The ID of the AWS KMS key to use to encrypt the snapshot exported to Amazon // S3. The KMS key ID is the Amazon Resource Name (ARN), the KMS key identifier, - // or the KMS key alias for the KMS encryption key. The IAM role used for the - // snapshot export must have encryption and decryption permissions to use this - // KMS key. + // or the KMS key alias for the KMS encryption key. The caller of this operation + // must be authorized to execute the following operations. These can be set + // in the KMS key policy: + // + // * GrantOperation.Encrypt + // + // * GrantOperation.Decrypt + // + // * GrantOperation.GenerateDataKey + // + // * GrantOperation.GenerateDataKeyWithoutPlaintext + // + // * GrantOperation.ReEncryptFrom + // + // * GrantOperation.ReEncryptTo + // + // * GrantOperation.CreateGrant + // + // * GrantOperation.DescribeKey + // + // * GrantOperation.RetireGrant // // KmsKeyId is a required field KmsKeyId *string `type:"string" required:"true"` @@ -40328,16 +41271,16 @@ type StartExportTaskOutput struct { // The data exported from the snapshot. Valid values are the following: // - // * database - Export all the data of the snapshot. + // * database - Export all the data from a specified database. // - // * database.table [table-name] - Export a table of the snapshot. 
+ // * database.table table-name - Export a table of the snapshot. This format + // is valid only for RDS for MySQL, RDS for MariaDB, and Aurora MySQL. // - // * database.schema [schema-name] - Export a database schema of the snapshot. - // This value isn't valid for RDS for MySQL, RDS for MariaDB, or Aurora MySQL. + // * database.schema schema-name - Export a database schema of the snapshot. + // This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. // - // * database.schema.table [table-name] - Export a table of the database - // schema. This value isn't valid for RDS for MySQL, RDS for MariaDB, or - // Aurora MySQL. + // * database.schema.table table-name - Export a table of the database schema. + // This format is valid only for RDS for PostgreSQL and Aurora PostgreSQL. ExportOnly []*string `type:"list"` // A unique identifier for the snapshot export task. This ID isn't an identifier @@ -40720,8 +41663,8 @@ func (s *StopDBInstanceOutput) SetDBInstance(v *DBInstance) *StopDBInstanceOutpu return s } -// This data type is used as a response element in the DescribeDBSubnetGroups -// action. +// This data type is used as a response element for the DescribeDBSubnetGroups +// operation. type Subnet struct { _ struct{} `type:"structure"` @@ -40731,10 +41674,17 @@ type Subnet struct { // type. SubnetAvailabilityZone *AvailabilityZone `type:"structure"` - // Specifies the identifier of the subnet. + // The identifier of the subnet. SubnetIdentifier *string `type:"string"` - // Specifies the status of the subnet. + // If the subnet is associated with an Outpost, this value specifies the Outpost. + // + // For more information about RDS on Outposts, see Amazon RDS on AWS Outposts + // (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/rds-on-outposts.html) + // in the Amazon RDS User Guide. + SubnetOutpost *Outpost `type:"structure"` + + // The status of the subnet. SubnetStatus *string `type:"string"` } @@ -40760,6 +41710,12 @@ func (s *Subnet) SetSubnetIdentifier(v string) *Subnet { return s } +// SetSubnetOutpost sets the SubnetOutpost field's value. +func (s *Subnet) SetSubnetOutpost(v *Outpost) *Subnet { + s.SubnetOutpost = v + return s +} + // SetSubnetStatus sets the SubnetStatus field's value. func (s *Subnet) SetSubnetStatus(v string) *Subnet { s.SubnetStatus = &v @@ -40773,13 +41729,13 @@ type Tag struct { // A key is the required name of the tag. The string value can be from 1 to // 128 Unicode characters in length and can't be prefixed with "aws:" or "rds:". // The string can only contain only the set of Unicode letters, digits, white-space, - // '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). + // '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$"). Key *string `type:"string"` // A value is the optional value of the tag. The string value can be from 1 // to 256 Unicode characters in length and can't be prefixed with "aws:" or // "rds:". The string can only contain only the set of Unicode letters, digits, - // white-space, '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). + // white-space, '_', '.', ':', '/', '=', '+', '-', '@' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$"). Value *string `type:"string"` } @@ -40805,6 +41761,53 @@ func (s *Tag) SetValue(v string) *Tag { return s } +// Information about the connection health of an RDS Proxy target. 
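StartExportTask now documents the per-engine ExportOnly formats and the KMS grant operations the key policy must allow. A hedged sketch of starting an export; the identifier, role, and bucket fields are assumed from the wider API, and all ARNs are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	in := &rds.StartExportTaskInput{}
	in.SetExportTaskIdentifier("my-snapshot-export")                     // assumed required field
	in.SetSourceArn("arn:aws:rds:us-east-1:123456789012:snapshot:my-db") // placeholder ARN
	in.SetS3BucketName("my-export-bucket")                               // assumed required field
	in.SetIamRoleArn("arn:aws:iam::123456789012:role/rds-s3-export")     // assumed required field
	in.SetKmsKeyId("arn:aws:kms:us-east-1:123456789012:key/placeholder") // key policy must allow the grant operations listed above
	in.SetExportOnly([]*string{aws.String("mydb.myschema.mytable")})     // PostgreSQL-style database.schema.table filter

	out, err := svc.StartExportTask(in)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}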
+type TargetHealth struct { + _ struct{} `type:"structure"` + + // A description of the health of the RDS Proxy target. If the State is AVAILABLE, + // a description is not included. + Description *string `type:"string"` + + // The reason for the current health State of the RDS Proxy target. + Reason *string `type:"string" enum:"TargetHealthReason"` + + // The current state of the connection health lifecycle for the RDS Proxy target. + // The following is a typical lifecycle example for the states of an RDS Proxy + // target: + // + // registering > unavailable > available > unavailable > available + State *string `type:"string" enum:"TargetState"` +} + +// String returns the string representation +func (s TargetHealth) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetHealth) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *TargetHealth) SetDescription(v string) *TargetHealth { + s.Description = &v + return s +} + +// SetReason sets the Reason field's value. +func (s *TargetHealth) SetReason(v string) *TargetHealth { + s.Reason = &v + return s +} + +// SetState sets the State field's value. +func (s *TargetHealth) SetState(v string) *TargetHealth { + s.State = &v + return s +} + // A time zone associated with a DBInstance or a DBSnapshot. This data type // is an element in the response to the DescribeDBInstances, the DescribeDBSnapshots, // and the DescribeDBEngineVersions actions. @@ -40892,10 +41895,6 @@ func (s *UpgradeTarget) SetIsMajorVersionUpgrade(v bool) *UpgradeTarget { return s } -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Specifies the details of authentication used by a proxy to log in as a specific // database user. type UserAuthConfig struct { @@ -40962,10 +41961,6 @@ func (s *UserAuthConfig) SetUserName(v string) *UserAuthConfig { return s } -// -// This is prerelease documentation for the RDS Database Proxy feature in preview -// release. It is subject to change. -// // Returns the details of authentication used by a proxy to log in as a specific // database user. 
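TargetHealth describes the connection-health lifecycle of an RDS Proxy target. A hedged sketch of flagging unhealthy targets; it assumes DescribeDBProxyTargets and a TargetHealth field on DBProxyTarget from the wider SDK, while State, Reason, and the TargetStateAvailable constant come from this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.Must(session.NewSession()))

	in := &rds.DescribeDBProxyTargetsInput{}
	in.SetDBProxyName("my-proxy") // assumed required field

	out, err := svc.DescribeDBProxyTargets(in)
	if err != nil {
		log.Fatal(err)
	}
	for _, target := range out.Targets {
		health := target.TargetHealth // assumed field carrying the TargetHealth type above
		if health == nil {
			continue
		}
		if aws.StringValue(health.State) != rds.TargetStateAvailable {
			fmt.Printf("target %s is %s: %s\n",
				aws.StringValue(target.RdsResourceId), // assumed existing field
				aws.StringValue(health.State),
				aws.StringValue(health.Reason))
		}
	}
}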
type UserAuthConfigInfo struct { @@ -41248,6 +42243,14 @@ const ( ActivityStreamModeAsync = "async" ) +// ActivityStreamMode_Values returns all elements of the ActivityStreamMode enum +func ActivityStreamMode_Values() []string { + return []string{ + ActivityStreamModeSync, + ActivityStreamModeAsync, + } +} + const ( // ActivityStreamStatusStopped is a ActivityStreamStatus enum value ActivityStreamStatusStopped = "stopped" @@ -41262,6 +42265,16 @@ const ( ActivityStreamStatusStopping = "stopping" ) +// ActivityStreamStatus_Values returns all elements of the ActivityStreamStatus enum +func ActivityStreamStatus_Values() []string { + return []string{ + ActivityStreamStatusStopped, + ActivityStreamStatusStarting, + ActivityStreamStatusStarted, + ActivityStreamStatusStopping, + } +} + const ( // ApplyMethodImmediate is a ApplyMethod enum value ApplyMethodImmediate = "immediate" @@ -41270,11 +42283,26 @@ const ( ApplyMethodPendingReboot = "pending-reboot" ) +// ApplyMethod_Values returns all elements of the ApplyMethod enum +func ApplyMethod_Values() []string { + return []string{ + ApplyMethodImmediate, + ApplyMethodPendingReboot, + } +} + const ( // AuthSchemeSecrets is a AuthScheme enum value AuthSchemeSecrets = "SECRETS" ) +// AuthScheme_Values returns all elements of the AuthScheme enum +func AuthScheme_Values() []string { + return []string{ + AuthSchemeSecrets, + } +} + const ( // DBProxyStatusAvailable is a DBProxyStatus enum value DBProxyStatusAvailable = "available" @@ -41293,13 +42321,48 @@ const ( // DBProxyStatusDeleting is a DBProxyStatus enum value DBProxyStatusDeleting = "deleting" + + // DBProxyStatusSuspended is a DBProxyStatus enum value + DBProxyStatusSuspended = "suspended" + + // DBProxyStatusSuspending is a DBProxyStatus enum value + DBProxyStatusSuspending = "suspending" + + // DBProxyStatusReactivating is a DBProxyStatus enum value + DBProxyStatusReactivating = "reactivating" ) +// DBProxyStatus_Values returns all elements of the DBProxyStatus enum +func DBProxyStatus_Values() []string { + return []string{ + DBProxyStatusAvailable, + DBProxyStatusModifying, + DBProxyStatusIncompatibleNetwork, + DBProxyStatusInsufficientResourceLimits, + DBProxyStatusCreating, + DBProxyStatusDeleting, + DBProxyStatusSuspended, + DBProxyStatusSuspending, + DBProxyStatusReactivating, + } +} + const ( // EngineFamilyMysql is a EngineFamily enum value EngineFamilyMysql = "MYSQL" + + // EngineFamilyPostgresql is a EngineFamily enum value + EngineFamilyPostgresql = "POSTGRESQL" ) +// EngineFamily_Values returns all elements of the EngineFamily enum +func EngineFamily_Values() []string { + return []string{ + EngineFamilyMysql, + EngineFamilyPostgresql, + } +} + const ( // IAMAuthModeDisabled is a IAMAuthMode enum value IAMAuthModeDisabled = "DISABLED" @@ -41308,6 +42371,30 @@ const ( IAMAuthModeRequired = "REQUIRED" ) +// IAMAuthMode_Values returns all elements of the IAMAuthMode enum +func IAMAuthMode_Values() []string { + return []string{ + IAMAuthModeDisabled, + IAMAuthModeRequired, + } +} + +const ( + // ReplicaModeOpenReadOnly is a ReplicaMode enum value + ReplicaModeOpenReadOnly = "open-read-only" + + // ReplicaModeMounted is a ReplicaMode enum value + ReplicaModeMounted = "mounted" +) + +// ReplicaMode_Values returns all elements of the ReplicaMode enum +func ReplicaMode_Values() []string { + return []string{ + ReplicaModeOpenReadOnly, + ReplicaModeMounted, + } +} + const ( // SourceTypeDbInstance is a SourceType enum value SourceTypeDbInstance = "db-instance" @@ -41328,6 +42415,62 @@ const 
( SourceTypeDbClusterSnapshot = "db-cluster-snapshot" ) +// SourceType_Values returns all elements of the SourceType enum +func SourceType_Values() []string { + return []string{ + SourceTypeDbInstance, + SourceTypeDbParameterGroup, + SourceTypeDbSecurityGroup, + SourceTypeDbSnapshot, + SourceTypeDbCluster, + SourceTypeDbClusterSnapshot, + } +} + +const ( + // TargetHealthReasonUnreachable is a TargetHealthReason enum value + TargetHealthReasonUnreachable = "UNREACHABLE" + + // TargetHealthReasonConnectionFailed is a TargetHealthReason enum value + TargetHealthReasonConnectionFailed = "CONNECTION_FAILED" + + // TargetHealthReasonAuthFailure is a TargetHealthReason enum value + TargetHealthReasonAuthFailure = "AUTH_FAILURE" + + // TargetHealthReasonPendingProxyCapacity is a TargetHealthReason enum value + TargetHealthReasonPendingProxyCapacity = "PENDING_PROXY_CAPACITY" +) + +// TargetHealthReason_Values returns all elements of the TargetHealthReason enum +func TargetHealthReason_Values() []string { + return []string{ + TargetHealthReasonUnreachable, + TargetHealthReasonConnectionFailed, + TargetHealthReasonAuthFailure, + TargetHealthReasonPendingProxyCapacity, + } +} + +const ( + // TargetStateRegistering is a TargetState enum value + TargetStateRegistering = "REGISTERING" + + // TargetStateAvailable is a TargetState enum value + TargetStateAvailable = "AVAILABLE" + + // TargetStateUnavailable is a TargetState enum value + TargetStateUnavailable = "UNAVAILABLE" +) + +// TargetState_Values returns all elements of the TargetState enum +func TargetState_Values() []string { + return []string{ + TargetStateRegistering, + TargetStateAvailable, + TargetStateUnavailable, + } +} + const ( // TargetTypeRdsInstance is a TargetType enum value TargetTypeRdsInstance = "RDS_INSTANCE" @@ -41338,3 +42481,40 @@ const ( // TargetTypeTrackedCluster is a TargetType enum value TargetTypeTrackedCluster = "TRACKED_CLUSTER" ) + +// TargetType_Values returns all elements of the TargetType enum +func TargetType_Values() []string { + return []string{ + TargetTypeRdsInstance, + TargetTypeRdsServerlessEndpoint, + TargetTypeTrackedCluster, + } +} + +const ( + // WriteForwardingStatusEnabled is a WriteForwardingStatus enum value + WriteForwardingStatusEnabled = "enabled" + + // WriteForwardingStatusDisabled is a WriteForwardingStatus enum value + WriteForwardingStatusDisabled = "disabled" + + // WriteForwardingStatusEnabling is a WriteForwardingStatus enum value + WriteForwardingStatusEnabling = "enabling" + + // WriteForwardingStatusDisabling is a WriteForwardingStatus enum value + WriteForwardingStatusDisabling = "disabling" + + // WriteForwardingStatusUnknown is a WriteForwardingStatus enum value + WriteForwardingStatusUnknown = "unknown" +) + +// WriteForwardingStatus_Values returns all elements of the WriteForwardingStatus enum +func WriteForwardingStatus_Values() []string { + return []string{ + WriteForwardingStatusEnabled, + WriteForwardingStatusDisabled, + WriteForwardingStatusEnabling, + WriteForwardingStatusDisabling, + WriteForwardingStatusUnknown, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go b/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go index d11781106..301ee5165 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go @@ -404,6 +404,14 @@ const ( // The request would result in the user exceeding the allowed number of DB instances. 
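Each enum in this update also gains a generated *_Values helper, which makes input validation a simple membership check. A sketch using SourceType_Values exactly as defined above:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/rds"
)

// validSourceType reports whether s is a member of the SourceType enum,
// using the generated SourceType_Values helper defined above.
func validSourceType(s string) bool {
	for _, v := range rds.SourceType_Values() {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(validSourceType("db-cluster-snapshot")) // true
	fmt.Println(validSourceType("db-proxy"))            // false
}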
ErrCodeInstanceQuotaExceededFault = "InstanceQuotaExceeded" + // ErrCodeInsufficientAvailableIPsInSubnetFault for service response error code + // "InsufficientAvailableIPsInSubnetFault". + // + // The requested operation can't be performed because there aren't enough available + // IP addresses in the proxy's subnets. Add more CIDR blocks to the VPC or remove + // IP address that aren't required from the subnets. + ErrCodeInsufficientAvailableIPsInSubnetFault = "InsufficientAvailableIPsInSubnetFault" + // ErrCodeInsufficientDBClusterCapacityFault for service response error code // "InsufficientDBClusterCapacityFault". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/service.go b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go index dc5442239..656abea7b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go index 81c956f56..500669fa4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go @@ -1843,7 +1843,8 @@ func (c *Redshift) CreateSnapshotScheduleRequest(input *CreateSnapshotScheduleIn // CreateSnapshotSchedule API operation for Amazon Redshift. // -// Creates a snapshot schedule with the rate of every 12 hours. +// Create a snapshot schedule that can be associated to a cluster and which +// overrides the default system backup schedule. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1982,6 +1983,104 @@ func (c *Redshift) CreateTagsWithContext(ctx aws.Context, input *CreateTagsInput return out, req.Send() } +const opCreateUsageLimit = "CreateUsageLimit" + +// CreateUsageLimitRequest generates a "aws/request.Request" representing the +// client's request for the CreateUsageLimit operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateUsageLimit for more information on using the CreateUsageLimit +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateUsageLimitRequest method. 
+// req, resp := client.CreateUsageLimitRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/CreateUsageLimit +func (c *Redshift) CreateUsageLimitRequest(input *CreateUsageLimitInput) (req *request.Request, output *CreateUsageLimitOutput) { + op := &request.Operation{ + Name: opCreateUsageLimit, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUsageLimitInput{} + } + + output = &CreateUsageLimitOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateUsageLimit API operation for Amazon Redshift. +// +// Creates a usage limit for a specified Amazon Redshift feature on a cluster. +// The usage limit is identified by the returned usage limit identifier. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Redshift's +// API operation CreateUsageLimit for usage and error information. +// +// Returned Error Codes: +// * ErrCodeClusterNotFoundFault "ClusterNotFound" +// The ClusterIdentifier parameter does not refer to an existing cluster. +// +// * ErrCodeInvalidClusterStateFault "InvalidClusterState" +// The specified cluster is not in the available state. +// +// * ErrCodeLimitExceededFault "LimitExceededFault" +// The encryption key has exceeded its grant limit in AWS KMS. +// +// * ErrCodeUsageLimitAlreadyExistsFault "UsageLimitAlreadyExists" +// The usage limit already exists. +// +// * ErrCodeInvalidUsageLimitFault "InvalidUsageLimit" +// The usage limit is not valid. +// +// * ErrCodeTagLimitExceededFault "TagLimitExceededFault" +// You have exceeded the number of tags allowed. +// +// * ErrCodeUnsupportedOperationFault "UnsupportedOperation" +// The requested operation isn't supported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/CreateUsageLimit +func (c *Redshift) CreateUsageLimit(input *CreateUsageLimitInput) (*CreateUsageLimitOutput, error) { + req, out := c.CreateUsageLimitRequest(input) + return out, req.Send() +} + +// CreateUsageLimitWithContext is the same as CreateUsageLimit with the addition of +// the ability to pass a context and additional request options. +// +// See CreateUsageLimit for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Redshift) CreateUsageLimitWithContext(ctx aws.Context, input *CreateUsageLimitInput, opts ...request.Option) (*CreateUsageLimitOutput, error) { + req, out := c.CreateUsageLimitRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteCluster = "DeleteCluster" // DeleteClusterRequest generates a "aws/request.Request" representing the @@ -3037,6 +3136,89 @@ func (c *Redshift) DeleteTagsWithContext(ctx aws.Context, input *DeleteTagsInput return out, req.Send() } +const opDeleteUsageLimit = "DeleteUsageLimit" + +// DeleteUsageLimitRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUsageLimit operation. 
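CreateUsageLimit caps a Redshift feature, for example concurrency scaling minutes, on a single cluster. A hedged sketch; every input field name and value below is an assumption from the upstream Redshift API rather than this hunk, which only shows the operation wrapper and its error codes:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/redshift"
)

func main() {
	svc := redshift.New(session.Must(session.NewSession()))

	// Field names and values are assumptions from the upstream Redshift API.
	out, err := svc.CreateUsageLimit(&redshift.CreateUsageLimitInput{
		ClusterIdentifier: aws.String("my-cluster"),
		FeatureType:       aws.String("concurrency-scaling"),
		LimitType:         aws.String("time"),
		Amount:            aws.Int64(60), // minutes per period
		Period:            aws.String("daily"),
		BreachAction:      aws.String("log"),
	})
	if err != nil {
		log.Fatal(err) // e.g. redshift.ErrCodeUsageLimitAlreadyExistsFault
	}
	fmt.Println(out)
}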
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteUsageLimit for more information on using the DeleteUsageLimit +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteUsageLimitRequest method. +// req, resp := client.DeleteUsageLimitRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DeleteUsageLimit +func (c *Redshift) DeleteUsageLimitRequest(input *DeleteUsageLimitInput) (req *request.Request, output *DeleteUsageLimitOutput) { + op := &request.Operation{ + Name: opDeleteUsageLimit, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUsageLimitInput{} + } + + output = &DeleteUsageLimitOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteUsageLimit API operation for Amazon Redshift. +// +// Deletes a usage limit from a cluster. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Redshift's +// API operation DeleteUsageLimit for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUsageLimitNotFoundFault "UsageLimitNotFound" +// The usage limit identifier can't be found. +// +// * ErrCodeUnsupportedOperationFault "UnsupportedOperation" +// The requested operation isn't supported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DeleteUsageLimit +func (c *Redshift) DeleteUsageLimit(input *DeleteUsageLimitInput) (*DeleteUsageLimitOutput, error) { + req, out := c.DeleteUsageLimitRequest(input) + return out, req.Send() +} + +// DeleteUsageLimitWithContext is the same as DeleteUsageLimit with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteUsageLimit for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Redshift) DeleteUsageLimitWithContext(ctx aws.Context, input *DeleteUsageLimitInput, opts ...request.Option) (*DeleteUsageLimitOutput, error) { + req, out := c.DeleteUsageLimitRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDescribeAccountAttributes = "DescribeAccountAttributes" // DescribeAccountAttributesRequest generates a "aws/request.Request" representing the @@ -6458,6 +6640,160 @@ func (c *Redshift) DescribeTagsWithContext(ctx aws.Context, input *DescribeTagsI return out, req.Send() } +const opDescribeUsageLimits = "DescribeUsageLimits" + +// DescribeUsageLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUsageLimits operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeUsageLimits for more information on using the DescribeUsageLimits +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeUsageLimitsRequest method. +// req, resp := client.DescribeUsageLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DescribeUsageLimits +func (c *Redshift) DescribeUsageLimitsRequest(input *DescribeUsageLimitsInput) (req *request.Request, output *DescribeUsageLimitsOutput) { + op := &request.Operation{ + Name: opDescribeUsageLimits, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeUsageLimitsInput{} + } + + output = &DescribeUsageLimitsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeUsageLimits API operation for Amazon Redshift. +// +// Shows usage limits on a cluster. Results are filtered based on the combination +// of input usage limit identifier, cluster identifier, and feature type parameters: +// +// * If usage limit identifier, cluster identifier, and feature type are +// not provided, then all usage limit objects for the current account in +// the current region are returned. +// +// * If usage limit identifier is provided, then the corresponding usage +// limit object is returned. +// +// * If cluster identifier is provided, then all usage limit objects for +// the specified cluster are returned. +// +// * If cluster identifier and feature type are provided, then all usage +// limit objects for the combination of cluster and feature are returned. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Redshift's +// API operation DescribeUsageLimits for usage and error information. +// +// Returned Error Codes: +// * ErrCodeClusterNotFoundFault "ClusterNotFound" +// The ClusterIdentifier parameter does not refer to an existing cluster. +// +// * ErrCodeUnsupportedOperationFault "UnsupportedOperation" +// The requested operation isn't supported. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/DescribeUsageLimits +func (c *Redshift) DescribeUsageLimits(input *DescribeUsageLimitsInput) (*DescribeUsageLimitsOutput, error) { + req, out := c.DescribeUsageLimitsRequest(input) + return out, req.Send() +} + +// DescribeUsageLimitsWithContext is the same as DescribeUsageLimits with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeUsageLimits for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Redshift) DescribeUsageLimitsWithContext(ctx aws.Context, input *DescribeUsageLimitsInput, opts ...request.Option) (*DescribeUsageLimitsOutput, error) { + req, out := c.DescribeUsageLimitsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeUsageLimitsPages iterates over the pages of a DescribeUsageLimits operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeUsageLimits method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeUsageLimits operation. +// pageNum := 0 +// err := client.DescribeUsageLimitsPages(params, +// func(page *redshift.DescribeUsageLimitsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Redshift) DescribeUsageLimitsPages(input *DescribeUsageLimitsInput, fn func(*DescribeUsageLimitsOutput, bool) bool) error { + return c.DescribeUsageLimitsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeUsageLimitsPagesWithContext same as DescribeUsageLimitsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Redshift) DescribeUsageLimitsPagesWithContext(ctx aws.Context, input *DescribeUsageLimitsInput, fn func(*DescribeUsageLimitsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeUsageLimitsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeUsageLimitsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeUsageLimitsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDisableLogging = "DisableLogging" // DisableLoggingRequest generates a "aws/request.Request" representing the @@ -8204,51 +8540,137 @@ func (c *Redshift) ModifySnapshotScheduleWithContext(ctx aws.Context, input *Mod return out, req.Send() } -const opPauseCluster = "PauseCluster" +const opModifyUsageLimit = "ModifyUsageLimit" -// PauseClusterRequest generates a "aws/request.Request" representing the -// client's request for the PauseCluster operation. 
The "output" return +// ModifyUsageLimitRequest generates a "aws/request.Request" representing the +// client's request for the ModifyUsageLimit operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PauseCluster for more information on using the PauseCluster +// See ModifyUsageLimit for more information on using the ModifyUsageLimit // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PauseClusterRequest method. -// req, resp := client.PauseClusterRequest(params) +// // Example sending a request using the ModifyUsageLimitRequest method. +// req, resp := client.ModifyUsageLimitRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/PauseCluster -func (c *Redshift) PauseClusterRequest(input *PauseClusterInput) (req *request.Request, output *PauseClusterOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/ModifyUsageLimit +func (c *Redshift) ModifyUsageLimitRequest(input *ModifyUsageLimitInput) (req *request.Request, output *ModifyUsageLimitOutput) { op := &request.Operation{ - Name: opPauseCluster, + Name: opModifyUsageLimit, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PauseClusterInput{} + input = &ModifyUsageLimitInput{} } - output = &PauseClusterOutput{} + output = &ModifyUsageLimitOutput{} req = c.newRequest(op, input, output) return } -// PauseCluster API operation for Amazon Redshift. +// ModifyUsageLimit API operation for Amazon Redshift. // -// Pauses a cluster. +// Modifies a usage limit in a cluster. You can't modify the feature type or +// period of a usage limit. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Redshift's +// API operation ModifyUsageLimit for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidUsageLimitFault "InvalidUsageLimit" +// The usage limit is not valid. +// +// * ErrCodeUsageLimitNotFoundFault "UsageLimitNotFound" +// The usage limit identifier can't be found. +// +// * ErrCodeUnsupportedOperationFault "UnsupportedOperation" +// The requested operation isn't supported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/ModifyUsageLimit +func (c *Redshift) ModifyUsageLimit(input *ModifyUsageLimitInput) (*ModifyUsageLimitOutput, error) { + req, out := c.ModifyUsageLimitRequest(input) + return out, req.Send() +} + +// ModifyUsageLimitWithContext is the same as ModifyUsageLimit with the addition of +// the ability to pass a context and additional request options. +// +// See ModifyUsageLimit for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Redshift) ModifyUsageLimitWithContext(ctx aws.Context, input *ModifyUsageLimitInput, opts ...request.Option) (*ModifyUsageLimitOutput, error) { + req, out := c.ModifyUsageLimitRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPauseCluster = "PauseCluster" + +// PauseClusterRequest generates a "aws/request.Request" representing the +// client's request for the PauseCluster operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PauseCluster for more information on using the PauseCluster +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PauseClusterRequest method. +// req, resp := client.PauseClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/redshift-2012-12-01/PauseCluster +func (c *Redshift) PauseClusterRequest(input *PauseClusterInput) (req *request.Request, output *PauseClusterOutput) { + op := &request.Operation{ + Name: opPauseCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PauseClusterInput{} + } + + output = &PauseClusterOutput{} + req = c.newRequest(op, input, output) + return +} + +// PauseCluster API operation for Amazon Redshift. +// +// Pauses a cluster. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8610,8 +9032,9 @@ func (c *Redshift) ResizeClusterRequest(input *ResizeClusterInput) (req *request // // Elastic resize operations have the following restrictions: // -// * You can only resize clusters of the following types: dc2.large dc2.8xlarge -// ds2.xlarge ds2.8xlarge ra3.16xlarge +// * You can only resize clusters of the following types: dc1.large (if your +// cluster is in a VPC) dc1.8xlarge (if your cluster is in a VPC) dc2.large +// dc2.8xlarge ds2.xlarge ds2.8xlarge ra3.4xlarge ra3.16xlarge // // * The type of nodes that you add must match the node type for the cluster. // @@ -9978,8 +10401,7 @@ type CancelResizeOutput struct { // The type of encryption for the cluster after the resize is complete. // - // Possible values are KMS and None. In the China region possible values are: - // Legacy and None. + // Possible values are KMS and None. TargetEncryptionType *string `type:"string"` // The node type that the cluster will have after the resize operation is complete. @@ -11608,7 +12030,7 @@ type CreateClusterInput struct { // in the Amazon Redshift Cluster Management Guide. // // Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large - // | dc2.8xlarge | ra3.16xlarge + // | dc2.8xlarge | ra3.4xlarge | ra3.16xlarge // // NodeType is a required field NodeType *string `type:"string" required:"true"` @@ -13284,6 +13706,216 @@ func (s CreateTagsOutput) GoString() string { return s.String() } +type CreateUsageLimitInput struct { + _ struct{} `type:"structure"` + + // The limit amount. If time-based, this amount is in minutes. 
If data-based, + // this amount is in terabytes (TB). The value must be a positive number. + // + // Amount is a required field + Amount *int64 `type:"long" required:"true"` + + // The action that Amazon Redshift takes when the limit is reached. The default + // is log. For more information about this parameter, see UsageLimit. + BreachAction *string `type:"string" enum:"UsageLimitBreachAction"` + + // The identifier of the cluster that you want to limit usage. + // + // ClusterIdentifier is a required field + ClusterIdentifier *string `type:"string" required:"true"` + + // The Amazon Redshift feature that you want to limit. + // + // FeatureType is a required field + FeatureType *string `type:"string" required:"true" enum:"UsageLimitFeatureType"` + + // The type of limit. Depending on the feature type, this can be based on a + // time duration or data size. If FeatureType is spectrum, then LimitType must + // be data-scanned. If FeatureType is concurrency-scaling, then LimitType must + // be time. + // + // LimitType is a required field + LimitType *string `type:"string" required:"true" enum:"UsageLimitLimitType"` + + // The time period that the amount applies to. A weekly period begins on Sunday. + // The default is monthly. + Period *string `type:"string" enum:"UsageLimitPeriod"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s CreateUsageLimitInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUsageLimitInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateUsageLimitInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUsageLimitInput"} + if s.Amount == nil { + invalidParams.Add(request.NewErrParamRequired("Amount")) + } + if s.ClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterIdentifier")) + } + if s.FeatureType == nil { + invalidParams.Add(request.NewErrParamRequired("FeatureType")) + } + if s.LimitType == nil { + invalidParams.Add(request.NewErrParamRequired("LimitType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAmount sets the Amount field's value. +func (s *CreateUsageLimitInput) SetAmount(v int64) *CreateUsageLimitInput { + s.Amount = &v + return s +} + +// SetBreachAction sets the BreachAction field's value. +func (s *CreateUsageLimitInput) SetBreachAction(v string) *CreateUsageLimitInput { + s.BreachAction = &v + return s +} + +// SetClusterIdentifier sets the ClusterIdentifier field's value. +func (s *CreateUsageLimitInput) SetClusterIdentifier(v string) *CreateUsageLimitInput { + s.ClusterIdentifier = &v + return s +} + +// SetFeatureType sets the FeatureType field's value. +func (s *CreateUsageLimitInput) SetFeatureType(v string) *CreateUsageLimitInput { + s.FeatureType = &v + return s +} + +// SetLimitType sets the LimitType field's value. +func (s *CreateUsageLimitInput) SetLimitType(v string) *CreateUsageLimitInput { + s.LimitType = &v + return s +} + +// SetPeriod sets the Period field's value. +func (s *CreateUsageLimitInput) SetPeriod(v string) *CreateUsageLimitInput { + s.Period = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateUsageLimitInput) SetTags(v []*Tag) *CreateUsageLimitInput { + s.Tags = v + return s +} + +// Describes a usage limit object for a cluster. 
+type CreateUsageLimitOutput struct { + _ struct{} `type:"structure"` + + // The limit amount. If time-based, this amount is in minutes. If data-based, + // this amount is in terabytes (TB). + Amount *int64 `type:"long"` + + // The action that Amazon Redshift takes when the limit is reached. Possible + // values are: + // + // * log - To log an event in a system table. The default is log. + // + // * emit-metric - To emit CloudWatch metrics. + // + // * disable - To disable the feature until the next usage period begins. + BreachAction *string `type:"string" enum:"UsageLimitBreachAction"` + + // The identifier of the cluster with a usage limit. + ClusterIdentifier *string `type:"string"` + + // The Amazon Redshift feature to which the limit applies. + FeatureType *string `type:"string" enum:"UsageLimitFeatureType"` + + // The type of limit. Depending on the feature type, this can be based on a + // time duration or data size. + LimitType *string `type:"string" enum:"UsageLimitLimitType"` + + // The time period that the amount applies to. A weekly period begins on Sunday. + // The default is monthly. + Period *string `type:"string" enum:"UsageLimitPeriod"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The identifier of the usage limit. + UsageLimitId *string `type:"string"` +} + +// String returns the string representation +func (s CreateUsageLimitOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUsageLimitOutput) GoString() string { + return s.String() +} + +// SetAmount sets the Amount field's value. +func (s *CreateUsageLimitOutput) SetAmount(v int64) *CreateUsageLimitOutput { + s.Amount = &v + return s +} + +// SetBreachAction sets the BreachAction field's value. +func (s *CreateUsageLimitOutput) SetBreachAction(v string) *CreateUsageLimitOutput { + s.BreachAction = &v + return s +} + +// SetClusterIdentifier sets the ClusterIdentifier field's value. +func (s *CreateUsageLimitOutput) SetClusterIdentifier(v string) *CreateUsageLimitOutput { + s.ClusterIdentifier = &v + return s +} + +// SetFeatureType sets the FeatureType field's value. +func (s *CreateUsageLimitOutput) SetFeatureType(v string) *CreateUsageLimitOutput { + s.FeatureType = &v + return s +} + +// SetLimitType sets the LimitType field's value. +func (s *CreateUsageLimitOutput) SetLimitType(v string) *CreateUsageLimitOutput { + s.LimitType = &v + return s +} + +// SetPeriod sets the Period field's value. +func (s *CreateUsageLimitOutput) SetPeriod(v string) *CreateUsageLimitOutput { + s.Period = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateUsageLimitOutput) SetTags(v []*Tag) *CreateUsageLimitOutput { + s.Tags = v + return s +} + +// SetUsageLimitId sets the UsageLimitId field's value. +func (s *CreateUsageLimitOutput) SetUsageLimitId(v string) *CreateUsageLimitOutput { + s.UsageLimitId = &v + return s +} + // Describes the status of a cluster while it is in the process of resizing // with an incremental resize. type DataTransferProgress struct { @@ -14238,6 +14870,58 @@ func (s DeleteTagsOutput) GoString() string { return s.String() } +type DeleteUsageLimitInput struct { + _ struct{} `type:"structure"` + + // The identifier of the usage limit to delete. 
+ // + // UsageLimitId is a required field + UsageLimitId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUsageLimitInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUsageLimitInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUsageLimitInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUsageLimitInput"} + if s.UsageLimitId == nil { + invalidParams.Add(request.NewErrParamRequired("UsageLimitId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetUsageLimitId sets the UsageLimitId field's value. +func (s *DeleteUsageLimitInput) SetUsageLimitId(v string) *DeleteUsageLimitInput { + s.UsageLimitId = &v + return s +} + +type DeleteUsageLimitOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUsageLimitOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUsageLimitOutput) GoString() string { + return s.String() +} + type DescribeAccountAttributesInput struct { _ struct{} `type:"structure"` @@ -16638,8 +17322,7 @@ type DescribeResizeOutput struct { // The type of encryption for the cluster after the resize is complete. // - // Possible values are KMS and None. In the China region possible values are: - // Legacy and None. + // Possible values are KMS and None. TargetEncryptionType *string `type:"string"` // The node type that the cluster will have after the resize operation is complete. @@ -17347,62 +18030,196 @@ type DescribeTagsInput struct { // with them. TagKeys []*string `locationNameList:"TagKey" type:"list"` - // A tag value or values for which you want to return all matching resources - // that are associated with the specified value or values. For example, suppose - // that you have resources tagged with values called admin and test. If you - // specify both of these tag values in the request, Amazon Redshift returns - // a response with all resources that have either or both of these tag values - // associated with them. + // A tag value or values for which you want to return all matching resources + // that are associated with the specified value or values. For example, suppose + // that you have resources tagged with values called admin and test. If you + // specify both of these tag values in the request, Amazon Redshift returns + // a response with all resources that have either or both of these tag values + // associated with them. + TagValues []*string `locationNameList:"TagValue" type:"list"` +} + +// String returns the string representation +func (s DescribeTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsInput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *DescribeTagsInput) SetMarker(v string) *DescribeTagsInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeTagsInput) SetMaxRecords(v int64) *DescribeTagsInput { + s.MaxRecords = &v + return s +} + +// SetResourceName sets the ResourceName field's value. 
+func (s *DescribeTagsInput) SetResourceName(v string) *DescribeTagsInput { + s.ResourceName = &v + return s +} + +// SetResourceType sets the ResourceType field's value. +func (s *DescribeTagsInput) SetResourceType(v string) *DescribeTagsInput { + s.ResourceType = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *DescribeTagsInput) SetTagKeys(v []*string) *DescribeTagsInput { + s.TagKeys = v + return s +} + +// SetTagValues sets the TagValues field's value. +func (s *DescribeTagsInput) SetTagValues(v []*string) *DescribeTagsInput { + s.TagValues = v + return s +} + +type DescribeTagsOutput struct { + _ struct{} `type:"structure"` + + // A value that indicates the starting point for the next set of response records + // in a subsequent request. If a value is returned in a response, you can retrieve + // the next set of records by providing this returned marker value in the Marker + // parameter and retrying the command. If the Marker field is empty, all response + // records have been retrieved for the request. + Marker *string `type:"string"` + + // A list of tags with their associated resources. + TaggedResources []*TaggedResource `locationNameList:"TaggedResource" type:"list"` +} + +// String returns the string representation +func (s DescribeTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTagsOutput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *DescribeTagsOutput) SetMarker(v string) *DescribeTagsOutput { + s.Marker = &v + return s +} + +// SetTaggedResources sets the TaggedResources field's value. +func (s *DescribeTagsOutput) SetTaggedResources(v []*TaggedResource) *DescribeTagsOutput { + s.TaggedResources = v + return s +} + +type DescribeUsageLimitsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the cluster for which you want to describe usage limits. + ClusterIdentifier *string `type:"string"` + + // The feature type for which you want to describe usage limits. + FeatureType *string `type:"string" enum:"UsageLimitFeatureType"` + + // An optional parameter that specifies the starting point to return a set of + // response records. When the results of a DescribeUsageLimits request exceed + // the value specified in MaxRecords, AWS returns a value in the Marker field + // of the response. You can retrieve the next set of response records by providing + // the returned marker value in the Marker parameter and retrying the request. + Marker *string `type:"string"` + + // The maximum number of response records to return in each call. If the number + // of remaining response records exceeds the specified MaxRecords value, a value + // is returned in a marker field of the response. You can retrieve the next + // set of records by retrying the command with the returned marker value. + // + // Default: 100 + // + // Constraints: minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` + + // A tag key or keys for which you want to return all matching usage limit objects + // that are associated with the specified key or keys. For example, suppose + // that you have parameter groups that are tagged with keys called owner and + // environment. If you specify both of these tag keys in the request, Amazon + // Redshift returns a response with the usage limit objects have either or both + // of these tag keys associated with them. 
+ TagKeys []*string `locationNameList:"TagKey" type:"list"` + + // A tag value or values for which you want to return all matching usage limit + // objects that are associated with the specified tag value or values. For example, + // suppose that you have parameter groups that are tagged with values called + // admin and test. If you specify both of these tag values in the request, Amazon + // Redshift returns a response with the usage limit objects that have either + // or both of these tag values associated with them. TagValues []*string `locationNameList:"TagValue" type:"list"` + + // The identifier of the usage limit to describe. + UsageLimitId *string `type:"string"` } // String returns the string representation -func (s DescribeTagsInput) String() string { +func (s DescribeUsageLimitsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTagsInput) GoString() string { +func (s DescribeUsageLimitsInput) GoString() string { return s.String() } -// SetMarker sets the Marker field's value. -func (s *DescribeTagsInput) SetMarker(v string) *DescribeTagsInput { - s.Marker = &v +// SetClusterIdentifier sets the ClusterIdentifier field's value. +func (s *DescribeUsageLimitsInput) SetClusterIdentifier(v string) *DescribeUsageLimitsInput { + s.ClusterIdentifier = &v return s } -// SetMaxRecords sets the MaxRecords field's value. -func (s *DescribeTagsInput) SetMaxRecords(v int64) *DescribeTagsInput { - s.MaxRecords = &v +// SetFeatureType sets the FeatureType field's value. +func (s *DescribeUsageLimitsInput) SetFeatureType(v string) *DescribeUsageLimitsInput { + s.FeatureType = &v return s } -// SetResourceName sets the ResourceName field's value. -func (s *DescribeTagsInput) SetResourceName(v string) *DescribeTagsInput { - s.ResourceName = &v +// SetMarker sets the Marker field's value. +func (s *DescribeUsageLimitsInput) SetMarker(v string) *DescribeUsageLimitsInput { + s.Marker = &v return s } -// SetResourceType sets the ResourceType field's value. -func (s *DescribeTagsInput) SetResourceType(v string) *DescribeTagsInput { - s.ResourceType = &v +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeUsageLimitsInput) SetMaxRecords(v int64) *DescribeUsageLimitsInput { + s.MaxRecords = &v return s } // SetTagKeys sets the TagKeys field's value. -func (s *DescribeTagsInput) SetTagKeys(v []*string) *DescribeTagsInput { +func (s *DescribeUsageLimitsInput) SetTagKeys(v []*string) *DescribeUsageLimitsInput { s.TagKeys = v return s } // SetTagValues sets the TagValues field's value. -func (s *DescribeTagsInput) SetTagValues(v []*string) *DescribeTagsInput { +func (s *DescribeUsageLimitsInput) SetTagValues(v []*string) *DescribeUsageLimitsInput { s.TagValues = v return s } -type DescribeTagsOutput struct { +// SetUsageLimitId sets the UsageLimitId field's value. +func (s *DescribeUsageLimitsInput) SetUsageLimitId(v string) *DescribeUsageLimitsInput { + s.UsageLimitId = &v + return s +} + +type DescribeUsageLimitsOutput struct { _ struct{} `type:"structure"` // A value that indicates the starting point for the next set of response records @@ -17412,29 +18229,29 @@ type DescribeTagsOutput struct { // records have been retrieved for the request. Marker *string `type:"string"` - // A list of tags with their associated resources. - TaggedResources []*TaggedResource `locationNameList:"TaggedResource" type:"list"` + // Contains the output from the DescribeUsageLimits action. 
+ UsageLimits []*UsageLimit `type:"list"` } // String returns the string representation -func (s DescribeTagsOutput) String() string { +func (s DescribeUsageLimitsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTagsOutput) GoString() string { +func (s DescribeUsageLimitsOutput) GoString() string { return s.String() } // SetMarker sets the Marker field's value. -func (s *DescribeTagsOutput) SetMarker(v string) *DescribeTagsOutput { +func (s *DescribeUsageLimitsOutput) SetMarker(v string) *DescribeUsageLimitsOutput { s.Marker = &v return s } -// SetTaggedResources sets the TaggedResources field's value. -func (s *DescribeTagsOutput) SetTaggedResources(v []*TaggedResource) *DescribeTagsOutput { - s.TaggedResources = v +// SetUsageLimits sets the UsageLimits field's value. +func (s *DescribeUsageLimitsOutput) SetUsageLimits(v []*UsageLimit) *DescribeUsageLimitsOutput { + s.UsageLimits = v return s } @@ -19017,8 +19834,7 @@ type ModifyClusterInput struct { // Indicates whether the cluster is encrypted. If the value is encrypted (true) // and you provide a value for the KmsKeyId parameter, we encrypt the cluster // with the provided KmsKeyId. If you don't provide a KmsKeyId, we encrypt with - // the default key. In the China region we use legacy encryption if you specify - // that the cluster is encrypted. + // the default key. // // If the value is not encrypted (false), then the cluster is decrypted. Encrypted *bool `type:"boolean"` @@ -19112,7 +19928,7 @@ type ModifyClusterInput struct { // in the Amazon Redshift Cluster Management Guide. // // Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large - // | dc2.8xlarge | ra3.16xlarge + // | dc2.8xlarge | ra3.4xlarge | ra3.16xlarge NodeType *string `type:"string"` // The new number of nodes of the cluster. If you specify a new number of nodes, @@ -19439,6 +20255,7 @@ func (s *ModifyClusterOutput) SetCluster(v *Cluster) *ModifyClusterOutput { return s } +// Describes a modify cluster parameter group operation. type ModifyClusterParameterGroupInput struct { _ struct{} `type:"structure"` @@ -20348,6 +21165,160 @@ func (s *ModifySnapshotScheduleOutput) SetTags(v []*Tag) *ModifySnapshotSchedule return s } +type ModifyUsageLimitInput struct { + _ struct{} `type:"structure"` + + // The new limit amount. For more information about this parameter, see UsageLimit. + Amount *int64 `type:"long"` + + // The new action that Amazon Redshift takes when the limit is reached. For + // more information about this parameter, see UsageLimit. + BreachAction *string `type:"string" enum:"UsageLimitBreachAction"` + + // The identifier of the usage limit to modify. + // + // UsageLimitId is a required field + UsageLimitId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ModifyUsageLimitInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyUsageLimitInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyUsageLimitInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyUsageLimitInput"} + if s.UsageLimitId == nil { + invalidParams.Add(request.NewErrParamRequired("UsageLimitId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAmount sets the Amount field's value. 
+func (s *ModifyUsageLimitInput) SetAmount(v int64) *ModifyUsageLimitInput { + s.Amount = &v + return s +} + +// SetBreachAction sets the BreachAction field's value. +func (s *ModifyUsageLimitInput) SetBreachAction(v string) *ModifyUsageLimitInput { + s.BreachAction = &v + return s +} + +// SetUsageLimitId sets the UsageLimitId field's value. +func (s *ModifyUsageLimitInput) SetUsageLimitId(v string) *ModifyUsageLimitInput { + s.UsageLimitId = &v + return s +} + +// Describes a usage limit object for a cluster. +type ModifyUsageLimitOutput struct { + _ struct{} `type:"structure"` + + // The limit amount. If time-based, this amount is in minutes. If data-based, + // this amount is in terabytes (TB). + Amount *int64 `type:"long"` + + // The action that Amazon Redshift takes when the limit is reached. Possible + // values are: + // + // * log - To log an event in a system table. The default is log. + // + // * emit-metric - To emit CloudWatch metrics. + // + // * disable - To disable the feature until the next usage period begins. + BreachAction *string `type:"string" enum:"UsageLimitBreachAction"` + + // The identifier of the cluster with a usage limit. + ClusterIdentifier *string `type:"string"` + + // The Amazon Redshift feature to which the limit applies. + FeatureType *string `type:"string" enum:"UsageLimitFeatureType"` + + // The type of limit. Depending on the feature type, this can be based on a + // time duration or data size. + LimitType *string `type:"string" enum:"UsageLimitLimitType"` + + // The time period that the amount applies to. A weekly period begins on Sunday. + // The default is monthly. + Period *string `type:"string" enum:"UsageLimitPeriod"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The identifier of the usage limit. + UsageLimitId *string `type:"string"` +} + +// String returns the string representation +func (s ModifyUsageLimitOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyUsageLimitOutput) GoString() string { + return s.String() +} + +// SetAmount sets the Amount field's value. +func (s *ModifyUsageLimitOutput) SetAmount(v int64) *ModifyUsageLimitOutput { + s.Amount = &v + return s +} + +// SetBreachAction sets the BreachAction field's value. +func (s *ModifyUsageLimitOutput) SetBreachAction(v string) *ModifyUsageLimitOutput { + s.BreachAction = &v + return s +} + +// SetClusterIdentifier sets the ClusterIdentifier field's value. +func (s *ModifyUsageLimitOutput) SetClusterIdentifier(v string) *ModifyUsageLimitOutput { + s.ClusterIdentifier = &v + return s +} + +// SetFeatureType sets the FeatureType field's value. +func (s *ModifyUsageLimitOutput) SetFeatureType(v string) *ModifyUsageLimitOutput { + s.FeatureType = &v + return s +} + +// SetLimitType sets the LimitType field's value. +func (s *ModifyUsageLimitOutput) SetLimitType(v string) *ModifyUsageLimitOutput { + s.LimitType = &v + return s +} + +// SetPeriod sets the Period field's value. +func (s *ModifyUsageLimitOutput) SetPeriod(v string) *ModifyUsageLimitOutput { + s.Period = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ModifyUsageLimitOutput) SetTags(v []*Tag) *ModifyUsageLimitOutput { + s.Tags = v + return s +} + +// SetUsageLimitId sets the UsageLimitId field's value. +func (s *ModifyUsageLimitOutput) SetUsageLimitId(v string) *ModifyUsageLimitOutput { + s.UsageLimitId = &v + return s +} + // A list of node configurations. 
type NodeConfigurationOption struct { _ struct{} `type:"structure"` @@ -20600,6 +21571,8 @@ func (s *Parameter) SetSource(v string) *Parameter { return s } +// Describes a pause cluster operation. For example, a scheduled action to run +// the PauseCluster API operation. type PauseClusterInput struct { _ struct{} `type:"structure"` @@ -20638,6 +21611,8 @@ func (s *PauseClusterInput) SetClusterIdentifier(v string) *PauseClusterInput { return s } +// Describes a pause cluster operation. For example, a scheduled action to run +// the PauseCluster API operation. type PauseClusterMessage struct { _ struct{} `type:"structure"` @@ -20716,8 +21691,7 @@ type PendingModifiedValues struct { // The pending or in-progress change of the service version. ClusterVersion *string `type:"string"` - // The encryption type for a cluster. Possible values are: KMS and None. For - // the China region the possible values are None, and Legacy. + // The encryption type for a cluster. Possible values are: KMS and None. EncryptionType *string `type:"string"` // An option that specifies whether to create the cluster with enhanced VPC @@ -21304,6 +22278,8 @@ func (s *ResetClusterParameterGroupInput) SetResetAllParameters(v bool) *ResetCl return s } +// Describes a resize cluster operation. For example, a scheduled action to +// run the ResizeCluster API operation. type ResizeClusterInput struct { _ struct{} `type:"structure"` @@ -21324,7 +22300,8 @@ type ResizeClusterInput struct { // current node type is used. NodeType *string `type:"string"` - // The new number of nodes for the cluster. + // The new number of nodes for the cluster. If not specified, the cluster's + // current number of nodes is used. NumberOfNodes *int64 `type:"integer"` } @@ -21381,6 +22358,8 @@ func (s *ResizeClusterInput) SetNumberOfNodes(v int64) *ResizeClusterInput { return s } +// Describes a resize cluster operation. For example, a scheduled action to +// run the ResizeCluster API operation. type ResizeClusterMessage struct { _ struct{} `type:"structure"` @@ -21401,7 +22380,8 @@ type ResizeClusterMessage struct { // current node type is used. NodeType *string `type:"string"` - // The new number of nodes for the cluster. + // The new number of nodes for the cluster. If not specified, the cluster's + // current number of nodes is used. NumberOfNodes *int64 `type:"integer"` } @@ -21647,7 +22627,7 @@ type RestoreFromClusterSnapshotInput struct { // If you have a DC instance type, you must restore into that same instance // type and size. In other words, you can only restore a dc1.large instance // type into another dc1.large instance type or dc2.large instance type. You - // can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlareg + // can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlarge // cluster, then resize to a dc2.8large cluster. For more information about // node types, see About Clusters and Nodes (https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-about-clusters-and-nodes) // in the Amazon Redshift Cluster Management Guide. @@ -22141,6 +23121,8 @@ func (s *RestoreTableFromClusterSnapshotOutput) SetTableRestoreStatus(v *TableRe return s } +// Describes a resume cluster operation. For example, a scheduled action to +// run the ResumeCluster API operation. type ResumeClusterInput struct { _ struct{} `type:"structure"` @@ -22179,6 +23161,8 @@ func (s *ResumeClusterInput) SetClusterIdentifier(v string) *ResumeClusterInput return s } +// Describes a resume cluster operation. 
For example, a scheduled action to +// run the ResumeCluster API operation. type ResumeClusterMessage struct { _ struct{} `type:"structure"` @@ -23717,6 +24701,103 @@ func (s *UpdateTarget) SetSupportedOperations(v []*SupportedOperation) *UpdateTa return s } +// Describes a usage limit object for a cluster. +type UsageLimit struct { + _ struct{} `type:"structure"` + + // The limit amount. If time-based, this amount is in minutes. If data-based, + // this amount is in terabytes (TB). + Amount *int64 `type:"long"` + + // The action that Amazon Redshift takes when the limit is reached. Possible + // values are: + // + // * log - To log an event in a system table. The default is log. + // + // * emit-metric - To emit CloudWatch metrics. + // + // * disable - To disable the feature until the next usage period begins. + BreachAction *string `type:"string" enum:"UsageLimitBreachAction"` + + // The identifier of the cluster with a usage limit. + ClusterIdentifier *string `type:"string"` + + // The Amazon Redshift feature to which the limit applies. + FeatureType *string `type:"string" enum:"UsageLimitFeatureType"` + + // The type of limit. Depending on the feature type, this can be based on a + // time duration or data size. + LimitType *string `type:"string" enum:"UsageLimitLimitType"` + + // The time period that the amount applies to. A weekly period begins on Sunday. + // The default is monthly. + Period *string `type:"string" enum:"UsageLimitPeriod"` + + // A list of tag instances. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // The identifier of the usage limit. + UsageLimitId *string `type:"string"` +} + +// String returns the string representation +func (s UsageLimit) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UsageLimit) GoString() string { + return s.String() +} + +// SetAmount sets the Amount field's value. +func (s *UsageLimit) SetAmount(v int64) *UsageLimit { + s.Amount = &v + return s +} + +// SetBreachAction sets the BreachAction field's value. +func (s *UsageLimit) SetBreachAction(v string) *UsageLimit { + s.BreachAction = &v + return s +} + +// SetClusterIdentifier sets the ClusterIdentifier field's value. +func (s *UsageLimit) SetClusterIdentifier(v string) *UsageLimit { + s.ClusterIdentifier = &v + return s +} + +// SetFeatureType sets the FeatureType field's value. +func (s *UsageLimit) SetFeatureType(v string) *UsageLimit { + s.FeatureType = &v + return s +} + +// SetLimitType sets the LimitType field's value. +func (s *UsageLimit) SetLimitType(v string) *UsageLimit { + s.LimitType = &v + return s +} + +// SetPeriod sets the Period field's value. +func (s *UsageLimit) SetPeriod(v string) *UsageLimit { + s.Period = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *UsageLimit) SetTags(v []*Tag) *UsageLimit { + s.Tags = v + return s +} + +// SetUsageLimitId sets the UsageLimitId field's value. +func (s *UsageLimit) SetUsageLimitId(v string) *UsageLimit { + s.UsageLimitId = &v + return s +} + // Describes the members of a VPC security group. 
type VpcSecurityGroupMembership struct { _ struct{} `type:"structure"` @@ -23761,6 +24842,15 @@ const ( ActionTypeResizeCluster = "resize-cluster" ) +// ActionType_Values returns all elements of the ActionType enum +func ActionType_Values() []string { + return []string{ + ActionTypeRestoreCluster, + ActionTypeRecommendNodeConfig, + ActionTypeResizeCluster, + } +} + const ( // ModeStandard is a Mode enum value ModeStandard = "standard" @@ -23769,6 +24859,14 @@ const ( ModeHighPerformance = "high-performance" ) +// Mode_Values returns all elements of the Mode enum +func Mode_Values() []string { + return []string{ + ModeStandard, + ModeHighPerformance, + } +} + const ( // NodeConfigurationOptionsFilterNameNodeType is a NodeConfigurationOptionsFilterName enum value NodeConfigurationOptionsFilterNameNodeType = "NodeType" @@ -23783,6 +24881,16 @@ const ( NodeConfigurationOptionsFilterNameMode = "Mode" ) +// NodeConfigurationOptionsFilterName_Values returns all elements of the NodeConfigurationOptionsFilterName enum +func NodeConfigurationOptionsFilterName_Values() []string { + return []string{ + NodeConfigurationOptionsFilterNameNodeType, + NodeConfigurationOptionsFilterNameNumberOfNodes, + NodeConfigurationOptionsFilterNameEstimatedDiskUtilizationPercent, + NodeConfigurationOptionsFilterNameMode, + } +} + const ( // OperatorTypeEq is a OperatorType enum value OperatorTypeEq = "eq" @@ -23806,6 +24914,19 @@ const ( OperatorTypeBetween = "between" ) +// OperatorType_Values returns all elements of the OperatorType enum +func OperatorType_Values() []string { + return []string{ + OperatorTypeEq, + OperatorTypeLt, + OperatorTypeGt, + OperatorTypeLe, + OperatorTypeGe, + OperatorTypeIn, + OperatorTypeBetween, + } +} + const ( // ParameterApplyTypeStatic is a ParameterApplyType enum value ParameterApplyTypeStatic = "static" @@ -23814,6 +24935,14 @@ const ( ParameterApplyTypeDynamic = "dynamic" ) +// ParameterApplyType_Values returns all elements of the ParameterApplyType enum +func ParameterApplyType_Values() []string { + return []string{ + ParameterApplyTypeStatic, + ParameterApplyTypeDynamic, + } +} + const ( // ReservedNodeOfferingTypeRegular is a ReservedNodeOfferingType enum value ReservedNodeOfferingTypeRegular = "Regular" @@ -23822,6 +24951,14 @@ const ( ReservedNodeOfferingTypeUpgradable = "Upgradable" ) +// ReservedNodeOfferingType_Values returns all elements of the ReservedNodeOfferingType enum +func ReservedNodeOfferingType_Values() []string { + return []string{ + ReservedNodeOfferingTypeRegular, + ReservedNodeOfferingTypeUpgradable, + } +} + const ( // ScheduleStateModifying is a ScheduleState enum value ScheduleStateModifying = "MODIFYING" @@ -23833,6 +24970,15 @@ const ( ScheduleStateFailed = "FAILED" ) +// ScheduleState_Values returns all elements of the ScheduleState enum +func ScheduleState_Values() []string { + return []string{ + ScheduleStateModifying, + ScheduleStateActive, + ScheduleStateFailed, + } +} + const ( // ScheduledActionFilterNameClusterIdentifier is a ScheduledActionFilterName enum value ScheduledActionFilterNameClusterIdentifier = "cluster-identifier" @@ -23841,6 +24987,14 @@ const ( ScheduledActionFilterNameIamRole = "iam-role" ) +// ScheduledActionFilterName_Values returns all elements of the ScheduledActionFilterName enum +func ScheduledActionFilterName_Values() []string { + return []string{ + ScheduledActionFilterNameClusterIdentifier, + ScheduledActionFilterNameIamRole, + } +} + const ( // ScheduledActionStateActive is a ScheduledActionState enum value 
ScheduledActionStateActive = "ACTIVE" @@ -23849,6 +25003,14 @@ const ( ScheduledActionStateDisabled = "DISABLED" ) +// ScheduledActionState_Values returns all elements of the ScheduledActionState enum +func ScheduledActionState_Values() []string { + return []string{ + ScheduledActionStateActive, + ScheduledActionStateDisabled, + } +} + const ( // ScheduledActionTypeValuesResizeCluster is a ScheduledActionTypeValues enum value ScheduledActionTypeValuesResizeCluster = "ResizeCluster" @@ -23860,6 +25022,15 @@ const ( ScheduledActionTypeValuesResumeCluster = "ResumeCluster" ) +// ScheduledActionTypeValues_Values returns all elements of the ScheduledActionTypeValues enum +func ScheduledActionTypeValues_Values() []string { + return []string{ + ScheduledActionTypeValuesResizeCluster, + ScheduledActionTypeValuesPauseCluster, + ScheduledActionTypeValuesResumeCluster, + } +} + const ( // SnapshotAttributeToSortBySourceType is a SnapshotAttributeToSortBy enum value SnapshotAttributeToSortBySourceType = "SOURCE_TYPE" @@ -23871,6 +25042,15 @@ const ( SnapshotAttributeToSortByCreateTime = "CREATE_TIME" ) +// SnapshotAttributeToSortBy_Values returns all elements of the SnapshotAttributeToSortBy enum +func SnapshotAttributeToSortBy_Values() []string { + return []string{ + SnapshotAttributeToSortBySourceType, + SnapshotAttributeToSortByTotalSize, + SnapshotAttributeToSortByCreateTime, + } +} + const ( // SortByOrderAsc is a SortByOrder enum value SortByOrderAsc = "ASC" @@ -23879,6 +25059,14 @@ const ( SortByOrderDesc = "DESC" ) +// SortByOrder_Values returns all elements of the SortByOrder enum +func SortByOrder_Values() []string { + return []string{ + SortByOrderAsc, + SortByOrderDesc, + } +} + const ( // SourceTypeCluster is a SourceType enum value SourceTypeCluster = "cluster" @@ -23896,6 +25084,17 @@ const ( SourceTypeScheduledAction = "scheduled-action" ) +// SourceType_Values returns all elements of the SourceType enum +func SourceType_Values() []string { + return []string{ + SourceTypeCluster, + SourceTypeClusterParameterGroup, + SourceTypeClusterSecurityGroup, + SourceTypeClusterSnapshot, + SourceTypeScheduledAction, + } +} + const ( // TableRestoreStatusTypePending is a TableRestoreStatusType enum value TableRestoreStatusTypePending = "PENDING" @@ -23912,3 +25111,86 @@ const ( // TableRestoreStatusTypeCanceled is a TableRestoreStatusType enum value TableRestoreStatusTypeCanceled = "CANCELED" ) + +// TableRestoreStatusType_Values returns all elements of the TableRestoreStatusType enum +func TableRestoreStatusType_Values() []string { + return []string{ + TableRestoreStatusTypePending, + TableRestoreStatusTypeInProgress, + TableRestoreStatusTypeSucceeded, + TableRestoreStatusTypeFailed, + TableRestoreStatusTypeCanceled, + } +} + +const ( + // UsageLimitBreachActionLog is a UsageLimitBreachAction enum value + UsageLimitBreachActionLog = "log" + + // UsageLimitBreachActionEmitMetric is a UsageLimitBreachAction enum value + UsageLimitBreachActionEmitMetric = "emit-metric" + + // UsageLimitBreachActionDisable is a UsageLimitBreachAction enum value + UsageLimitBreachActionDisable = "disable" +) + +// UsageLimitBreachAction_Values returns all elements of the UsageLimitBreachAction enum +func UsageLimitBreachAction_Values() []string { + return []string{ + UsageLimitBreachActionLog, + UsageLimitBreachActionEmitMetric, + UsageLimitBreachActionDisable, + } +} + +const ( + // UsageLimitFeatureTypeSpectrum is a UsageLimitFeatureType enum value + UsageLimitFeatureTypeSpectrum = "spectrum" + + // 
UsageLimitFeatureTypeConcurrencyScaling is a UsageLimitFeatureType enum value + UsageLimitFeatureTypeConcurrencyScaling = "concurrency-scaling" +) + +// UsageLimitFeatureType_Values returns all elements of the UsageLimitFeatureType enum +func UsageLimitFeatureType_Values() []string { + return []string{ + UsageLimitFeatureTypeSpectrum, + UsageLimitFeatureTypeConcurrencyScaling, + } +} + +const ( + // UsageLimitLimitTypeTime is a UsageLimitLimitType enum value + UsageLimitLimitTypeTime = "time" + + // UsageLimitLimitTypeDataScanned is a UsageLimitLimitType enum value + UsageLimitLimitTypeDataScanned = "data-scanned" +) + +// UsageLimitLimitType_Values returns all elements of the UsageLimitLimitType enum +func UsageLimitLimitType_Values() []string { + return []string{ + UsageLimitLimitTypeTime, + UsageLimitLimitTypeDataScanned, + } +} + +const ( + // UsageLimitPeriodDaily is a UsageLimitPeriod enum value + UsageLimitPeriodDaily = "daily" + + // UsageLimitPeriodWeekly is a UsageLimitPeriod enum value + UsageLimitPeriodWeekly = "weekly" + + // UsageLimitPeriodMonthly is a UsageLimitPeriod enum value + UsageLimitPeriodMonthly = "monthly" +) + +// UsageLimitPeriod_Values returns all elements of the UsageLimitPeriod enum +func UsageLimitPeriod_Values() []string { + return []string{ + UsageLimitPeriodDaily, + UsageLimitPeriodWeekly, + UsageLimitPeriodMonthly, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/errors.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/errors.go index f710284ae..ada8b21cb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/redshift/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/errors.go @@ -421,6 +421,12 @@ const ( // The tag is invalid. ErrCodeInvalidTagFault = "InvalidTagFault" + // ErrCodeInvalidUsageLimitFault for service response error code + // "InvalidUsageLimit". + // + // The usage limit is not valid. + ErrCodeInvalidUsageLimitFault = "InvalidUsageLimit" + // ErrCodeInvalidVPCNetworkStateFault for service response error code // "InvalidVPCNetworkStateFault". // @@ -695,4 +701,16 @@ const ( // // A request option was specified that is not supported. ErrCodeUnsupportedOptionFault = "UnsupportedOptionFault" + + // ErrCodeUsageLimitAlreadyExistsFault for service response error code + // "UsageLimitAlreadyExists". + // + // The usage limit already exists. + ErrCodeUsageLimitAlreadyExistsFault = "UsageLimitAlreadyExists" + + // ErrCodeUsageLimitNotFoundFault for service response error code + // "UsageLimitNotFound". + // + // The usage limit identifier can't be found. 
+ ErrCodeUsageLimitNotFoundFault = "UsageLimitNotFound" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go index 22e67bb10..c0dedf447 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/api.go b/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/api.go index 4fa97b4d5..e64fa05bd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/api.go @@ -55,7 +55,8 @@ func (c *ResourceGroups) CreateGroupRequest(input *CreateGroupInput) (req *reque // CreateGroup API operation for AWS Resource Groups. // -// Creates a group with a specified name, description, and resource query. +// Creates a resource group with the specified name and description. You can +// optionally include a resource query, or a service configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -66,20 +67,20 @@ func (c *ResourceGroups) CreateGroupRequest(input *CreateGroupInput) (req *reque // // Returned Error Types: // * BadRequestException -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. // // * ForbiddenException -// The caller is not authorized to make the request. +// The caller isn't authorized to make the request. Check permissions. // // * MethodNotAllowedException -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. // // * TooManyRequestsException -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. // // * InternalServerErrorException -// An internal error occurred while processing the request. +// An internal error occurred while processing the request. Try again later. // // See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/CreateGroup func (c *ResourceGroups) CreateGroup(input *CreateGroupInput) (*CreateGroupOutput, error) { @@ -132,8 +133,8 @@ const opDeleteGroup = "DeleteGroup" func (c *ResourceGroups) DeleteGroupRequest(input *DeleteGroupInput) (req *request.Request, output *DeleteGroupOutput) { op := &request.Operation{ Name: opDeleteGroup, - HTTPMethod: "DELETE", - HTTPPath: "/groups/{GroupName}", + HTTPMethod: "POST", + HTTPPath: "/delete-group", } if input == nil { @@ -147,8 +148,9 @@ func (c *ResourceGroups) DeleteGroupRequest(input *DeleteGroupInput) (req *reque // DeleteGroup API operation for AWS Resource Groups. // -// Deletes a specified resource group. Deleting a resource group does not delete -// resources that are members of the group; it only deletes the group structure. +// Deletes the specified resource group. 
Deleting a resource group does not +// delete any resources that are members of the group; it only deletes the group +// structure. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -159,23 +161,23 @@ func (c *ResourceGroups) DeleteGroupRequest(input *DeleteGroupInput) (req *reque // // Returned Error Types: // * BadRequestException -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. // // * ForbiddenException -// The caller is not authorized to make the request. +// The caller isn't authorized to make the request. Check permissions. // // * NotFoundException -// One or more resources specified in the request do not exist. +// One or more of the specified resources don't exist. // // * MethodNotAllowedException -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. // // * TooManyRequestsException -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. // // * InternalServerErrorException -// An internal error occurred while processing the request. +// An internal error occurred while processing the request. Try again later. // // See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/DeleteGroup func (c *ResourceGroups) DeleteGroup(input *DeleteGroupInput) (*DeleteGroupOutput, error) { @@ -228,8 +230,8 @@ const opGetGroup = "GetGroup" func (c *ResourceGroups) GetGroupRequest(input *GetGroupInput) (req *request.Request, output *GetGroupOutput) { op := &request.Operation{ Name: opGetGroup, - HTTPMethod: "GET", - HTTPPath: "/groups/{GroupName}", + HTTPMethod: "POST", + HTTPPath: "/get-group", } if input == nil { @@ -254,23 +256,23 @@ func (c *ResourceGroups) GetGroupRequest(input *GetGroupInput) (req *request.Req // // Returned Error Types: // * BadRequestException -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. // // * ForbiddenException -// The caller is not authorized to make the request. +// The caller isn't authorized to make the request. Check permissions. // // * NotFoundException -// One or more resources specified in the request do not exist. +// One or more of the specified resources don't exist. // // * MethodNotAllowedException -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. // // * TooManyRequestsException -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. // // * InternalServerErrorException -// An internal error occurred while processing the request. +// An internal error occurred while processing the request. Try again later. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/GetGroup func (c *ResourceGroups) GetGroup(input *GetGroupInput) (*GetGroupOutput, error) { @@ -294,6 +296,108 @@ func (c *ResourceGroups) GetGroupWithContext(ctx aws.Context, input *GetGroupInp return out, req.Send() } +const opGetGroupConfiguration = "GetGroupConfiguration" + +// GetGroupConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetGroupConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetGroupConfiguration for more information on using the GetGroupConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetGroupConfigurationRequest method. +// req, resp := client.GetGroupConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/GetGroupConfiguration +func (c *ResourceGroups) GetGroupConfigurationRequest(input *GetGroupConfigurationInput) (req *request.Request, output *GetGroupConfigurationOutput) { + op := &request.Operation{ + Name: opGetGroupConfiguration, + HTTPMethod: "POST", + HTTPPath: "/get-group-configuration", + } + + if input == nil { + input = &GetGroupConfigurationInput{} + } + + output = &GetGroupConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetGroupConfiguration API operation for AWS Resource Groups. +// +// Returns the service configuration associated with the specified resource +// group. AWS Resource Groups supports configurations for the following resource +// group types: +// +// * AWS::EC2::CapacityReservationPool - Amazon EC2 capacity reservation +// pools. For more information, see Working with capacity reservation groups +// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/capacity-reservations-using.html#create-cr-group) +// in the EC2 Users Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Resource Groups's +// API operation GetGroupConfiguration for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request includes one or more parameters that violate validation rules. +// +// * ForbiddenException +// The caller isn't authorized to make the request. Check permissions. +// +// * NotFoundException +// One or more of the specified resources don't exist. +// +// * MethodNotAllowedException +// The request uses an HTTP method that isn't allowed for the specified resource. +// +// * TooManyRequestsException +// You've exceeded throttling limits by making too many requests in a period +// of time. +// +// * InternalServerErrorException +// An internal error occurred while processing the request. Try again later. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/GetGroupConfiguration +func (c *ResourceGroups) GetGroupConfiguration(input *GetGroupConfigurationInput) (*GetGroupConfigurationOutput, error) { + req, out := c.GetGroupConfigurationRequest(input) + return out, req.Send() +} + +// GetGroupConfigurationWithContext is the same as GetGroupConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetGroupConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ResourceGroups) GetGroupConfigurationWithContext(ctx aws.Context, input *GetGroupConfigurationInput, opts ...request.Option) (*GetGroupConfigurationOutput, error) { + req, out := c.GetGroupConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetGroupQuery = "GetGroupQuery" // GetGroupQueryRequest generates a "aws/request.Request" representing the @@ -323,8 +427,8 @@ const opGetGroupQuery = "GetGroupQuery" func (c *ResourceGroups) GetGroupQueryRequest(input *GetGroupQueryInput) (req *request.Request, output *GetGroupQueryOutput) { op := &request.Operation{ Name: opGetGroupQuery, - HTTPMethod: "GET", - HTTPPath: "/groups/{GroupName}/query", + HTTPMethod: "POST", + HTTPPath: "/get-group-query", } if input == nil { @@ -338,7 +442,7 @@ func (c *ResourceGroups) GetGroupQueryRequest(input *GetGroupQueryInput) (req *r // GetGroupQuery API operation for AWS Resource Groups. // -// Returns the resource query associated with the specified resource group. +// Retrieves the resource query associated with the specified resource group. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -349,23 +453,23 @@ func (c *ResourceGroups) GetGroupQueryRequest(input *GetGroupQueryInput) (req *r // // Returned Error Types: // * BadRequestException -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. // // * ForbiddenException -// The caller is not authorized to make the request. +// The caller isn't authorized to make the request. Check permissions. // // * NotFoundException -// One or more resources specified in the request do not exist. +// One or more of the specified resources don't exist. // // * MethodNotAllowedException -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. // // * TooManyRequestsException -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. // // * InternalServerErrorException -// An internal error occurred while processing the request. +// An internal error occurred while processing the request. Try again later. 
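GetGroupConfiguration is one of the operations this revision adds to the Resource Groups client. A minimal sketch of calling it; the group name is a placeholder, and the Group field accepts either a group name or an ARN, as documented above.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/resourcegroups"
)

func main() {
	conn := resourcegroups.New(session.Must(session.NewSession()))

	// "example-group" is a placeholder; Group takes a group name or ARN.
	out, err := conn.GetGroupConfiguration(&resourcegroups.GetGroupConfigurationInput{
		Group: aws.String("example-group"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// GroupConfiguration carries the current and proposed configuration
	// items plus a Status value.
	fmt.Println(out.GroupConfiguration)
}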
// // See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/GetGroupQuery func (c *ResourceGroups) GetGroupQuery(input *GetGroupQueryInput) (*GetGroupQueryOutput, error) { @@ -445,23 +549,23 @@ func (c *ResourceGroups) GetTagsRequest(input *GetTagsInput) (req *request.Reque // // Returned Error Types: // * BadRequestException -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. // // * ForbiddenException -// The caller is not authorized to make the request. +// The caller isn't authorized to make the request. Check permissions. // // * NotFoundException -// One or more resources specified in the request do not exist. +// One or more of the specified resources don't exist. // // * MethodNotAllowedException -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. // // * TooManyRequestsException -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. // // * InternalServerErrorException -// An internal error occurred while processing the request. +// An internal error occurred while processing the request. Try again later. // // See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/GetTags func (c *ResourceGroups) GetTags(input *GetTagsInput) (*GetTagsOutput, error) { @@ -485,6 +589,101 @@ func (c *ResourceGroups) GetTagsWithContext(ctx aws.Context, input *GetTagsInput return out, req.Send() } +const opGroupResources = "GroupResources" + +// GroupResourcesRequest generates a "aws/request.Request" representing the +// client's request for the GroupResources operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GroupResources for more information on using the GroupResources +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GroupResourcesRequest method. +// req, resp := client.GroupResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/GroupResources +func (c *ResourceGroups) GroupResourcesRequest(input *GroupResourcesInput) (req *request.Request, output *GroupResourcesOutput) { + op := &request.Operation{ + Name: opGroupResources, + HTTPMethod: "POST", + HTTPPath: "/group-resources", + } + + if input == nil { + input = &GroupResourcesInput{} + } + + output = &GroupResourcesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GroupResources API operation for AWS Resource Groups. +// +// Adds the specified resources to the specified group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for AWS Resource Groups's +// API operation GroupResources for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request includes one or more parameters that violate validation rules. +// +// * ForbiddenException +// The caller isn't authorized to make the request. Check permissions. +// +// * NotFoundException +// One or more of the specified resources don't exist. +// +// * MethodNotAllowedException +// The request uses an HTTP method that isn't allowed for the specified resource. +// +// * TooManyRequestsException +// You've exceeded throttling limits by making too many requests in a period +// of time. +// +// * InternalServerErrorException +// An internal error occurred while processing the request. Try again later. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/GroupResources +func (c *ResourceGroups) GroupResources(input *GroupResourcesInput) (*GroupResourcesOutput, error) { + req, out := c.GroupResourcesRequest(input) + return out, req.Send() +} + +// GroupResourcesWithContext is the same as GroupResources with the addition of +// the ability to pass a context and additional request options. +// +// See GroupResources for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ResourceGroups) GroupResourcesWithContext(ctx aws.Context, input *GroupResourcesInput, opts ...request.Option) (*GroupResourcesOutput, error) { + req, out := c.GroupResourcesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListGroupResources = "ListGroupResources" // ListGroupResourcesRequest generates a "aws/request.Request" representing the @@ -515,7 +714,7 @@ func (c *ResourceGroups) ListGroupResourcesRequest(input *ListGroupResourcesInpu op := &request.Operation{ Name: opListGroupResources, HTTPMethod: "POST", - HTTPPath: "/groups/{GroupName}/resource-identifiers-list", + HTTPPath: "/list-group-resources", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, @@ -535,7 +734,7 @@ func (c *ResourceGroups) ListGroupResourcesRequest(input *ListGroupResourcesInpu // ListGroupResources API operation for AWS Resource Groups. // -// Returns a list of ARNs of resources that are members of a specified resource +// Returns a list of ARNs of the resources that are members of a specified resource // group. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -547,27 +746,27 @@ func (c *ResourceGroups) ListGroupResourcesRequest(input *ListGroupResourcesInpu // // Returned Error Types: // * UnauthorizedException -// The request has not been applied because it lacks valid authentication credentials -// for the target resource. +// The request was rejected because it doesn't have valid credentials for the +// target resource. // // * BadRequestException -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. // // * ForbiddenException -// The caller is not authorized to make the request. +// The caller isn't authorized to make the request. Check permissions. 
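GroupResources, also new in this revision, adds existing resources to a group whose configuration allows their type. A sketch under the same assumptions; both the group name and the capacity reservation ARN are placeholders. UngroupResources, defined further down, is the mirror operation for removing members.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/resourcegroups"
)

func main() {
	conn := resourcegroups.New(session.Must(session.NewSession()))

	// Placeholder group name and resource ARN.
	out, err := conn.GroupResources(&resourcegroups.GroupResourcesInput{
		Group: aws.String("example-group"),
		ResourceArns: aws.StringSlice([]string{
			"arn:aws:ec2:us-east-1:123456789012:capacity-reservation/cr-0123456789abcdef0",
		}),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The output reports which ARNs were added and, via FailedResource
	// entries, which were not.
	fmt.Println(out)
}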
// // * NotFoundException -// One or more resources specified in the request do not exist. +// One or more of the specified resources don't exist. // // * MethodNotAllowedException -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. // // * TooManyRequestsException -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. // // * InternalServerErrorException -// An internal error occurred while processing the request. +// An internal error occurred while processing the request. Try again later. // // See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/ListGroupResources func (c *ResourceGroups) ListGroupResources(input *ListGroupResourcesInput) (*ListGroupResourcesOutput, error) { @@ -704,20 +903,20 @@ func (c *ResourceGroups) ListGroupsRequest(input *ListGroupsInput) (req *request // // Returned Error Types: // * BadRequestException -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. // // * ForbiddenException -// The caller is not authorized to make the request. +// The caller isn't authorized to make the request. Check permissions. // // * MethodNotAllowedException -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. // // * TooManyRequestsException -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. // // * InternalServerErrorException -// An internal error occurred while processing the request. +// An internal error occurred while processing the request. Try again later. // // See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/ListGroups func (c *ResourceGroups) ListGroups(input *ListGroupsInput) (*ListGroupsOutput, error) { @@ -843,7 +1042,7 @@ func (c *ResourceGroups) SearchResourcesRequest(input *SearchResourcesInput) (re // SearchResources API operation for AWS Resource Groups. // -// Returns a list of AWS resource identifiers that matches a specified query. +// Returns a list of AWS resource identifiers that matches the specified query. // The query uses the same format as a resource query in a CreateGroup or UpdateGroupQuery // operation. // @@ -856,24 +1055,24 @@ func (c *ResourceGroups) SearchResourcesRequest(input *SearchResourcesInput) (re // // Returned Error Types: // * UnauthorizedException -// The request has not been applied because it lacks valid authentication credentials -// for the target resource. +// The request was rejected because it doesn't have valid credentials for the +// target resource. // // * BadRequestException -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. // // * ForbiddenException -// The caller is not authorized to make the request. +// The caller isn't authorized to make the request. Check permissions. // // * MethodNotAllowedException -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. 
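ListGroupResources keeps its paginator while moving to POST /list-group-resources, so page iteration is unchanged. A sketch; ListGroupResourcesPages, the Group input field, and the ResourceIdentifiers output field are the usual generated pieces for this API and are assumed here rather than shown in this hunk.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/resourcegroups"
)

func main() {
	conn := resourcegroups.New(session.Must(session.NewSession()))

	// "example-group" is a placeholder group name or ARN.
	input := &resourcegroups.ListGroupResourcesInput{
		Group: aws.String("example-group"),
	}

	// Walk every page and print the member ARNs.
	err := conn.ListGroupResourcesPages(input, func(page *resourcegroups.ListGroupResourcesOutput, lastPage bool) bool {
		for _, ri := range page.ResourceIdentifiers {
			fmt.Println(aws.StringValue(ri.ResourceArn))
		}
		return !lastPage
	})
	if err != nil {
		log.Fatal(err)
	}
}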
// // * TooManyRequestsException -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. // // * InternalServerErrorException -// An internal error occurred while processing the request. +// An internal error occurred while processing the request. Try again later. // // See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/SearchResources func (c *ResourceGroups) SearchResources(input *SearchResourcesInput) (*SearchResourcesOutput, error) { @@ -996,6 +1195,11 @@ func (c *ResourceGroups) TagRequest(input *TagInput) (req *request.Request, outp // Adds tags to a resource group with the specified ARN. Existing tags on a // resource group are not changed if they are not specified in the request parameters. // +// Do not store personally identifiable information (PII) or other confidential +// or sensitive information in tags. We use tags to provide you with billing +// and administration services. Tags are not intended to be used for private +// or sensitive data. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1005,23 +1209,23 @@ func (c *ResourceGroups) TagRequest(input *TagInput) (req *request.Request, outp // // Returned Error Types: // * BadRequestException -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. // // * ForbiddenException -// The caller is not authorized to make the request. +// The caller isn't authorized to make the request. Check permissions. // // * NotFoundException -// One or more resources specified in the request do not exist. +// One or more of the specified resources don't exist. // // * MethodNotAllowedException -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. // // * TooManyRequestsException -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. // // * InternalServerErrorException -// An internal error occurred while processing the request. +// An internal error occurred while processing the request. Try again later. // // See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/Tag func (c *ResourceGroups) Tag(input *TagInput) (*TagOutput, error) { @@ -1045,6 +1249,101 @@ func (c *ResourceGroups) TagWithContext(ctx aws.Context, input *TagInput, opts . return out, req.Send() } +const opUngroupResources = "UngroupResources" + +// UngroupResourcesRequest generates a "aws/request.Request" representing the +// client's request for the UngroupResources operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UngroupResources for more information on using the UngroupResources +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the UngroupResourcesRequest method. +// req, resp := client.UngroupResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/UngroupResources +func (c *ResourceGroups) UngroupResourcesRequest(input *UngroupResourcesInput) (req *request.Request, output *UngroupResourcesOutput) { + op := &request.Operation{ + Name: opUngroupResources, + HTTPMethod: "POST", + HTTPPath: "/ungroup-resources", + } + + if input == nil { + input = &UngroupResourcesInput{} + } + + output = &UngroupResourcesOutput{} + req = c.newRequest(op, input, output) + return +} + +// UngroupResources API operation for AWS Resource Groups. +// +// Removes the specified resources from the specified group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Resource Groups's +// API operation UngroupResources for usage and error information. +// +// Returned Error Types: +// * BadRequestException +// The request includes one or more parameters that violate validation rules. +// +// * ForbiddenException +// The caller isn't authorized to make the request. Check permissions. +// +// * NotFoundException +// One or more of the specified resources don't exist. +// +// * MethodNotAllowedException +// The request uses an HTTP method that isn't allowed for the specified resource. +// +// * TooManyRequestsException +// You've exceeded throttling limits by making too many requests in a period +// of time. +// +// * InternalServerErrorException +// An internal error occurred while processing the request. Try again later. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/UngroupResources +func (c *ResourceGroups) UngroupResources(input *UngroupResourcesInput) (*UngroupResourcesOutput, error) { + req, out := c.UngroupResourcesRequest(input) + return out, req.Send() +} + +// UngroupResourcesWithContext is the same as UngroupResources with the addition of +// the ability to pass a context and additional request options. +// +// See UngroupResources for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ResourceGroups) UngroupResourcesWithContext(ctx aws.Context, input *UngroupResourcesInput, opts ...request.Option) (*UngroupResourcesOutput, error) { + req, out := c.UngroupResourcesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUntag = "Untag" // UntagRequest generates a "aws/request.Request" representing the @@ -1089,7 +1388,7 @@ func (c *ResourceGroups) UntagRequest(input *UntagInput) (req *request.Request, // Untag API operation for AWS Resource Groups. // -// Deletes specified tags from a specified resource. +// Deletes tags from a specified resource group. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1100,23 +1399,23 @@ func (c *ResourceGroups) UntagRequest(input *UntagInput) (req *request.Request, // // Returned Error Types: // * BadRequestException -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. // // * ForbiddenException -// The caller is not authorized to make the request. +// The caller isn't authorized to make the request. Check permissions. // // * NotFoundException -// One or more resources specified in the request do not exist. +// One or more of the specified resources don't exist. // // * MethodNotAllowedException -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. // // * TooManyRequestsException -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. // // * InternalServerErrorException -// An internal error occurred while processing the request. +// An internal error occurred while processing the request. Try again later. // // See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/Untag func (c *ResourceGroups) Untag(input *UntagInput) (*UntagOutput, error) { @@ -1169,8 +1468,8 @@ const opUpdateGroup = "UpdateGroup" func (c *ResourceGroups) UpdateGroupRequest(input *UpdateGroupInput) (req *request.Request, output *UpdateGroupOutput) { op := &request.Operation{ Name: opUpdateGroup, - HTTPMethod: "PUT", - HTTPPath: "/groups/{GroupName}", + HTTPMethod: "POST", + HTTPPath: "/update-group", } if input == nil { @@ -1184,8 +1483,8 @@ func (c *ResourceGroups) UpdateGroupRequest(input *UpdateGroupInput) (req *reque // UpdateGroup API operation for AWS Resource Groups. // -// Updates an existing group with a new or changed description. You cannot update -// the name of a resource group. +// Updates the description for an existing group. You cannot update the name +// of a resource group. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1196,23 +1495,23 @@ func (c *ResourceGroups) UpdateGroupRequest(input *UpdateGroupInput) (req *reque // // Returned Error Types: // * BadRequestException -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. // // * ForbiddenException -// The caller is not authorized to make the request. +// The caller isn't authorized to make the request. Check permissions. // // * NotFoundException -// One or more resources specified in the request do not exist. +// One or more of the specified resources don't exist. // // * MethodNotAllowedException -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. // // * TooManyRequestsException -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. // // * InternalServerErrorException -// An internal error occurred while processing the request. +// An internal error occurred while processing the request. Try again later. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/UpdateGroup func (c *ResourceGroups) UpdateGroup(input *UpdateGroupInput) (*UpdateGroupOutput, error) { @@ -1265,8 +1564,8 @@ const opUpdateGroupQuery = "UpdateGroupQuery" func (c *ResourceGroups) UpdateGroupQueryRequest(input *UpdateGroupQueryInput) (req *request.Request, output *UpdateGroupQueryOutput) { op := &request.Operation{ Name: opUpdateGroupQuery, - HTTPMethod: "PUT", - HTTPPath: "/groups/{GroupName}/query", + HTTPMethod: "POST", + HTTPPath: "/update-group-query", } if input == nil { @@ -1291,23 +1590,23 @@ func (c *ResourceGroups) UpdateGroupQueryRequest(input *UpdateGroupQueryInput) ( // // Returned Error Types: // * BadRequestException -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. // // * ForbiddenException -// The caller is not authorized to make the request. +// The caller isn't authorized to make the request. Check permissions. // // * NotFoundException -// One or more resources specified in the request do not exist. +// One or more of the specified resources don't exist. // // * MethodNotAllowedException -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. // // * TooManyRequestsException -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. // // * InternalServerErrorException -// An internal error occurred while processing the request. +// An internal error occurred while processing the request. Try again later. // // See also, https://docs.aws.amazon.com/goto/WebAPI/resource-groups-2017-11-27/UpdateGroupQuery func (c *ResourceGroups) UpdateGroupQuery(input *UpdateGroupQueryInput) (*UpdateGroupQueryOutput, error) { @@ -1331,11 +1630,10 @@ func (c *ResourceGroups) UpdateGroupQueryWithContext(ctx aws.Context, input *Upd return out, req.Send() } -// The request does not comply with validation rules that are defined for the -// request parameters. +// The request includes one or more parameters that violate validation rules. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -1352,17 +1650,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1370,37 +1668,44 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } type CreateGroupInput struct { _ struct{} `type:"structure"` - // The description of the resource group. Descriptions can have a maximum of - // 511 characters, including letters, numbers, hyphens, underscores, punctuation, - // and spaces. + // A configuration associates the resource group with an AWS service and specifies + // how the service can interact with the resources in the group. A configuration + // is an array of GroupConfigurationItem elements. + // + // You can specify either a Configuration or a ResourceQuery in a group, but + // not both. + Configuration []*GroupConfigurationItem `type:"list"` + + // The description of the resource group. Descriptions can consist of letters, + // numbers, hyphens, underscores, periods, and spaces. Description *string `type:"string"` // The name of the group, which is the identifier of the group in other operations. - // A resource group name cannot be updated after it is created. A resource group - // name can have a maximum of 128 characters, including letters, numbers, hyphens, - // dots, and underscores. The name cannot start with AWS or aws; these are reserved. - // A resource group name must be unique within your account. + // You can't change the name of a resource group after you create it. A resource + // group name can consist of letters, numbers, hyphens, periods, and underscores. + // The name cannot start with AWS or aws; these are reserved. A resource group + // name must be unique within each AWS Region in your AWS account. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -1408,12 +1713,10 @@ type CreateGroupInput struct { // The resource query that determines which AWS resources are members of this // group. // - // ResourceQuery is a required field - ResourceQuery *ResourceQuery `type:"structure" required:"true"` + // You can specify either a ResourceQuery or a Configuration, but not both. + ResourceQuery *ResourceQuery `type:"structure"` - // The tags to add to the group. A tag is a string-to-string map of key-value - // pairs. Tag keys can have a maximum character length of 128 characters, and - // tag values can have a maximum length of 256 characters. + // The tags to add to the group. A tag is key-value pair string. 
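The exception types in this package switch to pointer receivers and an exported RespMetadata field, as in the BadRequestException hunk above, so error inspection has to target the pointer type. A sketch; handleCreateError is an illustrative name, not something defined in this package.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/service/resourcegroups"
)

// handleCreateError shows the pointer form: after this change only
// *BadRequestException, not the value type, satisfies the error interface.
func handleCreateError(err error) {
	var bad *resourcegroups.BadRequestException
	if errors.As(err, &bad) {
		fmt.Printf("bad request (HTTP %d): %s\n", bad.StatusCode(), bad.Message())
		return
	}
	if err != nil {
		fmt.Println(err)
	}
}

func main() {
	handleCreateError(nil) // prints nothing
}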
Tags map[string]*string `type:"map"` } @@ -1436,8 +1739,15 @@ func (s *CreateGroupInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } - if s.ResourceQuery == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceQuery")) + if s.Configuration != nil { + for i, v := range s.Configuration { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Configuration", i), err.(request.ErrInvalidParams)) + } + } } if s.ResourceQuery != nil { if err := s.ResourceQuery.Validate(); err != nil { @@ -1451,6 +1761,12 @@ func (s *CreateGroupInput) Validate() error { return nil } +// SetConfiguration sets the Configuration field's value. +func (s *CreateGroupInput) SetConfiguration(v []*GroupConfigurationItem) *CreateGroupInput { + s.Configuration = v + return s +} + // SetDescription sets the Description field's value. func (s *CreateGroupInput) SetDescription(v string) *CreateGroupInput { s.Description = &v @@ -1478,9 +1794,19 @@ func (s *CreateGroupInput) SetTags(v map[string]*string) *CreateGroupInput { type CreateGroupOutput struct { _ struct{} `type:"structure"` - // A full description of the resource group after it is created. + // The description of the resource group. Group *Group `type:"structure"` + // The service configuration associated with the resource group. AWS Resource + // Groups supports adding service configurations for the following resource + // group types: + // + // * AWS::EC2::CapacityReservationPool - Amazon EC2 capacity reservation + // pools. For more information, see Working with capacity reservation groups + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/capacity-reservations-using.html#create-cr-group) + // in the EC2 Users Guide. + GroupConfiguration *GroupConfiguration `type:"structure"` + // The resource query associated with the group. ResourceQuery *ResourceQuery `type:"structure"` @@ -1504,6 +1830,12 @@ func (s *CreateGroupOutput) SetGroup(v *Group) *CreateGroupOutput { return s } +// SetGroupConfiguration sets the GroupConfiguration field's value. +func (s *CreateGroupOutput) SetGroupConfiguration(v *GroupConfiguration) *CreateGroupOutput { + s.GroupConfiguration = v + return s +} + // SetResourceQuery sets the ResourceQuery field's value. func (s *CreateGroupOutput) SetResourceQuery(v *ResourceQuery) *CreateGroupOutput { s.ResourceQuery = v @@ -1519,10 +1851,13 @@ func (s *CreateGroupOutput) SetTags(v map[string]*string) *CreateGroupOutput { type DeleteGroupInput struct { _ struct{} `type:"structure"` - // The name of the resource group to delete. + // The name or the ARN of the resource group to delete. + Group *string `min:"1" type:"string"` + + // Don't use this parameter. Use Group instead. // - // GroupName is a required field - GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + // Deprecated: This field is deprecated, use Group instead. + GroupName *string `min:"1" deprecated:"true" type:"string"` } // String returns the string representation @@ -1538,8 +1873,8 @@ func (s DeleteGroupInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *DeleteGroupInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteGroupInput"} - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) + if s.Group != nil && len(*s.Group) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Group", 1)) } if s.GroupName != nil && len(*s.GroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) @@ -1551,6 +1886,12 @@ func (s *DeleteGroupInput) Validate() error { return nil } +// SetGroup sets the Group field's value. +func (s *DeleteGroupInput) SetGroup(v string) *DeleteGroupInput { + s.Group = &v + return s +} + // SetGroupName sets the GroupName field's value. func (s *DeleteGroupInput) SetGroupName(v string) *DeleteGroupInput { s.GroupName = &v @@ -1580,69 +1921,173 @@ func (s *DeleteGroupOutput) SetGroup(v *Group) *DeleteGroupOutput { return s } -// The caller is not authorized to make the request. -type ForbiddenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata +// A resource that failed to be added to or removed from a group. +type FailedResource struct { + _ struct{} `type:"structure"` - Message_ *string `locationName:"Message" min:"1" type:"string"` + // The error code associated with the failure. + ErrorCode *string `min:"1" type:"string"` + + // The error message text associated with the failure. + ErrorMessage *string `min:"1" type:"string"` + + // The ARN of the resource that failed to be added or removed. + ResourceArn *string `type:"string"` } // String returns the string representation -func (s ForbiddenException) String() string { +func (s FailedResource) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ForbiddenException) GoString() string { +func (s FailedResource) GoString() string { return s.String() } -func newErrorForbiddenException(v protocol.ResponseMetadata) error { - return &ForbiddenException{ - respMetadata: v, - } +// SetErrorCode sets the ErrorCode field's value. +func (s *FailedResource) SetErrorCode(v string) *FailedResource { + s.ErrorCode = &v + return s } -// Code returns the exception type name. -func (s ForbiddenException) Code() string { - return "ForbiddenException" +// SetErrorMessage sets the ErrorMessage field's value. +func (s *FailedResource) SetErrorMessage(v string) *FailedResource { + s.ErrorMessage = &v + return s } -// Message returns the exception's message. -func (s ForbiddenException) Message() string { - if s.Message_ != nil { +// SetResourceArn sets the ResourceArn field's value. +func (s *FailedResource) SetResourceArn(v string) *FailedResource { + s.ResourceArn = &v + return s +} + +// The caller isn't authorized to make the request. Check permissions. +type ForbiddenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" min:"1" type:"string"` +} + +// String returns the string representation +func (s ForbiddenException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ForbiddenException) GoString() string { + return s.String() +} + +func newErrorForbiddenException(v protocol.ResponseMetadata) error { + return &ForbiddenException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ForbiddenException) Code() string { + return "ForbiddenException" +} + +// Message returns the exception's message. 
+func (s *ForbiddenException) Message() string { + if s.Message_ != nil { return *s.Message_ } return "" } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ForbiddenException) OrigErr() error { +func (s *ForbiddenException) OrigErr() error { return nil } -func (s ForbiddenException) Error() string { +func (s *ForbiddenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ForbiddenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ForbiddenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ForbiddenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ForbiddenException) RequestID() string { + return s.RespMetadata.RequestID +} + +type GetGroupConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name or the ARN of the resource group. + Group *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetGroupConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetGroupConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetGroupConfigurationInput"} + if s.Group != nil && len(*s.Group) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Group", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroup sets the Group field's value. +func (s *GetGroupConfigurationInput) SetGroup(v string) *GetGroupConfigurationInput { + s.Group = &v + return s +} + +type GetGroupConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The configuration associated with the specified group. + GroupConfiguration *GroupConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetGroupConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupConfigurationOutput) GoString() string { + return s.String() +} + +// SetGroupConfiguration sets the GroupConfiguration field's value. +func (s *GetGroupConfigurationOutput) SetGroupConfiguration(v *GroupConfiguration) *GetGroupConfigurationOutput { + s.GroupConfiguration = v + return s } type GetGroupInput struct { _ struct{} `type:"structure"` - // The name of the resource group. + // The name or the ARN of the resource group to retrieve. + Group *string `min:"1" type:"string"` + + // Don't use this parameter. Use Group instead. // - // GroupName is a required field - GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + // Deprecated: This field is deprecated, use Group instead. + GroupName *string `min:"1" deprecated:"true" type:"string"` } // String returns the string representation @@ -1658,8 +2103,8 @@ func (s GetGroupInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
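GetGroupInput, like DeleteGroupInput, now identifies the group through the optional Group field (a name or an ARN), keeping GroupName only as a deprecated alias. A sketch of reading a group with the new field; the group name is a placeholder, and out.Group is assumed non-nil for brevity.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/resourcegroups"
)

func main() {
	conn := resourcegroups.New(session.Must(session.NewSession()))

	// Group replaces the deprecated GroupName and accepts a name or ARN.
	out, err := conn.GetGroup(&resourcegroups.GetGroupInput{
		Group: aws.String("example-group"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Group.Name), aws.StringValue(out.Group.GroupArn))
}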
func (s *GetGroupInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "GetGroupInput"} - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) + if s.Group != nil && len(*s.Group) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Group", 1)) } if s.GroupName != nil && len(*s.GroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) @@ -1671,6 +2116,12 @@ func (s *GetGroupInput) Validate() error { return nil } +// SetGroup sets the Group field's value. +func (s *GetGroupInput) SetGroup(v string) *GetGroupInput { + s.Group = &v + return s +} + // SetGroupName sets the GroupName field's value. func (s *GetGroupInput) SetGroupName(v string) *GetGroupInput { s.GroupName = &v @@ -1703,10 +2154,13 @@ func (s *GetGroupOutput) SetGroup(v *Group) *GetGroupOutput { type GetGroupQueryInput struct { _ struct{} `type:"structure"` - // The name of the resource group. + // The name or the ARN of the resource group to query. + Group *string `min:"1" type:"string"` + + // Don't use this parameter. Use Group instead. // - // GroupName is a required field - GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + // Deprecated: This field is deprecated, use Group instead. + GroupName *string `min:"1" deprecated:"true" type:"string"` } // String returns the string representation @@ -1722,8 +2176,8 @@ func (s GetGroupQueryInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *GetGroupQueryInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "GetGroupQueryInput"} - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) + if s.Group != nil && len(*s.Group) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Group", 1)) } if s.GroupName != nil && len(*s.GroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) @@ -1735,6 +2189,12 @@ func (s *GetGroupQueryInput) Validate() error { return nil } +// SetGroup sets the Group field's value. +func (s *GetGroupQueryInput) SetGroup(v string) *GetGroupQueryInput { + s.Group = &v + return s +} + // SetGroupName sets the GroupName field's value. func (s *GetGroupQueryInput) SetGroupName(v string) *GetGroupQueryInput { s.GroupName = &v @@ -1767,8 +2227,7 @@ func (s *GetGroupQueryOutput) SetGroupQuery(v *GroupQuery) *GetGroupQueryOutput type GetTagsInput struct { _ struct{} `type:"structure"` - // The ARN of the resource group for which you want a list of tags. The resource - // must exist within the account you are using. + // The ARN of the resource group whose tags you want to retrieve. // // Arn is a required field Arn *string `location:"uri" locationName:"Arn" min:"12" type:"string" required:"true"` @@ -1838,19 +2297,29 @@ func (s *GetTagsOutput) SetTags(v map[string]*string) *GetTagsOutput { return s } -// A resource group. +// A resource group that contains AWS resources. You can assign resources to +// the group by associating either of the following elements with the group: +// +// * ResourceQuery - Use a resource query to specify a set of tag keys and +// values. All resources in the same AWS Region and AWS account that have +// those keys with the same values are included in the group. You can add +// a resource query when you create the group. +// +// * GroupConfiguration - Use a service configuration to associate the group +// with an AWS service. 
The configuration specifies which resource types +// can be included in the group. type Group struct { _ struct{} `type:"structure"` // The description of the resource group. Description *string `type:"string"` - // The ARN of a resource group. + // The ARN of the resource group. // // GroupArn is a required field GroupArn *string `min:"12" type:"string" required:"true"` - // The name of a resource group. + // The name of the resource group. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -1884,8 +2353,200 @@ func (s *Group) SetName(v string) *Group { return s } -// A filter name and value pair that is used to obtain more specific results -// from a list of groups. +// A service configuration associated with a resource group. The configuration +// options are determined by the AWS service that defines the Type, and specifies +// which resources can be included in the group. You can add a service configuration +// when you create the group. +type GroupConfiguration struct { + _ struct{} `type:"structure"` + + // The configuration currently associated with the group and in effect. + Configuration []*GroupConfigurationItem `type:"list"` + + // If present, the reason why a request to update the group configuration failed. + FailureReason *string `type:"string"` + + // If present, the new configuration that is in the process of being applied + // to the group. + ProposedConfiguration []*GroupConfigurationItem `type:"list"` + + // The current status of an attempt to update the group configuration. + Status *string `type:"string" enum:"GroupConfigurationStatus"` +} + +// String returns the string representation +func (s GroupConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupConfiguration) GoString() string { + return s.String() +} + +// SetConfiguration sets the Configuration field's value. +func (s *GroupConfiguration) SetConfiguration(v []*GroupConfigurationItem) *GroupConfiguration { + s.Configuration = v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *GroupConfiguration) SetFailureReason(v string) *GroupConfiguration { + s.FailureReason = &v + return s +} + +// SetProposedConfiguration sets the ProposedConfiguration field's value. +func (s *GroupConfiguration) SetProposedConfiguration(v []*GroupConfigurationItem) *GroupConfiguration { + s.ProposedConfiguration = v + return s +} + +// SetStatus sets the Status field's value. +func (s *GroupConfiguration) SetStatus(v string) *GroupConfiguration { + s.Status = &v + return s +} + +// An item in a group configuration. A group configuration can have one or more +// items. +type GroupConfigurationItem struct { + _ struct{} `type:"structure"` + + // A collection of parameters for this group configuration item. + Parameters []*GroupConfigurationParameter `type:"list"` + + // Specifies the type of group configuration item. Each item must have a unique + // value for type. + // + // You can specify the following string values: + // + // * AWS::EC2::CapacityReservationPool For more information about EC2 capacity + // reservation groups, see Working with capacity reservation groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/capacity-reservations-using.html#create-cr-group) + // in the EC2 Users Guide. + // + // * AWS::ResourceGroups::Generic - Supports parameters that configure the + // behavior of resource groups of any type. 
+ // + // Type is a required field + Type *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GroupConfigurationItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupConfigurationItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GroupConfigurationItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GroupConfigurationItem"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + if s.Parameters != nil { + for i, v := range s.Parameters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Parameters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetParameters sets the Parameters field's value. +func (s *GroupConfigurationItem) SetParameters(v []*GroupConfigurationParameter) *GroupConfigurationItem { + s.Parameters = v + return s +} + +// SetType sets the Type field's value. +func (s *GroupConfigurationItem) SetType(v string) *GroupConfigurationItem { + s.Type = &v + return s +} + +// A parameter for a group configuration item. +type GroupConfigurationParameter struct { + _ struct{} `type:"structure"` + + // The name of the group configuration parameter. + // + // You can specify the following string values: + // + // * For configuration item type AWS::ResourceGroups::Generic: allowed-resource-types + // Specifies the types of resources that you can add to this group by using + // the GroupResources operation. + // + // * For configuration item type AWS::EC2::CapacityReservationPool: None + // - This configuration item type doesn't support any parameters. For more + // information about EC2 capacity reservation groups, see Working with capacity + // reservation groups (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/capacity-reservations-using.html#create-cr-group) + // in the EC2 Users Guide. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The values of for this parameter. + // + // You can specify the following string value: + // + // * For item type allowed-resource-types: the only supported parameter value + // is AWS::EC2::CapacityReservation. + Values []*string `type:"list"` +} + +// String returns the string representation +func (s GroupConfigurationParameter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupConfigurationParameter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GroupConfigurationParameter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GroupConfigurationParameter"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *GroupConfigurationParameter) SetName(v string) *GroupConfigurationParameter { + s.Name = &v + return s +} + +// SetValues sets the Values field's value. 
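// An illustrative sketch (not part of the generated SDK surface) of building the
// generic configuration item described above with the setters added here, as calling
// code outside this package would write it; the aws helper package is assumed for
// pointer conversion.
//
//    item := (&resourcegroups.GroupConfigurationItem{}).
//        SetType("AWS::ResourceGroups::Generic").
//        SetParameters([]*resourcegroups.GroupConfigurationParameter{
//            (&resourcegroups.GroupConfigurationParameter{}).
//                SetName("allowed-resource-types").
//                SetValues([]*string{aws.String("AWS::EC2::CapacityReservation")}),
//        })
//    if err := item.Validate(); err != nil {
//        // a missing Type or an unnamed parameter is reported here
//    }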
+func (s *GroupConfigurationParameter) SetValues(v []*string) *GroupConfigurationParameter { + s.Values = v + return s +} + +// A filter collection that you can use to restrict the results from a List +// operation to only those you want to include. type GroupFilter struct { _ struct{} `type:"structure"` @@ -1942,14 +2603,14 @@ func (s *GroupFilter) SetValues(v []*string) *GroupFilter { return s } -// The ARN and group name of a group. +// The unique identifiers for a resource group. type GroupIdentifier struct { _ struct{} `type:"structure"` - // The ARN of a resource group. + // The ARN of the resource group. GroupArn *string `min:"12" type:"string"` - // The name of a resource group. + // The name of the resource group. GroupName *string `min:"1" type:"string"` } @@ -1975,18 +2636,18 @@ func (s *GroupIdentifier) SetGroupName(v string) *GroupIdentifier { return s } -// The underlying resource query of a resource group. Resources that match query -// results are part of the group. +// A mapping of a query attached to a resource group that determines the AWS +// resources that are members of the group. type GroupQuery struct { _ struct{} `type:"structure"` - // The name of a resource group that is associated with a specific resource + // The name of the resource group that is associated with the specified resource // query. // // GroupName is a required field GroupName *string `min:"1" type:"string" required:"true"` - // The resource query which determines which AWS resources are members of the + // The resource query that determines which AWS resources are members of the // associated resource group. // // ResourceQuery is a required field @@ -2015,10 +2676,101 @@ func (s *GroupQuery) SetResourceQuery(v *ResourceQuery) *GroupQuery { return s } -// An internal error occurred while processing the request. +type GroupResourcesInput struct { + _ struct{} `type:"structure"` + + // The name or the ARN of the resource group to add resources to. + // + // Group is a required field + Group *string `min:"1" type:"string" required:"true"` + + // The list of ARNs for resources to be added to the group. + // + // ResourceArns is a required field + ResourceArns []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s GroupResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupResourcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GroupResourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GroupResourcesInput"} + if s.Group == nil { + invalidParams.Add(request.NewErrParamRequired("Group")) + } + if s.Group != nil && len(*s.Group) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Group", 1)) + } + if s.ResourceArns == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArns")) + } + if s.ResourceArns != nil && len(s.ResourceArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArns", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroup sets the Group field's value. +func (s *GroupResourcesInput) SetGroup(v string) *GroupResourcesInput { + s.Group = &v + return s +} + +// SetResourceArns sets the ResourceArns field's value. 
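// An illustrative sketch of the GroupResources call that these input and output shapes
// support, from calling code; the session (sess) and the capacity reservation ARN are
// assumptions, and UngroupResources takes the same Group/ResourceArns pair to reverse
// the operation.
//
//    svc := resourcegroups.New(sess) // sess: an already-configured *session.Session
//    out, err := svc.GroupResources(&resourcegroups.GroupResourcesInput{
//        Group:        aws.String("my-group"), // name or ARN of the group
//        ResourceArns: []*string{aws.String("arn:aws:ec2:us-west-2:123456789012:capacity-reservation/cr-EXAMPLE1111111111")},
//    })
//    if err == nil {
//        fmt.Printf("%d added, %d failed\n", len(out.Succeeded), len(out.Failed))
//    }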
+func (s *GroupResourcesInput) SetResourceArns(v []*string) *GroupResourcesInput { + s.ResourceArns = v + return s +} + +type GroupResourcesOutput struct { + _ struct{} `type:"structure"` + + // The ARNs of the resources that failed to be added to the group by this operation. + Failed []*FailedResource `type:"list"` + + // The ARNs of the resources that were successfully added to the group by this + // operation. + Succeeded []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s GroupResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupResourcesOutput) GoString() string { + return s.String() +} + +// SetFailed sets the Failed field's value. +func (s *GroupResourcesOutput) SetFailed(v []*FailedResource) *GroupResourcesOutput { + s.Failed = v + return s +} + +// SetSucceeded sets the Succeeded field's value. +func (s *GroupResourcesOutput) SetSucceeded(v []*string) *GroupResourcesOutput { + s.Succeeded = v + return s +} + +// An internal error occurred while processing the request. Try again later. type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -2035,17 +2787,17 @@ func (s InternalServerErrorException) GoString() string { func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { return &InternalServerErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerErrorException) Code() string { +func (s *InternalServerErrorException) Code() string { return "InternalServerErrorException" } // Message returns the exception's message. -func (s InternalServerErrorException) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2053,48 +2805,76 @@ func (s InternalServerErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerErrorException) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s InternalServerErrorException) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID } type ListGroupResourcesInput struct { _ struct{} `type:"structure"` // Filters, formatted as ResourceFilter objects, that you want to apply to a - // ListGroupResources operation. + // ListGroupResources operation. Filters the results to include only those of + // the specified resource types. // // * resource-type - Filter resources by their type. Specify up to five resource // types in the format AWS::ServiceCode::ResourceType. For example, AWS::EC2::Instance, // or AWS::S3::Bucket. 
+ // + // When you specify a resource-type filter for ListGroupResources, AWS Resource + // Groups validates your filter resource types against the types that are defined + // in the query associated with the group. For example, if a group contains + // only S3 buckets because its query specifies only that resource type, but + // your resource-type filter includes EC2 instances, AWS Resource Groups does + // not filter for EC2 instances. In this case, a ListGroupResources request + // returns a BadRequestException error with a message similar to the following: + // + // The resource types specified as filters in the request are not valid. + // + // The error includes a list of resource types that failed the validation because + // they are not part of the query associated with the group. This validation + // doesn't occur when the group query specifies AWS::AllSupported, because a + // group based on such a query can contain any of the allowed resource types + // for the query type (tag-based or AWS CloudFormation stack-based queries). Filters []*ResourceFilter `type:"list"` - // The name of the resource group. - // - // GroupName is a required field - GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + // The name or the ARN of the resource group + Group *string `min:"1" type:"string"` - // The maximum number of group member ARNs that are returned in a single call - // by ListGroupResources, in paginated output. By default, this number is 50. - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + // Don't use this parameter. Use Group instead. + // + // Deprecated: This field is deprecated, use Group instead. + GroupName *string `min:"1" deprecated:"true" type:"string"` + + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that the service + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. + MaxResults *int64 `min:"1" type:"integer"` - // The NextToken value that is returned in a paginated ListGroupResources request. - // To get the next page of results, run the call again, add the NextToken parameter, - // and specify the NextToken value. - NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value provided by a previous + // call's NextToken response to indicate where the output should continue from. + NextToken *string `type:"string"` } // String returns the string representation @@ -2110,8 +2890,8 @@ func (s ListGroupResourcesInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
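// An illustrative pagination sketch for ListGroupResources using the Group identifier
// (name or ARN) that replaces the deprecated GroupName here, as it does in GetGroup,
// GetGroupQuery, UpdateGroup, and UpdateGroupQuery. The client (svc) and the group name
// are assumptions; ResourceIdentifiers is the existing result list on the output.
//
//    in := &resourcegroups.ListGroupResourcesInput{
//        Group:      aws.String("my-group"),
//        MaxResults: aws.Int64(50),
//    }
//    for {
//        out, err := svc.ListGroupResources(in)
//        if err != nil {
//            break // real code would surface the error to its caller
//        }
//        for _, id := range out.ResourceIdentifiers {
//            fmt.Println(aws.StringValue(id.ResourceArn), aws.StringValue(id.ResourceType))
//        }
//        if out.NextToken == nil {
//            break // no further pages
//        }
//        in.NextToken = out.NextToken // continue where the previous page ended
//    }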
func (s *ListGroupResourcesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListGroupResourcesInput"} - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) + if s.Group != nil && len(*s.Group) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Group", 1)) } if s.GroupName != nil && len(*s.GroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) @@ -2142,6 +2922,12 @@ func (s *ListGroupResourcesInput) SetFilters(v []*ResourceFilter) *ListGroupReso return s } +// SetGroup sets the Group field's value. +func (s *ListGroupResourcesInput) SetGroup(v string) *ListGroupResourcesInput { + s.Group = &v + return s +} + // SetGroupName sets the GroupName field's value. func (s *ListGroupResourcesInput) SetGroupName(v string) *ListGroupResourcesInput { s.GroupName = &v @@ -2163,8 +2949,10 @@ func (s *ListGroupResourcesInput) SetNextToken(v string) *ListGroupResourcesInpu type ListGroupResourcesOutput struct { _ struct{} `type:"structure"` - // The NextToken value to include in a subsequent ListGroupResources request, - // to get more results. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` // A list of QueryError objects. Each error is an object that contains ErrorCode @@ -2211,18 +2999,30 @@ type ListGroupsInput struct { // Filters, formatted as GroupFilter objects, that you want to apply to a ListGroups // operation. // - // * resource-type - Filter groups by resource type. Specify up to five resource - // types in the format AWS::ServiceCode::ResourceType. For example, AWS::EC2::Instance, - // or AWS::S3::Bucket. + // * resource-type - Filter the results to include only those of the specified + // resource types. Specify up to five resource types in the format AWS::ServiceCode::ResourceType + // . For example, AWS::EC2::Instance, or AWS::S3::Bucket. + // + // * configuration-type - Filter the results to include only those groups + // that have the specified configuration types attached. The current supported + // values are: AWS:EC2::CapacityReservationPool Filters []*GroupFilter `type:"list"` - // The maximum number of resource group results that are returned by ListGroups - // in paginated output. By default, this number is 50. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that the service + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` - // The NextToken value that is returned in a paginated ListGroups request. To - // get the next page of results, run the call again, add the NextToken parameter, - // and specify the NextToken value. 
+ // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value provided by a previous + // call's NextToken response to indicate where the output should continue from. NextToken *string `location:"querystring" locationName:"nextToken" type:"string"` } @@ -2281,16 +3081,19 @@ type ListGroupsOutput struct { _ struct{} `type:"structure"` // A list of GroupIdentifier objects. Each identifier is an object that contains - // both the GroupName and the GroupArn. + // both the Name and the GroupArn. GroupIdentifiers []*GroupIdentifier `type:"list"` - // A list of resource groups. + // This output element is deprecated and shouldn't be used. Refer to GroupIdentifiers + // instead. // // Deprecated: This field is deprecated, use GroupIdentifiers instead. Groups []*Group `deprecated:"true" type:"list"` - // The NextToken value to include in a subsequent ListGroups request, to get - // more results. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` } @@ -2322,10 +3125,10 @@ func (s *ListGroupsOutput) SetNextToken(v string) *ListGroupsOutput { return s } -// The request uses an HTTP method which is not allowed for the specified resource. +// The request uses an HTTP method that isn't allowed for the specified resource. type MethodNotAllowedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -2342,17 +3145,17 @@ func (s MethodNotAllowedException) GoString() string { func newErrorMethodNotAllowedException(v protocol.ResponseMetadata) error { return &MethodNotAllowedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MethodNotAllowedException) Code() string { +func (s *MethodNotAllowedException) Code() string { return "MethodNotAllowedException" } // Message returns the exception's message. -func (s MethodNotAllowedException) Message() string { +func (s *MethodNotAllowedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2360,28 +3163,28 @@ func (s MethodNotAllowedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MethodNotAllowedException) OrigErr() error { +func (s *MethodNotAllowedException) OrigErr() error { return nil } -func (s MethodNotAllowedException) Error() string { +func (s *MethodNotAllowedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MethodNotAllowedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MethodNotAllowedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s MethodNotAllowedException) RequestID() string { - return s.respMetadata.RequestID +func (s *MethodNotAllowedException) RequestID() string { + return s.RespMetadata.RequestID } -// One or more resources specified in the request do not exist. +// One or more of the specified resources don't exist. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -2398,17 +3201,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2416,22 +3219,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // A two-part error structure that can occur in ListGroupResources or SearchResources @@ -2567,6 +3370,61 @@ func (s *ResourceIdentifier) SetResourceType(v string) *ResourceIdentifier { } // The query that is used to define a resource group or a search for resources. +// A query specifies both a query type and a query string as a JSON object. +// See the examples section for example JSON strings. +// +// The examples that follow are shown as standard JSON strings. If you include +// such a string as a parameter to the AWS CLI or an SDK API, you might need +// to 'escape' the string into a single line. For example, see the Quoting strings +// (https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters-quoting-strings.html) +// in the AWS CLI User Guide. +// +// Example 1 +// +// The following generic example shows a resource query JSON string that includes +// only resources that meet the following criteria: +// +// * The resource type must be either resource_type1 or resource_type2. +// +// * The resource must have a tag Key1 with a value of either ValueA or ValueB. +// +// * The resource must have a tag Key2 with a value of either ValueC or ValueD. 
+// +// { "Type": "TAG_FILTERS_1_0", "Query": { "ResourceTypeFilters": [ "resource_type1", +// "resource_type2"], "TagFilters": [ { "Key": "Key1", "Values": ["ValueA","ValueB"] +// }, { "Key":"Key2", "Values":["ValueC","ValueD"] } ] } } +// +// This has the equivalent "shortcut" syntax of the following: +// +// { "Type": "TAG_FILTERS_1_0", "Query": { "ResourceTypeFilters": [ "resource_type1", +// "resource_type2"], "TagFilters": [ { "Key1": ["ValueA","ValueB"] }, { "Key2": +// ["ValueC","ValueD"] } ] } } +// +// Example 2 +// +// The following example shows a resource query JSON string that includes only +// Amazon EC2 instances that are tagged Stage with a value of Test. +// +// { "Type": "TAG_FILTERS_1_0", "Query": "{ "ResourceTypeFilters": "AWS::EC2::Instance", +// "TagFilters": { "Stage": "Test" } } } +// +// Example 3 +// +// The following example shows a resource query JSON string that includes resource +// of any supported type as long as it is tagged Stage with a value of Prod. +// +// { "Type": "TAG_FILTERS_1_0", "Query": { "ResourceTypeFilters": "AWS::AllSupported", +// "TagFilters": { "Stage": "Prod" } } } +// +// Example 4 +// +// The following example shows a resource query JSON string that includes only +// Amazon EC2 instances and Amazon S3 buckets that are part of the specified +// AWS CloudFormation stack. +// +// { "Type": "CLOUDFORMATION_STACK_1_0", "Query": { "ResourceTypeFilters": [ +// "AWS::EC2::Instance", "AWS::S3::Bucket" ], "StackIdentifier": "arn:aws:cloudformation:us-west-2:123456789012:stack/AWStestuseraccount/fb0d5000-aba8-00e8-aa9e-50d5cEXAMPLE" +// } } type ResourceQuery struct { _ struct{} `type:"structure"` @@ -2575,40 +3433,33 @@ type ResourceQuery struct { // Query is a required field Query *string `type:"string" required:"true"` - // The type of the query. The valid values in this release are TAG_FILTERS_1_0 - // and CLOUDFORMATION_STACK_1_0. - // - // TAG_FILTERS_1_0: A JSON syntax that lets you specify a collection of simple - // tag filters for resource types and tags, as supported by the AWS Tagging - // API GetResources (https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html) - // operation. If you specify more than one tag key, only resources that match - // all tag keys, and at least one value of each specified tag key, are returned - // in your query. If you specify more than one value for a tag key, a resource - // matches the filter if it has a tag key value that matches any of the specified - // values. - // - // For example, consider the following sample query for resources that have - // two tags, Stage and Version, with two values each. ([{"Key":"Stage","Values":["Test","Deploy"]},{"Key":"Version","Values":["1","2"]}]) - // The results of this query might include the following. - // - // * An EC2 instance that has the following two tags: {"Key":"Stage","Value":"Deploy"}, - // and {"Key":"Version","Value":"2"} - // - // * An S3 bucket that has the following two tags: {"Key":"Stage","Value":"Test"}, - // and {"Key":"Version","Value":"1"} + // The type of the query. You can use the following values: // - // The query would not return the following results, however. The following - // EC2 instance does not have all tag keys specified in the filter, so it is - // rejected. The RDS database has all of the tag keys, but no values that match - // at least one of the specified tag key values in the filter. + // * CLOUDFORMATION_STACK_1_0: Specifies that the Query contains an ARN for + // a CloudFormation stack. 
// - // * An EC2 instance that has only the following tag: {"Key":"Stage","Value":"Deploy"}. - // - // * An RDS database that has the following two tags: {"Key":"Stage","Value":"Archived"}, - // and {"Key":"Version","Value":"4"} - // - // CLOUDFORMATION_STACK_1_0: A JSON syntax that lets you specify a CloudFormation - // stack ARN. + // * TAG_FILTERS_1_0: Specifies that the Query parameter contains a JSON + // string that represents a collection of simple tag filters for resource + // types and tags. The JSON string uses a syntax similar to the GetResources + // (https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html) + // operation, but uses only the ResourceTypeFilters (https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html#resourcegrouptagging-GetResources-request-ResourceTypeFilters) + // and TagFilters (https://docs.aws.amazon.com/resourcegroupstagging/latest/APIReference/API_GetResources.html#resourcegrouptagging-GetResources-request-TagFiltersTagFilters) + // fields. If you specify more than one tag key, only resources that match + // all tag keys, and at least one value of each specified tag key, are returned + // in your query. If you specify more than one value for a tag key, a resource + // matches the filter if it has a tag key value that matches any of the specified + // values. For example, consider the following sample query for resources + // that have two tags, Stage and Version, with two values each: [{"Stage":["Test","Deploy"]},{"Version":["1","2"]}] + // The results of this query could include the following. An EC2 instance + // that has the following two tags: {"Stage":"Deploy"}, and {"Version":"2"} + // An S3 bucket that has the following two tags: {"Stage":"Test"}, and {"Version":"1"} + // The query would not include the following items in the results, however. + // An EC2 instance that has only the following tag: {"Stage":"Deploy"}. The + // instance does not have all of the tag keys specified in the filter, so + // it is excluded from the results. An RDS database that has the following + // two tags: {"Stage":"Archived"} and {"Version":"4"} The database has all + // of the tag keys, but none of those keys has an associated value that matches + // at least one of the specified values in the filter. // // Type is a required field Type *string `min:"1" type:"string" required:"true" enum:"QueryType"` @@ -2658,17 +3509,25 @@ func (s *ResourceQuery) SetType(v string) *ResourceQuery { type SearchResourcesInput struct { _ struct{} `type:"structure"` - // The maximum number of group member ARNs returned by SearchResources in paginated - // output. By default, this number is 50. + // The total number of results that you want included on each page of the response. + // If you do not include this parameter, it defaults to a value that is specific + // to the operation. If additional items exist beyond the maximum you specify, + // the NextToken response element is present and has a value (is not null). + // Include that value as the NextToken request parameter in the next call to + // the operation to get the next part of the results. Note that the service + // might return fewer results than the maximum even when there are more results + // available. You should check NextToken after every operation to ensure that + // you receive all of the results. MaxResults *int64 `min:"1" type:"integer"` - // The NextToken value that is returned in a paginated SearchResources request. 
- // To get the next page of results, run the call again, add the NextToken parameter, - // and specify the NextToken value. + // The parameter for receiving additional results if you receive a NextToken + // response in a previous request. A NextToken response indicates that more + // output is available. Set this parameter to the value provided by a previous + // call's NextToken response to indicate where the output should continue from. NextToken *string `type:"string"` // The search query, using the same formats that are supported for resource - // group definition. + // group definition. For more information, see CreateGroup. // // ResourceQuery is a required field ResourceQuery *ResourceQuery `type:"structure" required:"true"` @@ -2726,8 +3585,10 @@ func (s *SearchResourcesInput) SetResourceQuery(v *ResourceQuery) *SearchResourc type SearchResourcesOutput struct { _ struct{} `type:"structure"` - // The NextToken value to include in a subsequent SearchResources request, to - // get more results. + // If present, indicates that more output is available than is included in the + // current response. Use this value in the NextToken request parameter in a + // subsequent call to the operation to get the next part of the output. You + // should repeat this until the NextToken response element comes back as null. NextToken *string `type:"string"` // A list of QueryError objects. Each error is an object that contains ErrorCode @@ -2771,14 +3632,13 @@ func (s *SearchResourcesOutput) SetResourceIdentifiers(v []*ResourceIdentifier) type TagInput struct { _ struct{} `type:"structure"` - // The ARN of the resource to which to add tags. + // The ARN of the resource group to which to add tags. // // Arn is a required field Arn *string `location:"uri" locationName:"Arn" min:"12" type:"string" required:"true"` - // The tags to add to the specified resource. A tag is a string-to-string map - // of key-value pairs. Tag keys can have a maximum character length of 128 characters, - // and tag values can have a maximum length of 256 characters. + // The tags to add to the specified resource group. A tag is a string-to-string + // map of key-value pairs. // // Tags is a required field Tags map[string]*string `type:"map" required:"true"` @@ -2831,7 +3691,7 @@ type TagOutput struct { // The ARN of the tagged resource. Arn *string `min:"12" type:"string"` - // The tags that have been added to the specified resource. + // The tags that have been added to the specified resource group. Tags map[string]*string `type:"map"` } @@ -2857,10 +3717,11 @@ func (s *TagOutput) SetTags(v map[string]*string) *TagOutput { return s } -// The caller has exceeded throttling limits. +// You've exceeded throttling limits by making too many requests in a period +// of time. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -2877,17 +3738,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. 
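// An illustrative sketch of SearchResources with a TAG_FILTERS_1_0 query that mirrors
// Example 3 above (every supported resource type tagged Stage=Prod), from calling code;
// svc is assumed to be an initialized *resourcegroups.ResourceGroups client.
//
//    query := `{"ResourceTypeFilters":["AWS::AllSupported"],"TagFilters":[{"Key":"Stage","Values":["Prod"]}]}`
//    out, err := svc.SearchResources(&resourcegroups.SearchResourcesInput{
//        ResourceQuery: &resourcegroups.ResourceQuery{
//            Type:  aws.String(resourcegroups.QueryTypeTagFilters10), // "TAG_FILTERS_1_0"
//            Query: aws.String(query),
//        },
//    })
//    // on success, out.ResourceIdentifiers holds the matches and out.NextToken pages further results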
-func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2895,29 +3756,29 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } -// The request has not been applied because it lacks valid authentication credentials -// for the target resource. +// The request was rejected because it doesn't have valid credentials for the +// target resource. type UnauthorizedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" min:"1" type:"string"` } @@ -2934,17 +3795,17 @@ func (s UnauthorizedException) GoString() string { func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { return &UnauthorizedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnauthorizedException) Code() string { +func (s *UnauthorizedException) Code() string { return "UnauthorizedException" } // Message returns the exception's message. -func (s UnauthorizedException) Message() string { +func (s *UnauthorizedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2952,28 +3813,119 @@ func (s UnauthorizedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnauthorizedException) OrigErr() error { +func (s *UnauthorizedException) OrigErr() error { return nil } -func (s UnauthorizedException) Error() string { +func (s *UnauthorizedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnauthorizedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnauthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnauthorizedException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnauthorizedException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UngroupResourcesInput struct { + _ struct{} `type:"structure"` + + // The name or the ARN of the resource group from which to remove the resources. + // + // Group is a required field + Group *string `min:"1" type:"string" required:"true"` + + // The ARNs of the resources to be removed from the group. 
+ // + // ResourceArns is a required field + ResourceArns []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UngroupResourcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UngroupResourcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UngroupResourcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UngroupResourcesInput"} + if s.Group == nil { + invalidParams.Add(request.NewErrParamRequired("Group")) + } + if s.Group != nil && len(*s.Group) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Group", 1)) + } + if s.ResourceArns == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArns")) + } + if s.ResourceArns != nil && len(s.ResourceArns) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArns", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroup sets the Group field's value. +func (s *UngroupResourcesInput) SetGroup(v string) *UngroupResourcesInput { + s.Group = &v + return s +} + +// SetResourceArns sets the ResourceArns field's value. +func (s *UngroupResourcesInput) SetResourceArns(v []*string) *UngroupResourcesInput { + s.ResourceArns = v + return s +} + +type UngroupResourcesOutput struct { + _ struct{} `type:"structure"` + + // The resources that failed to be removed from the group. + Failed []*FailedResource `type:"list"` + + // The ARNs of the resources that were successfully removed from the group. + Succeeded []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s UngroupResourcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UngroupResourcesOutput) GoString() string { + return s.String() +} + +// SetFailed sets the Failed field's value. +func (s *UngroupResourcesOutput) SetFailed(v []*FailedResource) *UngroupResourcesOutput { + s.Failed = v + return s +} + +// SetSucceeded sets the Succeeded field's value. +func (s *UngroupResourcesOutput) SetSucceeded(v []*string) *UngroupResourcesOutput { + s.Succeeded = v + return s } type UntagInput struct { _ struct{} `type:"structure"` - // The ARN of the resource from which to remove tags. + // The ARN of the resource group from which to remove tags. The command removed + // both the specified keys and any values associated with those keys. // // Arn is a required field Arn *string `location:"uri" locationName:"Arn" min:"12" type:"string" required:"true"` @@ -3028,10 +3980,10 @@ func (s *UntagInput) SetKeys(v []*string) *UntagInput { type UntagOutput struct { _ struct{} `type:"structure"` - // The ARN of the resource from which tags have been removed. + // The ARN of the resource group from which tags have been removed. Arn *string `min:"12" type:"string"` - // The keys of tags that have been removed. + // The keys of the tags that were removed. Keys []*string `type:"list"` } @@ -3060,15 +4012,17 @@ func (s *UntagOutput) SetKeys(v []*string) *UntagOutput { type UpdateGroupInput struct { _ struct{} `type:"structure"` - // The description of the resource group. Descriptions can have a maximum of - // 511 characters, including letters, numbers, hyphens, underscores, punctuation, - // and spaces. + // The new description that you want to update the resource group with. 
Descriptions + // can contain letters, numbers, hyphens, underscores, periods, and spaces. Description *string `type:"string"` - // The name of the resource group for which you want to update its description. + // The name or the ARN of the resource group to modify. + Group *string `min:"1" type:"string"` + + // Don't use this parameter. Use Group instead. // - // GroupName is a required field - GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + // Deprecated: This field is deprecated, use Group instead. + GroupName *string `min:"1" deprecated:"true" type:"string"` } // String returns the string representation @@ -3084,8 +4038,8 @@ func (s UpdateGroupInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateGroupInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateGroupInput"} - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) + if s.Group != nil && len(*s.Group) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Group", 1)) } if s.GroupName != nil && len(*s.GroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) @@ -3103,6 +4057,12 @@ func (s *UpdateGroupInput) SetDescription(v string) *UpdateGroupInput { return s } +// SetGroup sets the Group field's value. +func (s *UpdateGroupInput) SetGroup(v string) *UpdateGroupInput { + s.Group = &v + return s +} + // SetGroupName sets the GroupName field's value. func (s *UpdateGroupInput) SetGroupName(v string) *UpdateGroupInput { s.GroupName = &v @@ -3112,7 +4072,7 @@ func (s *UpdateGroupInput) SetGroupName(v string) *UpdateGroupInput { type UpdateGroupOutput struct { _ struct{} `type:"structure"` - // The full description of the resource group after it has been updated. + // The update description of the resource group. Group *Group `type:"structure"` } @@ -3135,13 +4095,16 @@ func (s *UpdateGroupOutput) SetGroup(v *Group) *UpdateGroupOutput { type UpdateGroupQueryInput struct { _ struct{} `type:"structure"` - // The name of the resource group for which you want to edit the query. + // The name or the ARN of the resource group to query. + Group *string `min:"1" type:"string"` + + // Don't use this parameter. Use Group instead. // - // GroupName is a required field - GroupName *string `location:"uri" locationName:"GroupName" min:"1" type:"string" required:"true"` + // Deprecated: This field is deprecated, use Group instead. + GroupName *string `min:"1" deprecated:"true" type:"string"` - // The resource query that determines which AWS resources are members of the - // resource group. + // The resource query to determine which AWS resources are members of this resource + // group. // // ResourceQuery is a required field ResourceQuery *ResourceQuery `type:"structure" required:"true"` @@ -3160,8 +4123,8 @@ func (s UpdateGroupQueryInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
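// An illustrative sketch of UpdateGroup with the new Group identifier in place of the
// deprecated GroupName, from calling code (svc assumed as in the sketches above).
//
//    out, err := svc.UpdateGroup(&resourcegroups.UpdateGroupInput{
//        Group:       aws.String("my-group"), // name or ARN
//        Description: aws.String("Resources for the test stage"),
//    })
//    // on success, out.Group carries the updated group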
func (s *UpdateGroupQueryInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateGroupQueryInput"} - if s.GroupName == nil { - invalidParams.Add(request.NewErrParamRequired("GroupName")) + if s.Group != nil && len(*s.Group) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Group", 1)) } if s.GroupName != nil && len(*s.GroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) @@ -3181,6 +4144,12 @@ func (s *UpdateGroupQueryInput) Validate() error { return nil } +// SetGroup sets the Group field's value. +func (s *UpdateGroupQueryInput) SetGroup(v string) *UpdateGroupQueryInput { + s.Group = &v + return s +} + // SetGroupName sets the GroupName field's value. func (s *UpdateGroupQueryInput) SetGroupName(v string) *UpdateGroupQueryInput { s.GroupName = &v @@ -3196,7 +4165,7 @@ func (s *UpdateGroupQueryInput) SetResourceQuery(v *ResourceQuery) *UpdateGroupQ type UpdateGroupQueryOutput struct { _ struct{} `type:"structure"` - // The resource query associated with the resource group after the update. + // The updated resource query associated with the resource group after the update. GroupQuery *GroupQuery `type:"structure"` } @@ -3216,11 +4185,42 @@ func (s *UpdateGroupQueryOutput) SetGroupQuery(v *GroupQuery) *UpdateGroupQueryO return s } +const ( + // GroupConfigurationStatusUpdating is a GroupConfigurationStatus enum value + GroupConfigurationStatusUpdating = "UPDATING" + + // GroupConfigurationStatusUpdateComplete is a GroupConfigurationStatus enum value + GroupConfigurationStatusUpdateComplete = "UPDATE_COMPLETE" + + // GroupConfigurationStatusUpdateFailed is a GroupConfigurationStatus enum value + GroupConfigurationStatusUpdateFailed = "UPDATE_FAILED" +) + +// GroupConfigurationStatus_Values returns all elements of the GroupConfigurationStatus enum +func GroupConfigurationStatus_Values() []string { + return []string{ + GroupConfigurationStatusUpdating, + GroupConfigurationStatusUpdateComplete, + GroupConfigurationStatusUpdateFailed, + } +} + const ( // GroupFilterNameResourceType is a GroupFilterName enum value GroupFilterNameResourceType = "resource-type" + + // GroupFilterNameConfigurationType is a GroupFilterName enum value + GroupFilterNameConfigurationType = "configuration-type" ) +// GroupFilterName_Values returns all elements of the GroupFilterName enum +func GroupFilterName_Values() []string { + return []string{ + GroupFilterNameResourceType, + GroupFilterNameConfigurationType, + } +} + const ( // QueryErrorCodeCloudformationStackInactive is a QueryErrorCode enum value QueryErrorCodeCloudformationStackInactive = "CLOUDFORMATION_STACK_INACTIVE" @@ -3229,6 +4229,14 @@ const ( QueryErrorCodeCloudformationStackNotExisting = "CLOUDFORMATION_STACK_NOT_EXISTING" ) +// QueryErrorCode_Values returns all elements of the QueryErrorCode enum +func QueryErrorCode_Values() []string { + return []string{ + QueryErrorCodeCloudformationStackInactive, + QueryErrorCodeCloudformationStackNotExisting, + } +} + const ( // QueryTypeTagFilters10 is a QueryType enum value QueryTypeTagFilters10 = "TAG_FILTERS_1_0" @@ -3237,7 +4245,22 @@ const ( QueryTypeCloudformationStack10 = "CLOUDFORMATION_STACK_1_0" ) +// QueryType_Values returns all elements of the QueryType enum +func QueryType_Values() []string { + return []string{ + QueryTypeTagFilters10, + QueryTypeCloudformationStack10, + } +} + const ( // ResourceFilterNameResourceType is a ResourceFilterName enum value ResourceFilterNameResourceType = "resource-type" ) + +// ResourceFilterName_Values returns all 
elements of the ResourceFilterName enum +func ResourceFilterName_Values() []string { + return []string{ + ResourceFilterNameResourceType, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/doc.go b/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/doc.go index 5d182652d..3de9bc95f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/doc.go @@ -9,7 +9,7 @@ // of resources that match the resource types specified in a query, and share // one or more tags or portions of tags. You can create a group of resources // based on their roles in your cloud infrastructure, lifecycle stages, regions, -// application layers, or virtually any criteria. Resource groups enable you +// application layers, or virtually any criteria. Resource Groups enable you // to automate management tasks, such as those in AWS Systems Manager Automation // documents, on tag-related resources in AWS Systems Manager. Groups of tagged // resources also let you quickly view a custom console in AWS Systems Manager diff --git a/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/errors.go b/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/errors.go index 0d0a2a074..27a78e163 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/errors.go @@ -11,45 +11,45 @@ const ( // ErrCodeBadRequestException for service response error code // "BadRequestException". // - // The request does not comply with validation rules that are defined for the - // request parameters. + // The request includes one or more parameters that violate validation rules. ErrCodeBadRequestException = "BadRequestException" // ErrCodeForbiddenException for service response error code // "ForbiddenException". // - // The caller is not authorized to make the request. + // The caller isn't authorized to make the request. Check permissions. ErrCodeForbiddenException = "ForbiddenException" // ErrCodeInternalServerErrorException for service response error code // "InternalServerErrorException". // - // An internal error occurred while processing the request. + // An internal error occurred while processing the request. Try again later. ErrCodeInternalServerErrorException = "InternalServerErrorException" // ErrCodeMethodNotAllowedException for service response error code // "MethodNotAllowedException". // - // The request uses an HTTP method which is not allowed for the specified resource. + // The request uses an HTTP method that isn't allowed for the specified resource. ErrCodeMethodNotAllowedException = "MethodNotAllowedException" // ErrCodeNotFoundException for service response error code // "NotFoundException". // - // One or more resources specified in the request do not exist. + // One or more of the specified resources don't exist. ErrCodeNotFoundException = "NotFoundException" // ErrCodeTooManyRequestsException for service response error code // "TooManyRequestsException". // - // The caller has exceeded throttling limits. + // You've exceeded throttling limits by making too many requests in a period + // of time. ErrCodeTooManyRequestsException = "TooManyRequestsException" // ErrCodeUnauthorizedException for service response error code // "UnauthorizedException". // - // The request has not been applied because it lacks valid authentication credentials - // for the target resource. 
+ // The request was rejected because it doesn't have valid credentials for the + // target resource. ErrCodeUnauthorizedException = "UnauthorizedException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go b/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go index 9f754c200..ec1a500ad 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go index c17f25d35..8a7784006 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go @@ -95,22 +95,22 @@ func (c *Route53) AssociateVPCWithHostedZoneRequest(input *AssociateVPCWithHoste // 53 doesn't support associating a VPC with a public hosted zone. // // * ErrCodeConflictingDomainExists "ConflictingDomainExists" -// The cause of this error depends on whether you're trying to create a public -// or a private hosted zone: -// -// * Public hosted zone: Two hosted zones that have the same name or that -// have a parent/child relationship (example.com and test.example.com) can't -// have any common name servers. You tried to create a hosted zone that has -// the same name as an existing hosted zone or that's the parent or child -// of an existing hosted zone, and you specified a delegation set that shares -// one or more name servers with the existing hosted zone. For more information, -// see CreateReusableDelegationSet (https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateReusableDelegationSet.html). -// -// * Private hosted zone: You specified an Amazon VPC that you're already -// using for another hosted zone, and the domain that you specified for one -// of the hosted zones is a subdomain of the domain that you specified for -// the other hosted zone. For example, you can't use the same Amazon VPC -// for the hosted zones for example.com and test.example.com. +// The cause of this error depends on the operation that you're performing: +// +// * Create a public hosted zone: Two hosted zones that have the same name +// or that have a parent/child relationship (example.com and test.example.com) +// can't have any common name servers. You tried to create a hosted zone +// that has the same name as an existing hosted zone or that's the parent +// or child of an existing hosted zone, and you specified a delegation set +// that shares one or more name servers with the existing hosted zone. For +// more information, see CreateReusableDelegationSet (https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateReusableDelegationSet.html). +// +// * Create a private hosted zone: A hosted zone with the specified name +// already exists and is already associated with the Amazon VPC that you +// specified. +// +// * Associate VPCs with a private hosted zone: The VPC that you specified +// is already associated with another hosted zone that has the same name. 
// // * ErrCodeLimitsExceeded "LimitsExceeded" // This operation can't be completed either because the current account has @@ -123,6 +123,13 @@ func (c *Route53) AssociateVPCWithHostedZoneRequest(input *AssociateVPCWithHoste // To request a higher limit, create a case (http://aws.amazon.com/route53-request) // with the AWS Support Center. // +// * ErrCodePriorRequestNotComplete "PriorRequestNotComplete" +// If Amazon Route 53 can't process a request before the next request arrives, +// it will reject subsequent requests for the same hosted zone and return an +// HTTP 400 error (Bad request). If Route 53 returns this error repeatedly for +// the same request, we recommend that you wait, in intervals of increasing +// duration, before you try the request again. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/route53-2013-04-01/AssociateVPCWithHostedZone func (c *Route53) AssociateVPCWithHostedZone(input *AssociateVPCWithHostedZoneInput) (*AssociateVPCWithHostedZoneOutput, error) { req, out := c.AssociateVPCWithHostedZoneRequest(input) @@ -195,27 +202,30 @@ func (c *Route53) ChangeResourceRecordSetsRequest(input *ChangeResourceRecordSet // routes traffic for test.example.com to a web server that has an IP address // of 192.0.2.44. // +// Deleting Resource Record Sets +// +// To delete a resource record set, you must specify all the same values that +// you specified when you created it. +// // Change Batches and Transactional Changes // // The request body must include a document with a ChangeResourceRecordSetsRequest // element. The request body contains a list of change items, known as a change -// batch. Change batches are considered transactional changes. When using the -// Amazon Route 53 API to change resource record sets, Route 53 either makes -// all or none of the changes in a change batch request. This ensures that Route -// 53 never partially implements the intended changes to the resource record -// sets in a hosted zone. -// -// For example, a change batch request that deletes the CNAME record for www.example.com -// and creates an alias resource record set for www.example.com. Route 53 deletes -// the first resource record set and creates the second resource record set -// in a single operation. If either the DELETE or the CREATE action fails, then -// both changes (plus any other changes in the batch) fail, and the original -// CNAME record continues to exist. -// -// Due to the nature of transactional changes, you can't delete the same resource -// record set more than once in a single change batch. If you attempt to delete -// the same change batch more than once, Route 53 returns an InvalidChangeBatch -// error. +// batch. Change batches are considered transactional changes. Route 53 validates +// the changes in the request and then either makes all or none of the changes +// in the change batch request. This ensures that DNS routing isn't adversely +// affected by partial changes to the resource record sets in a hosted zone. +// +// For example, suppose a change batch request contains two changes: it deletes +// the CNAME resource record set for www.example.com and creates an alias resource +// record set for www.example.com. If validation for both records succeeds, +// Route 53 deletes the first resource record set and creates the second resource +// record set in a single operation. If validation for either the DELETE or +// the CREATE action fails, then the request is canceled, and the original CNAME +// record continues to exist. 
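// An illustrative, minimal sketch of a single-change batch from calling code (sess is
// an assumed, already-configured *session.Session): an UPSERT of the test.example.com
// A record with the 192.0.2.44 value mentioned above, which the service applies fully
// or not at all.
//
//    svc := route53.New(sess)
//    _, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
//        HostedZoneId: aws.String("Z3EXAMPLE"), // hypothetical hosted zone ID
//        ChangeBatch: &route53.ChangeBatch{
//            Changes: []*route53.Change{{
//                Action: aws.String(route53.ChangeActionUpsert),
//                ResourceRecordSet: &route53.ResourceRecordSet{
//                    Name:            aws.String("test.example.com"),
//                    Type:            aws.String(route53.RRTypeA),
//                    TTL:             aws.Int64(300),
//                    ResourceRecords: []*route53.ResourceRecord{{Value: aws.String("192.0.2.44")}},
//                },
//            }},
//        },
//    })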
+// +// If you try to delete the same resource record set more than once in a single +// change batch, Route 53 returns an InvalidChangeBatch error. // // Traffic Flow // @@ -226,7 +236,7 @@ func (c *Route53) ChangeResourceRecordSetsRequest(input *ChangeResourceRecordSet // names (such as example.com) or subdomain names (such as www.example.com), // in the same hosted zone or in multiple hosted zones. You can roll back the // updates if the new configuration isn't performing as expected. For more information, -// see Using Traffic Flow to Route DNS Traffic (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/traffic-flow.html) +// see Using Traffic Flow to Route DNS Traffic (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/traffic-flow.html) // in the Amazon Route 53 Developer Guide. // // Create, Delete, and Upsert @@ -495,7 +505,7 @@ func (c *Route53) CreateHealthCheckRequest(input *CreateHealthCheckInput) (req * // of the Amazon EC2 StatusCheckFailed metric, add an alarm to the metric, // and then create a health check that is based on the state of the alarm. // For information about creating CloudWatch metrics and alarms by using -// the CloudWatch console, see the Amazon CloudWatch User Guide (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/WhatIsCloudWatch.html). +// the CloudWatch console, see the Amazon CloudWatch User Guide (https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/WhatIsCloudWatch.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -620,10 +630,10 @@ func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *re // * You can't create a hosted zone for a top-level domain (TLD) such as // .com. // -// * For public hosted zones, Amazon Route 53 automatically creates a default -// SOA record and four NS records for the zone. For more information about -// SOA and NS records, see NS and SOA Records that Route 53 Creates for a -// Hosted Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) +// * For public hosted zones, Route 53 automatically creates a default SOA +// record and four NS records for the zone. For more information about SOA +// and NS records, see NS and SOA Records that Route 53 Creates for a Hosted +// Zone (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/SOA-NSrecords.html) // in the Amazon Route 53 Developer Guide. If you want to use the same name // servers for multiple public hosted zones, you can optionally associate // a reusable delegation set with the hosted zone. See the DelegationSetId @@ -632,7 +642,7 @@ func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *re // * If your domain is registered with a registrar other than Route 53, you // must update the name servers with your registrar to make Route 53 the // DNS service for the domain. For more information, see Migrating DNS Service -// for an Existing Domain to Amazon Route 53 (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/MigratingDNS.html) +// for an Existing Domain to Amazon Route 53 (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/MigratingDNS.html) // in the Amazon Route 53 Developer Guide. // // When you submit a CreateHostedZone request, the initial status of the hosted @@ -689,22 +699,22 @@ func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *re // error, contact Customer Support. 
// // * ErrCodeConflictingDomainExists "ConflictingDomainExists" -// The cause of this error depends on whether you're trying to create a public -// or a private hosted zone: -// -// * Public hosted zone: Two hosted zones that have the same name or that -// have a parent/child relationship (example.com and test.example.com) can't -// have any common name servers. You tried to create a hosted zone that has -// the same name as an existing hosted zone or that's the parent or child -// of an existing hosted zone, and you specified a delegation set that shares -// one or more name servers with the existing hosted zone. For more information, -// see CreateReusableDelegationSet (https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateReusableDelegationSet.html). -// -// * Private hosted zone: You specified an Amazon VPC that you're already -// using for another hosted zone, and the domain that you specified for one -// of the hosted zones is a subdomain of the domain that you specified for -// the other hosted zone. For example, you can't use the same Amazon VPC -// for the hosted zones for example.com and test.example.com. +// The cause of this error depends on the operation that you're performing: +// +// * Create a public hosted zone: Two hosted zones that have the same name +// or that have a parent/child relationship (example.com and test.example.com) +// can't have any common name servers. You tried to create a hosted zone +// that has the same name as an existing hosted zone or that's the parent +// or child of an existing hosted zone, and you specified a delegation set +// that shares one or more name servers with the existing hosted zone. For +// more information, see CreateReusableDelegationSet (https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateReusableDelegationSet.html). +// +// * Create a private hosted zone: A hosted zone with the specified name +// already exists and is already associated with the Amazon VPC that you +// specified. +// +// * Associate VPCs with a private hosted zone: The VPC that you specified +// is already associated with another hosted zone that has the same name. // // * ErrCodeNoSuchDelegationSet "NoSuchDelegationSet" // A reusable delegation set with the specified ID does not exist. @@ -987,13 +997,16 @@ func (c *Route53) CreateReusableDelegationSetRequest(input *CreateReusableDelega // CreateReusableDelegationSet API operation for Amazon Route 53. // // Creates a delegation set (a group of four name servers) that can be reused -// by multiple hosted zones. If a hosted zoned ID is specified, CreateReusableDelegationSet -// marks the delegation set associated with that zone as reusable. +// by multiple hosted zones that were created by the same AWS account. +// +// You can also create a reusable delegation set that uses the four name servers +// that are associated with an existing hosted zone. Specify the hosted zone +// ID in the CreateReusableDelegationSet request. // // You can't associate a reusable delegation set with a private hosted zone. // // For information about using a reusable delegation set to configure white -// label name servers, see Configuring White Label Name Servers (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/white-label-name-servers.html). +// label name servers, see Configuring White Label Name Servers (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/white-label-name-servers.html). 
// // The process for migrating existing hosted zones to use a reusable delegation // set is comparable to the process for configuring white label name servers. @@ -1566,9 +1579,15 @@ func (c *Route53) DeleteHealthCheckRequest(input *DeleteHealthCheckInput) (req * // you delete a health check and you don't update the associated resource record // sets, the future status of the health check can't be predicted and may change. // This will affect the routing of DNS queries for your DNS failover configuration. -// For more information, see Replacing and Deleting Health Checks (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-creating-deleting.html#health-checks-deleting.html) +// For more information, see Replacing and Deleting Health Checks (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-creating-deleting.html#health-checks-deleting.html) // in the Amazon Route 53 Developer Guide. // +// If you're using AWS Cloud Map and you configured Cloud Map to create a Route +// 53 health check when you register an instance, you can't use the Route 53 +// DeleteHealthCheck command to delete the health check. The health check is +// deleted automatically when you deregister the instance; there can be a delay +// of several hours before the health check is deleted from Route 53. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1984,6 +2003,19 @@ func (c *Route53) DeleteTrafficPolicyRequest(input *DeleteTrafficPolicyInput) (r // // Deletes a traffic policy. // +// When you delete a traffic policy, Route 53 sets a flag on the policy to indicate +// that it has been deleted. However, Route 53 never fully deletes the traffic +// policy. Note the following: +// +// * Deleted traffic policies aren't listed if you run ListTrafficPolicies +// (https://docs.aws.amazon.com/Route53/latest/APIReference/API_ListTrafficPolicies.html). +// +// * There's no way to get a list of deleted policies. +// +// * If you retain the ID of the policy, you can get information about the +// policy, including the traffic policy document, by running GetTrafficPolicy +// (https://docs.aws.amazon.com/Route53/latest/APIReference/API_GetTrafficPolicy.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2269,16 +2301,25 @@ func (c *Route53) DisassociateVPCFromHostedZoneRequest(input *DisassociateVPCFro // DisassociateVPCFromHostedZone API operation for Amazon Route 53. // -// Disassociates a VPC from a Amazon Route 53 private hosted zone. Note the -// following: +// Disassociates an Amazon Virtual Private Cloud (Amazon VPC) from an Amazon +// Route 53 private hosted zone. Note the following: // -// * You can't disassociate the last VPC from a private hosted zone. +// * You can't disassociate the last Amazon VPC from a private hosted zone. // // * You can't convert a private hosted zone into a public hosted zone. // // * You can submit a DisassociateVPCFromHostedZone request using either // the account that created the hosted zone or the account that created the -// VPC. +// Amazon VPC. +// +// * Some services, such as AWS Cloud Map and Amazon Elastic File System +// (Amazon EFS) automatically create hosted zones and associate VPCs with +// the hosted zones. 
A service can create a hosted zone using your account +// or using its own account. You can disassociate a VPC from a hosted zone +// only if the service created the hosted zone using your account. When you +// run DisassociateVPCFromHostedZone (https://docs.aws.amazon.com/Route53/latest/APIReference/API_ListHostedZonesByVPC.html), +// if the hosted zone has a value for OwningAccount, you can use DisassociateVPCFromHostedZone. +// If the hosted zone has a value for OwningService, you can't use DisassociateVPCFromHostedZone. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2554,7 +2595,7 @@ func (c *Route53) GetCheckerIpRangesRequest(input *GetCheckerIpRangesInput) (req // // GetCheckerIpRanges still works, but we recommend that you download ip-ranges.json, // which includes IP address ranges for all AWS services. For more information, -// see IP Address Ranges of Amazon Route 53 Servers (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/route-53-ip-addresses.html) +// see IP Address Ranges of Amazon Route 53 Servers (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/route-53-ip-addresses.html) // in the Amazon Route 53 Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2657,7 +2698,9 @@ func (c *Route53) GetGeoLocationRequest(input *GetGeoLocationInput) (req *reques // // Returned Error Codes: // * ErrCodeNoSuchGeoLocation "NoSuchGeoLocation" -// Amazon Route 53 doesn't support the specified geographic location. +// Amazon Route 53 doesn't support the specified geographic location. For a +// list of supported geolocation codes, see the GeoLocation (https://docs.aws.amazon.com/Route53/latest/APIReference/API_GeoLocation.html) +// data type. // // * ErrCodeInvalidInput "InvalidInput" // The input is not valid. @@ -3566,6 +3609,9 @@ func (c *Route53) GetTrafficPolicyRequest(input *GetTrafficPolicyInput) (req *re // // Gets information about a specific traffic policy version. // +// For information about how of deleting a traffic policy affects the response +// from GetTrafficPolicy, see DeleteTrafficPolicy (https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteTrafficPolicy.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3817,6 +3863,9 @@ func (c *Route53) ListGeoLocationsRequest(input *ListGeoLocationsInput) (req *re // the subdivisions for that country are listed in alphabetical order immediately // after the corresponding country. // +// For a list of supported geolocation codes, see the GeoLocation (https://docs.aws.amazon.com/Route53/latest/APIReference/API_GeoLocation.html) +// data type. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4206,7 +4255,7 @@ func (c *Route53) ListHostedZonesByNameRequest(input *ListHostedZonesByNameInput // // The labels are reversed and alphabetized using the escaped value. 
For more // information about valid domain name formats, including internationalized -// domain names, see DNS Domain Name Format (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html) +// domain names, see DNS Domain Name Format (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html) // in the Amazon Route 53 Developer Guide. // // Route 53 returns up to 100 items in each response. If you have a lot of hosted @@ -4271,6 +4320,99 @@ func (c *Route53) ListHostedZonesByNameWithContext(ctx aws.Context, input *ListH return out, req.Send() } +const opListHostedZonesByVPC = "ListHostedZonesByVPC" + +// ListHostedZonesByVPCRequest generates a "aws/request.Request" representing the +// client's request for the ListHostedZonesByVPC operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListHostedZonesByVPC for more information on using the ListHostedZonesByVPC +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListHostedZonesByVPCRequest method. +// req, resp := client.ListHostedZonesByVPCRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53-2013-04-01/ListHostedZonesByVPC +func (c *Route53) ListHostedZonesByVPCRequest(input *ListHostedZonesByVPCInput) (req *request.Request, output *ListHostedZonesByVPCOutput) { + op := &request.Operation{ + Name: opListHostedZonesByVPC, + HTTPMethod: "GET", + HTTPPath: "/2013-04-01/hostedzonesbyvpc", + } + + if input == nil { + input = &ListHostedZonesByVPCInput{} + } + + output = &ListHostedZonesByVPCOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListHostedZonesByVPC API operation for Amazon Route 53. +// +// Lists all the private hosted zones that a specified VPC is associated with, +// regardless of which AWS account or AWS service owns the hosted zones. The +// HostedZoneOwner structure in the response contains one of the following values: +// +// * An OwningAccount element, which contains the account number of either +// the current AWS account or another AWS account. Some services, such as +// AWS Cloud Map, create hosted zones using the current account. +// +// * An OwningService element, which identifies the AWS service that created +// and owns the hosted zone. For example, if a hosted zone was created by +// Amazon Elastic File System (Amazon EFS), the value of Owner is efs.amazonaws.com. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Route 53's +// API operation ListHostedZonesByVPC for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInput "InvalidInput" +// The input is not valid. +// +// * ErrCodeInvalidPaginationToken "InvalidPaginationToken" +// The value that you specified to get the second or subsequent page of results +// is invalid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53-2013-04-01/ListHostedZonesByVPC +func (c *Route53) ListHostedZonesByVPC(input *ListHostedZonesByVPCInput) (*ListHostedZonesByVPCOutput, error) { + req, out := c.ListHostedZonesByVPCRequest(input) + return out, req.Send() +} + +// ListHostedZonesByVPCWithContext is the same as ListHostedZonesByVPC with the addition of +// the ability to pass a context and additional request options. +// +// See ListHostedZonesByVPC for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53) ListHostedZonesByVPCWithContext(ctx aws.Context, input *ListHostedZonesByVPCInput, opts ...request.Option) (*ListHostedZonesByVPCOutput, error) { + req, out := c.ListHostedZonesByVPCRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListQueryLoggingConfigs = "ListQueryLoggingConfigs" // ListQueryLoggingConfigsRequest generates a "aws/request.Request" representing the @@ -4302,6 +4444,12 @@ func (c *Route53) ListQueryLoggingConfigsRequest(input *ListQueryLoggingConfigsI Name: opListQueryLoggingConfigs, HTTPMethod: "GET", HTTPPath: "/2013-04-01/queryloggingconfig", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -4364,6 +4512,58 @@ func (c *Route53) ListQueryLoggingConfigsWithContext(ctx aws.Context, input *Lis return out, req.Send() } +// ListQueryLoggingConfigsPages iterates over the pages of a ListQueryLoggingConfigs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListQueryLoggingConfigs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListQueryLoggingConfigs operation. +// pageNum := 0 +// err := client.ListQueryLoggingConfigsPages(params, +// func(page *route53.ListQueryLoggingConfigsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53) ListQueryLoggingConfigsPages(input *ListQueryLoggingConfigsInput, fn func(*ListQueryLoggingConfigsOutput, bool) bool) error { + return c.ListQueryLoggingConfigsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListQueryLoggingConfigsPagesWithContext same as ListQueryLoggingConfigsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Route53) ListQueryLoggingConfigsPagesWithContext(ctx aws.Context, input *ListQueryLoggingConfigsInput, fn func(*ListQueryLoggingConfigsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListQueryLoggingConfigsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListQueryLoggingConfigsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListQueryLoggingConfigsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListResourceRecordSets = "ListResourceRecordSets" // ListResourceRecordSetsRequest generates a "aws/request.Request" representing the @@ -4894,6 +5094,9 @@ func (c *Route53) ListTrafficPoliciesRequest(input *ListTrafficPoliciesInput) (r // associated with the current AWS account. Policies are listed in the order // that they were created in. // +// For information about how of deleting a traffic policy affects the response +// from ListTrafficPolicies, see DeleteTrafficPolicy (https://docs.aws.amazon.com/Route53/latest/APIReference/API_DeleteTrafficPolicy.html). +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -5515,7 +5718,7 @@ func (c *Route53) UpdateHealthCheckRequest(input *UpdateHealthCheckInput) (req * // Updates an existing health check. Note that some values can't be updated. // // For more information about updating health checks, see Creating, Updating, -// and Deleting Health Checks (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-creating-deleting.html) +// and Deleting Health Checks (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/health-checks-creating-deleting.html) // in the Amazon Route 53 Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5908,7 +6111,7 @@ type AlarmIdentifier struct { // Route 53 supports CloudWatch alarms with the following features: // // * Standard-resolution metrics. High-resolution metrics aren't supported. - // For more information, see High-Resolution Metrics (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/publishingMetrics.html#high-resolution-metrics) + // For more information, see High-Resolution Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/publishingMetrics.html#high-resolution-metrics) // in the Amazon CloudWatch User Guide. // // * Statistics: Average, Minimum, Maximum, Sum, and SampleCount. Extended @@ -5921,9 +6124,8 @@ type AlarmIdentifier struct { // determine whether this health check is healthy, the region that the alarm // was created in. // - // For the current list of CloudWatch regions, see Amazon CloudWatch (http://docs.aws.amazon.com/general/latest/gr/rande.html#cw_region) - // in the AWS Regions and Endpoints chapter of the Amazon Web Services General - // Reference. + // For the current list of CloudWatch regions, see Amazon CloudWatch (https://docs.aws.amazon.com/general/latest/gr/rande.html#cw_region) + // in the AWS Service Endpoints chapter of the Amazon Web Services General Reference. 
// // Region is a required field Region *string `min:"1" type:"string" required:"true" enum:"CloudWatchRegion"` @@ -5983,7 +6185,7 @@ func (s *AlarmIdentifier) SetRegion(v string) *AlarmIdentifier { // record sets in a private hosted zone is unsupported. // // * For information about creating failover resource record sets in a private -// hosted zone, see Configuring Failover in a Private Hosted Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html). +// hosted zone, see Configuring Failover in a Private Hosted Zone (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html). type AliasTarget struct { _ struct{} `type:"structure"` @@ -6018,7 +6220,7 @@ type AliasTarget struct { // the name of the resource record set. For example, if the name of the resource // record set is acme.example.com, your CloudFront distribution must include // acme.example.com as one of the alternate domain names. For more information, - // see Using Alternate Domain Names (CNAMEs) (http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/CNAMEs.html) + // see Using Alternate Domain Names (CNAMEs) (https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/CNAMEs.html) // in the Amazon CloudFront Developer Guide. // // You can't create a resource record set in a private hosted zone to route @@ -6051,17 +6253,17 @@ type AliasTarget struct { // // * AWS Management Console: For information about how to get the value by // using the console, see Using Custom Domains with AWS Elastic Beanstalk - // (http://docs.aws.amazon.com/elasticbeanstalk/latest/dg/customdomains.html) + // (https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/customdomains.html) // in the AWS Elastic Beanstalk Developer Guide. // // * Elastic Beanstalk API: Use the DescribeEnvironments action to get the // value of the CNAME attribute. For more information, see DescribeEnvironments - // (http://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeEnvironments.html) + // (https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_DescribeEnvironments.html) // in the AWS Elastic Beanstalk API Reference. // // * AWS CLI: Use the describe-environments command to get the value of the - // CNAME attribute. For more information, see describe-environments (http://docs.aws.amazon.com/cli/latest/reference/elasticbeanstalk/describe-environments.html) - // in the AWS Command Line Interface Reference. + // CNAME attribute. For more information, see describe-environments (https://docs.aws.amazon.com/cli/latest/reference/elasticbeanstalk/describe-environments.html) + // in the AWS CLI Command Reference. // // ELB load balancer // @@ -6077,22 +6279,31 @@ type AliasTarget struct { // // * Elastic Load Balancing API: Use DescribeLoadBalancers to get the value // of DNSName. 
For more information, see the applicable guide: Classic Load - // Balancers: DescribeLoadBalancers (http://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html) - // Application and Network Load Balancers: DescribeLoadBalancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) + // Balancers: DescribeLoadBalancers (https://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html) + // Application and Network Load Balancers: DescribeLoadBalancers (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) // // * AWS CLI: Use describe-load-balancers to get the value of DNSName. For // more information, see the applicable guide: Classic Load Balancers: describe-load-balancers // (http://docs.aws.amazon.com/cli/latest/reference/elb/describe-load-balancers.html) // Application and Network Load Balancers: describe-load-balancers (http://docs.aws.amazon.com/cli/latest/reference/elbv2/describe-load-balancers.html) // + // AWS Global Accelerator accelerator + // + // Specify the DNS name for your accelerator: + // + // * Global Accelerator API: To get the DNS name, use DescribeAccelerator + // (https://docs.aws.amazon.com/global-accelerator/latest/api/API_DescribeAccelerator.html). + // + // * AWS CLI: To get the DNS name, use describe-accelerator (https://docs.aws.amazon.com/cli/latest/reference/globalaccelerator/describe-accelerator.html). + // // Amazon S3 bucket that is configured as a static website // // Specify the domain name of the Amazon S3 website endpoint that you created // the bucket in, for example, s3-website.us-east-2.amazonaws.com. For more - // information about valid values, see the table Amazon Simple Storage Service - // (S3) Website Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // information about valid values, see the table Amazon S3 Website Endpoints + // (https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_website_region_endpoints) // in the Amazon Web Services General Reference. For more information about - // using S3 buckets for websites, see Getting Started with Amazon Route 53 (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/getting-started.html) + // using S3 buckets for websites, see Getting Started with Amazon Route 53 (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/getting-started.html) // in the Amazon Route 53 Developer Guide. // // Another Route 53 resource record set @@ -6173,11 +6384,11 @@ type AliasTarget struct { // records (for example, a group of weighted records) but is not another alias // record, we recommend that you associate a health check with all of the records // in the alias target. For more information, see What Happens When You Omit - // Health Checks? (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html#dns-failover-complex-configs-hc-omitting) + // Health Checks? (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html#dns-failover-complex-configs-hc-omitting) // in the Amazon Route 53 Developer Guide. // // For more information and examples, see Amazon Route 53 Health Checks and - // DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) + // DNS Failover (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) // in the Amazon Route 53 Developer Guide. 
// // EvaluateTargetHealth is a required field @@ -6210,8 +6421,8 @@ type AliasTarget struct { // // Specify the hosted zone ID for the region that you created the environment // in. The environment must have a regionalized subdomain. For a list of regions - // and the corresponding hosted zone IDs, see AWS Elastic Beanstalk (http://docs.aws.amazon.com/general/latest/gr/rande.html#elasticbeanstalk_region) - // in the "AWS Regions and Endpoints" chapter of the Amazon Web Services General + // and the corresponding hosted zone IDs, see AWS Elastic Beanstalk (https://docs.aws.amazon.com/general/latest/gr/rande.html#elasticbeanstalk_region) + // in the "AWS Service Endpoints" chapter of the Amazon Web Services General // Reference. // // ELB load balancer @@ -6219,11 +6430,12 @@ type AliasTarget struct { // Specify the value of the hosted zone ID for the load balancer. Use the following // methods to get the hosted zone ID: // - // * Elastic Load Balancing (https://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region) - // table in the "AWS Regions and Endpoints" chapter of the Amazon Web Services - // General Reference: Use the value that corresponds with the region that - // you created your load balancer in. Note that there are separate columns - // for Application and Classic Load Balancers and for Network Load Balancers. + // * Service Endpoints (https://docs.aws.amazon.com/general/latest/gr/elb.html) + // table in the "Elastic Load Balancing Endpoints and Quotas" topic in the + // Amazon Web Services General Reference: Use the value that corresponds + // with the region that you created your load balancer in. Note that there + // are separate columns for Application and Classic Load Balancers and for + // Network Load Balancers. // // * AWS Management Console: Go to the Amazon EC2 page, choose Load Balancers // in the navigation pane, select the load balancer, and get the value of @@ -6231,9 +6443,9 @@ type AliasTarget struct { // // * Elastic Load Balancing API: Use DescribeLoadBalancers to get the applicable // value. For more information, see the applicable guide: Classic Load Balancers: - // Use DescribeLoadBalancers (http://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html) + // Use DescribeLoadBalancers (https://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DescribeLoadBalancers.html) // to get the value of CanonicalHostedZoneNameId. Application and Network - // Load Balancers: Use DescribeLoadBalancers (http://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) + // Load Balancers: Use DescribeLoadBalancers (https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_DescribeLoadBalancers.html) // to get the value of CanonicalHostedZoneId. // // * AWS CLI: Use describe-load-balancers to get the applicable value. For @@ -6243,13 +6455,16 @@ type AliasTarget struct { // Load Balancers: Use describe-load-balancers (http://docs.aws.amazon.com/cli/latest/reference/elbv2/describe-load-balancers.html) // to get the value of CanonicalHostedZoneId. // + // AWS Global Accelerator accelerator + // + // Specify Z2BJ6XQ5FK7U4H. + // // An Amazon S3 bucket configured as a static website // // Specify the hosted zone ID for the region that you created the bucket in. 
- // For more information about valid values, see the Amazon Simple Storage Service - // Website Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) - // table in the "AWS Regions and Endpoints" chapter of the Amazon Web Services - // General Reference. + // For more information about valid values, see the table Amazon S3 Website + // Endpoints (https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_website_region_endpoints) + // in the Amazon Web Services General Reference. // // Another Route 53 resource record set in your hosted zone // @@ -6822,7 +7037,7 @@ type CloudWatchAlarmConfiguration struct { // For the metric that the CloudWatch alarm is associated with, a complex type // that contains information about the dimensions for the metric. For information, - // see Amazon CloudWatch Namespaces, Dimensions, and Metrics Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html) + // see Amazon CloudWatch Namespaces, Dimensions, and Metrics Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html) // in the Amazon CloudWatch User Guide. Dimensions []*Dimension `locationNameList:"Dimension" type:"list"` @@ -6838,7 +7053,7 @@ type CloudWatchAlarmConfiguration struct { MetricName *string `min:"1" type:"string" required:"true"` // The namespace of the metric that the alarm is associated with. For more information, - // see Amazon CloudWatch Namespaces, Dimensions, and Metrics Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html) + // see Amazon CloudWatch Namespaces, Dimensions, and Metrics Reference (https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html) // in the Amazon CloudWatch User Guide. // // Namespace is a required field @@ -8521,17 +8736,39 @@ type GeoLocation struct { // The two-letter code for the continent. // - // Valid values: AF | AN | AS | EU | OC | NA | SA + // Amazon Route 53 supports the following continent codes: + // + // * AF: Africa + // + // * AN: Antarctica + // + // * AS: Asia + // + // * EU: Europe + // + // * OC: Oceania + // + // * NA: North America + // + // * SA: South America // // Constraint: Specifying ContinentCode with either CountryCode or SubdivisionCode // returns an InvalidInput error. ContinentCode *string `min:"2" type:"string"` - // The two-letter code for the country. + // For geolocation resource record sets, the two-letter code for a country. + // + // Amazon Route 53 uses the two-letter country codes that are specified in ISO + // standard 3166-1 alpha-2 (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2). CountryCode *string `min:"1" type:"string"` - // The code for the subdivision. Route 53 currently supports only states in - // the United States. + // For geolocation resource record sets, the two-letter code for a state of + // the United States. Route 53 doesn't support any other values for SubdivisionCode. + // For a list of state abbreviations, see Appendix B: Two–Letter State and + // Possession Abbreviations (https://pe.usps.com/text/pub28/28apb.htm) on the + // United States Postal Service website. + // + // If you specify subdivisioncode, you must also specify US for CountryCode. 
SubdivisionCode *string `min:"1" type:"string"` } @@ -8874,7 +9111,8 @@ func (s *GetCheckerIpRangesOutput) SetCheckerIpRanges(v []*string) *GetCheckerIp type GetGeoLocationInput struct { _ struct{} `locationName:"GetGeoLocationRequest" type:"structure"` - // Amazon Route 53 supports the following continent codes: + // For geolocation resource record sets, a two-letter abbreviation that identifies + // a continent. Amazon Route 53 supports the following continent codes: // // * AF: Africa // @@ -8895,10 +9133,12 @@ type GetGeoLocationInput struct { // standard 3166-1 alpha-2 (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2). CountryCode *string `location:"querystring" locationName:"countrycode" min:"1" type:"string"` - // Amazon Route 53 uses the one- to three-letter subdivision codes that are - // specified in ISO standard 3166-1 alpha-2 (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2). - // Route 53 doesn't support subdivision codes for all countries. If you specify - // subdivisioncode, you must also specify countrycode. + // For SubdivisionCode, Amazon Route 53 supports only states of the United States. + // For a list of state abbreviations, see Appendix B: Two–Letter State and + // Possession Abbreviations (https://pe.usps.com/text/pub28/28apb.htm) on the + // United States Postal Service website. + // + // If you specify subdivisioncode, you must also specify US for CountryCode. SubdivisionCode *string `location:"querystring" locationName:"subdivisioncode" min:"1" type:"string"` } @@ -10068,7 +10308,7 @@ type HealthCheckConfig struct { // The number of consecutive health checks that an endpoint must pass or fail // for Amazon Route 53 to change the current status of the endpoint from unhealthy // to healthy or vice versa. For more information, see How Amazon Route 53 Determines - // Whether an Endpoint Is Healthy (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html) + // Whether an Endpoint Is Healthy (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html) // in the Amazon Route 53 Developer Guide. // // If you don't specify a value for FailureThreshold, the default value is three @@ -10212,8 +10452,11 @@ type HealthCheckConfig struct { // You can't change the value of MeasureLatency after you create a health check. MeasureLatency *bool `type:"boolean"` - // The port on the endpoint on which you want Amazon Route 53 to perform health - // checks. Specify a value for Port only when you specify a value for IPAddress. + // The port on the endpoint that you want Amazon Route 53 to perform health + // checks on. + // + // Don't specify a value for Port when you specify a value for Type of CLOUDWATCH_METRIC + // or CALCULATED. Port *int64 `min:"1" type:"integer"` // A complex type that contains one Region element for each region from which @@ -10245,7 +10488,7 @@ type HealthCheckConfig struct { // parameters, for example, /welcome.html?language=jp&login=y. ResourcePath *string `type:"string"` - // If the value of Type is HTTP_STR_MATCH or HTTP_STR_MATCH, the string that + // If the value of Type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string that // you want Amazon Route 53 to search for in the response body from the specified // resource. If the string appears in the response body, Route 53 considers // the resource healthy. 
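As a quick illustration of the GeoLocation hunks above, the following sketch (not part of the patch) shows how a caller might look up a supported location with this vendored SDK. The default-session setup and the "US"/"WA" codes are illustrative assumptions, and SubdivisionCode is only meaningful together with CountryCode "US", as the documentation notes.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	// Assumes credentials and region come from the default provider chain.
	svc := route53.New(session.Must(session.NewSession()))

	// SubdivisionCode is supported only for U.S. states and requires
	// CountryCode "US", per the GetGeoLocationInput documentation above.
	out, err := svc.GetGeoLocation(&route53.GetGeoLocationInput{
		CountryCode:     aws.String("US"),
		SubdivisionCode: aws.String("WA"),
	})
	if err != nil {
		fmt.Println("GetGeoLocation failed:", err)
		return
	}
	fmt.Println(aws.StringValue(out.GeoLocationDetails.SubdivisionName))
}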
@@ -10652,6 +10895,103 @@ func (s *HostedZoneLimit) SetValue(v int64) *HostedZoneLimit { return s } +// A complex type that identifies a hosted zone that a specified Amazon VPC +// is associated with and the owner of the hosted zone. If there is a value +// for OwningAccount, there is no value for OwningService, and vice versa. +type HostedZoneOwner struct { + _ struct{} `type:"structure"` + + // If the hosted zone was created by an AWS account, or was created by an AWS + // service that creates hosted zones using the current account, OwningAccount + // contains the account ID of that account. For example, when you use AWS Cloud + // Map to create a hosted zone, Cloud Map creates the hosted zone using the + // current AWS account. + OwningAccount *string `type:"string"` + + // If an AWS service uses its own account to create a hosted zone and associate + // the specified VPC with that hosted zone, OwningService contains an abbreviation + // that identifies the service. For example, if Amazon Elastic File System (Amazon + // EFS) created a hosted zone and associated a VPC with the hosted zone, the + // value of OwningService is efs.amazonaws.com. + OwningService *string `type:"string"` +} + +// String returns the string representation +func (s HostedZoneOwner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostedZoneOwner) GoString() string { + return s.String() +} + +// SetOwningAccount sets the OwningAccount field's value. +func (s *HostedZoneOwner) SetOwningAccount(v string) *HostedZoneOwner { + s.OwningAccount = &v + return s +} + +// SetOwningService sets the OwningService field's value. +func (s *HostedZoneOwner) SetOwningService(v string) *HostedZoneOwner { + s.OwningService = &v + return s +} + +// In the response to a ListHostedZonesByVPC request, the HostedZoneSummaries +// element contains one HostedZoneSummary element for each hosted zone that +// the specified Amazon VPC is associated with. Each HostedZoneSummary element +// contains the hosted zone name and ID, and information about who owns the +// hosted zone. +type HostedZoneSummary struct { + _ struct{} `type:"structure"` + + // The Route 53 hosted zone ID of a private hosted zone that the specified VPC + // is associated with. + // + // HostedZoneId is a required field + HostedZoneId *string `type:"string" required:"true"` + + // The name of the private hosted zone, such as example.com. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The owner of a private hosted zone that the specified VPC is associated with. + // The owner can be either an AWS account or an AWS service. + // + // Owner is a required field + Owner *HostedZoneOwner `type:"structure" required:"true"` +} + +// String returns the string representation +func (s HostedZoneSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HostedZoneSummary) GoString() string { + return s.String() +} + +// SetHostedZoneId sets the HostedZoneId field's value. +func (s *HostedZoneSummary) SetHostedZoneId(v string) *HostedZoneSummary { + s.HostedZoneId = &v + return s +} + +// SetName sets the Name field's value. +func (s *HostedZoneSummary) SetName(v string) *HostedZoneSummary { + s.Name = &v + return s +} + +// SetOwner sets the Owner field's value. 
+func (s *HostedZoneSummary) SetOwner(v *HostedZoneOwner) *HostedZoneSummary { + s.Owner = v + return s +} + // If a health check or hosted zone was created by another service, LinkedService // is a complex type that describes the service that created the resource. When // a resource is created by another service, you can't edit or delete it using @@ -10719,18 +11059,15 @@ type ListGeoLocationsInput struct { // a page or more of results, if IsTruncated is true, and if NextCountryCode // from the previous response has a value, enter that value in startcountrycode // to return the next page of results. - // - // Route 53 uses the two-letter country codes that are specified in ISO standard - // 3166-1 alpha-2 (https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2). StartCountryCode *string `location:"querystring" locationName:"startcountrycode" min:"1" type:"string"` - // The code for the subdivision (for example, state or province) with which - // you want to start listing locations that Amazon Route 53 supports for geolocation. - // If Route 53 has already returned a page or more of results, if IsTruncated - // is true, and if NextSubdivisionCode from the previous response has a value, - // enter that value in startsubdivisioncode to return the next page of results. + // The code for the state of the United States with which you want to start + // listing locations that Amazon Route 53 supports for geolocation. If Route + // 53 has already returned a page or more of results, if IsTruncated is true, + // and if NextSubdivisionCode from the previous response has a value, enter + // that value in startsubdivisioncode to return the next page of results. // - // To list subdivisions of a country, you must include both startcountrycode + // To list subdivisions (U.S. states), you must include both startcountrycode // and startsubdivisioncode. StartSubdivisionCode *string `location:"querystring" locationName:"startsubdivisioncode" min:"1" type:"string"` } @@ -11157,6 +11494,144 @@ func (s *ListHostedZonesByNameOutput) SetNextHostedZoneId(v string) *ListHostedZ return s } +// Lists all the private hosted zones that a specified VPC is associated with, +// regardless of which AWS account created the hosted zones. +type ListHostedZonesByVPCInput struct { + _ struct{} `locationName:"ListHostedZonesByVPCRequest" type:"structure"` + + // (Optional) The maximum number of hosted zones that you want Amazon Route + // 53 to return. If the specified VPC is associated with more than MaxItems + // hosted zones, the response includes a NextToken element. NextToken contains + // an encrypted token that identifies the first hosted zone that Route 53 will + // return if you submit another request. + MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` + + // If the previous response included a NextToken element, the specified VPC + // is associated with more hosted zones. To get more hosted zones, submit another + // ListHostedZonesByVPC request. + // + // For the value of NextToken, specify the value of NextToken from the previous + // response. + // + // If the previous response didn't include a NextToken element, there are no + // more hosted zones to get. + NextToken *string `location:"querystring" locationName:"nexttoken" type:"string"` + + // The ID of the Amazon VPC that you want to list hosted zones for. 
+ // + // VPCId is a required field + VPCId *string `location:"querystring" locationName:"vpcid" type:"string" required:"true"` + + // For the Amazon VPC that you specified for VPCId, the AWS Region that you + // created the VPC in. + // + // VPCRegion is a required field + VPCRegion *string `location:"querystring" locationName:"vpcregion" min:"1" type:"string" required:"true" enum:"VPCRegion"` +} + +// String returns the string representation +func (s ListHostedZonesByVPCInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesByVPCInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListHostedZonesByVPCInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListHostedZonesByVPCInput"} + if s.VPCId == nil { + invalidParams.Add(request.NewErrParamRequired("VPCId")) + } + if s.VPCRegion == nil { + invalidParams.Add(request.NewErrParamRequired("VPCRegion")) + } + if s.VPCRegion != nil && len(*s.VPCRegion) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VPCRegion", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListHostedZonesByVPCInput) SetMaxItems(v string) *ListHostedZonesByVPCInput { + s.MaxItems = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListHostedZonesByVPCInput) SetNextToken(v string) *ListHostedZonesByVPCInput { + s.NextToken = &v + return s +} + +// SetVPCId sets the VPCId field's value. +func (s *ListHostedZonesByVPCInput) SetVPCId(v string) *ListHostedZonesByVPCInput { + s.VPCId = &v + return s +} + +// SetVPCRegion sets the VPCRegion field's value. +func (s *ListHostedZonesByVPCInput) SetVPCRegion(v string) *ListHostedZonesByVPCInput { + s.VPCRegion = &v + return s +} + +type ListHostedZonesByVPCOutput struct { + _ struct{} `type:"structure"` + + // A list that contains one HostedZoneSummary element for each hosted zone that + // the specified Amazon VPC is associated with. Each HostedZoneSummary element + // contains the hosted zone name and ID, and information about who owns the + // hosted zone. + // + // HostedZoneSummaries is a required field + HostedZoneSummaries []*HostedZoneSummary `locationNameList:"HostedZoneSummary" type:"list" required:"true"` + + // The value that you specified for MaxItems in the most recent ListHostedZonesByVPC + // request. + // + // MaxItems is a required field + MaxItems *string `type:"string" required:"true"` + + // The value that you specified for NextToken in the most recent ListHostedZonesByVPC + // request. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListHostedZonesByVPCOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListHostedZonesByVPCOutput) GoString() string { + return s.String() +} + +// SetHostedZoneSummaries sets the HostedZoneSummaries field's value. +func (s *ListHostedZonesByVPCOutput) SetHostedZoneSummaries(v []*HostedZoneSummary) *ListHostedZonesByVPCOutput { + s.HostedZoneSummaries = v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListHostedZonesByVPCOutput) SetMaxItems(v string) *ListHostedZonesByVPCOutput { + s.MaxItems = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListHostedZonesByVPCOutput) SetNextToken(v string) *ListHostedZonesByVPCOutput { + s.NextToken = &v + return s +} + // A request to retrieve a list of the public and private hosted zones that // are associated with the current AWS account. type ListHostedZonesInput struct { @@ -11417,7 +11892,9 @@ type ListResourceRecordSetsInput struct { StartRecordIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"` // The first name in the lexicographic ordering of resource record sets that - // you want to list. + // you want to list. If the specified record name doesn't exist, the results + // begin with the first resource record set that has a name greater than the + // value of name. StartRecordName *string `location:"querystring" locationName:"name" type:"string"` // The type of resource record set to begin the record listing from. @@ -11438,9 +11915,9 @@ type ListResourceRecordSetsInput struct { // // * Elastic Load Balancing load balancer: A | AAAA // - // * Amazon S3 bucket: A + // * S3 bucket: A // - // * Amazon VPC interface VPC endpoint: A + // * VPC interface VPC endpoint: A // // * Another resource record set in this hosted zone: The type of the resource // record set that the alias references. @@ -12950,7 +13427,7 @@ type ResourceRecordSet struct { // record sets in a private hosted zone is unsupported. // // * For information about creating failover resource record sets in a private - // hosted zone, see Configuring Failover in a Private Hosted Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html) + // hosted zone, see Configuring Failover in a Private Hosted Zone (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html) // in the Amazon Route 53 Developer Guide. AliasTarget *AliasTarget `type:"structure"` @@ -12990,9 +13467,9 @@ type ResourceRecordSet struct { // For more information about configuring failover for Route 53, see the following // topics in the Amazon Route 53 Developer Guide: // - // * Route 53 Health Checks and DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) + // * Route 53 Health Checks and DNS Failover (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) // - // * Configuring Failover in a Private Hosted Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html) + // * Configuring Failover in a Private Hosted Zone (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html) Failover *string `type:"string" enum:"ResourceRecordSetFailover"` // Geolocation resource record sets only: A complex type that lets you control @@ -13001,8 +13478,8 @@ type ResourceRecordSet struct { // to a web server with an IP address of 192.0.2.111, create a resource record // set with a Type of A and a ContinentCode of AF. // - // Creating geolocation and geolocation alias resource record sets in private - // hosted zones is not supported. + // Although creating geolocation and geolocation alias resource record sets + // in a private hosted zone is allowed, it's not supported. 
// // If you create separate resource record sets for overlapping geographic regions // (for example, one resource record set for a continent and one for a country @@ -13021,11 +13498,12 @@ type ResourceRecordSet struct { // addresses aren't mapped to geographic locations, so even if you create geolocation // resource record sets that cover all seven continents, Route 53 will receive // some DNS queries from locations that it can't identify. We recommend that - // you create a resource record set for which the value of CountryCode is *, - // which handles both queries that come from locations for which you haven't - // created geolocation resource record sets and queries from IP addresses that - // aren't mapped to a location. If you don't create a * resource record set, - // Route 53 returns a "no answer" response for queries from those locations. + // you create a resource record set for which the value of CountryCode is *. + // Two groups of queries are routed to the resource that you specify in this + // record: queries that come from locations for which you haven't created geolocation + // resource record sets and queries from IP addresses that aren't mapped to + // a location. If you don't create a * resource record set, Route 53 returns + // a "no answer" response for queries from those locations. // // You can't create non-geolocation resource record sets that have the same // values for the Name and Type elements as geolocation resource record sets. @@ -13058,9 +13536,9 @@ type ResourceRecordSet struct { // // * How Amazon Route 53 Determines Whether an Endpoint Is Healthy (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html) // - // * Route 53 Health Checks and DNS Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) + // * Route 53 Health Checks and DNS Failover (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover.html) // - // * Configuring Failover in a Private Hosted Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html) + // * Configuring Failover in a Private Hosted Zone (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-private-hosted-zones.html) // // When to Specify HealthCheckId // @@ -13173,7 +13651,7 @@ type ResourceRecordSet struct { // // For information about how to specify characters other than a-z, 0-9, and // - (hyphen) and how to specify internationalized domain names, see DNS Domain - // Name Format (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html) + // Name Format (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DomainNameFormat.html) // in the Amazon Route 53 Developer Guide. // // You can use the asterisk (*) wildcard to replace the leftmost label in a @@ -13203,8 +13681,8 @@ type ResourceRecordSet struct { // and is referred to by an IP address or a DNS domain name, depending on the // record type. // - // Creating latency and latency alias resource record sets in private hosted - // zones is not supported. + // Although creating latency and latency alias resource record sets in a private + // hosted zone is allowed, it's not supported. // // When Amazon Route 53 receives a DNS query for a domain name and type for // which you have created latency resource record sets, Route 53 selects the @@ -13278,7 +13756,7 @@ type ResourceRecordSet struct { TrafficPolicyInstanceId *string `min:"1" type:"string"` // The DNS record type. 
For information about different record types and how - // data is encoded for them, see Supported DNS Resource Record Types (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) + // data is encoded for them, see Supported DNS Resource Record Types (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ResourceRecordTypes.html) // in the Amazon Route 53 Developer Guide. // // Valid values for basic resource record sets: A | AAAA | CAA | CNAME | MX @@ -13309,8 +13787,7 @@ type ResourceRecordSet struct { // create two resource record sets to route traffic to your distribution, // one with a value of A and one with a value of AAAA. // - // * AWS Elastic Beanstalk environment that has a regionalized subdomain: - // A + // * Amazon API Gateway environment that has a regionalized subdomain: A // // * ELB load balancers: A | AAAA // @@ -13358,7 +13835,7 @@ type ResourceRecordSet struct { // of DNS name and type, traffic is routed to all resources with equal probability. // The effect of setting Weight to 0 is different when you associate health // checks with weighted resource record sets. For more information, see Options - // for Configuring Route 53 Active-Active and Active-Passive Failover (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html) + // for Configuring Route 53 Active-Active and Active-Passive Failover (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html) // in the Amazon Route 53 Developer Guide. Weight *int64 `type:"long"` } @@ -14231,7 +14708,7 @@ type UpdateHealthCheckInput struct { // The number of consecutive health checks that an endpoint must pass or fail // for Amazon Route 53 to change the current status of the endpoint from unhealthy // to healthy or vice versa. For more information, see How Amazon Route 53 Determines - // Whether an Endpoint Is Healthy (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html) + // Whether an Endpoint Is Healthy (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html) // in the Amazon Route 53 Developer Guide. // // If you don't specify a value for FailureThreshold, the default value is three @@ -14402,8 +14879,11 @@ type UpdateHealthCheckInput struct { // would be considered healthy. Inverted *bool `type:"boolean"` - // The port on the endpoint on which you want Amazon Route 53 to perform health - // checks. + // The port on the endpoint that you want Amazon Route 53 to perform health + // checks on. + // + // Don't specify a value for Port when you specify a value for Type of CLOUDWATCH_METRIC + // or CALCULATED. Port *int64 `min:"1" type:"integer"` // A complex type that contains one Region element for each region that you @@ -14436,7 +14916,7 @@ type UpdateHealthCheckInput struct { // Specify this value only if you want to change it. ResourcePath *string `type:"string"` - // If the value of Type is HTTP_STR_MATCH or HTTP_STR_MATCH, the string that + // If the value of Type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string that // you want Amazon Route 53 to search for in the response body from the specified // resource. If the string appears in the response body, Route 53 considers // the resource healthy. 
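A minimal usage sketch of the SearchString and FailureThreshold fields documented above, assuming an existing HTTPS_STR_MATCH health check; the health check ID is a placeholder, and the HealthCheckId field name is taken from the broader UpdateHealthCheck input rather than from this hunk:

    // Sketch: change the string that Route 53 searches for in the response body
    // of an HTTPS_STR_MATCH health check.
    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/route53"
    )

    func main() {
        svc := route53.New(session.Must(session.NewSession()))

        out, err := svc.UpdateHealthCheck(&route53.UpdateHealthCheckInput{
            HealthCheckId:    aws.String("abcdef11-2222-3333-4444-555555fedcba"), // placeholder ID (assumed field name)
            SearchString:     aws.String("healthy"),                              // matched against the response body
            FailureThreshold: aws.Int64(3),
        })
        if err != nil {
            fmt.Println("UpdateHealthCheck failed:", err)
            return
        }
        fmt.Println(out)
    }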
(You can't change the value of Type when you update @@ -14982,6 +15462,17 @@ const ( AccountLimitTypeMaxTrafficPoliciesByOwner = "MAX_TRAFFIC_POLICIES_BY_OWNER" ) +// AccountLimitType_Values returns all elements of the AccountLimitType enum +func AccountLimitType_Values() []string { + return []string{ + AccountLimitTypeMaxHealthChecksByOwner, + AccountLimitTypeMaxHostedZonesByOwner, + AccountLimitTypeMaxTrafficPolicyInstancesByOwner, + AccountLimitTypeMaxReusableDelegationSetsByOwner, + AccountLimitTypeMaxTrafficPoliciesByOwner, + } +} + const ( // ChangeActionCreate is a ChangeAction enum value ChangeActionCreate = "CREATE" @@ -14993,6 +15484,15 @@ const ( ChangeActionUpsert = "UPSERT" ) +// ChangeAction_Values returns all elements of the ChangeAction enum +func ChangeAction_Values() []string { + return []string{ + ChangeActionCreate, + ChangeActionDelete, + ChangeActionUpsert, + } +} + const ( // ChangeStatusPending is a ChangeStatus enum value ChangeStatusPending = "PENDING" @@ -15001,6 +15501,14 @@ const ( ChangeStatusInsync = "INSYNC" ) +// ChangeStatus_Values returns all elements of the ChangeStatus enum +func ChangeStatus_Values() []string { + return []string{ + ChangeStatusPending, + ChangeStatusInsync, + } +} + const ( // CloudWatchRegionUsEast1 is a CloudWatchRegion enum value CloudWatchRegionUsEast1 = "us-east-1" @@ -15064,8 +15572,59 @@ const ( // CloudWatchRegionCnNorth1 is a CloudWatchRegion enum value CloudWatchRegionCnNorth1 = "cn-north-1" + + // CloudWatchRegionAfSouth1 is a CloudWatchRegion enum value + CloudWatchRegionAfSouth1 = "af-south-1" + + // CloudWatchRegionEuSouth1 is a CloudWatchRegion enum value + CloudWatchRegionEuSouth1 = "eu-south-1" + + // CloudWatchRegionUsGovWest1 is a CloudWatchRegion enum value + CloudWatchRegionUsGovWest1 = "us-gov-west-1" + + // CloudWatchRegionUsGovEast1 is a CloudWatchRegion enum value + CloudWatchRegionUsGovEast1 = "us-gov-east-1" + + // CloudWatchRegionUsIsoEast1 is a CloudWatchRegion enum value + CloudWatchRegionUsIsoEast1 = "us-iso-east-1" + + // CloudWatchRegionUsIsobEast1 is a CloudWatchRegion enum value + CloudWatchRegionUsIsobEast1 = "us-isob-east-1" ) +// CloudWatchRegion_Values returns all elements of the CloudWatchRegion enum +func CloudWatchRegion_Values() []string { + return []string{ + CloudWatchRegionUsEast1, + CloudWatchRegionUsEast2, + CloudWatchRegionUsWest1, + CloudWatchRegionUsWest2, + CloudWatchRegionCaCentral1, + CloudWatchRegionEuCentral1, + CloudWatchRegionEuWest1, + CloudWatchRegionEuWest2, + CloudWatchRegionEuWest3, + CloudWatchRegionApEast1, + CloudWatchRegionMeSouth1, + CloudWatchRegionApSouth1, + CloudWatchRegionApSoutheast1, + CloudWatchRegionApSoutheast2, + CloudWatchRegionApNortheast1, + CloudWatchRegionApNortheast2, + CloudWatchRegionApNortheast3, + CloudWatchRegionEuNorth1, + CloudWatchRegionSaEast1, + CloudWatchRegionCnNorthwest1, + CloudWatchRegionCnNorth1, + CloudWatchRegionAfSouth1, + CloudWatchRegionEuSouth1, + CloudWatchRegionUsGovWest1, + CloudWatchRegionUsGovEast1, + CloudWatchRegionUsIsoEast1, + CloudWatchRegionUsIsobEast1, + } +} + const ( // ComparisonOperatorGreaterThanOrEqualToThreshold is a ComparisonOperator enum value ComparisonOperatorGreaterThanOrEqualToThreshold = "GreaterThanOrEqualToThreshold" @@ -15080,6 +15639,16 @@ const ( ComparisonOperatorLessThanOrEqualToThreshold = "LessThanOrEqualToThreshold" ) +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + 
ComparisonOperatorGreaterThanOrEqualToThreshold, + ComparisonOperatorGreaterThanThreshold, + ComparisonOperatorLessThanThreshold, + ComparisonOperatorLessThanOrEqualToThreshold, + } +} + const ( // HealthCheckRegionUsEast1 is a HealthCheckRegion enum value HealthCheckRegionUsEast1 = "us-east-1" @@ -15106,6 +15675,20 @@ const ( HealthCheckRegionSaEast1 = "sa-east-1" ) +// HealthCheckRegion_Values returns all elements of the HealthCheckRegion enum +func HealthCheckRegion_Values() []string { + return []string{ + HealthCheckRegionUsEast1, + HealthCheckRegionUsWest1, + HealthCheckRegionUsWest2, + HealthCheckRegionEuWest1, + HealthCheckRegionApSoutheast1, + HealthCheckRegionApSoutheast2, + HealthCheckRegionApNortheast1, + HealthCheckRegionSaEast1, + } +} + const ( // HealthCheckTypeHttp is a HealthCheckType enum value HealthCheckTypeHttp = "HTTP" @@ -15129,6 +15712,19 @@ const ( HealthCheckTypeCloudwatchMetric = "CLOUDWATCH_METRIC" ) +// HealthCheckType_Values returns all elements of the HealthCheckType enum +func HealthCheckType_Values() []string { + return []string{ + HealthCheckTypeHttp, + HealthCheckTypeHttps, + HealthCheckTypeHttpStrMatch, + HealthCheckTypeHttpsStrMatch, + HealthCheckTypeTcp, + HealthCheckTypeCalculated, + HealthCheckTypeCloudwatchMetric, + } +} + const ( // HostedZoneLimitTypeMaxRrsetsByZone is a HostedZoneLimitType enum value HostedZoneLimitTypeMaxRrsetsByZone = "MAX_RRSETS_BY_ZONE" @@ -15137,6 +15733,14 @@ const ( HostedZoneLimitTypeMaxVpcsAssociatedByZone = "MAX_VPCS_ASSOCIATED_BY_ZONE" ) +// HostedZoneLimitType_Values returns all elements of the HostedZoneLimitType enum +func HostedZoneLimitType_Values() []string { + return []string{ + HostedZoneLimitTypeMaxRrsetsByZone, + HostedZoneLimitTypeMaxVpcsAssociatedByZone, + } +} + const ( // InsufficientDataHealthStatusHealthy is a InsufficientDataHealthStatus enum value InsufficientDataHealthStatusHealthy = "Healthy" @@ -15148,6 +15752,15 @@ const ( InsufficientDataHealthStatusLastKnownStatus = "LastKnownStatus" ) +// InsufficientDataHealthStatus_Values returns all elements of the InsufficientDataHealthStatus enum +func InsufficientDataHealthStatus_Values() []string { + return []string{ + InsufficientDataHealthStatusHealthy, + InsufficientDataHealthStatusUnhealthy, + InsufficientDataHealthStatusLastKnownStatus, + } +} + const ( // RRTypeSoa is a RRType enum value RRTypeSoa = "SOA" @@ -15186,6 +15799,24 @@ const ( RRTypeCaa = "CAA" ) +// RRType_Values returns all elements of the RRType enum +func RRType_Values() []string { + return []string{ + RRTypeSoa, + RRTypeA, + RRTypeTxt, + RRTypeNs, + RRTypeCname, + RRTypeMx, + RRTypeNaptr, + RRTypePtr, + RRTypeSrv, + RRTypeSpf, + RRTypeAaaa, + RRTypeCaa, + } +} + const ( // ResettableElementNameFullyQualifiedDomainName is a ResettableElementName enum value ResettableElementNameFullyQualifiedDomainName = "FullyQualifiedDomainName" @@ -15200,6 +15831,16 @@ const ( ResettableElementNameChildHealthChecks = "ChildHealthChecks" ) +// ResettableElementName_Values returns all elements of the ResettableElementName enum +func ResettableElementName_Values() []string { + return []string{ + ResettableElementNameFullyQualifiedDomainName, + ResettableElementNameRegions, + ResettableElementNameResourcePath, + ResettableElementNameChildHealthChecks, + } +} + const ( // ResourceRecordSetFailoverPrimary is a ResourceRecordSetFailover enum value ResourceRecordSetFailoverPrimary = "PRIMARY" @@ -15208,6 +15849,14 @@ const ( ResourceRecordSetFailoverSecondary = "SECONDARY" ) +// 
ResourceRecordSetFailover_Values returns all elements of the ResourceRecordSetFailover enum +func ResourceRecordSetFailover_Values() []string { + return []string{ + ResourceRecordSetFailoverPrimary, + ResourceRecordSetFailoverSecondary, + } +} + const ( // ResourceRecordSetRegionUsEast1 is a ResourceRecordSetRegion enum value ResourceRecordSetRegionUsEast1 = "us-east-1" @@ -15271,13 +15920,55 @@ const ( // ResourceRecordSetRegionApSouth1 is a ResourceRecordSetRegion enum value ResourceRecordSetRegionApSouth1 = "ap-south-1" + + // ResourceRecordSetRegionAfSouth1 is a ResourceRecordSetRegion enum value + ResourceRecordSetRegionAfSouth1 = "af-south-1" + + // ResourceRecordSetRegionEuSouth1 is a ResourceRecordSetRegion enum value + ResourceRecordSetRegionEuSouth1 = "eu-south-1" ) +// ResourceRecordSetRegion_Values returns all elements of the ResourceRecordSetRegion enum +func ResourceRecordSetRegion_Values() []string { + return []string{ + ResourceRecordSetRegionUsEast1, + ResourceRecordSetRegionUsEast2, + ResourceRecordSetRegionUsWest1, + ResourceRecordSetRegionUsWest2, + ResourceRecordSetRegionCaCentral1, + ResourceRecordSetRegionEuWest1, + ResourceRecordSetRegionEuWest2, + ResourceRecordSetRegionEuWest3, + ResourceRecordSetRegionEuCentral1, + ResourceRecordSetRegionApSoutheast1, + ResourceRecordSetRegionApSoutheast2, + ResourceRecordSetRegionApNortheast1, + ResourceRecordSetRegionApNortheast2, + ResourceRecordSetRegionApNortheast3, + ResourceRecordSetRegionEuNorth1, + ResourceRecordSetRegionSaEast1, + ResourceRecordSetRegionCnNorth1, + ResourceRecordSetRegionCnNorthwest1, + ResourceRecordSetRegionApEast1, + ResourceRecordSetRegionMeSouth1, + ResourceRecordSetRegionApSouth1, + ResourceRecordSetRegionAfSouth1, + ResourceRecordSetRegionEuSouth1, + } +} + const ( // ReusableDelegationSetLimitTypeMaxZonesByReusableDelegationSet is a ReusableDelegationSetLimitType enum value ReusableDelegationSetLimitTypeMaxZonesByReusableDelegationSet = "MAX_ZONES_BY_REUSABLE_DELEGATION_SET" ) +// ReusableDelegationSetLimitType_Values returns all elements of the ReusableDelegationSetLimitType enum +func ReusableDelegationSetLimitType_Values() []string { + return []string{ + ReusableDelegationSetLimitTypeMaxZonesByReusableDelegationSet, + } +} + const ( // StatisticAverage is a Statistic enum value StatisticAverage = "Average" @@ -15295,6 +15986,17 @@ const ( StatisticMinimum = "Minimum" ) +// Statistic_Values returns all elements of the Statistic enum +func Statistic_Values() []string { + return []string{ + StatisticAverage, + StatisticSum, + StatisticSampleCount, + StatisticMaximum, + StatisticMinimum, + } +} + const ( // TagResourceTypeHealthcheck is a TagResourceType enum value TagResourceTypeHealthcheck = "healthcheck" @@ -15303,6 +16005,14 @@ const ( TagResourceTypeHostedzone = "hostedzone" ) +// TagResourceType_Values returns all elements of the TagResourceType enum +func TagResourceType_Values() []string { + return []string{ + TagResourceTypeHealthcheck, + TagResourceTypeHostedzone, + } +} + const ( // VPCRegionUsEast1 is a VPCRegion enum value VPCRegionUsEast1 = "us-east-1" @@ -15334,6 +16044,18 @@ const ( // VPCRegionMeSouth1 is a VPCRegion enum value VPCRegionMeSouth1 = "me-south-1" + // VPCRegionUsGovWest1 is a VPCRegion enum value + VPCRegionUsGovWest1 = "us-gov-west-1" + + // VPCRegionUsGovEast1 is a VPCRegion enum value + VPCRegionUsGovEast1 = "us-gov-east-1" + + // VPCRegionUsIsoEast1 is a VPCRegion enum value + VPCRegionUsIsoEast1 = "us-iso-east-1" + + // VPCRegionUsIsobEast1 is a VPCRegion enum 
value + VPCRegionUsIsobEast1 = "us-isob-east-1" + // VPCRegionApSoutheast1 is a VPCRegion enum value VPCRegionApSoutheast1 = "ap-southeast-1" @@ -15363,4 +16085,42 @@ const ( // VPCRegionCnNorth1 is a VPCRegion enum value VPCRegionCnNorth1 = "cn-north-1" + + // VPCRegionAfSouth1 is a VPCRegion enum value + VPCRegionAfSouth1 = "af-south-1" + + // VPCRegionEuSouth1 is a VPCRegion enum value + VPCRegionEuSouth1 = "eu-south-1" ) + +// VPCRegion_Values returns all elements of the VPCRegion enum +func VPCRegion_Values() []string { + return []string{ + VPCRegionUsEast1, + VPCRegionUsEast2, + VPCRegionUsWest1, + VPCRegionUsWest2, + VPCRegionEuWest1, + VPCRegionEuWest2, + VPCRegionEuWest3, + VPCRegionEuCentral1, + VPCRegionApEast1, + VPCRegionMeSouth1, + VPCRegionUsGovWest1, + VPCRegionUsGovEast1, + VPCRegionUsIsoEast1, + VPCRegionUsIsobEast1, + VPCRegionApSoutheast1, + VPCRegionApSoutheast2, + VPCRegionApSouth1, + VPCRegionApNortheast1, + VPCRegionApNortheast2, + VPCRegionApNortheast3, + VPCRegionEuNorth1, + VPCRegionSaEast1, + VPCRegionCaCentral1, + VPCRegionCnNorth1, + VPCRegionAfSouth1, + VPCRegionEuSouth1, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/errors.go b/vendor/github.com/aws/aws-sdk-go/service/route53/errors.go index ce86bd613..23b4270b9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/errors.go @@ -14,22 +14,22 @@ const ( // ErrCodeConflictingDomainExists for service response error code // "ConflictingDomainExists". // - // The cause of this error depends on whether you're trying to create a public - // or a private hosted zone: - // - // * Public hosted zone: Two hosted zones that have the same name or that - // have a parent/child relationship (example.com and test.example.com) can't - // have any common name servers. You tried to create a hosted zone that has - // the same name as an existing hosted zone or that's the parent or child - // of an existing hosted zone, and you specified a delegation set that shares - // one or more name servers with the existing hosted zone. For more information, - // see CreateReusableDelegationSet (https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateReusableDelegationSet.html). - // - // * Private hosted zone: You specified an Amazon VPC that you're already - // using for another hosted zone, and the domain that you specified for one - // of the hosted zones is a subdomain of the domain that you specified for - // the other hosted zone. For example, you can't use the same Amazon VPC - // for the hosted zones for example.com and test.example.com. + // The cause of this error depends on the operation that you're performing: + // + // * Create a public hosted zone: Two hosted zones that have the same name + // or that have a parent/child relationship (example.com and test.example.com) + // can't have any common name servers. You tried to create a hosted zone + // that has the same name as an existing hosted zone or that's the parent + // or child of an existing hosted zone, and you specified a delegation set + // that shares one or more name servers with the existing hosted zone. For + // more information, see CreateReusableDelegationSet (https://docs.aws.amazon.com/Route53/latest/APIReference/API_CreateReusableDelegationSet.html). + // + // * Create a private hosted zone: A hosted zone with the specified name + // already exists and is already associated with the Amazon VPC that you + // specified. 
+ // + // * Associate VPCs with a private hosted zone: The VPC that you specified + // is already associated with another hosted zone that has the same name. ErrCodeConflictingDomainExists = "ConflictingDomainExists" // ErrCodeConflictingTypes for service response error code @@ -240,7 +240,9 @@ const ( // ErrCodeNoSuchGeoLocation for service response error code // "NoSuchGeoLocation". // - // Amazon Route 53 doesn't support the specified geographic location. + // Amazon Route 53 doesn't support the specified geographic location. For a + // list of supported geolocation codes, see the GeoLocation (https://docs.aws.amazon.com/Route53/latest/APIReference/API_GeoLocation.html) + // data type. ErrCodeNoSuchGeoLocation = "NoSuchGeoLocation" // ErrCodeNoSuchHealthCheck for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/service.go b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go index ad56b6621..96f3e5fcc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restxml" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53resolver/api.go b/vendor/github.com/aws/aws-sdk-go/service/route53resolver/api.go index 721af4756..eff7dc279 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53resolver/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53resolver/api.go @@ -56,11 +56,12 @@ func (c *Route53Resolver) AssociateResolverEndpointIpAddressRequest(input *Assoc // AssociateResolverEndpointIpAddress API operation for Amazon Route 53 Resolver. // -// Adds IP addresses to an inbound or an outbound resolver endpoint. If you -// want to adding more than one IP address, submit one AssociateResolverEndpointIpAddress +// Adds IP addresses to an inbound or an outbound Resolver endpoint. If you +// want to add more than one IP address, submit one AssociateResolverEndpointIpAddress // request for each IP address. // -// To remove an IP address from an endpoint, see DisassociateResolverEndpointIpAddress. +// To remove an IP address from an endpoint, see DisassociateResolverEndpointIpAddress +// (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverEndpointIpAddress.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -113,6 +114,117 @@ func (c *Route53Resolver) AssociateResolverEndpointIpAddressWithContext(ctx aws. return out, req.Send() } +const opAssociateResolverQueryLogConfig = "AssociateResolverQueryLogConfig" + +// AssociateResolverQueryLogConfigRequest generates a "aws/request.Request" representing the +// client's request for the AssociateResolverQueryLogConfig operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateResolverQueryLogConfig for more information on using the AssociateResolverQueryLogConfig +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateResolverQueryLogConfigRequest method. +// req, resp := client.AssociateResolverQueryLogConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/AssociateResolverQueryLogConfig +func (c *Route53Resolver) AssociateResolverQueryLogConfigRequest(input *AssociateResolverQueryLogConfigInput) (req *request.Request, output *AssociateResolverQueryLogConfigOutput) { + op := &request.Operation{ + Name: opAssociateResolverQueryLogConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateResolverQueryLogConfigInput{} + } + + output = &AssociateResolverQueryLogConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateResolverQueryLogConfig API operation for Amazon Route 53 Resolver. +// +// Associates an Amazon VPC with a specified query logging configuration. Route +// 53 Resolver logs DNS queries that originate in all of the Amazon VPCs that +// are associated with a specified query logging configuration. To associate +// more than one VPC with a configuration, submit one AssociateResolverQueryLogConfig +// request for each VPC. +// +// The VPCs that you associate with a query logging configuration must be in +// the same Region as the configuration. +// +// To remove a VPC from a query logging configuration, see DisassociateResolverQueryLogConfig +// (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverQueryLogConfig.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Route 53 Resolver's +// API operation AssociateResolverQueryLogConfig for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more parameters in this request are not valid. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// * InvalidRequestException +// The request is invalid. +// +// * ResourceExistsException +// The resource that you tried to create already exists. +// +// * LimitExceededException +// The request caused one or more limits to be exceeded. +// +// * InternalServiceErrorException +// We encountered an unknown error. Try again in a few minutes. +// +// * ThrottlingException +// The request was throttled. Try again in a few minutes. +// +// * AccessDeniedException +// The current account doesn't have the IAM permissions required to perform +// the specified Resolver operation. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/AssociateResolverQueryLogConfig +func (c *Route53Resolver) AssociateResolverQueryLogConfig(input *AssociateResolverQueryLogConfigInput) (*AssociateResolverQueryLogConfigOutput, error) { + req, out := c.AssociateResolverQueryLogConfigRequest(input) + return out, req.Send() +} + +// AssociateResolverQueryLogConfigWithContext is the same as AssociateResolverQueryLogConfig with the addition of +// the ability to pass a context and additional request options. 
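A minimal sketch of calling the context-aware variant above, assuming the input carries the query logging configuration ID and the VPC ID in fields named ResolverQueryLogConfigId and ResourceId (those names do not appear in this hunk); both IDs are placeholders:

    // Sketch: associate a VPC with a query logging configuration, bounded by a timeout.
    func associateQueryLogConfig(client *route53resolver.Route53Resolver) error {
        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        _, err := client.AssociateResolverQueryLogConfigWithContext(ctx, &route53resolver.AssociateResolverQueryLogConfigInput{
            ResolverQueryLogConfigId: aws.String("rqlc-placeholder"), // assumed field name, placeholder ID
            ResourceId:               aws.String("vpc-placeholder"),  // assumed field name, placeholder VPC ID
        })
        return err
    }

(The function assumes imports of context, time, github.com/aws/aws-sdk-go/aws, and github.com/aws/aws-sdk-go/service/route53resolver.)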
+// +// See AssociateResolverQueryLogConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) AssociateResolverQueryLogConfigWithContext(ctx aws.Context, input *AssociateResolverQueryLogConfigInput, opts ...request.Option) (*AssociateResolverQueryLogConfigOutput, error) { + req, out := c.AssociateResolverQueryLogConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opAssociateResolverRule = "AssociateResolverRule" // AssociateResolverRuleRequest generates a "aws/request.Request" representing the @@ -157,11 +269,11 @@ func (c *Route53Resolver) AssociateResolverRuleRequest(input *AssociateResolverR // AssociateResolverRule API operation for Amazon Route 53 Resolver. // -// Associates a resolver rule with a VPC. When you associate a rule with a VPC, +// Associates a Resolver rule with a VPC. When you associate a rule with a VPC, // Resolver forwards all DNS queries for the domain name that is specified in // the rule and that originate in the VPC. The queries are forwarded to the // IP addresses for the DNS resolvers that are specified in the rule. For more -// information about rules, see CreateResolverRule. +// information about rules, see CreateResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverRule.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -177,6 +289,9 @@ func (c *Route53Resolver) AssociateResolverRuleRequest(input *AssociateResolverR // * InvalidRequestException // The request is invalid. // +// * LimitExceededException +// The request caused one or more limits to be exceeded. +// // * InvalidParameterException // One or more parameters in this request are not valid. // @@ -258,14 +373,14 @@ func (c *Route53Resolver) CreateResolverEndpointRequest(input *CreateResolverEnd // CreateResolverEndpoint API operation for Amazon Route 53 Resolver. // -// Creates a resolver endpoint. There are two types of resolver endpoints, inbound +// Creates a Resolver endpoint. There are two types of Resolver endpoints, inbound // and outbound: // -// * An inbound resolver endpoint forwards DNS queries to the DNS service -// for a VPC from your network or another VPC. +// * An inbound Resolver endpoint forwards DNS queries to the DNS service +// for a VPC from your network. // -// * An outbound resolver endpoint forwards DNS queries from the DNS service -// for a VPC to your network or another VPC. +// * An outbound Resolver endpoint forwards DNS queries from the DNS service +// for a VPC to your network. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -318,6 +433,119 @@ func (c *Route53Resolver) CreateResolverEndpointWithContext(ctx aws.Context, inp return out, req.Send() } +const opCreateResolverQueryLogConfig = "CreateResolverQueryLogConfig" + +// CreateResolverQueryLogConfigRequest generates a "aws/request.Request" representing the +// client's request for the CreateResolverQueryLogConfig operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateResolverQueryLogConfig for more information on using the CreateResolverQueryLogConfig +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateResolverQueryLogConfigRequest method. +// req, resp := client.CreateResolverQueryLogConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/CreateResolverQueryLogConfig +func (c *Route53Resolver) CreateResolverQueryLogConfigRequest(input *CreateResolverQueryLogConfigInput) (req *request.Request, output *CreateResolverQueryLogConfigOutput) { + op := &request.Operation{ + Name: opCreateResolverQueryLogConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateResolverQueryLogConfigInput{} + } + + output = &CreateResolverQueryLogConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateResolverQueryLogConfig API operation for Amazon Route 53 Resolver. +// +// Creates a Resolver query logging configuration, which defines where you want +// Resolver to save DNS query logs that originate in your VPCs. Resolver can +// log queries only for VPCs that are in the same Region as the query logging +// configuration. +// +// To specify which VPCs you want to log queries for, you use AssociateResolverQueryLogConfig. +// For more information, see AssociateResolverQueryLogConfig (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_AssociateResolverQueryLogConfig.html). +// +// You can optionally use AWS Resource Access Manager (AWS RAM) to share a query +// logging configuration with other AWS accounts. The other accounts can then +// associate VPCs with the configuration. The query logs that Resolver creates +// for a configuration include all DNS queries that originate in all VPCs that +// are associated with the configuration. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Route 53 Resolver's +// API operation CreateResolverQueryLogConfig for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more parameters in this request are not valid. +// +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// * InvalidRequestException +// The request is invalid. +// +// * ResourceExistsException +// The resource that you tried to create already exists. +// +// * LimitExceededException +// The request caused one or more limits to be exceeded. +// +// * InternalServiceErrorException +// We encountered an unknown error. Try again in a few minutes. +// +// * ThrottlingException +// The request was throttled. Try again in a few minutes. +// +// * AccessDeniedException +// The current account doesn't have the IAM permissions required to perform +// the specified Resolver operation. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/CreateResolverQueryLogConfig +func (c *Route53Resolver) CreateResolverQueryLogConfig(input *CreateResolverQueryLogConfigInput) (*CreateResolverQueryLogConfigOutput, error) { + req, out := c.CreateResolverQueryLogConfigRequest(input) + return out, req.Send() +} + +// CreateResolverQueryLogConfigWithContext is the same as CreateResolverQueryLogConfig with the addition of +// the ability to pass a context and additional request options. +// +// See CreateResolverQueryLogConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) CreateResolverQueryLogConfigWithContext(ctx aws.Context, input *CreateResolverQueryLogConfigInput, opts ...request.Option) (*CreateResolverQueryLogConfigOutput, error) { + req, out := c.CreateResolverQueryLogConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateResolverRule = "CreateResolverRule" // CreateResolverRuleRequest generates a "aws/request.Request" representing the @@ -362,7 +590,7 @@ func (c *Route53Resolver) CreateResolverRuleRequest(input *CreateResolverRuleInp // CreateResolverRule API operation for Amazon Route 53 Resolver. // -// For DNS queries that originate in your VPCs, specifies which resolver endpoint +// For DNS queries that originate in your VPCs, specifies which Resolver endpoint // the queries pass through, one domain name that you want to forward to your // network, and the IP addresses of the DNS resolvers in your network. // @@ -464,14 +692,13 @@ func (c *Route53Resolver) DeleteResolverEndpointRequest(input *DeleteResolverEnd // DeleteResolverEndpoint API operation for Amazon Route 53 Resolver. // -// Deletes a resolver endpoint. The effect of deleting a resolver endpoint depends -// on whether it's an inbound or an outbound resolver endpoint: +// Deletes a Resolver endpoint. The effect of deleting a Resolver endpoint depends +// on whether it's an inbound or an outbound Resolver endpoint: // -// * Inbound: DNS queries from your network or another VPC are no longer -// routed to the DNS service for the specified VPC. +// * Inbound: DNS queries from your network are no longer routed to the DNS +// service for the specified VPC. // -// * Outbound: DNS queries from a VPC are no longer routed to your network -// or to another VPC. +// * Outbound: DNS queries from a VPC are no longer routed to your network. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -518,6 +745,115 @@ func (c *Route53Resolver) DeleteResolverEndpointWithContext(ctx aws.Context, inp return out, req.Send() } +const opDeleteResolverQueryLogConfig = "DeleteResolverQueryLogConfig" + +// DeleteResolverQueryLogConfigRequest generates a "aws/request.Request" representing the +// client's request for the DeleteResolverQueryLogConfig operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
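To make the CreateResolverQueryLogConfig description above concrete, a hedged sketch follows; the Name, DestinationArn, and CreatorRequestId field names are assumed from the wider Resolver API (they are not shown in this hunk), and the destination ARN is a placeholder CloudWatch Logs log group:

    // Sketch: create a query logging configuration that delivers logs to CloudWatch Logs.
    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/route53resolver"
    )

    func main() {
        client := route53resolver.New(session.Must(session.NewSession()))

        out, err := client.CreateResolverQueryLogConfig(&route53resolver.CreateResolverQueryLogConfigInput{
            Name:             aws.String("example-query-logs"),                                    // assumed field name
            DestinationArn:   aws.String("arn:aws:logs:us-east-1:111122223333:log-group:example"), // placeholder ARN (assumed field name)
            CreatorRequestId: aws.String("example-request-id"),                                    // assumed field name; any unique string
        })
        if err != nil {
            fmt.Println("CreateResolverQueryLogConfig failed:", err)
            return
        }
        fmt.Println(out)
    }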
+// +// See DeleteResolverQueryLogConfig for more information on using the DeleteResolverQueryLogConfig +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteResolverQueryLogConfigRequest method. +// req, resp := client.DeleteResolverQueryLogConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DeleteResolverQueryLogConfig +func (c *Route53Resolver) DeleteResolverQueryLogConfigRequest(input *DeleteResolverQueryLogConfigInput) (req *request.Request, output *DeleteResolverQueryLogConfigOutput) { + op := &request.Operation{ + Name: opDeleteResolverQueryLogConfig, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteResolverQueryLogConfigInput{} + } + + output = &DeleteResolverQueryLogConfigOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteResolverQueryLogConfig API operation for Amazon Route 53 Resolver. +// +// Deletes a query logging configuration. When you delete a configuration, Resolver +// stops logging DNS queries for all of the Amazon VPCs that are associated +// with the configuration. This also applies if the query logging configuration +// is shared with other AWS accounts, and the other accounts have associated +// VPCs with the shared configuration. +// +// Before you can delete a query logging configuration, you must first disassociate +// all VPCs from the configuration. See DisassociateResolverQueryLogConfig (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverQueryLogConfig.html). +// +// If you used Resource Access Manager (RAM) to share a query logging configuration +// with other accounts, you must stop sharing the configuration before you can +// delete a configuration. The accounts that you shared the configuration with +// can first disassociate VPCs that they associated with the configuration, +// but that's not necessary. If you stop sharing the configuration, those VPCs +// are automatically disassociated from the configuration. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Route 53 Resolver's +// API operation DeleteResolverQueryLogConfig for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// * InvalidParameterException +// One or more parameters in this request are not valid. +// +// * InvalidRequestException +// The request is invalid. +// +// * InternalServiceErrorException +// We encountered an unknown error. Try again in a few minutes. +// +// * ThrottlingException +// The request was throttled. Try again in a few minutes. +// +// * AccessDeniedException +// The current account doesn't have the IAM permissions required to perform +// the specified Resolver operation. 
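The error types listed above come back as awserr.Error values, so the runtime type assertion that these doc comments keep referring to looks roughly like the sketch below (it assumes a *route53resolver.Route53Resolver client, a populated DeleteResolverQueryLogConfigInput, and the github.com/aws/aws-sdk-go/aws/awserr import):

    // Sketch: branch on the service error code returned by a Resolver operation.
    if _, err := client.DeleteResolverQueryLogConfig(input); err != nil {
        if aerr, ok := err.(awserr.Error); ok {
            switch aerr.Code() {
            case route53resolver.ErrCodeResourceNotFoundException:
                // The configuration is already gone; treat this as success.
            default:
                fmt.Println(aerr.Code(), aerr.Message())
            }
        } else {
            fmt.Println(err)
        }
    }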
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DeleteResolverQueryLogConfig +func (c *Route53Resolver) DeleteResolverQueryLogConfig(input *DeleteResolverQueryLogConfigInput) (*DeleteResolverQueryLogConfigOutput, error) { + req, out := c.DeleteResolverQueryLogConfigRequest(input) + return out, req.Send() +} + +// DeleteResolverQueryLogConfigWithContext is the same as DeleteResolverQueryLogConfig with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteResolverQueryLogConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) DeleteResolverQueryLogConfigWithContext(ctx aws.Context, input *DeleteResolverQueryLogConfigInput, opts ...request.Option) (*DeleteResolverQueryLogConfigOutput, error) { + req, out := c.DeleteResolverQueryLogConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteResolverRule = "DeleteResolverRule" // DeleteResolverRuleRequest generates a "aws/request.Request" representing the @@ -562,9 +898,9 @@ func (c *Route53Resolver) DeleteResolverRuleRequest(input *DeleteResolverRuleInp // DeleteResolverRule API operation for Amazon Route 53 Resolver. // -// Deletes a resolver rule. Before you can delete a resolver rule, you must -// disassociate it from all the VPCs that you associated the resolver rule with. -// For more infomation, see DisassociateResolverRule. +// Deletes a Resolver rule. Before you can delete a Resolver rule, you must +// disassociate it from all the VPCs that you associated the Resolver rule with. +// For more information, see DisassociateResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverRule.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -655,11 +991,12 @@ func (c *Route53Resolver) DisassociateResolverEndpointIpAddressRequest(input *Di // DisassociateResolverEndpointIpAddress API operation for Amazon Route 53 Resolver. // -// Removes IP addresses from an inbound or an outbound resolver endpoint. If +// Removes IP addresses from an inbound or an outbound Resolver endpoint. If // you want to remove more than one IP address, submit one DisassociateResolverEndpointIpAddress // request for each IP address. // -// To add an IP address to an endpoint, see AssociateResolverEndpointIpAddress. +// To add an IP address to an endpoint, see AssociateResolverEndpointIpAddress +// (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_AssociateResolverEndpointIpAddress.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -709,62 +1046,68 @@ func (c *Route53Resolver) DisassociateResolverEndpointIpAddressWithContext(ctx a return out, req.Send() } -const opDisassociateResolverRule = "DisassociateResolverRule" +const opDisassociateResolverQueryLogConfig = "DisassociateResolverQueryLogConfig" -// DisassociateResolverRuleRequest generates a "aws/request.Request" representing the -// client's request for the DisassociateResolverRule operation. The "output" return +// DisassociateResolverQueryLogConfigRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateResolverQueryLogConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DisassociateResolverRule for more information on using the DisassociateResolverRule +// See DisassociateResolverQueryLogConfig for more information on using the DisassociateResolverQueryLogConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DisassociateResolverRuleRequest method. -// req, resp := client.DisassociateResolverRuleRequest(params) +// // Example sending a request using the DisassociateResolverQueryLogConfigRequest method. +// req, resp := client.DisassociateResolverQueryLogConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DisassociateResolverRule -func (c *Route53Resolver) DisassociateResolverRuleRequest(input *DisassociateResolverRuleInput) (req *request.Request, output *DisassociateResolverRuleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DisassociateResolverQueryLogConfig +func (c *Route53Resolver) DisassociateResolverQueryLogConfigRequest(input *DisassociateResolverQueryLogConfigInput) (req *request.Request, output *DisassociateResolverQueryLogConfigOutput) { op := &request.Operation{ - Name: opDisassociateResolverRule, + Name: opDisassociateResolverQueryLogConfig, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DisassociateResolverRuleInput{} + input = &DisassociateResolverQueryLogConfigInput{} } - output = &DisassociateResolverRuleOutput{} + output = &DisassociateResolverQueryLogConfigOutput{} req = c.newRequest(op, input, output) return } -// DisassociateResolverRule API operation for Amazon Route 53 Resolver. +// DisassociateResolverQueryLogConfig API operation for Amazon Route 53 Resolver. // -// Removes the association between a specified resolver rule and a specified -// VPC. +// Disassociates a VPC from a query logging configuration. +// +// Before you can delete a query logging configuration, you must first disassociate +// all VPCs from the configuration. 
If you used Resource Access Manager (RAM) +// to share a query logging configuration with other accounts, VPCs can be disassociated +// from the configuration in the following ways: // -// If you disassociate a resolver rule from a VPC, Resolver stops forwarding -// DNS queries for the domain name that you specified in the resolver rule. +// * The accounts that you shared the configuration with can disassociate +// VPCs from the configuration. +// +// * You can stop sharing the configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Route 53 Resolver's -// API operation DisassociateResolverRule for usage and error information. +// API operation DisassociateResolverQueryLogConfig for usage and error information. // // Returned Error Types: // * ResourceNotFoundException @@ -773,15 +1116,114 @@ func (c *Route53Resolver) DisassociateResolverRuleRequest(input *DisassociateRes // * InvalidParameterException // One or more parameters in this request are not valid. // +// * InvalidRequestException +// The request is invalid. +// // * InternalServiceErrorException // We encountered an unknown error. Try again in a few minutes. // // * ThrottlingException // The request was throttled. Try again in a few minutes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DisassociateResolverRule -func (c *Route53Resolver) DisassociateResolverRule(input *DisassociateResolverRuleInput) (*DisassociateResolverRuleOutput, error) { - req, out := c.DisassociateResolverRuleRequest(input) +// * AccessDeniedException +// The current account doesn't have the IAM permissions required to perform +// the specified Resolver operation. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DisassociateResolverQueryLogConfig +func (c *Route53Resolver) DisassociateResolverQueryLogConfig(input *DisassociateResolverQueryLogConfigInput) (*DisassociateResolverQueryLogConfigOutput, error) { + req, out := c.DisassociateResolverQueryLogConfigRequest(input) + return out, req.Send() +} + +// DisassociateResolverQueryLogConfigWithContext is the same as DisassociateResolverQueryLogConfig with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateResolverQueryLogConfig for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) DisassociateResolverQueryLogConfigWithContext(ctx aws.Context, input *DisassociateResolverQueryLogConfigInput, opts ...request.Option) (*DisassociateResolverQueryLogConfigOutput, error) { + req, out := c.DisassociateResolverQueryLogConfigRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDisassociateResolverRule = "DisassociateResolverRule" + +// DisassociateResolverRuleRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateResolverRule operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
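Putting the disassociate-then-delete ordering described above into code, a minimal sketch (input field names are assumed, as in the earlier sketches, and in practice the association may take a short time to finish deleting before the configuration itself can be removed):

    // Sketch: remove a VPC from a query logging configuration, then delete the configuration.
    func removeVPCAndDeleteConfig(client *route53resolver.Route53Resolver, configID, vpcID string) error {
        if _, err := client.DisassociateResolverQueryLogConfig(&route53resolver.DisassociateResolverQueryLogConfigInput{
            ResolverQueryLogConfigId: aws.String(configID), // assumed field name
            ResourceId:               aws.String(vpcID),    // assumed field name
        }); err != nil {
            return err
        }

        // In practice, wait or retry here until the association has finished deleting.
        _, err := client.DeleteResolverQueryLogConfig(&route53resolver.DeleteResolverQueryLogConfigInput{
            ResolverQueryLogConfigId: aws.String(configID), // assumed field name
        })
        return err
    }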
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisassociateResolverRule for more information on using the DisassociateResolverRule +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisassociateResolverRuleRequest method. +// req, resp := client.DisassociateResolverRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DisassociateResolverRule +func (c *Route53Resolver) DisassociateResolverRuleRequest(input *DisassociateResolverRuleInput) (req *request.Request, output *DisassociateResolverRuleOutput) { + op := &request.Operation{ + Name: opDisassociateResolverRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateResolverRuleInput{} + } + + output = &DisassociateResolverRuleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DisassociateResolverRule API operation for Amazon Route 53 Resolver. +// +// Removes the association between a specified Resolver rule and a specified +// VPC. +// +// If you disassociate a Resolver rule from a VPC, Resolver stops forwarding +// DNS queries for the domain name that you specified in the Resolver rule. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Route 53 Resolver's +// API operation DisassociateResolverRule for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// * InvalidParameterException +// One or more parameters in this request are not valid. +// +// * InternalServiceErrorException +// We encountered an unknown error. Try again in a few minutes. +// +// * ThrottlingException +// The request was throttled. Try again in a few minutes. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/DisassociateResolverRule +func (c *Route53Resolver) DisassociateResolverRule(input *DisassociateResolverRuleInput) (*DisassociateResolverRuleOutput, error) { + req, out := c.DisassociateResolverRuleRequest(input) return out, req.Send() } @@ -845,8 +1287,8 @@ func (c *Route53Resolver) GetResolverEndpointRequest(input *GetResolverEndpointI // GetResolverEndpoint API operation for Amazon Route 53 Resolver. // -// Gets information about a specified resolver endpoint, such as whether it's -// an inbound or an outbound resolver endpoint, and the current status of the +// Gets information about a specified Resolver endpoint, such as whether it's +// an inbound or an outbound Resolver endpoint, and the current status of the // endpoint. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -891,65 +1333,68 @@ func (c *Route53Resolver) GetResolverEndpointWithContext(ctx aws.Context, input return out, req.Send() } -const opGetResolverRule = "GetResolverRule" +const opGetResolverQueryLogConfig = "GetResolverQueryLogConfig" -// GetResolverRuleRequest generates a "aws/request.Request" representing the -// client's request for the GetResolverRule operation. The "output" return +// GetResolverQueryLogConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetResolverQueryLogConfig operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetResolverRule for more information on using the GetResolverRule +// See GetResolverQueryLogConfig for more information on using the GetResolverQueryLogConfig // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetResolverRuleRequest method. -// req, resp := client.GetResolverRuleRequest(params) +// // Example sending a request using the GetResolverQueryLogConfigRequest method. +// req, resp := client.GetResolverQueryLogConfigRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRule -func (c *Route53Resolver) GetResolverRuleRequest(input *GetResolverRuleInput) (req *request.Request, output *GetResolverRuleOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverQueryLogConfig +func (c *Route53Resolver) GetResolverQueryLogConfigRequest(input *GetResolverQueryLogConfigInput) (req *request.Request, output *GetResolverQueryLogConfigOutput) { op := &request.Operation{ - Name: opGetResolverRule, + Name: opGetResolverQueryLogConfig, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetResolverRuleInput{} + input = &GetResolverQueryLogConfigInput{} } - output = &GetResolverRuleOutput{} + output = &GetResolverQueryLogConfigOutput{} req = c.newRequest(op, input, output) return } -// GetResolverRule API operation for Amazon Route 53 Resolver. +// GetResolverQueryLogConfig API operation for Amazon Route 53 Resolver. // -// Gets information about a specified resolver rule, such as the domain name -// that the rule forwards DNS queries for and the ID of the outbound resolver -// endpoint that the rule is associated with. +// Gets information about a specified Resolver query logging configuration, +// such as the number of VPCs that the configuration is logging queries for +// and the location that logs are sent to. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Route 53 Resolver's -// API operation GetResolverRule for usage and error information. +// API operation GetResolverQueryLogConfig for usage and error information. // // Returned Error Types: // * ResourceNotFoundException // The specified resource doesn't exist. 
// +// * InvalidRequestException +// The request is invalid. +// // * InvalidParameterException // One or more parameters in this request are not valid. // @@ -959,86 +1404,94 @@ func (c *Route53Resolver) GetResolverRuleRequest(input *GetResolverRuleInput) (r // * ThrottlingException // The request was throttled. Try again in a few minutes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRule -func (c *Route53Resolver) GetResolverRule(input *GetResolverRuleInput) (*GetResolverRuleOutput, error) { - req, out := c.GetResolverRuleRequest(input) +// * AccessDeniedException +// The current account doesn't have the IAM permissions required to perform +// the specified Resolver operation. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverQueryLogConfig +func (c *Route53Resolver) GetResolverQueryLogConfig(input *GetResolverQueryLogConfigInput) (*GetResolverQueryLogConfigOutput, error) { + req, out := c.GetResolverQueryLogConfigRequest(input) return out, req.Send() } -// GetResolverRuleWithContext is the same as GetResolverRule with the addition of +// GetResolverQueryLogConfigWithContext is the same as GetResolverQueryLogConfig with the addition of // the ability to pass a context and additional request options. // -// See GetResolverRule for details on how to use this API operation. +// See GetResolverQueryLogConfig for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Route53Resolver) GetResolverRuleWithContext(ctx aws.Context, input *GetResolverRuleInput, opts ...request.Option) (*GetResolverRuleOutput, error) { - req, out := c.GetResolverRuleRequest(input) +func (c *Route53Resolver) GetResolverQueryLogConfigWithContext(ctx aws.Context, input *GetResolverQueryLogConfigInput, opts ...request.Option) (*GetResolverQueryLogConfigOutput, error) { + req, out := c.GetResolverQueryLogConfigRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetResolverRuleAssociation = "GetResolverRuleAssociation" +const opGetResolverQueryLogConfigAssociation = "GetResolverQueryLogConfigAssociation" -// GetResolverRuleAssociationRequest generates a "aws/request.Request" representing the -// client's request for the GetResolverRuleAssociation operation. The "output" return +// GetResolverQueryLogConfigAssociationRequest generates a "aws/request.Request" representing the +// client's request for the GetResolverQueryLogConfigAssociation operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetResolverRuleAssociation for more information on using the GetResolverRuleAssociation +// See GetResolverQueryLogConfigAssociation for more information on using the GetResolverQueryLogConfigAssociation // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetResolverRuleAssociationRequest method. 
-// req, resp := client.GetResolverRuleAssociationRequest(params) +// // Example sending a request using the GetResolverQueryLogConfigAssociationRequest method. +// req, resp := client.GetResolverQueryLogConfigAssociationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRuleAssociation -func (c *Route53Resolver) GetResolverRuleAssociationRequest(input *GetResolverRuleAssociationInput) (req *request.Request, output *GetResolverRuleAssociationOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverQueryLogConfigAssociation +func (c *Route53Resolver) GetResolverQueryLogConfigAssociationRequest(input *GetResolverQueryLogConfigAssociationInput) (req *request.Request, output *GetResolverQueryLogConfigAssociationOutput) { op := &request.Operation{ - Name: opGetResolverRuleAssociation, + Name: opGetResolverQueryLogConfigAssociation, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetResolverRuleAssociationInput{} + input = &GetResolverQueryLogConfigAssociationInput{} } - output = &GetResolverRuleAssociationOutput{} + output = &GetResolverQueryLogConfigAssociationOutput{} req = c.newRequest(op, input, output) return } -// GetResolverRuleAssociation API operation for Amazon Route 53 Resolver. +// GetResolverQueryLogConfigAssociation API operation for Amazon Route 53 Resolver. // -// Gets information about an association between a specified resolver rule and -// a VPC. You associate a resolver rule and a VPC using AssociateResolverRule. +// Gets information about a specified association between a Resolver query logging +// configuration and an Amazon VPC. When you associate a VPC with a query logging +// configuration, Resolver logs DNS queries that originate in that VPC. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Route 53 Resolver's -// API operation GetResolverRuleAssociation for usage and error information. +// API operation GetResolverQueryLogConfigAssociation for usage and error information. // // Returned Error Types: // * ResourceNotFoundException // The specified resource doesn't exist. // +// * InvalidRequestException +// The request is invalid. +// // * InvalidParameterException // One or more parameters in this request are not valid. // @@ -1048,173 +1501,180 @@ func (c *Route53Resolver) GetResolverRuleAssociationRequest(input *GetResolverRu // * ThrottlingException // The request was throttled. Try again in a few minutes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRuleAssociation -func (c *Route53Resolver) GetResolverRuleAssociation(input *GetResolverRuleAssociationInput) (*GetResolverRuleAssociationOutput, error) { - req, out := c.GetResolverRuleAssociationRequest(input) +// * AccessDeniedException +// The current account doesn't have the IAM permissions required to perform +// the specified Resolver operation. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverQueryLogConfigAssociation +func (c *Route53Resolver) GetResolverQueryLogConfigAssociation(input *GetResolverQueryLogConfigAssociationInput) (*GetResolverQueryLogConfigAssociationOutput, error) { + req, out := c.GetResolverQueryLogConfigAssociationRequest(input) return out, req.Send() } -// GetResolverRuleAssociationWithContext is the same as GetResolverRuleAssociation with the addition of +// GetResolverQueryLogConfigAssociationWithContext is the same as GetResolverQueryLogConfigAssociation with the addition of // the ability to pass a context and additional request options. // -// See GetResolverRuleAssociation for details on how to use this API operation. +// See GetResolverQueryLogConfigAssociation for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Route53Resolver) GetResolverRuleAssociationWithContext(ctx aws.Context, input *GetResolverRuleAssociationInput, opts ...request.Option) (*GetResolverRuleAssociationOutput, error) { - req, out := c.GetResolverRuleAssociationRequest(input) +func (c *Route53Resolver) GetResolverQueryLogConfigAssociationWithContext(ctx aws.Context, input *GetResolverQueryLogConfigAssociationInput, opts ...request.Option) (*GetResolverQueryLogConfigAssociationOutput, error) { + req, out := c.GetResolverQueryLogConfigAssociationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetResolverRulePolicy = "GetResolverRulePolicy" +const opGetResolverQueryLogConfigPolicy = "GetResolverQueryLogConfigPolicy" -// GetResolverRulePolicyRequest generates a "aws/request.Request" representing the -// client's request for the GetResolverRulePolicy operation. The "output" return +// GetResolverQueryLogConfigPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetResolverQueryLogConfigPolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetResolverRulePolicy for more information on using the GetResolverRulePolicy +// See GetResolverQueryLogConfigPolicy for more information on using the GetResolverQueryLogConfigPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetResolverRulePolicyRequest method. -// req, resp := client.GetResolverRulePolicyRequest(params) +// // Example sending a request using the GetResolverQueryLogConfigPolicyRequest method. 
+// req, resp := client.GetResolverQueryLogConfigPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRulePolicy -func (c *Route53Resolver) GetResolverRulePolicyRequest(input *GetResolverRulePolicyInput) (req *request.Request, output *GetResolverRulePolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverQueryLogConfigPolicy +func (c *Route53Resolver) GetResolverQueryLogConfigPolicyRequest(input *GetResolverQueryLogConfigPolicyInput) (req *request.Request, output *GetResolverQueryLogConfigPolicyOutput) { op := &request.Operation{ - Name: opGetResolverRulePolicy, + Name: opGetResolverQueryLogConfigPolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetResolverRulePolicyInput{} + input = &GetResolverQueryLogConfigPolicyInput{} } - output = &GetResolverRulePolicyOutput{} + output = &GetResolverQueryLogConfigPolicyOutput{} req = c.newRequest(op, input, output) return } -// GetResolverRulePolicy API operation for Amazon Route 53 Resolver. +// GetResolverQueryLogConfigPolicy API operation for Amazon Route 53 Resolver. // -// Gets information about a resolver rule policy. A resolver rule policy specifies -// the Resolver operations and resources that you want to allow another AWS -// account to be able to use. +// Gets information about a query logging policy. A query logging policy specifies +// the Resolver query logging operations and resources that you want to allow +// another AWS account to be able to use. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Route 53 Resolver's -// API operation GetResolverRulePolicy for usage and error information. +// API operation GetResolverQueryLogConfigPolicy for usage and error information. // // Returned Error Types: // * InvalidParameterException // One or more parameters in this request are not valid. // +// * InvalidRequestException +// The request is invalid. +// // * UnknownResourceException // The specified resource doesn't exist. // // * InternalServiceErrorException // We encountered an unknown error. Try again in a few minutes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRulePolicy -func (c *Route53Resolver) GetResolverRulePolicy(input *GetResolverRulePolicyInput) (*GetResolverRulePolicyOutput, error) { - req, out := c.GetResolverRulePolicyRequest(input) +// * AccessDeniedException +// The current account doesn't have the IAM permissions required to perform +// the specified Resolver operation. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverQueryLogConfigPolicy +func (c *Route53Resolver) GetResolverQueryLogConfigPolicy(input *GetResolverQueryLogConfigPolicyInput) (*GetResolverQueryLogConfigPolicyOutput, error) { + req, out := c.GetResolverQueryLogConfigPolicyRequest(input) return out, req.Send() } -// GetResolverRulePolicyWithContext is the same as GetResolverRulePolicy with the addition of +// GetResolverQueryLogConfigPolicyWithContext is the same as GetResolverQueryLogConfigPolicy with the addition of // the ability to pass a context and additional request options. 
// -// See GetResolverRulePolicy for details on how to use this API operation. +// See GetResolverQueryLogConfigPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Route53Resolver) GetResolverRulePolicyWithContext(ctx aws.Context, input *GetResolverRulePolicyInput, opts ...request.Option) (*GetResolverRulePolicyOutput, error) { - req, out := c.GetResolverRulePolicyRequest(input) +func (c *Route53Resolver) GetResolverQueryLogConfigPolicyWithContext(ctx aws.Context, input *GetResolverQueryLogConfigPolicyInput, opts ...request.Option) (*GetResolverQueryLogConfigPolicyOutput, error) { + req, out := c.GetResolverQueryLogConfigPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListResolverEndpointIpAddresses = "ListResolverEndpointIpAddresses" +const opGetResolverRule = "GetResolverRule" -// ListResolverEndpointIpAddressesRequest generates a "aws/request.Request" representing the -// client's request for the ListResolverEndpointIpAddresses operation. The "output" return +// GetResolverRuleRequest generates a "aws/request.Request" representing the +// client's request for the GetResolverRule operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListResolverEndpointIpAddresses for more information on using the ListResolverEndpointIpAddresses +// See GetResolverRule for more information on using the GetResolverRule // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListResolverEndpointIpAddressesRequest method. -// req, resp := client.ListResolverEndpointIpAddressesRequest(params) +// // Example sending a request using the GetResolverRuleRequest method. 
+// req, resp := client.GetResolverRuleRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverEndpointIpAddresses -func (c *Route53Resolver) ListResolverEndpointIpAddressesRequest(input *ListResolverEndpointIpAddressesInput) (req *request.Request, output *ListResolverEndpointIpAddressesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRule +func (c *Route53Resolver) GetResolverRuleRequest(input *GetResolverRuleInput) (req *request.Request, output *GetResolverRuleOutput) { op := &request.Operation{ - Name: opListResolverEndpointIpAddresses, + Name: opGetResolverRule, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListResolverEndpointIpAddressesInput{} + input = &GetResolverRuleInput{} } - output = &ListResolverEndpointIpAddressesOutput{} + output = &GetResolverRuleOutput{} req = c.newRequest(op, input, output) return } -// ListResolverEndpointIpAddresses API operation for Amazon Route 53 Resolver. +// GetResolverRule API operation for Amazon Route 53 Resolver. // -// Gets the IP addresses for a specified resolver endpoint. +// Gets information about a specified Resolver rule, such as the domain name +// that the rule forwards DNS queries for and the ID of the outbound Resolver +// endpoint that the rule is associated with. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Route 53 Resolver's -// API operation ListResolverEndpointIpAddresses for usage and error information. +// API operation GetResolverRule for usage and error information. // // Returned Error Types: // * ResourceNotFoundException @@ -1226,79 +1686,654 @@ func (c *Route53Resolver) ListResolverEndpointIpAddressesRequest(input *ListReso // * InternalServiceErrorException // We encountered an unknown error. Try again in a few minutes. // -// * InvalidNextTokenException -// The value that you specified for NextToken in a List request isn't valid. -// // * ThrottlingException // The request was throttled. Try again in a few minutes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverEndpointIpAddresses -func (c *Route53Resolver) ListResolverEndpointIpAddresses(input *ListResolverEndpointIpAddressesInput) (*ListResolverEndpointIpAddressesOutput, error) { - req, out := c.ListResolverEndpointIpAddressesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRule +func (c *Route53Resolver) GetResolverRule(input *GetResolverRuleInput) (*GetResolverRuleOutput, error) { + req, out := c.GetResolverRuleRequest(input) return out, req.Send() } -// ListResolverEndpointIpAddressesWithContext is the same as ListResolverEndpointIpAddresses with the addition of +// GetResolverRuleWithContext is the same as GetResolverRule with the addition of // the ability to pass a context and additional request options. // -// See ListResolverEndpointIpAddresses for details on how to use this API operation. +// See GetResolverRule for details on how to use this API operation. 
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Route53Resolver) ListResolverEndpointIpAddressesWithContext(ctx aws.Context, input *ListResolverEndpointIpAddressesInput, opts ...request.Option) (*ListResolverEndpointIpAddressesOutput, error) { - req, out := c.ListResolverEndpointIpAddressesRequest(input) +func (c *Route53Resolver) GetResolverRuleWithContext(ctx aws.Context, input *GetResolverRuleInput, opts ...request.Option) (*GetResolverRuleOutput, error) { + req, out := c.GetResolverRuleRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListResolverEndpointIpAddressesPages iterates over the pages of a ListResolverEndpointIpAddresses operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +const opGetResolverRuleAssociation = "GetResolverRuleAssociation" + +// GetResolverRuleAssociationRequest generates a "aws/request.Request" representing the +// client's request for the GetResolverRuleAssociation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See ListResolverEndpointIpAddresses method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: This operation can generate multiple requests to a service. +// See GetResolverRuleAssociation for more information on using the GetResolverRuleAssociation +// API call, and error handling. // -// // Example iterating over at most 3 pages of a ListResolverEndpointIpAddresses operation. -// pageNum := 0 -// err := client.ListResolverEndpointIpAddressesPages(params, -// func(page *route53resolver.ListResolverEndpointIpAddressesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -func (c *Route53Resolver) ListResolverEndpointIpAddressesPages(input *ListResolverEndpointIpAddressesInput, fn func(*ListResolverEndpointIpAddressesOutput, bool) bool) error { - return c.ListResolverEndpointIpAddressesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListResolverEndpointIpAddressesPagesWithContext same as ListResolverEndpointIpAddressesPages except -// it takes a Context and allows setting request options on the pages. // -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *Route53Resolver) ListResolverEndpointIpAddressesPagesWithContext(ctx aws.Context, input *ListResolverEndpointIpAddressesInput, fn func(*ListResolverEndpointIpAddressesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListResolverEndpointIpAddressesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListResolverEndpointIpAddressesRequest(inCpy) - req.SetContext(ctx) +// // Example sending a request using the GetResolverRuleAssociationRequest method. +// req, resp := client.GetResolverRuleAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRuleAssociation +func (c *Route53Resolver) GetResolverRuleAssociationRequest(input *GetResolverRuleAssociationInput) (req *request.Request, output *GetResolverRuleAssociationOutput) { + op := &request.Operation{ + Name: opGetResolverRuleAssociation, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetResolverRuleAssociationInput{} + } + + output = &GetResolverRuleAssociationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetResolverRuleAssociation API operation for Amazon Route 53 Resolver. +// +// Gets information about an association between a specified Resolver rule and +// a VPC. You associate a Resolver rule and a VPC using AssociateResolverRule +// (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_AssociateResolverRule.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Route 53 Resolver's +// API operation GetResolverRuleAssociation for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// * InvalidParameterException +// One or more parameters in this request are not valid. +// +// * InternalServiceErrorException +// We encountered an unknown error. Try again in a few minutes. +// +// * ThrottlingException +// The request was throttled. Try again in a few minutes. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRuleAssociation +func (c *Route53Resolver) GetResolverRuleAssociation(input *GetResolverRuleAssociationInput) (*GetResolverRuleAssociationOutput, error) { + req, out := c.GetResolverRuleAssociationRequest(input) + return out, req.Send() +} + +// GetResolverRuleAssociationWithContext is the same as GetResolverRuleAssociation with the addition of +// the ability to pass a context and additional request options. +// +// See GetResolverRuleAssociation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) GetResolverRuleAssociationWithContext(ctx aws.Context, input *GetResolverRuleAssociationInput, opts ...request.Option) (*GetResolverRuleAssociationOutput, error) { + req, out := c.GetResolverRuleAssociationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetResolverRulePolicy = "GetResolverRulePolicy" + +// GetResolverRulePolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetResolverRulePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetResolverRulePolicy for more information on using the GetResolverRulePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetResolverRulePolicyRequest method. +// req, resp := client.GetResolverRulePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRulePolicy +func (c *Route53Resolver) GetResolverRulePolicyRequest(input *GetResolverRulePolicyInput) (req *request.Request, output *GetResolverRulePolicyOutput) { + op := &request.Operation{ + Name: opGetResolverRulePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetResolverRulePolicyInput{} + } + + output = &GetResolverRulePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetResolverRulePolicy API operation for Amazon Route 53 Resolver. +// +// Gets information about a Resolver rule policy. A Resolver rule policy specifies +// the Resolver operations and resources that you want to allow another AWS +// account to be able to use. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Route 53 Resolver's +// API operation GetResolverRulePolicy for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more parameters in this request are not valid. +// +// * UnknownResourceException +// The specified resource doesn't exist. +// +// * InternalServiceErrorException +// We encountered an unknown error. Try again in a few minutes. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/GetResolverRulePolicy +func (c *Route53Resolver) GetResolverRulePolicy(input *GetResolverRulePolicyInput) (*GetResolverRulePolicyOutput, error) { + req, out := c.GetResolverRulePolicyRequest(input) + return out, req.Send() +} + +// GetResolverRulePolicyWithContext is the same as GetResolverRulePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetResolverRulePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *Route53Resolver) GetResolverRulePolicyWithContext(ctx aws.Context, input *GetResolverRulePolicyInput, opts ...request.Option) (*GetResolverRulePolicyOutput, error) { + req, out := c.GetResolverRulePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListResolverEndpointIpAddresses = "ListResolverEndpointIpAddresses" + +// ListResolverEndpointIpAddressesRequest generates a "aws/request.Request" representing the +// client's request for the ListResolverEndpointIpAddresses operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListResolverEndpointIpAddresses for more information on using the ListResolverEndpointIpAddresses +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListResolverEndpointIpAddressesRequest method. +// req, resp := client.ListResolverEndpointIpAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverEndpointIpAddresses +func (c *Route53Resolver) ListResolverEndpointIpAddressesRequest(input *ListResolverEndpointIpAddressesInput) (req *request.Request, output *ListResolverEndpointIpAddressesOutput) { + op := &request.Operation{ + Name: opListResolverEndpointIpAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListResolverEndpointIpAddressesInput{} + } + + output = &ListResolverEndpointIpAddressesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListResolverEndpointIpAddresses API operation for Amazon Route 53 Resolver. +// +// Gets the IP addresses for a specified Resolver endpoint. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Route 53 Resolver's +// API operation ListResolverEndpointIpAddresses for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The specified resource doesn't exist. +// +// * InvalidParameterException +// One or more parameters in this request are not valid. +// +// * InternalServiceErrorException +// We encountered an unknown error. Try again in a few minutes. +// +// * InvalidNextTokenException +// The value that you specified for NextToken in a List request isn't valid. +// +// * ThrottlingException +// The request was throttled. Try again in a few minutes. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverEndpointIpAddresses +func (c *Route53Resolver) ListResolverEndpointIpAddresses(input *ListResolverEndpointIpAddressesInput) (*ListResolverEndpointIpAddressesOutput, error) { + req, out := c.ListResolverEndpointIpAddressesRequest(input) + return out, req.Send() +} + +// ListResolverEndpointIpAddressesWithContext is the same as ListResolverEndpointIpAddresses with the addition of +// the ability to pass a context and additional request options. +// +// See ListResolverEndpointIpAddresses for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) ListResolverEndpointIpAddressesWithContext(ctx aws.Context, input *ListResolverEndpointIpAddressesInput, opts ...request.Option) (*ListResolverEndpointIpAddressesOutput, error) { + req, out := c.ListResolverEndpointIpAddressesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListResolverEndpointIpAddressesPages iterates over the pages of a ListResolverEndpointIpAddresses operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListResolverEndpointIpAddresses method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListResolverEndpointIpAddresses operation. +// pageNum := 0 +// err := client.ListResolverEndpointIpAddressesPages(params, +// func(page *route53resolver.ListResolverEndpointIpAddressesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53Resolver) ListResolverEndpointIpAddressesPages(input *ListResolverEndpointIpAddressesInput, fn func(*ListResolverEndpointIpAddressesOutput, bool) bool) error { + return c.ListResolverEndpointIpAddressesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListResolverEndpointIpAddressesPagesWithContext same as ListResolverEndpointIpAddressesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) ListResolverEndpointIpAddressesPagesWithContext(ctx aws.Context, input *ListResolverEndpointIpAddressesInput, fn func(*ListResolverEndpointIpAddressesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListResolverEndpointIpAddressesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListResolverEndpointIpAddressesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListResolverEndpointIpAddressesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListResolverEndpoints = "ListResolverEndpoints" + +// ListResolverEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the ListResolverEndpoints operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListResolverEndpoints for more information on using the ListResolverEndpoints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListResolverEndpointsRequest method. +// req, resp := client.ListResolverEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverEndpoints +func (c *Route53Resolver) ListResolverEndpointsRequest(input *ListResolverEndpointsInput) (req *request.Request, output *ListResolverEndpointsOutput) { + op := &request.Operation{ + Name: opListResolverEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListResolverEndpointsInput{} + } + + output = &ListResolverEndpointsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListResolverEndpoints API operation for Amazon Route 53 Resolver. +// +// Lists all the Resolver endpoints that were created using the current AWS +// account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Route 53 Resolver's +// API operation ListResolverEndpoints for usage and error information. +// +// Returned Error Types: +// * InvalidNextTokenException +// The value that you specified for NextToken in a List request isn't valid. +// +// * InvalidRequestException +// The request is invalid. +// +// * InvalidParameterException +// One or more parameters in this request are not valid. +// +// * InternalServiceErrorException +// We encountered an unknown error. Try again in a few minutes. +// +// * ThrottlingException +// The request was throttled. Try again in a few minutes. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverEndpoints +func (c *Route53Resolver) ListResolverEndpoints(input *ListResolverEndpointsInput) (*ListResolverEndpointsOutput, error) { + req, out := c.ListResolverEndpointsRequest(input) + return out, req.Send() +} + +// ListResolverEndpointsWithContext is the same as ListResolverEndpoints with the addition of +// the ability to pass a context and additional request options. +// +// See ListResolverEndpoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) ListResolverEndpointsWithContext(ctx aws.Context, input *ListResolverEndpointsInput, opts ...request.Option) (*ListResolverEndpointsOutput, error) { + req, out := c.ListResolverEndpointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListResolverEndpointsPages iterates over the pages of a ListResolverEndpoints operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListResolverEndpoints method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListResolverEndpoints operation. +// pageNum := 0 +// err := client.ListResolverEndpointsPages(params, +// func(page *route53resolver.ListResolverEndpointsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53Resolver) ListResolverEndpointsPages(input *ListResolverEndpointsInput, fn func(*ListResolverEndpointsOutput, bool) bool) error { + return c.ListResolverEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListResolverEndpointsPagesWithContext same as ListResolverEndpointsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) ListResolverEndpointsPagesWithContext(ctx aws.Context, input *ListResolverEndpointsInput, fn func(*ListResolverEndpointsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListResolverEndpointsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListResolverEndpointsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListResolverEndpointsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListResolverQueryLogConfigAssociations = "ListResolverQueryLogConfigAssociations" + +// ListResolverQueryLogConfigAssociationsRequest generates a "aws/request.Request" representing the +// client's request for the ListResolverQueryLogConfigAssociations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListResolverQueryLogConfigAssociations for more information on using the ListResolverQueryLogConfigAssociations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListResolverQueryLogConfigAssociationsRequest method. 
+// req, resp := client.ListResolverQueryLogConfigAssociationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverQueryLogConfigAssociations +func (c *Route53Resolver) ListResolverQueryLogConfigAssociationsRequest(input *ListResolverQueryLogConfigAssociationsInput) (req *request.Request, output *ListResolverQueryLogConfigAssociationsOutput) { + op := &request.Operation{ + Name: opListResolverQueryLogConfigAssociations, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListResolverQueryLogConfigAssociationsInput{} + } + + output = &ListResolverQueryLogConfigAssociationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListResolverQueryLogConfigAssociations API operation for Amazon Route 53 Resolver. +// +// Lists information about associations between Amazon VPCs and query logging +// configurations. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Route 53 Resolver's +// API operation ListResolverQueryLogConfigAssociations for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more parameters in this request are not valid. +// +// * InvalidRequestException +// The request is invalid. +// +// * LimitExceededException +// The request caused one or more limits to be exceeded. +// +// * InternalServiceErrorException +// We encountered an unknown error. Try again in a few minutes. +// +// * ThrottlingException +// The request was throttled. Try again in a few minutes. +// +// * AccessDeniedException +// The current account doesn't have the IAM permissions required to perform +// the specified Resolver operation. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverQueryLogConfigAssociations +func (c *Route53Resolver) ListResolverQueryLogConfigAssociations(input *ListResolverQueryLogConfigAssociationsInput) (*ListResolverQueryLogConfigAssociationsOutput, error) { + req, out := c.ListResolverQueryLogConfigAssociationsRequest(input) + return out, req.Send() +} + +// ListResolverQueryLogConfigAssociationsWithContext is the same as ListResolverQueryLogConfigAssociations with the addition of +// the ability to pass a context and additional request options. +// +// See ListResolverQueryLogConfigAssociations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) ListResolverQueryLogConfigAssociationsWithContext(ctx aws.Context, input *ListResolverQueryLogConfigAssociationsInput, opts ...request.Option) (*ListResolverQueryLogConfigAssociationsOutput, error) { + req, out := c.ListResolverQueryLogConfigAssociationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListResolverQueryLogConfigAssociationsPages iterates over the pages of a ListResolverQueryLogConfigAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListResolverQueryLogConfigAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListResolverQueryLogConfigAssociations operation. +// pageNum := 0 +// err := client.ListResolverQueryLogConfigAssociationsPages(params, +// func(page *route53resolver.ListResolverQueryLogConfigAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53Resolver) ListResolverQueryLogConfigAssociationsPages(input *ListResolverQueryLogConfigAssociationsInput, fn func(*ListResolverQueryLogConfigAssociationsOutput, bool) bool) error { + return c.ListResolverQueryLogConfigAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListResolverQueryLogConfigAssociationsPagesWithContext same as ListResolverQueryLogConfigAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) ListResolverQueryLogConfigAssociationsPagesWithContext(ctx aws.Context, input *ListResolverQueryLogConfigAssociationsInput, fn func(*ListResolverQueryLogConfigAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListResolverQueryLogConfigAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListResolverQueryLogConfigAssociationsRequest(inCpy) + req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } for p.Next() { - if !fn(p.Page().(*ListResolverEndpointIpAddressesOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListResolverQueryLogConfigAssociationsOutput), !p.HasNextPage()) { break } } @@ -1306,35 +2341,35 @@ func (c *Route53Resolver) ListResolverEndpointIpAddressesPagesWithContext(ctx aw return p.Err() } -const opListResolverEndpoints = "ListResolverEndpoints" +const opListResolverQueryLogConfigs = "ListResolverQueryLogConfigs" -// ListResolverEndpointsRequest generates a "aws/request.Request" representing the -// client's request for the ListResolverEndpoints operation. The "output" return +// ListResolverQueryLogConfigsRequest generates a "aws/request.Request" representing the +// client's request for the ListResolverQueryLogConfigs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListResolverEndpoints for more information on using the ListResolverEndpoints +// See ListResolverQueryLogConfigs for more information on using the ListResolverQueryLogConfigs // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListResolverEndpointsRequest method. -// req, resp := client.ListResolverEndpointsRequest(params) +// // Example sending a request using the ListResolverQueryLogConfigsRequest method. +// req, resp := client.ListResolverQueryLogConfigsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverEndpoints -func (c *Route53Resolver) ListResolverEndpointsRequest(input *ListResolverEndpointsInput) (req *request.Request, output *ListResolverEndpointsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverQueryLogConfigs +func (c *Route53Resolver) ListResolverQueryLogConfigsRequest(input *ListResolverQueryLogConfigsInput) (req *request.Request, output *ListResolverQueryLogConfigsOutput) { op := &request.Operation{ - Name: opListResolverEndpoints, + Name: opListResolverQueryLogConfigs, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -1346,25 +2381,26 @@ func (c *Route53Resolver) ListResolverEndpointsRequest(input *ListResolverEndpoi } if input == nil { - input = &ListResolverEndpointsInput{} + input = &ListResolverQueryLogConfigsInput{} } - output = &ListResolverEndpointsOutput{} + output = &ListResolverQueryLogConfigsOutput{} req = c.newRequest(op, input, output) return } -// ListResolverEndpoints API operation for Amazon Route 53 Resolver. +// ListResolverQueryLogConfigs API operation for Amazon Route 53 Resolver. // -// Lists all the resolver endpoints that were created using the current AWS -// account. +// Lists information about the specified query logging configurations. Each +// configuration defines where you want Resolver to save DNS query logs and +// specifies the VPCs that you want to log queries for. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Route 53 Resolver's -// API operation ListResolverEndpoints for usage and error information. +// API operation ListResolverQueryLogConfigs for usage and error information. // // Returned Error Types: // * InvalidNextTokenException @@ -1382,65 +2418,69 @@ func (c *Route53Resolver) ListResolverEndpointsRequest(input *ListResolverEndpoi // * ThrottlingException // The request was throttled. Try again in a few minutes. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverEndpoints -func (c *Route53Resolver) ListResolverEndpoints(input *ListResolverEndpointsInput) (*ListResolverEndpointsOutput, error) { - req, out := c.ListResolverEndpointsRequest(input) +// * AccessDeniedException +// The current account doesn't have the IAM permissions required to perform +// the specified Resolver operation. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListResolverQueryLogConfigs +func (c *Route53Resolver) ListResolverQueryLogConfigs(input *ListResolverQueryLogConfigsInput) (*ListResolverQueryLogConfigsOutput, error) { + req, out := c.ListResolverQueryLogConfigsRequest(input) return out, req.Send() } -// ListResolverEndpointsWithContext is the same as ListResolverEndpoints with the addition of +// ListResolverQueryLogConfigsWithContext is the same as ListResolverQueryLogConfigs with the addition of // the ability to pass a context and additional request options. // -// See ListResolverEndpoints for details on how to use this API operation. +// See ListResolverQueryLogConfigs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Route53Resolver) ListResolverEndpointsWithContext(ctx aws.Context, input *ListResolverEndpointsInput, opts ...request.Option) (*ListResolverEndpointsOutput, error) { - req, out := c.ListResolverEndpointsRequest(input) +func (c *Route53Resolver) ListResolverQueryLogConfigsWithContext(ctx aws.Context, input *ListResolverQueryLogConfigsInput, opts ...request.Option) (*ListResolverQueryLogConfigsOutput, error) { + req, out := c.ListResolverQueryLogConfigsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListResolverEndpointsPages iterates over the pages of a ListResolverEndpoints operation, +// ListResolverQueryLogConfigsPages iterates over the pages of a ListResolverQueryLogConfigs operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListResolverEndpoints method for more information on how to use this operation. +// See ListResolverQueryLogConfigs method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListResolverEndpoints operation. +// // Example iterating over at most 3 pages of a ListResolverQueryLogConfigs operation. // pageNum := 0 -// err := client.ListResolverEndpointsPages(params, -// func(page *route53resolver.ListResolverEndpointsOutput, lastPage bool) bool { +// err := client.ListResolverQueryLogConfigsPages(params, +// func(page *route53resolver.ListResolverQueryLogConfigsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *Route53Resolver) ListResolverEndpointsPages(input *ListResolverEndpointsInput, fn func(*ListResolverEndpointsOutput, bool) bool) error { - return c.ListResolverEndpointsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *Route53Resolver) ListResolverQueryLogConfigsPages(input *ListResolverQueryLogConfigsInput, fn func(*ListResolverQueryLogConfigsOutput, bool) bool) error { + return c.ListResolverQueryLogConfigsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListResolverEndpointsPagesWithContext same as ListResolverEndpointsPages except +// ListResolverQueryLogConfigsPagesWithContext same as ListResolverQueryLogConfigsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Route53Resolver) ListResolverEndpointsPagesWithContext(ctx aws.Context, input *ListResolverEndpointsInput, fn func(*ListResolverEndpointsOutput, bool) bool, opts ...request.Option) error { +func (c *Route53Resolver) ListResolverQueryLogConfigsPagesWithContext(ctx aws.Context, input *ListResolverQueryLogConfigsInput, fn func(*ListResolverQueryLogConfigsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListResolverEndpointsInput + var inCpy *ListResolverQueryLogConfigsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListResolverEndpointsRequest(inCpy) + req, _ := c.ListResolverQueryLogConfigsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -1448,7 +2488,7 @@ func (c *Route53Resolver) ListResolverEndpointsPagesWithContext(ctx aws.Context, } for p.Next() { - if !fn(p.Page().(*ListResolverEndpointsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListResolverQueryLogConfigsOutput), !p.HasNextPage()) { break } } @@ -1506,7 +2546,7 @@ func (c *Route53Resolver) ListResolverRuleAssociationsRequest(input *ListResolve // ListResolverRuleAssociations API operation for Amazon Route 53 Resolver. // -// Lists the associations that were created between resolver rules and VPCs +// Lists the associations that were created between Resolver rules and VPCs // using the current AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1656,7 +2696,7 @@ func (c *Route53Resolver) ListResolverRulesRequest(input *ListResolverRulesInput // ListResolverRules API operation for Amazon Route 53 Resolver. // -// Lists the resolver rules that were created using the current AWS account. +// Lists the Resolver rules that were created using the current AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1786,6 +2826,12 @@ func (c *Route53Resolver) ListTagsForResourceRequest(input *ListTagsForResourceI Name: opListTagsForResource, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -1824,26 +2870,175 @@ func (c *Route53Resolver) ListTagsForResourceRequest(input *ListTagsForResourceI // * InternalServiceErrorException // We encountered an unknown error. Try again in a few minutes. // -// * ThrottlingException -// The request was throttled. Try again in a few minutes. +// * ThrottlingException +// The request was throttled. Try again in a few minutes. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListTagsForResource +func (c *Route53Resolver) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTagsForResourcePages iterates over the pages of a ListTagsForResource operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTagsForResource method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTagsForResource operation. +// pageNum := 0 +// err := client.ListTagsForResourcePages(params, +// func(page *route53resolver.ListTagsForResourceOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Route53Resolver) ListTagsForResourcePages(input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool) error { + return c.ListTagsForResourcePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTagsForResourcePagesWithContext same as ListTagsForResourcePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Route53Resolver) ListTagsForResourcePagesWithContext(ctx aws.Context, input *ListTagsForResourceInput, fn func(*ListTagsForResourceOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTagsForResourceInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTagsForResourceRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListTagsForResourceOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutResolverQueryLogConfigPolicy = "PutResolverQueryLogConfigPolicy" + +// PutResolverQueryLogConfigPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutResolverQueryLogConfigPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutResolverQueryLogConfigPolicy for more information on using the PutResolverQueryLogConfigPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutResolverQueryLogConfigPolicyRequest method. 
+// req, resp := client.PutResolverQueryLogConfigPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/PutResolverQueryLogConfigPolicy +func (c *Route53Resolver) PutResolverQueryLogConfigPolicyRequest(input *PutResolverQueryLogConfigPolicyInput) (req *request.Request, output *PutResolverQueryLogConfigPolicyOutput) { + op := &request.Operation{ + Name: opPutResolverQueryLogConfigPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutResolverQueryLogConfigPolicyInput{} + } + + output = &PutResolverQueryLogConfigPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutResolverQueryLogConfigPolicy API operation for Amazon Route 53 Resolver. +// +// Specifies an AWS account that you want to share a query logging configuration +// with, the query logging configuration that you want to share, and the operations +// that you want the account to be able to perform on the configuration. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Route 53 Resolver's +// API operation PutResolverQueryLogConfigPolicy for usage and error information. +// +// Returned Error Types: +// * InvalidPolicyDocument +// The specified Resolver rule policy is invalid. +// +// * InvalidParameterException +// One or more parameters in this request are not valid. +// +// * InvalidRequestException +// The request is invalid. +// +// * UnknownResourceException +// The specified resource doesn't exist. +// +// * InternalServiceErrorException +// We encountered an unknown error. Try again in a few minutes. +// +// * AccessDeniedException +// The current account doesn't have the IAM permissions required to perform +// the specified Resolver operation. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/ListTagsForResource -func (c *Route53Resolver) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01/PutResolverQueryLogConfigPolicy +func (c *Route53Resolver) PutResolverQueryLogConfigPolicy(input *PutResolverQueryLogConfigPolicyInput) (*PutResolverQueryLogConfigPolicyOutput, error) { + req, out := c.PutResolverQueryLogConfigPolicyRequest(input) return out, req.Send() } -// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// PutResolverQueryLogConfigPolicyWithContext is the same as PutResolverQueryLogConfigPolicy with the addition of // the ability to pass a context and additional request options. // -// See ListTagsForResource for details on how to use this API operation. +// See PutResolverQueryLogConfigPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
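+//
+// A minimal sketch of sharing a query logging configuration, assuming svc is
+// an existing *Route53Resolver client, that the input carries the
+// configuration ARN and a resource-based policy document as a JSON string
+// (both field names are assumptions here), and that policyJSON is a
+// placeholder built elsewhere:
+//
+//    out, err := svc.PutResolverQueryLogConfigPolicyWithContext(ctx, &route53resolver.PutResolverQueryLogConfigPolicyInput{
+//        Arn:                          aws.String("arn:aws:route53resolver:us-east-1:123456789012:resolver-query-log-config/rqlc-example"),
+//        ResolverQueryLogConfigPolicy: aws.String(policyJSON),
+//    })
+//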
-func (c *Route53Resolver) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { - req, out := c.ListTagsForResourceRequest(input) +func (c *Route53Resolver) PutResolverQueryLogConfigPolicyWithContext(ctx aws.Context, input *PutResolverQueryLogConfigPolicyInput, opts ...request.Option) (*PutResolverQueryLogConfigPolicyOutput, error) { + req, out := c.PutResolverQueryLogConfigPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -1893,8 +3088,9 @@ func (c *Route53Resolver) PutResolverRulePolicyRequest(input *PutResolverRulePol // PutResolverRulePolicy API operation for Amazon Route 53 Resolver. // -// Specifies the Resolver operations and resources that you want to allow another -// AWS account to be able to use. +// Specifies an AWS account that you want to share rules with, the Resolver +// rules that you want to share, and the operations that you want the account +// to be able to perform on those rules. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1905,7 +3101,7 @@ func (c *Route53Resolver) PutResolverRulePolicyRequest(input *PutResolverRulePol // // Returned Error Types: // * InvalidPolicyDocument -// The specified resolver rule policy is invalid. +// The specified Resolver rule policy is invalid. // // * InvalidParameterException // One or more parameters in this request are not valid. @@ -2002,6 +3198,9 @@ func (c *Route53Resolver) TagResourceRequest(input *TagResourceInput) (req *requ // * InvalidParameterException // One or more parameters in this request are not valid. // +// * InvalidRequestException +// The request is invalid. +// // * InvalidTagException // The specified tag is invalid. // @@ -2094,6 +3293,9 @@ func (c *Route53Resolver) UntagResourceRequest(input *UntagResourceInput) (req * // * InvalidParameterException // One or more parameters in this request are not valid. // +// * InvalidRequestException +// The request is invalid. +// // * InternalServiceErrorException // We encountered an unknown error. Try again in a few minutes. // @@ -2166,7 +3368,7 @@ func (c *Route53Resolver) UpdateResolverEndpointRequest(input *UpdateResolverEnd // UpdateResolverEndpoint API operation for Amazon Route 53 Resolver. // -// Updates the name of an inbound or an outbound resolver endpoint. +// Updates the name of an inbound or an outbound Resolver endpoint. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2257,7 +3459,7 @@ func (c *Route53Resolver) UpdateResolverRuleRequest(input *UpdateResolverRuleInp // UpdateResolverRule API operation for Amazon Route 53 Resolver. // -// Updates settings for a specified resolver rule. ResolverRuleId is required, +// Updates settings for a specified Resolver rule. ResolverRuleId is required, // and all other parameters are optional. If you don't specify a parameter, // it retains its current value. // @@ -2312,17 +3514,74 @@ func (c *Route53Resolver) UpdateResolverRuleWithContext(ctx aws.Context, input * return out, req.Send() } +// The current account doesn't have the IAM permissions required to perform +// the specified Resolver operation. 
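+//
+// A minimal handling sketch, assuming svc is an existing *Route53Resolver
+// client and input was built for one of the operations above; the literal
+// error code matches the Code method of this type:
+//
+//    _, err := svc.PutResolverQueryLogConfigPolicy(input)
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "AccessDeniedException" {
+//        // The caller lacks the IAM permissions for the Resolver operation;
+//        // surface the message rather than retrying.
+//        log.Printf("access denied: %s", aerr.Message())
+//    }
+//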
+type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + type AssociateResolverEndpointIpAddressInput struct { _ struct{} `type:"structure"` - // Either the IPv4 address that you want to add to a resolver endpoint or a + // Either the IPv4 address that you want to add to a Resolver endpoint or a // subnet ID. If you specify a subnet ID, Resolver chooses an IP address for // you from the available IPs in the specified subnet. // // IpAddress is a required field IpAddress *IpAddressUpdate `type:"structure" required:"true"` - // The ID of the resolver endpoint that you want to associate IP addresses with. + // The ID of the Resolver endpoint that you want to associate IP addresses with. // // ResolverEndpointId is a required field ResolverEndpointId *string `min:"1" type:"string" required:"true"` @@ -2397,20 +3656,106 @@ func (s *AssociateResolverEndpointIpAddressOutput) SetResolverEndpoint(v *Resolv return s } +type AssociateResolverQueryLogConfigInput struct { + _ struct{} `type:"structure"` + + // The ID of the query logging configuration that you want to associate a VPC + // with. + // + // ResolverQueryLogConfigId is a required field + ResolverQueryLogConfigId *string `min:"1" type:"string" required:"true"` + + // The ID of an Amazon VPC that you want this query logging configuration to + // log queries for. + // + // The VPCs and the query logging configuration must be in the same Region. + // + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssociateResolverQueryLogConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateResolverQueryLogConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
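+//
+// A minimal usage sketch, assuming svc is an existing *Route53Resolver client
+// and that both IDs are placeholders:
+//
+//    input := &route53resolver.AssociateResolverQueryLogConfigInput{
+//        ResolverQueryLogConfigId: aws.String("rqlc-0123456789abcdef"),
+//        ResourceId:               aws.String("vpc-0123456789abcdef0"),
+//    }
+//    // Validate reports missing or too-short fields before the API is called.
+//    if err := input.Validate(); err != nil {
+//        return err
+//    }
+//    out, err := svc.AssociateResolverQueryLogConfig(input)
+//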
+func (s *AssociateResolverQueryLogConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateResolverQueryLogConfigInput"} + if s.ResolverQueryLogConfigId == nil { + invalidParams.Add(request.NewErrParamRequired("ResolverQueryLogConfigId")) + } + if s.ResolverQueryLogConfigId != nil && len(*s.ResolverQueryLogConfigId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResolverQueryLogConfigId", 1)) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResolverQueryLogConfigId sets the ResolverQueryLogConfigId field's value. +func (s *AssociateResolverQueryLogConfigInput) SetResolverQueryLogConfigId(v string) *AssociateResolverQueryLogConfigInput { + s.ResolverQueryLogConfigId = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *AssociateResolverQueryLogConfigInput) SetResourceId(v string) *AssociateResolverQueryLogConfigInput { + s.ResourceId = &v + return s +} + +type AssociateResolverQueryLogConfigOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for a specified association between + // an Amazon VPC and a query logging configuration. + ResolverQueryLogConfigAssociation *ResolverQueryLogConfigAssociation `type:"structure"` +} + +// String returns the string representation +func (s AssociateResolverQueryLogConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateResolverQueryLogConfigOutput) GoString() string { + return s.String() +} + +// SetResolverQueryLogConfigAssociation sets the ResolverQueryLogConfigAssociation field's value. +func (s *AssociateResolverQueryLogConfigOutput) SetResolverQueryLogConfigAssociation(v *ResolverQueryLogConfigAssociation) *AssociateResolverQueryLogConfigOutput { + s.ResolverQueryLogConfigAssociation = v + return s +} + type AssociateResolverRuleInput struct { _ struct{} `type:"structure"` - // A name for the association that you're creating between a resolver rule and + // A name for the association that you're creating between a Resolver rule and // a VPC. Name *string `type:"string"` - // The ID of the resolver rule that you want to associate with the VPC. To list - // the existing resolver rules, use ListResolverRules. + // The ID of the Resolver rule that you want to associate with the VPC. To list + // the existing Resolver rules, use ListResolverRules (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRules.html). // // ResolverRuleId is a required field ResolverRuleId *string `min:"1" type:"string" required:"true"` - // The ID of the VPC that you want to associate the resolver rule with. + // The ID of the VPC that you want to associate the Resolver rule with. 
// // VPCId is a required field VPCId *string `min:"1" type:"string" required:"true"` @@ -2503,17 +3848,17 @@ type CreateResolverEndpointInput struct { // Specify the applicable value: // // * INBOUND: Resolver forwards DNS queries to the DNS service for a VPC - // from your network or another VPC + // from your network // // * OUTBOUND: Resolver forwards DNS queries from the DNS service for a VPC - // to your network or another VPC + // to your network // // Direction is a required field Direction *string `type:"string" required:"true" enum:"ResolverEndpointDirection"` - // The subnets and IP addresses in your VPC that you want DNS queries to pass - // through on the way from your VPCs to your network (for outbound endpoints) - // or on the way from your network to your VPCs (for inbound resolver endpoints). + // The subnets and IP addresses in your VPC that DNS queries originate from + // (for outbound endpoints) or that you forward DNS queries to (for inbound + // endpoints). The subnet ID uniquely identifies a VPC. // // IpAddresses is a required field IpAddresses []*IpAddressRequest `min:"1" type:"list" required:"true"` @@ -2524,8 +3869,10 @@ type CreateResolverEndpointInput struct { // The ID of one or more security groups that you want to use to control access // to this VPC. The security group that you specify must include one or more - // inbound rules (for inbound resolver endpoints) or outbound rules (for outbound - // resolver endpoints). + // inbound rules (for inbound Resolver endpoints) or outbound rules (for outbound + // Resolver endpoints). Inbound and outbound rules must allow TCP and UDP access. + // For inbound access, open port 53. For outbound access, open the port that + // you're using for DNS queries on your network. // // SecurityGroupIds is a required field SecurityGroupIds []*string `type:"list" required:"true"` @@ -2575,6 +3922,16 @@ func (s *CreateResolverEndpointInput) Validate() error { } } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2642,6 +3999,131 @@ func (s *CreateResolverEndpointOutput) SetResolverEndpoint(v *ResolverEndpoint) return s } +type CreateResolverQueryLogConfigInput struct { + _ struct{} `type:"structure"` + + // A unique string that identifies the request and that allows failed requests + // to be retried without the risk of executing the operation twice. CreatorRequestId + // can be any unique string, for example, a date/time stamp. + CreatorRequestId *string `min:"1" type:"string" idempotencyToken:"true"` + + // The ARN of the resource that you want Resolver to send query logs. You can + // send query logs to an S3 bucket, a CloudWatch Logs log group, or a Kinesis + // Data Firehose delivery stream. Examples of valid values include the following: + // + // * S3 bucket: arn:aws:s3:::examplebucket You can optionally append a file + // prefix to the end of the ARN. 
arn:aws:s3:::examplebucket/development/ + // + // * CloudWatch Logs log group: arn:aws:logs:us-west-1:123456789012:log-group:/mystack-testgroup-12ABC1AB12A1:* + // + // * Kinesis Data Firehose delivery stream: arn:aws:kinesis:us-east-2:0123456789:stream/my_stream_name + // + // DestinationArn is a required field + DestinationArn *string `min:"1" type:"string" required:"true"` + + // The name that you want to give the query logging configuration + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // A list of the tag keys and values that you want to associate with the query + // logging configuration. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateResolverQueryLogConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateResolverQueryLogConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateResolverQueryLogConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateResolverQueryLogConfigInput"} + if s.CreatorRequestId != nil && len(*s.CreatorRequestId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CreatorRequestId", 1)) + } + if s.DestinationArn == nil { + invalidParams.Add(request.NewErrParamRequired("DestinationArn")) + } + if s.DestinationArn != nil && len(*s.DestinationArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationArn", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreatorRequestId sets the CreatorRequestId field's value. +func (s *CreateResolverQueryLogConfigInput) SetCreatorRequestId(v string) *CreateResolverQueryLogConfigInput { + s.CreatorRequestId = &v + return s +} + +// SetDestinationArn sets the DestinationArn field's value. +func (s *CreateResolverQueryLogConfigInput) SetDestinationArn(v string) *CreateResolverQueryLogConfigInput { + s.DestinationArn = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateResolverQueryLogConfigInput) SetName(v string) *CreateResolverQueryLogConfigInput { + s.Name = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateResolverQueryLogConfigInput) SetTags(v []*Tag) *CreateResolverQueryLogConfigInput { + s.Tags = v + return s +} + +type CreateResolverQueryLogConfigOutput struct { + _ struct{} `type:"structure"` + + // Information about the CreateResolverQueryLogConfig request, including the + // status of the request. + ResolverQueryLogConfig *ResolverQueryLogConfig `type:"structure"` +} + +// String returns the string representation +func (s CreateResolverQueryLogConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateResolverQueryLogConfigOutput) GoString() string { + return s.String() +} + +// SetResolverQueryLogConfig sets the ResolverQueryLogConfig field's value. 
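+//
+// The ResolverQueryLogConfig set here is produced by a CreateResolverQueryLogConfig
+// call; a minimal sketch, assuming svc is an existing *Route53Resolver client
+// and reusing the example bucket ARN from the input documentation above:
+//
+//    out, err := svc.CreateResolverQueryLogConfig(&route53resolver.CreateResolverQueryLogConfigInput{
+//        Name:           aws.String("example-query-logs"),
+//        DestinationArn: aws.String("arn:aws:s3:::examplebucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.ResolverQueryLogConfig)
+//    }
+//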
+func (s *CreateResolverQueryLogConfigOutput) SetResolverQueryLogConfig(v *ResolverQueryLogConfig) *CreateResolverQueryLogConfigOutput { + s.ResolverQueryLogConfig = v + return s +} + type CreateResolverRuleInput struct { _ struct{} `type:"structure"` @@ -2653,8 +4135,8 @@ type CreateResolverRuleInput struct { CreatorRequestId *string `min:"1" type:"string" required:"true"` // DNS queries for this domain name are forwarded to the IP addresses that you - // specify in TargetIps. If a query matches multiple resolver rules (example.com - // and www.example.com), outbound DNS queries are routed using the resolver + // specify in TargetIps. If a query matches multiple Resolver rules (example.com + // and www.example.com), outbound DNS queries are routed using the Resolver // rule that contains the most specific domain name (www.example.com). // // DomainName is a required field @@ -2664,11 +4146,24 @@ type CreateResolverRuleInput struct { // in the Route 53 console. Name *string `type:"string"` - // The ID of the outbound resolver endpoint that you want to use to route DNS + // The ID of the outbound Resolver endpoint that you want to use to route DNS // queries to the IP addresses that you specify in TargetIps. ResolverEndpointId *string `min:"1" type:"string"` - // Specify FORWARD. Other resolver rule types aren't supported. + // When you want to forward DNS queries for specified domain name to resolvers + // on your network, specify FORWARD. + // + // When you have a forwarding rule to forward DNS queries for a domain to your + // network and you want Resolver to process queries for a subdomain of that + // domain, specify SYSTEM. + // + // For example, to forward DNS queries for example.com to resolvers on your + // network, you create a rule and specify FORWARD for RuleType. To then have + // Resolver process queries for apex.example.com, you create a rule and specify + // SYSTEM for RuleType. + // + // Currently, only Resolver can create rules that have a value of RECURSIVE + // for RuleType. // // RuleType is a required field RuleType *string `type:"string" required:"true" enum:"RuleTypeOption"` @@ -2678,6 +4173,8 @@ type CreateResolverRuleInput struct { // The IPs that you want Resolver to forward DNS queries to. You can specify // only IPv4 addresses. Separate IP addresses with a comma. + // + // TargetIps is available only when the value of Rule type is FORWARD. TargetIps []*TargetAddress `min:"1" type:"list"` } @@ -2715,6 +4212,16 @@ func (s *CreateResolverRuleInput) Validate() error { if s.TargetIps != nil && len(s.TargetIps) < 1 { invalidParams.Add(request.NewErrParamMinLen("TargetIps", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if s.TargetIps != nil { for i, v := range s.TargetIps { if v == nil { @@ -2801,7 +4308,7 @@ func (s *CreateResolverRuleOutput) SetResolverRule(v *ResolverRule) *CreateResol type DeleteResolverEndpointInput struct { _ struct{} `type:"structure"` - // The ID of the resolver endpoint that you want to delete. + // The ID of the Resolver endpoint that you want to delete. 
// // ResolverEndpointId is a required field ResolverEndpointId *string `min:"1" type:"string" required:"true"` @@ -2823,8 +4330,73 @@ func (s *DeleteResolverEndpointInput) Validate() error { if s.ResolverEndpointId == nil { invalidParams.Add(request.NewErrParamRequired("ResolverEndpointId")) } - if s.ResolverEndpointId != nil && len(*s.ResolverEndpointId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResolverEndpointId", 1)) + if s.ResolverEndpointId != nil && len(*s.ResolverEndpointId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResolverEndpointId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResolverEndpointId sets the ResolverEndpointId field's value. +func (s *DeleteResolverEndpointInput) SetResolverEndpointId(v string) *DeleteResolverEndpointInput { + s.ResolverEndpointId = &v + return s +} + +type DeleteResolverEndpointOutput struct { + _ struct{} `type:"structure"` + + // Information about the DeleteResolverEndpoint request, including the status + // of the request. + ResolverEndpoint *ResolverEndpoint `type:"structure"` +} + +// String returns the string representation +func (s DeleteResolverEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResolverEndpointOutput) GoString() string { + return s.String() +} + +// SetResolverEndpoint sets the ResolverEndpoint field's value. +func (s *DeleteResolverEndpointOutput) SetResolverEndpoint(v *ResolverEndpoint) *DeleteResolverEndpointOutput { + s.ResolverEndpoint = v + return s +} + +type DeleteResolverQueryLogConfigInput struct { + _ struct{} `type:"structure"` + + // The ID of the query logging configuration that you want to delete. + // + // ResolverQueryLogConfigId is a required field + ResolverQueryLogConfigId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteResolverQueryLogConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteResolverQueryLogConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteResolverQueryLogConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteResolverQueryLogConfigInput"} + if s.ResolverQueryLogConfigId == nil { + invalidParams.Add(request.NewErrParamRequired("ResolverQueryLogConfigId")) + } + if s.ResolverQueryLogConfigId != nil && len(*s.ResolverQueryLogConfigId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResolverQueryLogConfigId", 1)) } if invalidParams.Len() > 0 { @@ -2833,40 +4405,40 @@ func (s *DeleteResolverEndpointInput) Validate() error { return nil } -// SetResolverEndpointId sets the ResolverEndpointId field's value. -func (s *DeleteResolverEndpointInput) SetResolverEndpointId(v string) *DeleteResolverEndpointInput { - s.ResolverEndpointId = &v +// SetResolverQueryLogConfigId sets the ResolverQueryLogConfigId field's value. +func (s *DeleteResolverQueryLogConfigInput) SetResolverQueryLogConfigId(v string) *DeleteResolverQueryLogConfigInput { + s.ResolverQueryLogConfigId = &v return s } -type DeleteResolverEndpointOutput struct { +type DeleteResolverQueryLogConfigOutput struct { _ struct{} `type:"structure"` - // Information about the DeleteResolverEndpoint request, including the status - // of the request. 
- ResolverEndpoint *ResolverEndpoint `type:"structure"` + // Information about the query logging configuration that you deleted, including + // the status of the request. + ResolverQueryLogConfig *ResolverQueryLogConfig `type:"structure"` } // String returns the string representation -func (s DeleteResolverEndpointOutput) String() string { +func (s DeleteResolverQueryLogConfigOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteResolverEndpointOutput) GoString() string { +func (s DeleteResolverQueryLogConfigOutput) GoString() string { return s.String() } -// SetResolverEndpoint sets the ResolverEndpoint field's value. -func (s *DeleteResolverEndpointOutput) SetResolverEndpoint(v *ResolverEndpoint) *DeleteResolverEndpointOutput { - s.ResolverEndpoint = v +// SetResolverQueryLogConfig sets the ResolverQueryLogConfig field's value. +func (s *DeleteResolverQueryLogConfigOutput) SetResolverQueryLogConfig(v *ResolverQueryLogConfig) *DeleteResolverQueryLogConfigOutput { + s.ResolverQueryLogConfig = v return s } type DeleteResolverRuleInput struct { _ struct{} `type:"structure"` - // The ID of the resolver rule that you want to delete. + // The ID of the Resolver rule that you want to delete. // // ResolverRuleId is a required field ResolverRuleId *string `min:"1" type:"string" required:"true"` @@ -2931,12 +4503,12 @@ func (s *DeleteResolverRuleOutput) SetResolverRule(v *ResolverRule) *DeleteResol type DisassociateResolverEndpointIpAddressInput struct { _ struct{} `type:"structure"` - // The IPv4 address that you want to remove from a resolver endpoint. + // The IPv4 address that you want to remove from a Resolver endpoint. // // IpAddress is a required field IpAddress *IpAddressUpdate `type:"structure" required:"true"` - // The ID of the resolver endpoint that you want to disassociate an IP address + // The ID of the Resolver endpoint that you want to disassociate an IP address // from. // // ResolverEndpointId is a required field @@ -3012,16 +4584,100 @@ func (s *DisassociateResolverEndpointIpAddressOutput) SetResolverEndpoint(v *Res return s } +type DisassociateResolverQueryLogConfigInput struct { + _ struct{} `type:"structure"` + + // The ID of the query logging configuration that you want to disassociate a + // specified VPC from. + // + // ResolverQueryLogConfigId is a required field + ResolverQueryLogConfigId *string `min:"1" type:"string" required:"true"` + + // The ID of the Amazon VPC that you want to disassociate from a specified query + // logging configuration. + // + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateResolverQueryLogConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateResolverQueryLogConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
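+//
+// A minimal usage sketch, assuming svc is an existing *Route53Resolver client
+// and that both IDs are placeholders:
+//
+//    out, err := svc.DisassociateResolverQueryLogConfig(&route53resolver.DisassociateResolverQueryLogConfigInput{
+//        ResolverQueryLogConfigId: aws.String("rqlc-0123456789abcdef"),
+//        ResourceId:               aws.String("vpc-0123456789abcdef0"),
+//    })
+//    if err == nil {
+//        // The association that was removed is echoed back for inspection.
+//        fmt.Println(out.ResolverQueryLogConfigAssociation)
+//    }
+//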
+func (s *DisassociateResolverQueryLogConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateResolverQueryLogConfigInput"} + if s.ResolverQueryLogConfigId == nil { + invalidParams.Add(request.NewErrParamRequired("ResolverQueryLogConfigId")) + } + if s.ResolverQueryLogConfigId != nil && len(*s.ResolverQueryLogConfigId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResolverQueryLogConfigId", 1)) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResolverQueryLogConfigId sets the ResolverQueryLogConfigId field's value. +func (s *DisassociateResolverQueryLogConfigInput) SetResolverQueryLogConfigId(v string) *DisassociateResolverQueryLogConfigInput { + s.ResolverQueryLogConfigId = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *DisassociateResolverQueryLogConfigInput) SetResourceId(v string) *DisassociateResolverQueryLogConfigInput { + s.ResourceId = &v + return s +} + +type DisassociateResolverQueryLogConfigOutput struct { + _ struct{} `type:"structure"` + + // A complex type that contains settings for the association that you deleted + // between an Amazon VPC and a query logging configuration. + ResolverQueryLogConfigAssociation *ResolverQueryLogConfigAssociation `type:"structure"` +} + +// String returns the string representation +func (s DisassociateResolverQueryLogConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateResolverQueryLogConfigOutput) GoString() string { + return s.String() +} + +// SetResolverQueryLogConfigAssociation sets the ResolverQueryLogConfigAssociation field's value. +func (s *DisassociateResolverQueryLogConfigOutput) SetResolverQueryLogConfigAssociation(v *ResolverQueryLogConfigAssociation) *DisassociateResolverQueryLogConfigOutput { + s.ResolverQueryLogConfigAssociation = v + return s +} + type DisassociateResolverRuleInput struct { _ struct{} `type:"structure"` - // The ID of the resolver rule that you want to disassociate from the specified + // The ID of the Resolver rule that you want to disassociate from the specified // VPC. // // ResolverRuleId is a required field ResolverRuleId *string `min:"1" type:"string" required:"true"` - // The ID of the VPC that you want to disassociate the resolver rule from. + // The ID of the VPC that you want to disassociate the Resolver rule from. // // VPCId is a required field VPCId *string `min:"1" type:"string" required:"true"` @@ -3095,22 +4751,171 @@ func (s *DisassociateResolverRuleOutput) SetResolverRuleAssociation(v *ResolverR return s } -// For List operations, an optional specification to return a subset of objects, -// such as resolver endpoints or resolver rules. 
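+//
+// Ahead of the full description that follows, a minimal sketch of supplying a
+// Filter to a Resolver list call; svc is assumed to be an existing
+// *Route53Resolver client, and the Filters field name on
+// ListResolverEndpointsInput is an assumption here:
+//
+//    out, err := svc.ListResolverEndpoints(&route53resolver.ListResolverEndpointsInput{
+//        Filters: []*route53resolver.Filter{{
+//            Name:   aws.String("Direction"),
+//            Values: []*string{aws.String("INBOUND")},
+//        }},
+//    })
+//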
+// For Resolver list operations (ListResolverEndpoints (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverEndpoints.html), +// ListResolverRules (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRules.html), +// ListResolverRuleAssociations (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRuleAssociations.html), +// ListResolverQueryLogConfigs (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverQueryLogConfigs.html), +// and ListResolverQueryLogConfigAssociations (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverQueryLogConfigAssociations.html)), +// an optional specification to return a subset of objects. +// +// To filter objects, such as Resolver endpoints or Resolver rules, you specify +// Name and Values. For example, to list only inbound Resolver endpoints, specify +// Direction for Name and specify INBOUND for Values. type Filter struct { _ struct{} `type:"structure"` - // When you're using a List operation and you want the operation to return a - // subset of objects, such as resolver endpoints or resolver rules, the name - // of the parameter that you want to use to filter objects. For example, to - // list only inbound resolver endpoints, specify Direction for the value of - // Name. + // The name of the parameter that you want to use to filter objects. + // + // The valid values for Name depend on the action that you're including the + // filter in, ListResolverEndpoints (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverEndpoints.html), + // ListResolverRules (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRules.html), + // ListResolverRuleAssociations (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRuleAssociations.html), + // ListResolverQueryLogConfigs (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverQueryLogConfigs.html), + // or ListResolverQueryLogConfigAssociations (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverQueryLogConfigAssociations.html). + // + // In early versions of Resolver, values for Name were listed as uppercase, + // with underscore (_) delimiters. For example, CreatorRequestId was originally + // listed as CREATOR_REQUEST_ID. Uppercase values for Name are still supported. + // + // ListResolverEndpoints + // + // Valid values for Name include the following: + // + // * CreatorRequestId: The value that you specified when you created the + // Resolver endpoint. + // + // * Direction: Whether you want to return inbound or outbound Resolver endpoints. + // If you specify DIRECTION for Name, specify INBOUND or OUTBOUND for Values. + // + // * HostVpcId: The ID of the VPC that inbound DNS queries pass through on + // the way from your network to your VPCs in a region, or the VPC that outbound + // queries pass through on the way from your VPCs to your network. In a CreateResolverEndpoint + // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverEndpoint.html) + // request, SubnetId indirectly identifies the VPC. In a GetResolverEndpoint + // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverEndpoint.html) + // request, the VPC ID for a Resolver endpoint is returned in the HostVPCId + // element. 
+ // + // * IpAddressCount: The number of IP addresses that you have associated + // with the Resolver endpoint. + // + // * Name: The name of the Resolver endpoint. + // + // * SecurityGroupIds: The IDs of the VPC security groups that you specified + // when you created the Resolver endpoint. + // + // * Status: The status of the Resolver endpoint. If you specify Status for + // Name, specify one of the following status codes for Values: CREATING, + // OPERATIONAL, UPDATING, AUTO_RECOVERING, ACTION_NEEDED, or DELETING. For + // more information, see Status in ResolverEndpoint (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ResolverEndpoint.html). + // + // ListResolverRules + // + // Valid values for Name include the following: + // + // * CreatorRequestId: The value that you specified when you created the + // Resolver rule. + // + // * DomainName: The domain name for which Resolver is forwarding DNS queries + // to your network. In the value that you specify for Values, include a trailing + // dot (.) after the domain name. For example, if the domain name is example.com, + // specify the following value. Note the "." after com: example.com. + // + // * Name: The name of the Resolver rule. + // + // * ResolverEndpointId: The ID of the Resolver endpoint that the Resolver + // rule is associated with. You can filter on the Resolver endpoint only + // for rules that have a value of FORWARD for RuleType. + // + // * Status: The status of the Resolver rule. If you specify Status for Name, + // specify one of the following status codes for Values: COMPLETE, DELETING, + // UPDATING, or FAILED. + // + // * Type: The type of the Resolver rule. If you specify TYPE for Name, specify + // FORWARD or SYSTEM for Values. + // + // ListResolverRuleAssociations + // + // Valid values for Name include the following: + // + // * Name: The name of the Resolver rule association. + // + // * ResolverRuleId: The ID of the Resolver rule that is associated with + // one or more VPCs. + // + // * Status: The status of the Resolver rule association. If you specify + // Status for Name, specify one of the following status codes for Values: + // CREATING, COMPLETE, DELETING, or FAILED. + // + // * VPCId: The ID of the VPC that the Resolver rule is associated with. + // + // ListResolverQueryLogConfigs + // + // Valid values for Name include the following: + // + // * Arn: The ARN for the query logging configuration. + // + // * AssociationCount: The number of VPCs that are associated with the query + // logging configuration. + // + // * CreationTime: The date and time that the query logging configuration + // was created, in Unix time format and Coordinated Universal Time (UTC). + // + // * CreatorRequestId: A unique string that identifies the request that created + // the query logging configuration. + // + // * Destination: The AWS service that you want to forward query logs to. + // Valid values include the following: S3 CloudWatchLogs KinesisFirehose + // + // * DestinationArn: The ARN of the location that Resolver is sending query + // logs to. This value can be the ARN for an S3 bucket, a CloudWatch Logs + // log group, or a Kinesis Data Firehose delivery stream. + // + // * Id: The ID of the query logging configuration + // + // * Name: The name of the query logging configuration + // + // * OwnerId: The AWS account ID for the account that created the query logging + // configuration. 
+ // + // * ShareStatus: An indication of whether the query logging configuration + // is shared with other AWS accounts, or was shared with the current account + // by another AWS account. Valid values include: NOT_SHARED, SHARED_WITH_ME, + // or SHARED_BY_ME. + // + // * Status: The status of the query logging configuration. If you specify + // Status for Name, specify the applicable status code for Values: CREATING, + // CREATED, DELETING, or FAILED. For more information, see Status (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ResolverQueryLogConfig.html#Route53Resolver-Type-route53resolver_ResolverQueryLogConfig-Status). + // + // ListResolverQueryLogConfigAssociations + // + // Valid values for Name include the following: + // + // * CreationTime: The date and time that the VPC was associated with the + // query logging configuration, in Unix time format and Coordinated Universal + // Time (UTC). + // + // * Error: If the value of Status is FAILED, specify the cause: DESTINATION_NOT_FOUND + // or ACCESS_DENIED. + // + // * Id: The ID of the query logging association. + // + // * ResolverQueryLogConfigId: The ID of the query logging configuration + // that a VPC is associated with. + // + // * ResourceId: The ID of the Amazon VPC that is associated with the query + // logging configuration. + // + // * Status: The status of the query logging association. If you specify + // Status for Name, specify the applicable status code for Values: CREATING, + // CREATED, DELETING, or FAILED. For more information, see Status (https://docs.aws.amazon.com/API_route53resolver_ResolverQueryLogConfigAssociation.html#Route53Resolver-Type-route53resolver_ResolverQueryLogConfigAssociation-Status). Name *string `min:"1" type:"string"` // When you're using a List operation and you want the operation to return a - // subset of objects, such as resolver endpoints or resolver rules, the value + // subset of objects, such as Resolver endpoints or Resolver rules, the value // of the parameter that you want to use to filter objects. For example, to - // list only inbound resolver endpoints, specify INBOUND for the value of Values. + // list only inbound Resolver endpoints, specify Direction for Name and specify + // INBOUND for Values. Values []*string `type:"list"` } @@ -3152,7 +4957,7 @@ func (s *Filter) SetValues(v []*string) *Filter { type GetResolverEndpointInput struct { _ struct{} `type:"structure"` - // The ID of the resolver endpoint that you want to get information about. + // The ID of the Resolver endpoint that you want to get information about. // // ResolverEndpointId is a required field ResolverEndpointId *string `min:"1" type:"string" required:"true"` @@ -3164,18 +4969,216 @@ func (s GetResolverEndpointInput) String() string { } // GoString returns the string representation -func (s GetResolverEndpointInput) GoString() string { +func (s GetResolverEndpointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
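+//
+// A minimal usage sketch, assuming svc is an existing *Route53Resolver client
+// and a placeholder endpoint ID:
+//
+//    input := &route53resolver.GetResolverEndpointInput{
+//        ResolverEndpointId: aws.String("rslvr-in-0123456789abcdef"),
+//    }
+//    if err := input.Validate(); err != nil {
+//        return err
+//    }
+//    out, err := svc.GetResolverEndpoint(input)
+//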
+func (s *GetResolverEndpointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetResolverEndpointInput"} + if s.ResolverEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ResolverEndpointId")) + } + if s.ResolverEndpointId != nil && len(*s.ResolverEndpointId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResolverEndpointId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResolverEndpointId sets the ResolverEndpointId field's value. +func (s *GetResolverEndpointInput) SetResolverEndpointId(v string) *GetResolverEndpointInput { + s.ResolverEndpointId = &v + return s +} + +type GetResolverEndpointOutput struct { + _ struct{} `type:"structure"` + + // Information about the Resolver endpoint that you specified in a GetResolverEndpoint + // request. + ResolverEndpoint *ResolverEndpoint `type:"structure"` +} + +// String returns the string representation +func (s GetResolverEndpointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResolverEndpointOutput) GoString() string { + return s.String() +} + +// SetResolverEndpoint sets the ResolverEndpoint field's value. +func (s *GetResolverEndpointOutput) SetResolverEndpoint(v *ResolverEndpoint) *GetResolverEndpointOutput { + s.ResolverEndpoint = v + return s +} + +type GetResolverQueryLogConfigAssociationInput struct { + _ struct{} `type:"structure"` + + // The ID of the Resolver query logging configuration association that you want + // to get information about. + // + // ResolverQueryLogConfigAssociationId is a required field + ResolverQueryLogConfigAssociationId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetResolverQueryLogConfigAssociationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResolverQueryLogConfigAssociationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetResolverQueryLogConfigAssociationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetResolverQueryLogConfigAssociationInput"} + if s.ResolverQueryLogConfigAssociationId == nil { + invalidParams.Add(request.NewErrParamRequired("ResolverQueryLogConfigAssociationId")) + } + if s.ResolverQueryLogConfigAssociationId != nil && len(*s.ResolverQueryLogConfigAssociationId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResolverQueryLogConfigAssociationId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResolverQueryLogConfigAssociationId sets the ResolverQueryLogConfigAssociationId field's value. +func (s *GetResolverQueryLogConfigAssociationInput) SetResolverQueryLogConfigAssociationId(v string) *GetResolverQueryLogConfigAssociationInput { + s.ResolverQueryLogConfigAssociationId = &v + return s +} + +type GetResolverQueryLogConfigAssociationOutput struct { + _ struct{} `type:"structure"` + + // Information about the Resolver query logging configuration association that + // you specified in a GetQueryLogConfigAssociation request. 
+ ResolverQueryLogConfigAssociation *ResolverQueryLogConfigAssociation `type:"structure"` +} + +// String returns the string representation +func (s GetResolverQueryLogConfigAssociationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResolverQueryLogConfigAssociationOutput) GoString() string { + return s.String() +} + +// SetResolverQueryLogConfigAssociation sets the ResolverQueryLogConfigAssociation field's value. +func (s *GetResolverQueryLogConfigAssociationOutput) SetResolverQueryLogConfigAssociation(v *ResolverQueryLogConfigAssociation) *GetResolverQueryLogConfigAssociationOutput { + s.ResolverQueryLogConfigAssociation = v + return s +} + +type GetResolverQueryLogConfigInput struct { + _ struct{} `type:"structure"` + + // The ID of the Resolver query logging configuration that you want to get information + // about. + // + // ResolverQueryLogConfigId is a required field + ResolverQueryLogConfigId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetResolverQueryLogConfigInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResolverQueryLogConfigInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetResolverQueryLogConfigInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetResolverQueryLogConfigInput"} + if s.ResolverQueryLogConfigId == nil { + invalidParams.Add(request.NewErrParamRequired("ResolverQueryLogConfigId")) + } + if s.ResolverQueryLogConfigId != nil && len(*s.ResolverQueryLogConfigId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResolverQueryLogConfigId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResolverQueryLogConfigId sets the ResolverQueryLogConfigId field's value. +func (s *GetResolverQueryLogConfigInput) SetResolverQueryLogConfigId(v string) *GetResolverQueryLogConfigInput { + s.ResolverQueryLogConfigId = &v + return s +} + +type GetResolverQueryLogConfigOutput struct { + _ struct{} `type:"structure"` + + // Information about the Resolver query logging configuration that you specified + // in a GetQueryLogConfig request. + ResolverQueryLogConfig *ResolverQueryLogConfig `type:"structure"` +} + +// String returns the string representation +func (s GetResolverQueryLogConfigOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResolverQueryLogConfigOutput) GoString() string { + return s.String() +} + +// SetResolverQueryLogConfig sets the ResolverQueryLogConfig field's value. +func (s *GetResolverQueryLogConfigOutput) SetResolverQueryLogConfig(v *ResolverQueryLogConfig) *GetResolverQueryLogConfigOutput { + s.ResolverQueryLogConfig = v + return s +} + +type GetResolverQueryLogConfigPolicyInput struct { + _ struct{} `type:"structure"` + + // The ARN of the query logging configuration that you want to get the query + // logging policy for. 
+ // + // Arn is a required field + Arn *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetResolverQueryLogConfigPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetResolverQueryLogConfigPolicyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetResolverEndpointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetResolverEndpointInput"} - if s.ResolverEndpointId == nil { - invalidParams.Add(request.NewErrParamRequired("ResolverEndpointId")) +func (s *GetResolverQueryLogConfigPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetResolverQueryLogConfigPolicyInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) } - if s.ResolverEndpointId != nil && len(*s.ResolverEndpointId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResolverEndpointId", 1)) + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) } if invalidParams.Len() > 0 { @@ -3184,40 +5187,40 @@ func (s *GetResolverEndpointInput) Validate() error { return nil } -// SetResolverEndpointId sets the ResolverEndpointId field's value. -func (s *GetResolverEndpointInput) SetResolverEndpointId(v string) *GetResolverEndpointInput { - s.ResolverEndpointId = &v +// SetArn sets the Arn field's value. +func (s *GetResolverQueryLogConfigPolicyInput) SetArn(v string) *GetResolverQueryLogConfigPolicyInput { + s.Arn = &v return s } -type GetResolverEndpointOutput struct { +type GetResolverQueryLogConfigPolicyOutput struct { _ struct{} `type:"structure"` - // Information about the resolver endpoint that you specified in a GetResolverEndpoint - // request. - ResolverEndpoint *ResolverEndpoint `type:"structure"` + // Information about the query logging policy for the query logging configuration + // that you specified in a GetResolverQueryLogConfigPolicy request. + ResolverQueryLogConfigPolicy *string `type:"string"` } // String returns the string representation -func (s GetResolverEndpointOutput) String() string { +func (s GetResolverQueryLogConfigPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetResolverEndpointOutput) GoString() string { +func (s GetResolverQueryLogConfigPolicyOutput) GoString() string { return s.String() } -// SetResolverEndpoint sets the ResolverEndpoint field's value. -func (s *GetResolverEndpointOutput) SetResolverEndpoint(v *ResolverEndpoint) *GetResolverEndpointOutput { - s.ResolverEndpoint = v +// SetResolverQueryLogConfigPolicy sets the ResolverQueryLogConfigPolicy field's value. +func (s *GetResolverQueryLogConfigPolicyOutput) SetResolverQueryLogConfigPolicy(v string) *GetResolverQueryLogConfigPolicyOutput { + s.ResolverQueryLogConfigPolicy = &v return s } type GetResolverRuleAssociationInput struct { _ struct{} `type:"structure"` - // The ID of the resolver rule association that you want to get information + // The ID of the Resolver rule association that you want to get information // about. 
// // ResolverRuleAssociationId is a required field @@ -3259,7 +5262,7 @@ func (s *GetResolverRuleAssociationInput) SetResolverRuleAssociationId(v string) type GetResolverRuleAssociationOutput struct { _ struct{} `type:"structure"` - // Information about the resolver rule association that you specified in a GetResolverRuleAssociation + // Information about the Resolver rule association that you specified in a GetResolverRuleAssociation // request. ResolverRuleAssociation *ResolverRuleAssociation `type:"structure"` } @@ -3283,7 +5286,7 @@ func (s *GetResolverRuleAssociationOutput) SetResolverRuleAssociation(v *Resolve type GetResolverRuleInput struct { _ struct{} `type:"structure"` - // The ID of the resolver rule that you want to get information about. + // The ID of the Resolver rule that you want to get information about. // // ResolverRuleId is a required field ResolverRuleId *string `min:"1" type:"string" required:"true"` @@ -3324,7 +5327,7 @@ func (s *GetResolverRuleInput) SetResolverRuleId(v string) *GetResolverRuleInput type GetResolverRuleOutput struct { _ struct{} `type:"structure"` - // Information about the resolver rule that you specified in a GetResolverRule + // Information about the Resolver rule that you specified in a GetResolverRule // request. ResolverRule *ResolverRule `type:"structure"` } @@ -3348,7 +5351,7 @@ func (s *GetResolverRuleOutput) SetResolverRule(v *ResolverRule) *GetResolverRul type GetResolverRulePolicyInput struct { _ struct{} `type:"structure"` - // The ID of the resolver rule policy that you want to get information about. + // The ID of the Resolver rule policy that you want to get information about. // // Arn is a required field Arn *string `min:"1" type:"string" required:"true"` @@ -3389,7 +5392,7 @@ func (s *GetResolverRulePolicyInput) SetArn(v string) *GetResolverRulePolicyInpu type GetResolverRulePolicyOutput struct { _ struct{} `type:"structure"` - // Information about the resolver rule policy that you specified in a GetResolverRulePolicy + // Information about the Resolver rule policy that you specified in a GetResolverRulePolicy // request. ResolverRulePolicy *string `type:"string"` } @@ -3412,8 +5415,8 @@ func (s *GetResolverRulePolicyOutput) SetResolverRulePolicy(v string) *GetResolv // We encountered an unknown error. Try again in a few minutes. type InternalServiceErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3430,17 +5433,17 @@ func (s InternalServiceErrorException) GoString() string { func newErrorInternalServiceErrorException(v protocol.ResponseMetadata) error { return &InternalServiceErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceErrorException) Code() string { +func (s *InternalServiceErrorException) Code() string { return "InternalServiceErrorException" } // Message returns the exception's message. -func (s InternalServiceErrorException) Message() string { +func (s *InternalServiceErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3448,28 +5451,28 @@ func (s InternalServiceErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
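+//
+// Both InternalServiceErrorException and ThrottlingException are documented
+// above as transient ("try again in a few minutes"), so callers commonly
+// retry them; a minimal sketch, assuming svc is an existing *Route53Resolver
+// client and the awserr package is imported:
+//
+//    if _, err := svc.ListResolverRules(&route53resolver.ListResolverRulesInput{}); err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            switch aerr.Code() {
+//            case "InternalServiceErrorException", "ThrottlingException":
+//                // Transient failures; back off and retry the call.
+//            }
+//        }
+//    }
+//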
-func (s InternalServiceErrorException) OrigErr() error { +func (s *InternalServiceErrorException) OrigErr() error { return nil } -func (s InternalServiceErrorException) Error() string { +func (s *InternalServiceErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceErrorException) RequestID() string { + return s.RespMetadata.RequestID } // The value that you specified for NextToken in a List request isn't valid. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3486,17 +5489,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3504,28 +5507,28 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // One or more parameters in this request are not valid. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // For an InvalidParameterException error, the name of the parameter that's // invalid. @@ -3546,17 +5549,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. 
-func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3564,28 +5567,28 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } -// The specified resolver rule policy is invalid. +// The specified Resolver rule policy is invalid. type InvalidPolicyDocument struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3602,17 +5605,17 @@ func (s InvalidPolicyDocument) GoString() string { func newErrorInvalidPolicyDocument(v protocol.ResponseMetadata) error { return &InvalidPolicyDocument{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPolicyDocument) Code() string { +func (s *InvalidPolicyDocument) Code() string { return "InvalidPolicyDocument" } // Message returns the exception's message. -func (s InvalidPolicyDocument) Message() string { +func (s *InvalidPolicyDocument) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3620,28 +5623,28 @@ func (s InvalidPolicyDocument) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPolicyDocument) OrigErr() error { +func (s *InvalidPolicyDocument) OrigErr() error { return nil } -func (s InvalidPolicyDocument) Error() string { +func (s *InvalidPolicyDocument) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPolicyDocument) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPolicyDocument) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPolicyDocument) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPolicyDocument) RequestID() string { + return s.RespMetadata.RequestID } // The request is invalid. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3658,17 +5661,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
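With the move to pointer receivers and the exported RespMetadata field, callers can match these generated exception types directly with errors.As, and each type still satisfies awserr.RequestFailure. A self-contained sketch (the package and function names are made up, and it assumes this file is the route53resolver service package):

package resolverexamples

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/route53resolver"
)

// classifyResolverError inspects an error returned by a Route 53 Resolver
// API call using the refactored exception types.
func classifyResolverError(err error) string {
	// Methods are now defined on pointer receivers, so errors.As targets a
	// pointer to the exception struct.
	var ipe *route53resolver.InvalidParameterException
	if errors.As(err, &ipe) {
		return "invalid parameter: " + ipe.Message()
	}
	// Every generated exception also satisfies awserr.RequestFailure, which
	// now reads the exported RespMetadata for the HTTP status code and
	// request ID.
	var rf awserr.RequestFailure
	if errors.As(err, &rf) {
		return fmt.Sprintf("%s (HTTP %d, request %s)", rf.Code(), rf.StatusCode(), rf.RequestID())
	}
	return err.Error()
}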
-func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3676,28 +5679,28 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // The specified tag is invalid. type InvalidTagException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3714,17 +5717,17 @@ func (s InvalidTagException) GoString() string { func newErrorInvalidTagException(v protocol.ResponseMetadata) error { return &InvalidTagException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTagException) Code() string { +func (s *InvalidTagException) Code() string { return "InvalidTagException" } // Message returns the exception's message. -func (s InvalidTagException) Message() string { +func (s *InvalidTagException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3732,33 +5735,35 @@ func (s InvalidTagException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTagException) OrigErr() error { +func (s *InvalidTagException) OrigErr() error { return nil } -func (s InvalidTagException) Error() string { +func (s *InvalidTagException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTagException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTagException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTagException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTagException) RequestID() string { + return s.RespMetadata.RequestID } -// In an CreateResolverEndpoint request, a subnet and IP address that you want -// to use for DNS queries. +// In a CreateResolverEndpoint (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverEndpoint.html) +// request, the IP address that DNS queries originate from (for outbound endpoints) +// or that you forward DNS queries to (for inbound endpoints). IpAddressRequest +// also includes the ID of the subnet that contains the IP address. 
type IpAddressRequest struct { _ struct{} `type:"structure"` // The IP address that you want to use for DNS queries. Ip *string `min:"7" type:"string"` - // The subnet that contains the IP address. + // The ID of the subnet that contains the IP address. // // SubnetId is a required field SubnetId *string `min:"1" type:"string" required:"true"` @@ -3805,8 +5810,9 @@ func (s *IpAddressRequest) SetSubnetId(v string) *IpAddressRequest { return s } -// In the response to a GetResolverEndpoint request, information about the IP -// addresses that the resolver endpoint uses for DNS queries. +// In the response to a GetResolverEndpoint (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverEndpoint.html) +// request, information about the IP addresses that the Resolver endpoint uses +// for DNS queries. type IpAddressResponse struct { _ struct{} `type:"structure"` @@ -3814,7 +5820,7 @@ type IpAddressResponse struct { // Coordinated Universal Time (UTC). CreationTime *string `min:"20" type:"string"` - // One IP address that the resolver endpoint uses for DNS queries. + // One IP address that the Resolver endpoint uses for DNS queries. Ip *string `min:"7" type:"string"` // The ID of one IP address. @@ -3886,20 +5892,21 @@ func (s *IpAddressResponse) SetSubnetId(v string) *IpAddressResponse { return s } -// In an UpdateResolverEndpoint request, information about an IP address to -// update. +// In an UpdateResolverEndpoint (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_UpdateResolverEndpoint.html) +// request, information about an IP address to update. type IpAddressUpdate struct { _ struct{} `type:"structure"` // The new IP address. Ip *string `min:"7" type:"string"` - // Only when removing an IP address from a resolver endpoint: The ID of the - // IP address that you want to remove. To get this ID, use GetResolverEndpoint. + // Only when removing an IP address from a Resolver endpoint: The ID of the + // IP address that you want to remove. To get this ID, use GetResolverEndpoint + // (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverEndpoint.html). IpId *string `min:"1" type:"string"` // The ID of the subnet that includes the IP address that you want to update. - // To get this ID, use GetResolverEndpoint. + // To get this ID, use GetResolverEndpoint (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverEndpoint.html). SubnetId *string `min:"1" type:"string"` } @@ -3952,8 +5959,8 @@ func (s *IpAddressUpdate) SetSubnetId(v string) *IpAddressUpdate { // The request caused one or more limits to be exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -3963,96 +5970,383 @@ type LimitExceededException struct { } // String returns the string representation -func (s LimitExceededException) String() string { +func (s LimitExceededException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LimitExceededException) GoString() string { + return s.String() +} + +func newErrorLimitExceededException(v protocol.ResponseMetadata) error { + return &LimitExceededException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
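As a rough illustration of how the IpAddressRequest values above are used, the sketch below builds a CreateResolverEndpoint call with one auto-assigned and one pinned IP address. The package and function names, subnet IDs, and security group ID are placeholders; the input shape is taken from the CreateResolverEndpoint operation elsewhere in this file:

package resolverexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53resolver"
)

// createInboundEndpoint creates an inbound Resolver endpoint from two
// IpAddressRequest values.
func createInboundEndpoint() (*route53resolver.ResolverEndpoint, error) {
	svc := route53resolver.New(session.Must(session.NewSession()))
	out, err := svc.CreateResolverEndpoint(&route53resolver.CreateResolverEndpointInput{
		CreatorRequestId: aws.String("example-request-1"),
		Direction:        aws.String("INBOUND"),
		Name:             aws.String("example-inbound"),
		SecurityGroupIds: []*string{aws.String("sg-00000000000000000")},
		IpAddresses: []*route53resolver.IpAddressRequest{
			// Let Resolver pick the IP address in the first subnet,
			// and pin it in the second.
			{SubnetId: aws.String("subnet-00000000000000001")},
			{SubnetId: aws.String("subnet-00000000000000002"), Ip: aws.String("10.0.2.5")},
		},
	})
	if err != nil {
		return nil, err
	}
	return out.ResolverEndpoint, nil
}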
+func (s *LimitExceededException) Code() string { + return "LimitExceededException" +} + +// Message returns the exception's message. +func (s *LimitExceededException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *LimitExceededException) OrigErr() error { + return nil +} + +func (s *LimitExceededException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListResolverEndpointIpAddressesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of IP addresses that you want to return in the response + // to a ListResolverEndpointIpAddresses request. If you don't specify a value + // for MaxResults, Resolver returns up to 100 IP addresses. + MaxResults *int64 `min:"1" type:"integer"` + + // For the first ListResolverEndpointIpAddresses request, omit this value. + // + // If the specified Resolver endpoint has more than MaxResults IP addresses, + // you can submit another ListResolverEndpointIpAddresses request to get the + // next group of IP addresses. In the next request, specify the value of NextToken + // from the previous response. + NextToken *string `type:"string"` + + // The ID of the Resolver endpoint that you want to get IP addresses for. + // + // ResolverEndpointId is a required field + ResolverEndpointId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListResolverEndpointIpAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResolverEndpointIpAddressesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListResolverEndpointIpAddressesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListResolverEndpointIpAddressesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.ResolverEndpointId == nil { + invalidParams.Add(request.NewErrParamRequired("ResolverEndpointId")) + } + if s.ResolverEndpointId != nil && len(*s.ResolverEndpointId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResolverEndpointId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListResolverEndpointIpAddressesInput) SetMaxResults(v int64) *ListResolverEndpointIpAddressesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListResolverEndpointIpAddressesInput) SetNextToken(v string) *ListResolverEndpointIpAddressesInput { + s.NextToken = &v + return s +} + +// SetResolverEndpointId sets the ResolverEndpointId field's value. 
+func (s *ListResolverEndpointIpAddressesInput) SetResolverEndpointId(v string) *ListResolverEndpointIpAddressesInput { + s.ResolverEndpointId = &v + return s +} + +type ListResolverEndpointIpAddressesOutput struct { + _ struct{} `type:"structure"` + + // Information about the IP addresses in your VPC that DNS queries originate + // from (for outbound endpoints) or that you forward DNS queries to (for inbound + // endpoints). + IpAddresses []*IpAddressResponse `type:"list"` + + // The value that you specified for MaxResults in the request. + MaxResults *int64 `min:"1" type:"integer"` + + // If the specified endpoint has more than MaxResults IP addresses, you can + // submit another ListResolverEndpointIpAddresses request to get the next group + // of IP addresses. In the next request, specify the value of NextToken from + // the previous response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListResolverEndpointIpAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResolverEndpointIpAddressesOutput) GoString() string { + return s.String() +} + +// SetIpAddresses sets the IpAddresses field's value. +func (s *ListResolverEndpointIpAddressesOutput) SetIpAddresses(v []*IpAddressResponse) *ListResolverEndpointIpAddressesOutput { + s.IpAddresses = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListResolverEndpointIpAddressesOutput) SetMaxResults(v int64) *ListResolverEndpointIpAddressesOutput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListResolverEndpointIpAddressesOutput) SetNextToken(v string) *ListResolverEndpointIpAddressesOutput { + s.NextToken = &v + return s +} + +type ListResolverEndpointsInput struct { + _ struct{} `type:"structure"` + + // An optional specification to return a subset of Resolver endpoints, such + // as all inbound Resolver endpoints. + // + // If you submit a second or subsequent ListResolverEndpoints request and specify + // the NextToken parameter, you must use the same values for Filters, if any, + // as in the previous request. + Filters []*Filter `type:"list"` + + // The maximum number of Resolver endpoints that you want to return in the response + // to a ListResolverEndpoints request. If you don't specify a value for MaxResults, + // Resolver returns up to 100 Resolver endpoints. + MaxResults *int64 `min:"1" type:"integer"` + + // For the first ListResolverEndpoints request, omit this value. + // + // If you have more than MaxResults Resolver endpoints, you can submit another + // ListResolverEndpoints request to get the next group of Resolver endpoints. + // In the next request, specify the value of NextToken from the previous response. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListResolverEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListResolverEndpointsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
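The NextToken handling described in the comments above amounts to the loop sketched below (the package and helper names are made up; the client is assumed to be configured elsewhere):

package resolverexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53resolver"
)

// listEndpointIPAddresses collects every IP address for one endpoint by
// resending the request with the NextToken from the previous response.
func listEndpointIPAddresses(svc *route53resolver.Route53Resolver, endpointID string) ([]*route53resolver.IpAddressResponse, error) {
	input := &route53resolver.ListResolverEndpointIpAddressesInput{
		ResolverEndpointId: aws.String(endpointID),
		MaxResults:         aws.Int64(100),
	}
	var ips []*route53resolver.IpAddressResponse
	for {
		out, err := svc.ListResolverEndpointIpAddresses(input)
		if err != nil {
			return nil, err
		}
		ips = append(ips, out.IpAddresses...)
		if out.NextToken == nil {
			return ips, nil // no more pages
		}
		input.NextToken = out.NextToken
	}
}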
+func (s *ListResolverEndpointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListResolverEndpointsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *ListResolverEndpointsInput) SetFilters(v []*Filter) *ListResolverEndpointsInput { + s.Filters = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListResolverEndpointsInput) SetMaxResults(v int64) *ListResolverEndpointsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListResolverEndpointsInput) SetNextToken(v string) *ListResolverEndpointsInput { + s.NextToken = &v + return s +} + +type ListResolverEndpointsOutput struct { + _ struct{} `type:"structure"` + + // The value that you specified for MaxResults in the request. + MaxResults *int64 `min:"1" type:"integer"` + + // If more than MaxResults IP addresses match the specified criteria, you can + // submit another ListResolverEndpoint request to get the next group of results. + // In the next request, specify the value of NextToken from the previous response. + NextToken *string `type:"string"` + + // The Resolver endpoints that were created by using the current AWS account, + // and that match the specified filters, if any. + ResolverEndpoints []*ResolverEndpoint `type:"list"` +} + +// String returns the string representation +func (s ListResolverEndpointsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LimitExceededException) GoString() string { +func (s ListResolverEndpointsOutput) GoString() string { return s.String() } -func newErrorLimitExceededException(v protocol.ResponseMetadata) error { - return &LimitExceededException{ - respMetadata: v, - } -} - -// Code returns the exception type name. -func (s LimitExceededException) Code() string { - return "LimitExceededException" -} - -// Message returns the exception's message. -func (s LimitExceededException) Message() string { - if s.Message_ != nil { - return *s.Message_ - } - return "" -} - -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { - return nil -} - -func (s LimitExceededException) Error() string { - return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +// SetMaxResults sets the MaxResults field's value. +func (s *ListResolverEndpointsOutput) SetMaxResults(v int64) *ListResolverEndpointsOutput { + s.MaxResults = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +// SetNextToken sets the NextToken field's value. +func (s *ListResolverEndpointsOutput) SetNextToken(v string) *ListResolverEndpointsOutput { + s.NextToken = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +// SetResolverEndpoints sets the ResolverEndpoints field's value. 
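A sketch of the Filters usage described above, requesting only inbound endpoints. "Direction" is the documented filter name for this field; later pages would be fetched by resending the same Filters with the returned NextToken, as in the IP-address loop above. Names are illustrative:

package resolverexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53resolver"
)

// firstPageOfInboundEndpoints requests the first page of inbound Resolver
// endpoints and returns the NextToken for any remaining pages.
func firstPageOfInboundEndpoints(svc *route53resolver.Route53Resolver) ([]*route53resolver.ResolverEndpoint, *string, error) {
	out, err := svc.ListResolverEndpoints(&route53resolver.ListResolverEndpointsInput{
		MaxResults: aws.Int64(100),
		// The same Filters must be sent on every page of the same listing.
		Filters: []*route53resolver.Filter{
			{Name: aws.String("Direction"), Values: []*string{aws.String("INBOUND")}},
		},
	})
	if err != nil {
		return nil, nil, err
	}
	return out.ResolverEndpoints, out.NextToken, nil
}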
+func (s *ListResolverEndpointsOutput) SetResolverEndpoints(v []*ResolverEndpoint) *ListResolverEndpointsOutput { + s.ResolverEndpoints = v + return s } -type ListResolverEndpointIpAddressesInput struct { +type ListResolverQueryLogConfigAssociationsInput struct { _ struct{} `type:"structure"` - // The maximum number of IP addresses that you want to return in the response - // to a ListResolverEndpointIpAddresses request. If you don't specify a value - // for MaxResults, Resolver returns up to 100 IP addresses. + // An optional specification to return a subset of query logging associations. + // + // If you submit a second or subsequent ListResolverQueryLogConfigAssociations + // request and specify the NextToken parameter, you must use the same values + // for Filters, if any, as in the previous request. + Filters []*Filter `type:"list"` + + // The maximum number of query logging associations that you want to return + // in the response to a ListResolverQueryLogConfigAssociations request. If you + // don't specify a value for MaxResults, Resolver returns up to 100 query logging + // associations. MaxResults *int64 `min:"1" type:"integer"` - // For the first ListResolverEndpointIpAddresses request, omit this value. + // For the first ListResolverQueryLogConfigAssociations request, omit this value. // - // If the specified resolver endpoint has more than MaxResults IP addresses, - // you can submit another ListResolverEndpointIpAddresses request to get the - // next group of IP addresses. In the next request, specify the value of NextToken - // from the previous response. + // If there are more than MaxResults query logging associations that match the + // values that you specify for Filters, you can submit another ListResolverQueryLogConfigAssociations + // request to get the next group of associations. In the next request, specify + // the value of NextToken from the previous response. NextToken *string `type:"string"` - // The ID of the resolver endpoint that you want to get IP addresses for. + // The element that you want Resolver to sort query logging associations by. // - // ResolverEndpointId is a required field - ResolverEndpointId *string `min:"1" type:"string" required:"true"` + // If you submit a second or subsequent ListResolverQueryLogConfigAssociations + // request and specify the NextToken parameter, you must use the same value + // for SortBy, if any, as in the previous request. + // + // Valid values include the following elements: + // + // * CreationTime: The ID of the query logging association. + // + // * Error: If the value of Status is FAILED, the value of Error indicates + // the cause: DESTINATION_NOT_FOUND: The specified destination (for example, + // an Amazon S3 bucket) was deleted. ACCESS_DENIED: Permissions don't allow + // sending logs to the destination. If Status is a value other than FAILED, + // ERROR is null. + // + // * Id: The ID of the query logging association + // + // * ResolverQueryLogConfigId: The ID of the query logging configuration + // + // * ResourceId: The ID of the VPC that is associated with the query logging + // configuration + // + // * Status: The current status of the configuration. Valid values include + // the following: CREATING: Resolver is creating an association between an + // Amazon VPC and a query logging configuration. CREATED: The association + // between an Amazon VPC and a query logging configuration was successfully + // created. Resolver is logging queries that originate in the specified VPC. 
+ // DELETING: Resolver is deleting this query logging association. FAILED: + // Resolver either couldn't create or couldn't delete the query logging association. + // Here are two common causes: The specified destination (for example, an + // Amazon S3 bucket) was deleted. Permissions don't allow sending logs to + // the destination. + SortBy *string `min:"1" type:"string"` + + // If you specified a value for SortBy, the order that you want query logging + // associations to be listed in, ASCENDING or DESCENDING. + // + // If you submit a second or subsequent ListResolverQueryLogConfigAssociations + // request and specify the NextToken parameter, you must use the same value + // for SortOrder, if any, as in the previous request. + SortOrder *string `type:"string" enum:"SortOrder"` } // String returns the string representation -func (s ListResolverEndpointIpAddressesInput) String() string { +func (s ListResolverQueryLogConfigAssociationsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListResolverEndpointIpAddressesInput) GoString() string { +func (s ListResolverQueryLogConfigAssociationsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListResolverEndpointIpAddressesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListResolverEndpointIpAddressesInput"} +func (s *ListResolverQueryLogConfigAssociationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListResolverQueryLogConfigAssociationsInput"} if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } - if s.ResolverEndpointId == nil { - invalidParams.Add(request.NewErrParamRequired("ResolverEndpointId")) + if s.SortBy != nil && len(*s.SortBy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SortBy", 1)) } - if s.ResolverEndpointId != nil && len(*s.ResolverEndpointId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResolverEndpointId", 1)) + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -4061,109 +6355,190 @@ func (s *ListResolverEndpointIpAddressesInput) Validate() error { return nil } +// SetFilters sets the Filters field's value. +func (s *ListResolverQueryLogConfigAssociationsInput) SetFilters(v []*Filter) *ListResolverQueryLogConfigAssociationsInput { + s.Filters = v + return s +} + // SetMaxResults sets the MaxResults field's value. -func (s *ListResolverEndpointIpAddressesInput) SetMaxResults(v int64) *ListResolverEndpointIpAddressesInput { +func (s *ListResolverQueryLogConfigAssociationsInput) SetMaxResults(v int64) *ListResolverQueryLogConfigAssociationsInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListResolverEndpointIpAddressesInput) SetNextToken(v string) *ListResolverEndpointIpAddressesInput { +func (s *ListResolverQueryLogConfigAssociationsInput) SetNextToken(v string) *ListResolverQueryLogConfigAssociationsInput { s.NextToken = &v return s } -// SetResolverEndpointId sets the ResolverEndpointId field's value. -func (s *ListResolverEndpointIpAddressesInput) SetResolverEndpointId(v string) *ListResolverEndpointIpAddressesInput { - s.ResolverEndpointId = &v +// SetSortBy sets the SortBy field's value. 
+func (s *ListResolverQueryLogConfigAssociationsInput) SetSortBy(v string) *ListResolverQueryLogConfigAssociationsInput { + s.SortBy = &v return s } -type ListResolverEndpointIpAddressesOutput struct { - _ struct{} `type:"structure"` - - // The IP addresses that DNS queries pass through on their way to your network - // (outbound endpoint) or on the way to Resolver (inbound endpoint). - IpAddresses []*IpAddressResponse `type:"list"` +// SetSortOrder sets the SortOrder field's value. +func (s *ListResolverQueryLogConfigAssociationsInput) SetSortOrder(v string) *ListResolverQueryLogConfigAssociationsInput { + s.SortOrder = &v + return s +} - // The value that you specified for MaxResults in the request. - MaxResults *int64 `min:"1" type:"integer"` +type ListResolverQueryLogConfigAssociationsOutput struct { + _ struct{} `type:"structure"` - // If the specified endpoint has more than MaxResults IP addresses, you can - // submit another ListResolverEndpointIpAddresses request to get the next group - // of IP addresses. In the next request, specify the value of NextToken from + // If there are more than MaxResults query logging associations, you can submit + // another ListResolverQueryLogConfigAssociations request to get the next group + // of associations. In the next request, specify the value of NextToken from // the previous response. NextToken *string `type:"string"` + + // A list that contains one ResolverQueryLogConfigAssociations element for each + // query logging association that matches the values that you specified for + // Filter. + ResolverQueryLogConfigAssociations []*ResolverQueryLogConfigAssociation `type:"list"` + + // The total number of query logging associations that were created by the current + // account in the specified Region. This count can differ from the number of + // associations that are returned in a ListResolverQueryLogConfigAssociations + // response, depending on the values that you specify in the request. + TotalCount *int64 `type:"integer"` + + // The total number of query logging associations that were created by the current + // account in the specified Region and that match the filters that were specified + // in the ListResolverQueryLogConfigAssociations request. For the total number + // of associations that were created by the current account in the specified + // Region, see TotalCount. + TotalFilteredCount *int64 `type:"integer"` } // String returns the string representation -func (s ListResolverEndpointIpAddressesOutput) String() string { +func (s ListResolverQueryLogConfigAssociationsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListResolverEndpointIpAddressesOutput) GoString() string { +func (s ListResolverQueryLogConfigAssociationsOutput) GoString() string { return s.String() } -// SetIpAddresses sets the IpAddresses field's value. -func (s *ListResolverEndpointIpAddressesOutput) SetIpAddresses(v []*IpAddressResponse) *ListResolverEndpointIpAddressesOutput { - s.IpAddresses = v +// SetNextToken sets the NextToken field's value. +func (s *ListResolverQueryLogConfigAssociationsOutput) SetNextToken(v string) *ListResolverQueryLogConfigAssociationsOutput { + s.NextToken = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListResolverEndpointIpAddressesOutput) SetMaxResults(v int64) *ListResolverEndpointIpAddressesOutput { - s.MaxResults = &v +// SetResolverQueryLogConfigAssociations sets the ResolverQueryLogConfigAssociations field's value. 
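A sketch combining the Filters, SortBy, and SortOrder parameters described above to fetch the newest FAILED associations and print the Error code explaining each failure. Package and function names are illustrative; the string values mirror the enum values listed in the comments:

package resolverexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53resolver"
)

// reportFailedQueryLogAssociations lists FAILED associations, newest first,
// and prints the Error and ErrorMessage fields for each one. SortBy,
// SortOrder, and Filters must be repeated unchanged when NextToken is used
// for later pages.
func reportFailedQueryLogAssociations(svc *route53resolver.Route53Resolver) error {
	out, err := svc.ListResolverQueryLogConfigAssociations(&route53resolver.ListResolverQueryLogConfigAssociationsInput{
		MaxResults: aws.Int64(100),
		SortBy:     aws.String("CreationTime"),
		SortOrder:  aws.String("DESCENDING"), // SortOrder enum: ASCENDING | DESCENDING
		Filters: []*route53resolver.Filter{
			{Name: aws.String("Status"), Values: []*string{aws.String("FAILED")}},
		},
	})
	if err != nil {
		return err
	}
	for _, a := range out.ResolverQueryLogConfigAssociations {
		fmt.Printf("%s: %s (%s)\n",
			aws.StringValue(a.Id), aws.StringValue(a.Error), aws.StringValue(a.ErrorMessage))
	}
	return nil
}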
+func (s *ListResolverQueryLogConfigAssociationsOutput) SetResolverQueryLogConfigAssociations(v []*ResolverQueryLogConfigAssociation) *ListResolverQueryLogConfigAssociationsOutput { + s.ResolverQueryLogConfigAssociations = v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListResolverEndpointIpAddressesOutput) SetNextToken(v string) *ListResolverEndpointIpAddressesOutput { - s.NextToken = &v +// SetTotalCount sets the TotalCount field's value. +func (s *ListResolverQueryLogConfigAssociationsOutput) SetTotalCount(v int64) *ListResolverQueryLogConfigAssociationsOutput { + s.TotalCount = &v return s } -type ListResolverEndpointsInput struct { +// SetTotalFilteredCount sets the TotalFilteredCount field's value. +func (s *ListResolverQueryLogConfigAssociationsOutput) SetTotalFilteredCount(v int64) *ListResolverQueryLogConfigAssociationsOutput { + s.TotalFilteredCount = &v + return s +} + +type ListResolverQueryLogConfigsInput struct { _ struct{} `type:"structure"` - // An optional specification to return a subset of resolver endpoints, such - // as all inbound resolver endpoints. + // An optional specification to return a subset of query logging configurations. // - // If you submit a second or subsequent ListResolverEndpoints request and specify - // the NextToken parameter, you must use the same values for Filters, if any, - // as in the previous request. + // If you submit a second or subsequent ListResolverQueryLogConfigs request + // and specify the NextToken parameter, you must use the same values for Filters, + // if any, as in the previous request. Filters []*Filter `type:"list"` - // The maximum number of resolver endpoints that you want to return in the response - // to a ListResolverEndpoints request. If you don't specify a value for MaxResults, - // Resolver returns up to 100 resolver endpoints. + // The maximum number of query logging configurations that you want to return + // in the response to a ListResolverQueryLogConfigs request. If you don't specify + // a value for MaxResults, Resolver returns up to 100 query logging configurations. MaxResults *int64 `min:"1" type:"integer"` - // For the first ListResolverEndpoints request, omit this value. + // For the first ListResolverQueryLogConfigs request, omit this value. // - // If you have more than MaxResults resolver endpoints, you can submit another - // ListResolverEndpoints request to get the next group of resolver endpoints. - // In the next request, specify the value of NextToken from the previous response. + // If there are more than MaxResults query logging configurations that match + // the values that you specify for Filters, you can submit another ListResolverQueryLogConfigs + // request to get the next group of configurations. In the next request, specify + // the value of NextToken from the previous response. NextToken *string `type:"string"` + + // The element that you want Resolver to sort query logging configurations by. + // + // If you submit a second or subsequent ListResolverQueryLogConfigs request + // and specify the NextToken parameter, you must use the same value for SortBy, + // if any, as in the previous request. 
+ // + // Valid values include the following elements: + // + // * Arn: The ARN of the query logging configuration + // + // * AssociationCount: The number of VPCs that are associated with the specified + // configuration + // + // * CreationTime: The date and time that Resolver returned when the configuration + // was created + // + // * CreatorRequestId: The value that was specified for CreatorRequestId + // when the configuration was created + // + // * DestinationArn: The location that logs are sent to + // + // * Id: The ID of the configuration + // + // * Name: The name of the configuration + // + // * OwnerId: The AWS account number of the account that created the configuration + // + // * ShareStatus: Whether the configuration is shared with other AWS accounts + // or shared with the current account by another AWS account. Sharing is + // configured through AWS Resource Access Manager (AWS RAM). + // + // * Status: The current status of the configuration. Valid values include + // the following: CREATING: Resolver is creating the query logging configuration. + // CREATED: The query logging configuration was successfully created. Resolver + // is logging queries that originate in the specified VPC. DELETING: Resolver + // is deleting this query logging configuration. FAILED: Resolver either + // couldn't create or couldn't delete the query logging configuration. Here + // are two common causes: The specified destination (for example, an Amazon + // S3 bucket) was deleted. Permissions don't allow sending logs to the destination. + SortBy *string `min:"1" type:"string"` + + // If you specified a value for SortBy, the order that you want query logging + // configurations to be listed in, ASCENDING or DESCENDING. + // + // If you submit a second or subsequent ListResolverQueryLogConfigs request + // and specify the NextToken parameter, you must use the same value for SortOrder, + // if any, as in the previous request. + SortOrder *string `type:"string" enum:"SortOrder"` } // String returns the string representation -func (s ListResolverEndpointsInput) String() string { +func (s ListResolverQueryLogConfigsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListResolverEndpointsInput) GoString() string { +func (s ListResolverQueryLogConfigsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListResolverEndpointsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListResolverEndpointsInput"} +func (s *ListResolverQueryLogConfigsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListResolverQueryLogConfigsInput"} if s.MaxResults != nil && *s.MaxResults < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) } + if s.SortBy != nil && len(*s.SortBy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SortBy", 1)) + } if s.Filters != nil { for i, v := range s.Filters { if v == nil { @@ -4182,71 +6557,99 @@ func (s *ListResolverEndpointsInput) Validate() error { } // SetFilters sets the Filters field's value. -func (s *ListResolverEndpointsInput) SetFilters(v []*Filter) *ListResolverEndpointsInput { +func (s *ListResolverQueryLogConfigsInput) SetFilters(v []*Filter) *ListResolverQueryLogConfigsInput { s.Filters = v return s } // SetMaxResults sets the MaxResults field's value. 
-func (s *ListResolverEndpointsInput) SetMaxResults(v int64) *ListResolverEndpointsInput { +func (s *ListResolverQueryLogConfigsInput) SetMaxResults(v int64) *ListResolverQueryLogConfigsInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. -func (s *ListResolverEndpointsInput) SetNextToken(v string) *ListResolverEndpointsInput { +func (s *ListResolverQueryLogConfigsInput) SetNextToken(v string) *ListResolverQueryLogConfigsInput { s.NextToken = &v return s } -type ListResolverEndpointsOutput struct { - _ struct{} `type:"structure"` +// SetSortBy sets the SortBy field's value. +func (s *ListResolverQueryLogConfigsInput) SetSortBy(v string) *ListResolverQueryLogConfigsInput { + s.SortBy = &v + return s +} - // The value that you specified for MaxResults in the request. - MaxResults *int64 `min:"1" type:"integer"` +// SetSortOrder sets the SortOrder field's value. +func (s *ListResolverQueryLogConfigsInput) SetSortOrder(v string) *ListResolverQueryLogConfigsInput { + s.SortOrder = &v + return s +} - // If more than MaxResults IP addresses match the specified criteria, you can - // submit another ListResolverEndpoint request to get the next group of results. +type ListResolverQueryLogConfigsOutput struct { + _ struct{} `type:"structure"` + + // If there are more than MaxResults query logging configurations, you can submit + // another ListResolverQueryLogConfigs request to get the next group of configurations. // In the next request, specify the value of NextToken from the previous response. NextToken *string `type:"string"` - // The resolver endpoints that were created by using the current AWS account, - // and that match the specified filters, if any. - ResolverEndpoints []*ResolverEndpoint `type:"list"` + // A list that contains one ResolverQueryLogConfig element for each query logging + // configuration that matches the values that you specified for Filter. + ResolverQueryLogConfigs []*ResolverQueryLogConfig `type:"list"` + + // The total number of query logging configurations that were created by the + // current account in the specified Region. This count can differ from the number + // of query logging configurations that are returned in a ListResolverQueryLogConfigs + // response, depending on the values that you specify in the request. + TotalCount *int64 `type:"integer"` + + // The total number of query logging configurations that were created by the + // current account in the specified Region and that match the filters that were + // specified in the ListResolverQueryLogConfigs request. For the total number + // of query logging configurations that were created by the current account + // in the specified Region, see TotalCount. + TotalFilteredCount *int64 `type:"integer"` } // String returns the string representation -func (s ListResolverEndpointsOutput) String() string { +func (s ListResolverQueryLogConfigsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListResolverEndpointsOutput) GoString() string { +func (s ListResolverQueryLogConfigsOutput) GoString() string { return s.String() } -// SetMaxResults sets the MaxResults field's value. -func (s *ListResolverEndpointsOutput) SetMaxResults(v int64) *ListResolverEndpointsOutput { - s.MaxResults = &v +// SetNextToken sets the NextToken field's value. +func (s *ListResolverQueryLogConfigsOutput) SetNextToken(v string) *ListResolverQueryLogConfigsOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. 
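A sketch showing how TotalCount and TotalFilteredCount relate when a Filter is applied to ListResolverQueryLogConfigs; the configuration name and helper names are placeholders:

package resolverexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53resolver"
)

// countQueryLogConfigs reports how many query logging configurations match a
// Name filter versus how many exist in the Region, using the TotalCount and
// TotalFilteredCount fields described above.
func countQueryLogConfigs(svc *route53resolver.Route53Resolver) error {
	out, err := svc.ListResolverQueryLogConfigs(&route53resolver.ListResolverQueryLogConfigsInput{
		MaxResults: aws.Int64(1),
		Filters: []*route53resolver.Filter{
			{Name: aws.String("Name"), Values: []*string{aws.String("example-config")}},
		},
	})
	if err != nil {
		return err
	}
	fmt.Printf("matched %d of %d query logging configurations in this Region\n",
		aws.Int64Value(out.TotalFilteredCount), aws.Int64Value(out.TotalCount))
	return nil
}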
-func (s *ListResolverEndpointsOutput) SetNextToken(v string) *ListResolverEndpointsOutput { - s.NextToken = &v +// SetResolverQueryLogConfigs sets the ResolverQueryLogConfigs field's value. +func (s *ListResolverQueryLogConfigsOutput) SetResolverQueryLogConfigs(v []*ResolverQueryLogConfig) *ListResolverQueryLogConfigsOutput { + s.ResolverQueryLogConfigs = v return s } -// SetResolverEndpoints sets the ResolverEndpoints field's value. -func (s *ListResolverEndpointsOutput) SetResolverEndpoints(v []*ResolverEndpoint) *ListResolverEndpointsOutput { - s.ResolverEndpoints = v +// SetTotalCount sets the TotalCount field's value. +func (s *ListResolverQueryLogConfigsOutput) SetTotalCount(v int64) *ListResolverQueryLogConfigsOutput { + s.TotalCount = &v + return s +} + +// SetTotalFilteredCount sets the TotalFilteredCount field's value. +func (s *ListResolverQueryLogConfigsOutput) SetTotalFilteredCount(v int64) *ListResolverQueryLogConfigsOutput { + s.TotalFilteredCount = &v return s } type ListResolverRuleAssociationsInput struct { _ struct{} `type:"structure"` - // An optional specification to return a subset of resolver rules, such as resolver + // An optional specification to return a subset of Resolver rules, such as Resolver // rules that are associated with the same VPC ID. // // If you submit a second or subsequent ListResolverRuleAssociations request @@ -4330,7 +6733,7 @@ type ListResolverRuleAssociationsOutput struct { // previous response. NextToken *string `type:"string"` - // The associations that were created between resolver rules and VPCs using + // The associations that were created between Resolver rules and VPCs using // the current AWS account, and that match the specified filters, if any. ResolverRuleAssociations []*ResolverRuleAssociation `type:"list"` } @@ -4366,23 +6769,23 @@ func (s *ListResolverRuleAssociationsOutput) SetResolverRuleAssociations(v []*Re type ListResolverRulesInput struct { _ struct{} `type:"structure"` - // An optional specification to return a subset of resolver rules, such as all - // resolver rules that are associated with the same resolver endpoint. + // An optional specification to return a subset of Resolver rules, such as all + // Resolver rules that are associated with the same Resolver endpoint. // // If you submit a second or subsequent ListResolverRules request and specify // the NextToken parameter, you must use the same values for Filters, if any, // as in the previous request. Filters []*Filter `type:"list"` - // The maximum number of resolver rules that you want to return in the response + // The maximum number of Resolver rules that you want to return in the response // to a ListResolverRules request. If you don't specify a value for MaxResults, - // Resolver returns up to 100 resolver rules. + // Resolver returns up to 100 Resolver rules. MaxResults *int64 `min:"1" type:"integer"` // For the first ListResolverRules request, omit this value. // - // If you have more than MaxResults resolver rules, you can submit another ListResolverRules - // request to get the next group of resolver rules. In the next request, specify + // If you have more than MaxResults Resolver rules, you can submit another ListResolverRules + // request to get the next group of Resolver rules. In the next request, specify // the value of NextToken from the previous response. NextToken *string `type:"string"` } @@ -4444,12 +6847,12 @@ type ListResolverRulesOutput struct { // The value that you specified for MaxResults in the request. 
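A sketch of the endpoint-scoped listing mentioned above: "ResolverEndpointId" is the documented filter name for Resolver rules, and additional pages would be fetched with NextToken as in the earlier loops. Names are illustrative:

package resolverexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53resolver"
)

// firstPageOfRulesForEndpoint requests the Resolver rules that forward
// queries through one outbound endpoint.
func firstPageOfRulesForEndpoint(svc *route53resolver.Route53Resolver, endpointID string) ([]*route53resolver.ResolverRule, *string, error) {
	out, err := svc.ListResolverRules(&route53resolver.ListResolverRulesInput{
		MaxResults: aws.Int64(100),
		Filters: []*route53resolver.Filter{
			{Name: aws.String("ResolverEndpointId"), Values: []*string{aws.String(endpointID)}},
		},
	})
	if err != nil {
		return nil, nil, err
	}
	return out.ResolverRules, out.NextToken, nil
}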
MaxResults *int64 `min:"1" type:"integer"` - // If more than MaxResults resolver rules match the specified criteria, you + // If more than MaxResults Resolver rules match the specified criteria, you // can submit another ListResolverRules request to get the next group of results. // In the next request, specify the value of NextToken from the previous response. NextToken *string `type:"string"` - // The resolver rules that were created using the current AWS account and that + // The Resolver rules that were created using the current AWS account and that // match the specified filters, if any. ResolverRules []*ResolverRule `type:"list"` } @@ -4565,38 +6968,148 @@ type ListTagsForResourceOutput struct { } // String returns the string representation -func (s ListTagsForResourceOutput) String() string { +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + +type PutResolverQueryLogConfigPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the account that you want to share rules + // with. + // + // Arn is a required field + Arn *string `min:"1" type:"string" required:"true"` + + // An AWS Identity and Access Management policy statement that lists the query + // logging configurations that you want to share with another AWS account and + // the operations that you want the account to be able to perform. You can specify + // the following operations in the Actions section of the statement: + // + // * route53resolver:AssociateResolverQueryLogConfig + // + // * route53resolver:DisassociateResolverQueryLogConfig + // + // * route53resolver:ListResolverQueryLogConfigAssociations + // + // * route53resolver:ListResolverQueryLogConfigs + // + // In the Resource section of the statement, you specify the ARNs for the query + // logging configurations that you want to share with the account that you specified + // in Arn. + // + // ResolverQueryLogConfigPolicy is a required field + ResolverQueryLogConfigPolicy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutResolverQueryLogConfigPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutResolverQueryLogConfigPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutResolverQueryLogConfigPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutResolverQueryLogConfigPolicyInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 1)) + } + if s.ResolverQueryLogConfigPolicy == nil { + invalidParams.Add(request.NewErrParamRequired("ResolverQueryLogConfigPolicy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. 
+func (s *PutResolverQueryLogConfigPolicyInput) SetArn(v string) *PutResolverQueryLogConfigPolicyInput { + s.Arn = &v + return s +} + +// SetResolverQueryLogConfigPolicy sets the ResolverQueryLogConfigPolicy field's value. +func (s *PutResolverQueryLogConfigPolicyInput) SetResolverQueryLogConfigPolicy(v string) *PutResolverQueryLogConfigPolicyInput { + s.ResolverQueryLogConfigPolicy = &v + return s +} + +// The response to a PutResolverQueryLogConfigPolicy request. +type PutResolverQueryLogConfigPolicyOutput struct { + _ struct{} `type:"structure"` + + // Whether the PutResolverQueryLogConfigPolicy request was successful. + ReturnValue *bool `type:"boolean"` +} + +// String returns the string representation +func (s PutResolverQueryLogConfigPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListTagsForResourceOutput) GoString() string { +func (s PutResolverQueryLogConfigPolicyOutput) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { - s.NextToken = &v - return s -} - -// SetTags sets the Tags field's value. -func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { - s.Tags = v +// SetReturnValue sets the ReturnValue field's value. +func (s *PutResolverQueryLogConfigPolicyOutput) SetReturnValue(v bool) *PutResolverQueryLogConfigPolicyOutput { + s.ReturnValue = &v return s } type PutResolverRulePolicyInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the account that you want to grant permissions - // to. + // The Amazon Resource Name (ARN) of the account that you want to share rules + // with. // // Arn is a required field Arn *string `min:"1" type:"string" required:"true"` - // An AWS Identity and Access Management policy statement that lists the permissions - // that you want to grant to another AWS account. + // An AWS Identity and Access Management policy statement that lists the rules + // that you want to share with another AWS account and the operations that you + // want the account to be able to perform. You can specify the following operations + // in the Actions section of the statement: + // + // * route53resolver:GetResolverRule + // + // * route53resolver:AssociateResolverRule + // + // * route53resolver:DisassociateResolverRule + // + // * route53resolver:ListResolverRules + // + // * route53resolver:ListResolverRuleAssociations + // + // In the Resource section of the statement, you specify the ARNs for the rules + // that you want to share with the account that you specified in Arn. // // ResolverRulePolicy is a required field ResolverRulePolicy *string `type:"string" required:"true"` @@ -4667,58 +7180,95 @@ func (s *PutResolverRulePolicyOutput) SetReturnValue(v bool) *PutResolverRulePol return s } -// In the response to a CreateResolverEndpoint, DeleteResolverEndpoint, GetResolverEndpoint, -// ListResolverEndpoints, or UpdateResolverEndpoint request, a complex type -// that contains settings for an existing inbound or outbound resolver endpoint. 
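A sketch of sharing a query logging configuration, with the four actions listed above placed in the policy statement's Action section and the configuration ARN in Resource, as the comments describe. The exact policy grammar should be confirmed against the Route 53 Resolver documentation, all ARN values are placeholders, and PutResolverRulePolicy follows the same call shape with its own action list:

package resolverexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53resolver"
)

// shareQueryLogConfig attaches a sharing policy to a query logging
// configuration and reports whether the service accepted it.
func shareQueryLogConfig(svc *route53resolver.Route53Resolver, accountArn, configArn string) error {
	policy := fmt.Sprintf(`{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Action": [
      "route53resolver:AssociateResolverQueryLogConfig",
      "route53resolver:DisassociateResolverQueryLogConfig",
      "route53resolver:ListResolverQueryLogConfigAssociations",
      "route53resolver:ListResolverQueryLogConfigs"
    ],
    "Resource": ["%s"]
  }]
}`, configArn)

	out, err := svc.PutResolverQueryLogConfigPolicy(&route53resolver.PutResolverQueryLogConfigPolicyInput{
		Arn:                          aws.String(accountArn),
		ResolverQueryLogConfigPolicy: aws.String(policy),
	})
	if err != nil {
		return err
	}
	if !aws.BoolValue(out.ReturnValue) {
		return fmt.Errorf("PutResolverQueryLogConfigPolicy was not successful")
	}
	return nil
}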
+// In the response to a CreateResolverEndpoint (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverEndpoint.html), +// DeleteResolverEndpoint (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DeleteResolverEndpoint.html), +// GetResolverEndpoint (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverEndpoint.html), +// ListResolverEndpoints (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverEndpoints.html), +// or UpdateResolverEndpoint (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_UpdateResolverEndpoint.html) +// request, a complex type that contains settings for an existing inbound or +// outbound Resolver endpoint. type ResolverEndpoint struct { _ struct{} `type:"structure"` - // The ARN (Amazon Resource Name) for the resolver endpoint. + // The ARN (Amazon Resource Name) for the Resolver endpoint. Arn *string `min:"1" type:"string"` // The date and time that the endpoint was created, in Unix time format and // Coordinated Universal Time (UTC). CreationTime *string `min:"20" type:"string"` - // A unique string that identifies the request that created the resolver endpoint. + // A unique string that identifies the request that created the Resolver endpoint. // The CreatorRequestId allows failed requests to be retried without the risk // of executing the operation twice. CreatorRequestId *string `min:"1" type:"string"` - // Indicates whether the resolver endpoint allows inbound or outbound DNS queries: + // Indicates whether the Resolver endpoint allows inbound or outbound DNS queries: // - // * INBOUND: allows DNS queries to your VPC from your network or another - // VPC + // * INBOUND: allows DNS queries to your VPC from your network // - // * OUTBOUND: allows DNS queries from your VPC to your network or another - // VPC + // * OUTBOUND: allows DNS queries from your VPC to your network Direction *string `type:"string" enum:"ResolverEndpointDirection"` - // The ID of the VPC that you want to create the resolver endpoint in. + // The ID of the VPC that you want to create the Resolver endpoint in. HostVPCId *string `min:"1" type:"string"` - // The ID of the resolver endpoint. + // The ID of the Resolver endpoint. Id *string `min:"1" type:"string"` - // The number of IP addresses that the resolver endpoint can use for DNS queries. + // The number of IP addresses that the Resolver endpoint can use for DNS queries. IpAddressCount *int64 `type:"integer"` // The date and time that the endpoint was last modified, in Unix time format // and Coordinated Universal Time (UTC). ModificationTime *string `min:"20" type:"string"` - // The name that you assigned to the resolver endpoint when you submitted a - // CreateResolverEndpoint request. + // The name that you assigned to the Resolver endpoint when you submitted a + // CreateResolverEndpoint (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverEndpoint.html) + // request. Name *string `type:"string"` // The ID of one or more security groups that control access to this VPC. The - // security group must include one or more inbound resolver rules. + // security group must include one or more inbound rules (for inbound endpoints) + // or outbound rules (for outbound endpoints). Inbound and outbound rules must + // allow TCP and UDP access. For inbound access, open port 53. 
For outbound + // access, open the port that you're using for DNS queries on your network. SecurityGroupIds []*string `type:"list"` - // A code that specifies the current status of the resolver endpoint. + // A code that specifies the current status of the Resolver endpoint. Valid + // values include the following: + // + // * CREATING: Resolver is creating and configuring one or more Amazon VPC + // network interfaces for this endpoint. + // + // * OPERATIONAL: The Amazon VPC network interfaces for this endpoint are + // correctly configured and able to pass inbound or outbound DNS queries + // between your network and Resolver. + // + // * UPDATING: Resolver is associating or disassociating one or more network + // interfaces with this endpoint. + // + // * AUTO_RECOVERING: Resolver is trying to recover one or more of the network + // interfaces that are associated with this endpoint. During the recovery + // process, the endpoint functions with limited capacity because of the limit + // on the number of DNS queries per IP address (per network interface). For + // the current limit, see Limits on Route 53 Resolver (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html#limits-api-entities-resolver). + // + // * ACTION_NEEDED: This endpoint is unhealthy, and Resolver can't automatically + // recover it. To resolve the problem, we recommend that you check each IP + // address that you associated with the endpoint. For each IP address that + // isn't available, add another IP address and then delete the IP address + // that isn't available. (An endpoint must always include at least two IP + // addresses.) A status of ACTION_NEEDED can have a variety of causes. Here + // are two common causes: One or more of the network interfaces that are + // associated with the endpoint were deleted using Amazon VPC. The network + // interface couldn't be created for some reason that's outside the control + // of Resolver. + // + // * DELETING: Resolver is deleting this endpoint and the associated network + // interfaces. Status *string `type:"string" enum:"ResolverEndpointStatus"` - // A detailed description of the status of the resolver endpoint. + // A detailed description of the status of the Resolver endpoint. StatusMessage *string `type:"string"` } @@ -4804,32 +7354,278 @@ func (s *ResolverEndpoint) SetStatusMessage(v string) *ResolverEndpoint { return s } -// For queries that originate in your VPC, detailed information about a resolver +// In the response to a CreateResolverQueryLogConfig (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverQueryLogConfig.html), +// DeleteResolverQueryLogConfig (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DeleteResolverQueryLogConfig.html), +// GetResolverQueryLogConfig (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverQueryLogConfig.html), +// or ListResolverQueryLogConfigs (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverQueryLogConfigs.html) +// request, a complex type that contains settings for one query logging configuration. +type ResolverQueryLogConfig struct { + _ struct{} `type:"structure"` + + // The ARN for the query logging configuration. + Arn *string `min:"1" type:"string"` + + // The number of VPCs that are associated with the query logging configuration. 
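A sketch that polls for the OPERATIONAL status described above after creating or updating an endpoint; the polling interval and attempt budget are arbitrary choices, and the helper names are made up:

package resolverexamples

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53resolver"
)

// waitForEndpointOperational polls GetResolverEndpoint until the endpoint
// reports OPERATIONAL, or returns early on a status that needs intervention.
func waitForEndpointOperational(svc *route53resolver.Route53Resolver, endpointID string) error {
	for attempt := 0; attempt < 60; attempt++ {
		out, err := svc.GetResolverEndpoint(&route53resolver.GetResolverEndpointInput{
			ResolverEndpointId: aws.String(endpointID),
		})
		if err != nil {
			return err
		}
		switch status := aws.StringValue(out.ResolverEndpoint.Status); status {
		case "OPERATIONAL":
			return nil
		case "ACTION_NEEDED", "DELETING":
			return fmt.Errorf("endpoint %s entered status %s: %s",
				endpointID, status, aws.StringValue(out.ResolverEndpoint.StatusMessage))
		}
		time.Sleep(10 * time.Second)
	}
	return fmt.Errorf("timed out waiting for endpoint %s to become OPERATIONAL", endpointID)
}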
+ AssociationCount *int64 `type:"integer"` + + // The date and time that the query logging configuration was created, in Unix + // time format and Coordinated Universal Time (UTC). + CreationTime *string `min:"20" type:"string"` + + // A unique string that identifies the request that created the query logging + // configuration. The CreatorRequestId allows failed requests to be retried + // without the risk of executing the operation twice. + CreatorRequestId *string `min:"1" type:"string"` + + // The ARN of the resource that you want Resolver to send query logs: an Amazon + // S3 bucket, a CloudWatch Logs log group, or a Kinesis Data Firehose delivery + // stream. + DestinationArn *string `min:"1" type:"string"` + + // The ID for the query logging configuration. + Id *string `min:"1" type:"string"` + + // The name of the query logging configuration. + Name *string `min:"1" type:"string"` + + // The AWS account ID for the account that created the query logging configuration. + OwnerId *string `min:"12" type:"string"` + + // An indication of whether the query logging configuration is shared with other + // AWS accounts, or was shared with the current account by another AWS account. + // Sharing is configured through AWS Resource Access Manager (AWS RAM). + ShareStatus *string `type:"string" enum:"ShareStatus"` + + // The status of the specified query logging configuration. Valid values include + // the following: + // + // * CREATING: Resolver is creating the query logging configuration. + // + // * CREATED: The query logging configuration was successfully created. Resolver + // is logging queries that originate in the specified VPC. + // + // * DELETING: Resolver is deleting this query logging configuration. + // + // * FAILED: Resolver can't deliver logs to the location that is specified + // in the query logging configuration. Here are two common causes: The specified + // destination (for example, an Amazon S3 bucket) was deleted. Permissions + // don't allow sending logs to the destination. + Status *string `type:"string" enum:"ResolverQueryLogConfigStatus"` +} + +// String returns the string representation +func (s ResolverQueryLogConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResolverQueryLogConfig) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *ResolverQueryLogConfig) SetArn(v string) *ResolverQueryLogConfig { + s.Arn = &v + return s +} + +// SetAssociationCount sets the AssociationCount field's value. +func (s *ResolverQueryLogConfig) SetAssociationCount(v int64) *ResolverQueryLogConfig { + s.AssociationCount = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ResolverQueryLogConfig) SetCreationTime(v string) *ResolverQueryLogConfig { + s.CreationTime = &v + return s +} + +// SetCreatorRequestId sets the CreatorRequestId field's value. +func (s *ResolverQueryLogConfig) SetCreatorRequestId(v string) *ResolverQueryLogConfig { + s.CreatorRequestId = &v + return s +} + +// SetDestinationArn sets the DestinationArn field's value. +func (s *ResolverQueryLogConfig) SetDestinationArn(v string) *ResolverQueryLogConfig { + s.DestinationArn = &v + return s +} + +// SetId sets the Id field's value. +func (s *ResolverQueryLogConfig) SetId(v string) *ResolverQueryLogConfig { + s.Id = &v + return s +} + +// SetName sets the Name field's value. 
+func (s *ResolverQueryLogConfig) SetName(v string) *ResolverQueryLogConfig { + s.Name = &v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *ResolverQueryLogConfig) SetOwnerId(v string) *ResolverQueryLogConfig { + s.OwnerId = &v + return s +} + +// SetShareStatus sets the ShareStatus field's value. +func (s *ResolverQueryLogConfig) SetShareStatus(v string) *ResolverQueryLogConfig { + s.ShareStatus = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ResolverQueryLogConfig) SetStatus(v string) *ResolverQueryLogConfig { + s.Status = &v + return s +} + +// In the response to an AssociateResolverQueryLogConfig (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_AssociateResolverQueryLogConfig.html), +// DisassociateResolverQueryLogConfig (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverQueryLogConfig.html), +// GetResolverQueryLogConfigAssociation (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverQueryLogConfigAssociation.html), +// or ListResolverQueryLogConfigAssociations (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverQueryLogConfigAssociations.html), +// request, a complex type that contains settings for a specified association +// between an Amazon VPC and a query logging configuration. +type ResolverQueryLogConfigAssociation struct { + _ struct{} `type:"structure"` + + // The date and time that the VPC was associated with the query logging configuration, + // in Unix time format and Coordinated Universal Time (UTC). + CreationTime *string `min:"20" type:"string"` + + // If the value of Status is FAILED, the value of Error indicates the cause: + // + // * DESTINATION_NOT_FOUND: The specified destination (for example, an Amazon + // S3 bucket) was deleted. + // + // * ACCESS_DENIED: Permissions don't allow sending logs to the destination. + // + // If the value of Status is a value other than FAILED, Error is null. + Error *string `type:"string" enum:"ResolverQueryLogConfigAssociationError"` + + // Contains additional information about the error. If the value or Error is + // null, the value of ErrorMessage also is null. + ErrorMessage *string `type:"string"` + + // The ID of the query logging association. + Id *string `min:"1" type:"string"` + + // The ID of the query logging configuration that a VPC is associated with. + ResolverQueryLogConfigId *string `min:"1" type:"string"` + + // The ID of the Amazon VPC that is associated with the query logging configuration. + ResourceId *string `min:"1" type:"string"` + + // The status of the specified query logging association. Valid values include + // the following: + // + // * CREATING: Resolver is creating an association between an Amazon VPC + // and a query logging configuration. + // + // * CREATED: The association between an Amazon VPC and a query logging configuration + // was successfully created. Resolver is logging queries that originate in + // the specified VPC. + // + // * DELETING: Resolver is deleting this query logging association. + // + // * FAILED: Resolver either couldn't create or couldn't delete the query + // logging association. 
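Taken together, ResolverQueryLogConfig and ResolverQueryLogConfigAssociation describe the two steps of enabling query logging: creating a configuration and associating it with a VPC. A hedged sketch of both calls, assuming the same imports and client setup as the earlier sketch; the destination ARN, VPC ID, and CreatorRequestId are placeholder assumptions:

func enableQueryLogging(svc *route53resolver.Route53Resolver) error {
	// Create a configuration that delivers query logs to an S3 bucket (placeholder ARN).
	cfg, err := svc.CreateResolverQueryLogConfig(&route53resolver.CreateResolverQueryLogConfigInput{
		Name:             aws.String("example-query-logs"),
		DestinationArn:   aws.String("arn:aws:s3:::example-query-log-bucket"), // placeholder
		CreatorRequestId: aws.String("example-request-id"),                    // placeholder
	})
	if err != nil {
		return err
	}

	// Associate the configuration with a VPC (placeholder VPC ID); Resolver then
	// starts logging queries that originate in that VPC.
	_, err = svc.AssociateResolverQueryLogConfig(&route53resolver.AssociateResolverQueryLogConfigInput{
		ResolverQueryLogConfigId: cfg.ResolverQueryLogConfig.Id,
		ResourceId:               aws.String("vpc-0123456789abcdef0"), // placeholder
	})
	return err
}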
+ Status *string `type:"string" enum:"ResolverQueryLogConfigAssociationStatus"` +} + +// String returns the string representation +func (s ResolverQueryLogConfigAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResolverQueryLogConfigAssociation) GoString() string { + return s.String() +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ResolverQueryLogConfigAssociation) SetCreationTime(v string) *ResolverQueryLogConfigAssociation { + s.CreationTime = &v + return s +} + +// SetError sets the Error field's value. +func (s *ResolverQueryLogConfigAssociation) SetError(v string) *ResolverQueryLogConfigAssociation { + s.Error = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *ResolverQueryLogConfigAssociation) SetErrorMessage(v string) *ResolverQueryLogConfigAssociation { + s.ErrorMessage = &v + return s +} + +// SetId sets the Id field's value. +func (s *ResolverQueryLogConfigAssociation) SetId(v string) *ResolverQueryLogConfigAssociation { + s.Id = &v + return s +} + +// SetResolverQueryLogConfigId sets the ResolverQueryLogConfigId field's value. +func (s *ResolverQueryLogConfigAssociation) SetResolverQueryLogConfigId(v string) *ResolverQueryLogConfigAssociation { + s.ResolverQueryLogConfigId = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *ResolverQueryLogConfigAssociation) SetResourceId(v string) *ResolverQueryLogConfigAssociation { + s.ResourceId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ResolverQueryLogConfigAssociation) SetStatus(v string) *ResolverQueryLogConfigAssociation { + s.Status = &v + return s +} + +// For queries that originate in your VPC, detailed information about a Resolver // rule, which specifies how to route DNS queries out of the VPC. The ResolverRule -// parameter appears in the response to a CreateResolverRule, DeleteResolverRule, -// GetResolverRule, ListResolverRules, or UpdateResolverRule request. +// parameter appears in the response to a CreateResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverRule.html), +// DeleteResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DeleteResolverRule.html), +// GetResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverRule.html), +// ListResolverRules (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRules.html), +// or UpdateResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_UpdateResolverRule.html) +// request. type ResolverRule struct { _ struct{} `type:"structure"` - // The ARN (Amazon Resource Name) for the resolver rule specified by Id. + // The ARN (Amazon Resource Name) for the Resolver rule specified by Id. Arn *string `min:"1" type:"string"` - // A unique string that you specified when you created the resolver rule. CreatorRequestIdidentifies - // the request and allows failed requests to be retried without the risk of - // executing the operation twice. + // The date and time that the Resolver rule was created, in Unix time format + // and Coordinated Universal Time (UTC). + CreationTime *string `min:"20" type:"string"` + + // A unique string that you specified when you created the Resolver rule. 
CreatorRequestId + // identifies the request and allows failed requests to be retried without the + // risk of executing the operation twice. CreatorRequestId *string `min:"1" type:"string"` // DNS queries for this domain name are forwarded to the IP addresses that are - // specified in TargetIps. If a query matches multiple resolver rules (example.com - // and www.example.com), the query is routed using the resolver rule that contains + // specified in TargetIps. If a query matches multiple Resolver rules (example.com + // and www.example.com), the query is routed using the Resolver rule that contains // the most specific domain name (www.example.com). DomainName *string `min:"1" type:"string"` - // The ID that Resolver assigned to the resolver rule when you created it. + // The ID that Resolver assigned to the Resolver rule when you created it. Id *string `min:"1" type:"string"` - // The name for the resolver rule, which you specified when you created the - // resolver rule. + // The date and time that the Resolver rule was last updated, in Unix time format + // and Coordinated Universal Time (UTC). + ModificationTime *string `min:"20" type:"string"` + + // The name for the Resolver rule, which you specified when you created the + // Resolver rule. Name *string `type:"string"` // When a rule is shared with another AWS account, the account ID of the account @@ -4839,7 +7635,20 @@ type ResolverRule struct { // The ID of the endpoint that the rule is associated with. ResolverEndpointId *string `min:"1" type:"string"` - // This value is always FORWARD. Other resolver rule types aren't supported. + // When you want to forward DNS queries for specified domain name to resolvers + // on your network, specify FORWARD. + // + // When you have a forwarding rule to forward DNS queries for a domain to your + // network and you want Resolver to process queries for a subdomain of that + // domain, specify SYSTEM. + // + // For example, to forward DNS queries for example.com to resolvers on your + // network, you create a rule and specify FORWARD for RuleType. To then have + // Resolver process queries for apex.example.com, you create a rule and specify + // SYSTEM for RuleType. + // + // Currently, only Resolver can create rules that have a value of RECURSIVE + // for RuleType. RuleType *string `type:"string" enum:"RuleTypeOption"` // Whether the rules is shared and, if so, whether the current account is sharing @@ -4847,13 +7656,15 @@ type ResolverRule struct { // the current account. ShareStatus *string `type:"string" enum:"ShareStatus"` - // A code that specifies the current status of the resolver rule. + // A code that specifies the current status of the Resolver rule. Status *string `type:"string" enum:"ResolverRuleStatus"` - // A detailed description of the status of a resolver rule. + // A detailed description of the status of a Resolver rule. StatusMessage *string `type:"string"` - // An array that contains the IP addresses and ports that you want to forward + // An array that contains the IP addresses and ports that an outbound endpoint + // forwards DNS queries to. Typically, these are the IP addresses of DNS resolvers + // on your network. Specify IPv4 addresses. IPv6 is not supported. TargetIps []*TargetAddress `min:"1" type:"list"` } @@ -4873,6 +7684,12 @@ func (s *ResolverRule) SetArn(v string) *ResolverRule { return s } +// SetCreationTime sets the CreationTime field's value. 
+func (s *ResolverRule) SetCreationTime(v string) *ResolverRule { + s.CreationTime = &v + return s +} + // SetCreatorRequestId sets the CreatorRequestId field's value. func (s *ResolverRule) SetCreatorRequestId(v string) *ResolverRule { s.CreatorRequestId = &v @@ -4891,6 +7708,12 @@ func (s *ResolverRule) SetId(v string) *ResolverRule { return s } +// SetModificationTime sets the ModificationTime field's value. +func (s *ResolverRule) SetModificationTime(v string) *ResolverRule { + s.ModificationTime = &v + return s +} + // SetName sets the Name field's value. func (s *ResolverRule) SetName(v string) *ResolverRule { s.Name = &v @@ -4939,32 +7762,36 @@ func (s *ResolverRule) SetTargetIps(v []*TargetAddress) *ResolverRule { return s } -// In the response to an AssociateResolverRule, DisassociateResolverRule, or -// ListResolverRuleAssociations request, information about an association between -// a resolver rule and a VPC. +// In the response to an AssociateResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_AssociateResolverRule.html), +// DisassociateResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_DisassociateResolverRule.html), +// or ListResolverRuleAssociations (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRuleAssociations.html) +// request, provides information about an association between a Resolver rule +// and a VPC. The association determines which DNS queries that originate in +// the VPC are forwarded to your network. type ResolverRuleAssociation struct { _ struct{} `type:"structure"` - // The ID of the association between a resolver rule and a VPC. Resolver assigns - // this value when you submit an AssociateResolverRule request. + // The ID of the association between a Resolver rule and a VPC. Resolver assigns + // this value when you submit an AssociateResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_AssociateResolverRule.html) + // request. Id *string `min:"1" type:"string"` - // The name of an association between a resolver rule and a VPC. + // The name of an association between a Resolver rule and a VPC. Name *string `type:"string"` - // The ID of the resolver rule that you associated with the VPC that is specified + // The ID of the Resolver rule that you associated with the VPC that is specified // by VPCId. ResolverRuleId *string `min:"1" type:"string"` - // A code that specifies the current status of the association between a resolver + // A code that specifies the current status of the association between a Resolver // rule and a VPC. Status *string `type:"string" enum:"ResolverRuleAssociationStatus"` - // A detailed description of the status of the association between a resolver + // A detailed description of the status of the association between a Resolver // rule and a VPC. StatusMessage *string `type:"string"` - // The ID of the VPC that you associated the resolver rule with. + // The ID of the VPC that you associated the Resolver rule with. VPCId *string `min:"1" type:"string"` } @@ -5014,16 +7841,16 @@ func (s *ResolverRuleAssociation) SetVPCId(v string) *ResolverRuleAssociation { return s } -// In an UpdateResolverRule request, information about the changes that you -// want to make. +// In an UpdateResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_UpdateResolverRule.html) +// request, information about the changes that you want to make. 
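The RuleType and TargetIps documentation above describes how FORWARD rules route queries for a domain through an outbound endpoint to resolvers on your network. A hedged sketch of creating such a rule, assuming the same imports and client setup as the earlier sketches; the outbound endpoint ID, target IP addresses, and CreatorRequestId are placeholder assumptions:

func createForwardRule(svc *route53resolver.Route53Resolver) (*route53resolver.ResolverRule, error) {
	out, err := svc.CreateResolverRule(&route53resolver.CreateResolverRuleInput{
		CreatorRequestId:   aws.String("example-request-id"), // placeholder
		DomainName:         aws.String("example.com"),
		RuleType:           aws.String(route53resolver.RuleTypeOptionForward),
		Name:               aws.String("forward-example-com"),
		ResolverEndpointId: aws.String("rslvr-out-EXAMPLE"), // placeholder outbound endpoint
		TargetIps: []*route53resolver.TargetAddress{
			// IPv4 addresses of DNS resolvers on your network (placeholders).
			{Ip: aws.String("192.0.2.44"), Port: aws.Int64(53)},
			{Ip: aws.String("192.0.2.45"), Port: aws.Int64(53)},
		},
	})
	if err != nil {
		return nil, err
	}
	return out.ResolverRule, nil
}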
type ResolverRuleConfig struct { _ struct{} `type:"structure"` - // The new name for the resolver rule. The name that you specify appears in + // The new name for the Resolver rule. The name that you specify appears in // the Resolver dashboard in the Route 53 console. Name *string `type:"string"` - // The ID of the new outbound resolver endpoint that you want to use to route + // The ID of the new outbound Resolver endpoint that you want to use to route // DNS queries to the IP addresses that you specify in TargetIps. ResolverEndpointId *string `min:"1" type:"string"` @@ -5088,8 +7915,8 @@ func (s *ResolverRuleConfig) SetTargetIps(v []*TargetAddress) *ResolverRuleConfi // The resource that you tried to create already exists. type ResourceExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -5110,17 +7937,17 @@ func (s ResourceExistsException) GoString() string { func newErrorResourceExistsException(v protocol.ResponseMetadata) error { return &ResourceExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceExistsException) Code() string { +func (s *ResourceExistsException) Code() string { return "ResourceExistsException" } // Message returns the exception's message. -func (s ResourceExistsException) Message() string { +func (s *ResourceExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5128,28 +7955,28 @@ func (s ResourceExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceExistsException) OrigErr() error { +func (s *ResourceExistsException) OrigErr() error { return nil } -func (s ResourceExistsException) Error() string { +func (s *ResourceExistsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The resource that you tried to update or delete is currently in use. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -5170,17 +7997,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. 
-func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5188,28 +8015,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource doesn't exist. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -5230,17 +8057,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5248,28 +8075,28 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource isn't available. type ResourceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -5290,17 +8117,17 @@ func (s ResourceUnavailableException) GoString() string { func newErrorResourceUnavailableException(v protocol.ResponseMetadata) error { return &ResourceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ResourceUnavailableException) Code() string { +func (s *ResourceUnavailableException) Code() string { return "ResourceUnavailableException" } // Message returns the exception's message. -func (s ResourceUnavailableException) Message() string { +func (s *ResourceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5308,22 +8135,22 @@ func (s ResourceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceUnavailableException) OrigErr() error { +func (s *ResourceUnavailableException) OrigErr() error { return nil } -func (s ResourceUnavailableException) Error() string { +func (s *ResourceUnavailableException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // One tag that you want to add to the specified resource. A tag consists of @@ -5334,11 +8161,15 @@ type Tag struct { // The name for the tag. For example, if you want to associate Resolver resources // with the account IDs of your customers for billing purposes, the value of // Key might be account-id. - Key *string `type:"string"` + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` // The value for the tag. For example, if Key is account-id, then Value might // be the ID of the customer account that you're creating the resource for. - Value *string `type:"string"` + // + // Value is a required field + Value *string `type:"string" required:"true"` } // String returns the string representation @@ -5351,6 +8182,25 @@ func (s Tag) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetKey sets the Key field's value. func (s *Tag) SetKey(v string) *Tag { s.Key = &v @@ -5369,17 +8219,17 @@ type TagResourceInput struct { // The Amazon Resource Name (ARN) for the resource that you want to add tags // to. 
To get the ARN for a resource, use the applicable Get or List command: // - // * GetResolverEndpoint + // * GetResolverEndpoint (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverEndpoint.html) // - // * GetResolverRule + // * GetResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverRule.html) // - // * GetResolverRuleAssociation + // * GetResolverRuleAssociation (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverRuleAssociation.html) // - // * ListResolverEndpoints + // * ListResolverEndpoints (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverEndpoints.html) // - // * ListResolverRuleAssociations + // * ListResolverRuleAssociations (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRuleAssociations.html) // - // * ListResolverRules + // * ListResolverRules (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRules.html) // // ResourceArn is a required field ResourceArn *string `min:"1" type:"string" required:"true"` @@ -5412,6 +8262,16 @@ func (s *TagResourceInput) Validate() error { if s.Tags == nil { invalidParams.Add(request.NewErrParamRequired("Tags")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -5445,8 +8305,8 @@ func (s TagResourceOutput) GoString() string { return s.String() } -// In a CreateResolverRule request, an array of the IPs that you want to forward -// DNS queries to. +// In a CreateResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_CreateResolverRule.html) +// request, an array of the IPs that you want to forward DNS queries to. type TargetAddress struct { _ struct{} `type:"structure"` @@ -5500,8 +8360,8 @@ func (s *TargetAddress) SetPort(v int64) *TargetAddress { // The request was throttled. Try again in a few minutes. type ThrottlingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5518,17 +8378,17 @@ func (s ThrottlingException) GoString() string { func newErrorThrottlingException(v protocol.ResponseMetadata) error { return &ThrottlingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ThrottlingException) Code() string { +func (s *ThrottlingException) Code() string { return "ThrottlingException" } // Message returns the exception's message. -func (s ThrottlingException) Message() string { +func (s *ThrottlingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5536,28 +8396,28 @@ func (s ThrottlingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThrottlingException) OrigErr() error { +func (s *ThrottlingException) OrigErr() error { return nil } -func (s ThrottlingException) Error() string { +func (s *ThrottlingException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
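With Key and Value now required on Tag, TagResourceInput.Validate walks the nested tags and rejects incomplete entries before the request is sent. A hedged sketch of tagging a Resolver rule, assuming the same setup as the earlier sketches; the resource ARN is a placeholder assumption:

func tagRule(svc *route53resolver.Route53Resolver) error {
	_, err := svc.TagResource(&route53resolver.TagResourceInput{
		// Placeholder ARN; obtain the real one from GetResolverRule or ListResolverRules.
		ResourceArn: aws.String("arn:aws:route53resolver:us-east-1:123456789012:resolver-rule/rslvr-rr-EXAMPLE"),
		Tags: []*route53resolver.Tag{
			{Key: aws.String("environment"), Value: aws.String("production")},
			// A tag with a missing Value or an empty Key now fails client-side
			// validation with an ErrInvalidParams error instead of reaching the API.
		},
	})
	return err
}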
-func (s ThrottlingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ThrottlingException) RequestID() string { - return s.respMetadata.RequestID +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource doesn't exist. type UnknownResourceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5574,17 +8434,17 @@ func (s UnknownResourceException) GoString() string { func newErrorUnknownResourceException(v protocol.ResponseMetadata) error { return &UnknownResourceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnknownResourceException) Code() string { +func (s *UnknownResourceException) Code() string { return "UnknownResourceException" } // Message returns the exception's message. -func (s UnknownResourceException) Message() string { +func (s *UnknownResourceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5592,22 +8452,22 @@ func (s UnknownResourceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnknownResourceException) OrigErr() error { +func (s *UnknownResourceException) OrigErr() error { return nil } -func (s UnknownResourceException) Error() string { +func (s *UnknownResourceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnknownResourceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnknownResourceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnknownResourceException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnknownResourceException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -5616,17 +8476,17 @@ type UntagResourceInput struct { // The Amazon Resource Name (ARN) for the resource that you want to remove tags // from. 
To get the ARN for a resource, use the applicable Get or List command: // - // * GetResolverEndpoint + // * GetResolverEndpoint (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverEndpoint.html) // - // * GetResolverRule + // * GetResolverRule (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverRule.html) // - // * GetResolverRuleAssociation + // * GetResolverRuleAssociation (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_GetResolverRuleAssociation.html) // - // * ListResolverEndpoints + // * ListResolverEndpoints (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverEndpoints.html) // - // * ListResolverRuleAssociations + // * ListResolverRuleAssociations (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRuleAssociations.html) // - // * ListResolverRules + // * ListResolverRules (https://docs.aws.amazon.com/Route53/latest/APIReference/API_route53resolver_ListResolverRules.html) // // ResourceArn is a required field ResourceArn *string `min:"1" type:"string" required:"true"` @@ -5695,10 +8555,10 @@ func (s UntagResourceOutput) GoString() string { type UpdateResolverEndpointInput struct { _ struct{} `type:"structure"` - // The name of the resolver endpoint that you want to update. + // The name of the Resolver endpoint that you want to update. Name *string `type:"string"` - // The ID of the resolver endpoint that you want to update. + // The ID of the Resolver endpoint that you want to update. // // ResolverEndpointId is a required field ResolverEndpointId *string `min:"1" type:"string" required:"true"` @@ -5768,12 +8628,12 @@ func (s *UpdateResolverEndpointOutput) SetResolverEndpoint(v *ResolverEndpoint) type UpdateResolverRuleInput struct { _ struct{} `type:"structure"` - // The new settings for the resolver rule. + // The new settings for the Resolver rule. // // Config is a required field Config *ResolverRuleConfig `type:"structure" required:"true"` - // The ID of the resolver rule that you want to update. + // The ID of the Resolver rule that you want to update. 
// // ResolverRuleId is a required field ResolverRuleId *string `min:"1" type:"string" required:"true"` @@ -5880,6 +8740,22 @@ const ( IpAddressStatusDeleteFailedFasExpired = "DELETE_FAILED_FAS_EXPIRED" ) +// IpAddressStatus_Values returns all elements of the IpAddressStatus enum +func IpAddressStatus_Values() []string { + return []string{ + IpAddressStatusCreating, + IpAddressStatusFailedCreation, + IpAddressStatusAttaching, + IpAddressStatusAttached, + IpAddressStatusRemapDetaching, + IpAddressStatusRemapAttaching, + IpAddressStatusDetaching, + IpAddressStatusFailedResourceGone, + IpAddressStatusDeleting, + IpAddressStatusDeleteFailedFasExpired, + } +} + const ( // ResolverEndpointDirectionInbound is a ResolverEndpointDirection enum value ResolverEndpointDirectionInbound = "INBOUND" @@ -5888,6 +8764,14 @@ const ( ResolverEndpointDirectionOutbound = "OUTBOUND" ) +// ResolverEndpointDirection_Values returns all elements of the ResolverEndpointDirection enum +func ResolverEndpointDirection_Values() []string { + return []string{ + ResolverEndpointDirectionInbound, + ResolverEndpointDirectionOutbound, + } +} + const ( // ResolverEndpointStatusCreating is a ResolverEndpointStatus enum value ResolverEndpointStatusCreating = "CREATING" @@ -5908,6 +8792,94 @@ const ( ResolverEndpointStatusDeleting = "DELETING" ) +// ResolverEndpointStatus_Values returns all elements of the ResolverEndpointStatus enum +func ResolverEndpointStatus_Values() []string { + return []string{ + ResolverEndpointStatusCreating, + ResolverEndpointStatusOperational, + ResolverEndpointStatusUpdating, + ResolverEndpointStatusAutoRecovering, + ResolverEndpointStatusActionNeeded, + ResolverEndpointStatusDeleting, + } +} + +const ( + // ResolverQueryLogConfigAssociationErrorNone is a ResolverQueryLogConfigAssociationError enum value + ResolverQueryLogConfigAssociationErrorNone = "NONE" + + // ResolverQueryLogConfigAssociationErrorDestinationNotFound is a ResolverQueryLogConfigAssociationError enum value + ResolverQueryLogConfigAssociationErrorDestinationNotFound = "DESTINATION_NOT_FOUND" + + // ResolverQueryLogConfigAssociationErrorAccessDenied is a ResolverQueryLogConfigAssociationError enum value + ResolverQueryLogConfigAssociationErrorAccessDenied = "ACCESS_DENIED" + + // ResolverQueryLogConfigAssociationErrorInternalServiceError is a ResolverQueryLogConfigAssociationError enum value + ResolverQueryLogConfigAssociationErrorInternalServiceError = "INTERNAL_SERVICE_ERROR" +) + +// ResolverQueryLogConfigAssociationError_Values returns all elements of the ResolverQueryLogConfigAssociationError enum +func ResolverQueryLogConfigAssociationError_Values() []string { + return []string{ + ResolverQueryLogConfigAssociationErrorNone, + ResolverQueryLogConfigAssociationErrorDestinationNotFound, + ResolverQueryLogConfigAssociationErrorAccessDenied, + ResolverQueryLogConfigAssociationErrorInternalServiceError, + } +} + +const ( + // ResolverQueryLogConfigAssociationStatusCreating is a ResolverQueryLogConfigAssociationStatus enum value + ResolverQueryLogConfigAssociationStatusCreating = "CREATING" + + // ResolverQueryLogConfigAssociationStatusActive is a ResolverQueryLogConfigAssociationStatus enum value + ResolverQueryLogConfigAssociationStatusActive = "ACTIVE" + + // ResolverQueryLogConfigAssociationStatusActionNeeded is a ResolverQueryLogConfigAssociationStatus enum value + ResolverQueryLogConfigAssociationStatusActionNeeded = "ACTION_NEEDED" + + // ResolverQueryLogConfigAssociationStatusDeleting is a ResolverQueryLogConfigAssociationStatus 
enum value + ResolverQueryLogConfigAssociationStatusDeleting = "DELETING" + + // ResolverQueryLogConfigAssociationStatusFailed is a ResolverQueryLogConfigAssociationStatus enum value + ResolverQueryLogConfigAssociationStatusFailed = "FAILED" +) + +// ResolverQueryLogConfigAssociationStatus_Values returns all elements of the ResolverQueryLogConfigAssociationStatus enum +func ResolverQueryLogConfigAssociationStatus_Values() []string { + return []string{ + ResolverQueryLogConfigAssociationStatusCreating, + ResolverQueryLogConfigAssociationStatusActive, + ResolverQueryLogConfigAssociationStatusActionNeeded, + ResolverQueryLogConfigAssociationStatusDeleting, + ResolverQueryLogConfigAssociationStatusFailed, + } +} + +const ( + // ResolverQueryLogConfigStatusCreating is a ResolverQueryLogConfigStatus enum value + ResolverQueryLogConfigStatusCreating = "CREATING" + + // ResolverQueryLogConfigStatusCreated is a ResolverQueryLogConfigStatus enum value + ResolverQueryLogConfigStatusCreated = "CREATED" + + // ResolverQueryLogConfigStatusDeleting is a ResolverQueryLogConfigStatus enum value + ResolverQueryLogConfigStatusDeleting = "DELETING" + + // ResolverQueryLogConfigStatusFailed is a ResolverQueryLogConfigStatus enum value + ResolverQueryLogConfigStatusFailed = "FAILED" +) + +// ResolverQueryLogConfigStatus_Values returns all elements of the ResolverQueryLogConfigStatus enum +func ResolverQueryLogConfigStatus_Values() []string { + return []string{ + ResolverQueryLogConfigStatusCreating, + ResolverQueryLogConfigStatusCreated, + ResolverQueryLogConfigStatusDeleting, + ResolverQueryLogConfigStatusFailed, + } +} + const ( // ResolverRuleAssociationStatusCreating is a ResolverRuleAssociationStatus enum value ResolverRuleAssociationStatusCreating = "CREATING" @@ -5925,6 +8897,17 @@ const ( ResolverRuleAssociationStatusOverridden = "OVERRIDDEN" ) +// ResolverRuleAssociationStatus_Values returns all elements of the ResolverRuleAssociationStatus enum +func ResolverRuleAssociationStatus_Values() []string { + return []string{ + ResolverRuleAssociationStatusCreating, + ResolverRuleAssociationStatusComplete, + ResolverRuleAssociationStatusDeleting, + ResolverRuleAssociationStatusFailed, + ResolverRuleAssociationStatusOverridden, + } +} + const ( // ResolverRuleStatusComplete is a ResolverRuleStatus enum value ResolverRuleStatusComplete = "COMPLETE" @@ -5939,6 +8922,16 @@ const ( ResolverRuleStatusFailed = "FAILED" ) +// ResolverRuleStatus_Values returns all elements of the ResolverRuleStatus enum +func ResolverRuleStatus_Values() []string { + return []string{ + ResolverRuleStatusComplete, + ResolverRuleStatusDeleting, + ResolverRuleStatusUpdating, + ResolverRuleStatusFailed, + } +} + const ( // RuleTypeOptionForward is a RuleTypeOption enum value RuleTypeOptionForward = "FORWARD" @@ -5950,6 +8943,15 @@ const ( RuleTypeOptionRecursive = "RECURSIVE" ) +// RuleTypeOption_Values returns all elements of the RuleTypeOption enum +func RuleTypeOption_Values() []string { + return []string{ + RuleTypeOptionForward, + RuleTypeOptionSystem, + RuleTypeOptionRecursive, + } +} + const ( // ShareStatusNotShared is a ShareStatus enum value ShareStatusNotShared = "NOT_SHARED" @@ -5960,3 +8962,28 @@ const ( // ShareStatusSharedByMe is a ShareStatus enum value ShareStatusSharedByMe = "SHARED_BY_ME" ) + +// ShareStatus_Values returns all elements of the ShareStatus enum +func ShareStatus_Values() []string { + return []string{ + ShareStatusNotShared, + ShareStatusSharedWithMe, + ShareStatusSharedByMe, + } +} + +const ( + // 
SortOrderAscending is a SortOrder enum value + SortOrderAscending = "ASCENDING" + + // SortOrderDescending is a SortOrder enum value + SortOrderDescending = "DESCENDING" +) + +// SortOrder_Values returns all elements of the SortOrder enum +func SortOrder_Values() []string { + return []string{ + SortOrderAscending, + SortOrderDescending, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53resolver/doc.go b/vendor/github.com/aws/aws-sdk-go/service/route53resolver/doc.go index 02d332e9d..3ca543fbc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53resolver/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53resolver/doc.go @@ -3,41 +3,42 @@ // Package route53resolver provides the client and types for making API // requests to Amazon Route 53 Resolver. // -// Here's how you set up to query an Amazon Route 53 private hosted zone from -// your network: -// -// Connect your network to a VPC using AWS Direct Connect or a VPN. -// -// Run the following AWS CLI command to create a Resolver endpoint: -// -// create-resolver-endpoint --name [endpoint_name] --direction INBOUND --creator-request-id -// [unique_string] --security-group-ids [security_group_with_inbound_rules] -// --ip-addresses SubnetId=[subnet_id] SubnetId=[subnet_id_in_different_AZ] -// -// Note the resolver endpoint ID that appears in the response. You'll use it -// in step 3. -// -// Get the IP addresses for the Resolver endpoints: -// -// get-resolver-endpoint --resolver-endpoint-id [resolver_endpoint_id] -// -// In your network configuration, define the IP addresses that you got in step -// 3 as DNS servers. -// -// You can now query instance names in your VPCs and the names of records in -// your private hosted zone. -// -// You can also perform the following operations using the AWS CLI: -// -// * list-resolver-endpoints: List all endpoints. The syntax includes options -// for pagination and filtering. -// -// * update-resolver-endpoints: Add IP addresses to an endpoint or remove -// IP addresses from an endpoint. -// -// To delete an endpoint, use the following AWS CLI command: -// -// delete-resolver-endpoint --resolver-endpoint-id [resolver_endpoint_id] +// When you create a VPC using Amazon VPC, you automatically get DNS resolution +// within the VPC from Route 53 Resolver. By default, Resolver answers DNS queries +// for VPC domain names such as domain names for EC2 instances or ELB load balancers. +// Resolver performs recursive lookups against public name servers for all other +// domain names. +// +// You can also configure DNS resolution between your VPC and your network over +// a Direct Connect or VPN connection: +// +// Forward DNS queries from resolvers on your network to Route 53 Resolver +// +// DNS resolvers on your network can forward DNS queries to Resolver in a specified +// VPC. This allows your DNS resolvers to easily resolve domain names for AWS +// resources such as EC2 instances or records in a Route 53 private hosted zone. +// For more information, see How DNS Resolvers on Your Network Forward DNS Queries +// to Route 53 Resolver (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver.html#resolver-overview-forward-network-to-vpc) +// in the Amazon Route 53 Developer Guide. +// +// Conditionally forward queries from a VPC to resolvers on your network +// +// You can configure Resolver to forward queries that it receives from EC2 instances +// in your VPCs to DNS resolvers on your network. 
To forward selected queries, +// you create Resolver rules that specify the domain names for the DNS queries +// that you want to forward (such as example.com), and the IP addresses of the +// DNS resolvers on your network that you want to forward the queries to. If +// a query matches multiple rules (example.com, acme.example.com), Resolver +// chooses the rule with the most specific match (acme.example.com) and forwards +// the query to the IP addresses that you specified in that rule. For more information, +// see How Route 53 Resolver Forwards DNS Queries from Your VPCs to Your Network +// (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resolver.html#resolver-overview-forward-vpc-to-network) +// in the Amazon Route 53 Developer Guide. +// +// Like Amazon VPC, Resolver is regional. In each region where you have VPCs, +// you can choose whether to forward queries from your VPCs to your network +// (outbound queries), from your network to your VPCs (inbound queries), or +// both. // // See https://docs.aws.amazon.com/goto/WebAPI/route53resolver-2018-04-01 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53resolver/errors.go b/vendor/github.com/aws/aws-sdk-go/service/route53resolver/errors.go index a27e58d72..deb0b1a0d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53resolver/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53resolver/errors.go @@ -8,6 +8,13 @@ import ( const ( + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // The current account doesn't have the IAM permissions required to perform + // the specified Resolver operation. + ErrCodeAccessDeniedException = "AccessDeniedException" + // ErrCodeInternalServiceErrorException for service response error code // "InternalServiceErrorException". // @@ -29,7 +36,7 @@ const ( // ErrCodeInvalidPolicyDocument for service response error code // "InvalidPolicyDocument". // - // The specified resolver rule policy is invalid. + // The specified Resolver rule policy is invalid. 
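This revision of errors.go adds ErrCodeAccessDeniedException alongside the existing Resolver error codes. A hedged sketch of mapping service errors to these codes with an awserr type assertion (requires the github.com/aws/aws-sdk-go/aws/awserr import in addition to the earlier ones); the returned messages are illustrative only:

func describeResolverError(err error) string {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case route53resolver.ErrCodeAccessDeniedException:
			return "the current account lacks IAM permissions for this Resolver operation"
		case route53resolver.ErrCodeInvalidPolicyDocument:
			return "the specified Resolver rule policy is invalid"
		case route53resolver.ErrCodeInternalServiceErrorException:
			return "internal Resolver service error; retry the request"
		default:
			return aerr.Code() + ": " + aerr.Message()
		}
	}
	return err.Error()
}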
ErrCodeInvalidPolicyDocument = "InvalidPolicyDocument" // ErrCodeInvalidRequestException for service response error code @@ -88,6 +95,7 @@ const ( ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "AccessDeniedException": newErrorAccessDeniedException, "InternalServiceErrorException": newErrorInternalServiceErrorException, "InvalidNextTokenException": newErrorInvalidNextTokenException, "InvalidParameterException": newErrorInvalidParameterException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go b/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go index 80674d402..f54c4af74 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 52e87308f..bcf7f0344 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -14,12 +14,13 @@ import ( "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/checksum" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/eventstream" "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" "github.com/aws/aws-sdk-go/private/protocol/rest" "github.com/aws/aws-sdk-go/private/protocol/restxml" - "github.com/aws/aws-sdk-go/service/s3/internal/arn" ) const opAbortMultipartUpload = "AbortMultipartUpload" @@ -74,23 +75,23 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // times in order to completely free all storage consumed by all parts. // // To verify that all parts have been removed, so you don't get charged for -// the part storage, you should call the ListParts operation and ensure that -// the parts list is empty. +// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// operation and ensure that the parts list is empty. // // For information about permissions required to use the multipart upload API, // see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // // The following operations are related to AbortMultipartUpload: // -// * CreateMultipartUpload +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// * UploadPart +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * CompleteMultipartUpload +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// * ListParts +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * ListMultipartUploads +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -172,14 +173,15 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // Completes a multipart upload by assembling previously uploaded parts. // // You first initiate the multipart upload and then upload all parts using the -// UploadPart operation. After successfully uploading all relevant parts of -// an upload, you call this operation to complete the upload. Upon receiving -// this request, Amazon S3 concatenates all the parts in ascending order by -// part number to create a new object. In the Complete Multipart Upload request, -// you must provide the parts list. You must ensure that the parts list is complete. -// This operation concatenates the parts that you provide in the list. For each -// part in the list, you must provide the part number and the ETag value, returned -// after that part was uploaded. +// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// operation. After successfully uploading all relevant parts of an upload, +// you call this operation to complete the upload. Upon receiving this request, +// Amazon S3 concatenates all the parts in ascending order by part number to +// create a new object. In the Complete Multipart Upload request, you must provide +// the parts list. You must ensure that the parts list is complete. This operation +// concatenates the parts that you provide in the list. For each part in the +// list, you must provide the part number and the ETag value, returned after +// that part was uploaded. // // Processing of a Complete Multipart Upload request could take several minutes // to complete. After Amazon S3 begins processing the request, it sends an HTTP @@ -199,7 +201,7 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // For information about permissions required to use the multipart upload API, // see Multipart Upload API and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html). // -// GetBucketLifecycle has the following special errors: +// CompleteMultipartUpload has the following special errors: // // * Error code: EntityTooSmall Description: Your proposed upload is smaller // than the minimum allowed object size. Each part must be at least 5 MB @@ -217,17 +219,17 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // does not exist. The upload ID might be invalid, or the multipart upload // might have been aborted or completed. 404 Not Found // -// The following operations are related to DeleteBucketMetricsConfiguration: +// The following operations are related to CompleteMultipartUpload: // -// * CreateMultipartUpload +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// * UploadPart +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * AbortMultipartUpload +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // -// * ListParts +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * ListMultipartUploads +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -305,20 +307,9 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // // You can store individual objects of up to 5 TB in Amazon S3. You create a // copy of your object up to 5 GB in size in a single atomic operation using -// this API. However, for copying an object greater than 5 GB, you must use -// the multipart upload Upload Part - Copy API. For more information, see Copy -// Object Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). -// -// When copying an object, you can preserve all metadata (default) or specify -// new metadata. However, the ACL is not preserved and is set to private for -// the user making the request. To override the default ACL setting, specify -// a new ACL when generating a copy request. For more information, see Using -// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). -// -// Amazon S3 transfer acceleration does not support cross-region copies. If -// you request a cross-region copy using a transfer acceleration endpoint, you -// get a 400 Bad Request error. For more information about transfer acceleration, -// see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// this API. However, to copy an object greater than 5 GB, you must use the +// multipart upload Upload Part - Copy API. For more information, see Copy Object +// Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). // // All copy requests must be authenticated. Additionally, you must have read // access to the source object and write access to the destination bucket. For @@ -326,28 +317,6 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // Both the Region that you want to copy the object from and the Region that // you want to copy the object to must be enabled for your account. // -// To only copy an object under certain conditions, such as whether the Etag -// matches or whether the object was modified before or after a specified date, -// use the request parameters x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, -// x-amz-copy-source-if-unmodified-since, or x-amz-copy-source-if-modified-since. -// -// All headers with the x-amz- prefix, including x-amz-copy-source, must be -// signed. -// -// You can use this operation to change the storage class of an object that -// is already stored in Amazon S3 using the StorageClass parameter. For more -// information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). -// -// The source object that you are copying can be encrypted or unencrypted. If -// the source object is encrypted, it can be encrypted by server-side encryption -// using AWS managed encryption keys or by using a customer-provided encryption -// key. When copying an object, you can request that Amazon S3 encrypt the target -// object by using either the AWS managed encryption keys or by using your own -// encryption key. You can do this regardless of the form of server-side encryption -// that was used to encrypt the source, or even if the source object was not -// encrypted. For more information about server-side encryption, see Using Server-Side -// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). 
-// // A copy request might return an error when Amazon S3 receives the copy request // or while Amazon S3 is copying the files. If the error occurs before the copy // operation starts, you receive a standard Amazon S3 error. If the error occurs @@ -363,131 +332,123 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // it were not, it would not contain the content-length, and you would need // to read the entire body. // -// Consider the following when using request headers: +// The copy request charge is based on the storage class and Region that you +// specify for the destination object. For pricing information, see Amazon S3 +// pricing (https://aws.amazon.com/s3/pricing/). // -// * Consideration 1 – If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since -// headers are present in the request and evaluate as follows, Amazon S3 -// returns 200 OK and copies the data: x-amz-copy-source-if-match condition -// evaluates to true x-amz-copy-source-if-unmodified-since condition evaluates -// to false +// Amazon S3 transfer acceleration does not support cross-Region copies. If +// you request a cross-Region copy using a transfer acceleration endpoint, you +// get a 400 Bad Request error. For more information, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). // -// * Consideration 2 – If both of the x-amz-copy-source-if-none-match and -// x-amz-copy-source-if-modified-since headers are present in the request -// and evaluate as follows, Amazon S3 returns the 412 Precondition Failed -// response code: x-amz-copy-source-if-none-match condition evaluates to -// false x-amz-copy-source-if-modified-since condition evaluates to true +// Metadata // -// The copy request charge is based on the storage class and Region you specify -// for the destination object. For pricing information, see Amazon S3 Pricing -// (https://aws.amazon.com/s3/pricing/). +// When copying an object, you can preserve all metadata (default) or specify +// new metadata. However, the ACL is not preserved and is set to private for +// the user making the request. To override the default ACL setting, specify +// a new ACL when generating a copy request. For more information, see Using +// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). // -// Following are other considerations when using CopyObject: +// To specify whether you want the object metadata copied from the source object +// or replaced with metadata provided in the request, you can optionally add +// the x-amz-metadata-directive header. When you grant permissions, you can +// use the s3:x-amz-metadata-directive condition key to enforce certain metadata +// behavior when objects are uploaded. For more information, see Specifying +// Conditions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) +// in the Amazon S3 Developer Guide. For a complete list of Amazon S3-specific +// condition keys, see Actions, Resources, and Condition Keys for Amazon S3 +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html). // -// Versioning +// x-amz-copy-source-if Headers // -// By default, x-amz-copy-source identifies the current version of an object -// to copy. (If the current version is a delete marker, Amazon S3 behaves as -// if the object was deleted.) To copy a different version, use the versionId -// subresource. 
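The new Metadata section above mentions the x-amz-metadata-directive header and the fact that ACLs are never copied. A hedged sketch of replacing metadata during a copy with this SDK; the object names and metadata values are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// MetadataDirective "REPLACE" discards the source object's metadata and
	// applies the map below; the default "COPY" preserves it.
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:            aws.String("dest-bucket"),
		Key:               aws.String("report.csv"),
		CopySource:        aws.String("source-bucket/report.csv"),
		MetadataDirective: aws.String(s3.MetadataDirectiveReplace),
		Metadata:          map[string]*string{"team": aws.String("analytics")},
		ACL:               aws.String(s3.ObjectCannedACLPrivate), // the ACL is never copied from the source
	})
	if err != nil {
		log.Fatal(err)
	}
}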
+// To only copy an object under certain conditions, such as whether the Etag +// matches or whether the object was modified before or after a specified date, +// use the following request parameters: // -// If you enable versioning on the target bucket, Amazon S3 generates a unique -// version ID for the object being copied. This version ID is different from -// the version ID of the source object. Amazon S3 returns the version ID of -// the copied object in the x-amz-version-id response header in the response. +// * x-amz-copy-source-if-match // -// If you do not enable versioning or suspend it on the target bucket, the version -// ID that Amazon S3 generates is always null. +// * x-amz-copy-source-if-none-match // -// If the source object's storage class is GLACIER, you must restore a copy -// of this object before you can use it as a source object for the copy operation. -// For more information, see . +// * x-amz-copy-source-if-unmodified-since // -// Access Permissions +// * x-amz-copy-source-if-modified-since // -// When copying an object, you can optionally specify the accounts or groups -// that should be granted specific permissions on the new object. There are -// two ways to grant the permissions using the request headers: +// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// 200 OK and copies the data: // -// * Specify a canned ACL with the x-amz-acl request header. For more information, -// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// * x-amz-copy-source-if-match condition evaluates to true // -// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, -// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters -// map to the set of permissions that Amazon S3 supports in an ACL. For more -// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). +// * x-amz-copy-source-if-unmodified-since condition evaluates to false // -// You can use either a canned ACL or specify access permissions explicitly. -// You cannot do both. +// If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since +// headers are present in the request and evaluate as follows, Amazon S3 returns +// the 412 Precondition Failed response code: // -// Server-Side- Encryption-Specific Request Headers +// * x-amz-copy-source-if-none-match condition evaluates to false // -// To encrypt the target object, you must provide the appropriate encryption-related -// request headers. The one you use depends on whether you want to use AWS managed -// encryption keys or provide your own encryption key. +// * x-amz-copy-source-if-modified-since condition evaluates to true // -// * To encrypt the target object using server-side encryption with an AWS -// managed encryption key, provide the following request headers, as appropriate. -// x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id -// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, -// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon -// S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want -// to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id -// of the symmetric customer managed CMK. 
Amazon S3 only supports symmetric -// CMKs and not asymmetric CMKs. For more information, see Using Symmetric -// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the AWS Key Management Service Developer Guide. All GET and PUT requests -// for an object protected by AWS KMS fail if you don't make them with SSL -// or by using SigV4. For more information about server-side encryption with -// CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side -// Encryption with CMKs stored in KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// * To encrypt the target object using server-side encryption with an encryption -// key that you provide, use the following headers. x-amz-server-side​-encryption​-customer-algorithm -// x-amz-server-side​-encryption​-customer-key x-amz-server-side​-encryption​-customer-key-MD5 -// -// * If the source object is encrypted using server-side encryption with -// customer-provided encryption keys, you must use the following headers. -// x-amz-copy-source​-server-side​-encryption​-customer-algorithm x-amz-copy-source​-server-side​-encryption​-customer-key -// x-amz-copy-source-​server-side​-encryption​-customer-key-MD5 For -// more information about server-side encryption with CMKs stored in AWS -// KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs -// stored in Amazon KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// All headers with the x-amz- prefix, including x-amz-copy-source, must be +// signed. // -// Access-Control-List (ACL)-Specific Request Headers +// Encryption // -// You also can use the following access control–related headers with this -// operation. By default, all objects are private. Only the owner has full access -// control. When adding a new object, you can grant permissions to individual +// The source object that you are copying can be encrypted or unencrypted. The +// source object can be encrypted with server-side encryption using AWS managed +// encryption keys (SSE-S3 or SSE-KMS) or by using a customer-provided encryption +// key. With server-side encryption, Amazon S3 encrypts your data as it writes +// it to disks in its data centers and decrypts the data when you access it. +// +// You can optionally use the appropriate encryption-related headers to request +// server-side encryption for the target object. You have the option to provide +// your own encryption key or use SSE-S3 or SSE-KMS, regardless of the form +// of server-side encryption that was used to encrypt the source object. You +// can even request encryption if the source object was not encrypted. For more +// information about server-side encryption, see Using Server-Side Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// Access Control List (ACL)-Specific Request Headers +// +// When copying an object, you can optionally use headers to grant ACL-based +// permissions. By default, all objects are private. Only the owner has full +// access control. When adding a new object, you can grant permissions to individual // AWS accounts or to predefined groups defined by Amazon S3. These permissions -// are then added to the access control list (ACL) on the object. For more information, -// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). 
-// With this operation, you can grant access permissions using one of the following -// two methods: +// are then added to the ACL on the object. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). // -// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined -// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees -// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// Storage Class Options // -// * Specify access permissions explicitly — To explicitly grant access -// permissions to specific AWS accounts or groups, use the following headers. -// Each header maps to specific permissions that Amazon S3 supports in an -// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// In the header, you specify a list of grantees who get the specific permission. -// To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write -// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You -// specify each grantee as a type=value pair, where the type is one of the -// following: emailAddress – if the value specified is the email address -// of an AWS account id – if the value specified is the canonical user -// ID of an AWS account uri – if you are granting permissions to a predefined -// group For example, the following x-amz-grant-read header grants the AWS -// accounts identified by email addresses permissions to read object data -// and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// You can use the CopyObject operation to change the storage class of an object +// that is already stored in Amazon S3 using the StorageClass parameter. For +// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 Service Developer Guide. +// +// Versioning +// +// By default, x-amz-copy-source identifies the current version of an object +// to copy. If the current version is a delete marker, Amazon S3 behaves as +// if the object was deleted. To copy a different version, use the versionId +// subresource. +// +// If you enable versioning on the target bucket, Amazon S3 generates a unique +// version ID for the object being copied. This version ID is different from +// the version ID of the source object. Amazon S3 returns the version ID of +// the copied object in the x-amz-version-id response header in the response. +// +// If you do not enable versioning or suspend it on the target bucket, the version +// ID that Amazon S3 generates is always null. +// +// If the source object's storage class is GLACIER, you must restore a copy +// of this object before you can use it as a source object for the copy operation. +// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). 
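The Versioning notes above (versionId subresource, unique version IDs on versioned targets, GLACIER restore requirement) can be combined with the x-amz-copy-source-if headers and the StorageClass parameter documented earlier in this hunk. An illustrative sketch with placeholder identifiers:

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Copy a specific source version (versionId subresource) into a colder
	// storage class, but only if the source is unchanged since the given time.
	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:                      aws.String("dest-bucket"),
		Key:                         aws.String("archive/photo.png"),
		CopySource:                  aws.String("source-bucket/photo.png?versionId=EXAMPLE_VERSION_ID"),
		StorageClass:                aws.String(s3.StorageClassStandardIa),
		CopySourceIfUnmodifiedSince: aws.Time(time.Now().Add(-24 * time.Hour)),
	})
	if err != nil {
		log.Fatal(err)
	}
}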
// // The following operations are related to CopyObject: // -// * PutObject +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * GetObject +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // // For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html). // @@ -569,20 +530,23 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // CreateBucket API operation for Amazon Simple Storage Service. // -// Creates a new bucket. To create a bucket, you must register with Amazon S3 -// and have a valid AWS Access Key ID to authenticate requests. Anonymous requests -// are never allowed to create buckets. By creating the bucket, you become the -// bucket owner. +// Creates a new S3 bucket. To create a bucket, you must register with Amazon +// S3 and have a valid AWS Access Key ID to authenticate requests. Anonymous +// requests are never allowed to create buckets. By creating the bucket, you +// become the bucket owner. +// +// Not every string is an acceptable bucket name. For information about bucket +// naming restrictions, see Working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html). // -// Not every string is an acceptable bucket name. For information on bucket -// naming restrictions, see Working with Amazon S3 Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html). +// If you want to create an Amazon S3 on Outposts bucket, see Create Bucket +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). // // By default, the bucket is created in the US East (N. Virginia) Region. You // can optionally specify a Region in the request body. You might choose a Region // to optimize latency, minimize costs, or address regulatory requirements. // For example, if you reside in Europe, you will probably find it advantageous -// to create buckets in the EU (Ireland) Region. For more information, see How -// to Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). +// to create buckets in the Europe (Ireland) Region. For more information, see +// Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). // // If you send your create bucket request to the s3.amazonaws.com endpoint, // the request goes to the us-east-1 Region. Accordingly, the signature calculations @@ -590,7 +554,7 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // constraint in the request specifies another Region where the bucket is to // be created. If you create a bucket in a Region other than US East (N. Virginia), // your application must be able to handle 307 redirect. For more information, -// see Virtual Hosting of Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). +// see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). // // When creating a bucket using this operation, you can optionally specify the // accounts or groups that should be granted specific permissions on the bucket. @@ -605,24 +569,31 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // * Specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, // x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control // headers. 
These headers map to the set of permissions Amazon S3 supports -// in an ACL. For more information, see Access Control List (ACL) Overview +// in an ACL. For more information, see Access control list (ACL) overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You // specify each grantee as a type=value pair, where the type is one of the -// following: emailAddress – if the value specified is the email address -// of an AWS account id – if the value specified is the canonical user -// ID of an AWS account uri – if you are granting permissions to a predefined -// group For example, the following x-amz-grant-read header grants the AWS -// accounts identified by email addresses permissions to read object data -// and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// following: id – if the value specified is the canonical user ID of an +// AWS account uri – if you are granting permissions to a predefined group +// emailAddress – if the value specified is the email address of an AWS +// account Using email addresses to specify a grantee is only supported in +// the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants the AWS accounts identified by account IDs permissions to +// read object data and its metadata: x-amz-grant-read: id="11112222333", +// id="444455556666" // // You can use either a canned ACL or specify access permissions explicitly. // You cannot do both. // // The following operations are related to CreateBucket: // -// * PutObject +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * DeleteBucket +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -634,7 +605,7 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // Returned Error Codes: // * ErrCodeBucketAlreadyExists "BucketAlreadyExists" // The requested bucket name is not available. The bucket namespace is shared -// by all users of the system. Please select a different name and try again. +// by all users of the system. Select a different name and try again. // // * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" // The bucket you tried to create already exists, and you own it. Amazon S3 @@ -712,8 +683,9 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // This operation initiates a multipart upload and returns an upload ID. This // upload ID is used to associate all of the parts in the specific multipart // upload. You specify this upload ID in each of your subsequent upload part -// requests (see UploadPart). You also include this upload ID in the final request -// to either complete or abort the multipart upload request. +// requests (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). 
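Stepping back to the CreateBucket documentation revised earlier in this hunk (Region selection plus the id= grant syntax that replaces the emailAddress examples), the following sketch shows one way those options map onto the SDK input struct. The bucket name, Region, and canonical user ID are placeholders; a canned ACL and explicit grants cannot be combined, so only GrantRead is used here.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("eu-west-1")})))

	// Outside us-east-1 the Region must be repeated as a LocationConstraint.
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("example-bucket-name"),
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String("eu-west-1"),
		},
		GrantRead: aws.String(`id="CANONICAL_USER_ID_PLACEHOLDER"`),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeBucketAlreadyOwnedByYou {
		fmt.Println("bucket already exists and is owned by this account")
	} else if err != nil {
		log.Fatal(err)
	}
}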
+// You also include this upload ID in the final request to either complete or +// abort the multipart upload request. // // For more information about multipart uploads, see Multipart Upload Overview // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html). @@ -746,9 +718,10 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // and decrypts it when you access it. You can provide your own encryption key, // or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or // Amazon S3-managed encryption keys. If you choose to provide your own encryption -// key, the request headers you provide in UploadPart) and UploadPartCopy) requests -// must match the headers you used in the request to initiate the upload by -// using CreateMultipartUpload. +// key, the request headers you provide in UploadPart (AmazonS3/latest/API/API_UploadPart.html) +// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// requests must match the headers you used in the request to initiate the upload +// by using CreateMultipartUpload. // // To perform a multipart upload with encryption using an AWS KMS CMK, the requester // must have permission to the kms:Encrypt, kms:Decrypt, kms:ReEncrypt*, kms:GenerateDataKey*, @@ -792,7 +765,7 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) // stored in AWS Key Management Service (AWS KMS) – If you want AWS to // manage the keys used to encrypt data, specify the following headers in -// the request. x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id +// the request. x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id // x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, // but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon // S3 uses the AWS managed CMK in AWS KMS to protect the data. All GET and @@ -803,11 +776,10 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // // * Use customer-provided encryption keys – If you want to manage your // own encryption keys, provide all the following headers in the request. -// x-amz-server-side​-encryption​-customer-algorithm x-amz-server-side​-encryption​-customer-key -// x-amz-server-side​-encryption​-customer-key-MD5 For more information -// about server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see -// Protecting Data Using Server-Side Encryption with CMKs stored in AWS KMS -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key +// x-amz-server-side-encryption-customer-key-MD5 For more information about +// server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see Protecting +// Data Using Server-Side Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). 
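The encryption options just listed for CreateMultipartUpload translate into input fields rather than raw headers when the SDK is used directly. A hedged sketch requesting SSE-KMS at initiation; the bucket, key, and KMS key ARN are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Request SSE-KMS for the whole multipart upload at initiation time.
	// Omitting SSEKMSKeyId falls back to the AWS managed CMK, as the
	// documentation above notes.
	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("example-bucket"),
		Key:                  aws.String("large-object.bin"),
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
		SSEKMSKeyId:          aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// The returned UploadId must accompany every UploadPart call and the
	// final CompleteMultipartUpload or AbortMultipartUpload request.
	fmt.Println(aws.StringValue(out.UploadId))
}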
// // Access-Control-List (ACL)-Specific Request Headers // @@ -832,24 +804,31 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // To grant permissions explicitly, use: x-amz-grant-read x-amz-grant-write // x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You // specify each grantee as a type=value pair, where the type is one of the -// following: emailAddress – if the value specified is the email address -// of an AWS account id – if the value specified is the canonical user -// ID of an AWS account uri – if you are granting permissions to a predefined -// group For example, the following x-amz-grant-read header grants the AWS -// accounts identified by email addresses permissions to read object data -// and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// following: id – if the value specified is the canonical user ID of an +// AWS account uri – if you are granting permissions to a predefined group +// emailAddress – if the value specified is the email address of an AWS +// account Using email addresses to specify a grantee is only supported in +// the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants the AWS accounts identified by account IDs permissions to +// read object data and its metadata: x-amz-grant-read: id="11112222333", +// id="444455556666" // // The following operations are related to CreateMultipartUpload: // -// * UploadPart +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * CompleteMultipartUpload +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// * AbortMultipartUpload +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // -// * ListParts +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * ListMultipartUploads +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -924,14 +903,14 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request // DeleteBucket API operation for Amazon Simple Storage Service. // -// Deletes the bucket. All objects (including all object versions and delete +// Deletes the S3 bucket. All objects (including all object versions and delete // markers) in the bucket must be deleted before the bucket itself can be deleted. // // Related Resources // -// * +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1012,7 +991,7 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // // For information about the Amazon S3 analytics feature, see Amazon S3 Analytics @@ -1020,11 +999,11 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // // The following operations are related to DeleteBucketAnalyticsConfiguration: // -// * +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) // -// * +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) // -// * +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1110,9 +1089,9 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // // Related Resources: // -// * +// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) // -// * RESTOPTIONSobject +// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1189,21 +1168,21 @@ func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) ( // // This implementation of the DELETE operation removes default encryption from // the bucket. For information about the Amazon S3 default encryption feature, -// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev//bucket-encryption.html) +// see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) // in the Amazon Simple Storage Service Developer Guide. // // To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. 
For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) // in the Amazon Simple Storage Service Developer Guide. // // Related Resources // -// * PutBucketEncryption +// * PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) // -// * GetBucketEncryption +// * GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1292,11 +1271,11 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // // Operations related to DeleteBucketInventoryConfiguration include: // -// * GetBucketInventoryConfiguration +// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) // -// * PutBucketInventoryConfiguration +// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) // -// * ListBucketInventoryConfigurations +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1389,9 +1368,9 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re // // Related actions include: // -// * PutBucketLifecycleConfiguration +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) // -// * GetBucketLifecycleConfiguration +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1481,11 +1460,11 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC // // The following operations are related to DeleteBucketMetricsConfiguration: // -// * GetBucketMetricsConfiguration +// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) // -// * PutBucketMetricsConfiguration +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) // -// * ListBucketMetricsConfigurations +// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) // // * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) // @@ -1517,6 +1496,92 @@ func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input return out, req.Send() } +const opDeleteBucketOwnershipControls = "DeleteBucketOwnershipControls" + +// DeleteBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketOwnershipControls for more information on using the DeleteBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketOwnershipControlsRequest method. +// req, resp := client.DeleteBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls +func (c *S3) DeleteBucketOwnershipControlsRequest(input *DeleteBucketOwnershipControlsInput) (req *request.Request, output *DeleteBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opDeleteBucketOwnershipControls, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &DeleteBucketOwnershipControlsInput{} + } + + output = &DeleteBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, +// you must have the s3:PutBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). 
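DeleteBucketOwnershipControls is one of the operations this SDK update introduces. A sketch of calling it through the context-aware wrapper added in the same hunk; the bucket name and timeout are illustrative only.

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// aws.Context is satisfied by a standard context.Context.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Requires the s3:PutBucketOwnershipControls permission per the new doc comment.
	_, err := svc.DeleteBucketOwnershipControlsWithContext(ctx, &s3.DeleteBucketOwnershipControlsInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		log.Fatal(err)
	}
}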
+// +// The following operations are related to DeleteBucketOwnershipControls: +// +// * GetBucketOwnershipControls +// +// * PutBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls +func (c *S3) DeleteBucketOwnershipControls(input *DeleteBucketOwnershipControlsInput) (*DeleteBucketOwnershipControlsOutput, error) { + req, out := c.DeleteBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// DeleteBucketOwnershipControlsWithContext is the same as DeleteBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketOwnershipControlsWithContext(ctx aws.Context, input *DeleteBucketOwnershipControlsInput, opts ...request.Option) (*DeleteBucketOwnershipControlsOutput, error) { + req, out := c.DeleteBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteBucketPolicy = "DeleteBucketPolicy" // DeleteBucketPolicyRequest generates a "aws/request.Request" representing the @@ -1582,9 +1647,9 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // // The following operations are related to DeleteBucketPolicy // -// * CreateBucket +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * DeleteObject +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1675,9 +1740,9 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // // The following operations are related to DeleteBucketReplication: // -// * PutBucketReplication +// * PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) // -// * GetBucketReplication +// * GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1760,9 +1825,9 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r // // The following operations are related to DeleteBucketTagging: // -// * GetBucketTagging +// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) // -// * PutBucketTagging +// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1854,9 +1919,9 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // // The following operations are related to DeleteBucketWebsite: // -// * GetBucketWebsite +// * GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) // -// * PutBucketWebsite +// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1948,14 +2013,15 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). // // You can delete objects by explicitly calling the DELETE Object API or configure -// its lifecycle (PutBucketLifecycle) to enable Amazon S3 to remove them for -// you. If you want to block users or accounts from removing or deleting objects -// from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, -// and s3:PutLifeCycleConfiguration actions. +// its lifecycle (PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) +// to enable Amazon S3 to remove them for you. If you want to block users or +// accounts from removing or deleting objects from your bucket, you must deny +// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration +// actions. // // The following operation is related to DeleteObject: // -// * PutObject +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2041,9 +2107,9 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r // // The following operations are related to DeleteBucketMetricsConfiguration: // -// * PutObjectTagging +// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) // -// * GetObjectTagging +// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) // // Returns awserr.Error for service API and SDK errors. 
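The DeleteObject documentation above distinguishes deleting a specific version from merely inserting a delete marker. A hedged sketch of the versioned case; the identifiers are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Supplying VersionId permanently removes that version (given the
	// s3:DeleteObjectVersion permission); omitting it on a versioned bucket
	// only inserts a delete marker.
	out, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String("example-bucket"),
		Key:       aws.String("old-report.csv"),
		VersionId: aws.String("EXAMPLE_VERSION_ID"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.VersionId), aws.BoolValue(out.DeleteMarker))
}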
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2112,6 +2178,10 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque output = &DeleteObjectsOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -2150,15 +2220,15 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque // // The following operations are related to DeleteObjects: // -// * CreateMultipartUpload +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// * UploadPart +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * CompleteMultipartUpload +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// * ListParts +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * AbortMultipartUpload +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2239,15 +2309,15 @@ func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) // Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // -// The following operations are related to DeleteBucketMetricsConfiguration: +// The following operations are related to DeletePublicAccessBlock: // // * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) // -// * GetPublicAccessBlock +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) // -// * PutPublicAccessBlock +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) // -// * GetBucketPolicyStatus +// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2329,24 +2399,25 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration // action. The bucket owner has this permission by default. The bucket owner // can grant this permission to others. 
For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) // in the Amazon Simple Storage Service Developer Guide. // // You set the Transfer Acceleration state of an existing bucket to Enabled -// or Suspended by using the PutBucketAccelerateConfiguration operation. +// or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// operation. // // A GET accelerate request does not return a state value for a bucket that // has no transfer acceleration state. A bucket has no Transfer Acceleration // state if a state has never been set on the bucket. // // For more information about transfer acceleration, see Transfer Acceleration -// (https://docs.aws.amazon.com/AmazonS3/latest/dev//transfer-acceleration.html) +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) // in the Amazon Simple Storage Service Developer Guide. // // Related Resources // -// * PutBucketAccelerateConfiguration +// * PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2428,7 +2499,7 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // // Related Resources // -// * +// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2518,11 +2589,11 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // // Related Resources // -// * +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) // -// * +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) // -// * +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2607,9 +2678,9 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // // The following operations are related to GetBucketCors: // -// * PutBucketCors +// * PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) // -// * DeleteBucketCors +// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2695,9 +2766,9 @@ func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *r // // The following operations are related to GetBucketEncryption: // -// * PutBucketEncryption +// * PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) // -// * DeleteBucketEncryption +// * DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2785,11 +2856,11 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // // The following operations are related to GetBucketInventoryConfiguration: // -// * DeleteBucketInventoryConfiguration +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) // -// * ListBucketInventoryConfigurations +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) // -// * PutBucketInventoryConfiguration +// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2869,7 +2940,7 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // GetBucketLifecycle API operation for Amazon Simple Storage Service. // // -// For an updated version of this API, see GetBucketLifecycleConfiguration. +// For an updated version of this API, see GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html). // If you configured a bucket lifecycle using the filter element, you should // see the updated version of this topic. This topic is provided for backward // compatibility. @@ -2891,11 +2962,11 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // // The following operations are related to GetBucketLifecycle: // -// * GetBucketLifecycleConfiguration +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) // -// * PutBucketLifecycle +// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) // -// * DeleteBucketLifecycle +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2978,9 +3049,9 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // an object key name prefix, one or more object tags, or a combination of both. // Accordingly, this section describes the latest API. The response describes // the new filter element that you can use to specify a filter to select a subset -// of objects to which the rule applies. If you are still using previous version -// of the lifecycle configuration, it works. For the earlier API description, -// see GetBucketLifecycle. 
+// of objects to which the rule applies. If you are using a previous version +// of the lifecycle configuration, it still works. For the earlier API description, +// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). // // Returns the lifecycle configuration information set on the bucket. For information // about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). @@ -2997,13 +3068,13 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault // Code Prefix: Client // -// The following operations are related to DeleteBucketMetricsConfiguration: +// The following operations are related to GetBucketLifecycleConfiguration: // -// * GetBucketLifecycle +// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) // -// * PutBucketLifecycle +// * PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) // -// * DeleteBucketLifecycle +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3079,15 +3150,15 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque // // Returns the Region the bucket resides in. You set the bucket's Region using // the LocationConstraint request parameter in a CreateBucket request. For more -// information, see CreateBucket. +// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). // // To use this implementation of the operation, you must be the bucket owner. // // The following operations are related to GetBucketLocation: // -// * GetObject +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// * CreateBucket +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3166,9 +3237,9 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request // // The following operations are related to GetBucketLogging: // -// * CreateBucket +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * PutBucketLogging +// * PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3256,11 +3327,11 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // // The following operations are related to GetBucketMetricsConfiguration: // -// * PutBucketMetricsConfiguration +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) // -// * DeleteBucketMetricsConfiguration +// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) // -// * ListBucketMetricsConfigurations +// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) // // * Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) // @@ -3341,7 +3412,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // GetBucketNotification API operation for Amazon Simple Storage Service. // -// No longer used, see GetBucketNotificationConfiguration. +// No longer used, see GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3435,7 +3506,7 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // The following operation is related to GetBucketNotification: // -// * PutBucketNotification +// * PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3465,6 +3536,91 @@ func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, inpu return out, req.Send() } +const opGetBucketOwnershipControls = "GetBucketOwnershipControls" + +// GetBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketOwnershipControls for more information on using the GetBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketOwnershipControlsRequest method. 
+// req, resp := client.GetBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls +func (c *S3) GetBucketOwnershipControlsRequest(input *GetBucketOwnershipControlsInput) (req *request.Request, output *GetBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opGetBucketOwnershipControls, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &GetBucketOwnershipControlsInput{} + } + + output = &GetBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, +// you must have the s3:GetBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// +// The following operations are related to GetBucketOwnershipControls: +// +// * PutBucketOwnershipControls +// +// * DeleteBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls +func (c *S3) GetBucketOwnershipControls(input *GetBucketOwnershipControlsInput) (*GetBucketOwnershipControlsOutput, error) { + req, out := c.GetBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// GetBucketOwnershipControlsWithContext is the same as GetBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketOwnershipControlsWithContext(ctx aws.Context, input *GetBucketOwnershipControlsInput, opts ...request.Option) (*GetBucketOwnershipControlsOutput, error) { + req, out := c.GetBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetBucketPolicy = "GetBucketPolicy" // GetBucketPolicyRequest generates a "aws/request.Request" representing the @@ -3528,7 +3684,7 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // // The following operation is related to GetBucketPolicy: // -// * GetObject +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // // Returns awserr.Error for service API and SDK errors. 
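// A minimal usage sketch of GetBucketOwnershipControls, with the same client setup and
// imports as the earlier sketch; the bucket name is a placeholder and the caller is
// assumed to be allowed to read the bucket's ownership controls.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    out, err := svc.GetBucketOwnershipControls(&s3.GetBucketOwnershipControlsInput{
//        Bucket: aws.String("example-bucket"), // placeholder
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    for _, rule := range out.OwnershipControls.Rules {
//        // Prints the configured object ownership, e.g. BucketOwnerPreferred or ObjectWriter.
//        fmt.Println(aws.StringValue(rule.ObjectOwnership))
//    }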
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3614,11 +3770,11 @@ func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (re // // * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) // -// * GetPublicAccessBlock +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) // -// * PutPublicAccessBlock +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) // -// * DeletePublicAccessBlock +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3709,13 +3865,14 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // also include the DeleteMarkerReplication and Priority elements. The response // also returns those elements. // -// For information about GetBucketReplication errors, see ReplicationErrorCodeList +// For information about GetBucketReplication errors, see List of replication-related +// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) // // The following operations are related to GetBucketReplication: // -// * PutBucketReplication +// * PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) // -// * DeleteBucketReplication +// * DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3795,7 +3952,7 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // // The following operations are related to GetBucketRequestPayment: // -// * ListObjects +// * ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3882,9 +4039,9 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // // The following operations are related to GetBucketTagging: // -// * PutBucketTagging +// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) // -// * DeleteBucketTagging +// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) // // Returns awserr.Error for service API and SDK errors. 
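// A minimal GetBucketTagging sketch, assuming the same imports and a placeholder bucket
// that already carries a tag set (the call fails with a NoSuchTagSet error otherwise).
//
//    svc := s3.New(session.Must(session.NewSession()))
//    out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
//        Bucket: aws.String("example-bucket"), // placeholder
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    for _, tag := range out.TagSet {
//        fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
//    }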
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3968,11 +4125,11 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r // // The following operations are related to GetBucketVersioning: // -// * GetObject +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// * PutObject +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * DeleteObject +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4058,9 +4215,9 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // // The following operations are related to DeleteBucketWebsite: // -// * DeleteBucketWebsite +// * DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) // -// * PutBucketWebsite +// * PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4154,13 +4311,14 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // // To distribute large files to many people, you can save bandwidth costs by // using BitTorrent. For more information, see Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). -// For more information about returning the ACL of an object, see GetObjectAcl. +// For more information about returning the ACL of an object, see GetObjectAcl +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). // // If the object you are retrieving is stored in the GLACIER or DEEP_ARCHIVE // storage classes, before you can retrieve the object you must first restore -// a copy using . Otherwise, this operation returns an InvalidObjectStateError -// error. For information about restoring archived objects, see Restoring Archived -// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). +// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this operation returns an InvalidObjectStateError error. For information +// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). 
// // Encryption request headers, like x-amz-server-side-encryption, should not // be sent for GET requests if your object uses server-side encryption with @@ -4172,11 +4330,11 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // encryption keys (SSE-C) when you store the object in Amazon S3, then when // you GET the object, you must use the following headers: // -// * x-amz-server-side​-encryption​-customer-algorithm +// * x-amz-server-side-encryption-customer-algorithm // -// * x-amz-server-side​-encryption​-customer-key +// * x-amz-server-side-encryption-customer-key // -// * x-amz-server-side​-encryption​-customer-key-MD5 +// * x-amz-server-side-encryption-customer-key-MD5 // // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). @@ -4184,6 +4342,7 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // Assuming you have permission to read object tags (permission for the s3:GetObjectVersionTagging // action), the response also returns the x-amz-tagging-count header that provides // the count of number of tags associated with the object. You can use GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) // to retrieve the tag set associated with an object. // // Permissions @@ -4208,7 +4367,7 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // as if the object was deleted and includes x-amz-delete-marker: true in the // response. // -// For more information about versioning, see PutBucketVersioning. +// For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). // // Overriding Response Header Values // @@ -4255,9 +4414,9 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // // The following operations are related to GetObject: // -// * ListBuckets +// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) // -// * GetObjectAcl +// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4339,6 +4498,8 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // Returns the access control list (ACL) of an object. To use this operation, // you must have READ_ACP access to the object. // +// This action is not supported by Amazon S3 on Outposts. +// // Versioning // // By default, GET returns ACL information about the current version of an object. @@ -4346,11 +4507,11 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // // The following operations are related to GetObjectAcl: // -// * GetObject +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// * DeleteObject +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) // -// * PutObject +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // // Returns awserr.Error for service API and SDK errors. 
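// A minimal GetObject sketch using the response header overrides described above,
// assuming the same client setup and placeholder bucket/key names.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    out, err := svc.GetObject(&s3.GetObjectInput{
//        Bucket:                     aws.String("example-bucket"),
//        Key:                        aws.String("docs/report.pdf"),
//        ResponseContentType:        aws.String("application/pdf"),
//        ResponseContentDisposition: aws.String(`attachment; filename="report.pdf"`),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    defer out.Body.Close() // the object data is streamed through Body
//    fmt.Println("content length:", aws.Int64Value(out.ContentLength))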
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4432,6 +4593,8 @@ func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *req // Gets an object's current Legal Hold status. For more information, see Locking // Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // +// This action is not supported by Amazon S3 on Outposts. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4584,6 +4747,8 @@ func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *req // Retrieves an object's retention settings. For more information, see Locking // Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). // +// This action is not supported by Amazon S3 on Outposts. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4674,7 +4839,7 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request // // The following operation is related to GetObjectTagging: // -// * PutObjectTagging +// * PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4748,19 +4913,21 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request // GetObjectTorrent API operation for Amazon Simple Storage Service. // -// Return torrent files from a bucket. BitTorrent can save you bandwidth when +// Returns torrent files from a bucket. BitTorrent can save you bandwidth when // you're distributing large files. For more information about BitTorrent, see -// Amazon S3 Torrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). +// Using BitTorrent with Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). // -// You can get torrent only for objects that are less than 5 GB in size and -// that are not encrypted using server-side encryption with customer-provided +// You can get torrent only for objects that are less than 5 GB in size, and +// that are not encrypted using server-side encryption with a customer-provided // encryption key. // // To use GET, you must have READ access to the object. // +// This action is not supported by Amazon S3 on Outposts. +// // The following operation is related to GetObjectTorrent: // -// * GetObject +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // // Returns awserr.Error for service API and SDK errors. 
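// A minimal GetObjectLegalHold sketch, assuming an Object Lock-enabled bucket and
// placeholder names, with the same client setup as the sketches above.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    out, err := svc.GetObjectLegalHold(&s3.GetObjectLegalHoldInput{
//        Bucket: aws.String("example-locked-bucket"), // placeholder; Object Lock must be enabled
//        Key:    aws.String("example-object"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(aws.StringValue(out.LegalHold.Status)) // "ON" or "OFF"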
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4853,11 +5020,11 @@ func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req // // * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) // -// * PutPublicAccessBlock +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) // -// * GetPublicAccessBlock +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) // -// * DeletePublicAccessBlock +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5032,11 +5199,11 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // encryption keys (SSE-C) when you store the object in Amazon S3, then when // you retrieve the metadata from the object, you must use the following headers: // -// * x-amz-server-side​-encryption​-customer-algorithm +// * x-amz-server-side-encryption-customer-algorithm // -// * x-amz-server-side​-encryption​-customer-key +// * x-amz-server-side-encryption-customer-key // -// * x-amz-server-side​-encryption​-customer-key-MD5 +// * x-amz-server-side-encryption-customer-key-MD5 // // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). @@ -5079,7 +5246,7 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou // // The following operation is related to HeadObject: // -// * GetObject +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // // See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses // for more information on returned errors. @@ -5178,11 +5345,11 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics // // The following operations are related to ListBucketAnalyticsConfigurations: // -// * GetBucketAnalyticsConfiguration +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) // -// * DeleteBucketAnalyticsConfiguration +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) // -// * PutBucketAnalyticsConfiguration +// * PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) // // Returns awserr.Error for service API and SDK errors. 
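// A minimal HeadObject sketch with placeholder bucket/key names; HEAD returns only
// metadata, so a missing object surfaces as an error without a response body.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    out, err := svc.HeadObject(&s3.HeadObjectInput{
//        Bucket: aws.String("example-bucket"),
//        Key:    aws.String("example-object"),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println(aws.Int64Value(out.ContentLength), aws.StringValue(out.ETag))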
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5278,11 +5445,11 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // // The following operations are related to ListBucketInventoryConfigurations: // -// * GetBucketInventoryConfiguration +// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) // -// * DeleteBucketInventoryConfiguration +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) // -// * PutBucketInventoryConfiguration +// * PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5379,11 +5546,11 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // // The following operations are related to ListBucketMetricsConfigurations: // -// * PutBucketMetricsConfiguration +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) // -// * GetBucketMetricsConfiguration +// * GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) // -// * DeleteBucketMetricsConfiguration +// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5562,15 +5729,15 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // // The following operations are related to ListMultipartUploads: // -// * CreateMultipartUpload +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// * UploadPart +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * CompleteMultipartUpload +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// * ListParts +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * AbortMultipartUpload +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5702,24 +5869,26 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // ListObjectVersions API operation for Amazon Simple Storage Service. // -// Returns metadata about all of the versions of objects in a bucket. You can -// also use request parameters as selection criteria to return metadata about -// a subset of all the object versions. +// Returns metadata about all versions of the objects in a bucket. You can also +// use request parameters as selection criteria to return metadata about a subset +// of all the object versions. // // A 200 OK response can contain valid or invalid XML. Make sure to design your // application to parse the contents of the response and handle it appropriately. 
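// A sketch of paging through ListObjectVersions with the SDK's generated paginator,
// assuming a versioning-enabled bucket with a placeholder name.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    err := svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
//        Bucket: aws.String("example-versioned-bucket"), // placeholder
//        Prefix: aws.String("logs/"),
//    }, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
//        for _, v := range page.Versions {
//            fmt.Println(aws.StringValue(v.Key), aws.StringValue(v.VersionId), aws.BoolValue(v.IsLatest))
//        }
//        return true // continue until the last page
//    })
//    if err != nil {
//        log.Fatal(err)
//    }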
// // To use this operation, you must have READ access to the bucket. // +// This action is not supported by Amazon S3 on Outposts. +// // The following operations are related to ListObjectVersions: // -// * ListObjectsV2 +// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) // -// * GetObject +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// * PutObject +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * DeleteObject +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5857,21 +6026,22 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // to design your application to parse the contents of the response and handle // it appropriately. // -// This API has been revised. We recommend that you use the newer version, ListObjectsV2, +// This API has been revised. We recommend that you use the newer version, ListObjectsV2 +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), // when developing applications. For backward compatibility, Amazon S3 continues // to support ListObjects. // // The following operations are related to ListObjects: // -// * ListObjectsV2 +// * ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) // -// * GetObject +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// * PutObject +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * CreateBucket +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * ListBuckets +// * ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6025,17 +6195,18 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // // This section describes the latest revision of the API. We recommend that // you use this revised API for application development. For backward compatibility, -// Amazon S3 continues to support the prior version of this API, ListObjects. +// Amazon S3 continues to support the prior version of this API, ListObjects +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). // -// To get a list of your buckets, see ListBuckets. +// To get a list of your buckets, see ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). // // The following operations are related to ListObjectsV2: // -// * GetObject +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// * PutObject +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * CreateBucket +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // // Returns awserr.Error for service API and SDK errors. 
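// A sketch of ListObjectsV2 via the generated ...Pages helper, assuming a placeholder
// bucket and prefix; each page holds up to 1,000 keys by default.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    var total int
//    err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
//        Bucket: aws.String("example-bucket"),
//        Prefix: aws.String("data/"),
//    }, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
//        total += len(page.Contents)
//        return true // keep requesting pages
//    })
//    if err != nil {
//        log.Fatal(err)
//    }
//    fmt.Println("keys:", total)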
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6174,14 +6345,14 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // // Lists the parts that have been uploaded for a specific multipart upload. // This operation must include the upload ID, which you obtain by sending the -// initiate multipart upload request (see CreateMultipartUpload). This request -// returns a maximum of 1,000 uploaded parts. The default number of parts returned -// is 1,000 parts. You can restrict the number of parts returned by specifying -// the max-parts request parameter. If your multipart upload consists of more -// than 1,000 parts, the response returns an IsTruncated field with the value -// of true, and a NextPartNumberMarker element. In subsequent ListParts requests -// you can include the part-number-marker query string parameter and set its -// value to the NextPartNumberMarker field value from the previous response. +// initiate multipart upload request (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)). +// This request returns a maximum of 1,000 uploaded parts. The default number +// of parts returned is 1,000 parts. You can restrict the number of parts returned +// by specifying the max-parts request parameter. If your multipart upload consists +// of more than 1,000 parts, the response returns an IsTruncated field with +// the value of true, and a NextPartNumberMarker element. In subsequent ListParts +// requests you can include the part-number-marker query string parameter and +// set its value to the NextPartNumberMarker field value from the previous response. // // For more information on multipart uploads, see Uploading Objects Using Multipart // Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). @@ -6191,15 +6362,15 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // // The following operations are related to ListParts: // -// * CreateMultipartUpload +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// * UploadPart +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * CompleteMultipartUpload +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// * AbortMultipartUpload +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // -// * ListMultipartUploads +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6343,8 +6514,8 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // // * Suspended – Disables accelerated data transfers to the bucket. // -// The GetBucketAccelerateConfiguration operation returns the transfer acceleration -// state of a bucket. +// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// operation returns the transfer acceleration state of a bucket. 
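// A sketch of the part-number-marker pagination described above for ListParts, assuming
// placeholder bucket/key names and an upload ID obtained from CreateMultipartUpload.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    input := &s3.ListPartsInput{
//        Bucket:   aws.String("example-bucket"),
//        Key:      aws.String("example-large-object"),
//        UploadId: aws.String("example-upload-id"), // placeholder
//        MaxParts: aws.Int64(1000),
//    }
//    for {
//        page, err := svc.ListParts(input)
//        if err != nil {
//            log.Fatal(err)
//        }
//        for _, p := range page.Parts {
//            fmt.Println(aws.Int64Value(p.PartNumber), aws.Int64Value(p.Size))
//        }
//        if !aws.BoolValue(page.IsTruncated) {
//            break
//        }
//        // Resume after the last part returned, as the marker fields above describe.
//        input.PartNumberMarker = page.NextPartNumberMarker
//    }
//
// The generated ListPartsPages helper wraps the same loop; the manual form is shown only
// to mirror the marker fields named in the documentation above.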
// // After setting the Transfer Acceleration state of a bucket to Enabled, it // might take up to thirty minutes before the data transfer rates to the bucket @@ -6358,9 +6529,9 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // // The following operations are related to PutBucketAccelerateConfiguration: // -// * GetBucketAccelerateConfiguration +// * GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) // -// * CreateBucket +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6430,6 +6601,10 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request output = &PutBucketAclOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -6473,14 +6648,20 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request // Amazon S3 supports in an ACL. For more information, see Access Control // List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). // You specify each grantee as a type=value pair, where the type is one of -// the following: emailAddress – if the value specified is the email address -// of an AWS account id – if the value specified is the canonical user -// ID of an AWS account uri – if you are granting permissions to a predefined -// group For example, the following x-amz-grant-write header grants create, -// overwrite, and delete objects permission to LogDelivery group predefined -// by Amazon S3 and two AWS accounts identified by their email addresses. -// x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", -// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// the following: id – if the value specified is the canonical user ID +// of an AWS account uri – if you are granting permissions to a predefined +// group emailAddress – if the value specified is the email address of +// an AWS account Using email addresses to specify a grantee is only supported +// in the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-write +// header grants create, overwrite, and delete objects permission to LogDelivery +// group predefined by Amazon S3 and two AWS accounts identified by their +// email addresses. x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", +// id="111122223333", id="555566667777" // // You can use either a canned ACL or specify access permissions explicitly. // You cannot do both. 
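// A minimal PutBucketAcl sketch using a canned ACL and a placeholder bucket name; as
// noted above, a canned ACL cannot be combined with explicit grant headers.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    _, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
//        Bucket: aws.String("example-bucket"),
//        ACL:    aws.String(s3.BucketCannedACLPrivate), // "private"
//    })
//    if err != nil {
//        log.Fatal(err)
//    }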
@@ -6490,11 +6671,6 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request // You can specify the person (grantee) to whom you're assigning access rights // (using request elements) in the following ways: // -// * By Email address: <>Grantees@email.com<>lt;/Grantee> -// The grantee is resolved to the CanonicalUser and, in a response to a GET -// Object acl request, appears as the CanonicalUser. -// // * By the person's ID: <>ID<><>GranteesEmail<> // DisplayName is optional and ignored in the request @@ -6502,13 +6678,24 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request // * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> // +// * By Email address: <>Grantees@email.com<>lt;/Grantee> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. Using email addresses +// to specify a grantee is only supported in the following AWS Regions: US +// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific +// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) +// South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the AWS General Reference. +// // Related Resources // -// * CreateBucket +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * DeleteBucket +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) // -// * GetObjectAcl +// * GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6623,11 +6810,11 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // // Related Resources // -// * +// * GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) // -// * +// * DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) // -// * +// * ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) // // Returns awserr.Error for service API and SDK errors. 
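// A sketch of PutBucketAnalyticsConfiguration with a storage class analysis export,
// assuming placeholder bucket names and a placeholder destination bucket ARN.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    _, err := svc.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
//        Bucket: aws.String("example-bucket"),
//        Id:     aws.String("analysis-1"),
//        AnalyticsConfiguration: &s3.AnalyticsConfiguration{
//            Id: aws.String("analysis-1"),
//            StorageClassAnalysis: &s3.StorageClassAnalysis{
//                DataExport: &s3.StorageClassAnalysisDataExport{
//                    OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
//                    Destination: &s3.AnalyticsExportDestination{
//                        S3BucketDestination: &s3.AnalyticsS3BucketDestination{
//                            Bucket: aws.String("arn:aws:s3:::example-analytics-results"), // placeholder ARN
//                            Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
//                        },
//                    },
//                },
//            },
//        },
//    })
//    if err != nil {
//        log.Fatal(err)
//    }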
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6697,6 +6884,10 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque output = &PutBucketCorsOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -6740,11 +6931,11 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // // Related Resources // -// * GetBucketCors +// * GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) // -// * DeleteBucketCors +// * DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) // -// * RESTOPTIONSobject +// * RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6814,6 +7005,10 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r output = &PutBucketEncryptionOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -6824,7 +7019,8 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // // This implementation of the PUT operation sets default encryption for a bucket // using server-side encryption with Amazon S3-managed keys SSE-S3 or AWS KMS -// customer master keys (CMKs) (SSE-KMS). +// customer master keys (CMKs) (SSE-KMS). For information about the Amazon S3 +// default encryption feature, see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). // // This operation requires AWS Signature Version 4. For more information, see // Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). @@ -6838,9 +7034,9 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // // Related Resources // -// * GetBucketEncryption +// * GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) // -// * DeleteBucketEncryption +// * DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6929,19 +7125,19 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // bucket where you want the inventory to be stored, and whether to generate // the inventory daily or weekly. You can also configure what object metadata // to include and whether to inventory all object versions or only current versions. -// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev//storage-inventory.html) +// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) // in the Amazon Simple Storage Service Developer Guide. 
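// A minimal PutBucketEncryption sketch enabling SSE-S3 (AES256) as the bucket default,
// with a placeholder bucket name; for SSE-KMS, SSEAlgorithm would be "aws:kms" together
// with a KMSMasterKeyID.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    _, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
//        Bucket: aws.String("example-bucket"),
//        ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
//            Rules: []*s3.ServerSideEncryptionRule{{
//                ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
//                    SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
//                },
//            }},
//        },
//    })
//    if err != nil {
//        log.Fatal(err)
//    }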
// // You must create a bucket policy on the destination bucket to grant permissions // to Amazon S3 to write objects to the bucket in the defined location. For // an example policy, see Granting Permissions for Amazon S3 Inventory and Storage -// Class Analysis. (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9) +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). // // To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration // action. The bucket owner has this permission by default and can grant this // permission to others. For more information about permissions, see Permissions -// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) -// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) // in the Amazon Simple Storage Service Developer Guide. // // Special Errors @@ -6954,15 +7150,15 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // // * HTTP 403 Forbidden Error Code: AccessDenied Cause: You are not the owner // of the specified bucket, or you do not have the s3:PutInventoryConfiguration -// bucket permission to set the configuration on the bucket +// bucket permission to set the configuration on the bucket. // // Related Resources // -// * GetBucketInventoryConfiguration +// * GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) // -// * DeleteBucketInventoryConfiguration +// * DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) // -// * ListBucketInventoryConfigurations +// * ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7037,19 +7233,23 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req output = &PutBucketLifecycleOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketLifecycle API operation for Amazon Simple Storage Service. // // -// For an updated version of this API, see PutBucketLifecycleConfiguration. +// For an updated version of this API, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html). // This version has been deprecated. Existing lifecycle configurations will // work. For new lifecycle configurations, use the updated API. 
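// A sketch of PutBucketInventoryConfiguration for a daily CSV inventory of current
// object versions, assuming placeholder bucket names/ARNs and that the destination
// bucket policy described above is already in place.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    _, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
//        Bucket: aws.String("example-source-bucket"),
//        Id:     aws.String("daily-inventory"),
//        InventoryConfiguration: &s3.InventoryConfiguration{
//            Id:                     aws.String("daily-inventory"),
//            IsEnabled:              aws.Bool(true),
//            IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
//            Schedule:               &s3.InventorySchedule{Frequency: aws.String(s3.InventoryFrequencyDaily)},
//            Destination: &s3.InventoryDestination{
//                S3BucketDestination: &s3.InventoryS3BucketDestination{
//                    Bucket: aws.String("arn:aws:s3:::example-inventory-destination"), // placeholder ARN
//                    Format: aws.String(s3.InventoryFormatCsv),
//                },
//            },
//        },
//    })
//    if err != nil {
//        log.Fatal(err)
//    }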
// // Creates a new lifecycle configuration for the bucket or replaces an existing // lifecycle configuration. For information about lifecycle configuration, see -// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev//object-lifecycle-mgmt.html) +// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) // in the Amazon Simple Storage Service Developer Guide. // // By default, all Amazon S3 resources, including buckets, objects, and related @@ -7071,26 +7271,26 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // * s3:PutLifecycleConfiguration // // For more information about permissions, see Managing Access Permissions to -// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) // in the Amazon Simple Storage Service Developer Guide. // // For more examples of transitioning objects to storage classes such as STANDARD_IA -// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev//intro-lifecycle-rules.html#lifecycle-configuration-examples). +// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples). // // Related Resources // -// * GetBucketLifecycle(Deprecated) +// * GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)(Deprecated) // -// * GetBucketLifecycleConfiguration +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) // -// * +// * RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) // // * By default, a resource owner—in this case, a bucket owner, which is // the AWS account that created the bucket—can perform any of the operations. // A resource owner can also grant others permission to perform the operation. // For more information, see the following topics in the Amazon Simple Storage -// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev//using-with-s3-actions.html) -// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev//s3-access-control.html) +// Service Developer Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7164,6 +7364,10 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon output = &PutBucketLifecycleConfigurationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -7178,7 +7382,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // Accordingly, this section describes the latest API. 
The previous version // of the API supported filtering based only on an object key name prefix, which // is supported for backward compatibility. For the related API description, -// see PutBucketLifecycle. +// see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html). // // Rules // @@ -7229,9 +7433,9 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // // * Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) // -// * GetBucketLifecycleConfiguration +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) // -// * DeleteBucketLifecycle +// * DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7301,6 +7505,10 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request output = &PutBucketLoggingOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -7340,18 +7548,19 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request // For more information about server access logging, see Server Access Logging // (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html). // -// For more information about creating a bucket, see CreateBucket. For more -// information about returning the logging status of a bucket, see GetBucketLogging. +// For more information about creating a bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). +// For more information about returning the logging status of a bucket, see +// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html). // // The following operations are related to PutBucketLogging: // -// * PutObject +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * DeleteBucket +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) // -// * CreateBucket +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * GetBucketLogging +// * GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html) // // Returns awserr.Error for service API and SDK errors. 
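// A sketch of the filter-based rules described earlier for PutBucketLifecycleConfiguration,
// assuming a placeholder bucket name and an illustrative 365-day expiration for the
// "logs/" prefix.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
//        Bucket: aws.String("example-bucket"),
//        LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
//            Rules: []*s3.LifecycleRule{{
//                ID:         aws.String("expire-old-logs"),
//                Status:     aws.String(s3.ExpirationStatusEnabled), // "Enabled"
//                Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
//                Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
//            }},
//        },
//    })
//    if err != nil {
//        log.Fatal(err)
//    }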
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7443,11 +7652,11 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // // The following operations are related to PutBucketMetricsConfiguration: // -// * DeleteBucketMetricsConfiguration +// * DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) // -// * PutBucketMetricsConfiguration +// * PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) // -// * ListBucketMetricsConfigurations +// * ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) // // GetBucketLifecycle has the following special error: // @@ -7528,12 +7737,17 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re output = &PutBucketNotificationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutBucketNotification API operation for Amazon Simple Storage Service. // -// No longer used, see the PutBucketNotificationConfiguration operation. +// No longer used, see the PutBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html) +// operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7660,7 +7874,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // // The following operation is related to PutBucketNotificationConfiguration: // -// * GetBucketNotificationConfiguration +// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7690,62 +7904,153 @@ func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, inpu return out, req.Send() } -const opPutBucketPolicy = "PutBucketPolicy" +const opPutBucketOwnershipControls = "PutBucketOwnershipControls" -// PutBucketPolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutBucketPolicy operation. The "output" return +// PutBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketOwnershipControls operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutBucketPolicy for more information on using the PutBucketPolicy +// See PutBucketOwnershipControls for more information on using the PutBucketOwnershipControls // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // -// // Example sending a request using the PutBucketPolicyRequest method. -// req, resp := client.PutBucketPolicyRequest(params) +// // Example sending a request using the PutBucketOwnershipControlsRequest method. +// req, resp := client.PutBucketOwnershipControlsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy -func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls +func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControlsInput) (req *request.Request, output *PutBucketOwnershipControlsOutput) { op := &request.Operation{ - Name: opPutBucketPolicy, + Name: opPutBucketOwnershipControls, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?policy", + HTTPPath: "/{Bucket}?ownershipControls", } if input == nil { - input = &PutBucketPolicyInput{} + input = &PutBucketOwnershipControlsInput{} } - output = &PutBucketPolicyOutput{} + output = &PutBucketOwnershipControlsOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// PutBucketPolicy API operation for Amazon Simple Storage Service. +// PutBucketOwnershipControls API operation for Amazon Simple Storage Service. // -// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using -// an identity other than the root user of the AWS account that owns the bucket, -// the calling identity must have the PutBucketPolicy permissions on the specified -// bucket and belong to the bucket owner's account in order to use this operation. +// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this +// operation, you must have the s3:GetBucketOwnershipControls permission. For +// more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). // -// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access -// Denied error. If you have the correct permissions, but you're not using an -// identity that belongs to the bucket owner's account, Amazon S3 returns a -// 405 Method Not Allowed error. +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). // -// As a security precaution, the root user of the AWS account that owns a bucket +// The following operations are related to GetBucketOwnershipControls: +// +// * GetBucketOwnershipControls +// +// * DeleteBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketOwnershipControls for usage and error information. 
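// A minimal PutBucketOwnershipControls sketch setting BucketOwnerPreferred, assuming a
// placeholder bucket name and a caller permitted to manage the bucket's ownership controls.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    _, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
//        Bucket: aws.String("example-bucket"),
//        OwnershipControls: &s3.OwnershipControls{
//            Rules: []*s3.OwnershipControlsRule{{
//                ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerPreferred),
//            }},
//        },
//    })
//    if err != nil {
//        log.Fatal(err)
//    }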
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls +func (c *S3) PutBucketOwnershipControls(input *PutBucketOwnershipControlsInput) (*PutBucketOwnershipControlsOutput, error) { + req, out := c.PutBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// PutBucketOwnershipControlsWithContext is the same as PutBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketOwnershipControlsWithContext(ctx aws.Context, input *PutBucketOwnershipControlsInput, opts ...request.Option) (*PutBucketOwnershipControlsOutput, error) { + req, out := c.PutBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketPolicy for more information on using the PutBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + output = &PutBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketPolicy API operation for Amazon Simple Storage Service. +// +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using +// an identity other than the root user of the AWS account that owns the bucket, +// the calling identity must have the PutBucketPolicy permissions on the specified +// bucket and belong to the bucket owner's account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. 
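// A minimal PutBucketPolicy sketch attaching an illustrative deny-non-TLS policy to a
// placeholder bucket; the policy document is passed as a JSON string.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    policy := `{
//      "Version": "2012-10-17",
//      "Statement": [{
//        "Sid": "AllowSSLRequestsOnly",
//        "Effect": "Deny",
//        "Principal": "*",
//        "Action": "s3:*",
//        "Resource": ["arn:aws:s3:::example-bucket", "arn:aws:s3:::example-bucket/*"],
//        "Condition": {"Bool": {"aws:SecureTransport": "false"}}
//      }]
//    }`
//    _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
//        Bucket: aws.String("example-bucket"),
//        Policy: aws.String(policy),
//    })
//    if err != nil {
//        log.Fatal(err)
//    }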
+// +// As a security precaution, the root user of the AWS account that owns a bucket // can always use this operation, even if the policy explicitly denies the root // user the ability to perform this action. // @@ -7754,9 +8059,9 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R // // The following operations are related to PutBucketPolicy: // -// * CreateBucket +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * DeleteBucket +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7826,6 +8131,10 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req output = &PutBucketReplicationOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -7856,6 +8165,13 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // When you add the Filter element in the configuration, you must also add the // following elements: DeleteMarkerReplication, Status, and Priority. // +// The latest version of the replication configuration XML is V2. XML V2 replication +// configurations are those that contain the Filter element for rules, and rules +// that specify S3 Replication Time Control (S3 RTC). In XML V2 replication +// configurations, Amazon S3 doesn't replicate delete markers. Therefore, you +// must set the DeleteMarkerReplication element to Disabled. For backward compatibility, +// Amazon S3 continues to support the XML V1 replication configuration. +// // For information about enabling versioning on a bucket, see Using Versioning // (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). // @@ -7874,13 +8190,14 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // replication configuration, see Replicating Objects Created with SSE Using // CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). // -// For information on PutBucketReplication errors, see ReplicationErrorCodeList +// For information on PutBucketReplication errors, see List of replication-related +// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) // // The following operations are related to PutBucketReplication: // -// * GetBucketReplication +// * GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) // -// * DeleteBucketReplication +// * DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7950,6 +8267,10 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) output = &PutBucketRequestPaymentOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -7963,9 +8284,9 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) // // The following operations are related to PutBucketRequestPayment: // -// * CreateBucket +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * GetBucketRequestPayment +// * GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8035,6 +8356,10 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request output = &PutBucketTaggingOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -8065,8 +8390,8 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // * Error code: InvalidTagError Description: The tag provided was not a // valid tag. This error can occur if the tag did not pass input validation. // For information about tag restrictions, see User-Defined Tag Restrictions -// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2//allocation-tag-restrictions.html) -// and AWS-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2//aws-tag-restrictions.html). +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// and AWS-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). // // * Error code: MalformedXMLError Description: The XML provided does not // match the schema. @@ -8079,9 +8404,9 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // // The following operations are related to PutBucketTagging: // -// * GetBucketTagging +// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) // -// * DeleteBucketTagging +// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8151,6 +8476,10 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r output = &PutBucketVersioningOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -8168,7 +8497,8 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // added to the bucket receive the version ID null. // // If the versioning state has never been set on a bucket, it has no versioning -// state; a GetBucketVersioning request does not return a versioning state value. +// state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// request does not return a versioning state value. // // If the bucket owner enables MFA Delete in the bucket versioning configuration, // the bucket owner must include the x-amz-mfa request header and the Status @@ -8185,11 +8515,11 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // // Related Resources // -// * CreateBucket +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) // -// * DeleteBucket +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) // -// * GetBucketVersioning +// * GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8259,6 +8589,10 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request output = &PutBucketWebsiteOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -8326,6 +8660,11 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // // * HttpRedirectCode // +// Amazon S3 has a limitation of 50 routing rules per website configuration. +// If you require more than 50 routing rules, you can use object redirect. For +// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) +// in the Amazon Simple Storage Service Developer Guide. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8415,12 +8754,12 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // you can calculate the MD5 while putting an object to Amazon S3 and compare // the returned ETag to the calculated MD5 value. // -// To configure your application to send the request headers before sending -// the request body, use the 100-continue HTTP status code. For PUT operations, -// this helps you avoid sending the message body if the message is rejected -// based on the headers (for example, because authentication fails or a redirect -// occurs). 
For more information on the 100-continue HTTP status code, see Section -// 8.2.3 of http://www.ietf.org/rfc/rfc2616.txt (http://www.ietf.org/rfc/rfc2616.txt). +// The Content-MD5 header is required for any request to upload an object with +// a retention period configured using Amazon S3 Object Lock. For more information +// about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Server-side Encryption // // You can optionally request server-side encryption. With server-side encryption, // Amazon S3 encrypts your data as it writes it to disks in its data centers @@ -8428,149 +8767,42 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // your own encryption key or use AWS managed encryption keys. For more information, // see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). // -// Access Permissions -// -// You can optionally specify the accounts or groups that should be granted -// specific permissions on the new object. There are two ways to grant the permissions -// using the request headers: -// -// * Specify a canned ACL with the x-amz-acl request header. For more information, -// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// * Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, -// x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters -// map to the set of permissions that Amazon S3 supports in an ACL. For more -// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// -// You can use either a canned ACL or specify access permissions explicitly. -// You cannot do both. -// -// Server-Side- Encryption-Specific Request Headers -// -// You can optionally tell Amazon S3 to encrypt data at rest using server-side -// encryption. Server-side encryption is for data encryption at rest. Amazon -// S3 encrypts your data as it writes it to disks in its data centers and decrypts -// it when you access it. The option you use depends on whether you want to -// use AWS managed encryption keys or provide your own encryption key. -// -// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) -// stored in AWS Key Management Service (AWS KMS) – If you want AWS to -// manage the keys used to encrypt data, specify the following headers in -// the request. x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id -// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, -// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon -// S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want -// to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id -// of the symmetric customer managed CMK. Amazon S3 only supports symmetric -// CMKs and not asymmetric CMKs. For more information, see Using Symmetric -// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the AWS Key Management Service Developer Guide. All GET and PUT requests -// for an object protected by AWS KMS fail if you don't make them with SSL -// or by using SigV4. 
For more information about server-side encryption with -// CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side -// Encryption with CMKs stored in AWS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// * Use customer-provided encryption keys – If you want to manage your -// own encryption keys, provide all the following headers in the request. -// x-amz-server-side​-encryption​-customer-algorithm x-amz-server-side​-encryption​-customer-key -// x-amz-server-side​-encryption​-customer-key-MD5 For more information -// about server-side encryption with CMKs stored in KMS (SSE-KMS), see Protecting -// Data Using Server-Side Encryption with CMKs stored in AWS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). -// -// Access-Control-List (ACL)-Specific Request Headers -// -// You also can use the following access control–related headers with this -// operation. By default, all objects are private. Only the owner has full access -// control. When adding a new object, you can grant permissions to individual -// AWS accounts or to predefined groups defined by Amazon S3. These permissions -// are then added to the Access Control List (ACL) on the object. For more information, -// see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). -// With this operation, you can grant access permissions using one of the following -// two methods: -// -// * Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined -// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees -// and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). -// -// * Specify access permissions explicitly — To explicitly grant access -// permissions to specific AWS accounts or groups, use the following headers. -// Each header maps to specific permissions that Amazon S3 supports in an -// ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). -// In the header, you specify a list of grantees who get the specific permission. -// To grant permissions explicitly use: x-amz-grant-read x-amz-grant-write -// x-amz-grant-read-acp x-amz-grant-write-acp x-amz-grant-full-control You -// specify each grantee as a type=value pair, where the type is one of the -// following: emailAddress – if the value specified is the email address -// of an AWS account Using email addresses to specify a grantee is only supported -// in the following AWS Regions: US East (N. Virginia) US West (N. California) -// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific -// (Tokyo) EU (Ireland) South America (São Paulo) For a list of all the -// Amazon S3 supported Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) -// in the AWS General Reference id – if the value specified is the canonical -// user ID of an AWS account uri – if you are granting permissions to a -// predefined group For example, the following x-amz-grant-read header grants -// the AWS accounts identified by email addresses permissions to read object -// data and its metadata: x-amz-grant-read: emailAddress="xyz@amazon.com", -// emailAddress="abc@amazon.com" -// -// Server-Side- Encryption-Specific Request Headers -// -// You can optionally tell Amazon S3 to encrypt data at rest using server-side -// encryption. 
Server-side encryption is for data encryption at rest. Amazon -// S3 encrypts your data as it writes it to disks in its data centers and decrypts -// it when you access it. The option you use depends on whether you want to -// use AWS-managed encryption keys or provide your own encryption key. -// -// * Use encryption keys managed by Amazon S3 or customer master keys (CMKs) -// stored in AWS Key Management Service (AWS KMS) – If you want AWS to -// manage the keys used to encrypt data, specify the following headers in -// the request. x-amz-server-side​-encryption x-amz-server-side-encryption-aws-kms-key-id -// x-amz-server-side-encryption-context If you specify x-amz-server-side-encryption:aws:kms, -// but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon -// S3 uses the AWS managed CMK in AWS KMS to protect the data. If you want -// to use a customer managed AWS KMS CMK, you must provide the x-amz-server-side-encryption-aws-kms-key-id -// of the symmetric customer managed CMK. Amazon S3 only supports symmetric -// CMKs and not asymmetric CMKs. For more information, see Using Symmetric -// and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) -// in the AWS Key Management Service Developer Guide. All GET and PUT requests -// for an object protected by AWS KMS fail if you don't make them with SSL -// or by using SigV4. For more information about server-side encryption with -// CMKs stored in AWS KMS (SSE-KMS), see Protecting Data Using Server-Side -// Encryption with CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// Access Control List (ACL)-Specific Request Headers // -// * Use customer-provided encryption keys – If you want to manage your -// own encryption keys, provide all the following headers in the request. -// If you use this feature, the ETag value that Amazon S3 returns in the -// response is not the MD5 of the object. x-amz-server-side​-encryption​-customer-algorithm -// x-amz-server-side​-encryption​-customer-key x-amz-server-side​-encryption​-customer-key-MD5 -// For more information about server-side encryption with CMKs stored in -// AWS KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with -// CMKs stored in AWS KMS (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html). +// You can use headers to grant ACL- based permissions. By default, all objects +// are private. Only the owner has full access control. When adding a new object, +// you can grant permissions to individual AWS accounts or to predefined groups +// defined by Amazon S3. These permissions are then added to the ACL on the +// object. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). // // Storage Class Options // -// By default, Amazon S3 uses the Standard storage class to store newly created -// objects. The Standard storage class provides high durability and high availability. -// You can specify other storage classes depending on the performance needs. -// For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) -// in the Amazon Simple Storage Service Developer Guide. +// By default, Amazon S3 uses the STANDARD Storage Class to store newly created +// objects. 
The STANDARD storage class provides high durability and high availability. +// Depending on performance needs, you can specify a different Storage Class. +// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, +// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +// in the Amazon S3 Service Developer Guide. // // Versioning // // If you enable versioning for a bucket, Amazon S3 automatically generates // a unique version ID for the object being stored. Amazon S3 returns this ID -// in the response using the x-amz-version-id response header. If versioning -// is suspended, Amazon S3 always uses null as the version ID for the object -// stored. For more information about returning the versioning state of a bucket, -// see GetBucketVersioning. If you enable versioning for a bucket, when Amazon -// S3 receives multiple write requests for the same object simultaneously, it -// stores all of the objects. +// in the response. When you enable versioning for a bucket, if Amazon S3 receives +// multiple write requests for the same object simultaneously, it stores all +// of the objects. +// +// For more information about versioning, see Adding Objects to Versioning Enabled +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html). +// For information about returning the versioning state of a bucket, see GetBucketVersioning +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). // // Related Resources // -// * CopyObject +// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) // -// * DeleteObject +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8639,19 +8871,29 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request output = &PutObjectAclOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutObjectAcl API operation for Amazon Simple Storage Service. // // Uses the acl subresource to set the access control list (ACL) permissions -// for an object that already exists in a bucket. You must have WRITE_ACP permission -// to set the ACL of an object. +// for a new or existing object in an S3 bucket. You must have WRITE_ACP permission +// to set the ACL of an object. For more information, see What permissions can +// I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions) +// in the Amazon Simple Storage Service Developer Guide. +// +// This action is not supported by Amazon S3 on Outposts. // // Depending on your application needs, you can choose to set the ACL on an // object using either the request body or the headers. For example, if you // have an existing application that updates a bucket ACL using the request -// body, you can continue to use that approach. +// body, you can continue to use that approach. For more information, see Access +// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) +// in the Amazon S3 Developer Guide. 
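// Illustrative sketch (not part of the vendored SDK diff): a PutObject call combining
// the request options discussed above: a canned ACL, server-side encryption, and a
// non-default storage class. Bucket and key are placeholders; constant and field names
// follow the aws-sdk-go v1 API as best recalled and should be checked against the
// vendored source.
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:               aws.String("example-bucket"),      // placeholder
		Key:                  aws.String("reports/january.txt"), // placeholder
		Body:                 strings.NewReader("hello, world"),
		ACL:                  aws.String(s3.ObjectCannedACLPrivate),     // canned ACL via x-amz-acl
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256), // SSE with S3-managed keys
		StorageClass:         aws.String(s3.StorageClassStandardIa),     // non-default storage class
	})
	if err != nil {
		fmt.Println("put failed:", err)
		return
	}
	// With versioning enabled on the bucket, the new version ID comes back in the response.
	fmt.Println("etag:", aws.StringValue(out.ETag), "version:", aws.StringValue(out.VersionId))
}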
// // Access Permissions // @@ -8673,12 +8915,19 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // S3 supports in an ACL. For more information, see Access Control List (ACL) // Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). // You specify each grantee as a type=value pair, where the type is one of -// the following: emailAddress – if the value specified is the email address -// of an AWS account id – if the value specified is the canonical user -// ID of an AWS account uri – if you are granting permissions to a predefined -// group For example, the following x-amz-grant-read header grants list objects -// permission to the two AWS accounts identified by their email addresses. -// x-amz-grant-read: emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" +// the following: id – if the value specified is the canonical user ID +// of an AWS account uri – if you are granting permissions to a predefined +// group emailAddress – if the value specified is the email address of +// an AWS account Using email addresses to specify a grantee is only supported +// in the following AWS Regions: US East (N. Virginia) US West (N. California) +// US West (Oregon) Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific +// (Tokyo) Europe (Ireland) South America (São Paulo) For a list of all +// the Amazon S3 supported Regions and endpoints, see Regions and Endpoints +// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in +// the AWS General Reference. For example, the following x-amz-grant-read +// header grants list objects permission to the two AWS accounts identified +// by their email addresses. x-amz-grant-read: emailAddress="xyz@amazon.com", +// emailAddress="abc@amazon.com" // // You can use either a canned ACL or specify access permissions explicitly. // You cannot do both. @@ -8688,11 +8937,6 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // You can specify the person (grantee) to whom you're assigning access rights // (using request elements) in the following ways: // -// * By Email address: <>Grantees@email.com<>lt;/Grantee> -// The grantee is resolved to the CanonicalUser and, in a response to a GET -// Object acl request, appears as the CanonicalUser. -// // * By the person's ID: <>ID<><>GranteesEmail<> // DisplayName is optional and ignored in the request. @@ -8700,6 +8944,17 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // * By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<> // +// * By Email address: <>Grantees@email.com<>lt;/Grantee> +// The grantee is resolved to the CanonicalUser and, in a response to a GET +// Object acl request, appears as the CanonicalUser. Using email addresses +// to specify a grantee is only supported in the following AWS Regions: US +// East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific +// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) +// South America (São Paulo) For a list of all the Amazon S3 supported Regions +// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) +// in the AWS General Reference. +// // Versioning // // The ACL of an object is set at the object version level. 
By default, PUT @@ -8708,9 +8963,9 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // // Related Resources // -// * CopyObject +// * CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) // -// * GetObject +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8784,6 +9039,10 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req output = &PutObjectLegalHoldOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -8791,6 +9050,8 @@ func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *req // // Applies a Legal Hold configuration to the specified object. // +// This action is not supported by Amazon S3 on Outposts. +// // Related Resources // // * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) @@ -8862,6 +9123,10 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration output = &PutObjectLockConfigurationOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -8945,6 +9210,10 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req output = &PutObjectRetentionOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -8952,6 +9221,8 @@ func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *req // // Places an Object Retention configuration on an object. // +// This action is not supported by Amazon S3 on Outposts. +// // Related Resources // // * Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) @@ -9023,17 +9294,21 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request output = &PutObjectTaggingOutput{} req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } // PutObjectTagging API operation for Amazon Simple Storage Service. // -// Sets the supplied tag-set to an object that already exists in a bucket +// Sets the supplied tag-set to an object that already exists in a bucket. // // A tag is a key-value pair. You can associate tags with an object by sending // a PUT request against the tagging subresource that is associated with the // object. You can retrieve tags by sending a GET request. For more information, -// see GetObjectTagging. +// see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). // // For tagging-related restrictions related to characters and encodings, see // Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). 
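// Illustrative sketch (not part of the vendored SDK diff): replacing an object's
// tag-set with PutObjectTagging via a PUT against the tagging subresource, as the
// documentation above describes. Bucket, key, and tag values are placeholders; types
// follow the aws-sdk-go v1 API as best recalled.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// The PUT replaces the whole tag-set on the object.
	out, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("example-bucket"),      // placeholder
		Key:    aws.String("reports/january.txt"), // placeholder
		Tagging: &s3.Tagging{
			TagSet: []*s3.Tag{
				{Key: aws.String("project"), Value: aws.String("quicksight")},
				{Key: aws.String("classification"), Value: aws.String("internal")},
			},
		},
	})
	if err != nil {
		fmt.Println("tagging failed:", err)
		return
	}
	fmt.Println("tagged version:", aws.StringValue(out.VersionId))
}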
@@ -9065,7 +9340,7 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request // // Related Resources // -// * GetObjectTagging +// * GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9135,6 +9410,10 @@ func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req output = &PutPublicAccessBlockOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -9157,11 +9436,11 @@ func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req // // Related Resources // -// * GetPublicAccessBlock +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) // -// * DeletePublicAccessBlock +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) // -// * GetBucketPolicyStatus +// * GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) // // * Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) // @@ -9239,16 +9518,18 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // // Restores an archived copy of an object back into Amazon S3 // -// This operation performs the following types of requests: +// This action is not supported by Amazon S3 on Outposts. +// +// This action performs the following types of requests: // // * select - Perform a select query on an archived object // // * restore an archive - Restore an archived object // // To use this operation, you must have permissions to perform the s3:RestoreObject -// and s3:GetObject actions. The bucket owner has this permission by default -// and can grant this permission to others. For more information about permissions, -// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// action. The bucket owner has this permission by default and can grant this +// permission to others. For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) // in the Amazon Simple Storage Service Developer Guide. // @@ -9273,7 +9554,8 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // (https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html) // in the Amazon Simple Storage Service Developer Guide. 
For more information // about the S3 structure in the request body, see the following: PutObject -// Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) Managing +// Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) // in the Amazon Simple Storage Service Developer Guide Protecting Data Using // Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) // in the Amazon Simple Storage Service Developer Guide @@ -9290,8 +9572,8 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // the query.) You cannot mix ordinal positions with header column names. // SELECT s.Id, s.FirstName, s.SSN FROM S3Object s // -// For more information about using SQL with Glacier Select restore, see SQL -// Reference for Amazon S3 Select and Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// For more information about using SQL with S3 Glacier Select restore, see +// SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) // in the Amazon Simple Storage Service Developer Guide. // // When making a select request, you can also do the following: @@ -9344,12 +9626,12 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // retrievals and provisioned capacity are not available for the DEEP_ARCHIVE // storage class. // -// * Standard - Standard retrievals allow you to access any of your archived +// * Standard - S3 Standard retrievals allow you to access any of your archived // objects within several hours. This is the default option for the GLACIER // and DEEP_ARCHIVE retrieval requests that do not specify the retrieval -// option. Standard retrievals typically complete within 3-5 hours from the -// GLACIER storage class and typically complete within 12 hours from the -// DEEP_ARCHIVE storage class. +// option. S3 Standard retrievals typically complete within 3-5 hours from +// the GLACIER storage class and typically complete within 12 hours from +// the DEEP_ARCHIVE storage class. // // * Bulk - Bulk retrievals are Amazon S3 Glacier’s lowest-cost retrieval // option, enabling you to retrieve large amounts, even petabytes, of data @@ -9389,7 +9671,8 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // specify in a restore request. For example, if you restore an object copy // for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes // the object in 3 days. For more information about lifecycle configuration, -// see PutBucketLifecycleConfiguration and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) // in Amazon Simple Storage Service Developer Guide. // // Responses @@ -9408,19 +9691,19 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // (This error does not apply to SELECT type requests.) 
HTTP Status Code: // 409 Conflict SOAP Fault Code Prefix: Client // -// * Code: GlacierExpeditedRetrievalNotAvailable Cause: Glacier expedited +// * Code: GlacierExpeditedRetrievalNotAvailable Cause: S3 Glacier expedited // retrievals are currently not available. Try again later. (Returned if // there is insufficient capacity to process the Expedited request. This -// error applies only to Expedited retrievals and not to Standard or Bulk +// error applies only to Expedited retrievals and not to S3 Standard or Bulk // retrievals.) HTTP Status Code: 503 SOAP Fault Code Prefix: N/A // // Related Resources // -// * PutBucketLifecycleConfiguration +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) // -// * GetBucketNotificationConfiguration +// * GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) // -// * SQL Reference for Amazon S3 Select and Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// * SQL Reference for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) // in the Amazon Simple Storage Service Developer Guide // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -9496,7 +9779,7 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r output = &SelectObjectContentOutput{} req = c.newRequest(op, input, output) - es := newSelectObjectContentEventStream() + es := NewSelectObjectContentEventStream() req.Handlers.Unmarshal.PushBack(es.setStreamCloser) output.EventStream = es @@ -9517,12 +9800,14 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // SQL expression. You must also specify the data serialization format for the // response. // +// This action is not supported by Amazon S3 on Outposts. +// // For more information about Amazon S3 Select, see Selecting Content from Objects // (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) // in the Amazon Simple Storage Service Developer Guide. // // For more information about using SQL with Amazon S3 Select, see SQL Reference -// for Amazon S3 Select and Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) +// for Amazon S3 Select and S3 Glacier Select (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html) // in the Amazon Simple Storage Service Developer Guide. // // Permissions @@ -9550,8 +9835,8 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // * Server-side encryption - Amazon S3 Select supports querying objects // that are protected with server-side encryption. For objects that are encrypted // with customer-provided encryption keys (SSE-C), you must use HTTPS, and -// you must use the headers that are documented in the GetObject. For more -// information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// you must use the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) // in the Amazon Simple Storage Service Developer Guide. 
For objects that // are encrypted with Amazon S3 managed encryption keys (SSE-S3) and customer @@ -9565,16 +9850,18 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // // Given the response size is unknown, Amazon S3 Select streams the response // as a series of messages and includes a Transfer-Encoding header with chunked -// as its value in the response. For more information, see RESTSelectObjectAppendix . +// as its value in the response. For more information, see Appendix: SelectObjectContent +// Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html) . // // GetObject Support // // The SelectObjectContent operation does not support the following GetObject -// functionality. For more information, see GetObject. +// functionality. For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). // -// * Range: While you can specify a scan range for a Amazon S3 Select request, -// see SelectObjectContentRequest$ScanRange in the request parameters below, -// you cannot specify the range of bytes of an object to return. +// * Range: Although you can specify a scan range for an Amazon S3 Select +// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) +// in the request parameters), you cannot specify the range of bytes of an +// object to return. // // * GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot // specify the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY storage classes. @@ -9583,16 +9870,16 @@ func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *r // // Special Errors // -// For a list of special errors for this operation and for general information -// about Amazon S3 errors and a list of error codes, see ErrorResponses +// For a list of special errors for this operation, see List of SELECT Object +// Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList) // // Related Resources // -// * GetObject +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) // -// * GetBucketLifecycleConfiguration +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) // -// * PutBucketLifecycleConfiguration +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9622,7 +9909,13 @@ func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObject return out, req.Send() } +var _ awserr.Error + // SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent. +// +// For testing and mocking the event stream this type should be initialized via +// the NewSelectObjectContentEventStream constructor function. Using the functional options +// to pass in nested mock behavior. 
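// Illustrative sketch (not part of the vendored SDK diff): consuming the
// SelectObjectContent event stream documented above by ranging over Events() and
// type-switching on the event variants. Bucket, key, and the SQL expression are
// placeholders and assume a CSV object with a header row; event type and field names
// follow the aws-sdk-go v1 API as best recalled.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),      // placeholder
		Key:            aws.String("reports/january.csv"), // placeholder
		Expression:     aws.String("SELECT s.Id, s.FirstName FROM S3Object s"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{
			CSV: &s3.CSVOutput{},
		},
	})
	if err != nil {
		fmt.Println("select failed:", err)
		return
	}
	defer resp.EventStream.Close()

	// The response arrives as a series of messages (Transfer-Encoding: chunked).
	for event := range resp.EventStream.Events() {
		switch e := event.(type) {
		case *s3.RecordsEvent:
			fmt.Print(string(e.Payload)) // a chunk of query results
		case *s3.StatsEvent:
			fmt.Println("bytes scanned:", aws.Int64Value(e.Details.BytesScanned))
		case *s3.EndEvent:
			fmt.Println("stream finished")
		}
	}
	if err := resp.EventStream.Err(); err != nil {
		fmt.Println("stream error:", err)
	}
}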
type SelectObjectContentEventStream struct { // Reader is the EventStream reader for the SelectObjectContentEventStream @@ -9645,11 +9938,31 @@ type SelectObjectContentEventStream struct { err *eventstreamapi.OnceError } -func newSelectObjectContentEventStream() *SelectObjectContentEventStream { - return &SelectObjectContentEventStream{ +// NewSelectObjectContentEventStream initializes an SelectObjectContentEventStream. +// This function should only be used for testing and mocking the SelectObjectContentEventStream +// stream within your application. +// +// The Reader member must be set before reading events from the stream. +// +// The StreamCloser member should be set to the underlying io.Closer, +// (e.g. http.Response.Body), that will be closed when the stream Close method +// is called. +// +// es := NewSelectObjectContentEventStream(func(o *SelectObjectContentEventStream{ +// es.Reader = myMockStreamReader +// es.StreamCloser = myMockStreamCloser +// }) +func NewSelectObjectContentEventStream(opts ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream { + es := &SelectObjectContentEventStream{ done: make(chan struct{}), err: eventstreamapi.NewOnceError(), } + + for _, fn := range opts { + fn(es) + } + + return es } func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) { @@ -9696,6 +10009,7 @@ func (es *SelectObjectContentEventStream) waitStreamPartClose() { // * ProgressEvent // * RecordsEvent // * StatsEvent +// * SelectObjectContentEventStreamUnknownEvent func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { return es.Reader.Events() } @@ -9812,12 +10126,13 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // In this operation, you provide part data in your request. However, you have // an option to specify your existing Amazon S3 object as a data source for // the part you are uploading. To upload a part from an existing object, you -// use the UploadPartCopy operation. +// use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// operation. // -// You must initiate a multipart upload (see CreateMultipartUpload) before you -// can upload any part. In response to your initiate request, Amazon S3 returns -// an upload ID, a unique identifier, that you must include in your upload part -// request. +// You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)) +// before you can upload any part. In response to your initiate request, Amazon +// S3 returns an upload ID, a unique identifier, that you must include in your +// upload part request. // // Part numbers can be any number from 1 to 10,000, inclusive. A part number // uniquely identifies a part and also defines its position within the object @@ -9831,6 +10146,11 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // data against the provided MD5 value. If they do not match, Amazon S3 returns // an error. // +// If the upload request is signed with Signature Version 4, then AWS S3 uses +// the x-amz-content-sha256 header as a checksum instead of Content-MD5. For +// more information see Authenticating Requests: Using the Authorization Header +// (AWS Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html). 
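// Illustrative sketch (not part of the vendored SDK diff): the UploadPart flow
// described above, i.e. initiate the multipart upload, upload a part under an explicit
// part number, then complete the upload with the ETag returned for each part. Bucket,
// key, and payload are placeholders; a real upload would use parts of at least 5 MB
// (except the last), and type names follow the aws-sdk-go v1 API as best recalled.
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	bucket, key := aws.String("example-bucket"), aws.String("archives/backup.bin") // placeholders

	// 1. Initiate: Amazon S3 returns the upload ID that every subsequent part must carry.
	mpu, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{Bucket: bucket, Key: key})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Upload part 1 (part numbers run from 1 to 10,000).
	part, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   mpu.UploadId,
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader([]byte("part one payload")),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 3. Complete (or abort) so the uploaded parts stop accruing storage charges.
	out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: mpu.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("assembled object location:", aws.StringValue(out.Location))
}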
+// // Note: After you initiate multipart upload and upload one or more parts, you // must either complete or abort multipart upload in order to stop getting charged // for storage of the uploaded parts. Only after you either complete or abort @@ -9851,25 +10171,25 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // key, or you can use the AWS managed encryption keys. If you choose to provide // your own encryption key, the request headers you provide in the request must // match the headers you used in the request to initiate the upload by using -// CreateMultipartUpload. For more information, go to Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// For more information, go to Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) // in the Amazon Simple Storage Service Developer Guide. // // Server-side encryption is supported by the S3 Multipart Upload actions. Unless // you are using a customer-provided encryption key, you don't need to specify // the encryption parameters in each UploadPart request. Instead, you only need // to specify the server-side encryption parameters in the initial Initiate -// Multipart request. For more information, see CreateMultipartUpload. +// Multipart request. For more information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). // // If you requested server-side encryption using a customer-provided encryption // key in your initiate multipart upload request, you must provide identical // encryption information in each part upload using the following headers. // -// * x-amz-server-side​-encryption​-customer-algorithm +// * x-amz-server-side-encryption-customer-algorithm // -// * x-amz-server-side​-encryption​-customer-key +// * x-amz-server-side-encryption-customer-key // -// * x-amz-server-side​-encryption​-customer-key-MD5 +// * x-amz-server-side-encryption-customer-key-MD5 // // Special Errors // @@ -9880,15 +10200,15 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // Related Resources // -// * CreateMultipartUpload +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// * CompleteMultipartUpload +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// * AbortMultipartUpload +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // -// * ListParts +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * ListMultipartUploads +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -9972,7 +10292,8 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // in the Amazon Simple Storage Service Developer Guide. // // Instead of using an existing object as part data, you might use the UploadPart -// operation and provide data in your request. 
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) operation +// and provide data in your request. // // You must initiate a multipart upload before you can upload any part. In response // to your initiate request. Amazon S3 returns a unique identifier, the upload @@ -9993,8 +10314,8 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // in the Amazon Simple Storage Service Developer Guide. // // * For information about using server-side encryption with customer-provided -// encryption keys with the UploadPartCopy operation, see CopyObject and -// UploadPart. +// encryption keys with the UploadPartCopy operation, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html). // // Note the following additional considerations about the request headers x-amz-copy-source-if-match, // x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and @@ -10039,17 +10360,17 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // Related Resources // -// * CreateMultipartUpload +// * CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) // -// * UploadPart +// * UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) // -// * CompleteMultipartUpload +// * CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) // -// * AbortMultipartUpload +// * AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) // -// * ListParts +// * ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) // -// * ListMultipartUploads +// * ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10115,14 +10436,27 @@ type AbortMultipartUploadInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Key of the object for which the multipart upload was initiated. // // Key is a required field @@ -10189,6 +10523,12 @@ func (s *AbortMultipartUploadInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *AbortMultipartUploadInput) SetExpectedBucketOwner(v string) *AbortMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { s.Key = &v @@ -10221,6 +10561,19 @@ func (s *AbortMultipartUploadInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s AbortMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type AbortMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -10615,8 +10968,11 @@ type AnalyticsS3BucketDestination struct { // Bucket is a required field Bucket *string `type:"string" required:"true"` - // The account ID that owns the destination bucket. If no account ID is provided, - // the owner will not be validated prior to exporting data. + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. BucketAccountId *string `type:"string"` // Specifies the file format used when exporting data to Amazon S3. @@ -11226,6 +11582,11 @@ type CompleteMultipartUploadInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Object key for which the multipart upload was initiated. // // Key is a required field @@ -11295,6 +11656,12 @@ func (s *CompleteMultipartUploadInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CompleteMultipartUploadInput) SetExpectedBucketOwner(v string) *CompleteMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. 
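// Illustrative sketch (not part of the vendored SDK diff): using the ExpectedBucketOwner
// field introduced in this diff. If the bucket is owned by an account other than the one
// supplied, the request fails with HTTP 403 (Access Denied). All identifiers below are
// placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:              aws.String("example-bucket"),      // placeholder
		Key:                 aws.String("archives/backup.bin"), // placeholder
		UploadId:            aws.String("example-upload-id"),   // placeholder
		ExpectedBucketOwner: aws.String("123456789012"),        // account that must own the bucket
	})
	if err != nil {
		fmt.Println("abort failed:", err)
	}
}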
func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { s.Key = &v @@ -11333,10 +11700,38 @@ func (s *CompleteMultipartUploadInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CompleteMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type CompleteMultipartUploadOutput struct { _ struct{} `type:"structure"` // The name of the bucket that contains the newly created object. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. Bucket *string `type:"string"` // Entity tag that identifies the newly created object's data. Objects with @@ -11578,6 +11973,8 @@ func (s *ContinuationEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) return msg, err @@ -11587,10 +11984,27 @@ type CopyObjectInput struct { _ struct{} `locationName:"CopyObjectRequest" type:"structure"` // The canned ACL to apply to the object. + // + // This action is not supported by Amazon S3 on Outposts. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // The name of the destination bucket. // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. 
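// Minimal usage sketch, assuming placeholder identifiers (not part of this
// patch or the vendored SDK): completing a multipart upload with the new
// x-amz-expected-bucket-owner guard shown in the hunks above. If the bucket is
// owned by a different account than the one passed to SetExpectedBucketOwner,
// S3 rejects the request with HTTP 403 (Access Denied).
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	input := (&s3.CompleteMultipartUploadInput{}).
		SetBucket("my-bucket").
		SetKey("large-object.bin").
		SetUploadId("example-upload-id").
		SetExpectedBucketOwner("123456789012"). // field added by this SDK revision
		SetMultipartUpload(&s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{
				{ETag: aws.String("\"etag-of-part-1\""), PartNumber: aws.Int64(1)},
			},
		})

	if _, err := svc.CompleteMultipartUpload(input); err != nil {
		log.Println("complete failed:", err)
	}
}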
+ // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -11611,8 +12025,35 @@ type CopyObjectInput struct { // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - // The name of the source bucket and key name of the source object, separated - // by a slash (/). Must be URL-encoded. + // Specifies the source object for the copy operation. You specify the value + // in one of two formats, depending on whether you want to access the source + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html): + // + // * For objects not accessed through an access point, specify the name of + // the source bucket and the key of the source object, separated by a slash + // (/). For example, to copy the object reports/january.pdf from the bucket + // awsexamplebucket, use awsexamplebucket/reports/january.pdf. The value + // must be URL encoded. + // + // * For objects accessed through access points, specify the Amazon Resource + // Name (ARN) of the object as accessed through the access point, in the + // format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>. + // For example, to copy the object reports/january.pdf through access point + // my-access-point owned by account 123456789012 in Region us-west-2, use + // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. + // The value must be URL encoded. Amazon S3 supports copy operations using + // access points only when the source and destination buckets are in the + // same AWS Region. Alternatively, for objects accessed through Amazon S3 + // on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>. + // For example, to copy the object reports/january.pdf through outpost my-outpost + // owned by account 123456789012 in Region us-west-2, use the URL encoding + // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. + // The value must be URL encoded. + // + // To copy a specific version of an object, append ?versionId=<version-id> to + // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). + // If you don't specify a version ID, Amazon S3 copies the latest version of + // the source object. // // CopySource is a required field CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` @@ -11644,19 +12085,37 @@ type CopyObjectInput struct { // encryption key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + // The account id of the expected destination bucket owner.
If the destination + // bucket is owned by a different account, the request will fail with an HTTP + // 403 (Access Denied) error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The account id of the expected source bucket owner. If the source bucket + // is owned by a different account, the request will fail with an HTTP 403 (Access + // Denied) error. + ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` + // The date and time at which the object is no longer cacheable. Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // This action is not supported by Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to read the object data and its metadata. + // + // This action is not supported by Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the object ACL. + // + // This action is not supported by Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to write the ACL for the applicable object. + // + // This action is not supported by Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // The key of the destination object. @@ -11694,7 +12153,7 @@ type CopyObjectInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` @@ -11720,7 +12179,12 @@ type CopyObjectInput struct { // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // The type of storage to use for the object. Defaults to 'STANDARD'. + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high availability. + // Depending on performance needs, you can specify a different Storage Class. + // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, + // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 Service Developer Guide. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object destination object this value must be used in @@ -11877,6 +12341,18 @@ func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectIn return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
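// A minimal sketch, assuming placeholder bucket names, keys, and account IDs
// (not part of this patch or the vendored SDK): a CopyObject request using the
// CopySource formats and the expected-owner headers documented above.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")})))

	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket: aws.String("destination-bucket"),
		Key:    aws.String("reports/january-copy.pdf"),
		// Plain bucket/key form; per the documentation above the value must be
		// URL encoded. An access point object ARN such as
		// arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
		// may be used instead when the source and destination share a Region.
		CopySource:                aws.String("awsexamplebucket/reports/january.pdf"),
		ExpectedBucketOwner:       aws.String("111111111111"), // expected owner of the destination bucket
		ExpectedSourceBucketOwner: aws.String("222222222222"), // expected owner of the source bucket
	})
	if err != nil {
		log.Println("copy failed:", err)
	}
}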
+func (s *CopyObjectInput) SetExpectedBucketOwner(v string) *CopyObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. +func (s *CopyObjectInput) SetExpectedSourceBucketOwner(v string) *CopyObjectInput { + s.ExpectedSourceBucketOwner = &v + return s +} + // SetExpires sets the Expires field's value. func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { s.Expires = &v @@ -12030,6 +12506,19 @@ func (s *CopyObjectInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CopyObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` @@ -12387,10 +12876,27 @@ type CreateMultipartUploadInput struct { _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` // The canned ACL to apply to the object. + // + // This action is not supported by Amazon S3 on Outposts. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // The name of the bucket to which to initiate the upload // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -12411,19 +12917,32 @@ type CreateMultipartUploadInput struct { // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The date and time at which the object is no longer cacheable. 
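// Illustrative sketch with placeholder names (not part of this patch or the
// vendored SDK): initiating a multipart upload via CreateMultipartUploadInput
// as documented in this struct. Newly created objects default to the STANDARD
// storage class; a different class can be requested, and Outposts buckets
// accept only OUTPOSTS.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:              aws.String("my-bucket"),
		Key:                 aws.String("large-object.bin"),
		StorageClass:        aws.String(s3.StorageClassStandardIa), // omit to use the STANDARD default
		ExpectedBucketOwner: aws.String("123456789012"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// The returned UploadId must accompany every subsequent UploadPart,
	// CompleteMultipartUpload, or AbortMultipartUpload call.
	log.Println("upload id:", aws.StringValue(out.UploadId))
}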
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // This action is not supported by Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to read the object data and its metadata. + // + // This action is not supported by Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the object ACL. + // + // This action is not supported by Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to write the ACL for the applicable object. + // + // This action is not supported by Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Object key for which the multipart upload is to be initiated. @@ -12457,7 +12976,7 @@ type CreateMultipartUploadInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` @@ -12483,7 +13002,12 @@ type CreateMultipartUploadInput struct { // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // The type of storage to use for the object. Defaults to 'STANDARD'. + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high availability. + // Depending on performance needs, you can specify a different Storage Class. + // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, + // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 Service Developer Guide. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. @@ -12576,6 +13100,12 @@ func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUp return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CreateMultipartUploadInput) SetExpectedBucketOwner(v string) *CreateMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + // SetExpires sets the Expires field's value. func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { s.Expires = &v @@ -12717,6 +13247,19 @@ func (s *CreateMultipartUploadInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. 
+// Note that original input is not modified. +func (s CreateMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type CreateMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -12736,14 +13279,22 @@ type CreateMultipartUploadOutput struct { // incomplete multipart uploads. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` - // Name of the bucket to which the multipart upload was initiated. + // The name of the bucket to which the multipart upload was initiated. // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. Bucket *string `locationName:"Bucket" type:"string"` // Object key for which the multipart upload was initiated. @@ -12975,6 +13526,11 @@ type DeleteBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The ID that identifies the analytics configuration. // // Id is a required field @@ -13023,6 +13579,12 @@ func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetId sets the Id field's value. func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { s.Id = &v @@ -13043,6 +13605,19 @@ func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
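// Illustrative sketch of the pattern behind the updateArnableField helpers
// added throughout this file: a value receiver so the caller's input is never
// mutated, and a pointer to a modified copy returned with the resource name
// parsed from the ARN backfilled into Bucket. The standalone type below only
// mirrors that pattern; it is not the SDK's internal plumbing.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

type exampleInput struct {
	Bucket *string
}

// updateArnableField mimics the generated helpers: value receiver, copy returned.
func (s exampleInput) updateArnableField(v string) (interface{}, error) {
	if s.Bucket == nil {
		return nil, fmt.Errorf("member Bucket is nil")
	}
	s.Bucket = aws.String(v)
	return &s, nil
}

func main() {
	original := exampleInput{
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
	}

	modified, err := original.updateArnableField("my-access-point")
	if err != nil {
		panic(err)
	}

	// The original still holds the full ARN; only the returned copy carries
	// the backfilled resource name.
	fmt.Println(aws.StringValue(original.Bucket))
	fmt.Println(aws.StringValue(modified.(*exampleInput).Bucket))
}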
+func (s DeleteBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -13064,6 +13639,11 @@ type DeleteBucketCorsInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -13105,6 +13685,12 @@ func (s *DeleteBucketCorsInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketCorsInput) SetExpectedBucketOwner(v string) *DeleteBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -13119,6 +13705,19 @@ func (s *DeleteBucketCorsInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -13141,6 +13740,11 @@ type DeleteBucketEncryptionInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -13182,6 +13786,12 @@ func (s *DeleteBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketEncryptionInput) SetExpectedBucketOwner(v string) *DeleteBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -13196,6 +13806,19 @@ func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteBucketEncryptionOutput struct { _ struct{} `type:"structure"` } @@ -13217,6 +13840,11 @@ type DeleteBucketInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -13258,6 +13886,12 @@ func (s *DeleteBucketInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketInput) SetExpectedBucketOwner(v string) *DeleteBucketInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -13272,6 +13906,19 @@ func (s *DeleteBucketInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteBucketInventoryConfigurationInput struct { _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` @@ -13280,6 +13927,11 @@ type DeleteBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The ID used to identify the inventory configuration. // // Id is a required field @@ -13328,6 +13980,12 @@ func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetId sets the Id field's value. func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { s.Id = &v @@ -13348,6 +14006,19 @@ func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -13369,6 +14040,11 @@ type DeleteBucketLifecycleInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -13410,6 +14086,12 @@ func (s *DeleteBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketLifecycleInput) SetExpectedBucketOwner(v string) *DeleteBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -13424,6 +14106,19 @@ func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -13446,6 +14141,11 @@ type DeleteBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The ID used to identify the metrics configuration. // // Id is a required field @@ -13494,6 +14194,12 @@ func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetId sets the Id field's value. func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { s.Id = &v @@ -13514,6 +14220,19 @@ func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -13542,6 +14261,103 @@ func (s DeleteBucketOutput) GoString() string { return s.String() } +type DeleteBucketOwnershipControlsInput struct { + _ struct{} `locationName:"DeleteBucketOwnershipControlsRequest" type:"structure"` + + // The Amazon S3 bucket whose OwnershipControls you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s DeleteBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketOwnershipControlsInput) SetBucket(v string) *DeleteBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *DeleteBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + type DeleteBucketPolicyInput struct { _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` @@ -13549,6 +14365,11 @@ type DeleteBucketPolicyInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -13590,6 +14411,12 @@ func (s *DeleteBucketPolicyInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketPolicyInput) SetExpectedBucketOwner(v string) *DeleteBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -13604,6 +14431,19 @@ func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -13625,6 +14465,11 @@ type DeleteBucketReplicationInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -13666,6 +14511,12 @@ func (s *DeleteBucketReplicationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
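// Usage sketch with placeholder values (not part of this patch or the vendored
// SDK): calling the DeleteBucketOwnershipControls operation whose input and
// output types are vendored above, guarded by the optional ExpectedBucketOwner.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.DeleteBucketOwnershipControls(&s3.DeleteBucketOwnershipControlsInput{
		Bucket:              aws.String("my-bucket"),
		ExpectedBucketOwner: aws.String("123456789012"),
	})
	if err != nil {
		log.Println("delete ownership controls failed:", err)
	}
}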
+func (s *DeleteBucketReplicationInput) SetExpectedBucketOwner(v string) *DeleteBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -13680,6 +14531,19 @@ func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -13701,6 +14565,11 @@ type DeleteBucketTaggingInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -13742,6 +14611,12 @@ func (s *DeleteBucketTaggingInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketTaggingInput) SetExpectedBucketOwner(v string) *DeleteBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -13756,6 +14631,19 @@ func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -13777,6 +14665,11 @@ type DeleteBucketWebsiteInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -13818,6 +14711,12 @@ func (s *DeleteBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *DeleteBucketWebsiteInput) SetExpectedBucketOwner(v string) *DeleteBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -13832,6 +14731,19 @@ func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -13951,11 +14863,19 @@ type DeleteObjectInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -13963,6 +14883,11 @@ type DeleteObjectInput struct { // to process this operation. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Key name of the object to delete. // // Key is a required field @@ -14036,6 +14961,12 @@ func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectIn return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectInput) SetExpectedBucketOwner(v string) *DeleteObjectInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. 
func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { s.Key = &v @@ -14074,6 +15005,19 @@ func (s *DeleteObjectInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteObjectOutput struct { _ struct{} `type:"structure"` @@ -14125,15 +15069,28 @@ type DeleteObjectTaggingInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Name of the tag. + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Name of the object key. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -14187,6 +15144,12 @@ func (s *DeleteObjectTaggingInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectTaggingInput) SetExpectedBucketOwner(v string) *DeleteObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { s.Key = &v @@ -14213,6 +15176,19 @@ func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s DeleteObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -14243,11 +15219,19 @@ type DeleteObjectsInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14261,6 +15245,11 @@ type DeleteObjectsInput struct { // Delete is a required field Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The concatenation of the authentication device's serial number, a space, // and the value that is displayed on your authentication device. Required to // permanently delete a versioned object if versioning is configured with MFA @@ -14334,6 +15323,12 @@ func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectsInput) SetExpectedBucketOwner(v string) *DeleteObjectsInput { + s.ExpectedBucketOwner = &v + return s +} + // SetMFA sets the MFA field's value. func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { s.MFA = &v @@ -14360,6 +15355,19 @@ func (s *DeleteObjectsInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
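// Illustrative sketch, assuming a placeholder bucket, keys, and account ID (not
// part of this patch or the vendored SDK): a batch DeleteObjects request built
// from the Delete container documented above. MFA is shown only as a comment
// because it is required solely for versioned buckets with MFA delete enabled.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("my-bucket"),
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("reports/january.pdf")},
				{Key: aws.String("reports/february.pdf")},
			},
			Quiet: aws.Bool(true),
		},
		ExpectedBucketOwner: aws.String("123456789012"),
		// MFA: aws.String("<serial> <token>"), // only for MFA-delete-enabled versioned buckets
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range out.Errors {
		log.Printf("failed to delete %s: %s", aws.StringValue(e.Key), aws.StringValue(e.Message))
	}
}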
+func (s DeleteObjectsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeleteObjectsOutput struct { _ struct{} `type:"structure"` @@ -14411,6 +15419,11 @@ type DeletePublicAccessBlockInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -14452,6 +15465,12 @@ func (s *DeletePublicAccessBlockInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeletePublicAccessBlockInput) SetExpectedBucketOwner(v string) *DeletePublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -14466,6 +15485,19 @@ func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeletePublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type DeletePublicAccessBlockOutput struct { _ struct{} `type:"structure"` } @@ -14575,9 +15607,9 @@ type Destination struct { // must be replicated. Must be specified together with a Metrics block. ReplicationTime *ReplicationTime `type:"structure"` - // The storage class to use when replicating objects, such as standard or reduced - // redundancy. By default, Amazon S3 uses the storage class of the source object - // to create the object replica. + // The storage class to use when replicating objects, such as S3 Standard or + // reduced redundancy. By default, Amazon S3 uses the storage class of the source + // object to create the object replica. // // For valid values, see the StorageClass element of the PUT Bucket replication // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) @@ -14794,6 +15826,8 @@ func (s *EndEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) return msg, err @@ -15327,10 +16361,15 @@ func (s *FilterRule) SetValue(v string) *FilterRule { type GetBucketAccelerateConfigurationInput struct { _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` - // Name of the bucket for which the accelerate configuration is retrieved. + // The name of the bucket for which the accelerate configuration is retrieved. 
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -15372,6 +16411,12 @@ func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAccelerateConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -15386,6 +16431,19 @@ func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` @@ -15416,6 +16474,11 @@ type GetBucketAclInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -15457,6 +16520,12 @@ func (s *GetBucketAclInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAclInput) SetExpectedBucketOwner(v string) *GetBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -15471,6 +16540,19 @@ func (s *GetBucketAclInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketAclOutput struct { _ struct{} `type:"structure"` @@ -15511,6 +16593,11 @@ type GetBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The ID that identifies the analytics configuration. // // Id is a required field @@ -15559,6 +16646,12 @@ func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetId sets the Id field's value. func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { s.Id = &v @@ -15579,6 +16672,19 @@ func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` @@ -15609,6 +16715,11 @@ type GetBucketCorsInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -15650,6 +16761,12 @@ func (s *GetBucketCorsInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketCorsInput) SetExpectedBucketOwner(v string) *GetBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -15664,6 +16781,19 @@ func (s *GetBucketCorsInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketCorsOutput struct { _ struct{} `type:"structure"` @@ -15696,6 +16826,11 @@ type GetBucketEncryptionInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -15737,6 +16872,12 @@ func (s *GetBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketEncryptionInput) SetExpectedBucketOwner(v string) *GetBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -15751,6 +16892,19 @@ func (s *GetBucketEncryptionInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketEncryptionOutput struct { _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` @@ -15782,6 +16936,11 @@ type GetBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The ID used to identify the inventory configuration. // // Id is a required field @@ -15830,6 +16989,12 @@ func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetId sets the Id field's value. func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { s.Id = &v @@ -15850,6 +17015,19 @@ func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure" payload:"InventoryConfiguration"` @@ -15880,6 +17058,11 @@ type GetBucketLifecycleConfigurationInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -15921,6 +17104,12 @@ func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -15935,6 +17124,19 @@ func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` @@ -15965,6 +17167,11 @@ type GetBucketLifecycleInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -16006,6 +17213,12 @@ func (s *GetBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -16020,6 +17233,19 @@ func (s *GetBucketLifecycleInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketLifecycleOutput struct { _ struct{} `type:"structure"` @@ -16050,6 +17276,11 @@ type GetBucketLocationInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -16091,6 +17322,12 @@ func (s *GetBucketLocationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLocationInput) SetExpectedBucketOwner(v string) *GetBucketLocationInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -16105,11 +17342,25 @@ func (s *GetBucketLocationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLocationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketLocationOutput struct { _ struct{} `type:"structure"` // Specifies the Region where the bucket resides. For a list of all the Amazon // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). + // Buckets in Region us-east-1 have a LocationConstraint of null. LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -16136,6 +17387,11 @@ type GetBucketLoggingInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -16177,6 +17433,12 @@ func (s *GetBucketLoggingInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLoggingInput) SetExpectedBucketOwner(v string) *GetBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -16191,6 +17453,19 @@ func (s *GetBucketLoggingInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLoggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` @@ -16225,6 +17500,11 @@ type GetBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. 
If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The ID used to identify the metrics configuration. // // Id is a required field @@ -16273,6 +17553,12 @@ func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetId sets the Id field's value. func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { s.Id = &v @@ -16293,6 +17579,19 @@ func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -16319,10 +17618,15 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics type GetBucketNotificationConfigurationRequest struct { _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` - // Name of the bucket for which to get the notification configuration + // The name of the bucket for which to get the notification configuration. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -16364,6 +17668,12 @@ func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketNotificationConfigurationRequest) SetExpectedBucketOwner(v string) *GetBucketNotificationConfigurationRequest { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -16378,6 +17688,126 @@ func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
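[Editor's illustration, not part of the vendored file.] The ExpectedBucketOwner member added to these inputs is sent as the x-amz-expected-bucket-owner header; when the bucket belongs to another account the request fails with HTTP 403 (Access Denied). Below is a minimal caller-side sketch using GetBucketLocation, with a placeholder bucket name and account ID, that also accounts for the documented null LocationConstraint of us-east-1 buckets:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Credentials and region come from the usual environment/shared config.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket:              aws.String("example-bucket"), // placeholder name
		ExpectedBucketOwner: aws.String("111122223333"),   // placeholder account ID
	})
	if err != nil {
		// An ownership mismatch is returned as an HTTP 403 (Access Denied) error.
		fmt.Println("GetBucketLocation failed:", err)
		return
	}

	// Buckets in us-east-1 report a null LocationConstraint, surfaced here as "".
	region := aws.StringValue(out.LocationConstraint)
	if region == "" {
		region = "us-east-1"
	}
	fmt.Println("bucket region:", region)
}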
+func (s GetBucketNotificationConfigurationRequest) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketOwnershipControlsInput struct { + _ struct{} `locationName:"GetBucketOwnershipControlsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation +func (s GetBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketOwnershipControlsInput) SetBucket(v string) *GetBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *GetBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure" payload:"OwnershipControls"` + + // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) currently in + // effect for this Amazon S3 bucket. + OwnershipControls *OwnershipControls `type:"structure"` +} + +// String returns the string representation +func (s GetBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +// SetOwnershipControls sets the OwnershipControls field's value. 
+func (s *GetBucketOwnershipControlsOutput) SetOwnershipControls(v *OwnershipControls) *GetBucketOwnershipControlsOutput { + s.OwnershipControls = v + return s +} + type GetBucketPolicyInput struct { _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` @@ -16385,6 +17815,11 @@ type GetBucketPolicyInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -16426,6 +17861,12 @@ func (s *GetBucketPolicyInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketPolicyInput) SetExpectedBucketOwner(v string) *GetBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -16440,6 +17881,19 @@ func (s *GetBucketPolicyInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketPolicyOutput struct { _ struct{} `type:"structure" payload:"Policy"` @@ -16470,6 +17924,11 @@ type GetBucketPolicyStatusInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -16511,6 +17970,12 @@ func (s *GetBucketPolicyStatusInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketPolicyStatusInput) SetExpectedBucketOwner(v string) *GetBucketPolicyStatusInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -16525,6 +17990,19 @@ func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
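[Editor's illustration, not part of the vendored file.] GetBucketOwnershipControlsInput and GetBucketOwnershipControlsOutput added above expose a bucket's OwnershipControls (BucketOwnerPreferred or ObjectWriter). A small sketch of fetching and printing them, assuming the matching GetBucketOwnershipControls client method and relying on the generated String() pretty-printer rather than any particular rule layout; svc and bucket are supplied by the caller:

// Assumed imports: "fmt", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/service/s3".
func printOwnershipControls(svc *s3.S3, bucket string) error {
	out, err := svc.GetBucketOwnershipControls(&s3.GetBucketOwnershipControlsInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	// The output carries the OwnershipControls payload; its String() method
	// (awsutil.Prettify) renders the rules currently in effect.
	fmt.Println(out.OwnershipControls)
	return nil
}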
+func (s GetBucketPolicyStatusInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketPolicyStatusOutput struct { _ struct{} `type:"structure" payload:"PolicyStatus"` @@ -16555,6 +18033,11 @@ type GetBucketReplicationInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -16596,6 +18079,12 @@ func (s *GetBucketReplicationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketReplicationInput) SetExpectedBucketOwner(v string) *GetBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -16610,6 +18099,19 @@ func (s *GetBucketReplicationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketReplicationOutput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` @@ -16641,6 +18143,11 @@ type GetBucketRequestPaymentInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -16682,6 +18189,12 @@ func (s *GetBucketRequestPaymentInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *GetBucketRequestPaymentInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -16696,6 +18209,19 @@ func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` @@ -16726,6 +18252,11 @@ type GetBucketTaggingInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -16767,6 +18298,12 @@ func (s *GetBucketTaggingInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketTaggingInput) SetExpectedBucketOwner(v string) *GetBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -16781,6 +18318,19 @@ func (s *GetBucketTaggingInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketTaggingOutput struct { _ struct{} `type:"structure"` @@ -16813,6 +18363,11 @@ type GetBucketVersioningInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -16854,6 +18409,12 @@ func (s *GetBucketVersioningInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketVersioningInput) SetExpectedBucketOwner(v string) *GetBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -16868,6 +18429,19 @@ func (s *GetBucketVersioningInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
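[Editor's illustration, not part of the vendored file.] GetBucketPolicyOutput above carries the bucket policy as its payload. A short sketch of reading the policy document as JSON text, assuming the conventional Policy string member on the output; bucket and the optional expected owner are caller-supplied:

// Assumed imports: "fmt", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/service/s3".
func printBucketPolicy(svc *s3.S3, bucket, expectedOwner string) error {
	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
		Bucket:              aws.String(bucket),
		ExpectedBucketOwner: aws.String(expectedOwner), // optional ownership guard
	})
	if err != nil {
		return err
	}
	// Policy is the bucket policy as a JSON document.
	fmt.Println(aws.StringValue(out.Policy))
	return nil
}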
+func (s GetBucketVersioningInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketVersioningOutput struct { _ struct{} `type:"structure"` @@ -16909,6 +18483,11 @@ type GetBucketWebsiteInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -16950,6 +18529,12 @@ func (s *GetBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketWebsiteInput) SetExpectedBucketOwner(v string) *GetBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -16964,13 +18549,26 @@ func (s *GetBucketWebsiteInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` - // The name of the error document for the website. + // The object key name of the website error document to use for 4XX class errors. ErrorDocument *ErrorDocument `type:"structure"` - // The name of the index document for the website. + // The name of the index document for the website (for example index.html). IndexDocument *IndexDocument `type:"structure"` // Specifies the redirect behavior of all requests to a website endpoint of @@ -17022,7 +18620,7 @@ type GetObjectAclInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. @@ -17030,6 +18628,11 @@ type GetObjectAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The key of the object for which to get the ACL information. // // Key is a required field @@ -17091,6 +18694,12 @@ func (s *GetObjectAclInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectAclInput) SetExpectedBucketOwner(v string) *GetObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { s.Key = &v @@ -17123,6 +18732,19 @@ func (s *GetObjectAclInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetObjectAclOutput struct { _ struct{} `type:"structure"` @@ -17172,14 +18794,27 @@ type GetObjectInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Return the object only if its entity tag (ETag) is the same as the one specified, // otherwise return a 412 (precondition failed). IfMatch *string `location:"header" locationName:"If-Match" type:"string"` @@ -17207,7 +18842,10 @@ type GetObjectInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. 
+ // the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + // (https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. Range *string `location:"header" locationName:"Range" type:"string"` // Confirms that the requester knows that they will be charged for the request. @@ -17242,7 +18880,7 @@ type GetObjectInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` @@ -17300,6 +18938,12 @@ func (s *GetObjectInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectInput) SetExpectedBucketOwner(v string) *GetObjectInput { + s.ExpectedBucketOwner = &v + return s +} + // SetIfMatch sets the IfMatch field's value. func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { s.IfMatch = &v @@ -17429,6 +19073,19 @@ func (s *GetObjectInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetObjectLegalHoldInput struct { _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` @@ -17437,7 +19094,7 @@ type GetObjectLegalHoldInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. @@ -17445,6 +19102,11 @@ type GetObjectLegalHoldInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The key name for the object whose Legal Hold status you want to retrieve. 
// // Key is a required field @@ -17506,6 +19168,12 @@ func (s *GetObjectLegalHoldInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectLegalHoldInput) SetExpectedBucketOwner(v string) *GetObjectLegalHoldInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput { s.Key = &v @@ -17538,6 +19206,19 @@ func (s *GetObjectLegalHoldInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetObjectLegalHoldOutput struct { _ struct{} `type:"structure" payload:"LegalHold"` @@ -17566,8 +19247,20 @@ type GetObjectLockConfigurationInput struct { // The bucket whose Object Lock configuration you want to retrieve. // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -17609,6 +19302,12 @@ func (s *GetObjectLockConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *GetObjectLockConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -17623,6 +19322,19 @@ func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
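[Editor's illustration, not part of the vendored file.] GetObjectInput above documents both the access point form of the Bucket member (an access point ARN supplied in place of the bucket name) and the Range header, which supports only a single byte range per request. A sketch combining the two; the access point ARN and object key are hypothetical placeholders:

// Assumed imports: "io", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/service/s3".
func readFirstKiB(svc *s3.S3) ([]byte, error) {
	out, err := svc.GetObject(&s3.GetObjectInput{
		// An access point ARN may be used in place of the bucket name; the SDK
		// resolves the endpoint from the ARN (see the updateArnableField helpers).
		Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/example-ap"), // hypothetical
		Key:    aws.String("example/object.txt"),                                       // hypothetical
		// A single range only; S3 does not support multiple ranges per GET.
		Range: aws.String("bytes=0-1023"),
	})
	if err != nil {
		return nil, err
	}
	defer out.Body.Close()
	return io.ReadAll(out.Body)
}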
+func (s GetObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetObjectLockConfigurationOutput struct { _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` @@ -17756,7 +19468,7 @@ type GetObjectOutput struct { ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Provides storage class information of the object. Amazon S3 returns this - // header for all objects except for Standard storage class objects. + // header for all objects except for S3 Standard storage class objects. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The number of tags, if any, on the object. @@ -17975,7 +19687,7 @@ type GetObjectRetentionInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. @@ -17983,6 +19695,11 @@ type GetObjectRetentionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The key name for the object whose retention settings you want to retrieve. // // Key is a required field @@ -18044,6 +19761,12 @@ func (s *GetObjectRetentionInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectRetentionInput) SetExpectedBucketOwner(v string) *GetObjectRetentionInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { s.Key = &v @@ -18076,6 +19799,19 @@ func (s *GetObjectRetentionInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectRetentionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetObjectRetentionOutput struct { _ struct{} `type:"structure" payload:"Retention"` @@ -18106,14 +19842,27 @@ type GetObjectTaggingInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Object key for which to get the tagging information. // // Key is a required field @@ -18168,6 +19917,12 @@ func (s *GetObjectTaggingInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectTaggingInput) SetExpectedBucketOwner(v string) *GetObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { s.Key = &v @@ -18194,6 +19949,19 @@ func (s *GetObjectTaggingInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -18237,6 +20005,11 @@ type GetObjectTorrentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The object key for which to get the information. // // Key is a required field @@ -18295,6 +20068,12 @@ func (s *GetObjectTorrentInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
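[Editor's illustration, not part of the vendored file.] GetObjectTaggingInput above follows the same access point and S3 on Outposts addressing rules as the other object-level inputs. A brief sketch that lists an object's tags, assuming the usual TagSet member of Tag (Key/Value) pairs on the output; bucket and key are caller-supplied:

// Assumed imports: "fmt", "github.com/aws/aws-sdk-go/aws",
// "github.com/aws/aws-sdk-go/service/s3".
func printObjectTags(svc *s3.S3, bucket, key string) error {
	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		return err
	}
	for _, tag := range out.TagSet {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
	return nil
}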
+func (s *GetObjectTorrentInput) SetExpectedBucketOwner(v string) *GetObjectTorrentInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { s.Key = &v @@ -18321,6 +20100,19 @@ func (s *GetObjectTorrentInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectTorrentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetObjectTorrentOutput struct { _ struct{} `type:"structure" payload:"Body"` @@ -18362,6 +20154,11 @@ type GetPublicAccessBlockInput struct { // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -18403,6 +20200,12 @@ func (s *GetPublicAccessBlockInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetPublicAccessBlockInput) SetExpectedBucketOwner(v string) *GetPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -18417,6 +20220,19 @@ func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type GetPublicAccessBlockOutput struct { _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` @@ -18441,11 +20257,11 @@ func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *Public return s } -// Container for Glacier job parameters. +// Container for S3 Glacier job parameters. type GlacierJobParameters struct { _ struct{} `type:"structure"` - // Glacier retrieval tier at which the restore will be processed. + // S3 Glacier retrieval tier at which the restore will be processed. // // Tier is a required field Tier *string `type:"string" required:"true" enum:"Tier"` @@ -18536,6 +20352,29 @@ type Grantee struct { DisplayName *string `type:"string"` // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // AWS Regions: + // + // * US East (N. Virginia) + // + // * US West (N. 
California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see Regions + // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the AWS General Reference. EmailAddress *string `type:"string"` // The canonical user ID of the grantee. @@ -18608,8 +20447,28 @@ type HeadBucketInput struct { // The bucket name. // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -18651,6 +20510,12 @@ func (s *HeadBucketInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *HeadBucketInput) SetExpectedBucketOwner(v string) *HeadBucketInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -18665,6 +20530,19 @@ func (s *HeadBucketInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s HeadBucketInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type HeadBucketOutput struct { _ struct{} `type:"structure"` } @@ -18684,9 +20562,29 @@ type HeadObjectInput struct { // The name of the bucket containing the object. // + // When using this API with an access point, you must direct requests to the + // access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Return the object only if its entity tag (ETag) is the same as the one specified, // otherwise return a 412 (precondition failed). IfMatch *string `location:"header" locationName:"If-Match" type:"string"` @@ -18715,7 +20613,10 @@ type HeadObjectInput struct { PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + // the HTTP Range header, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35). + // + // Amazon S3 doesn't support retrieving multiple ranges of data per GET request. Range *string `location:"header" locationName:"Range" type:"string"` // Confirms that the requester knows that they will be charged for the request. @@ -18732,7 +20633,7 @@ type HeadObjectInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` @@ -18790,6 +20691,12 @@ func (s *HeadObjectInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *HeadObjectInput) SetExpectedBucketOwner(v string) *HeadObjectInput { + s.ExpectedBucketOwner = &v + return s +} + // SetIfMatch sets the IfMatch field's value. 
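The hunks above thread the new ExpectedBucketOwner member (bound to the x-amz-expected-bucket-owner header) through HeadObjectInput and its generated setter. A minimal caller-side sketch follows; the bucket, key, and account ID are placeholders, and the 403 handling simply reflects the behaviour described in the field's documentation.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Ask for object metadata, but only if the bucket is owned by the expected account.
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket:              aws.String("my-bucket"),        // hypothetical bucket
		Key:                 aws.String("reports/2021.csv"), // hypothetical key
		ExpectedBucketOwner: aws.String("111122223333"),     // hypothetical account ID
	})
	if err != nil {
		// Per the field documentation, an ownership mismatch surfaces as HTTP 403.
		if reqErr, ok := err.(awserr.RequestFailure); ok && reqErr.StatusCode() == 403 {
			log.Fatalf("bucket is not owned by the expected account: %v", reqErr)
		}
		log.Fatalf("HeadObject failed: %v", err)
	}
	fmt.Println("content length:", aws.Int64Value(out.ContentLength))
}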
func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { s.IfMatch = &v @@ -18883,6 +20790,19 @@ func (s *HeadObjectInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s HeadObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type HeadObjectOutput struct { _ struct{} `type:"structure"` @@ -18993,7 +20913,8 @@ type HeadObjectOutput struct { // If the object is an archived object (an object whose storage class is GLACIER), // the response includes this header if either the archive restoration is in - // progress (see RestoreObject or an archive copy is already restored. + // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) + // or an archive copy is already restored. // // If an archive copy is already restored, the header value indicates when Amazon // S3 is scheduled to delete the object copy. For example: @@ -19029,7 +20950,7 @@ type HeadObjectOutput struct { ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` // Provides storage class information of the object. Amazon S3 returns this - // header for all objects except for Standard storage class objects. + // header for all objects except for S3 Standard storage class objects. // // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` @@ -19624,7 +21545,11 @@ func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { type InventoryS3BucketDestination struct { _ struct{} `type:"structure"` - // The ID of the account that owns the destination bucket. + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. AccountId *string `type:"string"` // The Amazon Resource Name (ARN) of the bucket where inventory results will @@ -19781,7 +21706,8 @@ func (s *JSONInput) SetType(v string) *JSONInput { type JSONOutput struct { _ struct{} `type:"structure"` - // The value used to separate individual records in the output. + // The value used to separate individual records in the output. If no value + // is specified, Amazon S3 uses a newline character ('\n'). RecordDelimiter *string `type:"string"` } @@ -20265,6 +22191,11 @@ type ListBucketAnalyticsConfigurationsInput struct { // The ContinuationToken that represents a placeholder from where this request // should begin. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -20312,6 +22243,12 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketAnalyticsConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -20326,6 +22263,19 @@ func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketAnalyticsConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type ListBucketAnalyticsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -20394,6 +22344,11 @@ type ListBucketInventoryConfigurationsInput struct { // response to continue the listing. The continuation token is an opaque value // that Amazon S3 understands. ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -20441,6 +22396,12 @@ func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketInventoryConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketInventoryConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -20455,6 +22416,19 @@ func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketInventoryConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type ListBucketInventoryConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -20523,6 +22497,11 @@ type ListBucketMetricsConfigurationsInput struct { // list response to continue the listing. The continuation token is an opaque // value that Amazon S3 understands. 
ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -20570,6 +22549,12 @@ func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *L return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketMetricsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketMetricsConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -20584,6 +22569,19 @@ func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketMetricsConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type ListBucketMetricsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -20689,15 +22687,23 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { type ListMultipartUploadsInput struct { _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` - // Name of the bucket to which the multipart upload was initiated. + // The name of the bucket to which the multipart upload was initiated. // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -20719,6 +22725,11 @@ type ListMultipartUploadsInput struct { // keys in the response. 
EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Together with upload-id-marker, this parameter specifies the multipart upload // after which listing should begin. // @@ -20800,6 +22811,12 @@ func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUplo return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListMultipartUploadsInput) SetExpectedBucketOwner(v string) *ListMultipartUploadsInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKeyMarker sets the KeyMarker field's value. func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { s.KeyMarker = &v @@ -20838,10 +22855,23 @@ func (s *ListMultipartUploadsInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListMultipartUploadsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type ListMultipartUploadsOutput struct { _ struct{} `type:"structure"` - // Name of the bucket to which the multipart upload was initiated. + // The name of the bucket to which the multipart upload was initiated. Bucket *string `type:"string"` // If you specify a delimiter in the request, then the result returns each distinct @@ -20989,13 +23019,6 @@ type ListObjectVersionsInput struct { // The bucket name that contains the objects. // - // When using this API with an access point, you must direct requests to the - // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you - // provide the access point ARN in place of the bucket name. For more information - // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) - // in the Amazon Simple Storage Service Developer Guide. - // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -21014,14 +23037,20 @@ type ListObjectVersionsInput struct { // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Specifies the key to start with when listing objects in a bucket. KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. 
If additional keys satisfy - // the search criteria, but were not returned because max-keys was exceeded, - // the response contains true. To return the additional - // keys, see key-marker and version-id-marker. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. If additional keys satisfy the search criteria, + // but were not returned because max-keys was exceeded, the response contains + // true. To return the additional keys, see key-marker + // and version-id-marker. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` // Use this parameter to select only those keys that begin with the specified @@ -21086,6 +23115,12 @@ func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsI return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectVersionsInput) SetExpectedBucketOwner(v string) *ListObjectVersionsInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKeyMarker sets the KeyMarker field's value. func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { s.KeyMarker = &v @@ -21124,6 +23159,19 @@ func (s *ListObjectVersionsInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectVersionsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type ListObjectVersionsOutput struct { _ struct{} `type:"structure"` @@ -21164,7 +23212,7 @@ type ListObjectVersionsOutput struct { // Specifies the maximum number of objects to return. MaxKeys *int64 `type:"integer"` - // Bucket name. + // The bucket name. Name *string `type:"string"` // When the number of responses exceeds the value of MaxKeys, NextKeyMarker @@ -21281,6 +23329,21 @@ type ListObjectsInput struct { // The name of the bucket containing the objects. // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. 
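The rewritten MaxKeys documentation above (up to 1,000 key names per response by default, continued via key-marker and version-id-marker) maps onto the SDK's existing paginator. A rough sketch, with the client and bucket name supplied by the caller:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listAllVersions walks every object version, 100 keys per page. The paginator
// resupplies key-marker and version-id-marker between pages, which is the
// continuation mechanism the MaxKeys documentation refers to.
func listAllVersions(svc *s3.S3, bucket string) error {
	return svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
		Bucket:  aws.String(bucket),
		MaxKeys: aws.Int64(100), // the service defaults to up to 1,000 keys per page
	}, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
		for _, v := range page.Versions {
			fmt.Printf("%s\t%s\n", aws.StringValue(v.Key), aws.StringValue(v.VersionId))
		}
		return true // keep paging until the listing is no longer truncated
	})
}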
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -21295,11 +23358,17 @@ type ListObjectsInput struct { // keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Specifies the key to start with when listing objects in a bucket. Marker *string `location:"querystring" locationName:"marker" type:"string"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` // Limits the response to keys that begin with the specified prefix. @@ -21362,6 +23431,12 @@ func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectsInput) SetExpectedBucketOwner(v string) *ListObjectsInput { + s.ExpectedBucketOwner = &v + return s +} + // SetMarker sets the Marker field's value. func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { s.Marker = &v @@ -21400,6 +23475,19 @@ func (s *ListObjectsInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type ListObjectsOutput struct { _ struct{} `type:"structure"` @@ -21444,14 +23532,14 @@ type ListObjectsOutput struct { // The maximum number of keys returned in the response body. MaxKeys *int64 `type:"integer"` - // Bucket name. + // The bucket name. Name *string `type:"string"` // When response is truncated (the IsTruncated element value in the response // is true), you can use the key name in this field as marker in the subsequent // request to get next set of objects. Amazon S3 lists objects in alphabetical // order Note: This element is returned only if you have delimiter request parameter - // specified. If response does not include the NextMaker and it is truncated, + // specified. If response does not include the NextMarker and it is truncated, // you can use the value of the last Key in the response as the marker in the // subsequent request to get the next set of object keys. NextMarker *string `type:"string"` @@ -21537,11 +23625,19 @@ type ListObjectsV2Input struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
- // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -21556,13 +23652,19 @@ type ListObjectsV2Input struct { // Encoding type used by Amazon S3 to encode object keys in the response. EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The owner field is not present in listV2 by default, if you want to return // owner field with each key in the result then set the fetch owner field to // true. FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` // Limits the response to keys that begin with the specified prefix. @@ -21635,6 +23737,12 @@ func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectsV2Input) SetExpectedBucketOwner(v string) *ListObjectsV2Input { + s.ExpectedBucketOwner = &v + return s +} + // SetFetchOwner sets the FetchOwner field's value. func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { s.FetchOwner = &v @@ -21679,6 +23787,19 @@ func (s *ListObjectsV2Input) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
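The updateArnableField/hasEndpointARN plumbing added around ListObjectsV2Input is what lets callers pass an access point ARN where a bucket name is expected. A sketch under that assumption; the ARN below is invented for illustration:

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listViaAccessPoint lists keys through an S3 access point. The Bucket member
// carries the access point ARN; the endpoint/ARN handling added in these hunks
// resolves it into the underlying resource name.
func listViaAccessPoint(svc *s3.S3) error {
	// Hypothetical access point ARN, used purely for illustration.
	accessPointARN := "arn:aws:s3:us-west-2:111122223333:accesspoint/my-access-point"

	return svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
		Bucket:  aws.String(accessPointARN),
		MaxKeys: aws.Int64(1000), // the documented default page size
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		return true // continue with NextContinuationToken
	})
}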
+func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type ListObjectsV2Output struct { _ struct{} `type:"structure"` @@ -21731,18 +23852,27 @@ type ListObjectsV2Output struct { // result will include less than equals 50 keys KeyCount *int64 `type:"integer"` - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. + // Sets the maximum number of keys returned in the response. By default the + // API returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. MaxKeys *int64 `type:"integer"` - // Bucket name. + // The bucket name. // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. Name *string `type:"string"` // NextContinuationToken is sent when isTruncated is true, which means there @@ -21843,18 +23973,31 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { type ListPartsInput struct { _ struct{} `locationName:"ListPartsRequest" type:"structure"` - // Name of the bucket to which the parts are being uploaded. + // The name of the bucket to which the parts are being uploaded. // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. 
For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Object key for which the multipart upload was initiated. // // Key is a required field @@ -21928,6 +24071,12 @@ func (s *ListPartsInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListPartsInput) SetExpectedBucketOwner(v string) *ListPartsInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. func (s *ListPartsInput) SetKey(v string) *ListPartsInput { s.Key = &v @@ -21972,6 +24121,19 @@ func (s *ListPartsInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListPartsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type ListPartsOutput struct { _ struct{} `type:"structure"` @@ -21991,7 +24153,7 @@ type ListPartsOutput struct { // incomplete multipart uploads. AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` - // Name of the bucket to which the multipart upload was initiated. + // The name of the bucket to which the multipart upload was initiated. Bucket *string `type:"string"` // Container element that identifies who initiated the multipart upload. If @@ -22929,8 +25091,22 @@ func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConf type Object struct { _ struct{} `type:"structure"` - // The entity tag is an MD5 hash of the object. ETag reflects only changes to - // the contents of an object, not its metadata. + // The entity tag is a hash of the object. The ETag reflects changes only to + // the contents of an object, not its metadata. The ETag may or may not be an + // MD5 digest of the object data. Whether or not it is depends on how the object + // was created and how it is encrypted as described below: + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the AWS Management Console, and are encrypted by SSE-S3 or plaintext, + // have ETags that are an MD5 digest of their object data. + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the AWS Management Console, and are encrypted by SSE-C or SSE-KMS, + // have ETags that are not an MD5 digest of their object data. + // + // * If an object is created by either the Multipart Upload or Part Copy + // operation, the ETag is not an MD5 digest, regardless of the method of + // encryption. ETag *string `type:"string"` // The name that you assign to an object. 
You use the object key to retrieve @@ -23355,6 +25531,101 @@ func (s *Owner) SetID(v string) *Owner { return s } +// The container element for a bucket's ownership controls. +type OwnershipControls struct { + _ struct{} `type:"structure"` + + // The container element for an ownership control rule. + // + // Rules is a required field + Rules []*OwnershipControlsRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s OwnershipControls) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OwnershipControls) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OwnershipControls) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnershipControls"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *OwnershipControls) SetRules(v []*OwnershipControlsRule) *OwnershipControls { + s.Rules = v + return s +} + +// The container element for an ownership control rule. +type OwnershipControlsRule struct { + _ struct{} `type:"structure"` + + // The container element for object ownership for a bucket's ownership controls. + // + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to + // the bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // + // ObjectOwnership is a required field + ObjectOwnership *string `type:"string" required:"true" enum:"ObjectOwnership"` +} + +// String returns the string representation +func (s OwnershipControlsRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OwnershipControlsRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OwnershipControlsRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnershipControlsRule"} + if s.ObjectOwnership == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectOwnership")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjectOwnership sets the ObjectOwnership field's value. +func (s *OwnershipControlsRule) SetObjectOwnership(v string) *OwnershipControlsRule { + s.ObjectOwnership = &v + return s +} + // Container for Parquet. type ParquetInput struct { _ struct{} `type:"structure"` @@ -23530,6 +25801,8 @@ func (s *ProgressEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. 
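The new OwnershipControls and OwnershipControlsRule shapes above model S3 Object Ownership (BucketOwnerPreferred versus ObjectWriter). The sketch below builds and validates a one-rule configuration; it assumes the matching PutBucketOwnershipControls operation is vendored alongside these shapes in this SDK version.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// preferBucketOwner asks S3 to assign bucket-owner ownership to objects uploaded
// with the bucket-owner-full-control canned ACL.
func preferBucketOwner(svc *s3.S3, bucket string) error {
	controls := &s3.OwnershipControls{
		Rules: []*s3.OwnershipControlsRule{{
			ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerPreferred),
		}},
	}
	if err := controls.Validate(); err != nil {
		return err // e.g. a rule with a missing ObjectOwnership value
	}
	// PutBucketOwnershipControls is assumed to be present in this SDK version.
	_, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
		Bucket:            aws.String(bucket),
		OwnershipControls: controls,
	})
	return err
}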
func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) var buf bytes.Buffer @@ -23630,10 +25903,15 @@ type PutBucketAccelerateConfigurationInput struct { // AccelerateConfiguration is a required field AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Name of the bucket for which the accelerate configuration is set. + // The name of the bucket for which the accelerate configuration is set. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -23684,6 +25962,12 @@ func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAccelerateConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -23698,6 +25982,19 @@ func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -23726,6 +26023,11 @@ type PutBucketAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Allows grantee the read, write, read ACP, and write ACP permissions on the // bucket. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` @@ -23799,6 +26101,12 @@ func (s *PutBucketAclInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAclInput) SetExpectedBucketOwner(v string) *PutBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + // SetGrantFullControl sets the GrantFullControl field's value. 
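PutBucketAclInput now also carries ExpectedBucketOwner, so ACL changes can be guarded against a bucket that has changed hands. A short sketch with caller-supplied values:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// resetBucketACL applies the "private" canned ACL, and only if the bucket is
// owned by the given account (via the new x-amz-expected-bucket-owner header).
func resetBucketACL(svc *s3.S3, bucket, ownerAccountID string) error {
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket:              aws.String(bucket),
		ACL:                 aws.String(s3.BucketCannedACLPrivate),
		ExpectedBucketOwner: aws.String(ownerAccountID),
	})
	return err
}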
func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { s.GrantFullControl = &v @@ -23843,6 +26151,19 @@ func (s *PutBucketAclInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketAclOutput struct { _ struct{} `type:"structure"` } @@ -23870,6 +26191,11 @@ type PutBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The ID that identifies the analytics configuration. // // Id is a required field @@ -23932,6 +26258,12 @@ func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetId sets the Id field's value. func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { s.Id = &v @@ -23952,6 +26284,19 @@ func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -23976,11 +26321,16 @@ type PutBucketCorsInput struct { // Describes the cross-origin access configuration for objects in an Amazon // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing - // (https://docs.aws.amazon.com/AmazonS3/latest/dev//cors.html) in the Amazon + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon // Simple Storage Service Developer Guide. // // CORSConfiguration is a required field CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -24036,6 +26386,12 @@ func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBuck return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketCorsInput) SetExpectedBucketOwner(v string) *PutBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -24050,6 +26406,19 @@ func (s *PutBucketCorsInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -24076,6 +26445,11 @@ type PutBucketEncryptionInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Specifies the default server-side-encryption configuration. // // ServerSideEncryptionConfiguration is a required field @@ -24129,6 +26503,12 @@ func (s *PutBucketEncryptionInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketEncryptionInput) SetExpectedBucketOwner(v string) *PutBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + // SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { s.ServerSideEncryptionConfiguration = v @@ -24149,6 +26529,19 @@ func (s *PutBucketEncryptionInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketEncryptionOutput struct { _ struct{} `type:"structure"` } @@ -24171,6 +26564,11 @@ type PutBucketInventoryConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The ID used to identify the inventory configuration. // // Id is a required field @@ -24232,6 +26630,12 @@ func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetId sets the Id field's value. func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { s.Id = &v @@ -24258,6 +26662,19 @@ func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -24280,6 +26697,11 @@ type PutBucketLifecycleConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Container for lifecycle rules. You can add as many as 1,000 rules. LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -24328,6 +26750,12 @@ func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetLifecycleConfiguration sets the LifecycleConfiguration field's value. func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { s.LifecycleConfiguration = v @@ -24348,6 +26776,19 @@ func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
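For PutBucketLifecycleConfigurationInput, the configuration container documented above accepts up to 1,000 rules. A single-rule sketch; the prefix and expiration window are illustrative only:

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// expireLogsAfter30Days installs one lifecycle rule that expires objects under
// a prefix after 30 days.
func expireLogsAfter30Days(svc *s3.S3, bucket string) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:         aws.String("expire-logs"),
				Status:     aws.String(s3.ExpirationStatusEnabled),
				Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
			}},
		},
	})
	return err
}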
+func (s PutBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -24368,6 +26809,11 @@ type PutBucketLifecycleInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Container for lifecycle rules. You can add as many as 1000 rules. LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } @@ -24416,6 +26862,12 @@ func (s *PutBucketLifecycleInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLifecycleInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + // SetLifecycleConfiguration sets the LifecycleConfiguration field's value. func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { s.LifecycleConfiguration = v @@ -24436,6 +26888,19 @@ func (s *PutBucketLifecycleInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -24462,6 +26927,11 @@ type PutBucketLoggingInput struct { // // BucketLoggingStatus is a required field BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` } // String returns the string representation @@ -24517,6 +26987,12 @@ func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) * return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLoggingInput) SetExpectedBucketOwner(v string) *PutBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") @@ -24531,6 +27007,19 @@ func (s *PutBucketLoggingInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLoggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketLoggingOutput struct { _ struct{} `type:"structure"` } @@ -24553,6 +27042,11 @@ type PutBucketMetricsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The ID used to identify the metrics configuration. // // Id is a required field @@ -24614,6 +27108,12 @@ func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetId sets the Id field's value. func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { s.Id = &v @@ -24633,55 +27133,192 @@ func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, err return parseEndpointARN(*s.Bucket) } -func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { +func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` + + // The name of the bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // A container for specifying the notification configuration of the bucket. + // If this element is empty, notifications are turned off for the bucket. 
+ // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketNotificationConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { + s.NotificationConfiguration = v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s PutBucketNotificationConfigurationInput) updateArnableField(v string) (interface{}, error) { if s.Bucket == nil { - return false + return nil, fmt.Errorf("member Bucket is nil") } - return arn.IsARN(*s.Bucket) + s.Bucket = aws.String(v) + return &s, nil } -type PutBucketMetricsConfigurationOutput struct { +type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s PutBucketMetricsConfigurationOutput) String() string { +func (s PutBucketNotificationConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketMetricsConfigurationOutput) GoString() string { +func (s PutBucketNotificationConfigurationOutput) GoString() string { return s.String() } -type PutBucketNotificationConfigurationInput struct { - _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` +type PutBucketNotificationInput struct { + _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` // The name of the bucket. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // A container for specifying the notification configuration of the bucket. - // If this element is empty, notifications are turned off for the bucket. + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The container for the configuration. // // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s PutBucketNotificationConfigurationInput) String() string { +func (s PutBucketNotificationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketNotificationConfigurationInput) GoString() string { +func (s PutBucketNotificationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutBucketNotificationConfigurationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} +func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } @@ -24691,11 +27328,6 @@ func (s *PutBucketNotificationConfigurationInput) Validate() error { if s.NotificationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) } - if s.NotificationConfiguration != nil { - if err := s.NotificationConfiguration.Validate(); err != nil { - invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -24704,87 +27336,114 @@ func (s *PutBucketNotificationConfigurationInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { +func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { s.Bucket = &v return s } -func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { +func (s *PutBucketNotificationInput) getBucket() (v string) { if s.Bucket == nil { return v } return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketNotificationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetNotificationConfiguration sets the NotificationConfiguration field's value. -func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { +func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { s.NotificationConfiguration = v return s } -func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { +func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") } return parseEndpointARN(*s.Bucket) } -func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { +func (s *PutBucketNotificationInput) hasEndpointARN() bool { if s.Bucket == nil { return false } return arn.IsARN(*s.Bucket) } -type PutBucketNotificationConfigurationOutput struct { +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
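PutBucketNotificationConfigurationInput now carries ExpectedBucketOwner alongside the required NotificationConfiguration, which Validate checks recursively. A hedged usage sketch, reusing the svc client from the sketch above (the bucket name, account id, and queue ARN are placeholders):

if _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
    Bucket:              aws.String("example-bucket"),
    ExpectedBucketOwner: aws.String("111122223333"),
    NotificationConfiguration: &s3.NotificationConfiguration{
        QueueConfigurations: []*s3.QueueConfiguration{{
            Events:   []*string{aws.String("s3:ObjectCreated:*")},
            QueueArn: aws.String("arn:aws:sqs:us-east-2:111122223333:example-queue"),
        }},
    },
}); err != nil {
    fmt.Println("PutBucketNotificationConfiguration failed:", err)
}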
+func (s PutBucketNotificationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s PutBucketNotificationConfigurationOutput) String() string { +func (s PutBucketNotificationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketNotificationConfigurationOutput) GoString() string { +func (s PutBucketNotificationOutput) GoString() string { return s.String() } -type PutBucketNotificationInput struct { - _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` +type PutBucketOwnershipControlsInput struct { + _ struct{} `locationName:"PutBucketOwnershipControlsRequest" type:"structure" payload:"OwnershipControls"` - // The name of the bucket. + // The name of the Amazon S3 bucket whose OwnershipControls you want to set. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The container for the configuration. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The OwnershipControls (BucketOwnerPreferred or ObjectWriter) that you want + // to apply to this Amazon S3 bucket. // - // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + // OwnershipControls is a required field + OwnershipControls *OwnershipControls `locationName:"OwnershipControls" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s PutBucketNotificationInput) String() string { +func (s PutBucketOwnershipControlsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketNotificationInput) GoString() string { +func (s PutBucketOwnershipControlsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutBucketNotificationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} +func (s *PutBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketOwnershipControlsInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } if s.Bucket != nil && len(*s.Bucket) < 1 { invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.NotificationConfiguration == nil { - invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + if s.OwnershipControls == nil { + invalidParams.Add(request.NewErrParamRequired("OwnershipControls")) + } + if s.OwnershipControls != nil { + if err := s.OwnershipControls.Validate(); err != nil { + invalidParams.AddNested("OwnershipControls", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -24794,49 +27453,68 @@ func (s *PutBucketNotificationInput) Validate() error { } // SetBucket sets the Bucket field's value. 
-func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { +func (s *PutBucketOwnershipControlsInput) SetBucket(v string) *PutBucketOwnershipControlsInput { s.Bucket = &v return s } -func (s *PutBucketNotificationInput) getBucket() (v string) { +func (s *PutBucketOwnershipControlsInput) getBucket() (v string) { if s.Bucket == nil { return v } return *s.Bucket } -// SetNotificationConfiguration sets the NotificationConfiguration field's value. -func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { - s.NotificationConfiguration = v +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *PutBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v return s } -func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { +// SetOwnershipControls sets the OwnershipControls field's value. +func (s *PutBucketOwnershipControlsInput) SetOwnershipControls(v *OwnershipControls) *PutBucketOwnershipControlsInput { + s.OwnershipControls = v + return s +} + +func (s *PutBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { if s.Bucket == nil { return nil, fmt.Errorf("member Bucket is nil") } return parseEndpointARN(*s.Bucket) } -func (s *PutBucketNotificationInput) hasEndpointARN() bool { +func (s *PutBucketOwnershipControlsInput) hasEndpointARN() bool { if s.Bucket == nil { return false } return arn.IsARN(*s.Bucket) } -type PutBucketNotificationOutput struct { +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketOwnershipControlsOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s PutBucketNotificationOutput) String() string { +func (s PutBucketOwnershipControlsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketNotificationOutput) GoString() string { +func (s PutBucketOwnershipControlsOutput) GoString() string { return s.String() } @@ -24852,6 +27530,11 @@ type PutBucketPolicyInput struct { // to change this bucket policy in the future. ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The bucket policy as a JSON document. // // Policy is a required field @@ -24906,6 +27589,12 @@ func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBuck return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketPolicyInput) SetExpectedBucketOwner(v string) *PutBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + // SetPolicy sets the Policy field's value. 
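PutBucketOwnershipControls is new in this vendored SDK version. A minimal sketch, assuming the OwnershipControls and OwnershipControlsRule shapes generated here and reusing the shared svc client (names and ids are placeholders):

if _, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
    Bucket:              aws.String("example-bucket"),
    ExpectedBucketOwner: aws.String("111122223333"),
    OwnershipControls: &s3.OwnershipControls{
        Rules: []*s3.OwnershipControlsRule{
            {ObjectOwnership: aws.String("BucketOwnerPreferred")}, // or "ObjectWriter"
        },
    },
}); err != nil {
    fmt.Println("PutBucketOwnershipControls failed:", err)
}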
func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { s.Policy = &v @@ -24926,6 +27615,19 @@ func (s *PutBucketPolicyInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -24948,6 +27650,11 @@ type PutBucketReplicationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // A container for replication rules. You can add up to 1,000 rules. The maximum // size of a replication configuration is 2 MB. // @@ -25004,6 +27711,12 @@ func (s *PutBucketReplicationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketReplicationInput) SetExpectedBucketOwner(v string) *PutBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetReplicationConfiguration sets the ReplicationConfiguration field's value. func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { s.ReplicationConfiguration = v @@ -25030,6 +27743,19 @@ func (s *PutBucketReplicationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -25052,6 +27778,11 @@ type PutBucketRequestPaymentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Container for Payer. // // RequestPaymentConfiguration is a required field @@ -25105,6 +27836,12 @@ func (s *PutBucketRequestPaymentInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *PutBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *PutBucketRequestPaymentInput { + s.ExpectedBucketOwner = &v + return s +} + // SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { s.RequestPaymentConfiguration = v @@ -25125,6 +27862,19 @@ func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } @@ -25147,6 +27897,11 @@ type PutBucketTaggingInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Container for the TagSet and Tag elements. // // Tagging is a required field @@ -25200,6 +27955,12 @@ func (s *PutBucketTaggingInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketTaggingInput) SetExpectedBucketOwner(v string) *PutBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + // SetTagging sets the Tagging field's value. func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { s.Tagging = v @@ -25220,6 +27981,19 @@ func (s *PutBucketTaggingInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -25242,6 +28016,11 @@ type PutBucketVersioningInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The concatenation of the authentication device's serial number, a space, // and the value that is displayed on your authentication device. 
MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` @@ -25294,6 +28073,12 @@ func (s *PutBucketVersioningInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketVersioningInput) SetExpectedBucketOwner(v string) *PutBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + // SetMFA sets the MFA field's value. func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { s.MFA = &v @@ -25320,6 +28105,19 @@ func (s *PutBucketVersioningInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketVersioningInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } @@ -25342,6 +28140,11 @@ type PutBucketWebsiteInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Container for the request. // // WebsiteConfiguration is a required field @@ -25395,6 +28198,12 @@ func (s *PutBucketWebsiteInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketWebsiteInput) SetExpectedBucketOwner(v string) *PutBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + // SetWebsiteConfiguration sets the WebsiteConfiguration field's value. func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { s.WebsiteConfiguration = v @@ -25415,6 +28224,19 @@ func (s *PutBucketWebsiteInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -25444,7 +28266,7 @@ type PutObjectAclInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. 
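PutBucketVersioningInput likewise gains ExpectedBucketOwner; MFA is only needed when changing the MFA Delete state and takes the "device serial, space, token" form described above. An illustrative sketch with placeholder values, reusing the shared svc client:

if _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
    Bucket:              aws.String("example-bucket"),
    ExpectedBucketOwner: aws.String("111122223333"),
    VersioningConfiguration: &s3.VersioningConfiguration{
        Status: aws.String("Enabled"),
    },
}); err != nil {
    fmt.Println("PutBucketVersioning failed:", err)
}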
For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. @@ -25452,24 +28274,52 @@ type PutObjectAclInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Allows grantee the read, write, read ACP, and write ACP permissions on the // bucket. + // + // This action is not supported by Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to list the objects in the bucket. + // + // This action is not supported by Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the bucket ACL. + // + // This action is not supported by Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to create, overwrite, and delete any object in the bucket. GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` // Allows grantee to write the ACL for the applicable bucket. + // + // This action is not supported by Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Key for which the PUT operation was initiated. // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -25546,6 +28396,12 @@ func (s *PutObjectAclInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectAclInput) SetExpectedBucketOwner(v string) *PutObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + // SetGrantFullControl sets the GrantFullControl field's value. 
func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { s.GrantFullControl = &v @@ -25608,6 +28464,19 @@ func (s *PutObjectAclInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutObjectAclOutput struct { _ struct{} `type:"structure"` @@ -25637,20 +28506,30 @@ type PutObjectInput struct { // The canned ACL to apply to the object. For more information, see Canned ACL // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + // + // This action is not supported by Amazon S3 on Outposts. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` // Object data. Body io.ReadSeeker `type:"blob"` - // Bucket name to which the PUT operation was initiated. + // The bucket name to which the PUT operation was initiated. // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -25689,20 +28568,33 @@ type PutObjectInput struct { // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17). ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The date and time at which the object is no longer cacheable. For more information, // see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21). 
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // This action is not supported by Amazon S3 on Outposts. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` // Allows grantee to read the object data and its metadata. + // + // This action is not supported by Amazon S3 on Outposts. GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` // Allows grantee to read the object ACL. + // + // This action is not supported by Amazon S3 on Outposts. GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` // Allows grantee to write the ACL for the applicable object. + // + // This action is not supported by Amazon S3 on Outposts. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` // Object key for which the PUT operation was initiated. @@ -25737,7 +28629,7 @@ type PutObjectInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` @@ -25767,8 +28659,12 @@ type PutObjectInput struct { // S3 (for example, AES256, aws:kms). ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - // If you don't specify, Standard is the default storage class. Amazon S3 supports - // other storage classes. + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high availability. + // Depending on performance needs, you can specify a different Storage Class. + // Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, + // see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 Service Developer Guide. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` // The tag-set for the object. The tag-set must be encoded as URL Query parameters. @@ -25895,6 +28791,12 @@ func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectInput) SetExpectedBucketOwner(v string) *PutObjectInput { + s.ExpectedBucketOwner = &v + return s +} + // SetExpires sets the Expires field's value. func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { s.Expires = &v @@ -26036,6 +28938,19 @@ func (s *PutObjectInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
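PutObjectInput picks up the same ExpectedBucketOwner header plus the expanded Outposts and StorageClass documentation. A hedged sketch (placeholder bucket, key, and content; assumes "bytes" is imported along with the packages from the first sketch):

if _, err := svc.PutObject(&s3.PutObjectInput{
    Bucket:               aws.String("example-bucket"),
    Key:                  aws.String("reports/january.pdf"),
    Body:                 bytes.NewReader([]byte("example content")),
    ExpectedBucketOwner:  aws.String("111122223333"),
    ServerSideEncryption: aws.String("AES256"),
    StorageClass:         aws.String("STANDARD_IA"),
}); err != nil {
    fmt.Println("PutObject failed:", err)
}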
+func (s PutObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutObjectLegalHoldInput struct { _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` @@ -26044,7 +28959,7 @@ type PutObjectLegalHoldInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. @@ -26052,6 +28967,11 @@ type PutObjectLegalHoldInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The key name for the object that you want to place a Legal Hold on. // // Key is a required field @@ -26117,6 +29037,12 @@ func (s *PutObjectLegalHoldInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectLegalHoldInput) SetExpectedBucketOwner(v string) *PutObjectLegalHoldInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { s.Key = &v @@ -26155,6 +29081,19 @@ func (s *PutObjectLegalHoldInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutObjectLegalHoldOutput struct { _ struct{} `type:"structure"` @@ -26187,6 +29126,11 @@ type PutObjectLockConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The Object Lock configuration that you want to apply to the specified bucket. 
ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` @@ -26240,6 +29184,12 @@ func (s *PutObjectLockConfigurationInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *PutObjectLockConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + // SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput { s.ObjectLockConfiguration = v @@ -26272,6 +29222,19 @@ func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutObjectLockConfigurationOutput struct { _ struct{} `type:"structure"` @@ -26302,7 +29265,8 @@ type PutObjectOutput struct { // Entity tag for the uploaded object. ETag *string `location:"header" locationName:"ETag" type:"string"` - // If the expiration is configured for the object (see PutBucketLifecycleConfiguration), + // If the expiration is configured for the object (see PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)), // the response includes this header. It includes the expiry-date and rule-id // key-value pairs that provide information about object expiration. The value // of the rule-id is URL encoded. @@ -26415,7 +29379,7 @@ type PutObjectRetentionInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. @@ -26426,6 +29390,11 @@ type PutObjectRetentionInput struct { // Indicates whether this operation should bypass Governance-mode restrictions. BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The key name for the object that you want to apply this Object Retention // configuration to. 
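Object Lock configuration can be combined with the new ExpectedBucketOwner check. A minimal sketch with placeholder values, reusing the shared svc client:

if _, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
    Bucket:              aws.String("example-bucket"),
    ExpectedBucketOwner: aws.String("111122223333"),
    ObjectLockConfiguration: &s3.ObjectLockConfiguration{
        ObjectLockEnabled: aws.String("Enabled"),
        Rule: &s3.ObjectLockRule{
            DefaultRetention: &s3.DefaultRetention{
                Mode: aws.String("GOVERNANCE"),
                Days: aws.Int64(30),
            },
        },
    },
}); err != nil {
    fmt.Println("PutObjectLockConfiguration failed:", err)
}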
// @@ -26498,6 +29467,12 @@ func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjec return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectRetentionInput) SetExpectedBucketOwner(v string) *PutObjectRetentionInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { s.Key = &v @@ -26536,6 +29511,19 @@ func (s *PutObjectRetentionInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectRetentionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutObjectRetentionOutput struct { _ struct{} `type:"structure"` @@ -26567,15 +29555,28 @@ type PutObjectTaggingInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Name of the tag. + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Name of the object key. // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -26642,6 +29643,12 @@ func (s *PutObjectTaggingInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectTaggingInput) SetExpectedBucketOwner(v string) *PutObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. 
func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { s.Key = &v @@ -26674,6 +29681,19 @@ func (s *PutObjectTaggingInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -26706,6 +29726,11 @@ type PutPublicAccessBlockInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The PublicAccessBlock configuration that you want to apply to this Amazon // S3 bucket. You can enable the configuration options in any combination. For // more information about when Amazon S3 considers a bucket or object public, @@ -26758,6 +29783,12 @@ func (s *PutPublicAccessBlockInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutPublicAccessBlockInput) SetExpectedBucketOwner(v string) *PutPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + // SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput { s.PublicAccessBlockConfiguration = v @@ -26778,6 +29809,19 @@ func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type PutPublicAccessBlockOutput struct { _ struct{} `type:"structure"` } @@ -26868,10 +29912,10 @@ func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { return s } -// This data type is deprecated. Use QueueConfiguration for the same purposes. -// This data type specifies the configuration for publishing messages to an -// Amazon Simple Queue Service (Amazon SQS) queue when Amazon S3 detects specified -// events. +// This data type is deprecated. Use QueueConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_QueueConfiguration.html) +// for the same purposes. This data type specifies the configuration for publishing +// messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon +// S3 detects specified events. 
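PutPublicAccessBlock also accepts the new header. A sketch with placeholder values, reusing the shared svc client; the four flags can be enabled in any combination, as noted above:

if _, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
    Bucket:              aws.String("example-bucket"),
    ExpectedBucketOwner: aws.String("111122223333"),
    PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
        BlockPublicAcls:       aws.Bool(true),
        IgnorePublicAcls:      aws.Bool(true),
        BlockPublicPolicy:     aws.Bool(true),
        RestrictPublicBuckets: aws.Bool(true),
    },
}); err != nil {
    fmt.Println("PutPublicAccessBlock failed:", err)
}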
type QueueConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -26966,6 +30010,8 @@ func (s *RecordsEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream")) @@ -27624,14 +30670,27 @@ type RestoreObjectInput struct { // // When using this API with an access point, you must direct requests to the // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. - // When using this operation using an access point through the AWS SDKs, you + // When using this operation with an access point through the AWS SDKs, you // provide the access point ARN in place of the bucket name. For more information // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) // in the Amazon Simple Storage Service Developer Guide. // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Object key for which the operation was initiated. // // Key is a required field @@ -27701,6 +30760,12 @@ func (s *RestoreObjectInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *RestoreObjectInput) SetExpectedBucketOwner(v string) *RestoreObjectInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { s.Key = &v @@ -27739,6 +30804,19 @@ func (s *RestoreObjectInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s RestoreObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type RestoreObjectOutput struct { _ struct{} `type:"structure"` @@ -27784,7 +30862,7 @@ type RestoreRequest struct { // The optional description for the job. 
Description *string `type:"string"` - // Glacier related parameters pertaining to this job. Do not use with restores + // S3 Glacier related parameters pertaining to this job. Do not use with restores // that specify OutputLocation. GlacierJobParameters *GlacierJobParameters `type:"structure"` @@ -27794,7 +30872,7 @@ type RestoreRequest struct { // Describes the parameters for Select job types. SelectParameters *SelectParameters `type:"structure"` - // Glacier retrieval tier at which the restore will be processed. + // S3 Glacier retrieval tier at which the restore will be processed. Tier *string `type:"string" enum:"Tier"` // Type of restore request. @@ -27878,7 +30956,10 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest { return s } -// Specifies the redirect behavior and when a redirect is applied. +// Specifies the redirect behavior and when a redirect is applied. For more +// information about routing rules, see Configuring advanced conditional redirects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +// in the Amazon Simple Storage Service Developer Guide. type RoutingRule struct { _ struct{} `type:"structure"` @@ -27932,8 +31013,9 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { } // Specifies lifecycle rules for an Amazon S3 bucket. For more information, -// see PUT Bucket lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) -// in the Amazon Simple Storage Service API Reference. +// see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon Simple Storage Service API Reference. For examples, see Put +// Bucket Lifecycle Configuration Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples) type Rule struct { _ struct{} `type:"structure"` @@ -27978,7 +31060,10 @@ type Rule struct { // Status is a required field Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - // Specifies when an object transitions to a specified storage class. + // Specifies when an object transitions to a specified storage class. For more + // information about Amazon S3 lifecycle configuration rules, see Transitioning + // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) + // in the Amazon Simple Storage Service Developer Guide. Transition *Transition `type:"structure"` } @@ -28182,6 +31267,7 @@ type SelectObjectContentEventStreamEvent interface { // * ProgressEvent // * RecordsEvent // * StatsEvent +// * SelectObjectContentEventStreamUnknownEvent type SelectObjectContentEventStreamReader interface { // Returns a channel of events as they are read from the event stream. 
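RestoreObjectInput gains ExpectedBucketOwner, and the RestoreRequest docs now name S3 Glacier explicitly. A hedged sketch with placeholder bucket, key, and account id, reusing the shared svc client:

if _, err := svc.RestoreObject(&s3.RestoreObjectInput{
    Bucket:              aws.String("example-bucket"),
    ExpectedBucketOwner: aws.String("111122223333"),
    Key:                 aws.String("archive/report.zip"),
    RestoreRequest: &s3.RestoreRequest{
        Days: aws.Int64(2),
        GlacierJobParameters: &s3.GlacierJobParameters{
            Tier: aws.String("Standard"), // S3 Glacier retrieval tier
        },
    },
}); err != nil {
    fmt.Println("RestoreObject failed:", err)
}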
Events() <-chan SelectObjectContentEventStreamEvent @@ -28256,6 +31342,9 @@ func (r *readSelectObjectContentEventStream) readEventStream() { return default: } + if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok { + continue + } r.err.SetError(err) return } @@ -28285,14 +31374,39 @@ func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventNa case "Stats": return &StatsEvent{}, nil default: - return nil, awserr.New( - request.ErrCodeSerialization, - fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType), - nil, - ) + return &SelectObjectContentEventStreamUnknownEvent{Type: eventType}, nil } } +// SelectObjectContentEventStreamUnknownEvent provides a failsafe event for the +// SelectObjectContentEventStream group of events when an unknown event is received. +type SelectObjectContentEventStreamUnknownEvent struct { + Type string + Message eventstream.Message +} + +// The SelectObjectContentEventStreamUnknownEvent is and event in the SelectObjectContentEventStream +// group of events. +func (s *SelectObjectContentEventStreamUnknownEvent) eventSelectObjectContentEventStream() {} + +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. +func (e *SelectObjectContentEventStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) ( + msg eventstream.Message, err error, +) { + return e.Message.Clone(), nil +} + +// UnmarshalEvent unmarshals the EventStream Message into the SelectObjectContentEventStreamData value. +// This method is only used internally within the SDK's EventStream handling. +func (e *SelectObjectContentEventStreamUnknownEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + e.Message = msg.Clone() + return nil +} + // Request to filter the contents of an Amazon S3 object based on a simple Structured // Query Language (SQL) statement. In the request, along with the SQL expression, // you must specify a data serialization format (JSON or CSV) of the object. @@ -28308,6 +31422,11 @@ type SelectObjectContentInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // The expression that is used to query the object. // // Expression is a required field @@ -28424,6 +31543,12 @@ func (s *SelectObjectContentInput) getBucket() (v string) { return *s.Bucket } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *SelectObjectContentInput) SetExpectedBucketOwner(v string) *SelectObjectContentInput { + s.ExpectedBucketOwner = &v + return s +} + // SetExpression sets the Expression field's value. func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput { s.Expression = &v @@ -28505,6 +31630,19 @@ func (s *SelectObjectContentInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
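With the new SelectObjectContentEventStreamUnknownEvent failsafe, an event name the generated code does not recognize is delivered as an unknown event (and unknown message types are skipped in readEventStream) instead of terminating the stream with a serialization error. A hedged consumer sketch with placeholder bucket, key, and query, reusing the shared client and fmt import:

func selectCSV(svc *s3.S3) error {
    out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
        Bucket:              aws.String("example-bucket"),
        ExpectedBucketOwner: aws.String("111122223333"),
        Key:                 aws.String("data/example.csv"),
        Expression:          aws.String("SELECT * FROM S3Object s"),
        ExpressionType:      aws.String("SQL"),
        InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{FileHeaderInfo: aws.String("USE")}},
        OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
    })
    if err != nil {
        return err
    }
    defer out.EventStream.Close()

    for ev := range out.EventStream.Events() {
        switch e := ev.(type) {
        case *s3.RecordsEvent:
            fmt.Print(string(e.Payload)) // query results
        case *s3.SelectObjectContentEventStreamUnknownEvent:
            fmt.Println("ignoring unknown event type:", e.Type)
        }
    }
    return out.EventStream.Err()
}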
+func (s SelectObjectContentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type SelectObjectContentOutput struct { _ struct{} `type:"structure" payload:"Payload"` @@ -28623,8 +31761,24 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` - // KMS master key ID to use for the default encryption. This parameter is allowed - // if and only if SSEAlgorithm is set to aws:kms. + // AWS Key Management Service (KMS) customer master key ID to use for the default + // encryption. This parameter is allowed if and only if SSEAlgorithm is set + // to aws:kms. + // + // You can specify the key ID or the Amazon Resource Name (ARN) of the CMK. + // However, if you are using encryption with cross-account operations, you must + // use a fully qualified CMK ARN. For more information, see Using encryption + // for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). + // + // For example: + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // Amazon S3 only supports symmetric CMKs and not asymmetric CMKs. For more + // information, see Using Symmetric and Asymmetric Keys (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the AWS Key Management Service Developer Guide. KMSMasterKeyID *string `type:"string" sensitive:"true"` // Server-side encryption algorithm to use for the default encryption. @@ -28928,6 +32082,8 @@ func (s *StatsEvent) UnmarshalEvent( return nil } +// MarshalEvent marshals the type into an stream event value. This method +// should only used internally within the SDK's EventStream handling. func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) var buf bytes.Buffer @@ -29043,7 +32199,7 @@ func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *Stora type Tag struct { _ struct{} `type:"structure"` - // Name of the tag. + // Name of the object key. // // Key is a required field Key *string `min:"1" type:"string" required:"true"` @@ -29151,7 +32307,7 @@ type TargetGrant struct { // Container for the person being granted permissions. Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` - // Logging permissions assigned to the Grantee for the bucket. + // Logging permissions assigned to the grantee for the bucket. Permission *string `type:"string" enum:"BucketLogsPermission"` } @@ -29274,6 +32430,7 @@ func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { // A container for specifying the configuration for publication of messages // to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 // detects specified events. This data type is deprecated. Use TopicConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_TopicConfiguration.html) // instead. 
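
// Illustrative sketch (caller-side code, not part of the vendored patch):
// applying the default-encryption settings documented above, using aws:kms
// with a fully qualified CMK ARN as recommended for cross-account use. The
// bucket name and key ARN are placeholders taken from the doc examples.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("awsexamplebucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
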
type TopicConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -29329,7 +32486,10 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep return s } -// Specifies when an object transitions to a specified storage class. +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon Simple Storage Service Developer Guide. type Transition struct { _ struct{} `type:"structure"` @@ -29378,11 +32538,52 @@ type UploadPartCopyInput struct { // The bucket name. // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The name of the source bucket and key name of the source object, separated - // by a slash (/). Must be URL-encoded. + // Specifies the source object for the copy operation. You specify the value + // in one of two formats, depending on whether you want to access the source + // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html): + // + // * For objects not accessed through an access point, specify the name of + // the source bucket and key of the source object, separated by a slash (/). + // For example, to copy the object reports/january.pdf from the bucket awsexamplebucket, + // use awsexamplebucket/reports/january.pdf. The value must be URL encoded. + // + // * For objects accessed through access points, specify the Amazon Resource + // Name (ARN) of the object as accessed through the access point, in the + // format arn:aws:s3:::accesspoint//object/. + // For example, to copy the object reports/january.pdf through access point + // my-access-point owned by account 123456789012 in Region us-west-2, use + // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. + // The value must be URL encoded. Amazon S3 supports copy operations using + // access points only when the source and destination buckets are in the + // same AWS Region. Alternatively, for objects accessed through Amazon S3 + // on Outposts, specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/. 
+ // For example, to copy the object reports/january.pdf through outpost my-outpost + // owned by account 123456789012 in Region us-west-2, use the URL encoding + // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. + // The value must be URL encoded. + // + // To copy a specific version of an object, append ?versionId= to + // the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). + // If you don't specify a version ID, Amazon S3 copies the latest version of + // the source object. // // CopySource is a required field CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` @@ -29421,6 +32622,16 @@ type UploadPartCopyInput struct { // encryption key was transmitted without error. CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + // The account id of the expected destination bucket owner. If the destination + // bucket is owned by a different account, the request will fail with an HTTP + // 403 (Access Denied) error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The account id of the expected source bucket owner. If the source bucket + // is owned by a different account, the request will fail with an HTTP 403 (Access + // Denied) error. + ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` + // Object key for which the multipart upload was initiated. // // Key is a required field @@ -29446,7 +32657,7 @@ type UploadPartCopyInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` @@ -29577,6 +32788,18 @@ func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPa return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *UploadPartCopyInput) SetExpectedBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. +func (s *UploadPartCopyInput) SetExpectedSourceBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedSourceBucketOwner = &v + return s +} + // SetKey sets the Key field's value. func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { s.Key = &v @@ -29640,6 +32863,19 @@ func (s *UploadPartCopyInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
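
// Illustrative sketch (caller-side code, not part of the vendored patch):
// an UploadPartCopy request using the CopySource formats documented above
// (here, a specific source version) together with the new ExpectedBucketOwner
// and ExpectedSourceBucketOwner headers. Bucket names, upload ID and account
// IDs are placeholders; the CopySource value must be URL encoded.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
		Bucket:     aws.String("destinationbucket"),
		Key:        aws.String("reports/january.pdf"),
		UploadId:   aws.String("EXAMPLE-UPLOAD-ID"),
		PartNumber: aws.Int64(1),
		CopySource: aws.String("awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893"),

		// Fail with HTTP 403 if either bucket is owned by an unexpected account.
		ExpectedBucketOwner:       aws.String("111122223333"),
		ExpectedSourceBucketOwner: aws.String("123456789012"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
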
+func (s UploadPartCopyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` @@ -29732,7 +32968,22 @@ type UploadPartInput struct { // Object data. Body io.ReadSeeker `type:"blob"` - // Name of the bucket to which the multipart upload was initiated. + // The name of the bucket to which the multipart upload was initiated. + // + // When using this API with an access point, you must direct requests to the + // access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this operation with an access point through the AWS SDKs, you + // provide the access point ARN in place of the bucket name. For more information + // about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // When using this API with Amazon S3 on Outposts, you must direct requests + // to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form + // AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When + // using this operation using S3 on Outposts through the AWS SDKs, you provide + // the Outposts bucket ARN in place of the bucket name. For more information + // about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -29746,6 +32997,11 @@ type UploadPartInput struct { // if object lock parameters are specified. ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + // The account id of the expected bucket owner. If the bucket is owned by a + // different account, the request will fail with an HTTP 403 (Access Denied) + // error. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + // Object key for which the multipart upload was initiated. // // Key is a required field @@ -29771,7 +33027,7 @@ type UploadPartInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting // data. This value is used to store the object and then it is discarded; Amazon // S3 does not store the encryption key. The key must be appropriate for use - // with the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` @@ -29856,6 +33112,12 @@ func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { return s } +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *UploadPartInput) SetExpectedBucketOwner(v string) *UploadPartInput { + s.ExpectedBucketOwner = &v + return s +} + // SetKey sets the Key field's value. 
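
// Illustrative sketch (caller-side code, not part of the vendored patch):
// uploading a part with an access point ARN supplied in place of the bucket
// name, as described in the comments above, plus the new ExpectedBucketOwner
// header. The ARN, key, upload ID and account ID are placeholders.
package main

import (
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"),
		Key:        aws.String("large-object.bin"),
		UploadId:   aws.String("EXAMPLE-UPLOAD-ID"),
		PartNumber: aws.Int64(1),
		Body:       strings.NewReader("part one of the payload"),

		// Fail with HTTP 403 if the bucket is owned by a different account.
		ExpectedBucketOwner: aws.String("123456789012"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
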
func (s *UploadPartInput) SetKey(v string) *UploadPartInput { s.Key = &v @@ -29919,6 +33181,19 @@ func (s *UploadPartInput) hasEndpointARN() bool { return arn.IsARN(*s.Bucket) } +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s UploadPartInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + type UploadPartOutput struct { _ struct{} `type:"structure"` @@ -30124,6 +33399,13 @@ const ( AnalyticsS3ExportFileFormatCsv = "CSV" ) +// AnalyticsS3ExportFileFormat_Values returns all elements of the AnalyticsS3ExportFileFormat enum +func AnalyticsS3ExportFileFormat_Values() []string { + return []string{ + AnalyticsS3ExportFileFormatCsv, + } +} + const ( // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value BucketAccelerateStatusEnabled = "Enabled" @@ -30132,6 +33414,14 @@ const ( BucketAccelerateStatusSuspended = "Suspended" ) +// BucketAccelerateStatus_Values returns all elements of the BucketAccelerateStatus enum +func BucketAccelerateStatus_Values() []string { + return []string{ + BucketAccelerateStatusEnabled, + BucketAccelerateStatusSuspended, + } +} + const ( // BucketCannedACLPrivate is a BucketCannedACL enum value BucketCannedACLPrivate = "private" @@ -30146,18 +33436,31 @@ const ( BucketCannedACLAuthenticatedRead = "authenticated-read" ) +// BucketCannedACL_Values returns all elements of the BucketCannedACL enum +func BucketCannedACL_Values() []string { + return []string{ + BucketCannedACLPrivate, + BucketCannedACLPublicRead, + BucketCannedACLPublicReadWrite, + BucketCannedACLAuthenticatedRead, + } +} + const ( - // BucketLocationConstraintEu is a BucketLocationConstraint enum value - BucketLocationConstraintEu = "EU" + // BucketLocationConstraintAfSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintAfSouth1 = "af-south-1" - // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value - BucketLocationConstraintEuWest1 = "eu-west-1" + // BucketLocationConstraintApEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApEast1 = "ap-east-1" - // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value - BucketLocationConstraintUsWest1 = "us-west-1" + // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast1 = "ap-northeast-1" - // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value - BucketLocationConstraintUsWest2 = "us-west-2" + // BucketLocationConstraintApNortheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast2 = "ap-northeast-2" + + // BucketLocationConstraintApNortheast3 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast3 = "ap-northeast-3" // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value BucketLocationConstraintApSouth1 = "ap-south-1" @@ -30168,19 +33471,89 @@ const ( // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value BucketLocationConstraintApSoutheast2 = "ap-southeast-2" - // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value - BucketLocationConstraintApNortheast1 = "ap-northeast-1" - - 
// BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value - BucketLocationConstraintSaEast1 = "sa-east-1" + // BucketLocationConstraintCaCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintCaCentral1 = "ca-central-1" // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value BucketLocationConstraintCnNorth1 = "cn-north-1" + // BucketLocationConstraintCnNorthwest1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorthwest1 = "cn-northwest-1" + + // BucketLocationConstraintEu is a BucketLocationConstraint enum value + BucketLocationConstraintEu = "EU" + // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value BucketLocationConstraintEuCentral1 = "eu-central-1" + + // BucketLocationConstraintEuNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuNorth1 = "eu-north-1" + + // BucketLocationConstraintEuSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuSouth1 = "eu-south-1" + + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest1 = "eu-west-1" + + // BucketLocationConstraintEuWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest2 = "eu-west-2" + + // BucketLocationConstraintEuWest3 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest3 = "eu-west-3" + + // BucketLocationConstraintMeSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintMeSouth1 = "me-south-1" + + // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintSaEast1 = "sa-east-1" + + // BucketLocationConstraintUsEast2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsEast2 = "us-east-2" + + // BucketLocationConstraintUsGovEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsGovEast1 = "us-gov-east-1" + + // BucketLocationConstraintUsGovWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsGovWest1 = "us-gov-west-1" + + // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest1 = "us-west-1" + + // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest2 = "us-west-2" ) +// BucketLocationConstraint_Values returns all elements of the BucketLocationConstraint enum +func BucketLocationConstraint_Values() []string { + return []string{ + BucketLocationConstraintAfSouth1, + BucketLocationConstraintApEast1, + BucketLocationConstraintApNortheast1, + BucketLocationConstraintApNortheast2, + BucketLocationConstraintApNortheast3, + BucketLocationConstraintApSouth1, + BucketLocationConstraintApSoutheast1, + BucketLocationConstraintApSoutheast2, + BucketLocationConstraintCaCentral1, + BucketLocationConstraintCnNorth1, + BucketLocationConstraintCnNorthwest1, + BucketLocationConstraintEu, + BucketLocationConstraintEuCentral1, + BucketLocationConstraintEuNorth1, + BucketLocationConstraintEuSouth1, + BucketLocationConstraintEuWest1, + BucketLocationConstraintEuWest2, + BucketLocationConstraintEuWest3, + BucketLocationConstraintMeSouth1, + BucketLocationConstraintSaEast1, + BucketLocationConstraintUsEast2, + BucketLocationConstraintUsGovEast1, + BucketLocationConstraintUsGovWest1, + BucketLocationConstraintUsWest1, + BucketLocationConstraintUsWest2, + } +} + const ( // BucketLogsPermissionFullControl is a BucketLogsPermission enum value BucketLogsPermissionFullControl = 
"FULL_CONTROL" @@ -30192,6 +33565,15 @@ const ( BucketLogsPermissionWrite = "WRITE" ) +// BucketLogsPermission_Values returns all elements of the BucketLogsPermission enum +func BucketLogsPermission_Values() []string { + return []string{ + BucketLogsPermissionFullControl, + BucketLogsPermissionRead, + BucketLogsPermissionWrite, + } +} + const ( // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value BucketVersioningStatusEnabled = "Enabled" @@ -30200,6 +33582,14 @@ const ( BucketVersioningStatusSuspended = "Suspended" ) +// BucketVersioningStatus_Values returns all elements of the BucketVersioningStatus enum +func BucketVersioningStatus_Values() []string { + return []string{ + BucketVersioningStatusEnabled, + BucketVersioningStatusSuspended, + } +} + const ( // CompressionTypeNone is a CompressionType enum value CompressionTypeNone = "NONE" @@ -30211,6 +33601,15 @@ const ( CompressionTypeBzip2 = "BZIP2" ) +// CompressionType_Values returns all elements of the CompressionType enum +func CompressionType_Values() []string { + return []string{ + CompressionTypeNone, + CompressionTypeGzip, + CompressionTypeBzip2, + } +} + const ( // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value DeleteMarkerReplicationStatusEnabled = "Enabled" @@ -30219,6 +33618,14 @@ const ( DeleteMarkerReplicationStatusDisabled = "Disabled" ) +// DeleteMarkerReplicationStatus_Values returns all elements of the DeleteMarkerReplicationStatus enum +func DeleteMarkerReplicationStatus_Values() []string { + return []string{ + DeleteMarkerReplicationStatusEnabled, + DeleteMarkerReplicationStatusDisabled, + } +} + // Requests Amazon S3 to encode the object keys in the response and specifies // the encoding method to use. An object key may contain any Unicode character; // however, XML 1.0 parser cannot parse some characters, such as characters @@ -30230,6 +33637,13 @@ const ( EncodingTypeUrl = "url" ) +// EncodingType_Values returns all elements of the EncodingType enum +func EncodingType_Values() []string { + return []string{ + EncodingTypeUrl, + } +} + // The bucket event for which to send notifications. 
const ( // EventS3ReducedRedundancyLostObject is a Event enum value @@ -30284,6 +33698,29 @@ const ( EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold" ) +// Event_Values returns all elements of the Event enum +func Event_Values() []string { + return []string{ + EventS3ReducedRedundancyLostObject, + EventS3ObjectCreated, + EventS3ObjectCreatedPut, + EventS3ObjectCreatedPost, + EventS3ObjectCreatedCopy, + EventS3ObjectCreatedCompleteMultipartUpload, + EventS3ObjectRemoved, + EventS3ObjectRemovedDelete, + EventS3ObjectRemovedDeleteMarkerCreated, + EventS3ObjectRestore, + EventS3ObjectRestorePost, + EventS3ObjectRestoreCompleted, + EventS3Replication, + EventS3ReplicationOperationFailedReplication, + EventS3ReplicationOperationNotTracked, + EventS3ReplicationOperationMissedThreshold, + EventS3ReplicationOperationReplicatedAfterThreshold, + } +} + const ( // ExistingObjectReplicationStatusEnabled is a ExistingObjectReplicationStatus enum value ExistingObjectReplicationStatusEnabled = "Enabled" @@ -30292,6 +33729,14 @@ const ( ExistingObjectReplicationStatusDisabled = "Disabled" ) +// ExistingObjectReplicationStatus_Values returns all elements of the ExistingObjectReplicationStatus enum +func ExistingObjectReplicationStatus_Values() []string { + return []string{ + ExistingObjectReplicationStatusEnabled, + ExistingObjectReplicationStatusDisabled, + } +} + const ( // ExpirationStatusEnabled is a ExpirationStatus enum value ExpirationStatusEnabled = "Enabled" @@ -30300,11 +33745,26 @@ const ( ExpirationStatusDisabled = "Disabled" ) +// ExpirationStatus_Values returns all elements of the ExpirationStatus enum +func ExpirationStatus_Values() []string { + return []string{ + ExpirationStatusEnabled, + ExpirationStatusDisabled, + } +} + const ( // ExpressionTypeSql is a ExpressionType enum value ExpressionTypeSql = "SQL" ) +// ExpressionType_Values returns all elements of the ExpressionType enum +func ExpressionType_Values() []string { + return []string{ + ExpressionTypeSql, + } +} + const ( // FileHeaderInfoUse is a FileHeaderInfo enum value FileHeaderInfoUse = "USE" @@ -30316,6 +33776,15 @@ const ( FileHeaderInfoNone = "NONE" ) +// FileHeaderInfo_Values returns all elements of the FileHeaderInfo enum +func FileHeaderInfo_Values() []string { + return []string{ + FileHeaderInfoUse, + FileHeaderInfoIgnore, + FileHeaderInfoNone, + } +} + const ( // FilterRuleNamePrefix is a FilterRuleName enum value FilterRuleNamePrefix = "prefix" @@ -30324,6 +33793,14 @@ const ( FilterRuleNameSuffix = "suffix" ) +// FilterRuleName_Values returns all elements of the FilterRuleName enum +func FilterRuleName_Values() []string { + return []string{ + FilterRuleNamePrefix, + FilterRuleNameSuffix, + } +} + const ( // InventoryFormatCsv is a InventoryFormat enum value InventoryFormatCsv = "CSV" @@ -30335,6 +33812,15 @@ const ( InventoryFormatParquet = "Parquet" ) +// InventoryFormat_Values returns all elements of the InventoryFormat enum +func InventoryFormat_Values() []string { + return []string{ + InventoryFormatCsv, + InventoryFormatOrc, + InventoryFormatParquet, + } +} + const ( // InventoryFrequencyDaily is a InventoryFrequency enum value InventoryFrequencyDaily = "Daily" @@ -30343,6 +33829,14 @@ const ( InventoryFrequencyWeekly = "Weekly" ) +// InventoryFrequency_Values returns all elements of the InventoryFrequency enum +func InventoryFrequency_Values() []string { + return []string{ + InventoryFrequencyDaily, + InventoryFrequencyWeekly, + } +} + const ( // 
InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value InventoryIncludedObjectVersionsAll = "All" @@ -30351,6 +33845,14 @@ const ( InventoryIncludedObjectVersionsCurrent = "Current" ) +// InventoryIncludedObjectVersions_Values returns all elements of the InventoryIncludedObjectVersions enum +func InventoryIncludedObjectVersions_Values() []string { + return []string{ + InventoryIncludedObjectVersionsAll, + InventoryIncludedObjectVersionsCurrent, + } +} + const ( // InventoryOptionalFieldSize is a InventoryOptionalField enum value InventoryOptionalFieldSize = "Size" @@ -30386,6 +33888,23 @@ const ( InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" ) +// InventoryOptionalField_Values returns all elements of the InventoryOptionalField enum +func InventoryOptionalField_Values() []string { + return []string{ + InventoryOptionalFieldSize, + InventoryOptionalFieldLastModifiedDate, + InventoryOptionalFieldStorageClass, + InventoryOptionalFieldEtag, + InventoryOptionalFieldIsMultipartUploaded, + InventoryOptionalFieldReplicationStatus, + InventoryOptionalFieldEncryptionStatus, + InventoryOptionalFieldObjectLockRetainUntilDate, + InventoryOptionalFieldObjectLockMode, + InventoryOptionalFieldObjectLockLegalHoldStatus, + InventoryOptionalFieldIntelligentTieringAccessTier, + } +} + const ( // JSONTypeDocument is a JSONType enum value JSONTypeDocument = "DOCUMENT" @@ -30394,6 +33913,14 @@ const ( JSONTypeLines = "LINES" ) +// JSONType_Values returns all elements of the JSONType enum +func JSONType_Values() []string { + return []string{ + JSONTypeDocument, + JSONTypeLines, + } +} + const ( // MFADeleteEnabled is a MFADelete enum value MFADeleteEnabled = "Enabled" @@ -30402,6 +33929,14 @@ const ( MFADeleteDisabled = "Disabled" ) +// MFADelete_Values returns all elements of the MFADelete enum +func MFADelete_Values() []string { + return []string{ + MFADeleteEnabled, + MFADeleteDisabled, + } +} + const ( // MFADeleteStatusEnabled is a MFADeleteStatus enum value MFADeleteStatusEnabled = "Enabled" @@ -30410,6 +33945,14 @@ const ( MFADeleteStatusDisabled = "Disabled" ) +// MFADeleteStatus_Values returns all elements of the MFADeleteStatus enum +func MFADeleteStatus_Values() []string { + return []string{ + MFADeleteStatusEnabled, + MFADeleteStatusDisabled, + } +} + const ( // MetadataDirectiveCopy is a MetadataDirective enum value MetadataDirectiveCopy = "COPY" @@ -30418,6 +33961,14 @@ const ( MetadataDirectiveReplace = "REPLACE" ) +// MetadataDirective_Values returns all elements of the MetadataDirective enum +func MetadataDirective_Values() []string { + return []string{ + MetadataDirectiveCopy, + MetadataDirectiveReplace, + } +} + const ( // MetricsStatusEnabled is a MetricsStatus enum value MetricsStatusEnabled = "Enabled" @@ -30426,6 +33977,14 @@ const ( MetricsStatusDisabled = "Disabled" ) +// MetricsStatus_Values returns all elements of the MetricsStatus enum +func MetricsStatus_Values() []string { + return []string{ + MetricsStatusEnabled, + MetricsStatusDisabled, + } +} + const ( // ObjectCannedACLPrivate is a ObjectCannedACL enum value ObjectCannedACLPrivate = "private" @@ -30449,11 +34008,31 @@ const ( ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" ) +// ObjectCannedACL_Values returns all elements of the ObjectCannedACL enum +func ObjectCannedACL_Values() []string { + return []string{ + ObjectCannedACLPrivate, + ObjectCannedACLPublicRead, + ObjectCannedACLPublicReadWrite, + ObjectCannedACLAuthenticatedRead, + 
ObjectCannedACLAwsExecRead, + ObjectCannedACLBucketOwnerRead, + ObjectCannedACLBucketOwnerFullControl, + } +} + const ( // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value ObjectLockEnabledEnabled = "Enabled" ) +// ObjectLockEnabled_Values returns all elements of the ObjectLockEnabled enum +func ObjectLockEnabled_Values() []string { + return []string{ + ObjectLockEnabledEnabled, + } +} + const ( // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value ObjectLockLegalHoldStatusOn = "ON" @@ -30462,6 +34041,14 @@ const ( ObjectLockLegalHoldStatusOff = "OFF" ) +// ObjectLockLegalHoldStatus_Values returns all elements of the ObjectLockLegalHoldStatus enum +func ObjectLockLegalHoldStatus_Values() []string { + return []string{ + ObjectLockLegalHoldStatusOn, + ObjectLockLegalHoldStatusOff, + } +} + const ( // ObjectLockModeGovernance is a ObjectLockMode enum value ObjectLockModeGovernance = "GOVERNANCE" @@ -30470,6 +34057,14 @@ const ( ObjectLockModeCompliance = "COMPLIANCE" ) +// ObjectLockMode_Values returns all elements of the ObjectLockMode enum +func ObjectLockMode_Values() []string { + return []string{ + ObjectLockModeGovernance, + ObjectLockModeCompliance, + } +} + const ( // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value ObjectLockRetentionModeGovernance = "GOVERNANCE" @@ -30478,6 +34073,38 @@ const ( ObjectLockRetentionModeCompliance = "COMPLIANCE" ) +// ObjectLockRetentionMode_Values returns all elements of the ObjectLockRetentionMode enum +func ObjectLockRetentionMode_Values() []string { + return []string{ + ObjectLockRetentionModeGovernance, + ObjectLockRetentionModeCompliance, + } +} + +// The container element for object ownership for a bucket's ownership controls. +// +// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to +// the bucket owner if the objects are uploaded with the bucket-owner-full-control +// canned ACL. +// +// ObjectWriter - The uploading account will own the object if the object is +// uploaded with the bucket-owner-full-control canned ACL. 
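
// Illustrative sketch (not part of the vendored patch): the generated
// *_Values() helpers added throughout this file return every allowed string
// for an enum, which is handy for validation or iteration. The helper below
// is hypothetical and simply restates the two documented ObjectOwnership
// behaviors.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func describeObjectOwnership(v string) string {
	switch v {
	case s3.ObjectOwnershipBucketOwnerPreferred:
		return "objects uploaded with bucket-owner-full-control become owned by the bucket owner"
	case s3.ObjectOwnershipObjectWriter:
		return "the uploading account keeps ownership of the object"
	default:
		return "unknown ownership setting"
	}
}

func main() {
	for _, v := range s3.ObjectOwnership_Values() {
		fmt.Printf("%s: %s\n", v, describeObjectOwnership(v))
	}
}
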
+const ( + // ObjectOwnershipBucketOwnerPreferred is a ObjectOwnership enum value + ObjectOwnershipBucketOwnerPreferred = "BucketOwnerPreferred" + + // ObjectOwnershipObjectWriter is a ObjectOwnership enum value + ObjectOwnershipObjectWriter = "ObjectWriter" +) + +// ObjectOwnership_Values returns all elements of the ObjectOwnership enum +func ObjectOwnership_Values() []string { + return []string{ + ObjectOwnershipBucketOwnerPreferred, + ObjectOwnershipObjectWriter, + } +} + const ( // ObjectStorageClassStandard is a ObjectStorageClass enum value ObjectStorageClassStandard = "STANDARD" @@ -30499,18 +34126,49 @@ const ( // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" + + // ObjectStorageClassOutposts is a ObjectStorageClass enum value + ObjectStorageClassOutposts = "OUTPOSTS" ) +// ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum +func ObjectStorageClass_Values() []string { + return []string{ + ObjectStorageClassStandard, + ObjectStorageClassReducedRedundancy, + ObjectStorageClassGlacier, + ObjectStorageClassStandardIa, + ObjectStorageClassOnezoneIa, + ObjectStorageClassIntelligentTiering, + ObjectStorageClassDeepArchive, + ObjectStorageClassOutposts, + } +} + const ( // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value ObjectVersionStorageClassStandard = "STANDARD" ) +// ObjectVersionStorageClass_Values returns all elements of the ObjectVersionStorageClass enum +func ObjectVersionStorageClass_Values() []string { + return []string{ + ObjectVersionStorageClassStandard, + } +} + const ( // OwnerOverrideDestination is a OwnerOverride enum value OwnerOverrideDestination = "Destination" ) +// OwnerOverride_Values returns all elements of the OwnerOverride enum +func OwnerOverride_Values() []string { + return []string{ + OwnerOverrideDestination, + } +} + const ( // PayerRequester is a Payer enum value PayerRequester = "Requester" @@ -30519,6 +34177,14 @@ const ( PayerBucketOwner = "BucketOwner" ) +// Payer_Values returns all elements of the Payer enum +func Payer_Values() []string { + return []string{ + PayerRequester, + PayerBucketOwner, + } +} + const ( // PermissionFullControl is a Permission enum value PermissionFullControl = "FULL_CONTROL" @@ -30536,6 +34202,17 @@ const ( PermissionReadAcp = "READ_ACP" ) +// Permission_Values returns all elements of the Permission enum +func Permission_Values() []string { + return []string{ + PermissionFullControl, + PermissionWrite, + PermissionWriteAcp, + PermissionRead, + PermissionReadAcp, + } +} + const ( // ProtocolHttp is a Protocol enum value ProtocolHttp = "http" @@ -30544,6 +34221,14 @@ const ( ProtocolHttps = "https" ) +// Protocol_Values returns all elements of the Protocol enum +func Protocol_Values() []string { + return []string{ + ProtocolHttp, + ProtocolHttps, + } +} + const ( // QuoteFieldsAlways is a QuoteFields enum value QuoteFieldsAlways = "ALWAYS" @@ -30552,6 +34237,14 @@ const ( QuoteFieldsAsneeded = "ASNEEDED" ) +// QuoteFields_Values returns all elements of the QuoteFields enum +func QuoteFields_Values() []string { + return []string{ + QuoteFieldsAlways, + QuoteFieldsAsneeded, + } +} + const ( // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value ReplicationRuleStatusEnabled = "Enabled" @@ -30560,6 +34253,14 @@ const ( ReplicationRuleStatusDisabled = "Disabled" ) +// ReplicationRuleStatus_Values returns all elements of the ReplicationRuleStatus enum +func ReplicationRuleStatus_Values() []string { + 
return []string{ + ReplicationRuleStatusEnabled, + ReplicationRuleStatusDisabled, + } +} + const ( // ReplicationStatusComplete is a ReplicationStatus enum value ReplicationStatusComplete = "COMPLETE" @@ -30574,6 +34275,16 @@ const ( ReplicationStatusReplica = "REPLICA" ) +// ReplicationStatus_Values returns all elements of the ReplicationStatus enum +func ReplicationStatus_Values() []string { + return []string{ + ReplicationStatusComplete, + ReplicationStatusPending, + ReplicationStatusFailed, + ReplicationStatusReplica, + } +} + const ( // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum value ReplicationTimeStatusEnabled = "Enabled" @@ -30582,6 +34293,14 @@ const ( ReplicationTimeStatusDisabled = "Disabled" ) +// ReplicationTimeStatus_Values returns all elements of the ReplicationTimeStatus enum +func ReplicationTimeStatus_Values() []string { + return []string{ + ReplicationTimeStatusEnabled, + ReplicationTimeStatusDisabled, + } +} + // If present, indicates that the requester was successfully charged for the // request. const ( @@ -30589,6 +34308,13 @@ const ( RequestChargedRequester = "requester" ) +// RequestCharged_Values returns all elements of the RequestCharged enum +func RequestCharged_Values() []string { + return []string{ + RequestChargedRequester, + } +} + // Confirms that the requester knows that they will be charged for the request. // Bucket owners need not specify this parameter in their requests. For information // about downloading objects from requester pays buckets, see Downloading Objects @@ -30599,11 +34325,25 @@ const ( RequestPayerRequester = "requester" ) +// RequestPayer_Values returns all elements of the RequestPayer enum +func RequestPayer_Values() []string { + return []string{ + RequestPayerRequester, + } +} + const ( // RestoreRequestTypeSelect is a RestoreRequestType enum value RestoreRequestTypeSelect = "SELECT" ) +// RestoreRequestType_Values returns all elements of the RestoreRequestType enum +func RestoreRequestType_Values() []string { + return []string{ + RestoreRequestTypeSelect, + } +} + const ( // ServerSideEncryptionAes256 is a ServerSideEncryption enum value ServerSideEncryptionAes256 = "AES256" @@ -30612,6 +34352,14 @@ const ( ServerSideEncryptionAwsKms = "aws:kms" ) +// ServerSideEncryption_Values returns all elements of the ServerSideEncryption enum +func ServerSideEncryption_Values() []string { + return []string{ + ServerSideEncryptionAes256, + ServerSideEncryptionAwsKms, + } +} + const ( // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value SseKmsEncryptedObjectsStatusEnabled = "Enabled" @@ -30620,6 +34368,14 @@ const ( SseKmsEncryptedObjectsStatusDisabled = "Disabled" ) +// SseKmsEncryptedObjectsStatus_Values returns all elements of the SseKmsEncryptedObjectsStatus enum +func SseKmsEncryptedObjectsStatus_Values() []string { + return []string{ + SseKmsEncryptedObjectsStatusEnabled, + SseKmsEncryptedObjectsStatusDisabled, + } +} + const ( // StorageClassStandard is a StorageClass enum value StorageClassStandard = "STANDARD" @@ -30641,13 +34397,37 @@ const ( // StorageClassDeepArchive is a StorageClass enum value StorageClassDeepArchive = "DEEP_ARCHIVE" + + // StorageClassOutposts is a StorageClass enum value + StorageClassOutposts = "OUTPOSTS" ) +// StorageClass_Values returns all elements of the StorageClass enum +func StorageClass_Values() []string { + return []string{ + StorageClassStandard, + StorageClassReducedRedundancy, + StorageClassStandardIa, + StorageClassOnezoneIa, + 
StorageClassIntelligentTiering, + StorageClassGlacier, + StorageClassDeepArchive, + StorageClassOutposts, + } +} + const ( // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value StorageClassAnalysisSchemaVersionV1 = "V_1" ) +// StorageClassAnalysisSchemaVersion_Values returns all elements of the StorageClassAnalysisSchemaVersion enum +func StorageClassAnalysisSchemaVersion_Values() []string { + return []string{ + StorageClassAnalysisSchemaVersionV1, + } +} + const ( // TaggingDirectiveCopy is a TaggingDirective enum value TaggingDirectiveCopy = "COPY" @@ -30656,6 +34436,14 @@ const ( TaggingDirectiveReplace = "REPLACE" ) +// TaggingDirective_Values returns all elements of the TaggingDirective enum +func TaggingDirective_Values() []string { + return []string{ + TaggingDirectiveCopy, + TaggingDirectiveReplace, + } +} + const ( // TierStandard is a Tier enum value TierStandard = "Standard" @@ -30667,6 +34455,15 @@ const ( TierExpedited = "Expedited" ) +// Tier_Values returns all elements of the Tier enum +func Tier_Values() []string { + return []string{ + TierStandard, + TierBulk, + TierExpedited, + } +} + const ( // TransitionStorageClassGlacier is a TransitionStorageClass enum value TransitionStorageClassGlacier = "GLACIER" @@ -30684,6 +34481,17 @@ const ( TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" ) +// TransitionStorageClass_Values returns all elements of the TransitionStorageClass enum +func TransitionStorageClass_Values() []string { + return []string{ + TransitionStorageClassGlacier, + TransitionStorageClassStandardIa, + TransitionStorageClassOnezoneIa, + TransitionStorageClassIntelligentTiering, + TransitionStorageClassDeepArchive, + } +} + const ( // TypeCanonicalUser is a Type enum value TypeCanonicalUser = "CanonicalUser" @@ -30694,3 +34502,12 @@ const ( // TypeGroup is a Type enum value TypeGroup = "Group" ) + +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeCanonicalUser, + TypeAmazonCustomerByEmail, + TypeGroup, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go index 5c8ce5cc8..407f06b6e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/sdkio" ) const ( @@ -25,30 +24,6 @@ const ( appendMD5TxEncoding = "append-md5" ) -// contentMD5 computes and sets the HTTP Content-MD5 header for requests that -// require it. -func contentMD5(r *request.Request) { - h := md5.New() - - if !aws.IsReaderSeekable(r.Body) { - if r.Config.Logger != nil { - r.Config.Logger.Log(fmt.Sprintf( - "Unable to compute Content-MD5 for unseekable body, S3.%s", - r.Operation.Name)) - } - return - } - - if _, err := copySeekableBody(h, r.Body); err != nil { - r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) - return - } - - // encode the md5 checksum in base64 and set the request header. - v := base64.StdEncoding.EncodeToString(h.Sum(nil)) - r.HTTPRequest.Header.Set(contentMD5Header, v) -} - // computeBodyHashes will add Content MD5 and Content Sha256 hashes to the // request. If the body is not seekable or S3DisableContentMD5Validation set // this handler will be ignored. 
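
// Illustrative sketch (not part of the vendored patch): the exported
// aws.CopySeekableBody helper, which this change switches to in place of the
// removed package-local copySeekableBody, copies a seekable body into a
// writer and is expected to restore the original read position, mirroring
// the behavior of the helper being removed in this hunk.
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	body := strings.NewReader("example payload")

	// Hash the body without consuming it.
	h := md5.New()
	if _, err := aws.CopySeekableBody(h, body); err != nil {
		log.Fatal(err)
	}
	fmt.Println("Content-MD5:", base64.StdEncoding.EncodeToString(h.Sum(nil)))

	// The reader should still be positioned at its starting offset.
	pos, _ := body.Seek(0, io.SeekCurrent)
	fmt.Println("offset after hashing:", pos)
}
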
@@ -90,7 +65,7 @@ func computeBodyHashes(r *request.Request) { dst = io.MultiWriter(hashers...) } - if _, err := copySeekableBody(dst, r.Body); err != nil { + if _, err := aws.CopySeekableBody(dst, r.Body); err != nil { r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) return } @@ -119,28 +94,6 @@ const ( sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen ) -func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { - curPos, err := src.Seek(0, sdkio.SeekCurrent) - if err != nil { - return 0, err - } - - // hash the body. seek back to the first position after reading to reset - // the body for transmission. copy errors may be assumed to be from the - // body. - n, err := io.Copy(dst, src) - if err != nil { - return n, err - } - - _, err = src.Seek(curPos, sdkio.SeekStart) - if err != nil { - return n, err - } - - return n, nil -} - // Adds the x-amz-te: append_md5 header to the request. This requests the service // responds with a trailing MD5 checksum. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 036d0b2e0..f1959b03a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -3,8 +3,8 @@ package s3 import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/internal/s3err" - "github.com/aws/aws-sdk-go/service/s3/internal/arn" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/internal/s3shared/s3err" ) func init() { @@ -33,12 +33,6 @@ func defaultInitRequestFn(r *request.Request) { platformRequestHandlers(r) switch r.Operation.Name { - case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, - opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, - opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration, - opPutBucketReplication: - // These S3 operations require Content-MD5 to be set - r.Handlers.Build.PushBack(contentMD5) case opGetBucketLocation: // GetBucketLocation has custom parsing logic r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) @@ -75,6 +69,8 @@ type copySourceSSECustomerKeyGetter interface { getCopySourceSSECustomerKey() string } +// endpointARNGetter is an accessor interface to grab the +// the field corresponding to an endpoint ARN input. type endpointARNGetter interface { getEndpointARN() (arn.Resource, error) hasEndpointARN() bool diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go index 4b65f7153..7f7aca208 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -104,19 +104,6 @@ // content from S3. The Encryption and Decryption clients can be used concurrently // once the client is created. // -// sess := session.Must(session.NewSession()) -// -// // Create the decryption client. -// svc := s3crypto.NewDecryptionClient(sess) -// -// // The object will be downloaded from S3 and decrypted locally. By metadata -// // about the object's encryption will instruct the decryption client how -// // decrypt the content of the object. By default KMS is used for keys. -// result, err := svc.GetObject(&s3.GetObjectInput { -// Bucket: aws.String(myBucket), -// Key: aws.String(myKey), -// }) -// // See the s3crypto package documentation for more information. 
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/ // diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go index c4048fbfb..403aebb68 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go @@ -6,11 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws" awsarn "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/service/s3/internal/arn" + "github.com/aws/aws-sdk-go/internal/s3shared" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" ) // Used by shapes with members decorated as endpoint ARN. @@ -22,12 +20,66 @@ func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) { resParts := arn.SplitResource(a.Resource) switch resParts[0] { case "accesspoint": + if a.Service != "s3" { + return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3"} + } return arn.ParseAccessPointResource(a, resParts[1:]) + case "outpost": + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + return parseOutpostAccessPointResource(a, resParts[1:]) default: return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} } } +// parseOutpostAccessPointResource attempts to parse the ARNs resource as an +// outpost access-point resource. +// +// Supported Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) { + // outpost accesspoint arn is only valid if service is s3-outposts + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + + if len(resParts) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + if len(resParts) < 3 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ + ARN: a, Reason: "access-point resource not set in Outpost ARN", + } + } + + resID := strings.TrimSpace(resParts[0]) + if len(resID) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + var outpostAccessPointARN = arn.OutpostAccessPointARN{} + switch resParts[1] { + case "accesspoint": + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return arn.OutpostAccessPointARN{}, err + } + // set access-point arn + outpostAccessPointARN.AccessPointARN = accessPointARN + default: + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"} + } + + // set outpost id + outpostAccessPointARN.OutpostID = resID + return outpostAccessPointARN, nil +} + func endpointHandler(req *request.Request) { endpoint, ok := req.Params.(endpointARNGetter) if !ok || !endpoint.hasEndpointARN() { @@ -37,29 +89,29 @@ func endpointHandler(req *request.Request) { resource, err := endpoint.getEndpointARN() if err != nil { - req.Error = newInvalidARNError(nil, err) + req.Error = 
s3shared.NewInvalidARNError(nil, err) return } - resReq := resourceRequest{ + resReq := s3shared.ResourceRequest{ Resource: resource, Request: req, } if resReq.IsCrossPartition() { - req.Error = newClientPartitionMismatchError(resource, + req.Error = s3shared.NewClientPartitionMismatchError(resource, req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) return } if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { - req.Error = newClientRegionMismatchError(resource, + req.Error = s3shared.NewClientRegionMismatchError(resource, req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) return } if resReq.HasCustomEndpoint() { - req.Error = newInvalidARNWithCustomEndpointError(resource, nil) + req.Error = s3shared.NewInvalidARNWithCustomEndpointError(resource, nil) return } @@ -69,47 +121,22 @@ func endpointHandler(req *request.Request) { if err != nil { req.Error = err } + case arn.OutpostAccessPointARN: + // outposts does not support FIPS regions + if resReq.ResourceConfiguredForFIPS() { + req.Error = s3shared.NewInvalidARNWithFIPSError(resource, nil) + return + } + + err = updateRequestOutpostAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } default: - req.Error = newInvalidARNError(resource, nil) + req.Error = s3shared.NewInvalidARNError(resource, nil) } } -type resourceRequest struct { - Resource arn.Resource - Request *request.Request -} - -func (r resourceRequest) ARN() awsarn.ARN { - return r.Resource.GetARN() -} - -func (r resourceRequest) AllowCrossRegion() bool { - return aws.BoolValue(r.Request.Config.S3UseARNRegion) -} - -func (r resourceRequest) UseFIPS() bool { - return isFIPS(aws.StringValue(r.Request.Config.Region)) -} - -func (r resourceRequest) IsCrossPartition() bool { - return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition -} - -func (r resourceRequest) IsCrossRegion() bool { - return isCrossRegion(r.Request, r.Resource.GetARN().Region) -} - -func (r resourceRequest) HasCustomEndpoint() bool { - return len(aws.StringValue(r.Request.Config.Endpoint)) > 0 -} - -func isFIPS(clientRegion string) bool { - return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips") -} -func isCrossRegion(req *request.Request, otherRegion string) bool { - return req.ClientInfo.SigningRegion != otherRegion -} - func updateBucketEndpointFromParams(r *request.Request) { bucket, ok := bucketNameFromReqParams(r.Params) if !ok { @@ -124,7 +151,7 @@ func updateBucketEndpointFromParams(r *request.Request) { func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error { // Accelerate not supported if aws.BoolValue(req.Config.S3UseAccelerate) { - return newClientConfiguredForAccelerateError(accessPoint, + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) } @@ -132,7 +159,7 @@ func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.Acce // are not supported. 
req.Config.DisableEndpointHostPrefix = aws.Bool(false) - if err := accessPointEndpointBuilder(accessPoint).Build(req); err != nil { + if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil { return err } @@ -141,93 +168,34 @@ func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.Acce return nil } -func removeBucketFromPath(u *url.URL) { - u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) - if u.Path == "" { - u.Path = "/" +func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) } -} - -type accessPointEndpointBuilder arn.AccessPointARN - -const ( - accessPointPrefixLabel = "accesspoint" - accountIDPrefixLabel = "accountID" - accesPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." -) -func (a accessPointEndpointBuilder) Build(req *request.Request) error { - resolveRegion := arn.AccessPointARN(a).Region - cfgRegion := aws.StringValue(req.Config.Region) - - if isFIPS(cfgRegion) { - if aws.BoolValue(req.Config.S3UseARNRegion) && isCrossRegion(req, resolveRegion) { - // FIPS with cross region is not supported, the SDK must fail - // because there is no well defined method for SDK to construct a - // correct FIPS endpoint. - return newClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a), - req.ClientInfo.PartitionID, cfgRegion, nil) - } - resolveRegion = cfgRegion + // Dualstack not supported + if aws.BoolValue(req.Config.UseDualStack) { + return s3shared.NewClientConfiguredForDualStackError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) } - endpoint, err := resolveRegionalEndpoint(req, resolveRegion) - if err != nil { - return newFailedToResolveEndpointError(arn.AccessPointARN(a), - req.ClientInfo.PartitionID, cfgRegion, err) - } + // Ignore the disable host prefix for access points since custom endpoints + // are not supported. 
+ req.Config.DisableEndpointHostPrefix = aws.Bool(false) - if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil { return err } - const serviceEndpointLabel = "s3-accesspoint" - - // dualstack provided by endpoint resolver - cfgHost := req.HTTPRequest.URL.Host - if strings.HasPrefix(cfgHost, "s3") { - req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:] - } - - protocol.HostPrefixBuilder{ - Prefix: accesPointPrefixTemplate, - LabelsFn: a.hostPrefixLabelValues, - }.Build(req) - - req.ClientInfo.SigningName = endpoint.SigningName - req.ClientInfo.SigningRegion = endpoint.SigningRegion - - err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) - if err != nil { - return newInvalidARNError(arn.AccessPointARN(a), err) - } - + removeBucketFromPath(req.HTTPRequest.URL) return nil } -func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { - return map[string]string{ - accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, - accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, - } -} - -func resolveRegionalEndpoint(r *request.Request, region string) (endpoints.ResolvedEndpoint, error) { - return r.Config.EndpointResolver.EndpointFor(EndpointsID, region, func(opts *endpoints.Options) { - opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) - opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) - opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint - }) -} - -func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { - endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL)) - - r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) - if err != nil { - return awserr.New(request.ErrCodeSerialization, - "failed to parse endpoint URL", err) +func removeBucketFromPath(u *url.URL) { + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" } - - return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go new file mode 100644 index 000000000..c1c77da9a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go @@ -0,0 +1,177 @@ +package s3 + +import ( + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + accessPointPrefixLabel = "accesspoint" + accountIDPrefixLabel = "accountID" + accessPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." + + outpostPrefixLabel = "outpost" + outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}." +) + +// accessPointEndpointBuilder represents the endpoint builder for access point arn +type accessPointEndpointBuilder arn.AccessPointARN + +// build builds the endpoint for corresponding access point arn +// +// For building an endpoint from access point arn, format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3" as signing name. 
+// +func (a accessPointEndpointBuilder) build(req *request.Request) error { + resolveService := arn.AccessPointARN(a).Service + resolveRegion := arn.AccessPointARN(a).Region + cfgRegion := aws.StringValue(req.Config.Region) + + if s3shared.IsFIPS(cfgRegion) { + if aws.BoolValue(req.Config.S3UseARNRegion) && s3shared.IsCrossRegion(req, resolveRegion) { + // FIPS with cross region is not supported, the SDK must fail + // because there is no well defined method for SDK to construct a + // correct FIPS endpoint. + return s3shared.NewClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, nil) + } + resolveRegion = cfgRegion + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, resolveService) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, cfgRegion, err) + } + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + const serviceEndpointLabel = "s3-accesspoint" + + // dual stack provided by endpoint resolver + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, "s3") { + req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:] + } + + protocol.HostPrefixBuilder{ + Prefix: accessPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + // signer redirection + redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.AccessPointARN(a), err) + } + + return nil +} + +func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, + } +} + +// outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn. +type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN + +// build builds an endpoint corresponding to the outpost access point arn. +// +// For building an endpoint from outpost access point arn, format used is: +// - Outpost access point endpoint format : {accesspointName}-{accountId}.{outpostId}.s3-outposts.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com +// +// Outpost AccessPoint Endpoint request are signed using "s3-outposts" as signing name. 
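// Illustrative sketch (not part of the vendored SDK sources): the builders in this
// file are exercised indirectly when a caller passes an access point ARN or an
// outpost access point ARN as the Bucket member of an ordinary S3 request. The
// region, ARNs, key, and the session value `sess` below are hypothetical
// placeholders, and the usual imports (aws, session, s3) are assumed.
//
//   svc := s3.New(sess, &aws.Config{Region: aws.String("us-west-2")})
//
//   // Expected to resolve to an access point style host such as
//   // myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com
//   _, err := svc.GetObject(&s3.GetObjectInput{
//       Bucket: aws.String("arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint"),
//       Key:    aws.String("example-key"),
//   })
//
//   // Expected to resolve to an outpost style host such as
//   // myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com
//   // and to be signed with the "s3-outposts" signing name.
//   _, err = svc.GetObject(&s3.GetObjectInput{
//       Bucket: aws.String("arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-01234567890123456/accesspoint/myaccesspoint"),
//       Key:    aws.String("example-key"),
//   })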
+// +func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := o.Region + resolveService := o.Service + + endpointsID := resolveService + if resolveService == "s3-outposts" { + endpointsID = "s3" + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, endpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(o, + req.ClientInfo.PartitionID, resolveRegion, err) + } + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + // add url host as s3-outposts + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, endpointsID) { + req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):] + } + + protocol.HostPrefixBuilder{ + Prefix: outpostAccessPointPrefixTemplate, + LabelsFn: o.hostPrefixLabelValues, + }.Build(req) + + // set the signing region, name to resolved names from ARN + redirectSigner(req, resolveService, resolveRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(o, err) + } + + return nil +} + +func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: o.AccessPointName, + accountIDPrefixLabel: o.AccountID, + outpostPrefixLabel: o.OutpostID, + } +} + +func resolveRegionalEndpoint(r *request.Request, region string, endpointsID string) (endpoints.ResolvedEndpoint, error) { + return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) { + opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) + opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) + opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + }) +} + +func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { + endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL)) + + r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to parse endpoint URL", err) + } + + return nil +} + +// redirectSigner sets signing name, signing region for a request +func redirectSigner(req *request.Request, signingName string, signingRegion string) { + req.ClientInfo.SigningName = signingName + req.ClientInfo.SigningRegion = signingRegion +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go deleted file mode 100644 index 9df03e78d..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_errors.go +++ /dev/null @@ -1,151 +0,0 @@ -package s3 - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/s3/internal/arn" -) - -const ( - invalidARNErrorErrCode = "InvalidARNError" - configurationErrorErrCode = "ConfigurationError" -) - -type invalidARNError struct { - message string - resource arn.Resource - origErr error -} - -func (e invalidARNError) Error() string { - var extra string - if e.resource != nil { - extra = "ARN: " + e.resource.String() - } - return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) -} - -func (e invalidARNError) Code() string { - return invalidARNErrorErrCode -} - -func (e invalidARNError) Message() string { - return e.message -} - -func (e invalidARNError) OrigErr() error { - return e.origErr -} - -func newInvalidARNError(resource arn.Resource, err error) invalidARNError { - return 
invalidARNError{ - message: "invalid ARN", - origErr: err, - resource: resource, - } -} - -func newInvalidARNWithCustomEndpointError(resource arn.Resource, err error) invalidARNError { - return invalidARNError{ - message: "resource ARN not supported with custom client endpoints", - origErr: err, - resource: resource, - } -} - -// ARN not supported for the target partition -func newInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) invalidARNError { - return invalidARNError{ - message: "resource ARN not supported for the target ARN partition", - origErr: err, - resource: resource, - } -} - -type configurationError struct { - message string - resource arn.Resource - clientPartitionID string - clientRegion string - origErr error -} - -func (e configurationError) Error() string { - extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", - e.resource, e.clientPartitionID, e.clientRegion) - - return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) -} - -func (e configurationError) Code() string { - return configurationErrorErrCode -} - -func (e configurationError) Message() string { - return e.message -} - -func (e configurationError) OrigErr() error { - return e.origErr -} - -func newClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { - return configurationError{ - message: "client partition does not match provided ARN partition", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -func newClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { - return configurationError{ - message: "client region does not match provided ARN region", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -func newFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { - return configurationError{ - message: "endpoint resolver failed to find an endpoint for the provided ARN region", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -func newClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { - return configurationError{ - message: "client configured for fips but cross-region resource ARN provided", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -func newClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { - return configurationError{ - message: "client configured for S3 Accelerate but is supported with resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} - -func newClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError { - return configurationError{ - message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN", - origErr: err, - resource: resource, - clientPartitionID: clientPartitionID, - clientRegion: clientRegion, - } -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go index 
49aeff16f..dd73d460c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -8,7 +8,7 @@ const ( // "BucketAlreadyExists". // // The requested bucket name is not available. The bucket namespace is shared - // by all users of the system. Please select a different name and try again. + // by all users of the system. Select a different name and try again. ErrCodeBucketAlreadyExists = "BucketAlreadyExists" // ErrCodeBucketAlreadyOwnedByYou for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index 7de13789d..b4c07b4d4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restxml" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go index b71c835de..57a0bd92c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -69,7 +69,7 @@ func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { return } - // In backwards compatiable, the header's value is not base64 encoded, + // In backwards compatible, the header's value is not base64 encoded, // and needs to be encoded and updated by the SDK's customizations. b64Key := base64.StdEncoding.EncodeToString([]byte(key)) r.Header.Set(keyHeader, b64Key) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go index f6a69aed1..247770e4c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -2,6 +2,7 @@ package s3 import ( "bytes" + "io" "io/ioutil" "net/http" @@ -24,17 +25,18 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) { r.HTTPResponse.Body = ioutil.NopCloser(body) defer body.Seek(0, sdkio.SeekStart) - if body.Len() == 0 { - // If there is no body don't attempt to parse the body. 
- return - } - unmarshalError(r) if err, ok := r.Error.(awserr.Error); ok && err != nil { - if err.Code() == request.ErrCodeSerialization { + if err.Code() == request.ErrCodeSerialization && + err.OrigErr() != io.EOF { r.Error = nil return } - r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + // if empty payload + if err.OrigErr() == io.EOF { + r.HTTPResponse.StatusCode = http.StatusInternalServerError + } else { + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index 5b63fac72..6eecf6691 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -1,6 +1,7 @@ package s3 import ( + "bytes" "encoding/xml" "fmt" "io" @@ -45,17 +46,24 @@ func unmarshalError(r *request.Request) { // Attempt to parse error from body if it is known var errResp xmlErrorResponse - err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) - if err == io.EOF { - // Only capture the error if an unmarshal error occurs that is not EOF, - // because S3 might send an error without a error message which causes - // the XML unmarshal to fail with EOF. - err = nil + var err error + if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 { + err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body) + } else { + err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) } + if err != nil { + var errorMsg string + if err == io.EOF { + errorMsg = "empty response payload" + } else { + errorMsg = "failed to unmarshal error message" + } + r.Error = awserr.NewRequestFailure( awserr.New(request.ErrCodeSerialization, - "failed to unmarshal error message", err), + errorMsg, err), r.HTTPResponse.StatusCode, r.RequestID, ) @@ -86,3 +94,21 @@ type RequestFailure interface { // Host ID is the S3 Host ID needed for debug, and contacting support HostID() string } + +// s3unmarshalXMLError is s3 specific xml error unmarshaler +// for 200 OK errors and response payloads. +// This function differs from the xmlUtil.UnmarshalXMLError +// func. It does not ignore the EOF error and passes it up. 
+// Related to bug fix for `s3 200 OK response with empty payload` +func s3unmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3control/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3control/api.go index fa5f473c8..f2c525aa0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3control/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3control/api.go @@ -9,6 +9,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/checksum" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restxml" ) @@ -52,7 +54,9 @@ func (c *S3Control) CreateAccessPointRequest(input *CreateAccessPointInput) (req output = &CreateAccessPointOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return @@ -60,7 +64,41 @@ func (c *S3Control) CreateAccessPointRequest(input *CreateAccessPointInput) (req // CreateAccessPoint API operation for AWS S3 Control. // -// Creates an access point and associates it with the specified bucket. +// Creates an access point and associates it with the specified bucket. For +// more information, see Managing Data Access with Amazon S3 Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Using this action with Amazon S3 on Outposts +// +// This action: +// +// * Requires a virtual private cloud (VPC) configuration as S3 on Outposts +// only supports VPC style access points. +// +// * Does not support ACL on S3 on Outposts buckets. +// +// * Does not support Public Access on S3 on Outposts buckets. +// +// * Does not support object lock for S3 on Outposts buckets. +// +// For more information, see Using Amazon S3 on Outposts (AmazonS3/latest/dev/S3onOutposts.html) +// in the Amazon Simple Storage Service Developer Guide . +// +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_CreateAccessPoint.html#API_control_CreateAccessPoint_Examples) +// section below. 
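// Illustrative sketch (not part of the generated service documentation): creating
// an access point on an S3 on Outposts bucket by passing the Outposts bucket ARN
// together with the VPC configuration described above. The account ID, ARNs,
// VPC ID, and the session value `sess` are hypothetical placeholders.
//
//   svc := s3control.New(sess)
//   _, err := svc.CreateAccessPoint(&s3control.CreateAccessPointInput{
//       AccountId: aws.String("012345678901"),
//       Name:      aws.String("example-outposts-accesspoint"),
//       Bucket:    aws.String("arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-01234567890123456/bucket/example-bucket"),
//       VpcConfiguration: &s3control.VpcConfiguration{
//           VpcId: aws.String("vpc-0123456789abcdef0"),
//       },
//   })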
+// +// The following actions are related to CreateAccessPoint: +// +// * GetAccessPoint (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html) +// +// * DeleteAccessPoint (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteAccessPoint.html) +// +// * ListAccessPoints (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_ListAccessPoints.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -90,6 +128,132 @@ func (c *S3Control) CreateAccessPointWithContext(ctx aws.Context, input *CreateA return out, req.Send() } +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBucketRequest method. +// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/CreateBucket +func (c *S3Control) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/v20180820/bucket/{name}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + output = &CreateBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// CreateBucket API operation for AWS S3 Control. +// +// +// This API operation creates an Amazon S3 on Outposts bucket. To create an +// S3 bucket, see Create Bucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// in the Amazon Simple Storage Service API. +// +// Creates a new Outposts bucket. By creating the bucket, you become the bucket +// owner. To create an Outposts bucket, you must have S3 on Outposts. For more +// information, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) +// in Amazon Simple Storage Service Developer Guide. +// +// Not every string is an acceptable bucket name. For information on bucket +// naming restrictions, see Working with Amazon S3 Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules). +// +// S3 on Outposts buckets do not support +// +// * ACLs. Instead, configure access point policies to manage access to buckets. +// +// * Public access. 
+// +// * Object Lock +// +// * Bucket Location constraint +// +// For an example of the request syntax for Amazon S3 on Outposts that uses +// the S3 on Outposts endpoint hostname prefix and outpost-id in your API request, +// see the Example (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_CreateBucket.html#API_control_CreateBucket_Examples) +// section below. +// +// The following actions are related to CreateBucket for Amazon S3 on Outposts: +// +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// * GetBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucket.html) +// +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucket.html) +// +// * CreateAccessPoint (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_CreateAccessPoint.html) +// +// * PutAccessPointPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutAccessPointPolicy.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation CreateBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBucketAlreadyExists "BucketAlreadyExists" +// The requested Outposts bucket name is not available. The bucket namespace +// is shared by all users of the AWS Outposts in this Region. Select a different +// name and try again. +// +// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// The Outposts bucket you tried to create already exists, and you own it. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/CreateBucket +func (c *S3Control) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateJob = "CreateJob" // CreateJobRequest generates a "aws/request.Request" representing the @@ -136,7 +300,23 @@ func (c *S3Control) CreateJobRequest(input *CreateJobInput) (req *request.Reques // CreateJob API operation for AWS S3 Control. // -// Creates an Amazon S3 batch operations job. +// S3 Batch Operations performs large-scale Batch Operations on Amazon S3 objects. +// Batch Operations can run a single operation or action on lists of Amazon +// S3 objects that you specify. For more information, see S3 Batch Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html) in +// the Amazon Simple Storage Service Developer Guide. +// +// This operation creates a S3 Batch Operations job. 
+// +// Related actions include: +// +// * DescribeJob (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) +// +// * ListJobs (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html) +// +// * UpdateJobPriority (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html) +// +// * UpdateJobStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -215,6 +395,9 @@ func (c *S3Control) DeleteAccessPointRequest(input *DeleteAccessPointInput) (req output = &DeleteAccessPointOutput{} req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) @@ -225,6 +408,22 @@ func (c *S3Control) DeleteAccessPointRequest(input *DeleteAccessPointInput) (req // // Deletes the specified access point. // +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the ARN, +// see the Example (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteAccessPoint.html#API_control_DeleteAccessPoint_Examples) +// section below. +// +// The following actions are related to DeleteAccessPoint: +// +// * CreateAccessPoint (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html) +// +// * GetAccessPoint (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html) +// +// * ListAccessPoints (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPoints.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -302,6 +501,20 @@ func (c *S3Control) DeleteAccessPointPolicyRequest(input *DeleteAccessPointPolic // // Deletes the access point policy for the specified access point. // +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteAccessPointPolicy.html#API_control_DeleteAccessPointPolicy_Examples) +// section below. 
+// +// The following actions are related to DeleteAccessPointPolicy: +// +// * PutAccessPointPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html) +// +// * GetAccessPointPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicy.html) +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -330,1226 +543,5634 @@ func (c *S3Control) DeleteAccessPointPolicyWithContext(ctx aws.Context, input *D return out, req.Send() } -const opDeletePublicAccessBlock = "DeletePublicAccessBlock" +const opDeleteBucket = "DeleteBucket" -// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the -// client's request for the DeletePublicAccessBlock operation. The "output" return +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock +// See DeleteBucket for more information on using the DeleteBucket // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DeletePublicAccessBlockRequest method. -// req, resp := client.DeletePublicAccessBlockRequest(params) +// // Example sending a request using the DeleteBucketRequest method. +// req, resp := client.DeleteBucketRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeletePublicAccessBlock -func (c *S3Control) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteBucket +func (c *S3Control) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { op := &request.Operation{ - Name: opDeletePublicAccessBlock, + Name: opDeleteBucket, HTTPMethod: "DELETE", - HTTPPath: "/v20180820/configuration/publicAccessBlock", + HTTPPath: "/v20180820/bucket/{name}", } if input == nil { - input = &DeletePublicAccessBlockInput{} + input = &DeleteBucketInput{} } - output = &DeletePublicAccessBlockOutput{} + output = &DeleteBucketOutput{} req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// DeletePublicAccessBlock API operation for AWS S3 Control. +// DeleteBucket API operation for AWS S3 Control. +// +// +// This API operation deletes an Amazon S3 on Outposts bucket. 
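// Illustrative sketch (not part of the generated service documentation): deleting
// an empty Outposts bucket through the S3 Control client by passing the Outposts
// bucket ARN. The account ID, ARN, and the session value `sess` are hypothetical
// placeholders.
//
//   svc := s3control.New(sess)
//   _, err := svc.DeleteBucket(&s3control.DeleteBucketInput{
//       AccountId: aws.String("012345678901"),
//       Bucket:    aws.String("arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-01234567890123456/bucket/example-bucket"),
//   })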
To delete an +// S3 bucket, see DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// in the Amazon Simple Storage Service API. +// +// Deletes the Amazon S3 on Outposts bucket. All objects (including all object +// versions and delete markers) in the bucket must be deleted before the bucket +// itself can be deleted. For more information, see Using Amazon S3 on Outposts +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) in Amazon +// Simple Storage Service Developer Guide. +// +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucket.html#API_control_DeleteBucket_Examples) +// section below. +// +// Related Resources +// +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) +// +// * GetBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucket.html) // -// Removes the PublicAccessBlock configuration for an Amazon Web Services account. +// * DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS S3 Control's -// API operation DeletePublicAccessBlock for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeletePublicAccessBlock -func (c *S3Control) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { - req, out := c.DeletePublicAccessBlockRequest(input) +// API operation DeleteBucket for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteBucket +func (c *S3Control) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) return out, req.Send() } -// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of +// DeleteBucketWithContext is the same as DeleteBucket with the addition of // the ability to pass a context and additional request options. // -// See DeletePublicAccessBlock for details on how to use this API operation. +// See DeleteBucket for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3Control) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { - req, out := c.DeletePublicAccessBlockRequest(input) +func (c *S3Control) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opDescribeJob = "DescribeJob" +const opDeleteBucketLifecycleConfiguration = "DeleteBucketLifecycleConfiguration" -// DescribeJobRequest generates a "aws/request.Request" representing the -// client's request for the DescribeJob operation. The "output" return +// DeleteBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycleConfiguration operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeJob for more information on using the DescribeJob +// See DeleteBucketLifecycleConfiguration for more information on using the DeleteBucketLifecycleConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeJobRequest method. -// req, resp := client.DescribeJobRequest(params) +// // Example sending a request using the DeleteBucketLifecycleConfigurationRequest method. +// req, resp := client.DeleteBucketLifecycleConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DescribeJob -func (c *S3Control) DescribeJobRequest(input *DescribeJobInput) (req *request.Request, output *DescribeJobOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteBucketLifecycleConfiguration +func (c *S3Control) DeleteBucketLifecycleConfigurationRequest(input *DeleteBucketLifecycleConfigurationInput) (req *request.Request, output *DeleteBucketLifecycleConfigurationOutput) { op := &request.Operation{ - Name: opDescribeJob, - HTTPMethod: "GET", - HTTPPath: "/v20180820/jobs/{id}", + Name: opDeleteBucketLifecycleConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/v20180820/bucket/{name}/lifecycleconfiguration", } if input == nil { - input = &DescribeJobInput{} + input = &DeleteBucketLifecycleConfigurationInput{} } - output = &DescribeJobOutput{} + output = &DeleteBucketLifecycleConfigurationOutput{} req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// DescribeJob API operation for AWS S3 Control. +// DeleteBucketLifecycleConfiguration API operation for AWS S3 Control. // -// Retrieves the configuration parameters and status for a batch operations -// job. // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// This API action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. 
+// To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// in the Amazon Simple Storage Service API. // -// See the AWS API reference guide for AWS S3 Control's -// API operation DescribeJob for usage and error information. +// Deletes the lifecycle configuration from the specified Outposts bucket. Amazon +// S3 on Outposts removes all the lifecycle configuration rules in the lifecycle +// subresource associated with the bucket. Your objects never expire, and Amazon +// S3 on Outposts no longer automatically deletes any objects on the basis of +// rules contained in the deleted lifecycle configuration. For more information, +// see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) +// in Amazon Simple Storage Service Developer Guide. // -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" +// To use this operation, you must have permission to perform the s3outposts:DeleteLifecycleConfiguration +// action. By default, the bucket owner has this permission and the Outposts +// bucket owner can grant this permission to others. // -// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucketLifecycleConfiguration.html#API_control_DeleteBucketLifecycleConfiguration_Examples) +// section below. // -// * ErrCodeNotFoundException "NotFoundException" +// For more information about object expiration, see Elements to Describe Lifecycle +// Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). // -// * ErrCodeInternalServiceException "InternalServiceException" +// Related actions include: // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DescribeJob -func (c *S3Control) DescribeJob(input *DescribeJobInput) (*DescribeJobOutput, error) { - req, out := c.DescribeJobRequest(input) +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketLifecycleConfiguration.html) +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketLifecycleConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation DeleteBucketLifecycleConfiguration for usage and error information. 
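// Illustrative sketch (not part of the generated service documentation): removing
// the lifecycle configuration from an Outposts bucket. The account ID, ARN, and
// the session value `sess` are hypothetical placeholders.
//
//   svc := s3control.New(sess)
//   _, err := svc.DeleteBucketLifecycleConfiguration(&s3control.DeleteBucketLifecycleConfigurationInput{
//       AccountId: aws.String("012345678901"),
//       Bucket:    aws.String("arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-01234567890123456/bucket/example-bucket"),
//   })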
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteBucketLifecycleConfiguration +func (c *S3Control) DeleteBucketLifecycleConfiguration(input *DeleteBucketLifecycleConfigurationInput) (*DeleteBucketLifecycleConfigurationOutput, error) { + req, out := c.DeleteBucketLifecycleConfigurationRequest(input) return out, req.Send() } -// DescribeJobWithContext is the same as DescribeJob with the addition of +// DeleteBucketLifecycleConfigurationWithContext is the same as DeleteBucketLifecycleConfiguration with the addition of // the ability to pass a context and additional request options. // -// See DescribeJob for details on how to use this API operation. +// See DeleteBucketLifecycleConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3Control) DescribeJobWithContext(ctx aws.Context, input *DescribeJobInput, opts ...request.Option) (*DescribeJobOutput, error) { - req, out := c.DescribeJobRequest(input) +func (c *S3Control) DeleteBucketLifecycleConfigurationWithContext(ctx aws.Context, input *DeleteBucketLifecycleConfigurationInput, opts ...request.Option) (*DeleteBucketLifecycleConfigurationOutput, error) { + req, out := c.DeleteBucketLifecycleConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetAccessPoint = "GetAccessPoint" +const opDeleteBucketPolicy = "DeleteBucketPolicy" -// GetAccessPointRequest generates a "aws/request.Request" representing the -// client's request for the GetAccessPoint operation. The "output" return +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetAccessPoint for more information on using the GetAccessPoint +// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetAccessPointRequest method. -// req, resp := client.GetAccessPointRequest(params) +// // Example sending a request using the DeleteBucketPolicyRequest method. 
+// req, resp := client.DeleteBucketPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPoint -func (c *S3Control) GetAccessPointRequest(input *GetAccessPointInput) (req *request.Request, output *GetAccessPointOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteBucketPolicy +func (c *S3Control) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { op := &request.Operation{ - Name: opGetAccessPoint, - HTTPMethod: "GET", - HTTPPath: "/v20180820/accesspoint/{name}", + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/v20180820/bucket/{name}/policy", } if input == nil { - input = &GetAccessPointInput{} + input = &DeleteBucketPolicyInput{} } - output = &GetAccessPointOutput{} + output = &DeleteBucketPolicyOutput{} req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// GetAccessPoint API operation for AWS S3 Control. +// DeleteBucketPolicy API operation for AWS S3 Control. // -// Returns configuration information about the specified access point. +// +// This API operation deletes an Amazon S3 on Outposts bucket policy. To delete +// an S3 bucket policy, see DeleteBucketPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketPolicy.html) +// in the Amazon Simple Storage Service API. +// +// This implementation of the DELETE operation uses the policy subresource to +// delete the policy of a specified Amazon S3 on Outposts bucket. If you are +// using an identity other than the root user of the AWS account that owns the +// bucket, the calling identity must have the s3outposts:DeleteBucketPolicy +// permissions on the specified Outposts bucket and belong to the bucket owner's +// account to use this operation. For more information, see Using Amazon S3 +// on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) +// in Amazon Simple Storage Service Developer Guide. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. 
For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucketPolicy.html#API_control_DeleteBucketPolicy_Examples) +// section below. +// +// The following actions are related to DeleteBucketPolicy: +// +// * GetBucketPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketPolicy.html) +// +// * PutBucketPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutBucketPolicy.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS S3 Control's -// API operation GetAccessPoint for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPoint -func (c *S3Control) GetAccessPoint(input *GetAccessPointInput) (*GetAccessPointOutput, error) { - req, out := c.GetAccessPointRequest(input) +// API operation DeleteBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteBucketPolicy +func (c *S3Control) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) return out, req.Send() } -// GetAccessPointWithContext is the same as GetAccessPoint with the addition of +// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of // the ability to pass a context and additional request options. // -// See GetAccessPoint for details on how to use this API operation. +// See DeleteBucketPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3Control) GetAccessPointWithContext(ctx aws.Context, input *GetAccessPointInput, opts ...request.Option) (*GetAccessPointOutput, error) { - req, out := c.GetAccessPointRequest(input) +func (c *S3Control) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetAccessPointPolicy = "GetAccessPointPolicy" +const opDeleteBucketTagging = "DeleteBucketTagging" -// GetAccessPointPolicyRequest generates a "aws/request.Request" representing the -// client's request for the GetAccessPointPolicy operation. The "output" return +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetAccessPointPolicy for more information on using the GetAccessPointPolicy +// See DeleteBucketTagging for more information on using the DeleteBucketTagging // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetAccessPointPolicyRequest method. -// req, resp := client.GetAccessPointPolicyRequest(params) +// // Example sending a request using the DeleteBucketTaggingRequest method. +// req, resp := client.DeleteBucketTaggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointPolicy -func (c *S3Control) GetAccessPointPolicyRequest(input *GetAccessPointPolicyInput) (req *request.Request, output *GetAccessPointPolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteBucketTagging +func (c *S3Control) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { op := &request.Operation{ - Name: opGetAccessPointPolicy, - HTTPMethod: "GET", - HTTPPath: "/v20180820/accesspoint/{name}/policy", + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/v20180820/bucket/{name}/tagging", } if input == nil { - input = &GetAccessPointPolicyInput{} + input = &DeleteBucketTaggingInput{} } - output = &GetAccessPointPolicyOutput{} + output = &DeleteBucketTaggingOutput{} req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// GetAccessPointPolicy API operation for AWS S3 Control. +// DeleteBucketTagging API operation for AWS S3 Control. // -// Returns the access point policy associated with the specified access point. +// +// This API operation deletes an Amazon S3 on Outposts bucket's tags. To delete +// an S3 bucket tags, see DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// in the Amazon Simple Storage Service API. +// +// Deletes the tags from the Outposts bucket. For more information, see Using +// Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) +// in Amazon Simple Storage Service Developer Guide. +// +// To use this operation, you must have permission to perform the PutBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucketTagging.html#API_control_DeleteBucketTagging_Examples) +// section below. 
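// Illustrative sketch (not part of the generated service documentation): removing
// the tag set from an Outposts bucket. The account ID, ARN, and the session value
// `sess` are hypothetical placeholders.
//
//   svc := s3control.New(sess)
//   _, err := svc.DeleteBucketTagging(&s3control.DeleteBucketTaggingInput{
//       AccountId: aws.String("012345678901"),
//       Bucket:    aws.String("arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-01234567890123456/bucket/example-bucket"),
//   })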
+// +// The following actions are related to DeleteBucketTagging: +// +// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketTagging.html) +// +// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketTagging.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS S3 Control's -// API operation GetAccessPointPolicy for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointPolicy -func (c *S3Control) GetAccessPointPolicy(input *GetAccessPointPolicyInput) (*GetAccessPointPolicyOutput, error) { - req, out := c.GetAccessPointPolicyRequest(input) +// API operation DeleteBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteBucketTagging +func (c *S3Control) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) return out, req.Send() } -// GetAccessPointPolicyWithContext is the same as GetAccessPointPolicy with the addition of +// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of // the ability to pass a context and additional request options. // -// See GetAccessPointPolicy for details on how to use this API operation. +// See DeleteBucketTagging for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3Control) GetAccessPointPolicyWithContext(ctx aws.Context, input *GetAccessPointPolicyInput, opts ...request.Option) (*GetAccessPointPolicyOutput, error) { - req, out := c.GetAccessPointPolicyRequest(input) +func (c *S3Control) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetAccessPointPolicyStatus = "GetAccessPointPolicyStatus" +const opDeleteJobTagging = "DeleteJobTagging" -// GetAccessPointPolicyStatusRequest generates a "aws/request.Request" representing the -// client's request for the GetAccessPointPolicyStatus operation. The "output" return +// DeleteJobTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteJobTagging operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetAccessPointPolicyStatus for more information on using the GetAccessPointPolicyStatus +// See DeleteJobTagging for more information on using the DeleteJobTagging // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// // -// // Example sending a request using the GetAccessPointPolicyStatusRequest method. -// req, resp := client.GetAccessPointPolicyStatusRequest(params) +// // Example sending a request using the DeleteJobTaggingRequest method. +// req, resp := client.DeleteJobTaggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointPolicyStatus -func (c *S3Control) GetAccessPointPolicyStatusRequest(input *GetAccessPointPolicyStatusInput) (req *request.Request, output *GetAccessPointPolicyStatusOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteJobTagging +func (c *S3Control) DeleteJobTaggingRequest(input *DeleteJobTaggingInput) (req *request.Request, output *DeleteJobTaggingOutput) { op := &request.Operation{ - Name: opGetAccessPointPolicyStatus, - HTTPMethod: "GET", - HTTPPath: "/v20180820/accesspoint/{name}/policyStatus", + Name: opDeleteJobTagging, + HTTPMethod: "DELETE", + HTTPPath: "/v20180820/jobs/{id}/tagging", } if input == nil { - input = &GetAccessPointPolicyStatusInput{} + input = &DeleteJobTaggingInput{} } - output = &GetAccessPointPolicyStatusOutput{} + output = &DeleteJobTaggingOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// GetAccessPointPolicyStatus API operation for AWS S3 Control. +// DeleteJobTagging API operation for AWS S3 Control. // -// Indicates whether the specified access point currently has a policy that -// allows public access. For more information about public access through access -// points, see Managing Data Access with Amazon S3 Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html) +// Removes the entire tag set from the specified S3 Batch Operations job. To +// use this operation, you must have permission to perform the s3:DeleteJobTagging +// action. For more information, see Controlling access and labeling jobs using +// tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags) // in the Amazon Simple Storage Service Developer Guide. // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// Related actions include: +// +// * CreateJob (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) +// +// * GetJobTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html) +// +// * PutJobTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutJobTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // // See the AWS API reference guide for AWS S3 Control's -// API operation GetAccessPointPolicyStatus for usage and error information. 
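Likewise not part of the patch: a hedged sketch of the DeleteJobTagging call described above, using the awserr.Error type assertion that the doc comment recommends for inspecting the listed error codes. The job ID is a placeholder, and the AccountId/JobId field names are assumed from the operation's input shape.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3control"
    )

    func main() {
        svc := s3control.New(session.Must(session.NewSession()))

        _, err := svc.DeleteJobTagging(&s3control.DeleteJobTaggingInput{
            AccountId: aws.String("123456789012"),                          // placeholder
            JobId:     aws.String("00000000-0000-0000-0000-000000000000"), // placeholder Batch Operations job ID
        })
        if err != nil {
            // Surfaces codes such as InternalServiceException or NotFoundException listed above.
            if aerr, ok := err.(awserr.Error); ok {
                log.Fatalf("%s: %s", aerr.Code(), aerr.Message())
            }
            log.Fatal(err)
        }
        fmt.Println("job tag set removed")
    }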
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointPolicyStatus -func (c *S3Control) GetAccessPointPolicyStatus(input *GetAccessPointPolicyStatusInput) (*GetAccessPointPolicyStatusOutput, error) { - req, out := c.GetAccessPointPolicyStatusRequest(input) +// API operation DeleteJobTagging for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +// * ErrCodeNotFoundException "NotFoundException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeleteJobTagging +func (c *S3Control) DeleteJobTagging(input *DeleteJobTaggingInput) (*DeleteJobTaggingOutput, error) { + req, out := c.DeleteJobTaggingRequest(input) return out, req.Send() } -// GetAccessPointPolicyStatusWithContext is the same as GetAccessPointPolicyStatus with the addition of +// DeleteJobTaggingWithContext is the same as DeleteJobTagging with the addition of // the ability to pass a context and additional request options. // -// See GetAccessPointPolicyStatus for details on how to use this API operation. +// See DeleteJobTagging for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3Control) GetAccessPointPolicyStatusWithContext(ctx aws.Context, input *GetAccessPointPolicyStatusInput, opts ...request.Option) (*GetAccessPointPolicyStatusOutput, error) { - req, out := c.GetAccessPointPolicyStatusRequest(input) +func (c *S3Control) DeleteJobTaggingWithContext(ctx aws.Context, input *DeleteJobTaggingInput, opts ...request.Option) (*DeleteJobTaggingOutput, error) { + req, out := c.DeleteJobTaggingRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetPublicAccessBlock = "GetPublicAccessBlock" +const opDeletePublicAccessBlock = "DeletePublicAccessBlock" -// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the -// client's request for the GetPublicAccessBlock operation. The "output" return +// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the DeletePublicAccessBlock operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock +// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetPublicAccessBlockRequest method. -// req, resp := client.GetPublicAccessBlockRequest(params) +// // Example sending a request using the DeletePublicAccessBlockRequest method. 
+// req, resp := client.DeletePublicAccessBlockRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetPublicAccessBlock -func (c *S3Control) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeletePublicAccessBlock +func (c *S3Control) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { op := &request.Operation{ - Name: opGetPublicAccessBlock, - HTTPMethod: "GET", + Name: opDeletePublicAccessBlock, + HTTPMethod: "DELETE", HTTPPath: "/v20180820/configuration/publicAccessBlock", } if input == nil { - input = &GetPublicAccessBlockInput{} + input = &DeletePublicAccessBlockInput{} } - output = &GetPublicAccessBlockOutput{} + output = &DeletePublicAccessBlockOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// GetPublicAccessBlock API operation for AWS S3 Control. +// DeletePublicAccessBlock API operation for AWS S3 Control. +// +// Removes the PublicAccessBlock configuration for an AWS account. For more +// information, see Using Amazon S3 block public access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html). +// +// Related actions include: // -// Retrieves the PublicAccessBlock configuration for an Amazon Web Services -// account. +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetPublicAccessBlock.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutPublicAccessBlock.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS S3 Control's -// API operation GetPublicAccessBlock for usage and error information. -// -// Returned Error Codes: -// * ErrCodeNoSuchPublicAccessBlockConfiguration "NoSuchPublicAccessBlockConfiguration" -// Amazon S3 throws this exception if you make a GetPublicAccessBlock request -// against an account that doesn't have a PublicAccessBlockConfiguration set. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetPublicAccessBlock -func (c *S3Control) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { - req, out := c.GetPublicAccessBlockRequest(input) +// API operation DeletePublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DeletePublicAccessBlock +func (c *S3Control) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) return out, req.Send() } -// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of +// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of // the ability to pass a context and additional request options. 
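A short illustrative sketch, separate from the vendored diff, of the DeletePublicAccessBlock operation documented above. Because the PublicAccessBlock configuration is account-wide, the input carries only the account ID, which is a placeholder here.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3control"
    )

    func main() {
        svc := s3control.New(session.Must(session.NewSession()))

        // Removes the account-level PublicAccessBlock configuration.
        if _, err := svc.DeletePublicAccessBlock(&s3control.DeletePublicAccessBlockInput{
            AccountId: aws.String("123456789012"), // placeholder
        }); err != nil {
            log.Fatal(err)
        }
    }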
// -// See GetPublicAccessBlock for details on how to use this API operation. +// See DeletePublicAccessBlock for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3Control) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { - req, out := c.GetPublicAccessBlockRequest(input) +func (c *S3Control) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListAccessPoints = "ListAccessPoints" +const opDescribeJob = "DescribeJob" -// ListAccessPointsRequest generates a "aws/request.Request" representing the -// client's request for the ListAccessPoints operation. The "output" return +// DescribeJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeJob operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListAccessPoints for more information on using the ListAccessPoints +// See DescribeJob for more information on using the DescribeJob // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListAccessPointsRequest method. -// req, resp := client.ListAccessPointsRequest(params) +// // Example sending a request using the DescribeJobRequest method. +// req, resp := client.DescribeJobRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListAccessPoints -func (c *S3Control) ListAccessPointsRequest(input *ListAccessPointsInput) (req *request.Request, output *ListAccessPointsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DescribeJob +func (c *S3Control) DescribeJobRequest(input *DescribeJobInput) (req *request.Request, output *DescribeJobOutput) { op := &request.Operation{ - Name: opListAccessPoints, + Name: opDescribeJob, HTTPMethod: "GET", - HTTPPath: "/v20180820/accesspoint", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/v20180820/jobs/{id}", } if input == nil { - input = &ListAccessPointsInput{} + input = &DescribeJobInput{} } - output = &ListAccessPointsOutput{} + output = &DescribeJobOutput{} req = c.newRequest(op, input, output) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// ListAccessPoints API operation for AWS S3 Control. +// DescribeJob API operation for AWS S3 Control. 
// -// Returns a list of the access points currently associated with the specified -// bucket. You can retrieve up to 1000 access points per call. If the specified -// bucket has more than 1000 access points (or the number specified in maxResults, -// whichever is less), then the response will include a continuation token that -// you can use to list the additional access points. +// Retrieves the configuration parameters and status for a Batch Operations +// job. For more information, see S3 Batch Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related actions include: +// +// * CreateJob (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) +// +// * ListJobs (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html) +// +// * UpdateJobPriority (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html) +// +// * UpdateJobStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS S3 Control's -// API operation ListAccessPoints for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListAccessPoints -func (c *S3Control) ListAccessPoints(input *ListAccessPointsInput) (*ListAccessPointsOutput, error) { - req, out := c.ListAccessPointsRequest(input) +// API operation DescribeJob for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +// * ErrCodeNotFoundException "NotFoundException" +// +// * ErrCodeInternalServiceException "InternalServiceException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/DescribeJob +func (c *S3Control) DescribeJob(input *DescribeJobInput) (*DescribeJobOutput, error) { + req, out := c.DescribeJobRequest(input) return out, req.Send() } -// ListAccessPointsWithContext is the same as ListAccessPoints with the addition of +// DescribeJobWithContext is the same as DescribeJob with the addition of // the ability to pass a context and additional request options. // -// See ListAccessPoints for details on how to use this API operation. +// See DescribeJob for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3Control) ListAccessPointsWithContext(ctx aws.Context, input *ListAccessPointsInput, opts ...request.Option) (*ListAccessPointsOutput, error) { - req, out := c.ListAccessPointsRequest(input) +func (c *S3Control) DescribeJobWithContext(ctx aws.Context, input *DescribeJobInput, opts ...request.Option) (*DescribeJobOutput, error) { + req, out := c.DescribeJobRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListAccessPointsPages iterates over the pages of a ListAccessPoints operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. 
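For orientation only, not taken from the patch: a sketch of DescribeJob, whose documentation above replaces the removed ListAccessPoints text. The account and job IDs are placeholders, and the example prints the whole response rather than naming specific JobDescriptor fields.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3control"
    )

    func main() {
        svc := s3control.New(session.Must(session.NewSession()))

        out, err := svc.DescribeJob(&s3control.DescribeJobInput{
            AccountId: aws.String("123456789012"),                          // placeholder
            JobId:     aws.String("00000000-0000-0000-0000-000000000000"), // placeholder
        })
        if err != nil {
            log.Fatal(err)
        }
        // The response carries the job's configuration parameters and status.
        fmt.Println(out)
    }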
-// -// See ListAccessPoints method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListAccessPoints operation. -// pageNum := 0 -// err := client.ListAccessPointsPages(params, -// func(page *s3control.ListAccessPointsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3Control) ListAccessPointsPages(input *ListAccessPointsInput, fn func(*ListAccessPointsOutput, bool) bool) error { - return c.ListAccessPointsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListAccessPointsPagesWithContext same as ListAccessPointsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3Control) ListAccessPointsPagesWithContext(ctx aws.Context, input *ListAccessPointsInput, fn func(*ListAccessPointsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListAccessPointsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListAccessPointsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListAccessPointsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListJobs = "ListJobs" +const opGetAccessPoint = "GetAccessPoint" -// ListJobsRequest generates a "aws/request.Request" representing the -// client's request for the ListJobs operation. The "output" return +// GetAccessPointRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessPoint operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListJobs for more information on using the ListJobs +// See GetAccessPoint for more information on using the GetAccessPoint // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListJobsRequest method. -// req, resp := client.ListJobsRequest(params) +// // Example sending a request using the GetAccessPointRequest method. 
+// req, resp := client.GetAccessPointRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListJobs -func (c *S3Control) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPoint +func (c *S3Control) GetAccessPointRequest(input *GetAccessPointInput) (req *request.Request, output *GetAccessPointOutput) { op := &request.Operation{ - Name: opListJobs, + Name: opGetAccessPoint, HTTPMethod: "GET", - HTTPPath: "/v20180820/jobs", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, + HTTPPath: "/v20180820/accesspoint/{name}", } if input == nil { - input = &ListJobsInput{} + input = &GetAccessPointInput{} } - output = &ListJobsOutput{} + output = &GetAccessPointOutput{} req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// ListJobs API operation for AWS S3 Control. +// GetAccessPoint API operation for AWS S3 Control. // -// Lists current jobs and jobs that have ended within the last 30 days for the -// AWS account making the request. +// Returns configuration information about the specified access point. // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetAccessPoint.html#API_control_GetAccessPoint_Examples) +// section below. // -// See the AWS API reference guide for AWS S3 Control's -// API operation ListJobs for usage and error information. +// The following actions are related to GetAccessPoint: // -// Returned Error Codes: -// * ErrCodeInvalidRequestException "InvalidRequestException" +// * CreateAccessPoint (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html) // -// * ErrCodeInternalServiceException "InternalServiceException" +// * DeleteAccessPoint (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html) // -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// * ListAccessPoints (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListAccessPoints.html) // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListJobs -func (c *S3Control) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) { - req, out := c.ListJobsRequest(input) +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation GetAccessPoint for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPoint +func (c *S3Control) GetAccessPoint(input *GetAccessPointInput) (*GetAccessPointOutput, error) { + req, out := c.GetAccessPointRequest(input) return out, req.Send() } -// ListJobsWithContext is the same as ListJobs with the addition of +// GetAccessPointWithContext is the same as GetAccessPoint with the addition of // the ability to pass a context and additional request options. // -// See ListJobs for details on how to use this API operation. +// See GetAccessPoint for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3Control) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts ...request.Option) (*ListJobsOutput, error) { - req, out := c.ListJobsRequest(input) +func (c *S3Control) GetAccessPointWithContext(ctx aws.Context, input *GetAccessPointInput, opts ...request.Option) (*GetAccessPointOutput, error) { + req, out := c.GetAccessPointRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListJobsPages iterates over the pages of a ListJobs operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListJobs method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListJobs operation. -// pageNum := 0 -// err := client.ListJobsPages(params, -// func(page *s3control.ListJobsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *S3Control) ListJobsPages(input *ListJobsInput, fn func(*ListJobsOutput, bool) bool) error { - return c.ListJobsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListJobsPagesWithContext same as ListJobsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *S3Control) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInput, fn func(*ListJobsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListJobsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListJobsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opPutAccessPointPolicy = "PutAccessPointPolicy" +const opGetAccessPointPolicy = "GetAccessPointPolicy" -// PutAccessPointPolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutAccessPointPolicy operation. 
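Again outside the patch itself: a minimal sketch of the GetAccessPoint operation documented just above, assuming the AccountId and Name members of GetAccessPointInput; both values are placeholders.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3control"
    )

    func main() {
        svc := s3control.New(session.Must(session.NewSession()))

        out, err := svc.GetAccessPoint(&s3control.GetAccessPointInput{
            AccountId: aws.String("123456789012"),         // placeholder
            Name:      aws.String("example-access-point"), // placeholder access point name
        })
        if err != nil {
            log.Fatal(err)
        }
        // The response describes the access point (bucket, network origin, and so on).
        fmt.Println(out)
    }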
The "output" return +// GetAccessPointPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessPointPolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutAccessPointPolicy for more information on using the PutAccessPointPolicy +// See GetAccessPointPolicy for more information on using the GetAccessPointPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutAccessPointPolicyRequest method. -// req, resp := client.PutAccessPointPolicyRequest(params) +// // Example sending a request using the GetAccessPointPolicyRequest method. +// req, resp := client.GetAccessPointPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutAccessPointPolicy -func (c *S3Control) PutAccessPointPolicyRequest(input *PutAccessPointPolicyInput) (req *request.Request, output *PutAccessPointPolicyOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointPolicy +func (c *S3Control) GetAccessPointPolicyRequest(input *GetAccessPointPolicyInput) (req *request.Request, output *GetAccessPointPolicyOutput) { op := &request.Operation{ - Name: opPutAccessPointPolicy, - HTTPMethod: "PUT", + Name: opGetAccessPointPolicy, + HTTPMethod: "GET", HTTPPath: "/v20180820/accesspoint/{name}/policy", } if input == nil { - input = &PutAccessPointPolicyInput{} + input = &GetAccessPointPolicyInput{} } - output = &PutAccessPointPolicyOutput{} + output = &GetAccessPointPolicyOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// PutAccessPointPolicy API operation for AWS S3 Control. +// GetAccessPointPolicy API operation for AWS S3 Control. // -// Associates an access policy with the specified access point. Each access -// point can have only one policy, so a request made to this API replaces any -// existing policy associated with the specified access point. +// Returns the access point policy associated with the specified access point. +// +// The following actions are related to GetAccessPointPolicy: +// +// * PutAccessPointPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutAccessPointPolicy.html) +// +// * DeleteAccessPointPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicy.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS S3 Control's -// API operation PutAccessPointPolicy for usage and error information. 
-// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutAccessPointPolicy -func (c *S3Control) PutAccessPointPolicy(input *PutAccessPointPolicyInput) (*PutAccessPointPolicyOutput, error) { - req, out := c.PutAccessPointPolicyRequest(input) +// API operation GetAccessPointPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointPolicy +func (c *S3Control) GetAccessPointPolicy(input *GetAccessPointPolicyInput) (*GetAccessPointPolicyOutput, error) { + req, out := c.GetAccessPointPolicyRequest(input) return out, req.Send() } -// PutAccessPointPolicyWithContext is the same as PutAccessPointPolicy with the addition of +// GetAccessPointPolicyWithContext is the same as GetAccessPointPolicy with the addition of // the ability to pass a context and additional request options. // -// See PutAccessPointPolicy for details on how to use this API operation. +// See GetAccessPointPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3Control) PutAccessPointPolicyWithContext(ctx aws.Context, input *PutAccessPointPolicyInput, opts ...request.Option) (*PutAccessPointPolicyOutput, error) { - req, out := c.PutAccessPointPolicyRequest(input) +func (c *S3Control) GetAccessPointPolicyWithContext(ctx aws.Context, input *GetAccessPointPolicyInput, opts ...request.Option) (*GetAccessPointPolicyOutput, error) { + req, out := c.GetAccessPointPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutPublicAccessBlock = "PutPublicAccessBlock" +const opGetAccessPointPolicyStatus = "GetAccessPointPolicyStatus" -// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the -// client's request for the PutPublicAccessBlock operation. The "output" return +// GetAccessPointPolicyStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessPointPolicyStatus operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock +// See GetAccessPointPolicyStatus for more information on using the GetAccessPointPolicyStatus // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutPublicAccessBlockRequest method. -// req, resp := client.PutPublicAccessBlockRequest(params) +// // Example sending a request using the GetAccessPointPolicyStatusRequest method. 
+// req, resp := client.GetAccessPointPolicyStatusRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutPublicAccessBlock -func (c *S3Control) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointPolicyStatus +func (c *S3Control) GetAccessPointPolicyStatusRequest(input *GetAccessPointPolicyStatusInput) (req *request.Request, output *GetAccessPointPolicyStatusOutput) { op := &request.Operation{ - Name: opPutPublicAccessBlock, - HTTPMethod: "PUT", - HTTPPath: "/v20180820/configuration/publicAccessBlock", + Name: opGetAccessPointPolicyStatus, + HTTPMethod: "GET", + HTTPPath: "/v20180820/accesspoint/{name}/policyStatus", } if input == nil { - input = &PutPublicAccessBlockInput{} + input = &GetAccessPointPolicyStatusInput{} } - output = &PutPublicAccessBlockOutput{} + output = &GetAccessPointPolicyStatusOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// PutPublicAccessBlock API operation for AWS S3 Control. +// GetAccessPointPolicyStatus API operation for AWS S3 Control. // -// Creates or modifies the PublicAccessBlock configuration for an Amazon Web -// Services account. +// Indicates whether the specified access point currently has a policy that +// allows public access. For more information about public access through access +// points, see Managing Data Access with Amazon S3 Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html) +// in the Amazon Simple Storage Service Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS S3 Control's -// API operation PutPublicAccessBlock for usage and error information. -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutPublicAccessBlock -func (c *S3Control) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { - req, out := c.PutPublicAccessBlockRequest(input) +// API operation GetAccessPointPolicyStatus for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetAccessPointPolicyStatus +func (c *S3Control) GetAccessPointPolicyStatus(input *GetAccessPointPolicyStatusInput) (*GetAccessPointPolicyStatusOutput, error) { + req, out := c.GetAccessPointPolicyStatusRequest(input) return out, req.Send() } -// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// GetAccessPointPolicyStatusWithContext is the same as GetAccessPointPolicyStatus with the addition of // the ability to pass a context and additional request options. // -// See PutPublicAccessBlock for details on how to use this API operation. +// See GetAccessPointPolicyStatus for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3Control) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { - req, out := c.PutPublicAccessBlockRequest(input) +func (c *S3Control) GetAccessPointPolicyStatusWithContext(ctx aws.Context, input *GetAccessPointPolicyStatusInput, opts ...request.Option) (*GetAccessPointPolicyStatusOutput, error) { + req, out := c.GetAccessPointPolicyStatusRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateJobPriority = "UpdateJobPriority" +const opGetBucket = "GetBucket" -// UpdateJobPriorityRequest generates a "aws/request.Request" representing the -// client's request for the UpdateJobPriority operation. The "output" return +// GetBucketRequest generates a "aws/request.Request" representing the +// client's request for the GetBucket operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateJobPriority for more information on using the UpdateJobPriority +// See GetBucket for more information on using the GetBucket // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateJobPriorityRequest method. -// req, resp := client.UpdateJobPriorityRequest(params) +// // Example sending a request using the GetBucketRequest method. +// req, resp := client.GetBucketRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/UpdateJobPriority -func (c *S3Control) UpdateJobPriorityRequest(input *UpdateJobPriorityInput) (req *request.Request, output *UpdateJobPriorityOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetBucket +func (c *S3Control) GetBucketRequest(input *GetBucketInput) (req *request.Request, output *GetBucketOutput) { op := &request.Operation{ - Name: opUpdateJobPriority, - HTTPMethod: "POST", - HTTPPath: "/v20180820/jobs/{id}/priority", + Name: opGetBucket, + HTTPMethod: "GET", + HTTPPath: "/v20180820/bucket/{name}", } if input == nil { - input = &UpdateJobPriorityInput{} + input = &GetBucketInput{} } - output = &UpdateJobPriorityOutput{} + output = &GetBucketOutput{} req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// UpdateJobPriority API operation for AWS S3 Control. -// -// Updates an existing job's priority. +// GetBucket API operation for AWS S3 Control. // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
+// Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon +// S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) +// in the Amazon Simple Storage Service Developer Guide. // -// See the AWS API reference guide for AWS S3 Control's -// API operation UpdateJobPriority for usage and error information. +// The following actions are related to GetBucket for Amazon S3 on Outposts: // -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" +// * PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) // -// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// * CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_CreateBucket.html) // -// * ErrCodeNotFoundException "NotFoundException" +// * DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_DeleteBucket.html) // -// * ErrCodeInternalServiceException "InternalServiceException" +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/UpdateJobPriority -func (c *S3Control) UpdateJobPriority(input *UpdateJobPriorityInput) (*UpdateJobPriorityOutput, error) { - req, out := c.UpdateJobPriorityRequest(input) +// See the AWS API reference guide for AWS S3 Control's +// API operation GetBucket for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetBucket +func (c *S3Control) GetBucket(input *GetBucketInput) (*GetBucketOutput, error) { + req, out := c.GetBucketRequest(input) return out, req.Send() } -// UpdateJobPriorityWithContext is the same as UpdateJobPriority with the addition of +// GetBucketWithContext is the same as GetBucket with the addition of // the ability to pass a context and additional request options. // -// See UpdateJobPriority for details on how to use this API operation. +// See GetBucket for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3Control) UpdateJobPriorityWithContext(ctx aws.Context, input *UpdateJobPriorityInput, opts ...request.Option) (*UpdateJobPriorityOutput, error) { - req, out := c.UpdateJobPriorityRequest(input) +func (c *S3Control) GetBucketWithContext(ctx aws.Context, input *GetBucketInput, opts ...request.Option) (*GetBucketOutput, error) { + req, out := c.GetBucketRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateJobStatus = "UpdateJobStatus" +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" -// UpdateJobStatusRequest generates a "aws/request.Request" representing the -// client's request for the UpdateJobStatus operation. The "output" return +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
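As an illustration only (not from the patch): a sketch of GetBucket using the WithContext variant that this hunk adds, since the doc comments stress that the context must be non-nil. The timeout, account ID, and Outposts bucket ARN are placeholders.

    package main

    import (
        "context"
        "fmt"
        "log"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3control"
    )

    func main() {
        svc := s3control.New(session.Must(session.NewSession()))

        // A non-nil context is required; here it simply bounds the request to 10s.
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        out, err := svc.GetBucketWithContext(ctx, &s3control.GetBucketInput{
            AccountId: aws.String("123456789012"), // placeholder
            Bucket:    aws.String("arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-0123456789abcdef0/bucket/example-outposts-bucket"), // placeholder ARN
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out)
    }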
// the "output" return value is not valid until after Send returns without error. // -// See UpdateJobStatus for more information on using the UpdateJobStatus +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateJobStatusRequest method. -// req, resp := client.UpdateJobStatusRequest(params) +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. +// req, resp := client.GetBucketLifecycleConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/UpdateJobStatus -func (c *S3Control) UpdateJobStatusRequest(input *UpdateJobStatusInput) (req *request.Request, output *UpdateJobStatusOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetBucketLifecycleConfiguration +func (c *S3Control) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { op := &request.Operation{ - Name: opUpdateJobStatus, - HTTPMethod: "POST", - HTTPPath: "/v20180820/jobs/{id}/status", + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/v20180820/bucket/{name}/lifecycleconfiguration", } if input == nil { - input = &UpdateJobStatusInput{} + input = &GetBucketLifecycleConfigurationInput{} } - output = &UpdateJobStatusOutput{} + output = &GetBucketLifecycleConfigurationOutput{} req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) return } -// UpdateJobStatus API operation for AWS S3 Control. +// GetBucketLifecycleConfiguration API operation for AWS S3 Control. // -// Updates the status for the specified job. Use this operation to confirm that -// you want to run a job or to cancel an existing job. // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// This API operation gets an Amazon S3 on Outposts bucket's lifecycle configuration. +// To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// in the Amazon Simple Storage Service API. // -// See the AWS API reference guide for AWS S3 Control's -// API operation UpdateJobStatus for usage and error information. +// Returns the lifecycle configuration information set on the Outposts bucket. +// For more information, see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) +// and for information about lifecycle configuration, see Object Lifecycle Management +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in Amazon Simple Storage Service Developer Guide. 
// -// Returned Error Codes: -// * ErrCodeBadRequestException "BadRequestException" +// To use this operation, you must have permission to perform the s3outposts:GetLifecycleConfiguration +// action. The Outposts bucket owner has this permission, by default. The bucket +// owner can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). // -// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucketLifecycleConfiguration.html#API_control_GetBucketLifecycleConfiguration_Examples) +// section below. // -// * ErrCodeNotFoundException "NotFoundException" +// GetBucketLifecycleConfiguration has the following special error: // -// * ErrCodeJobStatusException "JobStatusException" +// * Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client // -// * ErrCodeInternalServiceException "InternalServiceException" +// The following actions are related to GetBucketLifecycleConfiguration: // -// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/UpdateJobStatus -func (c *S3Control) UpdateJobStatus(input *UpdateJobStatusInput) (*UpdateJobStatusOutput, error) { - req, out := c.UpdateJobStatusRequest(input) +// * PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketLifecycleConfiguration.html) +// +// * DeleteBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketLifecycleConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation GetBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetBucketLifecycleConfiguration +func (c *S3Control) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) return out, req.Send() } -// UpdateJobStatusWithContext is the same as UpdateJobStatus with the addition of +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of // the ability to pass a context and additional request options. // -// See UpdateJobStatus for details on how to use this API operation. +// See GetBucketLifecycleConfiguration for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
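A sketch, separate from the diff, of GetBucketLifecycleConfiguration as documented above, including a check for the NoSuchLifecycleConfiguration error code that the comment calls out. The string comparison on the error code and the placeholder identifiers are assumptions for illustration.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3control"
    )

    func main() {
        svc := s3control.New(session.Must(session.NewSession()))

        out, err := svc.GetBucketLifecycleConfiguration(&s3control.GetBucketLifecycleConfigurationInput{
            AccountId: aws.String("123456789012"), // placeholder
            Bucket:    aws.String("arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-0123456789abcdef0/bucket/example-outposts-bucket"), // placeholder ARN
        })
        if err != nil {
            // NoSuchLifecycleConfiguration is the special error described above.
            if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchLifecycleConfiguration" {
                log.Fatal("no lifecycle configuration is set on this bucket")
            }
            log.Fatal(err)
        }
        fmt.Println(out)
    }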
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *S3Control) UpdateJobStatusWithContext(ctx aws.Context, input *UpdateJobStatusInput, opts ...request.Option) (*UpdateJobStatusOutput, error) { - req, out := c.UpdateJobStatusRequest(input) +func (c *S3Control) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// An access point used to access a bucket. -type AccessPoint struct { - _ struct{} `type:"structure"` - - // The name of the bucket associated with this access point. - // - // Bucket is a required field - Bucket *string `min:"3" type:"string" required:"true"` - - // The name of this access point. - // - // Name is a required field - Name *string `min:"3" type:"string" required:"true"` - - // Indicates whether this access point allows access from the public Internet. - // If VpcConfiguration is specified for this access point, then NetworkOrigin - // is VPC, and the access point doesn't allow access from the public Internet. - // Otherwise, NetworkOrigin is Internet, and the access point allows access - // from the public Internet, subject to the access point and bucket access policies. - // - // NetworkOrigin is a required field - NetworkOrigin *string `type:"string" required:"true" enum:"NetworkOrigin"` - - // The Virtual Private Cloud (VPC) configuration for this access point, if one - // exists. - VpcConfiguration *VpcConfiguration `type:"structure"` -} +const opGetBucketPolicy = "GetBucketPolicy" -// String returns the string representation -func (s AccessPoint) String() string { - return awsutil.Prettify(s) -} +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicy for more information on using the GetBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketPolicyRequest method. 
+// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetBucketPolicy +func (c *S3Control) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/v20180820/bucket/{name}/policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + output = &GetBucketPolicyOutput{} + req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// GetBucketPolicy API operation for AWS S3 Control. +// +// +// This API action gets a bucket policy for an Amazon S3 on Outposts bucket. +// To get a policy for an S3 bucket, see GetBucketPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicy.html) +// in the Amazon Simple Storage Service API. +// +// Returns the policy of a specified Outposts bucket. For more information, +// see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// If you are using an identity other than the root user of the AWS account +// that owns the bucket, the calling identity must have the GetBucketPolicy +// permissions on the specified bucket and belong to the bucket owner's account +// in order to use this operation. +// +// If you don't have s3outposts:GetBucketPolicy permissions, Amazon S3 returns +// a 403 Access Denied error. If you have the correct permissions, but you're +// not using an identity that belongs to the bucket owner's account, Amazon +// S3 returns a 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucketPolicy.html#API_control_GetBucketPolicy_Examples) +// section below. +// +// The following actions are related to GetBucketPolicy: +// +// * GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// * PutBucketPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketPolicy.html) +// +// * DeleteBucketPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketPolicy.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation GetBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetBucketPolicy +func (c *S3Control) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + return out, req.Send() +} + +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketTagging for more information on using the GetBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketTaggingRequest method. +// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetBucketTagging +func (c *S3Control) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/v20180820/bucket/{name}/tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + output = &GetBucketTaggingOutput{} + req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// GetBucketTagging API operation for AWS S3 Control. +// +// +// This API operation gets an Amazon S3 on Outposts bucket's tags. To get an +// S3 bucket tags, see GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// in the Amazon Simple Storage Service API. +// +// Returns the tag set associated with the Outposts bucket. 
For more information, +// see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// To use this operation, you must have permission to perform the GetBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// GetBucketTagging has the following special error: +// +// * Error code: NoSuchTagSetError Description: There is no tag set associated +// with the bucket. +// +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetBucketTagging.html#API_control_GetBucketTagging_Examples) +// section below. +// +// The following actions are related to GetBucketTagging: +// +// * PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutBucketTagging.html) +// +// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation GetBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetBucketTagging +func (c *S3Control) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetJobTagging = "GetJobTagging" + +// GetJobTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetJobTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetJobTagging for more information on using the GetJobTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
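+//
+// As an additional, illustrative sketch (not part of the generated documentation),
+// a custom header could be attached to a GetJobTagging request before sending it.
+// It assumes the aws, session, fmt, and s3control packages are imported; the
+// account ID, job ID, and header values are placeholders:
+//
+//    svc := s3control.New(session.Must(session.NewSession()))
+//    req, out := svc.GetJobTaggingRequest(&s3control.GetJobTaggingInput{
+//        AccountId: aws.String("111122223333"),
+//        JobId:     aws.String("example-job-id"),
+//    })
+//    // Inject a custom header into the request lifecycle before Send.
+//    req.HTTPRequest.Header.Set("X-Example-Header", "example-value")
+//    if err := req.Send(); err == nil {
+//        fmt.Println(out) // out is only valid after Send returns without error
+//    }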
+// +// +// // Example sending a request using the GetJobTaggingRequest method. +// req, resp := client.GetJobTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetJobTagging +func (c *S3Control) GetJobTaggingRequest(input *GetJobTaggingInput) (req *request.Request, output *GetJobTaggingOutput) { + op := &request.Operation{ + Name: opGetJobTagging, + HTTPMethod: "GET", + HTTPPath: "/v20180820/jobs/{id}/tagging", + } + + if input == nil { + input = &GetJobTaggingInput{} + } + + output = &GetJobTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// GetJobTagging API operation for AWS S3 Control. +// +// Returns the tags on an S3 Batch Operations job. To use this operation, you +// must have permission to perform the s3:GetJobTagging action. For more information, +// see Controlling access and labeling jobs using tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related actions include: +// +// * CreateJob (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) +// +// * PutJobTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutJobTagging.html) +// +// * DeleteJobTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation GetJobTagging for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServiceException "InternalServiceException" +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +// * ErrCodeNotFoundException "NotFoundException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetJobTagging +func (c *S3Control) GetJobTagging(input *GetJobTaggingInput) (*GetJobTaggingOutput, error) { + req, out := c.GetJobTaggingRequest(input) + return out, req.Send() +} + +// GetJobTaggingWithContext is the same as GetJobTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetJobTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) GetJobTaggingWithContext(ctx aws.Context, input *GetJobTaggingInput, opts ...request.Option) (*GetJobTaggingOutput, error) { + req, out := c.GetJobTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetPublicAccessBlock = "GetPublicAccessBlock" + +// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the GetPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPublicAccessBlockRequest method. +// req, resp := client.GetPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetPublicAccessBlock +func (c *S3Control) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opGetPublicAccessBlock, + HTTPMethod: "GET", + HTTPPath: "/v20180820/configuration/publicAccessBlock", + } + + if input == nil { + input = &GetPublicAccessBlockInput{} + } + + output = &GetPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// GetPublicAccessBlock API operation for AWS S3 Control. +// +// Retrieves the PublicAccessBlock configuration for an AWS account. For more +// information, see Using Amazon S3 block public access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html). +// +// Related actions include: +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeletePublicAccessBlock.html) +// +// * PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_PutPublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation GetPublicAccessBlock for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchPublicAccessBlockConfiguration "NoSuchPublicAccessBlockConfiguration" +// Amazon S3 throws this exception if you make a GetPublicAccessBlock request +// against an account that doesn't have a PublicAccessBlockConfiguration set. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/GetPublicAccessBlock +func (c *S3Control) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + return out, req.Send() +} + +// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See GetPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3Control) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAccessPoints = "ListAccessPoints" + +// ListAccessPointsRequest generates a "aws/request.Request" representing the +// client's request for the ListAccessPoints operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccessPoints for more information on using the ListAccessPoints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccessPointsRequest method. +// req, resp := client.ListAccessPointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListAccessPoints +func (c *S3Control) ListAccessPointsRequest(input *ListAccessPointsInput) (req *request.Request, output *ListAccessPointsOutput) { + op := &request.Operation{ + Name: opListAccessPoints, + HTTPMethod: "GET", + HTTPPath: "/v20180820/accesspoint", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccessPointsInput{} + } + + output = &ListAccessPointsOutput{} + req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// ListAccessPoints API operation for AWS S3 Control. +// +// Returns a list of the access points currently associated with the specified +// bucket. You can retrieve up to 1000 access points per call. If the specified +// bucket has more than 1,000 access points (or the number specified in maxResults, +// whichever is less), the response will include a continuation token that you +// can use to list the additional access points. +// +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_GetAccessPoint.html#API_control_GetAccessPoint_Examples) +// section below. 
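+//
+// As a rough, non-generated sketch of following the continuation token described
+// above, a caller could loop until NextToken is empty. The account ID and bucket
+// name are placeholders, and svc is assumed to be an *s3control.S3Control client
+// built with s3control.New:
+//
+//    input := &s3control.ListAccessPointsInput{
+//        AccountId:  aws.String("111122223333"),
+//        Bucket:     aws.String("example-bucket"),
+//        MaxResults: aws.Int64(100),
+//    }
+//    for {
+//        page, err := svc.ListAccessPoints(input)
+//        if err != nil {
+//            break
+//        }
+//        for _, ap := range page.AccessPoints {
+//            fmt.Println(aws.StringValue(ap.Name))
+//        }
+//        if page.NextToken == nil {
+//            break // no more access points to list
+//        }
+//        input.NextToken = page.NextToken // continue from the returned token
+//    }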
+// +// The following actions are related to ListAccessPoints: +// +// * CreateAccessPoint (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateAccessPoint.html) +// +// * DeleteAccessPoint (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPoint.html) +// +// * GetAccessPoint (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPoint.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation ListAccessPoints for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListAccessPoints +func (c *S3Control) ListAccessPoints(input *ListAccessPointsInput) (*ListAccessPointsOutput, error) { + req, out := c.ListAccessPointsRequest(input) + return out, req.Send() +} + +// ListAccessPointsWithContext is the same as ListAccessPoints with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccessPoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) ListAccessPointsWithContext(ctx aws.Context, input *ListAccessPointsInput, opts ...request.Option) (*ListAccessPointsOutput, error) { + req, out := c.ListAccessPointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccessPointsPages iterates over the pages of a ListAccessPoints operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccessPoints method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccessPoints operation. +// pageNum := 0 +// err := client.ListAccessPointsPages(params, +// func(page *s3control.ListAccessPointsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3Control) ListAccessPointsPages(input *ListAccessPointsInput, fn func(*ListAccessPointsOutput, bool) bool) error { + return c.ListAccessPointsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccessPointsPagesWithContext same as ListAccessPointsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) ListAccessPointsPagesWithContext(ctx aws.Context, input *ListAccessPointsInput, fn func(*ListAccessPointsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccessPointsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccessPointsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccessPointsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListJobs = "ListJobs" + +// ListJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListJobs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListJobs for more information on using the ListJobs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListJobsRequest method. +// req, resp := client.ListJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListJobs +func (c *S3Control) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) { + op := &request.Operation{ + Name: opListJobs, + HTTPMethod: "GET", + HTTPPath: "/v20180820/jobs", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListJobsInput{} + } + + output = &ListJobsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// ListJobs API operation for AWS S3 Control. +// +// Lists current S3 Batch Operations jobs and jobs that have ended within the +// last 30 days for the AWS account making the request. For more information, +// see S3 Batch Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related actions include: +// +// * CreateJob (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) +// +// * DescribeJob (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) +// +// * UpdateJobPriority (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobPriority.html) +// +// * UpdateJobStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation ListJobs for usage and error information. 
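+//
+// As an illustrative, non-generated sketch, the returned jobs can be narrowed with
+// the JobStatuses filter and then paginated with ListJobsPages. The account ID is
+// a placeholder, and svc is assumed to be an *s3control.S3Control client:
+//
+//    input := &s3control.ListJobsInput{
+//        AccountId:   aws.String("111122223333"),
+//        JobStatuses: aws.StringSlice([]string{"Active", "Complete"}),
+//    }
+//    err := svc.ListJobsPages(input,
+//        func(page *s3control.ListJobsOutput, lastPage bool) bool {
+//            for _, job := range page.Jobs {
+//                fmt.Println(aws.StringValue(job.JobId), aws.StringValue(job.Status))
+//            }
+//            return true // keep iterating until the last page
+//        })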
+// +// Returned Error Codes: +// * ErrCodeInvalidRequestException "InvalidRequestException" +// +// * ErrCodeInternalServiceException "InternalServiceException" +// +// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListJobs +func (c *S3Control) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) { + req, out := c.ListJobsRequest(input) + return out, req.Send() +} + +// ListJobsWithContext is the same as ListJobs with the addition of +// the ability to pass a context and additional request options. +// +// See ListJobs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) ListJobsWithContext(ctx aws.Context, input *ListJobsInput, opts ...request.Option) (*ListJobsOutput, error) { + req, out := c.ListJobsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListJobsPages iterates over the pages of a ListJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJobs operation. +// pageNum := 0 +// err := client.ListJobsPages(params, +// func(page *s3control.ListJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3Control) ListJobsPages(input *ListJobsInput, fn func(*ListJobsOutput, bool) bool) error { + return c.ListJobsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListJobsPagesWithContext same as ListJobsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInput, fn func(*ListJobsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListJobsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListJobsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListJobsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListRegionalBuckets = "ListRegionalBuckets" + +// ListRegionalBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListRegionalBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListRegionalBuckets for more information on using the ListRegionalBuckets +// API call, and error handling. 
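+//
+// As a non-generated sketch of the WithContext variants described throughout this
+// file, a deadline can be attached to a call such as ListRegionalBucketsWithContext
+// through the standard context package. It assumes the context and time packages
+// are imported; the account ID and outpost ID are placeholders, and svc is an
+// assumed *s3control.S3Control client:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    out, err := svc.ListRegionalBucketsWithContext(ctx, &s3control.ListRegionalBucketsInput{
+//        AccountId: aws.String("111122223333"),
+//        OutpostId: aws.String("op-01ac5d28a6a232904"),
+//    })
+//    if err == nil {
+//        fmt.Println(out)
+//    }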
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListRegionalBucketsRequest method. +// req, resp := client.ListRegionalBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListRegionalBuckets +func (c *S3Control) ListRegionalBucketsRequest(input *ListRegionalBucketsInput) (req *request.Request, output *ListRegionalBucketsOutput) { + op := &request.Operation{ + Name: opListRegionalBuckets, + HTTPMethod: "GET", + HTTPPath: "/v20180820/bucket", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListRegionalBucketsInput{} + } + + output = &ListRegionalBucketsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// ListRegionalBuckets API operation for AWS S3 Control. +// +// Returns a list of all Outposts buckets in an Outposts that are owned by the +// authenticated sender of the request. For more information, see Using Amazon +// S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// For an example of the request syntax for Amazon S3 on Outposts that uses +// the S3 on Outposts endpoint hostname prefix and outpost-id in your API request, +// see the Example (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_ListRegionalBuckets.html#API_control_ListRegionalBuckets_Examples) +// section below. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation ListRegionalBuckets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/ListRegionalBuckets +func (c *S3Control) ListRegionalBuckets(input *ListRegionalBucketsInput) (*ListRegionalBucketsOutput, error) { + req, out := c.ListRegionalBucketsRequest(input) + return out, req.Send() +} + +// ListRegionalBucketsWithContext is the same as ListRegionalBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListRegionalBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) ListRegionalBucketsWithContext(ctx aws.Context, input *ListRegionalBucketsInput, opts ...request.Option) (*ListRegionalBucketsOutput, error) { + req, out := c.ListRegionalBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListRegionalBucketsPages iterates over the pages of a ListRegionalBuckets operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListRegionalBuckets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRegionalBuckets operation. +// pageNum := 0 +// err := client.ListRegionalBucketsPages(params, +// func(page *s3control.ListRegionalBucketsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3Control) ListRegionalBucketsPages(input *ListRegionalBucketsInput, fn func(*ListRegionalBucketsOutput, bool) bool) error { + return c.ListRegionalBucketsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListRegionalBucketsPagesWithContext same as ListRegionalBucketsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) ListRegionalBucketsPagesWithContext(ctx aws.Context, input *ListRegionalBucketsInput, fn func(*ListRegionalBucketsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListRegionalBucketsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListRegionalBucketsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListRegionalBucketsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutAccessPointPolicy = "PutAccessPointPolicy" + +// PutAccessPointPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutAccessPointPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutAccessPointPolicy for more information on using the PutAccessPointPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutAccessPointPolicyRequest method. 
+// req, resp := client.PutAccessPointPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutAccessPointPolicy +func (c *S3Control) PutAccessPointPolicyRequest(input *PutAccessPointPolicyInput) (req *request.Request, output *PutAccessPointPolicyOutput) { + op := &request.Operation{ + Name: opPutAccessPointPolicy, + HTTPMethod: "PUT", + HTTPPath: "/v20180820/accesspoint/{name}/policy", + } + + if input == nil { + input = &PutAccessPointPolicyInput{} + } + + output = &PutAccessPointPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// PutAccessPointPolicy API operation for AWS S3 Control. +// +// Associates an access policy with the specified access point. Each access +// point can have only one policy, so a request made to this API replaces any +// existing policy associated with the specified access point. +// +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutAccessPointPolicy.html#API_control_PutAccessPointPolicy_Examples) +// section below. +// +// The following actions are related to PutAccessPointPolicy: +// +// * GetAccessPointPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetAccessPointPolicy.html) +// +// * DeleteAccessPointPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteAccessPointPolicy.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation PutAccessPointPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutAccessPointPolicy +func (c *S3Control) PutAccessPointPolicy(input *PutAccessPointPolicyInput) (*PutAccessPointPolicyOutput, error) { + req, out := c.PutAccessPointPolicyRequest(input) + return out, req.Send() +} + +// PutAccessPointPolicyWithContext is the same as PutAccessPointPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutAccessPointPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) PutAccessPointPolicyWithContext(ctx aws.Context, input *PutAccessPointPolicyInput, opts ...request.Option) (*PutAccessPointPolicyOutput, error) { + req, out := c.PutAccessPointPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. +// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutBucketLifecycleConfiguration +func (c *S3Control) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/v20180820/bucket/{name}/lifecycleconfiguration", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + output = &PutBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketLifecycleConfiguration API operation for AWS S3 Control. +// +// +// This API action puts a lifecycle configuration to an Amazon S3 on Outposts +// bucket. To put a lifecycle configuration to an S3 bucket, see PutBucketLifecycleConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// in the Amazon Simple Storage Service API. +// +// Creates a new lifecycle configuration for the Outposts bucket or replaces +// an existing lifecycle configuration. Outposts buckets can only support a +// lifecycle that deletes objects after a certain period of time. For more information, +// see Managing Lifecycle Permissions for Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html). +// +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. 
For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutBucketLifecycleConfiguration.html#API_control_PutBucketLifecycleConfiguration_Examples) +// section below. +// +// The following actions are related to PutBucketLifecycleConfiguration: +// +// * GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketLifecycleConfiguration.html) +// +// * DeleteBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketLifecycleConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation PutBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutBucketLifecycleConfiguration +func (c *S3Control) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketPolicy for more information on using the PutBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketPolicyRequest method. 
+// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutBucketPolicy +func (c *S3Control) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/v20180820/bucket/{name}/policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + output = &PutBucketPolicyOutput{} + req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketPolicy API operation for AWS S3 Control. +// +// +// This API action puts a bucket policy to an Amazon S3 on Outposts bucket. +// To put a policy on an S3 bucket, see PutBucketPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketPolicy.html) +// in the Amazon Simple Storage Service API. +// +// Applies an Amazon S3 bucket policy to an Outposts bucket. For more information, +// see Using Amazon S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// If you are using an identity other than the root user of the AWS account +// that owns the Outposts bucket, the calling identity must have the PutBucketPolicy +// permissions on the specified Outposts bucket and belong to the bucket owner's +// account in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// As a security precaution, the root user of the AWS account that owns a bucket +// can always use this operation, even if the policy explicitly denies the root +// user the ability to perform this action. +// +// For more information about bucket policies, see Using Bucket Policies and +// User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutBucketPolicy.html#API_control_PutBucketPolicy_Examples) +// section below. 
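+//
+// As an illustrative, non-generated sketch, the policy document is passed as a
+// JSON string. The account ID, Outposts bucket ARN, and policy below are
+// placeholders, and svc is assumed to be an *s3control.S3Control client:
+//
+//    input := &s3control.PutBucketPolicyInput{
+//        AccountId: aws.String("111122223333"),
+//        Bucket:    aws.String("arn:aws:s3-outposts:us-west-2:111122223333:outpost/op-01ac5d28a6a232904/bucket/example-bucket"),
+//        Policy:    aws.String(`{"Version":"2012-10-17","Statement":[]}`), // placeholder policy document
+//    }
+//    if _, err := svc.PutBucketPolicy(input); err != nil {
+//        fmt.Println("PutBucketPolicy failed:", err)
+//    }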
+// +// The following actions are related to PutBucketPolicy: +// +// * GetBucketPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketPolicy.html) +// +// * DeleteBucketPolicy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketPolicy.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation PutBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutBucketPolicy +func (c *S3Control) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + return out, req.Send() +} + +// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketTagging for more information on using the PutBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketTaggingRequest method. 
+// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutBucketTagging +func (c *S3Control) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/v20180820/bucket/{name}/tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + output = &PutBucketTaggingOutput{} + req = c.newRequest(op, input, output) + // update account id or check if provided input for account id member matches + // the account id present in ARN + req.Handlers.Validate.PushFrontNamed(updateAccountIDWithARNHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketTagging API operation for AWS S3 Control. +// +// +// This API action puts tags on an Amazon S3 on Outposts bucket. To put tags +// on an S3 bucket, see PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// in the Amazon Simple Storage Service API. +// +// Sets the tags for an Outposts bucket. For more information, see Using Amazon +// S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Use tags to organize your AWS bill to reflect your own cost structure. To +// do this, sign up to get your AWS account bill with tag key values included. +// Then, to see the cost of combined resources, organize your billing information +// according to resources with the same tag key values. For example, you can +// tag several resources with a specific application name, and then organize +// your billing information to see the total cost of that application across +// several services. For more information, see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html). +// +// Within a bucket, if you add a tag that has the same key as an existing tag, +// the new value overwrites the old value. For more information, see Using Cost +// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html). +// +// To use this operation, you must have permissions to perform the s3outposts:PutBucketTagging +// action. The Outposts bucket owner has this permission by default and can +// grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html). +// +// PutBucketTagging has the following special errors: +// +// * Error code: InvalidTagError Description: The tag provided was not a +// valid tag. This error can occur if the tag did not pass input validation. 
+// For information about tag restrictions, see User-Defined Tag Restrictions +// (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// and AWS-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html). +// +// * Error code: MalformedXMLError Description: The XML provided does not +// match the schema. +// +// * Error code: OperationAbortedError Description: A conflicting conditional +// operation is currently in progress against this resource. Try again. +// +// * Error code: InternalError Description: The service was unable to apply +// the provided tag to the bucket. +// +// All Amazon S3 on Outposts REST API requests for this action require an additional +// parameter of outpost-id to be passed with the request and an S3 on Outposts +// endpoint hostname prefix instead of s3-control. For an example of the request +// syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname +// prefix and the outpost-id derived using the access point ARN, see the Example +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API__control_PutBucketTagging.html#API_control_PutBucketTagging_Examples) +// section below. +// +// The following actions are related to PutBucketTagging: +// +// * GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetBucketTagging.html) +// +// * DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation PutBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutBucketTagging +func (c *S3Control) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + return out, req.Send() +} + +// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutJobTagging = "PutJobTagging" + +// PutJobTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutJobTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutJobTagging for more information on using the PutJobTagging +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutJobTaggingRequest method. +// req, resp := client.PutJobTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutJobTagging +func (c *S3Control) PutJobTaggingRequest(input *PutJobTaggingInput) (req *request.Request, output *PutJobTaggingOutput) { + op := &request.Operation{ + Name: opPutJobTagging, + HTTPMethod: "PUT", + HTTPPath: "/v20180820/jobs/{id}/tagging", + } + + if input == nil { + input = &PutJobTaggingInput{} + } + + output = &PutJobTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// PutJobTagging API operation for AWS S3 Control. +// +// Sets the supplied tag-set on an S3 Batch Operations job. +// +// A tag is a key-value pair. You can associate S3 Batch Operations tags with +// any job by sending a PUT request against the tagging subresource that is +// associated with the job. To modify the existing tag set, you can either replace +// the existing tag set entirely, or make changes within the existing tag set +// by retrieving the existing tag set using GetJobTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html), +// modify that tag set, and use this API action to replace the tag set with +// the one you modified. For more information, see Controlling access and labeling +// jobs using tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-managing-jobs.html#batch-ops-job-tags) +// in the Amazon Simple Storage Service Developer Guide. +// +// * If you send this request with an empty tag set, Amazon S3 deletes the +// existing tag set on the Batch Operations job. If you use this method, +// you are charged for a Tier 1 Request (PUT). For more information, see +// Amazon S3 pricing (http://aws.amazon.com/s3/pricing/). +// +// * For deleting existing tags for your Batch Operations job, a DeleteJobTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html) +// request is preferred because it achieves the same result without incurring +// charges. +// +// * A few things to consider about using tags: Amazon S3 limits the maximum +// number of tags to 50 tags per job. You can associate up to 50 tags with +// a job as long as they have unique tag keys. A tag key can be up to 128 +// Unicode characters in length, and tag values can be up to 256 Unicode +// characters in length. The key and values are case sensitive. For tagging-related +// restrictions related to characters and encodings, see User-Defined Tag +// Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) +// in the AWS Billing and Cost Management User Guide. +// +// To use this operation, you must have permission to perform the s3:PutJobTagging +// action. 
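+//
+// As a non-generated sketch of the read-modify-write flow described above, the
+// existing tag set can be fetched, extended, and written back in full. The account
+// ID, job ID, and tag values are placeholders, and svc is an assumed
+// *s3control.S3Control client:
+//
+//    got, err := svc.GetJobTagging(&s3control.GetJobTaggingInput{
+//        AccountId: aws.String("111122223333"),
+//        JobId:     aws.String("example-job-id"),
+//    })
+//    if err == nil {
+//        tags := append(got.Tags, &s3control.S3Tag{
+//            Key:   aws.String("department"),
+//            Value: aws.String("finance"),
+//        })
+//        _, err = svc.PutJobTagging(&s3control.PutJobTaggingInput{
+//            AccountId: aws.String("111122223333"),
+//            JobId:     aws.String("example-job-id"),
+//            Tags:      tags, // replaces the entire tag set on the job
+//        })
+//    }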
+//
+// Related actions include:
+//
+// * CreateJob (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html)
+//
+// * GetJobTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetJobTagging.html)
+//
+// * DeleteJobTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeleteJobTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS S3 Control's
+// API operation PutJobTagging for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeInternalServiceException "InternalServiceException"
+//
+// * ErrCodeTooManyRequestsException "TooManyRequestsException"
+//
+// * ErrCodeNotFoundException "NotFoundException"
+//
+// * ErrCodeTooManyTagsException "TooManyTagsException"
+// Amazon S3 throws this exception if you have too many tags in your tag set.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutJobTagging
+func (c *S3Control) PutJobTagging(input *PutJobTaggingInput) (*PutJobTaggingOutput, error) {
+ req, out := c.PutJobTaggingRequest(input)
+ return out, req.Send()
+}
+
+// PutJobTaggingWithContext is the same as PutJobTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutJobTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3Control) PutJobTaggingWithContext(ctx aws.Context, input *PutJobTaggingInput, opts ...request.Option) (*PutJobTaggingOutput, error) {
+ req, out := c.PutJobTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opPutPublicAccessBlock = "PutPublicAccessBlock"
+
+// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the
+// client's request for the PutPublicAccessBlock operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the PutPublicAccessBlockRequest method.
+// req, resp := client.PutPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutPublicAccessBlock +func (c *S3Control) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opPutPublicAccessBlock, + HTTPMethod: "PUT", + HTTPPath: "/v20180820/configuration/publicAccessBlock", + } + + if input == nil { + input = &PutPublicAccessBlockInput{} + } + + output = &PutPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// PutPublicAccessBlock API operation for AWS S3 Control. +// +// Creates or modifies the PublicAccessBlock configuration for an AWS account. +// For more information, see Using Amazon S3 block public access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html). +// +// Related actions include: +// +// * GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_GetPublicAccessBlock.html) +// +// * DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation PutPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/PutPublicAccessBlock +func (c *S3Control) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + return out, req.Send() +} + +// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See PutPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateJobPriority = "UpdateJobPriority" + +// UpdateJobPriorityRequest generates a "aws/request.Request" representing the +// client's request for the UpdateJobPriority operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
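+//
+// A brief sketch of raising a job's priority with the one-shot helper, assuming
+// an existing client; the IDs and priority value are placeholders (higher numbers
+// indicate higher priority):
+//
+//    _, err := client.UpdateJobPriority(&s3control.UpdateJobPriorityInput{
+//        AccountId: aws.String("123456789012"),
+//        JobId:     aws.String("example-job-id"),
+//        Priority:  aws.Int64(42),
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }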
+// +// See UpdateJobPriority for more information on using the UpdateJobPriority +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateJobPriorityRequest method. +// req, resp := client.UpdateJobPriorityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/UpdateJobPriority +func (c *S3Control) UpdateJobPriorityRequest(input *UpdateJobPriorityInput) (req *request.Request, output *UpdateJobPriorityOutput) { + op := &request.Operation{ + Name: opUpdateJobPriority, + HTTPMethod: "POST", + HTTPPath: "/v20180820/jobs/{id}/priority", + } + + if input == nil { + input = &UpdateJobPriorityInput{} + } + + output = &UpdateJobPriorityOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// UpdateJobPriority API operation for AWS S3 Control. +// +// Updates an existing S3 Batch Operations job's priority. For more information, +// see S3 Batch Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related actions include: +// +// * CreateJob (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) +// +// * ListJobs (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html) +// +// * DescribeJob (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) +// +// * UpdateJobStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation UpdateJobPriority for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +// * ErrCodeNotFoundException "NotFoundException" +// +// * ErrCodeInternalServiceException "InternalServiceException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/UpdateJobPriority +func (c *S3Control) UpdateJobPriority(input *UpdateJobPriorityInput) (*UpdateJobPriorityOutput, error) { + req, out := c.UpdateJobPriorityRequest(input) + return out, req.Send() +} + +// UpdateJobPriorityWithContext is the same as UpdateJobPriority with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateJobPriority for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
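+//
+// A short sketch of passing a timeout context, assuming an existing client and
+// params; context and time here are the standard library packages:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//    defer cancel()
+//    resp, err := client.UpdateJobPriorityWithContext(ctx, params)
+//    if err != nil {
+//        fmt.Println(err)
+//    } else {
+//        fmt.Println(resp)
+//    }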
+func (c *S3Control) UpdateJobPriorityWithContext(ctx aws.Context, input *UpdateJobPriorityInput, opts ...request.Option) (*UpdateJobPriorityOutput, error) { + req, out := c.UpdateJobPriorityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateJobStatus = "UpdateJobStatus" + +// UpdateJobStatusRequest generates a "aws/request.Request" representing the +// client's request for the UpdateJobStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateJobStatus for more information on using the UpdateJobStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateJobStatusRequest method. +// req, resp := client.UpdateJobStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/UpdateJobStatus +func (c *S3Control) UpdateJobStatusRequest(input *UpdateJobStatusInput) (req *request.Request, output *UpdateJobStatusOutput) { + op := &request.Operation{ + Name: opUpdateJobStatus, + HTTPMethod: "POST", + HTTPPath: "/v20180820/jobs/{id}/status", + } + + if input == nil { + input = &UpdateJobStatusInput{} + } + + output = &UpdateJobStatusOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{AccountId}.", input.hostLabels)) + req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler) + return +} + +// UpdateJobStatus API operation for AWS S3 Control. +// +// Updates the status for the specified job. Use this operation to confirm that +// you want to run a job or to cancel an existing job. For more information, +// see S3 Batch Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-basics.html) +// in the Amazon Simple Storage Service Developer Guide. +// +// Related actions include: +// +// * CreateJob (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateJob.html) +// +// * ListJobs (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_ListJobs.html) +// +// * DescribeJob (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_DescribeJob.html) +// +// * UpdateJobStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_UpdateJobStatus.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS S3 Control's +// API operation UpdateJobStatus for usage and error information. 
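+//
+// A minimal sketch of cancelling a job, assuming an existing client; the account
+// ID and job ID are placeholders, and RequestedJobStatus takes the service's
+// Ready or Cancelled values:
+//
+//    _, err := client.UpdateJobStatus(&s3control.UpdateJobStatusInput{
+//        AccountId:          aws.String("123456789012"),
+//        JobId:              aws.String("example-job-id"),
+//        RequestedJobStatus: aws.String(s3control.RequestedJobStatusCancelled),
+//        StatusUpdateReason: aws.String("No longer needed"),
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }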
+// +// Returned Error Codes: +// * ErrCodeBadRequestException "BadRequestException" +// +// * ErrCodeTooManyRequestsException "TooManyRequestsException" +// +// * ErrCodeNotFoundException "NotFoundException" +// +// * ErrCodeJobStatusException "JobStatusException" +// +// * ErrCodeInternalServiceException "InternalServiceException" +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3control-2018-08-20/UpdateJobStatus +func (c *S3Control) UpdateJobStatus(input *UpdateJobStatusInput) (*UpdateJobStatusOutput, error) { + req, out := c.UpdateJobStatusRequest(input) + return out, req.Send() +} + +// UpdateJobStatusWithContext is the same as UpdateJobStatus with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateJobStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3Control) UpdateJobStatusWithContext(ctx aws.Context, input *UpdateJobStatusInput, opts ...request.Option) (*UpdateJobStatusOutput, error) { + req, out := c.UpdateJobStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// The container for abort incomplete multipart upload +type AbortIncompleteMultipartUpload struct { + _ struct{} `type:"structure"` + + // Specifies the number of days after which Amazon S3 aborts an incomplete multipart + // upload to the Outposts bucket. + DaysAfterInitiation *int64 `type:"integer"` +} + +// String returns the string representation +func (s AbortIncompleteMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortIncompleteMultipartUpload) GoString() string { + return s.String() +} + +// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. +func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +// An access point used to access a bucket. +type AccessPoint struct { + _ struct{} `type:"structure"` + + // The ARN for the access point. + AccessPointArn *string `min:"4" type:"string"` + + // The name of the bucket associated with this access point. + // + // Bucket is a required field + Bucket *string `min:"3" type:"string" required:"true"` + + // The name of this access point. + // + // Name is a required field + Name *string `min:"3" type:"string" required:"true"` + + // Indicates whether this access point allows access from the public internet. + // If VpcConfiguration is specified for this access point, then NetworkOrigin + // is VPC, and the access point doesn't allow access from the public internet. + // Otherwise, NetworkOrigin is Internet, and the access point allows access + // from the public internet, subject to the access point and bucket access policies. + // + // NetworkOrigin is a required field + NetworkOrigin *string `type:"string" required:"true" enum:"NetworkOrigin"` + + // The virtual private cloud (VPC) configuration for this access point, if one + // exists. 
+ VpcConfiguration *VpcConfiguration `type:"structure"` +} + +// String returns the string representation +func (s AccessPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessPoint) GoString() string { + return s.String() +} + +// SetAccessPointArn sets the AccessPointArn field's value. +func (s *AccessPoint) SetAccessPointArn(v string) *AccessPoint { + s.AccessPointArn = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *AccessPoint) SetBucket(v string) *AccessPoint { + s.Bucket = &v + return s +} + +// SetName sets the Name field's value. +func (s *AccessPoint) SetName(v string) *AccessPoint { + s.Name = &v + return s +} + +// SetNetworkOrigin sets the NetworkOrigin field's value. +func (s *AccessPoint) SetNetworkOrigin(v string) *AccessPoint { + s.NetworkOrigin = &v + return s +} + +// SetVpcConfiguration sets the VpcConfiguration field's value. +func (s *AccessPoint) SetVpcConfiguration(v *VpcConfiguration) *AccessPoint { + s.VpcConfiguration = v + return s +} + +type CreateAccessPointInput struct { + _ struct{} `locationName:"CreateAccessPointRequest" type:"structure" xmlURI:"http://awss3control.amazonaws.com/doc/2018-08-20/"` + + // The AWS account ID for the owner of the bucket for which you want to create + // an access point. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The name of the bucket that you want to associate this access point with. + // + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. + // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. + // + // Bucket is a required field + Bucket *string `min:"3" type:"string" required:"true"` + + // The name you want to assign to this access point. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` + + // The PublicAccessBlock configuration that you want to apply to this Amazon + // S3 bucket. You can enable the configuration options in any combination. For + // more information about when Amazon S3 considers a bucket or object public, + // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon Simple Storage Service Developer Guide. + // + // This is not supported for Amazon S3 on Outposts. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` + + // If you include this field, Amazon S3 restricts access to this access point + // to requests from the specified virtual private cloud (VPC). + // + // This is required for creating an access point for Amazon S3 on Outposts buckets. + VpcConfiguration *VpcConfiguration `type:"structure"` +} + +// String returns the string representation +func (s CreateAccessPointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccessPointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateAccessPointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAccessPointInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + if s.VpcConfiguration != nil { + if err := s.VpcConfiguration.Validate(); err != nil { + invalidParams.AddNested("VpcConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *CreateAccessPointInput) SetAccountId(v string) *CreateAccessPointInput { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateAccessPointInput) SetBucket(v string) *CreateAccessPointInput { + s.Bucket = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateAccessPointInput) SetName(v string) *CreateAccessPointInput { + s.Name = &v + return s +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *CreateAccessPointInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *CreateAccessPointInput { + s.PublicAccessBlockConfiguration = v + return s +} + +// SetVpcConfiguration sets the VpcConfiguration field's value. +func (s *CreateAccessPointInput) SetVpcConfiguration(v *VpcConfiguration) *CreateAccessPointInput { + s.VpcConfiguration = v + return s +} + +func (s *CreateAccessPointInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +func (s *CreateAccessPointInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateAccessPointInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CreateAccessPointInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. 
+func (s CreateAccessPointInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type CreateAccessPointOutput struct { + _ struct{} `type:"structure"` + + // The ARN of the access point. + AccessPointArn *string `min:"4" type:"string"` +} + +// String returns the string representation +func (s CreateAccessPointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccessPointOutput) GoString() string { + return s.String() +} + +// SetAccessPointArn sets the AccessPointArn field's value. +func (s *CreateAccessPointOutput) SetAccessPointArn(v string) *CreateAccessPointOutput { + s.AccessPointArn = &v + return s +} + +// The container for the bucket configuration. +// +// This is not supported by Amazon S3 on Outposts buckets. +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the Region where the bucket will be created. If you are creating + // a bucket on the US East (N. Virginia) Region (us-east-1), you do not need + // to specify the location. + // + // This is not supported by Amazon S3 on Outposts buckets. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { + s.LocationConstraint = &v + return s +} + +type CreateBucketInput struct { + _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + // + // This is not supported by Amazon S3 on Outposts buckets. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // The name of the bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` + + // The configuration information for the bucket. + // + // This is not supported by Amazon S3 on Outposts buckets. + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://awss3control.amazonaws.com/doc/2018-08-20/"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + // + // This is not supported by Amazon S3 on Outposts buckets. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + // + // This is not supported by Amazon S3 on Outposts buckets. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + // + // This is not supported by Amazon S3 on Outposts buckets. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. 
+ // + // This is not supported by Amazon S3 on Outposts buckets. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + // + // This is not supported by Amazon S3 on Outposts buckets. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // + // This is not supported by Amazon S3 on Outposts buckets. + ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` + + // The ID of the Outposts where the bucket is being created. + // + // This is required by Amazon S3 on Outposts buckets. + OutpostId *string `location:"header" locationName:"x-amz-outpost-id" min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + if s.OutpostId != nil && len(*s.OutpostId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OutpostId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { + s.Bucket = &v + return s +} + +// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. +func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { + s.CreateBucketConfiguration = v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { + s.GrantWriteACP = &v + return s +} + +// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value. +func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput { + s.ObjectLockEnabledForBucket = &v + return s +} + +// SetOutpostId sets the OutpostId field's value. 
+func (s *CreateBucketInput) SetOutpostId(v string) *CreateBucketInput { + s.OutpostId = &v + return s +} + +func (s *CreateBucketInput) getOutpostID() (string, error) { + if s.OutpostId == nil { + return "", fmt.Errorf("member OutpostId is nil") + } + return *s.OutpostId, nil +} + +func (s *CreateBucketInput) hasOutpostID() bool { + if s.OutpostId == nil { + return false + } + return true +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the bucket. + // + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. + // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. + BucketArn *string `min:"4" type:"string"` + + // The location of the bucket. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +// SetBucketArn sets the BucketArn field's value. +func (s *CreateBucketOutput) SetBucketArn(v string) *CreateBucketOutput { + s.BucketArn = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { + s.Location = &v + return s +} + +type CreateJobInput struct { + _ struct{} `locationName:"CreateJobRequest" type:"structure" xmlURI:"http://awss3control.amazonaws.com/doc/2018-08-20/"` + + // The AWS account ID that creates the job. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // An idempotency token to ensure that you don't accidentally submit the same + // request twice. You can use any string up to the maximum length. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // Indicates whether confirmation is required before Amazon S3 runs the job. + // Confirmation is only required for jobs created through the Amazon S3 console. + ConfirmationRequired *bool `type:"boolean"` + + // A description for this job. You can use any string within the permitted length. + // Descriptions don't need to be unique and can be used for multiple jobs. + Description *string `min:"1" type:"string"` + + // Configuration parameters for the manifest. + // + // Manifest is a required field + Manifest *JobManifest `type:"structure" required:"true"` + + // The operation that you want this job to perform on each object listed in + // the manifest. For more information about the available operations, see Operations + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-operations.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Operation is a required field + Operation *JobOperation `type:"structure" required:"true"` + + // The numerical priority for this job. Higher numbers indicate higher priority. + // + // Priority is a required field + Priority *int64 `type:"integer" required:"true"` + + // Configuration parameters for the optional job-completion report. 
+ // + // Report is a required field + Report *JobReport `type:"structure" required:"true"` + + // The Amazon Resource Name (ARN) for the AWS Identity and Access Management + // (IAM) role that Batch Operations will use to run this job's operation on + // each object in the manifest. + // + // RoleArn is a required field + RoleArn *string `min:"1" type:"string" required:"true"` + + // A set of tags to associate with the S3 Batch Operations job. This is an optional + // parameter. + Tags []*S3Tag `type:"list"` +} + +// String returns the string representation +func (s CreateJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateJobInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.Description != nil && len(*s.Description) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Description", 1)) + } + if s.Manifest == nil { + invalidParams.Add(request.NewErrParamRequired("Manifest")) + } + if s.Operation == nil { + invalidParams.Add(request.NewErrParamRequired("Operation")) + } + if s.Priority == nil { + invalidParams.Add(request.NewErrParamRequired("Priority")) + } + if s.Report == nil { + invalidParams.Add(request.NewErrParamRequired("Report")) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) + } + if s.Manifest != nil { + if err := s.Manifest.Validate(); err != nil { + invalidParams.AddNested("Manifest", err.(request.ErrInvalidParams)) + } + } + if s.Operation != nil { + if err := s.Operation.Validate(); err != nil { + invalidParams.AddNested("Operation", err.(request.ErrInvalidParams)) + } + } + if s.Report != nil { + if err := s.Report.Validate(); err != nil { + invalidParams.AddNested("Report", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *CreateJobInput) SetAccountId(v string) *CreateJobInput { + s.AccountId = &v + return s +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *CreateJobInput) SetClientRequestToken(v string) *CreateJobInput { + s.ClientRequestToken = &v + return s +} + +// SetConfirmationRequired sets the ConfirmationRequired field's value. +func (s *CreateJobInput) SetConfirmationRequired(v bool) *CreateJobInput { + s.ConfirmationRequired = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateJobInput) SetDescription(v string) *CreateJobInput { + s.Description = &v + return s +} + +// SetManifest sets the Manifest field's value. 
+func (s *CreateJobInput) SetManifest(v *JobManifest) *CreateJobInput { + s.Manifest = v + return s +} + +// SetOperation sets the Operation field's value. +func (s *CreateJobInput) SetOperation(v *JobOperation) *CreateJobInput { + s.Operation = v + return s +} + +// SetPriority sets the Priority field's value. +func (s *CreateJobInput) SetPriority(v int64) *CreateJobInput { + s.Priority = &v + return s +} + +// SetReport sets the Report field's value. +func (s *CreateJobInput) SetReport(v *JobReport) *CreateJobInput { + s.Report = v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *CreateJobInput) SetRoleArn(v string) *CreateJobInput { + s.RoleArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateJobInput) SetTags(v []*S3Tag) *CreateJobInput { + s.Tags = v + return s +} + +func (s *CreateJobInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +type CreateJobOutput struct { + _ struct{} `type:"structure"` + + // The ID for this job. Amazon S3 generates this ID automatically and returns + // it after a successful Create Job request. + JobId *string `min:"5" type:"string"` +} + +// String returns the string representation +func (s CreateJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateJobOutput) GoString() string { + return s.String() +} + +// SetJobId sets the JobId field's value. +func (s *CreateJobOutput) SetJobId(v string) *CreateJobOutput { + s.JobId = &v + return s +} + +type DeleteAccessPointInput struct { + _ struct{} `locationName:"DeleteAccessPointRequest" type:"structure"` + + // The account ID for the account that owns the specified access point. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The name of the access point you want to delete. + // + // For Amazon S3 on Outposts specify the ARN of the access point accessed in + // the format arn:aws:s3-outposts:::outpost//accesspoint/. + // For example, to access the access point reports-ap through outpost my-outpost + // owned by account 123456789012 in Region us-west-2, use the URL encoding of + // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. + // The value must be URL encoded. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAccessPointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccessPointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAccessPointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAccessPointInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. 
+func (s *DeleteAccessPointInput) SetAccountId(v string) *DeleteAccessPointInput { + s.AccountId = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeleteAccessPointInput) SetName(v string) *DeleteAccessPointInput { + s.Name = &v + return s +} + +func (s *DeleteAccessPointInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +func (s *DeleteAccessPointInput) getEndpointARN() (arn.Resource, error) { + if s.Name == nil { + return nil, fmt.Errorf("member Name is nil") + } + return parseEndpointARN(*s.Name) +} + +func (s *DeleteAccessPointInput) hasEndpointARN() bool { + if s.Name == nil { + return false + } + return arn.IsARN(*s.Name) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteAccessPointInput) updateArnableField(v string) (interface{}, error) { + if s.Name == nil { + return nil, fmt.Errorf("member Name is nil") + } + s.Name = aws.String(v) + return &s, nil +} + +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. +func (s DeleteAccessPointInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type DeleteAccessPointOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccessPointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccessPointOutput) GoString() string { + return s.String() +} + +type DeleteAccessPointPolicyInput struct { + _ struct{} `locationName:"DeleteAccessPointPolicyRequest" type:"structure"` + + // The account ID for the account that owns the specified access point. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The name of the access point whose policy you want to delete. + // + // For Amazon S3 on Outposts specify the ARN of the access point accessed in + // the format arn:aws:s3-outposts:::outpost//accesspoint/. + // For example, to access the access point reports-ap through outpost my-outpost + // owned by account 123456789012 in Region us-west-2, use the URL encoding of + // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. + // The value must be URL encoded. 
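+ //
+ // As an illustrative sketch only (reusing the placeholder values above), the
+ // input for an Outposts access point might be built as:
+ //
+ //    input := &s3control.DeleteAccessPointPolicyInput{
+ //        AccountId: aws.String("123456789012"),
+ //        Name:      aws.String("arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap"),
+ //    }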
+ // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAccessPointPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccessPointPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAccessPointPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAccessPointPolicyInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DeleteAccessPointPolicyInput) SetAccountId(v string) *DeleteAccessPointPolicyInput { + s.AccountId = &v + return s +} + +// SetName sets the Name field's value. +func (s *DeleteAccessPointPolicyInput) SetName(v string) *DeleteAccessPointPolicyInput { + s.Name = &v + return s +} + +func (s *DeleteAccessPointPolicyInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +type DeleteAccessPointPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccessPointPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccessPointPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + + // The account ID that owns the Outposts bucket. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // Specifies the bucket being deleted. + // + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. + // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DeleteBucketInput) SetAccountId(v string) *DeleteBucketInput { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. +func (s DeleteBucketInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type DeleteBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketLifecycleConfigurationRequest" type:"structure"` + + // The account ID of the lifecycle configuration to delete. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The bucket ARN of the bucket. + // + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. + // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleConfigurationInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DeleteBucketLifecycleConfigurationInput) SetAccountId(v string) *DeleteBucketLifecycleConfigurationInput { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketLifecycleConfigurationInput) SetBucket(v string) *DeleteBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketLifecycleConfigurationInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +func (s *DeleteBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. 
+func (s DeleteBucketLifecycleConfigurationInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type DeleteBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` + + // The account ID of the Outposts bucket. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The ARN of the bucket. + // + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. + // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DeleteBucketPolicyInput) SetAccountId(v string) *DeleteBucketPolicyInput { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketPolicyInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. +func (s DeleteBucketPolicyInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` + + // The AWS account ID of the Outposts bucket tag set to be removed. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The bucket ARN that has the tag set to be removed. + // + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. + // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DeleteBucketTaggingInput) SetAccountId(v string) *DeleteBucketTaggingInput { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketTaggingInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. +func (s DeleteBucketTaggingInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteJobTaggingInput struct { + _ struct{} `locationName:"DeleteJobTaggingRequest" type:"structure"` + + // The AWS account ID associated with the S3 Batch Operations job. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The ID for the S3 Batch Operations job whose tags you want to delete. 
+ // + // JobId is a required field + JobId *string `location:"uri" locationName:"id" min:"5" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteJobTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteJobTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteJobTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteJobTaggingInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 5 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DeleteJobTaggingInput) SetAccountId(v string) *DeleteJobTaggingInput { + s.AccountId = &v + return s +} + +// SetJobId sets the JobId field's value. +func (s *DeleteJobTaggingInput) SetJobId(v string) *DeleteJobTaggingInput { + s.JobId = &v + return s +} + +func (s *DeleteJobTaggingInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +type DeleteJobTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteJobTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteJobTaggingOutput) GoString() string { + return s.String() +} + +type DeletePublicAccessBlockInput struct { + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` + + // The account ID for the AWS account whose PublicAccessBlock configuration + // you want to remove. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. 
+func (s *DeletePublicAccessBlockInput) SetAccountId(v string) *DeletePublicAccessBlockInput { + s.AccountId = &v + return s +} + +func (s *DeletePublicAccessBlockInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +type DeletePublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockOutput) GoString() string { + return s.String() +} + +type DescribeJobInput struct { + _ struct{} `locationName:"DescribeJobRequest" type:"structure"` + + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The ID for the job whose information you want to retrieve. + // + // JobId is a required field + JobId *string `location:"uri" locationName:"id" min:"5" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeJobInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 5 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *DescribeJobInput) SetAccountId(v string) *DescribeJobInput { + s.AccountId = &v + return s +} + +// SetJobId sets the JobId field's value. +func (s *DescribeJobInput) SetJobId(v string) *DescribeJobInput { + s.JobId = &v + return s +} + +func (s *DescribeJobInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +type DescribeJobOutput struct { + _ struct{} `type:"structure"` + + // Contains the configuration parameters and status for the job specified in + // the Describe Job request. + Job *JobDescriptor `type:"structure"` +} + +// String returns the string representation +func (s DescribeJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeJobOutput) GoString() string { + return s.String() +} + +// SetJob sets the Job field's value. +func (s *DescribeJobOutput) SetJob(v *JobDescriptor) *DescribeJobOutput { + s.Job = v + return s +} + +type GetAccessPointInput struct { + _ struct{} `locationName:"GetAccessPointRequest" type:"structure"` + + // The account ID for the account that owns the specified access point. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The name of the access point whose configuration information you want to + // retrieve. 
+ // + // For Amazon S3 on Outposts specify the ARN of the access point accessed in + // the format arn:aws:s3-outposts:::outpost//accesspoint/. + // For example, to access the access point reports-ap through outpost my-outpost + // owned by account 123456789012 in Region us-west-2, use the URL encoding of + // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. + // The value must be URL encoded. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessPointInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessPointInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessPointInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessPointInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *GetAccessPointInput) SetAccountId(v string) *GetAccessPointInput { + s.AccountId = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetAccessPointInput) SetName(v string) *GetAccessPointInput { + s.Name = &v + return s +} + +func (s *GetAccessPointInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +func (s *GetAccessPointInput) getEndpointARN() (arn.Resource, error) { + if s.Name == nil { + return nil, fmt.Errorf("member Name is nil") + } + return parseEndpointARN(*s.Name) +} + +func (s *GetAccessPointInput) hasEndpointARN() bool { + if s.Name == nil { + return false + } + return arn.IsARN(*s.Name) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetAccessPointInput) updateArnableField(v string) (interface{}, error) { + if s.Name == nil { + return nil, fmt.Errorf("member Name is nil") + } + s.Name = aws.String(v) + return &s, nil +} + +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. 
+func (s GetAccessPointInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type GetAccessPointOutput struct { + _ struct{} `type:"structure"` + + // The name of the bucket associated with the specified access point. + Bucket *string `min:"3" type:"string"` + + // The date and time when the specified access point was created. + CreationDate *time.Time `type:"timestamp"` + + // The name of the specified access point. + Name *string `min:"3" type:"string"` + + // Indicates whether this access point allows access from the public internet. + // If VpcConfiguration is specified for this access point, then NetworkOrigin + // is VPC, and the access point doesn't allow access from the public internet. + // Otherwise, NetworkOrigin is Internet, and the access point allows access + // from the public internet, subject to the access point and bucket access policies. + // + // This will always be true for an Amazon S3 on Outposts access point + NetworkOrigin *string `type:"string" enum:"NetworkOrigin"` + + // The PublicAccessBlock configuration that you want to apply to this Amazon + // S3 bucket. You can enable the configuration options in any combination. For + // more information about when Amazon S3 considers a bucket or object public, + // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon Simple Storage Service Developer Guide. + // + // This is not supported for Amazon S3 on Outposts. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` + + // Contains the virtual private cloud (VPC) configuration for the specified + // access point. + VpcConfiguration *VpcConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetAccessPointOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessPointOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *GetAccessPointOutput) SetBucket(v string) *GetAccessPointOutput { + s.Bucket = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *GetAccessPointOutput) SetCreationDate(v time.Time) *GetAccessPointOutput { + s.CreationDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetAccessPointOutput) SetName(v string) *GetAccessPointOutput { + s.Name = &v + return s +} + +// SetNetworkOrigin sets the NetworkOrigin field's value. +func (s *GetAccessPointOutput) SetNetworkOrigin(v string) *GetAccessPointOutput { + s.NetworkOrigin = &v + return s +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *GetAccessPointOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetAccessPointOutput { + s.PublicAccessBlockConfiguration = v + return s +} + +// SetVpcConfiguration sets the VpcConfiguration field's value. 
+func (s *GetAccessPointOutput) SetVpcConfiguration(v *VpcConfiguration) *GetAccessPointOutput { + s.VpcConfiguration = v + return s +} + +type GetAccessPointPolicyInput struct { + _ struct{} `locationName:"GetAccessPointPolicyRequest" type:"structure"` + + // The account ID for the account that owns the specified access point. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The name of the access point whose policy you want to retrieve. + // + // For Amazon S3 on Outposts specify the ARN of the access point accessed in + // the format arn:aws:s3-outposts:::outpost//accesspoint/. + // For example, to access the access point reports-ap through outpost my-outpost + // owned by account 123456789012 in Region us-west-2, use the URL encoding of + // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. + // The value must be URL encoded. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessPointPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessPointPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessPointPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessPointPolicyInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *GetAccessPointPolicyInput) SetAccountId(v string) *GetAccessPointPolicyInput { + s.AccountId = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetAccessPointPolicyInput) SetName(v string) *GetAccessPointPolicyInput { + s.Name = &v + return s +} + +func (s *GetAccessPointPolicyInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +type GetAccessPointPolicyOutput struct { + _ struct{} `type:"structure"` + + // The access point policy associated with the specified access point. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetAccessPointPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessPointPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetAccessPointPolicyOutput) SetPolicy(v string) *GetAccessPointPolicyOutput { + s.Policy = &v + return s +} + +type GetAccessPointPolicyStatusInput struct { + _ struct{} `locationName:"GetAccessPointPolicyStatusRequest" type:"structure"` + + // The account ID for the account that owns the specified access point. 
+ // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The name of the access point whose policy status you want to retrieve. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessPointPolicyStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessPointPolicyStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessPointPolicyStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessPointPolicyStatusInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *GetAccessPointPolicyStatusInput) SetAccountId(v string) *GetAccessPointPolicyStatusInput { + s.AccountId = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetAccessPointPolicyStatusInput) SetName(v string) *GetAccessPointPolicyStatusInput { + s.Name = &v + return s +} + +func (s *GetAccessPointPolicyStatusInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +type GetAccessPointPolicyStatusOutput struct { + _ struct{} `type:"structure"` + + // Indicates the current policy status of the specified access point. + PolicyStatus *PolicyStatus `type:"structure"` +} + +// String returns the string representation +func (s GetAccessPointPolicyStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessPointPolicyStatusOutput) GoString() string { + return s.String() +} + +// SetPolicyStatus sets the PolicyStatus field's value. +func (s *GetAccessPointPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetAccessPointPolicyStatusOutput { + s.PolicyStatus = v + return s +} + +type GetBucketInput struct { + _ struct{} `locationName:"GetBucketRequest" type:"structure"` + + // The AWS account ID of the Outposts bucket. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The ARN of the bucket. + // + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. + // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *GetBucketInput) SetAccountId(v string) *GetBucketInput { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketInput) SetBucket(v string) *GetBucketInput { + s.Bucket = &v + return s +} + +func (s *GetBucketInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +func (s *GetBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. +func (s GetBucketInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` + + // The AWS account ID of the Outposts bucket. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the bucket. + // + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. 
+ // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *GetBucketLifecycleConfigurationInput) SetAccountId(v string) *GetBucketLifecycleConfigurationInput { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. 
+func (s GetBucketLifecycleConfigurationInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Container for the lifecycle rule of the Outposts bucket. + Rules []*LifecycleRule `locationNameList:"Rule" type:"list"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +type GetBucketOutput struct { + _ struct{} `type:"structure"` + + // The Outposts bucket requested. + Bucket *string `min:"3" type:"string"` + + // The creation date of the Outposts bucket. + CreationDate *time.Time `type:"timestamp"` + + PublicAccessBlockEnabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetBucketOutput) String() string { + return awsutil.Prettify(s) +} // GoString returns the string representation -func (s AccessPoint) GoString() string { +func (s GetBucketOutput) GoString() string { return s.String() } // SetBucket sets the Bucket field's value. -func (s *AccessPoint) SetBucket(v string) *AccessPoint { +func (s *GetBucketOutput) SetBucket(v string) *GetBucketOutput { s.Bucket = &v return s } -// SetName sets the Name field's value. -func (s *AccessPoint) SetName(v string) *AccessPoint { - s.Name = &v +// SetCreationDate sets the CreationDate field's value. +func (s *GetBucketOutput) SetCreationDate(v time.Time) *GetBucketOutput { + s.CreationDate = &v return s } -// SetNetworkOrigin sets the NetworkOrigin field's value. -func (s *AccessPoint) SetNetworkOrigin(v string) *AccessPoint { - s.NetworkOrigin = &v +// SetPublicAccessBlockEnabled sets the PublicAccessBlockEnabled field's value. +func (s *GetBucketOutput) SetPublicAccessBlockEnabled(v bool) *GetBucketOutput { + s.PublicAccessBlockEnabled = &v return s } -// SetVpcConfiguration sets the VpcConfiguration field's value. -func (s *AccessPoint) SetVpcConfiguration(v *VpcConfiguration) *AccessPoint { - s.VpcConfiguration = v +type GetBucketPolicyInput struct { + _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` + + // The AWS account ID of the Outposts bucket. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The ARN of the bucket. + // + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. + // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *GetBucketPolicyInput) SetAccountId(v string) *GetBucketPolicyInput { + s.AccountId = &v return s } -type CreateAccessPointInput struct { - _ struct{} `locationName:"CreateAccessPointRequest" type:"structure" xmlURI:"http://awss3control.amazonaws.com/doc/2018-08-20/"` +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. +func (s GetBucketPolicyInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure"` + + // The policy of the Outposts bucket. 
+ Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { + s.Policy = &v + return s +} + +type GetBucketTaggingInput struct { + _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` + + // The AWS account ID of the Outposts bucket. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The ARN of the bucket. + // + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. + // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *GetBucketTaggingInput) SetAccountId(v string) *GetBucketTaggingInput { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketTaggingInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. +func (s GetBucketTaggingInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + // The tags set of the Outposts bucket. + // + // TagSet is a required field + TagSet []*S3Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetBucketTaggingOutput) SetTagSet(v []*S3Tag) *GetBucketTaggingOutput { + s.TagSet = v + return s +} + +type GetJobTaggingInput struct { + _ struct{} `locationName:"GetJobTaggingRequest" type:"structure"` + + // The AWS account ID associated with the S3 Batch Operations job. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The ID for the S3 Batch Operations job whose tags you want to retrieve. + // + // JobId is a required field + JobId *string `location:"uri" locationName:"id" min:"5" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetJobTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetJobTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetJobTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetJobTaggingInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 5 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *GetJobTaggingInput) SetAccountId(v string) *GetJobTaggingInput { + s.AccountId = &v + return s +} + +// SetJobId sets the JobId field's value. +func (s *GetJobTaggingInput) SetJobId(v string) *GetJobTaggingInput { + s.JobId = &v + return s +} + +func (s *GetJobTaggingInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +type GetJobTaggingOutput struct { + _ struct{} `type:"structure"` + + // The set of tags associated with the S3 Batch Operations job. 
+ Tags []*S3Tag `type:"list"` +} + +// String returns the string representation +func (s GetJobTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetJobTaggingOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *GetJobTaggingOutput) SetTags(v []*S3Tag) *GetJobTaggingOutput { + s.Tags = v + return s +} + +type GetPublicAccessBlockInput struct { + _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` - // The AWS account ID for the owner of the bucket for which you want to create - // an access point. + // The account ID for the AWS account whose PublicAccessBlock configuration + // you want to retrieve. // // AccountId is a required field AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` - - // The name of the bucket that you want to associate this access point with. - // - // Bucket is a required field - Bucket *string `min:"3" type:"string" required:"true"` - - // The name you want to assign to this access point. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` - - // The PublicAccessBlock configuration that you want to apply to this Amazon - // S3 bucket. You can enable the configuration options in any combination. For - // more information about when Amazon S3 considers a bucket or object public, - // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) - // in the Amazon Simple Storage Service Developer Guide. - PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` - - // If you include this field, Amazon S3 restricts access to this access point - // to requests from the specified Virtual Private Cloud (VPC). - VpcConfiguration *VpcConfiguration `type:"structure"` } // String returns the string representation -func (s CreateAccessPointInput) String() string { +func (s GetPublicAccessBlockInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateAccessPointInput) GoString() string { +func (s GetPublicAccessBlockInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *CreateAccessPointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateAccessPointInput"} +func (s *GetPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} if s.AccountId == nil { invalidParams.Add(request.NewErrParamRequired("AccountId")) } if s.AccountId != nil && len(*s.AccountId) < 1 { invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) } - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Bucket != nil && len(*s.Bucket) < 3 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 3 { - invalidParams.Add(request.NewErrParamMinLen("Name", 3)) - } - if s.VpcConfiguration != nil { - if err := s.VpcConfiguration.Validate(); err != nil { - invalidParams.AddNested("VpcConfiguration", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -1558,437 +6179,390 @@ func (s *CreateAccessPointInput) Validate() error { } // SetAccountId sets the AccountId field's value. -func (s *CreateAccessPointInput) SetAccountId(v string) *CreateAccessPointInput { +func (s *GetPublicAccessBlockInput) SetAccountId(v string) *GetPublicAccessBlockInput { s.AccountId = &v return s } -// SetBucket sets the Bucket field's value. -func (s *CreateAccessPointInput) SetBucket(v string) *CreateAccessPointInput { - s.Bucket = &v - return s -} - -// SetName sets the Name field's value. -func (s *CreateAccessPointInput) SetName(v string) *CreateAccessPointInput { - s.Name = &v - return s -} - -// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. -func (s *CreateAccessPointInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *CreateAccessPointInput { - s.PublicAccessBlockConfiguration = v - return s -} - -// SetVpcConfiguration sets the VpcConfiguration field's value. -func (s *CreateAccessPointInput) SetVpcConfiguration(v *VpcConfiguration) *CreateAccessPointInput { - s.VpcConfiguration = v - return s -} - -func (s *CreateAccessPointInput) hostLabels() map[string]string { +func (s *GetPublicAccessBlockInput) hostLabels() map[string]string { return map[string]string{ "AccountId": aws.StringValue(s.AccountId), } } -type CreateAccessPointOutput struct { - _ struct{} `type:"structure"` +type GetPublicAccessBlockOutput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The PublicAccessBlock configuration currently in effect for this AWS account. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` } // String returns the string representation -func (s CreateAccessPointOutput) String() string { +func (s GetPublicAccessBlockOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateAccessPointOutput) GoString() string { +func (s GetPublicAccessBlockOutput) GoString() string { return s.String() } -type CreateJobInput struct { - _ struct{} `locationName:"CreateJobRequest" type:"structure" xmlURI:"http://awss3control.amazonaws.com/doc/2018-08-20/"` - - // AccountId is a required field - AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. 
+func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { + s.PublicAccessBlockConfiguration = v + return s +} - // An idempotency token to ensure that you don't accidentally submit the same - // request twice. You can use any string up to the maximum length. - ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` +// A container element for the job configuration and status information returned +// by a Describe Job request. +type JobDescriptor struct { + _ struct{} `type:"structure"` - // Indicates whether confirmation is required before Amazon S3 runs the job. - // Confirmation is only required for jobs created through the Amazon S3 console. + // Indicates whether confirmation is required before Amazon S3 begins running + // the specified job. Confirmation is required only for jobs created through + // the Amazon S3 console. ConfirmationRequired *bool `type:"boolean"` - // A description for this job. You can use any string within the permitted length. - // Descriptions don't need to be unique and can be used for multiple jobs. + // A timestamp indicating when this job was created. + CreationTime *time.Time `type:"timestamp"` + + // The description for this job, if one was provided in this job's Create Job + // request. Description *string `min:"1" type:"string"` - // Configuration parameters for the manifest. - // - // Manifest is a required field - Manifest *JobManifest `type:"structure" required:"true"` + // If the specified job failed, this field contains information describing the + // failure. + FailureReasons []*JobFailure `type:"list"` - // The operation that you want this job to perform on each object listed in - // the manifest. For more information about the available operations, see Available - // Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-operations.html) - // in the Amazon Simple Storage Service Developer Guide. - // - // Operation is a required field - Operation *JobOperation `type:"structure" required:"true"` + // The Amazon Resource Name (ARN) for this job. + JobArn *string `min:"1" type:"string"` - // The numerical priority for this job. Higher numbers indicate higher priority. - // - // Priority is a required field - Priority *int64 `type:"integer" required:"true"` + // The ID for the specified job. + JobId *string `min:"5" type:"string"` - // Configuration parameters for the optional job-completion report. - // - // Report is a required field - Report *JobReport `type:"structure" required:"true"` + // The configuration information for the specified job's manifest object. + Manifest *JobManifest `type:"structure"` - // The Amazon Resource Name (ARN) for the Identity and Access Management (IAM) - // Role that batch operations will use to execute this job's operation on each - // object in the manifest. - // - // RoleArn is a required field - RoleArn *string `min:"1" type:"string" required:"true"` + // The operation that the specified job is configured to run on the objects + // listed in the manifest. + Operation *JobOperation `type:"structure"` + + // The priority of the specified job. + Priority *int64 `type:"integer"` + + // Describes the total number of tasks that the specified job has run, the number + // of tasks that succeeded, and the number of tasks that failed. 
+ ProgressSummary *JobProgressSummary `type:"structure"` + + // Contains the configuration information for the job-completion report if you + // requested one in the Create Job request. + Report *JobReport `type:"structure"` + + // The Amazon Resource Name (ARN) for the AWS Identity and Access Management + // (IAM) role assigned to run the tasks for this job. + RoleArn *string `min:"1" type:"string"` + + // The current status of the specified job. + Status *string `type:"string" enum:"JobStatus"` + + // The reason for updating the job. + StatusUpdateReason *string `min:"1" type:"string"` + + // The reason why the specified job was suspended. A job is only suspended if + // you create it through the Amazon S3 console. When you create the job, it + // enters the Suspended state to await confirmation before running. After you + // confirm the job, it automatically exits the Suspended state. + SuspendedCause *string `min:"1" type:"string"` + + // The timestamp when this job was suspended, if it has been suspended. + SuspendedDate *time.Time `type:"timestamp"` + + // A timestamp indicating when this job terminated. A job's termination date + // is the date and time when it succeeded, failed, or was canceled. + TerminationDate *time.Time `type:"timestamp"` } // String returns the string representation -func (s CreateJobInput) String() string { +func (s JobDescriptor) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateJobInput) GoString() string { +func (s JobDescriptor) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateJobInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) - } - if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) - } - if s.Description != nil && len(*s.Description) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Description", 1)) - } - if s.Manifest == nil { - invalidParams.Add(request.NewErrParamRequired("Manifest")) - } - if s.Operation == nil { - invalidParams.Add(request.NewErrParamRequired("Operation")) - } - if s.Priority == nil { - invalidParams.Add(request.NewErrParamRequired("Priority")) - } - if s.Report == nil { - invalidParams.Add(request.NewErrParamRequired("Report")) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) - } - if s.Manifest != nil { - if err := s.Manifest.Validate(); err != nil { - invalidParams.AddNested("Manifest", err.(request.ErrInvalidParams)) - } - } - if s.Operation != nil { - if err := s.Operation.Validate(); err != nil { - invalidParams.AddNested("Operation", err.(request.ErrInvalidParams)) - } - } - if s.Report != nil { - if err := s.Report.Validate(); err != nil { - invalidParams.AddNested("Report", err.(request.ErrInvalidParams)) - } - } +// SetConfirmationRequired sets the ConfirmationRequired field's value. 
+func (s *JobDescriptor) SetConfirmationRequired(v bool) *JobDescriptor { + s.ConfirmationRequired = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetCreationTime sets the CreationTime field's value. +func (s *JobDescriptor) SetCreationTime(v time.Time) *JobDescriptor { + s.CreationTime = &v + return s } -// SetAccountId sets the AccountId field's value. -func (s *CreateJobInput) SetAccountId(v string) *CreateJobInput { - s.AccountId = &v +// SetDescription sets the Description field's value. +func (s *JobDescriptor) SetDescription(v string) *JobDescriptor { + s.Description = &v return s } -// SetClientRequestToken sets the ClientRequestToken field's value. -func (s *CreateJobInput) SetClientRequestToken(v string) *CreateJobInput { - s.ClientRequestToken = &v +// SetFailureReasons sets the FailureReasons field's value. +func (s *JobDescriptor) SetFailureReasons(v []*JobFailure) *JobDescriptor { + s.FailureReasons = v return s } -// SetConfirmationRequired sets the ConfirmationRequired field's value. -func (s *CreateJobInput) SetConfirmationRequired(v bool) *CreateJobInput { - s.ConfirmationRequired = &v +// SetJobArn sets the JobArn field's value. +func (s *JobDescriptor) SetJobArn(v string) *JobDescriptor { + s.JobArn = &v return s } -// SetDescription sets the Description field's value. -func (s *CreateJobInput) SetDescription(v string) *CreateJobInput { - s.Description = &v +// SetJobId sets the JobId field's value. +func (s *JobDescriptor) SetJobId(v string) *JobDescriptor { + s.JobId = &v return s } // SetManifest sets the Manifest field's value. -func (s *CreateJobInput) SetManifest(v *JobManifest) *CreateJobInput { +func (s *JobDescriptor) SetManifest(v *JobManifest) *JobDescriptor { s.Manifest = v return s } // SetOperation sets the Operation field's value. -func (s *CreateJobInput) SetOperation(v *JobOperation) *CreateJobInput { +func (s *JobDescriptor) SetOperation(v *JobOperation) *JobDescriptor { s.Operation = v return s } // SetPriority sets the Priority field's value. -func (s *CreateJobInput) SetPriority(v int64) *CreateJobInput { +func (s *JobDescriptor) SetPriority(v int64) *JobDescriptor { s.Priority = &v return s } +// SetProgressSummary sets the ProgressSummary field's value. +func (s *JobDescriptor) SetProgressSummary(v *JobProgressSummary) *JobDescriptor { + s.ProgressSummary = v + return s +} + // SetReport sets the Report field's value. -func (s *CreateJobInput) SetReport(v *JobReport) *CreateJobInput { +func (s *JobDescriptor) SetReport(v *JobReport) *JobDescriptor { s.Report = v return s } // SetRoleArn sets the RoleArn field's value. -func (s *CreateJobInput) SetRoleArn(v string) *CreateJobInput { +func (s *JobDescriptor) SetRoleArn(v string) *JobDescriptor { s.RoleArn = &v return s } -func (s *CreateJobInput) hostLabels() map[string]string { - return map[string]string{ - "AccountId": aws.StringValue(s.AccountId), - } +// SetStatus sets the Status field's value. +func (s *JobDescriptor) SetStatus(v string) *JobDescriptor { + s.Status = &v + return s } -type CreateJobOutput struct { - _ struct{} `type:"structure"` - - // The ID for this job. Amazon S3 generates this ID automatically and returns - // it after a successful Create Job request. - JobId *string `min:"5" type:"string"` +// SetStatusUpdateReason sets the StatusUpdateReason field's value. 
+func (s *JobDescriptor) SetStatusUpdateReason(v string) *JobDescriptor { + s.StatusUpdateReason = &v + return s } -// String returns the string representation -func (s CreateJobOutput) String() string { - return awsutil.Prettify(s) +// SetSuspendedCause sets the SuspendedCause field's value. +func (s *JobDescriptor) SetSuspendedCause(v string) *JobDescriptor { + s.SuspendedCause = &v + return s } -// GoString returns the string representation -func (s CreateJobOutput) GoString() string { - return s.String() +// SetSuspendedDate sets the SuspendedDate field's value. +func (s *JobDescriptor) SetSuspendedDate(v time.Time) *JobDescriptor { + s.SuspendedDate = &v + return s } -// SetJobId sets the JobId field's value. -func (s *CreateJobOutput) SetJobId(v string) *CreateJobOutput { - s.JobId = &v +// SetTerminationDate sets the TerminationDate field's value. +func (s *JobDescriptor) SetTerminationDate(v time.Time) *JobDescriptor { + s.TerminationDate = &v return s } -type DeleteAccessPointInput struct { - _ struct{} `locationName:"DeleteAccessPointRequest" type:"structure"` +// If this job failed, this element indicates why the job failed. +type JobFailure struct { + _ struct{} `type:"structure"` - // The account ID for the account that owns the specified access point. - // - // AccountId is a required field - AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + // The failure code, if any, for the specified job. + FailureCode *string `min:"1" type:"string"` - // The name of the access point you want to delete. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` + // The failure reason, if any, for the specified job. + FailureReason *string `min:"1" type:"string"` } // String returns the string representation -func (s DeleteAccessPointInput) String() string { +func (s JobFailure) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteAccessPointInput) GoString() string { +func (s JobFailure) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteAccessPointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteAccessPointInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 3 { - invalidParams.Add(request.NewErrParamMinLen("Name", 3)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAccountId sets the AccountId field's value. -func (s *DeleteAccessPointInput) SetAccountId(v string) *DeleteAccessPointInput { - s.AccountId = &v +// SetFailureCode sets the FailureCode field's value. +func (s *JobFailure) SetFailureCode(v string) *JobFailure { + s.FailureCode = &v return s } -// SetName sets the Name field's value. -func (s *DeleteAccessPointInput) SetName(v string) *DeleteAccessPointInput { - s.Name = &v +// SetFailureReason sets the FailureReason field's value. 
+func (s *JobFailure) SetFailureReason(v string) *JobFailure { + s.FailureReason = &v return s } -func (s *DeleteAccessPointInput) hostLabels() map[string]string { - return map[string]string{ - "AccountId": aws.StringValue(s.AccountId), - } -} - -type DeleteAccessPointOutput struct { +// Contains the configuration and status information for a single job retrieved +// as part of a job list. +type JobListDescriptor struct { _ struct{} `type:"structure"` -} -// String returns the string representation -func (s DeleteAccessPointOutput) String() string { - return awsutil.Prettify(s) -} + // A timestamp indicating when the specified job was created. + CreationTime *time.Time `type:"timestamp"` -// GoString returns the string representation -func (s DeleteAccessPointOutput) GoString() string { - return s.String() -} + // The user-specified description that was included in the specified job's Create + // Job request. + Description *string `min:"1" type:"string"` -type DeleteAccessPointPolicyInput struct { - _ struct{} `locationName:"DeleteAccessPointPolicyRequest" type:"structure"` + // The ID for the specified job. + JobId *string `min:"5" type:"string"` - // The account ID for the account that owns the specified access point. - // - // AccountId is a required field - AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + // The operation that the specified job is configured to run on each object + // listed in the manifest. + Operation *string `type:"string" enum:"OperationName"` - // The name of the access point whose policy you want to delete. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` + // The current priority for the specified job. + Priority *int64 `type:"integer"` + + // Describes the total number of tasks that the specified job has run, the number + // of tasks that succeeded, and the number of tasks that failed. + ProgressSummary *JobProgressSummary `type:"structure"` + + // The specified job's current status. + Status *string `type:"string" enum:"JobStatus"` + + // A timestamp indicating when the specified job terminated. A job's termination + // date is the date and time when it succeeded, failed, or was canceled. + TerminationDate *time.Time `type:"timestamp"` } // String returns the string representation -func (s DeleteAccessPointPolicyInput) String() string { +func (s JobListDescriptor) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteAccessPointPolicyInput) GoString() string { +func (s JobListDescriptor) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteAccessPointPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteAccessPointPolicyInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 3 { - invalidParams.Add(request.NewErrParamMinLen("Name", 3)) - } +// SetCreationTime sets the CreationTime field's value. 
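The JobDescriptor, JobFailure, and JobListDescriptor shapes above are what DescribeJob and ListJobs hand back to callers of this service client. A minimal sketch of reading them follows, assuming placeholder account and job IDs and default credentials from the environment.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3control"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3control.New(sess)

	// Placeholder identifiers; substitute a real account ID and job ID.
	out, err := svc.DescribeJob(&s3control.DescribeJobInput{
		AccountId: aws.String("111122223333"),
		JobId:     aws.String("00000000-0000-0000-0000-000000000000"),
	})
	if err != nil {
		log.Fatal(err)
	}

	job := out.Job // *s3control.JobDescriptor
	fmt.Println("status:", aws.StringValue(job.Status))

	// JobProgressSummary carries the per-task counters for the job.
	if ps := job.ProgressSummary; ps != nil {
		fmt.Printf("tasks: %d total, %d succeeded, %d failed\n",
			aws.Int64Value(ps.TotalNumberOfTasks),
			aws.Int64Value(ps.NumberOfTasksSucceeded),
			aws.Int64Value(ps.NumberOfTasksFailed))
	}

	// FailureReasons is a list of JobFailure values when the job has failed.
	for _, f := range job.FailureReasons {
		fmt.Println(aws.StringValue(f.FailureCode), aws.StringValue(f.FailureReason))
	}
}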
+func (s *JobListDescriptor) SetCreationTime(v time.Time) *JobListDescriptor { + s.CreationTime = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDescription sets the Description field's value. +func (s *JobListDescriptor) SetDescription(v string) *JobListDescriptor { + s.Description = &v + return s } -// SetAccountId sets the AccountId field's value. -func (s *DeleteAccessPointPolicyInput) SetAccountId(v string) *DeleteAccessPointPolicyInput { - s.AccountId = &v +// SetJobId sets the JobId field's value. +func (s *JobListDescriptor) SetJobId(v string) *JobListDescriptor { + s.JobId = &v return s } -// SetName sets the Name field's value. -func (s *DeleteAccessPointPolicyInput) SetName(v string) *DeleteAccessPointPolicyInput { - s.Name = &v +// SetOperation sets the Operation field's value. +func (s *JobListDescriptor) SetOperation(v string) *JobListDescriptor { + s.Operation = &v return s } -func (s *DeleteAccessPointPolicyInput) hostLabels() map[string]string { - return map[string]string{ - "AccountId": aws.StringValue(s.AccountId), - } +// SetPriority sets the Priority field's value. +func (s *JobListDescriptor) SetPriority(v int64) *JobListDescriptor { + s.Priority = &v + return s } -type DeleteAccessPointPolicyOutput struct { - _ struct{} `type:"structure"` +// SetProgressSummary sets the ProgressSummary field's value. +func (s *JobListDescriptor) SetProgressSummary(v *JobProgressSummary) *JobListDescriptor { + s.ProgressSummary = v + return s } -// String returns the string representation -func (s DeleteAccessPointPolicyOutput) String() string { - return awsutil.Prettify(s) +// SetStatus sets the Status field's value. +func (s *JobListDescriptor) SetStatus(v string) *JobListDescriptor { + s.Status = &v + return s } -// GoString returns the string representation -func (s DeleteAccessPointPolicyOutput) GoString() string { - return s.String() +// SetTerminationDate sets the TerminationDate field's value. +func (s *JobListDescriptor) SetTerminationDate(v time.Time) *JobListDescriptor { + s.TerminationDate = &v + return s } -type DeletePublicAccessBlockInput struct { - _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` +// Contains the configuration information for a job's manifest. +type JobManifest struct { + _ struct{} `type:"structure"` - // The account ID for the Amazon Web Services account whose PublicAccessBlock - // configuration you want to remove. + // Contains the information required to locate the specified job's manifest. // - // AccountId is a required field - AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + // Location is a required field + Location *JobManifestLocation `type:"structure" required:"true"` + + // Describes the format of the specified job's manifest. If the manifest is + // in CSV format, also describes the columns contained within the manifest. + // + // Spec is a required field + Spec *JobManifestSpec `type:"structure" required:"true"` } // String returns the string representation -func (s DeletePublicAccessBlockInput) String() string { +func (s JobManifest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeletePublicAccessBlockInput) GoString() string { +func (s JobManifest) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeletePublicAccessBlockInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) +func (s *JobManifest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JobManifest"} + if s.Location == nil { + invalidParams.Add(request.NewErrParamRequired("Location")) } - if s.AccountId != nil && len(*s.AccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + if s.Spec == nil { + invalidParams.Add(request.NewErrParamRequired("Spec")) + } + if s.Location != nil { + if err := s.Location.Validate(); err != nil { + invalidParams.AddNested("Location", err.(request.ErrInvalidParams)) + } + } + if s.Spec != nil { + if err := s.Spec.Validate(); err != nil { + invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -1997,68 +6571,63 @@ func (s *DeletePublicAccessBlockInput) Validate() error { return nil } -// SetAccountId sets the AccountId field's value. -func (s *DeletePublicAccessBlockInput) SetAccountId(v string) *DeletePublicAccessBlockInput { - s.AccountId = &v +// SetLocation sets the Location field's value. +func (s *JobManifest) SetLocation(v *JobManifestLocation) *JobManifest { + s.Location = v return s } -func (s *DeletePublicAccessBlockInput) hostLabels() map[string]string { - return map[string]string{ - "AccountId": aws.StringValue(s.AccountId), - } +// SetSpec sets the Spec field's value. +func (s *JobManifest) SetSpec(v *JobManifestSpec) *JobManifest { + s.Spec = v + return s } -type DeletePublicAccessBlockOutput struct { +// Contains the information required to locate a manifest object. +type JobManifestLocation struct { _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeletePublicAccessBlockOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeletePublicAccessBlockOutput) GoString() string { - return s.String() -} - -type DescribeJobInput struct { - _ struct{} `locationName:"DescribeJobRequest" type:"structure"` - // AccountId is a required field - AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + // The ETag for the specified manifest object. + // + // ETag is a required field + ETag *string `min:"1" type:"string" required:"true"` - // The ID for the job whose information you want to retrieve. + // The Amazon Resource Name (ARN) for a manifest object. // - // JobId is a required field - JobId *string `location:"uri" locationName:"id" min:"5" type:"string" required:"true"` + // ObjectArn is a required field + ObjectArn *string `min:"1" type:"string" required:"true"` + + // The optional version ID to identify a specific version of the manifest object. + ObjectVersionId *string `min:"1" type:"string"` } // String returns the string representation -func (s DescribeJobInput) String() string { +func (s JobManifestLocation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeJobInput) GoString() string { +func (s JobManifestLocation) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeJobInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeJobInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) +func (s *JobManifestLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JobManifestLocation"} + if s.ETag == nil { + invalidParams.Add(request.NewErrParamRequired("ETag")) } - if s.AccountId != nil && len(*s.AccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + if s.ETag != nil && len(*s.ETag) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ETag", 1)) } - if s.JobId == nil { - invalidParams.Add(request.NewErrParamRequired("JobId")) + if s.ObjectArn == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectArn")) } - if s.JobId != nil && len(*s.JobId) < 5 { - invalidParams.Add(request.NewErrParamMinLen("JobId", 5)) + if s.ObjectArn != nil && len(*s.ObjectArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ObjectArn", 1)) + } + if s.ObjectVersionId != nil && len(*s.ObjectVersionId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ObjectVersionId", 1)) } if invalidParams.Len() > 0 { @@ -2067,87 +6636,158 @@ func (s *DescribeJobInput) Validate() error { return nil } -// SetAccountId sets the AccountId field's value. -func (s *DescribeJobInput) SetAccountId(v string) *DescribeJobInput { - s.AccountId = &v +// SetETag sets the ETag field's value. +func (s *JobManifestLocation) SetETag(v string) *JobManifestLocation { + s.ETag = &v return s } -// SetJobId sets the JobId field's value. -func (s *DescribeJobInput) SetJobId(v string) *DescribeJobInput { - s.JobId = &v +// SetObjectArn sets the ObjectArn field's value. +func (s *JobManifestLocation) SetObjectArn(v string) *JobManifestLocation { + s.ObjectArn = &v return s } -func (s *DescribeJobInput) hostLabels() map[string]string { - return map[string]string{ - "AccountId": aws.StringValue(s.AccountId), - } +// SetObjectVersionId sets the ObjectVersionId field's value. +func (s *JobManifestLocation) SetObjectVersionId(v string) *JobManifestLocation { + s.ObjectVersionId = &v + return s } -type DescribeJobOutput struct { +// Describes the format of a manifest. If the manifest is in CSV format, also +// describes the columns contained within the manifest. +type JobManifestSpec struct { _ struct{} `type:"structure"` - // Contains the configuration parameters and status for the job specified in - // the Describe Job request. - Job *JobDescriptor `type:"structure"` + // If the specified manifest object is in the S3BatchOperations_CSV_20180820 + // format, this element describes which columns contain the required data. + Fields []*string `type:"list"` + + // Indicates which of the available formats the specified manifest uses. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"JobManifestFormat"` } // String returns the string representation -func (s DescribeJobOutput) String() string { +func (s JobManifestSpec) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeJobOutput) GoString() string { +func (s JobManifestSpec) GoString() string { return s.String() } -// SetJob sets the Job field's value. -func (s *DescribeJobOutput) SetJob(v *JobDescriptor) *DescribeJobOutput { - s.Job = v +// Validate inspects the fields of the type to determine if they are valid. 
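JobManifest ties a Batch Operations job to its manifest object through the JobManifestLocation and JobManifestSpec shapes defined here. Below is a minimal sketch of describing a CSV manifest, assuming a placeholder object ARN and ETag; the enum strings follow the JobManifestFormat and manifest field-name values this API accepts.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3control"
)

func main() {
	// Placeholder manifest location: a CSV listing of bucket/key pairs.
	manifest := &s3control.JobManifest{
		Location: &s3control.JobManifestLocation{
			ObjectArn: aws.String("arn:aws:s3:::example-manifests/batch-manifest.csv"),
			ETag:      aws.String("60e460c9d1046e73f7dde5043ac3ae85"),
		},
		Spec: &s3control.JobManifestSpec{
			// JobManifestFormat enum value for a CSV manifest.
			Format: aws.String("S3BatchOperations_CSV_20180820"),
			// For CSV manifests, Fields names the columns in order.
			Fields: []*string{aws.String("Bucket"), aws.String("Key")},
		},
	}

	// The nested Validate methods added in this file catch missing required
	// members (Location, Spec, ETag, ObjectArn) before a request is sent.
	if err := manifest.Validate(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("manifest ok:", manifest.String())
}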
+func (s *JobManifestSpec) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JobManifestSpec"} + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFields sets the Fields field's value. +func (s *JobManifestSpec) SetFields(v []*string) *JobManifestSpec { + s.Fields = v return s } -type GetAccessPointInput struct { - _ struct{} `locationName:"GetAccessPointRequest" type:"structure"` +// SetFormat sets the Format field's value. +func (s *JobManifestSpec) SetFormat(v string) *JobManifestSpec { + s.Format = &v + return s +} - // The account ID for the account that owns the specified access point. - // - // AccountId is a required field - AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` +// The operation that you want this job to perform on each object listed in +// the manifest. For more information about the available operations, see Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-operations.html) +// in the Amazon Simple Storage Service Developer Guide. +type JobOperation struct { + _ struct{} `type:"structure"` - // The name of the access point whose configuration information you want to - // retrieve. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` + // Directs the specified job to invoke an AWS Lambda function on each object + // in the manifest. + LambdaInvoke *LambdaInvokeOperation `type:"structure"` + + // Directs the specified job to run an Initiate Glacier Restore call on each + // object in the manifest. + S3InitiateRestoreObject *S3InitiateRestoreObjectOperation `type:"structure"` + + // Directs the specified job to run a PUT Object acl call on each object in + // the manifest. + S3PutObjectAcl *S3SetObjectAclOperation `type:"structure"` + + // Directs the specified job to run a PUT Copy object call on each object in + // the manifest. + S3PutObjectCopy *S3CopyObjectOperation `type:"structure"` + + // Contains the configuration for an S3 Object Lock legal hold operation that + // an S3 Batch Operations job passes each object through to the underlying PutObjectLegalHold + // API. For more information, see Using S3 Object Lock legal hold with S3 Batch + // Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-legal-hold.html) + // in the Amazon Simple Storage Service Developer Guide. + S3PutObjectLegalHold *S3SetObjectLegalHoldOperation `type:"structure"` + + // Contains the configuration parameters for the Object Lock retention action + // for an S3 Batch Operations job. Batch Operations passes each value through + // to the underlying PutObjectRetention API. For more information, see Using + // S3 Object Lock retention with S3 Batch Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-retention-date.html) + // in the Amazon Simple Storage Service Developer Guide. + S3PutObjectRetention *S3SetObjectRetentionOperation `type:"structure"` + + // Directs the specified job to run a PUT Object tagging call on each object + // in the manifest. 
+ S3PutObjectTagging *S3SetObjectTaggingOperation `type:"structure"` } // String returns the string representation -func (s GetAccessPointInput) String() string { +func (s JobOperation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAccessPointInput) GoString() string { +func (s JobOperation) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetAccessPointInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAccessPointInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) +func (s *JobOperation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JobOperation"} + if s.LambdaInvoke != nil { + if err := s.LambdaInvoke.Validate(); err != nil { + invalidParams.AddNested("LambdaInvoke", err.(request.ErrInvalidParams)) + } } - if s.AccountId != nil && len(*s.AccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + if s.S3PutObjectAcl != nil { + if err := s.S3PutObjectAcl.Validate(); err != nil { + invalidParams.AddNested("S3PutObjectAcl", err.(request.ErrInvalidParams)) + } } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) + if s.S3PutObjectCopy != nil { + if err := s.S3PutObjectCopy.Validate(); err != nil { + invalidParams.AddNested("S3PutObjectCopy", err.(request.ErrInvalidParams)) + } } - if s.Name != nil && len(*s.Name) < 3 { - invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + if s.S3PutObjectLegalHold != nil { + if err := s.S3PutObjectLegalHold.Validate(); err != nil { + invalidParams.AddNested("S3PutObjectLegalHold", err.(request.ErrInvalidParams)) + } + } + if s.S3PutObjectRetention != nil { + if err := s.S3PutObjectRetention.Validate(); err != nil { + invalidParams.AddNested("S3PutObjectRetention", err.(request.ErrInvalidParams)) + } + } + if s.S3PutObjectTagging != nil { + if err := s.S3PutObjectTagging.Validate(); err != nil { + invalidParams.AddNested("S3PutObjectTagging", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -2156,139 +6796,134 @@ func (s *GetAccessPointInput) Validate() error { return nil } -// SetAccountId sets the AccountId field's value. -func (s *GetAccessPointInput) SetAccountId(v string) *GetAccessPointInput { - s.AccountId = &v +// SetLambdaInvoke sets the LambdaInvoke field's value. +func (s *JobOperation) SetLambdaInvoke(v *LambdaInvokeOperation) *JobOperation { + s.LambdaInvoke = v + return s +} + +// SetS3InitiateRestoreObject sets the S3InitiateRestoreObject field's value. +func (s *JobOperation) SetS3InitiateRestoreObject(v *S3InitiateRestoreObjectOperation) *JobOperation { + s.S3InitiateRestoreObject = v + return s +} + +// SetS3PutObjectAcl sets the S3PutObjectAcl field's value. +func (s *JobOperation) SetS3PutObjectAcl(v *S3SetObjectAclOperation) *JobOperation { + s.S3PutObjectAcl = v + return s +} + +// SetS3PutObjectCopy sets the S3PutObjectCopy field's value. +func (s *JobOperation) SetS3PutObjectCopy(v *S3CopyObjectOperation) *JobOperation { + s.S3PutObjectCopy = v + return s +} + +// SetS3PutObjectLegalHold sets the S3PutObjectLegalHold field's value. +func (s *JobOperation) SetS3PutObjectLegalHold(v *S3SetObjectLegalHoldOperation) *JobOperation { + s.S3PutObjectLegalHold = v return s } -// SetName sets the Name field's value. 
-func (s *GetAccessPointInput) SetName(v string) *GetAccessPointInput { - s.Name = &v +// SetS3PutObjectRetention sets the S3PutObjectRetention field's value. +func (s *JobOperation) SetS3PutObjectRetention(v *S3SetObjectRetentionOperation) *JobOperation { + s.S3PutObjectRetention = v return s } -func (s *GetAccessPointInput) hostLabels() map[string]string { - return map[string]string{ - "AccountId": aws.StringValue(s.AccountId), - } +// SetS3PutObjectTagging sets the S3PutObjectTagging field's value. +func (s *JobOperation) SetS3PutObjectTagging(v *S3SetObjectTaggingOperation) *JobOperation { + s.S3PutObjectTagging = v + return s } -type GetAccessPointOutput struct { +// Describes the total number of tasks that the specified job has started, the +// number of tasks that succeeded, and the number of tasks that failed. +type JobProgressSummary struct { _ struct{} `type:"structure"` - // The name of the bucket associated with the specified access point. - Bucket *string `min:"3" type:"string"` - - // The date and time when the specified access point was created. - CreationDate *time.Time `type:"timestamp"` - - // The name of the specified access point. - Name *string `min:"3" type:"string"` - - // Indicates whether this access point allows access from the public Internet. - // If VpcConfiguration is specified for this access point, then NetworkOrigin - // is VPC, and the access point doesn't allow access from the public Internet. - // Otherwise, NetworkOrigin is Internet, and the access point allows access - // from the public Internet, subject to the access point and bucket access policies. - NetworkOrigin *string `type:"string" enum:"NetworkOrigin"` + NumberOfTasksFailed *int64 `type:"long"` - // The PublicAccessBlock configuration that you want to apply to this Amazon - // S3 bucket. You can enable the configuration options in any combination. For - // more information about when Amazon S3 considers a bucket or object public, - // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) - // in the Amazon Simple Storage Service Developer Guide. - PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` + NumberOfTasksSucceeded *int64 `type:"long"` - // Contains the Virtual Private Cloud (VPC) configuration for the specified - // access point. - VpcConfiguration *VpcConfiguration `type:"structure"` + TotalNumberOfTasks *int64 `type:"long"` } // String returns the string representation -func (s GetAccessPointOutput) String() string { +func (s JobProgressSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAccessPointOutput) GoString() string { +func (s JobProgressSummary) GoString() string { return s.String() } -// SetBucket sets the Bucket field's value. -func (s *GetAccessPointOutput) SetBucket(v string) *GetAccessPointOutput { - s.Bucket = &v +// SetNumberOfTasksFailed sets the NumberOfTasksFailed field's value. +func (s *JobProgressSummary) SetNumberOfTasksFailed(v int64) *JobProgressSummary { + s.NumberOfTasksFailed = &v return s } -// SetCreationDate sets the CreationDate field's value. -func (s *GetAccessPointOutput) SetCreationDate(v time.Time) *GetAccessPointOutput { - s.CreationDate = &v +// SetNumberOfTasksSucceeded sets the NumberOfTasksSucceeded field's value. 
+func (s *JobProgressSummary) SetNumberOfTasksSucceeded(v int64) *JobProgressSummary { + s.NumberOfTasksSucceeded = &v return s } -// SetName sets the Name field's value. -func (s *GetAccessPointOutput) SetName(v string) *GetAccessPointOutput { - s.Name = &v +// SetTotalNumberOfTasks sets the TotalNumberOfTasks field's value. +func (s *JobProgressSummary) SetTotalNumberOfTasks(v int64) *JobProgressSummary { + s.TotalNumberOfTasks = &v return s } -// SetNetworkOrigin sets the NetworkOrigin field's value. -func (s *GetAccessPointOutput) SetNetworkOrigin(v string) *GetAccessPointOutput { - s.NetworkOrigin = &v - return s -} +// Contains the configuration parameters for a job-completion report. +type JobReport struct { + _ struct{} `type:"structure"` -// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. -func (s *GetAccessPointOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetAccessPointOutput { - s.PublicAccessBlockConfiguration = v - return s -} + // The Amazon Resource Name (ARN) for the bucket where specified job-completion + // report will be stored. + Bucket *string `min:"1" type:"string"` -// SetVpcConfiguration sets the VpcConfiguration field's value. -func (s *GetAccessPointOutput) SetVpcConfiguration(v *VpcConfiguration) *GetAccessPointOutput { - s.VpcConfiguration = v - return s -} + // Indicates whether the specified job will generate a job-completion report. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` -type GetAccessPointPolicyInput struct { - _ struct{} `locationName:"GetAccessPointPolicyRequest" type:"structure"` + // The format of the specified job-completion report. + Format *string `type:"string" enum:"JobReportFormat"` - // The account ID for the account that owns the specified access point. - // - // AccountId is a required field - AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + // An optional prefix to describe where in the specified bucket the job-completion + // report will be stored. Amazon S3 stores the job-completion report at /job-/report.json. + Prefix *string `min:"1" type:"string"` - // The name of the access point whose policy you want to retrieve. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` + // Indicates whether the job-completion report will include details of all tasks + // or only failed tasks. + ReportScope *string `type:"string" enum:"JobReportScope"` } // String returns the string representation -func (s GetAccessPointPolicyInput) String() string { +func (s JobReport) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAccessPointPolicyInput) GoString() string { +func (s JobReport) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetAccessPointPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAccessPointPolicyInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) +func (s *JobReport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JobReport"} + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) } - if s.Name != nil && len(*s.Name) < 3 { - invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + if s.Prefix != nil && len(*s.Prefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Prefix", 1)) } if invalidParams.Len() > 0 { @@ -2297,85 +6932,104 @@ func (s *GetAccessPointPolicyInput) Validate() error { return nil } -// SetAccountId sets the AccountId field's value. -func (s *GetAccessPointPolicyInput) SetAccountId(v string) *GetAccessPointPolicyInput { - s.AccountId = &v +// SetBucket sets the Bucket field's value. +func (s *JobReport) SetBucket(v string) *JobReport { + s.Bucket = &v return s } -// SetName sets the Name field's value. -func (s *GetAccessPointPolicyInput) SetName(v string) *GetAccessPointPolicyInput { - s.Name = &v +// SetEnabled sets the Enabled field's value. +func (s *JobReport) SetEnabled(v bool) *JobReport { + s.Enabled = &v return s } -func (s *GetAccessPointPolicyInput) hostLabels() map[string]string { - return map[string]string{ - "AccountId": aws.StringValue(s.AccountId), - } +// SetFormat sets the Format field's value. +func (s *JobReport) SetFormat(v string) *JobReport { + s.Format = &v + return s } -type GetAccessPointPolicyOutput struct { +// SetPrefix sets the Prefix field's value. +func (s *JobReport) SetPrefix(v string) *JobReport { + s.Prefix = &v + return s +} + +// SetReportScope sets the ReportScope field's value. +func (s *JobReport) SetReportScope(v string) *JobReport { + s.ReportScope = &v + return s +} + +// Contains the configuration parameters for a Lambda Invoke operation. +type LambdaInvokeOperation struct { _ struct{} `type:"structure"` - // The access point policy associated with the specified access point. - Policy *string `type:"string"` + // The Amazon Resource Name (ARN) for the AWS Lambda function that the specified + // job will invoke for each object in the manifest. + FunctionArn *string `min:"1" type:"string"` } // String returns the string representation -func (s GetAccessPointPolicyOutput) String() string { +func (s LambdaInvokeOperation) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAccessPointPolicyOutput) GoString() string { +func (s LambdaInvokeOperation) GoString() string { return s.String() } -// SetPolicy sets the Policy field's value. -func (s *GetAccessPointPolicyOutput) SetPolicy(v string) *GetAccessPointPolicyOutput { - s.Policy = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
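JobOperation carries exactly one operation member per job, and JobReport controls the optional completion report. The sketch below wires both, together with a manifest like the one above, into a CreateJob call; the account ID, role ARN, bucket ARNs, and tag values are placeholders, and object tagging stands in for any of the other operation members.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3control"
)

func main() {
	svc := s3control.New(session.Must(session.NewSession()))

	// Exactly one member of JobOperation is set; here, object tagging.
	operation := &s3control.JobOperation{
		S3PutObjectTagging: &s3control.S3SetObjectTaggingOperation{
			TagSet: []*s3control.S3Tag{
				{Key: aws.String("project"), Value: aws.String("archive")},
			},
		},
	}

	// Completion report written to a placeholder bucket ARN and prefix.
	report := &s3control.JobReport{
		Enabled:     aws.Bool(true),
		Bucket:      aws.String("arn:aws:s3:::example-batch-reports"),
		Prefix:      aws.String("reports"),
		Format:      aws.String("Report_CSV_20180820"),
		ReportScope: aws.String("FailedTasksOnly"),
	}

	out, err := svc.CreateJob(&s3control.CreateJobInput{
		AccountId:            aws.String("111122223333"),
		ConfirmationRequired: aws.Bool(true),
		Description:          aws.String("retag archived objects"),
		Manifest:             exampleManifest(),
		Operation:            operation,
		Priority:             aws.Int64(10),
		Report:               report,
		RoleArn:              aws.String("arn:aws:iam::111122223333:role/example-batch-role"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created job:", aws.StringValue(out.JobId))
}

// exampleManifest returns a placeholder manifest; see the JobManifest sketch above.
func exampleManifest() *s3control.JobManifest {
	return &s3control.JobManifest{
		Location: &s3control.JobManifestLocation{
			ObjectArn: aws.String("arn:aws:s3:::example-manifests/batch-manifest.csv"),
			ETag:      aws.String("60e460c9d1046e73f7dde5043ac3ae85"),
		},
		Spec: &s3control.JobManifestSpec{
			Format: aws.String("S3BatchOperations_CSV_20180820"),
			Fields: []*string{aws.String("Bucket"), aws.String("Key")},
		},
	}
}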
+func (s *LambdaInvokeOperation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaInvokeOperation"} + if s.FunctionArn != nil && len(*s.FunctionArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FunctionArn", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -type GetAccessPointPolicyStatusInput struct { - _ struct{} `locationName:"GetAccessPointPolicyStatusRequest" type:"structure"` +// SetFunctionArn sets the FunctionArn field's value. +func (s *LambdaInvokeOperation) SetFunctionArn(v string) *LambdaInvokeOperation { + s.FunctionArn = &v + return s +} - // The account ID for the account that owns the specified access point. - // - // AccountId is a required field - AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` +// The container for the Outposts bucket lifecycle configuration. +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` - // The name of the access point whose policy status you want to retrieve. - // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` + // A lifecycle rule for individual objects in an Outposts bucket. + Rules []*LifecycleRule `locationNameList:"Rule" type:"list"` } // String returns the string representation -func (s GetAccessPointPolicyStatusInput) String() string { +func (s LifecycleConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAccessPointPolicyStatusInput) GoString() string { +func (s LifecycleConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetAccessPointPolicyStatusInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetAccessPointPolicyStatusInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) - } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) - } - if s.Name != nil && len(*s.Name) < 3 { - invalidParams.Add(request.NewErrParamMinLen("Name", 3)) +func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -2384,75 +7038,126 @@ func (s *GetAccessPointPolicyStatusInput) Validate() error { return nil } -// SetAccountId sets the AccountId field's value. -func (s *GetAccessPointPolicyStatusInput) SetAccountId(v string) *GetAccessPointPolicyStatusInput { - s.AccountId = &v +// SetRules sets the Rules field's value. +func (s *LifecycleConfiguration) SetRules(v []*LifecycleRule) *LifecycleConfiguration { + s.Rules = v return s } -// SetName sets the Name field's value. -func (s *GetAccessPointPolicyStatusInput) SetName(v string) *GetAccessPointPolicyStatusInput { - s.Name = &v - return s -} +// The container of the Outposts bucket lifecycle expiration. 
+type LifecycleExpiration struct { + _ struct{} `type:"structure"` -func (s *GetAccessPointPolicyStatusInput) hostLabels() map[string]string { - return map[string]string{ - "AccountId": aws.StringValue(s.AccountId), - } -} + // Indicates at what date the object is to be deleted. Should be in GMT ISO + // 8601 format. + Date *time.Time `type:"timestamp"` -type GetAccessPointPolicyStatusOutput struct { - _ struct{} `type:"structure"` + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` - // Indicates the current policy status of the specified access point. - PolicyStatus *PolicyStatus `type:"structure"` + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired. If set to false, + // the policy takes no action. This cannot be specified with Days or Date in + // a Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker *bool `type:"boolean"` } // String returns the string representation -func (s GetAccessPointPolicyStatusOutput) String() string { +func (s LifecycleExpiration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetAccessPointPolicyStatusOutput) GoString() string { +func (s LifecycleExpiration) GoString() string { return s.String() } -// SetPolicyStatus sets the PolicyStatus field's value. -func (s *GetAccessPointPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetAccessPointPolicyStatusOutput { - s.PolicyStatus = v - return s -} +// SetDate sets the Date field's value. +func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { + s.Days = &v + return s +} + +// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. +func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { + s.ExpiredObjectDeleteMarker = &v + return s +} + +// The container for the Outposts bucket lifecycle rule. +type LifecycleRule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 waits before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + // Specifies the expiration for the lifecycle of the object in the form of date, + // days and, whether the object has a delete marker. + Expiration *LifecycleExpiration `type:"structure"` + + // The container for the filter of lifecycle rule. + Filter *LifecycleRuleFilter `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // The noncurrent version expiration of the lifecycle rule. + // + // This is not supported by Amazon S3 on Outposts buckets. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Specifies the transition rule for the lifecycle rule that describes when + // noncurrent objects transition to a specific storage class. 
If your bucket + // is versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to a specific + // storage class at a set period in the object's lifetime. + // + // This is not supported by Amazon S3 on Outposts buckets. + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationNameList:"NoncurrentVersionTransition" type:"list"` -type GetPublicAccessBlockInput struct { - _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - // The account ID for the Amazon Web Services account whose PublicAccessBlock - // configuration you want to retrieve. + // Specifies when an Amazon S3 object transitions to a specified storage class. // - // AccountId is a required field - AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + // This is not supported by Amazon S3 on Outposts buckets. + Transitions []*Transition `locationNameList:"Transition" type:"list"` } // String returns the string representation -func (s GetPublicAccessBlockInput) String() string { +func (s LifecycleRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetPublicAccessBlockInput) GoString() string { +func (s LifecycleRule) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetPublicAccessBlockInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} - if s.AccountId == nil { - invalidParams.Add(request.NewErrParamRequired("AccountId")) - } - if s.AccountId != nil && len(*s.AccountId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -2461,456 +7166,493 @@ func (s *GetPublicAccessBlockInput) Validate() error { return nil } -// SetAccountId sets the AccountId field's value. -func (s *GetPublicAccessBlockInput) SetAccountId(v string) *GetPublicAccessBlockInput { - s.AccountId = &v +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { + s.AbortIncompleteMultipartUpload = v return s } -func (s *GetPublicAccessBlockInput) hostLabels() map[string]string { - return map[string]string{ - "AccountId": aws.StringValue(s.AccountId), - } +// SetExpiration sets the Expiration field's value. +func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { + s.Expiration = v + return s } -type GetPublicAccessBlockOutput struct { - _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` - - // The PublicAccessBlock configuration currently in effect for this Amazon Web - // Services account. - PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` +// SetFilter sets the Filter field's value. 
+func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { + s.Filter = v + return s } -// String returns the string representation -func (s GetPublicAccessBlockOutput) String() string { - return awsutil.Prettify(s) +// SetID sets the ID field's value. +func (s *LifecycleRule) SetID(v string) *LifecycleRule { + s.ID = &v + return s } -// GoString returns the string representation -func (s GetPublicAccessBlockOutput) GoString() string { - return s.String() +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { + s.NoncurrentVersionExpiration = v + return s } -// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. -func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { - s.PublicAccessBlockConfiguration = v +// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. +func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { + s.NoncurrentVersionTransitions = v return s } -// A container element for the job configuration and status information returned -// by a Describe Job request. -type JobDescriptor struct { - _ struct{} `type:"structure"` - - // Indicates whether confirmation is required before Amazon S3 begins running - // the specified job. Confirmation is required only for jobs created through - // the Amazon S3 console. - ConfirmationRequired *bool `type:"boolean"` - - // A timestamp indicating when this job was created. - CreationTime *time.Time `type:"timestamp"` - - // The description for this job, if one was provided in this job's Create Job - // request. - Description *string `min:"1" type:"string"` +// SetStatus sets the Status field's value. +func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { + s.Status = &v + return s +} - // If the specified job failed, this field contains information describing the - // failure. - FailureReasons []*JobFailure `type:"list"` +// SetTransitions sets the Transitions field's value. +func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { + s.Transitions = v + return s +} - // The Amazon Resource Name (ARN) for this job. - JobArn *string `min:"1" type:"string"` +// The container for the Outposts bucket lifecycle rule and operator. +type LifecycleRuleAndOperator struct { + _ struct{} `type:"structure"` - // The ID for the specified job. - JobId *string `min:"5" type:"string"` + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string"` - // The configuration information for the specified job's manifest object. - Manifest *JobManifest `type:"structure"` + // All of these tags must exist in the object's tag set in order for the rule + // to apply. + Tags []*S3Tag `type:"list"` +} - // The operation that the specified job is configured to execute on the objects - // listed in the manifest. - Operation *JobOperation `type:"structure"` +// String returns the string representation +func (s LifecycleRuleAndOperator) String() string { + return awsutil.Prettify(s) +} - // The priority of the specified job. 
- Priority *int64 `type:"integer"` +// GoString returns the string representation +func (s LifecycleRuleAndOperator) GoString() string { + return s.String() +} - // Describes the total number of tasks that the specified job has executed, - // the number of tasks that succeeded, and the number of tasks that failed. - ProgressSummary *JobProgressSummary `type:"structure"` +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } - // Contains the configuration information for the job-completion report if you - // requested one in the Create Job request. - Report *JobReport `type:"structure"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} - // The Amazon Resource Name (ARN) for the Identity and Access Management (IAM) - // Role assigned to execute the tasks for this job. - RoleArn *string `min:"1" type:"string"` +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { + s.Prefix = &v + return s +} - // The current status of the specified job. - Status *string `type:"string" enum:"JobStatus"` +// SetTags sets the Tags field's value. +func (s *LifecycleRuleAndOperator) SetTags(v []*S3Tag) *LifecycleRuleAndOperator { + s.Tags = v + return s +} - StatusUpdateReason *string `min:"1" type:"string"` +// The container for the filter of the lifecycle rule. +type LifecycleRuleFilter struct { + _ struct{} `type:"structure"` - // The reason why the specified job was suspended. A job is only suspended if - // you create it through the Amazon S3 console. When you create the job, it - // enters the Suspended state to await confirmation before running. After you - // confirm the job, it automatically exits the Suspended state. - SuspendedCause *string `min:"1" type:"string"` + // The container for the AND condition for the lifecycle rule. + And *LifecycleRuleAndOperator `type:"structure"` - // The timestamp when this job was suspended, if it has been suspended. - SuspendedDate *time.Time `type:"timestamp"` + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string"` - // A timestamp indicating when this job terminated. A job's termination date - // is the date and time when it succeeded, failed, or was canceled. - TerminationDate *time.Time `type:"timestamp"` + Tag *S3Tag `type:"structure"` } // String returns the string representation -func (s JobDescriptor) String() string { +func (s LifecycleRuleFilter) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobDescriptor) GoString() string { +func (s LifecycleRuleFilter) GoString() string { return s.String() } -// SetConfirmationRequired sets the ConfirmationRequired field's value. -func (s *JobDescriptor) SetConfirmationRequired(v bool) *JobDescriptor { - s.ConfirmationRequired = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *LifecycleRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } -// SetCreationTime sets the CreationTime field's value. -func (s *JobDescriptor) SetCreationTime(v time.Time) *JobDescriptor { - s.CreationTime = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetDescription sets the Description field's value. -func (s *JobDescriptor) SetDescription(v string) *JobDescriptor { - s.Description = &v +// SetAnd sets the And field's value. +func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { + s.And = v return s } -// SetFailureReasons sets the FailureReasons field's value. -func (s *JobDescriptor) SetFailureReasons(v []*JobFailure) *JobDescriptor { - s.FailureReasons = v +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v return s } -// SetJobArn sets the JobArn field's value. -func (s *JobDescriptor) SetJobArn(v string) *JobDescriptor { - s.JobArn = &v +// SetTag sets the Tag field's value. +func (s *LifecycleRuleFilter) SetTag(v *S3Tag) *LifecycleRuleFilter { + s.Tag = v return s } -// SetJobId sets the JobId field's value. -func (s *JobDescriptor) SetJobId(v string) *JobDescriptor { - s.JobId = &v - return s +type ListAccessPointsInput struct { + _ struct{} `locationName:"ListAccessPointsRequest" type:"structure"` + + // The AWS account ID for owner of the bucket whose access points you want to + // list. + // + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` + + // The name of the bucket whose associated access points you want to list. + // + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. + // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. + Bucket *string `location:"querystring" locationName:"bucket" min:"3" type:"string"` + + // The maximum number of access points that you want to include in the list. + // If the specified bucket has more than this number of access points, then + // the response will include a continuation token in the NextToken field that + // you can use to retrieve the next page of access points. + MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` + + // A continuation token. If a previous call to ListAccessPoints returned a continuation + // token in the NextToken field, then providing that value here causes Amazon + // S3 to retrieve the next page of results. + NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` } -// SetManifest sets the Manifest field's value. -func (s *JobDescriptor) SetManifest(v *JobManifest) *JobDescriptor { - s.Manifest = v - return s +// String returns the string representation +func (s ListAccessPointsInput) String() string { + return awsutil.Prettify(s) } -// SetOperation sets the Operation field's value. 
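The LifecycleConfiguration, LifecycleRule, LifecycleExpiration, and LifecycleRuleFilter shapes back the S3 on Outposts bucket lifecycle support introduced by this SDK bump. A sketch of a single expiration rule follows, assuming the PutBucketLifecycleConfiguration operation from the same service and placeholder account, outpost, and bucket identifiers.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3control"
)

func main() {
	svc := s3control.New(session.Must(session.NewSession()))

	// One rule: expire objects under logs/ after 30 days and clean up
	// incomplete multipart uploads after 7 days.
	rule := &s3control.LifecycleRule{
		ID:     aws.String("expire-logs"),
		Status: aws.String("Enabled"),
		Filter: &s3control.LifecycleRuleFilter{
			Prefix: aws.String("logs/"),
		},
		Expiration: &s3control.LifecycleExpiration{
			Days: aws.Int64(30),
		},
		AbortIncompleteMultipartUpload: &s3control.AbortIncompleteMultipartUpload{
			DaysAfterInitiation: aws.Int64(7),
		},
	}

	// Placeholder account ID and Outposts bucket ARN.
	_, err := svc.PutBucketLifecycleConfiguration(&s3control.PutBucketLifecycleConfigurationInput{
		AccountId: aws.String("111122223333"),
		Bucket:    aws.String("arn:aws:s3-outposts:us-west-2:111122223333:outpost/op-01ac5d28a6a232904/bucket/example-outposts-bucket"),
		LifecycleConfiguration: &s3control.LifecycleConfiguration{
			Rules: []*s3control.LifecycleRule{rule},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}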
-func (s *JobDescriptor) SetOperation(v *JobOperation) *JobDescriptor { - s.Operation = v - return s +// GoString returns the string representation +func (s ListAccessPointsInput) GoString() string { + return s.String() } -// SetPriority sets the Priority field's value. -func (s *JobDescriptor) SetPriority(v int64) *JobDescriptor { - s.Priority = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccessPointsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccessPointsInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetProgressSummary sets the ProgressSummary field's value. -func (s *JobDescriptor) SetProgressSummary(v *JobProgressSummary) *JobDescriptor { - s.ProgressSummary = v +// SetAccountId sets the AccountId field's value. +func (s *ListAccessPointsInput) SetAccountId(v string) *ListAccessPointsInput { + s.AccountId = &v return s } -// SetReport sets the Report field's value. -func (s *JobDescriptor) SetReport(v *JobReport) *JobDescriptor { - s.Report = v +// SetBucket sets the Bucket field's value. +func (s *ListAccessPointsInput) SetBucket(v string) *ListAccessPointsInput { + s.Bucket = &v return s } -// SetRoleArn sets the RoleArn field's value. -func (s *JobDescriptor) SetRoleArn(v string) *JobDescriptor { - s.RoleArn = &v +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccessPointsInput) SetMaxResults(v int64) *ListAccessPointsInput { + s.MaxResults = &v return s } -// SetStatus sets the Status field's value. -func (s *JobDescriptor) SetStatus(v string) *JobDescriptor { - s.Status = &v +// SetNextToken sets the NextToken field's value. +func (s *ListAccessPointsInput) SetNextToken(v string) *ListAccessPointsInput { + s.NextToken = &v return s } -// SetStatusUpdateReason sets the StatusUpdateReason field's value. -func (s *JobDescriptor) SetStatusUpdateReason(v string) *JobDescriptor { - s.StatusUpdateReason = &v - return s +func (s *ListAccessPointsInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } } -// SetSuspendedCause sets the SuspendedCause field's value. -func (s *JobDescriptor) SetSuspendedCause(v string) *JobDescriptor { - s.SuspendedCause = &v - return s +func (s *ListAccessPointsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) } -// SetSuspendedDate sets the SuspendedDate field's value. -func (s *JobDescriptor) SetSuspendedDate(v time.Time) *JobDescriptor { - s.SuspendedDate = &v - return s +func (s *ListAccessPointsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) } -// SetTerminationDate sets the TerminationDate field's value. -func (s *JobDescriptor) SetTerminationDate(v time.Time) *JobDescriptor { - s.TerminationDate = &v - return s +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListAccessPointsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil } -// If this job failed, this element indicates why the job failed. -type JobFailure struct { +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. +func (s ListAccessPointsInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type ListAccessPointsOutput struct { _ struct{} `type:"structure"` - // The failure code, if any, for the specified job. - FailureCode *string `min:"1" type:"string"` + // Contains identification and configuration information for one or more access + // points associated with the specified bucket. + AccessPointList []*AccessPoint `locationNameList:"AccessPoint" type:"list"` - // The failure reason, if any, for the specified job. - FailureReason *string `min:"1" type:"string"` + // If the specified bucket has more access points than can be returned in one + // call to this API, this field contains a continuation token that you can provide + // in subsequent calls to this API to retrieve additional access points. + NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s JobFailure) String() string { +func (s ListAccessPointsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobFailure) GoString() string { +func (s ListAccessPointsOutput) GoString() string { return s.String() } -// SetFailureCode sets the FailureCode field's value. -func (s *JobFailure) SetFailureCode(v string) *JobFailure { - s.FailureCode = &v +// SetAccessPointList sets the AccessPointList field's value. +func (s *ListAccessPointsOutput) SetAccessPointList(v []*AccessPoint) *ListAccessPointsOutput { + s.AccessPointList = v return s } -// SetFailureReason sets the FailureReason field's value. -func (s *JobFailure) SetFailureReason(v string) *JobFailure { - s.FailureReason = &v +// SetNextToken sets the NextToken field's value. +func (s *ListAccessPointsOutput) SetNextToken(v string) *ListAccessPointsOutput { + s.NextToken = &v return s } -// Contains the configuration and status information for a single job retrieved -// as part of a job list. -type JobListDescriptor struct { - _ struct{} `type:"structure"` - - // A timestamp indicating when the specified job was created. - CreationTime *time.Time `type:"timestamp"` - - // The user-specified description that was included in the specified job's Create - // Job request. - Description *string `min:"1" type:"string"` - - // The ID for the specified job. - JobId *string `min:"5" type:"string"` - - // The operation that the specified job is configured to run on each object - // listed in the manifest. 
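// --- Illustrative sketch (editor-added; not part of this patch or the vendored SDK) ---
// Paging through ListAccessPoints by feeding NextToken back into the request, as the
// MaxResults/NextToken documentation above describes. The account ID, bucket name, and
// region are placeholders; error handling is minimal.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3control"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := s3control.New(sess)

	input := &s3control.ListAccessPointsInput{
		AccountId:  aws.String("123456789012"), // placeholder account ID
		Bucket:     aws.String("example-bucket"),
		MaxResults: aws.Int64(100),
	}

	for {
		out, err := svc.ListAccessPoints(input)
		if err != nil {
			fmt.Println("ListAccessPoints failed:", err)
			return
		}
		for _, ap := range out.AccessPointList {
			fmt.Println("access point:", aws.StringValue(ap.Name))
		}
		if out.NextToken == nil {
			break // no more pages
		}
		input.NextToken = out.NextToken // request the next page
	}
}
// --- end illustrative sketch ---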
- Operation *string `type:"string" enum:"OperationName"` +type ListJobsInput struct { + _ struct{} `locationName:"ListJobsRequest" type:"structure"` - // The current priority for the specified job. - Priority *int64 `type:"integer"` + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` - // Describes the total number of tasks that the specified job has executed, - // the number of tasks that succeeded, and the number of tasks that failed. - ProgressSummary *JobProgressSummary `type:"structure"` + // The List Jobs request returns jobs that match the statuses listed in this + // element. + JobStatuses []*string `location:"querystring" locationName:"jobStatuses" type:"list"` - // The specified job's current status. - Status *string `type:"string" enum:"JobStatus"` + // The maximum number of jobs that Amazon S3 will include in the List Jobs response. + // If there are more jobs than this number, the response will include a pagination + // token in the NextToken field to enable you to retrieve the next page of results. + MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` - // A timestamp indicating when the specified job terminated. A job's termination - // date is the date and time when it succeeded, failed, or was canceled. - TerminationDate *time.Time `type:"timestamp"` + // A pagination token to request the next page of results. Use the token that + // Amazon S3 returned in the NextToken element of the ListJobsResult from the + // previous List Jobs request. + NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` } // String returns the string representation -func (s JobListDescriptor) String() string { +func (s ListJobsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobListDescriptor) GoString() string { +func (s ListJobsInput) GoString() string { return s.String() } -// SetCreationTime sets the CreationTime field's value. -func (s *JobListDescriptor) SetCreationTime(v time.Time) *JobListDescriptor { - s.CreationTime = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *JobListDescriptor) SetDescription(v string) *JobListDescriptor { - s.Description = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListJobsInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } -// SetJobId sets the JobId field's value. -func (s *JobListDescriptor) SetJobId(v string) *JobListDescriptor { - s.JobId = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetOperation sets the Operation field's value. -func (s *JobListDescriptor) SetOperation(v string) *JobListDescriptor { - s.Operation = &v +// SetAccountId sets the AccountId field's value. +func (s *ListJobsInput) SetAccountId(v string) *ListJobsInput { + s.AccountId = &v return s } -// SetPriority sets the Priority field's value. 
-func (s *JobListDescriptor) SetPriority(v int64) *JobListDescriptor { - s.Priority = &v +// SetJobStatuses sets the JobStatuses field's value. +func (s *ListJobsInput) SetJobStatuses(v []*string) *ListJobsInput { + s.JobStatuses = v return s } -// SetProgressSummary sets the ProgressSummary field's value. -func (s *JobListDescriptor) SetProgressSummary(v *JobProgressSummary) *JobListDescriptor { - s.ProgressSummary = v +// SetMaxResults sets the MaxResults field's value. +func (s *ListJobsInput) SetMaxResults(v int64) *ListJobsInput { + s.MaxResults = &v return s } -// SetStatus sets the Status field's value. -func (s *JobListDescriptor) SetStatus(v string) *JobListDescriptor { - s.Status = &v +// SetNextToken sets the NextToken field's value. +func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput { + s.NextToken = &v return s } -// SetTerminationDate sets the TerminationDate field's value. -func (s *JobListDescriptor) SetTerminationDate(v time.Time) *JobListDescriptor { - s.TerminationDate = &v - return s +func (s *ListJobsInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } } -// Contains the configuration information for a job's manifest. -type JobManifest struct { +type ListJobsOutput struct { _ struct{} `type:"structure"` - // Contains the information required to locate the specified job's manifest. - // - // Location is a required field - Location *JobManifestLocation `type:"structure" required:"true"` + // The list of current jobs and jobs that have ended within the last 30 days. + Jobs []*JobListDescriptor `type:"list"` - // Describes the format of the specified job's manifest. If the manifest is - // in CSV format, also describes the columns contained within the manifest. - // - // Spec is a required field - Spec *JobManifestSpec `type:"structure" required:"true"` + // If the List Jobs request produced more than the maximum number of results, + // you can pass this value into a subsequent List Jobs request in order to retrieve + // the next page of results. + NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s JobManifest) String() string { +func (s ListJobsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobManifest) GoString() string { +func (s ListJobsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *JobManifest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "JobManifest"} - if s.Location == nil { - invalidParams.Add(request.NewErrParamRequired("Location")) - } - if s.Spec == nil { - invalidParams.Add(request.NewErrParamRequired("Spec")) - } - if s.Location != nil { - if err := s.Location.Validate(); err != nil { - invalidParams.AddNested("Location", err.(request.ErrInvalidParams)) - } - } - if s.Spec != nil { - if err := s.Spec.Validate(); err != nil { - invalidParams.AddNested("Spec", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetLocation sets the Location field's value. -func (s *JobManifest) SetLocation(v *JobManifestLocation) *JobManifest { - s.Location = v +// SetJobs sets the Jobs field's value. +func (s *ListJobsOutput) SetJobs(v []*JobListDescriptor) *ListJobsOutput { + s.Jobs = v return s } -// SetSpec sets the Spec field's value. 
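// --- Illustrative sketch (editor-added; not part of this patch or the vendored SDK) ---
// Listing S3 Batch Operations jobs filtered by status, per the JobStatuses, MaxResults,
// and NextToken documentation above. "Complete" and "Failed" are values of the JobStatus
// enum; the account ID and region are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3control"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := s3control.New(sess)

	out, err := svc.ListJobs(&s3control.ListJobsInput{
		AccountId:   aws.String("123456789012"), // placeholder
		JobStatuses: []*string{aws.String("Complete"), aws.String("Failed")},
		MaxResults:  aws.Int64(50),
	})
	if err != nil {
		fmt.Println("ListJobs failed:", err)
		return
	}
	for _, job := range out.Jobs {
		fmt.Printf("job %s: %s\n", aws.StringValue(job.JobId), aws.StringValue(job.Status))
	}
}
// --- end illustrative sketch ---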
-func (s *JobManifest) SetSpec(v *JobManifestSpec) *JobManifest { - s.Spec = v +// SetNextToken sets the NextToken field's value. +func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput { + s.NextToken = &v return s } -// Contains the information required to locate a manifest object. -type JobManifestLocation struct { - _ struct{} `type:"structure"` +type ListRegionalBucketsInput struct { + _ struct{} `locationName:"ListRegionalBucketsRequest" type:"structure"` - // The ETag for the specified manifest object. + // The AWS account ID of the Outposts bucket. // - // ETag is a required field - ETag *string `min:"1" type:"string" required:"true"` + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` - // The Amazon Resource Name (ARN) for a manifest object. - // - // ObjectArn is a required field - ObjectArn *string `min:"1" type:"string" required:"true"` + MaxResults *int64 `location:"querystring" locationName:"maxResults" type:"integer"` - // The optional version ID to identify a specific version of the manifest object. - ObjectVersionId *string `min:"1" type:"string"` + NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` + + // The ID of the AWS Outposts. + // + // This is required by Amazon S3 on Outposts buckets. + OutpostId *string `location:"header" locationName:"x-amz-outpost-id" min:"1" type:"string"` } // String returns the string representation -func (s JobManifestLocation) String() string { +func (s ListRegionalBucketsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobManifestLocation) GoString() string { +func (s ListRegionalBucketsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *JobManifestLocation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "JobManifestLocation"} - if s.ETag == nil { - invalidParams.Add(request.NewErrParamRequired("ETag")) - } - if s.ETag != nil && len(*s.ETag) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ETag", 1)) +func (s *ListRegionalBucketsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRegionalBucketsInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) } - if s.ObjectArn == nil { - invalidParams.Add(request.NewErrParamRequired("ObjectArn")) + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) } - if s.ObjectArn != nil && len(*s.ObjectArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ObjectArn", 1)) + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } - if s.ObjectVersionId != nil && len(*s.ObjectVersionId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ObjectVersionId", 1)) + if s.OutpostId != nil && len(*s.OutpostId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("OutpostId", 1)) } if invalidParams.Len() > 0 { @@ -2919,259 +7661,324 @@ func (s *JobManifestLocation) Validate() error { return nil } -// SetETag sets the ETag field's value. -func (s *JobManifestLocation) SetETag(v string) *JobManifestLocation { - s.ETag = &v +// SetAccountId sets the AccountId field's value. 
+func (s *ListRegionalBucketsInput) SetAccountId(v string) *ListRegionalBucketsInput { + s.AccountId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListRegionalBucketsInput) SetMaxResults(v int64) *ListRegionalBucketsInput { + s.MaxResults = &v return s } -// SetObjectArn sets the ObjectArn field's value. -func (s *JobManifestLocation) SetObjectArn(v string) *JobManifestLocation { - s.ObjectArn = &v +// SetNextToken sets the NextToken field's value. +func (s *ListRegionalBucketsInput) SetNextToken(v string) *ListRegionalBucketsInput { + s.NextToken = &v return s } -// SetObjectVersionId sets the ObjectVersionId field's value. -func (s *JobManifestLocation) SetObjectVersionId(v string) *JobManifestLocation { - s.ObjectVersionId = &v +// SetOutpostId sets the OutpostId field's value. +func (s *ListRegionalBucketsInput) SetOutpostId(v string) *ListRegionalBucketsInput { + s.OutpostId = &v return s } -// Describes the format of a manifest. If the manifest is in CSV format, also -// describes the columns contained within the manifest. -type JobManifestSpec struct { +func (s *ListRegionalBucketsInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} + +func (s *ListRegionalBucketsInput) getOutpostID() (string, error) { + if s.OutpostId == nil { + return "", fmt.Errorf("member OutpostId is nil") + } + return *s.OutpostId, nil +} + +func (s *ListRegionalBucketsInput) hasOutpostID() bool { + if s.OutpostId == nil { + return false + } + return true +} + +type ListRegionalBucketsOutput struct { _ struct{} `type:"structure"` - // If the specified manifest object is in the S3BatchOperations_CSV_20180820 - // format, this element describes which columns contain the required data. - Fields []*string `type:"list"` + // NextToken is sent when isTruncated is true, which means there are more buckets + // that can be listed. The next list requests to Amazon S3 can be continued + // with this NextToken. NextToken is obfuscated and is not a real key. + NextToken *string `min:"1" type:"string"` - // Indicates which of the available formats the specified manifest uses. - // - // Format is a required field - Format *string `type:"string" required:"true" enum:"JobManifestFormat"` + RegionalBucketList []*RegionalBucket `locationNameList:"RegionalBucket" type:"list"` } // String returns the string representation -func (s JobManifestSpec) String() string { +func (s ListRegionalBucketsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobManifestSpec) GoString() string { +func (s ListRegionalBucketsOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *JobManifestSpec) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "JobManifestSpec"} - if s.Format == nil { - invalidParams.Add(request.NewErrParamRequired("Format")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFields sets the Fields field's value. -func (s *JobManifestSpec) SetFields(v []*string) *JobManifestSpec { - s.Fields = v +// SetNextToken sets the NextToken field's value. +func (s *ListRegionalBucketsOutput) SetNextToken(v string) *ListRegionalBucketsOutput { + s.NextToken = &v return s } -// SetFormat sets the Format field's value. 
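// --- Illustrative sketch (editor-added; not part of this patch or the vendored SDK) ---
// Listing the S3 on Outposts buckets owned by an account, using the x-amz-outpost-id
// header documented above. The account, Outpost ID, and region are placeholders; each
// RegionalBucket is printed via its generated String() method so no field names are assumed.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3control"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := s3control.New(sess)

	out, err := svc.ListRegionalBuckets(&s3control.ListRegionalBucketsInput{
		AccountId: aws.String("123456789012"),         // placeholder
		OutpostId: aws.String("op-01ac5d28a6a232904"), // placeholder Outpost ID
	})
	if err != nil {
		fmt.Println("ListRegionalBuckets failed:", err)
		return
	}
	for _, rb := range out.RegionalBucketList {
		fmt.Println(rb) // RegionalBucket implements Stringer via awsutil.Prettify
	}
}
// --- end illustrative sketch ---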
-func (s *JobManifestSpec) SetFormat(v string) *JobManifestSpec { - s.Format = &v +// SetRegionalBucketList sets the RegionalBucketList field's value. +func (s *ListRegionalBucketsOutput) SetRegionalBucketList(v []*RegionalBucket) *ListRegionalBucketsOutput { + s.RegionalBucketList = v return s } -// The operation that you want this job to perform on each object listed in -// the manifest. For more information about the available operations, see Available -// Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-operations.html) -// in the Amazon Simple Storage Service Developer Guide. -type JobOperation struct { +// The container of the noncurrent version expiration. +type NoncurrentVersionExpiration struct { _ struct{} `type:"structure"` - // Directs the specified job to invoke an AWS Lambda function on each object - // in the manifest. - LambdaInvoke *LambdaInvokeOperation `type:"structure"` + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` +} - // Directs the specified job to execute an Initiate Glacier Restore call on - // each object in the manifest. - S3InitiateRestoreObject *S3InitiateRestoreObjectOperation `type:"structure"` +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} - // Directs the specified job to execute a PUT Object acl call on each object - // in the manifest. - S3PutObjectAcl *S3SetObjectAclOperation `type:"structure"` +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} - // Directs the specified job to execute a PUT Copy object call on each object - // in the manifest. - S3PutObjectCopy *S3CopyObjectOperation `type:"structure"` +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { + s.NoncurrentDays = &v + return s +} - // Directs the specified job to execute a PUT Object tagging call on each object - // in the manifest. - S3PutObjectTagging *S3SetObjectTaggingOperation `type:"structure"` +// The container for the noncurrent version transition. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon Simple Storage Service Developer Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. 
+ StorageClass *string `type:"string" enum:"TransitionStorageClass"` } // String returns the string representation -func (s JobOperation) String() string { +func (s NoncurrentVersionTransition) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobOperation) GoString() string { +func (s NoncurrentVersionTransition) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *JobOperation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "JobOperation"} - if s.LambdaInvoke != nil { - if err := s.LambdaInvoke.Validate(); err != nil { - invalidParams.AddNested("LambdaInvoke", err.(request.ErrInvalidParams)) - } - } - if s.S3PutObjectAcl != nil { - if err := s.S3PutObjectAcl.Validate(); err != nil { - invalidParams.AddNested("S3PutObjectAcl", err.(request.ErrInvalidParams)) - } - } - if s.S3PutObjectCopy != nil { - if err := s.S3PutObjectCopy.Validate(); err != nil { - invalidParams.AddNested("S3PutObjectCopy", err.(request.ErrInvalidParams)) - } - } - if s.S3PutObjectTagging != nil { - if err := s.S3PutObjectTagging.Validate(); err != nil { - invalidParams.AddNested("S3PutObjectTagging", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { + s.NoncurrentDays = &v + return s } -// SetLambdaInvoke sets the LambdaInvoke field's value. -func (s *JobOperation) SetLambdaInvoke(v *LambdaInvokeOperation) *JobOperation { - s.LambdaInvoke = v +// SetStorageClass sets the StorageClass field's value. +func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { + s.StorageClass = &v return s } -// SetS3InitiateRestoreObject sets the S3InitiateRestoreObject field's value. -func (s *JobOperation) SetS3InitiateRestoreObject(v *S3InitiateRestoreObjectOperation) *JobOperation { - s.S3InitiateRestoreObject = v - return s +// Indicates whether this access point policy is public. For more information +// about how Amazon S3 evaluates policies to determine whether they are public, +// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// in the Amazon Simple Storage Service Developer Guide. +type PolicyStatus struct { + _ struct{} `type:"structure"` + + IsPublic *bool `locationName:"IsPublic" type:"boolean"` } -// SetS3PutObjectAcl sets the S3PutObjectAcl field's value. -func (s *JobOperation) SetS3PutObjectAcl(v *S3SetObjectAclOperation) *JobOperation { - s.S3PutObjectAcl = v - return s +// String returns the string representation +func (s PolicyStatus) String() string { + return awsutil.Prettify(s) } -// SetS3PutObjectCopy sets the S3PutObjectCopy field's value. -func (s *JobOperation) SetS3PutObjectCopy(v *S3CopyObjectOperation) *JobOperation { - s.S3PutObjectCopy = v - return s +// GoString returns the string representation +func (s PolicyStatus) GoString() string { + return s.String() } -// SetS3PutObjectTagging sets the S3PutObjectTagging field's value. -func (s *JobOperation) SetS3PutObjectTagging(v *S3SetObjectTaggingOperation) *JobOperation { - s.S3PutObjectTagging = v +// SetIsPublic sets the IsPublic field's value. 
+func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { + s.IsPublic = &v return s } -// Describes the total number of tasks that the specified job has executed, -// the number of tasks that succeeded, and the number of tasks that failed. -type JobProgressSummary struct { +// The PublicAccessBlock configuration that you want to apply to this Amazon +// S3 bucket. You can enable the configuration options in any combination. For +// more information about when Amazon S3 considers a bucket or object public, +// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// in the Amazon Simple Storage Service Developer Guide. +// +// This is not supported for Amazon S3 on Outposts. +type PublicAccessBlockConfiguration struct { _ struct{} `type:"structure"` - NumberOfTasksFailed *int64 `type:"long"` + // Specifies whether Amazon S3 should block public access control lists (ACLs) + // for buckets in this account. Setting this element to TRUE causes the following + // behavior: + // + // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is + // public. + // + // * PUT Object calls fail if the request includes a public ACL. + // + // * PUT Bucket calls fail if the request includes a public ACL. + // + // Enabling this setting doesn't affect existing policies or ACLs. + // + // This is not supported for Amazon S3 on Outposts. + BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` - NumberOfTasksSucceeded *int64 `type:"long"` + // Specifies whether Amazon S3 should block public bucket policies for buckets + // in this account. Setting this element to TRUE causes Amazon S3 to reject + // calls to PUT Bucket policy if the specified bucket policy allows public access. + // + // Enabling this setting doesn't affect existing bucket policies. + // + // This is not supported for Amazon S3 on Outposts. + BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"` - TotalNumberOfTasks *int64 `type:"long"` + // Specifies whether Amazon S3 should ignore public ACLs for buckets in this + // account. Setting this element to TRUE causes Amazon S3 to ignore all public + // ACLs on buckets in this account and any objects that they contain. + // + // Enabling this setting doesn't affect the persistence of any existing ACLs + // and doesn't prevent new public ACLs from being set. + // + // This is not supported for Amazon S3 on Outposts. + IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should restrict public bucket policies for buckets + // in this account. Setting this element to TRUE restricts access to buckets + // with public policies to only AWS services and authorized users within this + // account. + // + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. + // + // This is not supported for Amazon S3 on Outposts. 
+ RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` } // String returns the string representation -func (s JobProgressSummary) String() string { +func (s PublicAccessBlockConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobProgressSummary) GoString() string { +func (s PublicAccessBlockConfiguration) GoString() string { return s.String() } -// SetNumberOfTasksFailed sets the NumberOfTasksFailed field's value. -func (s *JobProgressSummary) SetNumberOfTasksFailed(v int64) *JobProgressSummary { - s.NumberOfTasksFailed = &v +// SetBlockPublicAcls sets the BlockPublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicAcls = &v return s } -// SetNumberOfTasksSucceeded sets the NumberOfTasksSucceeded field's value. -func (s *JobProgressSummary) SetNumberOfTasksSucceeded(v int64) *JobProgressSummary { - s.NumberOfTasksSucceeded = &v +// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicPolicy = &v return s } -// SetTotalNumberOfTasks sets the TotalNumberOfTasks field's value. -func (s *JobProgressSummary) SetTotalNumberOfTasks(v int64) *JobProgressSummary { - s.TotalNumberOfTasks = &v +// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration { + s.IgnorePublicAcls = &v return s } -// Contains the configuration parameters for a job-completion report. -type JobReport struct { - _ struct{} `type:"structure"` +// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value. +func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration { + s.RestrictPublicBuckets = &v + return s +} - // The Amazon Resource Name (ARN) for the bucket where specified job-completion - // report will be stored. - Bucket *string `min:"1" type:"string"` +type PutAccessPointPolicyInput struct { + _ struct{} `locationName:"PutAccessPointPolicyRequest" type:"structure" xmlURI:"http://awss3control.amazonaws.com/doc/2018-08-20/"` - // Indicates whether the specified job will generate a job-completion report. + // The AWS account ID for owner of the bucket associated with the specified + // access point. // - // Enabled is a required field - Enabled *bool `type:"boolean" required:"true"` - - // The format of the specified job-completion report. - Format *string `type:"string" enum:"JobReportFormat"` + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` - // An optional prefix to describe where in the specified bucket the job-completion - // report will be stored. Amazon S3 will store the job-completion report at - // /job-/report.json. - Prefix *string `min:"1" type:"string"` + // The name of the access point that you want to associate with the specified + // policy. + // + // For Amazon S3 on Outposts specify the ARN of the access point accessed in + // the format arn:aws:s3-outposts:::outpost//accesspoint/. + // For example, to access the access point reports-ap through outpost my-outpost + // owned by account 123456789012 in Region us-west-2, use the URL encoding of + // arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/accesspoint/reports-ap. 
+ // The value must be URL encoded. + // + // Name is a required field + Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` - // Indicates whether the job-completion report will include details of all tasks - // or only failed tasks. - ReportScope *string `type:"string" enum:"JobReportScope"` + // The policy that you want to apply to the specified access point. For more + // information about access point policies, see Managing Data Access with Amazon + // S3 Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Policy is a required field + Policy *string `type:"string" required:"true"` } // String returns the string representation -func (s JobReport) String() string { +func (s PutAccessPointPolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s JobReport) GoString() string { +func (s PutAccessPointPolicyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *JobReport) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "JobReport"} - if s.Bucket != nil && len(*s.Bucket) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) +func (s *PutAccessPointPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutAccessPointPolicyInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) } - if s.Enabled == nil { - invalidParams.Add(request.NewErrParamRequired("Enabled")) + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) } - if s.Prefix != nil && len(*s.Prefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Prefix", 1)) + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) } if invalidParams.Len() > 0 { @@ -3180,125 +7987,90 @@ func (s *JobReport) Validate() error { return nil } -// SetBucket sets the Bucket field's value. -func (s *JobReport) SetBucket(v string) *JobReport { - s.Bucket = &v - return s -} - -// SetEnabled sets the Enabled field's value. -func (s *JobReport) SetEnabled(v bool) *JobReport { - s.Enabled = &v +// SetAccountId sets the AccountId field's value. +func (s *PutAccessPointPolicyInput) SetAccountId(v string) *PutAccessPointPolicyInput { + s.AccountId = &v return s } -// SetFormat sets the Format field's value. -func (s *JobReport) SetFormat(v string) *JobReport { - s.Format = &v +// SetName sets the Name field's value. +func (s *PutAccessPointPolicyInput) SetName(v string) *PutAccessPointPolicyInput { + s.Name = &v return s } -// SetPrefix sets the Prefix field's value. -func (s *JobReport) SetPrefix(v string) *JobReport { - s.Prefix = &v +// SetPolicy sets the Policy field's value. +func (s *PutAccessPointPolicyInput) SetPolicy(v string) *PutAccessPointPolicyInput { + s.Policy = &v return s } -// SetReportScope sets the ReportScope field's value. 
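// --- Illustrative sketch (editor-added; not part of this patch or the vendored SDK) ---
// Attaching a resource policy to an access point with PutAccessPointPolicy, matching the
// AccountId/Name/Policy fields documented above. The account ID, access point name, and
// policy document are placeholders; a real policy should name your own principals,
// actions, and resources.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3control"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := s3control.New(sess)

	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
	    "Action": "s3:GetObject",
	    "Resource": "arn:aws:s3:us-west-2:123456789012:accesspoint/example-ap/object/*"
	  }]
	}`

	_, err := svc.PutAccessPointPolicy(&s3control.PutAccessPointPolicyInput{
		AccountId: aws.String("123456789012"), // placeholder
		Name:      aws.String("example-ap"),   // placeholder access point name
		Policy:    aws.String(policy),
	})
	if err != nil {
		fmt.Println("PutAccessPointPolicy failed:", err)
	}
}
// --- end illustrative sketch ---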
-func (s *JobReport) SetReportScope(v string) *JobReport { - s.ReportScope = &v - return s +func (s *PutAccessPointPolicyInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } } -// Contains the configuration parameters for a Lambda Invoke operation. -type LambdaInvokeOperation struct { +type PutAccessPointPolicyOutput struct { _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) for the AWS Lambda function that the specified - // job will invoke for each object in the manifest. - FunctionArn *string `min:"1" type:"string"` } // String returns the string representation -func (s LambdaInvokeOperation) String() string { +func (s PutAccessPointPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s LambdaInvokeOperation) GoString() string { +func (s PutAccessPointPolicyOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *LambdaInvokeOperation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "LambdaInvokeOperation"} - if s.FunctionArn != nil && len(*s.FunctionArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FunctionArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetFunctionArn sets the FunctionArn field's value. -func (s *LambdaInvokeOperation) SetFunctionArn(v string) *LambdaInvokeOperation { - s.FunctionArn = &v - return s -} - -type ListAccessPointsInput struct { - _ struct{} `locationName:"ListAccessPointsRequest" type:"structure"` +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` - // The AWS account ID for owner of the bucket whose access points you want to - // list. + // The AWS account ID of the Outposts bucket. // // AccountId is a required field AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` - // The name of the bucket whose associated access points you want to list. - Bucket *string `location:"querystring" locationName:"bucket" min:"3" type:"string"` - - // The maximum number of access points that you want to include in the list. - // If the specified bucket has more than this number of access points, then - // the response will include a continuation token in the NextToken field that - // you can use to retrieve the next page of access points. - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + // The name of the bucket for which to set the configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` - // A continuation token. If a previous call to ListAccessPoints returned a continuation - // token in the NextToken field, then providing that value here causes Amazon - // S3 to retrieve the next page of results. - NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` + // Container for lifecycle rules. You can add as many as 1,000 rules. 
+ LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://awss3control.amazonaws.com/doc/2018-08-20/"` } // String returns the string representation -func (s ListAccessPointsInput) String() string { +func (s PutBucketLifecycleConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListAccessPointsInput) GoString() string { +func (s PutBucketLifecycleConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ListAccessPointsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListAccessPointsInput"} +func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} if s.AccountId == nil { invalidParams.Add(request.NewErrParamRequired("AccountId")) } if s.AccountId != nil && len(*s.AccountId) < 1 { invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } if s.Bucket != nil && len(*s.Bucket) < 3 { invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -3308,116 +8080,142 @@ func (s *ListAccessPointsInput) Validate() error { } // SetAccountId sets the AccountId field's value. -func (s *ListAccessPointsInput) SetAccountId(v string) *ListAccessPointsInput { +func (s *PutBucketLifecycleConfigurationInput) SetAccountId(v string) *PutBucketLifecycleConfigurationInput { s.AccountId = &v return s } // SetBucket sets the Bucket field's value. -func (s *ListAccessPointsInput) SetBucket(v string) *ListAccessPointsInput { +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { s.Bucket = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListAccessPointsInput) SetMaxResults(v int64) *ListAccessPointsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *ListAccessPointsInput) SetNextToken(v string) *ListAccessPointsInput { - s.NextToken = &v +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
+func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v return s } -func (s *ListAccessPointsInput) hostLabels() map[string]string { +func (s *PutBucketLifecycleConfigurationInput) hostLabels() map[string]string { return map[string]string{ "AccountId": aws.StringValue(s.AccountId), } } -type ListAccessPointsOutput struct { - _ struct{} `type:"structure"` +func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} - // Contains identification and configuration information for one or more access - // points associated with the specified bucket. - AccessPointList []*AccessPoint `locationNameList:"AccessPoint" type:"list"` +func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} - // If the specified bucket has more access points than can be returned in one - // call to this API, then this field contains a continuation token that you - // can provide in subsequent calls to this API to retrieve additional access - // points. - NextToken *string `min:"1" type:"string"` +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil } -// String returns the string representation -func (s ListAccessPointsOutput) String() string { - return awsutil.Prettify(s) +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. +func (s PutBucketLifecycleConfigurationInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil } -// GoString returns the string representation -func (s ListAccessPointsOutput) GoString() string { - return s.String() +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` } -// SetAccessPointList sets the AccessPointList field's value. -func (s *ListAccessPointsOutput) SetAccessPointList(v []*AccessPoint) *ListAccessPointsOutput { - s.AccessPointList = v - return s +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) } -// SetNextToken sets the NextToken field's value. 
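// --- Illustrative sketch (editor-added; not part of this patch or the vendored SDK) ---
// Putting a lifecycle configuration on an S3 on Outposts bucket via its ARN. The bucket
// ARN, account ID, and region are placeholders. The LifecycleConfiguration, LifecycleRule,
// and LifecycleExpiration field names used here are assumed from the lifecycle types
// defined earlier in this generated file; verify them against the vendored source.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3control"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := s3control.New(sess)

	bucketARN := "arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports" // placeholder

	_, err := svc.PutBucketLifecycleConfiguration(&s3control.PutBucketLifecycleConfigurationInput{
		AccountId: aws.String("123456789012"), // placeholder
		Bucket:    aws.String(bucketARN),
		LifecycleConfiguration: &s3control.LifecycleConfiguration{
			Rules: []*s3control.LifecycleRule{{
				ID:     aws.String("expire-old-objects"),
				Status: aws.String("Enabled"),
				Expiration: &s3control.LifecycleExpiration{
					Days: aws.Int64(365), // expire current versions after a year
				},
				NoncurrentVersionExpiration: &s3control.NoncurrentVersionExpiration{
					NoncurrentDays: aws.Int64(30), // see NoncurrentVersionExpiration above
				},
			}},
		},
	})
	if err != nil {
		fmt.Println("PutBucketLifecycleConfiguration failed:", err)
	}
}
// --- end illustrative sketch ---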
-func (s *ListAccessPointsOutput) SetNextToken(v string) *ListAccessPointsOutput { - s.NextToken = &v - return s +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() } -type ListJobsInput struct { - _ struct{} `locationName:"ListJobsRequest" type:"structure"` +type PutBucketPolicyInput struct { + _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" xmlURI:"http://awss3control.amazonaws.com/doc/2018-08-20/"` + // The AWS account ID of the Outposts bucket. + // // AccountId is a required field AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` - // The List Jobs request returns jobs that match the statuses listed in this - // element. - JobStatuses []*string `location:"querystring" locationName:"jobStatuses" type:"list"` + // The ARN of the bucket. + // + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. + // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` - // The maximum number of jobs that Amazon S3 will include in the List Jobs response. - // If there are more jobs than this number, the response will include a pagination - // token in the NextToken field to enable you to retrieve the next page of results. - MaxResults *int64 `location:"querystring" locationName:"maxResults" min:"1" type:"integer"` + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. + // + // This is not supported by Amazon S3 on Outposts buckets. + ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` - // A pagination token to request the next page of results. Use the token that - // Amazon S3 returned in the NextToken element of the ListJobsResult from the - // previous List Jobs request. - NextToken *string `location:"querystring" locationName:"nextToken" min:"1" type:"string"` + // The bucket policy as a JSON document. + // + // Policy is a required field + Policy *string `type:"string" required:"true"` } // String returns the string representation -func (s ListJobsInput) String() string { +func (s PutBucketPolicyInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ListJobsInput) GoString() string { +func (s PutBucketPolicyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *ListJobsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ListJobsInput"} +func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} if s.AccountId == nil { invalidParams.Add(request.NewErrParamRequired("AccountId")) } if s.AccountId != nil && len(*s.AccountId) < 1 { invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) } - if s.MaxResults != nil && *s.MaxResults < 1 { - invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) } if invalidParams.Len() > 0 { @@ -3427,228 +8225,288 @@ func (s *ListJobsInput) Validate() error { } // SetAccountId sets the AccountId field's value. -func (s *ListJobsInput) SetAccountId(v string) *ListJobsInput { +func (s *PutBucketPolicyInput) SetAccountId(v string) *PutBucketPolicyInput { s.AccountId = &v return s } -// SetJobStatuses sets the JobStatuses field's value. -func (s *ListJobsInput) SetJobStatuses(v []*string) *ListJobsInput { - s.JobStatuses = v +// SetBucket sets the Bucket field's value. +func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { + s.Bucket = &v return s } -// SetMaxResults sets the MaxResults field's value. -func (s *ListJobsInput) SetMaxResults(v int64) *ListJobsInput { - s.MaxResults = &v +// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. +func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { + s.ConfirmRemoveSelfBucketAccess = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput { - s.NextToken = &v +// SetPolicy sets the Policy field's value. +func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { + s.Policy = &v return s } -func (s *ListJobsInput) hostLabels() map[string]string { +func (s *PutBucketPolicyInput) hostLabels() map[string]string { return map[string]string{ "AccountId": aws.StringValue(s.AccountId), } } -type ListJobsOutput struct { - _ struct{} `type:"structure"` - - // The list of current jobs and jobs that have ended within the last 30 days. - Jobs []*JobListDescriptor `type:"list"` - - // If the List Jobs request produced more than the maximum number of results, - // you can pass this value into a subsequent List Jobs request in order to retrieve - // the next page of results. - NextToken *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s ListJobsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListJobsOutput) GoString() string { - return s.String() +func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) } -// SetJobs sets the Jobs field's value. 
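// --- Illustrative sketch (editor-added; not part of this patch or the vendored SDK) ---
// Applying a bucket policy to an S3 on Outposts bucket through its ARN, as the Bucket
// field documentation above describes. The bucket ARN, account ID, region, and policy
// document are placeholders (the wildcard action is only a stand-in).
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3control"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := s3control.New(sess)

	bucketARN := "arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports" // placeholder

	policy := fmt.Sprintf(`{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
	    "Action": "s3-outposts:*",
	    "Resource": "%s"
	  }]
	}`, bucketARN)

	_, err := svc.PutBucketPolicy(&s3control.PutBucketPolicyInput{
		AccountId: aws.String("123456789012"), // placeholder
		Bucket:    aws.String(bucketARN),
		Policy:    aws.String(policy),
	})
	if err != nil {
		fmt.Println("PutBucketPolicy failed:", err)
	}
}
// --- end illustrative sketch ---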
-func (s *ListJobsOutput) SetJobs(v []*JobListDescriptor) *ListJobsOutput { - s.Jobs = v - return s +func (s *PutBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) } -// SetNextToken sets the NextToken field's value. -func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput { - s.NextToken = &v - return s +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil } -// Indicates whether this access point policy is public. For more information -// about how Amazon S3 evaluates policies to determine whether they are public, -// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// in the Amazon Simple Storage Service Developer Guide. -type PolicyStatus struct { - _ struct{} `type:"structure"` +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. +func (s PutBucketPolicyInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} - IsPublic *bool `locationName:"IsPublic" type:"boolean"` +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` } // String returns the string representation -func (s PolicyStatus) String() string { +func (s PutBucketPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PolicyStatus) GoString() string { +func (s PutBucketPolicyOutput) GoString() string { return s.String() } -// SetIsPublic sets the IsPublic field's value. -func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { - s.IsPublic = &v - return s -} - -// The PublicAccessBlock configuration that you want to apply to this Amazon -// S3 bucket. You can enable the configuration options in any combination. For -// more information about when Amazon S3 considers a bucket or object public, -// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) -// in the Amazon Simple Storage Service Developer Guide. -type PublicAccessBlockConfiguration struct { - _ struct{} `type:"structure"` +type PutBucketTaggingInput struct { + _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` - // Specifies whether Amazon S3 should block public access control lists (ACLs) - // for buckets in this account. Setting this element to TRUE causes the following - // behavior: - // - // * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is - // public. 
- // - // * PUT Object calls fail if the request includes a public ACL. + // The AWS account ID of the Outposts bucket. // - // * PUT Bucket calls fail if the request includes a public ACL. - // - // Enabling this setting doesn't affect existing policies or ACLs. - BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` + // AccountId is a required field + AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` - // Specifies whether Amazon S3 should block public bucket policies for buckets - // in this account. Setting this element to TRUE causes Amazon S3 to reject - // calls to PUT Bucket policy if the specified bucket policy allows public access. + // The Amazon Resource Name (ARN) of the bucket. // - // Enabling this setting doesn't affect existing bucket policies. - BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"` - - // Specifies whether Amazon S3 should ignore public ACLs for buckets in this - // account. Setting this element to TRUE causes Amazon S3 to ignore all public - // ACLs on buckets in this account and any objects that they contain. + // For Amazon S3 on Outposts specify the ARN of the bucket accessed in the format + // arn:aws:s3-outposts:::outpost//bucket/. + // For example, to access the bucket reports through outpost my-outpost owned + // by account 123456789012 in Region us-west-2, use the URL encoding of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports. + // The value must be URL encoded. // - // Enabling this setting doesn't affect the persistence of any existing ACLs - // and doesn't prevent new public ACLs from being set. - IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` + // Bucket is a required field + Bucket *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` - // Specifies whether Amazon S3 should restrict public bucket policies for buckets - // in this account. Setting this element to TRUE restricts access to buckets - // with public policies to only AWS services and authorized users within this - // account. - // - // Enabling this setting doesn't affect previously stored bucket policies, except - // that public and cross-account access within any public bucket policy, including - // non-public delegation to specific accounts, is blocked. - RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://awss3control.amazonaws.com/doc/2018-08-20/"` } // String returns the string representation -func (s PublicAccessBlockConfiguration) String() string { +func (s PutBucketTaggingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PublicAccessBlockConfiguration) GoString() string { +func (s PutBucketTaggingInput) GoString() string { return s.String() } -// SetBlockPublicAcls sets the BlockPublicAcls field's value. -func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration { - s.BlockPublicAcls = &v - return s +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.AccountId != nil && len(*s.AccountId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 3 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. -func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration { - s.BlockPublicPolicy = &v +// SetAccountId sets the AccountId field's value. +func (s *PutBucketTaggingInput) SetAccountId(v string) *PutBucketTaggingInput { + s.AccountId = &v return s } -// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. -func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration { - s.IgnorePublicAcls = &v +// SetBucket sets the Bucket field's value. +func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { + s.Bucket = &v return s } -// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value. -func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration { - s.RestrictPublicBuckets = &v +// SetTagging sets the Tagging field's value. +func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { + s.Tagging = v return s } -type PutAccessPointPolicyInput struct { - _ struct{} `locationName:"PutAccessPointPolicyRequest" type:"structure" xmlURI:"http://awss3control.amazonaws.com/doc/2018-08-20/"` +func (s *PutBucketTaggingInput) hostLabels() map[string]string { + return map[string]string{ + "AccountId": aws.StringValue(s.AccountId), + } +} - // The AWS account ID for owner of the bucket associated with the specified - // access point. +func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +// updateAccountID returns a pointer to a modified copy of input, +// if account id is not provided, we update the account id in modified input +// if account id is provided, but doesn't match with the one in ARN, we throw an error +// if account id is not updated, we return nil. Note that original input is not modified. 
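// --- Illustrative sketch (editor-added; not part of this patch or the vendored SDK) ---
// Replacing the tag set on an S3 on Outposts bucket with PutBucketTagging. The bucket ARN,
// account ID, and region are placeholders, and Tagging.TagSet is assumed from the Tagging
// type defined elsewhere in this generated file; verify against the vendored source.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3control"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	svc := s3control.New(sess)

	_, err := svc.PutBucketTagging(&s3control.PutBucketTaggingInput{
		AccountId: aws.String("123456789012"),                                                             // placeholder
		Bucket:    aws.String("arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/bucket/reports"), // placeholder ARN
		Tagging: &s3control.Tagging{
			TagSet: []*s3control.S3Tag{
				{Key: aws.String("environment"), Value: aws.String("test")},
			},
		},
	})
	if err != nil {
		fmt.Println("PutBucketTagging failed:", err)
	}
}
// --- end illustrative sketch ---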
+func (s PutBucketTaggingInput) updateAccountID(accountId string) (interface{}, error) { + if s.AccountId == nil { + s.AccountId = aws.String(accountId) + return &s, nil + } else if *s.AccountId != accountId { + return &s, fmt.Errorf("Account ID mismatch, the Account ID cannot be specified in an ARN and in the accountId field") + } + return nil, nil +} + +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +type PutJobTaggingInput struct { + _ struct{} `locationName:"PutJobTaggingRequest" type:"structure" xmlURI:"http://awss3control.amazonaws.com/doc/2018-08-20/"` + + // The AWS account ID associated with the S3 Batch Operations job. // // AccountId is a required field AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` - // The name of the access point that you want to associate with the specified - // policy. + // The ID for the S3 Batch Operations job whose tags you want to replace. // - // Name is a required field - Name *string `location:"uri" locationName:"name" min:"3" type:"string" required:"true"` + // JobId is a required field + JobId *string `location:"uri" locationName:"id" min:"5" type:"string" required:"true"` - // The policy that you want to apply to the specified access point. For more - // information about access point policies, see Managing Data Access with Amazon - // S3 Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-points.html) - // in the Amazon Simple Storage Service Developer Guide. + // The set of tags to associate with the S3 Batch Operations job. // - // Policy is a required field - Policy *string `type:"string" required:"true"` + // Tags is a required field + Tags []*S3Tag `type:"list" required:"true"` } // String returns the string representation -func (s PutAccessPointPolicyInput) String() string { +func (s PutJobTaggingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutAccessPointPolicyInput) GoString() string { +func (s PutJobTaggingInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutAccessPointPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutAccessPointPolicyInput"} +func (s *PutJobTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutJobTaggingInput"} if s.AccountId == nil { invalidParams.Add(request.NewErrParamRequired("AccountId")) } if s.AccountId != nil && len(*s.AccountId) < 1 { invalidParams.Add(request.NewErrParamMinLen("AccountId", 1)) } - if s.Name == nil { - invalidParams.Add(request.NewErrParamRequired("Name")) + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) } - if s.Name != nil && len(*s.Name) < 3 { - invalidParams.Add(request.NewErrParamMinLen("Name", 3)) + if s.JobId != nil && len(*s.JobId) < 5 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 5)) } - if s.Policy == nil { - invalidParams.Add(request.NewErrParamRequired("Policy")) + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -3658,54 +8516,54 @@ func (s *PutAccessPointPolicyInput) Validate() error { } // SetAccountId sets the AccountId field's value. -func (s *PutAccessPointPolicyInput) SetAccountId(v string) *PutAccessPointPolicyInput { +func (s *PutJobTaggingInput) SetAccountId(v string) *PutJobTaggingInput { s.AccountId = &v return s } -// SetName sets the Name field's value. -func (s *PutAccessPointPolicyInput) SetName(v string) *PutAccessPointPolicyInput { - s.Name = &v +// SetJobId sets the JobId field's value. +func (s *PutJobTaggingInput) SetJobId(v string) *PutJobTaggingInput { + s.JobId = &v return s } -// SetPolicy sets the Policy field's value. -func (s *PutAccessPointPolicyInput) SetPolicy(v string) *PutAccessPointPolicyInput { - s.Policy = &v +// SetTags sets the Tags field's value. +func (s *PutJobTaggingInput) SetTags(v []*S3Tag) *PutJobTaggingInput { + s.Tags = v return s } -func (s *PutAccessPointPolicyInput) hostLabels() map[string]string { +func (s *PutJobTaggingInput) hostLabels() map[string]string { return map[string]string{ "AccountId": aws.StringValue(s.AccountId), } } -type PutAccessPointPolicyOutput struct { +type PutJobTaggingOutput struct { _ struct{} `type:"structure"` } // String returns the string representation -func (s PutAccessPointPolicyOutput) String() string { +func (s PutJobTaggingOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutAccessPointPolicyOutput) GoString() string { +func (s PutJobTaggingOutput) GoString() string { return s.String() } type PutPublicAccessBlockInput struct { _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"` - // The account ID for the Amazon Web Services account whose PublicAccessBlock - // configuration you want to set. + // The account ID for the AWS account whose PublicAccessBlock configuration + // you want to set. // // AccountId is a required field AccountId *string `location:"header" locationName:"x-amz-account-id" type:"string" required:"true"` // The PublicAccessBlock configuration that you want to apply to the specified - // Amazon Web Services account. + // AWS account. 
// // PublicAccessBlockConfiguration is a required field PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://awss3control.amazonaws.com/doc/2018-08-20/"` @@ -3772,6 +8630,68 @@ func (s PutPublicAccessBlockOutput) GoString() string { return s.String() } +// The container for the regional bucket. +type RegionalBucket struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `min:"3" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) for the regional bucket. + BucketArn *string `min:"4" type:"string"` + + // The creation date of the regional bucket + // + // CreationDate is a required field + CreationDate *time.Time `type:"timestamp" required:"true"` + + // The AWS Outposts ID of the regional bucket. + OutpostId *string `min:"1" type:"string"` + + // PublicAccessBlockEnabled is a required field + PublicAccessBlockEnabled *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s RegionalBucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegionalBucket) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *RegionalBucket) SetBucket(v string) *RegionalBucket { + s.Bucket = &v + return s +} + +// SetBucketArn sets the BucketArn field's value. +func (s *RegionalBucket) SetBucketArn(v string) *RegionalBucket { + s.BucketArn = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *RegionalBucket) SetCreationDate(v time.Time) *RegionalBucket { + s.CreationDate = &v + return s +} + +// SetOutpostId sets the OutpostId field's value. +func (s *RegionalBucket) SetOutpostId(v string) *RegionalBucket { + s.OutpostId = &v + return s +} + +// SetPublicAccessBlockEnabled sets the PublicAccessBlockEnabled field's value. +func (s *RegionalBucket) SetPublicAccessBlockEnabled(v bool) *RegionalBucket { + s.PublicAccessBlockEnabled = &v + return s +} + type S3AccessControlList struct { _ struct{} `type:"structure"` @@ -3876,10 +8796,10 @@ func (s *S3AccessControlPolicy) SetCannedAccessControlList(v string) *S3AccessCo return s } -// Contains the configuration parameters for a PUT Copy object operation. Amazon -// S3 batch operations passes each value through to the underlying PUT Copy -// object API. For more information about the parameters for this operation, -// see PUT Object - Copy (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html). +// Contains the configuration parameters for a PUT Copy object operation. S3 +// Batch Operations passes each value through to the underlying PUT Copy object +// API. For more information about the parameters for this operation, see PUT +// Object - Copy (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html). type S3CopyObjectOperation struct { _ struct{} `type:"structure"` @@ -3895,10 +8815,15 @@ type S3CopyObjectOperation struct { NewObjectTagging []*S3Tag `type:"list"` + // The legal hold status to be applied to all objects in the Batch Operations + // job. ObjectLockLegalHoldStatus *string `type:"string" enum:"S3ObjectLockLegalHoldStatus"` + // The retention mode to be applied to all objects in the Batch Operations job. ObjectLockMode *string `type:"string" enum:"S3ObjectLockMode"` + // The date when the applied object retention configuration expires on all objects + // in the Batch Operations job. 
ObjectLockRetainUntilDate *time.Time `type:"timestamp"` RedirectLocation *string `min:"1" type:"string"` @@ -4169,9 +9094,9 @@ func (s *S3Grantee) SetTypeIdentifier(v string) *S3Grantee { } // Contains the configuration parameters for an Initiate Glacier Restore job. -// Amazon S3 batch operations passes each value through to the underlying POST -// Object restore API. For more information about the parameters for this operation, -// see Restoring Archives (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOSTrestore.html#RESTObjectPOSTrestore-restore-request). +// S3 Batch Operations passes each value through to the underlying POST Object +// restore API. For more information about the parameters for this operation, +// see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOSTrestore.html#RESTObjectPOSTrestore-restore-request). type S3InitiateRestoreObjectOperation struct { _ struct{} `type:"structure"` @@ -4190,15 +9115,56 @@ func (s S3InitiateRestoreObjectOperation) GoString() string { return s.String() } -// SetExpirationInDays sets the ExpirationInDays field's value. -func (s *S3InitiateRestoreObjectOperation) SetExpirationInDays(v int64) *S3InitiateRestoreObjectOperation { - s.ExpirationInDays = &v - return s +// SetExpirationInDays sets the ExpirationInDays field's value. +func (s *S3InitiateRestoreObjectOperation) SetExpirationInDays(v int64) *S3InitiateRestoreObjectOperation { + s.ExpirationInDays = &v + return s +} + +// SetGlacierJobTier sets the GlacierJobTier field's value. +func (s *S3InitiateRestoreObjectOperation) SetGlacierJobTier(v string) *S3InitiateRestoreObjectOperation { + s.GlacierJobTier = &v + return s +} + +// Whether S3 Object Lock legal hold will be applied to objects in an S3 Batch +// Operations job. +type S3ObjectLockLegalHold struct { + _ struct{} `type:"structure"` + + // The Object Lock legal hold status to be applied to all objects in the Batch + // Operations job. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"S3ObjectLockLegalHoldStatus"` +} + +// String returns the string representation +func (s S3ObjectLockLegalHold) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3ObjectLockLegalHold) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3ObjectLockLegalHold) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3ObjectLockLegalHold"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetGlacierJobTier sets the GlacierJobTier field's value. -func (s *S3InitiateRestoreObjectOperation) SetGlacierJobTier(v string) *S3InitiateRestoreObjectOperation { - s.GlacierJobTier = &v +// SetStatus sets the Status field's value. +func (s *S3ObjectLockLegalHold) SetStatus(v string) *S3ObjectLockLegalHold { + s.Status = &v return s } @@ -4378,10 +9344,50 @@ func (s *S3ObjectOwner) SetID(v string) *S3ObjectOwner { return s } -// Contains the configuration parameters for a Set Object ACL operation. Amazon -// S3 batch operations passes each value through to the underlying PUT Object -// acl API. For more information about the parameters for this operation, see -// PUT Object acl (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUTacl.html). 
+// Contains the S3 Object Lock retention mode to be applied to all objects in +// the S3 Batch Operations job. If you don't provide Mode and RetainUntilDate +// data types in your operation, you will remove the retention from your objects. +// For more information, see Using S3 Object Lock retention with S3 Batch Operations +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-retention-date.html) +// in the Amazon Simple Storage Service Developer Guide. +type S3Retention struct { + _ struct{} `type:"structure"` + + // The Object Lock retention mode to be applied to all objects in the Batch + // Operations job. + Mode *string `type:"string" enum:"S3ObjectLockRetentionMode"` + + // The date when the applied Object Lock retention will expire on all objects + // set by the Batch Operations job. + RetainUntilDate *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s S3Retention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Retention) GoString() string { + return s.String() +} + +// SetMode sets the Mode field's value. +func (s *S3Retention) SetMode(v string) *S3Retention { + s.Mode = &v + return s +} + +// SetRetainUntilDate sets the RetainUntilDate field's value. +func (s *S3Retention) SetRetainUntilDate(v time.Time) *S3Retention { + s.RetainUntilDate = &v + return s +} + +// Contains the configuration parameters for a Set Object ACL operation. S3 +// Batch Operations passes each value through to the underlying PUT Object acl +// API. For more information about the parameters for this operation, see PUT +// Object acl (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUTacl.html). type S3SetObjectAclOperation struct { _ struct{} `type:"structure"` @@ -4419,9 +9425,114 @@ func (s *S3SetObjectAclOperation) SetAccessControlPolicy(v *S3AccessControlPolic return s } +// Contains the configuration for an S3 Object Lock legal hold operation that +// an S3 Batch Operations job passes each object through to the underlying PutObjectLegalHold +// API. For more information, see Using S3 Object Lock legal hold with S3 Batch +// Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-legal-hold.html) +// in the Amazon Simple Storage Service Developer Guide. +type S3SetObjectLegalHoldOperation struct { + _ struct{} `type:"structure"` + + // Contains the Object Lock legal hold status to be applied to all objects in + // the Batch Operations job. + // + // LegalHold is a required field + LegalHold *S3ObjectLockLegalHold `type:"structure" required:"true"` +} + +// String returns the string representation +func (s S3SetObjectLegalHoldOperation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3SetObjectLegalHoldOperation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3SetObjectLegalHoldOperation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3SetObjectLegalHoldOperation"} + if s.LegalHold == nil { + invalidParams.Add(request.NewErrParamRequired("LegalHold")) + } + if s.LegalHold != nil { + if err := s.LegalHold.Validate(); err != nil { + invalidParams.AddNested("LegalHold", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLegalHold sets the LegalHold field's value. 
+func (s *S3SetObjectLegalHoldOperation) SetLegalHold(v *S3ObjectLockLegalHold) *S3SetObjectLegalHoldOperation { + s.LegalHold = v + return s +} + +// Contains the configuration parameters for the Object Lock retention action +// for an S3 Batch Operations job. Batch Operations passes each value through +// to the underlying PutObjectRetention API. For more information, see Using +// S3 Object Lock retention with S3 Batch Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-retention-date.html) +// in the Amazon Simple Storage Service Developer Guide. +type S3SetObjectRetentionOperation struct { + _ struct{} `type:"structure"` + + // Indicates if the action should be applied to objects in the Batch Operations + // job even if they have Object Lock GOVERNANCE type in place. + BypassGovernanceRetention *bool `type:"boolean"` + + // Contains the Object Lock retention mode to be applied to all objects in the + // Batch Operations job. For more information, see Using S3 Object Lock retention + // with S3 Batch Operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/batch-ops-retention-date.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Retention is a required field + Retention *S3Retention `type:"structure" required:"true"` +} + +// String returns the string representation +func (s S3SetObjectRetentionOperation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3SetObjectRetentionOperation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3SetObjectRetentionOperation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3SetObjectRetentionOperation"} + if s.Retention == nil { + invalidParams.Add(request.NewErrParamRequired("Retention")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *S3SetObjectRetentionOperation) SetBypassGovernanceRetention(v bool) *S3SetObjectRetentionOperation { + s.BypassGovernanceRetention = &v + return s +} + +// SetRetention sets the Retention field's value. +func (s *S3SetObjectRetentionOperation) SetRetention(v *S3Retention) *S3SetObjectRetentionOperation { + s.Retention = v + return s +} + // Contains the configuration parameters for a Set Object Tagging operation. -// Amazon S3 batch operations passes each value through to the underlying PUT -// Object tagging API. For more information about the parameters for this operation, +// S3 Batch Operations passes each value through to the underlying PUT Object +// tagging API. For more information about the parameters for this operation, // see PUT Object tagging (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUTtagging.html). type S3SetObjectTaggingOperation struct { _ struct{} `type:"structure"` @@ -4516,6 +9627,101 @@ func (s *S3Tag) SetValue(v string) *S3Tag { return s } +type Tagging struct { + _ struct{} `type:"structure"` + + // A collection for a set of tags. + // + // TagSet is a required field + TagSet []*S3Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagSet sets the TagSet field's value. +func (s *Tagging) SetTagSet(v []*S3Tag) *Tagging { + s.TagSet = v + return s +} + +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 Lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon Simple Storage Service Developer Guide. +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. + Date *time.Time `type:"timestamp"` + + // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. + Days *int64 `type:"integer"` + + // The storage class to which you want the object to transition. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *Transition) SetDate(v time.Time) *Transition { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Transition) SetDays(v int64) *Transition { + s.Days = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Transition) SetStorageClass(v string) *Transition { + s.StorageClass = &v + return s +} + type UpdateJobPriorityInput struct { _ struct{} `locationName:"UpdateJobPriorityRequest" type:"structure"` @@ -4758,7 +9964,7 @@ func (s *UpdateJobStatusOutput) SetStatusUpdateReason(v string) *UpdateJobStatus return s } -// The Virtual Private Cloud (VPC) configuration for an access point. +// The virtual private cloud (VPC) configuration for an access point. 
type VpcConfiguration struct { _ struct{} `type:"structure"` @@ -4801,6 +10007,98 @@ func (s *VpcConfiguration) SetVpcId(v string) *VpcConfiguration { return s } +const ( + // BucketCannedACLPrivate is a BucketCannedACL enum value + BucketCannedACLPrivate = "private" + + // BucketCannedACLPublicRead is a BucketCannedACL enum value + BucketCannedACLPublicRead = "public-read" + + // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value + BucketCannedACLPublicReadWrite = "public-read-write" + + // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +// BucketCannedACL_Values returns all elements of the BucketCannedACL enum +func BucketCannedACL_Values() []string { + return []string{ + BucketCannedACLPrivate, + BucketCannedACLPublicRead, + BucketCannedACLPublicReadWrite, + BucketCannedACLAuthenticatedRead, + } +} + +const ( + // BucketLocationConstraintEu is a BucketLocationConstraint enum value + BucketLocationConstraintEu = "EU" + + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest1 = "eu-west-1" + + // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest1 = "us-west-1" + + // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest2 = "us-west-2" + + // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth1 = "ap-south-1" + + // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + + // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + + // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + + // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintSaEast1 = "sa-east-1" + + // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorth1 = "cn-north-1" + + // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuCentral1 = "eu-central-1" +) + +// BucketLocationConstraint_Values returns all elements of the BucketLocationConstraint enum +func BucketLocationConstraint_Values() []string { + return []string{ + BucketLocationConstraintEu, + BucketLocationConstraintEuWest1, + BucketLocationConstraintUsWest1, + BucketLocationConstraintUsWest2, + BucketLocationConstraintApSouth1, + BucketLocationConstraintApSoutheast1, + BucketLocationConstraintApSoutheast2, + BucketLocationConstraintApNortheast1, + BucketLocationConstraintSaEast1, + BucketLocationConstraintCnNorth1, + BucketLocationConstraintEuCentral1, + } +} + +const ( + // ExpirationStatusEnabled is a ExpirationStatus enum value + ExpirationStatusEnabled = "Enabled" + + // ExpirationStatusDisabled is a ExpirationStatus enum value + ExpirationStatusDisabled = "Disabled" +) + +// ExpirationStatus_Values returns all elements of the ExpirationStatus enum +func ExpirationStatus_Values() []string { + return []string{ + ExpirationStatusEnabled, + ExpirationStatusDisabled, + } +} + const ( // JobManifestFieldNameIgnore is a JobManifestFieldName enum value JobManifestFieldNameIgnore = "Ignore" @@ -4815,6 +10113,16 @@ const ( JobManifestFieldNameVersionId = 
"VersionId" ) +// JobManifestFieldName_Values returns all elements of the JobManifestFieldName enum +func JobManifestFieldName_Values() []string { + return []string{ + JobManifestFieldNameIgnore, + JobManifestFieldNameBucket, + JobManifestFieldNameKey, + JobManifestFieldNameVersionId, + } +} + const ( // JobManifestFormatS3batchOperationsCsv20180820 is a JobManifestFormat enum value JobManifestFormatS3batchOperationsCsv20180820 = "S3BatchOperations_CSV_20180820" @@ -4823,11 +10131,26 @@ const ( JobManifestFormatS3inventoryReportCsv20161130 = "S3InventoryReport_CSV_20161130" ) +// JobManifestFormat_Values returns all elements of the JobManifestFormat enum +func JobManifestFormat_Values() []string { + return []string{ + JobManifestFormatS3batchOperationsCsv20180820, + JobManifestFormatS3inventoryReportCsv20161130, + } +} + const ( // JobReportFormatReportCsv20180820 is a JobReportFormat enum value JobReportFormatReportCsv20180820 = "Report_CSV_20180820" ) +// JobReportFormat_Values returns all elements of the JobReportFormat enum +func JobReportFormat_Values() []string { + return []string{ + JobReportFormatReportCsv20180820, + } +} + const ( // JobReportScopeAllTasks is a JobReportScope enum value JobReportScopeAllTasks = "AllTasks" @@ -4836,6 +10159,14 @@ const ( JobReportScopeFailedTasksOnly = "FailedTasksOnly" ) +// JobReportScope_Values returns all elements of the JobReportScope enum +func JobReportScope_Values() []string { + return []string{ + JobReportScopeAllTasks, + JobReportScopeFailedTasksOnly, + } +} + const ( // JobStatusActive is a JobStatus enum value JobStatusActive = "Active" @@ -4877,6 +10208,25 @@ const ( JobStatusSuspended = "Suspended" ) +// JobStatus_Values returns all elements of the JobStatus enum +func JobStatus_Values() []string { + return []string{ + JobStatusActive, + JobStatusCancelled, + JobStatusCancelling, + JobStatusComplete, + JobStatusCompleting, + JobStatusFailed, + JobStatusFailing, + JobStatusNew, + JobStatusPaused, + JobStatusPausing, + JobStatusPreparing, + JobStatusReady, + JobStatusSuspended, + } +} + const ( // NetworkOriginInternet is a NetworkOrigin enum value NetworkOriginInternet = "Internet" @@ -4885,6 +10235,14 @@ const ( NetworkOriginVpc = "VPC" ) +// NetworkOrigin_Values returns all elements of the NetworkOrigin enum +func NetworkOrigin_Values() []string { + return []string{ + NetworkOriginInternet, + NetworkOriginVpc, + } +} + const ( // OperationNameLambdaInvoke is a OperationName enum value OperationNameLambdaInvoke = "LambdaInvoke" @@ -4900,8 +10258,27 @@ const ( // OperationNameS3initiateRestoreObject is a OperationName enum value OperationNameS3initiateRestoreObject = "S3InitiateRestoreObject" + + // OperationNameS3putObjectLegalHold is a OperationName enum value + OperationNameS3putObjectLegalHold = "S3PutObjectLegalHold" + + // OperationNameS3putObjectRetention is a OperationName enum value + OperationNameS3putObjectRetention = "S3PutObjectRetention" ) +// OperationName_Values returns all elements of the OperationName enum +func OperationName_Values() []string { + return []string{ + OperationNameLambdaInvoke, + OperationNameS3putObjectCopy, + OperationNameS3putObjectAcl, + OperationNameS3putObjectTagging, + OperationNameS3initiateRestoreObject, + OperationNameS3putObjectLegalHold, + OperationNameS3putObjectRetention, + } +} + const ( // RequestedJobStatusCancelled is a RequestedJobStatus enum value RequestedJobStatusCancelled = "Cancelled" @@ -4910,6 +10287,14 @@ const ( RequestedJobStatusReady = "Ready" ) +// 
RequestedJobStatus_Values returns all elements of the RequestedJobStatus enum +func RequestedJobStatus_Values() []string { + return []string{ + RequestedJobStatusCancelled, + RequestedJobStatusReady, + } +} + const ( // S3CannedAccessControlListPrivate is a S3CannedAccessControlList enum value S3CannedAccessControlListPrivate = "private" @@ -4933,6 +10318,19 @@ const ( S3CannedAccessControlListBucketOwnerFullControl = "bucket-owner-full-control" ) +// S3CannedAccessControlList_Values returns all elements of the S3CannedAccessControlList enum +func S3CannedAccessControlList_Values() []string { + return []string{ + S3CannedAccessControlListPrivate, + S3CannedAccessControlListPublicRead, + S3CannedAccessControlListPublicReadWrite, + S3CannedAccessControlListAwsExecRead, + S3CannedAccessControlListAuthenticatedRead, + S3CannedAccessControlListBucketOwnerRead, + S3CannedAccessControlListBucketOwnerFullControl, + } +} + const ( // S3GlacierJobTierBulk is a S3GlacierJobTier enum value S3GlacierJobTierBulk = "BULK" @@ -4941,6 +10339,14 @@ const ( S3GlacierJobTierStandard = "STANDARD" ) +// S3GlacierJobTier_Values returns all elements of the S3GlacierJobTier enum +func S3GlacierJobTier_Values() []string { + return []string{ + S3GlacierJobTierBulk, + S3GlacierJobTierStandard, + } +} + const ( // S3GranteeTypeIdentifierId is a S3GranteeTypeIdentifier enum value S3GranteeTypeIdentifierId = "id" @@ -4952,6 +10358,15 @@ const ( S3GranteeTypeIdentifierUri = "uri" ) +// S3GranteeTypeIdentifier_Values returns all elements of the S3GranteeTypeIdentifier enum +func S3GranteeTypeIdentifier_Values() []string { + return []string{ + S3GranteeTypeIdentifierId, + S3GranteeTypeIdentifierEmailAddress, + S3GranteeTypeIdentifierUri, + } +} + const ( // S3MetadataDirectiveCopy is a S3MetadataDirective enum value S3MetadataDirectiveCopy = "COPY" @@ -4960,6 +10375,14 @@ const ( S3MetadataDirectiveReplace = "REPLACE" ) +// S3MetadataDirective_Values returns all elements of the S3MetadataDirective enum +func S3MetadataDirective_Values() []string { + return []string{ + S3MetadataDirectiveCopy, + S3MetadataDirectiveReplace, + } +} + const ( // S3ObjectLockLegalHoldStatusOff is a S3ObjectLockLegalHoldStatus enum value S3ObjectLockLegalHoldStatusOff = "OFF" @@ -4968,6 +10391,14 @@ const ( S3ObjectLockLegalHoldStatusOn = "ON" ) +// S3ObjectLockLegalHoldStatus_Values returns all elements of the S3ObjectLockLegalHoldStatus enum +func S3ObjectLockLegalHoldStatus_Values() []string { + return []string{ + S3ObjectLockLegalHoldStatusOff, + S3ObjectLockLegalHoldStatusOn, + } +} + const ( // S3ObjectLockModeCompliance is a S3ObjectLockMode enum value S3ObjectLockModeCompliance = "COMPLIANCE" @@ -4976,6 +10407,30 @@ const ( S3ObjectLockModeGovernance = "GOVERNANCE" ) +// S3ObjectLockMode_Values returns all elements of the S3ObjectLockMode enum +func S3ObjectLockMode_Values() []string { + return []string{ + S3ObjectLockModeCompliance, + S3ObjectLockModeGovernance, + } +} + +const ( + // S3ObjectLockRetentionModeCompliance is a S3ObjectLockRetentionMode enum value + S3ObjectLockRetentionModeCompliance = "COMPLIANCE" + + // S3ObjectLockRetentionModeGovernance is a S3ObjectLockRetentionMode enum value + S3ObjectLockRetentionModeGovernance = "GOVERNANCE" +) + +// S3ObjectLockRetentionMode_Values returns all elements of the S3ObjectLockRetentionMode enum +func S3ObjectLockRetentionMode_Values() []string { + return []string{ + S3ObjectLockRetentionModeCompliance, + S3ObjectLockRetentionModeGovernance, + } +} + const ( // S3PermissionFullControl 
is a S3Permission enum value S3PermissionFullControl = "FULL_CONTROL" @@ -4993,6 +10448,17 @@ const ( S3PermissionWriteAcp = "WRITE_ACP" ) +// S3Permission_Values returns all elements of the S3Permission enum +func S3Permission_Values() []string { + return []string{ + S3PermissionFullControl, + S3PermissionRead, + S3PermissionWrite, + S3PermissionReadAcp, + S3PermissionWriteAcp, + } +} + const ( // S3SSEAlgorithmAes256 is a S3SSEAlgorithm enum value S3SSEAlgorithmAes256 = "AES256" @@ -5001,6 +10467,14 @@ const ( S3SSEAlgorithmKms = "KMS" ) +// S3SSEAlgorithm_Values returns all elements of the S3SSEAlgorithm enum +func S3SSEAlgorithm_Values() []string { + return []string{ + S3SSEAlgorithmAes256, + S3SSEAlgorithmKms, + } +} + const ( // S3StorageClassStandard is a S3StorageClass enum value S3StorageClassStandard = "STANDARD" @@ -5020,3 +10494,43 @@ const ( // S3StorageClassDeepArchive is a S3StorageClass enum value S3StorageClassDeepArchive = "DEEP_ARCHIVE" ) + +// S3StorageClass_Values returns all elements of the S3StorageClass enum +func S3StorageClass_Values() []string { + return []string{ + S3StorageClassStandard, + S3StorageClassStandardIa, + S3StorageClassOnezoneIa, + S3StorageClassGlacier, + S3StorageClassIntelligentTiering, + S3StorageClassDeepArchive, + } +} + +const ( + // TransitionStorageClassGlacier is a TransitionStorageClass enum value + TransitionStorageClassGlacier = "GLACIER" + + // TransitionStorageClassStandardIa is a TransitionStorageClass enum value + TransitionStorageClassStandardIa = "STANDARD_IA" + + // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value + TransitionStorageClassOnezoneIa = "ONEZONE_IA" + + // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value + TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value + TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" +) + +// TransitionStorageClass_Values returns all elements of the TransitionStorageClass enum +func TransitionStorageClass_Values() []string { + return []string{ + TransitionStorageClassGlacier, + TransitionStorageClassStandardIa, + TransitionStorageClassOnezoneIa, + TransitionStorageClassIntelligentTiering, + TransitionStorageClassDeepArchive, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3control/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3control/customizations.go index 6ea102095..09366ae4d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3control/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3control/customizations.go @@ -2,7 +2,9 @@ package s3control import ( "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/internal/s3err" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/internal/s3shared/s3err" ) func init() { @@ -10,5 +12,33 @@ func init() { } func defaultInitClientFn(c *client.Client) { + // Support building custom endpoints based on config + c.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "s3ControlEndpointHandler", + Fn: endpointHandler, + }) + + // S3 uses custom error unmarshaling logic c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler()) } + +// endpointARNGetter is an accessor interface to grab the +// the field corresponding to an endpoint ARN input. 
+type endpointARNGetter interface { + getEndpointARN() (arn.Resource, error) + hasEndpointARN() bool + updateArnableField(string) (interface{}, error) +} + +// endpointOutpostIDGetter is an accessor interface to grab the +// the field corresponding to an outpost ID input. +type endpointOutpostIDGetter interface { + getOutpostID() (string, error) + hasOutpostID() bool +} + +// accountIDValidator is an accessor interface to validate the +// account id member value and account id present in endpoint ARN. +type accountIDValidator interface { + updateAccountID(string) (interface{}, error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3control/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3control/endpoint.go new file mode 100644 index 000000000..561d422ad --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3control/endpoint.go @@ -0,0 +1,216 @@ +package s3control + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws" + awsarn "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" +) + +const ( + // outpost id header + outpostIDHeader = "x-amz-outpost-id" + + // account id header + accountIDHeader = "x-amz-account-id" +) + +// Used by shapes with members decorated as endpoint ARN. +func parseEndpointARN(v string) (arn.Resource, error) { + return arn.ParseResource(v, resourceParser) +} + +func resourceParser(a awsarn.ARN) (arn.Resource, error) { + resParts := arn.SplitResource(a.Resource) + switch resParts[0] { + case "outpost": + return arn.ParseOutpostARNResource(a, resParts[1:]) + default: + return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"} + } +} + +func endpointHandler(req *request.Request) { + // For special case "CreateBucket" and "ListRegionalBuckets" operation + outpostIDEndpoint, ok := req.Params.(endpointOutpostIDGetter) + if ok && outpostIDEndpoint.hasOutpostID() { + outpostID, err := outpostIDEndpoint.getOutpostID() + if err != nil { + req.Error = fmt.Errorf("expected outpost ID to be supported, %v", err) + } + if len(strings.TrimSpace(outpostID)) == 0 { + return + } + updateRequestOutpostIDEndpoint(req) + return + } + + endpoint, ok := req.Params.(endpointARNGetter) + if !ok || !endpoint.hasEndpointARN() { + return + } + + resource, err := endpoint.getEndpointARN() + if err != nil { + req.Error = s3shared.NewInvalidARNError(nil, err) + return + } + + // Add account-id header for the request if not present. + // SDK must always send the x-amz-account-id header for all requests + // where an accountId has been extracted from an ARN or the accountId field modeled as a header. 
+ if h := req.HTTPRequest.Header.Get(accountIDHeader); len(h) == 0 { + req.HTTPRequest.Header.Add(accountIDHeader, resource.GetARN().AccountID) + } + + switch tv := resource.(type) { + case arn.OutpostAccessPointARN: + // Add outpostID header + req.HTTPRequest.Header.Add(outpostIDHeader, tv.OutpostID) + + // update arnable field to resource value + updatedInput, err := endpoint.updateArnableField(tv.AccessPointName) + if err != nil { + req.Error = err + return + } + + // update request params to use modified ARN field value, if not nil + if updatedInput != nil { + req.Params = updatedInput + } + + // update request for outpost access point endpoint + err = updateRequestOutpostAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + case arn.OutpostBucketARN: + // Add outpostID header + req.HTTPRequest.Header.Add(outpostIDHeader, tv.OutpostID) + + // update arnable field to resource value + updatedInput, err := endpoint.updateArnableField(tv.BucketName) + if err != nil { + req.Error = err + return + } + + // update request params to use modified ARN field value, if not nil + if updatedInput != nil { + req.Params = updatedInput + } + + // update request for outpost bucket endpoint + err = updateRequestOutpostBucketEndpoint(req, tv) + if err != nil { + req.Error = err + } + default: + req.Error = s3shared.NewInvalidARNError(resource, nil) + } +} + +// updateRequestOutpostIDEndpoint is special customization to be applied for operations +// CreateBucket, ListRegionalBuckets which must resolve endpoint to s3-outposts.{region}.amazonaws.com +// with region as client region and signed by s3-control if an outpost id is provided. +func updateRequestOutpostIDEndpoint(request *request.Request) { + serviceEndpointLabel := "s3-outposts." + cfgRegion := aws.StringValue(request.Config.Region) + + // request url + request.HTTPRequest.URL.Host = serviceEndpointLabel + cfgRegion + ".amazonaws.com" + + // disable the host prefix for outpost access points + request.Config.DisableEndpointHostPrefix = aws.Bool(true) + + // signer redirection + request.ClientInfo.SigningName = "s3-outposts" + request.ClientInfo.SigningRegion = cfgRegion +} + +func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error { + // validate Outpost endpoint + if err := validateOutpostEndpoint(req, accessPoint); err != nil { + return err + } + + // disable the host prefix for outpost access points + req.Config.DisableEndpointHostPrefix = aws.Bool(true) + + if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + return nil +} + +func updateRequestOutpostBucketEndpoint(req *request.Request, bucketResource arn.OutpostBucketARN) error { + // validate Outpost endpoint + if err := validateOutpostEndpoint(req, bucketResource); err != nil { + return err + } + + // disable the host prefix for outpost bucket. 
+ req.Config.DisableEndpointHostPrefix = aws.Bool(true) + + if err := outpostBucketResourceEndpointBuilder(bucketResource).build(req); err != nil { + return err + } + + return nil +} + +// validate request resource for retrieving endpoint +func validateEndpointRequestResource(req *request.Request, resource arn.Resource) error { + resReq := s3shared.ResourceRequest{Request: req, Resource: resource} + + if resReq.IsCrossPartition() { + return s3shared.NewClientPartitionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { + return s3shared.NewClientRegionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + if resReq.HasCustomEndpoint() { + return s3shared.NewInvalidARNWithCustomEndpointError(resource, nil) + } + + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + return nil +} + +// validations for fetching outpost endpoint +func validateOutpostEndpoint(req *request.Request, resource arn.Resource) error { + resReq := s3shared.ResourceRequest{ + Request: req, + Resource: resource, + } + + if err := validateEndpointRequestResource(req, resource); err != nil { + return err + } + + // resource configured with FIPS as region is not supported by outposts + if resReq.ResourceConfiguredForFIPS() { + return s3shared.NewInvalidARNWithFIPSError(resource, nil) + } + + // DualStack not supported + if aws.BoolValue(req.Config.UseDualStack) { + return s3shared.NewClientConfiguredForDualStackError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3control/endpoint_builder.go b/vendor/github.com/aws/aws-sdk-go/service/s3control/endpoint_builder.go new file mode 100644 index 000000000..8998ad008 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3control/endpoint_builder.go @@ -0,0 +1,152 @@ +package s3control + +import ( + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + accessPointPrefixLabel = "accesspoint" + accountIDPrefixLabel = "accountID" + + outpostPrefixLabel = "outpost" +) + +// outpostAccessPointEndpointBuilder represents the endpoint builder for outpost access point arn. +type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN + +// build builds an endpoint corresponding to the outpost access point arn. +// +// For building an endpoint from outpost access point arn, format used is: +// - Outpost access point endpoint format : s3-outposts.{region}.{dnsSuffix} +// - example : s3-outposts.us-west-2.amazonaws.com +// +// Outpost AccessPoint Endpoint request are signed using "s3-outposts" as signing name. 
+// +func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := o.Region + resolveService := o.Service + cfgRegion := aws.StringValue(req.Config.Region) + + if s3shared.IsFIPS(cfgRegion) && !aws.BoolValue(req.Config.S3UseARNRegion) { + return s3shared.NewInvalidARNWithFIPSError(o, nil) + } + + endpointsID := resolveService + if resolveService == "s3-outposts" { + endpointsID = "s3" + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, endpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(o, + req.ClientInfo.PartitionID, resolveRegion, err) + } + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + // add url host as s3-outposts + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, endpointsID) { + req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):] + } + + // set the signing region, name to resolved names from ARN + redirectSigner(req, resolveService, resolveRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(o, err) + } + + return nil +} + +func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: o.AccessPointName, + accountIDPrefixLabel: o.AccountID, + outpostPrefixLabel: o.OutpostID, + } +} + +// outpostBucketResourceEndpointBuilder represents the endpoint builder for outpost bucket resource arn +type outpostBucketResourceEndpointBuilder arn.OutpostBucketARN + +// build builds the endpoint for corresponding outpost bucket arn +// +// For building an endpoint from outpost bucket arn, format used is: +// - Outpost bucket arn endpoint format : s3-outposts.{region}.{dnsSuffix} +// - example : s3-outposts.us-west-2.amazonaws.com +// +// Outpost bucket arn endpoint request are signed using "s3-outposts" as signing name +// +func (o outpostBucketResourceEndpointBuilder) build(req *request.Request) error { + resolveService := arn.OutpostBucketARN(o).Service + resolveRegion := arn.OutpostBucketARN(o).Region + cfgRegion := aws.StringValue(req.Config.Region) + + // Outpost bucket resource uses `s3-control` as serviceEndpointLabel + endpointsID := "s3-control" + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, endpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.OutpostBucketARN(o), + req.ClientInfo.PartitionID, cfgRegion, err) + } + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + // add url host as s3-outposts + cfgHost := req.HTTPRequest.URL.Host + if strings.HasPrefix(cfgHost, endpointsID) { + req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):] + } + + // signer redirection + redirectSigner(req, resolveService, resolveRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.OutpostBucketARN(o), err) + } + return nil +} + +func resolveRegionalEndpoint(r *request.Request, region string, endpointsID string) (endpoints.ResolvedEndpoint, error) { + return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) { + opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) + opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) + opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + }) +} + +func updateRequestEndpoint(r 
*request.Request, endpoint string) (err error) { + endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL)) + + r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to parse endpoint URL", err) + } + + return nil +} + +// redirectSigner sets signing name, signing region for a request +func redirectSigner(req *request.Request, signingName string, signingRegion string) { + req.ClientInfo.SigningName = signingName + req.ClientInfo.SigningRegion = signingRegion +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3control/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3control/errors.go index 8f93d6b77..fea7088ab 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3control/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3control/errors.go @@ -8,6 +8,20 @@ const ( // "BadRequestException". ErrCodeBadRequestException = "BadRequestException" + // ErrCodeBucketAlreadyExists for service response error code + // "BucketAlreadyExists". + // + // The requested Outposts bucket name is not available. The bucket namespace + // is shared by all users of the AWS Outposts in this Region. Select a different + // name and try again. + ErrCodeBucketAlreadyExists = "BucketAlreadyExists" + + // ErrCodeBucketAlreadyOwnedByYou for service response error code + // "BucketAlreadyOwnedByYou". + // + // The Outposts bucket you tried to create already exists, and you own it. + ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + // ErrCodeIdempotencyException for service response error code // "IdempotencyException". ErrCodeIdempotencyException = "IdempotencyException" @@ -42,4 +56,10 @@ const ( // ErrCodeTooManyRequestsException for service response error code // "TooManyRequestsException". ErrCodeTooManyRequestsException = "TooManyRequestsException" + + // ErrCodeTooManyTagsException for service response error code + // "TooManyTagsException". + // + // Amazon S3 throws this exception if you have too many tags in your tag set. + ErrCodeTooManyTagsException = "TooManyTagsException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go index f3440a201..de8fa2e2f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restxml" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3control/validate.go b/vendor/github.com/aws/aws-sdk-go/service/s3control/validate.go new file mode 100644 index 000000000..c14ce783e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3control/validate.go @@ -0,0 +1,44 @@ +package s3control + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared" +) + +// updateAccountIDWithARNHandler is a request named handler that is used to validate and populate the request account id +// input if it may also be present in the resource ARN. 
+var updateAccountIDWithARNHandler = request.NamedHandler{ + Name: "updateAccountIDWithARNHandler", + Fn: func(req *request.Request) { + endpoint, ok := req.Params.(endpointARNGetter) + if !ok || !endpoint.hasEndpointARN() { + return + } + + // fetch endpoint arn resource + resource, err := endpoint.getEndpointARN() + if err != nil { + req.Error = fmt.Errorf("error while fetching endpoint ARN: %v", err) + return + } + + // Validate that the present account id in a request input matches the account id + // present in an ARN. If a value for request input account id member is not provided, + // the accountID member is populated using the account id present in the ARN + // and a pointer to copy of updatedInput is returned. + if accountIDValidator, ok := req.Params.(accountIDValidator); ok { + accID := resource.GetARN().AccountID + updatedInput, err := accountIDValidator.updateAccountID(accID) + if err != nil { + req.Error = s3shared.NewInvalidARNError(resource, err) + return + } + // update request params to use modified account id, if not nil + if updatedInput != nil { + req.Params = updatedInput + } + } + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go index 9106e36ca..57a9f9b0a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go @@ -308,12 +308,8 @@ func (c *SageMaker) CreateAppRequest(input *CreateAppInput) (req *request.Reques // // Creates a running App for the specified UserProfile. Supported Apps are JupyterServer // and KernelGateway. This operation is automatically invoked by Amazon SageMaker -// Amazon SageMaker Studio (Studio) upon access to the associated Studio Domain, -// and when new kernel configurations are selected by the user. A user may have -// multiple Apps active simultaneously. Apps will automatically terminate and -// be deleted when stopped from within Studio, or when the DeleteApp API is -// manually called. UserProfiles are limited to 5 concurrently running Apps -// at a time. +// Studio upon access to the associated Domain, and when new kernel configurations +// are selected by the user. A user may have multiple Apps active simultaneously. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -396,7 +392,14 @@ func (c *SageMaker) CreateAutoMLJobRequest(input *CreateAutoMLJobInput) (req *re // CreateAutoMLJob API operation for Amazon SageMaker Service. // -// Creates an AutoPilot job. +// Creates an Autopilot job. +// +// Find the best performing model after you run an Autopilot job by calling +// . Deploy that model by following the steps described in Step 6.1: Deploy +// the Model to Amazon SageMaker Hosting Services (https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html). +// +// For information about how to use Autopilot, see Automate Model Development +// with Amazon SageMaker Autopilot (https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -580,7 +583,7 @@ func (c *SageMaker) CreateCompilationJobRequest(input *CreateCompilationJobInput // the model runs on // // * The Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker -// assumes to perform the model compilation job +// assumes to perform the model compilation job. // // You can also provide a Tag to track the model compilation job's resource // use and costs. The response body contains the CompilationJobArn for the compiled @@ -671,15 +674,54 @@ func (c *SageMaker) CreateDomainRequest(input *CreateDomainInput) (req *request. // CreateDomain API operation for Amazon SageMaker Service. // -// Creates a Domain for Amazon SageMaker Amazon SageMaker Studio (Studio), which -// can be accessed by end-users in a web browser. A Domain has an associated -// directory, list of authorized users, and a variety of security, application, -// policies, and Amazon Virtual Private Cloud configurations. An AWS account -// is limited to one Domain, per region. Users within a domain can share notebook -// files and other artifacts with each other. When a Domain is created, an Amazon -// Elastic File System (EFS) is also created for use by all of the users within -// the Domain. Each user receives a private home directory within the EFS for -// notebooks, Git repositories, and data files. +// Creates a Domain used by Amazon SageMaker Studio. A domain consists of an +// associated Amazon Elastic File System (EFS) volume, a list of authorized +// users, and a variety of security, application, policy, and Amazon Virtual +// Private Cloud (VPC) configurations. An AWS account is limited to one domain +// per region. Users within a domain can share notebook files and other artifacts +// with each other. +// +// When a domain is created, an EFS volume is created for use by all of the +// users within the domain. Each user receives a private home directory within +// the EFS volume for notebooks, Git repositories, and data files. +// +// VPC configuration +// +// All SageMaker Studio traffic between the domain and the EFS volume is through +// the specified VPC and subnets. For other Studio traffic, you can specify +// the AppNetworkAccessType parameter. AppNetworkAccessType corresponds to the +// network access type that you choose when you onboard to Studio. The following +// options are available: +// +// * PublicInternetOnly - Non-EFS traffic goes through a VPC managed by Amazon +// SageMaker, which allows internet access. This is the default value. +// +// * VpcOnly - All Studio traffic is through the specified VPC and subnets. +// Internet access is disabled by default. To allow internet access, you +// must specify a NAT gateway. When internet access is disabled, you won't +// be able to train or host models unless your VPC has an interface endpoint +// (PrivateLink) or a NAT gateway and your security groups allow outbound +// connections. 
+// +// VpcOnly network access type +// +// When you choose VpcOnly, you must specify the following: +// +// * Security group inbound and outbound rules to allow NFS traffic over +// TCP on port 2049 between the domain and the EFS volume +// +// * Security group inbound and outbound rules to allow traffic between the +// JupyterServer app and the KernelGateway apps +// +// * Interface endpoints to access the SageMaker API and SageMaker runtime +// +// For more information, see: +// +// * Security groups for your VPC (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) +// +// * VPC with public and private subnets (NAT) (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Scenario2.html) +// +// * Connect to SageMaker through a VPC interface endpoint (https://docs.aws.amazon.com/sagemaker/latest/dg/interface-vpc-endpoint.html) // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -764,8 +806,7 @@ func (c *SageMaker) CreateEndpointRequest(input *CreateEndpointInput) (req *requ // // Creates an endpoint using the endpoint configuration specified in the request. // Amazon SageMaker uses the endpoint to provision resources and deploy models. -// You create the endpoint configuration with the CreateEndpointConfig (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpointConfig.html) -// API. +// You create the endpoint configuration with the CreateEndpointConfig API. // // Use this API to deploy models using Amazon SageMaker hosting services. // @@ -782,11 +823,21 @@ func (c *SageMaker) CreateEndpointRequest(input *CreateEndpointInput) (req *requ // When it receives the request, Amazon SageMaker creates the endpoint, launches // the resources (ML compute instances), and deploys the model(s) on them. // +// When you call CreateEndpoint, a load call is made to DynamoDB to verify that +// your endpoint configuration exists. When you read data from a DynamoDB table +// supporting Eventually Consistent Reads (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html), +// the response might not reflect the results of a recently completed write +// operation. The response might include some stale data. If the dependent entities +// are not yet in DynamoDB, this causes a validation error. If you repeat your +// read request after a short time, the response should return the latest data. +// So retry logic is recommended to handle these possible issues. We also recommend +// that customers call DescribeEndpointConfig before calling CreateEndpoint +// to minimize the potential impact of a DynamoDB eventually consistent read. +// // When Amazon SageMaker receives the request, it sets the endpoint status to // Creating. After it creates the endpoint, it sets the status to InService. // Amazon SageMaker can then process incoming requests for inferences. To check -// the status of an endpoint, use the DescribeEndpoint (https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html) -// API. +// the status of an endpoint, use the DescribeEndpoint API. // // If any of the models hosted at this endpoint get model data from an Amazon // S3 location, Amazon SageMaker uses AWS Security Token Service to download @@ -877,8 +928,7 @@ func (c *SageMaker) CreateEndpointConfigRequest(input *CreateEndpointConfigInput // Creates an endpoint configuration that Amazon SageMaker hosting services // uses to deploy models. 
In the configuration, you identify one or more models, // created using the CreateModel API, to deploy and the resources that you want -// Amazon SageMaker to provision. Then you call the CreateEndpoint (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html) -// API. +// Amazon SageMaker to provision. Then you call the CreateEndpoint API. // // Use this API if you want to use Amazon SageMaker hosting services to deploy // models into production. @@ -898,6 +948,17 @@ func (c *SageMaker) CreateEndpointConfigRequest(input *CreateEndpointConfigInput // hosting services, see Deploy the Model to Amazon SageMaker Hosting Services // (AWS SDK for Python (Boto 3)). (https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-deploy-model.html#ex1-deploy-model-boto) // +// When you call CreateEndpoint, a load call is made to DynamoDB to verify that +// your endpoint configuration exists. When you read data from a DynamoDB table +// supporting Eventually Consistent Reads (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html), +// the response might not reflect the results of a recently completed write +// operation. The response might include some stale data. If the dependent entities +// are not yet in DynamoDB, this causes a validation error. If you repeat your +// read request after a short time, the response should return the latest data. +// So retry logic is recommended to handle these possible issues. We also recommend +// that customers call DescribeEndpointConfig before calling CreateEndpoint +// to minimize the potential impact of a DynamoDB eventually consistent read. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -976,9 +1037,9 @@ func (c *SageMaker) CreateExperimentRequest(input *CreateExperimentInput) (req * // CreateExperiment API operation for Amazon SageMaker Service. // -// Creates an Amazon SageMaker experiment. An experiment is a collection of -// trials that are observed, compared and evaluated as a group. A trial is a -// set of steps, called trial components, that produce a machine learning model. +// Creates an SageMaker experiment. An experiment is a collection of trials +// that are observed, compared and evaluated as a group. A trial is a set of +// steps, called trial components, that produce a machine learning model. // // The goal of an experiment is to determine the components that produce the // best model. Multiple trials are performed, each one isolating and measuring @@ -1937,9 +1998,13 @@ func (c *SageMaker) CreatePresignedDomainUrlRequest(input *CreatePresignedDomain // // Creates a URL for a specified UserProfile in a Domain. When accessed in a // web browser, the user will be automatically signed in to Amazon SageMaker -// Amazon SageMaker Studio (Studio), and granted access to all of the Apps and -// files associated with that Amazon Elastic File System (EFS). This operation -// can only be called when AuthMode equals IAM. +// Studio, and granted access to all of the Apps and files associated with the +// Domain's Amazon Elastic File System (EFS) volume. This operation can only +// be called when the authentication mode equals IAM. +// +// The URL that you get from a call to CreatePresignedDomainUrl is valid only +// for 5 minutes. If you try to use the URL after the 5-minute limit expires, +// you are directed to the AWS console sign-in page. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2024,17 +2089,21 @@ func (c *SageMaker) CreatePresignedNotebookInstanceUrlRequest(input *CreatePresi // home page from the notebook instance. The console uses this API to get the // URL and show the page. // -// IAM authorization policies for this API are also enforced for every HTTP -// request and WebSocket frame that attempts to connect to the notebook instance.For -// example, you can restrict access to this API and to the URL that it returns -// to a list of IP addresses that you specify. Use the NotIpAddress condition -// operator and the aws:SourceIP condition context key to specify the list of -// IP addresses that you want to have access to the notebook instance. For more -// information, see Limit Access to a Notebook Instance by IP Address (https://docs.aws.amazon.com/sagemaker/latest/dg/security_iam_id-based-policy-examples.html#nbi-ip-filter). +// The IAM role or user used to call this API defines the permissions to access +// the notebook instance. Once the presigned URL is created, no additional permission +// is required to access this URL. IAM authorization policies for this API are +// also enforced for every HTTP request and WebSocket frame that attempts to +// connect to the notebook instance. // -// The URL that you get from a call to is valid only for 5 minutes. If you try -// to use the URL after the 5-minute limit expires, you are directed to the -// AWS console sign-in page. +// You can restrict access to this API and to the URL that it returns to a list +// of IP addresses that you specify. Use the NotIpAddress condition operator +// and the aws:SourceIP condition context key to specify the list of IP addresses +// that you want to have access to the notebook instance. For more information, +// see Limit Access to a Notebook Instance by IP Address (https://docs.aws.amazon.com/sagemaker/latest/dg/security_iam_id-based-policy-examples.html#nbi-ip-filter). +// +// The URL that you get from a call to CreatePresignedNotebookInstanceUrl is +// valid only for 5 minutes. If you try to use the URL after the 5-minute limit +// expires, you are directed to the AWS console sign-in page. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2627,13 +2696,14 @@ func (c *SageMaker) CreateUserProfileRequest(input *CreateUserProfileInput) (req // CreateUserProfile API operation for Amazon SageMaker Service. // -// Creates a new user profile. A user profile represents a single user within -// a Domain, and is the main way to reference a "person" for the purposes of -// sharing, reporting and other user-oriented features. This entity is created -// during on-boarding. If an administrator invites a person by email or imports -// them from SSO, a new UserProfile is automatically created. This entity is -// the primary holder of settings for an individual user and has a reference -// to the user's private Amazon Elastic File System (EFS) home directory. +// Creates a user profile. A user profile represents a single user within a +// domain, and is the main way to reference a "person" for the purposes of sharing, +// reporting, and other user-oriented features. This entity is created when +// a user onboards to Amazon SageMaker Studio. 
If an administrator invites a +// person by email or imports them from SSO, a user profile is automatically +// created. A user profile is the primary holder of settings for an individual +// user and has a reference to the user's private Amazon Elastic File System +// (EFS) home directory. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2672,6 +2742,96 @@ func (c *SageMaker) CreateUserProfileWithContext(ctx aws.Context, input *CreateU return out, req.Send() } +const opCreateWorkforce = "CreateWorkforce" + +// CreateWorkforceRequest generates a "aws/request.Request" representing the +// client's request for the CreateWorkforce operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateWorkforce for more information on using the CreateWorkforce +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateWorkforceRequest method. +// req, resp := client.CreateWorkforceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateWorkforce +func (c *SageMaker) CreateWorkforceRequest(input *CreateWorkforceInput) (req *request.Request, output *CreateWorkforceOutput) { + op := &request.Operation{ + Name: opCreateWorkforce, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateWorkforceInput{} + } + + output = &CreateWorkforceOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateWorkforce API operation for Amazon SageMaker Service. +// +// Use this operation to create a workforce. This operation will return an error +// if a workforce already exists in the AWS Region that you specify. You can +// only create one workforce in each AWS Region per AWS account. +// +// If you want to create a new workforce in an AWS Region where a workforce +// already exists, use the API operation to delete the existing workforce and +// then use CreateWorkforce to create a new workforce. +// +// To create a private workforce using Amazon Cognito, you must specify a Cognito +// user pool in CognitoConfig. You can also create an Amazon Cognito workforce +// using the Amazon SageMaker console. For more information, see Create a Private +// Workforce (Amazon Cognito) (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private.html). +// +// To create a private workforce using your own OIDC Identity Provider (IdP), +// specify your IdP configuration in OidcConfig. Your OIDC IdP must support +// groups because groups are used by Ground Truth and Amazon A2I to create work +// teams. For more information, see Create a Private Workforce (OIDC IdP) (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private-oidc.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation CreateWorkforce for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/CreateWorkforce +func (c *SageMaker) CreateWorkforce(input *CreateWorkforceInput) (*CreateWorkforceOutput, error) { + req, out := c.CreateWorkforceRequest(input) + return out, req.Send() +} + +// CreateWorkforceWithContext is the same as CreateWorkforce with the addition of +// the ability to pass a context and additional request options. +// +// See CreateWorkforce for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) CreateWorkforceWithContext(ctx aws.Context, input *CreateWorkforceInput, opts ...request.Option) (*CreateWorkforceOutput, error) { + req, out := c.CreateWorkforceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateWorkteam = "CreateWorkteam" // CreateWorkteamRequest generates a "aws/request.Request" representing the @@ -3037,10 +3197,10 @@ func (c *SageMaker) DeleteDomainRequest(input *DeleteDomainInput) (req *request. // DeleteDomain API operation for Amazon SageMaker Service. // -// Used to delete a domain. If you on-boarded with IAM mode, you will need to -// delete your domain to on-board again using SSO. Use with caution. All of -// the members of the domain will lose access to their EFS volume, including -// data, notebooks, and other artifacts. +// Used to delete a domain. If you onboarded with IAM mode, you will need to +// delete your domain to onboard again using SSO. Use with caution. All of the +// members of the domain will lose access to their EFS volume, including data, +// notebooks, and other artifacts. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3207,6 +3367,13 @@ func (c *SageMaker) DeleteEndpointConfigRequest(input *DeleteEndpointConfigInput // the specified configuration. It does not delete endpoints created using the // configuration. // +// You must not delete an EndpointConfig in use by an endpoint that is live +// or while the UpdateEndpoint or CreateEndpoint operations are being performed +// on the endpoint. If you delete the EndpointConfig of an endpoint that is +// active or being created or updated you may lose visibility into the instance +// type the endpoint is using. The endpoint must be deleted in order to stop +// incurring charges. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -3371,6 +3538,9 @@ func (c *SageMaker) DeleteFlowDefinitionRequest(input *DeleteFlowDefinitionInput // API operation DeleteFlowDefinition for usage and error information. // // Returned Error Types: +// * ResourceInUse +// Resource being accessed is in use. +// // * ResourceNotFound // Resource being access is not found. 
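To illustrate the CreateWorkforce operation added above, here is a minimal sketch of creating a private workforce backed by an existing Amazon Cognito user pool. The workforce name, user pool ID, and app client ID are placeholders; only one workforce can exist per Region, as the documentation above notes.

// Minimal sketch: create a private workforce from an existing Cognito user pool.
// WorkforceName, UserPool, and ClientId are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := sagemaker.New(sess)

	out, err := svc.CreateWorkforce(&sagemaker.CreateWorkforceInput{
		WorkforceName: aws.String("example-workforce"), // placeholder
		CognitoConfig: &sagemaker.CognitoConfig{
			UserPool: aws.String("us-east-1_XXXXXXXXX"),        // placeholder user pool ID
			ClientId: aws.String("xxxxxxxxxxxxxxxxxxxxxxxxxx"), // placeholder app client ID
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.WorkforceArn))
}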
// @@ -3396,6 +3566,90 @@ func (c *SageMaker) DeleteFlowDefinitionWithContext(ctx aws.Context, input *Dele return out, req.Send() } +const opDeleteHumanTaskUi = "DeleteHumanTaskUi" + +// DeleteHumanTaskUiRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHumanTaskUi operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteHumanTaskUi for more information on using the DeleteHumanTaskUi +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteHumanTaskUiRequest method. +// req, resp := client.DeleteHumanTaskUiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteHumanTaskUi +func (c *SageMaker) DeleteHumanTaskUiRequest(input *DeleteHumanTaskUiInput) (req *request.Request, output *DeleteHumanTaskUiOutput) { + op := &request.Operation{ + Name: opDeleteHumanTaskUi, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteHumanTaskUiInput{} + } + + output = &DeleteHumanTaskUiOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteHumanTaskUi API operation for Amazon SageMaker Service. +// +// Use this operation to delete a human task user interface (worker task template). +// +// To see a list of human task user interfaces (work task templates) in your +// account, use . When you delete a worker task template, it no longer appears +// when you call ListHumanTaskUis. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation DeleteHumanTaskUi for usage and error information. +// +// Returned Error Types: +// * ResourceNotFound +// Resource being access is not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteHumanTaskUi +func (c *SageMaker) DeleteHumanTaskUi(input *DeleteHumanTaskUiInput) (*DeleteHumanTaskUiOutput, error) { + req, out := c.DeleteHumanTaskUiRequest(input) + return out, req.Send() +} + +// DeleteHumanTaskUiWithContext is the same as DeleteHumanTaskUi with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteHumanTaskUi for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) DeleteHumanTaskUiWithContext(ctx aws.Context, input *DeleteHumanTaskUiInput, opts ...request.Option) (*DeleteHumanTaskUiOutput, error) { + req, out := c.DeleteHumanTaskUiRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDeleteModel = "DeleteModel" // DeleteModelRequest generates a "aws/request.Request" representing the @@ -3442,9 +3696,9 @@ func (c *SageMaker) DeleteModelRequest(input *DeleteModelInput) (req *request.Re // DeleteModel API operation for Amazon SageMaker Service. // // Deletes a model. The DeleteModel API deletes only the model entry that was -// created in Amazon SageMaker when you called the CreateModel (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateModel.html) -// API. It does not delete model artifacts, inference code, or the IAM role -// that you specified when creating the model. +// created in Amazon SageMaker when you called the CreateModel API. It does +// not delete model artifacts, inference code, or the IAM role that you specified +// when creating the model. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4078,7 +4332,8 @@ func (c *SageMaker) DeleteUserProfileRequest(input *DeleteUserProfileInput) (req // DeleteUserProfile API operation for Amazon SageMaker Service. // -// Deletes a user profile. +// Deletes a user profile. When a user profile is deleted, the user loses access +// to their EFS volume, including data, notebooks, and other artifacts. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4116,6 +4371,90 @@ func (c *SageMaker) DeleteUserProfileWithContext(ctx aws.Context, input *DeleteU return out, req.Send() } +const opDeleteWorkforce = "DeleteWorkforce" + +// DeleteWorkforceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteWorkforce operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteWorkforce for more information on using the DeleteWorkforce +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteWorkforceRequest method. +// req, resp := client.DeleteWorkforceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteWorkforce +func (c *SageMaker) DeleteWorkforceRequest(input *DeleteWorkforceInput) (req *request.Request, output *DeleteWorkforceOutput) { + op := &request.Operation{ + Name: opDeleteWorkforce, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteWorkforceInput{} + } + + output = &DeleteWorkforceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteWorkforce API operation for Amazon SageMaker Service. +// +// Use this operation to delete a workforce. +// +// If you want to create a new workforce in an AWS Region where a workforce +// already exists, use this operation to delete the existing workforce and then +// use to create a new workforce. 
+// +// If a private workforce contains one or more work teams, you must use the + // operation to delete all work teams before you delete the workforce. If you + // try to delete a workforce that contains one or more work teams, you will + // receive a ResourceInUse error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation DeleteWorkforce for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/DeleteWorkforce +func (c *SageMaker) DeleteWorkforce(input *DeleteWorkforceInput) (*DeleteWorkforceOutput, error) { + req, out := c.DeleteWorkforceRequest(input) + return out, req.Send() +} + +// DeleteWorkforceWithContext is the same as DeleteWorkforce with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteWorkforce for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) DeleteWorkforceWithContext(ctx aws.Context, input *DeleteWorkforceInput, opts ...request.Option) (*DeleteWorkforceOutput, error) { + req, out := c.DeleteWorkforceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteWorkteam = "DeleteWorkteam" // DeleteWorkteamRequest generates a "aws/request.Request" representing the @@ -4628,7 +4967,7 @@ func (c *SageMaker) DescribeDomainRequest(input *DescribeDomainInput) (req *requ // DescribeDomain API operation for Amazon SageMaker Service. // -// The desciption of the domain. +// The description of the domain. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5014,7 +5353,8 @@ func (c *SageMaker) DescribeHumanTaskUiRequest(input *DescribeHumanTaskUiInput) // DescribeHumanTaskUi API operation for Amazon SageMaker Service. // -// Returns information about the requested human task user interface. +// Returns information about the requested human task user interface (worker +// task template). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -6103,7 +6443,7 @@ func (c *SageMaker) DescribeUserProfileRequest(input *DescribeUserProfileInput) // DescribeUserProfile API operation for Amazon SageMaker Service. // -// Describes the user profile. +// Describes a user profile. For more information, see CreateUserProfile. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -8635,8 +8975,7 @@ func (c *SageMaker) ListModelsRequest(input *ListModelsInput) (req *request.Requ // ListModels API operation for Amazon SageMaker Service. // -// Lists models created with the CreateModel (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateModel.html) -// API. +// Lists models created with the CreateModel API. // // Returns awserr.Error for service API and SDK errors.
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -10467,6 +10806,139 @@ func (c *SageMaker) ListUserProfilesPagesWithContext(ctx aws.Context, input *Lis return p.Err() } +const opListWorkforces = "ListWorkforces" + +// ListWorkforcesRequest generates a "aws/request.Request" representing the +// client's request for the ListWorkforces operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListWorkforces for more information on using the ListWorkforces +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListWorkforcesRequest method. +// req, resp := client.ListWorkforcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListWorkforces +func (c *SageMaker) ListWorkforcesRequest(input *ListWorkforcesInput) (req *request.Request, output *ListWorkforcesOutput) { + op := &request.Operation{ + Name: opListWorkforces, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListWorkforcesInput{} + } + + output = &ListWorkforcesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListWorkforces API operation for Amazon SageMaker Service. +// +// Use this operation to list all private and vendor workforces in an AWS Region. +// Note that you can only have one private workforce per AWS Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon SageMaker Service's +// API operation ListWorkforces for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/ListWorkforces +func (c *SageMaker) ListWorkforces(input *ListWorkforcesInput) (*ListWorkforcesOutput, error) { + req, out := c.ListWorkforcesRequest(input) + return out, req.Send() +} + +// ListWorkforcesWithContext is the same as ListWorkforces with the addition of +// the ability to pass a context and additional request options. +// +// See ListWorkforces for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) ListWorkforcesWithContext(ctx aws.Context, input *ListWorkforcesInput, opts ...request.Option) (*ListWorkforcesOutput, error) { + req, out := c.ListWorkforcesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListWorkforcesPages iterates over the pages of a ListWorkforces operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListWorkforces method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListWorkforces operation. +// pageNum := 0 +// err := client.ListWorkforcesPages(params, +// func(page *sagemaker.ListWorkforcesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SageMaker) ListWorkforcesPages(input *ListWorkforcesInput, fn func(*ListWorkforcesOutput, bool) bool) error { + return c.ListWorkforcesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListWorkforcesPagesWithContext same as ListWorkforcesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SageMaker) ListWorkforcesPagesWithContext(ctx aws.Context, input *ListWorkforcesInput, fn func(*ListWorkforcesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListWorkforcesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListWorkforcesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListWorkforcesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListWorkteams = "ListWorkteams" // ListWorkteamsRequest generates a "aws/request.Request" representing the @@ -10517,8 +10989,8 @@ func (c *SageMaker) ListWorkteamsRequest(input *ListWorkteamsInput) (req *reques // ListWorkteams API operation for Amazon SageMaker Service. // -// Gets a list of work teams that you have defined in a region. The list may -// be empty if no work team satisfies the filter specified in the NameContains +// Gets a list of private work teams that you have defined in a region. The +// list may be empty if no work team satisfies the filter specified in the NameContains // parameter. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -10653,6 +11125,11 @@ func (c *SageMaker) RenderUiTemplateRequest(input *RenderUiTemplateInput) (req * // // See the AWS API reference guide for Amazon SageMaker Service's // API operation RenderUiTemplate for usage and error information. +// +// Returned Error Types: +// * ResourceNotFound +// Resource being access is not found. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24/RenderUiTemplate func (c *SageMaker) RenderUiTemplate(input *RenderUiTemplateInput) (*RenderUiTemplateOutput, error) { req, out := c.RenderUiTemplateRequest(input) @@ -10725,9 +11202,9 @@ func (c *SageMaker) SearchRequest(input *SearchInput) (req *request.Request, out // Search API operation for Amazon SageMaker Service. // -// Finds Amazon SageMaker resources that match a search query. Matching resource -// objects are returned as a list of SearchResult objects in the response. 
You -// can sort the search results by any resource property in a ascending or descending +// Finds Amazon SageMaker resources that match a search query. Matching resources +// are returned as a list of SearchRecord objects in the response. You can sort +// the search results by any resource property in a ascending or descending // order. // // You can query against the following value types: numeric, text, Boolean, @@ -11850,7 +12327,7 @@ func (c *SageMaker) UpdateDomainRequest(input *UpdateDomainInput) (req *request. // UpdateDomain API operation for Amazon SageMaker Service. // -// Updates a domain. Changes will impact all of the people in the domain. +// Updates the default settings for new user profiles in the domain. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11942,13 +12419,16 @@ func (c *SageMaker) UpdateEndpointRequest(input *UpdateEndpointInput) (req *requ // // When Amazon SageMaker receives the request, it sets the endpoint status to // Updating. After updating the endpoint, it sets the status to InService. To -// check the status of an endpoint, use the DescribeEndpoint (https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html) -// API. +// check the status of an endpoint, use the DescribeEndpoint API. // // You must not delete an EndpointConfig in use by an endpoint that is live // or while the UpdateEndpoint or CreateEndpoint operations are being performed // on the endpoint. To update an endpoint, you must create a new EndpointConfig. // +// If you delete the EndpointConfig of an endpoint that is active or being created +// or updated you may lose visibility into the instance type the endpoint is +// using. The endpoint must be deleted in order to stop incurring charges. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -12031,8 +12511,7 @@ func (c *SageMaker) UpdateEndpointWeightsAndCapacitiesRequest(input *UpdateEndpo // endpoint, or capacity of one variant associated with an existing endpoint. // When it receives the request, Amazon SageMaker sets the endpoint status to // Updating. After updating the endpoint, it sets the status to InService. To -// check the status of an endpoint, use the DescribeEndpoint (https://docs.aws.amazon.com/sagemaker/latest/dg/API_DescribeEndpoint.html) -// API. +// check the status of an endpoint, use the DescribeEndpoint API. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -12696,18 +13175,29 @@ func (c *SageMaker) UpdateWorkforceRequest(input *UpdateWorkforceInput) (req *re // UpdateWorkforce API operation for Amazon SageMaker Service. // -// Restricts access to tasks assigned to workers in the specified workforce -// to those within specific ranges of IP addresses. You specify allowed IP addresses -// by creating a list of up to four CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html). +// Use this operation to update your workforce. You can use this operation to +// require that workers use specific IP addresses to work on tasks and to update +// your OpenID Connect (OIDC) Identity Provider (IdP) workforce configuration. 
// +// Use SourceIpConfig to restrict worker access to tasks to a specific range +// of IP addresses. You specify allowed IP addresses by creating a list of up +// to ten CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html). // By default, a workforce isn't restricted to specific IP addresses. If you // specify a range of IP addresses, workers who attempt to access tasks using -// any IP address outside the specified range are denied access and get a Not -// Found error message on the worker portal. After restricting access with this -// operation, you can see the allowed IP values for a private workforce with -// the operation. +// any IP address outside the specified range are denied and get a Not Found +// error message on the worker portal. // -// This operation applies only to private workforces. +// Use OidcConfig to update the configuration of a workforce created using your +// own OIDC IdP. +// +// You can only update your OIDC IdP configuration when there are no work teams +// associated with your workforce. You can delete work teams using the operation. +// +// After restricting access to a range of IP addresses or updating your OIDC +// IdP configuration with this operation, you can view details about your update +// workforce using the operation. +// +// This operation only applies to private workforces. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -12904,8 +13394,7 @@ func (s *AddTagsOutput) SetTags(v []*Tag) *AddTagsOutput { return s } -// Specifies the training algorithm to use in a CreateTrainingJob (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateTrainingJob.html) -// request. +// Specifies the training algorithm to use in a CreateTrainingJob request. // // For more information about algorithms provided by Amazon SageMaker, see Algorithms // (https://docs.aws.amazon.com/sagemaker/latest/dg/algos.html). For information @@ -13338,40 +13827,65 @@ func (s *AlgorithmValidationSpecification) SetValidationRole(v string) *Algorith return s } -// Configures how labels are consolidated across human workers. +// Configures how labels are consolidated across human workers and processes +// output data. type AnnotationConsolidationConfig struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of a Lambda function implements the logic - // for annotation consolidation. - // - // For the built-in bounding box, image classification, semantic segmentation, - // and text classification task types, Amazon SageMaker Ground Truth provides - // the following Lambda functions: - // - // * Bounding box - Finds the most similar boxes from different workers based - // on the Jaccard index of the boxes. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox - // arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox - // arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox - // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox - // arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox - // arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox - // arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox - // - // * Image classification - Uses a variant of the Expectation Maximization - // approach to estimate the true class of an image based on annotations from - // individual workers. arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass - // arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass - // arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass + // for annotation consolidation (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-annotation-consolidation.html) + // and to process output data. + // + // This parameter is required for all labeling jobs. For built-in task types + // (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-task-types.html), use + // one of the following Amazon SageMaker Ground Truth Lambda function ARNs for + // AnnotationConsolidationLambdaArn. For custom labeling workflows, see Post-annotation + // Lambda (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates-step3.html#sms-custom-templates-step3-postlambda). + // + // Bounding box - Finds the most similar boxes from different workers based + // on the Jaccard index of the boxes. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox + // arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox + // + // Image classification - Uses a variant of the Expectation Maximization approach + // to estimate the true class of an image based on annotations from individual + // workers. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass + // arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass // arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass // arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass // arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass // - // * Semantic segmentation - Treats each pixel in an image as a multi-class - // classification and treats pixel annotations from workers as "votes" for - // the correct label. arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation + // Multi-label image classification - Uses a variant of the Expectation Maximization + // approach to estimate the true classes of an image based on annotations from + // individual workers. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClassMultiLabel + // arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClassMultiLabel + // arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClassMultiLabel + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClassMultiLabel + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClassMultiLabel + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClassMultiLabel + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClassMultiLabel + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClassMultiLabel + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClassMultiLabel + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClassMultiLabel + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClassMultiLabel + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClassMultiLabel + // + // Semantic segmentation - Treats each pixel in an image as a multi-class classification + // and treats pixel annotations from workers as "votes" for the correct label. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation // arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation // arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation // arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation @@ -13384,19 +13898,39 @@ type AnnotationConsolidationConfig struct { // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation // arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation // - // * Text classification - Uses a variant of the Expectation Maximization - // approach to estimate the true class of text based on annotations from - // individual workers. 
arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass - // arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass - // arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass + // Text classification - Uses a variant of the Expectation Maximization approach + // to estimate the true class of text based on annotations from individual workers. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass + // arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass // arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass // arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass // arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass // - // * Named entity recognition - Groups similar selections and calculates - // aggregate boundaries, resolving to most-assigned label. arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition + // Multi-label text classification - Uses a variant of the Expectation Maximization + // approach to estimate the true classes of text based on annotations from individual + // workers. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClassMultiLabel + // arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClassMultiLabel + // arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClassMultiLabel + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClassMultiLabel + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClassMultiLabel + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClassMultiLabel + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClassMultiLabel + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClassMultiLabel + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClassMultiLabel + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClassMultiLabel + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClassMultiLabel + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClassMultiLabel + // + // Named entity recognition - Groups similar selections and calculates aggregate + // boundaries, resolving to most-assigned label. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition // arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition // arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition // arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition @@ -13409,25 +13943,153 @@ type AnnotationConsolidationConfig struct { // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition // arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition // - // * Bounding box verification - Uses a variant of the Expectation Maximization - // approach to estimate the true class of verification judgement for bounding - // box labels based on annotations from individual workers. arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationBoundingBox - // arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationBoundingBox - // arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationBoundingBox - // arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationBoundingBox - // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationBoundingBox - // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationBoundingBox - // arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationBoundingBox - // arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationBoundingBox - // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationBoundingBox - // arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationBoundingBox - // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationBoundingBox - // arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationBoundingBox + // Named entity recognition - Groups similar selections and calculates aggregate + // boundaries, resolving to most-assigned label. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition + // arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition + // arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition + // + // Video Classification - Use this task type when you need workers to classify + // videos using predefined labels that you specify. Workers are shown videos + // and are asked to choose one label for each video. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoMultiClass arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoMultiClass + // arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoMultiClass arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoMultiClass + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoMultiClass + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoMultiClass + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoMultiClass arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoMultiClass + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoMultiClass + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoMultiClass arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoMultiClass + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoMultiClass + // + // Video Frame Object Detection - Use this task type to have workers identify + // and locate objects in a sequence of video frames (images extracted from a + // video) using bounding boxes. For example, you can use this task to ask workers + // to identify and localize various objects in a series of video frames, such + // as cars, bikes, and pedestrians. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectDetection + // arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectDetection + // arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectDetection + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectDetection + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectDetection + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectDetection + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectDetection + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectDetection + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectDetection + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectDetection + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectDetection + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectDetection + // + // Video Frame Object Tracking - Use this task type to have workers track the + // movement of objects in a sequence of video frames (images extracted from + // a video) using bounding boxes. For example, you can use this task to ask + // workers to track the movement of objects, such as cars, bikes, and pedestrians. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectTracking + // arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectTracking + // arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectTracking + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectTracking + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectTracking + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectTracking + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectTracking + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectTracking + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectTracking + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectTracking + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectTracking + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectTracking + // + // 3D point cloud object detection - Use this task type when you want workers + // to classify objects in a 3D point cloud by drawing 3D cuboids around objects. + // For example, you can use this task type to ask workers to identify different + // types of objects in a point cloud, such as cars, bikes, and pedestrians. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectDetection + // arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectDetection + // arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectDetection + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectDetection + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectDetection + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectDetection + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectDetection + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectDetection + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectDetection + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectDetection + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectDetection + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectDetection + // + // 3D point cloud object tracking - Use this task type when you want workers + // to draw 3D cuboids around objects that appear in a sequence of 3D point cloud + // frames. For example, you can use this task type to ask workers to track the + // movement of vehicles across multiple point cloud frames. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectTracking + // arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectTracking + // arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectTracking + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectTracking + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectTracking + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectTracking + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectTracking + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectTracking + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectTracking + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectTracking + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectTracking + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectTracking + // + // 3D point cloud semantic segmentation - Use this task type when you want workers + // to create a point-level semantic segmentation masks by painting objects in + // a 3D point cloud using different colors where each color is assigned to one + // of the classes you specify. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation + // arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudSemanticSegmentation + // arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudSemanticSegmentation + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudSemanticSegmentation + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudSemanticSegmentation + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudSemanticSegmentation + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudSemanticSegmentation + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudSemanticSegmentation + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudSemanticSegmentation + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudSemanticSegmentation + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudSemanticSegmentation + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudSemanticSegmentation + // + // Use the following ARNs for Label Verification and Adjustment Jobs + // + // Use label verification and adjustment jobs to review and adjust labels. To + // learn more, see Verify and Adjust Labels (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-verification-data.html). + // + // Semantic segmentation adjustment - Treats each pixel in an image as a multi-class + // classification and treats pixel adjusted annotations from workers as "votes" + // for the correct label. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentSemanticSegmentation + // arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentSemanticSegmentation + // arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentSemanticSegmentation + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentSemanticSegmentation + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentSemanticSegmentation + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentSemanticSegmentation + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentSemanticSegmentation + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentSemanticSegmentation + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentSemanticSegmentation + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentSemanticSegmentation + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentSemanticSegmentation + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentSemanticSegmentation + // + // Semantic segmentation verification - Uses a variant of the Expectation Maximization + // approach to estimate the true class of verification judgment for semantic + // segmentation labels based on annotations from individual workers. // - // * Semantic segmentation verification - Uses a variant of the Expectation - // Maximization approach to estimate the true class of verification judgment - // for semantic segmentation labels based on annotations from individual - // workers. arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationSemanticSegmentation + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationSemanticSegmentation // arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationSemanticSegmentation // arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationSemanticSegmentation // arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationSemanticSegmentation @@ -13440,8 +14102,27 @@ type AnnotationConsolidationConfig struct { // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationSemanticSegmentation // arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationSemanticSegmentation // - // * Bounding box adjustment - Finds the most similar boxes from different - // workers based on the Jaccard index of the adjusted annotations. arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentBoundingBox + // Bounding box verification - Uses a variant of the Expectation Maximization + // approach to estimate the true class of verification judgement for bounding + // box labels based on annotations from individual workers. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationBoundingBox + // arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationBoundingBox + // arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationBoundingBox + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationBoundingBox + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationBoundingBox + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationBoundingBox + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationBoundingBox + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationBoundingBox + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationBoundingBox + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationBoundingBox + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationBoundingBox + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationBoundingBox + // + // Bounding box adjustment - Finds the most similar boxes from different workers + // based on the Jaccard index of the adjusted annotations. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentBoundingBox // arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentBoundingBox // arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentBoundingBox // arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentBoundingBox @@ -13454,22 +14135,89 @@ type AnnotationConsolidationConfig struct { // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentBoundingBox // arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentBoundingBox // - // * Semantic segmentation adjustment - Treats each pixel in an image as - // a multi-class classification and treats pixel adjusted annotations from - // workers as "votes" for the correct label. arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentSemanticSegmentation - // arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentSemanticSegmentation - // arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentSemanticSegmentation - // arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentSemanticSegmentation - // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentSemanticSegmentation - // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentSemanticSegmentation - // arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentSemanticSegmentation - // arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentSemanticSegmentation - // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentSemanticSegmentation - // arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentSemanticSegmentation - // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentSemanticSegmentation - // arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentSemanticSegmentation - // - // For more information, see Annotation Consolidation (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-annotation-consolidation.html). + // Video Frame Object Detection Adjustment - Use this task type when you want + // workers to adjust bounding boxes that workers have added to video frames + // to classify and localize objects in a sequence of video frames. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectDetection + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectDetection + // + // Video Frame Object Tracking Adjustment - Use this task type when you want + // workers to adjust bounding boxes that workers have added to video frames + // to track object movement across a sequence of video frames. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectTracking + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectTracking + // + // 3D point cloud object detection adjustment - Use this task type when you + // want workers to adjust 3D cuboids around objects in a 3D point cloud. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectDetection + // arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectDetection + // arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectDetection + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectDetection + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectDetection + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectDetection + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectDetection + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectDetection + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectDetection + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectDetection + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectDetection + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectDetection + // + // 3D point cloud object tracking adjustment - Use this task type when you want + // workers to adjust 3D cuboids around objects that appear in a sequence of + // 3D point cloud frames. + // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectTracking + // arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectTracking + // arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectTracking + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectTracking + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectTracking + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectTracking + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectTracking + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectTracking + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectTracking + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectTracking + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectTracking + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectTracking + // + // 3D point cloud semantic segmentation adjustment - Use this task type when + // you want workers to adjust a point-level semantic segmentation masks using + // a paint tool. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudSemanticSegmentation + // arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudSemanticSegmentation + // arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudSemanticSegmentation + // arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudSemanticSegmentation + // arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudSemanticSegmentation + // arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudSemanticSegmentation + // arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudSemanticSegmentation + // arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudSemanticSegmentation + // arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudSemanticSegmentation + // arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudSemanticSegmentation + // arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudSemanticSegmentation + // arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudSemanticSegmentation // // AnnotationConsolidationLambdaArn is a required field AnnotationConsolidationLambdaArn *string `type:"string" required:"true"` @@ -13726,8 +14474,8 @@ func (s *AssociateTrialComponentOutput) SetTrialComponentArn(v string) *Associat return s } -// An AutoPilot job will return recommendations, or candidates. Each candidate -// has futher details about the steps involed, and the status. +// An Autopilot job returns recommendations, or candidates. Each candidate has +// further details about the steps involved, and the status. type AutoMLCandidate struct { _ struct{} `type:"structure"` @@ -13757,7 +14505,7 @@ type AutoMLCandidate struct { // The failure reason. FailureReason *string `type:"string"` - // The candidate result from a job. + // The best candidate result from an AutoML training job. FinalAutoMLJobObjectiveMetric *FinalAutoMLJobObjectiveMetric `type:"structure"` // The inference containers. @@ -14013,11 +14761,13 @@ func (s *AutoMLContainerDefinition) SetModelDataUrl(v string) *AutoMLContainerDe return s } -// The data source for the AutoPilot job. +// The data source for the Autopilot job. type AutoMLDataSource struct { _ struct{} `type:"structure"` - // The Amazon S3 location of the data. + // The Amazon S3 location of the input data. + // + // The input data must be in CSV format and contain at least 500 rows. // // S3DataSource is a required field S3DataSource *AutoMLS3DataSource `type:"structure" required:"true"` @@ -14207,11 +14957,76 @@ func (s *AutoMLJobConfig) SetSecurityConfig(v *AutoMLSecurityConfig) *AutoMLJobC return s } -// Applies a metric to minimize or maximize for the job's objective. +// Specifies a metric to minimize or maximize as the objective of a job. type AutoMLJobObjective struct { _ struct{} `type:"structure"` - // The name of the metric. + // The name of the objective metric used to measure the predictive quality of + // a machine learning system. This metric is optimized during training to provide + // the best estimate for model parameter values from data. + // + // Here are the options: + // + // * MSE: The mean squared error (MSE) is the average of the squared differences + // between the predicted and actual values. It is used for regression. 
MSE + // values are always positive: the better a model is at predicting the actual + // values, the smaller the MSE value. When the data contains outliers, they + // tend to dominate the MSE, which might cause subpar prediction performance. + // + // * Accuracy: The ratio of the number of correctly classified items to the + // total number (correctly and incorrectly) classified. It is used for binary + // and multiclass classification. Measures how close the predicted class + // values are to the actual values. Accuracy values vary between zero and + // one, one being perfect accuracy and zero perfect inaccuracy. + // + // * F1: The F1 score is the harmonic mean of the precision and recall. It + // is used for binary classification into classes traditionally referred + // to as positive and negative. Predictions are said to be true when they + // match their actual (correct) class; false when they do not. Precision + // is the ratio of the true positive predictions to all positive predictions + // (including the false positives) in a data set and measures the quality + // of the prediction when it predicts the positive class. Recall (or sensitivity) + // is the ratio of the true positive predictions to all actual positive instances + // and measures how completely a model predicts the actual class members + // in a data set. The standard F1 score weighs precision and recall equally. + // But which metric is paramount typically depends on specific aspects of + // a problem. F1 scores vary between zero and one, one being the best possible + // performance and zero the worst. + // + // * AUC: The area under the curve (AUC) metric is used to compare and evaluate + // binary classification by algorithms such as logistic regression that return + // probabilities. A threshold is needed to map the probabilities into classifications. + // The relevant curve is the receiver operating characteristic curve that + // plots the true positive rate (TPR) of predictions (or recall) against + // the false positive rate (FPR) as a function of the threshold value, above + // which a prediction is considered positive. Increasing the threshold results + // in fewer false positives but more false negatives. AUC is the area under + // this receiver operating characteristic curve and so provides an aggregated + // measure of the model performance across all possible classification thresholds. + // The AUC score can also be interpreted as the probability that a randomly + // selected positive data point is more likely to be predicted positive than + // a randomly selected negative example. AUC scores vary between zero and + // one, one being perfect accuracy and one half not better than a random + // classifier. Values less than one half predict worse than a random predictor + // and such consistently bad predictors can be inverted to obtain better + // than random predictors. + // + // * F1macro: The F1macro score applies F1 scoring to multiclass classification. + // In this context, you have multiple classes to predict. You just calculate + // the precision and recall for each class as you did for the positive class + // in binary classification. Then use these values to calculate the F1 score + // for each class and average them to obtain the F1macro score. F1macro scores + // vary between zero and one, one being the best possible performance and + // zero the worst. + // + // If you do not specify a metric explicitly, the default behavior is to automatically + // use: + // + // * MSE: for regression. 
+ // + // * F1: for binary classification + // + // * Accuracy: for multiclass classification. // // MetricName is a required field MetricName *string `type:"string" required:"true" enum:"AutoMLMetricEnum"` @@ -14275,10 +15090,10 @@ type AutoMLJobSummary struct { // CreationTime is a required field CreationTime *time.Time `type:"timestamp" required:"true"` - // The end time. + // The end time of an AutoML job. EndTime *time.Time `type:"timestamp"` - // The failure reason. + // The failure reason of a job. FailureReason *string `type:"string"` // When the job was last modified. @@ -15053,6 +15868,71 @@ func (s *CodeRepositorySummary) SetLastModifiedTime(v time.Time) *CodeRepository return s } +// Use this parameter to configure your Amazon Cognito workforce. A single Cognito +// workforce is created using and corresponds to a single Amazon Cognito user +// pool (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). +type CognitoConfig struct { + _ struct{} `type:"structure"` + + // The client ID for your Amazon Cognito user pool. + // + // ClientId is a required field + ClientId *string `min:"1" type:"string" required:"true"` + + // A user pool (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html) + // is a user directory in Amazon Cognito. With a user pool, your users can sign + // in to your web or mobile app through Amazon Cognito. Your users can also + // sign in through social identity providers like Google, Facebook, Amazon, + // or Apple, and through SAML identity providers. + // + // UserPool is a required field + UserPool *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CognitoConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CognitoConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CognitoConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CognitoConfig"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.UserPool == nil { + invalidParams.Add(request.NewErrParamRequired("UserPool")) + } + if s.UserPool != nil && len(*s.UserPool) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserPool", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *CognitoConfig) SetClientId(v string) *CognitoConfig { + s.ClientId = &v + return s +} + +// SetUserPool sets the UserPool field's value. +func (s *CognitoConfig) SetUserPool(v string) *CognitoConfig { + s.UserPool = &v + return s +} + // Identifies a Amazon Cognito user group. A user group can be used in on or // more work teams. type CognitoMemberDefinition struct { @@ -15205,10 +16085,20 @@ type CompilationJobSummary struct { // The time when the model compilation job started. CompilationStartTime *time.Time `type:"timestamp"` - // The type of device that the model will run on after compilation has completed. - // - // CompilationTargetDevice is a required field - CompilationTargetDevice *string `type:"string" required:"true" enum:"TargetDevice"` + // The type of device that the model will run on after the compilation job has + // completed. 
+ CompilationTargetDevice *string `type:"string" enum:"TargetDevice"` + + // The type of accelerator that the model will run on after the compilation + // job has completed. + CompilationTargetPlatformAccelerator *string `type:"string" enum:"TargetPlatformAccelerator"` + + // The type of architecture that the model will run on after the compilation + // job has completed. + CompilationTargetPlatformArch *string `type:"string" enum:"TargetPlatformArch"` + + // The type of OS that the model will run on after the compilation job has completed. + CompilationTargetPlatformOs *string `type:"string" enum:"TargetPlatformOs"` // The time when the model compilation job was created. // @@ -15265,6 +16155,24 @@ func (s *CompilationJobSummary) SetCompilationTargetDevice(v string) *Compilatio return s } +// SetCompilationTargetPlatformAccelerator sets the CompilationTargetPlatformAccelerator field's value. +func (s *CompilationJobSummary) SetCompilationTargetPlatformAccelerator(v string) *CompilationJobSummary { + s.CompilationTargetPlatformAccelerator = &v + return s +} + +// SetCompilationTargetPlatformArch sets the CompilationTargetPlatformArch field's value. +func (s *CompilationJobSummary) SetCompilationTargetPlatformArch(v string) *CompilationJobSummary { + s.CompilationTargetPlatformArch = &v + return s +} + +// SetCompilationTargetPlatformOs sets the CompilationTargetPlatformOs field's value. +func (s *CompilationJobSummary) SetCompilationTargetPlatformOs(v string) *CompilationJobSummary { + s.CompilationTargetPlatformOs = &v + return s +} + // SetCreationTime sets the CreationTime field's value. func (s *CompilationJobSummary) SetCreationTime(v time.Time) *CompilationJobSummary { s.CreationTime = &v @@ -15280,8 +16188,8 @@ func (s *CompilationJobSummary) SetLastModifiedTime(v time.Time) *CompilationJob // There was a conflict when you attempted to modify an experiment, trial, or // trial component. type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -15298,17 +16206,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15316,22 +16224,22 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } // Describes the container, as part of model definition. @@ -15357,14 +16265,22 @@ type ContainerDefinition struct { // support up to 16 entries in the map. Environment map[string]*string `type:"map"` - // The Amazon EC2 Container Registry (Amazon ECR) path where inference code - // is stored. If you are using your own custom algorithm instead of an algorithm - // provided by Amazon SageMaker, the inference code must meet Amazon SageMaker - // requirements. Amazon SageMaker supports both registry/repository[:tag] and - // registry/repository[@digest] image path formats. For more information, see - // Using Your Own Algorithms with Amazon SageMaker (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html) + // The path where inference code is stored. This can be either in Amazon EC2 + // Container Registry or in a Docker registry that is accessible from the same + // VPC that you configure for your endpoint. If you are using your own custom + // algorithm instead of an algorithm provided by Amazon SageMaker, the inference + // code must meet Amazon SageMaker requirements. Amazon SageMaker supports both + // registry/repository[:tag] and registry/repository[@digest] image path formats. + // For more information, see Using Your Own Algorithms with Amazon SageMaker + // (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms.html) Image *string `type:"string"` + // Specifies whether the model container is in Amazon ECR or a private Docker + // registry accessible from your Amazon Virtual Private Cloud (VPC). For information + // about storing containers in a private Docker registry, see Use a Private + // Docker Registry for Real-Time Inference Containers (https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-containers-inference-private.html) + ImageConfig *ImageConfig `type:"structure"` + // Whether the container hosts a single model or multiple models. Mode *string `type:"string" enum:"ContainerMode"` @@ -15374,6 +16290,9 @@ type ContainerDefinition struct { // but not if you use your own algorithms. For more information on built-in // algorithms, see Common Parameters (https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html). // + // The model artifacts must be in an S3 bucket that is in the same region as + // the model or endpoint you are creating. + // // If you provide a value for this parameter, Amazon SageMaker uses AWS Security // Token Service to download model artifacts from the S3 path you provide. AWS // STS is activated in your IAM user account by default. If you previously deactivated @@ -15407,6 +16326,11 @@ func (s *ContainerDefinition) Validate() error { if s.ModelPackageName != nil && len(*s.ModelPackageName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ModelPackageName", 1)) } + if s.ImageConfig != nil { + if err := s.ImageConfig.Validate(); err != nil { + invalidParams.AddNested("ImageConfig", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -15432,6 +16356,12 @@ func (s *ContainerDefinition) SetImage(v string) *ContainerDefinition { return s } +// SetImageConfig sets the ImageConfig field's value. +func (s *ContainerDefinition) SetImageConfig(v *ImageConfig) *ContainerDefinition { + s.ImageConfig = v + return s +} + // SetMode sets the Mode field's value. 
func (s *ContainerDefinition) SetMode(v string) *ContainerDefinition { s.Mode = &v @@ -15788,7 +16718,8 @@ type CreateAppInput struct { // DomainId is a required field DomainId *string `type:"string" required:"true"` - // The instance type and quantity. + // The instance type and the Amazon Resource Name (ARN) of the SageMaker image + // created on the instance. ResourceSpec *ResourceSpec `type:"structure"` // Each tag consists of a key and an optional value. Tag keys must be unique @@ -15882,7 +16813,7 @@ func (s *CreateAppInput) SetUserProfileName(v string) *CreateAppInput { type CreateAppOutput struct { _ struct{} `type:"structure"` - // The app's Amazon Resource Name (ARN). + // The App's Amazon Resource Name (ARN). AppArn *string `type:"string"` } @@ -15908,22 +16839,23 @@ type CreateAutoMLJobInput struct { // Contains CompletionCriteria and SecurityConfig. AutoMLJobConfig *AutoMLJobConfig `type:"structure"` - // Identifies an AutoPilot job. Must be unique to your account and is case-insensitive. + // Identifies an Autopilot job. Must be unique to your account and is case-insensitive. // // AutoMLJobName is a required field AutoMLJobName *string `min:"1" type:"string" required:"true"` - // Defines the job's objective. You provide a MetricName and AutoML will infer - // minimize or maximize. If this is not provided, the most commonly used ObjectiveMetric - // for problem type will be selected. + // Defines the objective of an AutoML job. You provide an AutoMLJobObjective$MetricName + // and Autopilot infers whether to minimize or maximize it. If a metric is not + // specified, the most commonly used ObjectiveMetric for the problem type is automatically + // selected. AutoMLJobObjective *AutoMLJobObjective `type:"structure"` - // This will generate possible candidates without training a model. A candidate - // is a combination of data preprocessors, algorithms, and algorithm parameter - // settings. + // Generates possible candidates without training a model. A candidate is a + // combination of data preprocessors, algorithms, and algorithm parameter settings. GenerateCandidateDefinitionsOnly *bool `type:"boolean"` // Similar to InputDataConfig supported by Tuning. Format(s) supported: CSV. + // Minimum of 500 rows. // // InputDataConfig is a required field InputDataConfig []*AutoMLChannel `min:"1" type:"list" required:"true"` @@ -15937,7 +16869,7 @@ type CreateAutoMLJobInput struct { // Options include: BinaryClassification, MulticlassClassification, and Regression. ProblemType *string `type:"string" enum:"ProblemType"` - // The ARN of the role that will be used to access the data. + // The ARN of the role that is used to access the data. // // RoleArn is a required field RoleArn *string `min:"20" type:"string" required:"true"` @@ -16356,7 +17288,15 @@ func (s *CreateCompilationJobOutput) SetCompilationJobArn(v string) *CreateCompi type CreateDomainInput struct { _ struct{} `type:"structure"` - // The mode of authentication that member use to access the domain. + // Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. + // + // * PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon + // SageMaker, which allows direct internet access + // + // * VpcOnly - All Studio traffic is through the specified VPC and subnets + AppNetworkAccessType *string `type:"string" enum:"AppNetworkAccessType"` + + // The mode of authentication that members use to access the domain. 
// // AuthMode is a required field AuthMode *string `type:"string" required:"true" enum:"AuthMode"` @@ -16371,20 +17311,21 @@ type CreateDomainInput struct { // DomainName is a required field DomainName *string `type:"string" required:"true"` - // The AWS Key Management Service encryption key ID. + // The AWS Key Management Service (KMS) encryption key ID. Encryption with a + // customer master key (CMK) is not supported. HomeEfsFileSystemKmsKeyId *string `type:"string"` - // Security setting to limit to a set of subnets. + // The VPC subnets that Studio uses for communication. // // SubnetIds is a required field SubnetIds []*string `min:"1" type:"list" required:"true"` - // Each tag consists of a key and an optional value. Tag keys must be unique - // per resource. + // Tags to associate with the Domain. Each tag consists of a key and an optional + // value. Tag keys must be unique per resource. Tags are searchable using the + // Search API. Tags []*Tag `type:"list"` - // Security setting to limit the domain's communication to a Amazon Virtual - // Private Cloud. + // The ID of the Amazon Virtual Private Cloud (VPC) that Studio uses for communication. // // VpcId is a required field VpcId *string `type:"string" required:"true"` @@ -16443,6 +17384,12 @@ func (s *CreateDomainInput) Validate() error { return nil } +// SetAppNetworkAccessType sets the AppNetworkAccessType field's value. +func (s *CreateDomainInput) SetAppNetworkAccessType(v string) *CreateDomainInput { + s.AppNetworkAccessType = &v + return s +} + // SetAuthMode sets the AuthMode field's value. func (s *CreateDomainInput) SetAuthMode(v string) *CreateDomainInput { s.AuthMode = &v @@ -16523,7 +17470,6 @@ type CreateEndpointConfigInput struct { DataCaptureConfig *DataCaptureConfig `type:"structure"` // The name of the endpoint configuration. You specify this name in a CreateEndpoint - // (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpoint.html) // request. // // EndpointConfigName is a required field @@ -16687,8 +17633,7 @@ func (s *CreateEndpointConfigOutput) SetEndpointConfigArn(v string) *CreateEndpo type CreateEndpointInput struct { _ struct{} `type:"structure"` - // The name of an endpoint configuration. For more information, see CreateEndpointConfig - // (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpointConfig.html). + // The name of an endpoint configuration. For more information, see CreateEndpointConfig. // // EndpointConfigName is a required field EndpointConfigName *string `type:"string" required:"true"` @@ -16908,6 +17853,10 @@ type CreateFlowDefinitionInput struct { // HumanLoopConfig is a required field HumanLoopConfig *HumanLoopConfig `type:"structure" required:"true"` + // Container for configuring the source of human task requests. Use to specify + // if Amazon Rekognition or Amazon Textract is used as an integration source. + HumanLoopRequestSource *HumanLoopRequestSource `type:"structure"` + // An object containing information about where the human review results will // be uploaded. 
// @@ -16967,6 +17916,11 @@ func (s *CreateFlowDefinitionInput) Validate() error { invalidParams.AddNested("HumanLoopConfig", err.(request.ErrInvalidParams)) } } + if s.HumanLoopRequestSource != nil { + if err := s.HumanLoopRequestSource.Validate(); err != nil { + invalidParams.AddNested("HumanLoopRequestSource", err.(request.ErrInvalidParams)) + } + } if s.OutputConfig != nil { if err := s.OutputConfig.Validate(); err != nil { invalidParams.AddNested("OutputConfig", err.(request.ErrInvalidParams)) @@ -17007,6 +17961,12 @@ func (s *CreateFlowDefinitionInput) SetHumanLoopConfig(v *HumanLoopConfig) *Crea return s } +// SetHumanLoopRequestSource sets the HumanLoopRequestSource field's value. +func (s *CreateFlowDefinitionInput) SetHumanLoopRequestSource(v *HumanLoopRequestSource) *CreateFlowDefinitionInput { + s.HumanLoopRequestSource = v + return s +} + // SetOutputConfig sets the OutputConfig field's value. func (s *CreateFlowDefinitionInput) SetOutputConfig(v *FlowDefinitionOutputConfig) *CreateFlowDefinitionInput { s.OutputConfig = v @@ -17163,7 +18123,7 @@ type CreateHyperParameterTuningJobInput struct { // The HyperParameterTuningJobConfig object that describes the tuning job, including // the search strategy, the objective metric used to evaluate training jobs, // ranges of parameters to search, and resource limits for the tuning job. For - // more information, see automatic-model-tuning + // more information, see How Hyperparameter Tuning Works (https://docs.aws.amazon.com/sagemaker/latest/dg/automatic-model-tuning-how-it-works.html). // // HyperParameterTuningJobConfig is a required field HyperParameterTuningJobConfig *HyperParameterTuningJobConfig `type:"structure" required:"true"` @@ -17191,6 +18151,8 @@ type CreateHyperParameterTuningJobInput struct { // stopping condition. TrainingJobDefinition *HyperParameterTrainingJobDefinition `type:"structure"` + // A list of the HyperParameterTrainingJobDefinition objects launched for this + // tuning job. TrainingJobDefinitions []*HyperParameterTrainingJobDefinition `min:"1" type:"list"` // Specifies the configuration for starting the hyperparameter tuning job using @@ -17369,7 +18331,14 @@ type CreateLabelingJobInput struct { // The S3 URL of the file that defines the categories used to label the data // objects. // - // The file is a JSON structure in the following format: + // For 3D point cloud task types, see Create a Labeling Category Configuration + // File for 3D Point Cloud Labeling Jobs (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-point-cloud-label-category-config.html). + // + // For all other built-in task types (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-task-types.html) + // and custom tasks (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates.html), + // your label category configuration file must be a JSON file in the following + // format. Identify the labels you want to use by replacing label_1, label_2,...,label_n + // with your label categories. // // { // @@ -17379,13 +18348,13 @@ type CreateLabelingJobInput struct { // // { // - // "label": "label 1" + // "label": "label_1" // // }, // // { // - // "label": "label 2" + // "label": "label_2" // // }, // @@ -17393,7 +18362,7 @@ type CreateLabelingJobInput struct { // // { // - // "label": "label n" + // "label": "label_n" // // } // @@ -17640,10 +18609,9 @@ type CreateModelInput struct { // in the AWS Billing and Cost Management User Guide. 
Tags []*Tag `type:"list"` - // A VpcConfig (https://docs.aws.amazon.com/sagemaker/latest/dg/API_VpcConfig.html) - // object that specifies the VPC that you want your model to connect to. Control - // access to and from your model container by configuring the VPC. VpcConfig - // is used in hosting services and in batch transform. For more information, + // A VpcConfig object that specifies the VPC that you want your model to connect + // to. Control access to and from your model container by configuring the VPC. + // VpcConfig is used in hosting services and in batch transform. For more information, // see Protect Endpoints by Using an Amazon Virtual Private Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/host-vpc.html) // and Protect Data in Batch Transform Jobs by Using an Amazon Virtual Private // Cloud (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-vpc.html). @@ -17930,7 +18898,7 @@ type CreateMonitoringScheduleInput struct { MonitoringScheduleName *string `min:"1" type:"string" required:"true"` // (Optional) An array of key-value pairs. For more information, see Using Cost - // Allocation Tags (https://docs-aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) + // Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) // in the AWS Billing and Cost Management User Guide. Tags []*Tag `type:"list"` } @@ -18549,7 +19517,14 @@ type CreateProcessingJobInput struct { // Sets the environment variables in the Docker container. Environment map[string]*string `type:"map"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // Networking options for a processing job. @@ -18585,7 +19560,7 @@ type CreateProcessingJobInput struct { StoppingCondition *ProcessingStoppingCondition `type:"structure"` // (Optional) An array of key-value pairs. For more information, see Using Cost - // Allocation Tags (https://docs-aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) + // Allocation Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) // in the AWS Billing and Cost Management User Guide. Tags []*Tag `type:"list"` } @@ -18821,7 +19796,14 @@ type CreateTrainingJobInput struct { // have network access. EnableNetworkIsolation *bool `type:"boolean"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // Algorithm-specific parameters that influence the quality of the model. You @@ -19179,8 +20161,8 @@ type CreateTransformJobInput struct { // request. A record is a single unit of input data that inference can be made // on. For example, a single line in a CSV file is a record. // - // To enable the batch strategy, you must set the SplitType property of the - // DataProcessing object to Line, RecordIO, or TFRecord. + // To enable the batch strategy, you must set the SplitType property to Line, + // RecordIO, or TFRecord. 
// // To use only one record when making an HTTP invocation request to a container, // set BatchStrategy to SingleRecord and SplitType to Line. @@ -19202,7 +20184,14 @@ type CreateTransformJobInput struct { // 16 key and values entries in the map. Environment map[string]*string `type:"map"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // The maximum number of parallel requests that can be sent to each instance @@ -19227,6 +20216,10 @@ type CreateTransformJobInput struct { // do not support HTTP chunked encoding. MaxPayloadInMB *int64 `type:"integer"` + // Configures the timeout and maximum number of retries for processing a transform + // job invocation. + ModelClientConfig *ModelClientConfig `type:"structure"` + // The name of the model that you want to use for the transform job. ModelName // must be the name of an existing Amazon SageMaker model within an AWS Region // in an AWS account. @@ -19298,6 +20291,11 @@ func (s *CreateTransformJobInput) Validate() error { invalidParams.AddNested("ExperimentConfig", err.(request.ErrInvalidParams)) } } + if s.ModelClientConfig != nil { + if err := s.ModelClientConfig.Validate(); err != nil { + invalidParams.AddNested("ModelClientConfig", err.(request.ErrInvalidParams)) + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -19366,6 +20364,12 @@ func (s *CreateTransformJobInput) SetMaxPayloadInMB(v int64) *CreateTransformJob return s } +// SetModelClientConfig sets the ModelClientConfig field's value. +func (s *CreateTransformJobInput) SetModelClientConfig(v *ModelClientConfig) *CreateTransformJobInput { + s.ModelClientConfig = v + return s +} + // SetModelName sets the ModelName field's value. func (s *CreateTransformJobInput) SetModelName(v string) *CreateTransformJobInput { s.ModelName = &v @@ -19855,6 +20859,145 @@ func (s *CreateUserProfileOutput) SetUserProfileArn(v string) *CreateUserProfile return s } +type CreateWorkforceInput struct { + _ struct{} `type:"structure"` + + // Use this parameter to configure an Amazon Cognito private workforce. A single + // Cognito workforce is created using and corresponds to a single Amazon Cognito + // user pool (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). + // + // Do not use OidcConfig if you specify values for CognitoConfig. + CognitoConfig *CognitoConfig `type:"structure"` + + // Use this parameter to configure a private workforce using your own OIDC Identity + // Provider. + // + // Do not use CognitoConfig if you specify values for OidcConfig. + OidcConfig *OidcConfig `type:"structure"` + + // A list of IP address ranges (CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)). + // Used to create an allow list of IP addresses for a private workforce. Workers + // will only be able to login to their worker portal from an IP address within + // this range. By default, a workforce isn't restricted to specific IP addresses. + SourceIpConfig *SourceIpConfig `type:"structure"` + + // An array of key-value pairs that contain metadata to help you categorize + // and organize our workforce. Each tag consists of a key and a value, both + // of which you define. + Tags []*Tag `type:"list"` + + // The name of the private workforce. 
+ // + // WorkforceName is a required field + WorkforceName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateWorkforceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWorkforceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateWorkforceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateWorkforceInput"} + if s.WorkforceName == nil { + invalidParams.Add(request.NewErrParamRequired("WorkforceName")) + } + if s.WorkforceName != nil && len(*s.WorkforceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkforceName", 1)) + } + if s.CognitoConfig != nil { + if err := s.CognitoConfig.Validate(); err != nil { + invalidParams.AddNested("CognitoConfig", err.(request.ErrInvalidParams)) + } + } + if s.OidcConfig != nil { + if err := s.OidcConfig.Validate(); err != nil { + invalidParams.AddNested("OidcConfig", err.(request.ErrInvalidParams)) + } + } + if s.SourceIpConfig != nil { + if err := s.SourceIpConfig.Validate(); err != nil { + invalidParams.AddNested("SourceIpConfig", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCognitoConfig sets the CognitoConfig field's value. +func (s *CreateWorkforceInput) SetCognitoConfig(v *CognitoConfig) *CreateWorkforceInput { + s.CognitoConfig = v + return s +} + +// SetOidcConfig sets the OidcConfig field's value. +func (s *CreateWorkforceInput) SetOidcConfig(v *OidcConfig) *CreateWorkforceInput { + s.OidcConfig = v + return s +} + +// SetSourceIpConfig sets the SourceIpConfig field's value. +func (s *CreateWorkforceInput) SetSourceIpConfig(v *SourceIpConfig) *CreateWorkforceInput { + s.SourceIpConfig = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateWorkforceInput) SetTags(v []*Tag) *CreateWorkforceInput { + s.Tags = v + return s +} + +// SetWorkforceName sets the WorkforceName field's value. +func (s *CreateWorkforceInput) SetWorkforceName(v string) *CreateWorkforceInput { + s.WorkforceName = &v + return s +} + +type CreateWorkforceOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the workforce. + // + // WorkforceArn is a required field + WorkforceArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateWorkforceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWorkforceOutput) GoString() string { + return s.String() +} + +// SetWorkforceArn sets the WorkforceArn field's value. +func (s *CreateWorkforceOutput) SetWorkforceArn(v string) *CreateWorkforceOutput { + s.WorkforceArn = &v + return s +} + type CreateWorkteamInput struct { _ struct{} `type:"structure"` @@ -19864,11 +21007,23 @@ type CreateWorkteamInput struct { Description *string `min:"1" type:"string" required:"true"` // A list of MemberDefinition objects that contains objects that identify the - // Amazon Cognito user pool that makes up the work team. 
For more information, - // see Amazon Cognito User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). + // workers that make up the work team. + // + // Workforces can be created using Amazon Cognito or your own OIDC Identity + // Provider (IdP). For private workforces created using Amazon Cognito, use CognitoMemberDefinition. + // For workforces created using your own OIDC identity provider (IdP), use OidcMemberDefinition. + // Do not provide input for both of these parameters in a single request. // + // For workforces created using Amazon Cognito, private work teams correspond + // to Amazon Cognito user groups within the user pool used to create a workforce. // All of the CognitoMemberDefinition objects that make up the member definition - // must have the same ClientId and UserPool values. + // must have the same ClientId and UserPool values. To add an Amazon Cognito + // user group to an existing worker pool, see Adding groups to a User Pool. + // For more information about user pools, see Amazon Cognito User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). + // + // For workforces created using your own OIDC IdP, specify the user groups that + // you want to include in your private work team in OidcMemberDefinition by + // listing those groups in Groups. // // MemberDefinitions is a required field MemberDefinitions []*MemberDefinition `min:"1" type:"list" required:"true"` @@ -19883,6 +21038,9 @@ type CreateWorkteamInput struct { // in the AWS Billing and Cost Management User Guide. Tags []*Tag `type:"list"` + // The name of the workforce. + WorkforceName *string `min:"1" type:"string"` + // The name of the work team. Use this name to identify the work team. // // WorkteamName is a required field @@ -19914,6 +21072,9 @@ func (s *CreateWorkteamInput) Validate() error { if s.MemberDefinitions != nil && len(s.MemberDefinitions) < 1 { invalidParams.Add(request.NewErrParamMinLen("MemberDefinitions", 1)) } + if s.WorkforceName != nil && len(*s.WorkforceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkforceName", 1)) + } if s.WorkteamName == nil { invalidParams.Add(request.NewErrParamRequired("WorkteamName")) } @@ -19971,6 +21132,12 @@ func (s *CreateWorkteamInput) SetTags(v []*Tag) *CreateWorkteamInput { return s } +// SetWorkforceName sets the WorkforceName field's value. +func (s *CreateWorkteamInput) SetWorkforceName(v string) *CreateWorkteamInput { + s.WorkforceName = &v + return s +} + // SetWorkteamName sets the WorkteamName field's value. func (s *CreateWorkteamInput) SetWorkteamName(v string) *CreateWorkteamInput { s.WorkteamName = &v @@ -20743,7 +21910,7 @@ type DeleteDomainInput struct { // DomainId is a required field DomainId *string `type:"string" required:"true"` - // The retention policy for this domain, which specifies which resources will + // The retention policy for this domain, which specifies whether resources will // be retained after the Domain is deleted. By default, all resources are retained // (not automatically deleted). RetentionPolicy *RetentionPolicy `type:"structure"` @@ -21021,6 +22188,62 @@ func (s DeleteFlowDefinitionOutput) GoString() string { return s.String() } +type DeleteHumanTaskUiInput struct { + _ struct{} `type:"structure"` + + // The name of the human task user interface (worker task template) you want to + // delete. 
+ // + // HumanTaskUiName is a required field + HumanTaskUiName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteHumanTaskUiInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHumanTaskUiInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteHumanTaskUiInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteHumanTaskUiInput"} + if s.HumanTaskUiName == nil { + invalidParams.Add(request.NewErrParamRequired("HumanTaskUiName")) + } + if s.HumanTaskUiName != nil && len(*s.HumanTaskUiName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HumanTaskUiName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHumanTaskUiName sets the HumanTaskUiName field's value. +func (s *DeleteHumanTaskUiInput) SetHumanTaskUiName(v string) *DeleteHumanTaskUiInput { + s.HumanTaskUiName = &v + return s +} + +type DeleteHumanTaskUiOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteHumanTaskUiOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteHumanTaskUiOutput) GoString() string { + return s.String() +} + type DeleteModelInput struct { _ struct{} `type:"structure"` @@ -21551,6 +22774,61 @@ func (s DeleteUserProfileOutput) GoString() string { return s.String() } +type DeleteWorkforceInput struct { + _ struct{} `type:"structure"` + + // The name of the workforce. + // + // WorkforceName is a required field + WorkforceName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteWorkforceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteWorkforceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteWorkforceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteWorkforceInput"} + if s.WorkforceName == nil { + invalidParams.Add(request.NewErrParamRequired("WorkforceName")) + } + if s.WorkforceName != nil && len(*s.WorkforceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WorkforceName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWorkforceName sets the WorkforceName field's value. +func (s *DeleteWorkforceInput) SetWorkforceName(v string) *DeleteWorkforceInput { + s.WorkforceName = &v + return s +} + +type DeleteWorkforceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteWorkforceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteWorkforceOutput) GoString() string { + return s.String() +} + type DeleteWorkteamInput struct { _ struct{} `type:"structure"` @@ -21942,7 +23220,8 @@ type DescribeAppOutput struct { // The timestamp of the last user's activity. LastUserActivityTimestamp *time.Time `type:"timestamp"` - // The instance type and quantity. + // The instance type and the Amazon Resource Name (ARN) of the SageMaker image + // created on the instance. ResourceSpec *ResourceSpec `type:"structure"` // The status. 
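The hunks above add the CreateWorkforce, CognitoConfig, and CreateWorkteam shapes to the vendored SDK but no usage example. The following is a minimal sketch, not part of the vendored file or of this provider change, showing how those generated types are typically composed; the user pool, app client ID, and workforce name are placeholders, and it assumes the CreateWorkforce operation is exposed by the sagemaker client in this vendored aws-sdk-go version.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	// Placeholder Cognito resources; a real workforce needs an existing user
	// pool and app client in the same account and region.
	input := &sagemaker.CreateWorkforceInput{}
	input.SetWorkforceName("example-private-workforce")
	input.SetCognitoConfig(&sagemaker.CognitoConfig{
		ClientId: aws.String("example-app-client-id"),
		UserPool: aws.String("us-east-1_ExamplePool"),
	})

	// Client-side validation mirrors the generated Validate() methods above:
	// required fields and minimum lengths are checked before any request.
	if err := input.Validate(); err != nil {
		log.Fatalf("invalid CreateWorkforceInput: %v", err)
	}

	svc := sagemaker.New(session.Must(session.NewSession()))
	out, err := svc.CreateWorkforce(input)
	if err != nil {
		log.Fatalf("CreateWorkforce failed: %v", err)
	}
	fmt.Println("workforce ARN:", aws.StringValue(out.WorkforceArn))
}

A CreateWorkteamInput would then reference the same workforce through its WorkforceName field and a CognitoMemberDefinition (or OidcMemberDefinition), as described in the CreateWorkteamInput documentation above.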
@@ -22629,6 +23908,14 @@ func (s *DescribeDomainInput) SetDomainId(v string) *DescribeDomainInput { type DescribeDomainOutput struct { _ struct{} `type:"structure"` + // Specifies the VPC used for non-EFS traffic. The default value is PublicInternetOnly. + // + // * PublicInternetOnly - Non-EFS traffic is through a VPC managed by Amazon + // SageMaker, which allows direct internet access + // + // * VpcOnly - All Studio traffic is through the specified VPC and subnets + AppNetworkAccessType *string `type:"string" enum:"AppNetworkAccessType"` + // The domain's authentication mode. AuthMode *string `type:"string" enum:"AuthMode"` @@ -22666,13 +23953,13 @@ type DescribeDomainOutput struct { // The status. Status *string `type:"string" enum:"DomainStatus"` - // Security setting to limit to a set of subnets. + // The VPC subnets that Studio uses for communication. SubnetIds []*string `min:"1" type:"list"` // The domain's URL. Url *string `type:"string"` - // The ID of the Amazon Virtual Private Cloud. + // The ID of the Amazon Virtual Private Cloud (VPC) that Studio uses for communication. VpcId *string `type:"string"` } @@ -22686,6 +23973,12 @@ func (s DescribeDomainOutput) GoString() string { return s.String() } +// SetAppNetworkAccessType sets the AppNetworkAccessType field's value. +func (s *DescribeDomainOutput) SetAppNetworkAccessType(v string) *DescribeDomainOutput { + s.AppNetworkAccessType = &v + return s +} + // SetAuthMode sets the AuthMode field's value. func (s *DescribeDomainOutput) SetAuthMode(v string) *DescribeDomainOutput { s.AuthMode = &v @@ -23249,6 +24542,7 @@ type DescribeFlowDefinitionOutput struct { // CreationTime is a required field CreationTime *time.Time `type:"timestamp" required:"true"` + // The reason your flow definition failed. FailureReason *string `type:"string"` // The Amazon Resource Name (ARN) of the flow defintion. @@ -23275,6 +24569,10 @@ type DescribeFlowDefinitionOutput struct { // HumanLoopConfig is a required field HumanLoopConfig *HumanLoopConfig `type:"structure" required:"true"` + // Container for configuring the source of human task requests. Used to specify + // if Amazon Rekognition or Amazon Textract is used as an integration source. + HumanLoopRequestSource *HumanLoopRequestSource `type:"structure"` + // An object containing information about the output file. // // OutputConfig is a required field @@ -23339,6 +24637,12 @@ func (s *DescribeFlowDefinitionOutput) SetHumanLoopConfig(v *HumanLoopConfig) *D return s } +// SetHumanLoopRequestSource sets the HumanLoopRequestSource field's value. +func (s *DescribeFlowDefinitionOutput) SetHumanLoopRequestSource(v *HumanLoopRequestSource) *DescribeFlowDefinitionOutput { + s.HumanLoopRequestSource = v + return s +} + // SetOutputConfig sets the OutputConfig field's value. func (s *DescribeFlowDefinitionOutput) SetOutputConfig(v *FlowDefinitionOutputConfig) *DescribeFlowDefinitionOutput { s.OutputConfig = v @@ -23354,7 +24658,8 @@ func (s *DescribeFlowDefinitionOutput) SetRoleArn(v string) *DescribeFlowDefinit type DescribeHumanTaskUiInput struct { _ struct{} `type:"structure"` - // The name of the human task user interface you want information about. + // The name of the human task user interface (worker task template) you want + // information about. 
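A short sketch of reading the new AppNetworkAccessType field, together with the clarified SubnetIds and VpcId fields, from DescribeDomainOutput. It assumes the DescribeDomain client operation defined elsewhere in this file; the domain ID is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	svc := sagemaker.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	out, err := svc.DescribeDomain(&sagemaker.DescribeDomainInput{
		DomainId: aws.String("d-example123456"), // placeholder domain ID
	})
	if err != nil {
		log.Fatal(err)
	}

	switch aws.StringValue(out.AppNetworkAccessType) {
	case "VpcOnly":
		// All Studio traffic stays inside the customer-specified VPC and subnets.
		fmt.Println("VPC:", aws.StringValue(out.VpcId), "subnets:", aws.StringValueSlice(out.SubnetIds))
	default:
		// PublicInternetOnly is the documented default: non-EFS traffic goes
		// through a VPC managed by Amazon SageMaker with direct internet access.
		fmt.Println("non-EFS traffic uses the SageMaker-managed VPC")
	}
}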
// // HumanTaskUiName is a required field HumanTaskUiName *string `min:"1" type:"string" required:"true"` @@ -23400,16 +24705,21 @@ type DescribeHumanTaskUiOutput struct { // CreationTime is a required field CreationTime *time.Time `type:"timestamp" required:"true"` - // The Amazon Resource Name (ARN) of the human task user interface. + // The Amazon Resource Name (ARN) of the human task user interface (worker task + // template). // // HumanTaskUiArn is a required field HumanTaskUiArn *string `type:"string" required:"true"` - // The name of the human task user interface. + // The name of the human task user interface (worker task template). // // HumanTaskUiName is a required field HumanTaskUiName *string `min:"1" type:"string" required:"true"` + // The status of the human task user interface (worker task template). Valid + // values are listed below. + HumanTaskUiStatus *string `type:"string" enum:"HumanTaskUiStatus"` + // Container for user interface template information. // // UiTemplate is a required field @@ -23444,6 +24754,12 @@ func (s *DescribeHumanTaskUiOutput) SetHumanTaskUiName(v string) *DescribeHumanT return s } +// SetHumanTaskUiStatus sets the HumanTaskUiStatus field's value. +func (s *DescribeHumanTaskUiOutput) SetHumanTaskUiStatus(v string) *DescribeHumanTaskUiOutput { + s.HumanTaskUiStatus = &v + return s +} + // SetUiTemplate sets the UiTemplate field's value. func (s *DescribeHumanTaskUiOutput) SetUiTemplate(v *UiTemplateInfo) *DescribeHumanTaskUiOutput { s.UiTemplate = v @@ -23453,7 +24769,7 @@ func (s *DescribeHumanTaskUiOutput) SetUiTemplate(v *UiTemplateInfo) *DescribeHu type DescribeHyperParameterTuningJobInput struct { _ struct{} `type:"structure"` - // The name of the tuning job to describe. + // The name of the tuning job. // // HyperParameterTuningJobName is a required field HyperParameterTuningJobName *string `min:"1" type:"string" required:"true"` @@ -23551,6 +24867,8 @@ type DescribeHyperParameterTuningJobOutput struct { // of the training jobs that this tuning job launches. TrainingJobDefinition *HyperParameterTrainingJobDefinition `type:"structure"` + // A list of the HyperParameterTrainingJobDefinition objects launched for this + // tuning job. TrainingJobDefinitions []*HyperParameterTrainingJobDefinition `min:"1" type:"list"` // The TrainingJobStatusCounters object that specifies the number of training @@ -25144,6 +26462,7 @@ type DescribeTrainingJobOutput struct { // AlgorithmSpecification is a required field AlgorithmSpecification *AlgorithmSpecification `type:"structure" required:"true"` + // The Amazon Resource Name (ARN) of an AutoML job. AutoMLJobArn *string `min:"1" type:"string"` // The billable time in seconds. @@ -25192,7 +26511,14 @@ type DescribeTrainingJobOutput struct { // have network access. EnableNetworkIsolation *bool `type:"boolean"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // If the training job failed, the reason it failed. @@ -25272,7 +26598,7 @@ type DescribeTrainingJobOutput struct { // * MaxRuntimeExceeded - The job stopped because it exceeded the maximum // allowed runtime. // - // * MaxWaitTmeExceeded - The job stopped because it exceeded the maximum + // * MaxWaitTimeExceeded - The job stopped because it exceeded the maximum // allowed wait time. 
// // * Stopped - The training job has stopped. @@ -25629,6 +26955,7 @@ func (s *DescribeTransformJobInput) SetTransformJobName(v string) *DescribeTrans type DescribeTransformJobOutput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the AutoML transform job. AutoMLJobArn *string `min:"1" type:"string"` // Specifies the number of records to include in a mini-batch for an HTTP inference @@ -25657,7 +26984,14 @@ type DescribeTransformJobOutput struct { // 16 key and values entries in the map. Environment map[string]*string `type:"map"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // If the transform job failed, FailureReason describes why it failed. A transform @@ -25677,6 +27011,10 @@ type DescribeTransformJobOutput struct { // The maximum payload size, in MB, used in the transform job. MaxPayloadInMB *int64 `type:"integer"` + // The timeout and maximum number of retries for processing a transform job + // invocation. + ModelClientConfig *ModelClientConfig `type:"structure"` + // The name of the model used in the transform job. // // ModelName is a required field @@ -25793,6 +27131,12 @@ func (s *DescribeTransformJobOutput) SetMaxPayloadInMB(v int64) *DescribeTransfo return s } +// SetModelClientConfig sets the ModelClientConfig field's value. +func (s *DescribeTransformJobOutput) SetModelClientConfig(v *ModelClientConfig) *DescribeTransformJobOutput { + s.ModelClientConfig = v + return s +} + // SetModelName sets the ModelName field's value. func (s *DescribeTransformJobOutput) SetModelName(v string) *DescribeTransformJobOutput { s.ModelName = &v @@ -26239,13 +27583,13 @@ type DescribeUserProfileOutput struct { // The creation time. CreationTime *time.Time `type:"timestamp"` - // The domain ID. + // The ID of the domain that contains the profile. DomainId *string `type:"string"` // The failure reason. FailureReason *string `type:"string"` - // The homa Amazon Elastic File System (EFS) Uid. + // The ID of the user's profile in the Amazon Elastic File System (EFS) volume. HomeEfsFileSystemUid *string `type:"string"` // The last modified time. @@ -26934,7 +28278,7 @@ func (s *EndpointSummary) SetLastModifiedTime(v time.Time) *EndpointSummary { return s } -// A summary of the properties of an experiment as returned by the Search API. +// The properties of an experiment as returned by the Search API. type Experiment struct { _ struct{} `type:"structure"` @@ -27043,17 +28387,26 @@ func (s *Experiment) SetTags(v []*Tag) *Experiment { return s } -// Configuration for the experiment. +// Associates a SageMaker job as a trial component with an experiment and trial. +// Specified when you call the following APIs: +// +// * CreateProcessingJob +// +// * CreateTrainingJob +// +// * CreateTransformJob type ExperimentConfig struct { _ struct{} `type:"structure"` - // The name of the experiment. + // The name of an existing experiment to associate the trial component with. ExperimentName *string `min:"1" type:"string"` - // Display name for the trial component. + // The display name for the trial component. If this key isn't specified, the + // display name is the trial component name. TrialComponentDisplayName *string `min:"1" type:"string"` - // The name of the trial. 
+ // The name of an existing trial to associate the trial component with. If not + // specified, a new trial is created. TrialName *string `min:"1" type:"string"` } @@ -27296,15 +28649,11 @@ func (s *FileSystemDataSource) SetFileSystemType(v string) *FileSystemDataSource } // A conditional statement for a search expression that includes a resource -// property, a Boolean operator, and a value. -// -// If you don't specify an Operator and a Value, the filter searches for only -// the specified property. For example, defining a Filter for the FailureReason -// for the TrainingJob Resource searches for training job objects that have -// a value in the FailureReason field. +// property, a Boolean operator, and a value. Resources that match the statement +// are returned in the results from the Search API. // // If you specify a Value, but not an Operator, Amazon SageMaker uses the equals -// operator as the default. +// operator. // // In search, there are several property types: // @@ -27318,7 +28667,7 @@ func (s *FileSystemDataSource) SetFileSystemType(v string) *FileSystemDataSource // // "Name": "Metrics.accuracy", // -// "Operator": "GREATER_THAN", +// "Operator": "GreaterThan", // // "Value": "0.9" // @@ -27337,7 +28686,7 @@ func (s *FileSystemDataSource) SetFileSystemType(v string) *FileSystemDataSource // // "Name": "HyperParameters.learning_rate", // -// "Operator": "LESS_THAN", +// "Operator": "LessThan", // // "Value": "0.5" // @@ -27345,13 +28694,12 @@ func (s *FileSystemDataSource) SetFileSystemType(v string) *FileSystemDataSource // // Tags // -// To define a tag filter, enter a value with the form "Tags.". +// To define a tag filter, enter a value with the form Tags.. type Filter struct { _ struct{} `type:"structure"` - // A property name. For example, TrainingJobName. For the list of valid property - // names returned in a search result for each supported resource, see TrainingJob - // properties. You must specify a valid property name for the resource. + // A resource property name. For example, TrainingJobName. For valid property + // names, see SearchRecord. You must specify a valid property for the resource. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -27361,44 +28709,82 @@ type Filter struct { // // Equals // - // The specified resource in Name equals the specified Value. + // The value of Name equals Value. // // NotEquals // - // The specified resource in Name does not equal the specified Value. + // The value of Name doesn't equal Value. + // + // Exists + // + // The Name property exists. + // + // NotExists + // + // The Name property does not exist. // // GreaterThan // - // The specified resource in Name is greater than the specified Value. Not supported - // for text-based properties. + // The value of Name is greater than Value. Not supported for text properties. // // GreaterThanOrEqualTo // - // The specified resource in Name is greater than or equal to the specified - // Value. Not supported for text-based properties. + // The value of Name is greater than or equal to Value. Not supported for text + // properties. // // LessThan // - // The specified resource in Name is less than the specified Value. Not supported - // for text-based properties. + // The value of Name is less than Value. Not supported for text properties. // // LessThanOrEqualTo // - // The specified resource in Name is less than or equal to the specified Value. - // Not supported for text-based properties. 
+ // The value of Name is less than or equal to Value. Not supported for text + // properties. + // + // In + // + // The value of Name is one of the comma delimited strings in Value. Only supported + // for text properties. // // Contains // - // Only supported for text-based properties. The word-list of the property contains - // the specified Value. A SearchExpression can include only one Contains operator. + // The value of Name contains the string Value. Only supported for text properties. + // + // A SearchExpression can include the Contains operator multiple times when + // the value of Name is one of the following: + // + // * Experiment.DisplayName + // + // * Experiment.ExperimentName + // + // * Experiment.Tags + // + // * Trial.DisplayName + // + // * Trial.TrialName + // + // * Trial.Tags + // + // * TrialComponent.DisplayName + // + // * TrialComponent.TrialComponentName + // + // * TrialComponent.Tags // - // If you have specified a filter Value, the default is Equals. + // * TrialComponent.InputArtifacts + // + // * TrialComponent.OutputArtifacts + // + // A SearchExpression can include only one Contains operator for all other values + // of Name. In these cases, if you include multiple Contains operators in the + // SearchExpression, the result is the following error message: "'CONTAINS' + // operator usage limit of 1 exceeded." Operator *string `type:"string" enum:"Operator"` - // A value used with Resource and Operator to determine if objects satisfy the - // filter's condition. For numerical properties, Value must be an integer or - // floating-point decimal. For timestamp properties, Value must be an ISO 8601 - // date-time string of the following format: YYYY-mm-dd'T'HH:MM:SS. + // A value used with Name and Operator to determine which resources satisfy + // the filter's condition. For numerical properties, Value must be an integer + // or floating-point decimal. For timestamp properties, Value must be an ISO + // 8601 date-time string of the following format: YYYY-mm-dd'T'HH:MM:SS. Value *string `min:"1" type:"string"` } @@ -27449,19 +28835,20 @@ func (s *Filter) SetValue(v string) *Filter { return s } -// The candidate result from a job. +// The best candidate result from an AutoML training job. type FinalAutoMLJobObjectiveMetric struct { _ struct{} `type:"structure"` - // The name of the metric. + // The name of the metric with the best result. For a description of the possible + // objective metrics, see AutoMLJobObjective$MetricName. // // MetricName is a required field MetricName *string `type:"string" required:"true" enum:"AutoMLMetricEnum"` - // The metric type used. + // The type of metric with the best result. Type *string `type:"string" enum:"AutoMLJobObjectiveType"` - // The value of the metric. + // The value of the metric with the best result. // // Value is a required field Value *float64 `type:"float" required:"true"` @@ -27665,7 +29052,7 @@ func (s *FlowDefinitionSummary) SetFlowDefinitionStatus(v string) *FlowDefinitio type GetSearchSuggestionsInput struct { _ struct{} `type:"structure"` - // The name of the Amazon SageMaker resource to Search for. + // The name of the Amazon SageMaker resource to search for. // // Resource is a required field Resource *string `type:"string" required:"true" enum:"ResourceType"` @@ -27846,13 +29233,17 @@ func (s *GitConfigForUpdate) SetSecretArn(v string) *GitConfigForUpdate { return s } -// Defines under what conditions SageMaker creates a human loop. Used within . 
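To make the revised Filter semantics concrete, here is a sketch of a Search request that applies the documented "Metrics.accuracy" GreaterThan "0.9" example to training jobs. It assumes the Search operation and the SearchExpression and SearchRecord shapes defined elsewhere in this file.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	svc := sagemaker.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))

	// Filter on a metric property: text-only operators such as Contains or In
	// would not be valid here, per the operator notes above.
	out, err := svc.Search(&sagemaker.SearchInput{
		Resource: aws.String("TrainingJob"),
		SearchExpression: &sagemaker.SearchExpression{
			Filters: []*sagemaker.Filter{{
				Name:     aws.String("Metrics.accuracy"),
				Operator: aws.String("GreaterThan"),
				Value:    aws.String("0.9"),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, rec := range out.Results {
		if rec.TrainingJob != nil {
			fmt.Println(aws.StringValue(rec.TrainingJob.TrainingJobName))
		}
	}
}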
+// Defines under what conditions SageMaker creates a human loop. Used within +// . See for the required format of activation conditions. type HumanLoopActivationConditionsConfig struct { _ struct{} `type:"structure"` // JSON expressing use-case specific conditions declaratively. If any condition // is matched, atomic tasks are created against the configured work team. The - // set of conditions is different for Rekognition and Textract. + // set of conditions is different for Rekognition and Textract. For more information + // about how to structure the JSON, see JSON Schema for Human Loop Activation + // Conditions in Amazon Augmented AI (https://docs.aws.amazon.com/sagemaker/latest/dg/a2i-human-fallback-conditions-json-schema.html) + // in the Amazon SageMaker Developer Guide. // // HumanLoopActivationConditions is a required field HumanLoopActivationConditions aws.JSONValue `type:"jsonvalue" required:"true"` @@ -27898,11 +29289,6 @@ type HumanLoopActivationConfig struct { // // HumanLoopActivationConditionsConfig is a required field HumanLoopActivationConditionsConfig *HumanLoopActivationConditionsConfig `type:"structure" required:"true"` - - // Container for configuring the source of human task requests. - // - // HumanLoopRequestSource is a required field - HumanLoopRequestSource *HumanLoopRequestSource `type:"structure" required:"true"` } // String returns the string representation @@ -27921,19 +29307,11 @@ func (s *HumanLoopActivationConfig) Validate() error { if s.HumanLoopActivationConditionsConfig == nil { invalidParams.Add(request.NewErrParamRequired("HumanLoopActivationConditionsConfig")) } - if s.HumanLoopRequestSource == nil { - invalidParams.Add(request.NewErrParamRequired("HumanLoopRequestSource")) - } if s.HumanLoopActivationConditionsConfig != nil { if err := s.HumanLoopActivationConditionsConfig.Validate(); err != nil { invalidParams.AddNested("HumanLoopActivationConditionsConfig", err.(request.ErrInvalidParams)) } } - if s.HumanLoopRequestSource != nil { - if err := s.HumanLoopRequestSource.Validate(); err != nil { - invalidParams.AddNested("HumanLoopRequestSource", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -27947,12 +29325,6 @@ func (s *HumanLoopActivationConfig) SetHumanLoopActivationConditionsConfig(v *Hu return s } -// SetHumanLoopRequestSource sets the HumanLoopRequestSource field's value. -func (s *HumanLoopActivationConfig) SetHumanLoopRequestSource(v *HumanLoopRequestSource) *HumanLoopActivationConfig { - s.HumanLoopRequestSource = v - return s -} - // Describes the work to be performed by human workers. type HumanLoopConfig struct { _ struct{} `type:"structure"` @@ -28169,7 +29541,7 @@ type HumanLoopConfig struct { // * 0.012 PublicWorkforceTaskPrice *PublicWorkforceTaskPrice `type:"structure"` - // The length of time that a task remains available for labeling by human workers. + // The length of time that a task remains available for review by human workers. TaskAvailabilityLifetimeInSeconds *int64 `min:"1" type:"integer"` // The number of distinct workers who will perform the same task on each object. @@ -28188,7 +29560,8 @@ type HumanLoopConfig struct { // Keywords used to describe the task so that workers can discover the task. TaskKeywords []*string `min:"1" type:"list"` - // The amount of time that a worker has to complete a task. + // The amount of time that a worker has to complete a task. 
The default value + // is 3,600 seconds (1 hour) TaskTimeLimitInSeconds *int64 `min:"30" type:"integer"` // A title for the human worker task. @@ -28373,250 +29746,636 @@ type HumanTaskConfig struct { // data object is sent to a human worker. Use this function to provide input // to a custom labeling job. // - // For the built-in bounding box, image classification, semantic segmentation, - // and text classification task types, Amazon SageMaker Ground Truth provides - // the following Lambda functions: + // For built-in task types (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-task-types.html), + // use one of the following Amazon SageMaker Ground Truth Lambda function ARNs + // for PreHumanTaskLambdaArn. For custom labeling workflows, see Pre-annotation + // Lambda (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates-step3.html#sms-custom-templates-step3-prelambda). // - // US East (Northern Virginia) (us-east-1): + // Bounding box - Finds the most similar boxes from different workers based + // on the Jaccard index of the boxes. // // * arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox // - // * arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox // - // * arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox // - // * arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox // - // * arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox // - // * arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationBoundingBox + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox // - // * arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationSemanticSegmentation + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox // - // * arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentBoundingBox + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox // - // * arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentSemanticSegmentation + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox // - // US East (Ohio) (us-east-2): + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox // - // * arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox + // + // Image classification - Uses a variant of the Expectation Maximization approach + // to estimate the true class of an image based on annotations from individual + // workers. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass // // * arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass // - // * arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass // - // * arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass // - // * arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass // - // * arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationBoundingBox + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass // - // * arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationSemanticSegmentation + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass // - // * arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentBoundingBox + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass // - // * arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentSemanticSegmentation + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass // - // US West (Oregon) (us-west-2): + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass // - // * arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass // - // * arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass // - // * arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation + // Multi-label image classification - Uses a variant of the Expectation Maximization + // approach to estimate the true classes of an image based on annotations from + // individual workers. 
// - // * arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClassMultiLabel // - // * arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClassMultiLabel // - // * arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationBoundingBox + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClassMultiLabel // - // * arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationSemanticSegmentation + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClassMultiLabel // - // * arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentBoundingBox + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClassMultiLabel // - // * arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentSemanticSegmentation + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClassMultiLabel // - // Canada (Central) (ca-central-1): + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClassMultiLabel // - // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClassMultiLabel // - // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClassMultiLabel + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClassMultiLabel + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClassMultiLabel + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClassMultiLabel + // + // Semantic segmentation - Treats each pixel in an image as a multi-class classification + // and treats pixel annotations from workers as "votes" for the correct label. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation // // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation // - // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation // - // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation // - // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationBoundingBox + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation // - // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationSemanticSegmentation + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation // - // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentBoundingBox + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation // - // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentSemanticSegmentation + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation // - // EU (Ireland) (eu-west-1): + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation // - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation // - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass + // Text classification - Uses a variant of the Expectation Maximization approach + // to estimate the true class of text based on annotations from individual workers. 
// - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass // // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass // - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass // - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationBoundingBox + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass // - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationSemanticSegmentation + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass // - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentBoundingBox + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass // - // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentSemanticSegmentation + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass // - // EU (London) (eu-west-2): + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass + // Multi-label text classification - Uses a variant of the Expectation Maximization + // approach to estimate the true classes of text based on annotations from individual + // workers. 
// - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClassMultiLabel // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClassMultiLabel // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClassMultiLabel // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationBoundingBox + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClassMultiLabel // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationSemanticSegmentation + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClassMultiLabel // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentBoundingBox + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClassMultiLabel // - // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentSemanticSegmentation + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClassMultiLabel // - // EU Frankfurt (eu-central-1): + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClassMultiLabel // - // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClassMultiLabel // - // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClassMultiLabel // - // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClassMultiLabel // - // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClassMultiLabel + // + // Named entity recognition - Groups similar selections and calculates aggregate + // boundaries, resolving to most-assigned label. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition // // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition // - // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationBoundingBox + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition // - // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationSemanticSegmentation + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition // - // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentBoundingBox + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition // - // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentSemanticSegmentation + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition // - // Asia Pacific (Tokyo) (ap-northeast-1): + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition // - // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox + // Video Classification - Use this task type when you need workers to classify + // videos using predefined labels that you specify. Workers are shown videos + // and are asked to choose one label for each video. // - // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoMultiClass // - // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoMultiClass // - // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoMultiClass // - // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoMultiClass // - // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationBoundingBox + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoMultiClass // - // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationSemanticSegmentation + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoMultiClass // - // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentBoundingBox + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoMultiClass // - // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentSemanticSegmentation + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoMultiClass // - // Asia Pacific (Seoul) (ap-northeast-2): + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoMultiClass // - // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoMultiClass // - // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoMultiClass // - // * 
arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoMultiClass // - // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass + // Video Frame Object Detection - Use this task type to have workers identify + // and locate objects in a sequence of video frames (images extracted from a + // video) using bounding boxes. For example, you can use this task to ask workers + // to identify and localize various objects in a series of video frames, such + // as cars, bikes, and pedestrians. // - // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationBoundingBox + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationSemanticSegmentation + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentBoundingBox + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentSemanticSegmentation + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectDetection // - // Asia Pacific (Mumbai) (ap-south-1): + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationBoundingBox + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectDetection // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationSemanticSegmentation + // Video Frame Object Tracking - Use this task type to have workers track the + // movement of objects in a sequence of video frames (images extracted from + // a video) using bounding boxes. For example, you can use this task to ask + // workers to track the movement of objects, such as cars, bikes, and pedestrians. 
// - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentBoundingBox + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectTracking // - // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentSemanticSegmentation + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectTracking // - // Asia Pacific (Singapore) (ap-southeast-1): + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectTracking // - // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectTracking // - // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectTracking // - // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectTracking // - // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectTracking // - // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectTracking // - // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationBoundingBox + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectTracking // - // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationSemanticSegmentation + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectTracking + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectTracking + // + // 3D Point Cloud Modalities + // + // Use the following pre-annotation lambdas for 3D point cloud labeling modality + // tasks. See 3D Point Cloud Task types (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-point-cloud-task-types.html) + // to learn more. + // + // 3D Point Cloud Object Detection - Use this task type when you want workers + // to classify objects in a 3D point cloud by drawing 3D cuboids around objects. + // For example, you can use this task type to ask workers to identify different + // types of objects in a point cloud, such as cars, bikes, and pedestrians. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectDetection + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectDetection + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectDetection + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectDetection + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectDetection + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectDetection + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectDetection + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectDetection + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectDetection + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectDetection + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectDetection + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectDetection + // + // 3D Point Cloud Object Tracking - Use this task type when you want workers + // to draw 3D cuboids around objects that appear in a sequence of 3D point cloud + // frames. For example, you can use this task type to ask workers to track the + // movement of vehicles across multiple point cloud frames. + // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectTracking + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectTracking + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectTracking + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectTracking + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectTracking + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectTracking + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectTracking + // + // 3D Point Cloud Semantic Segmentation - Use this task type when you want workers + // to create a point-level semantic segmentation masks by painting objects in + // a 3D point cloud using different colors where each color is assigned to one + // of the classes you specify. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudSemanticSegmentation + // + // Use the following ARNs for Label Verification and Adjustment Jobs + // + // Use label verification and adjustment jobs to review and adjust labels. To + // learn more, see Verify and Adjust Labels (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-verification-data.html). + // + // Bounding box verification - Uses a variant of the Expectation Maximization + // approach to estimate the true class of verification judgement for bounding + // box labels based on annotations from individual workers. + // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking + // + // Bounding box adjustment - Finds the most similar boxes from different workers + // based on the Jaccard index of the adjusted annotations. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentBoundingBox + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentBoundingBox + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentBoundingBox + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentBoundingBox + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentBoundingBox + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentBoundingBox + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentBoundingBox + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentBoundingBox + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentBoundingBox + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentBoundingBox // // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentBoundingBox // - // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentSemanticSegmentation + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentBoundingBox // - // Asia Pacific (Sydney) (ap-southeast-2): + // Semantic segmentation verification - Uses a variant of the Expectation Maximization + // approach to estimate the true class of verification judgment for semantic + // segmentation labels based on annotations from individual workers. // - // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationSemanticSegmentation // - // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationSemanticSegmentation // - // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationSemanticSegmentation // - // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationSemanticSegmentation // - // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationSemanticSegmentation + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationSemanticSegmentation + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationSemanticSegmentation + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationSemanticSegmentation + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationSemanticSegmentation // - // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationBoundingBox + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationSemanticSegmentation + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationSemanticSegmentation // // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationSemanticSegmentation // - // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentBoundingBox + // Semantic segmentation adjustment - Treats each pixel in an image as a multi-class + // classification and treats pixel adjusted annotations from workers as "votes" + // for the correct label. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentSemanticSegmentation + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentSemanticSegmentation + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentSemanticSegmentation + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentSemanticSegmentation + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentSemanticSegmentation + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentSemanticSegmentation + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentSemanticSegmentation + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentSemanticSegmentation + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentSemanticSegmentation + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentSemanticSegmentation + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentSemanticSegmentation // // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentSemanticSegmentation // + // Video Frame Object Detection Adjustment - Use this task type when you want + // workers to adjust bounding boxes that workers have added to video frames + // to classify and localize objects in a sequence of video frames. + // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectDetection + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectDetection + // + // Video Frame Object Tracking Adjustment - Use this task type when you want + // workers to adjust bounding boxes that workers have added to video frames + // to track object movement across a sequence of video frames. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectTracking + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectTracking + // + // 3D point cloud object detection adjustment - Adjust 3D cuboids in a point + // cloud frame. + // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectDetection + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectDetection + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectDetection + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectDetection + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectDetection + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectDetection + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectDetection + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectDetection + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectDetection + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectDetection + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectDetection + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectDetection + // + // 3D point cloud object tracking adjustment - Adjust 3D cuboids across a sequence + // of point cloud frames. 
+ // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking + // + // 3D point cloud semantic segmentation adjustment - Adjust semantic segmentation + // masks in a 3D point cloud. + // + // * arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudSemanticSegmentation + // + // * arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudSemanticSegmentation + // // PreHumanTaskLambdaArn is a required field PreHumanTaskLambdaArn *string `type:"string" required:"true"` @@ -28626,7 +30385,7 @@ type HumanTaskConfig struct { // The length of time that a task remains available for labeling by human workers. // If you choose the Amazon Mechanical Turk workforce, the maximum is 12 hours - // (43200). The default value is 864000 seconds (1 day). For private and vendor + // (43200). The default value is 864000 seconds (10 days). For private and vendor // workforces, the maximum is as listed. 
TaskAvailabilityLifetimeInSeconds *int64 `min:"60" type:"integer"` @@ -28725,11 +30484,6 @@ func (s *HumanTaskConfig) Validate() error { invalidParams.AddNested("AnnotationConsolidationConfig", err.(request.ErrInvalidParams)) } } - if s.UiConfig != nil { - if err := s.UiConfig.Validate(); err != nil { - invalidParams.AddNested("UiConfig", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -29918,6 +31672,51 @@ func (s *HyperParameterTuningJobWarmStartConfig) SetWarmStartType(v string) *Hyp return s } +// Specifies whether the model container is in Amazon ECR or a private Docker +// registry accessible from your Amazon Virtual Private Cloud (VPC). +type ImageConfig struct { + _ struct{} `type:"structure"` + + // Set this to one of the following values: + // + // * Platform - The model image is hosted in Amazon ECR. + // + // * Vpc - The model image is hosted in a private Docker registry in your + // VPC. + // + // RepositoryAccessMode is a required field + RepositoryAccessMode *string `type:"string" required:"true" enum:"RepositoryAccessMode"` +} + +// String returns the string representation +func (s ImageConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImageConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImageConfig"} + if s.RepositoryAccessMode == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryAccessMode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRepositoryAccessMode sets the RepositoryAccessMode field's value. +func (s *ImageConfig) SetRepositoryAccessMode(v string) *ImageConfig { + s.RepositoryAccessMode = &v + return s +} + // Defines how to perform inference generation after a training job is run. type InferenceSpecification struct { _ struct{} `type:"structure"` @@ -30080,6 +31879,52 @@ type InputConfig struct { // // * XGBOOST: input data name and shape are not needed. // + // DataInputConfig supports the following parameters for CoreML OutputConfig$TargetDevice + // (ML Model format): + // + // * shape: Input shape, for example {"input_1": {"shape": [1,224,224,3]}}. + // In addition to static input shapes, CoreML converter supports Flexible + // input shapes: Range Dimension. You can use the Range Dimension feature + // if you know the input shape will be within some specific interval in that + // dimension, for example: {"input_1": {"shape": ["1..10", 224, 224, 3]}} + // Enumerated shapes. Sometimes, the models are trained to work only on a + // select set of inputs. You can enumerate all supported input shapes, for + // example: {"input_1": {"shape": [[1, 224, 224, 3], [1, 160, 160, 3]]}} + // + // * default_shape: Default input shape. You can set a default shape during + // conversion for both Range Dimension and Enumerated Shapes. For example + // {"input_1": {"shape": ["1..10", 224, 224, 3], "default_shape": [1, 224, + // 224, 3]}} + // + // * type: Input type. Allowed values: Image and Tensor. By default, the + // converter generates an ML Model with inputs of type Tensor (MultiArray). + // User can set input type to be Image. Image input type requires additional + // input parameters such as bias and scale. + // + // * bias: If the input type is an Image, you need to provide the bias vector. 
+ //
+ // * scale: If the input type is an Image, you need to provide a scale factor.
+ //
+ // CoreML ClassifierConfig parameters can be specified using OutputConfig$CompilerOptions.
+ // CoreML converter supports Tensorflow and PyTorch models. CoreML conversion
+ // examples:
+ //
+ // * Tensor type input: "DataInputConfig": {"input_1": {"shape": [[1,224,224,3],
+ // [1,160,160,3]], "default_shape": [1,224,224,3]}}
+ //
+ // * Tensor type input without input name (PyTorch): "DataInputConfig": [{"shape":
+ // [[1,3,224,224], [1,3,160,160]], "default_shape": [1,3,224,224]}]
+ //
+ // * Image type input: "DataInputConfig": {"input_1": {"shape": [[1,224,224,3],
+ // [1,160,160,3]], "default_shape": [1,224,224,3], "type": "Image", "bias":
+ // [-1,-1,-1], "scale": 0.007843137255}} "CompilerOptions": {"class_labels":
+ // "imagenet_labels_1000.txt"}
+ //
+ // * Image type input without input name (PyTorch): "DataInputConfig": [{"shape":
+ // [[1,3,224,224], [1,3,160,160]], "default_shape": [1,3,224,224], "type":
+ // "Image", "bias": [-1,-1,-1], "scale": 0.007843137255}] "CompilerOptions":
+ // {"class_labels": "imagenet_labels_1000.txt"}
+ //
 // DataInputConfig is a required field
 DataInputConfig *string `min:"1" type:"string" required:"true"`

@@ -30300,7 +32145,8 @@ func (s *IntegerParameterRangeSpecification) SetMinValue(v string) *IntegerParam
 type JupyterServerAppSettings struct {
 _ struct{} `type:"structure"`

- // The instance type and quantity.
+ // The default instance type and the Amazon Resource Name (ARN) of the SageMaker
+ // image created on the instance.
 DefaultResourceSpec *ResourceSpec `type:"structure"`
 }

@@ -30324,7 +32170,8 @@ func (s *JupyterServerAppSettings) SetDefaultResourceSpec(v *ResourceSpec) *Jupy
 type KernelGatewayAppSettings struct {
 _ struct{} `type:"structure"`

- // The instance type and quantity.
+ // The default instance type and the Amazon Resource Name (ARN) of the SageMaker
+ // image created on the instance.
 DefaultResourceSpec *ResourceSpec `type:"structure"`
 }

@@ -30451,10 +32298,10 @@ func (s *LabelCountersForWorkteam) SetTotal(v int64) *LabelCountersForWorkteam {
 type LabelingJobAlgorithmsConfig struct {
 _ struct{} `type:"structure"`

- // At the end of an auto-label job Amazon SageMaker Ground Truth sends the Amazon
- // Resource Nam (ARN) of the final model used for auto-labeling. You can use
- // this model as the starting point for subsequent similar jobs by providing
- // the ARN of the model here.
+ // At the end of an auto-label job Ground Truth sends the Amazon Resource Name
+ // (ARN) of the final model used for auto-labeling. You can use this model as
+ // the starting point for subsequent similar jobs by providing the ARN of the
+ // model here.
 InitialActiveLearningModelArn *string `min:"20" type:"string"`

 // Specifies the Amazon Resource Name (ARN) of the algorithm used for auto-labeling.

@@ -30547,13 +32394,24 @@ func (s *LabelingJobDataAttributes) SetContentClassifiers(v []*string) *Labeling
 }

 // Provides information about the location of input data.
+//
+// You must specify at least one of the following: S3DataSource or SnsDataSource.
+//
+// Use SnsDataSource to specify an SNS input topic for a streaming labeling
+// job. If you do not specify an SNS input topic ARN, Ground Truth will create
+// a one-time labeling job.
+//
+// Use S3DataSource to specify an input manifest file for both streaming and
+// one-time labeling jobs. Adding an S3DataSource is optional if you use SnsDataSource
+// to create a streaming labeling job.
type LabelingJobDataSource struct { _ struct{} `type:"structure"` // The Amazon S3 location of the input data objects. - // - // S3DataSource is a required field - S3DataSource *LabelingJobS3DataSource `type:"structure" required:"true"` + S3DataSource *LabelingJobS3DataSource `type:"structure"` + + // An Amazon SNS data source used for streaming labeling jobs. + SnsDataSource *LabelingJobSnsDataSource `type:"structure"` } // String returns the string representation @@ -30569,14 +32427,16 @@ func (s LabelingJobDataSource) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *LabelingJobDataSource) Validate() error { invalidParams := request.ErrInvalidParams{Context: "LabelingJobDataSource"} - if s.S3DataSource == nil { - invalidParams.Add(request.NewErrParamRequired("S3DataSource")) - } if s.S3DataSource != nil { if err := s.S3DataSource.Validate(); err != nil { invalidParams.AddNested("S3DataSource", err.(request.ErrInvalidParams)) } } + if s.SnsDataSource != nil { + if err := s.SnsDataSource.Validate(); err != nil { + invalidParams.AddNested("SnsDataSource", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -30590,6 +32450,12 @@ func (s *LabelingJobDataSource) SetS3DataSource(v *LabelingJobS3DataSource) *Lab return s } +// SetSnsDataSource sets the SnsDataSource field's value. +func (s *LabelingJobDataSource) SetSnsDataSource(v *LabelingJobSnsDataSource) *LabelingJobDataSource { + s.SnsDataSource = v + return s +} + // Provides summary information for a work team. type LabelingJobForWorkteamSummary struct { _ struct{} `type:"structure"` @@ -30780,6 +32646,15 @@ type LabelingJobOutputConfig struct { // // S3OutputPath is a required field S3OutputPath *string `type:"string" required:"true"` + + // An Amazon Simple Notification Service (Amazon SNS) output topic ARN. + // + // When workers complete labeling tasks, Ground Truth will send labeling task + // output data to the SNS output topic you specify here. + // + // You must provide a value for this parameter if you provide an Amazon SNS + // input topic in SnsDataSource in InputConfig. + SnsTopicArn *string `type:"string"` } // String returns the string representation @@ -30817,6 +32692,12 @@ func (s *LabelingJobOutputConfig) SetS3OutputPath(v string) *LabelingJobOutputCo return s } +// SetSnsTopicArn sets the SnsTopicArn field's value. +func (s *LabelingJobOutputConfig) SetSnsTopicArn(v string) *LabelingJobOutputConfig { + s.SnsTopicArn = &v + return s +} + // Provides configuration information for labeling jobs. type LabelingJobResourceConfig struct { _ struct{} `type:"structure"` @@ -30888,6 +32769,50 @@ func (s *LabelingJobS3DataSource) SetManifestS3Uri(v string) *LabelingJobS3DataS return s } +// An Amazon SNS data source used for streaming labeling jobs. +type LabelingJobSnsDataSource struct { + _ struct{} `type:"structure"` + + // The Amazon SNS input topic Amazon Resource Name (ARN). Specify the ARN of + // the input topic you will use to send new data objects to a streaming labeling + // job. + // + // If you specify an input topic for SnsTopicArn in InputConfig, you must specify + // a value for SnsTopicArn in OutputConfig. 
+ // + // SnsTopicArn is a required field + SnsTopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s LabelingJobSnsDataSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LabelingJobSnsDataSource) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LabelingJobSnsDataSource) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LabelingJobSnsDataSource"} + if s.SnsTopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("SnsTopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSnsTopicArn sets the SnsTopicArn field's value. +func (s *LabelingJobSnsDataSource) SetSnsTopicArn(v string) *LabelingJobSnsDataSource { + s.SnsTopicArn = &v + return s +} + // A set of conditions for stopping a labeling job. If any of the conditions // are met, the job is automatically stopped. You can use these conditions to // control the cost of data labeling. @@ -31348,8 +33273,8 @@ type ListAutoMLJobsInput struct { // Request a list of jobs, using a search filter for name. NameContains *string `type:"string"` - // If the previous response was truncated, you will receive this token. Use - // it in your next request to receive the next set of results. + // If the previous response was truncated, you receive this token. Use it in + // your next request to receive the next set of results. NextToken *string `type:"string"` // The parameter by which to sort the results. The default is AutoMLJobName. @@ -31453,8 +33378,8 @@ type ListAutoMLJobsOutput struct { // AutoMLJobSummaries is a required field AutoMLJobSummaries []*AutoMLJobSummary `type:"list" required:"true"` - // If the previous response was truncated, you will receive this token. Use - // it in your next request to receive the next set of results. + // If the previous response was truncated, you receive this token. Use it in + // your next request to receive the next set of results. NextToken *string `type:"string"` } @@ -31494,8 +33419,8 @@ type ListCandidatesForAutoMLJobInput struct { // List the job's Candidates up to a specified limit. MaxResults *int64 `min:"1" type:"integer"` - // If the previous response was truncated, you will receive this token. Use - // it in your next request to receive the next set of results. + // If the previous response was truncated, you receive this token. Use it in + // your next request to receive the next set of results. NextToken *string `type:"string"` // The parameter by which to sort the results. The default is Descending. @@ -31590,8 +33515,8 @@ type ListCandidatesForAutoMLJobOutput struct { // Candidates is a required field Candidates []*AutoMLCandidate `type:"list" required:"true"` - // If the previous response was truncated, you will receive this token. Use - // it in your next request to receive the next set of results. + // If the previous response was truncated, you receive this token. Use it in + // your next request to receive the next set of results. NextToken *string `type:"string"` } @@ -35274,6 +37199,116 @@ func (s *ListUserProfilesOutput) SetUserProfiles(v []*UserProfileDetails) *ListU return s } +type ListWorkforcesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of workforces returned in the response. 
+ MaxResults *int64 `min:"1" type:"integer"` + + // A filter you can use to search for workforces using part of the workforce + // name. + NameContains *string `min:"1" type:"string"` + + // A token to resume pagination. + NextToken *string `type:"string"` + + // Sort workforces using the workforce name or creation date. + SortBy *string `type:"string" enum:"ListWorkforcesSortByOptions"` + + // Sort workforces in ascending or descending order. + SortOrder *string `type:"string" enum:"SortOrder"` +} + +// String returns the string representation +func (s ListWorkforcesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWorkforcesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListWorkforcesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListWorkforcesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NameContains != nil && len(*s.NameContains) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NameContains", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListWorkforcesInput) SetMaxResults(v int64) *ListWorkforcesInput { + s.MaxResults = &v + return s +} + +// SetNameContains sets the NameContains field's value. +func (s *ListWorkforcesInput) SetNameContains(v string) *ListWorkforcesInput { + s.NameContains = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListWorkforcesInput) SetNextToken(v string) *ListWorkforcesInput { + s.NextToken = &v + return s +} + +// SetSortBy sets the SortBy field's value. +func (s *ListWorkforcesInput) SetSortBy(v string) *ListWorkforcesInput { + s.SortBy = &v + return s +} + +// SetSortOrder sets the SortOrder field's value. +func (s *ListWorkforcesInput) SetSortOrder(v string) *ListWorkforcesInput { + s.SortOrder = &v + return s +} + +type ListWorkforcesOutput struct { + _ struct{} `type:"structure"` + + // A token to resume pagination. + NextToken *string `type:"string"` + + // A list containing information about your workforce. + // + // Workforces is a required field + Workforces []*Workforce `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListWorkforcesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListWorkforcesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListWorkforcesOutput) SetNextToken(v string) *ListWorkforcesOutput { + s.NextToken = &v + return s +} + +// SetWorkforces sets the Workforces field's value. +func (s *ListWorkforcesOutput) SetWorkforces(v []*Workforce) *ListWorkforcesOutput { + s.Workforces = v + return s +} + type ListWorkteamsInput struct { _ struct{} `type:"structure"` @@ -35387,12 +37422,20 @@ func (s *ListWorkteamsOutput) SetWorkteams(v []*Workteam) *ListWorkteamsOutput { return s } -// Defines the Amazon Cognito user group that is part of a work team. +// Defines an Amazon Cognito or your own OIDC IdP user group that is part of +// a work team. type MemberDefinition struct { _ struct{} `type:"structure"` // The Amazon Cognito user group that is part of the work team. 
CognitoMemberDefinition *CognitoMemberDefinition `type:"structure"`
+
+ // A list of user groups that exist in your OIDC Identity Provider (IdP). One to
+ // ten groups can be used to create a single private work team. When you add
+ // a user group to the list of Groups, you can add that user group to one or
+ // more private work teams. If you add a user group to a private work team,
+ // all workers in that user group are added to the work team.
+ OidcMemberDefinition *OidcMemberDefinition `type:"structure"`
 }

 // String returns the string representation

@@ -35413,6 +37456,11 @@ func (s *MemberDefinition) Validate() error {
 invalidParams.AddNested("CognitoMemberDefinition", err.(request.ErrInvalidParams))
 }
 }
+ if s.OidcMemberDefinition != nil {
+ if err := s.OidcMemberDefinition.Validate(); err != nil {
+ invalidParams.AddNested("OidcMemberDefinition", err.(request.ErrInvalidParams))
+ }
+ }

 if invalidParams.Len() > 0 {
 return invalidParams

@@ -35426,6 +37474,12 @@ func (s *MemberDefinition) SetCognitoMemberDefinition(v *CognitoMemberDefinition
 return s
 }

+// SetOidcMemberDefinition sets the OidcMemberDefinition field's value.
+func (s *MemberDefinition) SetOidcMemberDefinition(v *OidcMemberDefinition) *MemberDefinition {
+ s.OidcMemberDefinition = v
+ return s
+}
+
 // The name, value, and date and time of a metric that was emitted to Amazon
 // CloudWatch.
 type MetricData struct {

@@ -35535,6 +37589,10 @@ func (s *MetricDefinition) SetRegex(v string) *MetricDefinition {
 // Provides information about the location that is configured for storing model
 // artifacts.
+//
+// Model artifacts are the output that results from training a model, and typically
+// consist of trained parameters, a model definition that describes how to compute
+// inferences, and other metadata.
 type ModelArtifacts struct {
 _ struct{} `type:"structure"`

@@ -35561,6 +37619,53 @@ func (s *ModelArtifacts) SetS3ModelArtifacts(v string) *ModelArtifacts {
 return s
 }

+// Configures the timeout and maximum number of retries for processing a transform
+// job invocation.
+type ModelClientConfig struct {
+ _ struct{} `type:"structure"`
+
+ // The maximum number of retries when invocation requests are failing.
+ InvocationsMaxRetries *int64 `type:"integer"`
+
+ // The timeout value in seconds for an invocation request.
+ InvocationsTimeoutInSeconds *int64 `min:"1" type:"integer"`
+}
+
+// String returns the string representation
+func (s ModelClientConfig) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ModelClientConfig) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ModelClientConfig) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ModelClientConfig"}
+ if s.InvocationsTimeoutInSeconds != nil && *s.InvocationsTimeoutInSeconds < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("InvocationsTimeoutInSeconds", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetInvocationsMaxRetries sets the InvocationsMaxRetries field's value.
+func (s *ModelClientConfig) SetInvocationsMaxRetries(v int64) *ModelClientConfig {
+ s.InvocationsMaxRetries = &v
+ return s
+}
+
+// SetInvocationsTimeoutInSeconds sets the InvocationsTimeoutInSeconds field's value.
+func (s *ModelClientConfig) SetInvocationsTimeoutInSeconds(v int64) *ModelClientConfig { + s.InvocationsTimeoutInSeconds = &v + return s +} + // Describes the Docker container for the model package. type ModelPackageContainerDefinition struct { _ struct{} `type:"structure"` @@ -35587,6 +37692,9 @@ type ModelPackageContainerDefinition struct { // The Amazon S3 path where the model artifacts, which result from model training, // are stored. This path must point to a single gzip compressed tar archive // (.tar.gz suffix). + // + // The model artifacts must be in an S3 bucket that is in the same region as + // the model package. ModelDataUrl *string `type:"string"` // The AWS Marketplace product ID of the model package. @@ -36954,22 +39062,16 @@ func (s *MonitoringStoppingCondition) SetMaxRuntimeInSeconds(v int64) *Monitorin return s } -// Defines a list of NestedFilters objects. To satisfy the conditions specified -// in the NestedFilters call, a resource must satisfy the conditions of all -// of the filters. +// A list of nested Filter objects. A resource must satisfy the conditions of +// all filters to be included in the results returned from the Search API. // -// For example, you could define a NestedFilters using the training job's InputDataConfig -// property to filter on Channel objects. +// For example, to filter on a training job's InputDataConfig property with +// a specific channel name and S3Uri prefix, define the following filters: // -// A NestedFilters object contains multiple filters. For example, to find all -// training jobs whose name contains train and that have cat/data in their S3Uri -// (specified in InputDataConfig), you need to create a NestedFilters object -// that specifies the InputDataConfig property with the following Filter objects: +// * '{Name:"InputDataConfig.ChannelName", "Operator":"Equals", "Value":"train"}', // -// * '{Name:"InputDataConfig.ChannelName", "Operator":"EQUALS", "Value":"train"}', -// -// * '{Name:"InputDataConfig.DataSource.S3DataSource.S3Uri", "Operator":"CONTAINS", -// "Value":"cat/data"}' +// * '{Name:"InputDataConfig.DataSource.S3DataSource.S3Uri", "Operator":"Contains", +// "Value":"mybucket/catdata"}' type NestedFilters struct { _ struct{} `type:"structure"` @@ -37048,6 +39150,11 @@ func (s *NestedFilters) SetNestedPropertyName(v string) *NestedFilters { type NetworkConfig struct { _ struct{} `type:"structure"` + // Whether to encrypt all communications between distributed processing jobs. + // Choose True to encrypt communications. Encryption provides greater security + // for distributed processing jobs, but the processing might take longer. + EnableInterContainerTrafficEncryption *bool `type:"boolean"` + // Whether to allow inbound and outbound network calls to and from the containers // used for the processing job. EnableNetworkIsolation *bool `type:"boolean"` @@ -37085,6 +39192,12 @@ func (s *NetworkConfig) Validate() error { return nil } +// SetEnableInterContainerTrafficEncryption sets the EnableInterContainerTrafficEncryption field's value. +func (s *NetworkConfig) SetEnableInterContainerTrafficEncryption(v bool) *NetworkConfig { + s.EnableInterContainerTrafficEncryption = &v + return s +} + // SetEnableNetworkIsolation sets the EnableNetworkIsolation field's value. 
func (s *NetworkConfig) SetEnableNetworkIsolation(v bool) *NetworkConfig { s.EnableNetworkIsolation = &v @@ -37405,22 +39518,355 @@ func (s *ObjectiveStatusCounters) SetSucceeded(v int64) *ObjectiveStatusCounters return s } +// Use this parameter to configure your OIDC Identity Provider (IdP). +type OidcConfig struct { + _ struct{} `type:"structure"` + + // The OIDC IdP authorization endpoint used to configure your private workforce. + // + // AuthorizationEndpoint is a required field + AuthorizationEndpoint *string `type:"string" required:"true"` + + // The OIDC IdP client ID used to configure your private workforce. + // + // ClientId is a required field + ClientId *string `min:"1" type:"string" required:"true"` + + // The OIDC IdP client secret used to configure your private workforce. + // + // ClientSecret is a required field + ClientSecret *string `min:"1" type:"string" required:"true" sensitive:"true"` + + // The OIDC IdP issuer used to configure your private workforce. + // + // Issuer is a required field + Issuer *string `type:"string" required:"true"` + + // The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce. + // + // JwksUri is a required field + JwksUri *string `type:"string" required:"true"` + + // The OIDC IdP logout endpoint used to configure your private workforce. + // + // LogoutEndpoint is a required field + LogoutEndpoint *string `type:"string" required:"true"` + + // The OIDC IdP token endpoint used to configure your private workforce. + // + // TokenEndpoint is a required field + TokenEndpoint *string `type:"string" required:"true"` + + // The OIDC IdP user information endpoint used to configure your private workforce. + // + // UserInfoEndpoint is a required field + UserInfoEndpoint *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s OidcConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OidcConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OidcConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OidcConfig"} + if s.AuthorizationEndpoint == nil { + invalidParams.Add(request.NewErrParamRequired("AuthorizationEndpoint")) + } + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientId != nil && len(*s.ClientId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientId", 1)) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.ClientSecret != nil && len(*s.ClientSecret) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientSecret", 1)) + } + if s.Issuer == nil { + invalidParams.Add(request.NewErrParamRequired("Issuer")) + } + if s.JwksUri == nil { + invalidParams.Add(request.NewErrParamRequired("JwksUri")) + } + if s.LogoutEndpoint == nil { + invalidParams.Add(request.NewErrParamRequired("LogoutEndpoint")) + } + if s.TokenEndpoint == nil { + invalidParams.Add(request.NewErrParamRequired("TokenEndpoint")) + } + if s.UserInfoEndpoint == nil { + invalidParams.Add(request.NewErrParamRequired("UserInfoEndpoint")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value. 
+func (s *OidcConfig) SetAuthorizationEndpoint(v string) *OidcConfig { + s.AuthorizationEndpoint = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *OidcConfig) SetClientId(v string) *OidcConfig { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *OidcConfig) SetClientSecret(v string) *OidcConfig { + s.ClientSecret = &v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *OidcConfig) SetIssuer(v string) *OidcConfig { + s.Issuer = &v + return s +} + +// SetJwksUri sets the JwksUri field's value. +func (s *OidcConfig) SetJwksUri(v string) *OidcConfig { + s.JwksUri = &v + return s +} + +// SetLogoutEndpoint sets the LogoutEndpoint field's value. +func (s *OidcConfig) SetLogoutEndpoint(v string) *OidcConfig { + s.LogoutEndpoint = &v + return s +} + +// SetTokenEndpoint sets the TokenEndpoint field's value. +func (s *OidcConfig) SetTokenEndpoint(v string) *OidcConfig { + s.TokenEndpoint = &v + return s +} + +// SetUserInfoEndpoint sets the UserInfoEndpoint field's value. +func (s *OidcConfig) SetUserInfoEndpoint(v string) *OidcConfig { + s.UserInfoEndpoint = &v + return s +} + +// Your OIDC IdP workforce configuration. +type OidcConfigForResponse struct { + _ struct{} `type:"structure"` + + // The OIDC IdP authorization endpoint used to configure your private workforce. + AuthorizationEndpoint *string `type:"string"` + + // The OIDC IdP client ID used to configure your private workforce. + ClientId *string `min:"1" type:"string"` + + // The OIDC IdP issuer used to configure your private workforce. + Issuer *string `type:"string"` + + // The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce. + JwksUri *string `type:"string"` + + // The OIDC IdP logout endpoint used to configure your private workforce. + LogoutEndpoint *string `type:"string"` + + // The OIDC IdP token endpoint used to configure your private workforce. + TokenEndpoint *string `type:"string"` + + // The OIDC IdP user information endpoint used to configure your private workforce. + UserInfoEndpoint *string `type:"string"` +} + +// String returns the string representation +func (s OidcConfigForResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OidcConfigForResponse) GoString() string { + return s.String() +} + +// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value. +func (s *OidcConfigForResponse) SetAuthorizationEndpoint(v string) *OidcConfigForResponse { + s.AuthorizationEndpoint = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *OidcConfigForResponse) SetClientId(v string) *OidcConfigForResponse { + s.ClientId = &v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *OidcConfigForResponse) SetIssuer(v string) *OidcConfigForResponse { + s.Issuer = &v + return s +} + +// SetJwksUri sets the JwksUri field's value. +func (s *OidcConfigForResponse) SetJwksUri(v string) *OidcConfigForResponse { + s.JwksUri = &v + return s +} + +// SetLogoutEndpoint sets the LogoutEndpoint field's value. +func (s *OidcConfigForResponse) SetLogoutEndpoint(v string) *OidcConfigForResponse { + s.LogoutEndpoint = &v + return s +} + +// SetTokenEndpoint sets the TokenEndpoint field's value. +func (s *OidcConfigForResponse) SetTokenEndpoint(v string) *OidcConfigForResponse { + s.TokenEndpoint = &v + return s +} + +// SetUserInfoEndpoint sets the UserInfoEndpoint field's value. 
+func (s *OidcConfigForResponse) SetUserInfoEndpoint(v string) *OidcConfigForResponse {
+ s.UserInfoEndpoint = &v
+ return s
+}
+
+// A list of user groups that exist in your OIDC Identity Provider (IdP). One
+// to ten groups can be used to create a single private work team. When you
+// add a user group to the list of Groups, you can add that user group to one
+// or more private work teams. If you add a user group to a private work team,
+// all workers in that user group are added to the work team.
+type OidcMemberDefinition struct {
+ _ struct{} `type:"structure"`
+
+ // A list of comma-separated strings that identify user groups in your OIDC
+ // IdP. Each user group is made up of a group of private workers.
+ //
+ // Groups is a required field
+ Groups []*string `min:"1" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s OidcMemberDefinition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s OidcMemberDefinition) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *OidcMemberDefinition) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "OidcMemberDefinition"}
+ if s.Groups == nil {
+ invalidParams.Add(request.NewErrParamRequired("Groups"))
+ }
+ if s.Groups != nil && len(s.Groups) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Groups", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetGroups sets the Groups field's value.
+func (s *OidcMemberDefinition) SetGroups(v []*string) *OidcMemberDefinition {
+ s.Groups = v
+ return s
+}
+
 // Contains information about the output location for the compiled model and
-// the device (target) that the model runs on.
+// the target device that the model runs on. TargetDevice and TargetPlatform
+// are mutually exclusive, so you need to choose one between the two to specify
+// your target device or platform. If you cannot find the device you want to
+// use from the TargetDevice list, use TargetPlatform to describe the platform
+// of your edge device and CompilerOptions if there are specific settings that
+// are required or recommended to use for particular TargetPlatform.
 type OutputConfig struct {
 _ struct{} `type:"structure"`

- // Identifies the S3 path where you want Amazon SageMaker to store the model
+ // Specifies additional parameters for compiler options in JSON format. The
+ // compiler options are TargetPlatform specific. It is required for NVIDIA accelerators
+ // and highly recommended for CPU compilations. For any other cases, it is optional
+ // to specify CompilerOptions.
+ //
+ // * CPU: Compilation for CPU supports the following compiler options. mcpu:
+ // CPU micro-architecture. For example, {'mcpu': 'skylake-avx512'} mattr:
+ // CPU flags. For example, {'mattr': ['+neon', '+vfpv4']}
+ //
+ // * ARM: Details of ARM CPU compilations. NEON: NEON is an implementation
+ // of the Advanced SIMD extension used in ARMv7 processors. For example,
+ // add {'mattr': ['+neon']} to the compiler options if compiling for ARM
+ // 32-bit platform with the NEON support.
+ //
+ // * NVIDIA: Compilation for NVIDIA GPU supports the following compiler options.
+ // gpu_code: Specifies the targeted architecture. trt-ver: Specifies the
+ // TensorRT versions in x.y.z. format. cuda-ver: Specifies the CUDA version
+ // in x.y format.
For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', + // 'cuda-ver': '10.1'} + // + // * ANDROID: Compilation for the Android OS supports the following compiler + // options: ANDROID_PLATFORM: Specifies the Android API levels. Available + // levels range from 21 to 29. For example, {'ANDROID_PLATFORM': 28}. mattr: + // Add {'mattr': ['+neon']} to compiler options if compiling for ARM 32-bit + // platform with NEON support. + // + // * CoreML: Compilation for the CoreML OutputConfig$TargetDevice supports + // the following compiler options: class_labels: Specifies the classification + // labels file name inside input tar.gz file. For example, {"class_labels": + // "imagenet_labels_1000.txt"}. Labels inside the txt file should be separated + // by newlines. + CompilerOptions *string `min:"7" type:"string"` + + // Identifies the S3 bucket where you want Amazon SageMaker to store the model // artifacts. For example, s3://bucket-name/key-name-prefix. // // S3OutputLocation is a required field S3OutputLocation *string `type:"string" required:"true"` - // Identifies the device that you want to run your model on after it has been - // compiled. For example: ml_c5. + // Identifies the target device or the machine learning instance that you want + // to run your model on after the compilation has completed. Alternatively, + // you can specify OS, architecture, and accelerator using TargetPlatform fields. + // It can be used instead of TargetPlatform. + TargetDevice *string `type:"string" enum:"TargetDevice"` + + // Contains information about a target platform that you want your model to + // run on, such as OS, architecture, and accelerators. It is an alternative + // of TargetDevice. + // + // The following examples show how to configure the TargetPlatform and CompilerOptions + // JSON strings for popular target platforms: + // + // * Raspberry Pi 3 Model B+ "TargetPlatform": {"Os": "LINUX", "Arch": "ARM_EABIHF"}, + // "CompilerOptions": {'mattr': ['+neon']} + // + // * Jetson TX2 "TargetPlatform": {"Os": "LINUX", "Arch": "ARM64", "Accelerator": + // "NVIDIA"}, "CompilerOptions": {'gpu-code': 'sm_62', 'trt-ver': '6.0.1', + // 'cuda-ver': '10.0'} + // + // * EC2 m5.2xlarge instance OS "TargetPlatform": {"Os": "LINUX", "Arch": + // "X86_64", "Accelerator": "NVIDIA"}, "CompilerOptions": {'mcpu': 'skylake-avx512'} // - // TargetDevice is a required field - TargetDevice *string `type:"string" required:"true" enum:"TargetDevice"` + // * RK3399 "TargetPlatform": {"Os": "LINUX", "Arch": "ARM64", "Accelerator": + // "MALI"} + // + // * ARMv7 phone (CPU) "TargetPlatform": {"Os": "ANDROID", "Arch": "ARM_EABI"}, + // "CompilerOptions": {'ANDROID_PLATFORM': 25, 'mattr': ['+neon']} + // + // * ARMv8 phone (CPU) "TargetPlatform": {"Os": "ANDROID", "Arch": "ARM64"}, + // "CompilerOptions": {'ANDROID_PLATFORM': 29} + TargetPlatform *TargetPlatform `type:"structure"` } // String returns the string representation @@ -37436,11 +39882,16 @@ func (s OutputConfig) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *OutputConfig) Validate() error { invalidParams := request.ErrInvalidParams{Context: "OutputConfig"} + if s.CompilerOptions != nil && len(*s.CompilerOptions) < 7 { + invalidParams.Add(request.NewErrParamMinLen("CompilerOptions", 7)) + } if s.S3OutputLocation == nil { invalidParams.Add(request.NewErrParamRequired("S3OutputLocation")) } - if s.TargetDevice == nil { - invalidParams.Add(request.NewErrParamRequired("TargetDevice")) + if s.TargetPlatform != nil { + if err := s.TargetPlatform.Validate(); err != nil { + invalidParams.AddNested("TargetPlatform", err.(request.ErrInvalidParams)) + } } if invalidParams.Len() > 0 { @@ -37449,6 +39900,12 @@ func (s *OutputConfig) Validate() error { return nil } +// SetCompilerOptions sets the CompilerOptions field's value. +func (s *OutputConfig) SetCompilerOptions(v string) *OutputConfig { + s.CompilerOptions = &v + return s +} + // SetS3OutputLocation sets the S3OutputLocation field's value. func (s *OutputConfig) SetS3OutputLocation(v string) *OutputConfig { s.S3OutputLocation = &v @@ -37461,6 +39918,12 @@ func (s *OutputConfig) SetTargetDevice(v string) *OutputConfig { return s } +// SetTargetPlatform sets the TargetPlatform field's value. +func (s *OutputConfig) SetTargetPlatform(v *TargetPlatform) *OutputConfig { + s.TargetPlatform = v + return s +} + // Provides information about how to store model training results (model artifacts). type OutputDataConfig struct { _ struct{} `type:"structure"` @@ -37918,6 +40381,239 @@ func (s *ProcessingInput) SetS3Input(v *ProcessingS3Input) *ProcessingInput { return s } +// An Amazon SageMaker processing job that is used to analyze data and evaluate +// models. For more information, see Process Data and Evaluate Models (https://docs.aws.amazon.com/sagemaker/latest/dg/processing-job.html). +type ProcessingJob struct { + _ struct{} `type:"structure"` + + // Configuration to run a processing job in a specified container image. + AppSpecification *AppSpecification `type:"structure"` + + // The Amazon Resource Name (ARN) of the AutoML job associated with this processing + // job. + AutoMLJobArn *string `min:"1" type:"string"` + + // The time the processing job was created. + CreationTime *time.Time `type:"timestamp"` + + // Sets the environment variables in the Docker container. + Environment map[string]*string `type:"map"` + + // A string, up to one KB in size, that contains metadata from the processing + // container when the processing job exits. + ExitMessage *string `type:"string"` + + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob + ExperimentConfig *ExperimentConfig `type:"structure"` + + // A string, up to one KB in size, that contains the reason a processing job + // failed, if it failed. + FailureReason *string `type:"string"` + + // The time the processing job was last modified. + LastModifiedTime *time.Time `type:"timestamp"` + + // The ARN of a monitoring schedule for an endpoint associated with this processing + // job. + MonitoringScheduleArn *string `type:"string"` + + // Networking options for a job, such as network traffic encryption between + // containers, whether to allow inbound and outbound network calls to and from + // containers, and the VPC subnets and security groups to use for VPC-enabled + // jobs. + NetworkConfig *NetworkConfig `type:"structure"` + + // The time that the processing job ended. 
+ ProcessingEndTime *time.Time `type:"timestamp"` + + // For each input, data is downloaded from S3 into the processing container + // before the processing job begins running if "S3InputMode" is set to File. + ProcessingInputs []*ProcessingInput `type:"list"` + + // The ARN of the processing job. + ProcessingJobArn *string `type:"string"` + + // The name of the processing job. + ProcessingJobName *string `min:"1" type:"string"` + + // The status of the processing job. + ProcessingJobStatus *string `type:"string" enum:"ProcessingJobStatus"` + + // The output configuration for the processing job. + ProcessingOutputConfig *ProcessingOutputConfig `type:"structure"` + + // Identifies the resources, ML compute instances, and ML storage volumes to + // deploy for a processing job. In distributed training, you specify more than + // one instance. + ProcessingResources *ProcessingResources `type:"structure"` + + // The time that the processing job started. + ProcessingStartTime *time.Time `type:"timestamp"` + + // The ARN of the role used to create the processing job. + RoleArn *string `min:"20" type:"string"` + + // Specifies a time limit for how long the processing job is allowed to run. + StoppingCondition *ProcessingStoppingCondition `type:"structure"` + + // An array of key-value pairs. For more information, see Using Cost Allocation + // Tags (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html#allocation-whatURL) + // in the AWS Billing and Cost Management User Guide. + Tags []*Tag `type:"list"` + + // The ARN of the training job associated with this processing job. + TrainingJobArn *string `type:"string"` +} + +// String returns the string representation +func (s ProcessingJob) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProcessingJob) GoString() string { + return s.String() +} + +// SetAppSpecification sets the AppSpecification field's value. +func (s *ProcessingJob) SetAppSpecification(v *AppSpecification) *ProcessingJob { + s.AppSpecification = v + return s +} + +// SetAutoMLJobArn sets the AutoMLJobArn field's value. +func (s *ProcessingJob) SetAutoMLJobArn(v string) *ProcessingJob { + s.AutoMLJobArn = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *ProcessingJob) SetCreationTime(v time.Time) *ProcessingJob { + s.CreationTime = &v + return s +} + +// SetEnvironment sets the Environment field's value. +func (s *ProcessingJob) SetEnvironment(v map[string]*string) *ProcessingJob { + s.Environment = v + return s +} + +// SetExitMessage sets the ExitMessage field's value. +func (s *ProcessingJob) SetExitMessage(v string) *ProcessingJob { + s.ExitMessage = &v + return s +} + +// SetExperimentConfig sets the ExperimentConfig field's value. +func (s *ProcessingJob) SetExperimentConfig(v *ExperimentConfig) *ProcessingJob { + s.ExperimentConfig = v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *ProcessingJob) SetFailureReason(v string) *ProcessingJob { + s.FailureReason = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *ProcessingJob) SetLastModifiedTime(v time.Time) *ProcessingJob { + s.LastModifiedTime = &v + return s +} + +// SetMonitoringScheduleArn sets the MonitoringScheduleArn field's value. +func (s *ProcessingJob) SetMonitoringScheduleArn(v string) *ProcessingJob { + s.MonitoringScheduleArn = &v + return s +} + +// SetNetworkConfig sets the NetworkConfig field's value. 
+func (s *ProcessingJob) SetNetworkConfig(v *NetworkConfig) *ProcessingJob { + s.NetworkConfig = v + return s +} + +// SetProcessingEndTime sets the ProcessingEndTime field's value. +func (s *ProcessingJob) SetProcessingEndTime(v time.Time) *ProcessingJob { + s.ProcessingEndTime = &v + return s +} + +// SetProcessingInputs sets the ProcessingInputs field's value. +func (s *ProcessingJob) SetProcessingInputs(v []*ProcessingInput) *ProcessingJob { + s.ProcessingInputs = v + return s +} + +// SetProcessingJobArn sets the ProcessingJobArn field's value. +func (s *ProcessingJob) SetProcessingJobArn(v string) *ProcessingJob { + s.ProcessingJobArn = &v + return s +} + +// SetProcessingJobName sets the ProcessingJobName field's value. +func (s *ProcessingJob) SetProcessingJobName(v string) *ProcessingJob { + s.ProcessingJobName = &v + return s +} + +// SetProcessingJobStatus sets the ProcessingJobStatus field's value. +func (s *ProcessingJob) SetProcessingJobStatus(v string) *ProcessingJob { + s.ProcessingJobStatus = &v + return s +} + +// SetProcessingOutputConfig sets the ProcessingOutputConfig field's value. +func (s *ProcessingJob) SetProcessingOutputConfig(v *ProcessingOutputConfig) *ProcessingJob { + s.ProcessingOutputConfig = v + return s +} + +// SetProcessingResources sets the ProcessingResources field's value. +func (s *ProcessingJob) SetProcessingResources(v *ProcessingResources) *ProcessingJob { + s.ProcessingResources = v + return s +} + +// SetProcessingStartTime sets the ProcessingStartTime field's value. +func (s *ProcessingJob) SetProcessingStartTime(v time.Time) *ProcessingJob { + s.ProcessingStartTime = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *ProcessingJob) SetRoleArn(v string) *ProcessingJob { + s.RoleArn = &v + return s +} + +// SetStoppingCondition sets the StoppingCondition field's value. +func (s *ProcessingJob) SetStoppingCondition(v *ProcessingStoppingCondition) *ProcessingJob { + s.StoppingCondition = v + return s +} + +// SetTags sets the Tags field's value. +func (s *ProcessingJob) SetTags(v []*Tag) *ProcessingJob { + s.Tags = v + return s +} + +// SetTrainingJobArn sets the TrainingJobArn field's value. +func (s *ProcessingJob) SetTrainingJobArn(v string) *ProcessingJob { + s.TrainingJobArn = &v + return s +} + // Summary of information about a processing job. type ProcessingJobSummary struct { _ struct{} `type:"structure"` @@ -38193,7 +40889,7 @@ type ProcessingS3Input struct { // LocalPath is a required field LocalPath *string `type:"string" required:"true"` - // Whether to use Gzip compresion for Amazon S3 storage. + // Whether to use Gzip compression for Amazon S3 storage. S3CompressionType *string `type:"string" enum:"ProcessingS3CompressionType"` // Whether the data stored in Amazon S3 is FullyReplicated or ShardedByS3Key. @@ -38209,7 +40905,7 @@ type ProcessingS3Input struct { // S3DataType is a required field S3DataType *string `type:"string" required:"true" enum:"ProcessingS3DataType"` - // Wether to use File or Pipe input mode. In File mode, Amazon SageMaker copies + // Whether to use File or Pipe input mode. In File mode, Amazon SageMaker copies // the data from the input source onto the local Amazon Elastic Block Store // (Amazon EBS) volumes before starting your training algorithm. This is the // most commonly used input mode. 
In Pipe mode, Amazon SageMaker streams input @@ -38892,6 +41588,12 @@ func (s *PublicWorkforceTaskPrice) SetAmountInUsd(v *USD) *PublicWorkforceTaskPr type RenderUiTemplateInput struct { _ struct{} `type:"structure"` + // The HumanTaskUiArn of the worker UI that you want to render. Do not provide + // a HumanTaskUiArn if you use the UiTemplate parameter. + // + // See a list of available Human Ui Amazon Resource Names (ARNs) in UiConfig. + HumanTaskUiArn *string `type:"string"` + // The Amazon Resource Name (ARN) that has access to the S3 objects that are // used by the template. // @@ -38904,9 +41606,7 @@ type RenderUiTemplateInput struct { Task *RenderableTask `type:"structure" required:"true"` // A Template object containing the worker UI template to render. - // - // UiTemplate is a required field - UiTemplate *UiTemplate `type:"structure" required:"true"` + UiTemplate *UiTemplate `type:"structure"` } // String returns the string representation @@ -38931,9 +41631,6 @@ func (s *RenderUiTemplateInput) Validate() error { if s.Task == nil { invalidParams.Add(request.NewErrParamRequired("Task")) } - if s.UiTemplate == nil { - invalidParams.Add(request.NewErrParamRequired("UiTemplate")) - } if s.Task != nil { if err := s.Task.Validate(); err != nil { invalidParams.AddNested("Task", err.(request.ErrInvalidParams)) @@ -38951,6 +41648,12 @@ func (s *RenderUiTemplateInput) Validate() error { return nil } +// SetHumanTaskUiArn sets the HumanTaskUiArn field's value. +func (s *RenderUiTemplateInput) SetHumanTaskUiArn(v string) *RenderUiTemplateInput { + s.HumanTaskUiArn = &v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *RenderUiTemplateInput) SetRoleArn(v string) *RenderUiTemplateInput { s.RoleArn = &v @@ -39092,7 +41795,7 @@ func (s *RenderingError) SetMessage(v string) *RenderingError { type ResolvedAttributes struct { _ struct{} `type:"structure"` - // Applies a metric to minimize or maximize for the job's objective. + // Specifies a metric to minimize or maximize as the objective of a job. AutoMLJobObjective *AutoMLJobObjective `type:"structure"` // How long a job is allowed to run, or how many candidates a job is allowed @@ -39254,8 +41957,8 @@ func (s *ResourceConfig) SetVolumeSizeInGB(v int64) *ResourceConfig { // Resource being accessed is in use. type ResourceInUse struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -39272,17 +41975,17 @@ func (s ResourceInUse) GoString() string { func newErrorResourceInUse(v protocol.ResponseMetadata) error { return &ResourceInUse{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUse) Code() string { +func (s *ResourceInUse) Code() string { return "ResourceInUse" } // Message returns the exception's message. -func (s ResourceInUse) Message() string { +func (s *ResourceInUse) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -39290,29 +41993,29 @@ func (s ResourceInUse) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUse) OrigErr() error { +func (s *ResourceInUse) OrigErr() error { return nil } -func (s ResourceInUse) Error() string { +func (s *ResourceInUse) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceInUse) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUse) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUse) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUse) RequestID() string { + return s.RespMetadata.RequestID } // You have exceeded an Amazon SageMaker resource limit. For example, you might // have too many training jobs created. type ResourceLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -39329,17 +42032,17 @@ func (s ResourceLimitExceeded) GoString() string { func newErrorResourceLimitExceeded(v protocol.ResponseMetadata) error { return &ResourceLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceLimitExceeded) Code() string { +func (s *ResourceLimitExceeded) Code() string { return "ResourceLimitExceeded" } // Message returns the exception's message. -func (s ResourceLimitExceeded) Message() string { +func (s *ResourceLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -39347,22 +42050,22 @@ func (s ResourceLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceLimitExceeded) OrigErr() error { +func (s *ResourceLimitExceeded) OrigErr() error { return nil } -func (s ResourceLimitExceeded) Error() string { +func (s *ResourceLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the maximum number of training jobs and parallel training jobs @@ -39429,8 +42132,8 @@ func (s *ResourceLimits) SetMaxParallelTrainingJobs(v int64) *ResourceLimits { // Resource being access is not found. type ResourceNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -39447,17 +42150,17 @@ func (s ResourceNotFound) GoString() string { func newErrorResourceNotFound(v protocol.ResponseMetadata) error { return &ResourceNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFound) Code() string { +func (s *ResourceNotFound) Code() string { return "ResourceNotFound" } // Message returns the exception's message. -func (s ResourceNotFound) Message() string { +func (s *ResourceNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -39465,33 +42168,35 @@ func (s ResourceNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceNotFound) OrigErr() error { +func (s *ResourceNotFound) OrigErr() error { return nil } -func (s ResourceNotFound) Error() string { +func (s *ResourceNotFound) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFound) RequestID() string { + return s.RespMetadata.RequestID } -// The instance type and quantity. +// The instance type and the Amazon Resource Name (ARN) of the SageMaker image +// created on the instance. The ARN is stored as metadata in SageMaker Studio +// notebooks. type ResourceSpec struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the environment. - EnvironmentArn *string `type:"string"` - // The instance type. InstanceType *string `type:"string" enum:"AppInstanceType"` + + // The Amazon Resource Name (ARN) of the SageMaker image created on the instance. + SageMakerImageArn *string `type:"string"` } // String returns the string representation @@ -39504,23 +42209,27 @@ func (s ResourceSpec) GoString() string { return s.String() } -// SetEnvironmentArn sets the EnvironmentArn field's value. -func (s *ResourceSpec) SetEnvironmentArn(v string) *ResourceSpec { - s.EnvironmentArn = &v - return s -} - // SetInstanceType sets the InstanceType field's value. func (s *ResourceSpec) SetInstanceType(v string) *ResourceSpec { s.InstanceType = &v return s } -// The retention policy. +// SetSageMakerImageArn sets the SageMakerImageArn field's value. +func (s *ResourceSpec) SetSageMakerImageArn(v string) *ResourceSpec { + s.SageMakerImageArn = &v + return s +} + +// The retention policy for data stored on an Amazon Elastic File System (EFS) +// volume. type RetentionPolicy struct { _ struct{} `type:"structure"` - // The home Amazon Elastic File System (EFS). + // The default is Retain, which specifies to keep the data stored on the EFS + // volume. + // + // Specify Delete to delete the data stored on the EFS volume. HomeEfsFileSystem *string `type:"string" enum:"RetentionType"` } @@ -39586,18 +42295,22 @@ type S3DataSource struct { // Depending on the value specified for the S3DataType, identifies either a // key name prefix or a manifest. For example: // - // * A key name prefix might look like this: s3://bucketname/exampleprefix. + // * A key name prefix might look like this: s3://bucketname/exampleprefix // - // * A manifest might look like this: s3://bucketname/example.manifest The - // manifest is an S3 object which is a JSON file with the following format: - // The preceding JSON matches the following s3Uris: [ {"prefix": "s3://customer_bucket/some/prefix/"}, + // * A manifest might look like this: s3://bucketname/example.manifest A + // manifest is an S3 object which is a JSON file consisting of an array of + // elements. The first element is a prefix which is followed by one or more + // suffixes. SageMaker appends the suffix elements to the prefix to get a + // full set of S3Uri. Note that the prefix must be a valid non-empty S3Uri + // that precludes users from specifying a manifest whose individual S3Uri + // is sourced from different S3 buckets. 
The following code example shows + // a valid manifest format: [ {"prefix": "s3://customer_bucket/some/prefix/"}, // "relative/path/to/custdata-1", "relative/path/custdata-2", ... "relative/path/custdata-N" - // ] The preceding JSON matches the following s3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 + // ] This JSON is equivalent to the following S3Uri list: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 // s3://customer_bucket/some/prefix/relative/path/custdata-2 ... s3://customer_bucket/some/prefix/relative/path/custdata-N - // The complete set of s3uris in this manifest is the input data for the - // channel for this datasource. The object that each s3uris points to must - // be readable by the IAM role that Amazon SageMaker uses to perform tasks - // on your behalf. + // The complete set of S3Uri in this manifest is the input data for the channel + // for this data source. The object that each S3Uri points to must be readable + // by the IAM role that Amazon SageMaker uses to perform tasks on your behalf. // // S3Uri is a required field S3Uri *string `type:"string" required:"true"` @@ -39733,8 +42446,7 @@ func (s *ScheduleConfig) SetScheduleExpression(v string) *ScheduleConfig { // A SearchExpression contains the following components: // // * A list of Filter objects. Each filter defines a simple Boolean expression -// comprised of a resource property name, Boolean operator, and value. A -// SearchExpression can include only one Contains operator. +// comprised of a resource property name, Boolean operator, and value. // // * A list of NestedFilter objects. Each nested filter defines a list of // Boolean expressions using a list of resource properties. A nested filter @@ -39850,13 +42562,12 @@ func (s *SearchExpression) SetSubExpressions(v []*SearchExpression) *SearchExpre type SearchInput struct { _ struct{} `type:"structure"` - // The maximum number of results to return in a SearchResponse. + // The maximum number of results to return. MaxResults *int64 `min:"1" type:"integer"` - // If more than MaxResults resource objects match the specified SearchExpression, - // the SearchResponse includes a NextToken. The NextToken can be passed to the - // next SearchRequest to continue retrieving results for the specified SearchExpression - // and Sort parameters. + // If more than MaxResults resources match the specified SearchExpression, the + // response includes a NextToken. The NextToken can be passed to the next SearchRequest + // to continue retrieving results. NextToken *string `type:"string"` // The name of the Amazon SageMaker resource to search for. @@ -39864,8 +42575,8 @@ type SearchInput struct { // Resource is a required field Resource *string `type:"string" required:"true" enum:"ResourceType"` - // A Boolean conditional statement. Resource objects must satisfy this condition - // to be included in search results. You must provide at least one subexpression, + // A Boolean conditional statement. Resources must satisfy this condition to + // be included in search results. You must provide at least one subexpression, // filter, or nested filter. The maximum number of recursive SubExpressions, // NestedFilters, and Filters that can be included in a SearchExpression object // is 50. @@ -39958,7 +42669,7 @@ type SearchOutput struct { // in the next request. NextToken *string `type:"string"` - // A list of SearchResult objects. + // A list of SearchRecord objects. 
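The S3Uri wording above describes how SageMaker expands a prefix-plus-suffix manifest into the full set of S3 objects for a channel. A minimal sketch of pointing a training channel at such a manifest; the bucket, key, and channel name are placeholders:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
    // An input channel whose data source is a JSON manifest rather than a
    // plain key-name prefix; SageMaker resolves the manifest entries into
    // the list of objects that feed the channel.
    channel := &sagemaker.Channel{
        ChannelName: aws.String("train"), // placeholder channel name
        DataSource: &sagemaker.DataSource{
            S3DataSource: &sagemaker.S3DataSource{
                S3DataType:             aws.String(sagemaker.S3DataTypeManifestFile),
                S3Uri:                  aws.String("s3://customer_bucket/example.manifest"), // placeholder manifest location
                S3DataDistributionType: aws.String(sagemaker.S3DataDistributionFullyReplicated),
            },
        },
    }
    if err := channel.Validate(); err != nil {
        fmt.Println("invalid channel:", err)
        return
    }
    fmt.Println(channel)
}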
Results []*SearchRecord `type:"list"` } @@ -39984,20 +42695,20 @@ func (s *SearchOutput) SetResults(v []*SearchRecord) *SearchOutput { return s } -// An individual search result record that contains a single resource object. +// A single resource returned as part of the Search API response. type SearchRecord struct { _ struct{} `type:"structure"` - // A summary of the properties of an experiment. + // The properties of an experiment. Experiment *Experiment `type:"structure"` - // A TrainingJob object that is returned as part of a Search request. + // The properties of a training job. TrainingJob *TrainingJob `type:"structure"` - // A summary of the properties of a trial. + // The properties of a trial. Trial *Trial `type:"structure"` - // A summary of the properties of a trial component. + // The properties of a trial component. TrialComponent *TrialComponent `type:"structure"` } @@ -40177,17 +42888,24 @@ func (s *SecondaryStatusTransition) SetStatusMessage(v string) *SecondaryStatusT return s } -// The sharing settings. +// Specifies options when sharing an Amazon SageMaker Studio notebook. These +// settings are specified as part of DefaultUserSettings when the CreateDomain +// API is called, and as part of UserSettings when the CreateUserProfile API +// is called. type SharingSettings struct { _ struct{} `type:"structure"` - // The notebook output option. + // Whether to include the notebook cell output when sharing the notebook. The + // default is Disabled. NotebookOutputOption *string `type:"string" enum:"NotebookOutputOption"` - // The AWS Key Management Service encryption key ID. + // When NotebookOutputOption is Allowed, the AWS Key Management Service (KMS) + // encryption key ID used to encrypt the notebook cell output in the Amazon + // S3 bucket. S3KmsKeyId *string `type:"string"` - // The Amazon S3 output path. + // When NotebookOutputOption is Allowed, the Amazon S3 bucket used to save the + // notebook cell output. S3OutputPath *string `type:"string"` } @@ -40287,6 +43005,9 @@ type SourceAlgorithm struct { // The Amazon S3 path where the model artifacts, which result from model training, // are stored. This path must point to a single gzip compressed tar archive // (.tar.gz suffix). + // + // The model artifacts must be in an S3 bucket that is in the same region as + // the algorithm. ModelDataUrl *string `type:"string"` } @@ -40381,15 +43102,16 @@ func (s *SourceAlgorithmSpecification) SetSourceAlgorithms(v []*SourceAlgorithm) } // A list of IP address ranges (CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)). -// Used to create an allow list of IP addresses for a private workforce. For -// more information, see . +// Used to create an allow list of IP addresses for a private workforce. Workers +// will only be able to login to their worker portal from an IP address within +// this range. By default, a workforce isn't restricted to specific IP addresses. type SourceIpConfig struct { _ struct{} `type:"structure"` - // A list of one to four Classless Inter-Domain Routing (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) + // A list of one to ten Classless Inter-Domain Routing (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) // (CIDR) values. // - // Maximum: Four CIDR values + // Maximum: Ten CIDR values // // The following Length Constraints apply to individual CIDR values in the CIDR // value list. 
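SearchInput and SearchOutput above describe NextToken-based paging over SearchRecord results. A hedged sketch that pages through TrainingJob records with the generated SearchPages paginator; the filter property and value are placeholders chosen for illustration:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
    svc := sagemaker.New(session.Must(session.NewSession()))

    input := &sagemaker.SearchInput{
        Resource:   aws.String(sagemaker.ResourceTypeTrainingJob),
        MaxResults: aws.Int64(50),
        SearchExpression: &sagemaker.SearchExpression{
            Filters: []*sagemaker.Filter{{
                Name:     aws.String("TrainingJobStatus"), // placeholder property name
                Operator: aws.String(sagemaker.OperatorEquals),
                Value:    aws.String("Completed"), // placeholder value
            }},
        },
    }

    // SearchPages follows NextToken internally; each page's Results is a list
    // of SearchRecord values, one matching resource per record.
    err := svc.SearchPages(input, func(page *sagemaker.SearchOutput, lastPage bool) bool {
        for _, rec := range page.Results {
            if rec.TrainingJob != nil {
                fmt.Println(aws.StringValue(rec.TrainingJob.TrainingJobName))
            }
        }
        return true // keep paging
    })
    if err != nil {
        log.Fatal(err)
    }
}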
@@ -41103,6 +43825,7 @@ func (s *StoppingCondition) SetMaxWaitTimeInSeconds(v int64) *StoppingCondition type SubscribedWorkteam struct { _ struct{} `type:"structure"` + // Marketplace product listing ID. ListingId *string `type:"string"` // The description of the vendor from the Amazon Marketplace. @@ -41257,11 +43980,99 @@ func (s *Tag) SetValue(v string) *Tag { return s } +// Contains information about a target platform that you want your model to +// run on, such as OS, architecture, and accelerators. It is an alternative +// of TargetDevice. +type TargetPlatform struct { + _ struct{} `type:"structure"` + + // Specifies a target platform accelerator (optional). + // + // * NVIDIA: Nvidia graphics processing unit. It also requires gpu-code, + // trt-ver, cuda-ver compiler options + // + // * MALI: ARM Mali graphics processor + // + // * INTEL_GRAPHICS: Integrated Intel graphics + Accelerator *string `type:"string" enum:"TargetPlatformAccelerator"` + + // Specifies a target platform architecture. + // + // * X86_64: 64-bit version of the x86 instruction set. + // + // * X86: 32-bit version of the x86 instruction set. + // + // * ARM64: ARMv8 64-bit CPU. + // + // * ARM_EABIHF: ARMv7 32-bit, Hard Float. + // + // * ARM_EABI: ARMv7 32-bit, Soft Float. Used by Android 32-bit ARM platform. + // + // Arch is a required field + Arch *string `type:"string" required:"true" enum:"TargetPlatformArch"` + + // Specifies a target platform OS. + // + // * LINUX: Linux-based operating systems. + // + // * ANDROID: Android operating systems. Android API level can be specified + // using the ANDROID_PLATFORM compiler option. For example, "CompilerOptions": + // {'ANDROID_PLATFORM': 28} + // + // Os is a required field + Os *string `type:"string" required:"true" enum:"TargetPlatformOs"` +} + +// String returns the string representation +func (s TargetPlatform) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetPlatform) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetPlatform) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetPlatform"} + if s.Arch == nil { + invalidParams.Add(request.NewErrParamRequired("Arch")) + } + if s.Os == nil { + invalidParams.Add(request.NewErrParamRequired("Os")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccelerator sets the Accelerator field's value. +func (s *TargetPlatform) SetAccelerator(v string) *TargetPlatform { + s.Accelerator = &v + return s +} + +// SetArch sets the Arch field's value. +func (s *TargetPlatform) SetArch(v string) *TargetPlatform { + s.Arch = &v + return s +} + +// SetOs sets the Os field's value. +func (s *TargetPlatform) SetOs(v string) *TargetPlatform { + s.Os = &v + return s +} + // The TensorBoard app settings. type TensorBoardAppSettings struct { _ struct{} `type:"structure"` - // The instance type and quantity. + // The default instance type and the Amazon Resource Name (ARN) of the SageMaker + // image created on the instance. DefaultResourceSpec *ResourceSpec `type:"structure"` } @@ -41368,7 +44179,7 @@ type TrainingJob struct { // When true, enables managed spot training using Amazon EC2 Spot instances // to run training jobs instead of on-demand instances. For more information, - // see model-managed-spot-training. 
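TargetPlatform above is the alternative to TargetDevice when a compilation target is described by OS, architecture, and accelerator. A hedged sketch of an OutputConfig for a CreateCompilationJob request targeting 64-bit ARM Linux with an NVIDIA accelerator, assuming this SDK version's OutputConfig carries the new TargetPlatform and CompilerOptions fields as upstream does; the S3 path and compiler option values are placeholders:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
    outputConfig := &sagemaker.OutputConfig{
        S3OutputLocation: aws.String("s3://example-bucket/compiled-models/"), // placeholder location
        // TargetPlatform replaces TargetDevice when the platform is described
        // explicitly instead of naming a specific device.
        TargetPlatform: &sagemaker.TargetPlatform{
            Os:          aws.String(sagemaker.TargetPlatformOsLinux),
            Arch:        aws.String(sagemaker.TargetPlatformArchArm64),
            Accelerator: aws.String(sagemaker.TargetPlatformAcceleratorNvidia),
        },
        // NVIDIA targets also expect gpu-code/trt-ver/cuda-ver compiler options.
        CompilerOptions: aws.String(`{"gpu-code": "sm_72", "trt-ver": "6.0.1", "cuda-ver": "10.1"}`), // placeholder values
    }
    if err := outputConfig.Validate(); err != nil {
        fmt.Println("invalid output config:", err)
        return
    }
    fmt.Println(outputConfig)
}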
+ // see Managed Spot Training (https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html). EnableManagedSpotTraining *bool `type:"boolean"` // If the TrainingJob was created with network isolation, the value is set to @@ -41376,7 +44187,14 @@ type TrainingJob struct { // VPC they run in. EnableNetworkIsolation *bool `type:"boolean"` - // Configuration for the experiment. + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob ExperimentConfig *ExperimentConfig `type:"structure"` // If the training job failed, the reason it failed. @@ -42286,6 +45104,11 @@ type TransformInput struct { // payloads contain the entire contents of an input object. Set the value of // this parameter to Line to split records on a newline character boundary. // SplitType also supports a number of record-oriented binary data formats. + // Currently, the supported record formats are: + // + // * RecordIO + // + // * TFRecord // // When splitting is enabled, the size of a mini-batch depends on the values // of the BatchStrategy and MaxPayloadInMB parameters. When the value of BatchStrategy @@ -42357,6 +45180,261 @@ func (s *TransformInput) SetSplitType(v string) *TransformInput { return s } +// A batch transform job. For information about SageMaker batch transform, see +// Use Batch Transform (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html). +type TransformJob struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AutoML job that created the transform + // job. + AutoMLJobArn *string `min:"1" type:"string"` + + // Specifies the number of records to include in a mini-batch for an HTTP inference + // request. A record is a single unit of input data that inference can be made + // on. For example, a single line in a CSV file is a record. + BatchStrategy *string `type:"string" enum:"BatchStrategy"` + + // A timestamp that shows when the transform Job was created. + CreationTime *time.Time `type:"timestamp"` + + // The data structure used to specify the data to be used for inference in a + // batch transform job and to associate the data that is relevant to the prediction + // results in the output. The input filter provided allows you to exclude input + // data that is not needed for inference in a batch transform job. The output + // filter provided allows you to include input data relevant to interpreting + // the predictions in the output from the job. For more information, see Associate + // Prediction Results with their Corresponding Input Records (https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform-data-processing.html). + DataProcessing *DataProcessing `type:"structure"` + + // The environment variables to set in the Docker container. We support up to + // 16 key and values entries in the map. + Environment map[string]*string `type:"map"` + + // Associates a SageMaker job as a trial component with an experiment and trial. + // Specified when you call the following APIs: + // + // * CreateProcessingJob + // + // * CreateTrainingJob + // + // * CreateTransformJob + ExperimentConfig *ExperimentConfig `type:"structure"` + + // If the transform job failed, the reason it failed. + FailureReason *string `type:"string"` + + // The Amazon Resource Name (ARN) of the labeling job that created the transform + // job. 
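ExperimentConfig above is what ties a processing, training, or transform job to an experiment and trial as a trial component. A hedged sketch of attaching one to a CreateTransformJob request; every name and S3 path below is a placeholder, and the remaining required transform fields are filled with minimal stand-ins:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
    svc := sagemaker.New(session.Must(session.NewSession()))

    input := &sagemaker.CreateTransformJobInput{
        TransformJobName: aws.String("example-transform-job"), // placeholder
        ModelName:        aws.String("example-model"),         // placeholder
        TransformInput: &sagemaker.TransformInput{
            DataSource: &sagemaker.TransformDataSource{
                S3DataSource: &sagemaker.TransformS3DataSource{
                    S3DataType: aws.String(sagemaker.S3DataTypeS3prefix),
                    S3Uri:      aws.String("s3://example-bucket/input/"), // placeholder
                },
            },
        },
        TransformOutput: &sagemaker.TransformOutput{
            S3OutputPath: aws.String("s3://example-bucket/output/"), // placeholder
        },
        TransformResources: &sagemaker.TransformResources{
            InstanceType:  aws.String(sagemaker.TransformInstanceTypeMlM4Xlarge),
            InstanceCount: aws.Int64(1),
        },
        // ExperimentConfig associates the job, as a trial component, with an
        // existing experiment and trial.
        ExperimentConfig: &sagemaker.ExperimentConfig{
            ExperimentName:            aws.String("example-experiment"), // placeholder
            TrialName:                 aws.String("example-trial"),      // placeholder
            TrialComponentDisplayName: aws.String("example-transform"),  // placeholder
        },
    }

    out, err := svc.CreateTransformJob(input)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.TransformJobArn))
}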
+ LabelingJobArn *string `type:"string"` + + // The maximum number of parallel requests that can be sent to each instance + // in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, + // SageMaker checks the optional execution-parameters to determine the settings + // for your chosen algorithm. If the execution-parameters endpoint is not enabled, + // the default value is 1. For built-in algorithms, you don't need to set a + // value for MaxConcurrentTransforms. + MaxConcurrentTransforms *int64 `type:"integer"` + + // The maximum allowed size of the payload, in MB. A payload is the data portion + // of a record (without metadata). The value in MaxPayloadInMB must be greater + // than, or equal to, the size of a single record. To estimate the size of a + // record in MB, divide the size of your dataset by the number of records. To + // ensure that the records fit within the maximum payload size, we recommend + // using a slightly larger value. The default value is 6 MB. For cases where + // the payload might be arbitrarily large and is transmitted using HTTP chunked + // encoding, set the value to 0. This feature works only in supported algorithms. + // Currently, SageMaker built-in algorithms do not support HTTP chunked encoding. + MaxPayloadInMB *int64 `type:"integer"` + + // Configures the timeout and maximum number of retries for processing a transform + // job invocation. + ModelClientConfig *ModelClientConfig `type:"structure"` + + // The name of the model associated with the transform job. + ModelName *string `type:"string"` + + // A list of tags associated with the transform job. + Tags []*Tag `type:"list"` + + // Indicates when the transform job has been completed, or has stopped or failed. + // You are billed for the time interval between this time and the value of TransformStartTime. + TransformEndTime *time.Time `type:"timestamp"` + + // Describes the input source of a transform job and the way the transform job + // consumes it. + TransformInput *TransformInput `type:"structure"` + + // The Amazon Resource Name (ARN) of the transform job. + TransformJobArn *string `type:"string"` + + // The name of the transform job. + TransformJobName *string `min:"1" type:"string"` + + // The status of the transform job. + // + // Transform job statuses are: + // + // * InProgress - The job is in progress. + // + // * Completed - The job has completed. + // + // * Failed - The transform job has failed. To see the reason for the failure, + // see the FailureReason field in the response to a DescribeTransformJob + // call. + // + // * Stopping - The transform job is stopping. + // + // * Stopped - The transform job has stopped. + TransformJobStatus *string `type:"string" enum:"TransformJobStatus"` + + // Describes the results of a transform job. + TransformOutput *TransformOutput `type:"structure"` + + // Describes the resources, including ML instance types and ML instance count, + // to use for transform job. + TransformResources *TransformResources `type:"structure"` + + // Indicates when the transform job starts on ML instances. You are billed for + // the time interval between this time and the value of TransformEndTime. + TransformStartTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s TransformJob) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransformJob) GoString() string { + return s.String() +} + +// SetAutoMLJobArn sets the AutoMLJobArn field's value. 
+func (s *TransformJob) SetAutoMLJobArn(v string) *TransformJob { + s.AutoMLJobArn = &v + return s +} + +// SetBatchStrategy sets the BatchStrategy field's value. +func (s *TransformJob) SetBatchStrategy(v string) *TransformJob { + s.BatchStrategy = &v + return s +} + +// SetCreationTime sets the CreationTime field's value. +func (s *TransformJob) SetCreationTime(v time.Time) *TransformJob { + s.CreationTime = &v + return s +} + +// SetDataProcessing sets the DataProcessing field's value. +func (s *TransformJob) SetDataProcessing(v *DataProcessing) *TransformJob { + s.DataProcessing = v + return s +} + +// SetEnvironment sets the Environment field's value. +func (s *TransformJob) SetEnvironment(v map[string]*string) *TransformJob { + s.Environment = v + return s +} + +// SetExperimentConfig sets the ExperimentConfig field's value. +func (s *TransformJob) SetExperimentConfig(v *ExperimentConfig) *TransformJob { + s.ExperimentConfig = v + return s +} + +// SetFailureReason sets the FailureReason field's value. +func (s *TransformJob) SetFailureReason(v string) *TransformJob { + s.FailureReason = &v + return s +} + +// SetLabelingJobArn sets the LabelingJobArn field's value. +func (s *TransformJob) SetLabelingJobArn(v string) *TransformJob { + s.LabelingJobArn = &v + return s +} + +// SetMaxConcurrentTransforms sets the MaxConcurrentTransforms field's value. +func (s *TransformJob) SetMaxConcurrentTransforms(v int64) *TransformJob { + s.MaxConcurrentTransforms = &v + return s +} + +// SetMaxPayloadInMB sets the MaxPayloadInMB field's value. +func (s *TransformJob) SetMaxPayloadInMB(v int64) *TransformJob { + s.MaxPayloadInMB = &v + return s +} + +// SetModelClientConfig sets the ModelClientConfig field's value. +func (s *TransformJob) SetModelClientConfig(v *ModelClientConfig) *TransformJob { + s.ModelClientConfig = v + return s +} + +// SetModelName sets the ModelName field's value. +func (s *TransformJob) SetModelName(v string) *TransformJob { + s.ModelName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TransformJob) SetTags(v []*Tag) *TransformJob { + s.Tags = v + return s +} + +// SetTransformEndTime sets the TransformEndTime field's value. +func (s *TransformJob) SetTransformEndTime(v time.Time) *TransformJob { + s.TransformEndTime = &v + return s +} + +// SetTransformInput sets the TransformInput field's value. +func (s *TransformJob) SetTransformInput(v *TransformInput) *TransformJob { + s.TransformInput = v + return s +} + +// SetTransformJobArn sets the TransformJobArn field's value. +func (s *TransformJob) SetTransformJobArn(v string) *TransformJob { + s.TransformJobArn = &v + return s +} + +// SetTransformJobName sets the TransformJobName field's value. +func (s *TransformJob) SetTransformJobName(v string) *TransformJob { + s.TransformJobName = &v + return s +} + +// SetTransformJobStatus sets the TransformJobStatus field's value. +func (s *TransformJob) SetTransformJobStatus(v string) *TransformJob { + s.TransformJobStatus = &v + return s +} + +// SetTransformOutput sets the TransformOutput field's value. +func (s *TransformJob) SetTransformOutput(v *TransformOutput) *TransformJob { + s.TransformOutput = v + return s +} + +// SetTransformResources sets the TransformResources field's value. +func (s *TransformJob) SetTransformResources(v *TransformResources) *TransformJob { + s.TransformResources = v + return s +} + +// SetTransformStartTime sets the TransformStartTime field's value. 
+func (s *TransformJob) SetTransformStartTime(v time.Time) *TransformJob { + s.TransformStartTime = &v + return s +} + // Defines the input needed to run a transform job using the inference specification // specified in the algorithm. type TransformJobDefinition struct { @@ -42785,7 +45863,7 @@ type TransformS3DataSource struct { // manifest is an S3 object which is a JSON file with the following format: // [ {"prefix": "s3://customer_bucket/some/prefix/"}, "relative/path/to/custdata-1", // "relative/path/custdata-2", ... "relative/path/custdata-N" ] The preceding - // JSON matches the following s3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 + // JSON matches the following S3Uris: s3://customer_bucket/some/prefix/relative/path/to/custdata-1 // s3://customer_bucket/some/prefix/relative/path/custdata-2 ... s3://customer_bucket/some/prefix/relative/path/custdata-N // The complete set of S3Uris in this manifest constitutes the input data // for the channel for this datasource. The object that each S3Uris points @@ -42834,7 +45912,7 @@ func (s *TransformS3DataSource) SetS3Uri(v string) *TransformS3DataSource { return s } -// A summary of the properties of a trial as returned by the Search API. +// The properties of a trial as returned by the Search API. type Trial struct { _ struct{} `type:"structure"` @@ -42953,8 +46031,7 @@ func (s *Trial) SetTrialName(v string) *Trial { return s } -// A summary of the properties of a trial component as returned by the Search -// API. +// The properties of a trial component as returned by the Search API. type TrialComponent struct { _ struct{} `type:"structure"` @@ -42996,10 +46073,10 @@ type TrialComponent struct { // not have any parents. Parents []*Parent `type:"list"` - // The source of the trial component. + // The Amazon Resource Name (ARN) and job type of the source of the component. Source *TrialComponentSource `type:"structure"` - // The source of the trial component.> + // Details of the source of the component. SourceDetail *TrialComponentSourceDetail `type:"structure"` // When the component started. @@ -43345,7 +46422,7 @@ type TrialComponentSimpleSummary struct { // The name of the trial component. TrialComponentName *string `min:"1" type:"string"` - // The source of the trial component. + // The Amazon Resource Name (ARN) and job type of the source of a trial component. TrialComponentSource *TrialComponentSource `type:"structure"` } @@ -43389,11 +46466,11 @@ func (s *TrialComponentSimpleSummary) SetTrialComponentSource(v *TrialComponentS return s } -// The source of the trial component. +// The Amazon Resource Name (ARN) and job type of the source of a trial component. type TrialComponentSource struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the source. + // The source ARN. // // SourceArn is a required field SourceArn *string `type:"string" required:"true"` @@ -43424,15 +46501,22 @@ func (s *TrialComponentSource) SetSourceType(v string) *TrialComponentSource { return s } -// Detailed information about the source of a trial component. +// Detailed information about the source of a trial component. Either ProcessingJob +// or TrainingJob is returned. type TrialComponentSourceDetail struct { _ struct{} `type:"structure"` + // Information about a processing job that's the source of a trial component. + ProcessingJob *ProcessingJob `type:"structure"` + // The Amazon Resource Name (ARN) of the source. SourceArn *string `type:"string"` - // Contains information about a training job. 
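With the TransformJob type above, a trial component's SourceDetail can now describe a processing, training, or transform job. A hedged sketch of inspecting that detail on trial-component search results; the unfiltered search is only for illustration:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
    svc := sagemaker.New(session.Must(session.NewSession()))

    err := svc.SearchPages(&sagemaker.SearchInput{
        Resource: aws.String(sagemaker.ResourceTypeExperimentTrialComponent),
    }, func(page *sagemaker.SearchOutput, lastPage bool) bool {
        for _, rec := range page.Results {
            tc := rec.TrialComponent
            if tc == nil || tc.SourceDetail == nil {
                continue
            }
            // Only one of ProcessingJob, TrainingJob, or TransformJob is
            // expected to be populated for a given source.
            switch {
            case tc.SourceDetail.ProcessingJob != nil:
                fmt.Println("processing job:", aws.StringValue(tc.SourceDetail.SourceArn))
            case tc.SourceDetail.TrainingJob != nil:
                fmt.Println("training job:", aws.StringValue(tc.SourceDetail.SourceArn))
            case tc.SourceDetail.TransformJob != nil:
                fmt.Println("transform job:", aws.StringValue(tc.SourceDetail.TransformJob.TransformJobArn))
            }
        }
        return true
    })
    if err != nil {
        log.Fatal(err)
    }
}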
+ // Information about a training job that's the source of a trial component. TrainingJob *TrainingJob `type:"structure"` + + // Information about a transform job that's the source of a trial component. + TransformJob *TransformJob `type:"structure"` } // String returns the string representation @@ -43445,6 +46529,12 @@ func (s TrialComponentSourceDetail) GoString() string { return s.String() } +// SetProcessingJob sets the ProcessingJob field's value. +func (s *TrialComponentSourceDetail) SetProcessingJob(v *ProcessingJob) *TrialComponentSourceDetail { + s.ProcessingJob = v + return s +} + // SetSourceArn sets the SourceArn field's value. func (s *TrialComponentSourceDetail) SetSourceArn(v string) *TrialComponentSourceDetail { s.SourceArn = &v @@ -43457,6 +46547,12 @@ func (s *TrialComponentSourceDetail) SetTrainingJob(v *TrainingJob) *TrialCompon return s } +// SetTransformJob sets the TransformJob field's value. +func (s *TrialComponentSourceDetail) SetTransformJob(v *TransformJob) *TrialComponentSourceDetail { + s.TransformJob = v + return s +} + // The status of the trial component. type TrialComponentStatus struct { _ struct{} `type:"structure"` @@ -43532,7 +46628,7 @@ type TrialComponentSummary struct { // The name of the trial component. TrialComponentName *string `min:"1" type:"string"` - // The source of the trial component. + // The Amazon Resource Name (ARN) and job type of the source of a trial component. TrialComponentSource *TrialComponentSource `type:"structure"` } @@ -43722,7 +46818,7 @@ func (s *TrialSummary) SetTrialSource(v *TrialSource) *TrialSummary { type TuningJobCompletionCriteria struct { _ struct{} `type:"structure"` - // The objective metric's value. + // The value of the objective metric. // // TargetObjectiveMetricValue is a required field TargetObjectiveMetricValue *float64 `type:"float" required:"true"` @@ -43803,12 +46899,50 @@ func (s *USD) SetTenthFractionsOfACent(v int64) *USD { type UiConfig struct { _ struct{} `type:"structure"` - // The Amazon S3 bucket location of the UI template. For more information about - // the contents of a UI template, see Creating Your Custom Labeling Task Template - // (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates-step2.html). + // The ARN of the worker task template used to render the worker UI and tools + // for labeling job tasks. + // + // Use this parameter when you are creating a labeling job for 3D point cloud + // and video fram labeling jobs. Use your labeling job task type to select one + // of the following ARN's and use it with this parameter when you create a labeling + // job. Replace aws-region with the AWS region you are creating your labeling + // job in. + // + // 3D Point Cloud HumanTaskUiArns + // + // Use this HumanTaskUiArn for 3D point cloud object detection and 3D point + // cloud object detection adjustment labeling jobs. + // + // * arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectDetection + // + // Use this HumanTaskUiArn for 3D point cloud object tracking and 3D point cloud + // object tracking adjustment labeling jobs. + // + // * arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectTracking + // + // Use this HumanTaskUiArn for 3D point cloud semantic segmentation and 3D point + // cloud semantic segmentation adjustment labeling jobs. 
// - // UiTemplateS3Uri is a required field - UiTemplateS3Uri *string `type:"string" required:"true"` + // * arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudSemanticSegmentation + // + // Video Frame HumanTaskUiArns + // + // Use this HumanTaskUiArn for video frame object detection and video frame + // object detection adjustment labeling jobs. + // + // * arn:aws:sagemaker:region:394669845002:human-task-ui/VideoObjectDetection + // + // Use this HumanTaskUiArn for video frame object tracking and video frame object + // tracking adjustment labeling jobs. + // + // * arn:aws:sagemaker:aws-region:394669845002:human-task-ui/VideoObjectTracking + HumanTaskUiArn *string `type:"string"` + + // The Amazon S3 bucket location of the UI template, or worker task template. + // This is the template used to render the worker UI and tools for labeling + // job tasks. For more information about the contents of a UI template, see + // Creating Your Custom Labeling Task Template (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-custom-templates-step2.html). + UiTemplateS3Uri *string `type:"string"` } // String returns the string representation @@ -43821,17 +46955,10 @@ func (s UiConfig) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *UiConfig) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "UiConfig"} - if s.UiTemplateS3Uri == nil { - invalidParams.Add(request.NewErrParamRequired("UiTemplateS3Uri")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetHumanTaskUiArn sets the HumanTaskUiArn field's value. +func (s *UiConfig) SetHumanTaskUiArn(v string) *UiConfig { + s.HumanTaskUiArn = &v + return s } // SetUiTemplateS3Uri sets the UiTemplateS3Uri field's value. @@ -43886,7 +47013,7 @@ func (s *UiTemplate) SetContent(v string) *UiTemplate { type UiTemplateInfo struct { _ struct{} `type:"structure"` - // The SHA 256 hash that you used to create the request signature. + // The SHA-256 digest of the contents of the template. ContentSha256 *string `min:"1" type:"string"` // The URL for the user interface template. @@ -44006,7 +47133,7 @@ type UpdateDomainInput struct { // A collection of settings. DefaultUserSettings *UserSettings `type:"structure"` - // The domain ID. + // The ID of the domain to be updated. // // DomainId is a required field DomainId *string `type:"string" required:"true"` @@ -44055,7 +47182,7 @@ func (s *UpdateDomainInput) SetDomainId(v string) *UpdateDomainInput { type UpdateDomainOutput struct { _ struct{} `type:"structure"` - // The domain Amazon Resource Name (ARN). + // The Amazon Resource Name (ARN) of the domain. DomainArn *string `type:"string"` } @@ -44088,12 +47215,11 @@ type UpdateEndpointInput struct { // EndpointName is a required field EndpointName *string `type:"string" required:"true"` - // When you are updating endpoint resources with RetainAllVariantProperties - // (https://docs.aws.amazon.com/sagemaker/latest/dg/API_UpdateEndpoint.html#SageMaker-UpdateEndpoint-request-RetainAllVariantProperties), + // When you are updating endpoint resources with UpdateEndpointInput$RetainAllVariantProperties, // whose value is set to true, ExcludeRetainedVariantProperties specifies the - // list of type VariantProperty (https://docs.aws.amazon.com/sagemaker/latest/dg/API_VariantProperty.html) - // to override with the values provided by EndpointConfig. 
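The UiConfig documentation above lists the service-owned HumanTaskUiArn values for 3D point cloud and video frame tasks and makes UiTemplateS3Uri optional. A minimal sketch of a UiConfig that references one of those ARNs instead of a custom template; the region in the ARN and the S3 path in the comment are placeholders:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
    // For 3D point cloud and video frame labeling jobs, reference one of the
    // service-owned worker task templates and omit UiTemplateS3Uri.
    uiConfig := &sagemaker.UiConfig{
        HumanTaskUiArn: aws.String("arn:aws:sagemaker:us-west-2:394669845002:human-task-ui/PointCloudObjectDetection"), // region is a placeholder
    }

    // For other task types, keep using a custom template stored in S3 instead:
    //   uiConfig := &sagemaker.UiConfig{
    //       UiTemplateS3Uri: aws.String("s3://example-bucket/templates/instructions.template"),
    //   }

    fmt.Println(uiConfig)
}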
If you don't specify - // a value for ExcludeAllVariantProperties, no variant properties are overridden. + // list of type VariantProperty to override with the values provided by EndpointConfig. + // If you don't specify a value for ExcludeAllVariantProperties, no variant + // properties are overridden. ExcludeRetainedVariantProperties []*VariantProperty `type:"list"` // When updating endpoint resources, enables or disables the retention of variant @@ -45105,15 +48231,18 @@ func (s *UpdateUserProfileOutput) SetUserProfileArn(v string) *UpdateUserProfile type UpdateWorkforceInput struct { _ struct{} `type:"structure"` - // A list of one to four worker IP address ranges (CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)) + // Use this parameter to update your OIDC Identity Provider (IdP) configuration + // for a workforce made using your own IdP. + OidcConfig *OidcConfig `type:"structure"` + + // A list of one to ten worker IP address ranges (CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)) // that can be used to access tasks assigned to this workforce. // - // Maximum: Four CIDR values + // Maximum: Ten CIDR values SourceIpConfig *SourceIpConfig `type:"structure"` - // The name of the private workforce whose access you want to restrict. WorkforceName - // is automatically set to default when a workforce is created and cannot be - // modified. + // The name of the private workforce that you want to update. You can find your + // workforce name by using the operation. // // WorkforceName is a required field WorkforceName *string `min:"1" type:"string" required:"true"` @@ -45138,6 +48267,11 @@ func (s *UpdateWorkforceInput) Validate() error { if s.WorkforceName != nil && len(*s.WorkforceName) < 1 { invalidParams.Add(request.NewErrParamMinLen("WorkforceName", 1)) } + if s.OidcConfig != nil { + if err := s.OidcConfig.Validate(); err != nil { + invalidParams.AddNested("OidcConfig", err.(request.ErrInvalidParams)) + } + } if s.SourceIpConfig != nil { if err := s.SourceIpConfig.Validate(); err != nil { invalidParams.AddNested("SourceIpConfig", err.(request.ErrInvalidParams)) @@ -45150,6 +48284,12 @@ func (s *UpdateWorkforceInput) Validate() error { return nil } +// SetOidcConfig sets the OidcConfig field's value. +func (s *UpdateWorkforceInput) SetOidcConfig(v *OidcConfig) *UpdateWorkforceInput { + s.OidcConfig = v + return s +} + // SetSourceIpConfig sets the SourceIpConfig field's value. func (s *UpdateWorkforceInput) SetSourceIpConfig(v *SourceIpConfig) *UpdateWorkforceInput { s.SourceIpConfig = v @@ -45165,8 +48305,7 @@ func (s *UpdateWorkforceInput) SetWorkforceName(v string) *UpdateWorkforceInput type UpdateWorkforceOutput struct { _ struct{} `type:"structure"` - // A single private workforce, which is automatically created when you create - // your first private work team. You can create one private work force in each + // A single private workforce. You can create one private work force in each // AWS Region. By default, any workforce-related API operation used in a specific // region will apply to the workforce created in that region. To learn how to // create a private workforce, see Create a Private Workforce (https://docs.aws.amazon.com/sagemaker/latest/dg/sms-workforce-create-private.html). @@ -45197,7 +48336,27 @@ type UpdateWorkteamInput struct { // An updated description for the work team. Description *string `min:"1" type:"string"` - // A list of MemberDefinition objects that contain the updated work team members. 
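UpdateWorkforceInput above now accepts an OidcConfig alongside a CIDR allow list of up to ten ranges. A hedged sketch that restricts a workforce's worker portal to two ranges; the workforce name and CIDRs are placeholders:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
    svc := sagemaker.New(session.Must(session.NewSession()))

    out, err := svc.UpdateWorkforce(&sagemaker.UpdateWorkforceInput{
        WorkforceName: aws.String("default"), // placeholder workforce name
        // Up to ten CIDR ranges; workers can only reach the worker portal
        // from addresses inside these ranges.
        SourceIpConfig: &sagemaker.SourceIpConfig{
            Cidrs: aws.StringSlice([]string{"203.0.113.0/24", "198.51.100.0/24"}), // placeholder ranges
        },
        // For a workforce backed by your own IdP, OidcConfig can be updated in
        // the same request; it is omitted here for a Cognito-backed workforce.
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.Workforce.WorkforceArn))
}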
+ // A list of MemberDefinition objects that contains objects that identify the + // workers that make up the work team. + // + // Workforces can be created using Amazon Cognito or your own OIDC Identity + // Provider (IdP). For private workforces created using Amazon Cognito use CognitoMemberDefinition. + // For workforces created using your own OIDC identity provider (IdP) use OidcMemberDefinition. + // You should not provide input for both of these parameters in a single request. + // + // For workforces created using Amazon Cognito, private work teams correspond + // to Amazon Cognito user groups within the user pool used to create a workforce. + // All of the CognitoMemberDefinition objects that make up the member definition + // must have the same ClientId and UserPool values. To add a Amazon Cognito + // user group to an existing worker pool, see Adding groups to a User Pool. + // For more information about user pools, see Amazon Cognito User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). + // + // For workforces created using your own OIDC IdP, specify the user groups that + // you want to include in your private work team in OidcMemberDefinition by + // listing those groups in Groups. Be aware that user groups that are already + // in the work team must also be listed in Groups when you make this request + // to remain on the work team. If you do not include these user groups, they + // will no longer be associated with the work team you update. MemberDefinitions []*MemberDefinition `min:"1" type:"list"` // Configures SNS topic notifications for available or expiring work items @@ -45487,9 +48646,8 @@ func (s *UserSettings) SetTensorBoardAppSettings(v *TensorBoardAppSettings) *Use // Specifies a production variant property type for an Endpoint. // -// If you are updating an endpoint with the RetainAllVariantProperties (https://docs.aws.amazon.com/sagemaker/latest/dg/API_UpdateEndpoint.html#SageMaker-UpdateEndpoint-request-RetainAllVariantProperties) -// option set to true, the VariantProperty objects listed in ExcludeRetainedVariantProperties -// (https://docs.aws.amazon.com/sagemaker/latest/dg/API_UpdateEndpoint.html#SageMaker-UpdateEndpoint-request-ExcludeRetainedVariantProperties) +// If you are updating an endpoint with the UpdateEndpointInput$RetainAllVariantProperties +// option set to true, the VariantProperty objects listed in UpdateEndpointInput$ExcludeRetainedVariantProperties // override the existing variant properties of the endpoint. type VariantProperty struct { _ struct{} `type:"structure"` @@ -45497,12 +48655,10 @@ type VariantProperty struct { // The type of variant property. The supported values are: // // * DesiredInstanceCount: Overrides the existing variant instance counts - // using the InitialInstanceCount (https://docs.aws.amazon.com/sagemaker/latest/dg/API_ProductionVariant.html#SageMaker-Type-ProductionVariant-InitialInstanceCount) - // values in the ProductionVariants (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpointConfig.html#SageMaker-CreateEndpointConfig-request-ProductionVariants). + // using the ProductionVariant$InitialInstanceCount values in the CreateEndpointConfigInput$ProductionVariants. 
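The MemberDefinitions note above says an OIDC-backed work team is updated by listing every IdP group that should remain on the team. A hedged sketch of such an update; the team and group names are placeholders:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
    svc := sagemaker.New(session.Must(session.NewSession()))

    out, err := svc.UpdateWorkteam(&sagemaker.UpdateWorkteamInput{
        WorkteamName: aws.String("example-workteam"), // placeholder
        MemberDefinitions: []*sagemaker.MemberDefinition{{
            // For an OIDC workforce, list every IdP group that should stay on
            // the team; groups left out of this list are removed from it.
            OidcMemberDefinition: &sagemaker.OidcMemberDefinition{
                Groups: aws.StringSlice([]string{"data-labelers", "labeling-admins"}), // placeholder groups
            },
        }},
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.Workteam.WorkteamArn))
}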
// - // * DesiredWeight: Overrides the existing variant weights using the InitialVariantWeight - // (https://docs.aws.amazon.com/sagemaker/latest/dg/API_ProductionVariant.html#SageMaker-Type-ProductionVariant-InitialVariantWeight) - // values in the ProductionVariants (https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateEndpointConfig.html#SageMaker-CreateEndpointConfig-request-ProductionVariants). + // * DesiredWeight: Overrides the existing variant weights using the ProductionVariant$InitialVariantWeight + // values in the CreateEndpointConfigInput$ProductionVariants. // // * DataCaptureConfig: (Not currently supported.) // @@ -45613,23 +48769,35 @@ func (s *VpcConfig) SetSubnets(v []*string) *VpcConfig { type Workforce struct { _ struct{} `type:"structure"` + // The configuration of an Amazon Cognito workforce. A single Cognito workforce + // is created using and corresponds to a single Amazon Cognito user pool (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools.html). + CognitoConfig *CognitoConfig `type:"structure"` + + // The date that the workforce is created. + CreateDate *time.Time `type:"timestamp"` + // The most recent date that was used to successfully add one or more IP address // ranges (CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)) // to a private workforce's allow list. LastUpdatedDate *time.Time `type:"timestamp"` - // A list of one to four IP address ranges (CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)) - // to be added to the workforce allow list. + // The configuration of an OIDC Identity Provider (IdP) private workforce. + OidcConfig *OidcConfigForResponse `type:"structure"` + + // A list of one to ten IP address ranges (CIDRs (https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html)) + // to be added to the workforce allow list. By default, a workforce isn't restricted + // to specific IP addresses. SourceIpConfig *SourceIpConfig `type:"structure"` + // The subdomain for your OIDC Identity Provider. + SubDomain *string `type:"string"` + // The Amazon Resource Name (ARN) of the private workforce. // // WorkforceArn is a required field WorkforceArn *string `type:"string" required:"true"` - // The name of the private workforce whose access you want to restrict. WorkforceName - // is automatically set to default when a workforce is created and cannot be - // modified. + // The name of the private workforce. // // WorkforceName is a required field WorkforceName *string `min:"1" type:"string" required:"true"` @@ -45645,18 +48813,42 @@ func (s Workforce) GoString() string { return s.String() } +// SetCognitoConfig sets the CognitoConfig field's value. +func (s *Workforce) SetCognitoConfig(v *CognitoConfig) *Workforce { + s.CognitoConfig = v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *Workforce) SetCreateDate(v time.Time) *Workforce { + s.CreateDate = &v + return s +} + // SetLastUpdatedDate sets the LastUpdatedDate field's value. func (s *Workforce) SetLastUpdatedDate(v time.Time) *Workforce { s.LastUpdatedDate = &v return s } +// SetOidcConfig sets the OidcConfig field's value. +func (s *Workforce) SetOidcConfig(v *OidcConfigForResponse) *Workforce { + s.OidcConfig = v + return s +} + // SetSourceIpConfig sets the SourceIpConfig field's value. func (s *Workforce) SetSourceIpConfig(v *SourceIpConfig) *Workforce { s.SourceIpConfig = v return s } +// SetSubDomain sets the SubDomain field's value. 
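VariantProperty above controls which existing variant settings survive an endpoint update. A hedged sketch that retains the endpoint's current variant properties except the desired weights, which are re-read from the new endpoint config; the endpoint and config names are placeholders:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
    svc := sagemaker.New(session.Must(session.NewSession()))

    out, err := svc.UpdateEndpoint(&sagemaker.UpdateEndpointInput{
        EndpointName:       aws.String("example-endpoint"),        // placeholder
        EndpointConfigName: aws.String("example-endpoint-config"), // placeholder
        // Keep the variant properties the endpoint already has...
        RetainAllVariantProperties: aws.Bool(true),
        // ...except DesiredWeight, which is overridden by the
        // InitialVariantWeight values in the new endpoint config.
        ExcludeRetainedVariantProperties: []*sagemaker.VariantProperty{{
            VariantPropertyType: aws.String(sagemaker.VariantPropertyTypeDesiredWeight),
        }},
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.EndpointArn))
}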
+func (s *Workforce) SetSubDomain(v string) *Workforce { + s.SubDomain = &v + return s +} + // SetWorkforceArn sets the WorkforceArn field's value. func (s *Workforce) SetWorkforceArn(v string) *Workforce { s.WorkforceArn = &v @@ -45684,7 +48876,12 @@ type Workteam struct { // The date and time that the work team was last updated (timestamp). LastUpdatedDate *time.Time `type:"timestamp"` - // The Amazon Cognito user groups that make up the work team. + // A list of MemberDefinition objects that contains objects that identify the + // workers that make up the work team. + // + // Workforces can be created using Amazon Cognito or your own OIDC Identity + // Provider (IdP). For private workforces created using Amazon Cognito use CognitoMemberDefinition. + // For workforces created using your own OIDC identity provider (IdP) use OidcMemberDefinition. // // MemberDefinitions is a required field MemberDefinitions []*MemberDefinition `min:"1" type:"list" required:"true"` @@ -45700,6 +48897,9 @@ type Workteam struct { // labeling your data objects. SubDomain *string `type:"string"` + // The Amazon Resource Name (ARN) of the workforce. + WorkforceArn *string `type:"string"` + // The Amazon Resource Name (ARN) that identifies the work team. // // WorkteamArn is a required field @@ -45763,6 +48963,12 @@ func (s *Workteam) SetSubDomain(v string) *Workteam { return s } +// SetWorkforceArn sets the WorkforceArn field's value. +func (s *Workteam) SetWorkforceArn(v string) *Workteam { + s.WorkforceArn = &v + return s +} + // SetWorkteamArn sets the WorkteamArn field's value. func (s *Workteam) SetWorkteamArn(v string) *Workteam { s.WorkteamArn = &v @@ -45783,6 +48989,14 @@ const ( AlgorithmSortByCreationTime = "CreationTime" ) +// AlgorithmSortBy_Values returns all elements of the AlgorithmSortBy enum +func AlgorithmSortBy_Values() []string { + return []string{ + AlgorithmSortByName, + AlgorithmSortByCreationTime, + } +} + const ( // AlgorithmStatusPending is a AlgorithmStatus enum value AlgorithmStatusPending = "Pending" @@ -45800,6 +49014,17 @@ const ( AlgorithmStatusDeleting = "Deleting" ) +// AlgorithmStatus_Values returns all elements of the AlgorithmStatus enum +func AlgorithmStatus_Values() []string { + return []string{ + AlgorithmStatusPending, + AlgorithmStatusInProgress, + AlgorithmStatusCompleted, + AlgorithmStatusFailed, + AlgorithmStatusDeleting, + } +} + const ( // AppInstanceTypeSystem is a AppInstanceType enum value AppInstanceTypeSystem = "system" @@ -45898,11 +49123,72 @@ const ( AppInstanceTypeMlG4dn16xlarge = "ml.g4dn.16xlarge" ) +// AppInstanceType_Values returns all elements of the AppInstanceType enum +func AppInstanceType_Values() []string { + return []string{ + AppInstanceTypeSystem, + AppInstanceTypeMlT3Micro, + AppInstanceTypeMlT3Small, + AppInstanceTypeMlT3Medium, + AppInstanceTypeMlT3Large, + AppInstanceTypeMlT3Xlarge, + AppInstanceTypeMlT32xlarge, + AppInstanceTypeMlM5Large, + AppInstanceTypeMlM5Xlarge, + AppInstanceTypeMlM52xlarge, + AppInstanceTypeMlM54xlarge, + AppInstanceTypeMlM58xlarge, + AppInstanceTypeMlM512xlarge, + AppInstanceTypeMlM516xlarge, + AppInstanceTypeMlM524xlarge, + AppInstanceTypeMlC5Large, + AppInstanceTypeMlC5Xlarge, + AppInstanceTypeMlC52xlarge, + AppInstanceTypeMlC54xlarge, + AppInstanceTypeMlC59xlarge, + AppInstanceTypeMlC512xlarge, + AppInstanceTypeMlC518xlarge, + AppInstanceTypeMlC524xlarge, + AppInstanceTypeMlP32xlarge, + AppInstanceTypeMlP38xlarge, + AppInstanceTypeMlP316xlarge, + AppInstanceTypeMlG4dnXlarge, + AppInstanceTypeMlG4dn2xlarge, + 
AppInstanceTypeMlG4dn4xlarge, + AppInstanceTypeMlG4dn8xlarge, + AppInstanceTypeMlG4dn12xlarge, + AppInstanceTypeMlG4dn16xlarge, + } +} + +const ( + // AppNetworkAccessTypePublicInternetOnly is a AppNetworkAccessType enum value + AppNetworkAccessTypePublicInternetOnly = "PublicInternetOnly" + + // AppNetworkAccessTypeVpcOnly is a AppNetworkAccessType enum value + AppNetworkAccessTypeVpcOnly = "VpcOnly" +) + +// AppNetworkAccessType_Values returns all elements of the AppNetworkAccessType enum +func AppNetworkAccessType_Values() []string { + return []string{ + AppNetworkAccessTypePublicInternetOnly, + AppNetworkAccessTypeVpcOnly, + } +} + const ( // AppSortKeyCreationTime is a AppSortKey enum value AppSortKeyCreationTime = "CreationTime" ) +// AppSortKey_Values returns all elements of the AppSortKey enum +func AppSortKey_Values() []string { + return []string{ + AppSortKeyCreationTime, + } +} + const ( // AppStatusDeleted is a AppStatus enum value AppStatusDeleted = "Deleted" @@ -45920,6 +49206,17 @@ const ( AppStatusPending = "Pending" ) +// AppStatus_Values returns all elements of the AppStatus enum +func AppStatus_Values() []string { + return []string{ + AppStatusDeleted, + AppStatusDeleting, + AppStatusFailed, + AppStatusInService, + AppStatusPending, + } +} + const ( // AppTypeJupyterServer is a AppType enum value AppTypeJupyterServer = "JupyterServer" @@ -45931,6 +49228,15 @@ const ( AppTypeTensorBoard = "TensorBoard" ) +// AppType_Values returns all elements of the AppType enum +func AppType_Values() []string { + return []string{ + AppTypeJupyterServer, + AppTypeKernelGateway, + AppTypeTensorBoard, + } +} + const ( // AssemblyTypeNone is a AssemblyType enum value AssemblyTypeNone = "None" @@ -45939,6 +49245,14 @@ const ( AssemblyTypeLine = "Line" ) +// AssemblyType_Values returns all elements of the AssemblyType enum +func AssemblyType_Values() []string { + return []string{ + AssemblyTypeNone, + AssemblyTypeLine, + } +} + const ( // AuthModeSso is a AuthMode enum value AuthModeSso = "SSO" @@ -45947,6 +49261,14 @@ const ( AuthModeIam = "IAM" ) +// AuthMode_Values returns all elements of the AuthMode enum +func AuthMode_Values() []string { + return []string{ + AuthModeSso, + AuthModeIam, + } +} + const ( // AutoMLJobObjectiveTypeMaximize is a AutoMLJobObjectiveType enum value AutoMLJobObjectiveTypeMaximize = "Maximize" @@ -45955,6 +49277,14 @@ const ( AutoMLJobObjectiveTypeMinimize = "Minimize" ) +// AutoMLJobObjectiveType_Values returns all elements of the AutoMLJobObjectiveType enum +func AutoMLJobObjectiveType_Values() []string { + return []string{ + AutoMLJobObjectiveTypeMaximize, + AutoMLJobObjectiveTypeMinimize, + } +} + const ( // AutoMLJobSecondaryStatusStarting is a AutoMLJobSecondaryStatus enum value AutoMLJobSecondaryStatusStarting = "Starting" @@ -45987,6 +49317,22 @@ const ( AutoMLJobSecondaryStatusCandidateDefinitionsGenerated = "CandidateDefinitionsGenerated" ) +// AutoMLJobSecondaryStatus_Values returns all elements of the AutoMLJobSecondaryStatus enum +func AutoMLJobSecondaryStatus_Values() []string { + return []string{ + AutoMLJobSecondaryStatusStarting, + AutoMLJobSecondaryStatusAnalyzingData, + AutoMLJobSecondaryStatusFeatureEngineering, + AutoMLJobSecondaryStatusModelTuning, + AutoMLJobSecondaryStatusMaxCandidatesReached, + AutoMLJobSecondaryStatusFailed, + AutoMLJobSecondaryStatusStopped, + AutoMLJobSecondaryStatusMaxAutoMljobRuntimeReached, + AutoMLJobSecondaryStatusStopping, + AutoMLJobSecondaryStatusCandidateDefinitionsGenerated, + } +} + const ( // 
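The generated *_Values helpers above return every member of an enum, which is useful for validating user-supplied strings (in the provider, these slices typically feed a string-in-slice schema validator). A minimal sketch of a plain membership check against AppNetworkAccessType_Values; the helper function is illustrative and not part of the SDK:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/service/sagemaker"
)

// isValidEnumValue reports whether v is one of the allowed enum values.
// (Helper name is illustrative, not part of the SDK.)
func isValidEnumValue(v string, allowed []string) bool {
    for _, a := range allowed {
        if v == a {
            return true
        }
    }
    return false
}

func main() {
    for _, candidate := range []string{"VpcOnly", "PublicInternetOnly", "Direct"} {
        fmt.Println(candidate, isValidEnumValue(candidate, sagemaker.AppNetworkAccessType_Values()))
    }
}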
AutoMLJobStatusCompleted is a AutoMLJobStatus enum value AutoMLJobStatusCompleted = "Completed" @@ -46004,6 +49350,17 @@ const ( AutoMLJobStatusStopping = "Stopping" ) +// AutoMLJobStatus_Values returns all elements of the AutoMLJobStatus enum +func AutoMLJobStatus_Values() []string { + return []string{ + AutoMLJobStatusCompleted, + AutoMLJobStatusInProgress, + AutoMLJobStatusFailed, + AutoMLJobStatusStopped, + AutoMLJobStatusStopping, + } +} + const ( // AutoMLMetricEnumAccuracy is a AutoMLMetricEnum enum value AutoMLMetricEnumAccuracy = "Accuracy" @@ -46016,8 +49373,22 @@ const ( // AutoMLMetricEnumF1macro is a AutoMLMetricEnum enum value AutoMLMetricEnumF1macro = "F1macro" + + // AutoMLMetricEnumAuc is a AutoMLMetricEnum enum value + AutoMLMetricEnumAuc = "AUC" ) +// AutoMLMetricEnum_Values returns all elements of the AutoMLMetricEnum enum +func AutoMLMetricEnum_Values() []string { + return []string{ + AutoMLMetricEnumAccuracy, + AutoMLMetricEnumMse, + AutoMLMetricEnumF1, + AutoMLMetricEnumF1macro, + AutoMLMetricEnumAuc, + } +} + const ( // AutoMLS3DataTypeManifestFile is a AutoMLS3DataType enum value AutoMLS3DataTypeManifestFile = "ManifestFile" @@ -46026,6 +49397,14 @@ const ( AutoMLS3DataTypeS3prefix = "S3Prefix" ) +// AutoMLS3DataType_Values returns all elements of the AutoMLS3DataType enum +func AutoMLS3DataType_Values() []string { + return []string{ + AutoMLS3DataTypeManifestFile, + AutoMLS3DataTypeS3prefix, + } +} + const ( // AutoMLSortByName is a AutoMLSortBy enum value AutoMLSortByName = "Name" @@ -46037,6 +49416,15 @@ const ( AutoMLSortByStatus = "Status" ) +// AutoMLSortBy_Values returns all elements of the AutoMLSortBy enum +func AutoMLSortBy_Values() []string { + return []string{ + AutoMLSortByName, + AutoMLSortByCreationTime, + AutoMLSortByStatus, + } +} + const ( // AutoMLSortOrderAscending is a AutoMLSortOrder enum value AutoMLSortOrderAscending = "Ascending" @@ -46045,6 +49433,14 @@ const ( AutoMLSortOrderDescending = "Descending" ) +// AutoMLSortOrder_Values returns all elements of the AutoMLSortOrder enum +func AutoMLSortOrder_Values() []string { + return []string{ + AutoMLSortOrderAscending, + AutoMLSortOrderDescending, + } +} + const ( // AwsManagedHumanLoopRequestSourceAwsRekognitionDetectModerationLabelsImageV3 is a AwsManagedHumanLoopRequestSource enum value AwsManagedHumanLoopRequestSourceAwsRekognitionDetectModerationLabelsImageV3 = "AWS/Rekognition/DetectModerationLabels/Image/V3" @@ -46053,6 +49449,14 @@ const ( AwsManagedHumanLoopRequestSourceAwsTextractAnalyzeDocumentFormsV1 = "AWS/Textract/AnalyzeDocument/Forms/V1" ) +// AwsManagedHumanLoopRequestSource_Values returns all elements of the AwsManagedHumanLoopRequestSource enum +func AwsManagedHumanLoopRequestSource_Values() []string { + return []string{ + AwsManagedHumanLoopRequestSourceAwsRekognitionDetectModerationLabelsImageV3, + AwsManagedHumanLoopRequestSourceAwsTextractAnalyzeDocumentFormsV1, + } +} + const ( // BatchStrategyMultiRecord is a BatchStrategy enum value BatchStrategyMultiRecord = "MultiRecord" @@ -46061,6 +49465,14 @@ const ( BatchStrategySingleRecord = "SingleRecord" ) +// BatchStrategy_Values returns all elements of the BatchStrategy enum +func BatchStrategy_Values() []string { + return []string{ + BatchStrategyMultiRecord, + BatchStrategySingleRecord, + } +} + const ( // BooleanOperatorAnd is a BooleanOperator enum value BooleanOperatorAnd = "And" @@ -46069,6 +49481,14 @@ const ( BooleanOperatorOr = "Or" ) +// BooleanOperator_Values returns all elements of the BooleanOperator enum 
+func BooleanOperator_Values() []string { + return []string{ + BooleanOperatorAnd, + BooleanOperatorOr, + } +} + const ( // CandidateSortByCreationTime is a CandidateSortBy enum value CandidateSortByCreationTime = "CreationTime" @@ -46080,6 +49500,15 @@ const ( CandidateSortByFinalObjectiveMetricValue = "FinalObjectiveMetricValue" ) +// CandidateSortBy_Values returns all elements of the CandidateSortBy enum +func CandidateSortBy_Values() []string { + return []string{ + CandidateSortByCreationTime, + CandidateSortByStatus, + CandidateSortByFinalObjectiveMetricValue, + } +} + const ( // CandidateStatusCompleted is a CandidateStatus enum value CandidateStatusCompleted = "Completed" @@ -46097,6 +49526,17 @@ const ( CandidateStatusStopping = "Stopping" ) +// CandidateStatus_Values returns all elements of the CandidateStatus enum +func CandidateStatus_Values() []string { + return []string{ + CandidateStatusCompleted, + CandidateStatusInProgress, + CandidateStatusFailed, + CandidateStatusStopped, + CandidateStatusStopping, + } +} + const ( // CandidateStepTypeAwsSageMakerTrainingJob is a CandidateStepType enum value CandidateStepTypeAwsSageMakerTrainingJob = "AWS::SageMaker::TrainingJob" @@ -46108,6 +49548,15 @@ const ( CandidateStepTypeAwsSageMakerProcessingJob = "AWS::SageMaker::ProcessingJob" ) +// CandidateStepType_Values returns all elements of the CandidateStepType enum +func CandidateStepType_Values() []string { + return []string{ + CandidateStepTypeAwsSageMakerTrainingJob, + CandidateStepTypeAwsSageMakerTransformJob, + CandidateStepTypeAwsSageMakerProcessingJob, + } +} + const ( // CaptureModeInput is a CaptureMode enum value CaptureModeInput = "Input" @@ -46116,6 +49565,14 @@ const ( CaptureModeOutput = "Output" ) +// CaptureMode_Values returns all elements of the CaptureMode enum +func CaptureMode_Values() []string { + return []string{ + CaptureModeInput, + CaptureModeOutput, + } +} + const ( // CaptureStatusStarted is a CaptureStatus enum value CaptureStatusStarted = "Started" @@ -46124,6 +49581,14 @@ const ( CaptureStatusStopped = "Stopped" ) +// CaptureStatus_Values returns all elements of the CaptureStatus enum +func CaptureStatus_Values() []string { + return []string{ + CaptureStatusStarted, + CaptureStatusStopped, + } +} + const ( // CodeRepositorySortByName is a CodeRepositorySortBy enum value CodeRepositorySortByName = "Name" @@ -46135,6 +49600,15 @@ const ( CodeRepositorySortByLastModifiedTime = "LastModifiedTime" ) +// CodeRepositorySortBy_Values returns all elements of the CodeRepositorySortBy enum +func CodeRepositorySortBy_Values() []string { + return []string{ + CodeRepositorySortByName, + CodeRepositorySortByCreationTime, + CodeRepositorySortByLastModifiedTime, + } +} + const ( // CodeRepositorySortOrderAscending is a CodeRepositorySortOrder enum value CodeRepositorySortOrderAscending = "Ascending" @@ -46143,6 +49617,14 @@ const ( CodeRepositorySortOrderDescending = "Descending" ) +// CodeRepositorySortOrder_Values returns all elements of the CodeRepositorySortOrder enum +func CodeRepositorySortOrder_Values() []string { + return []string{ + CodeRepositorySortOrderAscending, + CodeRepositorySortOrderDescending, + } +} + const ( // CompilationJobStatusInprogress is a CompilationJobStatus enum value CompilationJobStatusInprogress = "INPROGRESS" @@ -46163,6 +49645,18 @@ const ( CompilationJobStatusStopped = "STOPPED" ) +// CompilationJobStatus_Values returns all elements of the CompilationJobStatus enum +func CompilationJobStatus_Values() []string { + return []string{ + 
CompilationJobStatusInprogress, + CompilationJobStatusCompleted, + CompilationJobStatusFailed, + CompilationJobStatusStarting, + CompilationJobStatusStopping, + CompilationJobStatusStopped, + } +} + const ( // CompressionTypeNone is a CompressionType enum value CompressionTypeNone = "None" @@ -46171,6 +49665,14 @@ const ( CompressionTypeGzip = "Gzip" ) +// CompressionType_Values returns all elements of the CompressionType enum +func CompressionType_Values() []string { + return []string{ + CompressionTypeNone, + CompressionTypeGzip, + } +} + const ( // ContainerModeSingleModel is a ContainerMode enum value ContainerModeSingleModel = "SingleModel" @@ -46179,6 +49681,14 @@ const ( ContainerModeMultiModel = "MultiModel" ) +// ContainerMode_Values returns all elements of the ContainerMode enum +func ContainerMode_Values() []string { + return []string{ + ContainerModeSingleModel, + ContainerModeMultiModel, + } +} + const ( // ContentClassifierFreeOfPersonallyIdentifiableInformation is a ContentClassifier enum value ContentClassifierFreeOfPersonallyIdentifiableInformation = "FreeOfPersonallyIdentifiableInformation" @@ -46187,6 +49697,14 @@ const ( ContentClassifierFreeOfAdultContent = "FreeOfAdultContent" ) +// ContentClassifier_Values returns all elements of the ContentClassifier enum +func ContentClassifier_Values() []string { + return []string{ + ContentClassifierFreeOfPersonallyIdentifiableInformation, + ContentClassifierFreeOfAdultContent, + } +} + const ( // DetailedAlgorithmStatusNotStarted is a DetailedAlgorithmStatus enum value DetailedAlgorithmStatusNotStarted = "NotStarted" @@ -46201,6 +49719,16 @@ const ( DetailedAlgorithmStatusFailed = "Failed" ) +// DetailedAlgorithmStatus_Values returns all elements of the DetailedAlgorithmStatus enum +func DetailedAlgorithmStatus_Values() []string { + return []string{ + DetailedAlgorithmStatusNotStarted, + DetailedAlgorithmStatusInProgress, + DetailedAlgorithmStatusCompleted, + DetailedAlgorithmStatusFailed, + } +} + const ( // DetailedModelPackageStatusNotStarted is a DetailedModelPackageStatus enum value DetailedModelPackageStatusNotStarted = "NotStarted" @@ -46215,6 +49743,16 @@ const ( DetailedModelPackageStatusFailed = "Failed" ) +// DetailedModelPackageStatus_Values returns all elements of the DetailedModelPackageStatus enum +func DetailedModelPackageStatus_Values() []string { + return []string{ + DetailedModelPackageStatusNotStarted, + DetailedModelPackageStatusInProgress, + DetailedModelPackageStatusCompleted, + DetailedModelPackageStatusFailed, + } +} + const ( // DirectInternetAccessEnabled is a DirectInternetAccess enum value DirectInternetAccessEnabled = "Enabled" @@ -46223,6 +49761,14 @@ const ( DirectInternetAccessDisabled = "Disabled" ) +// DirectInternetAccess_Values returns all elements of the DirectInternetAccess enum +func DirectInternetAccess_Values() []string { + return []string{ + DirectInternetAccessEnabled, + DirectInternetAccessDisabled, + } +} + const ( // DomainStatusDeleting is a DomainStatus enum value DomainStatusDeleting = "Deleting" @@ -46237,6 +49783,16 @@ const ( DomainStatusPending = "Pending" ) +// DomainStatus_Values returns all elements of the DomainStatus enum +func DomainStatus_Values() []string { + return []string{ + DomainStatusDeleting, + DomainStatusFailed, + DomainStatusInService, + DomainStatusPending, + } +} + const ( // EndpointConfigSortKeyName is a EndpointConfigSortKey enum value EndpointConfigSortKeyName = "Name" @@ -46245,6 +49801,14 @@ const ( EndpointConfigSortKeyCreationTime = "CreationTime" 
) +// EndpointConfigSortKey_Values returns all elements of the EndpointConfigSortKey enum +func EndpointConfigSortKey_Values() []string { + return []string{ + EndpointConfigSortKeyName, + EndpointConfigSortKeyCreationTime, + } +} + const ( // EndpointSortKeyName is a EndpointSortKey enum value EndpointSortKeyName = "Name" @@ -46256,6 +49820,15 @@ const ( EndpointSortKeyStatus = "Status" ) +// EndpointSortKey_Values returns all elements of the EndpointSortKey enum +func EndpointSortKey_Values() []string { + return []string{ + EndpointSortKeyName, + EndpointSortKeyCreationTime, + EndpointSortKeyStatus, + } +} + const ( // EndpointStatusOutOfService is a EndpointStatus enum value EndpointStatusOutOfService = "OutOfService" @@ -46282,6 +49855,20 @@ const ( EndpointStatusFailed = "Failed" ) +// EndpointStatus_Values returns all elements of the EndpointStatus enum +func EndpointStatus_Values() []string { + return []string{ + EndpointStatusOutOfService, + EndpointStatusCreating, + EndpointStatusUpdating, + EndpointStatusSystemUpdating, + EndpointStatusRollingBack, + EndpointStatusInService, + EndpointStatusDeleting, + EndpointStatusFailed, + } +} + const ( // ExecutionStatusPending is a ExecutionStatus enum value ExecutionStatusPending = "Pending" @@ -46305,6 +49892,19 @@ const ( ExecutionStatusStopped = "Stopped" ) +// ExecutionStatus_Values returns all elements of the ExecutionStatus enum +func ExecutionStatus_Values() []string { + return []string{ + ExecutionStatusPending, + ExecutionStatusCompleted, + ExecutionStatusCompletedWithViolations, + ExecutionStatusInProgress, + ExecutionStatusFailed, + ExecutionStatusStopping, + ExecutionStatusStopped, + } +} + const ( // FileSystemAccessModeRw is a FileSystemAccessMode enum value FileSystemAccessModeRw = "rw" @@ -46313,6 +49913,14 @@ const ( FileSystemAccessModeRo = "ro" ) +// FileSystemAccessMode_Values returns all elements of the FileSystemAccessMode enum +func FileSystemAccessMode_Values() []string { + return []string{ + FileSystemAccessModeRw, + FileSystemAccessModeRo, + } +} + const ( // FileSystemTypeEfs is a FileSystemType enum value FileSystemTypeEfs = "EFS" @@ -46321,6 +49929,14 @@ const ( FileSystemTypeFsxLustre = "FSxLustre" ) +// FileSystemType_Values returns all elements of the FileSystemType enum +func FileSystemType_Values() []string { + return []string{ + FileSystemTypeEfs, + FileSystemTypeFsxLustre, + } +} + const ( // FlowDefinitionStatusInitializing is a FlowDefinitionStatus enum value FlowDefinitionStatusInitializing = "Initializing" @@ -46333,11 +49949,18 @@ const ( // FlowDefinitionStatusDeleting is a FlowDefinitionStatus enum value FlowDefinitionStatusDeleting = "Deleting" - - // FlowDefinitionStatusDeleted is a FlowDefinitionStatus enum value - FlowDefinitionStatusDeleted = "Deleted" ) +// FlowDefinitionStatus_Values returns all elements of the FlowDefinitionStatus enum +func FlowDefinitionStatus_Values() []string { + return []string{ + FlowDefinitionStatusInitializing, + FlowDefinitionStatusActive, + FlowDefinitionStatusFailed, + FlowDefinitionStatusDeleting, + } +} + const ( // FrameworkTensorflow is a Framework enum value FrameworkTensorflow = "TENSORFLOW" @@ -46356,8 +49979,40 @@ const ( // FrameworkXgboost is a Framework enum value FrameworkXgboost = "XGBOOST" + + // FrameworkTflite is a Framework enum value + FrameworkTflite = "TFLITE" ) +// Framework_Values returns all elements of the Framework enum +func Framework_Values() []string { + return []string{ + FrameworkTensorflow, + FrameworkKeras, + FrameworkMxnet, + 
FrameworkOnnx, + FrameworkPytorch, + FrameworkXgboost, + FrameworkTflite, + } +} + +const ( + // HumanTaskUiStatusActive is a HumanTaskUiStatus enum value + HumanTaskUiStatusActive = "Active" + + // HumanTaskUiStatusDeleting is a HumanTaskUiStatus enum value + HumanTaskUiStatusDeleting = "Deleting" +) + +// HumanTaskUiStatus_Values returns all elements of the HumanTaskUiStatus enum +func HumanTaskUiStatus_Values() []string { + return []string{ + HumanTaskUiStatusActive, + HumanTaskUiStatusDeleting, + } +} + const ( // HyperParameterScalingTypeAuto is a HyperParameterScalingType enum value HyperParameterScalingTypeAuto = "Auto" @@ -46372,6 +50027,16 @@ const ( HyperParameterScalingTypeReverseLogarithmic = "ReverseLogarithmic" ) +// HyperParameterScalingType_Values returns all elements of the HyperParameterScalingType enum +func HyperParameterScalingType_Values() []string { + return []string{ + HyperParameterScalingTypeAuto, + HyperParameterScalingTypeLinear, + HyperParameterScalingTypeLogarithmic, + HyperParameterScalingTypeReverseLogarithmic, + } +} + const ( // HyperParameterTuningJobObjectiveTypeMaximize is a HyperParameterTuningJobObjectiveType enum value HyperParameterTuningJobObjectiveTypeMaximize = "Maximize" @@ -46380,6 +50045,14 @@ const ( HyperParameterTuningJobObjectiveTypeMinimize = "Minimize" ) +// HyperParameterTuningJobObjectiveType_Values returns all elements of the HyperParameterTuningJobObjectiveType enum +func HyperParameterTuningJobObjectiveType_Values() []string { + return []string{ + HyperParameterTuningJobObjectiveTypeMaximize, + HyperParameterTuningJobObjectiveTypeMinimize, + } +} + const ( // HyperParameterTuningJobSortByOptionsName is a HyperParameterTuningJobSortByOptions enum value HyperParameterTuningJobSortByOptionsName = "Name" @@ -46391,6 +50064,15 @@ const ( HyperParameterTuningJobSortByOptionsCreationTime = "CreationTime" ) +// HyperParameterTuningJobSortByOptions_Values returns all elements of the HyperParameterTuningJobSortByOptions enum +func HyperParameterTuningJobSortByOptions_Values() []string { + return []string{ + HyperParameterTuningJobSortByOptionsName, + HyperParameterTuningJobSortByOptionsStatus, + HyperParameterTuningJobSortByOptionsCreationTime, + } +} + const ( // HyperParameterTuningJobStatusCompleted is a HyperParameterTuningJobStatus enum value HyperParameterTuningJobStatusCompleted = "Completed" @@ -46408,6 +50090,17 @@ const ( HyperParameterTuningJobStatusStopping = "Stopping" ) +// HyperParameterTuningJobStatus_Values returns all elements of the HyperParameterTuningJobStatus enum +func HyperParameterTuningJobStatus_Values() []string { + return []string{ + HyperParameterTuningJobStatusCompleted, + HyperParameterTuningJobStatusInProgress, + HyperParameterTuningJobStatusFailed, + HyperParameterTuningJobStatusStopped, + HyperParameterTuningJobStatusStopping, + } +} + // The strategy hyperparameter tuning uses to find the best combination of hyperparameters // for your model. Currently, the only supported value is Bayesian. 
const ( @@ -46418,6 +50111,14 @@ const ( HyperParameterTuningJobStrategyTypeRandom = "Random" ) +// HyperParameterTuningJobStrategyType_Values returns all elements of the HyperParameterTuningJobStrategyType enum +func HyperParameterTuningJobStrategyType_Values() []string { + return []string{ + HyperParameterTuningJobStrategyTypeBayesian, + HyperParameterTuningJobStrategyTypeRandom, + } +} + const ( // HyperParameterTuningJobWarmStartTypeIdenticalDataAndAlgorithm is a HyperParameterTuningJobWarmStartType enum value HyperParameterTuningJobWarmStartTypeIdenticalDataAndAlgorithm = "IdenticalDataAndAlgorithm" @@ -46426,6 +50127,14 @@ const ( HyperParameterTuningJobWarmStartTypeTransferLearning = "TransferLearning" ) +// HyperParameterTuningJobWarmStartType_Values returns all elements of the HyperParameterTuningJobWarmStartType enum +func HyperParameterTuningJobWarmStartType_Values() []string { + return []string{ + HyperParameterTuningJobWarmStartTypeIdenticalDataAndAlgorithm, + HyperParameterTuningJobWarmStartTypeTransferLearning, + } +} + const ( // InstanceTypeMlT2Medium is a InstanceType enum value InstanceTypeMlT2Medium = "ml.t2.medium" @@ -46542,6 +50251,50 @@ const ( InstanceTypeMlP316xlarge = "ml.p3.16xlarge" ) +// InstanceType_Values returns all elements of the InstanceType enum +func InstanceType_Values() []string { + return []string{ + InstanceTypeMlT2Medium, + InstanceTypeMlT2Large, + InstanceTypeMlT2Xlarge, + InstanceTypeMlT22xlarge, + InstanceTypeMlT3Medium, + InstanceTypeMlT3Large, + InstanceTypeMlT3Xlarge, + InstanceTypeMlT32xlarge, + InstanceTypeMlM4Xlarge, + InstanceTypeMlM42xlarge, + InstanceTypeMlM44xlarge, + InstanceTypeMlM410xlarge, + InstanceTypeMlM416xlarge, + InstanceTypeMlM5Xlarge, + InstanceTypeMlM52xlarge, + InstanceTypeMlM54xlarge, + InstanceTypeMlM512xlarge, + InstanceTypeMlM524xlarge, + InstanceTypeMlC4Xlarge, + InstanceTypeMlC42xlarge, + InstanceTypeMlC44xlarge, + InstanceTypeMlC48xlarge, + InstanceTypeMlC5Xlarge, + InstanceTypeMlC52xlarge, + InstanceTypeMlC54xlarge, + InstanceTypeMlC59xlarge, + InstanceTypeMlC518xlarge, + InstanceTypeMlC5dXlarge, + InstanceTypeMlC5d2xlarge, + InstanceTypeMlC5d4xlarge, + InstanceTypeMlC5d9xlarge, + InstanceTypeMlC5d18xlarge, + InstanceTypeMlP2Xlarge, + InstanceTypeMlP28xlarge, + InstanceTypeMlP216xlarge, + InstanceTypeMlP32xlarge, + InstanceTypeMlP38xlarge, + InstanceTypeMlP316xlarge, + } +} + const ( // JoinSourceInput is a JoinSource enum value JoinSourceInput = "Input" @@ -46550,7 +50303,18 @@ const ( JoinSourceNone = "None" ) +// JoinSource_Values returns all elements of the JoinSource enum +func JoinSource_Values() []string { + return []string{ + JoinSourceInput, + JoinSourceNone, + } +} + const ( + // LabelingJobStatusInitializing is a LabelingJobStatus enum value + LabelingJobStatusInitializing = "Initializing" + // LabelingJobStatusInProgress is a LabelingJobStatus enum value LabelingJobStatusInProgress = "InProgress" @@ -46567,6 +50331,18 @@ const ( LabelingJobStatusStopped = "Stopped" ) +// LabelingJobStatus_Values returns all elements of the LabelingJobStatus enum +func LabelingJobStatus_Values() []string { + return []string{ + LabelingJobStatusInitializing, + LabelingJobStatusInProgress, + LabelingJobStatusCompleted, + LabelingJobStatusFailed, + LabelingJobStatusStopping, + LabelingJobStatusStopped, + } +} + const ( // ListCompilationJobsSortByName is a ListCompilationJobsSortBy enum value ListCompilationJobsSortByName = "Name" @@ -46578,11 +50354,43 @@ const ( ListCompilationJobsSortByStatus = "Status" ) +// 
ListCompilationJobsSortBy_Values returns all elements of the ListCompilationJobsSortBy enum +func ListCompilationJobsSortBy_Values() []string { + return []string{ + ListCompilationJobsSortByName, + ListCompilationJobsSortByCreationTime, + ListCompilationJobsSortByStatus, + } +} + const ( // ListLabelingJobsForWorkteamSortByOptionsCreationTime is a ListLabelingJobsForWorkteamSortByOptions enum value ListLabelingJobsForWorkteamSortByOptionsCreationTime = "CreationTime" ) +// ListLabelingJobsForWorkteamSortByOptions_Values returns all elements of the ListLabelingJobsForWorkteamSortByOptions enum +func ListLabelingJobsForWorkteamSortByOptions_Values() []string { + return []string{ + ListLabelingJobsForWorkteamSortByOptionsCreationTime, + } +} + +const ( + // ListWorkforcesSortByOptionsName is a ListWorkforcesSortByOptions enum value + ListWorkforcesSortByOptionsName = "Name" + + // ListWorkforcesSortByOptionsCreateDate is a ListWorkforcesSortByOptions enum value + ListWorkforcesSortByOptionsCreateDate = "CreateDate" +) + +// ListWorkforcesSortByOptions_Values returns all elements of the ListWorkforcesSortByOptions enum +func ListWorkforcesSortByOptions_Values() []string { + return []string{ + ListWorkforcesSortByOptionsName, + ListWorkforcesSortByOptionsCreateDate, + } +} + const ( // ListWorkteamsSortByOptionsName is a ListWorkteamsSortByOptions enum value ListWorkteamsSortByOptionsName = "Name" @@ -46591,6 +50399,14 @@ const ( ListWorkteamsSortByOptionsCreateDate = "CreateDate" ) +// ListWorkteamsSortByOptions_Values returns all elements of the ListWorkteamsSortByOptions enum +func ListWorkteamsSortByOptions_Values() []string { + return []string{ + ListWorkteamsSortByOptionsName, + ListWorkteamsSortByOptionsCreateDate, + } +} + const ( // ModelPackageSortByName is a ModelPackageSortBy enum value ModelPackageSortByName = "Name" @@ -46599,6 +50415,14 @@ const ( ModelPackageSortByCreationTime = "CreationTime" ) +// ModelPackageSortBy_Values returns all elements of the ModelPackageSortBy enum +func ModelPackageSortBy_Values() []string { + return []string{ + ModelPackageSortByName, + ModelPackageSortByCreationTime, + } +} + const ( // ModelPackageStatusPending is a ModelPackageStatus enum value ModelPackageStatusPending = "Pending" @@ -46616,6 +50440,17 @@ const ( ModelPackageStatusDeleting = "Deleting" ) +// ModelPackageStatus_Values returns all elements of the ModelPackageStatus enum +func ModelPackageStatus_Values() []string { + return []string{ + ModelPackageStatusPending, + ModelPackageStatusInProgress, + ModelPackageStatusCompleted, + ModelPackageStatusFailed, + ModelPackageStatusDeleting, + } +} + const ( // ModelSortKeyName is a ModelSortKey enum value ModelSortKeyName = "Name" @@ -46624,6 +50459,14 @@ const ( ModelSortKeyCreationTime = "CreationTime" ) +// ModelSortKey_Values returns all elements of the ModelSortKey enum +func ModelSortKey_Values() []string { + return []string{ + ModelSortKeyName, + ModelSortKeyCreationTime, + } +} + const ( // MonitoringExecutionSortKeyCreationTime is a MonitoringExecutionSortKey enum value MonitoringExecutionSortKeyCreationTime = "CreationTime" @@ -46635,6 +50478,15 @@ const ( MonitoringExecutionSortKeyStatus = "Status" ) +// MonitoringExecutionSortKey_Values returns all elements of the MonitoringExecutionSortKey enum +func MonitoringExecutionSortKey_Values() []string { + return []string{ + MonitoringExecutionSortKeyCreationTime, + MonitoringExecutionSortKeyScheduledTime, + MonitoringExecutionSortKeyStatus, + } +} + const ( // 
MonitoringScheduleSortKeyName is a MonitoringScheduleSortKey enum value MonitoringScheduleSortKeyName = "Name" @@ -46646,6 +50498,15 @@ const ( MonitoringScheduleSortKeyStatus = "Status" ) +// MonitoringScheduleSortKey_Values returns all elements of the MonitoringScheduleSortKey enum +func MonitoringScheduleSortKey_Values() []string { + return []string{ + MonitoringScheduleSortKeyName, + MonitoringScheduleSortKeyCreationTime, + MonitoringScheduleSortKeyStatus, + } +} + const ( // NotebookInstanceAcceleratorTypeMlEia1Medium is a NotebookInstanceAcceleratorType enum value NotebookInstanceAcceleratorTypeMlEia1Medium = "ml.eia1.medium" @@ -46666,6 +50527,18 @@ const ( NotebookInstanceAcceleratorTypeMlEia2Xlarge = "ml.eia2.xlarge" ) +// NotebookInstanceAcceleratorType_Values returns all elements of the NotebookInstanceAcceleratorType enum +func NotebookInstanceAcceleratorType_Values() []string { + return []string{ + NotebookInstanceAcceleratorTypeMlEia1Medium, + NotebookInstanceAcceleratorTypeMlEia1Large, + NotebookInstanceAcceleratorTypeMlEia1Xlarge, + NotebookInstanceAcceleratorTypeMlEia2Medium, + NotebookInstanceAcceleratorTypeMlEia2Large, + NotebookInstanceAcceleratorTypeMlEia2Xlarge, + } +} + const ( // NotebookInstanceLifecycleConfigSortKeyName is a NotebookInstanceLifecycleConfigSortKey enum value NotebookInstanceLifecycleConfigSortKeyName = "Name" @@ -46677,6 +50550,15 @@ const ( NotebookInstanceLifecycleConfigSortKeyLastModifiedTime = "LastModifiedTime" ) +// NotebookInstanceLifecycleConfigSortKey_Values returns all elements of the NotebookInstanceLifecycleConfigSortKey enum +func NotebookInstanceLifecycleConfigSortKey_Values() []string { + return []string{ + NotebookInstanceLifecycleConfigSortKeyName, + NotebookInstanceLifecycleConfigSortKeyCreationTime, + NotebookInstanceLifecycleConfigSortKeyLastModifiedTime, + } +} + const ( // NotebookInstanceLifecycleConfigSortOrderAscending is a NotebookInstanceLifecycleConfigSortOrder enum value NotebookInstanceLifecycleConfigSortOrderAscending = "Ascending" @@ -46685,6 +50567,14 @@ const ( NotebookInstanceLifecycleConfigSortOrderDescending = "Descending" ) +// NotebookInstanceLifecycleConfigSortOrder_Values returns all elements of the NotebookInstanceLifecycleConfigSortOrder enum +func NotebookInstanceLifecycleConfigSortOrder_Values() []string { + return []string{ + NotebookInstanceLifecycleConfigSortOrderAscending, + NotebookInstanceLifecycleConfigSortOrderDescending, + } +} + const ( // NotebookInstanceSortKeyName is a NotebookInstanceSortKey enum value NotebookInstanceSortKeyName = "Name" @@ -46696,6 +50586,15 @@ const ( NotebookInstanceSortKeyStatus = "Status" ) +// NotebookInstanceSortKey_Values returns all elements of the NotebookInstanceSortKey enum +func NotebookInstanceSortKey_Values() []string { + return []string{ + NotebookInstanceSortKeyName, + NotebookInstanceSortKeyCreationTime, + NotebookInstanceSortKeyStatus, + } +} + const ( // NotebookInstanceSortOrderAscending is a NotebookInstanceSortOrder enum value NotebookInstanceSortOrderAscending = "Ascending" @@ -46704,6 +50603,14 @@ const ( NotebookInstanceSortOrderDescending = "Descending" ) +// NotebookInstanceSortOrder_Values returns all elements of the NotebookInstanceSortOrder enum +func NotebookInstanceSortOrder_Values() []string { + return []string{ + NotebookInstanceSortOrderAscending, + NotebookInstanceSortOrderDescending, + } +} + const ( // NotebookInstanceStatusPending is a NotebookInstanceStatus enum value NotebookInstanceStatusPending = "Pending" @@ -46727,6 +50634,19 
@@ const ( NotebookInstanceStatusUpdating = "Updating" ) +// NotebookInstanceStatus_Values returns all elements of the NotebookInstanceStatus enum +func NotebookInstanceStatus_Values() []string { + return []string{ + NotebookInstanceStatusPending, + NotebookInstanceStatusInService, + NotebookInstanceStatusStopping, + NotebookInstanceStatusStopped, + NotebookInstanceStatusFailed, + NotebookInstanceStatusDeleting, + NotebookInstanceStatusUpdating, + } +} + const ( // NotebookOutputOptionAllowed is a NotebookOutputOption enum value NotebookOutputOptionAllowed = "Allowed" @@ -46735,6 +50655,14 @@ const ( NotebookOutputOptionDisabled = "Disabled" ) +// NotebookOutputOption_Values returns all elements of the NotebookOutputOption enum +func NotebookOutputOption_Values() []string { + return []string{ + NotebookOutputOptionAllowed, + NotebookOutputOptionDisabled, + } +} + const ( // ObjectiveStatusSucceeded is a ObjectiveStatus enum value ObjectiveStatusSucceeded = "Succeeded" @@ -46746,6 +50674,15 @@ const ( ObjectiveStatusFailed = "Failed" ) +// ObjectiveStatus_Values returns all elements of the ObjectiveStatus enum +func ObjectiveStatus_Values() []string { + return []string{ + ObjectiveStatusSucceeded, + ObjectiveStatusPending, + ObjectiveStatusFailed, + } +} + const ( // OperatorEquals is a Operator enum value OperatorEquals = "Equals" @@ -46773,8 +50710,27 @@ const ( // OperatorNotExists is a Operator enum value OperatorNotExists = "NotExists" + + // OperatorIn is a Operator enum value + OperatorIn = "In" ) +// Operator_Values returns all elements of the Operator enum +func Operator_Values() []string { + return []string{ + OperatorEquals, + OperatorNotEquals, + OperatorGreaterThan, + OperatorGreaterThanOrEqualTo, + OperatorLessThan, + OperatorLessThanOrEqualTo, + OperatorContains, + OperatorExists, + OperatorNotExists, + OperatorIn, + } +} + const ( // OrderKeyAscending is a OrderKey enum value OrderKeyAscending = "Ascending" @@ -46783,6 +50739,14 @@ const ( OrderKeyDescending = "Descending" ) +// OrderKey_Values returns all elements of the OrderKey enum +func OrderKey_Values() []string { + return []string{ + OrderKeyAscending, + OrderKeyDescending, + } +} + const ( // ParameterTypeInteger is a ParameterType enum value ParameterTypeInteger = "Integer" @@ -46797,6 +50761,16 @@ const ( ParameterTypeFreeText = "FreeText" ) +// ParameterType_Values returns all elements of the ParameterType enum +func ParameterType_Values() []string { + return []string{ + ParameterTypeInteger, + ParameterTypeContinuous, + ParameterTypeCategorical, + ParameterTypeFreeText, + } +} + const ( // ProblemTypeBinaryClassification is a ProblemType enum value ProblemTypeBinaryClassification = "BinaryClassification" @@ -46808,6 +50782,15 @@ const ( ProblemTypeRegression = "Regression" ) +// ProblemType_Values returns all elements of the ProblemType enum +func ProblemType_Values() []string { + return []string{ + ProblemTypeBinaryClassification, + ProblemTypeMulticlassClassification, + ProblemTypeRegression, + } +} + const ( // ProcessingInstanceTypeMlT3Medium is a ProcessingInstanceType enum value ProcessingInstanceTypeMlT3Medium = "ml.t3.medium" @@ -46924,6 +50907,50 @@ const ( ProcessingInstanceTypeMlR524xlarge = "ml.r5.24xlarge" ) +// ProcessingInstanceType_Values returns all elements of the ProcessingInstanceType enum +func ProcessingInstanceType_Values() []string { + return []string{ + ProcessingInstanceTypeMlT3Medium, + ProcessingInstanceTypeMlT3Large, + ProcessingInstanceTypeMlT3Xlarge, + 
ProcessingInstanceTypeMlT32xlarge, + ProcessingInstanceTypeMlM4Xlarge, + ProcessingInstanceTypeMlM42xlarge, + ProcessingInstanceTypeMlM44xlarge, + ProcessingInstanceTypeMlM410xlarge, + ProcessingInstanceTypeMlM416xlarge, + ProcessingInstanceTypeMlC4Xlarge, + ProcessingInstanceTypeMlC42xlarge, + ProcessingInstanceTypeMlC44xlarge, + ProcessingInstanceTypeMlC48xlarge, + ProcessingInstanceTypeMlP2Xlarge, + ProcessingInstanceTypeMlP28xlarge, + ProcessingInstanceTypeMlP216xlarge, + ProcessingInstanceTypeMlP32xlarge, + ProcessingInstanceTypeMlP38xlarge, + ProcessingInstanceTypeMlP316xlarge, + ProcessingInstanceTypeMlC5Xlarge, + ProcessingInstanceTypeMlC52xlarge, + ProcessingInstanceTypeMlC54xlarge, + ProcessingInstanceTypeMlC59xlarge, + ProcessingInstanceTypeMlC518xlarge, + ProcessingInstanceTypeMlM5Large, + ProcessingInstanceTypeMlM5Xlarge, + ProcessingInstanceTypeMlM52xlarge, + ProcessingInstanceTypeMlM54xlarge, + ProcessingInstanceTypeMlM512xlarge, + ProcessingInstanceTypeMlM524xlarge, + ProcessingInstanceTypeMlR5Large, + ProcessingInstanceTypeMlR5Xlarge, + ProcessingInstanceTypeMlR52xlarge, + ProcessingInstanceTypeMlR54xlarge, + ProcessingInstanceTypeMlR58xlarge, + ProcessingInstanceTypeMlR512xlarge, + ProcessingInstanceTypeMlR516xlarge, + ProcessingInstanceTypeMlR524xlarge, + } +} + const ( // ProcessingJobStatusInProgress is a ProcessingJobStatus enum value ProcessingJobStatusInProgress = "InProgress" @@ -46941,6 +50968,17 @@ const ( ProcessingJobStatusStopped = "Stopped" ) +// ProcessingJobStatus_Values returns all elements of the ProcessingJobStatus enum +func ProcessingJobStatus_Values() []string { + return []string{ + ProcessingJobStatusInProgress, + ProcessingJobStatusCompleted, + ProcessingJobStatusFailed, + ProcessingJobStatusStopping, + ProcessingJobStatusStopped, + } +} + const ( // ProcessingS3CompressionTypeNone is a ProcessingS3CompressionType enum value ProcessingS3CompressionTypeNone = "None" @@ -46949,6 +50987,14 @@ const ( ProcessingS3CompressionTypeGzip = "Gzip" ) +// ProcessingS3CompressionType_Values returns all elements of the ProcessingS3CompressionType enum +func ProcessingS3CompressionType_Values() []string { + return []string{ + ProcessingS3CompressionTypeNone, + ProcessingS3CompressionTypeGzip, + } +} + const ( // ProcessingS3DataDistributionTypeFullyReplicated is a ProcessingS3DataDistributionType enum value ProcessingS3DataDistributionTypeFullyReplicated = "FullyReplicated" @@ -46957,6 +51003,14 @@ const ( ProcessingS3DataDistributionTypeShardedByS3key = "ShardedByS3Key" ) +// ProcessingS3DataDistributionType_Values returns all elements of the ProcessingS3DataDistributionType enum +func ProcessingS3DataDistributionType_Values() []string { + return []string{ + ProcessingS3DataDistributionTypeFullyReplicated, + ProcessingS3DataDistributionTypeShardedByS3key, + } +} + const ( // ProcessingS3DataTypeManifestFile is a ProcessingS3DataType enum value ProcessingS3DataTypeManifestFile = "ManifestFile" @@ -46965,6 +51019,14 @@ const ( ProcessingS3DataTypeS3prefix = "S3Prefix" ) +// ProcessingS3DataType_Values returns all elements of the ProcessingS3DataType enum +func ProcessingS3DataType_Values() []string { + return []string{ + ProcessingS3DataTypeManifestFile, + ProcessingS3DataTypeS3prefix, + } +} + const ( // ProcessingS3InputModePipe is a ProcessingS3InputMode enum value ProcessingS3InputModePipe = "Pipe" @@ -46973,6 +51035,14 @@ const ( ProcessingS3InputModeFile = "File" ) +// ProcessingS3InputMode_Values returns all elements of the ProcessingS3InputMode enum +func 
ProcessingS3InputMode_Values() []string { + return []string{ + ProcessingS3InputModePipe, + ProcessingS3InputModeFile, + } +} + const ( // ProcessingS3UploadModeContinuous is a ProcessingS3UploadMode enum value ProcessingS3UploadModeContinuous = "Continuous" @@ -46981,6 +51051,14 @@ const ( ProcessingS3UploadModeEndOfJob = "EndOfJob" ) +// ProcessingS3UploadMode_Values returns all elements of the ProcessingS3UploadMode enum +func ProcessingS3UploadMode_Values() []string { + return []string{ + ProcessingS3UploadModeContinuous, + ProcessingS3UploadModeEndOfJob, + } +} + const ( // ProductionVariantAcceleratorTypeMlEia1Medium is a ProductionVariantAcceleratorType enum value ProductionVariantAcceleratorTypeMlEia1Medium = "ml.eia1.medium" @@ -47001,6 +51079,18 @@ const ( ProductionVariantAcceleratorTypeMlEia2Xlarge = "ml.eia2.xlarge" ) +// ProductionVariantAcceleratorType_Values returns all elements of the ProductionVariantAcceleratorType enum +func ProductionVariantAcceleratorType_Values() []string { + return []string{ + ProductionVariantAcceleratorTypeMlEia1Medium, + ProductionVariantAcceleratorTypeMlEia1Large, + ProductionVariantAcceleratorTypeMlEia1Xlarge, + ProductionVariantAcceleratorTypeMlEia2Medium, + ProductionVariantAcceleratorTypeMlEia2Large, + ProductionVariantAcceleratorTypeMlEia2Xlarge, + } +} + const ( // ProductionVariantInstanceTypeMlT2Medium is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlT2Medium = "ml.t2.medium" @@ -47201,6 +51291,78 @@ const ( ProductionVariantInstanceTypeMlInf124xlarge = "ml.inf1.24xlarge" ) +// ProductionVariantInstanceType_Values returns all elements of the ProductionVariantInstanceType enum +func ProductionVariantInstanceType_Values() []string { + return []string{ + ProductionVariantInstanceTypeMlT2Medium, + ProductionVariantInstanceTypeMlT2Large, + ProductionVariantInstanceTypeMlT2Xlarge, + ProductionVariantInstanceTypeMlT22xlarge, + ProductionVariantInstanceTypeMlM4Xlarge, + ProductionVariantInstanceTypeMlM42xlarge, + ProductionVariantInstanceTypeMlM44xlarge, + ProductionVariantInstanceTypeMlM410xlarge, + ProductionVariantInstanceTypeMlM416xlarge, + ProductionVariantInstanceTypeMlM5Large, + ProductionVariantInstanceTypeMlM5Xlarge, + ProductionVariantInstanceTypeMlM52xlarge, + ProductionVariantInstanceTypeMlM54xlarge, + ProductionVariantInstanceTypeMlM512xlarge, + ProductionVariantInstanceTypeMlM524xlarge, + ProductionVariantInstanceTypeMlM5dLarge, + ProductionVariantInstanceTypeMlM5dXlarge, + ProductionVariantInstanceTypeMlM5d2xlarge, + ProductionVariantInstanceTypeMlM5d4xlarge, + ProductionVariantInstanceTypeMlM5d12xlarge, + ProductionVariantInstanceTypeMlM5d24xlarge, + ProductionVariantInstanceTypeMlC4Large, + ProductionVariantInstanceTypeMlC4Xlarge, + ProductionVariantInstanceTypeMlC42xlarge, + ProductionVariantInstanceTypeMlC44xlarge, + ProductionVariantInstanceTypeMlC48xlarge, + ProductionVariantInstanceTypeMlP2Xlarge, + ProductionVariantInstanceTypeMlP28xlarge, + ProductionVariantInstanceTypeMlP216xlarge, + ProductionVariantInstanceTypeMlP32xlarge, + ProductionVariantInstanceTypeMlP38xlarge, + ProductionVariantInstanceTypeMlP316xlarge, + ProductionVariantInstanceTypeMlC5Large, + ProductionVariantInstanceTypeMlC5Xlarge, + ProductionVariantInstanceTypeMlC52xlarge, + ProductionVariantInstanceTypeMlC54xlarge, + ProductionVariantInstanceTypeMlC59xlarge, + ProductionVariantInstanceTypeMlC518xlarge, + ProductionVariantInstanceTypeMlC5dLarge, + ProductionVariantInstanceTypeMlC5dXlarge, + 
ProductionVariantInstanceTypeMlC5d2xlarge, + ProductionVariantInstanceTypeMlC5d4xlarge, + ProductionVariantInstanceTypeMlC5d9xlarge, + ProductionVariantInstanceTypeMlC5d18xlarge, + ProductionVariantInstanceTypeMlG4dnXlarge, + ProductionVariantInstanceTypeMlG4dn2xlarge, + ProductionVariantInstanceTypeMlG4dn4xlarge, + ProductionVariantInstanceTypeMlG4dn8xlarge, + ProductionVariantInstanceTypeMlG4dn12xlarge, + ProductionVariantInstanceTypeMlG4dn16xlarge, + ProductionVariantInstanceTypeMlR5Large, + ProductionVariantInstanceTypeMlR5Xlarge, + ProductionVariantInstanceTypeMlR52xlarge, + ProductionVariantInstanceTypeMlR54xlarge, + ProductionVariantInstanceTypeMlR512xlarge, + ProductionVariantInstanceTypeMlR524xlarge, + ProductionVariantInstanceTypeMlR5dLarge, + ProductionVariantInstanceTypeMlR5dXlarge, + ProductionVariantInstanceTypeMlR5d2xlarge, + ProductionVariantInstanceTypeMlR5d4xlarge, + ProductionVariantInstanceTypeMlR5d12xlarge, + ProductionVariantInstanceTypeMlR5d24xlarge, + ProductionVariantInstanceTypeMlInf1Xlarge, + ProductionVariantInstanceTypeMlInf12xlarge, + ProductionVariantInstanceTypeMlInf16xlarge, + ProductionVariantInstanceTypeMlInf124xlarge, + } +} + const ( // RecordWrapperNone is a RecordWrapper enum value RecordWrapperNone = "None" @@ -47209,6 +51371,30 @@ const ( RecordWrapperRecordIo = "RecordIO" ) +// RecordWrapper_Values returns all elements of the RecordWrapper enum +func RecordWrapper_Values() []string { + return []string{ + RecordWrapperNone, + RecordWrapperRecordIo, + } +} + +const ( + // RepositoryAccessModePlatform is a RepositoryAccessMode enum value + RepositoryAccessModePlatform = "Platform" + + // RepositoryAccessModeVpc is a RepositoryAccessMode enum value + RepositoryAccessModeVpc = "Vpc" +) + +// RepositoryAccessMode_Values returns all elements of the RepositoryAccessMode enum +func RepositoryAccessMode_Values() []string { + return []string{ + RepositoryAccessModePlatform, + RepositoryAccessModeVpc, + } +} + const ( // ResourceTypeTrainingJob is a ResourceType enum value ResourceTypeTrainingJob = "TrainingJob" @@ -47223,6 +51409,16 @@ const ( ResourceTypeExperimentTrialComponent = "ExperimentTrialComponent" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeTrainingJob, + ResourceTypeExperiment, + ResourceTypeExperimentTrial, + ResourceTypeExperimentTrialComponent, + } +} + const ( // RetentionTypeRetain is a RetentionType enum value RetentionTypeRetain = "Retain" @@ -47231,6 +51427,14 @@ const ( RetentionTypeDelete = "Delete" ) +// RetentionType_Values returns all elements of the RetentionType enum +func RetentionType_Values() []string { + return []string{ + RetentionTypeRetain, + RetentionTypeDelete, + } +} + const ( // RootAccessEnabled is a RootAccess enum value RootAccessEnabled = "Enabled" @@ -47239,6 +51443,14 @@ const ( RootAccessDisabled = "Disabled" ) +// RootAccess_Values returns all elements of the RootAccess enum +func RootAccess_Values() []string { + return []string{ + RootAccessEnabled, + RootAccessDisabled, + } +} + const ( // RuleEvaluationStatusInProgress is a RuleEvaluationStatus enum value RuleEvaluationStatusInProgress = "InProgress" @@ -47259,6 +51471,18 @@ const ( RuleEvaluationStatusStopped = "Stopped" ) +// RuleEvaluationStatus_Values returns all elements of the RuleEvaluationStatus enum +func RuleEvaluationStatus_Values() []string { + return []string{ + RuleEvaluationStatusInProgress, + RuleEvaluationStatusNoIssuesFound, + 
RuleEvaluationStatusIssuesFound, + RuleEvaluationStatusError, + RuleEvaluationStatusStopping, + RuleEvaluationStatusStopped, + } +} + const ( // S3DataDistributionFullyReplicated is a S3DataDistribution enum value S3DataDistributionFullyReplicated = "FullyReplicated" @@ -47267,6 +51491,14 @@ const ( S3DataDistributionShardedByS3key = "ShardedByS3Key" ) +// S3DataDistribution_Values returns all elements of the S3DataDistribution enum +func S3DataDistribution_Values() []string { + return []string{ + S3DataDistributionFullyReplicated, + S3DataDistributionShardedByS3key, + } +} + const ( // S3DataTypeManifestFile is a S3DataType enum value S3DataTypeManifestFile = "ManifestFile" @@ -47278,6 +51510,15 @@ const ( S3DataTypeAugmentedManifestFile = "AugmentedManifestFile" ) +// S3DataType_Values returns all elements of the S3DataType enum +func S3DataType_Values() []string { + return []string{ + S3DataTypeManifestFile, + S3DataTypeS3prefix, + S3DataTypeAugmentedManifestFile, + } +} + const ( // ScheduleStatusPending is a ScheduleStatus enum value ScheduleStatusPending = "Pending" @@ -47292,6 +51533,16 @@ const ( ScheduleStatusStopped = "Stopped" ) +// ScheduleStatus_Values returns all elements of the ScheduleStatus enum +func ScheduleStatus_Values() []string { + return []string{ + ScheduleStatusPending, + ScheduleStatusFailed, + ScheduleStatusScheduled, + ScheduleStatusStopped, + } +} + const ( // SearchSortOrderAscending is a SearchSortOrder enum value SearchSortOrderAscending = "Ascending" @@ -47300,6 +51551,14 @@ const ( SearchSortOrderDescending = "Descending" ) +// SearchSortOrder_Values returns all elements of the SearchSortOrder enum +func SearchSortOrder_Values() []string { + return []string{ + SearchSortOrderAscending, + SearchSortOrderDescending, + } +} + const ( // SecondaryStatusStarting is a SecondaryStatus enum value SecondaryStatusStarting = "Starting" @@ -47344,6 +51603,26 @@ const ( SecondaryStatusMaxWaitTimeExceeded = "MaxWaitTimeExceeded" ) +// SecondaryStatus_Values returns all elements of the SecondaryStatus enum +func SecondaryStatus_Values() []string { + return []string{ + SecondaryStatusStarting, + SecondaryStatusLaunchingMlinstances, + SecondaryStatusPreparingTrainingStack, + SecondaryStatusDownloading, + SecondaryStatusDownloadingTrainingImage, + SecondaryStatusTraining, + SecondaryStatusUploading, + SecondaryStatusStopping, + SecondaryStatusStopped, + SecondaryStatusMaxRuntimeExceeded, + SecondaryStatusCompleted, + SecondaryStatusFailed, + SecondaryStatusInterrupted, + SecondaryStatusMaxWaitTimeExceeded, + } +} + const ( // SortByName is a SortBy enum value SortByName = "Name" @@ -47355,6 +51634,15 @@ const ( SortByStatus = "Status" ) +// SortBy_Values returns all elements of the SortBy enum +func SortBy_Values() []string { + return []string{ + SortByName, + SortByCreationTime, + SortByStatus, + } +} + const ( // SortExperimentsByName is a SortExperimentsBy enum value SortExperimentsByName = "Name" @@ -47363,6 +51651,14 @@ const ( SortExperimentsByCreationTime = "CreationTime" ) +// SortExperimentsBy_Values returns all elements of the SortExperimentsBy enum +func SortExperimentsBy_Values() []string { + return []string{ + SortExperimentsByName, + SortExperimentsByCreationTime, + } +} + const ( // SortOrderAscending is a SortOrder enum value SortOrderAscending = "Ascending" @@ -47371,6 +51667,14 @@ const ( SortOrderDescending = "Descending" ) +// SortOrder_Values returns all elements of the SortOrder enum +func SortOrder_Values() []string { + return []string{ + 
SortOrderAscending, + SortOrderDescending, + } +} + const ( // SortTrialComponentsByName is a SortTrialComponentsBy enum value SortTrialComponentsByName = "Name" @@ -47379,6 +51683,14 @@ const ( SortTrialComponentsByCreationTime = "CreationTime" ) +// SortTrialComponentsBy_Values returns all elements of the SortTrialComponentsBy enum +func SortTrialComponentsBy_Values() []string { + return []string{ + SortTrialComponentsByName, + SortTrialComponentsByCreationTime, + } +} + const ( // SortTrialsByName is a SortTrialsBy enum value SortTrialsByName = "Name" @@ -47387,6 +51699,14 @@ const ( SortTrialsByCreationTime = "CreationTime" ) +// SortTrialsBy_Values returns all elements of the SortTrialsBy enum +func SortTrialsBy_Values() []string { + return []string{ + SortTrialsByName, + SortTrialsByCreationTime, + } +} + const ( // SplitTypeNone is a SplitType enum value SplitTypeNone = "None" @@ -47401,6 +51721,16 @@ const ( SplitTypeTfrecord = "TFRecord" ) +// SplitType_Values returns all elements of the SplitType enum +func SplitType_Values() []string { + return []string{ + SplitTypeNone, + SplitTypeLine, + SplitTypeRecordIo, + SplitTypeTfrecord, + } +} + const ( // TargetDeviceLambda is a TargetDevice enum value TargetDeviceLambda = "lambda" @@ -47423,6 +51753,9 @@ const ( // TargetDeviceMlP3 is a TargetDevice enum value TargetDeviceMlP3 = "ml_p3" + // TargetDeviceMlG4dn is a TargetDevice enum value + TargetDeviceMlG4dn = "ml_g4dn" + // TargetDeviceMlInf1 is a TargetDevice enum value TargetDeviceMlInf1 = "ml_inf1" @@ -47465,10 +51798,119 @@ const ( // TargetDeviceQcs603 is a TargetDevice enum value TargetDeviceQcs603 = "qcs603" + // TargetDeviceSitaraAm57x is a TargetDevice enum value + TargetDeviceSitaraAm57x = "sitara_am57x" + // TargetDeviceAmbaCv22 is a TargetDevice enum value TargetDeviceAmbaCv22 = "amba_cv22" + + // TargetDeviceX86Win32 is a TargetDevice enum value + TargetDeviceX86Win32 = "x86_win32" + + // TargetDeviceX86Win64 is a TargetDevice enum value + TargetDeviceX86Win64 = "x86_win64" + + // TargetDeviceCoreml is a TargetDevice enum value + TargetDeviceCoreml = "coreml" ) +// TargetDevice_Values returns all elements of the TargetDevice enum +func TargetDevice_Values() []string { + return []string{ + TargetDeviceLambda, + TargetDeviceMlM4, + TargetDeviceMlM5, + TargetDeviceMlC4, + TargetDeviceMlC5, + TargetDeviceMlP2, + TargetDeviceMlP3, + TargetDeviceMlG4dn, + TargetDeviceMlInf1, + TargetDeviceJetsonTx1, + TargetDeviceJetsonTx2, + TargetDeviceJetsonNano, + TargetDeviceJetsonXavier, + TargetDeviceRasp3b, + TargetDeviceImx8qm, + TargetDeviceDeeplens, + TargetDeviceRk3399, + TargetDeviceRk3288, + TargetDeviceAisage, + TargetDeviceSbeC, + TargetDeviceQcs605, + TargetDeviceQcs603, + TargetDeviceSitaraAm57x, + TargetDeviceAmbaCv22, + TargetDeviceX86Win32, + TargetDeviceX86Win64, + TargetDeviceCoreml, + } +} + +const ( + // TargetPlatformAcceleratorIntelGraphics is a TargetPlatformAccelerator enum value + TargetPlatformAcceleratorIntelGraphics = "INTEL_GRAPHICS" + + // TargetPlatformAcceleratorMali is a TargetPlatformAccelerator enum value + TargetPlatformAcceleratorMali = "MALI" + + // TargetPlatformAcceleratorNvidia is a TargetPlatformAccelerator enum value + TargetPlatformAcceleratorNvidia = "NVIDIA" +) + +// TargetPlatformAccelerator_Values returns all elements of the TargetPlatformAccelerator enum +func TargetPlatformAccelerator_Values() []string { + return []string{ + TargetPlatformAcceleratorIntelGraphics, + TargetPlatformAcceleratorMali, + TargetPlatformAcceleratorNvidia, + } +} 
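Each enum above now ships with a generated <Enum>_Values() helper, which is the hook provider code normally uses to validate plan-time input against the vendored SDK instead of a hand-copied string list. A minimal sketch of that pattern, assuming the terraform-plugin-sdk helper/schema and helper/validation packages (the exact import paths depend on which plugin SDK major version is vendored); the attribute itself is purely illustrative:

package example

import (
	"github.com/aws/aws-sdk-go/service/sagemaker"
	"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/helper/validation"
)

// problemTypeSchema validates a string attribute against the enum values the
// SDK reports, so values the service adds later flow through on the next SDK
// bump rather than requiring a hand-edited allow list.
func problemTypeSchema() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Optional:     true,
		ValidateFunc: validation.StringInSlice(sagemaker.ProblemType_Values(), false),
	}
}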
+ +const ( + // TargetPlatformArchX8664 is a TargetPlatformArch enum value + TargetPlatformArchX8664 = "X86_64" + + // TargetPlatformArchX86 is a TargetPlatformArch enum value + TargetPlatformArchX86 = "X86" + + // TargetPlatformArchArm64 is a TargetPlatformArch enum value + TargetPlatformArchArm64 = "ARM64" + + // TargetPlatformArchArmEabi is a TargetPlatformArch enum value + TargetPlatformArchArmEabi = "ARM_EABI" + + // TargetPlatformArchArmEabihf is a TargetPlatformArch enum value + TargetPlatformArchArmEabihf = "ARM_EABIHF" +) + +// TargetPlatformArch_Values returns all elements of the TargetPlatformArch enum +func TargetPlatformArch_Values() []string { + return []string{ + TargetPlatformArchX8664, + TargetPlatformArchX86, + TargetPlatformArchArm64, + TargetPlatformArchArmEabi, + TargetPlatformArchArmEabihf, + } +} + +const ( + // TargetPlatformOsAndroid is a TargetPlatformOs enum value + TargetPlatformOsAndroid = "ANDROID" + + // TargetPlatformOsLinux is a TargetPlatformOs enum value + TargetPlatformOsLinux = "LINUX" +) + +// TargetPlatformOs_Values returns all elements of the TargetPlatformOs enum +func TargetPlatformOs_Values() []string { + return []string{ + TargetPlatformOsAndroid, + TargetPlatformOsLinux, + } +} + const ( // TrainingInputModePipe is a TrainingInputMode enum value TrainingInputModePipe = "Pipe" @@ -47477,6 +51919,14 @@ const ( TrainingInputModeFile = "File" ) +// TrainingInputMode_Values returns all elements of the TrainingInputMode enum +func TrainingInputMode_Values() []string { + return []string{ + TrainingInputModePipe, + TrainingInputModeFile, + } +} + const ( // TrainingInstanceTypeMlM4Xlarge is a TrainingInstanceType enum value TrainingInstanceTypeMlM4Xlarge = "ml.m4.xlarge" @@ -47576,8 +52026,67 @@ const ( // TrainingInstanceTypeMlC518xlarge is a TrainingInstanceType enum value TrainingInstanceTypeMlC518xlarge = "ml.c5.18xlarge" + + // TrainingInstanceTypeMlC5nXlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC5nXlarge = "ml.c5n.xlarge" + + // TrainingInstanceTypeMlC5n2xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC5n2xlarge = "ml.c5n.2xlarge" + + // TrainingInstanceTypeMlC5n4xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC5n4xlarge = "ml.c5n.4xlarge" + + // TrainingInstanceTypeMlC5n9xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC5n9xlarge = "ml.c5n.9xlarge" + + // TrainingInstanceTypeMlC5n18xlarge is a TrainingInstanceType enum value + TrainingInstanceTypeMlC5n18xlarge = "ml.c5n.18xlarge" ) +// TrainingInstanceType_Values returns all elements of the TrainingInstanceType enum +func TrainingInstanceType_Values() []string { + return []string{ + TrainingInstanceTypeMlM4Xlarge, + TrainingInstanceTypeMlM42xlarge, + TrainingInstanceTypeMlM44xlarge, + TrainingInstanceTypeMlM410xlarge, + TrainingInstanceTypeMlM416xlarge, + TrainingInstanceTypeMlG4dnXlarge, + TrainingInstanceTypeMlG4dn2xlarge, + TrainingInstanceTypeMlG4dn4xlarge, + TrainingInstanceTypeMlG4dn8xlarge, + TrainingInstanceTypeMlG4dn12xlarge, + TrainingInstanceTypeMlG4dn16xlarge, + TrainingInstanceTypeMlM5Large, + TrainingInstanceTypeMlM5Xlarge, + TrainingInstanceTypeMlM52xlarge, + TrainingInstanceTypeMlM54xlarge, + TrainingInstanceTypeMlM512xlarge, + TrainingInstanceTypeMlM524xlarge, + TrainingInstanceTypeMlC4Xlarge, + TrainingInstanceTypeMlC42xlarge, + TrainingInstanceTypeMlC44xlarge, + TrainingInstanceTypeMlC48xlarge, + TrainingInstanceTypeMlP2Xlarge, + TrainingInstanceTypeMlP28xlarge, + 
TrainingInstanceTypeMlP216xlarge, + TrainingInstanceTypeMlP32xlarge, + TrainingInstanceTypeMlP38xlarge, + TrainingInstanceTypeMlP316xlarge, + TrainingInstanceTypeMlP3dn24xlarge, + TrainingInstanceTypeMlC5Xlarge, + TrainingInstanceTypeMlC52xlarge, + TrainingInstanceTypeMlC54xlarge, + TrainingInstanceTypeMlC59xlarge, + TrainingInstanceTypeMlC518xlarge, + TrainingInstanceTypeMlC5nXlarge, + TrainingInstanceTypeMlC5n2xlarge, + TrainingInstanceTypeMlC5n4xlarge, + TrainingInstanceTypeMlC5n9xlarge, + TrainingInstanceTypeMlC5n18xlarge, + } +} + const ( // TrainingJobEarlyStoppingTypeOff is a TrainingJobEarlyStoppingType enum value TrainingJobEarlyStoppingTypeOff = "Off" @@ -47586,6 +52095,14 @@ const ( TrainingJobEarlyStoppingTypeAuto = "Auto" ) +// TrainingJobEarlyStoppingType_Values returns all elements of the TrainingJobEarlyStoppingType enum +func TrainingJobEarlyStoppingType_Values() []string { + return []string{ + TrainingJobEarlyStoppingTypeOff, + TrainingJobEarlyStoppingTypeAuto, + } +} + const ( // TrainingJobSortByOptionsName is a TrainingJobSortByOptions enum value TrainingJobSortByOptionsName = "Name" @@ -47600,6 +52117,16 @@ const ( TrainingJobSortByOptionsFinalObjectiveMetricValue = "FinalObjectiveMetricValue" ) +// TrainingJobSortByOptions_Values returns all elements of the TrainingJobSortByOptions enum +func TrainingJobSortByOptions_Values() []string { + return []string{ + TrainingJobSortByOptionsName, + TrainingJobSortByOptionsCreationTime, + TrainingJobSortByOptionsStatus, + TrainingJobSortByOptionsFinalObjectiveMetricValue, + } +} + const ( // TrainingJobStatusInProgress is a TrainingJobStatus enum value TrainingJobStatusInProgress = "InProgress" @@ -47617,6 +52144,17 @@ const ( TrainingJobStatusStopped = "Stopped" ) +// TrainingJobStatus_Values returns all elements of the TrainingJobStatus enum +func TrainingJobStatus_Values() []string { + return []string{ + TrainingJobStatusInProgress, + TrainingJobStatusCompleted, + TrainingJobStatusFailed, + TrainingJobStatusStopping, + TrainingJobStatusStopped, + } +} + const ( // TransformInstanceTypeMlM4Xlarge is a TransformInstanceType enum value TransformInstanceTypeMlM4Xlarge = "ml.m4.xlarge" @@ -47697,6 +52235,38 @@ const ( TransformInstanceTypeMlM524xlarge = "ml.m5.24xlarge" ) +// TransformInstanceType_Values returns all elements of the TransformInstanceType enum +func TransformInstanceType_Values() []string { + return []string{ + TransformInstanceTypeMlM4Xlarge, + TransformInstanceTypeMlM42xlarge, + TransformInstanceTypeMlM44xlarge, + TransformInstanceTypeMlM410xlarge, + TransformInstanceTypeMlM416xlarge, + TransformInstanceTypeMlC4Xlarge, + TransformInstanceTypeMlC42xlarge, + TransformInstanceTypeMlC44xlarge, + TransformInstanceTypeMlC48xlarge, + TransformInstanceTypeMlP2Xlarge, + TransformInstanceTypeMlP28xlarge, + TransformInstanceTypeMlP216xlarge, + TransformInstanceTypeMlP32xlarge, + TransformInstanceTypeMlP38xlarge, + TransformInstanceTypeMlP316xlarge, + TransformInstanceTypeMlC5Xlarge, + TransformInstanceTypeMlC52xlarge, + TransformInstanceTypeMlC54xlarge, + TransformInstanceTypeMlC59xlarge, + TransformInstanceTypeMlC518xlarge, + TransformInstanceTypeMlM5Large, + TransformInstanceTypeMlM5Xlarge, + TransformInstanceTypeMlM52xlarge, + TransformInstanceTypeMlM54xlarge, + TransformInstanceTypeMlM512xlarge, + TransformInstanceTypeMlM524xlarge, + } +} + const ( // TransformJobStatusInProgress is a TransformJobStatus enum value TransformJobStatusInProgress = "InProgress" @@ -47714,6 +52284,17 @@ const ( TransformJobStatusStopped = 
"Stopped" ) +// TransformJobStatus_Values returns all elements of the TransformJobStatus enum +func TransformJobStatus_Values() []string { + return []string{ + TransformJobStatusInProgress, + TransformJobStatusCompleted, + TransformJobStatusFailed, + TransformJobStatusStopping, + TransformJobStatusStopped, + } +} + const ( // TrialComponentPrimaryStatusInProgress is a TrialComponentPrimaryStatus enum value TrialComponentPrimaryStatusInProgress = "InProgress" @@ -47723,8 +52304,25 @@ const ( // TrialComponentPrimaryStatusFailed is a TrialComponentPrimaryStatus enum value TrialComponentPrimaryStatusFailed = "Failed" + + // TrialComponentPrimaryStatusStopping is a TrialComponentPrimaryStatus enum value + TrialComponentPrimaryStatusStopping = "Stopping" + + // TrialComponentPrimaryStatusStopped is a TrialComponentPrimaryStatus enum value + TrialComponentPrimaryStatusStopped = "Stopped" ) +// TrialComponentPrimaryStatus_Values returns all elements of the TrialComponentPrimaryStatus enum +func TrialComponentPrimaryStatus_Values() []string { + return []string{ + TrialComponentPrimaryStatusInProgress, + TrialComponentPrimaryStatusCompleted, + TrialComponentPrimaryStatusFailed, + TrialComponentPrimaryStatusStopping, + TrialComponentPrimaryStatusStopped, + } +} + const ( // UserProfileSortKeyCreationTime is a UserProfileSortKey enum value UserProfileSortKeyCreationTime = "CreationTime" @@ -47733,6 +52331,14 @@ const ( UserProfileSortKeyLastModifiedTime = "LastModifiedTime" ) +// UserProfileSortKey_Values returns all elements of the UserProfileSortKey enum +func UserProfileSortKey_Values() []string { + return []string{ + UserProfileSortKeyCreationTime, + UserProfileSortKeyLastModifiedTime, + } +} + const ( // UserProfileStatusDeleting is a UserProfileStatus enum value UserProfileStatusDeleting = "Deleting" @@ -47747,6 +52353,16 @@ const ( UserProfileStatusPending = "Pending" ) +// UserProfileStatus_Values returns all elements of the UserProfileStatus enum +func UserProfileStatus_Values() []string { + return []string{ + UserProfileStatusDeleting, + UserProfileStatusFailed, + UserProfileStatusInService, + UserProfileStatusPending, + } +} + const ( // VariantPropertyTypeDesiredInstanceCount is a VariantPropertyType enum value VariantPropertyTypeDesiredInstanceCount = "DesiredInstanceCount" @@ -47757,3 +52373,12 @@ const ( // VariantPropertyTypeDataCaptureConfig is a VariantPropertyType enum value VariantPropertyTypeDataCaptureConfig = "DataCaptureConfig" ) + +// VariantPropertyType_Values returns all elements of the VariantPropertyType enum +func VariantPropertyType_Values() []string { + return []string{ + VariantPropertyTypeDesiredInstanceCount, + VariantPropertyTypeDesiredWeight, + VariantPropertyTypeDataCaptureConfig, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/doc.go index 38b7a815f..0dda1b81f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/doc.go @@ -5,6 +5,12 @@ // // Provides APIs for creating and managing Amazon SageMaker resources. // +// Other Resources: +// +// * Amazon SageMaker Developer Guide (https://docs.aws.amazon.com/sagemaker/latest/dg/whatis.html#first-time-user) +// +// * Amazon Augmented AI Runtime API Reference (https://docs.aws.amazon.com/augmented-ai/2019-11-07/APIReference/Welcome.html) +// // See https://docs.aws.amazon.com/goto/WebAPI/sagemaker-2017-07-24 for more information on this service. 
// // See sagemaker package documentation for more information. diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go index c64131bdd..b2b649cfb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go index 5362b3503..b89f513d5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/api.go @@ -58,28 +58,28 @@ func (c *SecretsManager) CancelRotateSecretRequest(input *CancelRotateSecretInpu // CancelRotateSecret API operation for AWS Secrets Manager. // // Disables automatic scheduled rotation and cancels the rotation of a secret -// if one is currently in progress. +// if currently in progress. // // To re-enable scheduled rotation, call RotateSecret with AutomaticallyRotateAfterDays -// set to a value greater than 0. This will immediately rotate your secret and -// then enable the automatic schedule. +// set to a value greater than 0. This immediately rotates your secret and then +// enables the automatic schedule. // -// If you cancel a rotation that is in progress, it can leave the VersionStage -// labels in an unexpected state. Depending on what step of the rotation was -// in progress, you might need to remove the staging label AWSPENDING from the -// partially created version, specified by the VersionId response value. You -// should also evaluate the partially rotated new version to see if it should -// be deleted, which you can do by removing all staging labels from the new -// version's VersionStage field. +// If you cancel a rotation while in progress, it can leave the VersionStage +// labels in an unexpected state. Depending on the step of the rotation in progress, +// you might need to remove the staging label AWSPENDING from the partially +// created version, specified by the VersionId response value. You should also +// evaluate the partially rotated new version to see if it should be deleted, +// which you can do by removing all staging labels from the new version VersionStage +// field. // // To successfully start a rotation, the staging label AWSPENDING must be in // one of the following states: // -// * Not be attached to any version at all +// * Not attached to any version at all // // * Attached to the same version as the staging label AWSCURRENT // -// If the staging label AWSPENDING is attached to a different version than the +// If the staging label AWSPENDING attached to a different version than the // version with AWSCURRENT then the attempt to rotate fails. // // Minimum permissions @@ -205,7 +205,7 @@ func (c *SecretsManager) CreateSecretRequest(input *CreateSecretInput) (req *req // labels" that identify where the version is in the rotation cycle. The SecretVersionsToStages // field of the secret contains the mapping of staging labels to the active // versions of the secret. 
Versions without a staging label are considered deprecated -// and are not included in the list. +// and not included in the list. // // You provide the secret data to be encrypted by putting text in either the // SecretString parameter or binary data in the SecretBinary parameter, but @@ -213,20 +213,19 @@ func (c *SecretsManager) CreateSecretRequest(input *CreateSecretInput) (req *req // also creates an initial secret version and automatically attaches the staging // label AWSCURRENT to the new version. // -// * If you call an operation that needs to encrypt or decrypt the SecretString -// or SecretBinary for a secret in the same account as the calling user and -// that secret doesn't specify a AWS KMS encryption key, Secrets Manager -// uses the account's default AWS managed customer master key (CMK) with -// the alias aws/secretsmanager. If this key doesn't already exist in your -// account then Secrets Manager creates it for you automatically. All users -// and roles in the same AWS account automatically have access to use the -// default CMK. Note that if an Secrets Manager API call results in AWS having -// to create the account's AWS-managed CMK, it can result in a one-time significant -// delay in returning the result. -// -// * If the secret is in a different AWS account from the credentials calling -// an API that requires encryption or decryption of the secret value then -// you must create and use a custom AWS KMS CMK because you can't access +// * If you call an operation to encrypt or decrypt the SecretString or SecretBinary +// for a secret in the same account as the calling user and that secret doesn't +// specify a AWS KMS encryption key, Secrets Manager uses the account's default +// AWS managed customer master key (CMK) with the alias aws/secretsmanager. +// If this key doesn't already exist in your account then Secrets Manager +// creates it for you automatically. All users and roles in the same AWS +// account automatically have access to use the default CMK. Note that if +// an Secrets Manager API call results in AWS creating the account's AWS-managed +// CMK, it can result in a one-time significant delay in returning the result. +// +// * If the secret resides in a different AWS account from the credentials +// calling an API that requires encryption or decryption of the secret value +// then you must create and use a custom AWS KMS CMK because you can't access // the default CMK for the account using credentials from a different AWS // account. Store the ARN of the CMK in the secret when you create the secret // or when you update it by including it in the KMSKeyId. If you call an @@ -243,10 +242,10 @@ func (c *SecretsManager) CreateSecretRequest(input *CreateSecretInput) (req *req // // * kms:GenerateDataKey - needed only if you use a customer-managed AWS // KMS key to encrypt the secret. You do not need this permission to use -// the account's default AWS managed CMK for Secrets Manager. +// the account default AWS managed CMK for Secrets Manager. // // * kms:Decrypt - needed only if you use a customer-managed AWS KMS key -// to encrypt the secret. You do not need this permission to use the account's +// to encrypt the secret. You do not need this permission to use the account // default AWS managed CMK for Secrets Manager. // // * secretsmanager:TagResource - needed only if you include the Tags parameter. 
@@ -384,7 +383,7 @@ func (c *SecretsManager) DeleteResourcePolicyRequest(input *DeleteResourcePolicy // DeleteResourcePolicy API operation for AWS Secrets Manager. // -// Deletes the resource-based permission policy that's attached to the secret. +// Deletes the resource-based permission policy attached to the secret. // // Minimum permissions // @@ -628,7 +627,7 @@ func (c *SecretsManager) DescribeSecretRequest(input *DescribeSecretInput) (req // DescribeSecret API operation for AWS Secrets Manager. // // Retrieves the details of a secret. It does not include the encrypted fields. -// Only those fields that are populated with a value are returned in the response. +// Secrets Manager only returns fields populated with a value in the response. // // Minimum permissions // @@ -831,10 +830,10 @@ func (c *SecretsManager) GetResourcePolicyRequest(input *GetResourcePolicyInput) // GetResourcePolicy API operation for AWS Secrets Manager. // -// Retrieves the JSON text of the resource-based policy document that's attached -// to the specified secret. The JSON request string input and response output -// are shown formatted with white space and line breaks for better readability. -// Submit your input as a single line JSON string. +// Retrieves the JSON text of the resource-based policy document attached to +// the specified secret. The JSON request string input and response output displays +// formatted code with white space and line breaks for better readability. Submit +// your input as a single line JSON string. // // Minimum permissions // @@ -846,8 +845,7 @@ func (c *SecretsManager) GetResourcePolicyRequest(input *GetResourcePolicyInput) // // * To attach a resource policy to a secret, use PutResourcePolicy. // -// * To delete the resource-based policy that's attached to a secret, use -// DeleteResourcePolicy. +// * To delete the resource-based policy attached to a secret, use DeleteResourcePolicy. // // * To list all of the currently available secrets, use ListSecrets. // @@ -1077,7 +1075,7 @@ func (c *SecretsManager) ListSecretVersionIdsRequest(input *ListSecretVersionIds // // Always check the NextToken response parameter when calling any of the List* // operations. These operations can occasionally return an empty or shorter -// than expected list of results even when there are more results available. +// than expected list of results even when there more results become available. // When this happens, the NextToken response parameter contains a value to pass // to the next call to the same API to request the next part of the list. // @@ -1239,7 +1237,7 @@ func (c *SecretsManager) ListSecretsRequest(input *ListSecretsInput) (req *reque // // Always check the NextToken response parameter when calling any of the List* // operations. These operations can occasionally return an empty or shorter -// than expected list of results even when there are more results available. +// than expected list of results even when there more results become available. // When this happens, the NextToken response parameter contains a value to pass // to the next call to the same API to request the next part of the list. // @@ -1408,7 +1406,7 @@ func (c *SecretsManager) PutResourcePolicyRequest(input *PutResourcePolicyInput) // // Related operations // -// * To retrieve the resource policy that's attached to a secret, use GetResourcePolicy. +// * To retrieve the resource policy attached to a secret, use GetResourcePolicy. 
// // * To delete the resource-based policy that's attached to a secret, use // DeleteResourcePolicy. @@ -1448,6 +1446,9 @@ func (c *SecretsManager) PutResourcePolicyRequest(input *PutResourcePolicyInput) // Lambda function ARN configured and you didn't include such an ARN as a // parameter in this call. // +// * PublicPolicyException +// The resource policy did not prevent broad access to the secret. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/PutResourcePolicy func (c *SecretsManager) PutResourcePolicy(input *PutResourcePolicyInput) (*PutResourcePolicyOutput, error) { req, out := c.PutResourcePolicyRequest(input) @@ -1542,20 +1543,19 @@ func (c *SecretsManager) PutSecretValueRequest(input *PutSecretValueInput) (req // However, if the secret data is different, then the operation fails because // you cannot modify an existing version; you can only create new ones. // -// * If you call an operation that needs to encrypt or decrypt the SecretString -// or SecretBinary for a secret in the same account as the calling user and -// that secret doesn't specify a AWS KMS encryption key, Secrets Manager -// uses the account's default AWS managed customer master key (CMK) with -// the alias aws/secretsmanager. If this key doesn't already exist in your -// account then Secrets Manager creates it for you automatically. All users -// and roles in the same AWS account automatically have access to use the -// default CMK. Note that if an Secrets Manager API call results in AWS having -// to create the account's AWS-managed CMK, it can result in a one-time significant -// delay in returning the result. -// -// * If the secret is in a different AWS account from the credentials calling -// an API that requires encryption or decryption of the secret value then -// you must create and use a custom AWS KMS CMK because you can't access +// * If you call an operation to encrypt or decrypt the SecretString or SecretBinary +// for a secret in the same account as the calling user and that secret doesn't +// specify a AWS KMS encryption key, Secrets Manager uses the account's default +// AWS managed customer master key (CMK) with the alias aws/secretsmanager. +// If this key doesn't already exist in your account then Secrets Manager +// creates it for you automatically. All users and roles in the same AWS +// account automatically have access to use the default CMK. Note that if +// an Secrets Manager API call results in AWS creating the account's AWS-managed +// CMK, it can result in a one-time significant delay in returning the result. +// +// * If the secret resides in a different AWS account from the credentials +// calling an API that requires encryption or decryption of the secret value +// then you must create and use a custom AWS KMS CMK because you can't access // the default CMK for the account using credentials from a different AWS // account. Store the ARN of the CMK in the secret when you create the secret // or when you update it by including it in the KMSKeyId. If you call an @@ -1821,7 +1821,7 @@ func (c *SecretsManager) RotateSecretRequest(input *RotateSecretInput) (req *req // (https://docs.aws.amazon.com/secretsmanager/latest/userguide/rotating-secrets.html) // in the AWS Secrets Manager User Guide. // -// Secrets Manager schedules the next rotation when the previous one is complete. +// Secrets Manager schedules the next rotation when the previous one completes. 
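The PutSecretValue hunk above repeats the CreateSecret guidance about the default CMK. A minimal, hedged sketch of adding a new secret version follows; it assumes a client built as in the CreateSecret sketch earlier, and the SecretId and SecretString values are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

// putNewSecretValue stores a new version of an existing secret. Secrets Manager
// moves the AWSCURRENT staging label to the version it creates here.
func putNewSecretValue(svc *secretsmanager.SecretsManager) {
	out, err := svc.PutSecretValue(&secretsmanager.PutSecretValueInput{
		SecretId:     aws.String("example/app/credentials"),
		SecretString: aws.String(`{"username":"bob","password":"rotated-password"}`),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // the generated String() method prints the new version ID and staging labels
}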
// Secrets Manager schedules the date by adding the rotation interval (number // of days) to the actual date of the last rotation. The service chooses the // hour within that 24-hour date window randomly. The minute is also chosen @@ -1836,10 +1836,9 @@ func (c *SecretsManager) RotateSecretRequest(input *RotateSecretInput) (req *req // // * The AWSPENDING staging label is not attached to any version of the secret. // -// If instead the AWSPENDING staging label is present but is not attached to -// the same version as AWSCURRENT then any later invocation of RotateSecret -// assumes that a previous rotation request is still in progress and returns -// an error. +// If the AWSPENDING staging label is present but not attached to the same version +// as AWSCURRENT then any later invocation of RotateSecret assumes that a previous +// rotation request is still in progress and returns an error. // // Minimum permissions // @@ -1973,14 +1972,14 @@ func (c *SecretsManager) TagResourceRequest(input *TagResourceInput) (req *reque // // * Tag keys and values are case sensitive. // -// * Do not use the aws: prefix in your tag names or values because it is -// reserved for AWS use. You can't edit or delete tag names or values with -// this prefix. Tags with this prefix do not count against your tags per -// secret limit. +// * Do not use the aws: prefix in your tag names or values because AWS reserves +// it for AWS use. You can't edit or delete tag names or values with this +// prefix. Tags with this prefix do not count against your tags per secret +// limit. // -// * If your tagging schema will be used across multiple services and resources, -// remember that other services might have restrictions on allowed characters. -// Generally allowed characters are: letters, spaces, and numbers representable +// * If you use your tagging schema across multiple services and resources, +// remember other services might have restrictions on allowed characters. +// Generally allowed characters: letters, spaces, and numbers representable // in UTF-8, plus the following special characters: + - = . _ : / @. // // If you use tags as part of your security strategy, then adding or removing @@ -2236,20 +2235,19 @@ func (c *SecretsManager) UpdateSecretRequest(input *UpdateSecretInput) (req *req // Secrets Manager automatically attaches the staging label AWSCURRENT to // the new version. // -// * If you call an operation that needs to encrypt or decrypt the SecretString -// or SecretBinary for a secret in the same account as the calling user and -// that secret doesn't specify a AWS KMS encryption key, Secrets Manager -// uses the account's default AWS managed customer master key (CMK) with -// the alias aws/secretsmanager. If this key doesn't already exist in your -// account then Secrets Manager creates it for you automatically. All users -// and roles in the same AWS account automatically have access to use the -// default CMK. Note that if an Secrets Manager API call results in AWS having -// to create the account's AWS-managed CMK, it can result in a one-time significant -// delay in returning the result. 
-// -// * If the secret is in a different AWS account from the credentials calling -// an API that requires encryption or decryption of the secret value then -// you must create and use a custom AWS KMS CMK because you can't access +// * If you call an operation to encrypt or decrypt the SecretString or SecretBinary +// for a secret in the same account as the calling user and that secret doesn't +// specify a AWS KMS encryption key, Secrets Manager uses the account's default +// AWS managed customer master key (CMK) with the alias aws/secretsmanager. +// If this key doesn't already exist in your account then Secrets Manager +// creates it for you automatically. All users and roles in the same AWS +// account automatically have access to use the default CMK. Note that if +// an Secrets Manager API call results in AWS creating the account's AWS-managed +// CMK, it can result in a one-time significant delay in returning the result. +// +// * If the secret resides in a different AWS account from the credentials +// calling an API that requires encryption or decryption of the secret value +// then you must create and use a custom AWS KMS CMK because you can't access // the default CMK for the account using credentials from a different AWS // account. Store the ARN of the CMK in the secret when you create the secret // or when you update it by including it in the KMSKeyId. If you call an @@ -2487,12 +2485,115 @@ func (c *SecretsManager) UpdateSecretVersionStageWithContext(ctx aws.Context, in return out, req.Send() } +const opValidateResourcePolicy = "ValidateResourcePolicy" + +// ValidateResourcePolicyRequest generates a "aws/request.Request" representing the +// client's request for the ValidateResourcePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ValidateResourcePolicy for more information on using the ValidateResourcePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ValidateResourcePolicyRequest method. +// req, resp := client.ValidateResourcePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/ValidateResourcePolicy +func (c *SecretsManager) ValidateResourcePolicyRequest(input *ValidateResourcePolicyInput) (req *request.Request, output *ValidateResourcePolicyOutput) { + op := &request.Operation{ + Name: opValidateResourcePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ValidateResourcePolicyInput{} + } + + output = &ValidateResourcePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// ValidateResourcePolicy API operation for AWS Secrets Manager. +// +// Validates the JSON text of the resource-based policy document attached to +// the specified secret. The JSON request string input and response output displays +// formatted code with white space and line breaks for better readability. Submit +// your input as a single line JSON string. A resource-based policy is optional. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Secrets Manager's +// API operation ValidateResourcePolicy for usage and error information. +// +// Returned Error Types: +// * MalformedPolicyDocumentException +// The policy document that you provided isn't valid. +// +// * ResourceNotFoundException +// We can't find the resource that you asked for. +// +// * InvalidParameterException +// You provided an invalid value for a parameter. +// +// * InternalServiceError +// An error occurred on the server side. +// +// * InvalidRequestException +// You provided a parameter value that is not valid for the current state of +// the resource. +// +// Possible causes: +// +// * You tried to perform the operation on a secret that's currently marked +// deleted. +// +// * You tried to enable rotation on a secret that doesn't already have a +// Lambda function ARN configured and you didn't include such an ARN as a +// parameter in this call. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17/ValidateResourcePolicy +func (c *SecretsManager) ValidateResourcePolicy(input *ValidateResourcePolicyInput) (*ValidateResourcePolicyOutput, error) { + req, out := c.ValidateResourcePolicyRequest(input) + return out, req.Send() +} + +// ValidateResourcePolicyWithContext is the same as ValidateResourcePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See ValidateResourcePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecretsManager) ValidateResourcePolicyWithContext(ctx aws.Context, input *ValidateResourcePolicyInput, opts ...request.Option) (*ValidateResourcePolicyOutput, error) { + req, out := c.ValidateResourcePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + type CancelRotateSecretInput struct { _ struct{} `type:"structure"` - // Specifies the secret for which you want to cancel a rotation request. You - // can specify either the Amazon Resource Name (ARN) or the friendly name of - // the secret. + // Specifies the secret to cancel a rotation request. You can specify either + // the Amazon Resource Name (ARN) or the friendly name of the secret. // // If you specify an ARN, we generally recommend that you specify a complete // ARN. You can specify a partial ARN too—for example, if you don’t include @@ -2504,7 +2605,12 @@ type CancelRotateSecretInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. 
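The hunk above adds the ValidateResourcePolicy wrappers, but the input struct itself sits outside this excerpt, so the ResourcePolicy and SecretId fields in the sketch below are assumptions based on the operation description (a single-line JSON policy, with the resource policy optional on the secret). Treat this as a sketch under those assumptions, not definitive usage; the policy, account ID, and secret name are placeholders, and the client is built as in the CreateSecret sketch earlier.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

// validatePolicy submits a single-line JSON resource policy for validation
// before attaching it with PutResourcePolicy.
func validatePolicy(svc *secretsmanager.SecretsManager) {
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"arn:aws:iam::123456789012:root"},"Action":"secretsmanager:GetSecretValue","Resource":"*"}]}`
	out, err := svc.ValidateResourcePolicy(&secretsmanager.ValidateResourcePolicyInput{
		ResourcePolicy: aws.String(policy),                    // assumed field name
		SecretId:       aws.String("example/app/credentials"), // assumed optional field
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // the generated String() method pretty-prints the validation findings
}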
If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -2551,11 +2657,11 @@ type CancelRotateSecretOutput struct { // The friendly name of the secret for which rotation was canceled. Name *string `min:"1" type:"string"` - // The unique identifier of the version of the secret that was created during - // the rotation. This version might not be complete, and should be evaluated - // for possible deletion. At the very least, you should remove the VersionStage - // value AWSPENDING to enable this version to be deleted. Failing to clean up - // a cancelled rotation can block you from successfully starting future rotations. + // The unique identifier of the version of the secret created during the rotation. + // This version might not be complete, and should be evaluated for possible + // deletion. At the very least, you should remove the VersionStage value AWSPENDING + // to enable this version to be deleted. Failing to clean up a cancelled rotation + // can block you from successfully starting future rotations. VersionId *string `min:"32" type:"string"` } @@ -2599,7 +2705,7 @@ type CreateSecretInput struct { // for you and includes it as the value for this parameter in the request. If // you don't use the SDK and instead generate a raw HTTP request to the Secrets // Manager service endpoint, then you must generate a ClientRequestToken yourself - // for the new version and include that value in the request. + // for the new version and include the value in the request. // // This value helps ensure idempotency. Secrets Manager uses this value to prevent // the accidental creation of duplicate versions if there are failures and retries @@ -2609,9 +2715,9 @@ type CreateSecretInput struct { // * If the ClientRequestToken value isn't already associated with a version // of the secret then a new version of the secret is created. // - // * If a version with this value already exists and that version's SecretString + // * If a version with this value already exists and the version SecretString // and SecretBinary values are the same as those in the request, then the - // request is ignored (the operation is idempotent). + // request is ignored. // // * If a version with this value already exists and that version's SecretString // and SecretBinary values are different from those in the request then the @@ -2638,9 +2744,9 @@ type CreateSecretInput struct { // you automatically the first time it needs to encrypt a version's SecretString // or SecretBinary fields. // - // You can use the account's default CMK to encrypt and decrypt only if you - // call this operation using credentials from the same account that owns the - // secret. If the secret is in a different account, then you must create a custom + // You can use the account default CMK to encrypt and decrypt only if you call + // this operation using credentials from the same account that owns the secret. + // If the secret resides in a different account, then you must create a custom // CMK and specify the ARN in this field. KmsKeyId *string `type:"string"` @@ -2649,10 +2755,10 @@ type CreateSecretInput struct { // The secret name must be ASCII letters, digits, or the following characters // : /_+=.@- // - // Don't end your secret name with a hyphen followed by six characters. 
If you - // do so, you risk confusion and unexpected results when searching for a secret - // by partial ARN. This is because Secrets Manager automatically adds a hyphen - // and six random characters at the end of the ARN. + // Do not end your secret name with a hyphen followed by six characters. If + // you do so, you risk confusion and unexpected results when searching for a + // secret by partial ARN. Secrets Manager automatically adds a hyphen and six + // random characters at the end of the ARN. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -2688,7 +2794,7 @@ type CreateSecretInput struct { // JSON for Parameters (https://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#cli-using-param-json) // in the AWS CLI User Guide. For example: // - // [{"username":"bob"},{"password":"abc123xyz456"}] + // {"username":"bob","password":"abc123xyz456"} // // If your command-line tool or SDK requires quotation marks around the parameter, // you should use single quotes to avoid confusion with the double quotes required @@ -2729,14 +2835,14 @@ type CreateSecretInput struct { // // * Tag keys and values are case sensitive. // - // * Do not use the aws: prefix in your tag names or values because it is - // reserved for AWS use. You can't edit or delete tag names or values with - // this prefix. Tags with this prefix do not count against your tags per - // secret limit. + // * Do not use the aws: prefix in your tag names or values because AWS reserves + // it for AWS use. You can't edit or delete tag names or values with this + // prefix. Tags with this prefix do not count against your tags per secret + // limit. // - // * If your tagging schema will be used across multiple services and resources, - // remember that other services might have restrictions on allowed characters. - // Generally allowed characters are: letters, spaces, and numbers representable + // * If you use your tagging schema across multiple services and resources, + // remember other services might have restrictions on allowed characters. + // Generally allowed characters: letters, spaces, and numbers representable // in UTF-8, plus the following special characters: + - = . _ : / @. Tags []*Tag `type:"list"` } @@ -2838,8 +2944,8 @@ type CreateSecretOutput struct { // The friendly name of the secret that you just created. Name *string `min:"1" type:"string"` - // The unique identifier that's associated with the version of the secret you - // just created. + // The unique identifier associated with the version of the secret you just + // created. VersionId *string `min:"32" type:"string"` } @@ -2874,8 +2980,8 @@ func (s *CreateSecretOutput) SetVersionId(v string) *CreateSecretOutput { // Secrets Manager can't decrypt the protected secret text using the provided // KMS key. type DecryptionFailure struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2892,17 +2998,17 @@ func (s DecryptionFailure) GoString() string { func newErrorDecryptionFailure(v protocol.ResponseMetadata) error { return &DecryptionFailure{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DecryptionFailure) Code() string { +func (s *DecryptionFailure) Code() string { return "DecryptionFailure" } // Message returns the exception's message. 
-func (s DecryptionFailure) Message() string { +func (s *DecryptionFailure) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2910,22 +3016,22 @@ func (s DecryptionFailure) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DecryptionFailure) OrigErr() error { +func (s *DecryptionFailure) OrigErr() error { return nil } -func (s DecryptionFailure) Error() string { +func (s *DecryptionFailure) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DecryptionFailure) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DecryptionFailure) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DecryptionFailure) RequestID() string { - return s.respMetadata.RequestID +func (s *DecryptionFailure) RequestID() string { + return s.RespMetadata.RequestID } type DeleteResourcePolicyInput struct { @@ -2945,7 +3051,12 @@ type DeleteResourcePolicyInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -3055,7 +3166,12 @@ type DeleteSecretInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -3165,7 +3281,12 @@ type DescribeSecretInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. 
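The exception hunks in this area switch the generated error types from value receivers over an unexported respMetadata field to pointer receivers over an exported RespMetadata field. One practical effect, sketched below assuming a Go 1.13+ toolchain and a client built as in the CreateSecret sketch earlier, is that callers can match these typed errors directly with errors.As; the secret name is a placeholder.

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

// deleteIfPresent schedules deletion of a placeholder secret and treats
// "not found" as a no-op instead of a fatal error.
func deleteIfPresent(svc *secretsmanager.SecretsManager) {
	_, err := svc.DeleteSecret(&secretsmanager.DeleteSecretInput{
		SecretId: aws.String("example/app/credentials"),
	})
	if err != nil {
		var nfe *secretsmanager.ResourceNotFoundException
		if errors.As(err, &nfe) {
			// RequestID and StatusCode now read from the exported RespMetadata field.
			fmt.Println("nothing to delete; request id:", nfe.RequestID())
			return
		}
		log.Fatal(err)
	}
	fmt.Println("delete scheduled")
}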
// // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -3209,6 +3330,9 @@ type DescribeSecretOutput struct { // The ARN of the secret. ARN *string `min:"20" type:"string"` + // The date that the secret was created. + CreatedDate *time.Time `type:"timestamp"` + // This value exists if the secret is scheduled for deletion. Some time after // the specified date and time, Secrets Manager deletes the secret and all of // its versions. @@ -3288,6 +3412,12 @@ func (s *DescribeSecretOutput) SetARN(v string) *DescribeSecretOutput { return s } +// SetCreatedDate sets the CreatedDate field's value. +func (s *DescribeSecretOutput) SetCreatedDate(v time.Time) *DescribeSecretOutput { + s.CreatedDate = &v + return s +} + // SetDeletedDate sets the DeletedDate field's value. func (s *DescribeSecretOutput) SetDeletedDate(v time.Time) *DescribeSecretOutput { s.DeletedDate = &v @@ -3371,8 +3501,8 @@ func (s *DescribeSecretOutput) SetVersionIdsToStages(v map[string][]*string) *De // and not in an invalid state. For more information, see How Key State Affects // Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html). type EncryptionFailure struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3389,17 +3519,17 @@ func (s EncryptionFailure) GoString() string { func newErrorEncryptionFailure(v protocol.ResponseMetadata) error { return &EncryptionFailure{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EncryptionFailure) Code() string { +func (s *EncryptionFailure) Code() string { return "EncryptionFailure" } // Message returns the exception's message. -func (s EncryptionFailure) Message() string { +func (s *EncryptionFailure) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3407,22 +3537,68 @@ func (s EncryptionFailure) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EncryptionFailure) OrigErr() error { +func (s *EncryptionFailure) OrigErr() error { return nil } -func (s EncryptionFailure) Error() string { +func (s *EncryptionFailure) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EncryptionFailure) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EncryptionFailure) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EncryptionFailure) RequestID() string { - return s.respMetadata.RequestID +func (s *EncryptionFailure) RequestID() string { + return s.RespMetadata.RequestID +} + +// Allows you to filter your list of secrets. +type Filter struct { + _ struct{} `type:"structure"` + + // Filters your list of secrets by a specific key. + Key *string `type:"string" enum:"FilterNameStringType"` + + // Filters your list of secrets by a specific value. + Values []*string `min:"1" type:"list"` +} + +// String returns the string representation +func (s Filter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Filter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
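DescribeSecretOutput gains a CreatedDate timestamp in the hunk above. A short sketch reading it follows, assuming a client built as in the CreateSecret sketch earlier and a placeholder secret name; the field is a pointer, so it is nil-checked before use.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

// printCreationDate reads the new CreatedDate field from DescribeSecret.
func printCreationDate(svc *secretsmanager.SecretsManager) {
	out, err := svc.DescribeSecret(&secretsmanager.DescribeSecretInput{
		SecretId: aws.String("example/app/credentials"),
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.CreatedDate != nil {
		fmt.Println("secret created:", out.CreatedDate.Format("2006-01-02"))
	}
}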
+func (s *Filter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Filter"} + if s.Values != nil && len(s.Values) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Values", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Filter) SetKey(v string) *Filter { + s.Key = &v + return s +} + +// SetValues sets the Values field's value. +func (s *Filter) SetValues(v []*string) *Filter { + s.Values = v + return s } type GetRandomPasswordInput struct { @@ -3584,7 +3760,12 @@ type GetResourcePolicyInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -3687,7 +3868,12 @@ type GetSecretValueInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -3865,8 +4051,8 @@ func (s *GetSecretValueOutput) SetVersionStages(v []*string) *GetSecretValueOutp // An error occurred on the server side. type InternalServiceError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3883,17 +4069,17 @@ func (s InternalServiceError) GoString() string { func newErrorInternalServiceError(v protocol.ResponseMetadata) error { return &InternalServiceError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceError) Code() string { +func (s *InternalServiceError) Code() string { return "InternalServiceError" } // Message returns the exception's message. -func (s InternalServiceError) Message() string { +func (s *InternalServiceError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3901,28 +4087,28 @@ func (s InternalServiceError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InternalServiceError) OrigErr() error { +func (s *InternalServiceError) OrigErr() error { return nil } -func (s InternalServiceError) Error() string { +func (s *InternalServiceError) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceError) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceError) RequestID() string { + return s.RespMetadata.RequestID } // You provided an invalid NextToken value. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3939,17 +4125,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. -func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3957,28 +4143,28 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // You provided an invalid value for a parameter. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3995,17 +4181,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. 
-func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4013,22 +4199,22 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // You provided a parameter value that is not valid for the current state of @@ -4043,8 +4229,8 @@ func (s InvalidParameterException) RequestID() string { // Lambda function ARN configured and you didn't include such an ARN as a // parameter in this call. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4061,17 +4247,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4079,29 +4265,29 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } // The request failed because it would exceed one of the Secrets Manager internal // limits. 
type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4118,17 +4304,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4136,22 +4322,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListSecretVersionIdsInput struct { @@ -4162,7 +4348,7 @@ type ListSecretVersionIdsInput struct { // deprecated and are subject to deletion by Secrets Manager as needed. IncludeDeprecated *bool `type:"boolean"` - // (Optional) Limits the number of results that you want to include in the response. + // (Optional) Limits the number of results you want to include in the response. // If you don't include this parameter, it defaults to a value that's specific // to the operation. If additional items exist beyond the maximum you specify, // the NextToken response element is present and has a value (isn't null). Include @@ -4174,9 +4360,9 @@ type ListSecretVersionIdsInput struct { MaxResults *int64 `min:"1" type:"integer"` // (Optional) Use this parameter in a request if you receive a NextToken response - // in a previous request that indicates that there's more output available. - // In a subsequent call, set it to the value of the previous call's NextToken - // response to indicate where the output should continue from. + // in a previous request indicating there's more output available. In a subsequent + // call, set it to the value of the previous call NextToken response to indicate + // where the output should continue from. NextToken *string `min:"1" type:"string"` // The identifier for the secret containing the versions you want to list. You @@ -4193,7 +4379,12 @@ type ListSecretVersionIdsInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. 
+ // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -4272,8 +4463,8 @@ type ListSecretVersionIdsOutput struct { Name *string `min:"1" type:"string"` // If present in the response, this value indicates that there's more output - // available than what's included in the current response. This can occur even - // when the response includes no values at all, such as when you ask for a filtered + // available than included in the current response. This can occur even when + // the response includes no values at all, such as when you ask for a filtered // view of a very long list. Use this value in the NextToken request parameter // in a subsequent call to the operation to continue processing and get the // next part of the output. You should repeat this until the NextToken response @@ -4321,7 +4512,10 @@ func (s *ListSecretVersionIdsOutput) SetVersions(v []*SecretVersionsListEntry) * type ListSecretsInput struct { _ struct{} `type:"structure"` - // (Optional) Limits the number of results that you want to include in the response. + // Lists the secret request filters. + Filters []*Filter `type:"list"` + + // (Optional) Limits the number of results you want to include in the response. // If you don't include this parameter, it defaults to a value that's specific // to the operation. If additional items exist beyond the maximum you specify, // the NextToken response element is present and has a value (isn't null). Include @@ -4333,10 +4527,13 @@ type ListSecretsInput struct { MaxResults *int64 `min:"1" type:"integer"` // (Optional) Use this parameter in a request if you receive a NextToken response - // in a previous request that indicates that there's more output available. - // In a subsequent call, set it to the value of the previous call's NextToken - // response to indicate where the output should continue from. + // in a previous request indicating there's more output available. In a subsequent + // call, set it to the value of the previous call NextToken response to indicate + // where the output should continue from. NextToken *string `min:"1" type:"string"` + + // Lists secrets in the requested order. + SortOrder *string `type:"string" enum:"SortOrderType"` } // String returns the string representation @@ -4358,6 +4555,16 @@ func (s *ListSecretsInput) Validate() error { if s.NextToken != nil && len(*s.NextToken) < 1 { invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4365,6 +4572,12 @@ func (s *ListSecretsInput) Validate() error { return nil } +// SetFilters sets the Filters field's value. +func (s *ListSecretsInput) SetFilters(v []*Filter) *ListSecretsInput { + s.Filters = v + return s +} + // SetMaxResults sets the MaxResults field's value. 
func (s *ListSecretsInput) SetMaxResults(v int64) *ListSecretsInput { s.MaxResults = &v @@ -4377,12 +4590,18 @@ func (s *ListSecretsInput) SetNextToken(v string) *ListSecretsInput { return s } +// SetSortOrder sets the SortOrder field's value. +func (s *ListSecretsInput) SetSortOrder(v string) *ListSecretsInput { + s.SortOrder = &v + return s +} + type ListSecretsOutput struct { _ struct{} `type:"structure"` // If present in the response, this value indicates that there's more output - // available than what's included in the current response. This can occur even - // when the response includes no values at all, such as when you ask for a filtered + // available than included in the current response. This can occur even when + // the response includes no values at all, such as when you ask for a filtered // view of a very long list. Use this value in the NextToken request parameter // in a subsequent call to the operation to continue processing and get the // next part of the output. You should repeat this until the NextToken response @@ -4417,8 +4636,8 @@ func (s *ListSecretsOutput) SetSecretList(v []*SecretListEntry) *ListSecretsOutp // The policy document that you provided isn't valid. type MalformedPolicyDocumentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4435,17 +4654,17 @@ func (s MalformedPolicyDocumentException) GoString() string { func newErrorMalformedPolicyDocumentException(v protocol.ResponseMetadata) error { return &MalformedPolicyDocumentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MalformedPolicyDocumentException) Code() string { +func (s *MalformedPolicyDocumentException) Code() string { return "MalformedPolicyDocumentException" } // Message returns the exception's message. -func (s MalformedPolicyDocumentException) Message() string { +func (s *MalformedPolicyDocumentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4453,28 +4672,28 @@ func (s MalformedPolicyDocumentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MalformedPolicyDocumentException) OrigErr() error { +func (s *MalformedPolicyDocumentException) OrigErr() error { return nil } -func (s MalformedPolicyDocumentException) Error() string { +func (s *MalformedPolicyDocumentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MalformedPolicyDocumentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MalformedPolicyDocumentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MalformedPolicyDocumentException) RequestID() string { - return s.respMetadata.RequestID +func (s *MalformedPolicyDocumentException) RequestID() string { + return s.RespMetadata.RequestID } // The request failed because you did not complete all the prerequisite steps. 
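ListSecretsInput gains Filters and SortOrder above, with each Filter validated for at least one value. The sketch below pages through matching secrets; it assumes a client built as in the CreateSecret sketch earlier, assumes "name" and "asc" are valid members of the FilterNameStringType and SortOrderType enums named in the struct tags, and uses a placeholder prefix as the filter value.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

// listByNamePrefix pages through secrets whose name matches a filter value.
func listByNamePrefix(svc *secretsmanager.SecretsManager) {
	input := &secretsmanager.ListSecretsInput{
		Filters: []*secretsmanager.Filter{{
			Key:    aws.String("name"), // assumed FilterNameStringType value
			Values: []*string{aws.String("example/")},
		}},
		SortOrder: aws.String("asc"), // assumed SortOrderType value
	}
	for {
		out, err := svc.ListSecrets(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range out.SecretList {
			fmt.Println(s) // each SecretListEntry prints via its generated String() method
		}
		// Always check NextToken, as the comments above note; an empty page does
		// not necessarily mean the listing is complete.
		if out.NextToken == nil {
			break
		}
		input.NextToken = out.NextToken
	}
}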
type PreconditionNotMetException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4491,17 +4710,17 @@ func (s PreconditionNotMetException) GoString() string { func newErrorPreconditionNotMetException(v protocol.ResponseMetadata) error { return &PreconditionNotMetException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PreconditionNotMetException) Code() string { +func (s *PreconditionNotMetException) Code() string { return "PreconditionNotMetException" } // Message returns the exception's message. -func (s PreconditionNotMetException) Message() string { +func (s *PreconditionNotMetException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4509,27 +4728,87 @@ func (s PreconditionNotMetException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PreconditionNotMetException) OrigErr() error { +func (s *PreconditionNotMetException) OrigErr() error { return nil } -func (s PreconditionNotMetException) Error() string { +func (s *PreconditionNotMetException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PreconditionNotMetException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PreconditionNotMetException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PreconditionNotMetException) RequestID() string { - return s.respMetadata.RequestID +func (s *PreconditionNotMetException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The resource policy did not prevent broad access to the secret. +type PublicPolicyException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s PublicPolicyException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicPolicyException) GoString() string { + return s.String() +} + +func newErrorPublicPolicyException(v protocol.ResponseMetadata) error { + return &PublicPolicyException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *PublicPolicyException) Code() string { + return "PublicPolicyException" +} + +// Message returns the exception's message. +func (s *PublicPolicyException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *PublicPolicyException) OrigErr() error { + return nil +} + +func (s *PublicPolicyException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *PublicPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *PublicPolicyException) RequestID() string { + return s.RespMetadata.RequestID } type PutResourcePolicyInput struct { _ struct{} `type:"structure"` + // Makes an optional API call to Zelkova to validate the Resource Policy to + // prevent broad access to your secret. + BlockPublicPolicy *bool `type:"boolean"` + // A JSON-formatted string that's constructed according to the grammar and syntax // for an AWS resource-based policy. The policy in the string identifies who // can access or manage this secret and its versions. For information on how @@ -4553,7 +4832,12 @@ type PutResourcePolicyInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -4591,6 +4875,12 @@ func (s *PutResourcePolicyInput) Validate() error { return nil } +// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. +func (s *PutResourcePolicyInput) SetBlockPublicPolicy(v bool) *PutResourcePolicyInput { + s.BlockPublicPolicy = &v + return s +} + // SetResourcePolicy sets the ResourcePolicy field's value. func (s *PutResourcePolicyInput) SetResourcePolicy(v string) *PutResourcePolicyInput { s.ResourcePolicy = &v @@ -4606,11 +4896,11 @@ func (s *PutResourcePolicyInput) SetSecretId(v string) *PutResourcePolicyInput { type PutResourcePolicyOutput struct { _ struct{} `type:"structure"` - // The ARN of the secret that the resource-based policy was retrieved for. + // The ARN of the secret retrieved by the resource-based policy. ARN *string `min:"20" type:"string"` - // The friendly name of the secret that the resource-based policy was retrieved - // for. + // The friendly name of the secret that the retrieved by the resource-based + // policy. Name *string `min:"1" type:"string"` } @@ -4661,7 +4951,7 @@ type PutSecretValueInput struct { // or SecretBinary values are the same as those in the request then the request // is ignored (the operation is idempotent). // - // * If a version with this value already exists and that version's SecretString + // * If a version with this value already exists and the version of the SecretString // and SecretBinary values are different from those in the request then the // request fails because you cannot modify an existing secret version. You // can only create new versions to store new secret values. @@ -4696,7 +4986,12 @@ type PutSecretValueInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. 
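PutResourcePolicyInput gains the BlockPublicPolicy flag above, and the new PublicPolicyException is returned when a submitted policy would leave the secret broadly accessible. The sketch below combines the two; the policy, account ID, and secret name are placeholders, a Go 1.13+ toolchain is assumed for errors.As, and the client is built as in the CreateSecret sketch earlier.

package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

// attachPolicy attaches a resource policy and asks Secrets Manager to reject
// policies that grant broad public access.
func attachPolicy(svc *secretsmanager.SecretsManager) {
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"arn:aws:iam::123456789012:root"},"Action":"secretsmanager:GetSecretValue","Resource":"*"}]}`
	_, err := svc.PutResourcePolicy(&secretsmanager.PutResourcePolicyInput{
		SecretId:          aws.String("example/app/credentials"),
		ResourcePolicy:    aws.String(policy),
		BlockPublicPolicy: aws.Bool(true),
	})
	if err != nil {
		var ppe *secretsmanager.PublicPolicyException
		if errors.As(err, &ppe) {
			// The policy was rejected because it did not prevent broad access.
			fmt.Println("policy rejected:", ppe.Message())
			return
		}
		log.Fatal(err)
	}
	fmt.Println("resource policy attached")
}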
If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -4856,8 +5151,8 @@ func (s *PutSecretValueOutput) SetVersionStages(v []*string) *PutSecretValueOutp // A resource with the ID you requested already exists. type ResourceExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4874,17 +5169,17 @@ func (s ResourceExistsException) GoString() string { func newErrorResourceExistsException(v protocol.ResponseMetadata) error { return &ResourceExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceExistsException) Code() string { +func (s *ResourceExistsException) Code() string { return "ResourceExistsException" } // Message returns the exception's message. -func (s ResourceExistsException) Message() string { +func (s *ResourceExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4892,28 +5187,28 @@ func (s ResourceExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceExistsException) OrigErr() error { +func (s *ResourceExistsException) OrigErr() error { return nil } -func (s ResourceExistsException) Error() string { +func (s *ResourceExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceExistsException) RequestID() string { + return s.RespMetadata.RequestID } // We can't find the resource that you asked for. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4930,17 +5225,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4948,22 +5243,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type RestoreSecretInput struct { @@ -4983,7 +5278,12 @@ type RestoreSecretInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -5066,9 +5366,9 @@ type RotateSecretInput struct { // service endpoint, then you must generate a ClientRequestToken yourself for // new versions and include that value in the request. // - // You only need to specify your own value if you are implementing your own - // retry logic and want to ensure that a given secret is not created twice. - // We recommend that you generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) + // You only need to specify your own value if you implement your own retry logic + // and want to ensure that a given secret is not created twice. We recommend + // that you generate a UUID-type (https://wikipedia.org/wiki/Universally_unique_identifier) // value to ensure uniqueness within the specified secret. // // Secrets Manager uses this value to prevent the accidental creation of duplicate @@ -5095,7 +5395,12 @@ type RotateSecretInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -5259,7 +5564,10 @@ type SecretListEntry struct { // in the AWS Secrets Manager User Guide. ARN *string `min:"20" type:"string"` - // The date and time on which this secret was deleted. 
Not present on active + // The date and time when a secret was created. + CreatedDate *time.Time `type:"timestamp"` + + // The date and time the deletion of the secret occurred. Not present on active // secrets. The secret can be recovered until the number of days in the recovery // window has passed, as specified in the RecoveryWindowInDays parameter of // the DeleteSecret operation. @@ -5268,10 +5576,10 @@ type SecretListEntry struct { // The user-provided description of the secret. Description *string `type:"string"` - // The ARN or alias of the AWS KMS customer master key (CMK) that's used to - // encrypt the SecretString and SecretBinary fields in each version of the secret. - // If you don't provide a key, then Secrets Manager defaults to encrypting the - // secret fields with the default KMS CMK (the one named awssecretsmanager) + // The ARN or alias of the AWS KMS customer master key (CMK) used to encrypt + // the SecretString and SecretBinary fields in each version of the secret. If + // you don't provide a key, then Secrets Manager defaults to encrypting the + // secret fields with the default KMS CMK, the key named awssecretsmanager, // for this account. KmsKeyId *string `type:"string"` @@ -5297,24 +5605,24 @@ type SecretListEntry struct { // Indicates whether automatic, scheduled rotation is enabled for this secret. RotationEnabled *bool `type:"boolean"` - // The ARN of an AWS Lambda function that's invoked by Secrets Manager to rotate - // and expire the secret either automatically per the schedule or manually by - // a call to RotateSecret. + // The ARN of an AWS Lambda function invoked by Secrets Manager to rotate and + // expire the secret either automatically per the schedule or manually by a + // call to RotateSecret. RotationLambdaARN *string `type:"string"` // A structure that defines the rotation configuration for the secret. RotationRules *RotationRulesType `type:"structure"` // A list of all of the currently assigned SecretVersionStage staging labels - // and the SecretVersionId that each is attached to. Staging labels are used - // to keep track of the different versions during the rotation process. + // and the SecretVersionId attached to each one. Staging labels are used to + // keep track of the different versions during the rotation process. // // A version that does not have any SecretVersionStage is considered deprecated // and subject to deletion. Such versions are not included in this list. SecretVersionsToStages map[string][]*string `type:"map"` - // The list of user-defined tags that are associated with the secret. To add - // tags to a secret, use TagResource. To remove tags, use UntagResource. + // The list of user-defined tags associated with the secret. To add tags to + // a secret, use TagResource. To remove tags, use UntagResource. Tags []*Tag `type:"list"` } @@ -5334,6 +5642,12 @@ func (s *SecretListEntry) SetARN(v string) *SecretListEntry { return s } +// SetCreatedDate sets the CreatedDate field's value. +func (s *SecretListEntry) SetCreatedDate(v time.Time) *SecretListEntry { + s.CreatedDate = &v + return s +} + // SetDeletedDate sets the DeletedDate field's value. func (s *SecretListEntry) SetDeletedDate(v time.Time) *SecretListEntry { s.DeletedDate = &v @@ -5472,7 +5786,7 @@ type Tag struct { // The key identifier, or name, of the tag. Key *string `min:"1" type:"string"` - // The string value that's associated with the key of the tag. + // The string value associated with the key of the tag. 
Value *string `type:"string"` } @@ -5527,7 +5841,12 @@ type TagResourceInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -5627,7 +5946,12 @@ type UntagResourceInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -5776,7 +6100,12 @@ type UpdateSecretInput struct { // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -5926,7 +6255,7 @@ func (s *UpdateSecretOutput) SetVersionId(v string) *UpdateSecretOutput { type UpdateSecretVersionStageInput struct { _ struct{} `type:"structure"` - // (Optional) The secret version ID that you want to add the staging label to. + // (Optional) The secret version ID that you want to add the staging label to. // If you want to remove a label from a version, then do not specify this parameter. // // If the staging label is already attached to a different version of the secret, @@ -5941,9 +6270,9 @@ type UpdateSecretVersionStageInput struct { // the version ID does not match, then the operation fails. RemoveFromVersionId *string `min:"32" type:"string"` - // Specifies the secret with the version whose list of staging labels you want - // to modify. You can specify either the Amazon Resource Name (ARN) or the friendly - // name of the secret. + // Specifies the secret with the version whose list of staging labels you + // want to modify.
You can specify either the Amazon Resource Name (ARN) or + // the friendly name of the secret. // // If you specify an ARN, we generally recommend that you specify a complete // ARN. You can specify a partial ARN too—for example, if you don’t include // the final hyphen and six random characters that Secrets Manager adds at the // end of the ARN when you created the secret. A partial ARN match can work // as long as it uniquely matches only one secret. However, if your secret has // a name that ends in a hyphen followed by six characters (before Secrets Manager // adds the hyphen and six characters to the ARN) and you try to use that as // a partial ARN, then those characters cause Secrets Manager to assume that // you’re specifying a complete ARN. This confusion can cause unexpected results. // To avoid this situation, we recommend that you don’t create secret names - // that end with a hyphen followed by six characters. + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions. // // SecretId is a required field SecretId *string `min:"1" type:"string" required:"true"` @@ -6031,10 +6365,10 @@ func (s *UpdateSecretVersionStageInput) SetVersionStage(v string) *UpdateSecretV type UpdateSecretVersionStageOutput struct { _ struct{} `type:"structure"` - // The ARN of the secret with the staging label that was modified. + // The ARN of the secret with the modified staging label. ARN *string `min:"20" type:"string"` - // The friendly name of the secret with the staging label that was modified. + // The friendly name of the secret with the modified staging label. Name *string `min:"1" type:"string"` } @@ -6059,3 +6393,185 @@ func (s *UpdateSecretVersionStageOutput) SetName(v string) *UpdateSecretVersionS s.Name = &v return s } + +type ValidateResourcePolicyInput struct { + _ struct{} `type:"structure"` + + // Identifies the Resource Policy attached to the secret. + // + // ResourcePolicy is a required field + ResourcePolicy *string `min:"1" type:"string" required:"true"` + + // The identifier for the secret for which you want to validate a resource policy. + // You can specify either the Amazon Resource Name (ARN) or the friendly name + // of the secret. + // + // If you specify an ARN, we generally recommend that you specify a complete + // ARN. You can specify a partial ARN too—for example, if you don’t include + // the final hyphen and six random characters that Secrets Manager adds at the + // end of the ARN when you created the secret. A partial ARN match can work + // as long as it uniquely matches only one secret. However, if your secret has + // a name that ends in a hyphen followed by six characters (before Secrets Manager + // adds the hyphen and six characters to the ARN) and you try to use that as + // a partial ARN, then those characters cause Secrets Manager to assume that + // you’re specifying a complete ARN. This confusion can cause unexpected results. + // To avoid this situation, we recommend that you don’t create secret names + // ending with a hyphen followed by six characters. + // + // If you specify an incomplete ARN without the random suffix, and instead provide + // the 'friendly name', you must not include the random suffix. If you do include + // the random suffix added by Secrets Manager, you receive either a ResourceNotFoundException + // or an AccessDeniedException error, depending on your permissions.
+ SecretId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ValidateResourcePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidateResourcePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ValidateResourcePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ValidateResourcePolicyInput"} + if s.ResourcePolicy == nil { + invalidParams.Add(request.NewErrParamRequired("ResourcePolicy")) + } + if s.ResourcePolicy != nil && len(*s.ResourcePolicy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourcePolicy", 1)) + } + if s.SecretId != nil && len(*s.SecretId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SecretId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourcePolicy sets the ResourcePolicy field's value. +func (s *ValidateResourcePolicyInput) SetResourcePolicy(v string) *ValidateResourcePolicyInput { + s.ResourcePolicy = &v + return s +} + +// SetSecretId sets the SecretId field's value. +func (s *ValidateResourcePolicyInput) SetSecretId(v string) *ValidateResourcePolicyInput { + s.SecretId = &v + return s +} + +type ValidateResourcePolicyOutput struct { + _ struct{} `type:"structure"` + + // Returns a message stating that your Resource Policy passed validation. + PolicyValidationPassed *bool `type:"boolean"` + + // Returns an error message if your policy doesn't pass validation. + ValidationErrors []*ValidationErrorsEntry `type:"list"` +} + +// String returns the string representation +func (s ValidateResourcePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidateResourcePolicyOutput) GoString() string { + return s.String() +} + +// SetPolicyValidationPassed sets the PolicyValidationPassed field's value. +func (s *ValidateResourcePolicyOutput) SetPolicyValidationPassed(v bool) *ValidateResourcePolicyOutput { + s.PolicyValidationPassed = &v + return s +} + +// SetValidationErrors sets the ValidationErrors field's value. +func (s *ValidateResourcePolicyOutput) SetValidationErrors(v []*ValidationErrorsEntry) *ValidateResourcePolicyOutput { + s.ValidationErrors = v + return s +} + +// Displays errors that occurred during validation of the resource policy. +type ValidationErrorsEntry struct { + _ struct{} `type:"structure"` + + // Checks the name of the policy. + CheckName *string `min:"1" type:"string"` + + // Displays error messages if validation encounters problems during validation + // of the resource policy. + ErrorMessage *string `type:"string"` +} + +// String returns the string representation +func (s ValidationErrorsEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ValidationErrorsEntry) GoString() string { + return s.String() +} + +// SetCheckName sets the CheckName field's value. +func (s *ValidationErrorsEntry) SetCheckName(v string) *ValidationErrorsEntry { + s.CheckName = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value.
+func (s *ValidationErrorsEntry) SetErrorMessage(v string) *ValidationErrorsEntry { + s.ErrorMessage = &v + return s +} + +const ( + // FilterNameStringTypeDescription is a FilterNameStringType enum value + FilterNameStringTypeDescription = "description" + + // FilterNameStringTypeName is a FilterNameStringType enum value + FilterNameStringTypeName = "name" + + // FilterNameStringTypeTagKey is a FilterNameStringType enum value + FilterNameStringTypeTagKey = "tag-key" + + // FilterNameStringTypeTagValue is a FilterNameStringType enum value + FilterNameStringTypeTagValue = "tag-value" + + // FilterNameStringTypeAll is a FilterNameStringType enum value + FilterNameStringTypeAll = "all" +) + +// FilterNameStringType_Values returns all elements of the FilterNameStringType enum +func FilterNameStringType_Values() []string { + return []string{ + FilterNameStringTypeDescription, + FilterNameStringTypeName, + FilterNameStringTypeTagKey, + FilterNameStringTypeTagValue, + FilterNameStringTypeAll, + } +} + +const ( + // SortOrderTypeAsc is a SortOrderType enum value + SortOrderTypeAsc = "asc" + + // SortOrderTypeDesc is a SortOrderType enum value + SortOrderTypeDesc = "desc" +) + +// SortOrderType_Values returns all elements of the SortOrderType enum +func SortOrderType_Values() []string { + return []string{ + SortOrderTypeAsc, + SortOrderTypeDesc, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/doc.go b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/doc.go index b931ba87c..71cfce041 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/doc.go @@ -3,7 +3,7 @@ // Package secretsmanager provides the client and types for making API // requests to AWS Secrets Manager. // -// AWS Secrets Manager is a web service that enables you to store, manage, and +// AWS Secrets Manager provides a service to enable you to store, manage, and // retrieve, secrets. // // This guide provides descriptions of the Secrets Manager API. For more information @@ -14,25 +14,25 @@ // This version of the Secrets Manager API Reference documents the Secrets Manager // API version 2017-10-17. // -// As an alternative to using the API directly, you can use one of the AWS SDKs, -// which consist of libraries and sample code for various programming languages -// and platforms (such as Java, Ruby, .NET, iOS, and Android). The SDKs provide -// a convenient way to create programmatic access to AWS Secrets Manager. For -// example, the SDKs take care of cryptographically signing requests, managing -// errors, and retrying requests automatically. For more information about the -// AWS SDKs, including how to download and install them, see Tools for Amazon -// Web Services (http://aws.amazon.com/tools/). -// -// We recommend that you use the AWS SDKs to make programmatic API calls to -// Secrets Manager. However, you also can use the Secrets Manager HTTP Query -// API to make direct calls to the Secrets Manager web service. To learn more -// about the Secrets Manager HTTP Query API, see Making Query Requests (https://docs.aws.amazon.com/secretsmanager/latest/userguide/query-requests.html) +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms such as Java, Ruby, .NET, iOS, and Android. The SDKs provide a +// convenient way to create programmatic access to AWS Secrets Manager. 
For +// example, the SDKs provide cryptographically signing requests, managing errors, +// and retrying requests automatically. For more information about the AWS SDKs, +// including downloading and installing them, see Tools for Amazon Web Services +// (http://aws.amazon.com/tools/). +// +// We recommend you use the AWS SDKs to make programmatic API calls to Secrets +// Manager. However, you also can use the Secrets Manager HTTP Query API to +// make direct calls to the Secrets Manager web service. To learn more about +// the Secrets Manager HTTP Query API, see Making Query Requests (https://docs.aws.amazon.com/secretsmanager/latest/userguide/query-requests.html) // in the AWS Secrets Manager User Guide. // -// Secrets Manager supports GET and POST requests for all actions. That is, -// the API doesn't require you to use GET for some actions and POST for others. -// However, GET requests are subject to the limitation size of a URL. Therefore, -// for operations that require larger sizes, use a POST request. +// Secrets Manager API supports GET and POST requests for all actions, and doesn't +// require you to use GET for some actions and POST for others. However, GET +// requests are subject to the limitation size of a URL. Therefore, for operations +// that require larger sizes, use a POST request. // // Support and Feedback for AWS Secrets Manager // @@ -44,25 +44,24 @@ // How examples are presented // // The JSON that AWS Secrets Manager expects as your request parameters and -// that the service returns as a response to HTTP query requests are single, +// the service returns as a response to HTTP query requests contain single, // long strings without line breaks or white space formatting. The JSON shown -// in the examples is formatted with both line breaks and white space to improve -// readability. When example input parameters would also result in long strings -// that extend beyond the screen, we insert line breaks to enhance readability. -// You should always submit the input as a single JSON text string. +// in the examples displays the code formatted with both line breaks and white +// space to improve readability. When example input parameters can also cause +// long strings extending beyond the screen, you can insert line breaks to enhance +// readability. You should always submit the input as a single JSON text string. // // Logging API Requests // // AWS Secrets Manager supports AWS CloudTrail, a service that records AWS API // calls for your AWS account and delivers log files to an Amazon S3 bucket. // By using information that's collected by AWS CloudTrail, you can determine -// which requests were successfully made to Secrets Manager, who made the request, -// when it was made, and so on. For more about AWS Secrets Manager and its support +// the requests successfully made to Secrets Manager, who made the request, +// when it was made, and so on. For more about AWS Secrets Manager and support // for AWS CloudTrail, see Logging AWS Secrets Manager Events with AWS CloudTrail // (http://docs.aws.amazon.com/secretsmanager/latest/userguide/monitoring.html#monitoring_cloudtrail) // in the AWS Secrets Manager User Guide. To learn more about CloudTrail, including -// how to turn it on and find your log files, see the AWS CloudTrail User Guide -// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). 
+// enabling it and find your log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). // // See https://docs.aws.amazon.com/goto/WebAPI/secretsmanager-2017-10-17 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/errors.go b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/errors.go index df82716bb..ab90196bb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/errors.go @@ -77,6 +77,12 @@ const ( // The request failed because you did not complete all the prerequisite steps. ErrCodePreconditionNotMetException = "PreconditionNotMetException" + // ErrCodePublicPolicyException for service response error code + // "PublicPolicyException". + // + // The resource policy did not prevent broad access to the secret. + ErrCodePublicPolicyException = "PublicPolicyException" + // ErrCodeResourceExistsException for service response error code // "ResourceExistsException". // @@ -100,6 +106,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "LimitExceededException": newErrorLimitExceededException, "MalformedPolicyDocumentException": newErrorMalformedPolicyDocumentException, "PreconditionNotMetException": newErrorPreconditionNotMetException, + "PublicPolicyException": newErrorPublicPolicyException, "ResourceExistsException": newErrorResourceExistsException, "ResourceNotFoundException": newErrorResourceNotFoundException, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go index e9c80e1d3..abb0bee7a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go b/vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go index 118713f6b..59d5d8f34 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/securityhub/api.go @@ -349,6 +349,28 @@ func (c *SecurityHub) BatchImportFindingsRequest(input *BatchImportFindingsInput // The maximum allowed size for a finding is 240 Kb. An error is returned for // any finding larger than 240 Kb. // +// After a finding is created, BatchImportFindings cannot be used to update +// the following finding fields and objects, which Security Hub customers use +// to manage their investigation workflow. +// +// * Confidence +// +// * Criticality +// +// * Note +// +// * RelatedFindings +// +// * Severity +// +// * Types +// +// * UserDefinedFields +// +// * VerificationState +// +// * Workflow +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
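The Secrets Manager additions above introduce the BlockPublicPolicy parameter on PutResourcePolicy, the ValidateResourcePolicy input and output types, and the PublicPolicyException error code. The following is an illustrative sketch only, not part of the vendored SDK code: it shows how a caller might exercise that surface once this vendoring lands. The secret name and policy document are placeholder values, and it assumes the ValidateResourcePolicy operation is exposed by this SDK version.

// Illustrative sketch; the secret name and policy JSON below are placeholders.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/secretsmanager"
)

func main() {
	svc := secretsmanager.New(session.Must(session.NewSession()))

	// A deliberately broad policy ("Principal": "*"), used only to illustrate the checks.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":"*","Action":"secretsmanager:GetSecretValue","Resource":"*"}]}`

	// ValidateResourcePolicy reviews the policy without attaching it to the secret.
	vOut, err := svc.ValidateResourcePolicy(&secretsmanager.ValidateResourcePolicyInput{
		SecretId:       aws.String("example-secret"), // hypothetical secret name
		ResourcePolicy: aws.String(policy),
	})
	if err == nil && !aws.BoolValue(vOut.PolicyValidationPassed) {
		for _, v := range vOut.ValidationErrors {
			fmt.Println(aws.StringValue(v.CheckName), aws.StringValue(v.ErrorMessage))
		}
	}

	// With BlockPublicPolicy set, attaching a policy that allows broad access is
	// expected to fail with the new PublicPolicyException error code.
	_, err = svc.PutResourcePolicy(&secretsmanager.PutResourcePolicyInput{
		SecretId:          aws.String("example-secret"),
		ResourcePolicy:    aws.String(policy),
		BlockPublicPolicy: aws.Bool(true),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == secretsmanager.ErrCodePublicPolicyException {
		fmt.Println("resource policy rejected:", aerr.Message())
	}
}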
@@ -393,6 +415,129 @@ func (c *SecurityHub) BatchImportFindingsWithContext(ctx aws.Context, input *Bat return out, req.Send() } +const opBatchUpdateFindings = "BatchUpdateFindings" + +// BatchUpdateFindingsRequest generates a "aws/request.Request" representing the +// client's request for the BatchUpdateFindings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchUpdateFindings for more information on using the BatchUpdateFindings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchUpdateFindingsRequest method. +// req, resp := client.BatchUpdateFindingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/BatchUpdateFindings +func (c *SecurityHub) BatchUpdateFindingsRequest(input *BatchUpdateFindingsInput) (req *request.Request, output *BatchUpdateFindingsOutput) { + op := &request.Operation{ + Name: opBatchUpdateFindings, + HTTPMethod: "PATCH", + HTTPPath: "/findings/batchupdate", + } + + if input == nil { + input = &BatchUpdateFindingsInput{} + } + + output = &BatchUpdateFindingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchUpdateFindings API operation for AWS SecurityHub. +// +// Used by Security Hub customers to update information about their investigation +// into a finding. Requested by master accounts or member accounts. Master accounts +// can update findings for their account and their member accounts. Member accounts +// can update findings for their account. +// +// Updates from BatchUpdateFindings do not affect the value of UpdatedAt for +// a finding. +// +// Master and member accounts can use BatchUpdateFindings to update the following +// finding fields and objects. +// +// * Confidence +// +// * Criticality +// +// * Note +// +// * RelatedFindings +// +// * Severity +// +// * Types +// +// * UserDefinedFields +// +// * VerificationState +// +// * Workflow +// +// You can configure IAM policies to restrict access to fields and field values. +// For example, you might not want member accounts to be able to suppress findings +// or change the finding severity. See Configuring access to BatchUpdateFindings +// (https://docs.aws.amazon.com/securityhub/latest/userguide/finding-update-batchupdatefindings.html#batchupdatefindings-configure-access) +// in the AWS Security Hub User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SecurityHub's +// API operation BatchUpdateFindings for usage and error information. +// +// Returned Error Types: +// * InternalException +// Internal server error. +// +// * InvalidInputException +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. 
+// +// * LimitExceededException +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error code describes the limit exceeded. +// +// * InvalidAccessException +// AWS Security Hub isn't enabled for the account used to make this request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/BatchUpdateFindings +func (c *SecurityHub) BatchUpdateFindings(input *BatchUpdateFindingsInput) (*BatchUpdateFindingsOutput, error) { + req, out := c.BatchUpdateFindingsRequest(input) + return out, req.Send() +} + +// BatchUpdateFindingsWithContext is the same as BatchUpdateFindings with the addition of +// the ability to pass a context and additional request options. +// +// See BatchUpdateFindings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) BatchUpdateFindingsWithContext(ctx aws.Context, input *BatchUpdateFindingsInput, opts ...request.Option) (*BatchUpdateFindingsOutput, error) { + req, out := c.BatchUpdateFindingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateActionTarget = "CreateActionTarget" // CreateActionTargetRequest generates a "aws/request.Request" representing the @@ -640,10 +785,10 @@ func (c *SecurityHub) CreateMembersRequest(input *CreateMembersInput) (req *requ // Security Hub and become member accounts in Security Hub. // // If the account owner accepts the invitation, the account becomes a member -// account in Security Hub, and a permission policy is added that permits the -// master account to view the findings generated in the member account. When -// Security Hub is enabled in the invited account, findings start to be sent -// to both the member and master accounts. +// account in Security Hub. A permissions policy is added that permits the master +// account to view the findings generated in the member account. When Security +// Hub is enabled in the invited account, findings start to be sent to both +// the member and master accounts. // // To remove the association between the master and member accounts, use the // DisassociateFromMasterAccount or DisassociateMembers operation. @@ -2277,7 +2422,7 @@ func (c *SecurityHub) EnableImportFindingsForProductRequest(input *EnableImportF // Enables the integration of a partner product with Security Hub. Integrated // products send findings to Security Hub. // -// When you enable a product integration, a permission policy that grants permission +// When you enable a product integration, a permissions policy that grants permission // for the product to send findings to Security Hub is applied. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2376,14 +2521,24 @@ func (c *SecurityHub) EnableSecurityHubRequest(input *EnableSecurityHubInput) (r // you specify in the request. // // When you enable Security Hub, you grant to Security Hub the permissions necessary -// to gather findings from AWS Config, Amazon GuardDuty, Amazon Inspector, and -// Amazon Macie. +// to gather findings from other services that are integrated with Security +// Hub. 
// // When you use the EnableSecurityHub operation to enable Security Hub, you -// also automatically enable the CIS AWS Foundations standard. You do not enable -// the Payment Card Industry Data Security Standard (PCI DSS) standard. To enable -// a standard, use the BatchEnableStandards operation. To disable a standard, -// use the BatchDisableStandards operation. +// also automatically enable the following standards. +// +// * CIS AWS Foundations +// +// * AWS Foundational Security Best Practices +// +// You do not enable the Payment Card Industry Data Security Standard (PCI DSS) +// standard. +// +// To not enable the automatically enabled standards, set EnableDefaultStandards +// to false. +// +// After you enable Security Hub, to enable a standard, use the BatchEnableStandards +// operation. To disable a standard, use the BatchDisableStandards operation. // // To learn more, see Setting Up AWS Security Hub (https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-settingup.html) // in the AWS Security Hub User Guide. @@ -4195,6 +4350,8 @@ func (c *SecurityHub) UpdateFindingsRequest(input *UpdateFindingsInput) (req *re // UpdateFindings API operation for AWS SecurityHub. // +// UpdateFindings is deprecated. Instead of UpdateFindings, use BatchUpdateFindings. +// // Updates the Note and RecordState of the Security Hub-aggregated findings // that the filter attributes specify. Any member account that can view the // finding also sees the update to the finding. @@ -4340,6 +4497,100 @@ func (c *SecurityHub) UpdateInsightWithContext(ctx aws.Context, input *UpdateIns return out, req.Send() } +const opUpdateSecurityHubConfiguration = "UpdateSecurityHubConfiguration" + +// UpdateSecurityHubConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSecurityHubConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSecurityHubConfiguration for more information on using the UpdateSecurityHubConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSecurityHubConfigurationRequest method. +// req, resp := client.UpdateSecurityHubConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UpdateSecurityHubConfiguration +func (c *SecurityHub) UpdateSecurityHubConfigurationRequest(input *UpdateSecurityHubConfigurationInput) (req *request.Request, output *UpdateSecurityHubConfigurationOutput) { + op := &request.Operation{ + Name: opUpdateSecurityHubConfiguration, + HTTPMethod: "PATCH", + HTTPPath: "/accounts", + } + + if input == nil { + input = &UpdateSecurityHubConfigurationInput{} + } + + output = &UpdateSecurityHubConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateSecurityHubConfiguration API operation for AWS SecurityHub. +// +// Updates configuration options for Security Hub. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SecurityHub's +// API operation UpdateSecurityHubConfiguration for usage and error information. +// +// Returned Error Types: +// * InternalException +// Internal server error. +// +// * InvalidInputException +// The request was rejected because you supplied an invalid or out-of-range +// value for an input parameter. +// +// * InvalidAccessException +// AWS Security Hub isn't enabled for the account used to make this request. +// +// * LimitExceededException +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error code describes the limit exceeded. +// +// * ResourceNotFoundException +// The request was rejected because we can't find the specified resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/UpdateSecurityHubConfiguration +func (c *SecurityHub) UpdateSecurityHubConfiguration(input *UpdateSecurityHubConfigurationInput) (*UpdateSecurityHubConfigurationOutput, error) { + req, out := c.UpdateSecurityHubConfigurationRequest(input) + return out, req.Send() +} + +// UpdateSecurityHubConfigurationWithContext is the same as UpdateSecurityHubConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSecurityHubConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SecurityHub) UpdateSecurityHubConfigurationWithContext(ctx aws.Context, input *UpdateSecurityHubConfigurationInput, opts ...request.Option) (*UpdateSecurityHubConfigurationOutput, error) { + req, out := c.UpdateSecurityHubConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateStandardsControl = "UpdateStandardsControl" // UpdateStandardsControlRequest generates a "aws/request.Request" representing the @@ -4499,8 +4750,8 @@ func (s AcceptInvitationOutput) GoString() string { // You don't have permission to perform the action specified in the request. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -4519,17 +4770,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4537,22 +4788,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // The details of an AWS account. @@ -4669,2540 +4920,10255 @@ func (s *AvailabilityZone) SetZoneName(v string) *AvailabilityZone { return s } -// A distribution configuration. -type AwsCloudFrontDistributionDetails struct { +// Contains information about settings for logging access for the stage. +type AwsApiGatewayAccessLogSettings struct { _ struct{} `type:"structure"` - // The domain name corresponding to the distribution. - DomainName *string `type:"string"` - - // The entity tag is a hash of the object. - ETag *string `type:"string"` - - // The date and time that the distribution was last modified. - LastModifiedTime *string `type:"string"` - - // A complex type that controls whether access logs are written for the distribution. - Logging *AwsCloudFrontDistributionLogging `type:"structure"` - - // A complex type that contains information about origins for this distribution. - Origins *AwsCloudFrontDistributionOrigins `type:"structure"` - - // Indicates the current status of the distribution. - Status *string `type:"string"` + // The ARN of the CloudWatch Logs log group that receives the access logs. + DestinationArn *string `type:"string"` - // A unique identifier that specifies the AWS WAF web ACL, if any, to associate - // with this distribution. - WebAclId *string `type:"string"` + // A single-line format of the access logs of data, as specified by selected + // $context variables. The format must include at least $context.requestId. + Format *string `type:"string"` } // String returns the string representation -func (s AwsCloudFrontDistributionDetails) String() string { +func (s AwsApiGatewayAccessLogSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsCloudFrontDistributionDetails) GoString() string { +func (s AwsApiGatewayAccessLogSettings) GoString() string { return s.String() } -// SetDomainName sets the DomainName field's value. -func (s *AwsCloudFrontDistributionDetails) SetDomainName(v string) *AwsCloudFrontDistributionDetails { - s.DomainName = &v +// SetDestinationArn sets the DestinationArn field's value. +func (s *AwsApiGatewayAccessLogSettings) SetDestinationArn(v string) *AwsApiGatewayAccessLogSettings { + s.DestinationArn = &v return s } -// SetETag sets the ETag field's value. -func (s *AwsCloudFrontDistributionDetails) SetETag(v string) *AwsCloudFrontDistributionDetails { - s.ETag = &v +// SetFormat sets the Format field's value. +func (s *AwsApiGatewayAccessLogSettings) SetFormat(v string) *AwsApiGatewayAccessLogSettings { + s.Format = &v return s } -// SetLastModifiedTime sets the LastModifiedTime field's value. 
-func (s *AwsCloudFrontDistributionDetails) SetLastModifiedTime(v string) *AwsCloudFrontDistributionDetails { - s.LastModifiedTime = &v - return s +// Contains information about settings for canary deployment in the stage. +type AwsApiGatewayCanarySettings struct { + _ struct{} `type:"structure"` + + // The deployment identifier for the canary deployment. + DeploymentId *string `type:"string"` + + // The percentage of traffic that is diverted to a canary deployment. + PercentTraffic *float64 `type:"double"` + + // Stage variables that are overridden in the canary release deployment. The + // variables include new stage variables that are introduced in the canary. + // + // Each variable is represented as a string-to-string map between the stage + // variable name and the variable value. + StageVariableOverrides map[string]*string `type:"map"` + + // Indicates whether the canary deployment uses the stage cache. + UseStageCache *bool `type:"boolean"` } -// SetLogging sets the Logging field's value. -func (s *AwsCloudFrontDistributionDetails) SetLogging(v *AwsCloudFrontDistributionLogging) *AwsCloudFrontDistributionDetails { - s.Logging = v +// String returns the string representation +func (s AwsApiGatewayCanarySettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsApiGatewayCanarySettings) GoString() string { + return s.String() +} + +// SetDeploymentId sets the DeploymentId field's value. +func (s *AwsApiGatewayCanarySettings) SetDeploymentId(v string) *AwsApiGatewayCanarySettings { + s.DeploymentId = &v return s } -// SetOrigins sets the Origins field's value. -func (s *AwsCloudFrontDistributionDetails) SetOrigins(v *AwsCloudFrontDistributionOrigins) *AwsCloudFrontDistributionDetails { - s.Origins = v +// SetPercentTraffic sets the PercentTraffic field's value. +func (s *AwsApiGatewayCanarySettings) SetPercentTraffic(v float64) *AwsApiGatewayCanarySettings { + s.PercentTraffic = &v return s } -// SetStatus sets the Status field's value. -func (s *AwsCloudFrontDistributionDetails) SetStatus(v string) *AwsCloudFrontDistributionDetails { - s.Status = &v +// SetStageVariableOverrides sets the StageVariableOverrides field's value. +func (s *AwsApiGatewayCanarySettings) SetStageVariableOverrides(v map[string]*string) *AwsApiGatewayCanarySettings { + s.StageVariableOverrides = v return s } -// SetWebAclId sets the WebAclId field's value. -func (s *AwsCloudFrontDistributionDetails) SetWebAclId(v string) *AwsCloudFrontDistributionDetails { - s.WebAclId = &v +// SetUseStageCache sets the UseStageCache field's value. +func (s *AwsApiGatewayCanarySettings) SetUseStageCache(v bool) *AwsApiGatewayCanarySettings { + s.UseStageCache = &v return s } -// A complex type that controls whether access logs are written for the distribution. -type AwsCloudFrontDistributionLogging struct { +// Contains information about the endpoints for the API. +type AwsApiGatewayEndpointConfiguration struct { _ struct{} `type:"structure"` - // The Amazon S3 bucket to store the access logs in. - Bucket *string `type:"string"` - - // With this field, you can enable or disable the selected distribution. - Enabled *bool `type:"boolean"` - - // Specifies whether you want CloudFront to include cookies in access logs. - IncludeCookies *bool `type:"boolean"` - - // An optional string that you want CloudFront to use as a prefix to the access - // log filenames for this distribution. - Prefix *string `type:"string"` + // A list of endpoint types for the REST API. 
+ // + // For an edge-optimized API, the endpoint type is EDGE. For a Regional API, + // the endpoint type is REGIONAL. For a private API, the endpoint type is PRIVATE. + Types []*string `type:"list"` } // String returns the string representation -func (s AwsCloudFrontDistributionLogging) String() string { +func (s AwsApiGatewayEndpointConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsCloudFrontDistributionLogging) GoString() string { +func (s AwsApiGatewayEndpointConfiguration) GoString() string { return s.String() } -// SetBucket sets the Bucket field's value. -func (s *AwsCloudFrontDistributionLogging) SetBucket(v string) *AwsCloudFrontDistributionLogging { - s.Bucket = &v +// SetTypes sets the Types field's value. +func (s *AwsApiGatewayEndpointConfiguration) SetTypes(v []*string) *AwsApiGatewayEndpointConfiguration { + s.Types = v return s } -// SetEnabled sets the Enabled field's value. -func (s *AwsCloudFrontDistributionLogging) SetEnabled(v bool) *AwsCloudFrontDistributionLogging { - s.Enabled = &v - return s -} +// Defines settings for a method for the stage. +type AwsApiGatewayMethodSettings struct { + _ struct{} `type:"structure"` -// SetIncludeCookies sets the IncludeCookies field's value. -func (s *AwsCloudFrontDistributionLogging) SetIncludeCookies(v bool) *AwsCloudFrontDistributionLogging { - s.IncludeCookies = &v - return s -} + // Indicates whether the cached responses are encrypted. + CacheDataEncrypted *bool `type:"boolean"` -// SetPrefix sets the Prefix field's value. -func (s *AwsCloudFrontDistributionLogging) SetPrefix(v string) *AwsCloudFrontDistributionLogging { - s.Prefix = &v - return s -} + // Specifies the time to live (TTL), in seconds, for cached responses. The higher + // the TTL, the longer the response is cached. + CacheTtlInSeconds *int64 `type:"integer"` -// A complex type that describes the Amazon S3 bucket, HTTP server (for example, -// a web server), Amazon MediaStore, or other server from which CloudFront gets -// your files. -type AwsCloudFrontDistributionOriginItem struct { - _ struct{} `type:"structure"` + // Indicates whether responses are cached and returned for requests. For responses + // to be cached, a cache cluster must be enabled on the stage. + CachingEnabled *bool `type:"boolean"` - // Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want - // CloudFront to get objects for this origin. - DomainName *string `type:"string"` + // Indicates whether data trace logging is enabled for the method. Data trace + // logging affects the log entries that are pushed to CloudWatch Logs. + DataTraceEnabled *bool `type:"boolean"` - // A unique identifier for the origin or origin group. - Id *string `type:"string"` + // The HTTP method. You can use an asterisk (*) as a wildcard to apply method + // settings to multiple methods. + HttpMethod *string `type:"string"` - // An optional element that causes CloudFront to request your content from a - // directory in your Amazon S3 bucket or your custom origin. - OriginPath *string `type:"string"` + // The logging level for this method. The logging level affects the log entries + // that are pushed to CloudWatch Logs. + // + // If the logging level is ERROR, then the logs only include error-level entries. + // + // If the logging level is INFO, then the logs include both ERROR events and + // extra informational events. 
+ // + // Valid values: OFF | ERROR | INFO + LoggingLevel *string `type:"string"` + + // Indicates whether CloudWatch metrics are enabled for the method. + MetricsEnabled *bool `type:"boolean"` + + // Indicates whether authorization is required for a cache invalidation request. + RequireAuthorizationForCacheControl *bool `type:"boolean"` + + // The resource path for this method. Forward slashes (/) are encoded as ~1 + // . The initial slash must include a forward slash. + // + // For example, the path value /resource/subresource must be encoded as /~1resource~1subresource. + // + // To specify the root path, use only a slash (/). You can use an asterisk (*) + // as a wildcard to apply method settings to multiple methods. + ResourcePath *string `type:"string"` + + // The throttling burst limit for the method. + ThrottlingBurstLimit *int64 `type:"integer"` + + // The throttling rate limit for the method. + ThrottlingRateLimit *float64 `type:"double"` + + // Indicates how to handle unauthorized requests for cache invalidation. + // + // Valid values: FAIL_WITH_403 | SUCCEED_WITH_RESPONSE_HEADER | SUCCEED_WITHOUT_RESPONSE_HEADER + UnauthorizedCacheControlHeaderStrategy *string `type:"string"` } // String returns the string representation -func (s AwsCloudFrontDistributionOriginItem) String() string { +func (s AwsApiGatewayMethodSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsCloudFrontDistributionOriginItem) GoString() string { +func (s AwsApiGatewayMethodSettings) GoString() string { return s.String() } -// SetDomainName sets the DomainName field's value. -func (s *AwsCloudFrontDistributionOriginItem) SetDomainName(v string) *AwsCloudFrontDistributionOriginItem { - s.DomainName = &v +// SetCacheDataEncrypted sets the CacheDataEncrypted field's value. +func (s *AwsApiGatewayMethodSettings) SetCacheDataEncrypted(v bool) *AwsApiGatewayMethodSettings { + s.CacheDataEncrypted = &v return s } -// SetId sets the Id field's value. -func (s *AwsCloudFrontDistributionOriginItem) SetId(v string) *AwsCloudFrontDistributionOriginItem { - s.Id = &v +// SetCacheTtlInSeconds sets the CacheTtlInSeconds field's value. +func (s *AwsApiGatewayMethodSettings) SetCacheTtlInSeconds(v int64) *AwsApiGatewayMethodSettings { + s.CacheTtlInSeconds = &v return s } -// SetOriginPath sets the OriginPath field's value. -func (s *AwsCloudFrontDistributionOriginItem) SetOriginPath(v string) *AwsCloudFrontDistributionOriginItem { - s.OriginPath = &v +// SetCachingEnabled sets the CachingEnabled field's value. +func (s *AwsApiGatewayMethodSettings) SetCachingEnabled(v bool) *AwsApiGatewayMethodSettings { + s.CachingEnabled = &v return s } -// A complex type that contains information about origins and origin groups -// for this distribution. -type AwsCloudFrontDistributionOrigins struct { - _ struct{} `type:"structure"` +// SetDataTraceEnabled sets the DataTraceEnabled field's value. +func (s *AwsApiGatewayMethodSettings) SetDataTraceEnabled(v bool) *AwsApiGatewayMethodSettings { + s.DataTraceEnabled = &v + return s +} - // A complex type that contains origins or origin groups for this distribution. - Items []*AwsCloudFrontDistributionOriginItem `type:"list"` +// SetHttpMethod sets the HttpMethod field's value. 
+func (s *AwsApiGatewayMethodSettings) SetHttpMethod(v string) *AwsApiGatewayMethodSettings { + s.HttpMethod = &v + return s } -// String returns the string representation -func (s AwsCloudFrontDistributionOrigins) String() string { - return awsutil.Prettify(s) +// SetLoggingLevel sets the LoggingLevel field's value. +func (s *AwsApiGatewayMethodSettings) SetLoggingLevel(v string) *AwsApiGatewayMethodSettings { + s.LoggingLevel = &v + return s } -// GoString returns the string representation -func (s AwsCloudFrontDistributionOrigins) GoString() string { - return s.String() +// SetMetricsEnabled sets the MetricsEnabled field's value. +func (s *AwsApiGatewayMethodSettings) SetMetricsEnabled(v bool) *AwsApiGatewayMethodSettings { + s.MetricsEnabled = &v + return s } -// SetItems sets the Items field's value. -func (s *AwsCloudFrontDistributionOrigins) SetItems(v []*AwsCloudFrontDistributionOriginItem) *AwsCloudFrontDistributionOrigins { - s.Items = v +// SetRequireAuthorizationForCacheControl sets the RequireAuthorizationForCacheControl field's value. +func (s *AwsApiGatewayMethodSettings) SetRequireAuthorizationForCacheControl(v bool) *AwsApiGatewayMethodSettings { + s.RequireAuthorizationForCacheControl = &v return s } -// Information about an AWS CodeBuild project. -type AwsCodeBuildProjectDetails struct { +// SetResourcePath sets the ResourcePath field's value. +func (s *AwsApiGatewayMethodSettings) SetResourcePath(v string) *AwsApiGatewayMethodSettings { + s.ResourcePath = &v + return s +} + +// SetThrottlingBurstLimit sets the ThrottlingBurstLimit field's value. +func (s *AwsApiGatewayMethodSettings) SetThrottlingBurstLimit(v int64) *AwsApiGatewayMethodSettings { + s.ThrottlingBurstLimit = &v + return s +} + +// SetThrottlingRateLimit sets the ThrottlingRateLimit field's value. +func (s *AwsApiGatewayMethodSettings) SetThrottlingRateLimit(v float64) *AwsApiGatewayMethodSettings { + s.ThrottlingRateLimit = &v + return s +} + +// SetUnauthorizedCacheControlHeaderStrategy sets the UnauthorizedCacheControlHeaderStrategy field's value. +func (s *AwsApiGatewayMethodSettings) SetUnauthorizedCacheControlHeaderStrategy(v string) *AwsApiGatewayMethodSettings { + s.UnauthorizedCacheControlHeaderStrategy = &v + return s +} + +// contains information about a REST API in version 1 of Amazon API Gateway. +type AwsApiGatewayRestApiDetails struct { _ struct{} `type:"structure"` - // The AWS Key Management Service (AWS KMS) customer master key (CMK) used to - // encrypt the build output artifacts. + // The source of the API key for metering requests according to a usage plan. // - // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, - // the CMK alias (using the format alias/alias-name). - EncryptionKey *string `type:"string"` + // HEADER indicates whether to read the API key from the X-API-Key header of + // a request. + // + // AUTHORIZER indicates whether to read the API key from the UsageIdentifierKey + // from a custom authorizer. + ApiKeySource *string `type:"string"` - // Information about the build environment for this build project. - Environment *AwsCodeBuildProjectEnvironment `type:"structure"` + // The list of binary media types supported by the REST API. + BinaryMediaTypes []*string `type:"list"` - // The name of the build project. - Name *string `type:"string"` + // Indicates when the API was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). 
The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreatedDate *string `type:"string"` - // The ARN of the IAM role that enables AWS CodeBuild to interact with dependent - // AWS services on behalf of the AWS account. - ServiceRole *string `type:"string"` + // A description of the REST API. + Description *string `type:"string"` - // Information about the build input source code for this build project. - Source *AwsCodeBuildProjectSource `type:"structure"` + // The endpoint configuration of the REST API. + EndpointConfiguration *AwsApiGatewayEndpointConfiguration `type:"structure"` - // Information about the VPC configuration that AWS CodeBuild accesses. - VpcConfig *AwsCodeBuildProjectVpcConfig `type:"structure"` + // The identifier of the REST API. + Id *string `type:"string"` + + // The minimum size in bytes of a payload before compression is enabled. + // + // If null, then compression is disabled. + // + // If 0, then all payloads are compressed. + MinimumCompressionSize *int64 `type:"integer"` + + // The name of the REST API. + Name *string `type:"string"` + + // The version identifier for the REST API. + Version *string `type:"string"` } // String returns the string representation -func (s AwsCodeBuildProjectDetails) String() string { +func (s AwsApiGatewayRestApiDetails) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsCodeBuildProjectDetails) GoString() string { +func (s AwsApiGatewayRestApiDetails) GoString() string { return s.String() } -// SetEncryptionKey sets the EncryptionKey field's value. -func (s *AwsCodeBuildProjectDetails) SetEncryptionKey(v string) *AwsCodeBuildProjectDetails { - s.EncryptionKey = &v +// SetApiKeySource sets the ApiKeySource field's value. +func (s *AwsApiGatewayRestApiDetails) SetApiKeySource(v string) *AwsApiGatewayRestApiDetails { + s.ApiKeySource = &v return s } -// SetEnvironment sets the Environment field's value. -func (s *AwsCodeBuildProjectDetails) SetEnvironment(v *AwsCodeBuildProjectEnvironment) *AwsCodeBuildProjectDetails { - s.Environment = v +// SetBinaryMediaTypes sets the BinaryMediaTypes field's value. +func (s *AwsApiGatewayRestApiDetails) SetBinaryMediaTypes(v []*string) *AwsApiGatewayRestApiDetails { + s.BinaryMediaTypes = v return s } -// SetName sets the Name field's value. -func (s *AwsCodeBuildProjectDetails) SetName(v string) *AwsCodeBuildProjectDetails { - s.Name = &v +// SetCreatedDate sets the CreatedDate field's value. +func (s *AwsApiGatewayRestApiDetails) SetCreatedDate(v string) *AwsApiGatewayRestApiDetails { + s.CreatedDate = &v return s } -// SetServiceRole sets the ServiceRole field's value. -func (s *AwsCodeBuildProjectDetails) SetServiceRole(v string) *AwsCodeBuildProjectDetails { - s.ServiceRole = &v +// SetDescription sets the Description field's value. +func (s *AwsApiGatewayRestApiDetails) SetDescription(v string) *AwsApiGatewayRestApiDetails { + s.Description = &v return s } -// SetSource sets the Source field's value. -func (s *AwsCodeBuildProjectDetails) SetSource(v *AwsCodeBuildProjectSource) *AwsCodeBuildProjectDetails { - s.Source = v +// SetEndpointConfiguration sets the EndpointConfiguration field's value. +func (s *AwsApiGatewayRestApiDetails) SetEndpointConfiguration(v *AwsApiGatewayEndpointConfiguration) *AwsApiGatewayRestApiDetails { + s.EndpointConfiguration = v return s } -// SetVpcConfig sets the VpcConfig field's value. 
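Because MinimumCompressionSize on AwsApiGatewayRestApiDetails distinguishes a nil pointer (compression disabled) from an explicit 0 (compress every payload), callers set it through a pointer helper rather than a plain literal. A short sketch under the same assumptions as above, additionally using the SDK's aws.String/aws.Int64/aws.StringSlice helpers; all identifiers and values are invented examples.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	// Illustrative values only; omitting MinimumCompressionSize entirely would
	// model an API with compression disabled.
	api := &securityhub.AwsApiGatewayRestApiDetails{
		ApiKeySource:           aws.String("HEADER"),
		BinaryMediaTypes:       aws.StringSlice([]string{"image/png"}),
		CreatedDate:            aws.String("2020-03-22T13:22:13.933Z"),
		Id:                     aws.String("a1b2c3d4e5"),
		MinimumCompressionSize: aws.Int64(0), // 0 compresses all payloads
		Name:                   aws.String("example-rest-api"),
	}
	api.SetEndpointConfiguration(&securityhub.AwsApiGatewayEndpointConfiguration{
		Types: aws.StringSlice([]string{"REGIONAL"}),
	})
	fmt.Println(api)
}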
-func (s *AwsCodeBuildProjectDetails) SetVpcConfig(v *AwsCodeBuildProjectVpcConfig) *AwsCodeBuildProjectDetails { - s.VpcConfig = v +// SetId sets the Id field's value. +func (s *AwsApiGatewayRestApiDetails) SetId(v string) *AwsApiGatewayRestApiDetails { + s.Id = &v return s } -// Information about the build environment for this build project. -type AwsCodeBuildProjectEnvironment struct { +// SetMinimumCompressionSize sets the MinimumCompressionSize field's value. +func (s *AwsApiGatewayRestApiDetails) SetMinimumCompressionSize(v int64) *AwsApiGatewayRestApiDetails { + s.MinimumCompressionSize = &v + return s +} + +// SetName sets the Name field's value. +func (s *AwsApiGatewayRestApiDetails) SetName(v string) *AwsApiGatewayRestApiDetails { + s.Name = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *AwsApiGatewayRestApiDetails) SetVersion(v string) *AwsApiGatewayRestApiDetails { + s.Version = &v + return s +} + +// Provides information about a version 1 Amazon API Gateway stage. +type AwsApiGatewayStageDetails struct { _ struct{} `type:"structure"` - // The certificate to use with this build project. - Certificate *string `type:"string"` + // Settings for logging access for the stage. + AccessLogSettings *AwsApiGatewayAccessLogSettings `type:"structure"` - // The type of credentials AWS CodeBuild uses to pull images in your build. - // - // Valid values: - // - // * CODEBUILD specifies that AWS CodeBuild uses its own credentials. This - // requires that you modify your ECR repository policy to trust the AWS CodeBuild - // service principal. + // Indicates whether a cache cluster is enabled for the stage. + CacheClusterEnabled *bool `type:"boolean"` + + // If a cache cluster is enabled, the size of the cache cluster. + CacheClusterSize *string `type:"string"` + + // If a cache cluster is enabled, the status of the cache cluster. + CacheClusterStatus *string `type:"string"` + + // Information about settings for canary deployment in the stage. + CanarySettings *AwsApiGatewayCanarySettings `type:"structure"` + + // The identifier of the client certificate for the stage. + ClientCertificateId *string `type:"string"` + + // Indicates when the stage was created. // - // * SERVICE_ROLE specifies that AWS CodeBuild uses your build project's - // service role. + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreatedDate *string `type:"string"` + + // The identifier of the deployment that the stage points to. + DeploymentId *string `type:"string"` + + // A description of the stage. + Description *string `type:"string"` + + // The version of the API documentation that is associated with the stage. + DocumentationVersion *string `type:"string"` + + // Indicates when the stage was most recently updated. // - // When you use a cross-account or private registry image, you must use SERVICE_ROLE - // credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD - // credentials. - ImagePullCredentialsType *string `type:"string"` + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + LastUpdatedDate *string `type:"string"` - // The credentials for access to a private registry. 
- RegistryCredential *AwsCodeBuildProjectEnvironmentRegistryCredential `type:"structure"` + // Defines the method settings for the stage. + MethodSettings []*AwsApiGatewayMethodSettings `type:"list"` - // The type of build environment to use for related builds. + // The name of the stage. + StageName *string `type:"string"` + + // Indicates whether active tracing with AWS X-Ray is enabled for the stage. + TracingEnabled *bool `type:"boolean"` + + // A map that defines the stage variables for the stage. // - // The environment type ARM_CONTAINER is available only in regions US East (N. - // Virginia), US East (Ohio), US West (Oregon), Europe (Ireland), Asia Pacific - // (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and Europe (Frankfurt). + // Variable names can have alphanumeric and underscore characters. // - // The environment type LINUX_CONTAINER with compute type build.general1.2xlarge - // is available only in regions US East (N. Virginia), US East (N. Virginia), - // US West (Oregon), Canada (Central), Europe (Ireland), Europe (London), Europe - // (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore), - // Asia Pacific (Sydney), China (Beijing), and China (Ningxia). + // Variable values can contain the following characters: // - // The environment type LINUX_GPU_CONTAINER is available only in regions US - // East (N. Virginia), US East (N. Virginia), US West (Oregon), Canada (Central), - // Europe (Ireland), Europe (London), Europe (Frankfurt), Asia Pacific (Tokyo), - // Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney) , China - // (Beijing), and China (Ningxia). + // * Uppercase and lowercase letters // - // Valid values: WINDOWS_CONTAINER | LINUX_CONTAINER | LINUX_GPU_CONTAINER | - // ARM_CONTAINER - Type *string `type:"string"` + // * Numbers + // + // * Special characters -._~:/?#&=, + Variables map[string]*string `type:"map"` + + // The ARN of the web ACL associated with the stage. + WebAclArn *string `type:"string"` } // String returns the string representation -func (s AwsCodeBuildProjectEnvironment) String() string { +func (s AwsApiGatewayStageDetails) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsCodeBuildProjectEnvironment) GoString() string { +func (s AwsApiGatewayStageDetails) GoString() string { return s.String() } -// SetCertificate sets the Certificate field's value. -func (s *AwsCodeBuildProjectEnvironment) SetCertificate(v string) *AwsCodeBuildProjectEnvironment { - s.Certificate = &v +// SetAccessLogSettings sets the AccessLogSettings field's value. +func (s *AwsApiGatewayStageDetails) SetAccessLogSettings(v *AwsApiGatewayAccessLogSettings) *AwsApiGatewayStageDetails { + s.AccessLogSettings = v return s } -// SetImagePullCredentialsType sets the ImagePullCredentialsType field's value. -func (s *AwsCodeBuildProjectEnvironment) SetImagePullCredentialsType(v string) *AwsCodeBuildProjectEnvironment { - s.ImagePullCredentialsType = &v +// SetCacheClusterEnabled sets the CacheClusterEnabled field's value. +func (s *AwsApiGatewayStageDetails) SetCacheClusterEnabled(v bool) *AwsApiGatewayStageDetails { + s.CacheClusterEnabled = &v return s } -// SetRegistryCredential sets the RegistryCredential field's value. -func (s *AwsCodeBuildProjectEnvironment) SetRegistryCredential(v *AwsCodeBuildProjectEnvironmentRegistryCredential) *AwsCodeBuildProjectEnvironment { - s.RegistryCredential = v +// SetCacheClusterSize sets the CacheClusterSize field's value. 
+func (s *AwsApiGatewayStageDetails) SetCacheClusterSize(v string) *AwsApiGatewayStageDetails { + s.CacheClusterSize = &v return s } -// SetType sets the Type field's value. -func (s *AwsCodeBuildProjectEnvironment) SetType(v string) *AwsCodeBuildProjectEnvironment { - s.Type = &v +// SetCacheClusterStatus sets the CacheClusterStatus field's value. +func (s *AwsApiGatewayStageDetails) SetCacheClusterStatus(v string) *AwsApiGatewayStageDetails { + s.CacheClusterStatus = &v return s } -// The credentials for access to a private registry. -type AwsCodeBuildProjectEnvironmentRegistryCredential struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets - // Manager. - // - // The credential can use the name of the credentials only if they exist in - // your current AWS Region. - Credential *string `type:"string"` +// SetCanarySettings sets the CanarySettings field's value. +func (s *AwsApiGatewayStageDetails) SetCanarySettings(v *AwsApiGatewayCanarySettings) *AwsApiGatewayStageDetails { + s.CanarySettings = v + return s +} - // The service that created the credentials to access a private Docker registry. - // - // The valid value,SECRETS_MANAGER, is for AWS Secrets Manager. - CredentialProvider *string `type:"string"` +// SetClientCertificateId sets the ClientCertificateId field's value. +func (s *AwsApiGatewayStageDetails) SetClientCertificateId(v string) *AwsApiGatewayStageDetails { + s.ClientCertificateId = &v + return s } -// String returns the string representation -func (s AwsCodeBuildProjectEnvironmentRegistryCredential) String() string { - return awsutil.Prettify(s) +// SetCreatedDate sets the CreatedDate field's value. +func (s *AwsApiGatewayStageDetails) SetCreatedDate(v string) *AwsApiGatewayStageDetails { + s.CreatedDate = &v + return s } -// GoString returns the string representation -func (s AwsCodeBuildProjectEnvironmentRegistryCredential) GoString() string { - return s.String() +// SetDeploymentId sets the DeploymentId field's value. +func (s *AwsApiGatewayStageDetails) SetDeploymentId(v string) *AwsApiGatewayStageDetails { + s.DeploymentId = &v + return s } -// SetCredential sets the Credential field's value. -func (s *AwsCodeBuildProjectEnvironmentRegistryCredential) SetCredential(v string) *AwsCodeBuildProjectEnvironmentRegistryCredential { - s.Credential = &v +// SetDescription sets the Description field's value. +func (s *AwsApiGatewayStageDetails) SetDescription(v string) *AwsApiGatewayStageDetails { + s.Description = &v return s } -// SetCredentialProvider sets the CredentialProvider field's value. -func (s *AwsCodeBuildProjectEnvironmentRegistryCredential) SetCredentialProvider(v string) *AwsCodeBuildProjectEnvironmentRegistryCredential { - s.CredentialProvider = &v +// SetDocumentationVersion sets the DocumentationVersion field's value. +func (s *AwsApiGatewayStageDetails) SetDocumentationVersion(v string) *AwsApiGatewayStageDetails { + s.DocumentationVersion = &v return s } -// Information about the build input source code for this build project. -type AwsCodeBuildProjectSource struct { - _ struct{} `type:"structure"` +// SetLastUpdatedDate sets the LastUpdatedDate field's value. +func (s *AwsApiGatewayStageDetails) SetLastUpdatedDate(v string) *AwsApiGatewayStageDetails { + s.LastUpdatedDate = &v + return s +} - // Information about the Git clone depth for the build project. - GitCloneDepth *int64 `type:"integer"` +// SetMethodSettings sets the MethodSettings field's value. 
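The stage details above tie the per-method settings to a named stage, and Variables is a plain map[string]*string whose keys and values follow the character rules in the field comment. A hypothetical sketch, with the same assumed package and placeholder values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	// Example-only description of a "prod" stage with one wildcard
	// method-settings entry and a single stage variable.
	stage := &securityhub.AwsApiGatewayStageDetails{}
	stage.SetStageName("prod").
		SetDeploymentId("deploy123").
		SetCacheClusterEnabled(true).
		SetCacheClusterSize("0.5").
		SetTracingEnabled(false)
	stage.SetMethodSettings([]*securityhub.AwsApiGatewayMethodSettings{
		(&securityhub.AwsApiGatewayMethodSettings{}).
			SetHttpMethod("*").
			SetResourcePath("/*"),
	})
	stage.SetVariables(map[string]*string{
		"stage_region": aws.String("us-east-1"),
	})
	fmt.Println(stage)
}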
+func (s *AwsApiGatewayStageDetails) SetMethodSettings(v []*AwsApiGatewayMethodSettings) *AwsApiGatewayStageDetails { + s.MethodSettings = v + return s +} - // Whether to ignore SSL warnings while connecting to the project source code. - InsecureSsl *bool `type:"boolean"` +// SetStageName sets the StageName field's value. +func (s *AwsApiGatewayStageDetails) SetStageName(v string) *AwsApiGatewayStageDetails { + s.StageName = &v + return s +} - // Information about the location of the source code to be built. - // - // Valid values include: - // - // * For source code settings that are specified in the source action of - // a pipeline in AWS CodePipeline, location should not be specified. If it - // is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline - // uses the settings in a pipeline's source action instead of this value. - // - // * For source code in an AWS CodeCommit repository, the HTTPS clone URL - // to the repository that contains the source code and the buildspec file - // (for example, https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name - // ). - // - // * For source code in an S3 input bucket, one of the following. The path - // to the ZIP file that contains the source code (for example, bucket-name/path/to/object-name.zip). - // The path to the folder that contains the source code (for example, bucket-name/path/to/source-code/folder/). - // - // * For source code in a GitHub repository, the HTTPS clone URL to the repository - // that contains the source and the buildspec file. - // - // * For source code in a Bitbucket repository, the HTTPS clone URL to the - // repository that contains the source and the buildspec file. - Location *string `type:"string"` +// SetTracingEnabled sets the TracingEnabled field's value. +func (s *AwsApiGatewayStageDetails) SetTracingEnabled(v bool) *AwsApiGatewayStageDetails { + s.TracingEnabled = &v + return s +} - // The type of repository that contains the source code to be built. Valid values - // are: - // - // * BITBUCKET - The source code is in a Bitbucket repository. +// SetVariables sets the Variables field's value. +func (s *AwsApiGatewayStageDetails) SetVariables(v map[string]*string) *AwsApiGatewayStageDetails { + s.Variables = v + return s +} + +// SetWebAclArn sets the WebAclArn field's value. +func (s *AwsApiGatewayStageDetails) SetWebAclArn(v string) *AwsApiGatewayStageDetails { + s.WebAclArn = &v + return s +} + +// Contains information about a version 2 API in Amazon API Gateway. +type AwsApiGatewayV2ApiDetails struct { + _ struct{} `type:"structure"` + + // The URI of the API. // - // * CODECOMMIT - The source code is in an AWS CodeCommit repository. + // Uses the format .execute-api..amazonaws.com // - // * CODEPIPELINE - The source code settings are specified in the source - // action of a pipeline in AWS CodePipeline. + // The stage name is typically appended to the URI to form a complete path to + // a deployed API stage. + ApiEndpoint *string `type:"string"` + + // The identifier of the API. + ApiId *string `type:"string"` + + // An API key selection expression. Supported only for WebSocket APIs. + ApiKeySelectionExpression *string `type:"string"` + + // A cross-origin resource sharing (CORS) configuration. Supported only for + // HTTP APIs. + CorsConfiguration *AwsCorsConfiguration `type:"structure"` + + // Indicates when the API was created. // - // * GITHUB - The source code is in a GitHub repository. 
+ // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreatedDate *string `type:"string"` + + // A description of the API. + Description *string `type:"string"` + + // The name of the API. + Name *string `type:"string"` + + // The API protocol for the API. // - // * GITHUB_ENTERPRISE - The source code is in a GitHub Enterprise repository. + // Valid values: WEBSOCKET | HTTP + ProtocolType *string `type:"string"` + + // The route selection expression for the API. // - // * NO_SOURCE - The project does not have input source code. + // For HTTP APIs, must be ${request.method} ${request.path}. This is the default + // value for HTTP APIs. // - // * S3 - The source code is in an S3 input bucket. - Type *string `type:"string"` + // For WebSocket APIs, there is no default value. + RouteSelectionExpression *string `type:"string"` + + // The version identifier for the API. + Version *string `type:"string"` } // String returns the string representation -func (s AwsCodeBuildProjectSource) String() string { +func (s AwsApiGatewayV2ApiDetails) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsCodeBuildProjectSource) GoString() string { +func (s AwsApiGatewayV2ApiDetails) GoString() string { return s.String() } -// SetGitCloneDepth sets the GitCloneDepth field's value. -func (s *AwsCodeBuildProjectSource) SetGitCloneDepth(v int64) *AwsCodeBuildProjectSource { - s.GitCloneDepth = &v +// SetApiEndpoint sets the ApiEndpoint field's value. +func (s *AwsApiGatewayV2ApiDetails) SetApiEndpoint(v string) *AwsApiGatewayV2ApiDetails { + s.ApiEndpoint = &v return s } -// SetInsecureSsl sets the InsecureSsl field's value. -func (s *AwsCodeBuildProjectSource) SetInsecureSsl(v bool) *AwsCodeBuildProjectSource { - s.InsecureSsl = &v +// SetApiId sets the ApiId field's value. +func (s *AwsApiGatewayV2ApiDetails) SetApiId(v string) *AwsApiGatewayV2ApiDetails { + s.ApiId = &v return s } -// SetLocation sets the Location field's value. -func (s *AwsCodeBuildProjectSource) SetLocation(v string) *AwsCodeBuildProjectSource { - s.Location = &v +// SetApiKeySelectionExpression sets the ApiKeySelectionExpression field's value. +func (s *AwsApiGatewayV2ApiDetails) SetApiKeySelectionExpression(v string) *AwsApiGatewayV2ApiDetails { + s.ApiKeySelectionExpression = &v return s } -// SetType sets the Type field's value. -func (s *AwsCodeBuildProjectSource) SetType(v string) *AwsCodeBuildProjectSource { - s.Type = &v +// SetCorsConfiguration sets the CorsConfiguration field's value. +func (s *AwsApiGatewayV2ApiDetails) SetCorsConfiguration(v *AwsCorsConfiguration) *AwsApiGatewayV2ApiDetails { + s.CorsConfiguration = v return s } -// Information about the VPC configuration that AWS CodeBuild accesses. -type AwsCodeBuildProjectVpcConfig struct { +// SetCreatedDate sets the CreatedDate field's value. +func (s *AwsApiGatewayV2ApiDetails) SetCreatedDate(v string) *AwsApiGatewayV2ApiDetails { + s.CreatedDate = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *AwsApiGatewayV2ApiDetails) SetDescription(v string) *AwsApiGatewayV2ApiDetails { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *AwsApiGatewayV2ApiDetails) SetName(v string) *AwsApiGatewayV2ApiDetails { + s.Name = &v + return s +} + +// SetProtocolType sets the ProtocolType field's value. 
+func (s *AwsApiGatewayV2ApiDetails) SetProtocolType(v string) *AwsApiGatewayV2ApiDetails { + s.ProtocolType = &v + return s +} + +// SetRouteSelectionExpression sets the RouteSelectionExpression field's value. +func (s *AwsApiGatewayV2ApiDetails) SetRouteSelectionExpression(v string) *AwsApiGatewayV2ApiDetails { + s.RouteSelectionExpression = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *AwsApiGatewayV2ApiDetails) SetVersion(v string) *AwsApiGatewayV2ApiDetails { + s.Version = &v + return s +} + +// Contains route settings for a stage. +type AwsApiGatewayV2RouteSettings struct { _ struct{} `type:"structure"` - // A list of one or more security group IDs in your Amazon VPC. - SecurityGroupIds []*string `type:"list"` + // Indicates whether data trace logging is enabled. Data trace logging affects + // the log entries that are pushed to CloudWatch Logs. Supported only for WebSocket + // APIs. + DataTraceEnabled *bool `type:"boolean"` - // A list of one or more subnet IDs in your Amazon VPC. - Subnets []*string `type:"list"` + // Indicates whether detailed metrics are enabled. + DetailedMetricsEnabled *bool `type:"boolean"` - // The ID of the VPC. - VpcId *string `type:"string"` + // The logging level. The logging level affects the log entries that are pushed + // to CloudWatch Logs. Supported only for WebSocket APIs. + // + // If the logging level is ERROR, then the logs only include error-level entries. + // + // If the logging level is INFO, then the logs include both ERROR events and + // extra informational events. + // + // Valid values: OFF | ERROR | INFO + LoggingLevel *string `type:"string"` + + // The throttling burst limit. + ThrottlingBurstLimit *int64 `type:"integer"` + + // The throttling rate limit. + ThrottlingRateLimit *float64 `type:"double"` } // String returns the string representation -func (s AwsCodeBuildProjectVpcConfig) String() string { +func (s AwsApiGatewayV2RouteSettings) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsCodeBuildProjectVpcConfig) GoString() string { +func (s AwsApiGatewayV2RouteSettings) GoString() string { return s.String() } -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *AwsCodeBuildProjectVpcConfig) SetSecurityGroupIds(v []*string) *AwsCodeBuildProjectVpcConfig { - s.SecurityGroupIds = v +// SetDataTraceEnabled sets the DataTraceEnabled field's value. +func (s *AwsApiGatewayV2RouteSettings) SetDataTraceEnabled(v bool) *AwsApiGatewayV2RouteSettings { + s.DataTraceEnabled = &v return s } -// SetSubnets sets the Subnets field's value. -func (s *AwsCodeBuildProjectVpcConfig) SetSubnets(v []*string) *AwsCodeBuildProjectVpcConfig { - s.Subnets = v +// SetDetailedMetricsEnabled sets the DetailedMetricsEnabled field's value. +func (s *AwsApiGatewayV2RouteSettings) SetDetailedMetricsEnabled(v bool) *AwsApiGatewayV2RouteSettings { + s.DetailedMetricsEnabled = &v return s } -// SetVpcId sets the VpcId field's value. -func (s *AwsCodeBuildProjectVpcConfig) SetVpcId(v string) *AwsCodeBuildProjectVpcConfig { - s.VpcId = &v +// SetLoggingLevel sets the LoggingLevel field's value. +func (s *AwsApiGatewayV2RouteSettings) SetLoggingLevel(v string) *AwsApiGatewayV2RouteSettings { + s.LoggingLevel = &v return s } -// The details of an Amazon EC2 instance. -type AwsEc2InstanceDetails struct { +// SetThrottlingBurstLimit sets the ThrottlingBurstLimit field's value. 
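The version 2 types follow the same setter pattern; the route selection expression below uses the HTTP API default documented above. A minimal sketch under the same package assumption, with placeholder identifiers:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	// Hypothetical HTTP API description; the API ID is a placeholder.
	api := &securityhub.AwsApiGatewayV2ApiDetails{}
	api.SetApiId("abc123").
		SetProtocolType("HTTP").
		SetRouteSelectionExpression("${request.method} ${request.path}")

	// Route settings for a stage of that API. DataTraceEnabled and
	// LoggingLevel are documented above as WebSocket-only, so they are left unset.
	routes := &securityhub.AwsApiGatewayV2RouteSettings{}
	routes.SetDetailedMetricsEnabled(true).
		SetThrottlingBurstLimit(200).
		SetThrottlingRateLimit(50)

	fmt.Println(api, routes)
}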
+func (s *AwsApiGatewayV2RouteSettings) SetThrottlingBurstLimit(v int64) *AwsApiGatewayV2RouteSettings { + s.ThrottlingBurstLimit = &v + return s +} + +// SetThrottlingRateLimit sets the ThrottlingRateLimit field's value. +func (s *AwsApiGatewayV2RouteSettings) SetThrottlingRateLimit(v float64) *AwsApiGatewayV2RouteSettings { + s.ThrottlingRateLimit = &v + return s +} + +// Contains information about a version 2 stage for Amazon API Gateway. +type AwsApiGatewayV2StageDetails struct { _ struct{} `type:"structure"` - // The IAM profile ARN of the instance. - IamInstanceProfileArn *string `type:"string"` + // Information about settings for logging access for the stage. + AccessLogSettings *AwsApiGatewayAccessLogSettings `type:"structure"` - // The Amazon Machine Image (AMI) ID of the instance. - ImageId *string `type:"string"` + // Indicates whether the stage is managed by API Gateway. + ApiGatewayManaged *bool `type:"boolean"` - // The IPv4 addresses associated with the instance. - IpV4Addresses []*string `type:"list"` + // Indicates whether updates to an API automatically trigger a new deployment. + AutoDeploy *bool `type:"boolean"` - // The IPv6 addresses associated with the instance. - IpV6Addresses []*string `type:"list"` + // Indicates when the stage was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreatedDate *string `type:"string"` - // The key name associated with the instance. - KeyName *string `type:"string"` + // Default route settings for the stage. + DefaultRouteSettings *AwsApiGatewayV2RouteSettings `type:"structure"` - // The date/time the instance was launched. - LaunchedAt *string `type:"string"` + // The identifier of the deployment that the stage is associated with. + DeploymentId *string `type:"string"` - // The identifier of the subnet that the instance was launched in. - SubnetId *string `type:"string"` + // The description of the stage. + Description *string `type:"string"` - // The instance type of the instance. - Type *string `type:"string"` + // The status of the last deployment of a stage. Supported only if the stage + // has automatic deployment enabled. + LastDeploymentStatusMessage *string `type:"string"` - // The identifier of the VPC that the instance was launched in. - VpcId *string `type:"string"` + // Indicates when the stage was most recently updated. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + LastUpdatedDate *string `type:"string"` + + // The route settings for the stage. + RouteSettings *AwsApiGatewayV2RouteSettings `type:"structure"` + + // The name of the stage. + StageName *string `type:"string"` + + // A map that defines the stage variables for the stage. + // + // Variable names can have alphanumeric and underscore characters. 
+ // + // Variable values can contain the following characters: + // + // * Uppercase and lowercase letters + // + // * Numbers + // + // * Special characters -._~:/?#&=, + StageVariables map[string]*string `type:"map"` } // String returns the string representation -func (s AwsEc2InstanceDetails) String() string { +func (s AwsApiGatewayV2StageDetails) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsEc2InstanceDetails) GoString() string { +func (s AwsApiGatewayV2StageDetails) GoString() string { return s.String() } -// SetIamInstanceProfileArn sets the IamInstanceProfileArn field's value. -func (s *AwsEc2InstanceDetails) SetIamInstanceProfileArn(v string) *AwsEc2InstanceDetails { - s.IamInstanceProfileArn = &v +// SetAccessLogSettings sets the AccessLogSettings field's value. +func (s *AwsApiGatewayV2StageDetails) SetAccessLogSettings(v *AwsApiGatewayAccessLogSettings) *AwsApiGatewayV2StageDetails { + s.AccessLogSettings = v return s } -// SetImageId sets the ImageId field's value. -func (s *AwsEc2InstanceDetails) SetImageId(v string) *AwsEc2InstanceDetails { - s.ImageId = &v +// SetApiGatewayManaged sets the ApiGatewayManaged field's value. +func (s *AwsApiGatewayV2StageDetails) SetApiGatewayManaged(v bool) *AwsApiGatewayV2StageDetails { + s.ApiGatewayManaged = &v return s } -// SetIpV4Addresses sets the IpV4Addresses field's value. -func (s *AwsEc2InstanceDetails) SetIpV4Addresses(v []*string) *AwsEc2InstanceDetails { - s.IpV4Addresses = v +// SetAutoDeploy sets the AutoDeploy field's value. +func (s *AwsApiGatewayV2StageDetails) SetAutoDeploy(v bool) *AwsApiGatewayV2StageDetails { + s.AutoDeploy = &v return s } -// SetIpV6Addresses sets the IpV6Addresses field's value. -func (s *AwsEc2InstanceDetails) SetIpV6Addresses(v []*string) *AwsEc2InstanceDetails { - s.IpV6Addresses = v +// SetCreatedDate sets the CreatedDate field's value. +func (s *AwsApiGatewayV2StageDetails) SetCreatedDate(v string) *AwsApiGatewayV2StageDetails { + s.CreatedDate = &v return s } -// SetKeyName sets the KeyName field's value. -func (s *AwsEc2InstanceDetails) SetKeyName(v string) *AwsEc2InstanceDetails { - s.KeyName = &v +// SetDefaultRouteSettings sets the DefaultRouteSettings field's value. +func (s *AwsApiGatewayV2StageDetails) SetDefaultRouteSettings(v *AwsApiGatewayV2RouteSettings) *AwsApiGatewayV2StageDetails { + s.DefaultRouteSettings = v return s } -// SetLaunchedAt sets the LaunchedAt field's value. -func (s *AwsEc2InstanceDetails) SetLaunchedAt(v string) *AwsEc2InstanceDetails { - s.LaunchedAt = &v +// SetDeploymentId sets the DeploymentId field's value. +func (s *AwsApiGatewayV2StageDetails) SetDeploymentId(v string) *AwsApiGatewayV2StageDetails { + s.DeploymentId = &v return s } -// SetSubnetId sets the SubnetId field's value. -func (s *AwsEc2InstanceDetails) SetSubnetId(v string) *AwsEc2InstanceDetails { - s.SubnetId = &v +// SetDescription sets the Description field's value. +func (s *AwsApiGatewayV2StageDetails) SetDescription(v string) *AwsApiGatewayV2StageDetails { + s.Description = &v return s } -// SetType sets the Type field's value. -func (s *AwsEc2InstanceDetails) SetType(v string) *AwsEc2InstanceDetails { - s.Type = &v +// SetLastDeploymentStatusMessage sets the LastDeploymentStatusMessage field's value. +func (s *AwsApiGatewayV2StageDetails) SetLastDeploymentStatusMessage(v string) *AwsApiGatewayV2StageDetails { + s.LastDeploymentStatusMessage = &v return s } -// SetVpcId sets the VpcId field's value. 
-func (s *AwsEc2InstanceDetails) SetVpcId(v string) *AwsEc2InstanceDetails { - s.VpcId = &v +// SetLastUpdatedDate sets the LastUpdatedDate field's value. +func (s *AwsApiGatewayV2StageDetails) SetLastUpdatedDate(v string) *AwsApiGatewayV2StageDetails { + s.LastUpdatedDate = &v return s } -// Information about the network interface attachment. -type AwsEc2NetworkInterfaceAttachment struct { - _ struct{} `type:"structure"` +// SetRouteSettings sets the RouteSettings field's value. +func (s *AwsApiGatewayV2StageDetails) SetRouteSettings(v *AwsApiGatewayV2RouteSettings) *AwsApiGatewayV2StageDetails { + s.RouteSettings = v + return s +} - // The timestamp indicating when the attachment initiated. - AttachTime *string `type:"string"` +// SetStageName sets the StageName field's value. +func (s *AwsApiGatewayV2StageDetails) SetStageName(v string) *AwsApiGatewayV2StageDetails { + s.StageName = &v + return s +} - // The identifier of the network interface attachment - AttachmentId *string `type:"string"` +// SetStageVariables sets the StageVariables field's value. +func (s *AwsApiGatewayV2StageDetails) SetStageVariables(v map[string]*string) *AwsApiGatewayV2StageDetails { + s.StageVariables = v + return s +} - // Indicates whether the network interface is deleted when the instance is terminated. - DeleteOnTermination *bool `type:"boolean"` +// Provides details about an auto scaling group. +type AwsAutoScalingAutoScalingGroupDetails struct { + _ struct{} `type:"structure"` - // The device index of the network interface attachment on the instance. - DeviceIndex *int64 `type:"integer"` + // Indicates when the auto scaling group was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreatedTime *string `type:"string"` - // The ID of the instance. - InstanceId *string `type:"string"` + // The amount of time, in seconds, that Amazon EC2 Auto Scaling waits before + // it checks the health status of an EC2 instance that has come into service. + HealthCheckGracePeriod *int64 `type:"integer"` - // The AWS account ID of the owner of the instance. - InstanceOwnerId *string `type:"string"` + // The service to use for the health checks. + HealthCheckType *string `type:"string"` - // The attachment state. - // - // Valid values: attaching | attached | detaching | detached - Status *string `type:"string"` + // The name of the launch configuration. + LaunchConfigurationName *string `type:"string"` + + // The list of load balancers associated with the group. + LoadBalancerNames []*string `type:"list"` } // String returns the string representation -func (s AwsEc2NetworkInterfaceAttachment) String() string { +func (s AwsAutoScalingAutoScalingGroupDetails) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsEc2NetworkInterfaceAttachment) GoString() string { +func (s AwsAutoScalingAutoScalingGroupDetails) GoString() string { return s.String() } -// SetAttachTime sets the AttachTime field's value. -func (s *AwsEc2NetworkInterfaceAttachment) SetAttachTime(v string) *AwsEc2NetworkInterfaceAttachment { - s.AttachTime = &v - return s -} - -// SetAttachmentId sets the AttachmentId field's value. 
-func (s *AwsEc2NetworkInterfaceAttachment) SetAttachmentId(v string) *AwsEc2NetworkInterfaceAttachment { - s.AttachmentId = &v - return s -} - -// SetDeleteOnTermination sets the DeleteOnTermination field's value. -func (s *AwsEc2NetworkInterfaceAttachment) SetDeleteOnTermination(v bool) *AwsEc2NetworkInterfaceAttachment { - s.DeleteOnTermination = &v +// SetCreatedTime sets the CreatedTime field's value. +func (s *AwsAutoScalingAutoScalingGroupDetails) SetCreatedTime(v string) *AwsAutoScalingAutoScalingGroupDetails { + s.CreatedTime = &v return s } -// SetDeviceIndex sets the DeviceIndex field's value. -func (s *AwsEc2NetworkInterfaceAttachment) SetDeviceIndex(v int64) *AwsEc2NetworkInterfaceAttachment { - s.DeviceIndex = &v +// SetHealthCheckGracePeriod sets the HealthCheckGracePeriod field's value. +func (s *AwsAutoScalingAutoScalingGroupDetails) SetHealthCheckGracePeriod(v int64) *AwsAutoScalingAutoScalingGroupDetails { + s.HealthCheckGracePeriod = &v return s } -// SetInstanceId sets the InstanceId field's value. -func (s *AwsEc2NetworkInterfaceAttachment) SetInstanceId(v string) *AwsEc2NetworkInterfaceAttachment { - s.InstanceId = &v +// SetHealthCheckType sets the HealthCheckType field's value. +func (s *AwsAutoScalingAutoScalingGroupDetails) SetHealthCheckType(v string) *AwsAutoScalingAutoScalingGroupDetails { + s.HealthCheckType = &v return s } -// SetInstanceOwnerId sets the InstanceOwnerId field's value. -func (s *AwsEc2NetworkInterfaceAttachment) SetInstanceOwnerId(v string) *AwsEc2NetworkInterfaceAttachment { - s.InstanceOwnerId = &v +// SetLaunchConfigurationName sets the LaunchConfigurationName field's value. +func (s *AwsAutoScalingAutoScalingGroupDetails) SetLaunchConfigurationName(v string) *AwsAutoScalingAutoScalingGroupDetails { + s.LaunchConfigurationName = &v return s } -// SetStatus sets the Status field's value. -func (s *AwsEc2NetworkInterfaceAttachment) SetStatus(v string) *AwsEc2NetworkInterfaceAttachment { - s.Status = &v +// SetLoadBalancerNames sets the LoadBalancerNames field's value. +func (s *AwsAutoScalingAutoScalingGroupDetails) SetLoadBalancerNames(v []*string) *AwsAutoScalingAutoScalingGroupDetails { + s.LoadBalancerNames = v return s } -// Details about the network interface -type AwsEc2NetworkInterfaceDetails struct { +// Provides details about an AWS Certificate Manager certificate. +type AwsCertificateManagerCertificateDetails struct { _ struct{} `type:"structure"` - // The network interface attachment. - Attachment *AwsEc2NetworkInterfaceAttachment `type:"structure"` + // The ARN of the private certificate authority (CA) that will be used to issue + // the certificate. + CertificateAuthorityArn *string `type:"string"` - // The ID of the network interface. - NetworkInterfaceId *string `type:"string"` + // Indicates when the certificate was requested. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreatedAt *string `type:"string"` - // Security groups for the network interface. - SecurityGroups []*AwsEc2NetworkInterfaceSecurityGroup `type:"list"` + // The fully qualified domain name (FQDN), such as www.example.com, that is + // secured by the certificate. + DomainName *string `type:"string"` - // Indicates whether traffic to or from the instance is validated. 
- SourceDestCheck *bool `type:"boolean"` + // Contains information about the initial validation of each domain name that + // occurs as a result of the RequestCertificate request. + // + // Only provided if the certificate type is AMAZON_ISSUED. + DomainValidationOptions []*AwsCertificateManagerCertificateDomainValidationOption `type:"list"` + + // Contains a list of Extended Key Usage X.509 v3 extension objects. Each object + // specifies a purpose for which the certificate public key can be used and + // consists of a name and an object identifier (OID). + ExtendedKeyUsages []*AwsCertificateManagerCertificateExtendedKeyUsage `type:"list"` + + // For a failed certificate request, the reason for the failure. + // + // Valid values: NO_AVAILABLE_CONTACTS | ADDITIONAL_VERIFICATION_REQUIRED | + // DOMAIN_NOT_ALLOWED | INVALID_PUBLIC_DOMAIN | DOMAIN_VALIDATION_DENIED | CAA_ERROR + // | PCA_LIMIT_EXCEEDED | PCA_INVALID_ARN | PCA_INVALID_STATE | PCA_REQUEST_FAILED + // | PCA_NAME_CONSTRAINTS_VALIDATION | PCA_RESOURCE_NOT_FOUND | PCA_INVALID_ARGS + // | PCA_INVALID_DURATION | PCA_ACCESS_DENIED | SLR_NOT_FOUND | OTHER + FailureReason *string `type:"string"` + + // Indicates when the certificate was imported. Provided if the certificate + // type is IMPORTED. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + ImportedAt *string `type:"string"` + + // The list of ARNs for the AWS resources that use the certificate. + InUseBy []*string `type:"list"` + + // Indicates when the certificate was issued. Provided if the certificate type + // is AMAZON_ISSUED. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + IssuedAt *string `type:"string"` + + // The name of the certificate authority that issued and signed the certificate. + Issuer *string `type:"string"` + + // The algorithm that was used to generate the public-private key pair. + // + // Valid values: RSA_2048 | RSA_1024 |RSA_4096 | EC_prime256v1 | EC_secp384r1 + // | EC_secp521r1 + KeyAlgorithm *string `type:"string"` + + // A list of key usage X.509 v3 extension objects. + KeyUsages []*AwsCertificateManagerCertificateKeyUsage `type:"list"` + + // The time after which the certificate becomes invalid. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + NotAfter *string `type:"string"` + + // The time before which the certificate is not valid. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + NotBefore *string `type:"string"` + + // Provides a value that specifies whether to add the certificate to a transparency + // log. + Options *AwsCertificateManagerCertificateOptions `type:"structure"` + + // Whether the certificate is eligible for renewal. + // + // Valid values: ELIGIBLE | INELIGIBLE + RenewalEligibility *string `type:"string"` + + // Information about the status of the AWS Certificate Manager managed renewal + // for the certificate. 
Provided only when the certificate type is AMAZON_ISSUED. + RenewalSummary *AwsCertificateManagerCertificateRenewalSummary `type:"structure"` + + // The serial number of the certificate. + Serial *string `type:"string"` + + // The algorithm that was used to sign the certificate. + SignatureAlgorithm *string `type:"string"` + + // The status of the certificate. + // + // Valid values: PENDING_VALIDATION | ISSUED | INACTIVE | EXPIRED | VALIDATION_TIMED_OUT + // | REVOKED | FAILED + Status *string `type:"string"` + + // The name of the entity that is associated with the public key contained in + // the certificate. + Subject *string `type:"string"` + + // One or more domain names (subject alternative names) included in the certificate. + // This list contains the domain names that are bound to the public key that + // is contained in the certificate. + // + // The subject alternative names include the canonical domain name (CN) of the + // certificate and additional domain names that can be used to connect to the + // website. + SubjectAlternativeNames []*string `type:"list"` + + // The source of the certificate. For certificates that AWS Certificate Manager + // provides, Type is AMAZON_ISSUED. For certificates that are imported with + // ImportCertificate, Type is IMPORTED. + // + // Valid values: IMPORTED | AMAZON_ISSUED | PRIVATE + Type *string `type:"string"` } // String returns the string representation -func (s AwsEc2NetworkInterfaceDetails) String() string { +func (s AwsCertificateManagerCertificateDetails) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsEc2NetworkInterfaceDetails) GoString() string { +func (s AwsCertificateManagerCertificateDetails) GoString() string { return s.String() } -// SetAttachment sets the Attachment field's value. -func (s *AwsEc2NetworkInterfaceDetails) SetAttachment(v *AwsEc2NetworkInterfaceAttachment) *AwsEc2NetworkInterfaceDetails { - s.Attachment = v +// SetCertificateAuthorityArn sets the CertificateAuthorityArn field's value. +func (s *AwsCertificateManagerCertificateDetails) SetCertificateAuthorityArn(v string) *AwsCertificateManagerCertificateDetails { + s.CertificateAuthorityArn = &v return s } -// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. -func (s *AwsEc2NetworkInterfaceDetails) SetNetworkInterfaceId(v string) *AwsEc2NetworkInterfaceDetails { - s.NetworkInterfaceId = &v +// SetCreatedAt sets the CreatedAt field's value. +func (s *AwsCertificateManagerCertificateDetails) SetCreatedAt(v string) *AwsCertificateManagerCertificateDetails { + s.CreatedAt = &v return s } -// SetSecurityGroups sets the SecurityGroups field's value. -func (s *AwsEc2NetworkInterfaceDetails) SetSecurityGroups(v []*AwsEc2NetworkInterfaceSecurityGroup) *AwsEc2NetworkInterfaceDetails { - s.SecurityGroups = v +// SetDomainName sets the DomainName field's value. +func (s *AwsCertificateManagerCertificateDetails) SetDomainName(v string) *AwsCertificateManagerCertificateDetails { + s.DomainName = &v return s } -// SetSourceDestCheck sets the SourceDestCheck field's value. -func (s *AwsEc2NetworkInterfaceDetails) SetSourceDestCheck(v bool) *AwsEc2NetworkInterfaceDetails { - s.SourceDestCheck = &v +// SetDomainValidationOptions sets the DomainValidationOptions field's value. 
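All of the certificate timestamps above are RFC 3339 strings rather than time.Time values, so a caller formats them explicitly. The sketch below again assumes the securityhub and aws packages; the domain, dates, and enum choices are placeholders drawn from the valid values listed in the field comments.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	// RFC 3339 timestamp in the millisecond form used by the field docs.
	issued := time.Date(2020, 3, 22, 13, 22, 13, 933_000_000, time.UTC).
		Format("2006-01-02T15:04:05.999Z07:00")

	cert := &securityhub.AwsCertificateManagerCertificateDetails{
		DomainName:         aws.String("www.example.com"),
		IssuedAt:           aws.String(issued),
		KeyAlgorithm:       aws.String("RSA_2048"),
		RenewalEligibility: aws.String("ELIGIBLE"),
		Status:             aws.String("ISSUED"),
		Type:               aws.String("AMAZON_ISSUED"),
		SubjectAlternativeNames: aws.StringSlice([]string{
			"www.example.com", "example.com",
		}),
	}
	fmt.Println(cert)
}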
+func (s *AwsCertificateManagerCertificateDetails) SetDomainValidationOptions(v []*AwsCertificateManagerCertificateDomainValidationOption) *AwsCertificateManagerCertificateDetails { + s.DomainValidationOptions = v return s } -// A security group associated with the network interface. -type AwsEc2NetworkInterfaceSecurityGroup struct { - _ struct{} `type:"structure"` - - // The ID of the security group. - GroupId *string `type:"string"` - - // The name of the security group. - GroupName *string `type:"string"` +// SetExtendedKeyUsages sets the ExtendedKeyUsages field's value. +func (s *AwsCertificateManagerCertificateDetails) SetExtendedKeyUsages(v []*AwsCertificateManagerCertificateExtendedKeyUsage) *AwsCertificateManagerCertificateDetails { + s.ExtendedKeyUsages = v + return s } -// String returns the string representation -func (s AwsEc2NetworkInterfaceSecurityGroup) String() string { - return awsutil.Prettify(s) +// SetFailureReason sets the FailureReason field's value. +func (s *AwsCertificateManagerCertificateDetails) SetFailureReason(v string) *AwsCertificateManagerCertificateDetails { + s.FailureReason = &v + return s } -// GoString returns the string representation -func (s AwsEc2NetworkInterfaceSecurityGroup) GoString() string { - return s.String() +// SetImportedAt sets the ImportedAt field's value. +func (s *AwsCertificateManagerCertificateDetails) SetImportedAt(v string) *AwsCertificateManagerCertificateDetails { + s.ImportedAt = &v + return s } -// SetGroupId sets the GroupId field's value. -func (s *AwsEc2NetworkInterfaceSecurityGroup) SetGroupId(v string) *AwsEc2NetworkInterfaceSecurityGroup { - s.GroupId = &v +// SetInUseBy sets the InUseBy field's value. +func (s *AwsCertificateManagerCertificateDetails) SetInUseBy(v []*string) *AwsCertificateManagerCertificateDetails { + s.InUseBy = v return s } -// SetGroupName sets the GroupName field's value. -func (s *AwsEc2NetworkInterfaceSecurityGroup) SetGroupName(v string) *AwsEc2NetworkInterfaceSecurityGroup { - s.GroupName = &v +// SetIssuedAt sets the IssuedAt field's value. +func (s *AwsCertificateManagerCertificateDetails) SetIssuedAt(v string) *AwsCertificateManagerCertificateDetails { + s.IssuedAt = &v return s } -// Details about an EC2 security group. -type AwsEc2SecurityGroupDetails struct { - _ struct{} `type:"structure"` - - // The ID of the security group. - GroupId *string `type:"string"` +// SetIssuer sets the Issuer field's value. +func (s *AwsCertificateManagerCertificateDetails) SetIssuer(v string) *AwsCertificateManagerCertificateDetails { + s.Issuer = &v + return s +} - // The name of the security group. - GroupName *string `type:"string"` +// SetKeyAlgorithm sets the KeyAlgorithm field's value. +func (s *AwsCertificateManagerCertificateDetails) SetKeyAlgorithm(v string) *AwsCertificateManagerCertificateDetails { + s.KeyAlgorithm = &v + return s +} - // The inbound rules associated with the security group. - IpPermissions []*AwsEc2SecurityGroupIpPermission `type:"list"` +// SetKeyUsages sets the KeyUsages field's value. +func (s *AwsCertificateManagerCertificateDetails) SetKeyUsages(v []*AwsCertificateManagerCertificateKeyUsage) *AwsCertificateManagerCertificateDetails { + s.KeyUsages = v + return s +} - // [VPC only] The outbound rules associated with the security group. - IpPermissionsEgress []*AwsEc2SecurityGroupIpPermission `type:"list"` +// SetNotAfter sets the NotAfter field's value. 
+func (s *AwsCertificateManagerCertificateDetails) SetNotAfter(v string) *AwsCertificateManagerCertificateDetails { + s.NotAfter = &v + return s +} - // The AWS account ID of the owner of the security group. - OwnerId *string `type:"string"` +// SetNotBefore sets the NotBefore field's value. +func (s *AwsCertificateManagerCertificateDetails) SetNotBefore(v string) *AwsCertificateManagerCertificateDetails { + s.NotBefore = &v + return s +} - // [VPC only] The ID of the VPC for the security group. - VpcId *string `type:"string"` +// SetOptions sets the Options field's value. +func (s *AwsCertificateManagerCertificateDetails) SetOptions(v *AwsCertificateManagerCertificateOptions) *AwsCertificateManagerCertificateDetails { + s.Options = v + return s } -// String returns the string representation -func (s AwsEc2SecurityGroupDetails) String() string { - return awsutil.Prettify(s) +// SetRenewalEligibility sets the RenewalEligibility field's value. +func (s *AwsCertificateManagerCertificateDetails) SetRenewalEligibility(v string) *AwsCertificateManagerCertificateDetails { + s.RenewalEligibility = &v + return s } -// GoString returns the string representation -func (s AwsEc2SecurityGroupDetails) GoString() string { - return s.String() +// SetRenewalSummary sets the RenewalSummary field's value. +func (s *AwsCertificateManagerCertificateDetails) SetRenewalSummary(v *AwsCertificateManagerCertificateRenewalSummary) *AwsCertificateManagerCertificateDetails { + s.RenewalSummary = v + return s } -// SetGroupId sets the GroupId field's value. -func (s *AwsEc2SecurityGroupDetails) SetGroupId(v string) *AwsEc2SecurityGroupDetails { - s.GroupId = &v +// SetSerial sets the Serial field's value. +func (s *AwsCertificateManagerCertificateDetails) SetSerial(v string) *AwsCertificateManagerCertificateDetails { + s.Serial = &v return s } -// SetGroupName sets the GroupName field's value. -func (s *AwsEc2SecurityGroupDetails) SetGroupName(v string) *AwsEc2SecurityGroupDetails { - s.GroupName = &v +// SetSignatureAlgorithm sets the SignatureAlgorithm field's value. +func (s *AwsCertificateManagerCertificateDetails) SetSignatureAlgorithm(v string) *AwsCertificateManagerCertificateDetails { + s.SignatureAlgorithm = &v return s } -// SetIpPermissions sets the IpPermissions field's value. -func (s *AwsEc2SecurityGroupDetails) SetIpPermissions(v []*AwsEc2SecurityGroupIpPermission) *AwsEc2SecurityGroupDetails { - s.IpPermissions = v +// SetStatus sets the Status field's value. +func (s *AwsCertificateManagerCertificateDetails) SetStatus(v string) *AwsCertificateManagerCertificateDetails { + s.Status = &v return s } -// SetIpPermissionsEgress sets the IpPermissionsEgress field's value. -func (s *AwsEc2SecurityGroupDetails) SetIpPermissionsEgress(v []*AwsEc2SecurityGroupIpPermission) *AwsEc2SecurityGroupDetails { - s.IpPermissionsEgress = v +// SetSubject sets the Subject field's value. +func (s *AwsCertificateManagerCertificateDetails) SetSubject(v string) *AwsCertificateManagerCertificateDetails { + s.Subject = &v return s } -// SetOwnerId sets the OwnerId field's value. -func (s *AwsEc2SecurityGroupDetails) SetOwnerId(v string) *AwsEc2SecurityGroupDetails { - s.OwnerId = &v +// SetSubjectAlternativeNames sets the SubjectAlternativeNames field's value. +func (s *AwsCertificateManagerCertificateDetails) SetSubjectAlternativeNames(v []*string) *AwsCertificateManagerCertificateDetails { + s.SubjectAlternativeNames = v return s } -// SetVpcId sets the VpcId field's value. 
-func (s *AwsEc2SecurityGroupDetails) SetVpcId(v string) *AwsEc2SecurityGroupDetails { - s.VpcId = &v +// SetType sets the Type field's value. +func (s *AwsCertificateManagerCertificateDetails) SetType(v string) *AwsCertificateManagerCertificateDetails { + s.Type = &v return s } -// An IP permission for an EC2 security group. -type AwsEc2SecurityGroupIpPermission struct { +// Contains information about one of the following: +// +// * The initial validation of each domain name that occurs as a result of +// the RequestCertificate request +// +// * The validation of each domain name in the certificate, as it pertains +// to AWS Certificate Manager managed renewal +type AwsCertificateManagerCertificateDomainValidationOption struct { _ struct{} `type:"structure"` - // The start of the port range for the TCP and UDP protocols, or an ICMP/ICMPv6 - // type number. - // - // A value of -1 indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6 - // types, you must specify all codes. - FromPort *int64 `type:"integer"` - - // The IP protocol name (tcp, udp, icmp, icmpv6) or number. - // - // [VPC only] Use -1 to specify all protocols. - // - // When authorizing security group rules, specifying -1 or a protocol number - // other than tcp, udp, icmp, or icmpv6 allows traffic on all ports, regardless - // of any port range you specify. - // - // For tcp, udp, and icmp, you must specify a port range. - // - // For icmpv6, the port range is optional. If you omit the port range, traffic - // for all types and codes is allowed. - IpProtocol *string `type:"string"` + // A fully qualified domain name (FQDN) in the certificate. + DomainName *string `type:"string"` - // The IPv4 ranges. - IpRanges []*AwsEc2SecurityGroupIpRange `type:"list"` + // The CNAME record that is added to the DNS database for domain validation. + ResourceRecord *AwsCertificateManagerCertificateResourceRecord `type:"structure"` - // The IPv6 ranges. - Ipv6Ranges []*AwsEc2SecurityGroupIpv6Range `type:"list"` + // The domain name that AWS Certificate Manager uses to send domain validation + // emails. + ValidationDomain *string `type:"string"` - // [VPC only] The prefix list IDs for an AWS service. With outbound rules, this - // is the AWS service to access through a VPC endpoint from instances associated - // with the security group. - PrefixListIds []*AwsEc2SecurityGroupPrefixListId `type:"list"` + // A list of email addresses that AWS Certificate Manager uses to send domain + // validation emails. + ValidationEmails []*string `type:"list"` - // The end of the port range for the TCP and UDP protocols, or an ICMP/ICMPv6 - // code. - // - // A value of -1 indicates all ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6 - // types, you must specify all codes. - ToPort *int64 `type:"integer"` + // The method used to validate the domain name. + ValidationMethod *string `type:"string"` - // The security group and AWS account ID pairs. - UserIdGroupPairs []*AwsEc2SecurityGroupUserIdGroupPair `type:"list"` + // The validation status of the domain name. 
+ ValidationStatus *string `type:"string"` } // String returns the string representation -func (s AwsEc2SecurityGroupIpPermission) String() string { +func (s AwsCertificateManagerCertificateDomainValidationOption) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsEc2SecurityGroupIpPermission) GoString() string { +func (s AwsCertificateManagerCertificateDomainValidationOption) GoString() string { return s.String() } -// SetFromPort sets the FromPort field's value. -func (s *AwsEc2SecurityGroupIpPermission) SetFromPort(v int64) *AwsEc2SecurityGroupIpPermission { - s.FromPort = &v - return s -} - -// SetIpProtocol sets the IpProtocol field's value. -func (s *AwsEc2SecurityGroupIpPermission) SetIpProtocol(v string) *AwsEc2SecurityGroupIpPermission { - s.IpProtocol = &v +// SetDomainName sets the DomainName field's value. +func (s *AwsCertificateManagerCertificateDomainValidationOption) SetDomainName(v string) *AwsCertificateManagerCertificateDomainValidationOption { + s.DomainName = &v return s } -// SetIpRanges sets the IpRanges field's value. -func (s *AwsEc2SecurityGroupIpPermission) SetIpRanges(v []*AwsEc2SecurityGroupIpRange) *AwsEc2SecurityGroupIpPermission { - s.IpRanges = v +// SetResourceRecord sets the ResourceRecord field's value. +func (s *AwsCertificateManagerCertificateDomainValidationOption) SetResourceRecord(v *AwsCertificateManagerCertificateResourceRecord) *AwsCertificateManagerCertificateDomainValidationOption { + s.ResourceRecord = v return s } -// SetIpv6Ranges sets the Ipv6Ranges field's value. -func (s *AwsEc2SecurityGroupIpPermission) SetIpv6Ranges(v []*AwsEc2SecurityGroupIpv6Range) *AwsEc2SecurityGroupIpPermission { - s.Ipv6Ranges = v +// SetValidationDomain sets the ValidationDomain field's value. +func (s *AwsCertificateManagerCertificateDomainValidationOption) SetValidationDomain(v string) *AwsCertificateManagerCertificateDomainValidationOption { + s.ValidationDomain = &v return s } -// SetPrefixListIds sets the PrefixListIds field's value. -func (s *AwsEc2SecurityGroupIpPermission) SetPrefixListIds(v []*AwsEc2SecurityGroupPrefixListId) *AwsEc2SecurityGroupIpPermission { - s.PrefixListIds = v +// SetValidationEmails sets the ValidationEmails field's value. +func (s *AwsCertificateManagerCertificateDomainValidationOption) SetValidationEmails(v []*string) *AwsCertificateManagerCertificateDomainValidationOption { + s.ValidationEmails = v return s } -// SetToPort sets the ToPort field's value. -func (s *AwsEc2SecurityGroupIpPermission) SetToPort(v int64) *AwsEc2SecurityGroupIpPermission { - s.ToPort = &v +// SetValidationMethod sets the ValidationMethod field's value. +func (s *AwsCertificateManagerCertificateDomainValidationOption) SetValidationMethod(v string) *AwsCertificateManagerCertificateDomainValidationOption { + s.ValidationMethod = &v return s } -// SetUserIdGroupPairs sets the UserIdGroupPairs field's value. -func (s *AwsEc2SecurityGroupIpPermission) SetUserIdGroupPairs(v []*AwsEc2SecurityGroupUserIdGroupPair) *AwsEc2SecurityGroupIpPermission { - s.UserIdGroupPairs = v +// SetValidationStatus sets the ValidationStatus field's value. +func (s *AwsCertificateManagerCertificateDomainValidationOption) SetValidationStatus(v string) *AwsCertificateManagerCertificateDomainValidationOption { + s.ValidationStatus = &v return s } -// A range of IPv4 addresses. -type AwsEc2SecurityGroupIpRange struct { +// Contains information about an extended key usage X.509 v3 extension object. 
+type AwsCertificateManagerCertificateExtendedKeyUsage struct { _ struct{} `type:"structure"` - // The IPv4 CIDR range. You can either specify either a CIDR range or a source - // security group, but not both. To specify a single IPv4 address, use the /32 - // prefix length. - CidrIp *string `type:"string"` + // The name of an extension value. Indicates the purpose for which the certificate + // public key can be used. + Name *string `type:"string"` + + // An object identifier (OID) for the extension value. + // + // The format is numbers separated by periods. + OId *string `type:"string"` } // String returns the string representation -func (s AwsEc2SecurityGroupIpRange) String() string { +func (s AwsCertificateManagerCertificateExtendedKeyUsage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsEc2SecurityGroupIpRange) GoString() string { +func (s AwsCertificateManagerCertificateExtendedKeyUsage) GoString() string { return s.String() } -// SetCidrIp sets the CidrIp field's value. -func (s *AwsEc2SecurityGroupIpRange) SetCidrIp(v string) *AwsEc2SecurityGroupIpRange { - s.CidrIp = &v +// SetName sets the Name field's value. +func (s *AwsCertificateManagerCertificateExtendedKeyUsage) SetName(v string) *AwsCertificateManagerCertificateExtendedKeyUsage { + s.Name = &v return s } -// A range of IPv6 addresses. -type AwsEc2SecurityGroupIpv6Range struct { +// SetOId sets the OId field's value. +func (s *AwsCertificateManagerCertificateExtendedKeyUsage) SetOId(v string) *AwsCertificateManagerCertificateExtendedKeyUsage { + s.OId = &v + return s +} + +// Contains information about a key usage X.509 v3 extension object. +type AwsCertificateManagerCertificateKeyUsage struct { _ struct{} `type:"structure"` - // The IPv6 CIDR range. You can either specify either a CIDR range or a source - // security group, but not both. To specify a single IPv6 address, use the /128 - // prefix length. - CidrIpv6 *string `type:"string"` + // The key usage extension name. + Name *string `type:"string"` } // String returns the string representation -func (s AwsEc2SecurityGroupIpv6Range) String() string { +func (s AwsCertificateManagerCertificateKeyUsage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsEc2SecurityGroupIpv6Range) GoString() string { +func (s AwsCertificateManagerCertificateKeyUsage) GoString() string { return s.String() } -// SetCidrIpv6 sets the CidrIpv6 field's value. -func (s *AwsEc2SecurityGroupIpv6Range) SetCidrIpv6(v string) *AwsEc2SecurityGroupIpv6Range { - s.CidrIpv6 = &v +// SetName sets the Name field's value. +func (s *AwsCertificateManagerCertificateKeyUsage) SetName(v string) *AwsCertificateManagerCertificateKeyUsage { + s.Name = &v return s } -// A prefix list ID. -type AwsEc2SecurityGroupPrefixListId struct { +// Contains other options for the certificate. +type AwsCertificateManagerCertificateOptions struct { _ struct{} `type:"structure"` - // The ID of the prefix. - PrefixListId *string `type:"string"` + // Whether to add the certificate to a transparency log. 
+ // + // Valid values: DISABLED | ENABLED + CertificateTransparencyLoggingPreference *string `type:"string"` } // String returns the string representation -func (s AwsEc2SecurityGroupPrefixListId) String() string { +func (s AwsCertificateManagerCertificateOptions) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsEc2SecurityGroupPrefixListId) GoString() string { +func (s AwsCertificateManagerCertificateOptions) GoString() string { return s.String() } -// SetPrefixListId sets the PrefixListId field's value. -func (s *AwsEc2SecurityGroupPrefixListId) SetPrefixListId(v string) *AwsEc2SecurityGroupPrefixListId { - s.PrefixListId = &v +// SetCertificateTransparencyLoggingPreference sets the CertificateTransparencyLoggingPreference field's value. +func (s *AwsCertificateManagerCertificateOptions) SetCertificateTransparencyLoggingPreference(v string) *AwsCertificateManagerCertificateOptions { + s.CertificateTransparencyLoggingPreference = &v return s } -// A relationship between a security group and a user. -type AwsEc2SecurityGroupUserIdGroupPair struct { +// Contains information about the AWS Certificate Manager managed renewal for +// an AMAZON_ISSUED certificate. +type AwsCertificateManagerCertificateRenewalSummary struct { _ struct{} `type:"structure"` - // The ID of the security group. - GroupId *string `type:"string"` - - // The name of the security group. - GroupName *string `type:"string"` + // Information about the validation of each domain name in the certificate, + // as it pertains to AWS Certificate Manager managed renewal. Provided only + // when the certificate type is AMAZON_ISSUED. + DomainValidationOptions []*AwsCertificateManagerCertificateDomainValidationOption `type:"list"` - // The status of a VPC peering connection, if applicable. - PeeringStatus *string `type:"string"` + // The status of the AWS Certificate Manager managed renewal of the certificate. + // + // Valid values: PENDING_AUTO_RENEWAL | PENDING_VALIDATION | SUCCESS | FAILED + RenewalStatus *string `type:"string"` - // The ID of an AWS account. + // The reason that a renewal request was unsuccessful. // - // For a referenced security group in another VPC, the account ID of the referenced - // security group is returned in the response. If the referenced security group - // is deleted, this value is not returned. + // Valid values: NO_AVAILABLE_CONTACTS | ADDITIONAL_VERIFICATION_REQUIRED | + // DOMAIN_NOT_ALLOWED | INVALID_PUBLIC_DOMAIN | DOMAIN_VALIDATION_DENIED | CAA_ERROR + // | PCA_LIMIT_EXCEEDED | PCA_INVALID_ARN | PCA_INVALID_STATE | PCA_REQUEST_FAILED + // | PCA_NAME_CONSTRAINTS_VALIDATION | PCA_RESOURCE_NOT_FOUND | PCA_INVALID_ARGS + // | PCA_INVALID_DURATION | PCA_ACCESS_DENIED | SLR_NOT_FOUND | OTHER + RenewalStatusReason *string `type:"string"` + + // Indicates when the renewal summary was last updated. // - // [EC2-Classic] Required when adding or removing rules that reference a security - // group in another AWS. - UserId *string `type:"string"` - - // The ID of the VPC for the referenced security group, if applicable. - VpcId *string `type:"string"` - - // The ID of the VPC peering connection, if applicable. - VpcPeeringConnectionId *string `type:"string"` + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. 
+ UpdatedAt *string `type:"string"` } // String returns the string representation -func (s AwsEc2SecurityGroupUserIdGroupPair) String() string { +func (s AwsCertificateManagerCertificateRenewalSummary) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsEc2SecurityGroupUserIdGroupPair) GoString() string { +func (s AwsCertificateManagerCertificateRenewalSummary) GoString() string { return s.String() } -// SetGroupId sets the GroupId field's value. -func (s *AwsEc2SecurityGroupUserIdGroupPair) SetGroupId(v string) *AwsEc2SecurityGroupUserIdGroupPair { - s.GroupId = &v - return s -} - -// SetGroupName sets the GroupName field's value. -func (s *AwsEc2SecurityGroupUserIdGroupPair) SetGroupName(v string) *AwsEc2SecurityGroupUserIdGroupPair { - s.GroupName = &v - return s -} - -// SetPeeringStatus sets the PeeringStatus field's value. -func (s *AwsEc2SecurityGroupUserIdGroupPair) SetPeeringStatus(v string) *AwsEc2SecurityGroupUserIdGroupPair { - s.PeeringStatus = &v +// SetDomainValidationOptions sets the DomainValidationOptions field's value. +func (s *AwsCertificateManagerCertificateRenewalSummary) SetDomainValidationOptions(v []*AwsCertificateManagerCertificateDomainValidationOption) *AwsCertificateManagerCertificateRenewalSummary { + s.DomainValidationOptions = v return s } -// SetUserId sets the UserId field's value. -func (s *AwsEc2SecurityGroupUserIdGroupPair) SetUserId(v string) *AwsEc2SecurityGroupUserIdGroupPair { - s.UserId = &v +// SetRenewalStatus sets the RenewalStatus field's value. +func (s *AwsCertificateManagerCertificateRenewalSummary) SetRenewalStatus(v string) *AwsCertificateManagerCertificateRenewalSummary { + s.RenewalStatus = &v return s } -// SetVpcId sets the VpcId field's value. -func (s *AwsEc2SecurityGroupUserIdGroupPair) SetVpcId(v string) *AwsEc2SecurityGroupUserIdGroupPair { - s.VpcId = &v +// SetRenewalStatusReason sets the RenewalStatusReason field's value. +func (s *AwsCertificateManagerCertificateRenewalSummary) SetRenewalStatusReason(v string) *AwsCertificateManagerCertificateRenewalSummary { + s.RenewalStatusReason = &v return s } -// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value. -func (s *AwsEc2SecurityGroupUserIdGroupPair) SetVpcPeeringConnectionId(v string) *AwsEc2SecurityGroupUserIdGroupPair { - s.VpcPeeringConnectionId = &v +// SetUpdatedAt sets the UpdatedAt field's value. +func (s *AwsCertificateManagerCertificateRenewalSummary) SetUpdatedAt(v string) *AwsCertificateManagerCertificateRenewalSummary { + s.UpdatedAt = &v return s } -// Information about an Elasticsearch domain. -type AwsElasticsearchDomainDetails struct { +// Provides details about the CNAME record that is added to the DNS database +// for domain validation. +type AwsCertificateManagerCertificateResourceRecord struct { _ struct{} `type:"structure"` - // IAM policy document specifying the access policies for the new Amazon ES - // domain. - AccessPolicies *string `type:"string"` - - // Additional options for the domain endpoint. - DomainEndpointOptions *AwsElasticsearchDomainDomainEndpointOptions `type:"structure"` - - // Unique identifier for an Amazon ES domain. - DomainId *string `type:"string"` - - // Name of an Amazon ES domain. - // - // Domain names are unique across all domains owned by the same account within - // an AWS Region. - // - // Domain names must start with a lowercase letter and must be between 3 and - // 28 characters. 
- // - // Valid characters are a-z (lowercase only), 0-9, and – (hyphen). - DomainName *string `type:"string"` - - // Elasticsearch version. - ElasticsearchVersion *string `type:"string"` - - // Details about the configuration for encryption at rest. - EncryptionAtRestOptions *AwsElasticsearchDomainEncryptionAtRestOptions `type:"structure"` - - // Domain-specific endpoint used to submit index, search, and data upload requests - // to an Amazon ES domain. - // - // The endpoint is a service URL. - Endpoint *string `type:"string"` - - // The key-value pair that exists if the Amazon ES domain uses VPC endpoints. - Endpoints map[string]*string `type:"map"` + // The name of the resource. + Name *string `type:"string"` - // Details about the configuration for node-to-node encryption. - NodeToNodeEncryptionOptions *AwsElasticsearchDomainNodeToNodeEncryptionOptions `type:"structure"` + // The type of resource. + Type *string `type:"string"` - // Information that Amazon ES derives based on VPCOptions for the domain. - VPCOptions *AwsElasticsearchDomainVPCOptions `type:"structure"` + // The value of the resource. + Value *string `type:"string"` } // String returns the string representation -func (s AwsElasticsearchDomainDetails) String() string { +func (s AwsCertificateManagerCertificateResourceRecord) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsElasticsearchDomainDetails) GoString() string { +func (s AwsCertificateManagerCertificateResourceRecord) GoString() string { return s.String() } -// SetAccessPolicies sets the AccessPolicies field's value. -func (s *AwsElasticsearchDomainDetails) SetAccessPolicies(v string) *AwsElasticsearchDomainDetails { - s.AccessPolicies = &v - return s -} - -// SetDomainEndpointOptions sets the DomainEndpointOptions field's value. -func (s *AwsElasticsearchDomainDetails) SetDomainEndpointOptions(v *AwsElasticsearchDomainDomainEndpointOptions) *AwsElasticsearchDomainDetails { - s.DomainEndpointOptions = v - return s -} - -// SetDomainId sets the DomainId field's value. -func (s *AwsElasticsearchDomainDetails) SetDomainId(v string) *AwsElasticsearchDomainDetails { - s.DomainId = &v - return s -} - -// SetDomainName sets the DomainName field's value. -func (s *AwsElasticsearchDomainDetails) SetDomainName(v string) *AwsElasticsearchDomainDetails { - s.DomainName = &v - return s -} - -// SetElasticsearchVersion sets the ElasticsearchVersion field's value. -func (s *AwsElasticsearchDomainDetails) SetElasticsearchVersion(v string) *AwsElasticsearchDomainDetails { - s.ElasticsearchVersion = &v - return s -} - -// SetEncryptionAtRestOptions sets the EncryptionAtRestOptions field's value. -func (s *AwsElasticsearchDomainDetails) SetEncryptionAtRestOptions(v *AwsElasticsearchDomainEncryptionAtRestOptions) *AwsElasticsearchDomainDetails { - s.EncryptionAtRestOptions = v - return s -} - -// SetEndpoint sets the Endpoint field's value. -func (s *AwsElasticsearchDomainDetails) SetEndpoint(v string) *AwsElasticsearchDomainDetails { - s.Endpoint = &v - return s -} - -// SetEndpoints sets the Endpoints field's value. -func (s *AwsElasticsearchDomainDetails) SetEndpoints(v map[string]*string) *AwsElasticsearchDomainDetails { - s.Endpoints = v +// SetName sets the Name field's value. +func (s *AwsCertificateManagerCertificateResourceRecord) SetName(v string) *AwsCertificateManagerCertificateResourceRecord { + s.Name = &v return s } -// SetNodeToNodeEncryptionOptions sets the NodeToNodeEncryptionOptions field's value. 
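+
+// Editor's illustrative sketch (not part of the vendored, generated aws-sdk-go
+// source): structure-valued setters such as SetResourceRecord compose with the
+// scalar setters defined in this file, so a DNS-validated domain entry can be
+// described as below. The function name and all values are hypothetical placeholders.
+func exampleCertificateDomainValidationOption() *AwsCertificateManagerCertificateDomainValidationOption {
+ record := (&AwsCertificateManagerCertificateResourceRecord{}).
+  SetName("_x1.example.com.").
+  SetType("CNAME").
+  SetValue("_x2.acm-validations.aws.")
+ return (&AwsCertificateManagerCertificateDomainValidationOption{}).
+  SetDomainName("example.com").
+  SetValidationMethod("DNS").
+  SetValidationStatus("SUCCESS").
+  SetResourceRecord(record)
+}
+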
-func (s *AwsElasticsearchDomainDetails) SetNodeToNodeEncryptionOptions(v *AwsElasticsearchDomainNodeToNodeEncryptionOptions) *AwsElasticsearchDomainDetails { - s.NodeToNodeEncryptionOptions = v +// SetType sets the Type field's value. +func (s *AwsCertificateManagerCertificateResourceRecord) SetType(v string) *AwsCertificateManagerCertificateResourceRecord { + s.Type = &v return s } -// SetVPCOptions sets the VPCOptions field's value. -func (s *AwsElasticsearchDomainDetails) SetVPCOptions(v *AwsElasticsearchDomainVPCOptions) *AwsElasticsearchDomainDetails { - s.VPCOptions = v +// SetValue sets the Value field's value. +func (s *AwsCertificateManagerCertificateResourceRecord) SetValue(v string) *AwsCertificateManagerCertificateResourceRecord { + s.Value = &v return s } -// Additional options for the domain endpoint, such as whether to require HTTPS -// for all traffic. -type AwsElasticsearchDomainDomainEndpointOptions struct { +// Information about a cache behavior for the distribution. +type AwsCloudFrontDistributionCacheBehavior struct { _ struct{} `type:"structure"` - // Whether to require that all traffic to the domain arrive over HTTPS. - EnforceHTTPS *bool `type:"boolean"` - - // The TLS security policy to apply to the HTTPS endpoint of the Elasticsearch - // domain. + // The protocol that viewers can use to access the files in an origin. You can + // specify the following options: // - // Valid values: + // * allow-all - Viewers can use HTTP or HTTPS. // - // * Policy-Min-TLS-1-0-2019-07, which supports TLSv1.0 and higher + // * redirect-to-https - CloudFront responds to HTTP requests with an HTTP + // status code of 301 (Moved Permanently) and the HTTPS URL. The viewer then + // uses the new URL to resubmit. // - // * Policy-Min-TLS-1-2-2019-07, which only supports TLSv1.2 - TLSSecurityPolicy *string `type:"string"` + // * https-only - CloudFront responds to HTTP request with an HTTP status + // code of 403 (Forbidden). + ViewerProtocolPolicy *string `type:"string"` } // String returns the string representation -func (s AwsElasticsearchDomainDomainEndpointOptions) String() string { +func (s AwsCloudFrontDistributionCacheBehavior) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsElasticsearchDomainDomainEndpointOptions) GoString() string { +func (s AwsCloudFrontDistributionCacheBehavior) GoString() string { return s.String() } -// SetEnforceHTTPS sets the EnforceHTTPS field's value. -func (s *AwsElasticsearchDomainDomainEndpointOptions) SetEnforceHTTPS(v bool) *AwsElasticsearchDomainDomainEndpointOptions { - s.EnforceHTTPS = &v - return s -} - -// SetTLSSecurityPolicy sets the TLSSecurityPolicy field's value. -func (s *AwsElasticsearchDomainDomainEndpointOptions) SetTLSSecurityPolicy(v string) *AwsElasticsearchDomainDomainEndpointOptions { - s.TLSSecurityPolicy = &v +// SetViewerProtocolPolicy sets the ViewerProtocolPolicy field's value. +func (s *AwsCloudFrontDistributionCacheBehavior) SetViewerProtocolPolicy(v string) *AwsCloudFrontDistributionCacheBehavior { + s.ViewerProtocolPolicy = &v return s } -// Details about the configuration for encryption at rest. -type AwsElasticsearchDomainEncryptionAtRestOptions struct { +// Provides information about caching for the distribution. +type AwsCloudFrontDistributionCacheBehaviors struct { _ struct{} `type:"structure"` - // Whether encryption at rest is enabled. - Enabled *bool `type:"boolean"` - - // The KMS key ID. Takes the form 1a2a3a4-1a2a-3a4a-5a6a-1a2a3a4a5a6a. 
- KmsKeyId *string `type:"string"` + // The cache behaviors for the distribution. + Items []*AwsCloudFrontDistributionCacheBehavior `type:"list"` } // String returns the string representation -func (s AwsElasticsearchDomainEncryptionAtRestOptions) String() string { +func (s AwsCloudFrontDistributionCacheBehaviors) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsElasticsearchDomainEncryptionAtRestOptions) GoString() string { +func (s AwsCloudFrontDistributionCacheBehaviors) GoString() string { return s.String() } -// SetEnabled sets the Enabled field's value. -func (s *AwsElasticsearchDomainEncryptionAtRestOptions) SetEnabled(v bool) *AwsElasticsearchDomainEncryptionAtRestOptions { - s.Enabled = &v - return s -} - -// SetKmsKeyId sets the KmsKeyId field's value. -func (s *AwsElasticsearchDomainEncryptionAtRestOptions) SetKmsKeyId(v string) *AwsElasticsearchDomainEncryptionAtRestOptions { - s.KmsKeyId = &v +// SetItems sets the Items field's value. +func (s *AwsCloudFrontDistributionCacheBehaviors) SetItems(v []*AwsCloudFrontDistributionCacheBehavior) *AwsCloudFrontDistributionCacheBehaviors { + s.Items = v return s } -// Details about the configuration for node-to-node encryption. -type AwsElasticsearchDomainNodeToNodeEncryptionOptions struct { +// Contains information about the default cache configuration for the distribution. +type AwsCloudFrontDistributionDefaultCacheBehavior struct { _ struct{} `type:"structure"` - // Whether node-to-node encryption is enabled. - Enabled *bool `type:"boolean"` + // The protocol that viewers can use to access the files in an origin. You can + // specify the following options: + // + // * allow-all - Viewers can use HTTP or HTTPS. + // + // * redirect-to-https - CloudFront responds to HTTP requests with an HTTP + // status code of 301 (Moved Permanently) and the HTTPS URL. The viewer then + // uses the new URL to resubmit. + // + // * https-only - CloudFront responds to HTTP request with an HTTP status + // code of 403 (Forbidden). + ViewerProtocolPolicy *string `type:"string"` } // String returns the string representation -func (s AwsElasticsearchDomainNodeToNodeEncryptionOptions) String() string { +func (s AwsCloudFrontDistributionDefaultCacheBehavior) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsElasticsearchDomainNodeToNodeEncryptionOptions) GoString() string { +func (s AwsCloudFrontDistributionDefaultCacheBehavior) GoString() string { return s.String() } -// SetEnabled sets the Enabled field's value. -func (s *AwsElasticsearchDomainNodeToNodeEncryptionOptions) SetEnabled(v bool) *AwsElasticsearchDomainNodeToNodeEncryptionOptions { - s.Enabled = &v +// SetViewerProtocolPolicy sets the ViewerProtocolPolicy field's value. +func (s *AwsCloudFrontDistributionDefaultCacheBehavior) SetViewerProtocolPolicy(v string) *AwsCloudFrontDistributionDefaultCacheBehavior { + s.ViewerProtocolPolicy = &v return s } -// Information that Amazon ES derives based on VPCOptions for the domain. -type AwsElasticsearchDomainVPCOptions struct { +// A distribution configuration. +type AwsCloudFrontDistributionDetails struct { _ struct{} `type:"structure"` - // The list of Availability Zones associated with the VPC subnets. - AvailabilityZones []*string `type:"list"` + // Provides information about the cache configuration for the distribution. 
+ CacheBehaviors *AwsCloudFrontDistributionCacheBehaviors `type:"structure"`

- // The list of security group IDs associated with the VPC endpoints for the
- // domain.
- SecurityGroupIds []*string `type:"list"`
+ // The default cache behavior for the configuration.
+ DefaultCacheBehavior *AwsCloudFrontDistributionDefaultCacheBehavior `type:"structure"`

- // A list of subnet IDs associated with the VPC endpoints for the domain.
- SubnetIds []*string `type:"list"`
+ // The object that CloudFront sends in response to requests from the origin
+ // (for example, index.html) when a viewer requests the root URL for the distribution
+ // (http://www.example.com) instead of an object in your distribution (http://www.example.com/product-description.html).
+ DefaultRootObject *string `type:"string"`

- // ID for the VPC.
- VPCId *string `type:"string"`
+ // The domain name corresponding to the distribution.
+ DomainName *string `type:"string"`
+
+ // The entity tag is a hash of the object.
+ ETag *string `type:"string"`
+
+ // Indicates when the distribution was last modified.
+ //
+ // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time
+ // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot
+ // contain spaces. For example, 2020-03-22T13:22:13.933Z.
+ LastModifiedTime *string `type:"string"`
+
+ // A complex type that controls whether access logs are written for the distribution.
+ Logging *AwsCloudFrontDistributionLogging `type:"structure"`
+
+ // Provides information about the origin groups in the distribution.
+ OriginGroups *AwsCloudFrontDistributionOriginGroups `type:"structure"`
+
+ // A complex type that contains information about origins for this distribution.
+ Origins *AwsCloudFrontDistributionOrigins `type:"structure"`
+
+ // Indicates the current status of the distribution.
+ Status *string `type:"string"`
+
+ // A unique identifier that specifies the AWS WAF web ACL, if any, to associate
+ // with this distribution.
+ WebAclId *string `type:"string"`
 }

 // String returns the string representation
-func (s AwsElasticsearchDomainVPCOptions) String() string {
+func (s AwsCloudFrontDistributionDetails) String() string {
 return awsutil.Prettify(s)
 }

 // GoString returns the string representation
-func (s AwsElasticsearchDomainVPCOptions) GoString() string {
+func (s AwsCloudFrontDistributionDetails) GoString() string {
 return s.String()
 }

-// SetAvailabilityZones sets the AvailabilityZones field's value.
-func (s *AwsElasticsearchDomainVPCOptions) SetAvailabilityZones(v []*string) *AwsElasticsearchDomainVPCOptions {
- s.AvailabilityZones = v
+// SetCacheBehaviors sets the CacheBehaviors field's value.
+func (s *AwsCloudFrontDistributionDetails) SetCacheBehaviors(v *AwsCloudFrontDistributionCacheBehaviors) *AwsCloudFrontDistributionDetails {
+ s.CacheBehaviors = v
 return s
 }

-// SetSecurityGroupIds sets the SecurityGroupIds field's value.
-func (s *AwsElasticsearchDomainVPCOptions) SetSecurityGroupIds(v []*string) *AwsElasticsearchDomainVPCOptions {
- s.SecurityGroupIds = v
+// SetDefaultCacheBehavior sets the DefaultCacheBehavior field's value.
+func (s *AwsCloudFrontDistributionDetails) SetDefaultCacheBehavior(v *AwsCloudFrontDistributionDefaultCacheBehavior) *AwsCloudFrontDistributionDetails {
+ s.DefaultCacheBehavior = v
 return s
 }

-// SetSubnetIds sets the SubnetIds field's value.
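+
+// Editor's illustrative sketch (not part of the vendored, generated aws-sdk-go
+// source): the nested CloudFront detail types defined in this file compose
+// through their chained setters as shown below. The function name and all
+// values are hypothetical placeholders.
+func exampleCloudFrontDistributionDetails() *AwsCloudFrontDistributionDetails {
+ origin := (&AwsCloudFrontDistributionOriginItem{}).
+  SetId("S3-example-bucket").
+  SetDomainName("example-bucket.s3.amazonaws.com").
+  SetS3OriginConfig((&AwsCloudFrontDistributionOriginS3OriginConfig{}).
+   SetOriginAccessIdentity("origin-access-identity/cloudfront/EXAMPLE"))
+ return (&AwsCloudFrontDistributionDetails{}).
+  SetDomainName("d111111abcdef8.cloudfront.net").
+  SetStatus("Deployed").
+  SetDefaultCacheBehavior((&AwsCloudFrontDistributionDefaultCacheBehavior{}).
+   SetViewerProtocolPolicy("redirect-to-https")).
+  SetLogging((&AwsCloudFrontDistributionLogging{}).
+   SetBucket("example-logs.s3.amazonaws.com").
+   SetEnabled(true).
+   SetPrefix("cloudfront/")).
+  SetOrigins((&AwsCloudFrontDistributionOrigins{}).
+   SetItems([]*AwsCloudFrontDistributionOriginItem{origin}))
+}
+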
-func (s *AwsElasticsearchDomainVPCOptions) SetSubnetIds(v []*string) *AwsElasticsearchDomainVPCOptions { - s.SubnetIds = v +// SetDefaultRootObject sets the DefaultRootObject field's value. +func (s *AwsCloudFrontDistributionDetails) SetDefaultRootObject(v string) *AwsCloudFrontDistributionDetails { + s.DefaultRootObject = &v return s } -// SetVPCId sets the VPCId field's value. -func (s *AwsElasticsearchDomainVPCOptions) SetVPCId(v string) *AwsElasticsearchDomainVPCOptions { - s.VPCId = &v +// SetDomainName sets the DomainName field's value. +func (s *AwsCloudFrontDistributionDetails) SetDomainName(v string) *AwsCloudFrontDistributionDetails { + s.DomainName = &v return s } -// Information about a load balancer. -type AwsElbv2LoadBalancerDetails struct { - _ struct{} `type:"structure"` - - // The Availability Zones for the load balancer. - AvailabilityZones []*AvailabilityZone `type:"list"` +// SetETag sets the ETag field's value. +func (s *AwsCloudFrontDistributionDetails) SetETag(v string) *AwsCloudFrontDistributionDetails { + s.ETag = &v + return s +} + +// SetLastModifiedTime sets the LastModifiedTime field's value. +func (s *AwsCloudFrontDistributionDetails) SetLastModifiedTime(v string) *AwsCloudFrontDistributionDetails { + s.LastModifiedTime = &v + return s +} + +// SetLogging sets the Logging field's value. +func (s *AwsCloudFrontDistributionDetails) SetLogging(v *AwsCloudFrontDistributionLogging) *AwsCloudFrontDistributionDetails { + s.Logging = v + return s +} + +// SetOriginGroups sets the OriginGroups field's value. +func (s *AwsCloudFrontDistributionDetails) SetOriginGroups(v *AwsCloudFrontDistributionOriginGroups) *AwsCloudFrontDistributionDetails { + s.OriginGroups = v + return s +} + +// SetOrigins sets the Origins field's value. +func (s *AwsCloudFrontDistributionDetails) SetOrigins(v *AwsCloudFrontDistributionOrigins) *AwsCloudFrontDistributionDetails { + s.Origins = v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsCloudFrontDistributionDetails) SetStatus(v string) *AwsCloudFrontDistributionDetails { + s.Status = &v + return s +} + +// SetWebAclId sets the WebAclId field's value. +func (s *AwsCloudFrontDistributionDetails) SetWebAclId(v string) *AwsCloudFrontDistributionDetails { + s.WebAclId = &v + return s +} + +// A complex type that controls whether access logs are written for the distribution. +type AwsCloudFrontDistributionLogging struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket to store the access logs in. + Bucket *string `type:"string"` + + // With this field, you can enable or disable the selected distribution. + Enabled *bool `type:"boolean"` + + // Specifies whether you want CloudFront to include cookies in access logs. + IncludeCookies *bool `type:"boolean"` + + // An optional string that you want CloudFront to use as a prefix to the access + // log filenames for this distribution. + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s AwsCloudFrontDistributionLogging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCloudFrontDistributionLogging) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *AwsCloudFrontDistributionLogging) SetBucket(v string) *AwsCloudFrontDistributionLogging { + s.Bucket = &v + return s +} + +// SetEnabled sets the Enabled field's value. 
+func (s *AwsCloudFrontDistributionLogging) SetEnabled(v bool) *AwsCloudFrontDistributionLogging { + s.Enabled = &v + return s +} + +// SetIncludeCookies sets the IncludeCookies field's value. +func (s *AwsCloudFrontDistributionLogging) SetIncludeCookies(v bool) *AwsCloudFrontDistributionLogging { + s.IncludeCookies = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AwsCloudFrontDistributionLogging) SetPrefix(v string) *AwsCloudFrontDistributionLogging { + s.Prefix = &v + return s +} + +// Information about an origin group for the distribution. +type AwsCloudFrontDistributionOriginGroup struct { + _ struct{} `type:"structure"` + + // Provides the criteria for an origin group to fail over. + FailoverCriteria *AwsCloudFrontDistributionOriginGroupFailover `type:"structure"` +} + +// String returns the string representation +func (s AwsCloudFrontDistributionOriginGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCloudFrontDistributionOriginGroup) GoString() string { + return s.String() +} + +// SetFailoverCriteria sets the FailoverCriteria field's value. +func (s *AwsCloudFrontDistributionOriginGroup) SetFailoverCriteria(v *AwsCloudFrontDistributionOriginGroupFailover) *AwsCloudFrontDistributionOriginGroup { + s.FailoverCriteria = v + return s +} + +// Provides information about when an origin group fails over. +type AwsCloudFrontDistributionOriginGroupFailover struct { + _ struct{} `type:"structure"` + + // Information about the status codes that cause an origin group to fail over. + StatusCodes *AwsCloudFrontDistributionOriginGroupFailoverStatusCodes `type:"structure"` +} + +// String returns the string representation +func (s AwsCloudFrontDistributionOriginGroupFailover) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCloudFrontDistributionOriginGroupFailover) GoString() string { + return s.String() +} + +// SetStatusCodes sets the StatusCodes field's value. +func (s *AwsCloudFrontDistributionOriginGroupFailover) SetStatusCodes(v *AwsCloudFrontDistributionOriginGroupFailoverStatusCodes) *AwsCloudFrontDistributionOriginGroupFailover { + s.StatusCodes = v + return s +} + +// The status codes that cause an origin group to fail over. +type AwsCloudFrontDistributionOriginGroupFailoverStatusCodes struct { + _ struct{} `type:"structure"` + + // The list of status code values that can cause a failover to the next origin. + Items []*int64 `type:"list"` + + // The number of status codes that can cause a failover. + Quantity *int64 `type:"integer"` +} + +// String returns the string representation +func (s AwsCloudFrontDistributionOriginGroupFailoverStatusCodes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCloudFrontDistributionOriginGroupFailoverStatusCodes) GoString() string { + return s.String() +} + +// SetItems sets the Items field's value. +func (s *AwsCloudFrontDistributionOriginGroupFailoverStatusCodes) SetItems(v []*int64) *AwsCloudFrontDistributionOriginGroupFailoverStatusCodes { + s.Items = v + return s +} + +// SetQuantity sets the Quantity field's value. +func (s *AwsCloudFrontDistributionOriginGroupFailoverStatusCodes) SetQuantity(v int64) *AwsCloudFrontDistributionOriginGroupFailoverStatusCodes { + s.Quantity = &v + return s +} + +// Provides information about origin groups that are associated with the distribution. 
+type AwsCloudFrontDistributionOriginGroups struct { + _ struct{} `type:"structure"` + + // The list of origin groups. + Items []*AwsCloudFrontDistributionOriginGroup `type:"list"` +} + +// String returns the string representation +func (s AwsCloudFrontDistributionOriginGroups) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCloudFrontDistributionOriginGroups) GoString() string { + return s.String() +} + +// SetItems sets the Items field's value. +func (s *AwsCloudFrontDistributionOriginGroups) SetItems(v []*AwsCloudFrontDistributionOriginGroup) *AwsCloudFrontDistributionOriginGroups { + s.Items = v + return s +} + +// A complex type that describes the Amazon S3 bucket, HTTP server (for example, +// a web server), Amazon Elemental MediaStore, or other server from which CloudFront +// gets your files. +type AwsCloudFrontDistributionOriginItem struct { + _ struct{} `type:"structure"` + + // Amazon S3 origins: The DNS name of the Amazon S3 bucket from which you want + // CloudFront to get objects for this origin. + DomainName *string `type:"string"` + + // A unique identifier for the origin or origin group. + Id *string `type:"string"` + + // An optional element that causes CloudFront to request your content from a + // directory in your Amazon S3 bucket or your custom origin. + OriginPath *string `type:"string"` + + // An origin that is an S3 bucket that is not configured with static website + // hosting. + S3OriginConfig *AwsCloudFrontDistributionOriginS3OriginConfig `type:"structure"` +} + +// String returns the string representation +func (s AwsCloudFrontDistributionOriginItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCloudFrontDistributionOriginItem) GoString() string { + return s.String() +} + +// SetDomainName sets the DomainName field's value. +func (s *AwsCloudFrontDistributionOriginItem) SetDomainName(v string) *AwsCloudFrontDistributionOriginItem { + s.DomainName = &v + return s +} + +// SetId sets the Id field's value. +func (s *AwsCloudFrontDistributionOriginItem) SetId(v string) *AwsCloudFrontDistributionOriginItem { + s.Id = &v + return s +} + +// SetOriginPath sets the OriginPath field's value. +func (s *AwsCloudFrontDistributionOriginItem) SetOriginPath(v string) *AwsCloudFrontDistributionOriginItem { + s.OriginPath = &v + return s +} + +// SetS3OriginConfig sets the S3OriginConfig field's value. +func (s *AwsCloudFrontDistributionOriginItem) SetS3OriginConfig(v *AwsCloudFrontDistributionOriginS3OriginConfig) *AwsCloudFrontDistributionOriginItem { + s.S3OriginConfig = v + return s +} + +// Information about an origin that is an S3 bucket that is not configured with +// static website hosting. +type AwsCloudFrontDistributionOriginS3OriginConfig struct { + _ struct{} `type:"structure"` + + // The CloudFront origin access identity to associate with the origin. + OriginAccessIdentity *string `type:"string"` +} + +// String returns the string representation +func (s AwsCloudFrontDistributionOriginS3OriginConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCloudFrontDistributionOriginS3OriginConfig) GoString() string { + return s.String() +} + +// SetOriginAccessIdentity sets the OriginAccessIdentity field's value. 
+func (s *AwsCloudFrontDistributionOriginS3OriginConfig) SetOriginAccessIdentity(v string) *AwsCloudFrontDistributionOriginS3OriginConfig { + s.OriginAccessIdentity = &v + return s +} + +// A complex type that contains information about origins and origin groups +// for this distribution. +type AwsCloudFrontDistributionOrigins struct { + _ struct{} `type:"structure"` + + // A complex type that contains origins or origin groups for this distribution. + Items []*AwsCloudFrontDistributionOriginItem `type:"list"` +} + +// String returns the string representation +func (s AwsCloudFrontDistributionOrigins) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCloudFrontDistributionOrigins) GoString() string { + return s.String() +} + +// SetItems sets the Items field's value. +func (s *AwsCloudFrontDistributionOrigins) SetItems(v []*AwsCloudFrontDistributionOriginItem) *AwsCloudFrontDistributionOrigins { + s.Items = v + return s +} + +// Provides details about a CloudTrail trail. +type AwsCloudTrailTrailDetails struct { + _ struct{} `type:"structure"` + + // The ARN of the log group that CloudTrail logs are delivered to. + CloudWatchLogsLogGroupArn *string `type:"string"` + + // The ARN of the role that the CloudWatch Logs endpoint assumes when it writes + // to the log group. + CloudWatchLogsRoleArn *string `type:"string"` + + // Indicates whether the trail has custom event selectors. + HasCustomEventSelectors *bool `type:"boolean"` + + // The Region where the trail was created. + HomeRegion *string `type:"string"` + + // Indicates whether the trail publishes events from global services such as + // IAM to the log files. + IncludeGlobalServiceEvents *bool `type:"boolean"` + + // Indicates whether the trail applies only to the current Region or to all + // Regions. + IsMultiRegionTrail *bool `type:"boolean"` + + // Whether the trail is created for all accounts in an organization in AWS Organizations, + // or only for the current AWS account. + IsOrganizationTrail *bool `type:"boolean"` + + // The AWS KMS key ID to use to encrypt the logs. + KmsKeyId *string `type:"string"` + + // Indicates whether CloudTrail log file validation is enabled. + LogFileValidationEnabled *bool `type:"boolean"` + + // The name of the trail. + Name *string `type:"string"` + + // The name of the S3 bucket where the log files are published. + S3BucketName *string `type:"string"` + + // The S3 key prefix. The key prefix is added after the name of the S3 bucket + // where the log files are published. + S3KeyPrefix *string `type:"string"` + + // The ARN of the SNS topic that is used for notifications of log file delivery. + SnsTopicArn *string `type:"string"` + + // The name of the SNS topic that is used for notifications of log file delivery. + SnsTopicName *string `type:"string"` + + // The ARN of the trail. + TrailArn *string `type:"string"` +} + +// String returns the string representation +func (s AwsCloudTrailTrailDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCloudTrailTrailDetails) GoString() string { + return s.String() +} + +// SetCloudWatchLogsLogGroupArn sets the CloudWatchLogsLogGroupArn field's value. +func (s *AwsCloudTrailTrailDetails) SetCloudWatchLogsLogGroupArn(v string) *AwsCloudTrailTrailDetails { + s.CloudWatchLogsLogGroupArn = &v + return s +} + +// SetCloudWatchLogsRoleArn sets the CloudWatchLogsRoleArn field's value. 
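+
+// Editor's illustrative sketch (not part of the vendored, generated aws-sdk-go
+// source): an AwsCloudTrailTrailDetails value is populated the same way, via
+// its chained setters. The function name, ARN, and other values below are
+// hypothetical placeholders.
+func exampleCloudTrailTrailDetails() *AwsCloudTrailTrailDetails {
+ return (&AwsCloudTrailTrailDetails{}).
+  SetName("management-events").
+  SetHomeRegion("us-east-1").
+  SetIsMultiRegionTrail(true).
+  SetIncludeGlobalServiceEvents(true).
+  SetLogFileValidationEnabled(true).
+  SetS3BucketName("example-cloudtrail-logs").
+  SetTrailArn("arn:aws:cloudtrail:us-east-1:123456789012:trail/management-events")
+}
+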
+func (s *AwsCloudTrailTrailDetails) SetCloudWatchLogsRoleArn(v string) *AwsCloudTrailTrailDetails { + s.CloudWatchLogsRoleArn = &v + return s +} + +// SetHasCustomEventSelectors sets the HasCustomEventSelectors field's value. +func (s *AwsCloudTrailTrailDetails) SetHasCustomEventSelectors(v bool) *AwsCloudTrailTrailDetails { + s.HasCustomEventSelectors = &v + return s +} + +// SetHomeRegion sets the HomeRegion field's value. +func (s *AwsCloudTrailTrailDetails) SetHomeRegion(v string) *AwsCloudTrailTrailDetails { + s.HomeRegion = &v + return s +} + +// SetIncludeGlobalServiceEvents sets the IncludeGlobalServiceEvents field's value. +func (s *AwsCloudTrailTrailDetails) SetIncludeGlobalServiceEvents(v bool) *AwsCloudTrailTrailDetails { + s.IncludeGlobalServiceEvents = &v + return s +} + +// SetIsMultiRegionTrail sets the IsMultiRegionTrail field's value. +func (s *AwsCloudTrailTrailDetails) SetIsMultiRegionTrail(v bool) *AwsCloudTrailTrailDetails { + s.IsMultiRegionTrail = &v + return s +} + +// SetIsOrganizationTrail sets the IsOrganizationTrail field's value. +func (s *AwsCloudTrailTrailDetails) SetIsOrganizationTrail(v bool) *AwsCloudTrailTrailDetails { + s.IsOrganizationTrail = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *AwsCloudTrailTrailDetails) SetKmsKeyId(v string) *AwsCloudTrailTrailDetails { + s.KmsKeyId = &v + return s +} + +// SetLogFileValidationEnabled sets the LogFileValidationEnabled field's value. +func (s *AwsCloudTrailTrailDetails) SetLogFileValidationEnabled(v bool) *AwsCloudTrailTrailDetails { + s.LogFileValidationEnabled = &v + return s +} + +// SetName sets the Name field's value. +func (s *AwsCloudTrailTrailDetails) SetName(v string) *AwsCloudTrailTrailDetails { + s.Name = &v + return s +} + +// SetS3BucketName sets the S3BucketName field's value. +func (s *AwsCloudTrailTrailDetails) SetS3BucketName(v string) *AwsCloudTrailTrailDetails { + s.S3BucketName = &v + return s +} + +// SetS3KeyPrefix sets the S3KeyPrefix field's value. +func (s *AwsCloudTrailTrailDetails) SetS3KeyPrefix(v string) *AwsCloudTrailTrailDetails { + s.S3KeyPrefix = &v + return s +} + +// SetSnsTopicArn sets the SnsTopicArn field's value. +func (s *AwsCloudTrailTrailDetails) SetSnsTopicArn(v string) *AwsCloudTrailTrailDetails { + s.SnsTopicArn = &v + return s +} + +// SetSnsTopicName sets the SnsTopicName field's value. +func (s *AwsCloudTrailTrailDetails) SetSnsTopicName(v string) *AwsCloudTrailTrailDetails { + s.SnsTopicName = &v + return s +} + +// SetTrailArn sets the TrailArn field's value. +func (s *AwsCloudTrailTrailDetails) SetTrailArn(v string) *AwsCloudTrailTrailDetails { + s.TrailArn = &v + return s +} + +// Information about an AWS CodeBuild project. +type AwsCodeBuildProjectDetails struct { + _ struct{} `type:"structure"` + + // The AWS Key Management Service (AWS KMS) customer master key (CMK) used to + // encrypt the build output artifacts. + // + // You can specify either the Amazon Resource Name (ARN) of the CMK or, if available, + // the CMK alias (using the format alias/alias-name). + EncryptionKey *string `type:"string"` + + // Information about the build environment for this build project. + Environment *AwsCodeBuildProjectEnvironment `type:"structure"` + + // The name of the build project. + Name *string `type:"string"` + + // The ARN of the IAM role that enables AWS CodeBuild to interact with dependent + // AWS services on behalf of the AWS account. 
+ ServiceRole *string `type:"string"`
+
+ // Information about the build input source code for this build project.
+ Source *AwsCodeBuildProjectSource `type:"structure"`
+
+ // Information about the VPC configuration that AWS CodeBuild accesses.
+ VpcConfig *AwsCodeBuildProjectVpcConfig `type:"structure"`
+}
+
+// String returns the string representation
+func (s AwsCodeBuildProjectDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AwsCodeBuildProjectDetails) GoString() string {
+ return s.String()
+}
+
+// SetEncryptionKey sets the EncryptionKey field's value.
+func (s *AwsCodeBuildProjectDetails) SetEncryptionKey(v string) *AwsCodeBuildProjectDetails {
+ s.EncryptionKey = &v
+ return s
+}
+
+// SetEnvironment sets the Environment field's value.
+func (s *AwsCodeBuildProjectDetails) SetEnvironment(v *AwsCodeBuildProjectEnvironment) *AwsCodeBuildProjectDetails {
+ s.Environment = v
+ return s
+}
+
+// SetName sets the Name field's value.
+func (s *AwsCodeBuildProjectDetails) SetName(v string) *AwsCodeBuildProjectDetails {
+ s.Name = &v
+ return s
+}
+
+// SetServiceRole sets the ServiceRole field's value.
+func (s *AwsCodeBuildProjectDetails) SetServiceRole(v string) *AwsCodeBuildProjectDetails {
+ s.ServiceRole = &v
+ return s
+}
+
+// SetSource sets the Source field's value.
+func (s *AwsCodeBuildProjectDetails) SetSource(v *AwsCodeBuildProjectSource) *AwsCodeBuildProjectDetails {
+ s.Source = v
+ return s
+}
+
+// SetVpcConfig sets the VpcConfig field's value.
+func (s *AwsCodeBuildProjectDetails) SetVpcConfig(v *AwsCodeBuildProjectVpcConfig) *AwsCodeBuildProjectDetails {
+ s.VpcConfig = v
+ return s
+}
+
+// Information about the build environment for this build project.
+type AwsCodeBuildProjectEnvironment struct {
+ _ struct{} `type:"structure"`
+
+ // The certificate to use with this build project.
+ Certificate *string `type:"string"`
+
+ // The type of credentials AWS CodeBuild uses to pull images in your build.
+ //
+ // Valid values:
+ //
+ // * CODEBUILD specifies that AWS CodeBuild uses its own credentials. This
+ // requires that you modify your ECR repository policy to trust the AWS CodeBuild
+ // service principal.
+ //
+ // * SERVICE_ROLE specifies that AWS CodeBuild uses your build project's
+ // service role.
+ //
+ // When you use a cross-account or private registry image, you must use SERVICE_ROLE
+ // credentials. When you use an AWS CodeBuild curated image, you must use CODEBUILD
+ // credentials.
+ ImagePullCredentialsType *string `type:"string"`
+
+ // The credentials for access to a private registry.
+ RegistryCredential *AwsCodeBuildProjectEnvironmentRegistryCredential `type:"structure"`
+
+ // The type of build environment to use for related builds.
+ //
+ // The environment type ARM_CONTAINER is available only in Regions US East (N.
+ // Virginia), US East (Ohio), US West (Oregon), Europe (Ireland), Asia Pacific
+ // (Mumbai), Asia Pacific (Tokyo), Asia Pacific (Sydney), and Europe (Frankfurt).
+ //
+ // The environment type LINUX_CONTAINER with compute type build.general1.2xlarge
+ // is available only in Regions US East (N. Virginia), US East (Ohio),
+ // US West (Oregon), Canada (Central), Europe (Ireland), Europe (London), Europe
+ // (Frankfurt), Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific (Singapore),
+ // Asia Pacific (Sydney), China (Beijing), and China (Ningxia).
+ //
+ // The environment type LINUX_GPU_CONTAINER is available only in Regions US
+ // East (N. Virginia), US East (Ohio), US West (Oregon), Canada (Central),
+ // Europe (Ireland), Europe (London), Europe (Frankfurt), Asia Pacific (Tokyo),
+ // Asia Pacific (Seoul), Asia Pacific (Singapore), Asia Pacific (Sydney), China
+ // (Beijing), and China (Ningxia).
+ //
+ // Valid values: WINDOWS_CONTAINER | LINUX_CONTAINER | LINUX_GPU_CONTAINER |
+ // ARM_CONTAINER
+ Type *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AwsCodeBuildProjectEnvironment) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AwsCodeBuildProjectEnvironment) GoString() string {
+ return s.String()
+}
+
+// SetCertificate sets the Certificate field's value.
+func (s *AwsCodeBuildProjectEnvironment) SetCertificate(v string) *AwsCodeBuildProjectEnvironment {
+ s.Certificate = &v
+ return s
+}
+
+// SetImagePullCredentialsType sets the ImagePullCredentialsType field's value.
+func (s *AwsCodeBuildProjectEnvironment) SetImagePullCredentialsType(v string) *AwsCodeBuildProjectEnvironment {
+ s.ImagePullCredentialsType = &v
+ return s
+}
+
+// SetRegistryCredential sets the RegistryCredential field's value.
+func (s *AwsCodeBuildProjectEnvironment) SetRegistryCredential(v *AwsCodeBuildProjectEnvironmentRegistryCredential) *AwsCodeBuildProjectEnvironment {
+ s.RegistryCredential = v
+ return s
+}
+
+// SetType sets the Type field's value.
+func (s *AwsCodeBuildProjectEnvironment) SetType(v string) *AwsCodeBuildProjectEnvironment {
+ s.Type = &v
+ return s
+}
+
+// The credentials for access to a private registry.
+type AwsCodeBuildProjectEnvironmentRegistryCredential struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) or name of credentials created using AWS Secrets
+ // Manager.
+ //
+ // The credential can use the name of the credentials only if they exist in
+ // your current AWS Region.
+ Credential *string `type:"string"`
+
+ // The service that created the credentials to access a private Docker registry.
+ //
+ // The valid value, SECRETS_MANAGER, is for AWS Secrets Manager.
+ CredentialProvider *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AwsCodeBuildProjectEnvironmentRegistryCredential) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AwsCodeBuildProjectEnvironmentRegistryCredential) GoString() string {
+ return s.String()
+}
+
+// SetCredential sets the Credential field's value.
+func (s *AwsCodeBuildProjectEnvironmentRegistryCredential) SetCredential(v string) *AwsCodeBuildProjectEnvironmentRegistryCredential {
+ s.Credential = &v
+ return s
+}
+
+// SetCredentialProvider sets the CredentialProvider field's value.
+func (s *AwsCodeBuildProjectEnvironmentRegistryCredential) SetCredentialProvider(v string) *AwsCodeBuildProjectEnvironmentRegistryCredential {
+ s.CredentialProvider = &v
+ return s
+}
+
+// Information about the build input source code for this build project.
+type AwsCodeBuildProjectSource struct {
+ _ struct{} `type:"structure"`
+
+ // Information about the Git clone depth for the build project.
+ GitCloneDepth *int64 `type:"integer"`
+
+ // Whether to ignore SSL warnings while connecting to the project source code.
+ InsecureSsl *bool `type:"boolean"`
+
+ // Information about the location of the source code to be built.
+ // + // Valid values include: + // + // * For source code settings that are specified in the source action of + // a pipeline in AWS CodePipeline, location should not be specified. If it + // is specified, AWS CodePipeline ignores it. This is because AWS CodePipeline + // uses the settings in a pipeline's source action instead of this value. + // + // * For source code in an AWS CodeCommit repository, the HTTPS clone URL + // to the repository that contains the source code and the build spec file + // (for example, https://git-codecommit.region-ID.amazonaws.com/v1/repos/repo-name + // ). + // + // * For source code in an S3 input bucket, one of the following. The path + // to the ZIP file that contains the source code (for example, bucket-name/path/to/object-name.zip). + // The path to the folder that contains the source code (for example, bucket-name/path/to/source-code/folder/). + // + // * For source code in a GitHub repository, the HTTPS clone URL to the repository + // that contains the source and the build spec file. + // + // * For source code in a Bitbucket repository, the HTTPS clone URL to the + // repository that contains the source and the build spec file. + Location *string `type:"string"` + + // The type of repository that contains the source code to be built. Valid values + // are: + // + // * BITBUCKET - The source code is in a Bitbucket repository. + // + // * CODECOMMIT - The source code is in an AWS CodeCommit repository. + // + // * CODEPIPELINE - The source code settings are specified in the source + // action of a pipeline in AWS CodePipeline. + // + // * GITHUB - The source code is in a GitHub repository. + // + // * GITHUB_ENTERPRISE - The source code is in a GitHub Enterprise repository. + // + // * NO_SOURCE - The project does not have input source code. + // + // * S3 - The source code is in an S3 input bucket. + Type *string `type:"string"` +} + +// String returns the string representation +func (s AwsCodeBuildProjectSource) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCodeBuildProjectSource) GoString() string { + return s.String() +} + +// SetGitCloneDepth sets the GitCloneDepth field's value. +func (s *AwsCodeBuildProjectSource) SetGitCloneDepth(v int64) *AwsCodeBuildProjectSource { + s.GitCloneDepth = &v + return s +} + +// SetInsecureSsl sets the InsecureSsl field's value. +func (s *AwsCodeBuildProjectSource) SetInsecureSsl(v bool) *AwsCodeBuildProjectSource { + s.InsecureSsl = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *AwsCodeBuildProjectSource) SetLocation(v string) *AwsCodeBuildProjectSource { + s.Location = &v + return s +} + +// SetType sets the Type field's value. +func (s *AwsCodeBuildProjectSource) SetType(v string) *AwsCodeBuildProjectSource { + s.Type = &v + return s +} + +// Information about the VPC configuration that AWS CodeBuild accesses. +type AwsCodeBuildProjectVpcConfig struct { + _ struct{} `type:"structure"` + + // A list of one or more security group IDs in your Amazon VPC. + SecurityGroupIds []*string `type:"list"` + + // A list of one or more subnet IDs in your Amazon VPC. + Subnets []*string `type:"list"` + + // The ID of the VPC. 
+ VpcId *string `type:"string"` +} + +// String returns the string representation +func (s AwsCodeBuildProjectVpcConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCodeBuildProjectVpcConfig) GoString() string { + return s.String() +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *AwsCodeBuildProjectVpcConfig) SetSecurityGroupIds(v []*string) *AwsCodeBuildProjectVpcConfig { + s.SecurityGroupIds = v + return s +} + +// SetSubnets sets the Subnets field's value. +func (s *AwsCodeBuildProjectVpcConfig) SetSubnets(v []*string) *AwsCodeBuildProjectVpcConfig { + s.Subnets = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *AwsCodeBuildProjectVpcConfig) SetVpcId(v string) *AwsCodeBuildProjectVpcConfig { + s.VpcId = &v + return s +} + +// Contains the cross-origin resource sharing (CORS) configuration for the API. +// CORS is only supported for HTTP APIs. +type AwsCorsConfiguration struct { + _ struct{} `type:"structure"` + + // Indicates whether the CORS request includes credentials. + AllowCredentials *bool `type:"boolean"` + + // The allowed headers for CORS requests. + AllowHeaders []*string `type:"list"` + + // The allowed methods for CORS requests. + AllowMethods []*string `type:"list"` + + // The allowed origins for CORS requests. + AllowOrigins []*string `type:"list"` + + // The exposed headers for CORS requests. + ExposeHeaders []*string `type:"list"` + + // The number of seconds for which the browser caches preflight request results. + MaxAge *int64 `type:"integer"` +} + +// String returns the string representation +func (s AwsCorsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsCorsConfiguration) GoString() string { + return s.String() +} + +// SetAllowCredentials sets the AllowCredentials field's value. +func (s *AwsCorsConfiguration) SetAllowCredentials(v bool) *AwsCorsConfiguration { + s.AllowCredentials = &v + return s +} + +// SetAllowHeaders sets the AllowHeaders field's value. +func (s *AwsCorsConfiguration) SetAllowHeaders(v []*string) *AwsCorsConfiguration { + s.AllowHeaders = v + return s +} + +// SetAllowMethods sets the AllowMethods field's value. +func (s *AwsCorsConfiguration) SetAllowMethods(v []*string) *AwsCorsConfiguration { + s.AllowMethods = v + return s +} + +// SetAllowOrigins sets the AllowOrigins field's value. +func (s *AwsCorsConfiguration) SetAllowOrigins(v []*string) *AwsCorsConfiguration { + s.AllowOrigins = v + return s +} + +// SetExposeHeaders sets the ExposeHeaders field's value. +func (s *AwsCorsConfiguration) SetExposeHeaders(v []*string) *AwsCorsConfiguration { + s.ExposeHeaders = v + return s +} + +// SetMaxAge sets the MaxAge field's value. +func (s *AwsCorsConfiguration) SetMaxAge(v int64) *AwsCorsConfiguration { + s.MaxAge = &v + return s +} + +// Contains a definition of an attribute for the table. +type AwsDynamoDbTableAttributeDefinition struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + AttributeName *string `type:"string"` + + // The type of the attribute. 
+ AttributeType *string `type:"string"` +} + +// String returns the string representation +func (s AwsDynamoDbTableAttributeDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableAttributeDefinition) GoString() string { + return s.String() +} + +// SetAttributeName sets the AttributeName field's value. +func (s *AwsDynamoDbTableAttributeDefinition) SetAttributeName(v string) *AwsDynamoDbTableAttributeDefinition { + s.AttributeName = &v + return s +} + +// SetAttributeType sets the AttributeType field's value. +func (s *AwsDynamoDbTableAttributeDefinition) SetAttributeType(v string) *AwsDynamoDbTableAttributeDefinition { + s.AttributeType = &v + return s +} + +// Provides information about the billing for read/write capacity on the table. +type AwsDynamoDbTableBillingModeSummary struct { + _ struct{} `type:"structure"` + + // The method used to charge for read and write throughput and to manage capacity. + BillingMode *string `type:"string"` + + // If the billing mode is PAY_PER_REQUEST, indicates when the billing mode was + // set to that value. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + LastUpdateToPayPerRequestDateTime *string `type:"string"` +} + +// String returns the string representation +func (s AwsDynamoDbTableBillingModeSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableBillingModeSummary) GoString() string { + return s.String() +} + +// SetBillingMode sets the BillingMode field's value. +func (s *AwsDynamoDbTableBillingModeSummary) SetBillingMode(v string) *AwsDynamoDbTableBillingModeSummary { + s.BillingMode = &v + return s +} + +// SetLastUpdateToPayPerRequestDateTime sets the LastUpdateToPayPerRequestDateTime field's value. +func (s *AwsDynamoDbTableBillingModeSummary) SetLastUpdateToPayPerRequestDateTime(v string) *AwsDynamoDbTableBillingModeSummary { + s.LastUpdateToPayPerRequestDateTime = &v + return s +} + +// Provides details about a DynamoDB table. +type AwsDynamoDbTableDetails struct { + _ struct{} `type:"structure"` + + // A list of attribute definitions for the table. + AttributeDefinitions []*AwsDynamoDbTableAttributeDefinition `type:"list"` + + // Information about the billing for read/write capacity on the table. + BillingModeSummary *AwsDynamoDbTableBillingModeSummary `type:"structure"` + + // Indicates when the table was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreationDateTime *string `type:"string"` + + // List of global secondary indexes for the table. + GlobalSecondaryIndexes []*AwsDynamoDbTableGlobalSecondaryIndex `type:"list"` + + // The version of global tables being used. + GlobalTableVersion *string `type:"string"` + + // The number of items in the table. + ItemCount *int64 `type:"integer"` + + // The primary key structure for the table. + KeySchema []*AwsDynamoDbTableKeySchema `type:"list"` + + // The ARN of the latest stream for the table. + LatestStreamArn *string `type:"string"` + + // The label of the latest stream. The label is not a unique identifier. 
+ LatestStreamLabel *string `type:"string"` + + // The list of local secondary indexes for the table. + LocalSecondaryIndexes []*AwsDynamoDbTableLocalSecondaryIndex `type:"list"` + + // Information about the provisioned throughput for the table. + ProvisionedThroughput *AwsDynamoDbTableProvisionedThroughput `type:"structure"` + + // The list of replicas of this table. + Replicas []*AwsDynamoDbTableReplica `type:"list"` + + // Information about the restore for the table. + RestoreSummary *AwsDynamoDbTableRestoreSummary `type:"structure"` + + // Information about the server-side encryption for the table. + SseDescription *AwsDynamoDbTableSseDescription `type:"structure"` + + // The current DynamoDB Streams configuration for the table. + StreamSpecification *AwsDynamoDbTableStreamSpecification `type:"structure"` + + // The identifier of the table. + TableId *string `type:"string"` + + // The name of the table. + TableName *string `type:"string"` + + // The total size of the table in bytes. + TableSizeBytes *int64 `type:"long"` + + // The current status of the table. + TableStatus *string `type:"string"` +} + +// String returns the string representation +func (s AwsDynamoDbTableDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableDetails) GoString() string { + return s.String() +} + +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *AwsDynamoDbTableDetails) SetAttributeDefinitions(v []*AwsDynamoDbTableAttributeDefinition) *AwsDynamoDbTableDetails { + s.AttributeDefinitions = v + return s +} + +// SetBillingModeSummary sets the BillingModeSummary field's value. +func (s *AwsDynamoDbTableDetails) SetBillingModeSummary(v *AwsDynamoDbTableBillingModeSummary) *AwsDynamoDbTableDetails { + s.BillingModeSummary = v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *AwsDynamoDbTableDetails) SetCreationDateTime(v string) *AwsDynamoDbTableDetails { + s.CreationDateTime = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *AwsDynamoDbTableDetails) SetGlobalSecondaryIndexes(v []*AwsDynamoDbTableGlobalSecondaryIndex) *AwsDynamoDbTableDetails { + s.GlobalSecondaryIndexes = v + return s +} + +// SetGlobalTableVersion sets the GlobalTableVersion field's value. +func (s *AwsDynamoDbTableDetails) SetGlobalTableVersion(v string) *AwsDynamoDbTableDetails { + s.GlobalTableVersion = &v + return s +} + +// SetItemCount sets the ItemCount field's value. +func (s *AwsDynamoDbTableDetails) SetItemCount(v int64) *AwsDynamoDbTableDetails { + s.ItemCount = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *AwsDynamoDbTableDetails) SetKeySchema(v []*AwsDynamoDbTableKeySchema) *AwsDynamoDbTableDetails { + s.KeySchema = v + return s +} + +// SetLatestStreamArn sets the LatestStreamArn field's value. +func (s *AwsDynamoDbTableDetails) SetLatestStreamArn(v string) *AwsDynamoDbTableDetails { + s.LatestStreamArn = &v + return s +} + +// SetLatestStreamLabel sets the LatestStreamLabel field's value. +func (s *AwsDynamoDbTableDetails) SetLatestStreamLabel(v string) *AwsDynamoDbTableDetails { + s.LatestStreamLabel = &v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. 
+func (s *AwsDynamoDbTableDetails) SetLocalSecondaryIndexes(v []*AwsDynamoDbTableLocalSecondaryIndex) *AwsDynamoDbTableDetails {
+	s.LocalSecondaryIndexes = v
+	return s
+}
+
+// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
+func (s *AwsDynamoDbTableDetails) SetProvisionedThroughput(v *AwsDynamoDbTableProvisionedThroughput) *AwsDynamoDbTableDetails {
+	s.ProvisionedThroughput = v
+	return s
+}
+
+// SetReplicas sets the Replicas field's value.
+func (s *AwsDynamoDbTableDetails) SetReplicas(v []*AwsDynamoDbTableReplica) *AwsDynamoDbTableDetails {
+	s.Replicas = v
+	return s
+}
+
+// SetRestoreSummary sets the RestoreSummary field's value.
+func (s *AwsDynamoDbTableDetails) SetRestoreSummary(v *AwsDynamoDbTableRestoreSummary) *AwsDynamoDbTableDetails {
+	s.RestoreSummary = v
+	return s
+}
+
+// SetSseDescription sets the SseDescription field's value.
+func (s *AwsDynamoDbTableDetails) SetSseDescription(v *AwsDynamoDbTableSseDescription) *AwsDynamoDbTableDetails {
+	s.SseDescription = v
+	return s
+}
+
+// SetStreamSpecification sets the StreamSpecification field's value.
+func (s *AwsDynamoDbTableDetails) SetStreamSpecification(v *AwsDynamoDbTableStreamSpecification) *AwsDynamoDbTableDetails {
+	s.StreamSpecification = v
+	return s
+}
+
+// SetTableId sets the TableId field's value.
+func (s *AwsDynamoDbTableDetails) SetTableId(v string) *AwsDynamoDbTableDetails {
+	s.TableId = &v
+	return s
+}
+
+// SetTableName sets the TableName field's value.
+func (s *AwsDynamoDbTableDetails) SetTableName(v string) *AwsDynamoDbTableDetails {
+	s.TableName = &v
+	return s
+}
+
+// SetTableSizeBytes sets the TableSizeBytes field's value.
+func (s *AwsDynamoDbTableDetails) SetTableSizeBytes(v int64) *AwsDynamoDbTableDetails {
+	s.TableSizeBytes = &v
+	return s
+}
+
+// SetTableStatus sets the TableStatus field's value.
+func (s *AwsDynamoDbTableDetails) SetTableStatus(v string) *AwsDynamoDbTableDetails {
+	s.TableStatus = &v
+	return s
+}
+
+// Information about a global secondary index for the table.
+type AwsDynamoDbTableGlobalSecondaryIndex struct {
+	_ struct{} `type:"structure"`
+
+	// Whether the index is currently backfilling.
+	Backfilling *bool `type:"boolean"`
+
+	// The ARN of the index.
+	IndexArn *string `type:"string"`
+
+	// The name of the index.
+	IndexName *string `type:"string"`
+
+	// The total size in bytes of the index.
+	IndexSizeBytes *int64 `type:"long"`
+
+	// The current status of the index.
+	IndexStatus *string `type:"string"`
+
+	// The number of items in the index.
+	ItemCount *int64 `type:"integer"`
+
+	// The key schema for the index.
+	KeySchema []*AwsDynamoDbTableKeySchema `type:"list"`
+
+	// Attributes that are copied from the table into an index.
+	Projection *AwsDynamoDbTableProjection `type:"structure"`
+
+	// Information about the provisioned throughput settings for the indexes.
+	ProvisionedThroughput *AwsDynamoDbTableProvisionedThroughput `type:"structure"`
+}
+
+// String returns the string representation
+func (s AwsDynamoDbTableGlobalSecondaryIndex) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AwsDynamoDbTableGlobalSecondaryIndex) GoString() string {
+	return s.String()
+}
+
+// SetBackfilling sets the Backfilling field's value.
+func (s *AwsDynamoDbTableGlobalSecondaryIndex) SetBackfilling(v bool) *AwsDynamoDbTableGlobalSecondaryIndex {
+	s.Backfilling = &v
+	return s
+}
+
+// SetIndexArn sets the IndexArn field's value.
+func (s *AwsDynamoDbTableGlobalSecondaryIndex) SetIndexArn(v string) *AwsDynamoDbTableGlobalSecondaryIndex { + s.IndexArn = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *AwsDynamoDbTableGlobalSecondaryIndex) SetIndexName(v string) *AwsDynamoDbTableGlobalSecondaryIndex { + s.IndexName = &v + return s +} + +// SetIndexSizeBytes sets the IndexSizeBytes field's value. +func (s *AwsDynamoDbTableGlobalSecondaryIndex) SetIndexSizeBytes(v int64) *AwsDynamoDbTableGlobalSecondaryIndex { + s.IndexSizeBytes = &v + return s +} + +// SetIndexStatus sets the IndexStatus field's value. +func (s *AwsDynamoDbTableGlobalSecondaryIndex) SetIndexStatus(v string) *AwsDynamoDbTableGlobalSecondaryIndex { + s.IndexStatus = &v + return s +} + +// SetItemCount sets the ItemCount field's value. +func (s *AwsDynamoDbTableGlobalSecondaryIndex) SetItemCount(v int64) *AwsDynamoDbTableGlobalSecondaryIndex { + s.ItemCount = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *AwsDynamoDbTableGlobalSecondaryIndex) SetKeySchema(v []*AwsDynamoDbTableKeySchema) *AwsDynamoDbTableGlobalSecondaryIndex { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *AwsDynamoDbTableGlobalSecondaryIndex) SetProjection(v *AwsDynamoDbTableProjection) *AwsDynamoDbTableGlobalSecondaryIndex { + s.Projection = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *AwsDynamoDbTableGlobalSecondaryIndex) SetProvisionedThroughput(v *AwsDynamoDbTableProvisionedThroughput) *AwsDynamoDbTableGlobalSecondaryIndex { + s.ProvisionedThroughput = v + return s +} + +// A component of the key schema for the DynamoDB table, a global secondary +// index, or a local secondary index. +type AwsDynamoDbTableKeySchema struct { + _ struct{} `type:"structure"` + + // The name of the key schema attribute. + AttributeName *string `type:"string"` + + // The type of key used for the key schema attribute. + KeyType *string `type:"string"` +} + +// String returns the string representation +func (s AwsDynamoDbTableKeySchema) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableKeySchema) GoString() string { + return s.String() +} + +// SetAttributeName sets the AttributeName field's value. +func (s *AwsDynamoDbTableKeySchema) SetAttributeName(v string) *AwsDynamoDbTableKeySchema { + s.AttributeName = &v + return s +} + +// SetKeyType sets the KeyType field's value. +func (s *AwsDynamoDbTableKeySchema) SetKeyType(v string) *AwsDynamoDbTableKeySchema { + s.KeyType = &v + return s +} + +// Information about a local secondary index for a DynamoDB table. +type AwsDynamoDbTableLocalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The ARN of the index. + IndexArn *string `type:"string"` + + // The name of the index. + IndexName *string `type:"string"` + + // The complete key schema for the index. + KeySchema []*AwsDynamoDbTableKeySchema `type:"list"` + + // Attributes that are copied from the table into the index. These are in addition + // to the primary key attributes and index key attributes, which are automatically + // projected. 
+ Projection *AwsDynamoDbTableProjection `type:"structure"` +} + +// String returns the string representation +func (s AwsDynamoDbTableLocalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableLocalSecondaryIndex) GoString() string { + return s.String() +} + +// SetIndexArn sets the IndexArn field's value. +func (s *AwsDynamoDbTableLocalSecondaryIndex) SetIndexArn(v string) *AwsDynamoDbTableLocalSecondaryIndex { + s.IndexArn = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *AwsDynamoDbTableLocalSecondaryIndex) SetIndexName(v string) *AwsDynamoDbTableLocalSecondaryIndex { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *AwsDynamoDbTableLocalSecondaryIndex) SetKeySchema(v []*AwsDynamoDbTableKeySchema) *AwsDynamoDbTableLocalSecondaryIndex { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *AwsDynamoDbTableLocalSecondaryIndex) SetProjection(v *AwsDynamoDbTableProjection) *AwsDynamoDbTableLocalSecondaryIndex { + s.Projection = v + return s +} + +// For global and local secondary indexes, identifies the attributes that are +// copied from the table into the index. +type AwsDynamoDbTableProjection struct { + _ struct{} `type:"structure"` + + // The nonkey attributes that are projected into the index. For each attribute, + // provide the attribute name. + NonKeyAttributes []*string `type:"list"` + + // The types of attributes that are projected into the index. + ProjectionType *string `type:"string"` +} + +// String returns the string representation +func (s AwsDynamoDbTableProjection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableProjection) GoString() string { + return s.String() +} + +// SetNonKeyAttributes sets the NonKeyAttributes field's value. +func (s *AwsDynamoDbTableProjection) SetNonKeyAttributes(v []*string) *AwsDynamoDbTableProjection { + s.NonKeyAttributes = v + return s +} + +// SetProjectionType sets the ProjectionType field's value. +func (s *AwsDynamoDbTableProjection) SetProjectionType(v string) *AwsDynamoDbTableProjection { + s.ProjectionType = &v + return s +} + +// Information about the provisioned throughput for the table or for a global +// secondary index. +type AwsDynamoDbTableProvisionedThroughput struct { + _ struct{} `type:"structure"` + + // Indicates when the provisioned throughput was last decreased. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + LastDecreaseDateTime *string `type:"string"` + + // Indicates when the provisioned throughput was last increased. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + LastIncreaseDateTime *string `type:"string"` + + // The number of times during the current UTC calendar day that the provisioned + // throughput was decreased. + NumberOfDecreasesToday *int64 `type:"integer"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. 
+ ReadCapacityUnits *int64 `type:"integer"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. + WriteCapacityUnits *int64 `type:"integer"` +} + +// String returns the string representation +func (s AwsDynamoDbTableProvisionedThroughput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableProvisionedThroughput) GoString() string { + return s.String() +} + +// SetLastDecreaseDateTime sets the LastDecreaseDateTime field's value. +func (s *AwsDynamoDbTableProvisionedThroughput) SetLastDecreaseDateTime(v string) *AwsDynamoDbTableProvisionedThroughput { + s.LastDecreaseDateTime = &v + return s +} + +// SetLastIncreaseDateTime sets the LastIncreaseDateTime field's value. +func (s *AwsDynamoDbTableProvisionedThroughput) SetLastIncreaseDateTime(v string) *AwsDynamoDbTableProvisionedThroughput { + s.LastIncreaseDateTime = &v + return s +} + +// SetNumberOfDecreasesToday sets the NumberOfDecreasesToday field's value. +func (s *AwsDynamoDbTableProvisionedThroughput) SetNumberOfDecreasesToday(v int64) *AwsDynamoDbTableProvisionedThroughput { + s.NumberOfDecreasesToday = &v + return s +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *AwsDynamoDbTableProvisionedThroughput) SetReadCapacityUnits(v int64) *AwsDynamoDbTableProvisionedThroughput { + s.ReadCapacityUnits = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *AwsDynamoDbTableProvisionedThroughput) SetWriteCapacityUnits(v int64) *AwsDynamoDbTableProvisionedThroughput { + s.WriteCapacityUnits = &v + return s +} + +// Replica-specific configuration for the provisioned throughput. +type AwsDynamoDbTableProvisionedThroughputOverride struct { + _ struct{} `type:"structure"` + + // The read capacity units for the replica. + ReadCapacityUnits *int64 `type:"integer"` +} + +// String returns the string representation +func (s AwsDynamoDbTableProvisionedThroughputOverride) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableProvisionedThroughputOverride) GoString() string { + return s.String() +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *AwsDynamoDbTableProvisionedThroughputOverride) SetReadCapacityUnits(v int64) *AwsDynamoDbTableProvisionedThroughputOverride { + s.ReadCapacityUnits = &v + return s +} + +// Information about a replica of a DynamoDB table. +type AwsDynamoDbTableReplica struct { + _ struct{} `type:"structure"` + + // List of global secondary indexes for the replica. + GlobalSecondaryIndexes []*AwsDynamoDbTableReplicaGlobalSecondaryIndex `type:"list"` + + // The identifier of the AWS KMS customer master key (CMK) that will be used + // for AWS KMS encryption for the replica. + KmsMasterKeyId *string `type:"string"` + + // Replica-specific configuration for the provisioned throughput. + ProvisionedThroughputOverride *AwsDynamoDbTableProvisionedThroughputOverride `type:"structure"` + + // The name of the Region where the replica is located. + RegionName *string `type:"string"` + + // The current status of the replica. + ReplicaStatus *string `type:"string"` + + // Detailed information about the replica status. 
+ ReplicaStatusDescription *string `type:"string"` +} + +// String returns the string representation +func (s AwsDynamoDbTableReplica) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableReplica) GoString() string { + return s.String() +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *AwsDynamoDbTableReplica) SetGlobalSecondaryIndexes(v []*AwsDynamoDbTableReplicaGlobalSecondaryIndex) *AwsDynamoDbTableReplica { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKmsMasterKeyId sets the KmsMasterKeyId field's value. +func (s *AwsDynamoDbTableReplica) SetKmsMasterKeyId(v string) *AwsDynamoDbTableReplica { + s.KmsMasterKeyId = &v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *AwsDynamoDbTableReplica) SetProvisionedThroughputOverride(v *AwsDynamoDbTableProvisionedThroughputOverride) *AwsDynamoDbTableReplica { + s.ProvisionedThroughputOverride = v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *AwsDynamoDbTableReplica) SetRegionName(v string) *AwsDynamoDbTableReplica { + s.RegionName = &v + return s +} + +// SetReplicaStatus sets the ReplicaStatus field's value. +func (s *AwsDynamoDbTableReplica) SetReplicaStatus(v string) *AwsDynamoDbTableReplica { + s.ReplicaStatus = &v + return s +} + +// SetReplicaStatusDescription sets the ReplicaStatusDescription field's value. +func (s *AwsDynamoDbTableReplica) SetReplicaStatusDescription(v string) *AwsDynamoDbTableReplica { + s.ReplicaStatusDescription = &v + return s +} + +// Information about a global secondary index for a DynamoDB table replica. +type AwsDynamoDbTableReplicaGlobalSecondaryIndex struct { + _ struct{} `type:"structure"` + + // The name of the index. + IndexName *string `type:"string"` + + // Replica-specific configuration for the provisioned throughput for the index. + ProvisionedThroughputOverride *AwsDynamoDbTableProvisionedThroughputOverride `type:"structure"` +} + +// String returns the string representation +func (s AwsDynamoDbTableReplicaGlobalSecondaryIndex) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableReplicaGlobalSecondaryIndex) GoString() string { + return s.String() +} + +// SetIndexName sets the IndexName field's value. +func (s *AwsDynamoDbTableReplicaGlobalSecondaryIndex) SetIndexName(v string) *AwsDynamoDbTableReplicaGlobalSecondaryIndex { + s.IndexName = &v + return s +} + +// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value. +func (s *AwsDynamoDbTableReplicaGlobalSecondaryIndex) SetProvisionedThroughputOverride(v *AwsDynamoDbTableProvisionedThroughputOverride) *AwsDynamoDbTableReplicaGlobalSecondaryIndex { + s.ProvisionedThroughputOverride = v + return s +} + +// Information about the restore for the table. +type AwsDynamoDbTableRestoreSummary struct { + _ struct{} `type:"structure"` + + // Indicates the point in time that the table was restored to. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + RestoreDateTime *string `type:"string"` + + // Whether a restore is currently in progress. + RestoreInProgress *bool `type:"boolean"` + + // The ARN of the source backup from which the table was restored. 
+ SourceBackupArn *string `type:"string"` + + // The ARN of the source table for the backup. + SourceTableArn *string `type:"string"` +} + +// String returns the string representation +func (s AwsDynamoDbTableRestoreSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableRestoreSummary) GoString() string { + return s.String() +} + +// SetRestoreDateTime sets the RestoreDateTime field's value. +func (s *AwsDynamoDbTableRestoreSummary) SetRestoreDateTime(v string) *AwsDynamoDbTableRestoreSummary { + s.RestoreDateTime = &v + return s +} + +// SetRestoreInProgress sets the RestoreInProgress field's value. +func (s *AwsDynamoDbTableRestoreSummary) SetRestoreInProgress(v bool) *AwsDynamoDbTableRestoreSummary { + s.RestoreInProgress = &v + return s +} + +// SetSourceBackupArn sets the SourceBackupArn field's value. +func (s *AwsDynamoDbTableRestoreSummary) SetSourceBackupArn(v string) *AwsDynamoDbTableRestoreSummary { + s.SourceBackupArn = &v + return s +} + +// SetSourceTableArn sets the SourceTableArn field's value. +func (s *AwsDynamoDbTableRestoreSummary) SetSourceTableArn(v string) *AwsDynamoDbTableRestoreSummary { + s.SourceTableArn = &v + return s +} + +// Information about the server-side encryption for the table. +type AwsDynamoDbTableSseDescription struct { + _ struct{} `type:"structure"` + + // If the key is inaccessible, the date and time when DynamoDB detected that + // the key was inaccessible. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + InaccessibleEncryptionDateTime *string `type:"string"` + + // The ARN of the AWS KMS customer master key (CMK) that is used for the AWS + // KMS encryption. + KmsMasterKeyArn *string `type:"string"` + + // The type of server-side encryption. + SseType *string `type:"string"` + + // The status of the server-side encryption. + Status *string `type:"string"` +} + +// String returns the string representation +func (s AwsDynamoDbTableSseDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableSseDescription) GoString() string { + return s.String() +} + +// SetInaccessibleEncryptionDateTime sets the InaccessibleEncryptionDateTime field's value. +func (s *AwsDynamoDbTableSseDescription) SetInaccessibleEncryptionDateTime(v string) *AwsDynamoDbTableSseDescription { + s.InaccessibleEncryptionDateTime = &v + return s +} + +// SetKmsMasterKeyArn sets the KmsMasterKeyArn field's value. +func (s *AwsDynamoDbTableSseDescription) SetKmsMasterKeyArn(v string) *AwsDynamoDbTableSseDescription { + s.KmsMasterKeyArn = &v + return s +} + +// SetSseType sets the SseType field's value. +func (s *AwsDynamoDbTableSseDescription) SetSseType(v string) *AwsDynamoDbTableSseDescription { + s.SseType = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsDynamoDbTableSseDescription) SetStatus(v string) *AwsDynamoDbTableSseDescription { + s.Status = &v + return s +} + +// The current DynamoDB Streams configuration for the table. +type AwsDynamoDbTableStreamSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether DynamoDB Streams is enabled on the table. + StreamEnabled *bool `type:"boolean"` + + // Determines the information that is written to the table. 
+ StreamViewType *string `type:"string"` +} + +// String returns the string representation +func (s AwsDynamoDbTableStreamSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsDynamoDbTableStreamSpecification) GoString() string { + return s.String() +} + +// SetStreamEnabled sets the StreamEnabled field's value. +func (s *AwsDynamoDbTableStreamSpecification) SetStreamEnabled(v bool) *AwsDynamoDbTableStreamSpecification { + s.StreamEnabled = &v + return s +} + +// SetStreamViewType sets the StreamViewType field's value. +func (s *AwsDynamoDbTableStreamSpecification) SetStreamViewType(v string) *AwsDynamoDbTableStreamSpecification { + s.StreamViewType = &v + return s +} + +// Information about an Elastic IP address. +type AwsEc2EipDetails struct { + _ struct{} `type:"structure"` + + // The identifier that AWS assigns to represent the allocation of the Elastic + // IP address for use with Amazon VPC. + AllocationId *string `type:"string"` + + // The identifier that represents the association of the Elastic IP address + // with an EC2 instance. + AssociationId *string `type:"string"` + + // The domain in which to allocate the address. + // + // If the address is for use with EC2 instances in a VPC, then Domain is vpc. + // Otherwise, Domain is standard. + Domain *string `type:"string"` + + // The identifier of the EC2 instance. + InstanceId *string `type:"string"` + + // The name of the location from which the Elastic IP address is advertised. + NetworkBorderGroup *string `type:"string"` + + // The identifier of the network interface. + NetworkInterfaceId *string `type:"string"` + + // The AWS account ID of the owner of the network interface. + NetworkInterfaceOwnerId *string `type:"string"` + + // The private IP address that is associated with the Elastic IP address. + PrivateIpAddress *string `type:"string"` + + // A public IP address that is associated with the EC2 instance. + PublicIp *string `type:"string"` + + // The identifier of an IP address pool. This parameter allows Amazon EC2 to + // select an IP address from the address pool. + PublicIpv4Pool *string `type:"string"` +} + +// String returns the string representation +func (s AwsEc2EipDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2EipDetails) GoString() string { + return s.String() +} + +// SetAllocationId sets the AllocationId field's value. +func (s *AwsEc2EipDetails) SetAllocationId(v string) *AwsEc2EipDetails { + s.AllocationId = &v + return s +} + +// SetAssociationId sets the AssociationId field's value. +func (s *AwsEc2EipDetails) SetAssociationId(v string) *AwsEc2EipDetails { + s.AssociationId = &v + return s +} + +// SetDomain sets the Domain field's value. +func (s *AwsEc2EipDetails) SetDomain(v string) *AwsEc2EipDetails { + s.Domain = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *AwsEc2EipDetails) SetInstanceId(v string) *AwsEc2EipDetails { + s.InstanceId = &v + return s +} + +// SetNetworkBorderGroup sets the NetworkBorderGroup field's value. +func (s *AwsEc2EipDetails) SetNetworkBorderGroup(v string) *AwsEc2EipDetails { + s.NetworkBorderGroup = &v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *AwsEc2EipDetails) SetNetworkInterfaceId(v string) *AwsEc2EipDetails { + s.NetworkInterfaceId = &v + return s +} + +// SetNetworkInterfaceOwnerId sets the NetworkInterfaceOwnerId field's value. 
+func (s *AwsEc2EipDetails) SetNetworkInterfaceOwnerId(v string) *AwsEc2EipDetails { + s.NetworkInterfaceOwnerId = &v + return s +} + +// SetPrivateIpAddress sets the PrivateIpAddress field's value. +func (s *AwsEc2EipDetails) SetPrivateIpAddress(v string) *AwsEc2EipDetails { + s.PrivateIpAddress = &v + return s +} + +// SetPublicIp sets the PublicIp field's value. +func (s *AwsEc2EipDetails) SetPublicIp(v string) *AwsEc2EipDetails { + s.PublicIp = &v + return s +} + +// SetPublicIpv4Pool sets the PublicIpv4Pool field's value. +func (s *AwsEc2EipDetails) SetPublicIpv4Pool(v string) *AwsEc2EipDetails { + s.PublicIpv4Pool = &v + return s +} + +// The details of an Amazon EC2 instance. +type AwsEc2InstanceDetails struct { + _ struct{} `type:"structure"` + + // The IAM profile ARN of the instance. + IamInstanceProfileArn *string `type:"string"` + + // The Amazon Machine Image (AMI) ID of the instance. + ImageId *string `type:"string"` + + // The IPv4 addresses associated with the instance. + IpV4Addresses []*string `type:"list"` + + // The IPv6 addresses associated with the instance. + IpV6Addresses []*string `type:"list"` + + // The key name associated with the instance. + KeyName *string `type:"string"` + + // Indicates when the instance was launched. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + LaunchedAt *string `type:"string"` + + // The identifier of the subnet that the instance was launched in. + SubnetId *string `type:"string"` + + // The instance type of the instance. + Type *string `type:"string"` + + // The identifier of the VPC that the instance was launched in. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s AwsEc2InstanceDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2InstanceDetails) GoString() string { + return s.String() +} + +// SetIamInstanceProfileArn sets the IamInstanceProfileArn field's value. +func (s *AwsEc2InstanceDetails) SetIamInstanceProfileArn(v string) *AwsEc2InstanceDetails { + s.IamInstanceProfileArn = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *AwsEc2InstanceDetails) SetImageId(v string) *AwsEc2InstanceDetails { + s.ImageId = &v + return s +} + +// SetIpV4Addresses sets the IpV4Addresses field's value. +func (s *AwsEc2InstanceDetails) SetIpV4Addresses(v []*string) *AwsEc2InstanceDetails { + s.IpV4Addresses = v + return s +} + +// SetIpV6Addresses sets the IpV6Addresses field's value. +func (s *AwsEc2InstanceDetails) SetIpV6Addresses(v []*string) *AwsEc2InstanceDetails { + s.IpV6Addresses = v + return s +} + +// SetKeyName sets the KeyName field's value. +func (s *AwsEc2InstanceDetails) SetKeyName(v string) *AwsEc2InstanceDetails { + s.KeyName = &v + return s +} + +// SetLaunchedAt sets the LaunchedAt field's value. +func (s *AwsEc2InstanceDetails) SetLaunchedAt(v string) *AwsEc2InstanceDetails { + s.LaunchedAt = &v + return s +} + +// SetSubnetId sets the SubnetId field's value. +func (s *AwsEc2InstanceDetails) SetSubnetId(v string) *AwsEc2InstanceDetails { + s.SubnetId = &v + return s +} + +// SetType sets the Type field's value. +func (s *AwsEc2InstanceDetails) SetType(v string) *AwsEc2InstanceDetails { + s.Type = &v + return s +} + +// SetVpcId sets the VpcId field's value. 
+func (s *AwsEc2InstanceDetails) SetVpcId(v string) *AwsEc2InstanceDetails { + s.VpcId = &v + return s +} + +// Information about the network interface attachment. +type AwsEc2NetworkInterfaceAttachment struct { + _ struct{} `type:"structure"` + + // Indicates when the attachment initiated. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + AttachTime *string `type:"string"` + + // The identifier of the network interface attachment + AttachmentId *string `type:"string"` + + // Indicates whether the network interface is deleted when the instance is terminated. + DeleteOnTermination *bool `type:"boolean"` + + // The device index of the network interface attachment on the instance. + DeviceIndex *int64 `type:"integer"` + + // The ID of the instance. + InstanceId *string `type:"string"` + + // The AWS account ID of the owner of the instance. + InstanceOwnerId *string `type:"string"` + + // The attachment state. + // + // Valid values: attaching | attached | detaching | detached + Status *string `type:"string"` +} + +// String returns the string representation +func (s AwsEc2NetworkInterfaceAttachment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2NetworkInterfaceAttachment) GoString() string { + return s.String() +} + +// SetAttachTime sets the AttachTime field's value. +func (s *AwsEc2NetworkInterfaceAttachment) SetAttachTime(v string) *AwsEc2NetworkInterfaceAttachment { + s.AttachTime = &v + return s +} + +// SetAttachmentId sets the AttachmentId field's value. +func (s *AwsEc2NetworkInterfaceAttachment) SetAttachmentId(v string) *AwsEc2NetworkInterfaceAttachment { + s.AttachmentId = &v + return s +} + +// SetDeleteOnTermination sets the DeleteOnTermination field's value. +func (s *AwsEc2NetworkInterfaceAttachment) SetDeleteOnTermination(v bool) *AwsEc2NetworkInterfaceAttachment { + s.DeleteOnTermination = &v + return s +} + +// SetDeviceIndex sets the DeviceIndex field's value. +func (s *AwsEc2NetworkInterfaceAttachment) SetDeviceIndex(v int64) *AwsEc2NetworkInterfaceAttachment { + s.DeviceIndex = &v + return s +} + +// SetInstanceId sets the InstanceId field's value. +func (s *AwsEc2NetworkInterfaceAttachment) SetInstanceId(v string) *AwsEc2NetworkInterfaceAttachment { + s.InstanceId = &v + return s +} + +// SetInstanceOwnerId sets the InstanceOwnerId field's value. +func (s *AwsEc2NetworkInterfaceAttachment) SetInstanceOwnerId(v string) *AwsEc2NetworkInterfaceAttachment { + s.InstanceOwnerId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsEc2NetworkInterfaceAttachment) SetStatus(v string) *AwsEc2NetworkInterfaceAttachment { + s.Status = &v + return s +} + +// Details about the network interface +type AwsEc2NetworkInterfaceDetails struct { + _ struct{} `type:"structure"` + + // The network interface attachment. + Attachment *AwsEc2NetworkInterfaceAttachment `type:"structure"` + + // The ID of the network interface. + NetworkInterfaceId *string `type:"string"` + + // Security groups for the network interface. + SecurityGroups []*AwsEc2NetworkInterfaceSecurityGroup `type:"list"` + + // Indicates whether traffic to or from the instance is validated. 
+ SourceDestCheck *bool `type:"boolean"` +} + +// String returns the string representation +func (s AwsEc2NetworkInterfaceDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2NetworkInterfaceDetails) GoString() string { + return s.String() +} + +// SetAttachment sets the Attachment field's value. +func (s *AwsEc2NetworkInterfaceDetails) SetAttachment(v *AwsEc2NetworkInterfaceAttachment) *AwsEc2NetworkInterfaceDetails { + s.Attachment = v + return s +} + +// SetNetworkInterfaceId sets the NetworkInterfaceId field's value. +func (s *AwsEc2NetworkInterfaceDetails) SetNetworkInterfaceId(v string) *AwsEc2NetworkInterfaceDetails { + s.NetworkInterfaceId = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *AwsEc2NetworkInterfaceDetails) SetSecurityGroups(v []*AwsEc2NetworkInterfaceSecurityGroup) *AwsEc2NetworkInterfaceDetails { + s.SecurityGroups = v + return s +} + +// SetSourceDestCheck sets the SourceDestCheck field's value. +func (s *AwsEc2NetworkInterfaceDetails) SetSourceDestCheck(v bool) *AwsEc2NetworkInterfaceDetails { + s.SourceDestCheck = &v + return s +} + +// A security group associated with the network interface. +type AwsEc2NetworkInterfaceSecurityGroup struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `type:"string"` + + // The name of the security group. + GroupName *string `type:"string"` +} + +// String returns the string representation +func (s AwsEc2NetworkInterfaceSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2NetworkInterfaceSecurityGroup) GoString() string { + return s.String() +} + +// SetGroupId sets the GroupId field's value. +func (s *AwsEc2NetworkInterfaceSecurityGroup) SetGroupId(v string) *AwsEc2NetworkInterfaceSecurityGroup { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *AwsEc2NetworkInterfaceSecurityGroup) SetGroupName(v string) *AwsEc2NetworkInterfaceSecurityGroup { + s.GroupName = &v + return s +} + +// Details about an EC2 security group. +type AwsEc2SecurityGroupDetails struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `type:"string"` + + // The name of the security group. + GroupName *string `type:"string"` + + // The inbound rules associated with the security group. + IpPermissions []*AwsEc2SecurityGroupIpPermission `type:"list"` + + // [VPC only] The outbound rules associated with the security group. + IpPermissionsEgress []*AwsEc2SecurityGroupIpPermission `type:"list"` + + // The AWS account ID of the owner of the security group. + OwnerId *string `type:"string"` + + // [VPC only] The ID of the VPC for the security group. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s AwsEc2SecurityGroupDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2SecurityGroupDetails) GoString() string { + return s.String() +} + +// SetGroupId sets the GroupId field's value. +func (s *AwsEc2SecurityGroupDetails) SetGroupId(v string) *AwsEc2SecurityGroupDetails { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *AwsEc2SecurityGroupDetails) SetGroupName(v string) *AwsEc2SecurityGroupDetails { + s.GroupName = &v + return s +} + +// SetIpPermissions sets the IpPermissions field's value. 
+func (s *AwsEc2SecurityGroupDetails) SetIpPermissions(v []*AwsEc2SecurityGroupIpPermission) *AwsEc2SecurityGroupDetails { + s.IpPermissions = v + return s +} + +// SetIpPermissionsEgress sets the IpPermissionsEgress field's value. +func (s *AwsEc2SecurityGroupDetails) SetIpPermissionsEgress(v []*AwsEc2SecurityGroupIpPermission) *AwsEc2SecurityGroupDetails { + s.IpPermissionsEgress = v + return s +} + +// SetOwnerId sets the OwnerId field's value. +func (s *AwsEc2SecurityGroupDetails) SetOwnerId(v string) *AwsEc2SecurityGroupDetails { + s.OwnerId = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *AwsEc2SecurityGroupDetails) SetVpcId(v string) *AwsEc2SecurityGroupDetails { + s.VpcId = &v + return s +} + +// An IP permission for an EC2 security group. +type AwsEc2SecurityGroupIpPermission struct { + _ struct{} `type:"structure"` + + // The start of the port range for the TCP and UDP protocols, or an ICMP/ICMPv6 + // type number. + // + // A value of -1 indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6 + // types, you must specify all codes. + FromPort *int64 `type:"integer"` + + // The IP protocol name (tcp, udp, icmp, icmpv6) or number. + // + // [VPC only] Use -1 to specify all protocols. + // + // When authorizing security group rules, specifying -1 or a protocol number + // other than tcp, udp, icmp, or icmpv6 allows traffic on all ports, regardless + // of any port range you specify. + // + // For tcp, udp, and icmp, you must specify a port range. + // + // For icmpv6, the port range is optional. If you omit the port range, traffic + // for all types and codes is allowed. + IpProtocol *string `type:"string"` + + // The IPv4 ranges. + IpRanges []*AwsEc2SecurityGroupIpRange `type:"list"` + + // The IPv6 ranges. + Ipv6Ranges []*AwsEc2SecurityGroupIpv6Range `type:"list"` + + // [VPC only] The prefix list IDs for an AWS service. With outbound rules, this + // is the AWS service to access through a VPC endpoint from instances associated + // with the security group. + PrefixListIds []*AwsEc2SecurityGroupPrefixListId `type:"list"` + + // The end of the port range for the TCP and UDP protocols, or an ICMP/ICMPv6 + // code. + // + // A value of -1 indicates all ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6 + // types, you must specify all codes. + ToPort *int64 `type:"integer"` + + // The security group and AWS account ID pairs. + UserIdGroupPairs []*AwsEc2SecurityGroupUserIdGroupPair `type:"list"` +} + +// String returns the string representation +func (s AwsEc2SecurityGroupIpPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2SecurityGroupIpPermission) GoString() string { + return s.String() +} + +// SetFromPort sets the FromPort field's value. +func (s *AwsEc2SecurityGroupIpPermission) SetFromPort(v int64) *AwsEc2SecurityGroupIpPermission { + s.FromPort = &v + return s +} + +// SetIpProtocol sets the IpProtocol field's value. +func (s *AwsEc2SecurityGroupIpPermission) SetIpProtocol(v string) *AwsEc2SecurityGroupIpPermission { + s.IpProtocol = &v + return s +} + +// SetIpRanges sets the IpRanges field's value. +func (s *AwsEc2SecurityGroupIpPermission) SetIpRanges(v []*AwsEc2SecurityGroupIpRange) *AwsEc2SecurityGroupIpPermission { + s.IpRanges = v + return s +} + +// SetIpv6Ranges sets the Ipv6Ranges field's value. 
+func (s *AwsEc2SecurityGroupIpPermission) SetIpv6Ranges(v []*AwsEc2SecurityGroupIpv6Range) *AwsEc2SecurityGroupIpPermission { + s.Ipv6Ranges = v + return s +} + +// SetPrefixListIds sets the PrefixListIds field's value. +func (s *AwsEc2SecurityGroupIpPermission) SetPrefixListIds(v []*AwsEc2SecurityGroupPrefixListId) *AwsEc2SecurityGroupIpPermission { + s.PrefixListIds = v + return s +} + +// SetToPort sets the ToPort field's value. +func (s *AwsEc2SecurityGroupIpPermission) SetToPort(v int64) *AwsEc2SecurityGroupIpPermission { + s.ToPort = &v + return s +} + +// SetUserIdGroupPairs sets the UserIdGroupPairs field's value. +func (s *AwsEc2SecurityGroupIpPermission) SetUserIdGroupPairs(v []*AwsEc2SecurityGroupUserIdGroupPair) *AwsEc2SecurityGroupIpPermission { + s.UserIdGroupPairs = v + return s +} + +// A range of IPv4 addresses. +type AwsEc2SecurityGroupIpRange struct { + _ struct{} `type:"structure"` + + // The IPv4 CIDR range. You can specify either a CIDR range or a source security + // group, but not both. To specify a single IPv4 address, use the /32 prefix + // length. + CidrIp *string `type:"string"` +} + +// String returns the string representation +func (s AwsEc2SecurityGroupIpRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2SecurityGroupIpRange) GoString() string { + return s.String() +} + +// SetCidrIp sets the CidrIp field's value. +func (s *AwsEc2SecurityGroupIpRange) SetCidrIp(v string) *AwsEc2SecurityGroupIpRange { + s.CidrIp = &v + return s +} + +// A range of IPv6 addresses. +type AwsEc2SecurityGroupIpv6Range struct { + _ struct{} `type:"structure"` + + // The IPv6 CIDR range. You can specify either a CIDR range or a source security + // group, but not both. To specify a single IPv6 address, use the /128 prefix + // length. + CidrIpv6 *string `type:"string"` +} + +// String returns the string representation +func (s AwsEc2SecurityGroupIpv6Range) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2SecurityGroupIpv6Range) GoString() string { + return s.String() +} + +// SetCidrIpv6 sets the CidrIpv6 field's value. +func (s *AwsEc2SecurityGroupIpv6Range) SetCidrIpv6(v string) *AwsEc2SecurityGroupIpv6Range { + s.CidrIpv6 = &v + return s +} + +// A prefix list ID. +type AwsEc2SecurityGroupPrefixListId struct { + _ struct{} `type:"structure"` + + // The ID of the prefix. + PrefixListId *string `type:"string"` +} + +// String returns the string representation +func (s AwsEc2SecurityGroupPrefixListId) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2SecurityGroupPrefixListId) GoString() string { + return s.String() +} + +// SetPrefixListId sets the PrefixListId field's value. +func (s *AwsEc2SecurityGroupPrefixListId) SetPrefixListId(v string) *AwsEc2SecurityGroupPrefixListId { + s.PrefixListId = &v + return s +} + +// A relationship between a security group and a user. +type AwsEc2SecurityGroupUserIdGroupPair struct { + _ struct{} `type:"structure"` + + // The ID of the security group. + GroupId *string `type:"string"` + + // The name of the security group. + GroupName *string `type:"string"` + + // The status of a VPC peering connection, if applicable. + PeeringStatus *string `type:"string"` + + // The ID of an AWS account. + // + // For a referenced security group in another VPC, the account ID of the referenced + // security group is returned in the response. 
If the referenced security group
+	// is deleted, this value is not returned.
+	//
+	// [EC2-Classic] Required when adding or removing rules that reference a security
+	// group in another AWS account.
+	UserId *string `type:"string"`
+
+	// The ID of the VPC for the referenced security group, if applicable.
+	VpcId *string `type:"string"`
+
+	// The ID of the VPC peering connection, if applicable.
+	VpcPeeringConnectionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AwsEc2SecurityGroupUserIdGroupPair) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AwsEc2SecurityGroupUserIdGroupPair) GoString() string {
+	return s.String()
+}
+
+// SetGroupId sets the GroupId field's value.
+func (s *AwsEc2SecurityGroupUserIdGroupPair) SetGroupId(v string) *AwsEc2SecurityGroupUserIdGroupPair {
+	s.GroupId = &v
+	return s
+}
+
+// SetGroupName sets the GroupName field's value.
+func (s *AwsEc2SecurityGroupUserIdGroupPair) SetGroupName(v string) *AwsEc2SecurityGroupUserIdGroupPair {
+	s.GroupName = &v
+	return s
+}
+
+// SetPeeringStatus sets the PeeringStatus field's value.
+func (s *AwsEc2SecurityGroupUserIdGroupPair) SetPeeringStatus(v string) *AwsEc2SecurityGroupUserIdGroupPair {
+	s.PeeringStatus = &v
+	return s
+}
+
+// SetUserId sets the UserId field's value.
+func (s *AwsEc2SecurityGroupUserIdGroupPair) SetUserId(v string) *AwsEc2SecurityGroupUserIdGroupPair {
+	s.UserId = &v
+	return s
+}
+
+// SetVpcId sets the VpcId field's value.
+func (s *AwsEc2SecurityGroupUserIdGroupPair) SetVpcId(v string) *AwsEc2SecurityGroupUserIdGroupPair {
+	s.VpcId = &v
+	return s
+}
+
+// SetVpcPeeringConnectionId sets the VpcPeeringConnectionId field's value.
+func (s *AwsEc2SecurityGroupUserIdGroupPair) SetVpcPeeringConnectionId(v string) *AwsEc2SecurityGroupUserIdGroupPair {
+	s.VpcPeeringConnectionId = &v
+	return s
+}
+
+// An attachment to an AWS EC2 volume.
+type AwsEc2VolumeAttachment struct {
+	_ struct{} `type:"structure"`
+
+	// The datetime when the attachment initiated.
+	AttachTime *string `type:"string"`
+
+	// Whether the EBS volume is deleted when the EC2 instance is terminated.
+	DeleteOnTermination *bool `type:"boolean"`
+
+	// The identifier of the EC2 instance.
+	InstanceId *string `type:"string"`
+
+	// The attachment state of the volume.
+	Status *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AwsEc2VolumeAttachment) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AwsEc2VolumeAttachment) GoString() string {
+	return s.String()
+}
+
+// SetAttachTime sets the AttachTime field's value.
+func (s *AwsEc2VolumeAttachment) SetAttachTime(v string) *AwsEc2VolumeAttachment {
+	s.AttachTime = &v
+	return s
+}
+
+// SetDeleteOnTermination sets the DeleteOnTermination field's value.
+func (s *AwsEc2VolumeAttachment) SetDeleteOnTermination(v bool) *AwsEc2VolumeAttachment {
+	s.DeleteOnTermination = &v
+	return s
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *AwsEc2VolumeAttachment) SetInstanceId(v string) *AwsEc2VolumeAttachment {
+	s.InstanceId = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *AwsEc2VolumeAttachment) SetStatus(v string) *AwsEc2VolumeAttachment {
+	s.Status = &v
+	return s
+}
+
+// Details about an EC2 volume.
+type AwsEc2VolumeDetails struct {
+	_ struct{} `type:"structure"`
+
+	// The volume attachments.
+ Attachments []*AwsEc2VolumeAttachment `type:"list"` + + // Indicates when the volume was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreateTime *string `type:"string"` + + // Whether the volume is encrypted. + Encrypted *bool `type:"boolean"` + + // The ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) + // that was used to protect the volume encryption key for the volume. + KmsKeyId *string `type:"string"` + + // The size of the volume, in GiBs. + Size *int64 `type:"integer"` + + // The snapshot from which the volume was created. + SnapshotId *string `type:"string"` + + // The volume state. + Status *string `type:"string"` +} + +// String returns the string representation +func (s AwsEc2VolumeDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2VolumeDetails) GoString() string { + return s.String() +} + +// SetAttachments sets the Attachments field's value. +func (s *AwsEc2VolumeDetails) SetAttachments(v []*AwsEc2VolumeAttachment) *AwsEc2VolumeDetails { + s.Attachments = v + return s +} + +// SetCreateTime sets the CreateTime field's value. +func (s *AwsEc2VolumeDetails) SetCreateTime(v string) *AwsEc2VolumeDetails { + s.CreateTime = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. +func (s *AwsEc2VolumeDetails) SetEncrypted(v bool) *AwsEc2VolumeDetails { + s.Encrypted = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *AwsEc2VolumeDetails) SetKmsKeyId(v string) *AwsEc2VolumeDetails { + s.KmsKeyId = &v + return s +} + +// SetSize sets the Size field's value. +func (s *AwsEc2VolumeDetails) SetSize(v int64) *AwsEc2VolumeDetails { + s.Size = &v + return s +} + +// SetSnapshotId sets the SnapshotId field's value. +func (s *AwsEc2VolumeDetails) SetSnapshotId(v string) *AwsEc2VolumeDetails { + s.SnapshotId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsEc2VolumeDetails) SetStatus(v string) *AwsEc2VolumeDetails { + s.Status = &v + return s +} + +// Details about an EC2 VPC. +type AwsEc2VpcDetails struct { + _ struct{} `type:"structure"` + + // Information about the IPv4 CIDR blocks associated with the VPC. + CidrBlockAssociationSet []*CidrBlockAssociation `type:"list"` + + // The identifier of the set of Dynamic Host Configuration Protocol (DHCP) options + // that are associated with the VPC. If the default options are associated with + // the VPC, then this is default. + DhcpOptionsId *string `type:"string"` + + // Information about the IPv6 CIDR blocks associated with the VPC. + Ipv6CidrBlockAssociationSet []*Ipv6CidrBlockAssociation `type:"list"` + + // The current state of the VPC. + State *string `type:"string"` +} + +// String returns the string representation +func (s AwsEc2VpcDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsEc2VpcDetails) GoString() string { + return s.String() +} + +// SetCidrBlockAssociationSet sets the CidrBlockAssociationSet field's value. +func (s *AwsEc2VpcDetails) SetCidrBlockAssociationSet(v []*CidrBlockAssociation) *AwsEc2VpcDetails { + s.CidrBlockAssociationSet = v + return s +} + +// SetDhcpOptionsId sets the DhcpOptionsId field's value. 
+func (s *AwsEc2VpcDetails) SetDhcpOptionsId(v string) *AwsEc2VpcDetails { + s.DhcpOptionsId = &v + return s +} + +// SetIpv6CidrBlockAssociationSet sets the Ipv6CidrBlockAssociationSet field's value. +func (s *AwsEc2VpcDetails) SetIpv6CidrBlockAssociationSet(v []*Ipv6CidrBlockAssociation) *AwsEc2VpcDetails { + s.Ipv6CidrBlockAssociationSet = v + return s +} + +// SetState sets the State field's value. +func (s *AwsEc2VpcDetails) SetState(v string) *AwsEc2VpcDetails { + s.State = &v + return s +} + +// Information about an Elasticsearch domain. +type AwsElasticsearchDomainDetails struct { + _ struct{} `type:"structure"` + + // IAM policy document specifying the access policies for the new Amazon ES + // domain. + AccessPolicies *string `type:"string"` + + // Additional options for the domain endpoint. + DomainEndpointOptions *AwsElasticsearchDomainDomainEndpointOptions `type:"structure"` + + // Unique identifier for an Amazon ES domain. + DomainId *string `type:"string"` + + // Name of an Amazon ES domain. + // + // Domain names are unique across all domains owned by the same account within + // an AWS Region. + // + // Domain names must start with a lowercase letter and must be between 3 and + // 28 characters. + // + // Valid characters are a-z (lowercase only), 0-9, and – (hyphen). + DomainName *string `type:"string"` + + // Elasticsearch version. + ElasticsearchVersion *string `type:"string"` + + // Details about the configuration for encryption at rest. + EncryptionAtRestOptions *AwsElasticsearchDomainEncryptionAtRestOptions `type:"structure"` + + // Domain-specific endpoint used to submit index, search, and data upload requests + // to an Amazon ES domain. + // + // The endpoint is a service URL. + Endpoint *string `type:"string"` + + // The key-value pair that exists if the Amazon ES domain uses VPC endpoints. + Endpoints map[string]*string `type:"map"` + + // Details about the configuration for node-to-node encryption. + NodeToNodeEncryptionOptions *AwsElasticsearchDomainNodeToNodeEncryptionOptions `type:"structure"` + + // Information that Amazon ES derives based on VPCOptions for the domain. + VPCOptions *AwsElasticsearchDomainVPCOptions `type:"structure"` +} + +// String returns the string representation +func (s AwsElasticsearchDomainDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElasticsearchDomainDetails) GoString() string { + return s.String() +} + +// SetAccessPolicies sets the AccessPolicies field's value. +func (s *AwsElasticsearchDomainDetails) SetAccessPolicies(v string) *AwsElasticsearchDomainDetails { + s.AccessPolicies = &v + return s +} + +// SetDomainEndpointOptions sets the DomainEndpointOptions field's value. +func (s *AwsElasticsearchDomainDetails) SetDomainEndpointOptions(v *AwsElasticsearchDomainDomainEndpointOptions) *AwsElasticsearchDomainDetails { + s.DomainEndpointOptions = v + return s +} + +// SetDomainId sets the DomainId field's value. +func (s *AwsElasticsearchDomainDetails) SetDomainId(v string) *AwsElasticsearchDomainDetails { + s.DomainId = &v + return s +} + +// SetDomainName sets the DomainName field's value. +func (s *AwsElasticsearchDomainDetails) SetDomainName(v string) *AwsElasticsearchDomainDetails { + s.DomainName = &v + return s +} + +// SetElasticsearchVersion sets the ElasticsearchVersion field's value. 
+func (s *AwsElasticsearchDomainDetails) SetElasticsearchVersion(v string) *AwsElasticsearchDomainDetails { + s.ElasticsearchVersion = &v + return s +} + +// SetEncryptionAtRestOptions sets the EncryptionAtRestOptions field's value. +func (s *AwsElasticsearchDomainDetails) SetEncryptionAtRestOptions(v *AwsElasticsearchDomainEncryptionAtRestOptions) *AwsElasticsearchDomainDetails { + s.EncryptionAtRestOptions = v + return s +} + +// SetEndpoint sets the Endpoint field's value. +func (s *AwsElasticsearchDomainDetails) SetEndpoint(v string) *AwsElasticsearchDomainDetails { + s.Endpoint = &v + return s +} + +// SetEndpoints sets the Endpoints field's value. +func (s *AwsElasticsearchDomainDetails) SetEndpoints(v map[string]*string) *AwsElasticsearchDomainDetails { + s.Endpoints = v + return s +} + +// SetNodeToNodeEncryptionOptions sets the NodeToNodeEncryptionOptions field's value. +func (s *AwsElasticsearchDomainDetails) SetNodeToNodeEncryptionOptions(v *AwsElasticsearchDomainNodeToNodeEncryptionOptions) *AwsElasticsearchDomainDetails { + s.NodeToNodeEncryptionOptions = v + return s +} + +// SetVPCOptions sets the VPCOptions field's value. +func (s *AwsElasticsearchDomainDetails) SetVPCOptions(v *AwsElasticsearchDomainVPCOptions) *AwsElasticsearchDomainDetails { + s.VPCOptions = v + return s +} + +// Additional options for the domain endpoint, such as whether to require HTTPS +// for all traffic. +type AwsElasticsearchDomainDomainEndpointOptions struct { + _ struct{} `type:"structure"` + + // Whether to require that all traffic to the domain arrive over HTTPS. + EnforceHTTPS *bool `type:"boolean"` + + // The TLS security policy to apply to the HTTPS endpoint of the Elasticsearch + // domain. + // + // Valid values: + // + // * Policy-Min-TLS-1-0-2019-07, which supports TLSv1.0 and higher + // + // * Policy-Min-TLS-1-2-2019-07, which only supports TLSv1.2 + TLSSecurityPolicy *string `type:"string"` +} + +// String returns the string representation +func (s AwsElasticsearchDomainDomainEndpointOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElasticsearchDomainDomainEndpointOptions) GoString() string { + return s.String() +} + +// SetEnforceHTTPS sets the EnforceHTTPS field's value. +func (s *AwsElasticsearchDomainDomainEndpointOptions) SetEnforceHTTPS(v bool) *AwsElasticsearchDomainDomainEndpointOptions { + s.EnforceHTTPS = &v + return s +} + +// SetTLSSecurityPolicy sets the TLSSecurityPolicy field's value. +func (s *AwsElasticsearchDomainDomainEndpointOptions) SetTLSSecurityPolicy(v string) *AwsElasticsearchDomainDomainEndpointOptions { + s.TLSSecurityPolicy = &v + return s +} + +// Details about the configuration for encryption at rest. +type AwsElasticsearchDomainEncryptionAtRestOptions struct { + _ struct{} `type:"structure"` + + // Whether encryption at rest is enabled. + Enabled *bool `type:"boolean"` + + // The KMS key ID. Takes the form 1a2a3a4-1a2a-3a4a-5a6a-1a2a3a4a5a6a. + KmsKeyId *string `type:"string"` +} + +// String returns the string representation +func (s AwsElasticsearchDomainEncryptionAtRestOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElasticsearchDomainEncryptionAtRestOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. 
+func (s *AwsElasticsearchDomainEncryptionAtRestOptions) SetEnabled(v bool) *AwsElasticsearchDomainEncryptionAtRestOptions { + s.Enabled = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *AwsElasticsearchDomainEncryptionAtRestOptions) SetKmsKeyId(v string) *AwsElasticsearchDomainEncryptionAtRestOptions { + s.KmsKeyId = &v + return s +} + +// Details about the configuration for node-to-node encryption. +type AwsElasticsearchDomainNodeToNodeEncryptionOptions struct { + _ struct{} `type:"structure"` + + // Whether node-to-node encryption is enabled. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s AwsElasticsearchDomainNodeToNodeEncryptionOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElasticsearchDomainNodeToNodeEncryptionOptions) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *AwsElasticsearchDomainNodeToNodeEncryptionOptions) SetEnabled(v bool) *AwsElasticsearchDomainNodeToNodeEncryptionOptions { + s.Enabled = &v + return s +} + +// Information that Amazon ES derives based on VPCOptions for the domain. +type AwsElasticsearchDomainVPCOptions struct { + _ struct{} `type:"structure"` + + // The list of Availability Zones associated with the VPC subnets. + AvailabilityZones []*string `type:"list"` + + // The list of security group IDs associated with the VPC endpoints for the + // domain. + SecurityGroupIds []*string `type:"list"` + + // A list of subnet IDs associated with the VPC endpoints for the domain. + SubnetIds []*string `type:"list"` + + // ID for the VPC. + VPCId *string `type:"string"` +} + +// String returns the string representation +func (s AwsElasticsearchDomainVPCOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElasticsearchDomainVPCOptions) GoString() string { + return s.String() +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *AwsElasticsearchDomainVPCOptions) SetAvailabilityZones(v []*string) *AwsElasticsearchDomainVPCOptions { + s.AvailabilityZones = v + return s +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *AwsElasticsearchDomainVPCOptions) SetSecurityGroupIds(v []*string) *AwsElasticsearchDomainVPCOptions { + s.SecurityGroupIds = v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *AwsElasticsearchDomainVPCOptions) SetSubnetIds(v []*string) *AwsElasticsearchDomainVPCOptions { + s.SubnetIds = v + return s +} + +// SetVPCId sets the VPCId field's value. +func (s *AwsElasticsearchDomainVPCOptions) SetVPCId(v string) *AwsElasticsearchDomainVPCOptions { + s.VPCId = &v + return s +} + +// Contains information about a stickiness policy that was created using CreateAppCookieStickinessPolicy. +type AwsElbAppCookieStickinessPolicy struct { + _ struct{} `type:"structure"` + + // The name of the application cookie used for stickiness. + CookieName *string `type:"string"` + + // The mnemonic name for the policy being created. The name must be unique within + // the set of policies for the load balancer. 
+ PolicyName *string `type:"string"` +} + +// String returns the string representation +func (s AwsElbAppCookieStickinessPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbAppCookieStickinessPolicy) GoString() string { + return s.String() +} + +// SetCookieName sets the CookieName field's value. +func (s *AwsElbAppCookieStickinessPolicy) SetCookieName(v string) *AwsElbAppCookieStickinessPolicy { + s.CookieName = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *AwsElbAppCookieStickinessPolicy) SetPolicyName(v string) *AwsElbAppCookieStickinessPolicy { + s.PolicyName = &v + return s +} + +// Contains information about a stickiness policy that was created using CreateLBCookieStickinessPolicy. +type AwsElbLbCookieStickinessPolicy struct { + _ struct{} `type:"structure"` + + // The amount of time, in seconds, after which the cookie is considered stale. + // If an expiration period is not specified, the stickiness session lasts for + // the duration of the browser session. + CookieExpirationPeriod *int64 `type:"long"` + + // The name of the policy. The name must be unique within the set of policies + // for the load balancer. + PolicyName *string `type:"string"` +} + +// String returns the string representation +func (s AwsElbLbCookieStickinessPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbLbCookieStickinessPolicy) GoString() string { + return s.String() +} + +// SetCookieExpirationPeriod sets the CookieExpirationPeriod field's value. +func (s *AwsElbLbCookieStickinessPolicy) SetCookieExpirationPeriod(v int64) *AwsElbLbCookieStickinessPolicy { + s.CookieExpirationPeriod = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *AwsElbLbCookieStickinessPolicy) SetPolicyName(v string) *AwsElbLbCookieStickinessPolicy { + s.PolicyName = &v + return s +} + +// Contains information about the access log configuration for the load balancer. +type AwsElbLoadBalancerAccessLog struct { + _ struct{} `type:"structure"` + + // The interval in minutes for publishing the access logs. + // + // You can publish access logs either every 5 minutes or every 60 minutes. + EmitInterval *int64 `type:"integer"` + + // Indicates whether access logs are enabled for the load balancer. + Enabled *bool `type:"boolean"` + + // The name of the S3 bucket where the access logs are stored. + S3BucketName *string `type:"string"` + + // The logical hierarchy that was created for the S3 bucket. + // + // If a prefix is not provided, the log is placed at the root level of the bucket. + S3BucketPrefix *string `type:"string"` +} + +// String returns the string representation +func (s AwsElbLoadBalancerAccessLog) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbLoadBalancerAccessLog) GoString() string { + return s.String() +} + +// SetEmitInterval sets the EmitInterval field's value. +func (s *AwsElbLoadBalancerAccessLog) SetEmitInterval(v int64) *AwsElbLoadBalancerAccessLog { + s.EmitInterval = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *AwsElbLoadBalancerAccessLog) SetEnabled(v bool) *AwsElbLoadBalancerAccessLog { + s.Enabled = &v + return s +} + +// SetS3BucketName sets the S3BucketName field's value. 
+func (s *AwsElbLoadBalancerAccessLog) SetS3BucketName(v string) *AwsElbLoadBalancerAccessLog { + s.S3BucketName = &v + return s +} + +// SetS3BucketPrefix sets the S3BucketPrefix field's value. +func (s *AwsElbLoadBalancerAccessLog) SetS3BucketPrefix(v string) *AwsElbLoadBalancerAccessLog { + s.S3BucketPrefix = &v + return s +} + +// Contains attributes for the load balancer. +type AwsElbLoadBalancerAttributes struct { + _ struct{} `type:"structure"` + + // Information about the access log configuration for the load balancer. + // + // If the access log is enabled, the load balancer captures detailed information + // about all requests. It delivers the information to a specified S3 bucket. + AccessLog *AwsElbLoadBalancerAccessLog `type:"structure"` + + // Information about the connection draining configuration for the load balancer. + // + // If connection draining is enabled, the load balancer allows existing requests + // to complete before it shifts traffic away from a deregistered or unhealthy + // instance. + ConnectionDraining *AwsElbLoadBalancerConnectionDraining `type:"structure"` + + // Connection settings for the load balancer. + // + // If an idle timeout is configured, the load balancer allows connections to + // remain idle for the specified duration. When a connection is idle, no data + // is sent over the connection. + ConnectionSettings *AwsElbLoadBalancerConnectionSettings `type:"structure"` + + // Cross-zone load balancing settings for the load balancer. + // + // If cross-zone load balancing is enabled, the load balancer routes the request + // traffic evenly across all instances regardless of the Availability Zones. + CrossZoneLoadBalancing *AwsElbLoadBalancerCrossZoneLoadBalancing `type:"structure"` +} + +// String returns the string representation +func (s AwsElbLoadBalancerAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbLoadBalancerAttributes) GoString() string { + return s.String() +} + +// SetAccessLog sets the AccessLog field's value. +func (s *AwsElbLoadBalancerAttributes) SetAccessLog(v *AwsElbLoadBalancerAccessLog) *AwsElbLoadBalancerAttributes { + s.AccessLog = v + return s +} + +// SetConnectionDraining sets the ConnectionDraining field's value. +func (s *AwsElbLoadBalancerAttributes) SetConnectionDraining(v *AwsElbLoadBalancerConnectionDraining) *AwsElbLoadBalancerAttributes { + s.ConnectionDraining = v + return s +} + +// SetConnectionSettings sets the ConnectionSettings field's value. +func (s *AwsElbLoadBalancerAttributes) SetConnectionSettings(v *AwsElbLoadBalancerConnectionSettings) *AwsElbLoadBalancerAttributes { + s.ConnectionSettings = v + return s +} + +// SetCrossZoneLoadBalancing sets the CrossZoneLoadBalancing field's value. +func (s *AwsElbLoadBalancerAttributes) SetCrossZoneLoadBalancing(v *AwsElbLoadBalancerCrossZoneLoadBalancing) *AwsElbLoadBalancerAttributes { + s.CrossZoneLoadBalancing = v + return s +} + +// Provides information about the configuration of an EC2 instance for the load +// balancer. +type AwsElbLoadBalancerBackendServerDescription struct { + _ struct{} `type:"structure"` + + // The port on which the EC2 instance is listening. + InstancePort *int64 `type:"integer"` + + // The names of the policies that are enabled for the EC2 instance. 
+ PolicyNames []*string `type:"list"` +} + +// String returns the string representation +func (s AwsElbLoadBalancerBackendServerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbLoadBalancerBackendServerDescription) GoString() string { + return s.String() +} + +// SetInstancePort sets the InstancePort field's value. +func (s *AwsElbLoadBalancerBackendServerDescription) SetInstancePort(v int64) *AwsElbLoadBalancerBackendServerDescription { + s.InstancePort = &v + return s +} + +// SetPolicyNames sets the PolicyNames field's value. +func (s *AwsElbLoadBalancerBackendServerDescription) SetPolicyNames(v []*string) *AwsElbLoadBalancerBackendServerDescription { + s.PolicyNames = v + return s +} + +// Contains information about the connection draining configuration for the +// load balancer. +type AwsElbLoadBalancerConnectionDraining struct { + _ struct{} `type:"structure"` + + // Indicates whether connection draining is enabled for the load balancer. + Enabled *bool `type:"boolean"` + + // The maximum time, in seconds, to keep the existing connections open before + // deregistering the instances. + Timeout *int64 `type:"integer"` +} + +// String returns the string representation +func (s AwsElbLoadBalancerConnectionDraining) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbLoadBalancerConnectionDraining) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *AwsElbLoadBalancerConnectionDraining) SetEnabled(v bool) *AwsElbLoadBalancerConnectionDraining { + s.Enabled = &v + return s +} + +// SetTimeout sets the Timeout field's value. +func (s *AwsElbLoadBalancerConnectionDraining) SetTimeout(v int64) *AwsElbLoadBalancerConnectionDraining { + s.Timeout = &v + return s +} + +// Contains connection settings for the load balancer. +type AwsElbLoadBalancerConnectionSettings struct { + _ struct{} `type:"structure"` + + // The time, in seconds, that the connection can be idle (no data is sent over + // the connection) before it is closed by the load balancer. + IdleTimeout *int64 `type:"integer"` +} + +// String returns the string representation +func (s AwsElbLoadBalancerConnectionSettings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbLoadBalancerConnectionSettings) GoString() string { + return s.String() +} + +// SetIdleTimeout sets the IdleTimeout field's value. +func (s *AwsElbLoadBalancerConnectionSettings) SetIdleTimeout(v int64) *AwsElbLoadBalancerConnectionSettings { + s.IdleTimeout = &v + return s +} + +// Contains cross-zone load balancing settings for the load balancer. +type AwsElbLoadBalancerCrossZoneLoadBalancing struct { + _ struct{} `type:"structure"` + + // Indicates whether cross-zone load balancing is enabled for the load balancer. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s AwsElbLoadBalancerCrossZoneLoadBalancing) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbLoadBalancerCrossZoneLoadBalancing) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *AwsElbLoadBalancerCrossZoneLoadBalancing) SetEnabled(v bool) *AwsElbLoadBalancerCrossZoneLoadBalancing { + s.Enabled = &v + return s +} + +// Contains details about a Classic Load Balancer. 
+type AwsElbLoadBalancerDetails struct { + _ struct{} `type:"structure"` + + // The list of Availability Zones for the load balancer. + AvailabilityZones []*string `type:"list"` + + // Information about the configuration of the EC2 instances. + BackendServerDescriptions []*AwsElbLoadBalancerBackendServerDescription `type:"list"` + + // The name of the Amazon Route 53 hosted zone for the load balancer. + CanonicalHostedZoneName *string `type:"string"` + + // The ID of the Amazon Route 53 hosted zone for the load balancer. + CanonicalHostedZoneNameID *string `type:"string"` + + // Indicates when the load balancer was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreatedTime *string `type:"string"` + + // The DNS name of the load balancer. + DnsName *string `type:"string"` + + // Information about the health checks that are conducted on the load balancer. + HealthCheck *AwsElbLoadBalancerHealthCheck `type:"structure"` + + // List of EC2 instances for the load balancer. + Instances []*AwsElbLoadBalancerInstance `type:"list"` + + // The policies that are enabled for the load balancer listeners. + ListenerDescriptions []*AwsElbLoadBalancerListenerDescription `type:"list"` + + // The attributes for a load balancer. + LoadBalancerAttributes *AwsElbLoadBalancerAttributes `type:"structure"` + + // The name of the load balancer. + LoadBalancerName *string `type:"string"` + + // The policies for a load balancer. + Policies *AwsElbLoadBalancerPolicies `type:"structure"` + + // The type of load balancer. Only provided if the load balancer is in a VPC. + // + // If Scheme is internet-facing, the load balancer has a public DNS name that + // resolves to a public IP address. + // + // If Scheme is internal, the load balancer has a public DNS name that resolves + // to a private IP address. + Scheme *string `type:"string"` + + // The security groups for the load balancer. Only provided if the load balancer + // is in a VPC. + SecurityGroups []*string `type:"list"` + + // Information about the security group for the load balancer. This is the security + // group that is used for inbound rules. + SourceSecurityGroup *AwsElbLoadBalancerSourceSecurityGroup `type:"structure"` + + // The list of subnet identifiers for the load balancer. + Subnets []*string `type:"list"` + + // The identifier of the VPC for the load balancer. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s AwsElbLoadBalancerDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbLoadBalancerDetails) GoString() string { + return s.String() +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *AwsElbLoadBalancerDetails) SetAvailabilityZones(v []*string) *AwsElbLoadBalancerDetails { + s.AvailabilityZones = v + return s +} + +// SetBackendServerDescriptions sets the BackendServerDescriptions field's value. +func (s *AwsElbLoadBalancerDetails) SetBackendServerDescriptions(v []*AwsElbLoadBalancerBackendServerDescription) *AwsElbLoadBalancerDetails { + s.BackendServerDescriptions = v + return s +} + +// SetCanonicalHostedZoneName sets the CanonicalHostedZoneName field's value. 
+func (s *AwsElbLoadBalancerDetails) SetCanonicalHostedZoneName(v string) *AwsElbLoadBalancerDetails { + s.CanonicalHostedZoneName = &v + return s +} + +// SetCanonicalHostedZoneNameID sets the CanonicalHostedZoneNameID field's value. +func (s *AwsElbLoadBalancerDetails) SetCanonicalHostedZoneNameID(v string) *AwsElbLoadBalancerDetails { + s.CanonicalHostedZoneNameID = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *AwsElbLoadBalancerDetails) SetCreatedTime(v string) *AwsElbLoadBalancerDetails { + s.CreatedTime = &v + return s +} + +// SetDnsName sets the DnsName field's value. +func (s *AwsElbLoadBalancerDetails) SetDnsName(v string) *AwsElbLoadBalancerDetails { + s.DnsName = &v + return s +} + +// SetHealthCheck sets the HealthCheck field's value. +func (s *AwsElbLoadBalancerDetails) SetHealthCheck(v *AwsElbLoadBalancerHealthCheck) *AwsElbLoadBalancerDetails { + s.HealthCheck = v + return s +} + +// SetInstances sets the Instances field's value. +func (s *AwsElbLoadBalancerDetails) SetInstances(v []*AwsElbLoadBalancerInstance) *AwsElbLoadBalancerDetails { + s.Instances = v + return s +} + +// SetListenerDescriptions sets the ListenerDescriptions field's value. +func (s *AwsElbLoadBalancerDetails) SetListenerDescriptions(v []*AwsElbLoadBalancerListenerDescription) *AwsElbLoadBalancerDetails { + s.ListenerDescriptions = v + return s +} + +// SetLoadBalancerAttributes sets the LoadBalancerAttributes field's value. +func (s *AwsElbLoadBalancerDetails) SetLoadBalancerAttributes(v *AwsElbLoadBalancerAttributes) *AwsElbLoadBalancerDetails { + s.LoadBalancerAttributes = v + return s +} + +// SetLoadBalancerName sets the LoadBalancerName field's value. +func (s *AwsElbLoadBalancerDetails) SetLoadBalancerName(v string) *AwsElbLoadBalancerDetails { + s.LoadBalancerName = &v + return s +} + +// SetPolicies sets the Policies field's value. +func (s *AwsElbLoadBalancerDetails) SetPolicies(v *AwsElbLoadBalancerPolicies) *AwsElbLoadBalancerDetails { + s.Policies = v + return s +} + +// SetScheme sets the Scheme field's value. +func (s *AwsElbLoadBalancerDetails) SetScheme(v string) *AwsElbLoadBalancerDetails { + s.Scheme = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *AwsElbLoadBalancerDetails) SetSecurityGroups(v []*string) *AwsElbLoadBalancerDetails { + s.SecurityGroups = v + return s +} + +// SetSourceSecurityGroup sets the SourceSecurityGroup field's value. +func (s *AwsElbLoadBalancerDetails) SetSourceSecurityGroup(v *AwsElbLoadBalancerSourceSecurityGroup) *AwsElbLoadBalancerDetails { + s.SourceSecurityGroup = v + return s +} + +// SetSubnets sets the Subnets field's value. +func (s *AwsElbLoadBalancerDetails) SetSubnets(v []*string) *AwsElbLoadBalancerDetails { + s.Subnets = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *AwsElbLoadBalancerDetails) SetVpcId(v string) *AwsElbLoadBalancerDetails { + s.VpcId = &v + return s +} + +// Contains information about the health checks that are conducted on the load +// balancer. +type AwsElbLoadBalancerHealthCheck struct { + _ struct{} `type:"structure"` + + // The number of consecutive health check successes required before the instance + // is moved to the Healthy state. + HealthyThreshold *int64 `type:"integer"` + + // The approximate interval, in seconds, between health checks of an individual + // instance. + Interval *int64 `type:"integer"` + + // The instance that is being checked. The target specifies the protocol and + // port. 
The available protocols are TCP, SSL, HTTP, and HTTPS. The range of
+ // valid ports is 1 through 65535.
+ //
+ // For the HTTP and HTTPS protocols, the target also specifies the ping path.
+ //
+ // For the TCP protocol, the target is specified as TCP:port.
+ //
+ // For the SSL protocol, the target is specified as SSL:port.
+ //
+ // For the HTTP and HTTPS protocols, the target is specified as protocol:port/path
+ // (for example, HTTP:80/index.html).
+ Target *string `type:"string"`
+
+ // The amount of time, in seconds, during which no response means a failed health
+ // check.
+ Timeout *int64 `type:"integer"`
+
+ // The number of consecutive health check failures that must occur before the
+ // instance is moved to the Unhealthy state.
+ UnhealthyThreshold *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AwsElbLoadBalancerHealthCheck) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AwsElbLoadBalancerHealthCheck) GoString() string {
+ return s.String()
+}
+
+// SetHealthyThreshold sets the HealthyThreshold field's value.
+func (s *AwsElbLoadBalancerHealthCheck) SetHealthyThreshold(v int64) *AwsElbLoadBalancerHealthCheck {
+ s.HealthyThreshold = &v
+ return s
+}
+
+// SetInterval sets the Interval field's value.
+func (s *AwsElbLoadBalancerHealthCheck) SetInterval(v int64) *AwsElbLoadBalancerHealthCheck {
+ s.Interval = &v
+ return s
+}
+
+// SetTarget sets the Target field's value.
+func (s *AwsElbLoadBalancerHealthCheck) SetTarget(v string) *AwsElbLoadBalancerHealthCheck {
+ s.Target = &v
+ return s
+}
+
+// SetTimeout sets the Timeout field's value.
+func (s *AwsElbLoadBalancerHealthCheck) SetTimeout(v int64) *AwsElbLoadBalancerHealthCheck {
+ s.Timeout = &v
+ return s
+}
+
+// SetUnhealthyThreshold sets the UnhealthyThreshold field's value.
+func (s *AwsElbLoadBalancerHealthCheck) SetUnhealthyThreshold(v int64) *AwsElbLoadBalancerHealthCheck {
+ s.UnhealthyThreshold = &v
+ return s
+}
+
+// Provides information about an EC2 instance for a load balancer.
+type AwsElbLoadBalancerInstance struct {
+ _ struct{} `type:"structure"`
+
+ // The instance identifier.
+ InstanceId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AwsElbLoadBalancerInstance) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AwsElbLoadBalancerInstance) GoString() string {
+ return s.String()
+}
+
+// SetInstanceId sets the InstanceId field's value.
+func (s *AwsElbLoadBalancerInstance) SetInstanceId(v string) *AwsElbLoadBalancerInstance {
+ s.InstanceId = &v
+ return s
+}
+
+// Information about a load balancer listener.
+type AwsElbLoadBalancerListener struct {
+ _ struct{} `type:"structure"`
+
+ // The port on which the instance is listening.
+ InstancePort *int64 `type:"integer"`
+
+ // The protocol to use to route traffic to instances.
+ //
+ // Valid values: HTTP | HTTPS | TCP | SSL
+ InstanceProtocol *string `type:"string"`
+
+ // The port on which the load balancer is listening.
+ //
+ // On EC2-VPC, you can specify any port from the range 1-65535.
+ //
+ // On EC2-Classic, you can specify any port from the following list: 25, 80,
+ // 443, 465, 587, 1024-65535.
+ LoadBalancerPort *int64 `type:"integer"`
+
+ // The load balancer transport protocol to use for routing.
+ //
+ // Valid values: HTTP | HTTPS | TCP | SSL
+ Protocol *string `type:"string"`
+
+ // The ARN of the server certificate.
+ SslCertificateId *string `type:"string"` +} + +// String returns the string representation +func (s AwsElbLoadBalancerListener) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbLoadBalancerListener) GoString() string { + return s.String() +} + +// SetInstancePort sets the InstancePort field's value. +func (s *AwsElbLoadBalancerListener) SetInstancePort(v int64) *AwsElbLoadBalancerListener { + s.InstancePort = &v + return s +} + +// SetInstanceProtocol sets the InstanceProtocol field's value. +func (s *AwsElbLoadBalancerListener) SetInstanceProtocol(v string) *AwsElbLoadBalancerListener { + s.InstanceProtocol = &v + return s +} + +// SetLoadBalancerPort sets the LoadBalancerPort field's value. +func (s *AwsElbLoadBalancerListener) SetLoadBalancerPort(v int64) *AwsElbLoadBalancerListener { + s.LoadBalancerPort = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *AwsElbLoadBalancerListener) SetProtocol(v string) *AwsElbLoadBalancerListener { + s.Protocol = &v + return s +} + +// SetSslCertificateId sets the SslCertificateId field's value. +func (s *AwsElbLoadBalancerListener) SetSslCertificateId(v string) *AwsElbLoadBalancerListener { + s.SslCertificateId = &v + return s +} + +// Lists the policies that are enabled for a load balancer listener. +type AwsElbLoadBalancerListenerDescription struct { + _ struct{} `type:"structure"` + + // Information about the listener. + Listener *AwsElbLoadBalancerListener `type:"structure"` + + // The policies enabled for the listener. + PolicyNames []*string `type:"list"` +} + +// String returns the string representation +func (s AwsElbLoadBalancerListenerDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbLoadBalancerListenerDescription) GoString() string { + return s.String() +} + +// SetListener sets the Listener field's value. +func (s *AwsElbLoadBalancerListenerDescription) SetListener(v *AwsElbLoadBalancerListener) *AwsElbLoadBalancerListenerDescription { + s.Listener = v + return s +} + +// SetPolicyNames sets the PolicyNames field's value. +func (s *AwsElbLoadBalancerListenerDescription) SetPolicyNames(v []*string) *AwsElbLoadBalancerListenerDescription { + s.PolicyNames = v + return s +} + +// Contains information about the policies for a load balancer. +type AwsElbLoadBalancerPolicies struct { + _ struct{} `type:"structure"` + + // The stickiness policies that are created using CreateAppCookieStickinessPolicy. + AppCookieStickinessPolicies []*AwsElbAppCookieStickinessPolicy `type:"list"` + + // The stickiness policies that are created using CreateLBCookieStickinessPolicy. + LbCookieStickinessPolicies []*AwsElbLbCookieStickinessPolicy `type:"list"` + + // The policies other than the stickiness policies. + OtherPolicies []*string `type:"list"` +} + +// String returns the string representation +func (s AwsElbLoadBalancerPolicies) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbLoadBalancerPolicies) GoString() string { + return s.String() +} + +// SetAppCookieStickinessPolicies sets the AppCookieStickinessPolicies field's value. +func (s *AwsElbLoadBalancerPolicies) SetAppCookieStickinessPolicies(v []*AwsElbAppCookieStickinessPolicy) *AwsElbLoadBalancerPolicies { + s.AppCookieStickinessPolicies = v + return s +} + +// SetLbCookieStickinessPolicies sets the LbCookieStickinessPolicies field's value. 
+func (s *AwsElbLoadBalancerPolicies) SetLbCookieStickinessPolicies(v []*AwsElbLbCookieStickinessPolicy) *AwsElbLoadBalancerPolicies { + s.LbCookieStickinessPolicies = v + return s +} + +// SetOtherPolicies sets the OtherPolicies field's value. +func (s *AwsElbLoadBalancerPolicies) SetOtherPolicies(v []*string) *AwsElbLoadBalancerPolicies { + s.OtherPolicies = v + return s +} + +// Contains information about the security group for the load balancer. +type AwsElbLoadBalancerSourceSecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the security group. + GroupName *string `type:"string"` + + // The owner of the security group. + OwnerAlias *string `type:"string"` +} + +// String returns the string representation +func (s AwsElbLoadBalancerSourceSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbLoadBalancerSourceSecurityGroup) GoString() string { + return s.String() +} + +// SetGroupName sets the GroupName field's value. +func (s *AwsElbLoadBalancerSourceSecurityGroup) SetGroupName(v string) *AwsElbLoadBalancerSourceSecurityGroup { + s.GroupName = &v + return s +} + +// SetOwnerAlias sets the OwnerAlias field's value. +func (s *AwsElbLoadBalancerSourceSecurityGroup) SetOwnerAlias(v string) *AwsElbLoadBalancerSourceSecurityGroup { + s.OwnerAlias = &v + return s +} + +// Information about a load balancer. +type AwsElbv2LoadBalancerDetails struct { + _ struct{} `type:"structure"` + + // The Availability Zones for the load balancer. + AvailabilityZones []*AvailabilityZone `type:"list"` + + // The ID of the Amazon Route 53 hosted zone associated with the load balancer. + CanonicalHostedZoneId *string `type:"string"` + + // Indicates when the load balancer was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreatedTime *string `type:"string"` + + // The public DNS name of the load balancer. + DNSName *string `type:"string"` + + // The type of IP addresses used by the subnets for your load balancer. The + // possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and + // IPv6 addresses). + IpAddressType *string `type:"string"` + + // The nodes of an Internet-facing load balancer have public IP addresses. + Scheme *string `type:"string"` + + // The IDs of the security groups for the load balancer. + SecurityGroups []*string `type:"list"` + + // The state of the load balancer. + State *LoadBalancerState `type:"structure"` + + // The type of load balancer. + Type *string `type:"string"` + + // The ID of the VPC for the load balancer. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s AwsElbv2LoadBalancerDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsElbv2LoadBalancerDetails) GoString() string { + return s.String() +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *AwsElbv2LoadBalancerDetails) SetAvailabilityZones(v []*AvailabilityZone) *AwsElbv2LoadBalancerDetails { + s.AvailabilityZones = v + return s +} + +// SetCanonicalHostedZoneId sets the CanonicalHostedZoneId field's value. 
+func (s *AwsElbv2LoadBalancerDetails) SetCanonicalHostedZoneId(v string) *AwsElbv2LoadBalancerDetails { + s.CanonicalHostedZoneId = &v + return s +} + +// SetCreatedTime sets the CreatedTime field's value. +func (s *AwsElbv2LoadBalancerDetails) SetCreatedTime(v string) *AwsElbv2LoadBalancerDetails { + s.CreatedTime = &v + return s +} + +// SetDNSName sets the DNSName field's value. +func (s *AwsElbv2LoadBalancerDetails) SetDNSName(v string) *AwsElbv2LoadBalancerDetails { + s.DNSName = &v + return s +} + +// SetIpAddressType sets the IpAddressType field's value. +func (s *AwsElbv2LoadBalancerDetails) SetIpAddressType(v string) *AwsElbv2LoadBalancerDetails { + s.IpAddressType = &v + return s +} + +// SetScheme sets the Scheme field's value. +func (s *AwsElbv2LoadBalancerDetails) SetScheme(v string) *AwsElbv2LoadBalancerDetails { + s.Scheme = &v + return s +} + +// SetSecurityGroups sets the SecurityGroups field's value. +func (s *AwsElbv2LoadBalancerDetails) SetSecurityGroups(v []*string) *AwsElbv2LoadBalancerDetails { + s.SecurityGroups = v + return s +} + +// SetState sets the State field's value. +func (s *AwsElbv2LoadBalancerDetails) SetState(v *LoadBalancerState) *AwsElbv2LoadBalancerDetails { + s.State = v + return s +} + +// SetType sets the Type field's value. +func (s *AwsElbv2LoadBalancerDetails) SetType(v string) *AwsElbv2LoadBalancerDetails { + s.Type = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *AwsElbv2LoadBalancerDetails) SetVpcId(v string) *AwsElbv2LoadBalancerDetails { + s.VpcId = &v + return s +} + +// IAM access key details related to a finding. +type AwsIamAccessKeyDetails struct { + _ struct{} `type:"structure"` + + // The identifier of the access key. + AccessKeyId *string `type:"string"` + + // The AWS account ID of the account for the key. + AccountId *string `type:"string"` + + // Indicates when the IAM access key was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreatedAt *string `type:"string"` + + // The ID of the principal associated with an access key. + PrincipalId *string `type:"string"` + + // The name of the principal. + PrincipalName *string `type:"string"` + + // The type of principal associated with an access key. + PrincipalType *string `type:"string"` + + // Information about the session that the key was used for. + SessionContext *AwsIamAccessKeySessionContext `type:"structure"` + + // The status of the IAM access key related to a finding. + Status *string `type:"string" enum:"AwsIamAccessKeyStatus"` + + // The user associated with the IAM access key related to a finding. + // + // The UserName parameter has been replaced with the PrincipalName parameter + // because access keys can also be assigned to principals that are not IAM users. + // + // Deprecated: This field is deprecated, use PrincipalName instead. + UserName *string `deprecated:"true" type:"string"` +} + +// String returns the string representation +func (s AwsIamAccessKeyDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamAccessKeyDetails) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. 
+func (s *AwsIamAccessKeyDetails) SetAccessKeyId(v string) *AwsIamAccessKeyDetails { + s.AccessKeyId = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *AwsIamAccessKeyDetails) SetAccountId(v string) *AwsIamAccessKeyDetails { + s.AccountId = &v + return s +} + +// SetCreatedAt sets the CreatedAt field's value. +func (s *AwsIamAccessKeyDetails) SetCreatedAt(v string) *AwsIamAccessKeyDetails { + s.CreatedAt = &v + return s +} + +// SetPrincipalId sets the PrincipalId field's value. +func (s *AwsIamAccessKeyDetails) SetPrincipalId(v string) *AwsIamAccessKeyDetails { + s.PrincipalId = &v + return s +} + +// SetPrincipalName sets the PrincipalName field's value. +func (s *AwsIamAccessKeyDetails) SetPrincipalName(v string) *AwsIamAccessKeyDetails { + s.PrincipalName = &v + return s +} + +// SetPrincipalType sets the PrincipalType field's value. +func (s *AwsIamAccessKeyDetails) SetPrincipalType(v string) *AwsIamAccessKeyDetails { + s.PrincipalType = &v + return s +} + +// SetSessionContext sets the SessionContext field's value. +func (s *AwsIamAccessKeyDetails) SetSessionContext(v *AwsIamAccessKeySessionContext) *AwsIamAccessKeyDetails { + s.SessionContext = v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsIamAccessKeyDetails) SetStatus(v string) *AwsIamAccessKeyDetails { + s.Status = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *AwsIamAccessKeyDetails) SetUserName(v string) *AwsIamAccessKeyDetails { + s.UserName = &v + return s +} + +// Provides information about the session that the key was used for. +type AwsIamAccessKeySessionContext struct { + _ struct{} `type:"structure"` + + // Attributes of the session that the key was used for. + Attributes *AwsIamAccessKeySessionContextAttributes `type:"structure"` + + // Information about the entity that created the session. + SessionIssuer *AwsIamAccessKeySessionContextSessionIssuer `type:"structure"` +} + +// String returns the string representation +func (s AwsIamAccessKeySessionContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamAccessKeySessionContext) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *AwsIamAccessKeySessionContext) SetAttributes(v *AwsIamAccessKeySessionContextAttributes) *AwsIamAccessKeySessionContext { + s.Attributes = v + return s +} + +// SetSessionIssuer sets the SessionIssuer field's value. +func (s *AwsIamAccessKeySessionContext) SetSessionIssuer(v *AwsIamAccessKeySessionContextSessionIssuer) *AwsIamAccessKeySessionContext { + s.SessionIssuer = v + return s +} + +// Attributes of the session that the key was used for. +type AwsIamAccessKeySessionContextAttributes struct { + _ struct{} `type:"structure"` + + // Indicates when the session was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreationDate *string `type:"string"` + + // Indicates whether the session used multi-factor authentication (MFA). 
+ MfaAuthenticated *bool `type:"boolean"` +} + +// String returns the string representation +func (s AwsIamAccessKeySessionContextAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamAccessKeySessionContextAttributes) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *AwsIamAccessKeySessionContextAttributes) SetCreationDate(v string) *AwsIamAccessKeySessionContextAttributes { + s.CreationDate = &v + return s +} + +// SetMfaAuthenticated sets the MfaAuthenticated field's value. +func (s *AwsIamAccessKeySessionContextAttributes) SetMfaAuthenticated(v bool) *AwsIamAccessKeySessionContextAttributes { + s.MfaAuthenticated = &v + return s +} + +// Information about the entity that created the session. +type AwsIamAccessKeySessionContextSessionIssuer struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account that created the session. + AccountId *string `type:"string"` + + // The ARN of the session. + Arn *string `type:"string"` + + // The principal ID of the principal (user, role, or group) that created the + // session. + PrincipalId *string `type:"string"` + + // The type of principal (user, role, or group) that created the session. + Type *string `type:"string"` + + // The name of the principal that created the session. + UserName *string `type:"string"` +} + +// String returns the string representation +func (s AwsIamAccessKeySessionContextSessionIssuer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamAccessKeySessionContextSessionIssuer) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *AwsIamAccessKeySessionContextSessionIssuer) SetAccountId(v string) *AwsIamAccessKeySessionContextSessionIssuer { + s.AccountId = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *AwsIamAccessKeySessionContextSessionIssuer) SetArn(v string) *AwsIamAccessKeySessionContextSessionIssuer { + s.Arn = &v + return s +} + +// SetPrincipalId sets the PrincipalId field's value. +func (s *AwsIamAccessKeySessionContextSessionIssuer) SetPrincipalId(v string) *AwsIamAccessKeySessionContextSessionIssuer { + s.PrincipalId = &v + return s +} + +// SetType sets the Type field's value. +func (s *AwsIamAccessKeySessionContextSessionIssuer) SetType(v string) *AwsIamAccessKeySessionContextSessionIssuer { + s.Type = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *AwsIamAccessKeySessionContextSessionIssuer) SetUserName(v string) *AwsIamAccessKeySessionContextSessionIssuer { + s.UserName = &v + return s +} + +// A managed policy that is attached to an IAM principal. +type AwsIamAttachedManagedPolicy struct { + _ struct{} `type:"structure"` + + // The ARN of the policy. + PolicyArn *string `type:"string"` + + // The name of the policy. + PolicyName *string `type:"string"` +} + +// String returns the string representation +func (s AwsIamAttachedManagedPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamAttachedManagedPolicy) GoString() string { + return s.String() +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *AwsIamAttachedManagedPolicy) SetPolicyArn(v string) *AwsIamAttachedManagedPolicy { + s.PolicyArn = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. 
+func (s *AwsIamAttachedManagedPolicy) SetPolicyName(v string) *AwsIamAttachedManagedPolicy { + s.PolicyName = &v + return s +} + +// Contains details about an IAM group. +type AwsIamGroupDetails struct { + _ struct{} `type:"structure"` + + // A list of the managed policies that are attached to the IAM group. + AttachedManagedPolicies []*AwsIamAttachedManagedPolicy `type:"list"` + + // Indicates when the IAM group was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreateDate *string `type:"string"` + + // The identifier of the IAM group. + GroupId *string `type:"string"` + + // The name of the IAM group. + GroupName *string `type:"string"` + + // The list of inline policies that are embedded in the group. + GroupPolicyList []*AwsIamGroupPolicy `type:"list"` + + // The path to the group. + Path *string `type:"string"` +} + +// String returns the string representation +func (s AwsIamGroupDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamGroupDetails) GoString() string { + return s.String() +} + +// SetAttachedManagedPolicies sets the AttachedManagedPolicies field's value. +func (s *AwsIamGroupDetails) SetAttachedManagedPolicies(v []*AwsIamAttachedManagedPolicy) *AwsIamGroupDetails { + s.AttachedManagedPolicies = v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *AwsIamGroupDetails) SetCreateDate(v string) *AwsIamGroupDetails { + s.CreateDate = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *AwsIamGroupDetails) SetGroupId(v string) *AwsIamGroupDetails { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *AwsIamGroupDetails) SetGroupName(v string) *AwsIamGroupDetails { + s.GroupName = &v + return s +} + +// SetGroupPolicyList sets the GroupPolicyList field's value. +func (s *AwsIamGroupDetails) SetGroupPolicyList(v []*AwsIamGroupPolicy) *AwsIamGroupDetails { + s.GroupPolicyList = v + return s +} + +// SetPath sets the Path field's value. +func (s *AwsIamGroupDetails) SetPath(v string) *AwsIamGroupDetails { + s.Path = &v + return s +} + +// A managed policy that is attached to the IAM group. +type AwsIamGroupPolicy struct { + _ struct{} `type:"structure"` + + // The name of the policy. + PolicyName *string `type:"string"` +} + +// String returns the string representation +func (s AwsIamGroupPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamGroupPolicy) GoString() string { + return s.String() +} + +// SetPolicyName sets the PolicyName field's value. +func (s *AwsIamGroupPolicy) SetPolicyName(v string) *AwsIamGroupPolicy { + s.PolicyName = &v + return s +} + +// Information about an instance profile. +type AwsIamInstanceProfile struct { + _ struct{} `type:"structure"` + + // The ARN of the instance profile. + Arn *string `type:"string"` + + // Indicates when the instance profile was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreateDate *string `type:"string"` + + // The identifier of the instance profile. 
+ InstanceProfileId *string `type:"string"` + + // The name of the instance profile. + InstanceProfileName *string `type:"string"` + + // The path to the instance profile. + Path *string `type:"string"` + + // The roles associated with the instance profile. + Roles []*AwsIamInstanceProfileRole `type:"list"` +} + +// String returns the string representation +func (s AwsIamInstanceProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamInstanceProfile) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AwsIamInstanceProfile) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsIamInstanceProfile"} + if s.Roles != nil { + for i, v := range s.Roles { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Roles", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *AwsIamInstanceProfile) SetArn(v string) *AwsIamInstanceProfile { + s.Arn = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *AwsIamInstanceProfile) SetCreateDate(v string) *AwsIamInstanceProfile { + s.CreateDate = &v + return s +} + +// SetInstanceProfileId sets the InstanceProfileId field's value. +func (s *AwsIamInstanceProfile) SetInstanceProfileId(v string) *AwsIamInstanceProfile { + s.InstanceProfileId = &v + return s +} + +// SetInstanceProfileName sets the InstanceProfileName field's value. +func (s *AwsIamInstanceProfile) SetInstanceProfileName(v string) *AwsIamInstanceProfile { + s.InstanceProfileName = &v + return s +} + +// SetPath sets the Path field's value. +func (s *AwsIamInstanceProfile) SetPath(v string) *AwsIamInstanceProfile { + s.Path = &v + return s +} + +// SetRoles sets the Roles field's value. +func (s *AwsIamInstanceProfile) SetRoles(v []*AwsIamInstanceProfileRole) *AwsIamInstanceProfile { + s.Roles = v + return s +} + +// Information about a role associated with an instance profile. +type AwsIamInstanceProfileRole struct { + _ struct{} `type:"structure"` + + // The ARN of the role. + Arn *string `type:"string"` + + // The policy that grants an entity permission to assume the role. + AssumeRolePolicyDocument *string `min:"1" type:"string"` + + // Indicates when the role was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreateDate *string `type:"string"` + + // The path to the role. + Path *string `type:"string"` + + // The identifier of the role. + RoleId *string `type:"string"` + + // The name of the role. + RoleName *string `type:"string"` +} + +// String returns the string representation +func (s AwsIamInstanceProfileRole) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamInstanceProfileRole) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AwsIamInstanceProfileRole) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsIamInstanceProfileRole"} + if s.AssumeRolePolicyDocument != nil && len(*s.AssumeRolePolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssumeRolePolicyDocument", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *AwsIamInstanceProfileRole) SetArn(v string) *AwsIamInstanceProfileRole { + s.Arn = &v + return s +} + +// SetAssumeRolePolicyDocument sets the AssumeRolePolicyDocument field's value. +func (s *AwsIamInstanceProfileRole) SetAssumeRolePolicyDocument(v string) *AwsIamInstanceProfileRole { + s.AssumeRolePolicyDocument = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *AwsIamInstanceProfileRole) SetCreateDate(v string) *AwsIamInstanceProfileRole { + s.CreateDate = &v + return s +} + +// SetPath sets the Path field's value. +func (s *AwsIamInstanceProfileRole) SetPath(v string) *AwsIamInstanceProfileRole { + s.Path = &v + return s +} + +// SetRoleId sets the RoleId field's value. +func (s *AwsIamInstanceProfileRole) SetRoleId(v string) *AwsIamInstanceProfileRole { + s.RoleId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *AwsIamInstanceProfileRole) SetRoleName(v string) *AwsIamInstanceProfileRole { + s.RoleName = &v + return s +} + +// Information about the policy used to set the permissions boundary for an +// IAM principal. +type AwsIamPermissionsBoundary struct { + _ struct{} `type:"structure"` + + // The ARN of the policy used to set the permissions boundary. + PermissionsBoundaryArn *string `type:"string"` + + // The usage type for the permissions boundary. + PermissionsBoundaryType *string `type:"string"` +} + +// String returns the string representation +func (s AwsIamPermissionsBoundary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamPermissionsBoundary) GoString() string { + return s.String() +} + +// SetPermissionsBoundaryArn sets the PermissionsBoundaryArn field's value. +func (s *AwsIamPermissionsBoundary) SetPermissionsBoundaryArn(v string) *AwsIamPermissionsBoundary { + s.PermissionsBoundaryArn = &v + return s +} + +// SetPermissionsBoundaryType sets the PermissionsBoundaryType field's value. +func (s *AwsIamPermissionsBoundary) SetPermissionsBoundaryType(v string) *AwsIamPermissionsBoundary { + s.PermissionsBoundaryType = &v + return s +} + +// Represents an IAM permissions policy. +type AwsIamPolicyDetails struct { + _ struct{} `type:"structure"` + + // The number of users, groups, and roles that the policy is attached to. + AttachmentCount *int64 `type:"integer"` + + // When the policy was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreateDate *string `type:"string"` + + // The identifier of the default version of the policy. + DefaultVersionId *string `type:"string"` + + // A description of the policy. + Description *string `type:"string"` + + // Whether the policy can be attached to a user, group, or role. + IsAttachable *bool `type:"boolean"` + + // The path to the policy. + Path *string `type:"string"` + + // The number of users and roles that use the policy to set the permissions + // boundary. 
+ PermissionsBoundaryUsageCount *int64 `type:"integer"` + + // The unique identifier of the policy. + PolicyId *string `type:"string"` + + // The name of the policy. + PolicyName *string `type:"string"` + + // List of versions of the policy. + PolicyVersionList []*AwsIamPolicyVersion `type:"list"` + + // When the policy was most recently updated. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + UpdateDate *string `type:"string"` +} + +// String returns the string representation +func (s AwsIamPolicyDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamPolicyDetails) GoString() string { + return s.String() +} + +// SetAttachmentCount sets the AttachmentCount field's value. +func (s *AwsIamPolicyDetails) SetAttachmentCount(v int64) *AwsIamPolicyDetails { + s.AttachmentCount = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *AwsIamPolicyDetails) SetCreateDate(v string) *AwsIamPolicyDetails { + s.CreateDate = &v + return s +} + +// SetDefaultVersionId sets the DefaultVersionId field's value. +func (s *AwsIamPolicyDetails) SetDefaultVersionId(v string) *AwsIamPolicyDetails { + s.DefaultVersionId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *AwsIamPolicyDetails) SetDescription(v string) *AwsIamPolicyDetails { + s.Description = &v + return s +} + +// SetIsAttachable sets the IsAttachable field's value. +func (s *AwsIamPolicyDetails) SetIsAttachable(v bool) *AwsIamPolicyDetails { + s.IsAttachable = &v + return s +} + +// SetPath sets the Path field's value. +func (s *AwsIamPolicyDetails) SetPath(v string) *AwsIamPolicyDetails { + s.Path = &v + return s +} + +// SetPermissionsBoundaryUsageCount sets the PermissionsBoundaryUsageCount field's value. +func (s *AwsIamPolicyDetails) SetPermissionsBoundaryUsageCount(v int64) *AwsIamPolicyDetails { + s.PermissionsBoundaryUsageCount = &v + return s +} + +// SetPolicyId sets the PolicyId field's value. +func (s *AwsIamPolicyDetails) SetPolicyId(v string) *AwsIamPolicyDetails { + s.PolicyId = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *AwsIamPolicyDetails) SetPolicyName(v string) *AwsIamPolicyDetails { + s.PolicyName = &v + return s +} + +// SetPolicyVersionList sets the PolicyVersionList field's value. +func (s *AwsIamPolicyDetails) SetPolicyVersionList(v []*AwsIamPolicyVersion) *AwsIamPolicyDetails { + s.PolicyVersionList = v + return s +} + +// SetUpdateDate sets the UpdateDate field's value. +func (s *AwsIamPolicyDetails) SetUpdateDate(v string) *AwsIamPolicyDetails { + s.UpdateDate = &v + return s +} + +// A version of an IAM policy. +type AwsIamPolicyVersion struct { + _ struct{} `type:"structure"` + + // Indicates when the version was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreateDate *string `type:"string"` + + // Whether the version is the default version. + IsDefaultVersion *bool `type:"boolean"` + + // The identifier of the policy version. 
+ VersionId *string `type:"string"` +} + +// String returns the string representation +func (s AwsIamPolicyVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamPolicyVersion) GoString() string { + return s.String() +} + +// SetCreateDate sets the CreateDate field's value. +func (s *AwsIamPolicyVersion) SetCreateDate(v string) *AwsIamPolicyVersion { + s.CreateDate = &v + return s +} + +// SetIsDefaultVersion sets the IsDefaultVersion field's value. +func (s *AwsIamPolicyVersion) SetIsDefaultVersion(v bool) *AwsIamPolicyVersion { + s.IsDefaultVersion = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *AwsIamPolicyVersion) SetVersionId(v string) *AwsIamPolicyVersion { + s.VersionId = &v + return s +} + +// Contains information about an IAM role, including all of the role's policies. +type AwsIamRoleDetails struct { + _ struct{} `type:"structure"` + + // The trust policy that grants permission to assume the role. + AssumeRolePolicyDocument *string `min:"1" type:"string"` + + // The list of the managed policies that are attached to the role. + AttachedManagedPolicies []*AwsIamAttachedManagedPolicy `type:"list"` + + // Indicates when the role was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreateDate *string `type:"string"` + + // The list of instance profiles that contain this role. + InstanceProfileList []*AwsIamInstanceProfile `type:"list"` + + // The maximum session duration (in seconds) that you want to set for the specified + // role. + MaxSessionDuration *int64 `type:"integer"` + + // The path to the role. + Path *string `type:"string"` + + // Information about the policy used to set the permissions boundary for an + // IAM principal. + PermissionsBoundary *AwsIamPermissionsBoundary `type:"structure"` + + // The stable and unique string identifying the role. + RoleId *string `type:"string"` + + // The friendly name that identifies the role. + RoleName *string `type:"string"` + + // The list of inline policies that are embedded in the role. + RolePolicyList []*AwsIamRolePolicy `type:"list"` +} + +// String returns the string representation +func (s AwsIamRoleDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamRoleDetails) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AwsIamRoleDetails) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsIamRoleDetails"} + if s.AssumeRolePolicyDocument != nil && len(*s.AssumeRolePolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssumeRolePolicyDocument", 1)) + } + if s.InstanceProfileList != nil { + for i, v := range s.InstanceProfileList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceProfileList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssumeRolePolicyDocument sets the AssumeRolePolicyDocument field's value. 
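+//
+// A minimal usage sketch; trustPolicyJSON stands in for a real trust policy
+// document and the role name is hypothetical:
+//
+//    role := (&AwsIamRoleDetails{}).
+//        SetRoleName("example-role").
+//        SetAssumeRolePolicyDocument(trustPolicyJSON)
+//    if err := role.Validate(); err != nil {
+//        // Validate rejects an AssumeRolePolicyDocument shorter than one character
+//    }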
+func (s *AwsIamRoleDetails) SetAssumeRolePolicyDocument(v string) *AwsIamRoleDetails { + s.AssumeRolePolicyDocument = &v + return s +} + +// SetAttachedManagedPolicies sets the AttachedManagedPolicies field's value. +func (s *AwsIamRoleDetails) SetAttachedManagedPolicies(v []*AwsIamAttachedManagedPolicy) *AwsIamRoleDetails { + s.AttachedManagedPolicies = v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *AwsIamRoleDetails) SetCreateDate(v string) *AwsIamRoleDetails { + s.CreateDate = &v + return s +} + +// SetInstanceProfileList sets the InstanceProfileList field's value. +func (s *AwsIamRoleDetails) SetInstanceProfileList(v []*AwsIamInstanceProfile) *AwsIamRoleDetails { + s.InstanceProfileList = v + return s +} + +// SetMaxSessionDuration sets the MaxSessionDuration field's value. +func (s *AwsIamRoleDetails) SetMaxSessionDuration(v int64) *AwsIamRoleDetails { + s.MaxSessionDuration = &v + return s +} + +// SetPath sets the Path field's value. +func (s *AwsIamRoleDetails) SetPath(v string) *AwsIamRoleDetails { + s.Path = &v + return s +} + +// SetPermissionsBoundary sets the PermissionsBoundary field's value. +func (s *AwsIamRoleDetails) SetPermissionsBoundary(v *AwsIamPermissionsBoundary) *AwsIamRoleDetails { + s.PermissionsBoundary = v + return s +} + +// SetRoleId sets the RoleId field's value. +func (s *AwsIamRoleDetails) SetRoleId(v string) *AwsIamRoleDetails { + s.RoleId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *AwsIamRoleDetails) SetRoleName(v string) *AwsIamRoleDetails { + s.RoleName = &v + return s +} + +// SetRolePolicyList sets the RolePolicyList field's value. +func (s *AwsIamRoleDetails) SetRolePolicyList(v []*AwsIamRolePolicy) *AwsIamRoleDetails { + s.RolePolicyList = v + return s +} + +// An inline policy that is embedded in the role. +type AwsIamRolePolicy struct { + _ struct{} `type:"structure"` + + // The name of the policy. + PolicyName *string `type:"string"` +} + +// String returns the string representation +func (s AwsIamRolePolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamRolePolicy) GoString() string { + return s.String() +} + +// SetPolicyName sets the PolicyName field's value. +func (s *AwsIamRolePolicy) SetPolicyName(v string) *AwsIamRolePolicy { + s.PolicyName = &v + return s +} + +// Information about an IAM user. +type AwsIamUserDetails struct { + _ struct{} `type:"structure"` + + // A list of the managed policies that are attached to the user. + AttachedManagedPolicies []*AwsIamAttachedManagedPolicy `type:"list"` + + // Indicates when the user was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreateDate *string `type:"string"` + + // A list of IAM groups that the user belongs to. + GroupList []*string `type:"list"` + + // The path to the user. + Path *string `type:"string"` + + // The permissions boundary for the user. + PermissionsBoundary *AwsIamPermissionsBoundary `type:"structure"` + + // The unique identifier for the user. + UserId *string `type:"string"` + + // The name of the user. + UserName *string `type:"string"` + + // The list of inline policies that are embedded in the user. 
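+ //
+ // A minimal sketch of populating an AwsIamUserDetails value (all identifiers
+ // are hypothetical):
+ //
+ //    user := (&AwsIamUserDetails{}).
+ //        SetUserName("example-user").
+ //        SetUserId("AIDAEXAMPLEID").
+ //        SetUserPolicyList([]*AwsIamUserPolicy{
+ //            (&AwsIamUserPolicy{}).SetPolicyName("inline-example"),
+ //        })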
+ UserPolicyList []*AwsIamUserPolicy `type:"list"` +} + +// String returns the string representation +func (s AwsIamUserDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamUserDetails) GoString() string { + return s.String() +} + +// SetAttachedManagedPolicies sets the AttachedManagedPolicies field's value. +func (s *AwsIamUserDetails) SetAttachedManagedPolicies(v []*AwsIamAttachedManagedPolicy) *AwsIamUserDetails { + s.AttachedManagedPolicies = v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *AwsIamUserDetails) SetCreateDate(v string) *AwsIamUserDetails { + s.CreateDate = &v + return s +} + +// SetGroupList sets the GroupList field's value. +func (s *AwsIamUserDetails) SetGroupList(v []*string) *AwsIamUserDetails { + s.GroupList = v + return s +} + +// SetPath sets the Path field's value. +func (s *AwsIamUserDetails) SetPath(v string) *AwsIamUserDetails { + s.Path = &v + return s +} + +// SetPermissionsBoundary sets the PermissionsBoundary field's value. +func (s *AwsIamUserDetails) SetPermissionsBoundary(v *AwsIamPermissionsBoundary) *AwsIamUserDetails { + s.PermissionsBoundary = v + return s +} + +// SetUserId sets the UserId field's value. +func (s *AwsIamUserDetails) SetUserId(v string) *AwsIamUserDetails { + s.UserId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *AwsIamUserDetails) SetUserName(v string) *AwsIamUserDetails { + s.UserName = &v + return s +} + +// SetUserPolicyList sets the UserPolicyList field's value. +func (s *AwsIamUserDetails) SetUserPolicyList(v []*AwsIamUserPolicy) *AwsIamUserDetails { + s.UserPolicyList = v + return s +} + +// Information about an inline policy that is embedded in the user. +type AwsIamUserPolicy struct { + _ struct{} `type:"structure"` + + // The name of the policy. + PolicyName *string `type:"string"` +} + +// String returns the string representation +func (s AwsIamUserPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsIamUserPolicy) GoString() string { + return s.String() +} + +// SetPolicyName sets the PolicyName field's value. +func (s *AwsIamUserPolicy) SetPolicyName(v string) *AwsIamUserPolicy { + s.PolicyName = &v + return s +} + +// Contains metadata about a customer master key (CMK). +type AwsKmsKeyDetails struct { + _ struct{} `type:"structure"` + + // The twelve-digit account ID of the AWS account that owns the CMK. + AWSAccountId *string `type:"string"` + + // Indicates when the CMK was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreationDate *float64 `type:"double"` + + // A description of the key. + Description *string `type:"string"` + + // The globally unique identifier for the CMK. + KeyId *string `type:"string"` + + // The manager of the CMK. CMKs in your AWS account are either customer managed + // or AWS managed. + KeyManager *string `type:"string"` + + // The state of the CMK. + KeyState *string `type:"string"` + + // The source of the CMK's key material. + // + // When this value is AWS_KMS, AWS KMS created the key material. + // + // When this value is EXTERNAL, the key material was imported from your existing + // key management infrastructure or the CMK lacks key material. 
+ // + // When this value is AWS_CLOUDHSM, the key material was created in the AWS + // CloudHSM cluster associated with a custom key store. + Origin *string `type:"string"` +} + +// String returns the string representation +func (s AwsKmsKeyDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsKmsKeyDetails) GoString() string { + return s.String() +} + +// SetAWSAccountId sets the AWSAccountId field's value. +func (s *AwsKmsKeyDetails) SetAWSAccountId(v string) *AwsKmsKeyDetails { + s.AWSAccountId = &v + return s +} + +// SetCreationDate sets the CreationDate field's value. +func (s *AwsKmsKeyDetails) SetCreationDate(v float64) *AwsKmsKeyDetails { + s.CreationDate = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *AwsKmsKeyDetails) SetDescription(v string) *AwsKmsKeyDetails { + s.Description = &v + return s +} + +// SetKeyId sets the KeyId field's value. +func (s *AwsKmsKeyDetails) SetKeyId(v string) *AwsKmsKeyDetails { + s.KeyId = &v + return s +} + +// SetKeyManager sets the KeyManager field's value. +func (s *AwsKmsKeyDetails) SetKeyManager(v string) *AwsKmsKeyDetails { + s.KeyManager = &v + return s +} + +// SetKeyState sets the KeyState field's value. +func (s *AwsKmsKeyDetails) SetKeyState(v string) *AwsKmsKeyDetails { + s.KeyState = &v + return s +} + +// SetOrigin sets the Origin field's value. +func (s *AwsKmsKeyDetails) SetOrigin(v string) *AwsKmsKeyDetails { + s.Origin = &v + return s +} + +// The code for the Lambda function. You can specify either an object in Amazon +// S3, or upload a deployment package directly. +type AwsLambdaFunctionCode struct { + _ struct{} `type:"structure"` + + // An Amazon S3 bucket in the same AWS Region as your function. The bucket can + // be in a different AWS account. + S3Bucket *string `type:"string"` + + // The Amazon S3 key of the deployment package. + S3Key *string `type:"string"` + + // For versioned objects, the version of the deployment package object to use. + S3ObjectVersion *string `type:"string"` + + // The base64-encoded contents of the deployment package. AWS SDK and AWS CLI + // clients handle the encoding for you. + ZipFile *string `type:"string"` +} + +// String returns the string representation +func (s AwsLambdaFunctionCode) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsLambdaFunctionCode) GoString() string { + return s.String() +} + +// SetS3Bucket sets the S3Bucket field's value. +func (s *AwsLambdaFunctionCode) SetS3Bucket(v string) *AwsLambdaFunctionCode { + s.S3Bucket = &v + return s +} + +// SetS3Key sets the S3Key field's value. +func (s *AwsLambdaFunctionCode) SetS3Key(v string) *AwsLambdaFunctionCode { + s.S3Key = &v + return s +} + +// SetS3ObjectVersion sets the S3ObjectVersion field's value. +func (s *AwsLambdaFunctionCode) SetS3ObjectVersion(v string) *AwsLambdaFunctionCode { + s.S3ObjectVersion = &v + return s +} + +// SetZipFile sets the ZipFile field's value. +func (s *AwsLambdaFunctionCode) SetZipFile(v string) *AwsLambdaFunctionCode { + s.ZipFile = &v + return s +} + +// The dead-letter queue for failed asynchronous invocations. +type AwsLambdaFunctionDeadLetterConfig struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic. 
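+ //
+ // A minimal sketch (bucket, key, and queue ARN are hypothetical); both values
+ // are typically attached to AwsLambdaFunctionDetails through SetCode and
+ // SetDeadLetterConfig:
+ //
+ //    code := (&AwsLambdaFunctionCode{}).
+ //        SetS3Bucket("example-bucket").
+ //        SetS3Key("function.zip")
+ //    dlq := (&AwsLambdaFunctionDeadLetterConfig{}).
+ //        SetTargetArn("arn:aws:sqs:us-east-1:123456789012:example-dlq")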
+ TargetArn *string `type:"string"` +} + +// String returns the string representation +func (s AwsLambdaFunctionDeadLetterConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsLambdaFunctionDeadLetterConfig) GoString() string { + return s.String() +} + +// SetTargetArn sets the TargetArn field's value. +func (s *AwsLambdaFunctionDeadLetterConfig) SetTargetArn(v string) *AwsLambdaFunctionDeadLetterConfig { + s.TargetArn = &v + return s +} + +// Details about a function's configuration. +type AwsLambdaFunctionDetails struct { + _ struct{} `type:"structure"` + + // An AwsLambdaFunctionCode object. + Code *AwsLambdaFunctionCode `type:"structure"` + + // The SHA256 hash of the function's deployment package. + CodeSha256 *string `type:"string"` + + // The function's dead letter queue. + DeadLetterConfig *AwsLambdaFunctionDeadLetterConfig `type:"structure"` + + // The function's environment variables. + Environment *AwsLambdaFunctionEnvironment `type:"structure"` + + // The name of the function. + FunctionName *string `type:"string"` + + // The function that Lambda calls to begin executing your function. + Handler *string `type:"string"` + + // The KMS key that's used to encrypt the function's environment variables. + // This key is only returned if you've configured a customer managed CMK. + KmsKeyArn *string `type:"string"` + + // Indicates when the function was last updated. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + LastModified *string `type:"string"` + + // The function's layers. + Layers []*AwsLambdaFunctionLayer `type:"list"` + + // For Lambda@Edge functions, the ARN of the master function. + MasterArn *string `type:"string"` + + // The memory that's allocated to the function. + MemorySize *int64 `type:"integer"` + + // The latest updated revision of the function or alias. + RevisionId *string `type:"string"` + + // The function's execution role. + Role *string `type:"string"` + + // The runtime environment for the Lambda function. + Runtime *string `type:"string"` + + // The amount of time that Lambda allows a function to run before stopping it. + Timeout *int64 `type:"integer"` + + // The function's AWS X-Ray tracing configuration. + TracingConfig *AwsLambdaFunctionTracingConfig `type:"structure"` + + // The version of the Lambda function. + Version *string `type:"string"` + + // The function's networking configuration. + VpcConfig *AwsLambdaFunctionVpcConfig `type:"structure"` +} + +// String returns the string representation +func (s AwsLambdaFunctionDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsLambdaFunctionDetails) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *AwsLambdaFunctionDetails) SetCode(v *AwsLambdaFunctionCode) *AwsLambdaFunctionDetails { + s.Code = v + return s +} + +// SetCodeSha256 sets the CodeSha256 field's value. +func (s *AwsLambdaFunctionDetails) SetCodeSha256(v string) *AwsLambdaFunctionDetails { + s.CodeSha256 = &v + return s +} + +// SetDeadLetterConfig sets the DeadLetterConfig field's value. 
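+//
+// A minimal sketch of wiring a dead-letter configuration into
+// AwsLambdaFunctionDetails (the names and ARN are hypothetical):
+//
+//    fn := (&AwsLambdaFunctionDetails{}).
+//        SetFunctionName("example-function").
+//        SetRuntime("go1.x").
+//        SetTimeout(30).
+//        SetDeadLetterConfig((&AwsLambdaFunctionDeadLetterConfig{}).
+//            SetTargetArn("arn:aws:sqs:us-east-1:123456789012:example-dlq"))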
+func (s *AwsLambdaFunctionDetails) SetDeadLetterConfig(v *AwsLambdaFunctionDeadLetterConfig) *AwsLambdaFunctionDetails { + s.DeadLetterConfig = v + return s +} + +// SetEnvironment sets the Environment field's value. +func (s *AwsLambdaFunctionDetails) SetEnvironment(v *AwsLambdaFunctionEnvironment) *AwsLambdaFunctionDetails { + s.Environment = v + return s +} + +// SetFunctionName sets the FunctionName field's value. +func (s *AwsLambdaFunctionDetails) SetFunctionName(v string) *AwsLambdaFunctionDetails { + s.FunctionName = &v + return s +} + +// SetHandler sets the Handler field's value. +func (s *AwsLambdaFunctionDetails) SetHandler(v string) *AwsLambdaFunctionDetails { + s.Handler = &v + return s +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *AwsLambdaFunctionDetails) SetKmsKeyArn(v string) *AwsLambdaFunctionDetails { + s.KmsKeyArn = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *AwsLambdaFunctionDetails) SetLastModified(v string) *AwsLambdaFunctionDetails { + s.LastModified = &v + return s +} + +// SetLayers sets the Layers field's value. +func (s *AwsLambdaFunctionDetails) SetLayers(v []*AwsLambdaFunctionLayer) *AwsLambdaFunctionDetails { + s.Layers = v + return s +} + +// SetMasterArn sets the MasterArn field's value. +func (s *AwsLambdaFunctionDetails) SetMasterArn(v string) *AwsLambdaFunctionDetails { + s.MasterArn = &v + return s +} + +// SetMemorySize sets the MemorySize field's value. +func (s *AwsLambdaFunctionDetails) SetMemorySize(v int64) *AwsLambdaFunctionDetails { + s.MemorySize = &v + return s +} + +// SetRevisionId sets the RevisionId field's value. +func (s *AwsLambdaFunctionDetails) SetRevisionId(v string) *AwsLambdaFunctionDetails { + s.RevisionId = &v + return s +} + +// SetRole sets the Role field's value. +func (s *AwsLambdaFunctionDetails) SetRole(v string) *AwsLambdaFunctionDetails { + s.Role = &v + return s +} + +// SetRuntime sets the Runtime field's value. +func (s *AwsLambdaFunctionDetails) SetRuntime(v string) *AwsLambdaFunctionDetails { + s.Runtime = &v + return s +} + +// SetTimeout sets the Timeout field's value. +func (s *AwsLambdaFunctionDetails) SetTimeout(v int64) *AwsLambdaFunctionDetails { + s.Timeout = &v + return s +} + +// SetTracingConfig sets the TracingConfig field's value. +func (s *AwsLambdaFunctionDetails) SetTracingConfig(v *AwsLambdaFunctionTracingConfig) *AwsLambdaFunctionDetails { + s.TracingConfig = v + return s +} + +// SetVersion sets the Version field's value. +func (s *AwsLambdaFunctionDetails) SetVersion(v string) *AwsLambdaFunctionDetails { + s.Version = &v + return s +} + +// SetVpcConfig sets the VpcConfig field's value. +func (s *AwsLambdaFunctionDetails) SetVpcConfig(v *AwsLambdaFunctionVpcConfig) *AwsLambdaFunctionDetails { + s.VpcConfig = v + return s +} + +// A function's environment variable settings. +type AwsLambdaFunctionEnvironment struct { + _ struct{} `type:"structure"` + + // An AwsLambdaFunctionEnvironmentError object. + Error *AwsLambdaFunctionEnvironmentError `type:"structure"` + + // Environment variable key-value pairs. + Variables map[string]*string `type:"map"` +} + +// String returns the string representation +func (s AwsLambdaFunctionEnvironment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsLambdaFunctionEnvironment) GoString() string { + return s.String() +} + +// SetError sets the Error field's value. 
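+//
+// A minimal sketch of building an environment block (the variable name and
+// value are hypothetical; the Variables map takes *string values):
+//
+//    stage := "production"
+//    env := (&AwsLambdaFunctionEnvironment{}).
+//        SetVariables(map[string]*string{"STAGE": &stage})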
+func (s *AwsLambdaFunctionEnvironment) SetError(v *AwsLambdaFunctionEnvironmentError) *AwsLambdaFunctionEnvironment { + s.Error = v + return s +} + +// SetVariables sets the Variables field's value. +func (s *AwsLambdaFunctionEnvironment) SetVariables(v map[string]*string) *AwsLambdaFunctionEnvironment { + s.Variables = v + return s +} + +// Error messages for environment variables that couldn't be applied. +type AwsLambdaFunctionEnvironmentError struct { + _ struct{} `type:"structure"` + + // The error code. + ErrorCode *string `type:"string"` + + // The error message. + Message *string `type:"string"` +} + +// String returns the string representation +func (s AwsLambdaFunctionEnvironmentError) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsLambdaFunctionEnvironmentError) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *AwsLambdaFunctionEnvironmentError) SetErrorCode(v string) *AwsLambdaFunctionEnvironmentError { + s.ErrorCode = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *AwsLambdaFunctionEnvironmentError) SetMessage(v string) *AwsLambdaFunctionEnvironmentError { + s.Message = &v + return s +} + +// An AWS Lambda layer. +type AwsLambdaFunctionLayer struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the function layer. + Arn *string `type:"string"` + + // The size of the layer archive in bytes. + CodeSize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AwsLambdaFunctionLayer) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsLambdaFunctionLayer) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AwsLambdaFunctionLayer) SetArn(v string) *AwsLambdaFunctionLayer { + s.Arn = &v + return s +} + +// SetCodeSize sets the CodeSize field's value. +func (s *AwsLambdaFunctionLayer) SetCodeSize(v int64) *AwsLambdaFunctionLayer { + s.CodeSize = &v + return s +} + +// The function's AWS X-Ray tracing configuration. +type AwsLambdaFunctionTracingConfig struct { + _ struct{} `type:"structure"` + + // The tracing mode. + Mode *string `type:"string"` +} + +// String returns the string representation +func (s AwsLambdaFunctionTracingConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsLambdaFunctionTracingConfig) GoString() string { + return s.String() +} + +// SetMode sets the Mode field's value. +func (s *AwsLambdaFunctionTracingConfig) SetMode(v string) *AwsLambdaFunctionTracingConfig { + s.Mode = &v + return s +} + +// The VPC security groups and subnets that are attached to a Lambda function. +// For more information, see VPC Settings. +type AwsLambdaFunctionVpcConfig struct { + _ struct{} `type:"structure"` + + // A list of VPC security groups IDs. + SecurityGroupIds []*string `type:"list"` + + // A list of VPC subnet IDs. + SubnetIds []*string `type:"list"` + + // The ID of the VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s AwsLambdaFunctionVpcConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsLambdaFunctionVpcConfig) GoString() string { + return s.String() +} + +// SetSecurityGroupIds sets the SecurityGroupIds field's value. 
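+//
+// A minimal sketch (the IDs are hypothetical; both list fields take *string
+// elements):
+//
+//    sg := "sg-0123456789abcdef0"
+//    subnet := "subnet-0123456789abcdef0"
+//    vpcCfg := (&AwsLambdaFunctionVpcConfig{}).
+//        SetVpcId("vpc-0123456789abcdef0").
+//        SetSecurityGroupIds([]*string{&sg}).
+//        SetSubnetIds([]*string{&subnet})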
+func (s *AwsLambdaFunctionVpcConfig) SetSecurityGroupIds(v []*string) *AwsLambdaFunctionVpcConfig { + s.SecurityGroupIds = v + return s +} + +// SetSubnetIds sets the SubnetIds field's value. +func (s *AwsLambdaFunctionVpcConfig) SetSubnetIds(v []*string) *AwsLambdaFunctionVpcConfig { + s.SubnetIds = v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *AwsLambdaFunctionVpcConfig) SetVpcId(v string) *AwsLambdaFunctionVpcConfig { + s.VpcId = &v + return s +} + +// Details about a Lambda layer version. +type AwsLambdaLayerVersionDetails struct { + _ struct{} `type:"structure"` + + // The layer's compatible runtimes. Maximum number of five items. + // + // Valid values: nodejs10.x | nodejs12.x | java8 | java11 | python2.7 | python3.6 + // | python3.7 | python3.8 | dotnetcore1.0 | dotnetcore2.1 | go1.x | ruby2.5 + // | provided + CompatibleRuntimes []*string `type:"list"` + + // Indicates when the version was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + CreatedDate *string `type:"string"` + + // The version number. + Version *int64 `type:"long"` +} + +// String returns the string representation +func (s AwsLambdaLayerVersionDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsLambdaLayerVersionDetails) GoString() string { + return s.String() +} + +// SetCompatibleRuntimes sets the CompatibleRuntimes field's value. +func (s *AwsLambdaLayerVersionDetails) SetCompatibleRuntimes(v []*string) *AwsLambdaLayerVersionDetails { + s.CompatibleRuntimes = v + return s +} + +// SetCreatedDate sets the CreatedDate field's value. +func (s *AwsLambdaLayerVersionDetails) SetCreatedDate(v string) *AwsLambdaLayerVersionDetails { + s.CreatedDate = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *AwsLambdaLayerVersionDetails) SetVersion(v int64) *AwsLambdaLayerVersionDetails { + s.Version = &v + return s +} + +// An IAM role that is associated with the Amazon RDS DB cluster. +type AwsRdsDbClusterAssociatedRole struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM role. + RoleArn *string `type:"string"` + + // The status of the association between the IAM role and the DB cluster. + Status *string `type:"string"` +} + +// String returns the string representation +func (s AwsRdsDbClusterAssociatedRole) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbClusterAssociatedRole) GoString() string { + return s.String() +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AwsRdsDbClusterAssociatedRole) SetRoleArn(v string) *AwsRdsDbClusterAssociatedRole { + s.RoleArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsRdsDbClusterAssociatedRole) SetStatus(v string) *AwsRdsDbClusterAssociatedRole { + s.Status = &v + return s +} + +// Information about an Amazon RDS DB cluster. +type AwsRdsDbClusterDetails struct { + _ struct{} `type:"structure"` + + // The status of the database activity stream. + ActivityStreamStatus *string `type:"string"` + + // For all database engines except Aurora, specifies the allocated storage size + // in gibibytes (GiB). + AllocatedStorage *int64 `type:"integer"` + + // A list of the IAM roles that are associated with the DB cluster. 
+ AssociatedRoles []*AwsRdsDbClusterAssociatedRole `type:"list"` + + // A list of Availability Zones (AZs) where instances in the DB cluster can + // be created. + AvailabilityZones []*string `type:"list"` + + // The number of days for which automated backups are retained. + BackupRetentionPeriod *int64 `type:"integer"` + + // Indicates when the DB cluster was created, in Universal Coordinated Time + // (UTC). + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + ClusterCreateTime *string `type:"string"` + + // Whether tags are copied from the DB cluster to snapshots of the DB cluster. + CopyTagsToSnapshot *bool `type:"boolean"` + + // Whether the DB cluster is a clone of a DB cluster owned by a different AWS + // account. + CrossAccountClone *bool `type:"boolean"` + + // A list of custom endpoints for the DB cluster. + CustomEndpoints []*string `type:"list"` + + // The name of the database. + DatabaseName *string `type:"string"` + + // The DB cluster identifier that the user assigned to the cluster. This identifier + // is the unique key that identifies a DB cluster. + DbClusterIdentifier *string `type:"string"` + + // The list of instances that make up the DB cluster. + DbClusterMembers []*AwsRdsDbClusterMember `type:"list"` + + // The list of option group memberships for this DB cluster. + DbClusterOptionGroupMemberships []*AwsRdsDbClusterOptionGroupMembership `type:"list"` + + // The name of the DB cluster parameter group for the DB cluster. + DbClusterParameterGroup *string `type:"string"` + + // The identifier of the DB cluster. The identifier must be unique within each + // AWS Region and is immutable. + DbClusterResourceId *string `type:"string"` + + // The subnet group that is associated with the DB cluster, including the name, + // description, and subnets in the subnet group. + DbSubnetGroup *string `type:"string"` + + // Whether the DB cluster has deletion protection enabled. + DeletionProtection *bool `type:"boolean"` + + // The Active Directory domain membership records that are associated with the + // DB cluster. + DomainMemberships []*AwsRdsDbDomainMembership `type:"list"` + + // A list of log types that this DB cluster is configured to export to CloudWatch + // Logs. + EnabledCloudWatchLogsExports []*string `type:"list"` + + // The connection endpoint for the primary instance of the DB cluster. + Endpoint *string `type:"string"` + + // The name of the database engine to use for this DB cluster. + Engine *string `type:"string"` + + // The database engine mode of the DB cluster. + EngineMode *string `type:"string"` + + // The version number of the database engine to use. + EngineVersion *string `type:"string"` + + // Specifies the identifier that Amazon Route 53 assigns when you create a hosted + // zone. + HostedZoneId *string `type:"string"` + + // Whether the HTTP endpoint for an Aurora Serverless DB cluster is enabled. + HttpEndpointEnabled *bool `type:"boolean"` + + // Whether the mapping of IAM accounts to database accounts is enabled. + IamDatabaseAuthenticationEnabled *bool `type:"boolean"` + + // The ARN of the AWS KMS master key that is used to encrypt the database instances + // in the DB cluster. + KmsKeyId *string `type:"string"` + + // The name of the master user for the DB cluster. 
+ MasterUsername *string `type:"string"` + + // Whether the DB cluster has instances in multiple Availability Zones. + MultiAz *bool `type:"boolean"` + + // The port number on which the DB instances in the DB cluster accept connections. + Port *int64 `type:"integer"` + + // The range of time each day when automated backups are created, if automated + // backups are enabled. + // + // Uses the format HH:MM-HH:MM. For example, 04:52-05:22. + PreferredBackupWindow *string `type:"string"` + + // The weekly time range during which system maintenance can occur, in Universal + // Coordinated Time (UTC). + // + // Uses the format :HH:MM-:HH:MM. + // + // For the day values, use mon|tue|wed|thu|fri|sat|sun. + // + // For example, sun:09:32-sun:10:02. + PreferredMaintenanceWindow *string `type:"string"` + + // The identifiers of the read replicas that are associated with this DB cluster. + ReadReplicaIdentifiers []*string `type:"list"` + + // The reader endpoint for the DB cluster. + ReaderEndpoint *string `type:"string"` + + // The current status of this DB cluster. + Status *string `type:"string"` + + // Whether the DB cluster is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // A list of VPC security groups that the DB cluster belongs to. + VpcSecurityGroups []*AwsRdsDbInstanceVpcSecurityGroup `type:"list"` +} + +// String returns the string representation +func (s AwsRdsDbClusterDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbClusterDetails) GoString() string { + return s.String() +} + +// SetActivityStreamStatus sets the ActivityStreamStatus field's value. +func (s *AwsRdsDbClusterDetails) SetActivityStreamStatus(v string) *AwsRdsDbClusterDetails { + s.ActivityStreamStatus = &v + return s +} + +// SetAllocatedStorage sets the AllocatedStorage field's value. +func (s *AwsRdsDbClusterDetails) SetAllocatedStorage(v int64) *AwsRdsDbClusterDetails { + s.AllocatedStorage = &v + return s +} + +// SetAssociatedRoles sets the AssociatedRoles field's value. +func (s *AwsRdsDbClusterDetails) SetAssociatedRoles(v []*AwsRdsDbClusterAssociatedRole) *AwsRdsDbClusterDetails { + s.AssociatedRoles = v + return s +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *AwsRdsDbClusterDetails) SetAvailabilityZones(v []*string) *AwsRdsDbClusterDetails { + s.AvailabilityZones = v + return s +} + +// SetBackupRetentionPeriod sets the BackupRetentionPeriod field's value. +func (s *AwsRdsDbClusterDetails) SetBackupRetentionPeriod(v int64) *AwsRdsDbClusterDetails { + s.BackupRetentionPeriod = &v + return s +} + +// SetClusterCreateTime sets the ClusterCreateTime field's value. +func (s *AwsRdsDbClusterDetails) SetClusterCreateTime(v string) *AwsRdsDbClusterDetails { + s.ClusterCreateTime = &v + return s +} + +// SetCopyTagsToSnapshot sets the CopyTagsToSnapshot field's value. +func (s *AwsRdsDbClusterDetails) SetCopyTagsToSnapshot(v bool) *AwsRdsDbClusterDetails { + s.CopyTagsToSnapshot = &v + return s +} + +// SetCrossAccountClone sets the CrossAccountClone field's value. +func (s *AwsRdsDbClusterDetails) SetCrossAccountClone(v bool) *AwsRdsDbClusterDetails { + s.CrossAccountClone = &v + return s +} + +// SetCustomEndpoints sets the CustomEndpoints field's value. +func (s *AwsRdsDbClusterDetails) SetCustomEndpoints(v []*string) *AwsRdsDbClusterDetails { + s.CustomEndpoints = v + return s +} + +// SetDatabaseName sets the DatabaseName field's value. 
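+//
+// A minimal sketch of describing a DB cluster with the fluent setters (all
+// values are hypothetical):
+//
+//    cluster := (&AwsRdsDbClusterDetails{}).
+//        SetDbClusterIdentifier("example-cluster").
+//        SetEngine("aurora-mysql").
+//        SetDatabaseName("exampledb").
+//        SetStorageEncrypted(true)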
+func (s *AwsRdsDbClusterDetails) SetDatabaseName(v string) *AwsRdsDbClusterDetails { + s.DatabaseName = &v + return s +} + +// SetDbClusterIdentifier sets the DbClusterIdentifier field's value. +func (s *AwsRdsDbClusterDetails) SetDbClusterIdentifier(v string) *AwsRdsDbClusterDetails { + s.DbClusterIdentifier = &v + return s +} + +// SetDbClusterMembers sets the DbClusterMembers field's value. +func (s *AwsRdsDbClusterDetails) SetDbClusterMembers(v []*AwsRdsDbClusterMember) *AwsRdsDbClusterDetails { + s.DbClusterMembers = v + return s +} + +// SetDbClusterOptionGroupMemberships sets the DbClusterOptionGroupMemberships field's value. +func (s *AwsRdsDbClusterDetails) SetDbClusterOptionGroupMemberships(v []*AwsRdsDbClusterOptionGroupMembership) *AwsRdsDbClusterDetails { + s.DbClusterOptionGroupMemberships = v + return s +} + +// SetDbClusterParameterGroup sets the DbClusterParameterGroup field's value. +func (s *AwsRdsDbClusterDetails) SetDbClusterParameterGroup(v string) *AwsRdsDbClusterDetails { + s.DbClusterParameterGroup = &v + return s +} + +// SetDbClusterResourceId sets the DbClusterResourceId field's value. +func (s *AwsRdsDbClusterDetails) SetDbClusterResourceId(v string) *AwsRdsDbClusterDetails { + s.DbClusterResourceId = &v + return s +} + +// SetDbSubnetGroup sets the DbSubnetGroup field's value. +func (s *AwsRdsDbClusterDetails) SetDbSubnetGroup(v string) *AwsRdsDbClusterDetails { + s.DbSubnetGroup = &v + return s +} + +// SetDeletionProtection sets the DeletionProtection field's value. +func (s *AwsRdsDbClusterDetails) SetDeletionProtection(v bool) *AwsRdsDbClusterDetails { + s.DeletionProtection = &v + return s +} + +// SetDomainMemberships sets the DomainMemberships field's value. +func (s *AwsRdsDbClusterDetails) SetDomainMemberships(v []*AwsRdsDbDomainMembership) *AwsRdsDbClusterDetails { + s.DomainMemberships = v + return s +} + +// SetEnabledCloudWatchLogsExports sets the EnabledCloudWatchLogsExports field's value. +func (s *AwsRdsDbClusterDetails) SetEnabledCloudWatchLogsExports(v []*string) *AwsRdsDbClusterDetails { + s.EnabledCloudWatchLogsExports = v + return s +} + +// SetEndpoint sets the Endpoint field's value. +func (s *AwsRdsDbClusterDetails) SetEndpoint(v string) *AwsRdsDbClusterDetails { + s.Endpoint = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *AwsRdsDbClusterDetails) SetEngine(v string) *AwsRdsDbClusterDetails { + s.Engine = &v + return s +} + +// SetEngineMode sets the EngineMode field's value. +func (s *AwsRdsDbClusterDetails) SetEngineMode(v string) *AwsRdsDbClusterDetails { + s.EngineMode = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *AwsRdsDbClusterDetails) SetEngineVersion(v string) *AwsRdsDbClusterDetails { + s.EngineVersion = &v + return s +} + +// SetHostedZoneId sets the HostedZoneId field's value. +func (s *AwsRdsDbClusterDetails) SetHostedZoneId(v string) *AwsRdsDbClusterDetails { + s.HostedZoneId = &v + return s +} + +// SetHttpEndpointEnabled sets the HttpEndpointEnabled field's value. +func (s *AwsRdsDbClusterDetails) SetHttpEndpointEnabled(v bool) *AwsRdsDbClusterDetails { + s.HttpEndpointEnabled = &v + return s +} + +// SetIamDatabaseAuthenticationEnabled sets the IamDatabaseAuthenticationEnabled field's value. +func (s *AwsRdsDbClusterDetails) SetIamDatabaseAuthenticationEnabled(v bool) *AwsRdsDbClusterDetails { + s.IamDatabaseAuthenticationEnabled = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. 
+func (s *AwsRdsDbClusterDetails) SetKmsKeyId(v string) *AwsRdsDbClusterDetails { + s.KmsKeyId = &v + return s +} + +// SetMasterUsername sets the MasterUsername field's value. +func (s *AwsRdsDbClusterDetails) SetMasterUsername(v string) *AwsRdsDbClusterDetails { + s.MasterUsername = &v + return s +} + +// SetMultiAz sets the MultiAz field's value. +func (s *AwsRdsDbClusterDetails) SetMultiAz(v bool) *AwsRdsDbClusterDetails { + s.MultiAz = &v + return s +} + +// SetPort sets the Port field's value. +func (s *AwsRdsDbClusterDetails) SetPort(v int64) *AwsRdsDbClusterDetails { + s.Port = &v + return s +} + +// SetPreferredBackupWindow sets the PreferredBackupWindow field's value. +func (s *AwsRdsDbClusterDetails) SetPreferredBackupWindow(v string) *AwsRdsDbClusterDetails { + s.PreferredBackupWindow = &v + return s +} + +// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. +func (s *AwsRdsDbClusterDetails) SetPreferredMaintenanceWindow(v string) *AwsRdsDbClusterDetails { + s.PreferredMaintenanceWindow = &v + return s +} + +// SetReadReplicaIdentifiers sets the ReadReplicaIdentifiers field's value. +func (s *AwsRdsDbClusterDetails) SetReadReplicaIdentifiers(v []*string) *AwsRdsDbClusterDetails { + s.ReadReplicaIdentifiers = v + return s +} + +// SetReaderEndpoint sets the ReaderEndpoint field's value. +func (s *AwsRdsDbClusterDetails) SetReaderEndpoint(v string) *AwsRdsDbClusterDetails { + s.ReaderEndpoint = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsRdsDbClusterDetails) SetStatus(v string) *AwsRdsDbClusterDetails { + s.Status = &v + return s +} + +// SetStorageEncrypted sets the StorageEncrypted field's value. +func (s *AwsRdsDbClusterDetails) SetStorageEncrypted(v bool) *AwsRdsDbClusterDetails { + s.StorageEncrypted = &v + return s +} + +// SetVpcSecurityGroups sets the VpcSecurityGroups field's value. +func (s *AwsRdsDbClusterDetails) SetVpcSecurityGroups(v []*AwsRdsDbInstanceVpcSecurityGroup) *AwsRdsDbClusterDetails { + s.VpcSecurityGroups = v + return s +} + +// Information about an instance in the DB cluster. +type AwsRdsDbClusterMember struct { + _ struct{} `type:"structure"` + + // The status of the DB cluster parameter group for this member of the DB cluster. + DbClusterParameterGroupStatus *string `type:"string"` + + // The instance identifier for this member of the DB cluster. + DbInstanceIdentifier *string `type:"string"` + + // Whether the cluster member is the primary instance for the DB cluster. + IsClusterWriter *bool `type:"boolean"` + + // Specifies the order in which an Aurora replica is promoted to the primary + // instance when the existing primary instance fails. + PromotionTier *int64 `type:"integer"` +} + +// String returns the string representation +func (s AwsRdsDbClusterMember) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbClusterMember) GoString() string { + return s.String() +} + +// SetDbClusterParameterGroupStatus sets the DbClusterParameterGroupStatus field's value. +func (s *AwsRdsDbClusterMember) SetDbClusterParameterGroupStatus(v string) *AwsRdsDbClusterMember { + s.DbClusterParameterGroupStatus = &v + return s +} + +// SetDbInstanceIdentifier sets the DbInstanceIdentifier field's value. +func (s *AwsRdsDbClusterMember) SetDbInstanceIdentifier(v string) *AwsRdsDbClusterMember { + s.DbInstanceIdentifier = &v + return s +} + +// SetIsClusterWriter sets the IsClusterWriter field's value. 
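+//
+// A minimal sketch of a writer member entry (the instance identifier is
+// hypothetical):
+//
+//    member := (&AwsRdsDbClusterMember{}).
+//        SetDbInstanceIdentifier("example-instance-1").
+//        SetIsClusterWriter(true).
+//        SetPromotionTier(1)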
+func (s *AwsRdsDbClusterMember) SetIsClusterWriter(v bool) *AwsRdsDbClusterMember { + s.IsClusterWriter = &v + return s +} + +// SetPromotionTier sets the PromotionTier field's value. +func (s *AwsRdsDbClusterMember) SetPromotionTier(v int64) *AwsRdsDbClusterMember { + s.PromotionTier = &v + return s +} + +// Information about an option group membership for a DB cluster. +type AwsRdsDbClusterOptionGroupMembership struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster option group. + DbClusterOptionGroupName *string `type:"string"` + + // The status of the DB cluster option group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s AwsRdsDbClusterOptionGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbClusterOptionGroupMembership) GoString() string { + return s.String() +} + +// SetDbClusterOptionGroupName sets the DbClusterOptionGroupName field's value. +func (s *AwsRdsDbClusterOptionGroupMembership) SetDbClusterOptionGroupName(v string) *AwsRdsDbClusterOptionGroupMembership { + s.DbClusterOptionGroupName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsRdsDbClusterOptionGroupMembership) SetStatus(v string) *AwsRdsDbClusterOptionGroupMembership { + s.Status = &v + return s +} + +// Information about an Amazon RDS DB cluster snapshot. +type AwsRdsDbClusterSnapshotDetails struct { + _ struct{} `type:"structure"` + + // Specifies the allocated storage size in gibibytes (GiB). + AllocatedStorage *int64 `type:"integer"` + + // A list of Availability Zones where instances in the DB cluster can be created. + AvailabilityZones []*string `type:"list"` + + // Indicates when the DB cluster was created, in Universal Coordinated Time + // (UTC). + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + ClusterCreateTime *string `type:"string"` + + // The DB cluster identifier. + DbClusterIdentifier *string `type:"string"` + + // The identifier of the DB cluster snapshot. + DbClusterSnapshotIdentifier *string `type:"string"` + + Engine *string `type:"string"` + + // The version of the database engine to use. + EngineVersion *string `type:"string"` + + // Whether mapping of IAM accounts to database accounts is enabled. + IamDatabaseAuthenticationEnabled *bool `type:"boolean"` + + // The ARN of the AWS KMS master key that is used to encrypt the database instances + // in the DB cluster. + KmsKeyId *string `type:"string"` + + // The license model information for this DB cluster snapshot. + LicenseModel *string `type:"string"` + + // The name of the master user for the DB cluster. + MasterUsername *string `type:"string"` + + // Specifies the percentage of the estimated data that has been transferred. + PercentProgress *int64 `type:"integer"` + + // The port number on which the DB instances in the DB cluster accept connections. + Port *int64 `type:"integer"` + + // Indicates when the snapshot was taken. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + SnapshotCreateTime *string `type:"string"` + + // The type of DB cluster snapshot. 
+ SnapshotType *string `type:"string"` + + // The status of this DB cluster snapshot. + Status *string `type:"string"` + + // Whether the DB cluster is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // The VPC ID that is associated with the DB cluster snapshot. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s AwsRdsDbClusterSnapshotDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbClusterSnapshotDetails) GoString() string { + return s.String() +} + +// SetAllocatedStorage sets the AllocatedStorage field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetAllocatedStorage(v int64) *AwsRdsDbClusterSnapshotDetails { + s.AllocatedStorage = &v + return s +} + +// SetAvailabilityZones sets the AvailabilityZones field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetAvailabilityZones(v []*string) *AwsRdsDbClusterSnapshotDetails { + s.AvailabilityZones = v + return s +} + +// SetClusterCreateTime sets the ClusterCreateTime field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetClusterCreateTime(v string) *AwsRdsDbClusterSnapshotDetails { + s.ClusterCreateTime = &v + return s +} + +// SetDbClusterIdentifier sets the DbClusterIdentifier field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetDbClusterIdentifier(v string) *AwsRdsDbClusterSnapshotDetails { + s.DbClusterIdentifier = &v + return s +} + +// SetDbClusterSnapshotIdentifier sets the DbClusterSnapshotIdentifier field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetDbClusterSnapshotIdentifier(v string) *AwsRdsDbClusterSnapshotDetails { + s.DbClusterSnapshotIdentifier = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetEngine(v string) *AwsRdsDbClusterSnapshotDetails { + s.Engine = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetEngineVersion(v string) *AwsRdsDbClusterSnapshotDetails { + s.EngineVersion = &v + return s +} + +// SetIamDatabaseAuthenticationEnabled sets the IamDatabaseAuthenticationEnabled field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetIamDatabaseAuthenticationEnabled(v bool) *AwsRdsDbClusterSnapshotDetails { + s.IamDatabaseAuthenticationEnabled = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetKmsKeyId(v string) *AwsRdsDbClusterSnapshotDetails { + s.KmsKeyId = &v + return s +} + +// SetLicenseModel sets the LicenseModel field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetLicenseModel(v string) *AwsRdsDbClusterSnapshotDetails { + s.LicenseModel = &v + return s +} + +// SetMasterUsername sets the MasterUsername field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetMasterUsername(v string) *AwsRdsDbClusterSnapshotDetails { + s.MasterUsername = &v + return s +} + +// SetPercentProgress sets the PercentProgress field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetPercentProgress(v int64) *AwsRdsDbClusterSnapshotDetails { + s.PercentProgress = &v + return s +} + +// SetPort sets the Port field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetPort(v int64) *AwsRdsDbClusterSnapshotDetails { + s.Port = &v + return s +} + +// SetSnapshotCreateTime sets the SnapshotCreateTime field's value. 
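+//
+// A minimal sketch (identifier and timestamp are hypothetical; the timestamp
+// uses the RFC 3339 format described on the SnapshotCreateTime field):
+//
+//    snap := (&AwsRdsDbClusterSnapshotDetails{}).
+//        SetDbClusterSnapshotIdentifier("example-snapshot").
+//        SetSnapshotCreateTime("2020-03-22T13:22:13.933Z").
+//        SetStorageEncrypted(true)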
+func (s *AwsRdsDbClusterSnapshotDetails) SetSnapshotCreateTime(v string) *AwsRdsDbClusterSnapshotDetails { + s.SnapshotCreateTime = &v + return s +} + +// SetSnapshotType sets the SnapshotType field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetSnapshotType(v string) *AwsRdsDbClusterSnapshotDetails { + s.SnapshotType = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetStatus(v string) *AwsRdsDbClusterSnapshotDetails { + s.Status = &v + return s +} + +// SetStorageEncrypted sets the StorageEncrypted field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetStorageEncrypted(v bool) *AwsRdsDbClusterSnapshotDetails { + s.StorageEncrypted = &v + return s +} + +// SetVpcId sets the VpcId field's value. +func (s *AwsRdsDbClusterSnapshotDetails) SetVpcId(v string) *AwsRdsDbClusterSnapshotDetails { + s.VpcId = &v + return s +} + +// Information about an Active Directory domain membership record associated +// with the DB instance. +type AwsRdsDbDomainMembership struct { + _ struct{} `type:"structure"` + + // The identifier of the Active Directory domain. + Domain *string `type:"string"` + + // The fully qualified domain name of the Active Directory domain. + Fqdn *string `type:"string"` + + // The name of the IAM role to use when making API calls to the Directory Service. + IamRoleName *string `type:"string"` + + // The status of the Active Directory Domain membership for the DB instance. + Status *string `type:"string"` +} + +// String returns the string representation +func (s AwsRdsDbDomainMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbDomainMembership) GoString() string { + return s.String() +} + +// SetDomain sets the Domain field's value. +func (s *AwsRdsDbDomainMembership) SetDomain(v string) *AwsRdsDbDomainMembership { + s.Domain = &v + return s +} + +// SetFqdn sets the Fqdn field's value. +func (s *AwsRdsDbDomainMembership) SetFqdn(v string) *AwsRdsDbDomainMembership { + s.Fqdn = &v + return s +} + +// SetIamRoleName sets the IamRoleName field's value. +func (s *AwsRdsDbDomainMembership) SetIamRoleName(v string) *AwsRdsDbDomainMembership { + s.IamRoleName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsRdsDbDomainMembership) SetStatus(v string) *AwsRdsDbDomainMembership { + s.Status = &v + return s +} + +// An AWS Identity and Access Management (IAM) role associated with the DB instance. +type AwsRdsDbInstanceAssociatedRole struct { + _ struct{} `type:"structure"` + + // The name of the feature associated with the IAM)role. + FeatureName *string `type:"string"` + + // The Amazon Resource Name (ARN) of the IAM role that is associated with the + // DB instance. + RoleArn *string `type:"string"` + + // Describes the state of the association between the IAM role and the DB instance. + // The Status property returns one of the following values: + // + // * ACTIVE - The IAM role ARN is associated with the DB instance and can + // be used to access other AWS services on your behalf. + // + // * PENDING - The IAM role ARN is being associated with the DB instance. + // + // * INVALID - The IAM role ARN is associated with the DB instance. But the + // DB instance is unable to assume the IAM role in order to access other + // AWS services on your behalf. 
+ Status *string `type:"string"` +} + +// String returns the string representation +func (s AwsRdsDbInstanceAssociatedRole) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbInstanceAssociatedRole) GoString() string { + return s.String() +} + +// SetFeatureName sets the FeatureName field's value. +func (s *AwsRdsDbInstanceAssociatedRole) SetFeatureName(v string) *AwsRdsDbInstanceAssociatedRole { + s.FeatureName = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AwsRdsDbInstanceAssociatedRole) SetRoleArn(v string) *AwsRdsDbInstanceAssociatedRole { + s.RoleArn = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsRdsDbInstanceAssociatedRole) SetStatus(v string) *AwsRdsDbInstanceAssociatedRole { + s.Status = &v + return s +} + +// Contains the details of an Amazon RDS DB instance. +type AwsRdsDbInstanceDetails struct { + _ struct{} `type:"structure"` + + // The amount of storage (in gigabytes) to initially allocate for the DB instance. + AllocatedStorage *int64 `type:"integer"` + + // The AWS Identity and Access Management (IAM) roles associated with the DB + // instance. + AssociatedRoles []*AwsRdsDbInstanceAssociatedRole `type:"list"` + + // Indicates whether minor version patches are applied automatically. + AutoMinorVersionUpgrade *bool `type:"boolean"` + + // The Availability Zone where the DB instance will be created. + AvailabilityZone *string `type:"string"` + + // The number of days for which to retain automated backups. + BackupRetentionPeriod *int64 `type:"integer"` + + // The identifier of the CA certificate for this DB instance. + CACertificateIdentifier *string `type:"string"` + + // The name of the character set that this DB instance is associated with. + CharacterSetName *string `type:"string"` + + // Whether to copy resource tags to snapshots of the DB instance. + CopyTagsToSnapshot *bool `type:"boolean"` + + // If the DB instance is a member of a DB cluster, contains the name of the + // DB cluster that the DB instance is a member of. + DBClusterIdentifier *string `type:"string"` + + // Contains the name of the compute and memory capacity class of the DB instance. + DBInstanceClass *string `type:"string"` + + // Contains a user-supplied database identifier. This identifier is the unique + // key that identifies a DB instance. + DBInstanceIdentifier *string `type:"string"` + + // The meaning of this parameter differs according to the database engine you + // use. + // + // MySQL, MariaDB, SQL Server, PostgreSQL + // + // Contains the name of the initial database of this instance that was provided + // at create time, if one was specified when the DB instance was created. This + // same name is returned for the life of the DB instance. + // + // Oracle + // + // Contains the Oracle System ID (SID) of the created DB instance. Not shown + // when the returned parameters do not apply to an Oracle DB instance. + DBName *string `type:"string"` + + // Specifies the port that the DB instance listens on. If the DB instance is + // part of a DB cluster, this can be a different port than the DB cluster port. + DbInstancePort *int64 `type:"integer"` + + // The current status of the DB instance. + DbInstanceStatus *string `type:"string"` + + // A list of the DB parameter groups to assign to the DB instance. + DbParameterGroups []*AwsRdsDbParameterGroup `type:"list"` + + // A list of the DB security groups to assign to the DB instance. 
+ DbSecurityGroups []*string `type:"list"` + + // Information about the subnet group that is associated with the DB instance. + DbSubnetGroup *AwsRdsDbSubnetGroup `type:"structure"` + + // The AWS Region-unique, immutable identifier for the DB instance. This identifier + // is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB + // instance is accessed. + DbiResourceId *string `type:"string"` + + // Indicates whether the DB instance has deletion protection enabled. + // + // When deletion protection is enabled, the database cannot be deleted. + DeletionProtection *bool `type:"boolean"` + + // The Active Directory domain membership records associated with the DB instance. + DomainMemberships []*AwsRdsDbDomainMembership `type:"list"` + + // A list of log types that this DB instance is configured to export to CloudWatch + // Logs. + EnabledCloudWatchLogsExports []*string `type:"list"` + + // Specifies the connection endpoint. + Endpoint *AwsRdsDbInstanceEndpoint `type:"structure"` + + // Provides the name of the database engine to use for this DB instance. + Engine *string `type:"string"` + + // Indicates the database engine version. + EngineVersion *string `type:"string"` + + // The ARN of the CloudWatch Logs log stream that receives the enhanced monitoring + // metrics data for the DB instance. + EnhancedMonitoringResourceArn *string `type:"string"` + + // True if mapping of AWS Identity and Access Management (IAM) accounts to database + // accounts is enabled, and otherwise false. + // + // IAM database authentication can be enabled for the following database engines. + // + // * For MySQL 5.6, minor version 5.6.34 or higher + // + // * For MySQL 5.7, minor version 5.7.16 or higher + // + // * Aurora 5.6 or higher + IAMDatabaseAuthenticationEnabled *bool `type:"boolean"` + + // Indicates when the DB instance was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + InstanceCreateTime *string `type:"string"` + + // Specifies the provisioned IOPS (I/O operations per second) for this DB instance. + Iops *int64 `type:"integer"` + + // If StorageEncrypted is true, the AWS KMS key identifier for the encrypted + // DB instance. + KmsKeyId *string `type:"string"` + + // Specifies the latest time to which a database can be restored with point-in-time + // restore. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + LatestRestorableTime *string `type:"string"` + + // License model information for this DB instance. + LicenseModel *string `type:"string"` + + // Specifies the connection endpoint. + ListenerEndpoint *AwsRdsDbInstanceEndpoint `type:"structure"` + + // The master user name of the DB instance. + MasterUsername *string `type:"string"` + + // The upper limit to which Amazon RDS can automatically scale the storage of + // the DB instance. + MaxAllocatedStorage *int64 `type:"integer"` + + // The interval, in seconds, between points when enhanced monitoring metrics + // are collected for the DB instance. + MonitoringInterval *int64 `type:"integer"` + + // The ARN for the IAM role that permits Amazon RDS to send enhanced monitoring + // metrics to CloudWatch Logs. 
+ MonitoringRoleArn *string `type:"string"`
+
+ // Whether the DB instance is a multiple Availability Zone deployment.
+ MultiAz *bool `type:"boolean"`
+
+ // The list of option group memberships for this DB instance.
+ OptionGroupMemberships []*AwsRdsDbOptionGroupMembership `type:"list"`
+
+ // Changes to the DB instance that are currently pending.
+ PendingModifiedValues *AwsRdsDbPendingModifiedValues `type:"structure"`
+
+ // Indicates whether Performance Insights is enabled for the DB instance.
+ PerformanceInsightsEnabled *bool `type:"boolean"`
+
+ // The identifier of the AWS KMS key used to encrypt the Performance Insights
+ // data.
+ PerformanceInsightsKmsKeyId *string `type:"string"`
+
+ // The number of days to retain Performance Insights data.
+ PerformanceInsightsRetentionPeriod *int64 `type:"integer"`
+
+ // The range of time each day when automated backups are created, if automated
+ // backups are enabled.
+ //
+ // Uses the format HH:MM-HH:MM. For example, 04:52-05:22.
+ PreferredBackupWindow *string `type:"string"`
+
+ // The weekly time range during which system maintenance can occur, in Universal
+ // Coordinated Time (UTC).
+ //
+ // Uses the format <day>:HH:MM-<day>:HH:MM.
+ //
+ // For the day values, use mon|tue|wed|thu|fri|sat|sun.
+ //
+ // For example, sun:09:32-sun:10:02.
+ PreferredMaintenanceWindow *string `type:"string"`
+
+ // The number of CPU cores and the number of threads per core for the DB instance
+ // class of the DB instance.
+ ProcessorFeatures []*AwsRdsDbProcessorFeature `type:"list"`
+
+ // The order in which to promote an Aurora replica to the primary instance after
+ // a failure of the existing primary instance.
+ PromotionTier *int64 `type:"integer"`
+
+ // Specifies the accessibility options for the DB instance.
+ //
+ // A value of true specifies an Internet-facing instance with a publicly resolvable
+ // DNS name, which resolves to a public IP address.
+ //
+ // A value of false specifies an internal instance with a DNS name that resolves
+ // to a private IP address.
+ PubliclyAccessible *bool `type:"boolean"`
+
+ // List of identifiers of Aurora DB clusters to which the RDS DB instance is
+ // replicated as a read replica.
+ ReadReplicaDBClusterIdentifiers []*string `type:"list"`
+
+ // List of identifiers of the read replicas associated with this DB instance.
+ ReadReplicaDBInstanceIdentifiers []*string `type:"list"`
+
+ // If this DB instance is a read replica, contains the identifier of the source
+ // DB instance.
+ ReadReplicaSourceDBInstanceIdentifier *string `type:"string"`
+
+ // For a DB instance with multi-Availability Zone support, the name of the secondary
+ // Availability Zone.
+ SecondaryAvailabilityZone *string `type:"string"`
+
+ // The status of a read replica. If the instance isn't a read replica, this
+ // is empty.
+ StatusInfos []*AwsRdsDbStatusInfo `type:"list"`
+
+ // Specifies whether the DB instance is encrypted.
+ StorageEncrypted *bool `type:"boolean"`
+
+ // The storage type for the DB instance.
+ StorageType *string `type:"string"`
+
+ // The ARN from the key store with which the instance is associated for TDE
+ // encryption.
+ TdeCredentialArn *string `type:"string"`
+
+ // The time zone of the DB instance.
+ Timezone *string `type:"string"`
+
+ // A list of VPC security groups that the DB instance belongs to.
+ VpcSecurityGroups []*AwsRdsDbInstanceVpcSecurityGroup `type:"list"` +} + +// String returns the string representation +func (s AwsRdsDbInstanceDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbInstanceDetails) GoString() string { + return s.String() +} + +// SetAllocatedStorage sets the AllocatedStorage field's value. +func (s *AwsRdsDbInstanceDetails) SetAllocatedStorage(v int64) *AwsRdsDbInstanceDetails { + s.AllocatedStorage = &v + return s +} + +// SetAssociatedRoles sets the AssociatedRoles field's value. +func (s *AwsRdsDbInstanceDetails) SetAssociatedRoles(v []*AwsRdsDbInstanceAssociatedRole) *AwsRdsDbInstanceDetails { + s.AssociatedRoles = v + return s +} + +// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value. +func (s *AwsRdsDbInstanceDetails) SetAutoMinorVersionUpgrade(v bool) *AwsRdsDbInstanceDetails { + s.AutoMinorVersionUpgrade = &v + return s +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *AwsRdsDbInstanceDetails) SetAvailabilityZone(v string) *AwsRdsDbInstanceDetails { + s.AvailabilityZone = &v + return s +} + +// SetBackupRetentionPeriod sets the BackupRetentionPeriod field's value. +func (s *AwsRdsDbInstanceDetails) SetBackupRetentionPeriod(v int64) *AwsRdsDbInstanceDetails { + s.BackupRetentionPeriod = &v + return s +} + +// SetCACertificateIdentifier sets the CACertificateIdentifier field's value. +func (s *AwsRdsDbInstanceDetails) SetCACertificateIdentifier(v string) *AwsRdsDbInstanceDetails { + s.CACertificateIdentifier = &v + return s +} + +// SetCharacterSetName sets the CharacterSetName field's value. +func (s *AwsRdsDbInstanceDetails) SetCharacterSetName(v string) *AwsRdsDbInstanceDetails { + s.CharacterSetName = &v + return s +} + +// SetCopyTagsToSnapshot sets the CopyTagsToSnapshot field's value. +func (s *AwsRdsDbInstanceDetails) SetCopyTagsToSnapshot(v bool) *AwsRdsDbInstanceDetails { + s.CopyTagsToSnapshot = &v + return s +} + +// SetDBClusterIdentifier sets the DBClusterIdentifier field's value. +func (s *AwsRdsDbInstanceDetails) SetDBClusterIdentifier(v string) *AwsRdsDbInstanceDetails { + s.DBClusterIdentifier = &v + return s +} + +// SetDBInstanceClass sets the DBInstanceClass field's value. +func (s *AwsRdsDbInstanceDetails) SetDBInstanceClass(v string) *AwsRdsDbInstanceDetails { + s.DBInstanceClass = &v + return s +} + +// SetDBInstanceIdentifier sets the DBInstanceIdentifier field's value. +func (s *AwsRdsDbInstanceDetails) SetDBInstanceIdentifier(v string) *AwsRdsDbInstanceDetails { + s.DBInstanceIdentifier = &v + return s +} + +// SetDBName sets the DBName field's value. +func (s *AwsRdsDbInstanceDetails) SetDBName(v string) *AwsRdsDbInstanceDetails { + s.DBName = &v + return s +} + +// SetDbInstancePort sets the DbInstancePort field's value. +func (s *AwsRdsDbInstanceDetails) SetDbInstancePort(v int64) *AwsRdsDbInstanceDetails { + s.DbInstancePort = &v + return s +} + +// SetDbInstanceStatus sets the DbInstanceStatus field's value. +func (s *AwsRdsDbInstanceDetails) SetDbInstanceStatus(v string) *AwsRdsDbInstanceDetails { + s.DbInstanceStatus = &v + return s +} + +// SetDbParameterGroups sets the DbParameterGroups field's value. +func (s *AwsRdsDbInstanceDetails) SetDbParameterGroups(v []*AwsRdsDbParameterGroup) *AwsRdsDbInstanceDetails { + s.DbParameterGroups = v + return s +} + +// SetDbSecurityGroups sets the DbSecurityGroups field's value. 
+func (s *AwsRdsDbInstanceDetails) SetDbSecurityGroups(v []*string) *AwsRdsDbInstanceDetails { + s.DbSecurityGroups = v + return s +} + +// SetDbSubnetGroup sets the DbSubnetGroup field's value. +func (s *AwsRdsDbInstanceDetails) SetDbSubnetGroup(v *AwsRdsDbSubnetGroup) *AwsRdsDbInstanceDetails { + s.DbSubnetGroup = v + return s +} + +// SetDbiResourceId sets the DbiResourceId field's value. +func (s *AwsRdsDbInstanceDetails) SetDbiResourceId(v string) *AwsRdsDbInstanceDetails { + s.DbiResourceId = &v + return s +} + +// SetDeletionProtection sets the DeletionProtection field's value. +func (s *AwsRdsDbInstanceDetails) SetDeletionProtection(v bool) *AwsRdsDbInstanceDetails { + s.DeletionProtection = &v + return s +} + +// SetDomainMemberships sets the DomainMemberships field's value. +func (s *AwsRdsDbInstanceDetails) SetDomainMemberships(v []*AwsRdsDbDomainMembership) *AwsRdsDbInstanceDetails { + s.DomainMemberships = v + return s +} + +// SetEnabledCloudWatchLogsExports sets the EnabledCloudWatchLogsExports field's value. +func (s *AwsRdsDbInstanceDetails) SetEnabledCloudWatchLogsExports(v []*string) *AwsRdsDbInstanceDetails { + s.EnabledCloudWatchLogsExports = v + return s +} + +// SetEndpoint sets the Endpoint field's value. +func (s *AwsRdsDbInstanceDetails) SetEndpoint(v *AwsRdsDbInstanceEndpoint) *AwsRdsDbInstanceDetails { + s.Endpoint = v + return s +} + +// SetEngine sets the Engine field's value. +func (s *AwsRdsDbInstanceDetails) SetEngine(v string) *AwsRdsDbInstanceDetails { + s.Engine = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *AwsRdsDbInstanceDetails) SetEngineVersion(v string) *AwsRdsDbInstanceDetails { + s.EngineVersion = &v + return s +} + +// SetEnhancedMonitoringResourceArn sets the EnhancedMonitoringResourceArn field's value. +func (s *AwsRdsDbInstanceDetails) SetEnhancedMonitoringResourceArn(v string) *AwsRdsDbInstanceDetails { + s.EnhancedMonitoringResourceArn = &v + return s +} + +// SetIAMDatabaseAuthenticationEnabled sets the IAMDatabaseAuthenticationEnabled field's value. +func (s *AwsRdsDbInstanceDetails) SetIAMDatabaseAuthenticationEnabled(v bool) *AwsRdsDbInstanceDetails { + s.IAMDatabaseAuthenticationEnabled = &v + return s +} + +// SetInstanceCreateTime sets the InstanceCreateTime field's value. +func (s *AwsRdsDbInstanceDetails) SetInstanceCreateTime(v string) *AwsRdsDbInstanceDetails { + s.InstanceCreateTime = &v + return s +} + +// SetIops sets the Iops field's value. +func (s *AwsRdsDbInstanceDetails) SetIops(v int64) *AwsRdsDbInstanceDetails { + s.Iops = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *AwsRdsDbInstanceDetails) SetKmsKeyId(v string) *AwsRdsDbInstanceDetails { + s.KmsKeyId = &v + return s +} + +// SetLatestRestorableTime sets the LatestRestorableTime field's value. +func (s *AwsRdsDbInstanceDetails) SetLatestRestorableTime(v string) *AwsRdsDbInstanceDetails { + s.LatestRestorableTime = &v + return s +} + +// SetLicenseModel sets the LicenseModel field's value. +func (s *AwsRdsDbInstanceDetails) SetLicenseModel(v string) *AwsRdsDbInstanceDetails { + s.LicenseModel = &v + return s +} + +// SetListenerEndpoint sets the ListenerEndpoint field's value. +func (s *AwsRdsDbInstanceDetails) SetListenerEndpoint(v *AwsRdsDbInstanceEndpoint) *AwsRdsDbInstanceDetails { + s.ListenerEndpoint = v + return s +} + +// SetMasterUsername sets the MasterUsername field's value. 
+func (s *AwsRdsDbInstanceDetails) SetMasterUsername(v string) *AwsRdsDbInstanceDetails { + s.MasterUsername = &v + return s +} + +// SetMaxAllocatedStorage sets the MaxAllocatedStorage field's value. +func (s *AwsRdsDbInstanceDetails) SetMaxAllocatedStorage(v int64) *AwsRdsDbInstanceDetails { + s.MaxAllocatedStorage = &v + return s +} + +// SetMonitoringInterval sets the MonitoringInterval field's value. +func (s *AwsRdsDbInstanceDetails) SetMonitoringInterval(v int64) *AwsRdsDbInstanceDetails { + s.MonitoringInterval = &v + return s +} + +// SetMonitoringRoleArn sets the MonitoringRoleArn field's value. +func (s *AwsRdsDbInstanceDetails) SetMonitoringRoleArn(v string) *AwsRdsDbInstanceDetails { + s.MonitoringRoleArn = &v + return s +} + +// SetMultiAz sets the MultiAz field's value. +func (s *AwsRdsDbInstanceDetails) SetMultiAz(v bool) *AwsRdsDbInstanceDetails { + s.MultiAz = &v + return s +} + +// SetOptionGroupMemberships sets the OptionGroupMemberships field's value. +func (s *AwsRdsDbInstanceDetails) SetOptionGroupMemberships(v []*AwsRdsDbOptionGroupMembership) *AwsRdsDbInstanceDetails { + s.OptionGroupMemberships = v + return s +} + +// SetPendingModifiedValues sets the PendingModifiedValues field's value. +func (s *AwsRdsDbInstanceDetails) SetPendingModifiedValues(v *AwsRdsDbPendingModifiedValues) *AwsRdsDbInstanceDetails { + s.PendingModifiedValues = v + return s +} + +// SetPerformanceInsightsEnabled sets the PerformanceInsightsEnabled field's value. +func (s *AwsRdsDbInstanceDetails) SetPerformanceInsightsEnabled(v bool) *AwsRdsDbInstanceDetails { + s.PerformanceInsightsEnabled = &v + return s +} + +// SetPerformanceInsightsKmsKeyId sets the PerformanceInsightsKmsKeyId field's value. +func (s *AwsRdsDbInstanceDetails) SetPerformanceInsightsKmsKeyId(v string) *AwsRdsDbInstanceDetails { + s.PerformanceInsightsKmsKeyId = &v + return s +} + +// SetPerformanceInsightsRetentionPeriod sets the PerformanceInsightsRetentionPeriod field's value. +func (s *AwsRdsDbInstanceDetails) SetPerformanceInsightsRetentionPeriod(v int64) *AwsRdsDbInstanceDetails { + s.PerformanceInsightsRetentionPeriod = &v + return s +} + +// SetPreferredBackupWindow sets the PreferredBackupWindow field's value. +func (s *AwsRdsDbInstanceDetails) SetPreferredBackupWindow(v string) *AwsRdsDbInstanceDetails { + s.PreferredBackupWindow = &v + return s +} + +// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. +func (s *AwsRdsDbInstanceDetails) SetPreferredMaintenanceWindow(v string) *AwsRdsDbInstanceDetails { + s.PreferredMaintenanceWindow = &v + return s +} + +// SetProcessorFeatures sets the ProcessorFeatures field's value. +func (s *AwsRdsDbInstanceDetails) SetProcessorFeatures(v []*AwsRdsDbProcessorFeature) *AwsRdsDbInstanceDetails { + s.ProcessorFeatures = v + return s +} + +// SetPromotionTier sets the PromotionTier field's value. +func (s *AwsRdsDbInstanceDetails) SetPromotionTier(v int64) *AwsRdsDbInstanceDetails { + s.PromotionTier = &v + return s +} + +// SetPubliclyAccessible sets the PubliclyAccessible field's value. +func (s *AwsRdsDbInstanceDetails) SetPubliclyAccessible(v bool) *AwsRdsDbInstanceDetails { + s.PubliclyAccessible = &v + return s +} + +// SetReadReplicaDBClusterIdentifiers sets the ReadReplicaDBClusterIdentifiers field's value. 
+func (s *AwsRdsDbInstanceDetails) SetReadReplicaDBClusterIdentifiers(v []*string) *AwsRdsDbInstanceDetails {
+ s.ReadReplicaDBClusterIdentifiers = v
+ return s
+}
+
+// SetReadReplicaDBInstanceIdentifiers sets the ReadReplicaDBInstanceIdentifiers field's value.
+func (s *AwsRdsDbInstanceDetails) SetReadReplicaDBInstanceIdentifiers(v []*string) *AwsRdsDbInstanceDetails {
+ s.ReadReplicaDBInstanceIdentifiers = v
+ return s
+}
+
+// SetReadReplicaSourceDBInstanceIdentifier sets the ReadReplicaSourceDBInstanceIdentifier field's value.
+func (s *AwsRdsDbInstanceDetails) SetReadReplicaSourceDBInstanceIdentifier(v string) *AwsRdsDbInstanceDetails {
+ s.ReadReplicaSourceDBInstanceIdentifier = &v
+ return s
+}
+
+// SetSecondaryAvailabilityZone sets the SecondaryAvailabilityZone field's value.
+func (s *AwsRdsDbInstanceDetails) SetSecondaryAvailabilityZone(v string) *AwsRdsDbInstanceDetails {
+ s.SecondaryAvailabilityZone = &v
+ return s
+}
+
+// SetStatusInfos sets the StatusInfos field's value.
+func (s *AwsRdsDbInstanceDetails) SetStatusInfos(v []*AwsRdsDbStatusInfo) *AwsRdsDbInstanceDetails {
+ s.StatusInfos = v
+ return s
+}
+
+// SetStorageEncrypted sets the StorageEncrypted field's value.
+func (s *AwsRdsDbInstanceDetails) SetStorageEncrypted(v bool) *AwsRdsDbInstanceDetails {
+ s.StorageEncrypted = &v
+ return s
+}
+
+// SetStorageType sets the StorageType field's value.
+func (s *AwsRdsDbInstanceDetails) SetStorageType(v string) *AwsRdsDbInstanceDetails {
+ s.StorageType = &v
+ return s
+}
+
+// SetTdeCredentialArn sets the TdeCredentialArn field's value.
+func (s *AwsRdsDbInstanceDetails) SetTdeCredentialArn(v string) *AwsRdsDbInstanceDetails {
+ s.TdeCredentialArn = &v
+ return s
+}
+
+// SetTimezone sets the Timezone field's value.
+func (s *AwsRdsDbInstanceDetails) SetTimezone(v string) *AwsRdsDbInstanceDetails {
+ s.Timezone = &v
+ return s
+}
+
+// SetVpcSecurityGroups sets the VpcSecurityGroups field's value.
+func (s *AwsRdsDbInstanceDetails) SetVpcSecurityGroups(v []*AwsRdsDbInstanceVpcSecurityGroup) *AwsRdsDbInstanceDetails {
+ s.VpcSecurityGroups = v
+ return s
+}
+
+// Specifies the connection endpoint.
+type AwsRdsDbInstanceEndpoint struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the DNS address of the DB instance.
+ Address *string `type:"string"`
+
+ // Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.
+ HostedZoneId *string `type:"string"`
+
+ // Specifies the port that the database engine is listening on.
+ Port *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AwsRdsDbInstanceEndpoint) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AwsRdsDbInstanceEndpoint) GoString() string {
+ return s.String()
+}
+
+// SetAddress sets the Address field's value.
+func (s *AwsRdsDbInstanceEndpoint) SetAddress(v string) *AwsRdsDbInstanceEndpoint {
+ s.Address = &v
+ return s
+}
+
+// SetHostedZoneId sets the HostedZoneId field's value.
+func (s *AwsRdsDbInstanceEndpoint) SetHostedZoneId(v string) *AwsRdsDbInstanceEndpoint {
+ s.HostedZoneId = &v
+ return s
+}
+
+// SetPort sets the Port field's value.
+func (s *AwsRdsDbInstanceEndpoint) SetPort(v int64) *AwsRdsDbInstanceEndpoint {
+ s.Port = &v
+ return s
+}
+
+// A VPC security group that the DB instance belongs to.
+type AwsRdsDbInstanceVpcSecurityGroup struct {
+ _ struct{} `type:"structure"`
+
+ // The status of the VPC security group.
+ Status *string `type:"string"` + + // The name of the VPC security group. + VpcSecurityGroupId *string `type:"string"` +} + +// String returns the string representation +func (s AwsRdsDbInstanceVpcSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbInstanceVpcSecurityGroup) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AwsRdsDbInstanceVpcSecurityGroup) SetStatus(v string) *AwsRdsDbInstanceVpcSecurityGroup { + s.Status = &v + return s +} + +// SetVpcSecurityGroupId sets the VpcSecurityGroupId field's value. +func (s *AwsRdsDbInstanceVpcSecurityGroup) SetVpcSecurityGroupId(v string) *AwsRdsDbInstanceVpcSecurityGroup { + s.VpcSecurityGroupId = &v + return s +} + +type AwsRdsDbOptionGroupMembership struct { + _ struct{} `type:"structure"` + + OptionGroupName *string `type:"string"` + + Status *string `type:"string"` +} + +// String returns the string representation +func (s AwsRdsDbOptionGroupMembership) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbOptionGroupMembership) GoString() string { + return s.String() +} + +// SetOptionGroupName sets the OptionGroupName field's value. +func (s *AwsRdsDbOptionGroupMembership) SetOptionGroupName(v string) *AwsRdsDbOptionGroupMembership { + s.OptionGroupName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsRdsDbOptionGroupMembership) SetStatus(v string) *AwsRdsDbOptionGroupMembership { + s.Status = &v + return s +} + +type AwsRdsDbParameterGroup struct { + _ struct{} `type:"structure"` + + DbParameterGroupName *string `type:"string"` + + ParameterApplyStatus *string `type:"string"` +} + +// String returns the string representation +func (s AwsRdsDbParameterGroup) String() string { + return awsutil.Prettify(s) +} - // The ID of the Amazon Route 53 hosted zone associated with the load balancer. - CanonicalHostedZoneId *string `type:"string"` +// GoString returns the string representation +func (s AwsRdsDbParameterGroup) GoString() string { + return s.String() +} - // The date and time the load balancer was created. - CreatedTime *string `type:"string"` +// SetDbParameterGroupName sets the DbParameterGroupName field's value. +func (s *AwsRdsDbParameterGroup) SetDbParameterGroupName(v string) *AwsRdsDbParameterGroup { + s.DbParameterGroupName = &v + return s +} - // The public DNS name of the load balancer. - DNSName *string `type:"string"` +// SetParameterApplyStatus sets the ParameterApplyStatus field's value. +func (s *AwsRdsDbParameterGroup) SetParameterApplyStatus(v string) *AwsRdsDbParameterGroup { + s.ParameterApplyStatus = &v + return s +} + +type AwsRdsDbPendingModifiedValues struct { + _ struct{} `type:"structure"` + + AllocatedStorage *int64 `type:"integer"` + + BackupRetentionPeriod *int64 `type:"integer"` + + CaCertificateIdentifier *string `type:"string"` + + DbInstanceClass *string `type:"string"` + + DbInstanceIdentifier *string `type:"string"` + + DbSubnetGroupName *string `type:"string"` + + EngineVersion *string `type:"string"` + + Iops *int64 `type:"integer"` + + LicenseModel *string `type:"string"` + + MasterUserPassword *string `type:"string"` + + MultiAZ *bool `type:"boolean"` + + // Identifies the log types to enable and disable. 
+ PendingCloudWatchLogsExports *AwsRdsPendingCloudWatchLogsExports `type:"structure"` + + Port *int64 `type:"integer"` + + ProcessorFeatures []*AwsRdsDbProcessorFeature `type:"list"` + + StorageType *string `type:"string"` +} + +// String returns the string representation +func (s AwsRdsDbPendingModifiedValues) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbPendingModifiedValues) GoString() string { + return s.String() +} + +// SetAllocatedStorage sets the AllocatedStorage field's value. +func (s *AwsRdsDbPendingModifiedValues) SetAllocatedStorage(v int64) *AwsRdsDbPendingModifiedValues { + s.AllocatedStorage = &v + return s +} + +// SetBackupRetentionPeriod sets the BackupRetentionPeriod field's value. +func (s *AwsRdsDbPendingModifiedValues) SetBackupRetentionPeriod(v int64) *AwsRdsDbPendingModifiedValues { + s.BackupRetentionPeriod = &v + return s +} + +// SetCaCertificateIdentifier sets the CaCertificateIdentifier field's value. +func (s *AwsRdsDbPendingModifiedValues) SetCaCertificateIdentifier(v string) *AwsRdsDbPendingModifiedValues { + s.CaCertificateIdentifier = &v + return s +} + +// SetDbInstanceClass sets the DbInstanceClass field's value. +func (s *AwsRdsDbPendingModifiedValues) SetDbInstanceClass(v string) *AwsRdsDbPendingModifiedValues { + s.DbInstanceClass = &v + return s +} + +// SetDbInstanceIdentifier sets the DbInstanceIdentifier field's value. +func (s *AwsRdsDbPendingModifiedValues) SetDbInstanceIdentifier(v string) *AwsRdsDbPendingModifiedValues { + s.DbInstanceIdentifier = &v + return s +} + +// SetDbSubnetGroupName sets the DbSubnetGroupName field's value. +func (s *AwsRdsDbPendingModifiedValues) SetDbSubnetGroupName(v string) *AwsRdsDbPendingModifiedValues { + s.DbSubnetGroupName = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *AwsRdsDbPendingModifiedValues) SetEngineVersion(v string) *AwsRdsDbPendingModifiedValues { + s.EngineVersion = &v + return s +} + +// SetIops sets the Iops field's value. +func (s *AwsRdsDbPendingModifiedValues) SetIops(v int64) *AwsRdsDbPendingModifiedValues { + s.Iops = &v + return s +} + +// SetLicenseModel sets the LicenseModel field's value. +func (s *AwsRdsDbPendingModifiedValues) SetLicenseModel(v string) *AwsRdsDbPendingModifiedValues { + s.LicenseModel = &v + return s +} + +// SetMasterUserPassword sets the MasterUserPassword field's value. +func (s *AwsRdsDbPendingModifiedValues) SetMasterUserPassword(v string) *AwsRdsDbPendingModifiedValues { + s.MasterUserPassword = &v + return s +} + +// SetMultiAZ sets the MultiAZ field's value. +func (s *AwsRdsDbPendingModifiedValues) SetMultiAZ(v bool) *AwsRdsDbPendingModifiedValues { + s.MultiAZ = &v + return s +} + +// SetPendingCloudWatchLogsExports sets the PendingCloudWatchLogsExports field's value. +func (s *AwsRdsDbPendingModifiedValues) SetPendingCloudWatchLogsExports(v *AwsRdsPendingCloudWatchLogsExports) *AwsRdsDbPendingModifiedValues { + s.PendingCloudWatchLogsExports = v + return s +} + +// SetPort sets the Port field's value. +func (s *AwsRdsDbPendingModifiedValues) SetPort(v int64) *AwsRdsDbPendingModifiedValues { + s.Port = &v + return s +} + +// SetProcessorFeatures sets the ProcessorFeatures field's value. +func (s *AwsRdsDbPendingModifiedValues) SetProcessorFeatures(v []*AwsRdsDbProcessorFeature) *AwsRdsDbPendingModifiedValues { + s.ProcessorFeatures = v + return s +} + +// SetStorageType sets the StorageType field's value. 
+func (s *AwsRdsDbPendingModifiedValues) SetStorageType(v string) *AwsRdsDbPendingModifiedValues { + s.StorageType = &v + return s +} + +type AwsRdsDbProcessorFeature struct { + _ struct{} `type:"structure"` + + Name *string `type:"string"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s AwsRdsDbProcessorFeature) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbProcessorFeature) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *AwsRdsDbProcessorFeature) SetName(v string) *AwsRdsDbProcessorFeature { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *AwsRdsDbProcessorFeature) SetValue(v string) *AwsRdsDbProcessorFeature { + s.Value = &v + return s +} + +type AwsRdsDbSnapshotDetails struct { + _ struct{} `type:"structure"` + + AllocatedStorage *int64 `type:"integer"` + + AvailabilityZone *string `type:"string"` + + DbInstanceIdentifier *string `type:"string"` + + DbSnapshotIdentifier *string `type:"string"` + + DbiResourceId *string `type:"string"` + + Encrypted *bool `type:"boolean"` + + Engine *string `type:"string"` + + EngineVersion *string `type:"string"` + + IamDatabaseAuthenticationEnabled *bool `type:"boolean"` + + InstanceCreateTime *string `type:"string"` + + Iops *int64 `type:"integer"` + + KmsKeyId *string `type:"string"` + + LicenseModel *string `type:"string"` + + MasterUsername *string `type:"string"` + + OptionGroupName *string `type:"string"` + + PercentProgress *int64 `type:"integer"` + + Port *int64 `type:"integer"` + + ProcessorFeatures []*AwsRdsDbProcessorFeature `type:"list"` + + SnapshotCreateTime *string `type:"string"` + + SnapshotType *string `type:"string"` + + SourceDbSnapshotIdentifier *string `type:"string"` + + SourceRegion *string `type:"string"` + + Status *string `type:"string"` + + StorageType *string `type:"string"` + + TdeCredentialArn *string `type:"string"` + + Timezone *string `type:"string"` + + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s AwsRdsDbSnapshotDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbSnapshotDetails) GoString() string { + return s.String() +} + +// SetAllocatedStorage sets the AllocatedStorage field's value. +func (s *AwsRdsDbSnapshotDetails) SetAllocatedStorage(v int64) *AwsRdsDbSnapshotDetails { + s.AllocatedStorage = &v + return s +} + +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *AwsRdsDbSnapshotDetails) SetAvailabilityZone(v string) *AwsRdsDbSnapshotDetails { + s.AvailabilityZone = &v + return s +} + +// SetDbInstanceIdentifier sets the DbInstanceIdentifier field's value. +func (s *AwsRdsDbSnapshotDetails) SetDbInstanceIdentifier(v string) *AwsRdsDbSnapshotDetails { + s.DbInstanceIdentifier = &v + return s +} + +// SetDbSnapshotIdentifier sets the DbSnapshotIdentifier field's value. +func (s *AwsRdsDbSnapshotDetails) SetDbSnapshotIdentifier(v string) *AwsRdsDbSnapshotDetails { + s.DbSnapshotIdentifier = &v + return s +} + +// SetDbiResourceId sets the DbiResourceId field's value. +func (s *AwsRdsDbSnapshotDetails) SetDbiResourceId(v string) *AwsRdsDbSnapshotDetails { + s.DbiResourceId = &v + return s +} + +// SetEncrypted sets the Encrypted field's value. 
+func (s *AwsRdsDbSnapshotDetails) SetEncrypted(v bool) *AwsRdsDbSnapshotDetails { + s.Encrypted = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *AwsRdsDbSnapshotDetails) SetEngine(v string) *AwsRdsDbSnapshotDetails { + s.Engine = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *AwsRdsDbSnapshotDetails) SetEngineVersion(v string) *AwsRdsDbSnapshotDetails { + s.EngineVersion = &v + return s +} + +// SetIamDatabaseAuthenticationEnabled sets the IamDatabaseAuthenticationEnabled field's value. +func (s *AwsRdsDbSnapshotDetails) SetIamDatabaseAuthenticationEnabled(v bool) *AwsRdsDbSnapshotDetails { + s.IamDatabaseAuthenticationEnabled = &v + return s +} + +// SetInstanceCreateTime sets the InstanceCreateTime field's value. +func (s *AwsRdsDbSnapshotDetails) SetInstanceCreateTime(v string) *AwsRdsDbSnapshotDetails { + s.InstanceCreateTime = &v + return s +} + +// SetIops sets the Iops field's value. +func (s *AwsRdsDbSnapshotDetails) SetIops(v int64) *AwsRdsDbSnapshotDetails { + s.Iops = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *AwsRdsDbSnapshotDetails) SetKmsKeyId(v string) *AwsRdsDbSnapshotDetails { + s.KmsKeyId = &v + return s +} + +// SetLicenseModel sets the LicenseModel field's value. +func (s *AwsRdsDbSnapshotDetails) SetLicenseModel(v string) *AwsRdsDbSnapshotDetails { + s.LicenseModel = &v + return s +} + +// SetMasterUsername sets the MasterUsername field's value. +func (s *AwsRdsDbSnapshotDetails) SetMasterUsername(v string) *AwsRdsDbSnapshotDetails { + s.MasterUsername = &v + return s +} + +// SetOptionGroupName sets the OptionGroupName field's value. +func (s *AwsRdsDbSnapshotDetails) SetOptionGroupName(v string) *AwsRdsDbSnapshotDetails { + s.OptionGroupName = &v + return s +} + +// SetPercentProgress sets the PercentProgress field's value. +func (s *AwsRdsDbSnapshotDetails) SetPercentProgress(v int64) *AwsRdsDbSnapshotDetails { + s.PercentProgress = &v + return s +} + +// SetPort sets the Port field's value. +func (s *AwsRdsDbSnapshotDetails) SetPort(v int64) *AwsRdsDbSnapshotDetails { + s.Port = &v + return s +} + +// SetProcessorFeatures sets the ProcessorFeatures field's value. +func (s *AwsRdsDbSnapshotDetails) SetProcessorFeatures(v []*AwsRdsDbProcessorFeature) *AwsRdsDbSnapshotDetails { + s.ProcessorFeatures = v + return s +} + +// SetSnapshotCreateTime sets the SnapshotCreateTime field's value. +func (s *AwsRdsDbSnapshotDetails) SetSnapshotCreateTime(v string) *AwsRdsDbSnapshotDetails { + s.SnapshotCreateTime = &v + return s +} + +// SetSnapshotType sets the SnapshotType field's value. +func (s *AwsRdsDbSnapshotDetails) SetSnapshotType(v string) *AwsRdsDbSnapshotDetails { + s.SnapshotType = &v + return s +} + +// SetSourceDbSnapshotIdentifier sets the SourceDbSnapshotIdentifier field's value. +func (s *AwsRdsDbSnapshotDetails) SetSourceDbSnapshotIdentifier(v string) *AwsRdsDbSnapshotDetails { + s.SourceDbSnapshotIdentifier = &v + return s +} + +// SetSourceRegion sets the SourceRegion field's value. +func (s *AwsRdsDbSnapshotDetails) SetSourceRegion(v string) *AwsRdsDbSnapshotDetails { + s.SourceRegion = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsRdsDbSnapshotDetails) SetStatus(v string) *AwsRdsDbSnapshotDetails { + s.Status = &v + return s +} + +// SetStorageType sets the StorageType field's value. 
+func (s *AwsRdsDbSnapshotDetails) SetStorageType(v string) *AwsRdsDbSnapshotDetails { + s.StorageType = &v + return s +} + +// SetTdeCredentialArn sets the TdeCredentialArn field's value. +func (s *AwsRdsDbSnapshotDetails) SetTdeCredentialArn(v string) *AwsRdsDbSnapshotDetails { + s.TdeCredentialArn = &v + return s +} + +// SetTimezone sets the Timezone field's value. +func (s *AwsRdsDbSnapshotDetails) SetTimezone(v string) *AwsRdsDbSnapshotDetails { + s.Timezone = &v + return s +} - // The type of IP addresses used by the subnets for your load balancer. The - // possible values are ipv4 (for IPv4 addresses) and dualstack (for IPv4 and - // IPv6 addresses). - IpAddressType *string `type:"string"` +// SetVpcId sets the VpcId field's value. +func (s *AwsRdsDbSnapshotDetails) SetVpcId(v string) *AwsRdsDbSnapshotDetails { + s.VpcId = &v + return s +} - // The nodes of an Internet-facing load balancer have public IP addresses. - Scheme *string `type:"string"` +// Information about the status of a read replica. +type AwsRdsDbStatusInfo struct { + _ struct{} `type:"structure"` - // The IDs of the security groups for the load balancer. - SecurityGroups []*string `type:"list"` + // If the read replica is currently in an error state, provides the error details. + Message *string `type:"string"` - // The state of the load balancer. - State *LoadBalancerState `type:"structure"` + // Whether the read replica instance is operating normally. + Normal *bool `type:"boolean"` - // The type of load balancer. - Type *string `type:"string"` + // The status of the read replica instance. + Status *string `type:"string"` - // The ID of the VPC for the load balancer. - VpcId *string `type:"string"` + // The type of status. For a read replica, the status type is read replication. + StatusType *string `type:"string"` } // String returns the string representation -func (s AwsElbv2LoadBalancerDetails) String() string { +func (s AwsRdsDbStatusInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsElbv2LoadBalancerDetails) GoString() string { +func (s AwsRdsDbStatusInfo) GoString() string { return s.String() } -// SetAvailabilityZones sets the AvailabilityZones field's value. -func (s *AwsElbv2LoadBalancerDetails) SetAvailabilityZones(v []*AvailabilityZone) *AwsElbv2LoadBalancerDetails { - s.AvailabilityZones = v +// SetMessage sets the Message field's value. +func (s *AwsRdsDbStatusInfo) SetMessage(v string) *AwsRdsDbStatusInfo { + s.Message = &v return s } -// SetCanonicalHostedZoneId sets the CanonicalHostedZoneId field's value. -func (s *AwsElbv2LoadBalancerDetails) SetCanonicalHostedZoneId(v string) *AwsElbv2LoadBalancerDetails { - s.CanonicalHostedZoneId = &v +// SetNormal sets the Normal field's value. +func (s *AwsRdsDbStatusInfo) SetNormal(v bool) *AwsRdsDbStatusInfo { + s.Normal = &v return s } -// SetCreatedTime sets the CreatedTime field's value. -func (s *AwsElbv2LoadBalancerDetails) SetCreatedTime(v string) *AwsElbv2LoadBalancerDetails { - s.CreatedTime = &v +// SetStatus sets the Status field's value. +func (s *AwsRdsDbStatusInfo) SetStatus(v string) *AwsRdsDbStatusInfo { + s.Status = &v return s } -// SetDNSName sets the DNSName field's value. -func (s *AwsElbv2LoadBalancerDetails) SetDNSName(v string) *AwsElbv2LoadBalancerDetails { - s.DNSName = &v +// SetStatusType sets the StatusType field's value. 
+func (s *AwsRdsDbStatusInfo) SetStatusType(v string) *AwsRdsDbStatusInfo { + s.StatusType = &v return s } -// SetIpAddressType sets the IpAddressType field's value. -func (s *AwsElbv2LoadBalancerDetails) SetIpAddressType(v string) *AwsElbv2LoadBalancerDetails { - s.IpAddressType = &v +// Information about the subnet group for the database instance. +type AwsRdsDbSubnetGroup struct { + _ struct{} `type:"structure"` + + // The ARN of the subnet group. + DbSubnetGroupArn *string `type:"string"` + + // The description of the subnet group. + DbSubnetGroupDescription *string `type:"string"` + + // The name of the subnet group. + DbSubnetGroupName *string `type:"string"` + + // The status of the subnet group. + SubnetGroupStatus *string `type:"string"` + + // A list of subnets in the subnet group. + Subnets []*AwsRdsDbSubnetGroupSubnet `type:"list"` + + // The VPC ID of the subnet group. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s AwsRdsDbSubnetGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRdsDbSubnetGroup) GoString() string { + return s.String() +} + +// SetDbSubnetGroupArn sets the DbSubnetGroupArn field's value. +func (s *AwsRdsDbSubnetGroup) SetDbSubnetGroupArn(v string) *AwsRdsDbSubnetGroup { + s.DbSubnetGroupArn = &v return s } -// SetScheme sets the Scheme field's value. -func (s *AwsElbv2LoadBalancerDetails) SetScheme(v string) *AwsElbv2LoadBalancerDetails { - s.Scheme = &v +// SetDbSubnetGroupDescription sets the DbSubnetGroupDescription field's value. +func (s *AwsRdsDbSubnetGroup) SetDbSubnetGroupDescription(v string) *AwsRdsDbSubnetGroup { + s.DbSubnetGroupDescription = &v return s } -// SetSecurityGroups sets the SecurityGroups field's value. -func (s *AwsElbv2LoadBalancerDetails) SetSecurityGroups(v []*string) *AwsElbv2LoadBalancerDetails { - s.SecurityGroups = v +// SetDbSubnetGroupName sets the DbSubnetGroupName field's value. +func (s *AwsRdsDbSubnetGroup) SetDbSubnetGroupName(v string) *AwsRdsDbSubnetGroup { + s.DbSubnetGroupName = &v return s } -// SetState sets the State field's value. -func (s *AwsElbv2LoadBalancerDetails) SetState(v *LoadBalancerState) *AwsElbv2LoadBalancerDetails { - s.State = v +// SetSubnetGroupStatus sets the SubnetGroupStatus field's value. +func (s *AwsRdsDbSubnetGroup) SetSubnetGroupStatus(v string) *AwsRdsDbSubnetGroup { + s.SubnetGroupStatus = &v return s } -// SetType sets the Type field's value. -func (s *AwsElbv2LoadBalancerDetails) SetType(v string) *AwsElbv2LoadBalancerDetails { - s.Type = &v +// SetSubnets sets the Subnets field's value. +func (s *AwsRdsDbSubnetGroup) SetSubnets(v []*AwsRdsDbSubnetGroupSubnet) *AwsRdsDbSubnetGroup { + s.Subnets = v return s } // SetVpcId sets the VpcId field's value. -func (s *AwsElbv2LoadBalancerDetails) SetVpcId(v string) *AwsElbv2LoadBalancerDetails { +func (s *AwsRdsDbSubnetGroup) SetVpcId(v string) *AwsRdsDbSubnetGroup { s.VpcId = &v return s } -// IAM access key details related to a finding. -type AwsIamAccessKeyDetails struct { +// Information about a subnet in a subnet group. +type AwsRdsDbSubnetGroupSubnet struct { _ struct{} `type:"structure"` - // The creation date/time of the IAM access key related to a finding. - CreatedAt *string `type:"string"` - - // The ID of the principal associated with an access key. - PrincipalId *string `type:"string"` - - // The name of the principal. - PrincipalName *string `type:"string"` - - // The type of principal associated with an access key. 
- PrincipalType *string `type:"string"` + // Information about the Availability Zone for a subnet in the subnet group. + SubnetAvailabilityZone *AwsRdsDbSubnetGroupSubnetAvailabilityZone `type:"structure"` - // The status of the IAM access key related to a finding. - Status *string `type:"string" enum:"AwsIamAccessKeyStatus"` + // The identifier of a subnet in the subnet group. + SubnetIdentifier *string `type:"string"` - // The user associated with the IAM access key related to a finding. - // - // The UserName parameter has been replaced with the PrincipalName parameter - // because access keys can also be assigned to principals that are not IAM users. - // - // Deprecated: This field is deprecated, use PrincipalName instead. - UserName *string `deprecated:"true" type:"string"` + // The status of a subnet in the subnet group. + SubnetStatus *string `type:"string"` } // String returns the string representation -func (s AwsIamAccessKeyDetails) String() string { +func (s AwsRdsDbSubnetGroupSubnet) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsIamAccessKeyDetails) GoString() string { +func (s AwsRdsDbSubnetGroupSubnet) GoString() string { return s.String() } -// SetCreatedAt sets the CreatedAt field's value. -func (s *AwsIamAccessKeyDetails) SetCreatedAt(v string) *AwsIamAccessKeyDetails { - s.CreatedAt = &v +// SetSubnetAvailabilityZone sets the SubnetAvailabilityZone field's value. +func (s *AwsRdsDbSubnetGroupSubnet) SetSubnetAvailabilityZone(v *AwsRdsDbSubnetGroupSubnetAvailabilityZone) *AwsRdsDbSubnetGroupSubnet { + s.SubnetAvailabilityZone = v return s } -// SetPrincipalId sets the PrincipalId field's value. -func (s *AwsIamAccessKeyDetails) SetPrincipalId(v string) *AwsIamAccessKeyDetails { - s.PrincipalId = &v +// SetSubnetIdentifier sets the SubnetIdentifier field's value. +func (s *AwsRdsDbSubnetGroupSubnet) SetSubnetIdentifier(v string) *AwsRdsDbSubnetGroupSubnet { + s.SubnetIdentifier = &v return s } -// SetPrincipalName sets the PrincipalName field's value. -func (s *AwsIamAccessKeyDetails) SetPrincipalName(v string) *AwsIamAccessKeyDetails { - s.PrincipalName = &v +// SetSubnetStatus sets the SubnetStatus field's value. +func (s *AwsRdsDbSubnetGroupSubnet) SetSubnetStatus(v string) *AwsRdsDbSubnetGroupSubnet { + s.SubnetStatus = &v return s } -// SetPrincipalType sets the PrincipalType field's value. -func (s *AwsIamAccessKeyDetails) SetPrincipalType(v string) *AwsIamAccessKeyDetails { - s.PrincipalType = &v - return s +// An Availability Zone for a subnet in a subnet group. +type AwsRdsDbSubnetGroupSubnetAvailabilityZone struct { + _ struct{} `type:"structure"` + + // The name of the Availability Zone for a subnet in the subnet group. + Name *string `type:"string"` } -// SetStatus sets the Status field's value. -func (s *AwsIamAccessKeyDetails) SetStatus(v string) *AwsIamAccessKeyDetails { - s.Status = &v - return s +// String returns the string representation +func (s AwsRdsDbSubnetGroupSubnetAvailabilityZone) String() string { + return awsutil.Prettify(s) } -// SetUserName sets the UserName field's value. -func (s *AwsIamAccessKeyDetails) SetUserName(v string) *AwsIamAccessKeyDetails { - s.UserName = &v +// GoString returns the string representation +func (s AwsRdsDbSubnetGroupSubnetAvailabilityZone) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. 
+func (s *AwsRdsDbSubnetGroupSubnetAvailabilityZone) SetName(v string) *AwsRdsDbSubnetGroupSubnetAvailabilityZone { + s.Name = &v return s } -// Contains information about an IAM role, including all of the role's policies. -type AwsIamRoleDetails struct { +// Identifies the log types to enable and disable. +type AwsRdsPendingCloudWatchLogsExports struct { _ struct{} `type:"structure"` - // The trust policy that grants permission to assume the role. - AssumeRolePolicyDocument *string `min:"1" type:"string"` + // A list of log types that are being disabled. + LogTypesToDisable []*string `type:"list"` - // The date and time, in ISO 8601 date-time format, when the role was created. - CreateDate *string `type:"string"` + // A list of log types that are being enabled. + LogTypesToEnable []*string `type:"list"` +} - // The maximum session duration (in seconds) that you want to set for the specified - // role. - MaxSessionDuration *int64 `type:"integer"` +// String returns the string representation +func (s AwsRdsPendingCloudWatchLogsExports) String() string { + return awsutil.Prettify(s) +} - // The path to the role. - Path *string `type:"string"` +// GoString returns the string representation +func (s AwsRdsPendingCloudWatchLogsExports) GoString() string { + return s.String() +} - // The stable and unique string identifying the role. - RoleId *string `type:"string"` +// SetLogTypesToDisable sets the LogTypesToDisable field's value. +func (s *AwsRdsPendingCloudWatchLogsExports) SetLogTypesToDisable(v []*string) *AwsRdsPendingCloudWatchLogsExports { + s.LogTypesToDisable = v + return s +} - // The friendly name that identifies the role. - RoleName *string `type:"string"` +// SetLogTypesToEnable sets the LogTypesToEnable field's value. +func (s *AwsRdsPendingCloudWatchLogsExports) SetLogTypesToEnable(v []*string) *AwsRdsPendingCloudWatchLogsExports { + s.LogTypesToEnable = v + return s +} + +// A node in an Amazon Redshift cluster. +type AwsRedshiftClusterClusterNode struct { + _ struct{} `type:"structure"` + + // The role of the node. A node might be a leader node or a compute node. + NodeRole *string `type:"string"` + + // The private IP address of the node. + PrivateIpAddress *string `type:"string"` + + // The public IP address of the node. + PublicIpAddress *string `type:"string"` } // String returns the string representation -func (s AwsIamRoleDetails) String() string { +func (s AwsRedshiftClusterClusterNode) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsIamRoleDetails) GoString() string { +func (s AwsRedshiftClusterClusterNode) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *AwsIamRoleDetails) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "AwsIamRoleDetails"} - if s.AssumeRolePolicyDocument != nil && len(*s.AssumeRolePolicyDocument) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AssumeRolePolicyDocument", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetNodeRole sets the NodeRole field's value. +func (s *AwsRedshiftClusterClusterNode) SetNodeRole(v string) *AwsRedshiftClusterClusterNode { + s.NodeRole = &v + return s } -// SetAssumeRolePolicyDocument sets the AssumeRolePolicyDocument field's value. -func (s *AwsIamRoleDetails) SetAssumeRolePolicyDocument(v string) *AwsIamRoleDetails { - s.AssumeRolePolicyDocument = &v +// SetPrivateIpAddress sets the PrivateIpAddress field's value. 
+func (s *AwsRedshiftClusterClusterNode) SetPrivateIpAddress(v string) *AwsRedshiftClusterClusterNode { + s.PrivateIpAddress = &v return s } -// SetCreateDate sets the CreateDate field's value. -func (s *AwsIamRoleDetails) SetCreateDate(v string) *AwsIamRoleDetails { - s.CreateDate = &v +// SetPublicIpAddress sets the PublicIpAddress field's value. +func (s *AwsRedshiftClusterClusterNode) SetPublicIpAddress(v string) *AwsRedshiftClusterClusterNode { + s.PublicIpAddress = &v return s } -// SetMaxSessionDuration sets the MaxSessionDuration field's value. -func (s *AwsIamRoleDetails) SetMaxSessionDuration(v int64) *AwsIamRoleDetails { - s.MaxSessionDuration = &v - return s +// A cluster parameter group that is associated with an Amazon Redshift cluster. +type AwsRedshiftClusterClusterParameterGroup struct { + _ struct{} `type:"structure"` + + // The list of parameter statuses. + ClusterParameterStatusList []*AwsRedshiftClusterClusterParameterStatus `type:"list"` + + // The status of updates to the parameters. + ParameterApplyStatus *string `type:"string"` + + // The name of the parameter group. + ParameterGroupName *string `type:"string"` } -// SetPath sets the Path field's value. -func (s *AwsIamRoleDetails) SetPath(v string) *AwsIamRoleDetails { - s.Path = &v +// String returns the string representation +func (s AwsRedshiftClusterClusterParameterGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRedshiftClusterClusterParameterGroup) GoString() string { + return s.String() +} + +// SetClusterParameterStatusList sets the ClusterParameterStatusList field's value. +func (s *AwsRedshiftClusterClusterParameterGroup) SetClusterParameterStatusList(v []*AwsRedshiftClusterClusterParameterStatus) *AwsRedshiftClusterClusterParameterGroup { + s.ClusterParameterStatusList = v return s } -// SetRoleId sets the RoleId field's value. -func (s *AwsIamRoleDetails) SetRoleId(v string) *AwsIamRoleDetails { - s.RoleId = &v +// SetParameterApplyStatus sets the ParameterApplyStatus field's value. +func (s *AwsRedshiftClusterClusterParameterGroup) SetParameterApplyStatus(v string) *AwsRedshiftClusterClusterParameterGroup { + s.ParameterApplyStatus = &v return s } -// SetRoleName sets the RoleName field's value. -func (s *AwsIamRoleDetails) SetRoleName(v string) *AwsIamRoleDetails { - s.RoleName = &v +// SetParameterGroupName sets the ParameterGroupName field's value. +func (s *AwsRedshiftClusterClusterParameterGroup) SetParameterGroupName(v string) *AwsRedshiftClusterClusterParameterGroup { + s.ParameterGroupName = &v return s } -// Contains metadata about a customer master key (CMK). -type AwsKmsKeyDetails struct { +// The status of a parameter in a cluster parameter group for an Amazon Redshift +// cluster. +type AwsRedshiftClusterClusterParameterStatus struct { _ struct{} `type:"structure"` - // The twelve-digit account ID of the AWS account that owns the CMK. - AWSAccountId *string `type:"string"` - - // The date and time when the CMK was created. - CreationDate *float64 `type:"double"` - - // The globally unique identifier for the CMK. - KeyId *string `type:"string"` - - // The manager of the CMK. CMKs in your AWS account are either customer managed - // or AWS managed. - KeyManager *string `type:"string"` - - // The state of the CMK. - KeyState *string `type:"string"` + // The error that prevented the parameter from being applied to the database. 
+ ParameterApplyErrorDescription *string `type:"string"` - // The source of the CMK's key material. + // The status of the parameter. Indicates whether the parameter is in sync with + // the database, waiting for a cluster reboot, or encountered an error when + // it was applied. // - // When this value is AWS_KMS, AWS KMS created the key material. - // - // When this value is EXTERNAL, the key material was imported from your existing - // key management infrastructure or the CMK lacks key material. - // - // When this value is AWS_CLOUDHSM, the key material was created in the AWS - // CloudHSM cluster associated with a custom key store. - Origin *string `type:"string"` + // Valid values: in-sync | pending-reboot | applying | invalid-parameter | apply-deferred + // | apply-error | unknown-error + ParameterApplyStatus *string `type:"string"` + + // The name of the parameter. + ParameterName *string `type:"string"` } // String returns the string representation -func (s AwsKmsKeyDetails) String() string { +func (s AwsRedshiftClusterClusterParameterStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsKmsKeyDetails) GoString() string { +func (s AwsRedshiftClusterClusterParameterStatus) GoString() string { return s.String() } -// SetAWSAccountId sets the AWSAccountId field's value. -func (s *AwsKmsKeyDetails) SetAWSAccountId(v string) *AwsKmsKeyDetails { - s.AWSAccountId = &v +// SetParameterApplyErrorDescription sets the ParameterApplyErrorDescription field's value. +func (s *AwsRedshiftClusterClusterParameterStatus) SetParameterApplyErrorDescription(v string) *AwsRedshiftClusterClusterParameterStatus { + s.ParameterApplyErrorDescription = &v return s } -// SetCreationDate sets the CreationDate field's value. -func (s *AwsKmsKeyDetails) SetCreationDate(v float64) *AwsKmsKeyDetails { - s.CreationDate = &v +// SetParameterApplyStatus sets the ParameterApplyStatus field's value. +func (s *AwsRedshiftClusterClusterParameterStatus) SetParameterApplyStatus(v string) *AwsRedshiftClusterClusterParameterStatus { + s.ParameterApplyStatus = &v return s } -// SetKeyId sets the KeyId field's value. -func (s *AwsKmsKeyDetails) SetKeyId(v string) *AwsKmsKeyDetails { - s.KeyId = &v +// SetParameterName sets the ParameterName field's value. +func (s *AwsRedshiftClusterClusterParameterStatus) SetParameterName(v string) *AwsRedshiftClusterClusterParameterStatus { + s.ParameterName = &v return s } -// SetKeyManager sets the KeyManager field's value. -func (s *AwsKmsKeyDetails) SetKeyManager(v string) *AwsKmsKeyDetails { - s.KeyManager = &v - return s +// A security group that is associated with the cluster. +type AwsRedshiftClusterClusterSecurityGroup struct { + _ struct{} `type:"structure"` + + // The name of the cluster security group. + ClusterSecurityGroupName *string `type:"string"` + + // The status of the cluster security group. + Status *string `type:"string"` +} + +// String returns the string representation +func (s AwsRedshiftClusterClusterSecurityGroup) String() string { + return awsutil.Prettify(s) } -// SetKeyState sets the KeyState field's value. -func (s *AwsKmsKeyDetails) SetKeyState(v string) *AwsKmsKeyDetails { - s.KeyState = &v +// GoString returns the string representation +func (s AwsRedshiftClusterClusterSecurityGroup) GoString() string { + return s.String() +} + +// SetClusterSecurityGroupName sets the ClusterSecurityGroupName field's value. 
+func (s *AwsRedshiftClusterClusterSecurityGroup) SetClusterSecurityGroupName(v string) *AwsRedshiftClusterClusterSecurityGroup { + s.ClusterSecurityGroupName = &v return s } -// SetOrigin sets the Origin field's value. -func (s *AwsKmsKeyDetails) SetOrigin(v string) *AwsKmsKeyDetails { - s.Origin = &v +// SetStatus sets the Status field's value. +func (s *AwsRedshiftClusterClusterSecurityGroup) SetStatus(v string) *AwsRedshiftClusterClusterSecurityGroup { + s.Status = &v return s } -// The code for the Lambda function. You can specify either an object in Amazon -// S3, or upload a deployment package directly. -type AwsLambdaFunctionCode struct { +// Information about a cross-Region snapshot copy. +type AwsRedshiftClusterClusterSnapshotCopyStatus struct { _ struct{} `type:"structure"` - // An Amazon S3 bucket in the same AWS Region as your function. The bucket can - // be in a different AWS account. - S3Bucket *string `type:"string"` + // The destination Region that snapshots are automatically copied to when cross-Region + // snapshot copy is enabled. + DestinationRegion *string `type:"string"` - // The Amazon S3 key of the deployment package. - S3Key *string `type:"string"` + // The number of days that manual snapshots are retained in the destination + // region after they are copied from a source region. + // + // If the value is -1, then the manual snapshot is retained indefinitely. + // + // Valid values: Either -1 or an integer between 1 and 3,653 + ManualSnapshotRetentionPeriod *int64 `type:"integer"` - // For versioned objects, the version of the deployment package object to use. - S3ObjectVersion *string `type:"string"` + // The number of days to retain automated snapshots in the destination Region + // after they are copied from a source Region. + RetentionPeriod *int64 `type:"integer"` - // The base64-encoded contents of the deployment package. AWS SDK and AWS CLI - // clients handle the encoding for you. - ZipFile *string `type:"string"` + // The name of the snapshot copy grant. + SnapshotCopyGrantName *string `type:"string"` } // String returns the string representation -func (s AwsLambdaFunctionCode) String() string { +func (s AwsRedshiftClusterClusterSnapshotCopyStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsLambdaFunctionCode) GoString() string { +func (s AwsRedshiftClusterClusterSnapshotCopyStatus) GoString() string { return s.String() } -// SetS3Bucket sets the S3Bucket field's value. -func (s *AwsLambdaFunctionCode) SetS3Bucket(v string) *AwsLambdaFunctionCode { - s.S3Bucket = &v +// SetDestinationRegion sets the DestinationRegion field's value. +func (s *AwsRedshiftClusterClusterSnapshotCopyStatus) SetDestinationRegion(v string) *AwsRedshiftClusterClusterSnapshotCopyStatus { + s.DestinationRegion = &v return s } -// SetS3Key sets the S3Key field's value. -func (s *AwsLambdaFunctionCode) SetS3Key(v string) *AwsLambdaFunctionCode { - s.S3Key = &v +// SetManualSnapshotRetentionPeriod sets the ManualSnapshotRetentionPeriod field's value. +func (s *AwsRedshiftClusterClusterSnapshotCopyStatus) SetManualSnapshotRetentionPeriod(v int64) *AwsRedshiftClusterClusterSnapshotCopyStatus { + s.ManualSnapshotRetentionPeriod = &v return s } -// SetS3ObjectVersion sets the S3ObjectVersion field's value. -func (s *AwsLambdaFunctionCode) SetS3ObjectVersion(v string) *AwsLambdaFunctionCode { - s.S3ObjectVersion = &v +// SetRetentionPeriod sets the RetentionPeriod field's value. 
+func (s *AwsRedshiftClusterClusterSnapshotCopyStatus) SetRetentionPeriod(v int64) *AwsRedshiftClusterClusterSnapshotCopyStatus {
+ s.RetentionPeriod = &v
 return s
}

-// SetZipFile sets the ZipFile field's value.
-func (s *AwsLambdaFunctionCode) SetZipFile(v string) *AwsLambdaFunctionCode {
- s.ZipFile = &v
+// SetSnapshotCopyGrantName sets the SnapshotCopyGrantName field's value.
+func (s *AwsRedshiftClusterClusterSnapshotCopyStatus) SetSnapshotCopyGrantName(v string) *AwsRedshiftClusterClusterSnapshotCopyStatus {
+ s.SnapshotCopyGrantName = &v
 return s
}

-// The dead-letter queue for failed asynchronous invocations.
-type AwsLambdaFunctionDeadLetterConfig struct {
+// A time window during which maintenance was deferred for an Amazon Redshift
+// cluster.
+type AwsRedshiftClusterDeferredMaintenanceWindow struct {
 _ struct{} `type:"structure"`

- // The Amazon Resource Name (ARN) of an Amazon SQS queue or Amazon SNS topic.
- TargetArn *string `type:"string"`
+ // The end of the time window for which maintenance was deferred.
+ //
+ // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time
+ // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot
+ // contain spaces. For example, 2020-03-22T13:22:13.933Z.
+ DeferMaintenanceEndTime *string `type:"string"`
+
+ // The identifier of the maintenance window.
+ DeferMaintenanceIdentifier *string `type:"string"`
+
+ // The start of the time window for which maintenance was deferred.
+ //
+ // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time
+ // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot
+ // contain spaces. For example, 2020-03-22T13:22:13.933Z.
+ DeferMaintenanceStartTime *string `type:"string"`
}

// String returns the string representation
-func (s AwsLambdaFunctionDeadLetterConfig) String() string {
+func (s AwsRedshiftClusterDeferredMaintenanceWindow) String() string {
 return awsutil.Prettify(s)
}

// GoString returns the string representation
-func (s AwsLambdaFunctionDeadLetterConfig) GoString() string {
+func (s AwsRedshiftClusterDeferredMaintenanceWindow) GoString() string {
 return s.String()
}

-// SetTargetArn sets the TargetArn field's value.
-func (s *AwsLambdaFunctionDeadLetterConfig) SetTargetArn(v string) *AwsLambdaFunctionDeadLetterConfig {
- s.TargetArn = &v
+// SetDeferMaintenanceEndTime sets the DeferMaintenanceEndTime field's value.
+func (s *AwsRedshiftClusterDeferredMaintenanceWindow) SetDeferMaintenanceEndTime(v string) *AwsRedshiftClusterDeferredMaintenanceWindow {
+ s.DeferMaintenanceEndTime = &v
 return s
}

-// Details about a function's configuration.
-type AwsLambdaFunctionDetails struct {
+// SetDeferMaintenanceIdentifier sets the DeferMaintenanceIdentifier field's value.
+func (s *AwsRedshiftClusterDeferredMaintenanceWindow) SetDeferMaintenanceIdentifier(v string) *AwsRedshiftClusterDeferredMaintenanceWindow {
+ s.DeferMaintenanceIdentifier = &v
+ return s
+}
+
+// SetDeferMaintenanceStartTime sets the DeferMaintenanceStartTime field's value.
+func (s *AwsRedshiftClusterDeferredMaintenanceWindow) SetDeferMaintenanceStartTime(v string) *AwsRedshiftClusterDeferredMaintenanceWindow {
+ s.DeferMaintenanceStartTime = &v
+ return s
+}
+
+// Details about an Amazon Redshift cluster.
+type AwsRedshiftClusterDetails struct {
 _ struct{} `type:"structure"`

- // An AwsLambdaFunctionCode object.
- Code *AwsLambdaFunctionCode `type:"structure"`
+ // Indicates whether major version upgrades are applied automatically to the
+ // cluster during the maintenance window.
+ AllowVersionUpgrade *bool `type:"boolean"`

- // The SHA256 hash of the function's deployment package.
- CodeSha256 *string `type:"string"`
+ // The number of days that automatic cluster snapshots are retained.
+ AutomatedSnapshotRetentionPeriod *int64 `type:"integer"`

- // The function's dead letter queue.
- DeadLetterConfig *AwsLambdaFunctionDeadLetterConfig `type:"structure"`
+ // The name of the Availability Zone in which the cluster is located.
+ AvailabilityZone *string `type:"string"`

- // The function's environment variables.
- Environment *AwsLambdaFunctionEnvironment `type:"structure"`
+ // The availability status of the cluster for queries. Possible values are the
+ // following:
+ //
+ // * Available - The cluster is available for queries.
+ //
+ // * Unavailable - The cluster is not available for queries.
+ //
+ // * Maintenance - The cluster is intermittently available for queries due
+ // to maintenance activities.
+ //
+ // * Modifying - The cluster is intermittently available for queries due to
+ // changes that modify the cluster.
+ //
+ // * Failed - The cluster failed and is not available for queries.
+ ClusterAvailabilityStatus *string `type:"string"`

- // The name of the function.
- FunctionName *string `type:"string"`
+ // Indicates when the cluster was created.
+ //
+ // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time
+ // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot
+ // contain spaces. For example, 2020-03-22T13:22:13.933Z.
+ ClusterCreateTime *string `type:"string"`

- // The function that Lambda calls to begin executing your function.
- Handler *string `type:"string"`
+ // The unique identifier of the cluster.
+ ClusterIdentifier *string `type:"string"`

- // The KMS key that's used to encrypt the function's environment variables.
- // This key is only returned if you've configured a customer managed CMK.
- KmsKeyArn *string `type:"string"`
+ // The nodes in the cluster.
+ ClusterNodes []*AwsRedshiftClusterClusterNode `type:"list"`

- // The date and time that the function was last updated, in ISO-8601 format
- // (YYYY-MM-DDThh:mm:ss.sTZD).
- LastModified *string `type:"string"`
+ // The list of cluster parameter groups that are associated with this cluster.
+ ClusterParameterGroups []*AwsRedshiftClusterClusterParameterGroup `type:"list"`

- // The function's layers.
- Layers []*AwsLambdaFunctionLayer `type:"list"`
+ // The public key for the cluster.
+ ClusterPublicKey *string `type:"string"`

- // For Lambda@Edge functions, the ARN of the master function.
- MasterArn *string `type:"string"`
+ // The specific revision number of the database in the cluster.
+ ClusterRevisionNumber *string `type:"string"`

- // The memory that's allocated to the function.
- MemorySize *int64 `type:"integer"`
+ // A list of cluster security groups that are associated with the cluster.
+ ClusterSecurityGroups []*AwsRedshiftClusterClusterSecurityGroup `type:"list"`

- // The latest updated revision of the function or alias.
- RevisionId *string `type:"string"`
+ // Information about the destination Region and retention period for the cross-Region
+ // snapshot copy.
+ ClusterSnapshotCopyStatus *AwsRedshiftClusterClusterSnapshotCopyStatus `type:"structure"`

- // The function's execution role.
- Role *string `type:"string"`
+ // The current status of the cluster.
+ //
+ // Valid values: available | available, prep-for-resize | available, resize-cleanup
+ // | cancelling-resize | creating | deleting | final-snapshot | hardware-failure
+ // | incompatible-hsm | incompatible-network | incompatible-parameters | incompatible-restore
+ // | modifying | paused | rebooting | renaming | resizing | rotating-keys |
+ // storage-full | updating-hsm
+ ClusterStatus *string `type:"string"`

- // The runtime environment for the Lambda function.
- Runtime *string `type:"string"`
+ // The name of the subnet group that is associated with the cluster. This parameter
+ // is valid only when the cluster is in a VPC.
+ ClusterSubnetGroupName *string `type:"string"`

- // The amount of time that Lambda allows a function to run before stopping it.
- Timeout *int64 `type:"integer"`
+ // The version ID of the Amazon Redshift engine that runs on the cluster.
+ ClusterVersion *string `type:"string"`

- // The function's AWS X-Ray tracing configuration.
- TracingConfig *AwsLambdaFunctionTracingConfig `type:"structure"`
+ // The name of the initial database that was created when the cluster was created.
+ //
+ // The same name is returned for the life of the cluster.
+ //
+ // If an initial database is not specified, a database named dev is created
+ // by default.
+ DBName *string `type:"string"`

- // The version of the Lambda function.
- Version *string `type:"string"`
+ // List of time windows during which maintenance was deferred.
+ DeferredMaintenanceWindows []*AwsRedshiftClusterDeferredMaintenanceWindow `type:"list"`

- // The function's networking configuration.
- VpcConfig *AwsLambdaFunctionVpcConfig `type:"structure"`
+ // Information about the status of the Elastic IP (EIP) address.
+ ElasticIpStatus *AwsRedshiftClusterElasticIpStatus `type:"structure"`
+
+ // The number of nodes that you can use the elastic resize method to resize
+ // the cluster to.
+ ElasticResizeNumberOfNodeOptions *string `type:"string"`
+
+ // Indicates whether the data in the cluster is encrypted at rest.
+ Encrypted *bool `type:"boolean"`
+
+ // The connection endpoint.
+ Endpoint *AwsRedshiftClusterEndpoint `type:"structure"`
+
+ // Indicates whether to create the cluster with enhanced VPC routing enabled.
+ EnhancedVpcRouting *bool `type:"boolean"`
+
+ // Indicates when the next snapshot is expected to be taken. The cluster must
+ // have a valid snapshot schedule and have backups enabled.
+ //
+ // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time
+ // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot
+ // contain spaces. For example, 2020-03-22T13:22:13.933Z.
+ ExpectedNextSnapshotScheduleTime *string `type:"string"`
+
+ // The status of the next expected snapshot.
+ //
+ // Valid values: OnTrack | Pending
+ ExpectedNextSnapshotScheduleTimeStatus *string `type:"string"`
+
+ // Information about whether the Amazon Redshift cluster finished applying any
+ // changes to hardware security module (HSM) settings that were specified in
+ // a modify cluster command.
+ HsmStatus *AwsRedshiftClusterHsmStatus `type:"structure"`
+
+ // A list of IAM roles that the cluster can use to access other AWS services.
+ IamRoles []*AwsRedshiftClusterIamRole `type:"list"`
+
+ // The identifier of the AWS KMS encryption key that is used to encrypt data
+ // in the cluster.
+ KmsKeyId *string `type:"string"`
+
+ // The name of the maintenance track for the cluster.
+ MaintenanceTrackName *string `type:"string"` + + // The default number of days to retain a manual snapshot. + // + // If the value is -1, the snapshot is retained indefinitely. + // + // This setting doesn't change the retention period of existing snapshots. + // + // Valid values: Either -1 or an integer between 1 and 3,653 + ManualSnapshotRetentionPeriod *int64 `type:"integer"` + + // The master user name for the cluster. This name is used to connect to the + // database that is specified in as the value of DBName. + MasterUsername *string `type:"string"` + + // Indicates the start of the next maintenance window. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + NextMaintenanceWindowStartTime *string `type:"string"` + + // The node type for the nodes in the cluster. + NodeType *string `type:"string"` + + // The number of compute nodes in the cluster. + NumberOfNodes *int64 `type:"integer"` + + // A list of cluster operations that are waiting to start. + PendingActions []*string `type:"list"` + + // A list of changes to the cluster that are currently pending. + PendingModifiedValues *AwsRedshiftClusterPendingModifiedValues `type:"structure"` + + // The weekly time range, in Universal Coordinated Time (UTC), during which + // system maintenance can occur. + // + // Format: :HH:MM-:HH:MM + // + // For the day values, use mon | tue | wed | thu | fri | sat | sun + // + // For example, sun:09:32-sun:10:02 + PreferredMaintenanceWindow *string `type:"string"` + + // Whether the cluster can be accessed from a public network. + PubliclyAccessible *bool `type:"boolean"` + + // Information about the resize operation for the cluster. + ResizeInfo *AwsRedshiftClusterResizeInfo `type:"structure"` + + // Information about the status of a cluster restore action. Only applies to + // a cluster that was created by restoring a snapshot. + RestoreStatus *AwsRedshiftClusterRestoreStatus `type:"structure"` + + // A unique identifier for the cluster snapshot schedule. + SnapshotScheduleIdentifier *string `type:"string"` + + // The current state of the cluster snapshot schedule. + // + // Valid values: MODIFYING | ACTIVE | FAILED + SnapshotScheduleState *string `type:"string"` + + // The identifier of the VPC that the cluster is in, if the cluster is in a + // VPC. + VpcId *string `type:"string"` + + // The list of VPC security groups that the cluster belongs to, if the cluster + // is in a VPC. + VpcSecurityGroups []*AwsRedshiftClusterVpcSecurityGroup `type:"list"` } // String returns the string representation -func (s AwsLambdaFunctionDetails) String() string { +func (s AwsRedshiftClusterDetails) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsLambdaFunctionDetails) GoString() string { +func (s AwsRedshiftClusterDetails) GoString() string { return s.String() } -// SetCode sets the Code field's value. -func (s *AwsLambdaFunctionDetails) SetCode(v *AwsLambdaFunctionCode) *AwsLambdaFunctionDetails { - s.Code = v +// SetAllowVersionUpgrade sets the AllowVersionUpgrade field's value. +func (s *AwsRedshiftClusterDetails) SetAllowVersionUpgrade(v bool) *AwsRedshiftClusterDetails { + s.AllowVersionUpgrade = &v return s } -// SetCodeSha256 sets the CodeSha256 field's value. 
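All of the Redshift detail shapes above follow the generated-SDK convention used throughout this file: exported pointer fields plus chainable Set* helpers that store a pointer to the argument and return the receiver. The following is a minimal illustrative sketch only, assuming these types are exposed from the vendored github.com/aws/aws-sdk-go/service/securityhub package; the cluster identifier, node type, and Region values are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	// Each Set* helper stores a pointer to its argument and returns the
	// receiver, so the calls can be chained.
	details := (&securityhub.AwsRedshiftClusterDetails{}).
		SetClusterIdentifier("example-cluster").
		SetNodeType("dc2.large").
		SetNumberOfNodes(2).
		SetClusterStatus("available").
		SetClusterSnapshotCopyStatus((&securityhub.AwsRedshiftClusterClusterSnapshotCopyStatus{}).
			SetDestinationRegion("us-west-2").
			SetRetentionPeriod(7))

	// String pretty-prints the populated shape via awsutil.Prettify.
	fmt.Println(details.String())
}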
-func (s *AwsLambdaFunctionDetails) SetCodeSha256(v string) *AwsLambdaFunctionDetails { - s.CodeSha256 = &v +// SetAutomatedSnapshotRetentionPeriod sets the AutomatedSnapshotRetentionPeriod field's value. +func (s *AwsRedshiftClusterDetails) SetAutomatedSnapshotRetentionPeriod(v int64) *AwsRedshiftClusterDetails { + s.AutomatedSnapshotRetentionPeriod = &v return s } -// SetDeadLetterConfig sets the DeadLetterConfig field's value. -func (s *AwsLambdaFunctionDetails) SetDeadLetterConfig(v *AwsLambdaFunctionDeadLetterConfig) *AwsLambdaFunctionDetails { - s.DeadLetterConfig = v +// SetAvailabilityZone sets the AvailabilityZone field's value. +func (s *AwsRedshiftClusterDetails) SetAvailabilityZone(v string) *AwsRedshiftClusterDetails { + s.AvailabilityZone = &v return s } -// SetEnvironment sets the Environment field's value. -func (s *AwsLambdaFunctionDetails) SetEnvironment(v *AwsLambdaFunctionEnvironment) *AwsLambdaFunctionDetails { - s.Environment = v +// SetClusterAvailabilityStatus sets the ClusterAvailabilityStatus field's value. +func (s *AwsRedshiftClusterDetails) SetClusterAvailabilityStatus(v string) *AwsRedshiftClusterDetails { + s.ClusterAvailabilityStatus = &v return s } -// SetFunctionName sets the FunctionName field's value. -func (s *AwsLambdaFunctionDetails) SetFunctionName(v string) *AwsLambdaFunctionDetails { - s.FunctionName = &v +// SetClusterCreateTime sets the ClusterCreateTime field's value. +func (s *AwsRedshiftClusterDetails) SetClusterCreateTime(v string) *AwsRedshiftClusterDetails { + s.ClusterCreateTime = &v return s } -// SetHandler sets the Handler field's value. -func (s *AwsLambdaFunctionDetails) SetHandler(v string) *AwsLambdaFunctionDetails { - s.Handler = &v +// SetClusterIdentifier sets the ClusterIdentifier field's value. +func (s *AwsRedshiftClusterDetails) SetClusterIdentifier(v string) *AwsRedshiftClusterDetails { + s.ClusterIdentifier = &v return s } -// SetKmsKeyArn sets the KmsKeyArn field's value. -func (s *AwsLambdaFunctionDetails) SetKmsKeyArn(v string) *AwsLambdaFunctionDetails { - s.KmsKeyArn = &v +// SetClusterNodes sets the ClusterNodes field's value. +func (s *AwsRedshiftClusterDetails) SetClusterNodes(v []*AwsRedshiftClusterClusterNode) *AwsRedshiftClusterDetails { + s.ClusterNodes = v return s } -// SetLastModified sets the LastModified field's value. -func (s *AwsLambdaFunctionDetails) SetLastModified(v string) *AwsLambdaFunctionDetails { - s.LastModified = &v +// SetClusterParameterGroups sets the ClusterParameterGroups field's value. +func (s *AwsRedshiftClusterDetails) SetClusterParameterGroups(v []*AwsRedshiftClusterClusterParameterGroup) *AwsRedshiftClusterDetails { + s.ClusterParameterGroups = v return s } -// SetLayers sets the Layers field's value. -func (s *AwsLambdaFunctionDetails) SetLayers(v []*AwsLambdaFunctionLayer) *AwsLambdaFunctionDetails { - s.Layers = v +// SetClusterPublicKey sets the ClusterPublicKey field's value. +func (s *AwsRedshiftClusterDetails) SetClusterPublicKey(v string) *AwsRedshiftClusterDetails { + s.ClusterPublicKey = &v return s } -// SetMasterArn sets the MasterArn field's value. -func (s *AwsLambdaFunctionDetails) SetMasterArn(v string) *AwsLambdaFunctionDetails { - s.MasterArn = &v +// SetClusterRevisionNumber sets the ClusterRevisionNumber field's value. +func (s *AwsRedshiftClusterDetails) SetClusterRevisionNumber(v string) *AwsRedshiftClusterDetails { + s.ClusterRevisionNumber = &v return s } -// SetMemorySize sets the MemorySize field's value. 
-func (s *AwsLambdaFunctionDetails) SetMemorySize(v int64) *AwsLambdaFunctionDetails { - s.MemorySize = &v +// SetClusterSecurityGroups sets the ClusterSecurityGroups field's value. +func (s *AwsRedshiftClusterDetails) SetClusterSecurityGroups(v []*AwsRedshiftClusterClusterSecurityGroup) *AwsRedshiftClusterDetails { + s.ClusterSecurityGroups = v return s } -// SetRevisionId sets the RevisionId field's value. -func (s *AwsLambdaFunctionDetails) SetRevisionId(v string) *AwsLambdaFunctionDetails { - s.RevisionId = &v +// SetClusterSnapshotCopyStatus sets the ClusterSnapshotCopyStatus field's value. +func (s *AwsRedshiftClusterDetails) SetClusterSnapshotCopyStatus(v *AwsRedshiftClusterClusterSnapshotCopyStatus) *AwsRedshiftClusterDetails { + s.ClusterSnapshotCopyStatus = v return s } -// SetRole sets the Role field's value. -func (s *AwsLambdaFunctionDetails) SetRole(v string) *AwsLambdaFunctionDetails { - s.Role = &v +// SetClusterStatus sets the ClusterStatus field's value. +func (s *AwsRedshiftClusterDetails) SetClusterStatus(v string) *AwsRedshiftClusterDetails { + s.ClusterStatus = &v return s } -// SetRuntime sets the Runtime field's value. -func (s *AwsLambdaFunctionDetails) SetRuntime(v string) *AwsLambdaFunctionDetails { - s.Runtime = &v +// SetClusterSubnetGroupName sets the ClusterSubnetGroupName field's value. +func (s *AwsRedshiftClusterDetails) SetClusterSubnetGroupName(v string) *AwsRedshiftClusterDetails { + s.ClusterSubnetGroupName = &v return s } -// SetTimeout sets the Timeout field's value. -func (s *AwsLambdaFunctionDetails) SetTimeout(v int64) *AwsLambdaFunctionDetails { - s.Timeout = &v +// SetClusterVersion sets the ClusterVersion field's value. +func (s *AwsRedshiftClusterDetails) SetClusterVersion(v string) *AwsRedshiftClusterDetails { + s.ClusterVersion = &v return s } -// SetTracingConfig sets the TracingConfig field's value. -func (s *AwsLambdaFunctionDetails) SetTracingConfig(v *AwsLambdaFunctionTracingConfig) *AwsLambdaFunctionDetails { - s.TracingConfig = v +// SetDBName sets the DBName field's value. +func (s *AwsRedshiftClusterDetails) SetDBName(v string) *AwsRedshiftClusterDetails { + s.DBName = &v return s } -// SetVersion sets the Version field's value. -func (s *AwsLambdaFunctionDetails) SetVersion(v string) *AwsLambdaFunctionDetails { - s.Version = &v +// SetDeferredMaintenanceWindows sets the DeferredMaintenanceWindows field's value. +func (s *AwsRedshiftClusterDetails) SetDeferredMaintenanceWindows(v []*AwsRedshiftClusterDeferredMaintenanceWindow) *AwsRedshiftClusterDetails { + s.DeferredMaintenanceWindows = v return s } -// SetVpcConfig sets the VpcConfig field's value. -func (s *AwsLambdaFunctionDetails) SetVpcConfig(v *AwsLambdaFunctionVpcConfig) *AwsLambdaFunctionDetails { - s.VpcConfig = v +// SetElasticIpStatus sets the ElasticIpStatus field's value. +func (s *AwsRedshiftClusterDetails) SetElasticIpStatus(v *AwsRedshiftClusterElasticIpStatus) *AwsRedshiftClusterDetails { + s.ElasticIpStatus = v return s } -// A function's environment variable settings. -type AwsLambdaFunctionEnvironment struct { - _ struct{} `type:"structure"` - - // An AwsLambdaFunctionEnvironmentError object. - Error *AwsLambdaFunctionEnvironmentError `type:"structure"` +// SetElasticResizeNumberOfNodeOptions sets the ElasticResizeNumberOfNodeOptions field's value. +func (s *AwsRedshiftClusterDetails) SetElasticResizeNumberOfNodeOptions(v string) *AwsRedshiftClusterDetails { + s.ElasticResizeNumberOfNodeOptions = &v + return s +} - // Environment variable key-value pairs. 
- Variables map[string]*string `type:"map"` +// SetEncrypted sets the Encrypted field's value. +func (s *AwsRedshiftClusterDetails) SetEncrypted(v bool) *AwsRedshiftClusterDetails { + s.Encrypted = &v + return s } -// String returns the string representation -func (s AwsLambdaFunctionEnvironment) String() string { - return awsutil.Prettify(s) +// SetEndpoint sets the Endpoint field's value. +func (s *AwsRedshiftClusterDetails) SetEndpoint(v *AwsRedshiftClusterEndpoint) *AwsRedshiftClusterDetails { + s.Endpoint = v + return s } -// GoString returns the string representation -func (s AwsLambdaFunctionEnvironment) GoString() string { - return s.String() +// SetEnhancedVpcRouting sets the EnhancedVpcRouting field's value. +func (s *AwsRedshiftClusterDetails) SetEnhancedVpcRouting(v bool) *AwsRedshiftClusterDetails { + s.EnhancedVpcRouting = &v + return s } -// SetError sets the Error field's value. -func (s *AwsLambdaFunctionEnvironment) SetError(v *AwsLambdaFunctionEnvironmentError) *AwsLambdaFunctionEnvironment { - s.Error = v +// SetExpectedNextSnapshotScheduleTime sets the ExpectedNextSnapshotScheduleTime field's value. +func (s *AwsRedshiftClusterDetails) SetExpectedNextSnapshotScheduleTime(v string) *AwsRedshiftClusterDetails { + s.ExpectedNextSnapshotScheduleTime = &v return s } -// SetVariables sets the Variables field's value. -func (s *AwsLambdaFunctionEnvironment) SetVariables(v map[string]*string) *AwsLambdaFunctionEnvironment { - s.Variables = v +// SetExpectedNextSnapshotScheduleTimeStatus sets the ExpectedNextSnapshotScheduleTimeStatus field's value. +func (s *AwsRedshiftClusterDetails) SetExpectedNextSnapshotScheduleTimeStatus(v string) *AwsRedshiftClusterDetails { + s.ExpectedNextSnapshotScheduleTimeStatus = &v return s } -// Error messages for environment variables that couldn't be applied. -type AwsLambdaFunctionEnvironmentError struct { - _ struct{} `type:"structure"` +// SetHsmStatus sets the HsmStatus field's value. +func (s *AwsRedshiftClusterDetails) SetHsmStatus(v *AwsRedshiftClusterHsmStatus) *AwsRedshiftClusterDetails { + s.HsmStatus = v + return s +} - // The error code. - ErrorCode *string `type:"string"` +// SetIamRoles sets the IamRoles field's value. +func (s *AwsRedshiftClusterDetails) SetIamRoles(v []*AwsRedshiftClusterIamRole) *AwsRedshiftClusterDetails { + s.IamRoles = v + return s +} - // The error message. - Message *string `type:"string"` +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *AwsRedshiftClusterDetails) SetKmsKeyId(v string) *AwsRedshiftClusterDetails { + s.KmsKeyId = &v + return s } -// String returns the string representation -func (s AwsLambdaFunctionEnvironmentError) String() string { - return awsutil.Prettify(s) +// SetMaintenanceTrackName sets the MaintenanceTrackName field's value. +func (s *AwsRedshiftClusterDetails) SetMaintenanceTrackName(v string) *AwsRedshiftClusterDetails { + s.MaintenanceTrackName = &v + return s } -// GoString returns the string representation -func (s AwsLambdaFunctionEnvironmentError) GoString() string { - return s.String() +// SetManualSnapshotRetentionPeriod sets the ManualSnapshotRetentionPeriod field's value. +func (s *AwsRedshiftClusterDetails) SetManualSnapshotRetentionPeriod(v int64) *AwsRedshiftClusterDetails { + s.ManualSnapshotRetentionPeriod = &v + return s } -// SetErrorCode sets the ErrorCode field's value. -func (s *AwsLambdaFunctionEnvironmentError) SetErrorCode(v string) *AwsLambdaFunctionEnvironmentError { - s.ErrorCode = &v +// SetMasterUsername sets the MasterUsername field's value. 
+func (s *AwsRedshiftClusterDetails) SetMasterUsername(v string) *AwsRedshiftClusterDetails { + s.MasterUsername = &v return s } -// SetMessage sets the Message field's value. -func (s *AwsLambdaFunctionEnvironmentError) SetMessage(v string) *AwsLambdaFunctionEnvironmentError { - s.Message = &v +// SetNextMaintenanceWindowStartTime sets the NextMaintenanceWindowStartTime field's value. +func (s *AwsRedshiftClusterDetails) SetNextMaintenanceWindowStartTime(v string) *AwsRedshiftClusterDetails { + s.NextMaintenanceWindowStartTime = &v return s } -// An AWS Lambda layer. -type AwsLambdaFunctionLayer struct { - _ struct{} `type:"structure"` +// SetNodeType sets the NodeType field's value. +func (s *AwsRedshiftClusterDetails) SetNodeType(v string) *AwsRedshiftClusterDetails { + s.NodeType = &v + return s +} - // The Amazon Resource Name (ARN) of the function layer. - Arn *string `type:"string"` +// SetNumberOfNodes sets the NumberOfNodes field's value. +func (s *AwsRedshiftClusterDetails) SetNumberOfNodes(v int64) *AwsRedshiftClusterDetails { + s.NumberOfNodes = &v + return s +} - // The size of the layer archive in bytes. - CodeSize *int64 `type:"integer"` +// SetPendingActions sets the PendingActions field's value. +func (s *AwsRedshiftClusterDetails) SetPendingActions(v []*string) *AwsRedshiftClusterDetails { + s.PendingActions = v + return s } -// String returns the string representation -func (s AwsLambdaFunctionLayer) String() string { - return awsutil.Prettify(s) +// SetPendingModifiedValues sets the PendingModifiedValues field's value. +func (s *AwsRedshiftClusterDetails) SetPendingModifiedValues(v *AwsRedshiftClusterPendingModifiedValues) *AwsRedshiftClusterDetails { + s.PendingModifiedValues = v + return s } -// GoString returns the string representation -func (s AwsLambdaFunctionLayer) GoString() string { - return s.String() +// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value. +func (s *AwsRedshiftClusterDetails) SetPreferredMaintenanceWindow(v string) *AwsRedshiftClusterDetails { + s.PreferredMaintenanceWindow = &v + return s } -// SetArn sets the Arn field's value. -func (s *AwsLambdaFunctionLayer) SetArn(v string) *AwsLambdaFunctionLayer { - s.Arn = &v +// SetPubliclyAccessible sets the PubliclyAccessible field's value. +func (s *AwsRedshiftClusterDetails) SetPubliclyAccessible(v bool) *AwsRedshiftClusterDetails { + s.PubliclyAccessible = &v return s } -// SetCodeSize sets the CodeSize field's value. -func (s *AwsLambdaFunctionLayer) SetCodeSize(v int64) *AwsLambdaFunctionLayer { - s.CodeSize = &v +// SetResizeInfo sets the ResizeInfo field's value. +func (s *AwsRedshiftClusterDetails) SetResizeInfo(v *AwsRedshiftClusterResizeInfo) *AwsRedshiftClusterDetails { + s.ResizeInfo = v return s } -// The function's AWS X-Ray tracing configuration. -type AwsLambdaFunctionTracingConfig struct { - _ struct{} `type:"structure"` +// SetRestoreStatus sets the RestoreStatus field's value. +func (s *AwsRedshiftClusterDetails) SetRestoreStatus(v *AwsRedshiftClusterRestoreStatus) *AwsRedshiftClusterDetails { + s.RestoreStatus = v + return s +} - // The tracing mode. - Mode *string `type:"string"` +// SetSnapshotScheduleIdentifier sets the SnapshotScheduleIdentifier field's value. 
+func (s *AwsRedshiftClusterDetails) SetSnapshotScheduleIdentifier(v string) *AwsRedshiftClusterDetails { + s.SnapshotScheduleIdentifier = &v + return s } -// String returns the string representation -func (s AwsLambdaFunctionTracingConfig) String() string { - return awsutil.Prettify(s) +// SetSnapshotScheduleState sets the SnapshotScheduleState field's value. +func (s *AwsRedshiftClusterDetails) SetSnapshotScheduleState(v string) *AwsRedshiftClusterDetails { + s.SnapshotScheduleState = &v + return s } -// GoString returns the string representation -func (s AwsLambdaFunctionTracingConfig) GoString() string { - return s.String() +// SetVpcId sets the VpcId field's value. +func (s *AwsRedshiftClusterDetails) SetVpcId(v string) *AwsRedshiftClusterDetails { + s.VpcId = &v + return s } -// SetMode sets the Mode field's value. -func (s *AwsLambdaFunctionTracingConfig) SetMode(v string) *AwsLambdaFunctionTracingConfig { - s.Mode = &v +// SetVpcSecurityGroups sets the VpcSecurityGroups field's value. +func (s *AwsRedshiftClusterDetails) SetVpcSecurityGroups(v []*AwsRedshiftClusterVpcSecurityGroup) *AwsRedshiftClusterDetails { + s.VpcSecurityGroups = v return s } -// The VPC security groups and subnets that are attached to a Lambda function. -// For more information, see VPC Settings. -type AwsLambdaFunctionVpcConfig struct { +// The status of the elastic IP (EIP) address for an Amazon Redshift cluster. +type AwsRedshiftClusterElasticIpStatus struct { _ struct{} `type:"structure"` - // A list of VPC security groups IDs. - SecurityGroupIds []*string `type:"list"` - - // A list of VPC subnet IDs. - SubnetIds []*string `type:"list"` + // The elastic IP address for the cluster. + ElasticIp *string `type:"string"` - // The ID of the VPC. - VpcId *string `type:"string"` + // The status of the elastic IP address. + Status *string `type:"string"` } // String returns the string representation -func (s AwsLambdaFunctionVpcConfig) String() string { +func (s AwsRedshiftClusterElasticIpStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsLambdaFunctionVpcConfig) GoString() string { +func (s AwsRedshiftClusterElasticIpStatus) GoString() string { return s.String() } -// SetSecurityGroupIds sets the SecurityGroupIds field's value. -func (s *AwsLambdaFunctionVpcConfig) SetSecurityGroupIds(v []*string) *AwsLambdaFunctionVpcConfig { - s.SecurityGroupIds = v - return s -} - -// SetSubnetIds sets the SubnetIds field's value. -func (s *AwsLambdaFunctionVpcConfig) SetSubnetIds(v []*string) *AwsLambdaFunctionVpcConfig { - s.SubnetIds = v +// SetElasticIp sets the ElasticIp field's value. +func (s *AwsRedshiftClusterElasticIpStatus) SetElasticIp(v string) *AwsRedshiftClusterElasticIpStatus { + s.ElasticIp = &v return s } -// SetVpcId sets the VpcId field's value. -func (s *AwsLambdaFunctionVpcConfig) SetVpcId(v string) *AwsLambdaFunctionVpcConfig { - s.VpcId = &v +// SetStatus sets the Status field's value. +func (s *AwsRedshiftClusterElasticIpStatus) SetStatus(v string) *AwsRedshiftClusterElasticIpStatus { + s.Status = &v return s } -// Details about a Lambda layer version. -type AwsLambdaLayerVersionDetails struct { +// The connection endpoint for an Amazon Redshift cluster. +type AwsRedshiftClusterEndpoint struct { _ struct{} `type:"structure"` - // The layer's compatible runtimes. Maximum number of 5 items. 
- // - // Valid values: nodejs10.x | nodejs12.x | java8 | java11 | python2.7 | python3.6 - // | python3.7 | python3.8 | dotnetcore1.0 | dotnetcore2.1 | go1.x | ruby2.5 - // | provided - CompatibleRuntimes []*string `type:"list"` - - // The date that the version was created, in ISO 8601 format. For example, 2018-11-27T15:10:45.123+0000. - CreatedDate *string `type:"string"` + // The DNS address of the cluster. + Address *string `type:"string"` - // The version number. - Version *int64 `type:"long"` + // The port that the database engine listens on. + Port *int64 `type:"integer"` } // String returns the string representation -func (s AwsLambdaLayerVersionDetails) String() string { +func (s AwsRedshiftClusterEndpoint) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsLambdaLayerVersionDetails) GoString() string { +func (s AwsRedshiftClusterEndpoint) GoString() string { return s.String() } -// SetCompatibleRuntimes sets the CompatibleRuntimes field's value. -func (s *AwsLambdaLayerVersionDetails) SetCompatibleRuntimes(v []*string) *AwsLambdaLayerVersionDetails { - s.CompatibleRuntimes = v - return s -} - -// SetCreatedDate sets the CreatedDate field's value. -func (s *AwsLambdaLayerVersionDetails) SetCreatedDate(v string) *AwsLambdaLayerVersionDetails { - s.CreatedDate = &v +// SetAddress sets the Address field's value. +func (s *AwsRedshiftClusterEndpoint) SetAddress(v string) *AwsRedshiftClusterEndpoint { + s.Address = &v return s } -// SetVersion sets the Version field's value. -func (s *AwsLambdaLayerVersionDetails) SetVersion(v int64) *AwsLambdaLayerVersionDetails { - s.Version = &v +// SetPort sets the Port field's value. +func (s *AwsRedshiftClusterEndpoint) SetPort(v int64) *AwsRedshiftClusterEndpoint { + s.Port = &v return s } -// An AWS Identity and Access Management (IAM) role associated with the DB instance. -type AwsRdsDbInstanceAssociatedRole struct { +// Information about whether an Amazon Redshift cluster finished applying any +// hardware changes to security module (HSM) settings that were specified in +// a modify cluster command. +type AwsRedshiftClusterHsmStatus struct { _ struct{} `type:"structure"` - // The name of the feature associated with the IAM)role. - FeatureName *string `type:"string"` + // The name of the HSM client certificate that the Amazon Redshift cluster uses + // to retrieve the data encryption keys that are stored in an HSM. + HsmClientCertificateIdentifier *string `type:"string"` - // The Amazon Resource Name (ARN) of the IAM role that is associated with the - // DB instance. - RoleArn *string `type:"string"` + // The name of the HSM configuration that contains the information that the + // Amazon Redshift cluster can use to retrieve and store keys in an HSM. + HsmConfigurationIdentifier *string `type:"string"` - // Describes the state of the association between the IAM role and the DB instance. - // The Status property returns one of the following values: - // - // * ACTIVE - the IAM role ARN is associated with the DB instance and can - // be used to access other AWS services on your behalf. + // Indicates whether the Amazon Redshift cluster has finished applying any HSM + // settings changes specified in a modify cluster command. // - // * PENDING - the IAM role ARN is being associated with the DB instance. 
+ // Type: String // - // * INVALID - the IAM role ARN is associated with the DB instance, but the - // DB instance is unable to assume the IAM role in order to access other - // AWS services on your behalf. + // Valid values: active | applying Status *string `type:"string"` } // String returns the string representation -func (s AwsRdsDbInstanceAssociatedRole) String() string { +func (s AwsRedshiftClusterHsmStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsRdsDbInstanceAssociatedRole) GoString() string { +func (s AwsRedshiftClusterHsmStatus) GoString() string { return s.String() } -// SetFeatureName sets the FeatureName field's value. -func (s *AwsRdsDbInstanceAssociatedRole) SetFeatureName(v string) *AwsRdsDbInstanceAssociatedRole { - s.FeatureName = &v +// SetHsmClientCertificateIdentifier sets the HsmClientCertificateIdentifier field's value. +func (s *AwsRedshiftClusterHsmStatus) SetHsmClientCertificateIdentifier(v string) *AwsRedshiftClusterHsmStatus { + s.HsmClientCertificateIdentifier = &v return s } -// SetRoleArn sets the RoleArn field's value. -func (s *AwsRdsDbInstanceAssociatedRole) SetRoleArn(v string) *AwsRdsDbInstanceAssociatedRole { - s.RoleArn = &v +// SetHsmConfigurationIdentifier sets the HsmConfigurationIdentifier field's value. +func (s *AwsRedshiftClusterHsmStatus) SetHsmConfigurationIdentifier(v string) *AwsRedshiftClusterHsmStatus { + s.HsmConfigurationIdentifier = &v return s } // SetStatus sets the Status field's value. -func (s *AwsRdsDbInstanceAssociatedRole) SetStatus(v string) *AwsRdsDbInstanceAssociatedRole { +func (s *AwsRedshiftClusterHsmStatus) SetStatus(v string) *AwsRedshiftClusterHsmStatus { s.Status = &v return s } -// Contains the details of an Amazon RDS DB instance. -type AwsRdsDbInstanceDetails struct { +// An IAM role that the cluster can use to access other AWS services. +type AwsRedshiftClusterIamRole struct { _ struct{} `type:"structure"` - // The AWS Identity and Access Management (IAM) roles associated with the DB - // instance. - AssociatedRoles []*AwsRdsDbInstanceAssociatedRole `type:"list"` - - // The identifier of the CA certificate for this DB instance. - CACertificateIdentifier *string `type:"string"` - - // If the DB instance is a member of a DB cluster, contains the name of the - // DB cluster that the DB instance is a member of. - DBClusterIdentifier *string `type:"string"` - - // Contains the name of the compute and memory capacity class of the DB instance. - DBInstanceClass *string `type:"string"` - - // Contains a user-supplied database identifier. This identifier is the unique - // key that identifies a DB instance. - DBInstanceIdentifier *string `type:"string"` - - // The meaning of this parameter differs according to the database engine you - // use. - // - // MySQL, MariaDB, SQL Server, PostgreSQL - // - // Contains the name of the initial database of this instance that was provided - // at create time, if one was specified when the DB instance was created. This - // same name is returned for the life of the DB instance. - // - // Oracle - // - // Contains the Oracle System ID (SID) of the created DB instance. Not shown - // when the returned parameters do not apply to an Oracle DB instance. - DBName *string `type:"string"` - - // Specifies the port that the DB instance listens on. If the DB instance is - // part of a DB cluster, this can be a different port than the DB cluster port. 
- DbInstancePort *int64 `type:"integer"` - - // The AWS Region-unique, immutable identifier for the DB instance. This identifier - // is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB - // instance is accessed. - DbiResourceId *string `type:"string"` - - // Indicates whether the DB instance has deletion protection enabled. - // - // When deletion protection is enabled, the database cannot be deleted. - DeletionProtection *bool `type:"boolean"` - - // Specifies the connection endpoint. - Endpoint *AwsRdsDbInstanceEndpoint `type:"structure"` - - // Provides the name of the database engine to use for this DB instance. - Engine *string `type:"string"` - - // Indicates the database engine version. - EngineVersion *string `type:"string"` - - // True if mapping of AWS Identity and Access Management (IAM) accounts to database - // accounts is enabled, and otherwise false. - // - // IAM database authentication can be enabled for the following database engines. - // - // * For MySQL 5.6, minor version 5.6.34 or higher - // - // * For MySQL 5.7, minor version 5.7.16 or higher - // - // * Aurora 5.6 or higher - IAMDatabaseAuthenticationEnabled *bool `type:"boolean"` - - // Provides the date and time the DB instance was created. - InstanceCreateTime *string `type:"string"` - - // If StorageEncrypted is true, the AWS KMS key identifier for the encrypted - // DB instance. - KmsKeyId *string `type:"string"` - - // Specifies the accessibility options for the DB instance. - // - // A value of true specifies an Internet-facing instance with a publicly resolvable - // DNS name, which resolves to a public IP address. + // The status of the IAM role's association with the cluster. // - // A value of false specifies an internal instance with a DNS name that resolves - // to a private IP address. - PubliclyAccessible *bool `type:"boolean"` - - // Specifies whether the DB instance is encrypted. - StorageEncrypted *bool `type:"boolean"` - - // The ARN from the key store with which the instance is associated for TDE - // encryption. - TdeCredentialArn *string `type:"string"` + // Valid values: in-sync | adding | removing + ApplyStatus *string `type:"string"` - // A list of VPC security groups that the DB instance belongs to. - VpcSecurityGroups []*AwsRdsDbInstanceVpcSecurityGroup `type:"list"` + // The ARN of the IAM role. + IamRoleArn *string `type:"string"` } // String returns the string representation -func (s AwsRdsDbInstanceDetails) String() string { +func (s AwsRedshiftClusterIamRole) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsRdsDbInstanceDetails) GoString() string { +func (s AwsRedshiftClusterIamRole) GoString() string { return s.String() } -// SetAssociatedRoles sets the AssociatedRoles field's value. -func (s *AwsRdsDbInstanceDetails) SetAssociatedRoles(v []*AwsRdsDbInstanceAssociatedRole) *AwsRdsDbInstanceDetails { - s.AssociatedRoles = v +// SetApplyStatus sets the ApplyStatus field's value. +func (s *AwsRedshiftClusterIamRole) SetApplyStatus(v string) *AwsRedshiftClusterIamRole { + s.ApplyStatus = &v return s } -// SetCACertificateIdentifier sets the CACertificateIdentifier field's value. -func (s *AwsRdsDbInstanceDetails) SetCACertificateIdentifier(v string) *AwsRdsDbInstanceDetails { - s.CACertificateIdentifier = &v +// SetIamRoleArn sets the IamRoleArn field's value. 
+func (s *AwsRedshiftClusterIamRole) SetIamRoleArn(v string) *AwsRedshiftClusterIamRole { + s.IamRoleArn = &v return s } -// SetDBClusterIdentifier sets the DBClusterIdentifier field's value. -func (s *AwsRdsDbInstanceDetails) SetDBClusterIdentifier(v string) *AwsRdsDbInstanceDetails { - s.DBClusterIdentifier = &v - return s +// Changes to the Amazon Redshift cluster that are currently pending. +type AwsRedshiftClusterPendingModifiedValues struct { + _ struct{} `type:"structure"` + + // The pending or in-progress change to the automated snapshot retention period. + AutomatedSnapshotRetentionPeriod *int64 `type:"integer"` + + // The pending or in-progress change to the identifier for the cluster. + ClusterIdentifier *string `type:"string"` + + // The pending or in-progress change to the cluster type. + ClusterType *string `type:"string"` + + // The pending or in-progress change to the service version. + ClusterVersion *string `type:"string"` + + // The encryption type for a cluster. + EncryptionType *string `type:"string"` + + // Indicates whether to create the cluster with enhanced VPC routing enabled. + EnhancedVpcRouting *bool `type:"boolean"` + + // The name of the maintenance track that the cluster changes to during the + // next maintenance window. + MaintenanceTrackName *string `type:"string"` + + // The pending or in-progress change to the master user password for the cluster. + MasterUserPassword *string `type:"string"` + + // The pending or in-progress change to the cluster's node type. + NodeType *string `type:"string"` + + // The pending or in-progress change to the number of nodes in the cluster. + NumberOfNodes *int64 `type:"integer"` + + // The pending or in-progress change to whether the cluster can be connected + // to from the public network. + PubliclyAccessible *bool `type:"boolean"` } -// SetDBInstanceClass sets the DBInstanceClass field's value. -func (s *AwsRdsDbInstanceDetails) SetDBInstanceClass(v string) *AwsRdsDbInstanceDetails { - s.DBInstanceClass = &v - return s +// String returns the string representation +func (s AwsRedshiftClusterPendingModifiedValues) String() string { + return awsutil.Prettify(s) } -// SetDBInstanceIdentifier sets the DBInstanceIdentifier field's value. -func (s *AwsRdsDbInstanceDetails) SetDBInstanceIdentifier(v string) *AwsRdsDbInstanceDetails { - s.DBInstanceIdentifier = &v - return s +// GoString returns the string representation +func (s AwsRedshiftClusterPendingModifiedValues) GoString() string { + return s.String() } -// SetDBName sets the DBName field's value. -func (s *AwsRdsDbInstanceDetails) SetDBName(v string) *AwsRdsDbInstanceDetails { - s.DBName = &v +// SetAutomatedSnapshotRetentionPeriod sets the AutomatedSnapshotRetentionPeriod field's value. +func (s *AwsRedshiftClusterPendingModifiedValues) SetAutomatedSnapshotRetentionPeriod(v int64) *AwsRedshiftClusterPendingModifiedValues { + s.AutomatedSnapshotRetentionPeriod = &v return s } -// SetDbInstancePort sets the DbInstancePort field's value. -func (s *AwsRdsDbInstanceDetails) SetDbInstancePort(v int64) *AwsRdsDbInstanceDetails { - s.DbInstancePort = &v +// SetClusterIdentifier sets the ClusterIdentifier field's value. +func (s *AwsRedshiftClusterPendingModifiedValues) SetClusterIdentifier(v string) *AwsRedshiftClusterPendingModifiedValues { + s.ClusterIdentifier = &v return s } -// SetDbiResourceId sets the DbiResourceId field's value. 
-func (s *AwsRdsDbInstanceDetails) SetDbiResourceId(v string) *AwsRdsDbInstanceDetails { - s.DbiResourceId = &v +// SetClusterType sets the ClusterType field's value. +func (s *AwsRedshiftClusterPendingModifiedValues) SetClusterType(v string) *AwsRedshiftClusterPendingModifiedValues { + s.ClusterType = &v return s } -// SetDeletionProtection sets the DeletionProtection field's value. -func (s *AwsRdsDbInstanceDetails) SetDeletionProtection(v bool) *AwsRdsDbInstanceDetails { - s.DeletionProtection = &v +// SetClusterVersion sets the ClusterVersion field's value. +func (s *AwsRedshiftClusterPendingModifiedValues) SetClusterVersion(v string) *AwsRedshiftClusterPendingModifiedValues { + s.ClusterVersion = &v return s } -// SetEndpoint sets the Endpoint field's value. -func (s *AwsRdsDbInstanceDetails) SetEndpoint(v *AwsRdsDbInstanceEndpoint) *AwsRdsDbInstanceDetails { - s.Endpoint = v +// SetEncryptionType sets the EncryptionType field's value. +func (s *AwsRedshiftClusterPendingModifiedValues) SetEncryptionType(v string) *AwsRedshiftClusterPendingModifiedValues { + s.EncryptionType = &v return s } -// SetEngine sets the Engine field's value. -func (s *AwsRdsDbInstanceDetails) SetEngine(v string) *AwsRdsDbInstanceDetails { - s.Engine = &v +// SetEnhancedVpcRouting sets the EnhancedVpcRouting field's value. +func (s *AwsRedshiftClusterPendingModifiedValues) SetEnhancedVpcRouting(v bool) *AwsRedshiftClusterPendingModifiedValues { + s.EnhancedVpcRouting = &v return s } -// SetEngineVersion sets the EngineVersion field's value. -func (s *AwsRdsDbInstanceDetails) SetEngineVersion(v string) *AwsRdsDbInstanceDetails { - s.EngineVersion = &v +// SetMaintenanceTrackName sets the MaintenanceTrackName field's value. +func (s *AwsRedshiftClusterPendingModifiedValues) SetMaintenanceTrackName(v string) *AwsRedshiftClusterPendingModifiedValues { + s.MaintenanceTrackName = &v return s } -// SetIAMDatabaseAuthenticationEnabled sets the IAMDatabaseAuthenticationEnabled field's value. -func (s *AwsRdsDbInstanceDetails) SetIAMDatabaseAuthenticationEnabled(v bool) *AwsRdsDbInstanceDetails { - s.IAMDatabaseAuthenticationEnabled = &v +// SetMasterUserPassword sets the MasterUserPassword field's value. +func (s *AwsRedshiftClusterPendingModifiedValues) SetMasterUserPassword(v string) *AwsRedshiftClusterPendingModifiedValues { + s.MasterUserPassword = &v return s } -// SetInstanceCreateTime sets the InstanceCreateTime field's value. -func (s *AwsRdsDbInstanceDetails) SetInstanceCreateTime(v string) *AwsRdsDbInstanceDetails { - s.InstanceCreateTime = &v +// SetNodeType sets the NodeType field's value. +func (s *AwsRedshiftClusterPendingModifiedValues) SetNodeType(v string) *AwsRedshiftClusterPendingModifiedValues { + s.NodeType = &v return s } -// SetKmsKeyId sets the KmsKeyId field's value. -func (s *AwsRdsDbInstanceDetails) SetKmsKeyId(v string) *AwsRdsDbInstanceDetails { - s.KmsKeyId = &v +// SetNumberOfNodes sets the NumberOfNodes field's value. +func (s *AwsRedshiftClusterPendingModifiedValues) SetNumberOfNodes(v int64) *AwsRedshiftClusterPendingModifiedValues { + s.NumberOfNodes = &v return s } // SetPubliclyAccessible sets the PubliclyAccessible field's value. -func (s *AwsRdsDbInstanceDetails) SetPubliclyAccessible(v bool) *AwsRdsDbInstanceDetails { +func (s *AwsRedshiftClusterPendingModifiedValues) SetPubliclyAccessible(v bool) *AwsRedshiftClusterPendingModifiedValues { s.PubliclyAccessible = &v return s } -// SetStorageEncrypted sets the StorageEncrypted field's value. 
-func (s *AwsRdsDbInstanceDetails) SetStorageEncrypted(v bool) *AwsRdsDbInstanceDetails { - s.StorageEncrypted = &v - return s +// Information about the resize operation for the cluster. +type AwsRedshiftClusterResizeInfo struct { + _ struct{} `type:"structure"` + + // Indicates whether the resize operation can be canceled. + AllowCancelResize *bool `type:"boolean"` + + // The type of resize operation. + // + // Valid values: ClassicResize + ResizeType *string `type:"string"` } -// SetTdeCredentialArn sets the TdeCredentialArn field's value. -func (s *AwsRdsDbInstanceDetails) SetTdeCredentialArn(v string) *AwsRdsDbInstanceDetails { - s.TdeCredentialArn = &v +// String returns the string representation +func (s AwsRedshiftClusterResizeInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsRedshiftClusterResizeInfo) GoString() string { + return s.String() +} + +// SetAllowCancelResize sets the AllowCancelResize field's value. +func (s *AwsRedshiftClusterResizeInfo) SetAllowCancelResize(v bool) *AwsRedshiftClusterResizeInfo { + s.AllowCancelResize = &v return s } -// SetVpcSecurityGroups sets the VpcSecurityGroups field's value. -func (s *AwsRdsDbInstanceDetails) SetVpcSecurityGroups(v []*AwsRdsDbInstanceVpcSecurityGroup) *AwsRdsDbInstanceDetails { - s.VpcSecurityGroups = v +// SetResizeType sets the ResizeType field's value. +func (s *AwsRedshiftClusterResizeInfo) SetResizeType(v string) *AwsRedshiftClusterResizeInfo { + s.ResizeType = &v return s } -// Specifies the connection endpoint. -type AwsRdsDbInstanceEndpoint struct { +// Information about the status of a cluster restore action. It only applies +// if the cluster was created by restoring a snapshot. +type AwsRedshiftClusterRestoreStatus struct { _ struct{} `type:"structure"` - // Specifies the DNS address of the DB instance. - Address *string `type:"string"` + // The number of megabytes per second being transferred from the backup storage. + // Returns the average rate for a completed backup. + // + // This field is only updated when you restore to DC2 and DS2 node types. + CurrentRestoreRateInMegaBytesPerSecond *float64 `type:"double"` - // Specifies the ID that Amazon Route 53 assigns when you create a hosted zone. - HostedZoneId *string `type:"string"` + // The amount of time an in-progress restore has been running, or the amount + // of time it took a completed restore to finish. + // + // This field is only updated when you restore to DC2 and DS2 node types. + ElapsedTimeInSeconds *int64 `type:"long"` - // Specifies the port that the database engine is listening on. - Port *int64 `type:"integer"` + // The estimate of the time remaining before the restore is complete. Returns + // 0 for a completed restore. + // + // This field is only updated when you restore to DC2 and DS2 node types. + EstimatedTimeToCompletionInSeconds *int64 `type:"long"` + + // The number of megabytes that were transferred from snapshot storage. + // + // This field is only updated when you restore to DC2 and DS2 node types. + ProgressInMegaBytes *int64 `type:"long"` + + // The size of the set of snapshot data that was used to restore the cluster. + // + // This field is only updated when you restore to DC2 and DS2 node types. + SnapshotSizeInMegaBytes *int64 `type:"long"` + + // The status of the restore action. 
+ // + // Valid values: starting | restoring | completed | failed + Status *string `type:"string"` } // String returns the string representation -func (s AwsRdsDbInstanceEndpoint) String() string { +func (s AwsRedshiftClusterRestoreStatus) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsRdsDbInstanceEndpoint) GoString() string { +func (s AwsRedshiftClusterRestoreStatus) GoString() string { return s.String() } -// SetAddress sets the Address field's value. -func (s *AwsRdsDbInstanceEndpoint) SetAddress(v string) *AwsRdsDbInstanceEndpoint { - s.Address = &v +// SetCurrentRestoreRateInMegaBytesPerSecond sets the CurrentRestoreRateInMegaBytesPerSecond field's value. +func (s *AwsRedshiftClusterRestoreStatus) SetCurrentRestoreRateInMegaBytesPerSecond(v float64) *AwsRedshiftClusterRestoreStatus { + s.CurrentRestoreRateInMegaBytesPerSecond = &v return s } -// SetHostedZoneId sets the HostedZoneId field's value. -func (s *AwsRdsDbInstanceEndpoint) SetHostedZoneId(v string) *AwsRdsDbInstanceEndpoint { - s.HostedZoneId = &v +// SetElapsedTimeInSeconds sets the ElapsedTimeInSeconds field's value. +func (s *AwsRedshiftClusterRestoreStatus) SetElapsedTimeInSeconds(v int64) *AwsRedshiftClusterRestoreStatus { + s.ElapsedTimeInSeconds = &v return s } -// SetPort sets the Port field's value. -func (s *AwsRdsDbInstanceEndpoint) SetPort(v int64) *AwsRdsDbInstanceEndpoint { - s.Port = &v +// SetEstimatedTimeToCompletionInSeconds sets the EstimatedTimeToCompletionInSeconds field's value. +func (s *AwsRedshiftClusterRestoreStatus) SetEstimatedTimeToCompletionInSeconds(v int64) *AwsRedshiftClusterRestoreStatus { + s.EstimatedTimeToCompletionInSeconds = &v return s } -// A VPC security groups that the DB instance belongs to. -type AwsRdsDbInstanceVpcSecurityGroup struct { +// SetProgressInMegaBytes sets the ProgressInMegaBytes field's value. +func (s *AwsRedshiftClusterRestoreStatus) SetProgressInMegaBytes(v int64) *AwsRedshiftClusterRestoreStatus { + s.ProgressInMegaBytes = &v + return s +} + +// SetSnapshotSizeInMegaBytes sets the SnapshotSizeInMegaBytes field's value. +func (s *AwsRedshiftClusterRestoreStatus) SetSnapshotSizeInMegaBytes(v int64) *AwsRedshiftClusterRestoreStatus { + s.SnapshotSizeInMegaBytes = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AwsRedshiftClusterRestoreStatus) SetStatus(v string) *AwsRedshiftClusterRestoreStatus { + s.Status = &v + return s +} + +// A VPC security group that the cluster belongs to, if the cluster is in a +// VPC. +type AwsRedshiftClusterVpcSecurityGroup struct { _ struct{} `type:"structure"` // The status of the VPC security group. Status *string `type:"string"` - // The name of the VPC security group. + // The identifier of the VPC security group. VpcSecurityGroupId *string `type:"string"` } // String returns the string representation -func (s AwsRdsDbInstanceVpcSecurityGroup) String() string { +func (s AwsRedshiftClusterVpcSecurityGroup) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s AwsRdsDbInstanceVpcSecurityGroup) GoString() string { +func (s AwsRedshiftClusterVpcSecurityGroup) GoString() string { return s.String() } // SetStatus sets the Status field's value. 
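The restore-status fields above are all optional pointers, so consumers typically derive progress defensively. The helper below is an illustrative sketch rather than part of the generated API; it assumes the vendored aws and securityhub packages and guards against missing values and a zero snapshot size.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

// restoreProgress summarizes an AwsRedshiftClusterRestoreStatus, falling back
// to the bare status when the size or progress fields are absent.
func restoreProgress(rs *securityhub.AwsRedshiftClusterRestoreStatus) string {
	if rs == nil || rs.Status == nil {
		return "restore status unknown"
	}
	if rs.ProgressInMegaBytes == nil || aws.Int64Value(rs.SnapshotSizeInMegaBytes) == 0 {
		return aws.StringValue(rs.Status)
	}
	pct := float64(aws.Int64Value(rs.ProgressInMegaBytes)) /
		float64(aws.Int64Value(rs.SnapshotSizeInMegaBytes)) * 100
	return fmt.Sprintf("%s: %.1f%% transferred", aws.StringValue(rs.Status), pct)
}

func main() {
	rs := (&securityhub.AwsRedshiftClusterRestoreStatus{}).
		SetStatus("restoring").
		SetProgressInMegaBytes(1024).
		SetSnapshotSizeInMegaBytes(4096)

	fmt.Println(restoreProgress(rs)) // restoring: 25.0% transferred
}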
-func (s *AwsRdsDbInstanceVpcSecurityGroup) SetStatus(v string) *AwsRdsDbInstanceVpcSecurityGroup { +func (s *AwsRedshiftClusterVpcSecurityGroup) SetStatus(v string) *AwsRedshiftClusterVpcSecurityGroup { s.Status = &v return s } // SetVpcSecurityGroupId sets the VpcSecurityGroupId field's value. -func (s *AwsRdsDbInstanceVpcSecurityGroup) SetVpcSecurityGroupId(v string) *AwsRdsDbInstanceVpcSecurityGroup { +func (s *AwsRedshiftClusterVpcSecurityGroup) SetVpcSecurityGroupId(v string) *AwsRedshiftClusterVpcSecurityGroup { s.VpcSecurityGroupId = &v return s } @@ -7211,7 +15177,11 @@ func (s *AwsRdsDbInstanceVpcSecurityGroup) SetVpcSecurityGroupId(v string) *AwsR type AwsS3BucketDetails struct { _ struct{} `type:"structure"` - // The date and time when the S3 bucket was created. + // Indicates when the S3 bucket was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. CreatedAt *string `type:"string"` // The canonical user ID of the owner of the S3 bucket. @@ -7321,7 +15291,7 @@ type AwsS3BucketServerSideEncryptionRule struct { _ struct{} `type:"structure"` // Specifies the default server-side encryption to apply to new objects in the - // bucket. If a PUT Object request doesn't specify any server-side encryption, + // bucket. If a PUT object request doesn't specify any server-side encryption, // this default encryption is applied. ApplyServerSideEncryptionByDefault *AwsS3BucketServerSideEncryptionByDefault `type:"structure"` } @@ -7342,7 +15312,7 @@ func (s *AwsS3BucketServerSideEncryptionRule) SetApplyServerSideEncryptionByDefa return s } -// Details about an AWS S3 object. +// Details about an Amazon S3 object. type AwsS3ObjectDetails struct { _ struct{} `type:"structure"` @@ -7353,7 +15323,11 @@ type AwsS3ObjectDetails struct { // resource found at a URL. ETag *string `type:"string"` - // The date and time when the object was last modified. + // Indicates when the object was last modified. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. LastModified *string `type:"string"` // The identifier of the AWS Key Management Service (AWS KMS) symmetric customer @@ -7414,6 +15388,118 @@ func (s *AwsS3ObjectDetails) SetVersionId(v string) *AwsS3ObjectDetails { return s } +// Details about an AWS Secrets Manager secret. +type AwsSecretsManagerSecretDetails struct { + _ struct{} `type:"structure"` + + // Whether the secret is deleted. + Deleted *bool `type:"boolean"` + + // The user-provided description of the secret. + Description *string `type:"string"` + + // The ARN, Key ID, or alias of the AWS KMS customer master key (CMK) used to + // encrypt the SecretString or SecretBinary values for versions of this secret. + KmsKeyId *string `type:"string"` + + // The name of the secret. + Name *string `type:"string"` + + // Whether rotation is enabled. + RotationEnabled *bool `type:"boolean"` + + // The ARN of the Lambda function that rotates the secret. + RotationLambdaArn *string `type:"string"` + + // Whether the rotation occurred within the specified rotation frequency. + RotationOccurredWithinFrequency *bool `type:"boolean"` + + // Defines the rotation schedule for the secret. 
+ RotationRules *AwsSecretsManagerSecretRotationRules `type:"structure"` +} + +// String returns the string representation +func (s AwsSecretsManagerSecretDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsSecretsManagerSecretDetails) GoString() string { + return s.String() +} + +// SetDeleted sets the Deleted field's value. +func (s *AwsSecretsManagerSecretDetails) SetDeleted(v bool) *AwsSecretsManagerSecretDetails { + s.Deleted = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *AwsSecretsManagerSecretDetails) SetDescription(v string) *AwsSecretsManagerSecretDetails { + s.Description = &v + return s +} + +// SetKmsKeyId sets the KmsKeyId field's value. +func (s *AwsSecretsManagerSecretDetails) SetKmsKeyId(v string) *AwsSecretsManagerSecretDetails { + s.KmsKeyId = &v + return s +} + +// SetName sets the Name field's value. +func (s *AwsSecretsManagerSecretDetails) SetName(v string) *AwsSecretsManagerSecretDetails { + s.Name = &v + return s +} + +// SetRotationEnabled sets the RotationEnabled field's value. +func (s *AwsSecretsManagerSecretDetails) SetRotationEnabled(v bool) *AwsSecretsManagerSecretDetails { + s.RotationEnabled = &v + return s +} + +// SetRotationLambdaArn sets the RotationLambdaArn field's value. +func (s *AwsSecretsManagerSecretDetails) SetRotationLambdaArn(v string) *AwsSecretsManagerSecretDetails { + s.RotationLambdaArn = &v + return s +} + +// SetRotationOccurredWithinFrequency sets the RotationOccurredWithinFrequency field's value. +func (s *AwsSecretsManagerSecretDetails) SetRotationOccurredWithinFrequency(v bool) *AwsSecretsManagerSecretDetails { + s.RotationOccurredWithinFrequency = &v + return s +} + +// SetRotationRules sets the RotationRules field's value. +func (s *AwsSecretsManagerSecretDetails) SetRotationRules(v *AwsSecretsManagerSecretRotationRules) *AwsSecretsManagerSecretDetails { + s.RotationRules = v + return s +} + +// Defines the rotation schedule for the secret. +type AwsSecretsManagerSecretRotationRules struct { + _ struct{} `type:"structure"` + + // The number of days after the previous rotation to rotate the secret. + AutomaticallyAfterDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s AwsSecretsManagerSecretRotationRules) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsSecretsManagerSecretRotationRules) GoString() string { + return s.String() +} + +// SetAutomaticallyAfterDays sets the AutomaticallyAfterDays field's value. +func (s *AwsSecretsManagerSecretRotationRules) SetAutomaticallyAfterDays(v int64) *AwsSecretsManagerSecretRotationRules { + s.AutomaticallyAfterDays = &v + return s +} + // Provides consistent format for the contents of the Security Hub-aggregated // findings. AwsSecurityFinding format enables you to share findings between // AWS security services and third-party solutions, and security standards checks. @@ -7441,8 +15527,12 @@ type AwsSecurityFinding struct { // zero percent confidence and 100 means 100 percent confidence. Confidence *int64 `type:"integer"` - // An ISO8601-formatted timestamp that indicates when the security-findings - // provider created the potential security issue that a finding captured. + // Indicates when the security-findings provider created the potential security + // issue that a finding captured. 
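The rotation rules shape nests inside the secret details through the same setter pattern. A minimal illustrative sketch, again assuming the vendored securityhub package; the secret name, KMS alias, and Lambda ARN are placeholders, not real resources.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	// Hypothetical rotation policy: rotate every 30 days.
	rules := (&securityhub.AwsSecretsManagerSecretRotationRules{}).
		SetAutomaticallyAfterDays(30)

	// The ARN below is a placeholder for the rotation Lambda function.
	secret := (&securityhub.AwsSecretsManagerSecretDetails{}).
		SetName("example/db-credentials").
		SetKmsKeyId("alias/aws/secretsmanager").
		SetRotationEnabled(true).
		SetRotationLambdaArn("arn:aws:lambda:us-east-1:111122223333:function:example-rotator").
		SetRotationRules(rules)

	fmt.Println(secret.GoString())
}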
+ // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. // // CreatedAt is a required field CreatedAt *string `type:"string" required:"true"` @@ -7460,13 +15550,17 @@ type AwsSecurityFinding struct { // Description is a required field Description *string `type:"string" required:"true"` - // An ISO8601-formatted timestamp that indicates when the security-findings - // provider first observed the potential security issue that a finding captured. + // Indicates when the security-findings provider first observed the potential + // security issue that a finding captured. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. FirstObservedAt *string `type:"string"` // The identifier for the solution-specific component (a discrete unit of logic) // that generated a finding. In various security-findings providers' solutions, - // this generator can be called a rule, a check, a detector, a plug-in, etc. + // this generator can be called a rule, a check, a detector, a plugin, etc. // // GeneratorId is a required field GeneratorId *string `type:"string" required:"true"` @@ -7476,9 +15570,12 @@ type AwsSecurityFinding struct { // Id is a required field Id *string `type:"string" required:"true"` - // An ISO8601-formatted timestamp that indicates when the security-findings - // provider most recently observed the potential security issue that a finding - // captured. + // Indicates when the security-findings provider most recently observed the + // potential security issue that a finding captured. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. LastObservedAt *string `type:"string"` // A list of malware related to a finding. @@ -7487,9 +15584,17 @@ type AwsSecurityFinding struct { // The details of network-related information about a finding. Network *Network `type:"structure"` + // Provides information about a network path that is relevant to a finding. + // Each entry under NetworkPath represents a component of that path. + NetworkPath []*NetworkPathComponent `type:"list"` + // A user-defined note added to a finding. Note *Note `type:"structure"` + // Provides an overview of the patch compliance status for an instance against + // a selected compliance standard. + PatchSummary *PatchSummary `type:"structure"` + // The details of process-related information about a finding. Process *ProcessDetails `type:"structure"` @@ -7552,8 +15657,11 @@ type AwsSecurityFinding struct { // Types is a required field Types []*string `type:"list" required:"true"` - // An ISO8601-formatted timestamp that indicates when the security-findings - // provider last updated the finding record. + // Indicates when the security-findings provider last updated the finding record. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. 
// // UpdatedAt is a required field UpdatedAt *string `type:"string" required:"true"` @@ -7565,6 +15673,9 @@ type AwsSecurityFinding struct { // Indicates the veracity of a finding. VerificationState *string `type:"string" enum:"VerificationState"` + // Provides a list of vulnerabilities associated with the findings. + Vulnerabilities []*Vulnerability `type:"list"` + // Provides information about the status of the investigation into a finding. Workflow *Workflow `type:"structure"` @@ -7621,6 +15732,11 @@ func (s *AwsSecurityFinding) Validate() error { if s.UpdatedAt == nil { invalidParams.Add(request.NewErrParamRequired("UpdatedAt")) } + if s.Compliance != nil { + if err := s.Compliance.Validate(); err != nil { + invalidParams.AddNested("Compliance", err.(request.ErrInvalidParams)) + } + } if s.Malware != nil { for i, v := range s.Malware { if v == nil { @@ -7636,6 +15752,11 @@ func (s *AwsSecurityFinding) Validate() error { invalidParams.AddNested("Note", err.(request.ErrInvalidParams)) } } + if s.PatchSummary != nil { + if err := s.PatchSummary.Validate(); err != nil { + invalidParams.AddNested("PatchSummary", err.(request.ErrInvalidParams)) + } + } if s.RelatedFindings != nil { for i, v := range s.RelatedFindings { if v == nil { @@ -7656,6 +15777,16 @@ func (s *AwsSecurityFinding) Validate() error { } } } + if s.Vulnerabilities != nil { + for i, v := range s.Vulnerabilities { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Vulnerabilities", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -7735,12 +15866,24 @@ func (s *AwsSecurityFinding) SetNetwork(v *Network) *AwsSecurityFinding { return s } +// SetNetworkPath sets the NetworkPath field's value. +func (s *AwsSecurityFinding) SetNetworkPath(v []*NetworkPathComponent) *AwsSecurityFinding { + s.NetworkPath = v + return s +} + // SetNote sets the Note field's value. func (s *AwsSecurityFinding) SetNote(v *Note) *AwsSecurityFinding { s.Note = v return s } +// SetPatchSummary sets the PatchSummary field's value. +func (s *AwsSecurityFinding) SetPatchSummary(v *PatchSummary) *AwsSecurityFinding { + s.PatchSummary = v + return s +} + // SetProcess sets the Process field's value. func (s *AwsSecurityFinding) SetProcess(v *ProcessDetails) *AwsSecurityFinding { s.Process = v @@ -7837,6 +15980,12 @@ func (s *AwsSecurityFinding) SetVerificationState(v string) *AwsSecurityFinding return s } +// SetVulnerabilities sets the Vulnerabilities field's value. +func (s *AwsSecurityFinding) SetVulnerabilities(v []*Vulnerability) *AwsSecurityFinding { + s.Vulnerabilities = v + return s +} + // SetWorkflow sets the Workflow field's value. func (s *AwsSecurityFinding) SetWorkflow(v *Workflow) *AwsSecurityFinding { s.Workflow = v @@ -7852,6 +16001,9 @@ func (s *AwsSecurityFinding) SetWorkflowState(v string) *AwsSecurityFinding { // A collection of attributes that are applied to all active Security Hub-aggregated // findings and that result in a subset of findings that are included in this // insight. +// +// You can filter by up to 10 finding attributes. For each attribute, you can +// provide up to 20 filter values. type AwsSecurityFindingFilters struct { _ struct{} `type:"structure"` @@ -7893,7 +16045,7 @@ type AwsSecurityFindingFilters struct { // The identifier for the solution-specific component (a discrete unit of logic) // that generated a finding. 
In various security-findings providers' solutions, - // this generator can be called a rule, a check, a detector, a plug-in, etc. + // this generator can be called a rule, a check, a detector, a plugin, etc. GeneratorId []*StringFilter `type:"list"` // The security findings provider-specific identifier for a finding. @@ -8129,6 +16281,9 @@ type AwsSecurityFindingFilters struct { VerificationState []*StringFilter `type:"list"` // The workflow state of a finding. + // + // Note that this field is deprecated. To search for a finding based on its + // workflow status, use WorkflowStatus. WorkflowState []*StringFilter `type:"list"` // The status of the investigation into a finding. Allowed values are the following. @@ -8661,11 +16816,66 @@ func (s *AwsSecurityFindingFilters) SetWorkflowStatus(v []*StringFilter) *AwsSec return s } +// Identifies a finding to update using BatchUpdateFindings. +type AwsSecurityFindingIdentifier struct { + _ struct{} `type:"structure"` + + // The identifier of the finding that was specified by the finding provider. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The ARN generated by Security Hub that uniquely identifies a product that + // generates findings. This can be the ARN for a third-party product that is + // integrated with Security Hub, or the ARN for a custom integration. + // + // ProductArn is a required field + ProductArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AwsSecurityFindingIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AwsSecurityFindingIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AwsSecurityFindingIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AwsSecurityFindingIdentifier"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.ProductArn == nil { + invalidParams.Add(request.NewErrParamRequired("ProductArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *AwsSecurityFindingIdentifier) SetId(v string) *AwsSecurityFindingIdentifier { + s.Id = &v + return s +} + +// SetProductArn sets the ProductArn field's value. +func (s *AwsSecurityFindingIdentifier) SetProductArn(v string) *AwsSecurityFindingIdentifier { + s.ProductArn = &v + return s +} + // A wrapper type for the topic's Amazon Resource Name (ARN). type AwsSnsTopicDetails struct { _ struct{} `type:"structure"` - // The ID of an AWS-managed customer master key (CMK) for Amazon SNS or a custom + // The ID of an AWS managed customer master key (CMK) for Amazon SNS or a custom // CMK. KmsMasterKeyId *string `type:"string"` @@ -8759,7 +16969,7 @@ type AwsSqsQueueDetails struct { // to encrypt or decrypt messages before calling AWS KMS again. KmsDataKeyReusePeriodSeconds *int64 `type:"integer"` - // The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom + // The ID of an AWS managed customer master key (CMK) for Amazon SQS or a custom // CMK. KmsMasterKeyId *string `type:"string"` @@ -8805,7 +17015,7 @@ func (s *AwsSqsQueueDetails) SetQueueName(v string) *AwsSqsQueueDetails { type AwsWafWebAclDetails struct { _ struct{} `type:"structure"` - // The action to perform if none of the Rules contained in the WebACL match. 
+ // The action to perform if none of the rules contained in the WebACL match. DefaultAction *string `type:"string"` // A friendly name or description of the WebACL. You can't change the name of @@ -8859,7 +17069,7 @@ type AwsWafWebAclRule struct { _ struct{} `type:"structure"` // Specifies the action that CloudFront or AWS WAF takes when a web request - // matches the conditions in the Rule. + // matches the conditions in the rule. Action *WafAction `type:"structure"` // Rules to exclude from a rule group. @@ -8881,13 +17091,13 @@ type AwsWafWebAclRule struct { // update requests, ActivatedRule|Action is used instead of ActivatedRule|OverrideAction. OverrideAction *WafOverrideAction `type:"structure"` - // Specifies the order in which the Rules in a WebACL are evaluated. Rules with - // a lower value for Priority are evaluated before Rules with a higher value. - // The value must be a unique integer. If you add multiple Rules to a WebACL, + // Specifies the order in which the rules in a WebACL are evaluated. Rules with + // a lower value for Priority are evaluated before rules with a higher value. + // The value must be a unique integer. If you add multiple rules to a WebACL, // the values do not need to be consecutive. Priority *int64 `type:"integer"` - // The identifier for a Rule. + // The identifier for a rule. RuleId *string `type:"string"` // The rule type. @@ -9177,28 +17387,347 @@ func (s *BatchImportFindingsOutput) SetSuccessCount(v int64) *BatchImportFinding return s } -// Exclusive to findings that are generated as the result of a check run against -// a specific rule in a supported security standard, such as CIS AWS Foundations. -// Contains security standard-related finding details. -// -// Values include the following: -// -// * Allowed values are the following: PASSED - Standards check passed for -// all evaluated resources. WARNING - Some information is missing or this -// check is not supported given your configuration. FAILED - Standards check -// failed for at least one evaluated resource. NOT_AVAILABLE - Check could -// not be performed due to a service outage, API error, or because the result -// of the AWS Config evaluation was NOT_APPLICABLE. If the AWS Config evaluation -// result was NOT_APPLICABLE, then after 3 days, Security Hub automatically -// archives the finding. +type BatchUpdateFindingsInput struct { + _ struct{} `type:"structure"` + + // The updated value for the finding confidence. Confidence is defined as the + // likelihood that a finding accurately identifies the behavior or issue that + // it was intended to identify. + // + // Confidence is scored on a 0-100 basis using a ratio scale, where 0 means + // zero percent confidence and 100 means 100 percent confidence. + Confidence *int64 `type:"integer"` + + // The updated value for the level of importance assigned to the resources associated + // with the findings. + // + // A score of 0 means that the underlying resources have no criticality, and + // a score of 100 is reserved for the most critical resources. + Criticality *int64 `type:"integer"` + + // The list of findings to update. BatchUpdateFindings can be used to update + // up to 100 findings at a time. + // + // For each finding, the list provides the finding identifier and the ARN of + // the finding provider. + // + // FindingIdentifiers is a required field + FindingIdentifiers []*AwsSecurityFindingIdentifier `type:"list" required:"true"` + + // The updated note. 
+ Note *NoteUpdate `type:"structure"` + + // A list of findings that are related to the updated findings. + RelatedFindings []*RelatedFinding `type:"list"` + + // Used to update the finding severity. + Severity *SeverityUpdate `type:"structure"` + + // One or more finding types in the format of namespace/category/classifier + // that classify a finding. + // + // Valid namespace values are as follows. + // + // * Software and Configuration Checks + // + // * TTPs + // + // * Effects + // + // * Unusual Behaviors + // + // * Sensitive Data Identifications + Types []*string `type:"list"` + + // A list of name/value string pairs associated with the finding. These are + // custom, user-defined fields added to a finding. + UserDefinedFields map[string]*string `type:"map"` + + // Indicates the veracity of a finding. + // + // The available values for VerificationState are as follows. + // + // * UNKNOWN – The default disposition of a security finding + // + // * TRUE_POSITIVE – The security finding is confirmed + // + // * FALSE_POSITIVE – The security finding was determined to be a false + // alarm + // + // * BENIGN_POSITIVE – A special case of TRUE_POSITIVE where the finding + // doesn't pose any threat, is expected, or both + VerificationState *string `type:"string" enum:"VerificationState"` + + // Used to update the workflow status of a finding. + // + // The workflow status indicates the progress of the investigation into the + // finding. + Workflow *WorkflowUpdate `type:"structure"` +} + +// String returns the string representation +func (s BatchUpdateFindingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchUpdateFindingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchUpdateFindingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchUpdateFindingsInput"} + if s.FindingIdentifiers == nil { + invalidParams.Add(request.NewErrParamRequired("FindingIdentifiers")) + } + if s.FindingIdentifiers != nil { + for i, v := range s.FindingIdentifiers { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FindingIdentifiers", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Note != nil { + if err := s.Note.Validate(); err != nil { + invalidParams.AddNested("Note", err.(request.ErrInvalidParams)) + } + } + if s.RelatedFindings != nil { + for i, v := range s.RelatedFindings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RelatedFindings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConfidence sets the Confidence field's value. +func (s *BatchUpdateFindingsInput) SetConfidence(v int64) *BatchUpdateFindingsInput { + s.Confidence = &v + return s +} + +// SetCriticality sets the Criticality field's value. +func (s *BatchUpdateFindingsInput) SetCriticality(v int64) *BatchUpdateFindingsInput { + s.Criticality = &v + return s +} + +// SetFindingIdentifiers sets the FindingIdentifiers field's value. +func (s *BatchUpdateFindingsInput) SetFindingIdentifiers(v []*AwsSecurityFindingIdentifier) *BatchUpdateFindingsInput { + s.FindingIdentifiers = v + return s +} + +// SetNote sets the Note field's value. 
+func (s *BatchUpdateFindingsInput) SetNote(v *NoteUpdate) *BatchUpdateFindingsInput { + s.Note = v + return s +} + +// SetRelatedFindings sets the RelatedFindings field's value. +func (s *BatchUpdateFindingsInput) SetRelatedFindings(v []*RelatedFinding) *BatchUpdateFindingsInput { + s.RelatedFindings = v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *BatchUpdateFindingsInput) SetSeverity(v *SeverityUpdate) *BatchUpdateFindingsInput { + s.Severity = v + return s +} + +// SetTypes sets the Types field's value. +func (s *BatchUpdateFindingsInput) SetTypes(v []*string) *BatchUpdateFindingsInput { + s.Types = v + return s +} + +// SetUserDefinedFields sets the UserDefinedFields field's value. +func (s *BatchUpdateFindingsInput) SetUserDefinedFields(v map[string]*string) *BatchUpdateFindingsInput { + s.UserDefinedFields = v + return s +} + +// SetVerificationState sets the VerificationState field's value. +func (s *BatchUpdateFindingsInput) SetVerificationState(v string) *BatchUpdateFindingsInput { + s.VerificationState = &v + return s +} + +// SetWorkflow sets the Workflow field's value. +func (s *BatchUpdateFindingsInput) SetWorkflow(v *WorkflowUpdate) *BatchUpdateFindingsInput { + s.Workflow = v + return s +} + +type BatchUpdateFindingsOutput struct { + _ struct{} `type:"structure"` + + // The list of findings that were updated successfully. + // + // ProcessedFindings is a required field + ProcessedFindings []*AwsSecurityFindingIdentifier `type:"list" required:"true"` + + // The list of findings that were not updated. + // + // UnprocessedFindings is a required field + UnprocessedFindings []*BatchUpdateFindingsUnprocessedFinding `type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchUpdateFindingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchUpdateFindingsOutput) GoString() string { + return s.String() +} + +// SetProcessedFindings sets the ProcessedFindings field's value. +func (s *BatchUpdateFindingsOutput) SetProcessedFindings(v []*AwsSecurityFindingIdentifier) *BatchUpdateFindingsOutput { + s.ProcessedFindings = v + return s +} + +// SetUnprocessedFindings sets the UnprocessedFindings field's value. +func (s *BatchUpdateFindingsOutput) SetUnprocessedFindings(v []*BatchUpdateFindingsUnprocessedFinding) *BatchUpdateFindingsOutput { + s.UnprocessedFindings = v + return s +} + +// A finding from a BatchUpdateFindings request that Security Hub was unable +// to update. +type BatchUpdateFindingsUnprocessedFinding struct { + _ struct{} `type:"structure"` + + // The code associated with the error. + // + // ErrorCode is a required field + ErrorCode *string `type:"string" required:"true"` + + // The message associated with the error. + // + // ErrorMessage is a required field + ErrorMessage *string `type:"string" required:"true"` + + // The identifier of the finding that was not updated. + // + // FindingIdentifier is a required field + FindingIdentifier *AwsSecurityFindingIdentifier `type:"structure" required:"true"` +} + +// String returns the string representation +func (s BatchUpdateFindingsUnprocessedFinding) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchUpdateFindingsUnprocessedFinding) GoString() string { + return s.String() +} + +// SetErrorCode sets the ErrorCode field's value. 
+func (s *BatchUpdateFindingsUnprocessedFinding) SetErrorCode(v string) *BatchUpdateFindingsUnprocessedFinding { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *BatchUpdateFindingsUnprocessedFinding) SetErrorMessage(v string) *BatchUpdateFindingsUnprocessedFinding { + s.ErrorMessage = &v + return s +} + +// SetFindingIdentifier sets the FindingIdentifier field's value. +func (s *BatchUpdateFindingsUnprocessedFinding) SetFindingIdentifier(v *AwsSecurityFindingIdentifier) *BatchUpdateFindingsUnprocessedFinding { + s.FindingIdentifier = v + return s +} + +// An IPv4 CIDR block association. +type CidrBlockAssociation struct { + _ struct{} `type:"structure"` + + // The association ID for the IPv4 CIDR block. + AssociationId *string `type:"string"` + + // The IPv4 CIDR block. + CidrBlock *string `type:"string"` + + // Information about the state of the IPv4 CIDR block. + CidrBlockState *string `type:"string"` +} + +// String returns the string representation +func (s CidrBlockAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CidrBlockAssociation) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *CidrBlockAssociation) SetAssociationId(v string) *CidrBlockAssociation { + s.AssociationId = &v + return s +} + +// SetCidrBlock sets the CidrBlock field's value. +func (s *CidrBlockAssociation) SetCidrBlock(v string) *CidrBlockAssociation { + s.CidrBlock = &v + return s +} + +// SetCidrBlockState sets the CidrBlockState field's value. +func (s *CidrBlockAssociation) SetCidrBlockState(v string) *CidrBlockAssociation { + s.CidrBlockState = &v + return s +} + +// Contains finding details that are specific to control-based findings. Only +// returned for findings generated from controls. type Compliance struct { _ struct{} `type:"structure"` - // List of requirements that are related to a standards control. + // For a control, the industry or regulatory framework requirements that are + // related to the control. The check for that control is aligned with these + // requirements. RelatedRequirements []*string `type:"list"` // The result of a standards check. + // + // The valid values for Status are as follows. + // + // * PASSED - Standards check passed for all evaluated resources. WARNING + // - Some information is missing or this check is not supported for your + // configuration. FAILED - Standards check failed for at least one evaluated + // resource. NOT_AVAILABLE - Check could not be performed due to a service + // outage, API error, or because the result of the AWS Config evaluation + // was NOT_APPLICABLE. If the AWS Config evaluation result was NOT_APPLICABLE, + // then after 3 days, Security Hub automatically archives the finding. Status *string `type:"string" enum:"ComplianceStatus"` + + // For findings generated from controls, a list of reasons behind the value + // of Status. For the list of status reason codes and their meanings, see Standards-related + // information in the ASFF (https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-standards-results.html#securityhub-standards-results-asff) + // in the AWS Security Hub User Guide. + StatusReasons []*StatusReason `type:"list"` } // String returns the string representation @@ -9211,6 +17740,26 @@ func (s Compliance) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Compliance) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Compliance"} + if s.StatusReasons != nil { + for i, v := range s.StatusReasons { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "StatusReasons", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetRelatedRequirements sets the RelatedRequirements field's value. func (s *Compliance) SetRelatedRequirements(v []*string) *Compliance { s.RelatedRequirements = v @@ -9223,6 +17772,12 @@ func (s *Compliance) SetStatus(v string) *Compliance { return s } +// SetStatusReasons sets the StatusReasons field's value. +func (s *Compliance) SetStatusReasons(v []*StatusReason) *Compliance { + s.StatusReasons = v + return s +} + // Container details related to a finding. type ContainerDetails struct { _ struct{} `type:"structure"` @@ -9233,7 +17788,11 @@ type ContainerDetails struct { // The name of the image related to a finding. ImageName *string `type:"string"` - // The date and time when the container started. + // Indicates when the container started. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. LaunchedAt *string `type:"string"` // The name of the container related to a finding. @@ -9375,7 +17934,10 @@ type CreateInsightInput struct { // Filters is a required field Filters *AwsSecurityFindingFilters `type:"structure" required:"true"` - // The attribute used as the aggregator to group related findings for the insight. + // The attribute used to group the findings for the insight. The grouping attribute + // identifies the type of item that the insight applies to. For example, if + // an insight is grouped by resource identifier, then the insight produces a + // list of resource identifiers. // // GroupByAttribute is a required field GroupByAttribute *string `type:"string" required:"true"` @@ -9482,27 +18044,69 @@ func (s *CreateMembersInput) SetAccountDetails(v []*AccountDetails) *CreateMembe return s } -type CreateMembersOutput struct { +type CreateMembersOutput struct { + _ struct{} `type:"structure"` + + // The list of AWS accounts that were not processed. For each account, the list + // includes the account ID and the email address. + UnprocessedAccounts []*Result `type:"list"` +} + +// String returns the string representation +func (s CreateMembersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMembersOutput) GoString() string { + return s.String() +} + +// SetUnprocessedAccounts sets the UnprocessedAccounts field's value. +func (s *CreateMembersOutput) SetUnprocessedAccounts(v []*Result) *CreateMembersOutput { + s.UnprocessedAccounts = v + return s +} + +// CVSS scores from the advisory related to the vulnerability. +type Cvss struct { _ struct{} `type:"structure"` - // The list of AWS accounts that were not processed. For each account, the list - // includes the account ID and the email address. - UnprocessedAccounts []*Result `type:"list"` + // The base CVSS score. + BaseScore *float64 `type:"double"` + + // The base scoring vector for the CVSS score. + BaseVector *string `type:"string"` + + // The version of CVSS for the CVSS score. 
+ Version *string `type:"string"` } // String returns the string representation -func (s CreateMembersOutput) String() string { +func (s Cvss) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateMembersOutput) GoString() string { +func (s Cvss) GoString() string { return s.String() } -// SetUnprocessedAccounts sets the UnprocessedAccounts field's value. -func (s *CreateMembersOutput) SetUnprocessedAccounts(v []*Result) *CreateMembersOutput { - s.UnprocessedAccounts = v +// SetBaseScore sets the BaseScore field's value. +func (s *Cvss) SetBaseScore(v float64) *Cvss { + s.BaseScore = &v + return s +} + +// SetBaseVector sets the BaseVector field's value. +func (s *Cvss) SetBaseVector(v string) *Cvss { + s.BaseVector = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *Cvss) SetVersion(v string) *Cvss { + s.Version = &v return s } @@ -10004,6 +18608,13 @@ func (s *DescribeHubInput) SetHubArn(v string) *DescribeHubInput { type DescribeHubOutput struct { _ struct{} `type:"structure"` + // Whether to automatically enable new controls when they are added to standards + // that are enabled. + // + // If set to true, then new controls for enabled standards are enabled automatically. + // If set to false, then new controls are not enabled. + AutoEnableControls *bool `type:"boolean"` + // The ARN of the Hub resource that was retrieved. HubArn *string `type:"string"` @@ -10021,6 +18632,12 @@ func (s DescribeHubOutput) GoString() string { return s.String() } +// SetAutoEnableControls sets the AutoEnableControls field's value. +func (s *DescribeHubOutput) SetAutoEnableControls(v bool) *DescribeHubOutput { + s.AutoEnableControls = &v + return s +} + // SetHubArn sets the HubArn field's value. func (s *DescribeHubOutput) SetHubArn(v string) *DescribeHubOutput { s.HubArn = &v @@ -10507,7 +19124,13 @@ func (s *EnableImportFindingsForProductOutput) SetProductSubscriptionArn(v strin type EnableSecurityHubInput struct { _ struct{} `type:"structure"` - // The tags to add to the Hub resource when you enable Security Hub. + // Whether to enable the security standards that Security Hub has designated + // as automatically enabled. If you do not provide a value for EnableDefaultStandards, + // it is set to true. To not enable the automatically enabled standards, set + // EnableDefaultStandards to false. + EnableDefaultStandards *bool `type:"boolean"` + + // The tags to add to the hub resource when you enable Security Hub. Tags map[string]*string `min:"1" type:"map"` } @@ -10534,6 +19157,12 @@ func (s *EnableSecurityHubInput) Validate() error { return nil } +// SetEnableDefaultStandards sets the EnableDefaultStandards field's value. +func (s *EnableSecurityHubInput) SetEnableDefaultStandards(v bool) *EnableSecurityHubInput { + s.EnableDefaultStandards = &v + return s +} + // SetTags sets the Tags field's value. func (s *EnableSecurityHubInput) SetTags(v map[string]*string) *EnableSecurityHubInput { s.Tags = v @@ -10653,6 +19282,12 @@ type GetFindingsInput struct { // The finding attributes used to define a condition to filter the returned // findings. + // + // You can filter by up to 10 finding attributes. For each attribute, you can + // provide up to 20 filter values. + // + // Note that in the available filter fields, WorkflowState is deprecated. To + // search for a finding based on its workflow status, use WorkflowStatus. Filters *AwsSecurityFindingFilters `type:"structure"` // The maximum number of findings to return. 
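An illustrative sketch of how a caller might exercise the Filters guidance documented just above (at most 10 finding attributes per query, up to 20 values each, and WorkflowStatus rather than the deprecated WorkflowState), assuming the standard aws-sdk-go v1 session and securityhub client constructors; the "NEW" status value, the MaxResults choice, and the client wiring are assumptions for illustration, not taken from this patch.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	// Standard v1 SDK wiring; region and credentials come from the environment.
	sess := session.Must(session.NewSession())
	svc := securityhub.New(sess)

	// Filter on WorkflowStatus, the non-deprecated counterpart of WorkflowState.
	out, err := svc.GetFindings(&securityhub.GetFindingsInput{
		Filters: &securityhub.AwsSecurityFindingFilters{
			WorkflowStatus: []*securityhub.StringFilter{
				{Comparison: aws.String("EQUALS"), Value: aws.String("NEW")},
			},
		},
		MaxResults: aws.Int64(50),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("matched %d findings\n", len(out.Findings))
}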
@@ -11058,21 +19693,22 @@ func (s *GetMembersOutput) SetUnprocessedAccounts(v []*Result) *GetMembersOutput return s } -// Includes details of the list of the findings that cannot be imported. +// The list of the findings that cannot be imported. For each finding, the list +// provides the error. type ImportFindingsError struct { _ struct{} `type:"structure"` - // The code of the error made during the BatchImportFindings operation. + // The code of the error returned by the BatchImportFindings operation. // // ErrorCode is a required field ErrorCode *string `type:"string" required:"true"` - // The message of the error made during the BatchImportFindings operation. + // The message of the error returned by the BatchImportFindings operation. // // ErrorMessage is a required field ErrorMessage *string `type:"string" required:"true"` - // The ID of the error made during the BatchImportFindings operation. + // The identifier of the finding that could not be updated. // // Id is a required field Id *string `type:"string" required:"true"` @@ -11117,9 +19753,10 @@ type Insight struct { // Filters is a required field Filters *AwsSecurityFindingFilters `type:"structure" required:"true"` - // The attribute that the insight's findings are grouped by. This attribute - // is used as a findings aggregator for the purposes of viewing and managing - // multiple related findings under a single operand. + // The grouping attribute for the insight's findings. Indicates how to group + // the matching findings, and identifies the type of item that the insight applies + // to. For example, if an insight is grouped by resource identifier, then the + // insight produces a list of resource identifiers. // // GroupByAttribute is a required field GroupByAttribute *string `type:"string" required:"true"` @@ -11259,8 +19896,8 @@ func (s *InsightResults) SetResultValues(v []*InsightResultValue) *InsightResult // Internal server error. type InternalException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -11279,17 +19916,17 @@ func (s InternalException) GoString() string { func newErrorInternalException(v protocol.ResponseMetadata) error { return &InternalException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalException) Code() string { +func (s *InternalException) Code() string { return "InternalException" } // Message returns the exception's message. -func (s InternalException) Message() string { +func (s *InternalException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11297,28 +19934,28 @@ func (s InternalException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalException) OrigErr() error { +func (s *InternalException) OrigErr() error { return nil } -func (s InternalException) Error() string { +func (s *InternalException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InternalException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalException) RequestID() string { + return s.RespMetadata.RequestID } // AWS Security Hub isn't enabled for the account used to make this request. type InvalidAccessException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -11337,17 +19974,17 @@ func (s InvalidAccessException) GoString() string { func newErrorInvalidAccessException(v protocol.ResponseMetadata) error { return &InvalidAccessException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAccessException) Code() string { +func (s *InvalidAccessException) Code() string { return "InvalidAccessException" } // Message returns the exception's message. -func (s InvalidAccessException) Message() string { +func (s *InvalidAccessException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11355,29 +19992,29 @@ func (s InvalidAccessException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAccessException) OrigErr() error { +func (s *InvalidAccessException) OrigErr() error { return nil } -func (s InvalidAccessException) Error() string { +func (s *InvalidAccessException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAccessException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAccessException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAccessException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAccessException) RequestID() string { + return s.RespMetadata.RequestID } // The request was rejected because you supplied an invalid or out-of-range // value for an input parameter. type InvalidInputException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -11396,17 +20033,17 @@ func (s InvalidInputException) GoString() string { func newErrorInvalidInputException(v protocol.ResponseMetadata) error { return &InvalidInputException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInputException) Code() string { +func (s *InvalidInputException) Code() string { return "InvalidInputException" } // Message returns the exception's message. -func (s InvalidInputException) Message() string { +func (s *InvalidInputException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11414,22 +20051,22 @@ func (s InvalidInputException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInputException) OrigErr() error { +func (s *InvalidInputException) OrigErr() error { return nil } -func (s InvalidInputException) Error() string { +func (s *InvalidInputException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidInputException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInputException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInputException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInputException) RequestID() string { + return s.RespMetadata.RequestID } // Details about an invitation. @@ -11556,6 +20193,48 @@ func (s *IpFilter) SetCidr(v string) *IpFilter { return s } +// An IPV6 CIDR block association. +type Ipv6CidrBlockAssociation struct { + _ struct{} `type:"structure"` + + // The association ID for the IPv6 CIDR block. + AssociationId *string `type:"string"` + + // Information about the state of the CIDR block. + CidrBlockState *string `type:"string"` + + // The IPv6 CIDR block. + Ipv6CidrBlock *string `type:"string"` +} + +// String returns the string representation +func (s Ipv6CidrBlockAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Ipv6CidrBlockAssociation) GoString() string { + return s.String() +} + +// SetAssociationId sets the AssociationId field's value. +func (s *Ipv6CidrBlockAssociation) SetAssociationId(v string) *Ipv6CidrBlockAssociation { + s.AssociationId = &v + return s +} + +// SetCidrBlockState sets the CidrBlockState field's value. +func (s *Ipv6CidrBlockAssociation) SetCidrBlockState(v string) *Ipv6CidrBlockAssociation { + s.CidrBlockState = &v + return s +} + +// SetIpv6CidrBlock sets the Ipv6CidrBlock field's value. +func (s *Ipv6CidrBlockAssociation) SetIpv6CidrBlock(v string) *Ipv6CidrBlockAssociation { + s.Ipv6CidrBlock = &v + return s +} + // A keyword filter for querying findings. type KeywordFilter struct { _ struct{} `type:"structure"` @@ -11583,8 +20262,8 @@ func (s *KeywordFilter) SetValue(v string) *KeywordFilter { // The request was rejected because it attempted to create resources beyond // the current AWS account limits. The error code describes the limit exceeded. type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -11603,17 +20282,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11621,22 +20300,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListEnabledProductsForImportInput struct { @@ -12066,18 +20745,39 @@ func (s *Malware) SetType(v string) *Malware { return s } -// The map filter for querying findings. +// A map filter for querying findings. Each map filter provides the field to +// check, the value to look for, and the comparison operator. type MapFilter struct { _ struct{} `type:"structure"` - // The condition to apply to a key value when querying for findings with a map - // filter. + // The condition to apply to the key value when querying for findings with a + // map filter. + // + // To search for values that exactly match the filter value, use EQUALS. For + // example, for the ResourceTags field, the filter Department EQUALS Security + // matches findings that have the value Security for the tag Department. + // + // To search for values other than the filter value, use NOT_EQUALS. For example, + // for the ResourceTags field, the filter Department NOT_EQUALS Finance matches + // findings that do not have the value Finance for the tag Department. + // + // EQUALS filters on the same field are joined by OR. A finding matches if it + // matches any one of those filters. + // + // NOT_EQUALS filters on the same field are joined by AND. A finding matches + // only if it matches all of those filters. + // + // You cannot have both an EQUALS filter and a NOT_EQUALS filter on the same + // field. Comparison *string `type:"string" enum:"MapFilterComparison"` - // The key of the map filter. + // The key of the map filter. For example, for ResourceTags, Key identifies + // the name of the tag. For UserDefinedFields, Key is the name of the field. Key *string `type:"string"` - // The value for the key in the map filter. + // The value for the key in the map filter. Filter values are case sensitive. + // For example, one of the values for a tag called Department might be Security. + // If you provide security as the filter value, then there is no match. Value *string `type:"string"` } @@ -12129,6 +20829,24 @@ type Member struct { // The status of the relationship between the member account and its master // account. + // + // The status can have one of the following values: + // + // * CREATED - Indicates that the master account added the member account, + // but has not yet invited the member account. + // + // * INVITED - Indicates that the master account invited the member account. + // The member account has not yet responded to the invitation. + // + // * ASSOCIATED - Indicates that the member account accepted the invitation. + // + // * REMOVED - Indicates that the master account disassociated the member + // account. + // + // * RESIGNED - Indicates that the member account disassociated themselves + // from the master account. + // + // * DELETED - Indicates that the master account deleted the member account. MemberStatus *string `type:"string"` // The timestamp for the date and time when the member account was updated. @@ -12200,6 +20918,9 @@ type Network struct { // The direction of network traffic associated with a finding. 
Direction *string `type:"string" enum:"NetworkDirection"` + // The range of open ports that is present on the network. + OpenPortRange *PortRange `type:"structure"` + // The protocol of network-related information about a finding. Protocol *string `type:"string"` @@ -12260,6 +20981,12 @@ func (s *Network) SetDirection(v string) *Network { return s } +// SetOpenPortRange sets the OpenPortRange field's value. +func (s *Network) SetOpenPortRange(v *PortRange) *Network { + s.OpenPortRange = v + return s +} + // SetProtocol sets the Protocol field's value. func (s *Network) SetProtocol(v string) *Network { s.Protocol = &v @@ -12296,6 +21023,135 @@ func (s *Network) SetSourcePort(v int64) *Network { return s } +// Details about a network path component that occurs before or after the current +// component. +type NetworkHeader struct { + _ struct{} `type:"structure"` + + // Information about the destination of the component. + Destination *NetworkPathComponentDetails `type:"structure"` + + // The protocol used for the component. + Protocol *string `type:"string"` + + // Information about the origin of the component. + Source *NetworkPathComponentDetails `type:"structure"` +} + +// String returns the string representation +func (s NetworkHeader) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkHeader) GoString() string { + return s.String() +} + +// SetDestination sets the Destination field's value. +func (s *NetworkHeader) SetDestination(v *NetworkPathComponentDetails) *NetworkHeader { + s.Destination = v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *NetworkHeader) SetProtocol(v string) *NetworkHeader { + s.Protocol = &v + return s +} + +// SetSource sets the Source field's value. +func (s *NetworkHeader) SetSource(v *NetworkPathComponentDetails) *NetworkHeader { + s.Source = v + return s +} + +// Information about a network path component. +type NetworkPathComponent struct { + _ struct{} `type:"structure"` + + // The identifier of a component in the network path. + ComponentId *string `type:"string"` + + // The type of component. + ComponentType *string `type:"string"` + + // Information about the component that comes after the current component in + // the network path. + Egress *NetworkHeader `type:"structure"` + + // Information about the component that comes before the current node in the + // network path. + Ingress *NetworkHeader `type:"structure"` +} + +// String returns the string representation +func (s NetworkPathComponent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkPathComponent) GoString() string { + return s.String() +} + +// SetComponentId sets the ComponentId field's value. +func (s *NetworkPathComponent) SetComponentId(v string) *NetworkPathComponent { + s.ComponentId = &v + return s +} + +// SetComponentType sets the ComponentType field's value. +func (s *NetworkPathComponent) SetComponentType(v string) *NetworkPathComponent { + s.ComponentType = &v + return s +} + +// SetEgress sets the Egress field's value. +func (s *NetworkPathComponent) SetEgress(v *NetworkHeader) *NetworkPathComponent { + s.Egress = v + return s +} + +// SetIngress sets the Ingress field's value. +func (s *NetworkPathComponent) SetIngress(v *NetworkHeader) *NetworkPathComponent { + s.Ingress = v + return s +} + +// Information about the destination of the next component in the network path. 
+type NetworkPathComponentDetails struct { + _ struct{} `type:"structure"` + + // The IP addresses of the destination. + Address []*string `type:"list"` + + // A list of port ranges for the destination. + PortRanges []*PortRange `type:"list"` +} + +// String returns the string representation +func (s NetworkPathComponentDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NetworkPathComponentDetails) GoString() string { + return s.String() +} + +// SetAddress sets the Address field's value. +func (s *NetworkPathComponentDetails) SetAddress(v []*string) *NetworkPathComponentDetails { + s.Address = v + return s +} + +// SetPortRanges sets the PortRanges field's value. +func (s *NetworkPathComponentDetails) SetPortRanges(v []*PortRange) *NetworkPathComponentDetails { + s.PortRanges = v + return s +} + // A user-defined note added to a finding. type Note struct { _ struct{} `type:"structure"` @@ -12307,6 +21163,10 @@ type Note struct { // The timestamp of when the note was updated. // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + // // UpdatedAt is a required field UpdatedAt *string `type:"string" required:"true"` @@ -12434,30 +21294,206 @@ type NumberFilter struct { } // String returns the string representation -func (s NumberFilter) String() string { +func (s NumberFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NumberFilter) GoString() string { + return s.String() +} + +// SetEq sets the Eq field's value. +func (s *NumberFilter) SetEq(v float64) *NumberFilter { + s.Eq = &v + return s +} + +// SetGte sets the Gte field's value. +func (s *NumberFilter) SetGte(v float64) *NumberFilter { + s.Gte = &v + return s +} + +// SetLte sets the Lte field's value. +func (s *NumberFilter) SetLte(v float64) *NumberFilter { + s.Lte = &v + return s +} + +// Provides an overview of the patch compliance status for an instance against +// a selected compliance standard. +type PatchSummary struct { + _ struct{} `type:"structure"` + + // The number of patches from the compliance standard that failed to install. + FailedCount *int64 `type:"integer"` + + // The identifier of the compliance standard that was used to determine the + // patch compliance status. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // The number of patches from the compliance standard that were installed successfully. + InstalledCount *int64 `type:"integer"` + + // The number of installed patches that are not part of the compliance standard. + InstalledOtherCount *int64 `type:"integer"` + + // The number of patches that were applied, but that require the instance to + // be rebooted in order to be marked as installed. + InstalledPendingReboot *int64 `type:"integer"` + + // The number of patches that are installed but are also on a list of patches + // that the customer rejected. + InstalledRejectedCount *int64 `type:"integer"` + + // The number of patches that are part of the compliance standard but are not + // installed. The count includes patches that failed to install. + MissingCount *int64 `type:"integer"` + + // The type of patch operation performed. For Patch Manager, the values are + // SCAN and INSTALL. + Operation *string `type:"string"` + + // Indicates when the operation completed. 
+ // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + OperationEndTime *string `type:"string"` + + // Indicates when the operation started. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + OperationStartTime *string `type:"string"` + + // The reboot option specified for the instance. + RebootOption *string `type:"string"` +} + +// String returns the string representation +func (s PatchSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PatchSummary) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PatchSummary) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PatchSummary"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFailedCount sets the FailedCount field's value. +func (s *PatchSummary) SetFailedCount(v int64) *PatchSummary { + s.FailedCount = &v + return s +} + +// SetId sets the Id field's value. +func (s *PatchSummary) SetId(v string) *PatchSummary { + s.Id = &v + return s +} + +// SetInstalledCount sets the InstalledCount field's value. +func (s *PatchSummary) SetInstalledCount(v int64) *PatchSummary { + s.InstalledCount = &v + return s +} + +// SetInstalledOtherCount sets the InstalledOtherCount field's value. +func (s *PatchSummary) SetInstalledOtherCount(v int64) *PatchSummary { + s.InstalledOtherCount = &v + return s +} + +// SetInstalledPendingReboot sets the InstalledPendingReboot field's value. +func (s *PatchSummary) SetInstalledPendingReboot(v int64) *PatchSummary { + s.InstalledPendingReboot = &v + return s +} + +// SetInstalledRejectedCount sets the InstalledRejectedCount field's value. +func (s *PatchSummary) SetInstalledRejectedCount(v int64) *PatchSummary { + s.InstalledRejectedCount = &v + return s +} + +// SetMissingCount sets the MissingCount field's value. +func (s *PatchSummary) SetMissingCount(v int64) *PatchSummary { + s.MissingCount = &v + return s +} + +// SetOperation sets the Operation field's value. +func (s *PatchSummary) SetOperation(v string) *PatchSummary { + s.Operation = &v + return s +} + +// SetOperationEndTime sets the OperationEndTime field's value. +func (s *PatchSummary) SetOperationEndTime(v string) *PatchSummary { + s.OperationEndTime = &v + return s +} + +// SetOperationStartTime sets the OperationStartTime field's value. +func (s *PatchSummary) SetOperationStartTime(v string) *PatchSummary { + s.OperationStartTime = &v + return s +} + +// SetRebootOption sets the RebootOption field's value. +func (s *PatchSummary) SetRebootOption(v string) *PatchSummary { + s.RebootOption = &v + return s +} + +// A range of ports. +type PortRange struct { + _ struct{} `type:"structure"` + + // The first port in the port range. + Begin *int64 `type:"integer"` + + // The last port in the port range. 
+ End *int64 `type:"integer"` +} + +// String returns the string representation +func (s PortRange) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NumberFilter) GoString() string { +func (s PortRange) GoString() string { return s.String() } -// SetEq sets the Eq field's value. -func (s *NumberFilter) SetEq(v float64) *NumberFilter { - s.Eq = &v - return s -} - -// SetGte sets the Gte field's value. -func (s *NumberFilter) SetGte(v float64) *NumberFilter { - s.Gte = &v +// SetBegin sets the Begin field's value. +func (s *PortRange) SetBegin(v int64) *PortRange { + s.Begin = &v return s } -// SetLte sets the Lte field's value. -func (s *NumberFilter) SetLte(v float64) *NumberFilter { - s.Lte = &v +// SetEnd sets the End field's value. +func (s *PortRange) SetEnd(v int64) *PortRange { + s.End = &v return s } @@ -12465,7 +21501,11 @@ func (s *NumberFilter) SetLte(v float64) *NumberFilter { type ProcessDetails struct { _ struct{} `type:"structure"` - // The date/time that the process was launched. + // Indicates when the process was launched. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. LaunchedAt *string `type:"string"` // The name of the process. @@ -12480,7 +21520,11 @@ type ProcessDetails struct { // The process ID. Pid *int64 `type:"integer"` - // The date and time when the process was terminated. + // Indicates when the process was terminated. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. TerminatedAt *string `type:"string"` } @@ -12766,6 +21810,8 @@ type Resource struct { // The canonical AWS external Region name where this resource is located. Region *string `type:"string"` + ResourceRole *string `type:"string"` + // A list of AWS tags associated with a resource at the time the finding was // processed. Tags map[string]*string `type:"map"` @@ -12836,6 +21882,12 @@ func (s *Resource) SetRegion(v string) *Resource { return s } +// SetResourceRole sets the ResourceRole field's value. +func (s *Resource) SetResourceRole(v string) *Resource { + s.ResourceRole = &v + return s +} + // SetTags sets the Tags field's value. func (s *Resource) SetTags(v map[string]*string) *Resource { s.Tags = v @@ -12850,8 +21902,8 @@ func (s *Resource) SetType(v string) *Resource { // The resource specified in the request conflicts with an existing resource. type ResourceConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -12870,17 +21922,17 @@ func (s ResourceConflictException) GoString() string { func newErrorResourceConflictException(v protocol.ResponseMetadata) error { return &ResourceConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceConflictException) Code() string { +func (s *ResourceConflictException) Code() string { return "ResourceConflictException" } // Message returns the exception's message. 
-func (s ResourceConflictException) Message() string { +func (s *ResourceConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12888,22 +21940,22 @@ func (s ResourceConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceConflictException) OrigErr() error { +func (s *ResourceConflictException) OrigErr() error { return nil } -func (s ResourceConflictException) Error() string { +func (s *ResourceConflictException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceConflictException) RequestID() string { + return s.RespMetadata.RequestID } // Additional details about a resource related to a finding. @@ -12920,33 +21972,78 @@ func (s ResourceConflictException) RequestID() string { type ResourceDetails struct { _ struct{} `type:"structure"` + // contains information about a REST API in version 1 of Amazon API Gateway. + AwsApiGatewayRestApi *AwsApiGatewayRestApiDetails `type:"structure"` + + // Provides information about a version 1 Amazon API Gateway stage. + AwsApiGatewayStage *AwsApiGatewayStageDetails `type:"structure"` + + // Contains information about a version 2 API in Amazon API Gateway. + AwsApiGatewayV2Api *AwsApiGatewayV2ApiDetails `type:"structure"` + + // Contains information about a version 2 stage for Amazon API Gateway. + AwsApiGatewayV2Stage *AwsApiGatewayV2StageDetails `type:"structure"` + + // Details for an autoscaling group. + AwsAutoScalingAutoScalingGroup *AwsAutoScalingAutoScalingGroupDetails `type:"structure"` + + // Provides details about an AWS Certificate Manager certificate. + AwsCertificateManagerCertificate *AwsCertificateManagerCertificateDetails `type:"structure"` + // Details about a CloudFront distribution. AwsCloudFrontDistribution *AwsCloudFrontDistributionDetails `type:"structure"` + // Provides details about a CloudTrail trail. + AwsCloudTrailTrail *AwsCloudTrailTrailDetails `type:"structure"` + // Details for an AWS CodeBuild project. AwsCodeBuildProject *AwsCodeBuildProjectDetails `type:"structure"` + // Details about a DynamoDB table. + AwsDynamoDbTable *AwsDynamoDbTableDetails `type:"structure"` + + // Details about an Elastic IP address. + AwsEc2Eip *AwsEc2EipDetails `type:"structure"` + // Details about an Amazon EC2 instance related to a finding. AwsEc2Instance *AwsEc2InstanceDetails `type:"structure"` - // Details for an AWS EC2 network interface. + // Details for an Amazon EC2 network interface. AwsEc2NetworkInterface *AwsEc2NetworkInterfaceDetails `type:"structure"` // Details for an EC2 security group. AwsEc2SecurityGroup *AwsEc2SecurityGroupDetails `type:"structure"` + // Details for an EC2 volume. + AwsEc2Volume *AwsEc2VolumeDetails `type:"structure"` + + // Details for an EC2 VPC. + AwsEc2Vpc *AwsEc2VpcDetails `type:"structure"` + // Details for an Elasticsearch domain. AwsElasticsearchDomain *AwsElasticsearchDomainDetails `type:"structure"` + // Contains details about a Classic Load Balancer. 
+ AwsElbLoadBalancer *AwsElbLoadBalancerDetails `type:"structure"` + // Details about a load balancer. AwsElbv2LoadBalancer *AwsElbv2LoadBalancerDetails `type:"structure"` // Details about an IAM access key related to a finding. AwsIamAccessKey *AwsIamAccessKeyDetails `type:"structure"` + // Contains details about an IAM group. + AwsIamGroup *AwsIamGroupDetails `type:"structure"` + + // Details about an IAM permissions policy. + AwsIamPolicy *AwsIamPolicyDetails `type:"structure"` + // Details about an IAM role. AwsIamRole *AwsIamRoleDetails `type:"structure"` + // Details about an IAM user. + AwsIamUser *AwsIamUserDetails `type:"structure"` + // Details about a KMS key. AwsKmsKey *AwsKmsKeyDetails `type:"structure"` @@ -12956,15 +22053,30 @@ type ResourceDetails struct { // Details for a Lambda layer version. AwsLambdaLayerVersion *AwsLambdaLayerVersionDetails `type:"structure"` - // Details for an RDS database instance. + // Details about an Amazon RDS database cluster. + AwsRdsDbCluster *AwsRdsDbClusterDetails `type:"structure"` + + // Details about an Amazon RDS database cluster snapshot. + AwsRdsDbClusterSnapshot *AwsRdsDbClusterSnapshotDetails `type:"structure"` + + // Details about an Amazon RDS database instance. AwsRdsDbInstance *AwsRdsDbInstanceDetails `type:"structure"` - // Details about an Amazon S3 Bucket related to a finding. + // Details about an Amazon RDS database snapshot. + AwsRdsDbSnapshot *AwsRdsDbSnapshotDetails `type:"structure"` + + // Details about an Amazon Redshift cluster. + AwsRedshiftCluster *AwsRedshiftClusterDetails `type:"structure"` + + // Details about an Amazon S3 bucket related to a finding. AwsS3Bucket *AwsS3BucketDetails `type:"structure"` // Details about an Amazon S3 object related to a finding. AwsS3Object *AwsS3ObjectDetails `type:"structure"` + // Details about a Secrets Manager secret. + AwsSecretsManagerSecret *AwsSecretsManagerSecretDetails `type:"structure"` + // Details about an SNS topic. AwsSnsTopic *AwsSnsTopicDetails `type:"structure"` @@ -13015,18 +22127,72 @@ func (s *ResourceDetails) Validate() error { return nil } +// SetAwsApiGatewayRestApi sets the AwsApiGatewayRestApi field's value. +func (s *ResourceDetails) SetAwsApiGatewayRestApi(v *AwsApiGatewayRestApiDetails) *ResourceDetails { + s.AwsApiGatewayRestApi = v + return s +} + +// SetAwsApiGatewayStage sets the AwsApiGatewayStage field's value. +func (s *ResourceDetails) SetAwsApiGatewayStage(v *AwsApiGatewayStageDetails) *ResourceDetails { + s.AwsApiGatewayStage = v + return s +} + +// SetAwsApiGatewayV2Api sets the AwsApiGatewayV2Api field's value. +func (s *ResourceDetails) SetAwsApiGatewayV2Api(v *AwsApiGatewayV2ApiDetails) *ResourceDetails { + s.AwsApiGatewayV2Api = v + return s +} + +// SetAwsApiGatewayV2Stage sets the AwsApiGatewayV2Stage field's value. +func (s *ResourceDetails) SetAwsApiGatewayV2Stage(v *AwsApiGatewayV2StageDetails) *ResourceDetails { + s.AwsApiGatewayV2Stage = v + return s +} + +// SetAwsAutoScalingAutoScalingGroup sets the AwsAutoScalingAutoScalingGroup field's value. +func (s *ResourceDetails) SetAwsAutoScalingAutoScalingGroup(v *AwsAutoScalingAutoScalingGroupDetails) *ResourceDetails { + s.AwsAutoScalingAutoScalingGroup = v + return s +} + +// SetAwsCertificateManagerCertificate sets the AwsCertificateManagerCertificate field's value. 
+func (s *ResourceDetails) SetAwsCertificateManagerCertificate(v *AwsCertificateManagerCertificateDetails) *ResourceDetails { + s.AwsCertificateManagerCertificate = v + return s +} + // SetAwsCloudFrontDistribution sets the AwsCloudFrontDistribution field's value. func (s *ResourceDetails) SetAwsCloudFrontDistribution(v *AwsCloudFrontDistributionDetails) *ResourceDetails { s.AwsCloudFrontDistribution = v return s } +// SetAwsCloudTrailTrail sets the AwsCloudTrailTrail field's value. +func (s *ResourceDetails) SetAwsCloudTrailTrail(v *AwsCloudTrailTrailDetails) *ResourceDetails { + s.AwsCloudTrailTrail = v + return s +} + // SetAwsCodeBuildProject sets the AwsCodeBuildProject field's value. func (s *ResourceDetails) SetAwsCodeBuildProject(v *AwsCodeBuildProjectDetails) *ResourceDetails { s.AwsCodeBuildProject = v return s } +// SetAwsDynamoDbTable sets the AwsDynamoDbTable field's value. +func (s *ResourceDetails) SetAwsDynamoDbTable(v *AwsDynamoDbTableDetails) *ResourceDetails { + s.AwsDynamoDbTable = v + return s +} + +// SetAwsEc2Eip sets the AwsEc2Eip field's value. +func (s *ResourceDetails) SetAwsEc2Eip(v *AwsEc2EipDetails) *ResourceDetails { + s.AwsEc2Eip = v + return s +} + // SetAwsEc2Instance sets the AwsEc2Instance field's value. func (s *ResourceDetails) SetAwsEc2Instance(v *AwsEc2InstanceDetails) *ResourceDetails { s.AwsEc2Instance = v @@ -13045,12 +22211,30 @@ func (s *ResourceDetails) SetAwsEc2SecurityGroup(v *AwsEc2SecurityGroupDetails) return s } +// SetAwsEc2Volume sets the AwsEc2Volume field's value. +func (s *ResourceDetails) SetAwsEc2Volume(v *AwsEc2VolumeDetails) *ResourceDetails { + s.AwsEc2Volume = v + return s +} + +// SetAwsEc2Vpc sets the AwsEc2Vpc field's value. +func (s *ResourceDetails) SetAwsEc2Vpc(v *AwsEc2VpcDetails) *ResourceDetails { + s.AwsEc2Vpc = v + return s +} + // SetAwsElasticsearchDomain sets the AwsElasticsearchDomain field's value. func (s *ResourceDetails) SetAwsElasticsearchDomain(v *AwsElasticsearchDomainDetails) *ResourceDetails { s.AwsElasticsearchDomain = v return s } +// SetAwsElbLoadBalancer sets the AwsElbLoadBalancer field's value. +func (s *ResourceDetails) SetAwsElbLoadBalancer(v *AwsElbLoadBalancerDetails) *ResourceDetails { + s.AwsElbLoadBalancer = v + return s +} + // SetAwsElbv2LoadBalancer sets the AwsElbv2LoadBalancer field's value. func (s *ResourceDetails) SetAwsElbv2LoadBalancer(v *AwsElbv2LoadBalancerDetails) *ResourceDetails { s.AwsElbv2LoadBalancer = v @@ -13063,12 +22247,30 @@ func (s *ResourceDetails) SetAwsIamAccessKey(v *AwsIamAccessKeyDetails) *Resourc return s } +// SetAwsIamGroup sets the AwsIamGroup field's value. +func (s *ResourceDetails) SetAwsIamGroup(v *AwsIamGroupDetails) *ResourceDetails { + s.AwsIamGroup = v + return s +} + +// SetAwsIamPolicy sets the AwsIamPolicy field's value. +func (s *ResourceDetails) SetAwsIamPolicy(v *AwsIamPolicyDetails) *ResourceDetails { + s.AwsIamPolicy = v + return s +} + // SetAwsIamRole sets the AwsIamRole field's value. func (s *ResourceDetails) SetAwsIamRole(v *AwsIamRoleDetails) *ResourceDetails { s.AwsIamRole = v return s } +// SetAwsIamUser sets the AwsIamUser field's value. +func (s *ResourceDetails) SetAwsIamUser(v *AwsIamUserDetails) *ResourceDetails { + s.AwsIamUser = v + return s +} + // SetAwsKmsKey sets the AwsKmsKey field's value. 
func (s *ResourceDetails) SetAwsKmsKey(v *AwsKmsKeyDetails) *ResourceDetails { s.AwsKmsKey = v @@ -13087,12 +22289,36 @@ func (s *ResourceDetails) SetAwsLambdaLayerVersion(v *AwsLambdaLayerVersionDetai return s } +// SetAwsRdsDbCluster sets the AwsRdsDbCluster field's value. +func (s *ResourceDetails) SetAwsRdsDbCluster(v *AwsRdsDbClusterDetails) *ResourceDetails { + s.AwsRdsDbCluster = v + return s +} + +// SetAwsRdsDbClusterSnapshot sets the AwsRdsDbClusterSnapshot field's value. +func (s *ResourceDetails) SetAwsRdsDbClusterSnapshot(v *AwsRdsDbClusterSnapshotDetails) *ResourceDetails { + s.AwsRdsDbClusterSnapshot = v + return s +} + // SetAwsRdsDbInstance sets the AwsRdsDbInstance field's value. func (s *ResourceDetails) SetAwsRdsDbInstance(v *AwsRdsDbInstanceDetails) *ResourceDetails { s.AwsRdsDbInstance = v return s } +// SetAwsRdsDbSnapshot sets the AwsRdsDbSnapshot field's value. +func (s *ResourceDetails) SetAwsRdsDbSnapshot(v *AwsRdsDbSnapshotDetails) *ResourceDetails { + s.AwsRdsDbSnapshot = v + return s +} + +// SetAwsRedshiftCluster sets the AwsRedshiftCluster field's value. +func (s *ResourceDetails) SetAwsRedshiftCluster(v *AwsRedshiftClusterDetails) *ResourceDetails { + s.AwsRedshiftCluster = v + return s +} + // SetAwsS3Bucket sets the AwsS3Bucket field's value. func (s *ResourceDetails) SetAwsS3Bucket(v *AwsS3BucketDetails) *ResourceDetails { s.AwsS3Bucket = v @@ -13105,6 +22331,12 @@ func (s *ResourceDetails) SetAwsS3Object(v *AwsS3ObjectDetails) *ResourceDetails return s } +// SetAwsSecretsManagerSecret sets the AwsSecretsManagerSecret field's value. +func (s *ResourceDetails) SetAwsSecretsManagerSecret(v *AwsSecretsManagerSecretDetails) *ResourceDetails { + s.AwsSecretsManagerSecret = v + return s +} + // SetAwsSnsTopic sets the AwsSnsTopic field's value. func (s *ResourceDetails) SetAwsSnsTopic(v *AwsSnsTopicDetails) *ResourceDetails { s.AwsSnsTopic = v @@ -13137,8 +22369,8 @@ func (s *ResourceDetails) SetOther(v map[string]*string) *ResourceDetails { // The request was rejected because we can't find the specified resource. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Code_ *string `locationName:"Code" type:"string"` @@ -13157,17 +22389,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13175,22 +22407,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Details about the account that was not processed. @@ -13227,6 +22459,15 @@ func (s *Result) SetProcessingResult(v string) *Result { } // The severity of the finding. +// +// The finding provider can provide the initial severity, but cannot update +// it after that. The severity can only be updated by a master account. It cannot +// be updated by a member account. +// +// The finding must have either Label or Normalized populated. If only one of +// these attributes is populated, then Security Hub automatically populates +// the other one. If neither attribute is populated, then the finding is invalid. +// Label is the preferred attribute. type Severity struct { _ struct{} `type:"structure"` @@ -13241,12 +22482,8 @@ type Severity struct { // * HIGH - The issue must be addressed as a priority. // // * CRITICAL - The issue must be remediated immediately to avoid it escalating. - Label *string `type:"string" enum:"SeverityLabel"` - - // Deprecated. This attribute is being deprecated. Instead of providing Normalized, - // provide Label. // - // If you provide Normalized and do not provide Label, Label is set automatically + // If you provide Normalized and do not provide Label, then Label is set automatically // as follows. // // * 0 - INFORMATIONAL @@ -13258,8 +22495,31 @@ type Severity struct { // * 70–89 - HIGH // // * 90–100 - CRITICAL + Label *string `type:"string" enum:"SeverityLabel"` + + // Deprecated. The normalized severity of a finding. This attribute is being + // deprecated. Instead of providing Normalized, provide Label. + // + // If you provide Label and do not provide Normalized, then Normalized is set + // automatically as follows. + // + // * INFORMATIONAL - 0 + // + // * LOW - 1 + // + // * MEDIUM - 40 + // + // * HIGH - 70 + // + // * CRITICAL - 90 Normalized *int64 `type:"integer"` + // The native severity from the finding product that generated the finding. + Original *string `type:"string"` + + // Deprecated. This attribute is being deprecated. Instead of providing Product, + // provide Original. + // // The native severity as defined by the AWS service or integrated partner product // that generated the finding. Product *float64 `type:"double"` @@ -13287,12 +22547,145 @@ func (s *Severity) SetNormalized(v int64) *Severity { return s } +// SetOriginal sets the Original field's value. +func (s *Severity) SetOriginal(v string) *Severity { + s.Original = &v + return s +} + // SetProduct sets the Product field's value. func (s *Severity) SetProduct(v float64) *Severity { s.Product = &v return s } +// Updates to the severity information for a finding. +type SeverityUpdate struct { + _ struct{} `type:"structure"` + + // The severity value of the finding. The allowed values are the following. + // + // * INFORMATIONAL - No issue was found. + // + // * LOW - The issue does not require action on its own. + // + // * MEDIUM - The issue must be addressed but not urgently. + // + // * HIGH - The issue must be addressed as a priority. + // + // * CRITICAL - The issue must be remediated immediately to avoid it escalating. 
+ Label *string `type:"string" enum:"SeverityLabel"` + + // The normalized severity for the finding. This attribute is to be deprecated + // in favor of Label. + // + // If you provide Normalized and do not provide Label, Label is set automatically + // as follows. + // + // * 0 - INFORMATIONAL + // + // * 1–39 - LOW + // + // * 40–69 - MEDIUM + // + // * 70–89 - HIGH + // + // * 90–100 - CRITICAL + Normalized *int64 `type:"integer"` + + // The native severity as defined by the AWS service or integrated partner product + // that generated the finding. + Product *float64 `type:"double"` +} + +// String returns the string representation +func (s SeverityUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SeverityUpdate) GoString() string { + return s.String() +} + +// SetLabel sets the Label field's value. +func (s *SeverityUpdate) SetLabel(v string) *SeverityUpdate { + s.Label = &v + return s +} + +// SetNormalized sets the Normalized field's value. +func (s *SeverityUpdate) SetNormalized(v int64) *SeverityUpdate { + s.Normalized = &v + return s +} + +// SetProduct sets the Product field's value. +func (s *SeverityUpdate) SetProduct(v float64) *SeverityUpdate { + s.Product = &v + return s +} + +// Information about a software package. +type SoftwarePackage struct { + _ struct{} `type:"structure"` + + // The architecture used for the software package. + Architecture *string `type:"string"` + + // The epoch of the software package. + Epoch *string `type:"string"` + + // The name of the software package. + Name *string `type:"string"` + + // The release of the software package. + Release *string `type:"string"` + + // The version of the software package. + Version *string `type:"string"` +} + +// String returns the string representation +func (s SoftwarePackage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SoftwarePackage) GoString() string { + return s.String() +} + +// SetArchitecture sets the Architecture field's value. +func (s *SoftwarePackage) SetArchitecture(v string) *SoftwarePackage { + s.Architecture = &v + return s +} + +// SetEpoch sets the Epoch field's value. +func (s *SoftwarePackage) SetEpoch(v string) *SoftwarePackage { + s.Epoch = &v + return s +} + +// SetName sets the Name field's value. +func (s *SoftwarePackage) SetName(v string) *SoftwarePackage { + s.Name = &v + return s +} + +// SetRelease sets the Release field's value. +func (s *SoftwarePackage) SetRelease(v string) *SoftwarePackage { + s.Release = &v + return s +} + +// SetVersion sets the Version field's value. +func (s *SoftwarePackage) SetVersion(v string) *SoftwarePackage { + s.Version = &v + return s +} + // A collection of finding attributes used to sort findings. type SortCriterion struct { _ struct{} `type:"structure"` @@ -13333,6 +22726,14 @@ type Standard struct { // A description of the standard. Description *string `type:"string"` + // Whether the standard is enabled by default. When Security Hub is enabled + // from the console, if a standard is enabled by default, the check box for + // that standard is selected by default. + // + // When Security Hub is enabled using the EnableSecurityHub API operation, the + // standard is enabled by default unless EnableDefaultStandards is set to false. + EnabledByDefault *bool `type:"boolean"` + // The name of the standard. 
Name *string `type:"string"` @@ -13356,6 +22757,12 @@ func (s *Standard) SetDescription(v string) *Standard { return s } +// SetEnabledByDefault sets the EnabledByDefault field's value. +func (s *Standard) SetEnabledByDefault(v bool) *Standard { + s.EnabledByDefault = &v + return s +} + // SetName sets the Name field's value. func (s *Standard) SetName(v string) *Standard { s.Name = &v @@ -13577,15 +22984,66 @@ func (s *StandardsSubscriptionRequest) Validate() error { return nil } -// SetStandardsArn sets the StandardsArn field's value. -func (s *StandardsSubscriptionRequest) SetStandardsArn(v string) *StandardsSubscriptionRequest { - s.StandardsArn = &v +// SetStandardsArn sets the StandardsArn field's value. +func (s *StandardsSubscriptionRequest) SetStandardsArn(v string) *StandardsSubscriptionRequest { + s.StandardsArn = &v + return s +} + +// SetStandardsInput sets the StandardsInput field's value. +func (s *StandardsSubscriptionRequest) SetStandardsInput(v map[string]*string) *StandardsSubscriptionRequest { + s.StandardsInput = v + return s +} + +// Provides additional context for the value of Compliance.Status. +type StatusReason struct { + _ struct{} `type:"structure"` + + // The corresponding description for the status reason code. + Description *string `type:"string"` + + // A code that represents a reason for the control status. For the list of status + // reason codes and their meanings, see Standards-related information in the + // ASFF (https://docs.aws.amazon.com/securityhub/latest/userguide/securityhub-standards-results.html#securityhub-standards-results-asff) + // in the AWS Security Hub User Guide. + // + // ReasonCode is a required field + ReasonCode *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StatusReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StatusReason) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StatusReason) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StatusReason"} + if s.ReasonCode == nil { + invalidParams.Add(request.NewErrParamRequired("ReasonCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *StatusReason) SetDescription(v string) *StatusReason { + s.Description = &v return s } -// SetStandardsInput sets the StandardsInput field's value. -func (s *StandardsSubscriptionRequest) SetStandardsInput(v map[string]*string) *StandardsSubscriptionRequest { - s.StandardsInput = v +// SetReasonCode sets the ReasonCode field's value. +func (s *StatusReason) SetReasonCode(v string) *StatusReason { + s.ReasonCode = &v return s } @@ -13593,10 +23051,63 @@ func (s *StandardsSubscriptionRequest) SetStandardsInput(v map[string]*string) * type StringFilter struct { _ struct{} `type:"structure"` - // The condition to be applied to a string value when querying for findings. + // The condition to apply to a string value when querying for findings. To search + // for values that contain the filter criteria value, use one of the following + // comparison operators: + // + // * To search for values that exactly match the filter value, use EQUALS. + // For example, the filter ResourceType EQUALS AwsEc2SecurityGroup only matches + // findings that have a resource type of AwsEc2SecurityGroup. 
+ // + // * To search for values that start with the filter value, use PREFIX. For + // example, the filter ResourceType PREFIX AwsIam matches findings that have + // a resource type that starts with AwsIam. Findings with a resource type + // of AwsIamPolicy, AwsIamRole, or AwsIamUser would all match. + // + // EQUALS and PREFIX filters on the same field are joined by OR. A finding matches + // if it matches any one of those filters. + // + // To search for values that do not contain the filter criteria value, use one + // of the following comparison operators: + // + // * To search for values that do not exactly match the filter value, use + // NOT_EQUALS. For example, the filter ResourceType NOT_EQUALS AwsIamPolicy + // matches findings that have a resource type other than AwsIamPolicy. + // + // * To search for values that do not start with the filter value, use PREFIX_NOT_EQUALS. + // For example, the filter ResourceType PREFIX_NOT_EQUALS AwsIam matches + // findings that have a resource type that does not start with AwsIam. Findings + // with a resource type of AwsIamPolicy, AwsIamRole, or AwsIamUser would + // all be excluded from the results. + // + // NOT_EQUALS and PREFIX_NOT_EQUALS filters on the same field are joined by + // AND. A finding matches only if it matches all of those filters. + // + // For filters on the same field, you cannot provide both an EQUALS filter and + // a NOT_EQUALS or PREFIX_NOT_EQUALS filter. Combining filters in this way always + // returns an error, even if the provided filter values would return valid results. + // + // You can combine PREFIX filters with NOT_EQUALS or PREFIX_NOT_EQUALS filters + // for the same field. Security Hub first processes the PREFIX filters, then + // the NOT_EQUALS or PREFIX_NOT_EQUALS filters. + // + // For example, for the following filter, Security Hub first identifies findings + // that have resource types that start with either AwsIAM or AwsEc2. It then + // excludes findings that have a resource type of AwsIamPolicy and findings + // that have a resource type of AwsEc2NetworkInterface. + // + // * ResourceType PREFIX AwsIam + // + // * ResourceType PREFIX AwsEc2 + // + // * ResourceType NOT_EQUALS AwsIamPolicy + // + // * ResourceType NOT_EQUALS AwsEc2NetworkInterface Comparison *string `type:"string" enum:"StringFilterComparison"` - // The string filter value. + // The string filter value. Filter values are case sensitive. For example, the + // product name for control-based findings is Security Hub. If you provide security + // hub as the filter text, then there is no match. Value *string `type:"string"` } @@ -13701,8 +23212,12 @@ type ThreatIntelIndicator struct { // The category of a threat intelligence indicator. Category *string `type:"string" enum:"ThreatIntelIndicatorCategory"` - // The date and time when the most recent instance of a threat intelligence - // indicator was observed. + // Indicates when the most recent instance of a threat intelligence indicator + // was observed. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. LastObservedAt *string `type:"string"` // The source of the threat intelligence indicator. 
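The expanded StringFilter documentation above spells out how the new comparison operators combine: EQUALS and PREFIX filters on the same field are ORed, while NOT_EQUALS and PREFIX_NOT_EQUALS filters are ANDed. As an illustrative sketch (not part of the vendored change), the snippet below expresses the worked ResourceType example from that comment using only the StringFilter fields and comparison constants added in this patch; how the slice is attached to the finding filters passed to GetFindings is assumed and lies outside the hunks shown here.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/securityhub"
)

func main() {
	// Mirrors the worked example in the StringFilter documentation:
	// include resource types starting with AwsIam or AwsEc2, then
	// exclude AwsIamPolicy and AwsEc2NetworkInterface. PREFIX filters
	// on the same field are joined by OR; NOT_EQUALS filters by AND.
	resourceTypeFilters := []*securityhub.StringFilter{
		{
			Comparison: aws.String(securityhub.StringFilterComparisonPrefix),
			Value:      aws.String("AwsIam"),
		},
		{
			Comparison: aws.String(securityhub.StringFilterComparisonPrefix),
			Value:      aws.String("AwsEc2"),
		},
		{
			Comparison: aws.String(securityhub.StringFilterComparisonNotEquals),
			Value:      aws.String("AwsIamPolicy"),
		},
		{
			Comparison: aws.String(securityhub.StringFilterComparisonNotEquals),
			Value:      aws.String("AwsEc2NetworkInterface"),
		},
	}

	for _, f := range resourceTypeFilters {
		fmt.Printf("%s %s\n", aws.StringValue(f.Comparison), aws.StringValue(f.Value))
	}
}

In provider code such a slice would typically be assigned to the ResourceType member of the Security Hub finding filters before querying findings; that plumbing is not part of the hunks in this patch.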
@@ -14067,6 +23582,47 @@ func (s UpdateInsightOutput) GoString() string { return s.String() } +type UpdateSecurityHubConfigurationInput struct { + _ struct{} `type:"structure"` + + // Whether to automatically enable new controls when they are added to standards + // that are enabled. + // + // By default, this is set to true, and new controls are enabled automatically. + // To not automatically enable new controls, set this to false. + AutoEnableControls *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateSecurityHubConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSecurityHubConfigurationInput) GoString() string { + return s.String() +} + +// SetAutoEnableControls sets the AutoEnableControls field's value. +func (s *UpdateSecurityHubConfigurationInput) SetAutoEnableControls(v bool) *UpdateSecurityHubConfigurationInput { + s.AutoEnableControls = &v + return s +} + +type UpdateSecurityHubConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateSecurityHubConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSecurityHubConfigurationOutput) GoString() string { + return s.String() +} + type UpdateStandardsControlInput struct { _ struct{} `type:"structure"` @@ -14074,6 +23630,7 @@ type UpdateStandardsControlInput struct { ControlStatus *string `type:"string" enum:"ControlStatus"` // A description of the reason why you are disabling a security standard control. + // If you are disabling a control, then this is required. DisabledReason *string `type:"string"` // The ARN of the security standard control to enable or disable. @@ -14140,13 +23697,185 @@ func (s UpdateStandardsControlOutput) GoString() string { return s.String() } +// A vulnerability associated with a finding. +type Vulnerability struct { + _ struct{} `type:"structure"` + + // CVSS scores from the advisory related to the vulnerability. + Cvss []*Cvss `type:"list"` + + // The identifier of the vulnerability. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // A list of URLs that provide additional information about the vulnerability. + ReferenceUrls []*string `type:"list"` + + // List of vulnerabilities that are related to this vulnerability. + RelatedVulnerabilities []*string `type:"list"` + + // Information about the vendor that generates the vulnerability report. + Vendor *VulnerabilityVendor `type:"structure"` + + // List of software packages that have the vulnerability. + VulnerablePackages []*SoftwarePackage `type:"list"` +} + +// String returns the string representation +func (s Vulnerability) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Vulnerability) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Vulnerability) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Vulnerability"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Vendor != nil { + if err := s.Vendor.Validate(); err != nil { + invalidParams.AddNested("Vendor", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCvss sets the Cvss field's value. 
+func (s *Vulnerability) SetCvss(v []*Cvss) *Vulnerability { + s.Cvss = v + return s +} + +// SetId sets the Id field's value. +func (s *Vulnerability) SetId(v string) *Vulnerability { + s.Id = &v + return s +} + +// SetReferenceUrls sets the ReferenceUrls field's value. +func (s *Vulnerability) SetReferenceUrls(v []*string) *Vulnerability { + s.ReferenceUrls = v + return s +} + +// SetRelatedVulnerabilities sets the RelatedVulnerabilities field's value. +func (s *Vulnerability) SetRelatedVulnerabilities(v []*string) *Vulnerability { + s.RelatedVulnerabilities = v + return s +} + +// SetVendor sets the Vendor field's value. +func (s *Vulnerability) SetVendor(v *VulnerabilityVendor) *Vulnerability { + s.Vendor = v + return s +} + +// SetVulnerablePackages sets the VulnerablePackages field's value. +func (s *Vulnerability) SetVulnerablePackages(v []*SoftwarePackage) *Vulnerability { + s.VulnerablePackages = v + return s +} + +// A vendor that generates a vulnerability report. +type VulnerabilityVendor struct { + _ struct{} `type:"structure"` + + // The name of the vendor. + // + // Name is a required field + Name *string `type:"string" required:"true"` + + // The URL of the vulnerability advisory. + Url *string `type:"string"` + + // Indicates when the vulnerability advisory was created. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + VendorCreatedAt *string `type:"string"` + + // The severity that the vendor assigned to the vulnerability. + VendorSeverity *string `type:"string"` + + // Indicates when the vulnerability advisory was last updated. + // + // Uses the date-time format specified in RFC 3339 section 5.6, Internet Date/Time + // Format (https://tools.ietf.org/html/rfc3339#section-5.6). The value cannot + // contain spaces. For example, 2020-03-22T13:22:13.933Z. + VendorUpdatedAt *string `type:"string"` +} + +// String returns the string representation +func (s VulnerabilityVendor) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VulnerabilityVendor) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *VulnerabilityVendor) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VulnerabilityVendor"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *VulnerabilityVendor) SetName(v string) *VulnerabilityVendor { + s.Name = &v + return s +} + +// SetUrl sets the Url field's value. +func (s *VulnerabilityVendor) SetUrl(v string) *VulnerabilityVendor { + s.Url = &v + return s +} + +// SetVendorCreatedAt sets the VendorCreatedAt field's value. +func (s *VulnerabilityVendor) SetVendorCreatedAt(v string) *VulnerabilityVendor { + s.VendorCreatedAt = &v + return s +} + +// SetVendorSeverity sets the VendorSeverity field's value. +func (s *VulnerabilityVendor) SetVendorSeverity(v string) *VulnerabilityVendor { + s.VendorSeverity = &v + return s +} + +// SetVendorUpdatedAt sets the VendorUpdatedAt field's value. 
+func (s *VulnerabilityVendor) SetVendorUpdatedAt(v string) *VulnerabilityVendor { + s.VendorUpdatedAt = &v + return s +} + // Details about the action that CloudFront or AWS WAF takes when a web request -// matches the conditions in the Rule. +// matches the conditions in the rule. type WafAction struct { _ struct{} `type:"structure"` // Specifies how you want AWS WAF to respond to requests that match the settings - // in a Rule. + // in a rule. // // Valid settings include the following: // @@ -14264,6 +23993,43 @@ func (s *Workflow) SetStatus(v string) *Workflow { return s } +// Used to update information about the investigation into the finding. +type WorkflowUpdate struct { + _ struct{} `type:"structure"` + + // The status of the investigation into the finding. The allowed values are + // the following. + // + // * NEW - The initial state of a finding, before it is reviewed. + // + // * NOTIFIED - Indicates that you notified the resource owner about the + // security issue. Used when the initial reviewer is not the resource owner, + // and needs intervention from the resource owner. + // + // * RESOLVED - The finding was reviewed and remediated and is now considered + // resolved. + // + // * SUPPRESSED - The finding will not be reviewed again and will not be + // acted upon. + Status *string `type:"string" enum:"WorkflowStatus"` +} + +// String returns the string representation +func (s WorkflowUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkflowUpdate) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *WorkflowUpdate) SetStatus(v string) *WorkflowUpdate { + s.Status = &v + return s +} + const ( // AwsIamAccessKeyStatusActive is a AwsIamAccessKeyStatus enum value AwsIamAccessKeyStatusActive = "Active" @@ -14272,6 +24038,14 @@ const ( AwsIamAccessKeyStatusInactive = "Inactive" ) +// AwsIamAccessKeyStatus_Values returns all elements of the AwsIamAccessKeyStatus enum +func AwsIamAccessKeyStatus_Values() []string { + return []string{ + AwsIamAccessKeyStatusActive, + AwsIamAccessKeyStatusInactive, + } +} + const ( // ComplianceStatusPassed is a ComplianceStatus enum value ComplianceStatusPassed = "PASSED" @@ -14286,6 +24060,16 @@ const ( ComplianceStatusNotAvailable = "NOT_AVAILABLE" ) +// ComplianceStatus_Values returns all elements of the ComplianceStatus enum +func ComplianceStatus_Values() []string { + return []string{ + ComplianceStatusPassed, + ComplianceStatusWarning, + ComplianceStatusFailed, + ComplianceStatusNotAvailable, + } +} + const ( // ControlStatusEnabled is a ControlStatus enum value ControlStatusEnabled = "ENABLED" @@ -14294,11 +24078,26 @@ const ( ControlStatusDisabled = "DISABLED" ) +// ControlStatus_Values returns all elements of the ControlStatus enum +func ControlStatus_Values() []string { + return []string{ + ControlStatusEnabled, + ControlStatusDisabled, + } +} + const ( // DateRangeUnitDays is a DateRangeUnit enum value DateRangeUnitDays = "DAYS" ) +// DateRangeUnit_Values returns all elements of the DateRangeUnit enum +func DateRangeUnit_Values() []string { + return []string{ + DateRangeUnitDays, + } +} + const ( // IntegrationTypeSendFindingsToSecurityHub is a IntegrationType enum value IntegrationTypeSendFindingsToSecurityHub = "SEND_FINDINGS_TO_SECURITY_HUB" @@ -14307,6 +24106,14 @@ const ( IntegrationTypeReceiveFindingsFromSecurityHub = "RECEIVE_FINDINGS_FROM_SECURITY_HUB" ) +// IntegrationType_Values returns all elements of the 
IntegrationType enum +func IntegrationType_Values() []string { + return []string{ + IntegrationTypeSendFindingsToSecurityHub, + IntegrationTypeReceiveFindingsFromSecurityHub, + } +} + const ( // MalwareStateObserved is a MalwareState enum value MalwareStateObserved = "OBSERVED" @@ -14318,6 +24125,15 @@ const ( MalwareStateRemoved = "REMOVED" ) +// MalwareState_Values returns all elements of the MalwareState enum +func MalwareState_Values() []string { + return []string{ + MalwareStateObserved, + MalwareStateRemovalFailed, + MalwareStateRemoved, + } +} + const ( // MalwareTypeAdware is a MalwareType enum value MalwareTypeAdware = "ADWARE" @@ -14365,11 +24181,43 @@ const ( MalwareTypeWorm = "WORM" ) +// MalwareType_Values returns all elements of the MalwareType enum +func MalwareType_Values() []string { + return []string{ + MalwareTypeAdware, + MalwareTypeBlendedThreat, + MalwareTypeBotnetAgent, + MalwareTypeCoinMiner, + MalwareTypeExploitKit, + MalwareTypeKeylogger, + MalwareTypeMacro, + MalwareTypePotentiallyUnwanted, + MalwareTypeSpyware, + MalwareTypeRansomware, + MalwareTypeRemoteAccess, + MalwareTypeRootkit, + MalwareTypeTrojan, + MalwareTypeVirus, + MalwareTypeWorm, + } +} + const ( // MapFilterComparisonEquals is a MapFilterComparison enum value MapFilterComparisonEquals = "EQUALS" + + // MapFilterComparisonNotEquals is a MapFilterComparison enum value + MapFilterComparisonNotEquals = "NOT_EQUALS" ) +// MapFilterComparison_Values returns all elements of the MapFilterComparison enum +func MapFilterComparison_Values() []string { + return []string{ + MapFilterComparisonEquals, + MapFilterComparisonNotEquals, + } +} + const ( // NetworkDirectionIn is a NetworkDirection enum value NetworkDirectionIn = "IN" @@ -14378,6 +24226,14 @@ const ( NetworkDirectionOut = "OUT" ) +// NetworkDirection_Values returns all elements of the NetworkDirection enum +func NetworkDirection_Values() []string { + return []string{ + NetworkDirectionIn, + NetworkDirectionOut, + } +} + const ( // PartitionAws is a Partition enum value PartitionAws = "aws" @@ -14389,6 +24245,15 @@ const ( PartitionAwsUsGov = "aws-us-gov" ) +// Partition_Values returns all elements of the Partition enum +func Partition_Values() []string { + return []string{ + PartitionAws, + PartitionAwsCn, + PartitionAwsUsGov, + } +} + const ( // RecordStateActive is a RecordState enum value RecordStateActive = "ACTIVE" @@ -14397,6 +24262,14 @@ const ( RecordStateArchived = "ARCHIVED" ) +// RecordState_Values returns all elements of the RecordState enum +func RecordState_Values() []string { + return []string{ + RecordStateActive, + RecordStateArchived, + } +} + const ( // SeverityLabelInformational is a SeverityLabel enum value SeverityLabelInformational = "INFORMATIONAL" @@ -14414,6 +24287,17 @@ const ( SeverityLabelCritical = "CRITICAL" ) +// SeverityLabel_Values returns all elements of the SeverityLabel enum +func SeverityLabel_Values() []string { + return []string{ + SeverityLabelInformational, + SeverityLabelLow, + SeverityLabelMedium, + SeverityLabelHigh, + SeverityLabelCritical, + } +} + const ( // SeverityRatingLow is a SeverityRating enum value SeverityRatingLow = "LOW" @@ -14428,6 +24312,16 @@ const ( SeverityRatingCritical = "CRITICAL" ) +// SeverityRating_Values returns all elements of the SeverityRating enum +func SeverityRating_Values() []string { + return []string{ + SeverityRatingLow, + SeverityRatingMedium, + SeverityRatingHigh, + SeverityRatingCritical, + } +} + const ( // SortOrderAsc is a SortOrder enum value SortOrderAsc = "asc" 
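The generated *_Values() helpers added throughout these hunks return the complete list of allowed strings for each enum, which is handy for validating user-supplied values before building an API request. A minimal, self-contained sketch using one of the helpers added just above (SeverityLabel_Values); the validEnumValue helper is local to the example, not part of the SDK:

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/service/securityhub"
)

// validEnumValue reports whether v is one of the allowed enum strings.
// The allowed set comes straight from a generated *_Values() helper.
func validEnumValue(v string, allowed []string) bool {
	for _, a := range allowed {
		if v == a {
			return true
		}
	}
	return false
}

func main() {
	label := "CRITICAL"
	if !validEnumValue(label, securityhub.SeverityLabel_Values()) {
		fmt.Printf("invalid severity label %q, expected one of: %s\n",
			label, strings.Join(securityhub.SeverityLabel_Values(), ", "))
		return
	}
	fmt.Println("severity label is valid:", label)
}

In terraform-provider-aws these slices are usually handed to the plugin SDK's StringInSlice validator; the exact import path depends on which plugin SDK version is vendored, so it is not shown here.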
@@ -14436,6 +24330,14 @@ const ( SortOrderDesc = "desc" ) +// SortOrder_Values returns all elements of the SortOrder enum +func SortOrder_Values() []string { + return []string{ + SortOrderAsc, + SortOrderDesc, + } +} + const ( // StandardsStatusPending is a StandardsStatus enum value StandardsStatusPending = "PENDING" @@ -14453,14 +24355,41 @@ const ( StandardsStatusIncomplete = "INCOMPLETE" ) +// StandardsStatus_Values returns all elements of the StandardsStatus enum +func StandardsStatus_Values() []string { + return []string{ + StandardsStatusPending, + StandardsStatusReady, + StandardsStatusFailed, + StandardsStatusDeleting, + StandardsStatusIncomplete, + } +} + const ( // StringFilterComparisonEquals is a StringFilterComparison enum value StringFilterComparisonEquals = "EQUALS" // StringFilterComparisonPrefix is a StringFilterComparison enum value StringFilterComparisonPrefix = "PREFIX" + + // StringFilterComparisonNotEquals is a StringFilterComparison enum value + StringFilterComparisonNotEquals = "NOT_EQUALS" + + // StringFilterComparisonPrefixNotEquals is a StringFilterComparison enum value + StringFilterComparisonPrefixNotEquals = "PREFIX_NOT_EQUALS" ) +// StringFilterComparison_Values returns all elements of the StringFilterComparison enum +func StringFilterComparison_Values() []string { + return []string{ + StringFilterComparisonEquals, + StringFilterComparisonPrefix, + StringFilterComparisonNotEquals, + StringFilterComparisonPrefixNotEquals, + } +} + const ( // ThreatIntelIndicatorCategoryBackdoor is a ThreatIntelIndicatorCategory enum value ThreatIntelIndicatorCategoryBackdoor = "BACKDOOR" @@ -14481,6 +24410,18 @@ const ( ThreatIntelIndicatorCategoryKeylogger = "KEYLOGGER" ) +// ThreatIntelIndicatorCategory_Values returns all elements of the ThreatIntelIndicatorCategory enum +func ThreatIntelIndicatorCategory_Values() []string { + return []string{ + ThreatIntelIndicatorCategoryBackdoor, + ThreatIntelIndicatorCategoryCardStealer, + ThreatIntelIndicatorCategoryCommandAndControl, + ThreatIntelIndicatorCategoryDropSite, + ThreatIntelIndicatorCategoryExploitSite, + ThreatIntelIndicatorCategoryKeylogger, + } +} + const ( // ThreatIntelIndicatorTypeDomain is a ThreatIntelIndicatorType enum value ThreatIntelIndicatorTypeDomain = "DOMAIN" @@ -14516,6 +24457,23 @@ const ( ThreatIntelIndicatorTypeUrl = "URL" ) +// ThreatIntelIndicatorType_Values returns all elements of the ThreatIntelIndicatorType enum +func ThreatIntelIndicatorType_Values() []string { + return []string{ + ThreatIntelIndicatorTypeDomain, + ThreatIntelIndicatorTypeEmailAddress, + ThreatIntelIndicatorTypeHashMd5, + ThreatIntelIndicatorTypeHashSha1, + ThreatIntelIndicatorTypeHashSha256, + ThreatIntelIndicatorTypeHashSha512, + ThreatIntelIndicatorTypeIpv4Address, + ThreatIntelIndicatorTypeIpv6Address, + ThreatIntelIndicatorTypeMutex, + ThreatIntelIndicatorTypeProcess, + ThreatIntelIndicatorTypeUrl, + } +} + const ( // VerificationStateUnknown is a VerificationState enum value VerificationStateUnknown = "UNKNOWN" @@ -14530,6 +24488,16 @@ const ( VerificationStateBenignPositive = "BENIGN_POSITIVE" ) +// VerificationState_Values returns all elements of the VerificationState enum +func VerificationState_Values() []string { + return []string{ + VerificationStateUnknown, + VerificationStateTruePositive, + VerificationStateFalsePositive, + VerificationStateBenignPositive, + } +} + const ( // WorkflowStateNew is a WorkflowState enum value WorkflowStateNew = "NEW" @@ -14547,6 +24515,17 @@ const ( WorkflowStateResolved = "RESOLVED" ) 
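The Severity documentation earlier in this section describes how Security Hub derives Label from Normalized when only one of the two is supplied (0 maps to INFORMATIONAL, 1-39 to LOW, 40-69 to MEDIUM, 70-89 to HIGH, 90-100 to CRITICAL). The helper below is only a local restatement of that documented mapping, written against the SeverityLabel constants above for illustration; it is not an SDK function.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/securityhub"
)

// severityLabelFromNormalized mirrors the mapping documented on the
// Severity struct. It only illustrates how Security Hub populates Label
// when a finding provides Normalized alone.
func severityLabelFromNormalized(normalized int64) string {
	switch {
	case normalized == 0:
		return securityhub.SeverityLabelInformational
	case normalized >= 1 && normalized <= 39:
		return securityhub.SeverityLabelLow
	case normalized >= 40 && normalized <= 69:
		return securityhub.SeverityLabelMedium
	case normalized >= 70 && normalized <= 89:
		return securityhub.SeverityLabelHigh
	default: // documented range tops out at 90-100 = CRITICAL
		return securityhub.SeverityLabelCritical
	}
}

func main() {
	for _, n := range []int64{0, 25, 55, 80, 95} {
		fmt.Printf("Normalized %d -> %s\n", n, severityLabelFromNormalized(n))
	}
}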
+// WorkflowState_Values returns all elements of the WorkflowState enum +func WorkflowState_Values() []string { + return []string{ + WorkflowStateNew, + WorkflowStateAssigned, + WorkflowStateInProgress, + WorkflowStateDeferred, + WorkflowStateResolved, + } +} + const ( // WorkflowStatusNew is a WorkflowStatus enum value WorkflowStatusNew = "NEW" @@ -14560,3 +24539,13 @@ const ( // WorkflowStatusSuppressed is a WorkflowStatus enum value WorkflowStatusSuppressed = "SUPPRESSED" ) + +// WorkflowStatus_Values returns all elements of the WorkflowStatus enum +func WorkflowStatus_Values() []string { + return []string{ + WorkflowStatusNew, + WorkflowStatusNotified, + WorkflowStatusResolved, + WorkflowStatusSuppressed, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/securityhub/doc.go b/vendor/github.com/aws/aws-sdk-go/service/securityhub/doc.go index f849ae650..1fbe2991d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/securityhub/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/securityhub/doc.go @@ -26,12 +26,18 @@ // // The following throttling limits apply to using Security Hub API operations. // +// * BatchEnableStandards - RateLimit of 1 request per second, BurstLimit +// of 1 request per second. +// // * GetFindings - RateLimit of 3 requests per second. BurstLimit of 6 requests // per second. // // * UpdateFindings - RateLimit of 1 request per second. BurstLimit of 5 // requests per second. // +// * UpdateStandardsControl - RateLimit of 1 request per second, BurstLimit +// of 5 requests per second. +// // * All other operations - RateLimit of 10 requests per second. BurstLimit // of 30 requests per second. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go b/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go index 7d8ed87a4..098191159 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/api.go b/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/api.go index 1bad4cd42..803a1cef5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/api.go @@ -1727,8 +1727,8 @@ func (s *ApplicationSummary) SetSpdxLicenseId(v string) *ApplicationSummary { // One of the parameters in the request is invalid. type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // 400 ErrorCode *string `locationName:"errorCode" type:"string"` @@ -1749,17 +1749,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "BadRequestException" } // Message returns the exception's message. 
-func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1767,28 +1767,28 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } // The resource already exists. type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // 409 ErrorCode *string `locationName:"errorCode" type:"string"` @@ -1809,17 +1809,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1827,22 +1827,22 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } type CreateApplicationOutput struct { @@ -2677,8 +2677,8 @@ func (s DeleteApplicationOutput) GoString() string { // The client is not authenticated. type ForbiddenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // 403 ErrorCode *string `locationName:"errorCode" type:"string"` @@ -2699,17 +2699,17 @@ func (s ForbiddenException) GoString() string { func newErrorForbiddenException(v protocol.ResponseMetadata) error { return &ForbiddenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ForbiddenException) Code() string { +func (s *ForbiddenException) Code() string { return "ForbiddenException" } // Message returns the exception's message. -func (s ForbiddenException) Message() string { +func (s *ForbiddenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2717,22 +2717,22 @@ func (s ForbiddenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ForbiddenException) OrigErr() error { +func (s *ForbiddenException) OrigErr() error { return nil } -func (s ForbiddenException) Error() string { +func (s *ForbiddenException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ForbiddenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ForbiddenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ForbiddenException) RequestID() string { - return s.respMetadata.RequestID +func (s *ForbiddenException) RequestID() string { + return s.RespMetadata.RequestID } type GetApplicationInput struct { @@ -3089,8 +3089,8 @@ func (s *GetCloudFormationTemplateOutput) SetTemplateUrl(v string) *GetCloudForm // The AWS Serverless Application Repository service encountered an internal // error. type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // 500 ErrorCode *string `locationName:"errorCode" type:"string"` @@ -3112,17 +3112,17 @@ func (s InternalServerErrorException) GoString() string { func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { return &InternalServerErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerErrorException) Code() string { +func (s *InternalServerErrorException) Code() string { return "InternalServerErrorException" } // Message returns the exception's message. -func (s InternalServerErrorException) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3130,22 +3130,22 @@ func (s InternalServerErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerErrorException) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s InternalServerErrorException) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID } type ListApplicationDependenciesInput struct { @@ -3408,8 +3408,8 @@ func (s *ListApplicationsOutput) SetNextToken(v string) *ListApplicationsOutput // The resource (for example, an access policy statement) specified in the request // doesn't exist. type NotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // 404 ErrorCode *string `locationName:"errorCode" type:"string"` @@ -3431,17 +3431,17 @@ func (s NotFoundException) GoString() string { func newErrorNotFoundException(v protocol.ResponseMetadata) error { return &NotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NotFoundException) Code() string { +func (s *NotFoundException) Code() string { return "NotFoundException" } // Message returns the exception's message. -func (s NotFoundException) Message() string { +func (s *NotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3449,22 +3449,22 @@ func (s NotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NotFoundException) OrigErr() error { +func (s *NotFoundException) OrigErr() error { return nil } -func (s NotFoundException) Error() string { +func (s *NotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s NotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *NotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Parameters supported by the application. @@ -3966,8 +3966,8 @@ func (s *Tag) SetValue(v string) *Tag { // The client is sending more than the allowed number of requests per unit of // time. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // 429 ErrorCode *string `locationName:"errorCode" type:"string"` @@ -3989,17 +3989,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4007,22 +4007,22 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } type UnshareApplicationInput struct { @@ -4520,6 +4520,16 @@ const ( CapabilityCapabilityResourcePolicy = "CAPABILITY_RESOURCE_POLICY" ) +// Capability_Values returns all elements of the Capability enum +func Capability_Values() []string { + return []string{ + CapabilityCapabilityIam, + CapabilityCapabilityNamedIam, + CapabilityCapabilityAutoExpand, + CapabilityCapabilityResourcePolicy, + } +} + const ( // StatusPreparing is a Status enum value StatusPreparing = "PREPARING" @@ -4530,3 +4540,12 @@ const ( // StatusExpired is a Status enum value StatusExpired = "EXPIRED" ) + +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusPreparing, + StatusActive, + StatusExpired, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go b/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go index ea934c8ac..289f4a111 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go index 4644fc013..18252e81b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/api.go @@ -327,6 +327,8 @@ func (c *ServiceCatalog) AssociateProductWithPortfolioRequest(input *AssociatePr // // Associates the specified product with the specified portfolio. // +// A delegated admin is authorized to invoke this command. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -851,6 +853,8 @@ func (c *ServiceCatalog) CreateConstraintRequest(input *CreateConstraintInput) ( // // Creates a constraint. // +// A delegated admin is authorized to invoke this command. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -941,6 +945,8 @@ func (c *ServiceCatalog) CreatePortfolioRequest(input *CreatePortfolioInput) (re // // Creates a portfolio. 
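Illustrative sketch only (outside the vendored diff): the new Capability_Values and Status_Values helpers enumerate the enum constants, and in provider code such helpers are typically fed straight into schema validation. This assumes the v2 Terraform plugin SDK import paths; the helper name below is hypothetical.

package aws

import (
	"github.com/aws/aws-sdk-go/service/serverlessapplicationrepository"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// capabilitiesSchema is a hypothetical helper showing the usual pattern:
// validate user input against the SDK-provided enum values rather than a
// hand-maintained list.
func capabilitiesSchema() *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeSet,
		Optional: true,
		Elem: &schema.Schema{
			Type:         schema.TypeString,
			ValidateFunc: validation.StringInSlice(serverlessapplicationrepository.Capability_Values(), false),
		},
	}
}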
// +// A delegated admin is authorized to invoke this command. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1029,9 +1035,18 @@ func (c *ServiceCatalog) CreatePortfolioShareRequest(input *CreatePortfolioShare // CreatePortfolioShare API operation for AWS Service Catalog. // // Shares the specified portfolio with the specified account or organization -// node. Shares to an organization node can only be created by the master account -// of an Organization. AWSOrganizationsAccess must be enabled in order to create -// a portfolio share to an organization node. +// node. Shares to an organization node can only be created by the management +// account of an organization or by a delegated administrator. You can share +// portfolios to an organization, an organizational unit, or a specific account. +// +// Note that if a delegated admin is de-registered, they can no longer create +// portfolio shares. +// +// AWSOrganizationsAccess must be enabled in order to create a portfolio share +// to an organization node. +// +// You can't share a shared resource. This includes portfolios that contain +// a shared product. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1128,6 +1143,8 @@ func (c *ServiceCatalog) CreateProductRequest(input *CreateProductInput) (req *r // // Creates a product. // +// A delegated admin is authorized to invoke this command. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1577,6 +1594,8 @@ func (c *ServiceCatalog) DeleteConstraintRequest(input *DeleteConstraintInput) ( // // Deletes the specified constraint. // +// A delegated admin is authorized to invoke this command. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1663,6 +1682,8 @@ func (c *ServiceCatalog) DeletePortfolioRequest(input *DeletePortfolioInput) (re // You cannot delete a portfolio if it was shared with you or if it has associated // products, users, constraints, or shared accounts. // +// A delegated admin is authorized to invoke this command. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1753,8 +1774,11 @@ func (c *ServiceCatalog) DeletePortfolioShareRequest(input *DeletePortfolioShare // DeletePortfolioShare API operation for AWS Service Catalog. // // Stops sharing the specified portfolio with the specified account or organization -// node. Shares to an organization node can only be deleted by the master account -// of an Organization. +// node. Shares to an organization node can only be deleted by the management +// account of an organization or by a delegated administrator. +// +// Note that if a delegated admin is de-registered, portfolio shares created +// from that account are removed. // // Returns awserr.Error for service API and SDK errors. 
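Editorial aside, not part of the vendored diff: several of the doc updates above assume a delegated administrator has been registered for Service Catalog. A rough sketch of that one-time setup from the management account, using the AWS Organizations client; the member account ID is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/organizations"
)

func main() {
	sess := session.Must(session.NewSession())
	org := organizations.New(sess)

	// Trusted access must be enabled before a delegated admin can be registered.
	if _, err := org.EnableAWSServiceAccess(&organizations.EnableAWSServiceAccessInput{
		ServicePrincipal: aws.String("servicecatalog.amazonaws.com"),
	}); err != nil {
		log.Fatal(err)
	}

	// Placeholder member account ID.
	if _, err := org.RegisterDelegatedAdministrator(&organizations.RegisterDelegatedAdministratorInput{
		AccountId:        aws.String("111122223333"),
		ServicePrincipal: aws.String("servicecatalog.amazonaws.com"),
	}); err != nil {
		log.Fatal(err)
	}
}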
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1850,6 +1874,8 @@ func (c *ServiceCatalog) DeleteProductRequest(input *DeleteProductInput) (req *r // You cannot delete a product if it was shared with you or is associated with // a portfolio. // +// A delegated admin is authorized to invoke this command. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2449,6 +2475,8 @@ func (c *ServiceCatalog) DescribePortfolioRequest(input *DescribePortfolioInput) // // Gets information about the specified portfolio. // +// A delegated admin is authorized to invoke this command. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2527,7 +2555,8 @@ func (c *ServiceCatalog) DescribePortfolioShareStatusRequest(input *DescribePort // DescribePortfolioShareStatus API operation for AWS Service Catalog. // // Gets the status of the specified portfolio share operation. This API can -// only be called by the master account in the organization. +// only be called by the management account in the organization or by a delegated +// admin. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2708,6 +2737,9 @@ func (c *ServiceCatalog) DescribeProductAsAdminRequest(input *DescribeProductAsA // * ResourceNotFoundException // The specified resource was not found. // +// * InvalidParametersException +// One or more parameters provided to the operation are not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/DescribeProductAsAdmin func (c *ServiceCatalog) DescribeProductAsAdmin(input *DescribeProductAsAdminInput) (*DescribeProductAsAdminOutput, error) { req, out := c.DescribeProductAsAdminRequest(input) @@ -2869,6 +2901,9 @@ func (c *ServiceCatalog) DescribeProvisionedProductRequest(input *DescribeProvis // * ResourceNotFoundException // The specified resource was not found. // +// * InvalidParametersException +// One or more parameters provided to the operation are not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/DescribeProvisionedProduct func (c *ServiceCatalog) DescribeProvisionedProduct(input *DescribeProvisionedProductInput) (*DescribeProvisionedProductOutput, error) { req, out := c.DescribeProvisionedProductRequest(input) @@ -3031,6 +3066,9 @@ func (c *ServiceCatalog) DescribeProvisioningArtifactRequest(input *DescribeProv // * ResourceNotFoundException // The specified resource was not found. // +// * InvalidParametersException +// One or more parameters provided to the operation are not valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/DescribeProvisioningArtifact func (c *ServiceCatalog) DescribeProvisioningArtifact(input *DescribeProvisioningArtifactInput) (*DescribeProvisioningArtifactOutput, error) { req, out := c.DescribeProvisioningArtifactRequest(input) @@ -3526,7 +3564,12 @@ func (c *ServiceCatalog) DisableAWSOrganizationsAccessRequest(input *DisableAWSO // will not delete your current shares but it will prevent you from creating // new shares throughout your organization. 
Current shares will not be in sync // with your organization structure if it changes after calling this API. This -// API can only be called by the master account in the organization. +// API can only be called by the management account in the organization. +// +// This API can't be invoked if there are active delegated administrators in +// the organization. +// +// Note that a delegated administrator is not authorized to invoke DisableAWSOrganizationsAccess. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3779,6 +3822,8 @@ func (c *ServiceCatalog) DisassociateProductFromPortfolioRequest(input *Disassoc // // Disassociates the specified product from the specified portfolio. // +// A delegated admin is authorized to invoke this command. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4033,12 +4078,14 @@ func (c *ServiceCatalog) EnableAWSOrganizationsAccessRequest(input *EnableAWSOrg // Enable portfolio sharing feature through AWS Organizations. This API will // allow Service Catalog to receive updates on your organization in order to // sync your shares with the current structure. This API can only be called -// by the master account in the organization. +// by the management account in the organization. // // By calling this API Service Catalog will make a call to organizations:EnableAWSServiceAccess // on your behalf so that your shares can be in sync with any changes in your // AWS Organizations structure. // +// Note that a delegated administrator is not authorized to invoke EnableAWSOrganizationsAccess. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -4300,7 +4347,8 @@ func (c *ServiceCatalog) GetAWSOrganizationsAccessStatusRequest(input *GetAWSOrg // GetAWSOrganizationsAccessStatus API operation for AWS Service Catalog. // // Get the Access Status for AWS Organization portfolio share feature. This -// API can only be called by the master account in the organization. +// API can only be called by the management account in the organization or by +// a delegated admin. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4338,6 +4386,148 @@ func (c *ServiceCatalog) GetAWSOrganizationsAccessStatusWithContext(ctx aws.Cont return out, req.Send() } +const opGetProvisionedProductOutputs = "GetProvisionedProductOutputs" + +// GetProvisionedProductOutputsRequest generates a "aws/request.Request" representing the +// client's request for the GetProvisionedProductOutputs operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetProvisionedProductOutputs for more information on using the GetProvisionedProductOutputs +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the GetProvisionedProductOutputsRequest method. +// req, resp := client.GetProvisionedProductOutputsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/GetProvisionedProductOutputs +func (c *ServiceCatalog) GetProvisionedProductOutputsRequest(input *GetProvisionedProductOutputsInput) (req *request.Request, output *GetProvisionedProductOutputsOutput) { + op := &request.Operation{ + Name: opGetProvisionedProductOutputs, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"PageToken"}, + OutputTokens: []string{"NextPageToken"}, + LimitToken: "PageSize", + TruncationToken: "", + }, + } + + if input == nil { + input = &GetProvisionedProductOutputsInput{} + } + + output = &GetProvisionedProductOutputsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetProvisionedProductOutputs API operation for AWS Service Catalog. +// +// This API takes either a ProvisonedProductId or a ProvisionedProductName, +// along with a list of one or more output keys, and responds with the key/value +// pairs of those outputs. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Service Catalog's +// API operation GetProvisionedProductOutputs for usage and error information. +// +// Returned Error Types: +// * InvalidParametersException +// One or more parameters provided to the operation are not valid. +// +// * ResourceNotFoundException +// The specified resource was not found. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicecatalog-2015-12-10/GetProvisionedProductOutputs +func (c *ServiceCatalog) GetProvisionedProductOutputs(input *GetProvisionedProductOutputsInput) (*GetProvisionedProductOutputsOutput, error) { + req, out := c.GetProvisionedProductOutputsRequest(input) + return out, req.Send() +} + +// GetProvisionedProductOutputsWithContext is the same as GetProvisionedProductOutputs with the addition of +// the ability to pass a context and additional request options. +// +// See GetProvisionedProductOutputs for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceCatalog) GetProvisionedProductOutputsWithContext(ctx aws.Context, input *GetProvisionedProductOutputsInput, opts ...request.Option) (*GetProvisionedProductOutputsOutput, error) { + req, out := c.GetProvisionedProductOutputsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetProvisionedProductOutputsPages iterates over the pages of a GetProvisionedProductOutputs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetProvisionedProductOutputs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetProvisionedProductOutputs operation. 
+// pageNum := 0 +// err := client.GetProvisionedProductOutputsPages(params, +// func(page *servicecatalog.GetProvisionedProductOutputsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ServiceCatalog) GetProvisionedProductOutputsPages(input *GetProvisionedProductOutputsInput, fn func(*GetProvisionedProductOutputsOutput, bool) bool) error { + return c.GetProvisionedProductOutputsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetProvisionedProductOutputsPagesWithContext same as GetProvisionedProductOutputsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceCatalog) GetProvisionedProductOutputsPagesWithContext(ctx aws.Context, input *GetProvisionedProductOutputsInput, fn func(*GetProvisionedProductOutputsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetProvisionedProductOutputsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetProvisionedProductOutputsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetProvisionedProductOutputsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListAcceptedPortfolioShares = "ListAcceptedPortfolioShares" // ListAcceptedPortfolioSharesRequest generates a "aws/request.Request" representing the @@ -4951,7 +5141,10 @@ func (c *ServiceCatalog) ListOrganizationPortfolioAccessRequest(input *ListOrgan // ListOrganizationPortfolioAccess API operation for AWS Service Catalog. // // Lists the organization nodes that have access to the specified portfolio. -// This API can only be called by the master account in the organization. +// This API can only be called by the management account in the organization +// or by a delegated admin. +// +// If a delegated admin is de-registered, they can no longer perform this operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5096,6 +5289,10 @@ func (c *ServiceCatalog) ListPortfolioAccessRequest(input *ListPortfolioAccessIn // // Lists the account IDs that have access to the specified portfolio. // +// A delegated admin can list the accounts that have access to the shared portfolio. +// Note that if a delegated admin is de-registered, they can no longer perform +// this operation. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -8107,8 +8304,8 @@ type AcceptPortfolioShareInput struct { // The type of shared portfolios to accept. The default is to accept imported // portfolios. // - // * AWS_ORGANIZATIONS - Accept portfolios shared by the master account of - // your organization. + // * AWS_ORGANIZATIONS - Accept portfolios shared by the management account + // of your organization. // // * IMPORTED - Accept imported portfolios. // @@ -8904,6 +9101,14 @@ type ConstraintDetail struct { // The owner of the constraint. 
Owner *string `type:"string"` + // The identifier of the portfolio the product resides in. The constraint applies + // only to the instance of the product that lives within this portfolio. + PortfolioId *string `min:"1" type:"string"` + + // The identifier of the product the constraint applies to. Note that a constraint + // applies to a specific instance of a product within a certain portfolio. + ProductId *string `min:"1" type:"string"` + // The type of constraint. // // * LAUNCH @@ -8944,6 +9149,18 @@ func (s *ConstraintDetail) SetOwner(v string) *ConstraintDetail { return s } +// SetPortfolioId sets the PortfolioId field's value. +func (s *ConstraintDetail) SetPortfolioId(v string) *ConstraintDetail { + s.PortfolioId = &v + return s +} + +// SetProductId sets the ProductId field's value. +func (s *ConstraintDetail) SetProductId(v string) *ConstraintDetail { + s.ProductId = &v + return s +} + // SetType sets the Type field's value. func (s *ConstraintDetail) SetType(v string) *ConstraintDetail { s.Type = &v @@ -9150,10 +9367,25 @@ type CreateConstraintInput struct { // // LAUNCH // + // You are required to specify either the RoleArn or the LocalRoleName but can't + // use both. + // // Specify the RoleArn property as follows: // // {"RoleArn" : "arn:aws:iam::123456789012:role/LaunchRole"} // + // Specify the LocalRoleName property as follows: + // + // {"LocalRoleName": "SCBasicLaunchRole"} + // + // If you specify the LocalRoleName property, when an account uses the launch + // constraint, the IAM role with that name in the account will be used. This + // allows launch-role constraints to be account-agnostic so the administrator + // can create fewer resources per shared account. + // + // The given role name must exist in the account used to create the launch constraint + // and the account of the user who launches a product with this launch constraint. + // // You cannot have both a LAUNCH and a STACKSET constraint. // // You also cannot have more than one LAUNCH constraint on a product and portfolio. @@ -9511,9 +9743,9 @@ type CreatePortfolioShareInput struct { AccountId *string `type:"string"` // The organization node to whom you are going to share. If OrganizationNode - // is passed in, PortfolioShare will be created for the node and its children - // (when applies), and a PortfolioShareToken will be returned in the output - // in order for the administrator to monitor the status of the PortfolioShare + // is passed in, PortfolioShare will be created for the node an ListOrganizationPortfolioAccessd + // its children (when applies), and a PortfolioShareToken will be returned in + // the output in order for the administrator to monitor the status of the PortfolioShare // creation process. OrganizationNode *OrganizationNode `type:"structure"` @@ -9576,7 +9808,7 @@ func (s *CreatePortfolioShareInput) SetPortfolioId(v string) *CreatePortfolioSha type CreatePortfolioShareOutput struct { _ struct{} `type:"structure"` - // The portfolio share unique identifier. This will only be returned if portfolio + // The portfolio shares a unique identifier that only returns if the portfolio // is shared to an organization node. PortfolioShareToken *string `min:"1" type:"string"` } @@ -10221,7 +10453,11 @@ type CreateServiceActionInput struct { // // Name // - // The name of the AWS Systems Manager Document. For example, AWS-RestartEC2Instance. + // The name of the AWS Systems Manager document (SSM document). For example, + // AWS-RestartEC2Instance. 
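Illustrative sketch only (outside the vendored diff): the LAUNCH constraint documentation above adds the LocalRoleName form. A minimal example of creating such a constraint; the portfolio and product IDs are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicecatalog"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := servicecatalog.New(sess)

	out, err := svc.CreateConstraint(&servicecatalog.CreateConstraintInput{
		PortfolioId: aws.String("port-examplexxxx"), // placeholder IDs
		ProductId:   aws.String("prod-examplexxxx"),
		Type:        aws.String("LAUNCH"),
		// LocalRoleName keeps the constraint account-agnostic: each account
		// that uses it must contain an IAM role with this name.
		Parameters: aws.String(`{"LocalRoleName": "SCBasicLaunchRole"}`),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.ConstraintDetail.ConstraintId))
}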
+ // + // If you are using a shared SSM document, you must provide the ARN instead + // of the name. // // Version // @@ -11456,9 +11692,10 @@ type DescribeProductAsAdminInput struct { AcceptLanguage *string `type:"string"` // The product identifier. - // - // Id is a required field - Id *string `min:"1" type:"string" required:"true"` + Id *string `min:"1" type:"string"` + + // The product name. + Name *string `type:"string"` } // String returns the string representation @@ -11474,9 +11711,6 @@ func (s DescribeProductAsAdminInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeProductAsAdminInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeProductAsAdminInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } if s.Id != nil && len(*s.Id) < 1 { invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } @@ -11499,6 +11733,12 @@ func (s *DescribeProductAsAdminInput) SetId(v string) *DescribeProductAsAdminInp return s } +// SetName sets the Name field's value. +func (s *DescribeProductAsAdminInput) SetName(v string) *DescribeProductAsAdminInput { + s.Name = &v + return s +} + type DescribeProductAsAdminOutput struct { _ struct{} `type:"structure"` @@ -11572,9 +11812,10 @@ type DescribeProductInput struct { AcceptLanguage *string `type:"string"` // The product identifier. - // - // Id is a required field - Id *string `min:"1" type:"string" required:"true"` + Id *string `min:"1" type:"string"` + + // The product name. + Name *string `type:"string"` } // String returns the string representation @@ -11590,9 +11831,6 @@ func (s DescribeProductInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeProductInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeProductInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } if s.Id != nil && len(*s.Id) < 1 { invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } @@ -11615,12 +11853,21 @@ func (s *DescribeProductInput) SetId(v string) *DescribeProductInput { return s } +// SetName sets the Name field's value. +func (s *DescribeProductInput) SetName(v string) *DescribeProductInput { + s.Name = &v + return s +} + type DescribeProductOutput struct { _ struct{} `type:"structure"` // Information about the associated budgets. Budgets []*BudgetDetail `type:"list"` + // Information about the associated launch paths. + LaunchPaths []*LaunchPath `type:"list"` + // Summary information about the product view. ProductViewSummary *ProductViewSummary `type:"structure"` @@ -11644,6 +11891,12 @@ func (s *DescribeProductOutput) SetBudgets(v []*BudgetDetail) *DescribeProductOu return s } +// SetLaunchPaths sets the LaunchPaths field's value. +func (s *DescribeProductOutput) SetLaunchPaths(v []*LaunchPath) *DescribeProductOutput { + s.LaunchPaths = v + return s +} + // SetProductViewSummary sets the ProductViewSummary field's value. func (s *DescribeProductOutput) SetProductViewSummary(v *ProductViewSummary) *DescribeProductOutput { s.ProductViewSummary = v @@ -11744,6 +11997,10 @@ func (s *DescribeProductViewOutput) SetProvisioningArtifacts(v []*ProvisioningAr return s } +// DescribeProvisionedProductAPI input structure. AcceptLanguage - [Optional] +// The language code for localization. Id - [Optional] The provisioned product +// identifier. Name - [Optional] Another provisioned product identifier. 
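Illustrative sketch only (outside the vendored diff): with the change above, DescribeProduct accepts either Id or Name, and its output now carries the associated launch paths. The product name below is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicecatalog"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := servicecatalog.New(sess)

	// Look the product up by name; Id is no longer required.
	out, err := svc.DescribeProduct(&servicecatalog.DescribeProductInput{
		Name: aws.String("my-product"), // placeholder name
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, lp := range out.LaunchPaths {
		fmt.Printf("launch path %s (%s)\n", aws.StringValue(lp.Name), aws.StringValue(lp.Id))
	}
}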
Customers +// must provide either Id or Name. type DescribeProvisionedProductInput struct { _ struct{} `type:"structure"` @@ -11757,9 +12014,10 @@ type DescribeProvisionedProductInput struct { AcceptLanguage *string `type:"string"` // The provisioned product identifier. - // - // Id is a required field - Id *string `min:"1" type:"string" required:"true"` + Id *string `min:"1" type:"string"` + + // The name of the provisioned product. + Name *string `min:"1" type:"string"` } // String returns the string representation @@ -11775,12 +12033,12 @@ func (s DescribeProvisionedProductInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeProvisionedProductInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeProvisionedProductInput"} - if s.Id == nil { - invalidParams.Add(request.NewErrParamRequired("Id")) - } if s.Id != nil && len(*s.Id) < 1 { invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11800,6 +12058,12 @@ func (s *DescribeProvisionedProductInput) SetId(v string) *DescribeProvisionedPr return s } +// SetName sets the Name field's value. +func (s *DescribeProvisionedProductInput) SetName(v string) *DescribeProvisionedProductInput { + s.Name = &v + return s +} + type DescribeProvisionedProductOutput struct { _ struct{} `type:"structure"` @@ -11962,14 +12226,16 @@ type DescribeProvisioningArtifactInput struct { AcceptLanguage *string `type:"string"` // The product identifier. - // - // ProductId is a required field - ProductId *string `min:"1" type:"string" required:"true"` + ProductId *string `min:"1" type:"string"` + + // The product name. + ProductName *string `type:"string"` // The identifier of the provisioning artifact. - // - // ProvisioningArtifactId is a required field - ProvisioningArtifactId *string `min:"1" type:"string" required:"true"` + ProvisioningArtifactId *string `min:"1" type:"string"` + + // The provisioning artifact name. + ProvisioningArtifactName *string `type:"string"` // Indicates whether a verbose level of detail is enabled. Verbose *bool `type:"boolean"` @@ -11988,15 +12254,9 @@ func (s DescribeProvisioningArtifactInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DescribeProvisioningArtifactInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DescribeProvisioningArtifactInput"} - if s.ProductId == nil { - invalidParams.Add(request.NewErrParamRequired("ProductId")) - } if s.ProductId != nil && len(*s.ProductId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ProductId", 1)) } - if s.ProvisioningArtifactId == nil { - invalidParams.Add(request.NewErrParamRequired("ProvisioningArtifactId")) - } if s.ProvisioningArtifactId != nil && len(*s.ProvisioningArtifactId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ProvisioningArtifactId", 1)) } @@ -12019,12 +12279,24 @@ func (s *DescribeProvisioningArtifactInput) SetProductId(v string) *DescribeProv return s } +// SetProductName sets the ProductName field's value. +func (s *DescribeProvisioningArtifactInput) SetProductName(v string) *DescribeProvisioningArtifactInput { + s.ProductName = &v + return s +} + // SetProvisioningArtifactId sets the ProvisioningArtifactId field's value. 
func (s *DescribeProvisioningArtifactInput) SetProvisioningArtifactId(v string) *DescribeProvisioningArtifactInput { s.ProvisioningArtifactId = &v return s } +// SetProvisioningArtifactName sets the ProvisioningArtifactName field's value. +func (s *DescribeProvisioningArtifactInput) SetProvisioningArtifactName(v string) *DescribeProvisioningArtifactInput { + s.ProvisioningArtifactName = &v + return s +} + // SetVerbose sets the Verbose field's value. func (s *DescribeProvisioningArtifactInput) SetVerbose(v bool) *DescribeProvisioningArtifactInput { s.Verbose = &v @@ -12086,18 +12358,27 @@ type DescribeProvisioningParametersInput struct { // The path identifier of the product. This value is optional if the product // has a default path, and required if the product has more than one path. To - // list the paths for a product, use ListLaunchPaths. + // list the paths for a product, use ListLaunchPaths. You must provide the name + // or ID, but not both. PathId *string `min:"1" type:"string"` - // The product identifier. - // - // ProductId is a required field - ProductId *string `min:"1" type:"string" required:"true"` + // The name of the path. You must provide the name or ID, but not both. + PathName *string `min:"1" type:"string"` - // The identifier of the provisioning artifact. - // - // ProvisioningArtifactId is a required field - ProvisioningArtifactId *string `min:"1" type:"string" required:"true"` + // The product identifier. You must provide the product name or ID, but not + // both. + ProductId *string `min:"1" type:"string"` + + // The name of the product. You must provide the name or ID, but not both. + ProductName *string `type:"string"` + + // The identifier of the provisioning artifact. You must provide the name or + // ID, but not both. + ProvisioningArtifactId *string `min:"1" type:"string"` + + // The name of the provisioning artifact. You must provide the name or ID, but + // not both. + ProvisioningArtifactName *string `type:"string"` } // String returns the string representation @@ -12116,15 +12397,12 @@ func (s *DescribeProvisioningParametersInput) Validate() error { if s.PathId != nil && len(*s.PathId) < 1 { invalidParams.Add(request.NewErrParamMinLen("PathId", 1)) } - if s.ProductId == nil { - invalidParams.Add(request.NewErrParamRequired("ProductId")) + if s.PathName != nil && len(*s.PathName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathName", 1)) } if s.ProductId != nil && len(*s.ProductId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ProductId", 1)) } - if s.ProvisioningArtifactId == nil { - invalidParams.Add(request.NewErrParamRequired("ProvisioningArtifactId")) - } if s.ProvisioningArtifactId != nil && len(*s.ProvisioningArtifactId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ProvisioningArtifactId", 1)) } @@ -12147,24 +12425,45 @@ func (s *DescribeProvisioningParametersInput) SetPathId(v string) *DescribeProvi return s } +// SetPathName sets the PathName field's value. +func (s *DescribeProvisioningParametersInput) SetPathName(v string) *DescribeProvisioningParametersInput { + s.PathName = &v + return s +} + // SetProductId sets the ProductId field's value. func (s *DescribeProvisioningParametersInput) SetProductId(v string) *DescribeProvisioningParametersInput { s.ProductId = &v return s } +// SetProductName sets the ProductName field's value. 
+func (s *DescribeProvisioningParametersInput) SetProductName(v string) *DescribeProvisioningParametersInput { + s.ProductName = &v + return s +} + // SetProvisioningArtifactId sets the ProvisioningArtifactId field's value. func (s *DescribeProvisioningParametersInput) SetProvisioningArtifactId(v string) *DescribeProvisioningParametersInput { s.ProvisioningArtifactId = &v return s } +// SetProvisioningArtifactName sets the ProvisioningArtifactName field's value. +func (s *DescribeProvisioningParametersInput) SetProvisioningArtifactName(v string) *DescribeProvisioningParametersInput { + s.ProvisioningArtifactName = &v + return s +} + type DescribeProvisioningParametersOutput struct { _ struct{} `type:"structure"` // Information about the constraints used to provision the product. ConstraintSummaries []*ConstraintSummary `type:"list"` + // The output of the provisioning artifact. + ProvisioningArtifactOutputs []*ProvisioningArtifactOutput `type:"list"` + // Information about the parameters used to provision the product. ProvisioningArtifactParameters []*ProvisioningArtifactParameter `type:"list"` @@ -12196,8 +12495,14 @@ func (s *DescribeProvisioningParametersOutput) SetConstraintSummaries(v []*Const return s } -// SetProvisioningArtifactParameters sets the ProvisioningArtifactParameters field's value. -func (s *DescribeProvisioningParametersOutput) SetProvisioningArtifactParameters(v []*ProvisioningArtifactParameter) *DescribeProvisioningParametersOutput { +// SetProvisioningArtifactOutputs sets the ProvisioningArtifactOutputs field's value. +func (s *DescribeProvisioningParametersOutput) SetProvisioningArtifactOutputs(v []*ProvisioningArtifactOutput) *DescribeProvisioningParametersOutput { + s.ProvisioningArtifactOutputs = v + return s +} + +// SetProvisioningArtifactParameters sets the ProvisioningArtifactParameters field's value. +func (s *DescribeProvisioningParametersOutput) SetProvisioningArtifactParameters(v []*ProvisioningArtifactParameter) *DescribeProvisioningParametersOutput { s.ProvisioningArtifactParameters = v return s } @@ -13029,8 +13334,8 @@ func (s DisassociateTagOptionFromResourceOutput) GoString() string { // The specified resource is a duplicate. type DuplicateResourceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13047,17 +13352,17 @@ func (s DuplicateResourceException) GoString() string { func newErrorDuplicateResourceException(v protocol.ResponseMetadata) error { return &DuplicateResourceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicateResourceException) Code() string { +func (s *DuplicateResourceException) Code() string { return "DuplicateResourceException" } // Message returns the exception's message. -func (s DuplicateResourceException) Message() string { +func (s *DuplicateResourceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13065,22 +13370,22 @@ func (s DuplicateResourceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
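Illustrative sketch only (outside the vendored diff): DescribeProvisioningParameters now takes name-or-ID pairs and reports ProvisioningArtifactOutputs. The product and artifact names below are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicecatalog"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := servicecatalog.New(sess)

	// Provide the name *or* the ID for the product and provisioning artifact, not both.
	out, err := svc.DescribeProvisioningParameters(&servicecatalog.DescribeProvisioningParametersInput{
		ProductName:              aws.String("my-product"), // placeholder names
		ProvisioningArtifactName: aws.String("v1.0"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, o := range out.ProvisioningArtifactOutputs {
		fmt.Printf("output %s: %s\n", aws.StringValue(o.Key), aws.StringValue(o.Description))
	}
}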
-func (s DuplicateResourceException) OrigErr() error { +func (s *DuplicateResourceException) OrigErr() error { return nil } -func (s DuplicateResourceException) Error() string { +func (s *DuplicateResourceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicateResourceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicateResourceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DuplicateResourceException) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicateResourceException) RequestID() string { + return s.RespMetadata.RequestID } type EnableAWSOrganizationsAccessInput struct { @@ -13470,10 +13775,137 @@ func (s *GetAWSOrganizationsAccessStatusOutput) SetAccessStatus(v string) *GetAW return s } +type GetProvisionedProductOutputsInput struct { + _ struct{} `type:"structure"` + + // The language code. + // + // * en - English (default) + // + // * jp - Japanese + // + // * zh - Chinese + AcceptLanguage *string `type:"string"` + + // The list of keys that the API should return with their values. If none are + // provided, the API will return all outputs of the provisioned product. + OutputKeys []*string `type:"list"` + + // The maximum number of items to return with this call. + PageSize *int64 `type:"integer"` + + // The page token for the next set of results. To retrieve the first set of + // results, use null. + PageToken *string `type:"string"` + + // The identifier of the provisioned product that you want the outputs from. + ProvisionedProductId *string `min:"1" type:"string"` + + // The name of the provisioned product that you want the outputs from. + ProvisionedProductName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetProvisionedProductOutputsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetProvisionedProductOutputsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetProvisionedProductOutputsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetProvisionedProductOutputsInput"} + if s.ProvisionedProductId != nil && len(*s.ProvisionedProductId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProvisionedProductId", 1)) + } + if s.ProvisionedProductName != nil && len(*s.ProvisionedProductName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ProvisionedProductName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptLanguage sets the AcceptLanguage field's value. +func (s *GetProvisionedProductOutputsInput) SetAcceptLanguage(v string) *GetProvisionedProductOutputsInput { + s.AcceptLanguage = &v + return s +} + +// SetOutputKeys sets the OutputKeys field's value. +func (s *GetProvisionedProductOutputsInput) SetOutputKeys(v []*string) *GetProvisionedProductOutputsInput { + s.OutputKeys = v + return s +} + +// SetPageSize sets the PageSize field's value. +func (s *GetProvisionedProductOutputsInput) SetPageSize(v int64) *GetProvisionedProductOutputsInput { + s.PageSize = &v + return s +} + +// SetPageToken sets the PageToken field's value. 
+func (s *GetProvisionedProductOutputsInput) SetPageToken(v string) *GetProvisionedProductOutputsInput { + s.PageToken = &v + return s +} + +// SetProvisionedProductId sets the ProvisionedProductId field's value. +func (s *GetProvisionedProductOutputsInput) SetProvisionedProductId(v string) *GetProvisionedProductOutputsInput { + s.ProvisionedProductId = &v + return s +} + +// SetProvisionedProductName sets the ProvisionedProductName field's value. +func (s *GetProvisionedProductOutputsInput) SetProvisionedProductName(v string) *GetProvisionedProductOutputsInput { + s.ProvisionedProductName = &v + return s +} + +type GetProvisionedProductOutputsOutput struct { + _ struct{} `type:"structure"` + + // The page token to use to retrieve the next set of results. If there are no + // additional results, this value is null. + NextPageToken *string `type:"string"` + + // Information about the product created as the result of a request. For example, + // the output for a CloudFormation-backed product that creates an S3 bucket + // would include the S3 bucket URL. + Outputs []*RecordOutput `type:"list"` +} + +// String returns the string representation +func (s GetProvisionedProductOutputsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetProvisionedProductOutputsOutput) GoString() string { + return s.String() +} + +// SetNextPageToken sets the NextPageToken field's value. +func (s *GetProvisionedProductOutputsOutput) SetNextPageToken(v string) *GetProvisionedProductOutputsOutput { + s.NextPageToken = &v + return s +} + +// SetOutputs sets the Outputs field's value. +func (s *GetProvisionedProductOutputsOutput) SetOutputs(v []*RecordOutput) *GetProvisionedProductOutputsOutput { + s.Outputs = v + return s +} + // One or more parameters provided to the operation are not valid. type InvalidParametersException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13490,17 +13922,17 @@ func (s InvalidParametersException) GoString() string { func newErrorInvalidParametersException(v protocol.ResponseMetadata) error { return &InvalidParametersException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParametersException) Code() string { +func (s *InvalidParametersException) Code() string { return "InvalidParametersException" } // Message returns the exception's message. -func (s InvalidParametersException) Message() string { +func (s *InvalidParametersException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13508,30 +13940,30 @@ func (s InvalidParametersException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParametersException) OrigErr() error { +func (s *InvalidParametersException) OrigErr() error { return nil } -func (s InvalidParametersException) Error() string { +func (s *InvalidParametersException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParametersException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParametersException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
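Illustrative sketch only (outside the vendored diff): tying the GetProvisionedProductOutputs input/output shapes above to the paginator added earlier in this hunk. The provisioned product name and output key are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicecatalog"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := servicecatalog.New(sess)

	input := &servicecatalog.GetProvisionedProductOutputsInput{
		ProvisionedProductName: aws.String("my-provisioned-product"), // placeholder
		OutputKeys:             aws.StringSlice([]string{"BucketName"}),
	}
	// Iterate over every page of outputs for the provisioned product.
	err := svc.GetProvisionedProductOutputsPages(input,
		func(page *servicecatalog.GetProvisionedProductOutputsOutput, lastPage bool) bool {
			for _, o := range page.Outputs {
				fmt.Printf("%s = %s\n", aws.StringValue(o.OutputKey), aws.StringValue(o.OutputValue))
			}
			return !lastPage
		})
	if err != nil {
		log.Fatal(err)
	}
}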
-func (s InvalidParametersException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParametersException) RequestID() string { + return s.RespMetadata.RequestID } // An attempt was made to modify a resource that is in a state that is not valid. // Check your resources to ensure that they are in valid states before retrying // the operation. type InvalidStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13548,17 +13980,17 @@ func (s InvalidStateException) GoString() string { func newErrorInvalidStateException(v protocol.ResponseMetadata) error { return &InvalidStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidStateException) Code() string { +func (s *InvalidStateException) Code() string { return "InvalidStateException" } // Message returns the exception's message. -func (s InvalidStateException) Message() string { +func (s *InvalidStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13566,22 +13998,55 @@ func (s InvalidStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidStateException) OrigErr() error { +func (s *InvalidStateException) OrigErr() error { return nil } -func (s InvalidStateException) Error() string { +func (s *InvalidStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidStateException) RequestID() string { + return s.RespMetadata.RequestID +} + +// A launch path object. +type LaunchPath struct { + _ struct{} `type:"structure"` + + // The identifier of the launch path. + Id *string `min:"1" type:"string"` + + // The name of the launch path. + Name *string `type:"string"` +} + +// String returns the string representation +func (s LaunchPath) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LaunchPath) GoString() string { + return s.String() +} + +// SetId sets the Id field's value. +func (s *LaunchPath) SetId(v string) *LaunchPath { + s.Id = &v + return s +} + +// SetName sets the Name field's value. +func (s *LaunchPath) SetName(v string) *LaunchPath { + s.Name = &v + return s } // Summary information about a product path for a user. @@ -13639,8 +14104,8 @@ func (s *LaunchPathSummary) SetTags(v []*Tag) *LaunchPathSummary { // Decrease your resource use or increase your service limits and retry the // operation. 
type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13657,17 +14122,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13675,22 +14140,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListAcceptedPortfolioSharesInput struct { @@ -13714,8 +14179,8 @@ type ListAcceptedPortfolioSharesInput struct { // The type of shared portfolios to list. The default is to list imported portfolios. // - // * AWS_ORGANIZATIONS - List portfolios shared by the master account of - // your organization + // * AWS_ORGANIZATIONS - List portfolios shared by the management account + // of your organization // // * AWS_SERVICECATALOG - List default portfolios // @@ -15693,8 +16158,8 @@ func (s *ListTagOptionsOutput) SetTagOptionDetails(v []*TagOptionDetail) *ListTa // The operation is not supported. type OperationNotSupportedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -15711,17 +16176,17 @@ func (s OperationNotSupportedException) GoString() string { func newErrorOperationNotSupportedException(v protocol.ResponseMetadata) error { return &OperationNotSupportedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationNotSupportedException) Code() string { +func (s *OperationNotSupportedException) Code() string { return "OperationNotSupportedException" } // Message returns the exception's message. -func (s OperationNotSupportedException) Message() string { +func (s *OperationNotSupportedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15729,22 +16194,22 @@ func (s OperationNotSupportedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s OperationNotSupportedException) OrigErr() error { +func (s *OperationNotSupportedException) OrigErr() error { return nil } -func (s OperationNotSupportedException) Error() string { +func (s *OperationNotSupportedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OperationNotSupportedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OperationNotSupportedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationNotSupportedException) RequestID() string { - return s.respMetadata.RequestID +func (s *OperationNotSupportedException) RequestID() string { + return s.RespMetadata.RequestID } // Information about the organization node. @@ -16137,13 +16602,18 @@ type ProvisionProductInput struct { // The path identifier of the product. This value is optional if the product // has a default path, and required if the product has more than one path. To - // list the paths for a product, use ListLaunchPaths. + // list the paths for a product, use ListLaunchPaths. You must provide the name + // or ID, but not both. PathId *string `min:"1" type:"string"` - // The product identifier. - // - // ProductId is a required field - ProductId *string `min:"1" type:"string" required:"true"` + // The name of the path. You must provide the name or ID, but not both. + PathName *string `min:"1" type:"string"` + + // The product identifier. You must provide the name or ID, but not both. + ProductId *string `min:"1" type:"string"` + + // The name of the product. You must provide the name or ID, but not both. + ProductName *string `type:"string"` // An idempotency token that uniquely identifies the provisioning request. ProvisionToken *string `min:"1" type:"string" idempotencyToken:"true"` @@ -16154,10 +16624,13 @@ type ProvisionProductInput struct { // ProvisionedProductName is a required field ProvisionedProductName *string `min:"1" type:"string" required:"true"` - // The identifier of the provisioning artifact. - // - // ProvisioningArtifactId is a required field - ProvisioningArtifactId *string `min:"1" type:"string" required:"true"` + // The identifier of the provisioning artifact. You must provide the name or + // ID, but not both. + ProvisioningArtifactId *string `min:"1" type:"string"` + + // The name of the provisioning artifact. You must provide the name or ID, but + // not both. + ProvisioningArtifactName *string `type:"string"` // Parameters specified by the administrator that are required for provisioning // the product. 
@@ -16187,8 +16660,8 @@ func (s *ProvisionProductInput) Validate() error { if s.PathId != nil && len(*s.PathId) < 1 { invalidParams.Add(request.NewErrParamMinLen("PathId", 1)) } - if s.ProductId == nil { - invalidParams.Add(request.NewErrParamRequired("ProductId")) + if s.PathName != nil && len(*s.PathName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathName", 1)) } if s.ProductId != nil && len(*s.ProductId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ProductId", 1)) @@ -16202,9 +16675,6 @@ func (s *ProvisionProductInput) Validate() error { if s.ProvisionedProductName != nil && len(*s.ProvisionedProductName) < 1 { invalidParams.Add(request.NewErrParamMinLen("ProvisionedProductName", 1)) } - if s.ProvisioningArtifactId == nil { - invalidParams.Add(request.NewErrParamRequired("ProvisioningArtifactId")) - } if s.ProvisioningArtifactId != nil && len(*s.ProvisioningArtifactId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ProvisioningArtifactId", 1)) } @@ -16258,12 +16728,24 @@ func (s *ProvisionProductInput) SetPathId(v string) *ProvisionProductInput { return s } +// SetPathName sets the PathName field's value. +func (s *ProvisionProductInput) SetPathName(v string) *ProvisionProductInput { + s.PathName = &v + return s +} + // SetProductId sets the ProductId field's value. func (s *ProvisionProductInput) SetProductId(v string) *ProvisionProductInput { s.ProductId = &v return s } +// SetProductName sets the ProductName field's value. +func (s *ProvisionProductInput) SetProductName(v string) *ProvisionProductInput { + s.ProductName = &v + return s +} + // SetProvisionToken sets the ProvisionToken field's value. func (s *ProvisionProductInput) SetProvisionToken(v string) *ProvisionProductInput { s.ProvisionToken = &v @@ -16282,6 +16764,12 @@ func (s *ProvisionProductInput) SetProvisioningArtifactId(v string) *ProvisionPr return s } +// SetProvisioningArtifactName sets the ProvisioningArtifactName field's value. +func (s *ProvisionProductInput) SetProvisioningArtifactName(v string) *ProvisionProductInput { + s.ProvisioningArtifactName = &v + return s +} + // SetProvisioningParameters sets the ProvisioningParameters field's value. func (s *ProvisionProductInput) SetProvisioningParameters(v []*ProvisioningParameter) *ProvisionProductInput { s.ProvisioningParameters = v @@ -16341,9 +16829,33 @@ type ProvisionedProductAttribute struct { // repeated request. IdempotencyToken *string `min:"1" type:"string"` + // The record identifier of the last request performed on this provisioned product + // of the following types: + // + // * ProvisionedProduct + // + // * UpdateProvisionedProduct + // + // * ExecuteProvisionedProductPlan + // + // * TerminateProvisionedProduct + LastProvisioningRecordId *string `min:"1" type:"string"` + // The record identifier of the last request performed on this provisioned product. LastRecordId *string `min:"1" type:"string"` + // The record identifier of the last successful request performed on this provisioned + // product of the following types: + // + // * ProvisionedProduct + // + // * UpdateProvisionedProduct + // + // * ExecuteProvisionedProductPlan + // + // * TerminateProvisionedProduct + LastSuccessfulProvisioningRecordId *string `min:"1" type:"string"` + // The user-friendly name of the provisioned product. Name *string `min:"1" type:"string"` @@ -16354,9 +16866,15 @@ type ProvisionedProductAttribute struct { // The product identifier. ProductId *string `min:"1" type:"string"` + // The name of the product. 
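Illustrative sketch only (outside the vendored diff): ProvisionProduct likewise now accepts name-or-ID for the product, provisioning artifact, and path. The names and the parameter key below are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicecatalog"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := servicecatalog.New(sess)

	out, err := svc.ProvisionProduct(&servicecatalog.ProvisionProductInput{
		ProductName:              aws.String("my-product"), // placeholder names throughout
		ProvisioningArtifactName: aws.String("v1.0"),
		PathName:                 aws.String("my-portfolio"), // only needed when the product has more than one path
		ProvisionedProductName:   aws.String("my-instance"),
		ProvisioningParameters: []*servicecatalog.ProvisioningParameter{
			{Key: aws.String("BucketName"), Value: aws.String("example-bucket")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.RecordDetail.RecordId))
}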
+ ProductName *string `type:"string"` + // The identifier of the provisioning artifact. ProvisioningArtifactId *string `min:"1" type:"string"` + // The name of the provisioning artifact. + ProvisioningArtifactName *string `type:"string"` + // The current status of the provisioned product. // // * AVAILABLE - Stable state, ready to perform any operation. The most recent @@ -16431,12 +16949,24 @@ func (s *ProvisionedProductAttribute) SetIdempotencyToken(v string) *Provisioned return s } +// SetLastProvisioningRecordId sets the LastProvisioningRecordId field's value. +func (s *ProvisionedProductAttribute) SetLastProvisioningRecordId(v string) *ProvisionedProductAttribute { + s.LastProvisioningRecordId = &v + return s +} + // SetLastRecordId sets the LastRecordId field's value. func (s *ProvisionedProductAttribute) SetLastRecordId(v string) *ProvisionedProductAttribute { s.LastRecordId = &v return s } +// SetLastSuccessfulProvisioningRecordId sets the LastSuccessfulProvisioningRecordId field's value. +func (s *ProvisionedProductAttribute) SetLastSuccessfulProvisioningRecordId(v string) *ProvisionedProductAttribute { + s.LastSuccessfulProvisioningRecordId = &v + return s +} + // SetName sets the Name field's value. func (s *ProvisionedProductAttribute) SetName(v string) *ProvisionedProductAttribute { s.Name = &v @@ -16455,12 +16985,24 @@ func (s *ProvisionedProductAttribute) SetProductId(v string) *ProvisionedProduct return s } +// SetProductName sets the ProductName field's value. +func (s *ProvisionedProductAttribute) SetProductName(v string) *ProvisionedProductAttribute { + s.ProductName = &v + return s +} + // SetProvisioningArtifactId sets the ProvisioningArtifactId field's value. func (s *ProvisionedProductAttribute) SetProvisioningArtifactId(v string) *ProvisionedProductAttribute { s.ProvisioningArtifactId = &v return s } +// SetProvisioningArtifactName sets the ProvisioningArtifactName field's value. +func (s *ProvisionedProductAttribute) SetProvisioningArtifactName(v string) *ProvisionedProductAttribute { + s.ProvisioningArtifactName = &v + return s +} + // SetStatus sets the Status field's value. func (s *ProvisionedProductAttribute) SetStatus(v string) *ProvisionedProductAttribute { s.Status = &v @@ -16515,9 +17057,33 @@ type ProvisionedProductDetail struct { // repeated request. IdempotencyToken *string `min:"1" type:"string"` + // The record identifier of the last request performed on this provisioned product + // of the following types: + // + // * ProvisionedProduct + // + // * UpdateProvisionedProduct + // + // * ExecuteProvisionedProductPlan + // + // * TerminateProvisionedProduct + LastProvisioningRecordId *string `min:"1" type:"string"` + // The record identifier of the last request performed on this provisioned product. LastRecordId *string `type:"string"` + // The record identifier of the last successful request performed on this provisioned + // product of the following types: + // + // * ProvisionedProduct + // + // * UpdateProvisionedProduct + // + // * ExecuteProvisionedProductPlan + // + // * TerminateProvisionedProduct + LastSuccessfulProvisioningRecordId *string `min:"1" type:"string"` + // The user-friendly name of the provisioned product. Name *string `min:"1" type:"string"` @@ -16591,12 +17157,24 @@ func (s *ProvisionedProductDetail) SetIdempotencyToken(v string) *ProvisionedPro return s } +// SetLastProvisioningRecordId sets the LastProvisioningRecordId field's value. 
+func (s *ProvisionedProductDetail) SetLastProvisioningRecordId(v string) *ProvisionedProductDetail { + s.LastProvisioningRecordId = &v + return s +} + // SetLastRecordId sets the LastRecordId field's value. func (s *ProvisionedProductDetail) SetLastRecordId(v string) *ProvisionedProductDetail { s.LastRecordId = &v return s } +// SetLastSuccessfulProvisioningRecordId sets the LastSuccessfulProvisioningRecordId field's value. +func (s *ProvisionedProductDetail) SetLastSuccessfulProvisioningRecordId(v string) *ProvisionedProductDetail { + s.LastSuccessfulProvisioningRecordId = &v + return s +} + // SetName sets the Name field's value. func (s *ProvisionedProductDetail) SetName(v string) *ProvisionedProductDetail { s.Name = &v @@ -17004,6 +17582,39 @@ func (s *ProvisioningArtifactDetail) SetType(v string) *ProvisioningArtifactDeta return s } +// Provisioning artifact output. +type ProvisioningArtifactOutput struct { + _ struct{} `type:"structure"` + + // Description of the provisioning artifact output key. + Description *string `type:"string"` + + // The provisioning artifact output key. + Key *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ProvisioningArtifactOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisioningArtifactOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ProvisioningArtifactOutput) SetDescription(v string) *ProvisioningArtifactOutput { + s.Description = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ProvisioningArtifactOutput) SetKey(v string) *ProvisioningArtifactOutput { + s.Key = &v + return s +} + // Information about a parameter used to provision a product. type ProvisioningArtifactParameter struct { _ struct{} `type:"structure"` @@ -17787,8 +18398,8 @@ type RejectPortfolioShareInput struct { // The type of shared portfolios to reject. The default is to reject imported // portfolios. // - // * AWS_ORGANIZATIONS - Reject portfolios shared by the master account of - // your organization. + // * AWS_ORGANIZATIONS - Reject portfolios shared by the management account + // of your organization. // // * IMPORTED - Reject imported portfolios. // @@ -18043,8 +18654,8 @@ func (s *ResourceDetail) SetName(v string) *ResourceDetail { // A resource that is currently in use. Ensure that the resource is not in use // and retry the operation. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18061,17 +18672,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18079,28 +18690,28 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource was not found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18117,17 +18728,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18135,22 +18746,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a change to a resource attribute. @@ -18564,7 +19175,8 @@ type SearchProvisionedProductsInput struct { // // When the key is SearchQuery, the searchable fields are arn, createdTime, // id, lastRecordId, idempotencyToken, name, physicalId, productId, provisioningArtifact, - // type, status, tags, userArn, and userArnSession. + // type, status, tags, userArn, userArnSession, lastProvisioningRecordId, lastSuccessfulProvisioningRecordId, + // productName, and provisioningArtifactName. // // Example: "SearchQuery":["status:AVAILABLE"] Filters map[string][]*string `type:"map"` @@ -19091,8 +19703,8 @@ func (s *TagOptionDetail) SetValue(v string) *TagOptionDetail { // process has not been performed for this account. Please use the AWS console // to perform the migration process before retrying the operation. 
type TagOptionNotMigratedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19109,17 +19721,17 @@ func (s TagOptionNotMigratedException) GoString() string { func newErrorTagOptionNotMigratedException(v protocol.ResponseMetadata) error { return &TagOptionNotMigratedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagOptionNotMigratedException) Code() string { +func (s *TagOptionNotMigratedException) Code() string { return "TagOptionNotMigratedException" } // Message returns the exception's message. -func (s TagOptionNotMigratedException) Message() string { +func (s *TagOptionNotMigratedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19127,22 +19739,22 @@ func (s TagOptionNotMigratedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagOptionNotMigratedException) OrigErr() error { +func (s *TagOptionNotMigratedException) OrigErr() error { return nil } -func (s TagOptionNotMigratedException) Error() string { +func (s *TagOptionNotMigratedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagOptionNotMigratedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagOptionNotMigratedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagOptionNotMigratedException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagOptionNotMigratedException) RequestID() string { + return s.RespMetadata.RequestID } // Summary information about a TagOption. @@ -19316,10 +19928,25 @@ type UpdateConstraintInput struct { // // LAUNCH // + // You are required to specify either the RoleArn or the LocalRoleName but can't + // use both. + // // Specify the RoleArn property as follows: // // {"RoleArn" : "arn:aws:iam::123456789012:role/LaunchRole"} // + // Specify the LocalRoleName property as follows: + // + // {"LocalRoleName": "SCBasicLaunchRole"} + // + // If you specify the LocalRoleName property, when an account uses the launch + // constraint, the IAM role with that name in the account will be used. This + // allows launch-role constraints to be account-agnostic so the administrator + // can create fewer resources per shared account. + // + // The given role name must exist in the account used to create the launch constraint + // and the account of the user who launches a product with this launch constraint. + // // You cannot have both a LAUNCH and a STACKSET constraint. // // You also cannot have more than one LAUNCH constraint on a product and portfolio. @@ -19791,15 +20418,22 @@ type UpdateProvisionedProductInput struct { // * zh - Chinese AcceptLanguage *string `type:"string"` - // The new path identifier. This value is optional if the product has a default - // path, and required if the product has more than one path. + // The path identifier. This value is optional if the product has a default + // path, and required if the product has more than one path. You must provide + // the name or ID, but not both. PathId *string `min:"1" type:"string"` - // The identifier of the product. + // The name of the path. 
You must provide the name or ID, but not both. + PathName *string `min:"1" type:"string"` + + // The identifier of the product. You must provide the name or ID, but not both. ProductId *string `min:"1" type:"string"` - // The identifier of the provisioned product. You cannot specify both ProvisionedProductName - // and ProvisionedProductId. + // The name of the product. You must provide the name or ID, but not both. + ProductName *string `type:"string"` + + // The identifier of the provisioned product. You must provide the name or ID, + // but not both. ProvisionedProductId *string `min:"1" type:"string"` // The name of the provisioned product. You cannot specify both ProvisionedProductName @@ -19809,6 +20443,10 @@ type UpdateProvisionedProductInput struct { // The identifier of the provisioning artifact. ProvisioningArtifactId *string `min:"1" type:"string"` + // The name of the provisioning artifact. You must provide the name or ID, but + // not both. + ProvisioningArtifactName *string `type:"string"` + // The new parameters. ProvisioningParameters []*UpdateProvisioningParameter `type:"list"` @@ -19840,6 +20478,9 @@ func (s *UpdateProvisionedProductInput) Validate() error { if s.PathId != nil && len(*s.PathId) < 1 { invalidParams.Add(request.NewErrParamMinLen("PathId", 1)) } + if s.PathName != nil && len(*s.PathName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathName", 1)) + } if s.ProductId != nil && len(*s.ProductId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ProductId", 1)) } @@ -19899,12 +20540,24 @@ func (s *UpdateProvisionedProductInput) SetPathId(v string) *UpdateProvisionedPr return s } +// SetPathName sets the PathName field's value. +func (s *UpdateProvisionedProductInput) SetPathName(v string) *UpdateProvisionedProductInput { + s.PathName = &v + return s +} + // SetProductId sets the ProductId field's value. func (s *UpdateProvisionedProductInput) SetProductId(v string) *UpdateProvisionedProductInput { s.ProductId = &v return s } +// SetProductName sets the ProductName field's value. +func (s *UpdateProvisionedProductInput) SetProductName(v string) *UpdateProvisionedProductInput { + s.ProductName = &v + return s +} + // SetProvisionedProductId sets the ProvisionedProductId field's value. func (s *UpdateProvisionedProductInput) SetProvisionedProductId(v string) *UpdateProvisionedProductInput { s.ProvisionedProductId = &v @@ -19923,6 +20576,12 @@ func (s *UpdateProvisionedProductInput) SetProvisioningArtifactId(v string) *Upd return s } +// SetProvisioningArtifactName sets the ProvisioningArtifactName field's value. +func (s *UpdateProvisionedProductInput) SetProvisioningArtifactName(v string) *UpdateProvisionedProductInput { + s.ProvisioningArtifactName = &v + return s +} + // SetProvisioningParameters sets the ProvisioningParameters field's value. func (s *UpdateProvisionedProductInput) SetProvisioningParameters(v []*UpdateProvisioningParameter) *UpdateProvisionedProductInput { s.ProvisioningParameters = v @@ -19993,9 +20652,9 @@ type UpdateProvisionedProductPropertiesInput struct { // A map that contains the provisioned product properties to be updated. // - // The OWNER key only accepts user ARNs. The owner is the user that is allowed - // to see, update, terminate, and execute service actions in the provisioned - // product. + // The OWNER key accepts user ARNs and role ARNs. The owner is the user that + // is allowed to see, update, terminate, and execute service actions in the + // provisioned product. 
// // The administrator can change the owner of a provisioned product to another // IAM user within the same account. Both end user owners and administrators @@ -20762,6 +21421,15 @@ const ( AccessLevelFilterKeyUser = "User" ) +// AccessLevelFilterKey_Values returns all elements of the AccessLevelFilterKey enum +func AccessLevelFilterKey_Values() []string { + return []string{ + AccessLevelFilterKeyAccount, + AccessLevelFilterKeyRole, + AccessLevelFilterKeyUser, + } +} + const ( // AccessStatusEnabled is a AccessStatus enum value AccessStatusEnabled = "ENABLED" @@ -20773,6 +21441,15 @@ const ( AccessStatusDisabled = "DISABLED" ) +// AccessStatus_Values returns all elements of the AccessStatus enum +func AccessStatus_Values() []string { + return []string{ + AccessStatusEnabled, + AccessStatusUnderChange, + AccessStatusDisabled, + } +} + const ( // ChangeActionAdd is a ChangeAction enum value ChangeActionAdd = "ADD" @@ -20784,11 +21461,27 @@ const ( ChangeActionRemove = "REMOVE" ) +// ChangeAction_Values returns all elements of the ChangeAction enum +func ChangeAction_Values() []string { + return []string{ + ChangeActionAdd, + ChangeActionModify, + ChangeActionRemove, + } +} + const ( // CopyOptionCopyTags is a CopyOption enum value CopyOptionCopyTags = "CopyTags" ) +// CopyOption_Values returns all elements of the CopyOption enum +func CopyOption_Values() []string { + return []string{ + CopyOptionCopyTags, + } +} + const ( // CopyProductStatusSucceeded is a CopyProductStatus enum value CopyProductStatusSucceeded = "SUCCEEDED" @@ -20800,6 +21493,15 @@ const ( CopyProductStatusFailed = "FAILED" ) +// CopyProductStatus_Values returns all elements of the CopyProductStatus enum +func CopyProductStatus_Values() []string { + return []string{ + CopyProductStatusSucceeded, + CopyProductStatusInProgress, + CopyProductStatusFailed, + } +} + const ( // EvaluationTypeStatic is a EvaluationType enum value EvaluationTypeStatic = "STATIC" @@ -20808,6 +21510,14 @@ const ( EvaluationTypeDynamic = "DYNAMIC" ) +// EvaluationType_Values returns all elements of the EvaluationType enum +func EvaluationType_Values() []string { + return []string{ + EvaluationTypeStatic, + EvaluationTypeDynamic, + } +} + const ( // OrganizationNodeTypeOrganization is a OrganizationNodeType enum value OrganizationNodeTypeOrganization = "ORGANIZATION" @@ -20819,6 +21529,15 @@ const ( OrganizationNodeTypeAccount = "ACCOUNT" ) +// OrganizationNodeType_Values returns all elements of the OrganizationNodeType enum +func OrganizationNodeType_Values() []string { + return []string{ + OrganizationNodeTypeOrganization, + OrganizationNodeTypeOrganizationalUnit, + OrganizationNodeTypeAccount, + } +} + const ( // PortfolioShareTypeImported is a PortfolioShareType enum value PortfolioShareTypeImported = "IMPORTED" @@ -20830,16 +21549,39 @@ const ( PortfolioShareTypeAwsOrganizations = "AWS_ORGANIZATIONS" ) +// PortfolioShareType_Values returns all elements of the PortfolioShareType enum +func PortfolioShareType_Values() []string { + return []string{ + PortfolioShareTypeImported, + PortfolioShareTypeAwsServicecatalog, + PortfolioShareTypeAwsOrganizations, + } +} + const ( // PrincipalTypeIam is a PrincipalType enum value PrincipalTypeIam = "IAM" ) +// PrincipalType_Values returns all elements of the PrincipalType enum +func PrincipalType_Values() []string { + return []string{ + PrincipalTypeIam, + } +} + const ( // ProductSourceAccount is a ProductSource enum value ProductSourceAccount = "ACCOUNT" ) +// ProductSource_Values returns all elements of the 
ProductSource enum +func ProductSource_Values() []string { + return []string{ + ProductSourceAccount, + } +} + const ( // ProductTypeCloudFormationTemplate is a ProductType enum value ProductTypeCloudFormationTemplate = "CLOUD_FORMATION_TEMPLATE" @@ -20848,6 +21590,14 @@ const ( ProductTypeMarketplace = "MARKETPLACE" ) +// ProductType_Values returns all elements of the ProductType enum +func ProductType_Values() []string { + return []string{ + ProductTypeCloudFormationTemplate, + ProductTypeMarketplace, + } +} + const ( // ProductViewFilterByFullTextSearch is a ProductViewFilterBy enum value ProductViewFilterByFullTextSearch = "FullTextSearch" @@ -20862,6 +21612,16 @@ const ( ProductViewFilterBySourceProductId = "SourceProductId" ) +// ProductViewFilterBy_Values returns all elements of the ProductViewFilterBy enum +func ProductViewFilterBy_Values() []string { + return []string{ + ProductViewFilterByFullTextSearch, + ProductViewFilterByOwner, + ProductViewFilterByProductType, + ProductViewFilterBySourceProductId, + } +} + const ( // ProductViewSortByTitle is a ProductViewSortBy enum value ProductViewSortByTitle = "Title" @@ -20873,11 +21633,27 @@ const ( ProductViewSortByCreationDate = "CreationDate" ) +// ProductViewSortBy_Values returns all elements of the ProductViewSortBy enum +func ProductViewSortBy_Values() []string { + return []string{ + ProductViewSortByTitle, + ProductViewSortByVersionCount, + ProductViewSortByCreationDate, + } +} + const ( // PropertyKeyOwner is a PropertyKey enum value PropertyKeyOwner = "OWNER" ) +// PropertyKey_Values returns all elements of the PropertyKey enum +func PropertyKey_Values() []string { + return []string{ + PropertyKeyOwner, + } +} + const ( // ProvisionedProductPlanStatusCreateInProgress is a ProvisionedProductPlanStatus enum value ProvisionedProductPlanStatusCreateInProgress = "CREATE_IN_PROGRESS" @@ -20898,11 +21674,30 @@ const ( ProvisionedProductPlanStatusExecuteFailed = "EXECUTE_FAILED" ) +// ProvisionedProductPlanStatus_Values returns all elements of the ProvisionedProductPlanStatus enum +func ProvisionedProductPlanStatus_Values() []string { + return []string{ + ProvisionedProductPlanStatusCreateInProgress, + ProvisionedProductPlanStatusCreateSuccess, + ProvisionedProductPlanStatusCreateFailed, + ProvisionedProductPlanStatusExecuteInProgress, + ProvisionedProductPlanStatusExecuteSuccess, + ProvisionedProductPlanStatusExecuteFailed, + } +} + const ( // ProvisionedProductPlanTypeCloudformation is a ProvisionedProductPlanType enum value ProvisionedProductPlanTypeCloudformation = "CLOUDFORMATION" ) +// ProvisionedProductPlanType_Values returns all elements of the ProvisionedProductPlanType enum +func ProvisionedProductPlanType_Values() []string { + return []string{ + ProvisionedProductPlanTypeCloudformation, + } +} + const ( // ProvisionedProductStatusAvailable is a ProvisionedProductStatus enum value ProvisionedProductStatusAvailable = "AVAILABLE" @@ -20920,11 +21715,29 @@ const ( ProvisionedProductStatusPlanInProgress = "PLAN_IN_PROGRESS" ) +// ProvisionedProductStatus_Values returns all elements of the ProvisionedProductStatus enum +func ProvisionedProductStatus_Values() []string { + return []string{ + ProvisionedProductStatusAvailable, + ProvisionedProductStatusUnderChange, + ProvisionedProductStatusTainted, + ProvisionedProductStatusError, + ProvisionedProductStatusPlanInProgress, + } +} + const ( // ProvisionedProductViewFilterBySearchQuery is a ProvisionedProductViewFilterBy enum value ProvisionedProductViewFilterBySearchQuery = 
"SearchQuery" ) +// ProvisionedProductViewFilterBy_Values returns all elements of the ProvisionedProductViewFilterBy enum +func ProvisionedProductViewFilterBy_Values() []string { + return []string{ + ProvisionedProductViewFilterBySearchQuery, + } +} + const ( // ProvisioningArtifactGuidanceDefault is a ProvisioningArtifactGuidance enum value ProvisioningArtifactGuidanceDefault = "DEFAULT" @@ -20933,11 +21746,26 @@ const ( ProvisioningArtifactGuidanceDeprecated = "DEPRECATED" ) +// ProvisioningArtifactGuidance_Values returns all elements of the ProvisioningArtifactGuidance enum +func ProvisioningArtifactGuidance_Values() []string { + return []string{ + ProvisioningArtifactGuidanceDefault, + ProvisioningArtifactGuidanceDeprecated, + } +} + const ( // ProvisioningArtifactPropertyNameId is a ProvisioningArtifactPropertyName enum value ProvisioningArtifactPropertyNameId = "Id" ) +// ProvisioningArtifactPropertyName_Values returns all elements of the ProvisioningArtifactPropertyName enum +func ProvisioningArtifactPropertyName_Values() []string { + return []string{ + ProvisioningArtifactPropertyNameId, + } +} + const ( // ProvisioningArtifactTypeCloudFormationTemplate is a ProvisioningArtifactType enum value ProvisioningArtifactTypeCloudFormationTemplate = "CLOUD_FORMATION_TEMPLATE" @@ -20949,6 +21777,15 @@ const ( ProvisioningArtifactTypeMarketplaceCar = "MARKETPLACE_CAR" ) +// ProvisioningArtifactType_Values returns all elements of the ProvisioningArtifactType enum +func ProvisioningArtifactType_Values() []string { + return []string{ + ProvisioningArtifactTypeCloudFormationTemplate, + ProvisioningArtifactTypeMarketplaceAmi, + ProvisioningArtifactTypeMarketplaceCar, + } +} + const ( // RecordStatusCreated is a RecordStatus enum value RecordStatusCreated = "CREATED" @@ -20966,6 +21803,17 @@ const ( RecordStatusFailed = "FAILED" ) +// RecordStatus_Values returns all elements of the RecordStatus enum +func RecordStatus_Values() []string { + return []string{ + RecordStatusCreated, + RecordStatusInProgress, + RecordStatusInProgressInError, + RecordStatusSucceeded, + RecordStatusFailed, + } +} + const ( // ReplacementTrue is a Replacement enum value ReplacementTrue = "TRUE" @@ -20977,6 +21825,15 @@ const ( ReplacementConditional = "CONDITIONAL" ) +// Replacement_Values returns all elements of the Replacement enum +func Replacement_Values() []string { + return []string{ + ReplacementTrue, + ReplacementFalse, + ReplacementConditional, + } +} + const ( // RequiresRecreationNever is a RequiresRecreation enum value RequiresRecreationNever = "NEVER" @@ -20988,6 +21845,15 @@ const ( RequiresRecreationAlways = "ALWAYS" ) +// RequiresRecreation_Values returns all elements of the RequiresRecreation enum +func RequiresRecreation_Values() []string { + return []string{ + RequiresRecreationNever, + RequiresRecreationConditionally, + RequiresRecreationAlways, + } +} + const ( // ResourceAttributeProperties is a ResourceAttribute enum value ResourceAttributeProperties = "PROPERTIES" @@ -21008,6 +21874,18 @@ const ( ResourceAttributeTags = "TAGS" ) +// ResourceAttribute_Values returns all elements of the ResourceAttribute enum +func ResourceAttribute_Values() []string { + return []string{ + ResourceAttributeProperties, + ResourceAttributeMetadata, + ResourceAttributeCreationpolicy, + ResourceAttributeUpdatepolicy, + ResourceAttributeDeletionpolicy, + ResourceAttributeTags, + } +} + const ( // ServiceActionAssociationErrorCodeDuplicateResource is a ServiceActionAssociationErrorCode enum value 
ServiceActionAssociationErrorCodeDuplicateResource = "DUPLICATE_RESOURCE" @@ -21025,6 +21903,17 @@ const ( ServiceActionAssociationErrorCodeThrottling = "THROTTLING" ) +// ServiceActionAssociationErrorCode_Values returns all elements of the ServiceActionAssociationErrorCode enum +func ServiceActionAssociationErrorCode_Values() []string { + return []string{ + ServiceActionAssociationErrorCodeDuplicateResource, + ServiceActionAssociationErrorCodeInternalFailure, + ServiceActionAssociationErrorCodeLimitExceeded, + ServiceActionAssociationErrorCodeResourceNotFound, + ServiceActionAssociationErrorCodeThrottling, + } +} + const ( // ServiceActionDefinitionKeyName is a ServiceActionDefinitionKey enum value ServiceActionDefinitionKeyName = "Name" @@ -21039,11 +21928,28 @@ const ( ServiceActionDefinitionKeyParameters = "Parameters" ) +// ServiceActionDefinitionKey_Values returns all elements of the ServiceActionDefinitionKey enum +func ServiceActionDefinitionKey_Values() []string { + return []string{ + ServiceActionDefinitionKeyName, + ServiceActionDefinitionKeyVersion, + ServiceActionDefinitionKeyAssumeRole, + ServiceActionDefinitionKeyParameters, + } +} + const ( // ServiceActionDefinitionTypeSsmAutomation is a ServiceActionDefinitionType enum value ServiceActionDefinitionTypeSsmAutomation = "SSM_AUTOMATION" ) +// ServiceActionDefinitionType_Values returns all elements of the ServiceActionDefinitionType enum +func ServiceActionDefinitionType_Values() []string { + return []string{ + ServiceActionDefinitionTypeSsmAutomation, + } +} + const ( // ShareStatusNotStarted is a ShareStatus enum value ShareStatusNotStarted = "NOT_STARTED" @@ -21061,6 +21967,17 @@ const ( ShareStatusError = "ERROR" ) +// ShareStatus_Values returns all elements of the ShareStatus enum +func ShareStatus_Values() []string { + return []string{ + ShareStatusNotStarted, + ShareStatusInProgress, + ShareStatusCompleted, + ShareStatusCompletedWithErrors, + ShareStatusError, + } +} + const ( // SortOrderAscending is a SortOrder enum value SortOrderAscending = "ASCENDING" @@ -21069,6 +21986,14 @@ const ( SortOrderDescending = "DESCENDING" ) +// SortOrder_Values returns all elements of the SortOrder enum +func SortOrder_Values() []string { + return []string{ + SortOrderAscending, + SortOrderDescending, + } +} + const ( // StackInstanceStatusCurrent is a StackInstanceStatus enum value StackInstanceStatusCurrent = "CURRENT" @@ -21080,6 +22005,15 @@ const ( StackInstanceStatusInoperable = "INOPERABLE" ) +// StackInstanceStatus_Values returns all elements of the StackInstanceStatus enum +func StackInstanceStatus_Values() []string { + return []string{ + StackInstanceStatusCurrent, + StackInstanceStatusOutdated, + StackInstanceStatusInoperable, + } +} + const ( // StackSetOperationTypeCreate is a StackSetOperationType enum value StackSetOperationTypeCreate = "CREATE" @@ -21091,6 +22025,15 @@ const ( StackSetOperationTypeDelete = "DELETE" ) +// StackSetOperationType_Values returns all elements of the StackSetOperationType enum +func StackSetOperationType_Values() []string { + return []string{ + StackSetOperationTypeCreate, + StackSetOperationTypeUpdate, + StackSetOperationTypeDelete, + } +} + const ( // StatusAvailable is a Status enum value StatusAvailable = "AVAILABLE" @@ -21101,3 +22044,12 @@ const ( // StatusFailed is a Status enum value StatusFailed = "FAILED" ) + +// Status_Values returns all elements of the Status enum +func Status_Values() []string { + return []string{ + StatusAvailable, + StatusCreating, + StatusFailed, + } +} diff 
--git a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go index 488fa6954..fcab94898 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/api.go b/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/api.go index 0fee97259..cc86b974f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/api.go @@ -61,8 +61,8 @@ func (c *ServiceDiscovery) CreateHttpNamespaceRequest(input *CreateHttpNamespace // namespace can be discovered using a DiscoverInstances request but can't be // discovered using DNS. // -// For the current limit on the number of namespaces that you can create using -// the same AWS account, see AWS Cloud Map Limits (http://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) +// For the current quota on the number of namespaces that you can create using +// the same AWS account, see AWS Cloud Map quotas (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) // in the AWS Cloud Map Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -82,12 +82,16 @@ func (c *ServiceDiscovery) CreateHttpNamespaceRequest(input *CreateHttpNamespace // The namespace that you're trying to create already exists. // // * ResourceLimitExceeded -// The resource can't be created because you've reached the limit on the number +// The resource can't be created because you've reached the quota on the number // of resources. // // * DuplicateRequest // The operation is already in progress. // +// * TooManyTagsException +// The list of tags on the resource is over the quota. The maximum number of +// tags that can be applied to a resource is 50. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/servicediscovery-2017-03-14/CreateHttpNamespace func (c *ServiceDiscovery) CreateHttpNamespace(input *CreateHttpNamespaceInput) (*CreateHttpNamespaceOutput, error) { req, out := c.CreateHttpNamespaceRequest(input) @@ -158,8 +162,8 @@ func (c *ServiceDiscovery) CreatePrivateDnsNamespaceRequest(input *CreatePrivate // a specified Amazon VPC. The namespace defines your service naming scheme. // For example, if you name your namespace example.com and name your service // backend, the resulting DNS name for the service will be backend.example.com. -// For the current limit on the number of namespaces that you can create using -// the same AWS account, see AWS Cloud Map Limits (http://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) +// For the current quota on the number of namespaces that you can create using +// the same AWS account, see AWS Cloud Map Limits (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) // in the AWS Cloud Map Developer Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions @@ -179,12 +183,16 @@ func (c *ServiceDiscovery) CreatePrivateDnsNamespaceRequest(input *CreatePrivate // The namespace that you're trying to create already exists. // // * ResourceLimitExceeded -// The resource can't be created because you've reached the limit on the number +// The resource can't be created because you've reached the quota on the number // of resources. // // * DuplicateRequest // The operation is already in progress. // +// * TooManyTagsException +// The list of tags on the resource is over the quota. The maximum number of +// tags that can be applied to a resource is 50. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/servicediscovery-2017-03-14/CreatePrivateDnsNamespace func (c *ServiceDiscovery) CreatePrivateDnsNamespace(input *CreatePrivateDnsNamespaceInput) (*CreatePrivateDnsNamespaceOutput, error) { req, out := c.CreatePrivateDnsNamespaceRequest(input) @@ -254,9 +262,9 @@ func (c *ServiceDiscovery) CreatePublicDnsNamespaceRequest(input *CreatePublicDn // Creates a public namespace based on DNS, which will be visible on the internet. // The namespace defines your service naming scheme. For example, if you name // your namespace example.com and name your service backend, the resulting DNS -// name for the service will be backend.example.com. For the current limit on +// name for the service will be backend.example.com. For the current quota on // the number of namespaces that you can create using the same AWS account, -// see AWS Cloud Map Limits (http://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) +// see AWS Cloud Map Limits (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) // in the AWS Cloud Map Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -276,12 +284,16 @@ func (c *ServiceDiscovery) CreatePublicDnsNamespaceRequest(input *CreatePublicDn // The namespace that you're trying to create already exists. // // * ResourceLimitExceeded -// The resource can't be created because you've reached the limit on the number +// The resource can't be created because you've reached the quota on the number // of resources. // // * DuplicateRequest // The operation is already in progress. // +// * TooManyTagsException +// The list of tags on the resource is over the quota. The maximum number of +// tags that can be applied to a resource is 50. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/servicediscovery-2017-03-14/CreatePublicDnsNamespace func (c *ServiceDiscovery) CreatePublicDnsNamespace(input *CreatePublicDnsNamespaceInput) (*CreatePublicDnsNamespaceOutput, error) { req, out := c.CreatePublicDnsNamespaceRequest(input) @@ -355,12 +367,12 @@ func (c *ServiceDiscovery) CreateServiceRequest(input *CreateServiceInput) (req // // * Optionally, a health check // -// After you create the service, you can submit a RegisterInstance request, -// and AWS Cloud Map uses the values in the configuration to create the specified -// entities. +// After you create the service, you can submit a RegisterInstance (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html) +// request, and AWS Cloud Map uses the values in the configuration to create +// the specified entities. 
// -// For the current limit on the number of instances that you can register using -// the same namespace and using the same service, see AWS Cloud Map Limits (http://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) +// For the current quota on the number of instances that you can register using +// the same namespace and using the same service, see AWS Cloud Map Limits (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) // in the AWS Cloud Map Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -377,7 +389,7 @@ func (c *ServiceDiscovery) CreateServiceRequest(input *CreateServiceInput) (req // a string value might exceed length constraints. // // * ResourceLimitExceeded -// The resource can't be created because you've reached the limit on the number +// The resource can't be created because you've reached the quota on the number // of resources. // // * NamespaceNotFound @@ -387,6 +399,10 @@ func (c *ServiceDiscovery) CreateServiceRequest(input *CreateServiceInput) (req // The service can't be created because a service with the same name already // exists. // +// * TooManyTagsException +// The list of tags on the resource is over the quota. The maximum number of +// tags that can be applied to a resource is 50. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/servicediscovery-2017-03-14/CreateService func (c *ServiceDiscovery) CreateService(input *CreateServiceInput) (*CreateServiceOutput, error) { req, out := c.CreateServiceRequest(input) @@ -733,7 +749,10 @@ func (c *ServiceDiscovery) DiscoverInstancesRequest(input *DiscoverInstancesInpu // DiscoverInstances API operation for AWS Cloud Map. // -// Discovers registered instances for a specified namespace and service. +// Discovers registered instances for a specified namespace and service. You +// can use DiscoverInstances to discover instances for any type of namespace. +// For public and private DNS namespaces, you can also use DNS queries to discover +// instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -754,6 +773,12 @@ func (c *ServiceDiscovery) DiscoverInstancesRequest(input *DiscoverInstancesInpu // might be missing, a numeric value might be outside the allowed range, or // a string value might exceed length constraints. // +// * RequestLimitExceeded +// The operation can't be completed because you've reached the quota for the +// number of requests. For more information, see AWS Cloud Map API request throttling +// quota (https://docs.aws.amazon.com/cloud-map/latest/dg/throttling.html) in +// the AWS Cloud Map Developer Guide. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/servicediscovery-2017-03-14/DiscoverInstances func (c *ServiceDiscovery) DiscoverInstances(input *DiscoverInstancesInput) (*DiscoverInstancesOutput, error) { req, out := c.DiscoverInstancesRequest(input) @@ -1145,7 +1170,8 @@ func (c *ServiceDiscovery) GetOperationRequest(input *GetOperationInput) (req *r // Gets information about any operation that returns an operation ID in the // response, such as a CreateService request. // -// To get a list of operations that match specified criteria, see ListOperations. +// To get a list of operations that match specified criteria, see ListOperations +// (https://docs.aws.amazon.com/cloud-map/latest/api/API_ListOperations.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1831,6 +1857,90 @@ func (c *ServiceDiscovery) ListServicesPagesWithContext(ctx aws.Context, input * return p.Err() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicediscovery-2017-03-14/ListTagsForResource +func (c *ServiceDiscovery) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS Cloud Map. +// +// Lists tags for the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Cloud Map's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The operation can't be completed because the resource was not found. +// +// * InvalidInput +// One or more specified values aren't valid. For example, a required value +// might be missing, a numeric value might be outside the allowed range, or +// a string value might exceed length constraints. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicediscovery-2017-03-14/ListTagsForResource +func (c *ServiceDiscovery) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ServiceDiscovery) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRegisterInstance = "RegisterInstance" // RegisterInstanceRequest generates a "aws/request.Request" representing the @@ -1892,7 +2002,7 @@ func (c *ServiceDiscovery) RegisterInstanceRequest(input *RegisterInstanceInput) // One RegisterInstance request must complete before you can submit another // request and specify the same service ID and instance ID. // -// For more information, see CreateService. +// For more information, see CreateService (https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateService.html). // // When AWS Cloud Map receives a DNS query for the specified DNS name, it returns // the applicable value: @@ -1905,8 +2015,8 @@ func (c *ServiceDiscovery) RegisterInstanceRequest(input *RegisterInstanceInput) // * If you didn't specify a health check configuration: returns all the // records // -// For the current limit on the number of instances that you can register using -// the same namespace and using the same service, see AWS Cloud Map Limits (http://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) +// For the current quota on the number of instances that you can register using +// the same namespace and using the same service, see AWS Cloud Map Limits (https://docs.aws.amazon.com/cloud-map/latest/dg/cloud-map-limits.html) // in the AWS Cloud Map Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -1930,7 +2040,7 @@ func (c *ServiceDiscovery) RegisterInstanceRequest(input *RegisterInstanceInput) // For example, you can't delete a service that contains any instances. // // * ResourceLimitExceeded -// The resource can't be created because you've reached the limit on the number +// The resource can't be created because you've reached the quota on the number // of resources. // // * ServiceNotFound @@ -1958,6 +2068,180 @@ func (c *ServiceDiscovery) RegisterInstanceWithContext(ctx aws.Context, input *R return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicediscovery-2017-03-14/TagResource +func (c *ServiceDiscovery) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS Cloud Map. +// +// Adds one or more tags to the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Cloud Map's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The operation can't be completed because the resource was not found. +// +// * TooManyTagsException +// The list of tags on the resource is over the quota. The maximum number of +// tags that can be applied to a resource is 50. +// +// * InvalidInput +// One or more specified values aren't valid. For example, a required value +// might be missing, a numeric value might be outside the allowed range, or +// a string value might exceed length constraints. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicediscovery-2017-03-14/TagResource +func (c *ServiceDiscovery) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceDiscovery) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicediscovery-2017-03-14/UntagResource +func (c *ServiceDiscovery) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS Cloud Map. +// +// Removes one or more tags from the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Cloud Map's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The operation can't be completed because the resource was not found. +// +// * InvalidInput +// One or more specified values aren't valid. For example, a required value +// might be missing, a numeric value might be outside the allowed range, or +// a string value might exceed length constraints. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/servicediscovery-2017-03-14/UntagResource +func (c *ServiceDiscovery) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ServiceDiscovery) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateInstanceCustomHealthStatus = "UpdateInstanceCustomHealthStatus" // UpdateInstanceCustomHealthStatusRequest generates a "aws/request.Request" representing the @@ -2011,7 +2295,7 @@ func (c *ServiceDiscovery) UpdateInstanceCustomHealthStatusRequest(input *Update // you create a service. You can't use it to change the status for Route 53 // health checks, which you define using HealthCheckConfig. // -// For more information, see HealthCheckCustomConfig. +// For more information, see HealthCheckCustomConfig (https://docs.aws.amazon.com/cloud-map/latest/api/API_HealthCheckCustomConfig.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2105,20 +2389,23 @@ func (c *ServiceDiscovery) UpdateServiceRequest(input *UpdateServiceInput) (req // // Submits a request to perform the following operations: // -// * Add or delete DnsRecords configurations -// // * Update the TTL setting for existing DnsRecords configurations // -// * Add, update, or delete HealthCheckConfig for a specified service +// * Add, update, or delete HealthCheckConfig for a specified service You +// can't add, update, or delete a HealthCheckCustomConfig configuration. // -// For public and private DNS namespaces, you must specify all DnsRecords configurations -// (and, optionally, HealthCheckConfig) that you want to appear in the updated -// service. Any current configurations that don't appear in an UpdateService -// request are deleted. +// For public and private DNS namespaces, note the following: // -// When you update the TTL setting for a service, AWS Cloud Map also updates -// the corresponding settings in all the records and health checks that were -// created by using the specified service. +// * If you omit any existing DnsRecords or HealthCheckConfig configurations +// from an UpdateService request, the configurations are deleted from the +// service. +// +// * If you omit an existing HealthCheckCustomConfig configuration from an +// UpdateService request, the configuration is not deleted from the service. +// +// When you update settings for a service, AWS Cloud Map also updates the corresponding +// settings in all the records and health checks that were created by using +// the specified service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2176,6 +2463,11 @@ type CreateHttpNamespaceInput struct { // // Name is a required field Name *string `type:"string" required:"true"` + + // The tags to add to the namespace. Each tag consists of a key and an optional + // value, both of which you define. Tag keys can have a maximum character length + // of 128 characters, and tag values can have a maximum length of 256 characters. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -2194,6 +2486,16 @@ func (s *CreateHttpNamespaceInput) Validate() error { if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2219,11 +2521,17 @@ func (s *CreateHttpNamespaceInput) SetName(v string) *CreateHttpNamespaceInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateHttpNamespaceInput) SetTags(v []*Tag) *CreateHttpNamespaceInput { + s.Tags = v + return s +} + type CreateHttpNamespaceOutput struct { _ struct{} `type:"structure"` // A value that you can use to determine whether the request completed successfully. - // To get the status of the operation, see GetOperation. + // To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html). 
OperationId *string `type:"string"` } @@ -2261,6 +2569,11 @@ type CreatePrivateDnsNamespaceInput struct { // Name is a required field Name *string `type:"string" required:"true"` + // The tags to add to the namespace. Each tag consists of a key and an optional + // value, both of which you define. Tag keys can have a maximum character length + // of 128 characters, and tag values can have a maximum length of 256 characters. + Tags []*Tag `type:"list"` + // The ID of the Amazon VPC that you want to associate the namespace with. // // Vpc is a required field @@ -2286,6 +2599,16 @@ func (s *CreatePrivateDnsNamespaceInput) Validate() error { if s.Vpc == nil { invalidParams.Add(request.NewErrParamRequired("Vpc")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2311,6 +2634,12 @@ func (s *CreatePrivateDnsNamespaceInput) SetName(v string) *CreatePrivateDnsName return s } +// SetTags sets the Tags field's value. +func (s *CreatePrivateDnsNamespaceInput) SetTags(v []*Tag) *CreatePrivateDnsNamespaceInput { + s.Tags = v + return s +} + // SetVpc sets the Vpc field's value. func (s *CreatePrivateDnsNamespaceInput) SetVpc(v string) *CreatePrivateDnsNamespaceInput { s.Vpc = &v @@ -2321,7 +2650,7 @@ type CreatePrivateDnsNamespaceOutput struct { _ struct{} `type:"structure"` // A value that you can use to determine whether the request completed successfully. - // To get the status of the operation, see GetOperation. + // To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html). OperationId *string `type:"string"` } @@ -2356,6 +2685,11 @@ type CreatePublicDnsNamespaceInput struct { // // Name is a required field Name *string `type:"string" required:"true"` + + // The tags to add to the namespace. Each tag consists of a key and an optional + // value, both of which you define. Tag keys can have a maximum character length + // of 128 characters, and tag values can have a maximum length of 256 characters. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -2374,6 +2708,16 @@ func (s *CreatePublicDnsNamespaceInput) Validate() error { if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2399,11 +2743,17 @@ func (s *CreatePublicDnsNamespaceInput) SetName(v string) *CreatePublicDnsNamesp return s } +// SetTags sets the Tags field's value. +func (s *CreatePublicDnsNamespaceInput) SetTags(v []*Tag) *CreatePublicDnsNamespaceInput { + s.Tags = v + return s +} + type CreatePublicDnsNamespaceOutput struct { _ struct{} `type:"structure"` // A value that you can use to determine whether the request completed successfully. - // To get the status of the operation, see GetOperation. + // To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html). OperationId *string `type:"string"` } @@ -2438,10 +2788,10 @@ type CreateServiceInput struct { // that you want AWS Cloud Map to create when you register an instance. 
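// Illustrative sketch (not part of the vendored diff): creating a private DNS
// namespace associated with a VPC and tagged at creation time, using the Name,
// Vpc, and Tags fields shown above. The VPC ID and tag values are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicediscovery"
)

func main() {
	svc := servicediscovery.New(session.Must(session.NewSession()))

	out, err := svc.CreatePrivateDnsNamespace(&servicediscovery.CreatePrivateDnsNamespaceInput{
		Name: aws.String("internal.example.com"),
		Vpc:  aws.String("vpc-0abc123def456789a"), // placeholder VPC ID
		Tags: []*servicediscovery.Tag{
			{Key: aws.String("team"), Value: aws.String("platform")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.OperationId))
}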
DnsConfig *DnsConfig `type:"structure"` - // Public DNS namespaces only. A complex type that contains settings for an - // optional Route 53 health check. If you specify settings for a health check, - // AWS Cloud Map associates the health check with all the Route 53 DNS records - // that you specify in DnsConfig. + // Public DNS and HTTP namespaces only. A complex type that contains settings + // for an optional Route 53 health check. If you specify settings for a health + // check, AWS Cloud Map associates the health check with all the Route 53 DNS + // records that you specify in DnsConfig. // // If you specify a health check configuration, you can specify either HealthCheckCustomConfig // or HealthCheckConfig but not both. @@ -2455,15 +2805,37 @@ type CreateServiceInput struct { // // If you specify a health check configuration, you can specify either HealthCheckCustomConfig // or HealthCheckConfig but not both. + // + // You can't add, update, or delete a HealthCheckCustomConfig configuration + // from an existing service. HealthCheckCustomConfig *HealthCheckCustomConfig `type:"structure"` // The name that you want to assign to the service. // + // If you want AWS Cloud Map to create an SRV record when you register an instance, + // and if you're using a system that requires a specific SRV format, such as + // HAProxy (http://www.haproxy.org/), specify the following for Name: + // + // * Start the name with an underscore (_), such as _exampleservice + // + // * End the name with ._protocol, such as ._tcp + // + // When you register an instance, AWS Cloud Map creates an SRV record and assigns + // a name to the record by concatenating the service name and the namespace + // name, for example: + // + // _exampleservice._tcp.example.com + // // Name is a required field Name *string `type:"string" required:"true"` // The ID of the namespace that you want to use to create the service. NamespaceId *string `type:"string"` + + // The tags to add to the service. Each tag consists of a key and an optional + // value, both of which you define. Tag keys can have a maximum character length + // of 128 characters, and tag values can have a maximum length of 256 characters. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -2497,6 +2869,16 @@ func (s *CreateServiceInput) Validate() error { invalidParams.AddNested("HealthCheckCustomConfig", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2546,6 +2928,12 @@ func (s *CreateServiceInput) SetNamespaceId(v string) *CreateServiceInput { return s } +// SetTags sets the Tags field's value. +func (s *CreateServiceInput) SetTags(v []*Tag) *CreateServiceInput { + s.Tags = v + return s +} + type CreateServiceOutput struct { _ struct{} `type:"structure"` @@ -2572,8 +2960,8 @@ func (s *CreateServiceOutput) SetService(v *Service) *CreateServiceOutput { // The health check for the instance that is specified by ServiceId and InstanceId // is not a custom health check. 
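// Illustrative sketch (not part of the vendored diff): creating a service whose
// name follows the SRV convention described above (leading underscore, trailing
// ._protocol), with a custom health check and tags. The namespace ID is a
// placeholder, and the DnsRecords field name on DnsConfig is assumed from the
// SDK's standard shape for this type; it is not shown in this hunk.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicediscovery"
)

func main() {
	svc := servicediscovery.New(session.Must(session.NewSession()))

	out, err := svc.CreateService(&servicediscovery.CreateServiceInput{
		Name:        aws.String("_exampleservice._tcp"),
		NamespaceId: aws.String("ns-example"), // placeholder namespace ID
		DnsConfig: &servicediscovery.DnsConfig{
			RoutingPolicy: aws.String("MULTIVALUE"),
			DnsRecords: []*servicediscovery.DnsRecord{
				{Type: aws.String("SRV"), TTL: aws.Int64(60)},
			},
		},
		// Per the note above, HealthCheckCustomConfig can't be added, changed,
		// or removed once the service exists.
		HealthCheckCustomConfig: &servicediscovery.HealthCheckCustomConfig{},
		Tags: []*servicediscovery.Tag{
			{Key: aws.String("environment"), Value: aws.String("staging")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Service.Id))
}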
type CustomHealthNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2590,17 +2978,17 @@ func (s CustomHealthNotFound) GoString() string { func newErrorCustomHealthNotFound(v protocol.ResponseMetadata) error { return &CustomHealthNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CustomHealthNotFound) Code() string { +func (s *CustomHealthNotFound) Code() string { return "CustomHealthNotFound" } // Message returns the exception's message. -func (s CustomHealthNotFound) Message() string { +func (s *CustomHealthNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2608,22 +2996,22 @@ func (s CustomHealthNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CustomHealthNotFound) OrigErr() error { +func (s *CustomHealthNotFound) OrigErr() error { return nil } -func (s CustomHealthNotFound) Error() string { +func (s *CustomHealthNotFound) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CustomHealthNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CustomHealthNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CustomHealthNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *CustomHealthNotFound) RequestID() string { + return s.RespMetadata.RequestID } type DeleteNamespaceInput struct { @@ -2668,7 +3056,7 @@ type DeleteNamespaceOutput struct { _ struct{} `type:"structure"` // A value that you can use to determine whether the request completed successfully. - // To get the status of the operation, see GetOperation. + // To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html). OperationId *string `type:"string"` } @@ -2743,7 +3131,8 @@ func (s DeleteServiceOutput) GoString() string { type DeregisterInstanceInput struct { _ struct{} `type:"structure"` - // The value that you specified for Id in the RegisterInstance request. + // The value that you specified for Id in the RegisterInstance (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html) + // request. // // InstanceId is a required field InstanceId *string `type:"string" required:"true"` @@ -2796,7 +3185,7 @@ type DeregisterInstanceOutput struct { _ struct{} `type:"structure"` // A value that you can use to determine whether the request completed successfully. - // For more information, see GetOperation. + // For more information, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html). OperationId *string `type:"string"` } @@ -2822,9 +3211,9 @@ type DiscoverInstancesInput struct { // The health status of the instances that you want to discover. HealthStatus *string `type:"string" enum:"HealthStatusFilter"` - // The maximum number of instances that you want Cloud Map to return in the - // response to a DiscoverInstances request. If you don't specify a value for - // MaxResults, Cloud Map returns up to 100 instances. + // The maximum number of instances that you want AWS Cloud Map to return in + // the response to a DiscoverInstances request. 
If you don't specify a value + // for MaxResults, AWS Cloud Map returns up to 100 instances. MaxResults *int64 `min:"1" type:"integer"` // The name of the namespace that you specified when you registered the instance. @@ -2832,10 +3221,16 @@ type DiscoverInstancesInput struct { // NamespaceName is a required field NamespaceName *string `type:"string" required:"true"` - // A string map that contains attributes with values that you can use to filter - // instances by any custom attribute that you specified when you registered - // the instance. Only instances that match all the specified key/value pairs - // will be returned. + // Opportunistic filters to scope the results based on custom attributes. If + // there are instances that match both the filters specified in both the QueryParameters + // parameter and this parameter, they are returned. Otherwise, these filters + // are ignored and only instances that match the filters specified in the QueryParameters + // parameter are returned. + OptionalParameters map[string]*string `type:"map"` + + // Filters to scope the results based on custom attributes for the instance. + // For example, {version=v1, az=1a}. Only instances that match all the specified + // key-value pairs will be returned. QueryParameters map[string]*string `type:"map"` // The name of the service that you specified when you registered the instance. @@ -2891,6 +3286,12 @@ func (s *DiscoverInstancesInput) SetNamespaceName(v string) *DiscoverInstancesIn return s } +// SetOptionalParameters sets the OptionalParameters field's value. +func (s *DiscoverInstancesInput) SetOptionalParameters(v map[string]*string) *DiscoverInstancesInput { + s.OptionalParameters = v + return s +} + // SetQueryParameters sets the QueryParameters field's value. func (s *DiscoverInstancesInput) SetQueryParameters(v map[string]*string) *DiscoverInstancesInput { s.QueryParameters = v @@ -2966,7 +3367,7 @@ type DnsConfig struct { // all instances are healthy and returns the values for up to eight instances. // // For more information about the multivalue routing policy, see Multivalue - // Answer Routing (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-multivalue) + // Answer Routing (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-multivalue) // in the Route 53 Developer Guide. // // WEIGHTED @@ -2987,7 +3388,7 @@ type DnsConfig struct { // selected instance. // // For more information about the weighted routing policy, see Weighted Routing - // (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-weighted) + // (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-weighted) // in the Route 53 Developer Guide. RoutingPolicy *string `type:"string" enum:"RoutingPolicy"` } @@ -3130,30 +3531,34 @@ type DnsRecord struct { // // Alias records don't include a TTL because Route 53 uses the TTL for the AWS // resource that an alias record routes traffic to. If you include the AWS_ALIAS_DNS_NAME - // attribute when you submit a RegisterInstance request, the TTL value is ignored. - // Always specify a TTL for the service; you can use a service to register instances - // that create either alias or non-alias records. + // attribute when you submit a RegisterInstance (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html) + // request, the TTL value is ignored. 
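// Illustrative sketch (not part of the vendored diff): discovering instances with
// the required QueryParameters filters plus the new "opportunistic"
// OptionalParameters filters added above. The namespace and service names are
// placeholders, and the InstanceId field on HttpInstanceSummary is assumed from
// the SDK's standard shape for that type.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicediscovery"
)

func main() {
	svc := servicediscovery.New(session.Must(session.NewSession()))

	out, err := svc.DiscoverInstances(&servicediscovery.DiscoverInstancesInput{
		NamespaceName: aws.String("internal.example.com"),
		ServiceName:   aws.String("backend"),
		MaxResults:    aws.Int64(10),
		HealthStatus:  aws.String("HEALTHY"),
		// Instances must match every QueryParameters filter.
		QueryParameters: aws.StringMap(map[string]string{"version": "v1"}),
		// OptionalParameters narrow the result only if some instances also match them.
		OptionalParameters: aws.StringMap(map[string]string{"az": "us-east-1a"}),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, inst := range out.Instances {
		fmt.Println(aws.StringValue(inst.InstanceId))
	}
}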
Always specify a TTL for the service; + // you can use a service to register instances that create either alias or non-alias + // records. // // TTL is a required field TTL *int64 `type:"long" required:"true"` // The type of the resource, which indicates the type of value that Route 53 - // returns in response to DNS queries. + // returns in response to DNS queries. You can specify values for Type in the + // following combinations: // - // Note the following: + // * A + // + // * AAAA // - // * A, AAAA, and SRV records: You can specify settings for a maximum of - // one A, one AAAA, and one SRV record. You can specify them in any combination. + // * A and AAAA // - // * CNAME records: If you specify CNAME for Type, you can't define any other - // records. This is a limitation of DNS: you can't create a CNAME record - // and any other type of record that has the same name as a CNAME record. + // * SRV // - // * Alias records: If you want AWS Cloud Map to create a Route 53 alias - // record when you register an instance, specify A or AAAA for Type. + // * CNAME // - // * All records: You specify settings other than TTL and Type when you register - // an instance. + // If you want AWS Cloud Map to create a Route 53 alias record when you register + // an instance, specify A or AAAA for Type. + // + // You specify other settings, such as the IP address for A and AAAA records, + // when you register an instance. For more information, see RegisterInstance + // (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html). // // The following values are supported: // @@ -3171,7 +3576,8 @@ type DnsRecord struct { // Note the following: // // * You specify the domain name that you want to route traffic to when you - // register an instance. For more information, see RegisterInstanceRequest$Attributes. + // register an instance. For more information, see Attributes (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html#cloudmap-RegisterInstance-request-Attributes) + // in the topic RegisterInstance (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html). // // * You must specify WEIGHTED for the value of RoutingPolicy. // @@ -3190,7 +3596,8 @@ type DnsRecord struct { // * The values of priority and weight are both set to 1 and can't be changed. // // * The value of port comes from the value that you specify for the AWS_INSTANCE_PORT - // attribute when you submit a RegisterInstance request. + // attribute when you submit a RegisterInstance (https://docs.aws.amazon.com/cloud-map/latest/api/API_RegisterInstance.html) + // request. // // * The value of service-hostname is a concatenation of the following values: // The value that you specify for InstanceId when you register an instance. @@ -3199,10 +3606,17 @@ type DnsRecord struct { // name of the namespace is example.com, the value of service-hostname is: // test.backend.example.com // - // If you specify settings for an SRV record and if you specify values for AWS_INSTANCE_IPV4, - // AWS_INSTANCE_IPV6, or both in the RegisterInstance request, AWS Cloud Map - // automatically creates A and/or AAAA records that have the same name as the - // value of service-hostname in the SRV record. You can ignore these records. 
+ // If you specify settings for an SRV record, note the following: + // + // * If you specify values for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both + // in the RegisterInstance request, AWS Cloud Map automatically creates A + // and/or AAAA records that have the same name as the value of service-hostname + // in the SRV record. You can ignore these records. + // + // * If you're using a system that requires a specific SRV format, such as + // HAProxy, see the Name (https://docs.aws.amazon.com/cloud-map/latest/api/API_CreateService.html#cloudmap-CreateService-request-Name) + // element in the documentation about CreateService for information about + // how to specify the correct name format. // // Type is a required field Type *string `type:"string" required:"true" enum:"RecordType"` @@ -3248,8 +3662,8 @@ func (s *DnsRecord) SetType(v string) *DnsRecord { // The operation is already in progress. type DuplicateRequest struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The ID of the operation that is already in progress. DuplicateOperationId *string `type:"string"` @@ -3269,17 +3683,17 @@ func (s DuplicateRequest) GoString() string { func newErrorDuplicateRequest(v protocol.ResponseMetadata) error { return &DuplicateRequest{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicateRequest) Code() string { +func (s *DuplicateRequest) Code() string { return "DuplicateRequest" } // Message returns the exception's message. -func (s DuplicateRequest) Message() string { +func (s *DuplicateRequest) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3287,22 +3701,22 @@ func (s DuplicateRequest) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicateRequest) OrigErr() error { +func (s *DuplicateRequest) OrigErr() error { return nil } -func (s DuplicateRequest) Error() string { +func (s *DuplicateRequest) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicateRequest) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicateRequest) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DuplicateRequest) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicateRequest) RequestID() string { + return s.RespMetadata.RequestID } type GetInstanceInput struct { @@ -3390,7 +3804,8 @@ type GetInstancesHealthStatusInput struct { // instances that are associated with the specified service. // // To get the IDs for the instances that you've registered by using a specified - // service, submit a ListInstances request. + // service, submit a ListInstances (https://docs.aws.amazon.com/cloud-map/latest/api/API_ListInstances.html) + // request. Instances []*string `min:"1" type:"list"` // The maximum number of instances that you want AWS Cloud Map to return in @@ -3682,9 +4097,10 @@ func (s *GetServiceOutput) SetService(v *Service) *GetServiceOutput { return s } -// Public DNS namespaces only. A complex type that contains settings for an -// optional health check. If you specify settings for a health check, AWS Cloud -// Map associates the health check with the records that you specify in DnsConfig. +// Public DNS and HTTP namespaces only. 
A complex type that contains settings +// for an optional health check. If you specify settings for a health check, +// AWS Cloud Map associates the health check with the records that you specify +// in DnsConfig. // // If you specify a health check configuration, you can specify either HealthCheckCustomConfig // or HealthCheckConfig but not both. @@ -3695,14 +4111,14 @@ func (s *GetServiceOutput) SetService(v *Service) *GetServiceOutput { // // Note the following about configuring health checks. // -// A and AAAA records +// A and AAAA records // // If DnsConfig includes configurations for both A and AAAA records, AWS Cloud // Map creates a health check that uses the IPv4 address to check the health // of the resource. If the endpoint that is specified by the IPv4 address is // unhealthy, Route 53 considers both the A and AAAA records to be unhealthy. // -// CNAME records +// CNAME records // // You can't specify settings for HealthCheckConfig when the DNSConfig includes // CNAME for the value of Type. If you do, the CreateService request will fail @@ -3719,7 +4135,7 @@ func (s *GetServiceOutput) SetService(v *Service) *GetServiceOutput { // Health checking regions // // Health checkers perform checks from all Route 53 health-checking regions. -// For a list of the current regions, see Regions (http://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-Regions). +// For a list of the current regions, see Regions (https://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-Regions). // // Alias records // @@ -3729,7 +4145,7 @@ func (s *GetServiceOutput) SetService(v *Service) *GetServiceOutput { // * Route 53 automatically sets EvaluateTargetHealth to true for alias records. // When EvaluateTargetHealth is true, the alias record inherits the health // of the referenced AWS resource. such as an ELB load balancer. For more -// information, see EvaluateTargetHealth (http://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-EvaluateTargetHealth). +// information, see EvaluateTargetHealth (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-EvaluateTargetHealth). // // * If you include HealthCheckConfig and then use the service to register // an instance that creates an alias record, Route 53 doesn't create the @@ -3746,7 +4162,7 @@ type HealthCheckConfig struct { // The number of consecutive health checks that an endpoint must pass or fail // for Route 53 to change the current status of the endpoint from unhealthy // to healthy or vice versa. For more information, see How Route 53 Determines - // Whether an Endpoint Is Healthy (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html) + // Whether an Endpoint Is Healthy (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html) // in the Route 53 Developer Guide. FailureThreshold *int64 `min:"1" type:"integer"` @@ -3779,7 +4195,7 @@ type HealthCheckConfig struct { // for Type, don't specify a value for ResourcePath. 
// // For more information, see How Route 53 Determines Whether an Endpoint Is - // Healthy (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html) + // Healthy (https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-determining-health-of-endpoints.html) // in the Route 53 Developer Guide. // // Type is a required field @@ -3848,8 +4264,8 @@ func (s *HealthCheckConfig) SetType(v string) *HealthCheckConfig { // or HealthCheckConfig but not both. // // To change the status of a custom health check, submit an UpdateInstanceCustomHealthStatus -// request. Cloud Map doesn't monitor the status of the resource, it just keeps -// a record of the status specified in the most recent UpdateInstanceCustomHealthStatus +// request. AWS Cloud Map doesn't monitor the status of the resource, it just +// keeps a record of the status specified in the most recent UpdateInstanceCustomHealthStatus // request. // // Here's how custom health checks work: @@ -3858,6 +4274,7 @@ func (s *HealthCheckConfig) SetType(v string) *HealthCheckConfig { // // The failure threshold indicates the number of 30-second intervals you want // AWS Cloud Map to wait between the time that your application sends an UpdateInstanceCustomHealthStatus +// (https://docs.aws.amazon.com/cloud-map/latest/api/API_UpdateInstanceCustomHealthStatus.html) // request and the time that AWS Cloud Map stops routing internet traffic to // the corresponding resource. // @@ -3878,22 +4295,22 @@ func (s *HealthCheckConfig) SetType(v string) *HealthCheckConfig { // If another UpdateInstanceCustomHealthStatus request doesn't arrive during // that time to change the status back to healthy, AWS Cloud Map stops routing // traffic to the resource. -// -// Note the following about configuring custom health checks. type HealthCheckCustomConfig struct { _ struct{} `type:"structure"` - // The number of 30-second intervals that you want Cloud Map to wait after receiving - // an UpdateInstanceCustomHealthStatus request before it changes the health - // status of a service instance. For example, suppose you specify a value of - // 2 for FailureTheshold, and then your application sends an UpdateInstanceCustomHealthStatus - // request. Cloud Map waits for approximately 60 seconds (2 x 30) before changing - // the status of the service instance based on that request. + // + // This parameter has been deprecated and is always set to 1. AWS Cloud Map + // waits for approximately 30 seconds after receiving an UpdateInstanceCustomHealthStatus + // request before changing the status of the service instance. + // + // The number of 30-second intervals that you want AWS Cloud Map to wait after + // receiving an UpdateInstanceCustomHealthStatus request before it changes the + // health status of a service instance. // // Sending a second or subsequent UpdateInstanceCustomHealthStatus request with - // the same value before FailureThreshold x 30 seconds has passed doesn't accelerate - // the change. Cloud Map still waits FailureThreshold x 30 seconds after the - // first request to make the change. + // the same value before 30 seconds has passed doesn't accelerate the change. + // AWS Cloud Map still waits 30 seconds after the first request to make the + // change. 
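// Illustrative sketch (not part of the vendored diff): reporting custom health for
// an instance registered under a service that uses HealthCheckCustomConfig, as
// described above. The ServiceId, InstanceId, and Status field names are
// assumptions based on the UpdateInstanceCustomHealthStatus API shape; they do
// not appear in this hunk, and the IDs are placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicediscovery"
)

func main() {
	svc := servicediscovery.New(session.Must(session.NewSession()))

	// Mark the instance unhealthy; per the note above, AWS Cloud Map waits roughly
	// 30 seconds after this request before it stops routing traffic.
	_, err := svc.UpdateInstanceCustomHealthStatus(&servicediscovery.UpdateInstanceCustomHealthStatusInput{
		ServiceId:  aws.String("srv-example"), // placeholder service ID
		InstanceId: aws.String("i-example"),   // placeholder instance ID
		Status:     aws.String("UNHEALTHY"),
	})
	if err != nil {
		log.Fatal(err)
	}
}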
FailureThreshold *int64 `min:"1" type:"integer"` } @@ -3926,9 +4343,9 @@ func (s *HealthCheckCustomConfig) SetFailureThreshold(v int64) *HealthCheckCusto return s } -// In a response to a DiscoverInstance request, HttpInstanceSummary contains -// information about one instance that matches the values that you specified -// in the request. +// In a response to a DiscoverInstances (https://docs.aws.amazon.com/cloud-map/latest/api/API_DiscoverInstances.html) +// request, HttpInstanceSummary contains information about one instance that +// matches the values that you specified in the request. type HttpInstanceSummary struct { _ struct{} `type:"structure"` @@ -4033,7 +4450,7 @@ type Instance struct { // If you want AWS Cloud Map to create a Route 53 alias record that routes traffic // to an Elastic Load Balancing load balancer, specify the DNS name that is // associated with the load balancer. For information about how to get the DNS - // name, see "DNSName" in the topic AliasTarget (http://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html). + // name, see "DNSName" in the topic AliasTarget (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html). // // Note the following: // @@ -4053,6 +4470,11 @@ type Instance struct { // * If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values // for any of the AWS_INSTANCE attributes. // + // AWS_EC2_INSTANCE_ID + // + // HTTP namespaces only. The Amazon EC2 instance ID for the instance. The AWS_INSTANCE_IPV4 + // attribute contains the primary private IPv4 address. + // // AWS_INSTANCE_CNAME // // If the service configuration includes a CNAME record, the domain name that @@ -4087,8 +4509,8 @@ type Instance struct { // If the service includes HealthCheckConfig, the port on the endpoint that // you want Route 53 to send requests to. // - // This value is required if you specified settings for an SRV record when you - // created the service. + // This value is required if you specified settings for an SRV record or a Route + // 53 health check when you created the service. Attributes map[string]*string `type:"map"` // A unique string that identifies the request and that allows failed RegisterInstance @@ -4103,7 +4525,8 @@ type Instance struct { // // * If the service that is specified by ServiceId includes settings for // an SRV record, the value of InstanceId is automatically included as part - // of the value for the SRV record. For more information, see DnsRecord$Type. + // of the value for the SRV record. For more information, see DnsRecord > + // Type (https://docs.aws.amazon.com/cloud-map/latest/api/API_DnsRecord.html#cloudmap-Type-DnsRecord-Type). // // * You can use this value to update an existing instance. // @@ -4151,8 +4574,8 @@ func (s *Instance) SetId(v string) *Instance { // No instance exists with the specified ID, or the instance was recently registered, // and information about the instance hasn't propagated yet. type InstanceNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4169,17 +4592,17 @@ func (s InstanceNotFound) GoString() string { func newErrorInstanceNotFound(v protocol.ResponseMetadata) error { return &InstanceNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InstanceNotFound) Code() string { +func (s *InstanceNotFound) Code() string { return "InstanceNotFound" } // Message returns the exception's message. -func (s InstanceNotFound) Message() string { +func (s *InstanceNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4187,22 +4610,22 @@ func (s InstanceNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InstanceNotFound) OrigErr() error { +func (s *InstanceNotFound) OrigErr() error { return nil } -func (s InstanceNotFound) Error() string { +func (s *InstanceNotFound) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InstanceNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InstanceNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InstanceNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *InstanceNotFound) RequestID() string { + return s.RespMetadata.RequestID } // A complex type that contains information about the instances that you registered @@ -4222,6 +4645,11 @@ type InstanceSummary struct { // Load Balancing load balancer, the DNS name that is associated with the // load balancer. // + // * AWS_EC2_INSTANCE_ID: (HTTP namespaces only) The Amazon EC2 instance + // ID for the instance. When the AWS_EC2_INSTANCE_ID attribute is specified, + // then the AWS_INSTANCE_IPV4 attribute contains the primary private IPv4 + // address. + // // * AWS_INSTANCE_CNAME: For a CNAME record, the domain name that Route 53 // returns in response to DNS queries, for example, example.com. // @@ -4266,8 +4694,8 @@ func (s *InstanceSummary) SetId(v string) *InstanceSummary { // might be missing, a numeric value might be outside the allowed range, or // a string value might exceed length constraints. type InvalidInput struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4284,17 +4712,17 @@ func (s InvalidInput) GoString() string { func newErrorInvalidInput(v protocol.ResponseMetadata) error { return &InvalidInput{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInput) Code() string { +func (s *InvalidInput) Code() string { return "InvalidInput" } // Message returns the exception's message. -func (s InvalidInput) Message() string { +func (s *InvalidInput) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4302,22 +4730,22 @@ func (s InvalidInput) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInput) OrigErr() error { +func (s *InvalidInput) OrigErr() error { return nil } -func (s InvalidInput) Error() string { +func (s *InvalidInput) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInput) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInput) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidInput) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInput) RequestID() string { + return s.RespMetadata.RequestID } type ListInstancesInput struct { @@ -4777,6 +5205,71 @@ func (s *ListServicesOutput) SetServices(v []*ServiceSummary) *ListServicesOutpu return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource that you want to retrieve + // tags for. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput { + s.ResourceARN = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags that are assigned to the resource. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s +} + // A complex type that contains information about a specified namespace. type Namespace struct { _ struct{} `type:"structure"` @@ -4811,7 +5304,17 @@ type Namespace struct { // The number of services that are associated with the namespace. ServiceCount *int64 `type:"integer"` - // The type of the namespace. Valid values are DNS_PUBLIC and DNS_PRIVATE. + // The type of the namespace. The methods for discovering instances depends + // on the value that you specify: + // + // * HTTP: Instances can be discovered only programmatically, using the AWS + // Cloud Map DiscoverInstances API. + // + // * DNS_PUBLIC: Instances can be discovered using public DNS queries and + // using the DiscoverInstances API. + // + // * DNS_PRIVATE: Instances can be discovered using DNS queries in VPCs and + // using the DiscoverInstances API. Type *string `type:"string" enum:"NamespaceType"` } @@ -4881,8 +5384,8 @@ func (s *Namespace) SetType(v string) *Namespace { // The namespace that you're trying to create already exists. type NamespaceAlreadyExists struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The CreatorRequestId that was used to create the namespace. 
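// Illustrative sketch (not part of the vendored diff): listing the tags on a Cloud
// Map resource with the new ListTagsForResource input and output types defined
// above. The ARN is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicediscovery"
)

func main() {
	svc := servicediscovery.New(session.Must(session.NewSession()))

	out, err := svc.ListTagsForResource(&servicediscovery.ListTagsForResourceInput{
		ResourceARN: aws.String("arn:aws:servicediscovery:us-east-1:123456789012:namespace/ns-example"), // placeholder ARN
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, tag := range out.Tags {
		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
	}
}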
CreatorRequestId *string `type:"string"` @@ -4905,17 +5408,17 @@ func (s NamespaceAlreadyExists) GoString() string { func newErrorNamespaceAlreadyExists(v protocol.ResponseMetadata) error { return &NamespaceAlreadyExists{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NamespaceAlreadyExists) Code() string { +func (s *NamespaceAlreadyExists) Code() string { return "NamespaceAlreadyExists" } // Message returns the exception's message. -func (s NamespaceAlreadyExists) Message() string { +func (s *NamespaceAlreadyExists) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4923,22 +5426,22 @@ func (s NamespaceAlreadyExists) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NamespaceAlreadyExists) OrigErr() error { +func (s *NamespaceAlreadyExists) OrigErr() error { return nil } -func (s NamespaceAlreadyExists) Error() string { +func (s *NamespaceAlreadyExists) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s NamespaceAlreadyExists) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NamespaceAlreadyExists) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NamespaceAlreadyExists) RequestID() string { - return s.respMetadata.RequestID +func (s *NamespaceAlreadyExists) RequestID() string { + return s.RespMetadata.RequestID } // A complex type that identifies the namespaces that you want to list. You @@ -5019,8 +5522,8 @@ func (s *NamespaceFilter) SetValues(v []*string) *NamespaceFilter { // No namespace exists with the specified ID. type NamespaceNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5037,17 +5540,17 @@ func (s NamespaceNotFound) GoString() string { func newErrorNamespaceNotFound(v protocol.ResponseMetadata) error { return &NamespaceNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NamespaceNotFound) Code() string { +func (s *NamespaceNotFound) Code() string { return "NamespaceNotFound" } // Message returns the exception's message. -func (s NamespaceNotFound) Message() string { +func (s *NamespaceNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5055,22 +5558,22 @@ func (s NamespaceNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NamespaceNotFound) OrigErr() error { +func (s *NamespaceNotFound) OrigErr() error { return nil } -func (s NamespaceNotFound) Error() string { +func (s *NamespaceNotFound) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NamespaceNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NamespaceNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s NamespaceNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *NamespaceNotFound) RequestID() string { + return s.RespMetadata.RequestID } // A complex type that contains information that is specific to the namespace @@ -5423,8 +5926,8 @@ func (s *OperationFilter) SetValues(v []*string) *OperationFilter { // No operation exists with the specified ID. type OperationNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5441,17 +5944,17 @@ func (s OperationNotFound) GoString() string { func newErrorOperationNotFound(v protocol.ResponseMetadata) error { return &OperationNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationNotFound) Code() string { +func (s *OperationNotFound) Code() string { return "OperationNotFound" } // Message returns the exception's message. -func (s OperationNotFound) Message() string { +func (s *OperationNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5459,26 +5962,27 @@ func (s OperationNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OperationNotFound) OrigErr() error { +func (s *OperationNotFound) OrigErr() error { return nil } -func (s OperationNotFound) Error() string { +func (s *OperationNotFound) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OperationNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OperationNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *OperationNotFound) RequestID() string { + return s.RespMetadata.RequestID } // A complex type that contains information about an operation that matches -// the criteria that you specified in a ListOperations request. +// the criteria that you specified in a ListOperations (https://docs.aws.amazon.com/cloud-map/latest/api/API_ListOperations.html) +// request. type OperationSummary struct { _ struct{} `type:"structure"` @@ -5537,7 +6041,7 @@ type RegisterInstanceInput struct { // If you want AWS Cloud Map to create an Amazon Route 53 alias record that // routes traffic to an Elastic Load Balancing load balancer, specify the DNS // name that is associated with the load balancer. For information about how - // to get the DNS name, see "DNSName" in the topic AliasTarget (http://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html) + // to get the DNS name, see "DNSName" in the topic AliasTarget (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html) // in the Route 53 API Reference. // // Note the following: @@ -5553,11 +6057,19 @@ type RegisterInstanceInput struct { // won't associate the health check with the alias record. // // * Auto naming currently doesn't support creating alias records that route - // traffic to AWS resources other than ELB load balancers. + // traffic to AWS resources other than Elastic Load Balancing load balancers. // // * If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values // for any of the AWS_INSTANCE attributes. 
// + // AWS_EC2_INSTANCE_ID + // + // HTTP namespaces only. The Amazon EC2 instance ID for the instance. If the + // AWS_EC2_INSTANCE_ID attribute is specified, then the only other attribute + // that can be specified is AWS_INIT_HEALTH_STATUS. When the AWS_EC2_INSTANCE_ID + // attribute is specified, then the AWS_INSTANCE_IPV4 attribute will be filled + // out with the primary private IPv4 address. + // // AWS_INIT_HEALTH_STATUS // // If the service configuration includes HealthCheckCustomConfig, you can optionally @@ -5599,14 +6111,15 @@ type RegisterInstanceInput struct { // If the service includes HealthCheckConfig, the port on the endpoint that // you want Route 53 to send requests to. // - // This value is required if you specified settings for an SRV record when you - // created the service. + // This value is required if you specified settings for an SRV record or a Route + // 53 health check when you created the service. // // Custom attributes // // You can add up to 30 custom attributes. For each key-value pair, the maximum // length of the attribute name is 255 characters, and the maximum length of - // the attribute value is 1,024 characters. + // the attribute value is 1,024 characters. The total size of all provided attributes + // (sum of all keys and values) must not exceed 5,000 characters. // // Attributes is a required field Attributes map[string]*string `type:"map" required:"true"` @@ -5623,7 +6136,8 @@ type RegisterInstanceInput struct { // // * If the service that is specified by ServiceId includes settings for // an SRV record, the value of InstanceId is automatically included as part - // of the value for the SRV record. For more information, see DnsRecord$Type. + // of the value for the SRV record. For more information, see DnsRecord > + // Type (https://docs.aws.amazon.com/cloud-map/latest/api/API_DnsRecord.html#cloudmap-Type-DnsRecord-Type). // // * You can use this value to update an existing instance. // @@ -5702,7 +6216,7 @@ type RegisterInstanceOutput struct { _ struct{} `type:"structure"` // A value that you can use to determine whether the request completed successfully. - // To get the status of the operation, see GetOperation. + // To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html). OperationId *string `type:"string"` } @@ -5722,11 +6236,70 @@ func (s *RegisterInstanceOutput) SetOperationId(v string) *RegisterInstanceOutpu return s } +// The operation can't be completed because you've reached the quota for the +// number of requests. For more information, see AWS Cloud Map API request throttling +// quota (https://docs.aws.amazon.com/cloud-map/latest/dg/throttling.html) in +// the AWS Cloud Map Developer Guide. +type RequestLimitExceeded struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s RequestLimitExceeded) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestLimitExceeded) GoString() string { + return s.String() +} + +func newErrorRequestLimitExceeded(v protocol.ResponseMetadata) error { + return &RequestLimitExceeded{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *RequestLimitExceeded) Code() string { + return "RequestLimitExceeded" +} + +// Message returns the exception's message. 
+func (s *RequestLimitExceeded) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *RequestLimitExceeded) OrigErr() error { + return nil +} + +func (s *RequestLimitExceeded) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *RequestLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *RequestLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID +} + // The specified resource can't be deleted because it contains other resources. // For example, you can't delete a service that contains any instances. type ResourceInUse struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5743,17 +6316,17 @@ func (s ResourceInUse) GoString() string { func newErrorResourceInUse(v protocol.ResponseMetadata) error { return &ResourceInUse{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUse) Code() string { +func (s *ResourceInUse) Code() string { return "ResourceInUse" } // Message returns the exception's message. -func (s ResourceInUse) Message() string { +func (s *ResourceInUse) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5761,29 +6334,29 @@ func (s ResourceInUse) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUse) OrigErr() error { +func (s *ResourceInUse) OrigErr() error { return nil } -func (s ResourceInUse) Error() string { +func (s *ResourceInUse) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUse) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUse) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUse) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUse) RequestID() string { + return s.RespMetadata.RequestID } -// The resource can't be created because you've reached the limit on the number +// The resource can't be created because you've reached the quota on the number // of resources. type ResourceLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5800,17 +6373,17 @@ func (s ResourceLimitExceeded) GoString() string { func newErrorResourceLimitExceeded(v protocol.ResponseMetadata) error { return &ResourceLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceLimitExceeded) Code() string { +func (s *ResourceLimitExceeded) Code() string { return "ResourceLimitExceeded" } // Message returns the exception's message. 
-func (s ResourceLimitExceeded) Message() string { +func (s *ResourceLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5818,22 +6391,78 @@ func (s ResourceLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceLimitExceeded) OrigErr() error { +func (s *ResourceLimitExceeded) OrigErr() error { return nil } -func (s ResourceLimitExceeded) Error() string { +func (s *ResourceLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID +} + +// The operation can't be completed because the resource was not found. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // A complex type that contains information about the specified service. @@ -5862,9 +6491,10 @@ type Service struct { // you want AWS Cloud Map to create when you register an instance. DnsConfig *DnsConfig `type:"structure"` - // Public DNS namespaces only. A complex type that contains settings for an - // optional health check. If you specify settings for a health check, AWS Cloud - // Map associates the health check with the records that you specify in DnsConfig. + // Public DNS and HTTP namespaces only. A complex type that contains settings + // for an optional health check. If you specify settings for a health check, + // AWS Cloud Map associates the health check with the records that you specify + // in DnsConfig. // // For information about the charges for health checks, see Amazon Route 53 // Pricing (http://aws.amazon.com/route53/pricing/). 
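// Illustrative sketch (not part of the vendored diff): with the exception types
// now implemented on pointer receivers (including the new RequestLimitExceeded
// and ResourceNotFoundException above), callers on Go 1.13+ can match them with
// errors.As instead of switching on awserr.Error codes, assuming the typed error
// is returned unwrapped as the generated unmarshalers do. The GetService call and
// service ID are placeholders.
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/servicediscovery"
)

func main() {
	svc := servicediscovery.New(session.Must(session.NewSession()))

	_, err := svc.GetService(&servicediscovery.GetServiceInput{
		Id: aws.String("srv-example"), // placeholder service ID
	})
	if err != nil {
		var notFound *servicediscovery.ServiceNotFound
		var throttled *servicediscovery.RequestLimitExceeded
		switch {
		case errors.As(err, &notFound):
			fmt.Println("service does not exist:", notFound.Message())
		case errors.As(err, &throttled):
			fmt.Println("request quota exceeded, retry later:", throttled.Message())
		default:
			log.Fatal(err)
		}
	}
}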
@@ -5882,7 +6512,8 @@ type Service struct { // The number of instances that are currently associated with the service. Instances // that were previously associated with the service but that have been deleted - // are not included in the count. + // are not included in the count. The count might not reflect pending registrations + // and deregistrations. InstanceCount *int64 `type:"integer"` // The name of the service. @@ -5971,8 +6602,8 @@ func (s *Service) SetNamespaceId(v string) *Service { // The service can't be created because a service with the same name already // exists. type ServiceAlreadyExists struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The CreatorRequestId that was used to create the service. CreatorRequestId *string `type:"string"` @@ -5995,17 +6626,17 @@ func (s ServiceAlreadyExists) GoString() string { func newErrorServiceAlreadyExists(v protocol.ResponseMetadata) error { return &ServiceAlreadyExists{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceAlreadyExists) Code() string { +func (s *ServiceAlreadyExists) Code() string { return "ServiceAlreadyExists" } // Message returns the exception's message. -func (s ServiceAlreadyExists) Message() string { +func (s *ServiceAlreadyExists) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6013,22 +6644,22 @@ func (s ServiceAlreadyExists) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceAlreadyExists) OrigErr() error { +func (s *ServiceAlreadyExists) OrigErr() error { return nil } -func (s ServiceAlreadyExists) Error() string { +func (s *ServiceAlreadyExists) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceAlreadyExists) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceAlreadyExists) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceAlreadyExists) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceAlreadyExists) RequestID() string { + return s.RespMetadata.RequestID } // A complex type that contains changes to an existing service. @@ -6040,13 +6671,12 @@ type ServiceChange struct { // A complex type that contains information about the Route 53 DNS records that // you want AWS Cloud Map to create when you register an instance. - // - // DnsConfig is a required field - DnsConfig *DnsConfigChange `type:"structure" required:"true"` + DnsConfig *DnsConfigChange `type:"structure"` - // Public DNS namespaces only. A complex type that contains settings for an - // optional health check. If you specify settings for a health check, AWS Cloud - // Map associates the health check with the records that you specify in DnsConfig. + // Public DNS and HTTP namespaces only. A complex type that contains settings + // for an optional health check. If you specify settings for a health check, + // AWS Cloud Map associates the health check with the records that you specify + // in DnsConfig. // // If you specify a health check configuration, you can specify either HealthCheckCustomConfig // or HealthCheckConfig but not both. @@ -6057,14 +6687,14 @@ type ServiceChange struct { // // Note the following about configuring health checks. 
// - // A and AAAA records + // A and AAAA records // // If DnsConfig includes configurations for both A and AAAA records, AWS Cloud // Map creates a health check that uses the IPv4 address to check the health // of the resource. If the endpoint that is specified by the IPv4 address is // unhealthy, Route 53 considers both the A and AAAA records to be unhealthy. // - // CNAME records + // CNAME records // // You can't specify settings for HealthCheckConfig when the DNSConfig includes // CNAME for the value of Type. If you do, the CreateService request will fail @@ -6081,7 +6711,7 @@ type ServiceChange struct { // Health checking regions // // Health checkers perform checks from all Route 53 health-checking regions. - // For a list of the current regions, see Regions (http://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-Regions). + // For a list of the current regions, see Regions (https://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-Regions). // // Alias records // @@ -6091,7 +6721,7 @@ type ServiceChange struct { // * Route 53 automatically sets EvaluateTargetHealth to true for alias records. // When EvaluateTargetHealth is true, the alias record inherits the health // of the referenced AWS resource. such as an ELB load balancer. For more - // information, see EvaluateTargetHealth (http://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-EvaluateTargetHealth). + // information, see EvaluateTargetHealth (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-EvaluateTargetHealth). // // * If you include HealthCheckConfig and then use the service to register // an instance that creates an alias record, Route 53 doesn't create the @@ -6118,9 +6748,6 @@ func (s ServiceChange) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ServiceChange) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ServiceChange"} - if s.DnsConfig == nil { - invalidParams.Add(request.NewErrParamRequired("DnsConfig")) - } if s.DnsConfig != nil { if err := s.DnsConfig.Validate(); err != nil { invalidParams.AddNested("DnsConfig", err.(request.ErrInvalidParams)) @@ -6231,8 +6858,8 @@ func (s *ServiceFilter) SetValues(v []*string) *ServiceFilter { // No service exists with the specified ID. type ServiceNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6249,17 +6876,17 @@ func (s ServiceNotFound) GoString() string { func newErrorServiceNotFound(v protocol.ResponseMetadata) error { return &ServiceNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceNotFound) Code() string { +func (s *ServiceNotFound) Code() string { return "ServiceNotFound" } // Message returns the exception's message. -func (s ServiceNotFound) Message() string { +func (s *ServiceNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6267,22 +6894,22 @@ func (s ServiceNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ServiceNotFound) OrigErr() error { +func (s *ServiceNotFound) OrigErr() error { return nil } -func (s ServiceNotFound) Error() string { +func (s *ServiceNotFound) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceNotFound) RequestID() string { + return s.RespMetadata.RequestID } // A complex type that contains information about a specified service. @@ -6303,9 +6930,10 @@ type ServiceSummary struct { // that you want AWS Cloud Map to create when you register an instance. DnsConfig *DnsConfig `type:"structure"` - // Public DNS namespaces only. A complex type that contains settings for an - // optional health check. If you specify settings for a health check, AWS Cloud - // Map associates the health check with the records that you specify in DnsConfig. + // Public DNS and HTTP namespaces only. A complex type that contains settings + // for an optional health check. If you specify settings for a health check, + // AWS Cloud Map associates the health check with the records that you specify + // in DnsConfig. // // If you specify a health check configuration, you can specify either HealthCheckCustomConfig // or HealthCheckConfig but not both. @@ -6316,14 +6944,14 @@ type ServiceSummary struct { // // Note the following about configuring health checks. // - // A and AAAA records + // A and AAAA records // // If DnsConfig includes configurations for both A and AAAA records, AWS Cloud // Map creates a health check that uses the IPv4 address to check the health // of the resource. If the endpoint that is specified by the IPv4 address is // unhealthy, Route 53 considers both the A and AAAA records to be unhealthy. // - // CNAME records + // CNAME records // // You can't specify settings for HealthCheckConfig when the DNSConfig includes // CNAME for the value of Type. If you do, the CreateService request will fail @@ -6340,7 +6968,7 @@ type ServiceSummary struct { // Health checking regions // // Health checkers perform checks from all Route 53 health-checking regions. - // For a list of the current regions, see Regions (http://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-Regions). + // For a list of the current regions, see Regions (https://docs.aws.amazon.com/Route53/latest/APIReference/API_HealthCheckConfig.html#Route53-Type-HealthCheckConfig-Regions). // // Alias records // @@ -6350,7 +6978,7 @@ type ServiceSummary struct { // * Route 53 automatically sets EvaluateTargetHealth to true for alias records. // When EvaluateTargetHealth is true, the alias record inherits the health // of the referenced AWS resource. such as an ELB load balancer. For more - // information, see EvaluateTargetHealth (http://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-EvaluateTargetHealth). + // information, see EvaluateTargetHealth (https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-EvaluateTargetHealth). 
// // * If you include HealthCheckConfig and then use the service to register // an instance that creates an alias record, Route 53 doesn't create the @@ -6381,8 +7009,8 @@ type ServiceSummary struct { // or HealthCheckConfig but not both. // // To change the status of a custom health check, submit an UpdateInstanceCustomHealthStatus - // request. Cloud Map doesn't monitor the status of the resource, it just keeps - // a record of the status specified in the most recent UpdateInstanceCustomHealthStatus + // request. AWS Cloud Map doesn't monitor the status of the resource, it just + // keeps a record of the status specified in the most recent UpdateInstanceCustomHealthStatus // request. // // Here's how custom health checks work: @@ -6391,6 +7019,7 @@ type ServiceSummary struct { // // The failure threshold indicates the number of 30-second intervals you want // AWS Cloud Map to wait between the time that your application sends an UpdateInstanceCustomHealthStatus + // (https://docs.aws.amazon.com/cloud-map/latest/api/API_UpdateInstanceCustomHealthStatus.html) // request and the time that AWS Cloud Map stops routing internet traffic to // the corresponding resource. // @@ -6411,8 +7040,6 @@ type ServiceSummary struct { // If another UpdateInstanceCustomHealthStatus request doesn't arrive during // that time to change the status back to healthy, AWS Cloud Map stops routing // traffic to the resource. - // - // Note the following about configuring custom health checks. HealthCheckCustomConfig *HealthCheckCustomConfig `type:"structure"` // The ID that AWS Cloud Map assigned to the service when you created it. @@ -6420,7 +7047,8 @@ type ServiceSummary struct { // The number of instances that are currently associated with the service. Instances // that were previously associated with the service but that have been deleted - // are not included in the count. + // are not included in the count. The count might not reflect pending registrations + // and deregistrations. InstanceCount *int64 `type:"integer"` // The name of the service. @@ -6491,6 +7119,275 @@ func (s *ServiceSummary) SetName(v string) *ServiceSummary { return s } +// A custom key-value pair associated with a resource. +type Tag struct { + _ struct{} `type:"structure"` + + // The key identifier, or name, of the tag. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The string value associated with the key of the tag. You can set the value + // of a tag to an empty string, but you can't set the value of a tag to null. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. 
+func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource that you want to retrieve + // tags for. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // The tags to add to the specified resource. Specifying the tag key is required. + // You can set the value of a tag to an empty string, but you can't set the + // value of a tag to null. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *TagResourceInput) SetResourceARN(v string) *TagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// The list of tags on the resource is over the quota. The maximum number of +// tags that can be applied to a resource is 50. +type TooManyTagsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + // The name of the resource. + ResourceName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TooManyTagsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TooManyTagsException) GoString() string { + return s.String() +} + +func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { + return &TooManyTagsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyTagsException) Code() string { + return "TooManyTagsException" +} + +// Message returns the exception's message. +func (s *TooManyTagsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *TooManyTagsException) OrigErr() error { + return nil +} + +func (s *TooManyTagsException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resource that you want to retrieve + // tags for. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // The tag keys to remove from the specified resource. + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *UntagResourceInput) SetResourceARN(v string) *UntagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateInstanceCustomHealthStatusInput struct { _ struct{} `type:"structure"` @@ -6633,7 +7530,7 @@ type UpdateServiceOutput struct { _ struct{} `type:"structure"` // A value that you can use to determine whether the request completed successfully. - // To get the status of the operation, see GetOperation. + // To get the status of the operation, see GetOperation (https://docs.aws.amazon.com/cloud-map/latest/api/API_GetOperation.html). 
OperationId *string `type:"string"` } @@ -6661,6 +7558,14 @@ const ( CustomHealthStatusUnhealthy = "UNHEALTHY" ) +// CustomHealthStatus_Values returns all elements of the CustomHealthStatus enum +func CustomHealthStatus_Values() []string { + return []string{ + CustomHealthStatusHealthy, + CustomHealthStatusUnhealthy, + } +} + const ( // FilterConditionEq is a FilterCondition enum value FilterConditionEq = "EQ" @@ -6672,6 +7577,15 @@ const ( FilterConditionBetween = "BETWEEN" ) +// FilterCondition_Values returns all elements of the FilterCondition enum +func FilterCondition_Values() []string { + return []string{ + FilterConditionEq, + FilterConditionIn, + FilterConditionBetween, + } +} + const ( // HealthCheckTypeHttp is a HealthCheckType enum value HealthCheckTypeHttp = "HTTP" @@ -6683,6 +7597,15 @@ const ( HealthCheckTypeTcp = "TCP" ) +// HealthCheckType_Values returns all elements of the HealthCheckType enum +func HealthCheckType_Values() []string { + return []string{ + HealthCheckTypeHttp, + HealthCheckTypeHttps, + HealthCheckTypeTcp, + } +} + const ( // HealthStatusHealthy is a HealthStatus enum value HealthStatusHealthy = "HEALTHY" @@ -6694,6 +7617,15 @@ const ( HealthStatusUnknown = "UNKNOWN" ) +// HealthStatus_Values returns all elements of the HealthStatus enum +func HealthStatus_Values() []string { + return []string{ + HealthStatusHealthy, + HealthStatusUnhealthy, + HealthStatusUnknown, + } +} + const ( // HealthStatusFilterHealthy is a HealthStatusFilter enum value HealthStatusFilterHealthy = "HEALTHY" @@ -6705,11 +7637,27 @@ const ( HealthStatusFilterAll = "ALL" ) +// HealthStatusFilter_Values returns all elements of the HealthStatusFilter enum +func HealthStatusFilter_Values() []string { + return []string{ + HealthStatusFilterHealthy, + HealthStatusFilterUnhealthy, + HealthStatusFilterAll, + } +} + const ( // NamespaceFilterNameType is a NamespaceFilterName enum value NamespaceFilterNameType = "TYPE" ) +// NamespaceFilterName_Values returns all elements of the NamespaceFilterName enum +func NamespaceFilterName_Values() []string { + return []string{ + NamespaceFilterNameType, + } +} + const ( // NamespaceTypeDnsPublic is a NamespaceType enum value NamespaceTypeDnsPublic = "DNS_PUBLIC" @@ -6721,6 +7669,15 @@ const ( NamespaceTypeHttp = "HTTP" ) +// NamespaceType_Values returns all elements of the NamespaceType enum +func NamespaceType_Values() []string { + return []string{ + NamespaceTypeDnsPublic, + NamespaceTypeDnsPrivate, + NamespaceTypeHttp, + } +} + const ( // OperationFilterNameNamespaceId is a OperationFilterName enum value OperationFilterNameNamespaceId = "NAMESPACE_ID" @@ -6738,6 +7695,17 @@ const ( OperationFilterNameUpdateDate = "UPDATE_DATE" ) +// OperationFilterName_Values returns all elements of the OperationFilterName enum +func OperationFilterName_Values() []string { + return []string{ + OperationFilterNameNamespaceId, + OperationFilterNameServiceId, + OperationFilterNameStatus, + OperationFilterNameType, + OperationFilterNameUpdateDate, + } +} + const ( // OperationStatusSubmitted is a OperationStatus enum value OperationStatusSubmitted = "SUBMITTED" @@ -6752,6 +7720,16 @@ const ( OperationStatusFail = "FAIL" ) +// OperationStatus_Values returns all elements of the OperationStatus enum +func OperationStatus_Values() []string { + return []string{ + OperationStatusSubmitted, + OperationStatusPending, + OperationStatusSuccess, + OperationStatusFail, + } +} + const ( // OperationTargetTypeNamespace is a OperationTargetType enum value OperationTargetTypeNamespace = 
"NAMESPACE" @@ -6763,6 +7741,15 @@ const ( OperationTargetTypeInstance = "INSTANCE" ) +// OperationTargetType_Values returns all elements of the OperationTargetType enum +func OperationTargetType_Values() []string { + return []string{ + OperationTargetTypeNamespace, + OperationTargetTypeService, + OperationTargetTypeInstance, + } +} + const ( // OperationTypeCreateNamespace is a OperationType enum value OperationTypeCreateNamespace = "CREATE_NAMESPACE" @@ -6780,6 +7767,17 @@ const ( OperationTypeDeregisterInstance = "DEREGISTER_INSTANCE" ) +// OperationType_Values returns all elements of the OperationType enum +func OperationType_Values() []string { + return []string{ + OperationTypeCreateNamespace, + OperationTypeDeleteNamespace, + OperationTypeUpdateService, + OperationTypeRegisterInstance, + OperationTypeDeregisterInstance, + } +} + const ( // RecordTypeSrv is a RecordType enum value RecordTypeSrv = "SRV" @@ -6794,6 +7792,16 @@ const ( RecordTypeCname = "CNAME" ) +// RecordType_Values returns all elements of the RecordType enum +func RecordType_Values() []string { + return []string{ + RecordTypeSrv, + RecordTypeA, + RecordTypeAaaa, + RecordTypeCname, + } +} + const ( // RoutingPolicyMultivalue is a RoutingPolicy enum value RoutingPolicyMultivalue = "MULTIVALUE" @@ -6802,7 +7810,22 @@ const ( RoutingPolicyWeighted = "WEIGHTED" ) +// RoutingPolicy_Values returns all elements of the RoutingPolicy enum +func RoutingPolicy_Values() []string { + return []string{ + RoutingPolicyMultivalue, + RoutingPolicyWeighted, + } +} + const ( // ServiceFilterNameNamespaceId is a ServiceFilterName enum value ServiceFilterNameNamespaceId = "NAMESPACE_ID" ) + +// ServiceFilterName_Values returns all elements of the ServiceFilterName enum +func ServiceFilterName_Values() []string { + return []string{ + ServiceFilterNameNamespaceId, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/errors.go b/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/errors.go index 0e4e00857..540fb5208 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/errors.go @@ -54,6 +54,15 @@ const ( // No operation exists with the specified ID. ErrCodeOperationNotFound = "OperationNotFound" + // ErrCodeRequestLimitExceeded for service response error code + // "RequestLimitExceeded". + // + // The operation can't be completed because you've reached the quota for the + // number of requests. For more information, see AWS Cloud Map API request throttling + // quota (https://docs.aws.amazon.com/cloud-map/latest/dg/throttling.html) in + // the AWS Cloud Map Developer Guide. + ErrCodeRequestLimitExceeded = "RequestLimitExceeded" + // ErrCodeResourceInUse for service response error code // "ResourceInUse". // @@ -64,10 +73,16 @@ const ( // ErrCodeResourceLimitExceeded for service response error code // "ResourceLimitExceeded". // - // The resource can't be created because you've reached the limit on the number + // The resource can't be created because you've reached the quota on the number // of resources. ErrCodeResourceLimitExceeded = "ResourceLimitExceeded" + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The operation can't be completed because the resource was not found. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + // ErrCodeServiceAlreadyExists for service response error code // "ServiceAlreadyExists". 
// @@ -80,18 +95,28 @@ const ( // // No service exists with the specified ID. ErrCodeServiceNotFound = "ServiceNotFound" + + // ErrCodeTooManyTagsException for service response error code + // "TooManyTagsException". + // + // The list of tags on the resource is over the quota. The maximum number of + // tags that can be applied to a resource is 50. + ErrCodeTooManyTagsException = "TooManyTagsException" ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ - "CustomHealthNotFound": newErrorCustomHealthNotFound, - "DuplicateRequest": newErrorDuplicateRequest, - "InstanceNotFound": newErrorInstanceNotFound, - "InvalidInput": newErrorInvalidInput, - "NamespaceAlreadyExists": newErrorNamespaceAlreadyExists, - "NamespaceNotFound": newErrorNamespaceNotFound, - "OperationNotFound": newErrorOperationNotFound, - "ResourceInUse": newErrorResourceInUse, - "ResourceLimitExceeded": newErrorResourceLimitExceeded, - "ServiceAlreadyExists": newErrorServiceAlreadyExists, - "ServiceNotFound": newErrorServiceNotFound, + "CustomHealthNotFound": newErrorCustomHealthNotFound, + "DuplicateRequest": newErrorDuplicateRequest, + "InstanceNotFound": newErrorInstanceNotFound, + "InvalidInput": newErrorInvalidInput, + "NamespaceAlreadyExists": newErrorNamespaceAlreadyExists, + "NamespaceNotFound": newErrorNamespaceNotFound, + "OperationNotFound": newErrorOperationNotFound, + "RequestLimitExceeded": newErrorRequestLimitExceeded, + "ResourceInUse": newErrorResourceInUse, + "ResourceLimitExceeded": newErrorResourceLimitExceeded, + "ResourceNotFoundException": newErrorResourceNotFoundException, + "ServiceAlreadyExists": newErrorServiceAlreadyExists, + "ServiceNotFound": newErrorServiceNotFound, + "TooManyTagsException": newErrorTooManyTagsException, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go b/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go index 5287ae479..eee9f9328 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicequotas/api.go b/vendor/github.com/aws/aws-sdk-go/service/servicequotas/api.go index e1df5bbc1..0d705da1a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicequotas/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicequotas/api.go @@ -2008,8 +2008,8 @@ func (c *ServiceQuotas) RequestServiceQuotaIncreaseWithContext(ctx aws.Context, // The action you attempted is not allowed unless Service Access with Service // Quotas is enabled in your organization. To enable, call AssociateServiceQuotaTemplate. 
type AWSServiceAccessNotEnabledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2026,17 +2026,17 @@ func (s AWSServiceAccessNotEnabledException) GoString() string { func newErrorAWSServiceAccessNotEnabledException(v protocol.ResponseMetadata) error { return &AWSServiceAccessNotEnabledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AWSServiceAccessNotEnabledException) Code() string { +func (s *AWSServiceAccessNotEnabledException) Code() string { return "AWSServiceAccessNotEnabledException" } // Message returns the exception's message. -func (s AWSServiceAccessNotEnabledException) Message() string { +func (s *AWSServiceAccessNotEnabledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2044,28 +2044,28 @@ func (s AWSServiceAccessNotEnabledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AWSServiceAccessNotEnabledException) OrigErr() error { +func (s *AWSServiceAccessNotEnabledException) OrigErr() error { return nil } -func (s AWSServiceAccessNotEnabledException) Error() string { +func (s *AWSServiceAccessNotEnabledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AWSServiceAccessNotEnabledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AWSServiceAccessNotEnabledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AWSServiceAccessNotEnabledException) RequestID() string { - return s.respMetadata.RequestID +func (s *AWSServiceAccessNotEnabledException) RequestID() string { + return s.RespMetadata.RequestID } // You do not have sufficient access to perform this action. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2082,17 +2082,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2100,22 +2100,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } type AssociateServiceQuotaTemplateInput struct { @@ -2237,8 +2237,8 @@ func (s DeleteServiceQuotaIncreaseRequestFromTemplateOutput) GoString() string { // You can't perform this action because a dependency does not have access. type DependencyAccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2255,17 +2255,17 @@ func (s DependencyAccessDeniedException) GoString() string { func newErrorDependencyAccessDeniedException(v protocol.ResponseMetadata) error { return &DependencyAccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DependencyAccessDeniedException) Code() string { +func (s *DependencyAccessDeniedException) Code() string { return "DependencyAccessDeniedException" } // Message returns the exception's message. -func (s DependencyAccessDeniedException) Message() string { +func (s *DependencyAccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2273,22 +2273,22 @@ func (s DependencyAccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DependencyAccessDeniedException) OrigErr() error { +func (s *DependencyAccessDeniedException) OrigErr() error { return nil } -func (s DependencyAccessDeniedException) Error() string { +func (s *DependencyAccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DependencyAccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DependencyAccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DependencyAccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *DependencyAccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } type DisassociateServiceQuotaTemplateInput struct { @@ -2731,8 +2731,8 @@ func (s *GetServiceQuotaOutput) SetQuota(v *ServiceQuota) *GetServiceQuotaOutput // Invalid input was provided. type IllegalArgumentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2749,17 +2749,17 @@ func (s IllegalArgumentException) GoString() string { func newErrorIllegalArgumentException(v protocol.ResponseMetadata) error { return &IllegalArgumentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IllegalArgumentException) Code() string { +func (s *IllegalArgumentException) Code() string { return "IllegalArgumentException" } // Message returns the exception's message. 
-func (s IllegalArgumentException) Message() string { +func (s *IllegalArgumentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2767,28 +2767,28 @@ func (s IllegalArgumentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IllegalArgumentException) OrigErr() error { +func (s *IllegalArgumentException) OrigErr() error { return nil } -func (s IllegalArgumentException) Error() string { +func (s *IllegalArgumentException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IllegalArgumentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IllegalArgumentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IllegalArgumentException) RequestID() string { - return s.respMetadata.RequestID +func (s *IllegalArgumentException) RequestID() string { + return s.RespMetadata.RequestID } // Invalid input was provided. type InvalidPaginationTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2805,17 +2805,17 @@ func (s InvalidPaginationTokenException) GoString() string { func newErrorInvalidPaginationTokenException(v protocol.ResponseMetadata) error { return &InvalidPaginationTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPaginationTokenException) Code() string { +func (s *InvalidPaginationTokenException) Code() string { return "InvalidPaginationTokenException" } // Message returns the exception's message. -func (s InvalidPaginationTokenException) Message() string { +func (s *InvalidPaginationTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2823,28 +2823,28 @@ func (s InvalidPaginationTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPaginationTokenException) OrigErr() error { +func (s *InvalidPaginationTokenException) OrigErr() error { return nil } -func (s InvalidPaginationTokenException) Error() string { +func (s *InvalidPaginationTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPaginationTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPaginationTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPaginationTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPaginationTokenException) RequestID() string { + return s.RespMetadata.RequestID } // Invalid input was provided for the . 
type InvalidResourceStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -2861,17 +2861,17 @@ func (s InvalidResourceStateException) GoString() string { func newErrorInvalidResourceStateException(v protocol.ResponseMetadata) error { return &InvalidResourceStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidResourceStateException) Code() string { +func (s *InvalidResourceStateException) Code() string { return "InvalidResourceStateException" } // Message returns the exception's message. -func (s InvalidResourceStateException) Message() string { +func (s *InvalidResourceStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2879,22 +2879,22 @@ func (s InvalidResourceStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResourceStateException) OrigErr() error { +func (s *InvalidResourceStateException) OrigErr() error { return nil } -func (s InvalidResourceStateException) Error() string { +func (s *InvalidResourceStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidResourceStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResourceStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidResourceStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResourceStateException) RequestID() string { + return s.RespMetadata.RequestID } type ListAWSDefaultServiceQuotasInput struct { @@ -3638,8 +3638,8 @@ func (s *MetricInfo) SetMetricStatisticRecommendation(v string) *MetricInfo { // The account making this call is not a member of an organization. type NoAvailableOrganizationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3656,17 +3656,17 @@ func (s NoAvailableOrganizationException) GoString() string { func newErrorNoAvailableOrganizationException(v protocol.ResponseMetadata) error { return &NoAvailableOrganizationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoAvailableOrganizationException) Code() string { +func (s *NoAvailableOrganizationException) Code() string { return "NoAvailableOrganizationException" } // Message returns the exception's message. -func (s NoAvailableOrganizationException) Message() string { +func (s *NoAvailableOrganizationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3674,28 +3674,28 @@ func (s NoAvailableOrganizationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s NoAvailableOrganizationException) OrigErr() error { +func (s *NoAvailableOrganizationException) OrigErr() error { return nil } -func (s NoAvailableOrganizationException) Error() string { +func (s *NoAvailableOrganizationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoAvailableOrganizationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoAvailableOrganizationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoAvailableOrganizationException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoAvailableOrganizationException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource does not exist. type NoSuchResourceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3712,17 +3712,17 @@ func (s NoSuchResourceException) GoString() string { func newErrorNoSuchResourceException(v protocol.ResponseMetadata) error { return &NoSuchResourceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoSuchResourceException) Code() string { +func (s *NoSuchResourceException) Code() string { return "NoSuchResourceException" } // Message returns the exception's message. -func (s NoSuchResourceException) Message() string { +func (s *NoSuchResourceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3730,29 +3730,29 @@ func (s NoSuchResourceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoSuchResourceException) OrigErr() error { +func (s *NoSuchResourceException) OrigErr() error { return nil } -func (s NoSuchResourceException) Error() string { +func (s *NoSuchResourceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoSuchResourceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoSuchResourceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoSuchResourceException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoSuchResourceException) RequestID() string { + return s.RespMetadata.RequestID } // The organization that your account belongs to, is not in All Features mode. // To enable all features mode, see EnableAllFeatures (https://docs.aws.amazon.com/organizations/latest/APIReference/API_EnableAllFeatures.html). type OrganizationNotInAllFeaturesModeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3769,17 +3769,17 @@ func (s OrganizationNotInAllFeaturesModeException) GoString() string { func newErrorOrganizationNotInAllFeaturesModeException(v protocol.ResponseMetadata) error { return &OrganizationNotInAllFeaturesModeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s OrganizationNotInAllFeaturesModeException) Code() string { +func (s *OrganizationNotInAllFeaturesModeException) Code() string { return "OrganizationNotInAllFeaturesModeException" } // Message returns the exception's message. -func (s OrganizationNotInAllFeaturesModeException) Message() string { +func (s *OrganizationNotInAllFeaturesModeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3787,22 +3787,22 @@ func (s OrganizationNotInAllFeaturesModeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OrganizationNotInAllFeaturesModeException) OrigErr() error { +func (s *OrganizationNotInAllFeaturesModeException) OrigErr() error { return nil } -func (s OrganizationNotInAllFeaturesModeException) Error() string { +func (s *OrganizationNotInAllFeaturesModeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OrganizationNotInAllFeaturesModeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OrganizationNotInAllFeaturesModeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OrganizationNotInAllFeaturesModeException) RequestID() string { - return s.respMetadata.RequestID +func (s *OrganizationNotInAllFeaturesModeException) RequestID() string { + return s.RespMetadata.RequestID } type PutServiceQuotaIncreaseRequestIntoTemplateInput struct { @@ -3921,8 +3921,8 @@ func (s *PutServiceQuotaIncreaseRequestIntoTemplateOutput) SetServiceQuotaIncrea // some of the relevant resources, or use Service Quotas to request a service // quota increase. type QuotaExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3939,17 +3939,17 @@ func (s QuotaExceededException) GoString() string { func newErrorQuotaExceededException(v protocol.ResponseMetadata) error { return &QuotaExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s QuotaExceededException) Code() string { +func (s *QuotaExceededException) Code() string { return "QuotaExceededException" } // Message returns the exception's message. -func (s QuotaExceededException) Message() string { +func (s *QuotaExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3957,22 +3957,22 @@ func (s QuotaExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s QuotaExceededException) OrigErr() error { +func (s *QuotaExceededException) OrigErr() error { return nil } -func (s QuotaExceededException) Error() string { +func (s *QuotaExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s QuotaExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *QuotaExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s QuotaExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *QuotaExceededException) RequestID() string { + return s.RespMetadata.RequestID } // A structure that contains information about the quota period. @@ -4248,8 +4248,8 @@ func (s *RequestedServiceQuotaChange) SetUnit(v string) *RequestedServiceQuotaCh // The specified resource already exists. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4266,17 +4266,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4284,28 +4284,28 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // Something went wrong. type ServiceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4322,17 +4322,17 @@ func (s ServiceException) GoString() string { func newErrorServiceException(v protocol.ResponseMetadata) error { return &ServiceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceException) Code() string { +func (s *ServiceException) Code() string { return "ServiceException" } // Message returns the exception's message. -func (s ServiceException) Message() string { +func (s *ServiceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4340,22 +4340,22 @@ func (s ServiceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ServiceException) OrigErr() error { +func (s *ServiceException) OrigErr() error { return nil } -func (s ServiceException) Error() string { +func (s *ServiceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceException) RequestID() string { + return s.RespMetadata.RequestID } // A structure that contains the ServiceName and ServiceCode. It does not include @@ -4609,8 +4609,8 @@ func (s *ServiceQuotaIncreaseRequestInTemplate) SetUnit(v string) *ServiceQuotaI // // To use the template, call AssociateServiceQuotaTemplate. type ServiceQuotaTemplateNotInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4627,17 +4627,17 @@ func (s ServiceQuotaTemplateNotInUseException) GoString() string { func newErrorServiceQuotaTemplateNotInUseException(v protocol.ResponseMetadata) error { return &ServiceQuotaTemplateNotInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceQuotaTemplateNotInUseException) Code() string { +func (s *ServiceQuotaTemplateNotInUseException) Code() string { return "ServiceQuotaTemplateNotInUseException" } // Message returns the exception's message. -func (s ServiceQuotaTemplateNotInUseException) Message() string { +func (s *ServiceQuotaTemplateNotInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4645,29 +4645,29 @@ func (s ServiceQuotaTemplateNotInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceQuotaTemplateNotInUseException) OrigErr() error { +func (s *ServiceQuotaTemplateNotInUseException) OrigErr() error { return nil } -func (s ServiceQuotaTemplateNotInUseException) Error() string { +func (s *ServiceQuotaTemplateNotInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceQuotaTemplateNotInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceQuotaTemplateNotInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceQuotaTemplateNotInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceQuotaTemplateNotInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The Service Quotas template is not available in the Region where you are // making the request. Please make the request in us-east-1. 
type TemplatesNotAvailableInRegionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4684,17 +4684,17 @@ func (s TemplatesNotAvailableInRegionException) GoString() string { func newErrorTemplatesNotAvailableInRegionException(v protocol.ResponseMetadata) error { return &TemplatesNotAvailableInRegionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TemplatesNotAvailableInRegionException) Code() string { +func (s *TemplatesNotAvailableInRegionException) Code() string { return "TemplatesNotAvailableInRegionException" } // Message returns the exception's message. -func (s TemplatesNotAvailableInRegionException) Message() string { +func (s *TemplatesNotAvailableInRegionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4702,29 +4702,29 @@ func (s TemplatesNotAvailableInRegionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TemplatesNotAvailableInRegionException) OrigErr() error { +func (s *TemplatesNotAvailableInRegionException) OrigErr() error { return nil } -func (s TemplatesNotAvailableInRegionException) Error() string { +func (s *TemplatesNotAvailableInRegionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TemplatesNotAvailableInRegionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TemplatesNotAvailableInRegionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TemplatesNotAvailableInRegionException) RequestID() string { - return s.respMetadata.RequestID +func (s *TemplatesNotAvailableInRegionException) RequestID() string { + return s.RespMetadata.RequestID } // Due to throttling, the request was denied. Slow down the rate of request // calls, or request an increase for this quota. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4741,17 +4741,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4759,22 +4759,22 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } const ( @@ -4791,6 +4791,16 @@ const ( ErrorCodeServiceQuotaNotAvailableError = "SERVICE_QUOTA_NOT_AVAILABLE_ERROR" ) +// ErrorCode_Values returns all elements of the ErrorCode enum +func ErrorCode_Values() []string { + return []string{ + ErrorCodeDependencyAccessDeniedError, + ErrorCodeDependencyThrottlingError, + ErrorCodeDependencyServiceError, + ErrorCodeServiceQuotaNotAvailableError, + } +} + const ( // PeriodUnitMicrosecond is a PeriodUnit enum value PeriodUnitMicrosecond = "MICROSECOND" @@ -4814,6 +4824,19 @@ const ( PeriodUnitWeek = "WEEK" ) +// PeriodUnit_Values returns all elements of the PeriodUnit enum +func PeriodUnit_Values() []string { + return []string{ + PeriodUnitMicrosecond, + PeriodUnitMillisecond, + PeriodUnitSecond, + PeriodUnitMinute, + PeriodUnitHour, + PeriodUnitDay, + PeriodUnitWeek, + } +} + const ( // RequestStatusPending is a RequestStatus enum value RequestStatusPending = "PENDING" @@ -4831,6 +4854,17 @@ const ( RequestStatusCaseClosed = "CASE_CLOSED" ) +// RequestStatus_Values returns all elements of the RequestStatus enum +func RequestStatus_Values() []string { + return []string{ + RequestStatusPending, + RequestStatusCaseOpened, + RequestStatusApproved, + RequestStatusDenied, + RequestStatusCaseClosed, + } +} + const ( // ServiceQuotaTemplateAssociationStatusAssociated is a ServiceQuotaTemplateAssociationStatus enum value ServiceQuotaTemplateAssociationStatusAssociated = "ASSOCIATED" @@ -4838,3 +4872,11 @@ const ( // ServiceQuotaTemplateAssociationStatusDisassociated is a ServiceQuotaTemplateAssociationStatus enum value ServiceQuotaTemplateAssociationStatusDisassociated = "DISASSOCIATED" ) + +// ServiceQuotaTemplateAssociationStatus_Values returns all elements of the ServiceQuotaTemplateAssociationStatus enum +func ServiceQuotaTemplateAssociationStatus_Values() []string { + return []string{ + ServiceQuotaTemplateAssociationStatusAssociated, + ServiceQuotaTemplateAssociationStatusDisassociated, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go b/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go index 6b126476e..4020a5393 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/api.go b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go index 5a2bc39fd..7dc9aef8a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ses/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go @@ -15124,6 +15124,14 @@ const ( BehaviorOnMXFailureRejectMessage = "RejectMessage" ) +// BehaviorOnMXFailure_Values returns all elements of the BehaviorOnMXFailure enum +func BehaviorOnMXFailure_Values() []string { + 
return []string{ + BehaviorOnMXFailureUseDefaultValue, + BehaviorOnMXFailureRejectMessage, + } +} + const ( // BounceTypeDoesNotExist is a BounceType enum value BounceTypeDoesNotExist = "DoesNotExist" @@ -15144,6 +15152,18 @@ const ( BounceTypeTemporaryFailure = "TemporaryFailure" ) +// BounceType_Values returns all elements of the BounceType enum +func BounceType_Values() []string { + return []string{ + BounceTypeDoesNotExist, + BounceTypeMessageTooLarge, + BounceTypeExceededQuota, + BounceTypeContentRejected, + BounceTypeUndefined, + BounceTypeTemporaryFailure, + } +} + const ( // BulkEmailStatusSuccess is a BulkEmailStatus enum value BulkEmailStatusSuccess = "Success" @@ -15188,6 +15208,26 @@ const ( BulkEmailStatusFailed = "Failed" ) +// BulkEmailStatus_Values returns all elements of the BulkEmailStatus enum +func BulkEmailStatus_Values() []string { + return []string{ + BulkEmailStatusSuccess, + BulkEmailStatusMessageRejected, + BulkEmailStatusMailFromDomainNotVerified, + BulkEmailStatusConfigurationSetDoesNotExist, + BulkEmailStatusTemplateDoesNotExist, + BulkEmailStatusAccountSuspended, + BulkEmailStatusAccountThrottled, + BulkEmailStatusAccountDailyQuotaExceeded, + BulkEmailStatusInvalidSendingPoolName, + BulkEmailStatusAccountSendingPaused, + BulkEmailStatusConfigurationSetSendingPaused, + BulkEmailStatusInvalidParameterValue, + BulkEmailStatusTransientFailure, + BulkEmailStatusFailed, + } +} + const ( // ConfigurationSetAttributeEventDestinations is a ConfigurationSetAttribute enum value ConfigurationSetAttributeEventDestinations = "eventDestinations" @@ -15202,6 +15242,16 @@ const ( ConfigurationSetAttributeReputationOptions = "reputationOptions" ) +// ConfigurationSetAttribute_Values returns all elements of the ConfigurationSetAttribute enum +func ConfigurationSetAttribute_Values() []string { + return []string{ + ConfigurationSetAttributeEventDestinations, + ConfigurationSetAttributeTrackingOptions, + ConfigurationSetAttributeDeliveryOptions, + ConfigurationSetAttributeReputationOptions, + } +} + const ( // CustomMailFromStatusPending is a CustomMailFromStatus enum value CustomMailFromStatusPending = "Pending" @@ -15216,6 +15266,16 @@ const ( CustomMailFromStatusTemporaryFailure = "TemporaryFailure" ) +// CustomMailFromStatus_Values returns all elements of the CustomMailFromStatus enum +func CustomMailFromStatus_Values() []string { + return []string{ + CustomMailFromStatusPending, + CustomMailFromStatusSuccess, + CustomMailFromStatusFailed, + CustomMailFromStatusTemporaryFailure, + } +} + const ( // DimensionValueSourceMessageTag is a DimensionValueSource enum value DimensionValueSourceMessageTag = "messageTag" @@ -15227,6 +15287,15 @@ const ( DimensionValueSourceLinkTag = "linkTag" ) +// DimensionValueSource_Values returns all elements of the DimensionValueSource enum +func DimensionValueSource_Values() []string { + return []string{ + DimensionValueSourceMessageTag, + DimensionValueSourceEmailHeader, + DimensionValueSourceLinkTag, + } +} + const ( // DsnActionFailed is a DsnAction enum value DsnActionFailed = "failed" @@ -15244,6 +15313,17 @@ const ( DsnActionExpanded = "expanded" ) +// DsnAction_Values returns all elements of the DsnAction enum +func DsnAction_Values() []string { + return []string{ + DsnActionFailed, + DsnActionDelayed, + DsnActionDelivered, + DsnActionRelayed, + DsnActionExpanded, + } +} + const ( // EventTypeSend is a EventType enum value EventTypeSend = "send" @@ -15270,6 +15350,20 @@ const ( EventTypeRenderingFailure = "renderingFailure" ) +// 
EventType_Values returns all elements of the EventType enum +func EventType_Values() []string { + return []string{ + EventTypeSend, + EventTypeReject, + EventTypeBounce, + EventTypeComplaint, + EventTypeDelivery, + EventTypeOpen, + EventTypeClick, + EventTypeRenderingFailure, + } +} + const ( // IdentityTypeEmailAddress is a IdentityType enum value IdentityTypeEmailAddress = "EmailAddress" @@ -15278,6 +15372,14 @@ const ( IdentityTypeDomain = "Domain" ) +// IdentityType_Values returns all elements of the IdentityType enum +func IdentityType_Values() []string { + return []string{ + IdentityTypeEmailAddress, + IdentityTypeDomain, + } +} + const ( // InvocationTypeEvent is a InvocationType enum value InvocationTypeEvent = "Event" @@ -15286,6 +15388,14 @@ const ( InvocationTypeRequestResponse = "RequestResponse" ) +// InvocationType_Values returns all elements of the InvocationType enum +func InvocationType_Values() []string { + return []string{ + InvocationTypeEvent, + InvocationTypeRequestResponse, + } +} + const ( // NotificationTypeBounce is a NotificationType enum value NotificationTypeBounce = "Bounce" @@ -15297,6 +15407,15 @@ const ( NotificationTypeDelivery = "Delivery" ) +// NotificationType_Values returns all elements of the NotificationType enum +func NotificationType_Values() []string { + return []string{ + NotificationTypeBounce, + NotificationTypeComplaint, + NotificationTypeDelivery, + } +} + const ( // ReceiptFilterPolicyBlock is a ReceiptFilterPolicy enum value ReceiptFilterPolicyBlock = "Block" @@ -15305,6 +15424,14 @@ const ( ReceiptFilterPolicyAllow = "Allow" ) +// ReceiptFilterPolicy_Values returns all elements of the ReceiptFilterPolicy enum +func ReceiptFilterPolicy_Values() []string { + return []string{ + ReceiptFilterPolicyBlock, + ReceiptFilterPolicyAllow, + } +} + const ( // SNSActionEncodingUtf8 is a SNSActionEncoding enum value SNSActionEncodingUtf8 = "UTF-8" @@ -15313,11 +15440,26 @@ const ( SNSActionEncodingBase64 = "Base64" ) +// SNSActionEncoding_Values returns all elements of the SNSActionEncoding enum +func SNSActionEncoding_Values() []string { + return []string{ + SNSActionEncodingUtf8, + SNSActionEncodingBase64, + } +} + const ( // StopScopeRuleSet is a StopScope enum value StopScopeRuleSet = "RuleSet" ) +// StopScope_Values returns all elements of the StopScope enum +func StopScope_Values() []string { + return []string{ + StopScopeRuleSet, + } +} + const ( // TlsPolicyRequire is a TlsPolicy enum value TlsPolicyRequire = "Require" @@ -15326,6 +15468,14 @@ const ( TlsPolicyOptional = "Optional" ) +// TlsPolicy_Values returns all elements of the TlsPolicy enum +func TlsPolicy_Values() []string { + return []string{ + TlsPolicyRequire, + TlsPolicyOptional, + } +} + const ( // VerificationStatusPending is a VerificationStatus enum value VerificationStatusPending = "Pending" @@ -15342,3 +15492,14 @@ const ( // VerificationStatusNotStarted is a VerificationStatus enum value VerificationStatusNotStarted = "NotStarted" ) + +// VerificationStatus_Values returns all elements of the VerificationStatus enum +func VerificationStatus_Values() []string { + return []string{ + VerificationStatusPending, + VerificationStatusSuccess, + VerificationStatusFailed, + VerificationStatusTemporaryFailure, + VerificationStatusNotStarted, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/service.go b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go index fe2dd8de0..20afce37b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ses/service.go +++ 
b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sfn/api.go b/vendor/github.com/aws/aws-sdk-go/service/sfn/api.go index 062b082f3..c82f0daa0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sfn/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sfn/api.go @@ -172,10 +172,11 @@ func (c *SFN) CreateStateMachineRequest(input *CreateStateMachineInput) (req *re // // CreateStateMachine is an idempotent API. Subsequent requests won’t create // a duplicate resource if it was already created. CreateStateMachine's idempotency -// check is based on the state machine name, definition, type, and LoggingConfiguration. -// If a following request has a different roleArn or tags, Step Functions will -// ignore these differences and treat it as an idempotent request of the previous. -// In this case, roleArn and tags will not be updated, even if they are different. +// check is based on the state machine name, definition, type, LoggingConfiguration +// and TracingConfiguration. If a following request has a different roleArn +// or tags, Step Functions will ignore these differences and treat it as an +// idempotent request of the previous. In this case, roleArn and tags will not +// be updated, even if they are different. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -196,6 +197,10 @@ func (c *SFN) CreateStateMachineRequest(input *CreateStateMachineInput) (req *re // // * InvalidLoggingConfiguration // +// * InvalidTracingConfiguration +// Your tracingConfiguration key does not match, or enabled has not been set +// to true or false. +// // * StateMachineAlreadyExists // A state machine with the same name but a different definition or role ARN // already exists. @@ -2243,6 +2248,10 @@ func (c *SFN) UpdateStateMachineRequest(input *UpdateStateMachineInput) (req *re // // * InvalidLoggingConfiguration // +// * InvalidTracingConfiguration +// Your tracingConfiguration key does not match, or enabled has not been set +// to true or false. +// // * MissingRequiredParameter // Request is missing a required parameter. This error occurs if both definition // and roleArn are not specified. @@ -2277,8 +2286,8 @@ func (c *SFN) UpdateStateMachineWithContext(ctx aws.Context, input *UpdateStateM // The specified activity does not exist. type ActivityDoesNotExist struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2295,17 +2304,17 @@ func (s ActivityDoesNotExist) GoString() string { func newErrorActivityDoesNotExist(v protocol.ResponseMetadata) error { return &ActivityDoesNotExist{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ActivityDoesNotExist) Code() string { +func (s *ActivityDoesNotExist) Code() string { return "ActivityDoesNotExist" } // Message returns the exception's message. 
-func (s ActivityDoesNotExist) Message() string { +func (s *ActivityDoesNotExist) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2313,22 +2322,22 @@ func (s ActivityDoesNotExist) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ActivityDoesNotExist) OrigErr() error { +func (s *ActivityDoesNotExist) OrigErr() error { return nil } -func (s ActivityDoesNotExist) Error() string { +func (s *ActivityDoesNotExist) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ActivityDoesNotExist) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ActivityDoesNotExist) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ActivityDoesNotExist) RequestID() string { - return s.respMetadata.RequestID +func (s *ActivityDoesNotExist) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about an activity that failed during an execution. @@ -2367,8 +2376,8 @@ func (s *ActivityFailedEventDetails) SetError(v string) *ActivityFailedEventDeta // The maximum number of activities has been reached. Existing activities must // be deleted before a new activity can be created. type ActivityLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2385,17 +2394,17 @@ func (s ActivityLimitExceeded) GoString() string { func newErrorActivityLimitExceeded(v protocol.ResponseMetadata) error { return &ActivityLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ActivityLimitExceeded) Code() string { +func (s *ActivityLimitExceeded) Code() string { return "ActivityLimitExceeded" } // Message returns the exception's message. -func (s ActivityLimitExceeded) Message() string { +func (s *ActivityLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2403,22 +2412,22 @@ func (s ActivityLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ActivityLimitExceeded) OrigErr() error { +func (s *ActivityLimitExceeded) OrigErr() error { return nil } -func (s ActivityLimitExceeded) Error() string { +func (s *ActivityLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ActivityLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ActivityLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ActivityLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *ActivityLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about an activity. @@ -2525,9 +2534,13 @@ type ActivityScheduledEventDetails struct { // The maximum allowed duration between two heartbeats for the activity task. HeartbeatInSeconds *int64 `locationName:"heartbeatInSeconds" type:"long"` - // The JSON data input to the activity task. + // The JSON data input to the activity task. Length constraints apply to the + // payload size, and are expressed as bytes in UTF-8 encoding. 
Input *string `locationName:"input" type:"string" sensitive:"true"` + // Contains details about the input for an execution history event. + InputDetails *HistoryEventExecutionDataDetails `locationName:"inputDetails" type:"structure"` + // The Amazon Resource Name (ARN) of the scheduled activity. // // Resource is a required field @@ -2559,6 +2572,12 @@ func (s *ActivityScheduledEventDetails) SetInput(v string) *ActivityScheduledEve return s } +// SetInputDetails sets the InputDetails field's value. +func (s *ActivityScheduledEventDetails) SetInputDetails(v *HistoryEventExecutionDataDetails) *ActivityScheduledEventDetails { + s.InputDetails = v + return s +} + // SetResource sets the Resource field's value. func (s *ActivityScheduledEventDetails) SetResource(v string) *ActivityScheduledEventDetails { s.Resource = &v @@ -2601,8 +2620,12 @@ func (s *ActivityStartedEventDetails) SetWorkerName(v string) *ActivityStartedEv type ActivitySucceededEventDetails struct { _ struct{} `type:"structure"` - // The JSON data output by the activity task. + // The JSON data output by the activity task. Length constraints apply to the + // payload size, and are expressed as bytes in UTF-8 encoding. Output *string `locationName:"output" type:"string" sensitive:"true"` + + // Contains details about the output of an execution history event. + OutputDetails *HistoryEventExecutionDataDetails `locationName:"outputDetails" type:"structure"` } // String returns the string representation @@ -2621,6 +2644,12 @@ func (s *ActivitySucceededEventDetails) SetOutput(v string) *ActivitySucceededEv return s } +// SetOutputDetails sets the OutputDetails field's value. +func (s *ActivitySucceededEventDetails) SetOutputDetails(v *HistoryEventExecutionDataDetails) *ActivitySucceededEventDetails { + s.OutputDetails = v + return s +} + // Contains details about an activity timeout that occurred during an execution. type ActivityTimedOutEventDetails struct { _ struct{} `type:"structure"` @@ -2657,8 +2686,8 @@ func (s *ActivityTimedOutEventDetails) SetError(v string) *ActivityTimedOutEvent // The maximum number of workers concurrently polling for activity tasks has // been reached. type ActivityWorkerLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2675,17 +2704,17 @@ func (s ActivityWorkerLimitExceeded) GoString() string { func newErrorActivityWorkerLimitExceeded(v protocol.ResponseMetadata) error { return &ActivityWorkerLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ActivityWorkerLimitExceeded) Code() string { +func (s *ActivityWorkerLimitExceeded) Code() string { return "ActivityWorkerLimitExceeded" } // Message returns the exception's message. -func (s ActivityWorkerLimitExceeded) Message() string { +func (s *ActivityWorkerLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2693,22 +2722,47 @@ func (s ActivityWorkerLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ActivityWorkerLimitExceeded) OrigErr() error { +func (s *ActivityWorkerLimitExceeded) OrigErr() error { return nil } -func (s ActivityWorkerLimitExceeded) Error() string { +func (s *ActivityWorkerLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ActivityWorkerLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ActivityWorkerLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ActivityWorkerLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *ActivityWorkerLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID +} + +// Provides details about execution input or output. +type CloudWatchEventsExecutionDataDetails struct { + _ struct{} `type:"structure"` + + // Indicates whether input or output was included in the response. Always true + // for API calls. + Included *bool `locationName:"included" type:"boolean"` +} + +// String returns the string representation +func (s CloudWatchEventsExecutionDataDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudWatchEventsExecutionDataDetails) GoString() string { + return s.String() +} + +// SetIncluded sets the Included field's value. +func (s *CloudWatchEventsExecutionDataDetails) SetIncluded(v bool) *CloudWatchEventsExecutionDataDetails { + s.Included = &v + return s } type CloudWatchLogsLogGroup struct { @@ -2922,6 +2976,9 @@ type CreateStateMachineInput struct { // _ . : / = + - @. Tags []*Tag `locationName:"tags" type:"list"` + // Selects whether AWS X-Ray tracing is enabled. + TracingConfiguration *TracingConfiguration `locationName:"tracingConfiguration" type:"structure"` + // Determines whether a Standard or Express state machine is created. The default // is STANDARD. You cannot update the type of a state machine once it has been // created. @@ -3011,6 +3068,12 @@ func (s *CreateStateMachineInput) SetTags(v []*Tag) *CreateStateMachineInput { return s } +// SetTracingConfiguration sets the TracingConfiguration field's value. +func (s *CreateStateMachineInput) SetTracingConfiguration(v *TracingConfiguration) *CreateStateMachineInput { + s.TracingConfiguration = v + return s +} + // SetType sets the Type field's value. func (s *CreateStateMachineInput) SetType(v string) *CreateStateMachineInput { s.Type = &v @@ -3315,10 +3378,12 @@ type DescribeExecutionOutput struct { // ExecutionArn is a required field ExecutionArn *string `locationName:"executionArn" min:"1" type:"string" required:"true"` - // The string that contains the JSON input data of the execution. - // - // Input is a required field - Input *string `locationName:"input" type:"string" required:"true" sensitive:"true"` + // The string that contains the JSON input data of the execution. Length constraints + // apply to the payload size, and are expressed as bytes in UTF-8 encoding. + Input *string `locationName:"input" type:"string" sensitive:"true"` + + // Provides details about execution input or output. + InputDetails *CloudWatchEventsExecutionDataDetails `locationName:"inputDetails" type:"structure"` // The name of the execution. // @@ -3338,12 +3403,16 @@ type DescribeExecutionOutput struct { // A-Z, a-z, - and _. Name *string `locationName:"name" min:"1" type:"string"` - // The JSON output data of the execution. 
+ // The JSON output data of the execution. Length constraints apply to the payload + // size, and are expressed as bytes in UTF-8 encoding. // // This field is set only if the execution succeeds. If the execution fails, // this field is null. Output *string `locationName:"output" type:"string" sensitive:"true"` + // Provides details about execution input or output. + OutputDetails *CloudWatchEventsExecutionDataDetails `locationName:"outputDetails" type:"structure"` + // The date the execution is started. // // StartDate is a required field @@ -3361,6 +3430,9 @@ type DescribeExecutionOutput struct { // If the execution has already ended, the date the execution stopped. StopDate *time.Time `locationName:"stopDate" type:"timestamp"` + + // The AWS X-Ray trace header which was passed to the execution. + TraceHeader *string `locationName:"traceHeader" type:"string"` } // String returns the string representation @@ -3385,6 +3457,12 @@ func (s *DescribeExecutionOutput) SetInput(v string) *DescribeExecutionOutput { return s } +// SetInputDetails sets the InputDetails field's value. +func (s *DescribeExecutionOutput) SetInputDetails(v *CloudWatchEventsExecutionDataDetails) *DescribeExecutionOutput { + s.InputDetails = v + return s +} + // SetName sets the Name field's value. func (s *DescribeExecutionOutput) SetName(v string) *DescribeExecutionOutput { s.Name = &v @@ -3397,6 +3475,12 @@ func (s *DescribeExecutionOutput) SetOutput(v string) *DescribeExecutionOutput { return s } +// SetOutputDetails sets the OutputDetails field's value. +func (s *DescribeExecutionOutput) SetOutputDetails(v *CloudWatchEventsExecutionDataDetails) *DescribeExecutionOutput { + s.OutputDetails = v + return s +} + // SetStartDate sets the StartDate field's value. func (s *DescribeExecutionOutput) SetStartDate(v time.Time) *DescribeExecutionOutput { s.StartDate = &v @@ -3421,6 +3505,12 @@ func (s *DescribeExecutionOutput) SetStopDate(v time.Time) *DescribeExecutionOut return s } +// SetTraceHeader sets the TraceHeader field's value. +func (s *DescribeExecutionOutput) SetTraceHeader(v string) *DescribeExecutionOutput { + s.TraceHeader = &v + return s +} + type DescribeStateMachineForExecutionInput struct { _ struct{} `type:"structure"` @@ -3491,6 +3581,9 @@ type DescribeStateMachineForExecutionOutput struct { // StateMachineArn is a required field StateMachineArn *string `locationName:"stateMachineArn" min:"1" type:"string" required:"true"` + // Selects whether AWS X-Ray tracing is enabled. + TracingConfiguration *TracingConfiguration `locationName:"tracingConfiguration" type:"structure"` + // The date and time the state machine associated with an execution was updated. // For a newly created state machine, this is the creation date. // @@ -3538,6 +3631,12 @@ func (s *DescribeStateMachineForExecutionOutput) SetStateMachineArn(v string) *D return s } +// SetTracingConfiguration sets the TracingConfiguration field's value. +func (s *DescribeStateMachineForExecutionOutput) SetTracingConfiguration(v *TracingConfiguration) *DescribeStateMachineForExecutionOutput { + s.TracingConfiguration = v + return s +} + // SetUpdateDate sets the UpdateDate field's value. func (s *DescribeStateMachineForExecutionOutput) SetUpdateDate(v time.Time) *DescribeStateMachineForExecutionOutput { s.UpdateDate = &v @@ -3637,6 +3736,9 @@ type DescribeStateMachineOutput struct { // The current status of the state machine. Status *string `locationName:"status" type:"string" enum:"StateMachineStatus"` + // Selects whether AWS X-Ray tracing is enabled. 
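// A minimal sketch of the new TracingConfiguration wiring shown above: setting
// Enabled on CreateStateMachineInput turns on AWS X-Ray tracing for the state
// machine. The package and function names are illustrative only.
package sfnexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sfn"
)

func createTracedStateMachine(conn *sfn.SFN, name, definition, roleArn string) (*sfn.CreateStateMachineOutput, error) {
	return conn.CreateStateMachine(&sfn.CreateStateMachineInput{
		Name:       aws.String(name),
		Definition: aws.String(definition),
		RoleArn:    aws.String(roleArn),
		TracingConfiguration: &sfn.TracingConfiguration{
			Enabled: aws.Bool(true), // enable X-Ray tracing for this state machine
		},
	})
}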
+ TracingConfiguration *TracingConfiguration `locationName:"tracingConfiguration" type:"structure"` + // The type of the state machine (STANDARD or EXPRESS). // // Type is a required field @@ -3695,6 +3797,12 @@ func (s *DescribeStateMachineOutput) SetStatus(v string) *DescribeStateMachineOu return s } +// SetTracingConfiguration sets the TracingConfiguration field's value. +func (s *DescribeStateMachineOutput) SetTracingConfiguration(v *TracingConfiguration) *DescribeStateMachineOutput { + s.TracingConfiguration = v + return s +} + // SetType sets the Type field's value. func (s *DescribeStateMachineOutput) SetType(v string) *DescribeStateMachineOutput { s.Type = &v @@ -3738,8 +3846,8 @@ func (s *ExecutionAbortedEventDetails) SetError(v string) *ExecutionAbortedEvent // // Executions with the same name and input are considered idempotent. type ExecutionAlreadyExists struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3756,17 +3864,17 @@ func (s ExecutionAlreadyExists) GoString() string { func newErrorExecutionAlreadyExists(v protocol.ResponseMetadata) error { return &ExecutionAlreadyExists{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ExecutionAlreadyExists) Code() string { +func (s *ExecutionAlreadyExists) Code() string { return "ExecutionAlreadyExists" } // Message returns the exception's message. -func (s ExecutionAlreadyExists) Message() string { +func (s *ExecutionAlreadyExists) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3774,28 +3882,28 @@ func (s ExecutionAlreadyExists) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ExecutionAlreadyExists) OrigErr() error { +func (s *ExecutionAlreadyExists) OrigErr() error { return nil } -func (s ExecutionAlreadyExists) Error() string { +func (s *ExecutionAlreadyExists) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ExecutionAlreadyExists) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ExecutionAlreadyExists) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ExecutionAlreadyExists) RequestID() string { - return s.respMetadata.RequestID +func (s *ExecutionAlreadyExists) RequestID() string { + return s.RespMetadata.RequestID } // The specified execution does not exist. type ExecutionDoesNotExist struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3812,17 +3920,17 @@ func (s ExecutionDoesNotExist) GoString() string { func newErrorExecutionDoesNotExist(v protocol.ResponseMetadata) error { return &ExecutionDoesNotExist{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ExecutionDoesNotExist) Code() string { +func (s *ExecutionDoesNotExist) Code() string { return "ExecutionDoesNotExist" } // Message returns the exception's message. 
-func (s ExecutionDoesNotExist) Message() string { +func (s *ExecutionDoesNotExist) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3830,22 +3938,22 @@ func (s ExecutionDoesNotExist) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ExecutionDoesNotExist) OrigErr() error { +func (s *ExecutionDoesNotExist) OrigErr() error { return nil } -func (s ExecutionDoesNotExist) Error() string { +func (s *ExecutionDoesNotExist) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ExecutionDoesNotExist) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ExecutionDoesNotExist) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ExecutionDoesNotExist) RequestID() string { - return s.respMetadata.RequestID +func (s *ExecutionDoesNotExist) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about an execution failure event. @@ -3884,8 +3992,8 @@ func (s *ExecutionFailedEventDetails) SetError(v string) *ExecutionFailedEventDe // The maximum number of running executions has been reached. Running executions // must end or be stopped before a new execution can be started. type ExecutionLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3902,17 +4010,17 @@ func (s ExecutionLimitExceeded) GoString() string { func newErrorExecutionLimitExceeded(v protocol.ResponseMetadata) error { return &ExecutionLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ExecutionLimitExceeded) Code() string { +func (s *ExecutionLimitExceeded) Code() string { return "ExecutionLimitExceeded" } // Message returns the exception's message. -func (s ExecutionLimitExceeded) Message() string { +func (s *ExecutionLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3920,22 +4028,22 @@ func (s ExecutionLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ExecutionLimitExceeded) OrigErr() error { +func (s *ExecutionLimitExceeded) OrigErr() error { return nil } -func (s ExecutionLimitExceeded) Error() string { +func (s *ExecutionLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ExecutionLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ExecutionLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ExecutionLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *ExecutionLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about an execution. @@ -4036,9 +4144,13 @@ func (s *ExecutionListItem) SetStopDate(v time.Time) *ExecutionListItem { type ExecutionStartedEventDetails struct { _ struct{} `type:"structure"` - // The JSON data input to the execution. + // The JSON data input to the execution. Length constraints apply to the payload + // size, and are expressed as bytes in UTF-8 encoding. 
Input *string `locationName:"input" type:"string" sensitive:"true"` + // Contains details about the input for an execution history event. + InputDetails *HistoryEventExecutionDataDetails `locationName:"inputDetails" type:"structure"` + // The Amazon Resource Name (ARN) of the IAM role used for executing AWS Lambda // tasks. RoleArn *string `locationName:"roleArn" min:"1" type:"string"` @@ -4060,6 +4172,12 @@ func (s *ExecutionStartedEventDetails) SetInput(v string) *ExecutionStartedEvent return s } +// SetInputDetails sets the InputDetails field's value. +func (s *ExecutionStartedEventDetails) SetInputDetails(v *HistoryEventExecutionDataDetails) *ExecutionStartedEventDetails { + s.InputDetails = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *ExecutionStartedEventDetails) SetRoleArn(v string) *ExecutionStartedEventDetails { s.RoleArn = &v @@ -4070,8 +4188,12 @@ func (s *ExecutionStartedEventDetails) SetRoleArn(v string) *ExecutionStartedEve type ExecutionSucceededEventDetails struct { _ struct{} `type:"structure"` - // The JSON data output by the execution. + // The JSON data output by the execution. Length constraints apply to the payload + // size, and are expressed as bytes in UTF-8 encoding. Output *string `locationName:"output" type:"string" sensitive:"true"` + + // Contains details about the output of an execution history event. + OutputDetails *HistoryEventExecutionDataDetails `locationName:"outputDetails" type:"structure"` } // String returns the string representation @@ -4090,6 +4212,12 @@ func (s *ExecutionSucceededEventDetails) SetOutput(v string) *ExecutionSucceeded return s } +// SetOutputDetails sets the OutputDetails field's value. +func (s *ExecutionSucceededEventDetails) SetOutputDetails(v *HistoryEventExecutionDataDetails) *ExecutionSucceededEventDetails { + s.OutputDetails = v + return s +} + // Contains details about the execution timeout that occurred during the execution. type ExecutionTimedOutEventDetails struct { _ struct{} `type:"structure"` @@ -4182,7 +4310,8 @@ func (s *GetActivityTaskInput) SetWorkerName(v string) *GetActivityTaskInput { type GetActivityTaskOutput struct { _ struct{} `type:"structure"` - // The string that contains the JSON input data for the task. + // The string that contains the JSON input data for the task. Length constraints + // apply to the payload size, and are expressed as bytes in UTF-8 encoding. Input *string `locationName:"input" type:"string" sensitive:"true"` // A token that identifies the scheduled task. This token must be copied and @@ -4221,6 +4350,10 @@ type GetExecutionHistoryInput struct { // ExecutionArn is a required field ExecutionArn *string `locationName:"executionArn" min:"1" type:"string" required:"true"` + // You can select whether execution data (input or output of a history event) + // is returned. The default is true. + IncludeExecutionData *bool `locationName:"includeExecutionData" type:"boolean"` + // The maximum number of results that are returned per call. You can use nextToken // to obtain further pages of results. The default is 100 and the maximum allowed // page size is 1000. A value of 0 uses the default. @@ -4275,6 +4408,12 @@ func (s *GetExecutionHistoryInput) SetExecutionArn(v string) *GetExecutionHistor return s } +// SetIncludeExecutionData sets the IncludeExecutionData field's value. +func (s *GetExecutionHistoryInput) SetIncludeExecutionData(v bool) *GetExecutionHistoryInput { + s.IncludeExecutionData = &v + return s +} + // SetMaxResults sets the MaxResults field's value. 
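// A minimal sketch of the new IncludeExecutionData flag on
// GetExecutionHistoryInput: passing false asks Step Functions to omit the
// input/output payloads from the returned history events. Package and
// function names are illustrative only.
package sfnexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sfn"
)

func historyWithoutPayloads(conn *sfn.SFN, executionArn string) (*sfn.GetExecutionHistoryOutput, error) {
	return conn.GetExecutionHistory(&sfn.GetExecutionHistoryInput{
		ExecutionArn:         aws.String(executionArn),
		IncludeExecutionData: aws.Bool(false), // omit event input/output data
	})
}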
func (s *GetExecutionHistoryInput) SetMaxResults(v int64) *GetExecutionHistoryInput { s.MaxResults = &v @@ -4680,10 +4819,35 @@ func (s *HistoryEvent) SetType(v string) *HistoryEvent { return s } +// Provides details about input or output in an execution history event. +type HistoryEventExecutionDataDetails struct { + _ struct{} `type:"structure"` + + // Indicates whether input or output was truncated in the response. Always false + // for API calls. + Truncated *bool `locationName:"truncated" type:"boolean"` +} + +// String returns the string representation +func (s HistoryEventExecutionDataDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HistoryEventExecutionDataDetails) GoString() string { + return s.String() +} + +// SetTruncated sets the Truncated field's value. +func (s *HistoryEventExecutionDataDetails) SetTruncated(v bool) *HistoryEventExecutionDataDetails { + s.Truncated = &v + return s +} + // The provided Amazon Resource Name (ARN) is invalid. type InvalidArn struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4700,17 +4864,17 @@ func (s InvalidArn) GoString() string { func newErrorInvalidArn(v protocol.ResponseMetadata) error { return &InvalidArn{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidArn) Code() string { +func (s *InvalidArn) Code() string { return "InvalidArn" } // Message returns the exception's message. -func (s InvalidArn) Message() string { +func (s *InvalidArn) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4718,28 +4882,28 @@ func (s InvalidArn) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidArn) OrigErr() error { +func (s *InvalidArn) OrigErr() error { return nil } -func (s InvalidArn) Error() string { +func (s *InvalidArn) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidArn) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidArn) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidArn) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidArn) RequestID() string { + return s.RespMetadata.RequestID } // The provided Amazon States Language definition is invalid. type InvalidDefinition struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4756,17 +4920,17 @@ func (s InvalidDefinition) GoString() string { func newErrorInvalidDefinition(v protocol.ResponseMetadata) error { return &InvalidDefinition{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDefinition) Code() string { +func (s *InvalidDefinition) Code() string { return "InvalidDefinition" } // Message returns the exception's message. 
-func (s InvalidDefinition) Message() string { +func (s *InvalidDefinition) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4774,28 +4938,28 @@ func (s InvalidDefinition) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDefinition) OrigErr() error { +func (s *InvalidDefinition) OrigErr() error { return nil } -func (s InvalidDefinition) Error() string { +func (s *InvalidDefinition) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDefinition) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDefinition) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDefinition) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDefinition) RequestID() string { + return s.RespMetadata.RequestID } // The provided JSON input data is invalid. type InvalidExecutionInput struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4812,17 +4976,17 @@ func (s InvalidExecutionInput) GoString() string { func newErrorInvalidExecutionInput(v protocol.ResponseMetadata) error { return &InvalidExecutionInput{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidExecutionInput) Code() string { +func (s *InvalidExecutionInput) Code() string { return "InvalidExecutionInput" } // Message returns the exception's message. -func (s InvalidExecutionInput) Message() string { +func (s *InvalidExecutionInput) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4830,27 +4994,27 @@ func (s InvalidExecutionInput) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidExecutionInput) OrigErr() error { +func (s *InvalidExecutionInput) OrigErr() error { return nil } -func (s InvalidExecutionInput) Error() string { +func (s *InvalidExecutionInput) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidExecutionInput) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidExecutionInput) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidExecutionInput) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidExecutionInput) RequestID() string { + return s.RespMetadata.RequestID } type InvalidLoggingConfiguration struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4867,17 +5031,17 @@ func (s InvalidLoggingConfiguration) GoString() string { func newErrorInvalidLoggingConfiguration(v protocol.ResponseMetadata) error { return &InvalidLoggingConfiguration{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidLoggingConfiguration) Code() string { +func (s *InvalidLoggingConfiguration) Code() string { return "InvalidLoggingConfiguration" } // Message returns the exception's message. 
-func (s InvalidLoggingConfiguration) Message() string { +func (s *InvalidLoggingConfiguration) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4885,28 +5049,28 @@ func (s InvalidLoggingConfiguration) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidLoggingConfiguration) OrigErr() error { +func (s *InvalidLoggingConfiguration) OrigErr() error { return nil } -func (s InvalidLoggingConfiguration) Error() string { +func (s *InvalidLoggingConfiguration) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidLoggingConfiguration) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidLoggingConfiguration) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidLoggingConfiguration) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidLoggingConfiguration) RequestID() string { + return s.RespMetadata.RequestID } // The provided name is invalid. type InvalidName struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4923,17 +5087,17 @@ func (s InvalidName) GoString() string { func newErrorInvalidName(v protocol.ResponseMetadata) error { return &InvalidName{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidName) Code() string { +func (s *InvalidName) Code() string { return "InvalidName" } // Message returns the exception's message. -func (s InvalidName) Message() string { +func (s *InvalidName) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4941,28 +5105,28 @@ func (s InvalidName) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidName) OrigErr() error { +func (s *InvalidName) OrigErr() error { return nil } -func (s InvalidName) Error() string { +func (s *InvalidName) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidName) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidName) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidName) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidName) RequestID() string { + return s.RespMetadata.RequestID } // The provided JSON output data is invalid. type InvalidOutput struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4979,17 +5143,17 @@ func (s InvalidOutput) GoString() string { func newErrorInvalidOutput(v protocol.ResponseMetadata) error { return &InvalidOutput{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOutput) Code() string { +func (s *InvalidOutput) Code() string { return "InvalidOutput" } // Message returns the exception's message. 
-func (s InvalidOutput) Message() string { +func (s *InvalidOutput) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4997,28 +5161,28 @@ func (s InvalidOutput) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOutput) OrigErr() error { +func (s *InvalidOutput) OrigErr() error { return nil } -func (s InvalidOutput) Error() string { +func (s *InvalidOutput) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOutput) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOutput) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidOutput) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOutput) RequestID() string { + return s.RespMetadata.RequestID } // The provided token is invalid. type InvalidToken struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5035,17 +5199,17 @@ func (s InvalidToken) GoString() string { func newErrorInvalidToken(v protocol.ResponseMetadata) error { return &InvalidToken{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidToken) Code() string { +func (s *InvalidToken) Code() string { return "InvalidToken" } // Message returns the exception's message. -func (s InvalidToken) Message() string { +func (s *InvalidToken) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5053,22 +5217,79 @@ func (s InvalidToken) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidToken) OrigErr() error { +func (s *InvalidToken) OrigErr() error { return nil } -func (s InvalidToken) Error() string { +func (s *InvalidToken) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidToken) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidToken) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidToken) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidToken) RequestID() string { + return s.RespMetadata.RequestID +} + +// Your tracingConfiguration key does not match, or enabled has not been set +// to true or false. +type InvalidTracingConfiguration struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation +func (s InvalidTracingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InvalidTracingConfiguration) GoString() string { + return s.String() +} + +func newErrorInvalidTracingConfiguration(v protocol.ResponseMetadata) error { + return &InvalidTracingConfiguration{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidTracingConfiguration) Code() string { + return "InvalidTracingConfiguration" +} + +// Message returns the exception's message. 
+func (s *InvalidTracingConfiguration) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidTracingConfiguration) OrigErr() error { + return nil +} + +func (s *InvalidTracingConfiguration) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidTracingConfiguration) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidTracingConfiguration) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about a lambda function that failed during an execution. @@ -5142,9 +5363,13 @@ func (s *LambdaFunctionScheduleFailedEventDetails) SetError(v string) *LambdaFun type LambdaFunctionScheduledEventDetails struct { _ struct{} `type:"structure"` - // The JSON data input to the lambda function. + // The JSON data input to the lambda function. Length constraints apply to the + // payload size, and are expressed as bytes in UTF-8 encoding. Input *string `locationName:"input" type:"string" sensitive:"true"` + // Contains details about input for an execution history event. + InputDetails *HistoryEventExecutionDataDetails `locationName:"inputDetails" type:"structure"` + // The Amazon Resource Name (ARN) of the scheduled lambda function. // // Resource is a required field @@ -5170,6 +5395,12 @@ func (s *LambdaFunctionScheduledEventDetails) SetInput(v string) *LambdaFunction return s } +// SetInputDetails sets the InputDetails field's value. +func (s *LambdaFunctionScheduledEventDetails) SetInputDetails(v *HistoryEventExecutionDataDetails) *LambdaFunctionScheduledEventDetails { + s.InputDetails = v + return s +} + // SetResource sets the Resource field's value. func (s *LambdaFunctionScheduledEventDetails) SetResource(v string) *LambdaFunctionScheduledEventDetails { s.Resource = &v @@ -5220,8 +5451,12 @@ func (s *LambdaFunctionStartFailedEventDetails) SetError(v string) *LambdaFuncti type LambdaFunctionSucceededEventDetails struct { _ struct{} `type:"structure"` - // The JSON data output by the lambda function. + // The JSON data output by the lambda function. Length constraints apply to + // the payload size, and are expressed as bytes in UTF-8 encoding. Output *string `locationName:"output" type:"string" sensitive:"true"` + + // Contains details about the output of an execution history event. + OutputDetails *HistoryEventExecutionDataDetails `locationName:"outputDetails" type:"structure"` } // String returns the string representation @@ -5240,6 +5475,12 @@ func (s *LambdaFunctionSucceededEventDetails) SetOutput(v string) *LambdaFunctio return s } +// SetOutputDetails sets the OutputDetails field's value. +func (s *LambdaFunctionSucceededEventDetails) SetOutputDetails(v *HistoryEventExecutionDataDetails) *LambdaFunctionSucceededEventDetails { + s.OutputDetails = v + return s +} + // Contains details about a lambda function timeout that occurred during an // execution. type LambdaFunctionTimedOutEventDetails struct { @@ -5687,7 +5928,7 @@ type LoggingConfiguration struct { // be logged. Limited to size 1. Required, if your log level is not set to OFF. Destinations []*LogDestination `locationName:"destinations" type:"list"` - // Determines whether execution data is included in your log. When set to FALSE, + // Determines whether execution data is included in your log. 
When set to false, // data is excluded. IncludeExecutionData *bool `locationName:"includeExecutionData" type:"boolean"` @@ -5803,8 +6044,8 @@ func (s *MapStateStartedEventDetails) SetLength(v int64) *MapStateStartedEventDe // Request is missing a required parameter. This error occurs if both definition // and roleArn are not specified. type MissingRequiredParameter struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -5821,17 +6062,17 @@ func (s MissingRequiredParameter) GoString() string { func newErrorMissingRequiredParameter(v protocol.ResponseMetadata) error { return &MissingRequiredParameter{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MissingRequiredParameter) Code() string { +func (s *MissingRequiredParameter) Code() string { return "MissingRequiredParameter" } // Message returns the exception's message. -func (s MissingRequiredParameter) Message() string { +func (s *MissingRequiredParameter) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5839,29 +6080,29 @@ func (s MissingRequiredParameter) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MissingRequiredParameter) OrigErr() error { +func (s *MissingRequiredParameter) OrigErr() error { return nil } -func (s MissingRequiredParameter) Error() string { +func (s *MissingRequiredParameter) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MissingRequiredParameter) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MissingRequiredParameter) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MissingRequiredParameter) RequestID() string { - return s.respMetadata.RequestID +func (s *MissingRequiredParameter) RequestID() string { + return s.RespMetadata.RequestID } // Could not find the referenced resource. Only state machine and activity ARNs // are supported. type ResourceNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -5880,17 +6121,17 @@ func (s ResourceNotFound) GoString() string { func newErrorResourceNotFound(v protocol.ResponseMetadata) error { return &ResourceNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFound) Code() string { +func (s *ResourceNotFound) Code() string { return "ResourceNotFound" } // Message returns the exception's message. -func (s ResourceNotFound) Message() string { +func (s *ResourceNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5898,22 +6139,22 @@ func (s ResourceNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFound) OrigErr() error { +func (s *ResourceNotFound) OrigErr() error { return nil } -func (s ResourceNotFound) Error() string { +func (s *ResourceNotFound) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFound) RequestID() string { + return s.RespMetadata.RequestID } type SendTaskFailureInput struct { @@ -6051,7 +6292,8 @@ func (s SendTaskHeartbeatOutput) GoString() string { type SendTaskSuccessInput struct { _ struct{} `type:"structure"` - // The JSON output of the task. + // The JSON output of the task. Length constraints apply to the payload size, + // and are expressed as bytes in UTF-8 encoding. // // Output is a required field Output *string `locationName:"output" type:"string" required:"true" sensitive:"true"` @@ -6128,6 +6370,9 @@ type StartExecutionInput struct { // // If you don't include any JSON input data, you still must include the two // braces, for example: "input": "{}" + // + // Length constraints apply to the payload size, and are expressed as bytes + // in UTF-8 encoding. Input *string `locationName:"input" type:"string" sensitive:"true"` // The name of the execution. This name must be unique for your AWS account, @@ -6155,6 +6400,10 @@ type StartExecutionInput struct { // // StateMachineArn is a required field StateMachineArn *string `locationName:"stateMachineArn" min:"1" type:"string" required:"true"` + + // Passes the AWS X-Ray trace header. The trace header can also be passed in + // the request payload. + TraceHeader *string `locationName:"traceHeader" type:"string"` } // String returns the string representation @@ -6204,6 +6453,12 @@ func (s *StartExecutionInput) SetStateMachineArn(v string) *StartExecutionInput return s } +// SetTraceHeader sets the TraceHeader field's value. +func (s *StartExecutionInput) SetTraceHeader(v string) *StartExecutionInput { + s.TraceHeader = &v + return s +} + type StartExecutionOutput struct { _ struct{} `type:"structure"` @@ -6244,9 +6499,13 @@ func (s *StartExecutionOutput) SetStartDate(v time.Time) *StartExecutionOutput { type StateEnteredEventDetails struct { _ struct{} `type:"structure"` - // The string that contains the JSON input data for the state. + // The string that contains the JSON input data for the state. Length constraints + // apply to the payload size, and are expressed as bytes in UTF-8 encoding. Input *string `locationName:"input" type:"string" sensitive:"true"` + // Contains details about the input for an execution history event. + InputDetails *HistoryEventExecutionDataDetails `locationName:"inputDetails" type:"structure"` + // The name of the state. // // Name is a required field @@ -6269,6 +6528,12 @@ func (s *StateEnteredEventDetails) SetInput(v string) *StateEnteredEventDetails return s } +// SetInputDetails sets the InputDetails field's value. +func (s *StateEnteredEventDetails) SetInputDetails(v *HistoryEventExecutionDataDetails) *StateEnteredEventDetails { + s.InputDetails = v + return s +} + // SetName sets the Name field's value. func (s *StateEnteredEventDetails) SetName(v string) *StateEnteredEventDetails { s.Name = &v @@ -6299,8 +6564,12 @@ type StateExitedEventDetails struct { // Name is a required field Name *string `locationName:"name" min:"1" type:"string" required:"true"` - // The JSON output data of the state. + // The JSON output data of the state. Length constraints apply to the payload + // size, and are expressed as bytes in UTF-8 encoding. 
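// A minimal sketch of the new TraceHeader field on StartExecutionInput, which
// forwards an AWS X-Ray trace header to the execution; the empty-braces input
// follows the doc comment above. Package and function names are illustrative
// only.
package sfnexample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sfn"
)

func startTracedExecution(conn *sfn.SFN, stateMachineArn, traceHeader string) (*sfn.StartExecutionOutput, error) {
	return conn.StartExecution(&sfn.StartExecutionInput{
		StateMachineArn: aws.String(stateMachineArn),
		Input:           aws.String(`{}`), // two braces are required even with no input data
		TraceHeader:     aws.String(traceHeader),
	})
}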
Output *string `locationName:"output" type:"string" sensitive:"true"` + + // Contains details about the output of an execution history event. + OutputDetails *HistoryEventExecutionDataDetails `locationName:"outputDetails" type:"structure"` } // String returns the string representation @@ -6325,11 +6594,17 @@ func (s *StateExitedEventDetails) SetOutput(v string) *StateExitedEventDetails { return s } +// SetOutputDetails sets the OutputDetails field's value. +func (s *StateExitedEventDetails) SetOutputDetails(v *HistoryEventExecutionDataDetails) *StateExitedEventDetails { + s.OutputDetails = v + return s +} + // A state machine with the same name but a different definition or role ARN // already exists. type StateMachineAlreadyExists struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6346,17 +6621,17 @@ func (s StateMachineAlreadyExists) GoString() string { func newErrorStateMachineAlreadyExists(v protocol.ResponseMetadata) error { return &StateMachineAlreadyExists{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s StateMachineAlreadyExists) Code() string { +func (s *StateMachineAlreadyExists) Code() string { return "StateMachineAlreadyExists" } // Message returns the exception's message. -func (s StateMachineAlreadyExists) Message() string { +func (s *StateMachineAlreadyExists) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6364,28 +6639,28 @@ func (s StateMachineAlreadyExists) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s StateMachineAlreadyExists) OrigErr() error { +func (s *StateMachineAlreadyExists) OrigErr() error { return nil } -func (s StateMachineAlreadyExists) Error() string { +func (s *StateMachineAlreadyExists) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s StateMachineAlreadyExists) StatusCode() int { - return s.respMetadata.StatusCode +func (s *StateMachineAlreadyExists) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s StateMachineAlreadyExists) RequestID() string { - return s.respMetadata.RequestID +func (s *StateMachineAlreadyExists) RequestID() string { + return s.RespMetadata.RequestID } // The specified state machine is being deleted. type StateMachineDeleting struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6402,17 +6677,17 @@ func (s StateMachineDeleting) GoString() string { func newErrorStateMachineDeleting(v protocol.ResponseMetadata) error { return &StateMachineDeleting{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s StateMachineDeleting) Code() string { +func (s *StateMachineDeleting) Code() string { return "StateMachineDeleting" } // Message returns the exception's message. -func (s StateMachineDeleting) Message() string { +func (s *StateMachineDeleting) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6420,28 +6695,28 @@ func (s StateMachineDeleting) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s StateMachineDeleting) OrigErr() error { +func (s *StateMachineDeleting) OrigErr() error { return nil } -func (s StateMachineDeleting) Error() string { +func (s *StateMachineDeleting) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s StateMachineDeleting) StatusCode() int { - return s.respMetadata.StatusCode +func (s *StateMachineDeleting) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s StateMachineDeleting) RequestID() string { - return s.respMetadata.RequestID +func (s *StateMachineDeleting) RequestID() string { + return s.RespMetadata.RequestID } // The specified state machine does not exist. type StateMachineDoesNotExist struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6458,17 +6733,17 @@ func (s StateMachineDoesNotExist) GoString() string { func newErrorStateMachineDoesNotExist(v protocol.ResponseMetadata) error { return &StateMachineDoesNotExist{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s StateMachineDoesNotExist) Code() string { +func (s *StateMachineDoesNotExist) Code() string { return "StateMachineDoesNotExist" } // Message returns the exception's message. -func (s StateMachineDoesNotExist) Message() string { +func (s *StateMachineDoesNotExist) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6476,29 +6751,29 @@ func (s StateMachineDoesNotExist) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s StateMachineDoesNotExist) OrigErr() error { +func (s *StateMachineDoesNotExist) OrigErr() error { return nil } -func (s StateMachineDoesNotExist) Error() string { +func (s *StateMachineDoesNotExist) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s StateMachineDoesNotExist) StatusCode() int { - return s.respMetadata.StatusCode +func (s *StateMachineDoesNotExist) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s StateMachineDoesNotExist) RequestID() string { - return s.respMetadata.RequestID +func (s *StateMachineDoesNotExist) RequestID() string { + return s.RespMetadata.RequestID } // The maximum number of state machines has been reached. Existing state machines // must be deleted before a new state machine can be created. type StateMachineLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6515,17 +6790,17 @@ func (s StateMachineLimitExceeded) GoString() string { func newErrorStateMachineLimitExceeded(v protocol.ResponseMetadata) error { return &StateMachineLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s StateMachineLimitExceeded) Code() string { +func (s *StateMachineLimitExceeded) Code() string { return "StateMachineLimitExceeded" } // Message returns the exception's message. 
-func (s StateMachineLimitExceeded) Message() string { +func (s *StateMachineLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6533,22 +6808,22 @@ func (s StateMachineLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s StateMachineLimitExceeded) OrigErr() error { +func (s *StateMachineLimitExceeded) OrigErr() error { return nil } -func (s StateMachineLimitExceeded) Error() string { +func (s *StateMachineLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s StateMachineLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *StateMachineLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s StateMachineLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *StateMachineLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about the state machine. @@ -6624,8 +6899,8 @@ func (s *StateMachineListItem) SetType(v string) *StateMachineListItem { } type StateMachineTypeNotSupported struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6642,17 +6917,17 @@ func (s StateMachineTypeNotSupported) GoString() string { func newErrorStateMachineTypeNotSupported(v protocol.ResponseMetadata) error { return &StateMachineTypeNotSupported{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s StateMachineTypeNotSupported) Code() string { +func (s *StateMachineTypeNotSupported) Code() string { return "StateMachineTypeNotSupported" } // Message returns the exception's message. -func (s StateMachineTypeNotSupported) Message() string { +func (s *StateMachineTypeNotSupported) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6660,22 +6935,22 @@ func (s StateMachineTypeNotSupported) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s StateMachineTypeNotSupported) OrigErr() error { +func (s *StateMachineTypeNotSupported) OrigErr() error { return nil } -func (s StateMachineTypeNotSupported) Error() string { +func (s *StateMachineTypeNotSupported) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s StateMachineTypeNotSupported) StatusCode() int { - return s.respMetadata.StatusCode +func (s *StateMachineTypeNotSupported) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
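// Illustrative aside, not part of the vendored diff: with the exception types above
// moved to pointer receivers and an exported RespMetadata, a caller can match the
// concrete error type with errors.As and read the HTTP status and request ID from
// it. A minimal sketch; the state machine ARN is a placeholder.
package main

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sfn"
)

func main() {
	svc := sfn.New(session.Must(session.NewSession()))

	_, err := svc.DescribeStateMachine(&sfn.DescribeStateMachineInput{
		StateMachineArn: aws.String("arn:aws:states:us-east-1:123456789012:stateMachine:missing"), // placeholder
	})

	var notFound *sfn.StateMachineDoesNotExist
	if errors.As(err, &notFound) {
		log.Printf("not found (HTTP %d, request %s): %s",
			notFound.StatusCode(), notFound.RequestID(), notFound.Message())
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}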
-func (s StateMachineTypeNotSupported) RequestID() string { - return s.respMetadata.RequestID +func (s *StateMachineTypeNotSupported) RequestID() string { + return s.RespMetadata.RequestID } type StopExecutionInput struct { @@ -6900,8 +7175,8 @@ func (s TagResourceOutput) GoString() string { } type TaskDoesNotExist struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6918,17 +7193,17 @@ func (s TaskDoesNotExist) GoString() string { func newErrorTaskDoesNotExist(v protocol.ResponseMetadata) error { return &TaskDoesNotExist{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TaskDoesNotExist) Code() string { +func (s *TaskDoesNotExist) Code() string { return "TaskDoesNotExist" } // Message returns the exception's message. -func (s TaskDoesNotExist) Message() string { +func (s *TaskDoesNotExist) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6936,22 +7211,22 @@ func (s TaskDoesNotExist) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TaskDoesNotExist) OrigErr() error { +func (s *TaskDoesNotExist) OrigErr() error { return nil } -func (s TaskDoesNotExist) Error() string { +func (s *TaskDoesNotExist) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TaskDoesNotExist) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TaskDoesNotExist) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TaskDoesNotExist) RequestID() string { - return s.respMetadata.RequestID +func (s *TaskDoesNotExist) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about a task failure event. @@ -7013,7 +7288,11 @@ func (s *TaskFailedEventDetails) SetResourceType(v string) *TaskFailedEventDetai type TaskScheduledEventDetails struct { _ struct{} `type:"structure"` - // The JSON data passed to the resource referenced in a task state. + // The maximum allowed duration between two heartbeats for the task. + HeartbeatInSeconds *int64 `locationName:"heartbeatInSeconds" type:"long"` + + // The JSON data passed to the resource referenced in a task state. Length constraints + // apply to the payload size, and are expressed as bytes in UTF-8 encoding. // // Parameters is a required field Parameters *string `locationName:"parameters" type:"string" required:"true" sensitive:"true"` @@ -7047,6 +7326,12 @@ func (s TaskScheduledEventDetails) GoString() string { return s.String() } +// SetHeartbeatInSeconds sets the HeartbeatInSeconds field's value. +func (s *TaskScheduledEventDetails) SetHeartbeatInSeconds(v int64) *TaskScheduledEventDetails { + s.HeartbeatInSeconds = &v + return s +} + // SetParameters sets the Parameters field's value. func (s *TaskScheduledEventDetails) SetParameters(v string) *TaskScheduledEventDetails { s.Parameters = &v @@ -7228,9 +7513,13 @@ func (s *TaskSubmitFailedEventDetails) SetResourceType(v string) *TaskSubmitFail type TaskSubmittedEventDetails struct { _ struct{} `type:"structure"` - // The response from a resource when a task has started. + // The response from a resource when a task has started. Length constraints + // apply to the payload size, and are expressed as bytes in UTF-8 encoding. 
Output *string `locationName:"output" type:"string" sensitive:"true"` + // Contains details about the output of an execution history event. + OutputDetails *HistoryEventExecutionDataDetails `locationName:"outputDetails" type:"structure"` + // The service name of the resource in a task state. // // Resource is a required field @@ -7258,6 +7547,12 @@ func (s *TaskSubmittedEventDetails) SetOutput(v string) *TaskSubmittedEventDetai return s } +// SetOutputDetails sets the OutputDetails field's value. +func (s *TaskSubmittedEventDetails) SetOutputDetails(v *HistoryEventExecutionDataDetails) *TaskSubmittedEventDetails { + s.OutputDetails = v + return s +} + // SetResource sets the Resource field's value. func (s *TaskSubmittedEventDetails) SetResource(v string) *TaskSubmittedEventDetails { s.Resource = &v @@ -7275,9 +7570,13 @@ type TaskSucceededEventDetails struct { _ struct{} `type:"structure"` // The full JSON response from a resource when a task has succeeded. This response - // becomes the output of the related task. + // becomes the output of the related task. Length constraints apply to the payload + // size, and are expressed as bytes in UTF-8 encoding. Output *string `locationName:"output" type:"string" sensitive:"true"` + // Contains details about the output of an execution history event. + OutputDetails *HistoryEventExecutionDataDetails `locationName:"outputDetails" type:"structure"` + // The service name of the resource in a task state. // // Resource is a required field @@ -7305,6 +7604,12 @@ func (s *TaskSucceededEventDetails) SetOutput(v string) *TaskSucceededEventDetai return s } +// SetOutputDetails sets the OutputDetails field's value. +func (s *TaskSucceededEventDetails) SetOutputDetails(v *HistoryEventExecutionDataDetails) *TaskSucceededEventDetails { + s.OutputDetails = v + return s +} + // SetResource sets the Resource field's value. func (s *TaskSucceededEventDetails) SetResource(v string) *TaskSucceededEventDetails { s.Resource = &v @@ -7318,8 +7623,8 @@ func (s *TaskSucceededEventDetails) SetResourceType(v string) *TaskSucceededEven } type TaskTimedOut struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7336,17 +7641,17 @@ func (s TaskTimedOut) GoString() string { func newErrorTaskTimedOut(v protocol.ResponseMetadata) error { return &TaskTimedOut{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TaskTimedOut) Code() string { +func (s *TaskTimedOut) Code() string { return "TaskTimedOut" } // Message returns the exception's message. -func (s TaskTimedOut) Message() string { +func (s *TaskTimedOut) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7354,22 +7659,22 @@ func (s TaskTimedOut) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TaskTimedOut) OrigErr() error { +func (s *TaskTimedOut) OrigErr() error { return nil } -func (s TaskTimedOut) Error() string { +func (s *TaskTimedOut) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TaskTimedOut) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TaskTimedOut) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s TaskTimedOut) RequestID() string { - return s.respMetadata.RequestID +func (s *TaskTimedOut) RequestID() string { + return s.RespMetadata.RequestID } // Contains details about a resource timeout that occurred during an execution. @@ -7431,8 +7736,8 @@ func (s *TaskTimedOutEventDetails) SetResourceType(v string) *TaskTimedOutEventD // Topic (https://docs.aws.amazon.com/step-functions/latest/dg/limits.html) // in the AWS Step Functions Developer Guide. type TooManyTags struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -7451,17 +7756,17 @@ func (s TooManyTags) GoString() string { func newErrorTooManyTags(v protocol.ResponseMetadata) error { return &TooManyTags{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTags) Code() string { +func (s *TooManyTags) Code() string { return "TooManyTags" } // Message returns the exception's message. -func (s TooManyTags) Message() string { +func (s *TooManyTags) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7469,22 +7774,47 @@ func (s TooManyTags) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTags) OrigErr() error { +func (s *TooManyTags) OrigErr() error { return nil } -func (s TooManyTags) Error() string { +func (s *TooManyTags) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTags) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTags) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTags) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTags) RequestID() string { + return s.RespMetadata.RequestID +} + +// Selects whether or not the state machine's AWS X-Ray tracing is enabled. +// Default is false +type TracingConfiguration struct { + _ struct{} `type:"structure"` + + // When set to true, AWS X-Ray tracing is enabled. + Enabled *bool `locationName:"enabled" type:"boolean"` +} + +// String returns the string representation +func (s TracingConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TracingConfiguration) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *TracingConfiguration) SetEnabled(v bool) *TracingConfiguration { + s.Enabled = &v + return s } type UntagResourceInput struct { @@ -7573,6 +7903,9 @@ type UpdateStateMachineInput struct { // // StateMachineArn is a required field StateMachineArn *string `locationName:"stateMachineArn" min:"1" type:"string" required:"true"` + + // Selects whether AWS X-Ray tracing is enabled. + TracingConfiguration *TracingConfiguration `locationName:"tracingConfiguration" type:"structure"` } // String returns the string representation @@ -7636,6 +7969,12 @@ func (s *UpdateStateMachineInput) SetStateMachineArn(v string) *UpdateStateMachi return s } +// SetTracingConfiguration sets the TracingConfiguration field's value. 
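// Illustrative aside, not part of the vendored diff: a minimal sketch of enabling
// X-Ray tracing on an existing state machine through the new TracingConfiguration
// field on UpdateStateMachineInput shown above. The ARN is a placeholder.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sfn"
)

func main() {
	svc := sfn.New(session.Must(session.NewSession()))

	_, err := svc.UpdateStateMachine(&sfn.UpdateStateMachineInput{
		StateMachineArn: aws.String("arn:aws:states:us-east-1:123456789012:stateMachine:example"), // placeholder
		TracingConfiguration: &sfn.TracingConfiguration{
			Enabled: aws.Bool(true), // defaults to false when omitted
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}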
+func (s *UpdateStateMachineInput) SetTracingConfiguration(v *TracingConfiguration) *UpdateStateMachineInput { + s.TracingConfiguration = v + return s +} + type UpdateStateMachineOutput struct { _ struct{} `type:"structure"` @@ -7678,6 +8017,17 @@ const ( ExecutionStatusAborted = "ABORTED" ) +// ExecutionStatus_Values returns all elements of the ExecutionStatus enum +func ExecutionStatus_Values() []string { + return []string{ + ExecutionStatusRunning, + ExecutionStatusSucceeded, + ExecutionStatusFailed, + ExecutionStatusTimedOut, + ExecutionStatusAborted, + } +} + const ( // HistoryEventTypeActivityFailed is a HistoryEventType enum value HistoryEventTypeActivityFailed = "ActivityFailed" @@ -7845,6 +8195,67 @@ const ( HistoryEventTypeWaitStateExited = "WaitStateExited" ) +// HistoryEventType_Values returns all elements of the HistoryEventType enum +func HistoryEventType_Values() []string { + return []string{ + HistoryEventTypeActivityFailed, + HistoryEventTypeActivityScheduled, + HistoryEventTypeActivityScheduleFailed, + HistoryEventTypeActivityStarted, + HistoryEventTypeActivitySucceeded, + HistoryEventTypeActivityTimedOut, + HistoryEventTypeChoiceStateEntered, + HistoryEventTypeChoiceStateExited, + HistoryEventTypeExecutionAborted, + HistoryEventTypeExecutionFailed, + HistoryEventTypeExecutionStarted, + HistoryEventTypeExecutionSucceeded, + HistoryEventTypeExecutionTimedOut, + HistoryEventTypeFailStateEntered, + HistoryEventTypeLambdaFunctionFailed, + HistoryEventTypeLambdaFunctionScheduled, + HistoryEventTypeLambdaFunctionScheduleFailed, + HistoryEventTypeLambdaFunctionStarted, + HistoryEventTypeLambdaFunctionStartFailed, + HistoryEventTypeLambdaFunctionSucceeded, + HistoryEventTypeLambdaFunctionTimedOut, + HistoryEventTypeMapIterationAborted, + HistoryEventTypeMapIterationFailed, + HistoryEventTypeMapIterationStarted, + HistoryEventTypeMapIterationSucceeded, + HistoryEventTypeMapStateAborted, + HistoryEventTypeMapStateEntered, + HistoryEventTypeMapStateExited, + HistoryEventTypeMapStateFailed, + HistoryEventTypeMapStateStarted, + HistoryEventTypeMapStateSucceeded, + HistoryEventTypeParallelStateAborted, + HistoryEventTypeParallelStateEntered, + HistoryEventTypeParallelStateExited, + HistoryEventTypeParallelStateFailed, + HistoryEventTypeParallelStateStarted, + HistoryEventTypeParallelStateSucceeded, + HistoryEventTypePassStateEntered, + HistoryEventTypePassStateExited, + HistoryEventTypeSucceedStateEntered, + HistoryEventTypeSucceedStateExited, + HistoryEventTypeTaskFailed, + HistoryEventTypeTaskScheduled, + HistoryEventTypeTaskStarted, + HistoryEventTypeTaskStartFailed, + HistoryEventTypeTaskStateAborted, + HistoryEventTypeTaskStateEntered, + HistoryEventTypeTaskStateExited, + HistoryEventTypeTaskSubmitFailed, + HistoryEventTypeTaskSubmitted, + HistoryEventTypeTaskSucceeded, + HistoryEventTypeTaskTimedOut, + HistoryEventTypeWaitStateAborted, + HistoryEventTypeWaitStateEntered, + HistoryEventTypeWaitStateExited, + } +} + const ( // LogLevelAll is a LogLevel enum value LogLevelAll = "ALL" @@ -7859,6 +8270,16 @@ const ( LogLevelOff = "OFF" ) +// LogLevel_Values returns all elements of the LogLevel enum +func LogLevel_Values() []string { + return []string{ + LogLevelAll, + LogLevelError, + LogLevelFatal, + LogLevelOff, + } +} + const ( // StateMachineStatusActive is a StateMachineStatus enum value StateMachineStatusActive = "ACTIVE" @@ -7867,6 +8288,14 @@ const ( StateMachineStatusDeleting = "DELETING" ) +// StateMachineStatus_Values returns all elements of the StateMachineStatus enum 
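// Illustrative aside, not part of the vendored diff: the generated *_Values helpers
// above give callers (for example, provider schema validation) a single source of
// truth for the enum members. A minimal sketch using only the standard library;
// validLogLevel is a hypothetical helper, not something defined in this patch.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/sfn"
)

// validLogLevel reports whether s is one of the LogLevel enum values.
func validLogLevel(s string) bool {
	for _, v := range sfn.LogLevel_Values() {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(validLogLevel("ERROR")) // true
	fmt.Println(validLogLevel("DEBUG")) // false
}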
+func StateMachineStatus_Values() []string { + return []string{ + StateMachineStatusActive, + StateMachineStatusDeleting, + } +} + const ( // StateMachineTypeStandard is a StateMachineType enum value StateMachineTypeStandard = "STANDARD" @@ -7874,3 +8303,11 @@ const ( // StateMachineTypeExpress is a StateMachineType enum value StateMachineTypeExpress = "EXPRESS" ) + +// StateMachineType_Values returns all elements of the StateMachineType enum +func StateMachineType_Values() []string { + return []string{ + StateMachineTypeStandard, + StateMachineTypeExpress, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sfn/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sfn/errors.go index 1bebc5bb2..41926dd25 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sfn/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sfn/errors.go @@ -89,6 +89,13 @@ const ( // The provided token is invalid. ErrCodeInvalidToken = "InvalidToken" + // ErrCodeInvalidTracingConfiguration for service response error code + // "InvalidTracingConfiguration". + // + // Your tracingConfiguration key does not match, or enabled has not been set + // to true or false. + ErrCodeInvalidTracingConfiguration = "InvalidTracingConfiguration" + // ErrCodeMissingRequiredParameter for service response error code // "MissingRequiredParameter". // @@ -164,6 +171,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidName": newErrorInvalidName, "InvalidOutput": newErrorInvalidOutput, "InvalidToken": newErrorInvalidToken, + "InvalidTracingConfiguration": newErrorInvalidTracingConfiguration, "MissingRequiredParameter": newErrorMissingRequiredParameter, "ResourceNotFound": newErrorResourceNotFound, "StateMachineAlreadyExists": newErrorStateMachineAlreadyExists, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go b/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go index fc004f6c2..dbd7207e7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/api.go b/vendor/github.com/aws/aws-sdk-go/service/shield/api.go index 3155bfa30..bbbfcde82 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/shield/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/shield/api.go @@ -58,7 +58,7 @@ func (c *Shield) AssociateDRTLogBucketRequest(input *AssociateDRTLogBucketInput) // AssociateDRTLogBucket API operation for AWS Shield. // -// Authorizes the DDoS Response team (DRT) to access the specified Amazon S3 +// Authorizes the DDoS Response Team (DRT) to access the specified Amazon S3 // bucket containing your AWS WAF logs. You can associate up to 10 Amazon S3 // buckets with your subscription. // @@ -96,14 +96,14 @@ func (c *Shield) AssociateDRTLogBucketRequest(input *AssociateDRTLogBucketInput) // Exception that indicates that the parameters passed to the API are invalid. // // * AccessDeniedForDependencyException -// In order to grant the necessary access to the DDoS Response Team, the user -// submitting the request must have the iam:PassRole permission. 
This error +// In order to grant the necessary access to the DDoS Response Team (DRT), the +// user submitting the request must have the iam:PassRole permission. This error // indicates the user did not have the appropriate permissions. For more information, // see Granting a User Permissions to Pass a Role to an AWS Service (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html). // // * OptimisticLockException -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. // // * ResourceNotFoundException // Exception indicating the specified resource does not exist. @@ -175,7 +175,7 @@ func (c *Shield) AssociateDRTRoleRequest(input *AssociateDRTRoleInput) (req *req // AssociateDRTRole API operation for AWS Shield. // -// Authorizes the DDoS Response team (DRT), using the specified role, to access +// Authorizes the DDoS Response Team (DRT), using the specified role, to access // your AWS account to assist with DDoS attack mitigation during potential attacks. // This enables the DRT to inspect your AWS WAF configuration and create or // update AWS WAF rules and web ACLs. @@ -224,14 +224,14 @@ func (c *Shield) AssociateDRTRoleRequest(input *AssociateDRTRoleInput) (req *req // Exception that indicates that the parameters passed to the API are invalid. // // * AccessDeniedForDependencyException -// In order to grant the necessary access to the DDoS Response Team, the user -// submitting the request must have the iam:PassRole permission. This error +// In order to grant the necessary access to the DDoS Response Team (DRT), the +// user submitting the request must have the iam:PassRole permission. This error // indicates the user did not have the appropriate permissions. For more information, // see Granting a User Permissions to Pass a Role to an AWS Service (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html). // // * OptimisticLockException -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. // // * ResourceNotFoundException // Exception indicating the specified resource does not exist. @@ -338,8 +338,8 @@ func (c *Shield) AssociateHealthCheckRequest(input *AssociateHealthCheckInput) ( // Exception that indicates that the parameters passed to the API are invalid. // // * OptimisticLockException -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/AssociateHealthCheck func (c *Shield) AssociateHealthCheck(input *AssociateHealthCheckInput) (*AssociateHealthCheckOutput, error) { @@ -363,6 +363,116 @@ func (c *Shield) AssociateHealthCheckWithContext(ctx aws.Context, input *Associa return out, req.Send() } +const opAssociateProactiveEngagementDetails = "AssociateProactiveEngagementDetails" + +// AssociateProactiveEngagementDetailsRequest generates a "aws/request.Request" representing the +// client's request for the AssociateProactiveEngagementDetails operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateProactiveEngagementDetails for more information on using the AssociateProactiveEngagementDetails +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateProactiveEngagementDetailsRequest method. +// req, resp := client.AssociateProactiveEngagementDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/AssociateProactiveEngagementDetails +func (c *Shield) AssociateProactiveEngagementDetailsRequest(input *AssociateProactiveEngagementDetailsInput) (req *request.Request, output *AssociateProactiveEngagementDetailsOutput) { + op := &request.Operation{ + Name: opAssociateProactiveEngagementDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateProactiveEngagementDetailsInput{} + } + + output = &AssociateProactiveEngagementDetailsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AssociateProactiveEngagementDetails API operation for AWS Shield. +// +// Initializes proactive engagement and sets the list of contacts for the DDoS +// Response Team (DRT) to use. You must provide at least one phone number in +// the emergency contact list. +// +// After you have initialized proactive engagement using this call, to disable +// or enable proactive engagement, use the calls DisableProactiveEngagement +// and EnableProactiveEngagement. +// +// This call defines the list of email addresses and phone numbers that the +// DDoS Response Team (DRT) can use to contact you for escalations to the DRT +// and to initiate proactive customer support. +// +// The contacts that you provide in the request replace any contacts that were +// already defined. If you already have contacts defined and want to use them, +// retrieve the list using DescribeEmergencyContactSettings and then provide +// it to this call. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Shield's +// API operation AssociateProactiveEngagementDetails for usage and error information. 
+// +// Returned Error Types: +// * InternalErrorException +// Exception that indicates that a problem occurred with the service infrastructure. +// You can retry the request. +// +// * InvalidOperationException +// Exception that indicates that the operation would not cause any change to +// occur. +// +// * InvalidParameterException +// Exception that indicates that the parameters passed to the API are invalid. +// +// * ResourceNotFoundException +// Exception indicating the specified resource does not exist. +// +// * OptimisticLockException +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/AssociateProactiveEngagementDetails +func (c *Shield) AssociateProactiveEngagementDetails(input *AssociateProactiveEngagementDetailsInput) (*AssociateProactiveEngagementDetailsOutput, error) { + req, out := c.AssociateProactiveEngagementDetailsRequest(input) + return out, req.Send() +} + +// AssociateProactiveEngagementDetailsWithContext is the same as AssociateProactiveEngagementDetails with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateProactiveEngagementDetails for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Shield) AssociateProactiveEngagementDetailsWithContext(ctx aws.Context, input *AssociateProactiveEngagementDetailsInput, opts ...request.Option) (*AssociateProactiveEngagementDetailsOutput, error) { + req, out := c.AssociateProactiveEngagementDetailsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateProtection = "CreateProtection" // CreateProtectionRequest generates a "aws/request.Request" representing the @@ -449,8 +559,8 @@ func (c *Shield) CreateProtectionRequest(input *CreateProtectionInput) (req *req // Exception indicating the specified resource already exists. // // * OptimisticLockException -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. // // * ResourceNotFoundException // Exception indicating the specified resource does not exist. @@ -524,15 +634,6 @@ func (c *Shield) CreateSubscriptionRequest(input *CreateSubscriptionInput) (req // // Activates AWS Shield Advanced for an account. // -// As part of this request you can specify EmergencySettings that automaticaly -// grant the DDoS response team (DRT) needed permissions to assist you during -// a suspected DDoS attack. For more information see Authorize the DDoS Response -// Team to Create Rules and Web ACLs on Your Behalf (https://docs.aws.amazon.com/waf/latest/developerguide/authorize-DRT.html). -// -// To use the services of the DRT, you must be subscribed to the Business Support -// plan (https://aws.amazon.com/premiumsupport/business-support/) or the Enterprise -// Support plan (https://aws.amazon.com/premiumsupport/enterprise-support/). 
-// // When you initally create a subscription, your subscription is set to be automatically // renewed at the end of the existing subscription period. You can change this // by submitting an UpdateSubscription request. @@ -637,8 +738,8 @@ func (c *Shield) DeleteProtectionRequest(input *DeleteProtectionInput) (req *req // Exception indicating the specified resource does not exist. // // * OptimisticLockException -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DeleteProtection func (c *Shield) DeleteProtection(input *DeleteProtectionInput) (*DeleteProtectionOutput, error) { @@ -892,7 +993,7 @@ func (c *Shield) DescribeDRTAccessRequest(input *DescribeDRTAccessInput) (req *r // DescribeDRTAccess API operation for AWS Shield. // // Returns the current role and list of Amazon S3 log buckets used by the DDoS -// Response team (DRT) to access your AWS account while assisting with attack +// Response Team (DRT) to access your AWS account while assisting with attack // mitigation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -976,8 +1077,9 @@ func (c *Shield) DescribeEmergencyContactSettingsRequest(input *DescribeEmergenc // DescribeEmergencyContactSettings API operation for AWS Shield. // -// Lists the email addresses that the DRT can use to contact you during a suspected -// attack. +// A list of email addresses and phone numbers that the DDoS Response Team (DRT) +// can use to contact you if you have proactive engagement enabled, for escalations +// to the DRT and to initiate proactive customer support. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1185,6 +1287,102 @@ func (c *Shield) DescribeSubscriptionWithContext(ctx aws.Context, input *Describ return out, req.Send() } +const opDisableProactiveEngagement = "DisableProactiveEngagement" + +// DisableProactiveEngagementRequest generates a "aws/request.Request" representing the +// client's request for the DisableProactiveEngagement operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DisableProactiveEngagement for more information on using the DisableProactiveEngagement +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DisableProactiveEngagementRequest method. 
+// req, resp := client.DisableProactiveEngagementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DisableProactiveEngagement +func (c *Shield) DisableProactiveEngagementRequest(input *DisableProactiveEngagementInput) (req *request.Request, output *DisableProactiveEngagementOutput) { + op := &request.Operation{ + Name: opDisableProactiveEngagement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableProactiveEngagementInput{} + } + + output = &DisableProactiveEngagementOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisableProactiveEngagement API operation for AWS Shield. +// +// Removes authorization from the DDoS Response Team (DRT) to notify contacts +// about escalations to the DRT and to initiate proactive customer support. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Shield's +// API operation DisableProactiveEngagement for usage and error information. +// +// Returned Error Types: +// * InternalErrorException +// Exception that indicates that a problem occurred with the service infrastructure. +// You can retry the request. +// +// * InvalidOperationException +// Exception that indicates that the operation would not cause any change to +// occur. +// +// * InvalidParameterException +// Exception that indicates that the parameters passed to the API are invalid. +// +// * ResourceNotFoundException +// Exception indicating the specified resource does not exist. +// +// * OptimisticLockException +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DisableProactiveEngagement +func (c *Shield) DisableProactiveEngagement(input *DisableProactiveEngagementInput) (*DisableProactiveEngagementOutput, error) { + req, out := c.DisableProactiveEngagementRequest(input) + return out, req.Send() +} + +// DisableProactiveEngagementWithContext is the same as DisableProactiveEngagement with the addition of +// the ability to pass a context and additional request options. +// +// See DisableProactiveEngagement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Shield) DisableProactiveEngagementWithContext(ctx aws.Context, input *DisableProactiveEngagementInput, opts ...request.Option) (*DisableProactiveEngagementOutput, error) { + req, out := c.DisableProactiveEngagementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDisassociateDRTLogBucket = "DisassociateDRTLogBucket" // DisassociateDRTLogBucketRequest generates a "aws/request.Request" representing the @@ -1230,7 +1428,7 @@ func (c *Shield) DisassociateDRTLogBucketRequest(input *DisassociateDRTLogBucket // DisassociateDRTLogBucket API operation for AWS Shield. 
// -// Removes the DDoS Response team's (DRT) access to the specified Amazon S3 +// Removes the DDoS Response Team's (DRT) access to the specified Amazon S3 // bucket containing your AWS WAF logs. // // To make a DisassociateDRTLogBucket request, you must be subscribed to the @@ -1260,14 +1458,14 @@ func (c *Shield) DisassociateDRTLogBucketRequest(input *DisassociateDRTLogBucket // The ARN of the role that you specifed does not exist. // // * AccessDeniedForDependencyException -// In order to grant the necessary access to the DDoS Response Team, the user -// submitting the request must have the iam:PassRole permission. This error +// In order to grant the necessary access to the DDoS Response Team (DRT), the +// user submitting the request must have the iam:PassRole permission. This error // indicates the user did not have the appropriate permissions. For more information, // see Granting a User Permissions to Pass a Role to an AWS Service (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html). // // * OptimisticLockException -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. // // * ResourceNotFoundException // Exception indicating the specified resource does not exist. @@ -1339,7 +1537,7 @@ func (c *Shield) DisassociateDRTRoleRequest(input *DisassociateDRTRoleInput) (re // DisassociateDRTRole API operation for AWS Shield. // -// Removes the DDoS Response team's (DRT) access to your AWS account. +// Removes the DDoS Response Team's (DRT) access to your AWS account. // // To make a DisassociateDRTRole request, you must be subscribed to the Business // Support plan (https://aws.amazon.com/premiumsupport/business-support/) or @@ -1365,8 +1563,8 @@ func (c *Shield) DisassociateDRTRoleRequest(input *DisassociateDRTRoleInput) (re // occur. // // * OptimisticLockException -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. // // * ResourceNotFoundException // Exception indicating the specified resource does not exist. @@ -1467,8 +1665,8 @@ func (c *Shield) DisassociateHealthCheckRequest(input *DisassociateHealthCheckIn // Exception indicating the specified resource does not exist. // // * OptimisticLockException -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/DisassociateHealthCheck func (c *Shield) DisassociateHealthCheck(input *DisassociateHealthCheckInput) (*DisassociateHealthCheckOutput, error) { @@ -1492,6 +1690,103 @@ func (c *Shield) DisassociateHealthCheckWithContext(ctx aws.Context, input *Disa return out, req.Send() } +const opEnableProactiveEngagement = "EnableProactiveEngagement" + +// EnableProactiveEngagementRequest generates a "aws/request.Request" representing the +// client's request for the EnableProactiveEngagement operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableProactiveEngagement for more information on using the EnableProactiveEngagement +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableProactiveEngagementRequest method. +// req, resp := client.EnableProactiveEngagementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/EnableProactiveEngagement +func (c *Shield) EnableProactiveEngagementRequest(input *EnableProactiveEngagementInput) (req *request.Request, output *EnableProactiveEngagementOutput) { + op := &request.Operation{ + Name: opEnableProactiveEngagement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableProactiveEngagementInput{} + } + + output = &EnableProactiveEngagementOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// EnableProactiveEngagement API operation for AWS Shield. +// +// Authorizes the DDoS Response Team (DRT) to use email and phone to notify +// contacts about escalations to the DRT and to initiate proactive customer +// support. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Shield's +// API operation EnableProactiveEngagement for usage and error information. +// +// Returned Error Types: +// * InternalErrorException +// Exception that indicates that a problem occurred with the service infrastructure. +// You can retry the request. +// +// * InvalidOperationException +// Exception that indicates that the operation would not cause any change to +// occur. +// +// * InvalidParameterException +// Exception that indicates that the parameters passed to the API are invalid. +// +// * ResourceNotFoundException +// Exception indicating the specified resource does not exist. +// +// * OptimisticLockException +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/EnableProactiveEngagement +func (c *Shield) EnableProactiveEngagement(input *EnableProactiveEngagementInput) (*EnableProactiveEngagementOutput, error) { + req, out := c.EnableProactiveEngagementRequest(input) + return out, req.Send() +} + +// EnableProactiveEngagementWithContext is the same as EnableProactiveEngagement with the addition of +// the ability to pass a context and additional request options. +// +// See EnableProactiveEngagement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Shield) EnableProactiveEngagementWithContext(ctx aws.Context, input *EnableProactiveEngagementInput, opts ...request.Option) (*EnableProactiveEngagementOutput, error) { + req, out := c.EnableProactiveEngagementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetSubscriptionState = "GetSubscriptionState" // GetSubscriptionStateRequest generates a "aws/request.Request" representing the @@ -1603,6 +1898,12 @@ func (c *Shield) ListAttacksRequest(input *ListAttacksInput) (req *request.Reque Name: opListAttacks, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -1660,6 +1961,58 @@ func (c *Shield) ListAttacksWithContext(ctx aws.Context, input *ListAttacksInput return out, req.Send() } +// ListAttacksPages iterates over the pages of a ListAttacks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAttacks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAttacks operation. +// pageNum := 0 +// err := client.ListAttacksPages(params, +// func(page *shield.ListAttacksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Shield) ListAttacksPages(input *ListAttacksInput, fn func(*ListAttacksOutput, bool) bool) error { + return c.ListAttacksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAttacksPagesWithContext same as ListAttacksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Shield) ListAttacksPagesWithContext(ctx aws.Context, input *ListAttacksInput, fn func(*ListAttacksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAttacksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAttacksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAttacksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListProtections = "ListProtections" // ListProtectionsRequest generates a "aws/request.Request" representing the @@ -1691,6 +2044,12 @@ func (c *Shield) ListProtectionsRequest(input *ListProtectionsInput) (req *reque Name: opListProtections, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -1747,6 +2106,58 @@ func (c *Shield) ListProtectionsWithContext(ctx aws.Context, input *ListProtecti return out, req.Send() } +// ListProtectionsPages iterates over the pages of a ListProtections operation, +// calling the "fn" function with the response data for each page. 
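// Illustrative aside, not part of the vendored diff: a minimal sketch of the new
// ListAttacksPages paginator added above, printing every attack ID across all pages.
// The empty input lists attacks without a time or resource filter.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/shield"
)

func main() {
	svc := shield.New(session.Must(session.NewSession()))

	err := svc.ListAttacksPages(&shield.ListAttacksInput{},
		func(page *shield.ListAttacksOutput, lastPage bool) bool {
			for _, a := range page.AttackSummaries {
				fmt.Println(aws.StringValue(a.AttackId))
			}
			return true // keep paging until the last page
		})
	if err != nil {
		log.Fatal(err)
	}
}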
To stop +// iterating, return false from the fn function. +// +// See ListProtections method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListProtections operation. +// pageNum := 0 +// err := client.ListProtectionsPages(params, +// func(page *shield.ListProtectionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Shield) ListProtectionsPages(input *ListProtectionsInput, fn func(*ListProtectionsOutput, bool) bool) error { + return c.ListProtectionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListProtectionsPagesWithContext same as ListProtectionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Shield) ListProtectionsPagesWithContext(ctx aws.Context, input *ListProtectionsInput, fn func(*ListProtectionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListProtectionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListProtectionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListProtectionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opUpdateEmergencyContactSettings = "UpdateEmergencyContactSettings" // UpdateEmergencyContactSettingsRequest generates a "aws/request.Request" representing the @@ -1792,8 +2203,10 @@ func (c *Shield) UpdateEmergencyContactSettingsRequest(input *UpdateEmergencyCon // UpdateEmergencyContactSettings API operation for AWS Shield. // -// Updates the details of the list of email addresses that the DRT can use to -// contact you during a suspected attack. +// Updates the details of the list of email addresses and phone numbers that +// the DDoS Response Team (DRT) can use to contact you if you have proactive +// engagement enabled, for escalations to the DRT and to initiate proactive +// customer support. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1811,8 +2224,8 @@ func (c *Shield) UpdateEmergencyContactSettingsRequest(input *UpdateEmergencyCon // Exception that indicates that the parameters passed to the API are invalid. // // * OptimisticLockException -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. // // * ResourceNotFoundException // Exception indicating the specified resource does not exist. @@ -1912,8 +2325,8 @@ func (c *Shield) UpdateSubscriptionRequest(input *UpdateSubscriptionInput) (req // Exception that indicates that the parameters passed to the API are invalid. // // * OptimisticLockException -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. 
+// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. // // See also, https://docs.aws.amazon.com/goto/WebAPI/shield-2016-06-02/UpdateSubscription func (c *Shield) UpdateSubscription(input *UpdateSubscriptionInput) (*UpdateSubscriptionOutput, error) { @@ -1940,8 +2353,8 @@ func (c *Shield) UpdateSubscriptionWithContext(ctx aws.Context, input *UpdateSub // Exception that indicates the specified AttackId does not exist, or the requester // does not have the appropriate permissions to access the AttackId. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -1958,17 +2371,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1976,31 +2389,31 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } -// In order to grant the necessary access to the DDoS Response Team, the user -// submitting the request must have the iam:PassRole permission. This error +// In order to grant the necessary access to the DDoS Response Team (DRT), the +// user submitting the request must have the iam:PassRole permission. This error // indicates the user did not have the appropriate permissions. For more information, // see Granting a User Permissions to Pass a Role to an AWS Service (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html). type AccessDeniedForDependencyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -2017,17 +2430,17 @@ func (s AccessDeniedForDependencyException) GoString() string { func newErrorAccessDeniedForDependencyException(v protocol.ResponseMetadata) error { return &AccessDeniedForDependencyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s AccessDeniedForDependencyException) Code() string { +func (s *AccessDeniedForDependencyException) Code() string { return "AccessDeniedForDependencyException" } // Message returns the exception's message. -func (s AccessDeniedForDependencyException) Message() string { +func (s *AccessDeniedForDependencyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -2035,22 +2448,22 @@ func (s AccessDeniedForDependencyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedForDependencyException) OrigErr() error { +func (s *AccessDeniedForDependencyException) OrigErr() error { return nil } -func (s AccessDeniedForDependencyException) Error() string { +func (s *AccessDeniedForDependencyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedForDependencyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedForDependencyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AccessDeniedForDependencyException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedForDependencyException) RequestID() string { + return s.RespMetadata.RequestID } type AssociateDRTLogBucketInput struct { @@ -2243,6 +2656,77 @@ func (s AssociateHealthCheckOutput) GoString() string { return s.String() } +type AssociateProactiveEngagementDetailsInput struct { + _ struct{} `type:"structure"` + + // A list of email addresses and phone numbers that the DDoS Response Team (DRT) + // can use to contact you for escalations to the DRT and to initiate proactive + // customer support. + // + // To enable proactive engagement, the contact list must include at least one + // phone number. + // + // The contacts that you provide here replace any contacts that were already + // defined. If you already have contacts defined and want to use them, retrieve + // the list using DescribeEmergencyContactSettings and then provide it here. + // + // EmergencyContactList is a required field + EmergencyContactList []*EmergencyContact `type:"list" required:"true"` +} + +// String returns the string representation +func (s AssociateProactiveEngagementDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateProactiveEngagementDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateProactiveEngagementDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateProactiveEngagementDetailsInput"} + if s.EmergencyContactList == nil { + invalidParams.Add(request.NewErrParamRequired("EmergencyContactList")) + } + if s.EmergencyContactList != nil { + for i, v := range s.EmergencyContactList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EmergencyContactList", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEmergencyContactList sets the EmergencyContactList field's value. 
+func (s *AssociateProactiveEngagementDetailsInput) SetEmergencyContactList(v []*EmergencyContact) *AssociateProactiveEngagementDetailsInput { + s.EmergencyContactList = v + return s +} + +type AssociateProactiveEngagementDetailsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AssociateProactiveEngagementDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateProactiveEngagementDetailsOutput) GoString() string { + return s.String() +} + // The details of a DDoS attack. type AttackDetail struct { _ struct{} `type:"structure"` @@ -2895,8 +3379,9 @@ func (s DescribeEmergencyContactSettingsInput) GoString() string { type DescribeEmergencyContactSettingsOutput struct { _ struct{} `type:"structure"` - // A list of email addresses that the DRT can use to contact you during a suspected - // attack. + // A list of email addresses and phone numbers that the DDoS Response Team (DRT) + // can use to contact you if you have proactive engagement enabled, for escalations + // to the DRT and to initiate proactive customer support. EmergencyContactList []*EmergencyContact `type:"list"` } @@ -3028,6 +3513,34 @@ func (s *DescribeSubscriptionOutput) SetSubscription(v *Subscription) *DescribeS return s } +type DisableProactiveEngagementInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableProactiveEngagementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableProactiveEngagementInput) GoString() string { + return s.String() +} + +type DisableProactiveEngagementOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DisableProactiveEngagementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableProactiveEngagementOutput) GoString() string { + return s.String() +} + type DisassociateDRTLogBucketInput struct { _ struct{} `type:"structure"` @@ -3185,15 +3698,22 @@ func (s DisassociateHealthCheckOutput) GoString() string { return s.String() } -// Contact information that the DRT can use to contact you during a suspected -// attack. +// Contact information that the DRT can use to contact you if you have proactive +// engagement enabled, for escalations to the DRT and to initiate proactive +// customer support. type EmergencyContact struct { _ struct{} `type:"structure"` - // An email address that the DRT can use to contact you during a suspected attack. + // Additional notes regarding the contact. + ContactNotes *string `min:"1" type:"string"` + + // The email address for the contact. // // EmailAddress is a required field EmailAddress *string `min:"1" type:"string" required:"true"` + + // The phone number for the contact. + PhoneNumber *string `min:"1" type:"string"` } // String returns the string representation @@ -3209,12 +3729,18 @@ func (s EmergencyContact) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *EmergencyContact) Validate() error { invalidParams := request.ErrInvalidParams{Context: "EmergencyContact"} + if s.ContactNotes != nil && len(*s.ContactNotes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ContactNotes", 1)) + } if s.EmailAddress == nil { invalidParams.Add(request.NewErrParamRequired("EmailAddress")) } if s.EmailAddress != nil && len(*s.EmailAddress) < 1 { invalidParams.Add(request.NewErrParamMinLen("EmailAddress", 1)) } + if s.PhoneNumber != nil && len(*s.PhoneNumber) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PhoneNumber", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3222,12 +3748,52 @@ func (s *EmergencyContact) Validate() error { return nil } +// SetContactNotes sets the ContactNotes field's value. +func (s *EmergencyContact) SetContactNotes(v string) *EmergencyContact { + s.ContactNotes = &v + return s +} + // SetEmailAddress sets the EmailAddress field's value. func (s *EmergencyContact) SetEmailAddress(v string) *EmergencyContact { s.EmailAddress = &v return s } +// SetPhoneNumber sets the PhoneNumber field's value. +func (s *EmergencyContact) SetPhoneNumber(v string) *EmergencyContact { + s.PhoneNumber = &v + return s +} + +type EnableProactiveEngagementInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableProactiveEngagementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableProactiveEngagementInput) GoString() string { + return s.String() +} + +type EnableProactiveEngagementOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableProactiveEngagementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableProactiveEngagementOutput) GoString() string { + return s.String() +} + type GetSubscriptionStateInput struct { _ struct{} `type:"structure"` } @@ -3270,8 +3836,8 @@ func (s *GetSubscriptionStateOutput) SetSubscriptionState(v string) *GetSubscrip // Exception that indicates that a problem occurred with the service infrastructure. // You can retry the request. type InternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3288,17 +3854,17 @@ func (s InternalErrorException) GoString() string { func newErrorInternalErrorException(v protocol.ResponseMetadata) error { return &InternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalErrorException) Code() string { +func (s *InternalErrorException) Code() string { return "InternalErrorException" } // Message returns the exception's message. -func (s InternalErrorException) Message() string { +func (s *InternalErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3306,29 +3872,29 @@ func (s InternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalErrorException) OrigErr() error { +func (s *InternalErrorException) OrigErr() error { return nil } -func (s InternalErrorException) Error() string { +func (s *InternalErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalErrorException) RequestID() string { + return s.RespMetadata.RequestID } // Exception that indicates that the operation would not cause any change to // occur. type InvalidOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3345,17 +3911,17 @@ func (s InvalidOperationException) GoString() string { func newErrorInvalidOperationException(v protocol.ResponseMetadata) error { return &InvalidOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOperationException) Code() string { +func (s *InvalidOperationException) Code() string { return "InvalidOperationException" } // Message returns the exception's message. -func (s InvalidOperationException) Message() string { +func (s *InvalidOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3363,29 +3929,29 @@ func (s InvalidOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOperationException) OrigErr() error { +func (s *InvalidOperationException) OrigErr() error { return nil } -func (s InvalidOperationException) Error() string { +func (s *InvalidOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOperationException) RequestID() string { + return s.RespMetadata.RequestID } // Exception that indicates that the NextToken specified in the request is invalid. // Submit the request using the NextToken value that was returned in the response. type InvalidPaginationTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3402,17 +3968,17 @@ func (s InvalidPaginationTokenException) GoString() string { func newErrorInvalidPaginationTokenException(v protocol.ResponseMetadata) error { return &InvalidPaginationTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPaginationTokenException) Code() string { +func (s *InvalidPaginationTokenException) Code() string { return "InvalidPaginationTokenException" } // Message returns the exception's message. 
-func (s InvalidPaginationTokenException) Message() string { +func (s *InvalidPaginationTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3420,28 +3986,28 @@ func (s InvalidPaginationTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPaginationTokenException) OrigErr() error { +func (s *InvalidPaginationTokenException) OrigErr() error { return nil } -func (s InvalidPaginationTokenException) Error() string { +func (s *InvalidPaginationTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPaginationTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPaginationTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPaginationTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPaginationTokenException) RequestID() string { + return s.RespMetadata.RequestID } // Exception that indicates that the parameters passed to the API are invalid. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3458,17 +4024,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. -func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3476,29 +4042,29 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // Exception that indicates that the resource is invalid. You might not have // access to the resource, or the resource might not exist. 
type InvalidResourceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3515,17 +4081,17 @@ func (s InvalidResourceException) GoString() string { func newErrorInvalidResourceException(v protocol.ResponseMetadata) error { return &InvalidResourceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidResourceException) Code() string { +func (s *InvalidResourceException) Code() string { return "InvalidResourceException" } // Message returns the exception's message. -func (s InvalidResourceException) Message() string { +func (s *InvalidResourceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3533,22 +4099,22 @@ func (s InvalidResourceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResourceException) OrigErr() error { +func (s *InvalidResourceException) OrigErr() error { return nil } -func (s InvalidResourceException) Error() string { +func (s *InvalidResourceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidResourceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResourceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidResourceException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResourceException) RequestID() string { + return s.RespMetadata.RequestID } // Specifies how many protections of a given type you can create. @@ -3590,8 +4156,8 @@ func (s *Limit) SetType(v string) *Limit { // // Limit is the threshold that would be exceeded. type LimitsExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Limit *int64 `type:"long"` @@ -3612,17 +4178,17 @@ func (s LimitsExceededException) GoString() string { func newErrorLimitsExceededException(v protocol.ResponseMetadata) error { return &LimitsExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitsExceededException) Code() string { +func (s *LimitsExceededException) Code() string { return "LimitsExceededException" } // Message returns the exception's message. -func (s LimitsExceededException) Message() string { +func (s *LimitsExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3630,22 +4196,22 @@ func (s LimitsExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitsExceededException) OrigErr() error { +func (s *LimitsExceededException) OrigErr() error { return nil } -func (s LimitsExceededException) Error() string { +func (s *LimitsExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s LimitsExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitsExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitsExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitsExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListAttacksInput struct { @@ -3872,8 +4438,8 @@ func (s *ListProtectionsOutput) SetProtections(v []*Protection) *ListProtections // of your subscription. This exception indicates that you are attempting to // change AutoRenew prior to that period. type LockedSubscriptionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3890,17 +4456,17 @@ func (s LockedSubscriptionException) GoString() string { func newErrorLockedSubscriptionException(v protocol.ResponseMetadata) error { return &LockedSubscriptionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LockedSubscriptionException) Code() string { +func (s *LockedSubscriptionException) Code() string { return "LockedSubscriptionException" } // Message returns the exception's message. -func (s LockedSubscriptionException) Message() string { +func (s *LockedSubscriptionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3908,22 +4474,22 @@ func (s LockedSubscriptionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LockedSubscriptionException) OrigErr() error { +func (s *LockedSubscriptionException) OrigErr() error { return nil } -func (s LockedSubscriptionException) Error() string { +func (s *LockedSubscriptionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LockedSubscriptionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LockedSubscriptionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LockedSubscriptionException) RequestID() string { - return s.respMetadata.RequestID +func (s *LockedSubscriptionException) RequestID() string { + return s.RespMetadata.RequestID } // The mitigation applied to a DDoS attack. @@ -3952,8 +4518,8 @@ func (s *Mitigation) SetMitigationName(v string) *Mitigation { // The ARN of the role that you specifed does not exist. type NoAssociatedRoleException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3970,17 +4536,17 @@ func (s NoAssociatedRoleException) GoString() string { func newErrorNoAssociatedRoleException(v protocol.ResponseMetadata) error { return &NoAssociatedRoleException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NoAssociatedRoleException) Code() string { +func (s *NoAssociatedRoleException) Code() string { return "NoAssociatedRoleException" } // Message returns the exception's message. 
-func (s NoAssociatedRoleException) Message() string { +func (s *NoAssociatedRoleException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3988,29 +4554,29 @@ func (s NoAssociatedRoleException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NoAssociatedRoleException) OrigErr() error { +func (s *NoAssociatedRoleException) OrigErr() error { return nil } -func (s NoAssociatedRoleException) Error() string { +func (s *NoAssociatedRoleException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NoAssociatedRoleException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NoAssociatedRoleException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NoAssociatedRoleException) RequestID() string { - return s.respMetadata.RequestID +func (s *NoAssociatedRoleException) RequestID() string { + return s.RespMetadata.RequestID } -// Exception that indicates that the protection state has been modified by another -// client. You can retry the request. +// Exception that indicates that the resource state has been modified by another +// client. Retrieve the resource and then retry your request. type OptimisticLockException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4027,17 +4593,17 @@ func (s OptimisticLockException) GoString() string { func newErrorOptimisticLockException(v protocol.ResponseMetadata) error { return &OptimisticLockException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OptimisticLockException) Code() string { +func (s *OptimisticLockException) Code() string { return "OptimisticLockException" } // Message returns the exception's message. -func (s OptimisticLockException) Message() string { +func (s *OptimisticLockException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4045,22 +4611,22 @@ func (s OptimisticLockException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OptimisticLockException) OrigErr() error { +func (s *OptimisticLockException) OrigErr() error { return nil } -func (s OptimisticLockException) Error() string { +func (s *OptimisticLockException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OptimisticLockException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OptimisticLockException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OptimisticLockException) RequestID() string { - return s.respMetadata.RequestID +func (s *OptimisticLockException) RequestID() string { + return s.RespMetadata.RequestID } // An object that represents a resource that is under DDoS protection. @@ -4117,8 +4683,8 @@ func (s *Protection) SetResourceArn(v string) *Protection { // Exception indicating the specified resource already exists. 
type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4135,17 +4701,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4153,28 +4719,28 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // Exception indicating the specified resource does not exist. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -4191,17 +4757,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4209,22 +4775,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The attack information for the specified SubResource. @@ -4297,6 +4863,17 @@ type Subscription struct { // Specifies how many protections of a given type you can create. Limits []*Limit `type:"list"` + // If ENABLED, the DDoS Response Team (DRT) will use email and phone to notify + // contacts about escalations to the DRT and to initiate proactive customer + // support. + // + // If PENDING, you have requested proactive engagement and the request is pending. + // The status changes to ENABLED when your request is fully processed. + // + // If DISABLED, the DRT will not proactively notify contacts about escalations + // or to initiate proactive customer support. + ProactiveEngagementStatus *string `type:"string" enum:"ProactiveEngagementStatus"` + // The start time of the subscription, in Unix time in seconds. For more information // see timestamp (http://docs.aws.amazon.com/cli/latest/userguide/cli-using-param.html#parameter-types). StartTime *time.Time `type:"timestamp"` @@ -4333,6 +4910,12 @@ func (s *Subscription) SetLimits(v []*Limit) *Subscription { return s } +// SetProactiveEngagementStatus sets the ProactiveEngagementStatus field's value. +func (s *Subscription) SetProactiveEngagementStatus(v string) *Subscription { + s.ProactiveEngagementStatus = &v + return s +} + // SetStartTime sets the StartTime field's value. func (s *Subscription) SetStartTime(v time.Time) *Subscription { s.StartTime = &v @@ -4487,8 +5070,12 @@ func (s *TimeRange) SetToExclusive(v time.Time) *TimeRange { type UpdateEmergencyContactSettingsInput struct { _ struct{} `type:"structure"` - // A list of email addresses that the DRT can use to contact you during a suspected - // attack. + // A list of email addresses and phone numbers that the DDoS Response Team (DRT) + // can use to contact you if you have proactive engagement enabled, for escalations + // to the DRT and to initiate proactive customer support. + // + // If you have proactive engagement enabled, the contact list must include at + // least one phone number. 
EmergencyContactList []*EmergencyContact `type:"list"` } @@ -4591,6 +5178,14 @@ const ( AttackLayerApplication = "APPLICATION" ) +// AttackLayer_Values returns all elements of the AttackLayer enum +func AttackLayer_Values() []string { + return []string{ + AttackLayerNetwork, + AttackLayerApplication, + } +} + const ( // AttackPropertyIdentifierDestinationUrl is a AttackPropertyIdentifier enum value AttackPropertyIdentifierDestinationUrl = "DESTINATION_URL" @@ -4617,6 +5212,20 @@ const ( AttackPropertyIdentifierWordpressPingbackSource = "WORDPRESS_PINGBACK_SOURCE" ) +// AttackPropertyIdentifier_Values returns all elements of the AttackPropertyIdentifier enum +func AttackPropertyIdentifier_Values() []string { + return []string{ + AttackPropertyIdentifierDestinationUrl, + AttackPropertyIdentifierReferrer, + AttackPropertyIdentifierSourceAsn, + AttackPropertyIdentifierSourceCountry, + AttackPropertyIdentifierSourceIpAddress, + AttackPropertyIdentifierSourceUserAgent, + AttackPropertyIdentifierWordpressPingbackReflector, + AttackPropertyIdentifierWordpressPingbackSource, + } +} + const ( // AutoRenewEnabled is a AutoRenew enum value AutoRenewEnabled = "ENABLED" @@ -4625,6 +5234,34 @@ const ( AutoRenewDisabled = "DISABLED" ) +// AutoRenew_Values returns all elements of the AutoRenew enum +func AutoRenew_Values() []string { + return []string{ + AutoRenewEnabled, + AutoRenewDisabled, + } +} + +const ( + // ProactiveEngagementStatusEnabled is a ProactiveEngagementStatus enum value + ProactiveEngagementStatusEnabled = "ENABLED" + + // ProactiveEngagementStatusDisabled is a ProactiveEngagementStatus enum value + ProactiveEngagementStatusDisabled = "DISABLED" + + // ProactiveEngagementStatusPending is a ProactiveEngagementStatus enum value + ProactiveEngagementStatusPending = "PENDING" +) + +// ProactiveEngagementStatus_Values returns all elements of the ProactiveEngagementStatus enum +func ProactiveEngagementStatus_Values() []string { + return []string{ + ProactiveEngagementStatusEnabled, + ProactiveEngagementStatusDisabled, + ProactiveEngagementStatusPending, + } +} + const ( // SubResourceTypeIp is a SubResourceType enum value SubResourceTypeIp = "IP" @@ -4633,6 +5270,14 @@ const ( SubResourceTypeUrl = "URL" ) +// SubResourceType_Values returns all elements of the SubResourceType enum +func SubResourceType_Values() []string { + return []string{ + SubResourceTypeIp, + SubResourceTypeUrl, + } +} + const ( // SubscriptionStateActive is a SubscriptionState enum value SubscriptionStateActive = "ACTIVE" @@ -4641,6 +5286,14 @@ const ( SubscriptionStateInactive = "INACTIVE" ) +// SubscriptionState_Values returns all elements of the SubscriptionState enum +func SubscriptionState_Values() []string { + return []string{ + SubscriptionStateActive, + SubscriptionStateInactive, + } +} + const ( // UnitBits is a Unit enum value UnitBits = "BITS" @@ -4654,3 +5307,13 @@ const ( // UnitRequests is a Unit enum value UnitRequests = "REQUESTS" ) + +// Unit_Values returns all elements of the Unit enum +func Unit_Values() []string { + return []string{ + UnitBits, + UnitBytes, + UnitPackets, + UnitRequests, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/errors.go b/vendor/github.com/aws/aws-sdk-go/service/shield/errors.go index a0af389f6..c30b5cc6a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/shield/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/shield/errors.go @@ -18,8 +18,8 @@ const ( // ErrCodeAccessDeniedForDependencyException for service response error code // 
"AccessDeniedForDependencyException". // - // In order to grant the necessary access to the DDoS Response Team, the user - // submitting the request must have the iam:PassRole permission. This error + // In order to grant the necessary access to the DDoS Response Team (DRT), the + // user submitting the request must have the iam:PassRole permission. This error // indicates the user did not have the appropriate permissions. For more information, // see Granting a User Permissions to Pass a Role to an AWS Service (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html). ErrCodeAccessDeniedForDependencyException = "AccessDeniedForDependencyException" @@ -86,8 +86,8 @@ const ( // ErrCodeOptimisticLockException for service response error code // "OptimisticLockException". // - // Exception that indicates that the protection state has been modified by another - // client. You can retry the request. + // Exception that indicates that the resource state has been modified by another + // client. Retrieve the resource and then retry your request. ErrCodeOptimisticLockException = "OptimisticLockException" // ErrCodeResourceAlreadyExistsException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/service.go b/vendor/github.com/aws/aws-sdk-go/service/shield/service.go index a7930acdd..a831fb21d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/shield/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/shield/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go index 15a54d898..e71bb4bb7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol/query" - v2 "github.com/aws/aws-sdk-go/private/signer/v2" + "github.com/aws/aws-sdk-go/private/signer/v2" ) // SimpleDB provides the API operation methods for making requests to diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go index 780b18843..cc192c979 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go @@ -341,22 +341,33 @@ func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationI // CreatePlatformApplication API operation for Amazon Simple Notification Service. // // Creates a platform application object for one of the supported push notification -// services, such as APNS and FCM, to which devices and mobile apps may register. -// You must specify PlatformPrincipal and PlatformCredential attributes when -// using the CreatePlatformApplication action. The PlatformPrincipal is received -// from the notification service. For APNS/APNS_SANDBOX, PlatformPrincipal is -// "SSL certificate". For FCM, PlatformPrincipal is not applicable. For ADM, -// PlatformPrincipal is "client id". The PlatformCredential is also received -// from the notification service. 
For WNS, PlatformPrincipal is "Package Security -// Identifier". For MPNS, PlatformPrincipal is "TLS certificate". For Baidu, -// PlatformPrincipal is "API key". -// -// For APNS/APNS_SANDBOX, PlatformCredential is "private key". For FCM, PlatformCredential -// is "API key". For ADM, PlatformCredential is "client secret". For WNS, PlatformCredential -// is "secret key". For MPNS, PlatformCredential is "private key". For Baidu, -// PlatformCredential is "secret key". The PlatformApplicationArn that is returned -// when using CreatePlatformApplication is then used as an attribute for the -// CreatePlatformEndpoint action. +// services, such as APNS and GCM (Firebase Cloud Messaging), to which devices +// and mobile apps may register. You must specify PlatformPrincipal and PlatformCredential +// attributes when using the CreatePlatformApplication action. +// +// PlatformPrincipal and PlatformCredential are received from the notification +// service. +// +// * For ADM, PlatformPrincipal is client id and PlatformCredential is client +// secret. +// +// * For Baidu, PlatformPrincipal is API key and PlatformCredential is secret +// key. +// +// * For APNS and APNS_SANDBOX, PlatformPrincipal is SSL certificate and +// PlatformCredential is private key. +// +// * For GCM (Firebase Cloud Messaging), there is no PlatformPrincipal and +// the PlatformCredential is API key. +// +// * For MPNS, PlatformPrincipal is TLS certificate and PlatformCredential +// is private key. +// +// * For WNS, PlatformPrincipal is Package Security Identifier and PlatformCredential +// is secret key. +// +// You can use the returned PlatformApplicationArn as an attribute for the CreatePlatformEndpoint +// action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -442,11 +453,10 @@ func (c *SNS) CreatePlatformEndpointRequest(input *CreatePlatformEndpointInput) // CreatePlatformEndpoint API operation for Amazon Simple Notification Service. // // Creates an endpoint for a device and mobile app on one of the supported push -// notification services, such as FCM and APNS. CreatePlatformEndpoint requires -// the PlatformApplicationArn that is returned from CreatePlatformApplication. -// The EndpointArn that is returned when using CreatePlatformEndpoint can then -// be used by the Publish action to send a message to a mobile app or by the -// Subscribe action for subscription to a topic. The CreatePlatformEndpoint +// notification services, such as GCM (Firebase Cloud Messaging) and APNS. CreatePlatformEndpoint +// requires the PlatformApplicationArn that is returned from CreatePlatformApplication. +// You can use the returned EndpointArn to send a message to a mobile app or +// by the Subscribe action for subscription to a topic. The CreatePlatformEndpoint // action is idempotent, so if the requester already owns an endpoint with the // same device token and attributes, that endpoint's ARN is returned without // creating a new endpoint. For more information, see Using Amazon SNS Mobile @@ -746,8 +756,8 @@ func (c *SNS) DeletePlatformApplicationRequest(input *DeletePlatformApplicationI // DeletePlatformApplication API operation for Amazon Simple Notification Service. // // Deletes a platform application object for one of the supported push notification -// services, such as APNS and FCM. 
For more information, see Using Amazon SNS -// Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// services, such as APNS and GCM (Firebase Cloud Messaging). For more information, +// see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -937,8 +947,8 @@ func (c *SNS) GetEndpointAttributesRequest(input *GetEndpointAttributesInput) (r // GetEndpointAttributes API operation for Amazon Simple Notification Service. // // Retrieves the endpoint attributes for a device on one of the supported push -// notification services, such as FCM and APNS. For more information, see Using -// Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// notification services, such as GCM (Firebase Cloud Messaging) and APNS. For +// more information, see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1027,8 +1037,8 @@ func (c *SNS) GetPlatformApplicationAttributesRequest(input *GetPlatformApplicat // GetPlatformApplicationAttributes API operation for Amazon Simple Notification Service. // // Retrieves the attributes of the platform application object for the supported -// push notification services, such as APNS and FCM. For more information, see -// Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// push notification services, such as APNS and GCM (Firebase Cloud Messaging). +// For more information, see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1395,12 +1405,13 @@ func (c *SNS) ListEndpointsByPlatformApplicationRequest(input *ListEndpointsByPl // ListEndpointsByPlatformApplication API operation for Amazon Simple Notification Service. // // Lists the endpoints and endpoint attributes for devices in a supported push -// notification service, such as FCM and APNS. The results for ListEndpointsByPlatformApplication -// are paginated and return a limited list of endpoints, up to 100. If additional -// records are available after the first page results, then a NextToken string -// will be returned. To receive the next page, you call ListEndpointsByPlatformApplication -// again using the NextToken string received from the previous call. When there -// are no more records to return, NextToken will be null. For more information, +// notification service, such as GCM (Firebase Cloud Messaging) and APNS. The +// results for ListEndpointsByPlatformApplication are paginated and return a +// limited list of endpoints, up to 100. If additional records are available +// after the first page results, then a NextToken string will be returned. To +// receive the next page, you call ListEndpointsByPlatformApplication again +// using the NextToken string received from the previous call. When there are +// no more records to return, NextToken will be null. 
For more information, // see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). // // This action is throttled at 30 transactions per second (TPS). @@ -1647,13 +1658,13 @@ func (c *SNS) ListPlatformApplicationsRequest(input *ListPlatformApplicationsInp // ListPlatformApplications API operation for Amazon Simple Notification Service. // // Lists the platform application objects for the supported push notification -// services, such as APNS and FCM. The results for ListPlatformApplications -// are paginated and return a limited list of applications, up to 100. If additional -// records are available after the first page results, then a NextToken string -// will be returned. To receive the next page, you call ListPlatformApplications -// using the NextToken string received from the previous call. When there are -// no more records to return, NextToken will be null. For more information, -// see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// services, such as APNS and GCM (Firebase Cloud Messaging). The results for +// ListPlatformApplications are paginated and return a limited list of applications, +// up to 100. If additional records are available after the first page results, +// then a NextToken string will be returned. To receive the next page, you call +// ListPlatformApplications using the NextToken string received from the previous +// call. When there are no more records to return, NextToken will be null. For +// more information, see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). // // This action is throttled at 15 transactions per second (TPS). // @@ -2426,8 +2437,9 @@ func (c *SNS) PublishRequest(input *PublishInput) (req *request.Request, output // Publish API operation for Amazon Simple Notification Service. // -// Sends a message to an Amazon SNS topic or sends a text message (SMS message) -// directly to a phone number. +// Sends a message to an Amazon SNS topic, a text message (SMS message) directly +// to a phone number, or a message to a mobile platform endpoint (when you specify +// the TargetArn). // // If you send a message to a topic, Amazon SNS delivers the message to each // endpoint that is subscribed to the topic. The format of the message depends @@ -2444,6 +2456,8 @@ func (c *SNS) PublishRequest(input *PublishInput) (req *request.Request, output // For more information about formatting messages, see Send Custom Platform-Specific // Payloads in Messages to Mobile Devices (https://docs.aws.amazon.com/sns/latest/dg/mobile-push-send-custommessage.html). // +// You can publish messages only to topics and endpoints in the same AWS Region. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -2660,8 +2674,8 @@ func (c *SNS) SetEndpointAttributesRequest(input *SetEndpointAttributesInput) (r // SetEndpointAttributes API operation for Amazon Simple Notification Service. // // Sets the attributes for an endpoint for a device on one of the supported -// push notification services, such as FCM and APNS. For more information, see -// Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// push notification services, such as GCM (Firebase Cloud Messaging) and APNS. 
+// For more information, see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2751,8 +2765,8 @@ func (c *SNS) SetPlatformApplicationAttributesRequest(input *SetPlatformApplicat // SetPlatformApplicationAttributes API operation for Amazon Simple Notification Service. // // Sets the attributes of the platform application object for the supported -// push notification services, such as APNS and FCM. For more information, see -// Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// push notification services, such as APNS and GCM (Firebase Cloud Messaging). +// For more information, see Using Amazon SNS Mobile Push Notifications (https://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). // For information on configuring attributes for message delivery status, see // Using Amazon SNS Application Attributes for Message Delivery Status (https://docs.aws.amazon.com/sns/latest/dg/sns-msg-status.html). // @@ -3126,10 +3140,12 @@ func (c *SNS) SubscribeRequest(input *SubscribeInput) (req *request.Request, out // Subscribe API operation for Amazon Simple Notification Service. // -// Prepares to subscribe an endpoint by sending the endpoint a confirmation -// message. To actually create a subscription, the endpoint owner must call -// the ConfirmSubscription action with the token from the confirmation message. -// Confirmation tokens are valid for three days. +// Subscribes an endpoint to an Amazon SNS topic. If the endpoint type is HTTP/S +// or email, or if the endpoint and the topic are not in the same AWS account, +// the endpoint owner must the ConfirmSubscription action to confirm the subscription. +// +// You call the ConfirmSubscription action with the token from the subscription +// response. Confirmation tokens are valid for three days. // // This action is throttled at 100 transactions per second (TPS). // @@ -3782,7 +3798,7 @@ type CreatePlatformApplicationInput struct { Name *string `type:"string" required:"true"` // The following platforms are supported: ADM (Amazon Device Messaging), APNS - // (Apple Push Notification Service), APNS_SANDBOX, and FCM (Firebase Cloud + // (Apple Push Notification Service), APNS_SANDBOX, and GCM (Firebase Cloud // Messaging). // // Platform is a required field @@ -3880,8 +3896,9 @@ type CreatePlatformEndpointInput struct { // Unique identifier created by the notification service for an app on a device. // The specific name for Token will vary, depending on which notification service // is being used. For example, when using APNS as the notification service, - // you need the device token. Alternatively, when using FCM or ADM, the device - // token equivalent is called the registration ID. + // you need the device token. Alternatively, when using GCM (Firebase Cloud + // Messaging) or ADM, the device token equivalent is called the registration + // ID. // // Token is a required field Token *string `type:"string" required:"true"` @@ -4531,6 +4548,8 @@ type GetSubscriptionAttributesOutput struct { // account system defaults. // // * FilterPolicy – The filter policy JSON that is assigned to the subscription. 
+ // For more information, see Amazon SNS Message Filtering (https://docs.aws.amazon.com/sns/latest/dg/sns-message-filtering.html) + // in the Amazon SNS Developer Guide. // // * Owner – The AWS account ID of the subscription's owner. // @@ -4636,7 +4655,7 @@ type GetTopicAttributesOutput struct { // // * TopicArn – The topic's ARN. // - // * EffectiveDeliveryPolicy – Yhe JSON serialization of the effective + // * EffectiveDeliveryPolicy – The JSON serialization of the effective // delivery policy, taking system defaults into account. // // The following attribute applies only to server-side-encryption (https://docs.aws.amazon.com/sns/latest/dg/sns-server-side-encryption.html): @@ -5641,27 +5660,27 @@ type SetPlatformApplicationAttributesInput struct { // the following: // // * PlatformCredential – The credential received from the notification - // service. For APNS/APNS_SANDBOX, PlatformCredential is private key. For - // FCM, PlatformCredential is "API key". For ADM, PlatformCredential is "client - // secret". + // service. For APNS and APNS_SANDBOX, PlatformCredential is private key. + // For GCM (Firebase Cloud Messaging), PlatformCredential is API key. For + // ADM, PlatformCredential is client secret. // // * PlatformPrincipal – The principal received from the notification service. - // For APNS/APNS_SANDBOX, PlatformPrincipal is SSL certificate. For FCM, - // PlatformPrincipal is not applicable. For ADM, PlatformPrincipal is "client - // id". + // For APNS and APNS_SANDBOX, PlatformPrincipal is SSL certificate. For GCM + // (Firebase Cloud Messaging), there is no PlatformPrincipal. For ADM, PlatformPrincipal + // is client id. // // * EventEndpointCreated – Topic ARN to which EndpointCreated event notifications - // should be sent. + // are sent. // // * EventEndpointDeleted – Topic ARN to which EndpointDeleted event notifications - // should be sent. + // are sent. // // * EventEndpointUpdated – Topic ARN to which EndpointUpdate event notifications - // should be sent. + // are sent. // // * EventDeliveryFailure – Topic ARN to which DeliveryFailure event notifications - // should be sent upon Direct Publish delivery failure (permanent) to one - // of the application's endpoints. + // are sent upon Direct Publish delivery failure (permanent) to one of the + // application's endpoints. // // * SuccessFeedbackRoleArn – IAM role ARN used to give Amazon SNS write // access to use CloudWatch Logs on your behalf. @@ -6083,9 +6102,11 @@ type SubscribeInput struct { // The endpoint that you want to receive notifications. Endpoints vary by protocol: // - // * For the http protocol, the endpoint is an URL beginning with http:// + // * For the http protocol, the (public) endpoint is a URL beginning with + // http:// // - // * For the https protocol, the endpoint is a URL beginning with https:// + // * For the https protocol, the (public) endpoint is a URL beginning with + // https:// // // * For the email protocol, the endpoint is an email address // @@ -6128,16 +6149,12 @@ type SubscribeInput struct { // Sets whether the response from the Subscribe request includes the subscription // ARN, even if the subscription is not yet confirmed. // - // * If you have the subscription ARN returned, the response includes the - // ARN in all cases, even if the subscription is not yet confirmed. 
- // - // * If you don't have the subscription ARN returned, in addition to the - // ARN for confirmed subscriptions, the response also includes the pending - // subscription ARN value for subscriptions that aren't yet confirmed. A - // subscription becomes confirmed when the subscriber calls the ConfirmSubscription - // action with a confirmation token. - // - // If you set this parameter to true, . + // If you set this parameter to true, the response includes the ARN in all cases, + // even if the subscription is not yet confirmed. In addition to the ARN for + // confirmed subscriptions, the response also includes the pending subscription + // ARN value for subscriptions that aren't yet confirmed. A subscription becomes + // confirmed when the subscriber calls the ConfirmSubscription action with a + // confirmation token. // // The default value is false. ReturnSubscriptionArn *bool `type:"boolean"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go index f5aa06567..21b616043 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go index bc087b5b6..45285d0a1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go @@ -81,9 +81,9 @@ func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Requ // param.n notation. Values of n are integers starting from 1. For example, // a parameter list with two elements looks like this: // -// &Attribute.1=first +// &AttributeName.1=first // -// &Attribute.2=second +// &AttributeName.2=second // // Cross-account permissions don't apply to this action. For more information, // see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) @@ -316,9 +316,9 @@ func (c *SQS) ChangeMessageVisibilityBatchRequest(input *ChangeMessageVisibility // param.n notation. Values of n are integers starting from 1. For example, // a parameter list with two elements looks like this: // -// &Attribute.1=first +// &AttributeName.1=first // -// &Attribute.2=second +// &AttributeName.2=second // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -407,7 +407,7 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, // CreateQueue API operation for Amazon Simple Queue Service. // // Creates a new standard or FIFO queue. You can pass one or more attributes -// in the request. Keep the following caveats in mind: +// in the request. Keep the following in mind: // // * If you don't specify the FifoQueue attribute, Amazon SQS creates a standard // queue. 
You can't change the queue type after you create it and you can't @@ -427,6 +427,9 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, // to the limits related to queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-queues.html) // and is unique within the scope of your queues. // +// After you create a queue, you must wait at least one second after the queue +// is created to be able to use the queue. +// // To get the queue URL, use the GetQueueUrl action. GetQueueUrl requires only // the QueueName parameter. be aware of existing queue names: // @@ -441,9 +444,9 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, // param.n notation. Values of n are integers starting from 1. For example, // a parameter list with two elements looks like this: // -// &Attribute.1=first +// &AttributeName.1=first // -// &Attribute.2=second +// &AttributeName.2=second // // Cross-account permissions don't apply to this action. For more information, // see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) @@ -646,9 +649,9 @@ func (c *SQS) DeleteMessageBatchRequest(input *DeleteMessageBatchInput) (req *re // param.n notation. Values of n are integers starting from 1. For example, // a parameter list with two elements looks like this: // -// &Attribute.1=first +// &AttributeName.1=first // -// &Attribute.2=second +// &AttributeName.2=second // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -738,7 +741,6 @@ func (c *SQS) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, // DeleteQueue API operation for Amazon Simple Queue Service. // // Deletes the queue specified by the QueueUrl, regardless of the queue's contents. -// If the specified queue doesn't exist, Amazon SQS returns a successful response. // // Be careful with the DeleteQueue action: When you delete a queue, any messages // in the queue are no longer available. @@ -832,14 +834,6 @@ func (c *SQS) GetQueueAttributesRequest(input *GetQueueAttributesInput) (req *re // To determine whether a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), // you can check whether QueueName ends with the .fifo suffix. // -// Some actions take lists of parameters. These lists are specified using the -// param.n notation. Values of n are integers starting from 1. For example, -// a parameter list with two elements looks like this: -// -// &Attribute.1=first -// -// &Attribute.2=second -// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
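Not part of the vendored diff: a minimal Go sketch of the CreateQueue flow documented above, including the new one-second wait after queue creation. It assumes credentials and region are picked up from the environment; the queue name and attribute values are illustrative.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	// Assumes credentials and region come from the environment or shared config.
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
	}))
	svc := sqs.New(sess)

	// Create a standard queue with a couple of the attributes documented above.
	out, err := svc.CreateQueue(&sqs.CreateQueueInput{
		QueueName: aws.String("example-queue"), // illustrative name
		Attributes: map[string]*string{
			"DelaySeconds":           aws.String("0"),
			"MessageRetentionPeriod": aws.String("345600"), // 4 days
		},
	})
	if err != nil {
		log.Fatalf("CreateQueue failed: %v", err)
	}

	// Per the updated doc comment, wait at least one second after the queue
	// is created before using it.
	time.Sleep(1 * time.Second)

	fmt.Println("queue URL:", aws.StringValue(out.QueueUrl))
}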
@@ -990,6 +984,12 @@ func (c *SQS) ListDeadLetterSourceQueuesRequest(input *ListDeadLetterSourceQueue Name: opListDeadLetterSourceQueues, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -1006,6 +1006,14 @@ func (c *SQS) ListDeadLetterSourceQueuesRequest(input *ListDeadLetterSourceQueue // Returns a list of your queues that have the RedrivePolicy queue attribute // configured with a dead-letter queue. // +// The ListDeadLetterSourceQueues methods supports pagination. Set parameter +// MaxResults in the request to specify the maximum number of results to be +// returned in the response. If you do not set MaxResults, the response includes +// a maximum of 1,000 results. If you set MaxResults and there are additional +// results to display, the response includes a value for NextToken. Use NextToken +// as a parameter in your next request to ListDeadLetterSourceQueues to receive +// the next page of results. +// // For more information about using dead-letter queues, see Using Amazon SQS // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // in the Amazon Simple Queue Service Developer Guide. @@ -1043,6 +1051,58 @@ func (c *SQS) ListDeadLetterSourceQueuesWithContext(ctx aws.Context, input *List return out, req.Send() } +// ListDeadLetterSourceQueuesPages iterates over the pages of a ListDeadLetterSourceQueues operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDeadLetterSourceQueues method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDeadLetterSourceQueues operation. +// pageNum := 0 +// err := client.ListDeadLetterSourceQueuesPages(params, +// func(page *sqs.ListDeadLetterSourceQueuesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SQS) ListDeadLetterSourceQueuesPages(input *ListDeadLetterSourceQueuesInput, fn func(*ListDeadLetterSourceQueuesOutput, bool) bool) error { + return c.ListDeadLetterSourceQueuesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDeadLetterSourceQueuesPagesWithContext same as ListDeadLetterSourceQueuesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SQS) ListDeadLetterSourceQueuesPagesWithContext(ctx aws.Context, input *ListDeadLetterSourceQueuesInput, fn func(*ListDeadLetterSourceQueuesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDeadLetterSourceQueuesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDeadLetterSourceQueuesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDeadLetterSourceQueuesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListQueueTags = "ListQueueTags" // ListQueueTagsRequest generates a "aws/request.Request" representing the @@ -1154,6 +1214,12 @@ func (c *SQS) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, o Name: opListQueues, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -1167,9 +1233,17 @@ func (c *SQS) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, o // ListQueues API operation for Amazon Simple Queue Service. // -// Returns a list of your queues. The maximum number of queues that can be returned -// is 1,000. If you specify a value for the optional QueueNamePrefix parameter, -// only queues with a name that begins with the specified value are returned. +// Returns a list of your queues in the current region. The response includes +// a maximum of 1,000 results. If you specify a value for the optional QueueNamePrefix +// parameter, only queues with a name that begins with the specified value are +// returned. +// +// The listQueues methods supports pagination. Set parameter MaxResults in the +// request to specify the maximum number of results to be returned in the response. +// If you do not set MaxResults, the response includes a maximum of 1,000 results. +// If you set MaxResults and there are additional results to display, the response +// includes a value for NextToken. Use NextToken as a parameter in your next +// request to listQueues to receive the next page of results. // // Cross-account permissions don't apply to this action. For more information, // see Grant Cross-Account Permissions to a Role and a User Name (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-customer-managed-policy-examples.html#grant-cross-account-permissions-to-role-and-user-name) @@ -1203,6 +1277,58 @@ func (c *SQS) ListQueuesWithContext(ctx aws.Context, input *ListQueuesInput, opt return out, req.Send() } +// ListQueuesPages iterates over the pages of a ListQueues operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListQueues method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListQueues operation. +// pageNum := 0 +// err := client.ListQueuesPages(params, +// func(page *sqs.ListQueuesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SQS) ListQueuesPages(input *ListQueuesInput, fn func(*ListQueuesOutput, bool) bool) error { + return c.ListQueuesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListQueuesPagesWithContext same as ListQueuesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SQS) ListQueuesPagesWithContext(ctx aws.Context, input *ListQueuesInput, fn func(*ListQueuesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListQueuesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListQueuesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListQueuesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opPurgeQueue = "PurgeQueue" // PurgeQueueRequest generates a "aws/request.Request" representing the @@ -1676,9 +1802,9 @@ func (c *SQS) SendMessageBatchRequest(input *SendMessageBatchInput) (req *reques // param.n notation. Values of n are integers starting from 1. For example, // a parameter list with two elements looks like this: // -// &Attribute.1=first +// &AttributeName.1=first // -// &Attribute.2=second +// &AttributeName.2=second // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2129,7 +2255,7 @@ type BatchResultErrorEntry struct { // A message explaining why the action failed on this entry. Message *string `type:"string"` - // Specifies whether the error happened due to the producer. + // Specifies whether the error happened due to the caller of the batch API action. // // SenderFault is a required field SenderFault *bool `type:"boolean" required:"true"` @@ -2290,7 +2416,10 @@ type ChangeMessageVisibilityBatchRequestEntry struct { // An identifier for this particular receipt handle used to communicate the // result. // - // The Ids of a batch request need to be unique within a request + // The Ids of a batch request need to be unique within a request. + // + // This identifier can have up to 80 characters. The following characters are + // accepted: alphanumeric characters, hyphens(-), and underscores (_). // // Id is a required field Id *string `type:"string" required:"true"` @@ -2390,7 +2519,7 @@ type ChangeMessageVisibilityInput struct { // ReceiptHandle is a required field ReceiptHandle *string `type:"string" required:"true"` - // The new value for the message's visibility timeout (in seconds). Values values: + // The new value for the message's visibility timeout (in seconds). Values range: // 0 to 43200. Maximum: 12 hours. // // VisibilityTimeout is a required field @@ -2466,41 +2595,41 @@ type CreateQueueInput struct { // The following lists the names, descriptions, and values of the special request // parameters that the CreateQueue action uses: // - // * DelaySeconds - The length of time, in seconds, for which the delivery + // * DelaySeconds – The length of time, in seconds, for which the delivery // of all messages in the queue is delayed. Valid values: An integer from // 0 to 900 seconds (15 minutes). Default: 0. // - // * MaximumMessageSize - The limit of how many bytes a message can contain + // * MaximumMessageSize – The limit of how many bytes a message can contain // before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes // (1 KiB) to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). // - // * MessageRetentionPeriod - The length of time, in seconds, for which Amazon - // SQS retains a message. Valid values: An integer from 60 seconds (1 minute) - // to 1,209,600 seconds (14 days). Default: 345,600 (4 days). 
+ // * MessageRetentionPeriod – The length of time, in seconds, for which + // Amazon SQS retains a message. Valid values: An integer from 60 seconds + // (1 minute) to 1,209,600 seconds (14 days). Default: 345,600 (4 days). // - // * Policy - The queue's policy. A valid AWS policy. For more information + // * Policy – The queue's policy. A valid AWS policy. For more information // about policy structure, see Overview of AWS IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) // in the Amazon IAM User Guide. // - // * ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for + // * ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for // which a ReceiveMessage action waits for a message to arrive. Valid values: // An integer from 0 to 20 (seconds). Default: 0. // - // * RedrivePolicy - The string that includes the parameters for the dead-letter - // queue functionality of the source queue. For more information about the - // redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter - // Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) + // * RedrivePolicy – The string that includes the parameters for the dead-letter + // queue functionality of the source queue as a JSON object. For more information + // about the redrive policy and dead-letter queues, see Using Amazon SQS + // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn - // - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon + // – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount - // - The number of times a message is delivered to the source queue before + // – The number of times a message is delivered to the source queue before // being moved to the dead-letter queue. When the ReceiveCount for a message // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message // to the dead-letter-queue. The dead-letter queue of a FIFO queue must also // be a FIFO queue. Similarly, the dead-letter queue of a standard queue // must also be a standard queue. // - // * VisibilityTimeout - The visibility timeout for the queue, in seconds. + // * VisibilityTimeout – The visibility timeout for the queue, in seconds. // Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For // more information about the visibility timeout, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) @@ -2508,15 +2637,15 @@ type CreateQueueInput struct { // // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // - // * KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) + // * KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) // for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, // the alias of a custom CMK can, for example, be alias/MyAlias . 
For more // examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) // in the AWS Key Management Service API Reference. // - // * KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which - // Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) + // * KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for + // which Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) // to encrypt or decrypt messages before calling AWS KMS again. An integer // representing seconds, between 60 seconds (1 minute) and 86,400 seconds // (24 hours). Default: 300 (5 minutes). A shorter time period provides better @@ -2526,15 +2655,15 @@ type CreateQueueInput struct { // // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // - // * FifoQueue - Designates a queue as FIFO. Valid values: true, false. If - // you don't specify the FifoQueue attribute, Amazon SQS creates a standard + // * FifoQueue – Designates a queue as FIFO. Valid values: true, false. + // If you don't specify the FifoQueue attribute, Amazon SQS creates a standard // queue. You can provide this attribute only during queue creation. You // can't change it for an existing queue. When you set this attribute, you // must also provide the MessageGroupId for your messages explicitly. For // more information, see FIFO Queue Logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) // in the Amazon Simple Queue Service Developer Guide. // - // * ContentBasedDeduplication - Enables content-based deduplication. Valid + // * ContentBasedDeduplication – Enables content-based deduplication. Valid // values: true, false. For more information, see Exactly-Once Processing // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. Every message must @@ -2771,7 +2900,10 @@ type DeleteMessageBatchRequestEntry struct { // An identifier for this particular receipt handle. This is used to communicate // the result. // - // The Ids of a batch request need to be unique within a request + // The Ids of a batch request need to be unique within a request. + // + // This identifier can have up to 80 characters. The following characters are + // accepted: alphanumeric characters, hyphens(-), and underscores (_). // // Id is a required field Id *string `type:"string" required:"true"` @@ -2979,79 +3111,84 @@ type GetQueueAttributesInput struct { // // The following attributes are supported: // - // * All - Returns all values. + // The ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, + // and ApproximateNumberOfMessagesVisible metrics may not achieve consistency + // until at least 1 minute after the producers stop sending messages. This period + // is required for the queue metadata to reach eventual consistency. + // + // * All – Returns all values. // - // * ApproximateNumberOfMessages - Returns the approximate number of messages + // * ApproximateNumberOfMessages – Returns the approximate number of messages // available for retrieval from the queue. 
// - // * ApproximateNumberOfMessagesDelayed - Returns the approximate number + // * ApproximateNumberOfMessagesDelayed – Returns the approximate number // of messages in the queue that are delayed and not available for reading // immediately. This can happen when the queue is configured as a delay queue // or when a message has been sent with a delay parameter. // - // * ApproximateNumberOfMessagesNotVisible - Returns the approximate number + // * ApproximateNumberOfMessagesNotVisible – Returns the approximate number // of messages that are in flight. Messages are considered to be in flight // if they have been sent to a client but have not yet been deleted or have // not yet reached the end of their visibility window. // - // * CreatedTimestamp - Returns the time when the queue was created in seconds - // (epoch time (http://en.wikipedia.org/wiki/Unix_time)). + // * CreatedTimestamp – Returns the time when the queue was created in + // seconds (epoch time (http://en.wikipedia.org/wiki/Unix_time)). // - // * DelaySeconds - Returns the default delay on the queue in seconds. + // * DelaySeconds – Returns the default delay on the queue in seconds. // - // * LastModifiedTimestamp - Returns the time when the queue was last changed + // * LastModifiedTimestamp – Returns the time when the queue was last changed // in seconds (epoch time (http://en.wikipedia.org/wiki/Unix_time)). // - // * MaximumMessageSize - Returns the limit of how many bytes a message can - // contain before Amazon SQS rejects it. + // * MaximumMessageSize – Returns the limit of how many bytes a message + // can contain before Amazon SQS rejects it. // - // * MessageRetentionPeriod - Returns the length of time, in seconds, for + // * MessageRetentionPeriod – Returns the length of time, in seconds, for // which Amazon SQS retains a message. // - // * Policy - Returns the policy of the queue. + // * Policy – Returns the policy of the queue. // - // * QueueArn - Returns the Amazon resource name (ARN) of the queue. + // * QueueArn – Returns the Amazon resource name (ARN) of the queue. // - // * ReceiveMessageWaitTimeSeconds - Returns the length of time, in seconds, + // * ReceiveMessageWaitTimeSeconds – Returns the length of time, in seconds, // for which the ReceiveMessage action waits for a message to arrive. // - // * RedrivePolicy - Returns the string that includes the parameters for - // dead-letter queue functionality of the source queue. For more information + // * RedrivePolicy – The string that includes the parameters for the dead-letter + // queue functionality of the source queue as a JSON object. For more information // about the redrive policy and dead-letter queues, see Using Amazon SQS // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn - // - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon + // – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount - // - The number of times a message is delivered to the source queue before + // – The number of times a message is delivered to the source queue before // being moved to the dead-letter queue. When the ReceiveCount for a message // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message // to the dead-letter-queue. 
// - // * VisibilityTimeout - Returns the visibility timeout for the queue. For - // more information about the visibility timeout, see Visibility Timeout + // * VisibilityTimeout – Returns the visibility timeout for the queue. + // For more information about the visibility timeout, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. // // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // - // * KmsMasterKeyId - Returns the ID of an AWS-managed customer master key - // (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms - // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). + // * KmsMasterKeyId – Returns the ID of an AWS-managed customer master + // key (CMK) for Amazon SQS or a custom CMK. For more information, see Key + // Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // - // * KmsDataKeyReusePeriodSeconds - Returns the length of time, in seconds, + // * KmsDataKeyReusePeriodSeconds – Returns the length of time, in seconds, // for which Amazon SQS can reuse a data key to encrypt or decrypt messages // before calling AWS KMS again. For more information, see How Does the Data // Key Reuse Period Work? (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-how-does-the-data-key-reuse-period-work). // // The following attributes apply only to FIFO (first-in-first-out) queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // - // * FifoQueue - Returns whether the queue is FIFO. For more information, + // * FifoQueue – Returns whether the queue is FIFO. For more information, // see FIFO Queue Logic (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-understanding-logic) // in the Amazon Simple Queue Service Developer Guide. To determine whether // a queue is FIFO (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html), // you can check whether QueueName ends with the .fifo suffix. // - // * ContentBasedDeduplication - Returns whether content-based deduplication + // * ContentBasedDeduplication – Returns whether content-based deduplication // is enabled for the queue. For more information, see Exactly-Once Processing // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. @@ -3202,6 +3339,13 @@ func (s *GetQueueUrlOutput) SetQueueUrl(v string) *GetQueueUrlOutput { type ListDeadLetterSourceQueuesInput struct { _ struct{} `type:"structure"` + // Maximum number of results to include in the response. Value range is 1 to + // 1000. You must set MaxResults to receive a value for NextToken in the response. + MaxResults *int64 `type:"integer"` + + // Pagination token to request the next set of results. + NextToken *string `type:"string"` + // The URL of a dead-letter queue. // // Queue URLs and names are case-sensitive. 
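Not part of the vendored diff: a minimal Go sketch showing the pagination support added above (the MaxResults/NextToken fields and the new ListQueuesPages helper). It assumes credentials and region come from the environment; the page callback returns true to keep iterating until the last page.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	sess := session.Must(session.NewSession()) // region/credentials from the environment
	svc := sqs.New(sess)

	// Page through all queues, 100 at a time, using the paginator added above.
	// MaxResults must be set for NextToken to be returned in each response.
	err := svc.ListQueuesPages(&sqs.ListQueuesInput{
		MaxResults: aws.Int64(100),
	}, func(page *sqs.ListQueuesOutput, lastPage bool) bool {
		for _, url := range page.QueueUrls {
			fmt.Println(aws.StringValue(url))
		}
		return true // continue until the last page
	})
	if err != nil {
		log.Fatalf("ListQueues failed: %v", err)
	}
}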
@@ -3233,6 +3377,18 @@ func (s *ListDeadLetterSourceQueuesInput) Validate() error { return nil } +// SetMaxResults sets the MaxResults field's value. +func (s *ListDeadLetterSourceQueuesInput) SetMaxResults(v int64) *ListDeadLetterSourceQueuesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListDeadLetterSourceQueuesInput) SetNextToken(v string) *ListDeadLetterSourceQueuesInput { + s.NextToken = &v + return s +} + // SetQueueUrl sets the QueueUrl field's value. func (s *ListDeadLetterSourceQueuesInput) SetQueueUrl(v string) *ListDeadLetterSourceQueuesInput { s.QueueUrl = &v @@ -3243,6 +3399,11 @@ func (s *ListDeadLetterSourceQueuesInput) SetQueueUrl(v string) *ListDeadLetterS type ListDeadLetterSourceQueuesOutput struct { _ struct{} `type:"structure"` + // Pagination token to include in the next request. Token value is null if there + // are no additional results to request, or if you did not set MaxResults in + // the request. + NextToken *string `type:"string"` + // A list of source queue URLs that have the RedrivePolicy queue attribute configured // with a dead-letter queue. // @@ -3260,6 +3421,12 @@ func (s ListDeadLetterSourceQueuesOutput) GoString() string { return s.String() } +// SetNextToken sets the NextToken field's value. +func (s *ListDeadLetterSourceQueuesOutput) SetNextToken(v string) *ListDeadLetterSourceQueuesOutput { + s.NextToken = &v + return s +} + // SetQueueUrls sets the QueueUrls field's value. func (s *ListDeadLetterSourceQueuesOutput) SetQueueUrls(v []*string) *ListDeadLetterSourceQueuesOutput { s.QueueUrls = v @@ -3330,6 +3497,13 @@ func (s *ListQueueTagsOutput) SetTags(v map[string]*string) *ListQueueTagsOutput type ListQueuesInput struct { _ struct{} `type:"structure"` + // Maximum number of results to include in the response. Value range is 1 to + // 1000. You must set MaxResults to receive a value for NextToken in the response. + MaxResults *int64 `type:"integer"` + + // Pagination token to request the next set of results. + NextToken *string `type:"string"` + // A string to use for filtering the list results. Only those queues whose name // begins with the specified string are returned. // @@ -3347,6 +3521,18 @@ func (s ListQueuesInput) GoString() string { return s.String() } +// SetMaxResults sets the MaxResults field's value. +func (s *ListQueuesInput) SetMaxResults(v int64) *ListQueuesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListQueuesInput) SetNextToken(v string) *ListQueuesInput { + s.NextToken = &v + return s +} + // SetQueueNamePrefix sets the QueueNamePrefix field's value. func (s *ListQueuesInput) SetQueueNamePrefix(v string) *ListQueuesInput { s.QueueNamePrefix = &v @@ -3357,7 +3543,13 @@ func (s *ListQueuesInput) SetQueueNamePrefix(v string) *ListQueuesInput { type ListQueuesOutput struct { _ struct{} `type:"structure"` - // A list of queue URLs, up to 1,000 entries. + // Pagination token to include in the next request. Token value is null if there + // are no additional results to request, or if you did not set MaxResults in + // the request. + NextToken *string `type:"string"` + + // A list of queue URLs, up to 1,000 entries, or the value of MaxResults that + // you sent in the request. QueueUrls []*string `locationNameList:"QueueUrl" type:"list" flattened:"true"` } @@ -3371,6 +3563,12 @@ func (s ListQueuesOutput) GoString() string { return s.String() } +// SetNextToken sets the NextToken field's value. 
+func (s *ListQueuesOutput) SetNextToken(v string) *ListQueuesOutput { + s.NextToken = &v + return s +} + // SetQueueUrls sets the QueueUrls field's value. func (s *ListQueuesOutput) SetQueueUrls(v []*string) *ListQueuesOutput { s.QueueUrls = v @@ -3416,7 +3614,7 @@ type Message struct { MD5OfMessageAttributes *string `type:"string"` // Each message attribute consists of a Name, Type, and Value. For more information, - // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) + // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) // in the Amazon Simple Queue Service Developer Guide. MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` @@ -3505,7 +3703,7 @@ type MessageAttributeValue struct { // Binary. For the Number data type, you must use StringValue. // // You can also append custom labels. For more information, see Amazon SQS Message - // Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) + // Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) // in the Amazon Simple Queue Service Developer Guide. // // DataType is a required field @@ -3593,7 +3791,7 @@ type MessageSystemAttributeValue struct { // Binary. For the Number data type, you must use StringValue. // // You can also append custom labels. For more information, see Amazon SQS Message - // Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) + // Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) // in the Amazon Simple Queue Service Developer Guide. // // DataType is a required field @@ -3720,31 +3918,31 @@ type ReceiveMessageInput struct { // A list of attributes that need to be returned along with each message. These // attributes include: // - // * All - Returns all values. + // * All – Returns all values. // - // * ApproximateFirstReceiveTimestamp - Returns the time the message was + // * ApproximateFirstReceiveTimestamp – Returns the time the message was // first received from the queue (epoch time (http://en.wikipedia.org/wiki/Unix_time) // in milliseconds). // - // * ApproximateReceiveCount - Returns the number of times a message has - // been received from the queue but not deleted. + // * ApproximateReceiveCount – Returns the number of times a message has + // been received across all queues but not deleted. // - // * AWSTraceHeader - Returns the AWS X-Ray trace header string. + // * AWSTraceHeader – Returns the AWS X-Ray trace header string. // // * SenderId For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R. // For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456. // - // * SentTimestamp - Returns the time the message was sent to the queue (epoch - // time (http://en.wikipedia.org/wiki/Unix_time) in milliseconds). + // * SentTimestamp – Returns the time the message was sent to the queue + // (epoch time (http://en.wikipedia.org/wiki/Unix_time) in milliseconds). 
// - // * MessageDeduplicationId - Returns the value provided by the producer + // * MessageDeduplicationId – Returns the value provided by the producer // that calls the SendMessage action. // - // * MessageGroupId - Returns the value provided by the producer that calls + // * MessageGroupId – Returns the value provided by the producer that calls // the SendMessage action. Messages with the same MessageGroupId are returned // in sequence. // - // * SequenceNumber - Returns the value provided by Amazon SQS. + // * SequenceNumber – Returns the value provided by Amazon SQS. AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` // The maximum number of messages to return. Amazon SQS never returns more messages @@ -3785,9 +3983,9 @@ type ReceiveMessageInput struct { // // The token used for deduplication of ReceiveMessage calls. If a networking // issue occurs after a ReceiveMessage action, and instead of a response you - // receive a generic error, you can retry the same action with an identical - // ReceiveRequestAttemptId to retrieve the same set of messages, even if their - // visibility timeout has not yet expired. + // receive a generic error, it is possible to retry the same action with an + // identical ReceiveRequestAttemptId to retrieve the same set of messages, even + // if their visibility timeout has not yet expired. // // * You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage // action. @@ -3798,7 +3996,7 @@ type ReceiveMessageInput struct { // * If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId, // Amazon SQS generates a ReceiveRequestAttemptId. // - // * You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId + // * It is possible to retry the ReceiveMessage action with the same ReceiveRequestAttemptId // if none of the messages have been modified (deleted or had their visibility // changes). // @@ -3825,7 +4023,7 @@ type ReceiveMessageInput struct { // no retries work until the original visibility timeout expires. As a result, // delays might occur but the messages in the queue remain in a strict order. // - // The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId + // The maximum length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). // // For best practices of using ReceiveRequestAttemptId, see Using the ReceiveRequestAttemptId @@ -3841,6 +4039,13 @@ type ReceiveMessageInput struct { // in the queue before returning. If a message is available, the call returns // sooner than WaitTimeSeconds. If no messages are available and the wait time // expires, the call returns successfully with an empty list of messages. + // + // To avoid HTTP errors, ensure that the HTTP response timeout for ReceiveMessage + // requests is longer than the WaitTimeSeconds parameter. For example, with + // the Java SDK, you can set HTTP transport settings using the NettyNioAsyncHttpClient + // (https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/nio/netty/NettyNioAsyncHttpClient.html) + // for asynchronous clients, or the ApacheHttpClient (https://sdk.amazonaws.com/java/api/latest/software/amazon/awssdk/http/apache/ApacheHttpClient.html) + // for synchronous clients. 
WaitTimeSeconds *int64 `type:"integer"` } @@ -4121,7 +4326,7 @@ type SendMessageBatchRequestEntry struct { // An identifier for a message in this batch used to communicate the result. // - // The Ids of a batch request need to be unique within a request + // The Ids of a batch request need to be unique within a request. // // This identifier can have up to 80 characters. The following characters are // accepted: alphanumeric characters, hyphens(-), and underscores (_). @@ -4130,7 +4335,7 @@ type SendMessageBatchRequestEntry struct { Id *string `type:"string" required:"true"` // Each message attribute consists of a Name, Type, and Value. For more information, - // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) + // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) // in the Amazon Simple Queue Service Developer Guide. MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` @@ -4216,7 +4421,7 @@ type SendMessageBatchRequestEntry struct { // // * Currently, the only supported message system attribute is AWSTraceHeader. // Its type must be String and its value must be a correctly formatted AWS - // X-Ray trace string. + // X-Ray trace header string. // // * The size of a message system attribute doesn't count towards the total // size of a message. @@ -4413,11 +4618,12 @@ type SendMessageInput struct { DelaySeconds *int64 `type:"integer"` // Each message attribute consists of a Name, Type, and Value. For more information, - // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) + // see Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) // in the Amazon Simple Queue Service Developer Guide. MessageAttributes map[string]*MessageAttributeValue `locationName:"MessageAttribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` - // The message to send. The maximum string size is 256 KB. + // The message to send. The minimum size is one character. The maximum size + // is 256 KB. // // A message can include only XML, JSON, and unformatted text. The following // Unicode characters are allowed: @@ -4467,7 +4673,7 @@ type SendMessageInput struct { // Amazon SQS continues to keep track of the message deduplication ID even after // the message is received and deleted. // - // The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId + // The maximum length of MessageDeduplicationId is 128 characters. MessageDeduplicationId // can contain alphanumeric characters (a-z, A-Z, 0-9) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~). // // For best practices of using MessageDeduplicationId, see Using the MessageDeduplicationId @@ -4508,7 +4714,7 @@ type SendMessageInput struct { // // * Currently, the only supported message system attribute is AWSTraceHeader. // Its type must be String and its value must be a correctly formatted AWS - // X-Ray trace string. + // X-Ray trace header string. // // * The size of a message system attribute doesn't count towards the total // size of a message. 
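Not part of the vendored diff: a minimal Go sketch of SendMessage with a message attribute followed by a long-polling ReceiveMessage, keeping the HTTP client timeout above WaitTimeSeconds as the updated doc comment advises (the Go analogue of the Java client settings it mentions). The queue URL is illustrative; credentials and region are assumed to come from the environment.

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	// Keep the HTTP response timeout longer than the long-poll duration
	// (30s client timeout vs. 20s WaitTimeSeconds below).
	sess := session.Must(session.NewSession(&aws.Config{
		HTTPClient: &http.Client{Timeout: 30 * time.Second},
	}))
	svc := sqs.New(sess)

	queueURL := "https://sqs.us-east-1.amazonaws.com/123456789012/example-queue" // illustrative

	// Send a message with a single String message attribute.
	if _, err := svc.SendMessage(&sqs.SendMessageInput{
		QueueUrl:    aws.String(queueURL),
		MessageBody: aws.String("hello"),
		MessageAttributes: map[string]*sqs.MessageAttributeValue{
			"Source": {
				DataType:    aws.String("String"),
				StringValue: aws.String("example"),
			},
		},
	}); err != nil {
		log.Fatalf("SendMessage failed: %v", err)
	}

	// Long-poll for up to 20 seconds for new messages.
	out, err := svc.ReceiveMessage(&sqs.ReceiveMessageInput{
		QueueUrl:            aws.String(queueURL),
		MaxNumberOfMessages: aws.Int64(10),
		WaitTimeSeconds:     aws.Int64(20),
	})
	if err != nil {
		log.Fatalf("ReceiveMessage failed: %v", err)
	}
	for _, m := range out.Messages {
		fmt.Println(aws.StringValue(m.Body))
	}
}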
@@ -4693,57 +4899,57 @@ type SetQueueAttributesInput struct { // The following lists the names, descriptions, and values of the special request // parameters that the SetQueueAttributes action uses: // - // * DelaySeconds - The length of time, in seconds, for which the delivery + // * DelaySeconds – The length of time, in seconds, for which the delivery // of all messages in the queue is delayed. Valid values: An integer from // 0 to 900 (15 minutes). Default: 0. // - // * MaximumMessageSize - The limit of how many bytes a message can contain + // * MaximumMessageSize – The limit of how many bytes a message can contain // before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes // (1 KiB) up to 262,144 bytes (256 KiB). Default: 262,144 (256 KiB). // - // * MessageRetentionPeriod - The length of time, in seconds, for which Amazon - // SQS retains a message. Valid values: An integer representing seconds, + // * MessageRetentionPeriod – The length of time, in seconds, for which + // Amazon SQS retains a message. Valid values: An integer representing seconds, // from 60 (1 minute) to 1,209,600 (14 days). Default: 345,600 (4 days). // - // * Policy - The queue's policy. A valid AWS policy. For more information + // * Policy – The queue's policy. A valid AWS policy. For more information // about policy structure, see Overview of AWS IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) // in the Amazon IAM User Guide. // - // * ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for + // * ReceiveMessageWaitTimeSeconds – The length of time, in seconds, for // which a ReceiveMessage action waits for a message to arrive. Valid values: - // an integer from 0 to 20 (seconds). Default: 0. + // An integer from 0 to 20 (seconds). Default: 0. // - // * RedrivePolicy - The string that includes the parameters for the dead-letter - // queue functionality of the source queue. For more information about the - // redrive policy and dead-letter queues, see Using Amazon SQS Dead-Letter - // Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) + // * RedrivePolicy – The string that includes the parameters for the dead-letter + // queue functionality of the source queue as a JSON object. For more information + // about the redrive policy and dead-letter queues, see Using Amazon SQS + // Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // in the Amazon Simple Queue Service Developer Guide. deadLetterTargetArn - // - The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon + // – The Amazon Resource Name (ARN) of the dead-letter queue to which Amazon // SQS moves messages after the value of maxReceiveCount is exceeded. maxReceiveCount - // - The number of times a message is delivered to the source queue before + // – The number of times a message is delivered to the source queue before // being moved to the dead-letter queue. When the ReceiveCount for a message // exceeds the maxReceiveCount for a queue, Amazon SQS moves the message // to the dead-letter-queue. The dead-letter queue of a FIFO queue must also // be a FIFO queue. Similarly, the dead-letter queue of a standard queue // must also be a standard queue. // - // * VisibilityTimeout - The visibility timeout for the queue, in seconds. - // Valid values: an integer from 0 to 43,200 (12 hours). Default: 30. 
For + // * VisibilityTimeout – The visibility timeout for the queue, in seconds. + // Valid values: An integer from 0 to 43,200 (12 hours). Default: 30. For // more information about the visibility timeout, see Visibility Timeout // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) // in the Amazon Simple Queue Service Developer Guide. // // The following attributes apply only to server-side-encryption (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html): // - // * KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) + // * KmsMasterKeyId – The ID of an AWS-managed customer master key (CMK) // for Amazon SQS or a custom CMK. For more information, see Key Terms (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-server-side-encryption.html#sqs-sse-key-terms). // While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs, // the alias of a custom CMK can, for example, be alias/MyAlias . For more // examples, see KeyId (https://docs.aws.amazon.com/kms/latest/APIReference/API_DescribeKey.html#API_DescribeKey_RequestParameters) // in the AWS Key Management Service API Reference. // - // * KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which - // Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) + // * KmsDataKeyReusePeriodSeconds – The length of time, in seconds, for + // which Amazon SQS can reuse a data key (https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#data-keys) // to encrypt or decrypt messages before calling AWS KMS again. An integer // representing seconds, between 60 seconds (1 minute) and 86,400 seconds // (24 hours). Default: 300 (5 minutes). A shorter time period provides better @@ -4754,7 +4960,7 @@ type SetQueueAttributesInput struct { // The following attribute applies only to FIFO (first-in-first-out) queues // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html): // - // * ContentBasedDeduplication - Enables content-based deduplication. For + // * ContentBasedDeduplication – Enables content-based deduplication. For // more information, see Exactly-Once Processing (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing) // in the Amazon Simple Queue Service Developer Guide. 
Every message must // have a unique MessageDeduplicationId, You may provide a MessageDeduplicationId @@ -4993,11 +5199,32 @@ const ( MessageSystemAttributeNameAwstraceHeader = "AWSTraceHeader" ) +// MessageSystemAttributeName_Values returns all elements of the MessageSystemAttributeName enum +func MessageSystemAttributeName_Values() []string { + return []string{ + MessageSystemAttributeNameSenderId, + MessageSystemAttributeNameSentTimestamp, + MessageSystemAttributeNameApproximateReceiveCount, + MessageSystemAttributeNameApproximateFirstReceiveTimestamp, + MessageSystemAttributeNameSequenceNumber, + MessageSystemAttributeNameMessageDeduplicationId, + MessageSystemAttributeNameMessageGroupId, + MessageSystemAttributeNameAwstraceHeader, + } +} + const ( // MessageSystemAttributeNameForSendsAwstraceHeader is a MessageSystemAttributeNameForSends enum value MessageSystemAttributeNameForSendsAwstraceHeader = "AWSTraceHeader" ) +// MessageSystemAttributeNameForSends_Values returns all elements of the MessageSystemAttributeNameForSends enum +func MessageSystemAttributeNameForSends_Values() []string { + return []string{ + MessageSystemAttributeNameForSendsAwstraceHeader, + } +} + const ( // QueueAttributeNameAll is a QueueAttributeName enum value QueueAttributeNameAll = "All" @@ -5053,3 +5280,27 @@ const ( // QueueAttributeNameKmsDataKeyReusePeriodSeconds is a QueueAttributeName enum value QueueAttributeNameKmsDataKeyReusePeriodSeconds = "KmsDataKeyReusePeriodSeconds" ) + +// QueueAttributeName_Values returns all elements of the QueueAttributeName enum +func QueueAttributeName_Values() []string { + return []string{ + QueueAttributeNameAll, + QueueAttributeNamePolicy, + QueueAttributeNameVisibilityTimeout, + QueueAttributeNameMaximumMessageSize, + QueueAttributeNameMessageRetentionPeriod, + QueueAttributeNameApproximateNumberOfMessages, + QueueAttributeNameApproximateNumberOfMessagesNotVisible, + QueueAttributeNameCreatedTimestamp, + QueueAttributeNameLastModifiedTimestamp, + QueueAttributeNameQueueArn, + QueueAttributeNameApproximateNumberOfMessagesDelayed, + QueueAttributeNameDelaySeconds, + QueueAttributeNameReceiveMessageWaitTimeSeconds, + QueueAttributeNameRedrivePolicy, + QueueAttributeNameFifoQueue, + QueueAttributeNameContentBasedDeduplication, + QueueAttributeNameKmsMasterKeyId, + QueueAttributeNameKmsDataKeyReusePeriodSeconds, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go index 3a3f55f09..523b40ee3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/doc.go @@ -10,6 +10,10 @@ // Amazon SQS moves data between distributed application components and helps // you decouple these components. // +// For information on the permissions you need to use this API, see Identity +// and access management (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-authentication-and-access-control.html) +// in the Amazon Simple Queue Service Developer Guide. +// // You can use AWS SDKs (http://aws.amazon.com/tools/#sdk) to access Amazon // SQS using your favorite programming language. 
The SDKs perform tasks such // as the following automatically: @@ -25,7 +29,7 @@ // * Amazon SQS Product Page (http://aws.amazon.com/sqs/) // // * Amazon Simple Queue Service Developer Guide Making API Requests (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-making-api-requests.html) -// Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html) +// Amazon SQS Message Attributes (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-metadata.html#sqs-message-attributes) // Amazon SQS Dead-Letter Queues (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-dead-letter-queues.html) // // * Amazon SQS in the AWS CLI Command Reference (http://docs.aws.amazon.com/cli/latest/reference/sqs/index.html) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go index 95721a423..f4f614207 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go index 989c4580f..da137dbd7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/api.go @@ -73,10 +73,11 @@ func (c *SSM) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *requ // We recommend that you devise a set of tag keys that meets your needs for // each resource type. Using a consistent set of tag keys makes it easier for // you to manage your resources. You can search and filter the resources based -// on the tags you add. Tags don't have any semantic meaning to Amazon EC2 and -// are interpreted strictly as a string of characters. +// on the tags you add. Tags don't have any semantic meaning to and are interpreted +// strictly as a string of characters. // -// For more information about tags, see Tagging Your Amazon EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) +// For more information about using tags with EC2 instances, see Tagging your +// Amazon EC2 resources (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) // in the Amazon EC2 User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -290,7 +291,7 @@ func (c *SSM) CancelMaintenanceWindowExecutionRequest(input *CancelMaintenanceWi // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/CancelMaintenanceWindowExecution @@ -365,12 +366,11 @@ func (c *SSM) CreateActivationRequest(input *CreateActivationInput) (req *reques // Systems Manager capabilities. You use the activation code and ID when installing // SSM Agent on machines in your hybrid environment. 
For more information about // requirements for managing on-premises instances and VMs using Systems Manager, -// see Setting Up AWS Systems Manager for Hybrid Environments (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html) +// see Setting up AWS Systems Manager for hybrid environments (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances.html) // in the AWS Systems Manager User Guide. // -// On-premises servers or VMs that are registered with Systems Manager and Amazon -// EC2 instances that you manage with Systems Manager are all called managed -// instances. +// On-premises servers or VMs that are registered with Systems Manager and EC2 +// instances that you manage with Systems Manager are all called managed instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -449,15 +449,18 @@ func (c *SSM) CreateAssociationRequest(input *CreateAssociationInput) (req *requ // CreateAssociation API operation for Amazon Simple Systems Manager (SSM). // -// Associates the specified Systems Manager document with the specified instances -// or targets. -// -// When you associate a document with one or more instances using instance IDs -// or tags, SSM Agent running on the instance processes the document and configures -// the instance as specified. -// -// If you associate a document with an instance that already has an associated -// document, the system returns the AssociationAlreadyExists exception. +// A State Manager association defines the state that you want to maintain on +// your instances. For example, an association can specify that anti-virus software +// must be installed and running on your instances, or that certain ports must +// be closed. For static targets, the association specifies a schedule for when +// the configuration is reapplied. For dynamic targets, such as an AWS Resource +// Group or an AWS Autoscaling Group, State Manager applies the configuration +// when new instances are added to the group. The association also specifies +// actions to take when applying the configuration. For example, an association +// for anti-virus software might run once a day. If the software is not installed, +// then State Manager installs it. If the software is installed, but the service +// is not running, then the association might instruct State Manager to start +// the service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -508,7 +511,7 @@ func (c *SSM) CreateAssociationRequest(input *CreateAssociationInput) (req *requ // // * InvalidTarget // The target is not valid or does not exist. It might not be configured for -// EC2 Systems Manager or you might not have permission to perform the operation. +// Systems Manager or you might not have permission to perform the operation. // // * InvalidSchedule // The schedule is invalid. Verify your cron or rate expression and try again. @@ -638,7 +641,7 @@ func (c *SSM) CreateAssociationBatchRequest(input *CreateAssociationBatchInput) // // * InvalidTarget // The target is not valid or does not exist. It might not be configured for -// EC2 Systems Manager or you might not have permission to perform the operation. +// Systems Manager or you might not have permission to perform the operation. 
// // * InvalidSchedule // The schedule is invalid. Verify your cron or rate expression and try again. @@ -709,10 +712,11 @@ func (c *SSM) CreateDocumentRequest(input *CreateDocumentInput) (req *request.Re // CreateDocument API operation for Amazon Simple Systems Manager (SSM). // -// Creates a Systems Manager document. -// -// After you create a document, you can use CreateAssociation to associate it -// with one or more running instances. +// Creates a Systems Manager (SSM) document. An SSM document defines the actions +// that Systems Manager performs on your managed instances. For more information +// about SSM documents, including information about supported schemas, features, +// and syntax, see AWS Systems Manager Documents (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-ssm-docs.html) +// in the AWS Systems Manager User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -832,7 +836,7 @@ func (c *SSM) CreateMaintenanceWindowRequest(input *CreateMaintenanceWindowInput // For example, too many maintenance windows or patch baselines have been created. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -906,13 +910,13 @@ func (c *SSM) CreateOpsItemRequest(input *CreateOpsItemInput) (req *request.Requ // // Creates a new OpsItem. You must have permission in AWS Identity and Access // Management (IAM) to create a new OpsItem. For more information, see Getting -// Started with OpsCenter (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) +// started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) // in the AWS Systems Manager User Guide. // // Operations engineers and IT professionals use OpsCenter to view, investigate, // and remediate operational issues impacting the performance and health of // their AWS resources. For more information, see AWS Systems Manager OpsCenter -// (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) // in the AWS Systems Manager User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -931,7 +935,7 @@ func (c *SSM) CreateOpsItemRequest(input *CreateOpsItemInput) (req *request.Requ // // * OpsItemLimitExceededException // The request caused OpsItems to exceed one or more quotas. For information -// about OpsItem quotas, see What are the resource limits for OpsCenter? (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). +// about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). // // * OpsItemInvalidParameterException // A specified parameter argument isn't valid. Verify the available arguments @@ -1025,7 +1029,7 @@ func (c *SSM) CreatePatchBaselineRequest(input *CreatePatchBaselineInput) (req * // For example, too many maintenance windows or patch baselines have been created. 
// // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -1104,16 +1108,16 @@ func (c *SSM) CreateResourceDataSyncRequest(input *CreateResourceDataSyncInput) // // You can configure Systems Manager Inventory to use the SyncToDestination // type to synchronize Inventory data from multiple AWS Regions to a single -// Amazon S3 bucket. For more information, see Configuring Resource Data Sync -// for Inventory (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-datasync.html) +// S3 bucket. For more information, see Configuring Resource Data Sync for Inventory +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-datasync.html) // in the AWS Systems Manager User Guide. // // You can configure Systems Manager Explorer to use the SyncFromSource type // to synchronize operational work items (OpsItems) and operational data (OpsData) -// from multiple AWS Regions to a single Amazon S3 bucket. This type can synchronize +// from multiple AWS Regions to a single S3 bucket. This type can synchronize // OpsItems and OpsData from multiple AWS accounts and Regions or EntireOrganization -// by using AWS Organizations. For more information, see Setting Up Explorer -// to Display Data from Multiple Accounts and Regions (http://docs.aws.amazon.com/systems-manager/latest/userguide/Explorer-resource-data-sync.html) +// by using AWS Organizations. For more information, see Setting up Systems +// Manager Explorer to display data from multiple accounts and Regions (https://docs.aws.amazon.com/systems-manager/latest/userguide/Explorer-resource-data-sync.html) // in the AWS Systems Manager User Guide. // // A resource data sync is an asynchronous operation that returns immediately. @@ -1508,7 +1512,7 @@ func (c *SSM) DeleteInventoryRequest(input *DeleteInventoryInput) (req *request. // DeleteInventory API operation for Amazon Simple Systems Manager (SSM). // -// Delete a custom inventory type, or the data associated with a custom Inventory +// Delete a custom inventory type or the data associated with a custom Inventory // type. Deleting a custom inventory type is also referred to as deleting a // custom inventory schema. // @@ -2207,7 +2211,7 @@ func (c *SSM) DeregisterTargetFromMaintenanceWindowRequest(input *DeregisterTarg // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -2298,7 +2302,7 @@ func (c *SSM) DeregisterTaskFromMaintenanceWindowRequest(input *DeregisterTaskFr // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. 
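The CreateResourceDataSync description above mentions syncing Inventory data to a single S3 bucket with the SyncToDestination type. A hedged sketch of what such a call could look like; the bucket name, prefix, and Region are placeholders and error handling is trimmed:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	sess := session.Must(session.NewSession())
	client := ssm.New(sess)

	// Placeholder values: substitute a real bucket, prefix, and Region.
	_, err := client.CreateResourceDataSync(&ssm.CreateResourceDataSyncInput{
		SyncName: aws.String("example-inventory-sync"),
		S3Destination: &ssm.ResourceDataSyncS3Destination{
			BucketName: aws.String("example-bucket"),
			Prefix:     aws.String("inventory/"),
			Region:     aws.String("us-east-1"),
			SyncFormat: aws.String("JsonSerDe"), // the only documented sync format
		},
	})
	if err != nil {
		fmt.Println("CreateResourceDataSync failed:", err)
	}
}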
// // * InternalServerError @@ -2609,6 +2613,12 @@ func (c *SSM) DescribeAssociationExecutionTargetsRequest(input *DescribeAssociat Name: opDescribeAssociationExecutionTargets, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -2667,6 +2677,58 @@ func (c *SSM) DescribeAssociationExecutionTargetsWithContext(ctx aws.Context, in return out, req.Send() } +// DescribeAssociationExecutionTargetsPages iterates over the pages of a DescribeAssociationExecutionTargets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAssociationExecutionTargets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAssociationExecutionTargets operation. +// pageNum := 0 +// err := client.DescribeAssociationExecutionTargetsPages(params, +// func(page *ssm.DescribeAssociationExecutionTargetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeAssociationExecutionTargetsPages(input *DescribeAssociationExecutionTargetsInput, fn func(*DescribeAssociationExecutionTargetsOutput, bool) bool) error { + return c.DescribeAssociationExecutionTargetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAssociationExecutionTargetsPagesWithContext same as DescribeAssociationExecutionTargetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAssociationExecutionTargetsPagesWithContext(ctx aws.Context, input *DescribeAssociationExecutionTargetsInput, fn func(*DescribeAssociationExecutionTargetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAssociationExecutionTargetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAssociationExecutionTargetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAssociationExecutionTargetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeAssociationExecutions = "DescribeAssociationExecutions" // DescribeAssociationExecutionsRequest generates a "aws/request.Request" representing the @@ -2698,6 +2760,12 @@ func (c *SSM) DescribeAssociationExecutionsRequest(input *DescribeAssociationExe Name: opDescribeAssociationExecutions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -2752,6 +2820,58 @@ func (c *SSM) DescribeAssociationExecutionsWithContext(ctx aws.Context, input *D return out, req.Send() } +// DescribeAssociationExecutionsPages iterates over the pages of a DescribeAssociationExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAssociationExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAssociationExecutions operation. +// pageNum := 0 +// err := client.DescribeAssociationExecutionsPages(params, +// func(page *ssm.DescribeAssociationExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeAssociationExecutionsPages(input *DescribeAssociationExecutionsInput, fn func(*DescribeAssociationExecutionsOutput, bool) bool) error { + return c.DescribeAssociationExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAssociationExecutionsPagesWithContext same as DescribeAssociationExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAssociationExecutionsPagesWithContext(ctx aws.Context, input *DescribeAssociationExecutionsInput, fn func(*DescribeAssociationExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAssociationExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAssociationExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAssociationExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeAutomationExecutions = "DescribeAutomationExecutions" // DescribeAutomationExecutionsRequest generates a "aws/request.Request" representing the @@ -2783,6 +2903,12 @@ func (c *SSM) DescribeAutomationExecutionsRequest(input *DescribeAutomationExecu Name: opDescribeAutomationExecutions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -2840,6 +2966,58 @@ func (c *SSM) DescribeAutomationExecutionsWithContext(ctx aws.Context, input *De return out, req.Send() } +// DescribeAutomationExecutionsPages iterates over the pages of a DescribeAutomationExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAutomationExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAutomationExecutions operation. +// pageNum := 0 +// err := client.DescribeAutomationExecutionsPages(params, +// func(page *ssm.DescribeAutomationExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeAutomationExecutionsPages(input *DescribeAutomationExecutionsInput, fn func(*DescribeAutomationExecutionsOutput, bool) bool) error { + return c.DescribeAutomationExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAutomationExecutionsPagesWithContext same as DescribeAutomationExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAutomationExecutionsPagesWithContext(ctx aws.Context, input *DescribeAutomationExecutionsInput, fn func(*DescribeAutomationExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAutomationExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAutomationExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAutomationExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeAutomationStepExecutions = "DescribeAutomationStepExecutions" // DescribeAutomationStepExecutionsRequest generates a "aws/request.Request" representing the @@ -2871,6 +3049,12 @@ func (c *SSM) DescribeAutomationStepExecutionsRequest(input *DescribeAutomationS Name: opDescribeAutomationStepExecutions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -2933,6 +3117,58 @@ func (c *SSM) DescribeAutomationStepExecutionsWithContext(ctx aws.Context, input return out, req.Send() } +// DescribeAutomationStepExecutionsPages iterates over the pages of a DescribeAutomationStepExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAutomationStepExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAutomationStepExecutions operation. +// pageNum := 0 +// err := client.DescribeAutomationStepExecutionsPages(params, +// func(page *ssm.DescribeAutomationStepExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeAutomationStepExecutionsPages(input *DescribeAutomationStepExecutionsInput, fn func(*DescribeAutomationStepExecutionsOutput, bool) bool) error { + return c.DescribeAutomationStepExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAutomationStepExecutionsPagesWithContext same as DescribeAutomationStepExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAutomationStepExecutionsPagesWithContext(ctx aws.Context, input *DescribeAutomationStepExecutionsInput, fn func(*DescribeAutomationStepExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAutomationStepExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAutomationStepExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAutomationStepExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeAvailablePatches = "DescribeAvailablePatches" // DescribeAvailablePatchesRequest generates a "aws/request.Request" representing the @@ -2964,6 +3200,12 @@ func (c *SSM) DescribeAvailablePatchesRequest(input *DescribeAvailablePatchesInp Name: opDescribeAvailablePatches, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -3012,6 +3254,58 @@ func (c *SSM) DescribeAvailablePatchesWithContext(ctx aws.Context, input *Descri return out, req.Send() } +// DescribeAvailablePatchesPages iterates over the pages of a DescribeAvailablePatches operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAvailablePatches method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAvailablePatches operation. +// pageNum := 0 +// err := client.DescribeAvailablePatchesPages(params, +// func(page *ssm.DescribeAvailablePatchesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeAvailablePatchesPages(input *DescribeAvailablePatchesInput, fn func(*DescribeAvailablePatchesOutput, bool) bool) error { + return c.DescribeAvailablePatchesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeAvailablePatchesPagesWithContext same as DescribeAvailablePatchesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeAvailablePatchesPagesWithContext(ctx aws.Context, input *DescribeAvailablePatchesInput, fn func(*DescribeAvailablePatchesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeAvailablePatchesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeAvailablePatchesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeAvailablePatchesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeDocument = "DescribeDocument" // DescribeDocumentRequest generates a "aws/request.Request" representing the @@ -3216,6 +3510,12 @@ func (c *SSM) DescribeEffectiveInstanceAssociationsRequest(input *DescribeEffect Name: opDescribeEffectiveInstanceAssociations, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -3279,6 +3579,58 @@ func (c *SSM) DescribeEffectiveInstanceAssociationsWithContext(ctx aws.Context, return out, req.Send() } +// DescribeEffectiveInstanceAssociationsPages iterates over the pages of a DescribeEffectiveInstanceAssociations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEffectiveInstanceAssociations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEffectiveInstanceAssociations operation. +// pageNum := 0 +// err := client.DescribeEffectiveInstanceAssociationsPages(params, +// func(page *ssm.DescribeEffectiveInstanceAssociationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeEffectiveInstanceAssociationsPages(input *DescribeEffectiveInstanceAssociationsInput, fn func(*DescribeEffectiveInstanceAssociationsOutput, bool) bool) error { + return c.DescribeEffectiveInstanceAssociationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeEffectiveInstanceAssociationsPagesWithContext same as DescribeEffectiveInstanceAssociationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeEffectiveInstanceAssociationsPagesWithContext(ctx aws.Context, input *DescribeEffectiveInstanceAssociationsInput, fn func(*DescribeEffectiveInstanceAssociationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeEffectiveInstanceAssociationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeEffectiveInstanceAssociationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeEffectiveInstanceAssociationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeEffectivePatchesForPatchBaseline = "DescribeEffectivePatchesForPatchBaseline" // DescribeEffectivePatchesForPatchBaselineRequest generates a "aws/request.Request" representing the @@ -3310,6 +3662,12 @@ func (c *SSM) DescribeEffectivePatchesForPatchBaselineRequest(input *DescribeEff Name: opDescribeEffectivePatchesForPatchBaseline, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -3344,13 +3702,12 @@ func (c *SSM) DescribeEffectivePatchesForPatchBaselineRequest(input *DescribeEff // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * UnsupportedOperatingSystem // The operating systems you specified is not supported, or the operation is -// not supported for the operating system. Valid operating systems include: -// Windows, AmazonLinux, RedhatEnterpriseLinux, and Ubuntu. +// not supported for the operating system. // // * InternalServerError // An error occurred on the server side. @@ -3377,6 +3734,58 @@ func (c *SSM) DescribeEffectivePatchesForPatchBaselineWithContext(ctx aws.Contex return out, req.Send() } +// DescribeEffectivePatchesForPatchBaselinePages iterates over the pages of a DescribeEffectivePatchesForPatchBaseline operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEffectivePatchesForPatchBaseline method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEffectivePatchesForPatchBaseline operation. +// pageNum := 0 +// err := client.DescribeEffectivePatchesForPatchBaselinePages(params, +// func(page *ssm.DescribeEffectivePatchesForPatchBaselineOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeEffectivePatchesForPatchBaselinePages(input *DescribeEffectivePatchesForPatchBaselineInput, fn func(*DescribeEffectivePatchesForPatchBaselineOutput, bool) bool) error { + return c.DescribeEffectivePatchesForPatchBaselinePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeEffectivePatchesForPatchBaselinePagesWithContext same as DescribeEffectivePatchesForPatchBaselinePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
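All of the *PagesWithContext variants added throughout this file follow the same pattern: build a request.Pagination with a NewRequest closure, then drive it with p.Next(). A usage sketch for one of them, with placeholder IDs and a caller-supplied timeout (none of this is part of the patch):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	sess := session.Must(session.NewSession())
	client := ssm.New(sess)

	// One deadline covers the whole multi-page operation; each page request
	// picks up this context via req.SetContext in the generated code.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Placeholder IDs: both fields are required by the API.
	input := &ssm.DescribeAssociationExecutionTargetsInput{
		AssociationId: aws.String("00000000-0000-0000-0000-000000000000"),
		ExecutionId:   aws.String("00000000-0000-0000-0000-000000000000"),
	}

	err := client.DescribeAssociationExecutionTargetsPagesWithContext(ctx, input,
		func(page *ssm.DescribeAssociationExecutionTargetsOutput, lastPage bool) bool {
			fmt.Printf("page with %d targets (last=%t)\n", len(page.AssociationExecutionTargets), lastPage)
			return true // keep paging until the final page or the context expires
		})
	if err != nil {
		fmt.Println("DescribeAssociationExecutionTargets failed:", err)
	}
}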
+func (c *SSM) DescribeEffectivePatchesForPatchBaselinePagesWithContext(ctx aws.Context, input *DescribeEffectivePatchesForPatchBaselineInput, fn func(*DescribeEffectivePatchesForPatchBaselineOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeEffectivePatchesForPatchBaselineInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeEffectivePatchesForPatchBaselineRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeEffectivePatchesForPatchBaselineOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeInstanceAssociationsStatus = "DescribeInstanceAssociationsStatus" // DescribeInstanceAssociationsStatusRequest generates a "aws/request.Request" representing the @@ -3408,6 +3817,12 @@ func (c *SSM) DescribeInstanceAssociationsStatusRequest(input *DescribeInstanceA Name: opDescribeInstanceAssociationsStatus, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -3471,6 +3886,58 @@ func (c *SSM) DescribeInstanceAssociationsStatusWithContext(ctx aws.Context, inp return out, req.Send() } +// DescribeInstanceAssociationsStatusPages iterates over the pages of a DescribeInstanceAssociationsStatus operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceAssociationsStatus method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceAssociationsStatus operation. +// pageNum := 0 +// err := client.DescribeInstanceAssociationsStatusPages(params, +// func(page *ssm.DescribeInstanceAssociationsStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeInstanceAssociationsStatusPages(input *DescribeInstanceAssociationsStatusInput, fn func(*DescribeInstanceAssociationsStatusOutput, bool) bool) error { + return c.DescribeInstanceAssociationsStatusPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstanceAssociationsStatusPagesWithContext same as DescribeInstanceAssociationsStatusPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInstanceAssociationsStatusPagesWithContext(ctx aws.Context, input *DescribeInstanceAssociationsStatusInput, fn func(*DescribeInstanceAssociationsStatusOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstanceAssociationsStatusInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstanceAssociationsStatusRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstanceAssociationsStatusOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeInstanceInformation = "DescribeInstanceInformation" // DescribeInstanceInformationRequest generates a "aws/request.Request" representing the @@ -3521,16 +3988,18 @@ func (c *SSM) DescribeInstanceInformationRequest(input *DescribeInstanceInformat // DescribeInstanceInformation API operation for Amazon Simple Systems Manager (SSM). // -// Describes one or more of your instances. You can use this to get information -// about instances like the operating system platform, the SSM Agent version -// (Linux), status etc. If you specify one or more instance IDs, it returns -// information for those instances. If you do not specify instance IDs, it returns -// information for all your instances. If you specify an instance ID that is -// not valid or an instance that you do not own, you receive an error. +// Describes one or more of your instances, including information about the +// operating system platform, the version of SSM Agent installed on the instance, +// instance status, and so on. +// +// If you specify one or more instance IDs, it returns information for those +// instances. If you do not specify instance IDs, it returns information for +// all your instances. If you specify an instance ID that is not valid or an +// instance that you do not own, you receive an error. // // The IamRole field for this API action is the Amazon Identity and Access Management // (IAM) role assigned to on-premises instances. This call does not return the -// IAM role for Amazon EC2 instances. +// IAM role for EC2 instances. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3669,6 +4138,12 @@ func (c *SSM) DescribeInstancePatchStatesRequest(input *DescribeInstancePatchSta Name: opDescribeInstancePatchStates, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -3720,6 +4195,58 @@ func (c *SSM) DescribeInstancePatchStatesWithContext(ctx aws.Context, input *Des return out, req.Send() } +// DescribeInstancePatchStatesPages iterates over the pages of a DescribeInstancePatchStates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstancePatchStates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstancePatchStates operation. +// pageNum := 0 +// err := client.DescribeInstancePatchStatesPages(params, +// func(page *ssm.DescribeInstancePatchStatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeInstancePatchStatesPages(input *DescribeInstancePatchStatesInput, fn func(*DescribeInstancePatchStatesOutput, bool) bool) error { + return c.DescribeInstancePatchStatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstancePatchStatesPagesWithContext same as DescribeInstancePatchStatesPages except +// it takes a Context and allows setting request options on the pages. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInstancePatchStatesPagesWithContext(ctx aws.Context, input *DescribeInstancePatchStatesInput, fn func(*DescribeInstancePatchStatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstancePatchStatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancePatchStatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstancePatchStatesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeInstancePatchStatesForPatchGroup = "DescribeInstancePatchStatesForPatchGroup" // DescribeInstancePatchStatesForPatchGroupRequest generates a "aws/request.Request" representing the @@ -3751,6 +4278,12 @@ func (c *SSM) DescribeInstancePatchStatesForPatchGroupRequest(input *DescribeIns Name: opDescribeInstancePatchStatesForPatchGroup, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -3807,6 +4340,58 @@ func (c *SSM) DescribeInstancePatchStatesForPatchGroupWithContext(ctx aws.Contex return out, req.Send() } +// DescribeInstancePatchStatesForPatchGroupPages iterates over the pages of a DescribeInstancePatchStatesForPatchGroup operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstancePatchStatesForPatchGroup method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstancePatchStatesForPatchGroup operation. +// pageNum := 0 +// err := client.DescribeInstancePatchStatesForPatchGroupPages(params, +// func(page *ssm.DescribeInstancePatchStatesForPatchGroupOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeInstancePatchStatesForPatchGroupPages(input *DescribeInstancePatchStatesForPatchGroupInput, fn func(*DescribeInstancePatchStatesForPatchGroupOutput, bool) bool) error { + return c.DescribeInstancePatchStatesForPatchGroupPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstancePatchStatesForPatchGroupPagesWithContext same as DescribeInstancePatchStatesForPatchGroupPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeInstancePatchStatesForPatchGroupPagesWithContext(ctx aws.Context, input *DescribeInstancePatchStatesForPatchGroupInput, fn func(*DescribeInstancePatchStatesForPatchGroupOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstancePatchStatesForPatchGroupInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancePatchStatesForPatchGroupRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstancePatchStatesForPatchGroupOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeInstancePatches = "DescribeInstancePatches" // DescribeInstancePatchesRequest generates a "aws/request.Request" representing the @@ -3838,6 +4423,12 @@ func (c *SSM) DescribeInstancePatchesRequest(input *DescribeInstancePatchesInput Name: opDescribeInstancePatches, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -3906,6 +4497,58 @@ func (c *SSM) DescribeInstancePatchesWithContext(ctx aws.Context, input *Describ return out, req.Send() } +// DescribeInstancePatchesPages iterates over the pages of a DescribeInstancePatches operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstancePatches method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstancePatches operation. +// pageNum := 0 +// err := client.DescribeInstancePatchesPages(params, +// func(page *ssm.DescribeInstancePatchesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeInstancePatchesPages(input *DescribeInstancePatchesInput, fn func(*DescribeInstancePatchesOutput, bool) bool) error { + return c.DescribeInstancePatchesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstancePatchesPagesWithContext same as DescribeInstancePatchesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInstancePatchesPagesWithContext(ctx aws.Context, input *DescribeInstancePatchesInput, fn func(*DescribeInstancePatchesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstancePatchesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstancePatchesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInstancePatchesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeInventoryDeletions = "DescribeInventoryDeletions" // DescribeInventoryDeletionsRequest generates a "aws/request.Request" representing the @@ -3937,6 +4580,12 @@ func (c *SSM) DescribeInventoryDeletionsRequest(input *DescribeInventoryDeletion Name: opDescribeInventoryDeletions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -3992,6 +4641,58 @@ func (c *SSM) DescribeInventoryDeletionsWithContext(ctx aws.Context, input *Desc return out, req.Send() } +// DescribeInventoryDeletionsPages iterates over the pages of a DescribeInventoryDeletions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInventoryDeletions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInventoryDeletions operation. +// pageNum := 0 +// err := client.DescribeInventoryDeletionsPages(params, +// func(page *ssm.DescribeInventoryDeletionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeInventoryDeletionsPages(input *DescribeInventoryDeletionsInput, fn func(*DescribeInventoryDeletionsOutput, bool) bool) error { + return c.DescribeInventoryDeletionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInventoryDeletionsPagesWithContext same as DescribeInventoryDeletionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeInventoryDeletionsPagesWithContext(ctx aws.Context, input *DescribeInventoryDeletionsInput, fn func(*DescribeInventoryDeletionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInventoryDeletionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInventoryDeletionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeInventoryDeletionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMaintenanceWindowExecutionTaskInvocations = "DescribeMaintenanceWindowExecutionTaskInvocations" // DescribeMaintenanceWindowExecutionTaskInvocationsRequest generates a "aws/request.Request" representing the @@ -4023,6 +4724,12 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsRequest(input *De Name: opDescribeMaintenanceWindowExecutionTaskInvocations, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -4052,7 +4759,7 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsRequest(input *De // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -4080,6 +4787,58 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsWithContext(ctx a return out, req.Send() } +// DescribeMaintenanceWindowExecutionTaskInvocationsPages iterates over the pages of a DescribeMaintenanceWindowExecutionTaskInvocations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowExecutionTaskInvocations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowExecutionTaskInvocations operation. +// pageNum := 0 +// err := client.DescribeMaintenanceWindowExecutionTaskInvocationsPages(params, +// func(page *ssm.DescribeMaintenanceWindowExecutionTaskInvocationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsPages(input *DescribeMaintenanceWindowExecutionTaskInvocationsInput, fn func(*DescribeMaintenanceWindowExecutionTaskInvocationsOutput, bool) bool) error { + return c.DescribeMaintenanceWindowExecutionTaskInvocationsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowExecutionTaskInvocationsPagesWithContext same as DescribeMaintenanceWindowExecutionTaskInvocationsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeMaintenanceWindowExecutionTaskInvocationsPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowExecutionTaskInvocationsInput, fn func(*DescribeMaintenanceWindowExecutionTaskInvocationsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowExecutionTaskInvocationsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowExecutionTaskInvocationsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowExecutionTaskInvocationsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMaintenanceWindowExecutionTasks = "DescribeMaintenanceWindowExecutionTasks" // DescribeMaintenanceWindowExecutionTasksRequest generates a "aws/request.Request" representing the @@ -4111,6 +4870,12 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTasksRequest(input *DescribeMain Name: opDescribeMaintenanceWindowExecutionTasks, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -4139,7 +4904,7 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTasksRequest(input *DescribeMain // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -4167,6 +4932,58 @@ func (c *SSM) DescribeMaintenanceWindowExecutionTasksWithContext(ctx aws.Context return out, req.Send() } +// DescribeMaintenanceWindowExecutionTasksPages iterates over the pages of a DescribeMaintenanceWindowExecutionTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowExecutionTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowExecutionTasks operation. +// pageNum := 0 +// err := client.DescribeMaintenanceWindowExecutionTasksPages(params, +// func(page *ssm.DescribeMaintenanceWindowExecutionTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowExecutionTasksPages(input *DescribeMaintenanceWindowExecutionTasksInput, fn func(*DescribeMaintenanceWindowExecutionTasksOutput, bool) bool) error { + return c.DescribeMaintenanceWindowExecutionTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowExecutionTasksPagesWithContext same as DescribeMaintenanceWindowExecutionTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeMaintenanceWindowExecutionTasksPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowExecutionTasksInput, fn func(*DescribeMaintenanceWindowExecutionTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowExecutionTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowExecutionTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowExecutionTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMaintenanceWindowExecutions = "DescribeMaintenanceWindowExecutions" // DescribeMaintenanceWindowExecutionsRequest generates a "aws/request.Request" representing the @@ -4198,6 +5015,12 @@ func (c *SSM) DescribeMaintenanceWindowExecutionsRequest(input *DescribeMaintena Name: opDescribeMaintenanceWindowExecutions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -4248,6 +5071,58 @@ func (c *SSM) DescribeMaintenanceWindowExecutionsWithContext(ctx aws.Context, in return out, req.Send() } +// DescribeMaintenanceWindowExecutionsPages iterates over the pages of a DescribeMaintenanceWindowExecutions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowExecutions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowExecutions operation. +// pageNum := 0 +// err := client.DescribeMaintenanceWindowExecutionsPages(params, +// func(page *ssm.DescribeMaintenanceWindowExecutionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowExecutionsPages(input *DescribeMaintenanceWindowExecutionsInput, fn func(*DescribeMaintenanceWindowExecutionsOutput, bool) bool) error { + return c.DescribeMaintenanceWindowExecutionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowExecutionsPagesWithContext same as DescribeMaintenanceWindowExecutionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowExecutionsPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowExecutionsInput, fn func(*DescribeMaintenanceWindowExecutionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowExecutionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowExecutionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowExecutionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMaintenanceWindowSchedule = "DescribeMaintenanceWindowSchedule" // DescribeMaintenanceWindowScheduleRequest generates a "aws/request.Request" representing the @@ -4279,6 +5154,12 @@ func (c *SSM) DescribeMaintenanceWindowScheduleRequest(input *DescribeMaintenanc Name: opDescribeMaintenanceWindowSchedule, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -4310,7 +5191,7 @@ func (c *SSM) DescribeMaintenanceWindowScheduleRequest(input *DescribeMaintenanc // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/DescribeMaintenanceWindowSchedule @@ -4335,6 +5216,58 @@ func (c *SSM) DescribeMaintenanceWindowScheduleWithContext(ctx aws.Context, inpu return out, req.Send() } +// DescribeMaintenanceWindowSchedulePages iterates over the pages of a DescribeMaintenanceWindowSchedule operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowSchedule method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowSchedule operation. +// pageNum := 0 +// err := client.DescribeMaintenanceWindowSchedulePages(params, +// func(page *ssm.DescribeMaintenanceWindowScheduleOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowSchedulePages(input *DescribeMaintenanceWindowScheduleInput, fn func(*DescribeMaintenanceWindowScheduleOutput, bool) bool) error { + return c.DescribeMaintenanceWindowSchedulePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowSchedulePagesWithContext same as DescribeMaintenanceWindowSchedulePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowSchedulePagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowScheduleInput, fn func(*DescribeMaintenanceWindowScheduleOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowScheduleInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowScheduleRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowScheduleOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMaintenanceWindowTargets = "DescribeMaintenanceWindowTargets" // DescribeMaintenanceWindowTargetsRequest generates a "aws/request.Request" representing the @@ -4366,6 +5299,12 @@ func (c *SSM) DescribeMaintenanceWindowTargetsRequest(input *DescribeMaintenance Name: opDescribeMaintenanceWindowTargets, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -4394,7 +5333,7 @@ func (c *SSM) DescribeMaintenanceWindowTargetsRequest(input *DescribeMaintenance // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -4422,6 +5361,58 @@ func (c *SSM) DescribeMaintenanceWindowTargetsWithContext(ctx aws.Context, input return out, req.Send() } +// DescribeMaintenanceWindowTargetsPages iterates over the pages of a DescribeMaintenanceWindowTargets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowTargets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowTargets operation. +// pageNum := 0 +// err := client.DescribeMaintenanceWindowTargetsPages(params, +// func(page *ssm.DescribeMaintenanceWindowTargetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowTargetsPages(input *DescribeMaintenanceWindowTargetsInput, fn func(*DescribeMaintenanceWindowTargetsOutput, bool) bool) error { + return c.DescribeMaintenanceWindowTargetsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowTargetsPagesWithContext same as DescribeMaintenanceWindowTargetsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowTargetsPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowTargetsInput, fn func(*DescribeMaintenanceWindowTargetsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowTargetsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowTargetsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowTargetsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMaintenanceWindowTasks = "DescribeMaintenanceWindowTasks" // DescribeMaintenanceWindowTasksRequest generates a "aws/request.Request" representing the @@ -4453,6 +5444,12 @@ func (c *SSM) DescribeMaintenanceWindowTasksRequest(input *DescribeMaintenanceWi Name: opDescribeMaintenanceWindowTasks, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -4481,7 +5478,7 @@ func (c *SSM) DescribeMaintenanceWindowTasksRequest(input *DescribeMaintenanceWi // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -4509,6 +5506,58 @@ func (c *SSM) DescribeMaintenanceWindowTasksWithContext(ctx aws.Context, input * return out, req.Send() } +// DescribeMaintenanceWindowTasksPages iterates over the pages of a DescribeMaintenanceWindowTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowTasks operation. +// pageNum := 0 +// err := client.DescribeMaintenanceWindowTasksPages(params, +// func(page *ssm.DescribeMaintenanceWindowTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowTasksPages(input *DescribeMaintenanceWindowTasksInput, fn func(*DescribeMaintenanceWindowTasksOutput, bool) bool) error { + return c.DescribeMaintenanceWindowTasksPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowTasksPagesWithContext same as DescribeMaintenanceWindowTasksPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowTasksPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowTasksInput, fn func(*DescribeMaintenanceWindowTasksOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowTasksInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowTasksRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowTasksOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMaintenanceWindows = "DescribeMaintenanceWindows" // DescribeMaintenanceWindowsRequest generates a "aws/request.Request" representing the @@ -4540,6 +5589,12 @@ func (c *SSM) DescribeMaintenanceWindowsRequest(input *DescribeMaintenanceWindow Name: opDescribeMaintenanceWindows, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -4588,6 +5643,58 @@ func (c *SSM) DescribeMaintenanceWindowsWithContext(ctx aws.Context, input *Desc return out, req.Send() } +// DescribeMaintenanceWindowsPages iterates over the pages of a DescribeMaintenanceWindows operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindows method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindows operation. +// pageNum := 0 +// err := client.DescribeMaintenanceWindowsPages(params, +// func(page *ssm.DescribeMaintenanceWindowsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowsPages(input *DescribeMaintenanceWindowsInput, fn func(*DescribeMaintenanceWindowsOutput, bool) bool) error { + return c.DescribeMaintenanceWindowsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowsPagesWithContext same as DescribeMaintenanceWindowsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowsPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowsInput, fn func(*DescribeMaintenanceWindowsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeMaintenanceWindowsForTarget = "DescribeMaintenanceWindowsForTarget" // DescribeMaintenanceWindowsForTargetRequest generates a "aws/request.Request" representing the @@ -4619,6 +5726,12 @@ func (c *SSM) DescribeMaintenanceWindowsForTargetRequest(input *DescribeMaintena Name: opDescribeMaintenanceWindowsForTarget, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -4668,6 +5781,58 @@ func (c *SSM) DescribeMaintenanceWindowsForTargetWithContext(ctx aws.Context, in return out, req.Send() } +// DescribeMaintenanceWindowsForTargetPages iterates over the pages of a DescribeMaintenanceWindowsForTarget operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMaintenanceWindowsForTarget method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMaintenanceWindowsForTarget operation. +// pageNum := 0 +// err := client.DescribeMaintenanceWindowsForTargetPages(params, +// func(page *ssm.DescribeMaintenanceWindowsForTargetOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeMaintenanceWindowsForTargetPages(input *DescribeMaintenanceWindowsForTargetInput, fn func(*DescribeMaintenanceWindowsForTargetOutput, bool) bool) error { + return c.DescribeMaintenanceWindowsForTargetPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeMaintenanceWindowsForTargetPagesWithContext same as DescribeMaintenanceWindowsForTargetPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeMaintenanceWindowsForTargetPagesWithContext(ctx aws.Context, input *DescribeMaintenanceWindowsForTargetInput, fn func(*DescribeMaintenanceWindowsForTargetOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeMaintenanceWindowsForTargetInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeMaintenanceWindowsForTargetRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeMaintenanceWindowsForTargetOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeOpsItems = "DescribeOpsItems" // DescribeOpsItemsRequest generates a "aws/request.Request" representing the @@ -4699,6 +5864,12 @@ func (c *SSM) DescribeOpsItemsRequest(input *DescribeOpsItemsInput) (req *reques Name: opDescribeOpsItems, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -4714,13 +5885,13 @@ func (c *SSM) DescribeOpsItemsRequest(input *DescribeOpsItemsInput) (req *reques // // Query a set of OpsItems. You must have permission in AWS Identity and Access // Management (IAM) to query a list of OpsItems. For more information, see Getting -// Started with OpsCenter (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) +// started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) // in the AWS Systems Manager User Guide. // // Operations engineers and IT professionals use OpsCenter to view, investigate, // and remediate operational issues impacting the performance and health of // their AWS resources. For more information, see AWS Systems Manager OpsCenter -// (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) // in the AWS Systems Manager User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4756,6 +5927,58 @@ func (c *SSM) DescribeOpsItemsWithContext(ctx aws.Context, input *DescribeOpsIte return out, req.Send() } +// DescribeOpsItemsPages iterates over the pages of a DescribeOpsItems operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeOpsItems method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeOpsItems operation. +// pageNum := 0 +// err := client.DescribeOpsItemsPages(params, +// func(page *ssm.DescribeOpsItemsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeOpsItemsPages(input *DescribeOpsItemsInput, fn func(*DescribeOpsItemsOutput, bool) bool) error { + return c.DescribeOpsItemsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeOpsItemsPagesWithContext same as DescribeOpsItemsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *SSM) DescribeOpsItemsPagesWithContext(ctx aws.Context, input *DescribeOpsItemsInput, fn func(*DescribeOpsItemsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeOpsItemsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeOpsItemsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeOpsItemsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeParameters = "DescribeParameters" // DescribeParametersRequest generates a "aws/request.Request" representing the @@ -4945,6 +6168,12 @@ func (c *SSM) DescribePatchBaselinesRequest(input *DescribePatchBaselinesInput) Name: opDescribePatchBaselines, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -4993,6 +6222,58 @@ func (c *SSM) DescribePatchBaselinesWithContext(ctx aws.Context, input *Describe return out, req.Send() } +// DescribePatchBaselinesPages iterates over the pages of a DescribePatchBaselines operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribePatchBaselines method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePatchBaselines operation. +// pageNum := 0 +// err := client.DescribePatchBaselinesPages(params, +// func(page *ssm.DescribePatchBaselinesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribePatchBaselinesPages(input *DescribePatchBaselinesInput, fn func(*DescribePatchBaselinesOutput, bool) bool) error { + return c.DescribePatchBaselinesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePatchBaselinesPagesWithContext same as DescribePatchBaselinesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribePatchBaselinesPagesWithContext(ctx aws.Context, input *DescribePatchBaselinesInput, fn func(*DescribePatchBaselinesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePatchBaselinesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePatchBaselinesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribePatchBaselinesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribePatchGroupState = "DescribePatchGroupState" // DescribePatchGroupStateRequest generates a "aws/request.Request" representing the @@ -5106,6 +6387,12 @@ func (c *SSM) DescribePatchGroupsRequest(input *DescribePatchGroupsInput) (req * Name: opDescribePatchGroups, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -5154,6 +6441,58 @@ func (c *SSM) DescribePatchGroupsWithContext(ctx aws.Context, input *DescribePat return out, req.Send() } +// DescribePatchGroupsPages iterates over the pages of a DescribePatchGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribePatchGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePatchGroups operation. +// pageNum := 0 +// err := client.DescribePatchGroupsPages(params, +// func(page *ssm.DescribePatchGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribePatchGroupsPages(input *DescribePatchGroupsInput, fn func(*DescribePatchGroupsOutput, bool) bool) error { + return c.DescribePatchGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePatchGroupsPagesWithContext same as DescribePatchGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribePatchGroupsPagesWithContext(ctx aws.Context, input *DescribePatchGroupsInput, fn func(*DescribePatchGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePatchGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePatchGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribePatchGroupsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribePatchProperties = "DescribePatchProperties" // DescribePatchPropertiesRequest generates a "aws/request.Request" representing the @@ -5185,6 +6524,12 @@ func (c *SSM) DescribePatchPropertiesRequest(input *DescribePatchPropertiesInput Name: opDescribePatchProperties, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -5207,10 +6552,6 @@ func (c *SSM) DescribePatchPropertiesRequest(input *DescribePatchPropertiesInput // The following section lists the properties that can be used in filters for // each major operating system type: // -// WINDOWS -// -// Valid properties: PRODUCT, PRODUCT_FAMILY, CLASSIFICATION, MSRC_SEVERITY -// // AMAZON_LINUX // // Valid properties: PRODUCT, CLASSIFICATION, SEVERITY @@ -5219,10 +6560,18 @@ func (c *SSM) DescribePatchPropertiesRequest(input *DescribePatchPropertiesInput // // Valid properties: PRODUCT, CLASSIFICATION, SEVERITY // -// UBUNTU +// CENTOS +// +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY +// +// DEBIAN // // Valid properties: PRODUCT, PRIORITY // +// ORACLE_LINUX +// +// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY +// // REDHAT_ENTERPRISE_LINUX // // Valid properties: PRODUCT, CLASSIFICATION, SEVERITY @@ -5231,9 +6580,13 @@ func (c *SSM) DescribePatchPropertiesRequest(input *DescribePatchPropertiesInput // // Valid properties: PRODUCT, CLASSIFICATION, SEVERITY // -// CENTOS +// UBUNTU // -// Valid properties: PRODUCT, CLASSIFICATION, SEVERITY +// Valid properties: PRODUCT, PRIORITY +// +// WINDOWS +// +// Valid properties: PRODUCT, PRODUCT_FAMILY, CLASSIFICATION, MSRC_SEVERITY // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5268,6 +6621,58 @@ func (c *SSM) DescribePatchPropertiesWithContext(ctx aws.Context, input *Describ return out, req.Send() } +// DescribePatchPropertiesPages iterates over the pages of a DescribePatchProperties operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribePatchProperties method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePatchProperties operation. +// pageNum := 0 +// err := client.DescribePatchPropertiesPages(params, +// func(page *ssm.DescribePatchPropertiesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribePatchPropertiesPages(input *DescribePatchPropertiesInput, fn func(*DescribePatchPropertiesOutput, bool) bool) error { + return c.DescribePatchPropertiesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribePatchPropertiesPagesWithContext same as DescribePatchPropertiesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribePatchPropertiesPagesWithContext(ctx aws.Context, input *DescribePatchPropertiesInput, fn func(*DescribePatchPropertiesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribePatchPropertiesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribePatchPropertiesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribePatchPropertiesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opDescribeSessions = "DescribeSessions" // DescribeSessionsRequest generates a "aws/request.Request" representing the @@ -5299,6 +6704,12 @@ func (c *SSM) DescribeSessionsRequest(input *DescribeSessionsInput) (req *reques Name: opDescribeSessions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -5354,6 +6765,58 @@ func (c *SSM) DescribeSessionsWithContext(ctx aws.Context, input *DescribeSessio return out, req.Send() } +// DescribeSessionsPages iterates over the pages of a DescribeSessions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSessions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSessions operation. +// pageNum := 0 +// err := client.DescribeSessionsPages(params, +// func(page *ssm.DescribeSessionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) DescribeSessionsPages(input *DescribeSessionsInput, fn func(*DescribeSessionsOutput, bool) bool) error { + return c.DescribeSessionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeSessionsPagesWithContext same as DescribeSessionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) DescribeSessionsPagesWithContext(ctx aws.Context, input *DescribeSessionsInput, fn func(*DescribeSessionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeSessionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeSessionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*DescribeSessionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetAutomationExecution = "GetAutomationExecution" // GetAutomationExecutionRequest generates a "aws/request.Request" representing the @@ -5486,8 +6949,14 @@ func (c *SSM) GetCalendarStateRequest(input *GetCalendarStateInput) (req *reques // of the calendar at a specific time, and returns the next time that the Change // Calendar state will transition. 
If you do not specify a time, GetCalendarState // assumes the current time. Change Calendar entries have two possible states: -// OPEN or CLOSED. For more information about Systems Manager Change Calendar, -// see AWS Systems Manager Change Calendar (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-change-calendar.html) +// OPEN or CLOSED. +// +// If you specify more than one calendar in a request, the command returns the +// status of OPEN only if all calendars in the request are open. If one or more +// calendars in the request are closed, the status returned is CLOSED. +// +// For more information about Systems Manager Change Calendar, see AWS Systems +// Manager Change Calendar (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-change-calendar.html) // in the AWS Systems Manager User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5680,7 +7149,7 @@ func (c *SSM) GetConnectionStatusRequest(input *GetConnectionStatusInput) (req * // GetConnectionStatus API operation for Amazon Simple Systems Manager (SSM). // // Retrieves the Session Manager connection status for an instance to determine -// whether it is connected and ready to receive Session Manager connections. +// whether it is running and ready to receive Session Manager connections. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5859,14 +7328,13 @@ func (c *SSM) GetDeployablePatchSnapshotForInstanceRequest(input *GetDeployableP // // * UnsupportedOperatingSystem // The operating systems you specified is not supported, or the operation is -// not supported for the operating system. Valid operating systems include: -// Windows, AmazonLinux, RedhatEnterpriseLinux, and Ubuntu. +// not supported for the operating system. // // * UnsupportedFeatureRequiredException -// Microsoft application patching is only available on EC2 instances and Advanced -// Instances. To patch Microsoft applications on on-premises servers and VMs, -// you must enable Advanced Instances. For more information, see Using the Advanced-Instances -// Tier (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) +// Microsoft application patching is only available on EC2 instances and advanced +// instances. To patch Microsoft applications on on-premises servers and VMs, +// you must enable advanced instances. For more information, see Using the advanced-instances +// tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) // in the AWS Systems Manager User Guide. // // See also, https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06/GetDeployablePatchSnapshotForInstance @@ -6007,6 +7475,12 @@ func (c *SSM) GetInventoryRequest(input *GetInventoryInput) (req *request.Reques Name: opGetInventory, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -6075,6 +7549,58 @@ func (c *SSM) GetInventoryWithContext(ctx aws.Context, input *GetInventoryInput, return out, req.Send() } +// GetInventoryPages iterates over the pages of a GetInventory operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See GetInventory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetInventory operation. +// pageNum := 0 +// err := client.GetInventoryPages(params, +// func(page *ssm.GetInventoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) GetInventoryPages(input *GetInventoryInput, fn func(*GetInventoryOutput, bool) bool) error { + return c.GetInventoryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetInventoryPagesWithContext same as GetInventoryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetInventoryPagesWithContext(ctx aws.Context, input *GetInventoryInput, fn func(*GetInventoryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetInventoryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetInventoryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetInventoryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetInventorySchema = "GetInventorySchema" // GetInventorySchemaRequest generates a "aws/request.Request" representing the @@ -6106,6 +7632,12 @@ func (c *SSM) GetInventorySchemaRequest(input *GetInventorySchemaInput) (req *re Name: opGetInventorySchema, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -6161,6 +7693,58 @@ func (c *SSM) GetInventorySchemaWithContext(ctx aws.Context, input *GetInventory return out, req.Send() } +// GetInventorySchemaPages iterates over the pages of a GetInventorySchema operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetInventorySchema method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetInventorySchema operation. +// pageNum := 0 +// err := client.GetInventorySchemaPages(params, +// func(page *ssm.GetInventorySchemaOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) GetInventorySchemaPages(input *GetInventorySchemaInput, fn func(*GetInventorySchemaOutput, bool) bool) error { + return c.GetInventorySchemaPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetInventorySchemaPagesWithContext same as GetInventorySchemaPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetInventorySchemaPagesWithContext(ctx aws.Context, input *GetInventorySchemaInput, fn func(*GetInventorySchemaOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetInventorySchemaInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetInventorySchemaRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetInventorySchemaOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetMaintenanceWindow = "GetMaintenanceWindow" // GetMaintenanceWindowRequest generates a "aws/request.Request" representing the @@ -6220,7 +7804,7 @@ func (c *SSM) GetMaintenanceWindowRequest(input *GetMaintenanceWindowInput) (req // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -6307,7 +7891,7 @@ func (c *SSM) GetMaintenanceWindowExecutionRequest(input *GetMaintenanceWindowEx // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -6395,7 +7979,7 @@ func (c *SSM) GetMaintenanceWindowExecutionTaskRequest(input *GetMaintenanceWind // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -6482,7 +8066,7 @@ func (c *SSM) GetMaintenanceWindowExecutionTaskInvocationRequest(input *GetMaint // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -6569,7 +8153,7 @@ func (c *SSM) GetMaintenanceWindowTaskRequest(input *GetMaintenanceWindowTaskInp // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -6643,13 +8227,13 @@ func (c *SSM) GetOpsItemRequest(input *GetOpsItemInput) (req *request.Request, o // // Get information about an OpsItem by using the ID. You must have permission // in AWS Identity and Access Management (IAM) to view information about an -// OpsItem. 
For more information, see Getting Started with OpsCenter (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) +// OpsItem. For more information, see Getting started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) // in the AWS Systems Manager User Guide. // // Operations engineers and IT professionals use OpsCenter to view, investigate, // and remediate operational issues impacting the performance and health of // their AWS resources. For more information, see AWS Systems Manager OpsCenter -// (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) // in the AWS Systems Manager User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6719,6 +8303,12 @@ func (c *SSM) GetOpsSummaryRequest(input *GetOpsSummaryInput) (req *request.Requ Name: opGetOpsSummary, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -6784,6 +8374,58 @@ func (c *SSM) GetOpsSummaryWithContext(ctx aws.Context, input *GetOpsSummaryInpu return out, req.Send() } +// GetOpsSummaryPages iterates over the pages of a GetOpsSummary operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetOpsSummary method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetOpsSummary operation. +// pageNum := 0 +// err := client.GetOpsSummaryPages(params, +// func(page *ssm.GetOpsSummaryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) GetOpsSummaryPages(input *GetOpsSummaryInput, fn func(*GetOpsSummaryOutput, bool) bool) error { + return c.GetOpsSummaryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetOpsSummaryPagesWithContext same as GetOpsSummaryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) GetOpsSummaryPagesWithContext(ctx aws.Context, input *GetOpsSummaryInput, fn func(*GetOpsSummaryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetOpsSummaryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetOpsSummaryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*GetOpsSummaryOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opGetParameter = "GetParameter" // GetParameterRequest generates a "aws/request.Request" representing the @@ -6924,7 +8566,7 @@ func (c *SSM) GetParameterHistoryRequest(input *GetParameterHistoryInput) (req * // GetParameterHistory API operation for Amazon Simple Systems Manager (SSM). // -// Query a list of all parameters used by the AWS account. 
+// Retrieves the history of all changes to a parameter. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -7323,7 +8965,7 @@ func (c *SSM) GetPatchBaselineRequest(input *GetPatchBaselineInput) (req *reques // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InvalidResourceId @@ -7684,6 +9326,12 @@ func (c *SSM) ListAssociationVersionsRequest(input *ListAssociationVersionsInput Name: opListAssociationVersions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -7738,6 +9386,58 @@ func (c *SSM) ListAssociationVersionsWithContext(ctx aws.Context, input *ListAss return out, req.Send() } +// ListAssociationVersionsPages iterates over the pages of a ListAssociationVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAssociationVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAssociationVersions operation. +// pageNum := 0 +// err := client.ListAssociationVersionsPages(params, +// func(page *ssm.ListAssociationVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListAssociationVersionsPages(input *ListAssociationVersionsInput, fn func(*ListAssociationVersionsOutput, bool) bool) error { + return c.ListAssociationVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAssociationVersionsPagesWithContext same as ListAssociationVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListAssociationVersionsPagesWithContext(ctx aws.Context, input *ListAssociationVersionsInput, fn func(*ListAssociationVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAssociationVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAssociationVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAssociationVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListAssociations = "ListAssociations" // ListAssociationsRequest generates a "aws/request.Request" representing the @@ -8229,6 +9929,12 @@ func (c *SSM) ListComplianceItemsRequest(input *ListComplianceItemsInput) (req * Name: opListComplianceItems, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -8295,6 +10001,58 @@ func (c *SSM) ListComplianceItemsWithContext(ctx aws.Context, input *ListComplia return out, req.Send() } +// ListComplianceItemsPages iterates over the pages of a ListComplianceItems operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListComplianceItems method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListComplianceItems operation. +// pageNum := 0 +// err := client.ListComplianceItemsPages(params, +// func(page *ssm.ListComplianceItemsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListComplianceItemsPages(input *ListComplianceItemsInput, fn func(*ListComplianceItemsOutput, bool) bool) error { + return c.ListComplianceItemsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListComplianceItemsPagesWithContext same as ListComplianceItemsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListComplianceItemsPagesWithContext(ctx aws.Context, input *ListComplianceItemsInput, fn func(*ListComplianceItemsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListComplianceItemsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListComplianceItemsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListComplianceItemsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListComplianceSummaries = "ListComplianceSummaries" // ListComplianceSummariesRequest generates a "aws/request.Request" representing the @@ -8326,6 +10084,12 @@ func (c *SSM) ListComplianceSummariesRequest(input *ListComplianceSummariesInput Name: opListComplianceSummaries, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -8383,6 +10147,58 @@ func (c *SSM) ListComplianceSummariesWithContext(ctx aws.Context, input *ListCom return out, req.Send() } +// ListComplianceSummariesPages iterates over the pages of a ListComplianceSummaries operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See ListComplianceSummaries method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListComplianceSummaries operation. +// pageNum := 0 +// err := client.ListComplianceSummariesPages(params, +// func(page *ssm.ListComplianceSummariesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListComplianceSummariesPages(input *ListComplianceSummariesInput, fn func(*ListComplianceSummariesOutput, bool) bool) error { + return c.ListComplianceSummariesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListComplianceSummariesPagesWithContext same as ListComplianceSummariesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListComplianceSummariesPagesWithContext(ctx aws.Context, input *ListComplianceSummariesInput, fn func(*ListComplianceSummariesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListComplianceSummariesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListComplianceSummariesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListComplianceSummariesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListDocumentVersions = "ListDocumentVersions" // ListDocumentVersionsRequest generates a "aws/request.Request" representing the @@ -8414,6 +10230,12 @@ func (c *SSM) ListDocumentVersionsRequest(input *ListDocumentVersionsInput) (req Name: opListDocumentVersions, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -8468,6 +10290,58 @@ func (c *SSM) ListDocumentVersionsWithContext(ctx aws.Context, input *ListDocume return out, req.Send() } +// ListDocumentVersionsPages iterates over the pages of a ListDocumentVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDocumentVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDocumentVersions operation. +// pageNum := 0 +// err := client.ListDocumentVersionsPages(params, +// func(page *ssm.ListDocumentVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListDocumentVersionsPages(input *ListDocumentVersionsInput, fn func(*ListDocumentVersionsOutput, bool) bool) error { + return c.ListDocumentVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDocumentVersionsPagesWithContext same as ListDocumentVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListDocumentVersionsPagesWithContext(ctx aws.Context, input *ListDocumentVersionsInput, fn func(*ListDocumentVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDocumentVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDocumentVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDocumentVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListDocuments = "ListDocuments" // ListDocumentsRequest generates a "aws/request.Request" representing the @@ -8744,6 +10618,12 @@ func (c *SSM) ListResourceComplianceSummariesRequest(input *ListResourceComplian Name: opListResourceComplianceSummaries, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -8801,6 +10681,58 @@ func (c *SSM) ListResourceComplianceSummariesWithContext(ctx aws.Context, input return out, req.Send() } +// ListResourceComplianceSummariesPages iterates over the pages of a ListResourceComplianceSummaries operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListResourceComplianceSummaries method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListResourceComplianceSummaries operation. +// pageNum := 0 +// err := client.ListResourceComplianceSummariesPages(params, +// func(page *ssm.ListResourceComplianceSummariesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListResourceComplianceSummariesPages(input *ListResourceComplianceSummariesInput, fn func(*ListResourceComplianceSummariesOutput, bool) bool) error { + return c.ListResourceComplianceSummariesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListResourceComplianceSummariesPagesWithContext same as ListResourceComplianceSummariesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListResourceComplianceSummariesPagesWithContext(ctx aws.Context, input *ListResourceComplianceSummariesInput, fn func(*ListResourceComplianceSummariesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListResourceComplianceSummariesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListResourceComplianceSummariesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListResourceComplianceSummariesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListResourceDataSync = "ListResourceDataSync" // ListResourceDataSyncRequest generates a "aws/request.Request" representing the @@ -8832,6 +10764,12 @@ func (c *SSM) ListResourceDataSyncRequest(input *ListResourceDataSyncInput) (req Name: opListResourceDataSync, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -8895,6 +10833,58 @@ func (c *SSM) ListResourceDataSyncWithContext(ctx aws.Context, input *ListResour return out, req.Send() } +// ListResourceDataSyncPages iterates over the pages of a ListResourceDataSync operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListResourceDataSync method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListResourceDataSync operation. +// pageNum := 0 +// err := client.ListResourceDataSyncPages(params, +// func(page *ssm.ListResourceDataSyncOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SSM) ListResourceDataSyncPages(input *ListResourceDataSyncInput, fn func(*ListResourceDataSyncOutput, bool) bool) error { + return c.ListResourceDataSyncPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListResourceDataSyncPagesWithContext same as ListResourceDataSyncPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSM) ListResourceDataSyncPagesWithContext(ctx aws.Context, input *ListResourceDataSyncInput, fn func(*ListResourceDataSyncOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListResourceDataSyncInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListResourceDataSyncRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListResourceDataSyncOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the @@ -9424,7 +11414,7 @@ func (c *SSM) PutParameterRequest(input *PutParameterInput) (req *request.Reques // // * HierarchyLevelLimitExceededException // A hierarchy can have a maximum of 15 levels. For more information, see Requirements -// and Constraints for Parameter Names (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) +// and constraints for parameter names (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) // in the AWS Systems Manager User Guide. 
// // * HierarchyTypeMismatchException @@ -9550,7 +11540,7 @@ func (c *SSM) RegisterDefaultPatchBaselineRequest(input *RegisterDefaultPatchBas // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -9641,7 +11631,7 @@ func (c *SSM) RegisterPatchBaselineForPatchGroupRequest(input *RegisterPatchBase // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InvalidResourceId @@ -9653,7 +11643,7 @@ func (c *SSM) RegisterPatchBaselineForPatchGroupRequest(input *RegisterPatchBase // For example, too many maintenance windows or patch baselines have been created. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -9744,7 +11734,7 @@ func (c *SSM) RegisterTargetWithMaintenanceWindowRequest(input *RegisterTargetWi // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * ResourceLimitExceededException @@ -9752,7 +11742,7 @@ func (c *SSM) RegisterTargetWithMaintenanceWindowRequest(input *RegisterTargetWi // For example, too many maintenance windows or patch baselines have been created. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -9843,7 +11833,7 @@ func (c *SSM) RegisterTaskWithMaintenanceWindowRequest(input *RegisterTaskWithMa // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * ResourceLimitExceededException @@ -9851,7 +11841,7 @@ func (c *SSM) RegisterTaskWithMaintenanceWindowRequest(input *RegisterTaskWithMa // For example, too many maintenance windows or patch baselines have been created. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. 
// // * FeatureNotAvailableException @@ -10140,7 +12130,7 @@ func (c *SSM) ResumeSessionRequest(input *ResumeSessionInput) (req *request.Requ // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -10357,7 +12347,7 @@ func (c *SSM) SendCommandRequest(input *SendCommandInput) (req *request.Request, // The role name can't contain invalid characters. Also verify that you specified // an IAM role for notifications that includes the required trust policy. For // information about configuring the IAM role for Run Command notifications, -// see Configuring Amazon SNS Notifications for Run Command (http://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html) +// see Configuring Amazon SNS Notifications for Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html) // in the AWS Systems Manager User Guide. // // * InvalidNotificationConfig @@ -10545,7 +12535,7 @@ func (c *SSM) StartAutomationExecutionRequest(input *StartAutomationExecutionInp // // * InvalidTarget // The target is not valid or does not exist. It might not be configured for -// EC2 Systems Manager or you might not have permission to perform the operation. +// Systems Manager or you might not have permission to perform the operation. // // * InternalServerError // An error occurred on the server side. @@ -10622,7 +12612,7 @@ func (c *SSM) StartSessionRequest(input *StartSessionInput) (req *request.Reques // // AWS CLI usage: start-session is an interactive command that requires the // Session Manager plugin to be installed on the client machine making the call. -// For information, see Install the Session Manager Plugin for the AWS CLI (http://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) +// For information, see Install the Session Manager plugin for the AWS CLI (https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) // in the AWS Systems Manager User Guide. // // AWS Tools for PowerShell usage: Start-SSMSession is not currently supported @@ -10641,9 +12631,11 @@ func (c *SSM) StartSessionRequest(input *StartSessionInput) (req *request.Reques // // * TargetNotConnected // The specified target instance for the session is not fully configured for -// use with Session Manager. For more information, see Getting Started with -// Session Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) -// in the AWS Systems Manager User Guide. +// use with Session Manager. For more information, see Getting started with +// Session Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) +// in the AWS Systems Manager User Guide. This error is also returned if you +// attempt to start a session on an instance that is located in a different +// account or Region // // * InternalServerError // An error occurred on the server side. @@ -10818,7 +12810,7 @@ func (c *SSM) TerminateSessionRequest(input *TerminateSessionInput) (req *reques // window or Patch baseline, doesn't exist. 
// // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -10943,7 +12935,7 @@ func (c *SSM) UpdateAssociationRequest(input *UpdateAssociationInput) (req *requ // // * InvalidTarget // The target is not valid or does not exist. It might not be configured for -// EC2 Systems Manager or you might not have permission to perform the operation. +// Systems Manager or you might not have permission to perform the operation. // // * InvalidAssociationVersion // The version you specified is not valid. Use ListAssociationVersions to view @@ -11345,7 +13337,7 @@ func (c *SSM) UpdateMaintenanceWindowRequest(input *UpdateMaintenanceWindowInput // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -11449,7 +13441,7 @@ func (c *SSM) UpdateMaintenanceWindowTargetRequest(input *UpdateMaintenanceWindo // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -11537,10 +13529,18 @@ func (c *SSM) UpdateMaintenanceWindowTaskRequest(input *UpdateMaintenanceWindowT // // * MaxErrors // -// If a parameter is null, then the corresponding field is not modified. Also, -// if you set Replace to true, then all fields required by the RegisterTaskWithMaintenanceWindow -// action are required for this request. Optional fields that aren't specified -// are set to null. +// If the value for a parameter in UpdateMaintenanceWindowTask is null, then +// the corresponding field is not modified. If you set Replace to true, then +// all fields required by the RegisterTaskWithMaintenanceWindow action are required +// for this request. Optional fields that aren't specified are set to null. +// +// When you update a maintenance window task that has options specified in TaskInvocationParameters, +// you must provide again all the TaskInvocationParameters values that you want +// to retain. The values you do not specify again are removed. For example, +// suppose that when you registered a Run Command task, you specified TaskInvocationParameters +// values for Comment, NotificationConfig, and OutputS3BucketName. If you update +// the maintenance window task and specify only a different OutputS3BucketName +// value, the values for Comment and NotificationConfig are removed. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11555,7 +13555,7 @@ func (c *SSM) UpdateMaintenanceWindowTaskRequest(input *UpdateMaintenanceWindowT // window or Patch baseline, doesn't exist. 
// // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -11628,8 +13628,10 @@ func (c *SSM) UpdateManagedInstanceRoleRequest(input *UpdateManagedInstanceRoleI // UpdateManagedInstanceRole API operation for Amazon Simple Systems Manager (SSM). // -// Assigns or changes an Amazon Identity and Access Management (IAM) role for -// the managed instance. +// Changes the Amazon Identity and Access Management (IAM) role that is assigned +// to the on-premises instance or virtual machines (VM). IAM roles are first +// assigned to these hybrid instances during the activation process. For more +// information, see CreateActivation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -11723,13 +13725,13 @@ func (c *SSM) UpdateOpsItemRequest(input *UpdateOpsItemInput) (req *request.Requ // // Edit or change an OpsItem. You must have permission in AWS Identity and Access // Management (IAM) to update an OpsItem. For more information, see Getting -// Started with OpsCenter (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) +// started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html) // in the AWS Systems Manager User Guide. // // Operations engineers and IT professionals use OpsCenter to view, investigate, // and remediate operational issues impacting the performance and health of // their AWS resources. For more information, see AWS Systems Manager OpsCenter -// (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) // in the AWS Systems Manager User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -11751,7 +13753,7 @@ func (c *SSM) UpdateOpsItemRequest(input *UpdateOpsItemInput) (req *request.Requ // // * OpsItemLimitExceededException // The request caused OpsItems to exceed one or more quotas. For information -// about OpsItem quotas, see What are the resource limits for OpsCenter? (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). +// about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). // // * OpsItemInvalidParameterException // A specified parameter argument isn't valid. Verify the available arguments @@ -11842,7 +13844,7 @@ func (c *SSM) UpdatePatchBaselineRequest(input *UpdatePatchBaselineInput) (req * // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. // // * InternalServerError @@ -11922,6 +13924,9 @@ func (c *SSM) UpdateResourceDataSyncRequest(input *UpdateResourceDataSyncInput) // the Include all accounts from my AWS Organizations configuration option. 
// Instead, you must delete the first resource data sync, and create a new one. // +// This API action only supports a resource data sync that was created with +// a SyncFromSource SyncType. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -12325,8 +14330,8 @@ func (s AddTagsToResourceOutput) GoString() string { // Error returned if an attempt is made to register a patch group with a patch // baseline that is already registered with a different patch baseline. type AlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12343,17 +14348,17 @@ func (s AlreadyExistsException) GoString() string { func newErrorAlreadyExistsException(v protocol.ResponseMetadata) error { return &AlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AlreadyExistsException) Code() string { +func (s *AlreadyExistsException) Code() string { return "AlreadyExistsException" } // Message returns the exception's message. -func (s AlreadyExistsException) Message() string { +func (s *AlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12361,29 +14366,29 @@ func (s AlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AlreadyExistsException) OrigErr() error { +func (s *AlreadyExistsException) OrigErr() error { return nil } -func (s AlreadyExistsException) Error() string { +func (s *AlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *AlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // You must disassociate a document from all instances before you can delete // it. type AssociatedInstances struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12400,17 +14405,17 @@ func (s AssociatedInstances) GoString() string { func newErrorAssociatedInstances(v protocol.ResponseMetadata) error { return &AssociatedInstances{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AssociatedInstances) Code() string { +func (s *AssociatedInstances) Code() string { return "AssociatedInstances" } // Message returns the exception's message. -func (s AssociatedInstances) Message() string { +func (s *AssociatedInstances) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12418,22 +14423,22 @@ func (s AssociatedInstances) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s AssociatedInstances) OrigErr() error { +func (s *AssociatedInstances) OrigErr() error { return nil } -func (s AssociatedInstances) Error() string { +func (s *AssociatedInstances) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AssociatedInstances) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AssociatedInstances) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AssociatedInstances) RequestID() string { - return s.respMetadata.RequestID +func (s *AssociatedInstances) RequestID() string { + return s.RespMetadata.RequestID } // Describes an association of a Systems Manager document and an instance. @@ -12544,8 +14549,8 @@ func (s *Association) SetTargets(v []*Target) *Association { // The specified association already exists. type AssociationAlreadyExists struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12562,17 +14567,17 @@ func (s AssociationAlreadyExists) GoString() string { func newErrorAssociationAlreadyExists(v protocol.ResponseMetadata) error { return &AssociationAlreadyExists{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AssociationAlreadyExists) Code() string { +func (s *AssociationAlreadyExists) Code() string { return "AssociationAlreadyExists" } // Message returns the exception's message. -func (s AssociationAlreadyExists) Message() string { +func (s *AssociationAlreadyExists) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12580,28 +14585,34 @@ func (s AssociationAlreadyExists) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AssociationAlreadyExists) OrigErr() error { +func (s *AssociationAlreadyExists) OrigErr() error { return nil } -func (s AssociationAlreadyExists) Error() string { +func (s *AssociationAlreadyExists) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AssociationAlreadyExists) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AssociationAlreadyExists) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AssociationAlreadyExists) RequestID() string { - return s.respMetadata.RequestID +func (s *AssociationAlreadyExists) RequestID() string { + return s.RespMetadata.RequestID } // Describes the parameters for a document. type AssociationDescription struct { _ struct{} `type:"structure"` + // By default, when you create a new associations, the system runs it immediately + // after it is created and then according to the schedule you specified. Specify + // this option if you don't want an association to run immediately after you + // create it. + ApplyOnlyAtCronInterval *bool `type:"boolean"` + // The association ID. AssociationId *string `type:"string"` @@ -12665,7 +14676,7 @@ type AssociationDescription struct { // The name of the Systems Manager document. Name *string `type:"string"` - // An Amazon S3 bucket where you want to store the output details of the request. + // An S3 bucket where you want to store the output details of the request. 
OutputLocation *InstanceAssociationOutputLocation `type:"structure"` // Information about the association. @@ -12680,6 +14691,20 @@ type AssociationDescription struct { // The association status. Status *AssociationStatus `type:"structure"` + // The mode for generating association compliance. You can specify AUTO or MANUAL. + // In AUTO mode, the system uses the status of the association execution to + // determine the compliance status. If the association execution runs successfully, + // then the association is COMPLIANT. If the association execution doesn't run + // successfully, the association is NON-COMPLIANT. + // + // In MANUAL mode, you must specify the AssociationId as a parameter for the + // PutComplianceItems API action. In this case, compliance data is not managed + // by State Manager. It is managed by your direct call to the PutComplianceItems + // API action. + // + // By default, all associations use AUTO mode. + SyncCompliance *string `type:"string" enum:"AssociationSyncCompliance"` + // The instances targeted by the request. Targets []*Target `type:"list"` } @@ -12694,6 +14719,12 @@ func (s AssociationDescription) GoString() string { return s.String() } +// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value. +func (s *AssociationDescription) SetApplyOnlyAtCronInterval(v bool) *AssociationDescription { + s.ApplyOnlyAtCronInterval = &v + return s +} + // SetAssociationId sets the AssociationId field's value. func (s *AssociationDescription) SetAssociationId(v string) *AssociationDescription { s.AssociationId = &v @@ -12808,6 +14839,12 @@ func (s *AssociationDescription) SetStatus(v *AssociationStatus) *AssociationDes return s } +// SetSyncCompliance sets the SyncCompliance field's value. +func (s *AssociationDescription) SetSyncCompliance(v string) *AssociationDescription { + s.SyncCompliance = &v + return s +} + // SetTargets sets the Targets field's value. func (s *AssociationDescription) SetTargets(v []*Target) *AssociationDescription { s.Targets = v @@ -12816,8 +14853,8 @@ func (s *AssociationDescription) SetTargets(v []*Target) *AssociationDescription // The specified association does not exist. type AssociationDoesNotExist struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12834,17 +14871,17 @@ func (s AssociationDoesNotExist) GoString() string { func newErrorAssociationDoesNotExist(v protocol.ResponseMetadata) error { return &AssociationDoesNotExist{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AssociationDoesNotExist) Code() string { +func (s *AssociationDoesNotExist) Code() string { return "AssociationDoesNotExist" } // Message returns the exception's message. -func (s AssociationDoesNotExist) Message() string { +func (s *AssociationDoesNotExist) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12852,22 +14889,22 @@ func (s AssociationDoesNotExist) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AssociationDoesNotExist) OrigErr() error { +func (s *AssociationDoesNotExist) OrigErr() error { return nil } -func (s AssociationDoesNotExist) Error() string { +func (s *AssociationDoesNotExist) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s AssociationDoesNotExist) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AssociationDoesNotExist) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AssociationDoesNotExist) RequestID() string { - return s.respMetadata.RequestID +func (s *AssociationDoesNotExist) RequestID() string { + return s.RespMetadata.RequestID } // Includes information about the specified association. @@ -12960,8 +14997,8 @@ func (s *AssociationExecution) SetStatus(v string) *AssociationExecution { // The specified execution ID does not exist. Verify the ID number and try again. type AssociationExecutionDoesNotExist struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12978,17 +15015,17 @@ func (s AssociationExecutionDoesNotExist) GoString() string { func newErrorAssociationExecutionDoesNotExist(v protocol.ResponseMetadata) error { return &AssociationExecutionDoesNotExist{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AssociationExecutionDoesNotExist) Code() string { +func (s *AssociationExecutionDoesNotExist) Code() string { return "AssociationExecutionDoesNotExist" } // Message returns the exception's message. -func (s AssociationExecutionDoesNotExist) Message() string { +func (s *AssociationExecutionDoesNotExist) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12996,22 +15033,22 @@ func (s AssociationExecutionDoesNotExist) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AssociationExecutionDoesNotExist) OrigErr() error { +func (s *AssociationExecutionDoesNotExist) OrigErr() error { return nil } -func (s AssociationExecutionDoesNotExist) Error() string { +func (s *AssociationExecutionDoesNotExist) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AssociationExecutionDoesNotExist) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AssociationExecutionDoesNotExist) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AssociationExecutionDoesNotExist) RequestID() string { - return s.respMetadata.RequestID +func (s *AssociationExecutionDoesNotExist) RequestID() string { + return s.RespMetadata.RequestID } // Filters used in the request. @@ -13294,8 +15331,8 @@ func (s *AssociationFilter) SetValue(v string) *AssociationFilter { // You can have at most 2,000 active associations. type AssociationLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -13312,17 +15349,17 @@ func (s AssociationLimitExceeded) GoString() string { func newErrorAssociationLimitExceeded(v protocol.ResponseMetadata) error { return &AssociationLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AssociationLimitExceeded) Code() string { +func (s *AssociationLimitExceeded) Code() string { return "AssociationLimitExceeded" } // Message returns the exception's message. 
-func (s AssociationLimitExceeded) Message() string { +func (s *AssociationLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13330,22 +15367,22 @@ func (s AssociationLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AssociationLimitExceeded) OrigErr() error { +func (s *AssociationLimitExceeded) OrigErr() error { return nil } -func (s AssociationLimitExceeded) Error() string { +func (s *AssociationLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AssociationLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AssociationLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AssociationLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *AssociationLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Information about the association. @@ -13475,6 +15512,12 @@ func (s *AssociationStatus) SetName(v string) *AssociationStatus { type AssociationVersionInfo struct { _ struct{} `type:"structure"` + // By default, when you create a new associations, the system runs it immediately + // after it is created and then according to the schedule you specified. Specify + // this option if you don't want an association to run immediately after you + // create it. + ApplyOnlyAtCronInterval *bool `type:"boolean"` + // The ID created by the system when the association was created. AssociationId *string `type:"string"` @@ -13535,6 +15578,20 @@ type AssociationVersionInfo struct { // version was created. ScheduleExpression *string `min:"1" type:"string"` + // The mode for generating association compliance. You can specify AUTO or MANUAL. + // In AUTO mode, the system uses the status of the association execution to + // determine the compliance status. If the association execution runs successfully, + // then the association is COMPLIANT. If the association execution doesn't run + // successfully, the association is NON-COMPLIANT. + // + // In MANUAL mode, you must specify the AssociationId as a parameter for the + // PutComplianceItems API action. In this case, compliance data is not managed + // by State Manager. It is managed by your direct call to the PutComplianceItems + // API action. + // + // By default, all associations use AUTO mode. + SyncCompliance *string `type:"string" enum:"AssociationSyncCompliance"` + // The targets specified for the association when the association version was // created. Targets []*Target `type:"list"` @@ -13550,6 +15607,12 @@ func (s AssociationVersionInfo) GoString() string { return s.String() } +// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value. +func (s *AssociationVersionInfo) SetApplyOnlyAtCronInterval(v bool) *AssociationVersionInfo { + s.ApplyOnlyAtCronInterval = &v + return s +} + // SetAssociationId sets the AssociationId field's value. func (s *AssociationVersionInfo) SetAssociationId(v string) *AssociationVersionInfo { s.AssociationId = &v @@ -13622,6 +15685,12 @@ func (s *AssociationVersionInfo) SetScheduleExpression(v string) *AssociationVer return s } +// SetSyncCompliance sets the SyncCompliance field's value. 
+func (s *AssociationVersionInfo) SetSyncCompliance(v string) *AssociationVersionInfo { + s.SyncCompliance = &v + return s +} + // SetTargets sets the Targets field's value. func (s *AssociationVersionInfo) SetTargets(v []*Target) *AssociationVersionInfo { s.Targets = v @@ -13631,8 +15700,8 @@ func (s *AssociationVersionInfo) SetTargets(v []*Target) *AssociationVersionInfo // You have reached the maximum number versions allowed for an association. // Each association has a limit of 1,000 versions. type AssociationVersionLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -13649,17 +15718,17 @@ func (s AssociationVersionLimitExceeded) GoString() string { func newErrorAssociationVersionLimitExceeded(v protocol.ResponseMetadata) error { return &AssociationVersionLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AssociationVersionLimitExceeded) Code() string { +func (s *AssociationVersionLimitExceeded) Code() string { return "AssociationVersionLimitExceeded" } // Message returns the exception's message. -func (s AssociationVersionLimitExceeded) Message() string { +func (s *AssociationVersionLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13667,22 +15736,22 @@ func (s AssociationVersionLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AssociationVersionLimitExceeded) OrigErr() error { +func (s *AssociationVersionLimitExceeded) OrigErr() error { return nil } -func (s AssociationVersionLimitExceeded) Error() string { +func (s *AssociationVersionLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AssociationVersionLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AssociationVersionLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AssociationVersionLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *AssociationVersionLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // A structure that includes attributes that describe a document attachment. @@ -13785,10 +15854,10 @@ type AttachmentsSource struct { // to a document. The format for Value depends on the type of key you specify. // // * For the key SourceUrl, the value is an S3 bucket location. For example: - // "Values": [ "s3://my-bucket/my-folder" ] + // "Values": [ "s3://doc-example-bucket/my-folder" ] // // * For the key S3FileUrl, the value is a file in an S3 bucket. For example: - // "Values": [ "s3://my-bucket/my-folder/my-file.py" ] + // "Values": [ "s3://doc-example-bucket/my-folder/my-file.py" ] // // * For the key AttachmentReference, the value is constructed from the name // of another SSM document in your account, a version number of that document, @@ -13844,8 +15913,8 @@ func (s *AttachmentsSource) SetValues(v []*string) *AttachmentsSource { // An Automation document with the specified name could not be found. 
type AutomationDefinitionNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -13862,17 +15931,17 @@ func (s AutomationDefinitionNotFoundException) GoString() string { func newErrorAutomationDefinitionNotFoundException(v protocol.ResponseMetadata) error { return &AutomationDefinitionNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AutomationDefinitionNotFoundException) Code() string { +func (s *AutomationDefinitionNotFoundException) Code() string { return "AutomationDefinitionNotFoundException" } // Message returns the exception's message. -func (s AutomationDefinitionNotFoundException) Message() string { +func (s *AutomationDefinitionNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13880,28 +15949,28 @@ func (s AutomationDefinitionNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AutomationDefinitionNotFoundException) OrigErr() error { +func (s *AutomationDefinitionNotFoundException) OrigErr() error { return nil } -func (s AutomationDefinitionNotFoundException) Error() string { +func (s *AutomationDefinitionNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AutomationDefinitionNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AutomationDefinitionNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AutomationDefinitionNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *AutomationDefinitionNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // An Automation document with the specified name and version could not be found. type AutomationDefinitionVersionNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -13918,17 +15987,17 @@ func (s AutomationDefinitionVersionNotFoundException) GoString() string { func newErrorAutomationDefinitionVersionNotFoundException(v protocol.ResponseMetadata) error { return &AutomationDefinitionVersionNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AutomationDefinitionVersionNotFoundException) Code() string { +func (s *AutomationDefinitionVersionNotFoundException) Code() string { return "AutomationDefinitionVersionNotFoundException" } // Message returns the exception's message. -func (s AutomationDefinitionVersionNotFoundException) Message() string { +func (s *AutomationDefinitionVersionNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13936,22 +16005,22 @@ func (s AutomationDefinitionVersionNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
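These exception types now use pointer receivers and an exported RespMetadata field, but they still satisfy the awserr interfaces, so matching on error codes keeps working. A hedged sketch of consumer-side handling; the wrapper function and its behaviour are illustrative only (imports assumed: fmt, github.com/aws/aws-sdk-go/aws/awserr, github.com/aws/aws-sdk-go/service/ssm):

    // handleCreateAssociation is a hypothetical helper showing error-code matching.
    func handleCreateAssociation(svc *ssm.SSM, input *ssm.CreateAssociationInput) error {
        _, err := svc.CreateAssociation(input)
        if err == nil {
            return nil
        }
        if aerr, ok := err.(awserr.Error); ok {
            switch aerr.Code() {
            case ssm.ErrCodeAssociationAlreadyExists:
                return nil // idempotent create: treat as success
            case ssm.ErrCodeAssociationLimitExceeded:
                return fmt.Errorf("association quota reached: %s", aerr.Message())
            }
        }
        if rf, ok := err.(awserr.RequestFailure); ok {
            // RequestFailure exposes the StatusCode/RequestID accessors shown above.
            return fmt.Errorf("request %s failed with HTTP %d: %w", rf.RequestID(), rf.StatusCode(), err)
        }
        return err
    }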
-func (s AutomationDefinitionVersionNotFoundException) OrigErr() error { +func (s *AutomationDefinitionVersionNotFoundException) OrigErr() error { return nil } -func (s AutomationDefinitionVersionNotFoundException) Error() string { +func (s *AutomationDefinitionVersionNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AutomationDefinitionVersionNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AutomationDefinitionVersionNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AutomationDefinitionVersionNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *AutomationDefinitionVersionNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // Detailed information about the current state of an individual Automation @@ -14265,8 +16334,8 @@ func (s *AutomationExecutionFilter) SetValues(v []*string) *AutomationExecutionF // The number of simultaneously running Automation executions exceeded the allowable // limit. type AutomationExecutionLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -14283,17 +16352,17 @@ func (s AutomationExecutionLimitExceededException) GoString() string { func newErrorAutomationExecutionLimitExceededException(v protocol.ResponseMetadata) error { return &AutomationExecutionLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AutomationExecutionLimitExceededException) Code() string { +func (s *AutomationExecutionLimitExceededException) Code() string { return "AutomationExecutionLimitExceededException" } // Message returns the exception's message. -func (s AutomationExecutionLimitExceededException) Message() string { +func (s *AutomationExecutionLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14301,22 +16370,22 @@ func (s AutomationExecutionLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AutomationExecutionLimitExceededException) OrigErr() error { +func (s *AutomationExecutionLimitExceededException) OrigErr() error { return nil } -func (s AutomationExecutionLimitExceededException) Error() string { +func (s *AutomationExecutionLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AutomationExecutionLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AutomationExecutionLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AutomationExecutionLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *AutomationExecutionLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Details about a specific Automation execution. @@ -14331,8 +16400,8 @@ type AutomationExecutionMetadata struct { // Use this filter with DescribeAutomationExecutions. Specify either Local or // CrossAccount. 
CrossAccount is an Automation that runs in multiple AWS Regions - // and accounts. For more information, see Executing Automations in Multiple - // AWS Regions and Accounts (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-automation-multiple-accounts-and-regions.html) + // and accounts. For more information, see Running Automation workflows in multiple + // AWS Regions and accounts (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-automation-multiple-accounts-and-regions.html) // in the AWS Systems Manager User Guide. AutomationType *string `type:"string" enum:"AutomationType"` @@ -14361,7 +16430,7 @@ type AutomationExecutionMetadata struct { // The list of execution outputs as defined in the Automation document. FailureMessage *string `type:"string"` - // An Amazon S3 bucket where execution information is stored. + // An S3 bucket where execution information is stored. LogFile *string `type:"string"` // The MaxConcurrency value specified by the user when starting the Automation. @@ -14540,8 +16609,8 @@ func (s *AutomationExecutionMetadata) SetTargets(v []*Target) *AutomationExecuti // There is no automation execution information for the requested automation // execution ID. type AutomationExecutionNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -14558,17 +16627,17 @@ func (s AutomationExecutionNotFoundException) GoString() string { func newErrorAutomationExecutionNotFoundException(v protocol.ResponseMetadata) error { return &AutomationExecutionNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AutomationExecutionNotFoundException) Code() string { +func (s *AutomationExecutionNotFoundException) Code() string { return "AutomationExecutionNotFoundException" } // Message returns the exception's message. -func (s AutomationExecutionNotFoundException) Message() string { +func (s *AutomationExecutionNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14576,29 +16645,29 @@ func (s AutomationExecutionNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AutomationExecutionNotFoundException) OrigErr() error { +func (s *AutomationExecutionNotFoundException) OrigErr() error { return nil } -func (s AutomationExecutionNotFoundException) Error() string { +func (s *AutomationExecutionNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AutomationExecutionNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AutomationExecutionNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AutomationExecutionNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *AutomationExecutionNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The specified step name and execution ID don't exist. Verify the information // and try again. 
type AutomationStepNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -14615,17 +16684,17 @@ func (s AutomationStepNotFoundException) GoString() string { func newErrorAutomationStepNotFoundException(v protocol.ResponseMetadata) error { return &AutomationStepNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AutomationStepNotFoundException) Code() string { +func (s *AutomationStepNotFoundException) Code() string { return "AutomationStepNotFoundException" } // Message returns the exception's message. -func (s AutomationStepNotFoundException) Message() string { +func (s *AutomationStepNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14633,22 +16702,22 @@ func (s AutomationStepNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AutomationStepNotFoundException) OrigErr() error { +func (s *AutomationStepNotFoundException) OrigErr() error { return nil } -func (s AutomationStepNotFoundException) Error() string { +func (s *AutomationStepNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AutomationStepNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AutomationStepNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s AutomationStepNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *AutomationStepNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type CancelCommandInput struct { @@ -14874,16 +16943,16 @@ type Command struct { // The maximum number of instances that are allowed to run the command at the // same time. You can specify a number of instances, such as 10, or a percentage // of instances, such as 10%. The default value is 50. For more information - // about how to use MaxConcurrency, see Running Commands Using Systems Manager - // Run Command (http://docs.aws.amazon.com/systems-manager/latest/userguide/run-command.html) + // about how to use MaxConcurrency, see Running commands using Systems Manager + // Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/run-command.html) // in the AWS Systems Manager User Guide. MaxConcurrency *string `min:"1" type:"string"` // The maximum number of errors allowed before the system stops sending the // command to additional targets. You can specify a number of errors, such as // 10, or a percentage or errors, such as 10%. The default value is 0. For more - // information about how to use MaxErrors, see Running Commands Using Systems - // Manager Run Command (http://docs.aws.amazon.com/systems-manager/latest/userguide/run-command.html) + // information about how to use MaxErrors, see Running commands using Systems + // Manager Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/run-command.html) // in the AWS Systems Manager User Guide. MaxErrors *string `min:"1" type:"string"` @@ -14899,8 +16968,8 @@ type Command struct { OutputS3KeyPrefix *string `type:"string"` // (Deprecated) You can no longer specify this parameter. The system ignores - // it. 
Instead, Systems Manager automatically determines the Amazon S3 bucket - // region. + // it. Instead, Systems Manager automatically determines the Region of the S3 + // bucket. OutputS3Region *string `min:"3" type:"string"` // The parameter values to be inserted in the document when running the command. @@ -14919,8 +16988,8 @@ type Command struct { // A detailed status of the command execution. StatusDetails includes more information // than Status because it includes states resulting from error and concurrency // control parameters. StatusDetails can show different results than Status. - // For more information about these statuses, see Understanding Command Statuses - // (http://docs.aws.amazon.com/systems-manager/latest/userguide/monitor-commands.html) + // For more information about these statuses, see Understanding command statuses + // (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitor-commands.html) // in the AWS Systems Manager User Guide. StatusDetails can be one of the following // values: // @@ -14960,6 +17029,9 @@ type Command struct { // that you specify. Targets is required if you don't provide one or more instance // IDs in the call. Targets []*Target `type:"list"` + + // The TimeoutSeconds value specified for a command. + TimeoutSeconds *int64 `min:"30" type:"integer"` } // String returns the string representation @@ -15110,7 +17182,16 @@ func (s *Command) SetTargets(v []*Target) *Command { return s } +// SetTimeoutSeconds sets the TimeoutSeconds field's value. +func (s *Command) SetTimeoutSeconds(v int64) *Command { + s.TimeoutSeconds = &v + return s +} + // Describes a command filter. +// +// An instance ID can't be specified when a command status is Pending because +// the command hasn't run on the instance yet. type CommandFilter struct { _ struct{} `type:"structure"` @@ -15217,9 +17298,8 @@ type CommandInvocation struct { // The instance ID in which this invocation was requested. InstanceId *string `type:"string"` - // The name of the invocation target. For Amazon EC2 instances this is the value - // for the aws:Name tag. For on-premises instances, this is the name of the - // instance. + // The name of the invocation target. For EC2 instances this is the value for + // the aws:Name tag. For on-premises instances, this is the name of the instance. InstanceName *string `type:"string"` // Configurations for sending notifications about command status changes on @@ -15233,16 +17313,16 @@ type CommandInvocation struct { // notifications about command status changes on a per instance basis. ServiceRole *string `type:"string"` - // The URL to the plugin's StdErr file in Amazon S3, if the Amazon S3 bucket - // was defined for the parent command. For an invocation, StandardErrorUrl is - // populated if there is just one plugin defined for the command, and the Amazon - // S3 bucket was defined for the command. + // The URL to the plugin's StdErr file in Amazon S3, if the S3 bucket was defined + // for the parent command. For an invocation, StandardErrorUrl is populated + // if there is just one plugin defined for the command, and the S3 bucket was + // defined for the command. StandardErrorUrl *string `type:"string"` - // The URL to the plugin's StdOut file in Amazon S3, if the Amazon S3 bucket - // was defined for the parent command. For an invocation, StandardOutputUrl - // is populated if there is just one plugin defined for the command, and the - // Amazon S3 bucket was defined for the command. 
+ // The URL to the plugin's StdOut file in Amazon S3, if the S3 bucket was defined + // for the parent command. For an invocation, StandardOutputUrl is populated + // if there is just one plugin defined for the command, and the S3 bucket was + // defined for the command. StandardOutputUrl *string `type:"string"` // Whether or not the invocation succeeded, failed, or is pending. @@ -15252,7 +17332,7 @@ type CommandInvocation struct { // targeted by the command). StatusDetails includes more information than Status // because it includes states resulting from error and concurrency control parameters. // StatusDetails can show different results than Status. For more information - // about these statuses, see Understanding Command Statuses (http://docs.aws.amazon.com/systems-manager/latest/userguide/monitor-commands.html) + // about these statuses, see Understanding command statuses (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitor-commands.html) // in the AWS Systems Manager User Guide. StatusDetails can be one of the following // values: // @@ -15419,13 +17499,13 @@ type CommandPlugin struct { // This was requested when issuing the command. For example, in the following // response: // - // test_folder/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-1234567876543/awsrunShellScript + // doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript // - // test_folder is the name of the Amazon S3 bucket; + // doc-example-bucket is the name of the S3 bucket; // // ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix; // - // i-1234567876543 is the instance ID; + // i-02573cafcfEXAMPLE is the instance ID; // // awsrunShellScript is the name of the plugin. OutputS3BucketName *string `min:"3" type:"string"` @@ -15434,20 +17514,19 @@ type CommandPlugin struct { // executions should be stored. This was requested when issuing the command. // For example, in the following response: // - // test_folder/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-1234567876543/awsrunShellScript + // doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript // - // test_folder is the name of the Amazon S3 bucket; + // doc-example-bucket is the name of the S3 bucket; // // ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix; // - // i-1234567876543 is the instance ID; + // i-02573cafcfEXAMPLE is the instance ID; // // awsrunShellScript is the name of the plugin. OutputS3KeyPrefix *string `type:"string"` // (Deprecated) You can no longer specify this parameter. The system ignores - // it. Instead, Systems Manager automatically determines the Amazon S3 bucket - // region. + // it. Instead, Systems Manager automatically determines the S3 bucket region. OutputS3Region *string `min:"3" type:"string"` // A numeric response code generated after running the plugin. @@ -15465,8 +17544,7 @@ type CommandPlugin struct { StandardErrorUrl *string `type:"string"` // The URL for the complete text written by the plugin to stdout in Amazon S3. - // If the Amazon S3 bucket for the command was not specified, then this string - // is empty. + // If the S3 bucket for the command was not specified, then this string is empty. StandardOutputUrl *string `type:"string"` // The status of this plugin. You can run a document with multiple plugins. @@ -15475,8 +17553,8 @@ type CommandPlugin struct { // A detailed status of the plugin execution. 
StatusDetails includes more information // than Status because it includes states resulting from error and concurrency // control parameters. StatusDetails can show different results than Status. - // For more information about these statuses, see Understanding Command Statuses - // (http://docs.aws.amazon.com/systems-manager/latest/userguide/monitor-commands.html) + // For more information about these statuses, see Understanding command statuses + // (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitor-commands.html) // in the AWS Systems Manager User Guide. StatusDetails can be one of the following // values: // @@ -15663,7 +17741,7 @@ func (s *ComplianceExecutionSummary) SetExecutionType(v string) *ComplianceExecu // Information about the compliance as defined by the resource type. For example, // for a patch resource type, Items includes information about the PatchSeverity, -// Classification, etc. +// Classification, and so on. type ComplianceItem struct { _ struct{} `type:"structure"` @@ -15693,7 +17771,8 @@ type ComplianceItem struct { // Critical, High, Medium, Low, Informational, Unspecified. Severity *string `type:"string" enum:"ComplianceSeverity"` - // The status of the compliance item. An item is either COMPLIANT or NON_COMPLIANT. + // The status of the compliance item. An item is either COMPLIANT, NON_COMPLIANT, + // or an empty string (for Windows patches that aren't applicable). Status *string `type:"string" enum:"ComplianceStatus"` // A title for the compliance item. For example, if the compliance item is a @@ -15955,8 +18034,8 @@ func (s *ComplianceSummaryItem) SetNonCompliantSummary(v *NonCompliantSummary) * // You specified too many custom compliance types. You can specify a maximum // of 10 different types. type ComplianceTypeCountLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -15973,17 +18052,17 @@ func (s ComplianceTypeCountLimitExceededException) GoString() string { func newErrorComplianceTypeCountLimitExceededException(v protocol.ResponseMetadata) error { return &ComplianceTypeCountLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ComplianceTypeCountLimitExceededException) Code() string { +func (s *ComplianceTypeCountLimitExceededException) Code() string { return "ComplianceTypeCountLimitExceededException" } // Message returns the exception's message. -func (s ComplianceTypeCountLimitExceededException) Message() string { +func (s *ComplianceTypeCountLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15991,22 +18070,22 @@ func (s ComplianceTypeCountLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ComplianceTypeCountLimitExceededException) OrigErr() error { +func (s *ComplianceTypeCountLimitExceededException) OrigErr() error { return nil } -func (s ComplianceTypeCountLimitExceededException) Error() string { +func (s *ComplianceTypeCountLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ComplianceTypeCountLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ComplianceTypeCountLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ComplianceTypeCountLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ComplianceTypeCountLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // A summary of resources that are compliant. The summary is organized according @@ -16066,7 +18145,7 @@ type CreateActivationInput struct { // The Amazon Identity and Access Management (IAM) role that you want to assign // to the managed instance. This IAM role must provide AssumeRole permissions // for the Systems Manager service principal ssm.amazonaws.com. For more information, - // see Create an IAM Service Role for a Hybrid Environment (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-service-role.html) + // see Create an IAM service role for a hybrid environment (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-service-role.html) // in the AWS Systems Manager User Guide. // // IamRole is a required field @@ -16294,6 +18373,12 @@ func (s *CreateAssociationBatchOutput) SetSuccessful(v []*AssociationDescription type CreateAssociationBatchRequestEntry struct { _ struct{} `type:"structure"` + // By default, when you create a new associations, the system runs it immediately + // after it is created and then according to the schedule you specified. Specify + // this option if you don't want an association to run immediately after you + // create it. + ApplyOnlyAtCronInterval *bool `type:"boolean"` + // Specify a descriptive name for the association. AssociationName *string `type:"string"` @@ -16358,7 +18443,7 @@ type CreateAssociationBatchRequestEntry struct { // Name is a required field Name *string `type:"string" required:"true"` - // An Amazon S3 bucket where you want to store the results of this request. + // An S3 bucket where you want to store the results of this request. OutputLocation *InstanceAssociationOutputLocation `type:"structure"` // A description of the parameters for a document. @@ -16367,6 +18452,20 @@ type CreateAssociationBatchRequestEntry struct { // A cron expression that specifies a schedule when the association runs. ScheduleExpression *string `min:"1" type:"string"` + // The mode for generating association compliance. You can specify AUTO or MANUAL. + // In AUTO mode, the system uses the status of the association execution to + // determine the compliance status. If the association execution runs successfully, + // then the association is COMPLIANT. If the association execution doesn't run + // successfully, the association is NON-COMPLIANT. + // + // In MANUAL mode, you must specify the AssociationId as a parameter for the + // PutComplianceItems API action. In this case, compliance data is not managed + // by State Manager. It is managed by your direct call to the PutComplianceItems + // API action. + // + // By default, all associations use AUTO mode. + SyncCompliance *string `type:"string" enum:"AssociationSyncCompliance"` + // The instances targeted by the request. Targets []*Target `type:"list"` } @@ -16421,6 +18520,12 @@ func (s *CreateAssociationBatchRequestEntry) Validate() error { return nil } +// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value. 
+func (s *CreateAssociationBatchRequestEntry) SetApplyOnlyAtCronInterval(v bool) *CreateAssociationBatchRequestEntry { + s.ApplyOnlyAtCronInterval = &v + return s +} + // SetAssociationName sets the AssociationName field's value. func (s *CreateAssociationBatchRequestEntry) SetAssociationName(v string) *CreateAssociationBatchRequestEntry { s.AssociationName = &v @@ -16487,6 +18592,12 @@ func (s *CreateAssociationBatchRequestEntry) SetScheduleExpression(v string) *Cr return s } +// SetSyncCompliance sets the SyncCompliance field's value. +func (s *CreateAssociationBatchRequestEntry) SetSyncCompliance(v string) *CreateAssociationBatchRequestEntry { + s.SyncCompliance = &v + return s +} + // SetTargets sets the Targets field's value. func (s *CreateAssociationBatchRequestEntry) SetTargets(v []*Target) *CreateAssociationBatchRequestEntry { s.Targets = v @@ -16496,6 +18607,12 @@ func (s *CreateAssociationBatchRequestEntry) SetTargets(v []*Target) *CreateAsso type CreateAssociationInput struct { _ struct{} `type:"structure"` + // By default, when you create a new associations, the system runs it immediately + // after it is created and then according to the schedule you specified. Specify + // this option if you don't want an association to run immediately after you + // create it. + ApplyOnlyAtCronInterval *bool `type:"boolean"` + // Specify a descriptive name for the association. AssociationName *string `type:"string"` @@ -16568,7 +18685,7 @@ type CreateAssociationInput struct { // Name is a required field Name *string `type:"string" required:"true"` - // An Amazon S3 bucket where you want to store the output details of the request. + // An S3 bucket where you want to store the output details of the request. OutputLocation *InstanceAssociationOutputLocation `type:"structure"` // The parameters for the runtime configuration of the document. @@ -16577,8 +18694,25 @@ type CreateAssociationInput struct { // A cron expression when the association will be applied to the target(s). ScheduleExpression *string `min:"1" type:"string"` - // The targets (either instances or tags) for the association. You must specify - // a value for Targets if you don't specify a value for InstanceId. + // The mode for generating association compliance. You can specify AUTO or MANUAL. + // In AUTO mode, the system uses the status of the association execution to + // determine the compliance status. If the association execution runs successfully, + // then the association is COMPLIANT. If the association execution doesn't run + // successfully, the association is NON-COMPLIANT. + // + // In MANUAL mode, you must specify the AssociationId as a parameter for the + // PutComplianceItems API action. In this case, compliance data is not managed + // by State Manager. It is managed by your direct call to the PutComplianceItems + // API action. + // + // By default, all associations use AUTO mode. + SyncCompliance *string `type:"string" enum:"AssociationSyncCompliance"` + + // The targets for the association. You can target instances by using tags, + // AWS Resource Groups, all instances in an AWS account, or individual instance + // IDs. For more information about choosing targets for an association, see + // Using targets and rate controls with State Manager associations (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-state-manager-targets-and-rate-controls.html) + // in the AWS Systems Manager User Guide. 
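A sketch of how the new ApplyOnlyAtCronInterval and SyncCompliance fields combine with tag-based Targets and an S3 OutputLocation when creating an association. AWS-UpdateSSMAgent is an AWS-managed document; the association name, tag key, bucket, and prefix are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession()))

	_, err := svc.CreateAssociation(&ssm.CreateAssociationInput{
		Name:               aws.String("AWS-UpdateSSMAgent"),
		AssociationName:    aws.String("update-ssm-agent-weekly"), // placeholder
		ScheduleExpression: aws.String("cron(0 2 ? * SUN *)"),
		// Wait for the first scheduled interval instead of running at creation time.
		ApplyOnlyAtCronInterval: aws.Bool(true),
		// Let State Manager derive compliance from the association execution status.
		SyncCompliance: aws.String("AUTO"),
		// Target instances by tag rather than by individual instance ID.
		Targets: []*ssm.Target{{
			Key:    aws.String("tag:Environment"), // placeholder tag key
			Values: []*string{aws.String("Production")},
		}},
		// Store request results in an S3 bucket (bucket and prefix are placeholders).
		OutputLocation: &ssm.InstanceAssociationOutputLocation{
			S3Location: &ssm.S3OutputLocation{
				OutputS3BucketName: aws.String("doc-example-bucket"),
				OutputS3KeyPrefix:  aws.String("association-output"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}

With SyncCompliance left at AUTO, compliance tracking stays with State Manager; MANUAL would instead require the caller to report compliance through PutComplianceItems, as described above.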
Targets []*Target `type:"list"` } @@ -16632,6 +18766,12 @@ func (s *CreateAssociationInput) Validate() error { return nil } +// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value. +func (s *CreateAssociationInput) SetApplyOnlyAtCronInterval(v bool) *CreateAssociationInput { + s.ApplyOnlyAtCronInterval = &v + return s +} + // SetAssociationName sets the AssociationName field's value. func (s *CreateAssociationInput) SetAssociationName(v string) *CreateAssociationInput { s.AssociationName = &v @@ -16698,6 +18838,12 @@ func (s *CreateAssociationInput) SetScheduleExpression(v string) *CreateAssociat return s } +// SetSyncCompliance sets the SyncCompliance field's value. +func (s *CreateAssociationInput) SetSyncCompliance(v string) *CreateAssociationInput { + s.SyncCompliance = &v + return s +} + // SetTargets sets the Targets field's value. func (s *CreateAssociationInput) SetTargets(v []*Target) *CreateAssociationInput { s.Targets = v @@ -16734,7 +18880,17 @@ type CreateDocumentInput struct { // document. Attachments []*AttachmentsSource `type:"list"` - // A valid JSON or YAML string. + // The content for the new SSM document in JSON or YAML format. We recommend + // storing the contents for your new document in an external JSON or YAML file + // and referencing the file in a command. + // + // For examples, see the following topics in the AWS Systems Manager User Guide. + // + // * Create an SSM document (AWS API) (https://docs.aws.amazon.com/systems-manager/latest/userguide/create-ssm-document-api.html) + // + // * Create an SSM document (AWS CLI) (https://docs.aws.amazon.com/systems-manager/latest/userguide/create-ssm-document-cli.html) + // + // * Create an SSM document (API) (https://docs.aws.amazon.com/systems-manager/latest/userguide/create-ssm-document-api.html) // // Content is a required field Content *string `min:"1" type:"string" required:"true"` @@ -16748,10 +18904,10 @@ type CreateDocumentInput struct { // A name for the Systems Manager document. // - // Do not use the following to begin the names of documents you create. They - // are reserved by AWS for use as document prefixes: + // You can't use the following strings as document name prefixes. These are + // reserved by AWS for use as document name prefixes: // - // * aws + // * aws- // // * amazon // @@ -16760,8 +18916,13 @@ type CreateDocumentInput struct { // Name is a required field Name *string `type:"string" required:"true"` - // A list of SSM documents required by a document. For example, an ApplicationConfiguration - // document requires an ApplicationConfigurationSchema document. + // A list of SSM documents required by a document. This parameter is used exclusively + // by AWS AppConfig. When a user creates an AppConfig configuration in an SSM + // document, the user must also specify a required document for validation purposes. + // In this case, an ApplicationConfiguration document requires an ApplicationConfigurationSchema + // document for validation purposes. For more information, see AWS AppConfig + // (https://docs.aws.amazon.com/systems-manager/latest/userguide/appconfig.html) + // in the AWS Systems Manager User Guide. Requires []*DocumentRequires `min:"1" type:"list"` // Optional metadata that you assign to a resource. Tags enable you to categorize @@ -16781,8 +18942,8 @@ type CreateDocumentInput struct { // on. For example, to run a document on EC2 instances, specify the following // value: /AWS::EC2::Instance. 
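A sketch of CreateDocument using the fields documented above; the content is inlined only to keep the example self-contained (the guidance above is to keep it in an external JSON or YAML file), and the document name is a placeholder chosen to avoid the reserved prefixes:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession()))

	// A small schema 2.2 command document, inlined for the sketch.
	content := `{
  "schemaVersion": "2.2",
  "description": "Example command document",
  "mainSteps": [
    {
      "action": "aws:runShellScript",
      "name": "sayHello",
      "inputs": { "runCommand": ["echo Hello"] }
    }
  ]
}`

	_, err := svc.CreateDocument(&ssm.CreateDocumentInput{
		Content:        aws.String(content),
		Name:           aws.String("Example-HelloWorld"), // avoids the reserved aws-, amazon, amzn prefixes
		DocumentType:   aws.String("Command"),
		DocumentFormat: aws.String("JSON"),
		TargetType:     aws.String("/AWS::EC2::Instance"),
	})
	if err != nil {
		log.Fatal(err)
	}
}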
If you specify a value of '/' the document can // run on all types of resources. If you don't specify a value, the document - // can't run on any resources. For a list of valid resource types, see AWS Resource - // Types Reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) + // can't run on any resources. For a list of valid resource types, see AWS resource + // and property types reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) // in the AWS CloudFormation User Guide. TargetType *string `type:"string"` @@ -16978,6 +19139,18 @@ type CreateMaintenanceWindowInput struct { // Schedule is a required field Schedule *string `min:"1" type:"string" required:"true"` + // The number of days to wait after the date and time specified by a CRON expression + // before running the maintenance window. + // + // For example, the following cron expression schedules a maintenance window + // to run on the third Tuesday of every month at 11:30 PM. + // + // cron(0 30 23 ? * TUE#3 *) + // + // If the schedule offset is 2, the maintenance window won't run until two days + // later. + ScheduleOffset *int64 `min:"1" type:"integer"` + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database @@ -17049,6 +19222,9 @@ func (s *CreateMaintenanceWindowInput) Validate() error { if s.Schedule != nil && len(*s.Schedule) < 1 { invalidParams.Add(request.NewErrParamMinLen("Schedule", 1)) } + if s.ScheduleOffset != nil && *s.ScheduleOffset < 1 { + invalidParams.Add(request.NewErrParamMinValue("ScheduleOffset", 1)) + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -17114,6 +19290,12 @@ func (s *CreateMaintenanceWindowInput) SetSchedule(v string) *CreateMaintenanceW return s } +// SetScheduleOffset sets the ScheduleOffset field's value. +func (s *CreateMaintenanceWindowInput) SetScheduleOffset(v int64) *CreateMaintenanceWindowInput { + s.ScheduleOffset = &v + return s +} + // SetScheduleTimezone sets the ScheduleTimezone field's value. func (s *CreateMaintenanceWindowInput) SetScheduleTimezone(v string) *CreateMaintenanceWindowInput { s.ScheduleTimezone = &v @@ -17189,7 +19371,7 @@ type CreateOpsItemInput struct { // Use the /aws/resources key in OperationalData to specify a related resource // in the request. Use the /aws/automations key in OperationalData to associate // an Automation runbook with the OpsItem. To view AWS CLI example commands - // that use these keys, see Creating OpsItems Manually (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems) + // that use these keys, see Creating OpsItems manually (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems) // in the AWS Systems Manager User Guide. OperationalData map[string]*OpsItemDataValue `type:"map"` @@ -17204,14 +19386,16 @@ type CreateOpsItemInput struct { // Specify a severity to assign to an OpsItem. Severity *string `min:"1" type:"string"` - // The origin of the OpsItem, such as Amazon EC2 or AWS Systems Manager. + // The origin of the OpsItem, such as Amazon EC2 or Systems Manager. + // + // The source name can't contain the following strings: aws, amazon, and amzn. 
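A sketch of the ScheduleOffset behavior described above, reusing the documented cron expression for the third Tuesday of every month at 11:30 PM with a two-day offset; the window name, time zone, duration, and cutoff values are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession()))

	_, err := svc.CreateMaintenanceWindow(&ssm.CreateMaintenanceWindowInput{
		Name: aws.String("example-third-tuesday-window"), // placeholder
		// Third Tuesday of every month at 11:30 PM ...
		Schedule: aws.String("cron(0 30 23 ? * TUE#3 *)"),
		// ... but don't open the window until two days after that date and time.
		ScheduleOffset:           aws.Int64(2),
		ScheduleTimezone:         aws.String("America/Los_Angeles"),
		Duration:                 aws.Int64(4), // window stays open for 4 hours
		Cutoff:                   aws.Int64(1), // stop starting new tasks 1 hour before close
		AllowUnassociatedTargets: aws.Bool(false),
	})
	if err != nil {
		log.Fatal(err)
	}
}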
// // Source is a required field Source *string `min:"1" type:"string" required:"true"` // Optional metadata that you assign to a resource. You can restrict access // to OpsItems by using an inline IAM policy that specifies tags. For more information, - // see Getting Started with OpsCenter (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html#OpsCenter-getting-started-user-permissions) + // see Getting started with OpsCenter (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-getting-started.html#OpsCenter-getting-started-user-permissions) // in the AWS Systems Manager User Guide. // // Tags use a key-value pair. For example: @@ -17387,8 +19571,8 @@ type CreatePatchBaselineInput struct { // A list of explicitly approved patches for the baseline. // // For information about accepted formats for lists of approved patches and - // rejected patches, see Package Name Formats for Approved and Rejected Patch - // Lists (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) + // rejected patches, see About package name formats for approved and rejected + // patch lists (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) // in the AWS Systems Manager User Guide. ApprovedPatches []*string `type:"list"` @@ -17423,8 +19607,8 @@ type CreatePatchBaselineInput struct { // A list of explicitly rejected patches for the baseline. // // For information about accepted formats for lists of approved patches and - // rejected patches, see Package Name Formats for Approved and Rejected Patch - // Lists (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) + // rejected patches, see About package name formats for approved and rejected + // patch lists (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) // in the AWS Systems Manager User Guide. RejectedPatches []*string `type:"list"` @@ -17627,7 +19811,8 @@ func (s *CreatePatchBaselineOutput) SetBaselineId(v string) *CreatePatchBaseline type CreateResourceDataSyncInput struct { _ struct{} `type:"structure"` - // Amazon S3 configuration details for the sync. + // Amazon S3 configuration details for the sync. This parameter is required + // if the SyncType value is SyncToDestination. S3Destination *ResourceDataSyncS3Destination `type:"structure"` // A name for the configuration. @@ -17635,13 +19820,17 @@ type CreateResourceDataSyncInput struct { // SyncName is a required field SyncName *string `min:"1" type:"string" required:"true"` - // Specify information about the data sources to synchronize. + // Specify information about the data sources to synchronize. This parameter + // is required if the SyncType value is SyncFromSource. SyncSource *ResourceDataSyncSource `type:"structure"` // Specify SyncToDestination to create a resource data sync that synchronizes - // data from multiple AWS Regions to an Amazon S3 bucket. Specify SyncFromSource - // to synchronize data from multiple AWS accounts and Regions, as listed in - // AWS Organizations. + // data to an S3 bucket for Inventory. If you specify SyncToDestination, you + // must provide a value for S3Destination. Specify SyncFromSource to synchronize + // data from a single account and multiple Regions, or multiple AWS accounts + // and Regions, as listed in AWS Organizations for Explorer. 
If you specify + // SyncFromSource, you must provide a value for SyncSource. The default value + // is SyncToDestination. SyncType *string `min:"1" type:"string"` } @@ -17725,8 +19914,8 @@ func (s CreateResourceDataSyncOutput) GoString() string { // You have exceeded the limit for custom schemas. Delete one or more custom // schemas and try again. type CustomSchemaCountLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -17743,17 +19932,17 @@ func (s CustomSchemaCountLimitExceededException) GoString() string { func newErrorCustomSchemaCountLimitExceededException(v protocol.ResponseMetadata) error { return &CustomSchemaCountLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s CustomSchemaCountLimitExceededException) Code() string { +func (s *CustomSchemaCountLimitExceededException) Code() string { return "CustomSchemaCountLimitExceededException" } // Message returns the exception's message. -func (s CustomSchemaCountLimitExceededException) Message() string { +func (s *CustomSchemaCountLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17761,22 +19950,22 @@ func (s CustomSchemaCountLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s CustomSchemaCountLimitExceededException) OrigErr() error { +func (s *CustomSchemaCountLimitExceededException) OrigErr() error { return nil } -func (s CustomSchemaCountLimitExceededException) Error() string { +func (s *CustomSchemaCountLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s CustomSchemaCountLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *CustomSchemaCountLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s CustomSchemaCountLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *CustomSchemaCountLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type DeleteActivationInput struct { @@ -17974,7 +20163,7 @@ type DeleteInventoryInput struct { _ struct{} `type:"structure"` // User-provided idempotency token. - ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + ClientToken *string `type:"string" idempotencyToken:"true"` // Use this option to view a summary of the deletion request without deleting // any data or the data type. This option is useful when you only want to understand @@ -17998,7 +20187,7 @@ type DeleteInventoryInput struct { SchemaDeleteOption *string `type:"string" enum:"InventorySchemaDeleteOption"` // The name of the custom inventory type for which you want to delete either - // all previously collected data, or the inventory type itself. + // all previously collected data or the inventory type itself. // // TypeName is a required field TypeName *string `min:"1" type:"string" required:"true"` @@ -18017,9 +20206,6 @@ func (s DeleteInventoryInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
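A sketch of the SyncToDestination case described above, where S3Destination is required; the sync name, bucket, prefix, and Region are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession()))

	_, err := svc.CreateResourceDataSync(&ssm.CreateResourceDataSyncInput{
		SyncName: aws.String("example-inventory-sync"), // placeholder
		// SyncToDestination (the default) requires S3Destination.
		SyncType: aws.String("SyncToDestination"),
		S3Destination: &ssm.ResourceDataSyncS3Destination{
			BucketName: aws.String("doc-example-bucket"), // placeholder bucket
			Region:     aws.String("us-east-1"),
			Prefix:     aws.String("inventory"),
			SyncFormat: aws.String("JsonSerDe"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}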
func (s *DeleteInventoryInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteInventoryInput"} - if s.ClientToken != nil && len(*s.ClientToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) - } if s.TypeName == nil { invalidParams.Add(request.NewErrParamRequired("TypeName")) } @@ -18067,7 +20253,7 @@ type DeleteInventoryOutput struct { DeletionId *string `type:"string"` // A summary of the delete operation. For more information about this summary, - // see Understanding the Delete Inventory Summary (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-delete.html#sysman-inventory-delete-summary) + // see Deleting custom inventory (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-custom.html#sysman-inventory-delete-summary) // in the AWS Systems Manager User Guide. DeletionSummary *InventoryDeletionSummary `type:"structure"` @@ -19692,7 +21878,7 @@ type DescribeDocumentPermissionOutput struct { // either an AWS account or All. AccountIds []*string `type:"list"` - // A list of of AWS accounts where the current document is shared and the version + // A list of AWS accounts where the current document is shared and the version // shared with each account. AccountSharingInfoList []*AccountSharingInfo `type:"list"` } @@ -20009,17 +22195,16 @@ type DescribeInstanceInformationInput struct { _ struct{} `type:"structure"` // One or more filters. Use a filter to return a more specific list of instances. - // You can filter on Amazon EC2 tag. Specify tags by using a key-value mapping. + // You can filter based on tags applied to EC2 instances. Use this Filters data + // type instead of InstanceInformationFilterList, which is deprecated. Filters []*InstanceInformationStringFilter `type:"list"` // This is a legacy method. We recommend that you don't use this method. Instead, - // use the InstanceInformationFilter action. The InstanceInformationFilter action - // enables you to return instance information by using tags that are specified - // as a key-value mapping. + // use the Filters data type. Filters enables you to return instance information + // by filtering based on tags applied to managed instances. // - // If you do use this method, then you can't use the InstanceInformationFilter - // action. Using this method and the InstanceInformationFilter action causes - // an exception error. + // Attempting to use InstanceInformationFilterList and Filters leads to an exception + // error. InstanceInformationFilterList []*InstanceInformationFilter `type:"list"` // The maximum number of items to return for this call. The call also returns @@ -21559,7 +23744,7 @@ type DescribeOpsItemsInput struct { // A token to start the list. Use this token to get the next set of results. NextToken *string `type:"string"` - // One or more filters to limit the reponse. + // One or more filters to limit the response. // // * Key: CreatedTime Operations: GreaterThan, LessThan // @@ -21765,8 +23950,7 @@ func (s *DescribeParametersInput) SetParameterFilters(v []*ParameterStringFilter type DescribeParametersOutput struct { _ struct{} `type:"structure"` - // The token to use when requesting the next set of items. If there are no additional - // items to return, the string is empty. + // The token to use when requesting the next set of items. NextToken *string `type:"string"` // Parameters returned by the request. 
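A sketch of the recommended Filters data type for DescribeInstanceInformation (rather than the deprecated InstanceInformationFilterList), filtering on an EC2 tag; the tag key and value are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession()))

	err := svc.DescribeInstanceInformationPages(&ssm.DescribeInstanceInformationInput{
		Filters: []*ssm.InstanceInformationStringFilter{{
			Key:    aws.String("tag:Environment"), // placeholder tag key
			Values: []*string{aws.String("Production")},
		}},
	}, func(page *ssm.DescribeInstanceInformationOutput, lastPage bool) bool {
		for _, info := range page.InstanceInformationList {
			fmt.Println(aws.StringValue(info.InstanceId), aws.StringValue(info.PingStatus))
		}
		return true // keep paging
	})
	if err != nil {
		log.Fatal(err)
	}
}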
@@ -22047,6 +24231,15 @@ type DescribePatchGroupsInput struct { _ struct{} `type:"structure"` // One or more filters. Use a filter to return a more specific list of results. + // + // For DescribePatchGroups,valid filter keys include the following: + // + // * NAME_PREFIX: The name of the patch group. Wildcards (*) are accepted. + // + // * OPERATING_SYSTEM: The supported operating system type to return results + // for. For valid operating system values, see GetDefaultPatchBaselineRequest$OperatingSystem + // in CreatePatchBaseline. Examples: --filters Key=NAME_PREFIX,Values=MyPatchGroup* + // --filters Key=OPERATING_SYSTEM,Values=AMAZON_LINUX_2 Filters []*PatchOrchestratorFilter `type:"list"` // The maximum number of patch groups to return (per page). @@ -22383,8 +24576,8 @@ func (s *DescribeSessionsOutput) SetSessions(v []*Session) *DescribeSessionsOutp // The specified document already exists. type DocumentAlreadyExists struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -22401,17 +24594,17 @@ func (s DocumentAlreadyExists) GoString() string { func newErrorDocumentAlreadyExists(v protocol.ResponseMetadata) error { return &DocumentAlreadyExists{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DocumentAlreadyExists) Code() string { +func (s *DocumentAlreadyExists) Code() string { return "DocumentAlreadyExists" } // Message returns the exception's message. -func (s DocumentAlreadyExists) Message() string { +func (s *DocumentAlreadyExists) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22419,22 +24612,22 @@ func (s DocumentAlreadyExists) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DocumentAlreadyExists) OrigErr() error { +func (s *DocumentAlreadyExists) OrigErr() error { return nil } -func (s DocumentAlreadyExists) Error() string { +func (s *DocumentAlreadyExists) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DocumentAlreadyExists) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DocumentAlreadyExists) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DocumentAlreadyExists) RequestID() string { - return s.respMetadata.RequestID +func (s *DocumentAlreadyExists) RequestID() string { + return s.RespMetadata.RequestID } // A default version of a document. @@ -22484,7 +24677,7 @@ type DocumentDescription struct { _ struct{} `type:"structure"` // Details about the document attachments, including names, locations, sizes, - // etc. + // and so on. AttachmentsInformation []*AttachmentInformation `type:"list"` // The date when the document was created. @@ -22554,7 +24747,7 @@ type DocumentDescription struct { // The target type which defines the kinds of resources the document can run // on. For example, /AWS::EC2::Instance. For a list of valid resource types, - // see AWS Resource Types Reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) + // see AWS resource and property types reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) // in the AWS CloudFormation User Guide. 
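A Go equivalent of the CLI filter examples above (NAME_PREFIX and OPERATING_SYSTEM), assuming credentials are available to the default session:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession()))

	out, err := svc.DescribePatchGroups(&ssm.DescribePatchGroupsInput{
		Filters: []*ssm.PatchOrchestratorFilter{
			{Key: aws.String("NAME_PREFIX"), Values: []*string{aws.String("MyPatchGroup*")}},
			{Key: aws.String("OPERATING_SYSTEM"), Values: []*string{aws.String("AMAZON_LINUX_2")}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range out.Mappings {
		fmt.Println(aws.StringValue(m.PatchGroup))
	}
}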
TargetType *string `type:"string"` @@ -22704,7 +24897,7 @@ func (s *DocumentDescription) SetVersionName(v string) *DocumentDescription { return s } -// Describes a filter. +// This data type is deprecated. Instead, use DocumentKeyValuesFilter. type DocumentFilter struct { _ struct{} `type:"structure"` @@ -22794,7 +24987,7 @@ type DocumentIdentifier struct { // The target type which defines the kinds of resources the document can run // on. For example, /AWS::EC2::Instance. For a list of valid resource types, - // see AWS Resource Types Reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) + // see AWS resource and property types reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) // in the AWS CloudFormation User Guide. TargetType *string `type:"string"` @@ -22884,24 +25077,68 @@ func (s *DocumentIdentifier) SetVersionName(v string) *DocumentIdentifier { // // For keys, you can specify one or more tags that have been applied to a document. // -// Other valid values include Owner, Name, PlatformTypes, and DocumentType. +// You can also use AWS-provided keys, some of which have specific allowed values. +// These keys and their associated values are as follows: +// +// DocumentType +// +// * ApplicationConfiguration +// +// * ApplicationConfigurationSchema +// +// * Automation +// +// * ChangeCalendar +// +// * Command +// +// * DeploymentStrategy +// +// * Package +// +// * Policy +// +// * Session +// +// Owner // // Note that only one Owner can be specified in a request. For example: Key=Owner,Values=Self. // -// If you use Name as a key, you can use a name prefix to return a list of documents. -// For example, in the AWS CLI, to return a list of all documents that begin -// with Te, run the following command: +// * Amazon +// +// * Private +// +// * Public +// +// * Self +// +// * ThirdParty +// +// PlatformTypes +// +// * Linux +// +// * Windows +// +// Name is another AWS-provided key. If you use Name as a key, you can use a +// name prefix to return a list of documents. For example, in the AWS CLI, to +// return a list of all documents that begin with Te, run the following command: // // aws ssm list-documents --filters Key=Name,Values=Te // +// You can also use the TargetType AWS-provided key. For a list of valid resource +// type values that can be used with this key, see AWS resource and property +// types reference (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) +// in the AWS CloudFormation User Guide. +// // If you specify more than two keys, only documents that are identified by // all the tags are returned in the results. If you specify more than two values // for a key, documents that are identified by any of the values are returned // in the results. // -// To specify a custom key and value pair, use the format Key=tag:[tagName],Values=[valueName]. +// To specify a custom key and value pair, use the format Key=tag:tagName,Values=valueName. // -// For example, if you created a Key called region and are using the AWS CLI +// For example, if you created a key called region and are using the AWS CLI // to call the list-documents command: // // aws ssm list-documents --filters Key=tag:region,Values=east,west Key=Owner,Values=Self @@ -22952,8 +25189,8 @@ func (s *DocumentKeyValuesFilter) SetValues(v []*string) *DocumentKeyValuesFilte // You can have at most 500 active Systems Manager documents. 
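A Go equivalent of the list-documents CLI examples above, combining an AWS-provided key (Owner), an allowed DocumentType value, and a custom tag key/value pair; the tag key and its values are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession()))

	err := svc.ListDocumentsPages(&ssm.ListDocumentsInput{
		Filters: []*ssm.DocumentKeyValuesFilter{
			{Key: aws.String("Owner"), Values: []*string{aws.String("Self")}},
			{Key: aws.String("DocumentType"), Values: []*string{aws.String("Command")}},
			// Custom tag filter, equivalent to --filters Key=tag:region,Values=east,west.
			{Key: aws.String("tag:region"), Values: []*string{aws.String("east"), aws.String("west")}},
		},
	}, func(page *ssm.ListDocumentsOutput, lastPage bool) bool {
		for _, d := range page.DocumentIdentifiers {
			fmt.Println(aws.StringValue(d.Name))
		}
		return true // keep paging
	})
	if err != nil {
		log.Fatal(err)
	}
}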
type DocumentLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -22970,17 +25207,17 @@ func (s DocumentLimitExceeded) GoString() string { func newErrorDocumentLimitExceeded(v protocol.ResponseMetadata) error { return &DocumentLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DocumentLimitExceeded) Code() string { +func (s *DocumentLimitExceeded) Code() string { return "DocumentLimitExceeded" } // Message returns the exception's message. -func (s DocumentLimitExceeded) Message() string { +func (s *DocumentLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -22988,22 +25225,22 @@ func (s DocumentLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DocumentLimitExceeded) OrigErr() error { +func (s *DocumentLimitExceeded) OrigErr() error { return nil } -func (s DocumentLimitExceeded) Error() string { +func (s *DocumentLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DocumentLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DocumentLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DocumentLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *DocumentLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Parameters specified in a System Manager document that run on the server @@ -23064,8 +25301,8 @@ func (s *DocumentParameter) SetType(v string) *DocumentParameter { // a document with a maximum of 20 accounts. You can publicly share up to five // documents. If you need to increase this limit, contact AWS Support. type DocumentPermissionLimit struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -23082,17 +25319,17 @@ func (s DocumentPermissionLimit) GoString() string { func newErrorDocumentPermissionLimit(v protocol.ResponseMetadata) error { return &DocumentPermissionLimit{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DocumentPermissionLimit) Code() string { +func (s *DocumentPermissionLimit) Code() string { return "DocumentPermissionLimit" } // Message returns the exception's message. -func (s DocumentPermissionLimit) Message() string { +func (s *DocumentPermissionLimit) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23100,22 +25337,22 @@ func (s DocumentPermissionLimit) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DocumentPermissionLimit) OrigErr() error { +func (s *DocumentPermissionLimit) OrigErr() error { return nil } -func (s DocumentPermissionLimit) Error() string { +func (s *DocumentPermissionLimit) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s DocumentPermissionLimit) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DocumentPermissionLimit) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DocumentPermissionLimit) RequestID() string { - return s.respMetadata.RequestID +func (s *DocumentPermissionLimit) RequestID() string { + return s.RespMetadata.RequestID } // An SSM document required by the current document. @@ -23263,8 +25500,8 @@ func (s *DocumentVersionInfo) SetVersionName(v string) *DocumentVersionInfo { // The document has too many versions. Delete one or more document versions // and try again. type DocumentVersionLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -23281,17 +25518,17 @@ func (s DocumentVersionLimitExceeded) GoString() string { func newErrorDocumentVersionLimitExceeded(v protocol.ResponseMetadata) error { return &DocumentVersionLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DocumentVersionLimitExceeded) Code() string { +func (s *DocumentVersionLimitExceeded) Code() string { return "DocumentVersionLimitExceeded" } // Message returns the exception's message. -func (s DocumentVersionLimitExceeded) Message() string { +func (s *DocumentVersionLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23299,33 +25536,33 @@ func (s DocumentVersionLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DocumentVersionLimitExceeded) OrigErr() error { +func (s *DocumentVersionLimitExceeded) OrigErr() error { return nil } -func (s DocumentVersionLimitExceeded) Error() string { +func (s *DocumentVersionLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DocumentVersionLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DocumentVersionLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DocumentVersionLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *DocumentVersionLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Error returned when the ID specified for a resource, such as a maintenance // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. type DoesNotExistException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -23342,17 +25579,17 @@ func (s DoesNotExistException) GoString() string { func newErrorDoesNotExistException(v protocol.ResponseMetadata) error { return &DoesNotExistException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s DoesNotExistException) Code() string { +func (s *DoesNotExistException) Code() string { return "DoesNotExistException" } // Message returns the exception's message. -func (s DoesNotExistException) Message() string { +func (s *DoesNotExistException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23360,29 +25597,29 @@ func (s DoesNotExistException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DoesNotExistException) OrigErr() error { +func (s *DoesNotExistException) OrigErr() error { return nil } -func (s DoesNotExistException) Error() string { +func (s *DoesNotExistException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DoesNotExistException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DoesNotExistException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DoesNotExistException) RequestID() string { - return s.respMetadata.RequestID +func (s *DoesNotExistException) RequestID() string { + return s.RespMetadata.RequestID } // The content of the association document matches another document. Change // the content of the document and try again. type DuplicateDocumentContent struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -23399,17 +25636,17 @@ func (s DuplicateDocumentContent) GoString() string { func newErrorDuplicateDocumentContent(v protocol.ResponseMetadata) error { return &DuplicateDocumentContent{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicateDocumentContent) Code() string { +func (s *DuplicateDocumentContent) Code() string { return "DuplicateDocumentContent" } // Message returns the exception's message. -func (s DuplicateDocumentContent) Message() string { +func (s *DuplicateDocumentContent) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23417,29 +25654,29 @@ func (s DuplicateDocumentContent) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicateDocumentContent) OrigErr() error { +func (s *DuplicateDocumentContent) OrigErr() error { return nil } -func (s DuplicateDocumentContent) Error() string { +func (s *DuplicateDocumentContent) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicateDocumentContent) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicateDocumentContent) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DuplicateDocumentContent) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicateDocumentContent) RequestID() string { + return s.RespMetadata.RequestID } // The version name has already been used in this document. Specify a different // version name, and then try again. 
type DuplicateDocumentVersionName struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -23456,17 +25693,17 @@ func (s DuplicateDocumentVersionName) GoString() string { func newErrorDuplicateDocumentVersionName(v protocol.ResponseMetadata) error { return &DuplicateDocumentVersionName{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicateDocumentVersionName) Code() string { +func (s *DuplicateDocumentVersionName) Code() string { return "DuplicateDocumentVersionName" } // Message returns the exception's message. -func (s DuplicateDocumentVersionName) Message() string { +func (s *DuplicateDocumentVersionName) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23474,28 +25711,28 @@ func (s DuplicateDocumentVersionName) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicateDocumentVersionName) OrigErr() error { +func (s *DuplicateDocumentVersionName) OrigErr() error { return nil } -func (s DuplicateDocumentVersionName) Error() string { +func (s *DuplicateDocumentVersionName) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicateDocumentVersionName) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicateDocumentVersionName) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DuplicateDocumentVersionName) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicateDocumentVersionName) RequestID() string { + return s.RespMetadata.RequestID } // You cannot specify an instance ID in more than one association. type DuplicateInstanceId struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -23512,17 +25749,17 @@ func (s DuplicateInstanceId) GoString() string { func newErrorDuplicateInstanceId(v protocol.ResponseMetadata) error { return &DuplicateInstanceId{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DuplicateInstanceId) Code() string { +func (s *DuplicateInstanceId) Code() string { return "DuplicateInstanceId" } // Message returns the exception's message. -func (s DuplicateInstanceId) Message() string { +func (s *DuplicateInstanceId) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23530,22 +25767,22 @@ func (s DuplicateInstanceId) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DuplicateInstanceId) OrigErr() error { +func (s *DuplicateInstanceId) OrigErr() error { return nil } -func (s DuplicateInstanceId) Error() string { +func (s *DuplicateInstanceId) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DuplicateInstanceId) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DuplicateInstanceId) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s DuplicateInstanceId) RequestID() string { - return s.respMetadata.RequestID +func (s *DuplicateInstanceId) RequestID() string { + return s.RespMetadata.RequestID } // The EffectivePatch structure defines metadata about a patch along with the @@ -23678,8 +25915,8 @@ func (s *FailureDetails) SetFailureType(v string) *FailureDetails { // You attempted to register a LAMBDA or STEP_FUNCTIONS task in a region where // the corresponding service is not available. type FeatureNotAvailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -23696,17 +25933,17 @@ func (s FeatureNotAvailableException) GoString() string { func newErrorFeatureNotAvailableException(v protocol.ResponseMetadata) error { return &FeatureNotAvailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s FeatureNotAvailableException) Code() string { +func (s *FeatureNotAvailableException) Code() string { return "FeatureNotAvailableException" } // Message returns the exception's message. -func (s FeatureNotAvailableException) Message() string { +func (s *FeatureNotAvailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -23714,22 +25951,22 @@ func (s FeatureNotAvailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s FeatureNotAvailableException) OrigErr() error { +func (s *FeatureNotAvailableException) OrigErr() error { return nil } -func (s FeatureNotAvailableException) Error() string { +func (s *FeatureNotAvailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s FeatureNotAvailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *FeatureNotAvailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s FeatureNotAvailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *FeatureNotAvailableException) RequestID() string { + return s.RespMetadata.RequestID } type GetAutomationExecutionInput struct { @@ -23905,7 +26142,7 @@ type GetCommandInvocationInput struct { CommandId *string `min:"36" type:"string" required:"true"` // (Required) The ID of the managed instance targeted by the command. A managed - // instance can be an Amazon EC2 instance or an instance in your hybrid environment + // instance can be an EC2 instance or an instance in your hybrid environment // that is configured for Systems Manager. // // InstanceId is a required field @@ -23914,6 +26151,8 @@ type GetCommandInvocationInput struct { // (Optional) The name of the plugin for which you want detailed results. If // the document contains only one plugin, the name can be omitted and the details // will be returned. + // + // Plugin names are also referred to as step names in Systems Manager documents. PluginName *string `min:"4" type:"string"` } @@ -24007,8 +26246,8 @@ type GetCommandInvocationOutput struct { ExecutionStartDateTime *string `type:"string"` // The ID of the managed instance targeted by the command. A managed instance - // can be an Amazon EC2 instance or an instance in your hybrid environment that - // is configured for Systems Manager. 
+ // can be an EC2 instance or an instance in your hybrid environment that is + // configured for Systems Manager. InstanceId *string `type:"string"` // The name of the plugin for which you want detailed results. For example, @@ -24034,7 +26273,7 @@ type GetCommandInvocationOutput struct { StandardOutputContent *string `type:"string"` // The URL for the complete text written by the plugin to stdout in Amazon S3. - // If an Amazon S3 bucket was not specified, then this string is empty. + // If an S3 bucket was not specified, then this string is empty. StandardOutputUrl *string `type:"string"` // The status of this invocation plugin. This status can be different than StatusDetails. @@ -24044,7 +26283,7 @@ type GetCommandInvocationOutput struct { // includes more information than Status because it includes states resulting // from error and concurrency control parameters. StatusDetails can show different // results than Status. For more information about these statuses, see Understanding - // Command Statuses (http://docs.aws.amazon.com/systems-manager/latest/userguide/monitor-commands.html) + // command statuses (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitor-commands.html) // in the AWS Systems Manager User Guide. StatusDetails can be one of the following // values: // @@ -24055,10 +26294,10 @@ type GetCommandInvocationOutput struct { // // * Delayed: The system attempted to send the command to the target, but // the target was not available. The instance might not be available because - // of network issues, the instance was stopped, etc. The system will try - // to deliver the command again. + // of network issues, because the instance was stopped, or for similar reasons. + // The system will try to send the command again. // - // * Success: The command or plugin was run successfully. This is a terminal + // * Success: The command or plugin ran successfully. This is a terminal // state. // // * Delivery Timed Out: The command was not delivered to the instance before @@ -24456,7 +26695,7 @@ type GetDocumentInput struct { // An optional field specifying the version of the artifact associated with // the document. For example, "Release 12, Update 6". This value is unique across - // all versions of a document, and cannot be changed. + // all versions of a document and can't be changed. VersionName *string `type:"string"` } @@ -24511,7 +26750,7 @@ type GetDocumentOutput struct { _ struct{} `type:"structure"` // A description of the document attachments, including names, locations, sizes, - // etc. + // and so on. AttachmentsContent []*AttachmentContent `type:"list"` // The contents of the Systems Manager document. @@ -25474,6 +27713,10 @@ type GetMaintenanceWindowOutput struct { // The schedule of the maintenance window in the form of a cron or rate expression. Schedule *string `min:"1" type:"string"` + // The number of days to wait to run a maintenance window after the scheduled + // CRON expression date and time. + ScheduleOffset *int64 `min:"1" type:"integer"` + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database @@ -25565,6 +27808,12 @@ func (s *GetMaintenanceWindowOutput) SetSchedule(v string) *GetMaintenanceWindow return s } +// SetScheduleOffset sets the ScheduleOffset field's value. 
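A sketch tying the GetCommandInvocation input and output fields above together: PluginName (the step name) is omitted for a single-plugin document, and StandardOutputUrl is checked before printing because it is empty when no S3 bucket was specified for the command. The command and instance IDs are supplied by the caller:

package ssmexample

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ssm"
)

// printInvocationResult fetches detailed results for one instance targeted by a command.
func printInvocationResult(svc *ssm.SSM, commandID, instanceID string) error {
	out, err := svc.GetCommandInvocation(&ssm.GetCommandInvocationInput{
		CommandId:  aws.String(commandID),
		InstanceId: aws.String(instanceID),
		// PluginName may be omitted when the document contains only one plugin.
	})
	if err != nil {
		return err
	}
	fmt.Println("status:", aws.StringValue(out.Status),
		"details:", aws.StringValue(out.StatusDetails))
	fmt.Println(aws.StringValue(out.StandardOutputContent))
	if url := aws.StringValue(out.StandardOutputUrl); url != "" {
		// Only populated when an S3 bucket was specified for the command.
		fmt.Println("full output:", url)
	}
	return nil
}

Because Pending, InProgress, and Delayed are non-terminal statuses, a caller would typically poll this result until a terminal status such as Success or Failed is returned.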
+func (s *GetMaintenanceWindowOutput) SetScheduleOffset(v int64) *GetMaintenanceWindowOutput { + s.ScheduleOffset = &v + return s +} + // SetScheduleTimezone sets the ScheduleTimezone field's value. func (s *GetMaintenanceWindowOutput) SetScheduleTimezone(v string) *GetMaintenanceWindowOutput { s.ScheduleTimezone = &v @@ -26024,7 +28273,7 @@ type GetParameterHistoryInput struct { // results. MaxResults *int64 `min:"1" type:"integer"` - // The name of a parameter you want to query. + // The name of the parameter for which you want to review history. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -26210,6 +28459,12 @@ type GetParametersByPathInput struct { NextToken *string `type:"string"` // Filters to limit the request results. + // + // For GetParametersByPath, the following filter Key names are supported: Type, + // KeyId, Label, and DataType. + // + // The following Key values are not supported for GetParametersByPath: tag, + // Name, Path, and Tier. ParameterFilters []*ParameterStringFilter `type:"list"` // The hierarchy for the parameter. Hierarchies start with a forward slash (/) @@ -26717,7 +28972,8 @@ func (s *GetPatchBaselineOutput) SetSources(v []*PatchSource) *GetPatchBaselineO type GetServiceSettingInput struct { _ struct{} `type:"structure"` - // The ID of the service setting to get. + // The ID of the service setting to get. The setting ID can be /ssm/parameter-store/default-parameter-tier, + // /ssm/parameter-store/high-throughput-enabled, or /ssm/managed-instance/activation-tier. // // SettingId is a required field SettingId *string `min:"1" type:"string" required:"true"` @@ -26780,14 +29036,14 @@ func (s *GetServiceSettingOutput) SetServiceSetting(v *ServiceSetting) *GetServi } // A hierarchy can have a maximum of 15 levels. For more information, see Requirements -// and Constraints for Parameter Names (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) +// and constraints for parameter names (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) // in the AWS Systems Manager User Guide. type HierarchyLevelLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A hierarchy can have a maximum of 15 levels. For more information, see Requirements - // and Constraints for Parameter Names (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) + // and constraints for parameter names (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) // in the AWS Systems Manager User Guide. Message_ *string `locationName:"message" type:"string"` } @@ -26804,17 +29060,17 @@ func (s HierarchyLevelLimitExceededException) GoString() string { func newErrorHierarchyLevelLimitExceededException(v protocol.ResponseMetadata) error { return &HierarchyLevelLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s HierarchyLevelLimitExceededException) Code() string { +func (s *HierarchyLevelLimitExceededException) Code() string { return "HierarchyLevelLimitExceededException" } // Message returns the exception's message. 
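A sketch of GetParametersByPath using one of the supported filter keys listed above (Type); the parameter hierarchy path is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	svc := ssm.New(session.Must(session.NewSession()))

	err := svc.GetParametersByPathPages(&ssm.GetParametersByPathInput{
		Path:           aws.String("/myapp/prod"), // placeholder hierarchy
		Recursive:      aws.Bool(true),
		WithDecryption: aws.Bool(true),
		// Only Type, KeyId, Label, and DataType are supported filter keys here.
		ParameterFilters: []*ssm.ParameterStringFilter{{
			Key:    aws.String("Type"),
			Option: aws.String("Equals"),
			Values: []*string{aws.String("SecureString")},
		}},
	}, func(page *ssm.GetParametersByPathOutput, lastPage bool) bool {
		for _, p := range page.Parameters {
			fmt.Println(aws.StringValue(p.Name))
		}
		return true // keep paging
	})
	if err != nil {
		log.Fatal(err)
	}
}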
-func (s HierarchyLevelLimitExceededException) Message() string { +func (s *HierarchyLevelLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26822,30 +29078,30 @@ func (s HierarchyLevelLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s HierarchyLevelLimitExceededException) OrigErr() error { +func (s *HierarchyLevelLimitExceededException) OrigErr() error { return nil } -func (s HierarchyLevelLimitExceededException) Error() string { +func (s *HierarchyLevelLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s HierarchyLevelLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *HierarchyLevelLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s HierarchyLevelLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *HierarchyLevelLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Parameter Store does not support changing a parameter type in a hierarchy. // For example, you can't change a parameter from a String type to a SecureString // type. You must create a new, unique parameter. type HierarchyTypeMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // Parameter Store does not support changing a parameter type in a hierarchy. // For example, you can't change a parameter from a String type to a SecureString @@ -26865,17 +29121,17 @@ func (s HierarchyTypeMismatchException) GoString() string { func newErrorHierarchyTypeMismatchException(v protocol.ResponseMetadata) error { return &HierarchyTypeMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s HierarchyTypeMismatchException) Code() string { +func (s *HierarchyTypeMismatchException) Code() string { return "HierarchyTypeMismatchException" } // Message returns the exception's message. -func (s HierarchyTypeMismatchException) Message() string { +func (s *HierarchyTypeMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26883,29 +29139,29 @@ func (s HierarchyTypeMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s HierarchyTypeMismatchException) OrigErr() error { +func (s *HierarchyTypeMismatchException) OrigErr() error { return nil } -func (s HierarchyTypeMismatchException) Error() string { +func (s *HierarchyTypeMismatchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s HierarchyTypeMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *HierarchyTypeMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s HierarchyTypeMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *HierarchyTypeMismatchException) RequestID() string { + return s.RespMetadata.RequestID } // Error returned when an idempotent operation is retried and the parameters // don't match the original call to the API with the same idempotency token. type IdempotentParameterMismatch struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -26922,17 +29178,17 @@ func (s IdempotentParameterMismatch) GoString() string { func newErrorIdempotentParameterMismatch(v protocol.ResponseMetadata) error { return &IdempotentParameterMismatch{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IdempotentParameterMismatch) Code() string { +func (s *IdempotentParameterMismatch) Code() string { return "IdempotentParameterMismatch" } // Message returns the exception's message. -func (s IdempotentParameterMismatch) Message() string { +func (s *IdempotentParameterMismatch) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26940,30 +29196,30 @@ func (s IdempotentParameterMismatch) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s IdempotentParameterMismatch) OrigErr() error { +func (s *IdempotentParameterMismatch) OrigErr() error { return nil } -func (s IdempotentParameterMismatch) Error() string { +func (s *IdempotentParameterMismatch) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IdempotentParameterMismatch) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IdempotentParameterMismatch) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IdempotentParameterMismatch) RequestID() string { - return s.respMetadata.RequestID +func (s *IdempotentParameterMismatch) RequestID() string { + return s.RespMetadata.RequestID } // There is a conflict in the policies specified for this parameter. You can't, // for example, specify two Expiration policies for a parameter. Review your // policies, and try again. type IncompatiblePolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -26980,17 +29236,17 @@ func (s IncompatiblePolicyException) GoString() string { func newErrorIncompatiblePolicyException(v protocol.ResponseMetadata) error { return &IncompatiblePolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s IncompatiblePolicyException) Code() string { +func (s *IncompatiblePolicyException) Code() string { return "IncompatiblePolicyException" } // Message returns the exception's message. -func (s IncompatiblePolicyException) Message() string { +func (s *IncompatiblePolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -26998,22 +29254,22 @@ func (s IncompatiblePolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s IncompatiblePolicyException) OrigErr() error { +func (s *IncompatiblePolicyException) OrigErr() error { return nil } -func (s IncompatiblePolicyException) Error() string { +func (s *IncompatiblePolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s IncompatiblePolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *IncompatiblePolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s IncompatiblePolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *IncompatiblePolicyException) RequestID() string { + return s.RespMetadata.RequestID } // Status information about the aggregated associations. @@ -27100,11 +29356,11 @@ func (s *InstanceAssociation) SetInstanceId(v string) *InstanceAssociation { return s } -// An Amazon S3 bucket where you want to store the results of this request. +// An S3 bucket where you want to store the results of this request. type InstanceAssociationOutputLocation struct { _ struct{} `type:"structure"` - // An Amazon S3 bucket where you want to store the results of this request. + // An S3 bucket where you want to store the results of this request. S3Location *S3OutputLocation `type:"structure"` } @@ -27139,11 +29395,11 @@ func (s *InstanceAssociationOutputLocation) SetS3Location(v *S3OutputLocation) * return s } -// The URL of Amazon S3 bucket where you want to store the results of this request. +// The URL of S3 bucket where you want to store the results of this request. type InstanceAssociationOutputUrl struct { _ struct{} `type:"structure"` - // The URL of Amazon S3 bucket where you want to store the results of this request. + // The URL of S3 bucket where you want to store the results of this request. S3OutputUrl *S3OutputUrl `type:"structure"` } @@ -27197,8 +29453,7 @@ type InstanceAssociationStatusInfo struct { // The name of the association. Name *string `type:"string"` - // A URL for an Amazon S3 bucket where you want to store the results of this - // request. + // A URL for an S3 bucket where you want to store the results of this request. OutputUrl *InstanceAssociationOutputUrl `type:"structure"` // Status information about the instance association. @@ -27310,8 +29565,12 @@ type InstanceInformation struct { IPAddress *string `min:"1" type:"string"` // The Amazon Identity and Access Management (IAM) role assigned to the on-premises - // Systems Manager managed instances. This call does not return the IAM role - // for Amazon EC2 instances. + // Systems Manager managed instance. This call does not return the IAM role + // for EC2 instances. To retrieve the IAM role for an EC2 instance, use the + // Amazon EC2 DescribeInstances action. For information, see DescribeInstances + // (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) + // in the Amazon EC2 API Reference or describe-instances (http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html) + // in the AWS CLI Command Reference. IamRole *string `type:"string"` // The instance ID. @@ -27332,10 +29591,22 @@ type InstanceInformation struct { // The last date the association was successfully run. LastSuccessfulAssociationExecutionDate *time.Time `type:"timestamp"` - // The name of the managed instance. 
+ // The name assigned to an on-premises server or virtual machine (VM) when it + // is activated as a Systems Manager managed instance. The name is specified + // as the DefaultInstanceName property using the CreateActivation command. It + // is applied to the managed instance by specifying the Activation Code and + // Activation ID when you install SSM Agent on the instance, as explained in + // Install SSM Agent for a hybrid environment (Linux) (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-linux.html) + // and Install SSM Agent for a hybrid environment (Windows) (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-install-managed-win.html). + // To retrieve the Name tag of an EC2 instance, use the Amazon EC2 DescribeInstances + // action. For information, see DescribeInstances (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) + // in the Amazon EC2 API Reference or describe-instances (http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html) + // in the AWS CLI Command Reference. Name *string `type:"string"` // Connection status of SSM Agent. + // + // The status Inactive has been deprecated and is no longer in use. PingStatus *string `type:"string" enum:"PingStatus"` // The name of the operating system platform running on your instance. @@ -27618,12 +29889,12 @@ type InstancePatchState struct { FailedCount *int64 `type:"integer"` // An https URL or an Amazon S3 path-style URL to a list of patches to be installed. - // This patch installation list, which you maintain in an Amazon S3 bucket in - // YAML format and specify in the SSM document AWS-RunPatchBaseline, overrides - // the patches specified by the default patch baseline. + // This patch installation list, which you maintain in an S3 bucket in YAML + // format and specify in the SSM document AWS-RunPatchBaseline, overrides the + // patches specified by the default patch baseline. // // For more information about the InstallOverrideList parameter, see About the - // SSM Document AWS-RunPatchBaseline (http://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-about-aws-runpatchbaseline.html) + // SSM document AWS-RunPatchBaseline (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-about-aws-runpatchbaseline.html) // in the AWS Systems Manager User Guide. InstallOverrideList *string `min:"1" type:"string"` @@ -27638,7 +29909,7 @@ type InstancePatchState struct { // instance was rebooted. InstalledPendingRebootCount *int64 `type:"integer"` - // The number of instances with patches installed that are specified in a RejectedPatches + // The number of patches installed on an instance that are specified in a RejectedPatches // list. Patches with a status of InstalledRejected were typically installed // before they were added to a RejectedPatches list. // @@ -27916,8 +30187,8 @@ func (s *InstancePatchStateFilter) SetValues(v []*string) *InstancePatchStateFil // An error occurred on the server side. 
type InternalServerError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -27934,17 +30205,17 @@ func (s InternalServerError) GoString() string { func newErrorInternalServerError(v protocol.ResponseMetadata) error { return &InternalServerError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerError) Code() string { +func (s *InternalServerError) Code() string { return "InternalServerError" } // Message returns the exception's message. -func (s InternalServerError) Message() string { +func (s *InternalServerError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -27952,29 +30223,29 @@ func (s InternalServerError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerError) OrigErr() error { +func (s *InternalServerError) OrigErr() error { return nil } -func (s InternalServerError) Error() string { +func (s *InternalServerError) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerError) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerError) RequestID() string { + return s.RespMetadata.RequestID } // The activation is not valid. The activation might have been deleted, or the // ActivationId and the ActivationCode do not match. type InvalidActivation struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -27991,17 +30262,17 @@ func (s InvalidActivation) GoString() string { func newErrorInvalidActivation(v protocol.ResponseMetadata) error { return &InvalidActivation{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidActivation) Code() string { +func (s *InvalidActivation) Code() string { return "InvalidActivation" } // Message returns the exception's message. -func (s InvalidActivation) Message() string { +func (s *InvalidActivation) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28009,29 +30280,29 @@ func (s InvalidActivation) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidActivation) OrigErr() error { +func (s *InvalidActivation) OrigErr() error { return nil } -func (s InvalidActivation) Error() string { +func (s *InvalidActivation) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidActivation) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidActivation) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidActivation) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidActivation) RequestID() string { + return s.RespMetadata.RequestID } // The activation ID is not valid. Verify the you entered the correct ActivationId // or ActivationCode and try again. type InvalidActivationId struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28048,17 +30319,17 @@ func (s InvalidActivationId) GoString() string { func newErrorInvalidActivationId(v protocol.ResponseMetadata) error { return &InvalidActivationId{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidActivationId) Code() string { +func (s *InvalidActivationId) Code() string { return "InvalidActivationId" } // Message returns the exception's message. -func (s InvalidActivationId) Message() string { +func (s *InvalidActivationId) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28066,29 +30337,29 @@ func (s InvalidActivationId) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidActivationId) OrigErr() error { +func (s *InvalidActivationId) OrigErr() error { return nil } -func (s InvalidActivationId) Error() string { +func (s *InvalidActivationId) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidActivationId) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidActivationId) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidActivationId) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidActivationId) RequestID() string { + return s.RespMetadata.RequestID } // The specified aggregator is not valid for inventory groups. Verify that the // aggregator uses a valid inventory type such as AWS:Application or AWS:InstanceInformation. type InvalidAggregatorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28105,17 +30376,17 @@ func (s InvalidAggregatorException) GoString() string { func newErrorInvalidAggregatorException(v protocol.ResponseMetadata) error { return &InvalidAggregatorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAggregatorException) Code() string { +func (s *InvalidAggregatorException) Code() string { return "InvalidAggregatorException" } // Message returns the exception's message. -func (s InvalidAggregatorException) Message() string { +func (s *InvalidAggregatorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28123,28 +30394,28 @@ func (s InvalidAggregatorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidAggregatorException) OrigErr() error { +func (s *InvalidAggregatorException) OrigErr() error { return nil } -func (s InvalidAggregatorException) Error() string { +func (s *InvalidAggregatorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAggregatorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAggregatorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAggregatorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAggregatorException) RequestID() string { + return s.RespMetadata.RequestID } // The request does not meet the regular expression requirement. type InvalidAllowedPatternException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The request does not meet the regular expression requirement. Message_ *string `locationName:"message" type:"string"` @@ -28162,17 +30433,17 @@ func (s InvalidAllowedPatternException) GoString() string { func newErrorInvalidAllowedPatternException(v protocol.ResponseMetadata) error { return &InvalidAllowedPatternException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAllowedPatternException) Code() string { +func (s *InvalidAllowedPatternException) Code() string { return "InvalidAllowedPatternException" } // Message returns the exception's message. -func (s InvalidAllowedPatternException) Message() string { +func (s *InvalidAllowedPatternException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28180,28 +30451,28 @@ func (s InvalidAllowedPatternException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAllowedPatternException) OrigErr() error { +func (s *InvalidAllowedPatternException) OrigErr() error { return nil } -func (s InvalidAllowedPatternException) Error() string { +func (s *InvalidAllowedPatternException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAllowedPatternException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAllowedPatternException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAllowedPatternException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAllowedPatternException) RequestID() string { + return s.RespMetadata.RequestID } // The association is not valid or does not exist. type InvalidAssociation struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28218,17 +30489,17 @@ func (s InvalidAssociation) GoString() string { func newErrorInvalidAssociation(v protocol.ResponseMetadata) error { return &InvalidAssociation{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidAssociation) Code() string { +func (s *InvalidAssociation) Code() string { return "InvalidAssociation" } // Message returns the exception's message. -func (s InvalidAssociation) Message() string { +func (s *InvalidAssociation) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28236,30 +30507,30 @@ func (s InvalidAssociation) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAssociation) OrigErr() error { +func (s *InvalidAssociation) OrigErr() error { return nil } -func (s InvalidAssociation) Error() string { +func (s *InvalidAssociation) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAssociation) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAssociation) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAssociation) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAssociation) RequestID() string { + return s.RespMetadata.RequestID } // The version you specified is not valid. Use ListAssociationVersions to view // all versions of an association according to the association ID. Or, use the // $LATEST parameter to view the latest version of the association. type InvalidAssociationVersion struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28276,17 +30547,17 @@ func (s InvalidAssociationVersion) GoString() string { func newErrorInvalidAssociationVersion(v protocol.ResponseMetadata) error { return &InvalidAssociationVersion{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAssociationVersion) Code() string { +func (s *InvalidAssociationVersion) Code() string { return "InvalidAssociationVersion" } // Message returns the exception's message. -func (s InvalidAssociationVersion) Message() string { +func (s *InvalidAssociationVersion) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28294,30 +30565,30 @@ func (s InvalidAssociationVersion) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAssociationVersion) OrigErr() error { +func (s *InvalidAssociationVersion) OrigErr() error { return nil } -func (s InvalidAssociationVersion) Error() string { +func (s *InvalidAssociationVersion) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAssociationVersion) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAssociationVersion) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAssociationVersion) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAssociationVersion) RequestID() string { + return s.RespMetadata.RequestID } // The supplied parameters for invoking the specified Automation document are // incorrect. For example, they may not match the set of parameters permitted // for the specified Automation document. 
type InvalidAutomationExecutionParametersException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28334,17 +30605,17 @@ func (s InvalidAutomationExecutionParametersException) GoString() string { func newErrorInvalidAutomationExecutionParametersException(v protocol.ResponseMetadata) error { return &InvalidAutomationExecutionParametersException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAutomationExecutionParametersException) Code() string { +func (s *InvalidAutomationExecutionParametersException) Code() string { return "InvalidAutomationExecutionParametersException" } // Message returns the exception's message. -func (s InvalidAutomationExecutionParametersException) Message() string { +func (s *InvalidAutomationExecutionParametersException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28352,28 +30623,28 @@ func (s InvalidAutomationExecutionParametersException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAutomationExecutionParametersException) OrigErr() error { +func (s *InvalidAutomationExecutionParametersException) OrigErr() error { return nil } -func (s InvalidAutomationExecutionParametersException) Error() string { +func (s *InvalidAutomationExecutionParametersException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAutomationExecutionParametersException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAutomationExecutionParametersException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAutomationExecutionParametersException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAutomationExecutionParametersException) RequestID() string { + return s.RespMetadata.RequestID } // The signal is not valid for the current Automation execution. type InvalidAutomationSignalException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28390,17 +30661,17 @@ func (s InvalidAutomationSignalException) GoString() string { func newErrorInvalidAutomationSignalException(v protocol.ResponseMetadata) error { return &InvalidAutomationSignalException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAutomationSignalException) Code() string { +func (s *InvalidAutomationSignalException) Code() string { return "InvalidAutomationSignalException" } // Message returns the exception's message. -func (s InvalidAutomationSignalException) Message() string { +func (s *InvalidAutomationSignalException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28408,28 +30679,28 @@ func (s InvalidAutomationSignalException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidAutomationSignalException) OrigErr() error { +func (s *InvalidAutomationSignalException) OrigErr() error { return nil } -func (s InvalidAutomationSignalException) Error() string { +func (s *InvalidAutomationSignalException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAutomationSignalException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAutomationSignalException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAutomationSignalException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAutomationSignalException) RequestID() string { + return s.RespMetadata.RequestID } // The specified update status operation is not valid. type InvalidAutomationStatusUpdateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28446,17 +30717,17 @@ func (s InvalidAutomationStatusUpdateException) GoString() string { func newErrorInvalidAutomationStatusUpdateException(v protocol.ResponseMetadata) error { return &InvalidAutomationStatusUpdateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAutomationStatusUpdateException) Code() string { +func (s *InvalidAutomationStatusUpdateException) Code() string { return "InvalidAutomationStatusUpdateException" } // Message returns the exception's message. -func (s InvalidAutomationStatusUpdateException) Message() string { +func (s *InvalidAutomationStatusUpdateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28464,27 +30735,27 @@ func (s InvalidAutomationStatusUpdateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAutomationStatusUpdateException) OrigErr() error { +func (s *InvalidAutomationStatusUpdateException) OrigErr() error { return nil } -func (s InvalidAutomationStatusUpdateException) Error() string { +func (s *InvalidAutomationStatusUpdateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAutomationStatusUpdateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAutomationStatusUpdateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidAutomationStatusUpdateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAutomationStatusUpdateException) RequestID() string { + return s.RespMetadata.RequestID } type InvalidCommandId struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -28501,17 +30772,17 @@ func (s InvalidCommandId) GoString() string { func newErrorInvalidCommandId(v protocol.ResponseMetadata) error { return &InvalidCommandId{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidCommandId) Code() string { +func (s *InvalidCommandId) Code() string { return "InvalidCommandId" } // Message returns the exception's message. -func (s InvalidCommandId) Message() string { +func (s *InvalidCommandId) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28519,29 +30790,29 @@ func (s InvalidCommandId) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidCommandId) OrigErr() error { +func (s *InvalidCommandId) OrigErr() error { return nil } -func (s InvalidCommandId) Error() string { +func (s *InvalidCommandId) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidCommandId) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidCommandId) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidCommandId) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidCommandId) RequestID() string { + return s.RespMetadata.RequestID } // One or more of the parameters specified for the delete operation is not valid. // Verify all parameters and try again. type InvalidDeleteInventoryParametersException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28558,17 +30829,17 @@ func (s InvalidDeleteInventoryParametersException) GoString() string { func newErrorInvalidDeleteInventoryParametersException(v protocol.ResponseMetadata) error { return &InvalidDeleteInventoryParametersException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeleteInventoryParametersException) Code() string { +func (s *InvalidDeleteInventoryParametersException) Code() string { return "InvalidDeleteInventoryParametersException" } // Message returns the exception's message. -func (s InvalidDeleteInventoryParametersException) Message() string { +func (s *InvalidDeleteInventoryParametersException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28576,29 +30847,29 @@ func (s InvalidDeleteInventoryParametersException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDeleteInventoryParametersException) OrigErr() error { +func (s *InvalidDeleteInventoryParametersException) OrigErr() error { return nil } -func (s InvalidDeleteInventoryParametersException) Error() string { +func (s *InvalidDeleteInventoryParametersException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeleteInventoryParametersException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeleteInventoryParametersException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeleteInventoryParametersException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeleteInventoryParametersException) RequestID() string { + return s.RespMetadata.RequestID } // The ID specified for the delete operation does not exist or is not valid. // Verify the ID and try again. 
type InvalidDeletionIdException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28615,17 +30886,17 @@ func (s InvalidDeletionIdException) GoString() string { func newErrorInvalidDeletionIdException(v protocol.ResponseMetadata) error { return &InvalidDeletionIdException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDeletionIdException) Code() string { +func (s *InvalidDeletionIdException) Code() string { return "InvalidDeletionIdException" } // Message returns the exception's message. -func (s InvalidDeletionIdException) Message() string { +func (s *InvalidDeletionIdException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28633,28 +30904,28 @@ func (s InvalidDeletionIdException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDeletionIdException) OrigErr() error { +func (s *InvalidDeletionIdException) OrigErr() error { return nil } -func (s InvalidDeletionIdException) Error() string { +func (s *InvalidDeletionIdException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDeletionIdException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDeletionIdException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDeletionIdException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDeletionIdException) RequestID() string { + return s.RespMetadata.RequestID } // The specified document does not exist. type InvalidDocument struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The document does not exist or the document is not available to the user. // This exception can be issued by CreateAssociation, CreateAssociationBatch, @@ -28675,17 +30946,17 @@ func (s InvalidDocument) GoString() string { func newErrorInvalidDocument(v protocol.ResponseMetadata) error { return &InvalidDocument{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDocument) Code() string { +func (s *InvalidDocument) Code() string { return "InvalidDocument" } // Message returns the exception's message. -func (s InvalidDocument) Message() string { +func (s *InvalidDocument) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28693,28 +30964,28 @@ func (s InvalidDocument) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDocument) OrigErr() error { +func (s *InvalidDocument) OrigErr() error { return nil } -func (s InvalidDocument) Error() string { +func (s *InvalidDocument) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDocument) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDocument) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidDocument) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDocument) RequestID() string { + return s.RespMetadata.RequestID } // The content for the document is not valid. type InvalidDocumentContent struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A description of the validation error. Message_ *string `locationName:"Message" type:"string"` @@ -28732,17 +31003,17 @@ func (s InvalidDocumentContent) GoString() string { func newErrorInvalidDocumentContent(v protocol.ResponseMetadata) error { return &InvalidDocumentContent{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDocumentContent) Code() string { +func (s *InvalidDocumentContent) Code() string { return "InvalidDocumentContent" } // Message returns the exception's message. -func (s InvalidDocumentContent) Message() string { +func (s *InvalidDocumentContent) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28750,29 +31021,29 @@ func (s InvalidDocumentContent) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDocumentContent) OrigErr() error { +func (s *InvalidDocumentContent) OrigErr() error { return nil } -func (s InvalidDocumentContent) Error() string { +func (s *InvalidDocumentContent) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDocumentContent) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDocumentContent) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDocumentContent) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDocumentContent) RequestID() string { + return s.RespMetadata.RequestID } // You attempted to delete a document while it is still shared. You must stop // sharing the document before you can delete it. type InvalidDocumentOperation struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28789,17 +31060,17 @@ func (s InvalidDocumentOperation) GoString() string { func newErrorInvalidDocumentOperation(v protocol.ResponseMetadata) error { return &InvalidDocumentOperation{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDocumentOperation) Code() string { +func (s *InvalidDocumentOperation) Code() string { return "InvalidDocumentOperation" } // Message returns the exception's message. -func (s InvalidDocumentOperation) Message() string { +func (s *InvalidDocumentOperation) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28807,28 +31078,28 @@ func (s InvalidDocumentOperation) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDocumentOperation) OrigErr() error { +func (s *InvalidDocumentOperation) OrigErr() error { return nil } -func (s InvalidDocumentOperation) Error() string { +func (s *InvalidDocumentOperation) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidDocumentOperation) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDocumentOperation) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDocumentOperation) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDocumentOperation) RequestID() string { + return s.RespMetadata.RequestID } // The version of the document schema is not supported. type InvalidDocumentSchemaVersion struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28845,17 +31116,17 @@ func (s InvalidDocumentSchemaVersion) GoString() string { func newErrorInvalidDocumentSchemaVersion(v protocol.ResponseMetadata) error { return &InvalidDocumentSchemaVersion{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDocumentSchemaVersion) Code() string { +func (s *InvalidDocumentSchemaVersion) Code() string { return "InvalidDocumentSchemaVersion" } // Message returns the exception's message. -func (s InvalidDocumentSchemaVersion) Message() string { +func (s *InvalidDocumentSchemaVersion) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28863,29 +31134,29 @@ func (s InvalidDocumentSchemaVersion) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDocumentSchemaVersion) OrigErr() error { +func (s *InvalidDocumentSchemaVersion) OrigErr() error { return nil } -func (s InvalidDocumentSchemaVersion) Error() string { +func (s *InvalidDocumentSchemaVersion) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDocumentSchemaVersion) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDocumentSchemaVersion) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDocumentSchemaVersion) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDocumentSchemaVersion) RequestID() string { + return s.RespMetadata.RequestID } // The document type is not valid. Valid document types are described in the // DocumentType property. type InvalidDocumentType struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28902,17 +31173,17 @@ func (s InvalidDocumentType) GoString() string { func newErrorInvalidDocumentType(v protocol.ResponseMetadata) error { return &InvalidDocumentType{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDocumentType) Code() string { +func (s *InvalidDocumentType) Code() string { return "InvalidDocumentType" } // Message returns the exception's message. -func (s InvalidDocumentType) Message() string { +func (s *InvalidDocumentType) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28920,28 +31191,28 @@ func (s InvalidDocumentType) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidDocumentType) OrigErr() error { +func (s *InvalidDocumentType) OrigErr() error { return nil } -func (s InvalidDocumentType) Error() string { +func (s *InvalidDocumentType) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDocumentType) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDocumentType) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDocumentType) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDocumentType) RequestID() string { + return s.RespMetadata.RequestID } // The document version is not valid or does not exist. type InvalidDocumentVersion struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -28958,17 +31229,17 @@ func (s InvalidDocumentVersion) GoString() string { func newErrorInvalidDocumentVersion(v protocol.ResponseMetadata) error { return &InvalidDocumentVersion{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidDocumentVersion) Code() string { +func (s *InvalidDocumentVersion) Code() string { return "InvalidDocumentVersion" } // Message returns the exception's message. -func (s InvalidDocumentVersion) Message() string { +func (s *InvalidDocumentVersion) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -28976,29 +31247,29 @@ func (s InvalidDocumentVersion) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidDocumentVersion) OrigErr() error { +func (s *InvalidDocumentVersion) OrigErr() error { return nil } -func (s InvalidDocumentVersion) Error() string { +func (s *InvalidDocumentVersion) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidDocumentVersion) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidDocumentVersion) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidDocumentVersion) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidDocumentVersion) RequestID() string { + return s.RespMetadata.RequestID } // The filter name is not valid. Verify the you entered the correct name and // try again. type InvalidFilter struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -29015,17 +31286,17 @@ func (s InvalidFilter) GoString() string { func newErrorInvalidFilter(v protocol.ResponseMetadata) error { return &InvalidFilter{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidFilter) Code() string { +func (s *InvalidFilter) Code() string { return "InvalidFilter" } // Message returns the exception's message. 
-func (s InvalidFilter) Message() string { +func (s *InvalidFilter) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29033,28 +31304,28 @@ func (s InvalidFilter) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidFilter) OrigErr() error { +func (s *InvalidFilter) OrigErr() error { return nil } -func (s InvalidFilter) Error() string { +func (s *InvalidFilter) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidFilter) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidFilter) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidFilter) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidFilter) RequestID() string { + return s.RespMetadata.RequestID } // The specified key is not valid. type InvalidFilterKey struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29071,17 +31342,17 @@ func (s InvalidFilterKey) GoString() string { func newErrorInvalidFilterKey(v protocol.ResponseMetadata) error { return &InvalidFilterKey{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidFilterKey) Code() string { +func (s *InvalidFilterKey) Code() string { return "InvalidFilterKey" } // Message returns the exception's message. -func (s InvalidFilterKey) Message() string { +func (s *InvalidFilterKey) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29089,29 +31360,29 @@ func (s InvalidFilterKey) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidFilterKey) OrigErr() error { +func (s *InvalidFilterKey) OrigErr() error { return nil } -func (s InvalidFilterKey) Error() string { +func (s *InvalidFilterKey) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidFilterKey) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidFilterKey) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidFilterKey) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidFilterKey) RequestID() string { + return s.RespMetadata.RequestID } // The specified filter option is not valid. Valid options are Equals and BeginsWith. // For Path filter, valid options are Recursive and OneLevel. type InvalidFilterOption struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The specified filter option is not valid. Valid options are Equals and BeginsWith. // For Path filter, valid options are Recursive and OneLevel. @@ -29130,17 +31401,17 @@ func (s InvalidFilterOption) GoString() string { func newErrorInvalidFilterOption(v protocol.ResponseMetadata) error { return &InvalidFilterOption{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidFilterOption) Code() string { +func (s *InvalidFilterOption) Code() string { return "InvalidFilterOption" } // Message returns the exception's message. -func (s InvalidFilterOption) Message() string { +func (s *InvalidFilterOption) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29148,28 +31419,28 @@ func (s InvalidFilterOption) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidFilterOption) OrigErr() error { +func (s *InvalidFilterOption) OrigErr() error { return nil } -func (s InvalidFilterOption) Error() string { +func (s *InvalidFilterOption) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidFilterOption) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidFilterOption) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidFilterOption) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidFilterOption) RequestID() string { + return s.RespMetadata.RequestID } // The filter value is not valid. Verify the value and try again. type InvalidFilterValue struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -29186,17 +31457,17 @@ func (s InvalidFilterValue) GoString() string { func newErrorInvalidFilterValue(v protocol.ResponseMetadata) error { return &InvalidFilterValue{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidFilterValue) Code() string { +func (s *InvalidFilterValue) Code() string { return "InvalidFilterValue" } // Message returns the exception's message. -func (s InvalidFilterValue) Message() string { +func (s *InvalidFilterValue) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29204,22 +31475,22 @@ func (s InvalidFilterValue) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidFilterValue) OrigErr() error { +func (s *InvalidFilterValue) OrigErr() error { return nil } -func (s InvalidFilterValue) Error() string { +func (s *InvalidFilterValue) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidFilterValue) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidFilterValue) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidFilterValue) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidFilterValue) RequestID() string { + return s.RespMetadata.RequestID } // The following problems can cause this exception: @@ -29233,8 +31504,8 @@ func (s InvalidFilterValue) RequestID() string { // The instance is not in valid state. Valid states are: Running, Pending, Stopped, // Stopping. Invalid states are: Shutting-down and Terminated. 
type InvalidInstanceId struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -29251,17 +31522,17 @@ func (s InvalidInstanceId) GoString() string { func newErrorInvalidInstanceId(v protocol.ResponseMetadata) error { return &InvalidInstanceId{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInstanceId) Code() string { +func (s *InvalidInstanceId) Code() string { return "InvalidInstanceId" } // Message returns the exception's message. -func (s InvalidInstanceId) Message() string { +func (s *InvalidInstanceId) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29269,28 +31540,28 @@ func (s InvalidInstanceId) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInstanceId) OrigErr() error { +func (s *InvalidInstanceId) OrigErr() error { return nil } -func (s InvalidInstanceId) Error() string { +func (s *InvalidInstanceId) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInstanceId) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInstanceId) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInstanceId) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInstanceId) RequestID() string { + return s.RespMetadata.RequestID } // The specified filter value is not valid. type InvalidInstanceInformationFilterValue struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29307,17 +31578,17 @@ func (s InvalidInstanceInformationFilterValue) GoString() string { func newErrorInvalidInstanceInformationFilterValue(v protocol.ResponseMetadata) error { return &InvalidInstanceInformationFilterValue{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInstanceInformationFilterValue) Code() string { +func (s *InvalidInstanceInformationFilterValue) Code() string { return "InvalidInstanceInformationFilterValue" } // Message returns the exception's message. -func (s InvalidInstanceInformationFilterValue) Message() string { +func (s *InvalidInstanceInformationFilterValue) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29325,28 +31596,28 @@ func (s InvalidInstanceInformationFilterValue) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInstanceInformationFilterValue) OrigErr() error { +func (s *InvalidInstanceInformationFilterValue) OrigErr() error { return nil } -func (s InvalidInstanceInformationFilterValue) Error() string { +func (s *InvalidInstanceInformationFilterValue) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInstanceInformationFilterValue) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInstanceInformationFilterValue) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidInstanceInformationFilterValue) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInstanceInformationFilterValue) RequestID() string { + return s.RespMetadata.RequestID } // The specified inventory group is not valid. type InvalidInventoryGroupException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -29363,17 +31634,17 @@ func (s InvalidInventoryGroupException) GoString() string { func newErrorInvalidInventoryGroupException(v protocol.ResponseMetadata) error { return &InvalidInventoryGroupException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInventoryGroupException) Code() string { +func (s *InvalidInventoryGroupException) Code() string { return "InvalidInventoryGroupException" } // Message returns the exception's message. -func (s InvalidInventoryGroupException) Message() string { +func (s *InvalidInventoryGroupException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29381,29 +31652,29 @@ func (s InvalidInventoryGroupException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInventoryGroupException) OrigErr() error { +func (s *InvalidInventoryGroupException) OrigErr() error { return nil } -func (s InvalidInventoryGroupException) Error() string { +func (s *InvalidInventoryGroupException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInventoryGroupException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInventoryGroupException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInventoryGroupException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInventoryGroupException) RequestID() string { + return s.RespMetadata.RequestID } // You specified invalid keys or values in the Context attribute for InventoryItem. // Verify the keys and values, and try again. type InvalidInventoryItemContextException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -29420,17 +31691,17 @@ func (s InvalidInventoryItemContextException) GoString() string { func newErrorInvalidInventoryItemContextException(v protocol.ResponseMetadata) error { return &InvalidInventoryItemContextException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInventoryItemContextException) Code() string { +func (s *InvalidInventoryItemContextException) Code() string { return "InvalidInventoryItemContextException" } // Message returns the exception's message. -func (s InvalidInventoryItemContextException) Message() string { +func (s *InvalidInventoryItemContextException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29438,28 +31709,28 @@ func (s InvalidInventoryItemContextException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s InvalidInventoryItemContextException) OrigErr() error { +func (s *InvalidInventoryItemContextException) OrigErr() error { return nil } -func (s InvalidInventoryItemContextException) Error() string { +func (s *InvalidInventoryItemContextException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInventoryItemContextException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInventoryItemContextException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInventoryItemContextException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInventoryItemContextException) RequestID() string { + return s.RespMetadata.RequestID } // The request is not valid. type InvalidInventoryRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -29476,17 +31747,17 @@ func (s InvalidInventoryRequestException) GoString() string { func newErrorInvalidInventoryRequestException(v protocol.ResponseMetadata) error { return &InvalidInventoryRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidInventoryRequestException) Code() string { +func (s *InvalidInventoryRequestException) Code() string { return "InvalidInventoryRequestException" } // Message returns the exception's message. -func (s InvalidInventoryRequestException) Message() string { +func (s *InvalidInventoryRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29494,28 +31765,28 @@ func (s InvalidInventoryRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidInventoryRequestException) OrigErr() error { +func (s *InvalidInventoryRequestException) OrigErr() error { return nil } -func (s InvalidInventoryRequestException) Error() string { +func (s *InvalidInventoryRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidInventoryRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidInventoryRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidInventoryRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidInventoryRequestException) RequestID() string { + return s.RespMetadata.RequestID } // One or more content items is not valid. type InvalidItemContentException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -29534,17 +31805,17 @@ func (s InvalidItemContentException) GoString() string { func newErrorInvalidItemContentException(v protocol.ResponseMetadata) error { return &InvalidItemContentException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidItemContentException) Code() string { +func (s *InvalidItemContentException) Code() string { return "InvalidItemContentException" } // Message returns the exception's message. -func (s InvalidItemContentException) Message() string { +func (s *InvalidItemContentException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29552,28 +31823,28 @@ func (s InvalidItemContentException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidItemContentException) OrigErr() error { +func (s *InvalidItemContentException) OrigErr() error { return nil } -func (s InvalidItemContentException) Error() string { +func (s *InvalidItemContentException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidItemContentException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidItemContentException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidItemContentException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidItemContentException) RequestID() string { + return s.RespMetadata.RequestID } // The query key ID is not valid. type InvalidKeyId struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29590,17 +31861,17 @@ func (s InvalidKeyId) GoString() string { func newErrorInvalidKeyId(v protocol.ResponseMetadata) error { return &InvalidKeyId{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidKeyId) Code() string { +func (s *InvalidKeyId) Code() string { return "InvalidKeyId" } // Message returns the exception's message. -func (s InvalidKeyId) Message() string { +func (s *InvalidKeyId) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29608,28 +31879,28 @@ func (s InvalidKeyId) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidKeyId) OrigErr() error { +func (s *InvalidKeyId) OrigErr() error { return nil } -func (s InvalidKeyId) Error() string { +func (s *InvalidKeyId) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidKeyId) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidKeyId) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidKeyId) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidKeyId) RequestID() string { + return s.RespMetadata.RequestID } // The specified token is not valid. type InvalidNextToken struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -29646,17 +31917,17 @@ func (s InvalidNextToken) GoString() string { func newErrorInvalidNextToken(v protocol.ResponseMetadata) error { return &InvalidNextToken{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidNextToken) Code() string { +func (s *InvalidNextToken) Code() string { return "InvalidNextToken" } // Message returns the exception's message. -func (s InvalidNextToken) Message() string { +func (s *InvalidNextToken) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29664,29 +31935,29 @@ func (s InvalidNextToken) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextToken) OrigErr() error { +func (s *InvalidNextToken) OrigErr() error { return nil } -func (s InvalidNextToken) Error() string { +func (s *InvalidNextToken) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextToken) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextToken) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextToken) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextToken) RequestID() string { + return s.RespMetadata.RequestID } // One or more configuration items is not valid. Verify that a valid Amazon // Resource Name (ARN) was provided for an Amazon SNS topic. type InvalidNotificationConfig struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -29703,17 +31974,17 @@ func (s InvalidNotificationConfig) GoString() string { func newErrorInvalidNotificationConfig(v protocol.ResponseMetadata) error { return &InvalidNotificationConfig{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNotificationConfig) Code() string { +func (s *InvalidNotificationConfig) Code() string { return "InvalidNotificationConfig" } // Message returns the exception's message. -func (s InvalidNotificationConfig) Message() string { +func (s *InvalidNotificationConfig) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29721,29 +31992,29 @@ func (s InvalidNotificationConfig) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNotificationConfig) OrigErr() error { +func (s *InvalidNotificationConfig) OrigErr() error { return nil } -func (s InvalidNotificationConfig) Error() string { +func (s *InvalidNotificationConfig) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNotificationConfig) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNotificationConfig) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNotificationConfig) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNotificationConfig) RequestID() string { + return s.RespMetadata.RequestID } // The delete inventory option specified is not valid. Verify the option and // try again. 
type InvalidOptionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -29760,17 +32031,17 @@ func (s InvalidOptionException) GoString() string { func newErrorInvalidOptionException(v protocol.ResponseMetadata) error { return &InvalidOptionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOptionException) Code() string { +func (s *InvalidOptionException) Code() string { return "InvalidOptionException" } // Message returns the exception's message. -func (s InvalidOptionException) Message() string { +func (s *InvalidOptionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29778,28 +32049,28 @@ func (s InvalidOptionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOptionException) OrigErr() error { +func (s *InvalidOptionException) OrigErr() error { return nil } -func (s InvalidOptionException) Error() string { +func (s *InvalidOptionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOptionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOptionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidOptionException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOptionException) RequestID() string { + return s.RespMetadata.RequestID } // The S3 bucket does not exist. type InvalidOutputFolder struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29816,17 +32087,17 @@ func (s InvalidOutputFolder) GoString() string { func newErrorInvalidOutputFolder(v protocol.ResponseMetadata) error { return &InvalidOutputFolder{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOutputFolder) Code() string { +func (s *InvalidOutputFolder) Code() string { return "InvalidOutputFolder" } // Message returns the exception's message. -func (s InvalidOutputFolder) Message() string { +func (s *InvalidOutputFolder) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29834,28 +32105,28 @@ func (s InvalidOutputFolder) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOutputFolder) OrigErr() error { +func (s *InvalidOutputFolder) OrigErr() error { return nil } -func (s InvalidOutputFolder) Error() string { +func (s *InvalidOutputFolder) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOutputFolder) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOutputFolder) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidOutputFolder) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOutputFolder) RequestID() string { + return s.RespMetadata.RequestID } // The output location is not valid or does not exist. type InvalidOutputLocation struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -29872,17 +32143,17 @@ func (s InvalidOutputLocation) GoString() string { func newErrorInvalidOutputLocation(v protocol.ResponseMetadata) error { return &InvalidOutputLocation{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOutputLocation) Code() string { +func (s *InvalidOutputLocation) Code() string { return "InvalidOutputLocation" } // Message returns the exception's message. -func (s InvalidOutputLocation) Message() string { +func (s *InvalidOutputLocation) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29890,30 +32161,30 @@ func (s InvalidOutputLocation) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOutputLocation) OrigErr() error { +func (s *InvalidOutputLocation) OrigErr() error { return nil } -func (s InvalidOutputLocation) Error() string { +func (s *InvalidOutputLocation) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOutputLocation) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOutputLocation) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidOutputLocation) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOutputLocation) RequestID() string { + return s.RespMetadata.RequestID } // You must specify values for all required parameters in the Systems Manager // document. You can only supply values to parameters defined in the Systems // Manager document. type InvalidParameters struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -29930,17 +32201,17 @@ func (s InvalidParameters) GoString() string { func newErrorInvalidParameters(v protocol.ResponseMetadata) error { return &InvalidParameters{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameters) Code() string { +func (s *InvalidParameters) Code() string { return "InvalidParameters" } // Message returns the exception's message. -func (s InvalidParameters) Message() string { +func (s *InvalidParameters) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -29948,29 +32219,29 @@ func (s InvalidParameters) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameters) OrigErr() error { +func (s *InvalidParameters) OrigErr() error { return nil } -func (s InvalidParameters) Error() string { +func (s *InvalidParameters) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidParameters) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameters) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameters) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameters) RequestID() string { + return s.RespMetadata.RequestID } // The permission type is not supported. Share is the only supported permission // type. type InvalidPermissionType struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -29987,17 +32258,17 @@ func (s InvalidPermissionType) GoString() string { func newErrorInvalidPermissionType(v protocol.ResponseMetadata) error { return &InvalidPermissionType{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPermissionType) Code() string { +func (s *InvalidPermissionType) Code() string { return "InvalidPermissionType" } // Message returns the exception's message. -func (s InvalidPermissionType) Message() string { +func (s *InvalidPermissionType) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30005,28 +32276,28 @@ func (s InvalidPermissionType) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPermissionType) OrigErr() error { +func (s *InvalidPermissionType) OrigErr() error { return nil } -func (s InvalidPermissionType) Error() string { +func (s *InvalidPermissionType) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPermissionType) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPermissionType) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPermissionType) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPermissionType) RequestID() string { + return s.RespMetadata.RequestID } // The plugin name is not valid. type InvalidPluginName struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30043,17 +32314,17 @@ func (s InvalidPluginName) GoString() string { func newErrorInvalidPluginName(v protocol.ResponseMetadata) error { return &InvalidPluginName{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPluginName) Code() string { +func (s *InvalidPluginName) Code() string { return "InvalidPluginName" } // Message returns the exception's message. -func (s InvalidPluginName) Message() string { +func (s *InvalidPluginName) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30061,28 +32332,28 @@ func (s InvalidPluginName) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPluginName) OrigErr() error { +func (s *InvalidPluginName) OrigErr() error { return nil } -func (s InvalidPluginName) Error() string { +func (s *InvalidPluginName) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidPluginName) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPluginName) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPluginName) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPluginName) RequestID() string { + return s.RespMetadata.RequestID } // A policy attribute or its value is invalid. type InvalidPolicyAttributeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30099,17 +32370,17 @@ func (s InvalidPolicyAttributeException) GoString() string { func newErrorInvalidPolicyAttributeException(v protocol.ResponseMetadata) error { return &InvalidPolicyAttributeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPolicyAttributeException) Code() string { +func (s *InvalidPolicyAttributeException) Code() string { return "InvalidPolicyAttributeException" } // Message returns the exception's message. -func (s InvalidPolicyAttributeException) Message() string { +func (s *InvalidPolicyAttributeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30117,29 +32388,29 @@ func (s InvalidPolicyAttributeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPolicyAttributeException) OrigErr() error { +func (s *InvalidPolicyAttributeException) OrigErr() error { return nil } -func (s InvalidPolicyAttributeException) Error() string { +func (s *InvalidPolicyAttributeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPolicyAttributeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPolicyAttributeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPolicyAttributeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPolicyAttributeException) RequestID() string { + return s.RespMetadata.RequestID } // The policy type is not supported. Parameter Store supports the following // policy types: Expiration, ExpirationNotification, and NoChangeNotification. type InvalidPolicyTypeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30156,17 +32427,17 @@ func (s InvalidPolicyTypeException) GoString() string { func newErrorInvalidPolicyTypeException(v protocol.ResponseMetadata) error { return &InvalidPolicyTypeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPolicyTypeException) Code() string { +func (s *InvalidPolicyTypeException) Code() string { return "InvalidPolicyTypeException" } // Message returns the exception's message. 
-func (s InvalidPolicyTypeException) Message() string { +func (s *InvalidPolicyTypeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30174,29 +32445,29 @@ func (s InvalidPolicyTypeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPolicyTypeException) OrigErr() error { +func (s *InvalidPolicyTypeException) OrigErr() error { return nil } -func (s InvalidPolicyTypeException) Error() string { +func (s *InvalidPolicyTypeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPolicyTypeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPolicyTypeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPolicyTypeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPolicyTypeException) RequestID() string { + return s.RespMetadata.RequestID } // The resource ID is not valid. Verify that you entered the correct ID and // try again. type InvalidResourceId struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30213,17 +32484,17 @@ func (s InvalidResourceId) GoString() string { func newErrorInvalidResourceId(v protocol.ResponseMetadata) error { return &InvalidResourceId{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidResourceId) Code() string { +func (s *InvalidResourceId) Code() string { return "InvalidResourceId" } // Message returns the exception's message. -func (s InvalidResourceId) Message() string { +func (s *InvalidResourceId) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30231,29 +32502,29 @@ func (s InvalidResourceId) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResourceId) OrigErr() error { +func (s *InvalidResourceId) OrigErr() error { return nil } -func (s InvalidResourceId) Error() string { +func (s *InvalidResourceId) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidResourceId) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResourceId) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidResourceId) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResourceId) RequestID() string { + return s.RespMetadata.RequestID } // The resource type is not valid. For example, if you are attempting to tag // an instance, the instance must be a registered, managed instance. type InvalidResourceType struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -30270,17 +32541,17 @@ func (s InvalidResourceType) GoString() string { func newErrorInvalidResourceType(v protocol.ResponseMetadata) error { return &InvalidResourceType{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s InvalidResourceType) Code() string { +func (s *InvalidResourceType) Code() string { return "InvalidResourceType" } // Message returns the exception's message. -func (s InvalidResourceType) Message() string { +func (s *InvalidResourceType) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30288,28 +32559,28 @@ func (s InvalidResourceType) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResourceType) OrigErr() error { +func (s *InvalidResourceType) OrigErr() error { return nil } -func (s InvalidResourceType) Error() string { +func (s *InvalidResourceType) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidResourceType) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResourceType) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidResourceType) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResourceType) RequestID() string { + return s.RespMetadata.RequestID } // The specified inventory item result attribute is not valid. type InvalidResultAttributeException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -30326,17 +32597,17 @@ func (s InvalidResultAttributeException) GoString() string { func newErrorInvalidResultAttributeException(v protocol.ResponseMetadata) error { return &InvalidResultAttributeException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidResultAttributeException) Code() string { +func (s *InvalidResultAttributeException) Code() string { return "InvalidResultAttributeException" } // Message returns the exception's message. -func (s InvalidResultAttributeException) Message() string { +func (s *InvalidResultAttributeException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30344,32 +32615,32 @@ func (s InvalidResultAttributeException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResultAttributeException) OrigErr() error { +func (s *InvalidResultAttributeException) OrigErr() error { return nil } -func (s InvalidResultAttributeException) Error() string { +func (s *InvalidResultAttributeException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidResultAttributeException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResultAttributeException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidResultAttributeException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResultAttributeException) RequestID() string { + return s.RespMetadata.RequestID } // The role name can't contain invalid characters. Also verify that you specified // an IAM role for notifications that includes the required trust policy. 
For // information about configuring the IAM role for Run Command notifications, -// see Configuring Amazon SNS Notifications for Run Command (http://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html) +// see Configuring Amazon SNS Notifications for Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html) // in the AWS Systems Manager User Guide. type InvalidRole struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -30386,17 +32657,17 @@ func (s InvalidRole) GoString() string { func newErrorInvalidRole(v protocol.ResponseMetadata) error { return &InvalidRole{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRole) Code() string { +func (s *InvalidRole) Code() string { return "InvalidRole" } // Message returns the exception's message. -func (s InvalidRole) Message() string { +func (s *InvalidRole) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30404,28 +32675,28 @@ func (s InvalidRole) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRole) OrigErr() error { +func (s *InvalidRole) OrigErr() error { return nil } -func (s InvalidRole) Error() string { +func (s *InvalidRole) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRole) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRole) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRole) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRole) RequestID() string { + return s.RespMetadata.RequestID } // The schedule is invalid. Verify your cron or rate expression and try again. type InvalidSchedule struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -30442,17 +32713,17 @@ func (s InvalidSchedule) GoString() string { func newErrorInvalidSchedule(v protocol.ResponseMetadata) error { return &InvalidSchedule{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidSchedule) Code() string { +func (s *InvalidSchedule) Code() string { return "InvalidSchedule" } // Message returns the exception's message. -func (s InvalidSchedule) Message() string { +func (s *InvalidSchedule) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30460,29 +32731,29 @@ func (s InvalidSchedule) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidSchedule) OrigErr() error { +func (s *InvalidSchedule) OrigErr() error { return nil } -func (s InvalidSchedule) Error() string { +func (s *InvalidSchedule) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidSchedule) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidSchedule) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidSchedule) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidSchedule) RequestID() string { + return s.RespMetadata.RequestID } // The target is not valid or does not exist. It might not be configured for -// EC2 Systems Manager or you might not have permission to perform the operation. +// Systems Manager or you might not have permission to perform the operation. type InvalidTarget struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -30499,17 +32770,17 @@ func (s InvalidTarget) GoString() string { func newErrorInvalidTarget(v protocol.ResponseMetadata) error { return &InvalidTarget{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTarget) Code() string { +func (s *InvalidTarget) Code() string { return "InvalidTarget" } // Message returns the exception's message. -func (s InvalidTarget) Message() string { +func (s *InvalidTarget) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30517,28 +32788,28 @@ func (s InvalidTarget) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTarget) OrigErr() error { +func (s *InvalidTarget) OrigErr() error { return nil } -func (s InvalidTarget) Error() string { +func (s *InvalidTarget) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidTarget) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTarget) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTarget) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTarget) RequestID() string { + return s.RespMetadata.RequestID } // The parameter type name is not valid. type InvalidTypeNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -30555,17 +32826,17 @@ func (s InvalidTypeNameException) GoString() string { func newErrorInvalidTypeNameException(v protocol.ResponseMetadata) error { return &InvalidTypeNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidTypeNameException) Code() string { +func (s *InvalidTypeNameException) Code() string { return "InvalidTypeNameException" } // Message returns the exception's message. -func (s InvalidTypeNameException) Message() string { +func (s *InvalidTypeNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30573,28 +32844,28 @@ func (s InvalidTypeNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidTypeNameException) OrigErr() error { +func (s *InvalidTypeNameException) OrigErr() error { return nil } -func (s InvalidTypeNameException) Error() string { +func (s *InvalidTypeNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s InvalidTypeNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidTypeNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidTypeNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidTypeNameException) RequestID() string { + return s.RespMetadata.RequestID } // The update is not valid. type InvalidUpdate struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -30611,17 +32882,17 @@ func (s InvalidUpdate) GoString() string { func newErrorInvalidUpdate(v protocol.ResponseMetadata) error { return &InvalidUpdate{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidUpdate) Code() string { +func (s *InvalidUpdate) Code() string { return "InvalidUpdate" } // Message returns the exception's message. -func (s InvalidUpdate) Message() string { +func (s *InvalidUpdate) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -30629,22 +32900,22 @@ func (s InvalidUpdate) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidUpdate) OrigErr() error { +func (s *InvalidUpdate) OrigErr() error { return nil } -func (s InvalidUpdate) Error() string { +func (s *InvalidUpdate) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidUpdate) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidUpdate) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidUpdate) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidUpdate) RequestID() string { + return s.RespMetadata.RequestID } // Specifies the inventory type and attribute for the aggregation execution. @@ -30741,7 +33012,7 @@ type InventoryDeletionStatusItem struct { DeletionStartTime *time.Time `type:"timestamp"` // Information about the delete operation. For more information about this summary, - // see Understanding the Delete Inventory Summary (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-custom.html#sysman-inventory-delete) + // see Understanding the delete inventory summary (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-custom.html#sysman-inventory-delete) // in the AWS Systems Manager User Guide. DeletionSummary *InventoryDeletionSummary `type:"structure"` @@ -30906,6 +33177,10 @@ type InventoryFilter struct { Key *string `min:"1" type:"string" required:"true"` // The type of filter. + // + // The Exists filter must be used with aggregators. For more information, see + // Aggregating inventory data (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-aggregate.html) + // in the AWS Systems Manager User Guide. Type *string `type:"string" enum:"InventoryQueryOperatorType"` // Inventory filter values. Example: inventory filter where instance IDs are @@ -31352,8 +33627,8 @@ func (s *InventoryResultItem) SetTypeName(v string) *InventoryResultItem { // The command ID and instance ID you specified did not match any invocations. // Verify the command ID and the instance ID and try again. 
type InvocationDoesNotExist struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -31370,17 +33645,17 @@ func (s InvocationDoesNotExist) GoString() string { func newErrorInvocationDoesNotExist(v protocol.ResponseMetadata) error { return &InvocationDoesNotExist{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvocationDoesNotExist) Code() string { +func (s *InvocationDoesNotExist) Code() string { return "InvocationDoesNotExist" } // Message returns the exception's message. -func (s InvocationDoesNotExist) Message() string { +func (s *InvocationDoesNotExist) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -31388,28 +33663,28 @@ func (s InvocationDoesNotExist) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvocationDoesNotExist) OrigErr() error { +func (s *InvocationDoesNotExist) OrigErr() error { return nil } -func (s InvocationDoesNotExist) Error() string { +func (s *InvocationDoesNotExist) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvocationDoesNotExist) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvocationDoesNotExist) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvocationDoesNotExist) RequestID() string { - return s.respMetadata.RequestID +func (s *InvocationDoesNotExist) RequestID() string { + return s.RespMetadata.RequestID } // The inventory item has invalid content. type ItemContentMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -31428,17 +33703,17 @@ func (s ItemContentMismatchException) GoString() string { func newErrorItemContentMismatchException(v protocol.ResponseMetadata) error { return &ItemContentMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ItemContentMismatchException) Code() string { +func (s *ItemContentMismatchException) Code() string { return "ItemContentMismatchException" } // Message returns the exception's message. -func (s ItemContentMismatchException) Message() string { +func (s *ItemContentMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -31446,28 +33721,28 @@ func (s ItemContentMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ItemContentMismatchException) OrigErr() error { +func (s *ItemContentMismatchException) OrigErr() error { return nil } -func (s ItemContentMismatchException) Error() string { +func (s *ItemContentMismatchException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ItemContentMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ItemContentMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ItemContentMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *ItemContentMismatchException) RequestID() string { + return s.RespMetadata.RequestID } // The inventory item size has exceeded the size limit. type ItemSizeLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -31486,17 +33761,17 @@ func (s ItemSizeLimitExceededException) GoString() string { func newErrorItemSizeLimitExceededException(v protocol.ResponseMetadata) error { return &ItemSizeLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ItemSizeLimitExceededException) Code() string { +func (s *ItemSizeLimitExceededException) Code() string { return "ItemSizeLimitExceededException" } // Message returns the exception's message. -func (s ItemSizeLimitExceededException) Message() string { +func (s *ItemSizeLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -31504,22 +33779,22 @@ func (s ItemSizeLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ItemSizeLimitExceededException) OrigErr() error { +func (s *ItemSizeLimitExceededException) OrigErr() error { return nil } -func (s ItemSizeLimitExceededException) Error() string { +func (s *ItemSizeLimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ItemSizeLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ItemSizeLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ItemSizeLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ItemSizeLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type LabelParameterVersionInput struct { @@ -31595,7 +33870,7 @@ type LabelParameterVersionOutput struct { _ struct{} `type:"structure"` // The label does not meet the requirements. For information about parameter - // label requirements, see Labeling Parameters (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html) + // label requirements, see Labeling parameters (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-labels.html) // in the AWS Systems Manager User Guide. InvalidLabels []*string `min:"1" type:"list"` @@ -31834,7 +34109,7 @@ type ListCommandInvocationsInput struct { Details *bool `type:"boolean"` // (Optional) One or more filters. Use a filter to return a more specific list - // of results. Note that the DocumentName filter is not supported for ListCommandInvocations. + // of results. Filters []*CommandFilter `min:"1" type:"list"` // (Optional) The command execution details for a specific instance ID. @@ -31969,6 +34244,9 @@ type ListCommandsInput struct { Filters []*CommandFilter `min:"1" type:"list"` // (Optional) Lists commands issued against this instance ID. + // + // You can't specify an instance ID in the same command that you specify Status + // = Pending. This is because the command has not reached the instance yet. 
InstanceId *string `type:"string"` // (Optional) The maximum number of items to return for this call. The call @@ -32409,10 +34687,15 @@ func (s *ListDocumentVersionsOutput) SetNextToken(v string) *ListDocumentVersion type ListDocumentsInput struct { _ struct{} `type:"structure"` - // One or more filters. Use a filter to return a more specific list of results. + // This data type is deprecated. Instead, use Filters. DocumentFilterList []*DocumentFilter `min:"1" type:"list"` - // One or more filters. Use a filter to return a more specific list of results. + // One or more DocumentKeyValuesFilter objects. Use a filter to return a more + // specific list of results. For keys, you can specify one or more key-value + // pair tags that have been applied to a document. Other valid keys include + // Owner, Name, PlatformTypes, DocumentType, and TargetType. For example, to + // return documents you own use Key=Owner,Values=Self. To specify a custom key-value + // pair, use the format Key=tag:tagName,Values=valueName. Filters []*DocumentKeyValuesFilter `type:"list"` // The maximum number of items to return for this call. The call also returns @@ -32811,7 +35094,7 @@ type ListResourceDataSyncInput struct { NextToken *string `type:"string"` // View a list of resource data syncs according to the sync type. Specify SyncToDestination - // to view resource data syncs that synchronize data to an Amazon S3 buckets. + // to view resource data syncs that synchronize data to an Amazon S3 bucket. // Specify SyncFromSource to view resource data syncs from AWS Organizations // or from multiple AWS Regions. SyncType *string `min:"1" type:"string"` @@ -32969,7 +35252,7 @@ func (s *ListTagsForResourceOutput) SetTagList(v []*Tag) *ListTagsForResourceOut return s } -// Information about an Amazon S3 bucket to write instance-level logs to. +// Information about an S3 bucket to write instance-level logs to. // // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters @@ -32978,15 +35261,15 @@ func (s *ListTagsForResourceOutput) SetTagList(v []*Tag) *ListTagsForResourceOut type LoggingInfo struct { _ struct{} `type:"structure"` - // The name of an Amazon S3 bucket where execution logs are stored . + // The name of an S3 bucket where execution logs are stored . // // S3BucketName is a required field S3BucketName *string `min:"3" type:"string" required:"true"` - // (Optional) The Amazon S3 bucket subfolder. + // (Optional) The S3 bucket subfolder. S3KeyPrefix *string `type:"string"` - // The region where the Amazon S3 bucket is located. + // The Region where the S3 bucket is located. // // S3Region is a required field S3Region *string `min:"3" type:"string" required:"true"` @@ -33468,6 +35751,10 @@ type MaintenanceWindowIdentity struct { // The schedule of the maintenance window in the form of a cron or rate expression. Schedule *string `min:"1" type:"string"` + // The number of days to wait to run a maintenance window after the scheduled + // CRON expression date and time. + ScheduleOffset *int64 `min:"1" type:"integer"` + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. ScheduleTimezone *string `type:"string"` @@ -33538,6 +35825,12 @@ func (s *MaintenanceWindowIdentity) SetSchedule(v string) *MaintenanceWindowIden return s } +// SetScheduleOffset sets the ScheduleOffset field's value. 
+func (s *MaintenanceWindowIdentity) SetScheduleOffset(v int64) *MaintenanceWindowIdentity { + s.ScheduleOffset = &v + return s +} + // SetScheduleTimezone sets the ScheduleTimezone field's value. func (s *MaintenanceWindowIdentity) SetScheduleTimezone(v string) *MaintenanceWindowIdentity { s.ScheduleTimezone = &v @@ -33720,10 +36013,10 @@ type MaintenanceWindowRunCommandParameters struct { // a per-instance basis. NotificationConfig *NotificationConfig `type:"structure"` - // The name of the Amazon S3 bucket. + // The name of the S3 bucket. OutputS3BucketName *string `min:"3" type:"string"` - // The Amazon S3 bucket subfolder. + // The S3 bucket subfolder. OutputS3KeyPrefix *string `type:"string"` // The parameters for the RUN_COMMAND task execution. @@ -33991,7 +36284,7 @@ type MaintenanceWindowTask struct { // A description of the task. Description *string `min:"1" type:"string" sensitive:"true"` - // Information about an Amazon S3 bucket to write task-level logs to. + // Information about an S3 bucket to write task-level logs to. // // LoggingInfo has been deprecated. To specify an S3 bucket to contain logs, // instead use the OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters @@ -34242,8 +36535,8 @@ func (s *MaintenanceWindowTaskParameterValueExpression) SetValues(v []*string) * // The size limit of a document is 64 KB. type MaxDocumentSizeExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -34260,17 +36553,17 @@ func (s MaxDocumentSizeExceeded) GoString() string { func newErrorMaxDocumentSizeExceeded(v protocol.ResponseMetadata) error { return &MaxDocumentSizeExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MaxDocumentSizeExceeded) Code() string { +func (s *MaxDocumentSizeExceeded) Code() string { return "MaxDocumentSizeExceeded" } // Message returns the exception's message. -func (s MaxDocumentSizeExceeded) Message() string { +func (s *MaxDocumentSizeExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -34278,22 +36571,22 @@ func (s MaxDocumentSizeExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MaxDocumentSizeExceeded) OrigErr() error { +func (s *MaxDocumentSizeExceeded) OrigErr() error { return nil } -func (s MaxDocumentSizeExceeded) Error() string { +func (s *MaxDocumentSizeExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MaxDocumentSizeExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MaxDocumentSizeExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MaxDocumentSizeExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *MaxDocumentSizeExceeded) RequestID() string { + return s.RespMetadata.RequestID } type ModifyDocumentPermissionInput struct { @@ -34439,8 +36732,8 @@ type NotificationConfig struct { // The different events for which you can receive notifications. These events // include the following: All (events), InProgress, Success, TimedOut, Cancelled, - // Failed. 
To learn more about these events, see Configuring Amazon SNS Notifications - // for AWS Systems Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/monitoring-sns-notifications.html) + // Failed. To learn more about these events, see Monitoring Systems Manager + // status changes using Amazon SNS notifications (https://docs.aws.amazon.com/systems-manager/latest/userguide/monitoring-sns-notifications.html) // in the AWS Systems Manager User Guide. NotificationEvents []*string `type:"list"` @@ -34490,7 +36783,7 @@ type OpsAggregator struct { Aggregators []*OpsAggregator `min:"1" type:"list"` // The name of an OpsItem attribute on which to limit the count of OpsItems. - AttributeName *string `type:"string"` + AttributeName *string `min:"1" type:"string"` // The aggregator filters. Filters []*OpsFilter `min:"1" type:"list"` @@ -34521,6 +36814,9 @@ func (s *OpsAggregator) Validate() error { if s.Aggregators != nil && len(s.Aggregators) < 1 { invalidParams.Add(request.NewErrParamMinLen("Aggregators", 1)) } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } if s.Filters != nil && len(s.Filters) < 1 { invalidParams.Add(request.NewErrParamMinLen("Filters", 1)) } @@ -34727,7 +37023,7 @@ func (s *OpsFilter) SetValues(v []*string) *OpsFilter { // Operations engineers and IT professionals use OpsCenter to view, investigate, // and remediate operational issues impacting the performance and health of // their AWS resources. For more information, see AWS Systems Manager OpsCenter -// (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter.html) // in the AWS Systems Manager User Guide. type OpsItem struct { _ struct{} `type:"structure"` @@ -34774,7 +37070,7 @@ type OpsItem struct { // Use the /aws/resources key in OperationalData to specify a related resource // in the request. Use the /aws/automations key in OperationalData to associate // an Automation runbook with the OpsItem. To view AWS CLI example commands - // that use these keys, see Creating OpsItems Manually (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems) + // that use these keys, see Creating OpsItems manually (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems) // in the AWS Systems Manager User Guide. OperationalData map[string]*OpsItemDataValue `type:"map"` @@ -34792,12 +37088,12 @@ type OpsItem struct { // The severity of the OpsItem. Severity options range from 1 to 4. Severity *string `min:"1" type:"string"` - // The origin of the OpsItem, such as Amazon EC2 or AWS Systems Manager. The - // impacted resource is a subset of source. + // The origin of the OpsItem, such as Amazon EC2 or Systems Manager. The impacted + // resource is a subset of source. Source *string `min:"1" type:"string"` // The OpsItem status. Status can be Open, In Progress, or Resolved. For more - // information, see Editing OpsItem Details (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-working-with-OpsItems-editing-details.html) + // information, see Editing OpsItem details (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-working-with-OpsItems-editing-details.html) // in the AWS Systems Manager User Guide. 
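// [Editor's illustrative sketch - not part of the vendored aws-sdk-go diff.]
// A minimal example of the reserved /aws/resources OperationalData key
// described in the OpsItem comments above. The title, description, and ARN
// are made-up placeholders.
package ssmexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func createOpsItemForResource() error {
	svc := ssm.New(session.Must(session.NewSession()))

	out, err := svc.CreateOpsItem(&ssm.CreateOpsItemInput{
		Title:       aws.String("High CPU on web tier"),
		Description: aws.String("CPU utilization exceeded 90% for 15 minutes."),
		Source:      aws.String("EC2"),
		OperationalData: map[string]*ssm.OpsItemDataValue{
			// Reserved key for related resources; the ARN is illustrative only.
			"/aws/resources": {
				Type:  aws.String(ssm.OpsItemDataTypeSearchableString),
				Value: aws.String(`[{"arn":"arn:aws:ec2:us-east-1:111122223333:instance/i-0123456789abcdef0"}]`),
			},
		},
	})
	if err != nil {
		return err
	}
	fmt.Println("created OpsItem", aws.StringValue(out.OpsItemId))
	return nil
}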
Status *string `type:"string" enum:"OpsItemStatus"` @@ -34918,8 +37214,8 @@ func (s *OpsItem) SetVersion(v string) *OpsItem { // The OpsItem already exists. type OpsItemAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -34938,17 +37234,17 @@ func (s OpsItemAlreadyExistsException) GoString() string { func newErrorOpsItemAlreadyExistsException(v protocol.ResponseMetadata) error { return &OpsItemAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OpsItemAlreadyExistsException) Code() string { +func (s *OpsItemAlreadyExistsException) Code() string { return "OpsItemAlreadyExistsException" } // Message returns the exception's message. -func (s OpsItemAlreadyExistsException) Message() string { +func (s *OpsItemAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -34956,22 +37252,22 @@ func (s OpsItemAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OpsItemAlreadyExistsException) OrigErr() error { +func (s *OpsItemAlreadyExistsException) OrigErr() error { return nil } -func (s OpsItemAlreadyExistsException) Error() string { +func (s *OpsItemAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s OpsItemAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OpsItemAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OpsItemAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *OpsItemAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // An object that defines the value of the key and its type in the OperationalData @@ -35078,8 +37374,8 @@ func (s *OpsItemFilter) SetValues(v []*string) *OpsItemFilter { // A specified parameter argument isn't valid. Verify the available arguments // and try again. type OpsItemInvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -35098,17 +37394,17 @@ func (s OpsItemInvalidParameterException) GoString() string { func newErrorOpsItemInvalidParameterException(v protocol.ResponseMetadata) error { return &OpsItemInvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OpsItemInvalidParameterException) Code() string { +func (s *OpsItemInvalidParameterException) Code() string { return "OpsItemInvalidParameterException" } // Message returns the exception's message. -func (s OpsItemInvalidParameterException) Message() string { +func (s *OpsItemInvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -35116,29 +37412,29 @@ func (s OpsItemInvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s OpsItemInvalidParameterException) OrigErr() error { +func (s *OpsItemInvalidParameterException) OrigErr() error { return nil } -func (s OpsItemInvalidParameterException) Error() string { +func (s *OpsItemInvalidParameterException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s OpsItemInvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OpsItemInvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OpsItemInvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *OpsItemInvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The request caused OpsItems to exceed one or more quotas. For information -// about OpsItem quotas, see What are the resource limits for OpsCenter? (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). +// about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). type OpsItemLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Limit *int64 `type:"integer"` @@ -35161,17 +37457,17 @@ func (s OpsItemLimitExceededException) GoString() string { func newErrorOpsItemLimitExceededException(v protocol.ResponseMetadata) error { return &OpsItemLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OpsItemLimitExceededException) Code() string { +func (s *OpsItemLimitExceededException) Code() string { return "OpsItemLimitExceededException" } // Message returns the exception's message. -func (s OpsItemLimitExceededException) Message() string { +func (s *OpsItemLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -35179,28 +37475,28 @@ func (s OpsItemLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OpsItemLimitExceededException) OrigErr() error { +func (s *OpsItemLimitExceededException) OrigErr() error { return nil } -func (s OpsItemLimitExceededException) Error() string { +func (s *OpsItemLimitExceededException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s OpsItemLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OpsItemLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OpsItemLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *OpsItemLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The specified OpsItem ID doesn't exist. Verify the ID and try again. 
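// [Editor's illustrative sketch - not part of the vendored aws-sdk-go diff.]
// The OpsItem exception types above now use pointer receivers and an exported
// RespMetadata field; from the caller's side the usual awserr type assertion
// and the package's ErrCode* constants still apply. handleOpsItemError is a
// placeholder helper, not an SDK identifier.
package ssmexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func handleOpsItemError(err error) error {
	if err == nil {
		return nil
	}
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case ssm.ErrCodeOpsItemAlreadyExistsException:
			fmt.Println("OpsItem already exists:", aerr.Message())
			return nil
		case ssm.ErrCodeOpsItemLimitExceededException:
			fmt.Println("OpsItem quota exceeded:", aerr.Message())
		case ssm.ErrCodeOpsItemInvalidParameterException:
			fmt.Println("invalid OpsItem parameter:", aerr.Message())
		}
		// The modeled exceptions also satisfy awserr.RequestFailure, so the
		// HTTP status code and request ID remain accessible.
		if rf, ok := err.(awserr.RequestFailure); ok {
			fmt.Println("status:", rf.StatusCode(), "request id:", rf.RequestID())
		}
	}
	return err
}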
type OpsItemNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -35217,17 +37513,17 @@ func (s OpsItemNotFoundException) GoString() string { func newErrorOpsItemNotFoundException(v protocol.ResponseMetadata) error { return &OpsItemNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OpsItemNotFoundException) Code() string { +func (s *OpsItemNotFoundException) Code() string { return "OpsItemNotFoundException" } // Message returns the exception's message. -func (s OpsItemNotFoundException) Message() string { +func (s *OpsItemNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -35235,22 +37531,22 @@ func (s OpsItemNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OpsItemNotFoundException) OrigErr() error { +func (s *OpsItemNotFoundException) OrigErr() error { return nil } -func (s OpsItemNotFoundException) Error() string { +func (s *OpsItemNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OpsItemNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OpsItemNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OpsItemNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *OpsItemNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // A notification about the OpsItem. @@ -35451,7 +37747,7 @@ func (s *OpsResultAttribute) SetTypeName(v string) *OpsResultAttribute { type OutputSource struct { _ struct{} `type:"structure"` - // The ID of the output source, for example the URL of an Amazon S3 bucket. + // The ID of the output source, for example the URL of an S3 bucket. OutputSourceId *string `min:"36" type:"string"` // The type of source where the association execution details are stored, for @@ -35481,13 +37777,17 @@ func (s *OutputSource) SetOutputSourceType(v string) *OutputSource { return s } -// An Amazon EC2 Systems Manager parameter in Parameter Store. +// An Systems Manager parameter in Parameter Store. type Parameter struct { _ struct{} `type:"structure"` // The Amazon Resource Name (ARN) of the parameter. ARN *string `type:"string"` + // The data type of the parameter, such as text or aws:ec2:image. The default + // is text. + DataType *string `type:"string"` + // Date the parameter was last changed or updated and the parameter version // was created. LastModifiedDate *time.Time `type:"timestamp"` @@ -35507,8 +37807,8 @@ type Parameter struct { // is the raw result or response from the source. SourceResult *string `type:"string"` - // The type of parameter. Valid values include the following: String, String - // list, Secure string. + // The type of parameter. Valid values include the following: String, StringList, + // and SecureString. Type *string `type:"string" enum:"ParameterType"` // The parameter value. @@ -35534,6 +37834,12 @@ func (s *Parameter) SetARN(v string) *Parameter { return s } +// SetDataType sets the DataType field's value. 
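// [Editor's illustrative sketch - not part of the vendored aws-sdk-go diff.]
// Reads back the new Parameter.DataType field documented above. The parameter
// name "/golden/ami" is a placeholder.
package ssmexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func showParameterDataType() error {
	svc := ssm.New(session.Must(session.NewSession()))

	out, err := svc.GetParameter(&ssm.GetParameterInput{
		Name: aws.String("/golden/ami"),
	})
	if err != nil {
		return err
	}
	p := out.Parameter
	// DataType is "text" for ordinary strings and "aws:ec2:image" for
	// AMI-validated parameters, per the field comment above.
	fmt.Printf("%s (v%d): type=%s dataType=%s value=%s\n",
		aws.StringValue(p.Name), aws.Int64Value(p.Version),
		aws.StringValue(p.Type), aws.StringValue(p.DataType),
		aws.StringValue(p.Value))
	return nil
}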
+func (s *Parameter) SetDataType(v string) *Parameter { + s.DataType = &v + return s +} + // SetLastModifiedDate sets the LastModifiedDate field's value. func (s *Parameter) SetLastModifiedDate(v time.Time) *Parameter { s.LastModifiedDate = &v @@ -35578,8 +37884,8 @@ func (s *Parameter) SetVersion(v int64) *Parameter { // The parameter already exists. You can't create duplicate parameters. type ParameterAlreadyExists struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -35596,17 +37902,17 @@ func (s ParameterAlreadyExists) GoString() string { func newErrorParameterAlreadyExists(v protocol.ResponseMetadata) error { return &ParameterAlreadyExists{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParameterAlreadyExists) Code() string { +func (s *ParameterAlreadyExists) Code() string { return "ParameterAlreadyExists" } // Message returns the exception's message. -func (s ParameterAlreadyExists) Message() string { +func (s *ParameterAlreadyExists) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -35614,22 +37920,22 @@ func (s ParameterAlreadyExists) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ParameterAlreadyExists) OrigErr() error { +func (s *ParameterAlreadyExists) OrigErr() error { return nil } -func (s ParameterAlreadyExists) Error() string { +func (s *ParameterAlreadyExists) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ParameterAlreadyExists) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParameterAlreadyExists) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParameterAlreadyExists) RequestID() string { - return s.respMetadata.RequestID +func (s *ParameterAlreadyExists) RequestID() string { + return s.RespMetadata.RequestID } // Information about parameter usage. @@ -35641,6 +37947,10 @@ type ParameterHistory struct { // a-zA-Z0-9_.- AllowedPattern *string `type:"string"` + // The data type of the parameter, such as text or aws:ec2:image. The default + // is text. + DataType *string `type:"string"` + // Information about the parameter. Description *string `type:"string"` @@ -35661,7 +37971,7 @@ type ParameterHistory struct { // Information about the policies assigned to a parameter. // - // Working with Parameter Policies (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-policies.html) + // Assigning parameter policies (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-policies.html) // in the AWS Systems Manager User Guide. Policies []*ParameterInlinePolicy `type:"list"` @@ -35694,6 +38004,12 @@ func (s *ParameterHistory) SetAllowedPattern(v string) *ParameterHistory { return s } +// SetDataType sets the DataType field's value. +func (s *ParameterHistory) SetDataType(v string) *ParameterHistory { + s.DataType = &v + return s +} + // SetDescription sets the Description field's value. 
func (s *ParameterHistory) SetDescription(v string) *ParameterHistory { s.Description = &v @@ -35809,8 +38125,8 @@ func (s *ParameterInlinePolicy) SetPolicyType(v string) *ParameterInlinePolicy { // You have exceeded the number of parameters for this AWS account. Delete one // or more parameters and try again. type ParameterLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -35827,17 +38143,17 @@ func (s ParameterLimitExceeded) GoString() string { func newErrorParameterLimitExceeded(v protocol.ResponseMetadata) error { return &ParameterLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParameterLimitExceeded) Code() string { +func (s *ParameterLimitExceeded) Code() string { return "ParameterLimitExceeded" } // Message returns the exception's message. -func (s ParameterLimitExceeded) Message() string { +func (s *ParameterLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -35845,28 +38161,28 @@ func (s ParameterLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ParameterLimitExceeded) OrigErr() error { +func (s *ParameterLimitExceeded) OrigErr() error { return nil } -func (s ParameterLimitExceeded) Error() string { +func (s *ParameterLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ParameterLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParameterLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParameterLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *ParameterLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // The parameter exceeded the maximum number of allowed versions. type ParameterMaxVersionLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -35883,17 +38199,17 @@ func (s ParameterMaxVersionLimitExceeded) GoString() string { func newErrorParameterMaxVersionLimitExceeded(v protocol.ResponseMetadata) error { return &ParameterMaxVersionLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParameterMaxVersionLimitExceeded) Code() string { +func (s *ParameterMaxVersionLimitExceeded) Code() string { return "ParameterMaxVersionLimitExceeded" } // Message returns the exception's message. -func (s ParameterMaxVersionLimitExceeded) Message() string { +func (s *ParameterMaxVersionLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -35901,22 +38217,22 @@ func (s ParameterMaxVersionLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ParameterMaxVersionLimitExceeded) OrigErr() error { +func (s *ParameterMaxVersionLimitExceeded) OrigErr() error { return nil } -func (s ParameterMaxVersionLimitExceeded) Error() string { +func (s *ParameterMaxVersionLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ParameterMaxVersionLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParameterMaxVersionLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParameterMaxVersionLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *ParameterMaxVersionLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // Metadata includes information like the ARN of the last user and the date/time @@ -35929,6 +38245,10 @@ type ParameterMetadata struct { // a-zA-Z0-9_.- AllowedPattern *string `type:"string"` + // The data type of the parameter, such as text or aws:ec2:image. The default + // is text. + DataType *string `type:"string"` + // Description of the parameter actions. Description *string `type:"string"` @@ -35951,7 +38271,7 @@ type ParameterMetadata struct { Tier *string `type:"string" enum:"ParameterTier"` // The type of parameter. Valid parameter types include the following: String, - // String list, Secure string. + // StringList, and SecureString. Type *string `type:"string" enum:"ParameterType"` // The parameter version. @@ -35974,6 +38294,12 @@ func (s *ParameterMetadata) SetAllowedPattern(v string) *ParameterMetadata { return s } +// SetDataType sets the DataType field's value. +func (s *ParameterMetadata) SetDataType(v string) *ParameterMetadata { + s.DataType = &v + return s +} + // SetDescription sets the Description field's value. func (s *ParameterMetadata) SetDescription(v string) *ParameterMetadata { s.Description = &v @@ -36030,8 +38356,8 @@ func (s *ParameterMetadata) SetVersion(v int64) *ParameterMetadata { // The parameter could not be found. Verify the name and try again. type ParameterNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -36048,17 +38374,17 @@ func (s ParameterNotFound) GoString() string { func newErrorParameterNotFound(v protocol.ResponseMetadata) error { return &ParameterNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParameterNotFound) Code() string { +func (s *ParameterNotFound) Code() string { return "ParameterNotFound" } // Message returns the exception's message. -func (s ParameterNotFound) Message() string { +func (s *ParameterNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -36066,28 +38392,28 @@ func (s ParameterNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ParameterNotFound) OrigErr() error { +func (s *ParameterNotFound) OrigErr() error { return nil } -func (s ParameterNotFound) Error() string { +func (s *ParameterNotFound) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ParameterNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParameterNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParameterNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *ParameterNotFound) RequestID() string { + return s.RespMetadata.RequestID } // The parameter name is not valid. type ParameterPatternMismatchException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The parameter name is not valid. Message_ *string `locationName:"message" type:"string"` @@ -36105,17 +38431,17 @@ func (s ParameterPatternMismatchException) GoString() string { func newErrorParameterPatternMismatchException(v protocol.ResponseMetadata) error { return &ParameterPatternMismatchException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParameterPatternMismatchException) Code() string { +func (s *ParameterPatternMismatchException) Code() string { return "ParameterPatternMismatchException" } // Message returns the exception's message. -func (s ParameterPatternMismatchException) Message() string { +func (s *ParameterPatternMismatchException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -36123,44 +38449,44 @@ func (s ParameterPatternMismatchException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ParameterPatternMismatchException) OrigErr() error { +func (s *ParameterPatternMismatchException) OrigErr() error { return nil } -func (s ParameterPatternMismatchException) Error() string { +func (s *ParameterPatternMismatchException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ParameterPatternMismatchException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParameterPatternMismatchException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParameterPatternMismatchException) RequestID() string { - return s.respMetadata.RequestID +func (s *ParameterPatternMismatchException) RequestID() string { + return s.RespMetadata.RequestID } // One or more filters. Use a filter to return a more specific list of results. -// -// The ParameterStringFilter object is used by the DescribeParameters and GetParametersByPath -// API actions. However, not all of the pattern values listed for Key can be -// used with both actions. -// -// For DescribeActions, all of the listed patterns are valid, with the exception -// of Label. -// -// For GetParametersByPath, the following patterns listed for Key are not valid: -// Name, Path, and Tier. -// -// For examples of CLI commands demonstrating valid parameter filter constructions, -// see Searching for Systems Manager Parameters (http://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-search.html) -// in the AWS Systems Manager User Guide. type ParameterStringFilter struct { _ struct{} `type:"structure"` // The name of the filter. // + // The ParameterStringFilter object is used by the DescribeParameters and GetParametersByPath + // API actions. However, not all of the pattern values listed for Key can be + // used with both actions. 
+ // + // For DescribeActions, all of the listed patterns are valid, with the exception + // of Label. + // + // For GetParametersByPath, the following patterns listed for Key are not valid: + // tag, Name, Path, and Tier. + // + // For examples of CLI commands demonstrating valid parameter filter constructions, + // see Searching for Systems Manager parameters (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-search.html) + // in the AWS Systems Manager User Guide. + // // Key is a required field Key *string `min:"1" type:"string" required:"true"` @@ -36170,8 +38496,8 @@ type ParameterStringFilter struct { // and OneLevel.) // // For filters used with GetParametersByPath, valid options include Equals and - // BeginsWith. (Exception: For filters using the key Label, the only valid option - // is Equals.) + // BeginsWith. (Exception: For filters using Label as the Key name, the only + // valid option is Equals.) Option *string `min:"1" type:"string"` // The value you want to search for. @@ -36230,8 +38556,8 @@ func (s *ParameterStringFilter) SetValues(v []*string) *ParameterStringFilter { // A parameter version can have a maximum of ten labels. type ParameterVersionLabelLimitExceeded struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -36248,17 +38574,17 @@ func (s ParameterVersionLabelLimitExceeded) GoString() string { func newErrorParameterVersionLabelLimitExceeded(v protocol.ResponseMetadata) error { return &ParameterVersionLabelLimitExceeded{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParameterVersionLabelLimitExceeded) Code() string { +func (s *ParameterVersionLabelLimitExceeded) Code() string { return "ParameterVersionLabelLimitExceeded" } // Message returns the exception's message. -func (s ParameterVersionLabelLimitExceeded) Message() string { +func (s *ParameterVersionLabelLimitExceeded) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -36266,29 +38592,29 @@ func (s ParameterVersionLabelLimitExceeded) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ParameterVersionLabelLimitExceeded) OrigErr() error { +func (s *ParameterVersionLabelLimitExceeded) OrigErr() error { return nil } -func (s ParameterVersionLabelLimitExceeded) Error() string { +func (s *ParameterVersionLabelLimitExceeded) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ParameterVersionLabelLimitExceeded) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParameterVersionLabelLimitExceeded) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParameterVersionLabelLimitExceeded) RequestID() string { - return s.respMetadata.RequestID +func (s *ParameterVersionLabelLimitExceeded) RequestID() string { + return s.RespMetadata.RequestID } // The specified parameter version was not found. Verify the parameter name // and version, and try again. 
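// [Editor's illustrative sketch - not part of the vendored aws-sdk-go diff.]
// One way to use ParameterStringFilter with DescribeParameters, as described
// in the relocated Key documentation above. Function name is a placeholder.
package ssmexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func listSecureStringParameters() error {
	svc := ssm.New(session.Must(session.NewSession()))

	// DescribeParameters accepts the full set of Key patterns; "Type" with
	// Option "Equals" is used here. For GetParametersByPath, keys such as
	// tag, Name, Path, and Tier are not valid, per the comment above.
	return svc.DescribeParametersPages(&ssm.DescribeParametersInput{
		ParameterFilters: []*ssm.ParameterStringFilter{
			{
				Key:    aws.String("Type"),
				Option: aws.String("Equals"),
				Values: []*string{aws.String("SecureString")},
			},
		},
	}, func(page *ssm.DescribeParametersOutput, lastPage bool) bool {
		for _, p := range page.Parameters {
			fmt.Println(aws.StringValue(p.Name))
		}
		return true
	})
}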
type ParameterVersionNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -36305,17 +38631,17 @@ func (s ParameterVersionNotFound) GoString() string { func newErrorParameterVersionNotFound(v protocol.ResponseMetadata) error { return &ParameterVersionNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ParameterVersionNotFound) Code() string { +func (s *ParameterVersionNotFound) Code() string { return "ParameterVersionNotFound" } // Message returns the exception's message. -func (s ParameterVersionNotFound) Message() string { +func (s *ParameterVersionNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -36323,22 +38649,22 @@ func (s ParameterVersionNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ParameterVersionNotFound) OrigErr() error { +func (s *ParameterVersionNotFound) OrigErr() error { return nil } -func (s ParameterVersionNotFound) Error() string { +func (s *ParameterVersionNotFound) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ParameterVersionNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ParameterVersionNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ParameterVersionNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *ParameterVersionNotFound) RequestID() string { + return s.RespMetadata.RequestID } // This data type is deprecated. Instead, use ParameterStringFilter. @@ -36401,7 +38727,25 @@ func (s *ParametersFilter) SetValues(v []*string) *ParametersFilter { type Patch struct { _ struct{} `type:"structure"` - // The classification of the patch (for example, SecurityUpdates, Updates, CriticalUpdates). + // The Advisory ID of the patch. For example, RHSA-2020:3779. Applies to Linux-based + // instances only. + AdvisoryIds []*string `type:"list"` + + // The architecture of the patch. For example, in example-pkg-0.710.10-2.7.abcd.x86_64, + // the architecture is indicated by x86_64. Applies to Linux-based instances + // only. + Arch *string `type:"string"` + + // The Bugzilla ID of the patch. For example, 1600646. Applies to Linux-based + // instances only. + BugzillaIds []*string `type:"list"` + + // The Common Vulnerabilities and Exposures (CVE) ID of the patch. For example, + // CVE-1999-0067. Applies to Linux-based instances only. + CVEIds []*string `type:"list"` + + // The classification of the patch. For example, SecurityUpdates, Updates, or + // CriticalUpdates. Classification *string `type:"string"` // The URL where more information can be obtained about the patch. @@ -36410,36 +38754,65 @@ type Patch struct { // The description of the patch. Description *string `type:"string"` - // The ID of the patch (this is different than the Microsoft Knowledge Base - // ID). + // The epoch of the patch. For example in pkg-example-EE-20180914-2.2.amzn1.noarch, + // the epoch value is 20180914-2. Applies to Linux-based instances only. + Epoch *int64 `type:"integer"` + + // The ID of the patch. Applies to Windows patches only. + // + // This ID is not the same as the Microsoft Knowledge Base ID. 
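// [Editor's illustrative sketch - not part of the vendored aws-sdk-go diff.]
// Prints a few of the Linux-only Patch fields added above. The PRODUCT filter
// value is taken from the doc text; everything else is a placeholder.
package ssmexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func printLinuxPatchMetadata() error {
	svc := ssm.New(session.Must(session.NewSession()))

	out, err := svc.DescribeAvailablePatches(&ssm.DescribeAvailablePatchesInput{
		Filters: []*ssm.PatchOrchestratorFilter{
			{Key: aws.String("PRODUCT"), Values: []*string{aws.String("AmazonLinux2018.03")}},
		},
	})
	if err != nil {
		return err
	}
	for _, p := range out.Patches {
		// The Linux-only fields (Name, CVEIds, Release, Repository, Severity,
		// and so on) are nil for Windows patches, per the comments above.
		fmt.Printf("%s %s-%s CVEs=%v severity=%s\n",
			aws.StringValue(p.Name), aws.StringValue(p.Version),
			aws.StringValue(p.Release), aws.StringValueSlice(p.CVEIds),
			aws.StringValue(p.Severity))
	}
	return nil
}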
Id *string `min:"1" type:"string"` - // The Microsoft Knowledge Base ID of the patch. + // The Microsoft Knowledge Base ID of the patch. Applies to Windows patches + // only. KbNumber *string `type:"string"` // The language of the patch if it's language-specific. Language *string `type:"string"` - // The ID of the MSRC bulletin the patch is related to. + // The ID of the Microsoft Security Response Center (MSRC) bulletin the patch + // is related to. For example, MS14-045. Applies to Windows patches only. MsrcNumber *string `type:"string"` - // The severity of the patch (for example Critical, Important, Moderate). + // The severity of the patch, such as Critical, Important, or Moderate. Applies + // to Windows patches only. MsrcSeverity *string `type:"string"` - // The specific product the patch is applicable for (for example, WindowsServer2016). + // The name of the patch. Applies to Linux-based instances only. + Name *string `type:"string"` + + // The specific product the patch is applicable for. For example, WindowsServer2016 + // or AmazonLinux2018.03. Product *string `type:"string"` - // The product family the patch is applicable for (for example, Windows). + // The product family the patch is applicable for. For example, Windows or Amazon + // Linux 2. ProductFamily *string `type:"string"` + // The particular release of a patch. For example, in pkg-example-EE-20180914-2.2.amzn1.noarch, + // the release is 2.amaz1. Applies to Linux-based instances only. + Release *string `type:"string"` + // The date the patch was released. ReleaseDate *time.Time `type:"timestamp"` + // The source patch repository for the operating system and version, such as + // trusty-security for Ubuntu Server 14.04 LTE and focal-security for Ubuntu + // Server 20.04 LTE. Applies to Linux-based instances only. + Repository *string `type:"string"` + + // The severity level of the patch. For example, CRITICAL or MODERATE. + Severity *string `type:"string"` + // The title of the patch. Title *string `type:"string"` // The name of the vendor providing the patch. Vendor *string `type:"string"` + + // The version number of the patch. For example, in example-pkg-1.710.10-2.7.abcd.x86_64, + // the version number is indicated by -1. Applies to Linux-based instances only. + Version *string `type:"string"` } // String returns the string representation @@ -36452,6 +38825,30 @@ func (s Patch) GoString() string { return s.String() } +// SetAdvisoryIds sets the AdvisoryIds field's value. +func (s *Patch) SetAdvisoryIds(v []*string) *Patch { + s.AdvisoryIds = v + return s +} + +// SetArch sets the Arch field's value. +func (s *Patch) SetArch(v string) *Patch { + s.Arch = &v + return s +} + +// SetBugzillaIds sets the BugzillaIds field's value. +func (s *Patch) SetBugzillaIds(v []*string) *Patch { + s.BugzillaIds = v + return s +} + +// SetCVEIds sets the CVEIds field's value. +func (s *Patch) SetCVEIds(v []*string) *Patch { + s.CVEIds = v + return s +} + // SetClassification sets the Classification field's value. func (s *Patch) SetClassification(v string) *Patch { s.Classification = &v @@ -36470,6 +38867,12 @@ func (s *Patch) SetDescription(v string) *Patch { return s } +// SetEpoch sets the Epoch field's value. +func (s *Patch) SetEpoch(v int64) *Patch { + s.Epoch = &v + return s +} + // SetId sets the Id field's value. func (s *Patch) SetId(v string) *Patch { s.Id = &v @@ -36500,6 +38903,12 @@ func (s *Patch) SetMsrcSeverity(v string) *Patch { return s } +// SetName sets the Name field's value. 
+func (s *Patch) SetName(v string) *Patch { + s.Name = &v + return s +} + // SetProduct sets the Product field's value. func (s *Patch) SetProduct(v string) *Patch { s.Product = &v @@ -36512,12 +38921,30 @@ func (s *Patch) SetProductFamily(v string) *Patch { return s } +// SetRelease sets the Release field's value. +func (s *Patch) SetRelease(v string) *Patch { + s.Release = &v + return s +} + // SetReleaseDate sets the ReleaseDate field's value. func (s *Patch) SetReleaseDate(v time.Time) *Patch { s.ReleaseDate = &v return s } +// SetRepository sets the Repository field's value. +func (s *Patch) SetRepository(v string) *Patch { + s.Repository = &v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *Patch) SetSeverity(v string) *Patch { + s.Severity = &v + return s +} + // SetTitle sets the Title field's value. func (s *Patch) SetTitle(v string) *Patch { s.Title = &v @@ -36530,6 +38957,12 @@ func (s *Patch) SetVendor(v string) *Patch { return s } +// SetVersion sets the Version field's value. +func (s *Patch) SetVersion(v string) *Patch { + s.Version = &v + return s +} + // Defines the basic information about a patch baseline. type PatchBaselineIdentity struct { _ struct{} `type:"structure"` @@ -36621,7 +39054,7 @@ type PatchComplianceData struct { // The state of the patch on the instance, such as INSTALLED or FAILED. // - // For descriptions of each patch state, see About Patch Compliance (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-compliance-about.html#sysman-compliance-monitor-patch) + // For descriptions of each patch state, see About patch compliance (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-compliance-about.html#sysman-compliance-monitor-patch) // in the AWS Systems Manager User Guide. // // State is a required field @@ -36891,15 +39324,17 @@ type PatchRule struct { // The number of days after the release date of each patch matched by the rule // that the patch is marked as approved in the patch baseline. For example, // a value of 7 means that patches are approved seven days after they are released. + // Not supported on Ubuntu Server. ApproveAfterDays *int64 `type:"integer"` // The cutoff date for auto approval of released patches. Any patches released - // on or before this date will be installed automatically + // on or before this date are installed automatically. Not supported on Ubuntu + // Server. + // + // Enter dates in the format YYYY-MM-DD. For example, 2020-12-31. ApproveUntilDate *string `min:"1" type:"string"` // A compliance severity level for all approved patches in a patch baseline. - // Valid compliance severity levels include the following: Unspecified, Critical, - // High, Medium, Low, and Informational. ComplianceLevel *string `type:"string" enum:"PatchComplianceLevel"` // For instances identified by the approval rule filters, enables a patch baseline @@ -37153,8 +39588,8 @@ func (s *PatchStatus) SetDeploymentStatus(v string) *PatchStatus { // You specified more than the maximum number of allowed policies for the parameter. // The maximum is 10. 
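// [Editor's illustrative sketch - not part of the vendored aws-sdk-go diff.]
// Uses the YYYY-MM-DD ApproveUntilDate format documented in PatchRule above.
// The baseline name is a placeholder.
package ssmexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func createBaselineWithCutoffDate() error {
	svc := ssm.New(session.Must(session.NewSession()))

	// ApproveUntilDate is an alternative to ApproveAfterDays and takes a
	// YYYY-MM-DD string, as noted in the field comment above.
	_, err := svc.CreatePatchBaseline(&ssm.CreatePatchBaselineInput{
		Name: aws.String("example-security-baseline"),
		ApprovalRules: &ssm.PatchRuleGroup{
			PatchRules: []*ssm.PatchRule{
				{
					ApproveUntilDate: aws.String("2020-12-31"),
					ComplianceLevel:  aws.String(ssm.PatchComplianceLevelCritical),
					PatchFilterGroup: &ssm.PatchFilterGroup{
						PatchFilters: []*ssm.PatchFilter{
							{
								Key:    aws.String(ssm.PatchFilterKeyClassification),
								Values: []*string{aws.String("SecurityUpdates")},
							},
						},
					},
				},
			},
		},
	})
	return err
}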
type PoliciesLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -37171,17 +39606,17 @@ func (s PoliciesLimitExceededException) GoString() string { func newErrorPoliciesLimitExceededException(v protocol.ResponseMetadata) error { return &PoliciesLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s PoliciesLimitExceededException) Code() string { +func (s *PoliciesLimitExceededException) Code() string { return "PoliciesLimitExceededException" } // Message returns the exception's message. -func (s PoliciesLimitExceededException) Message() string { +func (s *PoliciesLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -37189,22 +39624,22 @@ func (s PoliciesLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s PoliciesLimitExceededException) OrigErr() error { +func (s *PoliciesLimitExceededException) OrigErr() error { return nil } -func (s PoliciesLimitExceededException) Error() string { +func (s *PoliciesLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s PoliciesLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *PoliciesLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s PoliciesLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *PoliciesLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // An aggregate of step execution statuses displayed in the AWS Console for @@ -37296,7 +39731,7 @@ type PutComplianceItemsInput struct { // Information about the compliance as defined by the resource type. For example, // for a patch compliance type, Items includes information about the PatchSeverity, - // Classification, etc. + // Classification, and so on. // // Items is a required field Items []*ComplianceItemEntry `type:"list" required:"true"` @@ -37312,6 +39747,18 @@ type PutComplianceItemsInput struct { // // ResourceType is a required field ResourceType *string `min:"1" type:"string" required:"true"` + + // The mode for uploading compliance items. You can specify COMPLETE or PARTIAL. + // In COMPLETE mode, the system overwrites all existing compliance information + // for the resource. You must provide a full list of compliance items each time + // you send the request. + // + // In PARTIAL mode, the system overwrites compliance information for a specific + // association. The association must be configured with SyncCompliance set to + // MANUAL. By default, all requests use COMPLETE mode. + // + // This attribute is only valid for association compliance. + UploadType *string `type:"string" enum:"ComplianceUploadType"` } // String returns the string representation @@ -37409,6 +39856,12 @@ func (s *PutComplianceItemsInput) SetResourceType(v string) *PutComplianceItemsI return s } +// SetUploadType sets the UploadType field's value. 
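// [Editor's illustrative sketch - not part of the vendored aws-sdk-go diff.]
// Demonstrates the new UploadType field on PutComplianceItemsInput described
// above. The compliance type "Custom:DiskCheck" and the item details are
// placeholders.
package ssmexamples

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func reportCustomCompliance(instanceID string) error {
	svc := ssm.New(session.Must(session.NewSession()))

	// COMPLETE overwrites all existing items of this compliance type for the
	// resource; PARTIAL is only valid for association compliance with
	// SyncCompliance set to MANUAL, per the doc comment above.
	_, err := svc.PutComplianceItems(&ssm.PutComplianceItemsInput{
		ComplianceType: aws.String("Custom:DiskCheck"),
		ResourceId:     aws.String(instanceID),
		ResourceType:   aws.String("ManagedInstance"),
		ExecutionSummary: &ssm.ComplianceExecutionSummary{
			ExecutionTime: aws.Time(time.Now()),
		},
		Items: []*ssm.ComplianceItemEntry{
			{
				Id:       aws.String("disk-free"),
				Title:    aws.String("Root volume free space"),
				Severity: aws.String(ssm.ComplianceSeverityInformational),
				Status:   aws.String(ssm.ComplianceStatusCompliant),
			},
		},
		UploadType: aws.String(ssm.ComplianceUploadTypeComplete),
	})
	return err
}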
+func (s *PutComplianceItemsInput) SetUploadType(v string) *PutComplianceItemsInput { + s.UploadType = &v + return s +} + type PutComplianceItemsOutput struct { _ struct{} `type:"structure"` } @@ -37519,6 +39972,22 @@ type PutParameterInput struct { // AllowedPattern=^\d+$ AllowedPattern *string `type:"string"` + // The data type for a String parameter. Supported data types include plain + // text and Amazon Machine Image IDs. + // + // The following data type values are supported. + // + // * text + // + // * aws:ec2:image + // + // When you create a String parameter and specify aws:ec2:image, Systems Manager + // validates the parameter value is in the required format, such as ami-12345abcdeEXAMPLE, + // and that the specified AMI is available in your AWS account. For more information, + // see Native parameter support for Amazon Machine Image IDs (http://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-ec2-aliases.html) + // in the AWS Systems Manager User Guide. + DataType *string `type:"string"` + // Information about the parameter that you want to add to the system. Optional // but recommended. // @@ -37562,8 +40031,8 @@ type PutParameterInput struct { // // * Parameter hierarchies are limited to a maximum depth of fifteen levels. // - // For additional information about valid values for parameter names, see Requirements - // and Constraints for Parameter Names (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) + // For additional information about valid values for parameter names, see About + // requirements and constraints for parameter names (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) // in the AWS Systems Manager User Guide. // // The maximum length constraint listed below includes capacity for additional @@ -37600,8 +40069,8 @@ type PutParameterInput struct { // time, but it has not been changed. // // All existing policies are preserved until you send new policies or an empty - // policy. For more information about parameter policies, see Working with Parameter - // Policies (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-paramstore-su-policies.html). + // policy. For more information about parameter policies, see Assigning parameter + // policies (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-policies.html). Policies *string `min:"1" type:"string"` // Optional metadata that you assign to a resource. Tags enable you to categorize @@ -37632,7 +40101,7 @@ type PutParameterInput struct { // Advanced parameters have a content size limit of 8 KB and can be configured // to use parameter policies. You can create a maximum of 100,000 advanced parameters // for each Region in an AWS account. Advanced parameters incur a charge. For - // more information, see About Advanced Parameters (http://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-advanced-parameters.html) + // more information, see Standard and advanced parameter tiers (https://docs.aws.amazon.com/systems-manager/latest/userguide/parameter-store-advanced-parameters.html) // in the AWS Systems Manager User Guide. // // You can change a standard parameter to an advanced parameter any time. But @@ -37680,25 +40149,29 @@ type PutParameterInput struct { // current Region. 
// // For more information about configuring the default tier option, see Specifying - // a Default Parameter Tier (http://docs.aws.amazon.com/systems-manager/latest/userguide/ps-default-tier.html) + // a default parameter tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/ps-default-tier.html) // in the AWS Systems Manager User Guide. Tier *string `type:"string" enum:"ParameterTier"` // The type of parameter that you want to add to the system. // + // SecureString is not currently supported for AWS CloudFormation templates + // or in the China Regions. + // // Items in a StringList must be separated by a comma (,). You can't use other // punctuation or special character to escape items in the list. If you have // a parameter value that requires a comma, then use the String data type. // - // SecureString is not currently supported for AWS CloudFormation templates - // or in the China Regions. - // - // Type is a required field - Type *string `type:"string" required:"true" enum:"ParameterType"` + // Specifying a parameter type is not required when updating a parameter. You + // must specify a parameter type when creating a parameter. + Type *string `type:"string" enum:"ParameterType"` // The parameter value that you want to add to the system. Standard parameters // have a value limit of 4 KB. Advanced parameters have a value limit of 8 KB. // + // Parameters can't be referenced or nested in the values of other parameters. + // You can't include {{}} or {{ssm:parameter-name}} in a parameter value. + // // Value is a required field Value *string `type:"string" required:"true"` } @@ -37728,9 +40201,6 @@ func (s *PutParameterInput) Validate() error { if s.Policies != nil && len(*s.Policies) < 1 { invalidParams.Add(request.NewErrParamMinLen("Policies", 1)) } - if s.Type == nil { - invalidParams.Add(request.NewErrParamRequired("Type")) - } if s.Value == nil { invalidParams.Add(request.NewErrParamRequired("Value")) } @@ -37757,6 +40227,12 @@ func (s *PutParameterInput) SetAllowedPattern(v string) *PutParameterInput { return s } +// SetDataType sets the DataType field's value. +func (s *PutParameterInput) SetDataType(v string) *PutParameterInput { + s.DataType = &v + return s +} + // SetDescription sets the Description field's value. func (s *PutParameterInput) SetDescription(v string) *PutParameterInput { s.Description = &v @@ -38055,7 +40531,7 @@ type RegisterTargetWithMaintenanceWindowInput struct { // Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC // // For more information about these examples formats, including the best use - // case for each one, see Examples: Register Targets with a Maintenance Window + // case for each one, see Examples: Register targets with a maintenance window // (https://docs.aws.amazon.com/systems-manager/latest/userguide/mw-cli-tutorial-targets-examples.html) // in the AWS Systems Manager User Guide. // @@ -38196,7 +40672,7 @@ type RegisterTaskWithMaintenanceWindowInput struct { // An optional description for the task. Description *string `min:"1" type:"string" sensitive:"true"` - // A structure containing information about an Amazon S3 bucket to write instance-level + // A structure containing information about an S3 bucket to write instance-level // logs to. // // LoggingInfo has been deprecated. 
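// [Editor's illustrative sketch - not part of the vendored aws-sdk-go diff.]
// Exercises the two PutParameter changes above: the aws:ec2:image DataType
// and the relaxed validation that no longer requires Type when updating an
// existing parameter. The parameter name and AMI IDs are placeholders.
package ssmexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func putAndUpdateAmiParameter() error {
	svc := ssm.New(session.Must(session.NewSession()))

	// Creating a parameter still requires Type; DataType aws:ec2:image asks
	// Parameter Store to validate that the value is an AMI available in the
	// account, per the DataType comment above.
	if _, err := svc.PutParameter(&ssm.PutParameterInput{
		Name:     aws.String("/golden/ami"),
		Type:     aws.String(ssm.ParameterTypeString),
		DataType: aws.String("aws:ec2:image"),
		Value:    aws.String("ami-12345abcdeEXAMPLE"),
	}); err != nil {
		return err
	}

	// Type can be omitted when updating an existing parameter; Overwrite is
	// needed to replace the stored value.
	_, err := svc.PutParameter(&ssm.PutParameterInput{
		Name:      aws.String("/golden/ami"),
		Value:     aws.String("ami-67890fghijEXAMPLE"),
		Overwrite: aws.Bool(true),
	})
	return err
}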
To specify an S3 bucket to contain logs, @@ -38231,10 +40707,10 @@ type RegisterTaskWithMaintenanceWindowInput struct { // For more information, see the following topics in the in the AWS Systems // Manager User Guide: // - // * Service-Linked Role Permissions for Systems Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) + // * Using service-linked roles for Systems Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) // - // * Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance - // Window Tasks? (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) + // * Should I use a service-linked role or a custom service role to run maintenance + // window tasks? (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) ServiceRoleArn *string `type:"string"` // The targets (either instances or maintenance window targets). @@ -38605,7 +41081,9 @@ func (s RemoveTagsFromResourceOutput) GoString() string { type ResetServiceSettingInput struct { _ struct{} `type:"structure"` - // The ID of the service setting to reset. + // The Amazon Resource Name (ARN) of the service setting to reset. The setting + // ID can be /ssm/parameter-store/default-parameter-tier, /ssm/parameter-store/high-throughput-enabled, + // or /ssm/managed-instance/activation-tier. For example, arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled. // // SettingId is a required field SettingId *string `min:"1" type:"string" required:"true"` @@ -38792,8 +41270,8 @@ func (s *ResourceComplianceSummaryItem) SetStatus(v string) *ResourceComplianceS // A sync configuration with the same name already exists. type ResourceDataSyncAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -38812,17 +41290,17 @@ func (s ResourceDataSyncAlreadyExistsException) GoString() string { func newErrorResourceDataSyncAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceDataSyncAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceDataSyncAlreadyExistsException) Code() string { +func (s *ResourceDataSyncAlreadyExistsException) Code() string { return "ResourceDataSyncAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceDataSyncAlreadyExistsException) Message() string { +func (s *ResourceDataSyncAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -38830,22 +41308,22 @@ func (s ResourceDataSyncAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceDataSyncAlreadyExistsException) OrigErr() error { +func (s *ResourceDataSyncAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceDataSyncAlreadyExistsException) Error() string { +func (s *ResourceDataSyncAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceDataSyncAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceDataSyncAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceDataSyncAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceDataSyncAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // Information about the AwsOrganizationsSource resource data sync source. A @@ -38920,8 +41398,8 @@ func (s *ResourceDataSyncAwsOrganizationsSource) SetOrganizationalUnits(v []*Res // Another UpdateResourceDataSync request is being processed. Wait a few minutes // and try again. type ResourceDataSyncConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -38938,17 +41416,17 @@ func (s ResourceDataSyncConflictException) GoString() string { func newErrorResourceDataSyncConflictException(v protocol.ResponseMetadata) error { return &ResourceDataSyncConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceDataSyncConflictException) Code() string { +func (s *ResourceDataSyncConflictException) Code() string { return "ResourceDataSyncConflictException" } // Message returns the exception's message. -func (s ResourceDataSyncConflictException) Message() string { +func (s *ResourceDataSyncConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -38956,28 +41434,28 @@ func (s ResourceDataSyncConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceDataSyncConflictException) OrigErr() error { +func (s *ResourceDataSyncConflictException) OrigErr() error { return nil } -func (s ResourceDataSyncConflictException) Error() string { +func (s *ResourceDataSyncConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceDataSyncConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceDataSyncConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceDataSyncConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceDataSyncConflictException) RequestID() string { + return s.RespMetadata.RequestID } // You have exceeded the allowed maximum sync configurations. type ResourceDataSyncCountExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -38994,17 +41472,17 @@ func (s ResourceDataSyncCountExceededException) GoString() string { func newErrorResourceDataSyncCountExceededException(v protocol.ResponseMetadata) error { return &ResourceDataSyncCountExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s ResourceDataSyncCountExceededException) Code() string { +func (s *ResourceDataSyncCountExceededException) Code() string { return "ResourceDataSyncCountExceededException" } // Message returns the exception's message. -func (s ResourceDataSyncCountExceededException) Message() string { +func (s *ResourceDataSyncCountExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -39012,28 +41490,68 @@ func (s ResourceDataSyncCountExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceDataSyncCountExceededException) OrigErr() error { +func (s *ResourceDataSyncCountExceededException) OrigErr() error { return nil } -func (s ResourceDataSyncCountExceededException) Error() string { +func (s *ResourceDataSyncCountExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceDataSyncCountExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceDataSyncCountExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceDataSyncCountExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceDataSyncCountExceededException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Synchronize Systems Manager Inventory data from multiple AWS accounts defined +// in AWS Organizations to a centralized S3 bucket. Data is synchronized to +// individual key prefixes in the central bucket. Each key prefix represents +// a different AWS account ID. +type ResourceDataSyncDestinationDataSharing struct { + _ struct{} `type:"structure"` + + // The sharing data type. Only Organization is supported. + DestinationDataSharingType *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResourceDataSyncDestinationDataSharing) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceDataSyncDestinationDataSharing) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResourceDataSyncDestinationDataSharing) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResourceDataSyncDestinationDataSharing"} + if s.DestinationDataSharingType != nil && len(*s.DestinationDataSharingType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DestinationDataSharingType", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestinationDataSharingType sets the DestinationDataSharingType field's value. +func (s *ResourceDataSyncDestinationDataSharing) SetDestinationDataSharingType(v string) *ResourceDataSyncDestinationDataSharing { + s.DestinationDataSharingType = &v + return s } // The specified sync configuration is invalid. 
type ResourceDataSyncInvalidConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -39050,17 +41568,17 @@ func (s ResourceDataSyncInvalidConfigurationException) GoString() string { func newErrorResourceDataSyncInvalidConfigurationException(v protocol.ResponseMetadata) error { return &ResourceDataSyncInvalidConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceDataSyncInvalidConfigurationException) Code() string { +func (s *ResourceDataSyncInvalidConfigurationException) Code() string { return "ResourceDataSyncInvalidConfigurationException" } // Message returns the exception's message. -func (s ResourceDataSyncInvalidConfigurationException) Message() string { +func (s *ResourceDataSyncInvalidConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -39068,22 +41586,22 @@ func (s ResourceDataSyncInvalidConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceDataSyncInvalidConfigurationException) OrigErr() error { +func (s *ResourceDataSyncInvalidConfigurationException) OrigErr() error { return nil } -func (s ResourceDataSyncInvalidConfigurationException) Error() string { +func (s *ResourceDataSyncInvalidConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceDataSyncInvalidConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceDataSyncInvalidConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceDataSyncInvalidConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceDataSyncInvalidConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // Information about a Resource Data Sync configuration, including its current @@ -39103,7 +41621,7 @@ type ResourceDataSyncItem struct { // The last time the configuration attempted to sync (UTC). LastSyncTime *time.Time `type:"timestamp"` - // Configuration information for the target Amazon S3 bucket. + // Configuration information for the target S3 bucket. S3Destination *ResourceDataSyncS3Destination `type:"structure"` // The date and time the configuration was created (UTC). @@ -39119,9 +41637,9 @@ type ResourceDataSyncItem struct { SyncSource *ResourceDataSyncSourceWithState `type:"structure"` // The type of resource data sync. If SyncType is SyncToDestination, then the - // resource data sync synchronizes data to an Amazon S3 bucket. If the SyncType - // is SyncFromSource then the resource data sync synchronizes data from AWS - // Organizations or from multiple AWS Regions. + // resource data sync synchronizes data to an S3 bucket. If the SyncType is + // SyncFromSource then the resource data sync synchronizes data from AWS Organizations + // or from multiple AWS Regions. SyncType *string `min:"1" type:"string"` } @@ -39197,8 +41715,8 @@ func (s *ResourceDataSyncItem) SetSyncType(v string) *ResourceDataSyncItem { // The specified sync name was not found. 
type ResourceDataSyncNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -39219,17 +41737,17 @@ func (s ResourceDataSyncNotFoundException) GoString() string { func newErrorResourceDataSyncNotFoundException(v protocol.ResponseMetadata) error { return &ResourceDataSyncNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceDataSyncNotFoundException) Code() string { +func (s *ResourceDataSyncNotFoundException) Code() string { return "ResourceDataSyncNotFoundException" } // Message returns the exception's message. -func (s ResourceDataSyncNotFoundException) Message() string { +func (s *ResourceDataSyncNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -39237,22 +41755,22 @@ func (s ResourceDataSyncNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceDataSyncNotFoundException) OrigErr() error { +func (s *ResourceDataSyncNotFoundException) OrigErr() error { return nil } -func (s ResourceDataSyncNotFoundException) Error() string { +func (s *ResourceDataSyncNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceDataSyncNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceDataSyncNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceDataSyncNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceDataSyncNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The AWS Organizations organizational unit data source for the sync. @@ -39292,23 +41810,26 @@ func (s *ResourceDataSyncOrganizationalUnit) SetOrganizationalUnitId(v string) * return s } -// Information about the target Amazon S3 bucket for the Resource Data Sync. +// Information about the target S3 bucket for the Resource Data Sync. type ResourceDataSyncS3Destination struct { _ struct{} `type:"structure"` // The ARN of an encryption key for a destination in Amazon S3. Must belong - // to the same Region as the destination Amazon S3 bucket. + // to the same Region as the destination S3 bucket. AWSKMSKeyARN *string `min:"1" type:"string"` - // The name of the Amazon S3 bucket where the aggregated data is stored. + // The name of the S3 bucket where the aggregated data is stored. // // BucketName is a required field BucketName *string `min:"1" type:"string" required:"true"` + // Enables destination data sharing. By default, this field is null. + DestinationDataSharing *ResourceDataSyncDestinationDataSharing `type:"structure"` + // An Amazon S3 prefix for the bucket. Prefix *string `min:"1" type:"string"` - // The AWS Region with the Amazon S3 bucket targeted by the Resource Data Sync. + // The AWS Region with the S3 bucket targeted by the Resource Data Sync. 
// // Region is a required field Region *string `min:"1" type:"string" required:"true"` @@ -39353,6 +41874,11 @@ func (s *ResourceDataSyncS3Destination) Validate() error { if s.SyncFormat == nil { invalidParams.Add(request.NewErrParamRequired("SyncFormat")) } + if s.DestinationDataSharing != nil { + if err := s.DestinationDataSharing.Validate(); err != nil { + invalidParams.AddNested("DestinationDataSharing", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -39372,6 +41898,12 @@ func (s *ResourceDataSyncS3Destination) SetBucketName(v string) *ResourceDataSyn return s } +// SetDestinationDataSharing sets the DestinationDataSharing field's value. +func (s *ResourceDataSyncS3Destination) SetDestinationDataSharing(v *ResourceDataSyncDestinationDataSharing) *ResourceDataSyncS3Destination { + s.DestinationDataSharing = v + return s +} + // SetPrefix sets the Prefix field's value. func (s *ResourceDataSyncS3Destination) SetPrefix(v string) *ResourceDataSyncS3Destination { s.Prefix = &v @@ -39564,8 +42096,8 @@ func (s *ResourceDataSyncSourceWithState) SetState(v string) *ResourceDataSyncSo // Error returned if an attempt is made to delete a patch baseline that is registered // for a patch group. type ResourceInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -39582,17 +42114,17 @@ func (s ResourceInUseException) GoString() string { func newErrorResourceInUseException(v protocol.ResponseMetadata) error { return &ResourceInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceInUseException) Code() string { +func (s *ResourceInUseException) Code() string { return "ResourceInUseException" } // Message returns the exception's message. -func (s ResourceInUseException) Message() string { +func (s *ResourceInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -39600,33 +42132,33 @@ func (s ResourceInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceInUseException) OrigErr() error { +func (s *ResourceInUseException) OrigErr() error { return nil } -func (s ResourceInUseException) Error() string { +func (s *ResourceInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceInUseException) RequestID() string { + return s.RespMetadata.RequestID } // Error returned when the caller has exceeded the default resource quotas. // For example, too many maintenance windows or patch baselines have been created. // // For information about resource quotas in Systems Manager, see Systems Manager -// Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) +// service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. 
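The new ResourceDataSyncDestinationDataSharing structure and the DestinationDataSharing field on ResourceDataSyncS3Destination shown above allow inventory from the accounts in an AWS Organization to be synced into one central S3 bucket. A minimal sketch of wiring that into CreateResourceDataSync; the sync name, bucket name, and Region are hypothetical placeholders.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ssm"
    )

    func main() {
        svc := ssm.New(session.Must(session.NewSession()))

        // Create a SyncToDestination resource data sync whose S3 destination shares
        // data across the organization. Names and Region are placeholders.
        _, err := svc.CreateResourceDataSync(&ssm.CreateResourceDataSyncInput{
            SyncName: aws.String("org-inventory-sync"),
            SyncType: aws.String("SyncToDestination"),
            S3Destination: &ssm.ResourceDataSyncS3Destination{
                BucketName: aws.String("example-central-inventory-bucket"),
                Region:     aws.String("us-east-1"),
                SyncFormat: aws.String("JsonSerDe"), // the only supported format
                DestinationDataSharing: &ssm.ResourceDataSyncDestinationDataSharing{
                    DestinationDataSharingType: aws.String("Organization"),
                },
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }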
type ResourceLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -39643,17 +42175,17 @@ func (s ResourceLimitExceededException) GoString() string { func newErrorResourceLimitExceededException(v protocol.ResponseMetadata) error { return &ResourceLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceLimitExceededException) Code() string { +func (s *ResourceLimitExceededException) Code() string { return "ResourceLimitExceededException" } // Message returns the exception's message. -func (s ResourceLimitExceededException) Message() string { +func (s *ResourceLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -39661,22 +42193,22 @@ func (s ResourceLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceLimitExceededException) OrigErr() error { +func (s *ResourceLimitExceededException) OrigErr() error { return nil } -func (s ResourceLimitExceededException) Error() string { +func (s *ResourceLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The inventory item result attribute. @@ -39774,8 +42306,8 @@ type ResumeSessionOutput struct { // // region represents the Region identifier for an AWS Region supported by AWS // Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list - // of supported region values, see the Region column in Systems Manager Service - // Endpoints (http://docs.aws.amazon.com/general/latest/gr/ssm.html#ssm_region) + // of supported region values, see the Region column in Systems Manager service + // endpoints (http://docs.aws.amazon.com/general/latest/gr/ssm.html#ssm_region) // in the AWS General Reference. // // session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE. @@ -39814,19 +42346,19 @@ func (s *ResumeSessionOutput) SetTokenValue(v string) *ResumeSessionOutput { return s } -// An Amazon S3 bucket where you want to store the results of this request. +// An S3 bucket where you want to store the results of this request. type S3OutputLocation struct { _ struct{} `type:"structure"` - // The name of the Amazon S3 bucket. + // The name of the S3 bucket. OutputS3BucketName *string `min:"3" type:"string"` - // The Amazon S3 bucket subfolder. + // The S3 bucket subfolder. OutputS3KeyPrefix *string `type:"string"` // (Deprecated) You can no longer specify this parameter. The system ignores - // it. Instead, Systems Manager automatically determines the Amazon S3 bucket - // region. + // it. Instead, Systems Manager automatically determines the Region of the S3 + // bucket. 
OutputS3Region *string `min:"3" type:"string"` } @@ -39874,13 +42406,11 @@ func (s *S3OutputLocation) SetOutputS3Region(v string) *S3OutputLocation { return s } -// A URL for the Amazon S3 bucket where you want to store the results of this -// request. +// A URL for the S3 bucket where you want to store the results of this request. type S3OutputUrl struct { _ struct{} `type:"structure"` - // A URL for an Amazon S3 bucket where you want to store the results of this - // request. + // A URL for an S3 bucket where you want to store the results of this request. OutputUrl *string `type:"string"` } @@ -40080,18 +42610,25 @@ type SendCommandInput struct { // --document-version "3" DocumentVersion *string `type:"string"` - // The instance IDs where the command should run. You can specify a maximum - // of 50 IDs. If you prefer not to list individual instance IDs, you can instead - // send commands to a fleet of instances using the Targets parameter, which - // accepts EC2 tags. For more information about how to use targets, see Sending - // Commands to a Fleet (http://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html) + // The IDs of the instances where the command should run. Specifying instance + // IDs is most useful when you are targeting a limited number of instances, + // though you can specify up to 50 IDs. + // + // To target a larger number of instances, or if you prefer not to list individual + // instance IDs, we recommend using the Targets option instead. Using Targets, + // which accepts tag key-value pairs to identify the instances to send commands + // to, you can a send command to tens, hundreds, or thousands of instances at + // once. + // + // For more information about how to use targets, see Using targets and rate + // controls to send commands to a fleet (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html) // in the AWS Systems Manager User Guide. InstanceIds []*string `type:"list"` // (Optional) The maximum number of instances that are allowed to run the command // at the same time. You can specify a number such as 10 or a percentage such // as 10%. The default value is 50. For more information about how to use MaxConcurrency, - // see Using Concurrency Controls (http://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-velocity) + // see Using concurrency controls (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-velocity) // in the AWS Systems Manager User Guide. MaxConcurrency *string `min:"1" type:"string"` @@ -40099,7 +42636,7 @@ type SendCommandInput struct { // command fails one more time beyond the value of MaxErrors, the systems stops // sending the command to additional targets. You can specify a number like // 10 or a percentage like 10%. The default value is 0. For more information - // about how to use MaxErrors, see Using Error Controls (http://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-maxerrors) + // about how to use MaxErrors, see Using error controls (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-maxerrors) // in the AWS Systems Manager User Guide. MaxErrors *string `min:"1" type:"string"` @@ -40114,8 +42651,8 @@ type SendCommandInput struct { OutputS3KeyPrefix *string `type:"string"` // (Deprecated) You can no longer specify this parameter. The system ignores - // it. 
Instead, Systems Manager automatically determines the Amazon S3 bucket - // region. + // it. Instead, Systems Manager automatically determines the Region of the S3 + // bucket. OutputS3Region *string `min:"3" type:"string"` // The required and optional parameters specified in the document being run. @@ -40125,10 +42662,17 @@ type SendCommandInput struct { // Service (Amazon SNS) notifications for Run Command commands. ServiceRoleArn *string `type:"string"` - // (Optional) An array of search criteria that targets instances using a Key,Value - // combination that you specify. Targets is required if you don't provide one - // or more instance IDs in the call. For more information about how to use targets, - // see Sending Commands to a Fleet (http://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html) + // An array of search criteria that targets instances using a Key,Value combination + // that you specify. Specifying targets is most useful when you want to send + // a command to a large number of instances at once. Using Targets, which accepts + // tag key-value pairs to identify instances, you can send a command to tens, + // hundreds, or thousands of instances at once. + // + // To send a command to a smaller number of instances, you can use the InstanceIds + // option instead. + // + // For more information about how to use targets, see Sending commands to a + // fleet (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html) // in the AWS Systems Manager User Guide. Targets []*Target `type:"list"` @@ -40413,8 +42957,8 @@ func (s *ServiceSetting) SetStatus(v string) *ServiceSetting { // The specified service setting was not found. Either the service name or the // setting has not been provisioned by the AWS service team. type ServiceSettingNotFound struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -40431,17 +42975,17 @@ func (s ServiceSettingNotFound) GoString() string { func newErrorServiceSettingNotFound(v protocol.ResponseMetadata) error { return &ServiceSettingNotFound{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceSettingNotFound) Code() string { +func (s *ServiceSettingNotFound) Code() string { return "ServiceSettingNotFound" } // Message returns the exception's message. -func (s ServiceSettingNotFound) Message() string { +func (s *ServiceSettingNotFound) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -40449,22 +42993,22 @@ func (s ServiceSettingNotFound) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceSettingNotFound) OrigErr() error { +func (s *ServiceSettingNotFound) OrigErr() error { return nil } -func (s ServiceSettingNotFound) Error() string { +func (s *ServiceSettingNotFound) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceSettingNotFound) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceSettingNotFound) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
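The revised SendCommandInput documentation above distinguishes the InstanceIds option (up to 50 explicit IDs) from tag-based Targets for larger fleets. A minimal sketch of the tag-based form, assuming the AWS-RunShellScript document; the ServerRole tag and rate-control values are hypothetical.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ssm"
    )

    func main() {
        svc := ssm.New(session.Must(session.NewSession()))

        // Target every instance tagged ServerRole=WebServer instead of listing IDs.
        out, err := svc.SendCommand(&ssm.SendCommandInput{
            DocumentName: aws.String("AWS-RunShellScript"),
            Targets: []*ssm.Target{{
                Key:    aws.String("tag:ServerRole"),
                Values: []*string{aws.String("WebServer")},
            }},
            Parameters: map[string][]*string{
                "commands": {aws.String("uptime")},
            },
            MaxConcurrency: aws.String("10%"), // run on at most 10% of targets at a time
            MaxErrors:      aws.String("1"),   // stop after one additional failure
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.Command.CommandId))
    }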
-func (s ServiceSettingNotFound) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceSettingNotFound) RequestID() string { + return s.RespMetadata.RequestID } // Information about a Session Manager connection to an instance. @@ -40866,8 +43410,8 @@ type StartAutomationExecutionInput struct { // A location is a combination of AWS Regions and/or AWS accounts where you // want to run the Automation. Use this action to start an Automation in multiple - // Regions and multiple accounts. For more information, see Executing Automations - // in Multiple AWS Regions and Accounts (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-automation-multiple-accounts-and-regions.html) + // Regions and multiple accounts. For more information, see Running Automation + // workflows in multiple AWS Regions and accounts (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-automation-multiple-accounts-and-regions.html) // in the AWS Systems Manager User Guide. TargetLocations []*TargetLocation `min:"1" type:"list"` @@ -41053,8 +43597,10 @@ type StartSessionInput struct { _ struct{} `type:"structure"` // The name of the SSM document to define the parameters and plugin settings - // for the session. For example, SSM-SessionManagerRunShell. If no document - // name is provided, a shell to the instance is launched by default. + // for the session. For example, SSM-SessionManagerRunShell. You can call the + // GetDocument API to verify the document exists before attempting to start + // a session. If no document name is provided, a shell to the instance is launched + // by default. DocumentName *string `type:"string"` // Reserved for future use. @@ -41121,8 +43667,8 @@ type StartSessionOutput struct { // // region represents the Region identifier for an AWS Region supported by AWS // Systems Manager, such as us-east-2 for the US East (Ohio) Region. For a list - // of supported region values, see the Region column in Systems Manager Service - // Endpoints (http://docs.aws.amazon.com/general/latest/gr/ssm.html#ssm_region) + // of supported region values, see the Region column in Systems Manager service + // endpoints (http://docs.aws.amazon.com/general/latest/gr/ssm.html#ssm_region) // in the AWS General Reference. // // session-id represents the ID of a Session Manager session, such as 1a2b3c4dEXAMPLE. @@ -41163,8 +43709,8 @@ func (s *StartSessionOutput) SetTokenValue(v string) *StartSessionOutput { // The updated status is the same as the current status. type StatusUnchanged struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -41181,17 +43727,17 @@ func (s StatusUnchanged) GoString() string { func newErrorStatusUnchanged(v protocol.ResponseMetadata) error { return &StatusUnchanged{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s StatusUnchanged) Code() string { +func (s *StatusUnchanged) Code() string { return "StatusUnchanged" } // Message returns the exception's message. -func (s StatusUnchanged) Message() string { +func (s *StatusUnchanged) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -41199,22 +43745,22 @@ func (s StatusUnchanged) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
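The updated StartSessionInput documentation above suggests calling GetDocument to confirm the session document exists before starting a session. A minimal sketch of that sequence; the instance ID is hypothetical, and handing SessionId, StreamUrl, and TokenValue to a Session Manager client is out of scope here.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/ssm"
    )

    func main() {
        svc := ssm.New(session.Must(session.NewSession()))
        docName := "SSM-SessionManagerRunShell"

        // Verify the session document exists before attempting to start a session.
        if _, err := svc.GetDocument(&ssm.GetDocumentInput{Name: aws.String(docName)}); err != nil {
            log.Fatalf("document %s not available: %v", docName, err)
        }

        out, err := svc.StartSession(&ssm.StartSessionInput{
            Target:       aws.String("i-0123456789abcdef0"), // hypothetical instance ID
            DocumentName: aws.String(docName),
        })
        if err != nil {
            log.Fatal(err)
        }
        // SessionId, StreamUrl (wss://...), and TokenValue feed a Session Manager client.
        fmt.Println(aws.StringValue(out.SessionId))
    }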
-func (s StatusUnchanged) OrigErr() error { +func (s *StatusUnchanged) OrigErr() error { return nil } -func (s StatusUnchanged) Error() string { +func (s *StatusUnchanged) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s StatusUnchanged) StatusCode() int { - return s.respMetadata.StatusCode +func (s *StatusUnchanged) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s StatusUnchanged) RequestID() string { - return s.respMetadata.RequestID +func (s *StatusUnchanged) RequestID() string { + return s.RespMetadata.RequestID } // Detailed information about an the execution state of an Automation step. @@ -41567,8 +44113,8 @@ func (s StopAutomationExecutionOutput) GoString() string { // The sub-type count exceeded the limit for the inventory type. type SubTypeCountLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -41585,17 +44131,17 @@ func (s SubTypeCountLimitExceededException) GoString() string { func newErrorSubTypeCountLimitExceededException(v protocol.ResponseMetadata) error { return &SubTypeCountLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s SubTypeCountLimitExceededException) Code() string { +func (s *SubTypeCountLimitExceededException) Code() string { return "SubTypeCountLimitExceededException" } // Message returns the exception's message. -func (s SubTypeCountLimitExceededException) Message() string { +func (s *SubTypeCountLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -41603,22 +44149,22 @@ func (s SubTypeCountLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SubTypeCountLimitExceededException) OrigErr() error { +func (s *SubTypeCountLimitExceededException) OrigErr() error { return nil } -func (s SubTypeCountLimitExceededException) Error() string { +func (s *SubTypeCountLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SubTypeCountLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SubTypeCountLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SubTypeCountLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *SubTypeCountLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // Metadata that you assign to your AWS resources. 
Tags enable you to categorize @@ -41694,9 +44240,11 @@ func (s *Tag) SetValue(v string) *Tag { // // * Key=tag-key,Values=my-tag-key-1,my-tag-key-2 // -// * (Maintenance window targets only) Key=resource-groups:Name,Values=resource-group-name +// * Run Command and Maintenance window targets only: Key=resource-groups:Name,Values=resource-group-name +// +// * Maintenance window targets only: Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2 // -// * (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2 +// * Automation targets only: Key=ResourceGroup;Values=resource-group-name // // For example: // @@ -41706,21 +44254,22 @@ func (s *Tag) SetValue(v string) *Tag { // // * Key=tag-key,Values=Name,Instance-Type,CostCenter // -// * (Maintenance window targets only) Key=resource-groups:Name,Values=ProductionResourceGroup +// * Run Command and Maintenance window targets only: Key=resource-groups:Name,Values=ProductionResourceGroup // This example demonstrates how to target all resources in the resource // group ProductionResourceGroup in your maintenance window. // -// * (Maintenance window targets only) Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC -// This example demonstrates how to target only Amazon EC2 instances and -// VPCs in your maintenance window. +// * Maintenance window targets only: Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC +// This example demonstrates how to target only EC2 instances and VPCs in +// your maintenance window. // -// * (State Manager association targets only) Key=InstanceIds,Values=* This +// * Automation targets only: Key=ResourceGroup,Values=MyResourceGroup +// +// * State Manager association targets only: Key=InstanceIds,Values=* This // example demonstrates how to target all managed instances in the AWS Region // where the association was created. // -// For information about how to send commands that target instances using Key,Value -// parameters, see Using Targets and Rate Controls to Send Commands to a Fleet -// (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting) +// For more information about how to send commands that target instances using +// Key,Value parameters, see Targeting multiple instances (https://docs.aws.amazon.com/systems-manager/latest/userguide/send-commands-multiple.html#send-commands-targeting) // in the AWS Systems Manager User Guide. type Target struct { _ struct{} `type:"structure"` @@ -41731,7 +44280,7 @@ type Target struct { // User-defined criteria that maps to Key. For example, if you specified tag:ServerRole, // you could specify value:WebServer to run a command on instances that include - // Amazon EC2 tags of ServerRole,WebServer. + // EC2 tags of ServerRole,WebServer. Values []*string `type:"list"` } @@ -41773,8 +44322,8 @@ func (s *Target) SetValues(v []*string) *Target { // You specified the Safe option for the DeregisterTargetFromMaintenanceWindow // operation, but the target is still referenced in a task. 
type TargetInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -41791,17 +44340,17 @@ func (s TargetInUseException) GoString() string { func newErrorTargetInUseException(v protocol.ResponseMetadata) error { return &TargetInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TargetInUseException) Code() string { +func (s *TargetInUseException) Code() string { return "TargetInUseException" } // Message returns the exception's message. -func (s TargetInUseException) Message() string { +func (s *TargetInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -41809,22 +44358,22 @@ func (s TargetInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TargetInUseException) OrigErr() error { +func (s *TargetInUseException) OrigErr() error { return nil } -func (s TargetInUseException) Error() string { +func (s *TargetInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TargetInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TargetInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TargetInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *TargetInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The combination of AWS Regions and accounts targeted by the current Automation @@ -41916,12 +44465,14 @@ func (s *TargetLocation) SetTargetLocationMaxErrors(v string) *TargetLocation { } // The specified target instance for the session is not fully configured for -// use with Session Manager. For more information, see Getting Started with -// Session Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) -// in the AWS Systems Manager User Guide. +// use with Session Manager. For more information, see Getting started with +// Session Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) +// in the AWS Systems Manager User Guide. This error is also returned if you +// attempt to start a session on an instance that is located in a different +// account or Region type TargetNotConnected struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -41938,17 +44489,17 @@ func (s TargetNotConnected) GoString() string { func newErrorTargetNotConnected(v protocol.ResponseMetadata) error { return &TargetNotConnected{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TargetNotConnected) Code() string { +func (s *TargetNotConnected) Code() string { return "TargetNotConnected" } // Message returns the exception's message. 
-func (s TargetNotConnected) Message() string { +func (s *TargetNotConnected) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -41956,22 +44507,22 @@ func (s TargetNotConnected) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TargetNotConnected) OrigErr() error { +func (s *TargetNotConnected) OrigErr() error { return nil } -func (s TargetNotConnected) Error() string { +func (s *TargetNotConnected) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TargetNotConnected) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TargetNotConnected) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TargetNotConnected) RequestID() string { - return s.respMetadata.RequestID +func (s *TargetNotConnected) RequestID() string { + return s.RespMetadata.RequestID } type TerminateSessionInput struct { @@ -42041,8 +44592,8 @@ func (s *TerminateSessionOutput) SetSessionId(v string) *TerminateSessionOutput // The Targets parameter includes too many tags. Remove one or more tags and // try the command again. type TooManyTagsError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -42059,17 +44610,17 @@ func (s TooManyTagsError) GoString() string { func newErrorTooManyTagsError(v protocol.ResponseMetadata) error { return &TooManyTagsError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsError) Code() string { +func (s *TooManyTagsError) Code() string { return "TooManyTagsError" } // Message returns the exception's message. -func (s TooManyTagsError) Message() string { +func (s *TooManyTagsError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -42077,29 +44628,29 @@ func (s TooManyTagsError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsError) OrigErr() error { +func (s *TooManyTagsError) OrigErr() error { return nil } -func (s TooManyTagsError) Error() string { +func (s *TooManyTagsError) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsError) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsError) RequestID() string { + return s.RespMetadata.RequestID } // There are concurrent updates for a resource that supports one update at a // time. type TooManyUpdates struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -42116,17 +44667,17 @@ func (s TooManyUpdates) GoString() string { func newErrorTooManyUpdates(v protocol.ResponseMetadata) error { return &TooManyUpdates{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s TooManyUpdates) Code() string { +func (s *TooManyUpdates) Code() string { return "TooManyUpdates" } // Message returns the exception's message. -func (s TooManyUpdates) Message() string { +func (s *TooManyUpdates) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -42134,28 +44685,28 @@ func (s TooManyUpdates) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyUpdates) OrigErr() error { +func (s *TooManyUpdates) OrigErr() error { return nil } -func (s TooManyUpdates) Error() string { +func (s *TooManyUpdates) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyUpdates) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyUpdates) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyUpdates) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyUpdates) RequestID() string { + return s.RespMetadata.RequestID } // The size of inventory data has exceeded the total size limit for the resource. type TotalSizeLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -42172,17 +44723,17 @@ func (s TotalSizeLimitExceededException) GoString() string { func newErrorTotalSizeLimitExceededException(v protocol.ResponseMetadata) error { return &TotalSizeLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TotalSizeLimitExceededException) Code() string { +func (s *TotalSizeLimitExceededException) Code() string { return "TotalSizeLimitExceededException" } // Message returns the exception's message. -func (s TotalSizeLimitExceededException) Message() string { +func (s *TotalSizeLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -42190,29 +44741,29 @@ func (s TotalSizeLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TotalSizeLimitExceededException) OrigErr() error { +func (s *TotalSizeLimitExceededException) OrigErr() error { return nil } -func (s TotalSizeLimitExceededException) Error() string { +func (s *TotalSizeLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TotalSizeLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TotalSizeLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TotalSizeLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *TotalSizeLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The calendar entry contained in the specified Systems Manager document is // not supported. 
type UnsupportedCalendarException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -42229,17 +44780,17 @@ func (s UnsupportedCalendarException) GoString() string { func newErrorUnsupportedCalendarException(v protocol.ResponseMetadata) error { return &UnsupportedCalendarException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedCalendarException) Code() string { +func (s *UnsupportedCalendarException) Code() string { return "UnsupportedCalendarException" } // Message returns the exception's message. -func (s UnsupportedCalendarException) Message() string { +func (s *UnsupportedCalendarException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -42247,32 +44798,32 @@ func (s UnsupportedCalendarException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedCalendarException) OrigErr() error { +func (s *UnsupportedCalendarException) OrigErr() error { return nil } -func (s UnsupportedCalendarException) Error() string { +func (s *UnsupportedCalendarException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedCalendarException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedCalendarException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedCalendarException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedCalendarException) RequestID() string { + return s.RespMetadata.RequestID } -// Microsoft application patching is only available on EC2 instances and Advanced -// Instances. To patch Microsoft applications on on-premises servers and VMs, -// you must enable Advanced Instances. For more information, see Using the Advanced-Instances -// Tier (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) +// Microsoft application patching is only available on EC2 instances and advanced +// instances. To patch Microsoft applications on on-premises servers and VMs, +// you must enable advanced instances. For more information, see Using the advanced-instances +// tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) // in the AWS Systems Manager User Guide. type UnsupportedFeatureRequiredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -42289,17 +44840,17 @@ func (s UnsupportedFeatureRequiredException) GoString() string { func newErrorUnsupportedFeatureRequiredException(v protocol.ResponseMetadata) error { return &UnsupportedFeatureRequiredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedFeatureRequiredException) Code() string { +func (s *UnsupportedFeatureRequiredException) Code() string { return "UnsupportedFeatureRequiredException" } // Message returns the exception's message. 
-func (s UnsupportedFeatureRequiredException) Message() string { +func (s *UnsupportedFeatureRequiredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -42307,30 +44858,30 @@ func (s UnsupportedFeatureRequiredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedFeatureRequiredException) OrigErr() error { +func (s *UnsupportedFeatureRequiredException) OrigErr() error { return nil } -func (s UnsupportedFeatureRequiredException) Error() string { +func (s *UnsupportedFeatureRequiredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedFeatureRequiredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedFeatureRequiredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedFeatureRequiredException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedFeatureRequiredException) RequestID() string { + return s.RespMetadata.RequestID } // The Context attribute that you specified for the InventoryItem is not allowed // for this inventory type. You can only use the Context attribute with inventory // types like AWS:ComplianceItem. type UnsupportedInventoryItemContextException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -42349,17 +44900,17 @@ func (s UnsupportedInventoryItemContextException) GoString() string { func newErrorUnsupportedInventoryItemContextException(v protocol.ResponseMetadata) error { return &UnsupportedInventoryItemContextException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedInventoryItemContextException) Code() string { +func (s *UnsupportedInventoryItemContextException) Code() string { return "UnsupportedInventoryItemContextException" } // Message returns the exception's message. -func (s UnsupportedInventoryItemContextException) Message() string { +func (s *UnsupportedInventoryItemContextException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -42367,30 +44918,30 @@ func (s UnsupportedInventoryItemContextException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedInventoryItemContextException) OrigErr() error { +func (s *UnsupportedInventoryItemContextException) OrigErr() error { return nil } -func (s UnsupportedInventoryItemContextException) Error() string { +func (s *UnsupportedInventoryItemContextException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedInventoryItemContextException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedInventoryItemContextException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s UnsupportedInventoryItemContextException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedInventoryItemContextException) RequestID() string { + return s.RespMetadata.RequestID } // Inventory item type schema version has to match supported versions in the // service. Check output of GetInventorySchema to see the available schema version // for each type. type UnsupportedInventorySchemaVersionException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -42407,17 +44958,17 @@ func (s UnsupportedInventorySchemaVersionException) GoString() string { func newErrorUnsupportedInventorySchemaVersionException(v protocol.ResponseMetadata) error { return &UnsupportedInventorySchemaVersionException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedInventorySchemaVersionException) Code() string { +func (s *UnsupportedInventorySchemaVersionException) Code() string { return "UnsupportedInventorySchemaVersionException" } // Message returns the exception's message. -func (s UnsupportedInventorySchemaVersionException) Message() string { +func (s *UnsupportedInventorySchemaVersionException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -42425,30 +44976,29 @@ func (s UnsupportedInventorySchemaVersionException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedInventorySchemaVersionException) OrigErr() error { +func (s *UnsupportedInventorySchemaVersionException) OrigErr() error { return nil } -func (s UnsupportedInventorySchemaVersionException) Error() string { +func (s *UnsupportedInventorySchemaVersionException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedInventorySchemaVersionException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedInventorySchemaVersionException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedInventorySchemaVersionException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedInventorySchemaVersionException) RequestID() string { + return s.RespMetadata.RequestID } // The operating systems you specified is not supported, or the operation is -// not supported for the operating system. Valid operating systems include: -// Windows, AmazonLinux, RedhatEnterpriseLinux, and Ubuntu. +// not supported for the operating system. type UnsupportedOperatingSystem struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -42465,17 +45015,17 @@ func (s UnsupportedOperatingSystem) GoString() string { func newErrorUnsupportedOperatingSystem(v protocol.ResponseMetadata) error { return &UnsupportedOperatingSystem{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedOperatingSystem) Code() string { +func (s *UnsupportedOperatingSystem) Code() string { return "UnsupportedOperatingSystem" } // Message returns the exception's message. 
-func (s UnsupportedOperatingSystem) Message() string { +func (s *UnsupportedOperatingSystem) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -42483,28 +45033,28 @@ func (s UnsupportedOperatingSystem) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedOperatingSystem) OrigErr() error { +func (s *UnsupportedOperatingSystem) OrigErr() error { return nil } -func (s UnsupportedOperatingSystem) Error() string { +func (s *UnsupportedOperatingSystem) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedOperatingSystem) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedOperatingSystem) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedOperatingSystem) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedOperatingSystem) RequestID() string { + return s.RespMetadata.RequestID } // The parameter type is not supported. type UnsupportedParameterType struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -42521,17 +45071,17 @@ func (s UnsupportedParameterType) GoString() string { func newErrorUnsupportedParameterType(v protocol.ResponseMetadata) error { return &UnsupportedParameterType{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedParameterType) Code() string { +func (s *UnsupportedParameterType) Code() string { return "UnsupportedParameterType" } // Message returns the exception's message. -func (s UnsupportedParameterType) Message() string { +func (s *UnsupportedParameterType) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -42539,29 +45089,29 @@ func (s UnsupportedParameterType) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedParameterType) OrigErr() error { +func (s *UnsupportedParameterType) OrigErr() error { return nil } -func (s UnsupportedParameterType) Error() string { +func (s *UnsupportedParameterType) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedParameterType) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedParameterType) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedParameterType) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedParameterType) RequestID() string { + return s.RespMetadata.RequestID } // The document does not support the platform type of the given instance ID(s). // For example, you sent an document for a Windows instance to a Linux instance. 
type UnsupportedPlatformType struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -42578,17 +45128,17 @@ func (s UnsupportedPlatformType) GoString() string { func newErrorUnsupportedPlatformType(v protocol.ResponseMetadata) error { return &UnsupportedPlatformType{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedPlatformType) Code() string { +func (s *UnsupportedPlatformType) Code() string { return "UnsupportedPlatformType" } // Message returns the exception's message. -func (s UnsupportedPlatformType) Message() string { +func (s *UnsupportedPlatformType) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -42596,27 +45146,39 @@ func (s UnsupportedPlatformType) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedPlatformType) OrigErr() error { +func (s *UnsupportedPlatformType) OrigErr() error { return nil } -func (s UnsupportedPlatformType) Error() string { +func (s *UnsupportedPlatformType) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedPlatformType) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedPlatformType) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedPlatformType) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedPlatformType) RequestID() string { + return s.RespMetadata.RequestID } type UpdateAssociationInput struct { _ struct{} `type:"structure"` + // By default, when you update an association, the system runs it immediately + // after it is updated and then according to the schedule you specified. Specify + // this option if you don't want an association to run immediately after you + // update it. + // + // Also, if you specified this option when you created the association, you + // can reset it. To do so, specify the no-apply-only-at-cron-interval parameter + // when you update the association from the command line. This parameter forces + // the association to run immediately after updating it and according to the + // interval specified. + ApplyOnlyAtCronInterval *bool `type:"boolean"` + // The ID of the association you want to update. // // AssociationId is a required field @@ -42686,7 +45248,7 @@ type UpdateAssociationInput struct { // or My-Document. Name *string `type:"string"` - // An Amazon S3 bucket where you want to store the results of this request. + // An S3 bucket where you want to store the results of this request. OutputLocation *InstanceAssociationOutputLocation `type:"structure"` // The parameters you want to update for the association. If you create a parameter @@ -42696,6 +45258,20 @@ type UpdateAssociationInput struct { // The cron expression used to schedule the association that you want to update. ScheduleExpression *string `min:"1" type:"string"` + // The mode for generating association compliance. You can specify AUTO or MANUAL. + // In AUTO mode, the system uses the status of the association execution to + // determine the compliance status. If the association execution runs successfully, + // then the association is COMPLIANT. 
If the association execution doesn't run + // successfully, the association is NON-COMPLIANT. + // + // In MANUAL mode, you must specify the AssociationId as a parameter for the + // PutComplianceItems API action. In this case, compliance data is not managed + // by State Manager. It is managed by your direct call to the PutComplianceItems + // API action. + // + // By default, all associations use AUTO mode. + SyncCompliance *string `type:"string" enum:"AssociationSyncCompliance"` + // The targets of the association. Targets []*Target `type:"list"` } @@ -42750,6 +45326,12 @@ func (s *UpdateAssociationInput) Validate() error { return nil } +// SetApplyOnlyAtCronInterval sets the ApplyOnlyAtCronInterval field's value. +func (s *UpdateAssociationInput) SetApplyOnlyAtCronInterval(v bool) *UpdateAssociationInput { + s.ApplyOnlyAtCronInterval = &v + return s +} + // SetAssociationId sets the AssociationId field's value. func (s *UpdateAssociationInput) SetAssociationId(v string) *UpdateAssociationInput { s.AssociationId = &v @@ -42822,6 +45404,12 @@ func (s *UpdateAssociationInput) SetScheduleExpression(v string) *UpdateAssociat return s } +// SetSyncCompliance sets the SyncCompliance field's value. +func (s *UpdateAssociationInput) SetSyncCompliance(v string) *UpdateAssociationInput { + s.SyncCompliance = &v + return s +} + // SetTargets sets the Targets field's value. func (s *UpdateAssociationInput) SetTargets(v []*Target) *UpdateAssociationInput { s.Targets = v @@ -43037,7 +45625,9 @@ type UpdateDocumentInput struct { // supports JSON and YAML documents. JSON is the default format. DocumentFormat *string `type:"string" enum:"DocumentFormat"` - // (Required) The version of the document that you want to update. + // (Required) The latest version of the document that you want to update. The + // latest document version can be specified using the $LATEST variable or by + // the version number. Updating a previous version of a document is not supported. DocumentVersion *string `type:"string"` // The name of the document that you want to update. @@ -43194,6 +45784,18 @@ type UpdateMaintenanceWindowInput struct { // The schedule of the maintenance window in the form of a cron or rate expression. Schedule *string `min:"1" type:"string"` + // The number of days to wait after the date and time specified by a CRON expression + // before running the maintenance window. + // + // For example, the following cron expression schedules a maintenance window + // to run the third Tuesday of every month at 11:30 PM. + // + // cron(0 30 23 ? * TUE#3 *) + // + // If the schedule offset is 2, the maintenance window won't run until two days + // later. + ScheduleOffset *int64 `min:"1" type:"integer"` + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". 
For more information, see the Time Zone Database @@ -43237,6 +45839,9 @@ func (s *UpdateMaintenanceWindowInput) Validate() error { if s.Schedule != nil && len(*s.Schedule) < 1 { invalidParams.Add(request.NewErrParamMinLen("Schedule", 1)) } + if s.ScheduleOffset != nil && *s.ScheduleOffset < 1 { + invalidParams.Add(request.NewErrParamMinValue("ScheduleOffset", 1)) + } if s.WindowId == nil { invalidParams.Add(request.NewErrParamRequired("WindowId")) } @@ -43304,6 +45909,12 @@ func (s *UpdateMaintenanceWindowInput) SetSchedule(v string) *UpdateMaintenanceW return s } +// SetScheduleOffset sets the ScheduleOffset field's value. +func (s *UpdateMaintenanceWindowInput) SetScheduleOffset(v int64) *UpdateMaintenanceWindowInput { + s.ScheduleOffset = &v + return s +} + // SetScheduleTimezone sets the ScheduleTimezone field's value. func (s *UpdateMaintenanceWindowInput) SetScheduleTimezone(v string) *UpdateMaintenanceWindowInput { s.ScheduleTimezone = &v @@ -43353,6 +45964,10 @@ type UpdateMaintenanceWindowOutput struct { // The schedule of the maintenance window in the form of a cron or rate expression. Schedule *string `min:"1" type:"string"` + // The number of days to wait to run a maintenance window after the scheduled + // CRON expression date and time. + ScheduleOffset *int64 `min:"1" type:"integer"` + // The time zone that the scheduled maintenance window executions are based // on, in Internet Assigned Numbers Authority (IANA) format. For example: "America/Los_Angeles", // "etc/UTC", or "Asia/Seoul". For more information, see the Time Zone Database @@ -43426,6 +46041,12 @@ func (s *UpdateMaintenanceWindowOutput) SetSchedule(v string) *UpdateMaintenance return s } +// SetScheduleOffset sets the ScheduleOffset field's value. +func (s *UpdateMaintenanceWindowOutput) SetScheduleOffset(v int64) *UpdateMaintenanceWindowOutput { + s.ScheduleOffset = &v + return s +} + // SetScheduleTimezone sets the ScheduleTimezone field's value. func (s *UpdateMaintenanceWindowOutput) SetScheduleTimezone(v string) *UpdateMaintenanceWindowOutput { s.ScheduleTimezone = &v @@ -43666,7 +46287,7 @@ type UpdateMaintenanceWindowTaskInput struct { // Tasks that have the same priority are scheduled in parallel. Priority *int64 `type:"integer"` - // If True, then all fields that are required by the RegisterTaskWithMaintenanceWndow + // If True, then all fields that are required by the RegisterTaskWithMaintenanceWindow // action are also required for this API request. Optional fields that are not // specified are set to null. Replace *bool `type:"boolean"` @@ -43679,10 +46300,10 @@ type UpdateMaintenanceWindowTaskInput struct { // For more information, see the following topics in the in the AWS Systems // Manager User Guide: // - // * Service-Linked Role Permissions for Systems Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) + // * Using service-linked roles for Systems Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/using-service-linked-roles.html#slr-permissions) // - // * Should I Use a Service-Linked Role or a Custom Service Role to Run Maintenance - // Window Tasks? (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) + // * Should I use a service-linked role or a custom service role to run maintenance + // window tasks? 
(https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-maintenance-permissions.html#maintenance-window-tasks-service-role) ServiceRoleArn *string `type:"string"` // The targets (either instances or tags) to modify. Instances are specified @@ -43695,6 +46316,14 @@ type UpdateMaintenanceWindowTaskInput struct { // The parameters that the task should use during execution. Populate only the // fields that match the task type. All other fields should be empty. + // + // When you update a maintenance window task that has options specified in TaskInvocationParameters, + // you must provide again all the TaskInvocationParameters values that you want + // to retain. The values you do not specify again are removed. For example, + // suppose that when you registered a Run Command task, you specified TaskInvocationParameters + // values for Comment, NotificationConfig, and OutputS3BucketName. If you update + // the maintenance window task and specify only a different OutputS3BucketName + // value, the values for Comment and NotificationConfig are removed. TaskInvocationParameters *MaintenanceWindowTaskInvocationParameters `type:"structure"` // The parameters to modify. @@ -44117,7 +46746,7 @@ type UpdateOpsItemInput struct { // Use the /aws/resources key in OperationalData to specify a related resource // in the request. Use the /aws/automations key in OperationalData to associate // an Automation runbook with the OpsItem. To view AWS CLI example commands - // that use these keys, see Creating OpsItems Manually (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems) + // that use these keys, see Creating OpsItems manually (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-creating-OpsItems.html#OpsCenter-manually-create-OpsItems) // in the AWS Systems Manager User Guide. OperationalData map[string]*OpsItemDataValue `type:"map"` @@ -44141,7 +46770,7 @@ type UpdateOpsItemInput struct { Severity *string `min:"1" type:"string"` // The OpsItem status. Status can be Open, In Progress, or Resolved. For more - // information, see Editing OpsItem Details (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-working-with-OpsItems-editing-details.html) + // information, see Editing OpsItem details (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-working-with-OpsItems.html#OpsCenter-working-with-OpsItems-editing-details) // in the AWS Systems Manager User Guide. Status *string `type:"string" enum:"OpsItemStatus"` @@ -44287,8 +46916,8 @@ type UpdatePatchBaselineInput struct { // A list of explicitly approved patches for the baseline. // // For information about accepted formats for lists of approved patches and - // rejected patches, see Package Name Formats for Approved and Rejected Patch - // Lists (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) + // rejected patches, see About package name formats for approved and rejected + // patch lists (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) // in the AWS Systems Manager User Guide. ApprovedPatches []*string `type:"list"` @@ -44317,8 +46946,8 @@ type UpdatePatchBaselineInput struct { // A list of explicitly rejected patches for the baseline. 
// // For information about accepted formats for lists of approved patches and - // rejected patches, see Package Name Formats for Approved and Rejected Patch - // Lists (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) + // rejected patches, see About package name formats for approved and rejected + // patch lists (https://docs.aws.amazon.com/systems-manager/latest/userguide/patch-manager-approved-rejected-package-name-formats.html) // in the AWS Systems Manager User Guide. RejectedPatches []*string `type:"list"` @@ -44630,10 +47259,7 @@ type UpdateResourceDataSyncInput struct { // SyncSource is a required field SyncSource *ResourceDataSyncSource `type:"structure" required:"true"` - // The type of resource data sync. If SyncType is SyncToDestination, then the - // resource data sync synchronizes data to an Amazon S3 bucket. If the SyncType - // is SyncFromSource then the resource data sync synchronizes data from AWS - // Organizations or from multiple AWS Regions. + // The type of resource data sync. The supported SyncType is SyncFromSource. // // SyncType is a required field SyncType *string `min:"1" type:"string" required:"true"` @@ -44715,12 +47341,30 @@ func (s UpdateResourceDataSyncOutput) GoString() string { type UpdateServiceSettingInput struct { _ struct{} `type:"structure"` - // The ID of the service setting to update. + // The Amazon Resource Name (ARN) of the service setting to reset. For example, + // arn:aws:ssm:us-east-1:111122223333:servicesetting/ssm/parameter-store/high-throughput-enabled. + // The setting ID can be one of the following. + // + // * /ssm/parameter-store/default-parameter-tier + // + // * /ssm/parameter-store/high-throughput-enabled + // + // * /ssm/managed-instance/activation-tier // // SettingId is a required field SettingId *string `min:"1" type:"string" required:"true"` - // The new value to specify for the service setting. + // The new value to specify for the service setting. For the /ssm/parameter-store/default-parameter-tier + // setting ID, the setting value can be one of the following. + // + // * Standard + // + // * Advanced + // + // * Intelligent-Tiering + // + // For the /ssm/parameter-store/high-throughput-enabled, and /ssm/managed-instance/activation-tier + // setting IDs, the setting value can be true or false. 
// // SettingValue is a required field SettingValue *string `min:"1" type:"string" required:"true"` @@ -44802,6 +47446,17 @@ const ( AssociationComplianceSeverityUnspecified = "UNSPECIFIED" ) +// AssociationComplianceSeverity_Values returns all elements of the AssociationComplianceSeverity enum +func AssociationComplianceSeverity_Values() []string { + return []string{ + AssociationComplianceSeverityCritical, + AssociationComplianceSeverityHigh, + AssociationComplianceSeverityMedium, + AssociationComplianceSeverityLow, + AssociationComplianceSeverityUnspecified, + } +} + const ( // AssociationExecutionFilterKeyExecutionId is a AssociationExecutionFilterKey enum value AssociationExecutionFilterKeyExecutionId = "ExecutionId" @@ -44813,6 +47468,15 @@ const ( AssociationExecutionFilterKeyCreatedTime = "CreatedTime" ) +// AssociationExecutionFilterKey_Values returns all elements of the AssociationExecutionFilterKey enum +func AssociationExecutionFilterKey_Values() []string { + return []string{ + AssociationExecutionFilterKeyExecutionId, + AssociationExecutionFilterKeyStatus, + AssociationExecutionFilterKeyCreatedTime, + } +} + const ( // AssociationExecutionTargetsFilterKeyStatus is a AssociationExecutionTargetsFilterKey enum value AssociationExecutionTargetsFilterKeyStatus = "Status" @@ -44824,6 +47488,15 @@ const ( AssociationExecutionTargetsFilterKeyResourceType = "ResourceType" ) +// AssociationExecutionTargetsFilterKey_Values returns all elements of the AssociationExecutionTargetsFilterKey enum +func AssociationExecutionTargetsFilterKey_Values() []string { + return []string{ + AssociationExecutionTargetsFilterKeyStatus, + AssociationExecutionTargetsFilterKeyResourceId, + AssociationExecutionTargetsFilterKeyResourceType, + } +} + const ( // AssociationFilterKeyInstanceId is a AssociationFilterKey enum value AssociationFilterKeyInstanceId = "InstanceId" @@ -44845,8 +47518,25 @@ const ( // AssociationFilterKeyAssociationName is a AssociationFilterKey enum value AssociationFilterKeyAssociationName = "AssociationName" + + // AssociationFilterKeyResourceGroupName is a AssociationFilterKey enum value + AssociationFilterKeyResourceGroupName = "ResourceGroupName" ) +// AssociationFilterKey_Values returns all elements of the AssociationFilterKey enum +func AssociationFilterKey_Values() []string { + return []string{ + AssociationFilterKeyInstanceId, + AssociationFilterKeyName, + AssociationFilterKeyAssociationId, + AssociationFilterKeyAssociationStatusName, + AssociationFilterKeyLastExecutedBefore, + AssociationFilterKeyLastExecutedAfter, + AssociationFilterKeyAssociationName, + AssociationFilterKeyResourceGroupName, + } +} + const ( // AssociationFilterOperatorTypeEqual is a AssociationFilterOperatorType enum value AssociationFilterOperatorTypeEqual = "EQUAL" @@ -44858,6 +47548,15 @@ const ( AssociationFilterOperatorTypeGreaterThan = "GREATER_THAN" ) +// AssociationFilterOperatorType_Values returns all elements of the AssociationFilterOperatorType enum +func AssociationFilterOperatorType_Values() []string { + return []string{ + AssociationFilterOperatorTypeEqual, + AssociationFilterOperatorTypeLessThan, + AssociationFilterOperatorTypeGreaterThan, + } +} + const ( // AssociationStatusNamePending is a AssociationStatusName enum value AssociationStatusNamePending = "Pending" @@ -44869,11 +47568,43 @@ const ( AssociationStatusNameFailed = "Failed" ) +// AssociationStatusName_Values returns all elements of the AssociationStatusName enum +func AssociationStatusName_Values() []string { + return []string{ + 
AssociationStatusNamePending, + AssociationStatusNameSuccess, + AssociationStatusNameFailed, + } +} + +const ( + // AssociationSyncComplianceAuto is a AssociationSyncCompliance enum value + AssociationSyncComplianceAuto = "AUTO" + + // AssociationSyncComplianceManual is a AssociationSyncCompliance enum value + AssociationSyncComplianceManual = "MANUAL" +) + +// AssociationSyncCompliance_Values returns all elements of the AssociationSyncCompliance enum +func AssociationSyncCompliance_Values() []string { + return []string{ + AssociationSyncComplianceAuto, + AssociationSyncComplianceManual, + } +} + const ( // AttachmentHashTypeSha256 is a AttachmentHashType enum value AttachmentHashTypeSha256 = "Sha256" ) +// AttachmentHashType_Values returns all elements of the AttachmentHashType enum +func AttachmentHashType_Values() []string { + return []string{ + AttachmentHashTypeSha256, + } +} + const ( // AttachmentsSourceKeySourceUrl is a AttachmentsSourceKey enum value AttachmentsSourceKeySourceUrl = "SourceUrl" @@ -44885,6 +47616,15 @@ const ( AttachmentsSourceKeyAttachmentReference = "AttachmentReference" ) +// AttachmentsSourceKey_Values returns all elements of the AttachmentsSourceKey enum +func AttachmentsSourceKey_Values() []string { + return []string{ + AttachmentsSourceKeySourceUrl, + AttachmentsSourceKeyS3fileUrl, + AttachmentsSourceKeyAttachmentReference, + } +} + const ( // AutomationExecutionFilterKeyDocumentNamePrefix is a AutomationExecutionFilterKey enum value AutomationExecutionFilterKeyDocumentNamePrefix = "DocumentNamePrefix" @@ -44914,6 +47654,21 @@ const ( AutomationExecutionFilterKeyTagKey = "TagKey" ) +// AutomationExecutionFilterKey_Values returns all elements of the AutomationExecutionFilterKey enum +func AutomationExecutionFilterKey_Values() []string { + return []string{ + AutomationExecutionFilterKeyDocumentNamePrefix, + AutomationExecutionFilterKeyExecutionStatus, + AutomationExecutionFilterKeyExecutionId, + AutomationExecutionFilterKeyParentExecutionId, + AutomationExecutionFilterKeyCurrentAction, + AutomationExecutionFilterKeyStartTimeBefore, + AutomationExecutionFilterKeyStartTimeAfter, + AutomationExecutionFilterKeyAutomationType, + AutomationExecutionFilterKeyTagKey, + } +} + const ( // AutomationExecutionStatusPending is a AutomationExecutionStatus enum value AutomationExecutionStatusPending = "Pending" @@ -44940,6 +47695,20 @@ const ( AutomationExecutionStatusFailed = "Failed" ) +// AutomationExecutionStatus_Values returns all elements of the AutomationExecutionStatus enum +func AutomationExecutionStatus_Values() []string { + return []string{ + AutomationExecutionStatusPending, + AutomationExecutionStatusInProgress, + AutomationExecutionStatusWaiting, + AutomationExecutionStatusSuccess, + AutomationExecutionStatusTimedOut, + AutomationExecutionStatusCancelling, + AutomationExecutionStatusCancelled, + AutomationExecutionStatusFailed, + } +} + const ( // AutomationTypeCrossAccount is a AutomationType enum value AutomationTypeCrossAccount = "CrossAccount" @@ -44948,6 +47717,14 @@ const ( AutomationTypeLocal = "Local" ) +// AutomationType_Values returns all elements of the AutomationType enum +func AutomationType_Values() []string { + return []string{ + AutomationTypeCrossAccount, + AutomationTypeLocal, + } +} + const ( // CalendarStateOpen is a CalendarState enum value CalendarStateOpen = "OPEN" @@ -44956,6 +47733,14 @@ const ( CalendarStateClosed = "CLOSED" ) +// CalendarState_Values returns all elements of the CalendarState enum +func CalendarState_Values() []string 
{ + return []string{ + CalendarStateOpen, + CalendarStateClosed, + } +} + const ( // CommandFilterKeyInvokedAfter is a CommandFilterKey enum value CommandFilterKeyInvokedAfter = "InvokedAfter" @@ -44973,6 +47758,17 @@ const ( CommandFilterKeyDocumentName = "DocumentName" ) +// CommandFilterKey_Values returns all elements of the CommandFilterKey enum +func CommandFilterKey_Values() []string { + return []string{ + CommandFilterKeyInvokedAfter, + CommandFilterKeyInvokedBefore, + CommandFilterKeyStatus, + CommandFilterKeyExecutionStage, + CommandFilterKeyDocumentName, + } +} + const ( // CommandInvocationStatusPending is a CommandInvocationStatus enum value CommandInvocationStatusPending = "Pending" @@ -44999,6 +47795,20 @@ const ( CommandInvocationStatusCancelling = "Cancelling" ) +// CommandInvocationStatus_Values returns all elements of the CommandInvocationStatus enum +func CommandInvocationStatus_Values() []string { + return []string{ + CommandInvocationStatusPending, + CommandInvocationStatusInProgress, + CommandInvocationStatusDelayed, + CommandInvocationStatusSuccess, + CommandInvocationStatusCancelled, + CommandInvocationStatusTimedOut, + CommandInvocationStatusFailed, + CommandInvocationStatusCancelling, + } +} + const ( // CommandPluginStatusPending is a CommandPluginStatus enum value CommandPluginStatusPending = "Pending" @@ -45019,6 +47829,18 @@ const ( CommandPluginStatusFailed = "Failed" ) +// CommandPluginStatus_Values returns all elements of the CommandPluginStatus enum +func CommandPluginStatus_Values() []string { + return []string{ + CommandPluginStatusPending, + CommandPluginStatusInProgress, + CommandPluginStatusSuccess, + CommandPluginStatusTimedOut, + CommandPluginStatusCancelled, + CommandPluginStatusFailed, + } +} + const ( // CommandStatusPending is a CommandStatus enum value CommandStatusPending = "Pending" @@ -45042,6 +47864,19 @@ const ( CommandStatusCancelling = "Cancelling" ) +// CommandStatus_Values returns all elements of the CommandStatus enum +func CommandStatus_Values() []string { + return []string{ + CommandStatusPending, + CommandStatusInProgress, + CommandStatusSuccess, + CommandStatusCancelled, + CommandStatusFailed, + CommandStatusTimedOut, + CommandStatusCancelling, + } +} + const ( // ComplianceQueryOperatorTypeEqual is a ComplianceQueryOperatorType enum value ComplianceQueryOperatorTypeEqual = "EQUAL" @@ -45059,6 +47894,17 @@ const ( ComplianceQueryOperatorTypeGreaterThan = "GREATER_THAN" ) +// ComplianceQueryOperatorType_Values returns all elements of the ComplianceQueryOperatorType enum +func ComplianceQueryOperatorType_Values() []string { + return []string{ + ComplianceQueryOperatorTypeEqual, + ComplianceQueryOperatorTypeNotEqual, + ComplianceQueryOperatorTypeBeginWith, + ComplianceQueryOperatorTypeLessThan, + ComplianceQueryOperatorTypeGreaterThan, + } +} + const ( // ComplianceSeverityCritical is a ComplianceSeverity enum value ComplianceSeverityCritical = "CRITICAL" @@ -45079,6 +47925,18 @@ const ( ComplianceSeverityUnspecified = "UNSPECIFIED" ) +// ComplianceSeverity_Values returns all elements of the ComplianceSeverity enum +func ComplianceSeverity_Values() []string { + return []string{ + ComplianceSeverityCritical, + ComplianceSeverityHigh, + ComplianceSeverityMedium, + ComplianceSeverityLow, + ComplianceSeverityInformational, + ComplianceSeverityUnspecified, + } +} + const ( // ComplianceStatusCompliant is a ComplianceStatus enum value ComplianceStatusCompliant = "COMPLIANT" @@ -45087,6 +47945,30 @@ const ( ComplianceStatusNonCompliant = 
"NON_COMPLIANT" ) +// ComplianceStatus_Values returns all elements of the ComplianceStatus enum +func ComplianceStatus_Values() []string { + return []string{ + ComplianceStatusCompliant, + ComplianceStatusNonCompliant, + } +} + +const ( + // ComplianceUploadTypeComplete is a ComplianceUploadType enum value + ComplianceUploadTypeComplete = "COMPLETE" + + // ComplianceUploadTypePartial is a ComplianceUploadType enum value + ComplianceUploadTypePartial = "PARTIAL" +) + +// ComplianceUploadType_Values returns all elements of the ComplianceUploadType enum +func ComplianceUploadType_Values() []string { + return []string{ + ComplianceUploadTypeComplete, + ComplianceUploadTypePartial, + } +} + const ( // ConnectionStatusConnected is a ConnectionStatus enum value ConnectionStatusConnected = "Connected" @@ -45095,6 +47977,14 @@ const ( ConnectionStatusNotConnected = "NotConnected" ) +// ConnectionStatus_Values returns all elements of the ConnectionStatus enum +func ConnectionStatus_Values() []string { + return []string{ + ConnectionStatusConnected, + ConnectionStatusNotConnected, + } +} + const ( // DescribeActivationsFilterKeysActivationIds is a DescribeActivationsFilterKeys enum value DescribeActivationsFilterKeysActivationIds = "ActivationIds" @@ -45106,6 +47996,15 @@ const ( DescribeActivationsFilterKeysIamRole = "IamRole" ) +// DescribeActivationsFilterKeys_Values returns all elements of the DescribeActivationsFilterKeys enum +func DescribeActivationsFilterKeys_Values() []string { + return []string{ + DescribeActivationsFilterKeysActivationIds, + DescribeActivationsFilterKeysDefaultInstanceName, + DescribeActivationsFilterKeysIamRole, + } +} + const ( // DocumentFilterKeyName is a DocumentFilterKey enum value DocumentFilterKeyName = "Name" @@ -45120,6 +48019,16 @@ const ( DocumentFilterKeyDocumentType = "DocumentType" ) +// DocumentFilterKey_Values returns all elements of the DocumentFilterKey enum +func DocumentFilterKey_Values() []string { + return []string{ + DocumentFilterKeyName, + DocumentFilterKeyOwner, + DocumentFilterKeyPlatformTypes, + DocumentFilterKeyDocumentType, + } +} + const ( // DocumentFormatYaml is a DocumentFormat enum value DocumentFormatYaml = "YAML" @@ -45131,6 +48040,15 @@ const ( DocumentFormatText = "TEXT" ) +// DocumentFormat_Values returns all elements of the DocumentFormat enum +func DocumentFormat_Values() []string { + return []string{ + DocumentFormatYaml, + DocumentFormatJson, + DocumentFormatText, + } +} + const ( // DocumentHashTypeSha256 is a DocumentHashType enum value DocumentHashTypeSha256 = "Sha256" @@ -45139,6 +48057,14 @@ const ( DocumentHashTypeSha1 = "Sha1" ) +// DocumentHashType_Values returns all elements of the DocumentHashType enum +func DocumentHashType_Values() []string { + return []string{ + DocumentHashTypeSha256, + DocumentHashTypeSha1, + } +} + const ( // DocumentParameterTypeString is a DocumentParameterType enum value DocumentParameterTypeString = "String" @@ -45147,11 +48073,26 @@ const ( DocumentParameterTypeStringList = "StringList" ) +// DocumentParameterType_Values returns all elements of the DocumentParameterType enum +func DocumentParameterType_Values() []string { + return []string{ + DocumentParameterTypeString, + DocumentParameterTypeStringList, + } +} + const ( // DocumentPermissionTypeShare is a DocumentPermissionType enum value DocumentPermissionTypeShare = "Share" ) +// DocumentPermissionType_Values returns all elements of the DocumentPermissionType enum +func DocumentPermissionType_Values() []string { + return []string{ + 
DocumentPermissionTypeShare, + } +} + // The status of a document. const ( // DocumentStatusCreating is a DocumentStatus enum value @@ -45170,6 +48111,17 @@ const ( DocumentStatusFailed = "Failed" ) +// DocumentStatus_Values returns all elements of the DocumentStatus enum +func DocumentStatus_Values() []string { + return []string{ + DocumentStatusCreating, + DocumentStatusActive, + DocumentStatusUpdating, + DocumentStatusDeleting, + DocumentStatusFailed, + } +} + const ( // DocumentTypeCommand is a DocumentType enum value DocumentTypeCommand = "Command" @@ -45199,6 +48151,21 @@ const ( DocumentTypeChangeCalendar = "ChangeCalendar" ) +// DocumentType_Values returns all elements of the DocumentType enum +func DocumentType_Values() []string { + return []string{ + DocumentTypeCommand, + DocumentTypePolicy, + DocumentTypeAutomation, + DocumentTypeSession, + DocumentTypePackage, + DocumentTypeApplicationConfiguration, + DocumentTypeApplicationConfigurationSchema, + DocumentTypeDeploymentStrategy, + DocumentTypeChangeCalendar, + } +} + const ( // ExecutionModeAuto is a ExecutionMode enum value ExecutionModeAuto = "Auto" @@ -45207,6 +48174,14 @@ const ( ExecutionModeInteractive = "Interactive" ) +// ExecutionMode_Values returns all elements of the ExecutionMode enum +func ExecutionMode_Values() []string { + return []string{ + ExecutionModeAuto, + ExecutionModeInteractive, + } +} + const ( // FaultClient is a Fault enum value FaultClient = "Client" @@ -45218,6 +48193,15 @@ const ( FaultUnknown = "Unknown" ) +// Fault_Values returns all elements of the Fault enum +func Fault_Values() []string { + return []string{ + FaultClient, + FaultServer, + FaultUnknown, + } +} + const ( // InstanceInformationFilterKeyInstanceIds is a InstanceInformationFilterKey enum value InstanceInformationFilterKeyInstanceIds = "InstanceIds" @@ -45244,6 +48228,20 @@ const ( InstanceInformationFilterKeyAssociationStatus = "AssociationStatus" ) +// InstanceInformationFilterKey_Values returns all elements of the InstanceInformationFilterKey enum +func InstanceInformationFilterKey_Values() []string { + return []string{ + InstanceInformationFilterKeyInstanceIds, + InstanceInformationFilterKeyAgentVersion, + InstanceInformationFilterKeyPingStatus, + InstanceInformationFilterKeyPlatformTypes, + InstanceInformationFilterKeyActivationIds, + InstanceInformationFilterKeyIamRole, + InstanceInformationFilterKeyResourceType, + InstanceInformationFilterKeyAssociationStatus, + } +} + const ( // InstancePatchStateOperatorTypeEqual is a InstancePatchStateOperatorType enum value InstancePatchStateOperatorTypeEqual = "Equal" @@ -45258,6 +48256,16 @@ const ( InstancePatchStateOperatorTypeGreaterThan = "GreaterThan" ) +// InstancePatchStateOperatorType_Values returns all elements of the InstancePatchStateOperatorType enum +func InstancePatchStateOperatorType_Values() []string { + return []string{ + InstancePatchStateOperatorTypeEqual, + InstancePatchStateOperatorTypeNotEqual, + InstancePatchStateOperatorTypeLessThan, + InstancePatchStateOperatorTypeGreaterThan, + } +} + const ( // InventoryAttributeDataTypeString is a InventoryAttributeDataType enum value InventoryAttributeDataTypeString = "string" @@ -45266,6 +48274,14 @@ const ( InventoryAttributeDataTypeNumber = "number" ) +// InventoryAttributeDataType_Values returns all elements of the InventoryAttributeDataType enum +func InventoryAttributeDataType_Values() []string { + return []string{ + InventoryAttributeDataTypeString, + InventoryAttributeDataTypeNumber, + } +} + const ( // 
InventoryDeletionStatusInProgress is a InventoryDeletionStatus enum value InventoryDeletionStatusInProgress = "InProgress" @@ -45274,6 +48290,14 @@ const ( InventoryDeletionStatusComplete = "Complete" ) +// InventoryDeletionStatus_Values returns all elements of the InventoryDeletionStatus enum +func InventoryDeletionStatus_Values() []string { + return []string{ + InventoryDeletionStatusInProgress, + InventoryDeletionStatusComplete, + } +} + const ( // InventoryQueryOperatorTypeEqual is a InventoryQueryOperatorType enum value InventoryQueryOperatorTypeEqual = "Equal" @@ -45294,6 +48318,18 @@ const ( InventoryQueryOperatorTypeExists = "Exists" ) +// InventoryQueryOperatorType_Values returns all elements of the InventoryQueryOperatorType enum +func InventoryQueryOperatorType_Values() []string { + return []string{ + InventoryQueryOperatorTypeEqual, + InventoryQueryOperatorTypeNotEqual, + InventoryQueryOperatorTypeBeginWith, + InventoryQueryOperatorTypeLessThan, + InventoryQueryOperatorTypeGreaterThan, + InventoryQueryOperatorTypeExists, + } +} + const ( // InventorySchemaDeleteOptionDisableSchema is a InventorySchemaDeleteOption enum value InventorySchemaDeleteOptionDisableSchema = "DisableSchema" @@ -45302,6 +48338,14 @@ const ( InventorySchemaDeleteOptionDeleteSchema = "DeleteSchema" ) +// InventorySchemaDeleteOption_Values returns all elements of the InventorySchemaDeleteOption enum +func InventorySchemaDeleteOption_Values() []string { + return []string{ + InventorySchemaDeleteOptionDisableSchema, + InventorySchemaDeleteOptionDeleteSchema, + } +} + const ( // LastResourceDataSyncStatusSuccessful is a LastResourceDataSyncStatus enum value LastResourceDataSyncStatusSuccessful = "Successful" @@ -45313,6 +48357,15 @@ const ( LastResourceDataSyncStatusInProgress = "InProgress" ) +// LastResourceDataSyncStatus_Values returns all elements of the LastResourceDataSyncStatus enum +func LastResourceDataSyncStatus_Values() []string { + return []string{ + LastResourceDataSyncStatusSuccessful, + LastResourceDataSyncStatusFailed, + LastResourceDataSyncStatusInProgress, + } +} + const ( // MaintenanceWindowExecutionStatusPending is a MaintenanceWindowExecutionStatus enum value MaintenanceWindowExecutionStatusPending = "PENDING" @@ -45339,6 +48392,20 @@ const ( MaintenanceWindowExecutionStatusSkippedOverlapping = "SKIPPED_OVERLAPPING" ) +// MaintenanceWindowExecutionStatus_Values returns all elements of the MaintenanceWindowExecutionStatus enum +func MaintenanceWindowExecutionStatus_Values() []string { + return []string{ + MaintenanceWindowExecutionStatusPending, + MaintenanceWindowExecutionStatusInProgress, + MaintenanceWindowExecutionStatusSuccess, + MaintenanceWindowExecutionStatusFailed, + MaintenanceWindowExecutionStatusTimedOut, + MaintenanceWindowExecutionStatusCancelling, + MaintenanceWindowExecutionStatusCancelled, + MaintenanceWindowExecutionStatusSkippedOverlapping, + } +} + const ( // MaintenanceWindowResourceTypeInstance is a MaintenanceWindowResourceType enum value MaintenanceWindowResourceTypeInstance = "INSTANCE" @@ -45347,6 +48414,14 @@ const ( MaintenanceWindowResourceTypeResourceGroup = "RESOURCE_GROUP" ) +// MaintenanceWindowResourceType_Values returns all elements of the MaintenanceWindowResourceType enum +func MaintenanceWindowResourceType_Values() []string { + return []string{ + MaintenanceWindowResourceTypeInstance, + MaintenanceWindowResourceTypeResourceGroup, + } +} + const ( // MaintenanceWindowTaskTypeRunCommand is a MaintenanceWindowTaskType enum value 
MaintenanceWindowTaskTypeRunCommand = "RUN_COMMAND" @@ -45361,6 +48436,16 @@ const ( MaintenanceWindowTaskTypeLambda = "LAMBDA" ) +// MaintenanceWindowTaskType_Values returns all elements of the MaintenanceWindowTaskType enum +func MaintenanceWindowTaskType_Values() []string { + return []string{ + MaintenanceWindowTaskTypeRunCommand, + MaintenanceWindowTaskTypeAutomation, + MaintenanceWindowTaskTypeStepFunctions, + MaintenanceWindowTaskTypeLambda, + } +} + const ( // NotificationEventAll is a NotificationEvent enum value NotificationEventAll = "All" @@ -45381,6 +48466,18 @@ const ( NotificationEventFailed = "Failed" ) +// NotificationEvent_Values returns all elements of the NotificationEvent enum +func NotificationEvent_Values() []string { + return []string{ + NotificationEventAll, + NotificationEventInProgress, + NotificationEventSuccess, + NotificationEventTimedOut, + NotificationEventCancelled, + NotificationEventFailed, + } +} + const ( // NotificationTypeCommand is a NotificationType enum value NotificationTypeCommand = "Command" @@ -45389,6 +48486,14 @@ const ( NotificationTypeInvocation = "Invocation" ) +// NotificationType_Values returns all elements of the NotificationType enum +func NotificationType_Values() []string { + return []string{ + NotificationTypeCommand, + NotificationTypeInvocation, + } +} + const ( // OperatingSystemWindows is a OperatingSystem enum value OperatingSystemWindows = "WINDOWS" @@ -45410,8 +48515,29 @@ const ( // OperatingSystemCentos is a OperatingSystem enum value OperatingSystemCentos = "CENTOS" + + // OperatingSystemOracleLinux is a OperatingSystem enum value + OperatingSystemOracleLinux = "ORACLE_LINUX" + + // OperatingSystemDebian is a OperatingSystem enum value + OperatingSystemDebian = "DEBIAN" ) +// OperatingSystem_Values returns all elements of the OperatingSystem enum +func OperatingSystem_Values() []string { + return []string{ + OperatingSystemWindows, + OperatingSystemAmazonLinux, + OperatingSystemAmazonLinux2, + OperatingSystemUbuntu, + OperatingSystemRedhatEnterpriseLinux, + OperatingSystemSuse, + OperatingSystemCentos, + OperatingSystemOracleLinux, + OperatingSystemDebian, + } +} + const ( // OpsFilterOperatorTypeEqual is a OpsFilterOperatorType enum value OpsFilterOperatorTypeEqual = "Equal" @@ -45432,6 +48558,18 @@ const ( OpsFilterOperatorTypeExists = "Exists" ) +// OpsFilterOperatorType_Values returns all elements of the OpsFilterOperatorType enum +func OpsFilterOperatorType_Values() []string { + return []string{ + OpsFilterOperatorTypeEqual, + OpsFilterOperatorTypeNotEqual, + OpsFilterOperatorTypeBeginWith, + OpsFilterOperatorTypeLessThan, + OpsFilterOperatorTypeGreaterThan, + OpsFilterOperatorTypeExists, + } +} + const ( // OpsItemDataTypeSearchableString is a OpsItemDataType enum value OpsItemDataTypeSearchableString = "SearchableString" @@ -45440,6 +48578,14 @@ const ( OpsItemDataTypeString = "String" ) +// OpsItemDataType_Values returns all elements of the OpsItemDataType enum +func OpsItemDataType_Values() []string { + return []string{ + OpsItemDataTypeSearchableString, + OpsItemDataTypeString, + } +} + const ( // OpsItemFilterKeyStatus is a OpsItemFilterKey enum value OpsItemFilterKeyStatus = "Status" @@ -45487,6 +48633,27 @@ const ( OpsItemFilterKeySeverity = "Severity" ) +// OpsItemFilterKey_Values returns all elements of the OpsItemFilterKey enum +func OpsItemFilterKey_Values() []string { + return []string{ + OpsItemFilterKeyStatus, + OpsItemFilterKeyCreatedBy, + OpsItemFilterKeySource, + OpsItemFilterKeyPriority, + 
OpsItemFilterKeyTitle, + OpsItemFilterKeyOpsItemId, + OpsItemFilterKeyCreatedTime, + OpsItemFilterKeyLastModifiedTime, + OpsItemFilterKeyOperationalData, + OpsItemFilterKeyOperationalDataKey, + OpsItemFilterKeyOperationalDataValue, + OpsItemFilterKeyResourceId, + OpsItemFilterKeyAutomationId, + OpsItemFilterKeyCategory, + OpsItemFilterKeySeverity, + } +} + const ( // OpsItemFilterOperatorEqual is a OpsItemFilterOperator enum value OpsItemFilterOperatorEqual = "Equal" @@ -45501,6 +48668,16 @@ const ( OpsItemFilterOperatorLessThan = "LessThan" ) +// OpsItemFilterOperator_Values returns all elements of the OpsItemFilterOperator enum +func OpsItemFilterOperator_Values() []string { + return []string{ + OpsItemFilterOperatorEqual, + OpsItemFilterOperatorContains, + OpsItemFilterOperatorGreaterThan, + OpsItemFilterOperatorLessThan, + } +} + const ( // OpsItemStatusOpen is a OpsItemStatus enum value OpsItemStatusOpen = "Open" @@ -45512,6 +48689,15 @@ const ( OpsItemStatusResolved = "Resolved" ) +// OpsItemStatus_Values returns all elements of the OpsItemStatus enum +func OpsItemStatus_Values() []string { + return []string{ + OpsItemStatusOpen, + OpsItemStatusInProgress, + OpsItemStatusResolved, + } +} + const ( // ParameterTierStandard is a ParameterTier enum value ParameterTierStandard = "Standard" @@ -45523,6 +48709,15 @@ const ( ParameterTierIntelligentTiering = "Intelligent-Tiering" ) +// ParameterTier_Values returns all elements of the ParameterTier enum +func ParameterTier_Values() []string { + return []string{ + ParameterTierStandard, + ParameterTierAdvanced, + ParameterTierIntelligentTiering, + } +} + const ( // ParameterTypeString is a ParameterType enum value ParameterTypeString = "String" @@ -45534,6 +48729,15 @@ const ( ParameterTypeSecureString = "SecureString" ) +// ParameterType_Values returns all elements of the ParameterType enum +func ParameterType_Values() []string { + return []string{ + ParameterTypeString, + ParameterTypeStringList, + ParameterTypeSecureString, + } +} + const ( // ParametersFilterKeyName is a ParametersFilterKey enum value ParametersFilterKeyName = "Name" @@ -45545,6 +48749,15 @@ const ( ParametersFilterKeyKeyId = "KeyId" ) +// ParametersFilterKey_Values returns all elements of the ParametersFilterKey enum +func ParametersFilterKey_Values() []string { + return []string{ + ParametersFilterKeyName, + ParametersFilterKeyType, + ParametersFilterKeyKeyId, + } +} + const ( // PatchActionAllowAsDependency is a PatchAction enum value PatchActionAllowAsDependency = "ALLOW_AS_DEPENDENCY" @@ -45553,6 +48766,14 @@ const ( PatchActionBlock = "BLOCK" ) +// PatchAction_Values returns all elements of the PatchAction enum +func PatchAction_Values() []string { + return []string{ + PatchActionAllowAsDependency, + PatchActionBlock, + } +} + const ( // PatchComplianceDataStateInstalled is a PatchComplianceDataState enum value PatchComplianceDataStateInstalled = "INSTALLED" @@ -45576,6 +48797,19 @@ const ( PatchComplianceDataStateFailed = "FAILED" ) +// PatchComplianceDataState_Values returns all elements of the PatchComplianceDataState enum +func PatchComplianceDataState_Values() []string { + return []string{ + PatchComplianceDataStateInstalled, + PatchComplianceDataStateInstalledOther, + PatchComplianceDataStateInstalledPendingReboot, + PatchComplianceDataStateInstalledRejected, + PatchComplianceDataStateMissing, + PatchComplianceDataStateNotApplicable, + PatchComplianceDataStateFailed, + } +} + const ( // PatchComplianceLevelCritical is a PatchComplianceLevel enum value 
PatchComplianceLevelCritical = "CRITICAL" @@ -45596,6 +48830,18 @@ const ( PatchComplianceLevelUnspecified = "UNSPECIFIED" ) +// PatchComplianceLevel_Values returns all elements of the PatchComplianceLevel enum +func PatchComplianceLevel_Values() []string { + return []string{ + PatchComplianceLevelCritical, + PatchComplianceLevelHigh, + PatchComplianceLevelMedium, + PatchComplianceLevelLow, + PatchComplianceLevelInformational, + PatchComplianceLevelUnspecified, + } +} + const ( // PatchDeploymentStatusApproved is a PatchDeploymentStatus enum value PatchDeploymentStatusApproved = "APPROVED" @@ -45610,7 +48856,26 @@ const ( PatchDeploymentStatusExplicitRejected = "EXPLICIT_REJECTED" ) +// PatchDeploymentStatus_Values returns all elements of the PatchDeploymentStatus enum +func PatchDeploymentStatus_Values() []string { + return []string{ + PatchDeploymentStatusApproved, + PatchDeploymentStatusPendingApproval, + PatchDeploymentStatusExplicitApproved, + PatchDeploymentStatusExplicitRejected, + } +} + const ( + // PatchFilterKeyArch is a PatchFilterKey enum value + PatchFilterKeyArch = "ARCH" + + // PatchFilterKeyAdvisoryId is a PatchFilterKey enum value + PatchFilterKeyAdvisoryId = "ADVISORY_ID" + + // PatchFilterKeyBugzillaId is a PatchFilterKey enum value + PatchFilterKeyBugzillaId = "BUGZILLA_ID" + // PatchFilterKeyPatchSet is a PatchFilterKey enum value PatchFilterKeyPatchSet = "PATCH_SET" @@ -45623,9 +48888,18 @@ const ( // PatchFilterKeyClassification is a PatchFilterKey enum value PatchFilterKeyClassification = "CLASSIFICATION" + // PatchFilterKeyCveId is a PatchFilterKey enum value + PatchFilterKeyCveId = "CVE_ID" + + // PatchFilterKeyEpoch is a PatchFilterKey enum value + PatchFilterKeyEpoch = "EPOCH" + // PatchFilterKeyMsrcSeverity is a PatchFilterKey enum value PatchFilterKeyMsrcSeverity = "MSRC_SEVERITY" + // PatchFilterKeyName is a PatchFilterKey enum value + PatchFilterKeyName = "NAME" + // PatchFilterKeyPatchId is a PatchFilterKey enum value PatchFilterKeyPatchId = "PATCH_ID" @@ -45635,10 +48909,47 @@ const ( // PatchFilterKeyPriority is a PatchFilterKey enum value PatchFilterKeyPriority = "PRIORITY" + // PatchFilterKeyRepository is a PatchFilterKey enum value + PatchFilterKeyRepository = "REPOSITORY" + + // PatchFilterKeyRelease is a PatchFilterKey enum value + PatchFilterKeyRelease = "RELEASE" + // PatchFilterKeySeverity is a PatchFilterKey enum value PatchFilterKeySeverity = "SEVERITY" + + // PatchFilterKeySecurity is a PatchFilterKey enum value + PatchFilterKeySecurity = "SECURITY" + + // PatchFilterKeyVersion is a PatchFilterKey enum value + PatchFilterKeyVersion = "VERSION" ) +// PatchFilterKey_Values returns all elements of the PatchFilterKey enum +func PatchFilterKey_Values() []string { + return []string{ + PatchFilterKeyArch, + PatchFilterKeyAdvisoryId, + PatchFilterKeyBugzillaId, + PatchFilterKeyPatchSet, + PatchFilterKeyProduct, + PatchFilterKeyProductFamily, + PatchFilterKeyClassification, + PatchFilterKeyCveId, + PatchFilterKeyEpoch, + PatchFilterKeyMsrcSeverity, + PatchFilterKeyName, + PatchFilterKeyPatchId, + PatchFilterKeySection, + PatchFilterKeyPriority, + PatchFilterKeyRepository, + PatchFilterKeyRelease, + PatchFilterKeySeverity, + PatchFilterKeySecurity, + PatchFilterKeyVersion, + } +} + const ( // PatchOperationTypeScan is a PatchOperationType enum value PatchOperationTypeScan = "Scan" @@ -45647,6 +48958,14 @@ const ( PatchOperationTypeInstall = "Install" ) +// PatchOperationType_Values returns all elements of the PatchOperationType enum +func 
PatchOperationType_Values() []string { + return []string{ + PatchOperationTypeScan, + PatchOperationTypeInstall, + } +} + const ( // PatchPropertyProduct is a PatchProperty enum value PatchPropertyProduct = "PRODUCT" @@ -45667,6 +48986,18 @@ const ( PatchPropertySeverity = "SEVERITY" ) +// PatchProperty_Values returns all elements of the PatchProperty enum +func PatchProperty_Values() []string { + return []string{ + PatchPropertyProduct, + PatchPropertyProductFamily, + PatchPropertyClassification, + PatchPropertyMsrcSeverity, + PatchPropertyPriority, + PatchPropertySeverity, + } +} + const ( // PatchSetOs is a PatchSet enum value PatchSetOs = "OS" @@ -45675,6 +49006,14 @@ const ( PatchSetApplication = "APPLICATION" ) +// PatchSet_Values returns all elements of the PatchSet enum +func PatchSet_Values() []string { + return []string{ + PatchSetOs, + PatchSetApplication, + } +} + const ( // PingStatusOnline is a PingStatus enum value PingStatusOnline = "Online" @@ -45686,6 +49025,15 @@ const ( PingStatusInactive = "Inactive" ) +// PingStatus_Values returns all elements of the PingStatus enum +func PingStatus_Values() []string { + return []string{ + PingStatusOnline, + PingStatusConnectionLost, + PingStatusInactive, + } +} + const ( // PlatformTypeWindows is a PlatformType enum value PlatformTypeWindows = "Windows" @@ -45694,6 +49042,14 @@ const ( PlatformTypeLinux = "Linux" ) +// PlatformType_Values returns all elements of the PlatformType enum +func PlatformType_Values() []string { + return []string{ + PlatformTypeWindows, + PlatformTypeLinux, + } +} + const ( // RebootOptionRebootIfNeeded is a RebootOption enum value RebootOptionRebootIfNeeded = "RebootIfNeeded" @@ -45702,11 +49058,26 @@ const ( RebootOptionNoReboot = "NoReboot" ) +// RebootOption_Values returns all elements of the RebootOption enum +func RebootOption_Values() []string { + return []string{ + RebootOptionRebootIfNeeded, + RebootOptionNoReboot, + } +} + const ( // ResourceDataSyncS3FormatJsonSerDe is a ResourceDataSyncS3Format enum value ResourceDataSyncS3FormatJsonSerDe = "JsonSerDe" ) +// ResourceDataSyncS3Format_Values returns all elements of the ResourceDataSyncS3Format enum +func ResourceDataSyncS3Format_Values() []string { + return []string{ + ResourceDataSyncS3FormatJsonSerDe, + } +} + const ( // ResourceTypeManagedInstance is a ResourceType enum value ResourceTypeManagedInstance = "ManagedInstance" @@ -45718,6 +49089,15 @@ const ( ResourceTypeEc2instance = "EC2Instance" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeManagedInstance, + ResourceTypeDocument, + ResourceTypeEc2instance, + } +} + const ( // ResourceTypeForTaggingDocument is a ResourceTypeForTagging enum value ResourceTypeForTaggingDocument = "Document" @@ -45738,6 +49118,18 @@ const ( ResourceTypeForTaggingOpsItem = "OpsItem" ) +// ResourceTypeForTagging_Values returns all elements of the ResourceTypeForTagging enum +func ResourceTypeForTagging_Values() []string { + return []string{ + ResourceTypeForTaggingDocument, + ResourceTypeForTaggingManagedInstance, + ResourceTypeForTaggingMaintenanceWindow, + ResourceTypeForTaggingParameter, + ResourceTypeForTaggingPatchBaseline, + ResourceTypeForTaggingOpsItem, + } +} + const ( // SessionFilterKeyInvokedAfter is a SessionFilterKey enum value SessionFilterKeyInvokedAfter = "InvokedAfter" @@ -45755,6 +49147,17 @@ const ( SessionFilterKeyStatus = "Status" ) +// SessionFilterKey_Values returns all elements of the 
SessionFilterKey enum +func SessionFilterKey_Values() []string { + return []string{ + SessionFilterKeyInvokedAfter, + SessionFilterKeyInvokedBefore, + SessionFilterKeyTarget, + SessionFilterKeyOwner, + SessionFilterKeyStatus, + } +} + const ( // SessionStateActive is a SessionState enum value SessionStateActive = "Active" @@ -45763,6 +49166,14 @@ const ( SessionStateHistory = "History" ) +// SessionState_Values returns all elements of the SessionState enum +func SessionState_Values() []string { + return []string{ + SessionStateActive, + SessionStateHistory, + } +} + const ( // SessionStatusConnected is a SessionStatus enum value SessionStatusConnected = "Connected" @@ -45783,6 +49194,18 @@ const ( SessionStatusFailed = "Failed" ) +// SessionStatus_Values returns all elements of the SessionStatus enum +func SessionStatus_Values() []string { + return []string{ + SessionStatusConnected, + SessionStatusConnecting, + SessionStatusDisconnected, + SessionStatusTerminated, + SessionStatusTerminating, + SessionStatusFailed, + } +} + const ( // SignalTypeApprove is a SignalType enum value SignalTypeApprove = "Approve" @@ -45800,6 +49223,17 @@ const ( SignalTypeResume = "Resume" ) +// SignalType_Values returns all elements of the SignalType enum +func SignalType_Values() []string { + return []string{ + SignalTypeApprove, + SignalTypeReject, + SignalTypeStartStep, + SignalTypeStopStep, + SignalTypeResume, + } +} + const ( // StepExecutionFilterKeyStartTimeBefore is a StepExecutionFilterKey enum value StepExecutionFilterKeyStartTimeBefore = "StartTimeBefore" @@ -45820,6 +49254,18 @@ const ( StepExecutionFilterKeyAction = "Action" ) +// StepExecutionFilterKey_Values returns all elements of the StepExecutionFilterKey enum +func StepExecutionFilterKey_Values() []string { + return []string{ + StepExecutionFilterKeyStartTimeBefore, + StepExecutionFilterKeyStartTimeAfter, + StepExecutionFilterKeyStepExecutionStatus, + StepExecutionFilterKeyStepExecutionId, + StepExecutionFilterKeyStepName, + StepExecutionFilterKeyAction, + } +} + const ( // StopTypeComplete is a StopType enum value StopTypeComplete = "Complete" @@ -45827,3 +49273,11 @@ const ( // StopTypeCancel is a StopType enum value StopTypeCancel = "Cancel" ) + +// StopType_Values returns all elements of the StopType enum +func StopType_Values() []string { + return []string{ + StopTypeComplete, + StopTypeCancel, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go index 48d6d3ee3..2fe2457ca 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/doc.go @@ -8,19 +8,20 @@ // system (OS) patches, automating the creation of Amazon Machine Images (AMIs), // and configuring operating systems (OSs) and applications at scale. Systems // Manager lets you remotely and securely manage the configuration of your managed -// instances. A managed instance is any Amazon EC2 instance or on-premises machine -// in your hybrid environment that has been configured for Systems Manager. +// instances. A managed instance is any Amazon Elastic Compute Cloud instance +// (EC2 instance), or any on-premises server or virtual machine (VM) in your +// hybrid environment that has been configured for Systems Manager. // // This reference is intended to be used with the AWS Systems Manager User Guide -// (http://docs.aws.amazon.com/systems-manager/latest/userguide/). +// (https://docs.aws.amazon.com/systems-manager/latest/userguide/). 
// // To get started, verify prerequisites and configure managed instances. For -// more information, see Setting Up AWS Systems Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html) +// more information, see Setting up AWS Systems Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-setting-up.html) // in the AWS Systems Manager User Guide. // -// For information about other API actions you can perform on Amazon EC2 instances, -// see the Amazon EC2 API Reference (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/). -// For information about how to use a Query API, see Making API Requests (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/making-api-requests.html). +// For information about other API actions you can perform on EC2 instances, +// see the Amazon EC2 API Reference (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/). +// For information about how to use a Query API, see Making API requests (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/making-api-requests.html). // // See https://docs.aws.amazon.com/goto/WebAPI/ssm-2014-11-06 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go index 8e3eb9ba3..938a767cc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/errors.go @@ -134,7 +134,7 @@ const ( // window or Patch baseline, doesn't exist. // // For information about resource quotas in Systems Manager, see Systems Manager - // Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) + // service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. ErrCodeDoesNotExistException = "DoesNotExistException" @@ -169,7 +169,7 @@ const ( // "HierarchyLevelLimitExceededException". // // A hierarchy can have a maximum of 15 levels. For more information, see Requirements - // and Constraints for Parameter Names (http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) + // and constraints for parameter names (https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-parameter-name-constraints.html) // in the AWS Systems Manager User Guide. ErrCodeHierarchyLevelLimitExceededException = "HierarchyLevelLimitExceededException" @@ -489,7 +489,7 @@ const ( // The role name can't contain invalid characters. Also verify that you specified // an IAM role for notifications that includes the required trust policy. For // information about configuring the IAM role for Run Command notifications, - // see Configuring Amazon SNS Notifications for Run Command (http://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html) + // see Configuring Amazon SNS Notifications for Run Command (https://docs.aws.amazon.com/systems-manager/latest/userguide/rc-sns-notifications.html) // in the AWS Systems Manager User Guide. ErrCodeInvalidRole = "InvalidRole" @@ -503,7 +503,7 @@ const ( // "InvalidTarget". // // The target is not valid or does not exist. It might not be configured for - // EC2 Systems Manager or you might not have permission to perform the operation. + // Systems Manager or you might not have permission to perform the operation. 
ErrCodeInvalidTarget = "InvalidTarget" // ErrCodeInvalidTypeNameException for service response error code @@ -560,7 +560,7 @@ const ( // "OpsItemLimitExceededException". // // The request caused OpsItems to exceed one or more quotas. For information - // about OpsItem quotas, see What are the resource limits for OpsCenter? (http://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). + // about OpsItem quotas, see What are the resource limits for OpsCenter? (https://docs.aws.amazon.com/systems-manager/latest/userguide/OpsCenter-learn-more.html#OpsCenter-learn-more-limits). ErrCodeOpsItemLimitExceededException = "OpsItemLimitExceededException" // ErrCodeOpsItemNotFoundException for service response error code @@ -665,7 +665,7 @@ const ( // For example, too many maintenance windows or patch baselines have been created. // // For information about resource quotas in Systems Manager, see Systems Manager - // Service Quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) + // service quotas (http://docs.aws.amazon.com/general/latest/gr/ssm.html#limits_ssm) // in the AWS General Reference. ErrCodeResourceLimitExceededException = "ResourceLimitExceededException" @@ -699,9 +699,11 @@ const ( // "TargetNotConnected". // // The specified target instance for the session is not fully configured for - // use with Session Manager. For more information, see Getting Started with - // Session Manager (http://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) - // in the AWS Systems Manager User Guide. + // use with Session Manager. For more information, see Getting started with + // Session Manager (https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-getting-started.html) + // in the AWS Systems Manager User Guide. This error is also returned if you + // attempt to start a session on an instance that is located in a different + // account or Region ErrCodeTargetNotConnected = "TargetNotConnected" // ErrCodeTooManyTagsError for service response error code @@ -734,10 +736,10 @@ const ( // ErrCodeUnsupportedFeatureRequiredException for service response error code // "UnsupportedFeatureRequiredException". // - // Microsoft application patching is only available on EC2 instances and Advanced - // Instances. To patch Microsoft applications on on-premises servers and VMs, - // you must enable Advanced Instances. For more information, see Using the Advanced-Instances - // Tier (http://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) + // Microsoft application patching is only available on EC2 instances and advanced + // instances. To patch Microsoft applications on on-premises servers and VMs, + // you must enable advanced instances. For more information, see Using the advanced-instances + // tier (https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-managedinstances-advanced.html) // in the AWS Systems Manager User Guide. ErrCodeUnsupportedFeatureRequiredException = "UnsupportedFeatureRequiredException" @@ -761,8 +763,7 @@ const ( // "UnsupportedOperatingSystem". // // The operating systems you specified is not supported, or the operation is - // not supported for the operating system. Valid operating systems include: - // Windows, AmazonLinux, RedhatEnterpriseLinux, and Ubuntu. + // not supported for the operating system. 
ErrCodeUnsupportedOperatingSystem = "UnsupportedOperatingSystem" // ErrCodeUnsupportedParameterType for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go index 6aa946f41..9d0970209 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/waiters.go new file mode 100644 index 000000000..4bc0d1401 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/waiters.go @@ -0,0 +1,91 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssm + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilCommandExecuted uses the Amazon SSM API operation +// GetCommandInvocation to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *SSM) WaitUntilCommandExecuted(input *GetCommandInvocationInput) error { + return c.WaitUntilCommandExecutedWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilCommandExecutedWithContext is an extended version of WaitUntilCommandExecuted. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
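For reference, a minimal sketch of how the newly vendored CommandExecuted waiter can be driven from application code. The command ID, instance ID, and waiter overrides below are illustrative placeholders, and the session is assumed to resolve credentials and region from the default chain:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ssm"
)

func main() {
	sess := session.Must(session.NewSession())
	client := ssm.New(sess)

	// Bound the overall wait with a context deadline; the waiter polls
	// GetCommandInvocation until Status leaves Pending/InProgress/Delayed.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	err := client.WaitUntilCommandExecutedWithContext(ctx,
		&ssm.GetCommandInvocationInput{
			CommandId:  aws.String("11111111-2222-3333-4444-555555555555"), // placeholder command ID
			InstanceId: aws.String("i-0123456789abcdef0"),                  // placeholder instance ID
		},
		// Optional overrides of the generated defaults (20 attempts, 5s delay).
		request.WithWaiterMaxAttempts(60),
		request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
	)
	if err != nil {
		log.Fatalf("command did not reach Success: %v", err)
	}
	fmt.Println("command executed successfully")
}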
+func (c *SSM) WaitUntilCommandExecutedWithContext(ctx aws.Context, input *GetCommandInvocationInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilCommandExecuted", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "Pending", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "InProgress", + }, + { + State: request.RetryWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "Delayed", + }, + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "Success", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "Cancelled", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "TimedOut", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "Failed", + }, + { + State: request.FailureWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Status", + Expected: "Cancelling", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetCommandInvocationInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetCommandInvocationRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go index 43fe01b36..64bc174c8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/api.go @@ -60,8 +60,8 @@ func (c *StorageGateway) ActivateGatewayRequest(input *ActivateGatewayInput) (re // process, you specify information such as the AWS Region that you want to // use for storing snapshots or tapes, the time zone for scheduled snapshots // the gateway snapshot schedule window, an activation key, and a name for your -// gateway. The activation process also associates your gateway with your account; -// for more information, see UpdateGatewayInformation. +// gateway. The activation process also associates your gateway with your account. +// For more information, see UpdateGatewayInformation. // // You must turn on the gateway VM before you can activate your gateway. // @@ -148,8 +148,8 @@ func (c *StorageGateway) AddCacheRequest(input *AddCacheInput) (req *request.Req // AddCache API operation for AWS Storage Gateway. // // Configures one or more gateway local disks as cache for a gateway. This operation -// is only supported in the cached volume, tape and file gateway type (see Storage -// Gateway Concepts (https://docs.aws.amazon.com/storagegateway/latest/userguide/StorageGatewayConcepts.html)). +// is only supported in the cached volume, tape, and file gateway type (see +// How AWS Storage Gateway works (architecture) (https://docs.aws.amazon.com/storagegateway/latest/userguide/StorageGatewayConcepts.html). 
// // In the request, you specify the gateway Amazon Resource Name (ARN) to which // you want to add cache, and one or more disk IDs that you want to configure @@ -523,10 +523,10 @@ func (c *StorageGateway) AssignTapePoolRequest(input *AssignTapePoolInput) (req // Assigns a tape to a tape pool for archiving. The tape assigned to a pool // is archived in the S3 storage class that is associated with the pool. When // you use your backup application to eject the tape, the tape is archived directly -// into the S3 storage class (Glacier or Deep Archive) that corresponds to the -// pool. +// into the S3 storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds +// to the pool. // -// Valid values: "GLACIER", "DEEP_ARCHIVE" +// Valid Values: GLACIER | DEEP_ARCHIVE // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -972,15 +972,15 @@ func (c *StorageGateway) CreateNFSFileShareRequest(input *CreateNFSFileShareInpu // // Creates a Network File System (NFS) file share on an existing file gateway. // In Storage Gateway, a file share is a file system mount point backed by Amazon -// S3 cloud storage. Storage Gateway exposes file shares using a NFS interface. +// S3 cloud storage. Storage Gateway exposes file shares using an NFS interface. // This operation is only supported for file gateways. // // File gateway requires AWS Security Token Service (AWS STS) to be activated -// to enable you create a file share. Make sure AWS STS is activated in the +// to enable you to create a file share. Make sure AWS STS is activated in the // AWS Region you are creating your file gateway in. If AWS STS is not activated // in the AWS Region, activate it. For information about how to activate AWS -// STS, see Activating and Deactivating AWS STS in an AWS Region in the AWS -// Identity and Access Management User Guide. +// STS, see Activating and deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the AWS Identity and Access Management User Guide. // // File gateway does not support creating hard or symbolic links on a file share. // @@ -1068,14 +1068,14 @@ func (c *StorageGateway) CreateSMBFileShareRequest(input *CreateSMBFileShareInpu // // Creates a Server Message Block (SMB) file share on an existing file gateway. // In Storage Gateway, a file share is a file system mount point backed by Amazon -// S3 cloud storage. Storage Gateway expose file shares using a SMB interface. +// S3 cloud storage. Storage Gateway exposes file shares using an SMB interface. // This operation is only supported for file gateways. // // File gateways require AWS Security Token Service (AWS STS) to be activated // to enable you to create a file share. Make sure that AWS STS is activated // in the AWS Region you are creating your file gateway in. If AWS STS is not // activated in this AWS Region, activate it. For information about how to activate -// AWS STS, see Activating and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// AWS STS, see Activating and deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the AWS Identity and Access Management User Guide. // // File gateways don't support creating hard or symbolic links on a file share. 
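As a rough sketch of the file-share creation flow described above (every ARN, token, and name below is a placeholder; the gateway, the backing S3 bucket, and the IAM role that Storage Gateway assumes must already exist, and AWS STS must be active in the Region):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	client := storagegateway.New(session.Must(session.NewSession()))

	out, err := client.CreateSMBFileShare(&storagegateway.CreateSMBFileShareInput{
		ClientToken:    aws.String("example-idempotency-token-001"),                                      // placeholder idempotency token
		GatewayARN:     aws.String("arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12A3456B"), // placeholder gateway
		LocationARN:    aws.String("arn:aws:s3:::example-backing-bucket"),                                // S3 bucket backing the share
		Role:           aws.String("arn:aws:iam::123456789012:role/StorageGatewayS3Access"),              // role the gateway assumes for S3 access
		Authentication: aws.String("GuestAccess"),                                                        // or "ActiveDirectory"
	})
	if err != nil {
		log.Fatalf("CreateSMBFileShare failed: %v", err)
	}
	fmt.Println("created file share:", aws.StringValue(out.FileShareARN))
}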
@@ -1165,23 +1165,25 @@ func (c *StorageGateway) CreateSnapshotRequest(input *CreateSnapshotInput) (req // Initiates a snapshot of a volume. // // AWS Storage Gateway provides the ability to back up point-in-time snapshots -// of your data to Amazon Simple Storage (S3) for durable off-site recovery, +// of your data to Amazon Simple Storage (Amazon S3) for durable off-site recovery, // as well as import the data to an Amazon Elastic Block Store (EBS) volume // in Amazon Elastic Compute Cloud (EC2). You can take snapshots of your gateway -// volume on a scheduled or ad hoc basis. This API enables you to take ad-hoc -// snapshot. For more information, see Editing a Snapshot Schedule (https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#SchedulingSnapshot). +// volume on a scheduled or ad hoc basis. This API enables you to take an ad +// hoc snapshot. For more information, see Editing a snapshot schedule (https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#SchedulingSnapshot). // -// In the CreateSnapshot request you identify the volume by providing its Amazon +// In the CreateSnapshot request, you identify the volume by providing its Amazon // Resource Name (ARN). You must also provide description for the snapshot. // When AWS Storage Gateway takes the snapshot of specified volume, the snapshot -// and description appears in the AWS Storage Gateway Console. In response, +// and description appears in the AWS Storage Gateway console. In response, // AWS Storage Gateway returns you a snapshot ID. You can use this snapshot // ID to check the snapshot progress or later use it when you want to create // a volume from a snapshot. This operation is only supported in stored and // cached volume gateway type. // // To list or delete a snapshot, you must use the Amazon EC2 API. For more information, -// see DescribeSnapshots or DeleteSnapshot in the EC2 API reference (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Operations.html). +// see DescribeSnapshots (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) +// or DeleteSnapshot (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteSnapshot.html) +// in the Amazon Elastic Compute Cloud API Reference. // // Volume and snapshot IDs are changing to a longer length ID format. For more // information, see the important note on the Welcome (https://docs.aws.amazon.com/storagegateway/latest/APIReference/Welcome.html) @@ -1289,7 +1291,9 @@ func (c *StorageGateway) CreateSnapshotFromVolumeRecoveryPointRequest(input *Cre // a volume from a snapshot. // // To list or delete a snapshot, you must use the Amazon EC2 API. For more information, -// in Amazon Elastic Compute Cloud API Reference. +// see DescribeSnapshots (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) +// or DeleteSnapshot (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DeleteSnapshot.html) +// in the Amazon Elastic Compute Cloud API Reference. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1385,8 +1389,8 @@ func (c *StorageGateway) CreateStorediSCSIVolumeRequest(input *CreateStorediSCSI // snapshot, or create an empty volume. If you choose to create an empty gateway // volume, then any existing data on the disk is erased. 
// -// In the request you must specify the gateway and the disk information on which -// you are creating the volume. In response, the gateway creates the volume +// In the request, you must specify the gateway and the disk information on +// which you are creating the volume. In response, the gateway creates the volume // and returns volume information such as the volume Amazon Resource Name (ARN), // its size, and the iSCSI target ARN that initiators can use to connect to // the volume target. @@ -1429,6 +1433,91 @@ func (c *StorageGateway) CreateStorediSCSIVolumeWithContext(ctx aws.Context, inp return out, req.Send() } +const opCreateTapePool = "CreateTapePool" + +// CreateTapePoolRequest generates a "aws/request.Request" representing the +// client's request for the CreateTapePool operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTapePool for more information on using the CreateTapePool +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTapePoolRequest method. +// req, resp := client.CreateTapePoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateTapePool +func (c *StorageGateway) CreateTapePoolRequest(input *CreateTapePoolInput) (req *request.Request, output *CreateTapePoolOutput) { + op := &request.Operation{ + Name: opCreateTapePool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTapePoolInput{} + } + + output = &CreateTapePoolOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTapePool API operation for AWS Storage Gateway. +// +// Creates a new custom tape pool. You can use custom tape pool to enable tape +// retention lock on tapes that are archived in the custom pool. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Storage Gateway's +// API operation CreateTapePool for usage and error information. +// +// Returned Error Types: +// * InvalidGatewayRequestException +// An exception occurred because an invalid gateway request was issued to the +// service. For more information, see the error and message fields. +// +// * InternalServerError +// An internal server error has occurred during the request. For more information, +// see the error and message fields. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/CreateTapePool +func (c *StorageGateway) CreateTapePool(input *CreateTapePoolInput) (*CreateTapePoolOutput, error) { + req, out := c.CreateTapePoolRequest(input) + return out, req.Send() +} + +// CreateTapePoolWithContext is the same as CreateTapePool with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTapePool for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *StorageGateway) CreateTapePoolWithContext(ctx aws.Context, input *CreateTapePoolInput, opts ...request.Option) (*CreateTapePoolOutput, error) { + req, out := c.CreateTapePoolRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateTapeWithBarcode = "CreateTapeWithBarcode" // CreateTapeWithBarcodeRequest generates a "aws/request.Request" representing the @@ -1474,9 +1563,9 @@ func (c *StorageGateway) CreateTapeWithBarcodeRequest(input *CreateTapeWithBarco // CreateTapeWithBarcode API operation for AWS Storage Gateway. // // Creates a virtual tape by using your own barcode. You write data to the virtual -// tape and then archive the tape. A barcode is unique and can not be reused -// if it has already been used on a tape . This applies to barcodes used on -// deleted tapes. This operation is only supported in the tape gateway type. +// tape and then archive the tape. A barcode is unique and cannot be reused +// if it has already been used on a tape. This applies to barcodes used on deleted +// tapes. This operation is only supported in the tape gateway type. // // Cache storage must be allocated to the gateway before you can create a virtual // tape. Use the AddCache operation to add cache storage to a gateway. @@ -1608,6 +1697,92 @@ func (c *StorageGateway) CreateTapesWithContext(ctx aws.Context, input *CreateTa return out, req.Send() } +const opDeleteAutomaticTapeCreationPolicy = "DeleteAutomaticTapeCreationPolicy" + +// DeleteAutomaticTapeCreationPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAutomaticTapeCreationPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAutomaticTapeCreationPolicy for more information on using the DeleteAutomaticTapeCreationPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteAutomaticTapeCreationPolicyRequest method. +// req, resp := client.DeleteAutomaticTapeCreationPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteAutomaticTapeCreationPolicy +func (c *StorageGateway) DeleteAutomaticTapeCreationPolicyRequest(input *DeleteAutomaticTapeCreationPolicyInput) (req *request.Request, output *DeleteAutomaticTapeCreationPolicyOutput) { + op := &request.Operation{ + Name: opDeleteAutomaticTapeCreationPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAutomaticTapeCreationPolicyInput{} + } + + output = &DeleteAutomaticTapeCreationPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteAutomaticTapeCreationPolicy API operation for AWS Storage Gateway. +// +// Deletes the automatic tape creation policy of a gateway. 
If you delete this +// policy, new virtual tapes must be created manually. Use the Amazon Resource +// Name (ARN) of the gateway in your request to remove the policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Storage Gateway's +// API operation DeleteAutomaticTapeCreationPolicy for usage and error information. +// +// Returned Error Types: +// * InvalidGatewayRequestException +// An exception occurred because an invalid gateway request was issued to the +// service. For more information, see the error and message fields. +// +// * InternalServerError +// An internal server error has occurred during the request. For more information, +// see the error and message fields. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteAutomaticTapeCreationPolicy +func (c *StorageGateway) DeleteAutomaticTapeCreationPolicy(input *DeleteAutomaticTapeCreationPolicyInput) (*DeleteAutomaticTapeCreationPolicyOutput, error) { + req, out := c.DeleteAutomaticTapeCreationPolicyRequest(input) + return out, req.Send() +} + +// DeleteAutomaticTapeCreationPolicyWithContext is the same as DeleteAutomaticTapeCreationPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAutomaticTapeCreationPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *StorageGateway) DeleteAutomaticTapeCreationPolicyWithContext(ctx aws.Context, input *DeleteAutomaticTapeCreationPolicyInput, opts ...request.Option) (*DeleteAutomaticTapeCreationPolicyOutput, error) { + req, out := c.DeleteAutomaticTapeCreationPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteBandwidthRateLimit = "DeleteBandwidthRateLimit" // DeleteBandwidthRateLimitRequest generates a "aws/request.Request" representing the @@ -1927,7 +2102,7 @@ func (c *StorageGateway) DeleteGatewayRequest(input *DeleteGatewayInput) (req *r // for these snapshots. You can choose to remove all remaining Amazon EBS snapshots // by canceling your Amazon EC2 subscription. If you prefer not to cancel your // Amazon EC2 subscription, you can delete your snapshots using the Amazon EC2 -// console. For more information, see the AWS Storage Gateway Detail Page (http://aws.amazon.com/storagegateway). +// console. For more information, see the AWS Storage Gateway detail page (http://aws.amazon.com/storagegateway). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2015,13 +2190,14 @@ func (c *StorageGateway) DeleteSnapshotScheduleRequest(input *DeleteSnapshotSche // // You can take snapshots of your gateway volumes on a scheduled or ad hoc basis. // This API action enables you to delete a snapshot schedule for a volume. For -// more information, see Working with Snapshots (https://docs.aws.amazon.com/storagegateway/latest/userguide/WorkingWithSnapshots.html). 
+// more information, see Backing up your volumes (https://docs.aws.amazon.com/storagegateway/latest/userguide/backing-up-volumes.html). // In the DeleteSnapshotSchedule request, you identify the volume by providing // its Amazon Resource Name (ARN). This operation is only supported in stored // and cached volume gateway types. // -// To list or delete a snapshot, you must use the Amazon EC2 API. in Amazon -// Elastic Compute Cloud API Reference. +// To list or delete a snapshot, you must use the Amazon EC2 API. For more information, +// go to DescribeSnapshots (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) +// in the Amazon Elastic Compute Cloud API Reference. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2231,6 +2407,92 @@ func (c *StorageGateway) DeleteTapeArchiveWithContext(ctx aws.Context, input *De return out, req.Send() } +const opDeleteTapePool = "DeleteTapePool" + +// DeleteTapePoolRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTapePool operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTapePool for more information on using the DeleteTapePool +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTapePoolRequest method. +// req, resp := client.DeleteTapePoolRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteTapePool +func (c *StorageGateway) DeleteTapePoolRequest(input *DeleteTapePoolInput) (req *request.Request, output *DeleteTapePoolOutput) { + op := &request.Operation{ + Name: opDeleteTapePool, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTapePoolInput{} + } + + output = &DeleteTapePoolOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteTapePool API operation for AWS Storage Gateway. +// +// Delete a custom tape pool. A custom tape pool can only be deleted if there +// are no tapes in the pool and if there are no automatic tape creation policies +// that reference the custom tape pool. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Storage Gateway's +// API operation DeleteTapePool for usage and error information. +// +// Returned Error Types: +// * InvalidGatewayRequestException +// An exception occurred because an invalid gateway request was issued to the +// service. For more information, see the error and message fields. +// +// * InternalServerError +// An internal server error has occurred during the request. For more information, +// see the error and message fields. 
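To make the new tape-pool operations concrete, a hedged sketch that creates a custom pool with a governance retention lock and later deletes it; the pool name, lock settings, and error handling are illustrative only:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	client := storagegateway.New(session.Must(session.NewSession()))

	// Create a custom pool backed by S3 Glacier Deep Archive with a
	// 90-day governance-mode retention lock (values are placeholders).
	created, err := client.CreateTapePool(&storagegateway.CreateTapePoolInput{
		PoolName:                aws.String("example-compliance-pool"),
		StorageClass:            aws.String("DEEP_ARCHIVE"),
		RetentionLockType:       aws.String("GOVERNANCE"),
		RetentionLockTimeInDays: aws.Int64(90),
	})
	if err != nil {
		log.Fatalf("CreateTapePool failed: %v", err)
	}
	fmt.Println("created pool:", aws.StringValue(created.PoolARN))

	// A pool can only be deleted once it holds no tapes and no automatic
	// tape creation policy references it.
	if _, err := client.DeleteTapePool(&storagegateway.DeleteTapePoolInput{
		PoolARN: created.PoolARN,
	}); err != nil {
		log.Fatalf("DeleteTapePool failed: %v", err)
	}
}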
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/DeleteTapePool +func (c *StorageGateway) DeleteTapePool(input *DeleteTapePoolInput) (*DeleteTapePoolOutput, error) { + req, out := c.DeleteTapePoolRequest(input) + return out, req.Send() +} + +// DeleteTapePoolWithContext is the same as DeleteTapePool with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteTapePool for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *StorageGateway) DeleteTapePoolWithContext(ctx aws.Context, input *DeleteTapePoolInput, opts ...request.Option) (*DeleteTapePoolOutput, error) { + req, out := c.DeleteTapePoolRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteVolume = "DeleteVolume" // DeleteVolumeRequest generates a "aws/request.Request" representing the @@ -2461,7 +2723,7 @@ func (c *StorageGateway) DescribeBandwidthRateLimitRequest(input *DescribeBandwi // // Returns the bandwidth rate limits of a gateway. By default, these limits // are not set, which means no bandwidth rate limiting is in effect. This operation -// is supported for the stored volume, cached volume and tape gateway types.' +// is supported for the stored volume, cached volume, and tape gateway types. // // This operation only returns a value for a bandwidth rate limit only if the // limit is set. If no limits are set for the gateway, then this operation returns @@ -2551,7 +2813,7 @@ func (c *StorageGateway) DescribeCacheRequest(input *DescribeCacheInput) (req *r // DescribeCache API operation for AWS Storage Gateway. // // Returns information about the cache of a gateway. This operation is only -// supported in the cached volume, tape and file gateway types. +// supported in the cached volume, tape, and file gateway types. // // The response includes disk IDs that are configured as cache, and it includes // the amount of cache allocated and used. @@ -2642,7 +2904,7 @@ func (c *StorageGateway) DescribeCachediSCSIVolumesRequest(input *DescribeCached // operation is only supported in the cached volume gateway types. // // The list of gateway volumes in the request must be from one gateway. In the -// response Amazon Storage Gateway returns volume information sorted by volume +// response, AWS Storage Gateway returns volume information sorted by volume // Amazon Resource Name (ARN). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3329,7 +3591,7 @@ func (c *StorageGateway) DescribeStorediSCSIVolumesRequest(input *DescribeStored // // Returns the description of the gateway volumes specified in the request. // The list of gateway volumes in the request must be from one gateway. In the -// response Amazon Storage Gateway returns volume information sorted by volume +// response, AWS Storage Gateway returns volume information sorted by volume // ARNs. This operation is only supported in stored volume gateway type. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3854,7 +4116,7 @@ func (c *StorageGateway) DescribeUploadBufferRequest(input *DescribeUploadBuffer // DescribeUploadBuffer API operation for AWS Storage Gateway. 
// // Returns information about the upload buffer of a gateway. This operation -// is supported for the stored volume, cached volume and tape gateway types. +// is supported for the stored volume, cached volume, and tape gateway types. // // The response includes disk IDs that are configured as upload buffer space, // and it includes the amount of upload buffer space allocated and used. @@ -4274,7 +4536,7 @@ func (c *StorageGateway) DisableGatewayRequest(input *DisableGatewayInput) (req // Use this operation for a tape gateway that is not reachable or not functioning. // This operation is only supported in the tape gateway type. // -// Once a gateway is disabled it cannot be enabled. +// After a gateway is disabled, it cannot be enabled. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4399,66 +4661,61 @@ func (c *StorageGateway) JoinDomainWithContext(ctx aws.Context, input *JoinDomai return out, req.Send() } -const opListFileShares = "ListFileShares" +const opListAutomaticTapeCreationPolicies = "ListAutomaticTapeCreationPolicies" -// ListFileSharesRequest generates a "aws/request.Request" representing the -// client's request for the ListFileShares operation. The "output" return +// ListAutomaticTapeCreationPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListAutomaticTapeCreationPolicies operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListFileShares for more information on using the ListFileShares +// See ListAutomaticTapeCreationPolicies for more information on using the ListAutomaticTapeCreationPolicies // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListFileSharesRequest method. -// req, resp := client.ListFileSharesRequest(params) +// // Example sending a request using the ListAutomaticTapeCreationPoliciesRequest method. 
+// req, resp := client.ListAutomaticTapeCreationPoliciesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListFileShares -func (c *StorageGateway) ListFileSharesRequest(input *ListFileSharesInput) (req *request.Request, output *ListFileSharesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListAutomaticTapeCreationPolicies +func (c *StorageGateway) ListAutomaticTapeCreationPoliciesRequest(input *ListAutomaticTapeCreationPoliciesInput) (req *request.Request, output *ListAutomaticTapeCreationPoliciesOutput) { op := &request.Operation{ - Name: opListFileShares, + Name: opListAutomaticTapeCreationPolicies, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker"}, - LimitToken: "Limit", - TruncationToken: "", - }, } if input == nil { - input = &ListFileSharesInput{} + input = &ListAutomaticTapeCreationPoliciesInput{} } - output = &ListFileSharesOutput{} + output = &ListAutomaticTapeCreationPoliciesOutput{} req = c.newRequest(op, input, output) return } -// ListFileShares API operation for AWS Storage Gateway. +// ListAutomaticTapeCreationPolicies API operation for AWS Storage Gateway. // -// Gets a list of the file shares for a specific file gateway, or the list of -// file shares that belong to the calling user account. This operation is only -// supported for file gateways. +// Lists the automatic tape creation policies for a gateway. If there are no +// automatic tape creation policies for the gateway, it returns an empty list. +// +// This operation is only supported for tape gateways. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Storage Gateway's -// API operation ListFileShares for usage and error information. +// API operation ListAutomaticTapeCreationPolicies for usage and error information. // // Returned Error Types: // * InvalidGatewayRequestException @@ -4469,53 +4726,145 @@ func (c *StorageGateway) ListFileSharesRequest(input *ListFileSharesInput) (req // An internal server error has occurred during the request. For more information, // see the error and message fields. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListFileShares -func (c *StorageGateway) ListFileShares(input *ListFileSharesInput) (*ListFileSharesOutput, error) { - req, out := c.ListFileSharesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListAutomaticTapeCreationPolicies +func (c *StorageGateway) ListAutomaticTapeCreationPolicies(input *ListAutomaticTapeCreationPoliciesInput) (*ListAutomaticTapeCreationPoliciesOutput, error) { + req, out := c.ListAutomaticTapeCreationPoliciesRequest(input) return out, req.Send() } -// ListFileSharesWithContext is the same as ListFileShares with the addition of +// ListAutomaticTapeCreationPoliciesWithContext is the same as ListAutomaticTapeCreationPolicies with the addition of // the ability to pass a context and additional request options. // -// See ListFileShares for details on how to use this API operation. +// See ListAutomaticTapeCreationPolicies for details on how to use this API operation. 
// // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *StorageGateway) ListFileSharesWithContext(ctx aws.Context, input *ListFileSharesInput, opts ...request.Option) (*ListFileSharesOutput, error) { - req, out := c.ListFileSharesRequest(input) +func (c *StorageGateway) ListAutomaticTapeCreationPoliciesWithContext(ctx aws.Context, input *ListAutomaticTapeCreationPoliciesInput, opts ...request.Option) (*ListAutomaticTapeCreationPoliciesOutput, error) { + req, out := c.ListAutomaticTapeCreationPoliciesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListFileSharesPages iterates over the pages of a ListFileShares operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. +const opListFileShares = "ListFileShares" + +// ListFileSharesRequest generates a "aws/request.Request" representing the +// client's request for the ListFileShares operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See ListFileShares method for more information on how to use this operation. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Note: This operation can generate multiple requests to a service. +// See ListFileShares for more information on using the ListFileShares +// API call, and error handling. // -// // Example iterating over at most 3 pages of a ListFileShares operation. -// pageNum := 0 -// err := client.ListFileSharesPages(params, -// func(page *storagegateway.ListFileSharesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -func (c *StorageGateway) ListFileSharesPages(input *ListFileSharesInput, fn func(*ListFileSharesOutput, bool) bool) error { - return c.ListFileSharesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListFileSharesPagesWithContext same as ListFileSharesPages except -// it takes a Context and allows setting request options on the pages. // -// The context must be non-nil and will be used for request cancellation. If +// // Example sending a request using the ListFileSharesRequest method. +// req, resp := client.ListFileSharesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListFileShares +func (c *StorageGateway) ListFileSharesRequest(input *ListFileSharesInput) (req *request.Request, output *ListFileSharesOutput) { + op := &request.Operation{ + Name: opListFileShares, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListFileSharesInput{} + } + + output = &ListFileSharesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListFileShares API operation for AWS Storage Gateway. 
+// +// Gets a list of the file shares for a specific file gateway, or the list of +// file shares that belong to the calling user account. This operation is only +// supported for file gateways. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Storage Gateway's +// API operation ListFileShares for usage and error information. +// +// Returned Error Types: +// * InvalidGatewayRequestException +// An exception occurred because an invalid gateway request was issued to the +// service. For more information, see the error and message fields. +// +// * InternalServerError +// An internal server error has occurred during the request. For more information, +// see the error and message fields. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListFileShares +func (c *StorageGateway) ListFileShares(input *ListFileSharesInput) (*ListFileSharesOutput, error) { + req, out := c.ListFileSharesRequest(input) + return out, req.Send() +} + +// ListFileSharesWithContext is the same as ListFileShares with the addition of +// the ability to pass a context and additional request options. +// +// See ListFileShares for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *StorageGateway) ListFileSharesWithContext(ctx aws.Context, input *ListFileSharesInput, opts ...request.Option) (*ListFileSharesOutput, error) { + req, out := c.ListFileSharesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListFileSharesPages iterates over the pages of a ListFileShares operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListFileShares method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListFileShares operation. +// pageNum := 0 +// err := client.ListFileSharesPages(params, +// func(page *storagegateway.ListFileSharesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *StorageGateway) ListFileSharesPages(input *ListFileSharesInput, fn func(*ListFileSharesOutput, bool) bool) error { + return c.ListFileSharesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListFileSharesPagesWithContext same as ListFileSharesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. @@ -4931,6 +5280,98 @@ func (c *StorageGateway) ListTagsForResourcePagesWithContext(ctx aws.Context, in return p.Err() } +const opListTapePools = "ListTapePools" + +// ListTapePoolsRequest generates a "aws/request.Request" representing the +// client's request for the ListTapePools operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTapePools for more information on using the ListTapePools +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTapePoolsRequest method. +// req, resp := client.ListTapePoolsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListTapePools +func (c *StorageGateway) ListTapePoolsRequest(input *ListTapePoolsInput) (req *request.Request, output *ListTapePoolsOutput) { + op := &request.Operation{ + Name: opListTapePools, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTapePoolsInput{} + } + + output = &ListTapePoolsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTapePools API operation for AWS Storage Gateway. +// +// Lists custom tape pools. You specify custom tape pools to list by specifying +// one or more custom tape pool Amazon Resource Names (ARNs). If you don't specify +// a custom tape pool ARN, the operation lists all custom tape pools. +// +// This operation supports pagination. You can optionally specify the Limit +// parameter in the body to limit the number of tape pools in the response. +// If the number of tape pools returned in the response is truncated, the response +// includes a Marker element that you can use in your subsequent request to +// retrieve the next set of tape pools. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Storage Gateway's +// API operation ListTapePools for usage and error information. +// +// Returned Error Types: +// * InvalidGatewayRequestException +// An exception occurred because an invalid gateway request was issued to the +// service. For more information, see the error and message fields. +// +// * InternalServerError +// An internal server error has occurred during the request. For more information, +// see the error and message fields. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/ListTapePools +func (c *StorageGateway) ListTapePools(input *ListTapePoolsInput) (*ListTapePoolsOutput, error) { + req, out := c.ListTapePoolsRequest(input) + return out, req.Send() +} + +// ListTapePoolsWithContext is the same as ListTapePools with the addition of +// the ability to pass a context and additional request options. +// +// See ListTapePools for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
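Since ListTapePools is added here without a generated Pages helper, pagination has to be driven by hand via Limit and Marker, roughly as in this sketch (the page size is arbitrary):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	client := storagegateway.New(session.Must(session.NewSession()))

	// Walk every custom tape pool, 25 per page, following the Marker token
	// until the service stops returning one.
	input := &storagegateway.ListTapePoolsInput{Limit: aws.Int64(25)}
	for {
		page, err := client.ListTapePools(input)
		if err != nil {
			log.Fatalf("ListTapePools failed: %v", err)
		}
		for _, pool := range page.PoolInfos {
			fmt.Printf("%s\t%s\t%s\n",
				aws.StringValue(pool.PoolName),
				aws.StringValue(pool.StorageClass),
				aws.StringValue(pool.PoolARN))
		}
		if aws.StringValue(page.Marker) == "" {
			break // no more pages
		}
		input.Marker = page.Marker
	}
}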
+func (c *StorageGateway) ListTapePoolsWithContext(ctx aws.Context, input *ListTapePoolsInput, opts ...request.Option) (*ListTapePoolsOutput, error) { + req, out := c.ListTapePoolsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListTapes = "ListTapes" // ListTapesRequest generates a "aws/request.Request" representing the @@ -5469,8 +5910,8 @@ func (c *StorageGateway) NotifyWhenUploadedRequest(input *NotifyWhenUploadedInpu // event targets such as Amazon SNS or AWS Lambda function. This operation is // only supported for file gateways. // -// For more information, see Getting File Upload Notification in the Storage -// Gateway User Guide (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification). +// For more information, see Getting file upload notification (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-upload-notification) +// in the AWS Storage Gateway User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5555,11 +5996,12 @@ func (c *StorageGateway) RefreshCacheRequest(input *RefreshCacheInput) (req *req // RefreshCache API operation for AWS Storage Gateway. // // Refreshes the cache for the specified file share. This operation finds objects -// in the Amazon S3 bucket that were added, removed or replaced since the gateway +// in the Amazon S3 bucket that were added, removed, or replaced since the gateway // last listed the bucket's contents and cached the results. This operation // is only supported in the file gateway type. You can subscribe to be notified // through an Amazon CloudWatch event when your RefreshCache operation completes. -// For more information, see Getting Notified About File Operations (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification). +// For more information, see Getting notified about file operations (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification) +// in the AWS Storage Gateway User Guide. // // When this API is called, it only initiates the refresh operation. When the // API call completes and returns a success code, it doesn't necessarily mean @@ -5571,13 +6013,15 @@ func (c *StorageGateway) RefreshCacheRequest(input *RefreshCacheInput) (req *req // Throttle limit: This API is asynchronous so the gateway will accept no more // than two refreshes at any time. We recommend using the refresh-complete CloudWatch // event notification before issuing additional requests. For more information, -// see Getting Notified About File Operations (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification). +// see Getting notified about file operations (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification) +// in the AWS Storage Gateway User Guide. // // If you invoke the RefreshCache API when two requests are already being processed, // any new request will cause an InvalidGatewayRequestException error because // too many requests were sent to the server. // -// For more information, see "https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification". 
+// For more information, see Getting notified about file operations (https://docs.aws.amazon.com/storagegateway/latest/userguide/monitoring-file-gateway.html#get-notification) +// in the AWS Storage Gateway User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -5746,12 +6190,12 @@ func (c *StorageGateway) ResetCacheRequest(input *ResetCacheInput) (req *request // ResetCache API operation for AWS Storage Gateway. // -// Resets all cache disks that have encountered a error and makes the disks +// Resets all cache disks that have encountered an error and makes the disks // available for reconfiguration as cache storage. If your cache disk encounters -// a error, the gateway prevents read and write operations on virtual tapes +// an error, the gateway prevents read and write operations on virtual tapes // in the gateway. For example, an error can occur when a disk is corrupted // or removed from the gateway. When a cache is reset, the gateway loses its -// cache storage. At this point you can reconfigure the disks as cache disks. +// cache storage. At this point, you can reconfigure the disks as cache disks. // This operation is only supported in the cached volume and tape types. // // If the cache disk you are resetting contains data that has not been uploaded @@ -6444,6 +6888,96 @@ func (c *StorageGateway) StartGatewayWithContext(ctx aws.Context, input *StartGa return out, req.Send() } +const opUpdateAutomaticTapeCreationPolicy = "UpdateAutomaticTapeCreationPolicy" + +// UpdateAutomaticTapeCreationPolicyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAutomaticTapeCreationPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAutomaticTapeCreationPolicy for more information on using the UpdateAutomaticTapeCreationPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAutomaticTapeCreationPolicyRequest method. +// req, resp := client.UpdateAutomaticTapeCreationPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateAutomaticTapeCreationPolicy +func (c *StorageGateway) UpdateAutomaticTapeCreationPolicyRequest(input *UpdateAutomaticTapeCreationPolicyInput) (req *request.Request, output *UpdateAutomaticTapeCreationPolicyOutput) { + op := &request.Operation{ + Name: opUpdateAutomaticTapeCreationPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAutomaticTapeCreationPolicyInput{} + } + + output = &UpdateAutomaticTapeCreationPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateAutomaticTapeCreationPolicy API operation for AWS Storage Gateway. +// +// Updates the automatic tape creation policy of a gateway. Use this to update +// the policy with a new set of automatic tape creation rules. This is only +// supported for tape gateways. 
+// +// By default, there is no automatic tape creation policy. +// +// A gateway can have only one automatic tape creation policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Storage Gateway's +// API operation UpdateAutomaticTapeCreationPolicy for usage and error information. +// +// Returned Error Types: +// * InvalidGatewayRequestException +// An exception occurred because an invalid gateway request was issued to the +// service. For more information, see the error and message fields. +// +// * InternalServerError +// An internal server error has occurred during the request. For more information, +// see the error and message fields. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30/UpdateAutomaticTapeCreationPolicy +func (c *StorageGateway) UpdateAutomaticTapeCreationPolicy(input *UpdateAutomaticTapeCreationPolicyInput) (*UpdateAutomaticTapeCreationPolicyOutput, error) { + req, out := c.UpdateAutomaticTapeCreationPolicyRequest(input) + return out, req.Send() +} + +// UpdateAutomaticTapeCreationPolicyWithContext is the same as UpdateAutomaticTapeCreationPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAutomaticTapeCreationPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *StorageGateway) UpdateAutomaticTapeCreationPolicyWithContext(ctx aws.Context, input *UpdateAutomaticTapeCreationPolicyInput, opts ...request.Option) (*UpdateAutomaticTapeCreationPolicyOutput, error) { + req, out := c.UpdateAutomaticTapeCreationPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateBandwidthRateLimit = "UpdateBandwidthRateLimit" // UpdateBandwidthRateLimitRequest generates a "aws/request.Request" representing the @@ -6491,7 +7025,7 @@ func (c *StorageGateway) UpdateBandwidthRateLimitRequest(input *UpdateBandwidthR // Updates the bandwidth rate limits of a gateway. You can update both the upload // and download bandwidth rate limit or specify only one of the two. If you // don't set a bandwidth rate limit, the existing rate limit remains. This operation -// is supported for the stored volume, cached volume and tape gateway types.' +// is supported for the stored volume, cached volume, and tape gateway types. // // By default, a gateway's bandwidth rate limits are not set. If you don't set // any limit, the gateway does not have any limitations on its bandwidth usage @@ -6676,7 +7210,7 @@ func (c *StorageGateway) UpdateGatewayInformationRequest(input *UpdateGatewayInf // zone. To specify which gateway to update, use the Amazon Resource Name (ARN) // of the gateway in your request. // -// For Gateways activated after September 2, 2015, the gateway's ARN contains +// For gateways activated after September 2, 2015, the gateway's ARN contains // the gateway ID rather than the gateway name. However, changing the name of // the gateway has no effect on the gateway's ARN. 
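A hedged sketch of installing an automatic tape creation policy with the new operation; the gateway ARN, barcode prefix, and sizing are placeholders, and a gateway can hold only one such policy at a time:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	client := storagegateway.New(session.Must(session.NewSession()))

	// Keep at least five blank 100 GiB tapes with the "TST" barcode prefix
	// available in the GLACIER pool; the gateway tops the count back up as
	// tapes are consumed.
	_, err := client.UpdateAutomaticTapeCreationPolicy(&storagegateway.UpdateAutomaticTapeCreationPolicyInput{
		GatewayARN: aws.String("arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12A3456B"), // placeholder
		AutomaticTapeCreationRules: []*storagegateway.AutomaticTapeCreationRule{
			{
				MinimumNumTapes:   aws.Int64(5),
				PoolId:            aws.String("GLACIER"),
				TapeBarcodePrefix: aws.String("TST"),
				TapeSizeInBytes:   aws.Int64(100 * 1024 * 1024 * 1024),
			},
		},
	})
	if err != nil {
		log.Fatalf("UpdateAutomaticTapeCreationPolicy failed: %v", err)
	}
	fmt.Println("automatic tape creation policy updated")
}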
// @@ -6773,9 +7307,9 @@ func (c *StorageGateway) UpdateGatewaySoftwareNowRequest(input *UpdateGatewaySof // A software update forces a system restart of your gateway. You can minimize // the chance of any disruption to your applications by increasing your iSCSI // Initiators' timeouts. For more information about increasing iSCSI Initiator -// timeouts for Windows and Linux, see Customizing Your Windows iSCSI Settings +// timeouts for Windows and Linux, see Customizing your Windows iSCSI settings // (https://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorWindowsClient.html#CustomizeWindowsiSCSISettings) -// and Customizing Your Linux iSCSI Settings (https://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorRedHatClient.html#CustomizeLinuxiSCSISettings), +// and Customizing your Linux iSCSI settings (https://docs.aws.amazon.com/storagegateway/latest/userguide/ConfiguringiSCSIClientInitiatorRedHatClient.html#CustomizeLinuxiSCSISettings), // respectively. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -7058,7 +7592,7 @@ func (c *StorageGateway) UpdateSMBFileShareRequest(input *UpdateSMBFileShareInpu // to enable you to create a file share. Make sure that AWS STS is activated // in the AWS Region you are creating your file gateway in. If AWS STS is not // activated in this AWS Region, activate it. For information about how to activate -// AWS STS, see Activating and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// AWS STS, see Activating and deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the AWS Identity and Access Management User Guide. // // File gateways don't support creating hard or symbolic links on a file share. @@ -7383,9 +7917,9 @@ func (c *StorageGateway) UpdateVTLDeviceTypeWithContext(ctx aws.Context, input * // // * ActivateGatewayInput$GatewayType // -// * ActivateGatewayInput$TapeDriveType -// // * ActivateGatewayInput$MediumChangerType +// +// * ActivateGatewayInput$TapeDriveType type ActivateGatewayInput struct { _ struct{} `type:"structure"` @@ -7397,8 +7931,8 @@ type ActivateGatewayInput struct { // defaults -- the arguments you pass to the ActivateGateway API call determine // the actual configuration of your gateway. // - // For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/get-activation-key.html - // in the Storage Gateway User Guide. + // For more information, see Getting activation key (https://docs.aws.amazon.com/storagegateway/latest/userguide/get-activation-key.html) + // in the AWS Storage Gateway User Guide. // // ActivationKey is a required field ActivationKey *string `min:"1" type:"string" required:"true"` @@ -7411,11 +7945,11 @@ type ActivateGatewayInput struct { // A value that indicates the AWS Region where you want to store your data. // The gateway AWS Region specified must be the same AWS Region as the AWS Region // in your Host header in the request. For more information about available - // AWS Regions and endpoints for AWS Storage Gateway, see Regions and Endpoints - // (https://docs.aws.amazon.com/general/latest/gr/rande.html#sg_region) in the - // Amazon Web Services Glossary. 
+ // AWS Regions and endpoints for AWS Storage Gateway, see AWS Storage Gateway + // endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/sg.html) + // in the AWS General Reference. // - // Valid Values: See AWS Storage Gateway Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#sg_region) + // Valid Values: See AWS Storage Gateway endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/sg.html) // in the AWS General Reference. // // GatewayRegion is a required field @@ -7434,13 +7968,13 @@ type ActivateGatewayInput struct { // is critical to all later functions of the gateway and cannot be changed after // activation. The default value is CACHED. // - // Valid Values: "STORED", "CACHED", "VTL", "FILE_S3" + // Valid Values: STORED | CACHED | VTL | FILE_S3 GatewayType *string `min:"2" type:"string"` // The value that indicates the type of medium changer to use for tape gateway. // This field is optional. // - // Valid Values: "STK-L700", "AWS-Gateway-VTL" + // Valid Values: STK-L700 | AWS-Gateway-VTL | IBM-03584L32-0402 MediumChangerType *string `min:"2" type:"string"` // A list of up to 50 tags that you can assign to the gateway. Each tag is a @@ -7455,7 +7989,7 @@ type ActivateGatewayInput struct { // The value that indicates the type of tape drive to use for tape gateway. // This field is optional. // - // Valid Values: "IBM-ULT3580-TD5" + // Valid Values: IBM-ULT3580-TD5 TapeDriveType *string `min:"2" type:"string"` } @@ -7606,7 +8140,7 @@ type AddCacheInput struct { _ struct{} `type:"structure"` // An array of strings that identify disks that are to be configured as working - // storage. Each string have a minimum length of 1 and maximum length of 300. + // storage. Each string has a minimum length of 1 and maximum length of 300. // You can get the disk IDs from the ListLocalDisks API. // // DiskIds is a required field @@ -7784,7 +8318,7 @@ type AddUploadBufferInput struct { _ struct{} `type:"structure"` // An array of strings that identify disks that are to be configured as working - // storage. Each string have a minimum length of 1 and maximum length of 300. + // storage. Each string has a minimum length of 1 and maximum length of 300. // You can get the disk IDs from the ListLocalDisks API. // // DiskIds is a required field @@ -7869,7 +8403,7 @@ type AddWorkingStorageInput struct { _ struct{} `type:"structure"` // An array of strings that identify disks that are to be configured as working - // storage. Each string have a minimum length of 1 and maximum length of 300. + // storage. Each string has a minimum length of 1 and maximum length of 300. // You can get the disk IDs from the ListLocalDisks API. // // DiskIds is a required field @@ -7923,8 +8457,8 @@ func (s *AddWorkingStorageInput) SetGatewayARN(v string) *AddWorkingStorageInput return s } -// A JSON object containing the of the gateway for which working storage was -// configured. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway for +// which working storage was configured. type AddWorkingStorageOutput struct { _ struct{} `type:"structure"` @@ -7952,13 +8486,22 @@ func (s *AddWorkingStorageOutput) SetGatewayARN(v string) *AddWorkingStorageOutp type AssignTapePoolInput struct { _ struct{} `type:"structure"` + // Set permissions to bypass governance retention. 
If the lock type of the archived + // tape is Governance, the tape's archived age is not older than RetentionLockInDays, + // and the user does not already have BypassGovernanceRetention, setting this + // to TRUE enables the user to bypass the retention lock. This parameter is + // set to true by default for calls from the console. + // + // Valid values: TRUE | FALSE + BypassGovernanceRetention *bool `type:"boolean"` + // The ID of the pool that you want to add your tape to for archiving. The tape // in this pool is archived in the S3 storage class that is associated with // the pool. When you use your backup application to eject the tape, the tape - // is archived directly into the storage class (Glacier or Deep Archive) that - // corresponds to the pool. + // is archived directly into the storage class (S3 Glacier or S3 Glacier Deep + // Archive) that corresponds to the pool. // - // Valid values: "GLACIER", "DEEP_ARCHIVE" + // Valid Values: GLACIER | DEEP_ARCHIVE // // PoolId is a required field PoolId *string `min:"1" type:"string" required:"true"` @@ -8002,6 +8545,12 @@ func (s *AssignTapePoolInput) Validate() error { return nil } +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *AssignTapePoolInput) SetBypassGovernanceRetention(v bool) *AssignTapePoolInput { + s.BypassGovernanceRetention = &v + return s +} + // SetPoolId sets the PoolId field's value. func (s *AssignTapePoolInput) SetPoolId(v string) *AssignTapePoolInput { s.PoolId = &v @@ -8182,24 +8731,207 @@ func (s *AttachVolumeOutput) SetVolumeARN(v string) *AttachVolumeOutput { return s } -// Describes an iSCSI cached volume. -type CachediSCSIVolume struct { +// Information about the gateway's automatic tape creation policies, including +// the automatic tape creation rules and the gateway that is using the policies. +type AutomaticTapeCreationPolicyInfo struct { _ struct{} `type:"structure"` - // The date the volume was created. Volumes created prior to March 28, 2017 - // don’t have this time stamp. - CreatedDate *time.Time `type:"timestamp"` + // An automatic tape creation policy consists of a list of automatic tape creation + // rules. This returns the rules that determine when and how to automatically + // create new tapes. + AutomaticTapeCreationRules []*AutomaticTapeCreationRule `min:"1" type:"list"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. - KMSKey *string `min:"7" type:"string"` + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` +} - // If the cached volume was created from a snapshot, this field contains the - // snapshot ID used, e.g. snap-78e22663. Otherwise, this field is not included. - SourceSnapshotId *string `type:"string"` +// String returns the string representation +func (s AutomaticTapeCreationPolicyInfo) String() string { + return awsutil.Prettify(s) +} - // The name of the iSCSI target used by an initiator to connect to a volume - // and used as a suffix for the target ARN. For example, specifying TargetName +// GoString returns the string representation +func (s AutomaticTapeCreationPolicyInfo) GoString() string { + return s.String() +} + +// SetAutomaticTapeCreationRules sets the AutomaticTapeCreationRules field's value. 
+func (s *AutomaticTapeCreationPolicyInfo) SetAutomaticTapeCreationRules(v []*AutomaticTapeCreationRule) *AutomaticTapeCreationPolicyInfo { + s.AutomaticTapeCreationRules = v + return s +} + +// SetGatewayARN sets the GatewayARN field's value. +func (s *AutomaticTapeCreationPolicyInfo) SetGatewayARN(v string) *AutomaticTapeCreationPolicyInfo { + s.GatewayARN = &v + return s +} + +// An automatic tape creation policy consists of automatic tape creation rules +// where each rule defines when and how to create new tapes. For more information +// about automatic tape creation, see Creating Tapes Automatically (https://docs.aws.amazon.com/storagegateway/latest/userguide/GettingStartedCreateTapes.html#CreateTapesAutomatically). +type AutomaticTapeCreationRule struct { + _ struct{} `type:"structure"` + + // The minimum number of available virtual tapes that the gateway maintains + // at all times. If the number of tapes on the gateway goes below this value, + // the gateway creates as many new tapes as are needed to have MinimumNumTapes + // on the gateway. For more information about automatic tape creation, see Creating + // Tapes Automatically (https://docs.aws.amazon.com/storagegateway/latest/userguide/GettingStartedCreateTapes.html#CreateTapesAutomatically). + // + // MinimumNumTapes is a required field + MinimumNumTapes *int64 `min:"1" type:"integer" required:"true"` + + // The ID of the pool that you want to add your tape to for archiving. The tape + // in this pool is archived in the Amazon S3 storage class that is associated + // with the pool. When you use your backup application to eject the tape, the + // tape is archived directly into the storage class (S3 Glacier or S3 Glacier + // Deep Archive) that corresponds to the pool. + // + // Valid Values: GLACIER | DEEP_ARCHIVE + // + // PoolId is a required field + PoolId *string `min:"1" type:"string" required:"true"` + + // A prefix that you append to the barcode of the virtual tape that you are + // creating. This prefix makes the barcode unique. + // + // The prefix must be 1-4 characters in length and must be one of the uppercase + // letters from A to Z. + // + // TapeBarcodePrefix is a required field + TapeBarcodePrefix *string `min:"1" type:"string" required:"true"` + + // The size, in bytes, of the virtual tape capacity. + // + // TapeSizeInBytes is a required field + TapeSizeInBytes *int64 `type:"long" required:"true"` + + // Set to true to indicate that tapes are to be archived as write-once-read-many + // (WORM). Set to false when WORM is not enabled for tapes. + Worm *bool `type:"boolean"` +} + +// String returns the string representation +func (s AutomaticTapeCreationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutomaticTapeCreationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AutomaticTapeCreationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutomaticTapeCreationRule"} + if s.MinimumNumTapes == nil { + invalidParams.Add(request.NewErrParamRequired("MinimumNumTapes")) + } + if s.MinimumNumTapes != nil && *s.MinimumNumTapes < 1 { + invalidParams.Add(request.NewErrParamMinValue("MinimumNumTapes", 1)) + } + if s.PoolId == nil { + invalidParams.Add(request.NewErrParamRequired("PoolId")) + } + if s.PoolId != nil && len(*s.PoolId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PoolId", 1)) + } + if s.TapeBarcodePrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TapeBarcodePrefix")) + } + if s.TapeBarcodePrefix != nil && len(*s.TapeBarcodePrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TapeBarcodePrefix", 1)) + } + if s.TapeSizeInBytes == nil { + invalidParams.Add(request.NewErrParamRequired("TapeSizeInBytes")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMinimumNumTapes sets the MinimumNumTapes field's value. +func (s *AutomaticTapeCreationRule) SetMinimumNumTapes(v int64) *AutomaticTapeCreationRule { + s.MinimumNumTapes = &v + return s +} + +// SetPoolId sets the PoolId field's value. +func (s *AutomaticTapeCreationRule) SetPoolId(v string) *AutomaticTapeCreationRule { + s.PoolId = &v + return s +} + +// SetTapeBarcodePrefix sets the TapeBarcodePrefix field's value. +func (s *AutomaticTapeCreationRule) SetTapeBarcodePrefix(v string) *AutomaticTapeCreationRule { + s.TapeBarcodePrefix = &v + return s +} + +// SetTapeSizeInBytes sets the TapeSizeInBytes field's value. +func (s *AutomaticTapeCreationRule) SetTapeSizeInBytes(v int64) *AutomaticTapeCreationRule { + s.TapeSizeInBytes = &v + return s +} + +// SetWorm sets the Worm field's value. +func (s *AutomaticTapeCreationRule) SetWorm(v bool) *AutomaticTapeCreationRule { + s.Worm = &v + return s +} + +// Lists refresh cache information. +type CacheAttributes struct { + _ struct{} `type:"structure"` + + // Refreshes a file share's cache by using Time To Live (TTL). TTL is the length + // of time since the last refresh after which access to the directory would + // cause the file gateway to first refresh that directory's contents from the + // Amazon S3 bucket. The TTL duration is in seconds. + // + // Valid Values: 300 to 2,592,000 seconds (5 minutes to 30 days) + CacheStaleTimeoutInSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CacheAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CacheAttributes) GoString() string { + return s.String() +} + +// SetCacheStaleTimeoutInSeconds sets the CacheStaleTimeoutInSeconds field's value. +func (s *CacheAttributes) SetCacheStaleTimeoutInSeconds(v int64) *CacheAttributes { + s.CacheStaleTimeoutInSeconds = &v + return s +} + +// Describes an iSCSI cached volume. +type CachediSCSIVolume struct { + _ struct{} `type:"structure"` + + // The date the volume was created. Volumes created prior to March 28, 2017 + // don’t have this timestamp. + CreatedDate *time.Time `type:"timestamp"` + + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. 
+ KMSKey *string `min:"7" type:"string"` + + // If the cached volume was created from a snapshot, this field contains the + // snapshot ID used, e.g., snap-78e22663. Otherwise, this field is not included. + SourceSnapshotId *string `type:"string"` + + // The name of the iSCSI target used by an initiator to connect to a volume + // and used as a suffix for the target ARN. For example, specifying TargetName // as myvolume results in the target ARN of arn:aws:storagegateway:us-east-2:111122223333:gateway/sgw-12A3456B/target/iqn.1997-05.com.amazon:myvolume. // The target name must be unique across all volumes on a gateway. // @@ -8211,11 +8943,11 @@ type CachediSCSIVolume struct { VolumeARN *string `min:"50" type:"string"` // A value that indicates whether a storage volume is attached to or detached - // from a gateway. For more information, see Moving Your Volumes to a Different - // Gateway (https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#attach-detach-volume). + // from a gateway. For more information, see Moving your volumes to a different + // gateway (https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#attach-detach-volume). VolumeAttachmentStatus *string `min:"3" type:"string"` - // The unique identifier of the volume, e.g. vol-AE4B946D. + // The unique identifier of the volume, e.g., vol-AE4B946D. VolumeId *string `min:"12" type:"string"` // Represents the percentage complete if the volume is restoring or bootstrapping @@ -8521,7 +9253,7 @@ type ChapInfo struct { SecretToAuthenticateInitiator *string `min:"1" type:"string" sensitive:"true"` // The secret key that the target must provide to participate in mutual CHAP - // with the initiator (e.g. Windows client). + // with the initiator (e.g., Windows client). SecretToAuthenticateTarget *string `min:"1" type:"string" sensitive:"true"` // The Amazon Resource Name (ARN) of the volume. @@ -8580,12 +9312,15 @@ type CreateCachediSCSIVolumeInput struct { // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or - // false to use a key managed by Amazon S3. Optional. + // Set to true to use Amazon S3 server-side encryption with your own AWS KMS + // key, or false to use a key managed by Amazon S3. Optional. + // + // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The network interface of the gateway on which to expose the iSCSI target. @@ -8599,7 +9334,7 @@ type CreateCachediSCSIVolumeInput struct { // The snapshot ID (e.g. "snap-1122aabb") of the snapshot to restore as the // new cached volume. Specify this field if you want to create the iSCSI storage - // volume from a snapshot otherwise do not include this field. To list snapshots + // volume from a snapshot; otherwise, do not include this field. 
To list snapshots // for your account use DescribeSnapshots (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html) // in the Amazon Elastic Compute Cloud API Reference. SnapshotId *string `type:"string"` @@ -8793,6 +9528,9 @@ func (s *CreateCachediSCSIVolumeOutput) SetVolumeARN(v string) *CreateCachediSCS type CreateNFSFileShareInput struct { _ struct{} `type:"structure"` + // Refresh cache information. + CacheAttributes *CacheAttributes `type:"structure"` + // The list of clients that are allowed to access the file gateway. The list // must contain either valid IP addresses or valid CIDR blocks. ClientList []*string `min:"1" type:"list"` @@ -8804,10 +9542,16 @@ type CreateNFSFileShareInput struct { ClientToken *string `min:"5" type:"string" required:"true"` // The default storage class for objects put into an Amazon S3 bucket by the - // file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. - // If this field is not populated, the default value S3_STANDARD is used. Optional. + // file gateway. The default value is S3_INTELLIGENT_TIERING. Optional. + // + // Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA DefaultStorageClass *string `min:"5" type:"string"` + // The name of the file share. Optional. + // + // FileShareName must be set if an S3 prefix name is set in LocationARN. + FileShareName *string `min:"1" type:"string"` + // The Amazon Resource Name (ARN) of the file gateway on which you want to create // a file share. // @@ -8816,18 +9560,24 @@ type CreateNFSFileShareInput struct { // A value that enables guessing of the MIME type for uploaded objects based // on file extensions. Set this value to true to enable MIME type guessing, - // and otherwise to false. The default value is true. + // otherwise set to false. The default value is true. + // + // Valid Values: true | false GuessMIMETypeEnabled *bool `type:"boolean"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or - // false to use a key managed by Amazon S3. Optional. + // Set to true to use Amazon S3 server-side encryption with your own AWS KMS + // key, or false to use a key managed by Amazon S3. Optional. + // + // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) AWS KMS key used for Amazon S3 server side - // encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` - // The ARN of the backed storage used for storing file data. + // The ARN of the backend storage used for storing file data. A prefix name + // can be added to the S3 bucket name. It must end with a "/". // // LocationARN is a required field LocationARN *string `min:"16" type:"string" required:"true"` @@ -8835,22 +9585,27 @@ type CreateNFSFileShareInput struct { // File share default values. Optional. NFSFileShareDefaults *NFSFileShareDefaults `type:"structure"` - // A value that sets the access control list permission for objects in the S3 - // bucket that a file gateway puts objects into. The default value is "private". + // A value that sets the access control list (ACL) permission for objects in + // the S3 bucket that a file gateway puts objects into. 
The default value is + // private. ObjectACL *string `type:"string" enum:"ObjectACL"` - // A value that sets the write status of a file share. This value is true if - // the write status is read-only, and otherwise false. + // A value that sets the write status of a file share. Set this value to true + // to set the write status to read-only, otherwise set to false. + // + // Valid Values: true | false ReadOnly *bool `type:"boolean"` // A value that sets who pays the cost of the request and the cost associated // with data download from the S3 bucket. If this value is set to true, the - // requester pays the costs. Otherwise the S3 bucket owner pays. However, the + // requester pays the costs; otherwise, the S3 bucket owner pays. However, the // S3 bucket owner always pays the cost of storing data. // // RequesterPays is a configuration for the S3 bucket that backs the file share, // so make sure that the configuration on the file share is the same as the // S3 bucket configuration. + // + // Valid Values: true | false RequesterPays *bool `type:"boolean"` // The ARN of the AWS Identity and Access Management (IAM) role that a file @@ -8859,13 +9614,15 @@ type CreateNFSFileShareInput struct { // Role is a required field Role *string `min:"20" type:"string" required:"true"` - // A value that maps a user to anonymous user. Valid options are the following: + // A value that maps a user to anonymous user. + // + // Valid values are the following: // - // * RootSquash - Only root is mapped to anonymous user. + // * RootSquash: Only root is mapped to anonymous user. // - // * NoSquash - No one is mapped to anonymous user + // * NoSquash: No one is mapped to anonymous user. // - // * AllSquash - Everyone is mapped to anonymous user. + // * AllSquash: Everyone is mapped to anonymous user. Squash *string `min:"5" type:"string"` // A list of up to 50 tags that can be assigned to the NFS file share. Each @@ -8903,6 +9660,9 @@ func (s *CreateNFSFileShareInput) Validate() error { if s.DefaultStorageClass != nil && len(*s.DefaultStorageClass) < 5 { invalidParams.Add(request.NewErrParamMinLen("DefaultStorageClass", 5)) } + if s.FileShareName != nil && len(*s.FileShareName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FileShareName", 1)) + } if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } @@ -8949,6 +9709,12 @@ func (s *CreateNFSFileShareInput) Validate() error { return nil } +// SetCacheAttributes sets the CacheAttributes field's value. +func (s *CreateNFSFileShareInput) SetCacheAttributes(v *CacheAttributes) *CreateNFSFileShareInput { + s.CacheAttributes = v + return s +} + // SetClientList sets the ClientList field's value. func (s *CreateNFSFileShareInput) SetClientList(v []*string) *CreateNFSFileShareInput { s.ClientList = v @@ -8967,6 +9733,12 @@ func (s *CreateNFSFileShareInput) SetDefaultStorageClass(v string) *CreateNFSFil return s } +// SetFileShareName sets the FileShareName field's value. +func (s *CreateNFSFileShareInput) SetFileShareName(v string) *CreateNFSFileShareInput { + s.FileShareName = &v + return s +} + // SetGatewayARN sets the GatewayARN field's value. 
func (s *CreateNFSFileShareInput) SetGatewayARN(v string) *CreateNFSFileShareInput { s.GatewayARN = &v @@ -9067,19 +9839,32 @@ func (s *CreateNFSFileShareOutput) SetFileShareARN(v string) *CreateNFSFileShare type CreateSMBFileShareInput struct { _ struct{} `type:"structure"` - // A list of users in the Active Directory that will be granted administrator + // A list of users or groups in the Active Directory that will be granted administrator // privileges on the file share. These users can do all file operations as the - // super-user. + // super-user. Acceptable formats include: DOMAIN\User1, user1, @group1, and + // @DOMAIN\group1. // // Use this option very carefully, because any user in this list can do anything // they like on the file share, regardless of file permissions. AdminUserList []*string `type:"list"` - // The authentication method that users use to access the file share. + // The Amazon Resource Name (ARN) of the storage used for the audit logs. + AuditDestinationARN *string `type:"string"` + + // The authentication method that users use to access the file share. The default + // is ActiveDirectory. // - // Valid values are ActiveDirectory or GuestAccess. The default is ActiveDirectory. + // Valid Values: ActiveDirectory | GuestAccess Authentication *string `min:"5" type:"string"` + // Refresh cache information. + CacheAttributes *CacheAttributes `type:"structure"` + + // The case of an object name in an Amazon S3 bucket. For ClientSpecified, the + // client determines the case sensitivity. For CaseSensitive, the gateway determines + // the case sensitivity. The default value is ClientSpecified. + CaseSensitivity *string `type:"string" enum:"CaseSensitivity"` + // A unique string value that you supply that is used by file gateway to ensure // idempotent file share creation. // @@ -9087,55 +9872,72 @@ type CreateSMBFileShareInput struct { ClientToken *string `min:"5" type:"string" required:"true"` // The default storage class for objects put into an Amazon S3 bucket by the - // file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. - // If this field is not populated, the default value S3_STANDARD is used. Optional. + // file gateway. The default value is S3_INTELLIGENT_TIERING. Optional. + // + // Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA DefaultStorageClass *string `min:"5" type:"string"` - // The Amazon Resource Name (ARN) of the file gateway on which you want to create - // a file share. + // The name of the file share. Optional. + // + // FileShareName must be set if an S3 prefix name is set in LocationARN. + FileShareName *string `min:"1" type:"string"` + + // The ARN of the file gateway on which you want to create a file share. // // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` // A value that enables guessing of the MIME type for uploaded objects based // on file extensions. Set this value to true to enable MIME type guessing, - // and otherwise to false. The default value is true. + // otherwise set to false. The default value is true. + // + // Valid Values: true | false GuessMIMETypeEnabled *bool `type:"boolean"` // A list of users or groups in the Active Directory that are not allowed to - // access the file share. A group must be prefixed with the @ character. For - // example @group1. Can only be set if Authentication is set to ActiveDirectory. + // access the file share. A group must be prefixed with the @ character. 
Acceptable + // formats include: DOMAIN\User1, user1, @group1, and @DOMAIN\group1. Can only + // be set if Authentication is set to ActiveDirectory. InvalidUserList []*string `type:"list"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or - // false to use a key managed by Amazon S3. Optional. + // Set to true to use Amazon S3 server-side encryption with your own AWS KMS + // key, or false to use a key managed by Amazon S3. Optional. + // + // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` - // The ARN of the backed storage used for storing file data. + // The ARN of the backend storage used for storing file data. A prefix name + // can be added to the S3 bucket name. It must end with a "/". // // LocationARN is a required field LocationARN *string `min:"16" type:"string" required:"true"` - // A value that sets the access control list permission for objects in the S3 - // bucket that a file gateway puts objects into. The default value is "private". + // A value that sets the access control list (ACL) permission for objects in + // the S3 bucket that a file gateway puts objects into. The default value is + // private. ObjectACL *string `type:"string" enum:"ObjectACL"` - // A value that sets the write status of a file share. This value is true if - // the write status is read-only, and otherwise false. + // A value that sets the write status of a file share. Set this value to true + // to set the write status to read-only, otherwise set to false. + // + // Valid Values: true | false ReadOnly *bool `type:"boolean"` // A value that sets who pays the cost of the request and the cost associated // with data download from the S3 bucket. If this value is set to true, the - // requester pays the costs. Otherwise the S3 bucket owner pays. However, the + // requester pays the costs; otherwise, the S3 bucket owner pays. However, the // S3 bucket owner always pays the cost of storing data. // // RequesterPays is a configuration for the S3 bucket that backs the file share, // so make sure that the configuration on the file share is the same as the // S3 bucket configuration. + // + // Valid Values: true | false RequesterPays *bool `type:"boolean"` // The ARN of the AWS Identity and Access Management (IAM) role that a file @@ -9144,12 +9946,15 @@ type CreateSMBFileShareInput struct { // Role is a required field Role *string `min:"20" type:"string" required:"true"` - // Set this value to "true to enable ACL (access control list) on the SMB file - // share. Set it to "false" to map file and directory permissions to the POSIX + // Set this value to true to enable access control list (ACL) on the SMB file + // share. Set it to false to map file and directory permissions to the POSIX // permissions. // - // For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html - // in the Storage Gateway User Guide. 
+ // For more information, see Using Microsoft Windows ACLs to control access + // to an SMB file share (https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html) + // in the AWS Storage Gateway User Guide. + // + // Valid Values: true | false SMBACLEnabled *bool `type:"boolean"` // A list of up to 50 tags that can be assigned to the NFS file share. Each @@ -9162,8 +9967,9 @@ type CreateSMBFileShareInput struct { Tags []*Tag `type:"list"` // A list of users or groups in the Active Directory that are allowed to access - // the file share. A group must be prefixed with the @ character. For example - // @group1. Can only be set if Authentication is set to ActiveDirectory. + // the file share. A group must be prefixed with the @ character. Acceptable + // formats include: DOMAIN\User1, user1, @group1, and @DOMAIN\group1. Can only + // be set if Authentication is set to ActiveDirectory. ValidUserList []*string `type:"list"` } @@ -9192,6 +9998,9 @@ func (s *CreateSMBFileShareInput) Validate() error { if s.DefaultStorageClass != nil && len(*s.DefaultStorageClass) < 5 { invalidParams.Add(request.NewErrParamMinLen("DefaultStorageClass", 5)) } + if s.FileShareName != nil && len(*s.FileShareName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FileShareName", 1)) + } if s.GatewayARN == nil { invalidParams.Add(request.NewErrParamRequired("GatewayARN")) } @@ -9236,12 +10045,30 @@ func (s *CreateSMBFileShareInput) SetAdminUserList(v []*string) *CreateSMBFileSh return s } +// SetAuditDestinationARN sets the AuditDestinationARN field's value. +func (s *CreateSMBFileShareInput) SetAuditDestinationARN(v string) *CreateSMBFileShareInput { + s.AuditDestinationARN = &v + return s +} + // SetAuthentication sets the Authentication field's value. func (s *CreateSMBFileShareInput) SetAuthentication(v string) *CreateSMBFileShareInput { s.Authentication = &v return s } +// SetCacheAttributes sets the CacheAttributes field's value. +func (s *CreateSMBFileShareInput) SetCacheAttributes(v *CacheAttributes) *CreateSMBFileShareInput { + s.CacheAttributes = v + return s +} + +// SetCaseSensitivity sets the CaseSensitivity field's value. +func (s *CreateSMBFileShareInput) SetCaseSensitivity(v string) *CreateSMBFileShareInput { + s.CaseSensitivity = &v + return s +} + // SetClientToken sets the ClientToken field's value. func (s *CreateSMBFileShareInput) SetClientToken(v string) *CreateSMBFileShareInput { s.ClientToken = &v @@ -9254,6 +10081,12 @@ func (s *CreateSMBFileShareInput) SetDefaultStorageClass(v string) *CreateSMBFil return s } +// SetFileShareName sets the FileShareName field's value. +func (s *CreateSMBFileShareInput) SetFileShareName(v string) *CreateSMBFileShareInput { + s.FileShareName = &v + return s +} + // SetGatewayARN sets the GatewayARN field's value. func (s *CreateSMBFileShareInput) SetGatewayARN(v string) *CreateSMBFileShareInput { s.GatewayARN = &v @@ -9361,7 +10194,7 @@ type CreateSnapshotFromVolumeRecoveryPointInput struct { // Textual description of the snapshot that appears in the Amazon EC2 console, // Elastic Block Store snapshots panel in the Description field, and in the - // AWS Storage Gateway snapshot Details pane, Description field + // AWS Storage Gateway snapshot Details pane, Description field. 
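A hedged sketch of a CreateSMBFileShare call that uses the new FileShareName, CacheAttributes, and CaseSensitivity fields documented above. The client is assumed to be an already configured *storagegateway.StorageGateway; the ARNs and names are placeholders, and the CaseSensitivity value is written as a plain string taken from the documented valid values rather than a generated enum constant.

package sgwexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

// createExampleSMBShare is a hypothetical helper; the caller supplies a configured client.
func createExampleSMBShare(sgw *storagegateway.StorageGateway) error {
	out, err := sgw.CreateSMBFileShare(&storagegateway.CreateSMBFileShareInput{
		ClientToken: aws.String("example-idempotency-token"),
		GatewayARN:  aws.String("arn:aws:storagegateway:us-east-1:111122223333:gateway/sgw-12A3456B"),
		// LocationARN may now include an S3 prefix; FileShareName must then be set explicitly.
		LocationARN:     aws.String("arn:aws:s3:::example-bucket/projects/"),
		FileShareName:   aws.String("projects"),
		Role:            aws.String("arn:aws:iam::111122223333:role/ExampleFileGatewayRole"),
		Authentication:  aws.String("GuestAccess"),
		CaseSensitivity: aws.String("ClientSpecified"),
		// Re-read a directory listing from S3 if the cached copy is older than one hour.
		CacheAttributes: &storagegateway.CacheAttributes{
			CacheStaleTimeoutInSeconds: aws.Int64(3600),
		},
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.FileShareARN))
	return nil
}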
// // SnapshotDescription is a required field SnapshotDescription *string `min:"1" type:"string" required:"true"` @@ -9494,7 +10327,7 @@ type CreateSnapshotInput struct { // Textual description of the snapshot that appears in the Amazon EC2 console, // Elastic Block Store snapshots panel in the Description field, and in the - // AWS Storage Gateway snapshot Details pane, Description field + // AWS Storage Gateway snapshot Details pane, Description field. // // SnapshotDescription is a required field SnapshotDescription *string `min:"1" type:"string" required:"true"` @@ -9637,12 +10470,15 @@ type CreateStorediSCSIVolumeInput struct { // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or - // false to use a key managed by Amazon S3. Optional. + // Set to true to use Amazon S3 server-side encryption with your own AWS KMS + // key, or false to use a key managed by Amazon S3. Optional. + // + // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the KMS key used for Amazon S3 server side - // encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The network interface of the gateway on which to expose the iSCSI target. @@ -9654,17 +10490,17 @@ type CreateStorediSCSIVolumeInput struct { // NetworkInterfaceId is a required field NetworkInterfaceId *string `type:"string" required:"true"` - // Specify this field as true if you want to preserve the data on the local - // disk. Otherwise, specifying this field as false creates an empty volume. + // Set to true true if you want to preserve the data on the local disk. Otherwise, + // set to false to create an empty volume. // - // Valid Values: true, false + // Valid Values: true | false // // PreserveExistingData is a required field PreserveExistingData *bool `type:"boolean" required:"true"` // The snapshot ID (e.g. "snap-1122aabb") of the snapshot to restore as the // new stored volume. Specify this field if you want to create the iSCSI storage - // volume from a snapshot otherwise do not include this field. To list snapshots + // volume from a snapshot; otherwise, do not include this field. To list snapshots // for your account use DescribeSnapshots (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeSnapshots.html) // in the Amazon Elastic Compute Cloud API Reference. SnapshotId *string `type:"string"` @@ -9844,6 +10680,137 @@ func (s *CreateStorediSCSIVolumeOutput) SetVolumeSizeInBytes(v int64) *CreateSto return s } +type CreateTapePoolInput struct { + _ struct{} `type:"structure"` + + // The name of the new custom tape pool. + // + // PoolName is a required field + PoolName *string `min:"1" type:"string" required:"true"` + + // Tape retention lock time is set in days. Tape retention lock can be enabled + // for up to 100 years (36,500 days). + RetentionLockTimeInDays *int64 `type:"integer"` + + // Tape retention lock can be configured in two modes. When configured in governance + // mode, AWS accounts with specific IAM permissions are authorized to remove + // the tape retention lock from archived virtual tapes. 
When configured in compliance + // mode, the tape retention lock cannot be removed by any user, including the + // root AWS account. + RetentionLockType *string `type:"string" enum:"RetentionLockType"` + + // The storage class that is associated with the new custom pool. When you use + // your backup application to eject the tape, the tape is archived directly + // into the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds + // to the pool. + // + // StorageClass is a required field + StorageClass *string `type:"string" required:"true" enum:"TapeStorageClass"` + + // A list of up to 50 tags that can be assigned to tape pool. Each tag is a + // key-value pair. + // + // Valid characters for key and value are letters, spaces, and numbers representable + // in UTF-8 format, and the following special characters: + - = . _ : / @. The + // maximum length of a tag's key is 128 characters, and the maximum length for + // a tag's value is 256. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateTapePoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTapePoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTapePoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTapePoolInput"} + if s.PoolName == nil { + invalidParams.Add(request.NewErrParamRequired("PoolName")) + } + if s.PoolName != nil && len(*s.PoolName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PoolName", 1)) + } + if s.StorageClass == nil { + invalidParams.Add(request.NewErrParamRequired("StorageClass")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPoolName sets the PoolName field's value. +func (s *CreateTapePoolInput) SetPoolName(v string) *CreateTapePoolInput { + s.PoolName = &v + return s +} + +// SetRetentionLockTimeInDays sets the RetentionLockTimeInDays field's value. +func (s *CreateTapePoolInput) SetRetentionLockTimeInDays(v int64) *CreateTapePoolInput { + s.RetentionLockTimeInDays = &v + return s +} + +// SetRetentionLockType sets the RetentionLockType field's value. +func (s *CreateTapePoolInput) SetRetentionLockType(v string) *CreateTapePoolInput { + s.RetentionLockType = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CreateTapePoolInput) SetStorageClass(v string) *CreateTapePoolInput { + s.StorageClass = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateTapePoolInput) SetTags(v []*Tag) *CreateTapePoolInput { + s.Tags = v + return s +} + +type CreateTapePoolOutput struct { + _ struct{} `type:"structure"` + + // The unique Amazon Resource Name (ARN) that represents the custom tape pool. + // Use the ListTapePools operation to return a list of tape pools for your account + // and AWS Region. + PoolARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s CreateTapePoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTapePoolOutput) GoString() string { + return s.String() +} + +// SetPoolARN sets the PoolARN field's value. 
+func (s *CreateTapePoolOutput) SetPoolARN(v string) *CreateTapePoolOutput { + s.PoolARN = &v + return s +} + // CreateTapeWithBarcodeInput type CreateTapeWithBarcodeInput struct { _ struct{} `type:"structure"` @@ -9855,21 +10822,24 @@ type CreateTapeWithBarcodeInput struct { // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or - // false to use a key managed by Amazon S3. Optional. + // Set to true to use Amazon S3 server-side encryption with your own AWS KMS + // key, or false to use a key managed by Amazon S3. Optional. + // + // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS Key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The ID of the pool that you want to add your tape to for archiving. The tape // in this pool is archived in the S3 storage class that is associated with // the pool. When you use your backup application to eject the tape, the tape - // is archived directly into the storage class (Glacier or Deep Archive) that - // corresponds to the pool. + // is archived directly into the storage class (S3 Glacier or S3 Deep Archive) + // that corresponds to the pool. // - // Valid values: "GLACIER", "DEEP_ARCHIVE" + // Valid Values: GLACIER | DEEP_ARCHIVE PoolId *string `min:"1" type:"string"` // A list of up to 50 tags that can be assigned to a virtual tape that has a @@ -9891,10 +10861,14 @@ type CreateTapeWithBarcodeInput struct { // The size, in bytes, of the virtual tape that you want to create. // - // The size must be aligned by gigabyte (1024*1024*1024 byte). + // The size must be aligned by gigabyte (1024*1024*1024 bytes). // // TapeSizeInBytes is a required field TapeSizeInBytes *int64 `type:"long" required:"true"` + + // Set to TRUE if the tape you are creating is to be configured as a write-once-read-many + // (WORM) tape. + Worm *bool `type:"boolean"` } // String returns the string representation @@ -9990,6 +10964,12 @@ func (s *CreateTapeWithBarcodeInput) SetTapeSizeInBytes(v int64) *CreateTapeWith return s } +// SetWorm sets the Worm field's value. +func (s *CreateTapeWithBarcodeInput) SetWorm(v bool) *CreateTapeWithBarcodeInput { + s.Worm = &v + return s +} + // CreateTapeOutput type CreateTapeWithBarcodeOutput struct { _ struct{} `type:"structure"` @@ -10034,12 +11014,15 @@ type CreateTapesInput struct { // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or - // false to use a key managed by Amazon S3. Optional. + // Set to true to use Amazon S3 server-side encryption with your own AWS KMS + // key, or false to use a key managed by Amazon S3. Optional. + // + // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. 
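A sketch of creating a custom tape pool with a governance-mode retention lock using the CreateTapePoolInput type above. It assumes the corresponding CreateTapePool operation wrapper added earlier in this file; the StorageClass and RetentionLockType string literals follow the documented valid values and should be treated as assumptions where the enum is not shown here.

package sgwexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

// createExampleTapePool is a hypothetical helper showing the new custom tape pool input.
func createExampleTapePool(sgw *storagegateway.StorageGateway) error {
	out, err := sgw.CreateTapePool(&storagegateway.CreateTapePoolInput{
		PoolName:     aws.String("example-tape-pool"),
		StorageClass: aws.String("DEEP_ARCHIVE"),
		// Governance-mode retention lock: callers with the right IAM permissions can
		// still remove it; the exact literal is an assumption based on the docs above.
		RetentionLockType:       aws.String("GOVERNANCE"),
		RetentionLockTimeInDays: aws.Int64(365),
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.PoolARN))
	return nil
}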
Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The number of virtual tapes that you want to create. @@ -10050,10 +11033,10 @@ type CreateTapesInput struct { // The ID of the pool that you want to add your tape to for archiving. The tape // in this pool is archived in the S3 storage class that is associated with // the pool. When you use your backup application to eject the tape, the tape - // is archived directly into the storage class (Glacier or Deep Archive) that - // corresponds to the pool. + // is archived directly into the storage class (S3 Glacier or S3 Glacier Deep + // Archive) that corresponds to the pool. // - // Valid values: "GLACIER", "DEEP_ARCHIVE" + // Valid Values: GLACIER | DEEP_ARCHIVE PoolId *string `min:"1" type:"string"` // A list of up to 50 tags that can be assigned to a virtual tape. Each tag @@ -10076,10 +11059,14 @@ type CreateTapesInput struct { // The size, in bytes, of the virtual tapes that you want to create. // - // The size must be aligned by gigabyte (1024*1024*1024 byte). + // The size must be aligned by gigabyte (1024*1024*1024 bytes). // // TapeSizeInBytes is a required field TapeSizeInBytes *int64 `type:"long" required:"true"` + + // Set to TRUE if the tape you are creating is to be configured as a write-once-read-many + // (WORM) tape. + Worm *bool `type:"boolean"` } // String returns the string representation @@ -10199,28 +11186,100 @@ func (s *CreateTapesInput) SetTapeSizeInBytes(v int64) *CreateTapesInput { return s } -// CreateTapeOutput -type CreateTapesOutput struct { +// SetWorm sets the Worm field's value. +func (s *CreateTapesInput) SetWorm(v bool) *CreateTapesInput { + s.Worm = &v + return s +} + +// CreateTapeOutput +type CreateTapesOutput struct { + _ struct{} `type:"structure"` + + // A list of unique Amazon Resource Names (ARNs) that represents the virtual + // tapes that were created. + TapeARNs []*string `type:"list"` +} + +// String returns the string representation +func (s CreateTapesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTapesOutput) GoString() string { + return s.String() +} + +// SetTapeARNs sets the TapeARNs field's value. +func (s *CreateTapesOutput) SetTapeARNs(v []*string) *CreateTapesOutput { + s.TapeARNs = v + return s +} + +type DeleteAutomaticTapeCreationPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + // + // GatewayARN is a required field + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAutomaticTapeCreationPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAutomaticTapeCreationPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteAutomaticTapeCreationPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAutomaticTapeCreationPolicyInput"} + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGatewayARN sets the GatewayARN field's value. +func (s *DeleteAutomaticTapeCreationPolicyInput) SetGatewayARN(v string) *DeleteAutomaticTapeCreationPolicyInput { + s.GatewayARN = &v + return s +} + +type DeleteAutomaticTapeCreationPolicyOutput struct { _ struct{} `type:"structure"` - // A list of unique Amazon Resource Names (ARNs) that represents the virtual - // tapes that were created. - TapeARNs []*string `type:"list"` + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` } // String returns the string representation -func (s CreateTapesOutput) String() string { +func (s DeleteAutomaticTapeCreationPolicyOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateTapesOutput) GoString() string { +func (s DeleteAutomaticTapeCreationPolicyOutput) GoString() string { return s.String() } -// SetTapeARNs sets the TapeARNs field's value. -func (s *CreateTapesOutput) SetTapeARNs(v []*string) *CreateTapesOutput { - s.TapeARNs = v +// SetGatewayARN sets the GatewayARN field's value. +func (s *DeleteAutomaticTapeCreationPolicyOutput) SetGatewayARN(v string) *DeleteAutomaticTapeCreationPolicyOutput { + s.GatewayARN = &v return s } @@ -10233,7 +11292,7 @@ type DeleteBandwidthRateLimitInput struct { // One of the BandwidthType values that indicates the gateway bandwidth rate // limit to delete. // - // Valid Values: Upload, Download, All. + // Valid Values: UPLOAD | DOWNLOAD | ALL // // BandwidthType is a required field BandwidthType *string `min:"3" type:"string" required:"true"` @@ -10289,8 +11348,8 @@ func (s *DeleteBandwidthRateLimitInput) SetGatewayARN(v string) *DeleteBandwidth return s } -// A JSON object containing the of the gateway whose bandwidth rate information -// was deleted. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway whose +// bandwidth rate information was deleted. type DeleteBandwidthRateLimitOutput struct { _ struct{} `type:"structure"` @@ -10425,6 +11484,8 @@ type DeleteFileShareInput struct { // and aborts all data uploads to AWS. Otherwise, the file share is not deleted // until all data is uploaded to AWS. This process aborts the data upload process, // and the file share enters the FORCE_DELETING status. + // + // Valid Values: true | false ForceDelete *bool `type:"boolean"` } @@ -10626,6 +11687,12 @@ func (s *DeleteSnapshotScheduleOutput) SetVolumeARN(v string) *DeleteSnapshotSch type DeleteTapeArchiveInput struct { _ struct{} `type:"structure"` + // Set to TRUE to delete an archived tape that belongs to a custom pool with + // tape retention lock. Only archived tapes with tape retention lock set to + // governance can be deleted. Archived tapes with tape retention lock set to + // compliance can't be deleted. + BypassGovernanceRetention *bool `type:"boolean"` + // The Amazon Resource Name (ARN) of the virtual tape to delete from the virtual // tape shelf (VTS). 
// @@ -10659,6 +11726,12 @@ func (s *DeleteTapeArchiveInput) Validate() error { return nil } +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteTapeArchiveInput) SetBypassGovernanceRetention(v bool) *DeleteTapeArchiveInput { + s.BypassGovernanceRetention = &v + return s +} + // SetTapeARN sets the TapeARN field's value. func (s *DeleteTapeArchiveInput) SetTapeARN(v string) *DeleteTapeArchiveInput { s.TapeARN = &v @@ -10694,6 +11767,12 @@ func (s *DeleteTapeArchiveOutput) SetTapeARN(v string) *DeleteTapeArchiveOutput type DeleteTapeInput struct { _ struct{} `type:"structure"` + // Set to TRUE to delete an archived tape that belongs to a custom pool with + // tape retention lock. Only archived tapes with tape retention lock set to + // governance can be deleted. Archived tapes with tape retention lock set to + // compliance can't be deleted. + BypassGovernanceRetention *bool `type:"boolean"` + // The unique Amazon Resource Name (ARN) of the gateway that the virtual tape // to delete is associated with. Use the ListGateways operation to return a // list of gateways for your account and AWS Region. @@ -10739,6 +11818,12 @@ func (s *DeleteTapeInput) Validate() error { return nil } +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteTapeInput) SetBypassGovernanceRetention(v bool) *DeleteTapeInput { + s.BypassGovernanceRetention = &v + return s +} + // SetGatewayARN sets the GatewayARN field's value. func (s *DeleteTapeInput) SetGatewayARN(v string) *DeleteTapeInput { s.GatewayARN = &v @@ -10775,6 +11860,70 @@ func (s *DeleteTapeOutput) SetTapeARN(v string) *DeleteTapeOutput { return s } +type DeleteTapePoolInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the custom tape pool to delete. + // + // PoolARN is a required field + PoolARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTapePoolInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTapePoolInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTapePoolInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTapePoolInput"} + if s.PoolARN == nil { + invalidParams.Add(request.NewErrParamRequired("PoolARN")) + } + if s.PoolARN != nil && len(*s.PoolARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("PoolARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPoolARN sets the PoolARN field's value. +func (s *DeleteTapePoolInput) SetPoolARN(v string) *DeleteTapePoolInput { + s.PoolARN = &v + return s +} + +type DeleteTapePoolOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the custom tape pool being deleted. + PoolARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s DeleteTapePoolOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTapePoolOutput) GoString() string { + return s.String() +} + +// SetPoolARN sets the PoolARN field's value. +func (s *DeleteTapePoolOutput) SetPoolARN(v string) *DeleteTapePoolOutput { + s.PoolARN = &v + return s +} + // A JSON object containing the DeleteVolumeInput$VolumeARN to delete. 
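A sketch of cleaning up a governance-locked archived tape and its custom pool via the BypassGovernanceRetention field on DeleteTapeArchiveInput and the DeleteTapePoolInput type above. The DeleteTapeArchive and DeleteTapePool operation wrappers are assumed from earlier in this file, and the ARNs are supplied by the caller.

package sgwexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

// cleanUpExampleTapes is a hypothetical helper: it removes a governance-locked
// archived tape (which requires BypassGovernanceRetention) and then the custom pool.
func cleanUpExampleTapes(sgw *storagegateway.StorageGateway, tapeARN, poolARN string) error {
	if _, err := sgw.DeleteTapeArchive(&storagegateway.DeleteTapeArchiveInput{
		TapeARN: aws.String(tapeARN),
		// Only applies to tapes whose pool uses governance-mode retention lock;
		// compliance-mode tapes cannot be deleted at all.
		BypassGovernanceRetention: aws.Bool(true),
	}); err != nil {
		return err
	}
	_, err := sgw.DeleteTapePool(&storagegateway.DeleteTapePoolInput{
		PoolARN: aws.String(poolARN),
	})
	return err
}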
type DeleteVolumeInput struct { _ struct{} `type:"structure"` @@ -10818,7 +11967,8 @@ func (s *DeleteVolumeInput) SetVolumeARN(v string) *DeleteVolumeInput { return s } -// A JSON object containing the of the storage volume that was deleted +// A JSON object containing the Amazon Resource Name (ARN) of the storage volume +// that was deleted. type DeleteVolumeOutput struct { _ struct{} `type:"structure"` @@ -10929,7 +12079,7 @@ func (s *DescribeAvailabilityMonitorTestOutput) SetStatus(v string) *DescribeAva return s } -// A JSON object containing the of the gateway. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway. type DescribeBandwidthRateLimitInput struct { _ struct{} `type:"structure"` @@ -11062,7 +12212,7 @@ func (s *DescribeCacheInput) SetGatewayARN(v string) *DescribeCacheInput { type DescribeCacheOutput struct { _ struct{} `type:"structure"` - // The amount of cache in bytes allocated to the a gateway. + // The amount of cache in bytes allocated to a gateway. CacheAllocatedInBytes *int64 `type:"long"` // The file share's contribution to the overall percentage of the gateway's @@ -11084,7 +12234,7 @@ type DescribeCacheOutput struct { CacheUsedPercentage *float64 `type:"double"` // An array of strings that identify disks that are to be configured as working - // storage. Each string have a minimum length of 1 and maximum length of 300. + // storage. Each string has a minimum length of 1 and maximum length of 300. // You can get the disk IDs from the ListLocalDisks API. DiskIds []*string `type:"list"` @@ -11149,8 +12299,8 @@ type DescribeCachediSCSIVolumesInput struct { _ struct{} `type:"structure"` // An array of strings where each string represents the Amazon Resource Name - // (ARN) of a cached volume. All of the specified cached volumes must from the - // same gateway. Use ListVolumes to get volume ARNs for a gateway. + // (ARN) of a cached volume. All of the specified cached volumes must be from + // the same gateway. Use ListVolumes to get volume ARNs for a gateway. // // VolumeARNs is a required field VolumeARNs []*string `type:"list" required:"true"` @@ -11254,7 +12404,7 @@ func (s *DescribeChapCredentialsInput) SetTargetARN(v string) *DescribeChapCrede return s } -// A JSON object containing a . +// A JSON object containing the following fields: type DescribeChapCredentialsOutput struct { _ struct{} `type:"structure"` @@ -11339,16 +12489,25 @@ func (s *DescribeGatewayInformationInput) SetGatewayARN(v string) *DescribeGatew type DescribeGatewayInformationOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the Amazon CloudWatch Log Group that is + // The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that is // used to monitor events in the gateway. CloudWatchLogGroupARN *string `type:"string"` + // Date after which this gateway will not receive software updates for new features + // and bug fixes. + DeprecationDate *string `min:"1" type:"string"` + // The ID of the Amazon EC2 instance that was used to launch the gateway. Ec2InstanceId *string `type:"string"` // The AWS Region where the Amazon EC2 instance is located. Ec2InstanceRegion *string `type:"string"` + // The type of endpoint for your gateway. + // + // Valid Values: STANDARD | FIPS + EndpointType *string `min:"4" type:"string"` + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation // to return a list of gateways for your account and AWS Region. 
GatewayARN *string `min:"50" type:"string"` @@ -11387,6 +12546,9 @@ type DescribeGatewayInformationOutput struct { // this field is not returned in the response. NextUpdateAvailabilityDate *string `min:"1" type:"string"` + // Date after which this gateway will not receive software updates for new features. + SoftwareUpdatesEndDate *string `min:"1" type:"string"` + // A list of up to 50 tags assigned to the gateway, sorted alphabetically by // key name. Each tag is a key-value pair. For a gateway with more than 10 tags // assigned, you can view all tags using the ListTagsForResource API operation. @@ -11413,6 +12575,12 @@ func (s *DescribeGatewayInformationOutput) SetCloudWatchLogGroupARN(v string) *D return s } +// SetDeprecationDate sets the DeprecationDate field's value. +func (s *DescribeGatewayInformationOutput) SetDeprecationDate(v string) *DescribeGatewayInformationOutput { + s.DeprecationDate = &v + return s +} + // SetEc2InstanceId sets the Ec2InstanceId field's value. func (s *DescribeGatewayInformationOutput) SetEc2InstanceId(v string) *DescribeGatewayInformationOutput { s.Ec2InstanceId = &v @@ -11425,6 +12593,12 @@ func (s *DescribeGatewayInformationOutput) SetEc2InstanceRegion(v string) *Descr return s } +// SetEndpointType sets the EndpointType field's value. +func (s *DescribeGatewayInformationOutput) SetEndpointType(v string) *DescribeGatewayInformationOutput { + s.EndpointType = &v + return s +} + // SetGatewayARN sets the GatewayARN field's value. func (s *DescribeGatewayInformationOutput) SetGatewayARN(v string) *DescribeGatewayInformationOutput { s.GatewayARN = &v @@ -11485,6 +12659,12 @@ func (s *DescribeGatewayInformationOutput) SetNextUpdateAvailabilityDate(v strin return s } +// SetSoftwareUpdatesEndDate sets the SoftwareUpdatesEndDate field's value. +func (s *DescribeGatewayInformationOutput) SetSoftwareUpdatesEndDate(v string) *DescribeGatewayInformationOutput { + s.SoftwareUpdatesEndDate = &v + return s +} + // SetTags sets the Tags field's value. func (s *DescribeGatewayInformationOutput) SetTags(v []*Tag) *DescribeGatewayInformationOutput { s.Tags = v @@ -11497,7 +12677,7 @@ func (s *DescribeGatewayInformationOutput) SetVPCEndpoint(v string) *DescribeGat return s } -// A JSON object containing the of the gateway. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway. type DescribeMaintenanceStartTimeInput struct { _ struct{} `type:"structure"` @@ -11557,8 +12737,6 @@ type DescribeMaintenanceStartTimeOutput struct { // The day of the month component of the maintenance start time represented // as an ordinal number from 1 to 28, where 1 represents the first day of the // month and 28 represents the last day of the month. - // - // This value is only available for tape and volume gateways. DayOfMonth *int64 `min:"1" type:"integer"` // An ordinal number between 0 and 6 that represents the day of the week, where @@ -11839,24 +13017,27 @@ type DescribeSMBSettingsOutput struct { // to return a list of gateways for your account and AWS Region. GatewayARN *string `min:"50" type:"string"` - // This value is true if a password for the guest user “smbguest” is set, - // and otherwise false. + // This value is true if a password for the guest user smbguest is set, otherwise + // false. + // + // Valid Values: true | false SMBGuestPasswordSet *bool `type:"boolean"` // The type of security strategy that was specified for file gateway. // - // ClientSpecified: if you use this option, requests are established based on - // what is negotiated by the client. 
This option is recommended when you want - // to maximize compatibility across different clients in your environment. + // * ClientSpecified: If you use this option, requests are established based + // on what is negotiated by the client. This option is recommended when you + // want to maximize compatibility across different clients in your environment. // - // MandatorySigning: if you use this option, file gateway only allows connections - // from SMBv2 or SMBv3 clients that have signing enabled. This option works - // with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer. + // * MandatorySigning: If you use this option, file gateway only allows connections + // from SMBv2 or SMBv3 clients that have signing enabled. This option works + // with SMB clients on Microsoft Windows Vista, Windows Server 2008 or newer. // - // MandatoryEncryption: if you use this option, file gateway only allows connections - // from SMBv3 clients that have encryption enabled. This option is highly recommended - // for environments that handle sensitive data. This option works with SMB clients - // on Microsoft Windows 8, Windows Server 2012 or newer. + // * MandatoryEncryption: If you use this option, file gateway only allows + // connections from SMBv3 clients that have encryption enabled. This option + // is highly recommended for environments that handle sensitive data. This + // option works with SMB clients on Microsoft Windows 8, Windows Server 2012 + // or newer. SMBSecurityStrategy *string `type:"string" enum:"SMBSecurityStrategy"` } @@ -12021,8 +13202,8 @@ type DescribeStorediSCSIVolumesInput struct { _ struct{} `type:"structure"` // An array of strings where each string represents the Amazon Resource Name - // (ARN) of a stored volume. All of the specified stored volumes must from the - // same gateway. Use ListVolumes to get volume ARNs for a gateway. + // (ARN) of a stored volume. All of the specified stored volumes must be from + // the same gateway. Use ListVolumes to get volume ARNs for a gateway. // // VolumeARNs is a required field VolumeARNs []*string `type:"list" required:"true"` @@ -12073,7 +13254,7 @@ type DescribeStorediSCSIVolumesOutput struct { // // * NetworkInterfacePort: The port used to communicate with iSCSI targets. // - // * PreservedExistingData: Indicates if when the stored volume was created, + // * PreservedExistingData: Indicates when the stored volume was created, // existing data on the underlying local disk was preserved. // // * SourceSnapshotId: If the stored volume was created from a snapshot, @@ -12106,7 +13287,7 @@ type DescribeStorediSCSIVolumesOutput struct { // of the volume. // // * VolumeType: One of the enumeration values describing the type of the - // volume. Currently, on STORED volumes are supported. + // volume. Currently, only STORED volumes are supported. StorediSCSIVolumes []*StorediSCSIVolume `type:"list"` } @@ -12130,7 +13311,7 @@ func (s *DescribeStorediSCSIVolumesOutput) SetStorediSCSIVolumes(v []*StorediSCS type DescribeTapeArchivesInput struct { _ struct{} `type:"structure"` - // Specifies that the number of virtual tapes descried be limited to the specified + // Specifies that the number of virtual tapes described be limited to the specified // number. Limit *int64 `min:"1" type:"integer"` @@ -12201,7 +13382,7 @@ type DescribeTapeArchivesOutput struct { // An array of virtual tape objects in the virtual tape shelf (VTS). The description // includes of the Amazon Resource Name (ARN) of the virtual tapes. 
The information // returned includes the Amazon Resource Names (ARNs) of the tapes, size of - // the tapes, status of the tapes, progress of the description and tape barcode. + // the tapes, status of the tapes, progress of the description, and tape barcode. TapeArchives []*TapeArchive `type:"list"` } @@ -12659,7 +13840,7 @@ type DescribeVTLDevicesOutput struct { // to describe, this field does not appear in the response. Marker *string `min:"1" type:"string"` - // An array of VTL device objects composed of the Amazon Resource Name(ARN) + // An array of VTL device objects composed of the Amazon Resource Name (ARN) // of the VTL devices. VTLDevices []*VTLDevice `type:"list"` } @@ -12692,7 +13873,7 @@ func (s *DescribeVTLDevicesOutput) SetVTLDevices(v []*VTLDevice) *DescribeVTLDev return s } -// A JSON object containing the of the gateway. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway. type DescribeWorkingStorageInput struct { _ struct{} `type:"structure"` @@ -12799,6 +13980,8 @@ type DetachVolumeInput struct { // Set to true to forcibly remove the iSCSI connection of the target volume // and detach the volume. The default is false. If this value is set to false, // you must manually disconnect the iSCSI connection from the target volume. + // + // Valid Values: true | false ForceDetach *bool `type:"boolean"` // The Amazon Resource Name (ARN) of the volume to detach from the gateway. @@ -12998,7 +14181,9 @@ type Disk struct { DiskAllocationResource *string `type:"string"` // One of the DiskAllocationType enumeration values that identifies how a local - // disk is used. Valid values: UPLOAD_BUFFER, CACHE_STORAGE + // disk is used. + // + // Valid Values: UPLOAD_BUFFER | CACHE_STORAGE DiskAllocationType *string `min:"3" type:"string"` // A list of values that represents attributes of a local disk. @@ -13079,9 +14264,9 @@ func (s *Disk) SetDiskStatus(v string) *Disk { return s } -// Provides additional information about an error that was returned by the service -// as an or. See the errorCode and errorDetails members for more information -// about the error. +// Provides additional information about an error that was returned by the service. +// See the errorCode and errorDetails members for more information about the +// error. type Error struct { _ struct{} `type:"structure"` @@ -13124,8 +14309,9 @@ type FileShareInfo struct { // The ID of the file share. FileShareId *string `min:"12" type:"string"` - // The status of the file share. Possible values are CREATING, UPDATING, AVAILABLE - // and DELETING. + // The status of the file share. + // + // Valid Values: CREATING | UPDATING | AVAILABLE | DELETING FileShareStatus *string `min:"3" type:"string"` // The type of the file share. @@ -13200,7 +14386,7 @@ type GatewayInfo struct { // The state of the gateway. // - // Valid Values: DISABLED or ACTIVE + // Valid Values: DISABLED | ACTIVE GatewayOperationalState *string `min:"2" type:"string"` // The type of the gateway. @@ -13262,8 +14448,8 @@ func (s *GatewayInfo) SetGatewayType(v string) *GatewayInfo { // An internal server error has occurred during the request. For more information, // see the error and message fields. type InternalServerError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A StorageGatewayError that provides more information about the cause of the // error. 
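// Usage sketch (an assumption, not generated SDK source): the surrounding hunks move the
// modeled exception types (InternalServerError, InvalidGatewayRequestException,
// ServiceUnavailableError) to pointer receivers with an exported RespMetadata field,
// which is what allows callers to match them with errors.As. The client wiring below is
// a minimal illustration based on standard aws-sdk-go patterns, not part of this patch.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	// Assumed setup: default credential/region resolution via a shared session.
	svc := storagegateway.New(session.Must(session.NewSession()))

	_, err := svc.ListGateways(&storagegateway.ListGatewaysInput{})

	// With pointer receivers, the concrete modeled exception can be unwrapped directly.
	var igre *storagegateway.InvalidGatewayRequestException
	if errors.As(err, &igre) {
		fmt.Println(igre.Code(), igre.Message(), igre.StatusCode())
	}
}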
@@ -13285,17 +14471,17 @@ func (s InternalServerError) GoString() string { func newErrorInternalServerError(v protocol.ResponseMetadata) error { return &InternalServerError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerError) Code() string { +func (s *InternalServerError) Code() string { return "InternalServerError" } // Message returns the exception's message. -func (s InternalServerError) Message() string { +func (s *InternalServerError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13303,29 +14489,29 @@ func (s InternalServerError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServerError) OrigErr() error { +func (s *InternalServerError) OrigErr() error { return nil } -func (s InternalServerError) Error() string { +func (s *InternalServerError) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerError) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerError) RequestID() string { + return s.RespMetadata.RequestID } // An exception occurred because an invalid gateway request was issued to the // service. For more information, see the error and message fields. type InvalidGatewayRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A StorageGatewayError that provides more detail about the cause of the error. Error_ *Error `locationName:"error" type:"structure"` @@ -13346,17 +14532,17 @@ func (s InvalidGatewayRequestException) GoString() string { func newErrorInvalidGatewayRequestException(v protocol.ResponseMetadata) error { return &InvalidGatewayRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidGatewayRequestException) Code() string { +func (s *InvalidGatewayRequestException) Code() string { return "InvalidGatewayRequestException" } // Message returns the exception's message. -func (s InvalidGatewayRequestException) Message() string { +func (s *InvalidGatewayRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -13364,22 +14550,22 @@ func (s InvalidGatewayRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidGatewayRequestException) OrigErr() error { +func (s *InvalidGatewayRequestException) OrigErr() error { return nil } -func (s InvalidGatewayRequestException) Error() string { +func (s *InvalidGatewayRequestException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidGatewayRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidGatewayRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidGatewayRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidGatewayRequestException) RequestID() string { + return s.RespMetadata.RequestID } // JoinDomainInput @@ -13566,11 +14752,73 @@ func (s *JoinDomainOutput) SetGatewayARN(v string) *JoinDomainOutput { return s } +type ListAutomaticTapeCreationPoliciesInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s ListAutomaticTapeCreationPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAutomaticTapeCreationPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAutomaticTapeCreationPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAutomaticTapeCreationPoliciesInput"} + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGatewayARN sets the GatewayARN field's value. +func (s *ListAutomaticTapeCreationPoliciesInput) SetGatewayARN(v string) *ListAutomaticTapeCreationPoliciesInput { + s.GatewayARN = &v + return s +} + +type ListAutomaticTapeCreationPoliciesOutput struct { + _ struct{} `type:"structure"` + + // Gets a listing of information about the gateway's automatic tape creation + // policies, including the automatic tape creation rules and the gateway that + // is using the policies. + AutomaticTapeCreationPolicyInfos []*AutomaticTapeCreationPolicyInfo `type:"list"` +} + +// String returns the string representation +func (s ListAutomaticTapeCreationPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAutomaticTapeCreationPoliciesOutput) GoString() string { + return s.String() +} + +// SetAutomaticTapeCreationPolicyInfos sets the AutomaticTapeCreationPolicyInfos field's value. +func (s *ListAutomaticTapeCreationPoliciesOutput) SetAutomaticTapeCreationPolicyInfos(v []*AutomaticTapeCreationPolicyInfo) *ListAutomaticTapeCreationPoliciesOutput { + s.AutomaticTapeCreationPolicyInfos = v + return s +} + // ListFileShareInput type ListFileSharesInput struct { _ struct{} `type:"structure"` - // The Amazon resource Name (ARN) of the gateway whose file shares you want + // The Amazon Resource Name (ARN) of the gateway whose file shares you want // to list. If this field is not present, all file shares under your account // are listed. GatewayARN *string `min:"50" type:"string"` @@ -13765,7 +15013,7 @@ func (s *ListGatewaysOutput) SetMarker(v string) *ListGatewaysOutput { return s } -// A JSON object containing the of the gateway. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway. type ListLocalDisksInput struct { _ struct{} `type:"structure"` @@ -13920,7 +15168,7 @@ type ListTagsForResourceOutput struct { // list of tags. Marker *string `min:"1" type:"string"` - // he Amazon Resource Name (ARN) of the resource for which you want to list + // The Amazon Resource Name (ARN) of the resource for which you want to list // tags. 
ResourceARN *string `min:"50" type:"string"` @@ -13956,6 +15204,103 @@ func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput return s } +type ListTapePoolsInput struct { + _ struct{} `type:"structure"` + + // An optional number limit for the tape pools in the list returned by this + // call. + Limit *int64 `min:"1" type:"integer"` + + // A string that indicates the position at which to begin the returned list + // of tape pools. + Marker *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of each of the custom tape pools you want + // to list. If you don't specify a custom tape pool ARN, the response lists + // all custom tape pools. + PoolARNs []*string `type:"list"` +} + +// String returns the string representation +func (s ListTapePoolsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTapePoolsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTapePoolsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTapePoolsInput"} + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLimit sets the Limit field's value. +func (s *ListTapePoolsInput) SetLimit(v int64) *ListTapePoolsInput { + s.Limit = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListTapePoolsInput) SetMarker(v string) *ListTapePoolsInput { + s.Marker = &v + return s +} + +// SetPoolARNs sets the PoolARNs field's value. +func (s *ListTapePoolsInput) SetPoolARNs(v []*string) *ListTapePoolsInput { + s.PoolARNs = v + return s +} + +type ListTapePoolsOutput struct { + _ struct{} `type:"structure"` + + // A string that indicates the position at which to begin the returned list + // of tape pools. Use the marker in your next request to continue pagination + // of tape pools. If there are no more tape pools to list, this element does + // not appear in the response body. + Marker *string `min:"1" type:"string"` + + // An array of PoolInfo objects, where each object describes a single custom + // tape pool. If there are no custom tape pools, the PoolInfos is an empty array. + PoolInfos []*PoolInfo `type:"list"` +} + +// String returns the string representation +func (s ListTapePoolsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTapePoolsOutput) GoString() string { + return s.String() +} + +// SetMarker sets the Marker field's value. +func (s *ListTapePoolsOutput) SetMarker(v string) *ListTapePoolsOutput { + s.Marker = &v + return s +} + +// SetPoolInfos sets the PoolInfos field's value. +func (s *ListTapePoolsOutput) SetPoolInfos(v []*PoolInfo) *ListTapePoolsOutput { + s.PoolInfos = v + return s +} + // A JSON object that contains one or more of the following fields: // // * ListTapesInput$Limit @@ -14037,9 +15382,9 @@ type ListTapesOutput struct { // in the response body. Marker *string `min:"1" type:"string"` - // An array of TapeInfo objects, where each object describes an a single tape. - // If there not tapes in the tape library or VTS, then the TapeInfos is an empty - // array. + // An array of TapeInfo objects, where each object describes a single tape. 
+ // If there are no tapes in the tape library or VTS, then the TapeInfos is an + // empty array. TapeInfos []*TapeInfo `type:"list"` } @@ -14337,13 +15682,13 @@ func (s *ListVolumesOutput) SetVolumeInfos(v []*VolumeInfo) *ListVolumesOutput { type NFSFileShareDefaults struct { _ struct{} `type:"structure"` - // The Unix directory mode in the form "nnnn". For example, "0666" represents + // The Unix directory mode in the form "nnnn". For example, 0666 represents // the default access mode for all directories inside the file share. The default // value is 0777. DirectoryMode *string `min:"1" type:"string"` - // The Unix file mode in the form "nnnn". For example, "0666" represents the - // default file mode inside the file share. The default value is 0666. + // The Unix file mode in the form "nnnn". For example, 0666 represents the default + // file mode inside the file share. The default value is 0666. FileMode *string `min:"1" type:"string"` // The default group ID for the file share (unless the files have another group @@ -14411,13 +15756,17 @@ func (s *NFSFileShareDefaults) SetOwnerId(v int64) *NFSFileShareDefaults { type NFSFileShareInfo struct { _ struct{} `type:"structure"` + // Refresh cache information. + CacheAttributes *CacheAttributes `type:"structure"` + // The list of clients that are allowed to access the file gateway. The list // must contain either valid IP addresses or valid CIDR blocks. ClientList []*string `min:"1" type:"list"` // The default storage class for objects put into an Amazon S3 bucket by the - // file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. - // If this field is not populated, the default value S3_STANDARD is used. Optional. + // file gateway. The default value is S3_INTELLIGENT_TIERING. Optional. + // + // Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA DefaultStorageClass *string `min:"5" type:"string"` // The Amazon Resource Name (ARN) of the file share. @@ -14426,8 +15775,14 @@ type NFSFileShareInfo struct { // The ID of the file share. FileShareId *string `min:"12" type:"string"` - // The status of the file share. Possible values are CREATING, UPDATING, AVAILABLE - // and DELETING. + // The name of the file share. Optional. + // + // FileShareName must be set if an S3 prefix name is set in LocationARN. + FileShareName *string `min:"1" type:"string"` + + // The status of the file share. + // + // Valid Values: CREATING | UPDATING | AVAILABLE | DELETING FileShareStatus *string `min:"3" type:"string"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation @@ -14436,18 +15791,24 @@ type NFSFileShareInfo struct { // A value that enables guessing of the MIME type for uploaded objects based // on file extensions. Set this value to true to enable MIME type guessing, - // and otherwise to false. The default value is true. + // otherwise set to false. The default value is true. + // + // Valid Values: true | false GuessMIMETypeEnabled *bool `type:"boolean"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or - // false to use a key managed by Amazon S3. Optional. + // Set to true to use Amazon S3 server-side encryption with your own AWS KMS + // key, or false to use a key managed by Amazon S3. Optional. + // + // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. 
+ // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` - // The ARN of the backend storage used for storing file data. + // The ARN of the backend storage used for storing file data. A prefix name + // can be added to the S3 bucket name. It must end with a "/". LocationARN *string `min:"16" type:"string"` // Describes Network File System (NFS) file share default values. Files and @@ -14458,25 +15819,30 @@ type NFSFileShareInfo struct { // gateways. NFSFileShareDefaults *NFSFileShareDefaults `type:"structure"` - // A value that sets the access control list permission for objects in the S3 - // bucket that a file gateway puts objects into. The default value is "private". + // A value that sets the access control list (ACL) permission for objects in + // the S3 bucket that a file gateway puts objects into. The default value is + // private. ObjectACL *string `type:"string" enum:"ObjectACL"` // The file share path used by the NFS client to identify the mount point. Path *string `type:"string"` - // A value that sets the write status of a file share. This value is true if - // the write status is read-only, and otherwise false. + // A value that sets the write status of a file share. Set this value to true + // to set the write status to read-only, otherwise set to false. + // + // Valid Values: true | false ReadOnly *bool `type:"boolean"` // A value that sets who pays the cost of the request and the cost associated // with data download from the S3 bucket. If this value is set to true, the - // requester pays the costs. Otherwise the S3 bucket owner pays. However, the + // requester pays the costs; otherwise, the S3 bucket owner pays. However, the // S3 bucket owner always pays the cost of storing data. // // RequesterPays is a configuration for the S3 bucket that backs the file share, // so make sure that the configuration on the file share is the same as the // S3 bucket configuration. + // + // Valid Values: true | false RequesterPays *bool `type:"boolean"` // The ARN of the IAM role that file gateway assumes when it accesses the underlying @@ -14485,11 +15851,11 @@ type NFSFileShareInfo struct { // The user mapped to anonymous user. Valid options are the following: // - // * RootSquash - Only root is mapped to anonymous user. + // * RootSquash: Only root is mapped to anonymous user. // - // * NoSquash - No one is mapped to anonymous user + // * NoSquash: No one is mapped to anonymous user. // - // * AllSquash - Everyone is mapped to anonymous user. + // * AllSquash: Everyone is mapped to anonymous user. Squash *string `min:"5" type:"string"` // A list of up to 50 tags assigned to the NFS file share, sorted alphabetically @@ -14508,6 +15874,12 @@ func (s NFSFileShareInfo) GoString() string { return s.String() } +// SetCacheAttributes sets the CacheAttributes field's value. +func (s *NFSFileShareInfo) SetCacheAttributes(v *CacheAttributes) *NFSFileShareInfo { + s.CacheAttributes = v + return s +} + // SetClientList sets the ClientList field's value. func (s *NFSFileShareInfo) SetClientList(v []*string) *NFSFileShareInfo { s.ClientList = v @@ -14532,6 +15904,12 @@ func (s *NFSFileShareInfo) SetFileShareId(v string) *NFSFileShareInfo { return s } +// SetFileShareName sets the FileShareName field's value. 
+func (s *NFSFileShareInfo) SetFileShareName(v string) *NFSFileShareInfo { + s.FileShareName = &v + return s +} + // SetFileShareStatus sets the FileShareStatus field's value. func (s *NFSFileShareInfo) SetFileShareStatus(v string) *NFSFileShareInfo { s.FileShareStatus = &v @@ -14702,36 +16080,116 @@ func (s *NotifyWhenUploadedInput) SetFileShareARN(v string) *NotifyWhenUploadedI return s } -type NotifyWhenUploadedOutput struct { +type NotifyWhenUploadedOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the file share. + FileShareARN *string `min:"50" type:"string"` + + // The randomly generated ID of the notification that was sent. This ID is in + // UUID format. + NotificationId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s NotifyWhenUploadedOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotifyWhenUploadedOutput) GoString() string { + return s.String() +} + +// SetFileShareARN sets the FileShareARN field's value. +func (s *NotifyWhenUploadedOutput) SetFileShareARN(v string) *NotifyWhenUploadedOutput { + s.FileShareARN = &v + return s +} + +// SetNotificationId sets the NotificationId field's value. +func (s *NotifyWhenUploadedOutput) SetNotificationId(v string) *NotifyWhenUploadedOutput { + s.NotificationId = &v + return s +} + +// Describes a custom tape pool. +type PoolInfo struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the file share. - FileShareARN *string `min:"50" type:"string"` + // The Amazon Resource Name (ARN) of the custom tape pool. Use the ListTapePools + // operation to return a list of custom tape pools for your account and AWS + // Region. + PoolARN *string `min:"50" type:"string"` - // The randomly generated ID of the notification that was sent. This ID is in - // UUID format. - NotificationId *string `min:"1" type:"string"` + // The name of the custom tape pool. PoolName can use all ASCII characters, + // except '/' and '\'. + PoolName *string `min:"1" type:"string"` + + // Status of the custom tape pool. Pool can be ACTIVE or DELETED. + PoolStatus *string `type:"string" enum:"PoolStatus"` + + // Tape retention lock time is set in days. Tape retention lock can be enabled + // for up to 100 years (36,500 days). + RetentionLockTimeInDays *int64 `type:"integer"` + + // Tape retention lock type, which can be configured in two modes. When configured + // in governance mode, AWS accounts with specific IAM permissions are authorized + // to remove the tape retention lock from archived virtual tapes. When configured + // in compliance mode, the tape retention lock cannot be removed by any user, + // including the root AWS account. + RetentionLockType *string `type:"string" enum:"RetentionLockType"` + + // The storage class that is associated with the custom pool. When you use your + // backup application to eject the tape, the tape is archived directly into + // the storage class (S3 Glacier or S3 Glacier Deep Archive) that corresponds + // to the pool. + StorageClass *string `type:"string" enum:"TapeStorageClass"` } // String returns the string representation -func (s NotifyWhenUploadedOutput) String() string { +func (s PoolInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s NotifyWhenUploadedOutput) GoString() string { +func (s PoolInfo) GoString() string { return s.String() } -// SetFileShareARN sets the FileShareARN field's value. 
-func (s *NotifyWhenUploadedOutput) SetFileShareARN(v string) *NotifyWhenUploadedOutput { - s.FileShareARN = &v +// SetPoolARN sets the PoolARN field's value. +func (s *PoolInfo) SetPoolARN(v string) *PoolInfo { + s.PoolARN = &v return s } -// SetNotificationId sets the NotificationId field's value. -func (s *NotifyWhenUploadedOutput) SetNotificationId(v string) *NotifyWhenUploadedOutput { - s.NotificationId = &v +// SetPoolName sets the PoolName field's value. +func (s *PoolInfo) SetPoolName(v string) *PoolInfo { + s.PoolName = &v + return s +} + +// SetPoolStatus sets the PoolStatus field's value. +func (s *PoolInfo) SetPoolStatus(v string) *PoolInfo { + s.PoolStatus = &v + return s +} + +// SetRetentionLockTimeInDays sets the RetentionLockTimeInDays field's value. +func (s *PoolInfo) SetRetentionLockTimeInDays(v int64) *PoolInfo { + s.RetentionLockTimeInDays = &v + return s +} + +// SetRetentionLockType sets the RetentionLockType field's value. +func (s *PoolInfo) SetRetentionLockType(v string) *PoolInfo { + s.RetentionLockType = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *PoolInfo) SetStorageClass(v string) *PoolInfo { + s.StorageClass = &v return s } @@ -14746,17 +16204,19 @@ type RefreshCacheInput struct { // A comma-separated list of the paths of folders to refresh in the cache. The // default is ["/"]. The default refreshes objects and folders at the root of - // the Amazon S3 bucket. If Recursive is set to "true", the entire S3 bucket - // that the file share has access to is refreshed. + // the Amazon S3 bucket. If Recursive is set to true, the entire S3 bucket that + // the file share has access to is refreshed. FolderList []*string `min:"1" type:"list"` // A value that specifies whether to recursively refresh folders in the cache. // The refresh includes folders that were in the cache the last time the gateway - // listed the folder's contents. If this value set to "true", each folder that + // listed the folder's contents. If this value set to true, each folder that // is listed in FolderList is recursively updated. Otherwise, subfolders listed // in FolderList are not refreshed. Only objects that are in folders listed // directly under FolderList are found and used for the update. The default - // is "true". + // is true. + // + // Valid Values: true | false Recursive *bool `type:"boolean"` } @@ -14852,7 +16312,7 @@ type RemoveTagsFromResourceInput struct { ResourceARN *string `min:"50" type:"string" required:"true"` // The keys of the tags you want to remove from the specified resource. A tag - // is composed of a key/value pair. + // is composed of a key-value pair. // // TagKeys is a required field TagKeys []*string `type:"list" required:"true"` @@ -15173,17 +16633,30 @@ type SMBFileShareInfo struct { // A list of users or groups in the Active Directory that have administrator // rights to the file share. A group must be prefixed with the @ character. - // For example @group1. Can only be set if Authentication is set to ActiveDirectory. + // Acceptable formats include: DOMAIN\User1, user1, @group1, and @DOMAIN\group1. + // Can only be set if Authentication is set to ActiveDirectory. AdminUserList []*string `type:"list"` - // The authentication method of the file share. + // The Amazon Resource Name (ARN) of the storage used for the audit logs. + AuditDestinationARN *string `type:"string"` + + // The authentication method of the file share. The default is ActiveDirectory. // - // Valid values are ActiveDirectory or GuestAccess. 
The default is ActiveDirectory. + // Valid Values: ActiveDirectory | GuestAccess Authentication *string `min:"5" type:"string"` + // Refresh cache information. + CacheAttributes *CacheAttributes `type:"structure"` + + // The case of an object name in an Amazon S3 bucket. For ClientSpecified, the + // client determines the case sensitivity. For CaseSensitive, the gateway determines + // the case sensitivity. The default value is ClientSpecified. + CaseSensitivity *string `type:"string" enum:"CaseSensitivity"` + // The default storage class for objects put into an Amazon S3 bucket by the - // file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. - // If this field is not populated, the default value S3_STANDARD is used. Optional. + // file gateway. The default value is S3_INTELLIGENT_TIERING. Optional. + // + // Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA DefaultStorageClass *string `min:"5" type:"string"` // The Amazon Resource Name (ARN) of the file share. @@ -15192,8 +16665,14 @@ type SMBFileShareInfo struct { // The ID of the file share. FileShareId *string `min:"12" type:"string"` - // The status of the file share. Possible values are CREATING, UPDATING, AVAILABLE - // and DELETING. + // The name of the file share. Optional. + // + // FileShareName must be set if an S3 prefix name is set in LocationARN. + FileShareName *string `min:"1" type:"string"` + + // The status of the file share. + // + // Valid Values: CREATING | UPDATING | AVAILABLE | DELETING FileShareStatus *string `min:"3" type:"string"` // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation @@ -15202,56 +16681,69 @@ type SMBFileShareInfo struct { // A value that enables guessing of the MIME type for uploaded objects based // on file extensions. Set this value to true to enable MIME type guessing, - // and otherwise to false. The default value is true. + // otherwise set to false. The default value is true. + // + // Valid Values: true | false GuessMIMETypeEnabled *bool `type:"boolean"` // A list of users or groups in the Active Directory that are not allowed to - // access the file share. A group must be prefixed with the @ character. For - // example @group1. Can only be set if Authentication is set to ActiveDirectory. + // access the file share. A group must be prefixed with the @ character. Acceptable + // formats include: DOMAIN\User1, user1, @group1, and @DOMAIN\group1. Can only + // be set if Authentication is set to ActiveDirectory. InvalidUserList []*string `type:"list"` - // True to use Amazon S3 server-side encryption with your own AWS KMS key, or - // false to use a key managed by Amazon S3. Optional. + // Set to true to use Amazon S3 server-side encryption with your own AWS KMS + // key, or false to use a key managed by Amazon S3. Optional. + // + // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` - // The ARN of the backend storage used for storing file data. + // The ARN of the backend storage used for storing file data. 
A prefix name + // can be added to the S3 bucket name. It must end with a "/". LocationARN *string `min:"16" type:"string"` - // A value that sets the access control list permission for objects in the S3 - // bucket that a file gateway puts objects into. The default value is "private". + // A value that sets the access control list (ACL) permission for objects in + // the S3 bucket that a file gateway puts objects into. The default value is + // private. ObjectACL *string `type:"string" enum:"ObjectACL"` // The file share path used by the SMB client to identify the mount point. Path *string `type:"string"` - // A value that sets the write status of a file share. This value is true if - // the write status is read-only, and otherwise false. + // A value that sets the write status of a file share. Set this value to true + // to set the write status to read-only, otherwise set to false. + // + // Valid Values: true | false ReadOnly *bool `type:"boolean"` // A value that sets who pays the cost of the request and the cost associated // with data download from the S3 bucket. If this value is set to true, the - // requester pays the costs. Otherwise the S3 bucket owner pays. However, the + // requester pays the costs; otherwise, the S3 bucket owner pays. However, the // S3 bucket owner always pays the cost of storing data. // // RequesterPays is a configuration for the S3 bucket that backs the file share, // so make sure that the configuration on the file share is the same as the // S3 bucket configuration. + // + // Valid Values: true | false RequesterPays *bool `type:"boolean"` // The ARN of the IAM role that file gateway assumes when it accesses the underlying // storage. Role *string `min:"20" type:"string"` - // If this value is set to "true", indicates that ACL (access control list) - // is enabled on the SMB file share. If it is set to "false", it indicates that + // If this value is set to true, it indicates that access control list (ACL) + // is enabled on the SMB file share. If it is set to false, it indicates that // file and directory permissions are mapped to the POSIX permission. // - // For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html - // in the Storage Gateway User Guide. + // For more information, see Using Microsoft Windows ACLs to control access + // to an SMB file share (https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html) + // in the AWS Storage Gateway User Guide. SMBACLEnabled *bool `type:"boolean"` // A list of up to 50 tags assigned to the SMB file share, sorted alphabetically @@ -15260,8 +16752,9 @@ type SMBFileShareInfo struct { Tags []*Tag `type:"list"` // A list of users or groups in the Active Directory that are allowed to access - // the file share. A group must be prefixed with the @ character. For example - // @group1. Can only be set if Authentication is set to ActiveDirectory. + // the file share. A group must be prefixed with the @ character. Acceptable + // formats include: DOMAIN\User1, user1, @group1, and @DOMAIN\group1. Can only + // be set if Authentication is set to ActiveDirectory. ValidUserList []*string `type:"list"` } @@ -15281,12 +16774,30 @@ func (s *SMBFileShareInfo) SetAdminUserList(v []*string) *SMBFileShareInfo { return s } +// SetAuditDestinationARN sets the AuditDestinationARN field's value. 
+func (s *SMBFileShareInfo) SetAuditDestinationARN(v string) *SMBFileShareInfo { + s.AuditDestinationARN = &v + return s +} + // SetAuthentication sets the Authentication field's value. func (s *SMBFileShareInfo) SetAuthentication(v string) *SMBFileShareInfo { s.Authentication = &v return s } +// SetCacheAttributes sets the CacheAttributes field's value. +func (s *SMBFileShareInfo) SetCacheAttributes(v *CacheAttributes) *SMBFileShareInfo { + s.CacheAttributes = v + return s +} + +// SetCaseSensitivity sets the CaseSensitivity field's value. +func (s *SMBFileShareInfo) SetCaseSensitivity(v string) *SMBFileShareInfo { + s.CaseSensitivity = &v + return s +} + // SetDefaultStorageClass sets the DefaultStorageClass field's value. func (s *SMBFileShareInfo) SetDefaultStorageClass(v string) *SMBFileShareInfo { s.DefaultStorageClass = &v @@ -15305,6 +16816,12 @@ func (s *SMBFileShareInfo) SetFileShareId(v string) *SMBFileShareInfo { return s } +// SetFileShareName sets the FileShareName field's value. +func (s *SMBFileShareInfo) SetFileShareName(v string) *SMBFileShareInfo { + s.FileShareName = &v + return s +} + // SetFileShareStatus sets the FileShareStatus field's value. func (s *SMBFileShareInfo) SetFileShareStatus(v string) *SMBFileShareInfo { s.FileShareStatus = &v @@ -15398,8 +16915,8 @@ func (s *SMBFileShareInfo) SetValidUserList(v []*string) *SMBFileShareInfo { // An internal server error has occurred because the service is unavailable. // For more information, see the error and message fields. type ServiceUnavailableError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A StorageGatewayError that provides more information about the cause of the // error. @@ -15421,17 +16938,17 @@ func (s ServiceUnavailableError) GoString() string { func newErrorServiceUnavailableError(v protocol.ResponseMetadata) error { return &ServiceUnavailableError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableError) Code() string { +func (s *ServiceUnavailableError) Code() string { return "ServiceUnavailableError" } // Message returns the exception's message. -func (s ServiceUnavailableError) Message() string { +func (s *ServiceUnavailableError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15439,22 +16956,22 @@ func (s ServiceUnavailableError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableError) OrigErr() error { +func (s *ServiceUnavailableError) OrigErr() error { return nil } -func (s ServiceUnavailableError) Error() string { +func (s *ServiceUnavailableError) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s ServiceUnavailableError) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableError) RequestID() string { + return s.RespMetadata.RequestID } // SetLocalConsolePasswordInput @@ -15551,7 +17068,7 @@ type SetSMBGuestPasswordInput struct { // GatewayARN is a required field GatewayARN *string `min:"50" type:"string" required:"true"` - // The password that you want to set for your SMB Server. + // The password that you want to set for your SMB server. // // Password is a required field Password *string `min:"6" type:"string" required:"true" sensitive:"true"` @@ -15625,7 +17142,8 @@ func (s *SetSMBGuestPasswordOutput) SetGatewayARN(v string) *SetSMBGuestPassword return s } -// A JSON object containing the of the gateway to shut down. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway to +// shut down. type ShutdownGatewayInput struct { _ struct{} `type:"structure"` @@ -15668,7 +17186,8 @@ func (s *ShutdownGatewayInput) SetGatewayARN(v string) *ShutdownGatewayInput { return s } -// A JSON object containing the of the gateway that was shut down. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway that +// was shut down. type ShutdownGatewayOutput struct { _ struct{} `type:"structure"` @@ -15759,7 +17278,8 @@ func (s *StartAvailabilityMonitorTestOutput) SetGatewayARN(v string) *StartAvail return s } -// A JSON object containing the of the gateway to start. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway to +// start. type StartGatewayInput struct { _ struct{} `type:"structure"` @@ -15802,7 +17322,8 @@ func (s *StartGatewayInput) SetGatewayARN(v string) *StartGatewayInput { return s } -// A JSON object containing the of the gateway that was restarted. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway that +// was restarted. type StartGatewayOutput struct { _ struct{} `type:"structure"` @@ -15832,17 +17353,18 @@ type StorediSCSIVolume struct { _ struct{} `type:"structure"` // The date the volume was created. Volumes created prior to March 28, 2017 - // don’t have this time stamp. + // don’t have this timestamp. CreatedDate *time.Time `type:"timestamp"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // Indicates if when the stored volume was created, existing data on the underlying // local disk was preserved. // - // Valid Values: true, false + // Valid Values: true | false PreservedExistingData *bool `type:"boolean"` // If the stored volume was created from a snapshot, this field contains the @@ -15863,14 +17385,14 @@ type StorediSCSIVolume struct { // A value that indicates whether a storage volume is attached to, detached // from, or is in the process of detaching from a gateway. For more information, - // see Moving Your Volumes to a Different Gateway (https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#attach-detach-volume). + // see Moving your volumes to a different gateway (https://docs.aws.amazon.com/storagegateway/latest/userguide/managing-volumes.html#attach-detach-volume). 
VolumeAttachmentStatus *string `min:"3" type:"string"` // The ID of the local disk that was specified in the CreateStorediSCSIVolume // operation. VolumeDiskId *string `min:"1" type:"string"` - // The unique identifier of the volume, e.g. vol-AE4B946D. + // The unique identifier of the volume, e.g., vol-AE4B946D. VolumeId *string `min:"12" type:"string"` // Represents the percentage complete if the volume is restoring or bootstrapping @@ -16005,11 +17527,11 @@ func (s *StorediSCSIVolume) SetVolumeiSCSIAttributes(v *VolumeiSCSIAttributes) * // A key-value pair that helps you manage, filter, and search for your resource. // Allowed characters: letters, white space, and numbers, representable in UTF-8, -// and the following characters: + - = . _ : / +// and the following characters: + - = . _ : /. type Tag struct { _ struct{} `type:"structure"` - // Tag key (String). The key can't start with aws:. + // Tag key. The key can't start with aws:. // // Key is a required field Key *string `min:"1" type:"string" required:"true"` @@ -16065,17 +17587,21 @@ func (s *Tag) SetValue(v string) *Tag { type Tape struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` + // The date that the tape enters a custom tape pool. + PoolEntryDate *time.Time `type:"timestamp"` + // The ID of the pool that contains tapes that will be archived. The tapes in // this pool are archived in the S3 storage class that is associated with the // pool. When you use your backup application to eject the tape, the tape is - // archived directly into the storage class (Glacier or Deep Archive) that corresponds - // to the pool. + // archived directly into the storage class (S3 Glacier or S3 Glacier Deep Archive) + // that corresponds to the pool. // - // Valid values: "GLACIER", "DEEP_ARCHIVE" + // Valid Values: GLACIER | DEEP_ARCHIVE PoolId *string `min:"1" type:"string"` // For archiving virtual tapes, indicates how much data remains to be uploaded @@ -16084,6 +17610,9 @@ type Tape struct { // Range: 0 (not started) to 100 (complete). Progress *float64 `type:"double"` + // The date that the tape is first archived with tape retention lock enabled. + RetentionStartDate *time.Time `type:"timestamp"` + // The Amazon Resource Name (ARN) of the virtual tape. TapeARN *string `min:"50" type:"string"` @@ -16107,6 +17636,9 @@ type Tape struct { // The virtual tape library (VTL) device that the virtual tape is associated // with. VTLDevice *string `min:"50" type:"string"` + + // If the tape is archived as write-once-read-many (WORM), this value is true. + Worm *bool `type:"boolean"` } // String returns the string representation @@ -16125,6 +17657,12 @@ func (s *Tape) SetKMSKey(v string) *Tape { return s } +// SetPoolEntryDate sets the PoolEntryDate field's value. +func (s *Tape) SetPoolEntryDate(v time.Time) *Tape { + s.PoolEntryDate = &v + return s +} + // SetPoolId sets the PoolId field's value. func (s *Tape) SetPoolId(v string) *Tape { s.PoolId = &v @@ -16137,6 +17675,12 @@ func (s *Tape) SetProgress(v float64) *Tape { return s } +// SetRetentionStartDate sets the RetentionStartDate field's value. 
+func (s *Tape) SetRetentionStartDate(v time.Time) *Tape { + s.RetentionStartDate = &v + return s +} + // SetTapeARN sets the TapeARN field's value. func (s *Tape) SetTapeARN(v string) *Tape { s.TapeARN = &v @@ -16179,26 +17723,43 @@ func (s *Tape) SetVTLDevice(v string) *Tape { return s } +// SetWorm sets the Worm field's value. +func (s *Tape) SetWorm(v bool) *Tape { + s.Worm = &v + return s +} + // Represents a virtual tape that is archived in the virtual tape shelf (VTS). type TapeArchive struct { _ struct{} `type:"structure"` // The time that the archiving of the virtual tape was completed. // - // The default time stamp format is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' + // The default timestamp format is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' // format. CompletionTime *time.Time `type:"timestamp"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` + // The time that the tape entered the custom tape pool. + // + // The default timestamp format is in the ISO8601 extended YYYY-MM-DD'T'HH:MM:SS'Z' + // format. + PoolEntryDate *time.Time `type:"timestamp"` + // The ID of the pool that was used to archive the tape. The tapes in this pool // are archived in the S3 storage class that is associated with the pool. // - // Valid values: "GLACIER", "DEEP_ARCHIVE" + // Valid Values: GLACIER | DEEP_ARCHIVE PoolId *string `min:"1" type:"string"` + // If the archived tape is subject to tape retention lock, the date that the + // archived tape started being retained. + RetentionStartDate *time.Time `type:"timestamp"` + // The Amazon Resource Name (ARN) of the tape gateway that the virtual tape // is being retrieved to. // @@ -16224,6 +17785,9 @@ type TapeArchive struct { // // This value is not available for tapes created prior to May 13, 2015. TapeUsedInBytes *int64 `type:"long"` + + // Set to true if the archived tape is stored as write-once-read-many (WORM). + Worm *bool `type:"boolean"` } // String returns the string representation @@ -16248,12 +17812,24 @@ func (s *TapeArchive) SetKMSKey(v string) *TapeArchive { return s } +// SetPoolEntryDate sets the PoolEntryDate field's value. +func (s *TapeArchive) SetPoolEntryDate(v time.Time) *TapeArchive { + s.PoolEntryDate = &v + return s +} + // SetPoolId sets the PoolId field's value. func (s *TapeArchive) SetPoolId(v string) *TapeArchive { s.PoolId = &v return s } +// SetRetentionStartDate sets the RetentionStartDate field's value. +func (s *TapeArchive) SetRetentionStartDate(v time.Time) *TapeArchive { + s.RetentionStartDate = &v + return s +} + // SetRetrievedTo sets the RetrievedTo field's value. func (s *TapeArchive) SetRetrievedTo(v string) *TapeArchive { s.RetrievedTo = &v @@ -16296,6 +17872,12 @@ func (s *TapeArchive) SetTapeUsedInBytes(v int64) *TapeArchive { return s } +// SetWorm sets the Worm field's value. +func (s *TapeArchive) SetWorm(v bool) *TapeArchive { + s.Worm = &v + return s +} + // Describes a virtual tape. type TapeInfo struct { _ struct{} `type:"structure"` @@ -16304,15 +17886,22 @@ type TapeInfo struct { // to return a list of gateways for your account and AWS Region. 
GatewayARN *string `min:"50" type:"string"` + // The date that the tape entered the custom tape pool with tape retention lock + // enabled. + PoolEntryDate *time.Time `type:"timestamp"` + // The ID of the pool that you want to add your tape to for archiving. The tape // in this pool is archived in the S3 storage class that is associated with // the pool. When you use your backup application to eject the tape, the tape - // is archived directly into the storage class (Glacier or Deep Archive) that - // corresponds to the pool. + // is archived directly into the storage class (S3 Glacier or S3 Glacier Deep + // Archive) that corresponds to the pool. // - // Valid values: "GLACIER", "DEEP_ARCHIVE" + // Valid Values: GLACIER | DEEP_ARCHIVE PoolId *string `min:"1" type:"string"` + // The date that the tape became subject to tape retention lock. + RetentionStartDate *time.Time `type:"timestamp"` + // The Amazon Resource Name (ARN) of a virtual tape. TapeARN *string `min:"50" type:"string"` @@ -16342,12 +17931,24 @@ func (s *TapeInfo) SetGatewayARN(v string) *TapeInfo { return s } +// SetPoolEntryDate sets the PoolEntryDate field's value. +func (s *TapeInfo) SetPoolEntryDate(v time.Time) *TapeInfo { + s.PoolEntryDate = &v + return s +} + // SetPoolId sets the PoolId field's value. func (s *TapeInfo) SetPoolId(v string) *TapeInfo { s.PoolId = &v return s } +// SetRetentionStartDate sets the RetentionStartDate field's value. +func (s *TapeInfo) SetRetentionStartDate(v time.Time) *TapeInfo { + s.RetentionStartDate = &v + return s +} + // SetTapeARN sets the TapeARN field's value. func (s *TapeInfo) SetTapeARN(v string) *TapeInfo { s.TapeARN = &v @@ -16382,7 +17983,7 @@ type TapeRecoveryPointInfo struct { // The time when the point-in-time view of the virtual tape was replicated for // later recovery. // - // The default time stamp format of the tape recovery point time is in the ISO8601 + // The default timestamp format of the tape recovery point time is in the ISO8601 // extended YYYY-MM-DD'T'HH:MM:SS'Z' format. TapeRecoveryPointTime *time.Time `type:"timestamp"` @@ -16427,6 +18028,100 @@ func (s *TapeRecoveryPointInfo) SetTapeStatus(v string) *TapeRecoveryPointInfo { return s } +type UpdateAutomaticTapeCreationPolicyInput struct { + _ struct{} `type:"structure"` + + // An automatic tape creation policy consists of a list of automatic tape creation + // rules. The rules determine when and how to automatically create new tapes. + // + // AutomaticTapeCreationRules is a required field + AutomaticTapeCreationRules []*AutomaticTapeCreationRule `min:"1" type:"list" required:"true"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + // + // GatewayARN is a required field + GatewayARN *string `min:"50" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAutomaticTapeCreationPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAutomaticTapeCreationPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateAutomaticTapeCreationPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAutomaticTapeCreationPolicyInput"} + if s.AutomaticTapeCreationRules == nil { + invalidParams.Add(request.NewErrParamRequired("AutomaticTapeCreationRules")) + } + if s.AutomaticTapeCreationRules != nil && len(s.AutomaticTapeCreationRules) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutomaticTapeCreationRules", 1)) + } + if s.GatewayARN == nil { + invalidParams.Add(request.NewErrParamRequired("GatewayARN")) + } + if s.GatewayARN != nil && len(*s.GatewayARN) < 50 { + invalidParams.Add(request.NewErrParamMinLen("GatewayARN", 50)) + } + if s.AutomaticTapeCreationRules != nil { + for i, v := range s.AutomaticTapeCreationRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AutomaticTapeCreationRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutomaticTapeCreationRules sets the AutomaticTapeCreationRules field's value. +func (s *UpdateAutomaticTapeCreationPolicyInput) SetAutomaticTapeCreationRules(v []*AutomaticTapeCreationRule) *UpdateAutomaticTapeCreationPolicyInput { + s.AutomaticTapeCreationRules = v + return s +} + +// SetGatewayARN sets the GatewayARN field's value. +func (s *UpdateAutomaticTapeCreationPolicyInput) SetGatewayARN(v string) *UpdateAutomaticTapeCreationPolicyInput { + s.GatewayARN = &v + return s +} + +type UpdateAutomaticTapeCreationPolicyOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + // to return a list of gateways for your account and AWS Region. + GatewayARN *string `min:"50" type:"string"` +} + +// String returns the string representation +func (s UpdateAutomaticTapeCreationPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAutomaticTapeCreationPolicyOutput) GoString() string { + return s.String() +} + +// SetGatewayARN sets the GatewayARN field's value. +func (s *UpdateAutomaticTapeCreationPolicyOutput) SetGatewayARN(v string) *UpdateAutomaticTapeCreationPolicyOutput { + s.GatewayARN = &v + return s +} + // A JSON object containing one or more of the following fields: // // * UpdateBandwidthRateLimitInput$AverageDownloadRateLimitInBitsPerSec @@ -16498,8 +18193,8 @@ func (s *UpdateBandwidthRateLimitInput) SetGatewayARN(v string) *UpdateBandwidth return s } -// A JSON object containing the of the gateway whose throttle information was -// updated. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway whose +// throttle information was updated. type UpdateBandwidthRateLimitOutput struct { _ struct{} `type:"structure"` @@ -16670,7 +18365,7 @@ type UpdateGatewayInformationInput struct { // The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that you // want to use to monitor and log events in the gateway. // - // For more information, see What Is Amazon CloudWatch Logs? (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html). + // For more information, see What is Amazon CloudWatch Logs? (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/WhatIsCloudWatchLogs.html) CloudWatchLogGroupARN *string `type:"string"` // The Amazon Resource Name (ARN) of the gateway. 
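Reviewer note: a minimal sketch of how the UpdateAutomaticTapeCreationPolicyInput added above might be exercised client-side. The AutomaticTapeCreationRule field setters (SetPoolId, SetTapeBarcodePrefix, SetTapeSizeInBytes, SetMinimumNumTapes) come from elsewhere in this patch and are assumed here; the gateway ARN and rule values are placeholders, not part of the change.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	// Build a single tape creation rule with the generated fluent setters.
	// Field names are assumed from the AutomaticTapeCreationRule type added
	// elsewhere in this patch; the concrete values are illustrative only.
	rule := &storagegateway.AutomaticTapeCreationRule{}
	rule.SetPoolId("GLACIER")             // archive into the Glacier pool
	rule.SetTapeBarcodePrefix("TST")      // illustrative barcode prefix
	rule.SetTapeSizeInBytes(107374182400) // 100 GiB
	rule.SetMinimumNumTapes(1)

	input := &storagegateway.UpdateAutomaticTapeCreationPolicyInput{}
	input.SetAutomaticTapeCreationRules([]*storagegateway.AutomaticTapeCreationRule{rule})
	input.SetGatewayARN("arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12A3456B") // hypothetical gateway ARN

	// Validate runs the client-side checks shown in the hunk above: required
	// fields, minimum lengths, and nested validation of each rule.
	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	fmt.Println("input passed client-side validation")
}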
Use the ListGateways operation @@ -16742,7 +18437,8 @@ func (s *UpdateGatewayInformationInput) SetGatewayTimezone(v string) *UpdateGate return s } -// A JSON object containing the ARN of the gateway that was updated. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway that +// was updated. type UpdateGatewayInformationOutput struct { _ struct{} `type:"structure"` @@ -16776,7 +18472,8 @@ func (s *UpdateGatewayInformationOutput) SetGatewayName(v string) *UpdateGateway return s } -// A JSON object containing the of the gateway to update. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway to +// update. type UpdateGatewaySoftwareNowInput struct { _ struct{} `type:"structure"` @@ -16819,7 +18516,8 @@ func (s *UpdateGatewaySoftwareNowInput) SetGatewayARN(v string) *UpdateGatewaySo return s } -// A JSON object containing the of the gateway that was updated. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway that +// was updated. type UpdateGatewaySoftwareNowOutput struct { _ struct{} `type:"structure"` @@ -16859,8 +18557,6 @@ type UpdateMaintenanceStartTimeInput struct { // The day of the month component of the maintenance start time represented // as an ordinal number from 1 to 28, where 1 represents the first day of the // month and 28 represents the last day of the month. - // - // This value is only available for tape and volume gateways. DayOfMonth *int64 `min:"1" type:"integer"` // The day of the week component of the maintenance start time week represented @@ -16953,8 +18649,8 @@ func (s *UpdateMaintenanceStartTimeInput) SetMinuteOfHour(v int64) *UpdateMainte return s } -// A JSON object containing the of the gateway whose maintenance start time -// is updated. +// A JSON object containing the Amazon Resource Name (ARN) of the gateway whose +// maintenance start time is updated. type UpdateMaintenanceStartTimeOutput struct { _ struct{} `type:"structure"` @@ -16983,13 +18679,17 @@ func (s *UpdateMaintenanceStartTimeOutput) SetGatewayARN(v string) *UpdateMainte type UpdateNFSFileShareInput struct { _ struct{} `type:"structure"` + // Refresh cache information. + CacheAttributes *CacheAttributes `type:"structure"` + // The list of clients that are allowed to access the file gateway. The list // must contain either valid IP addresses or valid CIDR blocks. ClientList []*string `min:"1" type:"list"` // The default storage class for objects put into an Amazon S3 bucket by the - // file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. - // If this field is not populated, the default value S3_STANDARD is used. Optional. + // file gateway. The default value is S3_INTELLIGENT_TIERING. Optional. + // + // Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA DefaultStorageClass *string `min:"5" type:"string"` // The Amazon Resource Name (ARN) of the file share to be updated. @@ -16997,47 +18697,64 @@ type UpdateNFSFileShareInput struct { // FileShareARN is a required field FileShareARN *string `min:"50" type:"string" required:"true"` + // The name of the file share. Optional. + // + // FileShareName must be set if an S3 prefix name is set in LocationARN. + FileShareName *string `min:"1" type:"string"` + // A value that enables guessing of the MIME type for uploaded objects based // on file extensions. Set this value to true to enable MIME type guessing, - // and otherwise to false. The default value is true. + // otherwise set to false. The default value is true. 
+ // + // Valid Values: true | false GuessMIMETypeEnabled *bool `type:"boolean"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or - // false to use a key managed by Amazon S3. Optional. + // Set to true to use Amazon S3 server-side encryption with your own AWS KMS + // key, or false to use a key managed by Amazon S3. Optional. + // + // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` // The default values for the file share. Optional. NFSFileShareDefaults *NFSFileShareDefaults `type:"structure"` - // A value that sets the access control list permission for objects in the S3 - // bucket that a file gateway puts objects into. The default value is "private". + // A value that sets the access control list (ACL) permission for objects in + // the S3 bucket that a file gateway puts objects into. The default value is + // private. ObjectACL *string `type:"string" enum:"ObjectACL"` - // A value that sets the write status of a file share. This value is true if - // the write status is read-only, and otherwise false. + // A value that sets the write status of a file share. Set this value to true + // to set the write status to read-only, otherwise set to false. + // + // Valid Values: true | false ReadOnly *bool `type:"boolean"` // A value that sets who pays the cost of the request and the cost associated // with data download from the S3 bucket. If this value is set to true, the - // requester pays the costs. Otherwise the S3 bucket owner pays. However, the + // requester pays the costs; otherwise, the S3 bucket owner pays. However, the // S3 bucket owner always pays the cost of storing data. // // RequesterPays is a configuration for the S3 bucket that backs the file share, // so make sure that the configuration on the file share is the same as the // S3 bucket configuration. + // + // Valid Values: true | false RequesterPays *bool `type:"boolean"` - // The user mapped to anonymous user. Valid options are the following: + // The user mapped to anonymous user. + // + // Valid values are the following: // - // * RootSquash - Only root is mapped to anonymous user. + // * RootSquash: Only root is mapped to anonymous user. // - // * NoSquash - No one is mapped to anonymous user + // * NoSquash: No one is mapped to anonymous user. // - // * AllSquash - Everyone is mapped to anonymous user. + // * AllSquash: Everyone is mapped to anonymous user. Squash *string `min:"5" type:"string"` } @@ -17066,6 +18783,9 @@ func (s *UpdateNFSFileShareInput) Validate() error { if s.FileShareARN != nil && len(*s.FileShareARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("FileShareARN", 50)) } + if s.FileShareName != nil && len(*s.FileShareName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FileShareName", 1)) + } if s.KMSKey != nil && len(*s.KMSKey) < 7 { invalidParams.Add(request.NewErrParamMinLen("KMSKey", 7)) } @@ -17084,6 +18804,12 @@ func (s *UpdateNFSFileShareInput) Validate() error { return nil } +// SetCacheAttributes sets the CacheAttributes field's value. 
+func (s *UpdateNFSFileShareInput) SetCacheAttributes(v *CacheAttributes) *UpdateNFSFileShareInput { + s.CacheAttributes = v + return s +} + // SetClientList sets the ClientList field's value. func (s *UpdateNFSFileShareInput) SetClientList(v []*string) *UpdateNFSFileShareInput { s.ClientList = v @@ -17102,6 +18828,12 @@ func (s *UpdateNFSFileShareInput) SetFileShareARN(v string) *UpdateNFSFileShareI return s } +// SetFileShareName sets the FileShareName field's value. +func (s *UpdateNFSFileShareInput) SetFileShareName(v string) *UpdateNFSFileShareInput { + s.FileShareName = &v + return s +} + // SetGuessMIMETypeEnabled sets the GuessMIMETypeEnabled field's value. func (s *UpdateNFSFileShareInput) SetGuessMIMETypeEnabled(v bool) *UpdateNFSFileShareInput { s.GuessMIMETypeEnabled = &v @@ -17178,14 +18910,27 @@ func (s *UpdateNFSFileShareOutput) SetFileShareARN(v string) *UpdateNFSFileShare type UpdateSMBFileShareInput struct { _ struct{} `type:"structure"` - // A list of users in the Active Directory that have administrator rights to - // the file share. A group must be prefixed with the @ character. For example - // @group1. Can only be set if Authentication is set to ActiveDirectory. + // A list of users or groups in the Active Directory that have administrator + // rights to the file share. A group must be prefixed with the @ character. + // Acceptable formats include: DOMAIN\User1, user1, @group1, and @DOMAIN\group1. + // Can only be set if Authentication is set to ActiveDirectory. AdminUserList []*string `type:"list"` + // The Amazon Resource Name (ARN) of the storage used for the audit logs. + AuditDestinationARN *string `type:"string"` + + // Refresh cache information. + CacheAttributes *CacheAttributes `type:"structure"` + + // The case of an object name in an Amazon S3 bucket. For ClientSpecified, the + // client determines the case sensitivity. For CaseSensitive, the gateway determines + // the case sensitivity. The default value is ClientSpecified. + CaseSensitivity *string `type:"string" enum:"CaseSensitivity"` + // The default storage class for objects put into an Amazon S3 bucket by the - // file gateway. Possible values are S3_STANDARD, S3_STANDARD_IA, or S3_ONEZONE_IA. - // If this field is not populated, the default value S3_STANDARD is used. Optional. + // file gateway. The default value is S3_INTELLIGENT_TIERING. Optional. + // + // Valid Values: S3_STANDARD | S3_INTELLIGENT_TIERING | S3_STANDARD_IA | S3_ONEZONE_IA DefaultStorageClass *string `min:"5" type:"string"` // The Amazon Resource Name (ARN) of the SMB file share that you want to update. @@ -17193,53 +18938,73 @@ type UpdateSMBFileShareInput struct { // FileShareARN is a required field FileShareARN *string `min:"50" type:"string" required:"true"` + // The name of the file share. Optional. + // + // FileShareName must be set if an S3 prefix name is set in LocationARN. + FileShareName *string `min:"1" type:"string"` + // A value that enables guessing of the MIME type for uploaded objects based // on file extensions. Set this value to true to enable MIME type guessing, - // and otherwise to false. The default value is true. + // otherwise set to false. The default value is true. + // + // Valid Values: true | false GuessMIMETypeEnabled *bool `type:"boolean"` // A list of users or groups in the Active Directory that are not allowed to - // access the file share. A group must be prefixed with the @ character. For - // example @group1. Can only be set if Authentication is set to ActiveDirectory. 
+ // access the file share. A group must be prefixed with the @ character. Acceptable + // formats include: DOMAIN\User1, user1, @group1, and @DOMAIN\group1. Can only + // be set if Authentication is set to ActiveDirectory. InvalidUserList []*string `type:"list"` - // True to use Amazon S3 server side encryption with your own AWS KMS key, or - // false to use a key managed by Amazon S3. Optional. + // Set to true to use Amazon S3 server-side encryption with your own AWS KMS + // key, or false to use a key managed by Amazon S3. Optional. + // + // Valid Values: true | false KMSEncrypted *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the AWS KMS key used for Amazon S3 server - // side encryption. This value can only be set when KMSEncrypted is true. Optional. + // The Amazon Resource Name (ARN) of a symmetric customer master key (CMK) used + // for Amazon S3 server-side encryption. Storage Gateway does not support asymmetric + // CMKs. This value can only be set when KMSEncrypted is true. Optional. KMSKey *string `min:"7" type:"string"` - // A value that sets the access control list permission for objects in the S3 - // bucket that a file gateway puts objects into. The default value is "private". + // A value that sets the access control list (ACL) permission for objects in + // the S3 bucket that a file gateway puts objects into. The default value is + // private. ObjectACL *string `type:"string" enum:"ObjectACL"` - // A value that sets the write status of a file share. This value is true if - // the write status is read-only, and otherwise false. + // A value that sets the write status of a file share. Set this value to true + // to set write status to read-only, otherwise set to false. + // + // Valid Values: true | false ReadOnly *bool `type:"boolean"` // A value that sets who pays the cost of the request and the cost associated // with data download from the S3 bucket. If this value is set to true, the - // requester pays the costs. Otherwise the S3 bucket owner pays. However, the + // requester pays the costs; otherwise, the S3 bucket owner pays. However, the // S3 bucket owner always pays the cost of storing data. // // RequesterPays is a configuration for the S3 bucket that backs the file share, // so make sure that the configuration on the file share is the same as the // S3 bucket configuration. + // + // Valid Values: true | false RequesterPays *bool `type:"boolean"` - // Set this value to "true to enable ACL (access control list) on the SMB file - // share. Set it to "false" to map file and directory permissions to the POSIX + // Set this value to true to enable access control list (ACL) on the SMB file + // share. Set it to false to map file and directory permissions to the POSIX // permissions. // - // For more information, see https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.htmlin - // the Storage Gateway User Guide. + // For more information, see Using Microsoft Windows ACLs to control access + // to an SMB file share (https://docs.aws.amazon.com/storagegateway/latest/userguide/smb-acl.html) + // in the AWS Storage Gateway User Guide. + // + // Valid Values: true | false SMBACLEnabled *bool `type:"boolean"` // A list of users or groups in the Active Directory that are allowed to access - // the file share. A group must be prefixed with the @ character. For example - // @group1. Can only be set if Authentication is set to ActiveDirectory. + // the file share. A group must be prefixed with the @ character. 
Acceptable + // formats include: DOMAIN\User1, user1, @group1, and @DOMAIN\group1. Can only + // be set if Authentication is set to ActiveDirectory. ValidUserList []*string `type:"list"` } @@ -17265,6 +19030,9 @@ func (s *UpdateSMBFileShareInput) Validate() error { if s.FileShareARN != nil && len(*s.FileShareARN) < 50 { invalidParams.Add(request.NewErrParamMinLen("FileShareARN", 50)) } + if s.FileShareName != nil && len(*s.FileShareName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("FileShareName", 1)) + } if s.KMSKey != nil && len(*s.KMSKey) < 7 { invalidParams.Add(request.NewErrParamMinLen("KMSKey", 7)) } @@ -17281,6 +19049,24 @@ func (s *UpdateSMBFileShareInput) SetAdminUserList(v []*string) *UpdateSMBFileSh return s } +// SetAuditDestinationARN sets the AuditDestinationARN field's value. +func (s *UpdateSMBFileShareInput) SetAuditDestinationARN(v string) *UpdateSMBFileShareInput { + s.AuditDestinationARN = &v + return s +} + +// SetCacheAttributes sets the CacheAttributes field's value. +func (s *UpdateSMBFileShareInput) SetCacheAttributes(v *CacheAttributes) *UpdateSMBFileShareInput { + s.CacheAttributes = v + return s +} + +// SetCaseSensitivity sets the CaseSensitivity field's value. +func (s *UpdateSMBFileShareInput) SetCaseSensitivity(v string) *UpdateSMBFileShareInput { + s.CaseSensitivity = &v + return s +} + // SetDefaultStorageClass sets the DefaultStorageClass field's value. func (s *UpdateSMBFileShareInput) SetDefaultStorageClass(v string) *UpdateSMBFileShareInput { s.DefaultStorageClass = &v @@ -17293,6 +19079,12 @@ func (s *UpdateSMBFileShareInput) SetFileShareARN(v string) *UpdateSMBFileShareI return s } +// SetFileShareName sets the FileShareName field's value. +func (s *UpdateSMBFileShareInput) SetFileShareName(v string) *UpdateSMBFileShareInput { + s.FileShareName = &v + return s +} + // SetGuessMIMETypeEnabled sets the GuessMIMETypeEnabled field's value. func (s *UpdateSMBFileShareInput) SetGuessMIMETypeEnabled(v bool) *UpdateSMBFileShareInput { s.GuessMIMETypeEnabled = &v @@ -17585,7 +19377,8 @@ func (s *UpdateSnapshotScheduleInput) SetVolumeARN(v string) *UpdateSnapshotSche return s } -// A JSON object containing the of the updated storage volume. +// A JSON object containing the Amazon Resource Name (ARN) of the updated storage +// volume. type UpdateSnapshotScheduleOutput struct { _ struct{} `type:"structure"` @@ -17615,7 +19408,7 @@ type UpdateVTLDeviceTypeInput struct { // The type of medium changer you want to select. 
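Reviewer note: a hedged sketch of how a caller might populate the new UpdateSMBFileShareInput fields introduced above (FileShareName, CaseSensitivity, AuditDestinationARN, CacheAttributes). The CacheStaleTimeoutInSeconds setter is assumed from the CacheAttributes type added elsewhere in this patch, and the ARNs are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/storagegateway"
)

func main() {
	// Refresh-cache settings; CacheStaleTimeoutInSeconds is assumed to be the
	// field defined on the CacheAttributes type elsewhere in this patch.
	cache := &storagegateway.CacheAttributes{}
	cache.SetCacheStaleTimeoutInSeconds(3600)

	input := &storagegateway.UpdateSMBFileShareInput{}
	input.SetFileShareARN("arn:aws:storagegateway:us-east-1:123456789012:share/share-12345678") // hypothetical share ARN
	input.SetFileShareName("project-share")                                                     // required when LocationARN includes an S3 prefix
	input.SetCaseSensitivity(storagegateway.CaseSensitivityCaseSensitive)                       // new enum added in this change
	input.SetAuditDestinationARN("arn:aws:logs:us-east-1:123456789012:log-group:smb-audit")     // hypothetical log group ARN
	input.SetCacheAttributes(cache)

	if err := input.Validate(); err != nil {
		fmt.Println("invalid input:", err)
		return
	}
	fmt.Println("UpdateSMBFileShareInput passed client-side validation")
}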
// - // Valid Values: "STK-L700", "AWS-Gateway-VTL" + // Valid Values: STK-L700 | AWS-Gateway-VTL | IBM-03584L32-0402 // // DeviceType is a required field DeviceType *string `min:"2" type:"string" required:"true"` @@ -17989,6 +19782,19 @@ const ( ActiveDirectoryStatusUnknownError = "UNKNOWN_ERROR" ) +// ActiveDirectoryStatus_Values returns all elements of the ActiveDirectoryStatus enum +func ActiveDirectoryStatus_Values() []string { + return []string{ + ActiveDirectoryStatusAccessDenied, + ActiveDirectoryStatusDetached, + ActiveDirectoryStatusJoined, + ActiveDirectoryStatusJoining, + ActiveDirectoryStatusNetworkError, + ActiveDirectoryStatusTimeout, + ActiveDirectoryStatusUnknownError, + } +} + const ( // AvailabilityMonitorTestStatusComplete is a AvailabilityMonitorTestStatus enum value AvailabilityMonitorTestStatusComplete = "COMPLETE" @@ -18000,6 +19806,31 @@ const ( AvailabilityMonitorTestStatusPending = "PENDING" ) +// AvailabilityMonitorTestStatus_Values returns all elements of the AvailabilityMonitorTestStatus enum +func AvailabilityMonitorTestStatus_Values() []string { + return []string{ + AvailabilityMonitorTestStatusComplete, + AvailabilityMonitorTestStatusFailed, + AvailabilityMonitorTestStatusPending, + } +} + +const ( + // CaseSensitivityClientSpecified is a CaseSensitivity enum value + CaseSensitivityClientSpecified = "ClientSpecified" + + // CaseSensitivityCaseSensitive is a CaseSensitivity enum value + CaseSensitivityCaseSensitive = "CaseSensitive" +) + +// CaseSensitivity_Values returns all elements of the CaseSensitivity enum +func CaseSensitivity_Values() []string { + return []string{ + CaseSensitivityClientSpecified, + CaseSensitivityCaseSensitive, + } +} + const ( // ErrorCodeActivationKeyExpired is a ErrorCode enum value ErrorCodeActivationKeyExpired = "ActivationKeyExpired" @@ -18188,6 +20019,74 @@ const ( ErrorCodeVolumeNotReady = "VolumeNotReady" ) +// ErrorCode_Values returns all elements of the ErrorCode enum +func ErrorCode_Values() []string { + return []string{ + ErrorCodeActivationKeyExpired, + ErrorCodeActivationKeyInvalid, + ErrorCodeActivationKeyNotFound, + ErrorCodeGatewayInternalError, + ErrorCodeGatewayNotConnected, + ErrorCodeGatewayNotFound, + ErrorCodeGatewayProxyNetworkConnectionBusy, + ErrorCodeAuthenticationFailure, + ErrorCodeBandwidthThrottleScheduleNotFound, + ErrorCodeBlocked, + ErrorCodeCannotExportSnapshot, + ErrorCodeChapCredentialNotFound, + ErrorCodeDiskAlreadyAllocated, + ErrorCodeDiskDoesNotExist, + ErrorCodeDiskSizeGreaterThanVolumeMaxSize, + ErrorCodeDiskSizeLessThanVolumeSize, + ErrorCodeDiskSizeNotGigAligned, + ErrorCodeDuplicateCertificateInfo, + ErrorCodeDuplicateSchedule, + ErrorCodeEndpointNotFound, + ErrorCodeIamnotSupported, + ErrorCodeInitiatorInvalid, + ErrorCodeInitiatorNotFound, + ErrorCodeInternalError, + ErrorCodeInvalidGateway, + ErrorCodeInvalidEndpoint, + ErrorCodeInvalidParameters, + ErrorCodeInvalidSchedule, + ErrorCodeLocalStorageLimitExceeded, + ErrorCodeLunAlreadyAllocated, + ErrorCodeLunInvalid, + ErrorCodeJoinDomainInProgress, + ErrorCodeMaximumContentLengthExceeded, + ErrorCodeMaximumTapeCartridgeCountExceeded, + ErrorCodeMaximumVolumeCountExceeded, + ErrorCodeNetworkConfigurationChanged, + ErrorCodeNoDisksAvailable, + ErrorCodeNotImplemented, + ErrorCodeNotSupported, + ErrorCodeOperationAborted, + ErrorCodeOutdatedGateway, + ErrorCodeParametersNotImplemented, + ErrorCodeRegionInvalid, + ErrorCodeRequestTimeout, + ErrorCodeServiceUnavailable, + ErrorCodeSnapshotDeleted, + ErrorCodeSnapshotIdInvalid, + 
ErrorCodeSnapshotInProgress, + ErrorCodeSnapshotNotFound, + ErrorCodeSnapshotScheduleNotFound, + ErrorCodeStagingAreaFull, + ErrorCodeStorageFailure, + ErrorCodeTapeCartridgeNotFound, + ErrorCodeTargetAlreadyExists, + ErrorCodeTargetInvalid, + ErrorCodeTargetNotFound, + ErrorCodeUnauthorizedOperation, + ErrorCodeVolumeAlreadyExists, + ErrorCodeVolumeIdInvalid, + ErrorCodeVolumeInUse, + ErrorCodeVolumeNotFound, + ErrorCodeVolumeNotReady, + } +} + // The type of the file share. const ( // FileShareTypeNfs is a FileShareType enum value @@ -18197,6 +20096,14 @@ const ( FileShareTypeSmb = "SMB" ) +// FileShareType_Values returns all elements of the FileShareType enum +func FileShareType_Values() []string { + return []string{ + FileShareTypeNfs, + FileShareTypeSmb, + } +} + const ( // HostEnvironmentVmware is a HostEnvironment enum value HostEnvironmentVmware = "VMWARE" @@ -18214,8 +20121,20 @@ const ( HostEnvironmentOther = "OTHER" ) -// A value that sets the access control list permission for objects in the S3 -// bucket that a file gateway puts objects into. The default value is "private". +// HostEnvironment_Values returns all elements of the HostEnvironment enum +func HostEnvironment_Values() []string { + return []string{ + HostEnvironmentVmware, + HostEnvironmentHyperV, + HostEnvironmentEc2, + HostEnvironmentKvm, + HostEnvironmentOther, + } +} + +// A value that sets the access control list (ACL) permission for objects in +// the S3 bucket that a file gateway puts objects into. The default value is +// private. const ( // ObjectACLPrivate is a ObjectACL enum value ObjectACLPrivate = "private" @@ -18239,6 +20158,55 @@ const ( ObjectACLAwsExecRead = "aws-exec-read" ) +// ObjectACL_Values returns all elements of the ObjectACL enum +func ObjectACL_Values() []string { + return []string{ + ObjectACLPrivate, + ObjectACLPublicRead, + ObjectACLPublicReadWrite, + ObjectACLAuthenticatedRead, + ObjectACLBucketOwnerRead, + ObjectACLBucketOwnerFullControl, + ObjectACLAwsExecRead, + } +} + +const ( + // PoolStatusActive is a PoolStatus enum value + PoolStatusActive = "ACTIVE" + + // PoolStatusDeleted is a PoolStatus enum value + PoolStatusDeleted = "DELETED" +) + +// PoolStatus_Values returns all elements of the PoolStatus enum +func PoolStatus_Values() []string { + return []string{ + PoolStatusActive, + PoolStatusDeleted, + } +} + +const ( + // RetentionLockTypeCompliance is a RetentionLockType enum value + RetentionLockTypeCompliance = "COMPLIANCE" + + // RetentionLockTypeGovernance is a RetentionLockType enum value + RetentionLockTypeGovernance = "GOVERNANCE" + + // RetentionLockTypeNone is a RetentionLockType enum value + RetentionLockTypeNone = "NONE" +) + +// RetentionLockType_Values returns all elements of the RetentionLockType enum +func RetentionLockType_Values() []string { + return []string{ + RetentionLockTypeCompliance, + RetentionLockTypeGovernance, + RetentionLockTypeNone, + } +} + const ( // SMBSecurityStrategyClientSpecified is a SMBSecurityStrategy enum value SMBSecurityStrategyClientSpecified = "ClientSpecified" @@ -18249,3 +20217,28 @@ const ( // SMBSecurityStrategyMandatoryEncryption is a SMBSecurityStrategy enum value SMBSecurityStrategyMandatoryEncryption = "MandatoryEncryption" ) + +// SMBSecurityStrategy_Values returns all elements of the SMBSecurityStrategy enum +func SMBSecurityStrategy_Values() []string { + return []string{ + SMBSecurityStrategyClientSpecified, + SMBSecurityStrategyMandatorySigning, + SMBSecurityStrategyMandatoryEncryption, + } +} + +const ( + // 
TapeStorageClassDeepArchive is a TapeStorageClass enum value + TapeStorageClassDeepArchive = "DEEP_ARCHIVE" + + // TapeStorageClassGlacier is a TapeStorageClass enum value + TapeStorageClassGlacier = "GLACIER" +) + +// TapeStorageClass_Values returns all elements of the TapeStorageClass enum +func TapeStorageClass_Values() []string { + return []string{ + TapeStorageClassDeepArchive, + TapeStorageClassGlacier, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/doc.go b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/doc.go index c3eb54cd5..a6a9f896d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/doc.go @@ -7,20 +7,20 @@ // appliance with cloud-based storage to provide seamless and secure integration // between an organization's on-premises IT environment and the AWS storage // infrastructure. The service enables you to securely upload data to the AWS -// cloud for cost effective backup and rapid disaster recovery. +// Cloud for cost effective backup and rapid disaster recovery. // // Use the following links to get started using the AWS Storage Gateway Service // API Reference: // -// * AWS Storage Gateway Required Request Headers (https://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html#AWSStorageGatewayHTTPRequestsHeaders): +// * AWS Storage Gateway required request headers (https://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html#AWSStorageGatewayHTTPRequestsHeaders): // Describes the required headers that you must send with every POST request // to AWS Storage Gateway. // -// * Signing Requests (https://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html#AWSStorageGatewaySigningRequests): +// * Signing requests (https://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html#AWSStorageGatewaySigningRequests): // AWS Storage Gateway requires that you authenticate every request you send; // this topic describes how sign such a request. // -// * Error Responses (https://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html#APIErrorResponses): +// * Error responses (https://docs.aws.amazon.com/storagegateway/latest/userguide/AWSStorageGatewayAPI.html#APIErrorResponses): // Provides reference information about AWS Storage Gateway errors. // // * Operations in AWS Storage Gateway (https://docs.aws.amazon.com/storagegateway/latest/APIReference/API_Operations.html): @@ -28,7 +28,7 @@ // their request parameters, response elements, possible errors, and examples // of requests and responses. // -// * AWS Storage Gateway Regions and Endpoints: (http://docs.aws.amazon.com/general/latest/gr/rande.html#sg_region) +// * AWS Storage Gateway endpoints and quotas (https://docs.aws.amazon.com/general/latest/gr/sg.html): // Provides a list of each AWS Region and the endpoints available for use // with AWS Storage Gateway. // @@ -44,7 +44,7 @@ // volumes and snapshots will be created with a 17-character string. Starting // in April 2016, you will be able to use these longer IDs so you can test your // systems with the new format. For more information, see Longer EC2 and EBS -// Resource IDs (https://aws.amazon.com/ec2/faqs/#longer-ids). +// resource IDs (http://aws.amazon.com/ec2/faqs/#longer-ids). 
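Reviewer note: the new <Enum>_Values helpers added above return every value of a generated enum, which lets callers validate user-supplied strings without hand-maintained lists. A minimal sketch; the contains helper is ad hoc and not part of the SDK.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/storagegateway"
)

// contains reports whether s is one of the allowed enum values.
func contains(values []string, s string) bool {
	for _, v := range values {
		if v == s {
			return true
		}
	}
	return false
}

func main() {
	// TapeStorageClass_Values and ObjectACL_Values are among the helpers
	// added in this change.
	fmt.Println(storagegateway.TapeStorageClass_Values())                  // [DEEP_ARCHIVE GLACIER]
	fmt.Println(contains(storagegateway.ObjectACL_Values(), "public-read")) // true
	fmt.Println(contains(storagegateway.ObjectACL_Values(), "not-an-acl"))  // false
}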
// // For example, a volume Amazon Resource Name (ARN) with the longer volume ID // format looks like the following: @@ -54,7 +54,7 @@ // A snapshot ID with the longer ID format looks like the following: snap-78e226633445566ee. // // For more information, see Announcement: Heads-up – Longer AWS Storage Gateway -// volume and snapshot IDs coming in 2016 (https://forums.aws.amazon.com/ann.jspa?annID=3557). +// volume and snapshot IDs coming in 2016 (http://forums.aws.amazon.com/ann.jspa?annID=3557). // // See https://docs.aws.amazon.com/goto/WebAPI/storagegateway-2013-06-30 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go index 8409c39da..0e86cd183 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 7f60d4aa1..bfc4372f9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -207,6 +207,10 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { req, out := c.AssumeRoleRequest(input) @@ -626,7 +630,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) // and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). // -// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). // Walk through the process of authenticating through Login with Amazon, // Facebook, or Google, getting temporary security credentials, and then // using those credentials to make a request to AWS. diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go index fcb720dca..cb1debbaa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -3,87 +3,11 @@ // Package sts provides the client and types for making API // requests to AWS Security Token Service. 
// -// The AWS Security Token Service (STS) is a web service that enables you to -// request temporary, limited-privilege credentials for AWS Identity and Access -// Management (IAM) users or for users that you authenticate (federated users). -// This guide provides descriptions of the STS API. For more detailed information -// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). -// -// For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) -// in the AWS General Reference. For general information about the Query API, -// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in Using IAM. For information about using security tokens with other AWS -// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) -// in the IAM User Guide. -// -// If you're new to AWS and need additional technical information about a specific -// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ -// (http://aws.amazon.com/documentation/). -// -// Endpoints -// -// By default, AWS Security Token Service (STS) is available as a global service, -// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. -// Global requests map to the US East (N. Virginia) region. AWS recommends using -// Regional AWS STS endpoints instead of the global endpoint to reduce latency, -// build in redundancy, and increase session token validity. For more information, -// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// Most AWS Regions are enabled for operations in all AWS services by default. -// Those Regions are automatically activated for use with AWS STS. Some Regions, -// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more -// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) -// in the AWS General Reference. When you enable these AWS Regions, they are -// automatically activated for use with AWS STS. You cannot activate the STS -// endpoint for a Region that is disabled. Tokens that are valid in all AWS -// Regions are longer than tokens that are valid in Regions that are enabled -// by default. Changing this setting might affect existing systems where you -// temporarily store tokens. For more information, see Managing Global Endpoint -// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens) -// in the IAM User Guide. -// -// After you activate a Region for use with AWS STS, you can direct AWS STS -// API calls to that Region. AWS STS recommends that you provide both the Region -// and endpoint when you make calls to a Regional endpoint. You can provide -// the Region alone for manually enabled Regions, such as Asia Pacific (Hong -// Kong). In this case, the calls are directed to the STS Regional endpoint. -// However, if you provide the Region alone for Regions enabled by default, -// the calls are directed to the global endpoint of https://sts.amazonaws.com. 
-// -// To view the list of AWS STS endpoints and whether they are active by default, -// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code) -// in the IAM User Guide. -// -// Recording API requests -// -// STS supports AWS CloudTrail, which is a service that records AWS calls for -// your AWS account and delivers log files to an Amazon S3 bucket. By using -// information collected by CloudTrail, you can determine what requests were -// successfully made to STS, who made the request, when it was made, and so -// on. -// -// If you activate AWS STS endpoints in Regions other than the default global -// endpoint, then you must also turn on CloudTrail logging in those Regions. -// This is necessary to record any AWS STS API calls that are made in those -// Regions. For more information, see Turning On CloudTrail in Additional Regions -// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html) -// in the AWS CloudTrail User Guide. -// -// AWS Security Token Service (STS) is a global service with a single endpoint -// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls -// to a global service. However, because this endpoint is physically located -// in the US East (N. Virginia) Region, your logs list us-east-1 as the event -// Region. CloudTrail does not write these logs to the US East (Ohio) Region -// unless you choose to include global service logs in that Region. CloudTrail -// writes calls to all Regional endpoints to their respective Regions. For example, -// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio) -// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU -// (Frankfurt) Region. -// -// To learn more about CloudTrail, including how to turn it on and find your -// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// AWS Security Token Service (STS) enables you to request temporary, limited-privilege +// credentials for AWS Identity and Access Management (IAM) users or for users +// that you authenticate (federated users). This guide provides descriptions +// of the STS API. For more information about using this service, see Temporary +// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). // // See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. 
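Reviewer note: the AssumeRole documentation above now lists ErrCodeExpiredTokenException as a possible error. A hedged sketch of how a caller might check for it with awserr; the role ARN and session name are placeholders, and credentials/region are taken from the default provider chain.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// session.Must panics on configuration errors.
	svc := sts.New(session.Must(session.NewSession()))

	_, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder role
		RoleSessionName: aws.String("example-session"),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == sts.ErrCodeExpiredTokenException {
			// The token has expired; obtain a new one from the identity
			// provider and retry, as the doc comment above advises.
			fmt.Println("token expired:", aerr.Message())
			return
		}
		fmt.Println("assume role failed:", err)
		return
	}
	fmt.Println("assumed role successfully")
}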
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index c08a01a31..d34a68553 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/swf/api.go b/vendor/github.com/aws/aws-sdk-go/service/swf/api.go index b2e19132d..746d05b11 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/swf/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/swf/api.go @@ -7143,8 +7143,8 @@ func (s *DecisionTaskTimedOutEventAttributes) SetTimeoutType(v string) *Decision // If these parameters aren't set and no default parameters were defined in // the workflow type, this error is displayed. type DefaultUndefinedFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7161,17 +7161,17 @@ func (s DefaultUndefinedFault) GoString() string { func newErrorDefaultUndefinedFault(v protocol.ResponseMetadata) error { return &DefaultUndefinedFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DefaultUndefinedFault) Code() string { +func (s *DefaultUndefinedFault) Code() string { return "DefaultUndefinedFault" } // Message returns the exception's message. -func (s DefaultUndefinedFault) Message() string { +func (s *DefaultUndefinedFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7179,22 +7179,22 @@ func (s DefaultUndefinedFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DefaultUndefinedFault) OrigErr() error { +func (s *DefaultUndefinedFault) OrigErr() error { return nil } -func (s DefaultUndefinedFault) Error() string { +func (s *DefaultUndefinedFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DefaultUndefinedFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DefaultUndefinedFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DefaultUndefinedFault) RequestID() string { - return s.respMetadata.RequestID +func (s *DefaultUndefinedFault) RequestID() string { + return s.RespMetadata.RequestID } type DeprecateActivityTypeInput struct { @@ -7830,8 +7830,8 @@ func (s *DescribeWorkflowTypeOutput) SetTypeInfo(v *WorkflowTypeInfo) *DescribeW // registering a domain that is either already registered or deprecated, or // if you undeprecate a domain that is currently registered. type DomainAlreadyExistsFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A description that may help with diagnosing the cause of the fault. 
Message_ *string `locationName:"message" type:"string"` @@ -7849,17 +7849,17 @@ func (s DomainAlreadyExistsFault) GoString() string { func newErrorDomainAlreadyExistsFault(v protocol.ResponseMetadata) error { return &DomainAlreadyExistsFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DomainAlreadyExistsFault) Code() string { +func (s *DomainAlreadyExistsFault) Code() string { return "DomainAlreadyExistsFault" } // Message returns the exception's message. -func (s DomainAlreadyExistsFault) Message() string { +func (s *DomainAlreadyExistsFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7867,22 +7867,22 @@ func (s DomainAlreadyExistsFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DomainAlreadyExistsFault) OrigErr() error { +func (s *DomainAlreadyExistsFault) OrigErr() error { return nil } -func (s DomainAlreadyExistsFault) Error() string { +func (s *DomainAlreadyExistsFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DomainAlreadyExistsFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DomainAlreadyExistsFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DomainAlreadyExistsFault) RequestID() string { - return s.respMetadata.RequestID +func (s *DomainAlreadyExistsFault) RequestID() string { + return s.RespMetadata.RequestID } // Contains the configuration settings of a domain. @@ -7913,8 +7913,8 @@ func (s *DomainConfiguration) SetWorkflowExecutionRetentionPeriodInDays(v string // Returned when the specified domain has been deprecated. type DomainDeprecatedFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A description that may help with diagnosing the cause of the fault. Message_ *string `locationName:"message" type:"string"` @@ -7932,17 +7932,17 @@ func (s DomainDeprecatedFault) GoString() string { func newErrorDomainDeprecatedFault(v protocol.ResponseMetadata) error { return &DomainDeprecatedFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DomainDeprecatedFault) Code() string { +func (s *DomainDeprecatedFault) Code() string { return "DomainDeprecatedFault" } // Message returns the exception's message. -func (s DomainDeprecatedFault) Message() string { +func (s *DomainDeprecatedFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7950,22 +7950,22 @@ func (s DomainDeprecatedFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DomainDeprecatedFault) OrigErr() error { +func (s *DomainDeprecatedFault) OrigErr() error { return nil } -func (s DomainDeprecatedFault) Error() string { +func (s *DomainDeprecatedFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DomainDeprecatedFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DomainDeprecatedFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s DomainDeprecatedFault) RequestID() string { - return s.respMetadata.RequestID +func (s *DomainDeprecatedFault) RequestID() string { + return s.RespMetadata.RequestID } // Contains general information about a domain. @@ -9438,8 +9438,8 @@ func (s *LambdaFunctionTimedOutEventAttributes) SetTimeoutType(v string) *Lambda // To address this fault you should either clean up unused resources or increase // the limit by contacting AWS. type LimitExceededFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A description that may help with diagnosing the cause of the fault. Message_ *string `locationName:"message" type:"string"` @@ -9457,17 +9457,17 @@ func (s LimitExceededFault) GoString() string { func newErrorLimitExceededFault(v protocol.ResponseMetadata) error { return &LimitExceededFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededFault) Code() string { +func (s *LimitExceededFault) Code() string { return "LimitExceededFault" } // Message returns the exception's message. -func (s LimitExceededFault) Message() string { +func (s *LimitExceededFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9475,22 +9475,22 @@ func (s LimitExceededFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededFault) OrigErr() error { +func (s *LimitExceededFault) OrigErr() error { return nil } -func (s LimitExceededFault) Error() string { +func (s *LimitExceededFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededFault) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededFault) RequestID() string { + return s.RespMetadata.RequestID } type ListActivityTypesInput struct { @@ -10352,8 +10352,8 @@ func (s *MarkerRecordedEventAttributes) SetMarkerName(v string) *MarkerRecordedE // Returned when the caller doesn't have sufficient permissions to invoke the // action. type OperationNotPermittedFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A description that may help with diagnosing the cause of the fault. Message_ *string `locationName:"message" type:"string"` @@ -10371,17 +10371,17 @@ func (s OperationNotPermittedFault) GoString() string { func newErrorOperationNotPermittedFault(v protocol.ResponseMetadata) error { return &OperationNotPermittedFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationNotPermittedFault) Code() string { +func (s *OperationNotPermittedFault) Code() string { return "OperationNotPermittedFault" } // Message returns the exception's message. -func (s OperationNotPermittedFault) Message() string { +func (s *OperationNotPermittedFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10389,22 +10389,22 @@ func (s OperationNotPermittedFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s OperationNotPermittedFault) OrigErr() error { +func (s *OperationNotPermittedFault) OrigErr() error { return nil } -func (s OperationNotPermittedFault) Error() string { +func (s *OperationNotPermittedFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OperationNotPermittedFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OperationNotPermittedFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationNotPermittedFault) RequestID() string { - return s.respMetadata.RequestID +func (s *OperationNotPermittedFault) RequestID() string { + return s.RespMetadata.RequestID } // Contains the count of tasks in a task list. @@ -14603,8 +14603,8 @@ func (s *TimerStartedEventAttributes) SetTimerId(v string) *TimerStartedEventAtt // You've exceeded the number of tags allowed for a domain. type TooManyTagsFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14621,17 +14621,17 @@ func (s TooManyTagsFault) GoString() string { func newErrorTooManyTagsFault(v protocol.ResponseMetadata) error { return &TooManyTagsFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsFault) Code() string { +func (s *TooManyTagsFault) Code() string { return "TooManyTagsFault" } // Message returns the exception's message. -func (s TooManyTagsFault) Message() string { +func (s *TooManyTagsFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14639,30 +14639,30 @@ func (s TooManyTagsFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsFault) OrigErr() error { +func (s *TooManyTagsFault) OrigErr() error { return nil } -func (s TooManyTagsFault) Error() string { +func (s *TooManyTagsFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsFault) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsFault) RequestID() string { + return s.RespMetadata.RequestID } // Returned if the type already exists in the specified domain. You may get // this fault if you are registering a type that is either already registered // or deprecated, or if you undeprecate a type that is currently registered. type TypeAlreadyExistsFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A description that may help with diagnosing the cause of the fault. Message_ *string `locationName:"message" type:"string"` @@ -14680,17 +14680,17 @@ func (s TypeAlreadyExistsFault) GoString() string { func newErrorTypeAlreadyExistsFault(v protocol.ResponseMetadata) error { return &TypeAlreadyExistsFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s TypeAlreadyExistsFault) Code() string { +func (s *TypeAlreadyExistsFault) Code() string { return "TypeAlreadyExistsFault" } // Message returns the exception's message. -func (s TypeAlreadyExistsFault) Message() string { +func (s *TypeAlreadyExistsFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14698,28 +14698,28 @@ func (s TypeAlreadyExistsFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TypeAlreadyExistsFault) OrigErr() error { +func (s *TypeAlreadyExistsFault) OrigErr() error { return nil } -func (s TypeAlreadyExistsFault) Error() string { +func (s *TypeAlreadyExistsFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TypeAlreadyExistsFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TypeAlreadyExistsFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TypeAlreadyExistsFault) RequestID() string { - return s.respMetadata.RequestID +func (s *TypeAlreadyExistsFault) RequestID() string { + return s.RespMetadata.RequestID } // Returned when the specified activity or workflow type was already deprecated. type TypeDeprecatedFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A description that may help with diagnosing the cause of the fault. Message_ *string `locationName:"message" type:"string"` @@ -14737,17 +14737,17 @@ func (s TypeDeprecatedFault) GoString() string { func newErrorTypeDeprecatedFault(v protocol.ResponseMetadata) error { return &TypeDeprecatedFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TypeDeprecatedFault) Code() string { +func (s *TypeDeprecatedFault) Code() string { return "TypeDeprecatedFault" } // Message returns the exception's message. -func (s TypeDeprecatedFault) Message() string { +func (s *TypeDeprecatedFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14755,22 +14755,22 @@ func (s TypeDeprecatedFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TypeDeprecatedFault) OrigErr() error { +func (s *TypeDeprecatedFault) OrigErr() error { return nil } -func (s TypeDeprecatedFault) Error() string { +func (s *TypeDeprecatedFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TypeDeprecatedFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TypeDeprecatedFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TypeDeprecatedFault) RequestID() string { - return s.respMetadata.RequestID +func (s *TypeDeprecatedFault) RequestID() string { + return s.RespMetadata.RequestID } type UndeprecateActivityTypeInput struct { @@ -14980,8 +14980,8 @@ func (s UndeprecateWorkflowTypeOutput) GoString() string { // operation (region or domain). This could happen if the named resource was // never created or is no longer available for this operation. 
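Reviewer note: the SWF fault types above now implement the error interfaces on pointer receivers and expose RespMetadata, so callers that inspect these faults should match against the pointer types. A hedged sketch using errors.As, assuming the typed-error constructors shown in this diff return the modeled fault directly; the domain name is a placeholder.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/swf"
)

func main() {
	svc := swf.New(session.Must(session.NewSession()))

	_, err := svc.DescribeDomain(&swf.DescribeDomainInput{
		Name: aws.String("nonexistent-domain"), // placeholder domain name
	})
	if err != nil {
		// After this change the fault is surfaced as *swf.UnknownResourceFault,
		// so matching is done on the pointer type.
		var fault *swf.UnknownResourceFault
		if errors.As(err, &fault) {
			fmt.Println("unknown resource:", fault.Message(), "request id:", fault.RequestID())
			return
		}
		fmt.Println("describe domain failed:", err)
		return
	}
	fmt.Println("domain described successfully")
}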
type UnknownResourceFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A description that may help with diagnosing the cause of the fault. Message_ *string `locationName:"message" type:"string"` @@ -14999,17 +14999,17 @@ func (s UnknownResourceFault) GoString() string { func newErrorUnknownResourceFault(v protocol.ResponseMetadata) error { return &UnknownResourceFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnknownResourceFault) Code() string { +func (s *UnknownResourceFault) Code() string { return "UnknownResourceFault" } // Message returns the exception's message. -func (s UnknownResourceFault) Message() string { +func (s *UnknownResourceFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15017,22 +15017,22 @@ func (s UnknownResourceFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnknownResourceFault) OrigErr() error { +func (s *UnknownResourceFault) OrigErr() error { return nil } -func (s UnknownResourceFault) Error() string { +func (s *UnknownResourceFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnknownResourceFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnknownResourceFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnknownResourceFault) RequestID() string { - return s.respMetadata.RequestID +func (s *UnknownResourceFault) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -15166,8 +15166,8 @@ func (s *WorkflowExecution) SetWorkflowId(v string) *WorkflowExecution { // Returned by StartWorkflowExecution when an open execution with the same workflowId // is already running in the specified domain. type WorkflowExecutionAlreadyStartedFault struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // A description that may help with diagnosing the cause of the fault. Message_ *string `locationName:"message" type:"string"` @@ -15185,17 +15185,17 @@ func (s WorkflowExecutionAlreadyStartedFault) GoString() string { func newErrorWorkflowExecutionAlreadyStartedFault(v protocol.ResponseMetadata) error { return &WorkflowExecutionAlreadyStartedFault{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WorkflowExecutionAlreadyStartedFault) Code() string { +func (s *WorkflowExecutionAlreadyStartedFault) Code() string { return "WorkflowExecutionAlreadyStartedFault" } // Message returns the exception's message. -func (s WorkflowExecutionAlreadyStartedFault) Message() string { +func (s *WorkflowExecutionAlreadyStartedFault) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -15203,22 +15203,22 @@ func (s WorkflowExecutionAlreadyStartedFault) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s WorkflowExecutionAlreadyStartedFault) OrigErr() error { +func (s *WorkflowExecutionAlreadyStartedFault) OrigErr() error { return nil } -func (s WorkflowExecutionAlreadyStartedFault) Error() string { +func (s *WorkflowExecutionAlreadyStartedFault) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WorkflowExecutionAlreadyStartedFault) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WorkflowExecutionAlreadyStartedFault) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WorkflowExecutionAlreadyStartedFault) RequestID() string { - return s.respMetadata.RequestID +func (s *WorkflowExecutionAlreadyStartedFault) RequestID() string { + return s.RespMetadata.RequestID } // Provides the details of the WorkflowExecutionCancelRequested event. @@ -16607,6 +16607,16 @@ const ( ActivityTaskTimeoutTypeHeartbeat = "HEARTBEAT" ) +// ActivityTaskTimeoutType_Values returns all elements of the ActivityTaskTimeoutType enum +func ActivityTaskTimeoutType_Values() []string { + return []string{ + ActivityTaskTimeoutTypeStartToClose, + ActivityTaskTimeoutTypeScheduleToStart, + ActivityTaskTimeoutTypeScheduleToClose, + ActivityTaskTimeoutTypeHeartbeat, + } +} + const ( // CancelTimerFailedCauseTimerIdUnknown is a CancelTimerFailedCause enum value CancelTimerFailedCauseTimerIdUnknown = "TIMER_ID_UNKNOWN" @@ -16615,6 +16625,14 @@ const ( CancelTimerFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" ) +// CancelTimerFailedCause_Values returns all elements of the CancelTimerFailedCause enum +func CancelTimerFailedCause_Values() []string { + return []string{ + CancelTimerFailedCauseTimerIdUnknown, + CancelTimerFailedCauseOperationNotPermitted, + } +} + const ( // CancelWorkflowExecutionFailedCauseUnhandledDecision is a CancelWorkflowExecutionFailedCause enum value CancelWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" @@ -16623,6 +16641,14 @@ const ( CancelWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" ) +// CancelWorkflowExecutionFailedCause_Values returns all elements of the CancelWorkflowExecutionFailedCause enum +func CancelWorkflowExecutionFailedCause_Values() []string { + return []string{ + CancelWorkflowExecutionFailedCauseUnhandledDecision, + CancelWorkflowExecutionFailedCauseOperationNotPermitted, + } +} + const ( // ChildPolicyTerminate is a ChildPolicy enum value ChildPolicyTerminate = "TERMINATE" @@ -16634,6 +16660,15 @@ const ( ChildPolicyAbandon = "ABANDON" ) +// ChildPolicy_Values returns all elements of the ChildPolicy enum +func ChildPolicy_Values() []string { + return []string{ + ChildPolicyTerminate, + ChildPolicyRequestCancel, + ChildPolicyAbandon, + } +} + const ( // CloseStatusCompleted is a CloseStatus enum value CloseStatusCompleted = "COMPLETED" @@ -16654,6 +16689,18 @@ const ( CloseStatusTimedOut = "TIMED_OUT" ) +// CloseStatus_Values returns all elements of the CloseStatus enum +func CloseStatus_Values() []string { + return []string{ + CloseStatusCompleted, + CloseStatusFailed, + CloseStatusCanceled, + CloseStatusTerminated, + CloseStatusContinuedAsNew, + CloseStatusTimedOut, + } +} + const ( // CompleteWorkflowExecutionFailedCauseUnhandledDecision is a CompleteWorkflowExecutionFailedCause enum value CompleteWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" @@ -16662,6 +16709,14 @@ const ( 
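The hunks above switch the SWF fault types to pointer receivers and export the previously internal respMetadata field as RespMetadata, so the HTTP status code and request ID of a failed call become reachable from calling code. Below is a minimal sketch of how a caller might inspect one of these faults after the upgrade; the choice of TooManyTagsFault and the error-wrapping behaviour are illustrative assumptions, not part of this diff.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/swf"
)

// inspectTagFault sketches two ways of recognising the regenerated fault types.
func inspectTagFault(err error) {
	// Classic SDK pattern, unchanged by this diff: match on the error code.
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == swf.ErrCodeTooManyTagsFault {
		fmt.Println("tag limit reached:", aerr.Message())
	}

	// With pointer receivers the concrete fault satisfies the error interface,
	// so errors.As also matches when the operation returns (or wraps) it, and
	// the exported RespMetadata backs StatusCode() and RequestID().
	var fault *swf.TooManyTagsFault
	if errors.As(err, &fault) {
		fmt.Println("status:", fault.StatusCode(), "request id:", fault.RequestID())
	}
}

func main() {
	inspectTagFault(nil) // placeholder call so the sketch builds as a program
}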
CompleteWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" ) +// CompleteWorkflowExecutionFailedCause_Values returns all elements of the CompleteWorkflowExecutionFailedCause enum +func CompleteWorkflowExecutionFailedCause_Values() []string { + return []string{ + CompleteWorkflowExecutionFailedCauseUnhandledDecision, + CompleteWorkflowExecutionFailedCauseOperationNotPermitted, + } +} + const ( // ContinueAsNewWorkflowExecutionFailedCauseUnhandledDecision is a ContinueAsNewWorkflowExecutionFailedCause enum value ContinueAsNewWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" @@ -16691,11 +16746,33 @@ const ( ContinueAsNewWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" ) +// ContinueAsNewWorkflowExecutionFailedCause_Values returns all elements of the ContinueAsNewWorkflowExecutionFailedCause enum +func ContinueAsNewWorkflowExecutionFailedCause_Values() []string { + return []string{ + ContinueAsNewWorkflowExecutionFailedCauseUnhandledDecision, + ContinueAsNewWorkflowExecutionFailedCauseWorkflowTypeDeprecated, + ContinueAsNewWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist, + ContinueAsNewWorkflowExecutionFailedCauseDefaultExecutionStartToCloseTimeoutUndefined, + ContinueAsNewWorkflowExecutionFailedCauseDefaultTaskStartToCloseTimeoutUndefined, + ContinueAsNewWorkflowExecutionFailedCauseDefaultTaskListUndefined, + ContinueAsNewWorkflowExecutionFailedCauseDefaultChildPolicyUndefined, + ContinueAsNewWorkflowExecutionFailedCauseContinueAsNewWorkflowExecutionRateExceeded, + ContinueAsNewWorkflowExecutionFailedCauseOperationNotPermitted, + } +} + const ( // DecisionTaskTimeoutTypeStartToClose is a DecisionTaskTimeoutType enum value DecisionTaskTimeoutTypeStartToClose = "START_TO_CLOSE" ) +// DecisionTaskTimeoutType_Values returns all elements of the DecisionTaskTimeoutType enum +func DecisionTaskTimeoutType_Values() []string { + return []string{ + DecisionTaskTimeoutTypeStartToClose, + } +} + const ( // DecisionTypeScheduleActivityTask is a DecisionType enum value DecisionTypeScheduleActivityTask = "ScheduleActivityTask" @@ -16737,6 +16814,25 @@ const ( DecisionTypeScheduleLambdaFunction = "ScheduleLambdaFunction" ) +// DecisionType_Values returns all elements of the DecisionType enum +func DecisionType_Values() []string { + return []string{ + DecisionTypeScheduleActivityTask, + DecisionTypeRequestCancelActivityTask, + DecisionTypeCompleteWorkflowExecution, + DecisionTypeFailWorkflowExecution, + DecisionTypeCancelWorkflowExecution, + DecisionTypeContinueAsNewWorkflowExecution, + DecisionTypeRecordMarker, + DecisionTypeStartTimer, + DecisionTypeCancelTimer, + DecisionTypeSignalExternalWorkflowExecution, + DecisionTypeRequestCancelExternalWorkflowExecution, + DecisionTypeStartChildWorkflowExecution, + DecisionTypeScheduleLambdaFunction, + } +} + const ( // EventTypeWorkflowExecutionStarted is a EventType enum value EventTypeWorkflowExecutionStarted = "WorkflowExecutionStarted" @@ -16901,6 +16997,66 @@ const ( EventTypeStartLambdaFunctionFailed = "StartLambdaFunctionFailed" ) +// EventType_Values returns all elements of the EventType enum +func EventType_Values() []string { + return []string{ + EventTypeWorkflowExecutionStarted, + EventTypeWorkflowExecutionCancelRequested, + EventTypeWorkflowExecutionCompleted, + EventTypeCompleteWorkflowExecutionFailed, + EventTypeWorkflowExecutionFailed, + EventTypeFailWorkflowExecutionFailed, + EventTypeWorkflowExecutionTimedOut, + EventTypeWorkflowExecutionCanceled, + 
EventTypeCancelWorkflowExecutionFailed, + EventTypeWorkflowExecutionContinuedAsNew, + EventTypeContinueAsNewWorkflowExecutionFailed, + EventTypeWorkflowExecutionTerminated, + EventTypeDecisionTaskScheduled, + EventTypeDecisionTaskStarted, + EventTypeDecisionTaskCompleted, + EventTypeDecisionTaskTimedOut, + EventTypeActivityTaskScheduled, + EventTypeScheduleActivityTaskFailed, + EventTypeActivityTaskStarted, + EventTypeActivityTaskCompleted, + EventTypeActivityTaskFailed, + EventTypeActivityTaskTimedOut, + EventTypeActivityTaskCanceled, + EventTypeActivityTaskCancelRequested, + EventTypeRequestCancelActivityTaskFailed, + EventTypeWorkflowExecutionSignaled, + EventTypeMarkerRecorded, + EventTypeRecordMarkerFailed, + EventTypeTimerStarted, + EventTypeStartTimerFailed, + EventTypeTimerFired, + EventTypeTimerCanceled, + EventTypeCancelTimerFailed, + EventTypeStartChildWorkflowExecutionInitiated, + EventTypeStartChildWorkflowExecutionFailed, + EventTypeChildWorkflowExecutionStarted, + EventTypeChildWorkflowExecutionCompleted, + EventTypeChildWorkflowExecutionFailed, + EventTypeChildWorkflowExecutionTimedOut, + EventTypeChildWorkflowExecutionCanceled, + EventTypeChildWorkflowExecutionTerminated, + EventTypeSignalExternalWorkflowExecutionInitiated, + EventTypeSignalExternalWorkflowExecutionFailed, + EventTypeExternalWorkflowExecutionSignaled, + EventTypeRequestCancelExternalWorkflowExecutionInitiated, + EventTypeRequestCancelExternalWorkflowExecutionFailed, + EventTypeExternalWorkflowExecutionCancelRequested, + EventTypeLambdaFunctionScheduled, + EventTypeLambdaFunctionStarted, + EventTypeLambdaFunctionCompleted, + EventTypeLambdaFunctionFailed, + EventTypeLambdaFunctionTimedOut, + EventTypeScheduleLambdaFunctionFailed, + EventTypeStartLambdaFunctionFailed, + } +} + const ( // ExecutionStatusOpen is a ExecutionStatus enum value ExecutionStatusOpen = "OPEN" @@ -16909,6 +17065,14 @@ const ( ExecutionStatusClosed = "CLOSED" ) +// ExecutionStatus_Values returns all elements of the ExecutionStatus enum +func ExecutionStatus_Values() []string { + return []string{ + ExecutionStatusOpen, + ExecutionStatusClosed, + } +} + const ( // FailWorkflowExecutionFailedCauseUnhandledDecision is a FailWorkflowExecutionFailedCause enum value FailWorkflowExecutionFailedCauseUnhandledDecision = "UNHANDLED_DECISION" @@ -16917,16 +17081,38 @@ const ( FailWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" ) +// FailWorkflowExecutionFailedCause_Values returns all elements of the FailWorkflowExecutionFailedCause enum +func FailWorkflowExecutionFailedCause_Values() []string { + return []string{ + FailWorkflowExecutionFailedCauseUnhandledDecision, + FailWorkflowExecutionFailedCauseOperationNotPermitted, + } +} + const ( // LambdaFunctionTimeoutTypeStartToClose is a LambdaFunctionTimeoutType enum value LambdaFunctionTimeoutTypeStartToClose = "START_TO_CLOSE" ) +// LambdaFunctionTimeoutType_Values returns all elements of the LambdaFunctionTimeoutType enum +func LambdaFunctionTimeoutType_Values() []string { + return []string{ + LambdaFunctionTimeoutTypeStartToClose, + } +} + const ( // RecordMarkerFailedCauseOperationNotPermitted is a RecordMarkerFailedCause enum value RecordMarkerFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" ) +// RecordMarkerFailedCause_Values returns all elements of the RecordMarkerFailedCause enum +func RecordMarkerFailedCause_Values() []string { + return []string{ + RecordMarkerFailedCauseOperationNotPermitted, + } +} + const ( // RegistrationStatusRegistered is a 
RegistrationStatus enum value RegistrationStatusRegistered = "REGISTERED" @@ -16935,6 +17121,14 @@ const ( RegistrationStatusDeprecated = "DEPRECATED" ) +// RegistrationStatus_Values returns all elements of the RegistrationStatus enum +func RegistrationStatus_Values() []string { + return []string{ + RegistrationStatusRegistered, + RegistrationStatusDeprecated, + } +} + const ( // RequestCancelActivityTaskFailedCauseActivityIdUnknown is a RequestCancelActivityTaskFailedCause enum value RequestCancelActivityTaskFailedCauseActivityIdUnknown = "ACTIVITY_ID_UNKNOWN" @@ -16943,6 +17137,14 @@ const ( RequestCancelActivityTaskFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" ) +// RequestCancelActivityTaskFailedCause_Values returns all elements of the RequestCancelActivityTaskFailedCause enum +func RequestCancelActivityTaskFailedCause_Values() []string { + return []string{ + RequestCancelActivityTaskFailedCauseActivityIdUnknown, + RequestCancelActivityTaskFailedCauseOperationNotPermitted, + } +} + const ( // RequestCancelExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution is a RequestCancelExternalWorkflowExecutionFailedCause enum value RequestCancelExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" @@ -16954,6 +17156,15 @@ const ( RequestCancelExternalWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" ) +// RequestCancelExternalWorkflowExecutionFailedCause_Values returns all elements of the RequestCancelExternalWorkflowExecutionFailedCause enum +func RequestCancelExternalWorkflowExecutionFailedCause_Values() []string { + return []string{ + RequestCancelExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution, + RequestCancelExternalWorkflowExecutionFailedCauseRequestCancelExternalWorkflowExecutionRateExceeded, + RequestCancelExternalWorkflowExecutionFailedCauseOperationNotPermitted, + } +} + const ( // ScheduleActivityTaskFailedCauseActivityTypeDeprecated is a ScheduleActivityTaskFailedCause enum value ScheduleActivityTaskFailedCauseActivityTypeDeprecated = "ACTIVITY_TYPE_DEPRECATED" @@ -16989,6 +17200,23 @@ const ( ScheduleActivityTaskFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" ) +// ScheduleActivityTaskFailedCause_Values returns all elements of the ScheduleActivityTaskFailedCause enum +func ScheduleActivityTaskFailedCause_Values() []string { + return []string{ + ScheduleActivityTaskFailedCauseActivityTypeDeprecated, + ScheduleActivityTaskFailedCauseActivityTypeDoesNotExist, + ScheduleActivityTaskFailedCauseActivityIdAlreadyInUse, + ScheduleActivityTaskFailedCauseOpenActivitiesLimitExceeded, + ScheduleActivityTaskFailedCauseActivityCreationRateExceeded, + ScheduleActivityTaskFailedCauseDefaultScheduleToCloseTimeoutUndefined, + ScheduleActivityTaskFailedCauseDefaultTaskListUndefined, + ScheduleActivityTaskFailedCauseDefaultScheduleToStartTimeoutUndefined, + ScheduleActivityTaskFailedCauseDefaultStartToCloseTimeoutUndefined, + ScheduleActivityTaskFailedCauseDefaultHeartbeatTimeoutUndefined, + ScheduleActivityTaskFailedCauseOperationNotPermitted, + } +} + const ( // ScheduleLambdaFunctionFailedCauseIdAlreadyInUse is a ScheduleLambdaFunctionFailedCause enum value ScheduleLambdaFunctionFailedCauseIdAlreadyInUse = "ID_ALREADY_IN_USE" @@ -17003,6 +17231,16 @@ const ( ScheduleLambdaFunctionFailedCauseLambdaServiceNotAvailableInRegion = "LAMBDA_SERVICE_NOT_AVAILABLE_IN_REGION" ) +// ScheduleLambdaFunctionFailedCause_Values returns all elements of the 
ScheduleLambdaFunctionFailedCause enum +func ScheduleLambdaFunctionFailedCause_Values() []string { + return []string{ + ScheduleLambdaFunctionFailedCauseIdAlreadyInUse, + ScheduleLambdaFunctionFailedCauseOpenLambdaFunctionsLimitExceeded, + ScheduleLambdaFunctionFailedCauseLambdaFunctionCreationRateExceeded, + ScheduleLambdaFunctionFailedCauseLambdaServiceNotAvailableInRegion, + } +} + const ( // SignalExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution is a SignalExternalWorkflowExecutionFailedCause enum value SignalExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution = "UNKNOWN_EXTERNAL_WORKFLOW_EXECUTION" @@ -17014,6 +17252,15 @@ const ( SignalExternalWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" ) +// SignalExternalWorkflowExecutionFailedCause_Values returns all elements of the SignalExternalWorkflowExecutionFailedCause enum +func SignalExternalWorkflowExecutionFailedCause_Values() []string { + return []string{ + SignalExternalWorkflowExecutionFailedCauseUnknownExternalWorkflowExecution, + SignalExternalWorkflowExecutionFailedCauseSignalExternalWorkflowExecutionRateExceeded, + SignalExternalWorkflowExecutionFailedCauseOperationNotPermitted, + } +} + const ( // StartChildWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist is a StartChildWorkflowExecutionFailedCause enum value StartChildWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist = "WORKFLOW_TYPE_DOES_NOT_EXIST" @@ -17049,11 +17296,35 @@ const ( StartChildWorkflowExecutionFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" ) +// StartChildWorkflowExecutionFailedCause_Values returns all elements of the StartChildWorkflowExecutionFailedCause enum +func StartChildWorkflowExecutionFailedCause_Values() []string { + return []string{ + StartChildWorkflowExecutionFailedCauseWorkflowTypeDoesNotExist, + StartChildWorkflowExecutionFailedCauseWorkflowTypeDeprecated, + StartChildWorkflowExecutionFailedCauseOpenChildrenLimitExceeded, + StartChildWorkflowExecutionFailedCauseOpenWorkflowsLimitExceeded, + StartChildWorkflowExecutionFailedCauseChildCreationRateExceeded, + StartChildWorkflowExecutionFailedCauseWorkflowAlreadyRunning, + StartChildWorkflowExecutionFailedCauseDefaultExecutionStartToCloseTimeoutUndefined, + StartChildWorkflowExecutionFailedCauseDefaultTaskListUndefined, + StartChildWorkflowExecutionFailedCauseDefaultTaskStartToCloseTimeoutUndefined, + StartChildWorkflowExecutionFailedCauseDefaultChildPolicyUndefined, + StartChildWorkflowExecutionFailedCauseOperationNotPermitted, + } +} + const ( // StartLambdaFunctionFailedCauseAssumeRoleFailed is a StartLambdaFunctionFailedCause enum value StartLambdaFunctionFailedCauseAssumeRoleFailed = "ASSUME_ROLE_FAILED" ) +// StartLambdaFunctionFailedCause_Values returns all elements of the StartLambdaFunctionFailedCause enum +func StartLambdaFunctionFailedCause_Values() []string { + return []string{ + StartLambdaFunctionFailedCauseAssumeRoleFailed, + } +} + const ( // StartTimerFailedCauseTimerIdAlreadyInUse is a StartTimerFailedCause enum value StartTimerFailedCauseTimerIdAlreadyInUse = "TIMER_ID_ALREADY_IN_USE" @@ -17068,11 +17339,28 @@ const ( StartTimerFailedCauseOperationNotPermitted = "OPERATION_NOT_PERMITTED" ) +// StartTimerFailedCause_Values returns all elements of the StartTimerFailedCause enum +func StartTimerFailedCause_Values() []string { + return []string{ + StartTimerFailedCauseTimerIdAlreadyInUse, + StartTimerFailedCauseOpenTimersLimitExceeded, + StartTimerFailedCauseTimerCreationRateExceeded, + 
StartTimerFailedCauseOperationNotPermitted, + } +} + const ( // WorkflowExecutionCancelRequestedCauseChildPolicyApplied is a WorkflowExecutionCancelRequestedCause enum value WorkflowExecutionCancelRequestedCauseChildPolicyApplied = "CHILD_POLICY_APPLIED" ) +// WorkflowExecutionCancelRequestedCause_Values returns all elements of the WorkflowExecutionCancelRequestedCause enum +func WorkflowExecutionCancelRequestedCause_Values() []string { + return []string{ + WorkflowExecutionCancelRequestedCauseChildPolicyApplied, + } +} + const ( // WorkflowExecutionTerminatedCauseChildPolicyApplied is a WorkflowExecutionTerminatedCause enum value WorkflowExecutionTerminatedCauseChildPolicyApplied = "CHILD_POLICY_APPLIED" @@ -17084,7 +17372,23 @@ const ( WorkflowExecutionTerminatedCauseOperatorInitiated = "OPERATOR_INITIATED" ) +// WorkflowExecutionTerminatedCause_Values returns all elements of the WorkflowExecutionTerminatedCause enum +func WorkflowExecutionTerminatedCause_Values() []string { + return []string{ + WorkflowExecutionTerminatedCauseChildPolicyApplied, + WorkflowExecutionTerminatedCauseEventLimitExceeded, + WorkflowExecutionTerminatedCauseOperatorInitiated, + } +} + const ( // WorkflowExecutionTimeoutTypeStartToClose is a WorkflowExecutionTimeoutType enum value WorkflowExecutionTimeoutTypeStartToClose = "START_TO_CLOSE" ) + +// WorkflowExecutionTimeoutType_Values returns all elements of the WorkflowExecutionTimeoutType enum +func WorkflowExecutionTimeoutType_Values() []string { + return []string{ + WorkflowExecutionTimeoutTypeStartToClose, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/swf/service.go b/vendor/github.com/aws/aws-sdk-go/service/swf/service.go index ec7fd1e82..a6ee9188d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/swf/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/swf/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go b/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go index eba6b2235..3ff005236 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go @@ -55,26 +55,29 @@ func (c *Transfer) CreateServerRequest(input *CreateServerInput) (req *request.R return } -// CreateServer API operation for AWS Transfer for SFTP. +// CreateServer API operation for AWS Transfer Family. // -// Instantiates an autoscaling virtual server based on Secure File Transfer -// Protocol (SFTP) in AWS. When you make updates to your server or when you -// work with users, use the service-generated ServerId property that is assigned -// to the newly created server. +// Instantiates an autoscaling virtual server based on the selected file transfer +// protocol in AWS. When you make updates to your file transfer protocol-enabled +// server or when you work with users, use the service-generated ServerId property +// that is assigned to the newly created server. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
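The generated *_Values() helpers added above return every member of an SWF enum as a []string. A hedged sketch of the input validation they enable follows; validating a child policy is only an illustrative choice, and in a Terraform provider these slices are typically passed to validation.StringInSlice in a resource schema instead.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/swf"
)

// isValidChildPolicy checks a caller-supplied value against the generated
// enum helper instead of a hand-maintained list of strings.
func isValidChildPolicy(v string) bool {
	for _, p := range swf.ChildPolicy_Values() {
		if v == p {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isValidChildPolicy(swf.ChildPolicyTerminate)) // true
	fmt.Println(isValidChildPolicy("RETRY"))                  // false
}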
// -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation CreateServer for usage and error information. // // Returned Error Types: +// * AccessDeniedException +// You do not have sufficient access to perform this action. +// // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidRequestException @@ -83,6 +86,11 @@ func (c *Transfer) CreateServerRequest(input *CreateServerInput) (req *request.R // * ResourceExistsException // The requested resource does not exist. // +// * ThrottlingException +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/CreateServer func (c *Transfer) CreateServer(input *CreateServerInput) (*CreateServerOutput, error) { req, out := c.CreateServerRequest(input) @@ -147,29 +155,29 @@ func (c *Transfer) CreateUserRequest(input *CreateUserInput) (req *request.Reque return } -// CreateUser API operation for AWS Transfer for SFTP. +// CreateUser API operation for AWS Transfer Family. // -// Creates a user and associates them with an existing Secure File Transfer -// Protocol (SFTP) server. You can only create and associate users with SFTP -// servers that have the IdentityProviderType set to SERVICE_MANAGED. Using -// parameters for CreateUser, you can specify the user name, set the home directory, -// store the user's public key, and assign the user's AWS Identity and Access -// Management (IAM) role. You can also optionally add a scope-down policy, and -// assign metadata with tags that can be used to group and search for users. +// Creates a user and associates them with an existing file transfer protocol-enabled +// server. You can only create and associate users with servers that have the +// IdentityProviderType set to SERVICE_MANAGED. Using parameters for CreateUser, +// you can specify the user name, set the home directory, store the user's public +// key, and assign the user's AWS Identity and Access Management (IAM) role. +// You can also optionally add a scope-down policy, and assign metadata with +// tags that can be used to group and search for users. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation CreateUser for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. 
// // * InvalidRequestException @@ -180,7 +188,7 @@ func (c *Transfer) CreateUserRequest(input *CreateUserInput) (req *request.Reque // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/CreateUser func (c *Transfer) CreateUser(input *CreateUserInput) (*CreateUserOutput, error) { @@ -247,9 +255,9 @@ func (c *Transfer) DeleteServerRequest(input *DeleteServerInput) (req *request.R return } -// DeleteServer API operation for AWS Transfer for SFTP. +// DeleteServer API operation for AWS Transfer Family. // -// Deletes the Secure File Transfer Protocol (SFTP) server that you specify. +// Deletes the file transfer protocol-enabled server that you specify. // // No response returns from this operation. // @@ -257,15 +265,18 @@ func (c *Transfer) DeleteServerRequest(input *DeleteServerInput) (req *request.R // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation DeleteServer for usage and error information. // // Returned Error Types: +// * AccessDeniedException +// You do not have sufficient access to perform this action. +// // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidRequestException @@ -273,7 +284,7 @@ func (c *Transfer) DeleteServerRequest(input *DeleteServerInput) (req *request.R // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/DeleteServer func (c *Transfer) DeleteServer(input *DeleteServerInput) (*DeleteServerOutput, error) { @@ -340,7 +351,7 @@ func (c *Transfer) DeleteSshPublicKeyRequest(input *DeleteSshPublicKeyInput) (re return } -// DeleteSshPublicKey API operation for AWS Transfer for SFTP. +// DeleteSshPublicKey API operation for AWS Transfer Family. // // Deletes a user's Secure Shell (SSH) public key. // @@ -350,15 +361,15 @@ func (c *Transfer) DeleteSshPublicKeyRequest(input *DeleteSshPublicKeyInput) (re // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation DeleteSshPublicKey for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. 
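The CreateUser documentation above lists what a caller supplies: the user name, home directory, SSH public key, and IAM role. The sketch below shows such a call against the upgraded client; every identifier is a placeholder and the input/output field names are assumptions based on the Transfer API shape, not something this diff asserts.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

// createExampleUser creates a service-managed user on an existing server.
// Every identifier below is a hypothetical placeholder.
func createExampleUser(conn *transfer.Transfer) error {
	out, err := conn.CreateUser(&transfer.CreateUserInput{
		ServerId:      aws.String("s-0123456789abcdef0"),
		UserName:      aws.String("example-user"),
		Role:          aws.String("arn:aws:iam::111122223333:role/example-transfer-role"),
		HomeDirectory: aws.String("/example-bucket/home/example-user"),
	})
	if err != nil {
		return err
	}
	log.Printf("created user %s on server %s", aws.StringValue(out.UserName), aws.StringValue(out.ServerId))
	return nil
}

func main() {
	sess := session.Must(session.NewSession())
	if err := createExampleUser(transfer.New(sess)); err != nil {
		log.Fatal(err)
	}
}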
// // * InvalidRequestException @@ -366,7 +377,7 @@ func (c *Transfer) DeleteSshPublicKeyRequest(input *DeleteSshPublicKeyInput) (re // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // * ThrottlingException // The request was denied due to request throttling. @@ -438,9 +449,10 @@ func (c *Transfer) DeleteUserRequest(input *DeleteUserInput) (req *request.Reque return } -// DeleteUser API operation for AWS Transfer for SFTP. +// DeleteUser API operation for AWS Transfer Family. // -// Deletes the user belonging to the server you specify. +// Deletes the user belonging to a file transfer protocol-enabled server you +// specify. // // No response returns from this operation. // @@ -450,15 +462,15 @@ func (c *Transfer) DeleteUserRequest(input *DeleteUserInput) (req *request.Reque // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation DeleteUser for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidRequestException @@ -466,7 +478,7 @@ func (c *Transfer) DeleteUserRequest(input *DeleteUserInput) (req *request.Reque // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/DeleteUser func (c *Transfer) DeleteUser(input *DeleteUserInput) (*DeleteUserOutput, error) { @@ -490,6 +502,99 @@ func (c *Transfer) DeleteUserWithContext(ctx aws.Context, input *DeleteUserInput return out, req.Send() } +const opDescribeSecurityPolicy = "DescribeSecurityPolicy" + +// DescribeSecurityPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSecurityPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSecurityPolicy for more information on using the DescribeSecurityPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSecurityPolicyRequest method. 
+// req, resp := client.DescribeSecurityPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/DescribeSecurityPolicy +func (c *Transfer) DescribeSecurityPolicyRequest(input *DescribeSecurityPolicyInput) (req *request.Request, output *DescribeSecurityPolicyOutput) { + op := &request.Operation{ + Name: opDescribeSecurityPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSecurityPolicyInput{} + } + + output = &DescribeSecurityPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSecurityPolicy API operation for AWS Transfer Family. +// +// Describes the security policy that is attached to your file transfer protocol-enabled +// server. The response contains a description of the security policy's properties. +// For more information about security policies, see Working with security policies +// (https://docs.aws.amazon.com/transfer/latest/userguide/security-policies.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Transfer Family's +// API operation DescribeSecurityPolicy for usage and error information. +// +// Returned Error Types: +// * ServiceUnavailableException +// The request has failed because the AWS Transfer Family service is not available. +// +// * InternalServiceError +// This exception is thrown when an error occurs in the AWS Transfer Family +// service. +// +// * InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// * ResourceNotFoundException +// This exception is thrown when a resource is not found by the AWS Transfer +// Family service. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/DescribeSecurityPolicy +func (c *Transfer) DescribeSecurityPolicy(input *DescribeSecurityPolicyInput) (*DescribeSecurityPolicyOutput, error) { + req, out := c.DescribeSecurityPolicyRequest(input) + return out, req.Send() +} + +// DescribeSecurityPolicyWithContext is the same as DescribeSecurityPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSecurityPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Transfer) DescribeSecurityPolicyWithContext(ctx aws.Context, input *DescribeSecurityPolicyInput, opts ...request.Option) (*DescribeSecurityPolicyOutput, error) { + req, out := c.DescribeSecurityPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeServer = "DescribeServer" // DescribeServerRequest generates a "aws/request.Request" representing the @@ -532,26 +637,27 @@ func (c *Transfer) DescribeServerRequest(input *DescribeServerInput) (req *reque return } -// DescribeServer API operation for AWS Transfer for SFTP. +// DescribeServer API operation for AWS Transfer Family. // -// Describes the server that you specify by passing the ServerId parameter. 
+// Describes a file transfer protocol-enabled server that you specify by passing +// the ServerId parameter. // -// The response contains a description of the server's properties. When you -// set EndpointType to VPC, the response will contain the EndpointDetails. +// The response contains a description of a server's properties. When you set +// EndpointType to VPC, the response will contain the EndpointDetails. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation DescribeServer for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidRequestException @@ -559,7 +665,7 @@ func (c *Transfer) DescribeServerRequest(input *DescribeServerInput) (req *reque // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/DescribeServer func (c *Transfer) DescribeServer(input *DescribeServerInput) (*DescribeServerOutput, error) { @@ -625,10 +731,10 @@ func (c *Transfer) DescribeUserRequest(input *DescribeUserInput) (req *request.R return } -// DescribeUser API operation for AWS Transfer for SFTP. +// DescribeUser API operation for AWS Transfer Family. // -// Describes the user assigned to a specific server, as identified by its ServerId -// property. +// Describes the user assigned to the specific file transfer protocol-enabled +// server, as identified by its ServerId property. // // The response from this call returns the properties of the user associated // with the ServerId value that was specified. @@ -637,15 +743,15 @@ func (c *Transfer) DescribeUserRequest(input *DescribeUserInput) (req *request.R // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation DescribeUser for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidRequestException @@ -653,7 +759,7 @@ func (c *Transfer) DescribeUserRequest(input *DescribeUserInput) (req *request.R // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/DescribeUser func (c *Transfer) DescribeUser(input *DescribeUserInput) (*DescribeUserOutput, error) { @@ -719,10 +825,11 @@ func (c *Transfer) ImportSshPublicKeyRequest(input *ImportSshPublicKeyInput) (re return } -// ImportSshPublicKey API operation for AWS Transfer for SFTP. +// ImportSshPublicKey API operation for AWS Transfer Family. // // Adds a Secure Shell (SSH) public key to a user account identified by a UserName -// value assigned to a specific server, identified by ServerId. +// value assigned to the specific file transfer protocol-enabled server, identified +// by ServerId. // // The response returns the UserName value, the ServerId value, and the name // of the SshPublicKeyId. @@ -731,15 +838,15 @@ func (c *Transfer) ImportSshPublicKeyRequest(input *ImportSshPublicKeyInput) (re // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation ImportSshPublicKey for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidRequestException @@ -750,7 +857,7 @@ func (c *Transfer) ImportSshPublicKeyRequest(input *ImportSshPublicKeyInput) (re // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // * ThrottlingException // The request was denied due to request throttling. @@ -779,6 +886,154 @@ func (c *Transfer) ImportSshPublicKeyWithContext(ctx aws.Context, input *ImportS return out, req.Send() } +const opListSecurityPolicies = "ListSecurityPolicies" + +// ListSecurityPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListSecurityPolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListSecurityPolicies for more information on using the ListSecurityPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListSecurityPoliciesRequest method. 
+// req, resp := client.ListSecurityPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/ListSecurityPolicies +func (c *Transfer) ListSecurityPoliciesRequest(input *ListSecurityPoliciesInput) (req *request.Request, output *ListSecurityPoliciesOutput) { + op := &request.Operation{ + Name: opListSecurityPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSecurityPoliciesInput{} + } + + output = &ListSecurityPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSecurityPolicies API operation for AWS Transfer Family. +// +// Lists the security policies that are attached to your file transfer protocol-enabled +// servers. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Transfer Family's +// API operation ListSecurityPolicies for usage and error information. +// +// Returned Error Types: +// * ServiceUnavailableException +// The request has failed because the AWS Transfer Family service is not available. +// +// * InternalServiceError +// This exception is thrown when an error occurs in the AWS Transfer Family +// service. +// +// * InvalidNextTokenException +// The NextToken parameter that was passed is invalid. +// +// * InvalidRequestException +// This exception is thrown when the client submits a malformed request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/ListSecurityPolicies +func (c *Transfer) ListSecurityPolicies(input *ListSecurityPoliciesInput) (*ListSecurityPoliciesOutput, error) { + req, out := c.ListSecurityPoliciesRequest(input) + return out, req.Send() +} + +// ListSecurityPoliciesWithContext is the same as ListSecurityPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See ListSecurityPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Transfer) ListSecurityPoliciesWithContext(ctx aws.Context, input *ListSecurityPoliciesInput, opts ...request.Option) (*ListSecurityPoliciesOutput, error) { + req, out := c.ListSecurityPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListSecurityPoliciesPages iterates over the pages of a ListSecurityPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSecurityPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSecurityPolicies operation. 
+// pageNum := 0 +// err := client.ListSecurityPoliciesPages(params, +// func(page *transfer.ListSecurityPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Transfer) ListSecurityPoliciesPages(input *ListSecurityPoliciesInput, fn func(*ListSecurityPoliciesOutput, bool) bool) error { + return c.ListSecurityPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSecurityPoliciesPagesWithContext same as ListSecurityPoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Transfer) ListSecurityPoliciesPagesWithContext(ctx aws.Context, input *ListSecurityPoliciesInput, fn func(*ListSecurityPoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSecurityPoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSecurityPoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListSecurityPoliciesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListServers = "ListServers" // ListServersRequest generates a "aws/request.Request" representing the @@ -827,24 +1082,24 @@ func (c *Transfer) ListServersRequest(input *ListServersInput) (req *request.Req return } -// ListServers API operation for AWS Transfer for SFTP. +// ListServers API operation for AWS Transfer Family. // -// Lists the Secure File Transfer Protocol (SFTP) servers that are associated -// with your AWS account. +// Lists the file transfer protocol-enabled servers that are associated with +// your AWS account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation ListServers for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidNextTokenException @@ -975,7 +1230,7 @@ func (c *Transfer) ListTagsForResourceRequest(input *ListTagsForResourceInput) ( return } -// ListTagsForResource API operation for AWS Transfer for SFTP. +// ListTagsForResource API operation for AWS Transfer Family. // // Lists all of the tags associated with the Amazon Resource Number (ARN) you // specify. The resource can be a user, server, or role. @@ -984,15 +1239,15 @@ func (c *Transfer) ListTagsForResourceRequest(input *ListTagsForResourceInput) ( // with awserr.Error's Code and Message methods to get detailed information about // the error. 
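ListSecurityPolicies (with its Pages paginator) and DescribeSecurityPolicy are the two operations this SDK upgrade introduces for Transfer Family. The sketch below chains them together; the SecurityPolicyNames and SecurityPolicy.SecurityPolicyName field names are assumptions about the generated shapes rather than something shown in this hunk.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

// describeAllSecurityPolicies pages through every security policy name and
// then describes each one. Field names are assumptions noted above.
func describeAllSecurityPolicies(conn *transfer.Transfer) error {
	var names []string
	err := conn.ListSecurityPoliciesPages(&transfer.ListSecurityPoliciesInput{},
		func(page *transfer.ListSecurityPoliciesOutput, lastPage bool) bool {
			for _, n := range page.SecurityPolicyNames {
				names = append(names, aws.StringValue(n))
			}
			return !lastPage // keep requesting pages until the service reports the last one
		})
	if err != nil {
		return err
	}

	for _, name := range names {
		out, err := conn.DescribeSecurityPolicy(&transfer.DescribeSecurityPolicyInput{
			SecurityPolicyName: aws.String(name),
		})
		if err != nil {
			return err
		}
		fmt.Println("described policy:", aws.StringValue(out.SecurityPolicy.SecurityPolicyName))
	}
	return nil
}

func main() {
	sess := session.Must(session.NewSession())
	if err := describeAllSecurityPolicies(transfer.New(sess)); err != nil {
		log.Fatal(err)
	}
}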
// -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation ListTagsForResource for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidNextTokenException @@ -1123,23 +1378,24 @@ func (c *Transfer) ListUsersRequest(input *ListUsersInput) (req *request.Request return } -// ListUsers API operation for AWS Transfer for SFTP. +// ListUsers API operation for AWS Transfer Family. // -// Lists the users for the server that you specify by passing the ServerId parameter. +// Lists the users for a file transfer protocol-enabled server that you specify +// by passing the ServerId parameter. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation ListUsers for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidNextTokenException @@ -1150,7 +1406,7 @@ func (c *Transfer) ListUsersRequest(input *ListUsersInput) (req *request.Request // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/ListUsers func (c *Transfer) ListUsers(input *ListUsersInput) (*ListUsersOutput, error) { @@ -1269,11 +1525,11 @@ func (c *Transfer) StartServerRequest(input *StartServerInput) (req *request.Req return } -// StartServer API operation for AWS Transfer for SFTP. +// StartServer API operation for AWS Transfer Family. // -// Changes the state of a Secure File Transfer Protocol (SFTP) server from OFFLINE -// to ONLINE. It has no impact on an SFTP server that is already ONLINE. An -// ONLINE server can accept and process file transfer jobs. +// Changes the state of a file transfer protocol-enabled server from OFFLINE +// to ONLINE. It has no impact on a server that is already ONLINE. An ONLINE +// server can accept and process file transfer jobs. // // The state of STARTING indicates that the server is in an intermediate state, // either not fully able to respond, or not fully online. The values of START_FAILED @@ -1285,15 +1541,15 @@ func (c *Transfer) StartServerRequest(input *StartServerInput) (req *request.Req // with awserr.Error's Code and Message methods to get detailed information about // the error. 
// -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation StartServer for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidRequestException @@ -1301,7 +1557,7 @@ func (c *Transfer) StartServerRequest(input *StartServerInput) (req *request.Req // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // * ThrottlingException // The request was denied due to request throttling. @@ -1373,13 +1629,15 @@ func (c *Transfer) StopServerRequest(input *StopServerInput) (req *request.Reque return } -// StopServer API operation for AWS Transfer for SFTP. +// StopServer API operation for AWS Transfer Family. +// +// Changes the state of a file transfer protocol-enabled server from ONLINE +// to OFFLINE. An OFFLINE server cannot accept and process file transfer jobs. +// Information tied to your server, such as server and user properties, are +// not affected by stopping your server. // -// Changes the state of an SFTP server from ONLINE to OFFLINE. An OFFLINE server -// cannot accept and process file transfer jobs. Information tied to your server -// such as server and user properties are not affected by stopping your server. -// Stopping a server will not reduce or impact your Secure File Transfer Protocol -// (SFTP) endpoint billing. +// Stopping the server will not reduce or impact your file transfer protocol +// endpoint billing; you must delete the server to stop being billed. // // The state of STOPPING indicates that the server is in an intermediate state, // either not fully able to respond, or not fully offline. The values of STOP_FAILED @@ -1391,15 +1649,15 @@ func (c *Transfer) StopServerRequest(input *StopServerInput) (req *request.Reque // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation StopServer for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidRequestException @@ -1407,7 +1665,7 @@ func (c *Transfer) StopServerRequest(input *StopServerInput) (req *request.Reque // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // * ThrottlingException // The request was denied due to request throttling. @@ -1479,7 +1737,7 @@ func (c *Transfer) TagResourceRequest(input *TagResourceInput) (req *request.Req return } -// TagResource API operation for AWS Transfer for SFTP. 
+// TagResource API operation for AWS Transfer Family. // // Attaches a key-value pair to a resource, as identified by its Amazon Resource // Name (ARN). Resources are users, servers, roles, and other entities. @@ -1490,15 +1748,15 @@ func (c *Transfer) TagResourceRequest(input *TagResourceInput) (req *request.Req // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation TagResource for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidRequestException @@ -1506,7 +1764,7 @@ func (c *Transfer) TagResourceRequest(input *TagResourceInput) (req *request.Req // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/TagResource func (c *Transfer) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { @@ -1572,27 +1830,28 @@ func (c *Transfer) TestIdentityProviderRequest(input *TestIdentityProviderInput) return } -// TestIdentityProvider API operation for AWS Transfer for SFTP. +// TestIdentityProvider API operation for AWS Transfer Family. // -// If the IdentityProviderType of the server is API_Gateway, tests whether your -// API Gateway is set up successfully. We highly recommend that you call this -// operation to test your authentication method as soon as you create your server. -// By doing so, you can troubleshoot issues with the API Gateway integration -// to ensure that your users can successfully use the service. +// If the IdentityProviderType of a file transfer protocol-enabled server is +// API_Gateway, tests whether your API Gateway is set up successfully. We highly +// recommend that you call this operation to test your authentication method +// as soon as you create your server. By doing so, you can troubleshoot issues +// with the API Gateway integration to ensure that your users can successfully +// use the service. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation TestIdentityProvider for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. 
// // * InvalidRequestException @@ -1600,7 +1859,7 @@ func (c *Transfer) TestIdentityProviderRequest(input *TestIdentityProviderInput) // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/TestIdentityProvider func (c *Transfer) TestIdentityProvider(input *TestIdentityProviderInput) (*TestIdentityProviderOutput, error) { @@ -1667,7 +1926,7 @@ func (c *Transfer) UntagResourceRequest(input *UntagResourceInput) (req *request return } -// UntagResource API operation for AWS Transfer for SFTP. +// UntagResource API operation for AWS Transfer Family. // // Detaches a key-value pair from a resource, as identified by its Amazon Resource // Name (ARN). Resources are users, servers, roles, and other entities. @@ -1678,15 +1937,15 @@ func (c *Transfer) UntagResourceRequest(input *UntagResourceInput) (req *request // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation UntagResource for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidRequestException @@ -1694,7 +1953,7 @@ func (c *Transfer) UntagResourceRequest(input *UntagResourceInput) (req *request // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/UntagResource func (c *Transfer) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { @@ -1760,31 +2019,34 @@ func (c *Transfer) UpdateServerRequest(input *UpdateServerInput) (req *request.R return } -// UpdateServer API operation for AWS Transfer for SFTP. +// UpdateServer API operation for AWS Transfer Family. // -// Updates the server properties after that server has been created. +// Updates the file transfer protocol-enabled server's properties after that +// server has been created. // -// The UpdateServer call returns the ServerId of the Secure File Transfer Protocol -// (SFTP) server you updated. +// The UpdateServer call returns the ServerId of the server you updated. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation UpdateServer for usage and error information. // // Returned Error Types: +// * AccessDeniedException +// You do not have sufficient access to perform this action. +// // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. 
// // * ConflictException -// This exception is thrown when the UpdatServer is called for a server that -// has VPC as the endpoint type and the server's VpcEndpointID is not in the -// available state. +// This exception is thrown when the UpdatServer is called for a file transfer +// protocol-enabled server that has VPC as the endpoint type and the server's +// VpcEndpointID is not in the available state. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidRequestException @@ -1795,7 +2057,7 @@ func (c *Transfer) UpdateServerRequest(input *UpdateServerInput) (req *request.R // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // * ThrottlingException // The request was denied due to request throttling. @@ -1866,7 +2128,7 @@ func (c *Transfer) UpdateUserRequest(input *UpdateUserInput) (req *request.Reque return } -// UpdateUser API operation for AWS Transfer for SFTP. +// UpdateUser API operation for AWS Transfer Family. // // Assigns new properties to a user. Parameters you pass modify any or all of // the following: the home directory, role, and policy for the UserName and @@ -1878,15 +2140,15 @@ func (c *Transfer) UpdateUserRequest(input *UpdateUserInput) (req *request.Reque // with awserr.Error's Code and Message methods to get detailed information about // the error. // -// See the AWS API reference guide for AWS Transfer for SFTP's +// See the AWS API reference guide for AWS Transfer Family's // API operation UpdateUser for usage and error information. // // Returned Error Types: // * ServiceUnavailableException -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. // // * InternalServiceError -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. // // * InvalidRequestException @@ -1894,7 +2156,7 @@ func (c *Transfer) UpdateUserRequest(input *UpdateUserInput) (req *request.Reque // // * ResourceNotFoundException // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. // // * ThrottlingException // The request was denied due to request throttling. @@ -1923,12 +2185,68 @@ func (c *Transfer) UpdateUserWithContext(ctx aws.Context, input *UpdateUserInput return out, req.Send() } -// This exception is thrown when the UpdatServer is called for a server that -// has VPC as the endpoint type and the server's VpcEndpointID is not in the -// available state. +// You do not have sufficient access to perform this action. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. 
+func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// This exception is thrown when the UpdatServer is called for a file transfer +// protocol-enabled server that has VPC as the endpoint type and the server's +// VpcEndpointID is not in the available state. type ConflictException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -1945,17 +2263,17 @@ func (s ConflictException) GoString() string { func newErrorConflictException(v protocol.ResponseMetadata) error { return &ConflictException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ConflictException) Code() string { +func (s *ConflictException) Code() string { return "ConflictException" } // Message returns the exception's message. -func (s ConflictException) Message() string { +func (s *ConflictException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -1963,69 +2281,133 @@ func (s ConflictException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ConflictException) OrigErr() error { +func (s *ConflictException) OrigErr() error { return nil } -func (s ConflictException) Error() string { +func (s *ConflictException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ConflictException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ConflictException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ConflictException) RequestID() string { - return s.respMetadata.RequestID +func (s *ConflictException) RequestID() string { + return s.RespMetadata.RequestID } type CreateServerInput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. + // Required when Protocols is set to FTPS. + // + // To request a new public certificate, see Request a public certificate (https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) + // in the AWS Certificate Manager User Guide. + // + // To import an existing certificate into ACM, see Importing certificates into + // ACM (https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) + // in the AWS Certificate Manager User Guide. 
+ // + // To request a private certificate to use FTPS through private IP addresses, + // see Request a private certificate (https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-private.html) + // in the AWS Certificate Manager User Guide. + // + // Certificates with the following cryptographic algorithms and key sizes are + // supported: + // + // * 2048-bit RSA (RSA_2048) + // + // * 4096-bit RSA (RSA_4096) + // + // * Elliptic Prime Curve 256 bit (EC_prime256v1) + // + // * Elliptic Prime Curve 384 bit (EC_secp384r1) + // + // * Elliptic Prime Curve 521 bit (EC_secp521r1) + // + // The certificate must be a valid SSL/TLS X.509 version 3 certificate with + // FQDN or IP address specified and information about the issuer. + Certificate *string `type:"string"` + // The virtual private cloud (VPC) endpoint settings that are configured for - // your SFTP server. With a VPC endpoint, you can restrict access to your SFTP - // server to resources only within your VPC. To control incoming internet traffic, - // you will need to invoke the UpdateServer API and attach an Elastic IP to - // your server's endpoint. + // your server. When you host your endpoint within your VPC, you can make it + // accessible only to resources within your VPC, or you can attach Elastic IPs + // and make it accessible to clients over the internet. Your VPC's default security + // groups are automatically assigned to your endpoint. EndpointDetails *EndpointDetails `type:"structure"` - // The type of VPC endpoint that you want your SFTP server to connect to. You - // can choose to connect to the public internet or a virtual private cloud (VPC) - // endpoint. With a VPC endpoint, you can restrict access to your SFTP server - // and resources only within your VPC. + // The type of VPC endpoint that you want your server to connect to. You can + // choose to connect to the public internet or a VPC endpoint. With a VPC endpoint, + // you can restrict access to your server and resources only within your VPC. + // + // It is recommended that you use VPC as the EndpointType. With this endpoint + // type, you have the option to directly associate up to three Elastic IPv4 + // addresses (BYO IP included) with your server's endpoint and use VPC security + // groups to restrict traffic by the client's public IP address. This is not + // possible with EndpointType set to VPC_ENDPOINT. EndpointType *string `type:"string" enum:"EndpointType"` - // The RSA private key as generated by the ssh-keygen -N "" -f my-new-server-key + // The RSA private key as generated by the ssh-keygen -N "" -m PEM -f my-new-server-key // command. // - // If you aren't planning to migrate existing users from an existing SFTP server - // to a new AWS SFTP server, don't update the host key. Accidentally changing + // If you aren't planning to migrate existing users from an existing SFTP-enabled + // server to a new server, don't update the host key. Accidentally changing // a server's host key can be disruptive. // - // For more information, see "https://alpha-docs-aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key" - // in the AWS SFTP User Guide. + // For more information, see Change the host key for your SFTP-enabled server + // (https://docs.aws.amazon.com/transfer/latest/userguide/edit-server-config.html#configuring-servers-change-host-key) + // in the AWS Transfer Family User Guide. 
HostKey *string `type:"string" sensitive:"true"` - // This parameter is required when the IdentityProviderType is set to API_GATEWAY. - // Accepts an array containing all of the information required to call a customer-supplied - // authentication API, including the API Gateway URL. This property is not required - // when the IdentityProviderType is set to SERVICE_MANAGED. + // Required when IdentityProviderType is set to API_GATEWAY. Accepts an array + // containing all of the information required to call a customer-supplied authentication + // API, including the API Gateway URL. Not required when IdentityProviderType + // is set to SERVICE_MANAGED. IdentityProviderDetails *IdentityProviderDetails `type:"structure"` - // Specifies the mode of authentication for the SFTP server. The default value - // is SERVICE_MANAGED, which allows you to store and access SFTP user credentials - // within the AWS Transfer for SFTP service. Use the API_GATEWAY value to integrate - // with an identity provider of your choosing. The API_GATEWAY setting requires - // you to provide an API Gateway endpoint URL to call for authentication using - // the IdentityProviderDetails parameter. + // Specifies the mode of authentication for a server. The default value is SERVICE_MANAGED, + // which allows you to store and access user credentials within the AWS Transfer + // Family service. Use the API_GATEWAY value to integrate with an identity provider + // of your choosing. The API_GATEWAY setting requires you to provide an API + // Gateway endpoint URL to call for authentication using the IdentityProviderDetails + // parameter. IdentityProviderType *string `type:"string" enum:"IdentityProviderType"` - // A value that allows the service to write your SFTP users' activity to your - // Amazon CloudWatch logs for monitoring and auditing purposes. + // Allows the service to write your users' activity to your Amazon CloudWatch + // logs for monitoring and auditing purposes. LoggingRole *string `min:"20" type:"string"` + // Specifies the file transfer protocol or protocols over which your file transfer + // protocol client can connect to your server's endpoint. The available protocols + // are: + // + // * SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over + // SSH + // + // * FTPS (File Transfer Protocol Secure): File transfer with TLS encryption + // + // * FTP (File Transfer Protocol): Unencrypted file transfer + // + // If you select FTPS, you must choose a certificate stored in AWS Certificate + // Manager (ACM) which will be used to identify your server when clients connect + // to it over FTPS. + // + // If Protocol includes either FTP or FTPS, then the EndpointType must be VPC + // and the IdentityProviderType must be API_GATEWAY. + // + // If Protocol includes FTP, then AddressAllocationIds cannot be associated. + // + // If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC and + // the IdentityProviderType can be set to SERVICE_MANAGED. + Protocols []*string `min:"1" type:"list"` + + // Specifies the name of the security policy that is attached to the server. + SecurityPolicyName *string `type:"string"` + // Key-value pairs that can be used to group and search for servers. 
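Taken together, the Certificate, EndpointType, IdentityProviderType, Protocols, and SecurityPolicyName fields documented above imply a CreateServer call of the following shape for an FTPS-enabled endpoint (per the constraints above, FTPS needs an ACM certificate, a VPC endpoint type, and an API_GATEWAY identity provider). This is a sketch against the vendored client only; the certificate ARN, VPC and subnet IDs, API Gateway URL, invocation role, and security policy name are all placeholders, not values from the patched file.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
    svc := transfer.New(session.Must(session.NewSession()))

    out, err := svc.CreateServer(&transfer.CreateServerInput{
        // FTPS requires an ACM certificate (placeholder ARN).
        Certificate: aws.String("arn:aws:acm:us-east-1:111122223333:certificate/example"),
        // FTPS also requires EndpointType VPC and an API_GATEWAY identity provider.
        EndpointType: aws.String("VPC"),
        EndpointDetails: &transfer.EndpointDetails{
            VpcId:     aws.String("vpc-0123456789abcdef0"),
            SubnetIds: aws.StringSlice([]string{"subnet-0123456789abcdef0"}),
        },
        IdentityProviderType: aws.String("API_GATEWAY"),
        IdentityProviderDetails: &transfer.IdentityProviderDetails{
            Url:            aws.String("https://example.execute-api.us-east-1.amazonaws.com/prod"),
            InvocationRole: aws.String("arn:aws:iam::111122223333:role/transfer-invocation-role"),
        },
        Protocols:          aws.StringSlice([]string{"FTPS"}),
        SecurityPolicyName: aws.String("TransferSecurityPolicy-2020-06"), // placeholder policy name
    })
    if err != nil {
        log.Fatalf("CreateServer failed: %v", err)
    }
    fmt.Println("created server:", aws.StringValue(out.ServerId))
}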
Tags []*Tag `min:"1" type:"list"` } @@ -2046,6 +2428,9 @@ func (s *CreateServerInput) Validate() error { if s.LoggingRole != nil && len(*s.LoggingRole) < 20 { invalidParams.Add(request.NewErrParamMinLen("LoggingRole", 20)) } + if s.Protocols != nil && len(s.Protocols) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Protocols", 1)) + } if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } @@ -2076,6 +2461,12 @@ func (s *CreateServerInput) Validate() error { return nil } +// SetCertificate sets the Certificate field's value. +func (s *CreateServerInput) SetCertificate(v string) *CreateServerInput { + s.Certificate = &v + return s +} + // SetEndpointDetails sets the EndpointDetails field's value. func (s *CreateServerInput) SetEndpointDetails(v *EndpointDetails) *CreateServerInput { s.EndpointDetails = v @@ -2112,6 +2503,18 @@ func (s *CreateServerInput) SetLoggingRole(v string) *CreateServerInput { return s } +// SetProtocols sets the Protocols field's value. +func (s *CreateServerInput) SetProtocols(v []*string) *CreateServerInput { + s.Protocols = v + return s +} + +// SetSecurityPolicyName sets the SecurityPolicyName field's value. +func (s *CreateServerInput) SetSecurityPolicyName(v string) *CreateServerInput { + s.SecurityPolicyName = &v + return s +} + // SetTags sets the Tags field's value. func (s *CreateServerInput) SetTags(v []*Tag) *CreateServerInput { s.Tags = v @@ -2121,7 +2524,7 @@ func (s *CreateServerInput) SetTags(v []*Tag) *CreateServerInput { type CreateServerOutput struct { _ struct{} `type:"structure"` - // The service-assigned ID of the SFTP server that is created. + // The service-assigned ID of the server that is created. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -2147,39 +2550,40 @@ type CreateUserInput struct { _ struct{} `type:"structure"` // The landing directory (folder) for a user when they log in to the server - // using their SFTP client. + // using the client. // - // An example is /home/username. + // An example is your-Amazon-S3-bucket-name>/home/username . HomeDirectory *string `type:"string"` - // Logical directory mappings that specify what S3 paths and keys should be - // visible to your user and how you want to make them visible. You will need + // Logical directory mappings that specify what Amazon S3 paths and keys should + // be visible to your user and how you want to make them visible. You will need // to specify the "Entry" and "Target" pair, where Entry shows how the path - // is made visible and Target is the actual S3 path. If you only specify a target, - // it will be displayed as is. You will need to also make sure that your AWS - // IAM Role provides access to paths in Target. The following is an example. + // is made visible and Target is the actual Amazon S3 path. If you only specify + // a target, it will be displayed as is. You will need to also make sure that + // your IAM role provides access to paths in Target. The following is an example. // // '[ "/bucket2/documentation", { "Entry": "your-personal-report.pdf", "Target": // "/bucket3/customized-reports/${transfer:UserName}.pdf" } ]' // - // In most cases, you can use this value instead of the scope down policy to + // In most cases, you can use this value instead of the scope-down policy to // lock your user down to the designated home directory ("chroot"). To do this, // you can set Entry to '/' and set Target to the HomeDirectory parameter value. 
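The "chroot" pattern described just above (Entry set to '/', Target set to the intended home directory) can be expressed against these input types roughly as follows. The server ID, IAM role ARN, bucket name, and user name are placeholders; this is a sketch rather than part of the generated file.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
    svc := transfer.New(session.Must(session.NewSession()))

    out, err := svc.CreateUser(&transfer.CreateUserInput{
        ServerId: aws.String("s-0123456789abcdef0"),                     // placeholder server
        UserName: aws.String("alice"),                                   // placeholder user
        Role:     aws.String("arn:aws:iam::111122223333:role/transfer"), // placeholder role ARN
        // LOGICAL directories use HomeDirectoryMappings instead of HomeDirectory.
        HomeDirectoryType: aws.String("LOGICAL"),
        HomeDirectoryMappings: []*transfer.HomeDirectoryMapEntry{
            {
                // Entry "/" plus a Target under your bucket locks ("chroots") the
                // user into that prefix, as the doc comment above describes.
                Entry:  aws.String("/"),
                Target: aws.String("/your-bucket/home/${transfer:UserName}"),
            },
        },
    })
    if err != nil {
        log.Fatalf("CreateUser failed: %v", err)
    }
    fmt.Printf("created user %s on server %s\n",
        aws.StringValue(out.UserName), aws.StringValue(out.ServerId))
}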
// - // If the target of a logical directory entry does not exist in S3, the entry - // will be ignored. As a workaround, you can use the S3 api to create 0 byte - // objects as place holders for your directory. If using the CLI, use the s3api - // call instead of s3 so you can use the put-object operation. For example, - // you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. - // Make sure that the end of the key name ends in a / for it to be considered - // a folder. + // If the target of a logical directory entry does not exist in Amazon S3, the + // entry will be ignored. As a workaround, you can use the Amazon S3 API to + // create 0 byte objects as place holders for your directory. If using the CLI, + // use the s3api call instead of s3 so you can use the put-object operation. + // For example, you use the following: aws s3api put-object --bucket bucketname + // --key path/to/folder/. Make sure that the end of the key name ends in a '/' + // for it to be considered a folder. HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` // The type of landing directory (folder) you want your users' home directory - // to be when they log into the SFTP server. If you set it to PATH, the user - // will see the absolute Amazon S3 bucket paths as is in their SFTP clients. - // If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings - // for how you want to make S3 paths visible to your user. + // to be when they log into the server. If you set it to PATH, the user will + // see the absolute Amazon S3 bucket paths as is in their file transfer protocol + // clients. If you set it LOGICAL, you will need to provide mappings in the + // HomeDirectoryMappings for how you want to make Amazon S3 paths visible to + // your users. HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` // A scope-down policy for your user so you can use the same IAM role across @@ -2187,45 +2591,45 @@ type CreateUserInput struct { // Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, // ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}. // - // For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON + // For scope-down policies, AWS Transfer Family stores the policy as a JSON // blob, instead of the Amazon Resource Name (ARN) of the policy. You save the // policy as a JSON blob and pass it in the Policy argument. // - // For an example of a scope-down policy, see "https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down">Creating - // a Scope-Down Policy. + // For an example of a scope-down policy, see Creating a scope-down policy (https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down). // - // For more information, see "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html" + // For more information, see AssumeRole (https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) // in the AWS Security Token Service API Reference. Policy *string `type:"string"` - // The IAM role that controls your user's access to your Amazon S3 bucket. The + // The IAM role that controls your users' access to your Amazon S3 bucket. The // policies attached to this role will determine the level of access you want // to provide your users when transferring files into and out of your Amazon // S3 bucket or buckets. 
The IAM role should also contain a trust relationship - // that allows the SFTP server to access your resources when servicing your - // SFTP user's transfer requests. + // that allows the server to access your resources when servicing your users' + // transfer requests. // // Role is a required field Role *string `min:"20" type:"string" required:"true"` - // A system-assigned unique identifier for an SFTP server instance. This is - // the specific SFTP server that you added your user to. + // A system-assigned unique identifier for a server instance. This is the specific + // server that you added your user to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` // The public portion of the Secure Shell (SSH) key used to authenticate the - // user to the SFTP server. + // user to the server. SshPublicKeyBody *string `type:"string"` // Key-value pairs that can be used to group and search for users. Tags are // metadata attached to users for any purpose. Tags []*Tag `min:"1" type:"list"` - // A unique string that identifies a user and is associated with a server as - // specified by the ServerId. This user name must be a minimum of 3 and a maximum - // of 32 characters long. The following are valid characters: a-z, A-Z, 0-9, - // underscore, and hyphen. The user name can't start with a hyphen. + // A unique string that identifies a user and is associated with a as specified + // by the ServerId. This user name must be a minimum of 3 and a maximum of 100 + // characters long. The following are valid characters: a-z, A-Z, 0-9, underscore + // '_', hyphen '-', period '.', and at sign '@'. The user name can't start with + // a hyphen, period, or at sign. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -2352,12 +2756,12 @@ func (s *CreateUserInput) SetUserName(v string) *CreateUserInput { type CreateUserOutput struct { _ struct{} `type:"structure"` - // The ID of the SFTP server that the user is attached to. + // The ID of the server that the user is attached to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // A unique string that identifies a user account associated with an SFTP server. + // A unique string that identifies a user account associated with a server. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -2388,7 +2792,7 @@ func (s *CreateUserOutput) SetUserName(v string) *CreateUserOutput { type DeleteServerInput struct { _ struct{} `type:"structure"` - // A unique system-assigned identifier for an SFTP server instance. + // A unique system-assigned identifier for a server instance. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -2443,7 +2847,7 @@ func (s DeleteServerOutput) GoString() string { type DeleteSshPublicKeyInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) + // A system-assigned unique identifier for a file transfer protocol-enabled // server instance that has the user assigned to it. // // ServerId is a required field @@ -2533,13 +2937,13 @@ func (s DeleteSshPublicKeyOutput) GoString() string { type DeleteUserInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server instance that has - // the user assigned to it. + // A system-assigned unique identifier for a server instance that has the user + // assigned to it. 
// // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // A unique string that identifies a user that is being deleted from the server. + // A unique string that identifies a user that is being deleted from a server. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -2603,10 +3007,73 @@ func (s DeleteUserOutput) GoString() string { return s.String() } +type DescribeSecurityPolicyInput struct { + _ struct{} `type:"structure"` + + // Specifies the name of the security policy that is attached to the server. + // + // SecurityPolicyName is a required field + SecurityPolicyName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeSecurityPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSecurityPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSecurityPolicyInput"} + if s.SecurityPolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("SecurityPolicyName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSecurityPolicyName sets the SecurityPolicyName field's value. +func (s *DescribeSecurityPolicyInput) SetSecurityPolicyName(v string) *DescribeSecurityPolicyInput { + s.SecurityPolicyName = &v + return s +} + +type DescribeSecurityPolicyOutput struct { + _ struct{} `type:"structure"` + + // An array containing the properties of the security policy. + // + // SecurityPolicy is a required field + SecurityPolicy *DescribedSecurityPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeSecurityPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityPolicyOutput) GoString() string { + return s.String() +} + +// SetSecurityPolicy sets the SecurityPolicy field's value. +func (s *DescribeSecurityPolicyOutput) SetSecurityPolicy(v *DescribedSecurityPolicy) *DescribeSecurityPolicyOutput { + s.SecurityPolicy = v + return s +} + type DescribeServerInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server. + // A system-assigned unique identifier for a server. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -2647,7 +3114,7 @@ func (s *DescribeServerInput) SetServerId(v string) *DescribeServerInput { type DescribeServerOutput struct { _ struct{} `type:"structure"` - // An array containing the properties of the server with the ServerID you specified. + // An array containing the properties of a server with the ServerID you specified. // // Server is a required field Server *DescribedServer `type:"structure" required:"true"` @@ -2672,14 +3139,13 @@ func (s *DescribeServerOutput) SetServer(v *DescribedServer) *DescribeServerOutp type DescribeUserInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server that has this user - // assigned. + // A system-assigned unique identifier for a server that has this user assigned. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` // The name of the user assigned to one or more servers. 
User names are part - // of the sign-in credentials to use the AWS Transfer for SFTP service and perform + // of the sign-in credentials to use the AWS Transfer Family service and perform // file transfer tasks. // // UserName is a required field @@ -2733,8 +3199,7 @@ func (s *DescribeUserInput) SetUserName(v string) *DescribeUserInput { type DescribeUserOutput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server that has this user - // assigned. + // A system-assigned unique identifier for a server that has this user assigned. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -2768,55 +3233,148 @@ func (s *DescribeUserOutput) SetUser(v *DescribedUser) *DescribeUserOutput { return s } -// Describes the properties of the server that was specified. Information returned -// includes the following: the server Amazon Resource Name (ARN), the authentication -// configuration and type, the logging role, the server ID and state, and assigned -// tags or metadata. +// Describes the properties of a security policy that was specified. For more +// information about security policies, see Working with security policies (https://docs.aws.amazon.com/transfer/latest/userguide/security-policies.html). +type DescribedSecurityPolicy struct { + _ struct{} `type:"structure"` + + // Specifies whether this policy enables Federal Information Processing Standards + // (FIPS). + Fips *bool `type:"boolean"` + + // Specifies the name of the security policy that is attached to the server. + // + // SecurityPolicyName is a required field + SecurityPolicyName *string `type:"string" required:"true"` + + // Specifies the enabled Secure Shell (SSH) cipher encryption algorithms in + // the security policy that is attached to the server. + SshCiphers []*string `type:"list"` + + // Specifies the enabled SSH key exchange (KEX) encryption algorithms in the + // security policy that is attached to the server. + SshKexs []*string `type:"list"` + + // Specifies the enabled SSH message authentication code (MAC) encryption algorithms + // in the security policy that is attached to the server. + SshMacs []*string `type:"list"` + + // Specifies the enabled Transport Layer Security (TLS) cipher encryption algorithms + // in the security policy that is attached to the server. + TlsCiphers []*string `type:"list"` +} + +// String returns the string representation +func (s DescribedSecurityPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribedSecurityPolicy) GoString() string { + return s.String() +} + +// SetFips sets the Fips field's value. +func (s *DescribedSecurityPolicy) SetFips(v bool) *DescribedSecurityPolicy { + s.Fips = &v + return s +} + +// SetSecurityPolicyName sets the SecurityPolicyName field's value. +func (s *DescribedSecurityPolicy) SetSecurityPolicyName(v string) *DescribedSecurityPolicy { + s.SecurityPolicyName = &v + return s +} + +// SetSshCiphers sets the SshCiphers field's value. +func (s *DescribedSecurityPolicy) SetSshCiphers(v []*string) *DescribedSecurityPolicy { + s.SshCiphers = v + return s +} + +// SetSshKexs sets the SshKexs field's value. +func (s *DescribedSecurityPolicy) SetSshKexs(v []*string) *DescribedSecurityPolicy { + s.SshKexs = v + return s +} + +// SetSshMacs sets the SshMacs field's value. 
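The DescribeSecurityPolicy operation and the DescribedSecurityPolicy shape introduced in this hunk can be exercised with the vendored client along the following lines; the policy name is a placeholder and the snippet is illustrative rather than part of the generated file.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
    svc := transfer.New(session.Must(session.NewSession()))

    out, err := svc.DescribeSecurityPolicy(&transfer.DescribeSecurityPolicyInput{
        SecurityPolicyName: aws.String("TransferSecurityPolicy-2020-06"), // placeholder name
    })
    if err != nil {
        log.Fatalf("DescribeSecurityPolicy failed: %v", err)
    }

    // Print the algorithm lists carried by the described policy.
    p := out.SecurityPolicy
    fmt.Println("policy:", aws.StringValue(p.SecurityPolicyName))
    fmt.Println("FIPS enabled:", aws.BoolValue(p.Fips))
    fmt.Println("SSH ciphers:", aws.StringValueSlice(p.SshCiphers))
    fmt.Println("SSH KEXs:", aws.StringValueSlice(p.SshKexs))
    fmt.Println("SSH MACs:", aws.StringValueSlice(p.SshMacs))
    fmt.Println("TLS ciphers:", aws.StringValueSlice(p.TlsCiphers))
}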
+func (s *DescribedSecurityPolicy) SetSshMacs(v []*string) *DescribedSecurityPolicy { + s.SshMacs = v + return s +} + +// SetTlsCiphers sets the TlsCiphers field's value. +func (s *DescribedSecurityPolicy) SetTlsCiphers(v []*string) *DescribedSecurityPolicy { + s.TlsCiphers = v + return s +} + +// Describes the properties of a file transfer protocol-enabled server that +// was specified. type DescribedServer struct { _ struct{} `type:"structure"` - // Specifies the unique Amazon Resource Name (ARN) for the server to be described. + // Specifies the unique Amazon Resource Name (ARN) of the server. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` - // The virtual private cloud (VPC) endpoint settings that you configured for - // your SFTP server. + // Specifies the ARN of the AWS Certificate Manager (ACM) certificate. Required + // when Protocols is set to FTPS. + Certificate *string `type:"string"` + + // Specifies the virtual private cloud (VPC) endpoint settings that you configured + // for your server. EndpointDetails *EndpointDetails `type:"structure"` - // The type of endpoint that your SFTP server is connected to. If your SFTP - // server is connected to a VPC endpoint, your server isn't accessible over - // the public internet. + // Defines the type of endpoint that your server is connected to. If your server + // is connected to a VPC endpoint, your server isn't accessible over the public + // internet. EndpointType *string `type:"string" enum:"EndpointType"` - // This value contains the message-digest algorithm (MD5) hash of the server's - // host key. This value is equivalent to the output of the ssh-keygen -l -E - // md5 -f my-new-server-key command. + // Specifies the Base64-encoded SHA256 fingerprint of the server's host key. + // This value is equivalent to the output of the ssh-keygen -l -f my-new-server-key + // command. HostKeyFingerprint *string `type:"string"` // Specifies information to call a customer-supplied authentication API. This - // field is not populated when the IdentityProviderType of the server is SERVICE_MANAGED>. + // field is not populated when the IdentityProviderType of a server is SERVICE_MANAGED. IdentityProviderDetails *IdentityProviderDetails `type:"structure"` - // This property defines the mode of authentication method enabled for this - // service. A value of SERVICE_MANAGED means that you are using this server - // to store and access SFTP user credentials within the service. A value of - // API_GATEWAY indicates that you have integrated an API Gateway endpoint that - // will be invoked for authenticating your user into the service. + // Specifies the mode of authentication method enabled for this service. A value + // of SERVICE_MANAGED means that you are using this server to store and access + // user credentials within the service. A value of API_GATEWAY indicates that + // you have integrated an API Gateway endpoint that will be invoked for authenticating + // your user into the service. IdentityProviderType *string `type:"string" enum:"IdentityProviderType"` - // This property is an AWS Identity and Access Management (IAM) entity that - // allows the server to turn on Amazon CloudWatch logging for Amazon S3 events. - // When set, user activity can be viewed in your CloudWatch logs. + // Specifies the AWS Identity and Access Management (IAM) role that allows a + // server to turn on Amazon CloudWatch logging for Amazon S3 events. When set, + // user activity can be viewed in your CloudWatch logs. 
LoggingRole *string `min:"20" type:"string"` - // This property is a unique system-assigned identifier for the SFTP server - // that you instantiate. + // Specifies the file transfer protocol or protocols over which your file transfer + // protocol client can connect to your server's endpoint. The available protocols + // are: + // + // * SFTP (Secure Shell (SSH) File Transfer Protocol): File transfer over + // SSH + // + // * FTPS (File Transfer Protocol Secure): File transfer with TLS encryption + // + // * FTP (File Transfer Protocol): Unencrypted file transfer + Protocols []*string `min:"1" type:"list"` + + // Specifies the name of the security policy that is attached to the server. + SecurityPolicyName *string `type:"string"` + + // Specifies the unique system-assigned identifier for a server that you instantiate. ServerId *string `min:"19" type:"string"` - // The condition of the SFTP server for the server that was described. A value - // of ONLINE indicates that the server can accept jobs and transfer files. A - // State value of OFFLINE means that the server cannot perform file transfer + // Specifies the condition of a server for the server that was described. A + // value of ONLINE indicates that the server can accept jobs and transfer files. + // A State value of OFFLINE means that the server cannot perform file transfer // operations. // // The states of STARTING and STOPPING indicate that the server is in an intermediate @@ -2824,12 +3382,12 @@ type DescribedServer struct { // of START_FAILED or STOP_FAILED can indicate an error condition. State *string `type:"string" enum:"State"` - // This property contains the key-value pairs that you can use to search for - // and group servers that were assigned to the server that was described. + // Specifies the key-value pairs that you can use to search for and group servers + // that were assigned to the server that was described. Tags []*Tag `min:"1" type:"list"` - // The number of users that are assigned to the SFTP server you specified with - // the ServerId. + // Specifies the number of users that are assigned to a server you specified + // with the ServerId. UserCount *int64 `type:"integer"` } @@ -2849,6 +3407,12 @@ func (s *DescribedServer) SetArn(v string) *DescribedServer { return s } +// SetCertificate sets the Certificate field's value. +func (s *DescribedServer) SetCertificate(v string) *DescribedServer { + s.Certificate = &v + return s +} + // SetEndpointDetails sets the EndpointDetails field's value. func (s *DescribedServer) SetEndpointDetails(v *EndpointDetails) *DescribedServer { s.EndpointDetails = v @@ -2885,6 +3449,18 @@ func (s *DescribedServer) SetLoggingRole(v string) *DescribedServer { return s } +// SetProtocols sets the Protocols field's value. +func (s *DescribedServer) SetProtocols(v []*string) *DescribedServer { + s.Protocols = v + return s +} + +// SetSecurityPolicyName sets the SecurityPolicyName field's value. +func (s *DescribedServer) SetSecurityPolicyName(v string) *DescribedServer { + s.SecurityPolicyName = &v + return s +} + // SetServerId sets the ServerId field's value. func (s *DescribedServer) SetServerId(v string) *DescribedServer { s.ServerId = &v @@ -2909,66 +3485,64 @@ func (s *DescribedServer) SetUserCount(v int64) *DescribedServer { return s } -// Returns properties of the user that you want to describe. +// Describes the properties of a user that was specified. 
type DescribedUser struct { _ struct{} `type:"structure"` - // This property contains the unique Amazon Resource Name (ARN) for the user - // that was requested to be described. + // Specifies the unique Amazon Resource Name (ARN) for the user that was requested + // to be described. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` - // This property specifies the landing directory (or folder), which is the location - // that files are written to or read from in an Amazon S3 bucket for the described - // user. An example is /your s3 bucket name/home/username . + // Specifies the landing directory (or folder), which is the location that files + // are written to or read from in an Amazon S3 bucket, for the described user. + // An example is your-Amazon-S3-bucket-name>/home/username . HomeDirectory *string `type:"string"` - // Logical directory mappings that you specified for what S3 paths and keys - // should be visible to your user and how you want to make them visible. You - // will need to specify the "Entry" and "Target" pair, where Entry shows how - // the path is made visible and Target is the actual S3 path. If you only specify - // a target, it will be displayed as is. You will need to also make sure that - // your AWS IAM Role provides access to paths in Target. - // - // In most cases, you can use this value instead of the scope down policy to - // lock your user down to the designated home directory ("chroot"). To do this, - // you can set Entry to '/' and set Target to the HomeDirectory parameter value. + // Specifies the logical directory mappings that specify what Amazon S3 paths + // and keys should be visible to your user and how you want to make them visible. + // You will need to specify the "Entry" and "Target" pair, where Entry shows + // how the path is made visible and Target is the actual Amazon S3 path. If + // you only specify a target, it will be displayed as is. You will need to also + // make sure that your AWS Identity and Access Management (IAM) role provides + // access to paths in Target. // - // In most cases, you can use this value instead of the scope down policy to + // In most cases, you can use this value instead of the scope-down policy to // lock your user down to the designated home directory ("chroot"). To do this, // you can set Entry to '/' and set Target to the HomeDirectory parameter value. HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` - // The type of landing directory (folder) you mapped for your users' to see - // when they log into the SFTP server. If you set it to PATH, the user will - // see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you - // set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings - // for how you want to make S3 paths visible to your user. + // Specifies the type of landing directory (folder) you mapped for your users + // to see when they log into the file transfer protocol-enabled server. If you + // set it to PATH, the user will see the absolute Amazon S3 bucket paths as + // is in their file transfer protocol clients. If you set it LOGICAL, you will + // need to provide mappings in the HomeDirectoryMappings for how you want to + // make Amazon S3 paths visible to your users. HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` // Specifies the name of the policy in use for the described user. 
Policy *string `type:"string"` - // This property specifies the IAM role that controls your user's access to - // your Amazon S3 bucket. The policies attached to this role will determine - // the level of access you want to provide your users when transferring files - // into and out of your Amazon S3 bucket or buckets. The IAM role should also - // contain a trust relationship that allows the SFTP server to access your resources - // when servicing your SFTP user's transfer requests. + // Specifies the IAM role that controls your users' access to your Amazon S3 + // bucket. The policies attached to this role will determine the level of access + // you want to provide your users when transferring files into and out of your + // Amazon S3 bucket or buckets. The IAM role should also contain a trust relationship + // that allows a server to access your resources when servicing your users' + // transfer requests. Role *string `min:"20" type:"string"` - // This property contains the public key portion of the Secure Shell (SSH) keys - // stored for the described user. + // Specifies the public key portion of the Secure Shell (SSH) keys stored for + // the described user. SshPublicKeys []*SshPublicKey `type:"list"` - // This property contains the key-value pairs for the user requested. Tag can - // be used to search for and group users for a variety of purposes. + // Specifies the key-value pairs for the user requested. Tag can be used to + // search for and group users for a variety of purposes. Tags []*Tag `min:"1" type:"list"` - // This property is the name of the user that was requested to be described. - // User names are used for authentication purposes. This is the string that - // will be used by your user when they log in to your SFTP server. + // Specifies the name of the user that was requested to be described. User names + // are used for authentication purposes. This is the string that will be used + // by your user when they log in to your server. UserName *string `min:"3" type:"string"` } @@ -3037,28 +3611,44 @@ func (s *DescribedUser) SetUserName(v string) *DescribedUser { } // The virtual private cloud (VPC) endpoint settings that are configured for -// your SFTP server. With a VPC endpoint, you can restrict access to your SFTP -// server and resources only within your VPC. To control incoming internet traffic, -// invoke the UpdateServer API and attach an Elastic IP to your server's endpoint. +// your file transfer protocol-enabled server. With a VPC endpoint, you can +// restrict access to your server and resources only within your VPC. To control +// incoming internet traffic, invoke the UpdateServer API and attach an Elastic +// IP to your server's endpoint. type EndpointDetails struct { _ struct{} `type:"structure"` // A list of address allocation IDs that are required to attach an Elastic IP - // address to your SFTP server's endpoint. This is only valid in the UpdateServer - // API. + // address to your server's endpoint. // - // This property can only be use when EndpointType is set to VPC. + // This property can only be set when EndpointType is set to VPC and it is only + // valid in the UpdateServer API. AddressAllocationIds []*string `type:"list"` - // A list of subnet IDs that are required to host your SFTP server endpoint - // in your VPC. + // A list of security groups IDs that are available to attach to your server's + // endpoint. + // + // This property can only be set when EndpointType is set to VPC. 
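The EndpointDetails constraints documented here (SecurityGroupIds editable only while changing the EndpointType to VPC, AddressAllocationIds valid only in the UpdateServer API) suggest an UpdateServer call of the following shape when moving an existing server from PUBLIC to VPC. All resource IDs below are placeholders; this is a sketch, not part of the vendored code.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
    svc := transfer.New(session.Must(session.NewSession()))

    out, err := svc.UpdateServer(&transfer.UpdateServerInput{
        ServerId:     aws.String("s-0123456789abcdef0"), // placeholder server
        EndpointType: aws.String("VPC"),                 // moving from PUBLIC to VPC
        EndpointDetails: &transfer.EndpointDetails{
            VpcId:     aws.String("vpc-0123456789abcdef0"),
            SubnetIds: aws.StringSlice([]string{"subnet-0123456789abcdef0"}),
            // SecurityGroupIds may only be edited while changing EndpointType to VPC.
            SecurityGroupIds: aws.StringSlice([]string{"sg-0123456789abcdef0"}),
            // AddressAllocationIds (Elastic IPs) are only valid in UpdateServer.
            AddressAllocationIds: aws.StringSlice([]string{"eipalloc-0123456789abcdef0"}),
        },
    })
    if err != nil {
        log.Fatalf("UpdateServer failed: %v", err)
    }
    fmt.Println("updated server:", aws.StringValue(out.ServerId))
}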
+ // + // You can only edit the SecurityGroupIds property in the UpdateServer API and + // only if you are changing the EndpointType from PUBLIC or VPC_ENDPOINT to + // VPC. + SecurityGroupIds []*string `type:"list"` + + // A list of subnet IDs that are required to host your server endpoint in your + // VPC. + // + // This property can only be set when EndpointType is set to VPC. SubnetIds []*string `type:"list"` // The ID of the VPC endpoint. + // + // This property can only be set when EndpointType is set to VPC_ENDPOINT. VpcEndpointId *string `min:"22" type:"string"` - // The VPC ID of the virtual private cloud in which the SFTP server's endpoint - // will be hosted. + // The VPC ID of the VPC in which a server's endpoint will be hosted. + // + // This property can only be set when EndpointType is set to VPC. VpcId *string `type:"string"` } @@ -3091,6 +3681,12 @@ func (s *EndpointDetails) SetAddressAllocationIds(v []*string) *EndpointDetails return s } +// SetSecurityGroupIds sets the SecurityGroupIds field's value. +func (s *EndpointDetails) SetSecurityGroupIds(v []*string) *EndpointDetails { + s.SecurityGroupIds = v + return s +} + // SetSubnetIds sets the SubnetIds field's value. func (s *EndpointDetails) SetSubnetIds(v []*string) *EndpointDetails { s.SubnetIds = v @@ -3109,7 +3705,7 @@ func (s *EndpointDetails) SetVpcId(v string) *EndpointDetails { return s } -// Represents an object that contains entries and a targets for HomeDirectoryMappings. +// Represents an object that contains entries and targets for HomeDirectoryMappings. type HomeDirectoryMapEntry struct { _ struct{} `type:"structure"` @@ -3163,16 +3759,15 @@ func (s *HomeDirectoryMapEntry) SetTarget(v string) *HomeDirectoryMapEntry { } // Returns information related to the type of user authentication that is in -// use for a server's users. A server can have only one method of authentication. +// use for a file transfer protocol-enabled server's users. A server can have +// only one method of authentication. type IdentityProviderDetails struct { _ struct{} `type:"structure"` - // The InvocationRole parameter provides the type of InvocationRole used to - // authenticate the user account. + // Provides the type of InvocationRole used to authenticate the user account. InvocationRole *string `min:"20" type:"string"` - // The Url parameter provides contains the location of the service endpoint - // used to authenticate users. + // Provides the location of the service endpoint used to authenticate users. Url *string `type:"string"` } @@ -3214,7 +3809,7 @@ func (s *IdentityProviderDetails) SetUrl(v string) *IdentityProviderDetails { type ImportSshPublicKeyInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server. + // A system-assigned unique identifier for a server. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -3283,19 +3878,18 @@ func (s *ImportSshPublicKeyInput) SetUserName(v string) *ImportSshPublicKeyInput return s } -// This response identifies the user, the server they belong to, and the identifier -// of the SSH public key associated with that user. A user can have more than -// one key on each server that they are associated with. +// Identifies the user, the server they belong to, and the identifier of the +// SSH public key associated with that user. A user can have more than one key +// on each server that they are associated with. 
type ImportSshPublicKeyOutput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server. + // A system-assigned unique identifier for a server. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // This identifier is the name given to a public key by the system that was - // imported. + // The name given to a public key by the system that was imported. // // SshPublicKeyId is a required field SshPublicKeyId *string `min:"21" type:"string" required:"true"` @@ -3334,11 +3928,11 @@ func (s *ImportSshPublicKeyOutput) SetUserName(v string) *ImportSshPublicKeyOutp return s } -// This exception is thrown when an error occurs in the AWS Transfer for SFTP +// This exception is thrown when an error occurs in the AWS Transfer Family // service. type InternalServiceError struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3355,17 +3949,17 @@ func (s InternalServiceError) GoString() string { func newErrorInternalServiceError(v protocol.ResponseMetadata) error { return &InternalServiceError{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServiceError) Code() string { +func (s *InternalServiceError) Code() string { return "InternalServiceError" } // Message returns the exception's message. -func (s InternalServiceError) Message() string { +func (s *InternalServiceError) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3373,28 +3967,28 @@ func (s InternalServiceError) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalServiceError) OrigErr() error { +func (s *InternalServiceError) OrigErr() error { return nil } -func (s InternalServiceError) Error() string { +func (s *InternalServiceError) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServiceError) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServiceError) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServiceError) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServiceError) RequestID() string { + return s.RespMetadata.RequestID } // The NextToken parameter that was passed is invalid. type InvalidNextTokenException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3411,17 +4005,17 @@ func (s InvalidNextTokenException) GoString() string { func newErrorInvalidNextTokenException(v protocol.ResponseMetadata) error { return &InvalidNextTokenException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidNextTokenException) Code() string { +func (s *InvalidNextTokenException) Code() string { return "InvalidNextTokenException" } // Message returns the exception's message. 
-func (s InvalidNextTokenException) Message() string { +func (s *InvalidNextTokenException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3429,28 +4023,28 @@ func (s InvalidNextTokenException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidNextTokenException) OrigErr() error { +func (s *InvalidNextTokenException) OrigErr() error { return nil } -func (s InvalidNextTokenException) Error() string { +func (s *InvalidNextTokenException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidNextTokenException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidNextTokenException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidNextTokenException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidNextTokenException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when the client submits a malformed request. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -3467,17 +4061,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3485,22 +4079,110 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListSecurityPoliciesInput struct { + _ struct{} `type:"structure"` + + // Specifies the number of security policies to return as a response to the + // ListSecurityPolicies query. + MaxResults *int64 `min:"1" type:"integer"` + + // When additional results are obtained from the ListSecurityPolicies command, + // a NextToken parameter is returned in the output. You can then pass the NextToken + // parameter in a subsequent command to continue listing additional security + // policies. 
+ NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSecurityPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSecurityPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListSecurityPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSecurityPoliciesInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListSecurityPoliciesInput) SetMaxResults(v int64) *ListSecurityPoliciesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSecurityPoliciesInput) SetNextToken(v string) *ListSecurityPoliciesInput { + s.NextToken = &v + return s +} + +type ListSecurityPoliciesOutput struct { + _ struct{} `type:"structure"` + + // When you can get additional results from the ListSecurityPolicies operation, + // a NextToken parameter is returned in the output. In a following command, + // you can pass in the NextToken parameter to continue listing security policies. + NextToken *string `min:"1" type:"string"` + + // An array of security policies that were listed. + // + // SecurityPolicyNames is a required field + SecurityPolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListSecurityPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSecurityPoliciesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListSecurityPoliciesOutput) SetNextToken(v string) *ListSecurityPoliciesOutput { + s.NextToken = &v + return s +} + +// SetSecurityPolicyNames sets the SecurityPolicyNames field's value. +func (s *ListSecurityPoliciesOutput) SetSecurityPolicyNames(v []*string) *ListSecurityPoliciesOutput { + s.SecurityPolicyNames = v + return s } type ListServersInput struct { @@ -3663,7 +4345,7 @@ func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceIn type ListTagsForResourceOutput struct { _ struct{} `type:"structure"` - // This value is the ARN you specified to list the tags of. + // The ARN you specified to list the tags of. Arn *string `min:"20" type:"string"` // When you can get additional results from the ListTagsForResource call, a @@ -3715,8 +4397,8 @@ type ListUsersInput struct { // to the NextToken parameter to continue listing additional users. NextToken *string `min:"1" type:"string"` - // A system-assigned unique identifier for a Secure File Transfer Protocol (SFTP) - // server that has users assigned to it. + // A system-assigned unique identifier for a server that has users assigned + // to it. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -3780,8 +4462,8 @@ type ListUsersOutput struct { // to the NextToken parameter to continue listing additional users. NextToken *string `min:"1" type:"string"` - // A system-assigned unique identifier for an SFTP server that the users are - // assigned to. 
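// Editor's note: illustrative sketch, not part of the vendored patch. It
// pages through the new ListSecurityPolicies operation using the MaxResults
// and NextToken fields documented above until every policy name is printed.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	svc := transfer.New(session.Must(session.NewSession()))

	input := &transfer.ListSecurityPoliciesInput{MaxResults: aws.Int64(10)}
	for {
		out, err := svc.ListSecurityPolicies(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, name := range out.SecurityPolicyNames {
			fmt.Println(aws.StringValue(name))
		}
		if out.NextToken == nil {
			break // no further pages
		}
		input.NextToken = out.NextToken
	}
}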
+ // A system-assigned unique identifier for a server that the users are assigned + // to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -3821,46 +4503,46 @@ func (s *ListUsersOutput) SetUsers(v []*ListedUser) *ListUsersOutput { return s } -// Returns properties of the server that was specified. +// Returns properties of a file transfer protocol-enabled server that was specified. type ListedServer struct { _ struct{} `type:"structure"` - // The unique Amazon Resource Name (ARN) for the server to be listed. + // Specifies the unique Amazon Resource Name (ARN) for a server to be listed. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` - // The type of VPC endpoint that your SFTP server is connected to. If your SFTP + // Specifies the type of VPC endpoint that your server is connected to. If your // server is connected to a VPC endpoint, your server isn't accessible over // the public internet. EndpointType *string `type:"string" enum:"EndpointType"` - // The authentication method used to validate a user for the server that was - // specified. This can include Secure Shell (SSH), user name and password combinations, - // or your own custom authentication method. Valid values include SERVICE_MANAGED - // or API_GATEWAY. + // Specifies the authentication method used to validate a user for a server + // that was specified. This can include Secure Shell (SSH), user name and password + // combinations, or your own custom authentication method. Valid values include + // SERVICE_MANAGED or API_GATEWAY. IdentityProviderType *string `type:"string" enum:"IdentityProviderType"` - // The AWS Identity and Access Management entity that allows the server to turn - // on Amazon CloudWatch logging. + // Specifies the AWS Identity and Access Management (IAM) role that allows a + // server to turn on Amazon CloudWatch logging. LoggingRole *string `min:"20" type:"string"` - // This value is the unique system assigned identifier for the SFTP servers - // that were listed. + // Specifies the unique system assigned identifier for the servers that were + // listed. ServerId *string `min:"19" type:"string"` - // This property describes the condition of the SFTP server for the server that - // was described. A value of ONLINE> indicates that the server can accept jobs - // and transfer files. A State value of OFFLINE means that the server cannot - // perform file transfer operations. + // Specifies the condition of a server for the server that was described. A + // value of ONLINE indicates that the server can accept jobs and transfer files. + // A State value of OFFLINE means that the server cannot perform file transfer + // operations. // // The states of STARTING and STOPPING indicate that the server is in an intermediate // state, either not fully able to respond, or not fully offline. The values // of START_FAILED or STOP_FAILED can indicate an error condition. State *string `type:"string" enum:"State"` - // This property is a numeric value that indicates the number of users that - // are assigned to the SFTP server you specified with the ServerId. + // Specifies the number of users that are assigned to a server you specified + // with the ServerId. UserCount *int64 `type:"integer"` } @@ -3920,34 +4602,35 @@ func (s *ListedServer) SetUserCount(v int64) *ListedServer { type ListedUser struct { _ struct{} `type:"structure"` - // This property is the unique Amazon Resource Name (ARN) for the user that - // you want to learn about. 
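// Editor's note: illustrative sketch, not part of the vendored patch. It
// lists servers and prints the ListedServer fields described above (ServerId,
// State, UserCount). The Servers field of ListServersOutput is assumed from
// the same package; it is not shown in this hunk.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	svc := transfer.New(session.Must(session.NewSession()))

	out, err := svc.ListServers(&transfer.ListServersInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range out.Servers {
		fmt.Printf("%s state=%s users=%d\n",
			aws.StringValue(s.ServerId),
			aws.StringValue(s.State),
			aws.Int64Value(s.UserCount))
	}
}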
+ // Provides the unique Amazon Resource Name (ARN) for the user that you want + // to learn about. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` - // This value specifies the location that files are written to or read from - // an Amazon S3 bucket for the user you specify by their ARN. + // Specifies the location that files are written to or read from an Amazon S3 + // bucket for the user you specify by their ARN. HomeDirectory *string `type:"string"` - // The type of landing directory (folder) you mapped for your users' home directory. - // If you set it to PATH, the user will see the absolute Amazon S3 bucket paths - // as is in their SFTP clients. If you set it LOGICAL, you will need to provide - // mappings in the HomeDirectoryMappings for how you want to make S3 paths visible - // to your user. + // Specifies the type of landing directory (folder) you mapped for your users' + // home directory. If you set it to PATH, the user will see the absolute Amazon + // S3 bucket paths as is in their file transfer protocol clients. If you set + // it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings + // for how you want to make Amazon S3 paths visible to your users. HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` - // The role in use by this user. A role is an AWS Identity and Access Management - // (IAM) entity that, in this case, allows the SFTP server to act on a user's - // behalf. It allows the server to inherit the trust relationship that enables - // that user to perform file operations to their Amazon S3 bucket. + // Specifies the role that is in use by this user. A role is an AWS Identity + // and Access Management (IAM) entity that, in this case, allows a file transfer + // protocol-enabled server to act on a user's behalf. It allows the server to + // inherit the trust relationship that enables that user to perform file operations + // to their Amazon S3 bucket. Role *string `min:"20" type:"string"` - // This value is the number of SSH public keys stored for the user you specified. + // Specifies the number of SSH public keys stored for the user you specified. SshPublicKeyCount *int64 `type:"integer"` - // The name of the user whose ARN was specified. User names are used for authentication - // purposes. + // Specifies the name of the user whose ARN was specified. User names are used + // for authentication purposes. UserName *string `min:"3" type:"string"` } @@ -3999,8 +4682,8 @@ func (s *ListedUser) SetUserName(v string) *ListedUser { // The requested resource does not exist. type ResourceExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -4023,17 +4706,17 @@ func (s ResourceExistsException) GoString() string { func newErrorResourceExistsException(v protocol.ResponseMetadata) error { return &ResourceExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceExistsException) Code() string { +func (s *ResourceExistsException) Code() string { return "ResourceExistsException" } // Message returns the exception's message. 
-func (s ResourceExistsException) Message() string { +func (s *ResourceExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4041,29 +4724,29 @@ func (s ResourceExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceExistsException) OrigErr() error { +func (s *ResourceExistsException) OrigErr() error { return nil } -func (s ResourceExistsException) Error() string { +func (s *ResourceExistsException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceExistsException) RequestID() string { + return s.RespMetadata.RequestID } // This exception is thrown when a resource is not found by the AWS Transfer -// for SFTP service. +// Family service. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` @@ -4086,17 +4769,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4104,28 +4787,28 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } -// The request has failed because the AWS Transfer for SFTP service is not available. +// The request has failed because the AWS Transfer Family service is not available. 
type ServiceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4142,17 +4825,17 @@ func (s ServiceUnavailableException) GoString() string { func newErrorServiceUnavailableException(v protocol.ResponseMetadata) error { return &ServiceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceUnavailableException) Code() string { +func (s *ServiceUnavailableException) Code() string { return "ServiceUnavailableException" } // Message returns the exception's message. -func (s ServiceUnavailableException) Message() string { +func (s *ServiceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4160,43 +4843,45 @@ func (s ServiceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceUnavailableException) OrigErr() error { +func (s *ServiceUnavailableException) OrigErr() error { return nil } -func (s ServiceUnavailableException) Error() string { +func (s *ServiceUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } // Provides information about the public Secure Shell (SSH) key that is associated -// with a user account for a specific server (as identified by ServerId). The -// information returned includes the date the key was imported, the public key -// contents, and the public key ID. A user can store more than one SSH public -// key associated with their user name on a specific SFTP server. +// with a user account for the specific file transfer protocol-enabled server +// (as identified by ServerId). The information returned includes the date the +// key was imported, the public key contents, and the public key ID. A user +// can store more than one SSH public key associated with their user name on +// a specific server. type SshPublicKey struct { _ struct{} `type:"structure"` - // The date that the public key was added to the user account. + // Specifies the date that the public key was added to the user account. // // DateImported is a required field DateImported *time.Time `type:"timestamp" required:"true"` - // The content of the SSH public key as specified by the PublicKeyId. + // Specifies the content of the SSH public key as specified by the PublicKeyId. // // SshPublicKeyBody is a required field SshPublicKeyBody *string `type:"string" required:"true"` - // The SshPublicKeyId parameter contains the identifier of the public key. + // Specifies the SshPublicKeyId parameter contains the identifier of the public + // key. 
// // SshPublicKeyId is a required field SshPublicKeyId *string `min:"21" type:"string" required:"true"` @@ -4233,7 +4918,7 @@ func (s *SshPublicKey) SetSshPublicKeyId(v string) *SshPublicKey { type StartServerInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server that you start. + // A system-assigned unique identifier for a server that you start. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -4288,7 +4973,7 @@ func (s StartServerOutput) GoString() string { type StopServerInput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server that you stopped. + // A system-assigned unique identifier for a server that you stopped. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -4354,8 +5039,7 @@ type Tag struct { // Key is a required field Key *string `type:"string" required:"true"` - // This property contains one or more values that you assigned to the key name - // you create. + // Contains one or more values that you assigned to the key name you create. // // Value is a required field Value *string `type:"string" required:"true"` @@ -4493,7 +5177,21 @@ type TestIdentityProviderInput struct { // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // This request parameter is the name of the user account to be tested. + // The type of file transfer protocol to be tested. + // + // The available protocols are: + // + // * Secure Shell (SSH) File Transfer Protocol (SFTP) + // + // * File Transfer Protocol Secure (FTPS) + // + // * File Transfer Protocol (FTP) + ServerProtocol *string `type:"string" enum:"Protocol"` + + // The source IP address of the user account to be tested. + SourceIp *string `type:"string"` + + // The name of the user account to be tested. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -4540,6 +5238,18 @@ func (s *TestIdentityProviderInput) SetServerId(v string) *TestIdentityProviderI return s } +// SetServerProtocol sets the ServerProtocol field's value. +func (s *TestIdentityProviderInput) SetServerProtocol(v string) *TestIdentityProviderInput { + s.ServerProtocol = &v + return s +} + +// SetSourceIp sets the SourceIp field's value. +func (s *TestIdentityProviderInput) SetSourceIp(v string) *TestIdentityProviderInput { + s.SourceIp = &v + return s +} + // SetUserName sets the UserName field's value. func (s *TestIdentityProviderInput) SetUserName(v string) *TestIdentityProviderInput { s.UserName = &v @@ -4610,8 +5320,8 @@ func (s *TestIdentityProviderOutput) SetUrl(v string) *TestIdentityProviderOutpu // // HTTP Status Code: 400 type ThrottlingException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` @@ -4630,17 +5340,17 @@ func (s ThrottlingException) GoString() string { func newErrorThrottlingException(v protocol.ResponseMetadata) error { return &ThrottlingException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ThrottlingException) Code() string { +func (s *ThrottlingException) Code() string { return "ThrottlingException" } // Message returns the exception's message. 
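// Editor's note: illustrative sketch, not part of the vendored patch. It
// calls TestIdentityProvider with the new ServerProtocol and SourceIp request
// fields shown above; the identifiers and source address are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	svc := transfer.New(session.Must(session.NewSession()))

	out, err := svc.TestIdentityProvider(&transfer.TestIdentityProviderInput{
		ServerId:       aws.String("s-01234567890abcdef"),
		UserName:       aws.String("example-user"),
		ServerProtocol: aws.String(transfer.ProtocolFtp),
		SourceIp:       aws.String("203.0.113.10"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}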
-func (s ThrottlingException) Message() string { +func (s *ThrottlingException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4648,30 +5358,30 @@ func (s ThrottlingException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThrottlingException) OrigErr() error { +func (s *ThrottlingException) OrigErr() error { return nil } -func (s ThrottlingException) Error() string { +func (s *ThrottlingException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ThrottlingException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThrottlingException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ThrottlingException) RequestID() string { - return s.respMetadata.RequestID +func (s *ThrottlingException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { _ struct{} `type:"structure"` - // This is the value of the resource that will have the tag removed. An Amazon - // Resource Name (ARN) is an identifier for a specific AWS resource, such as - // a server, user, or role. + // The value of the resource that will have the tag removed. An Amazon Resource + // Name (ARN) is an identifier for a specific AWS resource, such as a server, + // user, or role. // // Arn is a required field Arn *string `min:"20" type:"string" required:"true"` @@ -4745,40 +5455,103 @@ func (s UntagResourceOutput) GoString() string { type UpdateServerInput struct { _ struct{} `type:"structure"` + // The Amazon Resource Name (ARN) of the AWS Certificate Manager (ACM) certificate. + // Required when Protocols is set to FTPS. + // + // To request a new public certificate, see Request a public certificate (https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-public.html) + // in the AWS Certificate Manager User Guide. + // + // To import an existing certificate into ACM, see Importing certificates into + // ACM (https://docs.aws.amazon.com/acm/latest/userguide/import-certificate.html) + // in the AWS Certificate Manager User Guide. + // + // To request a private certificate to use FTPS through private IP addresses, + // see Request a private certificate (https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-request-private.html) + // in the AWS Certificate Manager User Guide. + // + // Certificates with the following cryptographic algorithms and key sizes are + // supported: + // + // * 2048-bit RSA (RSA_2048) + // + // * 4096-bit RSA (RSA_4096) + // + // * Elliptic Prime Curve 256 bit (EC_prime256v1) + // + // * Elliptic Prime Curve 384 bit (EC_secp384r1) + // + // * Elliptic Prime Curve 521 bit (EC_secp521r1) + // + // The certificate must be a valid SSL/TLS X.509 version 3 certificate with + // FQDN or IP address specified and information about the issuer. + Certificate *string `type:"string"` + // The virtual private cloud (VPC) endpoint settings that are configured for - // your SFTP server. With a VPC endpoint, you can restrict access to your SFTP - // server to resources only within your VPC. To control incoming internet traffic, + // your server. With a VPC endpoint, you can restrict access to your server + // to resources only within your VPC. To control incoming internet traffic, // you will need to associate one or more Elastic IP addresses with your server's // endpoint. 
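// Editor's note: illustrative sketch, not part of the vendored patch. It
// removes a tag from a Transfer Family resource by ARN with UntagResource,
// whose input is documented above. The ARN and tag key are placeholders, and
// the TagKeys field is assumed from the same input shape.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	svc := transfer.New(session.Must(session.NewSession()))

	_, err := svc.UntagResource(&transfer.UntagResourceInput{
		Arn:     aws.String("arn:aws:transfer:us-east-1:123456789012:server/s-01234567890abcdef"),
		TagKeys: aws.StringSlice([]string{"Environment"}),
	})
	if err != nil {
		log.Fatal(err)
	}
}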
EndpointDetails *EndpointDetails `type:"structure"` - // The type of endpoint that you want your SFTP server to connect to. You can - // choose to connect to the public internet or a virtual private cloud (VPC) - // endpoint. With a VPC endpoint, your SFTP server isn't accessible over the - // public internet. + // The type of endpoint that you want your server to connect to. You can choose + // to connect to the public internet or a VPC endpoint. With a VPC endpoint, + // you can restrict access to your server and resources only within your VPC. + // + // It is recommended that you use VPC as the EndpointType. With this endpoint + // type, you have the option to directly associate up to three Elastic IPv4 + // addresses (BYO IP included) with your server's endpoint and use VPC security + // groups to restrict traffic by the client's public IP address. This is not + // possible with EndpointType set to VPC_ENDPOINT. EndpointType *string `type:"string" enum:"EndpointType"` - // The RSA private key as generated by ssh-keygen -N "" -f my-new-server-key. + // The RSA private key as generated by ssh-keygen -N "" -m PEM -f my-new-server-key. // - // If you aren't planning to migrate existing users from an existing SFTP server - // to a new AWS SFTP server, don't update the host key. Accidentally changing - // a server's host key can be disruptive. + // If you aren't planning to migrate existing users from an existing server + // to a new server, don't update the host key. Accidentally changing a server's + // host key can be disruptive. // - // For more information, see "https://docs.aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key" - // in the AWS SFTP User Guide. + // For more information, see Change the host key for your SFTP-enabled server + // (https://docs.aws.amazon.com/transfer/latest/userguide/edit-server-config.html#configuring-servers-change-host-key) + // in the AWS Transfer Family User Guide. HostKey *string `type:"string" sensitive:"true"` - // This response parameter is an array containing all of the information required - // to call a customer's authentication API method. + // An array containing all of the information required to call a customer's + // authentication API method. IdentityProviderDetails *IdentityProviderDetails `type:"structure"` - // A value that changes the AWS Identity and Access Management (IAM) role that - // allows Amazon S3 events to be logged in Amazon CloudWatch, turning logging - // on or off. + // Changes the AWS Identity and Access Management (IAM) role that allows Amazon + // S3 events to be logged in Amazon CloudWatch, turning logging on or off. LoggingRole *string `type:"string"` - // A system-assigned unique identifier for an SFTP server instance that the - // user account is assigned to. + // Specifies the file transfer protocol or protocols over which your file transfer + // protocol client can connect to your server's endpoint. The available protocols + // are: + // + // * Secure Shell (SSH) File Transfer Protocol (SFTP): File transfer over + // SSH + // + // * File Transfer Protocol Secure (FTPS): File transfer with TLS encryption + // + // * File Transfer Protocol (FTP): Unencrypted file transfer + // + // If you select FTPS, you must choose a certificate stored in AWS Certificate + // Manager (ACM) which will be used to identify your server when clients connect + // to it over FTPS. 
+ // + // If Protocol includes either FTP or FTPS, then the EndpointType must be VPC + // and the IdentityProviderType must be API_GATEWAY. + // + // If Protocol includes FTP, then AddressAllocationIds cannot be associated. + // + // If Protocol is set only to SFTP, the EndpointType can be set to PUBLIC and + // the IdentityProviderType can be set to SERVICE_MANAGED. + Protocols []*string `min:"1" type:"list"` + + // Specifies the name of the security policy that is attached to the server. + SecurityPolicyName *string `type:"string"` + + // A system-assigned unique identifier for a server instance that the user account + // is assigned to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -4797,6 +5570,9 @@ func (s UpdateServerInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *UpdateServerInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateServerInput"} + if s.Protocols != nil && len(s.Protocols) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Protocols", 1)) + } if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } @@ -4820,6 +5596,12 @@ func (s *UpdateServerInput) Validate() error { return nil } +// SetCertificate sets the Certificate field's value. +func (s *UpdateServerInput) SetCertificate(v string) *UpdateServerInput { + s.Certificate = &v + return s +} + // SetEndpointDetails sets the EndpointDetails field's value. func (s *UpdateServerInput) SetEndpointDetails(v *EndpointDetails) *UpdateServerInput { s.EndpointDetails = v @@ -4850,6 +5632,18 @@ func (s *UpdateServerInput) SetLoggingRole(v string) *UpdateServerInput { return s } +// SetProtocols sets the Protocols field's value. +func (s *UpdateServerInput) SetProtocols(v []*string) *UpdateServerInput { + s.Protocols = v + return s +} + +// SetSecurityPolicyName sets the SecurityPolicyName field's value. +func (s *UpdateServerInput) SetSecurityPolicyName(v string) *UpdateServerInput { + s.SecurityPolicyName = &v + return s +} + // SetServerId sets the ServerId field's value. func (s *UpdateServerInput) SetServerId(v string) *UpdateServerInput { s.ServerId = &v @@ -4859,8 +5653,8 @@ func (s *UpdateServerInput) SetServerId(v string) *UpdateServerInput { type UpdateServerOutput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server that the user account - // is assigned to. + // A system-assigned unique identifier for a server that the user account is + // assigned to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` @@ -4885,78 +5679,77 @@ func (s *UpdateServerOutput) SetServerId(v string) *UpdateServerOutput { type UpdateUserInput struct { _ struct{} `type:"structure"` - // A parameter that specifies the landing directory (folder) for a user when - // they log in to the server using their client. + // Specifies the landing directory (folder) for a user when they log in to the + // server using their file transfer protocol client. // - // An example is /home/username. + // An example is your-Amazon-S3-bucket-name>/home/username. HomeDirectory *string `type:"string"` - // Logical directory mappings that specify what S3 paths and keys should be - // visible to your user and how you want to make them visible. You will need + // Logical directory mappings that specify what Amazon S3 paths and keys should + // be visible to your user and how you want to make them visible. 
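// Editor's note: illustrative sketch, not part of the vendored patch. It
// updates a server with the new Protocols and SecurityPolicyName fields using
// the setters defined above; the server ID and policy name are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	svc := transfer.New(session.Must(session.NewSession()))

	input := &transfer.UpdateServerInput{}
	input.SetServerId("s-01234567890abcdef")
	input.SetProtocols(aws.StringSlice([]string{transfer.ProtocolSftp}))
	input.SetSecurityPolicyName("TransferSecurityPolicy-2020-06")

	out, err := svc.UpdateServer(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.ServerId))
}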
You will need // to specify the "Entry" and "Target" pair, where Entry shows how the path - // is made visible and Target is the actual S3 path. If you only specify a target, - // it will be displayed as is. You will need to also make sure that your AWS - // IAM Role provides access to paths in Target. The following is an example. + // is made visible and Target is the actual Amazon S3 path. If you only specify + // a target, it will be displayed as is. You will need to also make sure that + // your IAM role provides access to paths in Target. The following is an example. // // '[ "/bucket2/documentation", { "Entry": "your-personal-report.pdf", "Target": // "/bucket3/customized-reports/${transfer:UserName}.pdf" } ]' // - // In most cases, you can use this value instead of the scope down policy to + // In most cases, you can use this value instead of the scope-down policy to // lock your user down to the designated home directory ("chroot"). To do this, // you can set Entry to '/' and set Target to the HomeDirectory parameter value. // - // If the target of a logical directory entry does not exist in S3, the entry - // will be ignored. As a workaround, you can use the S3 api to create 0 byte - // objects as place holders for your directory. If using the CLI, use the s3api - // call instead of s3 so you can use the put-object operation. For example, - // you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. - // Make sure that the end of the key name ends in a / for it to be considered - // a folder. + // If the target of a logical directory entry does not exist in Amazon S3, the + // entry will be ignored. As a workaround, you can use the Amazon S3 API to + // create 0 byte objects as place holders for your directory. If using the CLI, + // use the s3api call instead of s3 so you can use the put-object operation. + // For example, you use the following: aws s3api put-object --bucket bucketname + // --key path/to/folder/. Make sure that the end of the key name ends in a / + // for it to be considered a folder. HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` // The type of landing directory (folder) you want your users' home directory - // to be when they log into the SFTP serve. If you set it to PATH, the user - // will see the absolute Amazon S3 bucket paths as is in their SFTP clients. - // If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings - // for how you want to make S3 paths visible to your user. + // to be when they log into the server. If you set it to PATH, the user will + // see the absolute Amazon S3 bucket paths as is in their file transfer protocol + // clients. If you set it LOGICAL, you will need to provide mappings in the + // HomeDirectoryMappings for how you want to make Amazon S3 paths visible to + // your users. HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` // Allows you to supply a scope-down policy for your user so you can use the - // same AWS Identity and Access Management (IAM) role across multiple users. - // The policy scopes down user access to portions of your Amazon S3 bucket. - // Variables you can use inside this policy include ${Transfer:UserName}, ${Transfer:HomeDirectory}, - // and ${Transfer:HomeBucket}. + // same IAM role across multiple users. The policy scopes down user access to + // portions of your Amazon S3 bucket. 
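// Editor's note: illustrative sketch, not part of the vendored patch. It
// applies the "chroot" pattern described above by setting HomeDirectoryType
// to LOGICAL and mapping Entry "/" to the user's home path; bucket, server,
// and user names are placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	svc := transfer.New(session.Must(session.NewSession()))

	_, err := svc.UpdateUser(&transfer.UpdateUserInput{
		ServerId:          aws.String("s-01234567890abcdef"),
		UserName:          aws.String("example-user"),
		HomeDirectoryType: aws.String(transfer.HomeDirectoryTypeLogical),
		HomeDirectoryMappings: []*transfer.HomeDirectoryMapEntry{
			{
				Entry:  aws.String("/"),
				Target: aws.String("/example-bucket/home/${transfer:UserName}"),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}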
Variables you can use inside this policy + // include ${Transfer:UserName}, ${Transfer:HomeDirectory}, and ${Transfer:HomeBucket}. // - // For scope-down policies, AWS Transfer for SFTP stores the policy as a JSON + // For scope-down policies, AWS Transfer Family stores the policy as a JSON // blob, instead of the Amazon Resource Name (ARN) of the policy. You save the // policy as a JSON blob and pass it in the Policy argument. // - // For an example of a scope-down policy, see "https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down">Creating - // a Scope-Down Policy. + // For an example of a scope-down policy, see Creating a scope-down policy (https://docs.aws.amazon.com/transfer/latest/userguide/users.html#users-policies-scope-down). // - // For more information, see "https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html" + // For more information, see AssumeRole (https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) // in the AWS Security Token Service API Reference. Policy *string `type:"string"` - // The IAM role that controls your user's access to your Amazon S3 bucket. The + // The IAM role that controls your users' access to your Amazon S3 bucket. The // policies attached to this role will determine the level of access you want // to provide your users when transferring files into and out of your Amazon // S3 bucket or buckets. The IAM role should also contain a trust relationship - // that allows the Secure File Transfer Protocol (SFTP) server to access your - // resources when servicing your SFTP user's transfer requests. + // that allows the server to access your resources when servicing your users' + // transfer requests. Role *string `min:"20" type:"string"` - // A system-assigned unique identifier for an SFTP server instance that the - // user account is assigned to. + // A system-assigned unique identifier for a server instance that the user account + // is assigned to. // // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` // A unique string that identifies a user and is associated with a server as - // specified by the ServerId. This is the string that will be used by your user - // when they log in to your SFTP server. This user name is a minimum of 3 and - // a maximum of 32 characters long. The following are valid characters: a-z, - // A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen. + // specified by the ServerId. This user name must be a minimum of 3 and a maximum + // of 100 characters long. The following are valid characters: a-z, A-Z, 0-9, + // underscore '_', hyphen '-', period '.', and at sign '@'. The user name can't + // start with a hyphen, period, or at sign. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -5052,19 +5845,19 @@ func (s *UpdateUserInput) SetUserName(v string) *UpdateUserInput { return s } -// UpdateUserResponse returns the user name and server identifier for the request -// to update a user's properties. +// UpdateUserResponse returns the user name and identifier for the request to +// update a user's properties. type UpdateUserOutput struct { _ struct{} `type:"structure"` - // A system-assigned unique identifier for an SFTP server instance that the - // user account is assigned to. + // A system-assigned unique identifier for a server instance that the user account + // is assigned to. 
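// Editor's note: illustrative sketch, not part of the vendored patch. It
// shows a scope-down policy supplied as a JSON blob in UpdateUserInput.Policy,
// using the ${Transfer:...} variables described above. The policy document
// itself is an assumption for illustration only; see the linked scope-down
// policy guide for a correct, complete policy.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/transfer"
)

const scopeDownPolicy = `{
  "Version": "2012-10-17",
  "Statement": [{
    "Sid": "AllowUserHomeAccess",
    "Effect": "Allow",
    "Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"],
    "Resource": "arn:aws:s3:::${Transfer:HomeBucket}/home/${Transfer:UserName}/*"
  }]
}`

func main() {
	input := &transfer.UpdateUserInput{
		ServerId: aws.String("s-01234567890abcdef"),
		UserName: aws.String("example-user"),
		Policy:   aws.String(scopeDownPolicy),
	}
	// input would then be passed to (*transfer.Transfer).UpdateUser.
	fmt.Println(input)
}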
// // ServerId is a required field ServerId *string `min:"19" type:"string" required:"true"` - // The unique identifier for a user that is assigned to the SFTP server instance - // that was specified in the request. + // The unique identifier for a user that is assigned to a server instance that + // was specified in the request. // // UserName is a required field UserName *string `min:"3" type:"string" required:"true"` @@ -5103,6 +5896,15 @@ const ( EndpointTypeVpcEndpoint = "VPC_ENDPOINT" ) +// EndpointType_Values returns all elements of the EndpointType enum +func EndpointType_Values() []string { + return []string{ + EndpointTypePublic, + EndpointTypeVpc, + EndpointTypeVpcEndpoint, + } +} + const ( // HomeDirectoryTypePath is a HomeDirectoryType enum value HomeDirectoryTypePath = "PATH" @@ -5111,11 +5913,20 @@ const ( HomeDirectoryTypeLogical = "LOGICAL" ) +// HomeDirectoryType_Values returns all elements of the HomeDirectoryType enum +func HomeDirectoryType_Values() []string { + return []string{ + HomeDirectoryTypePath, + HomeDirectoryTypeLogical, + } +} + // Returns information related to the type of user authentication that is in -// use for a server's users. For SERVICE_MANAGED authentication, the Secure -// Shell (SSH) public keys are stored with a user on an SFTP server instance. -// For API_GATEWAY authentication, your custom authentication method is implemented -// by using an API call. A server can have only one method of authentication. +// use for a file transfer protocol-enabled server's users. For SERVICE_MANAGED +// authentication, the Secure Shell (SSH) public keys are stored with a user +// on the server instance. For API_GATEWAY authentication, your custom authentication +// method is implemented by using an API call. The server can have only one +// method of authentication. const ( // IdentityProviderTypeServiceManaged is a IdentityProviderType enum value IdentityProviderTypeServiceManaged = "SERVICE_MANAGED" @@ -5124,15 +5935,43 @@ const ( IdentityProviderTypeApiGateway = "API_GATEWAY" ) -// Describes the condition of the SFTP server with respect to its ability to -// perform file operations. There are six possible states: OFFLINE, ONLINE, -// STARTING, STOPPING, START_FAILED, and STOP_FAILED. +// IdentityProviderType_Values returns all elements of the IdentityProviderType enum +func IdentityProviderType_Values() []string { + return []string{ + IdentityProviderTypeServiceManaged, + IdentityProviderTypeApiGateway, + } +} + +const ( + // ProtocolSftp is a Protocol enum value + ProtocolSftp = "SFTP" + + // ProtocolFtp is a Protocol enum value + ProtocolFtp = "FTP" + + // ProtocolFtps is a Protocol enum value + ProtocolFtps = "FTPS" +) + +// Protocol_Values returns all elements of the Protocol enum +func Protocol_Values() []string { + return []string{ + ProtocolSftp, + ProtocolFtp, + ProtocolFtps, + } +} + +// Describes the condition of a file transfer protocol-enabled server with respect +// to its ability to perform file operations. There are six possible states: +// OFFLINE, ONLINE, STARTING, STOPPING, START_FAILED, and STOP_FAILED. // -// OFFLINE indicates that the SFTP server exists, but that it is not available -// for file operations. ONLINE indicates that the SFTP server is available to -// perform file operations. STARTING indicates that the SFTP server's was instantiated, -// but the server is not yet available to perform file operations. 
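// Editor's note: illustrative sketch, not part of the vendored patch. The new
// *_Values() helpers return every member of an enum, which is convenient for
// validating configuration values (for example, a protocol name) before
// calling the API.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/transfer"
)

// isValidProtocol reports whether p is one of the Protocol enum values.
func isValidProtocol(p string) bool {
	for _, v := range transfer.Protocol_Values() {
		if v == p {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isValidProtocol("FTPS")) // true
	fmt.Println(isValidProtocol("SCP"))  // false
}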
Under normal -// conditions, it can take a couple of minutes for an SFTP server to be completely +// OFFLINE indicates that the server exists, but that it is not available for +// file operations. ONLINE indicates that the server is available to perform +// file operations. STARTING indicates that the server's was instantiated, but +// the server is not yet available to perform file operations. Under normal +// conditions, it can take a couple of minutes for the server to be completely // operational. Both START_FAILED and STOP_FAILED are error conditions. const ( // StateOffline is a State enum value @@ -5153,3 +5992,15 @@ const ( // StateStopFailed is a State enum value StateStopFailed = "STOP_FAILED" ) + +// State_Values returns all elements of the State enum +func State_Values() []string { + return []string{ + StateOffline, + StateOnline, + StateStarting, + StateStopping, + StateStartFailed, + StateStopFailed, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/transfer/doc.go b/vendor/github.com/aws/aws-sdk-go/service/transfer/doc.go index 51a9c01b3..fcbf894a2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/transfer/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/transfer/doc.go @@ -1,18 +1,19 @@ // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. // Package transfer provides the client and types for making API -// requests to AWS Transfer for SFTP. -// -// AWS Transfer for SFTP is a fully managed service that enables the transfer -// of files directly into and out of Amazon S3 using the Secure File Transfer -// Protocol (SFTP)—also known as Secure Shell (SSH) File Transfer Protocol. -// AWS helps you seamlessly migrate your file transfer workflows to AWS Transfer -// for SFTP—by integrating with existing authentication systems, and providing -// DNS routing with Amazon Route 53—so nothing changes for your customers -// and partners, or their applications. With your data in S3, you can use it -// with AWS services for processing, analytics, machine learning, and archiving. -// Getting started with AWS Transfer for SFTP (AWS SFTP) is easy; there is no -// infrastructure to buy and set up. +// requests to AWS Transfer Family. +// +// AWS Transfer Family is a fully managed service that enables the transfer +// of files over the File Transfer Protocol (FTP), File Transfer Protocol over +// SSL (FTPS), or Secure Shell (SSH) File Transfer Protocol (SFTP) directly +// into and out of Amazon Simple Storage Service (Amazon S3). AWS helps you +// seamlessly migrate your file transfer workflows to AWS Transfer Family by +// integrating with existing authentication systems, and providing DNS routing +// with Amazon Route 53 so nothing changes for your customers and partners, +// or their applications. With your data in Amazon S3, you can use it with AWS +// services for processing, analytics, machine learning, and archiving. Getting +// started with AWS Transfer Family is easy since there is no infrastructure +// to buy and set up. // // See https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05 for more information on this service. // @@ -21,7 +22,7 @@ // // Using the Client // -// To contact AWS Transfer for SFTP with the SDK use the New function to create +// To contact AWS Transfer Family with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. // These clients are safe to use concurrently. 
// @@ -31,7 +32,7 @@ // See aws.Config documentation for more information on configuring SDK clients. // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config // -// See the AWS Transfer for SFTP client Transfer for more +// See the AWS Transfer Family client Transfer for more // information on creating client for this service. // https://docs.aws.amazon.com/sdk-for-go/api/service/transfer/#New package transfer diff --git a/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go b/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go index 0aef6abd4..d4a863ac5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go @@ -8,18 +8,24 @@ import ( const ( + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You do not have sufficient access to perform this action. + ErrCodeAccessDeniedException = "AccessDeniedException" + // ErrCodeConflictException for service response error code // "ConflictException". // - // This exception is thrown when the UpdatServer is called for a server that - // has VPC as the endpoint type and the server's VpcEndpointID is not in the - // available state. + // This exception is thrown when the UpdatServer is called for a file transfer + // protocol-enabled server that has VPC as the endpoint type and the server's + // VpcEndpointID is not in the available state. ErrCodeConflictException = "ConflictException" // ErrCodeInternalServiceError for service response error code // "InternalServiceError". // - // This exception is thrown when an error occurs in the AWS Transfer for SFTP + // This exception is thrown when an error occurs in the AWS Transfer Family // service. ErrCodeInternalServiceError = "InternalServiceError" @@ -45,13 +51,13 @@ const ( // "ResourceNotFoundException". // // This exception is thrown when a resource is not found by the AWS Transfer - // for SFTP service. + // Family service. ErrCodeResourceNotFoundException = "ResourceNotFoundException" // ErrCodeServiceUnavailableException for service response error code // "ServiceUnavailableException". // - // The request has failed because the AWS Transfer for SFTP service is not available. + // The request has failed because the AWS Transfer Family service is not available. ErrCodeServiceUnavailableException = "ServiceUnavailableException" // ErrCodeThrottlingException for service response error code @@ -64,6 +70,7 @@ const ( ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "AccessDeniedException": newErrorAccessDeniedException, "ConflictException": newErrorConflictException, "InternalServiceError": newErrorInternalServiceError, "InvalidNextTokenException": newErrorInvalidNextTokenException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go b/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go index 9b385e525..3c51a21dd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go @@ -7,13 +7,13 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) // Transfer provides the API operation methods for making requests to -// AWS Transfer for SFTP. 
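// Editor's note: illustrative sketch, not part of the vendored patch. It
// shows one way to match the error codes defined above with awserr, treating
// a missing server as "not found" rather than a hard failure. DescribeServer
// and its input shape are assumed from the same package; they are not shown
// in this hunk.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/transfer"
)

func main() {
	svc := transfer.New(session.Must(session.NewSession()))

	_, err := svc.DescribeServer(&transfer.DescribeServerInput{
		ServerId: aws.String("s-01234567890abcdef"),
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == transfer.ErrCodeResourceNotFoundException {
		fmt.Println("server does not exist")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
}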
See this package's package overview docs +// AWS Transfer Family. See this package's package overview docs // for details on the service. // // Transfer methods are safe to use concurrently. It is not safe to diff --git a/vendor/github.com/aws/aws-sdk-go/service/waf/api.go b/vendor/github.com/aws/aws-sdk-go/service/waf/api.go index 1267ed1a5..bf68bfcee 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/waf/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/waf/api.go @@ -57,6 +57,16 @@ func (c *WAF) CreateByteMatchSetRequest(input *CreateByteMatchSetInput) (req *re // CreateByteMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a ByteMatchSet. You then use UpdateByteMatchSet to identify the part // of a web request that you want AWS WAF to inspect, such as the values of // the User-Agent header or the query string. For example, you can create a @@ -204,6 +214,16 @@ func (c *WAF) CreateGeoMatchSetRequest(input *CreateGeoMatchSetInput) (req *requ // CreateGeoMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates an GeoMatchSet, which you use to specify which web requests you want // to allow or block based on the country that the requests originate from. // For example, if you're receiving a lot of requests from one or more countries @@ -350,6 +370,16 @@ func (c *WAF) CreateIPSetRequest(input *CreateIPSetInput) (req *request.Request, // CreateIPSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates an IPSet, which you use to specify which web requests that you want // to allow or block based on the IP addresses that the requests originate from. // For example, if you're receiving a lot of requests from one or more individual @@ -497,6 +527,16 @@ func (c *WAF) CreateRateBasedRuleRequest(input *CreateRateBasedRuleInput) (req * // CreateRateBasedRule API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a RateBasedRule. The RateBasedRule contains a RateLimit, which specifies // the maximum number of requests that AWS WAF allows from a specified IP address // in a five-minute period. The RateBasedRule also contains the IPSet objects, @@ -504,23 +544,22 @@ func (c *WAF) CreateRateBasedRuleRequest(input *CreateRateBasedRuleInput) (req * // you want to count or block if these requests exceed the RateLimit. // // If you add more than one predicate to a RateBasedRule, a request not only -// must exceed the RateLimit, but it also must match all the specifications -// to be counted or blocked. For example, suppose you add the following to a -// RateBasedRule: +// must exceed the RateLimit, but it also must match all the conditions to be +// counted or blocked. For example, suppose you add the following to a RateBasedRule: // // * An IPSet that matches the IP address 192.0.2.44/32 // // * A ByteMatchSet that matches BadBot in the User-Agent header // -// Further, you specify a RateLimit of 15,000. +// Further, you specify a RateLimit of 1,000. // // You then add the RateBasedRule to a WebACL and specify that you want to block // requests that meet the conditions in the rule. For a request to be blocked, // it must come from the IP address 192.0.2.44 and the User-Agent header in // the request must contain the value BadBot. Further, requests that match these -// two conditions must be received at a rate of more than 15,000 requests every +// two conditions must be received at a rate of more than 1,000 requests every // five minutes. If both conditions are met and the rate is exceeded, AWS WAF -// blocks the requests. If the rate drops below 15,000 for a five-minute period, +// blocks the requests. If the rate drops below 1,000 for a five-minute period, // AWS WAF no longer blocks the requests. // // As a second example, suppose you want to limit requests to a particular page @@ -532,7 +571,7 @@ func (c *WAF) CreateRateBasedRuleRequest(input *CreateRateBasedRuleInput) (req * // // * A TargetString of login // -// Further, you specify a RateLimit of 15,000. +// Further, you specify a RateLimit of 1,000. // // By adding this RateBasedRule to a WebACL, you could limit requests to your // login page without affecting the rest of your site. @@ -685,6 +724,16 @@ func (c *WAF) CreateRegexMatchSetRequest(input *CreateRegexMatchSetInput) (req * // CreateRegexMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a RegexMatchSet. You then use UpdateRegexMatchSet to identify the // part of a web request that you want AWS WAF to inspect, such as the values // of the User-Agent header or the query string. 
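// Editor's note: illustrative sketch, not part of the vendored patch. It
// creates the kind of RateBasedRule described above with a RateLimit of 1,000
// using the classic WAF client. GetChangeToken, CreateRateBasedRule, and
// their field names are assumed from the waf package; the rule and metric
// names are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	svc := waf.New(session.Must(session.NewSession()))

	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	out, err := svc.CreateRateBasedRule(&waf.CreateRateBasedRuleInput{
		ChangeToken: token.ChangeToken,
		Name:        aws.String("example-rate-rule"),
		MetricName:  aws.String("exampleRateRule"),
		RateKey:     aws.String("IP"),
		RateLimit:   aws.Int64(1000),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}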
For example, you can create @@ -800,6 +849,16 @@ func (c *WAF) CreateRegexPatternSetRequest(input *CreateRegexPatternSetInput) (r // CreateRegexPatternSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a RegexPatternSet. You then use UpdateRegexPatternSet to specify // the regular expression (regex) pattern that you want AWS WAF to search for, // such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests. @@ -911,6 +970,16 @@ func (c *WAF) CreateRuleRequest(input *CreateRuleInput) (req *request.Request, o // CreateRule API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and // other predicates that identify the requests that you want to block. If you // add more than one predicate to a Rule, a request must match all of the specifications @@ -1074,6 +1143,16 @@ func (c *WAF) CreateRuleGroupRequest(input *CreateRuleGroupInput) (req *request. // CreateRuleGroup API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a RuleGroup. A rule group is a collection of predefined rules that // you add to a web ACL. You use UpdateRuleGroup to add rules to the rule group. // @@ -1186,6 +1265,16 @@ func (c *WAF) CreateSizeConstraintSetRequest(input *CreateSizeConstraintSetInput // CreateSizeConstraintSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify // the part of a web request that you want AWS WAF to check for length, such // as the length of the User-Agent header or the length of the query string. 
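The Create* operations documented above all follow the change-token flow that GetChangeToken describes later in this file: fetch a token, then pass it to the mutating call. A minimal Go sketch of that flow, built around the 1,000-request RateBasedRule example from the CreateRateBasedRule documentation above, might look like the following. The input field names (ChangeToken, Name, MetricName, RateKey, RateLimit) follow the usual aws-sdk-go v1 shapes and are assumed here rather than taken from this hunk, and the rule and metric names are hypothetical.

// Illustrative sketch only; not part of the vendored SDK. Assumes the usual
// aws-sdk-go v1 input shapes, which are defined outside this hunk.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	client := waf.New(sess)

	// Every mutating WAF Classic call starts by fetching a change token,
	// as described in the GetChangeToken documentation below.
	token, err := client.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	// Create the rate-based rule from the example above: act on sources that
	// exceed 1,000 matching requests in any five-minute period.
	out, err := client.CreateRateBasedRule(&waf.CreateRateBasedRuleInput{
		ChangeToken: token.ChangeToken,
		Name:        aws.String("example-bad-bot-rate-rule"), // hypothetical name
		MetricName:  aws.String("ExampleBadBotRateRule"),     // hypothetical metric name
		RateKey:     aws.String("IP"),                        // the only RateKey the docs allow
		RateLimit:   aws.Int64(1000),
	})
	if err != nil {
		log.Fatal(err)
	}

	// The IPSet and ByteMatchSet predicates from the example are attached
	// afterwards with UpdateRateBasedRule, documented later in this diff.
	fmt.Println(aws.StringValue(out.Rule.RuleId))
}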
@@ -1334,6 +1423,16 @@ func (c *WAF) CreateSqlInjectionMatchSetRequest(input *CreateSqlInjectionMatchSe // CreateSqlInjectionMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests // that contain snippets of SQL code in a specified part of web requests. AWS // WAF searches for character sequences that are likely to be malicious strings. @@ -1478,6 +1577,16 @@ func (c *WAF) CreateWebACLRequest(input *CreateWebACLInput) (req *request.Reques // CreateWebACL API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a WebACL, which contains the Rules that identify the CloudFront web // requests that you want to allow, block, or count. AWS WAF evaluates Rules // in order based on the value of Priority for each Rule. @@ -1596,6 +1705,169 @@ func (c *WAF) CreateWebACLWithContext(ctx aws.Context, input *CreateWebACLInput, return out, req.Send() } +const opCreateWebACLMigrationStack = "CreateWebACLMigrationStack" + +// CreateWebACLMigrationStackRequest generates a "aws/request.Request" representing the +// client's request for the CreateWebACLMigrationStack operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateWebACLMigrationStack for more information on using the CreateWebACLMigrationStack +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateWebACLMigrationStackRequest method. +// req, resp := client.CreateWebACLMigrationStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateWebACLMigrationStack +func (c *WAF) CreateWebACLMigrationStackRequest(input *CreateWebACLMigrationStackInput) (req *request.Request, output *CreateWebACLMigrationStackOutput) { + op := &request.Operation{ + Name: opCreateWebACLMigrationStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateWebACLMigrationStackInput{} + } + + output = &CreateWebACLMigrationStackOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateWebACLMigrationStack API operation for AWS WAF. 
+// +// Creates an AWS CloudFormation WAFV2 template for the specified web ACL in +// the specified Amazon S3 bucket. Then, in CloudFormation, you create a stack +// from the template, to create the web ACL and its resources in AWS WAFV2. +// Use this to migrate your AWS WAF Classic web ACL to the latest version of +// AWS WAF. +// +// This is part of a larger migration procedure for web ACLs from AWS WAF Classic +// to the latest version of AWS WAF. For the full procedure, including caveats +// and manual steps to complete the migration and switch over to the new web +// ACL, see Migrating your AWS WAF Classic resources to AWS WAF (https://docs.aws.amazon.com/waf/latest/developerguide/waf-migrating-from-classic.html) +// in the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF's +// API operation CreateWebACLMigrationStack for usage and error information. +// +// Returned Error Types: +// * InternalErrorException +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * InvalidParameterException +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. +// +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * InvalidOperationException +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * NonexistentItemException +// The operation failed because the referenced object doesn't exist. +// +// * WAFEntityMigrationException +// The operation failed due to a problem with the migration. The failure cause +// is provided in the exception, in the MigrationErrorType: +// +// * ENTITY_NOT_SUPPORTED - The web ACL has an unsupported entity but the +// IgnoreUnsupportedType is not set to true. +// +// * ENTITY_NOT_FOUND - The web ACL doesn't exist. 
+// +// * S3_BUCKET_NO_PERMISSION - You don't have permission to perform the PutObject +// action to the specified Amazon S3 bucket. +// +// * S3_BUCKET_NOT_ACCESSIBLE - The bucket policy doesn't allow AWS WAF to +// perform the PutObject action in the bucket. +// +// * S3_BUCKET_NOT_FOUND - The S3 bucket doesn't exist. +// +// * S3_BUCKET_INVALID_REGION - The S3 bucket is not in the same Region as +// the web ACL. +// +// * S3_INTERNAL_ERROR - AWS WAF failed to create the template in the S3 +// bucket for another reason. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24/CreateWebACLMigrationStack +func (c *WAF) CreateWebACLMigrationStack(input *CreateWebACLMigrationStackInput) (*CreateWebACLMigrationStackOutput, error) { + req, out := c.CreateWebACLMigrationStackRequest(input) + return out, req.Send() +} + +// CreateWebACLMigrationStackWithContext is the same as CreateWebACLMigrationStack with the addition of +// the ability to pass a context and additional request options. +// +// See CreateWebACLMigrationStack for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAF) CreateWebACLMigrationStackWithContext(ctx aws.Context, input *CreateWebACLMigrationStackInput, opts ...request.Option) (*CreateWebACLMigrationStackOutput, error) { + req, out := c.CreateWebACLMigrationStackRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateXssMatchSet = "CreateXssMatchSet" // CreateXssMatchSetRequest generates a "aws/request.Request" representing the @@ -1640,6 +1912,16 @@ func (c *WAF) CreateXssMatchSetRequest(input *CreateXssMatchSetInput) (req *requ // CreateXssMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates an XssMatchSet, which you use to allow, block, or count requests // that contain cross-site scripting attacks in the specified part of web requests. // AWS WAF searches for character sequences that are likely to be malicious @@ -1785,6 +2067,16 @@ func (c *WAF) DeleteByteMatchSetRequest(input *DeleteByteMatchSetInput) (req *re // DeleteByteMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's // still used in any Rules or if it still includes any ByteMatchTuple objects // (any filters). 
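CreateWebACLMigrationStack is the one genuinely new operation vendored in this hunk; its Request, plain, and WithContext variants all appear above. A hedged sketch of calling the WithContext variant follows, under the assumption that the input carries WebACLId, S3BucketName, and IgnoreUnsupportedType fields and that the output exposes the S3 object URL of the generated CloudFormation template; only IgnoreUnsupportedType is actually named in the error documentation above, so the other field names are assumptions.

// Illustrative sketch only; not part of the vendored SDK. Field names other
// than IgnoreUnsupportedType are assumed, and the IDs/bucket are hypothetical.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	sess := session.Must(session.NewSession())
	client := waf.New(sess)

	// The context must be non-nil, per the WithContext documentation above.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	out, err := client.CreateWebACLMigrationStackWithContext(ctx, &waf.CreateWebACLMigrationStackInput{
		WebACLId:              aws.String("example-web-acl-id"),        // hypothetical web ACL ID
		S3BucketName:          aws.String("aws-waf-migration-example"), // hypothetical bucket
		IgnoreUnsupportedType: aws.Bool(true),
	})
	if err != nil {
		// A WAFEntityMigrationException carries the MigrationErrorType values
		// listed in the documentation above (S3_BUCKET_NOT_FOUND, etc.).
		log.Fatal(err)
	}

	// The generated CloudFormation template lands in the specified S3 bucket.
	fmt.Println(aws.StringValue(out.S3ObjectUrl))
}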
@@ -1912,6 +2204,16 @@ func (c *WAF) DeleteGeoMatchSetRequest(input *DeleteGeoMatchSetInput) (req *requ // DeleteGeoMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a GeoMatchSet. You can't delete a GeoMatchSet if it's // still used in any Rules or if it still includes any countries. // @@ -2038,6 +2340,16 @@ func (c *WAF) DeleteIPSetRequest(input *DeleteIPSetInput) (req *request.Request, // DeleteIPSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes an IPSet. You can't delete an IPSet if it's still used // in any Rules or if it still includes any IP addresses. // @@ -2165,6 +2477,16 @@ func (c *WAF) DeleteLoggingConfigurationRequest(input *DeleteLoggingConfiguratio // DeleteLoggingConfiguration API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes the LoggingConfiguration from the specified web ACL. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2253,6 +2575,16 @@ func (c *WAF) DeletePermissionPolicyRequest(input *DeletePermissionPolicyInput) // DeletePermissionPolicy API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes an IAM policy from the specified RuleGroup. // // The user making the request must be the owner of the RuleGroup. @@ -2342,6 +2674,16 @@ func (c *WAF) DeleteRateBasedRuleRequest(input *DeleteRateBasedRuleInput) (req * // DeleteRateBasedRule API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a RateBasedRule. You can't delete a rule if it's still // used in any WebACL objects or if it still includes any predicates, such as // ByteMatchSet objects. @@ -2474,6 +2816,16 @@ func (c *WAF) DeleteRegexMatchSetRequest(input *DeleteRegexMatchSetInput) (req * // DeleteRegexMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a RegexMatchSet. You can't delete a RegexMatchSet if // it's still used in any Rules or if it still includes any RegexMatchTuples // objects (any filters). @@ -2601,6 +2953,16 @@ func (c *WAF) DeleteRegexPatternSetRequest(input *DeleteRegexPatternSetInput) (r // DeleteRegexPatternSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a RegexPatternSet. You can't delete a RegexPatternSet // if it's still used in any RegexMatchSet or if the RegexPatternSet is not // empty. @@ -2716,6 +3078,16 @@ func (c *WAF) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, o // DeleteRule API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a Rule. You can't delete a Rule if it's still used in // any WebACL objects or if it still includes any predicates, such as ByteMatchSet // objects. @@ -2846,6 +3218,16 @@ func (c *WAF) DeleteRuleGroupRequest(input *DeleteRuleGroupInput) (req *request. // DeleteRuleGroup API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. 
+// // Permanently deletes a RuleGroup. You can't delete a RuleGroup if it's still // used in any WebACL objects or if it still includes any rules. // @@ -2989,6 +3371,16 @@ func (c *WAF) DeleteSizeConstraintSetRequest(input *DeleteSizeConstraintSetInput // DeleteSizeConstraintSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet // if it's still used in any Rules or if it still includes any SizeConstraint // objects (any filters). @@ -3116,6 +3508,16 @@ func (c *WAF) DeleteSqlInjectionMatchSetRequest(input *DeleteSqlInjectionMatchSe // DeleteSqlInjectionMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet // if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple // objects. @@ -3244,6 +3646,16 @@ func (c *WAF) DeleteWebACLRequest(input *DeleteWebACLInput) (req *request.Reques // DeleteWebACL API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a WebACL. You can't delete a WebACL if it still contains // any Rules. // @@ -3371,6 +3783,16 @@ func (c *WAF) DeleteXssMatchSetRequest(input *DeleteXssMatchSetInput) (req *requ // DeleteXssMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes an XssMatchSet. You can't delete an XssMatchSet if it's // still used in any Rules or if it still contains any XssMatchTuple objects. // @@ -3498,6 +3920,16 @@ func (c *WAF) GetByteMatchSetRequest(input *GetByteMatchSetInput) (req *request. // GetByteMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. 
For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the ByteMatchSet specified by ByteMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3585,6 +4017,16 @@ func (c *WAF) GetChangeTokenRequest(input *GetChangeTokenInput) (req *request.Re // GetChangeToken API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // When you want to create, update, or delete AWS WAF objects, get a change // token and include the change token in the create, update, or delete request. // Change tokens ensure that your application doesn't submit conflicting requests @@ -3679,6 +4121,16 @@ func (c *WAF) GetChangeTokenStatusRequest(input *GetChangeTokenStatusInput) (req // GetChangeTokenStatus API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the status of a ChangeToken that you got by calling GetChangeToken. // ChangeTokenStatus is one of the following values: // @@ -3772,6 +4224,16 @@ func (c *WAF) GetGeoMatchSetRequest(input *GetGeoMatchSetInput) (req *request.Re // GetGeoMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the GeoMatchSet that is specified by GeoMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3859,6 +4321,16 @@ func (c *WAF) GetIPSetRequest(input *GetIPSetInput) (req *request.Request, outpu // GetIPSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). 
+// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the IPSet that is specified by IPSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3946,6 +4418,16 @@ func (c *WAF) GetLoggingConfigurationRequest(input *GetLoggingConfigurationInput // GetLoggingConfiguration API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the LoggingConfiguration for the specified web ACL. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4029,6 +4511,16 @@ func (c *WAF) GetPermissionPolicyRequest(input *GetPermissionPolicyInput) (req * // GetPermissionPolicy API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the IAM policy attached to the RuleGroup. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4112,6 +4604,16 @@ func (c *WAF) GetRateBasedRuleRequest(input *GetRateBasedRuleInput) (req *reques // GetRateBasedRule API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the RateBasedRule that is specified by the RuleId that you included // in the GetRateBasedRule request. // @@ -4200,6 +4702,16 @@ func (c *WAF) GetRateBasedRuleManagedKeysRequest(input *GetRateBasedRuleManagedK // GetRateBasedRuleManagedKeys API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of IP addresses currently being blocked by the RateBasedRule // that is specified by the RuleId. The maximum number of managed keys that // will be blocked is 10,000. 
If more than 10,000 addresses exceed the rate @@ -4319,6 +4831,16 @@ func (c *WAF) GetRegexMatchSetRequest(input *GetRegexMatchSetInput) (req *reques // GetRegexMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the RegexMatchSet specified by RegexMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4406,6 +4928,16 @@ func (c *WAF) GetRegexPatternSetRequest(input *GetRegexPatternSetInput) (req *re // GetRegexPatternSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the RegexPatternSet specified by RegexPatternSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4493,6 +5025,16 @@ func (c *WAF) GetRuleRequest(input *GetRuleInput) (req *request.Request, output // GetRule API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the Rule that is specified by the RuleId that you included in the // GetRule request. // @@ -4581,6 +5123,16 @@ func (c *WAF) GetRuleGroupRequest(input *GetRuleGroupInput) (req *request.Reques // GetRuleGroup API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the RuleGroup that is specified by the RuleGroupId that you included // in the GetRuleGroup request. // @@ -4667,6 +5219,16 @@ func (c *WAF) GetSampledRequestsRequest(input *GetSampledRequestsInput) (req *re // GetSampledRequests API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Gets detailed information about a specified number of requests--a sample--that // AWS WAF randomly selects from among the first 5,000 requests that your AWS // resource received during a time range that you choose. You can specify a @@ -4760,6 +5322,16 @@ func (c *WAF) GetSizeConstraintSetRequest(input *GetSizeConstraintSetInput) (req // GetSizeConstraintSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the SizeConstraintSet specified by SizeConstraintSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4847,6 +5419,16 @@ func (c *WAF) GetSqlInjectionMatchSetRequest(input *GetSqlInjectionMatchSetInput // GetSqlInjectionMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4934,6 +5516,16 @@ func (c *WAF) GetWebACLRequest(input *GetWebACLInput) (req *request.Request, out // GetWebACL API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the WebACL that is specified by WebACLId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5021,6 +5613,16 @@ func (c *WAF) GetXssMatchSetRequest(input *GetXssMatchSetInput) (req *request.Re // GetXssMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. 
+// // Returns the XssMatchSet that is specified by XssMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5108,6 +5710,16 @@ func (c *WAF) ListActivatedRulesInRuleGroupRequest(input *ListActivatedRulesInRu // ListActivatedRulesInRuleGroup API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of ActivatedRule objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5220,6 +5832,16 @@ func (c *WAF) ListByteMatchSetsRequest(input *ListByteMatchSetsInput) (req *requ // ListByteMatchSets API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of ByteMatchSetSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5304,6 +5926,16 @@ func (c *WAF) ListGeoMatchSetsRequest(input *ListGeoMatchSetsInput) (req *reques // ListGeoMatchSets API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of GeoMatchSetSummary objects in the response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5388,6 +6020,16 @@ func (c *WAF) ListIPSetsRequest(input *ListIPSetsInput) (req *request.Request, o // ListIPSets API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of IPSetSummary objects in the response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5472,6 +6114,16 @@ func (c *WAF) ListLoggingConfigurationsRequest(input *ListLoggingConfigurationsI // ListLoggingConfigurations API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. 
For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of LoggingConfiguration objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5584,6 +6236,16 @@ func (c *WAF) ListRateBasedRulesRequest(input *ListRateBasedRulesInput) (req *re // ListRateBasedRules API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of RuleSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5668,6 +6330,16 @@ func (c *WAF) ListRegexMatchSetsRequest(input *ListRegexMatchSetsInput) (req *re // ListRegexMatchSets API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of RegexMatchSetSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5752,6 +6424,16 @@ func (c *WAF) ListRegexPatternSetsRequest(input *ListRegexPatternSetsInput) (req // ListRegexPatternSets API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of RegexPatternSetSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5836,6 +6518,16 @@ func (c *WAF) ListRuleGroupsRequest(input *ListRuleGroupsInput) (req *request.Re // ListRuleGroups API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. 
+// // Returns an array of RuleGroup objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5916,6 +6608,16 @@ func (c *WAF) ListRulesRequest(input *ListRulesInput) (req *request.Request, out // ListRules API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of RuleSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6000,6 +6702,16 @@ func (c *WAF) ListSizeConstraintSetsRequest(input *ListSizeConstraintSetsInput) // ListSizeConstraintSets API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of SizeConstraintSetSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6084,6 +6796,16 @@ func (c *WAF) ListSqlInjectionMatchSetsRequest(input *ListSqlInjectionMatchSetsI // ListSqlInjectionMatchSets API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of SqlInjectionMatchSet objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6168,6 +6890,16 @@ func (c *WAF) ListSubscribedRuleGroupsRequest(input *ListSubscribedRuleGroupsInp // ListSubscribedRuleGroups API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of RuleGroup objects that you are subscribed to. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6251,6 +6983,26 @@ func (c *WAF) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req * // ListTagsForResource API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. 
For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// +// Retrieves the tags associated with the specified AWS resource. Tags are key:value +// pairs that you can use to categorize and manage your resources, for purposes +// like billing. For example, you might set the tag key to "customer" and the +// value to the customer name or ID. You can specify one or more tags to add +// to each AWS resource, up to 50 tags for a resource. +// +// Tagging is only available through the API, SDKs, and CLI. You can't manage +// or view tags through the AWS WAF Classic console. You can tag the AWS resources +// that you manage through AWS WAF Classic: web ACLs, rule groups, and rules. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6367,6 +7119,16 @@ func (c *WAF) ListWebACLsRequest(input *ListWebACLsInput) (req *request.Request, // ListWebACLs API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of WebACLSummary objects in the response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6451,6 +7213,16 @@ func (c *WAF) ListXssMatchSetsRequest(input *ListXssMatchSetsInput) (req *reques // ListXssMatchSets API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of XssMatchSet objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6535,6 +7307,16 @@ func (c *WAF) PutLoggingConfigurationRequest(input *PutLoggingConfigurationInput // PutLoggingConfiguration API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Associates a LoggingConfiguration with a specified web ACL. 
// // You can access information about all traffic that AWS WAF inspects using @@ -6652,8 +7434,18 @@ func (c *WAF) PutPermissionPolicyRequest(input *PutPermissionPolicyInput) (req * // PutPermissionPolicy API operation for AWS WAF. // -// Attaches a IAM policy to the specified resource. The only supported use for -// this action is to share a RuleGroup across accounts. +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// +// Attaches an IAM policy to the specified resource. The only supported use +// for this action is to share a RuleGroup across accounts. // // The PutPermissionPolicy is subject to the following restrictions: // @@ -6790,6 +7582,27 @@ func (c *WAF) TagResourceRequest(input *TagResourceInput) (req *request.Request, // TagResource API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// +// Associates tags with the specified AWS resource. Tags are key:value pairs +// that you can use to categorize and manage your resources, for purposes like +// billing. For example, you might set the tag key to "customer" and the value +// to the customer name or ID. You can specify one or more tags to add to each +// AWS resource, up to 50 tags for a resource. +// +// Tagging is only available through the API, SDKs, and CLI. You can't manage +// or view tags through the AWS WAF Classic console. You can use this action +// to tag the AWS resources that you manage through AWS WAF Classic: web ACLs, +// rule groups, and rules. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6913,6 +7726,16 @@ func (c *WAF) UntagResourceRequest(input *UntagResourceInput) (req *request.Requ // UntagResource API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7029,6 +7852,16 @@ func (c *WAF) UpdateByteMatchSetRequest(input *UpdateByteMatchSetInput) (req *re // UpdateByteMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. 
For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For // each ByteMatchTuple object, you specify the following values: // @@ -7225,6 +8058,16 @@ func (c *WAF) UpdateGeoMatchSetRequest(input *UpdateGeoMatchSetInput) (req *requ // UpdateGeoMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes GeoMatchConstraint objects in an GeoMatchSet. For each // GeoMatchConstraint object, you specify the following values: // @@ -7420,6 +8263,16 @@ func (c *WAF) UpdateIPSetRequest(input *UpdateIPSetInput) (req *request.Request, // UpdateIPSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor // object, you specify the following values: // @@ -7639,6 +8492,16 @@ func (c *WAF) UpdateRateBasedRuleRequest(input *UpdateRateBasedRuleInput) (req * // UpdateRateBasedRule API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes Predicate objects in a rule and updates the RateLimit // in the rule. // @@ -7655,13 +8518,13 @@ func (c *WAF) UpdateRateBasedRuleRequest(input *UpdateRateBasedRuleInput) (req * // // * A ByteMatchSet that matches BadBot in the User-Agent header // -// Further, you specify a RateLimit of 15,000. +// Further, you specify a RateLimit of 1,000. // // You then add the RateBasedRule to a WebACL and specify that you want to block // requests that satisfy the rule. For a request to be blocked, it must come // from the IP address 192.0.2.44 and the User-Agent header in the request must // contain the value BadBot. Further, requests that match these two conditions -// much be received at a rate of more than 15,000 every five minutes. 
If the +// much be received at a rate of more than 1,000 every five minutes. If the // rate drops below this limit, AWS WAF no longer blocks the requests. // // As a second example, suppose you want to limit requests to a particular page @@ -7673,7 +8536,7 @@ func (c *WAF) UpdateRateBasedRuleRequest(input *UpdateRateBasedRuleInput) (req * // // * A TargetString of login // -// Further, you specify a RateLimit of 15,000. +// Further, you specify a RateLimit of 1,000. // // By adding this RateBasedRule to a WebACL, you could limit requests to your // login page without affecting the rest of your site. @@ -7844,6 +8707,16 @@ func (c *WAF) UpdateRegexMatchSetRequest(input *UpdateRegexMatchSetInput) (req * // UpdateRegexMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes RegexMatchTuple objects (filters) in a RegexMatchSet. // For each RegexMatchSetUpdate object, you specify the following values: // @@ -8011,6 +8884,16 @@ func (c *WAF) UpdateRegexPatternSetRequest(input *UpdateRegexPatternSetInput) (r // UpdateRegexPatternSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes RegexPatternString objects in a RegexPatternSet. For each // RegexPatternString object, you specify the following values: // @@ -8175,6 +9058,16 @@ func (c *WAF) UpdateRuleRequest(input *UpdateRuleInput) (req *request.Request, o // UpdateRule API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies // a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests // that you want to allow, block, or count. If you add more than one predicate @@ -8376,6 +9269,16 @@ func (c *WAF) UpdateRuleGroupRequest(input *UpdateRuleGroupInput) (req *request. // UpdateRuleGroup API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
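Illustrative aside (not part of the vendored diff): the UpdateRateBasedRule text above walks through attaching an IPSet and a ByteMatchSet predicate and setting a RateLimit of 1,000. A minimal sketch of that call against this SDK might look like the following; the rule and predicate IDs are placeholders, and WAF Classic requires a fresh change token for every mutating call.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	// Every WAF Classic update needs a change token.
	token, err := client.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder IDs for the RateBasedRule and the IPSet used as a predicate.
	_, err = client.UpdateRateBasedRule(&waf.UpdateRateBasedRuleInput{
		ChangeToken: token.ChangeToken,
		RuleId:      aws.String("example-rate-based-rule-id"),
		RateLimit:   aws.Int64(1000), // requests per five-minute window, as described above
		Updates: []*waf.RuleUpdate{
			{
				Action: aws.String("INSERT"),
				Predicate: &waf.Predicate{
					DataId:  aws.String("example-ipset-id"),
					Negated: aws.Bool(false),
					Type:    aws.String("IPMatch"),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}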
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes ActivatedRule objects in a RuleGroup. // // You can only insert REGULAR rules into a rule group. @@ -8554,6 +9457,16 @@ func (c *WAF) UpdateSizeConstraintSetRequest(input *UpdateSizeConstraintSetInput // UpdateSizeConstraintSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. // For each SizeConstraint object, you specify the following values: // @@ -8761,6 +9674,16 @@ func (c *WAF) UpdateSqlInjectionMatchSetRequest(input *UpdateSqlInjectionMatchSe // UpdateSqlInjectionMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. // For each SqlInjectionMatchTuple object, you specify the following values: // @@ -8953,6 +9876,16 @@ func (c *WAF) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Reques // UpdateWebACL API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies // web requests that you want to allow, block, or count. When you update a WebACL, // you specify the following values: @@ -8995,12 +9928,12 @@ func (c *WAF) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Reques // with a CloudFront distribution. // // The ActivatedRule can be a rule group. If you specify a rule group as your -// ActivatedRule, you can exclude specific rules from that rule group. +// ActivatedRule , you can exclude specific rules from that rule group. // // If you already have a rule group associated with a web ACL and want to submit // an UpdateWebACL request to exclude certain rules from that rule group, you // must first remove the rule group from the web ACL, the re-insert it again, -// specifying the excluded rules. For details, see ActivatedRule$ExcludedRules. 
+// specifying the excluded rules. For details, see ActivatedRule$ExcludedRules . // // Be aware that if you try to add a RATE_BASED rule to a web ACL without setting // the rule type when first creating the rule, the UpdateWebACL request will @@ -9179,6 +10112,16 @@ func (c *WAF) UpdateXssMatchSetRequest(input *UpdateXssMatchSetInput) (req *requ // UpdateXssMatchSet API operation for AWS WAF. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. For // each XssMatchTuple object, you specify the following values: // @@ -9327,6 +10270,16 @@ func (c *WAF) UpdateXssMatchSetWithContext(ctx aws.Context, input *UpdateXssMatc return out, req.Send() } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The ActivatedRule object in an UpdateWebACL request specifies a Rule that // you want to insert or delete, the priority of the Rule in the WebACL, and // the action that you want AWS WAF to take when a web request matches the Rule @@ -9517,8 +10470,8 @@ func (s *ActivatedRule) SetType(v string) *ActivatedRule { } type BadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9535,17 +10488,17 @@ func (s BadRequestException) GoString() string { func newErrorBadRequestException(v protocol.ResponseMetadata) error { return &BadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s BadRequestException) Code() string { +func (s *BadRequestException) Code() string { return "WAFBadRequestException" } // Message returns the exception's message. -func (s BadRequestException) Message() string { +func (s *BadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9553,24 +10506,34 @@ func (s BadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s BadRequestException) OrigErr() error { +func (s *BadRequestException) OrigErr() error { return nil } -func (s BadRequestException) Error() string { +func (s *BadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s BadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *BadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
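Illustrative aside (not part of the vendored diff): the UpdateWebACL text above notes that an ActivatedRule can reference a rule group and that excluding rules means removing the rule group and re-inserting it with ExcludedRules set. A sketch of that re-insert step might look like this; the IDs, priority, and excluded rule are placeholder assumptions.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	token, err := client.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	// Re-insert the rule group into the web ACL, this time excluding one rule.
	_, err = client.UpdateWebACL(&waf.UpdateWebACLInput{
		ChangeToken: token.ChangeToken,
		WebACLId:    aws.String("example-web-acl-id"),
		Updates: []*waf.WebACLUpdate{
			{
				Action: aws.String("INSERT"),
				ActivatedRule: &waf.ActivatedRule{
					Priority: aws.Int64(1),
					RuleId:   aws.String("example-rule-group-id"),
					Type:     aws.String("GROUP"),
					// Rule groups use OverrideAction rather than Action.
					OverrideAction: &waf.WafOverrideAction{Type: aws.String("NONE")},
					ExcludedRules: []*waf.ExcludedRule{
						{RuleId: aws.String("example-excluded-rule-id")},
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}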
-func (s BadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *BadRequestException) RequestID() string { + return s.RespMetadata.RequestID } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // In a GetByteMatchSet request, ByteMatchSet is a complex type that contains // the ByteMatchSetId and Name of a ByteMatchSet, and the values that you specified // when you updated the ByteMatchSet. @@ -9633,6 +10596,16 @@ func (s *ByteMatchSet) SetName(v string) *ByteMatchSet { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returned by ListByteMatchSets. Each ByteMatchSetSummary object includes the // Name and ByteMatchSetId for one ByteMatchSet. type ByteMatchSetSummary struct { @@ -9676,6 +10649,16 @@ func (s *ByteMatchSetSummary) SetName(v string) *ByteMatchSetSummary { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // In an UpdateByteMatchSet request, ByteMatchSetUpdate specifies whether to // insert or delete a ByteMatchTuple and includes the settings for the ByteMatchTuple. type ByteMatchSetUpdate struct { @@ -9738,6 +10721,16 @@ func (s *ByteMatchSetUpdate) SetByteMatchTuple(v *ByteMatchTuple) *ByteMatchSetU return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The bytes (typically a string that corresponds with ASCII characters) that // you want AWS WAF to search for in web requests, the location in requests // that you want AWS WAF to search, and other settings. @@ -9860,7 +10853,7 @@ type ByteMatchTuple struct { // Text transformations eliminate some of the unusual formatting that attackers // use in web requests in an effort to bypass AWS WAF. 
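Illustrative aside (not part of the vendored diff): the hunks above, and the similar ones that follow, move the modeled exception types to pointer receivers and export RespMetadata. A sketch of how calling code might inspect such an error is shown below; the rule ID is a placeholder, and the generic awserr.Error path is the long-standing fallback.

package main

import (
	"errors"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	_, err := client.GetRule(&waf.GetRuleInput{
		RuleId: aws.String("example-missing-rule-id"), // placeholder
	})
	if err != nil {
		// With pointer receivers, *waf.NonexistentItemException satisfies error,
		// and the exported RespMetadata backs StatusCode() and RequestID().
		var notFound *waf.NonexistentItemException
		if errors.As(err, &notFound) {
			log.Printf("rule not found (HTTP %d, request %s)",
				notFound.StatusCode(), notFound.RequestID())
			return
		}
		// Fallback: the generic awserr.Error interface and its Code/Message methods.
		if aerr, ok := err.(awserr.Error); ok {
			log.Printf("%s: %s", aerr.Code(), aerr.Message())
			return
		}
		log.Fatal(err)
	}
}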
If you specify a transformation, - // AWS WAF performs the transformation on TargetString before inspecting a request + // AWS WAF performs the transformation on FieldToMatch before inspecting it // for a match. // // You can only specify a single type of TextTransformation. @@ -10293,7 +11286,7 @@ type CreateRateBasedRuleInput struct { // change the name of the metric after you create the RateBasedRule. // // MetricName is a required field - MetricName *string `type:"string" required:"true"` + MetricName *string `min:"1" type:"string" required:"true"` // A friendly name or description of the RateBasedRule. You can't change the // name of a RateBasedRule after you create it. @@ -10343,6 +11336,9 @@ func (s *CreateRateBasedRuleInput) Validate() error { if s.MetricName == nil { invalidParams.Add(request.NewErrParamRequired("MetricName")) } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -10649,7 +11645,7 @@ type CreateRuleGroupInput struct { // of the metric after you create the RuleGroup. // // MetricName is a required field - MetricName *string `type:"string" required:"true"` + MetricName *string `min:"1" type:"string" required:"true"` // A friendly name or description of the RuleGroup. You can't change Name after // you create a RuleGroup. @@ -10682,6 +11678,9 @@ func (s *CreateRuleGroupInput) Validate() error { if s.MetricName == nil { invalidParams.Add(request.NewErrParamRequired("MetricName")) } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -10781,7 +11780,7 @@ type CreateRuleInput struct { // of the metric after you create the Rule. // // MetricName is a required field - MetricName *string `type:"string" required:"true"` + MetricName *string `min:"1" type:"string" required:"true"` // A friendly name or description of the Rule. You can't change the name of // a Rule after you create it. @@ -10814,6 +11813,9 @@ func (s *CreateRuleInput) Validate() error { if s.MetricName == nil { invalidParams.Add(request.NewErrParamRequired("MetricName")) } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -11108,7 +12110,7 @@ type CreateWebACLInput struct { // after you create the WebACL. // // MetricName is a required field - MetricName *string `type:"string" required:"true"` + MetricName *string `min:"1" type:"string" required:"true"` // A friendly name or description of the WebACL. You can't change Name after // you create the WebACL. @@ -11144,6 +12146,9 @@ func (s *CreateWebACLInput) Validate() error { if s.MetricName == nil { invalidParams.Add(request.NewErrParamRequired("MetricName")) } + if s.MetricName != nil && len(*s.MetricName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MetricName", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -11205,6 +12210,116 @@ func (s *CreateWebACLInput) SetTags(v []*Tag) *CreateWebACLInput { return s } +type CreateWebACLMigrationStackInput struct { + _ struct{} `type:"structure"` + + // Indicates whether to exclude entities that can't be migrated or to stop the + // migration. Set this to true to ignore unsupported entities in the web ACL + // during the migration. 
Otherwise, if AWS WAF encounters unsupported entities, + // it stops the process and throws an exception. + // + // IgnoreUnsupportedType is a required field + IgnoreUnsupportedType *bool `type:"boolean" required:"true"` + + // The name of the Amazon S3 bucket to store the CloudFormation template in. + // The S3 bucket must be configured as follows for the migration: + // + // * The bucket name must start with aws-waf-migration-. For example, aws-waf-migration-my-web-acl. + // + // * The bucket must be in the Region where you are deploying the template. + // For example, for a web ACL in us-west-2, you must use an Amazon S3 bucket + // in us-west-2 and you must deploy the template stack to us-west-2. + // + // * The bucket policies must permit the migration process to write data. + // For listings of the bucket policies, see the Examples section. + // + // S3BucketName is a required field + S3BucketName *string `min:"3" type:"string" required:"true"` + + // The UUID of the WAF Classic web ACL that you want to migrate to WAF v2. + // + // WebACLId is a required field + WebACLId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateWebACLMigrationStackInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWebACLMigrationStackInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateWebACLMigrationStackInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateWebACLMigrationStackInput"} + if s.IgnoreUnsupportedType == nil { + invalidParams.Add(request.NewErrParamRequired("IgnoreUnsupportedType")) + } + if s.S3BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketName")) + } + if s.S3BucketName != nil && len(*s.S3BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("S3BucketName", 3)) + } + if s.WebACLId == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLId")) + } + if s.WebACLId != nil && len(*s.WebACLId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebACLId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIgnoreUnsupportedType sets the IgnoreUnsupportedType field's value. +func (s *CreateWebACLMigrationStackInput) SetIgnoreUnsupportedType(v bool) *CreateWebACLMigrationStackInput { + s.IgnoreUnsupportedType = &v + return s +} + +// SetS3BucketName sets the S3BucketName field's value. +func (s *CreateWebACLMigrationStackInput) SetS3BucketName(v string) *CreateWebACLMigrationStackInput { + s.S3BucketName = &v + return s +} + +// SetWebACLId sets the WebACLId field's value. +func (s *CreateWebACLMigrationStackInput) SetWebACLId(v string) *CreateWebACLMigrationStackInput { + s.WebACLId = &v + return s +} + +type CreateWebACLMigrationStackOutput struct { + _ struct{} `type:"structure"` + + // The URL of the template created in Amazon S3. + // + // S3ObjectUrl is a required field + S3ObjectUrl *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateWebACLMigrationStackOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateWebACLMigrationStackOutput) GoString() string { + return s.String() +} + +// SetS3ObjectUrl sets the S3ObjectUrl field's value. 
+func (s *CreateWebACLMigrationStackOutput) SetS3ObjectUrl(v string) *CreateWebACLMigrationStackOutput { + s.S3ObjectUrl = &v + return s +} + type CreateWebACLOutput struct { _ struct{} `type:"structure"` @@ -12462,8 +13577,8 @@ func (s *DeleteXssMatchSetOutput) SetChangeToken(v string) *DeleteXssMatchSetOut // The name specified is invalid. type DisallowedNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12480,17 +13595,17 @@ func (s DisallowedNameException) GoString() string { func newErrorDisallowedNameException(v protocol.ResponseMetadata) error { return &DisallowedNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DisallowedNameException) Code() string { +func (s *DisallowedNameException) Code() string { return "WAFDisallowedNameException" } // Message returns the exception's message. -func (s DisallowedNameException) Message() string { +func (s *DisallowedNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12498,24 +13613,34 @@ func (s DisallowedNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DisallowedNameException) OrigErr() error { +func (s *DisallowedNameException) OrigErr() error { return nil } -func (s DisallowedNameException) Error() string { +func (s *DisallowedNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DisallowedNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DisallowedNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DisallowedNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *DisallowedNameException) RequestID() string { + return s.RespMetadata.RequestID } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The rule to exclude from a rule group. This is applicable only when the ActivatedRule // refers to a RuleGroup. The rule must belong to the RuleGroup that is specified // by the ActivatedRule. @@ -12560,6 +13685,16 @@ func (s *ExcludedRule) SetRuleId(v string) *ExcludedRule { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies where in a web request to look for TargetString. 
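Illustrative aside (not part of the vendored diff): the new CreateWebACLMigrationStack shapes above require an S3 bucket whose name starts with aws-waf-migration- and return the S3 URL of the generated CloudFormation template. A minimal sketch of the call; the bucket name and web ACL ID are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	out, err := client.CreateWebACLMigrationStack(&waf.CreateWebACLMigrationStackInput{
		// Bucket must start with aws-waf-migration- and live in the target Region.
		S3BucketName:          aws.String("aws-waf-migration-my-web-acl"),
		WebACLId:              aws.String("example-classic-web-acl-id"), // placeholder
		IgnoreUnsupportedType: aws.Bool(true),                           // skip entities that can't be migrated
	})
	if err != nil {
		log.Fatal(err)
	}
	// Deploy this template with CloudFormation to create the WAFv2 web ACL.
	fmt.Println("template written to", aws.StringValue(out.S3ObjectUrl))
}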
type FieldToMatch struct { _ struct{} `type:"structure"` @@ -12573,7 +13708,7 @@ type FieldToMatch struct { // parameter name is not case sensitive. // // If the value of Type is any other value, omit Data. - Data *string `type:"string"` + Data *string `min:"1" type:"string"` // The part of the web request that you want AWS WAF to search for a specified // string. Parts of a request that you can search include the following: @@ -12625,6 +13760,9 @@ func (s FieldToMatch) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *FieldToMatch) Validate() error { invalidParams := request.ErrInvalidParams{Context: "FieldToMatch"} + if s.Data != nil && len(*s.Data) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Data", 1)) + } if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } @@ -12647,6 +13785,16 @@ func (s *FieldToMatch) SetType(v string) *FieldToMatch { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The country from which web requests originate that you want AWS WAF to search // for. type GeoMatchConstraint struct { @@ -12702,6 +13850,16 @@ func (s *GeoMatchConstraint) SetValue(v string) *GeoMatchConstraint { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Contains one or more countries that AWS WAF will search for. type GeoMatchSet struct { _ struct{} `type:"structure"` @@ -12755,6 +13913,16 @@ func (s *GeoMatchSet) SetName(v string) *GeoMatchSet { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Contains the identifier and the name of the GeoMatchSet. type GeoMatchSetSummary struct { _ struct{} `type:"structure"` @@ -12794,6 +13962,16 @@ func (s *GeoMatchSetSummary) SetName(v string) *GeoMatchSetSummary { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. 
+// // Specifies the type of update to perform to an GeoMatchSet with UpdateGeoMatchSet. type GeoMatchSetUpdate struct { _ struct{} `type:"structure"` @@ -13742,9 +14920,10 @@ type GetSampledRequestsInput struct { RuleId *string `min:"1" type:"string" required:"true"` // The start date and time and the end date and time of the range for which - // you want GetSampledRequests to return a sample of requests. Specify the date - // and time in the following format: "2016-09-27T14:50Z". You can specify any - // time range in the previous three hours. + // you want GetSampledRequests to return a sample of requests. You must specify + // the times in Coordinated Universal Time (UTC) format. UTC format includes + // the special designator, Z. For example, "2016-09-27T14:50Z". You can specify + // any time range in the previous three hours. // // TimeWindow is a required field TimeWindow *TimeWindow `type:"structure" required:"true"` @@ -13841,7 +15020,8 @@ type GetSampledRequestsOutput struct { // Usually, TimeWindow is the time range that you specified in the GetSampledRequests // request. However, if your AWS resource received more than 5,000 requests // during the time range that you specified in the request, GetSampledRequests - // returns the time range for the first 5,000 requests. + // returns the time range for the first 5,000 requests. Times are in Coordinated + // Universal Time (UTC) format. TimeWindow *TimeWindow `type:"structure"` } @@ -14175,6 +15355,16 @@ func (s *GetXssMatchSetOutput) SetXssMatchSet(v *XssMatchSet) *GetXssMatchSetOut return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The response from a GetSampledRequests request includes an HTTPHeader complex // type that appears as Headers in the response syntax. HTTPHeader contains // the names and values of all of the headers that appear in one of the web @@ -14211,6 +15401,16 @@ func (s *HTTPHeader) SetValue(v string) *HTTPHeader { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The response from a GetSampledRequests request includes an HTTPRequest complex // type that appears as Request in the response syntax. HTTPRequest contains // information about one of the web requests that were returned by GetSampledRequests. @@ -14294,6 +15494,16 @@ func (s *HTTPRequest) SetURI(v string) *HTTPRequest { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
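Illustrative aside (not part of the vendored diff): the GetSampledRequests changes above stress that TimeWindow values must be in UTC and fall within the previous three hours. A sketch of building such a request; the web ACL and rule IDs are placeholders.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	// Times must be UTC ("2016-09-27T14:50Z" style) and within the last three hours.
	end := time.Now().UTC()
	start := end.Add(-1 * time.Hour)

	out, err := client.GetSampledRequests(&waf.GetSampledRequestsInput{
		WebAclId: aws.String("example-web-acl-id"), // placeholder
		RuleId:   aws.String("example-rule-id"),    // placeholder
		MaxItems: aws.Int64(100),
		TimeWindow: &waf.TimeWindow{
			StartTime: aws.Time(start),
			EndTime:   aws.Time(end),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("sampled %d of ~%d requests\n",
		len(out.SampledRequests), aws.Int64Value(out.PopulationSize))
}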
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Contains one or more IP addresses or blocks of IP addresses specified in // Classless Inter-Domain Routing (CIDR) notation. AWS WAF supports IPv4 address // ranges: /8 and any range between /16 through /32. AWS WAF supports IPv6 address @@ -14359,6 +15569,16 @@ func (s *IPSet) SetName(v string) *IPSet { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies the IP address type (IPV4 or IPV6) and the IP address range (in // CIDR format) that web requests originate from. type IPSetDescriptor struct { @@ -14390,7 +15610,7 @@ type IPSetDescriptor struct { // specify 1111:0000:0000:0000:0000:0000:0000:0000/64. // // Value is a required field - Value *string `type:"string" required:"true"` + Value *string `min:"1" type:"string" required:"true"` } // String returns the string representation @@ -14412,6 +15632,9 @@ func (s *IPSetDescriptor) Validate() error { if s.Value == nil { invalidParams.Add(request.NewErrParamRequired("Value")) } + if s.Value != nil && len(*s.Value) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Value", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -14431,6 +15654,16 @@ func (s *IPSetDescriptor) SetValue(v string) *IPSetDescriptor { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Contains the identifier and the name of the IPSet. type IPSetSummary struct { _ struct{} `type:"structure"` @@ -14470,6 +15703,16 @@ func (s *IPSetSummary) SetName(v string) *IPSetSummary { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies the type of update to perform to an IPSet with UpdateIPSet. type IPSetUpdate struct { _ struct{} `type:"structure"` @@ -14532,8 +15775,8 @@ func (s *IPSetUpdate) SetIPSetDescriptor(v *IPSetDescriptor) *IPSetUpdate { // The operation failed because of a system problem, even though the request // was valid. Retry your request. 
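Illustrative aside (not part of the vendored diff): the IPSet hunks above add a minimum-length check on IPSetDescriptor.Value and list the CIDR forms WAF Classic accepts. A sketch of inserting the /32 descriptor from the doc example via UpdateIPSet; the IPSet ID is a placeholder.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
)

func main() {
	client := waf.New(session.Must(session.NewSession()))

	token, err := client.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	_, err = client.UpdateIPSet(&waf.UpdateIPSetInput{
		ChangeToken: token.ChangeToken,
		IPSetId:     aws.String("example-ipset-id"), // placeholder
		Updates: []*waf.IPSetUpdate{
			{
				Action: aws.String("INSERT"),
				IPSetDescriptor: &waf.IPSetDescriptor{
					Type:  aws.String("IPV4"),
					Value: aws.String("192.0.2.44/32"), // single address, per the doc example
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}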
type InternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14550,17 +15793,17 @@ func (s InternalErrorException) GoString() string { func newErrorInternalErrorException(v protocol.ResponseMetadata) error { return &InternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalErrorException) Code() string { +func (s *InternalErrorException) Code() string { return "WAFInternalErrorException" } // Message returns the exception's message. -func (s InternalErrorException) Message() string { +func (s *InternalErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14568,29 +15811,29 @@ func (s InternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InternalErrorException) OrigErr() error { +func (s *InternalErrorException) OrigErr() error { return nil } -func (s InternalErrorException) Error() string { +func (s *InternalErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalErrorException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. type InvalidAccountException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14607,17 +15850,17 @@ func (s InvalidAccountException) GoString() string { func newErrorInvalidAccountException(v protocol.ResponseMetadata) error { return &InvalidAccountException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidAccountException) Code() string { +func (s *InvalidAccountException) Code() string { return "WAFInvalidAccountException" } // Message returns the exception's message. -func (s InvalidAccountException) Message() string { +func (s *InvalidAccountException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14625,22 +15868,22 @@ func (s InvalidAccountException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidAccountException) OrigErr() error { +func (s *InvalidAccountException) OrigErr() error { return nil } -func (s InvalidAccountException) Error() string { +func (s *InvalidAccountException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidAccountException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidAccountException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s InvalidAccountException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidAccountException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because there was nothing to do. For example: @@ -14660,8 +15903,8 @@ func (s InvalidAccountException) RequestID() string { // * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple // already exists in the specified WebACL. type InvalidOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14678,17 +15921,17 @@ func (s InvalidOperationException) GoString() string { func newErrorInvalidOperationException(v protocol.ResponseMetadata) error { return &InvalidOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidOperationException) Code() string { +func (s *InvalidOperationException) Code() string { return "WAFInvalidOperationException" } // Message returns the exception's message. -func (s InvalidOperationException) Message() string { +func (s *InvalidOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14696,22 +15939,22 @@ func (s InvalidOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidOperationException) OrigErr() error { +func (s *InvalidOperationException) OrigErr() error { return nil } -func (s InvalidOperationException) Error() string { +func (s *InvalidOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidOperationException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because AWS WAF didn't recognize a parameter in the @@ -14742,8 +15985,8 @@ func (s InvalidOperationException) RequestID() string { // * Your request references an ARN that is malformed, or corresponds to // a resource with which a web ACL cannot be associated. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Field *string `locationName:"field" type:"string" enum:"ParameterExceptionField"` @@ -14766,17 +16009,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "WAFInvalidParameterException" } // Message returns the exception's message. 
-func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14784,22 +16027,22 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because the specified policy is not in the proper format. @@ -14825,8 +16068,8 @@ func (s InvalidParameterException) RequestID() string { // // * Your policy must be composed using IAM Policy version 2012-10-17. type InvalidPermissionPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14843,17 +16086,17 @@ func (s InvalidPermissionPolicyException) GoString() string { func newErrorInvalidPermissionPolicyException(v protocol.ResponseMetadata) error { return &InvalidPermissionPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPermissionPolicyException) Code() string { +func (s *InvalidPermissionPolicyException) Code() string { return "WAFInvalidPermissionPolicyException" } // Message returns the exception's message. -func (s InvalidPermissionPolicyException) Message() string { +func (s *InvalidPermissionPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14861,28 +16104,28 @@ func (s InvalidPermissionPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPermissionPolicyException) OrigErr() error { +func (s *InvalidPermissionPolicyException) OrigErr() error { return nil } -func (s InvalidPermissionPolicyException) Error() string { +func (s *InvalidPermissionPolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPermissionPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPermissionPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPermissionPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPermissionPolicyException) RequestID() string { + return s.RespMetadata.RequestID } // The regular expression (regex) you specified in RegexPatternString is invalid. 
type InvalidRegexPatternException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14899,17 +16142,17 @@ func (s InvalidRegexPatternException) GoString() string { func newErrorInvalidRegexPatternException(v protocol.ResponseMetadata) error { return &InvalidRegexPatternException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRegexPatternException) Code() string { +func (s *InvalidRegexPatternException) Code() string { return "WAFInvalidRegexPatternException" } // Message returns the exception's message. -func (s InvalidRegexPatternException) Message() string { +func (s *InvalidRegexPatternException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14917,22 +16160,22 @@ func (s InvalidRegexPatternException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRegexPatternException) OrigErr() error { +func (s *InvalidRegexPatternException) OrigErr() error { return nil } -func (s InvalidRegexPatternException) Error() string { +func (s *InvalidRegexPatternException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRegexPatternException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRegexPatternException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRegexPatternException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRegexPatternException) RequestID() string { + return s.RespMetadata.RequestID } // The operation exceeds a resource limit, for example, the maximum number of @@ -14940,8 +16183,8 @@ func (s InvalidRegexPatternException) RequestID() string { // see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. type LimitsExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -14958,17 +16201,17 @@ func (s LimitsExceededException) GoString() string { func newErrorLimitsExceededException(v protocol.ResponseMetadata) error { return &LimitsExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitsExceededException) Code() string { +func (s *LimitsExceededException) Code() string { return "WAFLimitsExceededException" } // Message returns the exception's message. -func (s LimitsExceededException) Message() string { +func (s *LimitsExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -14976,22 +16219,22 @@ func (s LimitsExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitsExceededException) OrigErr() error { +func (s *LimitsExceededException) OrigErr() error { return nil } -func (s LimitsExceededException) Error() string { +func (s *LimitsExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s LimitsExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitsExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitsExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitsExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListActivatedRulesInRuleGroupInput struct { @@ -16209,6 +17452,25 @@ type ListTagsForResourceOutput struct { NextMarker *string `min:"1" type:"string"` + // + // This is AWS WAF Classic documentation. For more information, see AWS WAF + // Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) + // in the developer guide. + // + // For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS + // WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). + // With the latest version, AWS WAF has a single set of endpoints for regional + // and global use. + // + // Information for a tag associated with an AWS resource. Tags are key:value + // pairs that you can use to categorize and manage your resources, for purposes + // like billing. For example, you might set the tag key to "customer" and the + // value to the customer name or ID. You can specify one or more tags to add + // to each AWS resource, up to 50 tags for a resource. + // + // Tagging is only available through the API, SDKs, and CLI. You can't manage + // or view tags through the AWS WAF Classic console. You can tag the AWS resources + // that you manage through AWS WAF Classic: web ACLs, rule groups, and rules. TagInfoForResource *TagInfoForResource `type:"structure"` } @@ -16412,6 +17674,16 @@ func (s *ListXssMatchSetsOutput) SetXssMatchSets(v []*XssMatchSetSummary) *ListX return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The Amazon Kinesis Data Firehose, RedactedFields information, and the web // ACL Amazon Resource Name (ARN). type LoggingConfiguration struct { @@ -16507,8 +17779,8 @@ func (s *LoggingConfiguration) SetResourceArn(v string) *LoggingConfiguration { // // * You tried to delete an IPSet that references one or more IP addresses. type NonEmptyEntityException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16525,17 +17797,17 @@ func (s NonEmptyEntityException) GoString() string { func newErrorNonEmptyEntityException(v protocol.ResponseMetadata) error { return &NonEmptyEntityException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NonEmptyEntityException) Code() string { +func (s *NonEmptyEntityException) Code() string { return "WAFNonEmptyEntityException" } // Message returns the exception's message. 
-func (s NonEmptyEntityException) Message() string { +func (s *NonEmptyEntityException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16543,22 +17815,22 @@ func (s NonEmptyEntityException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NonEmptyEntityException) OrigErr() error { +func (s *NonEmptyEntityException) OrigErr() error { return nil } -func (s NonEmptyEntityException) Error() string { +func (s *NonEmptyEntityException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NonEmptyEntityException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NonEmptyEntityException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NonEmptyEntityException) RequestID() string { - return s.respMetadata.RequestID +func (s *NonEmptyEntityException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because you tried to add an object to or delete an object @@ -16576,8 +17848,8 @@ func (s NonEmptyEntityException) RequestID() string { // * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from // a ByteMatchSet that doesn't exist. type NonexistentContainerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16594,17 +17866,17 @@ func (s NonexistentContainerException) GoString() string { func newErrorNonexistentContainerException(v protocol.ResponseMetadata) error { return &NonexistentContainerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NonexistentContainerException) Code() string { +func (s *NonexistentContainerException) Code() string { return "WAFNonexistentContainerException" } // Message returns the exception's message. -func (s NonexistentContainerException) Message() string { +func (s *NonexistentContainerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16612,28 +17884,28 @@ func (s NonexistentContainerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NonexistentContainerException) OrigErr() error { +func (s *NonexistentContainerException) OrigErr() error { return nil } -func (s NonexistentContainerException) Error() string { +func (s *NonexistentContainerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NonexistentContainerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NonexistentContainerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NonexistentContainerException) RequestID() string { - return s.respMetadata.RequestID +func (s *NonexistentContainerException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because the referenced object doesn't exist. 
type NonexistentItemException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -16650,17 +17922,17 @@ func (s NonexistentItemException) GoString() string { func newErrorNonexistentItemException(v protocol.ResponseMetadata) error { return &NonexistentItemException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NonexistentItemException) Code() string { +func (s *NonexistentItemException) Code() string { return "WAFNonexistentItemException" } // Message returns the exception's message. -func (s NonexistentItemException) Message() string { +func (s *NonexistentItemException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -16668,24 +17940,34 @@ func (s NonexistentItemException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NonexistentItemException) OrigErr() error { +func (s *NonexistentItemException) OrigErr() error { return nil } -func (s NonexistentItemException) Error() string { +func (s *NonexistentItemException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NonexistentItemException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NonexistentItemException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NonexistentItemException) RequestID() string { - return s.respMetadata.RequestID +func (s *NonexistentItemException) RequestID() string { + return s.RespMetadata.RequestID } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies the ByteMatchSet, IPSet, SqlInjectionMatchSet, XssMatchSet, RegexMatchSet, // GeoMatchSet, and SizeConstraintSet objects that you want to add to a Rule // and, for each object, indicates whether you want to negate the settings, @@ -16914,6 +18196,16 @@ func (s PutPermissionPolicyOutput) GoString() string { return s.String() } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // A RateBasedRule is identical to a regular Rule, with one addition: a RateBasedRule // counts the number of requests that arrive from a specified IP address every // five minutes. For example, based on recent requests that you've seen from @@ -16924,9 +18216,9 @@ func (s PutPermissionPolicyOutput) GoString() string { // // * They contain the value BadBot in the User-Agent header. 
// -// In the rule, you also define the rate limit as 15,000. +// In the rule, you also define the rate limit as 1,000. // -// Requests that meet both of these conditions and exceed 15,000 requests every +// Requests that meet both of these conditions and exceed 1,000 requests every // five minutes trigger the rule's action (block or count), which is defined // in the web ACL. type RateBasedRule struct { @@ -16943,7 +18235,7 @@ type RateBasedRule struct { // 128 and minimum length one. It can't contain whitespace or metric names reserved // for AWS WAF, including "All" and "Default_Action." You can't change the name // of the metric after you create the RateBasedRule. - MetricName *string `type:"string"` + MetricName *string `min:"1" type:"string"` // A friendly name or description for a RateBasedRule. You can't change the // name of a RateBasedRule after you create it. @@ -17029,8 +18321,8 @@ func (s *RateBasedRule) SetRuleId(v string) *RateBasedRule { // // * You tried to delete a Rule that is still referenced by a WebACL. type ReferencedItemException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -17047,17 +18339,17 @@ func (s ReferencedItemException) GoString() string { func newErrorReferencedItemException(v protocol.ResponseMetadata) error { return &ReferencedItemException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ReferencedItemException) Code() string { +func (s *ReferencedItemException) Code() string { return "WAFReferencedItemException" } // Message returns the exception's message. -func (s ReferencedItemException) Message() string { +func (s *ReferencedItemException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -17065,24 +18357,34 @@ func (s ReferencedItemException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ReferencedItemException) OrigErr() error { +func (s *ReferencedItemException) OrigErr() error { return nil } -func (s ReferencedItemException) Error() string { +func (s *ReferencedItemException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ReferencedItemException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ReferencedItemException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ReferencedItemException) RequestID() string { - return s.respMetadata.RequestID +func (s *ReferencedItemException) RequestID() string { + return s.RespMetadata.RequestID } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // In a GetRegexMatchSet request, RegexMatchSet is a complex type that contains // the RegexMatchSetId and Name of a RegexMatchSet, and the values that you // specified when you updated the RegexMatchSet. 
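The hunks above, like the rest of this file, switch every modeled WAF exception from value receivers over an unexported respMetadata field to pointer receivers over an exported RespMetadata field. For provider code that consumes this vendored SDK, the practical effect is that these exceptions can be matched as concrete pointer types. A minimal sketch, not part of the patch, assuming a configured *waf.WAF client and that the operation surfaces the modeled exception the way these unmarshalers return it:

package example

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
)

// describeRule looks up a WAF Classic rule and treats "not found" separately
// from other failures by matching the modeled exception type directly. With
// the pointer-receiver change above, the error the SDK returns is the
// *waf.NonexistentItemException itself, so errors.As can target it.
func describeRule(svc *waf.WAF, ruleID string) error {
	_, err := svc.GetRule(&waf.GetRuleInput{RuleId: aws.String(ruleID)})
	if err != nil {
		var nfe *waf.NonexistentItemException
		if errors.As(err, &nfe) {
			// RespMetadata is now exported, so the status code and request
			// ID remain available through the awserr-style accessors.
			fmt.Printf("rule %s not found (status %d, request %s)\n",
				ruleID, nfe.StatusCode(), nfe.RequestID())
			return nil
		}
		return err
	}
	return nil
}

The same pattern applies to every exception type touched in this diff.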
@@ -17149,6 +18451,16 @@ func (s *RegexMatchSet) SetRegexMatchTuples(v []*RegexMatchTuple) *RegexMatchSet return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returned by ListRegexMatchSets. Each RegexMatchSetSummary object includes // the Name and RegexMatchSetId for one RegexMatchSet. type RegexMatchSetSummary struct { @@ -17192,6 +18504,16 @@ func (s *RegexMatchSetSummary) SetRegexMatchSetId(v string) *RegexMatchSetSummar return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // In an UpdateRegexMatchSet request, RegexMatchSetUpdate specifies whether // to insert or delete a RegexMatchTuple and includes the settings for the RegexMatchTuple. type RegexMatchSetUpdate struct { @@ -17255,6 +18577,16 @@ func (s *RegexMatchSetUpdate) SetRegexMatchTuple(v *RegexMatchTuple) *RegexMatch return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The regular expression pattern that you want AWS WAF to search for in web // requests, the location in requests that you want AWS WAF to search, and other // settings. Each RegexMatchTuple object contains: @@ -17418,6 +18750,16 @@ func (s *RegexMatchTuple) SetTextTransformation(v string) *RegexMatchTuple { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The RegexPatternSet specifies the regular expression (regex) pattern that // you want AWS WAF to search for, such as B[a@]dB[o0]t. You can then configure // AWS WAF to reject those requests. @@ -17472,6 +18814,16 @@ func (s *RegexPatternSet) SetRegexPatternStrings(v []*string) *RegexPatternSet { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
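The RegexPatternSet documentation added above uses B[a@]dB[o0]t as its example pattern. As an illustration only, that pattern could be attached to an existing set through the RegexPatternSetUpdate and UpdateRegexPatternSetInput shapes in this file; the set ID and change token below are placeholders, and the imports are the same as in the earlier sketch.

// insertBadBotPattern inserts the documentation's example regex,
// B[a@]dB[o0]t, into an existing RegexPatternSet. The Action value comes
// from the ChangeAction constants defined later in this file.
func insertBadBotPattern(svc *waf.WAF, patternSetID, changeToken string) error {
	_, err := svc.UpdateRegexPatternSet(&waf.UpdateRegexPatternSetInput{
		ChangeToken:       aws.String(changeToken),
		RegexPatternSetId: aws.String(patternSetID),
		Updates: []*waf.RegexPatternSetUpdate{
			{
				Action:             aws.String(waf.ChangeActionInsert),
				RegexPatternString: aws.String(`B[a@]dB[o0]t`),
			},
		},
	})
	return err
}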
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returned by ListRegexPatternSets. Each RegexPatternSetSummary object includes // the Name and RegexPatternSetId for one RegexPatternSet. type RegexPatternSetSummary struct { @@ -17516,6 +18868,16 @@ func (s *RegexPatternSetSummary) SetRegexPatternSetId(v string) *RegexPatternSet return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // In an UpdateRegexPatternSet request, RegexPatternSetUpdate specifies whether // to insert or delete a RegexPatternString and includes the settings for the // RegexPatternString. @@ -17575,6 +18937,16 @@ func (s *RegexPatternSetUpdate) SetRegexPatternString(v string) *RegexPatternSet return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // A combination of ByteMatchSet, IPSet, and/or SqlInjectionMatchSet objects // that identify the web requests that you want to allow, block, or count. For // example, you might create a Rule that includes the following predicates: @@ -17595,7 +18967,7 @@ type Rule struct { // 128 and minimum length one. It can't contain whitespace or metric names reserved // for AWS WAF, including "All" and "Default_Action." You can't change MetricName // after you create the Rule. - MetricName *string `type:"string"` + MetricName *string `min:"1" type:"string"` // The friendly name or description for the Rule. You can't change the name // of a Rule after you create it. @@ -17652,6 +19024,16 @@ func (s *Rule) SetRuleId(v string) *Rule { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // A collection of predefined rules that you can add to a web ACL. // // Rule groups are subject to the following limits: @@ -17670,7 +19052,7 @@ type RuleGroup struct { // 128 and minimum length one. It can't contain whitespace or metric names reserved // for AWS WAF, including "All" and "Default_Action." You can't change the name // of the metric after you create the RuleGroup. 
- MetricName *string `type:"string"` + MetricName *string `min:"1" type:"string"` // The friendly name or description for the RuleGroup. You can't change the // name of a RuleGroup after you create it. @@ -17715,6 +19097,16 @@ func (s *RuleGroup) SetRuleGroupId(v string) *RuleGroup { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Contains the identifier and the friendly name or description of the RuleGroup. type RuleGroupSummary struct { _ struct{} `type:"structure"` @@ -17758,6 +19150,16 @@ func (s *RuleGroupSummary) SetRuleGroupId(v string) *RuleGroupSummary { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies an ActivatedRule and indicates whether you want to add it to a // RuleGroup or delete it from a RuleGroup. type RuleGroupUpdate struct { @@ -17820,6 +19222,16 @@ func (s *RuleGroupUpdate) SetActivatedRule(v *ActivatedRule) *RuleGroupUpdate { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Contains the identifier and the friendly name or description of the Rule. type RuleSummary struct { _ struct{} `type:"structure"` @@ -17863,6 +19275,16 @@ func (s *RuleSummary) SetRuleId(v string) *RuleSummary { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies a Predicate (such as an IPSet) and indicates whether you want to // add it to a Rule or delete it from a Rule. type RuleUpdate struct { @@ -17923,6 +19345,16 @@ func (s *RuleUpdate) SetPredicate(v *Predicate) *RuleUpdate { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The response from a GetSampledRequests request includes a SampledHTTPRequests // complex type that appears as SampledRequests in the response syntax. SampledHTTPRequests // contains one SampledHTTPRequest object for each web request that is returned @@ -18006,8 +19438,8 @@ func (s *SampledHTTPRequest) SetWeight(v int64) *SampledHTTPRequest { // exception again, you will have to wait additional time until the role is // unlocked. type ServiceLinkedRoleErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18024,17 +19456,17 @@ func (s ServiceLinkedRoleErrorException) GoString() string { func newErrorServiceLinkedRoleErrorException(v protocol.ResponseMetadata) error { return &ServiceLinkedRoleErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ServiceLinkedRoleErrorException) Code() string { +func (s *ServiceLinkedRoleErrorException) Code() string { return "WAFServiceLinkedRoleErrorException" } // Message returns the exception's message. -func (s ServiceLinkedRoleErrorException) Message() string { +func (s *ServiceLinkedRoleErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18042,24 +19474,34 @@ func (s ServiceLinkedRoleErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ServiceLinkedRoleErrorException) OrigErr() error { +func (s *ServiceLinkedRoleErrorException) OrigErr() error { return nil } -func (s ServiceLinkedRoleErrorException) Error() string { +func (s *ServiceLinkedRoleErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ServiceLinkedRoleErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ServiceLinkedRoleErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ServiceLinkedRoleErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *ServiceLinkedRoleErrorException) RequestID() string { + return s.RespMetadata.RequestID } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies a constraint on the size of a part of the web request. AWS WAF // uses the Size, ComparisonOperator, and FieldToMatch to build an expression // in the form of "Size ComparisonOperator size in bytes of FieldToMatch". 
If @@ -18109,7 +19551,7 @@ type SizeConstraint struct { // Text transformations eliminate some of the unusual formatting that attackers // use in web requests in an effort to bypass AWS WAF. If you specify a transformation, - // AWS WAF performs the transformation on FieldToMatch before inspecting a request + // AWS WAF performs the transformation on FieldToMatch before inspecting it // for a match. // // You can only specify a single type of TextTransformation. @@ -18249,6 +19691,16 @@ func (s *SizeConstraint) SetTextTransformation(v string) *SizeConstraint { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // A complex type that contains SizeConstraint objects, which specify the parts // of web requests that you want AWS WAF to inspect the size of. If a SizeConstraintSet // contains more than one SizeConstraint object, a request only needs to match @@ -18304,6 +19756,16 @@ func (s *SizeConstraintSet) SetSizeConstraints(v []*SizeConstraint) *SizeConstra return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The Id and Name of a SizeConstraintSet. type SizeConstraintSetSummary struct { _ struct{} `type:"structure"` @@ -18347,6 +19809,16 @@ func (s *SizeConstraintSetSummary) SetSizeConstraintSetId(v string) *SizeConstra return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies the part of a web request that you want to inspect the size of // and indicates whether you want to add the specification to a SizeConstraintSet // or delete it from a SizeConstraintSet. @@ -18411,6 +19883,16 @@ func (s *SizeConstraintSetUpdate) SetSizeConstraint(v *SizeConstraint) *SizeCons return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. 
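The SizeConstraint documentation above describes the expression AWS WAF evaluates, "Size ComparisonOperator size in bytes of FieldToMatch", with the text transformation applied to FieldToMatch before the comparison. A hedged sketch of one such constraint, limiting request bodies to 8 KB; the size and field choice are illustrative rather than taken from the patch, and the imports match the earlier sketch.

// bodyTooLarge builds a SizeConstraint equivalent to the expression
// "Size GT 8192 bytes of BODY": it matches requests whose body exceeds
// 8 KB, with no text transformation applied before the comparison.
func bodyTooLarge() *waf.SizeConstraint {
	return &waf.SizeConstraint{
		ComparisonOperator: aws.String(waf.ComparisonOperatorGt),
		FieldToMatch: &waf.FieldToMatch{
			Type: aws.String(waf.MatchFieldTypeBody),
		},
		Size:               aws.Int64(8192),
		TextTransformation: aws.String(waf.TextTransformationNone),
	}
}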
+// // A complex type that contains SqlInjectionMatchTuple objects, which specify // the parts of web requests that you want AWS WAF to inspect for snippets of // malicious SQL code and, if you want AWS WAF to inspect a header, the name @@ -18469,6 +19951,16 @@ func (s *SqlInjectionMatchSet) SetSqlInjectionMatchTuples(v []*SqlInjectionMatch return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The Id and Name of a SqlInjectionMatchSet. type SqlInjectionMatchSetSummary struct { _ struct{} `type:"structure"` @@ -18512,6 +20004,16 @@ func (s *SqlInjectionMatchSetSummary) SetSqlInjectionMatchSetId(v string) *SqlIn return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies the part of a web request that you want to inspect for snippets // of malicious SQL code and indicates whether you want to add the specification // to a SqlInjectionMatchSet or delete it from a SqlInjectionMatchSet. @@ -18575,6 +20077,16 @@ func (s *SqlInjectionMatchSetUpdate) SetSqlInjectionMatchTuple(v *SqlInjectionMa return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies the part of a web request that you want AWS WAF to inspect for // snippets of malicious SQL code and, if you want AWS WAF to inspect a header, // the name of the header. @@ -18588,7 +20100,7 @@ type SqlInjectionMatchTuple struct { // Text transformations eliminate some of the unusual formatting that attackers // use in web requests in an effort to bypass AWS WAF. If you specify a transformation, - // AWS WAF performs the transformation on FieldToMatch before inspecting a request + // AWS WAF performs the transformation on FieldToMatch before inspecting it // for a match. // // You can only specify a single type of TextTransformation. @@ -18709,8 +20221,8 @@ func (s *SqlInjectionMatchTuple) SetTextTransformation(v string) *SqlInjectionMa // The operation failed because you tried to create, update, or delete an object // by using a change token that has already been used. 
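StaleDataException, documented just above, is returned when a change token is reused. Every mutating WAF Classic call in this file follows the same protocol: obtain a fresh token with GetChangeToken, pass it to the update operation, and fetch another token before the next mutation. A sketch of that flow around UpdateSqlInjectionMatchSet, with a placeholder set ID and the imports from the earlier sketch:

// addSqlInjectionTuple fetches a fresh change token and applies a single
// update. Reusing an already-consumed token would produce the
// StaleDataException described above (error code WAFStaleDataException).
func addSqlInjectionTuple(svc *waf.WAF, setID string, update *waf.SqlInjectionMatchSetUpdate) error {
	tok, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return err
	}
	_, err = svc.UpdateSqlInjectionMatchSet(&waf.UpdateSqlInjectionMatchSetInput{
		ChangeToken:            tok.ChangeToken,
		SqlInjectionMatchSetId: aws.String(setID),
		Updates:                []*waf.SqlInjectionMatchSetUpdate{update},
	})
	return err
}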
type StaleDataException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18727,17 +20239,17 @@ func (s StaleDataException) GoString() string { func newErrorStaleDataException(v protocol.ResponseMetadata) error { return &StaleDataException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s StaleDataException) Code() string { +func (s *StaleDataException) Code() string { return "WAFStaleDataException" } // Message returns the exception's message. -func (s StaleDataException) Message() string { +func (s *StaleDataException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18745,24 +20257,34 @@ func (s StaleDataException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s StaleDataException) OrigErr() error { +func (s *StaleDataException) OrigErr() error { return nil } -func (s StaleDataException) Error() string { +func (s *StaleDataException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s StaleDataException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *StaleDataException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s StaleDataException) RequestID() string { - return s.respMetadata.RequestID +func (s *StaleDataException) RequestID() string { + return s.RespMetadata.RequestID } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // A summary of the rule groups you are subscribed to. type SubscribedRuleGroupSummary struct { _ struct{} `type:"structure"` @@ -18774,7 +20296,7 @@ type SubscribedRuleGroupSummary struct { // of the metric after you create the RuleGroup. // // MetricName is a required field - MetricName *string `type:"string" required:"true"` + MetricName *string `min:"1" type:"string" required:"true"` // A friendly name or description of the RuleGroup. You can't change the name // of a RuleGroup after you create it. @@ -18818,8 +20340,8 @@ func (s *SubscribedRuleGroupSummary) SetRuleGroupId(v string) *SubscribedRuleGro // The specified subscription does not exist. type SubscriptionNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18836,17 +20358,17 @@ func (s SubscriptionNotFoundException) GoString() string { func newErrorSubscriptionNotFoundException(v protocol.ResponseMetadata) error { return &SubscriptionNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s SubscriptionNotFoundException) Code() string { +func (s *SubscriptionNotFoundException) Code() string { return "WAFSubscriptionNotFoundException" } // Message returns the exception's message. -func (s SubscriptionNotFoundException) Message() string { +func (s *SubscriptionNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18854,30 +20376,51 @@ func (s SubscriptionNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s SubscriptionNotFoundException) OrigErr() error { +func (s *SubscriptionNotFoundException) OrigErr() error { return nil } -func (s SubscriptionNotFoundException) Error() string { +func (s *SubscriptionNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s SubscriptionNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *SubscriptionNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s SubscriptionNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *SubscriptionNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// +// A tag associated with an AWS resource. Tags are key:value pairs that you +// can use to categorize and manage your resources, for purposes like billing. +// For example, you might set the tag key to "customer" and the value to the +// customer name or ID. You can specify one or more tags to add to each AWS +// resource, up to 50 tags for a resource. +// +// Tagging is only available through the API, SDKs, and CLI. You can't manage +// or view tags through the AWS WAF Classic console. You can tag the AWS resources +// that you manage through AWS WAF Classic: web ACLs, rule groups, and rules. type Tag struct { _ struct{} `type:"structure"` - Key *string `min:"1" type:"string"` + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` - Value *string `type:"string"` + // Value is a required field + Value *string `type:"string" required:"true"` } // String returns the string representation @@ -18893,9 +20436,15 @@ func (s Tag) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *Tag) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } if s.Key != nil && len(*s.Key) < 1 { invalidParams.Add(request.NewErrParamMinLen("Key", 1)) } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } if invalidParams.Len() > 0 { return invalidParams @@ -18915,6 +20464,25 @@ func (s *Tag) SetValue(v string) *Tag { return s } +// +// This is AWS WAF Classic documentation. 
For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// +// Information for a tag associated with an AWS resource. Tags are key:value +// pairs that you can use to categorize and manage your resources, for purposes +// like billing. For example, you might set the tag key to "customer" and the +// value to the customer name or ID. You can specify one or more tags to add +// to each AWS resource, up to 50 tags for a resource. +// +// Tagging is only available through the API, SDKs, and CLI. You can't manage +// or view tags through the AWS WAF Classic console. You can tag the AWS resources +// that you manage through AWS WAF Classic: web ACLs, rule groups, and rules. type TagInfoForResource struct { _ struct{} `type:"structure"` @@ -18946,8 +20514,8 @@ func (s *TagInfoForResource) SetTagList(v []*Tag) *TagInfoForResource { } type TagOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -18964,17 +20532,17 @@ func (s TagOperationException) GoString() string { func newErrorTagOperationException(v protocol.ResponseMetadata) error { return &TagOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TagOperationException) Code() string { +func (s *TagOperationException) Code() string { return "WAFTagOperationException" } // Message returns the exception's message. -func (s TagOperationException) Message() string { +func (s *TagOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -18982,27 +20550,27 @@ func (s TagOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagOperationException) OrigErr() error { +func (s *TagOperationException) OrigErr() error { return nil } -func (s TagOperationException) Error() string { +func (s *TagOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagOperationException) RequestID() string { + return s.RespMetadata.RequestID } type TagOperationInternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -19019,17 +20587,17 @@ func (s TagOperationInternalErrorException) GoString() string { func newErrorTagOperationInternalErrorException(v protocol.ResponseMetadata) error { return &TagOperationInternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
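The Tag hunk above marks both Key and Value as required and extends Validate accordingly; previously only the minimum length of Key was checked. A small sketch of the client-side effect, using the illustrative "customer" key from the documentation and the imports from the earlier sketch:

// validateTag shows the effect of the new required markers: both Key and
// Value must be set, otherwise Validate reports a missing required field
// (for example Tag.Value) before any request is sent.
func validateTag() error {
	tag := &waf.Tag{
		Key:   aws.String("customer"),
		Value: aws.String("example-customer-id"),
	}
	return tag.Validate()
}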
-func (s TagOperationInternalErrorException) Code() string { +func (s *TagOperationInternalErrorException) Code() string { return "WAFTagOperationInternalErrorException" } // Message returns the exception's message. -func (s TagOperationInternalErrorException) Message() string { +func (s *TagOperationInternalErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -19037,22 +20605,22 @@ func (s TagOperationInternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TagOperationInternalErrorException) OrigErr() error { +func (s *TagOperationInternalErrorException) OrigErr() error { return nil } -func (s TagOperationInternalErrorException) Error() string { +func (s *TagOperationInternalErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TagOperationInternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TagOperationInternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TagOperationInternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *TagOperationInternalErrorException) RequestID() string { + return s.RespMetadata.RequestID } type TagResourceInput struct { @@ -19133,9 +20701,22 @@ func (s TagResourceOutput) GoString() string { return s.String() } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // In a GetSampledRequests request, the StartTime and EndTime objects specify // the time range for which you want AWS WAF to return a sample of web requests. // +// You must specify the times in Coordinated Universal Time (UTC) format. UTC +// format includes the special designator, Z. For example, "2016-09-27T14:50Z". +// // In a GetSampledRequests response, the StartTime and EndTime objects specify // the time range for which AWS WAF actually returned a sample of web requests. // AWS WAF gets the specified number of requests from among the first 5,000 @@ -19147,17 +20728,19 @@ type TimeWindow struct { _ struct{} `type:"structure"` // The end of the time range from which you want GetSampledRequests to return - // a sample of the requests that your AWS resource received. Specify the date - // and time in the following format: "2016-09-27T14:50Z". You can specify any - // time range in the previous three hours. + // a sample of the requests that your AWS resource received. You must specify + // the date and time in Coordinated Universal Time (UTC) format. UTC format + // includes the special designator, Z. For example, "2016-09-27T14:50Z". You + // can specify any time range in the previous three hours. // // EndTime is a required field EndTime *time.Time `type:"timestamp" required:"true"` // The beginning of the time range from which you want GetSampledRequests to - // return a sample of the requests that your AWS resource received. 
Specify - // the date and time in the following format: "2016-09-27T14:50Z". You can specify - // any time range in the previous three hours. + // return a sample of the requests that your AWS resource received. You must + // specify the date and time in Coordinated Universal Time (UTC) format. UTC + // format includes the special designator, Z. For example, "2016-09-27T14:50Z". + // You can specify any time range in the previous three hours. // // StartTime is a required field StartTime *time.Time `type:"timestamp" required:"true"` @@ -20704,6 +22287,96 @@ func (s *UpdateXssMatchSetOutput) SetChangeToken(v string) *UpdateXssMatchSetOut return s } +// The operation failed due to a problem with the migration. The failure cause +// is provided in the exception, in the MigrationErrorType: +// +// * ENTITY_NOT_SUPPORTED - The web ACL has an unsupported entity but the +// IgnoreUnsupportedType is not set to true. +// +// * ENTITY_NOT_FOUND - The web ACL doesn't exist. +// +// * S3_BUCKET_NO_PERMISSION - You don't have permission to perform the PutObject +// action to the specified Amazon S3 bucket. +// +// * S3_BUCKET_NOT_ACCESSIBLE - The bucket policy doesn't allow AWS WAF to +// perform the PutObject action in the bucket. +// +// * S3_BUCKET_NOT_FOUND - The S3 bucket doesn't exist. +// +// * S3_BUCKET_INVALID_REGION - The S3 bucket is not in the same Region as +// the web ACL. +// +// * S3_INTERNAL_ERROR - AWS WAF failed to create the template in the S3 +// bucket for another reason. +type WAFEntityMigrationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` + + MigrationErrorReason *string `type:"string"` + + MigrationErrorType *string `type:"string" enum:"MigrationErrorType"` +} + +// String returns the string representation +func (s WAFEntityMigrationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WAFEntityMigrationException) GoString() string { + return s.String() +} + +func newErrorWAFEntityMigrationException(v protocol.ResponseMetadata) error { + return &WAFEntityMigrationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *WAFEntityMigrationException) Code() string { + return "WAFEntityMigrationException" +} + +// Message returns the exception's message. +func (s *WAFEntityMigrationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *WAFEntityMigrationException) OrigErr() error { + return nil +} + +func (s *WAFEntityMigrationException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *WAFEntityMigrationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *WAFEntityMigrationException) RequestID() string { + return s.RespMetadata.RequestID +} + +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). 
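WAFEntityMigrationException, added above, carries a MigrationErrorType whose possible values are listed in its documentation and as constants near the end of this file. A hedged sketch of inspecting such an error; it assumes the error comes from the web ACL migration operation, which sits outside the hunks shown here, and it reuses the errors, aws, and waf imports from the first sketch.

// classifyMigrationError distinguishes S3 permission problems from other
// migration failures using the MigrationErrorType carried by the exception.
func classifyMigrationError(err error) string {
	var mig *waf.WAFEntityMigrationException
	if !errors.As(err, &mig) {
		return "not a migration error"
	}
	switch aws.StringValue(mig.MigrationErrorType) {
	case waf.MigrationErrorTypeS3BucketNoPermission,
		waf.MigrationErrorTypeS3BucketNotAccessible:
		return "fix the S3 bucket policy: " + aws.StringValue(mig.MigrationErrorReason)
	case waf.MigrationErrorTypeEntityNotFound:
		return "the web ACL does not exist"
	default:
		return aws.StringValue(mig.MigrationErrorReason)
	}
}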
+// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // For the action that is associated with a rule in a WebACL, specifies the // action that you want AWS WAF to perform when a web request matches all of // the conditions in a rule. For the default action in a WebACL, specifies the @@ -20757,6 +22430,16 @@ func (s *WafAction) SetType(v string) *WafAction { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The action to take if any rule within the RuleGroup matches a request. type WafOverrideAction struct { _ struct{} `type:"structure"` @@ -20797,6 +22480,16 @@ func (s *WafOverrideAction) SetType(v string) *WafOverrideAction { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Contains the Rules that identify the requests that you want to allow, block, // or count. In a WebACL, you also specify a default action (ALLOW or BLOCK), // and the action for each Rule that you add to a WebACL, for example, block @@ -20819,7 +22512,7 @@ type WebACL struct { // 128 and minimum length one. It can't contain whitespace or metric names reserved // for AWS WAF, including "All" and "Default_Action." You can't change MetricName // after you create the WebACL. - MetricName *string `type:"string"` + MetricName *string `min:"1" type:"string"` // A friendly name or description of the WebACL. You can't change the name of // a WebACL after you create it. @@ -20890,6 +22583,16 @@ func (s *WebACL) SetWebACLId(v string) *WebACL { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Contains the identifier and the name or description of the WebACL. type WebACLSummary struct { _ struct{} `type:"structure"` @@ -20932,6 +22635,16 @@ func (s *WebACLSummary) SetWebACLId(v string) *WebACLSummary { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). 
+// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies whether to insert a Rule into or delete a Rule from a WebACL. type WebACLUpdate struct { _ struct{} `type:"structure"` @@ -20993,6 +22706,16 @@ func (s *WebACLUpdate) SetActivatedRule(v *ActivatedRule) *WebACLUpdate { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // A complex type that contains XssMatchTuple objects, which specify the parts // of web requests that you want AWS WAF to inspect for cross-site scripting // attacks and, if you want AWS WAF to inspect a header, the name of the header. @@ -21050,6 +22773,16 @@ func (s *XssMatchSet) SetXssMatchTuples(v []*XssMatchTuple) *XssMatchSet { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // The Id and Name of an XssMatchSet. type XssMatchSetSummary struct { _ struct{} `type:"structure"` @@ -21092,6 +22825,16 @@ func (s *XssMatchSetSummary) SetXssMatchSetId(v string) *XssMatchSetSummary { return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies the part of a web request that you want to inspect for cross-site // scripting attacks and indicates whether you want to add the specification // to an XssMatchSet or delete it from an XssMatchSet. @@ -21155,6 +22898,16 @@ func (s *XssMatchSetUpdate) SetXssMatchTuple(v *XssMatchTuple) *XssMatchSetUpdat return s } +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Specifies the part of a web request that you want AWS WAF to inspect for // cross-site scripting attacks and, if you want AWS WAF to inspect a header, // the name of the header. @@ -21168,7 +22921,7 @@ type XssMatchTuple struct { // Text transformations eliminate some of the unusual formatting that attackers // use in web requests in an effort to bypass AWS WAF. 
If you specify a transformation, - // AWS WAF performs the transformation on FieldToMatch before inspecting a request + // AWS WAF performs the transformation on FieldToMatch before inspecting it // for a match. // // You can only specify a single type of TextTransformation. @@ -21294,6 +23047,14 @@ const ( ChangeActionDelete = "DELETE" ) +// ChangeAction_Values returns all elements of the ChangeAction enum +func ChangeAction_Values() []string { + return []string{ + ChangeActionInsert, + ChangeActionDelete, + } +} + const ( // ChangeTokenStatusProvisioned is a ChangeTokenStatus enum value ChangeTokenStatusProvisioned = "PROVISIONED" @@ -21305,6 +23066,15 @@ const ( ChangeTokenStatusInsync = "INSYNC" ) +// ChangeTokenStatus_Values returns all elements of the ChangeTokenStatus enum +func ChangeTokenStatus_Values() []string { + return []string{ + ChangeTokenStatusProvisioned, + ChangeTokenStatusPending, + ChangeTokenStatusInsync, + } +} + const ( // ComparisonOperatorEq is a ComparisonOperator enum value ComparisonOperatorEq = "EQ" @@ -21325,11 +23095,30 @@ const ( ComparisonOperatorGt = "GT" ) +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorEq, + ComparisonOperatorNe, + ComparisonOperatorLe, + ComparisonOperatorLt, + ComparisonOperatorGe, + ComparisonOperatorGt, + } +} + const ( // GeoMatchConstraintTypeCountry is a GeoMatchConstraintType enum value GeoMatchConstraintTypeCountry = "Country" ) +// GeoMatchConstraintType_Values returns all elements of the GeoMatchConstraintType enum +func GeoMatchConstraintType_Values() []string { + return []string{ + GeoMatchConstraintTypeCountry, + } +} + const ( // GeoMatchConstraintValueAf is a GeoMatchConstraintValue enum value GeoMatchConstraintValueAf = "AF" @@ -22079,6 +23868,261 @@ const ( GeoMatchConstraintValueZw = "ZW" ) +// GeoMatchConstraintValue_Values returns all elements of the GeoMatchConstraintValue enum +func GeoMatchConstraintValue_Values() []string { + return []string{ + GeoMatchConstraintValueAf, + GeoMatchConstraintValueAx, + GeoMatchConstraintValueAl, + GeoMatchConstraintValueDz, + GeoMatchConstraintValueAs, + GeoMatchConstraintValueAd, + GeoMatchConstraintValueAo, + GeoMatchConstraintValueAi, + GeoMatchConstraintValueAq, + GeoMatchConstraintValueAg, + GeoMatchConstraintValueAr, + GeoMatchConstraintValueAm, + GeoMatchConstraintValueAw, + GeoMatchConstraintValueAu, + GeoMatchConstraintValueAt, + GeoMatchConstraintValueAz, + GeoMatchConstraintValueBs, + GeoMatchConstraintValueBh, + GeoMatchConstraintValueBd, + GeoMatchConstraintValueBb, + GeoMatchConstraintValueBy, + GeoMatchConstraintValueBe, + GeoMatchConstraintValueBz, + GeoMatchConstraintValueBj, + GeoMatchConstraintValueBm, + GeoMatchConstraintValueBt, + GeoMatchConstraintValueBo, + GeoMatchConstraintValueBq, + GeoMatchConstraintValueBa, + GeoMatchConstraintValueBw, + GeoMatchConstraintValueBv, + GeoMatchConstraintValueBr, + GeoMatchConstraintValueIo, + GeoMatchConstraintValueBn, + GeoMatchConstraintValueBg, + GeoMatchConstraintValueBf, + GeoMatchConstraintValueBi, + GeoMatchConstraintValueKh, + GeoMatchConstraintValueCm, + GeoMatchConstraintValueCa, + GeoMatchConstraintValueCv, + GeoMatchConstraintValueKy, + GeoMatchConstraintValueCf, + GeoMatchConstraintValueTd, + GeoMatchConstraintValueCl, + GeoMatchConstraintValueCn, + GeoMatchConstraintValueCx, + GeoMatchConstraintValueCc, + GeoMatchConstraintValueCo, + GeoMatchConstraintValueKm, + GeoMatchConstraintValueCg, + 
GeoMatchConstraintValueCd, + GeoMatchConstraintValueCk, + GeoMatchConstraintValueCr, + GeoMatchConstraintValueCi, + GeoMatchConstraintValueHr, + GeoMatchConstraintValueCu, + GeoMatchConstraintValueCw, + GeoMatchConstraintValueCy, + GeoMatchConstraintValueCz, + GeoMatchConstraintValueDk, + GeoMatchConstraintValueDj, + GeoMatchConstraintValueDm, + GeoMatchConstraintValueDo, + GeoMatchConstraintValueEc, + GeoMatchConstraintValueEg, + GeoMatchConstraintValueSv, + GeoMatchConstraintValueGq, + GeoMatchConstraintValueEr, + GeoMatchConstraintValueEe, + GeoMatchConstraintValueEt, + GeoMatchConstraintValueFk, + GeoMatchConstraintValueFo, + GeoMatchConstraintValueFj, + GeoMatchConstraintValueFi, + GeoMatchConstraintValueFr, + GeoMatchConstraintValueGf, + GeoMatchConstraintValuePf, + GeoMatchConstraintValueTf, + GeoMatchConstraintValueGa, + GeoMatchConstraintValueGm, + GeoMatchConstraintValueGe, + GeoMatchConstraintValueDe, + GeoMatchConstraintValueGh, + GeoMatchConstraintValueGi, + GeoMatchConstraintValueGr, + GeoMatchConstraintValueGl, + GeoMatchConstraintValueGd, + GeoMatchConstraintValueGp, + GeoMatchConstraintValueGu, + GeoMatchConstraintValueGt, + GeoMatchConstraintValueGg, + GeoMatchConstraintValueGn, + GeoMatchConstraintValueGw, + GeoMatchConstraintValueGy, + GeoMatchConstraintValueHt, + GeoMatchConstraintValueHm, + GeoMatchConstraintValueVa, + GeoMatchConstraintValueHn, + GeoMatchConstraintValueHk, + GeoMatchConstraintValueHu, + GeoMatchConstraintValueIs, + GeoMatchConstraintValueIn, + GeoMatchConstraintValueId, + GeoMatchConstraintValueIr, + GeoMatchConstraintValueIq, + GeoMatchConstraintValueIe, + GeoMatchConstraintValueIm, + GeoMatchConstraintValueIl, + GeoMatchConstraintValueIt, + GeoMatchConstraintValueJm, + GeoMatchConstraintValueJp, + GeoMatchConstraintValueJe, + GeoMatchConstraintValueJo, + GeoMatchConstraintValueKz, + GeoMatchConstraintValueKe, + GeoMatchConstraintValueKi, + GeoMatchConstraintValueKp, + GeoMatchConstraintValueKr, + GeoMatchConstraintValueKw, + GeoMatchConstraintValueKg, + GeoMatchConstraintValueLa, + GeoMatchConstraintValueLv, + GeoMatchConstraintValueLb, + GeoMatchConstraintValueLs, + GeoMatchConstraintValueLr, + GeoMatchConstraintValueLy, + GeoMatchConstraintValueLi, + GeoMatchConstraintValueLt, + GeoMatchConstraintValueLu, + GeoMatchConstraintValueMo, + GeoMatchConstraintValueMk, + GeoMatchConstraintValueMg, + GeoMatchConstraintValueMw, + GeoMatchConstraintValueMy, + GeoMatchConstraintValueMv, + GeoMatchConstraintValueMl, + GeoMatchConstraintValueMt, + GeoMatchConstraintValueMh, + GeoMatchConstraintValueMq, + GeoMatchConstraintValueMr, + GeoMatchConstraintValueMu, + GeoMatchConstraintValueYt, + GeoMatchConstraintValueMx, + GeoMatchConstraintValueFm, + GeoMatchConstraintValueMd, + GeoMatchConstraintValueMc, + GeoMatchConstraintValueMn, + GeoMatchConstraintValueMe, + GeoMatchConstraintValueMs, + GeoMatchConstraintValueMa, + GeoMatchConstraintValueMz, + GeoMatchConstraintValueMm, + GeoMatchConstraintValueNa, + GeoMatchConstraintValueNr, + GeoMatchConstraintValueNp, + GeoMatchConstraintValueNl, + GeoMatchConstraintValueNc, + GeoMatchConstraintValueNz, + GeoMatchConstraintValueNi, + GeoMatchConstraintValueNe, + GeoMatchConstraintValueNg, + GeoMatchConstraintValueNu, + GeoMatchConstraintValueNf, + GeoMatchConstraintValueMp, + GeoMatchConstraintValueNo, + GeoMatchConstraintValueOm, + GeoMatchConstraintValuePk, + GeoMatchConstraintValuePw, + GeoMatchConstraintValuePs, + GeoMatchConstraintValuePa, + GeoMatchConstraintValuePg, + GeoMatchConstraintValuePy, + 
GeoMatchConstraintValuePe, + GeoMatchConstraintValuePh, + GeoMatchConstraintValuePn, + GeoMatchConstraintValuePl, + GeoMatchConstraintValuePt, + GeoMatchConstraintValuePr, + GeoMatchConstraintValueQa, + GeoMatchConstraintValueRe, + GeoMatchConstraintValueRo, + GeoMatchConstraintValueRu, + GeoMatchConstraintValueRw, + GeoMatchConstraintValueBl, + GeoMatchConstraintValueSh, + GeoMatchConstraintValueKn, + GeoMatchConstraintValueLc, + GeoMatchConstraintValueMf, + GeoMatchConstraintValuePm, + GeoMatchConstraintValueVc, + GeoMatchConstraintValueWs, + GeoMatchConstraintValueSm, + GeoMatchConstraintValueSt, + GeoMatchConstraintValueSa, + GeoMatchConstraintValueSn, + GeoMatchConstraintValueRs, + GeoMatchConstraintValueSc, + GeoMatchConstraintValueSl, + GeoMatchConstraintValueSg, + GeoMatchConstraintValueSx, + GeoMatchConstraintValueSk, + GeoMatchConstraintValueSi, + GeoMatchConstraintValueSb, + GeoMatchConstraintValueSo, + GeoMatchConstraintValueZa, + GeoMatchConstraintValueGs, + GeoMatchConstraintValueSs, + GeoMatchConstraintValueEs, + GeoMatchConstraintValueLk, + GeoMatchConstraintValueSd, + GeoMatchConstraintValueSr, + GeoMatchConstraintValueSj, + GeoMatchConstraintValueSz, + GeoMatchConstraintValueSe, + GeoMatchConstraintValueCh, + GeoMatchConstraintValueSy, + GeoMatchConstraintValueTw, + GeoMatchConstraintValueTj, + GeoMatchConstraintValueTz, + GeoMatchConstraintValueTh, + GeoMatchConstraintValueTl, + GeoMatchConstraintValueTg, + GeoMatchConstraintValueTk, + GeoMatchConstraintValueTo, + GeoMatchConstraintValueTt, + GeoMatchConstraintValueTn, + GeoMatchConstraintValueTr, + GeoMatchConstraintValueTm, + GeoMatchConstraintValueTc, + GeoMatchConstraintValueTv, + GeoMatchConstraintValueUg, + GeoMatchConstraintValueUa, + GeoMatchConstraintValueAe, + GeoMatchConstraintValueGb, + GeoMatchConstraintValueUs, + GeoMatchConstraintValueUm, + GeoMatchConstraintValueUy, + GeoMatchConstraintValueUz, + GeoMatchConstraintValueVu, + GeoMatchConstraintValueVe, + GeoMatchConstraintValueVn, + GeoMatchConstraintValueVg, + GeoMatchConstraintValueVi, + GeoMatchConstraintValueWf, + GeoMatchConstraintValueEh, + GeoMatchConstraintValueYe, + GeoMatchConstraintValueZm, + GeoMatchConstraintValueZw, + } +} + const ( // IPSetDescriptorTypeIpv4 is a IPSetDescriptorType enum value IPSetDescriptorTypeIpv4 = "IPV4" @@ -22087,6 +24131,14 @@ const ( IPSetDescriptorTypeIpv6 = "IPV6" ) +// IPSetDescriptorType_Values returns all elements of the IPSetDescriptorType enum +func IPSetDescriptorType_Values() []string { + return []string{ + IPSetDescriptorTypeIpv4, + IPSetDescriptorTypeIpv6, + } +} + const ( // MatchFieldTypeUri is a MatchFieldType enum value MatchFieldTypeUri = "URI" @@ -22110,6 +24162,55 @@ const ( MatchFieldTypeAllQueryArgs = "ALL_QUERY_ARGS" ) +// MatchFieldType_Values returns all elements of the MatchFieldType enum +func MatchFieldType_Values() []string { + return []string{ + MatchFieldTypeUri, + MatchFieldTypeQueryString, + MatchFieldTypeHeader, + MatchFieldTypeMethod, + MatchFieldTypeBody, + MatchFieldTypeSingleQueryArg, + MatchFieldTypeAllQueryArgs, + } +} + +const ( + // MigrationErrorTypeEntityNotSupported is a MigrationErrorType enum value + MigrationErrorTypeEntityNotSupported = "ENTITY_NOT_SUPPORTED" + + // MigrationErrorTypeEntityNotFound is a MigrationErrorType enum value + MigrationErrorTypeEntityNotFound = "ENTITY_NOT_FOUND" + + // MigrationErrorTypeS3BucketNoPermission is a MigrationErrorType enum value + MigrationErrorTypeS3BucketNoPermission = "S3_BUCKET_NO_PERMISSION" + + // 
MigrationErrorTypeS3BucketNotAccessible is a MigrationErrorType enum value + MigrationErrorTypeS3BucketNotAccessible = "S3_BUCKET_NOT_ACCESSIBLE" + + // MigrationErrorTypeS3BucketNotFound is a MigrationErrorType enum value + MigrationErrorTypeS3BucketNotFound = "S3_BUCKET_NOT_FOUND" + + // MigrationErrorTypeS3BucketInvalidRegion is a MigrationErrorType enum value + MigrationErrorTypeS3BucketInvalidRegion = "S3_BUCKET_INVALID_REGION" + + // MigrationErrorTypeS3InternalError is a MigrationErrorType enum value + MigrationErrorTypeS3InternalError = "S3_INTERNAL_ERROR" +) + +// MigrationErrorType_Values returns all elements of the MigrationErrorType enum +func MigrationErrorType_Values() []string { + return []string{ + MigrationErrorTypeEntityNotSupported, + MigrationErrorTypeEntityNotFound, + MigrationErrorTypeS3BucketNoPermission, + MigrationErrorTypeS3BucketNotAccessible, + MigrationErrorTypeS3BucketNotFound, + MigrationErrorTypeS3BucketInvalidRegion, + MigrationErrorTypeS3InternalError, + } +} + const ( // ParameterExceptionFieldChangeAction is a ParameterExceptionField enum value ParameterExceptionFieldChangeAction = "CHANGE_ACTION" @@ -22166,6 +24267,30 @@ const ( ParameterExceptionFieldTagKeys = "TAG_KEYS" ) +// ParameterExceptionField_Values returns all elements of the ParameterExceptionField enum +func ParameterExceptionField_Values() []string { + return []string{ + ParameterExceptionFieldChangeAction, + ParameterExceptionFieldWafAction, + ParameterExceptionFieldWafOverrideAction, + ParameterExceptionFieldPredicateType, + ParameterExceptionFieldIpsetType, + ParameterExceptionFieldByteMatchFieldType, + ParameterExceptionFieldSqlInjectionMatchFieldType, + ParameterExceptionFieldByteMatchTextTransformation, + ParameterExceptionFieldByteMatchPositionalConstraint, + ParameterExceptionFieldSizeConstraintComparisonOperator, + ParameterExceptionFieldGeoMatchLocationType, + ParameterExceptionFieldGeoMatchLocationValue, + ParameterExceptionFieldRateKey, + ParameterExceptionFieldRuleType, + ParameterExceptionFieldNextMarker, + ParameterExceptionFieldResourceArn, + ParameterExceptionFieldTags, + ParameterExceptionFieldTagKeys, + } +} + const ( // ParameterExceptionReasonInvalidOption is a ParameterExceptionReason enum value ParameterExceptionReasonInvalidOption = "INVALID_OPTION" @@ -22180,6 +24305,16 @@ const ( ParameterExceptionReasonInvalidTagKey = "INVALID_TAG_KEY" ) +// ParameterExceptionReason_Values returns all elements of the ParameterExceptionReason enum +func ParameterExceptionReason_Values() []string { + return []string{ + ParameterExceptionReasonInvalidOption, + ParameterExceptionReasonIllegalCombination, + ParameterExceptionReasonIllegalArgument, + ParameterExceptionReasonInvalidTagKey, + } +} + const ( // PositionalConstraintExactly is a PositionalConstraint enum value PositionalConstraintExactly = "EXACTLY" @@ -22197,6 +24332,17 @@ const ( PositionalConstraintContainsWord = "CONTAINS_WORD" ) +// PositionalConstraint_Values returns all elements of the PositionalConstraint enum +func PositionalConstraint_Values() []string { + return []string{ + PositionalConstraintExactly, + PositionalConstraintStartsWith, + PositionalConstraintEndsWith, + PositionalConstraintContains, + PositionalConstraintContainsWord, + } +} + const ( // PredicateTypeIpmatch is a PredicateType enum value PredicateTypeIpmatch = "IPMatch" @@ -22220,11 +24366,31 @@ const ( PredicateTypeRegexMatch = "RegexMatch" ) +// PredicateType_Values returns all elements of the PredicateType enum +func PredicateType_Values() 
[]string { + return []string{ + PredicateTypeIpmatch, + PredicateTypeByteMatch, + PredicateTypeSqlInjectionMatch, + PredicateTypeGeoMatch, + PredicateTypeSizeConstraint, + PredicateTypeXssMatch, + PredicateTypeRegexMatch, + } +} + const ( // RateKeyIp is a RateKey enum value RateKeyIp = "IP" ) +// RateKey_Values returns all elements of the RateKey enum +func RateKey_Values() []string { + return []string{ + RateKeyIp, + } +} + const ( // TextTransformationNone is a TextTransformation enum value TextTransformationNone = "NONE" @@ -22245,6 +24411,18 @@ const ( TextTransformationUrlDecode = "URL_DECODE" ) +// TextTransformation_Values returns all elements of the TextTransformation enum +func TextTransformation_Values() []string { + return []string{ + TextTransformationNone, + TextTransformationCompressWhiteSpace, + TextTransformationHtmlEntityDecode, + TextTransformationLowercase, + TextTransformationCmdLine, + TextTransformationUrlDecode, + } +} + const ( // WafActionTypeBlock is a WafActionType enum value WafActionTypeBlock = "BLOCK" @@ -22256,6 +24434,15 @@ const ( WafActionTypeCount = "COUNT" ) +// WafActionType_Values returns all elements of the WafActionType enum +func WafActionType_Values() []string { + return []string{ + WafActionTypeBlock, + WafActionTypeAllow, + WafActionTypeCount, + } +} + const ( // WafOverrideActionTypeNone is a WafOverrideActionType enum value WafOverrideActionTypeNone = "NONE" @@ -22264,6 +24451,14 @@ const ( WafOverrideActionTypeCount = "COUNT" ) +// WafOverrideActionType_Values returns all elements of the WafOverrideActionType enum +func WafOverrideActionType_Values() []string { + return []string{ + WafOverrideActionTypeNone, + WafOverrideActionTypeCount, + } +} + const ( // WafRuleTypeRegular is a WafRuleType enum value WafRuleTypeRegular = "REGULAR" @@ -22274,3 +24469,12 @@ const ( // WafRuleTypeGroup is a WafRuleType enum value WafRuleTypeGroup = "GROUP" ) + +// WafRuleType_Values returns all elements of the WafRuleType enum +func WafRuleType_Values() []string { + return []string{ + WafRuleTypeRegular, + WafRuleTypeRateBased, + WafRuleTypeGroup, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/waf/doc.go b/vendor/github.com/aws/aws-sdk-go/service/waf/doc.go index a3399aaa7..2ce69f0af 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/waf/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/waf/doc.go @@ -3,13 +3,25 @@ // Package waf provides the client and types for making API // requests to AWS WAF. // -// This is the AWS WAF API Reference for using AWS WAF with Amazon CloudFront. -// The AWS WAF actions and data types listed in the reference are available -// for protecting Amazon CloudFront distributions. You can use these actions -// and data types via the endpoint waf.amazonaws.com. This guide is for developers -// who need detailed information about the AWS WAF API actions, data types, -// and errors. For detailed information about AWS WAF features and an overview -// of how to use the AWS WAF API, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). 
+// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// +// This is the AWS WAF Classic API Reference for using AWS WAF Classic with +// Amazon CloudFront. The AWS WAF Classic actions and data types listed in the +// reference are available for protecting Amazon CloudFront distributions. You +// can use these actions and data types via the endpoint waf.amazonaws.com. +// This guide is for developers who need detailed information about the AWS +// WAF Classic API actions, data types, and errors. For detailed information +// about AWS WAF Classic features and an overview of how to use the AWS WAF +// Classic API, see the AWS WAF Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. // // See https://docs.aws.amazon.com/goto/WebAPI/waf-2015-08-24 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/waf/errors.go b/vendor/github.com/aws/aws-sdk-go/service/waf/errors.go index 9a71b5b9b..083ea5286 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/waf/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/waf/errors.go @@ -213,6 +213,32 @@ const ( // ErrCodeTagOperationInternalErrorException for service response error code // "WAFTagOperationInternalErrorException". ErrCodeTagOperationInternalErrorException = "WAFTagOperationInternalErrorException" + + // ErrCodeWAFEntityMigrationException for service response error code + // "WAFEntityMigrationException". + // + // The operation failed due to a problem with the migration. The failure cause + // is provided in the exception, in the MigrationErrorType: + // + // * ENTITY_NOT_SUPPORTED - The web ACL has an unsupported entity but the + // IgnoreUnsupportedType is not set to true. + // + // * ENTITY_NOT_FOUND - The web ACL doesn't exist. + // + // * S3_BUCKET_NO_PERMISSION - You don't have permission to perform the PutObject + // action to the specified Amazon S3 bucket. + // + // * S3_BUCKET_NOT_ACCESSIBLE - The bucket policy doesn't allow AWS WAF to + // perform the PutObject action in the bucket. + // + // * S3_BUCKET_NOT_FOUND - The S3 bucket doesn't exist. + // + // * S3_BUCKET_INVALID_REGION - The S3 bucket is not in the same Region as + // the web ACL. + // + // * S3_INTERNAL_ERROR - AWS WAF failed to create the template in the S3 + // bucket for another reason. 
+ ErrCodeWAFEntityMigrationException = "WAFEntityMigrationException" ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ @@ -234,4 +260,5 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "WAFSubscriptionNotFoundException": newErrorSubscriptionNotFoundException, "WAFTagOperationException": newErrorTagOperationException, "WAFTagOperationInternalErrorException": newErrorTagOperationInternalErrorException, + "WAFEntityMigrationException": newErrorWAFEntityMigrationException, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/waf/service.go b/vendor/github.com/aws/aws-sdk-go/service/waf/service.go index cb4310ef3..0271c68fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/waf/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/waf/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go b/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go index ec82a1dc5..0d42f2963 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/wafregional/api.go @@ -58,6 +58,16 @@ func (c *WAFRegional) AssociateWebACLRequest(input *AssociateWebACLInput) (req * // AssociateWebACL API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic Regional documentation. For more information, see +// AWS WAF Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Associates a web ACL with a resource, either an application load balancer // or Amazon API Gateway stage. // @@ -179,6 +189,16 @@ func (c *WAFRegional) CreateByteMatchSetRequest(input *waf.CreateByteMatchSetInp // CreateByteMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a ByteMatchSet. You then use UpdateByteMatchSet to identify the part // of a web request that you want AWS WAF to inspect, such as the values of // the User-Agent header or the query string. For example, you can create a @@ -326,6 +346,16 @@ func (c *WAFRegional) CreateGeoMatchSetRequest(input *waf.CreateGeoMatchSetInput // CreateGeoMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates an GeoMatchSet, which you use to specify which web requests you want // to allow or block based on the country that the requests originate from. // For example, if you're receiving a lot of requests from one or more countries @@ -472,6 +502,16 @@ func (c *WAFRegional) CreateIPSetRequest(input *waf.CreateIPSetInput) (req *requ // CreateIPSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates an IPSet, which you use to specify which web requests that you want // to allow or block based on the IP addresses that the requests originate from. // For example, if you're receiving a lot of requests from one or more individual @@ -619,6 +659,16 @@ func (c *WAFRegional) CreateRateBasedRuleRequest(input *waf.CreateRateBasedRuleI // CreateRateBasedRule API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a RateBasedRule. The RateBasedRule contains a RateLimit, which specifies // the maximum number of requests that AWS WAF allows from a specified IP address // in a five-minute period. The RateBasedRule also contains the IPSet objects, @@ -626,23 +676,22 @@ func (c *WAFRegional) CreateRateBasedRuleRequest(input *waf.CreateRateBasedRuleI // you want to count or block if these requests exceed the RateLimit. // // If you add more than one predicate to a RateBasedRule, a request not only -// must exceed the RateLimit, but it also must match all the specifications -// to be counted or blocked. For example, suppose you add the following to a -// RateBasedRule: +// must exceed the RateLimit, but it also must match all the conditions to be +// counted or blocked. For example, suppose you add the following to a RateBasedRule: // // * An IPSet that matches the IP address 192.0.2.44/32 // // * A ByteMatchSet that matches BadBot in the User-Agent header // -// Further, you specify a RateLimit of 15,000. +// Further, you specify a RateLimit of 1,000. // // You then add the RateBasedRule to a WebACL and specify that you want to block // requests that meet the conditions in the rule. For a request to be blocked, // it must come from the IP address 192.0.2.44 and the User-Agent header in // the request must contain the value BadBot. 
Further, requests that match these -// two conditions must be received at a rate of more than 15,000 requests every +// two conditions must be received at a rate of more than 1,000 requests every // five minutes. If both conditions are met and the rate is exceeded, AWS WAF -// blocks the requests. If the rate drops below 15,000 for a five-minute period, +// blocks the requests. If the rate drops below 1,000 for a five-minute period, // AWS WAF no longer blocks the requests. // // As a second example, suppose you want to limit requests to a particular page @@ -654,7 +703,7 @@ func (c *WAFRegional) CreateRateBasedRuleRequest(input *waf.CreateRateBasedRuleI // // * A TargetString of login // -// Further, you specify a RateLimit of 15,000. +// Further, you specify a RateLimit of 1,000. // // By adding this RateBasedRule to a WebACL, you could limit requests to your // login page without affecting the rest of your site. @@ -807,6 +856,16 @@ func (c *WAFRegional) CreateRegexMatchSetRequest(input *waf.CreateRegexMatchSetI // CreateRegexMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a RegexMatchSet. You then use UpdateRegexMatchSet to identify the // part of a web request that you want AWS WAF to inspect, such as the values // of the User-Agent header or the query string. For example, you can create @@ -922,6 +981,16 @@ func (c *WAFRegional) CreateRegexPatternSetRequest(input *waf.CreateRegexPattern // CreateRegexPatternSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a RegexPatternSet. You then use UpdateRegexPatternSet to specify // the regular expression (regex) pattern that you want AWS WAF to search for, // such as B[a@]dB[o0]t. You can then configure AWS WAF to reject those requests. @@ -1033,6 +1102,16 @@ func (c *WAFRegional) CreateRuleRequest(input *waf.CreateRuleInput) (req *reques // CreateRule API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a Rule, which contains the IPSet objects, ByteMatchSet objects, and // other predicates that identify the requests that you want to block. 
If you // add more than one predicate to a Rule, a request must match all of the specifications @@ -1196,6 +1275,16 @@ func (c *WAFRegional) CreateRuleGroupRequest(input *waf.CreateRuleGroupInput) (r // CreateRuleGroup API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a RuleGroup. A rule group is a collection of predefined rules that // you add to a web ACL. You use UpdateRuleGroup to add rules to the rule group. // @@ -1308,6 +1397,16 @@ func (c *WAFRegional) CreateSizeConstraintSetRequest(input *waf.CreateSizeConstr // CreateSizeConstraintSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a SizeConstraintSet. You then use UpdateSizeConstraintSet to identify // the part of a web request that you want AWS WAF to check for length, such // as the length of the User-Agent header or the length of the query string. @@ -1456,6 +1555,16 @@ func (c *WAFRegional) CreateSqlInjectionMatchSetRequest(input *waf.CreateSqlInje // CreateSqlInjectionMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a SqlInjectionMatchSet, which you use to allow, block, or count requests // that contain snippets of SQL code in a specified part of web requests. AWS // WAF searches for character sequences that are likely to be malicious strings. @@ -1600,6 +1709,16 @@ func (c *WAFRegional) CreateWebACLRequest(input *waf.CreateWebACLInput) (req *re // CreateWebACL API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates a WebACL, which contains the Rules that identify the CloudFront web // requests that you want to allow, block, or count. AWS WAF evaluates Rules // in order based on the value of Priority for each Rule. 
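A hedged sketch of the CreateRateBasedRule flow documented above: fetch a change token, then create a rule with RateKey IP and a RateLimit of 1,000. The rule name and metric name are placeholders, not values taken from this patch; predicates such as the IPSet or BadBot ByteMatchSet from the example text would be attached afterwards with UpdateRateBasedRule.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
	"github.com/aws/aws-sdk-go/service/wafregional"
)

func createRateBasedRule() (*waf.CreateRateBasedRuleOutput, error) {
	conn := wafregional.New(session.Must(session.NewSession()))

	// Every mutating WAF Classic call needs a fresh change token.
	token, err := conn.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		return nil, err
	}

	// Block clients that exceed 1,000 matching requests per five-minute period.
	return conn.CreateRateBasedRule(&waf.CreateRateBasedRuleInput{
		ChangeToken: token.ChangeToken,
		Name:        aws.String("example-rate-rule"), // placeholder
		MetricName:  aws.String("exampleRateRule"),   // placeholder
		RateKey:     aws.String(waf.RateKeyIp),
		RateLimit:   aws.Int64(1000),
	})
}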
@@ -1718,6 +1837,169 @@ func (c *WAFRegional) CreateWebACLWithContext(ctx aws.Context, input *waf.Create return out, req.Send() } +const opCreateWebACLMigrationStack = "CreateWebACLMigrationStack" + +// CreateWebACLMigrationStackRequest generates a "aws/request.Request" representing the +// client's request for the CreateWebACLMigrationStack operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateWebACLMigrationStack for more information on using the CreateWebACLMigrationStack +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateWebACLMigrationStackRequest method. +// req, resp := client.CreateWebACLMigrationStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateWebACLMigrationStack +func (c *WAFRegional) CreateWebACLMigrationStackRequest(input *waf.CreateWebACLMigrationStackInput) (req *request.Request, output *waf.CreateWebACLMigrationStackOutput) { + op := &request.Operation{ + Name: opCreateWebACLMigrationStack, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &waf.CreateWebACLMigrationStackInput{} + } + + output = &waf.CreateWebACLMigrationStackOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateWebACLMigrationStack API operation for AWS WAF Regional. +// +// Creates an AWS CloudFormation WAFV2 template for the specified web ACL in +// the specified Amazon S3 bucket. Then, in CloudFormation, you create a stack +// from the template, to create the web ACL and its resources in AWS WAFV2. +// Use this to migrate your AWS WAF Classic web ACL to the latest version of +// AWS WAF. +// +// This is part of a larger migration procedure for web ACLs from AWS WAF Classic +// to the latest version of AWS WAF. For the full procedure, including caveats +// and manual steps to complete the migration and switch over to the new web +// ACL, see Migrating your AWS WAF Classic resources to AWS WAF (https://docs.aws.amazon.com/waf/latest/developerguide/waf-migrating-from-classic.html) +// in the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAF Regional's +// API operation CreateWebACLMigrationStack for usage and error information. +// +// Returned Error Types: +// * WAFInternalErrorException +// The operation failed because of a system problem, even though the request +// was valid. Retry your request. +// +// * WAFInvalidParameterException +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name. +// +// * You specified an invalid value. +// +// * You tried to update an object (ByteMatchSet, IPSet, Rule, or WebACL) +// using an action other than INSERT or DELETE. 
+// +// * You tried to create a WebACL with a DefaultAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to create a RateBasedRule with a RateKey value other than +// IP. +// +// * You tried to update a WebACL with a WafAction Type other than ALLOW, +// BLOCK, or COUNT. +// +// * You tried to update a ByteMatchSet with a FieldToMatch Type other than +// HEADER, METHOD, QUERY_STRING, URI, or BODY. +// +// * You tried to update a ByteMatchSet with a Field of HEADER but no value +// for Data. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a web ACL cannot be associated. +// +// * WAFInvalidOperationException +// The operation failed because there was nothing to do. For example: +// +// * You tried to remove a Rule from a WebACL, but the Rule isn't in the +// specified WebACL. +// +// * You tried to remove an IP address from an IPSet, but the IP address +// isn't in the specified IPSet. +// +// * You tried to remove a ByteMatchTuple from a ByteMatchSet, but the ByteMatchTuple +// isn't in the specified WebACL. +// +// * You tried to add a Rule to a WebACL, but the Rule already exists in +// the specified WebACL. +// +// * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple +// already exists in the specified WebACL. +// +// * WAFNonexistentItemException +// The operation failed because the referenced object doesn't exist. +// +// * WAFEntityMigrationException +// The operation failed due to a problem with the migration. The failure cause +// is provided in the exception, in the MigrationErrorType: +// +// * ENTITY_NOT_SUPPORTED - The web ACL has an unsupported entity but the +// IgnoreUnsupportedType is not set to true. +// +// * ENTITY_NOT_FOUND - The web ACL doesn't exist. +// +// * S3_BUCKET_NO_PERMISSION - You don't have permission to perform the PutObject +// action to the specified Amazon S3 bucket. +// +// * S3_BUCKET_NOT_ACCESSIBLE - The bucket policy doesn't allow AWS WAF to +// perform the PutObject action in the bucket. +// +// * S3_BUCKET_NOT_FOUND - The S3 bucket doesn't exist. +// +// * S3_BUCKET_INVALID_REGION - The S3 bucket is not in the same Region as +// the web ACL. +// +// * S3_INTERNAL_ERROR - AWS WAF failed to create the template in the S3 +// bucket for another reason. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28/CreateWebACLMigrationStack +func (c *WAFRegional) CreateWebACLMigrationStack(input *waf.CreateWebACLMigrationStackInput) (*waf.CreateWebACLMigrationStackOutput, error) { + req, out := c.CreateWebACLMigrationStackRequest(input) + return out, req.Send() +} + +// CreateWebACLMigrationStackWithContext is the same as CreateWebACLMigrationStack with the addition of +// the ability to pass a context and additional request options. +// +// See CreateWebACLMigrationStack for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFRegional) CreateWebACLMigrationStackWithContext(ctx aws.Context, input *waf.CreateWebACLMigrationStackInput, opts ...request.Option) (*waf.CreateWebACLMigrationStackOutput, error) { + req, out := c.CreateWebACLMigrationStackRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCreateXssMatchSet = "CreateXssMatchSet" // CreateXssMatchSetRequest generates a "aws/request.Request" representing the @@ -1762,6 +2044,16 @@ func (c *WAFRegional) CreateXssMatchSetRequest(input *waf.CreateXssMatchSetInput // CreateXssMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Creates an XssMatchSet, which you use to allow, block, or count requests // that contain cross-site scripting attacks in the specified part of web requests. // AWS WAF searches for character sequences that are likely to be malicious @@ -1907,6 +2199,16 @@ func (c *WAFRegional) DeleteByteMatchSetRequest(input *waf.DeleteByteMatchSetInp // DeleteByteMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a ByteMatchSet. You can't delete a ByteMatchSet if it's // still used in any Rules or if it still includes any ByteMatchTuple objects // (any filters). @@ -2034,6 +2336,16 @@ func (c *WAFRegional) DeleteGeoMatchSetRequest(input *waf.DeleteGeoMatchSetInput // DeleteGeoMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a GeoMatchSet. You can't delete a GeoMatchSet if it's // still used in any Rules or if it still includes any countries. // @@ -2160,6 +2472,16 @@ func (c *WAFRegional) DeleteIPSetRequest(input *waf.DeleteIPSetInput) (req *requ // DeleteIPSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes an IPSet. You can't delete an IPSet if it's still used // in any Rules or if it still includes any IP addresses. 
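A sketch (not part of the vendored SDK) of driving the CreateWebACLMigrationStack operation added above and distinguishing the new WAFEntityMigrationException error code. The web ACL ID and bucket name are placeholders; the output is assumed to carry the CloudFormation template location in S3ObjectUrl as in the generated types.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
	"github.com/aws/aws-sdk-go/service/wafregional"
)

func migrateWebACL() error {
	conn := wafregional.New(session.Must(session.NewSession()))

	out, err := conn.CreateWebACLMigrationStack(&waf.CreateWebACLMigrationStackInput{
		WebACLId:              aws.String("example-web-acl-id"), // placeholder
		S3BucketName:          aws.String("example-bucket"),     // placeholder
		IgnoreUnsupportedType: aws.Bool(true),
	})
	if err != nil {
		// The MigrationErrorType (ENTITY_NOT_FOUND, S3_BUCKET_NO_PERMISSION, ...)
		// is reported through the exception, per the error documentation above.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == waf.ErrCodeWAFEntityMigrationException {
			return fmt.Errorf("migration failed: %s", aerr.Message())
		}
		return err
	}

	// URL of the generated CloudFormation template for the WAFV2 stack.
	fmt.Println(aws.StringValue(out.S3ObjectUrl))
	return nil
}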
// @@ -2287,6 +2609,16 @@ func (c *WAFRegional) DeleteLoggingConfigurationRequest(input *waf.DeleteLogging // DeleteLoggingConfiguration API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes the LoggingConfiguration from the specified web ACL. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -2375,6 +2707,16 @@ func (c *WAFRegional) DeletePermissionPolicyRequest(input *waf.DeletePermissionP // DeletePermissionPolicy API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes an IAM policy from the specified RuleGroup. // // The user making the request must be the owner of the RuleGroup. @@ -2464,6 +2806,16 @@ func (c *WAFRegional) DeleteRateBasedRuleRequest(input *waf.DeleteRateBasedRuleI // DeleteRateBasedRule API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a RateBasedRule. You can't delete a rule if it's still // used in any WebACL objects or if it still includes any predicates, such as // ByteMatchSet objects. @@ -2596,6 +2948,16 @@ func (c *WAFRegional) DeleteRegexMatchSetRequest(input *waf.DeleteRegexMatchSetI // DeleteRegexMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a RegexMatchSet. You can't delete a RegexMatchSet if // it's still used in any Rules or if it still includes any RegexMatchTuples // objects (any filters). @@ -2723,6 +3085,16 @@ func (c *WAFRegional) DeleteRegexPatternSetRequest(input *waf.DeleteRegexPattern // DeleteRegexPatternSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. 
For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a RegexPatternSet. You can't delete a RegexPatternSet // if it's still used in any RegexMatchSet or if the RegexPatternSet is not // empty. @@ -2838,6 +3210,16 @@ func (c *WAFRegional) DeleteRuleRequest(input *waf.DeleteRuleInput) (req *reques // DeleteRule API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a Rule. You can't delete a Rule if it's still used in // any WebACL objects or if it still includes any predicates, such as ByteMatchSet // objects. @@ -2968,6 +3350,16 @@ func (c *WAFRegional) DeleteRuleGroupRequest(input *waf.DeleteRuleGroupInput) (r // DeleteRuleGroup API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a RuleGroup. You can't delete a RuleGroup if it's still // used in any WebACL objects or if it still includes any rules. // @@ -3111,6 +3503,16 @@ func (c *WAFRegional) DeleteSizeConstraintSetRequest(input *waf.DeleteSizeConstr // DeleteSizeConstraintSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a SizeConstraintSet. You can't delete a SizeConstraintSet // if it's still used in any Rules or if it still includes any SizeConstraint // objects (any filters). @@ -3238,6 +3640,16 @@ func (c *WAFRegional) DeleteSqlInjectionMatchSetRequest(input *waf.DeleteSqlInje // DeleteSqlInjectionMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a SqlInjectionMatchSet. You can't delete a SqlInjectionMatchSet // if it's still used in any Rules or if it still contains any SqlInjectionMatchTuple // objects. @@ -3366,6 +3778,16 @@ func (c *WAFRegional) DeleteWebACLRequest(input *waf.DeleteWebACLInput) (req *re // DeleteWebACL API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes a WebACL. You can't delete a WebACL if it still contains // any Rules. // @@ -3493,6 +3915,16 @@ func (c *WAFRegional) DeleteXssMatchSetRequest(input *waf.DeleteXssMatchSetInput // DeleteXssMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Permanently deletes an XssMatchSet. You can't delete an XssMatchSet if it's // still used in any Rules or if it still contains any XssMatchTuple objects. // @@ -3621,6 +4053,16 @@ func (c *WAFRegional) DisassociateWebACLRequest(input *DisassociateWebACLInput) // DisassociateWebACL API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic Regional documentation. For more information, see +// AWS WAF Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Removes a web ACL from the specified resource, either an application load // balancer or Amazon API Gateway stage. // @@ -3738,6 +4180,16 @@ func (c *WAFRegional) GetByteMatchSetRequest(input *waf.GetByteMatchSetInput) (r // GetByteMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the ByteMatchSet specified by ByteMatchSetId. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -3825,6 +4277,16 @@ func (c *WAFRegional) GetChangeTokenRequest(input *waf.GetChangeTokenInput) (req // GetChangeToken API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // When you want to create, update, or delete AWS WAF objects, get a change // token and include the change token in the create, update, or delete request. // Change tokens ensure that your application doesn't submit conflicting requests @@ -3919,6 +4381,16 @@ func (c *WAFRegional) GetChangeTokenStatusRequest(input *waf.GetChangeTokenStatu // GetChangeTokenStatus API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the status of a ChangeToken that you got by calling GetChangeToken. // ChangeTokenStatus is one of the following values: // @@ -4012,6 +4484,16 @@ func (c *WAFRegional) GetGeoMatchSetRequest(input *waf.GetGeoMatchSetInput) (req // GetGeoMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the GeoMatchSet that is specified by GeoMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4099,6 +4581,16 @@ func (c *WAFRegional) GetIPSetRequest(input *waf.GetIPSetInput) (req *request.Re // GetIPSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the IPSet that is specified by IPSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4186,6 +4678,16 @@ func (c *WAFRegional) GetLoggingConfigurationRequest(input *waf.GetLoggingConfig // GetLoggingConfiguration API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. 
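The change-token workflow described above (GetChangeToken, submit the change, then GetChangeTokenStatus) can be sketched as a small polling helper. This is a minimal illustration only; the fixed sleep and lack of a timeout are not taken from this patch.

package example

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/waf"
	"github.com/aws/aws-sdk-go/service/wafregional"
)

// waitForChangeToken polls GetChangeTokenStatus until the change submitted
// with the given token has propagated (status INSYNC).
func waitForChangeToken(conn *wafregional.WAFRegional, token *string) error {
	for {
		out, err := conn.GetChangeTokenStatus(&waf.GetChangeTokenStatusInput{
			ChangeToken: token,
		})
		if err != nil {
			return err
		}
		if aws.StringValue(out.ChangeTokenStatus) == waf.ChangeTokenStatusInsync {
			return nil
		}
		time.Sleep(5 * time.Second) // crude fixed delay for the sketch
	}
}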
For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the LoggingConfiguration for the specified web ACL. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4269,6 +4771,16 @@ func (c *WAFRegional) GetPermissionPolicyRequest(input *waf.GetPermissionPolicyI // GetPermissionPolicy API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the IAM policy attached to the RuleGroup. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4352,6 +4864,16 @@ func (c *WAFRegional) GetRateBasedRuleRequest(input *waf.GetRateBasedRuleInput) // GetRateBasedRule API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the RateBasedRule that is specified by the RuleId that you included // in the GetRateBasedRule request. // @@ -4440,6 +4962,16 @@ func (c *WAFRegional) GetRateBasedRuleManagedKeysRequest(input *waf.GetRateBased // GetRateBasedRuleManagedKeys API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of IP addresses currently being blocked by the RateBasedRule // that is specified by the RuleId. The maximum number of managed keys that // will be blocked is 10,000. If more than 10,000 addresses exceed the rate @@ -4559,6 +5091,16 @@ func (c *WAFRegional) GetRegexMatchSetRequest(input *waf.GetRegexMatchSetInput) // GetRegexMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the RegexMatchSet specified by RegexMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4646,6 +5188,16 @@ func (c *WAFRegional) GetRegexPatternSetRequest(input *waf.GetRegexPatternSetInp // GetRegexPatternSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the RegexPatternSet specified by RegexPatternSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -4733,6 +5285,16 @@ func (c *WAFRegional) GetRuleRequest(input *waf.GetRuleInput) (req *request.Requ // GetRule API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the Rule that is specified by the RuleId that you included in the // GetRule request. // @@ -4821,6 +5383,16 @@ func (c *WAFRegional) GetRuleGroupRequest(input *waf.GetRuleGroupInput) (req *re // GetRuleGroup API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the RuleGroup that is specified by the RuleGroupId that you included // in the GetRuleGroup request. // @@ -4907,6 +5479,16 @@ func (c *WAFRegional) GetSampledRequestsRequest(input *waf.GetSampledRequestsInp // GetSampledRequests API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. 
+// // Gets detailed information about a specified number of requests--a sample--that // AWS WAF randomly selects from among the first 5,000 requests that your AWS // resource received during a time range that you choose. You can specify a @@ -5000,6 +5582,16 @@ func (c *WAFRegional) GetSizeConstraintSetRequest(input *waf.GetSizeConstraintSe // GetSizeConstraintSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the SizeConstraintSet specified by SizeConstraintSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5087,6 +5679,16 @@ func (c *WAFRegional) GetSqlInjectionMatchSetRequest(input *waf.GetSqlInjectionM // GetSqlInjectionMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the SqlInjectionMatchSet that is specified by SqlInjectionMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5174,6 +5776,16 @@ func (c *WAFRegional) GetWebACLRequest(input *waf.GetWebACLInput) (req *request. // GetWebACL API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the WebACL that is specified by WebACLId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5261,6 +5873,16 @@ func (c *WAFRegional) GetWebACLForResourceRequest(input *GetWebACLForResourceInp // GetWebACLForResource API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic Regional documentation. For more information, see +// AWS WAF Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the web ACL for the specified resource, either an application load // balancer or Amazon API Gateway stage. 
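A short sketch, assuming the wafregional package exposes GetWebACLForResourceInput/Output as referenced above, of looking up which web ACL is associated with a regional resource; the ARN argument is a placeholder supplied by the caller.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/wafregional"
)

func webACLForResource(resourceARN string) error {
	conn := wafregional.New(session.Must(session.NewSession()))

	out, err := conn.GetWebACLForResource(&wafregional.GetWebACLForResourceInput{
		ResourceArn: aws.String(resourceARN), // ALB or API Gateway stage ARN
	})
	if err != nil {
		return err
	}
	if out.WebACLSummary == nil {
		fmt.Println("no web ACL associated with the resource")
		return nil
	}
	fmt.Println(aws.StringValue(out.WebACLSummary.WebACLId))
	return nil
}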
// @@ -5382,6 +6004,16 @@ func (c *WAFRegional) GetXssMatchSetRequest(input *waf.GetXssMatchSetInput) (req // GetXssMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns the XssMatchSet that is specified by XssMatchSetId. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5469,6 +6101,16 @@ func (c *WAFRegional) ListActivatedRulesInRuleGroupRequest(input *waf.ListActiva // ListActivatedRulesInRuleGroup API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of ActivatedRule objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5581,6 +6223,16 @@ func (c *WAFRegional) ListByteMatchSetsRequest(input *waf.ListByteMatchSetsInput // ListByteMatchSets API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of ByteMatchSetSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5665,6 +6317,16 @@ func (c *WAFRegional) ListGeoMatchSetsRequest(input *waf.ListGeoMatchSetsInput) // ListGeoMatchSets API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of GeoMatchSetSummary objects in the response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5749,6 +6411,16 @@ func (c *WAFRegional) ListIPSetsRequest(input *waf.ListIPSetsInput) (req *reques // ListIPSets API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of IPSetSummary objects in the response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5833,6 +6505,16 @@ func (c *WAFRegional) ListLoggingConfigurationsRequest(input *waf.ListLoggingCon // ListLoggingConfigurations API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of LoggingConfiguration objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -5945,6 +6627,16 @@ func (c *WAFRegional) ListRateBasedRulesRequest(input *waf.ListRateBasedRulesInp // ListRateBasedRules API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of RuleSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6029,6 +6721,16 @@ func (c *WAFRegional) ListRegexMatchSetsRequest(input *waf.ListRegexMatchSetsInp // ListRegexMatchSets API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of RegexMatchSetSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6113,6 +6815,16 @@ func (c *WAFRegional) ListRegexPatternSetsRequest(input *waf.ListRegexPatternSet // ListRegexPatternSets API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of RegexPatternSetSummary objects. 
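The List operations above (ListIPSets, ListRegexMatchSets, ListRegexPatternSets, and the rest) reuse the waf package input/output types and page with NextMarker and Limit. A minimal pagination sketch for ListIPSets, assuming default credentials and a placeholder region:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
	"github.com/aws/aws-sdk-go/service/wafregional"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := wafregional.New(sess)

	input := &waf.ListIPSetsInput{Limit: aws.Int64(50)}
	for {
		out, err := svc.ListIPSets(input)
		if err != nil {
			log.Fatal(err)
		}
		// Each entry is an IPSetSummary, as the doc comment above describes.
		for _, s := range out.IPSets {
			fmt.Printf("%s\t%s\n", aws.StringValue(s.IPSetId), aws.StringValue(s.Name))
		}
		// An empty or missing NextMarker means there are no further pages.
		if aws.StringValue(out.NextMarker) == "" {
			break
		}
		input.NextMarker = out.NextMarker
	}
}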
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6197,6 +6909,16 @@ func (c *WAFRegional) ListResourcesForWebACLRequest(input *ListResourcesForWebAC // ListResourcesForWebACL API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic Regional documentation. For more information, see +// AWS WAF Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of resources associated with the specified web ACL. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6313,6 +7035,16 @@ func (c *WAFRegional) ListRuleGroupsRequest(input *waf.ListRuleGroupsInput) (req // ListRuleGroups API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of RuleGroup objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6393,6 +7125,16 @@ func (c *WAFRegional) ListRulesRequest(input *waf.ListRulesInput) (req *request. // ListRules API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of RuleSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6477,6 +7219,16 @@ func (c *WAFRegional) ListSizeConstraintSetsRequest(input *waf.ListSizeConstrain // ListSizeConstraintSets API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of SizeConstraintSetSummary objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6561,6 +7313,16 @@ func (c *WAFRegional) ListSqlInjectionMatchSetsRequest(input *waf.ListSqlInjecti // ListSqlInjectionMatchSets API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. 
For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of SqlInjectionMatchSet objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6645,6 +7407,16 @@ func (c *WAFRegional) ListSubscribedRuleGroupsRequest(input *waf.ListSubscribedR // ListSubscribedRuleGroups API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of RuleGroup objects that you are subscribed to. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6728,6 +7500,26 @@ func (c *WAFRegional) ListTagsForResourceRequest(input *waf.ListTagsForResourceI // ListTagsForResource API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// +// Retrieves the tags associated with the specified AWS resource. Tags are key:value +// pairs that you can use to categorize and manage your resources, for purposes +// like billing. For example, you might set the tag key to "customer" and the +// value to the customer name or ID. You can specify one or more tags to add +// to each AWS resource, up to 50 tags for a resource. +// +// Tagging is only available through the API, SDKs, and CLI. You can't manage +// or view tags through the AWS WAF Classic console. You can tag the AWS resources +// that you manage through AWS WAF Classic: web ACLs, rule groups, and rules. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -6844,6 +7636,16 @@ func (c *WAFRegional) ListWebACLsRequest(input *waf.ListWebACLsInput) (req *requ // ListWebACLs API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. 
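A minimal sketch of the tagging flow described for ListTagsForResource above, using TagResource with the waf package tagging types; the rule group ARN is a placeholder and the ResourceARN field name is taken from those shared types:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
	"github.com/aws/aws-sdk-go/service/wafregional"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := wafregional.New(sess)

	// Placeholder ARN; only web ACLs, rule groups, and rules can be tagged, as noted above.
	arn := "arn:aws:waf-regional:us-east-1:123456789012:rulegroup/placeholder-id"

	if _, err := svc.TagResource(&waf.TagResourceInput{
		ResourceARN: aws.String(arn),
		Tags: []*waf.Tag{
			{Key: aws.String("customer"), Value: aws.String("example-corp")},
		},
	}); err != nil {
		log.Fatal(err)
	}

	out, err := svc.ListTagsForResource(&waf.ListTagsForResourceInput{ResourceARN: aws.String(arn)})
	if err != nil {
		log.Fatal(err)
	}
	// The output's String() method pretty-prints the tag list.
	fmt.Println(out)
}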
+// // Returns an array of WebACLSummary objects in the response. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -6928,6 +7730,16 @@ func (c *WAFRegional) ListXssMatchSetsRequest(input *waf.ListXssMatchSetsInput) // ListXssMatchSets API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns an array of XssMatchSet objects. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -7012,6 +7824,16 @@ func (c *WAFRegional) PutLoggingConfigurationRequest(input *waf.PutLoggingConfig // PutLoggingConfiguration API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Associates a LoggingConfiguration with a specified web ACL. // // You can access information about all traffic that AWS WAF inspects using @@ -7129,8 +7951,18 @@ func (c *WAFRegional) PutPermissionPolicyRequest(input *waf.PutPermissionPolicyI // PutPermissionPolicy API operation for AWS WAF Regional. // -// Attaches a IAM policy to the specified resource. The only supported use for -// this action is to share a RuleGroup across accounts. +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// +// Attaches an IAM policy to the specified resource. The only supported use +// for this action is to share a RuleGroup across accounts. // // The PutPermissionPolicy is subject to the following restrictions: // @@ -7267,6 +8099,27 @@ func (c *WAFRegional) TagResourceRequest(input *waf.TagResourceInput) (req *requ // TagResource API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// +// Associates tags with the specified AWS resource. Tags are key:value pairs +// that you can use to categorize and manage your resources, for purposes like +// billing. 
For example, you might set the tag key to "customer" and the value +// to the customer name or ID. You can specify one or more tags to add to each +// AWS resource, up to 50 tags for a resource. +// +// Tagging is only available through the API, SDKs, and CLI. You can't manage +// or view tags through the AWS WAF Classic console. You can use this action +// to tag the AWS resources that you manage through AWS WAF Classic: web ACLs, +// rule groups, and rules. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7390,6 +8243,16 @@ func (c *WAFRegional) UntagResourceRequest(input *waf.UntagResourceInput) (req * // UntagResource API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -7506,6 +8369,16 @@ func (c *WAFRegional) UpdateByteMatchSetRequest(input *waf.UpdateByteMatchSetInp // UpdateByteMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes ByteMatchTuple objects (filters) in a ByteMatchSet. For // each ByteMatchTuple object, you specify the following values: // @@ -7702,6 +8575,16 @@ func (c *WAFRegional) UpdateGeoMatchSetRequest(input *waf.UpdateGeoMatchSetInput // UpdateGeoMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes GeoMatchConstraint objects in an GeoMatchSet. For each // GeoMatchConstraint object, you specify the following values: // @@ -7897,6 +8780,16 @@ func (c *WAFRegional) UpdateIPSetRequest(input *waf.UpdateIPSetInput) (req *requ // UpdateIPSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes IPSetDescriptor objects in an IPSet. For each IPSetDescriptor // object, you specify the following values: // @@ -8116,6 +9009,16 @@ func (c *WAFRegional) UpdateRateBasedRuleRequest(input *waf.UpdateRateBasedRuleI // UpdateRateBasedRule API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes Predicate objects in a rule and updates the RateLimit // in the rule. // @@ -8132,13 +9035,13 @@ func (c *WAFRegional) UpdateRateBasedRuleRequest(input *waf.UpdateRateBasedRuleI // // * A ByteMatchSet that matches BadBot in the User-Agent header // -// Further, you specify a RateLimit of 15,000. +// Further, you specify a RateLimit of 1,000. // // You then add the RateBasedRule to a WebACL and specify that you want to block // requests that satisfy the rule. For a request to be blocked, it must come // from the IP address 192.0.2.44 and the User-Agent header in the request must // contain the value BadBot. Further, requests that match these two conditions -// much be received at a rate of more than 15,000 every five minutes. If the +// much be received at a rate of more than 1,000 every five minutes. If the // rate drops below this limit, AWS WAF no longer blocks the requests. // // As a second example, suppose you want to limit requests to a particular page @@ -8150,7 +9053,7 @@ func (c *WAFRegional) UpdateRateBasedRuleRequest(input *waf.UpdateRateBasedRuleI // // * A TargetString of login // -// Further, you specify a RateLimit of 15,000. +// Further, you specify a RateLimit of 1,000. // // By adding this RateBasedRule to a WebACL, you could limit requests to your // login page without affecting the rest of your site. @@ -8321,6 +9224,16 @@ func (c *WAFRegional) UpdateRegexMatchSetRequest(input *waf.UpdateRegexMatchSetI // UpdateRegexMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes RegexMatchTuple objects (filters) in a RegexMatchSet. // For each RegexMatchSetUpdate object, you specify the following values: // @@ -8488,6 +9401,16 @@ func (c *WAFRegional) UpdateRegexPatternSetRequest(input *waf.UpdateRegexPattern // UpdateRegexPatternSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. 
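A hedged sketch of the UpdateRateBasedRule flow described above: fetch a change token first (GetChangeToken is part of the same API, earlier in this file), then apply the update using the 1,000-request RateLimit from the example. The rule and IPSet IDs are placeholders, and plain strings stand in for the generated enum constants:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
	"github.com/aws/aws-sdk-go/service/wafregional"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := wafregional.New(sess)

	// Every mutating call is gated by a change token.
	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	_, err = svc.UpdateRateBasedRule(&waf.UpdateRateBasedRuleInput{
		ChangeToken: token.ChangeToken,
		RuleId:      aws.String("placeholder-rate-based-rule-id"),
		// Request threshold per five-minute period, matching the 1,000 example above.
		RateLimit: aws.Int64(1000),
		Updates: []*waf.RuleUpdate{
			{
				Action: aws.String("INSERT"),
				Predicate: &waf.Predicate{
					DataId:  aws.String("placeholder-ipset-id"),
					Negated: aws.Bool(false),
					Type:    aws.String("IPMatch"),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}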
For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes RegexPatternString objects in a RegexPatternSet. For each // RegexPatternString object, you specify the following values: // @@ -8652,6 +9575,16 @@ func (c *WAFRegional) UpdateRuleRequest(input *waf.UpdateRuleInput) (req *reques // UpdateRule API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes Predicate objects in a Rule. Each Predicate object identifies // a predicate, such as a ByteMatchSet or an IPSet, that specifies the web requests // that you want to allow, block, or count. If you add more than one predicate @@ -8853,6 +9786,16 @@ func (c *WAFRegional) UpdateRuleGroupRequest(input *waf.UpdateRuleGroupInput) (r // UpdateRuleGroup API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes ActivatedRule objects in a RuleGroup. // // You can only insert REGULAR rules into a rule group. @@ -9031,6 +9974,16 @@ func (c *WAFRegional) UpdateSizeConstraintSetRequest(input *waf.UpdateSizeConstr // UpdateSizeConstraintSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes SizeConstraint objects (filters) in a SizeConstraintSet. // For each SizeConstraint object, you specify the following values: // @@ -9238,6 +10191,16 @@ func (c *WAFRegional) UpdateSqlInjectionMatchSetRequest(input *waf.UpdateSqlInje // UpdateSqlInjectionMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. 
+// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes SqlInjectionMatchTuple objects (filters) in a SqlInjectionMatchSet. // For each SqlInjectionMatchTuple object, you specify the following values: // @@ -9430,6 +10393,16 @@ func (c *WAFRegional) UpdateWebACLRequest(input *waf.UpdateWebACLInput) (req *re // UpdateWebACL API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes ActivatedRule objects in a WebACL. Each Rule identifies // web requests that you want to allow, block, or count. When you update a WebACL, // you specify the following values: @@ -9472,12 +10445,12 @@ func (c *WAFRegional) UpdateWebACLRequest(input *waf.UpdateWebACLInput) (req *re // with a CloudFront distribution. // // The ActivatedRule can be a rule group. If you specify a rule group as your -// ActivatedRule, you can exclude specific rules from that rule group. +// ActivatedRule , you can exclude specific rules from that rule group. // // If you already have a rule group associated with a web ACL and want to submit // an UpdateWebACL request to exclude certain rules from that rule group, you // must first remove the rule group from the web ACL, the re-insert it again, -// specifying the excluded rules. For details, see ActivatedRule$ExcludedRules. +// specifying the excluded rules. For details, see ActivatedRule$ExcludedRules . // // Be aware that if you try to add a RATE_BASED rule to a web ACL without setting // the rule type when first creating the rule, the UpdateWebACL request will @@ -9656,6 +10629,16 @@ func (c *WAFRegional) UpdateXssMatchSetRequest(input *waf.UpdateXssMatchSetInput // UpdateXssMatchSet API operation for AWS WAF Regional. // +// +// This is AWS WAF Classic documentation. For more information, see AWS WAF +// Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// // Inserts or deletes XssMatchTuple objects (filters) in an XssMatchSet. 
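A hedged sketch of the UpdateWebACL call described above, inserting one ActivatedRule and setting its Type explicitly as the RATE_BASED caveat requires; all IDs are placeholders and the field names follow the shared waf types:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/waf"
	"github.com/aws/aws-sdk-go/service/wafregional"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := wafregional.New(sess)

	token, err := svc.GetChangeToken(&waf.GetChangeTokenInput{})
	if err != nil {
		log.Fatal(err)
	}

	_, err = svc.UpdateWebACL(&waf.UpdateWebACLInput{
		ChangeToken: token.ChangeToken,
		WebACLId:    aws.String("placeholder-web-acl-id"),
		Updates: []*waf.WebACLUpdate{
			{
				Action: aws.String("INSERT"),
				ActivatedRule: &waf.ActivatedRule{
					Priority: aws.Int64(1),
					RuleId:   aws.String("placeholder-rule-id"),
					Action:   &waf.WafAction{Type: aws.String("BLOCK")},
					// Set the rule type explicitly so a rate-based rule is not
					// silently treated as REGULAR, per the note above.
					Type: aws.String("REGULAR"),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}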
For // each XssMatchTuple object, you specify the following values: // @@ -10096,8 +11079,8 @@ func (s *ListResourcesForWebACLOutput) SetResourceArns(v []*string) *ListResourc } type WAFBadRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10114,17 +11097,17 @@ func (s WAFBadRequestException) GoString() string { func newErrorWAFBadRequestException(v protocol.ResponseMetadata) error { return &WAFBadRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFBadRequestException) Code() string { +func (s *WAFBadRequestException) Code() string { return "WAFBadRequestException" } // Message returns the exception's message. -func (s WAFBadRequestException) Message() string { +func (s *WAFBadRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10132,28 +11115,28 @@ func (s WAFBadRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFBadRequestException) OrigErr() error { +func (s *WAFBadRequestException) OrigErr() error { return nil } -func (s WAFBadRequestException) Error() string { +func (s *WAFBadRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFBadRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFBadRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFBadRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFBadRequestException) RequestID() string { + return s.RespMetadata.RequestID } // The name specified is invalid. type WAFDisallowedNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10170,17 +11153,17 @@ func (s WAFDisallowedNameException) GoString() string { func newErrorWAFDisallowedNameException(v protocol.ResponseMetadata) error { return &WAFDisallowedNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFDisallowedNameException) Code() string { +func (s *WAFDisallowedNameException) Code() string { return "WAFDisallowedNameException" } // Message returns the exception's message. -func (s WAFDisallowedNameException) Message() string { +func (s *WAFDisallowedNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10188,29 +11171,109 @@ func (s WAFDisallowedNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFDisallowedNameException) OrigErr() error { +func (s *WAFDisallowedNameException) OrigErr() error { return nil } -func (s WAFDisallowedNameException) Error() string { +func (s *WAFDisallowedNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s WAFDisallowedNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFDisallowedNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFDisallowedNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFDisallowedNameException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The operation failed due to a problem with the migration. The failure cause +// is provided in the exception, in the MigrationErrorType: +// +// * ENTITY_NOT_SUPPORTED - The web ACL has an unsupported entity but the +// IgnoreUnsupportedType is not set to true. +// +// * ENTITY_NOT_FOUND - The web ACL doesn't exist. +// +// * S3_BUCKET_NO_PERMISSION - You don't have permission to perform the PutObject +// action to the specified Amazon S3 bucket. +// +// * S3_BUCKET_NOT_ACCESSIBLE - The bucket policy doesn't allow AWS WAF to +// perform the PutObject action in the bucket. +// +// * S3_BUCKET_NOT_FOUND - The S3 bucket doesn't exist. +// +// * S3_BUCKET_INVALID_REGION - The S3 bucket is not in the same Region as +// the web ACL. +// +// * S3_INTERNAL_ERROR - AWS WAF failed to create the template in the S3 +// bucket for another reason. +type WAFEntityMigrationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` + + MigrationErrorReason *string `type:"string"` + + MigrationErrorType *string `type:"string" enum:"MigrationErrorType"` +} + +// String returns the string representation +func (s WAFEntityMigrationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WAFEntityMigrationException) GoString() string { + return s.String() +} + +func newErrorWAFEntityMigrationException(v protocol.ResponseMetadata) error { + return &WAFEntityMigrationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *WAFEntityMigrationException) Code() string { + return "WAFEntityMigrationException" +} + +// Message returns the exception's message. +func (s *WAFEntityMigrationException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *WAFEntityMigrationException) OrigErr() error { + return nil +} + +func (s *WAFEntityMigrationException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *WAFEntityMigrationException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *WAFEntityMigrationException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because of a system problem, even though the request // was valid. Retry your request. 
type WAFInternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10227,17 +11290,17 @@ func (s WAFInternalErrorException) GoString() string { func newErrorWAFInternalErrorException(v protocol.ResponseMetadata) error { return &WAFInternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFInternalErrorException) Code() string { +func (s *WAFInternalErrorException) Code() string { return "WAFInternalErrorException" } // Message returns the exception's message. -func (s WAFInternalErrorException) Message() string { +func (s *WAFInternalErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10245,29 +11308,29 @@ func (s WAFInternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFInternalErrorException) OrigErr() error { +func (s *WAFInternalErrorException) OrigErr() error { return nil } -func (s WAFInternalErrorException) Error() string { +func (s *WAFInternalErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFInternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFInternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFInternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFInternalErrorException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because you tried to create, update, or delete an object // by using an invalid account identifier. type WAFInvalidAccountException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10284,17 +11347,17 @@ func (s WAFInvalidAccountException) GoString() string { func newErrorWAFInvalidAccountException(v protocol.ResponseMetadata) error { return &WAFInvalidAccountException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFInvalidAccountException) Code() string { +func (s *WAFInvalidAccountException) Code() string { return "WAFInvalidAccountException" } // Message returns the exception's message. -func (s WAFInvalidAccountException) Message() string { +func (s *WAFInvalidAccountException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10302,22 +11365,22 @@ func (s WAFInvalidAccountException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFInvalidAccountException) OrigErr() error { +func (s *WAFInvalidAccountException) OrigErr() error { return nil } -func (s WAFInvalidAccountException) Error() string { +func (s *WAFInvalidAccountException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s WAFInvalidAccountException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFInvalidAccountException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFInvalidAccountException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFInvalidAccountException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because there was nothing to do. For example: @@ -10337,8 +11400,8 @@ func (s WAFInvalidAccountException) RequestID() string { // * You tried to add a ByteMatchTuple to a ByteMatchSet, but the ByteMatchTuple // already exists in the specified WebACL. type WAFInvalidOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10355,17 +11418,17 @@ func (s WAFInvalidOperationException) GoString() string { func newErrorWAFInvalidOperationException(v protocol.ResponseMetadata) error { return &WAFInvalidOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFInvalidOperationException) Code() string { +func (s *WAFInvalidOperationException) Code() string { return "WAFInvalidOperationException" } // Message returns the exception's message. -func (s WAFInvalidOperationException) Message() string { +func (s *WAFInvalidOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10373,22 +11436,22 @@ func (s WAFInvalidOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFInvalidOperationException) OrigErr() error { +func (s *WAFInvalidOperationException) OrigErr() error { return nil } -func (s WAFInvalidOperationException) Error() string { +func (s *WAFInvalidOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFInvalidOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFInvalidOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFInvalidOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFInvalidOperationException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because AWS WAF didn't recognize a parameter in the @@ -10419,8 +11482,8 @@ func (s WAFInvalidOperationException) RequestID() string { // * Your request references an ARN that is malformed, or corresponds to // a resource with which a web ACL cannot be associated. type WAFInvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Field *string `locationName:"field" type:"string" enum:"ParameterExceptionField"` @@ -10443,17 +11506,17 @@ func (s WAFInvalidParameterException) GoString() string { func newErrorWAFInvalidParameterException(v protocol.ResponseMetadata) error { return &WAFInvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s WAFInvalidParameterException) Code() string { +func (s *WAFInvalidParameterException) Code() string { return "WAFInvalidParameterException" } // Message returns the exception's message. -func (s WAFInvalidParameterException) Message() string { +func (s *WAFInvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10461,22 +11524,22 @@ func (s WAFInvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFInvalidParameterException) OrigErr() error { +func (s *WAFInvalidParameterException) OrigErr() error { return nil } -func (s WAFInvalidParameterException) Error() string { +func (s *WAFInvalidParameterException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFInvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFInvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFInvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFInvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because the specified policy is not in the proper format. @@ -10502,8 +11565,8 @@ func (s WAFInvalidParameterException) RequestID() string { // // * Your policy must be composed using IAM Policy version 2012-10-17. type WAFInvalidPermissionPolicyException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10520,17 +11583,17 @@ func (s WAFInvalidPermissionPolicyException) GoString() string { func newErrorWAFInvalidPermissionPolicyException(v protocol.ResponseMetadata) error { return &WAFInvalidPermissionPolicyException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFInvalidPermissionPolicyException) Code() string { +func (s *WAFInvalidPermissionPolicyException) Code() string { return "WAFInvalidPermissionPolicyException" } // Message returns the exception's message. -func (s WAFInvalidPermissionPolicyException) Message() string { +func (s *WAFInvalidPermissionPolicyException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10538,28 +11601,28 @@ func (s WAFInvalidPermissionPolicyException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFInvalidPermissionPolicyException) OrigErr() error { +func (s *WAFInvalidPermissionPolicyException) OrigErr() error { return nil } -func (s WAFInvalidPermissionPolicyException) Error() string { +func (s *WAFInvalidPermissionPolicyException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFInvalidPermissionPolicyException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFInvalidPermissionPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s WAFInvalidPermissionPolicyException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFInvalidPermissionPolicyException) RequestID() string { + return s.RespMetadata.RequestID } // The regular expression (regex) you specified in RegexPatternString is invalid. type WAFInvalidRegexPatternException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10576,17 +11639,17 @@ func (s WAFInvalidRegexPatternException) GoString() string { func newErrorWAFInvalidRegexPatternException(v protocol.ResponseMetadata) error { return &WAFInvalidRegexPatternException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFInvalidRegexPatternException) Code() string { +func (s *WAFInvalidRegexPatternException) Code() string { return "WAFInvalidRegexPatternException" } // Message returns the exception's message. -func (s WAFInvalidRegexPatternException) Message() string { +func (s *WAFInvalidRegexPatternException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10594,22 +11657,22 @@ func (s WAFInvalidRegexPatternException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFInvalidRegexPatternException) OrigErr() error { +func (s *WAFInvalidRegexPatternException) OrigErr() error { return nil } -func (s WAFInvalidRegexPatternException) Error() string { +func (s *WAFInvalidRegexPatternException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFInvalidRegexPatternException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFInvalidRegexPatternException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFInvalidRegexPatternException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFInvalidRegexPatternException) RequestID() string { + return s.RespMetadata.RequestID } // The operation exceeds a resource limit, for example, the maximum number of @@ -10617,8 +11680,8 @@ func (s WAFInvalidRegexPatternException) RequestID() string { // see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. type WAFLimitsExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10635,17 +11698,17 @@ func (s WAFLimitsExceededException) GoString() string { func newErrorWAFLimitsExceededException(v protocol.ResponseMetadata) error { return &WAFLimitsExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFLimitsExceededException) Code() string { +func (s *WAFLimitsExceededException) Code() string { return "WAFLimitsExceededException" } // Message returns the exception's message. -func (s WAFLimitsExceededException) Message() string { +func (s *WAFLimitsExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10653,22 +11716,22 @@ func (s WAFLimitsExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s WAFLimitsExceededException) OrigErr() error { +func (s *WAFLimitsExceededException) OrigErr() error { return nil } -func (s WAFLimitsExceededException) Error() string { +func (s *WAFLimitsExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFLimitsExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFLimitsExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFLimitsExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFLimitsExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because you tried to delete an object that isn't empty. @@ -10684,8 +11747,8 @@ func (s WAFLimitsExceededException) RequestID() string { // // * You tried to delete an IPSet that references one or more IP addresses. type WAFNonEmptyEntityException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10702,17 +11765,17 @@ func (s WAFNonEmptyEntityException) GoString() string { func newErrorWAFNonEmptyEntityException(v protocol.ResponseMetadata) error { return &WAFNonEmptyEntityException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFNonEmptyEntityException) Code() string { +func (s *WAFNonEmptyEntityException) Code() string { return "WAFNonEmptyEntityException" } // Message returns the exception's message. -func (s WAFNonEmptyEntityException) Message() string { +func (s *WAFNonEmptyEntityException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10720,22 +11783,22 @@ func (s WAFNonEmptyEntityException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFNonEmptyEntityException) OrigErr() error { +func (s *WAFNonEmptyEntityException) OrigErr() error { return nil } -func (s WAFNonEmptyEntityException) Error() string { +func (s *WAFNonEmptyEntityException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFNonEmptyEntityException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFNonEmptyEntityException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFNonEmptyEntityException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFNonEmptyEntityException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because you tried to add an object to or delete an object @@ -10753,8 +11816,8 @@ func (s WAFNonEmptyEntityException) RequestID() string { // * You tried to add a ByteMatchTuple to or delete a ByteMatchTuple from // a ByteMatchSet that doesn't exist. 
type WAFNonexistentContainerException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10771,17 +11834,17 @@ func (s WAFNonexistentContainerException) GoString() string { func newErrorWAFNonexistentContainerException(v protocol.ResponseMetadata) error { return &WAFNonexistentContainerException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFNonexistentContainerException) Code() string { +func (s *WAFNonexistentContainerException) Code() string { return "WAFNonexistentContainerException" } // Message returns the exception's message. -func (s WAFNonexistentContainerException) Message() string { +func (s *WAFNonexistentContainerException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10789,28 +11852,28 @@ func (s WAFNonexistentContainerException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFNonexistentContainerException) OrigErr() error { +func (s *WAFNonexistentContainerException) OrigErr() error { return nil } -func (s WAFNonexistentContainerException) Error() string { +func (s *WAFNonexistentContainerException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFNonexistentContainerException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFNonexistentContainerException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFNonexistentContainerException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFNonexistentContainerException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because the referenced object doesn't exist. type WAFNonexistentItemException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10827,17 +11890,17 @@ func (s WAFNonexistentItemException) GoString() string { func newErrorWAFNonexistentItemException(v protocol.ResponseMetadata) error { return &WAFNonexistentItemException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFNonexistentItemException) Code() string { +func (s *WAFNonexistentItemException) Code() string { return "WAFNonexistentItemException" } // Message returns the exception's message. -func (s WAFNonexistentItemException) Message() string { +func (s *WAFNonexistentItemException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10845,22 +11908,22 @@ func (s WAFNonexistentItemException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFNonexistentItemException) OrigErr() error { +func (s *WAFNonexistentItemException) OrigErr() error { return nil } -func (s WAFNonexistentItemException) Error() string { +func (s *WAFNonexistentItemException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s WAFNonexistentItemException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFNonexistentItemException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFNonexistentItemException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFNonexistentItemException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because you tried to delete an object that is still @@ -10870,8 +11933,8 @@ func (s WAFNonexistentItemException) RequestID() string { // // * You tried to delete a Rule that is still referenced by a WebACL. type WAFReferencedItemException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10888,17 +11951,17 @@ func (s WAFReferencedItemException) GoString() string { func newErrorWAFReferencedItemException(v protocol.ResponseMetadata) error { return &WAFReferencedItemException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFReferencedItemException) Code() string { +func (s *WAFReferencedItemException) Code() string { return "WAFReferencedItemException" } // Message returns the exception's message. -func (s WAFReferencedItemException) Message() string { +func (s *WAFReferencedItemException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10906,22 +11969,22 @@ func (s WAFReferencedItemException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFReferencedItemException) OrigErr() error { +func (s *WAFReferencedItemException) OrigErr() error { return nil } -func (s WAFReferencedItemException) Error() string { +func (s *WAFReferencedItemException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFReferencedItemException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFReferencedItemException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFReferencedItemException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFReferencedItemException) RequestID() string { + return s.RespMetadata.RequestID } // AWS WAF is not able to access the service linked role. This can be caused @@ -10933,8 +11996,8 @@ func (s WAFReferencedItemException) RequestID() string { // exception again, you will have to wait additional time until the role is // unlocked. type WAFServiceLinkedRoleErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -10951,17 +12014,17 @@ func (s WAFServiceLinkedRoleErrorException) GoString() string { func newErrorWAFServiceLinkedRoleErrorException(v protocol.ResponseMetadata) error { return &WAFServiceLinkedRoleErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFServiceLinkedRoleErrorException) Code() string { +func (s *WAFServiceLinkedRoleErrorException) Code() string { return "WAFServiceLinkedRoleErrorException" } // Message returns the exception's message. 
-func (s WAFServiceLinkedRoleErrorException) Message() string { +func (s *WAFServiceLinkedRoleErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -10969,29 +12032,29 @@ func (s WAFServiceLinkedRoleErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFServiceLinkedRoleErrorException) OrigErr() error { +func (s *WAFServiceLinkedRoleErrorException) OrigErr() error { return nil } -func (s WAFServiceLinkedRoleErrorException) Error() string { +func (s *WAFServiceLinkedRoleErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFServiceLinkedRoleErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFServiceLinkedRoleErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFServiceLinkedRoleErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFServiceLinkedRoleErrorException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because you tried to create, update, or delete an object // by using a change token that has already been used. type WAFStaleDataException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11008,17 +12071,17 @@ func (s WAFStaleDataException) GoString() string { func newErrorWAFStaleDataException(v protocol.ResponseMetadata) error { return &WAFStaleDataException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFStaleDataException) Code() string { +func (s *WAFStaleDataException) Code() string { return "WAFStaleDataException" } // Message returns the exception's message. -func (s WAFStaleDataException) Message() string { +func (s *WAFStaleDataException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11026,28 +12089,28 @@ func (s WAFStaleDataException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFStaleDataException) OrigErr() error { +func (s *WAFStaleDataException) OrigErr() error { return nil } -func (s WAFStaleDataException) Error() string { +func (s *WAFStaleDataException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFStaleDataException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFStaleDataException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFStaleDataException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFStaleDataException) RequestID() string { + return s.RespMetadata.RequestID } // The specified subscription does not exist. 
type WAFSubscriptionNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11064,17 +12127,17 @@ func (s WAFSubscriptionNotFoundException) GoString() string { func newErrorWAFSubscriptionNotFoundException(v protocol.ResponseMetadata) error { return &WAFSubscriptionNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFSubscriptionNotFoundException) Code() string { +func (s *WAFSubscriptionNotFoundException) Code() string { return "WAFSubscriptionNotFoundException" } // Message returns the exception's message. -func (s WAFSubscriptionNotFoundException) Message() string { +func (s *WAFSubscriptionNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11082,27 +12145,27 @@ func (s WAFSubscriptionNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFSubscriptionNotFoundException) OrigErr() error { +func (s *WAFSubscriptionNotFoundException) OrigErr() error { return nil } -func (s WAFSubscriptionNotFoundException) Error() string { +func (s *WAFSubscriptionNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFSubscriptionNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFSubscriptionNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFSubscriptionNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFSubscriptionNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type WAFTagOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11119,17 +12182,17 @@ func (s WAFTagOperationException) GoString() string { func newErrorWAFTagOperationException(v protocol.ResponseMetadata) error { return &WAFTagOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFTagOperationException) Code() string { +func (s *WAFTagOperationException) Code() string { return "WAFTagOperationException" } // Message returns the exception's message. -func (s WAFTagOperationException) Message() string { +func (s *WAFTagOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11137,27 +12200,27 @@ func (s WAFTagOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFTagOperationException) OrigErr() error { +func (s *WAFTagOperationException) OrigErr() error { return nil } -func (s WAFTagOperationException) Error() string { +func (s *WAFTagOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s WAFTagOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFTagOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFTagOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFTagOperationException) RequestID() string { + return s.RespMetadata.RequestID } type WAFTagOperationInternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11174,17 +12237,17 @@ func (s WAFTagOperationInternalErrorException) GoString() string { func newErrorWAFTagOperationInternalErrorException(v protocol.ResponseMetadata) error { return &WAFTagOperationInternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFTagOperationInternalErrorException) Code() string { +func (s *WAFTagOperationInternalErrorException) Code() string { return "WAFTagOperationInternalErrorException" } // Message returns the exception's message. -func (s WAFTagOperationInternalErrorException) Message() string { +func (s *WAFTagOperationInternalErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11192,29 +12255,29 @@ func (s WAFTagOperationInternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFTagOperationInternalErrorException) OrigErr() error { +func (s *WAFTagOperationInternalErrorException) OrigErr() error { return nil } -func (s WAFTagOperationInternalErrorException) Error() string { +func (s *WAFTagOperationInternalErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFTagOperationInternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFTagOperationInternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFTagOperationInternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFTagOperationInternalErrorException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because the entity referenced is temporarily unavailable. // Retry your request. type WAFUnavailableEntityException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -11231,17 +12294,17 @@ func (s WAFUnavailableEntityException) GoString() string { func newErrorWAFUnavailableEntityException(v protocol.ResponseMetadata) error { return &WAFUnavailableEntityException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFUnavailableEntityException) Code() string { +func (s *WAFUnavailableEntityException) Code() string { return "WAFUnavailableEntityException" } // Message returns the exception's message. 
-func (s WAFUnavailableEntityException) Message() string { +func (s *WAFUnavailableEntityException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -11249,22 +12312,22 @@ func (s WAFUnavailableEntityException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFUnavailableEntityException) OrigErr() error { +func (s *WAFUnavailableEntityException) OrigErr() error { return nil } -func (s WAFUnavailableEntityException) Error() string { +func (s *WAFUnavailableEntityException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFUnavailableEntityException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFUnavailableEntityException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFUnavailableEntityException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFUnavailableEntityException) RequestID() string { + return s.RespMetadata.RequestID } const ( @@ -11275,6 +12338,14 @@ const ( ChangeActionDelete = "DELETE" ) +// ChangeAction_Values returns all elements of the ChangeAction enum +func ChangeAction_Values() []string { + return []string{ + ChangeActionInsert, + ChangeActionDelete, + } +} + const ( // ChangeTokenStatusProvisioned is a ChangeTokenStatus enum value ChangeTokenStatusProvisioned = "PROVISIONED" @@ -11286,6 +12357,15 @@ const ( ChangeTokenStatusInsync = "INSYNC" ) +// ChangeTokenStatus_Values returns all elements of the ChangeTokenStatus enum +func ChangeTokenStatus_Values() []string { + return []string{ + ChangeTokenStatusProvisioned, + ChangeTokenStatusPending, + ChangeTokenStatusInsync, + } +} + const ( // ComparisonOperatorEq is a ComparisonOperator enum value ComparisonOperatorEq = "EQ" @@ -11306,11 +12386,30 @@ const ( ComparisonOperatorGt = "GT" ) +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorEq, + ComparisonOperatorNe, + ComparisonOperatorLe, + ComparisonOperatorLt, + ComparisonOperatorGe, + ComparisonOperatorGt, + } +} + const ( // GeoMatchConstraintTypeCountry is a GeoMatchConstraintType enum value GeoMatchConstraintTypeCountry = "Country" ) +// GeoMatchConstraintType_Values returns all elements of the GeoMatchConstraintType enum +func GeoMatchConstraintType_Values() []string { + return []string{ + GeoMatchConstraintTypeCountry, + } +} + const ( // GeoMatchConstraintValueAf is a GeoMatchConstraintValue enum value GeoMatchConstraintValueAf = "AF" @@ -12060,6 +13159,261 @@ const ( GeoMatchConstraintValueZw = "ZW" ) +// GeoMatchConstraintValue_Values returns all elements of the GeoMatchConstraintValue enum +func GeoMatchConstraintValue_Values() []string { + return []string{ + GeoMatchConstraintValueAf, + GeoMatchConstraintValueAx, + GeoMatchConstraintValueAl, + GeoMatchConstraintValueDz, + GeoMatchConstraintValueAs, + GeoMatchConstraintValueAd, + GeoMatchConstraintValueAo, + GeoMatchConstraintValueAi, + GeoMatchConstraintValueAq, + GeoMatchConstraintValueAg, + GeoMatchConstraintValueAr, + GeoMatchConstraintValueAm, + GeoMatchConstraintValueAw, + GeoMatchConstraintValueAu, + GeoMatchConstraintValueAt, + GeoMatchConstraintValueAz, + GeoMatchConstraintValueBs, + GeoMatchConstraintValueBh, + GeoMatchConstraintValueBd, + GeoMatchConstraintValueBb, + GeoMatchConstraintValueBy, 
+ GeoMatchConstraintValueBe, + GeoMatchConstraintValueBz, + GeoMatchConstraintValueBj, + GeoMatchConstraintValueBm, + GeoMatchConstraintValueBt, + GeoMatchConstraintValueBo, + GeoMatchConstraintValueBq, + GeoMatchConstraintValueBa, + GeoMatchConstraintValueBw, + GeoMatchConstraintValueBv, + GeoMatchConstraintValueBr, + GeoMatchConstraintValueIo, + GeoMatchConstraintValueBn, + GeoMatchConstraintValueBg, + GeoMatchConstraintValueBf, + GeoMatchConstraintValueBi, + GeoMatchConstraintValueKh, + GeoMatchConstraintValueCm, + GeoMatchConstraintValueCa, + GeoMatchConstraintValueCv, + GeoMatchConstraintValueKy, + GeoMatchConstraintValueCf, + GeoMatchConstraintValueTd, + GeoMatchConstraintValueCl, + GeoMatchConstraintValueCn, + GeoMatchConstraintValueCx, + GeoMatchConstraintValueCc, + GeoMatchConstraintValueCo, + GeoMatchConstraintValueKm, + GeoMatchConstraintValueCg, + GeoMatchConstraintValueCd, + GeoMatchConstraintValueCk, + GeoMatchConstraintValueCr, + GeoMatchConstraintValueCi, + GeoMatchConstraintValueHr, + GeoMatchConstraintValueCu, + GeoMatchConstraintValueCw, + GeoMatchConstraintValueCy, + GeoMatchConstraintValueCz, + GeoMatchConstraintValueDk, + GeoMatchConstraintValueDj, + GeoMatchConstraintValueDm, + GeoMatchConstraintValueDo, + GeoMatchConstraintValueEc, + GeoMatchConstraintValueEg, + GeoMatchConstraintValueSv, + GeoMatchConstraintValueGq, + GeoMatchConstraintValueEr, + GeoMatchConstraintValueEe, + GeoMatchConstraintValueEt, + GeoMatchConstraintValueFk, + GeoMatchConstraintValueFo, + GeoMatchConstraintValueFj, + GeoMatchConstraintValueFi, + GeoMatchConstraintValueFr, + GeoMatchConstraintValueGf, + GeoMatchConstraintValuePf, + GeoMatchConstraintValueTf, + GeoMatchConstraintValueGa, + GeoMatchConstraintValueGm, + GeoMatchConstraintValueGe, + GeoMatchConstraintValueDe, + GeoMatchConstraintValueGh, + GeoMatchConstraintValueGi, + GeoMatchConstraintValueGr, + GeoMatchConstraintValueGl, + GeoMatchConstraintValueGd, + GeoMatchConstraintValueGp, + GeoMatchConstraintValueGu, + GeoMatchConstraintValueGt, + GeoMatchConstraintValueGg, + GeoMatchConstraintValueGn, + GeoMatchConstraintValueGw, + GeoMatchConstraintValueGy, + GeoMatchConstraintValueHt, + GeoMatchConstraintValueHm, + GeoMatchConstraintValueVa, + GeoMatchConstraintValueHn, + GeoMatchConstraintValueHk, + GeoMatchConstraintValueHu, + GeoMatchConstraintValueIs, + GeoMatchConstraintValueIn, + GeoMatchConstraintValueId, + GeoMatchConstraintValueIr, + GeoMatchConstraintValueIq, + GeoMatchConstraintValueIe, + GeoMatchConstraintValueIm, + GeoMatchConstraintValueIl, + GeoMatchConstraintValueIt, + GeoMatchConstraintValueJm, + GeoMatchConstraintValueJp, + GeoMatchConstraintValueJe, + GeoMatchConstraintValueJo, + GeoMatchConstraintValueKz, + GeoMatchConstraintValueKe, + GeoMatchConstraintValueKi, + GeoMatchConstraintValueKp, + GeoMatchConstraintValueKr, + GeoMatchConstraintValueKw, + GeoMatchConstraintValueKg, + GeoMatchConstraintValueLa, + GeoMatchConstraintValueLv, + GeoMatchConstraintValueLb, + GeoMatchConstraintValueLs, + GeoMatchConstraintValueLr, + GeoMatchConstraintValueLy, + GeoMatchConstraintValueLi, + GeoMatchConstraintValueLt, + GeoMatchConstraintValueLu, + GeoMatchConstraintValueMo, + GeoMatchConstraintValueMk, + GeoMatchConstraintValueMg, + GeoMatchConstraintValueMw, + GeoMatchConstraintValueMy, + GeoMatchConstraintValueMv, + GeoMatchConstraintValueMl, + GeoMatchConstraintValueMt, + GeoMatchConstraintValueMh, + GeoMatchConstraintValueMq, + GeoMatchConstraintValueMr, + GeoMatchConstraintValueMu, + GeoMatchConstraintValueYt, + 
GeoMatchConstraintValueMx, + GeoMatchConstraintValueFm, + GeoMatchConstraintValueMd, + GeoMatchConstraintValueMc, + GeoMatchConstraintValueMn, + GeoMatchConstraintValueMe, + GeoMatchConstraintValueMs, + GeoMatchConstraintValueMa, + GeoMatchConstraintValueMz, + GeoMatchConstraintValueMm, + GeoMatchConstraintValueNa, + GeoMatchConstraintValueNr, + GeoMatchConstraintValueNp, + GeoMatchConstraintValueNl, + GeoMatchConstraintValueNc, + GeoMatchConstraintValueNz, + GeoMatchConstraintValueNi, + GeoMatchConstraintValueNe, + GeoMatchConstraintValueNg, + GeoMatchConstraintValueNu, + GeoMatchConstraintValueNf, + GeoMatchConstraintValueMp, + GeoMatchConstraintValueNo, + GeoMatchConstraintValueOm, + GeoMatchConstraintValuePk, + GeoMatchConstraintValuePw, + GeoMatchConstraintValuePs, + GeoMatchConstraintValuePa, + GeoMatchConstraintValuePg, + GeoMatchConstraintValuePy, + GeoMatchConstraintValuePe, + GeoMatchConstraintValuePh, + GeoMatchConstraintValuePn, + GeoMatchConstraintValuePl, + GeoMatchConstraintValuePt, + GeoMatchConstraintValuePr, + GeoMatchConstraintValueQa, + GeoMatchConstraintValueRe, + GeoMatchConstraintValueRo, + GeoMatchConstraintValueRu, + GeoMatchConstraintValueRw, + GeoMatchConstraintValueBl, + GeoMatchConstraintValueSh, + GeoMatchConstraintValueKn, + GeoMatchConstraintValueLc, + GeoMatchConstraintValueMf, + GeoMatchConstraintValuePm, + GeoMatchConstraintValueVc, + GeoMatchConstraintValueWs, + GeoMatchConstraintValueSm, + GeoMatchConstraintValueSt, + GeoMatchConstraintValueSa, + GeoMatchConstraintValueSn, + GeoMatchConstraintValueRs, + GeoMatchConstraintValueSc, + GeoMatchConstraintValueSl, + GeoMatchConstraintValueSg, + GeoMatchConstraintValueSx, + GeoMatchConstraintValueSk, + GeoMatchConstraintValueSi, + GeoMatchConstraintValueSb, + GeoMatchConstraintValueSo, + GeoMatchConstraintValueZa, + GeoMatchConstraintValueGs, + GeoMatchConstraintValueSs, + GeoMatchConstraintValueEs, + GeoMatchConstraintValueLk, + GeoMatchConstraintValueSd, + GeoMatchConstraintValueSr, + GeoMatchConstraintValueSj, + GeoMatchConstraintValueSz, + GeoMatchConstraintValueSe, + GeoMatchConstraintValueCh, + GeoMatchConstraintValueSy, + GeoMatchConstraintValueTw, + GeoMatchConstraintValueTj, + GeoMatchConstraintValueTz, + GeoMatchConstraintValueTh, + GeoMatchConstraintValueTl, + GeoMatchConstraintValueTg, + GeoMatchConstraintValueTk, + GeoMatchConstraintValueTo, + GeoMatchConstraintValueTt, + GeoMatchConstraintValueTn, + GeoMatchConstraintValueTr, + GeoMatchConstraintValueTm, + GeoMatchConstraintValueTc, + GeoMatchConstraintValueTv, + GeoMatchConstraintValueUg, + GeoMatchConstraintValueUa, + GeoMatchConstraintValueAe, + GeoMatchConstraintValueGb, + GeoMatchConstraintValueUs, + GeoMatchConstraintValueUm, + GeoMatchConstraintValueUy, + GeoMatchConstraintValueUz, + GeoMatchConstraintValueVu, + GeoMatchConstraintValueVe, + GeoMatchConstraintValueVn, + GeoMatchConstraintValueVg, + GeoMatchConstraintValueVi, + GeoMatchConstraintValueWf, + GeoMatchConstraintValueEh, + GeoMatchConstraintValueYe, + GeoMatchConstraintValueZm, + GeoMatchConstraintValueZw, + } +} + const ( // IPSetDescriptorTypeIpv4 is a IPSetDescriptorType enum value IPSetDescriptorTypeIpv4 = "IPV4" @@ -12068,6 +13422,14 @@ const ( IPSetDescriptorTypeIpv6 = "IPV6" ) +// IPSetDescriptorType_Values returns all elements of the IPSetDescriptorType enum +func IPSetDescriptorType_Values() []string { + return []string{ + IPSetDescriptorTypeIpv4, + IPSetDescriptorTypeIpv6, + } +} + const ( // MatchFieldTypeUri is a MatchFieldType enum value MatchFieldTypeUri = "URI" @@ 
-12091,6 +13453,55 @@ const ( MatchFieldTypeAllQueryArgs = "ALL_QUERY_ARGS" ) +// MatchFieldType_Values returns all elements of the MatchFieldType enum +func MatchFieldType_Values() []string { + return []string{ + MatchFieldTypeUri, + MatchFieldTypeQueryString, + MatchFieldTypeHeader, + MatchFieldTypeMethod, + MatchFieldTypeBody, + MatchFieldTypeSingleQueryArg, + MatchFieldTypeAllQueryArgs, + } +} + +const ( + // MigrationErrorTypeEntityNotSupported is a MigrationErrorType enum value + MigrationErrorTypeEntityNotSupported = "ENTITY_NOT_SUPPORTED" + + // MigrationErrorTypeEntityNotFound is a MigrationErrorType enum value + MigrationErrorTypeEntityNotFound = "ENTITY_NOT_FOUND" + + // MigrationErrorTypeS3BucketNoPermission is a MigrationErrorType enum value + MigrationErrorTypeS3BucketNoPermission = "S3_BUCKET_NO_PERMISSION" + + // MigrationErrorTypeS3BucketNotAccessible is a MigrationErrorType enum value + MigrationErrorTypeS3BucketNotAccessible = "S3_BUCKET_NOT_ACCESSIBLE" + + // MigrationErrorTypeS3BucketNotFound is a MigrationErrorType enum value + MigrationErrorTypeS3BucketNotFound = "S3_BUCKET_NOT_FOUND" + + // MigrationErrorTypeS3BucketInvalidRegion is a MigrationErrorType enum value + MigrationErrorTypeS3BucketInvalidRegion = "S3_BUCKET_INVALID_REGION" + + // MigrationErrorTypeS3InternalError is a MigrationErrorType enum value + MigrationErrorTypeS3InternalError = "S3_INTERNAL_ERROR" +) + +// MigrationErrorType_Values returns all elements of the MigrationErrorType enum +func MigrationErrorType_Values() []string { + return []string{ + MigrationErrorTypeEntityNotSupported, + MigrationErrorTypeEntityNotFound, + MigrationErrorTypeS3BucketNoPermission, + MigrationErrorTypeS3BucketNotAccessible, + MigrationErrorTypeS3BucketNotFound, + MigrationErrorTypeS3BucketInvalidRegion, + MigrationErrorTypeS3InternalError, + } +} + const ( // ParameterExceptionFieldChangeAction is a ParameterExceptionField enum value ParameterExceptionFieldChangeAction = "CHANGE_ACTION" @@ -12147,6 +13558,30 @@ const ( ParameterExceptionFieldTagKeys = "TAG_KEYS" ) +// ParameterExceptionField_Values returns all elements of the ParameterExceptionField enum +func ParameterExceptionField_Values() []string { + return []string{ + ParameterExceptionFieldChangeAction, + ParameterExceptionFieldWafAction, + ParameterExceptionFieldWafOverrideAction, + ParameterExceptionFieldPredicateType, + ParameterExceptionFieldIpsetType, + ParameterExceptionFieldByteMatchFieldType, + ParameterExceptionFieldSqlInjectionMatchFieldType, + ParameterExceptionFieldByteMatchTextTransformation, + ParameterExceptionFieldByteMatchPositionalConstraint, + ParameterExceptionFieldSizeConstraintComparisonOperator, + ParameterExceptionFieldGeoMatchLocationType, + ParameterExceptionFieldGeoMatchLocationValue, + ParameterExceptionFieldRateKey, + ParameterExceptionFieldRuleType, + ParameterExceptionFieldNextMarker, + ParameterExceptionFieldResourceArn, + ParameterExceptionFieldTags, + ParameterExceptionFieldTagKeys, + } +} + const ( // ParameterExceptionReasonInvalidOption is a ParameterExceptionReason enum value ParameterExceptionReasonInvalidOption = "INVALID_OPTION" @@ -12161,6 +13596,16 @@ const ( ParameterExceptionReasonInvalidTagKey = "INVALID_TAG_KEY" ) +// ParameterExceptionReason_Values returns all elements of the ParameterExceptionReason enum +func ParameterExceptionReason_Values() []string { + return []string{ + ParameterExceptionReasonInvalidOption, + ParameterExceptionReasonIllegalCombination, + ParameterExceptionReasonIllegalArgument, + 
ParameterExceptionReasonInvalidTagKey, + } +} + const ( // PositionalConstraintExactly is a PositionalConstraint enum value PositionalConstraintExactly = "EXACTLY" @@ -12178,6 +13623,17 @@ const ( PositionalConstraintContainsWord = "CONTAINS_WORD" ) +// PositionalConstraint_Values returns all elements of the PositionalConstraint enum +func PositionalConstraint_Values() []string { + return []string{ + PositionalConstraintExactly, + PositionalConstraintStartsWith, + PositionalConstraintEndsWith, + PositionalConstraintContains, + PositionalConstraintContainsWord, + } +} + const ( // PredicateTypeIpmatch is a PredicateType enum value PredicateTypeIpmatch = "IPMatch" @@ -12201,11 +13657,31 @@ const ( PredicateTypeRegexMatch = "RegexMatch" ) +// PredicateType_Values returns all elements of the PredicateType enum +func PredicateType_Values() []string { + return []string{ + PredicateTypeIpmatch, + PredicateTypeByteMatch, + PredicateTypeSqlInjectionMatch, + PredicateTypeGeoMatch, + PredicateTypeSizeConstraint, + PredicateTypeXssMatch, + PredicateTypeRegexMatch, + } +} + const ( // RateKeyIp is a RateKey enum value RateKeyIp = "IP" ) +// RateKey_Values returns all elements of the RateKey enum +func RateKey_Values() []string { + return []string{ + RateKeyIp, + } +} + const ( // ResourceTypeApplicationLoadBalancer is a ResourceType enum value ResourceTypeApplicationLoadBalancer = "APPLICATION_LOAD_BALANCER" @@ -12214,6 +13690,14 @@ const ( ResourceTypeApiGateway = "API_GATEWAY" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeApplicationLoadBalancer, + ResourceTypeApiGateway, + } +} + const ( // TextTransformationNone is a TextTransformation enum value TextTransformationNone = "NONE" @@ -12234,6 +13718,18 @@ const ( TextTransformationUrlDecode = "URL_DECODE" ) +// TextTransformation_Values returns all elements of the TextTransformation enum +func TextTransformation_Values() []string { + return []string{ + TextTransformationNone, + TextTransformationCompressWhiteSpace, + TextTransformationHtmlEntityDecode, + TextTransformationLowercase, + TextTransformationCmdLine, + TextTransformationUrlDecode, + } +} + const ( // WafActionTypeBlock is a WafActionType enum value WafActionTypeBlock = "BLOCK" @@ -12245,6 +13741,15 @@ const ( WafActionTypeCount = "COUNT" ) +// WafActionType_Values returns all elements of the WafActionType enum +func WafActionType_Values() []string { + return []string{ + WafActionTypeBlock, + WafActionTypeAllow, + WafActionTypeCount, + } +} + const ( // WafOverrideActionTypeNone is a WafOverrideActionType enum value WafOverrideActionTypeNone = "NONE" @@ -12253,6 +13758,14 @@ const ( WafOverrideActionTypeCount = "COUNT" ) +// WafOverrideActionType_Values returns all elements of the WafOverrideActionType enum +func WafOverrideActionType_Values() []string { + return []string{ + WafOverrideActionTypeNone, + WafOverrideActionTypeCount, + } +} + const ( // WafRuleTypeRegular is a WafRuleType enum value WafRuleTypeRegular = "REGULAR" @@ -12263,3 +13776,12 @@ const ( // WafRuleTypeGroup is a WafRuleType enum value WafRuleTypeGroup = "GROUP" ) + +// WafRuleType_Values returns all elements of the WafRuleType enum +func WafRuleType_Values() []string { + return []string{ + WafRuleTypeRegular, + WafRuleTypeRateBased, + WafRuleTypeGroup, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafregional/doc.go b/vendor/github.com/aws/aws-sdk-go/service/wafregional/doc.go index 861b0ae88..943ae86e1 
100644 --- a/vendor/github.com/aws/aws-sdk-go/service/wafregional/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/wafregional/doc.go @@ -3,15 +3,27 @@ // Package wafregional provides the client and types for making API // requests to AWS WAF Regional. // -// This is the AWS WAF Regional API Reference for using AWS WAF with Elastic -// Load Balancing (ELB) Application Load Balancers. The AWS WAF actions and -// data types listed in the reference are available for protecting Application -// Load Balancers. You can use these actions and data types by means of the -// endpoints listed in AWS Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#waf_region). +// +// This is AWS WAF Classic Regional documentation. For more information, see +// AWS WAF Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. +// +// For the latest version of AWS WAF, use the AWS WAFV2 API and see the AWS +// WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// With the latest version, AWS WAF has a single set of endpoints for regional +// and global use. +// +// This is the AWS WAF Regional Classic API Reference for using AWS WAF Classic +// with the AWS resources, Elastic Load Balancing (ELB) Application Load Balancers +// and API Gateway APIs. The AWS WAF Classic actions and data types listed in +// the reference are available for protecting Elastic Load Balancing (ELB) Application +// Load Balancers and API Gateway APIs. You can use these actions and data types +// by means of the endpoints listed in AWS Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#waf_region). // This guide is for developers who need detailed information about the AWS -// WAF API actions, data types, and errors. For detailed information about AWS -// WAF features and an overview of how to use the AWS WAF API, see the AWS WAF -// Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). +// WAF Classic API actions, data types, and errors. For detailed information +// about AWS WAF Classic features and an overview of how to use the AWS WAF +// Classic API, see the AWS WAF Classic (https://docs.aws.amazon.com/waf/latest/developerguide/classic-waf-chapter.html) +// in the developer guide. // // See https://docs.aws.amazon.com/goto/WebAPI/waf-regional-2016-11-28 for more information on this service. // diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go b/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go index e02205357..32d651ca7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/wafregional/errors.go @@ -18,6 +18,32 @@ const ( // The name specified is invalid. ErrCodeWAFDisallowedNameException = "WAFDisallowedNameException" + // ErrCodeWAFEntityMigrationException for service response error code + // "WAFEntityMigrationException". + // + // The operation failed due to a problem with the migration. The failure cause + // is provided in the exception, in the MigrationErrorType: + // + // * ENTITY_NOT_SUPPORTED - The web ACL has an unsupported entity but the + // IgnoreUnsupportedType is not set to true. + // + // * ENTITY_NOT_FOUND - The web ACL doesn't exist. + // + // * S3_BUCKET_NO_PERMISSION - You don't have permission to perform the PutObject + // action to the specified Amazon S3 bucket. 
+ // + // * S3_BUCKET_NOT_ACCESSIBLE - The bucket policy doesn't allow AWS WAF to + // perform the PutObject action in the bucket. + // + // * S3_BUCKET_NOT_FOUND - The S3 bucket doesn't exist. + // + // * S3_BUCKET_INVALID_REGION - The S3 bucket is not in the same Region as + // the web ACL. + // + // * S3_INTERNAL_ERROR - AWS WAF failed to create the template in the S3 + // bucket for another reason. + ErrCodeWAFEntityMigrationException = "WAFEntityMigrationException" + // ErrCodeWAFInternalErrorException for service response error code // "WAFInternalErrorException". // @@ -225,6 +251,7 @@ const ( var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "WAFBadRequestException": newErrorWAFBadRequestException, "WAFDisallowedNameException": newErrorWAFDisallowedNameException, + "WAFEntityMigrationException": newErrorWAFEntityMigrationException, "WAFInternalErrorException": newErrorWAFInternalErrorException, "WAFInvalidAccountException": newErrorWAFInvalidAccountException, "WAFInvalidOperationException": newErrorWAFInvalidOperationException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go b/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go index 814e91823..769e41767 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafv2/api.go b/vendor/github.com/aws/aws-sdk-go/service/wafv2/api.go index 3d2b56c25..a7112a251 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/wafv2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/wafv2/api.go @@ -64,12 +64,13 @@ func (c *WAFV2) AssociateWebACLRequest(input *AssociateWebACLInput) (req *reques // the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). // // Associates a Web ACL with a regional application resource, to protect the -// resource. A regional application can be an Application Load Balancer (ALB) -// or an API Gateway stage. +// resource. A regional application can be an Application Load Balancer (ALB), +// an API Gateway REST API, or an AppSync GraphQL API. // -// For AWS CloudFront, you can associate the Web ACL by providing the ARN of -// the WebACL to the CloudFront API call UpdateDistribution. For information, -// see UpdateDistribution (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_UpdateDistribution.html). +// For AWS CloudFront, don't use this call. Instead, use your CloudFront distribution +// configuration. To associate a Web ACL, in the CloudFront call UpdateDistribution, +// set the web ACL ID to the Amazon Resource Name (ARN) of the Web ACL. For +// information, see UpdateDistribution (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_UpdateDistribution.html). // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -105,6 +106,9 @@ func (c *WAFV2) AssociateWebACLRequest(input *AssociateWebACLInput) (req *reques // * WAFUnavailableEntityException // AWS WAF couldn’t retrieve the resource that you requested. Retry your request. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/AssociateWebACL func (c *WAFV2) AssociateWebACL(input *AssociateWebACLInput) (*AssociateWebACLOutput, error) { req, out := c.AssociateWebACLRequest(input) @@ -360,6 +364,9 @@ func (c *WAFV2) CreateIPSetRequest(input *CreateIPSetInput) (req *request.Reques // AWS WAF couldn’t perform your tagging operation because of an internal // error. Retry your request. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/CreateIPSet func (c *WAFV2) CreateIPSet(input *CreateIPSetInput) (*CreateIPSetOutput, error) { req, out := c.CreateIPSetRequest(input) @@ -484,6 +491,9 @@ func (c *WAFV2) CreateRegexPatternSetRequest(input *CreateRegexPatternSetInput) // AWS WAF couldn’t perform your tagging operation because of an internal // error. Retry your request. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/CreateRegexPatternSet func (c *WAFV2) CreateRegexPatternSet(input *CreateRegexPatternSetInput) (*CreateRegexPatternSetOutput, error) { req, out := c.CreateRegexPatternSetRequest(input) @@ -618,6 +628,13 @@ func (c *WAFV2) CreateRuleGroupRequest(input *CreateRuleGroupInput) (req *reques // // * WAFSubscriptionNotFoundException // +// * WAFNonexistentItemException +// AWS WAF couldn’t perform the operation because your resource doesn’t +// exist. +// +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/CreateRuleGroup func (c *WAFV2) CreateRuleGroup(input *CreateRuleGroupInput) (*CreateRuleGroupOutput, error) { req, out := c.CreateRuleGroupRequest(input) @@ -698,7 +715,8 @@ func (c *WAFV2) CreateWebACLRequest(input *CreateWebACLInput) (req *request.Requ // the rules. The rules in a Web ACL can be a combination of the types Rule, // RuleGroup, and managed rule group. You can associate a Web ACL with one or // more AWS resources to protect. The resources can be Amazon CloudFront, an -// Amazon API Gateway API, or an Application Load Balancer. +// Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync +// GraphQL API. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -763,6 +781,9 @@ func (c *WAFV2) CreateWebACLRequest(input *CreateWebACLInput) (req *request.Requ // // * WAFSubscriptionNotFoundException // +// * WAFInvalidOperationException +// The operation isn't valid. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/CreateWebACL func (c *WAFV2) CreateWebACL(input *CreateWebACLInput) (*CreateWebACLOutput, error) { req, out := c.CreateWebACLRequest(input) @@ -785,6 +806,118 @@ func (c *WAFV2) CreateWebACLWithContext(ctx aws.Context, input *CreateWebACLInpu return out, req.Send() } +const opDeleteFirewallManagerRuleGroups = "DeleteFirewallManagerRuleGroups" + +// DeleteFirewallManagerRuleGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFirewallManagerRuleGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteFirewallManagerRuleGroups for more information on using the DeleteFirewallManagerRuleGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteFirewallManagerRuleGroupsRequest method. +// req, resp := client.DeleteFirewallManagerRuleGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/DeleteFirewallManagerRuleGroups +func (c *WAFV2) DeleteFirewallManagerRuleGroupsRequest(input *DeleteFirewallManagerRuleGroupsInput) (req *request.Request, output *DeleteFirewallManagerRuleGroupsOutput) { + op := &request.Operation{ + Name: opDeleteFirewallManagerRuleGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteFirewallManagerRuleGroupsInput{} + } + + output = &DeleteFirewallManagerRuleGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteFirewallManagerRuleGroups API operation for AWS WAFV2. +// +// Deletes all rule groups that are managed by AWS Firewall Manager for the +// specified web ACL. +// +// You can only use this if ManagedByFirewallManager is false in the specified +// WebACL. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAFV2's +// API operation DeleteFirewallManagerRuleGroups for usage and error information. +// +// Returned Error Types: +// * WAFInternalErrorException +// Your request is valid, but AWS WAF couldn’t perform the operation because +// of a system problem. Retry your request. +// +// * WAFInvalidParameterException +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name or value. +// +// * Your nested statement isn't valid. You might have tried to nest a statement +// that can’t be nested. +// +// * You tried to update a WebACL with a DefaultAction that isn't among the +// types available at DefaultAction. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a Web ACL cannot be associated. +// +// * WAFNonexistentItemException +// AWS WAF couldn’t perform the operation because your resource doesn’t +// exist. 
+// +// * WAFOptimisticLockException +// AWS WAF couldn’t save your changes because you tried to update or delete +// a resource that has changed since you last retrieved it. Get the resource +// again, make any changes you need to make to the new copy, and retry your +// operation. +// +// * WAFInvalidOperationException +// The operation isn't valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/DeleteFirewallManagerRuleGroups +func (c *WAFV2) DeleteFirewallManagerRuleGroups(input *DeleteFirewallManagerRuleGroupsInput) (*DeleteFirewallManagerRuleGroupsOutput, error) { + req, out := c.DeleteFirewallManagerRuleGroupsRequest(input) + return out, req.Send() +} + +// DeleteFirewallManagerRuleGroupsWithContext is the same as DeleteFirewallManagerRuleGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteFirewallManagerRuleGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFV2) DeleteFirewallManagerRuleGroupsWithContext(ctx aws.Context, input *DeleteFirewallManagerRuleGroupsInput, opts ...request.Option) (*DeleteFirewallManagerRuleGroupsOutput, error) { + req, out := c.DeleteFirewallManagerRuleGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteIPSet = "DeleteIPSet" // DeleteIPSetRequest generates a "aws/request.Request" representing the @@ -885,6 +1018,9 @@ func (c *WAFV2) DeleteIPSetRequest(input *DeleteIPSetInput) (req *request.Reques // AWS WAF couldn’t perform your tagging operation because of an internal // error. Retry your request. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/DeleteIPSet func (c *WAFV2) DeleteIPSet(input *DeleteIPSetInput) (*DeleteIPSetOutput, error) { req, out := c.DeleteIPSetRequest(input) @@ -981,6 +1117,24 @@ func (c *WAFV2) DeleteLoggingConfigurationRequest(input *DeleteLoggingConfigurat // again, make any changes you need to make to the new copy, and retry your // operation. // +// * WAFInvalidParameterException +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name or value. +// +// * Your nested statement isn't valid. You might have tried to nest a statement +// that can’t be nested. +// +// * You tried to update a WebACL with a DefaultAction that isn't among the +// types available at DefaultAction. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a Web ACL cannot be associated. +// +// * WAFInvalidOperationException +// The operation isn't valid. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/DeleteLoggingConfiguration func (c *WAFV2) DeleteLoggingConfiguration(input *DeleteLoggingConfigurationInput) (*DeleteLoggingConfigurationOutput, error) { req, out := c.DeleteLoggingConfigurationRequest(input) @@ -1003,6 +1157,108 @@ func (c *WAFV2) DeleteLoggingConfigurationWithContext(ctx aws.Context, input *De return out, req.Send() } +const opDeletePermissionPolicy = "DeletePermissionPolicy" + +// DeletePermissionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeletePermissionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePermissionPolicy for more information on using the DeletePermissionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePermissionPolicyRequest method. +// req, resp := client.DeletePermissionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/DeletePermissionPolicy +func (c *WAFV2) DeletePermissionPolicyRequest(input *DeletePermissionPolicyInput) (req *request.Request, output *DeletePermissionPolicyOutput) { + op := &request.Operation{ + Name: opDeletePermissionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePermissionPolicyInput{} + } + + output = &DeletePermissionPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePermissionPolicy API operation for AWS WAFV2. +// +// Permanently deletes an IAM policy from the specified rule group. +// +// You must be the owner of the rule group to perform this operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAFV2's +// API operation DeletePermissionPolicy for usage and error information. +// +// Returned Error Types: +// * WAFNonexistentItemException +// AWS WAF couldn’t perform the operation because your resource doesn’t +// exist. +// +// * WAFInternalErrorException +// Your request is valid, but AWS WAF couldn’t perform the operation because +// of a system problem. Retry your request. +// +// * WAFInvalidParameterException +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name or value. +// +// * Your nested statement isn't valid. You might have tried to nest a statement +// that can’t be nested. +// +// * You tried to update a WebACL with a DefaultAction that isn't among the +// types available at DefaultAction. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a Web ACL cannot be associated. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/DeletePermissionPolicy +func (c *WAFV2) DeletePermissionPolicy(input *DeletePermissionPolicyInput) (*DeletePermissionPolicyOutput, error) { + req, out := c.DeletePermissionPolicyRequest(input) + return out, req.Send() +} + +// DeletePermissionPolicyWithContext is the same as DeletePermissionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePermissionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFV2) DeletePermissionPolicyWithContext(ctx aws.Context, input *DeletePermissionPolicyInput, opts ...request.Option) (*DeletePermissionPolicyOutput, error) { + req, out := c.DeletePermissionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteRegexPatternSet = "DeleteRegexPatternSet" // DeleteRegexPatternSetRequest generates a "aws/request.Request" representing the @@ -1103,6 +1359,9 @@ func (c *WAFV2) DeleteRegexPatternSetRequest(input *DeleteRegexPatternSetInput) // AWS WAF couldn’t perform your tagging operation because of an internal // error. Retry your request. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/DeleteRegexPatternSet func (c *WAFV2) DeleteRegexPatternSet(input *DeleteRegexPatternSetInput) (*DeleteRegexPatternSetOutput, error) { req, out := c.DeleteRegexPatternSetRequest(input) @@ -1225,6 +1484,9 @@ func (c *WAFV2) DeleteRuleGroupRequest(input *DeleteRuleGroupInput) (req *reques // AWS WAF couldn’t perform your tagging operation because of an internal // error. Retry your request. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/DeleteRuleGroup func (c *WAFV2) DeleteRuleGroup(input *DeleteRuleGroupInput) (*DeleteRuleGroupOutput, error) { req, out := c.DeleteRuleGroupRequest(input) @@ -1299,6 +1561,9 @@ func (c *WAFV2) DeleteWebACLRequest(input *DeleteWebACLInput) (req *request.Requ // // Deletes the specified WebACL. // +// You can only use this if ManagedByFirewallManager is false in the specified +// WebACL. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. @@ -1347,6 +1612,9 @@ func (c *WAFV2) DeleteWebACLRequest(input *DeleteWebACLInput) (req *request.Requ // AWS WAF couldn’t perform your tagging operation because of an internal // error. Retry your request. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/DeleteWebACL func (c *WAFV2) DeleteWebACL(input *DeleteWebACLInput) (*DeleteWebACLOutput, error) { req, out := c.DeleteWebACLRequest(input) @@ -1456,6 +1724,9 @@ func (c *WAFV2) DescribeManagedRuleGroupRequest(input *DescribeManagedRuleGroupI // AWS WAF couldn’t perform the operation because your resource doesn’t // exist. // +// * WAFInvalidOperationException +// The operation isn't valid. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/DescribeManagedRuleGroup func (c *WAFV2) DescribeManagedRuleGroup(input *DescribeManagedRuleGroupInput) (*DescribeManagedRuleGroupOutput, error) { req, out := c.DescribeManagedRuleGroupRequest(input) @@ -1529,11 +1800,13 @@ func (c *WAFV2) DisassociateWebACLRequest(input *DisassociateWebACLInput) (req * // the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). // // Disassociates a Web ACL from a regional application resource. A regional -// application can be an Application Load Balancer (ALB) or an API Gateway stage. +// application can be an Application Load Balancer (ALB), an API Gateway REST +// API, or an AppSync GraphQL API. // -// For AWS CloudFront, you can disassociate the Web ACL by providing an empty -// web ACL ARN in the CloudFront API call UpdateDistribution. For information, -// see UpdateDistribution (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_UpdateDistribution.html). +// For AWS CloudFront, don't use this call. Instead, use your CloudFront distribution +// configuration. To disassociate a Web ACL, provide an empty web ACL ID in +// the CloudFront call UpdateDistribution. For information, see UpdateDistribution +// (https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_UpdateDistribution.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1566,6 +1839,9 @@ func (c *WAFV2) DisassociateWebACLRequest(input *DisassociateWebACLInput) (req * // AWS WAF couldn’t perform the operation because your resource doesn’t // exist. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/DisassociateWebACL func (c *WAFV2) DisassociateWebACL(input *DisassociateWebACLInput) (*DisassociateWebACLOutput, error) { req, out := c.DisassociateWebACLRequest(input) @@ -1670,6 +1946,9 @@ func (c *WAFV2) GetIPSetRequest(input *GetIPSetInput) (req *request.Request, out // AWS WAF couldn’t perform the operation because your resource doesn’t // exist. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetIPSet func (c *WAFV2) GetIPSet(input *GetIPSetInput) (*GetIPSetOutput, error) { req, out := c.GetIPSetRequest(input) @@ -1759,6 +2038,24 @@ func (c *WAFV2) GetLoggingConfigurationRequest(input *GetLoggingConfigurationInp // AWS WAF couldn’t perform the operation because your resource doesn’t // exist. // +// * WAFInvalidParameterException +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name or value. +// +// * Your nested statement isn't valid. You might have tried to nest a statement +// that can’t be nested. +// +// * You tried to update a WebACL with a DefaultAction that isn't among the +// types available at DefaultAction. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a Web ACL cannot be associated. +// +// * WAFInvalidOperationException +// The operation isn't valid. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetLoggingConfiguration func (c *WAFV2) GetLoggingConfiguration(input *GetLoggingConfigurationInput) (*GetLoggingConfigurationOutput, error) { req, out := c.GetLoggingConfigurationRequest(input) @@ -1781,68 +2078,66 @@ func (c *WAFV2) GetLoggingConfigurationWithContext(ctx aws.Context, input *GetLo return out, req.Send() } -const opGetRateBasedStatementManagedKeys = "GetRateBasedStatementManagedKeys" +const opGetPermissionPolicy = "GetPermissionPolicy" -// GetRateBasedStatementManagedKeysRequest generates a "aws/request.Request" representing the -// client's request for the GetRateBasedStatementManagedKeys operation. The "output" return +// GetPermissionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetPermissionPolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetRateBasedStatementManagedKeys for more information on using the GetRateBasedStatementManagedKeys +// See GetPermissionPolicy for more information on using the GetPermissionPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetRateBasedStatementManagedKeysRequest method. -// req, resp := client.GetRateBasedStatementManagedKeysRequest(params) +// // Example sending a request using the GetPermissionPolicyRequest method. +// req, resp := client.GetPermissionPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetRateBasedStatementManagedKeys -func (c *WAFV2) GetRateBasedStatementManagedKeysRequest(input *GetRateBasedStatementManagedKeysInput) (req *request.Request, output *GetRateBasedStatementManagedKeysOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetPermissionPolicy +func (c *WAFV2) GetPermissionPolicyRequest(input *GetPermissionPolicyInput) (req *request.Request, output *GetPermissionPolicyOutput) { op := &request.Operation{ - Name: opGetRateBasedStatementManagedKeys, + Name: opGetPermissionPolicy, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetRateBasedStatementManagedKeysInput{} + input = &GetPermissionPolicyInput{} } - output = &GetRateBasedStatementManagedKeysOutput{} + output = &GetPermissionPolicyOutput{} req = c.newRequest(op, input, output) return } -// GetRateBasedStatementManagedKeys API operation for AWS WAFV2. -// +// GetPermissionPolicy API operation for AWS WAFV2. // -// This is the latest version of AWS WAF, named AWS WAFV2, released in November, -// 2019. For information, including how to migrate your AWS WAF resources from -// the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// Returns the IAM policy that is attached to the specified rule group. // -// Retrieves the keys that are currently blocked by a rate-based rule. The maximum -// number of managed keys that can be blocked for a single rate-based rule is -// 10,000. 
If more than 10,000 addresses exceed the rate limit, those with the -// highest rates are blocked. +// You must be the owner of the rule group to perform this operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS WAFV2's -// API operation GetRateBasedStatementManagedKeys for usage and error information. +// API operation GetPermissionPolicy for usage and error information. // // Returned Error Types: +// * WAFNonexistentItemException +// AWS WAF couldn’t perform the operation because your resource doesn’t +// exist. +// // * WAFInternalErrorException // Your request is valid, but AWS WAF couldn’t perform the operation because // of a system problem. Retry your request. @@ -1862,37 +2157,143 @@ func (c *WAFV2) GetRateBasedStatementManagedKeysRequest(input *GetRateBasedState // * Your request references an ARN that is malformed, or corresponds to // a resource with which a Web ACL cannot be associated. // -// * WAFNonexistentItemException -// AWS WAF couldn’t perform the operation because your resource doesn’t -// exist. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetRateBasedStatementManagedKeys -func (c *WAFV2) GetRateBasedStatementManagedKeys(input *GetRateBasedStatementManagedKeysInput) (*GetRateBasedStatementManagedKeysOutput, error) { - req, out := c.GetRateBasedStatementManagedKeysRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetPermissionPolicy +func (c *WAFV2) GetPermissionPolicy(input *GetPermissionPolicyInput) (*GetPermissionPolicyOutput, error) { + req, out := c.GetPermissionPolicyRequest(input) return out, req.Send() } -// GetRateBasedStatementManagedKeysWithContext is the same as GetRateBasedStatementManagedKeys with the addition of +// GetPermissionPolicyWithContext is the same as GetPermissionPolicy with the addition of // the ability to pass a context and additional request options. // -// See GetRateBasedStatementManagedKeys for details on how to use this API operation. +// See GetPermissionPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WAFV2) GetRateBasedStatementManagedKeysWithContext(ctx aws.Context, input *GetRateBasedStatementManagedKeysInput, opts ...request.Option) (*GetRateBasedStatementManagedKeysOutput, error) { - req, out := c.GetRateBasedStatementManagedKeysRequest(input) +func (c *WAFV2) GetPermissionPolicyWithContext(ctx aws.Context, input *GetPermissionPolicyInput, opts ...request.Option) (*GetPermissionPolicyOutput, error) { + req, out := c.GetPermissionPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetRegexPatternSet = "GetRegexPatternSet" +const opGetRateBasedStatementManagedKeys = "GetRateBasedStatementManagedKeys" -// GetRegexPatternSetRequest generates a "aws/request.Request" representing the -// client's request for the GetRegexPatternSet operation. 
The "output" return -// value will be populated with the request's response once the request completes +// GetRateBasedStatementManagedKeysRequest generates a "aws/request.Request" representing the +// client's request for the GetRateBasedStatementManagedKeys operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRateBasedStatementManagedKeys for more information on using the GetRateBasedStatementManagedKeys +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRateBasedStatementManagedKeysRequest method. +// req, resp := client.GetRateBasedStatementManagedKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetRateBasedStatementManagedKeys +func (c *WAFV2) GetRateBasedStatementManagedKeysRequest(input *GetRateBasedStatementManagedKeysInput) (req *request.Request, output *GetRateBasedStatementManagedKeysOutput) { + op := &request.Operation{ + Name: opGetRateBasedStatementManagedKeys, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRateBasedStatementManagedKeysInput{} + } + + output = &GetRateBasedStatementManagedKeysOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetRateBasedStatementManagedKeys API operation for AWS WAFV2. +// +// +// This is the latest version of AWS WAF, named AWS WAFV2, released in November, +// 2019. For information, including how to migrate your AWS WAF resources from +// the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// +// Retrieves the keys that are currently blocked by a rate-based rule. The maximum +// number of managed keys that can be blocked for a single rate-based rule is +// 10,000. If more than 10,000 addresses exceed the rate limit, those with the +// highest rates are blocked. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAFV2's +// API operation GetRateBasedStatementManagedKeys for usage and error information. +// +// Returned Error Types: +// * WAFInternalErrorException +// Your request is valid, but AWS WAF couldn’t perform the operation because +// of a system problem. Retry your request. +// +// * WAFInvalidParameterException +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name or value. +// +// * Your nested statement isn't valid. You might have tried to nest a statement +// that can’t be nested. +// +// * You tried to update a WebACL with a DefaultAction that isn't among the +// types available at DefaultAction. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a Web ACL cannot be associated. 
+// +// * WAFNonexistentItemException +// AWS WAF couldn’t perform the operation because your resource doesn’t +// exist. +// +// * WAFInvalidOperationException +// The operation isn't valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetRateBasedStatementManagedKeys +func (c *WAFV2) GetRateBasedStatementManagedKeys(input *GetRateBasedStatementManagedKeysInput) (*GetRateBasedStatementManagedKeysOutput, error) { + req, out := c.GetRateBasedStatementManagedKeysRequest(input) + return out, req.Send() +} + +// GetRateBasedStatementManagedKeysWithContext is the same as GetRateBasedStatementManagedKeys with the addition of +// the ability to pass a context and additional request options. +// +// See GetRateBasedStatementManagedKeys for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFV2) GetRateBasedStatementManagedKeysWithContext(ctx aws.Context, input *GetRateBasedStatementManagedKeysInput, opts ...request.Option) (*GetRateBasedStatementManagedKeysOutput, error) { + req, out := c.GetRateBasedStatementManagedKeysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetRegexPatternSet = "GetRegexPatternSet" + +// GetRegexPatternSetRequest generates a "aws/request.Request" representing the +// client's request for the GetRegexPatternSet operation. The "output" return +// value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. @@ -1970,6 +2371,9 @@ func (c *WAFV2) GetRegexPatternSetRequest(input *GetRegexPatternSetInput) (req * // AWS WAF couldn’t perform the operation because your resource doesn’t // exist. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetRegexPatternSet func (c *WAFV2) GetRegexPatternSet(input *GetRegexPatternSetInput) (*GetRegexPatternSetOutput, error) { req, out := c.GetRegexPatternSetRequest(input) @@ -2074,6 +2478,9 @@ func (c *WAFV2) GetRuleGroupRequest(input *GetRuleGroupInput) (req *request.Requ // AWS WAF couldn’t perform the operation because your resource doesn’t // exist. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetRuleGroup func (c *WAFV2) GetRuleGroup(input *GetRuleGroupInput) (*GetRuleGroupOutput, error) { req, out := c.GetRuleGroupRequest(input) @@ -2292,6 +2699,9 @@ func (c *WAFV2) GetWebACLRequest(input *GetWebACLInput) (req *request.Request, o // AWS WAF couldn’t perform the operation because your resource doesn’t // exist. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetWebACL func (c *WAFV2) GetWebACL(input *GetWebACLInput) (*GetWebACLOutput, error) { req, out := c.GetWebACLRequest(input) @@ -2399,6 +2809,9 @@ func (c *WAFV2) GetWebACLForResourceRequest(input *GetWebACLForResourceInput) (r // * WAFUnavailableEntityException // AWS WAF couldn’t retrieve the resource that you requested. Retry your request. // +// * WAFInvalidOperationException +// The operation isn't valid. 
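+//
+// For illustration only, a minimal call sketch for a regional resource; the load
+// balancer ARN is a placeholder and GetWebACLForResourceInput is assumed to take
+// the protected resource's ResourceArn:
+//
+//    svc := wafv2.New(session.Must(session.NewSession()))
+//    out, err := svc.GetWebACLForResource(&wafv2.GetWebACLForResourceInput{
+//        ResourceArn: aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/my-alb/0123456789abcdef"),
+//    })
+//    if err == nil && out.WebACL != nil {
+//        fmt.Println(aws.StringValue(out.WebACL.Name))
+//    }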
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/GetWebACLForResource func (c *WAFV2) GetWebACLForResource(input *GetWebACLForResourceInput) (*GetWebACLForResourceOutput, error) { req, out := c.GetWebACLForResourceRequest(input) @@ -2501,6 +2914,9 @@ func (c *WAFV2) ListAvailableManagedRuleGroupsRequest(input *ListAvailableManage // * Your request references an ARN that is malformed, or corresponds to // a resource with which a Web ACL cannot be associated. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/ListAvailableManagedRuleGroups func (c *WAFV2) ListAvailableManagedRuleGroups(input *ListAvailableManagedRuleGroupsInput) (*ListAvailableManagedRuleGroupsOutput, error) { req, out := c.ListAvailableManagedRuleGroupsRequest(input) @@ -2601,6 +3017,9 @@ func (c *WAFV2) ListIPSetsRequest(input *ListIPSetsInput) (req *request.Request, // * Your request references an ARN that is malformed, or corresponds to // a resource with which a Web ACL cannot be associated. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/ListIPSets func (c *WAFV2) ListIPSets(input *ListIPSetsInput) (*ListIPSetsOutput, error) { req, out := c.ListIPSetsRequest(input) @@ -2701,6 +3120,9 @@ func (c *WAFV2) ListLoggingConfigurationsRequest(input *ListLoggingConfiguration // * Your request references an ARN that is malformed, or corresponds to // a resource with which a Web ACL cannot be associated. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/ListLoggingConfigurations func (c *WAFV2) ListLoggingConfigurations(input *ListLoggingConfigurationsInput) (*ListLoggingConfigurationsOutput, error) { req, out := c.ListLoggingConfigurationsRequest(input) @@ -2802,6 +3224,9 @@ func (c *WAFV2) ListRegexPatternSetsRequest(input *ListRegexPatternSetsInput) (r // * Your request references an ARN that is malformed, or corresponds to // a resource with which a Web ACL cannot be associated. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/ListRegexPatternSets func (c *WAFV2) ListRegexPatternSets(input *ListRegexPatternSetsInput) (*ListRegexPatternSetsOutput, error) { req, out := c.ListRegexPatternSetsRequest(input) @@ -2908,6 +3333,9 @@ func (c *WAFV2) ListResourcesForWebACLRequest(input *ListResourcesForWebACLInput // * Your request references an ARN that is malformed, or corresponds to // a resource with which a Web ACL cannot be associated. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/ListResourcesForWebACL func (c *WAFV2) ListResourcesForWebACL(input *ListResourcesForWebACLInput) (*ListResourcesForWebACLOutput, error) { req, out := c.ListResourcesForWebACLRequest(input) @@ -3009,6 +3437,9 @@ func (c *WAFV2) ListRuleGroupsRequest(input *ListRuleGroupsInput) (req *request. // * Your request references an ARN that is malformed, or corresponds to // a resource with which a Web ACL cannot be associated. // +// * WAFInvalidOperationException +// The operation isn't valid. 
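+//
+// For illustration only, a listing sketch; Scope, Limit, and NextMarker are
+// assumed to be the usual ListRuleGroups input and output fields:
+//
+//    svc := wafv2.New(session.Must(session.NewSession()))
+//    page, err := svc.ListRuleGroups(&wafv2.ListRuleGroupsInput{
+//        Scope: aws.String("REGIONAL"),
+//        Limit: aws.Int64(50),
+//    })
+//    if err == nil {
+//        for _, rg := range page.RuleGroups {
+//            fmt.Println(aws.StringValue(rg.Name))
+//        }
+//        // Pass page.NextMarker back as NextMarker to retrieve further pages.
+//    }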
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/ListRuleGroups func (c *WAFV2) ListRuleGroups(input *ListRuleGroupsInput) (*ListRuleGroupsOutput, error) { req, out := c.ListRuleGroupsRequest(input) @@ -3080,7 +3511,15 @@ func (c *WAFV2) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req // 2019. For information, including how to migrate your AWS WAF resources from // the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). // -// Retrieves the TagInfoForResource for the specified resource. +// Retrieves the TagInfoForResource for the specified resource. Tags are key:value +// pairs that you can use to categorize and manage your resources, for purposes +// like billing. For example, you might set the tag key to "customer" and the +// value to the customer name or ID. You can specify one or more tags to add +// to each AWS resource, up to 50 tags for a resource. +// +// You can tag the AWS resources that you manage through AWS WAF: web ACLs, +// rule groups, IP sets, and regex pattern sets. You can't manage or view tags +// through the AWS WAF console. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3120,6 +3559,9 @@ func (c *WAFV2) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req // AWS WAF couldn’t perform your tagging operation because of an internal // error. Retry your request. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/ListTagsForResource func (c *WAFV2) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { req, out := c.ListTagsForResourceRequest(input) @@ -3220,6 +3662,9 @@ func (c *WAFV2) ListWebACLsRequest(input *ListWebACLsInput) (req *request.Reques // * Your request references an ARN that is malformed, or corresponds to // a resource with which a Web ACL cannot be associated. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/ListWebACLs func (c *WAFV2) ListWebACLs(input *ListWebACLsInput) (*ListWebACLsOutput, error) { req, out := c.ListWebACLsRequest(input) @@ -3299,10 +3744,13 @@ func (c *WAFV2) PutLoggingConfigurationRequest(input *PutLoggingConfigurationInp // // Create an Amazon Kinesis Data Firehose. // -// Create the data firehose with a PUT source and in the region that you are +// Create the data firehose with a PUT source and in the Region that you are // operating. If you are capturing logs for Amazon CloudFront, always create // the firehose in US East (N. Virginia). // +// Give the data firehose a name that starts with the prefix aws-waf-logs-. +// For example, aws-waf-logs-us-east-2-analytics. +// // Do not create the data firehose using a Kinesis stream as your source. // // Associate that firehose to your web ACL using a PutLoggingConfiguration request. @@ -3360,6 +3808,15 @@ func (c *WAFV2) PutLoggingConfigurationRequest(input *PutLoggingConfigurationInp // * Your request references an ARN that is malformed, or corresponds to // a resource with which a Web ACL cannot be associated. // +// * WAFInvalidOperationException +// The operation isn't valid. +// +// * WAFLimitsExceededException +// AWS WAF couldn’t perform the operation because you exceeded your resource +// limit. 
For example, the maximum number of WebACL objects that you can create +// for an AWS account. For more information, see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) +// in the AWS WAF Developer Guide. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/PutLoggingConfiguration func (c *WAFV2) PutLoggingConfiguration(input *PutLoggingConfigurationInput) (*PutLoggingConfigurationOutput, error) { req, out := c.PutLoggingConfigurationRequest(input) @@ -3382,6 +3839,137 @@ func (c *WAFV2) PutLoggingConfigurationWithContext(ctx aws.Context, input *PutLo return out, req.Send() } +const opPutPermissionPolicy = "PutPermissionPolicy" + +// PutPermissionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutPermissionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPermissionPolicy for more information on using the PutPermissionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutPermissionPolicyRequest method. +// req, resp := client.PutPermissionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/PutPermissionPolicy +func (c *WAFV2) PutPermissionPolicyRequest(input *PutPermissionPolicyInput) (req *request.Request, output *PutPermissionPolicyOutput) { + op := &request.Operation{ + Name: opPutPermissionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutPermissionPolicyInput{} + } + + output = &PutPermissionPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutPermissionPolicy API operation for AWS WAFV2. +// +// Attaches an IAM policy to the specified resource. Use this to share a rule +// group across accounts. +// +// You must be the owner of the rule group to perform this operation. +// +// This action is subject to the following restrictions: +// +// * You can attach only one policy with each PutPermissionPolicy request. +// +// * The ARN in the request must be a valid WAF RuleGroup ARN and the rule +// group must exist in the same region. +// +// * The user making the request must be the owner of the rule group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS WAFV2's +// API operation PutPermissionPolicy for usage and error information. +// +// Returned Error Types: +// * WAFNonexistentItemException +// AWS WAF couldn’t perform the operation because your resource doesn’t +// exist. +// +// * WAFInternalErrorException +// Your request is valid, but AWS WAF couldn’t perform the operation because +// of a system problem. Retry your request. 
+// +// * WAFInvalidParameterException +// The operation failed because AWS WAF didn't recognize a parameter in the +// request. For example: +// +// * You specified an invalid parameter name or value. +// +// * Your nested statement isn't valid. You might have tried to nest a statement +// that can’t be nested. +// +// * You tried to update a WebACL with a DefaultAction that isn't among the +// types available at DefaultAction. +// +// * Your request references an ARN that is malformed, or corresponds to +// a resource with which a Web ACL cannot be associated. +// +// * WAFInvalidPermissionPolicyException +// The operation failed because the specified policy isn't in the proper format. +// +// The policy specifications must conform to the following: +// +// * The policy must be composed using IAM Policy version 2012-10-17 or version +// 2015-01-01. +// +// * The policy must include specifications for Effect, Action, and Principal. +// +// * Effect must specify Allow. +// +// * Action must specify wafv2:CreateWebACL, wafv2:UpdateWebACL, and wafv2:PutFirewallManagerRuleGroups. +// AWS WAF rejects any extra actions or wildcard actions in the policy. +// +// * The policy must not include a Resource parameter. +// +// For more information, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/PutPermissionPolicy +func (c *WAFV2) PutPermissionPolicy(input *PutPermissionPolicyInput) (*PutPermissionPolicyOutput, error) { + req, out := c.PutPermissionPolicyRequest(input) + return out, req.Send() +} + +// PutPermissionPolicyWithContext is the same as PutPermissionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutPermissionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WAFV2) PutPermissionPolicyWithContext(ctx aws.Context, input *PutPermissionPolicyInput, opts ...request.Option) (*PutPermissionPolicyOutput, error) { + req, out := c.PutPermissionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTagResource = "TagResource" // TagResourceRequest generates a "aws/request.Request" representing the @@ -3433,10 +4021,14 @@ func (c *WAFV2) TagResourceRequest(input *TagResourceInput) (req *request.Reques // the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). // // Associates tags with the specified AWS resource. Tags are key:value pairs -// that you can associate with AWS resources. For example, the tag key might -// be "customer" and the tag value might be "companyA." You can specify one -// or more tags to add to each container. You can add up to 50 tags to each -// AWS resource. +// that you can use to categorize and manage your resources, for purposes like +// billing. For example, you might set the tag key to "customer" and the value +// to the customer name or ID. You can specify one or more tags to add to each +// AWS resource, up to 50 tags for a resource. +// +// You can tag the AWS resources that you manage through AWS WAF: web ACLs, +// rule groups, IP sets, and regex pattern sets. 
You can't manage or view tags +// through the AWS WAF console. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3482,6 +4074,9 @@ func (c *WAFV2) TagResourceRequest(input *TagResourceInput) (req *request.Reques // AWS WAF couldn’t perform your tagging operation because of an internal // error. Retry your request. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/TagResource func (c *WAFV2) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { req, out := c.TagResourceRequest(input) @@ -3597,6 +4192,9 @@ func (c *WAFV2) UntagResourceRequest(input *UntagResourceInput) (req *request.Re // AWS WAF couldn’t perform your tagging operation because of an internal // error. Retry your request. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/UntagResource func (c *WAFV2) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { req, out := c.UntagResourceRequest(input) @@ -3717,6 +4315,9 @@ func (c *WAFV2) UpdateIPSetRequest(input *UpdateIPSetInput) (req *request.Reques // for an AWS account. For more information, see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/UpdateIPSet func (c *WAFV2) UpdateIPSet(input *UpdateIPSetInput) (*UpdateIPSetOutput, error) { req, out := c.UpdateIPSetRequest(input) @@ -3837,6 +4438,9 @@ func (c *WAFV2) UpdateRegexPatternSetRequest(input *UpdateRegexPatternSetInput) // for an AWS account. For more information, see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/UpdateRegexPatternSet func (c *WAFV2) UpdateRegexPatternSet(input *UpdateRegexPatternSetInput) (*UpdateRegexPatternSetOutput, error) { req, out := c.UpdateRegexPatternSetRequest(input) @@ -3968,6 +4572,9 @@ func (c *WAFV2) UpdateRuleGroupRequest(input *UpdateRuleGroupInput) (req *reques // // * WAFSubscriptionNotFoundException // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/UpdateRuleGroup func (c *WAFV2) UpdateRuleGroup(input *UpdateRuleGroupInput) (*UpdateRuleGroupOutput, error) { req, out := c.UpdateRuleGroupRequest(input) @@ -4048,7 +4655,8 @@ func (c *WAFV2) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Requ // the rules. The rules in a Web ACL can be a combination of the types Rule, // RuleGroup, and managed rule group. You can associate a Web ACL with one or // more AWS resources to protect. The resources can be Amazon CloudFront, an -// Amazon API Gateway API, or an Application Load Balancer. +// Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync +// GraphQL API. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4106,6 +4714,9 @@ func (c *WAFV2) UpdateWebACLRequest(input *UpdateWebACLInput) (req *request.Requ // // * WAFSubscriptionNotFoundException // +// * WAFInvalidOperationException +// The operation isn't valid. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/UpdateWebACL func (c *WAFV2) UpdateWebACL(input *UpdateWebACLInput) (*UpdateWebACLOutput, error) { req, out := c.UpdateWebACLRequest(input) @@ -4230,7 +4841,9 @@ type AssociateWebACLInput struct { // // * For an Application Load Balancer: arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id // - // * For an Amazon API Gateway stage: arn:aws:apigateway:region::/restapis/api-id/stages/stage-name + // * For an API Gateway REST API: arn:aws:apigateway:region::/restapis/api-id/stages/stage-name + // + // * For an AppSync GraphQL API: arn:aws:appsync:region:account-id:apis/GraphQLApiId // // ResourceArn is a required field ResourceArn *string `min:"20" type:"string" required:"true"` @@ -4411,7 +5024,7 @@ type ByteMatchStatement struct { // in the part of web requests that you designate for inspection in FieldToMatch. // The maximum length of the value is 50 bytes. // - // Valid values depend on the areas that you specify for inspection in FieldToMatch: + // Valid values depend on the component that you specify for inspection in FieldToMatch: // // * Method: The HTTP method that you want AWS WAF to search for. This indicates // the type of operation specified in the request. @@ -4445,8 +5058,8 @@ type ByteMatchStatement struct { // Text transformations eliminate some of the unusual formatting that attackers // use in web requests in an effort to bypass detection. If you specify one // or more transformations in a rule statement, AWS WAF performs all transformations - // on the content identified by FieldToMatch, starting from the lowest priority - // setting, before inspecting the content for a match. + // on the content of the request component identified by FieldToMatch, starting + // from the lowest priority setting, before inspecting the content for a match. // // TextTransformations is a required field TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` @@ -4535,13 +5148,13 @@ type CheckCapacityInput struct { Rules []*Rule `type:"list" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -4672,8 +5285,8 @@ type CreateIPSetInput struct { // Addresses is a required field Addresses []*string `type:"list" required:"true"` - // A friendly description of the IP set. You cannot change the description of - // an IP set after you create it. + // A description of the IP set that helps with identification. You cannot change + // the description of an IP set after you create it. 
Description *string `min:"1" type:"string"` // Specify IPV4 or IPV6. @@ -4681,20 +5294,20 @@ type CreateIPSetInput struct { // IPAddressVersion is a required field IPAddressVersion *string `type:"string" required:"true" enum:"IPAddressVersion"` - // A friendly name of the IP set. You cannot change the name of an IPSet after - // you create it. + // The name of the IP set. You cannot change the name of an IPSet after you + // create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -4822,12 +5435,11 @@ func (s *CreateIPSetOutput) SetSummary(v *IPSetSummary) *CreateIPSetOutput { type CreateRegexPatternSetInput struct { _ struct{} `type:"structure"` - // A friendly description of the set. You cannot change the description of a - // set after you create it. + // A description of the set that helps with identification. You cannot change + // the description of a set after you create it. Description *string `min:"1" type:"string"` - // A friendly name of the set. You cannot change the name after you create the - // set. + // The name of the set. You cannot change the name after you create the set. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -4835,16 +5447,16 @@ type CreateRegexPatternSetInput struct { // Array of regular expression strings. // // RegularExpressionList is a required field - RegularExpressionList []*Regex `min:"1" type:"list" required:"true"` + RegularExpressionList []*Regex `type:"list" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -4881,9 +5493,6 @@ func (s *CreateRegexPatternSetInput) Validate() error { if s.RegularExpressionList == nil { invalidParams.Add(request.NewErrParamRequired("RegularExpressionList")) } - if s.RegularExpressionList != nil && len(s.RegularExpressionList) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RegularExpressionList", 1)) - } if s.Scope == nil { invalidParams.Add(request.NewErrParamRequired("Scope")) } @@ -4994,12 +5603,12 @@ type CreateRuleGroupInput struct { // Capacity is a required field Capacity *int64 `min:"1" type:"long" required:"true"` - // A friendly description of the rule group. 
You cannot change the description - // of a rule group after you create it. + // A description of the rule group that helps with identification. You cannot + // change the description of a rule group after you create it. Description *string `min:"1" type:"string"` - // A friendly name of the rule group. You cannot change the name of a rule group - // after you create it. + // The name of the rule group. You cannot change the name of a rule group after + // you create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -5011,13 +5620,13 @@ type CreateRuleGroupInput struct { Rules []*Rule `type:"list"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -5179,12 +5788,12 @@ type CreateWebACLInput struct { // DefaultAction is a required field DefaultAction *DefaultAction `type:"structure" required:"true"` - // A friendly description of the Web ACL. You cannot change the description - // of a Web ACL after you create it. + // A description of the Web ACL that helps with identification. You cannot change + // the description of a Web ACL after you create it. Description *string `min:"1" type:"string"` - // A friendly name of the Web ACL. You cannot change the name of a Web ACL after - // you create it. + // The name of the Web ACL. You cannot change the name of a Web ACL after you + // create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -5196,13 +5805,13 @@ type CreateWebACLInput struct { Rules []*Rule `type:"list"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -5392,14 +6001,13 @@ func (s *DefaultAction) SetBlock(v *BlockAction) *DefaultAction { return s } -type DeleteIPSetInput struct { +type DeleteFirewallManagerRuleGroupsInput struct { _ struct{} `type:"structure"` - // A unique identifier for the set. This ID is returned in the responses to - // create and list commands. You provide it to operations like update and delete. + // The Amazon Resource Name (ARN) of the web ACL. // - // Id is a required field - Id *string `min:"1" type:"string" required:"true"` + // WebACLArn is a required field + WebACLArn *string `min:"20" type:"string" required:"true"` // A token used for optimistic locking. 
AWS WAF returns a token to your get // and list requests, to mark the state of the entity at the time of the request. @@ -5410,23 +6018,119 @@ type DeleteIPSetInput struct { // If this happens, perform another get, and use the new token returned by that // operation. // - // LockToken is a required field - LockToken *string `min:"1" type:"string" required:"true"` + // WebACLLockToken is a required field + WebACLLockToken *string `min:"1" type:"string" required:"true"` +} - // A friendly name of the IP set. You cannot change the name of an IPSet after - // you create it. - // - // Name is a required field - Name *string `min:"1" type:"string" required:"true"` +// String returns the string representation +func (s DeleteFirewallManagerRuleGroupsInput) String() string { + return awsutil.Prettify(s) +} - // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. +// GoString returns the string representation +func (s DeleteFirewallManagerRuleGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteFirewallManagerRuleGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteFirewallManagerRuleGroupsInput"} + if s.WebACLArn == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLArn")) + } + if s.WebACLArn != nil && len(*s.WebACLArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("WebACLArn", 20)) + } + if s.WebACLLockToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebACLLockToken")) + } + if s.WebACLLockToken != nil && len(*s.WebACLLockToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WebACLLockToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWebACLArn sets the WebACLArn field's value. +func (s *DeleteFirewallManagerRuleGroupsInput) SetWebACLArn(v string) *DeleteFirewallManagerRuleGroupsInput { + s.WebACLArn = &v + return s +} + +// SetWebACLLockToken sets the WebACLLockToken field's value. +func (s *DeleteFirewallManagerRuleGroupsInput) SetWebACLLockToken(v string) *DeleteFirewallManagerRuleGroupsInput { + s.WebACLLockToken = &v + return s +} + +type DeleteFirewallManagerRuleGroupsOutput struct { + _ struct{} `type:"structure"` + + // A token used for optimistic locking. AWS WAF returns a token to your get + // and list requests, to mark the state of the entity at the time of the request. + // To make changes to the entity associated with the token, you provide the + // token to operations like update and delete. AWS WAF uses the token to ensure + // that no changes have been made to the entity since you last retrieved it. + // If a change has been made, the update fails with a WAFOptimisticLockException. + // If this happens, perform another get, and use the new token returned by that + // operation. + NextWebACLLockToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteFirewallManagerRuleGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteFirewallManagerRuleGroupsOutput) GoString() string { + return s.String() +} + +// SetNextWebACLLockToken sets the NextWebACLLockToken field's value. 
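+//
+// For illustration only, a sketch of the optimistic-locking flow this token
+// supports; the ARN is a placeholder and the GetWebACL output is assumed to carry
+// the current LockToken:
+//
+//    svc := wafv2.New(session.Must(session.NewSession()))
+//    acl, err := svc.GetWebACL(&wafv2.GetWebACLInput{ /* Name, Scope, and Id of the web ACL */ })
+//    if err == nil {
+//        _, err = svc.DeleteFirewallManagerRuleGroups(&wafv2.DeleteFirewallManagerRuleGroupsInput{
+//            WebACLArn:       aws.String("arn:aws:wafv2:us-east-1:123456789012:regional/webacl/example/11111111-1111-1111-1111-111111111111"),
+//            WebACLLockToken: acl.LockToken,
+//        })
+//        // On WAFOptimisticLockException, re-fetch the web ACL and retry with the new token.
+//    }
+//    _ = err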
+func (s *DeleteFirewallManagerRuleGroupsOutput) SetNextWebACLLockToken(v string) *DeleteFirewallManagerRuleGroupsOutput { + s.NextWebACLLockToken = &v + return s +} + +type DeleteIPSetInput struct { + _ struct{} `type:"structure"` + + // A unique identifier for the set. This ID is returned in the responses to + // create and list commands. You provide it to operations like update and delete. + // + // Id is a required field + Id *string `min:"1" type:"string" required:"true"` + + // A token used for optimistic locking. AWS WAF returns a token to your get + // and list requests, to mark the state of the entity at the time of the request. + // To make changes to the entity associated with the token, you provide the + // token to operations like update and delete. AWS WAF uses the token to ensure + // that no changes have been made to the entity since you last retrieved it. + // If a change has been made, the update fails with a WAFOptimisticLockException. + // If this happens, perform another get, and use the new token returned by that + // operation. + // + // LockToken is a required field + LockToken *string `min:"1" type:"string" required:"true"` + + // The name of the IP set. You cannot change the name of an IPSet after you + // create it. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // Specifies whether this is for an AWS CloudFront distribution or for a regional + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -5570,6 +6274,64 @@ func (s DeleteLoggingConfigurationOutput) GoString() string { return s.String() } +type DeletePermissionPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the rule group from which you want to delete + // the policy. + // + // You must be the owner of the rule group to perform this operation. + // + // ResourceArn is a required field + ResourceArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePermissionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePermissionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePermissionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePermissionPolicyInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. 
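+//
+// For illustration only, a sketch of the companion PutPermissionPolicy call that
+// attaches the policy this operation deletes; the ARNs and account IDs are
+// placeholders, and the ResourceArn/Policy fields on PutPermissionPolicyInput are
+// assumed:
+//
+//    policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"arn:aws:iam::444455556666:root"},"Action":["wafv2:CreateWebACL","wafv2:UpdateWebACL","wafv2:PutFirewallManagerRuleGroups"]}]}`
+//    svc := wafv2.New(session.Must(session.NewSession()))
+//    _, err := svc.PutPermissionPolicy(&wafv2.PutPermissionPolicyInput{
+//        ResourceArn: aws.String("arn:aws:wafv2:us-east-1:123456789012:regional/rulegroup/example/22222222-2222-2222-2222-222222222222"),
+//        Policy:      aws.String(policy),
+//    })
+//    _ = err // Note that the policy must not include a Resource element.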
+func (s *DeletePermissionPolicyInput) SetResourceArn(v string) *DeletePermissionPolicyInput { + s.ResourceArn = &v + return s +} + +type DeletePermissionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePermissionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePermissionPolicyOutput) GoString() string { + return s.String() +} + type DeleteRegexPatternSetInput struct { _ struct{} `type:"structure"` @@ -5591,20 +6353,19 @@ type DeleteRegexPatternSetInput struct { // LockToken is a required field LockToken *string `min:"1" type:"string" required:"true"` - // A friendly name of the set. You cannot change the name after you create the - // set. + // The name of the set. You cannot change the name after you create the set. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -5714,20 +6475,20 @@ type DeleteRuleGroupInput struct { // LockToken is a required field LockToken *string `min:"1" type:"string" required:"true"` - // A friendly name of the rule group. You cannot change the name of a rule group - // after you create it. + // The name of the rule group. You cannot change the name of a rule group after + // you create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -5837,20 +6598,20 @@ type DeleteWebACLInput struct { // LockToken is a required field LockToken *string `min:"1" type:"string" required:"true"` - // A friendly name of the Web ACL. You cannot change the name of a Web ACL after - // you create it. + // The name of the Web ACL. You cannot change the name of a Web ACL after you + // create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. 
A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -5948,13 +6709,13 @@ type DescribeManagedRuleGroupInput struct { Name *string `min:"1" type:"string" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -6068,7 +6829,9 @@ type DisassociateWebACLInput struct { // // * For an Application Load Balancer: arn:aws:elasticloadbalancing:region:account-id:loadbalancer/app/load-balancer-name/load-balancer-id // - // * For an Amazon API Gateway stage: arn:aws:apigateway:region::/restapis/api-id/stages/stage-name + // * For an API Gateway REST API: arn:aws:apigateway:region::/restapis/api-id/stages/stage-name + // + // * For an AppSync GraphQL API: arn:aws:appsync:region:account-id:apis/GraphQLApiId // // ResourceArn is a required field ResourceArn *string `min:"20" type:"string" required:"true"` @@ -6174,9 +6937,12 @@ func (s *ExcludedRule) SetName(v string) *ExcludedRule { // 2019. For information, including how to migrate your AWS WAF resources from // the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). // -// The part of a web request that you want AWS WAF to inspect. Include the FieldToMatch -// types that you want to inspect, with additional specifications as needed, -// according to the type. +// The part of a web request that you want AWS WAF to inspect. Include the single +// FieldToMatch type that you want to inspect, with additional specifications +// as needed, according to the type. You specify a single request component +// in FieldToMatch for each rule statement that requires it. To inspect more +// than one component of a web request, create a separate rule statement for +// each component. type FieldToMatch struct { _ struct{} `type:"structure"` @@ -6295,6 +7061,238 @@ func (s *FieldToMatch) SetUriPath(v *UriPath) *FieldToMatch { return s } +// A rule group that's defined for an AWS Firewall Manager WAF policy. +type FirewallManagerRuleGroup struct { + _ struct{} `type:"structure"` + + // The processing guidance for an AWS Firewall Manager rule. This is like a + // regular rule Statement, but it can only contain a rule group reference. + // + // FirewallManagerStatement is a required field + FirewallManagerStatement *FirewallManagerStatement `type:"structure" required:"true"` + + // The name of the rule group. You cannot change the name of a rule group after + // you create it. 
+ // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The override action to apply to the rules in a rule group. Used only for + // rule statements that reference a rule group, like RuleGroupReferenceStatement + // and ManagedRuleGroupStatement. + // + // Set the override action to none to leave the rule actions in effect. Set + // it to count to only count matches, regardless of the rule action settings. + // + // In a Rule, you must specify either this OverrideAction setting or the rule + // Action setting, but not both: + // + // * If the rule statement references a rule group, use this override action + // setting and not the action setting. + // + // * If the rule statement does not reference a rule group, use the rule + // action setting and not this rule override action setting. + // + // OverrideAction is a required field + OverrideAction *OverrideAction `type:"structure" required:"true"` + + // If you define more than one rule group in the first or last Firewall Manager + // rule groups, AWS WAF evaluates each request against the rule groups in order, + // starting from the lowest priority setting. The priorities don't need to be + // consecutive, but they must all be different. + // + // Priority is a required field + Priority *int64 `type:"integer" required:"true"` + + // + // This is the latest version of AWS WAF, named AWS WAFV2, released in November, + // 2019. For information, including how to migrate your AWS WAF resources from + // the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). + // + // Defines and enables Amazon CloudWatch metrics and web request sample collection. + // + // VisibilityConfig is a required field + VisibilityConfig *VisibilityConfig `type:"structure" required:"true"` +} + +// String returns the string representation +func (s FirewallManagerRuleGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FirewallManagerRuleGroup) GoString() string { + return s.String() +} + +// SetFirewallManagerStatement sets the FirewallManagerStatement field's value. +func (s *FirewallManagerRuleGroup) SetFirewallManagerStatement(v *FirewallManagerStatement) *FirewallManagerRuleGroup { + s.FirewallManagerStatement = v + return s +} + +// SetName sets the Name field's value. +func (s *FirewallManagerRuleGroup) SetName(v string) *FirewallManagerRuleGroup { + s.Name = &v + return s +} + +// SetOverrideAction sets the OverrideAction field's value. +func (s *FirewallManagerRuleGroup) SetOverrideAction(v *OverrideAction) *FirewallManagerRuleGroup { + s.OverrideAction = v + return s +} + +// SetPriority sets the Priority field's value. +func (s *FirewallManagerRuleGroup) SetPriority(v int64) *FirewallManagerRuleGroup { + s.Priority = &v + return s +} + +// SetVisibilityConfig sets the VisibilityConfig field's value. +func (s *FirewallManagerRuleGroup) SetVisibilityConfig(v *VisibilityConfig) *FirewallManagerRuleGroup { + s.VisibilityConfig = v + return s +} + +// The processing guidance for an AWS Firewall Manager rule. This is like a +// regular rule Statement, but it can only contain a rule group reference. +type FirewallManagerStatement struct { + _ struct{} `type:"structure"` + + // + // This is the latest version of AWS WAF, named AWS WAFV2, released in November, + // 2019. 
For information, including how to migrate your AWS WAF resources from + // the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). + // + // A rule statement used to run the rules that are defined in a managed rule + // group. To use this, provide the vendor name and the name of the rule group + // in this statement. You can retrieve the required names by calling ListAvailableManagedRuleGroups. + // + // You can't nest a ManagedRuleGroupStatement, for example for use inside a + // NotStatement or OrStatement. It can only be referenced as a top-level statement + // within a rule. + ManagedRuleGroupStatement *ManagedRuleGroupStatement `type:"structure"` + + // + // This is the latest version of AWS WAF, named AWS WAFV2, released in November, + // 2019. For information, including how to migrate your AWS WAF resources from + // the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). + // + // A rule statement used to run the rules that are defined in a RuleGroup. To + // use this, create a rule group with your rules, then provide the ARN of the + // rule group in this statement. + // + // You cannot nest a RuleGroupReferenceStatement, for example for use inside + // a NotStatement or OrStatement. It can only be referenced as a top-level statement + // within a rule. + RuleGroupReferenceStatement *RuleGroupReferenceStatement `type:"structure"` +} + +// String returns the string representation +func (s FirewallManagerStatement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FirewallManagerStatement) GoString() string { + return s.String() +} + +// SetManagedRuleGroupStatement sets the ManagedRuleGroupStatement field's value. +func (s *FirewallManagerStatement) SetManagedRuleGroupStatement(v *ManagedRuleGroupStatement) *FirewallManagerStatement { + s.ManagedRuleGroupStatement = v + return s +} + +// SetRuleGroupReferenceStatement sets the RuleGroupReferenceStatement field's value. +func (s *FirewallManagerStatement) SetRuleGroupReferenceStatement(v *RuleGroupReferenceStatement) *FirewallManagerStatement { + s.RuleGroupReferenceStatement = v + return s +} + +// The configuration for inspecting IP addresses in an HTTP header that you +// specify, instead of using the IP address that's reported by the web request +// origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify +// any header name. +// +// If the specified header isn't present in the request, AWS WAF doesn't apply +// the rule to the web request at all. +// +// This configuration is used for GeoMatchStatement and RateBasedStatement. +// For IPSetReferenceStatement, use IPSetForwardedIPConfig instead. +// +// AWS WAF only evaluates the first IP address found in the specified HTTP header. +type ForwardedIPConfig struct { + _ struct{} `type:"structure"` + + // The match status to assign to the web request if the request doesn't have + // a valid IP address in the specified position. + // + // If the specified header isn't present in the request, AWS WAF doesn't apply + // the rule to the web request at all. + // + // You can specify the following fallback behaviors: + // + // * MATCH - Treat the web request as matching the rule statement. AWS WAF + // applies the rule action to the request. + // + // * NO_MATCH - Treat the web request as not matching the rule statement. 
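+ //
+ // Editor's illustrative sketch (not generated SDK documentation): one way this
+ // configuration might be populated and attached to a geo match statement,
+ // assuming the aws and wafv2 packages of this vendored SDK are imported:
+ //
+ //    fwd := &wafv2.ForwardedIPConfig{
+ //        FallbackBehavior: aws.String("MATCH"),
+ //        HeaderName:       aws.String("X-Forwarded-For"),
+ //    }
+ //    geo := &wafv2.GeoMatchStatement{
+ //        CountryCodes:      []*string{aws.String("US"), aws.String("CN")},
+ //        ForwardedIPConfig: fwd,
+ //    }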
+ // + // FallbackBehavior is a required field + FallbackBehavior *string `type:"string" required:"true" enum:"FallbackBehavior"` + + // The name of the HTTP header to use for the IP address. For example, to use + // the X-Forwarded-For (XFF) header, set this to X-Forwarded-For. + // + // If the specified header isn't present in the request, AWS WAF doesn't apply + // the rule to the web request at all. + // + // HeaderName is a required field + HeaderName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ForwardedIPConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ForwardedIPConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ForwardedIPConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ForwardedIPConfig"} + if s.FallbackBehavior == nil { + invalidParams.Add(request.NewErrParamRequired("FallbackBehavior")) + } + if s.HeaderName == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderName")) + } + if s.HeaderName != nil && len(*s.HeaderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HeaderName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFallbackBehavior sets the FallbackBehavior field's value. +func (s *ForwardedIPConfig) SetFallbackBehavior(v string) *ForwardedIPConfig { + s.FallbackBehavior = &v + return s +} + +// SetHeaderName sets the HeaderName field's value. +func (s *ForwardedIPConfig) SetHeaderName(v string) *ForwardedIPConfig { + s.HeaderName = &v + return s +} + // // This is the latest version of AWS WAF, named AWS WAFV2, released in November, // 2019. For information, including how to migrate your AWS WAF resources from @@ -6307,6 +7305,15 @@ type GeoMatchStatement struct { // An array of two-character country codes, for example, [ "US", "CN" ], from // the alpha-2 country ISO codes of the ISO 3166 international standard. CountryCodes []*string `min:"1" type:"list"` + + // The configuration for inspecting IP addresses in an HTTP header that you + // specify, instead of using the IP address that's reported by the web request + // origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify + // any header name. + // + // If the specified header isn't present in the request, AWS WAF doesn't apply + // the rule to the web request at all. + ForwardedIPConfig *ForwardedIPConfig `type:"structure"` } // String returns the string representation @@ -6325,6 +7332,11 @@ func (s *GeoMatchStatement) Validate() error { if s.CountryCodes != nil && len(s.CountryCodes) < 1 { invalidParams.Add(request.NewErrParamMinLen("CountryCodes", 1)) } + if s.ForwardedIPConfig != nil { + if err := s.ForwardedIPConfig.Validate(); err != nil { + invalidParams.AddNested("ForwardedIPConfig", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -6338,6 +7350,12 @@ func (s *GeoMatchStatement) SetCountryCodes(v []*string) *GeoMatchStatement { return s } +// SetForwardedIPConfig sets the ForwardedIPConfig field's value. 
+func (s *GeoMatchStatement) SetForwardedIPConfig(v *ForwardedIPConfig) *GeoMatchStatement { + s.ForwardedIPConfig = v + return s +} + type GetIPSetInput struct { _ struct{} `type:"structure"` @@ -6347,20 +7365,20 @@ type GetIPSetInput struct { // Id is a required field Id *string `min:"1" type:"string" required:"true"` - // A friendly name of the IP set. You cannot change the name of an IPSet after - // you create it. + // The name of the IP set. You cannot change the name of an IPSet after you + // create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -6537,6 +7555,71 @@ func (s *GetLoggingConfigurationOutput) SetLoggingConfiguration(v *LoggingConfig return s } +type GetPermissionPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the rule group for which you want to get + // the policy. + // + // ResourceArn is a required field + ResourceArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPermissionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPermissionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPermissionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPermissionPolicyInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *GetPermissionPolicyInput) SetResourceArn(v string) *GetPermissionPolicyInput { + s.ResourceArn = &v + return s +} + +type GetPermissionPolicyOutput struct { + _ struct{} `type:"structure"` + + // The IAM policy that is attached to the specified rule group. + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetPermissionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPermissionPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. 
+func (s *GetPermissionPolicyOutput) SetPolicy(v string) *GetPermissionPolicyOutput { + s.Policy = &v + return s +} + type GetRateBasedStatementManagedKeysInput struct { _ struct{} `type:"structure"` @@ -6546,13 +7629,13 @@ type GetRateBasedStatementManagedKeysInput struct { RuleName *string `min:"1" type:"string" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -6567,8 +7650,8 @@ type GetRateBasedStatementManagedKeysInput struct { // WebACLId is a required field WebACLId *string `min:"1" type:"string" required:"true"` - // A friendly name of the Web ACL. You cannot change the name of a Web ACL after - // you create it. + // The name of the Web ACL. You cannot change the name of a Web ACL after you + // create it. // // WebACLName is a required field WebACLName *string `min:"1" type:"string" required:"true"` @@ -6680,20 +7763,19 @@ type GetRegexPatternSetInput struct { // Id is a required field Id *string `min:"1" type:"string" required:"true"` - // A friendly name of the set. You cannot change the name after you create the - // set. + // The name of the set. You cannot change the name after you create the set. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -6812,20 +7894,20 @@ type GetRuleGroupInput struct { // Id is a required field Id *string `min:"1" type:"string" required:"true"` - // A friendly name of the rule group. You cannot change the name of a rule group - // after you create it. + // The name of the rule group. You cannot change the name of a rule group after + // you create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. 
Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -6953,13 +8035,13 @@ type GetSampledRequestsInput struct { RuleMetricName *string `min:"1" type:"string" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -6968,9 +8050,10 @@ type GetSampledRequestsInput struct { Scope *string `type:"string" required:"true" enum:"Scope"` // The start date and time and the end date and time of the range for which - // you want GetSampledRequests to return a sample of requests. Specify the date - // and time in the following format: "2016-09-27T14:50Z". You can specify any - // time range in the previous three hours. + // you want GetSampledRequests to return a sample of requests. You must specify + // the times in Coordinated Universal Time (UTC) format. UTC format includes + // the special designator, Z. For example, "2016-09-27T14:50Z". You can specify + // any time range in the previous three hours. // // TimeWindow is a required field TimeWindow *TimeWindow `type:"structure" required:"true"` @@ -7076,7 +8159,8 @@ type GetSampledRequestsOutput struct { // Usually, TimeWindow is the time range that you specified in the GetSampledRequests // request. However, if your AWS resource received more than 5,000 requests // during the time range that you specified in the request, GetSampledRequests - // returns the time range for the first 5,000 requests. + // returns the time range for the first 5,000 requests. Times are in Coordinated + // Universal Time (UTC) format. TimeWindow *TimeWindow `type:"structure"` } @@ -7183,20 +8267,20 @@ type GetWebACLInput struct { // Id is a required field Id *string `min:"1" type:"string" required:"true"` - // A friendly name of the Web ACL. You cannot change the name of a Web ACL after - // you create it. + // The name of the Web ACL. You cannot change the name of a Web ACL after you + // create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. 
// // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -7471,8 +8555,8 @@ type IPSet struct { // Addresses is a required field Addresses []*string `type:"list" required:"true"` - // A friendly description of the IP set. You cannot change the description of - // an IP set after you create it. + // A description of the IP set that helps with identification. You cannot change + // the description of an IP set after you create it. Description *string `min:"1" type:"string"` // Specify IPV4 or IPV6. @@ -7486,8 +8570,8 @@ type IPSet struct { // Id is a required field Id *string `min:"1" type:"string" required:"true"` - // A friendly name of the IP set. You cannot change the name of an IPSet after - // you create it. + // The name of the IP set. You cannot change the name of an IPSet after you + // create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -7539,6 +8623,115 @@ func (s *IPSet) SetName(v string) *IPSet { return s } +// The configuration for inspecting IP addresses in an HTTP header that you +// specify, instead of using the IP address that's reported by the web request +// origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify +// any header name. +// +// If the specified header isn't present in the request, AWS WAF doesn't apply +// the rule to the web request at all. +// +// This configuration is used only for IPSetReferenceStatement. For GeoMatchStatement +// and RateBasedStatement, use ForwardedIPConfig instead. +type IPSetForwardedIPConfig struct { + _ struct{} `type:"structure"` + + // The match status to assign to the web request if the request doesn't have + // a valid IP address in the specified position. + // + // If the specified header isn't present in the request, AWS WAF doesn't apply + // the rule to the web request at all. + // + // You can specify the following fallback behaviors: + // + // * MATCH - Treat the web request as matching the rule statement. AWS WAF + // applies the rule action to the request. + // + // * NO_MATCH - Treat the web request as not matching the rule statement. + // + // FallbackBehavior is a required field + FallbackBehavior *string `type:"string" required:"true" enum:"FallbackBehavior"` + + // The name of the HTTP header to use for the IP address. For example, to use + // the X-Forwarded-For (XFF) header, set this to X-Forwarded-For. + // + // If the specified header isn't present in the request, AWS WAF doesn't apply + // the rule to the web request at all. + // + // HeaderName is a required field + HeaderName *string `min:"1" type:"string" required:"true"` + + // The position in the header to search for the IP address. The header can contain + // IP addresses of the original client and also of proxies. For example, the + // header value could be 10.1.1.1, 127.0.0.0, 10.10.10.10 where the first IP + // address identifies the original client and the rest identify proxies that + // the request went through. + // + // The options for this setting are the following: + // + // * FIRST - Inspect the first IP address in the list of IP addresses in + // the header. This is usually the client's original IP. + // + // * LAST - Inspect the last IP address in the list of IP addresses in the + // header. + // + // * ANY - Inspect all IP addresses in the header for a match. If the header + // contains more than 10 IP addresses, AWS WAF inspects the last 10. 
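+ //
+ // Editor's illustrative sketch (not generated SDK documentation): attaching this
+ // configuration to an IP set reference statement, assuming the aws and wafv2
+ // packages of this vendored SDK and a hypothetical ipSetARN value:
+ //
+ //    stmt := &wafv2.IPSetReferenceStatement{
+ //        ARN: aws.String(ipSetARN), // hypothetical variable holding the IPSet ARN
+ //        IPSetForwardedIPConfig: &wafv2.IPSetForwardedIPConfig{
+ //            FallbackBehavior: aws.String("NO_MATCH"),
+ //            HeaderName:       aws.String("X-Forwarded-For"),
+ //            Position:         aws.String("FIRST"),
+ //        },
+ //    }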
+ // + // Position is a required field + Position *string `type:"string" required:"true" enum:"ForwardedIPPosition"` +} + +// String returns the string representation +func (s IPSetForwardedIPConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IPSetForwardedIPConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IPSetForwardedIPConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IPSetForwardedIPConfig"} + if s.FallbackBehavior == nil { + invalidParams.Add(request.NewErrParamRequired("FallbackBehavior")) + } + if s.HeaderName == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderName")) + } + if s.HeaderName != nil && len(*s.HeaderName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HeaderName", 1)) + } + if s.Position == nil { + invalidParams.Add(request.NewErrParamRequired("Position")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFallbackBehavior sets the FallbackBehavior field's value. +func (s *IPSetForwardedIPConfig) SetFallbackBehavior(v string) *IPSetForwardedIPConfig { + s.FallbackBehavior = &v + return s +} + +// SetHeaderName sets the HeaderName field's value. +func (s *IPSetForwardedIPConfig) SetHeaderName(v string) *IPSetForwardedIPConfig { + s.HeaderName = &v + return s +} + +// SetPosition sets the Position field's value. +func (s *IPSetForwardedIPConfig) SetPosition(v string) *IPSetForwardedIPConfig { + s.Position = &v + return s +} + // // This is the latest version of AWS WAF, named AWS WAFV2, released in November, // 2019. For information, including how to migrate your AWS WAF resources from @@ -7560,6 +8753,15 @@ type IPSetReferenceStatement struct { // // ARN is a required field ARN *string `min:"20" type:"string" required:"true"` + + // The configuration for inspecting IP addresses in an HTTP header that you + // specify, instead of using the IP address that's reported by the web request + // origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify + // any header name. + // + // If the specified header isn't present in the request, AWS WAF doesn't apply + // the rule to the web request at all. + IPSetForwardedIPConfig *IPSetForwardedIPConfig `type:"structure"` } // String returns the string representation @@ -7581,6 +8783,11 @@ func (s *IPSetReferenceStatement) Validate() error { if s.ARN != nil && len(*s.ARN) < 20 { invalidParams.Add(request.NewErrParamMinLen("ARN", 20)) } + if s.IPSetForwardedIPConfig != nil { + if err := s.IPSetForwardedIPConfig.Validate(); err != nil { + invalidParams.AddNested("IPSetForwardedIPConfig", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -7594,6 +8801,12 @@ func (s *IPSetReferenceStatement) SetARN(v string) *IPSetReferenceStatement { return s } +// SetIPSetForwardedIPConfig sets the IPSetForwardedIPConfig field's value. +func (s *IPSetReferenceStatement) SetIPSetForwardedIPConfig(v *IPSetForwardedIPConfig) *IPSetReferenceStatement { + s.IPSetForwardedIPConfig = v + return s +} + // // This is the latest version of AWS WAF, named AWS WAFV2, released in November, // 2019. For information, including how to migrate your AWS WAF resources from @@ -7609,8 +8822,8 @@ type IPSetSummary struct { // The Amazon Resource Name (ARN) of the entity. ARN *string `min:"20" type:"string"` - // A friendly description of the IP set. 
You cannot change the description of - // an IP set after you create it. + // A description of the IP set that helps with identification. You cannot change + // the description of an IP set after you create it. Description *string `min:"1" type:"string"` // A unique identifier for the set. This ID is returned in the responses to @@ -7627,8 +8840,8 @@ type IPSetSummary struct { // operation. LockToken *string `min:"1" type:"string"` - // A friendly name of the IP set. You cannot change the name of an IPSet after - // you create it. + // The name of the IP set. You cannot change the name of an IPSet after you + // create it. Name *string `min:"1" type:"string"` } @@ -7687,13 +8900,13 @@ type ListAvailableManagedRuleGroupsInput struct { NextMarker *string `min:"1" type:"string"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -7798,13 +9011,13 @@ type ListIPSetsInput struct { NextMarker *string `min:"1" type:"string"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -7911,13 +9124,13 @@ type ListLoggingConfigurationsInput struct { NextMarker *string `min:"1" type:"string"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -8017,13 +9230,13 @@ type ListRegexPatternSetsInput struct { NextMarker *string `min:"1" type:"string"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. 
A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -8117,7 +9330,8 @@ type ListResourcesForWebACLInput struct { _ struct{} `type:"structure"` // Used for web ACLs that are scoped for regional applications. A regional application - // can be an Application Load Balancer (ALB) or an API Gateway stage. + // can be an Application Load Balancer (ALB), an API Gateway REST API, or an + // AppSync GraphQL API. ResourceType *string `type:"string" enum:"ResourceType"` // The Amazon Resource Name (ARN) of the Web ACL. @@ -8202,13 +9416,13 @@ type ListRuleGroupsInput struct { NextMarker *string `min:"1" type:"string"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -8418,13 +9632,13 @@ type ListWebACLsInput struct { NextMarker *string `min:"1" type:"string"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -8531,9 +9745,16 @@ type LoggingConfiguration struct { // LogDestinationConfigs is a required field LogDestinationConfigs []*string `min:"1" type:"list" required:"true"` + // Indicates whether the logging configuration was created by AWS Firewall Manager, + // as part of an AWS WAF policy configuration. If true, only Firewall Manager + // can modify or delete the configuration. + ManagedByFirewallManager *bool `type:"boolean"` + // The parts of the request that you want to keep out of the logs. For example, - // if you redact the cookie field, the cookie field in the firehose will be + // if you redact the HEADER field, the HEADER field in the firehose will be // xxx. + // + // You must use one of the following values: URI, QUERY_STRING, HEADER, or METHOD. 
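+ //
+ // Editor's illustrative sketch (not generated SDK documentation): redacting the
+ // URI from logged requests, assuming the aws and wafv2 packages of this vendored
+ // SDK and hypothetical webACLARN and firehoseARN values:
+ //
+ //    logCfg := &wafv2.LoggingConfiguration{
+ //        ResourceArn:           aws.String(webACLARN),
+ //        LogDestinationConfigs: []*string{aws.String(firehoseARN)},
+ //        RedactedFields: []*wafv2.FieldToMatch{
+ //            {UriPath: &wafv2.UriPath{}},
+ //        },
+ //    }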
RedactedFields []*FieldToMatch `type:"list"` // The Amazon Resource Name (ARN) of the web ACL that you want to associate @@ -8591,6 +9812,12 @@ func (s *LoggingConfiguration) SetLogDestinationConfigs(v []*string) *LoggingCon return s } +// SetManagedByFirewallManager sets the ManagedByFirewallManager field's value. +func (s *LoggingConfiguration) SetManagedByFirewallManager(v bool) *LoggingConfiguration { + s.ManagedByFirewallManager = &v + return s +} + // SetRedactedFields sets the RedactedFields field's value. func (s *LoggingConfiguration) SetRedactedFields(v []*FieldToMatch) *LoggingConfiguration { s.RedactedFields = v @@ -8906,14 +10133,21 @@ func (s *OrStatement) SetStatements(v []*Statement) *OrStatement { return s } +// The override action to apply to the rules in a rule group. Used only for +// rule statements that reference a rule group, like RuleGroupReferenceStatement +// and ManagedRuleGroupStatement. // -// This is the latest version of AWS WAF, named AWS WAFV2, released in November, -// 2019. For information, including how to migrate your AWS WAF resources from -// the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). +// Set the override action to none to leave the rule actions in effect. Set +// it to count to only count matches, regardless of the rule action settings. +// +// In a Rule, you must specify either this OverrideAction setting or the rule +// Action setting, but not both: // -// The action to use to override the rule's Action setting. You can use no override -// action, in which case the rule action is in effect, or count, in which case, -// if the rule matches a web request, it only counts the match. +// * If the rule statement references a rule group, use this override action +// setting and not the action setting. +// +// * If the rule statement does not reference a rule group, use the rule +// action setting and not this rule override action setting. type OverrideAction struct { _ struct{} `type:"structure"` @@ -9026,6 +10260,95 @@ func (s *PutLoggingConfigurationOutput) SetLoggingConfiguration(v *LoggingConfig return s } +type PutPermissionPolicyInput struct { + _ struct{} `type:"structure"` + + // The policy to attach to the specified rule group. + // + // The policy specifications must conform to the following: + // + // * The policy must be composed using IAM Policy version 2012-10-17 or version + // 2015-01-01. + // + // * The policy must include specifications for Effect, Action, and Principal. + // + // * Effect must specify Allow. + // + // * Action must specify wafv2:CreateWebACL, wafv2:UpdateWebACL, and wafv2:PutFirewallManagerRuleGroups. + // AWS WAF rejects any extra actions or wildcard actions in the policy. + // + // * The policy must not include a Resource parameter. + // + // For more information, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html). + // + // Policy is a required field + Policy *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the RuleGroup to which you want to attach + // the policy. 
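+ //
+ // Editor's illustrative sketch (not generated SDK documentation): a minimal
+ // policy and input that follow the requirements described for the Policy field,
+ // assuming the aws and wafv2 packages of this vendored SDK and hypothetical
+ // account ID and ruleGroupARN values:
+ //
+ //    policy := `{
+ //      "Version": "2012-10-17",
+ //      "Statement": [{
+ //        "Effect": "Allow",
+ //        "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
+ //        "Action": ["wafv2:CreateWebACL", "wafv2:UpdateWebACL", "wafv2:PutFirewallManagerRuleGroups"]
+ //      }]
+ //    }`
+ //    input := &wafv2.PutPermissionPolicyInput{
+ //        Policy:      aws.String(policy),
+ //        ResourceArn: aws.String(ruleGroupARN),
+ //    }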
+ // + // ResourceArn is a required field + ResourceArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutPermissionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPermissionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutPermissionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutPermissionPolicyInput"} + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicy sets the Policy field's value. +func (s *PutPermissionPolicyInput) SetPolicy(v string) *PutPermissionPolicyInput { + s.Policy = &v + return s +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *PutPermissionPolicyInput) SetResourceArn(v string) *PutPermissionPolicyInput { + s.ResourceArn = &v + return s +} + +type PutPermissionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutPermissionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPermissionPolicyOutput) GoString() string { + return s.String() +} + // // This is the latest version of AWS WAF, named AWS WAFV2, released in November, // 2019. For information, including how to migrate your AWS WAF resources from @@ -9087,14 +10410,32 @@ func (s QueryString) GoString() string { type RateBasedStatement struct { _ struct{} `type:"structure"` - // Setting that indicates how to aggregate the request counts. Currently, you - // must set this to IP. The request counts are aggregated on IP addresses. + // Setting that indicates how to aggregate the request counts. The options are + // the following: + // + // * IP - Aggregate the request counts on the IP address from the web request + // origin. + // + // * FORWARDED_IP - Aggregate the request counts on the first IP address + // in an HTTP header. If you use this, configure the ForwardedIPConfig, to + // specify the header to use. // // AggregateKeyType is a required field AggregateKeyType *string `type:"string" required:"true" enum:"RateBasedStatementAggregateKeyType"` + // The configuration for inspecting IP addresses in an HTTP header that you + // specify, instead of using the IP address that's reported by the web request + // origin. Commonly, this is the X-Forwarded-For (XFF) header, but you can specify + // any header name. + // + // If the specified header isn't present in the request, AWS WAF doesn't apply + // the rule to the web request at all. + // + // This is required if AggregateKeyType is set to FORWARDED_IP. + ForwardedIPConfig *ForwardedIPConfig `type:"structure"` + // The limit on requests per 5-minute period for a single originating IP address. - // If the statement includes a ScopDownStatement, this limit is applied only + // If the statement includes a ScopeDownStatement, this limit is applied only // to the requests that match the statement. 
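+ //
+ // Editor's illustrative sketch (not generated SDK documentation): a rate-based
+ // statement that aggregates on a forwarded IP header, assuming the aws and
+ // wafv2 packages of this vendored SDK:
+ //
+ //    rate := &wafv2.RateBasedStatement{
+ //        AggregateKeyType: aws.String("FORWARDED_IP"),
+ //        Limit:            aws.Int64(2000),
+ //        ForwardedIPConfig: &wafv2.ForwardedIPConfig{
+ //            FallbackBehavior: aws.String("NO_MATCH"),
+ //            HeaderName:       aws.String("X-Forwarded-For"),
+ //        },
+ //    }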
// // Limit is a required field @@ -9128,6 +10469,11 @@ func (s *RateBasedStatement) Validate() error { if s.Limit != nil && *s.Limit < 100 { invalidParams.Add(request.NewErrParamMinValue("Limit", 100)) } + if s.ForwardedIPConfig != nil { + if err := s.ForwardedIPConfig.Validate(); err != nil { + invalidParams.AddNested("ForwardedIPConfig", err.(request.ErrInvalidParams)) + } + } if s.ScopeDownStatement != nil { if err := s.ScopeDownStatement.Validate(); err != nil { invalidParams.AddNested("ScopeDownStatement", err.(request.ErrInvalidParams)) @@ -9146,6 +10492,12 @@ func (s *RateBasedStatement) SetAggregateKeyType(v string) *RateBasedStatement { return s } +// SetForwardedIPConfig sets the ForwardedIPConfig field's value. +func (s *RateBasedStatement) SetForwardedIPConfig(v *ForwardedIPConfig) *RateBasedStatement { + s.ForwardedIPConfig = v + return s +} + // SetLimit sets the Limit field's value. func (s *RateBasedStatement) SetLimit(v int64) *RateBasedStatement { s.Limit = &v @@ -9252,20 +10604,19 @@ type RegexPatternSet struct { // The Amazon Resource Name (ARN) of the entity. ARN *string `min:"20" type:"string"` - // A friendly description of the set. You cannot change the description of a - // set after you create it. + // A description of the set that helps with identification. You cannot change + // the description of a set after you create it. Description *string `min:"1" type:"string"` // A unique identifier for the set. This ID is returned in the responses to // create and list commands. You provide it to operations like update and delete. Id *string `min:"1" type:"string"` - // A friendly name of the set. You cannot change the name after you create the - // set. + // The name of the set. You cannot change the name after you create the set. Name *string `min:"1" type:"string"` // The regular expression patterns in the set. - RegularExpressionList []*Regex `min:"1" type:"list"` + RegularExpressionList []*Regex `type:"list"` } // String returns the string representation @@ -9342,8 +10693,8 @@ type RegexPatternSetReferenceStatement struct { // Text transformations eliminate some of the unusual formatting that attackers // use in web requests in an effort to bypass detection. If you specify one // or more transformations in a rule statement, AWS WAF performs all transformations - // on the content identified by FieldToMatch, starting from the lowest priority - // setting, before inspecting the content for a match. + // on the content of the request component identified by FieldToMatch, starting + // from the lowest priority setting, before inspecting the content for a match. // // TextTransformations is a required field TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` @@ -9432,8 +10783,8 @@ type RegexPatternSetSummary struct { // The Amazon Resource Name (ARN) of the entity. ARN *string `min:"20" type:"string"` - // A friendly description of the set. You cannot change the description of a - // set after you create it. + // A description of the set that helps with identification. You cannot change + // the description of a set after you create it. Description *string `min:"1" type:"string"` // A unique identifier for the set. This ID is returned in the responses to @@ -9450,8 +10801,8 @@ type RegexPatternSetSummary struct { // operation. LockToken *string `min:"1" type:"string"` - // A friendly name of the data type instance. You cannot change the name after - // you create the instance. + // The name of the data type instance. 
You cannot change the name after you + // create the instance. Name *string `min:"1" type:"string"` } @@ -9508,19 +10859,44 @@ type Rule struct { _ struct{} `type:"structure"` // The action that AWS WAF should take on a web request when it matches the - // rule's statement. Settings at the web ACL level can override the rule action + // rule statement. Settings at the web ACL level can override the rule action // setting. + // + // This is used only for rules whose statements do not reference a rule group. + // Rule statements that reference a rule group include RuleGroupReferenceStatement + // and ManagedRuleGroupStatement. + // + // You must specify either this Action setting or the rule OverrideAction setting, + // but not both: + // + // * If the rule statement does not reference a rule group, use this rule + // action setting and not the rule override action setting. + // + // * If the rule statement references a rule group, use the override action + // setting and not this action setting. Action *RuleAction `type:"structure"` - // A friendly name of the rule. You can't change the name of a Rule after you - // create it. + // The name of the rule. You can't change the name of a Rule after you create + // it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` - // The action to use to override the rule's Action setting. You can use no override - // action, in which case the rule action is in effect, or count action, in which - // case, if the rule matches a web request, it only counts the match. + // The override action to apply to the rules in a rule group. Used only for + // rule statements that reference a rule group, like RuleGroupReferenceStatement + // and ManagedRuleGroupStatement. + // + // Set the override action to none to leave the rule actions in effect. Set + // it to count to only count matches, regardless of the rule action settings. + // + // In a Rule, you must specify either this OverrideAction setting or the rule + // Action setting, but not both: + // + // * If the rule statement references a rule group, use this override action + // setting and not the action setting. + // + // * If the rule statement does not reference a rule group, use the rule + // action setting and not this rule override action setting. OverrideAction *OverrideAction `type:"structure"` // If you define more than one Rule in a WebACL, AWS WAF evaluates each request @@ -9708,8 +11084,8 @@ type RuleGroup struct { // Capacity is a required field Capacity *int64 `min:"1" type:"long" required:"true"` - // A friendly description of the rule group. You cannot change the description - // of a rule group after you create it. + // A description of the rule group that helps with identification. You cannot + // change the description of a rule group after you create it. Description *string `min:"1" type:"string"` // A unique identifier for the rule group. This ID is returned in the responses @@ -9719,8 +11095,8 @@ type RuleGroup struct { // Id is a required field Id *string `min:"1" type:"string" required:"true"` - // A friendly name of the rule group. You cannot change the name of a rule group - // after you create it. + // The name of the rule group. You cannot change the name of a rule group after + // you create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -9877,8 +11253,8 @@ type RuleGroupSummary struct { // The Amazon Resource Name (ARN) of the entity. 
ARN *string `min:"20" type:"string"` - // A friendly description of the rule group. You cannot change the description - // of a rule group after you create it. + // A description of the rule group that helps with identification. You cannot + // change the description of a rule group after you create it. Description *string `min:"1" type:"string"` // A unique identifier for the rule group. This ID is returned in the responses @@ -9896,8 +11272,8 @@ type RuleGroupSummary struct { // operation. LockToken *string `min:"1" type:"string"` - // A friendly name of the data type instance. You cannot change the name after - // you create the instance. + // The name of the data type instance. You cannot change the name after you + // create the instance. Name *string `min:"1" type:"string"` } @@ -10011,7 +11387,7 @@ type SampledHTTPRequest struct { // The name of the Rule that the request matched. For managed rule groups, the // format for this name is ##. // For your own rule groups, the format for this name is #. If the rule is not in a rule group, the format is . + // name>. If the rule is not in a rule group, this field is absent. RuleNameWithinRuleGroup *string `min:"1" type:"string"` // The time at which AWS WAF received the request from your AWS resource, in @@ -10206,8 +11582,8 @@ type SizeConstraintStatement struct { // Text transformations eliminate some of the unusual formatting that attackers // use in web requests in an effort to bypass detection. If you specify one // or more transformations in a rule statement, AWS WAF performs all transformations - // on the content identified by FieldToMatch, starting from the lowest priority - // setting, before inspecting the content for a match. + // on the content of the request component identified by FieldToMatch, starting + // from the lowest priority setting, before inspecting the content for a match. // // TextTransformations is a required field TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` @@ -10311,8 +11687,8 @@ type SqliMatchStatement struct { // Text transformations eliminate some of the unusual formatting that attackers // use in web requests in an effort to bypass detection. If you specify one // or more transformations in a rule statement, AWS WAF performs all transformations - // on the content identified by FieldToMatch, starting from the lowest priority - // setting, before inspecting the content for a match. + // on the content of the request component identified by FieldToMatch, starting + // from the lowest priority setting, before inspecting the content for a match. // // TextTransformations is a required field TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` @@ -10682,11 +12058,18 @@ func (s *Statement) SetXssMatchStatement(v *XssMatchStatement) *Statement { // 2019. For information, including how to migrate your AWS WAF resources from // the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). // -// A collection of key:value pairs associated with an AWS resource. The key:value -// pair can be anything you define. Typically, the tag key represents a category -// (such as "environment") and the tag value represents a specific value within -// that category (such as "test," "development," or "production"). You can add -// up to 50 tags to each AWS resource. +// A tag associated with an AWS resource. 
Tags are key:value pairs that you +// can use to categorize and manage your resources, for purposes like billing +// or other management. Typically, the tag key represents a category, such as +// "environment", and the tag value represents a specific value within that +// category, such as "test," "development," or "production". Or you might set +// the tag key to "customer" and the value to the customer name or ID. You can +// specify one or more tags to add to each AWS resource, up to 50 tags for a +// resource. +// +// You can tag the AWS resources that you manage through AWS WAF: web ACLs, +// rule groups, IP sets, and regex pattern sets. You can't manage or view tags +// through the AWS WAF console. type Tag struct { _ struct{} `type:"structure"` @@ -10750,7 +12133,18 @@ func (s *Tag) SetValue(v string) *Tag { // 2019. For information, including how to migrate your AWS WAF resources from // the prior release, see the AWS WAF Developer Guide (https://docs.aws.amazon.com/waf/latest/developerguide/waf-chapter.html). // -// The collection of tagging definitions for an AWS resource. +// The collection of tagging definitions for an AWS resource. Tags are key:value +// pairs that you can use to categorize and manage your resources, for purposes +// like billing or other management. Typically, the tag key represents a category, +// such as "environment", and the tag value represents a specific value within +// that category, such as "test," "development," or "production". Or you might +// set the tag key to "customer" and the value to the customer name or ID. You +// can specify one or more tags to add to each AWS resource, up to 50 tags for +// a resource. +// +// You can tag the AWS resources that you manage through AWS WAF: web ACLs, +// rule groups, IP sets, and regex pattern sets. You can't manage or view tags +// through the AWS WAF console. type TagInfoForResource struct { _ struct{} `type:"structure"` @@ -11001,6 +12395,10 @@ func (s *TextTransformation) SetType(v string) *TextTransformation { // In a GetSampledRequests request, the StartTime and EndTime objects specify // the time range for which you want AWS WAF to return a sample of web requests. // +// You must specify the times in Coordinated Universal Time (UTC) format. UTC +// format includes the special designator, Z. For example, "2016-09-27T14:50Z". +// You can specify any time range in the previous three hours. +// // In a GetSampledRequests response, the StartTime and EndTime objects specify // the time range for which AWS WAF actually returned a sample of web requests. // AWS WAF gets the specified number of requests from among the first 5,000 @@ -11012,17 +12410,19 @@ type TimeWindow struct { _ struct{} `type:"structure"` // The end of the time range from which you want GetSampledRequests to return - // a sample of the requests that your AWS resource received. Specify the date - // and time in the following format: "2016-09-27T14:50Z". You can specify any - // time range in the previous three hours. + // a sample of the requests that your AWS resource received. You must specify + // the times in Coordinated Universal Time (UTC) format. UTC format includes + // the special designator, Z. For example, "2016-09-27T14:50Z". You can specify + // any time range in the previous three hours. // // EndTime is a required field EndTime *time.Time `type:"timestamp" required:"true"` // The beginning of the time range from which you want GetSampledRequests to - // return a sample of the requests that your AWS resource received. 
Specify - // the date and time in the following format: "2016-09-27T14:50Z". You can specify - // any time range in the previous three hours. + // return a sample of the requests that your AWS resource received. You must + // specify the times in Coordinated Universal Time (UTC) format. UTC format + // includes the special designator, Z. For example, "2016-09-27T14:50Z". You + // can specify any time range in the previous three hours. // // StartTime is a required field StartTime *time.Time `type:"timestamp" required:"true"` @@ -11166,8 +12566,8 @@ type UpdateIPSetInput struct { // Addresses is a required field Addresses []*string `type:"list" required:"true"` - // A friendly description of the IP set. You cannot change the description of - // an IP set after you create it. + // A description of the IP set that helps with identification. You cannot change + // the description of an IP set after you create it. Description *string `min:"1" type:"string"` // A unique identifier for the set. This ID is returned in the responses to @@ -11188,20 +12588,20 @@ type UpdateIPSetInput struct { // LockToken is a required field LockToken *string `min:"1" type:"string" required:"true"` - // A friendly name of the IP set. You cannot change the name of an IPSet after - // you create it. + // The name of the IP set. You cannot change the name of an IPSet after you + // create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -11320,8 +12720,8 @@ func (s *UpdateIPSetOutput) SetNextLockToken(v string) *UpdateIPSetOutput { type UpdateRegexPatternSetInput struct { _ struct{} `type:"structure"` - // A friendly description of the set. You cannot change the description of a - // set after you create it. + // A description of the set that helps with identification. You cannot change + // the description of a set after you create it. Description *string `min:"1" type:"string"` // A unique identifier for the set. This ID is returned in the responses to @@ -11342,23 +12742,22 @@ type UpdateRegexPatternSetInput struct { // LockToken is a required field LockToken *string `min:"1" type:"string" required:"true"` - // A friendly name of the set. You cannot change the name after you create the - // set. + // The name of the set. You cannot change the name after you create the set. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` // RegularExpressionList is a required field - RegularExpressionList []*Regex `min:"1" type:"list" required:"true"` + RegularExpressionList []*Regex `type:"list" required:"true"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. 
A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -11404,9 +12803,6 @@ func (s *UpdateRegexPatternSetInput) Validate() error { if s.RegularExpressionList == nil { invalidParams.Add(request.NewErrParamRequired("RegularExpressionList")) } - if s.RegularExpressionList != nil && len(s.RegularExpressionList) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RegularExpressionList", 1)) - } if s.Scope == nil { invalidParams.Add(request.NewErrParamRequired("Scope")) } @@ -11490,8 +12886,8 @@ func (s *UpdateRegexPatternSetOutput) SetNextLockToken(v string) *UpdateRegexPat type UpdateRuleGroupInput struct { _ struct{} `type:"structure"` - // A friendly description of the rule group. You cannot change the description - // of a rule group after you create it. + // A description of the rule group that helps with identification. You cannot + // change the description of a rule group after you create it. Description *string `min:"1" type:"string"` // A unique identifier for the rule group. This ID is returned in the responses @@ -11513,8 +12909,8 @@ type UpdateRuleGroupInput struct { // LockToken is a required field LockToken *string `min:"1" type:"string" required:"true"` - // A friendly name of the rule group. You cannot change the name of a rule group - // after you create it. + // The name of the rule group. You cannot change the name of a rule group after + // you create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -11526,13 +12922,13 @@ type UpdateRuleGroupInput struct { Rules []*Rule `type:"list"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -11682,8 +13078,8 @@ type UpdateWebACLInput struct { // DefaultAction is a required field DefaultAction *DefaultAction `type:"structure" required:"true"` - // A friendly description of the Web ACL. You cannot change the description - // of a Web ACL after you create it. + // A description of the Web ACL that helps with identification. You cannot change + // the description of a Web ACL after you create it. Description *string `min:"1" type:"string"` // The unique identifier for the Web ACL. This ID is returned in the responses @@ -11705,8 +13101,8 @@ type UpdateWebACLInput struct { // LockToken is a required field LockToken *string `min:"1" type:"string" required:"true"` - // A friendly name of the Web ACL. You cannot change the name of a Web ACL after - // you create it. + // The name of the Web ACL. 
You cannot change the name of a Web ACL after you + // create it. // // Name is a required field Name *string `min:"1" type:"string" required:"true"` @@ -11718,13 +13114,13 @@ type UpdateWebACLInput struct { Rules []*Rule `type:"list"` // Specifies whether this is for an AWS CloudFront distribution or for a regional - // application. A regional application can be an Application Load Balancer (ALB) - // or an API Gateway stage. + // application. A regional application can be an Application Load Balancer (ALB), + // an API Gateway REST API, or an AppSync GraphQL API. // // To work with CloudFront, you must also specify the Region US East (N. Virginia) // as follows: // - // * CLI - Specify the region when you use the CloudFront scope: --scope=CLOUDFRONT + // * CLI - Specify the Region when you use the CloudFront scope: --scope=CLOUDFRONT // --region=us-east-1. // // * API and SDKs - For all calls, use the Region endpoint us-east-1. @@ -11914,10 +13310,10 @@ type VisibilityConfig struct { // CloudWatchMetricsEnabled is a required field CloudWatchMetricsEnabled *bool `type:"boolean" required:"true"` - // A friendly name of the CloudWatch metric. The name can contain only alphanumeric - // characters (A-Z, a-z, 0-9), with length from one to 128 characters. It can't - // contain whitespace or metric names reserved for AWS WAF, for example "All" - // and "Default_Action." You can't change a MetricName after you create a VisibilityConfig. + // A name of the CloudWatch metric. The name can contain only the characters: + // A-Z, a-z, 0-9, - (hyphen), and _ (underscore). The name can be from one to + // 128 characters long. It can't contain whitespace or metric names reserved + // for AWS WAF, for example "All" and "Default_Action." // // MetricName is a required field MetricName *string `min:"1" type:"string" required:"true"` @@ -11983,8 +13379,8 @@ func (s *VisibilityConfig) SetSampledRequestsEnabled(v bool) *VisibilityConfig { // AWS WAF couldn’t perform the operation because your resource is being used // by another resource or it’s associated with another resource. type WAFAssociatedItemException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12001,17 +13397,17 @@ func (s WAFAssociatedItemException) GoString() string { func newErrorWAFAssociatedItemException(v protocol.ResponseMetadata) error { return &WAFAssociatedItemException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFAssociatedItemException) Code() string { +func (s *WAFAssociatedItemException) Code() string { return "WAFAssociatedItemException" } // Message returns the exception's message. -func (s WAFAssociatedItemException) Message() string { +func (s *WAFAssociatedItemException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12019,29 +13415,29 @@ func (s WAFAssociatedItemException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFAssociatedItemException) OrigErr() error { +func (s *WAFAssociatedItemException) OrigErr() error { return nil } -func (s WAFAssociatedItemException) Error() string { +func (s *WAFAssociatedItemException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s WAFAssociatedItemException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFAssociatedItemException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFAssociatedItemException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFAssociatedItemException) RequestID() string { + return s.RespMetadata.RequestID } // AWS WAF couldn’t perform the operation because the resource that you tried // to save is a duplicate of an existing one. type WAFDuplicateItemException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12058,17 +13454,17 @@ func (s WAFDuplicateItemException) GoString() string { func newErrorWAFDuplicateItemException(v protocol.ResponseMetadata) error { return &WAFDuplicateItemException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFDuplicateItemException) Code() string { +func (s *WAFDuplicateItemException) Code() string { return "WAFDuplicateItemException" } // Message returns the exception's message. -func (s WAFDuplicateItemException) Message() string { +func (s *WAFDuplicateItemException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12076,29 +13472,29 @@ func (s WAFDuplicateItemException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFDuplicateItemException) OrigErr() error { +func (s *WAFDuplicateItemException) OrigErr() error { return nil } -func (s WAFDuplicateItemException) Error() string { +func (s *WAFDuplicateItemException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFDuplicateItemException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFDuplicateItemException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFDuplicateItemException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFDuplicateItemException) RequestID() string { + return s.RespMetadata.RequestID } // Your request is valid, but AWS WAF couldn’t perform the operation because // of a system problem. Retry your request. type WAFInternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12115,17 +13511,73 @@ func (s WAFInternalErrorException) GoString() string { func newErrorWAFInternalErrorException(v protocol.ResponseMetadata) error { return &WAFInternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFInternalErrorException) Code() string { +func (s *WAFInternalErrorException) Code() string { return "WAFInternalErrorException" } // Message returns the exception's message. -func (s WAFInternalErrorException) Message() string { +func (s *WAFInternalErrorException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *WAFInternalErrorException) OrigErr() error { + return nil +} + +func (s *WAFInternalErrorException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *WAFInternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *WAFInternalErrorException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The operation isn't valid. +type WAFInvalidOperationException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s WAFInvalidOperationException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WAFInvalidOperationException) GoString() string { + return s.String() +} + +func newErrorWAFInvalidOperationException(v protocol.ResponseMetadata) error { + return &WAFInvalidOperationException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *WAFInvalidOperationException) Code() string { + return "WAFInvalidOperationException" +} + +// Message returns the exception's message. +func (s *WAFInvalidOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12133,22 +13585,22 @@ func (s WAFInternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFInternalErrorException) OrigErr() error { +func (s *WAFInvalidOperationException) OrigErr() error { return nil } -func (s WAFInternalErrorException) Error() string { +func (s *WAFInvalidOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFInternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFInvalidOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFInternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFInvalidOperationException) RequestID() string { + return s.RespMetadata.RequestID } // The operation failed because AWS WAF didn't recognize a parameter in the @@ -12165,8 +13617,8 @@ func (s WAFInternalErrorException) RequestID() string { // * Your request references an ARN that is malformed, or corresponds to // a resource with which a Web ACL cannot be associated. type WAFInvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Field *string `type:"string" enum:"ParameterExceptionField"` @@ -12189,17 +13641,17 @@ func (s WAFInvalidParameterException) GoString() string { func newErrorWAFInvalidParameterException(v protocol.ResponseMetadata) error { return &WAFInvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFInvalidParameterException) Code() string { +func (s *WAFInvalidParameterException) Code() string { return "WAFInvalidParameterException" } // Message returns the exception's message. 
-func (s WAFInvalidParameterException) Message() string { +func (s *WAFInvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12207,29 +13659,101 @@ func (s WAFInvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFInvalidParameterException) OrigErr() error { +func (s *WAFInvalidParameterException) OrigErr() error { return nil } -func (s WAFInvalidParameterException) Error() string { +func (s *WAFInvalidParameterException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFInvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFInvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *WAFInvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The operation failed because the specified policy isn't in the proper format. +// +// The policy specifications must conform to the following: +// +// * The policy must be composed using IAM Policy version 2012-10-17 or version +// 2015-01-01. +// +// * The policy must include specifications for Effect, Action, and Principal. +// +// * Effect must specify Allow. +// +// * Action must specify wafv2:CreateWebACL, wafv2:UpdateWebACL, and wafv2:PutFirewallManagerRuleGroups. +// AWS WAF rejects any extra actions or wildcard actions in the policy. +// +// * The policy must not include a Resource parameter. +// +// For more information, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html). +type WAFInvalidPermissionPolicyException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s WAFInvalidPermissionPolicyException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WAFInvalidPermissionPolicyException) GoString() string { + return s.String() +} + +func newErrorWAFInvalidPermissionPolicyException(v protocol.ResponseMetadata) error { + return &WAFInvalidPermissionPolicyException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *WAFInvalidPermissionPolicyException) Code() string { + return "WAFInvalidPermissionPolicyException" +} + +// Message returns the exception's message. +func (s *WAFInvalidPermissionPolicyException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *WAFInvalidPermissionPolicyException) OrigErr() error { + return nil +} + +func (s *WAFInvalidPermissionPolicyException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *WAFInvalidPermissionPolicyException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s WAFInvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFInvalidPermissionPolicyException) RequestID() string { + return s.RespMetadata.RequestID } // AWS WAF couldn’t perform the operation because the resource that you requested // isn’t valid. Check the resource, and try again. type WAFInvalidResourceException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12246,17 +13770,17 @@ func (s WAFInvalidResourceException) GoString() string { func newErrorWAFInvalidResourceException(v protocol.ResponseMetadata) error { return &WAFInvalidResourceException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFInvalidResourceException) Code() string { +func (s *WAFInvalidResourceException) Code() string { return "WAFInvalidResourceException" } // Message returns the exception's message. -func (s WAFInvalidResourceException) Message() string { +func (s *WAFInvalidResourceException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12264,22 +13788,22 @@ func (s WAFInvalidResourceException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFInvalidResourceException) OrigErr() error { +func (s *WAFInvalidResourceException) OrigErr() error { return nil } -func (s WAFInvalidResourceException) Error() string { +func (s *WAFInvalidResourceException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFInvalidResourceException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFInvalidResourceException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFInvalidResourceException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFInvalidResourceException) RequestID() string { + return s.RespMetadata.RequestID } // AWS WAF couldn’t perform the operation because you exceeded your resource @@ -12287,8 +13811,8 @@ func (s WAFInvalidResourceException) RequestID() string { // for an AWS account. For more information, see Limits (https://docs.aws.amazon.com/waf/latest/developerguide/limits.html) // in the AWS WAF Developer Guide. type WAFLimitsExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12305,17 +13829,17 @@ func (s WAFLimitsExceededException) GoString() string { func newErrorWAFLimitsExceededException(v protocol.ResponseMetadata) error { return &WAFLimitsExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFLimitsExceededException) Code() string { +func (s *WAFLimitsExceededException) Code() string { return "WAFLimitsExceededException" } // Message returns the exception's message. -func (s WAFLimitsExceededException) Message() string { +func (s *WAFLimitsExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12323,29 +13847,29 @@ func (s WAFLimitsExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s WAFLimitsExceededException) OrigErr() error { +func (s *WAFLimitsExceededException) OrigErr() error { return nil } -func (s WAFLimitsExceededException) Error() string { +func (s *WAFLimitsExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFLimitsExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFLimitsExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFLimitsExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFLimitsExceededException) RequestID() string { + return s.RespMetadata.RequestID } // AWS WAF couldn’t perform the operation because your resource doesn’t // exist. type WAFNonexistentItemException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12362,17 +13886,17 @@ func (s WAFNonexistentItemException) GoString() string { func newErrorWAFNonexistentItemException(v protocol.ResponseMetadata) error { return &WAFNonexistentItemException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFNonexistentItemException) Code() string { +func (s *WAFNonexistentItemException) Code() string { return "WAFNonexistentItemException" } // Message returns the exception's message. -func (s WAFNonexistentItemException) Message() string { +func (s *WAFNonexistentItemException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12380,22 +13904,22 @@ func (s WAFNonexistentItemException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFNonexistentItemException) OrigErr() error { +func (s *WAFNonexistentItemException) OrigErr() error { return nil } -func (s WAFNonexistentItemException) Error() string { +func (s *WAFNonexistentItemException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFNonexistentItemException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFNonexistentItemException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFNonexistentItemException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFNonexistentItemException) RequestID() string { + return s.RespMetadata.RequestID } // AWS WAF couldn’t save your changes because you tried to update or delete @@ -12403,8 +13927,8 @@ func (s WAFNonexistentItemException) RequestID() string { // again, make any changes you need to make to the new copy, and retry your // operation. type WAFOptimisticLockException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12421,17 +13945,17 @@ func (s WAFOptimisticLockException) GoString() string { func newErrorWAFOptimisticLockException(v protocol.ResponseMetadata) error { return &WAFOptimisticLockException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s WAFOptimisticLockException) Code() string { +func (s *WAFOptimisticLockException) Code() string { return "WAFOptimisticLockException" } // Message returns the exception's message. -func (s WAFOptimisticLockException) Message() string { +func (s *WAFOptimisticLockException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12439,22 +13963,22 @@ func (s WAFOptimisticLockException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFOptimisticLockException) OrigErr() error { +func (s *WAFOptimisticLockException) OrigErr() error { return nil } -func (s WAFOptimisticLockException) Error() string { +func (s *WAFOptimisticLockException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFOptimisticLockException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFOptimisticLockException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFOptimisticLockException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFOptimisticLockException) RequestID() string { + return s.RespMetadata.RequestID } // AWS WAF is not able to access the service linked role. This can be caused @@ -12466,8 +13990,8 @@ func (s WAFOptimisticLockException) RequestID() string { // again. If you receive this same exception again, you will have to wait additional // time until the role is unlocked. type WAFServiceLinkedRoleErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -12484,17 +14008,17 @@ func (s WAFServiceLinkedRoleErrorException) GoString() string { func newErrorWAFServiceLinkedRoleErrorException(v protocol.ResponseMetadata) error { return &WAFServiceLinkedRoleErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFServiceLinkedRoleErrorException) Code() string { +func (s *WAFServiceLinkedRoleErrorException) Code() string { return "WAFServiceLinkedRoleErrorException" } // Message returns the exception's message. -func (s WAFServiceLinkedRoleErrorException) Message() string { +func (s *WAFServiceLinkedRoleErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12502,27 +14026,27 @@ func (s WAFServiceLinkedRoleErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFServiceLinkedRoleErrorException) OrigErr() error { +func (s *WAFServiceLinkedRoleErrorException) OrigErr() error { return nil } -func (s WAFServiceLinkedRoleErrorException) Error() string { +func (s *WAFServiceLinkedRoleErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFServiceLinkedRoleErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFServiceLinkedRoleErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s WAFServiceLinkedRoleErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFServiceLinkedRoleErrorException) RequestID() string { + return s.RespMetadata.RequestID } type WAFSubscriptionNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12539,17 +14063,17 @@ func (s WAFSubscriptionNotFoundException) GoString() string { func newErrorWAFSubscriptionNotFoundException(v protocol.ResponseMetadata) error { return &WAFSubscriptionNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFSubscriptionNotFoundException) Code() string { +func (s *WAFSubscriptionNotFoundException) Code() string { return "WAFSubscriptionNotFoundException" } // Message returns the exception's message. -func (s WAFSubscriptionNotFoundException) Message() string { +func (s *WAFSubscriptionNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12557,28 +14081,28 @@ func (s WAFSubscriptionNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFSubscriptionNotFoundException) OrigErr() error { +func (s *WAFSubscriptionNotFoundException) OrigErr() error { return nil } -func (s WAFSubscriptionNotFoundException) Error() string { +func (s *WAFSubscriptionNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFSubscriptionNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFSubscriptionNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFSubscriptionNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFSubscriptionNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // An error occurred during the tagging operation. Retry your request. type WAFTagOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12595,17 +14119,17 @@ func (s WAFTagOperationException) GoString() string { func newErrorWAFTagOperationException(v protocol.ResponseMetadata) error { return &WAFTagOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFTagOperationException) Code() string { +func (s *WAFTagOperationException) Code() string { return "WAFTagOperationException" } // Message returns the exception's message. -func (s WAFTagOperationException) Message() string { +func (s *WAFTagOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12613,29 +14137,29 @@ func (s WAFTagOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s WAFTagOperationException) OrigErr() error { +func (s *WAFTagOperationException) OrigErr() error { return nil } -func (s WAFTagOperationException) Error() string { +func (s *WAFTagOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFTagOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFTagOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFTagOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFTagOperationException) RequestID() string { + return s.RespMetadata.RequestID } // AWS WAF couldn’t perform your tagging operation because of an internal // error. Retry your request. type WAFTagOperationInternalErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12652,17 +14176,17 @@ func (s WAFTagOperationInternalErrorException) GoString() string { func newErrorWAFTagOperationInternalErrorException(v protocol.ResponseMetadata) error { return &WAFTagOperationInternalErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFTagOperationInternalErrorException) Code() string { +func (s *WAFTagOperationInternalErrorException) Code() string { return "WAFTagOperationInternalErrorException" } // Message returns the exception's message. -func (s WAFTagOperationInternalErrorException) Message() string { +func (s *WAFTagOperationInternalErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12670,28 +14194,28 @@ func (s WAFTagOperationInternalErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFTagOperationInternalErrorException) OrigErr() error { +func (s *WAFTagOperationInternalErrorException) OrigErr() error { return nil } -func (s WAFTagOperationInternalErrorException) Error() string { +func (s *WAFTagOperationInternalErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFTagOperationInternalErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFTagOperationInternalErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFTagOperationInternalErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFTagOperationInternalErrorException) RequestID() string { + return s.RespMetadata.RequestID } // AWS WAF couldn’t retrieve the resource that you requested. Retry your request. 
type WAFUnavailableEntityException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -12708,17 +14232,17 @@ func (s WAFUnavailableEntityException) GoString() string { func newErrorWAFUnavailableEntityException(v protocol.ResponseMetadata) error { return &WAFUnavailableEntityException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WAFUnavailableEntityException) Code() string { +func (s *WAFUnavailableEntityException) Code() string { return "WAFUnavailableEntityException" } // Message returns the exception's message. -func (s WAFUnavailableEntityException) Message() string { +func (s *WAFUnavailableEntityException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -12726,22 +14250,22 @@ func (s WAFUnavailableEntityException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WAFUnavailableEntityException) OrigErr() error { +func (s *WAFUnavailableEntityException) OrigErr() error { return nil } -func (s WAFUnavailableEntityException) Error() string { +func (s *WAFUnavailableEntityException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WAFUnavailableEntityException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WAFUnavailableEntityException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WAFUnavailableEntityException) RequestID() string { - return s.respMetadata.RequestID +func (s *WAFUnavailableEntityException) RequestID() string { + return s.RespMetadata.RequestID } // @@ -12756,7 +14280,8 @@ func (s WAFUnavailableEntityException) RequestID() string { // the rules. The rules in a Web ACL can be a combination of the types Rule, // RuleGroup, and managed rule group. You can associate a Web ACL with one or // more AWS resources to protect. The resources can be Amazon CloudFront, an -// Amazon API Gateway API, or an Application Load Balancer. +// Amazon API Gateway REST API, an Application Load Balancer, or an AWS AppSync +// GraphQL API. type WebACL struct { _ struct{} `type:"structure"` @@ -12782,8 +14307,8 @@ type WebACL struct { // DefaultAction is a required field DefaultAction *DefaultAction `type:"structure" required:"true"` - // A friendly description of the Web ACL. You cannot change the description - // of a Web ACL after you create it. + // A description of the Web ACL that helps with identification. You cannot change + // the description of a Web ACL after you create it. Description *string `min:"1" type:"string"` // A unique identifier for the WebACL. This ID is returned in the responses @@ -12793,12 +14318,39 @@ type WebACL struct { // Id is a required field Id *string `min:"1" type:"string" required:"true"` - // A friendly name of the Web ACL. You cannot change the name of a Web ACL after - // you create it. + // Indicates whether this web ACL is managed by AWS Firewall Manager. If true, + // then only AWS Firewall Manager can delete the web ACL or any Firewall Manager + // rule groups in the web ACL. + ManagedByFirewallManager *bool `type:"boolean"` + + // The name of the Web ACL. You cannot change the name of a Web ACL after you + // create it. 
// // Name is a required field Name *string `min:"1" type:"string" required:"true"` + // The last set of rules for AWS WAF to process in the web ACL. This is defined + // in an AWS Firewall Manager WAF policy and contains only rule group references. + // You can't alter these. Any rules and rule groups that you define for the + // web ACL are prioritized before these. + // + // In the Firewall Manager WAF policy, the Firewall Manager administrator can + // define a set of rule groups to run first in the web ACL and a set of rule + // groups to run last. Within each set, the administrator prioritizes the rule + // groups, to determine their relative processing order. + PostProcessFirewallManagerRuleGroups []*FirewallManagerRuleGroup `type:"list"` + + // The first set of rules for AWS WAF to process in the web ACL. This is defined + // in an AWS Firewall Manager WAF policy and contains only rule group references. + // You can't alter these. Any rules and rule groups that you define for the + // web ACL are prioritized after these. + // + // In the Firewall Manager WAF policy, the Firewall Manager administrator can + // define a set of rule groups to run first in the web ACL and a set of rule + // groups to run last. Within each set, the administrator prioritizes the rule + // groups, to determine their relative processing order. + PreProcessFirewallManagerRuleGroups []*FirewallManagerRuleGroup `type:"list"` + // The Rule statements used to identify the web requests that you want to allow, // block, or count. Each rule includes one top-level statement that AWS WAF // uses to identify matching web requests, and parameters that govern how AWS @@ -12851,12 +14403,30 @@ func (s *WebACL) SetId(v string) *WebACL { return s } +// SetManagedByFirewallManager sets the ManagedByFirewallManager field's value. +func (s *WebACL) SetManagedByFirewallManager(v bool) *WebACL { + s.ManagedByFirewallManager = &v + return s +} + // SetName sets the Name field's value. func (s *WebACL) SetName(v string) *WebACL { s.Name = &v return s } +// SetPostProcessFirewallManagerRuleGroups sets the PostProcessFirewallManagerRuleGroups field's value. +func (s *WebACL) SetPostProcessFirewallManagerRuleGroups(v []*FirewallManagerRuleGroup) *WebACL { + s.PostProcessFirewallManagerRuleGroups = v + return s +} + +// SetPreProcessFirewallManagerRuleGroups sets the PreProcessFirewallManagerRuleGroups field's value. +func (s *WebACL) SetPreProcessFirewallManagerRuleGroups(v []*FirewallManagerRuleGroup) *WebACL { + s.PreProcessFirewallManagerRuleGroups = v + return s +} + // SetRules sets the Rules field's value. func (s *WebACL) SetRules(v []*Rule) *WebACL { s.Rules = v @@ -12883,8 +14453,8 @@ type WebACLSummary struct { // The Amazon Resource Name (ARN) of the entity. ARN *string `min:"20" type:"string"` - // A friendly description of the Web ACL. You cannot change the description - // of a Web ACL after you create it. + // A description of the Web ACL that helps with identification. You cannot change + // the description of a Web ACL after you create it. Description *string `min:"1" type:"string"` // The unique identifier for the Web ACL. This ID is returned in the responses @@ -12902,8 +14472,8 @@ type WebACLSummary struct { // operation. LockToken *string `min:"1" type:"string"` - // A friendly name of the Web ACL. You cannot change the name of a Web ACL after - // you create it. + // The name of the Web ACL. You cannot change the name of a Web ACL after you + // create it. 
Name *string `min:"1" type:"string"` } @@ -12971,8 +14541,8 @@ type XssMatchStatement struct { // Text transformations eliminate some of the unusual formatting that attackers // use in web requests in an effort to bypass detection. If you specify one // or more transformations in a rule statement, AWS WAF performs all transformations - // on the content identified by FieldToMatch, starting from the lowest priority - // setting, before inspecting the content for a match. + // on the content of the request component identified by FieldToMatch, starting + // from the lowest priority setting, before inspecting the content for a match. // // TextTransformations is a required field TextTransformations []*TextTransformation `min:"1" type:"list" required:"true"` @@ -13054,6 +14624,18 @@ const ( ComparisonOperatorGt = "GT" ) +// ComparisonOperator_Values returns all elements of the ComparisonOperator enum +func ComparisonOperator_Values() []string { + return []string{ + ComparisonOperatorEq, + ComparisonOperatorNe, + ComparisonOperatorLe, + ComparisonOperatorLt, + ComparisonOperatorGe, + ComparisonOperatorGt, + } +} + const ( // CountryCodeAf is a CountryCode enum value CountryCodeAf = "AF" @@ -13803,6 +15385,297 @@ const ( CountryCodeZw = "ZW" ) +// CountryCode_Values returns all elements of the CountryCode enum +func CountryCode_Values() []string { + return []string{ + CountryCodeAf, + CountryCodeAx, + CountryCodeAl, + CountryCodeDz, + CountryCodeAs, + CountryCodeAd, + CountryCodeAo, + CountryCodeAi, + CountryCodeAq, + CountryCodeAg, + CountryCodeAr, + CountryCodeAm, + CountryCodeAw, + CountryCodeAu, + CountryCodeAt, + CountryCodeAz, + CountryCodeBs, + CountryCodeBh, + CountryCodeBd, + CountryCodeBb, + CountryCodeBy, + CountryCodeBe, + CountryCodeBz, + CountryCodeBj, + CountryCodeBm, + CountryCodeBt, + CountryCodeBo, + CountryCodeBq, + CountryCodeBa, + CountryCodeBw, + CountryCodeBv, + CountryCodeBr, + CountryCodeIo, + CountryCodeBn, + CountryCodeBg, + CountryCodeBf, + CountryCodeBi, + CountryCodeKh, + CountryCodeCm, + CountryCodeCa, + CountryCodeCv, + CountryCodeKy, + CountryCodeCf, + CountryCodeTd, + CountryCodeCl, + CountryCodeCn, + CountryCodeCx, + CountryCodeCc, + CountryCodeCo, + CountryCodeKm, + CountryCodeCg, + CountryCodeCd, + CountryCodeCk, + CountryCodeCr, + CountryCodeCi, + CountryCodeHr, + CountryCodeCu, + CountryCodeCw, + CountryCodeCy, + CountryCodeCz, + CountryCodeDk, + CountryCodeDj, + CountryCodeDm, + CountryCodeDo, + CountryCodeEc, + CountryCodeEg, + CountryCodeSv, + CountryCodeGq, + CountryCodeEr, + CountryCodeEe, + CountryCodeEt, + CountryCodeFk, + CountryCodeFo, + CountryCodeFj, + CountryCodeFi, + CountryCodeFr, + CountryCodeGf, + CountryCodePf, + CountryCodeTf, + CountryCodeGa, + CountryCodeGm, + CountryCodeGe, + CountryCodeDe, + CountryCodeGh, + CountryCodeGi, + CountryCodeGr, + CountryCodeGl, + CountryCodeGd, + CountryCodeGp, + CountryCodeGu, + CountryCodeGt, + CountryCodeGg, + CountryCodeGn, + CountryCodeGw, + CountryCodeGy, + CountryCodeHt, + CountryCodeHm, + CountryCodeVa, + CountryCodeHn, + CountryCodeHk, + CountryCodeHu, + CountryCodeIs, + CountryCodeIn, + CountryCodeId, + CountryCodeIr, + CountryCodeIq, + CountryCodeIe, + CountryCodeIm, + CountryCodeIl, + CountryCodeIt, + CountryCodeJm, + CountryCodeJp, + CountryCodeJe, + CountryCodeJo, + CountryCodeKz, + CountryCodeKe, + CountryCodeKi, + CountryCodeKp, + CountryCodeKr, + CountryCodeKw, + CountryCodeKg, + CountryCodeLa, + CountryCodeLv, + CountryCodeLb, + CountryCodeLs, + CountryCodeLr, + CountryCodeLy, + 
CountryCodeLi, + CountryCodeLt, + CountryCodeLu, + CountryCodeMo, + CountryCodeMk, + CountryCodeMg, + CountryCodeMw, + CountryCodeMy, + CountryCodeMv, + CountryCodeMl, + CountryCodeMt, + CountryCodeMh, + CountryCodeMq, + CountryCodeMr, + CountryCodeMu, + CountryCodeYt, + CountryCodeMx, + CountryCodeFm, + CountryCodeMd, + CountryCodeMc, + CountryCodeMn, + CountryCodeMe, + CountryCodeMs, + CountryCodeMa, + CountryCodeMz, + CountryCodeMm, + CountryCodeNa, + CountryCodeNr, + CountryCodeNp, + CountryCodeNl, + CountryCodeNc, + CountryCodeNz, + CountryCodeNi, + CountryCodeNe, + CountryCodeNg, + CountryCodeNu, + CountryCodeNf, + CountryCodeMp, + CountryCodeNo, + CountryCodeOm, + CountryCodePk, + CountryCodePw, + CountryCodePs, + CountryCodePa, + CountryCodePg, + CountryCodePy, + CountryCodePe, + CountryCodePh, + CountryCodePn, + CountryCodePl, + CountryCodePt, + CountryCodePr, + CountryCodeQa, + CountryCodeRe, + CountryCodeRo, + CountryCodeRu, + CountryCodeRw, + CountryCodeBl, + CountryCodeSh, + CountryCodeKn, + CountryCodeLc, + CountryCodeMf, + CountryCodePm, + CountryCodeVc, + CountryCodeWs, + CountryCodeSm, + CountryCodeSt, + CountryCodeSa, + CountryCodeSn, + CountryCodeRs, + CountryCodeSc, + CountryCodeSl, + CountryCodeSg, + CountryCodeSx, + CountryCodeSk, + CountryCodeSi, + CountryCodeSb, + CountryCodeSo, + CountryCodeZa, + CountryCodeGs, + CountryCodeSs, + CountryCodeEs, + CountryCodeLk, + CountryCodeSd, + CountryCodeSr, + CountryCodeSj, + CountryCodeSz, + CountryCodeSe, + CountryCodeCh, + CountryCodeSy, + CountryCodeTw, + CountryCodeTj, + CountryCodeTz, + CountryCodeTh, + CountryCodeTl, + CountryCodeTg, + CountryCodeTk, + CountryCodeTo, + CountryCodeTt, + CountryCodeTn, + CountryCodeTr, + CountryCodeTm, + CountryCodeTc, + CountryCodeTv, + CountryCodeUg, + CountryCodeUa, + CountryCodeAe, + CountryCodeGb, + CountryCodeUs, + CountryCodeUm, + CountryCodeUy, + CountryCodeUz, + CountryCodeVu, + CountryCodeVe, + CountryCodeVn, + CountryCodeVg, + CountryCodeVi, + CountryCodeWf, + CountryCodeEh, + CountryCodeYe, + CountryCodeZm, + CountryCodeZw, + } +} + +const ( + // FallbackBehaviorMatch is a FallbackBehavior enum value + FallbackBehaviorMatch = "MATCH" + + // FallbackBehaviorNoMatch is a FallbackBehavior enum value + FallbackBehaviorNoMatch = "NO_MATCH" +) + +// FallbackBehavior_Values returns all elements of the FallbackBehavior enum +func FallbackBehavior_Values() []string { + return []string{ + FallbackBehaviorMatch, + FallbackBehaviorNoMatch, + } +} + +const ( + // ForwardedIPPositionFirst is a ForwardedIPPosition enum value + ForwardedIPPositionFirst = "FIRST" + + // ForwardedIPPositionLast is a ForwardedIPPosition enum value + ForwardedIPPositionLast = "LAST" + + // ForwardedIPPositionAny is a ForwardedIPPosition enum value + ForwardedIPPositionAny = "ANY" +) + +// ForwardedIPPosition_Values returns all elements of the ForwardedIPPosition enum +func ForwardedIPPosition_Values() []string { + return []string{ + ForwardedIPPositionFirst, + ForwardedIPPositionLast, + ForwardedIPPositionAny, + } +} + const ( // IPAddressVersionIpv4 is a IPAddressVersion enum value IPAddressVersionIpv4 = "IPV4" @@ -13811,6 +15684,14 @@ const ( IPAddressVersionIpv6 = "IPV6" ) +// IPAddressVersion_Values returns all elements of the IPAddressVersion enum +func IPAddressVersion_Values() []string { + return []string{ + IPAddressVersionIpv4, + IPAddressVersionIpv6, + } +} + const ( // ParameterExceptionFieldWebAcl is a ParameterExceptionField enum value ParameterExceptionFieldWebAcl = "WEB_ACL" @@ -13922,8 +15803,75 @@ 
const ( // ParameterExceptionFieldMetricName is a ParameterExceptionField enum value ParameterExceptionFieldMetricName = "METRIC_NAME" + + // ParameterExceptionFieldFirewallManagerStatement is a ParameterExceptionField enum value + ParameterExceptionFieldFirewallManagerStatement = "FIREWALL_MANAGER_STATEMENT" + + // ParameterExceptionFieldFallbackBehavior is a ParameterExceptionField enum value + ParameterExceptionFieldFallbackBehavior = "FALLBACK_BEHAVIOR" + + // ParameterExceptionFieldPosition is a ParameterExceptionField enum value + ParameterExceptionFieldPosition = "POSITION" + + // ParameterExceptionFieldForwardedIpConfig is a ParameterExceptionField enum value + ParameterExceptionFieldForwardedIpConfig = "FORWARDED_IP_CONFIG" + + // ParameterExceptionFieldIpSetForwardedIpConfig is a ParameterExceptionField enum value + ParameterExceptionFieldIpSetForwardedIpConfig = "IP_SET_FORWARDED_IP_CONFIG" + + // ParameterExceptionFieldHeaderName is a ParameterExceptionField enum value + ParameterExceptionFieldHeaderName = "HEADER_NAME" ) +// ParameterExceptionField_Values returns all elements of the ParameterExceptionField enum +func ParameterExceptionField_Values() []string { + return []string{ + ParameterExceptionFieldWebAcl, + ParameterExceptionFieldRuleGroup, + ParameterExceptionFieldRegexPatternSet, + ParameterExceptionFieldIpSet, + ParameterExceptionFieldManagedRuleSet, + ParameterExceptionFieldRule, + ParameterExceptionFieldExcludedRule, + ParameterExceptionFieldStatement, + ParameterExceptionFieldByteMatchStatement, + ParameterExceptionFieldSqliMatchStatement, + ParameterExceptionFieldXssMatchStatement, + ParameterExceptionFieldSizeConstraintStatement, + ParameterExceptionFieldGeoMatchStatement, + ParameterExceptionFieldRateBasedStatement, + ParameterExceptionFieldRuleGroupReferenceStatement, + ParameterExceptionFieldRegexPatternReferenceStatement, + ParameterExceptionFieldIpSetReferenceStatement, + ParameterExceptionFieldManagedRuleSetStatement, + ParameterExceptionFieldAndStatement, + ParameterExceptionFieldOrStatement, + ParameterExceptionFieldNotStatement, + ParameterExceptionFieldIpAddress, + ParameterExceptionFieldIpAddressVersion, + ParameterExceptionFieldFieldToMatch, + ParameterExceptionFieldTextTransformation, + ParameterExceptionFieldSingleQueryArgument, + ParameterExceptionFieldSingleHeader, + ParameterExceptionFieldDefaultAction, + ParameterExceptionFieldRuleAction, + ParameterExceptionFieldEntityLimit, + ParameterExceptionFieldOverrideAction, + ParameterExceptionFieldScopeValue, + ParameterExceptionFieldResourceArn, + ParameterExceptionFieldResourceType, + ParameterExceptionFieldTags, + ParameterExceptionFieldTagKeys, + ParameterExceptionFieldMetricName, + ParameterExceptionFieldFirewallManagerStatement, + ParameterExceptionFieldFallbackBehavior, + ParameterExceptionFieldPosition, + ParameterExceptionFieldForwardedIpConfig, + ParameterExceptionFieldIpSetForwardedIpConfig, + ParameterExceptionFieldHeaderName, + } +} + const ( // PositionalConstraintExactly is a PositionalConstraint enum value PositionalConstraintExactly = "EXACTLY" @@ -13941,19 +15889,53 @@ const ( PositionalConstraintContainsWord = "CONTAINS_WORD" ) +// PositionalConstraint_Values returns all elements of the PositionalConstraint enum +func PositionalConstraint_Values() []string { + return []string{ + PositionalConstraintExactly, + PositionalConstraintStartsWith, + PositionalConstraintEndsWith, + PositionalConstraintContains, + PositionalConstraintContainsWord, + } +} + const ( // 
RateBasedStatementAggregateKeyTypeIp is a RateBasedStatementAggregateKeyType enum value RateBasedStatementAggregateKeyTypeIp = "IP" + + // RateBasedStatementAggregateKeyTypeForwardedIp is a RateBasedStatementAggregateKeyType enum value + RateBasedStatementAggregateKeyTypeForwardedIp = "FORWARDED_IP" ) +// RateBasedStatementAggregateKeyType_Values returns all elements of the RateBasedStatementAggregateKeyType enum +func RateBasedStatementAggregateKeyType_Values() []string { + return []string{ + RateBasedStatementAggregateKeyTypeIp, + RateBasedStatementAggregateKeyTypeForwardedIp, + } +} + const ( // ResourceTypeApplicationLoadBalancer is a ResourceType enum value ResourceTypeApplicationLoadBalancer = "APPLICATION_LOAD_BALANCER" // ResourceTypeApiGateway is a ResourceType enum value ResourceTypeApiGateway = "API_GATEWAY" + + // ResourceTypeAppsync is a ResourceType enum value + ResourceTypeAppsync = "APPSYNC" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeApplicationLoadBalancer, + ResourceTypeApiGateway, + ResourceTypeAppsync, + } +} + const ( // ScopeCloudfront is a Scope enum value ScopeCloudfront = "CLOUDFRONT" @@ -13962,6 +15944,14 @@ const ( ScopeRegional = "REGIONAL" ) +// Scope_Values returns all elements of the Scope enum +func Scope_Values() []string { + return []string{ + ScopeCloudfront, + ScopeRegional, + } +} + const ( // TextTransformationTypeNone is a TextTransformationType enum value TextTransformationTypeNone = "NONE" @@ -13981,3 +15971,15 @@ const ( // TextTransformationTypeUrlDecode is a TextTransformationType enum value TextTransformationTypeUrlDecode = "URL_DECODE" ) + +// TextTransformationType_Values returns all elements of the TextTransformationType enum +func TextTransformationType_Values() []string { + return []string{ + TextTransformationTypeNone, + TextTransformationTypeCompressWhiteSpace, + TextTransformationTypeHtmlEntityDecode, + TextTransformationTypeLowercase, + TextTransformationTypeCmdLine, + TextTransformationTypeUrlDecode, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafv2/doc.go b/vendor/github.com/aws/aws-sdk-go/service/wafv2/doc.go index 7ba3ae4f6..7b4202efb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/wafv2/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/wafv2/doc.go @@ -21,25 +21,26 @@ // // AWS WAF is a web application firewall that lets you monitor the HTTP and // HTTPS requests that are forwarded to Amazon CloudFront, an Amazon API Gateway -// API, or an Application Load Balancer. AWS WAF also lets you control access -// to your content. Based on conditions that you specify, such as the IP addresses -// that requests originate from or the values of query strings, API Gateway, -// CloudFront, or the Application Load Balancer responds to requests either -// with the requested content or with an HTTP 403 status code (Forbidden). You -// also can configure CloudFront to return a custom error page when a request -// is blocked. +// REST API, an Application Load Balancer, or an AWS AppSync GraphQL API. AWS +// WAF also lets you control access to your content. Based on conditions that +// you specify, such as the IP addresses that requests originate from or the +// values of query strings, the API Gateway REST API, CloudFront distribution, +// the Application Load Balancer, or the AWS AppSync GraphQL API responds to +// requests either with the requested content or with an HTTP 403 status code +// (Forbidden). 
You also can configure CloudFront to return a custom error page +// when a request is blocked. // // This API guide is for developers who need detailed information about AWS // WAF API actions, data types, and errors. For detailed information about AWS // WAF features and an overview of how to use AWS WAF, see the AWS WAF Developer // Guide (https://docs.aws.amazon.com/waf/latest/developerguide/). // -// You can make API calls using the endpoints listed in AWS Service Endpoints -// for AWS WAF (https://docs.aws.amazon.com/general/latest/gr/rande.html#waf_region). +// You can make calls using the endpoints listed in AWS Service Endpoints for +// AWS WAF (https://docs.aws.amazon.com/general/latest/gr/rande.html#waf_region). // // * For regional applications, you can use any of the endpoints in the list. -// A regional application can be an Application Load Balancer (ALB) or an -// API Gateway stage. +// A regional application can be an Application Load Balancer (ALB), an API +// Gateway REST API, or an AppSync GraphQL API. // // * For AWS CloudFront applications, you must use the API endpoint listed // for US East (N. Virginia): us-east-1. @@ -56,9 +57,9 @@ // need to distinguish the scope, you specify a Scope parameter and set it // to CLOUDFRONT or REGIONAL. // -// * You can define a Web ACL or rule group with a single API call, and update +// * You can define a Web ACL or rule group with a single call, and update // it with a single call. You define all rule specifications in JSON format, -// and pass them to your rule group or Web ACL API calls. +// and pass them to your rule group or Web ACL calls. // // * The limits AWS WAF places on the use of rules more closely reflects // the cost of running each type of rule. Rule groups include capacity settings, diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafv2/errors.go b/vendor/github.com/aws/aws-sdk-go/service/wafv2/errors.go index e4985c1df..e3d65731f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/wafv2/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/wafv2/errors.go @@ -29,6 +29,12 @@ const ( // of a system problem. Retry your request. ErrCodeWAFInternalErrorException = "WAFInternalErrorException" + // ErrCodeWAFInvalidOperationException for service response error code + // "WAFInvalidOperationException". + // + // The operation isn't valid. + ErrCodeWAFInvalidOperationException = "WAFInvalidOperationException" + // ErrCodeWAFInvalidParameterException for service response error code // "WAFInvalidParameterException". // @@ -47,6 +53,28 @@ const ( // a resource with which a Web ACL cannot be associated. ErrCodeWAFInvalidParameterException = "WAFInvalidParameterException" + // ErrCodeWAFInvalidPermissionPolicyException for service response error code + // "WAFInvalidPermissionPolicyException". + // + // The operation failed because the specified policy isn't in the proper format. + // + // The policy specifications must conform to the following: + // + // * The policy must be composed using IAM Policy version 2012-10-17 or version + // 2015-01-01. + // + // * The policy must include specifications for Effect, Action, and Principal. + // + // * Effect must specify Allow. + // + // * Action must specify wafv2:CreateWebACL, wafv2:UpdateWebACL, and wafv2:PutFirewallManagerRuleGroups. + // AWS WAF rejects any extra actions or wildcard actions in the policy. + // + // * The policy must not include a Resource parameter. 
+ // + // For more information, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html). + ErrCodeWAFInvalidPermissionPolicyException = "WAFInvalidPermissionPolicyException" + // ErrCodeWAFInvalidResourceException for service response error code // "WAFInvalidResourceException". // @@ -120,7 +148,9 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "WAFAssociatedItemException": newErrorWAFAssociatedItemException, "WAFDuplicateItemException": newErrorWAFDuplicateItemException, "WAFInternalErrorException": newErrorWAFInternalErrorException, + "WAFInvalidOperationException": newErrorWAFInvalidOperationException, "WAFInvalidParameterException": newErrorWAFInvalidParameterException, + "WAFInvalidPermissionPolicyException": newErrorWAFInvalidPermissionPolicyException, "WAFInvalidResourceException": newErrorWAFInvalidResourceException, "WAFLimitsExceededException": newErrorWAFLimitsExceededException, "WAFNonexistentItemException": newErrorWAFNonexistentItemException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/wafv2/service.go index ee77d1dbe..26dda5619 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/wafv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/wafv2/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/worklink/api.go b/vendor/github.com/aws/aws-sdk-go/service/worklink/api.go index 201091da1..e7cba85d4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/worklink/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/worklink/api.go @@ -1717,6 +1717,9 @@ func (c *WorkLink) ListDomainsRequest(input *ListDomainsInput) (req *request.Req // * InvalidRequestException // The request is not valid. // +// * ResourceNotFoundException +// The requested resource was not found. +// // * TooManyRequestsException // The number of requests exceeds the limit. // @@ -1940,6 +1943,85 @@ func (c *WorkLink) ListFleetsPagesWithContext(ctx aws.Context, input *ListFleets return p.Err() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. 
+// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/ListTagsForResource +func (c *WorkLink) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/tags/{ResourceArn}", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for Amazon WorkLink. +// +// Retrieves a list of tags for the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkLink's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/ListTagsForResource +func (c *WorkLink) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkLink) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListWebsiteAuthorizationProviders = "ListWebsiteAuthorizationProviders" // ListWebsiteAuthorizationProvidersRequest generates a "aws/request.Request" representing the @@ -2514,6 +2596,168 @@ func (c *WorkLink) SignOutUserWithContext(ctx aws.Context, input *SignOutUserInp return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/TagResource +func (c *WorkLink) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/tags/{ResourceArn}", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for Amazon WorkLink. +// +// Adds or overwrites one or more tags for the specified resource, such as a +// fleet. Each tag consists of a key and an optional value. If a resource already +// has a tag with the same key, this operation updates its value. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkLink's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/TagResource +func (c *WorkLink) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkLink) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. 
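// Illustrative only, not generated SDK code: the tag lifecycle these new
// WorkLink operations enable, using only the fields added in this file
// (ResourceArn, Tags, TagKeys). Assumes the usual aws, fmt, and worklink
// imports; the fleet ARN and tag values are placeholders.
func tagRoundTrip(svc *worklink.WorkLink, fleetArn string) error {
	// Add or overwrite a tag on the fleet.
	if _, err := svc.TagResource(&worklink.TagResourceInput{
		ResourceArn: aws.String(fleetArn),
		Tags:        aws.StringMap(map[string]string{"Environment": "test"}),
	}); err != nil {
		return err
	}

	// Read the tags back.
	out, err := svc.ListTagsForResource(&worklink.ListTagsForResourceInput{
		ResourceArn: aws.String(fleetArn),
	})
	if err != nil {
		return err
	}
	fmt.Println("tags:", aws.StringValueMap(out.Tags))

	// Remove the tag again by key.
	_, err = svc.UntagResource(&worklink.UntagResourceInput{
		ResourceArn: aws.String(fleetArn),
		TagKeys:     aws.StringSlice([]string{"Environment"}),
	})
	return err
}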
+// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/UntagResource +func (c *WorkLink) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "DELETE", + HTTPPath: "/tags/{ResourceArn}", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for Amazon WorkLink. +// +// Removes one or more tags from the specified resource. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkLink's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/UntagResource +func (c *WorkLink) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkLink) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateAuditStreamConfiguration = "UpdateAuditStreamConfiguration" // UpdateAuditStreamConfigurationRequest generates a "aws/request.Request" representing the @@ -3356,6 +3600,9 @@ type CreateFleetInput struct { // The option to optimize for better performance by routing traffic through // the closest AWS Region to users, which may be outside of your home Region. OptimizeForEndUserLocation *bool `type:"boolean"` + + // The tags to add to the resource. A tag is a key-value pair. + Tags map[string]*string `min:"1" type:"map"` } // String returns the string representation @@ -3377,6 +3624,9 @@ func (s *CreateFleetInput) Validate() error { if s.FleetName != nil && len(*s.FleetName) < 1 { invalidParams.Add(request.NewErrParamMinLen("FleetName", 1)) } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3402,10 +3652,16 @@ func (s *CreateFleetInput) SetOptimizeForEndUserLocation(v bool) *CreateFleetInp return s } +// SetTags sets the Tags field's value. +func (s *CreateFleetInput) SetTags(v map[string]*string) *CreateFleetInput { + s.Tags = v + return s +} + type CreateFleetOutput struct { _ struct{} `type:"structure"` - // The ARN of the fleet. 
+ // The Amazon Resource Name (ARN) of the fleet. FleetArn *string `min:"20" type:"string"` } @@ -3964,7 +4220,7 @@ func (s *DescribeDomainOutput) SetDomainStatus(v string) *DescribeDomainOutput { type DescribeFleetMetadataInput struct { _ struct{} `type:"structure"` - // The ARN of the fleet. + // The Amazon Resource Name (ARN) of the fleet. // // FleetArn is a required field FleetArn *string `min:"20" type:"string" required:"true"` @@ -4026,6 +4282,9 @@ type DescribeFleetMetadataOutput struct { // The option to optimize for better performance by routing traffic through // the closest AWS Region to users, which may be outside of your home Region. OptimizeForEndUserLocation *bool `type:"boolean"` + + // The tags attached to the resource. A tag is a key-value pair. + Tags map[string]*string `min:"1" type:"map"` } // String returns the string representation @@ -4080,6 +4339,12 @@ func (s *DescribeFleetMetadataOutput) SetOptimizeForEndUserLocation(v bool) *Des return s } +// SetTags sets the Tags field's value. +func (s *DescribeFleetMetadataOutput) SetTags(v map[string]*string) *DescribeFleetMetadataOutput { + s.Tags = v + return s +} + type DescribeIdentityProviderConfigurationInput struct { _ struct{} `type:"structure"` @@ -4577,10 +4842,10 @@ type FleetSummary struct { // The time when the fleet was created. CreatedTime *time.Time `type:"timestamp"` - // The name to display. + // The name of the fleet to display. DisplayName *string `type:"string"` - // The ARN of the fleet. + // The Amazon Resource Name (ARN) of the fleet. FleetArn *string `min:"20" type:"string"` // The name of the fleet. @@ -4591,6 +4856,9 @@ type FleetSummary struct { // The time when the fleet was last updated. LastUpdatedTime *time.Time `type:"timestamp"` + + // The tags attached to the resource. A tag is a key-value pair. + Tags map[string]*string `min:"1" type:"map"` } // String returns the string representation @@ -4645,10 +4913,16 @@ func (s *FleetSummary) SetLastUpdatedTime(v time.Time) *FleetSummary { return s } +// SetTags sets the Tags field's value. +func (s *FleetSummary) SetTags(v map[string]*string) *FleetSummary { + s.Tags = v + return s +} + // The service is temporarily unavailable. type InternalServerErrorException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4665,17 +4939,17 @@ func (s InternalServerErrorException) GoString() string { func newErrorInternalServerErrorException(v protocol.ResponseMetadata) error { return &InternalServerErrorException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InternalServerErrorException) Code() string { +func (s *InternalServerErrorException) Code() string { return "InternalServerErrorException" } // Message returns the exception's message. -func (s InternalServerErrorException) Message() string { +func (s *InternalServerErrorException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4683,28 +4957,28 @@ func (s InternalServerErrorException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
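// Illustrative only, not generated SDK code: with Tags added to
// CreateFleetInput above, a fleet can be tagged at creation time rather than
// with a follow-up TagResource call. Assumes the usual aws and worklink
// imports; the fleet name and tag are placeholders.
func createTaggedFleet(svc *worklink.WorkLink) (string, error) {
	out, err := svc.CreateFleet(&worklink.CreateFleetInput{
		FleetName:                  aws.String("example-fleet"),
		OptimizeForEndUserLocation: aws.Bool(true),
		// Validate() rejects an empty map (min length 1), so only set Tags
		// when there is at least one key-value pair.
		Tags: aws.StringMap(map[string]string{"Environment": "test"}),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.FleetArn), nil
}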
-func (s InternalServerErrorException) OrigErr() error { +func (s *InternalServerErrorException) OrigErr() error { return nil } -func (s InternalServerErrorException) Error() string { +func (s *InternalServerErrorException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InternalServerErrorException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InternalServerErrorException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InternalServerErrorException) RequestID() string { - return s.respMetadata.RequestID +func (s *InternalServerErrorException) RequestID() string { + return s.RespMetadata.RequestID } // The request is not valid. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4721,17 +4995,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4739,22 +5013,22 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID } type ListDevicesInput struct { @@ -5037,6 +5311,70 @@ func (s *ListFleetsOutput) SetNextToken(v string) *ListFleetsOutput { return s } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the fleet. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *ListTagsForResourceInput) SetResourceArn(v string) *ListTagsForResourceInput { + s.ResourceArn = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // The tags attached to the resource. A tag is a key-value pair. + Tags map[string]*string `min:"1" type:"map"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v map[string]*string) *ListTagsForResourceOutput { + s.Tags = v + return s +} + type ListWebsiteAuthorizationProvidersInput struct { _ struct{} `type:"structure"` @@ -5237,8 +5575,8 @@ func (s *ListWebsiteCertificateAuthoritiesOutput) SetWebsiteCertificateAuthoriti // The resource already exists. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5255,17 +5593,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. -func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5273,28 +5611,28 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The requested resource was not found. 
type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5311,17 +5649,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5329,22 +5667,22 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } type RestoreDomainAccessInput struct { @@ -5563,10 +5901,82 @@ func (s SignOutUserOutput) GoString() string { return s.String() } +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the fleet. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" min:"20" type:"string" required:"true"` + + // The tags to add to the resource. A tag is a key-value pair. + // + // Tags is a required field + Tags map[string]*string `min:"1" type:"map" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *TagResourceInput) SetTags(v map[string]*string) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + // The number of requests exceeds the limit. type TooManyRequestsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5583,17 +5993,17 @@ func (s TooManyRequestsException) GoString() string { func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { return &TooManyRequestsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyRequestsException) Code() string { +func (s *TooManyRequestsException) Code() string { return "TooManyRequestsException" } // Message returns the exception's message. -func (s TooManyRequestsException) Message() string { +func (s *TooManyRequestsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5601,28 +6011,28 @@ func (s TooManyRequestsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyRequestsException) OrigErr() error { +func (s *TooManyRequestsException) OrigErr() error { return nil } -func (s TooManyRequestsException) Error() string { +func (s *TooManyRequestsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyRequestsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyRequestsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID } // You are not authorized to perform this action. type UnauthorizedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5639,17 +6049,17 @@ func (s UnauthorizedException) GoString() string { func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { return &UnauthorizedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnauthorizedException) Code() string { +func (s *UnauthorizedException) Code() string { return "UnauthorizedException" } // Message returns the exception's message. -func (s UnauthorizedException) Message() string { +func (s *UnauthorizedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5657,22 +6067,94 @@ func (s UnauthorizedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s UnauthorizedException) OrigErr() error { +func (s *UnauthorizedException) OrigErr() error { return nil } -func (s UnauthorizedException) Error() string { +func (s *UnauthorizedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnauthorizedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnauthorizedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnauthorizedException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnauthorizedException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the fleet. + // + // ResourceArn is a required field + ResourceArn *string `location:"uri" locationName:"ResourceArn" min:"20" type:"string" required:"true"` + + // The list of tag keys to remove from the resource. + // + // TagKeys is a required field + TagKeys []*string `location:"querystring" locationName:"tagKeys" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 20)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.TagKeys != nil && len(s.TagKeys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TagKeys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. 
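// A usage sketch, not part of the generated SDK: because the exception types
// in this file now use pointer receivers and an exported RespMetadata field,
// callers can match them directly with errors.As instead of comparing awserr
// codes. Assumes the usual aws, errors, fmt, and worklink imports.
func describeFleetOrExplain(svc *worklink.WorkLink, fleetArn string) error {
	_, err := svc.DescribeFleetMetadata(&worklink.DescribeFleetMetadataInput{
		FleetArn: aws.String(fleetArn),
	})

	var notFound *worklink.ResourceNotFoundException
	if errors.As(err, &notFound) {
		// StatusCode and RequestID now read from the exported RespMetadata.
		return fmt.Errorf("fleet %s not found (HTTP %d, request %s)",
			fleetArn, notFound.StatusCode(), notFound.RequestID())
	}
	return err
}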
+func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() } type UpdateAuditStreamConfigurationInput struct { @@ -6242,6 +6724,13 @@ const ( AuthorizationProviderTypeSaml = "SAML" ) +// AuthorizationProviderType_Values returns all elements of the AuthorizationProviderType enum +func AuthorizationProviderType_Values() []string { + return []string{ + AuthorizationProviderTypeSaml, + } +} + const ( // DeviceStatusActive is a DeviceStatus enum value DeviceStatusActive = "ACTIVE" @@ -6250,6 +6739,14 @@ const ( DeviceStatusSignedOut = "SIGNED_OUT" ) +// DeviceStatus_Values returns all elements of the DeviceStatus enum +func DeviceStatus_Values() []string { + return []string{ + DeviceStatusActive, + DeviceStatusSignedOut, + } +} + const ( // DomainStatusPendingValidation is a DomainStatus enum value DomainStatusPendingValidation = "PENDING_VALIDATION" @@ -6276,6 +6773,20 @@ const ( DomainStatusFailedToDisassociate = "FAILED_TO_DISASSOCIATE" ) +// DomainStatus_Values returns all elements of the DomainStatus enum +func DomainStatus_Values() []string { + return []string{ + DomainStatusPendingValidation, + DomainStatusAssociating, + DomainStatusActive, + DomainStatusInactive, + DomainStatusDisassociating, + DomainStatusDisassociated, + DomainStatusFailedToAssociate, + DomainStatusFailedToDisassociate, + } +} + const ( // FleetStatusCreating is a FleetStatus enum value FleetStatusCreating = "CREATING" @@ -6296,7 +6807,26 @@ const ( FleetStatusFailedToDelete = "FAILED_TO_DELETE" ) +// FleetStatus_Values returns all elements of the FleetStatus enum +func FleetStatus_Values() []string { + return []string{ + FleetStatusCreating, + FleetStatusActive, + FleetStatusDeleting, + FleetStatusDeleted, + FleetStatusFailedToCreate, + FleetStatusFailedToDelete, + } +} + const ( // IdentityProviderTypeSaml is a IdentityProviderType enum value IdentityProviderTypeSaml = "SAML" ) + +// IdentityProviderType_Values returns all elements of the IdentityProviderType enum +func IdentityProviderType_Values() []string { + return []string{ + IdentityProviderTypeSaml, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/worklink/doc.go b/vendor/github.com/aws/aws-sdk-go/service/worklink/doc.go index 691b27039..dcde2c87d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/worklink/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/worklink/doc.go @@ -4,10 +4,10 @@ // requests to Amazon WorkLink. // // Amazon WorkLink is a cloud-based service that provides secure access to internal -// websites and web apps from iOS phones. In a single step, your users, such -// as employees, can access internal websites as efficiently as they access -// any other public website. They enter a URL in their web browser, or choose -// a link to an internal website in an email. Amazon WorkLink authenticates +// websites and web apps from iOS and Android phones. In a single step, your +// users, such as employees, can access internal websites as efficiently as +// they access any other public website. They enter a URL in their web browser, +// or choose a link to an internal website in an email. 
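// A usage sketch, not part of the generated SDK: the new *_Values helpers
// return every enum string, which is convenient for validation, for example
// feeding a StringInSlice-style schema validator in the provider code this SDK
// is vendored for. Assumes the worklink import.
func isValidFleetStatus(v string) bool {
	for _, s := range worklink.FleetStatus_Values() {
		if s == v {
			return true
		}
	}
	return false
}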
Amazon WorkLink authenticates // the user's access and securely renders authorized internal web content in // a secure rendering service in the AWS cloud. Amazon WorkLink doesn't download // or store any internal web content on mobile devices. diff --git a/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go b/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go index 6e554bd05..cafef37dd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/workmail/api.go b/vendor/github.com/aws/aws-sdk-go/service/workmail/api.go index 8d0f24732..83dfcdb19 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/workmail/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/workmail/api.go @@ -84,8 +84,8 @@ func (c *WorkMail) AssociateDelegateToResourceRequest(input *AssociateDelegateTo // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/AssociateDelegateToResource func (c *WorkMail) AssociateDelegateToResource(input *AssociateDelegateToResourceInput) (*AssociateDelegateToResourceOutput, error) { @@ -168,7 +168,7 @@ func (c *WorkMail) AssociateMemberToGroupRequest(input *AssociateMemberToGroupIn // The directory service doesn't recognize the credentials supplied by WorkMail. // // * DirectoryUnavailableException -// The directory on which you are trying to perform operations isn't available. +// The directory is unavailable. It might be located in another Region or deleted. // // * EntityNotFoundException // The identifier supplied for the user, group, or resource does not exist in @@ -186,8 +186,8 @@ func (c *WorkMail) AssociateMemberToGroupRequest(input *AssociateMemberToGroupIn // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // * UnsupportedOperationException // You can't perform a write operation against a read-only directory. @@ -214,6 +214,101 @@ func (c *WorkMail) AssociateMemberToGroupWithContext(ctx aws.Context, input *Ass return out, req.Send() } +const opCancelMailboxExportJob = "CancelMailboxExportJob" + +// CancelMailboxExportJobRequest generates a "aws/request.Request" representing the +// client's request for the CancelMailboxExportJob operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See CancelMailboxExportJob for more information on using the CancelMailboxExportJob +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CancelMailboxExportJobRequest method. +// req, resp := client.CancelMailboxExportJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/CancelMailboxExportJob +func (c *WorkMail) CancelMailboxExportJobRequest(input *CancelMailboxExportJobInput) (req *request.Request, output *CancelMailboxExportJobOutput) { + op := &request.Operation{ + Name: opCancelMailboxExportJob, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CancelMailboxExportJobInput{} + } + + output = &CancelMailboxExportJobOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CancelMailboxExportJob API operation for Amazon WorkMail. +// +// Cancels a mailbox export job. +// +// If the mailbox export job is near completion, it might not be possible to +// cancel it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkMail's +// API operation CancelMailboxExportJob for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more of the input parameters don't match the service's restrictions. +// +// * OrganizationNotFoundException +// An operation received a valid organization identifier that either doesn't +// belong or exist in the system. +// +// * OrganizationStateException +// The organization must have a valid state to perform certain operations on +// the organization or its members. +// +// * EntityNotFoundException +// The identifier supplied for the user, group, or resource does not exist in +// your organization. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/CancelMailboxExportJob +func (c *WorkMail) CancelMailboxExportJob(input *CancelMailboxExportJobInput) (*CancelMailboxExportJobOutput, error) { + req, out := c.CancelMailboxExportJobRequest(input) + return out, req.Send() +} + +// CancelMailboxExportJobWithContext is the same as CancelMailboxExportJob with the addition of +// the ability to pass a context and additional request options. +// +// See CancelMailboxExportJob for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkMail) CancelMailboxExportJobWithContext(ctx aws.Context, input *CancelMailboxExportJobInput, opts ...request.Option) (*CancelMailboxExportJobOutput, error) { + req, out := c.CancelMailboxExportJobRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCreateAlias = "CreateAlias" // CreateAliasRequest generates a "aws/request.Request" representing the @@ -297,8 +392,8 @@ func (c *WorkMail) CreateAliasRequest(input *CreateAliasInput) (req *request.Req // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // * LimitExceededException // The request exceeds the limit of the resource. @@ -384,7 +479,7 @@ func (c *WorkMail) CreateGroupRequest(input *CreateGroupInput) (req *request.Req // The directory service doesn't recognize the credentials supplied by WorkMail. // // * DirectoryUnavailableException -// The directory on which you are trying to perform operations isn't available. +// The directory is unavailable. It might be located in another Region or deleted. // // * InvalidParameterException // One or more of the input parameters don't match the service's restrictions. @@ -397,8 +492,8 @@ func (c *WorkMail) CreateGroupRequest(input *CreateGroupInput) (req *request.Req // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // * ReservedNameException // This user, group, or resource name is not allowed in Amazon WorkMail. @@ -428,6 +523,116 @@ func (c *WorkMail) CreateGroupWithContext(ctx aws.Context, input *CreateGroupInp return out, req.Send() } +const opCreateOrganization = "CreateOrganization" + +// CreateOrganizationRequest generates a "aws/request.Request" representing the +// client's request for the CreateOrganization operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateOrganization for more information on using the CreateOrganization +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateOrganizationRequest method. +// req, resp := client.CreateOrganizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/CreateOrganization +func (c *WorkMail) CreateOrganizationRequest(input *CreateOrganizationInput) (req *request.Request, output *CreateOrganizationOutput) { + op := &request.Operation{ + Name: opCreateOrganization, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOrganizationInput{} + } + + output = &CreateOrganizationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateOrganization API operation for Amazon WorkMail. +// +// Creates a new Amazon WorkMail organization. Optionally, you can choose to +// associate an existing AWS Directory Service directory with your organization. 
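// Illustrative only, not generated SDK code: cancelling a mailbox export job
// as described above. The input fields (OrganizationId, JobId, ClientToken)
// follow the public CancelMailboxExportJob API and are not shown in this hunk;
// the IDs are placeholders. Assumes the usual aws and workmail imports.
func cancelExportJob(svc *workmail.WorkMail, orgID, jobID string) error {
	_, err := svc.CancelMailboxExportJob(&workmail.CancelMailboxExportJobInput{
		OrganizationId: aws.String(orgID),
		JobId:          aws.String(jobID),
		// Idempotency token; use a fresh unique value per logical request.
		ClientToken: aws.String("example-client-token"),
	})
	return err
}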
+// If an AWS Directory Service directory ID is specified, the organization alias +// must match the directory alias. If you choose not to associate an existing +// directory with your organization, then we create a new Amazon WorkMail directory +// for you. For more information, see Adding an organization (https://docs.aws.amazon.com/workmail/latest/adminguide/add_new_organization.html) +// in the Amazon WorkMail Administrator Guide. +// +// You can associate multiple email domains with an organization, then set your +// default email domain from the Amazon WorkMail console. You can also associate +// a domain that is managed in an Amazon Route 53 public hosted zone. For more +// information, see Adding a domain (https://docs.aws.amazon.com/workmail/latest/adminguide/add_domain.html) +// and Choosing the default domain (https://docs.aws.amazon.com/workmail/latest/adminguide/default_domain.html) +// in the Amazon WorkMail Administrator Guide. +// +// Optionally, you can use a customer managed master key from AWS Key Management +// Service (AWS KMS) to encrypt email for your organization. If you don't associate +// an AWS KMS key, Amazon WorkMail creates a default AWS managed master key +// for you. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkMail's +// API operation CreateOrganization for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more of the input parameters don't match the service's restrictions. +// +// * DirectoryInUseException +// The directory is already in use by another WorkMail organization in the same +// account and Region. +// +// * DirectoryUnavailableException +// The directory is unavailable. It might be located in another Region or deleted. +// +// * LimitExceededException +// The request exceeds the limit of the resource. +// +// * NameAvailabilityException +// The user, group, or resource name isn't unique in Amazon WorkMail. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/CreateOrganization +func (c *WorkMail) CreateOrganization(input *CreateOrganizationInput) (*CreateOrganizationOutput, error) { + req, out := c.CreateOrganizationRequest(input) + return out, req.Send() +} + +// CreateOrganizationWithContext is the same as CreateOrganization with the addition of +// the ability to pass a context and additional request options. +// +// See CreateOrganization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkMail) CreateOrganizationWithContext(ctx aws.Context, input *CreateOrganizationInput, opts ...request.Option) (*CreateOrganizationOutput, error) { + req, out := c.CreateOrganizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateResource = "CreateResource" // CreateResourceRequest generates a "aws/request.Request" representing the @@ -486,7 +691,7 @@ func (c *WorkMail) CreateResourceRequest(input *CreateResourceInput) (req *reque // The directory service doesn't recognize the credentials supplied by WorkMail. 
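// Illustrative only, not generated SDK code: creating an organization as
// described above, optionally reusing an existing AWS Directory Service
// directory and a customer managed KMS key. The field names (Alias,
// DirectoryId, KmsKeyArn) follow the public CreateOrganization API and are not
// shown in this hunk; the IDs and ARN are placeholders. Assumes the usual aws
// and workmail imports.
func createOrganizationExample(svc *workmail.WorkMail) (string, error) {
	out, err := svc.CreateOrganization(&workmail.CreateOrganizationInput{
		Alias: aws.String("example-alias"),
		// Optional: omit both of these to let WorkMail create a directory and
		// use an AWS managed master key instead.
		DirectoryId: aws.String("d-1234567890"),
		KmsKeyArn:   aws.String("arn:aws:kms:us-east-1:111122223333:key/00000000-0000-0000-0000-000000000000"),
	})
	if err != nil {
		return "", err
	}
	return aws.StringValue(out.OrganizationId), nil
}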
// // * DirectoryUnavailableException -// The directory on which you are trying to perform operations isn't available. +// The directory is unavailable. It might be located in another Region or deleted. // // * InvalidParameterException // One or more of the input parameters don't match the service's restrictions. @@ -499,8 +704,8 @@ func (c *WorkMail) CreateResourceRequest(input *CreateResourceInput) (req *reque // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // * ReservedNameException // This user, group, or resource name is not allowed in Amazon WorkMail. @@ -586,7 +791,7 @@ func (c *WorkMail) CreateUserRequest(input *CreateUserInput) (req *request.Reque // The directory service doesn't recognize the credentials supplied by WorkMail. // // * DirectoryUnavailableException -// The directory on which you are trying to perform operations isn't available. +// The directory is unavailable. It might be located in another Region or deleted. // // * InvalidParameterException // One or more of the input parameters don't match the service's restrictions. @@ -603,8 +808,8 @@ func (c *WorkMail) CreateUserRequest(input *CreateUserInput) (req *request.Reque // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // * ReservedNameException // This user, group, or resource name is not allowed in Amazon WorkMail. @@ -694,8 +899,8 @@ func (c *WorkMail) DeleteAccessControlRuleRequest(input *DeleteAccessControlRule // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DeleteAccessControlRule func (c *WorkMail) DeleteAccessControlRule(input *DeleteAccessControlRuleInput) (*DeleteAccessControlRuleOutput, error) { @@ -790,8 +995,8 @@ func (c *WorkMail) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Req // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DeleteAlias func (c *WorkMail) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { @@ -874,7 +1079,7 @@ func (c *WorkMail) DeleteGroupRequest(input *DeleteGroupInput) (req *request.Req // The directory service doesn't recognize the credentials supplied by WorkMail. // // * DirectoryUnavailableException -// The directory on which you are trying to perform operations isn't available. +// The directory is unavailable. It might be located in another Region or deleted. 
// // * EntityStateException // You are performing an operation on a user, group, or resource that isn't @@ -888,8 +1093,8 @@ func (c *WorkMail) DeleteGroupRequest(input *DeleteGroupInput) (req *request.Req // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // * UnsupportedOperationException // You can't perform a write operation against a read-only directory. @@ -987,8 +1192,8 @@ func (c *WorkMail) DeleteMailboxPermissionsRequest(input *DeleteMailboxPermissio // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DeleteMailboxPermissions func (c *WorkMail) DeleteMailboxPermissions(input *DeleteMailboxPermissionsInput) (*DeleteMailboxPermissionsOutput, error) { @@ -1012,6 +1217,97 @@ func (c *WorkMail) DeleteMailboxPermissionsWithContext(ctx aws.Context, input *D return out, req.Send() } +const opDeleteOrganization = "DeleteOrganization" + +// DeleteOrganizationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteOrganization operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteOrganization for more information on using the DeleteOrganization +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteOrganizationRequest method. +// req, resp := client.DeleteOrganizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DeleteOrganization +func (c *WorkMail) DeleteOrganizationRequest(input *DeleteOrganizationInput) (req *request.Request, output *DeleteOrganizationOutput) { + op := &request.Operation{ + Name: opDeleteOrganization, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteOrganizationInput{} + } + + output = &DeleteOrganizationOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteOrganization API operation for Amazon WorkMail. +// +// Deletes an Amazon WorkMail organization and all underlying AWS resources +// managed by Amazon WorkMail as part of the organization. You can choose whether +// to delete the associated directory. For more information, see Removing an +// organization (https://docs.aws.amazon.com/workmail/latest/adminguide/remove_organization.html) +// in the Amazon WorkMail Administrator Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon WorkMail's +// API operation DeleteOrganization for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more of the input parameters don't match the service's restrictions. +// +// * OrganizationNotFoundException +// An operation received a valid organization identifier that either doesn't +// belong or exist in the system. +// +// * OrganizationStateException +// The organization must have a valid state to perform certain operations on +// the organization or its members. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DeleteOrganization +func (c *WorkMail) DeleteOrganization(input *DeleteOrganizationInput) (*DeleteOrganizationOutput, error) { + req, out := c.DeleteOrganizationRequest(input) + return out, req.Send() +} + +// DeleteOrganizationWithContext is the same as DeleteOrganization with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteOrganization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkMail) DeleteOrganizationWithContext(ctx aws.Context, input *DeleteOrganizationInput, opts ...request.Option) (*DeleteOrganizationOutput, error) { + req, out := c.DeleteOrganizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteResource = "DeleteResource" // DeleteResourceRequest generates a "aws/request.Request" representing the @@ -1079,8 +1375,8 @@ func (c *WorkMail) DeleteResourceRequest(input *DeleteResourceInput) (req *reque // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DeleteResource func (c *WorkMail) DeleteResource(input *DeleteResourceInput) (*DeleteResourceOutput, error) { @@ -1104,6 +1400,94 @@ func (c *WorkMail) DeleteResourceWithContext(ctx aws.Context, input *DeleteResou return out, req.Send() } +const opDeleteRetentionPolicy = "DeleteRetentionPolicy" + +// DeleteRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRetentionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteRetentionPolicy for more information on using the DeleteRetentionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteRetentionPolicyRequest method. 
+// req, resp := client.DeleteRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DeleteRetentionPolicy +func (c *WorkMail) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *request.Request, output *DeleteRetentionPolicyOutput) { + op := &request.Operation{ + Name: opDeleteRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRetentionPolicyInput{} + } + + output = &DeleteRetentionPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteRetentionPolicy API operation for Amazon WorkMail. +// +// Deletes the specified retention policy from the specified organization. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkMail's +// API operation DeleteRetentionPolicy for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more of the input parameters don't match the service's restrictions. +// +// * OrganizationNotFoundException +// An operation received a valid organization identifier that either doesn't +// belong or exist in the system. +// +// * OrganizationStateException +// The organization must have a valid state to perform certain operations on +// the organization or its members. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DeleteRetentionPolicy +func (c *WorkMail) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput) (*DeleteRetentionPolicyOutput, error) { + req, out := c.DeleteRetentionPolicyRequest(input) + return out, req.Send() +} + +// DeleteRetentionPolicyWithContext is the same as DeleteRetentionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRetentionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkMail) DeleteRetentionPolicyWithContext(ctx aws.Context, input *DeleteRetentionPolicyInput, opts ...request.Option) (*DeleteRetentionPolicyOutput, error) { + req, out := c.DeleteRetentionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteUser = "DeleteUser" // DeleteUserRequest generates a "aws/request.Request" representing the @@ -1168,7 +1552,7 @@ func (c *WorkMail) DeleteUserRequest(input *DeleteUserInput) (req *request.Reque // The directory service doesn't recognize the credentials supplied by WorkMail. // // * DirectoryUnavailableException -// The directory on which you are trying to perform operations isn't available. +// The directory is unavailable. It might be located in another Region or deleted. // // * EntityStateException // You are performing an operation on a user, group, or resource that isn't @@ -1182,8 +1566,8 @@ func (c *WorkMail) DeleteUserRequest(input *DeleteUserInput) (req *request.Reque // belong or exist in the system. 
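//
// The *Request variants above exist so callers can adjust a request before it
// is sent. A sketch, assuming a configured *WorkMail client named svc,
// hypothetical identifiers, and an illustrative (made-up) header name:
//
//    req, out := svc.DeleteRetentionPolicyRequest(&workmail.DeleteRetentionPolicyInput{
//        OrganizationId: aws.String("m-1234567890abcdef0"), // hypothetical
//        Id:             aws.String("example-policy-id"),   // hypothetical
//    })
//    req.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo") // custom header, illustration only
//    if err := req.Send(); err == nil {
//        fmt.Println(out)
//    }
//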
// // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // * UnsupportedOperationException // You can't perform a write operation against a read-only directory. @@ -1284,8 +1668,8 @@ func (c *WorkMail) DeregisterFromWorkMailRequest(input *DeregisterFromWorkMailIn // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DeregisterFromWorkMail func (c *WorkMail) DeregisterFromWorkMail(input *DeregisterFromWorkMailInput) (*DeregisterFromWorkMailOutput, error) { @@ -1293,80 +1677,167 @@ func (c *WorkMail) DeregisterFromWorkMail(input *DeregisterFromWorkMailInput) (* return out, req.Send() } -// DeregisterFromWorkMailWithContext is the same as DeregisterFromWorkMail with the addition of +// DeregisterFromWorkMailWithContext is the same as DeregisterFromWorkMail with the addition of +// the ability to pass a context and additional request options. +// +// See DeregisterFromWorkMail for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkMail) DeregisterFromWorkMailWithContext(ctx aws.Context, input *DeregisterFromWorkMailInput, opts ...request.Option) (*DeregisterFromWorkMailOutput, error) { + req, out := c.DeregisterFromWorkMailRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeGroup = "DescribeGroup" + +// DescribeGroupRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeGroup for more information on using the DescribeGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeGroupRequest method. 
+// req, resp := client.DescribeGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DescribeGroup +func (c *WorkMail) DescribeGroupRequest(input *DescribeGroupInput) (req *request.Request, output *DescribeGroupOutput) { + op := &request.Operation{ + Name: opDescribeGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGroupInput{} + } + + output = &DescribeGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeGroup API operation for Amazon WorkMail. +// +// Returns the data available for the group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkMail's +// API operation DescribeGroup for usage and error information. +// +// Returned Error Types: +// * EntityNotFoundException +// The identifier supplied for the user, group, or resource does not exist in +// your organization. +// +// * InvalidParameterException +// One or more of the input parameters don't match the service's restrictions. +// +// * OrganizationNotFoundException +// An operation received a valid organization identifier that either doesn't +// belong or exist in the system. +// +// * OrganizationStateException +// The organization must have a valid state to perform certain operations on +// the organization or its members. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DescribeGroup +func (c *WorkMail) DescribeGroup(input *DescribeGroupInput) (*DescribeGroupOutput, error) { + req, out := c.DescribeGroupRequest(input) + return out, req.Send() +} + +// DescribeGroupWithContext is the same as DescribeGroup with the addition of // the ability to pass a context and additional request options. // -// See DeregisterFromWorkMail for details on how to use this API operation. +// See DescribeGroup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WorkMail) DeregisterFromWorkMailWithContext(ctx aws.Context, input *DeregisterFromWorkMailInput, opts ...request.Option) (*DeregisterFromWorkMailOutput, error) { - req, out := c.DeregisterFromWorkMailRequest(input) +func (c *WorkMail) DescribeGroupWithContext(ctx aws.Context, input *DescribeGroupInput, opts ...request.Option) (*DescribeGroupOutput, error) { + req, out := c.DescribeGroupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeGroup = "DescribeGroup" +const opDescribeMailboxExportJob = "DescribeMailboxExportJob" -// DescribeGroupRequest generates a "aws/request.Request" representing the -// client's request for the DescribeGroup operation. The "output" return +// DescribeMailboxExportJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMailboxExportJob operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See DescribeGroup for more information on using the DescribeGroup +// See DescribeMailboxExportJob for more information on using the DescribeMailboxExportJob // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeGroupRequest method. -// req, resp := client.DescribeGroupRequest(params) +// // Example sending a request using the DescribeMailboxExportJobRequest method. +// req, resp := client.DescribeMailboxExportJobRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DescribeGroup -func (c *WorkMail) DescribeGroupRequest(input *DescribeGroupInput) (req *request.Request, output *DescribeGroupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DescribeMailboxExportJob +func (c *WorkMail) DescribeMailboxExportJobRequest(input *DescribeMailboxExportJobInput) (req *request.Request, output *DescribeMailboxExportJobOutput) { op := &request.Operation{ - Name: opDescribeGroup, + Name: opDescribeMailboxExportJob, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DescribeGroupInput{} + input = &DescribeMailboxExportJobInput{} } - output = &DescribeGroupOutput{} + output = &DescribeMailboxExportJobOutput{} req = c.newRequest(op, input, output) return } -// DescribeGroup API operation for Amazon WorkMail. +// DescribeMailboxExportJob API operation for Amazon WorkMail. // -// Returns the data available for the group. +// Describes the current status of a mailbox export job. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon WorkMail's -// API operation DescribeGroup for usage and error information. +// API operation DescribeMailboxExportJob for usage and error information. // // Returned Error Types: -// * EntityNotFoundException -// The identifier supplied for the user, group, or resource does not exist in -// your organization. -// // * InvalidParameterException // One or more of the input parameters don't match the service's restrictions. // @@ -1375,26 +1846,30 @@ func (c *WorkMail) DescribeGroupRequest(input *DescribeGroupInput) (req *request // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DescribeGroup -func (c *WorkMail) DescribeGroup(input *DescribeGroupInput) (*DescribeGroupOutput, error) { - req, out := c.DescribeGroupRequest(input) +// * EntityNotFoundException +// The identifier supplied for the user, group, or resource does not exist in +// your organization. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DescribeMailboxExportJob +func (c *WorkMail) DescribeMailboxExportJob(input *DescribeMailboxExportJobInput) (*DescribeMailboxExportJobOutput, error) { + req, out := c.DescribeMailboxExportJobRequest(input) return out, req.Send() } -// DescribeGroupWithContext is the same as DescribeGroup with the addition of +// DescribeMailboxExportJobWithContext is the same as DescribeMailboxExportJob with the addition of // the ability to pass a context and additional request options. // -// See DescribeGroup for details on how to use this API operation. +// See DescribeMailboxExportJob for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WorkMail) DescribeGroupWithContext(ctx aws.Context, input *DescribeGroupInput, opts ...request.Option) (*DescribeGroupOutput, error) { - req, out := c.DescribeGroupRequest(input) +func (c *WorkMail) DescribeMailboxExportJobWithContext(ctx aws.Context, input *DescribeMailboxExportJobInput, opts ...request.Option) (*DescribeMailboxExportJobOutput, error) { + req, out := c.DescribeMailboxExportJobRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -1549,8 +2024,8 @@ func (c *WorkMail) DescribeResourceRequest(input *DescribeResourceInput) (req *r // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DescribeResource func (c *WorkMail) DescribeResource(input *DescribeResourceInput) (*DescribeResourceOutput, error) { @@ -1640,8 +2115,8 @@ func (c *WorkMail) DescribeUserRequest(input *DescribeUserInput) (req *request.R // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DescribeUser func (c *WorkMail) DescribeUser(input *DescribeUserInput) (*DescribeUserOutput, error) { @@ -1736,8 +2211,8 @@ func (c *WorkMail) DisassociateDelegateFromResourceRequest(input *DisassociateDe // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. 
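//
// The error names listed for these operations surface at runtime through the
// awserr.Error interface. A sketch of distinguishing them, assuming svc and
// input are already defined and github.com/aws/aws-sdk-go/aws/awserr is
// imported:
//
//    _, err := svc.DisassociateDelegateFromResource(input)
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case workmail.ErrCodeOrganizationNotFoundException:
//            // the organization identifier doesn't belong or exist in the system
//        case workmail.ErrCodeOrganizationStateException:
//            // the organization isn't in a valid state for this operation
//        default:
//            fmt.Println(aerr.Code(), aerr.Message())
//        }
//    }
//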
// // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/DisassociateDelegateFromResource func (c *WorkMail) DisassociateDelegateFromResource(input *DisassociateDelegateFromResourceInput) (*DisassociateDelegateFromResourceOutput, error) { @@ -1820,7 +2295,7 @@ func (c *WorkMail) DisassociateMemberFromGroupRequest(input *DisassociateMemberF // The directory service doesn't recognize the credentials supplied by WorkMail. // // * DirectoryUnavailableException -// The directory on which you are trying to perform operations isn't available. +// The directory is unavailable. It might be located in another Region or deleted. // // * EntityNotFoundException // The identifier supplied for the user, group, or resource does not exist in @@ -1838,8 +2313,8 @@ func (c *WorkMail) DisassociateMemberFromGroupRequest(input *DisassociateMemberF // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // * UnsupportedOperationException // You can't perform a write operation against a read-only directory. @@ -1933,8 +2408,8 @@ func (c *WorkMail) GetAccessControlEffectRequest(input *GetAccessControlEffectIn // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/GetAccessControlEffect func (c *WorkMail) GetAccessControlEffect(input *GetAccessControlEffectInput) (*GetAccessControlEffectOutput, error) { @@ -1958,6 +2433,97 @@ func (c *WorkMail) GetAccessControlEffectWithContext(ctx aws.Context, input *Get return out, req.Send() } +const opGetDefaultRetentionPolicy = "GetDefaultRetentionPolicy" + +// GetDefaultRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetDefaultRetentionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetDefaultRetentionPolicy for more information on using the GetDefaultRetentionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetDefaultRetentionPolicyRequest method. 
+// req, resp := client.GetDefaultRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/GetDefaultRetentionPolicy +func (c *WorkMail) GetDefaultRetentionPolicyRequest(input *GetDefaultRetentionPolicyInput) (req *request.Request, output *GetDefaultRetentionPolicyOutput) { + op := &request.Operation{ + Name: opGetDefaultRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetDefaultRetentionPolicyInput{} + } + + output = &GetDefaultRetentionPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetDefaultRetentionPolicy API operation for Amazon WorkMail. +// +// Gets the default retention policy details for the specified organization. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkMail's +// API operation GetDefaultRetentionPolicy for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more of the input parameters don't match the service's restrictions. +// +// * OrganizationNotFoundException +// An operation received a valid organization identifier that either doesn't +// belong or exist in the system. +// +// * OrganizationStateException +// The organization must have a valid state to perform certain operations on +// the organization or its members. +// +// * EntityNotFoundException +// The identifier supplied for the user, group, or resource does not exist in +// your organization. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/GetDefaultRetentionPolicy +func (c *WorkMail) GetDefaultRetentionPolicy(input *GetDefaultRetentionPolicyInput) (*GetDefaultRetentionPolicyOutput, error) { + req, out := c.GetDefaultRetentionPolicyRequest(input) + return out, req.Send() +} + +// GetDefaultRetentionPolicyWithContext is the same as GetDefaultRetentionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetDefaultRetentionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkMail) GetDefaultRetentionPolicyWithContext(ctx aws.Context, input *GetDefaultRetentionPolicyInput, opts ...request.Option) (*GetDefaultRetentionPolicyOutput, error) { + req, out := c.GetDefaultRetentionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opGetMailboxDetails = "GetMailboxDetails" // GetMailboxDetailsRequest generates a "aws/request.Request" representing the @@ -2017,8 +2583,8 @@ func (c *WorkMail) GetMailboxDetailsRequest(input *GetMailboxDetailsInput) (req // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. 
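//
// The *WithContext variants accept a standard context.Context (which satisfies
// aws.Context), so deadlines and cancellation propagate to the underlying HTTP
// request. A sketch, assuming svc is a configured *WorkMail client, the
// context and time packages are imported, and the identifiers are hypothetical:
//
//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//    defer cancel()
//    details, err := svc.GetMailboxDetailsWithContext(ctx, &workmail.GetMailboxDetailsInput{
//        OrganizationId: aws.String("m-1234567890abcdef0"),                               // hypothetical
//        UserId:         aws.String("S-1-1-11-1111111111-2222222222-3333333333-3333"),    // hypothetical
//    })
//    if err == nil {
//        fmt.Println(details)
//    }
//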
// // * EntityNotFoundException // The identifier supplied for the user, group, or resource does not exist in @@ -2105,8 +2671,8 @@ func (c *WorkMail) ListAccessControlRulesRequest(input *ListAccessControlRulesIn // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListAccessControlRules func (c *WorkMail) ListAccessControlRules(input *ListAccessControlRulesInput) (*ListAccessControlRulesOutput, error) { @@ -2206,8 +2772,8 @@ func (c *WorkMail) ListAliasesRequest(input *ListAliasesInput) (req *request.Req // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListAliases func (c *WorkMail) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { @@ -2341,17 +2907,166 @@ func (c *WorkMail) ListGroupMembersRequest(input *ListGroupMembersInput) (req *r // the error. // // See the AWS API reference guide for Amazon WorkMail's -// API operation ListGroupMembers for usage and error information. +// API operation ListGroupMembers for usage and error information. +// +// Returned Error Types: +// * EntityNotFoundException +// The identifier supplied for the user, group, or resource does not exist in +// your organization. +// +// * EntityStateException +// You are performing an operation on a user, group, or resource that isn't +// in the expected state, such as trying to delete an active user. +// +// * InvalidParameterException +// One or more of the input parameters don't match the service's restrictions. +// +// * OrganizationNotFoundException +// An operation received a valid organization identifier that either doesn't +// belong or exist in the system. +// +// * OrganizationStateException +// The organization must have a valid state to perform certain operations on +// the organization or its members. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListGroupMembers +func (c *WorkMail) ListGroupMembers(input *ListGroupMembersInput) (*ListGroupMembersOutput, error) { + req, out := c.ListGroupMembersRequest(input) + return out, req.Send() +} + +// ListGroupMembersWithContext is the same as ListGroupMembers with the addition of +// the ability to pass a context and additional request options. +// +// See ListGroupMembers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkMail) ListGroupMembersWithContext(ctx aws.Context, input *ListGroupMembersInput, opts ...request.Option) (*ListGroupMembersOutput, error) { + req, out := c.ListGroupMembersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListGroupMembersPages iterates over the pages of a ListGroupMembers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGroupMembers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGroupMembers operation. +// pageNum := 0 +// err := client.ListGroupMembersPages(params, +// func(page *workmail.ListGroupMembersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *WorkMail) ListGroupMembersPages(input *ListGroupMembersInput, fn func(*ListGroupMembersOutput, bool) bool) error { + return c.ListGroupMembersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListGroupMembersPagesWithContext same as ListGroupMembersPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkMail) ListGroupMembersPagesWithContext(ctx aws.Context, input *ListGroupMembersInput, fn func(*ListGroupMembersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListGroupMembersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListGroupMembersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListGroupMembersOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListGroups = "ListGroups" + +// ListGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListGroups for more information on using the ListGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListGroupsRequest method. +// req, resp := client.ListGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListGroups +func (c *WorkMail) ListGroupsRequest(input *ListGroupsInput) (req *request.Request, output *ListGroupsOutput) { + op := &request.Operation{ + Name: opListGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListGroupsInput{} + } + + output = &ListGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListGroups API operation for Amazon WorkMail. 
+// +// Returns summaries of the organization's groups. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkMail's +// API operation ListGroups for usage and error information. // // Returned Error Types: // * EntityNotFoundException // The identifier supplied for the user, group, or resource does not exist in // your organization. // -// * EntityStateException -// You are performing an operation on a user, group, or resource that isn't -// in the expected state, such as trying to delete an active user. -// // * InvalidParameterException // One or more of the input parameters don't match the service's restrictions. // @@ -2360,68 +3075,68 @@ func (c *WorkMail) ListGroupMembersRequest(input *ListGroupMembersInput) (req *r // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListGroupMembers -func (c *WorkMail) ListGroupMembers(input *ListGroupMembersInput) (*ListGroupMembersOutput, error) { - req, out := c.ListGroupMembersRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListGroups +func (c *WorkMail) ListGroups(input *ListGroupsInput) (*ListGroupsOutput, error) { + req, out := c.ListGroupsRequest(input) return out, req.Send() } -// ListGroupMembersWithContext is the same as ListGroupMembers with the addition of +// ListGroupsWithContext is the same as ListGroups with the addition of // the ability to pass a context and additional request options. // -// See ListGroupMembers for details on how to use this API operation. +// See ListGroups for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WorkMail) ListGroupMembersWithContext(ctx aws.Context, input *ListGroupMembersInput, opts ...request.Option) (*ListGroupMembersOutput, error) { - req, out := c.ListGroupMembersRequest(input) +func (c *WorkMail) ListGroupsWithContext(ctx aws.Context, input *ListGroupsInput, opts ...request.Option) (*ListGroupsOutput, error) { + req, out := c.ListGroupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListGroupMembersPages iterates over the pages of a ListGroupMembers operation, +// ListGroupsPages iterates over the pages of a ListGroups operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListGroupMembers method for more information on how to use this operation. +// See ListGroups method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListGroupMembers operation. +// // Example iterating over at most 3 pages of a ListGroups operation. 
// pageNum := 0 -// err := client.ListGroupMembersPages(params, -// func(page *workmail.ListGroupMembersOutput, lastPage bool) bool { +// err := client.ListGroupsPages(params, +// func(page *workmail.ListGroupsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *WorkMail) ListGroupMembersPages(input *ListGroupMembersInput, fn func(*ListGroupMembersOutput, bool) bool) error { - return c.ListGroupMembersPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *WorkMail) ListGroupsPages(input *ListGroupsInput, fn func(*ListGroupsOutput, bool) bool) error { + return c.ListGroupsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListGroupMembersPagesWithContext same as ListGroupMembersPages except +// ListGroupsPagesWithContext same as ListGroupsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WorkMail) ListGroupMembersPagesWithContext(ctx aws.Context, input *ListGroupMembersInput, fn func(*ListGroupMembersOutput, bool) bool, opts ...request.Option) error { +func (c *WorkMail) ListGroupsPagesWithContext(ctx aws.Context, input *ListGroupsInput, fn func(*ListGroupsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListGroupMembersInput + var inCpy *ListGroupsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListGroupMembersRequest(inCpy) + req, _ := c.ListGroupsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -2429,7 +3144,7 @@ func (c *WorkMail) ListGroupMembersPagesWithContext(ctx aws.Context, input *List } for p.Next() { - if !fn(p.Page().(*ListGroupMembersOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListGroupsOutput), !p.HasNextPage()) { break } } @@ -2437,35 +3152,35 @@ func (c *WorkMail) ListGroupMembersPagesWithContext(ctx aws.Context, input *List return p.Err() } -const opListGroups = "ListGroups" +const opListMailboxExportJobs = "ListMailboxExportJobs" -// ListGroupsRequest generates a "aws/request.Request" representing the -// client's request for the ListGroups operation. The "output" return +// ListMailboxExportJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListMailboxExportJobs operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListGroups for more information on using the ListGroups +// See ListMailboxExportJobs for more information on using the ListMailboxExportJobs // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListGroupsRequest method. -// req, resp := client.ListGroupsRequest(params) +// // Example sending a request using the ListMailboxExportJobsRequest method. 
+// req, resp := client.ListMailboxExportJobsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListGroups -func (c *WorkMail) ListGroupsRequest(input *ListGroupsInput) (req *request.Request, output *ListGroupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListMailboxExportJobs +func (c *WorkMail) ListMailboxExportJobsRequest(input *ListMailboxExportJobsInput) (req *request.Request, output *ListMailboxExportJobsOutput) { op := &request.Operation{ - Name: opListGroups, + Name: opListMailboxExportJobs, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -2477,30 +3192,27 @@ func (c *WorkMail) ListGroupsRequest(input *ListGroupsInput) (req *request.Reque } if input == nil { - input = &ListGroupsInput{} + input = &ListMailboxExportJobsInput{} } - output = &ListGroupsOutput{} + output = &ListMailboxExportJobsOutput{} req = c.newRequest(op, input, output) return } -// ListGroups API operation for Amazon WorkMail. +// ListMailboxExportJobs API operation for Amazon WorkMail. // -// Returns summaries of the organization's groups. +// Lists the mailbox export jobs started for the specified organization within +// the last seven days. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon WorkMail's -// API operation ListGroups for usage and error information. +// API operation ListMailboxExportJobs for usage and error information. // // Returned Error Types: -// * EntityNotFoundException -// The identifier supplied for the user, group, or resource does not exist in -// your organization. -// // * InvalidParameterException // One or more of the input parameters don't match the service's restrictions. // @@ -2509,68 +3221,68 @@ func (c *WorkMail) ListGroupsRequest(input *ListGroupsInput) (req *request.Reque // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListGroups -func (c *WorkMail) ListGroups(input *ListGroupsInput) (*ListGroupsOutput, error) { - req, out := c.ListGroupsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListMailboxExportJobs +func (c *WorkMail) ListMailboxExportJobs(input *ListMailboxExportJobsInput) (*ListMailboxExportJobsOutput, error) { + req, out := c.ListMailboxExportJobsRequest(input) return out, req.Send() } -// ListGroupsWithContext is the same as ListGroups with the addition of +// ListMailboxExportJobsWithContext is the same as ListMailboxExportJobs with the addition of // the ability to pass a context and additional request options. // -// See ListGroups for details on how to use this API operation. +// See ListMailboxExportJobs for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WorkMail) ListGroupsWithContext(ctx aws.Context, input *ListGroupsInput, opts ...request.Option) (*ListGroupsOutput, error) { - req, out := c.ListGroupsRequest(input) +func (c *WorkMail) ListMailboxExportJobsWithContext(ctx aws.Context, input *ListMailboxExportJobsInput, opts ...request.Option) (*ListMailboxExportJobsOutput, error) { + req, out := c.ListMailboxExportJobsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListGroupsPages iterates over the pages of a ListGroups operation, +// ListMailboxExportJobsPages iterates over the pages of a ListMailboxExportJobs operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListGroups method for more information on how to use this operation. +// See ListMailboxExportJobs method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListGroups operation. +// // Example iterating over at most 3 pages of a ListMailboxExportJobs operation. // pageNum := 0 -// err := client.ListGroupsPages(params, -// func(page *workmail.ListGroupsOutput, lastPage bool) bool { +// err := client.ListMailboxExportJobsPages(params, +// func(page *workmail.ListMailboxExportJobsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *WorkMail) ListGroupsPages(input *ListGroupsInput, fn func(*ListGroupsOutput, bool) bool) error { - return c.ListGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *WorkMail) ListMailboxExportJobsPages(input *ListMailboxExportJobsInput, fn func(*ListMailboxExportJobsOutput, bool) bool) error { + return c.ListMailboxExportJobsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListGroupsPagesWithContext same as ListGroupsPages except +// ListMailboxExportJobsPagesWithContext same as ListMailboxExportJobsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WorkMail) ListGroupsPagesWithContext(ctx aws.Context, input *ListGroupsInput, fn func(*ListGroupsOutput, bool) bool, opts ...request.Option) error { +func (c *WorkMail) ListMailboxExportJobsPagesWithContext(ctx aws.Context, input *ListMailboxExportJobsInput, fn func(*ListMailboxExportJobsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListGroupsInput + var inCpy *ListMailboxExportJobsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListGroupsRequest(inCpy) + req, _ := c.ListMailboxExportJobsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -2578,7 +3290,7 @@ func (c *WorkMail) ListGroupsPagesWithContext(ctx aws.Context, input *ListGroups } for p.Next() { - if !fn(p.Page().(*ListGroupsOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListMailboxExportJobsOutput), !p.HasNextPage()) { break } } @@ -2659,8 +3371,8 @@ func (c *WorkMail) ListMailboxPermissionsRequest(input *ListMailboxPermissionsIn // belong or exist in the system. 
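//
// The *Pages helpers added above drive the NextToken/MaxResults paginator on
// the caller's behalf. A sketch of collecting all group names, assuming svc is
// a configured *WorkMail client and a hypothetical organization ID (the
// Groups/Name field names are assumed from the WorkMail API shapes):
//
//    var names []string
//    err := svc.ListGroupsPages(&workmail.ListGroupsInput{
//        OrganizationId: aws.String("m-1234567890abcdef0"), // hypothetical
//    }, func(page *workmail.ListGroupsOutput, lastPage bool) bool {
//        for _, g := range page.Groups {
//            names = append(names, aws.StringValue(g.Name))
//        }
//        return true // keep paging until lastPage
//    })
//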
// // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListMailboxPermissions func (c *WorkMail) ListMailboxPermissions(input *ListMailboxPermissionsInput) (*ListMailboxPermissionsOutput, error) { @@ -2786,7 +3498,7 @@ func (c *WorkMail) ListOrganizationsRequest(input *ListOrganizationsInput) (req // ListOrganizations API operation for Amazon WorkMail. // -// Returns summaries of the customer's non-deleted organizations. +// Returns summaries of the customer's organizations. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2950,8 +3662,8 @@ func (c *WorkMail) ListResourceDelegatesRequest(input *ListResourceDelegatesInpu // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListResourceDelegates func (c *WorkMail) ListResourceDelegates(input *ListResourceDelegatesInput) (*ListResourceDelegatesOutput, error) { @@ -3095,8 +3807,8 @@ func (c *WorkMail) ListResourcesRequest(input *ListResourcesInput) (req *request // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListResources func (c *WorkMail) ListResources(input *ListResourcesInput) (*ListResourcesOutput, error) { @@ -3319,8 +4031,8 @@ func (c *WorkMail) ListUsersRequest(input *ListUsersInput) (req *request.Request // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ListUsers func (c *WorkMail) ListUsers(input *ListUsersInput) (*ListUsersOutput, error) { @@ -3469,8 +4181,8 @@ func (c *WorkMail) PutAccessControlRuleRequest(input *PutAccessControlRuleInput) // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/PutAccessControlRule func (c *WorkMail) PutAccessControlRule(input *PutAccessControlRuleInput) (*PutAccessControlRuleOutput, error) { @@ -3566,8 +4278,8 @@ func (c *WorkMail) PutMailboxPermissionsRequest(input *PutMailboxPermissionsInpu // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/PutMailboxPermissions func (c *WorkMail) PutMailboxPermissions(input *PutMailboxPermissionsInput) (*PutMailboxPermissionsOutput, error) { @@ -3591,6 +4303,97 @@ func (c *WorkMail) PutMailboxPermissionsWithContext(ctx aws.Context, input *PutM return out, req.Send() } +const opPutRetentionPolicy = "PutRetentionPolicy" + +// PutRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutRetentionPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutRetentionPolicy for more information on using the PutRetentionPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutRetentionPolicyRequest method. +// req, resp := client.PutRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/PutRetentionPolicy +func (c *WorkMail) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *request.Request, output *PutRetentionPolicyOutput) { + op := &request.Operation{ + Name: opPutRetentionPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRetentionPolicyInput{} + } + + output = &PutRetentionPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutRetentionPolicy API operation for Amazon WorkMail. +// +// Puts a retention policy to the specified organization. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkMail's +// API operation PutRetentionPolicy for usage and error information. +// +// Returned Error Types: +// * InvalidParameterException +// One or more of the input parameters don't match the service's restrictions. +// +// * OrganizationNotFoundException +// An operation received a valid organization identifier that either doesn't +// belong or exist in the system. +// +// * OrganizationStateException +// The organization must have a valid state to perform certain operations on +// the organization or its members. +// +// * LimitExceededException +// The request exceeds the limit of the resource. 
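//
// A sketch of applying a retention policy. The FolderConfigurations shape and
// its Name/Action/Period values are assumed from the WorkMail API and should
// be verified against the generated types; the IDs are hypothetical.
//
//    _, err := svc.PutRetentionPolicy(&workmail.PutRetentionPolicyInput{
//        OrganizationId: aws.String("m-1234567890abcdef0"),
//        Name:           aws.String("default-retention"),
//        FolderConfigurations: []*workmail.FolderConfiguration{{
//            Name:   aws.String("DELETED_ITEMS"),
//            Action: aws.String("PERMANENTLY_DELETE"),
//            Period: aws.Int64(30), // days
//        }},
//    })
//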
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/PutRetentionPolicy +func (c *WorkMail) PutRetentionPolicy(input *PutRetentionPolicyInput) (*PutRetentionPolicyOutput, error) { + req, out := c.PutRetentionPolicyRequest(input) + return out, req.Send() +} + +// PutRetentionPolicyWithContext is the same as PutRetentionPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutRetentionPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkMail) PutRetentionPolicyWithContext(ctx aws.Context, input *PutRetentionPolicyInput, opts ...request.Option) (*PutRetentionPolicyOutput, error) { + req, out := c.PutRetentionPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opRegisterToWorkMail = "RegisterToWorkMail" // RegisterToWorkMailRequest generates a "aws/request.Request" representing the @@ -3658,7 +4461,7 @@ func (c *WorkMail) RegisterToWorkMailRequest(input *RegisterToWorkMailInput) (re // The directory service doesn't recognize the credentials supplied by WorkMail. // // * DirectoryUnavailableException -// The directory on which you are trying to perform operations isn't available. +// The directory is unavailable. It might be located in another Region or deleted. // // * EmailAddressInUseException // The email address that you're trying to assign is already created for a different @@ -3691,8 +4494,8 @@ func (c *WorkMail) RegisterToWorkMailRequest(input *RegisterToWorkMailInput) (re // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/RegisterToWorkMail func (c *WorkMail) RegisterToWorkMail(input *RegisterToWorkMailInput) (*RegisterToWorkMailOutput, error) { @@ -3716,110 +4519,207 @@ func (c *WorkMail) RegisterToWorkMailWithContext(ctx aws.Context, input *Registe return out, req.Send() } -const opResetPassword = "ResetPassword" +const opResetPassword = "ResetPassword" + +// ResetPasswordRequest generates a "aws/request.Request" representing the +// client's request for the ResetPassword operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetPassword for more information on using the ResetPassword +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResetPasswordRequest method. 
+// req, resp := client.ResetPasswordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ResetPassword +func (c *WorkMail) ResetPasswordRequest(input *ResetPasswordInput) (req *request.Request, output *ResetPasswordOutput) { + op := &request.Operation{ + Name: opResetPassword, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetPasswordInput{} + } + + output = &ResetPasswordOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ResetPassword API operation for Amazon WorkMail. +// +// Allows the administrator to reset the password for a user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkMail's +// API operation ResetPassword for usage and error information. +// +// Returned Error Types: +// * DirectoryServiceAuthenticationFailedException +// The directory service doesn't recognize the credentials supplied by WorkMail. +// +// * DirectoryUnavailableException +// The directory is unavailable. It might be located in another Region or deleted. +// +// * EntityNotFoundException +// The identifier supplied for the user, group, or resource does not exist in +// your organization. +// +// * EntityStateException +// You are performing an operation on a user, group, or resource that isn't +// in the expected state, such as trying to delete an active user. +// +// * InvalidParameterException +// One or more of the input parameters don't match the service's restrictions. +// +// * InvalidPasswordException +// The supplied password doesn't match the minimum security constraints, such +// as length or use of special characters. +// +// * OrganizationNotFoundException +// An operation received a valid organization identifier that either doesn't +// belong or exist in the system. +// +// * OrganizationStateException +// The organization must have a valid state to perform certain operations on +// the organization or its members. +// +// * UnsupportedOperationException +// You can't perform a write operation against a read-only directory. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ResetPassword +func (c *WorkMail) ResetPassword(input *ResetPasswordInput) (*ResetPasswordOutput, error) { + req, out := c.ResetPasswordRequest(input) + return out, req.Send() +} + +// ResetPasswordWithContext is the same as ResetPassword with the addition of +// the ability to pass a context and additional request options. +// +// See ResetPassword for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkMail) ResetPasswordWithContext(ctx aws.Context, input *ResetPasswordInput, opts ...request.Option) (*ResetPasswordOutput, error) { + req, out := c.ResetPasswordRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opStartMailboxExportJob = "StartMailboxExportJob" -// ResetPasswordRequest generates a "aws/request.Request" representing the -// client's request for the ResetPassword operation. The "output" return +// StartMailboxExportJobRequest generates a "aws/request.Request" representing the +// client's request for the StartMailboxExportJob operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ResetPassword for more information on using the ResetPassword +// See StartMailboxExportJob for more information on using the StartMailboxExportJob // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ResetPasswordRequest method. -// req, resp := client.ResetPasswordRequest(params) +// // Example sending a request using the StartMailboxExportJobRequest method. +// req, resp := client.StartMailboxExportJobRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ResetPassword -func (c *WorkMail) ResetPasswordRequest(input *ResetPasswordInput) (req *request.Request, output *ResetPasswordOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/StartMailboxExportJob +func (c *WorkMail) StartMailboxExportJobRequest(input *StartMailboxExportJobInput) (req *request.Request, output *StartMailboxExportJobOutput) { op := &request.Operation{ - Name: opResetPassword, + Name: opStartMailboxExportJob, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ResetPasswordInput{} + input = &StartMailboxExportJobInput{} } - output = &ResetPasswordOutput{} + output = &StartMailboxExportJobOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// ResetPassword API operation for Amazon WorkMail. +// StartMailboxExportJob API operation for Amazon WorkMail. // -// Allows the administrator to reset the password for a user. +// Starts a mailbox export job to export MIME-format email messages and calendar +// items from the specified mailbox to the specified Amazon Simple Storage Service +// (Amazon S3) bucket. For more information, see Exporting mailbox content (https://docs.aws.amazon.com/workmail/latest/adminguide/mail-export.html) +// in the Amazon WorkMail Administrator Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon WorkMail's -// API operation ResetPassword for usage and error information. +// API operation StartMailboxExportJob for usage and error information. // // Returned Error Types: -// * DirectoryServiceAuthenticationFailedException -// The directory service doesn't recognize the credentials supplied by WorkMail. -// -// * DirectoryUnavailableException -// The directory on which you are trying to perform operations isn't available. 
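// A minimal sketch of the ResetPassword operation above, using the context-aware
// variant so the call is cancelled when the deadline expires (the context must be
// non-nil, per the generated docs). The IDs and password are hypothetical placeholders;
// ResetPasswordInput is assumed to carry the usual OrganizationId, UserId, and Password
// fields.
package main

import (
    "context"
    "log"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/workmail"
)

func main() {
    svc := workmail.New(session.Must(session.NewSession()))

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    _, err := svc.ResetPasswordWithContext(ctx, &workmail.ResetPasswordInput{
        OrganizationId: aws.String("m-0123456789abcdef0123456789abcdef"),             // placeholder
        UserId:         aws.String("S-1-1-11-1111111111-2222222222-3333333333-3333"), // placeholder
        Password:       aws.String("ExamplePassw0rd!"),
    })
    if err != nil {
        log.Fatalf("ResetPassword failed: %v", err)
    }
    log.Println("password reset")
}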
-// -// * EntityNotFoundException -// The identifier supplied for the user, group, or resource does not exist in -// your organization. -// -// * EntityStateException -// You are performing an operation on a user, group, or resource that isn't -// in the expected state, such as trying to delete an active user. -// // * InvalidParameterException // One or more of the input parameters don't match the service's restrictions. // -// * InvalidPasswordException -// The supplied password doesn't match the minimum security constraints, such -// as length or use of special characters. -// // * OrganizationNotFoundException // An operation received a valid organization identifier that either doesn't // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // -// * UnsupportedOperationException -// You can't perform a write operation against a read-only directory. +// * EntityNotFoundException +// The identifier supplied for the user, group, or resource does not exist in +// your organization. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/ResetPassword -func (c *WorkMail) ResetPassword(input *ResetPasswordInput) (*ResetPasswordOutput, error) { - req, out := c.ResetPasswordRequest(input) +// * LimitExceededException +// The request exceeds the limit of the resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/StartMailboxExportJob +func (c *WorkMail) StartMailboxExportJob(input *StartMailboxExportJobInput) (*StartMailboxExportJobOutput, error) { + req, out := c.StartMailboxExportJobRequest(input) return out, req.Send() } -// ResetPasswordWithContext is the same as ResetPassword with the addition of +// StartMailboxExportJobWithContext is the same as StartMailboxExportJob with the addition of // the ability to pass a context and additional request options. // -// See ResetPassword for details on how to use this API operation. +// See StartMailboxExportJob for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WorkMail) ResetPasswordWithContext(ctx aws.Context, input *ResetPasswordInput, opts ...request.Option) (*ResetPasswordOutput, error) { - req, out := c.ResetPasswordRequest(input) +func (c *WorkMail) StartMailboxExportJobWithContext(ctx aws.Context, input *StartMailboxExportJobInput, opts ...request.Option) (*StartMailboxExportJobOutput, error) { + req, out := c.StartMailboxExportJobRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -3888,8 +4788,8 @@ func (c *WorkMail) TagResourceRequest(input *TagResourceInput) (req *request.Req // The resource can have up to 50 user-applied tags. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. 
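// A minimal sketch of starting a mailbox export job as described above. All ARNs,
// bucket names, and IDs are hypothetical placeholders; the input field names
// (EntityId, RoleArn, KmsKeyArn, S3BucketName, S3Prefix) mirror the
// DescribeMailboxExportJobOutput fields shown later in this file, and the output is
// assumed to expose the new job's ID.
package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/workmail"
)

func main() {
    svc := workmail.New(session.Must(session.NewSession()))

    out, err := svc.StartMailboxExportJob(&workmail.StartMailboxExportJobInput{
        OrganizationId: aws.String("m-0123456789abcdef0123456789abcdef"),             // placeholder
        EntityId:       aws.String("S-1-1-11-1111111111-2222222222-3333333333-3333"), // placeholder
        Description:    aws.String("quarterly archive"),
        RoleArn:        aws.String("arn:aws:iam::111122223333:role/WorkMailExportRole"),
        KmsKeyArn:      aws.String("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"),
        S3BucketName:   aws.String("example-mailbox-exports"),
        S3Prefix:       aws.String("exports/2020"),
    })
    if err != nil {
        log.Fatalf("StartMailboxExportJob failed: %v", err)
    }
    // The returned job ID feeds DescribeMailboxExportJob and CancelMailboxExportJob.
    fmt.Println("started export job:", aws.StringValue(out.JobId))
}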
// // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/TagResource func (c *WorkMail) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { @@ -4057,8 +4957,8 @@ func (c *WorkMail) UpdateMailboxQuotaRequest(input *UpdateMailboxQuotaInput) (re // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // * EntityNotFoundException // The identifier supplied for the user, group, or resource does not exist in @@ -4152,7 +5052,7 @@ func (c *WorkMail) UpdatePrimaryEmailAddressRequest(input *UpdatePrimaryEmailAdd // The directory service doesn't recognize the credentials supplied by WorkMail. // // * DirectoryUnavailableException -// The directory on which you are trying to perform operations isn't available. +// The directory is unavailable. It might be located in another Region or deleted. // // * EmailAddressInUseException // The email address that you're trying to assign is already created for a different @@ -4185,8 +5085,8 @@ func (c *WorkMail) UpdatePrimaryEmailAddressRequest(input *UpdatePrimaryEmailAdd // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // * UnsupportedOperationException // You can't perform a write operation against a read-only directory. @@ -4271,7 +5171,7 @@ func (c *WorkMail) UpdateResourceRequest(input *UpdateResourceInput) (req *reque // // Returned Error Types: // * DirectoryUnavailableException -// The directory on which you are trying to perform operations isn't available. +// The directory is unavailable. It might be located in another Region or deleted. // // * EntityNotFoundException // The identifier supplied for the user, group, or resource does not exist in @@ -4306,8 +5206,8 @@ func (c *WorkMail) UpdateResourceRequest(input *UpdateResourceInput) (req *reque // belong or exist in the system. // // * OrganizationStateException -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. // // See also, https://docs.aws.amazon.com/goto/WebAPI/workmail-2017-10-01/UpdateResource func (c *WorkMail) UpdateResource(input *UpdateResourceInput) (*UpdateResourceOutput, error) { @@ -4458,12 +5358,12 @@ type AssociateDelegateToResourceInput struct { // The organization under which the resource exists. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The resource for which members (users or groups) are associated. 
// // ResourceId is a required field - ResourceId *string `type:"string" required:"true"` + ResourceId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -4488,9 +5388,15 @@ func (s *AssociateDelegateToResourceInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.ResourceId == nil { invalidParams.Add(request.NewErrParamRequired("ResourceId")) } + if s.ResourceId != nil && len(*s.ResourceId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4546,7 +5452,7 @@ type AssociateMemberToGroupInput struct { // The organization under which the group exists. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -4577,6 +5483,9 @@ func (s *AssociateMemberToGroupInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4660,6 +5569,90 @@ func (s *BookingOptions) SetAutoDeclineRecurringRequests(v bool) *BookingOptions return s } +type CancelMailboxExportJobInput struct { + _ struct{} `type:"structure"` + + // The idempotency token for the client request. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The job ID. + // + // JobId is a required field + JobId *string `min:"1" type:"string" required:"true"` + + // The organization ID. + // + // OrganizationId is a required field + OrganizationId *string `min:"34" type:"string" required:"true"` +} + +// String returns the string representation +func (s CancelMailboxExportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelMailboxExportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CancelMailboxExportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CancelMailboxExportJobInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + if s.OrganizationId == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationId")) + } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *CancelMailboxExportJobInput) SetClientToken(v string) *CancelMailboxExportJobInput { + s.ClientToken = &v + return s +} + +// SetJobId sets the JobId field's value. +func (s *CancelMailboxExportJobInput) SetJobId(v string) *CancelMailboxExportJobInput { + s.JobId = &v + return s +} + +// SetOrganizationId sets the OrganizationId field's value. 
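// A minimal sketch of the new `min:"34"` OrganizationId constraint above. Validate()
// runs client-side before the SDK sends a request; here it is called directly on
// AssociateDelegateToResourceInput with a deliberately short, hypothetical organization
// ID to show the resulting invalid-parameter error. EntityId is assumed to be the
// delegate's entity ID field.
package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/workmail"
)

func main() {
    in := &workmail.AssociateDelegateToResourceInput{
        OrganizationId: aws.String("m-too-short"), // fails the 34-character minimum
        ResourceId:     aws.String("r-0123456789abcdef0123456789abcdef"),
        EntityId:       aws.String("S-1-1-11-1111111111-2222222222-3333333333-3333"),
    }
    if err := in.Validate(); err != nil {
        // Prints an ErrInvalidParams aggregate naming OrganizationId's minimum length.
        fmt.Println(err)
    }
}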
+func (s *CancelMailboxExportJobInput) SetOrganizationId(v string) *CancelMailboxExportJobInput { + s.OrganizationId = &v + return s +} + +type CancelMailboxExportJobOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CancelMailboxExportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancelMailboxExportJobOutput) GoString() string { + return s.String() +} + type CreateAliasInput struct { _ struct{} `type:"structure"` @@ -4676,7 +5669,7 @@ type CreateAliasInput struct { // The organization under which the member (user or group) exists. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -4707,6 +5700,9 @@ func (s *CreateAliasInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4757,7 +5753,7 @@ type CreateGroupInput struct { // The organization under which the group is to be created. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -4782,6 +5778,9 @@ func (s *CreateGroupInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -4824,6 +5823,137 @@ func (s *CreateGroupOutput) SetGroupId(v string) *CreateGroupOutput { return s } +type CreateOrganizationInput struct { + _ struct{} `type:"structure"` + + // The organization alias. + // + // Alias is a required field + Alias *string `min:"1" type:"string" required:"true"` + + // The idempotency token associated with the request. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The AWS Directory Service directory ID. + DirectoryId *string `min:"12" type:"string"` + + // The email domains to associate with the organization. + Domains []*Domain `type:"list"` + + // When true, allows organization interoperability between Amazon WorkMail and + // Microsoft Exchange. Can only be set to true if an AD Connector directory + // ID is included in the request. + EnableInteroperability *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) of a customer managed master key from AWS + // KMS. + KmsKeyArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s CreateOrganizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOrganizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
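// A minimal sketch of cancelling an in-progress export with the
// CancelMailboxExportJobInput type defined above. The job and organization IDs are
// hypothetical placeholders; ClientToken is left nil so the SDK's idempotency-token
// handling should fill it in.
package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/workmail"
)

func main() {
    svc := workmail.New(session.Must(session.NewSession()))

    _, err := svc.CancelMailboxExportJob(&workmail.CancelMailboxExportJobInput{
        JobId:          aws.String("b7a9d1e2-example-job-id"),            // placeholder
        OrganizationId: aws.String("m-0123456789abcdef0123456789abcdef"), // placeholder
    })
    if err != nil {
        log.Fatalf("CancelMailboxExportJob failed: %v", err)
    }
    log.Println("export job cancelled")
}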
+func (s *CreateOrganizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateOrganizationInput"} + if s.Alias == nil { + invalidParams.Add(request.NewErrParamRequired("Alias")) + } + if s.Alias != nil && len(*s.Alias) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Alias", 1)) + } + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.DirectoryId != nil && len(*s.DirectoryId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("DirectoryId", 12)) + } + if s.KmsKeyArn != nil && len(*s.KmsKeyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("KmsKeyArn", 20)) + } + if s.Domains != nil { + for i, v := range s.Domains { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Domains", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAlias sets the Alias field's value. +func (s *CreateOrganizationInput) SetAlias(v string) *CreateOrganizationInput { + s.Alias = &v + return s +} + +// SetClientToken sets the ClientToken field's value. +func (s *CreateOrganizationInput) SetClientToken(v string) *CreateOrganizationInput { + s.ClientToken = &v + return s +} + +// SetDirectoryId sets the DirectoryId field's value. +func (s *CreateOrganizationInput) SetDirectoryId(v string) *CreateOrganizationInput { + s.DirectoryId = &v + return s +} + +// SetDomains sets the Domains field's value. +func (s *CreateOrganizationInput) SetDomains(v []*Domain) *CreateOrganizationInput { + s.Domains = v + return s +} + +// SetEnableInteroperability sets the EnableInteroperability field's value. +func (s *CreateOrganizationInput) SetEnableInteroperability(v bool) *CreateOrganizationInput { + s.EnableInteroperability = &v + return s +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *CreateOrganizationInput) SetKmsKeyArn(v string) *CreateOrganizationInput { + s.KmsKeyArn = &v + return s +} + +type CreateOrganizationOutput struct { + _ struct{} `type:"structure"` + + // The organization ID. + OrganizationId *string `min:"34" type:"string"` +} + +// String returns the string representation +func (s CreateOrganizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOrganizationOutput) GoString() string { + return s.String() +} + +// SetOrganizationId sets the OrganizationId field's value. +func (s *CreateOrganizationOutput) SetOrganizationId(v string) *CreateOrganizationOutput { + s.OrganizationId = &v + return s +} + type CreateResourceInput struct { _ struct{} `type:"structure"` @@ -4836,7 +5966,7 @@ type CreateResourceInput struct { // created. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The type of the new resource. The available types are equipment and room. 
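// A minimal sketch of creating an organization with the CreateOrganizationInput shape
// above, attaching a Route 53-hosted domain (see the Domain type later in this file) so
// the recommended DNS records are added automatically. The alias, domain name, and
// hosted zone ID are hypothetical placeholders.
package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/workmail"
)

func main() {
    svc := workmail.New(session.Must(session.NewSession()))

    out, err := svc.CreateOrganization(&workmail.CreateOrganizationInput{
        Alias: aws.String("example-corp"),
        Domains: []*workmail.Domain{
            {
                DomainName:   aws.String("mail.example.com"),
                HostedZoneId: aws.String("Z0123456789EXAMPLE"), // placeholder Route 53 zone
            },
        },
    })
    if err != nil {
        log.Fatalf("CreateOrganization failed: %v", err)
    }
    fmt.Println("organization:", aws.StringValue(out.OrganizationId))
}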
// @@ -4866,6 +5996,9 @@ func (s *CreateResourceInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.Type == nil { invalidParams.Add(request.NewErrParamRequired("Type")) } @@ -4898,7 +6031,7 @@ type CreateResourceOutput struct { _ struct{} `type:"structure"` // The identifier of the new resource. - ResourceId *string `type:"string"` + ResourceId *string `min:"34" type:"string"` } // String returns the string representation @@ -4934,7 +6067,7 @@ type CreateUserInput struct { // The identifier of the organization for which the user is created. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The password for the new user. // @@ -4967,6 +6100,9 @@ func (s *CreateUserInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.Password == nil { invalidParams.Add(request.NewErrParamRequired("Password")) } @@ -5071,7 +6207,9 @@ type DeleteAccessControlRuleInput struct { Name *string `min:"1" type:"string" required:"true"` // The identifier for the organization. - OrganizationId *string `type:"string"` + // + // OrganizationId is a required field + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -5093,6 +6231,12 @@ func (s *DeleteAccessControlRuleInput) Validate() error { if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } + if s.OrganizationId == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationId")) + } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5145,7 +6289,7 @@ type DeleteAliasInput struct { // The identifier for the organization under which the user exists. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -5176,6 +6320,9 @@ func (s *DeleteAliasInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5226,7 +6373,7 @@ type DeleteGroupInput struct { // The organization that contains the group. 
// // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -5251,6 +6398,9 @@ func (s *DeleteGroupInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5287,7 +6437,7 @@ func (s DeleteGroupOutput) GoString() string { type DeleteMailboxPermissionsInput struct { _ struct{} `type:"structure"` - // The identifier of the member (user or group)that owns the mailbox. + // The identifier of the member (user or group) that owns the mailbox. // // EntityId is a required field EntityId *string `min:"12" type:"string" required:"true"` @@ -5302,7 +6452,7 @@ type DeleteMailboxPermissionsInput struct { // exists. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -5333,6 +6483,9 @@ func (s *DeleteMailboxPermissionsInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5358,20 +6511,120 @@ func (s *DeleteMailboxPermissionsInput) SetOrganizationId(v string) *DeleteMailb return s } -type DeleteMailboxPermissionsOutput struct { +type DeleteMailboxPermissionsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteMailboxPermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMailboxPermissionsOutput) GoString() string { + return s.String() +} + +type DeleteOrganizationInput struct { + _ struct{} `type:"structure"` + + // The idempotency token associated with the request. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // If true, deletes the AWS Directory Service directory associated with the + // organization. + // + // DeleteDirectory is a required field + DeleteDirectory *bool `type:"boolean" required:"true"` + + // The organization ID. + // + // OrganizationId is a required field + OrganizationId *string `min:"34" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteOrganizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOrganizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteOrganizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteOrganizationInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.DeleteDirectory == nil { + invalidParams.Add(request.NewErrParamRequired("DeleteDirectory")) + } + if s.OrganizationId == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationId")) + } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *DeleteOrganizationInput) SetClientToken(v string) *DeleteOrganizationInput { + s.ClientToken = &v + return s +} + +// SetDeleteDirectory sets the DeleteDirectory field's value. +func (s *DeleteOrganizationInput) SetDeleteDirectory(v bool) *DeleteOrganizationInput { + s.DeleteDirectory = &v + return s +} + +// SetOrganizationId sets the OrganizationId field's value. +func (s *DeleteOrganizationInput) SetOrganizationId(v string) *DeleteOrganizationInput { + s.OrganizationId = &v + return s +} + +type DeleteOrganizationOutput struct { _ struct{} `type:"structure"` + + // The organization ID. + OrganizationId *string `min:"34" type:"string"` + + // The state of the organization. + State *string `type:"string"` } // String returns the string representation -func (s DeleteMailboxPermissionsOutput) String() string { +func (s DeleteOrganizationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteMailboxPermissionsOutput) GoString() string { +func (s DeleteOrganizationOutput) GoString() string { return s.String() } +// SetOrganizationId sets the OrganizationId field's value. +func (s *DeleteOrganizationOutput) SetOrganizationId(v string) *DeleteOrganizationOutput { + s.OrganizationId = &v + return s +} + +// SetState sets the State field's value. +func (s *DeleteOrganizationOutput) SetState(v string) *DeleteOrganizationOutput { + s.State = &v + return s +} + type DeleteResourceInput struct { _ struct{} `type:"structure"` @@ -5379,12 +6632,12 @@ type DeleteResourceInput struct { // deleted. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The identifier of the resource to be deleted. // // ResourceId is a required field - ResourceId *string `type:"string" required:"true"` + ResourceId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -5403,9 +6656,15 @@ func (s *DeleteResourceInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.ResourceId == nil { invalidParams.Add(request.NewErrParamRequired("ResourceId")) } + if s.ResourceId != nil && len(*s.ResourceId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5439,13 +6698,85 @@ func (s DeleteResourceOutput) GoString() string { return s.String() } +type DeleteRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The retention policy ID. 
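// A minimal sketch of deleting an organization with the DeleteOrganizationInput defined
// above. DeleteDirectory is required; false keeps the underlying AWS Directory Service
// directory. The organization ID is a hypothetical placeholder.
package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/workmail"
)

func main() {
    svc := workmail.New(session.Must(session.NewSession()))

    out, err := svc.DeleteOrganization(&workmail.DeleteOrganizationInput{
        OrganizationId:  aws.String("m-0123456789abcdef0123456789abcdef"), // placeholder
        DeleteDirectory: aws.Bool(false),
    })
    if err != nil {
        log.Fatalf("DeleteOrganization failed: %v", err)
    }
    // The output echoes the organization ID and its new state.
    fmt.Printf("%s is now %s\n", aws.StringValue(out.OrganizationId), aws.StringValue(out.State))
}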
+ // + // Id is a required field + Id *string `min:"1" type:"string" required:"true"` + + // The organization ID. + // + // OrganizationId is a required field + OrganizationId *string `min:"34" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRetentionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRetentionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRetentionPolicyInput"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + if s.OrganizationId == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationId")) + } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetId sets the Id field's value. +func (s *DeleteRetentionPolicyInput) SetId(v string) *DeleteRetentionPolicyInput { + s.Id = &v + return s +} + +// SetOrganizationId sets the OrganizationId field's value. +func (s *DeleteRetentionPolicyInput) SetOrganizationId(v string) *DeleteRetentionPolicyInput { + s.OrganizationId = &v + return s +} + +type DeleteRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRetentionPolicyOutput) GoString() string { + return s.String() +} + type DeleteUserInput struct { _ struct{} `type:"structure"` // The organization that contains the user to be deleted. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The identifier of the user to be deleted. // @@ -5469,6 +6800,9 @@ func (s *DeleteUserInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.UserId == nil { invalidParams.Add(request.NewErrParamRequired("UserId")) } @@ -5520,7 +6854,7 @@ type DeregisterFromWorkMailInput struct { // exists. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -5545,6 +6879,9 @@ func (s *DeregisterFromWorkMailInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5589,7 +6926,7 @@ type DescribeGroupInput struct { // The identifier for the organization under which the group exists. 
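// A minimal sketch of deleting a retention policy by ID with the
// DeleteRetentionPolicyInput defined above. Both identifiers are hypothetical
// placeholders.
package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/workmail"
)

func main() {
    svc := workmail.New(session.Must(session.NewSession()))

    _, err := svc.DeleteRetentionPolicy(&workmail.DeleteRetentionPolicyInput{
        OrganizationId: aws.String("m-0123456789abcdef0123456789abcdef"), // placeholder
        Id:             aws.String("policy-1234"),                        // placeholder policy ID
    })
    if err != nil {
        log.Fatalf("DeleteRetentionPolicy failed: %v", err)
    }
    log.Println("retention policy deleted")
}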
// // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -5614,6 +6951,9 @@ func (s *DescribeGroupInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5704,13 +7044,196 @@ func (s *DescribeGroupOutput) SetState(v string) *DescribeGroupOutput { return s } +type DescribeMailboxExportJobInput struct { + _ struct{} `type:"structure"` + + // The mailbox export job ID. + // + // JobId is a required field + JobId *string `min:"1" type:"string" required:"true"` + + // The organization ID. + // + // OrganizationId is a required field + OrganizationId *string `min:"34" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeMailboxExportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMailboxExportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeMailboxExportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeMailboxExportJobInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 1)) + } + if s.OrganizationId == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationId")) + } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobId sets the JobId field's value. +func (s *DescribeMailboxExportJobInput) SetJobId(v string) *DescribeMailboxExportJobInput { + s.JobId = &v + return s +} + +// SetOrganizationId sets the OrganizationId field's value. +func (s *DescribeMailboxExportJobInput) SetOrganizationId(v string) *DescribeMailboxExportJobInput { + s.OrganizationId = &v + return s +} + +type DescribeMailboxExportJobOutput struct { + _ struct{} `type:"structure"` + + // The mailbox export job description. + Description *string `type:"string"` + + // The mailbox export job end timestamp. + EndTime *time.Time `type:"timestamp"` + + // The identifier of the user or resource associated with the mailbox. + EntityId *string `min:"12" type:"string"` + + // Error information for failed mailbox export jobs. + ErrorInfo *string `min:"1" type:"string"` + + // The estimated progress of the mailbox export job, in percentage points. + EstimatedProgress *int64 `type:"integer"` + + // The Amazon Resource Name (ARN) of the symmetric AWS Key Management Service + // (AWS KMS) key that encrypts the exported mailbox content. + KmsKeyArn *string `min:"20" type:"string"` + + // The ARN of the AWS Identity and Access Management (IAM) role that grants + // write permission to the Amazon Simple Storage Service (Amazon S3) bucket. + RoleArn *string `min:"20" type:"string"` + + // The name of the S3 bucket. + S3BucketName *string `min:"1" type:"string"` + + // The path to the S3 bucket and file that the mailbox export job is exporting + // to. 
+ S3Path *string `min:"1" type:"string"` + + // The S3 bucket prefix. + S3Prefix *string `min:"1" type:"string"` + + // The mailbox export job start timestamp. + StartTime *time.Time `type:"timestamp"` + + // The state of the mailbox export job. + State *string `type:"string" enum:"MailboxExportJobState"` +} + +// String returns the string representation +func (s DescribeMailboxExportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeMailboxExportJobOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *DescribeMailboxExportJobOutput) SetDescription(v string) *DescribeMailboxExportJobOutput { + s.Description = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *DescribeMailboxExportJobOutput) SetEndTime(v time.Time) *DescribeMailboxExportJobOutput { + s.EndTime = &v + return s +} + +// SetEntityId sets the EntityId field's value. +func (s *DescribeMailboxExportJobOutput) SetEntityId(v string) *DescribeMailboxExportJobOutput { + s.EntityId = &v + return s +} + +// SetErrorInfo sets the ErrorInfo field's value. +func (s *DescribeMailboxExportJobOutput) SetErrorInfo(v string) *DescribeMailboxExportJobOutput { + s.ErrorInfo = &v + return s +} + +// SetEstimatedProgress sets the EstimatedProgress field's value. +func (s *DescribeMailboxExportJobOutput) SetEstimatedProgress(v int64) *DescribeMailboxExportJobOutput { + s.EstimatedProgress = &v + return s +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *DescribeMailboxExportJobOutput) SetKmsKeyArn(v string) *DescribeMailboxExportJobOutput { + s.KmsKeyArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *DescribeMailboxExportJobOutput) SetRoleArn(v string) *DescribeMailboxExportJobOutput { + s.RoleArn = &v + return s +} + +// SetS3BucketName sets the S3BucketName field's value. +func (s *DescribeMailboxExportJobOutput) SetS3BucketName(v string) *DescribeMailboxExportJobOutput { + s.S3BucketName = &v + return s +} + +// SetS3Path sets the S3Path field's value. +func (s *DescribeMailboxExportJobOutput) SetS3Path(v string) *DescribeMailboxExportJobOutput { + s.S3Path = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *DescribeMailboxExportJobOutput) SetS3Prefix(v string) *DescribeMailboxExportJobOutput { + s.S3Prefix = &v + return s +} + +// SetStartTime sets the StartTime field's value. +func (s *DescribeMailboxExportJobOutput) SetStartTime(v time.Time) *DescribeMailboxExportJobOutput { + s.StartTime = &v + return s +} + +// SetState sets the State field's value. +func (s *DescribeMailboxExportJobOutput) SetState(v string) *DescribeMailboxExportJobOutput { + s.State = &v + return s +} + type DescribeOrganizationInput struct { _ struct{} `type:"structure"` // The identifier for the organization to be described. 
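// A minimal sketch of checking an export job with DescribeMailboxExportJob and reading
// the State, EstimatedProgress, and S3Path fields shown above. The job and organization
// IDs are hypothetical placeholders.
package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/workmail"
)

func main() {
    svc := workmail.New(session.Must(session.NewSession()))

    out, err := svc.DescribeMailboxExportJob(&workmail.DescribeMailboxExportJobInput{
        JobId:          aws.String("b7a9d1e2-example-job-id"),            // placeholder
        OrganizationId: aws.String("m-0123456789abcdef0123456789abcdef"), // placeholder
    })
    if err != nil {
        log.Fatalf("DescribeMailboxExportJob failed: %v", err)
    }
    fmt.Printf("state=%s progress=%d%% path=%s\n",
        aws.StringValue(out.State),
        aws.Int64Value(out.EstimatedProgress),
        aws.StringValue(out.S3Path))
    if out.ErrorInfo != nil {
        fmt.Println("error info:", aws.StringValue(out.ErrorInfo))
    }
}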
// // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -5729,6 +7252,9 @@ func (s *DescribeOrganizationInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5769,7 +7295,7 @@ type DescribeOrganizationOutput struct { ErrorMessage *string `type:"string"` // The identifier of an organization. - OrganizationId *string `type:"string"` + OrganizationId *string `min:"34" type:"string"` // The state of an organization. State *string `type:"string"` @@ -5846,12 +7372,12 @@ type DescribeResourceInput struct { // described. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The identifier of the resource to be described. // // ResourceId is a required field - ResourceId *string `type:"string" required:"true"` + ResourceId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -5870,9 +7396,15 @@ func (s *DescribeResourceInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.ResourceId == nil { invalidParams.Add(request.NewErrParamRequired("ResourceId")) } + if s.ResourceId != nil && len(*s.ResourceId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -5913,7 +7445,7 @@ type DescribeResourceOutput struct { Name *string `min:"1" type:"string"` // The identifier of the described resource. - ResourceId *string `type:"string"` + ResourceId *string `min:"34" type:"string"` // The state of the resource: enabled (registered to Amazon WorkMail), disabled // (deregistered or never registered to WorkMail), or deleted. @@ -5987,7 +7519,7 @@ type DescribeUserInput struct { // The identifier for the organization under which the user exists. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The identifier for the user to be described. // @@ -6011,6 +7543,9 @@ func (s *DescribeUserInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.UserId == nil { invalidParams.Add(request.NewErrParamRequired("UserId")) } @@ -6130,10 +7665,67 @@ func (s *DescribeUserOutput) SetUserRole(v string) *DescribeUserOutput { return s } +// The directory is already in use by another WorkMail organization in the same +// account and Region. 
+type DirectoryInUseException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` +} + +// String returns the string representation +func (s DirectoryInUseException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DirectoryInUseException) GoString() string { + return s.String() +} + +func newErrorDirectoryInUseException(v protocol.ResponseMetadata) error { + return &DirectoryInUseException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *DirectoryInUseException) Code() string { + return "DirectoryInUseException" +} + +// Message returns the exception's message. +func (s *DirectoryInUseException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DirectoryInUseException) OrigErr() error { + return nil +} + +func (s *DirectoryInUseException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DirectoryInUseException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DirectoryInUseException) RequestID() string { + return s.RespMetadata.RequestID +} + // The directory service doesn't recognize the credentials supplied by WorkMail. type DirectoryServiceAuthenticationFailedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6150,17 +7742,17 @@ func (s DirectoryServiceAuthenticationFailedException) GoString() string { func newErrorDirectoryServiceAuthenticationFailedException(v protocol.ResponseMetadata) error { return &DirectoryServiceAuthenticationFailedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DirectoryServiceAuthenticationFailedException) Code() string { +func (s *DirectoryServiceAuthenticationFailedException) Code() string { return "DirectoryServiceAuthenticationFailedException" } // Message returns the exception's message. -func (s DirectoryServiceAuthenticationFailedException) Message() string { +func (s *DirectoryServiceAuthenticationFailedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6168,28 +7760,28 @@ func (s DirectoryServiceAuthenticationFailedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DirectoryServiceAuthenticationFailedException) OrigErr() error { +func (s *DirectoryServiceAuthenticationFailedException) OrigErr() error { return nil } -func (s DirectoryServiceAuthenticationFailedException) Error() string { +func (s *DirectoryServiceAuthenticationFailedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DirectoryServiceAuthenticationFailedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DirectoryServiceAuthenticationFailedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s DirectoryServiceAuthenticationFailedException) RequestID() string { - return s.respMetadata.RequestID +func (s *DirectoryServiceAuthenticationFailedException) RequestID() string { + return s.RespMetadata.RequestID } -// The directory on which you are trying to perform operations isn't available. +// The directory is unavailable. It might be located in another Region or deleted. type DirectoryUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6206,17 +7798,17 @@ func (s DirectoryUnavailableException) GoString() string { func newErrorDirectoryUnavailableException(v protocol.ResponseMetadata) error { return &DirectoryUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s DirectoryUnavailableException) Code() string { +func (s *DirectoryUnavailableException) Code() string { return "DirectoryUnavailableException" } // Message returns the exception's message. -func (s DirectoryUnavailableException) Message() string { +func (s *DirectoryUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6224,22 +7816,22 @@ func (s DirectoryUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s DirectoryUnavailableException) OrigErr() error { +func (s *DirectoryUnavailableException) OrigErr() error { return nil } -func (s DirectoryUnavailableException) Error() string { +func (s *DirectoryUnavailableException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s DirectoryUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *DirectoryUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s DirectoryUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *DirectoryUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } type DisassociateDelegateFromResourceInput struct { @@ -6254,12 +7846,12 @@ type DisassociateDelegateFromResourceInput struct { // The identifier for the organization under which the resource exists. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The identifier of the resource from which delegates' set members are removed. 
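// A minimal sketch of error inspection now that DirectoryUnavailableException (above)
// uses pointer receivers and an exported RespMetadata: a failed call can be handled
// through the generic awserr.Error interface, or, assuming the SDK surfaces the typed
// exception directly, asserted to the concrete type to read its HTTP status code and
// request ID. The UpdatePrimaryEmailAddressInput field names and all IDs below are
// hypothetical placeholders assumed from the operation's documented shape.
package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/workmail"
)

func main() {
    svc := workmail.New(session.Must(session.NewSession()))

    _, err := svc.UpdatePrimaryEmailAddress(&workmail.UpdatePrimaryEmailAddressInput{
        OrganizationId: aws.String("m-0123456789abcdef0123456789abcdef"),             // placeholder
        EntityId:       aws.String("S-1-1-11-1111111111-2222222222-3333333333-3333"), // placeholder
        Email:          aws.String("new-primary@example.com"),
    })
    if err != nil {
        if due, ok := err.(*workmail.DirectoryUnavailableException); ok {
            fmt.Printf("directory unavailable: status=%d request=%s\n", due.StatusCode(), due.RequestID())
            return
        }
        if aerr, ok := err.(awserr.Error); ok {
            log.Fatalf("%s: %s", aerr.Code(), aerr.Message())
        }
        log.Fatal(err)
    }
    fmt.Println("primary email address updated")
}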
// // ResourceId is a required field - ResourceId *string `type:"string" required:"true"` + ResourceId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -6284,9 +7876,15 @@ func (s *DisassociateDelegateFromResourceInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.ResourceId == nil { invalidParams.Add(request.NewErrParamRequired("ResourceId")) } + if s.ResourceId != nil && len(*s.ResourceId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -6342,7 +7940,7 @@ type DisassociateMemberFromGroupInput struct { // The identifier for the organization under which the group exists. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -6373,6 +7971,9 @@ func (s *DisassociateMemberFromGroupInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -6402,21 +8003,76 @@ type DisassociateMemberFromGroupOutput struct { _ struct{} `type:"structure"` } -// String returns the string representation -func (s DisassociateMemberFromGroupOutput) String() string { - return awsutil.Prettify(s) +// String returns the string representation +func (s DisassociateMemberFromGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateMemberFromGroupOutput) GoString() string { + return s.String() +} + +// The domain to associate with an Amazon WorkMail organization. +// +// When you configure a domain hosted in Amazon Route 53 (Route 53), all recommended +// DNS records are added to the organization when you create it. For more information, +// see Adding a domain (https://docs.aws.amazon.com/workmail/latest/adminguide/add_domain.html) +// in the Amazon WorkMail Administrator Guide. +type Domain struct { + _ struct{} `type:"structure"` + + // The fully qualified domain name. + DomainName *string `min:"3" type:"string"` + + // The hosted zone ID for a domain hosted in Route 53. Required when configuring + // a domain hosted in Route 53. + HostedZoneId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s Domain) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Domain) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Domain) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Domain"} + if s.DomainName != nil && len(*s.DomainName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("DomainName", 3)) + } + if s.HostedZoneId != nil && len(*s.HostedZoneId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HostedZoneId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDomainName sets the DomainName field's value. 
+func (s *Domain) SetDomainName(v string) *Domain { + s.DomainName = &v + return s } -// GoString returns the string representation -func (s DisassociateMemberFromGroupOutput) GoString() string { - return s.String() +// SetHostedZoneId sets the HostedZoneId field's value. +func (s *Domain) SetHostedZoneId(v string) *Domain { + s.HostedZoneId = &v + return s } // The email address that you're trying to assign is already created for a different // user, group, or resource. type EmailAddressInUseException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6433,17 +8089,17 @@ func (s EmailAddressInUseException) GoString() string { func newErrorEmailAddressInUseException(v protocol.ResponseMetadata) error { return &EmailAddressInUseException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EmailAddressInUseException) Code() string { +func (s *EmailAddressInUseException) Code() string { return "EmailAddressInUseException" } // Message returns the exception's message. -func (s EmailAddressInUseException) Message() string { +func (s *EmailAddressInUseException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6451,28 +8107,28 @@ func (s EmailAddressInUseException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EmailAddressInUseException) OrigErr() error { +func (s *EmailAddressInUseException) OrigErr() error { return nil } -func (s EmailAddressInUseException) Error() string { +func (s *EmailAddressInUseException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EmailAddressInUseException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EmailAddressInUseException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EmailAddressInUseException) RequestID() string { - return s.respMetadata.RequestID +func (s *EmailAddressInUseException) RequestID() string { + return s.RespMetadata.RequestID } // The user, group, or resource that you're trying to register is already registered. type EntityAlreadyRegisteredException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6489,17 +8145,17 @@ func (s EntityAlreadyRegisteredException) GoString() string { func newErrorEntityAlreadyRegisteredException(v protocol.ResponseMetadata) error { return &EntityAlreadyRegisteredException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EntityAlreadyRegisteredException) Code() string { +func (s *EntityAlreadyRegisteredException) Code() string { return "EntityAlreadyRegisteredException" } // Message returns the exception's message. -func (s EntityAlreadyRegisteredException) Message() string { +func (s *EntityAlreadyRegisteredException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6507,29 +8163,29 @@ func (s EntityAlreadyRegisteredException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s EntityAlreadyRegisteredException) OrigErr() error { +func (s *EntityAlreadyRegisteredException) OrigErr() error { return nil } -func (s EntityAlreadyRegisteredException) Error() string { +func (s *EntityAlreadyRegisteredException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EntityAlreadyRegisteredException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EntityAlreadyRegisteredException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EntityAlreadyRegisteredException) RequestID() string { - return s.respMetadata.RequestID +func (s *EntityAlreadyRegisteredException) RequestID() string { + return s.RespMetadata.RequestID } // The identifier supplied for the user, group, or resource does not exist in // your organization. type EntityNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6546,17 +8202,17 @@ func (s EntityNotFoundException) GoString() string { func newErrorEntityNotFoundException(v protocol.ResponseMetadata) error { return &EntityNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s EntityNotFoundException) Code() string { +func (s *EntityNotFoundException) Code() string { return "EntityNotFoundException" } // Message returns the exception's message. -func (s EntityNotFoundException) Message() string { +func (s *EntityNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6564,29 +8220,29 @@ func (s EntityNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EntityNotFoundException) OrigErr() error { +func (s *EntityNotFoundException) OrigErr() error { return nil } -func (s EntityNotFoundException) Error() string { +func (s *EntityNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EntityNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EntityNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EntityNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *EntityNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // You are performing an operation on a user, group, or resource that isn't // in the expected state, such as trying to delete an active user. type EntityStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6603,17 +8259,17 @@ func (s EntityStateException) GoString() string { func newErrorEntityStateException(v protocol.ResponseMetadata) error { return &EntityStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s EntityStateException) Code() string { +func (s *EntityStateException) Code() string { return "EntityStateException" } // Message returns the exception's message. -func (s EntityStateException) Message() string { +func (s *EntityStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6621,22 +8277,88 @@ func (s EntityStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s EntityStateException) OrigErr() error { +func (s *EntityStateException) OrigErr() error { return nil } -func (s EntityStateException) Error() string { +func (s *EntityStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s EntityStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *EntityStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s EntityStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *EntityStateException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The configuration applied to an organization's folders by its retention policy. +type FolderConfiguration struct { + _ struct{} `type:"structure"` + + // The action to take on the folder contents at the end of the folder configuration + // period. + // + // Action is a required field + Action *string `type:"string" required:"true" enum:"RetentionAction"` + + // The folder name. + // + // Name is a required field + Name *string `type:"string" required:"true" enum:"FolderName"` + + // The period of time at which the folder configuration action is applied. + Period *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s FolderConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FolderConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *FolderConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FolderConfiguration"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Period != nil && *s.Period < 1 { + invalidParams.Add(request.NewErrParamMinValue("Period", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *FolderConfiguration) SetAction(v string) *FolderConfiguration { + s.Action = &v + return s +} + +// SetName sets the Name field's value. +func (s *FolderConfiguration) SetName(v string) *FolderConfiguration { + s.Name = &v + return s +} + +// SetPeriod sets the Period field's value. +func (s *FolderConfiguration) SetPeriod(v int64) *FolderConfiguration { + s.Period = &v + return s } type GetAccessControlEffectInput struct { @@ -6656,7 +8378,7 @@ type GetAccessControlEffectInput struct { // The identifier for the organization. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The user ID. 
// @@ -6692,6 +8414,9 @@ func (s *GetAccessControlEffectInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.UserId == nil { invalidParams.Add(request.NewErrParamRequired("UserId")) } @@ -6761,6 +8486,97 @@ func (s *GetAccessControlEffectOutput) SetMatchedRules(v []*string) *GetAccessCo return s } +type GetDefaultRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The organization ID. + // + // OrganizationId is a required field + OrganizationId *string `min:"34" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetDefaultRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDefaultRetentionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetDefaultRetentionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetDefaultRetentionPolicyInput"} + if s.OrganizationId == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationId")) + } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOrganizationId sets the OrganizationId field's value. +func (s *GetDefaultRetentionPolicyInput) SetOrganizationId(v string) *GetDefaultRetentionPolicyInput { + s.OrganizationId = &v + return s +} + +type GetDefaultRetentionPolicyOutput struct { + _ struct{} `type:"structure"` + + // The retention policy description. + Description *string `type:"string"` + + // The retention policy folder configurations. + FolderConfigurations []*FolderConfiguration `type:"list"` + + // The retention policy ID. + Id *string `min:"1" type:"string"` + + // The retention policy name. + Name *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetDefaultRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetDefaultRetentionPolicyOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *GetDefaultRetentionPolicyOutput) SetDescription(v string) *GetDefaultRetentionPolicyOutput { + s.Description = &v + return s +} + +// SetFolderConfigurations sets the FolderConfigurations field's value. +func (s *GetDefaultRetentionPolicyOutput) SetFolderConfigurations(v []*FolderConfiguration) *GetDefaultRetentionPolicyOutput { + s.FolderConfigurations = v + return s +} + +// SetId sets the Id field's value. +func (s *GetDefaultRetentionPolicyOutput) SetId(v string) *GetDefaultRetentionPolicyOutput { + s.Id = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetDefaultRetentionPolicyOutput) SetName(v string) *GetDefaultRetentionPolicyOutput { + s.Name = &v + return s +} + type GetMailboxDetailsInput struct { _ struct{} `type:"structure"` @@ -6768,7 +8584,7 @@ type GetMailboxDetailsInput struct { // details are being requested. 
// // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The identifier for the user whose mailbox details are being requested. // @@ -6792,6 +8608,9 @@ func (s *GetMailboxDetailsInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.UserId == nil { invalidParams.Add(request.NewErrParamRequired("UserId")) } @@ -6922,8 +8741,8 @@ func (s *Group) SetState(v string) *Group { // to auto-respond to requests or have at least one delegate associated that // can do so on its behalf. type InvalidConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6940,17 +8759,17 @@ func (s InvalidConfigurationException) GoString() string { func newErrorInvalidConfigurationException(v protocol.ResponseMetadata) error { return &InvalidConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidConfigurationException) Code() string { +func (s *InvalidConfigurationException) Code() string { return "InvalidConfigurationException" } // Message returns the exception's message. -func (s InvalidConfigurationException) Message() string { +func (s *InvalidConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6958,28 +8777,28 @@ func (s InvalidConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidConfigurationException) OrigErr() error { +func (s *InvalidConfigurationException) OrigErr() error { return nil } -func (s InvalidConfigurationException) Error() string { +func (s *InvalidConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // One or more of the input parameters don't match the service's restrictions. type InvalidParameterException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -6996,17 +8815,17 @@ func (s InvalidParameterException) GoString() string { func newErrorInvalidParameterException(v protocol.ResponseMetadata) error { return &InvalidParameterException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterException) Code() string { +func (s *InvalidParameterException) Code() string { return "InvalidParameterException" } // Message returns the exception's message. 
-func (s InvalidParameterException) Message() string { +func (s *InvalidParameterException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7014,29 +8833,29 @@ func (s InvalidParameterException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterException) OrigErr() error { +func (s *InvalidParameterException) OrigErr() error { return nil } -func (s InvalidParameterException) Error() string { +func (s *InvalidParameterException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterException) RequestID() string { + return s.RespMetadata.RequestID } // The supplied password doesn't match the minimum security constraints, such // as length or use of special characters. type InvalidPasswordException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -7053,17 +8872,17 @@ func (s InvalidPasswordException) GoString() string { func newErrorInvalidPasswordException(v protocol.ResponseMetadata) error { return &InvalidPasswordException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidPasswordException) Code() string { +func (s *InvalidPasswordException) Code() string { return "InvalidPasswordException" } // Message returns the exception's message. -func (s InvalidPasswordException) Message() string { +func (s *InvalidPasswordException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7071,28 +8890,28 @@ func (s InvalidPasswordException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidPasswordException) OrigErr() error { +func (s *InvalidPasswordException) OrigErr() error { return nil } -func (s InvalidPasswordException) Error() string { +func (s *InvalidPasswordException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidPasswordException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidPasswordException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidPasswordException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidPasswordException) RequestID() string { + return s.RespMetadata.RequestID } // The request exceeds the limit of the resource. 
type LimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -7109,17 +8928,17 @@ func (s LimitExceededException) GoString() string { func newErrorLimitExceededException(v protocol.ResponseMetadata) error { return &LimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s LimitExceededException) Code() string { +func (s *LimitExceededException) Code() string { return "LimitExceededException" } // Message returns the exception's message. -func (s LimitExceededException) Message() string { +func (s *LimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7127,22 +8946,22 @@ func (s LimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s LimitExceededException) OrigErr() error { +func (s *LimitExceededException) OrigErr() error { return nil } -func (s LimitExceededException) Error() string { +func (s *LimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s LimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *LimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s LimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *LimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } type ListAccessControlRulesInput struct { @@ -7151,7 +8970,7 @@ type ListAccessControlRulesInput struct { // The identifier for the organization. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -7170,6 +8989,9 @@ func (s *ListAccessControlRulesInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7224,7 +9046,7 @@ type ListAliasesInput struct { // The identifier for the organization under which the entity exists. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -7255,6 +9077,9 @@ func (s *ListAliasesInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7337,7 +9162,7 @@ type ListGroupMembersInput struct { // The identifier for the organization under which the group exists. 
// // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -7368,6 +9193,9 @@ func (s *ListGroupMembersInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7445,7 +9273,7 @@ type ListGroupsInput struct { // The identifier for the organization under which the groups exist. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -7470,6 +9298,9 @@ func (s *ListGroupsInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7528,6 +9359,103 @@ func (s *ListGroupsOutput) SetNextToken(v string) *ListGroupsOutput { return s } +type ListMailboxExportJobsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results to return in a single call. + MaxResults *int64 `min:"1" type:"integer"` + + // The token to use to retrieve the next page of results. + NextToken *string `min:"1" type:"string"` + + // The organization ID. + // + // OrganizationId is a required field + OrganizationId *string `min:"34" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListMailboxExportJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMailboxExportJobsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMailboxExportJobsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMailboxExportJobsInput"} + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.OrganizationId == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationId")) + } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListMailboxExportJobsInput) SetMaxResults(v int64) *ListMailboxExportJobsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListMailboxExportJobsInput) SetNextToken(v string) *ListMailboxExportJobsInput { + s.NextToken = &v + return s +} + +// SetOrganizationId sets the OrganizationId field's value. +func (s *ListMailboxExportJobsInput) SetOrganizationId(v string) *ListMailboxExportJobsInput { + s.OrganizationId = &v + return s +} + +type ListMailboxExportJobsOutput struct { + _ struct{} `type:"structure"` + + // The mailbox export job details. 
+ Jobs []*MailboxExportJob `type:"list"` + + // The token to use to retrieve the next page of results. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListMailboxExportJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMailboxExportJobsOutput) GoString() string { + return s.String() +} + +// SetJobs sets the Jobs field's value. +func (s *ListMailboxExportJobsOutput) SetJobs(v []*MailboxExportJob) *ListMailboxExportJobsOutput { + s.Jobs = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListMailboxExportJobsOutput) SetNextToken(v string) *ListMailboxExportJobsOutput { + s.NextToken = &v + return s +} + type ListMailboxPermissionsInput struct { _ struct{} `type:"structure"` @@ -7548,7 +9476,7 @@ type ListMailboxPermissionsInput struct { // exists. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -7579,6 +9507,9 @@ func (s *ListMailboxPermissionsInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7738,7 +9669,7 @@ type ListResourceDelegatesInput struct { // delegates are listed. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The identifier for the resource whose delegates are listed. // @@ -7768,6 +9699,9 @@ func (s *ListResourceDelegatesInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.ResourceId == nil { invalidParams.Add(request.NewErrParamRequired("ResourceId")) } @@ -7852,7 +9786,7 @@ type ListResourcesInput struct { // The identifier for the organization under which the resources exist. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -7877,6 +9811,9 @@ func (s *ListResourcesInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8013,7 +9950,7 @@ type ListUsersInput struct { // The identifier for the organization under which the users exist. 
// // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -8038,6 +9975,9 @@ func (s *ListUsersInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8099,8 +10039,8 @@ func (s *ListUsersOutput) SetUsers(v []*User) *ListUsersOutput { // For an email or alias to be created in Amazon WorkMail, the included domain // must be defined in the organization. type MailDomainNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -8117,17 +10057,17 @@ func (s MailDomainNotFoundException) GoString() string { func newErrorMailDomainNotFoundException(v protocol.ResponseMetadata) error { return &MailDomainNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MailDomainNotFoundException) Code() string { +func (s *MailDomainNotFoundException) Code() string { return "MailDomainNotFoundException" } // Message returns the exception's message. -func (s MailDomainNotFoundException) Message() string { +func (s *MailDomainNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8135,29 +10075,29 @@ func (s MailDomainNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s MailDomainNotFoundException) OrigErr() error { +func (s *MailDomainNotFoundException) OrigErr() error { return nil } -func (s MailDomainNotFoundException) Error() string { +func (s *MailDomainNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s MailDomainNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *MailDomainNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s MailDomainNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *MailDomainNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // After a domain has been added to the organization, it must be verified. The // domain is not yet verified. type MailDomainStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -8174,40 +10114,138 @@ func (s MailDomainStateException) GoString() string { func newErrorMailDomainStateException(v protocol.ResponseMetadata) error { return &MailDomainStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s MailDomainStateException) Code() string { +func (s *MailDomainStateException) Code() string { return "MailDomainStateException" } // Message returns the exception's message. 
-func (s MailDomainStateException) Message() string { +func (s *MailDomainStateException) Message() string { if s.Message_ != nil { return *s.Message_ } return "" } -// OrigErr always returns nil, satisfies awserr.Error interface. -func (s MailDomainStateException) OrigErr() error { - return nil +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *MailDomainStateException) OrigErr() error { + return nil +} + +func (s *MailDomainStateException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *MailDomainStateException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *MailDomainStateException) RequestID() string { + return s.RespMetadata.RequestID +} + +// The details of a mailbox export job, including the user or resource ID associated +// with the mailbox and the S3 bucket that the mailbox contents are exported +// to. +type MailboxExportJob struct { + _ struct{} `type:"structure"` + + // The mailbox export job description. + Description *string `type:"string"` + + // The mailbox export job end timestamp. + EndTime *time.Time `type:"timestamp"` + + // The identifier of the user or resource associated with the mailbox. + EntityId *string `min:"12" type:"string"` + + // The estimated progress of the mailbox export job, in percentage points. + EstimatedProgress *int64 `type:"integer"` + + // The identifier of the mailbox export job. + JobId *string `min:"1" type:"string"` + + // The name of the S3 bucket. + S3BucketName *string `min:"1" type:"string"` + + // The path to the S3 bucket and file that the mailbox export job exports to. + S3Path *string `min:"1" type:"string"` + + // The mailbox export job start timestamp. + StartTime *time.Time `type:"timestamp"` + + // The state of the mailbox export job. + State *string `type:"string" enum:"MailboxExportJobState"` +} + +// String returns the string representation +func (s MailboxExportJob) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MailboxExportJob) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *MailboxExportJob) SetDescription(v string) *MailboxExportJob { + s.Description = &v + return s +} + +// SetEndTime sets the EndTime field's value. +func (s *MailboxExportJob) SetEndTime(v time.Time) *MailboxExportJob { + s.EndTime = &v + return s +} + +// SetEntityId sets the EntityId field's value. +func (s *MailboxExportJob) SetEntityId(v string) *MailboxExportJob { + s.EntityId = &v + return s +} + +// SetEstimatedProgress sets the EstimatedProgress field's value. +func (s *MailboxExportJob) SetEstimatedProgress(v int64) *MailboxExportJob { + s.EstimatedProgress = &v + return s +} + +// SetJobId sets the JobId field's value. +func (s *MailboxExportJob) SetJobId(v string) *MailboxExportJob { + s.JobId = &v + return s +} + +// SetS3BucketName sets the S3BucketName field's value. +func (s *MailboxExportJob) SetS3BucketName(v string) *MailboxExportJob { + s.S3BucketName = &v + return s } -func (s MailDomainStateException) Error() string { - return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +// SetS3Path sets the S3Path field's value. 
+func (s *MailboxExportJob) SetS3Path(v string) *MailboxExportJob { + s.S3Path = &v + return s } -// Status code returns the HTTP status code for the request's response error. -func (s MailDomainStateException) StatusCode() int { - return s.respMetadata.StatusCode +// SetStartTime sets the StartTime field's value. +func (s *MailboxExportJob) SetStartTime(v time.Time) *MailboxExportJob { + s.StartTime = &v + return s } -// RequestID returns the service's response RequestID for request. -func (s MailDomainStateException) RequestID() string { - return s.respMetadata.RequestID +// SetState sets the State field's value. +func (s *MailboxExportJob) SetState(v string) *MailboxExportJob { + s.State = &v + return s } // The representation of a user or group. @@ -8281,8 +10319,8 @@ func (s *Member) SetType(v string) *Member { // The user, group, or resource name isn't unique in Amazon WorkMail. type NameAvailabilityException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -8299,17 +10337,17 @@ func (s NameAvailabilityException) GoString() string { func newErrorNameAvailabilityException(v protocol.ResponseMetadata) error { return &NameAvailabilityException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s NameAvailabilityException) Code() string { +func (s *NameAvailabilityException) Code() string { return "NameAvailabilityException" } // Message returns the exception's message. -func (s NameAvailabilityException) Message() string { +func (s *NameAvailabilityException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8317,29 +10355,29 @@ func (s NameAvailabilityException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s NameAvailabilityException) OrigErr() error { +func (s *NameAvailabilityException) OrigErr() error { return nil } -func (s NameAvailabilityException) Error() string { +func (s *NameAvailabilityException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s NameAvailabilityException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *NameAvailabilityException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s NameAvailabilityException) RequestID() string { - return s.respMetadata.RequestID +func (s *NameAvailabilityException) RequestID() string { + return s.RespMetadata.RequestID } // An operation received a valid organization identifier that either doesn't // belong or exist in the system. type OrganizationNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -8356,17 +10394,17 @@ func (s OrganizationNotFoundException) GoString() string { func newErrorOrganizationNotFoundException(v protocol.ResponseMetadata) error { return &OrganizationNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. 
-func (s OrganizationNotFoundException) Code() string { +func (s *OrganizationNotFoundException) Code() string { return "OrganizationNotFoundException" } // Message returns the exception's message. -func (s OrganizationNotFoundException) Message() string { +func (s *OrganizationNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8374,29 +10412,29 @@ func (s OrganizationNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OrganizationNotFoundException) OrigErr() error { +func (s *OrganizationNotFoundException) OrigErr() error { return nil } -func (s OrganizationNotFoundException) Error() string { +func (s *OrganizationNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OrganizationNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OrganizationNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OrganizationNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *OrganizationNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } -// The organization must have a valid state (Active or Synchronizing) to perform -// certain operations on the organization or its members. +// The organization must have a valid state to perform certain operations on +// the organization or its members. type OrganizationStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -8413,17 +10451,17 @@ func (s OrganizationStateException) GoString() string { func newErrorOrganizationStateException(v protocol.ResponseMetadata) error { return &OrganizationStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OrganizationStateException) Code() string { +func (s *OrganizationStateException) Code() string { return "OrganizationStateException" } // Message returns the exception's message. -func (s OrganizationStateException) Message() string { +func (s *OrganizationStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8431,22 +10469,22 @@ func (s OrganizationStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OrganizationStateException) OrigErr() error { +func (s *OrganizationStateException) OrigErr() error { return nil } -func (s OrganizationStateException) Error() string { +func (s *OrganizationStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OrganizationStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OrganizationStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OrganizationStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *OrganizationStateException) RequestID() string { + return s.RespMetadata.RequestID } // The representation of an organization. 
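Editor's note (not part of the vendored diff): the hunks above migrate every generated WorkMail exception type (EmailAddressInUseException, EntityNotFoundException, OrganizationNotFoundException, and so on) from value receivers over an unexported respMetadata field to pointer receivers over an exported RespMetadata field, and they add the retention-policy and mailbox-export shapes (FolderConfiguration, PutRetentionPolicyInput, MailboxExportJob, StartMailboxExportJobInput) plus min-length validation on OrganizationId. The following is a minimal caller-side sketch under stated assumptions, not a definitive implementation: it assumes the standard vendored import path github.com/aws/aws-sdk-go/service/workmail, a made-up placeholder organization ID, that the generated unmarshalers return these typed errors directly (as in upstream aws-sdk-go), and that "DELETE" is a valid RetentionAction value and a PutRetentionPolicy operation wrapper is generated elsewhere in this file, since neither appears in this excerpt.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workmail"
)

func main() {
	// A WorkMail client built from the default credential/region chain.
	client := workmail.New(session.Must(session.NewSession()))

	// OrganizationId is required and, after this update, carries a min:"34"
	// constraint (see the tag changes above). The ID here is a made-up placeholder.
	orgID := aws.String("m-0123456789abcdef0123456789abcdef")

	// 1) Matching the retagged exception types. Because Code/Message/StatusCode
	// now use pointer receivers, the concrete error value is a pointer, so
	// errors.As targets a **OrganizationNotFoundException.
	_, err := client.ListUsers(&workmail.ListUsersInput{OrganizationId: orgID})
	var notFound *workmail.OrganizationNotFoundException
	if errors.As(err, &notFound) {
		fmt.Println(notFound.Code(), notFound.Message(), notFound.StatusCode())
	}

	// 2) Exercising the new retention-policy shapes. "DELETE" is an assumed
	// RetentionAction value (that enum is not shown in this excerpt); the folder
	// name constant is defined later in this file.
	policy := &workmail.PutRetentionPolicyInput{
		Name:           aws.String("default-retention"), // required, min length 1
		OrganizationId: orgID,                           // required, min length 34
		FolderConfigurations: []*workmail.FolderConfiguration{{
			Name:   aws.String(workmail.FolderNameDeletedItems),
			Action: aws.String("DELETE"),
			Period: aws.Int64(90), // optional, min value 1
		}},
	}
	// Validate applies the required-field and min-length checks added in these
	// hunks; a valid input would then be passed to the generated
	// PutRetentionPolicy operation (assumed to exist elsewhere in this file).
	if err := policy.Validate(); err != nil {
		fmt.Println(err)
	}
}

The sketch stays within the types visible in these hunks; in real use the organization ID, policy name, and retention period would come from the caller's own configuration.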
@@ -8456,13 +10494,16 @@ type OrganizationSummary struct { // The alias associated with the organization. Alias *string `min:"1" type:"string"` + // The default email domain associated with the organization. + DefaultMailDomain *string `min:"3" type:"string"` + // The error message associated with the organization. It is only present if // unexpected behavior has occurred with regards to the organization. It provides // insight or solutions regarding unexpected behavior. ErrorMessage *string `type:"string"` // The identifier associated with the organization. - OrganizationId *string `type:"string"` + OrganizationId *string `min:"34" type:"string"` // The state associated with the organization. State *string `type:"string"` @@ -8484,6 +10525,12 @@ func (s *OrganizationSummary) SetAlias(v string) *OrganizationSummary { return s } +// SetDefaultMailDomain sets the DefaultMailDomain field's value. +func (s *OrganizationSummary) SetDefaultMailDomain(v string) *OrganizationSummary { + s.DefaultMailDomain = &v + return s +} + // SetErrorMessage sets the ErrorMessage field's value. func (s *OrganizationSummary) SetErrorMessage(v string) *OrganizationSummary { s.ErrorMessage = &v @@ -8595,7 +10642,7 @@ type PutAccessControlRuleInput struct { // The identifier of the organization. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // User IDs to include in the rule. UserIds []*string `type:"list"` @@ -8629,6 +10676,9 @@ func (s *PutAccessControlRuleInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8728,7 +10778,7 @@ type PutMailboxPermissionsInput struct { // exists. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The permissions granted to the grantee. SEND_AS allows the grantee to send // email as the owner of the mailbox (the grantee is not mentioned on these @@ -8769,6 +10819,9 @@ func (s *PutMailboxPermissionsInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.PermissionValues == nil { invalidParams.Add(request.NewErrParamRequired("PermissionValues")) } @@ -8817,6 +10870,123 @@ func (s PutMailboxPermissionsOutput) GoString() string { return s.String() } +type PutRetentionPolicyInput struct { + _ struct{} `type:"structure"` + + // The retention policy description. + Description *string `type:"string" sensitive:"true"` + + // The retention policy folder configurations. + // + // FolderConfigurations is a required field + FolderConfigurations []*FolderConfiguration `type:"list" required:"true"` + + // The retention policy ID. + Id *string `min:"1" type:"string"` + + // The retention policy name. + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The organization ID. 
+ // + // OrganizationId is a required field + OrganizationId *string `min:"34" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRetentionPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRetentionPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRetentionPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRetentionPolicyInput"} + if s.FolderConfigurations == nil { + invalidParams.Add(request.NewErrParamRequired("FolderConfigurations")) + } + if s.Id != nil && len(*s.Id) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Id", 1)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.OrganizationId == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationId")) + } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } + if s.FolderConfigurations != nil { + for i, v := range s.FolderConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "FolderConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *PutRetentionPolicyInput) SetDescription(v string) *PutRetentionPolicyInput { + s.Description = &v + return s +} + +// SetFolderConfigurations sets the FolderConfigurations field's value. +func (s *PutRetentionPolicyInput) SetFolderConfigurations(v []*FolderConfiguration) *PutRetentionPolicyInput { + s.FolderConfigurations = v + return s +} + +// SetId sets the Id field's value. +func (s *PutRetentionPolicyInput) SetId(v string) *PutRetentionPolicyInput { + s.Id = &v + return s +} + +// SetName sets the Name field's value. +func (s *PutRetentionPolicyInput) SetName(v string) *PutRetentionPolicyInput { + s.Name = &v + return s +} + +// SetOrganizationId sets the OrganizationId field's value. +func (s *PutRetentionPolicyInput) SetOrganizationId(v string) *PutRetentionPolicyInput { + s.OrganizationId = &v + return s +} + +type PutRetentionPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRetentionPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRetentionPolicyOutput) GoString() string { + return s.String() +} + type RegisterToWorkMailInput struct { _ struct{} `type:"structure"` @@ -8834,7 +11004,7 @@ type RegisterToWorkMailInput struct { // exists. 
// // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -8865,6 +11035,9 @@ func (s *RegisterToWorkMailInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8906,8 +11079,8 @@ func (s RegisterToWorkMailOutput) GoString() string { // This user, group, or resource name is not allowed in Amazon WorkMail. type ReservedNameException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -8924,17 +11097,17 @@ func (s ReservedNameException) GoString() string { func newErrorReservedNameException(v protocol.ResponseMetadata) error { return &ReservedNameException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ReservedNameException) Code() string { +func (s *ReservedNameException) Code() string { return "ReservedNameException" } // Message returns the exception's message. -func (s ReservedNameException) Message() string { +func (s *ReservedNameException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8942,22 +11115,22 @@ func (s ReservedNameException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ReservedNameException) OrigErr() error { +func (s *ReservedNameException) OrigErr() error { return nil } -func (s ReservedNameException) Error() string { +func (s *ReservedNameException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ReservedNameException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ReservedNameException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ReservedNameException) RequestID() string { - return s.respMetadata.RequestID +func (s *ReservedNameException) RequestID() string { + return s.RespMetadata.RequestID } type ResetPasswordInput struct { @@ -8967,7 +11140,7 @@ type ResetPasswordInput struct { // is reset. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The new password for the user. // @@ -8996,6 +11169,9 @@ func (s *ResetPasswordInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.Password == nil { invalidParams.Add(request.NewErrParamRequired("Password")) } @@ -9124,8 +11300,8 @@ func (s *Resource) SetType(v string) *Resource { // The resource cannot be found. 
type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -9142,17 +11318,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9160,22 +11336,194 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StartMailboxExportJobInput struct { + _ struct{} `type:"structure"` + + // The idempotency token for the client request. + ClientToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The mailbox export job description. + Description *string `type:"string"` + + // The identifier of the user or resource associated with the mailbox. + // + // EntityId is a required field + EntityId *string `min:"12" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the symmetric AWS Key Management Service + // (AWS KMS) key that encrypts the exported mailbox content. + // + // KmsKeyArn is a required field + KmsKeyArn *string `min:"20" type:"string" required:"true"` + + // The identifier associated with the organization. + // + // OrganizationId is a required field + OrganizationId *string `min:"34" type:"string" required:"true"` + + // The ARN of the AWS Identity and Access Management (IAM) role that grants + // write permission to the S3 bucket. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The name of the S3 bucket. + // + // S3BucketName is a required field + S3BucketName *string `min:"1" type:"string" required:"true"` + + // The S3 bucket prefix. + // + // S3Prefix is a required field + S3Prefix *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartMailboxExportJobInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartMailboxExportJobInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StartMailboxExportJobInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartMailboxExportJobInput"} + if s.ClientToken != nil && len(*s.ClientToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientToken", 1)) + } + if s.EntityId == nil { + invalidParams.Add(request.NewErrParamRequired("EntityId")) + } + if s.EntityId != nil && len(*s.EntityId) < 12 { + invalidParams.Add(request.NewErrParamMinLen("EntityId", 12)) + } + if s.KmsKeyArn == nil { + invalidParams.Add(request.NewErrParamRequired("KmsKeyArn")) + } + if s.KmsKeyArn != nil && len(*s.KmsKeyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("KmsKeyArn", 20)) + } + if s.OrganizationId == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationId")) + } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.S3BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketName")) + } + if s.S3BucketName != nil && len(*s.S3BucketName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("S3BucketName", 1)) + } + if s.S3Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("S3Prefix")) + } + if s.S3Prefix != nil && len(*s.S3Prefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("S3Prefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientToken sets the ClientToken field's value. +func (s *StartMailboxExportJobInput) SetClientToken(v string) *StartMailboxExportJobInput { + s.ClientToken = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *StartMailboxExportJobInput) SetDescription(v string) *StartMailboxExportJobInput { + s.Description = &v + return s +} + +// SetEntityId sets the EntityId field's value. +func (s *StartMailboxExportJobInput) SetEntityId(v string) *StartMailboxExportJobInput { + s.EntityId = &v + return s +} + +// SetKmsKeyArn sets the KmsKeyArn field's value. +func (s *StartMailboxExportJobInput) SetKmsKeyArn(v string) *StartMailboxExportJobInput { + s.KmsKeyArn = &v + return s +} + +// SetOrganizationId sets the OrganizationId field's value. +func (s *StartMailboxExportJobInput) SetOrganizationId(v string) *StartMailboxExportJobInput { + s.OrganizationId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *StartMailboxExportJobInput) SetRoleArn(v string) *StartMailboxExportJobInput { + s.RoleArn = &v + return s +} + +// SetS3BucketName sets the S3BucketName field's value. +func (s *StartMailboxExportJobInput) SetS3BucketName(v string) *StartMailboxExportJobInput { + s.S3BucketName = &v + return s +} + +// SetS3Prefix sets the S3Prefix field's value. +func (s *StartMailboxExportJobInput) SetS3Prefix(v string) *StartMailboxExportJobInput { + s.S3Prefix = &v + return s +} + +type StartMailboxExportJobOutput struct { + _ struct{} `type:"structure"` + + // The job ID. + JobId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s StartMailboxExportJobOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartMailboxExportJobOutput) GoString() string { + return s.String() +} + +// SetJobId sets the JobId field's value. 
+func (s *StartMailboxExportJobOutput) SetJobId(v string) *StartMailboxExportJobOutput { + s.JobId = &v + return s } // Describes a tag applied to a resource. @@ -9315,8 +11663,8 @@ func (s TagResourceOutput) GoString() string { // The resource can have up to 50 user-applied tags. type TooManyTagsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -9333,17 +11681,17 @@ func (s TooManyTagsException) GoString() string { func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { return &TooManyTagsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s TooManyTagsException) Code() string { +func (s *TooManyTagsException) Code() string { return "TooManyTagsException" } // Message returns the exception's message. -func (s TooManyTagsException) Message() string { +func (s *TooManyTagsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9351,28 +11699,28 @@ func (s TooManyTagsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s TooManyTagsException) OrigErr() error { +func (s *TooManyTagsException) OrigErr() error { return nil } -func (s TooManyTagsException) Error() string { +func (s *TooManyTagsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s TooManyTagsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s TooManyTagsException) RequestID() string { - return s.respMetadata.RequestID +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID } // You can't perform a write operation against a read-only directory. type UnsupportedOperationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -9389,17 +11737,17 @@ func (s UnsupportedOperationException) GoString() string { func newErrorUnsupportedOperationException(v protocol.ResponseMetadata) error { return &UnsupportedOperationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedOperationException) Code() string { +func (s *UnsupportedOperationException) Code() string { return "UnsupportedOperationException" } // Message returns the exception's message. -func (s UnsupportedOperationException) Message() string { +func (s *UnsupportedOperationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9407,22 +11755,22 @@ func (s UnsupportedOperationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedOperationException) OrigErr() error { +func (s *UnsupportedOperationException) OrigErr() error { return nil } -func (s UnsupportedOperationException) Error() string { +func (s *UnsupportedOperationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s UnsupportedOperationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedOperationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedOperationException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedOperationException) RequestID() string { + return s.RespMetadata.RequestID } type UntagResourceInput struct { @@ -9506,7 +11854,7 @@ type UpdateMailboxQuotaInput struct { // the mailbox quota. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The identifer for the user for whom to update the mailbox quota. // @@ -9536,6 +11884,9 @@ func (s *UpdateMailboxQuotaInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.UserId == nil { invalidParams.Add(request.NewErrParamRequired("UserId")) } @@ -9597,7 +11948,7 @@ type UpdatePrimaryEmailAddressInput struct { // The organization that contains the user, group, or resource to update. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -9628,6 +11979,9 @@ func (s *UpdatePrimaryEmailAddressInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9680,12 +12034,12 @@ type UpdateResourceInput struct { // updated. // // OrganizationId is a required field - OrganizationId *string `type:"string" required:"true"` + OrganizationId *string `min:"34" type:"string" required:"true"` // The identifier of the resource to be updated. 
// // ResourceId is a required field - ResourceId *string `type:"string" required:"true"` + ResourceId *string `min:"34" type:"string" required:"true"` } // String returns the string representation @@ -9707,9 +12061,15 @@ func (s *UpdateResourceInput) Validate() error { if s.OrganizationId == nil { invalidParams.Add(request.NewErrParamRequired("OrganizationId")) } + if s.OrganizationId != nil && len(*s.OrganizationId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("OrganizationId", 34)) + } if s.ResourceId == nil { invalidParams.Add(request.NewErrParamRequired("ResourceId")) } + if s.ResourceId != nil && len(*s.ResourceId) < 34 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 34)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9850,6 +12210,14 @@ const ( AccessControlRuleEffectDeny = "DENY" ) +// AccessControlRuleEffect_Values returns all elements of the AccessControlRuleEffect enum +func AccessControlRuleEffect_Values() []string { + return []string{ + AccessControlRuleEffectAllow, + AccessControlRuleEffectDeny, + } +} + const ( // EntityStateEnabled is a EntityState enum value EntityStateEnabled = "ENABLED" @@ -9861,6 +12229,67 @@ const ( EntityStateDeleted = "DELETED" ) +// EntityState_Values returns all elements of the EntityState enum +func EntityState_Values() []string { + return []string{ + EntityStateEnabled, + EntityStateDisabled, + EntityStateDeleted, + } +} + +const ( + // FolderNameInbox is a FolderName enum value + FolderNameInbox = "INBOX" + + // FolderNameDeletedItems is a FolderName enum value + FolderNameDeletedItems = "DELETED_ITEMS" + + // FolderNameSentItems is a FolderName enum value + FolderNameSentItems = "SENT_ITEMS" + + // FolderNameDrafts is a FolderName enum value + FolderNameDrafts = "DRAFTS" + + // FolderNameJunkEmail is a FolderName enum value + FolderNameJunkEmail = "JUNK_EMAIL" +) + +// FolderName_Values returns all elements of the FolderName enum +func FolderName_Values() []string { + return []string{ + FolderNameInbox, + FolderNameDeletedItems, + FolderNameSentItems, + FolderNameDrafts, + FolderNameJunkEmail, + } +} + +const ( + // MailboxExportJobStateRunning is a MailboxExportJobState enum value + MailboxExportJobStateRunning = "RUNNING" + + // MailboxExportJobStateCompleted is a MailboxExportJobState enum value + MailboxExportJobStateCompleted = "COMPLETED" + + // MailboxExportJobStateFailed is a MailboxExportJobState enum value + MailboxExportJobStateFailed = "FAILED" + + // MailboxExportJobStateCancelled is a MailboxExportJobState enum value + MailboxExportJobStateCancelled = "CANCELLED" +) + +// MailboxExportJobState_Values returns all elements of the MailboxExportJobState enum +func MailboxExportJobState_Values() []string { + return []string{ + MailboxExportJobStateRunning, + MailboxExportJobStateCompleted, + MailboxExportJobStateFailed, + MailboxExportJobStateCancelled, + } +} + const ( // MemberTypeGroup is a MemberType enum value MemberTypeGroup = "GROUP" @@ -9869,6 +12298,14 @@ const ( MemberTypeUser = "USER" ) +// MemberType_Values returns all elements of the MemberType enum +func MemberType_Values() []string { + return []string{ + MemberTypeGroup, + MemberTypeUser, + } +} + const ( // PermissionTypeFullAccess is a PermissionType enum value PermissionTypeFullAccess = "FULL_ACCESS" @@ -9880,6 +12317,15 @@ const ( PermissionTypeSendOnBehalf = "SEND_ON_BEHALF" ) +// PermissionType_Values returns all elements of the PermissionType enum +func PermissionType_Values() []string { + return []string{ + 
PermissionTypeFullAccess, + PermissionTypeSendAs, + PermissionTypeSendOnBehalf, + } +} + const ( // ResourceTypeRoom is a ResourceType enum value ResourceTypeRoom = "ROOM" @@ -9888,6 +12334,34 @@ const ( ResourceTypeEquipment = "EQUIPMENT" ) +// ResourceType_Values returns all elements of the ResourceType enum +func ResourceType_Values() []string { + return []string{ + ResourceTypeRoom, + ResourceTypeEquipment, + } +} + +const ( + // RetentionActionNone is a RetentionAction enum value + RetentionActionNone = "NONE" + + // RetentionActionDelete is a RetentionAction enum value + RetentionActionDelete = "DELETE" + + // RetentionActionPermanentlyDelete is a RetentionAction enum value + RetentionActionPermanentlyDelete = "PERMANENTLY_DELETE" +) + +// RetentionAction_Values returns all elements of the RetentionAction enum +func RetentionAction_Values() []string { + return []string{ + RetentionActionNone, + RetentionActionDelete, + RetentionActionPermanentlyDelete, + } +} + const ( // UserRoleUser is a UserRole enum value UserRoleUser = "USER" @@ -9898,3 +12372,12 @@ const ( // UserRoleSystemUser is a UserRole enum value UserRoleSystemUser = "SYSTEM_USER" ) + +// UserRole_Values returns all elements of the UserRole enum +func UserRole_Values() []string { + return []string{ + UserRoleUser, + UserRoleResource, + UserRoleSystemUser, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/workmail/errors.go b/vendor/github.com/aws/aws-sdk-go/service/workmail/errors.go index 1008704cc..799c08b5f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/workmail/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/workmail/errors.go @@ -8,6 +8,13 @@ import ( const ( + // ErrCodeDirectoryInUseException for service response error code + // "DirectoryInUseException". + // + // The directory is already in use by another WorkMail organization in the same + // account and Region. + ErrCodeDirectoryInUseException = "DirectoryInUseException" + // ErrCodeDirectoryServiceAuthenticationFailedException for service response error code // "DirectoryServiceAuthenticationFailedException". // @@ -17,7 +24,7 @@ const ( // ErrCodeDirectoryUnavailableException for service response error code // "DirectoryUnavailableException". // - // The directory on which you are trying to perform operations isn't available. + // The directory is unavailable. It might be located in another Region or deleted. ErrCodeDirectoryUnavailableException = "DirectoryUnavailableException" // ErrCodeEmailAddressInUseException for service response error code @@ -104,8 +111,8 @@ const ( // ErrCodeOrganizationStateException for service response error code // "OrganizationStateException". // - // The organization must have a valid state (Active or Synchronizing) to perform - // certain operations on the organization or its members. + // The organization must have a valid state to perform certain operations on + // the organization or its members. 
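
The *_Values helpers added above return every member of the corresponding enum, which is convenient for validating user-supplied strings before a request is built. A tiny illustrative sketch (not part of the patch) using the FolderName_Values helper shown earlier in this hunk:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/workmail"
)

// isValidFolderName reports whether v is one of the FolderName enum values
// declared above (INBOX, DELETED_ITEMS, SENT_ITEMS, DRAFTS, JUNK_EMAIL).
func isValidFolderName(v string) bool {
	for _, allowed := range workmail.FolderName_Values() {
		if v == allowed {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isValidFolderName("INBOX"))   // true
	fmt.Println(isValidFolderName("ARCHIVE")) // false
}
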
ErrCodeOrganizationStateException = "OrganizationStateException" // ErrCodeReservedNameException for service response error code @@ -134,6 +141,7 @@ const ( ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "DirectoryInUseException": newErrorDirectoryInUseException, "DirectoryServiceAuthenticationFailedException": newErrorDirectoryServiceAuthenticationFailedException, "DirectoryUnavailableException": newErrorDirectoryUnavailableException, "EmailAddressInUseException": newErrorEmailAddressInUseException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/workmail/service.go b/vendor/github.com/aws/aws-sdk-go/service/workmail/service.go index 09cc83f5b..f814d6222 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/workmail/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/workmail/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go index 5ec35c9e4..f4d371c0d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go @@ -13,6 +13,105 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) +const opAssociateConnectionAlias = "AssociateConnectionAlias" + +// AssociateConnectionAliasRequest generates a "aws/request.Request" representing the +// client's request for the AssociateConnectionAlias operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AssociateConnectionAlias for more information on using the AssociateConnectionAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AssociateConnectionAliasRequest method. +// req, resp := client.AssociateConnectionAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/AssociateConnectionAlias +func (c *WorkSpaces) AssociateConnectionAliasRequest(input *AssociateConnectionAliasInput) (req *request.Request, output *AssociateConnectionAliasOutput) { + op := &request.Operation{ + Name: opAssociateConnectionAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssociateConnectionAliasInput{} + } + + output = &AssociateConnectionAliasOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssociateConnectionAlias API operation for Amazon WorkSpaces. +// +// Associates the specified connection alias with the specified directory to +// enable cross-Region redirection. For more information, see Cross-Region Redirection +// for Amazon WorkSpaces (https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html). 
+// +// Before performing this operation, call DescribeConnectionAliases (https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeConnectionAliases.html) +// to make sure that the current state of the connection alias is CREATED. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation AssociateConnectionAlias for usage and error information. +// +// Returned Error Types: +// * ResourceAssociatedException +// The resource is associated with a directory. +// +// * AccessDeniedException +// The user is not authorized to access a resource. +// +// * ResourceNotFoundException +// The resource could not be found. +// +// * InvalidParameterValuesException +// One or more parameter values are not valid. +// +// * InvalidResourceStateException +// The state of the resource is not valid for this operation. +// +// * OperationNotSupportedException +// This operation is not supported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/AssociateConnectionAlias +func (c *WorkSpaces) AssociateConnectionAlias(input *AssociateConnectionAliasInput) (*AssociateConnectionAliasOutput, error) { + req, out := c.AssociateConnectionAliasRequest(input) + return out, req.Send() +} + +// AssociateConnectionAliasWithContext is the same as AssociateConnectionAlias with the addition of +// the ability to pass a context and additional request options. +// +// See AssociateConnectionAlias for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) AssociateConnectionAliasWithContext(ctx aws.Context, input *AssociateConnectionAliasInput, opts ...request.Option) (*AssociateConnectionAliasOutput, error) { + req, out := c.AssociateConnectionAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opAssociateIpGroups = "AssociateIpGroups" // AssociateIpGroupsRequest generates a "aws/request.Request" representing the @@ -248,6 +347,15 @@ func (c *WorkSpaces) CopyWorkspaceImageRequest(input *CopyWorkspaceImageInput) ( // CopyWorkspaceImage API operation for Amazon WorkSpaces. // // Copies the specified image from the specified Region to the current Region. +// For more information about copying images, see Copy a Custom WorkSpaces Image +// (https://docs.aws.amazon.com/workspaces/latest/adminguide/copy-custom-image.html). +// +// Before copying a shared image, be sure to verify that it has been shared +// from the correct AWS account. To determine if an image has been shared and +// to see the AWS account ID that owns an image, use the DescribeWorkSpaceImages +// (https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeWorkspaceImages.html) +// and DescribeWorkspaceImagePermissions (https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeWorkspaceImagePermissions.html) +// API operations. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -300,6 +408,102 @@ func (c *WorkSpaces) CopyWorkspaceImageWithContext(ctx aws.Context, input *CopyW return out, req.Send() } +const opCreateConnectionAlias = "CreateConnectionAlias" + +// CreateConnectionAliasRequest generates a "aws/request.Request" representing the +// client's request for the CreateConnectionAlias operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateConnectionAlias for more information on using the CreateConnectionAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateConnectionAliasRequest method. +// req, resp := client.CreateConnectionAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateConnectionAlias +func (c *WorkSpaces) CreateConnectionAliasRequest(input *CreateConnectionAliasInput) (req *request.Request, output *CreateConnectionAliasOutput) { + op := &request.Operation{ + Name: opCreateConnectionAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateConnectionAliasInput{} + } + + output = &CreateConnectionAliasOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateConnectionAlias API operation for Amazon WorkSpaces. +// +// Creates the specified connection alias for use with cross-Region redirection. +// For more information, see Cross-Region Redirection for Amazon WorkSpaces +// (https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation CreateConnectionAlias for usage and error information. +// +// Returned Error Types: +// * ResourceAlreadyExistsException +// The specified resource already exists. +// +// * AccessDeniedException +// The user is not authorized to access a resource. +// +// * InvalidParameterValuesException +// One or more parameter values are not valid. +// +// * ResourceLimitExceededException +// Your resource limits have been exceeded. +// +// * InvalidResourceStateException +// The state of the resource is not valid for this operation. +// +// * OperationNotSupportedException +// This operation is not supported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/CreateConnectionAlias +func (c *WorkSpaces) CreateConnectionAlias(input *CreateConnectionAliasInput) (*CreateConnectionAliasOutput, error) { + req, out := c.CreateConnectionAliasRequest(input) + return out, req.Send() +} + +// CreateConnectionAliasWithContext is the same as CreateConnectionAlias with the addition of +// the ability to pass a context and additional request options. +// +// See CreateConnectionAlias for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) CreateConnectionAliasWithContext(ctx aws.Context, input *CreateConnectionAliasInput, opts ...request.Option) (*CreateConnectionAliasOutput, error) { + req, out := c.CreateConnectionAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCreateIpGroup = "CreateIpGroup" // CreateIpGroupRequest generates a "aws/request.Request" representing the @@ -573,6 +777,113 @@ func (c *WorkSpaces) CreateWorkspacesWithContext(ctx aws.Context, input *CreateW return out, req.Send() } +const opDeleteConnectionAlias = "DeleteConnectionAlias" + +// DeleteConnectionAliasRequest generates a "aws/request.Request" representing the +// client's request for the DeleteConnectionAlias operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteConnectionAlias for more information on using the DeleteConnectionAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteConnectionAliasRequest method. +// req, resp := client.DeleteConnectionAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteConnectionAlias +func (c *WorkSpaces) DeleteConnectionAliasRequest(input *DeleteConnectionAliasInput) (req *request.Request, output *DeleteConnectionAliasOutput) { + op := &request.Operation{ + Name: opDeleteConnectionAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteConnectionAliasInput{} + } + + output = &DeleteConnectionAliasOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteConnectionAlias API operation for Amazon WorkSpaces. +// +// Deletes the specified connection alias. For more information, see Cross-Region +// Redirection for Amazon WorkSpaces (https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html). +// +// If you will no longer be using a fully qualified domain name (FQDN) as the +// registration code for your WorkSpaces users, you must take certain precautions +// to prevent potential security issues. For more information, see Security +// Considerations if You Stop Using Cross-Region Redirection (https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html#cross-region-redirection-security-considerations). +// +// To delete a connection alias that has been shared, the shared account must +// first disassociate the connection alias from any directories it has been +// associated with. Then you must unshare the connection alias from the account +// it has been shared with. 
You can delete a connection alias only after it +// is no longer shared with any accounts or associated with any directories. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation DeleteConnectionAlias for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The resource could not be found. +// +// * ResourceAssociatedException +// The resource is associated with a directory. +// +// * AccessDeniedException +// The user is not authorized to access a resource. +// +// * InvalidParameterValuesException +// One or more parameter values are not valid. +// +// * InvalidResourceStateException +// The state of the resource is not valid for this operation. +// +// * OperationNotSupportedException +// This operation is not supported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DeleteConnectionAlias +func (c *WorkSpaces) DeleteConnectionAlias(input *DeleteConnectionAliasInput) (*DeleteConnectionAliasOutput, error) { + req, out := c.DeleteConnectionAliasRequest(input) + return out, req.Send() +} + +// DeleteConnectionAliasWithContext is the same as DeleteConnectionAlias with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteConnectionAlias for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DeleteConnectionAliasWithContext(ctx aws.Context, input *DeleteConnectionAliasInput, opts ...request.Option) (*DeleteConnectionAliasOutput, error) { + req, out := c.DeleteConnectionAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteIpGroup = "DeleteIpGroup" // DeleteIpGroupRequest generates a "aws/request.Request" representing the @@ -793,8 +1104,8 @@ func (c *WorkSpaces) DeleteWorkspaceImageRequest(input *DeleteWorkspaceImageInpu // DeleteWorkspaceImage API operation for Amazon WorkSpaces. // // Deletes the specified image from your account. To delete an image, you must -// first delete any bundles that are associated with the image and un-share -// the image if it is shared with other accounts. +// first delete any bundles that are associated with the image and unshare the +// image if it is shared with other accounts. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1174,89 +1485,266 @@ func (c *WorkSpaces) DescribeClientPropertiesWithContext(ctx aws.Context, input return out, req.Send() } -const opDescribeIpGroups = "DescribeIpGroups" +const opDescribeConnectionAliasPermissions = "DescribeConnectionAliasPermissions" -// DescribeIpGroupsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeIpGroups operation. The "output" return +// DescribeConnectionAliasPermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConnectionAliasPermissions operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DescribeIpGroups for more information on using the DescribeIpGroups +// See DescribeConnectionAliasPermissions for more information on using the DescribeConnectionAliasPermissions // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DescribeIpGroupsRequest method. -// req, resp := client.DescribeIpGroupsRequest(params) +// // Example sending a request using the DescribeConnectionAliasPermissionsRequest method. +// req, resp := client.DescribeConnectionAliasPermissionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeIpGroups -func (c *WorkSpaces) DescribeIpGroupsRequest(input *DescribeIpGroupsInput) (req *request.Request, output *DescribeIpGroupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeConnectionAliasPermissions +func (c *WorkSpaces) DescribeConnectionAliasPermissionsRequest(input *DescribeConnectionAliasPermissionsInput) (req *request.Request, output *DescribeConnectionAliasPermissionsOutput) { op := &request.Operation{ - Name: opDescribeIpGroups, + Name: opDescribeConnectionAliasPermissions, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DescribeIpGroupsInput{} + input = &DescribeConnectionAliasPermissionsInput{} } - output = &DescribeIpGroupsOutput{} + output = &DescribeConnectionAliasPermissionsOutput{} req = c.newRequest(op, input, output) return } -// DescribeIpGroups API operation for Amazon WorkSpaces. +// DescribeConnectionAliasPermissions API operation for Amazon WorkSpaces. // -// Describes one or more of your IP access control groups. +// Describes the permissions that the owner of a connection alias has granted +// to another AWS account for the specified connection alias. For more information, +// see Cross-Region Redirection for Amazon WorkSpaces (https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon WorkSpaces's -// API operation DescribeIpGroups for usage and error information. +// API operation DescribeConnectionAliasPermissions for usage and error information. // // Returned Error Types: +// * AccessDeniedException +// The user is not authorized to access a resource. +// // * InvalidParameterValuesException // One or more parameter values are not valid. // -// * AccessDeniedException -// The user is not authorized to access a resource. +// * ResourceNotFoundException +// The resource could not be found. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeIpGroups -func (c *WorkSpaces) DescribeIpGroups(input *DescribeIpGroupsInput) (*DescribeIpGroupsOutput, error) { - req, out := c.DescribeIpGroupsRequest(input) +// * OperationNotSupportedException +// This operation is not supported. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeConnectionAliasPermissions +func (c *WorkSpaces) DescribeConnectionAliasPermissions(input *DescribeConnectionAliasPermissionsInput) (*DescribeConnectionAliasPermissionsOutput, error) { + req, out := c.DescribeConnectionAliasPermissionsRequest(input) return out, req.Send() } -// DescribeIpGroupsWithContext is the same as DescribeIpGroups with the addition of +// DescribeConnectionAliasPermissionsWithContext is the same as DescribeConnectionAliasPermissions with the addition of // the ability to pass a context and additional request options. // -// See DescribeIpGroups for details on how to use this API operation. +// See DescribeConnectionAliasPermissions for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WorkSpaces) DescribeIpGroupsWithContext(ctx aws.Context, input *DescribeIpGroupsInput, opts ...request.Option) (*DescribeIpGroupsOutput, error) { - req, out := c.DescribeIpGroupsRequest(input) +func (c *WorkSpaces) DescribeConnectionAliasPermissionsWithContext(ctx aws.Context, input *DescribeConnectionAliasPermissionsInput, opts ...request.Option) (*DescribeConnectionAliasPermissionsOutput, error) { + req, out := c.DescribeConnectionAliasPermissionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opDescribeTags = "DescribeTags" +const opDescribeConnectionAliases = "DescribeConnectionAliases" + +// DescribeConnectionAliasesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConnectionAliases operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeConnectionAliases for more information on using the DescribeConnectionAliases +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeConnectionAliasesRequest method. +// req, resp := client.DescribeConnectionAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeConnectionAliases +func (c *WorkSpaces) DescribeConnectionAliasesRequest(input *DescribeConnectionAliasesInput) (req *request.Request, output *DescribeConnectionAliasesOutput) { + op := &request.Operation{ + Name: opDescribeConnectionAliases, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeConnectionAliasesInput{} + } + + output = &DescribeConnectionAliasesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeConnectionAliases API operation for Amazon WorkSpaces. +// +// Retrieves a list that describes the connection aliases used for cross-Region +// redirection. 
For more information, see Cross-Region Redirection for Amazon +// WorkSpaces (https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation DescribeConnectionAliases for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// The user is not authorized to access a resource. +// +// * InvalidParameterValuesException +// One or more parameter values are not valid. +// +// * OperationNotSupportedException +// This operation is not supported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeConnectionAliases +func (c *WorkSpaces) DescribeConnectionAliases(input *DescribeConnectionAliasesInput) (*DescribeConnectionAliasesOutput, error) { + req, out := c.DescribeConnectionAliasesRequest(input) + return out, req.Send() +} + +// DescribeConnectionAliasesWithContext is the same as DescribeConnectionAliases with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeConnectionAliases for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DescribeConnectionAliasesWithContext(ctx aws.Context, input *DescribeConnectionAliasesInput, opts ...request.Option) (*DescribeConnectionAliasesOutput, error) { + req, out := c.DescribeConnectionAliasesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeIpGroups = "DescribeIpGroups" + +// DescribeIpGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIpGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeIpGroups for more information on using the DescribeIpGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeIpGroupsRequest method. +// req, resp := client.DescribeIpGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeIpGroups +func (c *WorkSpaces) DescribeIpGroupsRequest(input *DescribeIpGroupsInput) (req *request.Request, output *DescribeIpGroupsOutput) { + op := &request.Operation{ + Name: opDescribeIpGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIpGroupsInput{} + } + + output = &DescribeIpGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeIpGroups API operation for Amazon WorkSpaces. +// +// Describes one or more of your IP access control groups. 
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation DescribeIpGroups for usage and error information. +// +// Returned Error Types: +// * InvalidParameterValuesException +// One or more parameter values are not valid. +// +// * AccessDeniedException +// The user is not authorized to access a resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeIpGroups +func (c *WorkSpaces) DescribeIpGroups(input *DescribeIpGroupsInput) (*DescribeIpGroupsOutput, error) { + req, out := c.DescribeIpGroupsRequest(input) + return out, req.Send() +} + +// DescribeIpGroupsWithContext is the same as DescribeIpGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeIpGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DescribeIpGroupsWithContext(ctx aws.Context, input *DescribeIpGroupsInput, opts ...request.Option) (*DescribeIpGroupsOutput, error) { + req, out := c.DescribeIpGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTags = "DescribeTags" // DescribeTagsRequest generates a "aws/request.Request" representing the // client's request for the DescribeTags operation. The "output" return @@ -1611,6 +2099,92 @@ func (c *WorkSpaces) DescribeWorkspaceDirectoriesPagesWithContext(ctx aws.Contex return p.Err() } +const opDescribeWorkspaceImagePermissions = "DescribeWorkspaceImagePermissions" + +// DescribeWorkspaceImagePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeWorkspaceImagePermissions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeWorkspaceImagePermissions for more information on using the DescribeWorkspaceImagePermissions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeWorkspaceImagePermissionsRequest method. 
+// req, resp := client.DescribeWorkspaceImagePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceImagePermissions +func (c *WorkSpaces) DescribeWorkspaceImagePermissionsRequest(input *DescribeWorkspaceImagePermissionsInput) (req *request.Request, output *DescribeWorkspaceImagePermissionsOutput) { + op := &request.Operation{ + Name: opDescribeWorkspaceImagePermissions, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeWorkspaceImagePermissionsInput{} + } + + output = &DescribeWorkspaceImagePermissionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeWorkspaceImagePermissions API operation for Amazon WorkSpaces. +// +// Describes the permissions that the owner of an image has granted to other +// AWS accounts for an image. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation DescribeWorkspaceImagePermissions for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The resource could not be found. +// +// * AccessDeniedException +// The user is not authorized to access a resource. +// +// * InvalidParameterValuesException +// One or more parameter values are not valid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DescribeWorkspaceImagePermissions +func (c *WorkSpaces) DescribeWorkspaceImagePermissions(input *DescribeWorkspaceImagePermissionsInput) (*DescribeWorkspaceImagePermissionsOutput, error) { + req, out := c.DescribeWorkspaceImagePermissionsRequest(input) + return out, req.Send() +} + +// DescribeWorkspaceImagePermissionsWithContext is the same as DescribeWorkspaceImagePermissions with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeWorkspaceImagePermissions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DescribeWorkspaceImagePermissionsWithContext(ctx aws.Context, input *DescribeWorkspaceImagePermissionsInput, opts ...request.Option) (*DescribeWorkspaceImagePermissionsOutput, error) { + req, out := c.DescribeWorkspaceImagePermissionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDescribeWorkspaceImages = "DescribeWorkspaceImages" // DescribeWorkspaceImagesRequest generates a "aws/request.Request" representing the @@ -1998,114 +2572,212 @@ func (c *WorkSpaces) DescribeWorkspacesConnectionStatusWithContext(ctx aws.Conte return out, req.Send() } -const opDisassociateIpGroups = "DisassociateIpGroups" +const opDisassociateConnectionAlias = "DisassociateConnectionAlias" -// DisassociateIpGroupsRequest generates a "aws/request.Request" representing the -// client's request for the DisassociateIpGroups operation. 
The "output" return +// DisassociateConnectionAliasRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateConnectionAlias operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See DisassociateIpGroups for more information on using the DisassociateIpGroups +// See DisassociateConnectionAlias for more information on using the DisassociateConnectionAlias // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the DisassociateIpGroupsRequest method. -// req, resp := client.DisassociateIpGroupsRequest(params) +// // Example sending a request using the DisassociateConnectionAliasRequest method. +// req, resp := client.DisassociateConnectionAliasRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DisassociateIpGroups -func (c *WorkSpaces) DisassociateIpGroupsRequest(input *DisassociateIpGroupsInput) (req *request.Request, output *DisassociateIpGroupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DisassociateConnectionAlias +func (c *WorkSpaces) DisassociateConnectionAliasRequest(input *DisassociateConnectionAliasInput) (req *request.Request, output *DisassociateConnectionAliasOutput) { op := &request.Operation{ - Name: opDisassociateIpGroups, + Name: opDisassociateConnectionAlias, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &DisassociateIpGroupsInput{} + input = &DisassociateConnectionAliasInput{} } - output = &DisassociateIpGroupsOutput{} + output = &DisassociateConnectionAliasOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// DisassociateIpGroups API operation for Amazon WorkSpaces. +// DisassociateConnectionAlias API operation for Amazon WorkSpaces. // -// Disassociates the specified IP access control group from the specified directory. +// Disassociates a connection alias from a directory. Disassociating a connection +// alias disables cross-Region redirection between two directories in different +// AWS Regions. For more information, see Cross-Region Redirection for Amazon +// WorkSpaces (https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html). +// +// Before performing this operation, call DescribeConnectionAliases (https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeConnectionAliases.html) +// to make sure that the current state of the connection alias is CREATED. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon WorkSpaces's -// API operation DisassociateIpGroups for usage and error information. +// API operation DisassociateConnectionAlias for usage and error information. // // Returned Error Types: -// * InvalidParameterValuesException -// One or more parameter values are not valid. 
+// * AccessDeniedException +// The user is not authorized to access a resource. // // * ResourceNotFoundException // The resource could not be found. // +// * InvalidParameterValuesException +// One or more parameter values are not valid. +// // * InvalidResourceStateException // The state of the resource is not valid for this operation. // -// * AccessDeniedException -// The user is not authorized to access a resource. +// * OperationNotSupportedException +// This operation is not supported. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DisassociateIpGroups -func (c *WorkSpaces) DisassociateIpGroups(input *DisassociateIpGroupsInput) (*DisassociateIpGroupsOutput, error) { - req, out := c.DisassociateIpGroupsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DisassociateConnectionAlias +func (c *WorkSpaces) DisassociateConnectionAlias(input *DisassociateConnectionAliasInput) (*DisassociateConnectionAliasOutput, error) { + req, out := c.DisassociateConnectionAliasRequest(input) return out, req.Send() } -// DisassociateIpGroupsWithContext is the same as DisassociateIpGroups with the addition of +// DisassociateConnectionAliasWithContext is the same as DisassociateConnectionAlias with the addition of // the ability to pass a context and additional request options. // -// See DisassociateIpGroups for details on how to use this API operation. +// See DisassociateConnectionAlias for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *WorkSpaces) DisassociateIpGroupsWithContext(ctx aws.Context, input *DisassociateIpGroupsInput, opts ...request.Option) (*DisassociateIpGroupsOutput, error) { - req, out := c.DisassociateIpGroupsRequest(input) +func (c *WorkSpaces) DisassociateConnectionAliasWithContext(ctx aws.Context, input *DisassociateConnectionAliasInput, opts ...request.Option) (*DisassociateConnectionAliasOutput, error) { + req, out := c.DisassociateConnectionAliasRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opImportWorkspaceImage = "ImportWorkspaceImage" +const opDisassociateIpGroups = "DisassociateIpGroups" -// ImportWorkspaceImageRequest generates a "aws/request.Request" representing the -// client's request for the ImportWorkspaceImage operation. The "output" return +// DisassociateIpGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateIpGroups operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ImportWorkspaceImage for more information on using the ImportWorkspaceImage +// See DisassociateIpGroups for more information on using the DisassociateIpGroups // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ImportWorkspaceImageRequest method. 
-// req, resp := client.ImportWorkspaceImageRequest(params) +// // Example sending a request using the DisassociateIpGroupsRequest method. +// req, resp := client.DisassociateIpGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DisassociateIpGroups +func (c *WorkSpaces) DisassociateIpGroupsRequest(input *DisassociateIpGroupsInput) (req *request.Request, output *DisassociateIpGroupsOutput) { + op := &request.Operation{ + Name: opDisassociateIpGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisassociateIpGroupsInput{} + } + + output = &DisassociateIpGroupsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DisassociateIpGroups API operation for Amazon WorkSpaces. +// +// Disassociates the specified IP access control group from the specified directory. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation DisassociateIpGroups for usage and error information. +// +// Returned Error Types: +// * InvalidParameterValuesException +// One or more parameter values are not valid. +// +// * ResourceNotFoundException +// The resource could not be found. +// +// * InvalidResourceStateException +// The state of the resource is not valid for this operation. +// +// * AccessDeniedException +// The user is not authorized to access a resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/DisassociateIpGroups +func (c *WorkSpaces) DisassociateIpGroups(input *DisassociateIpGroupsInput) (*DisassociateIpGroupsOutput, error) { + req, out := c.DisassociateIpGroupsRequest(input) + return out, req.Send() +} + +// DisassociateIpGroupsWithContext is the same as DisassociateIpGroups with the addition of +// the ability to pass a context and additional request options. +// +// See DisassociateIpGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) DisassociateIpGroupsWithContext(ctx aws.Context, input *DisassociateIpGroupsInput, opts ...request.Option) (*DisassociateIpGroupsOutput, error) { + req, out := c.DisassociateIpGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opImportWorkspaceImage = "ImportWorkspaceImage" + +// ImportWorkspaceImageRequest generates a "aws/request.Request" representing the +// client's request for the ImportWorkspaceImage operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportWorkspaceImage for more information on using the ImportWorkspaceImage +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ImportWorkspaceImageRequest method. +// req, resp := client.ImportWorkspaceImageRequest(params) // // err := req.Send() // if err == nil { // resp is now filled @@ -2131,9 +2803,10 @@ func (c *WorkSpaces) ImportWorkspaceImageRequest(input *ImportWorkspaceImageInpu // ImportWorkspaceImage API operation for Amazon WorkSpaces. // -// Imports the specified Windows 7 or Windows 10 Bring Your Own License (BYOL) -// image into Amazon WorkSpaces. The image must be an already licensed EC2 image -// that is in your AWS account, and you must own the image. +// Imports the specified Windows 10 Bring Your Own License (BYOL) image into +// Amazon WorkSpaces. The image must be an already licensed Amazon EC2 image +// that is in your AWS account, and you must own the image. For more information +// about creating BYOL images, see Bring Your Own Windows Desktop Licenses (https://docs.aws.amazon.com/workspaces/latest/adminguide/byol-windows-images.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -2231,6 +2904,10 @@ func (c *WorkSpaces) ListAvailableManagementCidrRangesRequest(input *ListAvailab // you can use for the network management interface when you enable Bring Your // Own License (BYOL). // +// This operation can be run only by AWS accounts that are enabled for BYOL. +// If your account isn't enabled for BYOL, you'll receive an AccessDeniedException +// error. +// // The management network interface is connected to a secure Amazon WorkSpaces // management network. It is used for interactive streaming of the WorkSpace // desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to manage @@ -2793,6 +3470,9 @@ func (c *WorkSpaces) ModifyWorkspaceCreationPropertiesRequest(input *ModifyWorks // * ResourceNotFoundException // The resource could not be found. // +// * OperationNotSupportedException +// This operation is not supported. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/ModifyWorkspaceCreationProperties func (c *WorkSpaces) ModifyWorkspaceCreationProperties(input *ModifyWorkspaceCreationPropertiesInput) (*ModifyWorkspaceCreationPropertiesOutput, error) { req, out := c.ModifyWorkspaceCreationPropertiesRequest(input) @@ -2860,7 +3540,9 @@ func (c *WorkSpaces) ModifyWorkspacePropertiesRequest(input *ModifyWorkspaceProp // ModifyWorkspaceProperties API operation for Amazon WorkSpaces. // -// Modifies the specified WorkSpace properties. +// Modifies the specified WorkSpace properties. For important information about +// how to modify the size of the root and user volumes, see Modify a WorkSpace +// (https://docs.aws.amazon.com/workspaces/latest/adminguide/modify-workspaces.html). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3133,7 +3815,7 @@ func (c *WorkSpaces) RebuildWorkspacesRequest(input *RebuildWorkspacesInput) (re // Rebuilds the specified WorkSpace. // // You cannot rebuild a WorkSpace unless its state is AVAILABLE, ERROR, UNHEALTHY, -// or STOPPED. +// STOPPED, or REBOOTING. // // Rebuilding a WorkSpace is a potentially destructive action that can result // in the loss of data. 
For more information, see Rebuild a WorkSpace (https://docs.aws.amazon.com/workspaces/latest/adminguide/reset-workspace.html). @@ -3665,13 +4347,18 @@ func (c *WorkSpaces) TerminateWorkspacesRequest(input *TerminateWorkspacesInput) // Terminates the specified WorkSpaces. // // Terminating a WorkSpace is a permanent action and cannot be undone. The user's -// data is destroyed. If you need to archive any user data, contact Amazon Web -// Services before terminating the WorkSpace. +// data is destroyed. If you need to archive any user data, contact AWS Support +// before terminating the WorkSpace. // // You can terminate a WorkSpace that is in any state except SUSPENDED. // // This operation is asynchronous and returns before the WorkSpaces have been -// completely terminated. +// completely terminated. After a WorkSpace is terminated, the TERMINATED state +// is returned only briefly before the WorkSpace directory metadata is cleaned +// up, so this state is rarely returned. To confirm that a WorkSpace is terminated, +// check for the WorkSpace ID by using DescribeWorkSpaces (https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeWorkspaces.html). +// If the WorkSpace ID isn't returned, then the WorkSpace has been successfully +// terminated. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3701,6 +4388,119 @@ func (c *WorkSpaces) TerminateWorkspacesWithContext(ctx aws.Context, input *Term return out, req.Send() } +const opUpdateConnectionAliasPermission = "UpdateConnectionAliasPermission" + +// UpdateConnectionAliasPermissionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateConnectionAliasPermission operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateConnectionAliasPermission for more information on using the UpdateConnectionAliasPermission +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateConnectionAliasPermissionRequest method. +// req, resp := client.UpdateConnectionAliasPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/UpdateConnectionAliasPermission +func (c *WorkSpaces) UpdateConnectionAliasPermissionRequest(input *UpdateConnectionAliasPermissionInput) (req *request.Request, output *UpdateConnectionAliasPermissionOutput) { + op := &request.Operation{ + Name: opUpdateConnectionAliasPermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateConnectionAliasPermissionInput{} + } + + output = &UpdateConnectionAliasPermissionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateConnectionAliasPermission API operation for Amazon WorkSpaces. 
+// +// Shares or unshares a connection alias with one account by specifying whether +// that account has permission to associate the connection alias with a directory. +// If the association permission is granted, the connection alias is shared +// with that account. If the association permission is revoked, the connection +// alias is unshared with the account. For more information, see Cross-Region +// Redirection for Amazon WorkSpaces (https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html). +// +// * Before performing this operation, call DescribeConnectionAliases (https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeConnectionAliases.html) +// to make sure that the current state of the connection alias is CREATED. +// +// * To delete a connection alias that has been shared, the shared account +// must first disassociate the connection alias from any directories it has +// been associated with. Then you must unshare the connection alias from +// the account it has been shared with. You can delete a connection alias +// only after it is no longer shared with any accounts or associated with +// any directories. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation UpdateConnectionAliasPermission for usage and error information. +// +// Returned Error Types: +// * AccessDeniedException +// The user is not authorized to access a resource. +// +// * InvalidParameterValuesException +// One or more parameter values are not valid. +// +// * ResourceLimitExceededException +// Your resource limits have been exceeded. +// +// * ResourceNotFoundException +// The resource could not be found. +// +// * ResourceAssociatedException +// The resource is associated with a directory. +// +// * InvalidResourceStateException +// The state of the resource is not valid for this operation. +// +// * OperationNotSupportedException +// This operation is not supported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/UpdateConnectionAliasPermission +func (c *WorkSpaces) UpdateConnectionAliasPermission(input *UpdateConnectionAliasPermissionInput) (*UpdateConnectionAliasPermissionOutput, error) { + req, out := c.UpdateConnectionAliasPermissionRequest(input) + return out, req.Send() +} + +// UpdateConnectionAliasPermissionWithContext is the same as UpdateConnectionAliasPermission with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateConnectionAliasPermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) UpdateConnectionAliasPermissionWithContext(ctx aws.Context, input *UpdateConnectionAliasPermissionInput, opts ...request.Option) (*UpdateConnectionAliasPermissionOutput, error) { + req, out := c.UpdateConnectionAliasPermissionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opUpdateRulesOfIpGroup = "UpdateRulesOfIpGroup" // UpdateRulesOfIpGroupRequest generates a "aws/request.Request" representing the @@ -3794,10 +4594,114 @@ func (c *WorkSpaces) UpdateRulesOfIpGroupWithContext(ctx aws.Context, input *Upd return out, req.Send() } +const opUpdateWorkspaceImagePermission = "UpdateWorkspaceImagePermission" + +// UpdateWorkspaceImagePermissionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateWorkspaceImagePermission operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateWorkspaceImagePermission for more information on using the UpdateWorkspaceImagePermission +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateWorkspaceImagePermissionRequest method. +// req, resp := client.UpdateWorkspaceImagePermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/UpdateWorkspaceImagePermission +func (c *WorkSpaces) UpdateWorkspaceImagePermissionRequest(input *UpdateWorkspaceImagePermissionInput) (req *request.Request, output *UpdateWorkspaceImagePermissionOutput) { + op := &request.Operation{ + Name: opUpdateWorkspaceImagePermission, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateWorkspaceImagePermissionInput{} + } + + output = &UpdateWorkspaceImagePermissionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateWorkspaceImagePermission API operation for Amazon WorkSpaces. +// +// Shares or unshares an image with one account by specifying whether that account +// has permission to copy the image. If the copy image permission is granted, +// the image is shared with that account. If the copy image permission is revoked, +// the image is unshared with the account. For more information about sharing +// images, see Share or Unshare a Custom WorkSpaces Image (https://docs.aws.amazon.com/workspaces/latest/adminguide/share-custom-image.html). +// +// * To delete an image that has been shared, you must unshare the image +// before you delete it. +// +// * Sharing Bring Your Own License (BYOL) images across AWS accounts isn't +// supported at this time in the AWS GovCloud (US-West) Region. To share +// BYOL images across accounts in the AWS GovCloud (US-West) Region, contact +// AWS Support. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon WorkSpaces's +// API operation UpdateWorkspaceImagePermission for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The resource could not be found. +// +// * ResourceUnavailableException +// The specified resource is not available. 
+// +// * AccessDeniedException +// The user is not authorized to access a resource. +// +// * InvalidParameterValuesException +// One or more parameter values are not valid. +// +// * OperationNotSupportedException +// This operation is not supported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/workspaces-2015-04-08/UpdateWorkspaceImagePermission +func (c *WorkSpaces) UpdateWorkspaceImagePermission(input *UpdateWorkspaceImagePermissionInput) (*UpdateWorkspaceImagePermissionOutput, error) { + req, out := c.UpdateWorkspaceImagePermissionRequest(input) + return out, req.Send() +} + +// UpdateWorkspaceImagePermissionWithContext is the same as UpdateWorkspaceImagePermission with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateWorkspaceImagePermission for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *WorkSpaces) UpdateWorkspaceImagePermissionWithContext(ctx aws.Context, input *UpdateWorkspaceImagePermissionInput, opts ...request.Option) (*UpdateWorkspaceImagePermissionOutput, error) { + req, out := c.UpdateWorkspaceImagePermissionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + // The user is not authorized to access a resource. type AccessDeniedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -3814,17 +4718,17 @@ func (s AccessDeniedException) GoString() string { func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { return &AccessDeniedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s AccessDeniedException) Code() string { +func (s *AccessDeniedException) Code() string { return "AccessDeniedException" } // Message returns the exception's message. -func (s AccessDeniedException) Message() string { +func (s *AccessDeniedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -3832,22 +4736,22 @@ func (s AccessDeniedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s AccessDeniedException) OrigErr() error { +func (s *AccessDeniedException) OrigErr() error { return nil } -func (s AccessDeniedException) Error() string { +func (s *AccessDeniedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s AccessDeniedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. 
-func (s AccessDeniedException) RequestID() string { - return s.respMetadata.RequestID +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID } // Describes a modification to the configuration of Bring Your Own License (BYOL) @@ -3922,22 +4826,105 @@ func (s *AccountModification) SetStartTime(v time.Time) *AccountModification { return s } -type AssociateIpGroupsInput struct { +type AssociateConnectionAliasInput struct { _ struct{} `type:"structure"` - // The identifier of the directory. + // The identifier of the connection alias. // - // DirectoryId is a required field - DirectoryId *string `min:"10" type:"string" required:"true"` + // AliasId is a required field + AliasId *string `min:"13" type:"string" required:"true"` - // The identifiers of one or more IP access control groups. + // The identifier of the directory to associate the connection alias with. // - // GroupIds is a required field - GroupIds []*string `type:"list" required:"true"` + // ResourceId is a required field + ResourceId *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s AssociateIpGroupsInput) String() string { +func (s AssociateConnectionAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateConnectionAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssociateConnectionAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssociateConnectionAliasInput"} + if s.AliasId == nil { + invalidParams.Add(request.NewErrParamRequired("AliasId")) + } + if s.AliasId != nil && len(*s.AliasId) < 13 { + invalidParams.Add(request.NewErrParamMinLen("AliasId", 13)) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasId sets the AliasId field's value. +func (s *AssociateConnectionAliasInput) SetAliasId(v string) *AssociateConnectionAliasInput { + s.AliasId = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *AssociateConnectionAliasInput) SetResourceId(v string) *AssociateConnectionAliasInput { + s.ResourceId = &v + return s +} + +type AssociateConnectionAliasOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the connection alias association. You use the connection + // identifier in the DNS TXT record when you're configuring your DNS routing + // policies. + ConnectionIdentifier *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AssociateConnectionAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssociateConnectionAliasOutput) GoString() string { + return s.String() +} + +// SetConnectionIdentifier sets the ConnectionIdentifier field's value. +func (s *AssociateConnectionAliasOutput) SetConnectionIdentifier(v string) *AssociateConnectionAliasOutput { + s.ConnectionIdentifier = &v + return s +} + +type AssociateIpGroupsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the directory. 
+ // + // DirectoryId is a required field + DirectoryId *string `min:"10" type:"string" required:"true"` + + // The identifiers of one or more IP access control groups. + // + // GroupIds is a required field + GroupIds []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s AssociateIpGroupsInput) String() string { return awsutil.Prettify(s) } @@ -4140,6 +5127,182 @@ func (s *ComputeType) SetName(v string) *ComputeType { return s } +// Describes a connection alias. Connection aliases are used for cross-Region +// redirection. For more information, see Cross-Region Redirection for Amazon +// WorkSpaces (https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html). +type ConnectionAlias struct { + _ struct{} `type:"structure"` + + // The identifier of the connection alias. + AliasId *string `min:"13" type:"string"` + + // The association status of the connection alias. + Associations []*ConnectionAliasAssociation `min:"1" type:"list"` + + // The connection string specified for the connection alias. The connection + // string must be in the form of a fully qualified domain name (FQDN), such + // as www.example.com. + ConnectionString *string `min:"1" type:"string"` + + // The identifier of the AWS account that owns the connection alias. + OwnerAccountId *string `type:"string"` + + // The current state of the connection alias. + State *string `type:"string" enum:"ConnectionAliasState"` +} + +// String returns the string representation +func (s ConnectionAlias) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionAlias) GoString() string { + return s.String() +} + +// SetAliasId sets the AliasId field's value. +func (s *ConnectionAlias) SetAliasId(v string) *ConnectionAlias { + s.AliasId = &v + return s +} + +// SetAssociations sets the Associations field's value. +func (s *ConnectionAlias) SetAssociations(v []*ConnectionAliasAssociation) *ConnectionAlias { + s.Associations = v + return s +} + +// SetConnectionString sets the ConnectionString field's value. +func (s *ConnectionAlias) SetConnectionString(v string) *ConnectionAlias { + s.ConnectionString = &v + return s +} + +// SetOwnerAccountId sets the OwnerAccountId field's value. +func (s *ConnectionAlias) SetOwnerAccountId(v string) *ConnectionAlias { + s.OwnerAccountId = &v + return s +} + +// SetState sets the State field's value. +func (s *ConnectionAlias) SetState(v string) *ConnectionAlias { + s.State = &v + return s +} + +// Describes a connection alias association that is used for cross-Region redirection. +// For more information, see Cross-Region Redirection for Amazon WorkSpaces +// (https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html). +type ConnectionAliasAssociation struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account that associated the connection alias with + // a directory. + AssociatedAccountId *string `type:"string"` + + // The association status of the connection alias. + AssociationStatus *string `type:"string" enum:"AssociationStatus"` + + // The identifier of the connection alias association. You use the connection + // identifier in the DNS TXT record when you're configuring your DNS routing + // policies. + ConnectionIdentifier *string `min:"1" type:"string"` + + // The identifier of the directory associated with a connection alias. 
+ ResourceId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ConnectionAliasAssociation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionAliasAssociation) GoString() string { + return s.String() +} + +// SetAssociatedAccountId sets the AssociatedAccountId field's value. +func (s *ConnectionAliasAssociation) SetAssociatedAccountId(v string) *ConnectionAliasAssociation { + s.AssociatedAccountId = &v + return s +} + +// SetAssociationStatus sets the AssociationStatus field's value. +func (s *ConnectionAliasAssociation) SetAssociationStatus(v string) *ConnectionAliasAssociation { + s.AssociationStatus = &v + return s +} + +// SetConnectionIdentifier sets the ConnectionIdentifier field's value. +func (s *ConnectionAliasAssociation) SetConnectionIdentifier(v string) *ConnectionAliasAssociation { + s.ConnectionIdentifier = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *ConnectionAliasAssociation) SetResourceId(v string) *ConnectionAliasAssociation { + s.ResourceId = &v + return s +} + +// Describes the permissions for a connection alias. Connection aliases are +// used for cross-Region redirection. For more information, see Cross-Region +// Redirection for Amazon WorkSpaces (https://docs.aws.amazon.com/workspaces/latest/adminguide/cross-region-redirection.html). +type ConnectionAliasPermission struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified AWS account is allowed to associate the connection + // alias with a directory. + // + // AllowAssociation is a required field + AllowAssociation *bool `type:"boolean" required:"true"` + + // The identifier of the AWS account that the connection alias is shared with. + // + // SharedAccountId is a required field + SharedAccountId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ConnectionAliasPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConnectionAliasPermission) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ConnectionAliasPermission) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConnectionAliasPermission"} + if s.AllowAssociation == nil { + invalidParams.Add(request.NewErrParamRequired("AllowAssociation")) + } + if s.SharedAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("SharedAccountId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowAssociation sets the AllowAssociation field's value. +func (s *ConnectionAliasPermission) SetAllowAssociation(v bool) *ConnectionAliasPermission { + s.AllowAssociation = &v + return s +} + +// SetSharedAccountId sets the SharedAccountId field's value. +func (s *ConnectionAliasPermission) SetSharedAccountId(v string) *ConnectionAliasPermission { + s.SharedAccountId = &v + return s +} + type CopyWorkspaceImageInput struct { _ struct{} `type:"structure"` @@ -4266,6 +5429,95 @@ func (s *CopyWorkspaceImageOutput) SetImageId(v string) *CopyWorkspaceImageOutpu return s } +type CreateConnectionAliasInput struct { + _ struct{} `type:"structure"` + + // A connection string in the form of a fully qualified domain name (FQDN), + // such as www.example.com. + // + // After you create a connection string, it is always associated to your AWS + // account. 
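// [Editor's addition, not part of the generated SDK or of this patch] A
// minimal sketch of sharing a connection alias using the
// ConnectionAliasPermission type defined above. The account ID and alias ID
// are hypothetical, svc is assumed to be a *workspaces.WorkSpaces client, and
// the UpdateConnectionAliasPermissionInput field names are assumed from the
// API shape rather than shown in this diff:
//
//	perm := &workspaces.ConnectionAliasPermission{
//		AllowAssociation: aws.Bool(true),
//		SharedAccountId:  aws.String("111122223333"), // hypothetical account ID
//	}
//	_, err := svc.UpdateConnectionAliasPermission(&workspaces.UpdateConnectionAliasPermissionInput{
//		AliasId:                   aws.String("wsca-0example0"), // hypothetical alias ID
//		ConnectionAliasPermission: perm,
//	})
//	if err != nil {
//		// Handle awserr.Error as shown in the earlier sketch.
//	}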
You cannot recreate the same connection string with a different + // account, even if you delete all instances of it from the original account. + // The connection string is globally reserved for your account. + // + // ConnectionString is a required field + ConnectionString *string `min:"1" type:"string" required:"true"` + + // The tags to associate with the connection alias. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateConnectionAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConnectionAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateConnectionAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateConnectionAliasInput"} + if s.ConnectionString == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionString")) + } + if s.ConnectionString != nil && len(*s.ConnectionString) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ConnectionString", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConnectionString sets the ConnectionString field's value. +func (s *CreateConnectionAliasInput) SetConnectionString(v string) *CreateConnectionAliasInput { + s.ConnectionString = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateConnectionAliasInput) SetTags(v []*Tag) *CreateConnectionAliasInput { + s.Tags = v + return s +} + +type CreateConnectionAliasOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the connection alias. + AliasId *string `min:"13" type:"string"` +} + +// String returns the string representation +func (s CreateConnectionAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateConnectionAliasOutput) GoString() string { + return s.String() +} + +// SetAliasId sets the AliasId field's value. +func (s *CreateConnectionAliasOutput) SetAliasId(v string) *CreateConnectionAliasOutput { + s.AliasId = &v + return s +} + type CreateIpGroupInput struct { _ struct{} `type:"structure"` @@ -4368,15 +5620,13 @@ type CreateTagsInput struct { _ struct{} `type:"structure"` // The identifier of the WorkSpaces resource. The supported resource types are - // WorkSpaces, registered directories, images, custom bundles, and IP access - // control groups. + // WorkSpaces, registered directories, images, custom bundles, IP access control + // groups, and connection aliases. // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` - // The tags. Each WorkSpaces resource can have a maximum of 50 tags. If you - // want to add new tags to a set of existing tags, you must submit all of the - // existing tags along with the new ones. + // The tags. Each WorkSpaces resource can have a maximum of 50 tags. 
// // Tags is a required field Tags []*Tag `type:"list" required:"true"` @@ -4539,8 +5789,9 @@ func (s *CreateWorkspacesOutput) SetPendingRequests(v []*Workspace) *CreateWorks type DefaultWorkspaceCreationProperties struct { _ struct{} `type:"structure"` - // The identifier of any security groups to apply to WorkSpaces when they are - // created. + // The identifier of the default security group to apply to WorkSpaces when + // they are created. For more information, see Security Groups for Your WorkSpaces + // (https://docs.aws.amazon.com/workspaces/latest/adminguide/amazon-workspaces-security-groups.html). CustomSecurityGroupId *string `min:"11" type:"string"` // The organizational unit (OU) in the directory for the WorkSpace machine accounts. @@ -4615,6 +5866,61 @@ func (s *DefaultWorkspaceCreationProperties) SetUserEnabledAsLocalAdministrator( return s } +type DeleteConnectionAliasInput struct { + _ struct{} `type:"structure"` + + // The identifier of the connection alias to delete. + // + // AliasId is a required field + AliasId *string `min:"13" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteConnectionAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConnectionAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteConnectionAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteConnectionAliasInput"} + if s.AliasId == nil { + invalidParams.Add(request.NewErrParamRequired("AliasId")) + } + if s.AliasId != nil && len(*s.AliasId) < 13 { + invalidParams.Add(request.NewErrParamMinLen("AliasId", 13)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasId sets the AliasId field's value. +func (s *DeleteConnectionAliasInput) SetAliasId(v string) *DeleteConnectionAliasInput { + s.AliasId = &v + return s +} + +type DeleteConnectionAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteConnectionAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteConnectionAliasOutput) GoString() string { + return s.String() +} + type DeleteIpGroupInput struct { _ struct{} `type:"structure"` @@ -4671,8 +5977,8 @@ type DeleteTagsInput struct { _ struct{} `type:"structure"` // The identifier of the WorkSpaces resource. The supported resource types are - // WorkSpaces, registered directories, images, custom bundles, and IP access - // control groups. + // WorkSpaces, registered directories, images, custom bundles, IP access control + // groups, and connection aliases. // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -5033,10 +6339,224 @@ func (s *DescribeClientPropertiesOutput) SetClientPropertiesList(v []*ClientProp return s } -type DescribeIpGroupsInput struct { +type DescribeConnectionAliasPermissionsInput struct { _ struct{} `type:"structure"` - // The identifiers of one or more IP access control groups. + // The identifier of the connection alias. + // + // AliasId is a required field + AliasId *string `min:"13" type:"string" required:"true"` + + // The maximum number of results to return. 
+ MaxResults *int64 `min:"1" type:"integer"` + + // If you received a NextToken from a previous call that was paginated, provide + // this token to receive the next set of results. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeConnectionAliasPermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConnectionAliasPermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeConnectionAliasPermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeConnectionAliasPermissionsInput"} + if s.AliasId == nil { + invalidParams.Add(request.NewErrParamRequired("AliasId")) + } + if s.AliasId != nil && len(*s.AliasId) < 13 { + invalidParams.Add(request.NewErrParamMinLen("AliasId", 13)) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasId sets the AliasId field's value. +func (s *DescribeConnectionAliasPermissionsInput) SetAliasId(v string) *DescribeConnectionAliasPermissionsInput { + s.AliasId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeConnectionAliasPermissionsInput) SetMaxResults(v int64) *DescribeConnectionAliasPermissionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeConnectionAliasPermissionsInput) SetNextToken(v string) *DescribeConnectionAliasPermissionsInput { + s.NextToken = &v + return s +} + +type DescribeConnectionAliasPermissionsOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the connection alias. + AliasId *string `min:"13" type:"string"` + + // The permissions associated with a connection alias. + ConnectionAliasPermissions []*ConnectionAliasPermission `min:"1" type:"list"` + + // The token to use to retrieve the next set of results, or null if no more + // results are available. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeConnectionAliasPermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConnectionAliasPermissionsOutput) GoString() string { + return s.String() +} + +// SetAliasId sets the AliasId field's value. +func (s *DescribeConnectionAliasPermissionsOutput) SetAliasId(v string) *DescribeConnectionAliasPermissionsOutput { + s.AliasId = &v + return s +} + +// SetConnectionAliasPermissions sets the ConnectionAliasPermissions field's value. +func (s *DescribeConnectionAliasPermissionsOutput) SetConnectionAliasPermissions(v []*ConnectionAliasPermission) *DescribeConnectionAliasPermissionsOutput { + s.ConnectionAliasPermissions = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeConnectionAliasPermissionsOutput) SetNextToken(v string) *DescribeConnectionAliasPermissionsOutput { + s.NextToken = &v + return s +} + +type DescribeConnectionAliasesInput struct { + _ struct{} `type:"structure"` + + // The identifiers of the connection aliases to describe. 
+ AliasIds []*string `min:"1" type:"list"` + + // The maximum number of connection aliases to return. + Limit *int64 `min:"1" type:"integer"` + + // If you received a NextToken from a previous call that was paginated, provide + // this token to receive the next set of results. + NextToken *string `min:"1" type:"string"` + + // The identifier of the directory associated with the connection alias. + ResourceId *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeConnectionAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConnectionAliasesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeConnectionAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeConnectionAliasesInput"} + if s.AliasIds != nil && len(s.AliasIds) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AliasIds", 1)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasIds sets the AliasIds field's value. +func (s *DescribeConnectionAliasesInput) SetAliasIds(v []*string) *DescribeConnectionAliasesInput { + s.AliasIds = v + return s +} + +// SetLimit sets the Limit field's value. +func (s *DescribeConnectionAliasesInput) SetLimit(v int64) *DescribeConnectionAliasesInput { + s.Limit = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeConnectionAliasesInput) SetNextToken(v string) *DescribeConnectionAliasesInput { + s.NextToken = &v + return s +} + +// SetResourceId sets the ResourceId field's value. +func (s *DescribeConnectionAliasesInput) SetResourceId(v string) *DescribeConnectionAliasesInput { + s.ResourceId = &v + return s +} + +type DescribeConnectionAliasesOutput struct { + _ struct{} `type:"structure"` + + // Information about the specified connection aliases. + ConnectionAliases []*ConnectionAlias `min:"1" type:"list"` + + // The token to use to retrieve the next set of results, or null if no more + // results are available. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeConnectionAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeConnectionAliasesOutput) GoString() string { + return s.String() +} + +// SetConnectionAliases sets the ConnectionAliases field's value. +func (s *DescribeConnectionAliasesOutput) SetConnectionAliases(v []*ConnectionAlias) *DescribeConnectionAliasesOutput { + s.ConnectionAliases = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeConnectionAliasesOutput) SetNextToken(v string) *DescribeConnectionAliasesOutput { + s.NextToken = &v + return s +} + +type DescribeIpGroupsInput struct { + _ struct{} `type:"structure"` + + // The identifiers of one or more IP access control groups. GroupIds []*string `type:"list"` // The maximum number of items to return. 
@@ -5128,8 +6648,8 @@ type DescribeTagsInput struct { _ struct{} `type:"structure"` // The identifier of the WorkSpaces resource. The supported resource types are - // WorkSpaces, registered directories, images, custom bundles, and IP access - // control groups. + // WorkSpaces, registered directories, images, custom bundles, IP access control + // groups, and connection aliases. // // ResourceId is a required field ResourceId *string `min:"1" type:"string" required:"true"` @@ -5382,12 +6902,120 @@ func (s *DescribeWorkspaceDirectoriesOutput) SetNextToken(v string) *DescribeWor return s } +type DescribeWorkspaceImagePermissionsInput struct { + _ struct{} `type:"structure"` + + // The identifier of the image. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` + + // The maximum number of items to return. + MaxResults *int64 `min:"1" type:"integer"` + + // If you received a NextToken from a previous call that was paginated, provide + // this token to receive the next set of results. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceImagePermissionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceImagePermissionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeWorkspaceImagePermissionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeWorkspaceImagePermissionsInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageId sets the ImageId field's value. +func (s *DescribeWorkspaceImagePermissionsInput) SetImageId(v string) *DescribeWorkspaceImagePermissionsInput { + s.ImageId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeWorkspaceImagePermissionsInput) SetMaxResults(v int64) *DescribeWorkspaceImagePermissionsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeWorkspaceImagePermissionsInput) SetNextToken(v string) *DescribeWorkspaceImagePermissionsInput { + s.NextToken = &v + return s +} + +type DescribeWorkspaceImagePermissionsOutput struct { + _ struct{} `type:"structure"` + + // The identifier of the image. + ImageId *string `type:"string"` + + // The identifiers of the AWS accounts that the image has been shared with. + ImagePermissions []*ImagePermission `type:"list"` + + // The token to use to retrieve the next set of results, or null if no more + // results are available. + NextToken *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DescribeWorkspaceImagePermissionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspaceImagePermissionsOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. 
+func (s *DescribeWorkspaceImagePermissionsOutput) SetImageId(v string) *DescribeWorkspaceImagePermissionsOutput { + s.ImageId = &v + return s +} + +// SetImagePermissions sets the ImagePermissions field's value. +func (s *DescribeWorkspaceImagePermissionsOutput) SetImagePermissions(v []*ImagePermission) *DescribeWorkspaceImagePermissionsOutput { + s.ImagePermissions = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeWorkspaceImagePermissionsOutput) SetNextToken(v string) *DescribeWorkspaceImagePermissionsOutput { + s.NextToken = &v + return s +} + type DescribeWorkspaceImagesInput struct { _ struct{} `type:"structure"` // The identifier of the image. ImageIds []*string `min:"1" type:"list"` + // The type (owned or shared) of the image. + ImageType *string `type:"string" enum:"ImageType"` + // The maximum number of items to return. MaxResults *int64 `min:"1" type:"integer"` @@ -5431,6 +7059,12 @@ func (s *DescribeWorkspaceImagesInput) SetImageIds(v []*string) *DescribeWorkspa return s } +// SetImageType sets the ImageType field's value. +func (s *DescribeWorkspaceImagesInput) SetImageType(v string) *DescribeWorkspaceImagesInput { + s.ImageType = &v + return s +} + // SetMaxResults sets the MaxResults field's value. func (s *DescribeWorkspaceImagesInput) SetMaxResults(v int64) *DescribeWorkspaceImagesInput { s.MaxResults = &v @@ -5739,35 +7373,90 @@ type DescribeWorkspacesOutput struct { // results are available. NextToken *string `min:"1" type:"string"` - // Information about the WorkSpaces. - // - // Because CreateWorkspaces is an asynchronous operation, some of the returned - // information could be incomplete. - Workspaces []*Workspace `type:"list"` + // Information about the WorkSpaces. + // + // Because CreateWorkspaces is an asynchronous operation, some of the returned + // information could be incomplete. + Workspaces []*Workspace `type:"list"` +} + +// String returns the string representation +func (s DescribeWorkspacesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeWorkspacesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeWorkspacesOutput) SetNextToken(v string) *DescribeWorkspacesOutput { + s.NextToken = &v + return s +} + +// SetWorkspaces sets the Workspaces field's value. +func (s *DescribeWorkspacesOutput) SetWorkspaces(v []*Workspace) *DescribeWorkspacesOutput { + s.Workspaces = v + return s +} + +type DisassociateConnectionAliasInput struct { + _ struct{} `type:"structure"` + + // The identifier of the connection alias to disassociate. + // + // AliasId is a required field + AliasId *string `min:"13" type:"string" required:"true"` +} + +// String returns the string representation +func (s DisassociateConnectionAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisassociateConnectionAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DisassociateConnectionAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisassociateConnectionAliasInput"} + if s.AliasId == nil { + invalidParams.Add(request.NewErrParamRequired("AliasId")) + } + if s.AliasId != nil && len(*s.AliasId) < 13 { + invalidParams.Add(request.NewErrParamMinLen("AliasId", 13)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasId sets the AliasId field's value. +func (s *DisassociateConnectionAliasInput) SetAliasId(v string) *DisassociateConnectionAliasInput { + s.AliasId = &v + return s +} + +type DisassociateConnectionAliasOutput struct { + _ struct{} `type:"structure"` } // String returns the string representation -func (s DescribeWorkspacesOutput) String() string { +func (s DisassociateConnectionAliasOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeWorkspacesOutput) GoString() string { +func (s DisassociateConnectionAliasOutput) GoString() string { return s.String() } -// SetNextToken sets the NextToken field's value. -func (s *DescribeWorkspacesOutput) SetNextToken(v string) *DescribeWorkspacesOutput { - s.NextToken = &v - return s -} - -// SetWorkspaces sets the Workspaces field's value. -func (s *DescribeWorkspacesOutput) SetWorkspaces(v []*Workspace) *DescribeWorkspacesOutput { - s.Workspaces = v - return s -} - type DisassociateIpGroupsInput struct { _ struct{} `type:"structure"` @@ -5925,9 +7614,42 @@ func (s *FailedWorkspaceChangeRequest) SetWorkspaceId(v string) *FailedWorkspace return s } +// Describes the AWS accounts that have been granted permission to use a shared +// image. For more information about sharing images, see Share or Unshare a +// Custom WorkSpaces Image (https://docs.aws.amazon.com/workspaces/latest/adminguide/share-custom-image.html). +type ImagePermission struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account that an image has been shared with. + SharedAccountId *string `type:"string"` +} + +// String returns the string representation +func (s ImagePermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImagePermission) GoString() string { + return s.String() +} + +// SetSharedAccountId sets the SharedAccountId field's value. +func (s *ImagePermission) SetSharedAccountId(v string) *ImagePermission { + s.SharedAccountId = &v + return s +} + type ImportWorkspaceImageInput struct { _ struct{} `type:"structure"` + // If specified, the version of Microsoft Office to subscribe to. Valid only + // for Windows 10 BYOL images. For more information about subscribing to Office + // for BYOL images, see Bring Your Own Windows Desktop Licenses (https://docs.aws.amazon.com/workspaces/latest/adminguide/byol-windows-images.html). + // + // Although this parameter is an array, only one item is allowed at this time. + Applications []*string `min:"1" type:"list"` + // The identifier of the EC2 image. // // Ec2ImageId is a required field @@ -5943,7 +7665,8 @@ type ImportWorkspaceImageInput struct { // ImageName is a required field ImageName *string `min:"1" type:"string" required:"true"` - // The ingestion process to be used when importing the image. + // The ingestion process to be used when importing the image. For non-GPU-enabled + // bundles (bundles other than Graphics or GraphicsPro), specify BYOL_REGULAR. 
// // IngestionProcess is a required field IngestionProcess *string `type:"string" required:"true" enum:"WorkspaceImageIngestionProcess"` @@ -5965,6 +7688,9 @@ func (s ImportWorkspaceImageInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ImportWorkspaceImageInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ImportWorkspaceImageInput"} + if s.Applications != nil && len(s.Applications) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Applications", 1)) + } if s.Ec2ImageId == nil { invalidParams.Add(request.NewErrParamRequired("Ec2ImageId")) } @@ -6000,6 +7726,12 @@ func (s *ImportWorkspaceImageInput) Validate() error { return nil } +// SetApplications sets the Applications field's value. +func (s *ImportWorkspaceImageInput) SetApplications(v []*string) *ImportWorkspaceImageInput { + s.Applications = v + return s +} + // SetEc2ImageId sets the Ec2ImageId field's value. func (s *ImportWorkspaceImageInput) SetEc2ImageId(v string) *ImportWorkspaceImageInput { s.Ec2ImageId = &v @@ -6055,8 +7787,8 @@ func (s *ImportWorkspaceImageOutput) SetImageId(v string) *ImportWorkspaceImageO // One or more parameter values are not valid. type InvalidParameterValuesException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The exception error message. Message_ *string `locationName:"message" type:"string"` @@ -6074,17 +7806,17 @@ func (s InvalidParameterValuesException) GoString() string { func newErrorInvalidParameterValuesException(v protocol.ResponseMetadata) error { return &InvalidParameterValuesException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidParameterValuesException) Code() string { +func (s *InvalidParameterValuesException) Code() string { return "InvalidParameterValuesException" } // Message returns the exception's message. -func (s InvalidParameterValuesException) Message() string { +func (s *InvalidParameterValuesException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6092,28 +7824,28 @@ func (s InvalidParameterValuesException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidParameterValuesException) OrigErr() error { +func (s *InvalidParameterValuesException) OrigErr() error { return nil } -func (s InvalidParameterValuesException) Error() string { +func (s *InvalidParameterValuesException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidParameterValuesException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidParameterValuesException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidParameterValuesException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidParameterValuesException) RequestID() string { + return s.RespMetadata.RequestID } // The state of the resource is not valid for this operation. 
type InvalidResourceStateException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6130,17 +7862,17 @@ func (s InvalidResourceStateException) GoString() string { func newErrorInvalidResourceStateException(v protocol.ResponseMetadata) error { return &InvalidResourceStateException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidResourceStateException) Code() string { +func (s *InvalidResourceStateException) Code() string { return "InvalidResourceStateException" } // Message returns the exception's message. -func (s InvalidResourceStateException) Message() string { +func (s *InvalidResourceStateException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6148,22 +7880,22 @@ func (s InvalidResourceStateException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidResourceStateException) OrigErr() error { +func (s *InvalidResourceStateException) OrigErr() error { return nil } -func (s InvalidResourceStateException) Error() string { +func (s *InvalidResourceStateException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidResourceStateException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidResourceStateException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidResourceStateException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidResourceStateException) RequestID() string { + return s.RespMetadata.RequestID } // Describes an IP access control group. @@ -6958,8 +8690,8 @@ func (s *OperatingSystem) SetType(v string) *OperatingSystem { // The properties of this WorkSpace are currently being modified. Try again // in a moment. type OperationInProgressException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -6976,17 +8708,17 @@ func (s OperationInProgressException) GoString() string { func newErrorOperationInProgressException(v protocol.ResponseMetadata) error { return &OperationInProgressException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationInProgressException) Code() string { +func (s *OperationInProgressException) Code() string { return "OperationInProgressException" } // Message returns the exception's message. -func (s OperationInProgressException) Message() string { +func (s *OperationInProgressException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -6994,28 +8726,28 @@ func (s OperationInProgressException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OperationInProgressException) OrigErr() error { +func (s *OperationInProgressException) OrigErr() error { return nil } -func (s OperationInProgressException) Error() string { +func (s *OperationInProgressException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s OperationInProgressException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OperationInProgressException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationInProgressException) RequestID() string { - return s.respMetadata.RequestID +func (s *OperationInProgressException) RequestID() string { + return s.RespMetadata.RequestID } // This operation is not supported. type OperationNotSupportedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7032,17 +8764,17 @@ func (s OperationNotSupportedException) GoString() string { func newErrorOperationNotSupportedException(v protocol.ResponseMetadata) error { return &OperationNotSupportedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s OperationNotSupportedException) Code() string { +func (s *OperationNotSupportedException) Code() string { return "OperationNotSupportedException" } // Message returns the exception's message. -func (s OperationNotSupportedException) Message() string { +func (s *OperationNotSupportedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7050,22 +8782,22 @@ func (s OperationNotSupportedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s OperationNotSupportedException) OrigErr() error { +func (s *OperationNotSupportedException) OrigErr() error { return nil } -func (s OperationNotSupportedException) Error() string { +func (s *OperationNotSupportedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s OperationNotSupportedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *OperationNotSupportedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s OperationNotSupportedException) RequestID() string { - return s.respMetadata.RequestID +func (s *OperationNotSupportedException) RequestID() string { + return s.RespMetadata.RequestID } // Describes the information used to reboot a WorkSpace. @@ -7427,8 +9159,8 @@ func (s RegisterWorkspaceDirectoryOutput) GoString() string { // The specified resource already exists. type ResourceAlreadyExistsException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7445,17 +9177,17 @@ func (s ResourceAlreadyExistsException) GoString() string { func newErrorResourceAlreadyExistsException(v protocol.ResponseMetadata) error { return &ResourceAlreadyExistsException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAlreadyExistsException) Code() string { +func (s *ResourceAlreadyExistsException) Code() string { return "ResourceAlreadyExistsException" } // Message returns the exception's message. 
-func (s ResourceAlreadyExistsException) Message() string { +func (s *ResourceAlreadyExistsException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7463,28 +9195,28 @@ func (s ResourceAlreadyExistsException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAlreadyExistsException) OrigErr() error { +func (s *ResourceAlreadyExistsException) OrigErr() error { return nil } -func (s ResourceAlreadyExistsException) Error() string { +func (s *ResourceAlreadyExistsException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAlreadyExistsException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAlreadyExistsException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAlreadyExistsException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAlreadyExistsException) RequestID() string { + return s.RespMetadata.RequestID } // The resource is associated with a directory. type ResourceAssociatedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7501,17 +9233,17 @@ func (s ResourceAssociatedException) GoString() string { func newErrorResourceAssociatedException(v protocol.ResponseMetadata) error { return &ResourceAssociatedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceAssociatedException) Code() string { +func (s *ResourceAssociatedException) Code() string { return "ResourceAssociatedException" } // Message returns the exception's message. -func (s ResourceAssociatedException) Message() string { +func (s *ResourceAssociatedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7519,28 +9251,28 @@ func (s ResourceAssociatedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceAssociatedException) OrigErr() error { +func (s *ResourceAssociatedException) OrigErr() error { return nil } -func (s ResourceAssociatedException) Error() string { +func (s *ResourceAssociatedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceAssociatedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceAssociatedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceAssociatedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceAssociatedException) RequestID() string { + return s.RespMetadata.RequestID } // The resource could not be created. 
type ResourceCreationFailedException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -7557,17 +9289,17 @@ func (s ResourceCreationFailedException) GoString() string { func newErrorResourceCreationFailedException(v protocol.ResponseMetadata) error { return &ResourceCreationFailedException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceCreationFailedException) Code() string { +func (s *ResourceCreationFailedException) Code() string { return "ResourceCreationFailedException" } // Message returns the exception's message. -func (s ResourceCreationFailedException) Message() string { +func (s *ResourceCreationFailedException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7575,28 +9307,28 @@ func (s ResourceCreationFailedException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceCreationFailedException) OrigErr() error { +func (s *ResourceCreationFailedException) OrigErr() error { return nil } -func (s ResourceCreationFailedException) Error() string { +func (s *ResourceCreationFailedException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceCreationFailedException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceCreationFailedException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceCreationFailedException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceCreationFailedException) RequestID() string { + return s.RespMetadata.RequestID } // Your resource limits have been exceeded. type ResourceLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The exception error message. Message_ *string `locationName:"message" type:"string"` @@ -7614,17 +9346,17 @@ func (s ResourceLimitExceededException) GoString() string { func newErrorResourceLimitExceededException(v protocol.ResponseMetadata) error { return &ResourceLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceLimitExceededException) Code() string { +func (s *ResourceLimitExceededException) Code() string { return "ResourceLimitExceededException" } // Message returns the exception's message. -func (s ResourceLimitExceededException) Message() string { +func (s *ResourceLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7632,28 +9364,28 @@ func (s ResourceLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceLimitExceededException) OrigErr() error { +func (s *ResourceLimitExceededException) OrigErr() error { return nil } -func (s ResourceLimitExceededException) Error() string { +func (s *ResourceLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s ResourceLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // The resource could not be found. type ResourceNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The resource could not be found. Message_ *string `locationName:"message" type:"string"` @@ -7674,17 +9406,17 @@ func (s ResourceNotFoundException) GoString() string { func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { return &ResourceNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceNotFoundException) Code() string { +func (s *ResourceNotFoundException) Code() string { return "ResourceNotFoundException" } // Message returns the exception's message. -func (s ResourceNotFoundException) Message() string { +func (s *ResourceNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7692,28 +9424,28 @@ func (s ResourceNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ResourceNotFoundException) OrigErr() error { +func (s *ResourceNotFoundException) OrigErr() error { return nil } -func (s ResourceNotFoundException) Error() string { +func (s *ResourceNotFoundException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } // The specified resource is not available. type ResourceUnavailableException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` // The exception error message. Message_ *string `locationName:"message" type:"string"` @@ -7734,17 +9466,17 @@ func (s ResourceUnavailableException) GoString() string { func newErrorResourceUnavailableException(v protocol.ResponseMetadata) error { return &ResourceUnavailableException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ResourceUnavailableException) Code() string { +func (s *ResourceUnavailableException) Code() string { return "ResourceUnavailableException" } // Message returns the exception's message. -func (s ResourceUnavailableException) Message() string { +func (s *ResourceUnavailableException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -7752,22 +9484,22 @@ func (s ResourceUnavailableException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. 
-func (s ResourceUnavailableException) OrigErr() error { +func (s *ResourceUnavailableException) OrigErr() error { return nil } -func (s ResourceUnavailableException) Error() string { +func (s *ResourceUnavailableException) Error() string { return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) } // Status code returns the HTTP status code for the request's response error. -func (s ResourceUnavailableException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ResourceUnavailableException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ResourceUnavailableException) RequestID() string { - return s.respMetadata.RequestID +func (s *ResourceUnavailableException) RequestID() string { + return s.RespMetadata.RequestID } type RestoreWorkspaceInput struct { @@ -8345,8 +10077,8 @@ func (s *TerminateWorkspacesOutput) SetFailedRequests(v []*FailedWorkspaceChange // network IP range. For more information, see Configure a VPC for Amazon WorkSpaces // (https://docs.aws.amazon.com/workspaces/latest/adminguide/amazon-workspaces-vpc.html). type UnsupportedNetworkConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8363,17 +10095,17 @@ func (s UnsupportedNetworkConfigurationException) GoString() string { func newErrorUnsupportedNetworkConfigurationException(v protocol.ResponseMetadata) error { return &UnsupportedNetworkConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedNetworkConfigurationException) Code() string { +func (s *UnsupportedNetworkConfigurationException) Code() string { return "UnsupportedNetworkConfigurationException" } // Message returns the exception's message. -func (s UnsupportedNetworkConfigurationException) Message() string { +func (s *UnsupportedNetworkConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8381,30 +10113,30 @@ func (s UnsupportedNetworkConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedNetworkConfigurationException) OrigErr() error { +func (s *UnsupportedNetworkConfigurationException) OrigErr() error { return nil } -func (s UnsupportedNetworkConfigurationException) Error() string { +func (s *UnsupportedNetworkConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedNetworkConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedNetworkConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedNetworkConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedNetworkConfigurationException) RequestID() string { + return s.RespMetadata.RequestID } // The configuration of this WorkSpace is not supported for this operation. // For more information, see Required Configuration and Service Components for // WorkSpaces (https://docs.aws.amazon.com/workspaces/latest/adminguide/required-service-components.html). 
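// Illustrative sketch: with the modeled exception types above now implemented
// on pointer receivers and carrying an exported RespMetadata, caller code can
// match a concrete error with errors.As and read the HTTP status code and
// request ID through the StatusCode()/RequestID() accessors. The session
// setup and the workspace ID below are placeholder assumptions.
package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workspaces"
)

func main() {
	svc := workspaces.New(session.Must(session.NewSession()))

	_, err := svc.DescribeWorkspaces(&workspaces.DescribeWorkspacesInput{
		WorkspaceIds: []*string{aws.String("ws-0123456789")}, // placeholder ID
	})

	var notFound *workspaces.ResourceNotFoundException
	if errors.As(err, &notFound) {
		// RespMetadata is exported now, surfaced via StatusCode()/RequestID().
		fmt.Println(notFound.Code(), notFound.StatusCode(), notFound.RequestID())
	}
}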
type UnsupportedWorkspaceConfigurationException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -8421,17 +10153,17 @@ func (s UnsupportedWorkspaceConfigurationException) GoString() string { func newErrorUnsupportedWorkspaceConfigurationException(v protocol.ResponseMetadata) error { return &UnsupportedWorkspaceConfigurationException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s UnsupportedWorkspaceConfigurationException) Code() string { +func (s *UnsupportedWorkspaceConfigurationException) Code() string { return "UnsupportedWorkspaceConfigurationException" } // Message returns the exception's message. -func (s UnsupportedWorkspaceConfigurationException) Message() string { +func (s *UnsupportedWorkspaceConfigurationException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -8439,22 +10171,98 @@ func (s UnsupportedWorkspaceConfigurationException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s UnsupportedWorkspaceConfigurationException) OrigErr() error { +func (s *UnsupportedWorkspaceConfigurationException) OrigErr() error { return nil } -func (s UnsupportedWorkspaceConfigurationException) Error() string { +func (s *UnsupportedWorkspaceConfigurationException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s UnsupportedWorkspaceConfigurationException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *UnsupportedWorkspaceConfigurationException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s UnsupportedWorkspaceConfigurationException) RequestID() string { - return s.respMetadata.RequestID +func (s *UnsupportedWorkspaceConfigurationException) RequestID() string { + return s.RespMetadata.RequestID +} + +type UpdateConnectionAliasPermissionInput struct { + _ struct{} `type:"structure"` + + // The identifier of the connection alias that you want to update permissions + // for. + // + // AliasId is a required field + AliasId *string `min:"13" type:"string" required:"true"` + + // Indicates whether to share or unshare the connection alias with the specified + // AWS account. + // + // ConnectionAliasPermission is a required field + ConnectionAliasPermission *ConnectionAliasPermission `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateConnectionAliasPermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConnectionAliasPermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateConnectionAliasPermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateConnectionAliasPermissionInput"} + if s.AliasId == nil { + invalidParams.Add(request.NewErrParamRequired("AliasId")) + } + if s.AliasId != nil && len(*s.AliasId) < 13 { + invalidParams.Add(request.NewErrParamMinLen("AliasId", 13)) + } + if s.ConnectionAliasPermission == nil { + invalidParams.Add(request.NewErrParamRequired("ConnectionAliasPermission")) + } + if s.ConnectionAliasPermission != nil { + if err := s.ConnectionAliasPermission.Validate(); err != nil { + invalidParams.AddNested("ConnectionAliasPermission", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAliasId sets the AliasId field's value. +func (s *UpdateConnectionAliasPermissionInput) SetAliasId(v string) *UpdateConnectionAliasPermissionInput { + s.AliasId = &v + return s +} + +// SetConnectionAliasPermission sets the ConnectionAliasPermission field's value. +func (s *UpdateConnectionAliasPermissionInput) SetConnectionAliasPermission(v *ConnectionAliasPermission) *UpdateConnectionAliasPermissionInput { + s.ConnectionAliasPermission = v + return s +} + +type UpdateConnectionAliasPermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateConnectionAliasPermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateConnectionAliasPermissionOutput) GoString() string { + return s.String() } type UpdateRulesOfIpGroupInput struct { @@ -8523,6 +10331,90 @@ func (s UpdateRulesOfIpGroupOutput) GoString() string { return s.String() } +type UpdateWorkspaceImagePermissionInput struct { + _ struct{} `type:"structure"` + + // The permission to copy the image. This permission can be revoked only after + // an image has been shared. + // + // AllowCopyImage is a required field + AllowCopyImage *bool `type:"boolean" required:"true"` + + // The identifier of the image. + // + // ImageId is a required field + ImageId *string `type:"string" required:"true"` + + // The identifier of the AWS account to share or unshare the image with. + // + // Before sharing the image, confirm that you are sharing to the correct AWS + // account ID. + // + // SharedAccountId is a required field + SharedAccountId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateWorkspaceImagePermissionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateWorkspaceImagePermissionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateWorkspaceImagePermissionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateWorkspaceImagePermissionInput"} + if s.AllowCopyImage == nil { + invalidParams.Add(request.NewErrParamRequired("AllowCopyImage")) + } + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.SharedAccountId == nil { + invalidParams.Add(request.NewErrParamRequired("SharedAccountId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowCopyImage sets the AllowCopyImage field's value. 
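// Illustrative sketch: sharing a connection alias with another account using
// the UpdateConnectionAliasPermission input shown above. The
// AllowAssociation/SharedAccountId field names on ConnectionAliasPermission,
// the alias ID, and the account ID are assumptions for illustration; the
// UpdateConnectionAliasPermission operation wrapper is assumed to be defined
// elsewhere in this file.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workspaces"
)

func main() {
	svc := workspaces.New(session.Must(session.NewSession()))

	input := (&workspaces.UpdateConnectionAliasPermissionInput{}).
		SetAliasId("wsca-0123456789abcdef0"). // placeholder alias ID (min length 13)
		SetConnectionAliasPermission(&workspaces.ConnectionAliasPermission{
			AllowAssociation: aws.Bool(true),             // assumed field name
			SharedAccountId:  aws.String("111122223333"), // assumed field name
		})

	// The SDK also validates on Send; calling Validate here just surfaces
	// parameter errors early.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}

	if _, err := svc.UpdateConnectionAliasPermission(input); err != nil {
		log.Fatal(err)
	}
	fmt.Println("connection alias shared")
}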
+func (s *UpdateWorkspaceImagePermissionInput) SetAllowCopyImage(v bool) *UpdateWorkspaceImagePermissionInput { + s.AllowCopyImage = &v + return s +} + +// SetImageId sets the ImageId field's value. +func (s *UpdateWorkspaceImagePermissionInput) SetImageId(v string) *UpdateWorkspaceImagePermissionInput { + s.ImageId = &v + return s +} + +// SetSharedAccountId sets the SharedAccountId field's value. +func (s *UpdateWorkspaceImagePermissionInput) SetSharedAccountId(v string) *UpdateWorkspaceImagePermissionInput { + s.SharedAccountId = &v + return s +} + +type UpdateWorkspaceImagePermissionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateWorkspaceImagePermissionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateWorkspaceImagePermissionOutput) GoString() string { + return s.String() +} + // Describes the user storage for a WorkSpace bundle. type UserStorage struct { _ struct{} `type:"structure"` @@ -8554,7 +10446,8 @@ type Workspace struct { // The identifier of the bundle used to create the WorkSpace. BundleId *string `type:"string"` - // The name of the WorkSpace, as seen by the operating system. + // The name of the WorkSpace, as seen by the operating system. The format of + // this name varies. For more information, see Launch a WorkSpace (https://docs.aws.amazon.com/workspaces/latest/adminguide/launch-workspaces-tutorials.html). ComputerName *string `type:"string"` // The identifier of the AWS Directory Service directory for the WorkSpace. @@ -8577,6 +10470,13 @@ type Workspace struct { RootVolumeEncryptionEnabled *bool `type:"boolean"` // The operational state of the WorkSpace. + // + // After a WorkSpace is terminated, the TERMINATED state is returned only briefly + // before the WorkSpace directory metadata is cleaned up, so this state is rarely + // returned. To confirm that a WorkSpace is terminated, check for the WorkSpace + // ID by using DescribeWorkSpaces (https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeWorkspaces.html). + // If the WorkSpace ID isn't returned, then the WorkSpace has been successfully + // terminated. State *string `type:"string" enum:"WorkspaceState"` // The identifier of the subnet for the WorkSpace. @@ -8941,7 +10841,17 @@ type WorkspaceCreationProperties struct { // The identifier of your custom security group. CustomSecurityGroupId *string `min:"11" type:"string"` - // The default organizational unit (OU) for your WorkSpace directories. + // The default organizational unit (OU) for your WorkSpaces directories. This + // string must be the full Lightweight Directory Access Protocol (LDAP) distinguished + // name for the target domain and OU. It must be in the form "OU=value,DC=value,DC=value", + // where value is any string of characters, and the number of domain components + // (DCs) is two or more. For example, OU=WorkSpaces_machines,DC=machines,DC=example,DC=com. + // + // * To avoid errors, certain characters in the distinguished name must be + // escaped. For more information, see Distinguished Names (https://docs.microsoft.com/previous-versions/windows/desktop/ldap/distinguished-names) + // in the Microsoft documentation. + // + // * The API doesn't validate whether the OU exists. DefaultOu *string `type:"string"` // Indicates whether internet access is enabled for your WorkSpaces. 
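// Illustrative sketch: granting another account permission to copy a
// WorkSpaces image via the UpdateWorkspaceImagePermission input and setters
// shown above. The image ID and account ID are placeholders, and the
// operation wrapper is assumed to be defined earlier in this file alongside
// the other API calls.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workspaces"
)

func main() {
	svc := workspaces.New(session.Must(session.NewSession()))

	input := (&workspaces.UpdateWorkspaceImagePermissionInput{}).
		SetAllowCopyImage(true).
		SetImageId("wsi-0123456789abc").   // placeholder image ID
		SetSharedAccountId("111122223333") // placeholder AWS account ID

	if _, err := svc.UpdateWorkspaceImagePermission(input); err != nil {
		log.Fatal(err)
	}
}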
@@ -8951,6 +10861,22 @@ type WorkspaceCreationProperties struct { // information, see WorkSpace Maintenance (https://docs.aws.amazon.com/workspaces/latest/adminguide/workspace-maintenance.html). EnableMaintenanceMode *bool `type:"boolean"` + // Indicates whether Amazon WorkDocs is enabled for your WorkSpaces. + // + // If WorkDocs is already enabled for a WorkSpaces directory and you disable + // it, new WorkSpaces launched in the directory will not have WorkDocs enabled. + // However, WorkDocs remains enabled for any existing WorkSpaces, unless you + // either disable users' access to WorkDocs or you delete the WorkDocs site. + // To disable users' access to WorkDocs, see Disabling Users (https://docs.aws.amazon.com/workdocs/latest/adminguide/inactive-user.html) + // in the Amazon WorkDocs Administration Guide. To delete a WorkDocs site, see + // Deleting a Site (https://docs.aws.amazon.com/workdocs/latest/adminguide/manage-sites.html) + // in the Amazon WorkDocs Administration Guide. + // + // If you enable WorkDocs on a directory that already has existing WorkSpaces, + // the existing WorkSpaces and any new WorkSpaces that are launched in the directory + // will have WorkDocs enabled. + EnableWorkDocs *bool `type:"boolean"` + // Indicates whether users are local administrators of their WorkSpaces. UserEnabledAsLocalAdministrator *bool `type:"boolean"` } @@ -9002,6 +10928,12 @@ func (s *WorkspaceCreationProperties) SetEnableMaintenanceMode(v bool) *Workspac return s } +// SetEnableWorkDocs sets the EnableWorkDocs field's value. +func (s *WorkspaceCreationProperties) SetEnableWorkDocs(v bool) *WorkspaceCreationProperties { + s.EnableWorkDocs = &v + return s +} + // SetUserEnabledAsLocalAdministrator sets the UserEnabledAsLocalAdministrator field's value. func (s *WorkspaceCreationProperties) SetUserEnabledAsLocalAdministrator(v bool) *WorkspaceCreationProperties { s.UserEnabledAsLocalAdministrator = &v @@ -9044,7 +10976,13 @@ type WorkspaceDirectory struct { // The default self-service permissions for WorkSpaces in the directory. SelfservicePermissions *SelfservicePermissions `type:"structure"` - // The state of the directory's registration with Amazon WorkSpaces. + // The state of the directory's registration with Amazon WorkSpaces. After a + // directory is deregistered, the DEREGISTERED state is returned very briefly + // before the directory metadata is cleaned up, so this state is rarely returned. + // To confirm that a directory is deregistered, check for the directory ID by + // using DescribeWorkspaceDirectories (https://docs.aws.amazon.com/workspaces/latest/api/API_DescribeWorkspaceDirectories.html). + // If the directory ID isn't returned, then the directory has been successfully + // deregistered. State *string `type:"string" enum:"WorkspaceDirectoryState"` // The identifiers of the subnets used with the directory. @@ -9175,6 +11113,11 @@ func (s *WorkspaceDirectory) SetWorkspaceSecurityGroupId(v string) *WorkspaceDir type WorkspaceImage struct { _ struct{} `type:"structure"` + // The date when the image was created. If the image has been shared, the AWS + // account that the image has been shared with sees the original creation date + // of the image. + Created *time.Time `type:"timestamp"` + // The description of the image. Description *string `min:"1" type:"string"` @@ -9193,6 +11136,9 @@ type WorkspaceImage struct { // The operating system that the image is running. 
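// Illustrative sketch: the new EnableWorkDocs flag on
// WorkspaceCreationProperties is applied per directory, typically through
// ModifyWorkspaceCreationProperties. The ModifyWorkspaceCreationPropertiesInput
// shape, its ResourceId field, and the directory ID are assumptions for
// illustration.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workspaces"
)

func main() {
	svc := workspaces.New(session.Must(session.NewSession()))

	props := (&workspaces.WorkspaceCreationProperties{}).
		SetEnableWorkDocs(false). // new WorkSpaces in this directory launch without WorkDocs
		SetEnableInternetAccess(true)

	_, err := svc.ModifyWorkspaceCreationProperties(&workspaces.ModifyWorkspaceCreationPropertiesInput{
		ResourceId:                  aws.String("d-0123456789"), // placeholder directory ID
		WorkspaceCreationProperties: props,
	})
	if err != nil {
		log.Fatal(err)
	}
}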
OperatingSystem *OperatingSystem `type:"structure"` + // The identifier of the AWS account that owns the image. + OwnerAccountId *string `type:"string"` + // Specifies whether the image is running on dedicated hardware. When Bring // Your Own License (BYOL) is enabled, this value is set to DEDICATED. For more // information, see Bring Your Own Windows Desktop Images (https://docs.aws.amazon.com/workspaces/latest/adminguide/byol-windows-images.html). @@ -9212,6 +11158,12 @@ func (s WorkspaceImage) GoString() string { return s.String() } +// SetCreated sets the Created field's value. +func (s *WorkspaceImage) SetCreated(v time.Time) *WorkspaceImage { + s.Created = &v + return s +} + // SetDescription sets the Description field's value. func (s *WorkspaceImage) SetDescription(v string) *WorkspaceImage { s.Description = &v @@ -9248,6 +11200,12 @@ func (s *WorkspaceImage) SetOperatingSystem(v *OperatingSystem) *WorkspaceImage return s } +// SetOwnerAccountId sets the OwnerAccountId field's value. +func (s *WorkspaceImage) SetOwnerAccountId(v string) *WorkspaceImage { + s.OwnerAccountId = &v + return s +} + // SetRequiredTenancy sets the RequiredTenancy field's value. func (s *WorkspaceImage) SetRequiredTenancy(v string) *WorkspaceImage { s.RequiredTenancy = &v @@ -9267,7 +11225,8 @@ type WorkspaceProperties struct { // The compute type. For more information, see Amazon WorkSpaces Bundles (http://aws.amazon.com/workspaces/details/#Amazon_WorkSpaces_Bundles). ComputeTypeName *string `type:"string" enum:"Compute"` - // The size of the root volume. + // The size of the root volume. For important information about how to modify + // the size of the root and user volumes, see Modify a WorkSpace (https://docs.aws.amazon.com/workspaces/latest/adminguide/modify-workspaces.html). RootVolumeSizeGib *int64 `type:"integer"` // The running mode. For more information, see Manage the WorkSpace Running @@ -9278,7 +11237,8 @@ type WorkspaceProperties struct { // Configured in 60-minute intervals. RunningModeAutoStopTimeoutInMinutes *int64 `type:"integer"` - // The size of the user storage. + // The size of the user storage. For important information about how to modify + // the size of the root and user volumes, see Modify a WorkSpace (https://docs.aws.amazon.com/workspaces/latest/adminguide/modify-workspaces.html). UserVolumeSizeGib *int64 `type:"integer"` } @@ -9459,8 +11419,8 @@ func (s *WorkspaceRequest) SetWorkspaceProperties(v *WorkspaceProperties) *Works // role before you can register a directory. For more information, see Creating // the workspaces_DefaultRole Role (https://docs.aws.amazon.com/workspaces/latest/adminguide/workspaces-access-control.html#create-default-role). type WorkspacesDefaultRoleNotFoundException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"message" type:"string"` } @@ -9477,17 +11437,17 @@ func (s WorkspacesDefaultRoleNotFoundException) GoString() string { func newErrorWorkspacesDefaultRoleNotFoundException(v protocol.ResponseMetadata) error { return &WorkspacesDefaultRoleNotFoundException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s WorkspacesDefaultRoleNotFoundException) Code() string { +func (s *WorkspacesDefaultRoleNotFoundException) Code() string { return "WorkspacesDefaultRoleNotFoundException" } // Message returns the exception's message. 
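// Illustrative sketch: reading the Created and OwnerAccountId fields that are
// new on WorkspaceImage above. The DescribeWorkspaceImages call and the
// Images/ImageId field names are assumptions drawn from the rest of this
// package rather than from this hunk.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/workspaces"
)

func main() {
	svc := workspaces.New(session.Must(session.NewSession()))

	out, err := svc.DescribeWorkspaceImages(&workspaces.DescribeWorkspaceImagesInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range out.Images {
		// Created and OwnerAccountId are the fields introduced in this change.
		fmt.Println(aws.StringValue(img.ImageId),
			aws.StringValue(img.OwnerAccountId),
			aws.TimeValue(img.Created))
	}
}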
-func (s WorkspacesDefaultRoleNotFoundException) Message() string { +func (s *WorkspacesDefaultRoleNotFoundException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -9495,22 +11455,22 @@ func (s WorkspacesDefaultRoleNotFoundException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s WorkspacesDefaultRoleNotFoundException) OrigErr() error { +func (s *WorkspacesDefaultRoleNotFoundException) OrigErr() error { return nil } -func (s WorkspacesDefaultRoleNotFoundException) Error() string { +func (s *WorkspacesDefaultRoleNotFoundException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s WorkspacesDefaultRoleNotFoundException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *WorkspacesDefaultRoleNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s WorkspacesDefaultRoleNotFoundException) RequestID() string { - return s.respMetadata.RequestID +func (s *WorkspacesDefaultRoleNotFoundException) RequestID() string { + return s.RespMetadata.RequestID } const ( @@ -9521,6 +11481,58 @@ const ( AccessPropertyValueDeny = "DENY" ) +// AccessPropertyValue_Values returns all elements of the AccessPropertyValue enum +func AccessPropertyValue_Values() []string { + return []string{ + AccessPropertyValueAllow, + AccessPropertyValueDeny, + } +} + +const ( + // ApplicationMicrosoftOffice2016 is a Application enum value + ApplicationMicrosoftOffice2016 = "Microsoft_Office_2016" + + // ApplicationMicrosoftOffice2019 is a Application enum value + ApplicationMicrosoftOffice2019 = "Microsoft_Office_2019" +) + +// Application_Values returns all elements of the Application enum +func Application_Values() []string { + return []string{ + ApplicationMicrosoftOffice2016, + ApplicationMicrosoftOffice2019, + } +} + +const ( + // AssociationStatusNotAssociated is a AssociationStatus enum value + AssociationStatusNotAssociated = "NOT_ASSOCIATED" + + // AssociationStatusAssociatedWithOwnerAccount is a AssociationStatus enum value + AssociationStatusAssociatedWithOwnerAccount = "ASSOCIATED_WITH_OWNER_ACCOUNT" + + // AssociationStatusAssociatedWithSharedAccount is a AssociationStatus enum value + AssociationStatusAssociatedWithSharedAccount = "ASSOCIATED_WITH_SHARED_ACCOUNT" + + // AssociationStatusPendingAssociation is a AssociationStatus enum value + AssociationStatusPendingAssociation = "PENDING_ASSOCIATION" + + // AssociationStatusPendingDisassociation is a AssociationStatus enum value + AssociationStatusPendingDisassociation = "PENDING_DISASSOCIATION" +) + +// AssociationStatus_Values returns all elements of the AssociationStatus enum +func AssociationStatus_Values() []string { + return []string{ + AssociationStatusNotAssociated, + AssociationStatusAssociatedWithOwnerAccount, + AssociationStatusAssociatedWithSharedAccount, + AssociationStatusPendingAssociation, + AssociationStatusPendingDisassociation, + } +} + const ( // ComputeValue is a Compute enum value ComputeValue = "VALUE" @@ -9544,6 +11556,39 @@ const ( ComputeGraphicspro = "GRAPHICSPRO" ) +// Compute_Values returns all elements of the Compute enum +func Compute_Values() []string { + return []string{ + ComputeValue, + ComputeStandard, + ComputePerformance, + ComputePower, + ComputeGraphics, + ComputePowerpro, + ComputeGraphicspro, + } +} + +const ( + // ConnectionAliasStateCreating is a 
ConnectionAliasState enum value + ConnectionAliasStateCreating = "CREATING" + + // ConnectionAliasStateCreated is a ConnectionAliasState enum value + ConnectionAliasStateCreated = "CREATED" + + // ConnectionAliasStateDeleting is a ConnectionAliasState enum value + ConnectionAliasStateDeleting = "DELETING" +) + +// ConnectionAliasState_Values returns all elements of the ConnectionAliasState enum +func ConnectionAliasState_Values() []string { + return []string{ + ConnectionAliasStateCreating, + ConnectionAliasStateCreated, + ConnectionAliasStateDeleting, + } +} + const ( // ConnectionStateConnected is a ConnectionState enum value ConnectionStateConnected = "CONNECTED" @@ -9555,6 +11600,15 @@ const ( ConnectionStateUnknown = "UNKNOWN" ) +// ConnectionState_Values returns all elements of the ConnectionState enum +func ConnectionState_Values() []string { + return []string{ + ConnectionStateConnected, + ConnectionStateDisconnected, + ConnectionStateUnknown, + } +} + const ( // DedicatedTenancyModificationStateEnumPending is a DedicatedTenancyModificationStateEnum enum value DedicatedTenancyModificationStateEnumPending = "PENDING" @@ -9566,11 +11620,27 @@ const ( DedicatedTenancyModificationStateEnumFailed = "FAILED" ) +// DedicatedTenancyModificationStateEnum_Values returns all elements of the DedicatedTenancyModificationStateEnum enum +func DedicatedTenancyModificationStateEnum_Values() []string { + return []string{ + DedicatedTenancyModificationStateEnumPending, + DedicatedTenancyModificationStateEnumCompleted, + DedicatedTenancyModificationStateEnumFailed, + } +} + const ( // DedicatedTenancySupportEnumEnabled is a DedicatedTenancySupportEnum enum value DedicatedTenancySupportEnumEnabled = "ENABLED" ) +// DedicatedTenancySupportEnum_Values returns all elements of the DedicatedTenancySupportEnum enum +func DedicatedTenancySupportEnum_Values() []string { + return []string{ + DedicatedTenancySupportEnumEnabled, + } +} + const ( // DedicatedTenancySupportResultEnumEnabled is a DedicatedTenancySupportResultEnum enum value DedicatedTenancySupportResultEnumEnabled = "ENABLED" @@ -9579,6 +11649,30 @@ const ( DedicatedTenancySupportResultEnumDisabled = "DISABLED" ) +// DedicatedTenancySupportResultEnum_Values returns all elements of the DedicatedTenancySupportResultEnum enum +func DedicatedTenancySupportResultEnum_Values() []string { + return []string{ + DedicatedTenancySupportResultEnumEnabled, + DedicatedTenancySupportResultEnumDisabled, + } +} + +const ( + // ImageTypeOwned is a ImageType enum value + ImageTypeOwned = "OWNED" + + // ImageTypeShared is a ImageType enum value + ImageTypeShared = "SHARED" +) + +// ImageType_Values returns all elements of the ImageType enum +func ImageType_Values() []string { + return []string{ + ImageTypeOwned, + ImageTypeShared, + } +} + const ( // ModificationResourceEnumRootVolume is a ModificationResourceEnum enum value ModificationResourceEnumRootVolume = "ROOT_VOLUME" @@ -9590,6 +11684,15 @@ const ( ModificationResourceEnumComputeType = "COMPUTE_TYPE" ) +// ModificationResourceEnum_Values returns all elements of the ModificationResourceEnum enum +func ModificationResourceEnum_Values() []string { + return []string{ + ModificationResourceEnumRootVolume, + ModificationResourceEnumUserVolume, + ModificationResourceEnumComputeType, + } +} + const ( // ModificationStateEnumUpdateInitiated is a ModificationStateEnum enum value ModificationStateEnumUpdateInitiated = "UPDATE_INITIATED" @@ -9598,6 +11701,14 @@ const ( ModificationStateEnumUpdateInProgress = 
"UPDATE_IN_PROGRESS" ) +// ModificationStateEnum_Values returns all elements of the ModificationStateEnum enum +func ModificationStateEnum_Values() []string { + return []string{ + ModificationStateEnumUpdateInitiated, + ModificationStateEnumUpdateInProgress, + } +} + const ( // OperatingSystemTypeWindows is a OperatingSystemType enum value OperatingSystemTypeWindows = "WINDOWS" @@ -9606,6 +11717,14 @@ const ( OperatingSystemTypeLinux = "LINUX" ) +// OperatingSystemType_Values returns all elements of the OperatingSystemType enum +func OperatingSystemType_Values() []string { + return []string{ + OperatingSystemTypeWindows, + OperatingSystemTypeLinux, + } +} + const ( // ReconnectEnumEnabled is a ReconnectEnum enum value ReconnectEnumEnabled = "ENABLED" @@ -9614,6 +11733,14 @@ const ( ReconnectEnumDisabled = "DISABLED" ) +// ReconnectEnum_Values returns all elements of the ReconnectEnum enum +func ReconnectEnum_Values() []string { + return []string{ + ReconnectEnumEnabled, + ReconnectEnumDisabled, + } +} + const ( // RunningModeAutoStop is a RunningMode enum value RunningModeAutoStop = "AUTO_STOP" @@ -9622,6 +11749,14 @@ const ( RunningModeAlwaysOn = "ALWAYS_ON" ) +// RunningMode_Values returns all elements of the RunningMode enum +func RunningMode_Values() []string { + return []string{ + RunningModeAutoStop, + RunningModeAlwaysOn, + } +} + const ( // TargetWorkspaceStateAvailable is a TargetWorkspaceState enum value TargetWorkspaceStateAvailable = "AVAILABLE" @@ -9630,6 +11765,14 @@ const ( TargetWorkspaceStateAdminMaintenance = "ADMIN_MAINTENANCE" ) +// TargetWorkspaceState_Values returns all elements of the TargetWorkspaceState enum +func TargetWorkspaceState_Values() []string { + return []string{ + TargetWorkspaceStateAvailable, + TargetWorkspaceStateAdminMaintenance, + } +} + const ( // TenancyDedicated is a Tenancy enum value TenancyDedicated = "DEDICATED" @@ -9638,6 +11781,14 @@ const ( TenancyShared = "SHARED" ) +// Tenancy_Values returns all elements of the Tenancy enum +func Tenancy_Values() []string { + return []string{ + TenancyDedicated, + TenancyShared, + } +} + const ( // WorkspaceDirectoryStateRegistering is a WorkspaceDirectoryState enum value WorkspaceDirectoryStateRegistering = "REGISTERING" @@ -9655,6 +11806,17 @@ const ( WorkspaceDirectoryStateError = "ERROR" ) +// WorkspaceDirectoryState_Values returns all elements of the WorkspaceDirectoryState enum +func WorkspaceDirectoryState_Values() []string { + return []string{ + WorkspaceDirectoryStateRegistering, + WorkspaceDirectoryStateRegistered, + WorkspaceDirectoryStateDeregistering, + WorkspaceDirectoryStateDeregistered, + WorkspaceDirectoryStateError, + } +} + const ( // WorkspaceDirectoryTypeSimpleAd is a WorkspaceDirectoryType enum value WorkspaceDirectoryTypeSimpleAd = "SIMPLE_AD" @@ -9663,6 +11825,14 @@ const ( WorkspaceDirectoryTypeAdConnector = "AD_CONNECTOR" ) +// WorkspaceDirectoryType_Values returns all elements of the WorkspaceDirectoryType enum +func WorkspaceDirectoryType_Values() []string { + return []string{ + WorkspaceDirectoryTypeSimpleAd, + WorkspaceDirectoryTypeAdConnector, + } +} + const ( // WorkspaceImageIngestionProcessByolRegular is a WorkspaceImageIngestionProcess enum value WorkspaceImageIngestionProcessByolRegular = "BYOL_REGULAR" @@ -9674,6 +11844,15 @@ const ( WorkspaceImageIngestionProcessByolGraphicspro = "BYOL_GRAPHICSPRO" ) +// WorkspaceImageIngestionProcess_Values returns all elements of the WorkspaceImageIngestionProcess enum +func WorkspaceImageIngestionProcess_Values() []string { + 
return []string{ + WorkspaceImageIngestionProcessByolRegular, + WorkspaceImageIngestionProcessByolGraphics, + WorkspaceImageIngestionProcessByolGraphicspro, + } +} + const ( // WorkspaceImageRequiredTenancyDefault is a WorkspaceImageRequiredTenancy enum value WorkspaceImageRequiredTenancyDefault = "DEFAULT" @@ -9682,6 +11861,14 @@ const ( WorkspaceImageRequiredTenancyDedicated = "DEDICATED" ) +// WorkspaceImageRequiredTenancy_Values returns all elements of the WorkspaceImageRequiredTenancy enum +func WorkspaceImageRequiredTenancy_Values() []string { + return []string{ + WorkspaceImageRequiredTenancyDefault, + WorkspaceImageRequiredTenancyDedicated, + } +} + const ( // WorkspaceImageStateAvailable is a WorkspaceImageState enum value WorkspaceImageStateAvailable = "AVAILABLE" @@ -9693,6 +11880,15 @@ const ( WorkspaceImageStateError = "ERROR" ) +// WorkspaceImageState_Values returns all elements of the WorkspaceImageState enum +func WorkspaceImageState_Values() []string { + return []string{ + WorkspaceImageStateAvailable, + WorkspaceImageStatePending, + WorkspaceImageStateError, + } +} + const ( // WorkspaceStatePending is a WorkspaceState enum value WorkspaceStatePending = "PENDING" @@ -9745,3 +11941,26 @@ const ( // WorkspaceStateError is a WorkspaceState enum value WorkspaceStateError = "ERROR" ) + +// WorkspaceState_Values returns all elements of the WorkspaceState enum +func WorkspaceState_Values() []string { + return []string{ + WorkspaceStatePending, + WorkspaceStateAvailable, + WorkspaceStateImpaired, + WorkspaceStateUnhealthy, + WorkspaceStateRebooting, + WorkspaceStateStarting, + WorkspaceStateRebuilding, + WorkspaceStateRestoring, + WorkspaceStateMaintenance, + WorkspaceStateAdminMaintenance, + WorkspaceStateTerminating, + WorkspaceStateTerminated, + WorkspaceStateSuspended, + WorkspaceStateUpdating, + WorkspaceStateStopping, + WorkspaceStateStopped, + WorkspaceStateError, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go index dc077391c..e9700bd88 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/xray/api.go b/vendor/github.com/aws/aws-sdk-go/service/xray/api.go index cdd6fd1f9..964f2763c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/xray/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/xray/api.go @@ -1740,6 +1740,93 @@ func (c *XRay) GetTraceSummariesPagesWithContext(ctx aws.Context, input *GetTrac return p.Err() } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
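// Illustrative sketch: the generated *_Values() helpers above are convenient
// for keeping Terraform schema validation in sync with the SDK's enums, for
// example via validation.StringInSlice. The Terraform plugin SDK import paths
// are an assumption and may differ from what this repository vendors.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/workspaces"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// runningModeSchema returns a schema attribute whose accepted values track
// workspaces.RunningMode_Values() instead of a hand-maintained string list.
func runningModeSchema() *schema.Schema {
	return &schema.Schema{
		Type:         schema.TypeString,
		Optional:     true,
		Default:      workspaces.RunningModeAlwaysOn,
		ValidateFunc: validation.StringInSlice(workspaces.RunningMode_Values(), false),
	}
}

func main() {
	// The same pattern applies to the other enums added here.
	fmt.Println(workspaces.WorkspaceState_Values())
	_ = runningModeSchema()
}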
+// +// See ListTagsForResource for more information on using the ListTagsForResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/xray-2016-04-12/ListTagsForResource +func (c *XRay) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/ListTagsForResource", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + output = &ListTagsForResourceOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListTagsForResource API operation for AWS X-Ray. +// +// Returns a list of tags that are applied to the specified AWS X-Ray group +// or sampling rule. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS X-Ray's +// API operation ListTagsForResource for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is missing required parameters or has invalid parameters. +// +// * ThrottledException +// The request exceeds the maximum number of requests per second. +// +// * ResourceNotFoundException +// The resource was not found. Verify that the name or Amazon Resource Name +// (ARN) of the resource is correct. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/xray-2016-04-12/ListTagsForResource +func (c *XRay) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + return out, req.Send() +} + +// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsForResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *XRay) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutEncryptionConfig = "PutEncryptionConfig" // PutEncryptionConfigRequest generates a "aws/request.Request" representing the @@ -1958,7 +2045,7 @@ func (c *XRay) PutTraceSegmentsRequest(input *PutTraceSegmentsInput) (req *reque // schema, see AWS X-Ray Segment Documents (https://docs.aws.amazon.com/xray/latest/devguide/xray-api-segmentdocuments.html) // in the AWS X-Ray Developer Guide. // -// Required Segment Document Fields +// Required segment document fields // // * name - The name of the service that handled the request. 
// @@ -1976,18 +2063,17 @@ func (c *XRay) PutTraceSegmentsRequest(input *PutTraceSegmentsInput) (req *reque // or 1.480615200090E9. Specify either an end_time or in_progress. // // * in_progress - Set to true instead of specifying an end_time to record -// that a segment has been started, but is not complete. Send an in progress +// that a segment has been started, but is not complete. Send an in-progress // segment when your application receives a request that will take a long -// time to serve, to trace the fact that the request was received. When the -// response is sent, send the complete segment to overwrite the in-progress -// segment. +// time to serve, to trace that the request was received. When the response +// is sent, send the complete segment to overwrite the in-progress segment. // // A trace_id consists of three numbers separated by hyphens. For example, 1-58406520-a006649127e371903a2de979. // This includes: // // Trace ID Format // -// * The version number, i.e. 1. +// * The version number, for instance, 1. // // * The time of the original request, in Unix epoch time, in 8 hexadecimal // digits. For example, 10:00AM December 2nd, 2016 PST in epoch time is 1480615200 @@ -2032,6 +2118,184 @@ func (c *XRay) PutTraceSegmentsWithContext(ctx aws.Context, input *PutTraceSegme return out, req.Send() } +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/xray-2016-04-12/TagResource +func (c *XRay) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/TagResource", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagResource API operation for AWS X-Ray. +// +// Applies tags to an existing AWS X-Ray group or sampling rule. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS X-Ray's +// API operation TagResource for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is missing required parameters or has invalid parameters. +// +// * ThrottledException +// The request exceeds the maximum number of requests per second. +// +// * ResourceNotFoundException +// The resource was not found. 
Verify that the name or Amazon Resource Name +// (ARN) of the resource is correct. +// +// * TooManyTagsException +// You have exceeded the maximum number of tags you can apply to this resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/xray-2016-04-12/TagResource +func (c *XRay) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + return out, req.Send() +} + +// TagResourceWithContext is the same as TagResource with the addition of +// the ability to pass a context and additional request options. +// +// See TagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *XRay) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/xray-2016-04-12/UntagResource +func (c *XRay) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/UntagResource", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagResource API operation for AWS X-Ray. +// +// Removes tags from an AWS X-Ray group or sampling rule. You cannot edit or +// delete system tags (those with an aws: prefix). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS X-Ray's +// API operation UntagResource for usage and error information. +// +// Returned Error Types: +// * InvalidRequestException +// The request is missing required parameters or has invalid parameters. +// +// * ThrottledException +// The request exceeds the maximum number of requests per second. +// +// * ResourceNotFoundException +// The resource was not found. 
Verify that the name or Amazon Resource Name +// (ARN) of the resource is correct. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/xray-2016-04-12/UntagResource +func (c *XRay) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *XRay) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateGroup = "UpdateGroup" // UpdateGroupRequest generates a "aws/request.Request" representing the @@ -2238,7 +2502,7 @@ func (s *Alias) SetType(v string) *Alias { return s } -// Value of a segment annotation. Has one of three value types: Number, Boolean +// Value of a segment annotation. Has one of three value types: Number, Boolean, // or String. type AnnotationValue struct { _ struct{} `type:"structure"` @@ -2281,11 +2545,11 @@ func (s *AnnotationValue) SetStringValue(v string) *AnnotationValue { return s } -// A list of availability zones corresponding to the segments in a trace. +// A list of Availability Zones corresponding to the segments in a trace. type AvailabilityZoneDetail struct { _ struct{} `type:"structure"` - // The name of a corresponding availability zone. + // The name of a corresponding Availability Zone. Name *string `type:"string"` } @@ -2466,6 +2730,37 @@ type CreateGroupInput struct { // // GroupName is a required field GroupName *string `min:"1" type:"string" required:"true"` + + // The structure containing configurations related to insights. + // + // * The InsightsEnabled boolean can be set to true to enable insights for + // the new group or false to disable insights for the new group. + // + // * The NotifcationsEnabled boolean can be set to true to enable insights + // notifications for the new group. Notifications may only be enabled on + // a group with InsightsEnabled set to true. + InsightsConfiguration *InsightsConfiguration `type:"structure"` + + // A map that contains one or more tag keys and tag values to attach to an X-Ray + // group. For more information about ways to use tags, see Tagging AWS resources + // (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) in the AWS + // General Reference. + // + // The following restrictions apply to tags: + // + // * Maximum number of user-applied tags per resource: 50 + // + // * Maximum tag key length: 128 Unicode characters + // + // * Maximum tag value length: 256 Unicode characters + // + // * Valid values for key and value: a-z, A-Z, 0-9, space, and the following + // characters: _ . : / = + - and @ + // + // * Tag keys and values are case sensitive. + // + // * Don't use aws: as a prefix for keys; it's reserved for AWS use. 
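// Illustrative sketch: round-tripping tags on an X-Ray sampling rule with the
// TagResource / ListTagsForResource / UntagResource operations added above.
// The ResourceARN/Tags/TagKeys field names and the ARN value are assumptions
// for illustration.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/xray"
)

func main() {
	svc := xray.New(session.Must(session.NewSession()))
	arn := aws.String("arn:aws:xray:us-east-1:111122223333:sampling-rule/example") // placeholder ARN

	if _, err := svc.TagResource(&xray.TagResourceInput{
		ResourceARN: arn,
		Tags: []*xray.Tag{
			{Key: aws.String("Environment"), Value: aws.String("test")},
		},
	}); err != nil {
		log.Fatal(err)
	}

	tags, err := svc.ListTagsForResource(&xray.ListTagsForResourceInput{ResourceARN: arn})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tags)

	if _, err := svc.UntagResource(&xray.UntagResourceInput{
		ResourceARN: arn,
		TagKeys:     []*string{aws.String("Environment")},
	}); err != nil {
		log.Fatal(err)
	}
}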
+ Tags []*Tag `type:"list"` } // String returns the string representation @@ -2487,6 +2782,16 @@ func (s *CreateGroupInput) Validate() error { if s.GroupName != nil && len(*s.GroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2506,12 +2811,25 @@ func (s *CreateGroupInput) SetGroupName(v string) *CreateGroupInput { return s } +// SetInsightsConfiguration sets the InsightsConfiguration field's value. +func (s *CreateGroupInput) SetInsightsConfiguration(v *InsightsConfiguration) *CreateGroupInput { + s.InsightsConfiguration = v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateGroupInput) SetTags(v []*Tag) *CreateGroupInput { + s.Tags = v + return s +} + type CreateGroupOutput struct { _ struct{} `type:"structure"` // The group that was created. Contains the name of the group that was created, - // the ARN of the group that was generated based on the group name, and the - // filter expression that was assigned to the group. + // the Amazon Resource Name (ARN) of the group that was generated based on the + // group name, the filter expression, and the insight configuration that was + // assigned to the group. Group *Group `type:"structure"` } @@ -2538,6 +2856,27 @@ type CreateSamplingRuleInput struct { // // SamplingRule is a required field SamplingRule *SamplingRule `type:"structure" required:"true"` + + // A map that contains one or more tag keys and tag values to attach to an X-Ray + // sampling rule. For more information about ways to use tags, see Tagging AWS + // resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in the AWS General Reference. + // + // The following restrictions apply to tags: + // + // * Maximum number of user-applied tags per resource: 50 + // + // * Maximum tag key length: 128 Unicode characters + // + // * Maximum tag value length: 256 Unicode characters + // + // * Valid values for key and value: a-z, A-Z, 0-9, space, and the following + // characters: _ . : / = + - and @ + // + // * Tag keys and values are case sensitive. + // + // * Don't use aws: as a prefix for keys; it's reserved for AWS use. + Tags []*Tag `type:"list"` } // String returns the string representation @@ -2561,6 +2900,16 @@ func (s *CreateSamplingRuleInput) Validate() error { invalidParams.AddNested("SamplingRule", err.(request.ErrInvalidParams)) } } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2574,6 +2923,12 @@ func (s *CreateSamplingRuleInput) SetSamplingRule(v *SamplingRule) *CreateSampli return s } +// SetTags sets the Tags field's value. +func (s *CreateSamplingRuleInput) SetTags(v []*Tag) *CreateSamplingRuleInput { + s.Tags = v + return s +} + type CreateSamplingRuleOutput struct { _ struct{} `type:"structure"` @@ -2893,6 +3248,9 @@ func (s *EncryptionConfig) SetType(v string) *EncryptionConfig { type ErrorRootCause struct { _ struct{} `type:"structure"` + // A flag that denotes that the root cause impacts the trace client. 
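// Illustrative sketch: creating an X-Ray group with the new
// InsightsConfiguration and Tags parameters documented above. The
// FilterExpression field and the NotificationsEnabled field name are
// assumptions drawn from the surrounding documentation.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/xray"
)

func main() {
	svc := xray.New(session.Must(session.NewSession()))

	out, err := svc.CreateGroup(&xray.CreateGroupInput{
		GroupName:        aws.String("example-group"),
		FilterExpression: aws.String(`service("example-api")`), // assumed field
		InsightsConfiguration: &xray.InsightsConfiguration{
			InsightsEnabled:      aws.Bool(true),
			NotificationsEnabled: aws.Bool(true), // only valid when insights are enabled
		},
		Tags: []*xray.Tag{
			{Key: aws.String("Environment"), Value: aws.String("test")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Group.GroupARN))
}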
+ ClientImpacting *bool `type:"boolean"` + // A list of services corresponding to an error. A service identifies a segment // and it contains a name, account ID, type, and inferred flag. Services []*ErrorRootCauseService `type:"list"` @@ -2908,6 +3266,12 @@ func (s ErrorRootCause) GoString() string { return s.String() } +// SetClientImpacting sets the ClientImpacting field's value. +func (s *ErrorRootCause) SetClientImpacting(v bool) *ErrorRootCause { + s.ClientImpacting = &v + return s +} + // SetServices sets the Services field's value. func (s *ErrorRootCause) SetServices(v []*ErrorRootCauseService) *ErrorRootCause { s.Services = v @@ -3073,6 +3437,9 @@ func (s *ErrorStatistics) SetTotalCount(v int64) *ErrorStatistics { type FaultRootCause struct { _ struct{} `type:"structure"` + // A flag that denotes that the root cause impacts the trace client. + ClientImpacting *bool `type:"boolean"` + // A list of corresponding services. A service identifies a segment and it contains // a name, account ID, type, and inferred flag. Services []*FaultRootCauseService `type:"list"` @@ -3088,6 +3455,12 @@ func (s FaultRootCause) GoString() string { return s.String() } +// SetClientImpacting sets the ClientImpacting field's value. +func (s *FaultRootCause) SetClientImpacting(v bool) *FaultRootCause { + s.ClientImpacting = &v + return s +} + // SetServices sets the Services field's value. func (s *FaultRootCause) SetServices(v []*FaultRootCauseService) *FaultRootCause { s.Services = v @@ -3329,7 +3702,8 @@ type GetGroupOutput struct { _ struct{} `type:"structure"` // The group that was requested. Contains the name of the group, the ARN of - // the group, and the filter expression that assigned to the group. + // the group, the filter expression, and the insight configuration assigned + // to the group. Group *Group `type:"structure"` } @@ -3626,10 +4000,11 @@ type GetServiceGraphInput struct { // EndTime is a required field EndTime *time.Time `type:"timestamp" required:"true"` - // The ARN of a group to generate a graph based on. + // The Amazon Resource Name (ARN) of a group based on which you want to generate + // a graph. GroupARN *string `min:"1" type:"string"` - // The name of a group to generate a graph based on. + // The name of a group based on which you want to generate a graph. GroupName *string `min:"1" type:"string"` // Pagination token. @@ -3778,7 +4153,8 @@ type GetTimeSeriesServiceStatisticsInput struct { // edge statistics are returned. EntitySelectorExpression *string `min:"1" type:"string"` - // The ARN of the group for which to pull statistics from. + // The Amazon Resource Name (ARN) of the group for which to pull statistics + // from. GroupARN *string `min:"1" type:"string"` // The case-sensitive name of the group for which to pull statistics from. @@ -3877,8 +4253,8 @@ type GetTimeSeriesServiceStatisticsOutput struct { _ struct{} `type:"structure"` // A flag indicating whether or not a group's filter expression has been consistent, - // or if a returned aggregation may show statistics from an older version of - // the group's filter expression. + // or if a returned aggregation might show statistics from an older version + // of the group's filter expression. ContainsOldGroupVersions *bool `type:"boolean"` // Pagination token. @@ -4014,7 +4390,7 @@ type GetTraceSummariesInput struct { // Set to true to get summaries for only a subset of available traces. Sampling *bool `type:"boolean"` - // A paramater to indicate whether to enable sampling on trace summaries. 
Input + // A parameter to indicate whether to enable sampling on trace summaries. Input // parameters are Name and Value. SamplingStrategy *SamplingStrategy `type:"structure"` @@ -4104,7 +4480,7 @@ type GetTraceSummariesOutput struct { // If the requested time frame contained more than one page of results, you // can use this token to retrieve the next page. The first page contains the - // most most recent results, closest to the end of the time frame. + // most recent results, closest to the end of the time frame. NextToken *string `type:"string"` // Trace IDs and annotations for traces that were found in the specified time @@ -4157,11 +4533,20 @@ type Group struct { // The filter expression defining the parameters to include traces. FilterExpression *string `type:"string"` - // The ARN of the group generated based on the GroupName. + // The Amazon Resource Name (ARN) of the group generated based on the GroupName. GroupARN *string `type:"string"` // The unique case-sensitive name of the group. GroupName *string `type:"string"` + + // The structure containing configurations related to insights. + // + // * The InsightsEnabled boolean can be set to true to enable insights for + // the group or false to disable insights for the group. + // + // * The NotifcationsEnabled boolean can be set to true to enable insights + // notifications through Amazon EventBridge for the group. + InsightsConfiguration *InsightsConfiguration `type:"structure"` } // String returns the string representation @@ -4192,6 +4577,12 @@ func (s *Group) SetGroupName(v string) *Group { return s } +// SetInsightsConfiguration sets the InsightsConfiguration field's value. +func (s *Group) SetInsightsConfiguration(v *InsightsConfiguration) *Group { + s.InsightsConfiguration = v + return s +} + // Details for a group without metadata. type GroupSummary struct { _ struct{} `type:"structure"` @@ -4204,6 +4595,16 @@ type GroupSummary struct { // The unique case-sensitive name of the group. GroupName *string `type:"string"` + + // The structure containing configurations related to insights. + // + // * The InsightsEnabled boolean can be set to true to enable insights for + // the group or false to disable insights for the group. + // + // * The NotificationsEnabled boolean can be set to true to enable insights + // notifications. Notifications can only be enabled on a group with InsightsEnabled + // set to true. + InsightsConfiguration *InsightsConfiguration `type:"structure"` } // String returns the string representation @@ -4234,6 +4635,12 @@ func (s *GroupSummary) SetGroupName(v string) *GroupSummary { return s } +// SetInsightsConfiguration sets the InsightsConfiguration field's value. +func (s *GroupSummary) SetInsightsConfiguration(v *InsightsConfiguration) *GroupSummary { + s.InsightsConfiguration = v + return s +} + // An entry in a histogram for a statistic. A histogram maps the range of observed // values on the X axis, and the prevalence of each value on the Y axis. type HistogramEntry struct { @@ -4328,16 +4735,52 @@ func (s *Http) SetUserAgent(v string) *Http { return s } -// A list of EC2 instance IDs corresponding to the segments in a trace. -type InstanceIdDetail struct { +// The structure containing configurations related to insights. +type InsightsConfiguration struct { _ struct{} `type:"structure"` - // The ID of a corresponding EC2 instance. - Id *string `type:"string"` + // Set the InsightsEnabled value to true to enable insights or false to disable + // insights. 
+ InsightsEnabled *bool `type:"boolean"` + + // Set the NotificationsEnabled value to true to enable insights notifications. + // Notifications can only be enabled on a group with InsightsEnabled set to + // true. + NotificationsEnabled *bool `type:"boolean"` } // String returns the string representation -func (s InstanceIdDetail) String() string { +func (s InsightsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InsightsConfiguration) GoString() string { + return s.String() +} + +// SetInsightsEnabled sets the InsightsEnabled field's value. +func (s *InsightsConfiguration) SetInsightsEnabled(v bool) *InsightsConfiguration { + s.InsightsEnabled = &v + return s +} + +// SetNotificationsEnabled sets the NotificationsEnabled field's value. +func (s *InsightsConfiguration) SetNotificationsEnabled(v bool) *InsightsConfiguration { + s.NotificationsEnabled = &v + return s +} + +// A list of EC2 instance IDs corresponding to the segments in a trace. +type InstanceIdDetail struct { + _ struct{} `type:"structure"` + + // The ID of a corresponding EC2 instance. + Id *string `type:"string"` +} + +// String returns the string representation +func (s InstanceIdDetail) String() string { return awsutil.Prettify(s) } @@ -4354,8 +4797,8 @@ func (s *InstanceIdDetail) SetId(v string) *InstanceIdDetail { // The request is missing required parameters or has invalid parameters. type InvalidRequestException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4372,17 +4815,17 @@ func (s InvalidRequestException) GoString() string { func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { return &InvalidRequestException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s InvalidRequestException) Code() string { +func (s *InvalidRequestException) Code() string { return "InvalidRequestException" } // Message returns the exception's message. -func (s InvalidRequestException) Message() string { +func (s *InvalidRequestException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4390,22 +4833,108 @@ func (s InvalidRequestException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s InvalidRequestException) OrigErr() error { +func (s *InvalidRequestException) OrigErr() error { return nil } -func (s InvalidRequestException) Error() string { +func (s *InvalidRequestException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s InvalidRequestException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s InvalidRequestException) RequestID() string { - return s.respMetadata.RequestID +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // A pagination token. If multiple pages of results are returned, use the NextToken + // value returned with the current page of results as the value of this parameter + // to get the next page of results. 
+ NextToken *string `type:"string"` + + // The Amazon Resource Number (ARN) of an X-Ray group or sampling rule. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceInput) SetNextToken(v string) *ListTagsForResourceInput { + s.NextToken = &v + return s +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *ListTagsForResourceInput) SetResourceARN(v string) *ListTagsForResourceInput { + s.ResourceARN = &v + return s +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // A pagination token. If multiple pages of results are returned, use the NextToken + // value returned with the current page of results to get the next page of results. + NextToken *string `type:"string"` + + // A list of tags, as key and value pairs, that is associated with the specified + // X-Ray group or sampling rule. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListTagsForResourceOutput) SetNextToken(v string) *ListTagsForResourceOutput { + s.NextToken = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListTagsForResourceOutput) SetTags(v []*Tag) *ListTagsForResourceOutput { + s.Tags = v + return s } type PutEncryptionConfigInput struct { @@ -4662,10 +5191,72 @@ func (s *ResourceARNDetail) SetARN(v string) *ResourceARNDetail { return s } +// The resource was not found. Verify that the name or Amazon Resource Name +// (ARN) of the resource is correct. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + ResourceName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. 
+func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + // The root cause information for a response time warning. type ResponseTimeRootCause struct { _ struct{} `type:"structure"` + // A flag that denotes that the root cause impacts the trace client. + ClientImpacting *bool `type:"boolean"` + // A list of corresponding services. A service identifies a segment and contains // a name, account ID, type, and inferred flag. Services []*ResponseTimeRootCauseService `type:"list"` @@ -4681,6 +5272,12 @@ func (s ResponseTimeRootCause) GoString() string { return s.String() } +// SetClientImpacting sets the ClientImpacting field's value. +func (s *ResponseTimeRootCause) SetClientImpacting(v bool) *ResponseTimeRootCause { + s.ClientImpacting = &v + return s +} + // SetServices sets the Services field's value. func (s *ResponseTimeRootCause) SetServices(v []*ResponseTimeRootCauseService) *ResponseTimeRootCause { s.Services = v @@ -4692,7 +5289,7 @@ func (s *ResponseTimeRootCause) SetServices(v []*ResponseTimeRootCauseService) * type ResponseTimeRootCauseEntity struct { _ struct{} `type:"structure"` - // The types and messages of the exceptions. + // The type and messages of the exceptions. Coverage *float64 `type:"double"` // The name of the entity. @@ -4834,8 +5431,8 @@ func (s *RootCauseException) SetName(v string) *RootCauseException { // You have reached the maximum number of sampling rules. type RuleLimitExceededException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -4852,17 +5449,17 @@ func (s RuleLimitExceededException) GoString() string { func newErrorRuleLimitExceededException(v protocol.ResponseMetadata) error { return &RuleLimitExceededException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s RuleLimitExceededException) Code() string { +func (s *RuleLimitExceededException) Code() string { return "RuleLimitExceededException" } // Message returns the exception's message. -func (s RuleLimitExceededException) Message() string { +func (s *RuleLimitExceededException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -4870,22 +5467,22 @@ func (s RuleLimitExceededException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s RuleLimitExceededException) OrigErr() error { +func (s *RuleLimitExceededException) OrigErr() error { return nil } -func (s RuleLimitExceededException) Error() string { +func (s *RuleLimitExceededException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. 
-func (s RuleLimitExceededException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *RuleLimitExceededException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s RuleLimitExceededException) RequestID() string { - return s.respMetadata.RequestID +func (s *RuleLimitExceededException) RequestID() string { + return s.RespMetadata.RequestID } // A sampling rule that services use to decide whether to instrument a request. @@ -5280,7 +5877,7 @@ func (s *SamplingRuleUpdate) SetURLPath(v string) *SamplingRuleUpdate { } // Aggregated request sampling data for a sampling rule across all services -// for a 10 second window. +// for a 10-second window. type SamplingStatisticSummary struct { _ struct{} `type:"structure"` @@ -5499,7 +6096,7 @@ type SamplingTargetDocument struct { // again. Interval *int64 `type:"integer"` - // The number of requests per second that X-Ray allocated this service. + // The number of requests per second that X-Ray allocated for this service. ReservoirQuota *int64 `type:"integer"` // When the reservoir quota expires. @@ -5589,7 +6186,7 @@ func (s *Segment) SetId(v string) *Segment { } // Information about an application that processed requests, users that made -// requests, or downstream services, resources and applications that an application +// requests, or downstream services, resources, and applications that an application // used. type Service struct { _ struct{} `type:"structure"` @@ -5633,7 +6230,7 @@ type Service struct { // The type of service. // // * AWS Resource - The type of an AWS resource. For example, AWS::EC2::Instance - // for a application running on Amazon EC2 or AWS::DynamoDB::Table for an + // for an application running on Amazon EC2 or AWS::DynamoDB::Table for an // Amazon DynamoDB table that the application used. // // * AWS Service - The type of an AWS service. For example, AWS::DynamoDB @@ -5840,6 +6437,176 @@ func (s *ServiceStatistics) SetTotalResponseTime(v float64) *ServiceStatistics { return s } +// A map that contains tag keys and tag values to attach to an AWS X-Ray group +// or sampling rule. For more information about ways to use tags, see Tagging +// AWS resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) +// in the AWS General Reference. +// +// The following restrictions apply to tags: +// +// * Maximum number of user-applied tags per resource: 50 +// +// * Tag keys and values are case sensitive. +// +// * Don't use aws: as a prefix for keys; it's reserved for AWS use. You +// cannot edit or delete system tags. +type Tag struct { + _ struct{} `type:"structure"` + + // A tag key, such as Stage or Name. A tag key cannot be empty. The key can + // be a maximum of 128 characters, and can contain only Unicode letters, numbers, + // or separators, or the following special characters: + - = . _ : / + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // An optional tag value, such as Production or test-only. The value can be + // a maximum of 255 characters, and contain only Unicode letters, numbers, or + // separators, or the following special characters: + - = . 
_ : / + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Number (ARN) of an X-Ray group or sampling rule. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // A map that contains one or more tag keys and tag values to attach to an X-Ray + // group or sampling rule. For more information about ways to use tags, see + // Tagging AWS resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in the AWS General Reference. + // + // The following restrictions apply to tags: + // + // * Maximum number of user-applied tags per resource: 50 + // + // * Maximum tag key length: 128 Unicode characters + // + // * Maximum tag value length: 256 Unicode characters + // + // * Valid values for key and value: a-z, A-Z, 0-9, space, and the following + // characters: _ . : / = + - and @ + // + // * Tag keys and values are case sensitive. + // + // * Don't use aws: as a prefix for keys; it's reserved for AWS use. You + // cannot edit or delete system tags. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *TagResourceInput) SetResourceARN(v string) *TagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTags sets the Tags field's value. 
+func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + type TelemetryRecord struct { _ struct{} `type:"structure"` @@ -5918,8 +6685,8 @@ func (s *TelemetryRecord) SetTimestamp(v time.Time) *TelemetryRecord { // The request exceeds the maximum number of requests per second. type ThrottledException struct { - _ struct{} `type:"structure"` - respMetadata protocol.ResponseMetadata + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` Message_ *string `locationName:"Message" type:"string"` } @@ -5936,17 +6703,17 @@ func (s ThrottledException) GoString() string { func newErrorThrottledException(v protocol.ResponseMetadata) error { return &ThrottledException{ - respMetadata: v, + RespMetadata: v, } } // Code returns the exception type name. -func (s ThrottledException) Code() string { +func (s *ThrottledException) Code() string { return "ThrottledException" } // Message returns the exception's message. -func (s ThrottledException) Message() string { +func (s *ThrottledException) Message() string { if s.Message_ != nil { return *s.Message_ } @@ -5954,22 +6721,22 @@ func (s ThrottledException) Message() string { } // OrigErr always returns nil, satisfies awserr.Error interface. -func (s ThrottledException) OrigErr() error { +func (s *ThrottledException) OrigErr() error { return nil } -func (s ThrottledException) Error() string { +func (s *ThrottledException) Error() string { return fmt.Sprintf("%s: %s", s.Code(), s.Message()) } // Status code returns the HTTP status code for the request's response error. -func (s ThrottledException) StatusCode() int { - return s.respMetadata.StatusCode +func (s *ThrottledException) StatusCode() int { + return s.RespMetadata.StatusCode } // RequestID returns the service's response RequestID for request. -func (s ThrottledException) RequestID() string { - return s.respMetadata.RequestID +func (s *ThrottledException) RequestID() string { + return s.RespMetadata.RequestID } // A list of TimeSeriesStatistic structures. @@ -6023,6 +6790,64 @@ func (s *TimeSeriesServiceStatistics) SetTimestamp(v time.Time) *TimeSeriesServi return s } +// You have exceeded the maximum number of tags you can apply to this resource. +type TooManyTagsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"Message" type:"string"` + + ResourceName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s TooManyTagsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TooManyTagsException) GoString() string { + return s.String() +} + +func newErrorTooManyTagsException(v protocol.ResponseMetadata) error { + return &TooManyTagsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyTagsException) Code() string { + return "TooManyTagsException" +} + +// Message returns the exception's message. +func (s *TooManyTagsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *TooManyTagsException) OrigErr() error { + return nil +} + +func (s *TooManyTagsException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyTagsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyTagsException) RequestID() string { + return s.RespMetadata.RequestID +} + // A collection of segment documents with matching trace IDs. type Trace struct { _ struct{} `type:"structure"` @@ -6035,6 +6860,11 @@ type Trace struct { // and subsegments. Id *string `min:"1" type:"string"` + // LimitExceeded is set to true when the trace has exceeded one of the defined + // quotas. For more information about quotas, see AWS X-Ray endpoints and quotas + // (https://docs.aws.amazon.com/general/latest/gr/xray.html). + LimitExceeded *bool `type:"boolean"` + // Segment documents for the segments and subsegments that comprise the trace. Segments []*Segment `type:"list"` } @@ -6061,6 +6891,12 @@ func (s *Trace) SetId(v string) *Trace { return s } +// SetLimitExceeded sets the LimitExceeded field's value. +func (s *Trace) SetLimitExceeded(v bool) *Trace { + s.LimitExceeded = &v + return s +} + // SetSegments sets the Segments field's value. func (s *Trace) SetSegments(v []*Segment) *Trace { s.Segments = v @@ -6074,7 +6910,7 @@ type TraceSummary struct { // Annotations from the trace's segment documents. Annotations map[string][]*ValueWithServiceIds `type:"map"` - // A list of availability zones for any zone corresponding to the trace segments. + // A list of Availability Zones for any zone corresponding to the trace segments. AvailabilityZones []*AvailabilityZoneDetail `type:"list"` // The length of time in seconds between the start time of the root segment @@ -6087,8 +6923,7 @@ type TraceSummary struct { // A collection of ErrorRootCause structures corresponding to the trace segments. ErrorRootCauses []*ErrorRootCause `type:"list"` - // A collection of FaultRootCause structures corresponding to the the trace - // segments. + // A collection of FaultRootCause structures corresponding to the trace segments. FaultRootCauses []*FaultRootCause `type:"list"` // The root segment document has a 400 series error. @@ -6387,6 +7222,76 @@ func (s *UnprocessedTraceSegment) SetMessage(v string) *UnprocessedTraceSegment return s } +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Number (ARN) of an X-Ray group or sampling rule. + // + // ResourceARN is a required field + ResourceARN *string `min:"1" type:"string" required:"true"` + + // Keys for one or more tags that you want to remove from an X-Ray group or + // sampling rule. + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceARN == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceARN")) + } + if s.ResourceARN != nil && len(*s.ResourceARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceARN", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceARN sets the ResourceARN field's value. +func (s *UntagResourceInput) SetResourceARN(v string) *UntagResourceInput { + s.ResourceARN = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + type UpdateGroupInput struct { _ struct{} `type:"structure"` @@ -6398,6 +7303,16 @@ type UpdateGroupInput struct { // The case-sensitive name of the group. GroupName *string `min:"1" type:"string"` + + // The structure containing configurations related to insights. + // + // * The InsightsEnabled boolean can be set to true to enable insights for + // the group or false to disable insights for the group. + // + // * The NotifcationsEnabled boolean can be set to true to enable insights + // notifications for the group. Notifications can only be enabled on a group + // with InsightsEnabled set to true. + InsightsConfiguration *InsightsConfiguration `type:"structure"` } // String returns the string representation @@ -6444,12 +7359,18 @@ func (s *UpdateGroupInput) SetGroupName(v string) *UpdateGroupInput { return s } +// SetInsightsConfiguration sets the InsightsConfiguration field's value. +func (s *UpdateGroupInput) SetInsightsConfiguration(v *InsightsConfiguration) *UpdateGroupInput { + s.InsightsConfiguration = v + return s +} + type UpdateGroupOutput struct { _ struct{} `type:"structure"` // The group that was updated. Contains the name of the group that was updated, - // the ARN of the group that was updated, and the updated filter expression - // assigned to the group. + // the ARN of the group that was updated, the updated filter expression, and + // the updated insight configuration assigned to the group. 
Group *Group `type:"structure"` } @@ -6576,6 +7497,14 @@ const ( EncryptionStatusActive = "ACTIVE" ) +// EncryptionStatus_Values returns all elements of the EncryptionStatus enum +func EncryptionStatus_Values() []string { + return []string{ + EncryptionStatusUpdating, + EncryptionStatusActive, + } +} + const ( // EncryptionTypeNone is a EncryptionType enum value EncryptionTypeNone = "NONE" @@ -6584,6 +7513,14 @@ const ( EncryptionTypeKms = "KMS" ) +// EncryptionType_Values returns all elements of the EncryptionType enum +func EncryptionType_Values() []string { + return []string{ + EncryptionTypeNone, + EncryptionTypeKms, + } +} + const ( // SamplingStrategyNamePartialScan is a SamplingStrategyName enum value SamplingStrategyNamePartialScan = "PartialScan" @@ -6592,6 +7529,14 @@ const ( SamplingStrategyNameFixedRate = "FixedRate" ) +// SamplingStrategyName_Values returns all elements of the SamplingStrategyName enum +func SamplingStrategyName_Values() []string { + return []string{ + SamplingStrategyNamePartialScan, + SamplingStrategyNameFixedRate, + } +} + const ( // TimeRangeTypeTraceId is a TimeRangeType enum value TimeRangeTypeTraceId = "TraceId" @@ -6599,3 +7544,11 @@ const ( // TimeRangeTypeEvent is a TimeRangeType enum value TimeRangeTypeEvent = "Event" ) + +// TimeRangeType_Values returns all elements of the TimeRangeType enum +func TimeRangeType_Values() []string { + return []string{ + TimeRangeTypeTraceId, + TimeRangeTypeEvent, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/xray/errors.go b/vendor/github.com/aws/aws-sdk-go/service/xray/errors.go index 459870ffc..7ee15afb6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/xray/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/xray/errors.go @@ -14,6 +14,13 @@ const ( // The request is missing required parameters or has invalid parameters. ErrCodeInvalidRequestException = "InvalidRequestException" + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The resource was not found. Verify that the name or Amazon Resource Name + // (ARN) of the resource is correct. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + // ErrCodeRuleLimitExceededException for service response error code // "RuleLimitExceededException". // @@ -25,10 +32,18 @@ const ( // // The request exceeds the maximum number of requests per second. ErrCodeThrottledException = "ThrottledException" + + // ErrCodeTooManyTagsException for service response error code + // "TooManyTagsException". + // + // You have exceeded the maximum number of tags you can apply to this resource. 
+ ErrCodeTooManyTagsException = "TooManyTagsException" ) var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "InvalidRequestException": newErrorInvalidRequestException, + "ResourceNotFoundException": newErrorResourceNotFoundException, "RuleLimitExceededException": newErrorRuleLimitExceededException, "ThrottledException": newErrorThrottledException, + "TooManyTagsException": newErrorTooManyTagsException, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/xray/service.go b/vendor/github.com/aws/aws-sdk-go/service/xray/service.go index 55a475393..134114cdf 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/xray/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/xray/service.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" - v4 "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol" "github.com/aws/aws-sdk-go/private/protocol/restjson" ) diff --git a/vendor/github.com/beevik/etree/.travis.yml b/vendor/github.com/beevik/etree/.travis.yml deleted file mode 100644 index f4cb25d47..000000000 --- a/vendor/github.com/beevik/etree/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -sudo: false - -go: - - 1.11.x - - tip - -matrix: - allow_failures: - - go: tip - -script: - - go vet ./... - - go test -v ./... diff --git a/vendor/github.com/beevik/etree/CONTRIBUTORS b/vendor/github.com/beevik/etree/CONTRIBUTORS deleted file mode 100644 index 03211a85e..000000000 --- a/vendor/github.com/beevik/etree/CONTRIBUTORS +++ /dev/null @@ -1,10 +0,0 @@ -Brett Vickers (beevik) -Felix Geisendörfer (felixge) -Kamil Kisiel (kisielk) -Graham King (grahamking) -Matt Smith (ma314smith) -Michal Jemala (michaljemala) -Nicolas Piganeau (npiganeau) -Chris Brown (ccbrown) -Earncef Sequeira (earncef) -Gabriel de Labachelerie (wuzuf) diff --git a/vendor/github.com/beevik/etree/LICENSE b/vendor/github.com/beevik/etree/LICENSE deleted file mode 100644 index 26f1f7751..000000000 --- a/vendor/github.com/beevik/etree/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2015-2019 Brett Vickers. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/beevik/etree/README.md b/vendor/github.com/beevik/etree/README.md deleted file mode 100644 index 08ec26b0a..000000000 --- a/vendor/github.com/beevik/etree/README.md +++ /dev/null @@ -1,205 +0,0 @@ -[![Build Status](https://travis-ci.org/beevik/etree.svg?branch=master)](https://travis-ci.org/beevik/etree) -[![GoDoc](https://godoc.org/github.com/beevik/etree?status.svg)](https://godoc.org/github.com/beevik/etree) - -etree -===== - -The etree package is a lightweight, pure go package that expresses XML in -the form of an element tree. Its design was inspired by the Python -[ElementTree](http://docs.python.org/2/library/xml.etree.elementtree.html) -module. - -Some of the package's capabilities and features: - -* Represents XML documents as trees of elements for easy traversal. -* Imports, serializes, modifies or creates XML documents from scratch. -* Writes and reads XML to/from files, byte slices, strings and io interfaces. -* Performs simple or complex searches with lightweight XPath-like query APIs. -* Auto-indents XML using spaces or tabs for better readability. -* Implemented in pure go; depends only on standard go libraries. -* Built on top of the go [encoding/xml](http://golang.org/pkg/encoding/xml) - package. - -### Creating an XML document - -The following example creates an XML document from scratch using the etree -package and outputs its indented contents to stdout. -```go -doc := etree.NewDocument() -doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`) -doc.CreateProcInst("xml-stylesheet", `type="text/xsl" href="style.xsl"`) - -people := doc.CreateElement("People") -people.CreateComment("These are all known people") - -jon := people.CreateElement("Person") -jon.CreateAttr("name", "Jon") - -sally := people.CreateElement("Person") -sally.CreateAttr("name", "Sally") - -doc.Indent(2) -doc.WriteTo(os.Stdout) -``` - -Output: -```xml - - - - - - - -``` - -### Reading an XML file - -Suppose you have a file on disk called `bookstore.xml` containing the -following data: - -```xml - - - - Everyday Italian - Giada De Laurentiis - 2005 - 30.00 - - - - Harry Potter - J K. Rowling - 2005 - 29.99 - - - - XQuery Kick Start - James McGovern - Per Bothner - Kurt Cagle - James Linn - Vaidyanathan Nagarajan - 2003 - 49.99 - - - - Learning XML - Erik T. Ray - 2003 - 39.95 - - - -``` - -This code reads the file's contents into an etree document. -```go -doc := etree.NewDocument() -if err := doc.ReadFromFile("bookstore.xml"); err != nil { - panic(err) -} -``` - -You can also read XML from a string, a byte slice, or an `io.Reader`. - -### Processing elements and attributes - -This example illustrates several ways to access elements and attributes using -etree selection queries. 
-```go -root := doc.SelectElement("bookstore") -fmt.Println("ROOT element:", root.Tag) - -for _, book := range root.SelectElements("book") { - fmt.Println("CHILD element:", book.Tag) - if title := book.SelectElement("title"); title != nil { - lang := title.SelectAttrValue("lang", "unknown") - fmt.Printf(" TITLE: %s (%s)\n", title.Text(), lang) - } - for _, attr := range book.Attr { - fmt.Printf(" ATTR: %s=%s\n", attr.Key, attr.Value) - } -} -``` -Output: -``` -ROOT element: bookstore -CHILD element: book - TITLE: Everyday Italian (en) - ATTR: category=COOKING -CHILD element: book - TITLE: Harry Potter (en) - ATTR: category=CHILDREN -CHILD element: book - TITLE: XQuery Kick Start (en) - ATTR: category=WEB -CHILD element: book - TITLE: Learning XML (en) - ATTR: category=WEB -``` - -### Path queries - -This example uses etree's path functions to select all book titles that fall -into the category of 'WEB'. The double-slash prefix in the path causes the -search for book elements to occur recursively; book elements may appear at any -level of the XML hierarchy. -```go -for _, t := range doc.FindElements("//book[@category='WEB']/title") { - fmt.Println("Title:", t.Text()) -} -``` - -Output: -``` -Title: XQuery Kick Start -Title: Learning XML -``` - -This example finds the first book element under the root bookstore element and -outputs the tag and text of each of its child elements. -```go -for _, e := range doc.FindElements("./bookstore/book[1]/*") { - fmt.Printf("%s: %s\n", e.Tag, e.Text()) -} -``` - -Output: -``` -title: Everyday Italian -author: Giada De Laurentiis -year: 2005 -price: 30.00 -``` - -This example finds all books with a price of 49.99 and outputs their titles. -```go -path := etree.MustCompilePath("./bookstore/book[p:price='49.99']/title") -for _, e := range doc.FindElementsPath(path) { - fmt.Println(e.Text()) -} -``` - -Output: -``` -XQuery Kick Start -``` - -Note that this example uses the FindElementsPath function, which takes as an -argument a pre-compiled path object. Use precompiled paths when you plan to -search with the same path more than once. - -### Other features - -These are just a few examples of the things the etree package can do. See the -[documentation](http://godoc.org/github.com/beevik/etree) for a complete -description of its capabilities. - -### Contributing - -This project accepts contributions. Just fork the repo and submit a pull -request! diff --git a/vendor/github.com/beevik/etree/RELEASE_NOTES.md b/vendor/github.com/beevik/etree/RELEASE_NOTES.md deleted file mode 100644 index ee59d7abf..000000000 --- a/vendor/github.com/beevik/etree/RELEASE_NOTES.md +++ /dev/null @@ -1,109 +0,0 @@ -Release v1.1.0 -============== - -**New Features** - -* New attribute helpers. - * Added the `Element.SortAttrs` method, which lexicographically sorts an - element's attributes by key. -* New `ReadSettings` properties. - * Added `Entity` for the support of custom entity maps. -* New `WriteSettings` properties. - * Added `UseCRLF` to allow the output of CR-LF newlines instead of the - default LF newlines. This is useful on Windows systems. -* Additional support for text and CDATA sections. - * The `Element.Text` method now returns the concatenation of all consecutive - character data tokens immediately following an element's opening tag. - * Added `Element.SetCData` to replace the character data immediately - following an element's opening tag with a CDATA section. - * Added `Element.CreateCData` to create and add a CDATA section child - `CharData` token to an element. 
- * Added `Element.CreateText` to create and add a child text `CharData` token - to an element. - * Added `NewCData` to create a parentless CDATA section `CharData` token. - * Added `NewText` to create a parentless text `CharData` - token. - * Added `CharData.IsCData` to detect if the token contains a CDATA section. - * Added `CharData.IsWhitespace` to detect if the token contains whitespace - inserted by one of the document Indent functions. - * Modified `Element.SetText` so that it replaces a run of consecutive - character data tokens following the element's opening tag (instead of just - the first one). -* New "tail text" support. - * Added the `Element.Tail` method, which returns the text immediately - following an element's closing tag. - * Added the `Element.SetTail` method, which modifies the text immediately - following an element's closing tag. -* New element child insertion and removal methods. - * Added the `Element.InsertChildAt` method, which inserts a new child token - before the specified child token index. - * Added the `Element.RemoveChildAt` method, which removes the child token at - the specified child token index. -* New element and attribute queries. - * Added the `Element.Index` method, which returns the element's index within - its parent element's child token list. - * Added the `Element.NamespaceURI` method to return the namespace URI - associated with an element. - * Added the `Attr.NamespaceURI` method to return the namespace URI - associated with an element. - * Added the `Attr.Element` method to return the element that an attribute - belongs to. -* New Path filter functions. - * Added `[local-name()='val']` to keep elements whose unprefixed tag matches - the desired value. - * Added `[name()='val']` to keep elements whose full tag matches the desired - value. - * Added `[namespace-prefix()='val']` to keep elements whose namespace prefix - matches the desired value. - * Added `[namespace-uri()='val']` to keep elements whose namespace URI - matches the desired value. - -**Bug Fixes** - -* A default XML `CharSetReader` is now used to prevent failed parsing of XML - documents using certain encodings. - ([Issue](https://github.com/beevik/etree/issues/53)). -* All characters are now properly escaped according to XML parsing rules. - ([Issue](https://github.com/beevik/etree/issues/55)). -* The `Document.Indent` and `Document.IndentTabs` functions no longer insert - empty string `CharData` tokens. - -**Deprecated** - -* `Element` - * The `InsertChild` method is deprecated. Use `InsertChildAt` instead. - * The `CreateCharData` method is deprecated. Use `CreateText` instead. -* `CharData` - * The `NewCharData` method is deprecated. Use `NewText` instead. - - -Release v1.0.1 -============== - -**Changes** - -* Added support for absolute etree Path queries. An absolute path begins with - `/` or `//` and begins its search from the element's document root. -* Added [`GetPath`](https://godoc.org/github.com/beevik/etree#Element.GetPath) - and [`GetRelativePath`](https://godoc.org/github.com/beevik/etree#Element.GetRelativePath) - functions to the [`Element`](https://godoc.org/github.com/beevik/etree#Element) - type. - -**Breaking changes** - -* A path starting with `//` is now interpreted as an absolute path. - Previously, it was interpreted as a relative path starting from the element - whose - [`FindElement`](https://godoc.org/github.com/beevik/etree#Element.FindElement) - method was called. 
To remain compatible with this release, all paths - prefixed with `//` should be prefixed with `.//` when called from any - element other than the document's root. -* [**edit 2/1/2019**]: Minor releases should not contain breaking changes. - Even though this breaking change was very minor, it was a mistake to include - it in this minor release. In the future, all breaking changes will be - limited to major releases (e.g., version 2.0.0). - -Release v1.0.0 -============== - -Initial release. diff --git a/vendor/github.com/beevik/etree/etree.go b/vendor/github.com/beevik/etree/etree.go deleted file mode 100644 index 9e24f9012..000000000 --- a/vendor/github.com/beevik/etree/etree.go +++ /dev/null @@ -1,1453 +0,0 @@ -// Copyright 2015-2019 Brett Vickers. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package etree provides XML services through an Element Tree -// abstraction. -package etree - -import ( - "bufio" - "bytes" - "encoding/xml" - "errors" - "io" - "os" - "sort" - "strings" -) - -const ( - // NoIndent is used with Indent to disable all indenting. - NoIndent = -1 -) - -// ErrXML is returned when XML parsing fails due to incorrect formatting. -var ErrXML = errors.New("etree: invalid XML format") - -// ReadSettings allow for changing the default behavior of the ReadFrom* -// methods. -type ReadSettings struct { - // CharsetReader to be passed to standard xml.Decoder. Default: nil. - CharsetReader func(charset string, input io.Reader) (io.Reader, error) - - // Permissive allows input containing common mistakes such as missing tags - // or attribute values. Default: false. - Permissive bool - - // Entity to be passed to standard xml.Decoder. Default: nil. - Entity map[string]string -} - -// newReadSettings creates a default ReadSettings record. -func newReadSettings() ReadSettings { - return ReadSettings{ - CharsetReader: func(label string, input io.Reader) (io.Reader, error) { - return input, nil - }, - Permissive: false, - } -} - -// WriteSettings allow for changing the serialization behavior of the WriteTo* -// methods. -type WriteSettings struct { - // CanonicalEndTags forces the production of XML end tags, even for - // elements that have no child elements. Default: false. - CanonicalEndTags bool - - // CanonicalText forces the production of XML character references for - // text data characters &, <, and >. If false, XML character references - // are also produced for " and '. Default: false. - CanonicalText bool - - // CanonicalAttrVal forces the production of XML character references for - // attribute value characters &, < and ". If false, XML character - // references are also produced for > and '. Default: false. - CanonicalAttrVal bool - - // When outputting indented XML, use a carriage return and linefeed - // ("\r\n") as a new-line delimiter instead of just a linefeed ("\n"). - // This is useful on Windows-based systems. - UseCRLF bool -} - -// newWriteSettings creates a default WriteSettings record. -func newWriteSettings() WriteSettings { - return WriteSettings{ - CanonicalEndTags: false, - CanonicalText: false, - CanonicalAttrVal: false, - UseCRLF: false, - } -} - -// A Token is an empty interface that represents an Element, CharData, -// Comment, Directive, or ProcInst. 
-type Token interface { - Parent() *Element - Index() int - dup(parent *Element) Token - setParent(parent *Element) - setIndex(index int) - writeTo(w *bufio.Writer, s *WriteSettings) -} - -// A Document is a container holding a complete XML hierarchy. Its embedded -// element contains zero or more children, one of which is usually the root -// element. The embedded element may include other children such as -// processing instructions or BOM CharData tokens. -type Document struct { - Element - ReadSettings ReadSettings - WriteSettings WriteSettings -} - -// An Element represents an XML element, its attributes, and its child tokens. -type Element struct { - Space, Tag string // namespace prefix and tag - Attr []Attr // key-value attribute pairs - Child []Token // child tokens (elements, comments, etc.) - parent *Element // parent element - index int // token index in parent's children -} - -// An Attr represents a key-value attribute of an XML element. -type Attr struct { - Space, Key string // The attribute's namespace prefix and key - Value string // The attribute value string - element *Element // element containing the attribute -} - -// charDataFlags are used with CharData tokens to store additional settings. -type charDataFlags uint8 - -const ( - // The CharData was created by an indent function as whitespace. - whitespaceFlag charDataFlags = 1 << iota - - // The CharData contains a CDATA section. - cdataFlag -) - -// CharData can be used to represent character data or a CDATA section within -// an XML document. -type CharData struct { - Data string - parent *Element - index int - flags charDataFlags -} - -// A Comment represents an XML comment. -type Comment struct { - Data string - parent *Element - index int -} - -// A Directive represents an XML directive. -type Directive struct { - Data string - parent *Element - index int -} - -// A ProcInst represents an XML processing instruction. -type ProcInst struct { - Target string - Inst string - parent *Element - index int -} - -// NewDocument creates an XML document without a root element. -func NewDocument() *Document { - return &Document{ - Element{Child: make([]Token, 0)}, - newReadSettings(), - newWriteSettings(), - } -} - -// Copy returns a recursive, deep copy of the document. -func (d *Document) Copy() *Document { - return &Document{*(d.dup(nil).(*Element)), d.ReadSettings, d.WriteSettings} -} - -// Root returns the root element of the document, or nil if there is no root -// element. -func (d *Document) Root() *Element { - for _, t := range d.Child { - if c, ok := t.(*Element); ok { - return c - } - } - return nil -} - -// SetRoot replaces the document's root element with e. If the document -// already has a root when this function is called, then the document's -// original root is unbound first. If the element e is bound to another -// document (or to another element within a document), then it is unbound -// first. -func (d *Document) SetRoot(e *Element) { - if e.parent != nil { - e.parent.RemoveChild(e) - } - - p := &d.Element - e.setParent(p) - - // If there is already a root element, replace it. - for i, t := range p.Child { - if _, ok := t.(*Element); ok { - t.setParent(nil) - t.setIndex(-1) - p.Child[i] = e - e.setIndex(i) - return - } - } - - // No existing root element, so add it. - p.addChild(e) -} - -// ReadFrom reads XML from the reader r into the document d. It returns the -// number of bytes read and any error encountered. 
-func (d *Document) ReadFrom(r io.Reader) (n int64, err error) { - return d.Element.readFrom(r, d.ReadSettings) -} - -// ReadFromFile reads XML from the string s into the document d. -func (d *Document) ReadFromFile(filename string) error { - f, err := os.Open(filename) - if err != nil { - return err - } - defer f.Close() - _, err = d.ReadFrom(f) - return err -} - -// ReadFromBytes reads XML from the byte slice b into the document d. -func (d *Document) ReadFromBytes(b []byte) error { - _, err := d.ReadFrom(bytes.NewReader(b)) - return err -} - -// ReadFromString reads XML from the string s into the document d. -func (d *Document) ReadFromString(s string) error { - _, err := d.ReadFrom(strings.NewReader(s)) - return err -} - -// WriteTo serializes an XML document into the writer w. It -// returns the number of bytes written and any error encountered. -func (d *Document) WriteTo(w io.Writer) (n int64, err error) { - cw := newCountWriter(w) - b := bufio.NewWriter(cw) - for _, c := range d.Child { - c.writeTo(b, &d.WriteSettings) - } - err, n = b.Flush(), cw.bytes - return -} - -// WriteToFile serializes an XML document into the file named -// filename. -func (d *Document) WriteToFile(filename string) error { - f, err := os.Create(filename) - if err != nil { - return err - } - defer f.Close() - _, err = d.WriteTo(f) - return err -} - -// WriteToBytes serializes the XML document into a slice of -// bytes. -func (d *Document) WriteToBytes() (b []byte, err error) { - var buf bytes.Buffer - if _, err = d.WriteTo(&buf); err != nil { - return - } - return buf.Bytes(), nil -} - -// WriteToString serializes the XML document into a string. -func (d *Document) WriteToString() (s string, err error) { - var b []byte - if b, err = d.WriteToBytes(); err != nil { - return - } - return string(b), nil -} - -type indentFunc func(depth int) string - -// Indent modifies the document's element tree by inserting character data -// tokens containing newlines and indentation. The amount of indentation per -// depth level is given as spaces. Pass etree.NoIndent for spaces if you want -// no indentation at all. -func (d *Document) Indent(spaces int) { - var indent indentFunc - switch { - case spaces < 0: - indent = func(depth int) string { return "" } - case d.WriteSettings.UseCRLF == true: - indent = func(depth int) string { return indentCRLF(depth*spaces, indentSpaces) } - default: - indent = func(depth int) string { return indentLF(depth*spaces, indentSpaces) } - } - d.Element.indent(0, indent) -} - -// IndentTabs modifies the document's element tree by inserting CharData -// tokens containing newlines and tabs for indentation. One tab is used per -// indentation level. -func (d *Document) IndentTabs() { - var indent indentFunc - switch d.WriteSettings.UseCRLF { - case true: - indent = func(depth int) string { return indentCRLF(depth, indentTabs) } - default: - indent = func(depth int) string { return indentLF(depth, indentTabs) } - } - d.Element.indent(0, indent) -} - -// NewElement creates an unparented element with the specified tag. The tag -// may be prefixed by a namespace prefix and a colon. -func NewElement(tag string) *Element { - space, stag := spaceDecompose(tag) - return newElement(space, stag, nil) -} - -// newElement is a helper function that creates an element and binds it to -// a parent element if possible. 
-func newElement(space, tag string, parent *Element) *Element { - e := &Element{ - Space: space, - Tag: tag, - Attr: make([]Attr, 0), - Child: make([]Token, 0), - parent: parent, - index: -1, - } - if parent != nil { - parent.addChild(e) - } - return e -} - -// Copy creates a recursive, deep copy of the element and all its attributes -// and children. The returned element has no parent but can be parented to a -// another element using AddElement, or to a document using SetRoot. -func (e *Element) Copy() *Element { - return e.dup(nil).(*Element) -} - -// FullTag returns the element e's complete tag, including namespace prefix if -// present. -func (e *Element) FullTag() string { - if e.Space == "" { - return e.Tag - } - return e.Space + ":" + e.Tag -} - -// NamespaceURI returns the XML namespace URI associated with the element. If -// the element is part of the XML default namespace, NamespaceURI returns the -// empty string. -func (e *Element) NamespaceURI() string { - if e.Space == "" { - return e.findDefaultNamespaceURI() - } - return e.findLocalNamespaceURI(e.Space) -} - -// findLocalNamespaceURI finds the namespace URI corresponding to the -// requested prefix. -func (e *Element) findLocalNamespaceURI(prefix string) string { - for _, a := range e.Attr { - if a.Space == "xmlns" && a.Key == prefix { - return a.Value - } - } - - if e.parent == nil { - return "" - } - - return e.parent.findLocalNamespaceURI(prefix) -} - -// findDefaultNamespaceURI finds the default namespace URI of the element. -func (e *Element) findDefaultNamespaceURI() string { - for _, a := range e.Attr { - if a.Space == "" && a.Key == "xmlns" { - return a.Value - } - } - - if e.parent == nil { - return "" - } - - return e.parent.findDefaultNamespaceURI() -} - -// hasText returns true if the element has character data immediately -// folllowing the element's opening tag. -func (e *Element) hasText() bool { - if len(e.Child) == 0 { - return false - } - _, ok := e.Child[0].(*CharData) - return ok -} - -// namespacePrefix returns the namespace prefix associated with the element. -func (e *Element) namespacePrefix() string { - return e.Space -} - -// name returns the tag associated with the element. -func (e *Element) name() string { - return e.Tag -} - -// Text returns all character data immediately following the element's opening -// tag. -func (e *Element) Text() string { - if len(e.Child) == 0 { - return "" - } - - text := "" - for _, ch := range e.Child { - if cd, ok := ch.(*CharData); ok { - if text == "" { - text = cd.Data - } else { - text = text + cd.Data - } - } else { - break - } - } - return text -} - -// SetText replaces all character data immediately following an element's -// opening tag with the requested string. -func (e *Element) SetText(text string) { - e.replaceText(0, text, 0) -} - -// SetCData replaces all character data immediately following an element's -// opening tag with a CDATA section. -func (e *Element) SetCData(text string) { - e.replaceText(0, text, cdataFlag) -} - -// Tail returns all character data immediately following the element's end -// tag. -func (e *Element) Tail() string { - if e.Parent() == nil { - return "" - } - - p := e.Parent() - i := e.Index() - - text := "" - for _, ch := range p.Child[i+1:] { - if cd, ok := ch.(*CharData); ok { - if text == "" { - text = cd.Data - } else { - text = text + cd.Data - } - } else { - break - } - } - return text -} - -// SetTail replaces all character data immediately following the element's end -// tag with the requested string. 
-func (e *Element) SetTail(text string) { - if e.Parent() == nil { - return - } - - p := e.Parent() - p.replaceText(e.Index()+1, text, 0) -} - -// replaceText is a helper function that replaces a series of chardata tokens -// starting at index i with the requested text. -func (e *Element) replaceText(i int, text string, flags charDataFlags) { - end := e.findTermCharDataIndex(i) - - switch { - case end == i: - if text != "" { - // insert a new chardata token at index i - cd := newCharData(text, flags, nil) - e.InsertChildAt(i, cd) - } - - case end == i+1: - if text == "" { - // remove the chardata token at index i - e.RemoveChildAt(i) - } else { - // replace the first and only character token at index i - cd := e.Child[i].(*CharData) - cd.Data, cd.flags = text, flags - } - - default: - if text == "" { - // remove all chardata tokens starting from index i - copy(e.Child[i:], e.Child[end:]) - removed := end - i - e.Child = e.Child[:len(e.Child)-removed] - for j := i; j < len(e.Child); j++ { - e.Child[j].setIndex(j) - } - } else { - // replace the first chardata token at index i and remove all - // subsequent chardata tokens - cd := e.Child[i].(*CharData) - cd.Data, cd.flags = text, flags - copy(e.Child[i+1:], e.Child[end:]) - removed := end - (i + 1) - e.Child = e.Child[:len(e.Child)-removed] - for j := i + 1; j < len(e.Child); j++ { - e.Child[j].setIndex(j) - } - } - } -} - -// findTermCharDataIndex finds the index of the first child token that isn't -// a CharData token. It starts from the requested start index. -func (e *Element) findTermCharDataIndex(start int) int { - for i := start; i < len(e.Child); i++ { - if _, ok := e.Child[i].(*CharData); !ok { - return i - } - } - return len(e.Child) -} - -// CreateElement creates an element with the specified tag and adds it as the -// last child element of the element e. The tag may be prefixed by a namespace -// prefix and a colon. -func (e *Element) CreateElement(tag string) *Element { - space, stag := spaceDecompose(tag) - return newElement(space, stag, e) -} - -// AddChild adds the token t as the last child of element e. If token t was -// already the child of another element, it is first removed from its current -// parent element. -func (e *Element) AddChild(t Token) { - if t.Parent() != nil { - t.Parent().RemoveChild(t) - } - - t.setParent(e) - e.addChild(t) -} - -// InsertChild inserts the token t before e's existing child token ex. If ex -// is nil or ex is not a child of e, then t is added to the end of e's child -// token list. If token t was already the child of another element, it is -// first removed from its current parent element. -// -// Deprecated: InsertChild is deprecated. Use InsertChildAt instead. -func (e *Element) InsertChild(ex Token, t Token) { - if ex == nil || ex.Parent() != e { - e.AddChild(t) - return - } - - if t.Parent() != nil { - t.Parent().RemoveChild(t) - } - - t.setParent(e) - - i := ex.Index() - e.Child = append(e.Child, nil) - copy(e.Child[i+1:], e.Child[i:]) - e.Child[i] = t - - for j := i; j < len(e.Child); j++ { - e.Child[j].setIndex(j) - } -} - -// InsertChildAt inserts the token t into the element e's list of child tokens -// just before the requested index. If the index is greater than or equal to -// the length of the list of child tokens, the token t is added to the end of -// the list. 
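A small illustrative sketch of the element and text manipulation methods above (CreateElement, SetText, AddChild, InsertChildAt); all tag names are made up:

```go
package main

import (
	"fmt"

	"github.com/beevik/etree"
)

func main() {
	doc := etree.NewDocument()
	people := doc.CreateElement("people")

	alice := people.CreateElement("person")
	alice.SetText("Alice")

	// An element can also be created unparented and attached later; AddChild
	// detaches it from any previous parent first.
	bob := etree.NewElement("person")
	bob.SetText("Bob")
	people.AddChild(bob)

	// InsertChildAt places a token at a specific position in the child list.
	people.InsertChildAt(0, etree.NewComment("generated example"))

	// Text returns the character data immediately after the opening tag.
	fmt.Println(alice.Text())
}
```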
-func (e *Element) InsertChildAt(index int, t Token) { - if index >= len(e.Child) { - e.AddChild(t) - return - } - - if t.Parent() != nil { - if t.Parent() == e && t.Index() > index { - index-- - } - t.Parent().RemoveChild(t) - } - - t.setParent(e) - - e.Child = append(e.Child, nil) - copy(e.Child[index+1:], e.Child[index:]) - e.Child[index] = t - - for j := index; j < len(e.Child); j++ { - e.Child[j].setIndex(j) - } -} - -// RemoveChild attempts to remove the token t from element e's list of -// children. If the token t is a child of e, then it is returned. Otherwise, -// nil is returned. -func (e *Element) RemoveChild(t Token) Token { - if t.Parent() != e { - return nil - } - return e.RemoveChildAt(t.Index()) -} - -// RemoveChildAt removes the index-th child token from the element e. The -// removed child token is returned. If the index is out of bounds, no child is -// removed and nil is returned. -func (e *Element) RemoveChildAt(index int) Token { - if index >= len(e.Child) { - return nil - } - - t := e.Child[index] - for j := index + 1; j < len(e.Child); j++ { - e.Child[j].setIndex(j - 1) - } - e.Child = append(e.Child[:index], e.Child[index+1:]...) - t.setIndex(-1) - t.setParent(nil) - return t -} - -// ReadFrom reads XML from the reader r and stores the result as a new child -// of element e. -func (e *Element) readFrom(ri io.Reader, settings ReadSettings) (n int64, err error) { - r := newCountReader(ri) - dec := xml.NewDecoder(r) - dec.CharsetReader = settings.CharsetReader - dec.Strict = !settings.Permissive - dec.Entity = settings.Entity - var stack stack - stack.push(e) - for { - t, err := dec.RawToken() - switch { - case err == io.EOF: - return r.bytes, nil - case err != nil: - return r.bytes, err - case stack.empty(): - return r.bytes, ErrXML - } - - top := stack.peek().(*Element) - - switch t := t.(type) { - case xml.StartElement: - e := newElement(t.Name.Space, t.Name.Local, top) - for _, a := range t.Attr { - e.createAttr(a.Name.Space, a.Name.Local, a.Value, e) - } - stack.push(e) - case xml.EndElement: - stack.pop() - case xml.CharData: - data := string(t) - var flags charDataFlags - if isWhitespace(data) { - flags = whitespaceFlag - } - newCharData(data, flags, top) - case xml.Comment: - newComment(string(t), top) - case xml.Directive: - newDirective(string(t), top) - case xml.ProcInst: - newProcInst(t.Target, string(t.Inst), top) - } - } -} - -// SelectAttr finds an element attribute matching the requested key and -// returns it if found. Returns nil if no matching attribute is found. The key -// may be prefixed by a namespace prefix and a colon. -func (e *Element) SelectAttr(key string) *Attr { - space, skey := spaceDecompose(key) - for i, a := range e.Attr { - if spaceMatch(space, a.Space) && skey == a.Key { - return &e.Attr[i] - } - } - return nil -} - -// SelectAttrValue finds an element attribute matching the requested key and -// returns its value if found. The key may be prefixed by a namespace prefix -// and a colon. If the key is not found, the dflt value is returned instead. -func (e *Element) SelectAttrValue(key, dflt string) string { - space, skey := spaceDecompose(key) - for _, a := range e.Attr { - if spaceMatch(space, a.Space) && skey == a.Key { - return a.Value - } - } - return dflt -} - -// ChildElements returns all elements that are children of element e. 
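To illustrate the attribute selectors just described (SelectAttr, SelectAttrValue) on a parsed document; the attribute names and values here are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/beevik/etree"
)

func main() {
	doc := etree.NewDocument()
	if err := doc.ReadFromString(`<svc name="quicksight" region="us-east-1"/>`); err != nil {
		panic(err)
	}

	svc := doc.Root() // first *Element child of the document
	if svc == nil {
		panic("document has no root element")
	}

	// SelectAttrValue falls back to the supplied default when the key is absent.
	fmt.Println(svc.SelectAttrValue("name", "unknown"))  // "quicksight"
	fmt.Println(svc.SelectAttrValue("owner", "unknown")) // "unknown"

	if a := svc.SelectAttr("region"); a != nil {
		fmt.Println(a.FullKey(), "=", a.Value)
	}
}
```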
-func (e *Element) ChildElements() []*Element { - var elements []*Element - for _, t := range e.Child { - if c, ok := t.(*Element); ok { - elements = append(elements, c) - } - } - return elements -} - -// SelectElement returns the first child element with the given tag. The tag -// may be prefixed by a namespace prefix and a colon. Returns nil if no -// element with a matching tag was found. -func (e *Element) SelectElement(tag string) *Element { - space, stag := spaceDecompose(tag) - for _, t := range e.Child { - if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag { - return c - } - } - return nil -} - -// SelectElements returns a slice of all child elements with the given tag. -// The tag may be prefixed by a namespace prefix and a colon. -func (e *Element) SelectElements(tag string) []*Element { - space, stag := spaceDecompose(tag) - var elements []*Element - for _, t := range e.Child { - if c, ok := t.(*Element); ok && spaceMatch(space, c.Space) && stag == c.Tag { - elements = append(elements, c) - } - } - return elements -} - -// FindElement returns the first element matched by the XPath-like path -// string. Returns nil if no element is found using the path. Panics if an -// invalid path string is supplied. -func (e *Element) FindElement(path string) *Element { - return e.FindElementPath(MustCompilePath(path)) -} - -// FindElementPath returns the first element matched by the XPath-like path -// string. Returns nil if no element is found using the path. -func (e *Element) FindElementPath(path Path) *Element { - p := newPather() - elements := p.traverse(e, path) - switch { - case len(elements) > 0: - return elements[0] - default: - return nil - } -} - -// FindElements returns a slice of elements matched by the XPath-like path -// string. Panics if an invalid path string is supplied. -func (e *Element) FindElements(path string) []*Element { - return e.FindElementsPath(MustCompilePath(path)) -} - -// FindElementsPath returns a slice of elements matched by the Path object. -func (e *Element) FindElementsPath(path Path) []*Element { - p := newPather() - return p.traverse(e, path) -} - -// GetPath returns the absolute path of the element. -func (e *Element) GetPath() string { - path := []string{} - for seg := e; seg != nil; seg = seg.Parent() { - if seg.Tag != "" { - path = append(path, seg.Tag) - } - } - - // Reverse the path. - for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { - path[i], path[j] = path[j], path[i] - } - - return "/" + strings.Join(path, "/") -} - -// GetRelativePath returns the path of the element relative to the source -// element. If the two elements are not part of the same element tree, then -// GetRelativePath returns the empty string. -func (e *Element) GetRelativePath(source *Element) string { - var path []*Element - - if source == nil { - return "" - } - - // Build a reverse path from the element toward the root. Stop if the - // source element is encountered. - var seg *Element - for seg = e; seg != nil && seg != source; seg = seg.Parent() { - path = append(path, seg) - } - - // If we found the source element, reverse the path and compose the - // string. - if seg == source { - if len(path) == 0 { - return "." - } - parts := []string{} - for i := len(path) - 1; i >= 0; i-- { - parts = append(parts, path[i].Tag) - } - return "./" + strings.Join(parts, "/") - } - - // The source wasn't encountered, so climb from the source element toward - // the root of the tree until an element in the reversed path is - // encountered. 
- - findPathIndex := func(e *Element, path []*Element) int { - for i, ee := range path { - if e == ee { - return i - } - } - return -1 - } - - climb := 0 - for seg = source; seg != nil; seg = seg.Parent() { - i := findPathIndex(seg, path) - if i >= 0 { - path = path[:i] // truncate at found segment - break - } - climb++ - } - - // No element in the reversed path was encountered, so the two elements - // must not be part of the same tree. - if seg == nil { - return "" - } - - // Reverse the (possibly truncated) path and prepend ".." segments to - // climb. - parts := []string{} - for i := 0; i < climb; i++ { - parts = append(parts, "..") - } - for i := len(path) - 1; i >= 0; i-- { - parts = append(parts, path[i].Tag) - } - return strings.Join(parts, "/") -} - -// indent recursively inserts proper indentation between an -// XML element's child tokens. -func (e *Element) indent(depth int, indent indentFunc) { - e.stripIndent() - n := len(e.Child) - if n == 0 { - return - } - - oldChild := e.Child - e.Child = make([]Token, 0, n*2+1) - isCharData, firstNonCharData := false, true - for _, c := range oldChild { - // Insert NL+indent before child if it's not character data. - // Exceptions: when it's the first non-character-data child, or when - // the child is at root depth. - _, isCharData = c.(*CharData) - if !isCharData { - if !firstNonCharData || depth > 0 { - s := indent(depth) - if s != "" { - newCharData(s, whitespaceFlag, e) - } - } - firstNonCharData = false - } - - e.addChild(c) - - // Recursively process child elements. - if ce, ok := c.(*Element); ok { - ce.indent(depth+1, indent) - } - } - - // Insert NL+indent before the last child. - if !isCharData { - if !firstNonCharData || depth > 0 { - s := indent(depth - 1) - if s != "" { - newCharData(s, whitespaceFlag, e) - } - } - } -} - -// stripIndent removes any previously inserted indentation. -func (e *Element) stripIndent() { - // Count the number of non-indent child tokens - n := len(e.Child) - for _, c := range e.Child { - if cd, ok := c.(*CharData); ok && cd.IsWhitespace() { - n-- - } - } - if n == len(e.Child) { - return - } - - // Strip out indent CharData - newChild := make([]Token, n) - j := 0 - for _, c := range e.Child { - if cd, ok := c.(*CharData); ok && cd.IsWhitespace() { - continue - } - newChild[j] = c - newChild[j].setIndex(j) - j++ - } - e.Child = newChild -} - -// dup duplicates the element. -func (e *Element) dup(parent *Element) Token { - ne := &Element{ - Space: e.Space, - Tag: e.Tag, - Attr: make([]Attr, len(e.Attr)), - Child: make([]Token, len(e.Child)), - parent: parent, - index: e.index, - } - for i, t := range e.Child { - ne.Child[i] = t.dup(ne) - } - for i, a := range e.Attr { - ne.Attr[i] = a - } - return ne -} - -// Parent returns the element token's parent element, or nil if it has no -// parent. -func (e *Element) Parent() *Element { - return e.parent -} - -// Index returns the index of this element within its parent element's -// list of child tokens. If this element has no parent element, the index -// is -1. -func (e *Element) Index() int { - return e.index -} - -// setParent replaces the element token's parent. -func (e *Element) setParent(parent *Element) { - e.parent = parent -} - -// setIndex sets the element token's index within its parent's Child slice. -func (e *Element) setIndex(index int) { - e.index = index -} - -// writeTo serializes the element to the writer w. 
-func (e *Element) writeTo(w *bufio.Writer, s *WriteSettings) { - w.WriteByte('<') - w.WriteString(e.FullTag()) - for _, a := range e.Attr { - w.WriteByte(' ') - a.writeTo(w, s) - } - if len(e.Child) > 0 { - w.WriteString(">") - for _, c := range e.Child { - c.writeTo(w, s) - } - w.Write([]byte{'<', '/'}) - w.WriteString(e.FullTag()) - w.WriteByte('>') - } else { - if s.CanonicalEndTags { - w.Write([]byte{'>', '<', '/'}) - w.WriteString(e.FullTag()) - w.WriteByte('>') - } else { - w.Write([]byte{'/', '>'}) - } - } -} - -// addChild adds a child token to the element e. -func (e *Element) addChild(t Token) { - t.setIndex(len(e.Child)) - e.Child = append(e.Child, t) -} - -// CreateAttr creates an attribute and adds it to element e. The key may be -// prefixed by a namespace prefix and a colon. If an attribute with the key -// already exists, its value is replaced. -func (e *Element) CreateAttr(key, value string) *Attr { - space, skey := spaceDecompose(key) - return e.createAttr(space, skey, value, e) -} - -// createAttr is a helper function that creates attributes. -func (e *Element) createAttr(space, key, value string, parent *Element) *Attr { - for i, a := range e.Attr { - if space == a.Space && key == a.Key { - e.Attr[i].Value = value - return &e.Attr[i] - } - } - a := Attr{ - Space: space, - Key: key, - Value: value, - element: parent, - } - e.Attr = append(e.Attr, a) - return &e.Attr[len(e.Attr)-1] -} - -// RemoveAttr removes and returns a copy of the first attribute of the element -// whose key matches the given key. The key may be prefixed by a namespace -// prefix and a colon. If a matching attribute does not exist, nil is -// returned. -func (e *Element) RemoveAttr(key string) *Attr { - space, skey := spaceDecompose(key) - for i, a := range e.Attr { - if space == a.Space && skey == a.Key { - e.Attr = append(e.Attr[0:i], e.Attr[i+1:]...) - return &Attr{ - Space: a.Space, - Key: a.Key, - Value: a.Value, - element: nil, - } - } - } - return nil -} - -// SortAttrs sorts the element's attributes lexicographically by key. -func (e *Element) SortAttrs() { - sort.Sort(byAttr(e.Attr)) -} - -type byAttr []Attr - -func (a byAttr) Len() int { - return len(a) -} - -func (a byAttr) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func (a byAttr) Less(i, j int) bool { - sp := strings.Compare(a[i].Space, a[j].Space) - if sp == 0 { - return strings.Compare(a[i].Key, a[j].Key) < 0 - } - return sp < 0 -} - -// FullKey returns the attribute a's complete key, including namespace prefix -// if present. -func (a *Attr) FullKey() string { - if a.Space == "" { - return a.Key - } - return a.Space + ":" + a.Key -} - -// Element returns the element containing the attribute. -func (a *Attr) Element() *Element { - return a.element -} - -// NamespaceURI returns the XML namespace URI associated with the attribute. -// If the element is part of the XML default namespace, NamespaceURI returns -// the empty string. -func (a *Attr) NamespaceURI() string { - return a.element.NamespaceURI() -} - -// writeTo serializes the attribute to the writer. -func (a *Attr) writeTo(w *bufio.Writer, s *WriteSettings) { - w.WriteString(a.FullKey()) - w.WriteString(`="`) - var m escapeMode - if s.CanonicalAttrVal { - m = escapeCanonicalAttr - } else { - m = escapeNormal - } - escapeString(w, a.Value, m) - w.WriteByte('"') -} - -// NewText creates a parentless CharData token containing character data. 
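A brief sketch of the attribute helpers above (CreateAttr, SortAttrs, RemoveAttr); the keys and values are arbitrary examples:

```go
package main

import "github.com/beevik/etree"

func main() {
	e := etree.NewElement("server")

	// CreateAttr replaces the value if an attribute with the key already exists.
	e.CreateAttr("port", "8080")
	e.CreateAttr("host", "localhost")
	e.CreateAttr("port", "9090") // overwrites the earlier "8080"

	// SortAttrs orders attributes lexicographically by namespace prefix, then key.
	e.SortAttrs()

	// RemoveAttr returns a detached copy of the removed attribute, or nil if
	// no attribute matched the key.
	if removed := e.RemoveAttr("host"); removed != nil {
		_ = removed.Value // "localhost"
	}
}
```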
-func NewText(text string) *CharData { - return newCharData(text, 0, nil) -} - -// NewCData creates a parentless XML character CDATA section. -func NewCData(data string) *CharData { - return newCharData(data, cdataFlag, nil) -} - -// NewCharData creates a parentless CharData token containing character data. -// -// Deprecated: NewCharData is deprecated. Instead, use NewText, which does the -// same thing. -func NewCharData(data string) *CharData { - return newCharData(data, 0, nil) -} - -// newCharData creates a character data token and binds it to a parent -// element. If parent is nil, the CharData token remains unbound. -func newCharData(data string, flags charDataFlags, parent *Element) *CharData { - c := &CharData{ - Data: data, - parent: parent, - index: -1, - flags: flags, - } - if parent != nil { - parent.addChild(c) - } - return c -} - -// CreateText creates a CharData token containing character data and adds it -// as a child of element e. -func (e *Element) CreateText(text string) *CharData { - return newCharData(text, 0, e) -} - -// CreateCData creates a CharData token containing a CDATA section and adds it -// as a child of element e. -func (e *Element) CreateCData(data string) *CharData { - return newCharData(data, cdataFlag, e) -} - -// CreateCharData creates a CharData token containing character data and adds -// it as a child of element e. -// -// Deprecated: CreateCharData is deprecated. Instead, use CreateText, which -// does the same thing. -func (e *Element) CreateCharData(data string) *CharData { - return newCharData(data, 0, e) -} - -// dup duplicates the character data. -func (c *CharData) dup(parent *Element) Token { - return &CharData{ - Data: c.Data, - flags: c.flags, - parent: parent, - index: c.index, - } -} - -// IsCData returns true if the character data token is to be encoded as a -// CDATA section. -func (c *CharData) IsCData() bool { - return (c.flags & cdataFlag) != 0 -} - -// IsWhitespace returns true if the character data token was created by one of -// the document Indent methods to contain only whitespace. -func (c *CharData) IsWhitespace() bool { - return (c.flags & whitespaceFlag) != 0 -} - -// Parent returns the character data token's parent element, or nil if it has -// no parent. -func (c *CharData) Parent() *Element { - return c.parent -} - -// Index returns the index of this CharData token within its parent element's -// list of child tokens. If this CharData token has no parent element, the -// index is -1. -func (c *CharData) Index() int { - return c.index -} - -// setParent replaces the character data token's parent. -func (c *CharData) setParent(parent *Element) { - c.parent = parent -} - -// setIndex sets the CharData token's index within its parent element's Child -// slice. -func (c *CharData) setIndex(index int) { - c.index = index -} - -// writeTo serializes character data to the writer. -func (c *CharData) writeTo(w *bufio.Writer, s *WriteSettings) { - if c.IsCData() { - w.WriteString(`<![CDATA[`) - w.WriteString(c.Data) - w.WriteString(`]]>`) - } else { - var m escapeMode - if s.CanonicalText { - m = escapeCanonicalText - } else { - m = escapeNormal - } - escapeString(w, c.Data, m) - } -} - -// NewComment creates a parentless XML comment. -func NewComment(comment string) *Comment { - return newComment(comment, nil) -} - -// newComment creates an XML comment and binds it to a parent element. If -// parent is nil, the Comment remains unbound.
-func newComment(comment string, parent *Element) *Comment { - c := &Comment{ - Data: comment, - parent: parent, - index: -1, - } - if parent != nil { - parent.addChild(c) - } - return c -} - -// CreateComment creates an XML comment and adds it as a child of element e. -func (e *Element) CreateComment(comment string) *Comment { - return newComment(comment, e) -} - -// dup duplicates the comment. -func (c *Comment) dup(parent *Element) Token { - return &Comment{ - Data: c.Data, - parent: parent, - index: c.index, - } -} - -// Parent returns comment token's parent element, or nil if it has no parent. -func (c *Comment) Parent() *Element { - return c.parent -} - -// Index returns the index of this Comment token within its parent element's -// list of child tokens. If this Comment token has no parent element, the -// index is -1. -func (c *Comment) Index() int { - return c.index -} - -// setParent replaces the comment token's parent. -func (c *Comment) setParent(parent *Element) { - c.parent = parent -} - -// setIndex sets the Comment token's index within its parent element's Child -// slice. -func (c *Comment) setIndex(index int) { - c.index = index -} - -// writeTo serializes the comment to the writer. -func (c *Comment) writeTo(w *bufio.Writer, s *WriteSettings) { - w.WriteString("<!--") - w.WriteString(c.Data) - w.WriteString("-->") -} - -// NewDirective creates a parentless XML directive. -func NewDirective(data string) *Directive { - return newDirective(data, nil) -} - -// newDirective creates an XML directive and binds it to a parent element. If -// parent is nil, the Directive remains unbound. -func newDirective(data string, parent *Element) *Directive { - d := &Directive{ - Data: data, - parent: parent, - index: -1, - } - if parent != nil { - parent.addChild(d) - } - return d -} - -// CreateDirective creates an XML directive and adds it as the last child of -// element e. -func (e *Element) CreateDirective(data string) *Directive { - return newDirective(data, e) -} - -// dup duplicates the directive. -func (d *Directive) dup(parent *Element) Token { - return &Directive{ - Data: d.Data, - parent: parent, - index: d.index, - } -} - -// Parent returns directive token's parent element, or nil if it has no -// parent. -func (d *Directive) Parent() *Element { - return d.parent -} - -// Index returns the index of this Directive token within its parent element's -// list of child tokens. If this Directive token has no parent element, the -// index is -1. -func (d *Directive) Index() int { - return d.index -} - -// setParent replaces the directive token's parent. -func (d *Directive) setParent(parent *Element) { - d.parent = parent -} - -// setIndex sets the Directive token's index within its parent element's Child -// slice. -func (d *Directive) setIndex(index int) { - d.index = index -} - -// writeTo serializes the XML directive to the writer. -func (d *Directive) writeTo(w *bufio.Writer, s *WriteSettings) { - w.WriteString("<!") - w.WriteString(d.Data) - w.WriteString(">") -} - -// NewProcInst creates a parentless XML processing instruction. -func NewProcInst(target, inst string) *ProcInst { - return newProcInst(target, inst, nil) -} - -// newProcInst creates an XML processing instruction and binds it to a parent -// element. If parent is nil, the ProcInst remains unbound. -func newProcInst(target, inst string, parent *Element) *ProcInst { - p := &ProcInst{ - Target: target, - Inst: inst, - parent: parent, - index: -1, - } - if parent != nil { - parent.addChild(p) - } - return p -} - -// CreateProcInst creates a processing instruction and adds it as a child of -// element e.
-func (e *Element) CreateProcInst(target, inst string) *ProcInst { - return newProcInst(target, inst, e) -} - -// dup duplicates the procinst. -func (p *ProcInst) dup(parent *Element) Token { - return &ProcInst{ - Target: p.Target, - Inst: p.Inst, - parent: parent, - index: p.index, - } -} - -// Parent returns processing instruction token's parent element, or nil if it -// has no parent. -func (p *ProcInst) Parent() *Element { - return p.parent -} - -// Index returns the index of this ProcInst token within its parent element's -// list of child tokens. If this ProcInst token has no parent element, the -// index is -1. -func (p *ProcInst) Index() int { - return p.index -} - -// setParent replaces the processing instruction token's parent. -func (p *ProcInst) setParent(parent *Element) { - p.parent = parent -} - -// setIndex sets the processing instruction token's index within its parent -// element's Child slice. -func (p *ProcInst) setIndex(index int) { - p.index = index -} - -// writeTo serializes the processing instruction to the writer. -func (p *ProcInst) writeTo(w *bufio.Writer, s *WriteSettings) { - w.WriteString("<?") - w.WriteString(p.Target) - w.WriteString(" ") - w.WriteString(p.Inst) - w.WriteString("?>") -} diff --git a/vendor/github.com/beevik/etree/helpers.go b/vendor/github.com/beevik/etree/helpers.go deleted file mode 100644 index 825e14e91..000000000 --- a/vendor/github.com/beevik/etree/helpers.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2015-2019 Brett Vickers. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package etree - -import ( - "bufio" - "io" - "strings" - "unicode/utf8" -) - -// A simple stack -type stack struct { - data []interface{} -} - -func (s *stack) empty() bool { - return len(s.data) == 0 -} - -func (s *stack) push(value interface{}) { - s.data = append(s.data, value) -} - -func (s *stack) pop() interface{} { - value := s.data[len(s.data)-1] - s.data[len(s.data)-1] = nil - s.data = s.data[:len(s.data)-1] - return value -} - -func (s *stack) peek() interface{} { - return s.data[len(s.data)-1] -} - -// A fifo is a simple first-in-first-out queue. -type fifo struct { - data []interface{} - head, tail int -} - -func (f *fifo) add(value interface{}) { - if f.len()+1 >= len(f.data) { - f.grow() - } - f.data[f.tail] = value - if f.tail++; f.tail == len(f.data) { - f.tail = 0 - } -} - -func (f *fifo) remove() interface{} { - value := f.data[f.head] - f.data[f.head] = nil - if f.head++; f.head == len(f.data) { - f.head = 0 - } - return value -} - -func (f *fifo) len() int { - if f.tail >= f.head { - return f.tail - f.head - } - return len(f.data) - f.head + f.tail -} - -func (f *fifo) grow() { - c := len(f.data) * 2 - if c == 0 { - c = 4 - } - buf, count := make([]interface{}, c), f.len() - if f.tail >= f.head { - copy(buf[0:count], f.data[f.head:f.tail]) - } else { - hindex := len(f.data) - f.head - copy(buf[0:hindex], f.data[f.head:]) - copy(buf[hindex:count], f.data[:f.tail]) - } - f.data, f.head, f.tail = buf, 0, count -} - -// countReader implements a proxy reader that counts the number of -// bytes read from its encapsulated reader. -type countReader struct { - r io.Reader - bytes int64 -} - -func newCountReader(r io.Reader) *countReader { - return &countReader{r: r} -} - -func (cr *countReader) Read(p []byte) (n int, err error) { - b, err := cr.r.Read(p) - cr.bytes += int64(b) - return b, err -} - -// countWriter implements a proxy writer that counts the number of -// bytes written by its encapsulated writer.
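An illustrative sketch combining the Comment, Directive, and ProcInst constructors defined above; the instruction target, directive text, and element names are invented:

```go
package main

import (
	"fmt"

	"github.com/beevik/etree"
)

func main() {
	doc := etree.NewDocument()

	// A processing instruction and a directive ahead of the root element.
	doc.CreateProcInst("xml", `version="1.0" encoding="UTF-8"`)
	doc.CreateDirective(`DOCTYPE example SYSTEM "example.dtd"`)

	root := doc.CreateElement("example")
	root.CreateComment("generated for illustration")
	root.CreateCData("raw <characters> kept verbatim")

	s, err := doc.WriteToString()
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
}
```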
-type countWriter struct { - w io.Writer - bytes int64 -} - -func newCountWriter(w io.Writer) *countWriter { - return &countWriter{w: w} -} - -func (cw *countWriter) Write(p []byte) (n int, err error) { - b, err := cw.w.Write(p) - cw.bytes += int64(b) - return b, err -} - -// isWhitespace returns true if the byte slice contains only -// whitespace characters. -func isWhitespace(s string) bool { - for i := 0; i < len(s); i++ { - if c := s[i]; c != ' ' && c != '\t' && c != '\n' && c != '\r' { - return false - } - } - return true -} - -// spaceMatch returns true if namespace a is the empty string -// or if namespace a equals namespace b. -func spaceMatch(a, b string) bool { - switch { - case a == "": - return true - default: - return a == b - } -} - -// spaceDecompose breaks a namespace:tag identifier at the ':' -// and returns the two parts. -func spaceDecompose(str string) (space, key string) { - colon := strings.IndexByte(str, ':') - if colon == -1 { - return "", str - } - return str[:colon], str[colon+1:] -} - -// Strings used by indentCRLF and indentLF -const ( - indentSpaces = "\r\n " - indentTabs = "\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t" -) - -// indentCRLF returns a CRLF newline followed by n copies of the first -// non-CRLF character in the source string. -func indentCRLF(n int, source string) string { - switch { - case n < 0: - return source[:2] - case n < len(source)-1: - return source[:n+2] - default: - return source + strings.Repeat(source[2:3], n-len(source)+2) - } -} - -// indentLF returns a LF newline followed by n copies of the first non-LF -// character in the source string. -func indentLF(n int, source string) string { - switch { - case n < 0: - return source[1:2] - case n < len(source)-1: - return source[1 : n+2] - default: - return source[1:] + strings.Repeat(source[2:3], n-len(source)+2) - } -} - -// nextIndex returns the index of the next occurrence of sep in s, -// starting from offset. It returns -1 if the sep string is not found. -func nextIndex(s, sep string, offset int) int { - switch i := strings.Index(s[offset:], sep); i { - case -1: - return -1 - default: - return offset + i - } -} - -// isInteger returns true if the string s contains an integer. -func isInteger(s string) bool { - for i := 0; i < len(s); i++ { - if (s[i] < '0' || s[i] > '9') && !(i == 0 && s[i] == '-') { - return false - } - } - return true -} - -type escapeMode byte - -const ( - escapeNormal escapeMode = iota - escapeCanonicalText - escapeCanonicalAttr -) - -// escapeString writes an escaped version of a string to the writer. 
-func escapeString(w *bufio.Writer, s string, m escapeMode) { - var esc []byte - last := 0 - for i := 0; i < len(s); { - r, width := utf8.DecodeRuneInString(s[i:]) - i += width - switch r { - case '&': - esc = []byte("&amp;") - case '<': - esc = []byte("&lt;") - case '>': - if m == escapeCanonicalAttr { - continue - } - esc = []byte("&gt;") - case '\'': - if m != escapeNormal { - continue - } - esc = []byte("&apos;") - case '"': - if m == escapeCanonicalText { - continue - } - esc = []byte("&quot;") - case '\t': - if m != escapeCanonicalAttr { - continue - } - esc = []byte("&#x9;") - case '\n': - if m != escapeCanonicalAttr { - continue - } - esc = []byte("&#xA;") - case '\r': - if m == escapeNormal { - continue - } - esc = []byte("&#xD;") - default: - if !isInCharacterRange(r) || (r == 0xFFFD && width == 1) { - esc = []byte("\uFFFD") - break - } - continue - } - w.WriteString(s[last : i-width]) - w.Write(esc) - last = i - } - w.WriteString(s[last:]) -} - -func isInCharacterRange(r rune) bool { - return r == 0x09 || - r == 0x0A || - r == 0x0D || - r >= 0x20 && r <= 0xD7FF || - r >= 0xE000 && r <= 0xFFFD || - r >= 0x10000 && r <= 0x10FFFF -} diff --git a/vendor/github.com/beevik/etree/path.go b/vendor/github.com/beevik/etree/path.go deleted file mode 100644 index 82db0ac55..000000000 --- a/vendor/github.com/beevik/etree/path.go +++ /dev/null @@ -1,582 +0,0 @@ -// Copyright 2015-2019 Brett Vickers. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package etree - -import ( - "strconv" - "strings" -) - -/* -A Path is a string that represents a search path through an etree starting -from the document root or an arbitrary element. Paths are used with the -Element object's Find* methods to locate and return desired elements. - -A Path consists of a series of slash-separated "selectors", each of which may -be modified by one or more bracket-enclosed "filters". Selectors are used to -traverse the etree from element to element, while filters are used to narrow -the list of candidate elements at each node. - -Although etree Path strings are similar to XPath strings -(https://www.w3.org/TR/1999/REC-xpath-19991116/), they have a more limited set -of selectors and filtering options. - -The following selectors are supported by etree Path strings: - - . Select the current element. - .. Select the parent of the current element. - * Select all child elements of the current element. - / Select the root element when used at the start of a path. - // Select all descendants of the current element. - tag Select all child elements with a name matching the tag. - -The following basic filters are supported by etree Path strings: - - [@attrib] Keep elements with an attribute named attrib. - [@attrib='val'] Keep elements with an attribute named attrib and value matching val. - [tag] Keep elements with a child element named tag. - [tag='val'] Keep elements with a child element named tag and text matching val. - [n] Keep the n-th element, where n is a numeric index starting from 1. - -The following function filters are also supported: - - [text()] Keep elements with non-empty text. - [text()='val'] Keep elements whose text matches val. - [local-name()='val'] Keep elements whose un-prefixed tag matches val. - [name()='val'] Keep elements whose full tag exactly matches val. - [namespace-prefix()='val'] Keep elements whose namespace prefix matches val. - [namespace-uri()='val'] Keep elements whose namespace URI matches val.
- -Here are some examples of Path strings: - -- Select the bookstore child element of the root element: - /bookstore - -- Beginning from the root element, select the title elements of all -descendant book elements having a 'category' attribute of 'WEB': - //book[@category='WEB']/title - -- Beginning from the current element, select the first descendant -book element with a title child element containing the text 'Great -Expectations': - .//book[title='Great Expectations'][1] - -- Beginning from the current element, select all child elements of -book elements with an attribute 'language' set to 'english': - ./book/*[@language='english'] - -- Beginning from the current element, select all child elements of -book elements containing the text 'special': - ./book/*[text()='special'] - -- Beginning from the current element, select all descendant book -elements whose title child element has a 'language' attribute of 'french': - .//book/title[@language='french']/.. - -- Beginning from the current element, select all book elements -belonging to the http://www.w3.org/TR/html4/ namespace: - .//book[namespace-uri()='http://www.w3.org/TR/html4/'] - -*/ -type Path struct { - segments []segment -} - -// ErrPath is returned by path functions when an invalid etree path is provided. -type ErrPath string - -// Error returns the string describing a path error. -func (err ErrPath) Error() string { - return "etree: " + string(err) -} - -// CompilePath creates an optimized version of an XPath-like string that -// can be used to query elements in an element tree. -func CompilePath(path string) (Path, error) { - var comp compiler - segments := comp.parsePath(path) - if comp.err != ErrPath("") { - return Path{nil}, comp.err - } - return Path{segments}, nil -} - -// MustCompilePath creates an optimized version of an XPath-like string that -// can be used to query elements in an element tree. Panics if an error -// occurs. Use this function to create Paths when you know the path is -// valid (i.e., if it's hard-coded). -func MustCompilePath(path string) Path { - p, err := CompilePath(path) - if err != nil { - panic(err) - } - return p -} - -// A segment is a portion of a path between "/" characters. -// It contains one selector and zero or more [filters]. -type segment struct { - sel selector - filters []filter -} - -func (seg *segment) apply(e *Element, p *pather) { - seg.sel.apply(e, p) - for _, f := range seg.filters { - f.apply(p) - } -} - -// A selector selects XML elements for consideration by the -// path traversal. -type selector interface { - apply(e *Element, p *pather) -} - -// A filter pares down a list of candidate XML elements based -// on a path filter in [brackets]. -type filter interface { - apply(p *pather) -} - -// A pather is helper object that traverses an element tree using -// a Path object. It collects and deduplicates all elements matching -// the path query. -type pather struct { - queue fifo - results []*Element - inResults map[*Element]bool - candidates []*Element - scratch []*Element // used by filters -} - -// A node represents an element and the remaining path segments that -// should be applied against it by the pather. 
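A hedged sketch of querying with the Path syntax documented above, reusing the bookstore-style examples from the doc comment:

```go
package main

import (
	"fmt"

	"github.com/beevik/etree"
)

func main() {
	doc := etree.NewDocument()
	err := doc.ReadFromString(`<bookstore>
  <book category="WEB"><title>Go</title></book>
  <book category="COOKING"><title>Bread</title></book>
</bookstore>`)
	if err != nil {
		panic(err)
	}

	// FindElements compiles the path string on every call; CompilePath or
	// MustCompilePath plus FindElementsPath lets callers reuse the compiled form.
	for _, t := range doc.FindElements("//book[@category='WEB']/title") {
		fmt.Println(t.Text())
	}

	p := etree.MustCompilePath("./bookstore/book[2]/title")
	if title := doc.FindElementPath(p); title != nil {
		fmt.Println(title.Text())
	}
}
```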
-type node struct { - e *Element - segments []segment -} - -func newPather() *pather { - return &pather{ - results: make([]*Element, 0), - inResults: make(map[*Element]bool), - candidates: make([]*Element, 0), - scratch: make([]*Element, 0), - } -} - -// traverse follows the path from the element e, collecting -// and then returning all elements that match the path's selectors -// and filters. -func (p *pather) traverse(e *Element, path Path) []*Element { - for p.queue.add(node{e, path.segments}); p.queue.len() > 0; { - p.eval(p.queue.remove().(node)) - } - return p.results -} - -// eval evalutes the current path node by applying the remaining -// path's selector rules against the node's element. -func (p *pather) eval(n node) { - p.candidates = p.candidates[0:0] - seg, remain := n.segments[0], n.segments[1:] - seg.apply(n.e, p) - - if len(remain) == 0 { - for _, c := range p.candidates { - if in := p.inResults[c]; !in { - p.inResults[c] = true - p.results = append(p.results, c) - } - } - } else { - for _, c := range p.candidates { - p.queue.add(node{c, remain}) - } - } -} - -// A compiler generates a compiled path from a path string. -type compiler struct { - err ErrPath -} - -// parsePath parses an XPath-like string describing a path -// through an element tree and returns a slice of segment -// descriptors. -func (c *compiler) parsePath(path string) []segment { - // If path ends with //, fix it - if strings.HasSuffix(path, "//") { - path = path + "*" - } - - var segments []segment - - // Check for an absolute path - if strings.HasPrefix(path, "/") { - segments = append(segments, segment{new(selectRoot), []filter{}}) - path = path[1:] - } - - // Split path into segments - for _, s := range splitPath(path) { - segments = append(segments, c.parseSegment(s)) - if c.err != ErrPath("") { - break - } - } - return segments -} - -func splitPath(path string) []string { - pieces := make([]string, 0) - start := 0 - inquote := false - for i := 0; i+1 <= len(path); i++ { - if path[i] == '\'' { - inquote = !inquote - } else if path[i] == '/' && !inquote { - pieces = append(pieces, path[start:i]) - start = i + 1 - } - } - return append(pieces, path[start:]) -} - -// parseSegment parses a path segment between / characters. -func (c *compiler) parseSegment(path string) segment { - pieces := strings.Split(path, "[") - seg := segment{ - sel: c.parseSelector(pieces[0]), - filters: []filter{}, - } - for i := 1; i < len(pieces); i++ { - fpath := pieces[i] - if fpath[len(fpath)-1] != ']' { - c.err = ErrPath("path has invalid filter [brackets].") - break - } - seg.filters = append(seg.filters, c.parseFilter(fpath[:len(fpath)-1])) - } - return seg -} - -// parseSelector parses a selector at the start of a path segment. -func (c *compiler) parseSelector(path string) selector { - switch path { - case ".": - return new(selectSelf) - case "..": - return new(selectParent) - case "*": - return new(selectChildren) - case "": - return new(selectDescendants) - default: - return newSelectChildrenByTag(path) - } -} - -var fnTable = map[string]struct { - hasFn func(e *Element) bool - getValFn func(e *Element) string -}{ - "local-name": {nil, (*Element).name}, - "name": {nil, (*Element).FullTag}, - "namespace-prefix": {nil, (*Element).namespacePrefix}, - "namespace-uri": {nil, (*Element).NamespaceURI}, - "text": {(*Element).hasText, (*Element).Text}, -} - -// parseFilter parses a path filter contained within [brackets]. 
-func (c *compiler) parseFilter(path string) filter { - if len(path) == 0 { - c.err = ErrPath("path contains an empty filter expression.") - return nil - } - - // Filter contains [@attr='val'], [fn()='val'], or [tag='val']? - eqindex := strings.Index(path, "='") - if eqindex >= 0 { - rindex := nextIndex(path, "'", eqindex+2) - if rindex != len(path)-1 { - c.err = ErrPath("path has mismatched filter quotes.") - return nil - } - - key := path[:eqindex] - value := path[eqindex+2 : rindex] - - switch { - case key[0] == '@': - return newFilterAttrVal(key[1:], value) - case strings.HasSuffix(key, "()"): - fn := key[:len(key)-2] - if t, ok := fnTable[fn]; ok && t.getValFn != nil { - return newFilterFuncVal(t.getValFn, value) - } - c.err = ErrPath("path has unknown function " + fn) - return nil - default: - return newFilterChildText(key, value) - } - } - - // Filter contains [@attr], [N], [tag] or [fn()] - switch { - case path[0] == '@': - return newFilterAttr(path[1:]) - case strings.HasSuffix(path, "()"): - fn := path[:len(path)-2] - if t, ok := fnTable[fn]; ok && t.hasFn != nil { - return newFilterFunc(t.hasFn) - } - c.err = ErrPath("path has unknown function " + fn) - return nil - case isInteger(path): - pos, _ := strconv.Atoi(path) - switch { - case pos > 0: - return newFilterPos(pos - 1) - default: - return newFilterPos(pos) - } - default: - return newFilterChild(path) - } -} - -// selectSelf selects the current element into the candidate list. -type selectSelf struct{} - -func (s *selectSelf) apply(e *Element, p *pather) { - p.candidates = append(p.candidates, e) -} - -// selectRoot selects the element's root node. -type selectRoot struct{} - -func (s *selectRoot) apply(e *Element, p *pather) { - root := e - for root.parent != nil { - root = root.parent - } - p.candidates = append(p.candidates, root) -} - -// selectParent selects the element's parent into the candidate list. -type selectParent struct{} - -func (s *selectParent) apply(e *Element, p *pather) { - if e.parent != nil { - p.candidates = append(p.candidates, e.parent) - } -} - -// selectChildren selects the element's child elements into the -// candidate list. -type selectChildren struct{} - -func (s *selectChildren) apply(e *Element, p *pather) { - for _, c := range e.Child { - if c, ok := c.(*Element); ok { - p.candidates = append(p.candidates, c) - } - } -} - -// selectDescendants selects all descendant child elements -// of the element into the candidate list. -type selectDescendants struct{} - -func (s *selectDescendants) apply(e *Element, p *pather) { - var queue fifo - for queue.add(e); queue.len() > 0; { - e := queue.remove().(*Element) - p.candidates = append(p.candidates, e) - for _, c := range e.Child { - if c, ok := c.(*Element); ok { - queue.add(c) - } - } - } -} - -// selectChildrenByTag selects into the candidate list all child -// elements of the element having the specified tag. -type selectChildrenByTag struct { - space, tag string -} - -func newSelectChildrenByTag(path string) *selectChildrenByTag { - s, l := spaceDecompose(path) - return &selectChildrenByTag{s, l} -} - -func (s *selectChildrenByTag) apply(e *Element, p *pather) { - for _, c := range e.Child { - if c, ok := c.(*Element); ok && spaceMatch(s.space, c.Space) && s.tag == c.Tag { - p.candidates = append(p.candidates, c) - } - } -} - -// filterPos filters the candidate list, keeping only the -// candidate at the specified index. 
-type filterPos struct { - index int -} - -func newFilterPos(pos int) *filterPos { - return &filterPos{pos} -} - -func (f *filterPos) apply(p *pather) { - if f.index >= 0 { - if f.index < len(p.candidates) { - p.scratch = append(p.scratch, p.candidates[f.index]) - } - } else { - if -f.index <= len(p.candidates) { - p.scratch = append(p.scratch, p.candidates[len(p.candidates)+f.index]) - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} - -// filterAttr filters the candidate list for elements having -// the specified attribute. -type filterAttr struct { - space, key string -} - -func newFilterAttr(str string) *filterAttr { - s, l := spaceDecompose(str) - return &filterAttr{s, l} -} - -func (f *filterAttr) apply(p *pather) { - for _, c := range p.candidates { - for _, a := range c.Attr { - if spaceMatch(f.space, a.Space) && f.key == a.Key { - p.scratch = append(p.scratch, c) - break - } - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} - -// filterAttrVal filters the candidate list for elements having -// the specified attribute with the specified value. -type filterAttrVal struct { - space, key, val string -} - -func newFilterAttrVal(str, value string) *filterAttrVal { - s, l := spaceDecompose(str) - return &filterAttrVal{s, l, value} -} - -func (f *filterAttrVal) apply(p *pather) { - for _, c := range p.candidates { - for _, a := range c.Attr { - if spaceMatch(f.space, a.Space) && f.key == a.Key && f.val == a.Value { - p.scratch = append(p.scratch, c) - break - } - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} - -// filterFunc filters the candidate list for elements satisfying a custom -// boolean function. -type filterFunc struct { - fn func(e *Element) bool -} - -func newFilterFunc(fn func(e *Element) bool) *filterFunc { - return &filterFunc{fn} -} - -func (f *filterFunc) apply(p *pather) { - for _, c := range p.candidates { - if f.fn(c) { - p.scratch = append(p.scratch, c) - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} - -// filterFuncVal filters the candidate list for elements containing a value -// matching the result of a custom function. -type filterFuncVal struct { - fn func(e *Element) string - val string -} - -func newFilterFuncVal(fn func(e *Element) string, value string) *filterFuncVal { - return &filterFuncVal{fn, value} -} - -func (f *filterFuncVal) apply(p *pather) { - for _, c := range p.candidates { - if f.fn(c) == f.val { - p.scratch = append(p.scratch, c) - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} - -// filterChild filters the candidate list for elements having -// a child element with the specified tag. -type filterChild struct { - space, tag string -} - -func newFilterChild(str string) *filterChild { - s, l := spaceDecompose(str) - return &filterChild{s, l} -} - -func (f *filterChild) apply(p *pather) { - for _, c := range p.candidates { - for _, cc := range c.Child { - if cc, ok := cc.(*Element); ok && - spaceMatch(f.space, cc.Space) && - f.tag == cc.Tag { - p.scratch = append(p.scratch, c) - } - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} - -// filterChildText filters the candidate list for elements having -// a child element with the specified tag and text. 
-type filterChildText struct { - space, tag, text string -} - -func newFilterChildText(str, text string) *filterChildText { - s, l := spaceDecompose(str) - return &filterChildText{s, l, text} -} - -func (f *filterChildText) apply(p *pather) { - for _, c := range p.candidates { - for _, cc := range c.Child { - if cc, ok := cc.(*Element); ok && - spaceMatch(f.space, cc.Space) && - f.tag == cc.Tag && - f.text == cc.Text() { - p.scratch = append(p.scratch, c) - } - } - } - p.candidates, p.scratch = p.scratch, p.candidates[0:0] -} diff --git a/vendor/github.com/bflad/tfproviderlint/passes/helper/resource/testmatchresourceattrcallexpr/testmatchresourceattrcallexpr.go b/vendor/github.com/bflad/tfproviderlint/passes/helper/resource/testmatchresourceattrcallexpr/testmatchresourceattrcallexpr.go deleted file mode 100644 index 2c1bf97f2..000000000 --- a/vendor/github.com/bflad/tfproviderlint/passes/helper/resource/testmatchresourceattrcallexpr/testmatchresourceattrcallexpr.go +++ /dev/null @@ -1,13 +0,0 @@ -package testmatchresourceattrcallexpr - -import ( - "github.com/bflad/tfproviderlint/helper/analysisutils" - "github.com/bflad/tfproviderlint/helper/terraformtype/helper/resource" -) - -var Analyzer = analysisutils.FunctionCallExprAnalyzer( - "testmatchresourceattrcallexpr", - resource.IsFunc, - resource.PackagePath, - resource.FuncNameTestMatchResourceAttr, -) diff --git a/vendor/github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatagetokexistscallexpr/resourcedatagetokexistscallexpr.go b/vendor/github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatagetokexistscallexpr/resourcedatagetokexistscallexpr.go deleted file mode 100644 index 5af5001a6..000000000 --- a/vendor/github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatagetokexistscallexpr/resourcedatagetokexistscallexpr.go +++ /dev/null @@ -1,14 +0,0 @@ -package resourcedatagetokexistscallexpr - -import ( - "github.com/bflad/tfproviderlint/helper/analysisutils" - "github.com/bflad/tfproviderlint/helper/terraformtype/helper/schema" -) - -var Analyzer = analysisutils.ReceiverMethodCallExprAnalyzer( - "resourcedatagetokexistscallexpr", - schema.IsReceiverMethod, - schema.PackagePath, - schema.TypeNameResourceData, - "GetOkExists", -) diff --git a/vendor/github.com/bflad/tfproviderlint/passes/helper/schema/resourceinforesourceonly/resourceinforesourceonly.go b/vendor/github.com/bflad/tfproviderlint/passes/helper/schema/resourceinforesourceonly/resourceinforesourceonly.go deleted file mode 100644 index d91aaac3f..000000000 --- a/vendor/github.com/bflad/tfproviderlint/passes/helper/schema/resourceinforesourceonly/resourceinforesourceonly.go +++ /dev/null @@ -1,35 +0,0 @@ -package resourceinforesourceonly - -import ( - "reflect" - - "github.com/bflad/tfproviderlint/helper/terraformtype/helper/schema" - "github.com/bflad/tfproviderlint/passes/helper/schema/resourceinfo" - "golang.org/x/tools/go/analysis" -) - -var Analyzer = &analysis.Analyzer{ - Name: "resourceinforesourceonly", - Doc: "find github.com/hashicorp/terraform-plugin-sdk/helper/schema.Resource literals of Resources (not Data Sources) for later passes", - Requires: []*analysis.Analyzer{ - resourceinfo.Analyzer, - }, - Run: run, - ResultType: reflect.TypeOf([]*schema.ResourceInfo{}), -} - -func run(pass *analysis.Pass) (interface{}, error) { - resourceInfos := pass.ResultOf[resourceinfo.Analyzer].([]*schema.ResourceInfo) - - var result []*schema.ResourceInfo - - for _, resourceInfo := range resourceInfos { - if !resourceInfo.IsResource() { - continue - } - - 
result = append(result, resourceInfo) - } - - return result, nil -} diff --git a/vendor/github.com/bflad/tfproviderlint/xpasses/XR001/README.md b/vendor/github.com/bflad/tfproviderlint/xpasses/XR001/README.md deleted file mode 100644 index 87fc41538..000000000 --- a/vendor/github.com/bflad/tfproviderlint/xpasses/XR001/README.md +++ /dev/null @@ -1,28 +0,0 @@ -# XR001 - -The XR001 analyzer reports usage of [`GetOkExists()`](https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/schema#ResourceData.GetOkExists) calls, which generally do not work as expected. Usage should be moved to standard `Get()` and `GetOk()` calls. - -## Flagged Code - -```go -d.GetOkExists("example") -``` - -## Passing Code - -```go -d.Get("example") - -// or - -d.GetOk("example") -``` - -## Ignoring Reports - -Singular reports can be ignored by adding the a `//lintignore:XR001` Go code comment at the end of the offending line or on the line immediately proceding, e.g. - -```go -//lintignore:XR001 -d.GetOkExists("example") -``` diff --git a/vendor/github.com/bflad/tfproviderlint/xpasses/XR001/XR001.go b/vendor/github.com/bflad/tfproviderlint/xpasses/XR001/XR001.go deleted file mode 100644 index 2782f0bff..000000000 --- a/vendor/github.com/bflad/tfproviderlint/xpasses/XR001/XR001.go +++ /dev/null @@ -1,43 +0,0 @@ -// Package XR001 defines an Analyzer that checks for -// ResourceData.Set() calls using * dereferences -package XR001 - -import ( - "go/ast" - - "github.com/bflad/tfproviderlint/passes/commentignore" - "github.com/bflad/tfproviderlint/passes/helper/schema/resourcedatagetokexistscallexpr" - "golang.org/x/tools/go/analysis" -) - -const Doc = `check for ResourceData.GetOkExists() calls - -The XR001 analyzer reports usage of GetOkExists() calls, which generally do -not work as expected. Usage should be moved to standard Get() and GetOk() -calls.` - -const analyzerName = "XR001" - -var Analyzer = &analysis.Analyzer{ - Name: analyzerName, - Doc: Doc, - Requires: []*analysis.Analyzer{ - resourcedatagetokexistscallexpr.Analyzer, - commentignore.Analyzer, - }, - Run: run, -} - -func run(pass *analysis.Pass) (interface{}, error) { - ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) - callExprs := pass.ResultOf[resourcedatagetokexistscallexpr.Analyzer].([]*ast.CallExpr) - for _, callExpr := range callExprs { - if ignorer.ShouldIgnore(analyzerName, callExpr) { - continue - } - - pass.Reportf(callExpr.Pos(), "%s: ResourceData.GetOkExists() call should be avoided", analyzerName) - } - - return nil, nil -} diff --git a/vendor/github.com/bflad/tfproviderlint/xpasses/XR002/README.md b/vendor/github.com/bflad/tfproviderlint/xpasses/XR002/README.md deleted file mode 100644 index 5b15db4b6..000000000 --- a/vendor/github.com/bflad/tfproviderlint/xpasses/XR002/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# XR002 - -The XR002 analyzer reports missing usage of `Importer` in resources. - -## Flagged Code - -```go -&schema.Resource{ - Create: /* ... */, - Delete: /* ... */, - Read: /* ... */, - Schema: /* ... */, -} -``` - -## Passing Code - -```go -&schema.Resource{ - Create: /* ... */, - Delete: /* ... */, - Importer: &schema.ResourceImporter{/* ... */}, - Read: /* ... */, - Schema: /* ... */, -} -``` - -## Ignoring Reports - -Singular reports can be ignored by adding the a `//lintignore:XR002` Go code comment at the end of the offending line or on the line immediately proceding, e.g. - -```go -//lintignore:XR002 -&schema.Resource{ - Create: /* ... */, - Delete: /* ... */, - Read: /* ... 
*/, -} -``` diff --git a/vendor/github.com/bflad/tfproviderlint/xpasses/XR002/XR002.go b/vendor/github.com/bflad/tfproviderlint/xpasses/XR002/XR002.go deleted file mode 100644 index ad6d939f6..000000000 --- a/vendor/github.com/bflad/tfproviderlint/xpasses/XR002/XR002.go +++ /dev/null @@ -1,43 +0,0 @@ -package XR002 - -import ( - "golang.org/x/tools/go/analysis" - - "github.com/bflad/tfproviderlint/helper/terraformtype/helper/schema" - "github.com/bflad/tfproviderlint/passes/commentignore" - "github.com/bflad/tfproviderlint/passes/helper/schema/resourceinforesourceonly" -) - -const Doc = `check for Resource that should implement Importer - -The XR002 analyzer reports missing usage of Importer in resources.` - -const analyzerName = "XR002" - -var Analyzer = &analysis.Analyzer{ - Name: analyzerName, - Doc: Doc, - Requires: []*analysis.Analyzer{ - commentignore.Analyzer, - resourceinforesourceonly.Analyzer, - }, - Run: run, -} - -func run(pass *analysis.Pass) (interface{}, error) { - ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) - resources := pass.ResultOf[resourceinforesourceonly.Analyzer].([]*schema.ResourceInfo) - for _, resource := range resources { - if ignorer.ShouldIgnore(analyzerName, resource.AstCompositeLit) { - continue - } - - if resource.DeclaresField(schema.ResourceFieldImporter) { - continue - } - - pass.Reportf(resource.AstCompositeLit.Pos(), "%s: resource should include Importer implementation", analyzerName) - } - - return nil, nil -} diff --git a/vendor/github.com/bflad/tfproviderlint/xpasses/XR003/README.md b/vendor/github.com/bflad/tfproviderlint/xpasses/XR003/README.md deleted file mode 100644 index 1faff8622..000000000 --- a/vendor/github.com/bflad/tfproviderlint/xpasses/XR003/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# XR003 - -The XR003 analyzer reports missing usage of `Timeouts` in resources. - -## Flagged Code - -```go -&schema.Resource{ - Create: /* ... */, - Delete: /* ... */, - Read: /* ... */, - Schema: /* ... */, -} -``` - -## Passing Code - -```go -&schema.Resource{ - Create: /* ... */, - Delete: /* ... */, - Read: /* ... */, - Schema: /* ... */, - Timeouts: &schema.ResourceTimeout{/* ... */}, -} -``` - -## Ignoring Reports - -Singular reports can be ignored by adding the a `//lintignore:XR003` Go code comment at the end of the offending line or on the line immediately proceding, e.g. - -```go -//lintignore:XR003 -&schema.Resource{ - Create: /* ... */, - Delete: /* ... */, - Read: /* ... 
*/, -} -``` diff --git a/vendor/github.com/bflad/tfproviderlint/xpasses/XR003/XR003.go b/vendor/github.com/bflad/tfproviderlint/xpasses/XR003/XR003.go deleted file mode 100644 index 9c449cb4f..000000000 --- a/vendor/github.com/bflad/tfproviderlint/xpasses/XR003/XR003.go +++ /dev/null @@ -1,43 +0,0 @@ -package XR003 - -import ( - "golang.org/x/tools/go/analysis" - - "github.com/bflad/tfproviderlint/helper/terraformtype/helper/schema" - "github.com/bflad/tfproviderlint/passes/commentignore" - "github.com/bflad/tfproviderlint/passes/helper/schema/resourceinforesourceonly" -) - -const Doc = `check for Resource that should implement Timeouts - -The XR003 analyzer reports missing usage of Timeouts in resources.` - -const analyzerName = "XR003" - -var Analyzer = &analysis.Analyzer{ - Name: analyzerName, - Doc: Doc, - Requires: []*analysis.Analyzer{ - commentignore.Analyzer, - resourceinforesourceonly.Analyzer, - }, - Run: run, -} - -func run(pass *analysis.Pass) (interface{}, error) { - ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) - resources := pass.ResultOf[resourceinforesourceonly.Analyzer].([]*schema.ResourceInfo) - for _, resource := range resources { - if ignorer.ShouldIgnore(analyzerName, resource.AstCompositeLit) { - continue - } - - if resource.DeclaresField(schema.ResourceFieldTimeouts) { - continue - } - - pass.Reportf(resource.AstCompositeLit.Pos(), "%s: resource should include Timeouts implementation", analyzerName) - } - - return nil, nil -} diff --git a/vendor/github.com/bflad/tfproviderlint/xpasses/XR004/README.md b/vendor/github.com/bflad/tfproviderlint/xpasses/XR004/README.md deleted file mode 100644 index 55b14bbe5..000000000 --- a/vendor/github.com/bflad/tfproviderlint/xpasses/XR004/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# XR004 - -The XR004 analyzer reports [`Set()`](https://godoc.org/github.com/hashicorp/terraform-plugin-sdk/helper/schema#ResourceData.Set) calls that receive a complex value type, but do not perform error checking. This error checking is to prevent issues where the code is not able to properly set the Terraform state for drift detection. Addition details are available in the [Extending Terraform documentation](https://www.terraform.io/docs/extend/best-practices/detecting-drift.html#error-checking-aggregate-types). - -## Flagged Code - -```go -d.Set("example", []interface{}{}) - -d.Set("example", map[string]interface{}{}) - -d.Set("example", schema.NewSet(/* ... */)) -``` - -## Passing Code - -```go -if err := d.Set("example", []interface{}{}); err != nil { - return fmt.Errorf("error setting example: %s", err) -} - -if err := d.Set("example", map[string]interface{}{}); err != nil { - return fmt.Errorf("error setting example: %s", err) -} - -if err := d.Set("example", schema.NewSet(/* ... */)); err != nil { - return fmt.Errorf("error setting example: %s", err) -} -``` - -## Ignoring Reports - -Singular reports can be ignored by adding the a `//lintignore:XR004` Go code comment at the end of the offending line or on the line immediately proceding, e.g. 
- -```go -//lintignore:XR004 -d.Set("example", []interface{}{}) -``` diff --git a/vendor/github.com/bflad/tfproviderlint/xpasses/XR004/XR004.go b/vendor/github.com/bflad/tfproviderlint/xpasses/XR004/XR004.go deleted file mode 100644 index f86861212..000000000 --- a/vendor/github.com/bflad/tfproviderlint/xpasses/XR004/XR004.go +++ /dev/null @@ -1,108 +0,0 @@ -// Package XR004 defines an Analyzer that checks for -// ResourceData.Set() calls missing error checking with -// complex types -package XR004 - -import ( - "go/ast" - "go/types" - - "github.com/bflad/tfproviderlint/helper/terraformtype/helper/schema" - "github.com/bflad/tfproviderlint/passes/commentignore" - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/passes/inspect" - "golang.org/x/tools/go/ast/inspector" -) - -const Doc = `check for ResourceData.Set() calls missing error checking with complex values - -The XR004 analyzer reports Set() calls that receive a complex value type, but -does not perform error checking. This error checking is to prevent issues where -the code is not able to properly set the Terraform state for drift detection. - -Reference: https://www.terraform.io/docs/extend/best-practices/detecting-drift.html#error-checking-aggregate-types` - -const analyzerName = "XR004" - -var Analyzer = &analysis.Analyzer{ - Name: analyzerName, - Doc: Doc, - Requires: []*analysis.Analyzer{ - inspect.Analyzer, - commentignore.Analyzer, - }, - Run: run, -} - -func run(pass *analysis.Pass) (interface{}, error) { - ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) - inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) - - nodeFilter := []ast.Node{ - (*ast.ExprStmt)(nil), - } - - inspect.Preorder(nodeFilter, func(n ast.Node) { - exprStmt := n.(*ast.ExprStmt) - - callExpr, ok := exprStmt.X.(*ast.CallExpr) - - if !ok { - return - } - - if !schema.IsReceiverMethod(callExpr.Fun, pass.TypesInfo, schema.TypeNameResourceData, "Set") { - return - } - - if ignorer.ShouldIgnore(analyzerName, callExpr) { - return - } - - if len(callExpr.Args) < 2 { - return - } - - if isBasicType(pass.TypesInfo.TypeOf(callExpr.Args[1]).Underlying()) { - return - } - - pass.Reportf(callExpr.Pos(), "%s: ResourceData.Set() should perform error checking with complex values", analyzerName) - }) - - return nil, nil -} - -func isBasicType(t types.Type) bool { - switch t := t.(type) { - case *types.Basic: - return isAllowedBasicType(t) - case *types.Pointer: - return isBasicType(t.Elem()) - } - - return false -} - -var allowedBasicKindTypes = []types.BasicKind{ - types.Bool, - types.Float32, - types.Float64, - types.Int, - types.Int8, - types.Int16, - types.Int32, - types.Int64, - types.String, - types.UntypedNil, -} - -func isAllowedBasicType(b *types.Basic) bool { - for _, allowedBasicKindType := range allowedBasicKindTypes { - if b.Kind() == allowedBasicKindType { - return true - } - } - - return false -} diff --git a/vendor/github.com/bflad/tfproviderlint/xpasses/XS001/README.md b/vendor/github.com/bflad/tfproviderlint/xpasses/XS001/README.md deleted file mode 100644 index 923a1996c..000000000 --- a/vendor/github.com/bflad/tfproviderlint/xpasses/XS001/README.md +++ /dev/null @@ -1,40 +0,0 @@ -# XS001 - -The XS001 analyzer reports cases of schemas where `Description` is not configured, which is generally useful for providers that wish to automatically generate documentation based on the schema information. 
- -## Flagged Code - -```go -map[string]*schema.Schema{ - "attribute_name": { - Optional: true, - Type: schema.TypeString, - }, -} -``` - -## Passing Code - -```go -map[string]*schema.Schema{ - "attribute_name": { - Description: "does something useful", - Optional: true, - Type: schema.TypeString, - }, -} -``` - -## Ignoring Reports - -Singular reports can be ignored by adding the a `//lintignore:XS001` Go code comment at the end of the offending line or on the line immediately proceding, e.g. - -```go -//lintignore:XS001 -map[string]*schema.Schema{ - "attribute_name": { - Optional: true, - Type: schema.TypeString, - }, -} -``` diff --git a/vendor/github.com/bflad/tfproviderlint/xpasses/XS001/XS001.go b/vendor/github.com/bflad/tfproviderlint/xpasses/XS001/XS001.go deleted file mode 100644 index 0ae6a9f8b..000000000 --- a/vendor/github.com/bflad/tfproviderlint/xpasses/XS001/XS001.go +++ /dev/null @@ -1,58 +0,0 @@ -// Package XS001 defines an Analyzer that checks for -// Schema that Description is configured -package XS001 - -import ( - "go/ast" - - "github.com/bflad/tfproviderlint/helper/terraformtype/helper/schema" - "github.com/bflad/tfproviderlint/passes/commentignore" - "github.com/bflad/tfproviderlint/passes/helper/schema/schemamapcompositelit" - "golang.org/x/tools/go/analysis" -) - -const Doc = `check for Schema that Description is configured - -The XS001 analyzer reports cases of schemas where Description is not -configured, which is generally useful for providers that wish to -automatically generate documentation based on the schema information.` - -const analyzerName = "XS001" - -var Analyzer = &analysis.Analyzer{ - Name: analyzerName, - Doc: Doc, - Requires: []*analysis.Analyzer{ - schemamapcompositelit.Analyzer, - commentignore.Analyzer, - }, - Run: run, -} - -func run(pass *analysis.Pass) (interface{}, error) { - ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer) - schemamapcompositelits := pass.ResultOf[schemamapcompositelit.Analyzer].([]*ast.CompositeLit) - - for _, smap := range schemamapcompositelits { - for _, schemaCompositeLit := range schema.GetSchemaMapSchemas(smap) { - schemaInfo := schema.NewSchemaInfo(schemaCompositeLit, pass.TypesInfo) - - if ignorer.ShouldIgnore(analyzerName, schemaInfo.AstCompositeLit) { - continue - } - - if schemaInfo.Schema.Description != "" { - continue - } - - switch t := schemaInfo.AstCompositeLit.Type.(type) { - default: - pass.Reportf(schemaInfo.AstCompositeLit.Lbrace, "%s: schema should configure Description", analyzerName) - case *ast.SelectorExpr: - pass.Reportf(t.Sel.Pos(), "%s: schema should configure Description", analyzerName) - } - } - } - - return nil, nil -} diff --git a/vendor/github.com/bflad/tfproviderlint/xpasses/checks.go b/vendor/github.com/bflad/tfproviderlint/xpasses/checks.go deleted file mode 100644 index 0b1023e7e..000000000 --- a/vendor/github.com/bflad/tfproviderlint/xpasses/checks.go +++ /dev/null @@ -1,21 +0,0 @@ -package xpasses - -import ( - "github.com/bflad/tfproviderlint/xpasses/XR001" - "github.com/bflad/tfproviderlint/xpasses/XR002" - "github.com/bflad/tfproviderlint/xpasses/XR003" - "github.com/bflad/tfproviderlint/xpasses/XR004" - "github.com/bflad/tfproviderlint/xpasses/XS001" - "golang.org/x/tools/go/analysis" -) - -// AllChecks contains all Analyzers that report issues -// This can be consumed via multichecker.Main(xpasses.AllChecks...) 
or by -// combining these Analyzers with additional custom Analyzers -var AllChecks = []*analysis.Analyzer{ - XR001.Analyzer, - XR002.Analyzer, - XR003.Analyzer, - XR004.Analyzer, - XS001.Analyzer, -} diff --git a/vendor/github.com/boombuler/barcode/.gitignore b/vendor/github.com/boombuler/barcode/.gitignore deleted file mode 100644 index 1d74e2196..000000000 --- a/vendor/github.com/boombuler/barcode/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.vscode/ diff --git a/vendor/github.com/boombuler/barcode/README.md b/vendor/github.com/boombuler/barcode/README.md deleted file mode 100644 index 2a988db39..000000000 --- a/vendor/github.com/boombuler/barcode/README.md +++ /dev/null @@ -1,53 +0,0 @@ -[![Join the chat at https://gitter.im/golang-barcode/Lobby](https://badges.gitter.im/golang-barcode/Lobby.svg)](https://gitter.im/golang-barcode/Lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -## Introduction ## - -This is a package for GO which can be used to create different types of barcodes. - -## Supported Barcode Types ## -* 2 of 5 -* Aztec Code -* Codabar -* Code 128 -* Code 39 -* Code 93 -* Datamatrix -* EAN 13 -* EAN 8 -* PDF 417 -* QR Code - -## Example ## - -This is a simple example on how to create a QR-Code and write it to a png-file -```go -package main - -import ( - "image/png" - "os" - - "github.com/boombuler/barcode" - "github.com/boombuler/barcode/qr" -) - -func main() { - // Create the barcode - qrCode, _ := qr.Encode("Hello World", qr.M, qr.Auto) - - // Scale the barcode to 200x200 pixels - qrCode, _ = barcode.Scale(qrCode, 200, 200) - - // create the output file - file, _ := os.Create("qrcode.png") - defer file.Close() - - // encode the barcode as png - png.Encode(file, qrCode) -} -``` - -## Documentation ## -See [GoDoc](https://godoc.org/github.com/boombuler/barcode) - -To create a barcode use the Encode function from one of the subpackages. diff --git a/vendor/github.com/boombuler/barcode/barcode.go b/vendor/github.com/boombuler/barcode/barcode.go deleted file mode 100644 index 25f4a693d..000000000 --- a/vendor/github.com/boombuler/barcode/barcode.go +++ /dev/null @@ -1,42 +0,0 @@ -package barcode - -import "image" - -const ( - TypeAztec = "Aztec" - TypeCodabar = "Codabar" - TypeCode128 = "Code 128" - TypeCode39 = "Code 39" - TypeCode93 = "Code 93" - TypeDataMatrix = "DataMatrix" - TypeEAN8 = "EAN 8" - TypeEAN13 = "EAN 13" - TypePDF = "PDF417" - TypeQR = "QR Code" - Type2of5 = "2 of 5" - Type2of5Interleaved = "2 of 5 (interleaved)" -) - -// Contains some meta information about a barcode -type Metadata struct { - // the name of the barcode kind - CodeKind string - // contains 1 for 1D barcodes or 2 for 2D barcodes - Dimensions byte -} - -// a rendered and encoded barcode -type Barcode interface { - image.Image - // returns some meta information about the barcode - Metadata() Metadata - // the data that was encoded in this barcode - Content() string -} - -// Additional interface that some barcodes might implement to provide -// the value of its checksum. 
-type BarcodeIntCS interface { - Barcode - CheckSum() int -} diff --git a/vendor/github.com/boombuler/barcode/go.mod b/vendor/github.com/boombuler/barcode/go.mod deleted file mode 100644 index ed53593b9..000000000 --- a/vendor/github.com/boombuler/barcode/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/boombuler/barcode diff --git a/vendor/github.com/boombuler/barcode/qr/alphanumeric.go b/vendor/github.com/boombuler/barcode/qr/alphanumeric.go deleted file mode 100644 index 4ded7c8e0..000000000 --- a/vendor/github.com/boombuler/barcode/qr/alphanumeric.go +++ /dev/null @@ -1,66 +0,0 @@ -package qr - -import ( - "errors" - "fmt" - "strings" - - "github.com/boombuler/barcode/utils" -) - -const charSet string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:" - -func stringToAlphaIdx(content string) <-chan int { - result := make(chan int) - go func() { - for _, r := range content { - idx := strings.IndexRune(charSet, r) - result <- idx - if idx < 0 { - break - } - } - close(result) - }() - - return result -} - -func encodeAlphaNumeric(content string, ecl ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) { - - contentLenIsOdd := len(content)%2 == 1 - contentBitCount := (len(content) / 2) * 11 - if contentLenIsOdd { - contentBitCount += 6 - } - vi := findSmallestVersionInfo(ecl, alphaNumericMode, contentBitCount) - if vi == nil { - return nil, nil, errors.New("To much data to encode") - } - - res := new(utils.BitList) - res.AddBits(int(alphaNumericMode), 4) - res.AddBits(len(content), vi.charCountBits(alphaNumericMode)) - - encoder := stringToAlphaIdx(content) - - for idx := 0; idx < len(content)/2; idx++ { - c1 := <-encoder - c2 := <-encoder - if c1 < 0 || c2 < 0 { - return nil, nil, fmt.Errorf("\"%s\" can not be encoded as %s", content, AlphaNumeric) - } - res.AddBits(c1*45+c2, 11) - } - if contentLenIsOdd { - c := <-encoder - if c < 0 { - return nil, nil, fmt.Errorf("\"%s\" can not be encoded as %s", content, AlphaNumeric) - } - res.AddBits(c, 6) - } - - addPaddingAndTerminator(res, vi) - - return res, vi, nil -} diff --git a/vendor/github.com/boombuler/barcode/qr/automatic.go b/vendor/github.com/boombuler/barcode/qr/automatic.go deleted file mode 100644 index e7c56013f..000000000 --- a/vendor/github.com/boombuler/barcode/qr/automatic.go +++ /dev/null @@ -1,23 +0,0 @@ -package qr - -import ( - "fmt" - - "github.com/boombuler/barcode/utils" -) - -func encodeAuto(content string, ecl ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) { - bits, vi, _ := Numeric.getEncoder()(content, ecl) - if bits != nil && vi != nil { - return bits, vi, nil - } - bits, vi, _ = AlphaNumeric.getEncoder()(content, ecl) - if bits != nil && vi != nil { - return bits, vi, nil - } - bits, vi, _ = Unicode.getEncoder()(content, ecl) - if bits != nil && vi != nil { - return bits, vi, nil - } - return nil, nil, fmt.Errorf("No encoding found to encode \"%s\"", content) -} diff --git a/vendor/github.com/boombuler/barcode/qr/blocks.go b/vendor/github.com/boombuler/barcode/qr/blocks.go deleted file mode 100644 index d3173787f..000000000 --- a/vendor/github.com/boombuler/barcode/qr/blocks.go +++ /dev/null @@ -1,59 +0,0 @@ -package qr - -type block struct { - data []byte - ecc []byte -} -type blockList []*block - -func splitToBlocks(data <-chan byte, vi *versionInfo) blockList { - result := make(blockList, vi.NumberOfBlocksInGroup1+vi.NumberOfBlocksInGroup2) - - for b := 0; b < int(vi.NumberOfBlocksInGroup1); b++ { - blk := new(block) - blk.data = make([]byte, vi.DataCodeWordsPerBlockInGroup1) - for cw := 
0; cw < int(vi.DataCodeWordsPerBlockInGroup1); cw++ { - blk.data[cw] = <-data - } - blk.ecc = ec.calcECC(blk.data, vi.ErrorCorrectionCodewordsPerBlock) - result[b] = blk - } - - for b := 0; b < int(vi.NumberOfBlocksInGroup2); b++ { - blk := new(block) - blk.data = make([]byte, vi.DataCodeWordsPerBlockInGroup2) - for cw := 0; cw < int(vi.DataCodeWordsPerBlockInGroup2); cw++ { - blk.data[cw] = <-data - } - blk.ecc = ec.calcECC(blk.data, vi.ErrorCorrectionCodewordsPerBlock) - result[int(vi.NumberOfBlocksInGroup1)+b] = blk - } - - return result -} - -func (bl blockList) interleave(vi *versionInfo) []byte { - var maxCodewordCount int - if vi.DataCodeWordsPerBlockInGroup1 > vi.DataCodeWordsPerBlockInGroup2 { - maxCodewordCount = int(vi.DataCodeWordsPerBlockInGroup1) - } else { - maxCodewordCount = int(vi.DataCodeWordsPerBlockInGroup2) - } - resultLen := (vi.DataCodeWordsPerBlockInGroup1+vi.ErrorCorrectionCodewordsPerBlock)*vi.NumberOfBlocksInGroup1 + - (vi.DataCodeWordsPerBlockInGroup2+vi.ErrorCorrectionCodewordsPerBlock)*vi.NumberOfBlocksInGroup2 - - result := make([]byte, 0, resultLen) - for i := 0; i < maxCodewordCount; i++ { - for b := 0; b < len(bl); b++ { - if len(bl[b].data) > i { - result = append(result, bl[b].data[i]) - } - } - } - for i := 0; i < int(vi.ErrorCorrectionCodewordsPerBlock); i++ { - for b := 0; b < len(bl); b++ { - result = append(result, bl[b].ecc[i]) - } - } - return result -} diff --git a/vendor/github.com/boombuler/barcode/qr/encoder.go b/vendor/github.com/boombuler/barcode/qr/encoder.go deleted file mode 100644 index 2c6ab2111..000000000 --- a/vendor/github.com/boombuler/barcode/qr/encoder.go +++ /dev/null @@ -1,416 +0,0 @@ -// Package qr can be used to create QR barcodes. -package qr - -import ( - "image" - - "github.com/boombuler/barcode" - "github.com/boombuler/barcode/utils" -) - -type encodeFn func(content string, eccLevel ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) - -// Encoding mode for QR Codes. 
-type Encoding byte - -const ( - // Auto will choose ths best matching encoding - Auto Encoding = iota - // Numeric encoding only encodes numbers [0-9] - Numeric - // AlphaNumeric encoding only encodes uppercase letters, numbers and [Space], $, %, *, +, -, ., /, : - AlphaNumeric - // Unicode encoding encodes the string as utf-8 - Unicode - // only for testing purpose - unknownEncoding -) - -func (e Encoding) getEncoder() encodeFn { - switch e { - case Auto: - return encodeAuto - case Numeric: - return encodeNumeric - case AlphaNumeric: - return encodeAlphaNumeric - case Unicode: - return encodeUnicode - } - return nil -} - -func (e Encoding) String() string { - switch e { - case Auto: - return "Auto" - case Numeric: - return "Numeric" - case AlphaNumeric: - return "AlphaNumeric" - case Unicode: - return "Unicode" - } - return "" -} - -// Encode returns a QR barcode with the given content, error correction level and uses the given encoding -func Encode(content string, level ErrorCorrectionLevel, mode Encoding) (barcode.Barcode, error) { - bits, vi, err := mode.getEncoder()(content, level) - if err != nil { - return nil, err - } - - blocks := splitToBlocks(bits.IterateBytes(), vi) - data := blocks.interleave(vi) - result := render(data, vi) - result.content = content - return result, nil -} - -func render(data []byte, vi *versionInfo) *qrcode { - dim := vi.modulWidth() - results := make([]*qrcode, 8) - for i := 0; i < 8; i++ { - results[i] = newBarcode(dim) - } - - occupied := newBarcode(dim) - - setAll := func(x int, y int, val bool) { - occupied.Set(x, y, true) - for i := 0; i < 8; i++ { - results[i].Set(x, y, val) - } - } - - drawFinderPatterns(vi, setAll) - drawAlignmentPatterns(occupied, vi, setAll) - - //Timing Pattern: - var i int - for i = 0; i < dim; i++ { - if !occupied.Get(i, 6) { - setAll(i, 6, i%2 == 0) - } - if !occupied.Get(6, i) { - setAll(6, i, i%2 == 0) - } - } - // Dark Module - setAll(8, dim-8, true) - - drawVersionInfo(vi, setAll) - drawFormatInfo(vi, -1, occupied.Set) - for i := 0; i < 8; i++ { - drawFormatInfo(vi, i, results[i].Set) - } - - // Write the data - var curBitNo int - - for pos := range iterateModules(occupied) { - var curBit bool - if curBitNo < len(data)*8 { - curBit = ((data[curBitNo/8] >> uint(7-(curBitNo%8))) & 1) == 1 - } else { - curBit = false - } - - for i := 0; i < 8; i++ { - setMasked(pos.X, pos.Y, curBit, i, results[i].Set) - } - curBitNo++ - } - - lowestPenalty := ^uint(0) - lowestPenaltyIdx := -1 - for i := 0; i < 8; i++ { - p := results[i].calcPenalty() - if p < lowestPenalty { - lowestPenalty = p - lowestPenaltyIdx = i - } - } - return results[lowestPenaltyIdx] -} - -func setMasked(x, y int, val bool, mask int, set func(int, int, bool)) { - switch mask { - case 0: - val = val != (((y + x) % 2) == 0) - break - case 1: - val = val != ((y % 2) == 0) - break - case 2: - val = val != ((x % 3) == 0) - break - case 3: - val = val != (((y + x) % 3) == 0) - break - case 4: - val = val != (((y/2 + x/3) % 2) == 0) - break - case 5: - val = val != (((y*x)%2)+((y*x)%3) == 0) - break - case 6: - val = val != ((((y*x)%2)+((y*x)%3))%2 == 0) - break - case 7: - val = val != ((((y+x)%2)+((y*x)%3))%2 == 0) - } - set(x, y, val) -} - -func iterateModules(occupied *qrcode) <-chan image.Point { - result := make(chan image.Point) - allPoints := make(chan image.Point) - go func() { - curX := occupied.dimension - 1 - curY := occupied.dimension - 1 - isUpward := true - - for true { - if isUpward { - allPoints <- image.Pt(curX, curY) - allPoints <- image.Pt(curX-1, 
curY) - curY-- - if curY < 0 { - curY = 0 - curX -= 2 - if curX == 6 { - curX-- - } - if curX < 0 { - break - } - isUpward = false - } - } else { - allPoints <- image.Pt(curX, curY) - allPoints <- image.Pt(curX-1, curY) - curY++ - if curY >= occupied.dimension { - curY = occupied.dimension - 1 - curX -= 2 - if curX == 6 { - curX-- - } - isUpward = true - if curX < 0 { - break - } - } - } - } - - close(allPoints) - }() - go func() { - for pt := range allPoints { - if !occupied.Get(pt.X, pt.Y) { - result <- pt - } - } - close(result) - }() - return result -} - -func drawFinderPatterns(vi *versionInfo, set func(int, int, bool)) { - dim := vi.modulWidth() - drawPattern := func(xoff int, yoff int) { - for x := -1; x < 8; x++ { - for y := -1; y < 8; y++ { - val := (x == 0 || x == 6 || y == 0 || y == 6 || (x > 1 && x < 5 && y > 1 && y < 5)) && (x <= 6 && y <= 6 && x >= 0 && y >= 0) - - if x+xoff >= 0 && x+xoff < dim && y+yoff >= 0 && y+yoff < dim { - set(x+xoff, y+yoff, val) - } - } - } - } - drawPattern(0, 0) - drawPattern(0, dim-7) - drawPattern(dim-7, 0) -} - -func drawAlignmentPatterns(occupied *qrcode, vi *versionInfo, set func(int, int, bool)) { - drawPattern := func(xoff int, yoff int) { - for x := -2; x <= 2; x++ { - for y := -2; y <= 2; y++ { - val := x == -2 || x == 2 || y == -2 || y == 2 || (x == 0 && y == 0) - set(x+xoff, y+yoff, val) - } - } - } - positions := vi.alignmentPatternPlacements() - - for _, x := range positions { - for _, y := range positions { - if occupied.Get(x, y) { - continue - } - drawPattern(x, y) - } - } -} - -var formatInfos = map[ErrorCorrectionLevel]map[int][]bool{ - L: { - 0: []bool{true, true, true, false, true, true, true, true, true, false, false, false, true, false, false}, - 1: []bool{true, true, true, false, false, true, false, true, true, true, true, false, false, true, true}, - 2: []bool{true, true, true, true, true, false, true, true, false, true, false, true, false, true, false}, - 3: []bool{true, true, true, true, false, false, false, true, false, false, true, true, true, false, true}, - 4: []bool{true, true, false, false, true, true, false, false, false, true, false, true, true, true, true}, - 5: []bool{true, true, false, false, false, true, true, false, false, false, true, true, false, false, false}, - 6: []bool{true, true, false, true, true, false, false, false, true, false, false, false, false, false, true}, - 7: []bool{true, true, false, true, false, false, true, false, true, true, true, false, true, true, false}, - }, - M: { - 0: []bool{true, false, true, false, true, false, false, false, false, false, true, false, false, true, false}, - 1: []bool{true, false, true, false, false, false, true, false, false, true, false, false, true, false, true}, - 2: []bool{true, false, true, true, true, true, false, false, true, true, true, true, true, false, false}, - 3: []bool{true, false, true, true, false, true, true, false, true, false, false, true, false, true, true}, - 4: []bool{true, false, false, false, true, false, true, true, true, true, true, true, false, false, true}, - 5: []bool{true, false, false, false, false, false, false, true, true, false, false, true, true, true, false}, - 6: []bool{true, false, false, true, true, true, true, true, false, false, true, false, true, true, true}, - 7: []bool{true, false, false, true, false, true, false, true, false, true, false, false, false, false, false}, - }, - Q: { - 0: []bool{false, true, true, false, true, false, true, false, true, false, true, true, true, true, true}, - 1: []bool{false, true, true, 
false, false, false, false, false, true, true, false, true, false, false, false}, - 2: []bool{false, true, true, true, true, true, true, false, false, true, true, false, false, false, true}, - 3: []bool{false, true, true, true, false, true, false, false, false, false, false, false, true, true, false}, - 4: []bool{false, true, false, false, true, false, false, true, false, true, true, false, true, false, false}, - 5: []bool{false, true, false, false, false, false, true, true, false, false, false, false, false, true, true}, - 6: []bool{false, true, false, true, true, true, false, true, true, false, true, true, false, true, false}, - 7: []bool{false, true, false, true, false, true, true, true, true, true, false, true, true, false, true}, - }, - H: { - 0: []bool{false, false, true, false, true, true, false, true, false, false, false, true, false, false, true}, - 1: []bool{false, false, true, false, false, true, true, true, false, true, true, true, true, true, false}, - 2: []bool{false, false, true, true, true, false, false, true, true, true, false, false, true, true, true}, - 3: []bool{false, false, true, true, false, false, true, true, true, false, true, false, false, false, false}, - 4: []bool{false, false, false, false, true, true, true, false, true, true, false, false, false, true, false}, - 5: []bool{false, false, false, false, false, true, false, false, true, false, true, false, true, false, true}, - 6: []bool{false, false, false, true, true, false, true, false, false, false, false, true, true, false, false}, - 7: []bool{false, false, false, true, false, false, false, false, false, true, true, true, false, true, true}, - }, -} - -func drawFormatInfo(vi *versionInfo, usedMask int, set func(int, int, bool)) { - var formatInfo []bool - - if usedMask == -1 { - formatInfo = []bool{true, true, true, true, true, true, true, true, true, true, true, true, true, true, true} // Set all to true cause -1 --> occupied mask. 
- } else { - formatInfo = formatInfos[vi.Level][usedMask] - } - - if len(formatInfo) == 15 { - dim := vi.modulWidth() - set(0, 8, formatInfo[0]) - set(1, 8, formatInfo[1]) - set(2, 8, formatInfo[2]) - set(3, 8, formatInfo[3]) - set(4, 8, formatInfo[4]) - set(5, 8, formatInfo[5]) - set(7, 8, formatInfo[6]) - set(8, 8, formatInfo[7]) - set(8, 7, formatInfo[8]) - set(8, 5, formatInfo[9]) - set(8, 4, formatInfo[10]) - set(8, 3, formatInfo[11]) - set(8, 2, formatInfo[12]) - set(8, 1, formatInfo[13]) - set(8, 0, formatInfo[14]) - - set(8, dim-1, formatInfo[0]) - set(8, dim-2, formatInfo[1]) - set(8, dim-3, formatInfo[2]) - set(8, dim-4, formatInfo[3]) - set(8, dim-5, formatInfo[4]) - set(8, dim-6, formatInfo[5]) - set(8, dim-7, formatInfo[6]) - set(dim-8, 8, formatInfo[7]) - set(dim-7, 8, formatInfo[8]) - set(dim-6, 8, formatInfo[9]) - set(dim-5, 8, formatInfo[10]) - set(dim-4, 8, formatInfo[11]) - set(dim-3, 8, formatInfo[12]) - set(dim-2, 8, formatInfo[13]) - set(dim-1, 8, formatInfo[14]) - } -} - -var versionInfoBitsByVersion = map[byte][]bool{ - 7: []bool{false, false, false, true, true, true, true, true, false, false, true, false, false, true, false, true, false, false}, - 8: []bool{false, false, true, false, false, false, false, true, false, true, true, false, true, true, true, true, false, false}, - 9: []bool{false, false, true, false, false, true, true, false, true, false, true, false, false, true, true, false, false, true}, - 10: []bool{false, false, true, false, true, false, false, true, false, false, true, true, false, true, false, false, true, true}, - 11: []bool{false, false, true, false, true, true, true, false, true, true, true, true, true, true, false, true, true, false}, - 12: []bool{false, false, true, true, false, false, false, true, true, true, false, true, true, false, false, false, true, false}, - 13: []bool{false, false, true, true, false, true, true, false, false, false, false, true, false, false, false, true, true, true}, - 14: []bool{false, false, true, true, true, false, false, true, true, false, false, false, false, false, true, true, false, true}, - 15: []bool{false, false, true, true, true, true, true, false, false, true, false, false, true, false, true, false, false, false}, - 16: []bool{false, true, false, false, false, false, true, false, true, true, false, true, true, true, true, false, false, false}, - 17: []bool{false, true, false, false, false, true, false, true, false, false, false, true, false, true, true, true, false, true}, - 18: []bool{false, true, false, false, true, false, true, false, true, false, false, false, false, true, false, true, true, true}, - 19: []bool{false, true, false, false, true, true, false, true, false, true, false, false, true, true, false, false, true, false}, - 20: []bool{false, true, false, true, false, false, true, false, false, true, true, false, true, false, false, true, true, false}, - 21: []bool{false, true, false, true, false, true, false, true, true, false, true, false, false, false, false, false, true, true}, - 22: []bool{false, true, false, true, true, false, true, false, false, false, true, true, false, false, true, false, false, true}, - 23: []bool{false, true, false, true, true, true, false, true, true, true, true, true, true, false, true, true, false, false}, - 24: []bool{false, true, true, false, false, false, true, true, true, false, true, true, false, false, false, true, false, false}, - 25: []bool{false, true, true, false, false, true, false, false, false, true, true, true, true, false, false, false, false, true}, 
- 26: []bool{false, true, true, false, true, false, true, true, true, true, true, false, true, false, true, false, true, true}, - 27: []bool{false, true, true, false, true, true, false, false, false, false, true, false, false, false, true, true, true, false}, - 28: []bool{false, true, true, true, false, false, true, true, false, false, false, false, false, true, true, false, true, false}, - 29: []bool{false, true, true, true, false, true, false, false, true, true, false, false, true, true, true, true, true, true}, - 30: []bool{false, true, true, true, true, false, true, true, false, true, false, true, true, true, false, true, false, true}, - 31: []bool{false, true, true, true, true, true, false, false, true, false, false, true, false, true, false, false, false, false}, - 32: []bool{true, false, false, false, false, false, true, false, false, true, true, true, false, true, false, true, false, true}, - 33: []bool{true, false, false, false, false, true, false, true, true, false, true, true, true, true, false, false, false, false}, - 34: []bool{true, false, false, false, true, false, true, false, false, false, true, false, true, true, true, false, true, false}, - 35: []bool{true, false, false, false, true, true, false, true, true, true, true, false, false, true, true, true, true, true}, - 36: []bool{true, false, false, true, false, false, true, false, true, true, false, false, false, false, true, false, true, true}, - 37: []bool{true, false, false, true, false, true, false, true, false, false, false, false, true, false, true, true, true, false}, - 38: []bool{true, false, false, true, true, false, true, false, true, false, false, true, true, false, false, true, false, false}, - 39: []bool{true, false, false, true, true, true, false, true, false, true, false, true, false, false, false, false, false, true}, - 40: []bool{true, false, true, false, false, false, true, true, false, false, false, true, true, false, true, false, false, true}, -} - -func drawVersionInfo(vi *versionInfo, set func(int, int, bool)) { - versionInfoBits, ok := versionInfoBitsByVersion[vi.Version] - - if ok && len(versionInfoBits) > 0 { - for i := 0; i < len(versionInfoBits); i++ { - x := (vi.modulWidth() - 11) + i%3 - y := i / 3 - set(x, y, versionInfoBits[len(versionInfoBits)-i-1]) - set(y, x, versionInfoBits[len(versionInfoBits)-i-1]) - } - } - -} - -func addPaddingAndTerminator(bl *utils.BitList, vi *versionInfo) { - for i := 0; i < 4 && bl.Len() < vi.totalDataBytes()*8; i++ { - bl.AddBit(false) - } - - for bl.Len()%8 != 0 { - bl.AddBit(false) - } - - for i := 0; bl.Len() < vi.totalDataBytes()*8; i++ { - if i%2 == 0 { - bl.AddByte(236) - } else { - bl.AddByte(17) - } - } -} diff --git a/vendor/github.com/boombuler/barcode/qr/errorcorrection.go b/vendor/github.com/boombuler/barcode/qr/errorcorrection.go deleted file mode 100644 index 08ebf0ce6..000000000 --- a/vendor/github.com/boombuler/barcode/qr/errorcorrection.go +++ /dev/null @@ -1,29 +0,0 @@ -package qr - -import ( - "github.com/boombuler/barcode/utils" -) - -type errorCorrection struct { - rs *utils.ReedSolomonEncoder -} - -var ec = newErrorCorrection() - -func newErrorCorrection() *errorCorrection { - fld := utils.NewGaloisField(285, 256, 0) - return &errorCorrection{utils.NewReedSolomonEncoder(fld)} -} - -func (ec *errorCorrection) calcECC(data []byte, eccCount byte) []byte { - dataInts := make([]int, len(data)) - for i := 0; i < len(data); i++ { - dataInts[i] = int(data[i]) - } - res := ec.rs.Encode(dataInts, int(eccCount)) - result := make([]byte, len(res)) - 
for i := 0; i < len(res); i++ { - result[i] = byte(res[i]) - } - return result -} diff --git a/vendor/github.com/boombuler/barcode/qr/numeric.go b/vendor/github.com/boombuler/barcode/qr/numeric.go deleted file mode 100644 index 49b44cc45..000000000 --- a/vendor/github.com/boombuler/barcode/qr/numeric.go +++ /dev/null @@ -1,56 +0,0 @@ -package qr - -import ( - "errors" - "fmt" - "strconv" - - "github.com/boombuler/barcode/utils" -) - -func encodeNumeric(content string, ecl ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) { - contentBitCount := (len(content) / 3) * 10 - switch len(content) % 3 { - case 1: - contentBitCount += 4 - case 2: - contentBitCount += 7 - } - vi := findSmallestVersionInfo(ecl, numericMode, contentBitCount) - if vi == nil { - return nil, nil, errors.New("To much data to encode") - } - res := new(utils.BitList) - res.AddBits(int(numericMode), 4) - res.AddBits(len(content), vi.charCountBits(numericMode)) - - for pos := 0; pos < len(content); pos += 3 { - var curStr string - if pos+3 <= len(content) { - curStr = content[pos : pos+3] - } else { - curStr = content[pos:] - } - - i, err := strconv.Atoi(curStr) - if err != nil || i < 0 { - return nil, nil, fmt.Errorf("\"%s\" can not be encoded as %s", content, Numeric) - } - var bitCnt byte - switch len(curStr) % 3 { - case 0: - bitCnt = 10 - case 1: - bitCnt = 4 - break - case 2: - bitCnt = 7 - break - } - - res.AddBits(i, bitCnt) - } - - addPaddingAndTerminator(res, vi) - return res, vi, nil -} diff --git a/vendor/github.com/boombuler/barcode/qr/qrcode.go b/vendor/github.com/boombuler/barcode/qr/qrcode.go deleted file mode 100644 index 13607604b..000000000 --- a/vendor/github.com/boombuler/barcode/qr/qrcode.go +++ /dev/null @@ -1,166 +0,0 @@ -package qr - -import ( - "image" - "image/color" - "math" - - "github.com/boombuler/barcode" - "github.com/boombuler/barcode/utils" -) - -type qrcode struct { - dimension int - data *utils.BitList - content string -} - -func (qr *qrcode) Content() string { - return qr.content -} - -func (qr *qrcode) Metadata() barcode.Metadata { - return barcode.Metadata{barcode.TypeQR, 2} -} - -func (qr *qrcode) ColorModel() color.Model { - return color.Gray16Model -} - -func (qr *qrcode) Bounds() image.Rectangle { - return image.Rect(0, 0, qr.dimension, qr.dimension) -} - -func (qr *qrcode) At(x, y int) color.Color { - if qr.Get(x, y) { - return color.Black - } - return color.White -} - -func (qr *qrcode) Get(x, y int) bool { - return qr.data.GetBit(x*qr.dimension + y) -} - -func (qr *qrcode) Set(x, y int, val bool) { - qr.data.SetBit(x*qr.dimension+y, val) -} - -func (qr *qrcode) calcPenalty() uint { - return qr.calcPenaltyRule1() + qr.calcPenaltyRule2() + qr.calcPenaltyRule3() + qr.calcPenaltyRule4() -} - -func (qr *qrcode) calcPenaltyRule1() uint { - var result uint - for x := 0; x < qr.dimension; x++ { - checkForX := false - var cntX uint - checkForY := false - var cntY uint - - for y := 0; y < qr.dimension; y++ { - if qr.Get(x, y) == checkForX { - cntX++ - } else { - checkForX = !checkForX - if cntX >= 5 { - result += cntX - 2 - } - cntX = 1 - } - - if qr.Get(y, x) == checkForY { - cntY++ - } else { - checkForY = !checkForY - if cntY >= 5 { - result += cntY - 2 - } - cntY = 1 - } - } - - if cntX >= 5 { - result += cntX - 2 - } - if cntY >= 5 { - result += cntY - 2 - } - } - - return result -} - -func (qr *qrcode) calcPenaltyRule2() uint { - var result uint - for x := 0; x < qr.dimension-1; x++ { - for y := 0; y < qr.dimension-1; y++ { - check := qr.Get(x, y) - if qr.Get(x, y+1) == 
check && qr.Get(x+1, y) == check && qr.Get(x+1, y+1) == check { - result += 3 - } - } - } - return result -} - -func (qr *qrcode) calcPenaltyRule3() uint { - pattern1 := []bool{true, false, true, true, true, false, true, false, false, false, false} - pattern2 := []bool{false, false, false, false, true, false, true, true, true, false, true} - - var result uint - for x := 0; x <= qr.dimension-len(pattern1); x++ { - for y := 0; y < qr.dimension; y++ { - pattern1XFound := true - pattern2XFound := true - pattern1YFound := true - pattern2YFound := true - - for i := 0; i < len(pattern1); i++ { - iv := qr.Get(x+i, y) - if iv != pattern1[i] { - pattern1XFound = false - } - if iv != pattern2[i] { - pattern2XFound = false - } - iv = qr.Get(y, x+i) - if iv != pattern1[i] { - pattern1YFound = false - } - if iv != pattern2[i] { - pattern2YFound = false - } - } - if pattern1XFound || pattern2XFound { - result += 40 - } - if pattern1YFound || pattern2YFound { - result += 40 - } - } - } - - return result -} - -func (qr *qrcode) calcPenaltyRule4() uint { - totalNum := qr.data.Len() - trueCnt := 0 - for i := 0; i < totalNum; i++ { - if qr.data.GetBit(i) { - trueCnt++ - } - } - percDark := float64(trueCnt) * 100 / float64(totalNum) - floor := math.Abs(math.Floor(percDark/5) - 10) - ceil := math.Abs(math.Ceil(percDark/5) - 10) - return uint(math.Min(floor, ceil) * 10) -} - -func newBarcode(dim int) *qrcode { - res := new(qrcode) - res.dimension = dim - res.data = utils.NewBitList(dim * dim) - return res -} diff --git a/vendor/github.com/boombuler/barcode/qr/unicode.go b/vendor/github.com/boombuler/barcode/qr/unicode.go deleted file mode 100644 index a9135ab6d..000000000 --- a/vendor/github.com/boombuler/barcode/qr/unicode.go +++ /dev/null @@ -1,27 +0,0 @@ -package qr - -import ( - "errors" - - "github.com/boombuler/barcode/utils" -) - -func encodeUnicode(content string, ecl ErrorCorrectionLevel) (*utils.BitList, *versionInfo, error) { - data := []byte(content) - - vi := findSmallestVersionInfo(ecl, byteMode, len(data)*8) - if vi == nil { - return nil, nil, errors.New("To much data to encode") - } - - // It's not correct to add the unicode bytes to the result directly but most readers can't handle the - // required ECI header... 
- res := new(utils.BitList) - res.AddBits(int(byteMode), 4) - res.AddBits(len(content), vi.charCountBits(byteMode)) - for _, b := range data { - res.AddByte(b) - } - addPaddingAndTerminator(res, vi) - return res, vi, nil -} diff --git a/vendor/github.com/boombuler/barcode/qr/versioninfo.go b/vendor/github.com/boombuler/barcode/qr/versioninfo.go deleted file mode 100644 index 6852a5766..000000000 --- a/vendor/github.com/boombuler/barcode/qr/versioninfo.go +++ /dev/null @@ -1,310 +0,0 @@ -package qr - -import "math" - -// ErrorCorrectionLevel indicates the amount of "backup data" stored in the QR code -type ErrorCorrectionLevel byte - -const ( - // L recovers 7% of data - L ErrorCorrectionLevel = iota - // M recovers 15% of data - M - // Q recovers 25% of data - Q - // H recovers 30% of data - H -) - -func (ecl ErrorCorrectionLevel) String() string { - switch ecl { - case L: - return "L" - case M: - return "M" - case Q: - return "Q" - case H: - return "H" - } - return "unknown" -} - -type encodingMode byte - -const ( - numericMode encodingMode = 1 - alphaNumericMode encodingMode = 2 - byteMode encodingMode = 4 - kanjiMode encodingMode = 8 -) - -type versionInfo struct { - Version byte - Level ErrorCorrectionLevel - ErrorCorrectionCodewordsPerBlock byte - NumberOfBlocksInGroup1 byte - DataCodeWordsPerBlockInGroup1 byte - NumberOfBlocksInGroup2 byte - DataCodeWordsPerBlockInGroup2 byte -} - -var versionInfos = []*versionInfo{ - &versionInfo{1, L, 7, 1, 19, 0, 0}, - &versionInfo{1, M, 10, 1, 16, 0, 0}, - &versionInfo{1, Q, 13, 1, 13, 0, 0}, - &versionInfo{1, H, 17, 1, 9, 0, 0}, - &versionInfo{2, L, 10, 1, 34, 0, 0}, - &versionInfo{2, M, 16, 1, 28, 0, 0}, - &versionInfo{2, Q, 22, 1, 22, 0, 0}, - &versionInfo{2, H, 28, 1, 16, 0, 0}, - &versionInfo{3, L, 15, 1, 55, 0, 0}, - &versionInfo{3, M, 26, 1, 44, 0, 0}, - &versionInfo{3, Q, 18, 2, 17, 0, 0}, - &versionInfo{3, H, 22, 2, 13, 0, 0}, - &versionInfo{4, L, 20, 1, 80, 0, 0}, - &versionInfo{4, M, 18, 2, 32, 0, 0}, - &versionInfo{4, Q, 26, 2, 24, 0, 0}, - &versionInfo{4, H, 16, 4, 9, 0, 0}, - &versionInfo{5, L, 26, 1, 108, 0, 0}, - &versionInfo{5, M, 24, 2, 43, 0, 0}, - &versionInfo{5, Q, 18, 2, 15, 2, 16}, - &versionInfo{5, H, 22, 2, 11, 2, 12}, - &versionInfo{6, L, 18, 2, 68, 0, 0}, - &versionInfo{6, M, 16, 4, 27, 0, 0}, - &versionInfo{6, Q, 24, 4, 19, 0, 0}, - &versionInfo{6, H, 28, 4, 15, 0, 0}, - &versionInfo{7, L, 20, 2, 78, 0, 0}, - &versionInfo{7, M, 18, 4, 31, 0, 0}, - &versionInfo{7, Q, 18, 2, 14, 4, 15}, - &versionInfo{7, H, 26, 4, 13, 1, 14}, - &versionInfo{8, L, 24, 2, 97, 0, 0}, - &versionInfo{8, M, 22, 2, 38, 2, 39}, - &versionInfo{8, Q, 22, 4, 18, 2, 19}, - &versionInfo{8, H, 26, 4, 14, 2, 15}, - &versionInfo{9, L, 30, 2, 116, 0, 0}, - &versionInfo{9, M, 22, 3, 36, 2, 37}, - &versionInfo{9, Q, 20, 4, 16, 4, 17}, - &versionInfo{9, H, 24, 4, 12, 4, 13}, - &versionInfo{10, L, 18, 2, 68, 2, 69}, - &versionInfo{10, M, 26, 4, 43, 1, 44}, - &versionInfo{10, Q, 24, 6, 19, 2, 20}, - &versionInfo{10, H, 28, 6, 15, 2, 16}, - &versionInfo{11, L, 20, 4, 81, 0, 0}, - &versionInfo{11, M, 30, 1, 50, 4, 51}, - &versionInfo{11, Q, 28, 4, 22, 4, 23}, - &versionInfo{11, H, 24, 3, 12, 8, 13}, - &versionInfo{12, L, 24, 2, 92, 2, 93}, - &versionInfo{12, M, 22, 6, 36, 2, 37}, - &versionInfo{12, Q, 26, 4, 20, 6, 21}, - &versionInfo{12, H, 28, 7, 14, 4, 15}, - &versionInfo{13, L, 26, 4, 107, 0, 0}, - &versionInfo{13, M, 22, 8, 37, 1, 38}, - &versionInfo{13, Q, 24, 8, 20, 4, 21}, - &versionInfo{13, H, 22, 12, 11, 4, 12}, - &versionInfo{14, L, 30, 3, 115, 1, 
116}, - &versionInfo{14, M, 24, 4, 40, 5, 41}, - &versionInfo{14, Q, 20, 11, 16, 5, 17}, - &versionInfo{14, H, 24, 11, 12, 5, 13}, - &versionInfo{15, L, 22, 5, 87, 1, 88}, - &versionInfo{15, M, 24, 5, 41, 5, 42}, - &versionInfo{15, Q, 30, 5, 24, 7, 25}, - &versionInfo{15, H, 24, 11, 12, 7, 13}, - &versionInfo{16, L, 24, 5, 98, 1, 99}, - &versionInfo{16, M, 28, 7, 45, 3, 46}, - &versionInfo{16, Q, 24, 15, 19, 2, 20}, - &versionInfo{16, H, 30, 3, 15, 13, 16}, - &versionInfo{17, L, 28, 1, 107, 5, 108}, - &versionInfo{17, M, 28, 10, 46, 1, 47}, - &versionInfo{17, Q, 28, 1, 22, 15, 23}, - &versionInfo{17, H, 28, 2, 14, 17, 15}, - &versionInfo{18, L, 30, 5, 120, 1, 121}, - &versionInfo{18, M, 26, 9, 43, 4, 44}, - &versionInfo{18, Q, 28, 17, 22, 1, 23}, - &versionInfo{18, H, 28, 2, 14, 19, 15}, - &versionInfo{19, L, 28, 3, 113, 4, 114}, - &versionInfo{19, M, 26, 3, 44, 11, 45}, - &versionInfo{19, Q, 26, 17, 21, 4, 22}, - &versionInfo{19, H, 26, 9, 13, 16, 14}, - &versionInfo{20, L, 28, 3, 107, 5, 108}, - &versionInfo{20, M, 26, 3, 41, 13, 42}, - &versionInfo{20, Q, 30, 15, 24, 5, 25}, - &versionInfo{20, H, 28, 15, 15, 10, 16}, - &versionInfo{21, L, 28, 4, 116, 4, 117}, - &versionInfo{21, M, 26, 17, 42, 0, 0}, - &versionInfo{21, Q, 28, 17, 22, 6, 23}, - &versionInfo{21, H, 30, 19, 16, 6, 17}, - &versionInfo{22, L, 28, 2, 111, 7, 112}, - &versionInfo{22, M, 28, 17, 46, 0, 0}, - &versionInfo{22, Q, 30, 7, 24, 16, 25}, - &versionInfo{22, H, 24, 34, 13, 0, 0}, - &versionInfo{23, L, 30, 4, 121, 5, 122}, - &versionInfo{23, M, 28, 4, 47, 14, 48}, - &versionInfo{23, Q, 30, 11, 24, 14, 25}, - &versionInfo{23, H, 30, 16, 15, 14, 16}, - &versionInfo{24, L, 30, 6, 117, 4, 118}, - &versionInfo{24, M, 28, 6, 45, 14, 46}, - &versionInfo{24, Q, 30, 11, 24, 16, 25}, - &versionInfo{24, H, 30, 30, 16, 2, 17}, - &versionInfo{25, L, 26, 8, 106, 4, 107}, - &versionInfo{25, M, 28, 8, 47, 13, 48}, - &versionInfo{25, Q, 30, 7, 24, 22, 25}, - &versionInfo{25, H, 30, 22, 15, 13, 16}, - &versionInfo{26, L, 28, 10, 114, 2, 115}, - &versionInfo{26, M, 28, 19, 46, 4, 47}, - &versionInfo{26, Q, 28, 28, 22, 6, 23}, - &versionInfo{26, H, 30, 33, 16, 4, 17}, - &versionInfo{27, L, 30, 8, 122, 4, 123}, - &versionInfo{27, M, 28, 22, 45, 3, 46}, - &versionInfo{27, Q, 30, 8, 23, 26, 24}, - &versionInfo{27, H, 30, 12, 15, 28, 16}, - &versionInfo{28, L, 30, 3, 117, 10, 118}, - &versionInfo{28, M, 28, 3, 45, 23, 46}, - &versionInfo{28, Q, 30, 4, 24, 31, 25}, - &versionInfo{28, H, 30, 11, 15, 31, 16}, - &versionInfo{29, L, 30, 7, 116, 7, 117}, - &versionInfo{29, M, 28, 21, 45, 7, 46}, - &versionInfo{29, Q, 30, 1, 23, 37, 24}, - &versionInfo{29, H, 30, 19, 15, 26, 16}, - &versionInfo{30, L, 30, 5, 115, 10, 116}, - &versionInfo{30, M, 28, 19, 47, 10, 48}, - &versionInfo{30, Q, 30, 15, 24, 25, 25}, - &versionInfo{30, H, 30, 23, 15, 25, 16}, - &versionInfo{31, L, 30, 13, 115, 3, 116}, - &versionInfo{31, M, 28, 2, 46, 29, 47}, - &versionInfo{31, Q, 30, 42, 24, 1, 25}, - &versionInfo{31, H, 30, 23, 15, 28, 16}, - &versionInfo{32, L, 30, 17, 115, 0, 0}, - &versionInfo{32, M, 28, 10, 46, 23, 47}, - &versionInfo{32, Q, 30, 10, 24, 35, 25}, - &versionInfo{32, H, 30, 19, 15, 35, 16}, - &versionInfo{33, L, 30, 17, 115, 1, 116}, - &versionInfo{33, M, 28, 14, 46, 21, 47}, - &versionInfo{33, Q, 30, 29, 24, 19, 25}, - &versionInfo{33, H, 30, 11, 15, 46, 16}, - &versionInfo{34, L, 30, 13, 115, 6, 116}, - &versionInfo{34, M, 28, 14, 46, 23, 47}, - &versionInfo{34, Q, 30, 44, 24, 7, 25}, - &versionInfo{34, H, 30, 59, 16, 1, 17}, - &versionInfo{35, L, 30, 12, 
121, 7, 122}, - &versionInfo{35, M, 28, 12, 47, 26, 48}, - &versionInfo{35, Q, 30, 39, 24, 14, 25}, - &versionInfo{35, H, 30, 22, 15, 41, 16}, - &versionInfo{36, L, 30, 6, 121, 14, 122}, - &versionInfo{36, M, 28, 6, 47, 34, 48}, - &versionInfo{36, Q, 30, 46, 24, 10, 25}, - &versionInfo{36, H, 30, 2, 15, 64, 16}, - &versionInfo{37, L, 30, 17, 122, 4, 123}, - &versionInfo{37, M, 28, 29, 46, 14, 47}, - &versionInfo{37, Q, 30, 49, 24, 10, 25}, - &versionInfo{37, H, 30, 24, 15, 46, 16}, - &versionInfo{38, L, 30, 4, 122, 18, 123}, - &versionInfo{38, M, 28, 13, 46, 32, 47}, - &versionInfo{38, Q, 30, 48, 24, 14, 25}, - &versionInfo{38, H, 30, 42, 15, 32, 16}, - &versionInfo{39, L, 30, 20, 117, 4, 118}, - &versionInfo{39, M, 28, 40, 47, 7, 48}, - &versionInfo{39, Q, 30, 43, 24, 22, 25}, - &versionInfo{39, H, 30, 10, 15, 67, 16}, - &versionInfo{40, L, 30, 19, 118, 6, 119}, - &versionInfo{40, M, 28, 18, 47, 31, 48}, - &versionInfo{40, Q, 30, 34, 24, 34, 25}, - &versionInfo{40, H, 30, 20, 15, 61, 16}, -} - -func (vi *versionInfo) totalDataBytes() int { - g1Data := int(vi.NumberOfBlocksInGroup1) * int(vi.DataCodeWordsPerBlockInGroup1) - g2Data := int(vi.NumberOfBlocksInGroup2) * int(vi.DataCodeWordsPerBlockInGroup2) - return (g1Data + g2Data) -} - -func (vi *versionInfo) charCountBits(m encodingMode) byte { - switch m { - case numericMode: - if vi.Version < 10 { - return 10 - } else if vi.Version < 27 { - return 12 - } - return 14 - - case alphaNumericMode: - if vi.Version < 10 { - return 9 - } else if vi.Version < 27 { - return 11 - } - return 13 - - case byteMode: - if vi.Version < 10 { - return 8 - } - return 16 - - case kanjiMode: - if vi.Version < 10 { - return 8 - } else if vi.Version < 27 { - return 10 - } - return 12 - default: - return 0 - } -} - -func (vi *versionInfo) modulWidth() int { - return ((int(vi.Version) - 1) * 4) + 21 -} - -func (vi *versionInfo) alignmentPatternPlacements() []int { - if vi.Version == 1 { - return make([]int, 0) - } - - first := 6 - last := vi.modulWidth() - 7 - space := float64(last - first) - count := int(math.Ceil(space/28)) + 1 - - result := make([]int, count) - result[0] = first - result[len(result)-1] = last - if count > 2 { - step := int(math.Ceil(float64(last-first) / float64(count-1))) - if step%2 == 1 { - frac := float64(last-first) / float64(count-1) - _, x := math.Modf(frac) - if x >= 0.5 { - frac = math.Ceil(frac) - } else { - frac = math.Floor(frac) - } - - if int(frac)%2 == 0 { - step-- - } else { - step++ - } - } - - for i := 1; i <= count-2; i++ { - result[i] = last - (step * (count - 1 - i)) - } - } - - return result -} - -func findSmallestVersionInfo(ecl ErrorCorrectionLevel, mode encodingMode, dataBits int) *versionInfo { - dataBits = dataBits + 4 // mode indicator - for _, vi := range versionInfos { - if vi.Level == ecl { - if (vi.totalDataBytes() * 8) >= (dataBits + int(vi.charCountBits(mode))) { - return vi - } - } - } - return nil -} diff --git a/vendor/github.com/boombuler/barcode/scaledbarcode.go b/vendor/github.com/boombuler/barcode/scaledbarcode.go deleted file mode 100644 index 152b18017..000000000 --- a/vendor/github.com/boombuler/barcode/scaledbarcode.go +++ /dev/null @@ -1,134 +0,0 @@ -package barcode - -import ( - "errors" - "fmt" - "image" - "image/color" - "math" -) - -type wrapFunc func(x, y int) color.Color - -type scaledBarcode struct { - wrapped Barcode - wrapperFunc wrapFunc - rect image.Rectangle -} - -type intCSscaledBC struct { - scaledBarcode -} - -func (bc *scaledBarcode) Content() string { - return bc.wrapped.Content() 
-} - -func (bc *scaledBarcode) Metadata() Metadata { - return bc.wrapped.Metadata() -} - -func (bc *scaledBarcode) ColorModel() color.Model { - return bc.wrapped.ColorModel() -} - -func (bc *scaledBarcode) Bounds() image.Rectangle { - return bc.rect -} - -func (bc *scaledBarcode) At(x, y int) color.Color { - return bc.wrapperFunc(x, y) -} - -func (bc *intCSscaledBC) CheckSum() int { - if cs, ok := bc.wrapped.(BarcodeIntCS); ok { - return cs.CheckSum() - } - return 0 -} - -// Scale returns a resized barcode with the given width and height. -func Scale(bc Barcode, width, height int) (Barcode, error) { - switch bc.Metadata().Dimensions { - case 1: - return scale1DCode(bc, width, height) - case 2: - return scale2DCode(bc, width, height) - } - - return nil, errors.New("unsupported barcode format") -} - -func newScaledBC(wrapped Barcode, wrapperFunc wrapFunc, rect image.Rectangle) Barcode { - result := &scaledBarcode{ - wrapped: wrapped, - wrapperFunc: wrapperFunc, - rect: rect, - } - - if _, ok := wrapped.(BarcodeIntCS); ok { - return &intCSscaledBC{*result} - } - return result -} - -func scale2DCode(bc Barcode, width, height int) (Barcode, error) { - orgBounds := bc.Bounds() - orgWidth := orgBounds.Max.X - orgBounds.Min.X - orgHeight := orgBounds.Max.Y - orgBounds.Min.Y - - factor := int(math.Min(float64(width)/float64(orgWidth), float64(height)/float64(orgHeight))) - if factor <= 0 { - return nil, fmt.Errorf("can not scale barcode to an image smaller than %dx%d", orgWidth, orgHeight) - } - - offsetX := (width - (orgWidth * factor)) / 2 - offsetY := (height - (orgHeight * factor)) / 2 - - wrap := func(x, y int) color.Color { - if x < offsetX || y < offsetY { - return color.White - } - x = (x - offsetX) / factor - y = (y - offsetY) / factor - if x >= orgWidth || y >= orgHeight { - return color.White - } - return bc.At(x, y) - } - - return newScaledBC( - bc, - wrap, - image.Rect(0, 0, width, height), - ), nil -} - -func scale1DCode(bc Barcode, width, height int) (Barcode, error) { - orgBounds := bc.Bounds() - orgWidth := orgBounds.Max.X - orgBounds.Min.X - factor := int(float64(width) / float64(orgWidth)) - - if factor <= 0 { - return nil, fmt.Errorf("can not scale barcode to an image smaller than %dx1", orgWidth) - } - offsetX := (width - (orgWidth * factor)) / 2 - - wrap := func(x, y int) color.Color { - if x < offsetX { - return color.White - } - x = (x - offsetX) / factor - - if x >= orgWidth { - return color.White - } - return bc.At(x, 0) - } - - return newScaledBC( - bc, - wrap, - image.Rect(0, 0, width, height), - ), nil -} diff --git a/vendor/github.com/boombuler/barcode/utils/base1dcode.go b/vendor/github.com/boombuler/barcode/utils/base1dcode.go deleted file mode 100644 index a335c0c74..000000000 --- a/vendor/github.com/boombuler/barcode/utils/base1dcode.go +++ /dev/null @@ -1,57 +0,0 @@ -// Package utils contain some utilities which are needed to create barcodes -package utils - -import ( - "image" - "image/color" - - "github.com/boombuler/barcode" -) - -type base1DCode struct { - *BitList - kind string - content string -} - -type base1DCodeIntCS struct { - base1DCode - checksum int -} - -func (c *base1DCode) Content() string { - return c.content -} - -func (c *base1DCode) Metadata() barcode.Metadata { - return barcode.Metadata{c.kind, 1} -} - -func (c *base1DCode) ColorModel() color.Model { - return color.Gray16Model -} - -func (c *base1DCode) Bounds() image.Rectangle { - return image.Rect(0, 0, c.Len(), 1) -} - -func (c *base1DCode) At(x, y int) color.Color { - if c.GetBit(x) { - 
return color.Black - } - return color.White -} - -func (c *base1DCodeIntCS) CheckSum() int { - return c.checksum -} - -// New1DCodeIntCheckSum creates a new 1D barcode where the bars are represented by the bits in the bars BitList -func New1DCodeIntCheckSum(codeKind, content string, bars *BitList, checksum int) barcode.BarcodeIntCS { - return &base1DCodeIntCS{base1DCode{bars, codeKind, content}, checksum} -} - -// New1DCode creates a new 1D barcode where the bars are represented by the bits in the bars BitList -func New1DCode(codeKind, content string, bars *BitList) barcode.Barcode { - return &base1DCode{bars, codeKind, content} -} diff --git a/vendor/github.com/boombuler/barcode/utils/bitlist.go b/vendor/github.com/boombuler/barcode/utils/bitlist.go deleted file mode 100644 index bb05e53b5..000000000 --- a/vendor/github.com/boombuler/barcode/utils/bitlist.go +++ /dev/null @@ -1,119 +0,0 @@ -package utils - -// BitList is a list that contains bits -type BitList struct { - count int - data []int32 -} - -// NewBitList returns a new BitList with the given length -// all bits are initialize with false -func NewBitList(capacity int) *BitList { - bl := new(BitList) - bl.count = capacity - x := 0 - if capacity%32 != 0 { - x = 1 - } - bl.data = make([]int32, capacity/32+x) - return bl -} - -// Len returns the number of contained bits -func (bl *BitList) Len() int { - return bl.count -} - -func (bl *BitList) grow() { - growBy := len(bl.data) - if growBy < 128 { - growBy = 128 - } else if growBy >= 1024 { - growBy = 1024 - } - - nd := make([]int32, len(bl.data)+growBy) - copy(nd, bl.data) - bl.data = nd -} - -// AddBit appends the given bits to the end of the list -func (bl *BitList) AddBit(bits ...bool) { - for _, bit := range bits { - itmIndex := bl.count / 32 - for itmIndex >= len(bl.data) { - bl.grow() - } - bl.SetBit(bl.count, bit) - bl.count++ - } -} - -// SetBit sets the bit at the given index to the given value -func (bl *BitList) SetBit(index int, value bool) { - itmIndex := index / 32 - itmBitShift := 31 - (index % 32) - if value { - bl.data[itmIndex] = bl.data[itmIndex] | 1<<uint(itmBitShift) - } else { - bl.data[itmIndex] = bl.data[itmIndex] & ^(1 << uint(itmBitShift)) - } -} - -// GetBit returns the bit at the given index -func (bl *BitList) GetBit(index int) bool { - itmIndex := index / 32 - itmBitShift := 31 - (index % 32) - return ((bl.data[itmIndex] >> uint(itmBitShift)) & 1) == 1 -} - -// AddByte appends all 8 bits of the given byte to the end of the list -func (bl *BitList) AddByte(b byte) { - for i := 7; i >= 0; i-- { - bl.AddBit(((b >> uint(i)) & 1) == 1) - } -} - -// AddBits appends the last (LSB) 'count' bits of 'b' the the end of the list -func (bl *BitList) AddBits(b int, count byte) { - for i := int(count) - 1; i >= 0; i-- { - bl.AddBit(((b >> uint(i)) & 1) == 1) - } -} - -// GetBytes returns all bits of the BitList as a []byte -func (bl *BitList) GetBytes() []byte { - len := bl.count >> 3 - if (bl.count % 8) != 0 { - len++ - } - result := make([]byte, len) - for i := 0; i < len; i++ { - shift := (3 - (i % 4)) * 8 - result[i] = (byte)((bl.data[i/4] >> uint(shift)) & 0xFF) - } - return result -} - -// IterateBytes iterates through all bytes contained in the BitList -func (bl *BitList) IterateBytes() <-chan byte { - res := make(chan byte) - - go func() { - c := bl.count - shift := 24 - i := 0 - for c > 0 { - res <- byte((bl.data[i] >> uint(shift)) & 0xFF) - shift -= 8 - if shift < 0 { - shift = 24 - i++ - } - c -= 8 - } - close(res) - }() - - return res -} diff --git a/vendor/github.com/boombuler/barcode/utils/galoisfield.go b/vendor/github.com/boombuler/barcode/utils/galoisfield.go deleted file mode 100644 index 68726fbfd..000000000 --- a/vendor/github.com/boombuler/barcode/utils/galoisfield.go +++ /dev/null @@ -1,65 +0,0 @@ -package utils - -// GaloisField
encapsulates galois field arithmetics -type GaloisField struct { - Size int - Base int - ALogTbl []int - LogTbl []int -} - -// NewGaloisField creates a new galois field -func NewGaloisField(pp, fieldSize, b int) *GaloisField { - result := new(GaloisField) - - result.Size = fieldSize - result.Base = b - result.ALogTbl = make([]int, fieldSize) - result.LogTbl = make([]int, fieldSize) - - x := 1 - for i := 0; i < fieldSize; i++ { - result.ALogTbl[i] = x - x = x * 2 - if x >= fieldSize { - x = (x ^ pp) & (fieldSize - 1) - } - } - - for i := 0; i < fieldSize; i++ { - result.LogTbl[result.ALogTbl[i]] = int(i) - } - - return result -} - -func (gf *GaloisField) Zero() *GFPoly { - return NewGFPoly(gf, []int{0}) -} - -// AddOrSub add or substract two numbers -func (gf *GaloisField) AddOrSub(a, b int) int { - return a ^ b -} - -// Multiply multiplys two numbers -func (gf *GaloisField) Multiply(a, b int) int { - if a == 0 || b == 0 { - return 0 - } - return gf.ALogTbl[(gf.LogTbl[a]+gf.LogTbl[b])%(gf.Size-1)] -} - -// Divide divides two numbers -func (gf *GaloisField) Divide(a, b int) int { - if b == 0 { - panic("divide by zero") - } else if a == 0 { - return 0 - } - return gf.ALogTbl[(gf.LogTbl[a]-gf.LogTbl[b])%(gf.Size-1)] -} - -func (gf *GaloisField) Invers(num int) int { - return gf.ALogTbl[(gf.Size-1)-gf.LogTbl[num]] -} diff --git a/vendor/github.com/boombuler/barcode/utils/gfpoly.go b/vendor/github.com/boombuler/barcode/utils/gfpoly.go deleted file mode 100644 index c56bb40b9..000000000 --- a/vendor/github.com/boombuler/barcode/utils/gfpoly.go +++ /dev/null @@ -1,103 +0,0 @@ -package utils - -type GFPoly struct { - gf *GaloisField - Coefficients []int -} - -func (gp *GFPoly) Degree() int { - return len(gp.Coefficients) - 1 -} - -func (gp *GFPoly) Zero() bool { - return gp.Coefficients[0] == 0 -} - -// GetCoefficient returns the coefficient of x ^ degree -func (gp *GFPoly) GetCoefficient(degree int) int { - return gp.Coefficients[gp.Degree()-degree] -} - -func (gp *GFPoly) AddOrSubstract(other *GFPoly) *GFPoly { - if gp.Zero() { - return other - } else if other.Zero() { - return gp - } - smallCoeff := gp.Coefficients - largeCoeff := other.Coefficients - if len(smallCoeff) > len(largeCoeff) { - largeCoeff, smallCoeff = smallCoeff, largeCoeff - } - sumDiff := make([]int, len(largeCoeff)) - lenDiff := len(largeCoeff) - len(smallCoeff) - copy(sumDiff, largeCoeff[:lenDiff]) - for i := lenDiff; i < len(largeCoeff); i++ { - sumDiff[i] = int(gp.gf.AddOrSub(int(smallCoeff[i-lenDiff]), int(largeCoeff[i]))) - } - return NewGFPoly(gp.gf, sumDiff) -} - -func (gp *GFPoly) MultByMonominal(degree int, coeff int) *GFPoly { - if coeff == 0 { - return gp.gf.Zero() - } - size := len(gp.Coefficients) - result := make([]int, size+degree) - for i := 0; i < size; i++ { - result[i] = int(gp.gf.Multiply(int(gp.Coefficients[i]), int(coeff))) - } - return NewGFPoly(gp.gf, result) -} - -func (gp *GFPoly) Multiply(other *GFPoly) *GFPoly { - if gp.Zero() || other.Zero() { - return gp.gf.Zero() - } - aCoeff := gp.Coefficients - aLen := len(aCoeff) - bCoeff := other.Coefficients - bLen := len(bCoeff) - product := make([]int, aLen+bLen-1) - for i := 0; i < aLen; i++ { - ac := int(aCoeff[i]) - for j := 0; j < bLen; j++ { - bc := int(bCoeff[j]) - product[i+j] = int(gp.gf.AddOrSub(int(product[i+j]), gp.gf.Multiply(ac, bc))) - } - } - return NewGFPoly(gp.gf, product) -} - -func (gp *GFPoly) Divide(other *GFPoly) (quotient *GFPoly, remainder *GFPoly) { - quotient = gp.gf.Zero() - remainder = gp - fld := gp.gf - denomLeadTerm := 
other.GetCoefficient(other.Degree()) - inversDenomLeadTerm := fld.Invers(int(denomLeadTerm)) - for remainder.Degree() >= other.Degree() && !remainder.Zero() { - degreeDiff := remainder.Degree() - other.Degree() - scale := int(fld.Multiply(int(remainder.GetCoefficient(remainder.Degree())), inversDenomLeadTerm)) - term := other.MultByMonominal(degreeDiff, scale) - itQuot := NewMonominalPoly(fld, degreeDiff, scale) - quotient = quotient.AddOrSubstract(itQuot) - remainder = remainder.AddOrSubstract(term) - } - return -} - -func NewMonominalPoly(field *GaloisField, degree int, coeff int) *GFPoly { - if coeff == 0 { - return field.Zero() - } - result := make([]int, degree+1) - result[0] = coeff - return NewGFPoly(field, result) -} - -func NewGFPoly(field *GaloisField, coefficients []int) *GFPoly { - for len(coefficients) > 1 && coefficients[0] == 0 { - coefficients = coefficients[1:] - } - return &GFPoly{field, coefficients} -} diff --git a/vendor/github.com/boombuler/barcode/utils/reedsolomon.go b/vendor/github.com/boombuler/barcode/utils/reedsolomon.go deleted file mode 100644 index 53af91ad4..000000000 --- a/vendor/github.com/boombuler/barcode/utils/reedsolomon.go +++ /dev/null @@ -1,44 +0,0 @@ -package utils - -import ( - "sync" -) - -type ReedSolomonEncoder struct { - gf *GaloisField - polynomes []*GFPoly - m *sync.Mutex -} - -func NewReedSolomonEncoder(gf *GaloisField) *ReedSolomonEncoder { - return &ReedSolomonEncoder{ - gf, []*GFPoly{NewGFPoly(gf, []int{1})}, new(sync.Mutex), - } -} - -func (rs *ReedSolomonEncoder) getPolynomial(degree int) *GFPoly { - rs.m.Lock() - defer rs.m.Unlock() - - if degree >= len(rs.polynomes) { - last := rs.polynomes[len(rs.polynomes)-1] - for d := len(rs.polynomes); d <= degree; d++ { - next := last.Multiply(NewGFPoly(rs.gf, []int{1, rs.gf.ALogTbl[d-1+rs.gf.Base]})) - rs.polynomes = append(rs.polynomes, next) - last = next - } - } - return rs.polynomes[degree] -} - -func (rs *ReedSolomonEncoder) Encode(data []int, eccCount int) []int { - generator := rs.getPolynomial(eccCount) - info := NewGFPoly(rs.gf, data) - info = info.MultByMonominal(eccCount, 1) - _, remainder := info.Divide(generator) - - result := make([]int, eccCount) - numZero := int(eccCount) - len(remainder.Coefficients) - copy(result[numZero:], remainder.Coefficients) - return result -} diff --git a/vendor/github.com/boombuler/barcode/utils/runeint.go b/vendor/github.com/boombuler/barcode/utils/runeint.go deleted file mode 100644 index d2e5e61e5..000000000 --- a/vendor/github.com/boombuler/barcode/utils/runeint.go +++ /dev/null @@ -1,19 +0,0 @@ -package utils - -// RuneToInt converts a rune between '0' and '9' to an integer between 0 and 9 -// If the rune is outside of this range -1 is returned. -func RuneToInt(r rune) int { - if r >= '0' && r <= '9' { - return int(r - '0') - } - return -1 -} - -// IntToRune converts a digit 0 - 9 to the rune '0' - '9'. If the given int is outside -// of this range 'F' is returned! 
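// Illustrative sketch (not part of the vendored package): the Reed-Solomon
// encoder above is constructed from a Galois field and asked for a number of
// error correction code words, which Encode returns for appending to the data.
// The field parameters below (prime polynomial 285 over GF(256), base 0) are
// the ones commonly used for QR codes and are an assumption here, not taken
// from this diff.
func eccWords(data []int, eccCount int) []int {
	gf := NewGaloisField(285, 256, 0)
	rs := NewReedSolomonEncoder(gf)
	return rs.Encode(data, eccCount) // returns exactly eccCount code words
}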
-func IntToRune(i int) rune { - if i >= 0 && i <= 9 { - return rune(i + '0') - } - return 'F' -} diff --git a/vendor/github.com/gobwas/glob/match/any.go b/vendor/github.com/gobwas/glob/match/any.go index c1313b171..514a9a5c4 100644 --- a/vendor/github.com/gobwas/glob/match/any.go +++ b/vendor/github.com/gobwas/glob/match/any.go @@ -2,7 +2,6 @@ package match import ( "fmt" - "github.com/gobwas/glob/util/strings" ) diff --git a/vendor/github.com/gobwas/glob/match/list.go b/vendor/github.com/gobwas/glob/match/list.go index 4b26d2086..7fd763ecd 100644 --- a/vendor/github.com/gobwas/glob/match/list.go +++ b/vendor/github.com/gobwas/glob/match/list.go @@ -2,9 +2,8 @@ package match import ( "fmt" - "unicode/utf8" - "github.com/gobwas/glob/util/runes" + "unicode/utf8" ) type List struct { diff --git a/vendor/github.com/gobwas/glob/match/single.go b/vendor/github.com/gobwas/glob/match/single.go index bf2c3eacf..ee6e3954c 100644 --- a/vendor/github.com/gobwas/glob/match/single.go +++ b/vendor/github.com/gobwas/glob/match/single.go @@ -2,9 +2,8 @@ package match import ( "fmt" - "unicode/utf8" - "github.com/gobwas/glob/util/runes" + "unicode/utf8" ) // single represents ? diff --git a/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/vendor/github.com/gobwas/glob/syntax/ast/parser.go index d988e56e3..429b40943 100644 --- a/vendor/github.com/gobwas/glob/syntax/ast/parser.go +++ b/vendor/github.com/gobwas/glob/syntax/ast/parser.go @@ -3,9 +3,8 @@ package ast import ( "errors" "fmt" - "unicode/utf8" - "github.com/gobwas/glob/syntax/lexer" + "unicode/utf8" ) type Lexer interface { diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go index 086679ada..a1c8d1962 100644 --- a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go +++ b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go @@ -3,9 +3,8 @@ package lexer import ( "bytes" "fmt" - "unicode/utf8" - "github.com/gobwas/glob/util/runes" + "unicode/utf8" ) const ( diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/golang/groupcache/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. 
+ +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go new file mode 100644 index 000000000..eac1c7664 --- /dev/null +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -0,0 +1,133 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. 
+type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} + +// Clear purges all stored items from the cache. +func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/github.com/golang/protobuf/internal/gengogrpc/grpc.go b/vendor/github.com/golang/protobuf/internal/gengogrpc/grpc.go new file mode 100644 index 000000000..fd2f51d89 --- /dev/null +++ b/vendor/github.com/golang/protobuf/internal/gengogrpc/grpc.go @@ -0,0 +1,398 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gengogrpc contains the gRPC code generator. +package gengogrpc + +import ( + "fmt" + "strconv" + "strings" + + "google.golang.org/protobuf/compiler/protogen" + + "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + contextPackage = protogen.GoImportPath("context") + grpcPackage = protogen.GoImportPath("google.golang.org/grpc") + codesPackage = protogen.GoImportPath("google.golang.org/grpc/codes") + statusPackage = protogen.GoImportPath("google.golang.org/grpc/status") +) + +// GenerateFile generates a _grpc.pb.go file containing gRPC service definitions. 
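// Illustrative sketch (not part of this patch): basic use of the lru.Cache
// added above. The cache is not safe for concurrent access, so callers sharing
// it across goroutines need their own locking. Assumes imports of "fmt" and
// "github.com/golang/groupcache/lru"; keys and values are arbitrary.
func lruExample() {
	c := lru.New(2) // keep at most two entries
	c.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Printf("evicted %v\n", key)
	}
	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // evicts "a", the least recently used entry
	if v, ok := c.Get("b"); ok {
		fmt.Println("b =", v) // b = 2
	}
	_, ok := c.Get("a")
	fmt.Println("a still cached:", ok) // false
}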
+func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { + if len(file.Services) == 0 { + return nil + } + filename := file.GeneratedFilenamePrefix + "_grpc.pb.go" + g := gen.NewGeneratedFile(filename, file.GoImportPath) + g.P("// Code generated by protoc-gen-go-grpc. DO NOT EDIT.") + g.P() + g.P("package ", file.GoPackageName) + g.P() + GenerateFileContent(gen, file, g) + return g +} + +// GenerateFileContent generates the gRPC service definitions, excluding the package statement. +func GenerateFileContent(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile) { + if len(file.Services) == 0 { + return + } + + // TODO: Remove this. We don't need to include these references any more. + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ ", contextPackage.Ident("Context")) + g.P("var _ ", grpcPackage.Ident("ClientConnInterface")) + g.P() + + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the grpc package it is being compiled against.") + g.P("const _ = ", grpcPackage.Ident("SupportPackageIsVersion6")) + g.P() + for _, service := range file.Services { + genService(gen, file, g, service) + } +} + +func genService(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, service *protogen.Service) { + clientName := service.GoName + "Client" + + g.P("// ", clientName, " is the client API for ", service.GoName, " service.") + g.P("//") + g.P("// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.") + + // Client interface. + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P("//") + g.P(deprecationComment) + } + g.Annotate(clientName, service.Location) + g.P("type ", clientName, " interface {") + for _, method := range service.Methods { + g.Annotate(clientName+"."+method.GoName, method.Location) + if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P(method.Comments.Leading, + clientSignature(g, method)) + } + g.P("}") + g.P() + + // Client structure. + g.P("type ", unexport(clientName), " struct {") + g.P("cc ", grpcPackage.Ident("ClientConnInterface")) + g.P("}") + g.P() + + // NewClient factory. + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P("func New", clientName, " (cc ", grpcPackage.Ident("ClientConnInterface"), ") ", clientName, " {") + g.P("return &", unexport(clientName), "{cc}") + g.P("}") + g.P() + + var methodIndex, streamIndex int + // Client method implementations. + for _, method := range service.Methods { + if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() { + // Unary RPC method + genClientMethod(gen, file, g, method, methodIndex) + methodIndex++ + } else { + // Streaming RPC method + genClientMethod(gen, file, g, method, streamIndex) + streamIndex++ + } + } + + // Server interface. 
+ serverType := service.GoName + "Server" + g.P("// ", serverType, " is the server API for ", service.GoName, " service.") + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P("//") + g.P(deprecationComment) + } + g.Annotate(serverType, service.Location) + g.P("type ", serverType, " interface {") + for _, method := range service.Methods { + g.Annotate(serverType+"."+method.GoName, method.Location) + if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P(method.Comments.Leading, + serverSignature(g, method)) + } + g.P("}") + g.P() + + // Server Unimplemented struct for forward compatibility. + g.P("// Unimplemented", serverType, " can be embedded to have forward compatible implementations.") + g.P("type Unimplemented", serverType, " struct {") + g.P("}") + g.P() + for _, method := range service.Methods { + nilArg := "" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + nilArg = "nil," + } + g.P("func (*Unimplemented", serverType, ") ", serverSignature(g, method), "{") + g.P("return ", nilArg, statusPackage.Ident("Errorf"), "(", codesPackage.Ident("Unimplemented"), `, "method `, method.GoName, ` not implemented")`) + g.P("}") + } + g.P() + + // Server registration. + if service.Desc.Options().(*descriptorpb.ServiceOptions).GetDeprecated() { + g.P(deprecationComment) + } + serviceDescVar := "_" + service.GoName + "_serviceDesc" + g.P("func Register", service.GoName, "Server(s *", grpcPackage.Ident("Server"), ", srv ", serverType, ") {") + g.P("s.RegisterService(&", serviceDescVar, `, srv)`) + g.P("}") + g.P() + + // Server handler implementations. + var handlerNames []string + for _, method := range service.Methods { + hname := genServerMethod(gen, file, g, method) + handlerNames = append(handlerNames, hname) + } + + // Service descriptor. + g.P("var ", serviceDescVar, " = ", grpcPackage.Ident("ServiceDesc"), " {") + g.P("ServiceName: ", strconv.Quote(string(service.Desc.FullName())), ",") + g.P("HandlerType: (*", serverType, ")(nil),") + g.P("Methods: []", grpcPackage.Ident("MethodDesc"), "{") + for i, method := range service.Methods { + if method.Desc.IsStreamingClient() || method.Desc.IsStreamingServer() { + continue + } + g.P("{") + g.P("MethodName: ", strconv.Quote(string(method.Desc.Name())), ",") + g.P("Handler: ", handlerNames[i], ",") + g.P("},") + } + g.P("},") + g.P("Streams: []", grpcPackage.Ident("StreamDesc"), "{") + for i, method := range service.Methods { + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + continue + } + g.P("{") + g.P("StreamName: ", strconv.Quote(string(method.Desc.Name())), ",") + g.P("Handler: ", handlerNames[i], ",") + if method.Desc.IsStreamingServer() { + g.P("ServerStreams: true,") + } + if method.Desc.IsStreamingClient() { + g.P("ClientStreams: true,") + } + g.P("},") + } + g.P("},") + g.P("Metadata: \"", file.Desc.Path(), "\",") + g.P("}") + g.P() +} + +func clientSignature(g *protogen.GeneratedFile, method *protogen.Method) string { + s := method.GoName + "(ctx " + g.QualifiedGoIdent(contextPackage.Ident("Context")) + if !method.Desc.IsStreamingClient() { + s += ", in *" + g.QualifiedGoIdent(method.Input.GoIdent) + } + s += ", opts ..." 
+ g.QualifiedGoIdent(grpcPackage.Ident("CallOption")) + ") (" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + s += "*" + g.QualifiedGoIdent(method.Output.GoIdent) + } else { + s += method.Parent.GoName + "_" + method.GoName + "Client" + } + s += ", error)" + return s +} + +func genClientMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method, index int) { + service := method.Parent + sname := fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.Desc.Name()) + + if method.Desc.Options().(*descriptorpb.MethodOptions).GetDeprecated() { + g.P(deprecationComment) + } + g.P("func (c *", unexport(service.GoName), "Client) ", clientSignature(g, method), "{") + if !method.Desc.IsStreamingServer() && !method.Desc.IsStreamingClient() { + g.P("out := new(", method.Output.GoIdent, ")") + g.P(`err := c.cc.Invoke(ctx, "`, sname, `", in, out, opts...)`) + g.P("if err != nil { return nil, err }") + g.P("return out, nil") + g.P("}") + g.P() + return + } + streamType := unexport(service.GoName) + method.GoName + "Client" + serviceDescVar := "_" + service.GoName + "_serviceDesc" + g.P("stream, err := c.cc.NewStream(ctx, &", serviceDescVar, ".Streams[", index, `], "`, sname, `", opts...)`) + g.P("if err != nil { return nil, err }") + g.P("x := &", streamType, "{stream}") + if !method.Desc.IsStreamingClient() { + g.P("if err := x.ClientStream.SendMsg(in); err != nil { return nil, err }") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + } + g.P("return x, nil") + g.P("}") + g.P() + + genSend := method.Desc.IsStreamingClient() + genRecv := method.Desc.IsStreamingServer() + genCloseAndRecv := !method.Desc.IsStreamingServer() + + // Stream auxiliary types and methods. + g.P("type ", service.GoName, "_", method.GoName, "Client interface {") + if genSend { + g.P("Send(*", method.Input.GoIdent, ") error") + } + if genRecv { + g.P("Recv() (*", method.Output.GoIdent, ", error)") + } + if genCloseAndRecv { + g.P("CloseAndRecv() (*", method.Output.GoIdent, ", error)") + } + g.P(grpcPackage.Ident("ClientStream")) + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPackage.Ident("ClientStream")) + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", method.Input.GoIdent, ") error {") + g.P("return x.ClientStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", method.Output.GoIdent, ", error) {") + g.P("m := new(", method.Output.GoIdent, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + if genCloseAndRecv { + g.P("func (x *", streamType, ") CloseAndRecv() (*", method.Output.GoIdent, ", error) {") + g.P("if err := x.ClientStream.CloseSend(); err != nil { return nil, err }") + g.P("m := new(", method.Output.GoIdent, ")") + g.P("if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } +} + +func serverSignature(g *protogen.GeneratedFile, method *protogen.Method) string { + var reqArgs []string + ret := "error" + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + reqArgs = append(reqArgs, g.QualifiedGoIdent(contextPackage.Ident("Context"))) + ret = "(*" + g.QualifiedGoIdent(method.Output.GoIdent) + ", error)" + } + if !method.Desc.IsStreamingClient() { + reqArgs = append(reqArgs, "*"+g.QualifiedGoIdent(method.Input.GoIdent)) + } + if method.Desc.IsStreamingClient() || 
method.Desc.IsStreamingServer() { + reqArgs = append(reqArgs, method.Parent.GoName+"_"+method.GoName+"Server") + } + return method.GoName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + +func genServerMethod(gen *protogen.Plugin, file *protogen.File, g *protogen.GeneratedFile, method *protogen.Method) string { + service := method.Parent + hname := fmt.Sprintf("_%s_%s_Handler", service.GoName, method.GoName) + + if !method.Desc.IsStreamingClient() && !method.Desc.IsStreamingServer() { + g.P("func ", hname, "(srv interface{}, ctx ", contextPackage.Ident("Context"), ", dec func(interface{}) error, interceptor ", grpcPackage.Ident("UnaryServerInterceptor"), ") (interface{}, error) {") + g.P("in := new(", method.Input.GoIdent, ")") + g.P("if err := dec(in); err != nil { return nil, err }") + g.P("if interceptor == nil { return srv.(", service.GoName, "Server).", method.GoName, "(ctx, in) }") + g.P("info := &", grpcPackage.Ident("UnaryServerInfo"), "{") + g.P("Server: srv,") + g.P("FullMethod: ", strconv.Quote(fmt.Sprintf("/%s/%s", service.Desc.FullName(), method.GoName)), ",") + g.P("}") + g.P("handler := func(ctx ", contextPackage.Ident("Context"), ", req interface{}) (interface{}, error) {") + g.P("return srv.(", service.GoName, "Server).", method.GoName, "(ctx, req.(*", method.Input.GoIdent, "))") + g.P("}") + g.P("return interceptor(ctx, in, info, handler)") + g.P("}") + g.P() + return hname + } + streamType := unexport(service.GoName) + method.GoName + "Server" + g.P("func ", hname, "(srv interface{}, stream ", grpcPackage.Ident("ServerStream"), ") error {") + if !method.Desc.IsStreamingClient() { + g.P("m := new(", method.Input.GoIdent, ")") + g.P("if err := stream.RecvMsg(m); err != nil { return err }") + g.P("return srv.(", service.GoName, "Server).", method.GoName, "(m, &", streamType, "{stream})") + } else { + g.P("return srv.(", service.GoName, "Server).", method.GoName, "(&", streamType, "{stream})") + } + g.P("}") + g.P() + + genSend := method.Desc.IsStreamingServer() + genSendAndClose := !method.Desc.IsStreamingServer() + genRecv := method.Desc.IsStreamingClient() + + // Stream auxiliary types and methods. + g.P("type ", service.GoName, "_", method.GoName, "Server interface {") + if genSend { + g.P("Send(*", method.Output.GoIdent, ") error") + } + if genSendAndClose { + g.P("SendAndClose(*", method.Output.GoIdent, ") error") + } + if genRecv { + g.P("Recv() (*", method.Input.GoIdent, ", error)") + } + g.P(grpcPackage.Ident("ServerStream")) + g.P("}") + g.P() + + g.P("type ", streamType, " struct {") + g.P(grpcPackage.Ident("ServerStream")) + g.P("}") + g.P() + + if genSend { + g.P("func (x *", streamType, ") Send(m *", method.Output.GoIdent, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genSendAndClose { + g.P("func (x *", streamType, ") SendAndClose(m *", method.Output.GoIdent, ") error {") + g.P("return x.ServerStream.SendMsg(m)") + g.P("}") + g.P() + } + if genRecv { + g.P("func (x *", streamType, ") Recv() (*", method.Input.GoIdent, ", error) {") + g.P("m := new(", method.Input.GoIdent, ")") + g.P("if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err }") + g.P("return m, nil") + g.P("}") + g.P() + } + + return hname +} + +const deprecationComment = "// Deprecated: Do not use." 
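// Illustrative sketch (not emitted by this file): for a hypothetical proto
// package "example" with a service "Greeter" and a unary method "Get",
// clientSignature and genClientMethod above produce client code of roughly
// this shape (names and message types are assumptions for illustration only):
//
//	func (c *greeterClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetReply, error) {
//		out := new(GetReply)
//		err := c.cc.Invoke(ctx, "/example.Greeter/Get", in, out, opts...)
//		if err != nil {
//			return nil, err
//		}
//		return out, nil
//	}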
+ +func unexport(s string) string { return strings.ToLower(s[:1]) + s[1:] } diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go new file mode 100644 index 000000000..e810e6fea --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/buffer.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + WireVarint = 0 + WireFixed32 = 5 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 +) + +// EncodeVarint returns the varint encoded bytes of v. +func EncodeVarint(v uint64) []byte { + return protowire.AppendVarint(nil, v) +} + +// SizeVarint returns the length of the varint encoded bytes of v. +// This is equal to len(EncodeVarint(v)). +func SizeVarint(v uint64) int { + return protowire.SizeVarint(v) +} + +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. +// It returns (0, 0) if there is a parse error. +func DecodeVarint(b []byte) (uint64, int) { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, 0 + } + return v, n +} + +// Buffer is a buffer for encoding and decoding the protobuf wire format. +// It may be reused between invocations to reduce memory usage. +type Buffer struct { + buf []byte + idx int + deterministic bool +} + +// NewBuffer allocates a new Buffer initialized with buf, +// where the contents of buf are considered the unread portion of the buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic specifies whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (b *Buffer) SetDeterministic(deterministic bool) { + b.deterministic = deterministic +} + +// SetBuf sets buf as the internal buffer, +// where the contents of buf are considered the unread portion of the buffer. +func (b *Buffer) SetBuf(buf []byte) { + b.buf = buf + b.idx = 0 +} + +// Reset clears the internal buffer of all written and unread data. +func (b *Buffer) Reset() { + b.buf = b.buf[:0] + b.idx = 0 +} + +// Bytes returns the internal buffer. +func (b *Buffer) Bytes() []byte { + return b.buf +} + +// Unread returns the unread portion of the buffer. 
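// Illustrative sketch (not part of this file): varints store seven payload bits
// per byte, least significant group first, with the high bit of each byte set
// while more bytes follow. 300 (binary 1 0010 1100) therefore encodes as the
// two bytes 0xAC, 0x02, which the helpers above round-trip:
func varintExample() {
	b := EncodeVarint(300) // []byte{0xAC, 0x02}
	v, n := DecodeVarint(b)
	fmt.Println(v, n, SizeVarint(300)) // 300 2 2
}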
+func (b *Buffer) Unread() []byte { + return b.buf[b.idx:] +} + +// Marshal appends the wire-format encoding of m to the buffer. +func (b *Buffer) Marshal(m Message) error { + var err error + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. +func (b *Buffer) Unmarshal(m Message) error { + err := UnmarshalMerge(b.Unread(), m) + b.idx = len(b.buf) + return err +} + +type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } + +func (m *unknownFields) String() string { panic("not implemented") } +func (m *unknownFields) Reset() { panic("not implemented") } +func (m *unknownFields) ProtoMessage() { panic("not implemented") } + +// DebugPrint dumps the encoded bytes of b with a header and footer including s +// to stdout. This is only intended for debugging. +func (*Buffer) DebugPrint(s string, b []byte) { + m := MessageReflect(new(unknownFields)) + m.SetUnknown(b) + b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) + fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) +} + +// EncodeVarint appends an unsigned varint encoding to the buffer. +func (b *Buffer) EncodeVarint(v uint64) error { + b.buf = protowire.AppendVarint(b.buf, v) + return nil +} + +// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag32(v uint64) error { + return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) +} + +// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag64(v uint64) error { + return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) +} + +// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed32(v uint64) error { + b.buf = protowire.AppendFixed32(b.buf, uint32(v)) + return nil +} + +// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed64(v uint64) error { + b.buf = protowire.AppendFixed64(b.buf, uint64(v)) + return nil +} + +// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. +func (b *Buffer) EncodeRawBytes(v []byte) error { + b.buf = protowire.AppendBytes(b.buf, v) + return nil +} + +// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. +// It does not validate whether v contains valid UTF-8. +func (b *Buffer) EncodeStringBytes(v string) error { + b.buf = protowire.AppendString(b.buf, v) + return nil +} + +// EncodeMessage appends a length-prefixed encoded message to the buffer. +func (b *Buffer) EncodeMessage(m Message) error { + var err error + b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// DecodeVarint consumes an encoded unsigned varint from the buffer. +func (b *Buffer) DecodeVarint() (uint64, error) { + v, n := protowire.ConsumeVarint(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag32() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil +} + +// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. 
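// Illustrative sketch (not part of this file): zig-zag encoding maps signed
// values of small magnitude to small unsigned values (0 -> 0, -1 -> 1, 1 -> 2,
// -2 -> 3, ...) so they still produce short varints. Round-tripping -3 through
// the Buffer methods above:
func zigzagExample() {
	var buf Buffer
	n := int64(-3)
	_ = buf.EncodeZigzag64(uint64(n)) // zigzag(-3) = 5, written as the single byte 0x05
	v, _ := buf.DecodeZigzag64()
	fmt.Println(int64(v)) // -3
}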
+func (b *Buffer) DecodeZigzag64() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil +} + +// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed32() (uint64, error) { + v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed64() (uint64, error) { + v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. +// If alloc is specified, it returns a copy the raw bytes +// rather than a sub-slice of the buffer. +func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { + v, n := protowire.ConsumeBytes(b.buf[b.idx:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + b.idx += n + if alloc { + v = append([]byte(nil), v...) + } + return v, nil +} + +// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. +// It does not validate whether the raw bytes contain valid UTF-8. +func (b *Buffer) DecodeStringBytes() (string, error) { + v, n := protowire.ConsumeString(b.buf[b.idx:]) + if n < 0 { + return "", protowire.ParseError(n) + } + b.idx += n + return v, nil +} + +// DecodeMessage consumes a length-prefixed message from the buffer. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeMessage(m Message) error { + v, err := b.DecodeRawBytes(false) + if err != nil { + return err + } + return UnmarshalMerge(v, m) +} + +// DecodeGroup consumes a message group from the buffer. +// It assumes that the start group marker has already been consumed and +// consumes all bytes until (and including the end group marker). +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeGroup(m Message) error { + v, n, err := consumeGroup(b.buf[b.idx:]) + if err != nil { + return err + } + b.idx += n + return UnmarshalMerge(v, m) +} + +// consumeGroup parses b until it finds an end group marker, returning +// the raw bytes of the message (excluding the end group marker) and the +// the total length of the message (including the end group marker). 
+func consumeGroup(b []byte) ([]byte, int, error) { + b0 := b + depth := 1 // assume this follows a start group marker + for { + _, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return nil, 0, protowire.ParseError(tagLen) + } + b = b[tagLen:] + + var valLen int + switch wtyp { + case protowire.VarintType: + _, valLen = protowire.ConsumeVarint(b) + case protowire.Fixed32Type: + _, valLen = protowire.ConsumeFixed32(b) + case protowire.Fixed64Type: + _, valLen = protowire.ConsumeFixed64(b) + case protowire.BytesType: + _, valLen = protowire.ConsumeBytes(b) + case protowire.StartGroupType: + depth++ + case protowire.EndGroupType: + depth-- + default: + return nil, 0, errors.New("proto: cannot parse reserved wire type") + } + if valLen < 0 { + return nil, 0, protowire.ParseError(valLen) + } + b = b[valLen:] + + if depth == 0 { + return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 3cd3249f7..000000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,253 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. 
- // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. 
- elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08be..000000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
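// Illustrative sketch (not part of this file): Clone and Merge, documented in
// the clone.go removal above, deep-copy and combine messages. The message type
// *examplepb.Item and its package are hypothetical and stand in for any
// generated message.
func cloneAndMerge(a, b *examplepb.Item) *examplepb.Item {
	dst := Clone(a).(*examplepb.Item) // deep copy; a is left untouched
	Merge(dst, b)                     // fields set in b overwrite dst's; repeated fields append
	return dst
}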
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. 
-func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. 
-func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. 
This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go new file mode 100644 index 000000000..d399bf069 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/defaults.go @@ -0,0 +1,63 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// SetDefaults sets unpopulated scalar fields to their default values. +// Fields within a oneof are not set even if they have a default value. +// SetDefaults is recursively called upon any populated message fields. +func SetDefaults(m Message) { + if m != nil { + setDefaults(MessageReflect(m)) + } +} + +func setDefaults(m protoreflect.Message) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if !m.Has(fd) { + if fd.HasDefault() && fd.ContainingOneof() == nil { + v := fd.Default() + if fd.Kind() == protoreflect.BytesKind { + v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes + } + m.Set(fd, v) + } + continue + } + } + + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + setDefaults(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + setDefaults(ls.Get(i).Message()) + } + } + // Handle map of messages. 
+ case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + setDefaults(v.Message()) + return true + }) + } + } + return true + }) +} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go index 35b882c09..e8db57e09 100644 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -1,63 +1,113 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -import "errors" +import ( + "encoding/json" + "errors" + "fmt" + "strconv" -// Deprecated: do not use. + protoV2 "google.golang.org/protobuf/proto" +) + +var ( + // Deprecated: No longer returned. + ErrNil = errors.New("proto: Marshal called with nil") + + // Deprecated: No longer returned. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") + + // Deprecated: No longer returned. + ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") +) + +// Deprecated: Do not use. type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } -// Deprecated: do not use. +// Deprecated: Do not use. func GetStats() Stats { return Stats{} } -// Deprecated: do not use. +// Deprecated: Do not use. func MarshalMessageSet(interface{}) ([]byte, error) { return nil, errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func UnmarshalMessageSet([]byte, interface{}) error { return errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. 
func MarshalMessageSetJSON(interface{}) ([]byte, error) { return nil, errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func UnmarshalMessageSetJSON([]byte, interface{}) error { return errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func RegisterMessageSetType(Message, int32, string) {} + +// Deprecated: Do not use. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// Deprecated: Do not use. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// Deprecated: Do not use; this type existed for intenal-use only. +type InternalMessageInfo struct{} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) DiscardUnknown(m Message) { + DiscardUnknown(m) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { + return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Size(m Message) int { + return protoV2.Size(MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { + return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) +} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go index dea2617ce..2187e877f 100644 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -1,48 +1,13 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" + "google.golang.org/protobuf/reflect/protoreflect" ) -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - // DiscardUnknown recursively discards all unknown fields from this message // and all embedded messages. // @@ -51,300 +16,43 @@ type generatedDiscarder interface { // marshal to be able to produce a message that continues to have those // unrecognized fields. To avoid this, DiscardUnknown is used to // explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di + if m != nil { + discardUnknown(MessageReflect(m)) } - return di } -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) +func discardUnknown(m protoreflect.Message) { + m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + discardUnknown(m.Get(fd).Message()) } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + discardUnknown(ls.Get(i).Message()) } } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } + // Handle map of messages. 
+ case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + discardUnknown(v.Message()) + return true + }) } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue } - vf := v.Field(i) - tf := f.Type + return true + }) - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } + // Discard unknown fields. 
+ if len(m.GetUnknown()) > 0 { + m.SetUnknown(nil) } } diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2cf..000000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. 
-func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. 
-func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index f9b6e41b3..000000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,301 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. 
- -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index fa88add30..42fc120c9 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -1,607 +1,356 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Types and routines for supporting protocol buffer extensions. - */ - import ( "errors" "fmt" - "io" "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} +type ( + // ExtensionDesc represents an extension descriptor and + // is used to interact with an extension field in a message. + // + // Variables of this type are generated in code by protoc-gen-go. + ExtensionDesc = protoimpl.ExtensionInfo -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} + // ExtensionRange represents a range of message extensions. + // Used in code generated by protoc-gen-go. 
+ ExtensionRange = protoiface.ExtensionRangeV1 -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} + // Deprecated: Do not use; this is an internal type. + Extension = protoimpl.ExtensionFieldV1 -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} + // Deprecated: Do not use; this is an internal type. + XXX_InternalExtensions = protoimpl.ExtensionFields +) -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} +// ErrMissingExtension reports whether the extension was not present. +var ErrMissingExtension = errors.New("proto: missing extension") var errNotExtendable = errors.New("proto: not an extendable proto.Message") -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension +// HasExtension reports whether the extension field is present in m +// either as an explicitly populated field or as an unknown field. +func HasExtension(m Message, xt *ExtensionDesc) (has bool) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return false } -} -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension + // Check whether any populated known field matches the field number. + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + has = mr.Has(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + has = int32(fd.Number()) == xt.Field + return !has }) - e.p.extensionMap = make(map[int32]Extension) } - return e.p.extensionMap -} -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. 
-func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil + // Check whether any unknown field matches the field number. + for b := mr.GetUnknown(); !has && len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + has = int32(num) == xt.Field + b = b[n:] } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. - value interface{} - - // enc is the raw bytes for the extension field. - enc []byte + return has } -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { +// ClearExtension removes the extension field from m +// either as an explicitly populated field or as an unknown field. +func ClearExtension(m Message, xt *ExtensionDesc) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + mr.Clear(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if int32(fd.Number()) == xt.Field { + mr.Clear(fd) + return false + } return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. 
- if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false + }) } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok + clearUnknown(mr, fieldNum(xt.Field)) } -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { +// ClearAllExtensions clears all extensions from m. +// This includes populated fields and unknown fields in the extension range. +func ClearAllExtensions(m Message) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) + + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.IsExtension() { + mr.Clear(fd) + } + return true + }) + clearUnknown(mr, mr.Descriptor().ExtensionRanges()) } -// GetExtension retrieves a proto2 extended field from pb. +// GetExtension retrieves a proto2 extended field from m. // // If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), // then GetExtension parses the encoded field and returns a Go value of the specified type. // If the field is not present, then the default value is returned (if one is specified), // otherwise ErrMissingExtension is reported. // -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err +// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes for the extension field. 
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Retrieve the unknown fields for this extension field. + var bo protoreflect.RawFields + for bi := mr.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if int32(num) == xt.Field { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return extensionAsLegacyType(e.value), nil + // For type incomplete descriptors, only retrieve the unknown fields. + if xt.ExtensionType == nil { + return []byte(bo), nil } - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil + // If the extension field only exists as unknown fields, unmarshal it. + // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err + if !mr.Has(xtd) && len(bo) > 0 { + m2 := mr.New() + if err := (proto.UnmarshalOptions{ + Resolver: extensionResolver{xt}, + }.Unmarshal(bo, m2.Interface())); err != nil { + return nil, err + } + if m2.Has(xtd) { + mr.Set(xtd, m2.Get(xtd)) + clearUnknown(mr, fieldNum(xt.Field)) + } } - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default + // Check whether the message has the extension field set or a default. + var pv protoreflect.Value + switch { + case mr.Has(xtd): + pv = mr.Get(xtd) + case xtd.HasDefault(): + pv = xtd.Default() + default: return nil, ErrMissingExtension } - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension + v := xt.InterfaceOf(pv) + rv := reflect.ValueOf(v) + if isScalarKind(rv.Kind()) { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() } + return v, nil +} - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. 
- return sf.value, nil - } +// extensionResolver is a custom extension resolver that stores a single +// extension type that takes precedence over the global registry. +type extensionResolver struct{ xt protoreflect.ExtensionType } - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) +func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { + return r.xt, nil } - return value.Interface(), nil + return protoregistry.GlobalTypes.FindExtensionByName(field) } -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } +func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { + return r.xt, nil } - return value.Interface(), nil + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) } -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// GetExtensions returns a list of the extensions values present in m, +// corresponding with the provided list of extension descriptors, xts. +// If an extension is missing in m, the corresponding value is nil. +func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return nil, errNotExtendable } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } + + vs := make([]interface{}, len(xts)) + for i, xt := range xts { + v, err := GetExtension(m, xt) if err != nil { - return + if err == ErrMissingExtension { + continue + } + return vs, err } + vs[i] = v } - return + return vs, nil } -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. 
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// SetExtension sets an extension field in m to the provided value. +func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return errNotExtendable } - registeredExtensions := RegisteredExtensions(pb) - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil + rv := reflect.ValueOf(v) + if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) + } + if isScalarKind(rv.Elem().Kind()) { + v = rv.Elem().Interface() } - - extensions = append(extensions, desc) } - return extensions, nil -} -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} + mr.Set(xtd, xt.ValueOf(v)) + clearUnknown(mr, fieldNum(xt.Field)) return nil } -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { +// SetRawExtension inserts b into the unknown fields of m. +// +// Deprecated: Use Message.ProtoReflect.SetUnknown instead. +func SetRawExtension(m Message, fnum int32, b []byte) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) + + // Verify that the raw field is valid. + for b0 := b; len(b0) > 0; { + num, _, n := protowire.ConsumeField(b0) + if int32(num) != fnum { + panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) + } + b0 = b0[n:] } -} -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. 
+ ClearExtension(m, &ExtensionDesc{Field: fnum}) + mr.SetUnknown(append(mr.GetUnknown(), b...)) +} -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) +// ExtensionDescs returns a list of extension descriptors found in m, +// containing descriptors for both populated extension fields in m and +// also unknown fields of m that are in the extension range. +// For the later case, an type incomplete descriptor is provided where only +// the ExtensionDesc.Field field is populated. +// The order of the extension descriptors is undefined. +func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m + // Collect a set of known extension descriptors. + extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) + mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor) + if xd, ok := xt.Type().(*ExtensionDesc); ok { + extDescs[fd.Number()] = xd + } + } + return true + }) + + // Collect a set of unknown extension descriptors. + extRanges := mr.Descriptor().ExtensionRanges() + for b := mr.GetUnknown(); len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + if extRanges.Has(num) && extDescs[num] == nil { + extDescs[num] = nil + } + b = b[n:] } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + + // Transpose the set of descriptors into a list. + var xts []*ExtensionDesc + for num, xt := range extDescs { + if xt == nil { + xt = &ExtensionDesc{Field: int32(num)} + } + xts = append(xts, xt) } - m[desc.Field] = desc + return xts, nil } -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] +// isValidExtension reports whether xtd is a valid extension descriptor for md. +func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { + return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) } -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. -func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { +// isScalarKind reports whether k is a protobuf scalar kind (except bytes). +// This function exists for historical reasons since the representation of +// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. +func isScalarKind(k reflect.Kind) bool { + switch k { case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. 
- switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } + return true + default: + return false } - return v } -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() +// clearUnknown removes unknown fields from m where remover.Has reports true. +func clearUnknown(m protoreflect.Message, remover interface { + Has(protoreflect.FieldNumber) bool +}) { + var bo protoreflect.RawFields + for bi := m.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if !remover.Has(num) { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - return v + if bi := m.GetUnknown(); len(bi) != len(bo) { + m.SetUnknown(bo) + } +} + +type fieldNum protoreflect.FieldNumber + +func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { + return protoreflect.FieldNumber(n1) == n2 } diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index fdd328bb7..000000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,965 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
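// Editor's note: the block below is an illustrative annotation, not part of the
// patch. It sketches how callers use the legacy extension API that the rewritten
// extensions.go above now implements on top of protoreflect. The package "pb",
// its message type pb.MyMessage, and the extension descriptor pb.E_MyExtension
// are hypothetical stand-ins for generated code, so the snippet will not compile
// as-is; only the proto.* calls correspond to functions shown in the diff above
// (SetExtension, HasExtension, GetExtension, ClearExtension).
//
//	msg := &pb.MyMessage{}
//
//	// SetExtension stores the value as a populated extension field and, per the
//	// new implementation, drops any matching bytes from the unknown fields
//	// (see clearUnknown above). Scalar extensions are passed as pointers,
//	// e.g. proto.String(...).
//	if err := proto.SetExtension(msg, pb.E_MyExtension, proto.String("hello")); err != nil {
//		log.Fatal(err)
//	}
//
//	// HasExtension reports the field as present whether it is stored as a
//	// populated extension field or still sits in the unknown fields.
//	if proto.HasExtension(msg, pb.E_MyExtension) {
//		v, err := proto.GetExtension(msg, pb.E_MyExtension)
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println(*(v.(*string))) // scalar extensions come back as *T
//	}
//
//	// ClearExtension removes both the populated field and any unknown-field
//	// bytes carrying the same field number.
//	proto.ClearExtension(msg, pb.E_MyExtension)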
- -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - 
} - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. 
-func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. 
-func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index f48a75676..000000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. 
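// Editor's note: an illustrative sketch, not part of the patch. The deleted
// comment above describes the MessageSet wire format (group Item = 1 containing
// type_id = 2 and message = 3). The helper below frames one such item using the
// protowire package that the replacement extensions.go code in this patch
// already uses; appendMessageSetItem, typeID, and payload are names invented
// for this sketch.
//
//	import "google.golang.org/protobuf/encoding/protowire"
//
//	func appendMessageSetItem(b []byte, typeID int32, payload []byte) []byte {
//		b = protowire.AppendTag(b, 1, protowire.StartGroupType) // Item group start
//		b = protowire.AppendTag(b, 2, protowire.VarintType)     // type_id = 2
//		b = protowire.AppendVarint(b, uint64(typeID))
//		b = protowire.AppendTag(b, 3, protowire.BytesType)      // message = 3
//		b = protowire.AppendBytes(b, payload)
//		b = protowire.AppendTag(b, 1, protowire.EndGroupType)   // Item group end
//		return b
//	}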
- -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 94fa9194a..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,360 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. 
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - if deref { - u = u.Elem() - } - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! 
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. - slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. 
-func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? - return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index dbfffe071..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,313 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} - } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. 
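The two deleted files implement the same accessor surface in two ways: the reflect-based one identifies a field by its index path and reads it with FieldByIndex, while this unsafe-based one identifies it by its byte offset and reads it with pointer arithmetic. A minimal standalone sketch of that difference, using a hypothetical msg struct rather than any type from this package:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// msg is a hypothetical struct standing in for a generated message type.
type msg struct {
	A int32
	B string
}

func main() {
	m := &msg{A: 7, B: "x"}
	sf, _ := reflect.TypeOf(*m).FieldByName("B")

	// pointer_reflect.go style: a field is its index path (sf.Index),
	// resolved with FieldByIndex on a reflect.Value.
	rp := reflect.ValueOf(m).Elem().FieldByIndex(sf.Index).Addr().Interface().(*string)

	// pointer_unsafe.go style: a field is its byte offset (sf.Offset),
	// resolved with pointer arithmetic on an unsafe.Pointer.
	up := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + sf.Offset))

	fmt.Println(*rp, *up) // x x
}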
- /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? -func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. 
-// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index a4b8c0cd3..dcdc2202f 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -1,162 +1,104 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - import ( "fmt" - "log" "reflect" - "sort" "strconv" "strings" "sync" -) - -const debug bool = false -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" ) -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. +// StructProperties represents protocol buffer type information for a +// generated protobuf message in the open-struct API. +// +// Deprecated: Do not use. type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order + // Prop are the properties for each field. + // + // Fields belonging to a oneof are stored in OneofTypes instead, with a + // single Properties representing the parent oneof held here. + // + // The order of Prop matches the order of fields in the Go struct. + // Struct fields that are not related to protobufs have a "XXX_" prefix + // in the Properties.Name and must be ignored by the user. + Prop []*Properties // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. + // It is keyed by the protobuf field name. 
OneofTypes map[string]*OneofProperties } -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. +// Properties represents the type information for a protobuf message field. +// +// Deprecated: Do not use. type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string + // Name is a placeholder name with little meaningful semantic value. + // If the name has an "XXX_" prefix, the entire Properties must be ignored. + Name string + // OrigName is the protobuf field name or oneof name. + OrigName string + // JSONName is the JSON name for the protobuf field. + JSONName string + // Enum is a placeholder name for enums. + // For historical reasons, this is neither the Go name for the enum, + // nor the protobuf name for the enum. + Enum string // Deprecated: Do not use. + // Weak contains the full name of the weakly referenced message. + Weak string + // Wire is a string representation of the wire type. + Wire string + // WireType is the protobuf wire type for the field. WireType int - Tag int + // Tag is the protobuf field number. + Tag int + // Required reports whether this is a required field. Required bool + // Optional reports whether this is a optional field. Optional bool + // Repeated reports whether this is a repeated field. Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only + // Packed reports whether this is a packed repeated field of scalars. + Packed bool + // Proto3 reports whether this field operates under the proto3 syntax. + Proto3 bool + // Oneof reports whether this field belongs within a oneof. + Oneof bool + + // Default is the default value in string form. + Default string + // HasDefault reports whether the field has a default value. + HasDefault bool + + // MapKeyProp is the properties for the key field for a map field. + MapKeyProp *Properties + // MapValProp is the properties for the value field for a map field. + MapValProp *Properties +} - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only +// OneofProperties represents the type information for a protobuf oneof. +// +// Deprecated: Do not use. +type OneofProperties struct { + // Type is a pointer to the generated wrapper type for the field value. 
+ // This is nil for messages that are not in the open-struct API. + Type reflect.Type + // Field is the index into StructProperties.Prop for the containing oneof. + Field int + // Prop is the properties for the field. + Prop *Properties } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) + s += "," + strconv.Itoa(p.Tag) if p.Required { s += ",req" } @@ -170,18 +112,21 @@ func (p *Properties) String() string { s += ",packed" } s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { + if p.JSONName != "" { s += ",json=" + p.JSONName } - if p.proto3 { + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Weak) > 0 { + s += ",weak=" + p.Weak + } + if p.Proto3 { s += ",proto3" } - if p.oneof { + if p.Oneof { s += ",oneof" } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } if p.HasDefault { s += ",def=" + p.Default } @@ -189,356 +134,173 @@ func (p *Properties) String() string { } // Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": +func (p *Properties) Parse(tag string) { + // For example: "bytes,49,opt,name=foo,def=hello!" 
+ for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + p.OrigName = s[len("name="):] + case strings.HasPrefix(s, "json="): + p.JSONName = s[len("json="):] + case strings.HasPrefix(s, "enum="): + p.Enum = s[len("enum="):] + case strings.HasPrefix(s, "weak="): + p.Weak = s[len("weak="):] + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + p.Tag = int(n) + case s == "opt": p.Optional = true - case f == "rep": + case s == "req": + p.Required = true + case s == "rep": p.Repeated = true - case f == "packed": + case s == "varint" || s == "zigzag32" || s == "zigzag64": + p.Wire = s + p.WireType = WireVarint + case s == "fixed32": + p.Wire = s + p.WireType = WireFixed32 + case s == "fixed64": + p.Wire = s + p.WireType = WireFixed64 + case s == "bytes": + p.Wire = s + p.WireType = WireBytes + case s == "group": + p.Wire = s + p.WireType = WireStartGroup + case s == "packed": p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): + case s == "proto3": + p.Proto3 = true + case s == "oneof": + p.Oneof = true + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. -func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) + p.Default, i = tag[len("def="):], len(tag) } + tag = strings.TrimPrefix(tag[i:], ",") } } -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - // Init populates the properties from a protocol buffer struct tag. +// +// Deprecated: Do not use. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" 
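For reference, the tag grammar in the example comment above can be exercised directly through the exported Parse method; a minimal sketch, assuming the vendored github.com/golang/protobuf module is importable:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Feed the example tag string through the exported Parse method.
	var p proto.Properties
	p.Parse("bytes,49,opt,name=foo,def=hello!")
	fmt.Println(p.Wire, p.Tag, p.Optional, p.OrigName, p.HasDefault, p.Default)
	// Expected output: bytes 49 true foo true hello!
}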
p.Name = name p.OrigName = name if tag == "" { return } p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) + + if typ != nil && typ.Kind() == reflect.Map { + p.MapKeyProp = new(Properties) + p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) + p.MapValProp = new(Properties) + p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) + } } -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) +var propertiesCache sync.Map // map[reflect.Type]*StructProperties -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. +// GetProperties returns the list of properties for the type represented by t, +// which must be a generated protocol buffer message in the open-struct API, +// where protobuf message fields are represented by exported Go struct fields. +// +// Deprecated: Use protobuf reflection instead. func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop + if p, ok := propertiesCache.Load(t); ok { + return p.(*StructProperties) } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop + p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) + return p.(*StructProperties) } -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop +func newProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) } + var hasOneof bool prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) + // Construct a list of properties for each field in the struct. for i := 0; i < t.NumField(); i++ { - f := t.Field(i) p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + f := t.Field(i) + tagField := f.Tag.Get("protobuf") + p.Init(f.Type, f.Name, tagField, &f) - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof + tagOneof := f.Tag.Get("protobuf_oneof") + if tagOneof != "" { + hasOneof = true + p.OrigName = tagOneof } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") + + // Rename unrelated struct fields with the "XXX_" prefix since so much + // user code simply checks for this to exclude special fields. 
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { + p.Name = "XXX_" + p.Name + p.OrigName = "XXX_" + p.OrigName + } else if p.Weak != "" { + p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field } + + prop.Prop = append(prop.Prop, p) } - // Re-order prop.order. - sort.Sort(prop) + // Construct a mapping of oneof field names to properties. + if hasOneof { + var oneofWrappers []interface{} + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) + } + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) + } + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { + if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { + oneofWrappers = m.ProtoMessageInfo().OneofWrappers + } + } - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T + for _, wrapper := range oneofWrappers { + p := &OneofProperties{ + Type: reflect.ValueOf(wrapper).Type(), // *T Prop: new(Properties), } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue + f := p.Type.Elem().Field(0) + p.Prop.Name = f.Name + p.Prop.Parse(f.Tag.Get("protobuf")) + + // Determine the struct field that contains this oneof. + // Each wrapper is assignable to exactly one parent field. + var foundOneof bool + for i := 0; i < t.NumField() && !foundOneof; i++ { + if p.Type.AssignableTo(t.Field(i).Type) { + p.Field = i + foundOneof = true } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ + if !foundOneof { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + prop.OneofTypes[p.Prop.OrigName] = p } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i } - prop.reqCount = reqCount return prop } -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. 
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
-func FileDescriptor(filename string) []byte { return protoFiles[filename] } +func (sp *StructProperties) Len() int { return len(sp.Prop) } +func (sp *StructProperties) Less(i, j int) bool { return false } +func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go new file mode 100644 index 000000000..5aee89c32 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functionality for handling protocol buffer messages. +// In particular, it provides marshaling and unmarshaling between a protobuf +// message and the binary wire format. +// +// See https://developers.google.com/protocol-buffers/docs/gotutorial for +// more information. +// +// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + ProtoPackageIsVersion1 = true + ProtoPackageIsVersion2 = true + ProtoPackageIsVersion3 = true + ProtoPackageIsVersion4 = true +) + +// GeneratedEnum is any enum type generated by protoc-gen-go +// which is a named int32 kind. +// This type exists for documentation purposes. +type GeneratedEnum interface{} + +// GeneratedMessage is any message type generated by protoc-gen-go +// which is a pointer to a named struct kind. +// This type exists for documentation purposes. +type GeneratedMessage interface{} + +// Message is a protocol buffer message. +// +// This is the v1 version of the message interface and is marginally better +// than an empty interface as it lacks any method to programatically interact +// with the contents of the message. +// +// A v2 message is declared in "google.golang.org/protobuf/proto".Message and +// exposes protobuf reflection as a first-class feature of the interface. +// +// To convert a v1 message to a v2 message, use the MessageV2 function. +// To convert a v2 message to a v1 message, use the MessageV1 function. +type Message = protoiface.MessageV1 + +// MessageV1 converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1(m GeneratedMessage) protoiface.MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2 converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func MessageV2(m GeneratedMessage) protoV2.Message { + return protoimpl.X.ProtoMessageV2Of(m) +} + +// MessageReflect returns a reflective view for a message. +// It returns nil if m is nil. +func MessageReflect(m Message) protoreflect.Message { + return protoimpl.X.MessageOf(m) +} + +// Marshaler is implemented by messages that can marshal themselves. +// This interface is used by the following functions: Size, Marshal, +// Buffer.Marshal, and Buffer.EncodeMessage. +// +// Deprecated: Do not implement. +type Marshaler interface { + // Marshal formats the encoded bytes of the message. + // It should be deterministic and emit valid protobuf wire data. + // The caller takes ownership of the returned buffer. + Marshal() ([]byte, error) +} + +// Unmarshaler is implemented by messages that can unmarshal themselves. 
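A minimal sketch of the v1/v2 conversion helpers described above, assuming a generated message type such as wrapperspb.StringValue from the vendored google.golang.org/protobuf module is available:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// wrapperspb.StringValue is a v2 generated message that also satisfies the
	// v1 Message interface, so it can be passed through both conversion helpers.
	var m proto.Message = wrapperspb.String("hi")

	v2 := proto.MessageV2(m)  // view through the v2 API
	v1 := proto.MessageV1(v2) // and back to the v1 interface

	fmt.Printf("%T %T\n", v1, v2)                                // both *wrapperspb.StringValue
	fmt.Println(proto.MessageReflect(m).Descriptor().FullName()) // google.protobuf.StringValue
}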
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge, +// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. +// +// Deprecated: Do not implement. +type Unmarshaler interface { + // Unmarshal parses the encoded bytes of the protobuf wire input. + // The provided buffer is only valid for during method call. + // It should not reset the receiver message. + Unmarshal([]byte) error +} + +// Merger is implemented by messages that can merge themselves. +// This interface is used by the following functions: Clone and Merge. +// +// Deprecated: Do not implement. +type Merger interface { + // Merge merges the contents of src into the receiver message. + // It clones all data structures in src such that it aliases no mutable + // memory referenced by src. + Merge(src Message) +} + +// RequiredNotSetError is an error type returned when +// marshaling or unmarshaling a message with missing required fields. +type RequiredNotSetError struct { + err error +} + +func (e *RequiredNotSetError) Error() string { + if e.err != nil { + return e.err.Error() + } + return "proto: required field not set" +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +func checkRequiredNotSet(m protoV2.Message) error { + if err := protoV2.CheckInitialized(m); err != nil { + return &RequiredNotSetError{err: err} + } + return nil +} + +// Clone returns a deep copy of src. +func Clone(src Message) Message { + return MessageV1(protoV2.Clone(MessageV2(src))) +} + +// Merge merges src into dst, which must be messages of the same type. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +func Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they are the same protobuf message type, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + return protoV2.Equal(MessageV2(x), MessageV2(y)) +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go new file mode 100644 index 000000000..1e7ff6420 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -0,0 +1,323 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
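A short usage sketch of the Clone, Merge, and Equal wrappers documented above, under the same wrapperspb assumption; each call simply forwards to its google.golang.org/protobuf/proto counterpart:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("hi")
	b := proto.Clone(a).(*wrapperspb.StringValue) // deep copy, delegated to the v2 implementation

	fmt.Println(proto.Equal(a, b)) // true
	b.Value = "bye"
	fmt.Println(proto.Equal(a, b)) // false

	proto.Merge(a, b)         // populated scalar fields in src overwrite dst
	fmt.Println(a.GetValue()) // bye
}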
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// filePath is the path to the proto source file. +type filePath = string // e.g., "google/protobuf/descriptor.proto" + +// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. +type fileDescGZIP = []byte + +var fileCache sync.Map // map[filePath]fileDescGZIP + +// RegisterFile is called from generated code to register the compressed +// FileDescriptorProto with the file path for a proto source file. +// +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. +func RegisterFile(s filePath, d fileDescGZIP) { + // Decompress the descriptor. + zr, err := gzip.NewReader(bytes.NewReader(d)) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + + // Construct a protoreflect.FileDescriptor from the raw descriptor. + // Note that DescBuilder.Build automatically registers the constructed + // file descriptor with the v2 registry. + protoimpl.DescBuilder{RawDescriptor: b}.Build() + + // Locally cache the raw descriptor form for the file. + fileCache.Store(s, d) +} + +// FileDescriptor returns the compressed FileDescriptorProto given the file path +// for a proto source file. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. +func FileDescriptor(s filePath) fileDescGZIP { + if v, ok := fileCache.Load(s); ok { + return v.(fileDescGZIP) + } + + // Find the descriptor in the v2 registry. + var b []byte + if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { + if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { + b = fd.ProtoLegacyRawDesc() + } else { + // TODO: Use protodesc.ToFileDescriptorProto to construct + // a descriptorpb.FileDescriptorProto and marshal it. + // However, doing so causes the proto package to have a dependency + // on descriptorpb, leading to cyclic dependency issues. + } + } + + // Locally cache the raw descriptor form for the file. + if len(b) > 0 { + v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) + return v.(fileDescGZIP) + } + return nil +} + +// enumName is the name of an enum. For historical reasons, the enum name is +// neither the full Go name nor the full protobuf name of the enum. +// The name is the dot-separated combination of just the proto package that the +// enum is declared within followed by the Go type name of the generated enum. +type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" + +// enumsByName maps enum values by name to their numeric counterpart. +type enumsByName = map[string]int32 + +// enumsByNumber maps enum values by number to their name counterpart. +type enumsByNumber = map[int32]string + +var enumCache sync.Map // map[enumName]enumsByName +var numFilesCache sync.Map // map[protoreflect.FullName]int + +// RegisterEnum is called from the generated code to register the mapping of +// enum value names to enum numbers for the enum identified by s. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. 
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { + if _, ok := enumCache.Load(s); ok { + panic("proto: duplicate enum registered: " + s) + } + enumCache.Store(s, m) + + // This does not forward registration to the v2 registry since this API + // lacks sufficient information to construct a complete v2 enum descriptor. +} + +// EnumValueMap returns the mapping from enum value names to enum numbers for +// the enum of the given name. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. +func EnumValueMap(s enumName) enumsByName { + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + + // Check whether the cache is stale. If the number of files in the current + // package differs, then it means that some enums may have been recently + // registered upstream that we do not know about. + var protoPkg protoreflect.FullName + if i := strings.LastIndexByte(s, '.'); i >= 0 { + protoPkg = protoreflect.FullName(s[:i]) + } + v, _ := numFilesCache.Load(protoPkg) + numFiles, _ := v.(int) + if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { + return nil // cache is up-to-date; was not found earlier + } + + // Update the enum cache for all enums declared in the given proto package. + numFiles = 0 + protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { + walkEnums(fd, func(ed protoreflect.EnumDescriptor) { + name := protoimpl.X.LegacyEnumName(ed) + if _, ok := enumCache.Load(name); !ok { + m := make(enumsByName) + evs := ed.Values() + for i := evs.Len() - 1; i >= 0; i-- { + ev := evs.Get(i) + m[string(ev.Name())] = int32(ev.Number()) + } + enumCache.LoadOrStore(name, m) + } + }) + numFiles++ + return true + }) + numFilesCache.Store(protoPkg, numFiles) + + // Check cache again for enum map. + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + return nil +} + +// walkEnums recursively walks all enums declared in d. +func walkEnums(d interface { + Enums() protoreflect.EnumDescriptors + Messages() protoreflect.MessageDescriptors +}, f func(protoreflect.EnumDescriptor)) { + eds := d.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + } + mds := d.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + walkEnums(mds.Get(i), f) + } +} + +// messageName is the full name of protobuf message. +type messageName = string + +var messageTypeCache sync.Map // map[messageName]reflect.Type + +// RegisterType is called from generated code to register the message Go type +// for a message of the given name. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. +func RegisterType(m Message, s messageName) { + mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) + if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { + panic(err) + } + messageTypeCache.Store(s, reflect.TypeOf(m)) +} + +// RegisterMapType is called from generated code to register the Go map type +// for a protobuf message representing a map entry. +// +// Deprecated: Do not use. +func RegisterMapType(m interface{}, s messageName) { + t := reflect.TypeOf(m) + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid map kind: %v", t)) + } + if _, ok := messageTypeCache.Load(s); ok { + panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) + } + messageTypeCache.Store(s, t) +} + +// MessageType returns the message type for a named message. +// It returns nil if not found. 
+// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. +func MessageType(s messageName) reflect.Type { + if v, ok := messageTypeCache.Load(s); ok { + return v.(reflect.Type) + } + + // Derive the message type from the v2 registry. + var t reflect.Type + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { + t = messageGoType(mt) + } + + // If we could not get a concrete type, it is possible that it is a + // pseudo-message for a map entry. + if t == nil { + d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) + if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { + kt := goTypeForField(md.Fields().ByNumber(1)) + vt := goTypeForField(md.Fields().ByNumber(2)) + t = reflect.MapOf(kt, vt) + } + } + + // Locally cache the message type for the given name. + if t != nil { + v, _ := messageTypeCache.LoadOrStore(s, t) + return v.(reflect.Type) + } + return nil +} + +func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { + switch k := fd.Kind(); k { + case protoreflect.EnumKind: + if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { + return enumGoType(et) + } + return reflect.TypeOf(protoreflect.EnumNumber(0)) + case protoreflect.MessageKind, protoreflect.GroupKind: + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { + return messageGoType(mt) + } + return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() + default: + return reflect.TypeOf(fd.Default().Interface()) + } +} + +func enumGoType(et protoreflect.EnumType) reflect.Type { + return reflect.TypeOf(et.New(0)) +} + +func messageGoType(mt protoreflect.MessageType) reflect.Type { + return reflect.TypeOf(MessageV1(mt.Zero().Interface())) +} + +// MessageName returns the full protobuf name for the given message type. +// +// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. +func MessageName(m Message) messageName { + if m == nil { + return "" + } + if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { + return m.XXX_MessageName() + } + return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) +} + +// RegisterExtension is called from the generated code to register +// the extension descriptor. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. +func RegisterExtension(d *ExtensionDesc) { + if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { + panic(err) + } +} + +type extensionsByNumber = map[int32]*ExtensionDesc + +var extensionCache sync.Map // map[messageName]extensionsByNumber + +// RegisteredExtensions returns a map of the registered extensions for the +// provided protobuf message, indexed by the extension field number. +// +// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. +func RegisteredExtensions(m Message) extensionsByNumber { + // Check whether the cache is stale. If the number of extensions for + // the given message differs, then it means that some extensions were + // recently registered upstream that we do not know about. + s := MessageName(m) + v, _ := extensionCache.Load(s) + xs, _ := v.(extensionsByNumber) + if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { + return xs // cache is up-to-date + } + + // Cache is stale, re-compute the extensions map. 
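A rough illustration of the name-based lookups above, assuming wrapperspb is imported so its types are registered with the global registries:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.Bool(true)

	name := proto.MessageName(m) // full protobuf name: google.protobuf.BoolValue
	fmt.Println(name)

	// The Go type is resolved through the v2 registry when it is not in the local cache.
	fmt.Println(proto.MessageType(name)) // *wrapperspb.BoolValue
}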
+ xs = make(extensionsByNumber) + protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { + if xd, ok := xt.(*ExtensionDesc); ok { + xs[int32(xt.TypeDescriptor().Number())] = xd + } else { + // TODO: This implies that the protoreflect.ExtensionType is a + // custom type not generated by protoc-gen-go. We could try and + // convert the type to an ExtensionDesc. + } + return true + }) + extensionCache.Store(s, xs) + return xs +} diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index 5cb11fa95..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2776 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. 
-type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. 
- t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. 
- sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } - sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - deref: deref, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" - tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -// wiretype returns the wire encoding of the type. 
-func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. -func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - 
return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case 
reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + 
SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := 
*ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func 
sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. 
- if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
- - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def6a..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. 
All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. 
-func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
- /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) 
- } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index acee2fc52..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2053 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. 
- u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. 
- for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types. 
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 
0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
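
Aside, for readers skimming the vendored unmarshal tables: makeUnmarshalMap above reads each map entry as a tiny submessage whose key is field 1 and whose value is field 2, each preceded by a tag of (field number << 3) | wire type. A standalone sketch of that framing follows; the byte values are chosen by hand for illustration and this is not the vendored code.

package main

import "fmt"

func main() {
	// One entry of a map<string, string> field, framed the way the map
	// unmarshaler above expects it: key is field 1, value is field 2,
	// both length-delimited (wire type 2). Lengths here fit in one byte.
	entry := []byte{
		0x0a, 0x01, 'a', // tag = 1<<3|2, length 1, key "a"
		0x12, 0x01, 'b', // tag = 2<<3|2, length 1, value "b"
	}
	for len(entry) > 0 {
		tag, size := entry[0], entry[1]
		fmt.Printf("field %d, wire type %d, value %q\n", tag>>3, tag&7, entry[2:2+size])
		entry = entry[2+size:]
	}
}

Running it prints "field 1, wire type 2" for the key and "field 2, wire type 2" for the value, which is the shape the switch on x >> 3 in makeUnmarshalMap dispatches on.
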
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 1aaee725b..000000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,843 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
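
The unmarshal helpers above bottom out in encodeVarint/decodeVarint. As a compact round-trip of the same base-128 scheme (7 payload bits per byte, high bit set on every byte except the last), here is a simplified standalone sketch; helper names are ad hoc, and it omits the ten-byte/overflow validation the vendored decoder performs.

package main

import "fmt"

// putUvarint encodes x with 7 payload bits per byte, most significant
// bit set on every byte except the last.
func putUvarint(x uint64) []byte {
	var b []byte
	for x >= 0x80 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

// getUvarint is the inverse; it returns the decoded value and the number
// of bytes consumed, or (0, 0) on truncated input.
func getUvarint(b []byte) (uint64, int) {
	var x uint64
	for i, c := range b {
		x |= uint64(c&0x7f) << (7 * uint(i))
		if c < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

func main() {
	b := putUvarint(300)
	fmt.Printf("% x\n", b) // ac 02
	v, n := getUvarint(b)
	fmt.Println(v, n) // 300 2
}
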
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. 
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go new file mode 100644 index 000000000..4a5931009 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -0,0 +1,801 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/prototext" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextUnmarshalV2 = false + +// ParseError is returned by UnmarshalText. +type ParseError struct { + Message string + + // Deprecated: Do not use. + Line, Offset int +} + +func (e *ParseError) Error() string { + if wrapTextUnmarshalV2 { + return e.Message + } + if e.Line == 1 { + return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) + } + return fmt.Sprintf("line %d: %v", e.Line, e.Message) +} + +// UnmarshalText parses a proto text formatted string into m. +func UnmarshalText(s string, m Message) error { + if u, ok := m.(encoding.TextUnmarshaler); ok { + return u.UnmarshalText([]byte(s)) + } + + m.Reset() + mi := MessageV2(m) + + if wrapTextUnmarshalV2 { + err := prototext.UnmarshalOptions{ + AllowPartial: true, + }.Unmarshal([]byte(s), mi) + if err != nil { + return &ParseError{Message: err.Error()} + } + return checkRequiredNotSet(mi) + } else { + if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { + return err + } + return checkRequiredNotSet(mi) + } +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { + md := m.Descriptor() + fds := md.Fields() + + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + seen := make(map[protoreflect.FieldNumber]bool) + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + if err := p.unmarshalExtensionOrAny(m, seen); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. 
+ name := protoreflect.Name(tok.value) + fd := fds.ByName(name) + switch { + case fd == nil: + gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { + fd = gd + } + case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: + fd = nil + case fd.IsWeak() && fd.Message().IsPlaceholder(): + fd = nil + } + if fd == nil { + typeName := string(md.FullName()) + if m, ok := m.Interface().(Message); ok { + t := reflect.TypeOf(m) + if t.Kind() == reflect.Ptr { + typeName = t.Elem().String() + } + } + return p.errorf("unknown field name %q in %v", name, typeName) + } + if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) + } + if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { + return p.errorf("non-repeated field %q was repeated", fd.Name()) + } + seen[fd.Number()] = true + + // Consume any colon. + if err := p.checkForColon(fd); err != nil { + return err + } + + // Parse into the field. + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + if v, err = p.unmarshalValue(v, fd); err != nil { + return err + } + m.Set(fd, v) + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { + name, err := p.consumeExtensionOrAnyName() + if err != nil { + return err + } + + // If it contains a slash, it's an Any type URL. + if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { + tok := p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + + mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) + if err != nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) + } + m2 := mt.New() + if err := p.unmarshalMessage(m2, terminator); err != nil { + return err + } + b, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) + } + + urlFD := m.Descriptor().Fields().ByName("type_url") + valFD := m.Descriptor().Fields().ByName("value") + if seen[urlFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) + } + if seen[valFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) + } + m.Set(urlFD, protoreflect.ValueOfString(name)) + m.Set(valFD, protoreflect.ValueOfBytes(b)) + seen[urlFD.Number()] = true + seen[valFD.Number()] = true + return nil + } + + xname := protoreflect.FullName(name) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(m.Descriptor()) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + return p.errorf("unrecognized extension %q", name) + } + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return p.errorf("extension field %q does not extend message %q", 
name, m.Descriptor().FullName()) + } + + if err := p.checkForColon(fd); err != nil { + return err + } + + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + v, err = p.unmarshalValue(v, fd) + if err != nil { + return err + } + m.Set(fd, v) + return p.consumeOptionalSeparator() +} + +func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch { + case fd.IsList(): + lv := v.List() + var err error + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return v, p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return v, nil + } + + // One value of the repeated field. + p.back() + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + return v, nil + case fd.IsMap(): + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + + keyFD := fd.MapKey() + valFD := fd.MapValue() + + mv := v.Map() + kv := keyFD.Default() + vv := mv.NewValue() + for { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == terminator { + break + } + var err error + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return v, err + } + if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + case "value": + if err := p.checkForColon(valFD); err != nil { + return v, err + } + if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + default: + p.back() + return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + mv.Set(kv.MapKey(), vv) + return v, nil + default: + p.back() + return p.unmarshalSingularValue(v, fd) + } +} + +func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch fd.Kind() { + case protoreflect.BoolKind: + switch tok.value { + case "true", "1", "t", "True": + return protoreflect.ValueOfBool(true), nil + case "false", "0", "f", "False": + return protoreflect.ValueOfBool(false), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. 
+ // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil + } + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil + } + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(x)), nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(x)), nil + } + case protoreflect.FloatKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(x)), nil + } + case protoreflect.DoubleKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 64); err == nil { + return protoreflect.ValueOfFloat64(float64(x)), nil + } + case protoreflect.StringKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfString(tok.unquoted), nil + } + case protoreflect.BytesKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil + } + case protoreflect.EnumKind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil + } + vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()), nil + } + case protoreflect.MessageKind, protoreflect.GroupKind: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + err := p.unmarshalMessage(v.Message(), terminator) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } + return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + if fd.Message() == nil { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +// consumeExtensionOrAnyName consumes an extension name or an Any type URL and +// the following ']'. It returns the name or URL consumed. 
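
Two quirks in the scalar cases above are easy to miss: hex literals wider than the target signed type are accepted and reinterpreted via two's complement (for C++ compatibility), and a trailing 'f' is stripped from float literals unless the value is "inf" or "-inf". A standalone sketch of both conversions, using only strconv and strings rather than the parser itself:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// "0xffffffff" does not fit in int32, so it is parsed as unsigned and
	// reinterpreted, mirroring the Int32Kind case above.
	x, _ := strconv.ParseUint("0xffffffff", 0, 32)
	fmt.Println(int32(-(int64(^x) + 1))) // -1

	// A trailing 'f' on float literals is tolerated for C++ compatibility,
	// but "inf" and "-inf" are left untouched.
	v := "1.25f"
	if strings.HasSuffix(v, "f") && v != "inf" && v != "-inf" {
		v = strings.TrimSuffix(v, "f")
	}
	f, _ := strconv.ParseFloat(v, 32)
	fmt.Println(float32(f)) // 1.25
}
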
+func (p *textParser) consumeExtensionOrAnyName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in unmarshalMessage to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +var errBadUTF8 = errors.New("proto: bad UTF-8") + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go new 
file mode 100644 index 000000000..a31134eeb --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_encode.go @@ -0,0 +1,560 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "sort" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextMarshalV2 = false + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line) + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes the proto text format of m to w. +func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { + b, err := tm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// Text returns a proto text formatted string of m. +func (tm *TextMarshaler) Text(m Message) string { + b, _ := tm.marshal(m) + return string(b) +} + +func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return []byte(""), nil + } + + if wrapTextMarshalV2 { + if m, ok := m.(encoding.TextMarshaler); ok { + return m.MarshalText() + } + + opts := prototext.MarshalOptions{ + AllowPartial: true, + EmitUnknown: true, + } + if !tm.Compact { + opts.Indent = " " + } + if !tm.ExpandAny { + opts.Resolver = (*protoregistry.Types)(nil) + } + return opts.Marshal(mr.Interface()) + } else { + w := &textWriter{ + compact: tm.Compact, + expandAny: tm.ExpandAny, + complete: true, + } + + if m, ok := m.(encoding.TextMarshaler); ok { + b, err := m.MarshalText() + if err != nil { + return nil, err + } + w.Write(b) + return w.buf, nil + } + + err := w.writeMessage(mr) + return w.buf, err + } +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// MarshalText writes the proto text format of m to w. +func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } + +// MarshalTextString returns a proto text formatted string of m. +func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } + +// CompactText writes the compact proto text format of m to w. +func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } + +// CompactTextString returns a compact proto text formatted string of m. +func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } + +var ( + newline = []byte("\n") + endBraceNewline = []byte("}\n") + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + compact bool // same as TextMarshaler.Compact + expandAny bool // same as TextMarshaler.ExpandAny + complete bool // whether the current position is a complete line + indent int // indentation level; never negative + buf []byte +} + +func (w *textWriter) Write(p []byte) (n int, _ error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, p...) 
+ w.complete = false + return len(p), nil + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + w.buf = append(w.buf, ' ') + n++ + } + w.buf = append(w.buf, frag...) + n += len(frag) + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + w.buf = append(w.buf, frag...) + n += len(frag) + if i+1 < len(frags) { + w.buf = append(w.buf, '\n') + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, c) + w.complete = c == '\n' + return nil +} + +func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + + if fd.Kind() != protoreflect.GroupKind { + w.buf = append(w.buf, fd.Name()...) + w.WriteByte(':') + } else { + // Use message type name for group field name. + w.buf = append(w.buf, fd.Message().Name()...) + } + + if !w.compact { + w.WriteByte(' ') + } +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
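A minimal usage sketch of the text marshaler above, as exposed through proto.MarshalTextString and proto.CompactTextString; the generated package examplepb and its Person message are hypothetical, used only for illustration:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	examplepb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	p := &examplepb.Person{Name: proto.String("Ada"), Id: proto.Int32(7)}
	fmt.Println(proto.MarshalTextString(p)) // multi-line text form, emitted by the textWriter above
	fmt.Println(proto.CompactTextString(p)) // single-line form, i.e. TextMarshaler{Compact: true}
}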
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { + md := m.Descriptor() + fdURL := md.Fields().ByName("type_url") + fdVal := md.Fields().ByName("value") + + url := m.Get(fdURL).String() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return false, nil + } + + b := m.Get(fdVal).Bytes() + m2 := mt.New() + if err := proto.Unmarshal(b, m2.Interface()); err != nil { + return false, nil + } + w.Write([]byte("[")) + if requiresQuotes(url) { + w.writeQuotedString(url) + } else { + w.Write([]byte(url)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.indent++ + } + if err := w.writeMessage(m2); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.indent-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (w *textWriter) writeMessage(m protoreflect.Message) error { + md := m.Descriptor() + if w.expandAny && md.FullName() == "google.protobuf.Any" { + if canExpand, err := w.writeProto3Any(m); canExpand { + return err + } + } + + fds := md.Fields() + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + if fd == nil || !m.Has(fd) { + continue + } + + switch { + case fd.IsList(): + lv := m.Get(fd).List() + for j := 0; j < lv.Len(); j++ { + w.writeName(fd) + v := lv.Get(j) + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + } + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := m.Get(fd).Map() + + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + for _, entry := range entries { + w.writeName(fd) + w.WriteByte('<') + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + w.writeName(kfd) + if err := w.writeSingularValue(entry.key, kfd); err != nil { + return err + } + w.WriteByte('\n') + w.writeName(vfd) + if err := w.writeSingularValue(entry.val, vfd); err != nil { + return err + } + w.WriteByte('\n') + w.indent-- + w.WriteByte('>') + w.WriteByte('\n') + } + default: + w.writeName(fd) + if err := w.writeSingularValue(m.Get(fd), fd); err != nil { + return err + } + w.WriteByte('\n') + } + } + + if b := m.GetUnknown(); len(b) > 0 { + w.writeUnknownFields(b) + } + return w.writeExtensions(m) +} + +func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch fd.Kind() { + case protoreflect.FloatKind, protoreflect.DoubleKind: + switch vf := v.Float(); { + case math.IsInf(vf, +1): + w.Write(posInf) + case math.IsInf(vf, -1): + w.Write(negInf) + case math.IsNaN(vf): + w.Write(nan) + default: + fmt.Fprint(w, v.Interface()) + } + case 
protoreflect.StringKind: + // NOTE: This does not validate UTF-8 for historical reasons. + w.writeQuotedString(string(v.String())) + case protoreflect.BytesKind: + w.writeQuotedString(string(v.Bytes())) + case protoreflect.MessageKind, protoreflect.GroupKind: + var bra, ket byte = '<', '>' + if fd.Kind() == protoreflect.GroupKind { + bra, ket = '{', '}' + } + w.WriteByte(bra) + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + m := v.Message() + if m2, ok := m.Interface().(encoding.TextMarshaler); ok { + b, err := m2.MarshalText() + if err != nil { + return err + } + w.Write(b) + } else { + w.writeMessage(m) + } + w.indent-- + w.WriteByte(ket) + case protoreflect.EnumKind: + if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { + fmt.Fprint(w, ev.Name()) + } else { + fmt.Fprint(w, v.Enum()) + } + default: + fmt.Fprint(w, v.Interface()) + } + return nil +} + +// writeQuotedString writes a quoted string in the protocol buffer text format. +func (w *textWriter) writeQuotedString(s string) { + w.WriteByte('"') + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\n': + w.buf = append(w.buf, `\n`...) + case '\r': + w.buf = append(w.buf, `\r`...) + case '\t': + w.buf = append(w.buf, `\t`...) + case '"': + w.buf = append(w.buf, `\"`...) + case '\\': + w.buf = append(w.buf, `\\`...) + default: + if isPrint := c >= 0x20 && c < 0x7f; isPrint { + w.buf = append(w.buf, c) + } else { + w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) + } + } + } + w.WriteByte('"') +} + +func (w *textWriter) writeUnknownFields(b []byte) { + if !w.compact { + fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) + } + + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return + } + b = b[n:] + + if wtyp == protowire.EndGroupType { + w.indent-- + w.Write(endBraceNewline) + continue + } + fmt.Fprint(w, num) + if wtyp != protowire.StartGroupType { + w.WriteByte(':') + } + if !w.compact || wtyp == protowire.StartGroupType { + w.WriteByte(' ') + } + switch wtyp { + case protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed64Type: + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.BytesType: + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprintf(w, "%q", v) + case protowire.StartGroupType: + w.WriteByte('{') + w.indent++ + default: + fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) + } + w.WriteByte('\n') + } +} + +// writeExtensions writes all the extensions in m. +func (w *textWriter) writeExtensions(m protoreflect.Message) error { + md := m.Descriptor() + if md.ExtensionRanges().Len() == 0 { + return nil + } + + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + // For message set, use the name of the message as the extension name. 
+ name := string(ext.desc.FullName()) + if isMessageSet(ext.desc.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + if !ext.desc.IsList() { + if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { + return err + } + } else { + lv := ext.val.List() + for i := 0; i < lv.Len(); i++ { + if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { + return err + } + } + } + } + return nil +} + +func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + fmt.Fprintf(w, "[%s]:", name) + if !w.compact { + w.WriteByte(' ') + } + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + for i := 0; i < w.indent*2; i++ { + w.buf = append(w.buf, ' ') + } + w.complete = false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index bb55a3af2..000000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} 
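The tokenizer and parser being deleted here backed the package's public text-format entry point, proto.UnmarshalText, which the replacement decoder added earlier in this patch continues to provide. A minimal sketch of that entry point, assuming the same hypothetical examplepb.Person (here also assumed to carry a repeated int32 field named scores):

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	examplepb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	p := &examplepb.Person{}
	// Quoted strings, repeated values in [..] list notation, and nested
	// messages in {...} or <...> are all accepted by the text parser.
	if err := proto.UnmarshalText(`name: "Ada" id: 7 scores: [1, 2, 3]`, p); err != nil {
		log.Fatal(err) // parse errors report the line and byte offset of the bad token
	}
}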
- -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. 
UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go new file mode 100644 index 000000000..d7c28da5a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wire.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + if m == nil { + return 0 + } + mi := MessageV2(m) + return protoV2.Size(mi) +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + b, err := marshalAppend(nil, m, false) + if b == nil { + b = zeroBytes + } + return b, err +} + +var zeroBytes = make([]byte, 0, 0) + +func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { + if m == nil { + return nil, ErrNil + } + mi := MessageV2(m) + nbuf, err := protoV2.MarshalOptions{ + Deterministic: deterministic, + AllowPartial: true, + }.MarshalAppend(buf, mi) + if err != nil { + return buf, err + } + if len(buf) == len(nbuf) { + if !mi.ProtoReflect().IsValid() { + return buf, ErrNil + } + } + return nbuf, checkRequiredNotSet(mi) +} + +// Unmarshal parses a wire-format message in b and places the decoded results in m. +// +// Unmarshal resets m before starting to unmarshal, so any existing data in m is always +// removed. Use UnmarshalMerge to preserve and append to existing data. +func Unmarshal(b []byte, m Message) error { + m.Reset() + return UnmarshalMerge(b, m) +} + +// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. +func UnmarshalMerge(b []byte, m Message) error { + mi := MessageV2(m) + out, err := protoV2.UnmarshalOptions{ + AllowPartial: true, + Merge: true, + }.UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: mi.ProtoReflect(), + }) + if err != nil { + return err + } + if out.Flags&protoiface.UnmarshalInitialized > 0 { + return nil + } + return checkRequiredNotSet(mi) +} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go new file mode 100644 index 000000000..398e34859 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wrappers.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int stores v in a new int32 value and returns a pointer to it. +// +// Deprecated: Use Int32 instead. +func Int(v int) *int32 { return Int32(int32(v)) } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. 
+func Int64(v int64) *int64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go index 403fde9e3..63dc05785 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -1,2888 +1,200 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/descriptor.proto +// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto package descriptor import ( - fmt "fmt" - math "math" - - proto "github.com/golang/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type FieldDescriptorProto_Type int32 - -const ( - // 0 is reserved for errors. - // Order is weird for historical reasons. - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 - // New in version 2. 
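The wire.go and wrappers.go files added above provide the classic proto.Marshal, proto.Unmarshal, proto.Size, and scalar pointer helpers as thin layers over the google.golang.org/protobuf runtime. A minimal round-trip sketch, again using the hypothetical examplepb.Person:

package main

import (
	"log"

	"github.com/golang/protobuf/proto"
	examplepb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	in := &examplepb.Person{Name: proto.String("Ada"), Id: proto.Int32(7)}
	b, err := proto.Marshal(in) // MarshalOptions{AllowPartial: true} under the hood
	if err != nil {
		log.Fatal(err) // ErrNil for a nil message, or a missing required field
	}

	out := &examplepb.Person{}
	if err := proto.Unmarshal(b, out); err != nil { // resets out, then merges
		log.Fatal(err)
	}
	log.Printf("encoded %d bytes; proto.Size reports %d", len(b), proto.Size(in))
}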
- FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 -) - -var FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", -} - -var FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 17, - "TYPE_SINT64": 18, -} - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} - -func (x FieldDescriptorProto_Type) String() string { - return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) -} - -func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(value) - return nil -} - -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{4, 0} -} - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -var FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", -} - -var FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, -} - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { - p := new(FieldDescriptorProto_Label) - *p = x - return p -} - -func (x FieldDescriptorProto_Label) String() string { - return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) -} - -func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(value) - return nil -} - -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. -type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 - // etc. 
- FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 -) - -var FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", -} - -var FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, -} - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} - -func (x FileOptions_OptimizeMode) String() string { - return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) -} - -func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(value) - return nil -} - -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{10, 0} -} - -type FieldOptions_CType int32 - -const ( - // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -var FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", -} - -var FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, -} - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} - -func (x FieldOptions_CType) String() string { - return proto.EnumName(FieldOptions_CType_name, int32(x)) -} - -func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") - if err != nil { - return err - } - *x = FieldOptions_CType(value) - return nil -} - -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{12, 0} -} - -type FieldOptions_JSType int32 - -const ( - // Use the default type. - FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. - FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -var FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", -} - -var FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, -} - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} - -func (x FieldOptions_JSType) String() string { - return proto.EnumName(FieldOptions_JSType_name, int32(x)) -} - -func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") - if err != nil { - return err - } - *x = FieldOptions_JSType(value) - return nil -} - -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{12, 1} -} - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. 
-type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" ) -var MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", -} - -var MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, -} - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} - -func (x MethodOptions_IdempotencyLevel) String() string { - return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) -} - -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(value) - return nil -} - -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{17, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{0} -} - -func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) -} -func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) -} -func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorSet.Merge(m, src) -} -func (m *FileDescriptorSet) XXX_Size() int { - return xxx_messageInfo_FileDescriptorSet.Size(m) -} -func (m *FileDescriptorSet) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo - -func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if m != nil { - return m.File - } - return nil -} - -// Describes a complete .proto file. -type FileDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` - // Names of files imported by this file. - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. 
- // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. - MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{1} -} - -func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) -} -func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorProto.Merge(m, src) -} -func (m *FileDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FileDescriptorProto.Size(m) -} -func (m *FileDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo - -func (m *FileDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FileDescriptorProto) GetPackage() string { - if m != nil && m.Package != nil { - return *m.Package - } - return "" -} - -func (m *FileDescriptorProto) GetDependency() []string { - if m != nil { - return m.Dependency - } - return nil -} - -func (m *FileDescriptorProto) GetPublicDependency() []int32 { - if m != nil { - return m.PublicDependency - } - return nil -} - -func (m *FileDescriptorProto) GetWeakDependency() []int32 { - if m != nil { - return m.WeakDependency - } - return nil -} - -func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if m != nil { - return m.MessageType - } - return nil -} - -func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if m != nil { - return m.Service - } - return nil -} - -func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - 
} - return nil -} - -func (m *FileDescriptorProto) GetOptions() *FileOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if m != nil { - return m.SourceCodeInfo - } - return nil -} - -func (m *FileDescriptorProto) GetSyntax() string { - if m != nil && m.Syntax != nil { - return *m.Syntax - } - return "" -} - -// Describes a message type. -type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{2} -} - -func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) -} -func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) -} -func (m *DescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto.Merge(m, src) -} -func (m *DescriptorProto) XXX_Size() int { - return xxx_messageInfo_DescriptorProto.Size(m) -} -func (m *DescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo - -func (m *DescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *DescriptorProto) GetField() []*FieldDescriptorProto { - if m != nil { - return m.Field - } - return nil -} - -func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *DescriptorProto) GetNestedType() []*DescriptorProto { - if m != nil { - return m.NestedType - } - return nil -} - -func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if m != nil { - return m.ExtensionRange - } - return nil -} - -func (m 
*DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { - if m != nil { - return m.OneofDecl - } - return nil -} - -func (m *DescriptorProto) GetOptions() *MessageOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *DescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } -func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{2, 0} -} - -func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) -} -func (m *DescriptorProto_ExtensionRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) -} -func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo - -func (m *DescriptorProto_ExtensionRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if m != nil { - return m.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. -type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } -func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ReservedRange) ProtoMessage() {} -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{2, 1} -} +// Symbols defined in public import of google/protobuf/descriptor.proto. 
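The regenerated descriptor.pb.go replaces the old self-contained generated code with type aliases and forwarded constants into google.golang.org/protobuf/types/descriptorpb (shown below), so existing identifiers keep compiling. A small sketch of what that aliasing means for callers:

package main

import (
	"fmt"

	"github.com/golang/protobuf/protoc-gen-go/descriptor"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// FieldDescriptorProto_Type is now an alias, and its constants are
	// forwarded from descriptorpb, so old and new values compare directly.
	var t descriptor.FieldDescriptorProto_Type = descriptor.FieldDescriptorProto_TYPE_STRING
	fmt.Println(t == descriptorpb.FieldDescriptorProto_TYPE_STRING) // true
}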
-func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) -} -func (m *DescriptorProto_ReservedRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) -} -func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo +type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type -func (m *DescriptorProto_ReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} +const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE +const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT +const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64 +const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64 +const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32 +const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64 +const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32 +const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL +const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING +const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP +const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE +const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES +const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32 +const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM +const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32 +const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64 +const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32 +const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64 -func (m *DescriptorProto_ReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} +var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name +var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value -type ExtensionRangeOptions struct { - // The parser stores options it doesn't recognize here. See above. 
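The added lines above replace the locally generated descriptor types with aliases and constants that forward to google.golang.org/protobuf/types/descriptorpb, so code that still refers to the old names keeps compiling against the new implementation. A minimal sketch (assuming google.golang.org/protobuf is vendored alongside this change) showing that the forwarded enum values and name maps behave as before:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// The aliased constants resolve to the descriptorpb values, so either
	// spelling yields the same wire number and name.
	t := descriptorpb.FieldDescriptorProto_TYPE_STRING
	fmt.Println(int32(t), descriptorpb.FieldDescriptorProto_Type_name[int32(t)]) // 9 TYPE_STRING

	l := descriptorpb.FieldDescriptorProto_LABEL_REPEATED
	fmt.Println(int32(l), l.String()) // 3 LABEL_REPEATED
}

Because these are type aliases and shared variables rather than copies, values pass between the old and new import paths without any conversion.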
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{3} -} +const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL +const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED +const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED -var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} +var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name +var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value -func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ExtensionRangeOptions -} +type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode -func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) -} -func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) -} -func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) -} -func (m *ExtensionRangeOptions) XXX_Size() int { - return xxx_messageInfo_ExtensionRangeOptions.Size(m) -} -func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) -} +const FileOptions_SPEED = descriptorpb.FileOptions_SPEED +const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE +const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME -var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo +var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name +var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value -func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} +type FieldOptions_CType = descriptorpb.FieldOptions_CType -// Describes a field within a message. -type FieldDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. 
Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +const FieldOptions_STRING = descriptorpb.FieldOptions_STRING +const FieldOptions_CORD = descriptorpb.FieldOptions_CORD +const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{4} -} +var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name +var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value -func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) -} -func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldDescriptorProto.Merge(m, src) -} -func (m *FieldDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FieldDescriptorProto.Size(m) -} -func (m *FieldDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) -} +type FieldOptions_JSType = descriptorpb.FieldOptions_JSType -var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo +const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL +const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING +const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER -func (m *FieldDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} +var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name +var FieldOptions_JSType_value = 
descriptorpb.FieldOptions_JSType_value -func (m *FieldDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} +type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel -func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if m != nil && m.Label != nil { - return *m.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (m *FieldDescriptorProto) GetTypeName() string { - if m != nil && m.TypeName != nil { - return *m.TypeName - } - return "" -} - -func (m *FieldDescriptorProto) GetExtendee() string { - if m != nil && m.Extendee != nil { - return *m.Extendee - } - return "" -} - -func (m *FieldDescriptorProto) GetDefaultValue() string { - if m != nil && m.DefaultValue != nil { - return *m.DefaultValue - } - return "" -} - -func (m *FieldDescriptorProto) GetOneofIndex() int32 { - if m != nil && m.OneofIndex != nil { - return *m.OneofIndex - } - return 0 -} - -func (m *FieldDescriptorProto) GetJsonName() string { - if m != nil && m.JsonName != nil { - return *m.JsonName - } - return "" -} - -func (m *FieldDescriptorProto) GetOptions() *FieldOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a oneof. -type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{5} -} - -func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) -} -func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) -} -func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofDescriptorProto.Merge(m, src) -} -func (m *OneofDescriptorProto) XXX_Size() int { - return xxx_messageInfo_OneofDescriptorProto.Size(m) -} -func (m *OneofDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo - -func (m *OneofDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OneofDescriptorProto) GetOptions() *OneofOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes an enum type. -type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. 
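The enum descriptor messages removed above are plain data structs that can be built by hand. A small illustration (hypothetical Color enum, using the descriptorpb equivalents of the deleted EnumDescriptorProto and EnumValueDescriptorProto) of the Name/Value/Number fields and their nil-safe getters:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	enum := &descriptorpb.EnumDescriptorProto{
		Name: proto.String("Color"),
		Value: []*descriptorpb.EnumValueDescriptorProto{
			{Name: proto.String("COLOR_UNSPECIFIED"), Number: proto.Int32(0)},
			{Name: proto.String("COLOR_RED"), Number: proto.Int32(1)},
		},
	}

	// Getters tolerate unset fields, returning zero values instead of panicking.
	for _, v := range enum.GetValue() {
		fmt.Printf("%s.%s = %d\n", enum.GetName(), v.GetName(), v.GetNumber())
	}
}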
- ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. - ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{6} -} - -func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) -} -func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto.Merge(m, src) -} -func (m *EnumDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto.Size(m) -} -func (m *EnumDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo - -func (m *EnumDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if m != nil { - return m.Value - } - return nil -} - -func (m *EnumDescriptorProto) GetOptions() *EnumOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -// Range of reserved numeric values. Reserved values may not be used by -// entries in the same enum. Reserved ranges may not overlap. -// -// Note that this is distinct from DescriptorProto.ReservedRange in that it -// is inclusive such that it can appropriately represent the entire int32 -// domain. 
-type EnumDescriptorProto_EnumReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } -func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} -func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{6, 0} -} - -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo - -func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -// Describes a value within an enum. 
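The deleted comments distinguish message reserved ranges (exclusive end) from enum reserved ranges (inclusive end, so the full int32 domain can be represented). A short sketch with the descriptorpb counterparts, again assuming the google.golang.org/protobuf module:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Message reserved range: End is exclusive, so this reserves tags 5 through 9.
	msgRange := &descriptorpb.DescriptorProto_ReservedRange{
		Start: proto.Int32(5),
		End:   proto.Int32(10),
	}

	// Enum reserved range: End is inclusive, so this reserves values 5 through 10.
	enumRange := &descriptorpb.EnumDescriptorProto_EnumReservedRange{
		Start: proto.Int32(5),
		End:   proto.Int32(10),
	}

	fmt.Println(msgRange.GetStart(), msgRange.GetEnd())   // 5 10
	fmt.Println(enumRange.GetStart(), enumRange.GetEnd()) // 5 10
}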
-type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{7} -} - -func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) -} -func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) -} -func (m *EnumValueDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumValueDescriptorProto.Size(m) -} -func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo - -func (m *EnumValueDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumValueDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a service. 
-type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{8} -} - -func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) -} -func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) -} -func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) -} -func (m *ServiceDescriptorProto) XXX_Size() int { - return xxx_messageInfo_ServiceDescriptorProto.Size(m) -} -func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo - -func (m *ServiceDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { - if m != nil { - return m.Method - } - return nil -} - -func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a method of a service. -type MethodDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. 
- InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` - // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{9} -} - -func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) -} -func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) -} -func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodDescriptorProto.Merge(m, src) -} -func (m *MethodDescriptorProto) XXX_Size() int { - return xxx_messageInfo_MethodDescriptorProto.Size(m) -} -func (m *MethodDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo - -const Default_MethodDescriptorProto_ClientStreaming bool = false -const Default_MethodDescriptorProto_ServerStreaming bool = false - -func (m *MethodDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MethodDescriptorProto) GetInputType() string { - if m != nil && m.InputType != nil { - return *m.InputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOutputType() string { - if m != nil && m.OutputType != nil { - return *m.OutputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOptions() *MethodOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *MethodDescriptorProto) GetClientStreaming() bool { - if m != nil && m.ClientStreaming != nil { - return *m.ClientStreaming - } - return Default_MethodDescriptorProto_ClientStreaming -} - -func (m *MethodDescriptorProto) GetServerStreaming() bool { - if m != nil && m.ServerStreaming != nil { - return *m.ServerStreaming - } - return Default_MethodDescriptorProto_ServerStreaming -} - -type FileOptions struct { - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. 
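The MethodDescriptorProto getters above follow the usual proto2 pattern: they are safe on nil receivers and fall back to the declared defaults (false for both streaming flags) when a field is unset. A hedged sketch with made-up names (Watch, .example.WatchRequest, .example.Event):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	m := &descriptorpb.MethodDescriptorProto{
		Name:            proto.String("Watch"),
		InputType:       proto.String(".example.WatchRequest"),
		OutputType:      proto.String(".example.Event"),
		ServerStreaming: proto.Bool(true),
		// ClientStreaming deliberately left unset.
	}

	// The unset client_streaming flag reads back as its default, false.
	fmt.Println(m.GetName(), m.GetClientStreaming(), m.GetServerStreaming()) // Watch false true

	// Getters check the receiver for nil, so even a nil message is safe to query.
	var missing *descriptorpb.MethodDescriptorProto
	fmt.Println(missing.GetServerStreaming()) // false
}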
This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` - // Is this file deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` - // Namespace for generated classes; defaults to the package. - CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be used - // for determining the namespace. - PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. - RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{10} -} - -var extRange_FileOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FileOptions -} - -func (m *FileOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileOptions.Unmarshal(m, b) -} -func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) -} -func (m *FileOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileOptions.Merge(m, src) -} -func (m *FileOptions) XXX_Size() int { - return xxx_messageInfo_FileOptions.Size(m) -} -func (m *FileOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FileOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FileOptions proto.InternalMessageInfo - -const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaStringCheckUtf8 bool = false -const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED -const Default_FileOptions_CcGenericServices bool = false -const Default_FileOptions_JavaGenericServices bool = false -const Default_FileOptions_PyGenericServices bool = false -const Default_FileOptions_PhpGenericServices bool = false -const Default_FileOptions_Deprecated bool = false -const Default_FileOptions_CcEnableArenas bool = false - -func (m *FileOptions) GetJavaPackage() string { - if m != nil && m.JavaPackage != nil { - return *m.JavaPackage - } - return "" -} - -func (m *FileOptions) GetJavaOuterClassname() string { - if m != nil && m.JavaOuterClassname != nil { - return *m.JavaOuterClassname - } - return "" -} - -func (m *FileOptions) GetJavaMultipleFiles() bool { - if m != nil && m.JavaMultipleFiles != nil { - return *m.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -// Deprecated: Do not use. 
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if m != nil && m.JavaGenerateEqualsAndHash != nil { - return *m.JavaGenerateEqualsAndHash - } - return false -} - -func (m *FileOptions) GetJavaStringCheckUtf8() bool { - if m != nil && m.JavaStringCheckUtf8 != nil { - return *m.JavaStringCheckUtf8 - } - return Default_FileOptions_JavaStringCheckUtf8 -} - -func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if m != nil && m.OptimizeFor != nil { - return *m.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (m *FileOptions) GetGoPackage() string { - if m != nil && m.GoPackage != nil { - return *m.GoPackage - } - return "" -} - -func (m *FileOptions) GetCcGenericServices() bool { - if m != nil && m.CcGenericServices != nil { - return *m.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (m *FileOptions) GetJavaGenericServices() bool { - if m != nil && m.JavaGenericServices != nil { - return *m.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (m *FileOptions) GetPyGenericServices() bool { - if m != nil && m.PyGenericServices != nil { - return *m.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -func (m *FileOptions) GetPhpGenericServices() bool { - if m != nil && m.PhpGenericServices != nil { - return *m.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - -func (m *FileOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FileOptions_Deprecated -} - -func (m *FileOptions) GetCcEnableArenas() bool { - if m != nil && m.CcEnableArenas != nil { - return *m.CcEnableArenas - } - return Default_FileOptions_CcEnableArenas -} - -func (m *FileOptions) GetObjcClassPrefix() string { - if m != nil && m.ObjcClassPrefix != nil { - return *m.ObjcClassPrefix - } - return "" -} - -func (m *FileOptions) GetCsharpNamespace() string { - if m != nil && m.CsharpNamespace != nil { - return *m.CsharpNamespace - } - return "" -} - -func (m *FileOptions) GetSwiftPrefix() string { - if m != nil && m.SwiftPrefix != nil { - return *m.SwiftPrefix - } - return "" -} - -func (m *FileOptions) GetPhpClassPrefix() string { - if m != nil && m.PhpClassPrefix != nil { - return *m.PhpClassPrefix - } - return "" -} - -func (m *FileOptions) GetPhpNamespace() string { - if m != nil && m.PhpNamespace != nil { - return *m.PhpNamespace - } - return "" -} - -func (m *FileOptions) GetPhpMetadataNamespace() string { - if m != nil && m.PhpMetadataNamespace != nil { - return *m.PhpMetadataNamespace - } - return "" -} - -func (m *FileOptions) GetRubyPackage() string { - if m != nil && m.RubyPackage != nil { - return *m.RubyPackage - } - return "" -} - -func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MessageOptions struct { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. 
they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{11} -} - -var extRange_MessageOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MessageOptions -} - -func (m *MessageOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageOptions.Unmarshal(m, b) -} -func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) -} -func (m *MessageOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageOptions.Merge(m, src) -} -func (m *MessageOptions) XXX_Size() int { - return xxx_messageInfo_MessageOptions.Size(m) -} -func (m *MessageOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MessageOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MessageOptions proto.InternalMessageInfo - -const Default_MessageOptions_MessageSetWireFormat bool = false -const Default_MessageOptions_NoStandardDescriptorAccessor bool = false -const Default_MessageOptions_Deprecated bool = false - -func (m *MessageOptions) GetMessageSetWireFormat() bool { - if m != nil && m.MessageSetWireFormat != nil { - return *m.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if m != nil && m.NoStandardDescriptorAccessor != nil { - return *m.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -func (m *MessageOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MessageOptions_Deprecated -} - -func (m *MessageOptions) GetMapEntry() bool { - if m != nil && m.MapEntry != nil { - return *m.MapEntry - } - return false -} - -func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type FieldOptions struct { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` - // The jstype option determines the JavaScript type used for values of the - // field. 
The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // For Google-internal migration only. Do not use. - Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
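Field options such as jstype and lazy, described in the comments above, are set as pointer fields on the options message; anything left unset reads back as its declared default (STRING for ctype, JS_NORMAL for jstype, false for the booleans). A minimal illustration using descriptorpb:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	opts := &descriptorpb.FieldOptions{
		// Ask generated JavaScript to carry a 64-bit field as a string.
		Jstype: descriptorpb.FieldOptions_JS_STRING.Enum(),
		Lazy:   proto.Bool(true),
	}

	fmt.Println(opts.GetCtype())      // STRING    (default, unset)
	fmt.Println(opts.GetJstype())     // JS_STRING (explicitly set)
	fmt.Println(opts.GetLazy())       // true      (explicitly set)
	fmt.Println(opts.GetDeprecated()) // false     (default, unset)
}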
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{12} -} - -var extRange_FieldOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FieldOptions -} - -func (m *FieldOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldOptions.Unmarshal(m, b) -} -func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) -} -func (m *FieldOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldOptions.Merge(m, src) -} -func (m *FieldOptions) XXX_Size() int { - return xxx_messageInfo_FieldOptions.Size(m) -} -func (m *FieldOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FieldOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldOptions proto.InternalMessageInfo - -const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING -const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL -const Default_FieldOptions_Lazy bool = false -const Default_FieldOptions_Deprecated bool = false -const Default_FieldOptions_Weak bool = false - -func (m *FieldOptions) GetCtype() FieldOptions_CType { - if m != nil && m.Ctype != nil { - return *m.Ctype - } - return Default_FieldOptions_Ctype -} - -func (m *FieldOptions) GetPacked() bool { - if m != nil && m.Packed != nil { - return *m.Packed - } - return false -} - -func (m *FieldOptions) GetJstype() FieldOptions_JSType { - if m != nil && m.Jstype != nil { - return *m.Jstype - } - return Default_FieldOptions_Jstype -} - -func (m *FieldOptions) GetLazy() bool { - if m != nil && m.Lazy != nil { - return *m.Lazy - } - return Default_FieldOptions_Lazy -} - -func (m *FieldOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (m *FieldOptions) GetWeak() bool { - if m != nil && m.Weak != nil { - return *m.Weak - } - return Default_FieldOptions_Weak -} - -func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type OneofOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{13} -} - -var extRange_OneofOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OneofOptions -} - -func (m *OneofOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofOptions.Unmarshal(m, b) -} -func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) -} -func (m *OneofOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofOptions.Merge(m, src) -} -func (m *OneofOptions) XXX_Size() int { - return xxx_messageInfo_OneofOptions.Size(m) -} -func (m *OneofOptions) XXX_DiscardUnknown() { - xxx_messageInfo_OneofOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofOptions proto.InternalMessageInfo - -func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumOptions struct { - // Set this option to true to allow mapping different tag names to the same - // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{14} -} - -var extRange_EnumOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumOptions -} - -func (m *EnumOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumOptions.Unmarshal(m, b) -} -func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) -} -func (m *EnumOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumOptions.Merge(m, src) -} -func (m *EnumOptions) XXX_Size() int { - return xxx_messageInfo_EnumOptions.Size(m) -} -func (m *EnumOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumOptions proto.InternalMessageInfo - -const Default_EnumOptions_Deprecated bool = false - -func (m *EnumOptions) GetAllowAlias() bool { - if m != nil && m.AllowAlias != nil { - return *m.AllowAlias - } - return false -} - -func (m *EnumOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumOptions_Deprecated -} - -func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumValueOptions struct { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{15} -} - -var extRange_EnumValueOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumValueOptions -} - -func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) -} -func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) -} -func (m *EnumValueOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueOptions.Merge(m, src) -} -func (m *EnumValueOptions) XXX_Size() int { - return xxx_messageInfo_EnumValueOptions.Size(m) -} -func (m *EnumValueOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo - -const Default_EnumValueOptions_Deprecated bool = false - -func (m *EnumValueOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumValueOptions_Deprecated -} - -func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type ServiceOptions struct { - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{16} -} - -var extRange_ServiceOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ServiceOptions -} - -func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) -} -func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) -} -func (m *ServiceOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceOptions.Merge(m, src) -} -func (m *ServiceOptions) XXX_Size() int { - return xxx_messageInfo_ServiceOptions.Size(m) -} -func (m *ServiceOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo - -const Default_ServiceOptions_Deprecated bool = false - -func (m *ServiceOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_ServiceOptions_Deprecated -} - -func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MethodOptions struct { - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{17} -} - -var extRange_MethodOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MethodOptions -} - -func (m *MethodOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodOptions.Unmarshal(m, b) -} -func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) -} -func (m *MethodOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodOptions.Merge(m, src) -} -func (m *MethodOptions) XXX_Size() int { - return xxx_messageInfo_MethodOptions.Size(m) -} -func (m *MethodOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MethodOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodOptions proto.InternalMessageInfo - -const Default_MethodOptions_Deprecated bool = false -const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN - -func (m *MethodOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MethodOptions_Deprecated -} - -func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { - if m != nil && m.IdempotencyLevel != nil { - return *m.IdempotencyLevel - } - return Default_MethodOptions_IdempotencyLevel -} - -func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -type UninterpretedOption struct { - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{18} -} - -func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) -} -func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption.Merge(m, src) -} -func (m *UninterpretedOption) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption.Size(m) -} -func (m *UninterpretedOption) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo - -func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { - if m != nil { - return m.Name - } - return nil -} - -func (m *UninterpretedOption) GetIdentifierValue() string { - if m != nil && m.IdentifierValue != nil { - return *m.IdentifierValue - } - return "" -} - -func (m *UninterpretedOption) GetPositiveIntValue() uint64 { - if m != nil && m.PositiveIntValue != nil { - return *m.PositiveIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetNegativeIntValue() int64 { - if m != nil && m.NegativeIntValue != nil { - return *m.NegativeIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *UninterpretedOption) GetStringValue() []byte { - if m != nil { - return m.StringValue - } - return nil -} - -func (m *UninterpretedOption) GetAggregateValue() string { - if m != nil && m.AggregateValue != nil { - return *m.AggregateValue - } - return "" -} - -// The name of the uninterpreted option. Each string represents a segment in -// a dot-separated name. is_extension is true iff a segment represents an -// extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". 
-type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } -func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption_NamePart) ProtoMessage() {} -func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{18, 0} -} - -func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) -} -func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) -} -func (m *UninterpretedOption_NamePart) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) -} -func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo - -func (m *UninterpretedOption_NamePart) GetNamePart() string { - if m != nil && m.NamePart != nil { - return *m.NamePart - } - return "" -} - -func (m *UninterpretedOption_NamePart) GetIsExtension() bool { - if m != nil && m.IsExtension != nil { - return *m.IsExtension - } - return false -} - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -type SourceCodeInfo struct { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. 
For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{19} -} - -func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) -} -func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo.Merge(m, src) -} -func (m *SourceCodeInfo) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo.Size(m) -} -func (m *SourceCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo - -func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { - if m != nil { - return m.Location - } - return nil -} - -type SourceCodeInfo_Location struct { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. 
- Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` - LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{19, 0} -} - -func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) -} -func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) -} -func (m *SourceCodeInfo_Location) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo_Location.Size(m) -} -func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo - -func (m *SourceCodeInfo_Location) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { - if m != nil { - return m.Span - } - return nil -} - -func (m *SourceCodeInfo_Location) GetLeadingComments() string { - if m != nil && m.LeadingComments != nil { - return *m.LeadingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetTrailingComments() string { - if m != nil && m.TrailingComments != nil { - return *m.TrailingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { - if m != nil { - return m.LeadingDetachedComments - } - return nil -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -type GeneratedCodeInfo struct { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{20} -} - -func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) -} -func (m *GeneratedCodeInfo) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo.Size(m) -} -func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo - -func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { - if m != nil { - return m.Annotation - } - return nil -} - -type GeneratedCodeInfo_Annotation struct { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Identifies the filesystem path to the original source .proto. - SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } -func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} -func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_e5baabe45344a177, []int{20, 0} -} - -func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) -} -func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo - -func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { - if m != nil && m.SourceFile != nil { - return *m.SourceFile - } - return "" -} - -func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { - if m != nil && m.Begin != nil { - return *m.Begin - } - return 0 -} - -func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func init() { - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) - proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) - proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) - proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) - proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) - proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") - proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") - proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") - proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") - proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") - proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") - proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") - proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") - proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") - proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") - proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") - proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") - proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") - proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") - proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") - proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") - proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") - proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") - proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") - proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") - proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") - proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") - proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") - proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") - proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") - proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") - proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") -} +const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN +const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS +const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT -func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) } +var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name +var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value -var fileDescriptor_e5baabe45344a177 = []byte{ - // 2589 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6, - 0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca, - 0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee, - 0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca, - 0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80, - 0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c, - 0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73, - 0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04, - 0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a, - 0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0, - 0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52, - 0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90, - 0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88, - 0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd, - 0x54, 0xce, 0x6e, 0x67, 
0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c, - 0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6, - 0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf, - 0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79, - 0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11, - 0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53, - 0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84, - 0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4, - 0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e, - 0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9, - 0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2, - 0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02, - 0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6, - 0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d, - 0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33, - 0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79, - 0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a, - 0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a, - 0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c, - 0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56, - 0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06, - 0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1, - 0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23, - 0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01, - 0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f, - 0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d, - 0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58, - 0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32, - 0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e, - 0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb, - 0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11, - 0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02, - 0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f, - 0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31, - 0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac, - 0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f, - 0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 
0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d, - 0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac, - 0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e, - 0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72, - 0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 0xd6, 0x9d, 0x5b, - 0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2, - 0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e, - 0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94, - 0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a, - 0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61, - 0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0, - 0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5, - 0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a, - 0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a, - 0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8, - 0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64, - 0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c, - 0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8, - 0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb, - 0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2, - 0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33, - 0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4, - 0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15, - 0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39, - 0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9, - 0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41, - 0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40, - 0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35, - 0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0, - 0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4, - 0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53, - 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e, - 0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d, - 0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e, - 0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f, - 0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36, - 0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 
0x64, 0x94, 0xe3, 0x60, - 0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1, - 0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d, - 0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45, - 0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58, - 0xbe, 0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3, - 0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c, - 0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87, - 0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49, - 0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26, - 0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39, - 0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c, - 0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77, - 0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83, - 0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87, - 0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20, - 0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6, - 0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e, - 0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c, - 0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0, - 0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8, - 0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0, - 0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3, - 0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1, - 0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53, - 0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42, - 0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b, - 0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15, - 0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae, - 0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2, - 0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35, - 0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0, - 0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82, - 0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a, - 0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c, - 0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5, - 0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29, - 
0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca, - 0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd, - 0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18, - 0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb, - 0x39, 0x37, 0xa5, 0x1f, 0xd2, 0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae, - 0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6, - 0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a, - 0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47, - 0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8, - 0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e, - 0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0, - 0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70, - 0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41, - 0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e, - 0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c, - 0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47, - 0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2, - 0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66, - 0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2, - 0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0, - 0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9, - 0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40, - 0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c, - 0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe, - 0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d, - 0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99, - 0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69, - 0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2, - 0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda, - 0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86, - 0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24, - 0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8, - 0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25, - 0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e, - 0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee, - 0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39, - 0x89, 0xaf, 0xa9, 0xf7, 
0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f, - 0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00, +type FileDescriptorSet = descriptorpb.FileDescriptorSet +type FileDescriptorProto = descriptorpb.FileDescriptorProto +type DescriptorProto = descriptorpb.DescriptorProto +type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions +type FieldDescriptorProto = descriptorpb.FieldDescriptorProto +type OneofDescriptorProto = descriptorpb.OneofDescriptorProto +type EnumDescriptorProto = descriptorpb.EnumDescriptorProto +type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto +type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto +type MethodDescriptorProto = descriptorpb.MethodDescriptorProto + +const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming +const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming + +type FileOptions = descriptorpb.FileOptions + +const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles +const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8 +const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor +const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices +const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices +const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices +const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices +const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated +const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas + +type MessageOptions = descriptorpb.MessageOptions + +const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat +const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor +const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated + +type FieldOptions = descriptorpb.FieldOptions + +const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype +const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype +const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy +const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated +const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak + +type OneofOptions = descriptorpb.OneofOptions +type EnumOptions = descriptorpb.EnumOptions + +const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated + +type EnumValueOptions = descriptorpb.EnumValueOptions + +const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated + +type ServiceOptions = descriptorpb.ServiceOptions + +const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated + +type MethodOptions = descriptorpb.MethodOptions + +const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated +const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel 
+ +type UninterpretedOption = descriptorpb.UninterpretedOption +type SourceCodeInfo = descriptorpb.SourceCodeInfo +type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo +type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange +type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange +type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange +type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart +type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location +type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation + +var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, + 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x32, +} + +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() } +func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() { + if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil + file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil + 
file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto deleted file mode 100644 index ed08fcbc5..000000000 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto +++ /dev/null @@ -1,883 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// Based on original Protocol Buffers design by -// Sanjay Ghemawat, Jeff Dean, and others. -// -// The messages in this file describe the definitions found in .proto files. -// A valid .proto file can be translated directly to a FileDescriptorProto -// without any other information (e.g. without reading its imports). - - -syntax = "proto2"; - -package google.protobuf; -option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DescriptorProtos"; -option csharp_namespace = "Google.Protobuf.Reflection"; -option objc_class_prefix = "GPB"; -option cc_enable_arenas = true; - -// descriptor.proto must be optimized for speed because reflection-based -// algorithms don't work during bootstrapping. -option optimize_for = SPEED; - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -message FileDescriptorSet { - repeated FileDescriptorProto file = 1; -} - -// Describes a complete .proto file. -message FileDescriptorProto { - optional string name = 1; // file name, relative to root of source tree - optional string package = 2; // e.g. "foo", "foo.bar", etc. - - // Names of files imported by this file. - repeated string dependency = 3; - // Indexes of the public imported files in the dependency list above. 
- repeated int32 public_dependency = 10; - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - repeated int32 weak_dependency = 11; - - // All top-level definitions in this file. - repeated DescriptorProto message_type = 4; - repeated EnumDescriptorProto enum_type = 5; - repeated ServiceDescriptorProto service = 6; - repeated FieldDescriptorProto extension = 7; - - optional FileOptions options = 8; - - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - optional SourceCodeInfo source_code_info = 9; - - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - optional string syntax = 12; -} - -// Describes a message type. -message DescriptorProto { - optional string name = 1; - - repeated FieldDescriptorProto field = 2; - repeated FieldDescriptorProto extension = 6; - - repeated DescriptorProto nested_type = 3; - repeated EnumDescriptorProto enum_type = 4; - - message ExtensionRange { - optional int32 start = 1; - optional int32 end = 2; - - optional ExtensionRangeOptions options = 3; - } - repeated ExtensionRange extension_range = 5; - - repeated OneofDescriptorProto oneof_decl = 8; - - optional MessageOptions options = 7; - - // Range of reserved tag numbers. Reserved tag numbers may not be used by - // fields or extension ranges in the same message. Reserved ranges may - // not overlap. - message ReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Exclusive. - } - repeated ReservedRange reserved_range = 9; - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - repeated string reserved_name = 10; -} - -message ExtensionRangeOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -// Describes a field within a message. -message FieldDescriptorProto { - enum Type { - // 0 is reserved for errors. - // Order is weird for historical reasons. - TYPE_DOUBLE = 1; - TYPE_FLOAT = 2; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - TYPE_INT64 = 3; - TYPE_UINT64 = 4; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - TYPE_INT32 = 5; - TYPE_FIXED64 = 6; - TYPE_FIXED32 = 7; - TYPE_BOOL = 8; - TYPE_STRING = 9; - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - TYPE_GROUP = 10; - TYPE_MESSAGE = 11; // Length-delimited aggregate. - - // New in version 2. - TYPE_BYTES = 12; - TYPE_UINT32 = 13; - TYPE_ENUM = 14; - TYPE_SFIXED32 = 15; - TYPE_SFIXED64 = 16; - TYPE_SINT32 = 17; // Uses ZigZag encoding. - TYPE_SINT64 = 18; // Uses ZigZag encoding. - }; - - enum Label { - // 0 is reserved for errors - LABEL_OPTIONAL = 1; - LABEL_REQUIRED = 2; - LABEL_REPEATED = 3; - }; - - optional string name = 1; - optional int32 number = 3; - optional Label label = 4; - - // If type_name is set, this need not be set. 
If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - optional Type type = 5; - - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - optional string type_name = 6; - - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - optional string extendee = 2; - - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - optional string default_value = 7; - - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - optional int32 oneof_index = 9; - - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - optional string json_name = 10; - - optional FieldOptions options = 8; -} - -// Describes a oneof. -message OneofDescriptorProto { - optional string name = 1; - optional OneofOptions options = 2; -} - -// Describes an enum type. -message EnumDescriptorProto { - optional string name = 1; - - repeated EnumValueDescriptorProto value = 2; - - optional EnumOptions options = 3; - - // Range of reserved numeric values. Reserved values may not be used by - // entries in the same enum. Reserved ranges may not overlap. - // - // Note that this is distinct from DescriptorProto.ReservedRange in that it - // is inclusive such that it can appropriately represent the entire int32 - // domain. - message EnumReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Inclusive. - } - - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - repeated EnumReservedRange reserved_range = 4; - - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. - repeated string reserved_name = 5; -} - -// Describes a value within an enum. -message EnumValueDescriptorProto { - optional string name = 1; - optional int32 number = 2; - - optional EnumValueOptions options = 3; -} - -// Describes a service. -message ServiceDescriptorProto { - optional string name = 1; - repeated MethodDescriptorProto method = 2; - - optional ServiceOptions options = 3; -} - -// Describes a method of a service. -message MethodDescriptorProto { - optional string name = 1; - - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. 
- optional string input_type = 2; - optional string output_type = 3; - - optional MethodOptions options = 4; - - // Identifies if client streams multiple client messages - optional bool client_streaming = 5 [default=false]; - // Identifies if server streams multiple server messages - optional bool server_streaming = 6 [default=false]; -} - - -// =================================================================== -// Options - -// Each of the definitions above may have "options" attached. These are -// just annotations which may cause code to be generated slightly differently -// or may contain hints for code that manipulates protocol messages. -// -// Clients may define custom options as extensions of the *Options messages. -// These extensions may not yet be known at parsing time, so the parser cannot -// store the values in them. Instead it stores them in a field in the *Options -// message called uninterpreted_option. This field must have the same name -// across all *Options messages. We then use this field to populate the -// extensions when we build a descriptor, at which point all protos have been -// parsed and so all extensions are known. -// -// Extension numbers for custom options may be chosen as follows: -// * For options which will only be used within a single application or -// organization, or for experimental options, use field numbers 50000 -// through 99999. It is up to you to ensure that you do not use the -// same number for multiple options. -// * For options which will be published and used publicly by multiple -// independent entities, e-mail protobuf-global-extension-registry@google.com -// to reserve extension numbers. Simply provide your project name (e.g. -// Objective-C plugin) and your project website (if available) -- there's no -// need to explain how you intend to use them. Usually you only need one -// extension number. You can declare multiple options with only one extension -// number by putting them in a sub-message. See the Custom Options section of -// the docs for examples: -// https://developers.google.com/protocol-buffers/docs/proto#options -// If this turns out to be popular, a web service will be set up -// to automatically assign option numbers. - - -message FileOptions { - - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - optional string java_package = 1; - - - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - optional string java_outer_classname = 8; - - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - optional bool java_multiple_files = 10 [default=false]; - - // This option does nothing. 
- optional bool java_generate_equals_and_hash = 20 [deprecated=true]; - - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - optional bool java_string_check_utf8 = 27 [default=false]; - - - // Generated classes can be optimized for speed or code size. - enum OptimizeMode { - SPEED = 1; // Generate complete code for parsing, serialization, - // etc. - CODE_SIZE = 2; // Use ReflectionOps to implement these methods. - LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. - } - optional OptimizeMode optimize_for = 9 [default=SPEED]; - - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - optional string go_package = 11; - - - - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - optional bool cc_generic_services = 16 [default=false]; - optional bool java_generic_services = 17 [default=false]; - optional bool py_generic_services = 18 [default=false]; - optional bool php_generic_services = 42 [default=false]; - - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - optional bool deprecated = 23 [default=false]; - - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - optional bool cc_enable_arenas = 31 [default=false]; - - - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - optional string objc_class_prefix = 36; - - // Namespace for generated classes; defaults to the package. - optional string csharp_namespace = 37; - - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - optional string swift_prefix = 39; - - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - optional string php_class_prefix = 40; - - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. 
- optional string php_namespace = 41; - - - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be used - // for determining the namespace. - optional string php_metadata_namespace = 44; - - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. - optional string ruby_package = 45; - - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. - // See the documentation for the "Options" section above. - extensions 1000 to max; - - reserved 38; -} - -message MessageOptions { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - optional bool message_set_wire_format = 1 [default=false]; - - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - optional bool no_standard_descriptor_accessor = 2 [default=false]; - - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - optional bool deprecated = 3 [default=false]; - - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementions still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - optional bool map_entry = 7; - - reserved 8; // javalite_serializable - reserved 9; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. 
- extensions 1000 to max; -} - -message FieldOptions { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - optional CType ctype = 1 [default = STRING]; - enum CType { - // Default mode. - STRING = 0; - - CORD = 1; - - STRING_PIECE = 2; - } - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - optional bool packed = 2; - - // The jstype option determines the JavaScript type used for values of the - // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - optional JSType jstype = 6 [default = JS_NORMAL]; - enum JSType { - // Use the default type. - JS_NORMAL = 0; - - // Use JavaScript strings. - JS_STRING = 1; - - // Use JavaScript numbers. - JS_NUMBER = 2; - } - - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. 
- optional bool lazy = 5 [default=false]; - - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - optional bool deprecated = 3 [default=false]; - - // For Google-internal migration only. Do not use. - optional bool weak = 10 [default=false]; - - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; - - reserved 4; // removed jtype -} - -message OneofOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumOptions { - - // Set this option to true to allow mapping different tag names to the same - // value. - optional bool allow_alias = 2; - - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - optional bool deprecated = 3 [default=false]; - - reserved 5; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumValueOptions { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - optional bool deprecated = 1 [default=false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message ServiceOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - optional bool deprecated = 33 [default=false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message MethodOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. 
- optional bool deprecated = 33 [default=false]; - - // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, - // or neither? HTTP based RPC implementation may choose GET verb for safe - // methods, and PUT verb for idempotent methods instead of the default POST. - enum IdempotencyLevel { - IDEMPOTENCY_UNKNOWN = 0; - NO_SIDE_EFFECTS = 1; // implies idempotent - IDEMPOTENT = 2; // idempotent, but may have side effects - } - optional IdempotencyLevel idempotency_level = - 34 [default=IDEMPOTENCY_UNKNOWN]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -message UninterpretedOption { - // The name of the uninterpreted option. Each string represents a segment in - // a dot-separated name. is_extension is true iff a segment represents an - // extension (denoted with parentheses in options specs in .proto files). - // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents - // "foo.(bar.baz).qux". - message NamePart { - required string name_part = 1; - required bool is_extension = 2; - } - repeated NamePart name = 2; - - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. - optional string identifier_value = 3; - optional uint64 positive_int_value = 4; - optional int64 negative_int_value = 5; - optional double double_value = 6; - optional bytes string_value = 7; - optional string aggregate_value = 8; -} - -// =================================================================== -// Optional source code info - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -message SourceCodeInfo { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. 
This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendent. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - repeated Location location = 1; - message Location { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - repeated int32 path = 1 [packed=true]; - - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - repeated int32 span = 2 [packed=true]; - - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. 
- // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - optional string leading_comments = 3; - optional string trailing_comments = 4; - repeated string leading_detached_comments = 6; - } -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -message GeneratedCodeInfo { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - repeated Annotation annotation = 1; - message Annotation { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - repeated int32 path = 1 [packed=true]; - - // Identifies the filesystem path to the original source .proto. - optional string source_file = 2; - - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - optional int32 begin = 3; - - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). - optional int32 end = 4; - } -} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/main.go b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go new file mode 100644 index 000000000..d45b719d1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/main.go @@ -0,0 +1,74 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// protoc-gen-go is a plugin for the Google protocol buffer compiler to generate +// Go code. Install it by building this program and making it accessible within +// your PATH with the name: +// protoc-gen-go +// +// The 'go' suffix becomes part of the argument for the protocol compiler, +// such that it can be invoked as: +// protoc --go_out=paths=source_relative:. path/to/file.proto +// +// This generates Go bindings for the protocol buffer defined by file.proto. 
+// With that input, the output will be written to: +// path/to/file.pb.go +// +// See the README and documentation for protocol buffers to learn more: +// https://developers.google.com/protocol-buffers/ +package main + +import ( + "flag" + "fmt" + "strings" + + "github.com/golang/protobuf/internal/gengogrpc" + gengo "google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo" + "google.golang.org/protobuf/compiler/protogen" +) + +func main() { + var ( + flags flag.FlagSet + plugins = flags.String("plugins", "", "list of plugins to enable (supported values: grpc)") + importPrefix = flags.String("import_prefix", "", "prefix to prepend to import paths") + ) + importRewriteFunc := func(importPath protogen.GoImportPath) protogen.GoImportPath { + switch importPath { + case "context", "fmt", "math": + return importPath + } + if *importPrefix != "" { + return protogen.GoImportPath(*importPrefix) + importPath + } + return importPath + } + protogen.Options{ + ParamFunc: flags.Set, + ImportRewriteFunc: importRewriteFunc, + }.Run(func(gen *protogen.Plugin) error { + grpc := false + for _, plugin := range strings.Split(*plugins, ",") { + switch plugin { + case "grpc": + grpc = true + case "": + default: + return fmt.Errorf("protoc-gen-go: unknown plugin %q", plugin) + } + } + for _, f := range gen.Files { + if !f.Generate { + continue + } + g := gengo.GenerateFile(gen, f) + if grpc { + gengogrpc.GenerateFileContent(gen, f, g) + } + } + gen.SupportedFeatures = gengo.SupportedFeatures + return nil + }) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go index 70276e8f5..e729dcff1 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -1,141 +1,165 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - import ( "fmt" - "reflect" "strings" "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + anypb "github.com/golang/protobuf/ptypes/any" ) -const googleApis = "type.googleapis.com/" +const urlPrefix = "type.googleapis.com/" -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. -func AnyMessageName(any *any.Any) (string, error) { +// AnyMessageName returns the message name contained in an anypb.Any message. +// Most type assertions should use the Is function instead. +func AnyMessageName(any *anypb.Any) (string, error) { + name, err := anyMessageName(any) + return string(name), err +} +func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { if any == nil { return "", fmt.Errorf("message is nil") } - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { + name := protoreflect.FullName(any.TypeUrl) + if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) } - return any.TypeUrl[slash+1:], nil + return name, nil } -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. -func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) +// MarshalAny marshals the given message m into an anypb.Any message. +func MarshalAny(m proto.Message) (*anypb.Any, error) { + switch dm := m.(type) { + case DynamicAny: + m = dm.Message + case *DynamicAny: + if dm == nil { + return nil, proto.ErrNil + } + m = dm.Message + } + b, err := proto.Marshal(m) if err != nil { return nil, err } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message + return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil } -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. It returns an error if corresponding message -// type isn't linked in. -func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) +// Empty returns a new message of the type specified in an anypb.Any message. +// It returns protoregistry.NotFound if the corresponding message type could not +// be resolved in the global registry. 
+func Empty(any *anypb.Any) (proto.Message, error) { + name, err := anyMessageName(any) if err != nil { return nil, err } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + mt, err := protoregistry.GlobalTypes.FindMessageByName(name) + if err != nil { + return nil, err } - return reflect.New(t.Elem()).Interface().(proto.Message), nil + return proto.MessageV1(mt.New().Interface()), nil } -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. +// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message +// into the provided message m. It returns an error if the target message +// does not match the type in the Any message or if an unmarshal error occurs. // -// pb can be a proto.Message, or a *DynamicAny. -func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { +// The target message m may be a *DynamicAny message. If the underlying message +// type could not be resolved, then this returns protoregistry.NotFound. +func UnmarshalAny(any *anypb.Any, m proto.Message) error { + if dm, ok := m.(*DynamicAny); ok { + if dm.Message == nil { var err error - d.Message, err = Empty(any) + dm.Message, err = Empty(any) if err != nil { return err } } - return UnmarshalAny(any, d.Message) + m = dm.Message } - aname, err := AnyMessageName(any) + anyName, err := AnyMessageName(any) if err != nil { return err } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + msgName := proto.MessageName(m) + if anyName != msgName { + return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) } - return proto.Unmarshal(any.Value, pb) + return proto.Unmarshal(any.Value, m) } -// Is returns true if any value contains a given message type. -func Is(any *any.Any, pb proto.Message) bool { - // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), - // but it avoids scanning TypeUrl for the slash. - if any == nil { +// Is reports whether the Any message contains a message of the specified type. +func Is(any *anypb.Any, m proto.Message) bool { + if any == nil || m == nil { return false } - name := proto.MessageName(pb) - prefix := len(any.TypeUrl) - len(name) - return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name + name := proto.MessageName(m) + if !strings.HasSuffix(any.TypeUrl, name) { + return false + } + return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in an anypb.Any message. +// The allocated message is stored in the embedded proto.Message. +// +// Example: +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... 
} +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct{ proto.Message } + +func (m DynamicAny) String() string { + if m.Message == nil { + return "" + } + return m.Message.String() +} +func (m DynamicAny) Reset() { + if m.Message == nil { + return + } + m.Message.Reset() +} +func (m DynamicAny) ProtoMessage() { + return +} +func (m DynamicAny) ProtoReflect() protoreflect.Message { + if m.Message == nil { + return nil + } + return dynamicAny{proto.MessageReflect(m.Message)} +} + +type dynamicAny struct{ protoreflect.Message } + +func (m dynamicAny) Type() protoreflect.MessageType { + return dynamicAnyType{m.Message.Type()} +} +func (m dynamicAny) New() protoreflect.Message { + return dynamicAnyType{m.Message.Type()}.New() +} +func (m dynamicAny) Interface() protoreflect.ProtoMessage { + return DynamicAny{proto.MessageV1(m.Message.Interface())} +} + +type dynamicAnyType struct{ protoreflect.MessageType } + +func (t dynamicAnyType) New() protoreflect.Message { + return dynamicAny{t.MessageType.New()} +} +func (t dynamicAnyType) Zero() protoreflect.Message { + return dynamicAny{t.MessageType.Zero()} } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index fb8765a60..0ef27d33d 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,201 +1,62 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto +// source: github.com/golang/protobuf/ptypes/any/any.proto package any import ( - fmt "fmt" - math "math" - - proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... 
-// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +// Symbols defined in public import of google/protobuf/any.proto. 
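As a minimal usage sketch of the ptypes helpers rewritten above (MarshalAny, UnmarshalAny, Is, DynamicAny) — illustrative only, not part of the vendored patch; the durationpb payload is chosen purely because it is a convenient well-known message type, not anything this change requires:

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durationpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a message; MarshalAny prefixes the type URL with "type.googleapis.com/".
	src := &durationpb.Duration{Seconds: 90}
	packed, err := ptypes.MarshalAny(src)
	if err != nil {
		panic(err)
	}

	// Unpack into a concrete message when the target type is known in advance.
	dst := &durationpb.Duration{}
	if err := ptypes.UnmarshalAny(packed, dst); err != nil {
		panic(err)
	}

	// Or unpack into a DynamicAny; the concrete type is resolved from the
	// global registry and stored in the embedded Message field.
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(packed, &dyn); err != nil {
		panic(err)
	}
	fmt.Println(ptypes.Is(packed, dst), dyn.Message)
}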
-func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_b53526c13ae22eb4, []int{0} -} +type Any = anypb.Any -func (*Any) XXX_WellKnownType() string { return "Any" } +var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor -func (m *Any) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Any.Unmarshal(m, b) -} -func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Any.Marshal(b, m, deterministic) -} -func (m *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(m, src) -} -func (m *Any) XXX_Size() int { - return xxx_messageInfo_Any.Size(m) +var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } -func (m *Any) XXX_DiscardUnknown() { - xxx_messageInfo_Any.DiscardUnknown(m) -} - -var xxx_messageInfo_Any proto.InternalMessageInfo -func (m *Any) GetTypeUrl() string { - if m != nil { - return m.TypeUrl - } - return "" +var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func (m *Any) GetValue() []byte { - if m != nil { - return m.Value +func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } +func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { + if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { + return } - return nil -} - -func init() { - proto.RegisterType((*Any)(nil), "google.protobuf.Any") -} - -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } - -var fileDescriptor_b53526c13ae22eb4 = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, - 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, - 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, - 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, - 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 
0xc4, 0x09, 0x60, 0x8c, 0x52, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, - 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, - 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, - 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_any_any_proto = out.File + file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto deleted file mode 100644 index 493294255..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ /dev/null @@ -1,154 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
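A brief sketch of what the regenerated any.pb.go above implies for callers (illustrative only, based on the `type Any = anypb.Any` alias it introduces): the legacy github.com/golang/protobuf import path and the newer google.golang.org/protobuf path now name the same type, so values can be passed between old and new APIs without conversion.

package main

import (
	legacy "github.com/golang/protobuf/ptypes/any"
	anypb "google.golang.org/protobuf/types/known/anypb"
)

func main() {
	// Both identifiers resolve to the same underlying type via the alias,
	// so this assignment compiles with no conversion in either direction.
	a := &legacy.Any{TypeUrl: "type.googleapis.com/google.protobuf.Duration"}
	var b *anypb.Any = a
	_ = b
}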
- -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/any"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. 
(Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. - bytes value = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go index c0d595da7..fb9edd5c6 100644 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -1,35 +1,6 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. -/* -Package ptypes contains code for interacting with well-known types. -*/ +// Package ptypes provides functionality for interacting with well-known types. package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go index 26d1ca2fb..6110ae8a4 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -1,102 +1,72 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - import ( "errors" "fmt" "time" - durpb "github.com/golang/protobuf/ptypes/duration" + durationpb "github.com/golang/protobuf/ptypes/duration" ) +// Range of google.protobuf.Duration as specified in duration.proto. +// This is about 10,000 years in seconds. const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) minSeconds = -maxSeconds ) -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. -func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { +// Duration converts a durationpb.Duration to a time.Duration. +// Duration returns an error if dur is invalid or overflows a time.Duration. 
+func Duration(dur *durationpb.Duration) (time.Duration, error) { + if err := validateDuration(dur); err != nil { return 0, err } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + d := time.Duration(dur.Seconds) * time.Second + if int64(d/time.Second) != dur.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) * time.Nanosecond - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + if dur.Nanos != 0 { + d += time.Duration(dur.Nanos) * time.Nanosecond + if (d < 0) != (dur.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) } } return d, nil } -// DurationProto converts a time.Duration to a durpb.Duration. -func DurationProto(d time.Duration) *durpb.Duration { +// DurationProto converts a time.Duration to a durationpb.Duration. +func DurationProto(d time.Duration) *durationpb.Duration { nanos := d.Nanoseconds() secs := nanos / 1e9 nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, + return &durationpb.Duration{ + Seconds: int64(secs), Nanos: int32(nanos), } } + +// validateDuration determines whether the durationpb.Duration is valid +// according to the definition in google/protobuf/duration.proto. +// A valid durpb.Duration may still be too large to fit into a time.Duration +// Note that the range of durationpb.Duration is about 10,000 years, +// while the range of time.Duration is about 290 years. +func validateDuration(dur *durationpb.Duration) error { + if dur == nil { + return errors.New("duration: nil Duration") + } + if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", dur) + } + if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", dur) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index 2e875b80c..d0079ee3e 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,162 +1,63 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto +// source: github.com/golang/protobuf/ptypes/duration/duration.proto package duration import ( - fmt "fmt" - math "math" - - proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
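As a quick illustration of the renamed conversion helpers above (a sketch with arbitrary values, not part of the vendored diff):

    package main

    import (
        "fmt"
        "time"

        "github.com/golang/protobuf/ptypes"
        durpb "github.com/golang/protobuf/ptypes/duration"
    )

    func main() {
        // time.Duration -> google.protobuf.Duration.
        pb := ptypes.DurationProto(1500 * time.Millisecond)
        fmt.Println(pb.Seconds, pb.Nanos) // 1 500000000

        // google.protobuf.Duration -> time.Duration.
        d, err := ptypes.Duration(pb)
        fmt.Println(d, err) // 1.5s <nil>

        // validateDuration rejects mismatched signs on Seconds and Nanos.
        _, err = ptypes.Duration(&durpb.Duration{Seconds: 1, Nanos: -1})
        fmt.Println(err) // duration: ... seconds and nanos have different signs
    }
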
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +// Symbols defined in public import of google/protobuf/duration.proto. 
-func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_23597b2ebd7ac6c5, []int{0} -} +type Duration = durationpb.Duration -func (*Duration) XXX_WellKnownType() string { return "Duration" } +var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor -func (m *Duration) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Duration.Unmarshal(m, b) -} -func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Duration.Marshal(b, m, deterministic) -} -func (m *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(m, src) -} -func (m *Duration) XXX_Size() int { - return xxx_messageInfo_Duration.Size(m) +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *Duration) XXX_DiscardUnknown() { - xxx_messageInfo_Duration.DiscardUnknown(m) -} - -var xxx_messageInfo_Duration proto.InternalMessageInfo -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos +func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } +func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { + if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { + return } - return 0 -} - -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") -} - -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } - -var fileDescriptor_23597b2ebd7ac6c5 = []byte{ - // 190 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, - 0x5c, 0x1c, 
0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, - 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, - 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, - 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, - 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, - 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, - 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File + file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto deleted file mode 100644 index 975fce41a..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ /dev/null @@ -1,117 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
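The regenerated duration.pb.go above is now a forwarding file: `type Duration = durationpb.Duration` re-exports the type from the vendored google.golang.org/protobuf module, so the old vendored import path and the new module name the same Go type. A minimal sketch of what that alias means in practice (illustrative only):

    package main

    import (
        "fmt"

        olddur "github.com/golang/protobuf/ptypes/duration"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Because Duration in the old package is an alias for durationpb.Duration,
        // a value of one type can be assigned to a variable of the other.
        var d *olddur.Duration = &durationpb.Duration{Seconds: 3, Nanos: 500000000}
        fmt.Println(d.Seconds, d.Nanos) // 3 500000000
    }
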
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/duration"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DurationProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -message Duration { - - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. 
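The JSON mapping described in the deleted comment above ("3s", "3.000000001s", and so on) can be reproduced with the protojson encoder from the vendored google.golang.org/protobuf module; a small sketch, assuming durationpb.New is present in the vendored version:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Duration is encoded in JSON as a decimal number of seconds with an "s" suffix.
        b, err := protojson.Marshal(durationpb.New(3*time.Second + time.Nanosecond))
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // "3.000000001s"
    }
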
For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go new file mode 100644 index 000000000..16686a655 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/empty/empty.proto + +package empty + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/empty.proto. + +type Empty = emptypb.Empty + +var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{ + 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d, + 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() } +func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() { + if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File + file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 
8da0df01a..026d0d491 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -1,46 +1,18 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package ptypes -// This file implements operations on google.protobuf.Timestamp. - import ( "errors" "fmt" "time" - tspb "github.com/golang/protobuf/ptypes/timestamp" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" ) +// Range of google.protobuf.Duration as specified in timestamp.proto. const ( // Seconds field of the earliest valid Timestamp. // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). @@ -50,44 +22,18 @@ const ( maxValidSeconds = 253402300800 ) -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. -func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. +// Timestamp converts a timestamppb.Timestamp to a time.Time. // It returns an error if the argument is invalid. 
// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the +// Unlike most Go functions, if Timestamp returns an error, the first return +// value is not the zero time.Time. Instead, it is the value obtained from the // time.Unix function when passed the contents of the Timestamp, in the UTC // locale. This may or may not be a meaningful time; many invalid Timestamps // do map to valid time.Times. // // A nil Timestamp returns an error. The first return value in that case is // undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { +func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { // Don't return the zero value on error, because corresponds to a valid // timestamp. Instead return whatever time.Unix gives us. var t time.Time @@ -100,7 +46,7 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) { } // TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *tspb.Timestamp { +func TimestampNow() *timestamppb.Timestamp { ts, err := TimestampProto(time.Now()) if err != nil { panic("ptypes: time.Now() out of Timestamp range") @@ -110,8 +56,8 @@ func TimestampNow() *tspb.Timestamp { // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. -func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - ts := &tspb.Timestamp{ +func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { + ts := &timestamppb.Timestamp{ Seconds: t.Unix(), Nanos: int32(t.Nanosecond()), } @@ -121,12 +67,37 @@ func TimestampProto(t time.Time) (*tspb.Timestamp, error) { return ts, nil } -// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid -// Timestamps, it returns an error message in parentheses. -func TimestampString(ts *tspb.Timestamp) string { +// TimestampString returns the RFC 3339 string for valid Timestamps. +// For invalid Timestamps, it returns an error message in parentheses. +func TimestampString(ts *timestamppb.Timestamp) string { t, err := Timestamp(ts) if err != nil { return fmt.Sprintf("(%v)", err) } return t.Format(time.RFC3339Nano) } + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) +// and has a Nanos field in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes the problem. +// +// Every valid Timestamp can be represented by a time.Time, +// but the converse is not true. +func validateTimestamp(ts *timestamppb.Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index 66532d3d0..a76f80760 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,180 +1,64 @@ // Code generated by protoc-gen-go. DO NOT EDIT.
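A short sketch (illustrative only, not part of the patch) of the timestamp helpers shown above, using the vendored ptypes package:

    package main

    import (
        "fmt"
        "time"

        "github.com/golang/protobuf/ptypes"
    )

    func main() {
        // time.Time -> google.protobuf.Timestamp; values outside the range
        // [0001-01-01, 10000-01-01) are rejected.
        ts, err := ptypes.TimestampProto(time.Date(2021, 1, 2, 3, 4, 5, 0, time.UTC))
        if err != nil {
            panic(err)
        }

        // google.protobuf.Timestamp -> time.Time, plus the RFC 3339 helper.
        t, _ := ptypes.Timestamp(ts)
        fmt.Println(t.UTC())                    // 2021-01-02 03:04:05 +0000 UTC
        fmt.Println(ptypes.TimestampString(ts)) // 2021-01-02T03:04:05Z
    }
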
-// source: google/protobuf/timestamp.proto +// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto package timestamp import ( - fmt "fmt" - math "math" - - proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. 
A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +// Symbols defined in public import of google/protobuf/timestamp.proto. 
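The regenerated timestamp.pb.go replaces the old struct with forwarding declarations (`type Timestamp = timestamppb.Timestamp`, shown just below), so both import paths refer to the same generated type, and the RFC 3339 JSON mapping described in the deleted comment is handled by the new runtime. A minimal sketch, assuming timestamppb.New and protojson are available in the vendored google.golang.org/protobuf:

    package main

    import (
        "fmt"
        "time"

        oldts "github.com/golang/protobuf/ptypes/timestamp"
        "google.golang.org/protobuf/encoding/protojson"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        // The alias makes the old vendored path and the new module interchangeable.
        var ts *oldts.Timestamp = timestamppb.New(time.Date(2017, 1, 15, 1, 30, 15, 0, time.UTC))

        // In JSON, a Timestamp is an RFC 3339 string in UTC.
        b, err := protojson.Marshal(ts)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // "2017-01-15T01:30:15Z"
    }
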
-func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_292007bbfe81227e, []int{0} -} +type Timestamp = timestamppb.Timestamp -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Timestamp.Unmarshal(m, b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) -} -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) -} -func (m *Timestamp) XXX_Size() int { - return xxx_messageInfo_Timestamp.Size(m) +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) -} - -var xxx_messageInfo_Timestamp proto.InternalMessageInfo -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return } - return 0 -} - -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") -} - -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } - -var fileDescriptor_292007bbfe81227e = []byte{ - // 191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 
0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, - 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, - 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, - 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, - 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, - 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, - 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, - 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, - 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil } diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto deleted file mode 100644 index eafb3fa03..000000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ /dev/null @@ -1,135 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/timestamp"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. 
A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -message Timestamp { - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 042091d9b..000000000 --- a/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. -testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index bcfa19520..000000000 --- a/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index 931ae3160..000000000 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). 
-# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea12879a..000000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. 
-tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go deleted file mode 100644 index 72efb0353..000000000 --- a/vendor/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. 
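Since the vendored github.com/golang/snappy sources are deleted in this hunk, for reference a minimal sketch of the block-format API that the removed decode.go documents (Encode/Decode with an optional caller-supplied destination buffer); illustrative only, not part of the diff:

    package main

    import (
        "fmt"

        "github.com/golang/snappy"
    )

    func main() {
        src := []byte("hello hello hello hello hello hello")

        // Encode compresses a whole buffer; passing nil lets snappy allocate
        // a destination of the right size.
        compressed := snappy.Encode(nil, src)

        // DecodedLen reads the uncompressed length from the block header.
        n, err := snappy.DecodedLen(compressed)
        if err != nil {
            panic(err)
        }

        // Decode reverses Encode; dst and src must not overlap.
        decompressed, err := snappy.Decode(make([]byte, n), compressed)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(decompressed) == string(src)) // true
    }
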
It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4], true) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return 0, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. 
- n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.decoded[:n], false) { - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return 0, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err - } - } -} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go deleted file mode 100644 index fcd192b84..000000000 --- a/vendor/github.com/golang/snappy/decode_amd64.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// decode has the same semantics as in decode_other.go. -// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s deleted file mode 100644 index e6179f65e..000000000 --- a/vendor/github.com/golang/snappy/decode_amd64.s +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. 
- - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. 
- ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - CX == length && CX > 0 - // - DX == offset - - // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt - - // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt - - // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop - -slowForwardCopy: - // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte and patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from to will repeat the pattern - // once, after which we can move two bytes without moving : - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0. 
In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go deleted file mode 100644 index 8c9f2049b..000000000 --- a/vendor/github.com/golang/snappy/decode_other.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. 
Unlike - // the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] - } - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go deleted file mode 100644 index 8d393e904..000000000 --- a/vendor/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. 
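As a hypothetical illustration (not part of this diff): a caller that wants to avoid the allocation inside Encode can pre-size dst with MaxEncodedLen, whose worst-case bound is derived in the comment that follows. The helper name encodeInto and the sample input are assumptions:

package main

import (
    "fmt"

    "github.com/golang/snappy"
)

// encodeInto sizes dst with MaxEncodedLen so Encode can reuse the buffer
// instead of allocating; Encode then returns a prefix of dst.
func encodeInto(dst, src []byte) []byte {
    n := snappy.MaxEncodedLen(len(src))
    if n < 0 {
        return nil // srcLen is too large to encode
    }
    if cap(dst) < n {
        dst = make([]byte, n)
    }
    return snappy.Encode(dst[:n], src)
}

func main() {
    out := encodeInto(nil, []byte("hello hello hello hello hello"))
    fmt.Println(len(out))
}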
-// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. 
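The NewWriter/NewBufferedWriter docs above describe the framing (stream) format rather than the block format, and they steer callers toward the buffered variant for many small writes. A minimal sketch pairing NewBufferedWriter with NewReader, assuming the upstream import path and invented payload; error handling is kept deliberately blunt:

package main

import (
    "bytes"
    "fmt"
    "io"

    "github.com/golang/snappy"
)

func main() {
    var framed bytes.Buffer

    // NewBufferedWriter batches many small writes into framing-format
    // chunks; Close (or Flush) must be called to push out the tail.
    w := snappy.NewBufferedWriter(&framed)
    for i := 0; i < 3; i++ {
        if _, err := w.Write([]byte("a small write, ")); err != nil {
            panic(err)
        }
    }
    if err := w.Close(); err != nil {
        panic(err)
    }

    // NewReader undoes the framing on the consuming side.
    r := snappy.NewReader(&framed)
    out, err := io.ReadAll(r)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%q\n", out)
}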
This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go deleted file mode 100644 index 150d91bc8..000000000 --- a/vendor/github.com/golang/snappy/encode_amd64.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. 
-// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979fe..000000000 --- a/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. 
-TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . 
len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. - MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. 
- // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. 
- // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. - - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. 
- MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go deleted file mode 100644 index dbcae905e..000000000 --- a/vendor/github.com/golang/snappy/encode_other.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. 
- dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. 
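extendMatch, quoted a few lines above, is specified only by its postcondition: the largest k with k <= len(src) such that src[i:i+k-j] and src[j:k] have the same contents. A small, hypothetical illustration of that contract, restating the four-line pure-Go body from this file; the sample input is invented:

package main

import "fmt"

// extendMatch mirrors the pure-Go definition quoted in encode_other.go:
// starting from a known match of src[i:] against src[j:], it walks both
// cursors forward while the bytes agree and returns the final j.
func extendMatch(src []byte, i, j int) int {
    for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
    }
    return j
}

func main() {
    src := []byte("abcabcabcxyz")
    // The repeating "abc" pattern keeps matching until the "xyz" tail
    // breaks it, so the returned j is 9.
    fmt.Println(extendMatch(src, 0, 3)) // prints 9
}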
But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod deleted file mode 100644 index f6406bb2c..000000000 --- a/vendor/github.com/golang/snappy/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/golang/snappy diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go deleted file mode 100644 index ece692ea4..000000000 --- a/vendor/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. 
-package snappy // import "github.com/golang/snappy" - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 2133562b0..580ae2097 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -6,6 +6,10 @@ // // This package is intended to be a more powerful and safer alternative to // reflect.DeepEqual for comparing whether two values are semantically equal. +// It is intended to only be used in tests, as performance is not a goal and +// it may panic if it cannot compare the values. 
Its propensity towards +// panicking means that its unsuitable for production environments where a +// spurious panic may be fatal. // // The primary features of cmp are: // @@ -22,8 +26,8 @@ // equality is determined by recursively comparing the primitive kinds on both // values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported // fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared -// using the AllowUnexported option. +// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly +// compared using the Exporter option. package cmp import ( @@ -62,8 +66,8 @@ import ( // // Structs are equal if recursively calling Equal on all fields report equal. // If a struct contains unexported fields, Equal panics unless an Ignore option -// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported -// option explicitly permits comparing the unexported field. +// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option +// explicitly permits comparing the unexported field. // // Slices are equal if they are both nil or both non-nil, where recursively // calling Equal on all non-ignored slice or array elements report equal. @@ -80,31 +84,14 @@ import ( // Pointers and interfaces are equal if they are both nil or both non-nil, // where they have the same underlying concrete type and recursively // calling Equal on the underlying values reports equal. +// +// Before recursing into a pointer, slice element, or map, the current path +// is checked to detect whether the address has already been visited. +// If there is a cycle, then the pointed at values are considered equal +// only if both addresses were previously visited in the same path step. func Equal(x, y interface{}, opts ...Option) bool { - vx := reflect.ValueOf(x) - vy := reflect.ValueOf(y) - - // If the inputs are different types, auto-wrap them in an empty interface - // so that they have the same parent type. - var t reflect.Type - if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { - t = reflect.TypeOf((*interface{})(nil)).Elem() - if vx.IsValid() { - vvx := reflect.New(t).Elem() - vvx.Set(vx) - vx = vvx - } - if vy.IsValid() { - vvy := reflect.New(t).Elem() - vvy.Set(vy) - vy = vvy - } - } else { - t = vx.Type() - } - s := newState(opts) - s.compareAny(&pathStep{t, vx, vy}) + s.compareAny(rootStep(x, y)) return s.result.Equal() } @@ -123,20 +110,63 @@ func Equal(x, y interface{}, opts ...Option) bool { // Do not depend on this output being stable. If you need the ability to // programmatically interpret the difference, consider using a custom Reporter. func Diff(x, y interface{}, opts ...Option) string { + s := newState(opts) + + // Optimization: If there are no other reporters, we can optimize for the + // common case where the result is equal (and thus no reported difference). + // This avoids the expensive construction of a difference tree. + if len(s.reporters) == 0 { + s.compareAny(rootStep(x, y)) + if s.result.Equal() { + return "" + } + s.result = diff.Result{} // Reset results + } + r := new(defaultReporter) - eq := Equal(x, y, Options(opts), Reporter(r)) + s.reporters = append(s.reporters, reporter{r}) + s.compareAny(rootStep(x, y)) d := r.String() - if (d == "") != eq { + if (d == "") != s.result.Equal() { panic("inconsistent difference and equality results") } return d } +// rootStep constructs the first path step. 
If x and y have differing types, +// then they are stored within an empty interface type. +func rootStep(x, y interface{}) PathStep { + vx := reflect.ValueOf(x) + vy := reflect.ValueOf(y) + + // If the inputs are different types, auto-wrap them in an empty interface + // so that they have the same parent type. + var t reflect.Type + if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { + t = reflect.TypeOf((*interface{})(nil)).Elem() + if vx.IsValid() { + vvx := reflect.New(t).Elem() + vvx.Set(vx) + vx = vvx + } + if vy.IsValid() { + vvy := reflect.New(t).Elem() + vvy.Set(vy) + vy = vvy + } + } else { + t = vx.Type() + } + + return &pathStep{t, vx, vy} +} + type state struct { // These fields represent the "comparison state". // Calling statelessCompare must not result in observable changes to these. result diff.Result // The current result of comparison curPath Path // The current path in the value tree + curPtrs pointerPath // The current set of visited pointers reporters []reporter // Optional reporters // recChecker checks for infinite cycles applying the same set of @@ -148,13 +178,14 @@ type state struct { dynChecker dynChecker // These fields, once set by processOption, will not change. - exporters map[reflect.Type]bool // Set of structs with unexported field visibility - opts Options // List of all fundamental and filter options + exporters []exporter // List of exporters for structs with unexported fields + opts Options // List of all fundamental and filter options } func newState(opts []Option) *state { // Always ensure a validator option exists to validate the inputs. s := &state{opts: Options{validator{}}} + s.curPtrs.Init() s.processOption(Options(opts)) return s } @@ -174,13 +205,8 @@ func (s *state) processOption(opt Option) { panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) } s.opts = append(s.opts, opt) - case visibleStructs: - if s.exporters == nil { - s.exporters = make(map[reflect.Type]bool) - } - for t := range opt { - s.exporters[t] = true - } + case exporter: + s.exporters = append(s.exporters, opt) case reporter: s.reporters = append(s.reporters, opt) default: @@ -192,9 +218,9 @@ func (s *state) processOption(opt Option) { // This function is stateless in that it does not alter the current result, // or output to any registered reporters. func (s *state) statelessCompare(step PathStep) diff.Result { - // We do not save and restore the curPath because all of the compareX - // methods should properly push and pop from the path. - // It is an implementation bug if the contents of curPath differs from + // We do not save and restore curPath and curPtrs because all of the + // compareX methods should properly push and pop from them. + // It is an implementation bug if the contents of the paths differ from // when calling this function to when returning from it. oldResult, oldReporters := s.result, s.reporters @@ -216,9 +242,17 @@ func (s *state) compareAny(step PathStep) { } s.recChecker.Check(s.curPath) - // Obtain the current type and values. + // Cycle-detection for slice elements (see NOTE in compareSlice). t := step.Type() vx, vy := step.Values() + if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { + px, py := vx.Addr(), vy.Addr() + if eq, visited := s.curPtrs.Push(px, py); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(px, py) + } // Rule 1: Check whether an option applies on this node in the value tree. 
if s.tryOptions(t, vx, vy) { @@ -342,7 +376,7 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { // assuming that T is assignable to R. // Otherwise, it returns the input value as is. func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143). + // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). if !flags.AtLeastGo110 { if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { return reflect.New(t).Elem() @@ -352,8 +386,10 @@ func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { } func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { + var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy + var mayForce, mayForceInit bool step := StructField{&structField{}} for i := 0; i < t.NumField(); i++ { step.typ = t.Field(i).Type @@ -372,10 +408,18 @@ func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { // For retrieveUnexportedField to work, the parent struct must // be addressable. Create a new copy of the values if // necessary to make them addressable. + addr = vx.CanAddr() || vy.CanAddr() vax = makeAddressable(vx) vay = makeAddressable(vy) } - step.mayForce = s.exporters[t] + if !mayForceInit { + for _, xf := range s.exporters { + mayForce = mayForce || xf(t) + } + mayForceInit = true + } + step.mayForce = mayForce + step.paddr = addr step.pvx = vax step.pvy = vay step.field = t.Field(i) @@ -391,9 +435,21 @@ func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) { return } - // TODO: Support cyclic data structures. + // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer + // since slices represents a list of pointers, rather than a single pointer. + // The pointer checking logic must be handled on a per-element basis + // in compareAny. + // + // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting + // pointer P, a length N, and a capacity C. Supposing each slice element has + // a memory size of M, then the slice is equivalent to the list of pointers: + // [P+i*M for i in range(N)] + // + // For example, v[:0] and v[:1] are slices with the same starting pointer, + // but they are clearly different values. Using the slice pointer alone + // violates the assumption that equal pointers implies equal values. - step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}} + step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}} withIndexes := func(ix, iy int) SliceIndex { if ix >= 0 { step.vx, step.xkey = vx.Index(ix), ix @@ -470,7 +526,12 @@ func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) { return } - // TODO: Support cyclic data structures. + // Cycle-detection for maps. + if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) // We combine and sort the two map keys so that we can perform the // comparisons in a deterministic order. @@ -507,7 +568,12 @@ func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { return } - // TODO: Support cyclic data structures. + // Cycle-detection for pointers. 
+ if eq, visited := s.curPtrs.Push(vx, vy); visited { + s.report(eq, reportByCycle) + return + } + defer s.curPtrs.Pop(vx, vy) vx, vy = vx.Elem(), vy.Elem() s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index abc3a1c3e..dfa5d2137 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -8,8 +8,8 @@ package cmp import "reflect" -const supportAllowUnexported = false +const supportExporters = false -func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { - panic("retrieveUnexportedField is not implemented") +func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value { + panic("no support for forcibly accessing unexported fields") } diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 59d4ee91b..351f1a34b 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -11,13 +11,25 @@ import ( "unsafe" ) -const supportAllowUnexported = true +const supportExporters = true // retrieveUnexportedField uses unsafe to forcibly retrieve any field from // a struct such that the value has read-write permissions. // // The parent struct, v, must be addressable, while f must be a StructField -// describing the field to retrieve. -func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { - return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() +// describing the field to retrieve. If addr is false, +// then the returned value will be shallowed copied to be non-addressable. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { + ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() + if !addr { + // A field is addressable if and only if the struct is addressable. + // If the original parent value was not addressable, shallow copy the + // value to make it non-addressable to avoid leaking an implementation + // detail of how forcibly exporting a field works. + if ve.Kind() == reflect.Interface && ve.IsNil() { + return reflect.Zero(f.Type) + } + return reflect.ValueOf(ve.Interface()).Convert(f.Type) + } + return ve } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index 3d2e42662..730e223ee 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -12,6 +12,13 @@ // is more important than obtaining a minimal Levenshtein distance. package diff +import ( + "math/rand" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + // EditType represents a single operation within an edit-script. type EditType uint8 @@ -112,6 +119,8 @@ func (r Result) Similar() bool { return r.NumSame+1 >= r.NumDiff } +var randInt = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) + // Difference reports whether two lists of lengths nx and ny are equal // given the definition of equality provided as f. // @@ -159,6 +168,17 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A vertical edge is equivalent to inserting a symbol from list Y. // A diagonal edge is equivalent to a matching symbol between both X and Y. 
+ // To ensure flexibility in changing the algorithm in the future, + // introduce some degree of deliberate instability. + // This is achieved by fiddling the zigzag iterator to start searching + // the graph starting from the bottom-right versus than the top-left. + // The result may differ depending on the starting search location, + // but still produces a valid edit script. + zigzagInit := randInt // either 0 or 1 + if flags.Deterministic { + zigzagInit = 0 + } + // Invariants: // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny @@ -209,7 +229,7 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { break } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + for stop1, stop2, i := false, false, zigzagInit; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. z := zigzag(i) p := point{fwdFrontier.X + z, fwdFrontier.Y - z} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go new file mode 100644 index 000000000..8228e7d51 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -0,0 +1,157 @@ +// Copyright 2020, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "reflect" + "strconv" +) + +// TypeString is nearly identical to reflect.Type.String, +// but has an additional option to specify that full type names be used. +func TypeString(t reflect.Type, qualified bool) string { + return string(appendTypeName(nil, t, qualified, false)) +} + +func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { + // BUG: Go reflection provides no way to disambiguate two named types + // of the same name and within the same package, + // but declared within the namespace of different functions. + + // Named type. + if t.Name() != "" { + if qualified && t.PkgPath() != "" { + b = append(b, '"') + b = append(b, t.PkgPath()...) + b = append(b, '"') + b = append(b, '.') + b = append(b, t.Name()...) + } else { + b = append(b, t.String()...) + } + return b + } + + // Unnamed type. + switch k := t.Kind(); k { + case reflect.Bool, reflect.String, reflect.UnsafePointer, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + b = append(b, k.String()...) + case reflect.Chan: + if t.ChanDir() == reflect.RecvDir { + b = append(b, "<-"...) + } + b = append(b, "chan"...) + if t.ChanDir() == reflect.SendDir { + b = append(b, "<-"...) + } + b = append(b, ' ') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Func: + if !elideFunc { + b = append(b, "func"...) + } + b = append(b, '(') + for i := 0; i < t.NumIn(); i++ { + if i > 0 { + b = append(b, ", "...) + } + if i == t.NumIn()-1 && t.IsVariadic() { + b = append(b, "..."...) 
+ b = appendTypeName(b, t.In(i).Elem(), qualified, false) + } else { + b = appendTypeName(b, t.In(i), qualified, false) + } + } + b = append(b, ')') + switch t.NumOut() { + case 0: + // Do nothing + case 1: + b = append(b, ' ') + b = appendTypeName(b, t.Out(0), qualified, false) + default: + b = append(b, " ("...) + for i := 0; i < t.NumOut(); i++ { + if i > 0 { + b = append(b, ", "...) + } + b = appendTypeName(b, t.Out(i), qualified, false) + } + b = append(b, ')') + } + case reflect.Struct: + b = append(b, "struct{ "...) + for i := 0; i < t.NumField(); i++ { + if i > 0 { + b = append(b, "; "...) + } + sf := t.Field(i) + if !sf.Anonymous { + if qualified && sf.PkgPath != "" { + b = append(b, '"') + b = append(b, sf.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, sf.Name...) + b = append(b, ' ') + } + b = appendTypeName(b, sf.Type, qualified, false) + if sf.Tag != "" { + b = append(b, ' ') + b = strconv.AppendQuote(b, string(sf.Tag)) + } + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + case reflect.Slice, reflect.Array: + b = append(b, '[') + if k == reflect.Array { + b = strconv.AppendUint(b, uint64(t.Len()), 10) + } + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Map: + b = append(b, "map["...) + b = appendTypeName(b, t.Key(), qualified, false) + b = append(b, ']') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Ptr: + b = append(b, '*') + b = appendTypeName(b, t.Elem(), qualified, false) + case reflect.Interface: + b = append(b, "interface{ "...) + for i := 0; i < t.NumMethod(); i++ { + if i > 0 { + b = append(b, "; "...) + } + m := t.Method(i) + if qualified && m.PkgPath != "" { + b = append(b, '"') + b = append(b, m.PkgPath...) + b = append(b, '"') + b = append(b, '.') + } + b = append(b, m.Name...) + b = appendTypeName(b, m.Type, qualified, true) + } + if b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } else { + b = append(b, ' ') + } + b = append(b, '}') + default: + panic("invalid kind: " + k.String()) + } + return b +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index 0a01c4796..e9e384a1c 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -21,3 +21,13 @@ func PointerOf(v reflect.Value) Pointer { // assumes that the GC implementation does not use a moving collector. return Pointer{v.Pointer(), v.Type()} } + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == 0 +} + +// Uintptr returns the pointer as a uintptr. +func (p Pointer) Uintptr() uintptr { + return p.p +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index da134ae2a..b50c17ec7 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -24,3 +24,13 @@ func PointerOf(v reflect.Value) Pointer { // which is necessary if the GC ever uses a moving collector. return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} } + +// IsNil reports whether the pointer is nil. +func (p Pointer) IsNil() bool { + return p.p == nil +} + +// Uintptr returns the pointer as a uintptr. 
+func (p Pointer) Uintptr() uintptr { + return uintptr(p.p) +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 793448160..abbd2a63b 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -225,8 +225,20 @@ func (validator) apply(s *state, vx, vy reflect.Value) { // Unable to Interface implies unexported field without visibility access. if !vx.CanInterface() || !vy.CanInterface() { - const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" - panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" + var name string + if t := s.curPath.Index(-2).Type(); t.Name() != "" { + // Named type with unexported fields. + name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType + } else { + // Unnamed type with unexported fields. Derive PkgPath from field. + var pkgPath string + for i := 0; i < t.NumField() && pkgPath == ""; i++ { + pkgPath = t.Field(i).PkgPath + } + name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) + } + panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) } panic("not reachable") @@ -360,9 +372,8 @@ func (cm comparer) String() string { return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) } -// AllowUnexported returns an Option that forcibly allows operations on -// unexported fields in certain structs, which are specified by passing in a -// value of each struct type. +// Exporter returns an Option that specifies whether Equal is allowed to +// introspect into the unexported fields of certain struct types. // // Users of this option must understand that comparing on unexported fields // from external packages is not safe since changes in the internal @@ -386,10 +397,24 @@ func (cm comparer) String() string { // // In other cases, the cmpopts.IgnoreUnexported option can be used to ignore // all unexported fields on specified struct types. -func AllowUnexported(types ...interface{}) Option { - if !supportAllowUnexported { - panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") +func Exporter(f func(reflect.Type) bool) Option { + if !supportExporters { + panic("Exporter is not supported on purego builds") } + return exporter(f) +} + +type exporter func(reflect.Type) bool + +func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// AllowUnexported returns an Options that allows Equal to forcibly introspect +// unexported fields of the specified struct types. +// +// See Exporter for the proper use of this option. 
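To make the reworked option concrete, a small hedged sketch of how a caller might opt in to comparing unexported fields with either the new Exporter predicate or the retained AllowUnexported wrapper. The myType name is hypothetical; the snippet is illustrative and not part of this patch:

package main

import (
	"fmt"
	"reflect"

	"github.com/google/go-cmp/cmp"
)

// myType is a hypothetical struct with an unexported field.
type myType struct{ secret int }

func main() {
	x := myType{secret: 1}
	y := myType{secret: 2}

	// Exporter takes a predicate deciding which struct types may be introspected.
	fmt.Println(cmp.Equal(x, y, cmp.Exporter(func(t reflect.Type) bool {
		return t == reflect.TypeOf(myType{})
	})))

	// AllowUnexported is now a thin wrapper that builds the same predicate
	// from a set of example values.
	fmt.Println(cmp.Diff(x, y, cmp.AllowUnexported(myType{})))
}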
+func AllowUnexported(types ...interface{}) Option { m := make(map[reflect.Type]bool) for _, typ := range types { t := reflect.TypeOf(typ) @@ -398,13 +423,7 @@ func AllowUnexported(types ...interface{}) Option { } m[t] = true } - return visibleStructs(m) -} - -type visibleStructs map[reflect.Type]bool - -func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { - panic("not implemented") + return exporter(func(t reflect.Type) bool { return m[t] }) } // Result represents the comparison result for a single node and @@ -436,6 +455,11 @@ func (r Result) ByFunc() bool { return r.flags&reportByFunc != 0 } +// ByCycle reports whether a reference cycle was detected. +func (r Result) ByCycle() bool { + return r.flags&reportByCycle != 0 +} + type resultFlags uint const ( @@ -446,6 +470,7 @@ const ( reportByIgnore reportByMethod reportByFunc + reportByCycle ) // Reporter is an Option that can be passed to Equal. When Equal traverses diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 96fffd291..603dbb002 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -10,6 +10,8 @@ import ( "strings" "unicode" "unicode/utf8" + + "github.com/google/go-cmp/cmp/internal/value" ) // Path is a list of PathSteps describing the sequence of operations to get @@ -41,7 +43,7 @@ type PathStep interface { // In some cases, one or both may be invalid or have restrictions: // • For StructField, both are not interface-able if the current field // is unexported and the struct type is not explicitly permitted by - // AllowUnexported to traverse unexported fields. + // an Exporter to traverse unexported fields. // • For SliceIndex, one may be invalid if an element is missing from // either the x or y slice. // • For MapIndex, one may be invalid if an entry is missing from @@ -175,7 +177,8 @@ type structField struct { // pvx, pvy, and field are only valid if unexported is true. unexported bool mayForce bool // Forcibly allow visibility - pvx, pvy reflect.Value // Parent values + paddr bool // Was parent addressable? + pvx, pvy reflect.Value // Parent values (always addressible) field reflect.StructField // Field information } @@ -187,8 +190,8 @@ func (sf StructField) Values() (vx, vy reflect.Value) { // Forcibly obtain read-write access to an unexported struct field. if sf.mayForce { - vx = retrieveUnexportedField(sf.pvx, sf.field) - vy = retrieveUnexportedField(sf.pvy, sf.field) + vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) + vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) return vx, vy // CanInterface reports true } return sf.vx, sf.vy // CanInterface reports false @@ -207,6 +210,7 @@ type SliceIndex struct{ *sliceIndex } type sliceIndex struct { pathStep xkey, ykey int + isSlice bool // False for reflect.Array } func (si SliceIndex) Type() reflect.Type { return si.typ } @@ -301,6 +305,72 @@ func (tf Transform) Func() reflect.Value { return tf.trans.fnc } // The == operator can be used to detect the exact option used. func (tf Transform) Option() Option { return tf.trans } +// pointerPath represents a dual-stack of pointers encountered when +// recursively traversing the x and y values. This data structure supports +// detection of cycles and determining whether the cycles are equal. +// In Go, cycles can occur via pointers, slices, and maps. 
+// +// The pointerPath uses a map to represent a stack; where descension into a +// pointer pushes the address onto the stack, and ascension from a pointer +// pops the address from the stack. Thus, when traversing into a pointer from +// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles +// by checking whether the pointer has already been visited. The cycle detection +// uses a seperate stack for the x and y values. +// +// If a cycle is detected we need to determine whether the two pointers +// should be considered equal. The definition of equality chosen by Equal +// requires two graphs to have the same structure. To determine this, both the +// x and y values must have a cycle where the previous pointers were also +// encountered together as a pair. +// +// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and +// MapIndex with pointer information for the x and y values. +// Suppose px and py are two pointers to compare, we then search the +// Path for whether px was ever encountered in the Path history of x, and +// similarly so with py. If either side has a cycle, the comparison is only +// equal if both px and py have a cycle resulting from the same PathStep. +// +// Using a map as a stack is more performant as we can perform cycle detection +// in O(1) instead of O(N) where N is len(Path). +type pointerPath struct { + // mx is keyed by x pointers, where the value is the associated y pointer. + mx map[value.Pointer]value.Pointer + // my is keyed by y pointers, where the value is the associated x pointer. + my map[value.Pointer]value.Pointer +} + +func (p *pointerPath) Init() { + p.mx = make(map[value.Pointer]value.Pointer) + p.my = make(map[value.Pointer]value.Pointer) +} + +// Push indicates intent to descend into pointers vx and vy where +// visited reports whether either has been seen before. If visited before, +// equal reports whether both pointers were encountered together. +// Pop must be called if and only if the pointers were never visited. +// +// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map +// and be non-nil. +func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { + px := value.PointerOf(vx) + py := value.PointerOf(vy) + _, ok1 := p.mx[px] + _, ok2 := p.my[py] + if ok1 || ok2 { + equal = p.mx[px] == py && p.my[py] == px // Pointers paired together + return equal, true + } + p.mx[px] = py + p.my[py] = px + return false, false +} + +// Pop ascends from pointers vx and vy. +func (p pointerPath) Pop(vx, vy reflect.Value) { + delete(p.mx, value.PointerOf(vx)) + delete(p.my, value.PointerOf(vy)) +} + // isExported reports whether the identifier is exported. 
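The pointerPath plumbing above is what enables cycle handling in Equal and Diff: two values that cycle at the same path step now compare equal instead of recursing without bound. A brief illustrative sketch (the listNode type is hypothetical, not part of this patch):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// listNode is a hypothetical self-referential type used to build a cycle.
type listNode struct {
	Value int
	Next  *listNode
}

func newRing(v int) *listNode {
	n := &listNode{Value: v}
	n.Next = n // points back to itself, forming a one-element cycle
	return n
}

func main() {
	x := newRing(1)
	y := newRing(1)

	// Both values cycle at the same path step, so they are reported equal.
	fmt.Println(cmp.Equal(x, y))
}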
func isExported(id string) bool { r, _ := utf8.DecodeRuneInString(id) diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go index 6ddf29993..aafcb3635 100644 --- a/vendor/github.com/google/go-cmp/cmp/report.go +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -41,7 +41,10 @@ func (r *defaultReporter) String() string { if r.root.NumDiff == 0 { return "" } - return formatOptions{}.FormatDiff(r.root).String() + ptrs := new(pointerReferences) + text := formatOptions{}.FormatDiff(r.root, ptrs) + resolveReferences(text) + return text.String() } func assert(ok bool) { diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 17a05eede..9e2180964 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -11,14 +11,6 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) -// TODO: Enforce limits? -// * Enforce maximum number of records to print per node? -// * Enforce maximum size in bytes allowed? -// * As a heuristic, use less verbosity for equal nodes than unequal nodes. -// TODO: Enforce unique outputs? -// * Avoid Stringer methods if it results in same output? -// * Print pointer address if outputs still equal? - // numContextRecords is the number of surrounding equal records to print. const numContextRecords = 2 @@ -71,24 +63,66 @@ func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { opts.TypeMode = t return opts } +func (opts formatOptions) WithVerbosity(level int) formatOptions { + opts.VerbosityLevel = level + opts.LimitVerbosity = true + return opts +} +func (opts formatOptions) verbosity() uint { + switch { + case opts.VerbosityLevel < 0: + return 0 + case opts.VerbosityLevel > 16: + return 16 // some reasonable maximum to avoid shift overflow + default: + return uint(opts.VerbosityLevel) + } +} + +const maxVerbosityPreset = 3 + +// verbosityPreset modifies the verbosity settings given an index +// between 0 and maxVerbosityPreset, inclusive. +func verbosityPreset(opts formatOptions, i int) formatOptions { + opts.VerbosityLevel = int(opts.verbosity()) + 2*i + if i > 0 { + opts.AvoidStringer = true + } + if i >= maxVerbosityPreset { + opts.PrintAddresses = true + opts.QualifiedNames = true + } + return opts +} // FormatDiff converts a valueNode tree into a textNode tree, where the later // is a textual representation of the differences detected in the former. -func (opts formatOptions) FormatDiff(v *valueNode) textNode { +func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { + if opts.DiffMode == diffIdentical { + opts = opts.WithVerbosity(1) + } else { + opts = opts.WithVerbosity(3) + } + // Check whether we have specialized formatting for this node. // This is not necessary, but helpful for producing more readable outputs. if opts.CanFormatDiffSlice(v) { return opts.FormatDiffSlice(v) } + var parentKind reflect.Kind + if v.parent != nil && v.parent.TransformerName == "" { + parentKind = v.parent.Type.Kind() + } + // For leaf nodes, format the value based on the reflect.Values alone. if v.MaxDepth == 0 { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. 
if v.NumDiff == 0 { - outx := opts.FormatValue(v.ValueX, visitedPointers{}) - outy := opts.FormatValue(v.ValueY, visitedPointers{}) + outx := opts.FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.FormatValue(v.ValueY, parentKind, ptrs) if v.NumIgnored > 0 && v.NumSame == 0 { return textEllipsis } else if outx.Len() < outy.Len() { @@ -101,8 +135,13 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { // Format unequal. assert(opts.DiffMode == diffUnknown) var list textList - outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{}) - outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{}) + outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) + outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) + outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) + outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) + } if outx != nil { list = append(list, textRecord{Diff: '-', Value: outx}) } @@ -111,34 +150,57 @@ func (opts formatOptions) FormatDiff(v *valueNode) textNode { } return opts.WithTypeMode(emitType).FormatType(v.Type, list) case diffRemoved: - return opts.FormatValue(v.ValueX, visitedPointers{}) + return opts.FormatValue(v.ValueX, parentKind, ptrs) case diffInserted: - return opts.FormatValue(v.ValueY, visitedPointers{}) + return opts.FormatValue(v.ValueY, parentKind, ptrs) default: panic("invalid diff mode") } } + // Register slice element to support cycle detection. + if parentKind == reflect.Slice { + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) + defer ptrs.Pop() + defer func() { out = wrapTrunkReferences(ptrRefs, out) }() + } + // Descend into the child value node. if v.TransformerName != "" { - out := opts.WithTypeMode(emitType).FormatDiff(v.Value) - out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"} + out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) + out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} return opts.FormatType(v.Type, out) } else { switch k := v.Type.Kind(); k { - case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: - return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k)) + case reflect.Struct, reflect.Array, reflect.Slice: + out = opts.formatDiffList(v.Records, k, ptrs) + out = opts.FormatType(v.Type, out) + case reflect.Map: + // Register map to support cycle detection. + ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.formatDiffList(v.Records, k, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = opts.FormatType(v.Type, out) case reflect.Ptr: - return textWrap{"&", opts.FormatDiff(v.Value), ""} + // Register pointer to support cycle detection. 
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) + defer ptrs.Pop() + + out = opts.FormatDiff(v.Value, ptrs) + out = wrapTrunkReferences(ptrRefs, out) + out = &textWrap{Prefix: "&", Value: out} case reflect.Interface: - return opts.WithTypeMode(emitType).FormatDiff(v.Value) + out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) default: panic(fmt.Sprintf("%v cannot have children", k)) } + return out } } -func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode { +func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { // Derive record name based on the data structure kind. var name string var formatKey func(reflect.Value) string @@ -154,7 +216,17 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te case reflect.Map: name = "entry" opts = opts.WithTypeMode(elideType) - formatKey = formatMapKey + formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } + } + + maxLen := -1 + if opts.LimitVerbosity { + if opts.DiffMode == diffIdentical { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + } else { + maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... + } + opts.VerbosityLevel-- } // Handle unification. @@ -163,6 +235,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te var list textList var deferredEllipsis bool // Add final "..." to indicate records were dropped for _, r := range recs { + if len(list) == maxLen { + deferredEllipsis = true + break + } + // Elide struct fields that are zero value. if k == reflect.Struct { var isZero bool @@ -186,23 +263,31 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te } continue } - if out := opts.FormatDiff(r.Value); out != nil { + if out := opts.FormatDiff(r.Value, ptrs); out != nil { list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) } } if deferredEllipsis { list.AppendEllipsis(diffStats{}) } - return textWrap{"{", list, "}"} + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case diffUnknown: default: panic("invalid diff mode") } // Handle differencing. + var numDiffs int var list textList + var keys []reflect.Value // invariant: len(list) == len(keys) groups := coalesceAdjacentRecords(name, recs) + maxGroup := diffStats{Name: name} for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + // Handle equal records. if ds.NumDiff() == 0 { // Compute the number of leading and trailing records to print. @@ -226,16 +311,21 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te // Format the equal values. 
for _, r := range recs[:numLo] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } if numEqual > numLo+numHi { ds.NumIdentical -= numLo + numHi list.AppendEllipsis(ds) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } } for _, r := range recs[numEqual-numHi : numEqual] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value) + out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } recs = recs[numEqual:] continue @@ -247,24 +337,70 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) te case opts.CanFormatDiffSlice(r.Value): out := opts.FormatDiffSlice(r.Value) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) case r.Value.NumChildren == r.Value.MaxDepth: - outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value) - outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value) + outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { + opts2 := verbosityPreset(opts, i) + outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) + outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) + } if outx != nil { list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) + keys = append(keys, r.Key) } if outy != nil { list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) + keys = append(keys, r.Key) } default: - out := opts.FormatDiff(r.Value) + out := opts.FormatDiff(r.Value, ptrs) list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) + keys = append(keys, r.Key) } } recs = recs[ds.NumDiff():] + numDiffs += ds.NumDiff() + } + if maxGroup.IsZero() { + assert(len(recs) == 0) + } else { + list.AppendEllipsis(maxGroup) + for len(keys) < len(list) { + keys = append(keys, reflect.Value{}) + } } - assert(len(recs) == 0) - return textWrap{"{", list, "}"} + assert(len(list) == len(keys)) + + // For maps, the default formatting logic uses fmt.Stringer which may + // produce ambiguous output. Avoid calling String to disambiguate. + if k == reflect.Map { + var ambiguous bool + seenKeys := map[string]reflect.Value{} + for i, currKey := range keys { + if currKey.IsValid() { + strKey := list[i].Key + prevKey, seen := seenKeys[strKey] + if seen && prevKey.CanInterface() && currKey.CanInterface() { + ambiguous = prevKey.Interface() != currKey.Interface() + if ambiguous { + break + } + } + seenKeys[strKey] = currKey + } + } + if ambiguous { + for i, k := range keys { + if k.IsValid() { + list[i].Key = formatMapKey(k, true, ptrs) + } + } + } + } + + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} } // coalesceAdjacentRecords coalesces the list of records into groups of diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go new file mode 100644 index 000000000..d620c2c20 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -0,0 +1,264 @@ +// Copyright 2020, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + + "github.com/google/go-cmp/cmp/internal/flags" + "github.com/google/go-cmp/cmp/internal/value" +) + +const ( + pointerDelimPrefix = "⟪" + pointerDelimSuffix = "⟫" +) + +// formatPointer prints the address of the pointer. +func formatPointer(p value.Pointer, withDelims bool) string { + v := p.Uintptr() + if flags.Deterministic { + v = 0xdeadf00f // Only used for stable testing purposes + } + if withDelims { + return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix + } + return formatHex(uint64(v)) +} + +// pointerReferences is a stack of pointers visited so far. +type pointerReferences [][2]value.Pointer + +func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { + if deref && vx.IsValid() { + vx = vx.Addr() + } + if deref && vy.IsValid() { + vy = vy.Addr() + } + switch d { + case diffUnknown, diffIdentical: + pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} + case diffRemoved: + pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} + case diffInserted: + pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} + } + *ps = append(*ps, pp) + return pp +} + +func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { + p = value.PointerOf(v) + for _, pp := range *ps { + if p == pp[0] || p == pp[1] { + return p, true + } + } + *ps = append(*ps, [2]value.Pointer{p, p}) + return p, false +} + +func (ps *pointerReferences) Pop() { + *ps = (*ps)[:len(*ps)-1] +} + +// trunkReferences is metadata for a textNode indicating that the sub-tree +// represents the value for either pointer in a pair of references. +type trunkReferences struct{ pp [2]value.Pointer } + +// trunkReference is metadata for a textNode indicating that the sub-tree +// represents the value for the given pointer reference. +type trunkReference struct{ p value.Pointer } + +// leafReference is metadata for a textNode indicating that the value is +// truncated as it refers to another part of the tree (i.e., a trunk). +type leafReference struct{ p value.Pointer } + +func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { + switch { + case pp[0].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} + case pp[1].IsNil(): + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + case pp[0] == pp[1]: + return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} + default: + return &textWrap{Value: s, Metadata: trunkReferences{pp}} + } +} +func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} +} +func makeLeafReference(p value.Pointer, printAddress bool) textNode { + out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} + var prefix string + if printAddress { + prefix = formatPointer(p, true) + } + return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} +} + +// resolveReferences walks the textNode tree searching for any leaf reference +// metadata and resolves each against the corresponding trunk references. +// Since pointer addresses in memory are not particularly readable to the user, +// it replaces each pointer value with an arbitrary and unique reference ID. 
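The reference-resolution code that follows replaces raw pointer addresses with small sequential "ref#N" identifiers; the core bookkeeping amounts to something like this standalone sketch (illustrative only, not the patch's code):

package main

import "fmt"

func main() {
	// Assign each distinct pointer value a stable ID in first-seen order,
	// so output can say "ref#0", "ref#1", ... instead of raw addresses.
	ids := map[uintptr]uint{}
	idOf := func(p uintptr) uint {
		if id, ok := ids[p]; ok {
			return id
		}
		id := uint(len(ids))
		ids[p] = id
		return id
	}

	for _, p := range []uintptr{0xc000010000, 0xc000010040, 0xc000010000} {
		fmt.Printf("ref#%d\n", idOf(p)) // prints ref#0, ref#1, ref#0
	}
}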
+func resolveReferences(s textNode) { + var walkNodes func(textNode, func(textNode)) + walkNodes = func(s textNode, f func(textNode)) { + f(s) + switch s := s.(type) { + case *textWrap: + walkNodes(s.Value, f) + case textList: + for _, r := range s { + walkNodes(r.Value, f) + } + } + } + + // Collect all trunks and leaves with reference metadata. + var trunks, leaves []*textWrap + walkNodes(s, func(s textNode) { + if s, ok := s.(*textWrap); ok { + switch s.Metadata.(type) { + case leafReference: + leaves = append(leaves, s) + case trunkReference, trunkReferences: + trunks = append(trunks, s) + } + } + }) + + // No leaf references to resolve. + if len(leaves) == 0 { + return + } + + // Collect the set of all leaf references to resolve. + leafPtrs := make(map[value.Pointer]bool) + for _, leaf := range leaves { + leafPtrs[leaf.Metadata.(leafReference).p] = true + } + + // Collect the set of trunk pointers that are always paired together. + // This allows us to assign a single ID to both pointers for brevity. + // If a pointer in a pair ever occurs by itself or as a different pair, + // then the pair is broken. + pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) + unpair := func(p value.Pointer) { + if !pairedTrunkPtrs[p].IsNil() { + pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half + } + pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + unpair(p.p) // standalone pointer cannot be part of a pair + case trunkReferences: + p0, ok0 := pairedTrunkPtrs[p.pp[0]] + p1, ok1 := pairedTrunkPtrs[p.pp[1]] + switch { + case !ok0 && !ok1: + // Register the newly seen pair. + pairedTrunkPtrs[p.pp[0]] = p.pp[1] + pairedTrunkPtrs[p.pp[1]] = p.pp[0] + case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: + // Exact pair already seen; do nothing. + default: + // Pair conflicts with some other pair; break all pairs. + unpair(p.pp[0]) + unpair(p.pp[1]) + } + } + } + + // Correlate each pointer referenced by leaves to a unique identifier, + // and print the IDs for each trunk that matches those pointers. 
+ var nextID uint + ptrIDs := make(map[value.Pointer]uint) + newID := func() uint { + id := nextID + nextID++ + return id + } + for _, trunk := range trunks { + switch p := trunk.Metadata.(type) { + case trunkReference: + if print := leafPtrs[p.p]; print { + id, ok := ptrIDs[p.p] + if !ok { + id = newID() + ptrIDs[p.p] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } + case trunkReferences: + print0 := leafPtrs[p.pp[0]] + print1 := leafPtrs[p.pp[1]] + if print0 || print1 { + id0, ok0 := ptrIDs[p.pp[0]] + id1, ok1 := ptrIDs[p.pp[1]] + isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] + if isPair { + var id uint + assert(ok0 == ok1) // must be seen together or not at all + if ok0 { + assert(id0 == id1) // must have the same ID + id = id0 + } else { + id = newID() + ptrIDs[p.pp[0]] = id + ptrIDs[p.pp[1]] = id + } + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) + } else { + if print0 && !ok0 { + id0 = newID() + ptrIDs[p.pp[0]] = id0 + } + if print1 && !ok1 { + id1 = newID() + ptrIDs[p.pp[1]] = id1 + } + switch { + case print0 && print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) + case print0: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) + case print1: + trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) + } + } + } + } + } + + // Update all leaf references with the unique identifier. + for _, leaf := range leaves { + if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { + leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) + } + } +} + +func formatReference(id uint) string { + return fmt.Sprintf("ref#%d", id) +} + +func updateReferencePrefix(prefix, ref string) string { + if prefix == "" { + return pointerDelimPrefix + ref + pointerDelimSuffix + } + suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) + return pointerDelimPrefix + ref + ": " + suffix +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 2761b6289..2d722ea51 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -10,8 +10,8 @@ import ( "strconv" "strings" "unicode" + "unicode/utf8" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/value" ) @@ -20,14 +20,22 @@ type formatValueOptions struct { // methods like error.Error or fmt.Stringer.String. AvoidStringer bool - // ShallowPointers controls whether to avoid descending into pointers. - // Useful when printing map keys, where pointer comparison is performed - // on the pointer address rather than the pointed-at value. - ShallowPointers bool - // PrintAddresses controls whether to print the address of all pointers, // slice elements, and maps. PrintAddresses bool + + // QualifiedNames controls whether FormatType uses the fully qualified name + // (including the full package path as opposed to just the package name). + QualifiedNames bool + + // VerbosityLevel controls the amount of output to produce. + // A higher value produces more output. A value of zero or lower produces + // no output (represented using an ellipsis). + // If LimitVerbosity is false, then the level is treated as infinite. + VerbosityLevel int + + // LimitVerbosity specifies that formatting should respect VerbosityLevel. 
+ LimitVerbosity bool } // FormatType prints the type as if it were wrapping s. @@ -44,12 +52,15 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { default: return s } + if opts.DiffMode == diffIdentical { + return s // elide type for identical nodes + } case elideType: return s } // Determine the type label, applying special handling for unnamed types. - typeName := t.String() + typeName := value.TypeString(t, opts.QualifiedNames) if t.Name() == "" { // According to Go grammar, certain type literals contain symbols that // do not strongly bind to the next lexicographical token (e.g., *T). @@ -57,39 +68,78 @@ func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { case reflect.Chan, reflect.Func, reflect.Ptr: typeName = "(" + typeName + ")" } - typeName = strings.Replace(typeName, "struct {", "struct{", -1) - typeName = strings.Replace(typeName, "interface {", "interface{", -1) } + return &textWrap{Prefix: typeName, Value: wrapParens(s)} +} - // Avoid wrap the value in parenthesis if unnecessary. - if s, ok := s.(textWrap); ok { - hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")") - hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}") +// wrapParens wraps s with a set of parenthesis, but avoids it if the +// wrapped node itself is already surrounded by a pair of parenthesis or braces. +// It handles unwrapping one level of pointer-reference nodes. +func wrapParens(s textNode) textNode { + var refNode *textWrap + if s2, ok := s.(*textWrap); ok { + // Unwrap a single pointer reference node. + switch s2.Metadata.(type) { + case leafReference, trunkReference, trunkReferences: + refNode = s2 + if s3, ok := refNode.Value.(*textWrap); ok { + s2 = s3 + } + } + + // Already has delimiters that make parenthesis unnecessary. + hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") + hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") if hasParens || hasBraces { - return textWrap{typeName, s, ""} + return s } } - return textWrap{typeName + "(", s, ")"} + if refNode != nil { + refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} + return s + } + return &textWrap{Prefix: "(", Value: s, Suffix: ")"} } // FormatValue prints the reflect.Value, taking extra care to avoid descending -// into pointers already in m. As pointers are visited, m is also updated. -func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { +// into pointers already in ptrs. As pointers are visited, ptrs is also updated. +func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { if !v.IsValid() { return nil } t := v.Type() + // Check slice element for cycles. + if parentKind == reflect.Slice { + ptrRef, visited := ptrs.Push(v.Addr()) + if visited { + return makeLeafReference(ptrRef, false) + } + defer ptrs.Pop() + defer func() { out = wrapTrunkReference(ptrRef, false, out) }() + } + // Check whether there is an Error or String method to call. if !opts.AvoidStringer && v.CanInterface() { // Avoid calling Error or String methods on nil receivers since many // implementations crash when doing so. 
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + var prefix, strVal string switch v := v.Interface().(type) { case error: - return textLine("e" + formatString(v.Error())) + prefix, strVal = "e", v.Error() case fmt.Stringer: - return textLine("s" + formatString(v.String())) + prefix, strVal = "s", v.String() + } + if prefix != "" { + maxLen := len(strVal) + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... + } + if len(strVal) > maxLen+len(textEllipsis) { + return textLine(prefix + formatString(strVal[:maxLen]) + string(textEllipsis)) + } + return textLine(prefix + formatString(strVal)) } } } @@ -102,94 +152,136 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t } }() - var ptr string switch t.Kind() { case reflect.Bool: return textLine(fmt.Sprint(v.Bool())) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return textLine(fmt.Sprint(v.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - // Unnamed uints are usually bytes or words, so use hexadecimal. - if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return textLine(fmt.Sprint(v.Uint())) + case reflect.Uint8: + if parentKind == reflect.Slice || parentKind == reflect.Array { return textLine(formatHex(v.Uint())) } return textLine(fmt.Sprint(v.Uint())) + case reflect.Uintptr: + return textLine(formatHex(v.Uint())) case reflect.Float32, reflect.Float64: return textLine(fmt.Sprint(v.Float())) case reflect.Complex64, reflect.Complex128: return textLine(fmt.Sprint(v.Complex())) case reflect.String: + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... + } + if v.Len() > maxLen+len(textEllipsis) { + return textLine(formatString(v.String()[:maxLen]) + string(textEllipsis)) + } return textLine(formatString(v.String())) case reflect.UnsafePointer, reflect.Chan, reflect.Func: - return textLine(formatPointer(v)) + return textLine(formatPointer(value.PointerOf(v), true)) case reflect.Struct: var list textList + v := makeAddressable(v) // needed for retrieveUnexportedField + maxLen := v.NumField() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) if value.IsZero(vv) { continue // Elide fields with zero values } - s := opts.WithTypeMode(autoType).FormatValue(vv, m) - list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sf := t.Field(i) + if supportExporters && !isExported(sf.Name) { + vv = retrieveUnexportedField(v, sf, true) + } + s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) + list = append(list, textRecord{Key: sf.Name, Value: s}) } - return textWrap{"{", list, "}"} + return &textWrap{Prefix: "{", Value: list, Suffix: "}"} case reflect.Slice: if v.IsNil() { return textNil } - if opts.PrintAddresses { - ptr = formatPointer(v) - } fallthrough case reflect.Array: + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... 
+ opts.VerbosityLevel-- + } var list textList for i := 0; i < v.Len(); i++ { - vi := v.Index(i) - if vi.CanAddr() { // Check for cyclic elements - p := vi.Addr() - if m.Visit(p) { - var out textNode - out = textLine(formatPointer(p)) - out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) - out = textWrap{"*", out, ""} - list = append(list, textRecord{Value: out}) - continue - } + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break } - s := opts.WithTypeMode(elideType).FormatValue(vi, m) + s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) list = append(list, textRecord{Value: s}) } - return textWrap{ptr + "{", list, "}"} + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + if t.Kind() == reflect.Slice && opts.PrintAddresses { + header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) + out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} + } + return out case reflect.Map: if v.IsNil() { return textNil } - if m.Visit(v) { - return textLine(formatPointer(v)) + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + return makeLeafReference(ptrRef, opts.PrintAddresses) } + defer ptrs.Pop() + maxLen := v.Len() + if opts.LimitVerbosity { + maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... + opts.VerbosityLevel-- + } var list textList for _, k := range value.SortKeys(v.MapKeys()) { - sk := formatMapKey(k) - sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + if len(list) == maxLen { + list.AppendEllipsis(diffStats{}) + break + } + sk := formatMapKey(k, false, ptrs) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) list = append(list, textRecord{Key: sk, Value: sv}) } - if opts.PrintAddresses { - ptr = formatPointer(v) - } - return textWrap{ptr + "{", list, "}"} + + out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + return out case reflect.Ptr: if v.IsNil() { return textNil } - if m.Visit(v) || opts.ShallowPointers { - return textLine(formatPointer(v)) - } - if opts.PrintAddresses { - ptr = formatPointer(v) + + // Check pointer for cycles. + ptrRef, visited := ptrs.Push(v) + if visited { + out = makeLeafReference(ptrRef, opts.PrintAddresses) + return &textWrap{Prefix: "&", Value: out} } + defer ptrs.Pop() + skipType = true // Let the underlying value print the type instead - return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) + out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) + out = &textWrap{Prefix: "&", Value: out} + return out case reflect.Interface: if v.IsNil() { return textNil @@ -197,7 +289,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. skipType = true // Print the concrete type instead - return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m) + return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) } @@ -205,11 +297,14 @@ func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out t // formatMapKey formats v as if it were a map key. // The result is guaranteed to be a single line. 
-func formatMapKey(v reflect.Value) string { +func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string { var opts formatOptions + opts.DiffMode = diffIdentical opts.TypeMode = elideType - opts.ShallowPointers = true - s := opts.FormatValue(v, visitedPointers{}).String() + opts.PrintAddresses = disambiguate + opts.AvoidStringer = disambiguate + opts.QualifiedNames = disambiguate + s := opts.FormatValue(v, reflect.Map, ptrs).String() return strings.TrimSpace(s) } @@ -227,7 +322,7 @@ func formatString(s string) string { rawInvalid := func(r rune) bool { return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') } - if strings.IndexFunc(s, rawInvalid) < 0 { + if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 { return "`" + s + "`" } return qs @@ -256,23 +351,3 @@ func formatHex(u uint64) string { } return fmt.Sprintf(f, u) } - -// formatPointer prints the address of the pointer. -func formatPointer(v reflect.Value) string { - p := v.Pointer() - if flags.Deterministic { - p = 0xdeadf00f // Only used for stable testing purposes - } - return fmt.Sprintf("⟪0x%x⟫", p) -} - -type visitedPointers map[value.Pointer]struct{} - -// Visit inserts pointer v into the visited map and reports whether it had -// already been visited before. -func (m visitedPointers) Visit(v reflect.Value) bool { - p := value.PointerOf(v) - _, visited := m[p] - m[p] = struct{}{} - return visited -} diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index eafcf2e4c..35315dad3 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -8,6 +8,7 @@ import ( "bytes" "fmt" "reflect" + "strconv" "strings" "unicode" "unicode/utf8" @@ -23,11 +24,25 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { return false // Must be formatting in diff mode case v.NumDiff == 0: return false // No differences detected - case v.NumIgnored+v.NumCompared+v.NumTransformed > 0: - // TODO: Handle the case where someone uses bytes.Equal on a large slice. - return false // Some custom option was used to determined equality case !v.ValueX.IsValid() || !v.ValueY.IsValid(): return false // Both values must be valid + case v.Type.Kind() == reflect.Slice && (v.ValueX.Len() == 0 || v.ValueY.Len() == 0): + return false // Both slice values have to be non-empty + case v.NumIgnored > 0: + return false // Some ignore option was used + case v.NumTransformed > 0: + return false // Some transform option was used + case v.NumCompared > 1: + return false // More than one comparison was used + case v.NumCompared == 1 && v.Type.Name() != "": + // The need for cmp to check applicability of options on every element + // in a slice is a significant performance detriment for large []byte. + // The workaround is to specify Comparer(bytes.Equal), + // which enables cmp to compare []byte more efficiently. + // If they differ, we still want to provide batched diffing. + // The logic disallows named types since they tend to have their own + // String method, with nicer formatting than what this provides. 
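For context on the []byte fast path described in the comment above, here is a minimal usage sketch of how a caller opts into it through go-cmp's exported API; cmp.Diff and cmp.Comparer are the real exported functions, while the sample values are hypothetical:

```
package main

import (
	"bytes"
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := []byte("hello, world")
	y := []byte("hello, go-cmp")

	// Comparer(bytes.Equal) lets cmp compare []byte in a single call instead
	// of element by element; when the slices differ, the reporter still
	// produces the batched diff described above.
	fmt.Println(cmp.Diff(x, y, cmp.Comparer(bytes.Equal)))
}
```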
+ return false } switch t := v.Type; t.Kind() { @@ -82,7 +97,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } if isText || isBinary { var numLines, lastLineIdx, maxLineLen int - isBinary = false + isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy) for i, r := range sx + sy { if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { isBinary = true @@ -97,7 +112,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } } isText = !isBinary - isLinedText = isText && numLines >= 4 && maxLineLen <= 256 + isLinedText = isText && numLines >= 4 && maxLineLen <= 1024 } // Format the string into printable records. @@ -117,6 +132,83 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }, ) delim = "\n" + + // If possible, use a custom triple-quote (""") syntax for printing + // differences in a string literal. This format is more readable, + // but has edge-cases where differences are visually indistinguishable. + // This format is avoided under the following conditions: + // • A line starts with `"""` + // • A line starts with "..." + // • A line contains non-printable characters + // • Adjacent different lines differ only by whitespace + // + // For example: + // """ + // ... // 3 identical lines + // foo + // bar + // - baz + // + BAZ + // """ + isTripleQuoted := true + prevRemoveLines := map[string]bool{} + prevInsertLines := map[string]bool{} + var list2 textList + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + for _, r := range list { + if !r.Value.Equal(textEllipsis) { + line, _ := strconv.Unquote(string(r.Value.(textLine))) + line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support + normLine := strings.Map(func(r rune) rune { + if unicode.IsSpace(r) { + return -1 // drop whitespace to avoid visually indistinguishable output + } + return r + }, line) + isPrintable := func(r rune) bool { + return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable + } + isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" + switch r.Diff { + case diffRemoved: + isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] + prevRemoveLines[normLine] = true + case diffInserted: + isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] + prevInsertLines[normLine] = true + } + if !isTripleQuoted { + break + } + r.Value = textLine(line) + r.ElideComma = true + } + if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group + prevRemoveLines = map[string]bool{} + prevInsertLines = map[string]bool{} + } + list2 = append(list2, r) + } + if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { + list2 = list2[:len(list2)-1] // elide single empty line at the end + } + list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) + if isTripleQuoted { + var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} + switch t.Kind() { + case reflect.String: + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + // Always emit type for slices since the triple-quote syntax + // looks like a string (not a slice). 
+ opts = opts.WithTypeMode(emitType) + out = opts.FormatType(t, out) + } + return out + } + // If the text appears to be single-lined text, // then perform differencing in approximately fixed-sized chunks. // The output is printed as quoted strings. @@ -129,6 +221,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }, ) delim = "" + // If the text appears to be binary data, // then perform differencing in approximately fixed-sized chunks. // The output is inspired by hexdump. @@ -145,6 +238,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { return textRecord{Diff: d, Value: textLine(s), Comment: comment} }, ) + // For all other slices of primitive types, // then perform differencing in approximately fixed-sized chunks. // The size of each chunk depends on the width of the element kind. @@ -172,7 +266,9 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Elem().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: ss = append(ss, fmt.Sprint(v.Index(i).Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + ss = append(ss, fmt.Sprint(v.Index(i).Uint())) + case reflect.Uint8, reflect.Uintptr: ss = append(ss, formatHex(v.Index(i).Uint())) case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: ss = append(ss, fmt.Sprint(v.Index(i).Interface())) @@ -185,7 +281,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } // Wrap the output with appropriate type information. - var out textNode = textWrap{"{", list, "}"} + var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} if !isText { // The "{...}" byte-sequence literal is not valid Go syntax for strings. // Emit the type for extra clarity (e.g. "string{...}"). @@ -196,12 +292,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } switch t.Kind() { case reflect.String: - out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} if t != reflect.TypeOf(string("")) { out = opts.FormatType(t, out) } case reflect.Slice: - out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} if t != reflect.TypeOf([]byte(nil)) { out = opts.FormatType(t, out) } @@ -242,9 +338,22 @@ func (opts formatOptions) formatDiffSlice( return n0 - v.Len() } + var numDiffs int + maxLen := -1 + if opts.LimitVerbosity { + maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... + opts.VerbosityLevel-- + } + groups := coalesceAdjacentEdits(name, es) groups = coalesceInterveningIdentical(groups, chunkSize/4) + maxGroup := diffStats{Name: name} for i, ds := range groups { + if maxLen >= 0 && numDiffs >= maxLen { + maxGroup = maxGroup.Append(ds) + continue + } + // Print equal. if ds.NumDiff() == 0 { // Compute the number of leading and trailing equal bytes to print. @@ -273,12 +382,18 @@ func (opts formatOptions) formatDiffSlice( } // Print unequal. 
+ len0 := len(list) nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) vx = vx.Slice(nx, vx.Len()) ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) vy = vy.Slice(ny, vy.Len()) + numDiffs += len(list) - len0 + } + if maxGroup.IsZero() { + assert(vx.Len() == 0 && vy.Len() == 0) + } else { + list.AppendEllipsis(maxGroup) } - assert(vx.Len() == 0 && vy.Len() == 0) return list } diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 8b8fcab7b..8b12c05cd 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -10,12 +10,15 @@ import ( "math/rand" "strings" "time" + "unicode/utf8" "github.com/google/go-cmp/cmp/internal/flags" ) var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 +const maxColumnLength = 80 + type indentMode int func (n indentMode) appendIndent(b []byte, d diffMode) []byte { @@ -91,21 +94,22 @@ type textNode interface { // textWrap is a wrapper that concatenates a prefix and/or a suffix // to the underlying node. type textWrap struct { - Prefix string // e.g., "bytes.Buffer{" - Value textNode // textWrap | textList | textLine - Suffix string // e.g., "}" + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" + Metadata interface{} // arbitrary metadata; has no effect on formatting } -func (s textWrap) Len() int { +func (s *textWrap) Len() int { return len(s.Prefix) + s.Value.Len() + len(s.Suffix) } -func (s1 textWrap) Equal(s2 textNode) bool { - if s2, ok := s2.(textWrap); ok { +func (s1 *textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(*textWrap); ok { return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix } return false } -func (s textWrap) String() string { +func (s *textWrap) String() string { var d diffMode var n indentMode _, s2 := s.formatCompactTo(nil, d) @@ -114,7 +118,7 @@ func (s textWrap) String() string { b = append(b, '\n') // Trailing newline return string(b) } -func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { +func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { n0 := len(b) // Original buffer length b = append(b, s.Prefix...) b, s.Value = s.Value.formatCompactTo(b, d) @@ -124,7 +128,7 @@ func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { } return b, s } -func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { +func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = append(b, s.Prefix...) b = s.Value.formatExpandedTo(b, d, n) b = append(b, s.Suffix...) @@ -136,22 +140,23 @@ func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { // of the textList.formatCompactTo method. type textList []textRecord type textRecord struct { - Diff diffMode // e.g., 0 or '-' or '+' - Key string // e.g., "MyField" - Value textNode // textWrap | textLine - Comment fmt.Stringer // e.g., "6 identical fields" + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + ElideComma bool // avoid trailing comma + Comment fmt.Stringer // e.g., "6 identical fields" } // AppendEllipsis appends a new ellipsis node to the list if none already // exists at the end. If cs is non-zero it coalesces the statistics with the // previous diffStats. 
func (s *textList) AppendEllipsis(ds diffStats) { - hasStats := ds != diffStats{} + hasStats := !ds.IsZero() if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { if hasStats { - *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) } else { - *s = append(*s, textRecord{Value: textEllipsis}) + *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) } return } @@ -191,7 +196,7 @@ func (s1 textList) Equal(s2 textNode) bool { } func (s textList) String() string { - return textWrap{"{", s, "}"}.String() + return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() } func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { @@ -221,7 +226,7 @@ func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { } // Force multi-lined output when printing a removed/inserted node that // is sufficiently long. - if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { multiLine = true } if !multiLine { @@ -236,16 +241,50 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { _, isLine := r.Value.(textLine) return r.Key == "" || !isLine }, - func(r textRecord) int { return len(r.Key) }, + func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, ) alignValueLens := s.alignLens( func(r textRecord) bool { _, isLine := r.Value.(textLine) return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil }, - func(r textRecord) int { return len(r.Value.(textLine)) }, + func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, ) + // Format lists of simple lists in a batched form. + // If the list is sequence of only textLine values, + // then batch multiple values on a single line. + var isSimple bool + for _, r := range s { + _, isLine := r.Value.(textLine) + isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil + if !isSimple { + break + } + } + if isSimple { + n++ + var batch []byte + emitBatch := func() { + if len(batch) > 0 { + b = n.appendIndent(append(b, '\n'), d) + b = append(b, bytes.TrimRight(batch, " ")...) + batch = batch[:0] + } + } + for _, r := range s { + line := r.Value.(textLine) + if len(batch)+len(line)+len(", ") > maxColumnLength { + emitBatch() + } + batch = append(batch, line...) + batch = append(batch, ", "...) + } + emitBatch() + n-- + return n.appendIndent(append(b, '\n'), d) + } + // Format the list as a multi-lined output. 
n++ for i, r := range s { @@ -256,7 +295,7 @@ func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { b = alignKeyLens[i].appendChar(b, ' ') b = r.Value.formatExpandedTo(b, d|r.Diff, n) - if !r.Value.Equal(textEllipsis) { + if !r.ElideComma { b = append(b, ',') } b = alignValueLens[i].appendChar(b, ' ') @@ -332,6 +371,11 @@ type diffStats struct { NumModified int } +func (s diffStats) IsZero() bool { + s.Name = "" + return s == diffStats{} +} + func (s diffStats) NumDiff() int { return s.NumRemoved + s.NumInserted + s.NumModified } diff --git a/vendor/github.com/gookit/color/color_16.go b/vendor/github.com/gookit/color/color_16.go index 8fd643392..0c621a674 100644 --- a/vendor/github.com/gookit/color/color_16.go +++ b/vendor/github.com/gookit/color/color_16.go @@ -20,8 +20,8 @@ const ( FgGreen FgYellow FgBlue - FgMagenta // 品红 - FgCyan // 青色 + FgMagenta // 品红 + FgCyan // 青色 FgWhite // FgDefault revert default FG FgDefault Color = 39 @@ -46,7 +46,7 @@ const ( BgBlack Color = iota + 40 BgRed BgGreen - BgYellow // BgBrown like yellow + BgYellow // BgBrown like yellow BgBlue BgMagenta BgCyan diff --git a/vendor/github.com/gookit/color/color_windows.go b/vendor/github.com/gookit/color/color_windows.go index 7cf0e177b..3d652f1d4 100644 --- a/vendor/github.com/gookit/color/color_windows.go +++ b/vendor/github.com/gookit/color/color_windows.go @@ -302,7 +302,7 @@ func IsTerminal(fd int) bool { // from package: golang.org/x/sys/windows type ( short int16 - word uint16 + word uint16 // coord cursor position coordinates coord struct { diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/go-checkpoint/LICENSE similarity index 50% rename from vendor/github.com/hashicorp/golang-lru/LICENSE rename to vendor/github.com/hashicorp/go-checkpoint/LICENSE index be2cc4dfb..c33dcc7c9 100644 --- a/vendor/github.com/hashicorp/golang-lru/LICENSE +++ b/vendor/github.com/hashicorp/go-checkpoint/LICENSE @@ -2,89 +2,89 @@ Mozilla Public License, version 2.0 1. Definitions -1.1. "Contributor" +1.1. “Contributor” means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -1.2. "Contributor Version" +1.2. “Contributor Version” means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. + Contributor and that particular Contributor’s Contribution. -1.3. "Contribution" +1.3. “Contribution” means Covered Software of a particular Contributor. -1.4. "Covered Software" +1.4. “Covered Software” means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -1.5. "Incompatible With Secondary Licenses" +1.5. “Incompatible With Secondary Licenses” means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. -1.6. "Executable Form" +1.6. “Executable Form” means any form of the work other than Source Code Form. -1.7. "Larger Work" +1.7. 
“Larger Work” - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. -1.8. "License" +1.8. “License” means this document. -1.9. "Licensable" +1.9. “Licensable” - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. -1.10. "Modifications" +1.10. “Modifications” means any of the following: - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. -1.11. "Patent Claims" of a Contributor +1.11. “Patent Claims” of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. -1.12. "Secondary License" +1.12. “Secondary License” means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -1.13. "Source Code Form" +1.13. “Source Code Form” means the form of the work preferred for making modifications. -1.14. "You" (or "Your") +1.14. “You” (or “Your”) means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is + License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause + definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. @@ -100,59 +100,57 @@ Mozilla Public License, version 2.0 a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and b. 
under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. 2.2. Effective Date - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. 2.3. Limitations on Grant Scope - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: a. for any code that a Contributor has removed from Covered Software; or - b. for infringements caused by: (i) Your and any other third party's + b. for infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). 2.5. Representation - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. 2.6. Fair Use - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions @@ -165,12 +163,11 @@ Mozilla Public License, version 2.0 3.1. 
Distribution of Source Form All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. 3.2. Distribution of Executable Form @@ -182,40 +179,39 @@ Mozilla Public License, version 2.0 reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). 3.4. Notices - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. 
+ You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any @@ -224,14 +220,14 @@ Mozilla Public License, version 2.0 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. 5. Termination @@ -239,22 +235,21 @@ Mozilla Public License, version 2.0 fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. 
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been @@ -263,16 +258,16 @@ Mozilla Public License, version 2.0 6. Disclaimer of Warranty - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. 7. Limitation of Liability @@ -284,29 +279,27 @@ Mozilla Public License, version 2.0 goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. 
Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. 8. Litigation - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. 9. Miscellaneous - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. 10. Versions of the License @@ -320,24 +313,23 @@ Mozilla Public License, version 2.0 10.2. Effect of New Versions - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). -10.4. 
Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice @@ -348,15 +340,15 @@ Exhibit A - Source Code Form License Notice obtain one at http://mozilla.org/MPL/2.0/. -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. -Exhibit B - "Incompatible With Secondary Licenses" Notice +Exhibit B - “Incompatible With Secondary Licenses” Notice - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-checkpoint/README.md b/vendor/github.com/hashicorp/go-checkpoint/README.md new file mode 100644 index 000000000..e717b6ad3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/README.md @@ -0,0 +1,22 @@ +# Go Checkpoint Client + +[Checkpoint](http://checkpoint.hashicorp.com) is an internal service at +Hashicorp that we use to check version information, broadcast security +bulletins, etc. + +We understand that software making remote calls over the internet +for any reason can be undesirable. Because of this, Checkpoint can be +disabled in all of our software that includes it. You can view the source +of this client to see that we're not sending any private information. + +Each Hashicorp application has it's specific configuration option +to disable checkpoint calls, but the `CHECKPOINT_DISABLE` makes +the underlying checkpoint component itself disabled. For example +in the case of packer: +``` +CHECKPOINT_DISABLE=1 packer build +``` + +**Note:** This repository is probably useless outside of internal HashiCorp +use. It is open source for disclosure and because our open source projects +must be able to link to it. diff --git a/vendor/github.com/hashicorp/go-checkpoint/check.go b/vendor/github.com/hashicorp/go-checkpoint/check.go new file mode 100644 index 000000000..109d0d352 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/check.go @@ -0,0 +1,368 @@ +package checkpoint + +import ( + crand "crypto/rand" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "io/ioutil" + mrand "math/rand" + "net/http" + "net/url" + "os" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" +) + +var magicBytes = [4]byte{0x35, 0x77, 0x69, 0xFB} + +// CheckParams are the parameters for configuring a check request. 
+type CheckParams struct { + // Product and version are used to lookup the correct product and + // alerts for the proper version. The version is also used to perform + // a version check. + Product string + Version string + + // Arch and OS are used to filter alerts potentially only to things + // affecting a specific os/arch combination. If these aren't specified, + // they'll be automatically filled in. + Arch string + OS string + + // Signature is some random signature that should be stored and used + // as a cookie-like value. This ensures that alerts aren't repeated. + // If the signature is changed, repeat alerts may be sent down. The + // signature should NOT be anything identifiable to a user (such as + // a MAC address). It should be random. + // + // If SignatureFile is given, then the signature will be read from this + // file. If the file doesn't exist, then a random signature will + // automatically be generated and stored here. SignatureFile will be + // ignored if Signature is given. + Signature string + SignatureFile string + + // CacheFile, if specified, will cache the result of a check. The + // duration of the cache is specified by CacheDuration, and defaults + // to 48 hours if not specified. If the CacheFile is newer than the + // CacheDuration, than the Check will short-circuit and use those + // results. + // + // If the CacheFile directory doesn't exist, it will be created with + // permissions 0755. + CacheFile string + CacheDuration time.Duration + + // Force, if true, will force the check even if CHECKPOINT_DISABLE + // is set. Within HashiCorp products, this is ONLY USED when the user + // specifically requests it. This is never automatically done without + // the user's consent. + Force bool +} + +// CheckResponse is the response for a check request. +type CheckResponse struct { + Product string `json:"product"` + CurrentVersion string `json:"current_version"` + CurrentReleaseDate int `json:"current_release_date"` + CurrentDownloadURL string `json:"current_download_url"` + CurrentChangelogURL string `json:"current_changelog_url"` + ProjectWebsite string `json:"project_website"` + Outdated bool `json:"outdated"` + Alerts []*CheckAlert `json:"alerts"` +} + +// CheckAlert is a single alert message from a check request. +// +// These never have to be manually constructed, and are typically populated +// into a CheckResponse as a result of the Check request. +type CheckAlert struct { + ID int `json:"id"` + Date int `json:"date"` + Message string `json:"message"` + URL string `json:"url"` + Level string `json:"level"` +} + +// Check checks for alerts and new version information. +func Check(p *CheckParams) (*CheckResponse, error) { + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force { + return &CheckResponse{}, nil + } + + // Set a default timeout of 3 sec for the check request (in milliseconds) + timeout := 3000 + if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { + timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")) + } + + // If we have a cached result, then use that + if r, err := checkCache(p.Version, p.CacheFile, p.CacheDuration); err != nil { + return nil, err + } else if r != nil { + defer r.Close() + return checkResult(r) + } + + var u url.URL + + if p.Arch == "" { + p.Arch = runtime.GOARCH + } + if p.OS == "" { + p.OS = runtime.GOOS + } + + // If we're given a SignatureFile, then attempt to read that. 
+ signature := p.Signature + if p.Signature == "" && p.SignatureFile != "" { + var err error + signature, err = checkSignature(p.SignatureFile) + if err != nil { + return nil, err + } + } + + v := u.Query() + v.Set("version", p.Version) + v.Set("arch", p.Arch) + v.Set("os", p.OS) + v.Set("signature", signature) + + u.Scheme = "https" + u.Host = "checkpoint-api.hashicorp.com" + u.Path = fmt.Sprintf("/v1/check/%s", p.Product) + u.RawQuery = v.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", "HashiCorp/go-checkpoint") + + client := cleanhttp.DefaultClient() + + // We use a short timeout since checking for new versions is not critical + // enough to block on if checkpoint is broken/slow. + client.Timeout = time.Duration(timeout) * time.Millisecond + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode) + } + + var r io.Reader = resp.Body + if p.CacheFile != "" { + // Make sure the directory holding our cache exists. + if err := os.MkdirAll(filepath.Dir(p.CacheFile), 0755); err != nil { + return nil, err + } + + // We have to cache the result, so write the response to the + // file as we read it. + f, err := os.Create(p.CacheFile) + if err != nil { + return nil, err + } + + // Write the cache header + if err := writeCacheHeader(f, p.Version); err != nil { + f.Close() + os.Remove(p.CacheFile) + return nil, err + } + + defer f.Close() + r = io.TeeReader(r, f) + } + + return checkResult(r) +} + +// CheckInterval is used to check for a response on a given interval duration. +// The interval is not exact, and checks are randomized to prevent a thundering +// herd. However, it is expected that on average one check is performed per +// interval. The returned channel may be closed to stop background checks. +func CheckInterval(p *CheckParams, interval time.Duration, cb func(*CheckResponse, error)) chan struct{} { + doneCh := make(chan struct{}) + + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { + return doneCh + } + + go func() { + for { + select { + case <-time.After(randomStagger(interval)): + resp, err := Check(p) + cb(resp, err) + case <-doneCh: + return + } + } + }() + + return doneCh +} + +// randomStagger returns an interval that is between 3/4 and 5/4 of +// the given interval. The expected value is the interval. +func randomStagger(interval time.Duration) time.Duration { + stagger := time.Duration(mrand.Int63()) % (interval / 2) + return 3*(interval/4) + stagger +} + +func checkCache(current string, path string, d time.Duration) (io.ReadCloser, error) { + fi, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + // File doesn't exist, not a problem + return nil, nil + } + + return nil, err + } + + if d == 0 { + d = 48 * time.Hour + } + + if fi.ModTime().Add(d).Before(time.Now()) { + // Cache is busted, delete the old file and re-request. We ignore + // errors here because re-creating the file is fine too. + os.Remove(path) + return nil, nil + } + + // File looks good so far, open it up so we can inspect the contents. 
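As a usage sketch for the Check API defined in this file (the Check signature and struct fields are taken from the code above; the product name, version, and file paths are hypothetical):

```
package main

import (
	"fmt"
	"log"

	checkpoint "github.com/hashicorp/go-checkpoint"
)

func main() {
	// Ask the checkpoint service whether a newer release exists.
	// The call is skipped when CHECKPOINT_DISABLE is set, as shown above.
	resp, err := checkpoint.Check(&checkpoint.CheckParams{
		Product:       "terraform",                 // hypothetical product name
		Version:       "0.12.0",                    // hypothetical version
		SignatureFile: "/tmp/checkpoint_signature", // hypothetical path
		CacheFile:     "/tmp/checkpoint_cache",     // hypothetical path
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Outdated {
		fmt.Printf("update available: %s (%s)\n", resp.CurrentVersion, resp.CurrentDownloadURL)
	}
	for _, alert := range resp.Alerts {
		fmt.Printf("[%s] %s - %s\n", alert.Level, alert.Message, alert.URL)
	}
}
```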
+ f, err := os.Open(path) + if err != nil { + return nil, err + } + + // Check the signature of the file + var sig [4]byte + if err := binary.Read(f, binary.LittleEndian, sig[:]); err != nil { + f.Close() + return nil, err + } + if !reflect.DeepEqual(sig, magicBytes) { + // Signatures don't match. Reset. + f.Close() + return nil, nil + } + + // Check the version. If it changed, then rewrite + var length uint32 + if err := binary.Read(f, binary.LittleEndian, &length); err != nil { + f.Close() + return nil, err + } + data := make([]byte, length) + if _, err := io.ReadFull(f, data); err != nil { + f.Close() + return nil, err + } + if string(data) != current { + // Version changed, reset + f.Close() + return nil, nil + } + + return f, nil +} +func checkResult(r io.Reader) (*CheckResponse, error) { + var result CheckResponse + if err := json.NewDecoder(r).Decode(&result); err != nil { + return nil, err + } + return &result, nil +} + +func checkSignature(path string) (string, error) { + _, err := os.Stat(path) + if err == nil { + // The file exists, read it out + sigBytes, err := ioutil.ReadFile(path) + if err != nil { + return "", err + } + + // Split the file into lines + lines := strings.SplitN(string(sigBytes), "\n", 2) + if len(lines) > 0 { + return strings.TrimSpace(lines[0]), nil + } + } + + // If this isn't a non-exist error, then return that. + if !os.IsNotExist(err) { + return "", err + } + + // The file doesn't exist, so create a signature. + var b [16]byte + n := 0 + for n < 16 { + n2, err := crand.Read(b[n:]) + if err != nil { + return "", err + } + + n += n2 + } + signature := fmt.Sprintf( + "%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) + + // Make sure the directory holding our signature exists. + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return "", err + } + + // Write the signature + if err := ioutil.WriteFile(path, []byte(signature+"\n\n"+userMessage+"\n"), 0644); err != nil { + return "", err + } + + return signature, nil +} + +func writeCacheHeader(f io.Writer, v string) error { + // Write our signature first + if err := binary.Write(f, binary.LittleEndian, magicBytes); err != nil { + return err + } + + // Write out our current version length + length := uint32(len(v)) + if err := binary.Write(f, binary.LittleEndian, length); err != nil { + return err + } + + _, err := f.Write([]byte(v)) + return err +} + +// userMessage is suffixed to the signature file to provide feedback. +var userMessage = ` +This signature is a randomly generated UUID used to de-duplicate +alerts and version information. This signature is random, it is +not based on any personally identifiable information. To create +a new signature, you can simply delete this file at any time. +See the documentation for the software using Checkpoint for more +information on how to disable it. 
+` diff --git a/vendor/github.com/hashicorp/go-checkpoint/go.mod b/vendor/github.com/hashicorp/go-checkpoint/go.mod new file mode 100644 index 000000000..be0c793e7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/go.mod @@ -0,0 +1,6 @@ +module github.com/hashicorp/go-checkpoint + +require ( + github.com/hashicorp/go-cleanhttp v0.5.0 + github.com/hashicorp/go-uuid v1.0.0 +) diff --git a/vendor/github.com/hashicorp/go-checkpoint/go.sum b/vendor/github.com/hashicorp/go-checkpoint/go.sum new file mode 100644 index 000000000..2128a0c8d --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/go.sum @@ -0,0 +1,4 @@ +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= diff --git a/vendor/github.com/hashicorp/go-checkpoint/telemetry.go b/vendor/github.com/hashicorp/go-checkpoint/telemetry.go new file mode 100644 index 000000000..b9ee62983 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/telemetry.go @@ -0,0 +1,118 @@ +package checkpoint + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "os" + "runtime" + "time" + + "github.com/hashicorp/go-cleanhttp" + uuid "github.com/hashicorp/go-uuid" +) + +// ReportParams are the parameters for configuring a telemetry report. +type ReportParams struct { + // Signature is some random signature that should be stored and used + // as a cookie-like value. This ensures that alerts aren't repeated. + // If the signature is changed, repeat alerts may be sent down. The + // signature should NOT be anything identifiable to a user (such as + // a MAC address). It should be random. + // + // If SignatureFile is given, then the signature will be read from this + // file. If the file doesn't exist, then a random signature will + // automatically be generated and stored here. SignatureFile will be + // ignored if Signature is given. 
+ Signature string `json:"signature"` + SignatureFile string `json:"-"` + + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + Arch string `json:"arch"` + OS string `json:"os"` + Payload interface{} `json:"payload,omitempty"` + Product string `json:"product"` + RunID string `json:"run_id"` + SchemaVersion string `json:"schema_version"` + Version string `json:"version"` +} + +func (i *ReportParams) signature() string { + signature := i.Signature + if i.Signature == "" && i.SignatureFile != "" { + var err error + signature, err = checkSignature(i.SignatureFile) + if err != nil { + return "" + } + } + return signature +} + +// Report sends telemetry information to checkpoint +func Report(ctx context.Context, r *ReportParams) error { + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { + return nil + } + + req, err := ReportRequest(r) + if err != nil { + return err + } + + client := cleanhttp.DefaultClient() + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return err + } + if resp.StatusCode != 201 { + return fmt.Errorf("Unknown status: %d", resp.StatusCode) + } + + return nil +} + +// ReportRequest creates a request object for making a report +func ReportRequest(r *ReportParams) (*http.Request, error) { + // Populate some fields automatically if we can + if r.RunID == "" { + uuid, err := uuid.GenerateUUID() + if err != nil { + return nil, err + } + r.RunID = uuid + } + if r.Arch == "" { + r.Arch = runtime.GOARCH + } + if r.OS == "" { + r.OS = runtime.GOOS + } + if r.Signature == "" { + r.Signature = r.signature() + } + + b, err := json.Marshal(r) + if err != nil { + return nil, err + } + + u := &url.URL{ + Scheme: "https", + Host: "checkpoint-api.hashicorp.com", + Path: fmt.Sprintf("/v1/telemetry/%s", r.Product), + } + + req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b)) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", "HashiCorp/go-checkpoint") + + return req, nil +} diff --git a/vendor/github.com/hashicorp/go-checkpoint/versions.go b/vendor/github.com/hashicorp/go-checkpoint/versions.go new file mode 100644 index 000000000..a5b0d3b32 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/versions.go @@ -0,0 +1,90 @@ +package checkpoint + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + "os" + "strconv" + "time" + + "github.com/hashicorp/go-cleanhttp" +) + +// VersionsParams are the parameters for a versions request. +type VersionsParams struct { + // Service is used to lookup the correct service. + Service string + + // Product is used to filter the version contraints. + Product string + + // Force, if true, will force the check even if CHECKPOINT_DISABLE + // is set. Within HashiCorp products, this is ONLY USED when the user + // specifically requests it. This is never automatically done without + // the user's consent. + Force bool +} + +// VersionsResponse is the response for a versions request. +type VersionsResponse struct { + Service string `json:"service"` + Product string `json:"product"` + Minimum string `json:"minimum"` + Maximum string `json:"maximum"` + Excluding []string `json:"excluding"` +} + +// Versions returns the version constrains for a given service and product. 
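For illustration, a minimal sketch of driving the telemetry API vendored above. The product name, version, and signature path are placeholders; as the Report function shows, the call is a no-op when CHECKPOINT_DISABLE is set.

package main

import (
	"context"
	"log"
	"time"

	checkpoint "github.com/hashicorp/go-checkpoint"
)

func main() {
	start := time.Now()
	// ... application work would happen here ...

	// Placeholder product/version/signature values for illustration only.
	err := checkpoint.Report(context.Background(), &checkpoint.ReportParams{
		Product:       "example-product",
		Version:       "0.1.0",
		StartTime:     start,
		EndTime:       time.Now(),
		SignatureFile: "/tmp/example-product.sig",
	})
	if err != nil {
		log.Printf("telemetry report failed: %s", err)
	}
}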
+func Versions(p *VersionsParams) (*VersionsResponse, error) { + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force { + return &VersionsResponse{}, nil + } + + // Set a default timeout of 1 sec for the versions request (in milliseconds) + timeout := 1000 + if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { + timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")) + } + + v := url.Values{} + v.Set("product", p.Product) + + u := &url.URL{ + Scheme: "https", + Host: "checkpoint-api.hashicorp.com", + Path: fmt.Sprintf("/v1/versions/%s", p.Service), + RawQuery: v.Encode(), + } + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", "HashiCorp/go-checkpoint") + + client := cleanhttp.DefaultClient() + + // We use a short timeout since checking for new versions is not critical + // enough to block on if checkpoint is broken/slow. + client.Timeout = time.Duration(timeout) * time.Millisecond + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode) + } + + result := &VersionsResponse{} + if err := json.NewDecoder(resp.Body).Decode(result); err != nil { + return nil, err + } + + return result, nil +} diff --git a/vendor/github.com/boombuler/barcode/LICENSE b/vendor/github.com/hashicorp/go-cty/LICENSE similarity index 94% rename from vendor/github.com/boombuler/barcode/LICENSE rename to vendor/github.com/hashicorp/go-cty/LICENSE index 862b0ddcd..d6503b555 100644 --- a/vendor/github.com/boombuler/barcode/LICENSE +++ b/vendor/github.com/hashicorp/go-cty/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2014 Florian Sundermann +Copyright (c) 2017-2018 Martin Atkins Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/hashicorp/go-cty/cty/capsule.go b/vendor/github.com/hashicorp/go-cty/cty/capsule.go new file mode 100644 index 000000000..2fdc15eae --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/capsule.go @@ -0,0 +1,128 @@ +package cty + +import ( + "fmt" + "reflect" +) + +type capsuleType struct { + typeImplSigil + Name string + GoType reflect.Type + Ops *CapsuleOps +} + +func (t *capsuleType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(*capsuleType); ok { + // capsule types compare by pointer identity + return otherP == t + } + return false +} + +func (t *capsuleType) FriendlyName(mode friendlyTypeNameMode) string { + return t.Name +} + +func (t *capsuleType) GoString() string { + impl := t.Ops.TypeGoString + if impl == nil { + // To get a useful representation of our native type requires some + // shenanigans. + victimVal := reflect.Zero(t.GoType) + if t.Ops == noCapsuleOps { + return fmt.Sprintf("cty.Capsule(%q, reflect.TypeOf(%#v))", t.Name, victimVal.Interface()) + } else { + // Including the operations in the output will make this _very_ long, + // so in practice any capsule type with ops ought to provide a + // TypeGoString function to override this with something more + // reasonable. + return fmt.Sprintf("cty.CapsuleWithOps(%q, reflect.TypeOf(%#v), %#v)", t.Name, victimVal.Interface(), t.Ops) + } + } + return impl(t.GoType) +} + +// Capsule creates a new Capsule type. 
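A companion sketch of the version-check call defined just above. The service and product identifiers are placeholders; CHECKPOINT_DISABLE suppresses the lookup unless Force is set.

package main

import (
	"log"

	checkpoint "github.com/hashicorp/go-checkpoint"
)

func main() {
	// Placeholder service/product identifiers for illustration only.
	resp, err := checkpoint.Versions(&checkpoint.VersionsParams{
		Service: "example.v1",
		Product: "example-product",
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("minimum=%s maximum=%s excluding=%v",
		resp.Minimum, resp.Maximum, resp.Excluding)
}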
+// +// A Capsule type is a special type that can be used to transport arbitrary +// Go native values of a given type through the cty type system. A language +// that uses cty as its type system might, for example, provide functions +// that return capsule-typed values and then other functions that operate +// on those values. +// +// From cty's perspective, Capsule types have a few interesting characteristics, +// described in the following paragraphs. +// +// Each capsule type has an associated Go native type that it is able to +// transport. Capsule types compare by identity, so each call to the +// Capsule function creates an entirely-distinct cty Type, even if two calls +// use the same native type. +// +// Each capsule-typed value contains a pointer to a value of the given native +// type. A capsule-typed value by default supports no operations except +// equality, and equality is implemented by pointer identity of the +// encapsulated pointer. A capsule type can optionally have its own +// implementations of certain operations if it is created with CapsuleWithOps +// instead of Capsule. +// +// The given name is used as the new type's "friendly name". This can be any +// string in principle, but will usually be a short, all-lowercase name aimed +// at users of the embedding language (i.e. not mention Go-specific details) +// and will ideally not create ambiguity with any predefined cty type. +// +// Capsule types are never introduced by any standard cty operation, so a +// calling application opts in to including them within its own type system +// by creating them and introducing them via its own functions. At that point, +// the application is responsible for dealing with any capsule-typed values +// that might be returned. +func Capsule(name string, nativeType reflect.Type) Type { + return Type{ + &capsuleType{ + Name: name, + GoType: nativeType, + Ops: noCapsuleOps, + }, + } +} + +// CapsuleWithOps is like Capsule except the caller may provide an object +// representing some overloaded operation implementations to associate with +// the given capsule type. +// +// All of the other caveats and restrictions for capsule types still apply, but +// overloaded operations can potentially help a capsule type participate better +// in cty operations. +func CapsuleWithOps(name string, nativeType reflect.Type, ops *CapsuleOps) Type { + // Copy the operations to make sure the caller can't modify them after + // we're constructed. + ourOps := *ops + ourOps.assertValid() + + return Type{ + &capsuleType{ + Name: name, + GoType: nativeType, + Ops: &ourOps, + }, + } +} + +// IsCapsuleType returns true if this type is a capsule type, as created +// by cty.Capsule . +func (t Type) IsCapsuleType() bool { + _, ok := t.typeImpl.(*capsuleType) + return ok +} + +// EncapsulatedType returns the encapsulated native type of a capsule type, +// or panics if the receiver is not a Capsule type. +// +// Is IsCapsuleType to determine if this method is safe to call. +func (t Type) EncapsulatedType() reflect.Type { + impl, ok := t.typeImpl.(*capsuleType) + if !ok { + panic("not a capsule type") + } + return impl.GoType +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go b/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go new file mode 100644 index 000000000..3ff6855ec --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go @@ -0,0 +1,132 @@ +package cty + +import ( + "reflect" +) + +// CapsuleOps represents a set of overloaded operations for a capsule type. 
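A short sketch of the capsule-type API defined above, using the cty.CapsuleVal and Value.EncapsulatedValue helpers that the conversion code later in this patch relies on. The connection type and its friendly name are invented for illustration.

package main

import (
	"fmt"
	"reflect"

	"github.com/hashicorp/go-cty/cty"
)

// connection is a hypothetical native type an application wants to pass
// through the cty type system.
type connection struct {
	Host string
}

var connectionType = cty.Capsule("connection", reflect.TypeOf(connection{}))

func main() {
	conn := &connection{Host: "example.com"}

	// CapsuleVal wraps a pointer to a value of the capsule's native type.
	v := cty.CapsuleVal(connectionType, conn)

	if v.Type().IsCapsuleType() {
		// EncapsulatedValue returns the same pointer that was wrapped above.
		fmt.Println(v.EncapsulatedValue().(*connection).Host) // example.com
	}
}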
+// +// Each field is a reference to a function that can either be nil or can be +// set to an implementation of the corresponding operation. If an operation +// function is nil then it isn't supported for the given capsule type. +type CapsuleOps struct { + // GoString provides the GoString implementation for values of the + // corresponding type. Conventionally this should return a string + // representation of an expression that would produce an equivalent + // value. + GoString func(val interface{}) string + + // TypeGoString provides the GoString implementation for the corresponding + // capsule type itself. + TypeGoString func(goTy reflect.Type) string + + // Equals provides the implementation of the Equals operation. This is + // called only with known, non-null values of the corresponding type, + // but if the corresponding type is a compound type then it must be + // ready to detect and handle nested unknown or null values, usually + // by recursively calling Value.Equals on those nested values. + // + // The result value must always be of type cty.Bool, or the Equals + // operation will panic. + // + // If RawEquals is set without also setting Equals, the RawEquals + // implementation will be used as a fallback implementation. That fallback + // is appropriate only for leaf types that do not contain any nested + // cty.Value that would need to distinguish Equals vs. RawEquals for their + // own equality. + // + // If RawEquals is nil then Equals must also be nil, selecting the default + // pointer-identity comparison instead. + Equals func(a, b interface{}) Value + + // RawEquals provides the implementation of the RawEquals operation. + // This is called only with known, non-null values of the corresponding + // type, but if the corresponding type is a compound type then it must be + // ready to detect and handle nested unknown or null values, usually + // by recursively calling Value.RawEquals on those nested values. + // + // If RawEquals is nil, values of the corresponding type are compared by + // pointer identity of the encapsulated value. + RawEquals func(a, b interface{}) bool + + // ConversionFrom can provide conversions from the corresponding type to + // some other type when values of the corresponding type are used with + // the "convert" package. (The main cty package does not use this operation.) + // + // This function itself returns a function, allowing it to switch its + // behavior depending on the given source type. Return nil to indicate + // that no such conversion is available. + ConversionFrom func(src Type) func(interface{}, Path) (Value, error) + + // ConversionTo can provide conversions to the corresponding type from + // some other type when values of the corresponding type are used with + // the "convert" package. (The main cty package does not use this operation.) + // + // This function itself returns a function, allowing it to switch its + // behavior depending on the given destination type. Return nil to indicate + // that no such conversion is available. + ConversionTo func(dst Type) func(Value, Path) (interface{}, error) + + // ExtensionData is an extension point for applications that wish to + // create their own extension features using capsule types. + // + // The key argument is any value that can be compared with Go's == + // operator, but should be of a named type in a package belonging to the + // application defining the key. 
An ExtensionData implementation must + // check to see if the given key is familar to it, and if so return a + // suitable value for the key. + // + // If the given key is unrecognized, the ExtensionData function must + // return a nil interface. (Importantly, not an interface containing a nil + // pointer of some other type.) + // The common implementation of ExtensionData is a single switch statement + // over "key" which has a default case returning nil. + // + // The meaning of any given key is entirely up to the application that + // defines it. Applications consuming ExtensionData from capsule types + // should do so defensively: if the result of ExtensionData is not valid, + // prefer to ignore it or gracefully produce an error rather than causing + // a panic. + ExtensionData func(key interface{}) interface{} +} + +// noCapsuleOps is a pointer to a CapsuleOps with no functions set, which +// is used as the default operations value when a type is created using +// the Capsule function. +var noCapsuleOps = &CapsuleOps{} + +func (ops *CapsuleOps) assertValid() { + if ops.RawEquals == nil && ops.Equals != nil { + panic("Equals cannot be set without RawEquals") + } +} + +// CapsuleOps returns a pointer to the CapsuleOps value for a capsule type, +// or panics if the receiver is not a capsule type. +// +// The caller must not modify the CapsuleOps. +func (ty Type) CapsuleOps() *CapsuleOps { + if !ty.IsCapsuleType() { + panic("not a capsule-typed value") + } + + return ty.typeImpl.(*capsuleType).Ops +} + +// CapsuleExtensionData is a convenience interface to the ExtensionData +// function that can be optionally implemented for a capsule type. It will +// check to see if the underlying type implements ExtensionData and call it +// if so. If not, it will return nil to indicate that the given key is not +// supported. +// +// See the documentation for CapsuleOps.ExtensionData for more information +// on the purpose of and usage of this mechanism. +// +// If CapsuleExtensionData is called on a non-capsule type then it will panic. +func (ty Type) CapsuleExtensionData(key interface{}) interface{} { + ops := ty.CapsuleOps() + if ops.ExtensionData == nil { + return nil + } + return ops.ExtensionData(key) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/collection.go b/vendor/github.com/hashicorp/go-cty/cty/collection.go new file mode 100644 index 000000000..ab3919b14 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/collection.go @@ -0,0 +1,34 @@ +package cty + +import ( + "errors" +) + +type collectionTypeImpl interface { + ElementType() Type +} + +// IsCollectionType returns true if the given type supports the operations +// that are defined for all collection types. +func (t Type) IsCollectionType() bool { + _, ok := t.typeImpl.(collectionTypeImpl) + return ok +} + +// ElementType returns the element type of the receiver if it is a collection +// type, or panics if it is not. Use IsCollectionType first to test whether +// this method will succeed. +func (t Type) ElementType() Type { + if ct, ok := t.typeImpl.(collectionTypeImpl); ok { + return ct.ElementType() + } + panic(errors.New("not a collection type")) +} + +// ElementCallback is a callback type used for iterating over elements of +// collections and attributes of objects. +// +// The types of key and value depend on what type is being iterated over. +// Return true to stop iterating after the current element, or false to +// continue iterating. 
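The ExtensionData documentation above describes the conventional switch-over-key implementation; the sketch below follows that shape. The secret type, extensionKey type, and redactKey value are hypothetical application-defined names.

package main

import (
	"fmt"
	"reflect"

	"github.com/hashicorp/go-cty/cty"
)

// secret is a hypothetical application type carried in a capsule.
type secret struct {
	Value string
}

// extensionKey is a named key type owned by the application, per the docs above.
type extensionKey string

const redactKey extensionKey = "redact"

var secretType = cty.CapsuleWithOps("secret", reflect.TypeOf(secret{}), &cty.CapsuleOps{
	ExtensionData: func(key interface{}) interface{} {
		// Conventional implementation: switch over the key, default to nil.
		switch key {
		case redactKey:
			return true
		default:
			return nil
		}
	},
})

func main() {
	fmt.Println(secretType.CapsuleExtensionData(redactKey)) // true
	fmt.Println(secretType.CapsuleExtensionData("other"))   // <nil>
}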
+type ElementCallback func(key Value, val Value) (stop bool) diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go b/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go new file mode 100644 index 000000000..6ad3bff45 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go @@ -0,0 +1,165 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// compareTypes implements a preference order for unification. +// +// The result of this method is not useful for anything other than unification +// preferences, since it assumes that the caller will verify that any suggested +// conversion is actually possible and it is thus able to to make certain +// optimistic assumptions. +func compareTypes(a cty.Type, b cty.Type) int { + + // DynamicPseudoType always has lowest preference, because anything can + // convert to it (it acts as a placeholder for "any type") and we want + // to optimistically assume that any dynamics will converge on matching + // their neighbors. + if a == cty.DynamicPseudoType || b == cty.DynamicPseudoType { + if a != cty.DynamicPseudoType { + return -1 + } + if b != cty.DynamicPseudoType { + return 1 + } + return 0 + } + + if a.IsPrimitiveType() && b.IsPrimitiveType() { + // String is a supertype of all primitive types, because we can + // represent all primitive values as specially-formatted strings. + if a == cty.String || b == cty.String { + if a != cty.String { + return 1 + } + if b != cty.String { + return -1 + } + return 0 + } + } + + if a.IsListType() && b.IsListType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsSetType() && b.IsSetType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + if a.IsMapType() && b.IsMapType() { + return compareTypes(a.ElementType(), b.ElementType()) + } + + // From this point on we may have swapped the two items in order to + // simplify our cases. Therefore any non-zero return after this point + // must be multiplied by "swap" to potentially invert the return value + // if needed. + swap := 1 + switch { + case a.IsTupleType() && b.IsListType(): + fallthrough + case a.IsObjectType() && b.IsMapType(): + fallthrough + case a.IsSetType() && b.IsTupleType(): + fallthrough + case a.IsSetType() && b.IsListType(): + a, b = b, a + swap = -1 + } + + if b.IsSetType() && (a.IsTupleType() || a.IsListType()) { + // We'll just optimistically assume that the element types are + // unifyable/convertible, and let a second recursive pass + // figure out how to make that so. + return -1 * swap + } + + if a.IsListType() && b.IsTupleType() { + // We'll just optimistically assume that the tuple's element types + // can be unified into something compatible with the list's element + // type. + return -1 * swap + } + + if a.IsMapType() && b.IsObjectType() { + // We'll just optimistically assume that the object's attribute types + // can be unified into something compatible with the map's element + // type. + return -1 * swap + } + + // For object and tuple types, comparing two types doesn't really tell + // the whole story because it may be possible to construct a new type C + // that is the supertype of both A and B by unifying each attribute/element + // separately. That possibility is handled by Unify as a follow-up if + // type sorting is insufficient to produce a valid result. + // + // Here we will take care of the simple possibilities where no new type + // is needed. 
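ElementCallback is consumed by the ForEachElement iteration helper that the conversion comments later in this patch refer to; a brief sketch, assuming that helper keeps its documented callback shape.

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	names := cty.ListVal([]cty.Value{
		cty.StringVal("alpha"),
		cty.StringVal("beta"),
	})

	// The literal matches cty.ElementCallback: return true to stop early.
	names.ForEachElement(func(key cty.Value, val cty.Value) bool {
		fmt.Println(val.AsString())
		return false // keep iterating
	})
}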
+ if a.IsObjectType() && b.IsObjectType() { + atysA := a.AttributeTypes() + atysB := b.AttributeTypes() + + if len(atysA) != len(atysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for k := range atysA { + if _, has := atysB[k]; !has { + return 0 + } + + cmp := compareTypes(atysA[k], atysB[k]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + if a.IsTupleType() && b.IsTupleType() { + etysA := a.TupleElementTypes() + etysB := b.TupleElementTypes() + + if len(etysA) != len(etysB) { + return 0 + } + + hasASuper := false + hasBSuper := false + for i := range etysA { + cmp := compareTypes(etysA[i], etysB[i]) + if cmp < 0 { + hasASuper = true + } else if cmp > 0 { + hasBSuper = true + } + } + + switch { + case hasASuper && hasBSuper: + return 0 + case hasASuper: + return -1 * swap + case hasBSuper: + return 1 * swap + default: + return 0 + } + } + + return 0 +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go new file mode 100644 index 000000000..9c59c8f74 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go @@ -0,0 +1,190 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversion is an internal variant of Conversion that carries around +// a cty.Path to be used in error responses. +type conversion func(cty.Value, cty.Path) (cty.Value, error) + +func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion { + conv := getConversionKnown(in, out, unsafe) + if conv == nil { + return nil + } + + // Wrap the conversion in some standard checks that we don't want to + // have to repeat in every conversion function. + var ret conversion + ret = func(in cty.Value, path cty.Path) (cty.Value, error) { + if in.IsMarked() { + // We must unmark during the conversion and then re-apply the + // same marks to the result. + in, inMarks := in.Unmark() + v, err := ret(in, path) + if v != cty.NilVal { + v = v.WithMarks(inMarks) + } + return v, err + } + + if out == cty.DynamicPseudoType { + // Conversion to DynamicPseudoType always just passes through verbatim. + return in, nil + } + if !in.IsKnown() { + return cty.UnknownVal(out), nil + } + if in.IsNull() { + // We'll pass through nulls, albeit type converted, and let + // the caller deal with whatever handling they want to do in + // case null values are considered valid in some applications. + return cty.NullVal(out), nil + } + + return conv(in, path) + } + + return ret +} + +func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion { + switch { + + case out == cty.DynamicPseudoType: + // Conversion *to* DynamicPseudoType means that the caller wishes + // to allow any type in this position, so we'll produce a do-nothing + // conversion that just passes through the value as-is. + return dynamicPassthrough + + case unsafe && in == cty.DynamicPseudoType: + // Conversion *from* DynamicPseudoType means that we have a value + // whose type isn't yet known during type checking. For these we will + // assume that conversion will succeed and deal with any errors that + // result (which is why we can only do this when "unsafe" is set). 
+ return dynamicFixup(out) + + case in.IsPrimitiveType() && out.IsPrimitiveType(): + conv := primitiveConversionsSafe[in][out] + if conv != nil { + return conv + } + if unsafe { + return primitiveConversionsUnsafe[in][out] + } + return nil + + case out.IsObjectType() && in.IsObjectType(): + return conversionObjectToObject(in, out, unsafe) + + case out.IsTupleType() && in.IsTupleType(): + return conversionTupleToTuple(in, out, unsafe) + + case out.IsListType() && (in.IsListType() || in.IsSetType()): + inEty := in.ElementType() + outEty := out.ElementType() + if inEty.Equals(outEty) { + // This indicates that we're converting from list to set with + // the same element type, so we don't need an element converter. + return conversionCollectionToList(outEty, nil) + } + + convEty := getConversion(inEty, outEty, unsafe) + if convEty == nil { + return nil + } + return conversionCollectionToList(outEty, convEty) + + case out.IsSetType() && (in.IsListType() || in.IsSetType()): + if in.IsListType() && !unsafe { + // Conversion from list to map is unsafe because it will lose + // information: the ordering will not be preserved, and any + // duplicate elements will be conflated. + return nil + } + inEty := in.ElementType() + outEty := out.ElementType() + convEty := getConversion(inEty, outEty, unsafe) + if inEty.Equals(outEty) { + // This indicates that we're converting from set to list with + // the same element type, so we don't need an element converter. + return conversionCollectionToSet(outEty, nil) + } + + if convEty == nil { + return nil + } + return conversionCollectionToSet(outEty, convEty) + + case out.IsMapType() && in.IsMapType(): + inEty := in.ElementType() + outEty := out.ElementType() + convEty := getConversion(inEty, outEty, unsafe) + if convEty == nil { + return nil + } + return conversionCollectionToMap(outEty, convEty) + + case out.IsListType() && in.IsTupleType(): + outEty := out.ElementType() + return conversionTupleToList(in, outEty, unsafe) + + case out.IsSetType() && in.IsTupleType(): + outEty := out.ElementType() + return conversionTupleToSet(in, outEty, unsafe) + + case out.IsMapType() && in.IsObjectType(): + outEty := out.ElementType() + return conversionObjectToMap(in, outEty, unsafe) + + case out.IsObjectType() && in.IsMapType(): + if !unsafe { + // Converting a map to an object is an "unsafe" conversion, + // because we don't know if all the map keys will correspond to + // object attributes. + return nil + } + return conversionMapToObject(in, out, unsafe) + + case in.IsCapsuleType() || out.IsCapsuleType(): + if !unsafe { + // Capsule types can only participate in "unsafe" conversions, + // because we don't know enough about their conversion behaviors + // to be sure that they will always be safe. + return nil + } + if in.Equals(out) { + // conversion to self is never allowed + return nil + } + if out.IsCapsuleType() { + if fn := out.CapsuleOps().ConversionTo; fn != nil { + return conversionToCapsule(in, out, fn) + } + } + if in.IsCapsuleType() { + if fn := in.CapsuleOps().ConversionFrom; fn != nil { + return conversionFromCapsule(in, out, fn) + } + } + // No conversion operation is available, then. + return nil + + default: + return nil + + } +} + +// retConversion wraps a conversion (internal type) so it can be returned +// as a Conversion (public type). 
+func retConversion(conv conversion) Conversion { + if conv == nil { + return nil + } + + return func(in cty.Value) (cty.Value, error) { + return conv(in, cty.Path(nil)) + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go new file mode 100644 index 000000000..6a6006af9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go @@ -0,0 +1,31 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +func conversionToCapsule(inTy, outTy cty.Type, fn func(inTy cty.Type) func(cty.Value, cty.Path) (interface{}, error)) conversion { + rawConv := fn(inTy) + if rawConv == nil { + return nil + } + + return func(in cty.Value, path cty.Path) (cty.Value, error) { + rawV, err := rawConv(in, path) + if err != nil { + return cty.NilVal, err + } + return cty.CapsuleVal(outTy, rawV), nil + } +} + +func conversionFromCapsule(inTy, outTy cty.Type, fn func(outTy cty.Type) func(interface{}, cty.Path) (cty.Value, error)) conversion { + rawConv := fn(outTy) + if rawConv == nil { + return nil + } + + return func(in cty.Value, path cty.Path) (cty.Value, error) { + return rawConv(in.EncapsulatedValue(), path) + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go new file mode 100644 index 000000000..575973d3c --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go @@ -0,0 +1,488 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversionCollectionToList returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a list. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a list. (For example, +// if we're converting from a set into a list of the same element type.) +func conversionCollectionToList(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + if ety == cty.DynamicPseudoType { + ety = val.Type().ElementType() + } + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(elems), nil + } +} + +// conversionCollectionToSet returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a set. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a set. (For example, +// if we're converting from a list into a set of the same element type.) 
+func conversionCollectionToSet(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, val.LengthInt()) + i := int64(0) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + if len(elems) == 0 { + // Prefer a concrete type over a dynamic type when returning an + // empty set + if ety == cty.DynamicPseudoType { + ety = val.Type().ElementType() + } + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(elems), nil + } +} + +// conversionCollectionToMap returns a conversion that will apply the given +// conversion to all of the elements of a collection (something that supports +// ForEachElement and LengthInt) and then returns the result as a map. +// +// "conv" can be nil if the elements are expected to already be of the +// correct type and just need to be re-wrapped into a map. +func conversionCollectionToMap(ety cty.Type, conv conversion) conversion { + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, 0) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: key, + } + + keyStr, err := Convert(key, cty.String) + if err != nil { + // Should never happen, because keys can only be numbers or + // strings and both can convert to string. + return cty.DynamicVal, elemPath.NewErrorf("cannot convert key type %s to string for map", key.Type().FriendlyName()) + } + + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + + elems[keyStr.AsString()] = val + } + + if len(elems) == 0 { + // Prefer a concrete type over a dynamic type when returning an + // empty map + if ety == cty.DynamicPseudoType { + ety = val.Type().ElementType() + } + return cty.MapValEmpty(ety), nil + } + + if ety.IsCollectionType() || ety.IsObjectType() { + var err error + if elems, err = conversionUnifyCollectionElements(elems, path, false); err != nil { + return cty.NilVal, err + } + } + + if err := conversionCheckMapElementTypes(elems, path); err != nil { + return cty.NilVal, err + } + + return cty.MapVal(elems), nil + } +} + +// conversionTupleToSet returns a conversion that will take a value of the +// given tuple type and return a set of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. +func conversionTupleToSet(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion { + tupleEtys := tupleType.TupleElementTypes() + + if len(tupleEtys) == 0 { + // Empty tuple short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.SetValEmpty(listEty), nil + } + } + + if listEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. 
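To illustrate the collection converters above through the package's public entry points (defined later in public.go): set-to-list of the same element type needs no per-element conversion, while list-to-set is only offered as an unsafe conversion. cty.List and cty.Set are the standard cty collection type constructors, not shown in this hunk.

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/convert"
)

func main() {
	s := cty.SetVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")})

	// Set -> list of the same element type is a safe conversion.
	l, err := convert.Convert(s, cty.List(cty.String))
	fmt.Println(l.LengthInt(), err) // 2 <nil>

	// List -> set loses ordering and conflates duplicates, so only the
	// unsafe lookup offers it.
	fmt.Println(convert.GetConversion(cty.List(cty.String), cty.Set(cty.String)) != nil)       // false
	fmt.Println(convert.GetConversionUnsafe(cty.List(cty.String), cty.Set(cty.String)) != nil) // true
}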
+ listEty, _ = unify(tupleEtys, unsafe) + if listEty == cty.NilType { + return nil + } + } + + elemConvs := make([]conversion, len(tupleEtys)) + for i, tupleEty := range tupleEtys { + if tupleEty.Equals(listEty) { + // no conversion required + continue + } + + elemConvs[i] = getConversion(tupleEty, listEty, unsafe) + if elemConvs[i] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, len(elemConvs)) + elemPath := append(path.Copy(), nil) + i := int64(0) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + conv := elemConvs[i] + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + return cty.SetVal(elems), nil + } +} + +// conversionTupleToList returns a conversion that will take a value of the +// given tuple type and return a list of the given element type. +// +// Will panic if the given tupleType isn't actually a tuple type. +func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion { + tupleEtys := tupleType.TupleElementTypes() + + if len(tupleEtys) == 0 { + // Empty tuple short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.ListValEmpty(listEty), nil + } + } + + if listEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. + listEty, _ = unify(tupleEtys, unsafe) + if listEty == cty.NilType { + return nil + } + } + + elemConvs := make([]conversion, len(tupleEtys)) + for i, tupleEty := range tupleEtys { + if tupleEty.Equals(listEty) { + // no conversion required + continue + } + + elemConvs[i] = getConversion(tupleEty, listEty, unsafe) + if elemConvs[i] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make([]cty.Value, 0, len(elemConvs)) + elemPath := append(path.Copy(), nil) + i := int64(0) + it := val.ElementIterator() + for it.Next() { + _, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(i), + } + + conv := elemConvs[i] + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems = append(elems, val) + + i++ + } + + return cty.ListVal(elems), nil + } +} + +// conversionObjectToMap returns a conversion that will take a value of the +// given object type and return a map of the given element type. +// +// Will panic if the given objectType isn't actually an object type. 
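A sketch of the tuple-to-list path above, again via the public Convert: requesting a list of cty.DynamicPseudoType makes the converter unify the tuple's element types first, which for a string and a number should settle on string. cty.List is the standard constructor, not shown in this hunk.

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/convert"
)

func main() {
	tup := cty.TupleVal([]cty.Value{cty.StringVal("a"), cty.NumberIntVal(1)})

	// list(any) asks conversionTupleToList to unify the element types first.
	l, err := convert.Convert(tup, cty.List(cty.DynamicPseudoType))
	if err != nil {
		panic(err)
	}
	fmt.Println(l.Type().Equals(cty.List(cty.String))) // true: elements become "a" and "1"
}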
+func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) conversion { + objectAtys := objectType.AttributeTypes() + + if len(objectAtys) == 0 { + // Empty object short-circuit + return func(val cty.Value, path cty.Path) (cty.Value, error) { + return cty.MapValEmpty(mapEty), nil + } + } + + if mapEty == cty.DynamicPseudoType { + // This is a special case where the caller wants us to find + // a suitable single type that all elements can convert to, if + // possible. + objectAtysList := make([]cty.Type, 0, len(objectAtys)) + for _, aty := range objectAtys { + objectAtysList = append(objectAtysList, aty) + } + mapEty, _ = unify(objectAtysList, unsafe) + if mapEty == cty.NilType { + return nil + } + } + + elemConvs := make(map[string]conversion, len(objectAtys)) + for name, objectAty := range objectAtys { + if objectAty.Equals(mapEty) { + // no conversion required + continue + } + + elemConvs[name] = getConversion(objectAty, mapEty, unsafe) + if elemConvs[name] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. + return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, len(elemConvs)) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + name, val := it.Element() + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: name, + } + + conv := elemConvs[name.AsString()] + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + elems[name.AsString()] = val + } + + if mapEty.IsCollectionType() || mapEty.IsObjectType() { + var err error + if elems, err = conversionUnifyCollectionElements(elems, path, unsafe); err != nil { + return cty.NilVal, err + } + } + + if err := conversionCheckMapElementTypes(elems, path); err != nil { + return cty.NilVal, err + } + + return cty.MapVal(elems), nil + } +} + +// conversionMapToObject returns a conversion that will take a value of the +// given map type and return an object of the given type. The object attribute +// types must all be compatible with the map element type. +// +// Will panic if the given mapType and objType are not maps and objects +// respectively. +func conversionMapToObject(mapType cty.Type, objType cty.Type, unsafe bool) conversion { + objectAtys := objType.AttributeTypes() + mapEty := mapType.ElementType() + + elemConvs := make(map[string]conversion, len(objectAtys)) + for name, objectAty := range objectAtys { + if objectAty.Equals(mapEty) { + // no conversion required + continue + } + + elemConvs[name] = getConversion(mapEty, objectAty, unsafe) + if elemConvs[name] == nil { + // If any of our element conversions are impossible, then the our + // whole conversion is impossible. 
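A sketch of the object-to-map path above: each attribute is converted to the map's element type, so an object with a string and a number attribute becomes a map of string. cty.Map is the standard constructor, not shown in this hunk.

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/convert"
)

func main() {
	obj := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("web"),
		"port": cty.NumberIntVal(8080),
	})

	// The "port" number is converted to the string "8080" on the way in.
	m, err := convert.Convert(obj, cty.Map(cty.String))
	fmt.Println(m.LengthInt(), err) // 2 <nil>
}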
+ return nil + } + } + + // If we fall out here then a conversion is possible, using the + // element conversions in elemConvs + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elems := make(map[string]cty.Value, len(elemConvs)) + elemPath := append(path.Copy(), nil) + it := val.ElementIterator() + for it.Next() { + name, val := it.Element() + + // if there is no corresponding attribute, we skip this key + if _, ok := objectAtys[name.AsString()]; !ok { + continue + } + + var err error + + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: name, + } + + conv := elemConvs[name.AsString()] + if conv != nil { + val, err = conv(val, elemPath) + if err != nil { + return cty.NilVal, err + } + } + + elems[name.AsString()] = val + } + + return cty.ObjectVal(elems), nil + } +} + +func conversionUnifyCollectionElements(elems map[string]cty.Value, path cty.Path, unsafe bool) (map[string]cty.Value, error) { + elemTypes := make([]cty.Type, 0, len(elems)) + for _, elem := range elems { + elemTypes = append(elemTypes, elem.Type()) + } + unifiedType, _ := unify(elemTypes, unsafe) + if unifiedType == cty.NilType { + } + + unifiedElems := make(map[string]cty.Value) + elemPath := append(path.Copy(), nil) + + for name, elem := range elems { + if elem.Type().Equals(unifiedType) { + unifiedElems[name] = elem + continue + } + conv := getConversion(elem.Type(), unifiedType, unsafe) + if conv == nil { + } + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.StringVal(name), + } + val, err := conv(elem, elemPath) + if err != nil { + return nil, err + } + unifiedElems[name] = val + } + + return unifiedElems, nil +} + +func conversionCheckMapElementTypes(elems map[string]cty.Value, path cty.Path) error { + elementType := cty.NilType + elemPath := append(path.Copy(), nil) + + for name, elem := range elems { + if elementType == cty.NilType { + elementType = elem.Type() + continue + } + if !elementType.Equals(elem.Type()) { + elemPath[len(elemPath)-1] = cty.IndexStep{ + Key: cty.StringVal(name), + } + return elemPath.NewErrorf("%s is required", elementType.FriendlyName()) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go new file mode 100644 index 000000000..5f571da13 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go @@ -0,0 +1,33 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// dynamicFixup deals with just-in-time conversions of values that were +// input-typed as cty.DynamicPseudoType during analysis, ensuring that +// we end up with the desired output type once the value is known, or +// failing with an error if that is not possible. +// +// This is in the spirit of the cty philosophy of optimistically assuming that +// DynamicPseudoType values will become the intended value eventually, and +// dealing with any inconsistencies during final evaluation. +func dynamicFixup(wantType cty.Type) conversion { + return func(in cty.Value, path cty.Path) (cty.Value, error) { + ret, err := Convert(in, wantType) + if err != nil { + // Re-wrap this error so that the returned path is relative + // to the caller's original value, rather than relative to our + // conversion value here. 
+ return cty.NilVal, path.NewError(err) + } + return ret, nil + } +} + +// dynamicPassthrough is an identity conversion that is used when the +// target type is DynamicPseudoType, indicating that the caller doesn't care +// which type is returned. +func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) { + return in, nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go new file mode 100644 index 000000000..93743ca82 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go @@ -0,0 +1,76 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversionObjectToObject returns a conversion that will make the input +// object type conform to the output object type, if possible. +// +// Conversion is possible only if the output type is a subset of the input +// type, meaning that each attribute of the output type has a corresponding +// attribute in the input type where a recursive conversion is available. +// +// Shallow object conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit the above definition of "subset". +func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion { + inAtys := in.AttributeTypes() + outAtys := out.AttributeTypes() + attrConvs := make(map[string]conversion) + + for name, outAty := range outAtys { + inAty, exists := inAtys[name] + if !exists { + // No conversion is available, then. + return nil + } + + if inAty.Equals(outAty) { + // No conversion needed, but we'll still record the attribute + // in our map for later reference. + attrConvs[name] = nil + continue + } + + attrConvs[name] = getConversion(inAty, outAty, unsafe) + if attrConvs[name] == nil { + // If a recursive conversion isn't available, then our top-level + // configuration is impossible too. + return nil + } + } + + // If we get here then a conversion is possible, using the attribute + // conversions given in attrConvs. 
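The two dynamic-type behaviors above, shown through the public Convert wrapper: conversion to DynamicPseudoType passes the value through untouched, while conversion from an unknown dynamic value is deferred by returning an unknown of the requested type.

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/convert"
)

func main() {
	// To DynamicPseudoType: verbatim passthrough.
	v, _ := convert.Convert(cty.StringVal("x"), cty.DynamicPseudoType)
	fmt.Println(v.AsString()) // x

	// From an unknown dynamic value: the real conversion is deferred and an
	// unknown value of the target type comes back for now.
	u, _ := convert.Convert(cty.DynamicVal, cty.Number)
	fmt.Println(u.Type().Equals(cty.Number), u.IsKnown()) // true false
}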
+ return func(val cty.Value, path cty.Path) (cty.Value, error) { + attrVals := make(map[string]cty.Value, len(attrConvs)) + path = append(path, nil) + pathStep := &path[len(path)-1] + + for it := val.ElementIterator(); it.Next(); { + nameVal, val := it.Element() + var err error + + name := nameVal.AsString() + *pathStep = cty.GetAttrStep{ + Name: name, + } + + conv, exists := attrConvs[name] + if !exists { + continue + } + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + attrVals[name] = val + } + + return cty.ObjectVal(attrVals), nil + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go new file mode 100644 index 000000000..a55344413 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go @@ -0,0 +1,57 @@ +package convert + +import ( + "strings" + + "github.com/hashicorp/go-cty/cty" +) + +var stringTrue = cty.StringVal("true") +var stringFalse = cty.StringVal("false") + +var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{ + cty.Number: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + f := val.AsBigFloat() + return cty.StringVal(f.Text('f', -1)), nil + }, + }, + cty.Bool: { + cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) { + if val.True() { + return stringTrue, nil + } else { + return stringFalse, nil + } + }, + }, +} + +var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{ + cty.String: { + cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) { + v, err := cty.ParseNumberVal(val.AsString()) + if err != nil { + return cty.NilVal, path.NewErrorf("a number is required") + } + return v, nil + }, + cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) { + switch val.AsString() { + case "true", "1": + return cty.True, nil + case "false", "0": + return cty.False, nil + default: + switch strings.ToLower(val.AsString()) { + case "true": + return cty.NilVal, path.NewErrorf("a bool is required; to convert from string, use lowercase \"true\"") + case "false": + return cty.NilVal, path.NewErrorf("a bool is required; to convert from string, use lowercase \"false\"") + default: + return cty.NilVal, path.NewErrorf("a bool is required") + } + } + }, + }, +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go new file mode 100644 index 000000000..d89ec3808 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go @@ -0,0 +1,71 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// conversionTupleToTuple returns a conversion that will make the input +// tuple type conform to the output tuple type, if possible. +// +// Conversion is possible only if the two tuple types have the same number +// of elements and the corresponding elements by index can be converted. +// +// Shallow tuple conversions work the same for both safe and unsafe modes, +// but the safety flag is passed on to recursive conversions and may thus +// limit which element type conversions are possible. 
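The primitive tables above translate directly into Convert behavior; a small sketch of the safe number-to-string direction and the unsafe string-to-bool direction:

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/convert"
)

func main() {
	// Safe: number -> string always succeeds.
	s, _ := convert.Convert(cty.NumberIntVal(5), cty.String)
	fmt.Println(s.AsString()) // 5

	// Unsafe: string -> bool succeeds only for the spellings handled above.
	b, _ := convert.Convert(cty.StringVal("1"), cty.Bool)
	fmt.Println(b.True()) // true

	_, err := convert.Convert(cty.StringVal("maybe"), cty.Bool)
	fmt.Println(err != nil) // true: "a bool is required"
}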
+func conversionTupleToTuple(in, out cty.Type, unsafe bool) conversion { + inEtys := in.TupleElementTypes() + outEtys := out.TupleElementTypes() + + if len(inEtys) != len(outEtys) { + return nil // no conversion is possible + } + + elemConvs := make([]conversion, len(inEtys)) + + for i, outEty := range outEtys { + inEty := inEtys[i] + + if inEty.Equals(outEty) { + // No conversion needed, so we can leave this one nil. + continue + } + + elemConvs[i] = getConversion(inEty, outEty, unsafe) + if elemConvs[i] == nil { + // If a recursive conversion isn't available, then our top-level + // configuration is impossible too. + return nil + } + } + + // If we get here then a conversion is possible, using the element + // conversions given in elemConvs. + return func(val cty.Value, path cty.Path) (cty.Value, error) { + elemVals := make([]cty.Value, len(elemConvs)) + path = append(path, nil) + pathStep := &path[len(path)-1] + + i := 0 + for it := val.ElementIterator(); it.Next(); i++ { + _, val := it.Element() + var err error + + *pathStep = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + conv := elemConvs[i] + if conv != nil { + val, err = conv(val, path) + if err != nil { + return cty.NilVal, err + } + } + + elemVals[i] = val + } + + return cty.TupleVal(elemVals), nil + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go b/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go new file mode 100644 index 000000000..2037299ba --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go @@ -0,0 +1,15 @@ +// Package convert contains some routines for converting between cty types. +// The intent of providing this package is to encourage applications using +// cty to have consistent type conversion behavior for maximal interoperability +// when Values pass from one application to another. +// +// The conversions are categorized into two categories. "Safe" conversions are +// ones that are guaranteed to succeed if given a non-null value of the +// appropriate source type. "Unsafe" conversions, on the other hand, are valid +// for only a subset of input values, and thus may fail with an error when +// called for values outside of that valid subset. +// +// The functions whose names end in Unsafe support all of the conversions that +// are supported by the corresponding functions whose names do not have that +// suffix, and then additional unsafe conversions as well. +package convert diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go b/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go new file mode 100644 index 000000000..72f307f20 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go @@ -0,0 +1,220 @@ +package convert + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/go-cty/cty" +) + +// MismatchMessage is a helper to return an English-language description of +// the differences between got and want, phrased as a reason why got does +// not conform to want. +// +// This function does not itself attempt conversion, and so it should generally +// be used only after a conversion has failed, to report the conversion failure +// to an English-speaking user. The result will be confusing got is actually +// conforming to or convertable to want. +// +// The shorthand helper function Convert uses this function internally to +// produce its error messages, so callers of that function do not need to +// also use MismatchMessage. 
+// +// This function is similar to Type.TestConformance, but it is tailored to +// describing conversion failures and so the messages it generates relate +// specifically to the conversion rules implemented in this package. +func MismatchMessage(got, want cty.Type) string { + switch { + + case got.IsObjectType() && want.IsObjectType(): + // If both types are object types then we may be able to say something + // about their respective attributes. + return mismatchMessageObjects(got, want) + + case got.IsTupleType() && want.IsListType() && want.ElementType() == cty.DynamicPseudoType: + // If conversion from tuple to list failed then it's because we couldn't + // find a common type to convert all of the tuple elements to. + return "all list elements must have the same type" + + case got.IsTupleType() && want.IsSetType() && want.ElementType() == cty.DynamicPseudoType: + // If conversion from tuple to set failed then it's because we couldn't + // find a common type to convert all of the tuple elements to. + return "all set elements must have the same type" + + case got.IsObjectType() && want.IsMapType() && want.ElementType() == cty.DynamicPseudoType: + // If conversion from object to map failed then it's because we couldn't + // find a common type to convert all of the object attributes to. + return "all map elements must have the same type" + + case (got.IsTupleType() || got.IsObjectType()) && want.IsCollectionType(): + return mismatchMessageCollectionsFromStructural(got, want) + + case got.IsCollectionType() && want.IsCollectionType(): + return mismatchMessageCollectionsFromCollections(got, want) + + default: + // If we have nothing better to say, we'll just state what was required. + return want.FriendlyNameForConstraint() + " required" + } +} + +func mismatchMessageObjects(got, want cty.Type) string { + // Per our conversion rules, "got" is allowed to be a superset of "want", + // and so we'll produce error messages here under that assumption. + gotAtys := got.AttributeTypes() + wantAtys := want.AttributeTypes() + + // If we find missing attributes then we'll report those in preference, + // but if not then we will report a maximum of one non-conforming + // attribute, just to keep our messages relatively terse. + // We'll also prefer to report a recursive type error from an _unsafe_ + // conversion over a safe one, because these are subjectively more + // "serious". + var missingAttrs []string + var unsafeMismatchAttr string + var safeMismatchAttr string + + for name, wantAty := range wantAtys { + gotAty, exists := gotAtys[name] + if !exists { + missingAttrs = append(missingAttrs, name) + continue + } + + // We'll now try to convert these attributes in isolation and + // see if we have a nested conversion error to report. + // We'll try an unsafe conversion first, and then fall back on + // safe if unsafe is possible. + + // If we already have an unsafe mismatch attr error then we won't bother + // hunting for another one. + if unsafeMismatchAttr != "" { + continue + } + if conv := GetConversionUnsafe(gotAty, wantAty); conv == nil { + unsafeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty)) + } + + // If we already have a safe mismatch attr error then we won't bother + // hunting for another one. 
+ if safeMismatchAttr != "" { + continue + } + if conv := GetConversion(gotAty, wantAty); conv == nil { + safeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty)) + } + } + + // We should now have collected at least one problem. If we have more than + // one then we'll use our preference order to decide what is most important + // to report. + switch { + + case len(missingAttrs) != 0: + sort.Strings(missingAttrs) + switch len(missingAttrs) { + case 1: + return fmt.Sprintf("attribute %q is required", missingAttrs[0]) + case 2: + return fmt.Sprintf("attributes %q and %q are required", missingAttrs[0], missingAttrs[1]) + default: + sort.Strings(missingAttrs) + var buf bytes.Buffer + for _, name := range missingAttrs[:len(missingAttrs)-1] { + fmt.Fprintf(&buf, "%q, ", name) + } + fmt.Fprintf(&buf, "and %q", missingAttrs[len(missingAttrs)-1]) + return fmt.Sprintf("attributes %s are required", buf.Bytes()) + } + + case unsafeMismatchAttr != "": + return unsafeMismatchAttr + + case safeMismatchAttr != "": + return safeMismatchAttr + + default: + // We should never get here, but if we do then we'll return + // just a generic message. + return "incorrect object attributes" + } +} + +func mismatchMessageCollectionsFromStructural(got, want cty.Type) string { + // First some straightforward cases where the kind is just altogether wrong. + switch { + case want.IsListType() && !got.IsTupleType(): + return want.FriendlyNameForConstraint() + " required" + case want.IsSetType() && !got.IsTupleType(): + return want.FriendlyNameForConstraint() + " required" + case want.IsMapType() && !got.IsObjectType(): + return want.FriendlyNameForConstraint() + " required" + } + + // If the kinds are matched well enough then we'll move on to checking + // individual elements. + wantEty := want.ElementType() + switch { + case got.IsTupleType(): + for i, gotEty := range got.TupleElementTypes() { + if gotEty.Equals(wantEty) { + continue // exact match, so no problem + } + if conv := getConversion(gotEty, wantEty, true); conv != nil { + continue // conversion is available, so no problem + } + return fmt.Sprintf("element %d: %s", i, MismatchMessage(gotEty, wantEty)) + } + + // If we get down here then something weird is going on but we'll + // return a reasonable fallback message anyway. + return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint()) + + case got.IsObjectType(): + for name, gotAty := range got.AttributeTypes() { + if gotAty.Equals(wantEty) { + continue // exact match, so no problem + } + if conv := getConversion(gotAty, wantEty, true); conv != nil { + continue // conversion is available, so no problem + } + return fmt.Sprintf("element %q: %s", name, MismatchMessage(gotAty, wantEty)) + } + + // If we get down here then something weird is going on but we'll + // return a reasonable fallback message anyway. + return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint()) + + default: + // Should not be possible to get here since we only call this function + // with got as structural types, but... + return want.FriendlyNameForConstraint() + " required" + } +} + +func mismatchMessageCollectionsFromCollections(got, want cty.Type) string { + // First some straightforward cases where the kind is just altogether wrong. 
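A sketch of how MismatchMessage reads in practice for the object case handled above; cty.Object is the standard object type constructor, not shown in this hunk.

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/convert"
)

func main() {
	got := cty.Object(map[string]cty.Type{"name": cty.String})
	want := cty.Object(map[string]cty.Type{"name": cty.String, "port": cty.Number})

	// Missing attributes are reported in preference to nested mismatches.
	fmt.Println(convert.MismatchMessage(got, want)) // attribute "port" is required
}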
+ switch { + case want.IsListType() && !(got.IsListType() || got.IsSetType()): + return want.FriendlyNameForConstraint() + " required" + case want.IsSetType() && !(got.IsListType() || got.IsSetType()): + return want.FriendlyNameForConstraint() + " required" + case want.IsMapType() && !got.IsMapType(): + return want.FriendlyNameForConstraint() + " required" + } + + // If the kinds are matched well enough then we'll check the element types. + gotEty := got.ElementType() + wantEty := want.ElementType() + noun := "element type" + switch { + case want.IsListType(): + noun = "list element type" + case want.IsSetType(): + noun = "set element type" + case want.IsMapType(): + noun = "map element type" + } + return fmt.Sprintf("incorrect %s: %s", noun, MismatchMessage(gotEty, wantEty)) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/public.go b/vendor/github.com/hashicorp/go-cty/cty/convert/public.go new file mode 100644 index 000000000..3b50a6926 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/public.go @@ -0,0 +1,83 @@ +package convert + +import ( + "errors" + + "github.com/hashicorp/go-cty/cty" +) + +// This file contains the public interface of this package, which is intended +// to be a small, convenient interface designed for easy integration into +// a hypothetical language type checker and interpreter. + +// Conversion is a named function type representing a conversion from a +// value of one type to a value of another type. +// +// The source type for a conversion is always the source type given to +// the function that returned the Conversion, but there is no way to recover +// that from a Conversion value itself. If a Conversion is given a value +// that is not of its expected type (with the exception of DynamicPseudoType, +// which is always supported) then the function may panic or produce undefined +// results. +type Conversion func(in cty.Value) (out cty.Value, err error) + +// GetConversion returns a Conversion between the given in and out Types if +// a safe one is available, or returns nil otherwise. +func GetConversion(in cty.Type, out cty.Type) Conversion { + return retConversion(getConversion(in, out, false)) +} + +// GetConversionUnsafe returns a Conversion between the given in and out Types +// if either a safe or unsafe one is available, or returns nil otherwise. +func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion { + return retConversion(getConversion(in, out, true)) +} + +// Convert returns the result of converting the given value to the given type +// if an safe or unsafe conversion is available, or returns an error if such a +// conversion is impossible. +// +// This is a convenience wrapper around calling GetConversionUnsafe and then +// immediately passing the given value to the resulting function. +func Convert(in cty.Value, want cty.Type) (cty.Value, error) { + if in.Type().Equals(want) { + return in, nil + } + + conv := GetConversionUnsafe(in.Type(), want) + if conv == nil { + return cty.NilVal, errors.New(MismatchMessage(in.Type(), want)) + } + return conv(in) +} + +// Unify attempts to find the most general type that can be converted from +// all of the given types. If this is possible, that type is returned along +// with a slice of necessary conversions for some of the given types. +// +// If no common supertype can be found, this function returns cty.NilType and +// a nil slice. 
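+//
+// A minimal illustrative sketch (hedged; the exact outcome depends on the
+// conversion rules in this package) for a caller unifying a number with a
+// string:
+//
+//	ty, convs := convert.Unify([]cty.Type{cty.Number, cty.String})
+//	// ty is expected to be cty.String, with convs[0] converting the
+//	// number and convs[1] nil because the string needs no conversion.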
+// +// If a common supertype *can* be found, the returned slice will always be +// non-nil and will contain a non-nil conversion for each given type that +// needs to be converted, with indices corresponding to the input slice. +// Any given type that does *not* need conversion (because it is already of +// the appropriate type) will have a nil Conversion. +// +// cty.DynamicPseudoType is, as usual, a special case. If the given type list +// contains a mixture of dynamic and non-dynamic types, the dynamic types are +// disregarded for type selection and a conversion is returned for them that +// will attempt a late conversion of the given value to the target type, +// failing with a conversion error if the eventual concrete type is not +// compatible. If *all* given types are DynamicPseudoType, or in the +// degenerate case of an empty slice of types, the returned type is itself +// cty.DynamicPseudoType and no conversions are attempted. +func Unify(types []cty.Type) (cty.Type, []Conversion) { + return unify(types, false) +} + +// UnifyUnsafe is the same as Unify except that it may return unsafe +// conversions in situations where a safe conversion isn't also available. +func UnifyUnsafe(types []cty.Type) (cty.Type, []Conversion) { + return unify(types, true) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go b/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go new file mode 100644 index 000000000..8a9c32766 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go @@ -0,0 +1,69 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// sortTypes produces an ordering of the given types that serves as a +// preference order for the result of unification of the given types. +// The return value is a slice of indices into the given slice, and will +// thus always be the same length as the given slice. +// +// The goal is that the most general of the given types will appear first +// in the ordering. If there are uncomparable pairs of types in the list +// then they will appear in an undefined order, and the unification pass +// will presumably then fail. +func sortTypes(tys []cty.Type) []int { + l := len(tys) + + // First we build a graph whose edges represent "more general than", + // which we will then do a topological sort of. + edges := make([][]int, l) + for i := 0; i < (l - 1); i++ { + for j := i + 1; j < l; j++ { + cmp := compareTypes(tys[i], tys[j]) + switch { + case cmp < 0: + edges[i] = append(edges[i], j) + case cmp > 0: + edges[j] = append(edges[j], i) + } + } + } + + // Compute the in-degree of each node + inDegree := make([]int, l) + for _, outs := range edges { + for _, j := range outs { + inDegree[j]++ + } + } + + // The array backing our result will double as our queue for visiting + // the nodes, with the queue slice moving along this array until it + // is empty and positioned at the end of the array. Thus our visiting + // order is also our result order. + result := make([]int, l) + queue := result[0:0] + + // Initialize the queue with any item of in-degree 0, preserving + // their relative order. 
+ for i, n := range inDegree { + if n == 0 { + queue = append(queue, i) + } + } + + for len(queue) != 0 { + i := queue[0] + queue = queue[1:] + for _, j := range edges[i] { + inDegree[j]-- + if inDegree[j] == 0 { + queue = append(queue, j) + } + } + } + + return result +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go b/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go new file mode 100644 index 000000000..b2a3bbe54 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go @@ -0,0 +1,357 @@ +package convert + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// The current unify implementation is somewhat inefficient, but we accept this +// under the assumption that it will generally be used with small numbers of +// types and with types of reasonable complexity. However, it does have a +// "happy path" where all of the given types are equal. +// +// This function is likely to have poor performance in cases where any given +// types are very complex (lots of deeply-nested structures) or if the list +// of types itself is very large. In particular, it will walk the nested type +// structure under the given types several times, especially when given a +// list of types for which unification is not possible, since each permutation +// will be tried to determine that result. +func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + if len(types) == 0 { + // Degenerate case + return cty.NilType, nil + } + + // If all of the given types are of the same structural kind, we may be + // able to construct a new type that they can all be unified to, even if + // that is not one of the given types. We must try this before the general + // behavior below because in unsafe mode we can convert an object type to + // a subset of that type, which would be a much less useful conversion for + // unification purposes. + { + mapCt := 0 + objectCt := 0 + tupleCt := 0 + dynamicCt := 0 + for _, ty := range types { + switch { + case ty.IsMapType(): + mapCt++ + case ty.IsObjectType(): + objectCt++ + case ty.IsTupleType(): + tupleCt++ + case ty == cty.DynamicPseudoType: + dynamicCt++ + default: + break + } + } + switch { + case mapCt > 0 && (mapCt+dynamicCt) == len(types): + return unifyMapTypes(types, unsafe, dynamicCt > 0) + case objectCt > 0 && (objectCt+dynamicCt) == len(types): + return unifyObjectTypes(types, unsafe, dynamicCt > 0) + case tupleCt > 0 && (tupleCt+dynamicCt) == len(types): + return unifyTupleTypes(types, unsafe, dynamicCt > 0) + case objectCt > 0 && tupleCt > 0: + // Can never unify object and tuple types since they have incompatible kinds + return cty.NilType, nil + } + } + + prefOrder := sortTypes(types) + + // sortTypes gives us an order where earlier items are preferable as + // our result type. We'll now walk through these and choose the first + // one we encounter for which conversions exist for all source types. 
+ conversions := make([]Conversion, len(types)) +Preferences: + for _, wantTypeIdx := range prefOrder { + wantType := types[wantTypeIdx] + for i, tryType := range types { + if i == wantTypeIdx { + // Don't need to convert our wanted type to itself + conversions[i] = nil + continue + } + + if tryType.Equals(wantType) { + conversions[i] = nil + continue + } + + if unsafe { + conversions[i] = GetConversionUnsafe(tryType, wantType) + } else { + conversions[i] = GetConversion(tryType, wantType) + } + + if conversions[i] == nil { + // wantType is not a suitable unification type, so we'll + // try the next one in our preference order. + continue Preferences + } + } + + return wantType, conversions + } + + // If we fall out here, no unification is possible + return cty.NilType, nil +} + +func unifyMapTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) { + // If we had any dynamic types in the input here then we can't predict + // what path we'll take through here once these become known types, so + // we'll conservatively produce DynamicVal for these. + if hasDynamic { + return unifyAllAsDynamic(types) + } + + elemTypes := make([]cty.Type, 0, len(types)) + for _, ty := range types { + elemTypes = append(elemTypes, ty.ElementType()) + } + retElemType, _ := unify(elemTypes, unsafe) + if retElemType == cty.NilType { + return cty.NilType, nil + } + + retTy := cty.Map(retElemType) + + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + // Shouldn't be reachable, since we were able to unify + return cty.NilType, nil + } + } + + return retTy, conversions +} + +func unifyObjectTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) { + // If we had any dynamic types in the input here then we can't predict + // what path we'll take through here once these become known types, so + // we'll conservatively produce DynamicVal for these. + if hasDynamic { + return unifyAllAsDynamic(types) + } + + // There are two different ways we can succeed here: + // - If all of the given object types have the same set of attribute names + // and the corresponding types are all unifyable, then we construct that + // type. + // - If the given object types have different attribute names or their + // corresponding types are not unifyable, we'll instead try to unify + // all of the attribute types together to produce a map type. + // + // Our unification behavior is intentionally stricter than our conversion + // behavior for subset object types because user intent is different with + // unification use-cases: it makes sense to allow {"foo":true} to convert + // to emptyobjectval, but unifying an object with an attribute with the + // empty object type should be an error because unifying to the empty + // object type would be suprising and useless. + + firstAttrs := types[0].AttributeTypes() + for _, ty := range types[1:] { + thisAttrs := ty.AttributeTypes() + if len(thisAttrs) != len(firstAttrs) { + // If number of attributes is different then there can be no + // object type in common. + return unifyObjectTypesToMap(types, unsafe) + } + for name := range thisAttrs { + if _, ok := firstAttrs[name]; !ok { + // If attribute names don't exactly match then there can be + // no object type in common. 
+ return unifyObjectTypesToMap(types, unsafe) + } + } + } + + // If we get here then we've proven that all of the given object types + // have exactly the same set of attribute names, though the types may + // differ. + retAtys := make(map[string]cty.Type) + atysAcross := make([]cty.Type, len(types)) + for name := range firstAttrs { + for i, ty := range types { + atysAcross[i] = ty.AttributeType(name) + } + retAtys[name], _ = unify(atysAcross, unsafe) + if retAtys[name] == cty.NilType { + // Cannot unify this attribute alone, which means that unification + // of everything down to a map type can't be possible either. + return cty.NilType, nil + } + } + retTy := cty.Object(retAtys) + + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + // Shouldn't be reachable, since we were able to unify + return unifyObjectTypesToMap(types, unsafe) + } + } + + return retTy, conversions +} + +func unifyObjectTypesToMap(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + // This is our fallback case for unifyObjectTypes, where we see if we can + // construct a map type that can accept all of the attribute types. + + var atys []cty.Type + for _, ty := range types { + for _, aty := range ty.AttributeTypes() { + atys = append(atys, aty) + } + } + + ety, _ := unify(atys, unsafe) + if ety == cty.NilType { + return cty.NilType, nil + } + + retTy := cty.Map(ety) + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + return cty.NilType, nil + } + } + return retTy, conversions +} + +func unifyTupleTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) { + // If we had any dynamic types in the input here then we can't predict + // what path we'll take through here once these become known types, so + // we'll conservatively produce DynamicVal for these. + if hasDynamic { + return unifyAllAsDynamic(types) + } + + // There are two different ways we can succeed here: + // - If all of the given tuple types have the same sequence of element types + // and the corresponding types are all unifyable, then we construct that + // type. + // - If the given tuple types have different element types or their + // corresponding types are not unifyable, we'll instead try to unify + // all of the elements types together to produce a list type. + + firstEtys := types[0].TupleElementTypes() + for _, ty := range types[1:] { + thisEtys := ty.TupleElementTypes() + if len(thisEtys) != len(firstEtys) { + // If number of elements is different then there can be no + // tuple type in common. + return unifyTupleTypesToList(types, unsafe) + } + } + + // If we get here then we've proven that all of the given tuple types + // have the same number of elements, though the types may differ. 
+ retEtys := make([]cty.Type, len(firstEtys)) + atysAcross := make([]cty.Type, len(types)) + for idx := range firstEtys { + for tyI, ty := range types { + atysAcross[tyI] = ty.TupleElementTypes()[idx] + } + retEtys[idx], _ = unify(atysAcross, unsafe) + if retEtys[idx] == cty.NilType { + // Cannot unify this element alone, which means that unification + // of everything down to a map type can't be possible either. + return cty.NilType, nil + } + } + retTy := cty.Tuple(retEtys) + + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + // Shouldn't be reachable, since we were able to unify + return unifyTupleTypesToList(types, unsafe) + } + } + + return retTy, conversions +} + +func unifyTupleTypesToList(types []cty.Type, unsafe bool) (cty.Type, []Conversion) { + // This is our fallback case for unifyTupleTypes, where we see if we can + // construct a list type that can accept all of the element types. + + var etys []cty.Type + for _, ty := range types { + for _, ety := range ty.TupleElementTypes() { + etys = append(etys, ety) + } + } + + ety, _ := unify(etys, unsafe) + if ety == cty.NilType { + return cty.NilType, nil + } + + retTy := cty.List(ety) + conversions := make([]Conversion, len(types)) + for i, ty := range types { + if ty.Equals(retTy) { + continue + } + if unsafe { + conversions[i] = GetConversionUnsafe(ty, retTy) + } else { + conversions[i] = GetConversion(ty, retTy) + } + if conversions[i] == nil { + // Shouldn't be reachable, since we were able to unify + return unifyObjectTypesToMap(types, unsafe) + } + } + return retTy, conversions +} + +func unifyAllAsDynamic(types []cty.Type) (cty.Type, []Conversion) { + conversions := make([]Conversion, len(types)) + for i := range conversions { + conversions[i] = func(cty.Value) (cty.Value, error) { + return cty.DynamicVal, nil + } + } + return cty.DynamicPseudoType, conversions +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/doc.go b/vendor/github.com/hashicorp/go-cty/cty/doc.go new file mode 100644 index 000000000..d31f0547b --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/doc.go @@ -0,0 +1,18 @@ +// Package cty (pronounced see-tie) provides some infrastructure for a type +// system that might be useful for applications that need to represent +// configuration values provided by the user whose types are not known +// at compile time, particularly if the calling application also allows +// such values to be used in expressions. +// +// The type system consists of primitive types Number, String and Bool, as +// well as List and Map collection types and Object types that can have +// arbitrarily-typed sets of attributes. +// +// A set of operations is defined on these types, which is accessible via +// the wrapper struct Value, which annotates the raw, internal representation +// of a value with its corresponding type. +// +// This package is oriented towards being a building block for configuration +// languages used to bootstrap an application. It is not optimized for use +// in tight loops where CPU time or memory pressure are a concern. 
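+//
+// A minimal illustrative sketch of constructing and reading a value
+// (hedged; conversions and error handling are omitted):
+//
+//	v := cty.ObjectVal(map[string]cty.Value{
+//		"name":    cty.StringVal("example"),
+//		"enabled": cty.True,
+//	})
+//	name := v.GetAttr("name") // cty.StringVal("example")
+//	_ = name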
+package cty diff --git a/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go b/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go new file mode 100644 index 000000000..31567e766 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go @@ -0,0 +1,194 @@ +package cty + +import ( + "sort" + + "github.com/hashicorp/go-cty/cty/set" +) + +// ElementIterator is the interface type returned by Value.ElementIterator to +// allow the caller to iterate over elements of a collection-typed value. +// +// Its usage pattern is as follows: +// +// it := val.ElementIterator() +// for it.Next() { +// key, val := it.Element() +// // ... +// } +type ElementIterator interface { + Next() bool + Element() (key Value, value Value) +} + +func canElementIterator(val Value) bool { + switch { + case val.IsMarked(): + return false + case val.ty.IsListType(): + return true + case val.ty.IsMapType(): + return true + case val.ty.IsSetType(): + return true + case val.ty.IsTupleType(): + return true + case val.ty.IsObjectType(): + return true + default: + return false + } +} + +func elementIterator(val Value) ElementIterator { + val.assertUnmarked() + switch { + case val.ty.IsListType(): + return &listElementIterator{ + ety: val.ty.ElementType(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsMapType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same input map. + rawMap := val.v.(map[string]interface{}) + keys := make([]string, 0, len(rawMap)) + for key := range rawMap { + keys = append(keys, key) + } + sort.Strings(keys) + + return &mapElementIterator{ + ety: val.ty.ElementType(), + vals: rawMap, + keys: keys, + idx: -1, + } + case val.ty.IsSetType(): + rawSet := val.v.(set.Set) + return &setElementIterator{ + ety: val.ty.ElementType(), + setIt: rawSet.Iterator(), + } + case val.ty.IsTupleType(): + return &tupleElementIterator{ + etys: val.ty.TupleElementTypes(), + vals: val.v.([]interface{}), + idx: -1, + } + case val.ty.IsObjectType(): + // We iterate the keys in a predictable lexicographical order so + // that results will always be stable given the same object type. 
+ atys := val.ty.AttributeTypes() + keys := make([]string, 0, len(atys)) + for key := range atys { + keys = append(keys, key) + } + sort.Strings(keys) + + return &objectElementIterator{ + atys: atys, + vals: val.v.(map[string]interface{}), + attrNames: keys, + idx: -1, + } + default: + panic("attempt to iterate on non-collection, non-tuple type") + } +} + +type listElementIterator struct { + ety Type + vals []interface{} + idx int +} + +func (it *listElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.ety, + v: it.vals[i], + } +} + +func (it *listElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type mapElementIterator struct { + ety Type + vals map[string]interface{} + keys []string + idx int +} + +func (it *mapElementIterator) Element() (Value, Value) { + key := it.keys[it.idx] + return StringVal(key), Value{ + ty: it.ety, + v: it.vals[key], + } +} + +func (it *mapElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.keys) +} + +type setElementIterator struct { + ety Type + setIt *set.Iterator +} + +func (it *setElementIterator) Element() (Value, Value) { + val := Value{ + ty: it.ety, + v: it.setIt.Value(), + } + return val, val +} + +func (it *setElementIterator) Next() bool { + return it.setIt.Next() +} + +type tupleElementIterator struct { + etys []Type + vals []interface{} + idx int +} + +func (it *tupleElementIterator) Element() (Value, Value) { + i := it.idx + return NumberIntVal(int64(i)), Value{ + ty: it.etys[i], + v: it.vals[i], + } +} + +func (it *tupleElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} + +type objectElementIterator struct { + atys map[string]Type + vals map[string]interface{} + attrNames []string + idx int +} + +func (it *objectElementIterator) Element() (Value, Value) { + key := it.attrNames[it.idx] + return StringVal(key), Value{ + ty: it.atys[key], + v: it.vals[key], + } +} + +func (it *objectElementIterator) Next() bool { + it.idx++ + return it.idx < len(it.attrNames) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/error.go b/vendor/github.com/hashicorp/go-cty/cty/error.go new file mode 100644 index 000000000..dd139f724 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/error.go @@ -0,0 +1,55 @@ +package cty + +import ( + "fmt" +) + +// PathError is a specialization of error that represents where in a +// potentially-deep data structure an error occured, using a Path. +type PathError struct { + error + Path Path +} + +func errorf(path Path, f string, args ...interface{}) error { + // We need to copy the Path because often our caller builds it by + // continually mutating the same underlying buffer. + sPath := make(Path, len(path)) + copy(sPath, path) + return PathError{ + error: fmt.Errorf(f, args...), + Path: sPath, + } +} + +// NewErrorf creates a new PathError for the current path by passing the +// given format and arguments to fmt.Errorf and then wrapping the result +// similarly to NewError. +func (p Path) NewErrorf(f string, args ...interface{}) error { + return errorf(p, f, args...) +} + +// NewError creates a new PathError for the current path, wrapping the given +// error. +func (p Path) NewError(err error) error { + // if we're being asked to wrap an existing PathError then our new + // PathError will be the concatenation of the two paths, ensuring + // that we still get a single flat PathError that's thus easier for + // callers to deal with. 
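+	// (Hedged illustration: if p is [GetAttrStep{Name: "a"}] and err is a
+	// PathError whose Path is [GetAttrStep{Name: "b"}], the result is a
+	// single flat PathError whose Path is
+	// [GetAttrStep{Name: "a"}, GetAttrStep{Name: "b"}].)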
+ perr, wrappingPath := err.(PathError) + pathLen := len(p) + if wrappingPath { + pathLen = pathLen + len(perr.Path) + } + + sPath := make(Path, pathLen) + copy(sPath, p) + if wrappingPath { + copy(sPath[len(p):], perr.Path) + } + + return PathError{ + error: err, + Path: sPath, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gob.go b/vendor/github.com/hashicorp/go-cty/cty/gob.go new file mode 100644 index 000000000..80929aa54 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gob.go @@ -0,0 +1,204 @@ +package cty + +import ( + "bytes" + "encoding/gob" + "errors" + "fmt" + "math/big" + + "github.com/hashicorp/go-cty/cty/set" +) + +// GobEncode is an implementation of the gob.GobEncoder interface, which +// allows Values to be included in structures encoded with encoding/gob. +// +// Currently it is not possible to represent values of capsule types in gob, +// because the types themselves cannot be represented. +func (val Value) GobEncode() ([]byte, error) { + if val.IsMarked() { + return nil, errors.New("value is marked") + } + + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + + gv := gobValue{ + Version: 0, + Ty: val.ty, + V: val.v, + } + + err := enc.Encode(gv) + if err != nil { + return nil, fmt.Errorf("error encoding cty.Value: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is an implementation of the gob.GobDecoder interface, which +// inverts the operation performed by GobEncode. See the documentation of +// GobEncode for considerations when using cty.Value instances with gob. +func (val *Value) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gv gobValue + err := dec.Decode(&gv) + if err != nil { + return fmt.Errorf("error decoding cty.Value: %s", err) + } + if gv.Version != 0 { + return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version) + } + + // Because big.Float.GobEncode is implemented with a pointer reciever, + // gob encoding of an interface{} containing a *big.Float value does not + // round-trip correctly, emerging instead as a non-pointer big.Float. + // The rest of cty expects all number values to be represented by + // *big.Float, so we'll fix that up here. + gv.V = gobDecodeFixNumberPtr(gv.V, gv.Ty) + + val.ty = gv.Ty + val.v = gv.V + + return nil +} + +// GobEncode is an implementation of the gob.GobEncoder interface, which +// allows Types to be included in structures encoded with encoding/gob. +// +// Currently it is not possible to represent capsule types in gob. +func (t Type) GobEncode() ([]byte, error) { + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + + gt := gobType{ + Version: 0, + Impl: t.typeImpl, + } + + err := enc.Encode(gt) + if err != nil { + return nil, fmt.Errorf("error encoding cty.Type: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is an implementatino of the gob.GobDecoder interface, which +// reverses the encoding performed by GobEncode to allow types to be recovered +// from gob buffers. 
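+//
+// (Illustrative note, hedged: callers are not expected to invoke this
+// method directly; passing a cty.Type or a value containing one to
+// gob.NewEncoder(...).Encode and later gob.NewDecoder(...).Decode lets the
+// encoding/gob package call these hooks automatically.)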
+func (t *Type) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gt gobType + err := dec.Decode(>) + if err != nil { + return fmt.Errorf("error decoding cty.Type: %s", err) + } + if gt.Version != 0 { + return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version) + } + + t.typeImpl = gt.Impl + + return nil +} + +// Capsule types cannot currently be gob-encoded, because they rely on pointer +// equality and we have no way to recover the original pointer on decode. +func (t *capsuleType) GobEncode() ([]byte, error) { + return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName(friendlyTypeName)) +} + +func (t *capsuleType) GobDecode() ([]byte, error) { + return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName(friendlyTypeName)) +} + +type gobValue struct { + Version int + Ty Type + V interface{} +} + +type gobType struct { + Version int + Impl typeImpl +} + +type gobCapsuleTypeImpl struct { +} + +// goDecodeFixNumberPtr fixes an unfortunate quirk of round-tripping cty.Number +// values through gob: the big.Float.GobEncode method is implemented on a +// pointer receiver, and so it loses the "pointer-ness" of the value on +// encode, causing the values to emerge the other end as big.Float rather than +// *big.Float as we expect elsewhere in cty. +// +// The implementation of gobDecodeFixNumberPtr mutates the given raw value +// during its work, and may either return the same value mutated or a new +// value. Callers must no longer use whatever value they pass as "raw" after +// this function is called. +func gobDecodeFixNumberPtr(raw interface{}, ty Type) interface{} { + // Unfortunately we need to work recursively here because number values + // might be embedded in structural or collection type values. + + switch { + case ty.Equals(Number): + if bf, ok := raw.(big.Float); ok { + return &bf // wrap in pointer + } + case ty.IsMapType() && ty.ElementType().Equals(Number): + if m, ok := raw.(map[string]interface{}); ok { + for k, v := range m { + m[k] = gobDecodeFixNumberPtr(v, ty.ElementType()) + } + } + case ty.IsListType() && ty.ElementType().Equals(Number): + if s, ok := raw.([]interface{}); ok { + for i, v := range s { + s[i] = gobDecodeFixNumberPtr(v, ty.ElementType()) + } + } + case ty.IsSetType() && ty.ElementType().Equals(Number): + if s, ok := raw.(set.Set); ok { + newS := set.NewSet(s.Rules()) + for it := s.Iterator(); it.Next(); { + newV := gobDecodeFixNumberPtr(it.Value(), ty.ElementType()) + newS.Add(newV) + } + return newS + } + case ty.IsObjectType(): + if m, ok := raw.(map[string]interface{}); ok { + for k, v := range m { + aty := ty.AttributeType(k) + m[k] = gobDecodeFixNumberPtr(v, aty) + } + } + case ty.IsTupleType(): + if s, ok := raw.([]interface{}); ok { + for i, v := range s { + ety := ty.TupleElementType(i) + s[i] = gobDecodeFixNumberPtr(v, ety) + } + } + } + + return raw +} + +// gobDecodeFixNumberPtrVal is a helper wrapper around gobDecodeFixNumberPtr +// that works with already-constructed values. This is primarily for testing, +// to fix up intentionally-invalid number values for the parts of the test +// code that need them to be valid, such as calling GoString on them. 
+func gobDecodeFixNumberPtrVal(v Value) Value { + raw := gobDecodeFixNumberPtr(v.v, v.ty) + return Value{ + v: raw, + ty: v.ty, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go new file mode 100644 index 000000000..a5177d22b --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go @@ -0,0 +1,7 @@ +// Package gocty deals with converting between cty Values and native go +// values. +// +// It operates under a similar principle to the encoding/json and +// encoding/xml packages in the standard library, using reflection to +// populate native Go data structures from cty values and vice-versa. +package gocty diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go new file mode 100644 index 000000000..0677a0794 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go @@ -0,0 +1,43 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/set" +) + +var valueType = reflect.TypeOf(cty.Value{}) +var typeType = reflect.TypeOf(cty.Type{}) + +var setType = reflect.TypeOf(set.Set{}) + +var bigFloatType = reflect.TypeOf(big.Float{}) +var bigIntType = reflect.TypeOf(big.Int{}) + +var emptyInterfaceType = reflect.TypeOf(interface{}(nil)) + +var stringType = reflect.TypeOf("") + +// structTagIndices interrogates the fields of the given type (which must +// be a struct type, or we'll panic) and returns a map from the cty +// attribute names declared via struct tags to the indices of the +// fields holding those tags. +// +// This function will panic if two fields within the struct are tagged with +// the same cty attribute name. +func structTagIndices(st reflect.Type) map[string]int { + ct := st.NumField() + ret := make(map[string]int, ct) + + for i := 0; i < ct; i++ { + field := st.Field(i) + attrName := field.Tag.Get("cty") + if attrName != "" { + ret[attrName] = i + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go new file mode 100644 index 000000000..fc35c1692 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go @@ -0,0 +1,548 @@ +package gocty + +import ( + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" + "github.com/hashicorp/go-cty/cty/set" +) + +// ToCtyValue produces a cty.Value from a Go value. The result will conform +// to the given type, or an error will be returned if this is not possible. +// +// The target type serves as a hint to resolve ambiguities in the mapping. +// For example, the Go type set.Set tells us that the value is a set but +// does not describe the set's element type. This also allows for convenient +// conversions, such as populating a set from a slice rather than having to +// first explicitly instantiate a set.Set. +// +// The audience of this function is assumed to be the developers of Go code +// that is integrating with cty, and thus the error messages it returns are +// presented from Go's perspective. These messages are thus not appropriate +// for display to end-users. An error returned from ToCtyValue represents a +// bug in the calling program, not user error. 
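+//
+// A minimal illustrative sketch (the struct type and its cty field tags
+// here are hypothetical, not part of this package):
+//
+//	type webInstance struct {
+//		Name  string `cty:"name"`
+//		Count int    `cty:"count"`
+//	}
+//	ty := cty.Object(map[string]cty.Type{
+//		"name":  cty.String,
+//		"count": cty.Number,
+//	})
+//	v, err := gocty.ToCtyValue(webInstance{Name: "web", Count: 2}, ty)
+//	// on success, v.GetAttr("count") is cty.NumberIntVal(2); err reports
+//	// any mismatch in Go-oriented terms, as described above.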
+func ToCtyValue(val interface{}, ty cty.Type) (cty.Value, error) { + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time toCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given Type is. + path := make(cty.Path, 0) + return toCtyValue(reflect.ValueOf(val), ty, path) +} + +func toCtyValue(val reflect.Value, ty cty.Type, path cty.Path) (cty.Value, error) { + if val != (reflect.Value{}) && val.Type().AssignableTo(valueType) { + // If the source value is a cty.Value then we'll try to just pass + // through to the target type directly. + return toCtyPassthrough(val, ty, path) + } + + switch ty { + case cty.Bool: + return toCtyBool(val, path) + case cty.Number: + return toCtyNumber(val, path) + case cty.String: + return toCtyString(val, path) + case cty.DynamicPseudoType: + return toCtyDynamic(val, path) + } + + switch { + case ty.IsListType(): + return toCtyList(val, ty.ElementType(), path) + case ty.IsMapType(): + return toCtyMap(val, ty.ElementType(), path) + case ty.IsSetType(): + return toCtySet(val, ty.ElementType(), path) + case ty.IsObjectType(): + return toCtyObject(val, ty.AttributeTypes(), path) + case ty.IsTupleType(): + return toCtyTuple(val, ty.TupleElementTypes(), path) + case ty.IsCapsuleType(): + return toCtyCapsule(val, ty, path) + } + + // We should never fall out here + return cty.NilVal, path.NewErrorf("unsupported target type %#v", ty) +} + +func toCtyBool(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Bool), nil + } + + switch val.Kind() { + + case reflect.Bool: + return cty.BoolVal(val.Bool()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to bool", val.Kind()) + + } + +} + +func toCtyNumber(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Number), nil + } + + switch val.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.NumberIntVal(val.Int()), nil + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.NumberUIntVal(val.Uint()), nil + + case reflect.Float32, reflect.Float64: + return cty.NumberFloatVal(val.Float()), nil + + case reflect.Struct: + if val.Type().AssignableTo(bigIntType) { + bigInt := val.Interface().(big.Int) + bigFloat := (&big.Float{}).SetInt(&bigInt) + val = reflect.ValueOf(*bigFloat) + } + + if val.Type().AssignableTo(bigFloatType) { + bigFloat := val.Interface().(big.Float) + return cty.NumberVal(&bigFloat), nil + } + + fallthrough + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to number", val.Kind()) + + } + +} + +func toCtyString(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.String), nil + } + + switch val.Kind() { + + case reflect.String: + return cty.StringVal(val.String()), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to string", val.Kind()) + + } + +} + +func toCtyList(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.List(ety)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.List(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { 
+ return cty.ListValEmpty(ety), nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, val.Len()) + for i := range vals { + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ListVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.List(ety)) + + } +} + +func toCtyMap(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Map(ety)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Map(ety)), nil + } + + if val.Len() == 0 { + return cty.MapValEmpty(ety), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our index step. + path = append(path, cty.PathStep(nil)) + + vals := make(map[string]cty.Value, val.Len()) + for _, kv := range val.MapKeys() { + k := kv.String() + var err error + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. 
+ path = path[:len(path)-1] + + return cty.MapVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Map(ety)) + + } +} + +func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Set(ety)), nil + } + + var vals []cty.Value + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Set(ety)), nil + } + fallthrough + case reflect.Array: + if val.Len() == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, val.Len()) + for i := range vals { + var err error + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + case reflect.Struct: + + if !val.Type().AssignableTo(setType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety)) + } + + rawSet := val.Interface().(set.Set) + inVals := rawSet.Values() + + if len(inVals) == 0 { + return cty.SetValEmpty(ety), nil + } + + vals = make([]cty.Value, len(inVals)) + for i := range inVals { + var err error + vals[i], err = toCtyValue(reflect.ValueOf(inVals[i]), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Set(ety)) + + } + + return cty.SetVal(vals), nil +} + +func toCtyObject(val reflect.Value, attrTypes map[string]cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + switch val.Kind() { + + case reflect.Map: + if val.IsNil() { + return cty.NullVal(cty.Object(attrTypes)), nil + } + + keyType := val.Type().Key() + if keyType.Kind() != reflect.String { + return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType) + } + + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. + path = append(path, cty.PathStep(nil)) + + haveKeys := make(map[string]struct{}, val.Len()) + for _, kv := range val.MapKeys() { + haveKeys[kv.String()] = struct{}{} + } + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + var err error + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if _, have := haveKeys[k]; !have { + vals[k] = cty.NullVal(at) + continue + } + + vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), at, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + case reflect.Struct: + if len(attrTypes) == 0 { + return cty.EmptyObjectVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our GetAttr step. 
+ path = append(path, cty.PathStep(nil)) + + attrFields := structTagIndices(val.Type()) + + vals := make(map[string]cty.Value, len(attrTypes)) + for k, at := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + if fieldIdx, have := attrFields[k]; have { + var err error + vals[k], err = toCtyValue(val.Field(fieldIdx), at, path) + if err != nil { + return cty.NilVal, err + } + } else { + vals[k] = cty.NullVal(at) + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.ObjectVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Object(attrTypes)) + + } +} + +func toCtyTuple(val reflect.Value, elemTypes []cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + switch val.Kind() { + + case reflect.Slice: + if val.IsNil() { + return cty.NullVal(cty.Tuple(elemTypes)), nil + } + + if val.Len() != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of elements %d; need %d", val.Len(), len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Index(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. + path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + case reflect.Struct: + fieldCount := val.Type().NumField() + if fieldCount != len(elemTypes) { + return cty.NilVal, path.NewErrorf("wrong number of struct fields %d; need %d", fieldCount, len(elemTypes)) + } + + if len(elemTypes) == 0 { + return cty.EmptyTupleVal, nil + } + + // While we work on our elements we'll temporarily grow + // path to give us a place to put our Index step. + path = append(path, cty.PathStep(nil)) + + vals := make([]cty.Value, len(elemTypes)) + for i, ety := range elemTypes { + var err error + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + vals[i], err = toCtyValue(val.Field(i), ety, path) + if err != nil { + return cty.NilVal, err + } + } + + // Discard our extra path segment, retaining it as extra capacity + // for future appending to the path. 
+ path = path[:len(path)-1] + + return cty.TupleVal(vals), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Tuple(elemTypes)) + + } +} + +func toCtyCapsule(val reflect.Value, capsuleType cty.Type, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(capsuleType), nil + } + + if val.Kind() != reflect.Ptr { + if !val.CanAddr() { + return cty.NilVal, path.NewErrorf("source value for capsule %#v must be addressable", capsuleType) + } + + val = val.Addr() + } + + if !val.Type().Elem().AssignableTo(capsuleType.EncapsulatedType()) { + return cty.NilVal, path.NewErrorf("value of type %T not compatible with capsule %#v", val.Interface(), capsuleType) + } + + return cty.CapsuleVal(capsuleType, val.Interface()), nil +} + +func toCtyDynamic(val reflect.Value, path cty.Path) (cty.Value, error) { + if val = toCtyUnwrapPointer(val); !val.IsValid() { + return cty.NullVal(cty.DynamicPseudoType), nil + } + + switch val.Kind() { + + case reflect.Struct: + if !val.Type().AssignableTo(valueType) { + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Type()) + } + + return val.Interface().(cty.Value), nil + + default: + return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Kind()) + + } + +} + +func toCtyPassthrough(wrappedVal reflect.Value, wantTy cty.Type, path cty.Path) (cty.Value, error) { + if wrappedVal = toCtyUnwrapPointer(wrappedVal); !wrappedVal.IsValid() { + return cty.NullVal(wantTy), nil + } + + givenVal := wrappedVal.Interface().(cty.Value) + + val, err := convert.Convert(givenVal, wantTy) + if err != nil { + return cty.NilVal, path.NewErrorf("unsuitable value: %s", err) + } + return val, nil +} + +// toCtyUnwrapPointer is a helper for dealing with Go pointers. It has three +// possible outcomes: +// +// - Given value isn't a pointer, so it's just returned as-is. +// - Given value is a non-nil pointer, in which case it is dereferenced +// and the result returned. +// - Given value is a nil pointer, in which case an invalid value is returned. +// +// For nested pointer types, like **int, they are all dereferenced in turn +// until a non-pointer value is found, or until a nil pointer is encountered. +func toCtyUnwrapPointer(val reflect.Value) reflect.Value { + for val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface { + if val.IsNil() { + return reflect.Value{} + } + + val = val.Elem() + } + + return val +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go new file mode 100644 index 000000000..404faba18 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go @@ -0,0 +1,686 @@ +package gocty + +import ( + "math" + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty" +) + +// FromCtyValue assigns a cty.Value to a reflect.Value, which must be a pointer, +// using a fixed set of conversion rules. +// +// This function considers its audience to be the creator of the cty Value +// given, and thus the error messages it generates are (unlike with ToCtyValue) +// presented in cty terminology that is generally appropriate to return to +// end-users in applications where cty data structures are built from +// user-provided configuration. 
In particular this means that if incorrect +// target types are provided by the calling application the resulting error +// messages are likely to be confusing, since we assume that the given target +// type is correct and the cty.Value is where the error lies. +// +// If an error is returned, the target data structure may have been partially +// populated, but the degree to which this is true is an implementation +// detail that the calling application should not rely on. +// +// The function will panic if given a non-pointer as the Go value target, +// since that is considered to be a bug in the calling program. +func FromCtyValue(val cty.Value, target interface{}) error { + tVal := reflect.ValueOf(target) + if tVal.Kind() != reflect.Ptr { + panic("target value is not a pointer") + } + if tVal.IsNil() { + panic("target value is nil pointer") + } + + // 'path' starts off as empty but will grow for each level of recursive + // call we make, so by the time fromCtyValue returns it is likely to have + // unused capacity on the end of it, depending on how deeply-recursive + // the given cty.Value is. + path := make(cty.Path, 0) + return fromCtyValue(val, tVal, path) +} + +func fromCtyValue(val cty.Value, target reflect.Value, path cty.Path) error { + ty := val.Type() + + deepTarget := fromCtyPopulatePtr(target, false) + + // If we're decoding into a cty.Value then we just pass through the + // value as-is, to enable partial decoding. This is the only situation + // where unknown values are permitted. + if deepTarget.Kind() == reflect.Struct && deepTarget.Type().AssignableTo(valueType) { + deepTarget.Set(reflect.ValueOf(val)) + return nil + } + + // Lists and maps can be nil without indirection, but everything else + // requires a pointer and we set it immediately to nil. + // We also make an exception for capsule types because we want to handle + // pointers specially for these. + // (fromCtyList and fromCtyMap must therefore deal with val.IsNull, while + // other types can assume no nulls after this point.) + if val.IsNull() && !val.Type().IsListType() && !val.Type().IsMapType() && !val.Type().IsCapsuleType() { + target = fromCtyPopulatePtr(target, true) + if target.Kind() != reflect.Ptr { + return path.NewErrorf("null value is not allowed") + } + + target.Set(reflect.Zero(target.Type())) + return nil + } + + target = deepTarget + + if !val.IsKnown() { + return path.NewErrorf("value must be known") + } + + switch ty { + case cty.Bool: + return fromCtyBool(val, target, path) + case cty.Number: + return fromCtyNumber(val, target, path) + case cty.String: + return fromCtyString(val, target, path) + } + + switch { + case ty.IsListType(): + return fromCtyList(val, target, path) + case ty.IsMapType(): + return fromCtyMap(val, target, path) + case ty.IsSetType(): + return fromCtySet(val, target, path) + case ty.IsObjectType(): + return fromCtyObject(val, target, path) + case ty.IsTupleType(): + return fromCtyTuple(val, target, path) + case ty.IsCapsuleType(): + return fromCtyCapsule(val, target, path) + } + + // We should never fall out here; reaching here indicates a bug in this + // function. 
+ return path.NewErrorf("unsupported source type %#v", ty) +} + +func fromCtyBool(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Bool: + target.SetBool(val.True()) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumber(val cty.Value, target reflect.Value, path cty.Path) error { + bf := val.AsBigFloat() + + switch target.Kind() { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return fromCtyNumberInt(bf, target, path) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return fromCtyNumberUInt(bf, target, path) + + case reflect.Float32, reflect.Float64: + return fromCtyNumberFloat(bf, target, path) + + case reflect.Struct: + return fromCtyNumberBig(bf, target, path) + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyNumberInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var min int64 + var max int64 + switch target.Type().Bits() { + case 8: + min = math.MinInt8 + max = math.MaxInt8 + case 16: + min = math.MinInt16 + max = math.MaxInt16 + case 32: + min = math.MinInt32 + max = math.MaxInt32 + case 64: + min = math.MinInt64 + max = math.MaxInt64 + default: + panic("weird number of bits in target int") + } + + iv, accuracy := bf.Int64() + if accuracy != big.Exact || iv < min || iv > max { + return path.NewErrorf("value must be a whole number, between %d and %d", min, max) + } + + target.SetInt(iv) + return nil +} + +func fromCtyNumberUInt(bf *big.Float, target reflect.Value, path cty.Path) error { + // Doing this with switch rather than << arithmetic because << with + // result >32-bits is not portable to 32-bit systems. + var max uint64 + switch target.Type().Bits() { + case 8: + max = math.MaxUint8 + case 16: + max = math.MaxUint16 + case 32: + max = math.MaxUint32 + case 64: + max = math.MaxUint64 + default: + panic("weird number of bits in target uint") + } + + iv, accuracy := bf.Uint64() + if accuracy != big.Exact || iv > max { + return path.NewErrorf("value must be a whole number, between 0 and %d inclusive", max) + } + + target.SetUint(iv) + return nil +} + +func fromCtyNumberFloat(bf *big.Float, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.Float32, reflect.Float64: + fv, accuracy := bf.Float64() + if accuracy != big.Exact { + // We allow the precision to be truncated as part of our conversion, + // but we don't want to silently introduce infinities. + if math.IsInf(fv, 0) { + return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat64, math.MaxFloat64) + } + } + target.SetFloat(fv) + return nil + default: + panic("unsupported kind of float") + } +} + +func fromCtyNumberBig(bf *big.Float, target reflect.Value, path cty.Path) error { + switch { + + case bigFloatType.ConvertibleTo(target.Type()): + // Easy! 
+ target.Set(reflect.ValueOf(bf).Elem().Convert(target.Type())) + return nil + + case bigIntType.ConvertibleTo(target.Type()): + bi, accuracy := bf.Int(nil) + if accuracy != big.Exact { + return path.NewErrorf("value must be a whole number") + } + target.Set(reflect.ValueOf(bi).Elem().Convert(target.Type())) + return nil + + default: + return likelyRequiredTypesError(path, target) + } +} + +func fromCtyString(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + case reflect.String: + target.SetString(val.AsString()) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyList(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a list of length %d", target.Len()) + } + + path = append(path, nil) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyMap(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Map: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + tv := reflect.MakeMap(target.Type()) + et := target.Type().Elem() + + path = append(path, nil) + + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + path[len(path)-1] = cty.IndexStep{ + Key: key, + } + + ks := key.AsString() + + targetElem := reflect.New(et) + err = fromCtyValue(val, targetElem, path) + + tv.SetMapIndex(reflect.ValueOf(ks), targetElem.Elem()) + + return err != nil + }) + if err != nil { + return err + } + + path = path[:len(path)-1] + + target.Set(tv) + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtySet(val cty.Value, target reflect.Value, path cty.Path) error { + switch target.Kind() { + + case reflect.Slice: + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + length := val.LengthInt() + tv := reflect.MakeSlice(target.Type(), length, length) + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := tv.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + target.Set(tv) + return nil + + case reflect.Array: + if val.IsNull() { + return path.NewErrorf("null value is not 
allowed") + } + + length := val.LengthInt() + if length != target.Len() { + return path.NewErrorf("must be a set of length %d", target.Len()) + } + + i := 0 + var err error + val.ForEachElement(func(key cty.Value, val cty.Value) bool { + targetElem := target.Index(i) + err = fromCtyValue(val, targetElem, path) + if err != nil { + return true + } + + i++ + return false + }) + if err != nil { + return err + } + + return nil + + // TODO: decode into set.Set instance + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyObject(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + attrTypes := val.Type().AttributeTypes() + targetFields := structTagIndices(target.Type()) + + path = append(path, nil) + + for k, i := range targetFields { + if _, exists := attrTypes[k]; !exists { + // If the field in question isn't able to represent nil, + // that's an error. + fk := target.Field(i).Kind() + switch fk { + case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface: + // okay + default: + return path.NewErrorf("missing required attribute %q", k) + } + } + } + + for k := range attrTypes { + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + fieldIdx, exists := targetFields[k] + if !exists { + return path.NewErrorf("unsupported attribute %q", k) + } + + ev := val.GetAttr(k) + + targetField := target.Field(fieldIdx) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyTuple(val cty.Value, target reflect.Value, path cty.Path) error { + + switch target.Kind() { + + case reflect.Struct: + + elemTypes := val.Type().TupleElementTypes() + fieldCount := target.Type().NumField() + + if fieldCount != len(elemTypes) { + return path.NewErrorf("a tuple of %d elements is required", fieldCount) + } + + path = append(path, nil) + + for i := range elemTypes { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + ev := val.Index(cty.NumberIntVal(int64(i))) + + targetField := target.Field(i) + err := fromCtyValue(ev, targetField, path) + if err != nil { + return err + } + } + + path = path[:len(path)-1] + + return nil + + default: + return likelyRequiredTypesError(path, target) + + } +} + +func fromCtyCapsule(val cty.Value, target reflect.Value, path cty.Path) error { + + if target.Kind() == reflect.Ptr { + // Walk through indirection until we get to the last pointer, + // which we might set to null below. + target = fromCtyPopulatePtr(target, true) + + if val.IsNull() { + target.Set(reflect.Zero(target.Type())) + return nil + } + + // Since a capsule contains a pointer to an object, we'll preserve + // that pointer on the way out and thus allow the caller to recover + // the original object, rather than a copy of it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Elem().Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. 
+ return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + target.Set(reflect.ValueOf(val.EncapsulatedValue())) + + return nil + } else { + if val.IsNull() { + return path.NewErrorf("null value is not allowed") + } + + // If our target isn't a pointer then we will attempt to copy + // the encapsulated value into it. + + eType := val.Type().EncapsulatedType() + + if !eType.AssignableTo(target.Type()) { + // Our interface contract promises that we won't expose Go + // implementation details in error messages, so we need to keep + // this vague. This can only arise if a calling application has + // more than one capsule type in play and a user mixes them up. + return path.NewErrorf("incorrect type %s", val.Type().FriendlyName()) + } + + // We know that EncapsulatedValue is always a pointer, so we + // can safely call .Elem on its reflect.Value. + target.Set(reflect.ValueOf(val.EncapsulatedValue()).Elem()) + + return nil + } + +} + +// fromCtyPopulatePtr recognizes when target is a pointer type and allocates +// a value to assign to that pointer, which it returns. +// +// If the given value has multiple levels of indirection, like **int, these +// will be processed in turn so that the return value is guaranteed to be +// a non-pointer. +// +// As an exception, if decodingNull is true then the returned value will be +// the final level of pointer, if any, so that the caller can assign it +// as nil to represent a null value. If the given target value is not a pointer +// at all then the returned value will be just the given target, so the caller +// must test if the returned value is a pointer before trying to assign nil +// to it. +func fromCtyPopulatePtr(target reflect.Value, decodingNull bool) reflect.Value { + for { + if target.Kind() == reflect.Interface && !target.IsNil() { + e := target.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + target = e + } + } + + if target.Kind() != reflect.Ptr { + break + } + + // Stop early if we're decodingNull and we've found our last indirection + if target.Elem().Kind() != reflect.Ptr && decodingNull && target.CanSet() { + break + } + + if target.IsNil() { + target.Set(reflect.New(target.Type().Elem())) + } + + target = target.Elem() + } + return target +} + +// likelyRequiredTypesError returns an error that states which types are +// acceptable by making some assumptions about what types we support for +// each target Go kind. It's not a precise science but it allows us to return +// an error message that is cty-user-oriented rather than Go-oriented. +// +// Generally these error messages should be a matter of last resort, since +// the calling application should be validating user-provided value types +// before decoding anyway. 
+func likelyRequiredTypesError(path cty.Path, target reflect.Value) error { + switch target.Kind() { + + case reflect.Bool: + return path.NewErrorf("bool value is required") + + case reflect.String: + return path.NewErrorf("string value is required") + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + fallthrough + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + fallthrough + case reflect.Float32, reflect.Float64: + return path.NewErrorf("number value is required") + + case reflect.Slice, reflect.Array: + return path.NewErrorf("list or set value is required") + + case reflect.Map: + return path.NewErrorf("map or object value is required") + + case reflect.Struct: + switch { + + case target.Type().AssignableTo(bigFloatType) || target.Type().AssignableTo(bigIntType): + return path.NewErrorf("number value is required") + + case target.Type().AssignableTo(setType): + return path.NewErrorf("set or list value is required") + + default: + return path.NewErrorf("object or tuple value is required") + + } + + default: + // We should avoid getting into this path, since this error + // message is rather useless. + return path.NewErrorf("incorrect type") + + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go new file mode 100644 index 000000000..b41342535 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go @@ -0,0 +1,108 @@ +package gocty + +import ( + "reflect" + + "github.com/hashicorp/go-cty/cty" +) + +// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts +// to find a suitable cty.Type instance that could be used for a conversion +// with ToCtyValue. +// +// This allows -- for simple situations at least -- types to be defined just +// once in Go and the cty types derived from the Go types, but in the process +// it makes some assumptions that may be undesirable so applications are +// encouraged to build their cty types directly if exacting control is +// required. +// +// Not all Go types can be represented as cty types, so an error may be +// returned which is usually considered to be a bug in the calling program. +// In particular, ImpliedType will never use capsule types in its returned +// type, because it cannot know the capsule types supported by the calling +// program. 
+func ImpliedType(gv interface{}) (cty.Type, error) { + rt := reflect.TypeOf(gv) + var path cty.Path + return impliedType(rt, path) +} + +func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) { + switch rt.Kind() { + + case reflect.Ptr: + return impliedType(rt.Elem(), path) + + // Primitive types + case reflect.Bool: + return cty.Bool, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return cty.Number, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return cty.Number, nil + case reflect.Float32, reflect.Float64: + return cty.Number, nil + case reflect.String: + return cty.String, nil + + // Collection types + case reflect.Slice: + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.List(ety), nil + case reflect.Map: + if !stringType.AssignableTo(rt.Key()) { + return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt) + } + path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)}) + ety, err := impliedType(rt.Elem(), path) + if err != nil { + return cty.NilType, err + } + return cty.Map(ety), nil + + // Structural types + case reflect.Struct: + return impliedStructType(rt, path) + + default: + return cty.NilType, path.NewErrorf("no cty.Type for %s", rt) + } +} + +func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) { + if valueType.AssignableTo(rt) { + // Special case: cty.Value represents cty.DynamicPseudoType, for + // type conformance checking. + return cty.DynamicPseudoType, nil + } + + fieldIdxs := structTagIndices(rt) + if len(fieldIdxs) == 0 { + return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt) + } + + atys := make(map[string]cty.Type, len(fieldIdxs)) + + { + // Temporary extension of path for attributes + path := append(path, nil) + + for k, fi := range fieldIdxs { + path[len(path)-1] = cty.GetAttrStep{Name: k} + + ft := rt.Field(fi).Type + aty, err := impliedType(ft, path) + if err != nil { + return cty.NilType, err + } + + atys[k] = aty + } + } + + return cty.Object(atys), nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/helper.go b/vendor/github.com/hashicorp/go-cty/cty/helper.go new file mode 100644 index 000000000..1b88e9fa0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/helper.go @@ -0,0 +1,99 @@ +package cty + +import ( + "fmt" +) + +// anyUnknown is a helper to easily check if a set of values contains any +// unknowns, for operations that short-circuit to return unknown in that case. +func anyUnknown(values ...Value) bool { + for _, val := range values { + if val.v == unknown { + return true + } + } + return false +} + +// typeCheck tests whether all of the given values belong to the given type. +// If the given types are a mixture of the given type and the dynamic +// pseudo-type then a short-circuit dynamic value is returned. If the given +// values are all of the correct type but at least one is unknown then +// a short-circuit unknown value is returned. If any other types appear then +// an error is returned. Otherwise (finally!) the result is nil, nil. 
+func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, err error) { + hasDynamic := false + hasUnknown := false + + for i, val := range values { + if val.ty == DynamicPseudoType { + hasDynamic = true + continue + } + + if !val.Type().Equals(required) { + return nil, fmt.Errorf( + "type mismatch: want %s but value %d is %s", + required.FriendlyName(), + i, val.ty.FriendlyName(), + ) + } + + if val.v == unknown { + hasUnknown = true + } + } + + if hasDynamic { + return &DynamicVal, nil + } + + if hasUnknown { + ret := UnknownVal(ret) + return &ret, nil + } + + return nil, nil +} + +// mustTypeCheck is a wrapper around typeCheck that immediately panics if +// any error is returned. +func mustTypeCheck(required Type, ret Type, values ...Value) *Value { + shortCircuit, err := typeCheck(required, ret, values...) + if err != nil { + panic(err) + } + return shortCircuit +} + +// shortCircuitForceType takes the return value from mustTypeCheck and +// replaces it with an unknown of the given type if the original value was +// DynamicVal. +// +// This is useful for operations that are specified to always return a +// particular type, since then a dynamic result can safely be "upgrade" to +// a strongly-typed unknown, which then allows subsequent operations to +// be actually type-checked. +// +// It is safe to use this only if the operation in question is defined as +// returning either a value of the given type or panicking, since we know +// then that subsequent operations won't run if the operation panics. +// +// If the given short-circuit value is *not* DynamicVal then it must be +// of the given type, or this function will panic. +func forceShortCircuitType(shortCircuit *Value, ty Type) *Value { + if shortCircuit == nil { + return nil + } + + if shortCircuit.ty == DynamicPseudoType { + ret := UnknownVal(ty) + return &ret + } + + if !shortCircuit.ty.Equals(ty) { + panic("forceShortCircuitType got value of wrong type") + } + + return shortCircuit +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json.go b/vendor/github.com/hashicorp/go-cty/cty/json.go new file mode 100644 index 000000000..c421a62ed --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json.go @@ -0,0 +1,176 @@ +package cty + +import ( + "bytes" + "encoding/json" + "fmt" +) + +// MarshalJSON is an implementation of json.Marshaler that allows Type +// instances to be serialized as JSON. +// +// All standard types can be serialized, but capsule types cannot since there +// is no way to automatically recover the original pointer and capsule types +// compare by equality. 
+func (t Type) MarshalJSON() ([]byte, error) { + switch impl := t.typeImpl.(type) { + case primitiveType: + switch impl.Kind { + case primitiveTypeBool: + return []byte{'"', 'b', 'o', 'o', 'l', '"'}, nil + case primitiveTypeNumber: + return []byte{'"', 'n', 'u', 'm', 'b', 'e', 'r', '"'}, nil + case primitiveTypeString: + return []byte{'"', 's', 't', 'r', 'i', 'n', 'g', '"'}, nil + default: + panic("unknown primitive type kind") + } + case typeList, typeMap, typeSet: + buf := &bytes.Buffer{} + etyJSON, err := t.ElementType().MarshalJSON() + if err != nil { + return nil, err + } + buf.WriteRune('[') + switch impl.(type) { + case typeList: + buf.WriteString(`"list"`) + case typeMap: + buf.WriteString(`"map"`) + case typeSet: + buf.WriteString(`"set"`) + } + buf.WriteRune(',') + buf.Write(etyJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeObject: + buf := &bytes.Buffer{} + atysJSON, err := json.Marshal(t.AttributeTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["object",`) + buf.Write(atysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case typeTuple: + buf := &bytes.Buffer{} + etysJSON, err := json.Marshal(t.TupleElementTypes()) + if err != nil { + return nil, err + } + buf.WriteString(`["tuple",`) + buf.Write(etysJSON) + buf.WriteRune(']') + return buf.Bytes(), nil + case pseudoTypeDynamic: + return []byte{'"', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '"'}, nil + case *capsuleType: + return nil, fmt.Errorf("type not allowed: %s", t.FriendlyName()) + default: + // should never happen + panic("unknown type implementation") + } +} + +// UnmarshalJSON is the opposite of MarshalJSON. See the documentation of +// MarshalJSON for information on the limitations of JSON serialization of +// types. +func (t *Type) UnmarshalJSON(buf []byte) error { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + + tok, err := dec.Token() + if err != nil { + return err + } + + switch v := tok.(type) { + case string: + switch v { + case "bool": + *t = Bool + case "number": + *t = Number + case "string": + *t = String + case "dynamic": + *t = DynamicPseudoType + default: + return fmt.Errorf("invalid primitive type name %q", v) + } + + if dec.More() { + return fmt.Errorf("extraneous data after type description") + } + return nil + case json.Delim: + if rune(v) != '[' { + return fmt.Errorf("invalid complex type description") + } + + tok, err = dec.Token() + if err != nil { + return err + } + + kind, ok := tok.(string) + if !ok { + return fmt.Errorf("invalid complex type kind name") + } + + switch kind { + case "list": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = List(ety) + case "map": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Map(ety) + case "set": + var ety Type + err = dec.Decode(&ety) + if err != nil { + return err + } + *t = Set(ety) + case "object": + var atys map[string]Type + err = dec.Decode(&atys) + if err != nil { + return err + } + *t = Object(atys) + case "tuple": + var etys []Type + err = dec.Decode(&etys) + if err != nil { + return err + } + *t = Tuple(etys) + default: + return fmt.Errorf("invalid complex type kind name") + } + + tok, err = dec.Token() + if err != nil { + return err + } + if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() { + return fmt.Errorf("unexpected extra data in type description") + } + + return nil + + default: + return fmt.Errorf("invalid type description") + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/doc.go 
b/vendor/github.com/hashicorp/go-cty/cty/json/doc.go new file mode 100644 index 000000000..8916513d6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/doc.go @@ -0,0 +1,11 @@ +// Package json provides functions for serializing cty types and values in +// JSON format, and for decoding them again. +// +// Since the cty type system is a superset of the JSON type system, +// round-tripping through JSON is lossy unless type information is provided +// both at encoding time and decoding time. Callers of this package are +// therefore suggested to define their expected structure as a cty.Type +// and pass it in consistently both when encoding and when decoding, though +// default (type-lossy) behavior is provided for situations where the precise +// representation of the data is not significant. +package json diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go b/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go new file mode 100644 index 000000000..728ab0100 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go @@ -0,0 +1,193 @@ +package json + +import ( + "bytes" + "encoding/json" + "sort" + + "github.com/hashicorp/go-cty/cty" +) + +func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error { + if val.IsMarked() { + return path.NewErrorf("value has marks, so it cannot be seralized") + } + + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. + if t == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, b) + } + + if val.IsNull() { + b.WriteString("null") + return nil + } + + if !val.IsKnown() { + return path.NewErrorf("value is not known") + } + + // The caller should've guaranteed that the given val is conformant with + // the given type t, so we'll proceed under that assumption here. 
+ + switch { + case t.IsPrimitiveType(): + switch t { + case cty.String: + json, err := json.Marshal(val.AsString()) + if err != nil { + return path.NewErrorf("failed to serialize value: %s", err) + } + b.Write(json) + return nil + case cty.Number: + if val.RawEquals(cty.PositiveInfinity) || val.RawEquals(cty.NegativeInfinity) { + return path.NewErrorf("cannot serialize infinity as JSON") + } + b.WriteString(val.AsBigFloat().Text('f', -1)) + return nil + case cty.Bool: + if val.True() { + b.WriteString("true") + } else { + b.WriteString("false") + } + return nil + default: + panic("unsupported primitive type") + } + case t.IsListType(), t.IsSetType(): + b.WriteRune('[') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune(']') + return nil + case t.IsMapType(): + b.WriteRune('{') + first := true + ety := t.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + if !first { + b.WriteRune(',') + } + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(ev, ety, path, b) + if err != nil { + return err + } + first = false + } + b.WriteRune('}') + return nil + case t.IsTupleType(): + b.WriteRune('[') + etys := t.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + for it.Next() { + if i > 0 { + b.WriteRune(',') + } + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, b) + if err != nil { + return err + } + i++ + } + b.WriteRune(']') + return nil + case t.IsObjectType(): + b.WriteRune('{') + atys := t.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + for i, k := range names { + aty := atys[k] + if i > 0 { + b.WriteRune(',') + } + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, b) + if err != nil { + return err + } + b.WriteRune(':') + err = marshal(av, aty, path, b) + if err != nil { + return err + } + } + b.WriteRune('}') + return nil + case t.IsCapsuleType(): + rawVal := val.EncapsulatedValue() + jsonVal, err := json.Marshal(rawVal) + if err != nil { + return path.NewError(err) + } + b.Write(jsonVal) + return nil + default: + // should never happen + return path.NewErrorf("cannot JSON-serialize %s", t.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
+func marshalDynamic(val cty.Value, path cty.Path, b *bytes.Buffer) error { + typeJSON, err := MarshalType(val.Type()) + if err != nil { + return path.NewErrorf("failed to serialize type: %s", err) + } + b.WriteString(`{"value":`) + marshal(val, val.Type(), path, b) + b.WriteString(`,"type":`) + b.Write(typeJSON) + b.WriteRune('}') + return nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/simple.go b/vendor/github.com/hashicorp/go-cty/cty/json/simple.go new file mode 100644 index 000000000..aaba8c3bd --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/simple.go @@ -0,0 +1,41 @@ +package json + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// SimpleJSONValue is a wrapper around cty.Value that adds implementations of +// json.Marshaler and json.Unmarshaler for simple-but-type-lossy automatic +// encoding and decoding of values. +// +// The couplet Marshal and Unmarshal both take extra type information to +// inform the encoding and decoding process so that all of the cty types +// can be represented even though JSON's type system is a subset. +// +// SimpleJSONValue instead takes the approach of discarding the value's type +// information and then deriving a new type from the stored structure when +// decoding. This results in the same data being returned but not necessarily +// with exactly the same type. +// +// For information on how types are inferred when decoding, see the +// documentation of the function ImpliedType. +type SimpleJSONValue struct { + cty.Value +} + +// MarshalJSON is an implementation of json.Marshaler. See the documentation +// of SimpleJSONValue for more information. +func (v SimpleJSONValue) MarshalJSON() ([]byte, error) { + return Marshal(v.Value, v.Type()) +} + +// UnmarshalJSON is an implementation of json.Unmarshaler. See the +// documentation of SimpleJSONValue for more information. +func (v *SimpleJSONValue) UnmarshalJSON(buf []byte) error { + t, err := ImpliedType(buf) + if err != nil { + return err + } + v.Value, err = Unmarshal(buf, t) + return err +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/type.go b/vendor/github.com/hashicorp/go-cty/cty/json/type.go new file mode 100644 index 000000000..59d7f2e17 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/type.go @@ -0,0 +1,23 @@ +package json + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// MarshalType returns a JSON serialization of the given type. +// +// This is just a thin wrapper around t.MarshalJSON, for symmetry with +// UnmarshalType. +func MarshalType(t cty.Type) ([]byte, error) { + return t.MarshalJSON() +} + +// UnmarshalType decodes a JSON serialization of the given type as produced +// by either Type.MarshalJSON or MarshalType. +// +// This is a convenience wrapper around Type.UnmarshalJSON. +func UnmarshalType(buf []byte) (cty.Type, error) { + var t cty.Type + err := t.UnmarshalJSON(buf) + return t, err +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go new file mode 100644 index 000000000..8adf22bbe --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go @@ -0,0 +1,170 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/hashicorp/go-cty/cty" +) + +// ImpliedType returns the cty Type implied by the structure of the given +// JSON-compliant buffer. This function implements the default type mapping +// behavior used when decoding arbitrary JSON without explicit cty Type +// information. 
+// +// The rules are as follows: +// +// JSON strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// JSON objects map to cty object types, with the attributes defined by the +// object keys and the types of their values. +// +// JSON arrays map to cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any JSON syntax errors will be returned as an error, and the type will +// be the invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + dec.UseNumber() + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + if dec.More() { + return cty.NilType, fmt.Errorf("extraneous data after JSON object") + } + + return ty, nil +} + +func impliedType(dec *json.Decoder) (cty.Type, error) { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + return impliedTypeForTok(tok, dec) +} + +func impliedTypeForTok(tok json.Token, dec *json.Decoder) (cty.Type, error) { + if tok == nil { + return cty.DynamicPseudoType, nil + } + + switch ttok := tok.(type) { + case bool: + return cty.Bool, nil + + case json.Number: + return cty.Number, nil + + case string: + return cty.String, nil + + case json.Delim: + + switch rune(ttok) { + case '{': + return impliedObjectType(dec) + case '[': + return impliedTupleType(dec) + default: + return cty.NilType, fmt.Errorf("unexpected token %q", ttok) + } + + default: + return cty.NilType, fmt.Errorf("unsupported JSON token %#v", tok) + } +} + +func impliedObjectType(dec *json.Decoder) (cty.Type, error) { + // By the time we get in here, we've already consumed the { delimiter + // and so our next token should be the first object key. + + var atys map[string]cty.Type + + for { + // Read the object key first + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) != '}' { + return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok) + } + break + } + + key, ok := tok.(string) + if !ok { + return cty.NilType, fmt.Errorf("expected string but found %T", tok) + } + + // Now read the value + tok, err = dec.Token() + if err != nil { + return cty.NilType, err + } + + aty, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + + if atys == nil { + atys = make(map[string]cty.Type) + } + atys[key] = aty + } + + if len(atys) == 0 { + return cty.EmptyObject, nil + } + + return cty.Object(atys), nil +} + +func impliedTupleType(dec *json.Decoder) (cty.Type, error) { + // By the time we get in here, we've already consumed the [ delimiter + // and so our next token should be the first value. 
+ + var etys []cty.Type + + for { + tok, err := dec.Token() + if err != nil { + return cty.NilType, err + } + + if ttok, ok := tok.(json.Delim); ok { + if rune(ttok) == ']' { + break + } + } + + ety, err := impliedTypeForTok(tok, dec) + if err != nil { + return cty.NilType, err + } + etys = append(etys, ety) + } + + if len(etys) == 0 { + return cty.EmptyTuple, nil + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go b/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go new file mode 100644 index 000000000..5ad190d33 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go @@ -0,0 +1,459 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" +) + +func unmarshal(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + + tok, err := dec.Token() + if err != nil { + return cty.NilVal, path.NewError(err) + } + + if tok == nil { + return cty.NullVal(t), nil + } + + if t == cty.DynamicPseudoType { + return unmarshalDynamic(buf, path) + } + + switch { + case t.IsPrimitiveType(): + val, err := unmarshalPrimitive(tok, t, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case t.IsListType(): + return unmarshalList(buf, t.ElementType(), path) + case t.IsSetType(): + return unmarshalSet(buf, t.ElementType(), path) + case t.IsMapType(): + return unmarshalMap(buf, t.ElementType(), path) + case t.IsTupleType(): + return unmarshalTuple(buf, t.TupleElementTypes(), path) + case t.IsObjectType(): + return unmarshalObject(buf, t.AttributeTypes(), path) + case t.IsCapsuleType(): + return unmarshalCapsule(buf, t, path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", t.FriendlyName()) + } +} + +func unmarshalPrimitive(tok json.Token, t cty.Type, path cty.Path) (cty.Value, error) { + + switch t { + case cty.Bool: + switch v := tok.(type) { + case bool: + return cty.BoolVal(v), nil + case string: + val, err := convert.Convert(cty.StringVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("bool is required") + } + case cty.Number: + if v, ok := tok.(json.Number); ok { + tok = string(v) + } + switch v := tok.(type) { + case string: + val, err := cty.ParseNumberVal(v) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("number is required") + } + case cty.String: + switch v := tok.(type) { + case string: + return cty.StringVal(v), nil + case json.Number: + return cty.StringVal(string(v)), nil + case bool: + val, err := convert.Convert(cty.BoolVal(v), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil + default: + return cty.NilVal, path.NewErrorf("string is required") + } + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int64 + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(idx), + } + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read list value: %s", err) + } + + el, err := 
unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.ListValEmpty(ety), nil + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(ety), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read set value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + path := append(path, nil) + + for dec.More() { + path[len(path)-1] = cty.IndexStep{ + Key: cty.UnknownVal(cty.String), + } + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map key: %s", err) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(k), + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read map value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(buf []byte, etys []cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '['); err != nil { + return cty.NilVal, path.NewError(err) + } + + var vals []cty.Value + + { + path := append(path, nil) + var idx int + + for dec.More() { + if idx >= len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("too many tuple elements (need %d)", len(etys)) + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(idx)), + } + ety := etys[idx] + idx++ + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read tuple value: %s", err) + } + + el, err := unmarshal(rawVal, ety, path) + if err != nil { + return cty.NilVal, err + } + + vals = append(vals, el) + } + } + + if err := requireDelim(dec, ']'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if len(vals) != len(etys) { + return cty.NilVal, path[:len(path)-1].NewErrorf("not enough tuple elements (need %d)", len(etys)) + } + + if len(vals) == 0 { + return cty.EmptyTupleVal, nil + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(buf []byte, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + vals := make(map[string]cty.Value) + + { + objPath := path // some 
errors report from the object's perspective + path := append(path, nil) // path to a specific attribute + + for dec.More() { + + var err error + + k, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object key: %s", err) + } + + aty, ok := atys[k] + if !ok { + return cty.NilVal, objPath.NewErrorf("unsupported attribute %q", k) + } + + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read object value: %s", err) + } + + el, err := unmarshal(rawVal, aty, path) + if err != nil { + return cty.NilVal, err + } + + vals[k] = el + } + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + // Make sure we have a value for every attribute + for k, aty := range atys { + if _, exists := vals[k]; !exists { + vals[k] = cty.NullVal(aty) + } + } + + if len(vals) == 0 { + return cty.EmptyObjectVal, nil + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalCapsule(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) { + rawType := t.EncapsulatedType() + ptrPtr := reflect.New(reflect.PtrTo(rawType)) + ptrPtr.Elem().Set(reflect.New(rawType)) + ptr := ptrPtr.Elem().Interface() + err := json.Unmarshal(buf, ptr) + if err != nil { + return cty.NilVal, path.NewError(err) + } + + return cty.CapsuleVal(t, ptr), nil +} + +func unmarshalDynamic(buf []byte, path cty.Path) (cty.Value, error) { + dec := bufDecoder(buf) + if err := requireDelim(dec, '{'); err != nil { + return cty.NilVal, path.NewError(err) + } + + var t cty.Type + var valBody []byte // defer actual decoding until we know the type + + for dec.More() { + var err error + + key, err := requireObjectKey(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor key: %s", err) + } + + rawVal, err := readRawValue(dec) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor value: %s", err) + } + + switch key { + case "type": + err := json.Unmarshal(rawVal, &t) + if err != nil { + return cty.NilVal, path.NewErrorf("failed to decode type for dynamic value: %s", err) + } + case "value": + valBody = rawVal + default: + return cty.NilVal, path.NewErrorf("invalid key %q in dynamically-typed value", key) + } + + } + + if err := requireDelim(dec, '}'); err != nil { + return cty.NilVal, path.NewError(err) + } + + if t == cty.NilType { + return cty.NilVal, path.NewErrorf("missing type in dynamically-typed value") + } + if valBody == nil { + return cty.NilVal, path.NewErrorf("missing value in dynamically-typed value") + } + + val, err := Unmarshal([]byte(valBody), t) + if err != nil { + return cty.NilVal, path.NewError(err) + } + return val, nil +} + +func requireDelim(dec *json.Decoder, d rune) error { + tok, err := dec.Token() + if err != nil { + return err + } + + if tok != json.Delim(d) { + return fmt.Errorf("missing expected %c", d) + } + + return nil +} + +func requireObjectKey(dec *json.Decoder) (string, error) { + tok, err := dec.Token() + if err != nil { + return "", err + } + if s, ok := tok.(string); ok { + return s, nil + } + return "", fmt.Errorf("missing expected object key") +} + +func readRawValue(dec *json.Decoder) ([]byte, error) { + var rawVal json.RawMessage + err := dec.Decode(&rawVal) + if err != nil { + return nil, err + } + return []byte(rawVal), nil +} + +func bufDecoder(buf []byte) *json.Decoder { + r := bytes.NewReader(buf) + dec := json.NewDecoder(r) + 
dec.UseNumber() + return dec +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/value.go b/vendor/github.com/hashicorp/go-cty/cty/json/value.go new file mode 100644 index 000000000..50748f709 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/json/value.go @@ -0,0 +1,65 @@ +package json + +import ( + "bytes" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" +) + +// Marshal produces a JSON representation of the given value that can later +// be decoded into a value of the given type. +// +// A type is specified separately to allow for the given type to include +// cty.DynamicPseudoType to represent situations where any type is permitted +// and so type information must be included to allow recovery of the stored +// structure when decoding. +// +// The given type will also be used to attempt automatic conversions of any +// non-conformant types in the given value, although this will not always +// be possible. If the value cannot be made to be conformant then an error is +// returned, which may be a cty.PathError. +// +// Capsule-typed values can be marshalled, but with some caveats. Since +// capsule values are compared by pointer equality, it is impossible to recover +// a value that will compare equal to the original value. Additionally, +// it's not possible to JSON-serialize the capsule type itself, so it's not +// valid to use capsule types within parts of the value that are conformed to +// cty.DynamicPseudoType. Otherwise, a capsule value can be used as long as +// the encapsulated type itself is serializable with the Marshal function +// in encoding/json. +func Marshal(val cty.Value, t cty.Type) ([]byte, error) { + errs := val.Type().TestConformance(t) + if errs != nil { + // Attempt a conversion + var err error + val, err = convert.Convert(val, t) + if err != nil { + return nil, err + } + } + + // From this point onward, val can be assumed to be conforming to t. + + buf := &bytes.Buffer{} + var path cty.Path + err := marshal(val, t, path, buf) + + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// Unmarshal decodes a JSON representation of the given value into a cty Value +// conforming to the given type. +// +// While decoding, type conversions will be done where possible to make +// the result conformant even if the types given in JSON are not exactly +// correct. If conversion isn't possible then an error is returned, which +// may be a cty.PathError. +func Unmarshal(buf []byte, t cty.Type) (cty.Value, error) { + var path cty.Path + return unmarshal(buf, t, path) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/list_type.go b/vendor/github.com/hashicorp/go-cty/cty/list_type.go new file mode 100644 index 000000000..2ef02a12f --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/list_type.go @@ -0,0 +1,74 @@ +package cty + +import ( + "fmt" +) + +// TypeList instances represent specific list types. Each distinct ElementType +// creates a distinct, non-equal list type. +type typeList struct { + typeImplSigil + ElementTypeT Type +} + +// List creates a map type with the given element Type. +// +// List types are CollectionType implementations. +func List(elem Type) Type { + return Type{ + typeList{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a list whose element type is +// equal to that of the receiver. 
+func (t typeList) Equals(other Type) bool { + ot, isList := other.typeImpl.(typeList) + if !isList { + return false + } + + return t.ElementTypeT.Equals(ot.ElementTypeT) +} + +func (t typeList) FriendlyName(mode friendlyTypeNameMode) string { + elemName := t.ElementTypeT.friendlyNameMode(mode) + if mode == friendlyTypeConstraintName { + if t.ElementTypeT == DynamicPseudoType { + elemName = "any single type" + } + } + return "list of " + elemName +} + +func (t typeList) ElementType() Type { + return t.ElementTypeT +} + +func (t typeList) GoString() string { + return fmt.Sprintf("cty.List(%#v)", t.ElementTypeT) +} + +// IsListType returns true if the given type is a list type, regardless of its +// element type. +func (t Type) IsListType() bool { + _, ok := t.typeImpl.(typeList) + return ok +} + +// ListElementType is a convenience method that checks if the given type is +// a list type, returning a pointer to its element type if so and nil +// otherwise. This is intended to allow convenient conditional branches, +// like so: +// +// if et := t.ListElementType(); et != nil { +// // Do something with *et +// } +func (t Type) ListElementType() *Type { + if lt, ok := t.typeImpl.(typeList); ok { + return <.ElementTypeT + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/map_type.go b/vendor/github.com/hashicorp/go-cty/cty/map_type.go new file mode 100644 index 000000000..82d36c628 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/map_type.go @@ -0,0 +1,74 @@ +package cty + +import ( + "fmt" +) + +// TypeList instances represent specific list types. Each distinct ElementType +// creates a distinct, non-equal list type. +type typeMap struct { + typeImplSigil + ElementTypeT Type +} + +// Map creates a map type with the given element Type. +// +// Map types are CollectionType implementations. +func Map(elem Type) Type { + return Type{ + typeMap{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a map whose element type is +// equal to that of the receiver. +func (t typeMap) Equals(other Type) bool { + ot, isMap := other.typeImpl.(typeMap) + if !isMap { + return false + } + + return t.ElementTypeT.Equals(ot.ElementTypeT) +} + +func (t typeMap) FriendlyName(mode friendlyTypeNameMode) string { + elemName := t.ElementTypeT.friendlyNameMode(mode) + if mode == friendlyTypeConstraintName { + if t.ElementTypeT == DynamicPseudoType { + elemName = "any single type" + } + } + return "map of " + elemName +} + +func (t typeMap) ElementType() Type { + return t.ElementTypeT +} + +func (t typeMap) GoString() string { + return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT) +} + +// IsMapType returns true if the given type is a list type, regardless of its +// element type. +func (t Type) IsMapType() bool { + _, ok := t.typeImpl.(typeMap) + return ok +} + +// MapElementType is a convenience method that checks if the given type is +// a map type, returning a pointer to its element type if so and nil +// otherwise. 
This is intended to allow convenient conditional branches, +// like so: +// +// if et := t.MapElementType(); et != nil { +// // Do something with *et +// } +func (t Type) MapElementType() *Type { + if lt, ok := t.typeImpl.(typeMap); ok { + return <.ElementTypeT + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/marks.go b/vendor/github.com/hashicorp/go-cty/cty/marks.go new file mode 100644 index 000000000..3898e4553 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/marks.go @@ -0,0 +1,296 @@ +package cty + +import ( + "fmt" + "strings" +) + +// marker is an internal wrapper type used to add special "marks" to values. +// +// A "mark" is an annotation that can be used to represent additional +// characteristics of values that propagate through operation methods to +// result values. However, a marked value cannot be used with integration +// methods normally associated with its type, in order to ensure that +// calling applications don't inadvertently drop marks as they round-trip +// values out of cty and back in again. +// +// Marked values are created only explicitly by the calling application, so +// an application that never marks a value does not need to worry about +// encountering marked values. +type marker struct { + realV interface{} + marks ValueMarks +} + +// ValueMarks is a map, representing a set, of "mark" values associated with +// a Value. See Value.Mark for more information on the usage of mark values. +type ValueMarks map[interface{}]struct{} + +// NewValueMarks constructs a new ValueMarks set with the given mark values. +func NewValueMarks(marks ...interface{}) ValueMarks { + if len(marks) == 0 { + return nil + } + ret := make(ValueMarks, len(marks)) + for _, v := range marks { + ret[v] = struct{}{} + } + return ret +} + +// Equal returns true if the receiver and the given ValueMarks both contain +// the same marks. +func (m ValueMarks) Equal(o ValueMarks) bool { + if len(m) != len(o) { + return false + } + for v := range m { + if _, ok := o[v]; !ok { + return false + } + } + return true +} + +func (m ValueMarks) GoString() string { + var s strings.Builder + s.WriteString("cty.NewValueMarks(") + i := 0 + for mv := range m { + if i != 0 { + s.WriteString(", ") + } + s.WriteString(fmt.Sprintf("%#v", mv)) + i++ + } + s.WriteString(")") + return s.String() +} + +// IsMarked returns true if and only if the receiving value carries at least +// one mark. A marked value cannot be used directly with integration methods +// without explicitly unmarking it (and retrieving the markings) first. +func (val Value) IsMarked() bool { + _, ok := val.v.(marker) + return ok +} + +// HasMark returns true if and only if the receiving value has the given mark. +func (val Value) HasMark(mark interface{}) bool { + if mr, ok := val.v.(marker); ok { + _, ok := mr.marks[mark] + return ok + } + return false +} + +// ContainsMarked returns true if the receiving value or any value within it +// is marked. +// +// This operation is relatively expensive. If you only need a shallow result, +// use IsMarked instead. +func (val Value) ContainsMarked() bool { + ret := false + Walk(val, func(_ Path, v Value) (bool, error) { + if v.IsMarked() { + ret = true + return false, nil + } + return true, nil + }) + return ret +} + +func (val Value) assertUnmarked() { + if val.IsMarked() { + panic("value is marked, so must be unmarked first") + } +} + +// Marks returns a map (representing a set) of all of the mark values +// associated with the receiving value, without changing the marks. 
Returns nil +// if the value is not marked at all. +func (val Value) Marks() ValueMarks { + if mr, ok := val.v.(marker); ok { + // copy so that the caller can't mutate our internals + ret := make(ValueMarks, len(mr.marks)) + for k, v := range mr.marks { + ret[k] = v + } + return ret + } + return nil +} + +// HasSameMarks returns true if an only if the receiver and the given other +// value have identical marks. +func (val Value) HasSameMarks(other Value) bool { + vm, vmOK := val.v.(marker) + om, omOK := other.v.(marker) + if vmOK != omOK { + return false + } + if vmOK { + return vm.marks.Equal(om.marks) + } + return true +} + +// Mark returns a new value that as the same type and underlying value as +// the receiver but that also carries the given value as a "mark". +// +// Marks are used to carry additional application-specific characteristics +// associated with values. A marked value can be used with operation methods, +// in which case the marks are propagated to the operation results. A marked +// value _cannot_ be used with integration methods, so callers of those +// must derive an unmarked value using Unmark (and thus explicitly handle +// the markings) before calling the integration methods. +// +// The mark value can be any value that would be valid to use as a map key. +// The mark value should be of a named type in order to use the type itself +// as a namespace for markings. That type can be unexported if desired, in +// order to ensure that the mark can only be handled through the defining +// package's own functions. +// +// An application that never calls this method does not need to worry about +// handling marked values. +func (val Value) Mark(mark interface{}) Value { + var newMarker marker + newMarker.realV = val.v + if mr, ok := val.v.(marker); ok { + // It's already a marker, so we'll retain existing marks. + newMarker.marks = make(ValueMarks, len(mr.marks)+1) + for k, v := range mr.marks { + newMarker.marks[k] = v + } + } else { + // It's not a marker yet, so we're creating the first mark. + newMarker.marks = make(ValueMarks, 1) + } + newMarker.marks[mark] = struct{}{} + return Value{ + ty: val.ty, + v: newMarker, + } +} + +// Unmark separates the marks of the receiving value from the value itself, +// removing a new unmarked value and a map (representing a set) of the marks. +// +// If the receiver isn't marked, Unmark returns it verbatim along with a nil +// map of marks. +func (val Value) Unmark() (Value, ValueMarks) { + if !val.IsMarked() { + return val, nil + } + mr := val.v.(marker) + marks := val.Marks() // copy so that the caller can't mutate our internals + return Value{ + ty: val.ty, + v: mr.realV, + }, marks +} + +// UnmarkDeep is similar to Unmark, but it works with an entire nested structure +// rather than just the given value directly. +// +// The result is guaranteed to contain no nested values that are marked, and +// the returned marks set includes the superset of all of the marks encountered +// during the operation. +func (val Value) UnmarkDeep() (Value, ValueMarks) { + marks := make(ValueMarks) + ret, _ := Transform(val, func(_ Path, v Value) (Value, error) { + unmarkedV, valueMarks := v.Unmark() + for m, s := range valueMarks { + marks[m] = s + } + return unmarkedV, nil + }) + return ret, marks +} + +func (val Value) unmarkForce() Value { + unw, _ := val.Unmark() + return unw +} + +// WithMarks returns a new value that has the same type and underlying value +// as the receiver and also has the marks from the given maps (representing +// sets). 
+func (val Value) WithMarks(marks ...ValueMarks) Value { + if len(marks) == 0 { + return val + } + ownMarks := val.Marks() + markCount := len(ownMarks) + for _, s := range marks { + markCount += len(s) + } + if markCount == 0 { + return val + } + newMarks := make(ValueMarks, markCount) + for m := range ownMarks { + newMarks[m] = struct{}{} + } + for _, s := range marks { + for m := range s { + newMarks[m] = struct{}{} + } + } + v := val.v + if mr, ok := v.(marker); ok { + v = mr.realV + } + return Value{ + ty: val.ty, + v: marker{ + realV: v, + marks: newMarks, + }, + } +} + +// WithSameMarks returns a new value that has the same type and underlying +// value as the receiver and also has the marks from the given source values. +// +// Use this if you are implementing your own higher-level operations against +// cty using the integration methods, to re-introduce the marks from the +// source values of the operation. +func (val Value) WithSameMarks(srcs ...Value) Value { + if len(srcs) == 0 { + return val + } + ownMarks := val.Marks() + markCount := len(ownMarks) + for _, sv := range srcs { + if mr, ok := sv.v.(marker); ok { + markCount += len(mr.marks) + } + } + if markCount == 0 { + return val + } + newMarks := make(ValueMarks, markCount) + for m := range ownMarks { + newMarks[m] = struct{}{} + } + for _, sv := range srcs { + if mr, ok := sv.v.(marker); ok { + for m := range mr.marks { + newMarks[m] = struct{}{} + } + } + } + v := val.v + if mr, ok := v.(marker); ok { + v = mr.realV + } + return Value{ + ty: val.ty, + v: marker{ + realV: v, + marks: newMarks, + }, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go new file mode 100644 index 000000000..1eb99f28a --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go @@ -0,0 +1,14 @@ +// Package msgpack provides functions for serializing cty values in the +// msgpack encoding, and decoding them again. +// +// If the same type information is provided both at encoding and decoding time +// then values can be round-tripped without loss, except for capsule types +// which are not currently supported. +// +// If any unknown values are passed to Marshal then they will be represented +// using a msgpack extension with type code zero, which is understood by +// the Unmarshal function within this package but will not be understood by +// a generic (non-cty-aware) msgpack decoder. Ensure that no unknown values +// are used if interoperability with other msgpack implementations is +// required. +package msgpack diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go new file mode 100644 index 000000000..ce59d9ff4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go @@ -0,0 +1,31 @@ +package msgpack + +import ( + "bytes" + + "github.com/hashicorp/go-cty/cty" + "github.com/vmihailenco/msgpack" +) + +type dynamicVal struct { + Value cty.Value + Path cty.Path +} + +func (dv *dynamicVal) MarshalMsgpack() ([]byte, error) { + // Rather than defining a msgpack-specific serialization of types, + // instead we use the existing JSON serialization. 
+ typeJSON, err := dv.Value.Type().MarshalJSON() + if err != nil { + return nil, dv.Path.NewErrorf("failed to serialize type: %s", err) + } + var buf bytes.Buffer + enc := msgpack.NewEncoder(&buf) + enc.EncodeArrayLen(2) + enc.EncodeBytes(typeJSON) + err = marshal(dv.Value, dv.Value.Type(), dv.Path, enc) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go new file mode 100644 index 000000000..6db0815e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go @@ -0,0 +1,8 @@ +package msgpack + +import ( + "math" +) + +var negativeInfinity = math.Inf(-1) +var positiveInfinity = math.Inf(1) diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go new file mode 100644 index 000000000..8a43c16ac --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go @@ -0,0 +1,211 @@ +package msgpack + +import ( + "bytes" + "math/big" + "sort" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" + "github.com/vmihailenco/msgpack" +) + +// Marshal produces a msgpack serialization of the given value that +// can be decoded into the given type later using Unmarshal. +// +// The given value must conform to the given type, or an error will +// be returned. +func Marshal(val cty.Value, ty cty.Type) ([]byte, error) { + errs := val.Type().TestConformance(ty) + if errs != nil { + // Attempt a conversion + var err error + val, err = convert.Convert(val, ty) + if err != nil { + return nil, err + } + } + + // From this point onward, val can be assumed to be conforming to t. + + var path cty.Path + var buf bytes.Buffer + enc := msgpack.NewEncoder(&buf) + + err := marshal(val, ty, path, enc) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func marshal(val cty.Value, ty cty.Type, path cty.Path, enc *msgpack.Encoder) error { + if val.IsMarked() { + return path.NewErrorf("value has marks, so it cannot be seralized") + } + + // If we're going to decode as DynamicPseudoType then we need to save + // dynamic type information to recover the real type. + if ty == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType { + return marshalDynamic(val, path, enc) + } + + if !val.IsKnown() { + err := enc.Encode(unknownVal) + if err != nil { + return path.NewError(err) + } + return nil + } + if val.IsNull() { + err := enc.EncodeNil() + if err != nil { + return path.NewError(err) + } + return nil + } + + // The caller should've guaranteed that the given val is conformant with + // the given type ty, so we'll proceed under that assumption here. 
+ switch { + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + err := enc.EncodeString(val.AsString()) + if err != nil { + return path.NewError(err) + } + return nil + case cty.Number: + var err error + switch { + case val.RawEquals(cty.PositiveInfinity): + err = enc.EncodeFloat64(positiveInfinity) + case val.RawEquals(cty.NegativeInfinity): + err = enc.EncodeFloat64(negativeInfinity) + default: + bf := val.AsBigFloat() + if iv, acc := bf.Int64(); acc == big.Exact { + err = enc.EncodeInt(iv) + } else if fv, acc := bf.Float64(); acc == big.Exact { + err = enc.EncodeFloat64(fv) + } else { + err = enc.EncodeString(bf.Text('f', -1)) + } + } + if err != nil { + return path.NewError(err) + } + return nil + case cty.Bool: + err := enc.EncodeBool(val.True()) + if err != nil { + return path.NewError(err) + } + return nil + default: + panic("unsupported primitive type") + } + case ty.IsListType(), ty.IsSetType(): + enc.EncodeArrayLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsMapType(): + enc.EncodeMapLen(val.LengthInt()) + ety := ty.ElementType() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + for it.Next() { + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + var err error + err = marshal(ek, ek.Type(), path, enc) + if err != nil { + return err + } + err = marshal(ev, ety, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsTupleType(): + etys := ty.TupleElementTypes() + it := val.ElementIterator() + path := append(path, nil) // local override of 'path' with extra element + i := 0 + enc.EncodeArrayLen(len(etys)) + for it.Next() { + ety := etys[i] + ek, ev := it.Element() + path[len(path)-1] = cty.IndexStep{ + Key: ek, + } + err := marshal(ev, ety, path, enc) + if err != nil { + return err + } + i++ + } + return nil + case ty.IsObjectType(): + atys := ty.AttributeTypes() + path := append(path, nil) // local override of 'path' with extra element + + names := make([]string, 0, len(atys)) + for k := range atys { + names = append(names, k) + } + sort.Strings(names) + + enc.EncodeMapLen(len(names)) + + for _, k := range names { + aty := atys[k] + av := val.GetAttr(k) + path[len(path)-1] = cty.GetAttrStep{ + Name: k, + } + var err error + err = marshal(cty.StringVal(k), cty.String, path, enc) + if err != nil { + return err + } + err = marshal(av, aty, path, enc) + if err != nil { + return err + } + } + return nil + case ty.IsCapsuleType(): + return path.NewErrorf("capsule types not supported for msgpack encoding") + default: + // should never happen + return path.NewErrorf("cannot msgpack-serialize %s", ty.FriendlyName()) + } +} + +// marshalDynamic adds an extra wrapping object containing dynamic type +// information for the given value. 
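// A minimal round-trip sketch for Marshal and Unmarshal, using an object type
// and value chosen here purely for illustration:
//
//	ty := cty.Object(map[string]cty.Type{"name": cty.String})
//	val := cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("example")})
//	b, err := msgpack.Marshal(val, ty)   // handle err in real code
//	got, err := msgpack.Unmarshal(b, ty) // handle err in real code
//	// got is equal to val; supplying the same type on both sides is what
//	// allows the lossless round trip described in the package documentation.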
+func marshalDynamic(val cty.Value, path cty.Path, enc *msgpack.Encoder) error { + dv := dynamicVal{ + Value: val, + Path: path, + } + return enc.Encode(&dv) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go new file mode 100644 index 000000000..86664bac5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go @@ -0,0 +1,167 @@ +package msgpack + +import ( + "bytes" + "fmt" + "io" + + "github.com/hashicorp/go-cty/cty" + "github.com/vmihailenco/msgpack" + msgpackcodes "github.com/vmihailenco/msgpack/codes" +) + +// ImpliedType returns the cty Type implied by the structure of the given +// msgpack-compliant buffer. This function implements the default type mapping +// behavior used when decoding arbitrary msgpack without explicit cty Type +// information. +// +// The rules are as follows: +// +// msgpack strings, numbers and bools map to their equivalent primitive type in +// cty. +// +// msgpack maps become cty object types, with the attributes defined by the +// map keys and the types of their values. +// +// msgpack arrays become cty tuple types, with the elements defined by the +// types of the array members. +// +// Any nulls are typed as DynamicPseudoType, so callers of this function +// must be prepared to deal with this. Callers that do not wish to deal with +// dynamic typing should not use this function and should instead describe +// their required types explicitly with a cty.Type instance when decoding. +// +// Any unknown values are similarly typed as DynamicPseudoType, because these +// do not carry type information on the wire. +// +// Any parse errors will be returned as an error, and the type will be the +// invalid value cty.NilType. +func ImpliedType(buf []byte) (cty.Type, error) { + r := bytes.NewReader(buf) + dec := msgpack.NewDecoder(r) + + ty, err := impliedType(dec) + if err != nil { + return cty.NilType, err + } + + // We must now be at the end of the buffer + err = dec.Skip() + if err != io.EOF { + return ty, fmt.Errorf("extra bytes after msgpack value") + } + + return ty, nil +} + +func impliedType(dec *msgpack.Decoder) (cty.Type, error) { + // If this function returns with a nil error then it must have already + // consumed the next value from the decoder, since when called recursively + // the caller will be expecting to find a following value here. 
+ + code, err := dec.PeekCode() + if err != nil { + return cty.NilType, err + } + + switch { + + case code == msgpackcodes.Nil || msgpackcodes.IsExt(code): + err := dec.Skip() + return cty.DynamicPseudoType, err + + case code == msgpackcodes.True || code == msgpackcodes.False: + _, err := dec.DecodeBool() + return cty.Bool, err + + case msgpackcodes.IsFixedNum(code): + _, err := dec.DecodeInt64() + return cty.Number, err + + case code == msgpackcodes.Int8 || code == msgpackcodes.Int16 || code == msgpackcodes.Int32 || code == msgpackcodes.Int64: + _, err := dec.DecodeInt64() + return cty.Number, err + + case code == msgpackcodes.Uint8 || code == msgpackcodes.Uint16 || code == msgpackcodes.Uint32 || code == msgpackcodes.Uint64: + _, err := dec.DecodeUint64() + return cty.Number, err + + case code == msgpackcodes.Float || code == msgpackcodes.Double: + _, err := dec.DecodeFloat64() + return cty.Number, err + + case msgpackcodes.IsString(code): + _, err := dec.DecodeString() + return cty.String, err + + case msgpackcodes.IsFixedMap(code) || code == msgpackcodes.Map16 || code == msgpackcodes.Map32: + return impliedObjectType(dec) + + case msgpackcodes.IsFixedArray(code) || code == msgpackcodes.Array16 || code == msgpackcodes.Array32: + return impliedTupleType(dec) + + default: + return cty.NilType, fmt.Errorf("unsupported msgpack code %#v", code) + } +} + +func impliedObjectType(dec *msgpack.Decoder) (cty.Type, error) { + // If we get in here then we've already peeked the next code and know + // it's some sort of map. + l, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicPseudoType, nil + } + + var atys map[string]cty.Type + + for i := 0; i < l; i++ { + // Read the map key first. We require maps to be strings, but msgpack + // doesn't so we're prepared to error here if not. + k, err := dec.DecodeString() + if err != nil { + return cty.DynamicPseudoType, err + } + + aty, err := impliedType(dec) + if err != nil { + return cty.DynamicPseudoType, err + } + + if atys == nil { + atys = make(map[string]cty.Type) + } + atys[k] = aty + } + + if len(atys) == 0 { + return cty.EmptyObject, nil + } + + return cty.Object(atys), nil +} + +func impliedTupleType(dec *msgpack.Decoder) (cty.Type, error) { + // If we get in here then we've already peeked the next code and know + // it's some sort of array. + l, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicPseudoType, nil + } + + if l == 0 { + return cty.EmptyTuple, nil + } + + etys := make([]cty.Type, l) + + for i := 0; i < l; i++ { + ety, err := impliedType(dec) + if err != nil { + return cty.DynamicPseudoType, err + } + etys[i] = ety + } + + return cty.Tuple(etys), nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go new file mode 100644 index 000000000..6507bc4be --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go @@ -0,0 +1,16 @@ +package msgpack + +type unknownType struct{} + +var unknownVal = unknownType{} + +// unknownValBytes is the raw bytes of the msgpack fixext1 value we +// write to represent an unknown value. It's an extension value of +// type zero whose value is irrelevant. Since it's irrelevant, we +// set it to a single byte whose value is also zero, since that's +// the most compact possible representation. 
+var unknownValBytes = []byte{0xd4, 0, 0} + +func (uv unknownType) MarshalMsgpack() ([]byte, error) { + return unknownValBytes, nil +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go new file mode 100644 index 000000000..67f4c9a4c --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go @@ -0,0 +1,334 @@ +package msgpack + +import ( + "bytes" + + "github.com/hashicorp/go-cty/cty" + "github.com/vmihailenco/msgpack" + msgpackCodes "github.com/vmihailenco/msgpack/codes" +) + +// Unmarshal interprets the given bytes as a msgpack-encoded cty Value of +// the given type, returning the result. +// +// If an error is returned, the error is written with a hypothetical +// end-user that wrote the msgpack file as its audience, using cty type +// system concepts rather than Go type system concepts. +func Unmarshal(b []byte, ty cty.Type) (cty.Value, error) { + r := bytes.NewReader(b) + dec := msgpack.NewDecoder(r) + + var path cty.Path + return unmarshal(dec, ty, path) +} + +func unmarshal(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + peek, err := dec.PeekCode() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + if msgpackCodes.IsExt(peek) { + // We just assume _all_ extensions are unknown values, + // since we don't have any other extensions. + dec.Skip() // skip what we've peeked + return cty.UnknownVal(ty), nil + } + if ty == cty.DynamicPseudoType { + return unmarshalDynamic(dec, path) + } + if peek == msgpackCodes.Nil { + dec.Skip() // skip what we've peeked + return cty.NullVal(ty), nil + } + + switch { + case ty.IsPrimitiveType(): + val, err := unmarshalPrimitive(dec, ty, path) + if err != nil { + return cty.NilVal, err + } + return val, nil + case ty.IsListType(): + return unmarshalList(dec, ty.ElementType(), path) + case ty.IsSetType(): + return unmarshalSet(dec, ty.ElementType(), path) + case ty.IsMapType(): + return unmarshalMap(dec, ty.ElementType(), path) + case ty.IsTupleType(): + return unmarshalTuple(dec, ty.TupleElementTypes(), path) + case ty.IsObjectType(): + return unmarshalObject(dec, ty.AttributeTypes(), path) + default: + return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName()) + } +} + +func unmarshalPrimitive(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) { + switch ty { + case cty.Bool: + rv, err := dec.DecodeBool() + if err != nil { + return cty.DynamicVal, path.NewErrorf("bool is required") + } + return cty.BoolVal(rv), nil + case cty.Number: + // Marshal will try int and float first, if the value can be + // losslessly represented in these encodings, and then fall + // back on a string if the number is too large or too precise. 
+ peek, err := dec.PeekCode() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + + if msgpackCodes.IsFixedNum(peek) { + rv, err := dec.DecodeInt64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberIntVal(rv), nil + } + + switch peek { + case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64: + rv, err := dec.DecodeInt64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberIntVal(rv), nil + case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64: + rv, err := dec.DecodeUint64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberUIntVal(rv), nil + case msgpackCodes.Float, msgpackCodes.Double: + rv, err := dec.DecodeFloat64() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return cty.NumberFloatVal(rv), nil + default: + rv, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + v, err := cty.ParseNumberVal(rv) + if err != nil { + return cty.DynamicVal, path.NewErrorf("number is required") + } + return v, nil + } + case cty.String: + rv, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path.NewErrorf("string is required") + } + return cty.StringVal(rv), nil + default: + // should never happen + panic("unsupported primitive type") + } +} + +func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a list is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.List(ety)), nil + case length == 0: + return cty.ListValEmpty(ety), nil + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.ListVal(vals), nil +} + +func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a set is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Set(ety)), nil + case length == 0: + return cty.SetValEmpty(ety), nil + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.SetVal(vals), nil +} + +func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a map is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Map(ety)), nil + case length == 0: + return cty.MapValEmpty(ety), nil + } + + vals := make(map[string]cty.Value, length) + path = append(path, nil) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + path[:len(path)-1].NewErrorf("non-string key in map") + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(key), + } + + val, err := unmarshal(dec, 
ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals[key] = val + } + + return cty.MapVal(vals), nil +} + +func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("a tuple is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Tuple(etys)), nil + case length == 0: + return cty.TupleVal(nil), nil + case length != len(etys): + return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys)) + } + + vals := make([]cty.Value, 0, length) + path = append(path, nil) + for i := 0; i < length; i++ { + path[len(path)-1] = cty.IndexStep{ + Key: cty.NumberIntVal(int64(i)), + } + ety := etys[i] + + val, err := unmarshal(dec, ety, path) + if err != nil { + return cty.DynamicVal, err + } + + vals = append(vals, val) + } + + return cty.TupleVal(vals), nil +} + +func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeMapLen() + if err != nil { + return cty.DynamicVal, path.NewErrorf("an object is required") + } + + switch { + case length < 0: + return cty.NullVal(cty.Object(atys)), nil + case length == 0: + return cty.ObjectVal(nil), nil + case length != len(atys): + return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required (%d given)", + len(atys), length) + } + + vals := make(map[string]cty.Value, length) + path = append(path, nil) + for i := 0; i < length; i++ { + key, err := dec.DecodeString() + if err != nil { + return cty.DynamicVal, path[:len(path)-1].NewErrorf("all keys must be strings") + } + + path[len(path)-1] = cty.IndexStep{ + Key: cty.StringVal(key), + } + aty, exists := atys[key] + if !exists { + return cty.DynamicVal, path.NewErrorf("unsupported attribute") + } + + val, err := unmarshal(dec, aty, path) + if err != nil { + return cty.DynamicVal, err + } + + vals[key] = val + } + + return cty.ObjectVal(vals), nil +} + +func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) { + length, err := dec.DecodeArrayLen() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + + switch { + case length == -1: + return cty.NullVal(cty.DynamicPseudoType), nil + case length != 2: + return cty.DynamicVal, path.NewErrorf( + "dynamic value array must have exactly two elements", + ) + } + + typeJSON, err := dec.DecodeBytes() + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + var ty cty.Type + err = (&ty).UnmarshalJSON(typeJSON) + if err != nil { + return cty.DynamicVal, path.NewError(err) + } + + return unmarshal(dec, ty, path) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/null.go b/vendor/github.com/hashicorp/go-cty/cty/null.go new file mode 100644 index 000000000..d58d0287b --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/null.go @@ -0,0 +1,14 @@ +package cty + +// NullVal returns a null value of the given type. A null can be created of any +// type, but operations on such values will always panic. Calling applications +// are encouraged to use nulls only sparingly, particularly when user-provided +// expressions are to be evaluated, since the precence of nulls creates a +// much higher chance of evaluation errors that can't be caught by a type +// checker. 
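// A small sketch of creating and inspecting a null value (the type here is
// chosen for illustration only):
//
//	n := cty.NullVal(cty.String)
//	n.IsNull() // true
//	n.Type()   // cty.String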
+func NullVal(t Type) Value { + return Value{ + ty: t, + v: nil, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/object_type.go b/vendor/github.com/hashicorp/go-cty/cty/object_type.go new file mode 100644 index 000000000..187d38751 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/object_type.go @@ -0,0 +1,135 @@ +package cty + +import ( + "fmt" +) + +type typeObject struct { + typeImplSigil + AttrTypes map[string]Type +} + +// Object creates an object type with the given attribute types. +// +// After a map is passed to this function the caller must no longer access it, +// since ownership is transferred to this library. +func Object(attrTypes map[string]Type) Type { + attrTypesNorm := make(map[string]Type, len(attrTypes)) + for k, v := range attrTypes { + attrTypesNorm[NormalizeString(k)] = v + } + + return Type{ + typeObject{ + AttrTypes: attrTypesNorm, + }, + } +} + +func (t typeObject) Equals(other Type) bool { + if ot, ok := other.typeImpl.(typeObject); ok { + if len(t.AttrTypes) != len(ot.AttrTypes) { + // Fast path: if we don't have the same number of attributes + // then we can't possibly be equal. This also avoids the need + // to test attributes in both directions below, since we know + // there can't be extras in "other". + return false + } + + for attr, ty := range t.AttrTypes { + oty, ok := ot.AttrTypes[attr] + if !ok { + return false + } + if !oty.Equals(ty) { + return false + } + } + + return true + } + return false +} + +func (t typeObject) FriendlyName(mode friendlyTypeNameMode) string { + // There isn't really a friendly way to write an object type due to its + // complexity, so we'll just do something English-ish. Callers will + // probably want to make some extra effort to avoid ever printing out + // an object type FriendlyName in its entirety. For example, could + // produce an error message by diffing two object types and saying + // something like "Expected attribute foo to be string, but got number". + // TODO: Finish this + return "object" +} + +func (t typeObject) GoString() string { + if len(t.AttrTypes) == 0 { + return "cty.EmptyObject" + } + return fmt.Sprintf("cty.Object(%#v)", t.AttrTypes) +} + +// EmptyObject is a shorthand for Object(map[string]Type{}), to more +// easily talk about the empty object type. +var EmptyObject Type + +// EmptyObjectVal is the only possible non-null, non-unknown value of type +// EmptyObject. +var EmptyObjectVal Value + +func init() { + EmptyObject = Object(map[string]Type{}) + EmptyObjectVal = Value{ + ty: EmptyObject, + v: map[string]interface{}{}, + } +} + +// IsObjectType returns true if the given type is an object type, regardless +// of its element type. +func (t Type) IsObjectType() bool { + _, ok := t.typeImpl.(typeObject) + return ok +} + +// HasAttribute returns true if the receiver has an attribute with the given +// name, regardless of its type. Will panic if the reciever isn't an object +// type; use IsObjectType to determine whether this operation will succeed. +func (t Type) HasAttribute(name string) bool { + name = NormalizeString(name) + if ot, ok := t.typeImpl.(typeObject); ok { + _, hasAttr := ot.AttrTypes[name] + return hasAttr + } + panic("HasAttribute on non-object Type") +} + +// AttributeType returns the type of the attribute with the given name. Will +// panic if the receiver is not an object type (use IsObjectType to confirm) +// or if the object type has no such attribute (use HasAttribute to confirm). 
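// A short sketch of constructing an object type and querying it with the
// methods documented above (attribute names here are hypothetical):
//
//	ty := cty.Object(map[string]cty.Type{
//		"name": cty.String,
//		"port": cty.Number,
//	})
//	ty.HasAttribute("name")    // true
//	ty.AttributeType("port")   // cty.Number
//	ty.HasAttribute("missing") // false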
+func (t Type) AttributeType(name string) Type { + name = NormalizeString(name) + if ot, ok := t.typeImpl.(typeObject); ok { + aty, hasAttr := ot.AttrTypes[name] + if !hasAttr { + panic("no such attribute") + } + return aty + } + panic("AttributeType on non-object Type") +} + +// AttributeTypes returns a map from attribute names to their associated +// types. Will panic if the receiver is not an object type (use IsObjectType +// to confirm). +// +// The returned map is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the returned +// map. For many purposes the attribute-related methods of Value are more +// appropriate and more convenient to use. +func (t Type) AttributeTypes() map[string]Type { + if ot, ok := t.typeImpl.(typeObject); ok { + return ot.AttrTypes + } + panic("AttributeTypes on non-object Type") +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/path.go b/vendor/github.com/hashicorp/go-cty/cty/path.go new file mode 100644 index 000000000..636e68c63 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/path.go @@ -0,0 +1,270 @@ +package cty + +import ( + "errors" + "fmt" +) + +// A Path is a sequence of operations to locate a nested value within a +// data structure. +// +// The empty Path represents the given item. Any PathSteps within represent +// taking a single step down into a data structure. +// +// Path has some convenience methods for gradually constructing a path, +// but callers can also feel free to just produce a slice of PathStep manually +// and convert to this type, which may be more appropriate in environments +// where memory pressure is a concern. +// +// Although a Path is technically mutable, by convention callers should not +// mutate a path once it has been built and passed to some other subsystem. +// Instead, use Copy and then mutate the copy before using it. +type Path []PathStep + +// PathStep represents a single step down into a data structure, as part +// of a Path. PathStep is a closed interface, meaning that the only +// permitted implementations are those within this package. +type PathStep interface { + pathStepSigil() pathStepImpl + Apply(Value) (Value, error) +} + +// embed pathImpl into a struct to declare it a PathStep implementation +type pathStepImpl struct{} + +func (p pathStepImpl) pathStepSigil() pathStepImpl { + return p +} + +// Index returns a new Path that is the reciever with an IndexStep appended +// to the end. +// +// This is provided as a convenient way to construct paths, but each call +// will create garbage so it should not be used where memory pressure is a +// concern. +func (p Path) Index(v Value) Path { + ret := make(Path, len(p)+1) + copy(ret, p) + ret[len(p)] = IndexStep{ + Key: v, + } + return ret +} + +// IndexInt is a typed convenience method for Index. +func (p Path) IndexInt(v int) Path { + return p.Index(NumberIntVal(int64(v))) +} + +// IndexString is a typed convenience method for Index. +func (p Path) IndexString(v string) Path { + return p.Index(StringVal(v)) +} + +// IndexPath is a convenience method to start a new Path with an IndexStep. +func IndexPath(v Value) Path { + return Path{}.Index(v) +} + +// IndexIntPath is a typed convenience method for IndexPath. +func IndexIntPath(v int) Path { + return IndexPath(NumberIntVal(int64(v))) +} + +// IndexStringPath is a typed convenience method for IndexPath. 
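// A brief sketch of how the convenience constructors above compose into a
// Path (attribute names and index values are hypothetical):
//
//	p := cty.GetAttrPath("instances").IndexInt(0).GetAttr("id")
//	// p is equivalent to:
//	//	cty.Path{
//	//		cty.GetAttrStep{Name: "instances"},
//	//		cty.IndexStep{Key: cty.NumberIntVal(0)},
//	//		cty.GetAttrStep{Name: "id"},
//	//	}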
+func IndexStringPath(v string) Path { + return IndexPath(StringVal(v)) +} + +// GetAttr returns a new Path that is the reciever with a GetAttrStep appended +// to the end. +// +// This is provided as a convenient way to construct paths, but each call +// will create garbage so it should not be used where memory pressure is a +// concern. +func (p Path) GetAttr(name string) Path { + ret := make(Path, len(p)+1) + copy(ret, p) + ret[len(p)] = GetAttrStep{ + Name: name, + } + return ret +} + +// Equals compares 2 Paths for exact equality. +func (p Path) Equals(other Path) bool { + if len(p) != len(other) { + return false + } + + for i := range p { + pv := p[i] + switch pv := pv.(type) { + case GetAttrStep: + ov, ok := other[i].(GetAttrStep) + if !ok || pv != ov { + return false + } + case IndexStep: + ov, ok := other[i].(IndexStep) + if !ok { + return false + } + + if !pv.Key.RawEquals(ov.Key) { + return false + } + default: + // Any invalid steps default to evaluating false. + return false + } + } + + return true + +} + +// HasPrefix determines if the path p contains the provided prefix. +func (p Path) HasPrefix(prefix Path) bool { + if len(prefix) > len(p) { + return false + } + + return p[:len(prefix)].Equals(prefix) +} + +// GetAttrPath is a convenience method to start a new Path with a GetAttrStep. +func GetAttrPath(name string) Path { + return Path{}.GetAttr(name) +} + +// Apply applies each of the steps in turn to successive values starting with +// the given value, and returns the result. If any step returns an error, +// the whole operation returns an error. +func (p Path) Apply(val Value) (Value, error) { + var err error + for i, step := range p { + val, err = step.Apply(val) + if err != nil { + return NilVal, fmt.Errorf("at step %d: %s", i, err) + } + } + return val, nil +} + +// LastStep applies the given path up to the last step and then returns +// the resulting value and the final step. +// +// This is useful when dealing with assignment operations, since in that +// case the *value* of the last step is not important (and may not, in fact, +// present at all) and we care only about its location. +// +// Since LastStep applies all steps except the last, it will return errors +// for those steps in the same way as Apply does. +// +// If the path has *no* steps then the returned PathStep will be nil, +// representing that any operation should be applied directly to the +// given value. +func (p Path) LastStep(val Value) (Value, PathStep, error) { + var err error + + if len(p) == 0 { + return val, nil, nil + } + + journey := p[:len(p)-1] + val, err = journey.Apply(val) + if err != nil { + return NilVal, nil, err + } + return val, p[len(p)-1], nil +} + +// Copy makes a shallow copy of the receiver. Often when paths are passed to +// caller code they come with the constraint that they are valid only until +// the caller returns, due to how they are constructed internally. Callers +// can use Copy to conveniently produce a copy of the value that _they_ control +// the validity of. +func (p Path) Copy() Path { + ret := make(Path, len(p)) + copy(ret, p) + return ret +} + +// IndexStep is a Step implementation representing applying the index operation +// to a value, which must be of either a list, map, or set type. +// +// When describing a path through a *type* rather than a concrete value, +// the Key may be an unknown value, indicating that the step applies to +// *any* key of the given type. 
+// +// When indexing into a set, the Key is actually the element being accessed +// itself, since in sets elements are their own identity. +type IndexStep struct { + pathStepImpl + Key Value +} + +// Apply returns the value resulting from indexing the given value with +// our key value. +func (s IndexStep) Apply(val Value) (Value, error) { + if val == NilVal || val.IsNull() { + return NilVal, errors.New("cannot index a null value") + } + + switch s.Key.Type() { + case Number: + if !(val.Type().IsListType() || val.Type().IsTupleType()) { + return NilVal, errors.New("not a list type") + } + case String: + if !val.Type().IsMapType() { + return NilVal, errors.New("not a map type") + } + default: + return NilVal, errors.New("key value not number or string") + } + + has := val.HasIndex(s.Key) + if !has.IsKnown() { + return UnknownVal(val.Type().ElementType()), nil + } + if !has.True() { + return NilVal, errors.New("value does not have given index key") + } + + return val.Index(s.Key), nil +} + +func (s IndexStep) GoString() string { + return fmt.Sprintf("cty.IndexStep{Key:%#v}", s.Key) +} + +// GetAttrStep is a Step implementation representing retrieving an attribute +// from a value, which must be of an object type. +type GetAttrStep struct { + pathStepImpl + Name string +} + +// Apply returns the value of our named attribute from the given value, which +// must be of an object type that has a value of that name. +func (s GetAttrStep) Apply(val Value) (Value, error) { + if val == NilVal || val.IsNull() { + return NilVal, errors.New("cannot access attributes on a null value") + } + + if !val.Type().IsObjectType() { + return NilVal, errors.New("not an object type") + } + + if !val.Type().HasAttribute(s.Name) { + return NilVal, fmt.Errorf("object has no attribute %q", s.Name) + } + + return val.GetAttr(s.Name), nil +} + +func (s GetAttrStep) GoString() string { + return fmt.Sprintf("cty.GetAttrStep{Name:%q}", s.Name) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/path_set.go b/vendor/github.com/hashicorp/go-cty/cty/path_set.go new file mode 100644 index 000000000..977523de5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/path_set.go @@ -0,0 +1,198 @@ +package cty + +import ( + "fmt" + "hash/crc64" + + "github.com/hashicorp/go-cty/cty/set" +) + +// PathSet represents a set of Path objects. This can be used, for example, +// to talk about a subset of paths within a value that meet some criteria, +// without directly modifying the values at those paths. +type PathSet struct { + set set.Set +} + +// NewPathSet creates and returns a PathSet, with initial contents optionally +// set by the given arguments. +func NewPathSet(paths ...Path) PathSet { + ret := PathSet{ + set: set.NewSet(pathSetRules{}), + } + + for _, path := range paths { + ret.Add(path) + } + + return ret +} + +// Add inserts a single given path into the set. +// +// Paths are immutable after construction by convention. It is particularly +// important not to mutate a path after it has been placed into a PathSet. +// If a Path is mutated while in a set, behavior is undefined. +func (s PathSet) Add(path Path) { + s.set.Add(path) +} + +// AddAllSteps is like Add but it also adds all of the steps leading to +// the given path. +// +// For example, if given a path representing "foo.bar", it will add both +// "foo" and "bar". +func (s PathSet) AddAllSteps(path Path) { + for i := 1; i <= len(path); i++ { + s.Add(path[:i]) + } +} + +// Has returns true if the given path is in the receiving set. 
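// A small sketch of PathSet membership, including the prefix steps added by
// AddAllSteps (the paths here are hypothetical):
//
//	ps := cty.NewPathSet(cty.GetAttrPath("a").GetAttr("b"))
//	ps.Has(cty.GetAttrPath("a").GetAttr("b")) // true
//	ps.Has(cty.GetAttrPath("a"))              // false: only the full path was added
//	ps.AddAllSteps(cty.GetAttrPath("x").GetAttr("y"))
//	ps.Has(cty.GetAttrPath("x"))              // true: AddAllSteps adds each prefix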
+func (s PathSet) Has(path Path) bool { + return s.set.Has(path) +} + +// List makes and returns a slice of all of the paths in the receiving set, +// in an undefined but consistent order. +func (s PathSet) List() []Path { + if s.Empty() { + return nil + } + ret := make([]Path, 0, s.set.Length()) + for it := s.set.Iterator(); it.Next(); { + ret = append(ret, it.Value().(Path)) + } + return ret +} + +// Remove modifies the receving set to no longer include the given path. +// If the given path was already absent, this is a no-op. +func (s PathSet) Remove(path Path) { + s.set.Remove(path) +} + +// Empty returns true if the length of the receiving set is zero. +func (s PathSet) Empty() bool { + return s.set.Length() == 0 +} + +// Union returns a new set whose contents are the union of the receiver and +// the given other set. +func (s PathSet) Union(other PathSet) PathSet { + return PathSet{ + set: s.set.Union(other.set), + } +} + +// Intersection returns a new set whose contents are the intersection of the +// receiver and the given other set. +func (s PathSet) Intersection(other PathSet) PathSet { + return PathSet{ + set: s.set.Intersection(other.set), + } +} + +// Subtract returns a new set whose contents are those from the receiver with +// any elements of the other given set subtracted. +func (s PathSet) Subtract(other PathSet) PathSet { + return PathSet{ + set: s.set.Subtract(other.set), + } +} + +// SymmetricDifference returns a new set whose contents are the symmetric +// difference of the receiver and the given other set. +func (s PathSet) SymmetricDifference(other PathSet) PathSet { + return PathSet{ + set: s.set.SymmetricDifference(other.set), + } +} + +// Equal returns true if and only if both the receiver and the given other +// set contain exactly the same paths. +func (s PathSet) Equal(other PathSet) bool { + if s.set.Length() != other.set.Length() { + return false + } + // Now we know the lengths are the same we only need to test in one + // direction whether everything in one is in the other. + for it := s.set.Iterator(); it.Next(); { + if !other.set.Has(it.Value()) { + return false + } + } + return true +} + +var crc64Table = crc64.MakeTable(crc64.ISO) + +var indexStepPlaceholder = []byte("#") + +// pathSetRules is an implementation of set.Rules from the set package, +// used internally within PathSet. +type pathSetRules struct { +} + +func (r pathSetRules) Hash(v interface{}) int { + path := v.(Path) + hash := crc64.New(crc64Table) + + for _, rawStep := range path { + switch step := rawStep.(type) { + case GetAttrStep: + // (this creates some garbage converting the string name to a + // []byte, but that's okay since cty is not designed to be + // used in tight loops under memory pressure.) + hash.Write([]byte(step.Name)) + default: + // For any other step type we just append a predefined value, + // which means that e.g. all indexes into a given collection will + // hash to the same value but we assume that collections are + // small and thus this won't hurt too much. + hash.Write(indexStepPlaceholder) + } + } + + // We discard half of the hash on 32-bit platforms; collisions just make + // our lookups take marginally longer, so not a big deal. 
+ return int(hash.Sum64()) +} + +func (r pathSetRules) Equivalent(a, b interface{}) bool { + aPath := a.(Path) + bPath := b.(Path) + + if len(aPath) != len(bPath) { + return false + } + + for i := range aPath { + switch aStep := aPath[i].(type) { + case GetAttrStep: + bStep, ok := bPath[i].(GetAttrStep) + if !ok { + return false + } + + if aStep.Name != bStep.Name { + return false + } + case IndexStep: + bStep, ok := bPath[i].(IndexStep) + if !ok { + return false + } + + eq := aStep.Key.Equals(bStep.Key) + if !eq.IsKnown() || eq.False() { + return false + } + default: + // Should never happen, since we document PathStep as a closed type. + panic(fmt.Errorf("unsupported step type %T", aStep)) + } + } + + return true +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go b/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go new file mode 100644 index 000000000..7b3d1196c --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go @@ -0,0 +1,122 @@ +package cty + +import "math/big" + +// primitiveType is the hidden implementation of the various primitive types +// that are exposed as variables in this package. +type primitiveType struct { + typeImplSigil + Kind primitiveTypeKind +} + +type primitiveTypeKind byte + +const ( + primitiveTypeBool primitiveTypeKind = 'B' + primitiveTypeNumber primitiveTypeKind = 'N' + primitiveTypeString primitiveTypeKind = 'S' +) + +func (t primitiveType) Equals(other Type) bool { + if otherP, ok := other.typeImpl.(primitiveType); ok { + return otherP.Kind == t.Kind + } + return false +} + +func (t primitiveType) FriendlyName(mode friendlyTypeNameMode) string { + switch t.Kind { + case primitiveTypeBool: + return "bool" + case primitiveTypeNumber: + return "number" + case primitiveTypeString: + return "string" + default: + // should never happen + panic("invalid primitive type") + } +} + +func (t primitiveType) GoString() string { + switch t.Kind { + case primitiveTypeBool: + return "cty.Bool" + case primitiveTypeNumber: + return "cty.Number" + case primitiveTypeString: + return "cty.String" + default: + // should never happen + panic("invalid primitive type") + } +} + +// Number is the numeric type. Number values are arbitrary-precision +// decimal numbers, which can then be converted into Go's various numeric +// types only if they are in the appropriate range. +var Number Type + +// String is the string type. String values are sequences of unicode codepoints +// encoded internally as UTF-8. +var String Type + +// Bool is the boolean type. The two values of this type are True and False. +var Bool Type + +// True is the truthy value of type Bool +var True Value + +// False is the falsey value of type Bool +var False Value + +// Zero is a number value representing exactly zero. 
+var Zero Value + +// PositiveInfinity is a Number value representing positive infinity +var PositiveInfinity Value + +// NegativeInfinity is a Number value representing negative infinity +var NegativeInfinity Value + +func init() { + Number = Type{ + primitiveType{Kind: primitiveTypeNumber}, + } + String = Type{ + primitiveType{Kind: primitiveTypeString}, + } + Bool = Type{ + primitiveType{Kind: primitiveTypeBool}, + } + True = Value{ + ty: Bool, + v: true, + } + False = Value{ + ty: Bool, + v: false, + } + Zero = Value{ + ty: Number, + v: big.NewFloat(0), + } + PositiveInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(false), + } + NegativeInfinity = Value{ + ty: Number, + v: (&big.Float{}).SetInf(true), + } +} + +// IsPrimitiveType returns true if and only if the reciever is a primitive +// type, which means it's either number, string, or bool. Any two primitive +// types can be safely compared for equality using the standard == operator +// without panic, which is not a guarantee that holds for all types. Primitive +// types can therefore also be used in switch statements. +func (t Type) IsPrimitiveType() bool { + _, ok := t.typeImpl.(primitiveType) + return ok +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/gob.go b/vendor/github.com/hashicorp/go-cty/cty/set/gob.go new file mode 100644 index 000000000..da2978f65 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/gob.go @@ -0,0 +1,76 @@ +package set + +import ( + "bytes" + "encoding/gob" + "fmt" +) + +// GobEncode is an implementation of the interface gob.GobEncoder, allowing +// sets to be included in structures encoded via gob. +// +// The set rules are included in the serialized value, so the caller must +// register its concrete rules type with gob.Register before using a +// set in a gob, and possibly also implement GobEncode/GobDecode to customize +// how any parameters are persisted. +// +// The set elements are also included, so if they are of non-primitive types +// they too must be registered with gob. +// +// If the produced gob values will persist for a long time, the caller must +// ensure compatibility of the rules implementation. In particular, if the +// definition of element equivalence changes between encoding and decoding +// then two distinct stored elements may be considered equivalent on decoding, +// causing the recovered set to have fewer elements than when it was stored. +func (s Set) GobEncode() ([]byte, error) { + gs := gobSet{ + Version: 0, + Rules: s.rules, + Values: s.Values(), + } + + buf := &bytes.Buffer{} + enc := gob.NewEncoder(buf) + err := enc.Encode(gs) + if err != nil { + return nil, fmt.Errorf("error encoding set.Set: %s", err) + } + + return buf.Bytes(), nil +} + +// GobDecode is the opposite of GobEncode. See GobEncode for information +// on the requirements for and caveats of including set values in gobs. 
+func (s *Set) GobDecode(buf []byte) error { + r := bytes.NewReader(buf) + dec := gob.NewDecoder(r) + + var gs gobSet + err := dec.Decode(&gs) + if err != nil { + return fmt.Errorf("error decoding set.Set: %s", err) + } + if gs.Version != 0 { + return fmt.Errorf("unsupported set.Set encoding version %d; need 0", gs.Version) + } + + victim := NewSetFromSlice(gs.Rules, gs.Values) + s.vals = victim.vals + s.rules = victim.rules + return nil +} + +type gobSet struct { + Version int + Rules Rules + + // The bucket-based representation is for efficient in-memory access, but + // for serialization it's enough to just retain the values themselves, + // which we can re-bucket using the rules (which may have changed!) when + // we re-inflate. + Values []interface{} +} + +func init() { + gob.Register([]interface{}(nil)) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go b/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go new file mode 100644 index 000000000..4a60494f9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go @@ -0,0 +1,15 @@ +package set + +type Iterator struct { + vals []interface{} + idx int +} + +func (it *Iterator) Value() interface{} { + return it.vals[it.idx] +} + +func (it *Iterator) Next() bool { + it.idx++ + return it.idx < len(it.vals) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/ops.go b/vendor/github.com/hashicorp/go-cty/cty/set/ops.go new file mode 100644 index 000000000..fd1555f21 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/ops.go @@ -0,0 +1,210 @@ +package set + +import ( + "sort" +) + +// Add inserts the given value into the receiving Set. +// +// This mutates the set in-place. This operation is not thread-safe. +func (s Set) Add(val interface{}) { + hv := s.rules.Hash(val) + if _, ok := s.vals[hv]; !ok { + s.vals[hv] = make([]interface{}, 0, 1) + } + bucket := s.vals[hv] + + // See if an equivalent value is already present + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return + } + } + + s.vals[hv] = append(bucket, val) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s Set) Remove(val interface{}) { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return + } + + for i, ev := range bucket { + if s.rules.Equivalent(val, ev) { + newBucket := make([]interface{}, 0, len(bucket)-1) + newBucket = append(newBucket, bucket[:i]...) + newBucket = append(newBucket, bucket[i+1:]...) + if len(newBucket) > 0 { + s.vals[hv] = newBucket + } else { + delete(s.vals, hv) + } + return + } + } +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s Set) Has(val interface{}) bool { + hv := s.rules.Hash(val) + bucket, ok := s.vals[hv] + if !ok { + return false + } + + for _, ev := range bucket { + if s.rules.Equivalent(val, ev) { + return true + } + } + return false +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s Set) Copy() Set { + ret := NewSet(s.rules) + for k, v := range s.vals { + ret.vals[k] = v + } + return ret +} + +// Iterator returns an iterator over values in the set. If the set's rules +// implement OrderedRules then the result is ordered per those rules. If +// no order is provided, or if it is not a total order, then the iteration +// order is undefined but consistent for a particular version of cty. 
Do not +// rely on specific ordering between cty releases unless the rules order is a +// total order. +// +// The pattern for using the returned iterator is: +// +// it := set.Iterator() +// for it.Next() { +// val := it.Value() +// // ... +// } +// +// Once an iterator has been created for a set, the set *must not* be mutated +// until the iterator is no longer in use. +func (s Set) Iterator() *Iterator { + vals := s.Values() + + return &Iterator{ + vals: vals, + idx: -1, + } +} + +// EachValue calls the given callback once for each value in the set, in an +// undefined order that callers should not depend on. +func (s Set) EachValue(cb func(interface{})) { + it := s.Iterator() + for it.Next() { + cb(it.Value()) + } +} + +// Values returns a slice of all the values in the set. If the set rules have +// an order then the result is in that order. If no order is provided or if +// it is not a total order then the result order is undefined, but consistent +// for a particular set value within a specific release of cty. +func (s Set) Values() []interface{} { + var ret []interface{} + // Sort the bucketIds to ensure that we always traverse in a + // consistent order. + bucketIDs := make([]int, 0, len(s.vals)) + for id := range s.vals { + bucketIDs = append(bucketIDs, id) + } + sort.Ints(bucketIDs) + + for _, bucketID := range bucketIDs { + ret = append(ret, s.vals[bucketID]...) + } + + if orderRules, ok := s.rules.(OrderedRules); ok { + sort.SliceStable(ret, func(i, j int) bool { + return orderRules.Less(ret[i], ret[j]) + }) + } + + return ret +} + +// Length returns the number of values in the set. +func (s Set) Length() int { + var count int + for _, bucket := range s.vals { + count = count + len(bucket) + } + return count +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same rules, or +// else this function will panic. +func (s1 Set) Union(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + rs.Add(v) + }) + s2.EachValue(func(v interface{}) { + rs.Add(v) + }) + return rs +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Intersection(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same rules, +// or else this function will panic. +func (s1 Set) Subtract(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + return rs +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same rules, or else this function will +// panic. 
+func (s1 Set) SymmetricDifference(s2 Set) Set { + mustHaveSameRules(s1, s2) + rs := NewSet(s1.rules) + s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + rs.Add(v) + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + rs.Add(v) + } + }) + return rs +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/rules.go b/vendor/github.com/hashicorp/go-cty/cty/set/rules.go new file mode 100644 index 000000000..51f744b5e --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/rules.go @@ -0,0 +1,43 @@ +package set + +// Rules represents the operations that define membership for a Set. +// +// Each Set has a Rules instance, whose methods must satisfy the interface +// contracts given below for any value that will be added to the set. +type Rules interface { + // Hash returns an int that somewhat-uniquely identifies the given value. + // + // A good hash function will minimize collisions for values that will be + // added to the set, though collisions *are* permitted. Collisions will + // simply reduce the efficiency of operations on the set. + Hash(interface{}) int + + // Equivalent returns true if and only if the two values are considered + // equivalent for the sake of set membership. Two values that are + // equivalent cannot exist in the set at the same time, and if two + // equivalent values are added it is undefined which one will be + // returned when enumerating all of the set members. + // + // Two values that are equivalent *must* result in the same hash value, + // though it is *not* required that two values with the same hash value + // be equivalent. + Equivalent(interface{}, interface{}) bool +} + +// OrderedRules is an extension of Rules that can apply a partial order to +// element values. When a set's Rules implements OrderedRules an iterator +// over the set will return items in the order described by the rules. +// +// If the given order is not a total order (that is, some pairs of non-equivalent +// elements do not have a defined order) then the resulting iteration order +// is undefined but consistent for a particular version of cty. The exact +// order in that case is not part of the contract and is subject to change +// between versions. +type OrderedRules interface { + Rules + + // Less returns true if and only if the first argument should sort before + // the second argument. If the second argument should sort before the first + // or if there is no defined order for the values, return false. + Less(interface{}, interface{}) bool +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/set.go b/vendor/github.com/hashicorp/go-cty/cty/set/set.go new file mode 100644 index 000000000..b4fb316f1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set/set.go @@ -0,0 +1,62 @@ +package set + +import ( + "fmt" +) + +// Set is an implementation of the concept of a set: a collection where all +// values are conceptually either in or out of the set, but the members are +// not ordered. +// +// This type primarily exists to be the internal type of sets in cty, but +// it is considered to be at the same level of abstraction as Go's built in +// slice and map collection types, and so should make no cty-specific +// assumptions. +// +// Set operations are not thread safe. It is the caller's responsibility to +// provide mutex guarantees where necessary. +// +// Set operations are not optimized to minimize memory pressure. 
Mutating +// a set will generally create garbage and so should perhaps be avoided in +// tight loops where memory pressure is a concern. +type Set struct { + vals map[int][]interface{} + rules Rules +} + +// NewSet returns an empty set with the membership rules given. +func NewSet(rules Rules) Set { + return Set{ + vals: map[int][]interface{}{}, + rules: rules, + } +} + +func NewSetFromSlice(rules Rules, vals []interface{}) Set { + s := NewSet(rules) + for _, v := range vals { + s.Add(v) + } + return s +} + +func sameRules(s1 Set, s2 Set) bool { + return s1.rules == s2.rules +} + +func mustHaveSameRules(s1 Set, s2 Set) { + if !sameRules(s1, s2) { + panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules)) + } +} + +// HasRules returns true if and only if the receiving set has the given rules +// instance as its rules. +func (s Set) HasRules(rules Rules) bool { + return s.rules == rules +} + +// Rules returns the receiving set's rules instance. +func (s Set) Rules() Rules { + return s.rules +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_helper.go b/vendor/github.com/hashicorp/go-cty/cty/set_helper.go new file mode 100644 index 000000000..316228426 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set_helper.go @@ -0,0 +1,132 @@ +package cty + +import ( + "fmt" + + "github.com/hashicorp/go-cty/cty/set" +) + +// ValueSet is to cty.Set what []cty.Value is to cty.List and +// map[string]cty.Value is to cty.Map. It's provided to allow callers a +// convenient interface for manipulating sets before wrapping them in cty.Set +// values using cty.SetValFromValueSet. +// +// Unlike value slices and value maps, ValueSet instances have a single +// homogenous element type because that is a requirement of the underlying +// set implementation, which uses the element type to select a suitable +// hashing function. +// +// Set mutations are not concurrency-safe. +type ValueSet struct { + // ValueSet is just a thin wrapper around a set.Set with our value-oriented + // "rules" applied. We do this so that the caller can work in terms of + // cty.Value objects even though the set internals use the raw values. + s set.Set +} + +// NewValueSet creates and returns a new ValueSet with the given element type. +func NewValueSet(ety Type) ValueSet { + return newValueSet(set.NewSet(setRules{Type: ety})) +} + +func newValueSet(s set.Set) ValueSet { + return ValueSet{ + s: s, + } +} + +// ElementType returns the element type for the receiving ValueSet. +func (s ValueSet) ElementType() Type { + return s.s.Rules().(setRules).Type +} + +// Add inserts the given value into the receiving set. +func (s ValueSet) Add(v Value) { + s.requireElementType(v) + s.s.Add(v.v) +} + +// Remove deletes the given value from the receiving set, if indeed it was +// there in the first place. If the value is not present, this is a no-op. +func (s ValueSet) Remove(v Value) { + s.requireElementType(v) + s.s.Remove(v.v) +} + +// Has returns true if the given value is in the receiving set, or false if +// it is not. +func (s ValueSet) Has(v Value) bool { + s.requireElementType(v) + return s.s.Has(v.v) +} + +// Copy performs a shallow copy of the receiving set, returning a new set +// with the same rules and elements. +func (s ValueSet) Copy() ValueSet { + return newValueSet(s.s.Copy()) +} + +// Length returns the number of values in the set. +func (s ValueSet) Length() int { + return s.s.Length() +} + +// Values returns a slice of all of the values in the set in no particular +// order. 
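// A short sketch of collecting values in a ValueSet and wrapping the result
// using cty.SetValFromValueSet, as the type documentation above suggests
// (element values are hypothetical):
//
//	vs := cty.NewValueSet(cty.String)
//	vs.Add(cty.StringVal("a"))
//	vs.Add(cty.StringVal("b"))
//	vs.Add(cty.StringVal("a")) // duplicate of an existing element, ignored
//	vs.Length()                // 2
//	setVal := cty.SetValFromValueSet(vs) // a cty set value of element type cty.String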
+func (s ValueSet) Values() []Value { + l := s.s.Length() + if l == 0 { + return nil + } + ret := make([]Value, 0, l) + ety := s.ElementType() + for it := s.s.Iterator(); it.Next(); { + ret = append(ret, Value{ + ty: ety, + v: it.Value(), + }) + } + return ret +} + +// Union returns a new set that contains all of the members of both the +// receiving set and the given set. Both sets must have the same element type, +// or else this function will panic. +func (s ValueSet) Union(other ValueSet) ValueSet { + return newValueSet(s.s.Union(other.s)) +} + +// Intersection returns a new set that contains the values that both the +// receiver and given sets have in common. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Intersection(other ValueSet) ValueSet { + return newValueSet(s.s.Intersection(other.s)) +} + +// Subtract returns a new set that contains all of the values from the receiver +// that are not also in the given set. Both sets must have the same element +// type, or else this function will panic. +func (s ValueSet) Subtract(other ValueSet) ValueSet { + return newValueSet(s.s.Subtract(other.s)) +} + +// SymmetricDifference returns a new set that contains all of the values from +// both the receiver and given sets, except those that both sets have in +// common. Both sets must have the same element type, or else this function +// will panic. +func (s ValueSet) SymmetricDifference(other ValueSet) ValueSet { + return newValueSet(s.s.SymmetricDifference(other.s)) +} + +// requireElementType panics if the given value is not of the set's element type. +// +// It also panics if the given value is marked, because marked values cannot +// be stored in sets. +func (s ValueSet) requireElementType(v Value) { + if v.IsMarked() { + panic("cannot store marked value directly in a set (make the set itself unknown instead)") + } + if !v.Type().Equals(s.ElementType()) { + panic(fmt.Errorf("attempt to use %#v value with set of %#v", v.Type(), s.ElementType())) + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_internals.go b/vendor/github.com/hashicorp/go-cty/cty/set_internals.go new file mode 100644 index 000000000..408019809 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set_internals.go @@ -0,0 +1,244 @@ +package cty + +import ( + "bytes" + "fmt" + "hash/crc32" + "math/big" + "sort" + + "github.com/hashicorp/go-cty/cty/set" +) + +// setRules provides a Rules implementation for the ./set package that +// respects the equality rules for cty values of the given type. +// +// This implementation expects that values added to the set will be +// valid internal values for the given Type, which is to say that wrapping +// the given value in a Value struct along with the ruleset's type should +// produce a valid, working Value. +type setRules struct { + Type Type +} + +var _ set.OrderedRules = setRules{} + +// Hash returns a hash value for the receiver that can be used for equality +// checks where some inaccuracy is tolerable. +// +// The hash function is value-type-specific, so it is not meaningful to compare +// hash results for values of different types. +// +// This function is not safe to use for security-related applications, since +// the hash used is not strong enough. 
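+//
+// Illustrative sketch: two equal values always produce the same hash, but
+// distinct values may still collide.
+//
+//     a := cty.StringVal("hello").Hash()
+//     b := cty.StringVal("hello").Hash()
+//     // a == b is guaranteed; a collision with another value is permitted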
+func (val Value) Hash() int { + hashBytes, marks := makeSetHashBytes(val) + if len(marks) > 0 { + panic("can't take hash of value that has marks or has embedded values that have marks") + } + return int(crc32.ChecksumIEEE(hashBytes)) +} + +func (r setRules) Hash(v interface{}) int { + return Value{ + ty: r.Type, + v: v, + }.Hash() +} + +func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool { + v1v := Value{ + ty: r.Type, + v: v1, + } + v2v := Value{ + ty: r.Type, + v: v2, + } + + eqv := v1v.Equals(v2v) + + // By comparing the result to true we ensure that an Unknown result, + // which will result if either value is unknown, will be considered + // as non-equivalent. Two unknown values are not equivalent for the + // sake of set membership. + return eqv.v == true +} + +// Less is an implementation of set.OrderedRules so that we can iterate over +// set elements in a consistent order, where such an order is possible. +func (r setRules) Less(v1, v2 interface{}) bool { + v1v := Value{ + ty: r.Type, + v: v1, + } + v2v := Value{ + ty: r.Type, + v: v2, + } + + if v1v.RawEquals(v2v) { // Easy case: if they are equal then v1 can't be less + return false + } + + // Null values always sort after non-null values + if v2v.IsNull() && !v1v.IsNull() { + return true + } else if v1v.IsNull() { + return false + } + // Unknown values always sort after known values + if v1v.IsKnown() && !v2v.IsKnown() { + return true + } else if !v1v.IsKnown() { + return false + } + + switch r.Type { + case String: + // String values sort lexicographically + return v1v.AsString() < v2v.AsString() + case Bool: + // Weird to have a set of bools, but if we do then false sorts before true. + if v2v.True() || !v1v.True() { + return true + } + return false + case Number: + v1f := v1v.AsBigFloat() + v2f := v2v.AsBigFloat() + return v1f.Cmp(v2f) < 0 + default: + // No other types have a well-defined ordering, so we just produce a + // default consistent-but-undefined ordering then. This situation is + // not considered a compatibility constraint; callers should rely only + // on the ordering rules for primitive values. + v1h, _ := makeSetHashBytes(v1v) + v2h, _ := makeSetHashBytes(v2v) + return bytes.Compare(v1h, v2h) < 0 + } +} + +func makeSetHashBytes(val Value) ([]byte, ValueMarks) { + var buf bytes.Buffer + marks := make(ValueMarks) + appendSetHashBytes(val, &buf, marks) + return buf.Bytes(), marks +} + +func appendSetHashBytes(val Value, buf *bytes.Buffer, marks ValueMarks) { + // Exactly what bytes we generate here don't matter as long as the following + // constraints hold: + // - Unknown and null values all generate distinct strings from + // each other and from any normal value of the given type. + // - The delimiter used to separate items in a compound structure can + // never appear literally in any of its elements. + // Since we don't support hetrogenous lists we don't need to worry about + // collisions between values of different types, apart from + // PseudoTypeDynamic. + // If in practice we *do* get a collision then it's not a big deal because + // the Equivalent function will still distinguish values, but set + // performance will be best if we are able to produce a distinct string + // for each distinct value, unknown values notwithstanding. + + // Marks aren't considered part of a value for equality-testing purposes, + // so we'll unmark our value before we work with it but we'll remember + // the marks in case the caller needs to re-apply them to a derived + // value. 
+ if val.IsMarked() { + unmarkedVal, valMarks := val.Unmark() + for m := range valMarks { + marks[m] = struct{}{} + } + val = unmarkedVal + } + + if !val.IsKnown() { + buf.WriteRune('?') + return + } + if val.IsNull() { + buf.WriteRune('~') + return + } + + switch val.ty { + case Number: + // Due to an unfortunate quirk of gob encoding for big.Float, we end up + // with non-pointer values immediately after a gob round-trip, and + // we end up in here before we've had a chance to run + // gobDecodeFixNumberPtr on the inner values of a gob-encoded set, + // and so sadly we must make a special effort to handle that situation + // here just so that we can get far enough along to fix it up for + // everything else in this package. + if bf, ok := val.v.(big.Float); ok { + buf.WriteString(bf.String()) + return + } + buf.WriteString(val.v.(*big.Float).String()) + return + case Bool: + if val.v.(bool) { + buf.WriteRune('T') + } else { + buf.WriteRune('F') + } + return + case String: + buf.WriteString(fmt.Sprintf("%q", val.v.(string))) + return + } + + if val.ty.IsMapType() { + buf.WriteRune('{') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(keyVal, buf, marks) + buf.WriteRune(':') + appendSetHashBytes(elementVal, buf, marks) + buf.WriteRune(';') + return false + }) + buf.WriteRune('}') + return + } + + if val.ty.IsListType() || val.ty.IsSetType() { + buf.WriteRune('[') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(elementVal, buf, marks) + buf.WriteRune(';') + return false + }) + buf.WriteRune(']') + return + } + + if val.ty.IsObjectType() { + buf.WriteRune('<') + attrNames := make([]string, 0, len(val.ty.AttributeTypes())) + for attrName := range val.ty.AttributeTypes() { + attrNames = append(attrNames, attrName) + } + sort.Strings(attrNames) + for _, attrName := range attrNames { + appendSetHashBytes(val.GetAttr(attrName), buf, marks) + buf.WriteRune(';') + } + buf.WriteRune('>') + return + } + + if val.ty.IsTupleType() { + buf.WriteRune('<') + val.ForEachElement(func(keyVal, elementVal Value) bool { + appendSetHashBytes(elementVal, buf, marks) + buf.WriteRune(';') + return false + }) + buf.WriteRune('>') + return + } + + // should never get down here + panic("unsupported type in set hash") +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_type.go b/vendor/github.com/hashicorp/go-cty/cty/set_type.go new file mode 100644 index 000000000..cbc3706f2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/set_type.go @@ -0,0 +1,72 @@ +package cty + +import ( + "fmt" +) + +type typeSet struct { + typeImplSigil + ElementTypeT Type +} + +// Set creates a set type with the given element Type. +// +// Set types are CollectionType implementations. +func Set(elem Type) Type { + return Type{ + typeSet{ + ElementTypeT: elem, + }, + } +} + +// Equals returns true if the other Type is a set whose element type is +// equal to that of the receiver. 
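+//
+// For example (illustrative):
+//
+//     cty.Set(cty.String).Equals(cty.Set(cty.String))  // true
+//     cty.Set(cty.String).Equals(cty.Set(cty.Number))  // false
+//     cty.Set(cty.String).Equals(cty.List(cty.String)) // false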
+func (t typeSet) Equals(other Type) bool {
+	ot, isSet := other.typeImpl.(typeSet)
+	if !isSet {
+		return false
+	}
+
+	return t.ElementTypeT.Equals(ot.ElementTypeT)
+}
+
+func (t typeSet) FriendlyName(mode friendlyTypeNameMode) string {
+	elemName := t.ElementTypeT.friendlyNameMode(mode)
+	if mode == friendlyTypeConstraintName {
+		if t.ElementTypeT == DynamicPseudoType {
+			elemName = "any single type"
+		}
+	}
+	return "set of " + elemName
+}
+
+func (t typeSet) ElementType() Type {
+	return t.ElementTypeT
+}
+
+func (t typeSet) GoString() string {
+	return fmt.Sprintf("cty.Set(%#v)", t.ElementTypeT)
+}
+
+// IsSetType returns true if the given type is a set type, regardless of its
+// element type.
+func (t Type) IsSetType() bool {
+	_, ok := t.typeImpl.(typeSet)
+	return ok
+}
+
+// SetElementType is a convenience method that checks if the given type is
+// a set type, returning a pointer to its element type if so and nil
+// otherwise. This is intended to allow convenient conditional branches,
+// like so:
+//
+//     if et := t.SetElementType(); et != nil {
+//         // Do something with *et
+//     }
+func (t Type) SetElementType() *Type {
+	if lt, ok := t.typeImpl.(typeSet); ok {
+		return &lt.ElementTypeT
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go b/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go
new file mode 100644
index 000000000..798cacd63
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go
@@ -0,0 +1,121 @@
+package cty
+
+import (
+	"fmt"
+)
+
+type typeTuple struct {
+	typeImplSigil
+	ElemTypes []Type
+}
+
+// Tuple creates a tuple type with the given element types.
+//
+// After a slice is passed to this function the caller must no longer access
+// the underlying array, since ownership is transferred to this library.
+func Tuple(elemTypes []Type) Type {
+	return Type{
+		typeTuple{
+			ElemTypes: elemTypes,
+		},
+	}
+}
+
+func (t typeTuple) Equals(other Type) bool {
+	if ot, ok := other.typeImpl.(typeTuple); ok {
+		if len(t.ElemTypes) != len(ot.ElemTypes) {
+			// Fast path: if we don't have the same number of elements
+			// then we can't possibly be equal.
+			return false
+		}
+
+		for i, ty := range t.ElemTypes {
+			oty := ot.ElemTypes[i]
+			if !oty.Equals(ty) {
+				return false
+			}
+		}
+
+		return true
+	}
+	return false
+}
+
+func (t typeTuple) FriendlyName(mode friendlyTypeNameMode) string {
+	// There isn't really a friendly way to write a tuple type due to its
+	// complexity, so we'll just do something English-ish. Callers will
+	// probably want to make some extra effort to avoid ever printing out
+	// a tuple type FriendlyName in its entirety. For example, could
+	// produce an error message by diffing two object types and saying
+	// something like "Expected attribute foo to be string, but got number".
+	// TODO: Finish this
+	return "tuple"
+}
+
+func (t typeTuple) GoString() string {
+	if len(t.ElemTypes) == 0 {
+		return "cty.EmptyTuple"
+	}
+	return fmt.Sprintf("cty.Tuple(%#v)", t.ElemTypes)
+}
+
+// EmptyTuple is a shorthand for Tuple([]Type{}), to more easily talk about
+// the empty tuple type.
+var EmptyTuple Type
+
+// EmptyTupleVal is the only possible non-null, non-unknown value of type
+// EmptyTuple.
+var EmptyTupleVal Value
+
+func init() {
+	EmptyTuple = Tuple([]Type{})
+	EmptyTupleVal = Value{
+		ty: EmptyTuple,
+		v:  []interface{}{},
+	}
+}
+
+// IsTupleType returns true if the given type is a tuple type, regardless
+// of its element types.
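+//
+// For example (illustrative):
+//
+//     cty.Tuple([]cty.Type{cty.String, cty.Number}).IsTupleType() // true
+//     cty.EmptyTuple.IsTupleType()                                // true
+//     cty.List(cty.String).IsTupleType()                          // false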
+func (t Type) IsTupleType() bool { + _, ok := t.typeImpl.(typeTuple) + return ok +} + +// Length returns the number of elements of the receiving tuple type. +// Will panic if the reciever isn't a tuple type; use IsTupleType to determine +// whether this operation will succeed. +func (t Type) Length() int { + if ot, ok := t.typeImpl.(typeTuple); ok { + return len(ot.ElemTypes) + } + panic("Length on non-tuple Type") +} + +// TupleElementType returns the type of the element with the given index. Will +// panic if the receiver is not a tuple type (use IsTupleType to confirm) +// or if the index is out of range (use Length to confirm). +func (t Type) TupleElementType(idx int) Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes[idx] + } + panic("TupleElementType on non-tuple Type") +} + +// TupleElementTypes returns a slice of the recieving tuple type's element +// types. Will panic if the receiver is not a tuple type (use IsTupleType +// to confirm). +// +// The returned slice is part of the internal state of the type, and is provided +// for read access only. It is forbidden for any caller to modify the +// underlying array. For many purposes the element-related methods of Value +// are more appropriate and more convenient to use. +func (t Type) TupleElementTypes() []Type { + if ot, ok := t.typeImpl.(typeTuple); ok { + return ot.ElemTypes + } + panic("TupleElementTypes on non-tuple Type") +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/type.go b/vendor/github.com/hashicorp/go-cty/cty/type.go new file mode 100644 index 000000000..730cb9862 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/type.go @@ -0,0 +1,120 @@ +package cty + +// Type represents value types within the type system. +// +// This is a closed interface type, meaning that only the concrete +// implementations provided within this package are considered valid. +type Type struct { + typeImpl +} + +type typeImpl interface { + // isTypeImpl is a do-nothing method that exists only to express + // that a type is an implementation of typeImpl. + isTypeImpl() typeImplSigil + + // Equals returns true if the other given Type exactly equals the + // receiver Type. + Equals(other Type) bool + + // FriendlyName returns a human-friendly *English* name for the given + // type. + FriendlyName(mode friendlyTypeNameMode) string + + // GoString implements the GoStringer interface from package fmt. + GoString() string +} + +// Base implementation of Type to embed into concrete implementations +// to signal that they are implementations of Type. +type typeImplSigil struct{} + +func (t typeImplSigil) isTypeImpl() typeImplSigil { + return typeImplSigil{} +} + +// Equals returns true if the other given Type exactly equals the receiver +// type. +func (t Type) Equals(other Type) bool { + return t.typeImpl.Equals(other) +} + +// FriendlyName returns a human-friendly *English* name for the given type. +func (t Type) FriendlyName() string { + return t.typeImpl.FriendlyName(friendlyTypeName) +} + +// FriendlyNameForConstraint is similar to FriendlyName except that the +// result is specialized for describing type _constraints_ rather than types +// themselves. This is more appropriate when reporting that a particular value +// does not conform to an expected type constraint. +// +// In particular, this function uses the term "any type" to refer to +// cty.DynamicPseudoType, rather than "dynamic" as returned by FriendlyName. 
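+//
+// For example (illustrative):
+//
+//     cty.DynamicPseudoType.FriendlyNameForConstraint() // "any type"
+//     cty.Set(cty.String).FriendlyNameForConstraint()   // "set of string"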
+func (t Type) FriendlyNameForConstraint() string { + return t.typeImpl.FriendlyName(friendlyTypeConstraintName) +} + +// friendlyNameMode is an internal combination of the various FriendlyName* +// variants that just directly takes a mode, for easy passthrough for +// recursive name construction. +func (t Type) friendlyNameMode(mode friendlyTypeNameMode) string { + return t.typeImpl.FriendlyName(mode) +} + +// GoString returns a string approximating how the receiver type would be +// expressed in Go source code. +func (t Type) GoString() string { + if t.typeImpl == nil { + return "cty.NilType" + } + + return t.typeImpl.GoString() +} + +// NilType is an invalid type used when a function is returning an error +// and has no useful type to return. It should not be used and any methods +// called on it will panic. +var NilType = Type{} + +// HasDynamicTypes returns true either if the receiver is itself +// DynamicPseudoType or if it is a compound type whose descendent elements +// are DynamicPseudoType. +func (t Type) HasDynamicTypes() bool { + switch { + case t == DynamicPseudoType: + return true + case t.IsPrimitiveType(): + return false + case t.IsCollectionType(): + return false + case t.IsObjectType(): + attrTypes := t.AttributeTypes() + for _, at := range attrTypes { + if at.HasDynamicTypes() { + return true + } + } + return false + case t.IsTupleType(): + elemTypes := t.TupleElementTypes() + for _, et := range elemTypes { + if et.HasDynamicTypes() { + return true + } + } + return false + case t.IsCapsuleType(): + return false + default: + // Should never happen, since above should be exhaustive + panic("HasDynamicTypes does not support the given type") + } +} + +type friendlyTypeNameMode rune + +const ( + friendlyTypeName friendlyTypeNameMode = 'N' + friendlyTypeConstraintName friendlyTypeNameMode = 'C' +) diff --git a/vendor/github.com/hashicorp/go-cty/cty/type_conform.go b/vendor/github.com/hashicorp/go-cty/cty/type_conform.go new file mode 100644 index 000000000..476eeea87 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/type_conform.go @@ -0,0 +1,139 @@ +package cty + +// TestConformance recursively walks the receiver and the given other type and +// returns nil if the receiver *conforms* to the given type. +// +// Type conformance is similar to type equality but has one crucial difference: +// PseudoTypeDynamic can be used within the given type to represent that +// *any* type is allowed. +// +// If any non-conformities are found, the returned slice will be non-nil and +// contain at least one error value. It will be nil if the type is entirely +// conformant. +// +// Note that the special behavior of PseudoTypeDynamic is the *only* exception +// to normal type equality. Calling applications may wish to apply their own +// automatic conversion logic to the given data structure to create a more +// liberal notion of conformance to a type. +// +// Returned errors are usually (but not always) PathError instances that +// indicate where in the structure the error was found. If a returned error +// is of that type then the error message is written for (English-speaking) +// end-users working within the cty type system, not mentioning any Go-oriented +// implementation details. +func (t Type) TestConformance(other Type) []error { + path := make(Path, 0) + var errs []error + testConformance(t, other, path, &errs) + return errs +} + +func testConformance(given Type, want Type, path Path, errs *[]error) { + if want.Equals(DynamicPseudoType) { + // anything goes! 
+ return + } + + if given.Equals(want) { + // Any equal types are always conformant + return + } + + // The remainder of this function is concerned with detecting + // and reporting the specific non-conformance, since we wouldn't + // have got here if the types were not divergent. + // We treat compound structures as special so that we can report + // specifically what is non-conforming, rather than simply returning + // the entire type names and letting the user puzzle it out. + + if given.IsObjectType() && want.IsObjectType() { + givenAttrs := given.AttributeTypes() + wantAttrs := want.AttributeTypes() + + for k := range givenAttrs { + if _, exists := wantAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "unsupported attribute %q", k), + ) + } + } + for k := range wantAttrs { + if _, exists := givenAttrs[k]; !exists { + *errs = append( + *errs, + errorf(path, "missing required attribute %q", k), + ) + } + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for k, wantAttrType := range wantAttrs { + if givenAttrType, exists := givenAttrs[k]; exists { + path[pathIdx] = GetAttrStep{Name: k} + testConformance(givenAttrType, wantAttrType, path, errs) + } + } + + path = path[0:pathIdx] + + return + } + + if given.IsTupleType() && want.IsTupleType() { + givenElems := given.TupleElementTypes() + wantElems := want.TupleElementTypes() + + if len(givenElems) != len(wantElems) { + *errs = append( + *errs, + errorf(path, "%d elements are required, but got %d", len(wantElems), len(givenElems)), + ) + return + } + + path = append(path, nil) + pathIdx := len(path) - 1 + + for i, wantElemType := range wantElems { + givenElemType := givenElems[i] + path[pathIdx] = IndexStep{Key: NumberIntVal(int64(i))} + testConformance(givenElemType, wantElemType, path, errs) + } + + path = path[0:pathIdx] + + return + } + + if given.IsListType() && want.IsListType() { + path = append(path, IndexStep{Key: UnknownVal(Number)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsMapType() && want.IsMapType() { + path = append(path, IndexStep{Key: UnknownVal(String)}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + if given.IsSetType() && want.IsSetType() { + path = append(path, IndexStep{Key: UnknownVal(given.ElementType())}) + pathIdx := len(path) - 1 + testConformance(given.ElementType(), want.ElementType(), path, errs) + path = path[0:pathIdx] + return + } + + *errs = append( + *errs, + errorf(path, "%s required, but received %s", want.FriendlyName(), given.FriendlyName()), + ) +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go b/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go new file mode 100644 index 000000000..ec05bb18a --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go @@ -0,0 +1,57 @@ +package cty + +import ( + "encoding/gob" + "fmt" + "math/big" + "strings" + + "github.com/hashicorp/go-cty/cty/set" +) + +// InternalTypesToRegister is a slice of values that covers all of the +// internal types used in the representation of cty.Type and cty.Value +// across all cty Types. +// +// This is intended to be used to register these types with encoding +// packages that require registration of types used in interfaces, such as +// encoding/gob, thus allowing cty types and values to be included in streams +// created from those packages. 
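+//
+// A hypothetical registration loop (enc stands in for whatever encoder the
+// caller is configuring; it is not an API of this package):
+//
+//     for _, t := range cty.InternalTypesToRegister {
+//         enc.Register(t)
+//     }
+//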
However, registering with gob is not necessary +// since that is done automatically as a side-effect of importing this package. +// +// Callers should not do anything with the values here except pass them on +// verbatim to a registration function. +// +// If the calling application uses Capsule types that wrap local structs either +// directly or indirectly, these structs may also need to be registered in +// order to support encoding and decoding of values of these types. That is the +// responsibility of the calling application. +var InternalTypesToRegister []interface{} + +func init() { + InternalTypesToRegister = []interface{}{ + primitiveType{}, + typeList{}, + typeMap{}, + typeObject{}, + typeSet{}, + setRules{}, + set.Set{}, + typeTuple{}, + big.Float{}, + capsuleType{}, + []interface{}(nil), + map[string]interface{}(nil), + } + + // Register these with gob here, rather than in gob.go, to ensure + // that this will always happen after we build the above. + for _, tv := range InternalTypesToRegister { + typeName := fmt.Sprintf("%T", tv) + if strings.HasPrefix(typeName, "cty.") { + gob.RegisterName(fmt.Sprintf("github.com/hashicorp/go-cty/%s", typeName), tv) + } else { + gob.Register(tv) + } + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/unknown.go b/vendor/github.com/hashicorp/go-cty/cty/unknown.go new file mode 100644 index 000000000..e54179eb1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/unknown.go @@ -0,0 +1,84 @@ +package cty + +// unknownType is the placeholder type used for the sigil value representing +// "Unknown", to make it unambigiously distinct from any other possible value. +type unknownType struct { +} + +// Unknown is a special value that can be +var unknown interface{} = &unknownType{} + +// UnknownVal returns an Value that represents an unknown value of the given +// type. Unknown values can be used to represent a value that is +// not yet known. Its meaning is undefined in cty, but it could be used by +// an calling application to allow partial evaluation. +// +// Unknown values of any type can be created of any type. All operations on +// Unknown values themselves return Unknown. +func UnknownVal(t Type) Value { + return Value{ + ty: t, + v: unknown, + } +} + +func (t unknownType) GoString() string { + // This is the stringification of our internal unknown marker. The + // stringification of the public representation of unknowns is in + // Value.GoString. + return "cty.unknown" +} + +type pseudoTypeDynamic struct { + typeImplSigil +} + +// DynamicPseudoType represents the dynamic pseudo-type. +// +// This type can represent situations where a type is not yet known. Its +// meaning is undefined in cty, but it could be used by a calling +// application to allow expression type checking with some types not yet known. +// For example, the application might optimistically permit any operation on +// values of this type in type checking, allowing a partial type-check result, +// and then repeat the check when more information is known to get the +// final, concrete type. +// +// It is a pseudo-type because it is used only as a sigil to the calling +// application. "Unknown" is the only valid value of this pseudo-type, so +// operations on values of this type will always short-circuit as per +// the rules for that special value. 
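+//
+// Illustrative sketch of the relationship with DynamicVal:
+//
+//     cty.UnknownVal(cty.DynamicPseudoType) // equivalent to cty.DynamicVal
+//     cty.DynamicVal.Type()                 // cty.DynamicPseudoType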
+var DynamicPseudoType Type + +func (t pseudoTypeDynamic) Equals(other Type) bool { + _, ok := other.typeImpl.(pseudoTypeDynamic) + return ok +} + +func (t pseudoTypeDynamic) FriendlyName(mode friendlyTypeNameMode) string { + switch mode { + case friendlyTypeConstraintName: + return "any type" + default: + return "dynamic" + } +} + +func (t pseudoTypeDynamic) GoString() string { + return "cty.DynamicPseudoType" +} + +// DynamicVal is the only valid value of the pseudo-type dynamic. +// This value can be used as a placeholder where a value or expression's +// type and value are both unknown, thus allowing partial evaluation. See +// the docs for DynamicPseudoType for more information. +var DynamicVal Value + +func init() { + DynamicPseudoType = Type{ + pseudoTypeDynamic{}, + } + DynamicVal = Value{ + ty: DynamicPseudoType, + v: unknown, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go b/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go new file mode 100644 index 000000000..ba926475c --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go @@ -0,0 +1,64 @@ +package cty + +// UnknownAsNull returns a value of the same type as the given value but +// with any unknown values (including nested values) replaced with null +// values of the same type. +// +// This can be useful if a result is to be serialized in a format that can't +// represent unknowns, such as JSON, as long as the caller does not need to +// retain the unknown value information. +func UnknownAsNull(val Value) Value { + ty := val.Type() + switch { + case val.IsNull(): + return val + case !val.IsKnown(): + return NullVal(ty) + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + length := val.LengthInt() + if length == 0 { + // If there are no elements then we can't have unknowns + return val + } + vals := make([]Value, 0, length) + it := val.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, UnknownAsNull(v)) + } + switch { + case ty.IsListType(): + return ListVal(vals) + case ty.IsTupleType(): + return TupleVal(vals) + default: + return SetVal(vals) + } + case ty.IsMapType() || ty.IsObjectType(): + var length int + switch { + case ty.IsMapType(): + length = val.LengthInt() + default: + length = len(val.Type().AttributeTypes()) + } + if length == 0 { + // If there are no elements then we can't have unknowns + return val + } + vals := make(map[string]Value, length) + it := val.ElementIterator() + for it.Next() { + k, v := it.Element() + vals[k.AsString()] = UnknownAsNull(v) + } + switch { + case ty.IsMapType(): + return MapVal(vals) + default: + return ObjectVal(vals) + } + } + + return val +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/value.go b/vendor/github.com/hashicorp/go-cty/cty/value.go new file mode 100644 index 000000000..1025ba82e --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/value.go @@ -0,0 +1,108 @@ +package cty + +// Value represents a value of a particular type, and is the interface by +// which operations are executed on typed values. +// +// Value has two different classes of method. Operation methods stay entirely +// within the type system (methods accept and return Value instances) and +// are intended for use in implementing a language in terms of cty, while +// integration methods either enter or leave the type system, working with +// native Go values. 
Operation methods are guaranteed to support all of the
+// expected short-circuit behavior for unknown and dynamic values, while
+// integration methods may not.
+//
+// The philosophy for the operations API is that it's the caller's
+// responsibility to ensure that the given types and values satisfy the
+// specified invariants during a separate type check, so that the caller is
+// able to return errors to its user from the application's own perspective.
+//
+// Consequently the design of these methods assumes such checks have already
+// been done and panics if any invariants turn out not to be satisfied. These
+// panic errors are not intended to be handled, but rather indicate a bug in
+// the calling application that should be fixed with more checks prior to
+// executing operations.
+//
+// A related consequence of this philosophy is that no automatic type
+// conversions are done. If a method specifies that its argument must be a
+// number then it's the caller's responsibility to do that conversion before
+// the call, thus allowing the application to have more constrained conversion
+// rules than are offered by the built-in converter where necessary.
+type Value struct {
+	ty Type
+	v  interface{}
+}
+
+// Type returns the type of the value.
+func (val Value) Type() Type {
+	return val.ty
+}
+
+// IsKnown returns true if the value is known. That is, if it is not
+// the result of the unknown value constructor UnknownVal(...), and is not
+// the result of an operation on another unknown value.
+//
+// Unknown values are only produced either directly or as a result of
+// operating on other unknown values, and so an application that never
+// introduces Unknown values can be guaranteed to never receive any either.
+func (val Value) IsKnown() bool {
+	if val.IsMarked() {
+		return val.unmarkForce().IsKnown()
+	}
+	return val.v != unknown
+}
+
+// IsNull returns true if the value is null. Values of any type can be
+// null, but any operations on a null value will panic. No operation ever
+// produces null, so an application that never introduces Null values can
+// be guaranteed to never receive any either.
+func (val Value) IsNull() bool {
+	if val.IsMarked() {
+		return val.unmarkForce().IsNull()
+	}
+	return val.v == nil
+}
+
+// NilVal is an invalid Value that can be used as a placeholder when returning
+// with an error from a function that returns (Value, error).
+//
+// NilVal is *not* a valid Value and so no operations may be performed on it.
+// Any attempt to use it will result in a panic.
+//
+// This should not be confused with the idea of a Null value, as returned by
+// NullVal. NilVal is a nil within the *Go* type system, and is invalid in
+// the cty type system. Null values *do* exist in the cty type system.
+var NilVal = Value{
+	ty: Type{typeImpl: nil},
+	v:  nil,
+}
+
+// IsWhollyKnown is an extension of IsKnown that also recursively checks
+// inside collections and structures to see if there are any nested unknown
+// values.
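+//
+// For example (illustrative):
+//
+//     cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)}).IsKnown()       // true
+//     cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)}).IsWhollyKnown() // false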
+func (val Value) IsWhollyKnown() bool {
+	if val.IsMarked() {
+		return val.unmarkForce().IsWhollyKnown()
+	}
+
+	if !val.IsKnown() {
+		return false
+	}
+
+	if val.IsNull() {
+		// Can't recurse into a null, so we're done
+		return true
+	}
+
+	switch {
+	case val.CanIterateElements():
+		for it := val.ElementIterator(); it.Next(); {
+			_, ev := it.Element()
+			if !ev.IsWhollyKnown() {
+				return false
+			}
+		}
+		return true
+	default:
+		return true
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/value_init.go b/vendor/github.com/hashicorp/go-cty/cty/value_init.go
new file mode 100644
index 000000000..853a5a7db
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cty/cty/value_init.go
@@ -0,0 +1,324 @@
+package cty
+
+import (
+	"fmt"
+	"math/big"
+	"reflect"
+
+	"golang.org/x/text/unicode/norm"
+
+	"github.com/hashicorp/go-cty/cty/set"
+)
+
+// BoolVal returns a Value of type Bool whose internal value is the given
+// bool.
+func BoolVal(v bool) Value {
+	return Value{
+		ty: Bool,
+		v:  v,
+	}
+}
+
+// NumberVal returns a Value of type Number whose internal value is the given
+// big.Float. The returned value becomes the owner of the big.Float object,
+// and so it's forbidden for the caller to mutate the object after it's
+// wrapped in this way.
+func NumberVal(v *big.Float) Value {
+	return Value{
+		ty: Number,
+		v:  v,
+	}
+}
+
+// ParseNumberVal returns a Value of type Number produced by parsing the given
+// string as a decimal real number. To ensure that two identical strings will
+// always produce an equal number, always use this function to derive a number
+// from a string; it will ensure that the precision and rounding mode for the
+// internal big decimal is configured in a consistent way.
+//
+// If the given string cannot be parsed as a number, the returned error has
+// the message "a number is required", making it suitable to return to an
+// end-user to signal a type conversion error.
+//
+// If the given string contains a number that becomes a recurring fraction
+// when expressed in binary then it will be truncated to have a 512-bit
+// mantissa. Note that this is a higher precision than that of a float64,
+// so converting the same decimal number first to float64 and then calling
+// NumberFloatVal will not produce an equal result; the conversion first
+// to float64 will round the mantissa to fewer than 512 bits.
+func ParseNumberVal(s string) (Value, error) {
+	// Base 10, precision 512, and rounding to nearest even is the standard
+	// way to handle numbers arriving as strings.
+	f, _, err := big.ParseFloat(s, 10, 512, big.ToNearestEven)
+	if err != nil {
+		return NilVal, fmt.Errorf("a number is required")
+	}
+	return NumberVal(f), nil
+}
+
+// MustParseNumberVal is like ParseNumberVal but it will panic in case of any
+// error. It can be used during initialization or any other situation where
+// the given string is a constant or otherwise known to be correct by the
+// caller.
+func MustParseNumberVal(s string) Value {
+	ret, err := ParseNumberVal(s)
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
+
+// NumberIntVal returns a Value of type Number whose internal value is equal
+// to the given integer.
+func NumberIntVal(v int64) Value {
+	return NumberVal(new(big.Float).SetInt64(v))
+}
+
+// NumberUIntVal returns a Value of type Number whose internal value is equal
+// to the given unsigned integer.
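+//
+// For example (illustrative), signed and unsigned constructors that describe
+// the same mathematical value compare as equal:
+//
+//     cty.NumberUIntVal(42).Equals(cty.NumberIntVal(42)) // cty.True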
+func NumberUIntVal(v uint64) Value { + return NumberVal(new(big.Float).SetUint64(v)) +} + +// NumberFloatVal returns a Value of type Number whose internal value is +// equal to the given float. +func NumberFloatVal(v float64) Value { + return NumberVal(new(big.Float).SetFloat64(v)) +} + +// StringVal returns a Value of type String whose internal value is the +// given string. +// +// Strings must be UTF-8 encoded sequences of valid unicode codepoints, and +// they are NFC-normalized on entry into the world of cty values. +// +// If the given string is not valid UTF-8 then behavior of string operations +// is undefined. +func StringVal(v string) Value { + return Value{ + ty: String, + v: NormalizeString(v), + } +} + +// NormalizeString applies the same normalization that cty applies when +// constructing string values. +// +// A return value from this function can be meaningfully compared byte-for-byte +// with a Value.AsString result. +func NormalizeString(s string) string { + return norm.NFC.String(s) +} + +// ObjectVal returns a Value of an object type whose structure is defined +// by the key names and value types in the given map. +func ObjectVal(attrs map[string]Value) Value { + attrTypes := make(map[string]Type, len(attrs)) + attrVals := make(map[string]interface{}, len(attrs)) + + for attr, val := range attrs { + attr = NormalizeString(attr) + attrTypes[attr] = val.ty + attrVals[attr] = val.v + } + + return Value{ + ty: Object(attrTypes), + v: attrVals, + } +} + +// TupleVal returns a Value of a tuple type whose element types are +// defined by the value types in the given slice. +func TupleVal(elems []Value) Value { + elemTypes := make([]Type, len(elems)) + elemVals := make([]interface{}, len(elems)) + + for i, val := range elems { + elemTypes[i] = val.ty + elemVals[i] = val.v + } + + return Value{ + ty: Tuple(elemTypes), + v: elemVals, + } +} + +// ListVal returns a Value of list type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also ListValEmpty.) +func ListVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call ListVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + + for i, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent list element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + return Value{ + ty: List(elementType), + v: rawList, + } +} + +// ListValEmpty returns an empty list of the given element type. +func ListValEmpty(element Type) Value { + return Value{ + ty: List(element), + v: []interface{}{}, + } +} + +// MapVal returns a Value of a map type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given map is empty, since then the element type cannot be inferred. +// (See also MapValEmpty.) 
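+//
+// A minimal illustrative example:
+//
+//     m := cty.MapVal(map[string]cty.Value{
+//         "greeting": cty.StringVal("hello"),
+//     })
+//     // m.Type() is cty.Map(cty.String)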
+func MapVal(vals map[string]Value) Value { + if len(vals) == 0 { + panic("must not call MapVal with empty map") + } + elementType := DynamicPseudoType + rawMap := make(map[string]interface{}, len(vals)) + + for key, val := range vals { + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent map element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawMap[NormalizeString(key)] = val.v + } + + return Value{ + ty: Map(elementType), + v: rawMap, + } +} + +// MapValEmpty returns an empty map of the given element type. +func MapValEmpty(element Type) Value { + return Value{ + ty: Map(element), + v: map[string]interface{}{}, + } +} + +// SetVal returns a Value of set type whose element type is defined by +// the types of the given values, which must be homogenous. +// +// If the types are not all consistent (aside from elements that are of the +// dynamic pseudo-type) then this function will panic. It will panic also +// if the given list is empty, since then the element type cannot be inferred. +// (See also SetValEmpty.) +func SetVal(vals []Value) Value { + if len(vals) == 0 { + panic("must not call SetVal with empty slice") + } + elementType := DynamicPseudoType + rawList := make([]interface{}, len(vals)) + var markSets []ValueMarks + + for i, val := range vals { + if unmarkedVal, marks := val.UnmarkDeep(); len(marks) > 0 { + val = unmarkedVal + markSets = append(markSets, marks) + } + if val.ContainsMarked() { + // FIXME: Allow this, but unmark the values and apply the + // marking to the set itself instead. + panic("set cannot contain marked values") + } + if elementType == DynamicPseudoType { + elementType = val.ty + } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) { + panic(fmt.Errorf( + "inconsistent set element types (%#v then %#v)", + elementType, val.ty, + )) + } + + rawList[i] = val.v + } + + rawVal := set.NewSetFromSlice(setRules{elementType}, rawList) + + return Value{ + ty: Set(elementType), + v: rawVal, + }.WithMarks(markSets...) +} + +// SetValFromValueSet returns a Value of set type based on an already-constructed +// ValueSet. +// +// The element type of the returned value is the element type of the given +// set. +func SetValFromValueSet(s ValueSet) Value { + ety := s.ElementType() + rawVal := s.s.Copy() // copy so caller can't mutate what we wrap + + return Value{ + ty: Set(ety), + v: rawVal, + } +} + +// SetValEmpty returns an empty set of the given element type. +func SetValEmpty(element Type) Value { + return Value{ + ty: Set(element), + v: set.NewSet(setRules{element}), + } +} + +// CapsuleVal creates a value of the given capsule type using the given +// wrapVal, which must be a pointer to a value of the capsule type's native +// type. +// +// This function will panic if the given type is not a capsule type, if +// the given wrapVal is not compatible with the given capsule type, or if +// wrapVal is not a pointer. 
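+//
+// Illustrative sketch (ty is assumed to have been created elsewhere, for
+// example with cty.Capsule("counter", reflect.TypeOf(0))):
+//
+//     n := 5
+//     v := cty.CapsuleVal(ty, &n)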
+func CapsuleVal(ty Type, wrapVal interface{}) Value { + if !ty.IsCapsuleType() { + panic("not a capsule type") + } + + wv := reflect.ValueOf(wrapVal) + if wv.Kind() != reflect.Ptr { + panic("wrapVal is not a pointer") + } + + it := ty.typeImpl.(*capsuleType).GoType + if !wv.Type().Elem().AssignableTo(it) { + panic("wrapVal target is not compatible with the given capsule type") + } + + return Value{ + ty: ty, + v: wrapVal, + } +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/value_ops.go b/vendor/github.com/hashicorp/go-cty/cty/value_ops.go new file mode 100644 index 000000000..69e5a8abb --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/value_ops.go @@ -0,0 +1,1290 @@ +package cty + +import ( + "fmt" + "math/big" + "reflect" + + "github.com/hashicorp/go-cty/cty/set" +) + +// GoString is an implementation of fmt.GoStringer that produces concise +// source-like representations of values suitable for use in debug messages. +func (val Value) GoString() string { + if val.IsMarked() { + unVal, marks := val.Unmark() + if len(marks) == 1 { + var mark interface{} + for m := range marks { + mark = m + } + return fmt.Sprintf("%#v.Mark(%#v)", unVal, mark) + } + return fmt.Sprintf("%#v.WithMarks(%#v)", unVal, marks) + } + + if val == NilVal { + return "cty.NilVal" + } + + if val.IsNull() { + return fmt.Sprintf("cty.NullVal(%#v)", val.ty) + } + if val == DynamicVal { // is unknown, so must be before the IsKnown check below + return "cty.DynamicVal" + } + if !val.IsKnown() { + return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty) + } + + // By the time we reach here we've dealt with all of the exceptions around + // unknowns and nulls, so we're guaranteed that the values are the + // canonical internal representation of the given type. + + switch val.ty { + case Bool: + if val.v.(bool) { + return "cty.True" + } + return "cty.False" + case Number: + fv := val.v.(*big.Float) + // We'll try to use NumberIntVal or NumberFloatVal if we can, since + // the fully-general initializer call is pretty ugly-looking. 
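+		// (An integral value is rendered as cty.NumberIntVal, a value that is
+		// exactly representable as a float64 as cty.NumberFloatVal, and
+		// anything else falls back to cty.MustParseNumberVal on the full
+		// decimal text.)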
+ if fv.IsInt() { + return fmt.Sprintf("cty.NumberIntVal(%#v)", fv) + } + if rfv, accuracy := fv.Float64(); accuracy == big.Exact { + return fmt.Sprintf("cty.NumberFloatVal(%#v)", rfv) + } + return fmt.Sprintf("cty.MustParseNumberVal(%q)", fv.Text('f', -1)) + case String: + return fmt.Sprintf("cty.StringVal(%#v)", val.v) + } + + switch { + case val.ty.IsSetType(): + vals := val.AsValueSlice() + if len(vals) == 0 { + return fmt.Sprintf("cty.SetValEmpty(%#v)", val.ty.ElementType()) + } + return fmt.Sprintf("cty.SetVal(%#v)", vals) + case val.ty.IsListType(): + vals := val.AsValueSlice() + if len(vals) == 0 { + return fmt.Sprintf("cty.ListValEmpty(%#v)", val.ty.ElementType()) + } + return fmt.Sprintf("cty.ListVal(%#v)", vals) + case val.ty.IsMapType(): + vals := val.AsValueMap() + if len(vals) == 0 { + return fmt.Sprintf("cty.MapValEmpty(%#v)", val.ty.ElementType()) + } + return fmt.Sprintf("cty.MapVal(%#v)", vals) + case val.ty.IsTupleType(): + if val.ty.Equals(EmptyTuple) { + return "cty.EmptyTupleVal" + } + vals := val.AsValueSlice() + return fmt.Sprintf("cty.TupleVal(%#v)", vals) + case val.ty.IsObjectType(): + if val.ty.Equals(EmptyObject) { + return "cty.EmptyObjectVal" + } + vals := val.AsValueMap() + return fmt.Sprintf("cty.ObjectVal(%#v)", vals) + case val.ty.IsCapsuleType(): + impl := val.ty.CapsuleOps().GoString + if impl == nil { + return fmt.Sprintf("cty.CapsuleVal(%#v, %#v)", val.ty, val.v) + } + return impl(val.EncapsulatedValue()) + } + + // Default exposes implementation details, so should actually cover + // all of the cases above for good caller UX. + return fmt.Sprintf("cty.Value{ty: %#v, v: %#v}", val.ty, val.v) +} + +// Equals returns True if the receiver and the given other value have the +// same type and are exactly equal in value. +// +// As a special case, two null values are always equal regardless of type. +// +// The usual short-circuit rules apply, so the result will be unknown if +// either of the given values are. +// +// Use RawEquals to compare if two values are equal *ignoring* the +// short-circuit rules and the exception for null values. +func (val Value) Equals(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Equals(other).WithMarks(valMarks, otherMarks) + } + + // Start by handling Unknown values before considering types. + // This needs to be done since Null values are always equal regardless of + // type. + switch { + case !val.IsKnown() && !other.IsKnown(): + // both unknown + return UnknownVal(Bool) + case val.IsKnown() && !other.IsKnown(): + switch { + case val.IsNull(), other.ty.HasDynamicTypes(): + // If known is Null, we need to wait for the unkown value since + // nulls of any type are equal. + // An unkown with a dynamic type compares as unknown, which we need + // to check before the type comparison below. + return UnknownVal(Bool) + case !val.ty.Equals(other.ty): + // There is no null comparison or dynamic types, so unequal types + // will never be equal. + return False + default: + return UnknownVal(Bool) + } + case other.IsKnown() && !val.IsKnown(): + switch { + case other.IsNull(), val.ty.HasDynamicTypes(): + // If known is Null, we need to wait for the unkown value since + // nulls of any type are equal. + // An unkown with a dynamic type compares as unknown, which we need + // to check before the type comparison below. 
+ return UnknownVal(Bool) + case !other.ty.Equals(val.ty): + // There's no null comparison or dynamic types, so unequal types + // will never be equal. + return False + default: + return UnknownVal(Bool) + } + } + + switch { + case val.IsNull() && other.IsNull(): + // Nulls are always equal, regardless of type + return BoolVal(true) + case val.IsNull() || other.IsNull(): + // If only one is null then the result must be false + return BoolVal(false) + } + + if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() { + return UnknownVal(Bool) + } + + if !val.ty.Equals(other.ty) { + return BoolVal(false) + } + + ty := val.ty + result := false + + switch { + case ty == Number: + result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0 + case ty == Bool: + result = val.v.(bool) == other.v.(bool) + case ty == String: + // Simple equality is safe because we NFC-normalize strings as they + // enter our world from StringVal, and so we can assume strings are + // always in normal form. + result = val.v.(string) == other.v.(string) + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + result = true + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + result = true + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + result = true + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + equal := true + + // Note that by our definition of sets it's never possible for two + // sets that contain unknown values (directly or indicrectly) to + // ever be equal, even if they are otherwise identical. + + // FIXME: iterating both lists and checking each item is not the + // ideal implementation here, but it works with the primitives we + // have in the set implementation. Perhaps the set implementation + // can provide its own equality test later. 
+ s1.EachValue(func(v interface{}) { + if !s2.Has(v) { + equal = false + } + }) + s2.EachValue(func(v interface{}) { + if !s1.Has(v) { + equal = false + } + }) + + result = equal + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + result = true + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + result = false + break + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.Equals(rhs) + if !eq.IsKnown() { + return UnknownVal(Bool) + } + if eq.False() { + result = false + break + } + } + } + case ty.IsCapsuleType(): + impl := val.ty.CapsuleOps().Equals + if impl == nil { + impl := val.ty.CapsuleOps().RawEquals + if impl == nil { + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return BoolVal(val.v == other.v) + } + return BoolVal(impl(val.v, other.v)) + } + ret := impl(val.v, other.v) + if !ret.Type().Equals(Bool) { + panic(fmt.Sprintf("Equals for %#v returned %#v, not cty.Bool", ty, ret.Type())) + } + return ret + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in Equals", ty)) + } + + return BoolVal(result) +} + +// NotEqual is a shorthand for Equals followed by Not. +func (val Value) NotEqual(other Value) Value { + return val.Equals(other).Not() +} + +// True returns true if the receiver is True, false if False, and panics if +// the receiver is not of type Bool. +// +// This is a helper function to help write application logic that works with +// values, rather than a first-class operation. It does not work with unknown +// or null values. For more robust handling with unknown value +// short-circuiting, use val.Equals(cty.True). +func (val Value) True() bool { + val.assertUnmarked() + if val.ty != Bool { + panic("not bool") + } + return val.Equals(True).v.(bool) +} + +// False is the opposite of True. +func (val Value) False() bool { + return !val.True() +} + +// RawEquals returns true if and only if the two given values have the same +// type and equal value, ignoring the usual short-circuit rules about +// unknowns and dynamic types. +// +// This method is more appropriate for testing than for real use, since it +// skips over usual semantics around unknowns but as a consequence allows +// testing the result of another operation that is expected to return unknown. +// It returns a primitive Go bool rather than a Value to remind us that it +// is not a first-class value operation. +func (val Value) RawEquals(other Value) bool { + if !val.ty.Equals(other.ty) { + return false + } + if !val.HasSameMarks(other) { + return false + } + // Since we've now checked the marks, we'll unmark for the rest of this... 
+ val = val.unmarkForce() + other = other.unmarkForce() + + if (!val.IsKnown()) && (!other.IsKnown()) { + return true + } + if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) { + return false + } + if val.IsNull() && other.IsNull() { + return true + } + if (val.IsNull() && !other.IsNull()) || (other.IsNull() && !val.IsNull()) { + return false + } + if val.ty == DynamicPseudoType && other.ty == DynamicPseudoType { + return true + } + + ty := val.ty + switch { + case ty == Number || ty == Bool || ty == String || ty == DynamicPseudoType: + return val.Equals(other).True() + case ty.IsObjectType(): + oty := ty.typeImpl.(typeObject) + for attr, aty := range oty.AttrTypes { + lhs := Value{ + ty: aty, + v: val.v.(map[string]interface{})[attr], + } + rhs := Value{ + ty: aty, + v: other.v.(map[string]interface{})[attr], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsTupleType(): + tty := ty.typeImpl.(typeTuple) + for i, ety := range tty.ElemTypes { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + case ty.IsListType(): + ety := ty.typeImpl.(typeList).ElementTypeT + if len(val.v.([]interface{})) == len(other.v.([]interface{})) { + for i := range val.v.([]interface{}) { + lhs := Value{ + ty: ety, + v: val.v.([]interface{})[i], + } + rhs := Value{ + ty: ety, + v: other.v.([]interface{})[i], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsSetType(): + s1 := val.v.(set.Set) + s2 := other.v.(set.Set) + + // Since we're intentionally ignoring our rule that two unknowns + // are never equal, we can cheat here. + // (This isn't 100% right since e.g. it will fail if the set contains + // numbers that are infinite, which DeepEqual can't compare properly. + // We're accepting that limitation for simplicity here, since this + // function is here primarily for testing.) + return reflect.DeepEqual(s1, s2) + + case ty.IsMapType(): + ety := ty.typeImpl.(typeMap).ElementTypeT + if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) { + for k := range val.v.(map[string]interface{}) { + if _, ok := other.v.(map[string]interface{})[k]; !ok { + return false + } + lhs := Value{ + ty: ety, + v: val.v.(map[string]interface{})[k], + } + rhs := Value{ + ty: ety, + v: other.v.(map[string]interface{})[k], + } + eq := lhs.RawEquals(rhs) + if !eq { + return false + } + } + return true + } + return false + case ty.IsCapsuleType(): + impl := val.ty.CapsuleOps().RawEquals + if impl == nil { + // A capsule type's encapsulated value is a pointer to a value of its + // native type, so we can just compare these to get the identity test + // we need. + return val.v == other.v + } + return impl(val.v, other.v) + + default: + // should never happen + panic(fmt.Errorf("unsupported value type %#v in RawEquals", ty)) + } +} + +// Add returns the sum of the receiver and the given other value. Both values +// must be numbers; this method will panic if not. 
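+//
+// For example (illustrative):
+//
+//     cty.NumberIntVal(2).Add(cty.NumberIntVal(3)) // cty.NumberIntVal(5)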
+func (val Value) Add(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Add(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Add(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Subtract returns receiver minus the given other value. Both values must be +// numbers; this method will panic if not. +func (val Value) Subtract(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Subtract(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + return val.Add(other.Negate()) +} + +// Negate returns the numeric negative of the receiver, which must be a number. +// This method will panic when given a value of any other type. +func (val Value) Negate() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Negate().WithMarks(valMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float).Neg(val.v.(*big.Float)) + return NumberVal(ret) +} + +// Multiply returns the product of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +func (val Value) Multiply(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Multiply(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Mul(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Divide returns the quotient of the receiver and the given other value. +// Both values must be numbers; this method will panic if not. +// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// If both values are zero or infinity, this function will panic with +// an instance of big.ErrNaN. +func (val Value) Divide(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Divide(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := new(big.Float) + ret.Quo(val.v.(*big.Float), other.v.(*big.Float)) + return NumberVal(ret) +} + +// Modulo returns the remainder of an integer division of the receiver and +// the given other value. Both values must be numbers; this method will panic +// if not. 
+// +// If the "other" value is exactly zero, this operation will return either +// PositiveInfinity or NegativeInfinity, depending on the sign of the +// receiver value. For some use-cases the presence of infinities may be +// undesirable, in which case the caller should check whether the +// other value equals zero before calling and raise an error instead. +// +// This operation is primarily here for use with nonzero natural numbers. +// Modulo with "other" as a non-natural number gets somewhat philosophical, +// and this function takes a position on what that should mean, but callers +// may wish to disallow such things outright or implement their own modulo +// if they disagree with the interpretation used here. +func (val Value) Modulo(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Modulo(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + // We cheat a bit here with infinities, just abusing the Multiply operation + // to get an infinite result of the correct sign. + if val == PositiveInfinity || val == NegativeInfinity || other == PositiveInfinity || other == NegativeInfinity { + return val.Multiply(other) + } + + if other.RawEquals(Zero) { + return val + } + + // FIXME: This is a bit clumsy. Should come back later and see if there's a + // more straightforward way to do this. + rat := val.Divide(other) + ratFloorInt := &big.Int{} + rat.v.(*big.Float).Int(ratFloorInt) + work := (&big.Float{}).SetInt(ratFloorInt) + work.Mul(other.v.(*big.Float), work) + work.Sub(val.v.(*big.Float), work) + + return NumberVal(work) +} + +// Absolute returns the absolute (signless) value of the receiver, which must +// be a number or this method will panic. +func (val Value) Absolute() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Absolute().WithMarks(valMarks) + } + + if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Number) + return *shortCircuit + } + + ret := (&big.Float{}).Abs(val.v.(*big.Float)) + return NumberVal(ret) +} + +// GetAttr returns the value of the given attribute of the receiver, which +// must be of an object type that has an attribute of the given name. +// This method will panic if the receiver type is not compatible. +// +// The method will also panic if the given attribute name is not defined +// for the value's type. Use the attribute-related methods on Type to +// check for the validity of an attribute before trying to use it. +// +// This method may be called on a value whose type is DynamicPseudoType, +// in which case the result will also be DynamicVal. 
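+//
+// Editor's note: a minimal illustrative sketch (not part of the upstream
+// go-cty source), assuming the package is imported as "cty":
+//
+//	obj := cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("a")})
+//	obj.GetAttr("name")         // cty.StringVal("a")
+//	cty.DynamicVal.GetAttr("x") // cty.DynamicVal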
+func (val Value) GetAttr(name string) Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.GetAttr(name).WithMarks(valMarks) + } + + if val.ty == DynamicPseudoType { + return DynamicVal + } + + if !val.ty.IsObjectType() { + panic("value is not an object") + } + + name = NormalizeString(name) + if !val.ty.HasAttribute(name) { + panic("value has no attribute of that name") + } + + attrType := val.ty.AttributeType(name) + + if !val.IsKnown() { + return UnknownVal(attrType) + } + + return Value{ + ty: attrType, + v: val.v.(map[string]interface{})[name], + } +} + +// Index returns the value of an element of the receiver, which must have +// either a list, map or tuple type. This method will panic if the receiver +// type is not compatible. +// +// The key value must be the correct type for the receiving collection: a +// number if the collection is a list or tuple, or a string if it is a map. +// In the case of a list or tuple, the given number must be convertible to int +// or this method will panic. The key may alternatively be of +// DynamicPseudoType, in which case the result itself is an unknown of the +// collection's element type. +// +// The result is of the receiver collection's element type, or in the case +// of a tuple the type of the specific element index requested. +// +// This method may be called on a value whose type is DynamicPseudoType, +// in which case the result will also be DynamicVal. +func (val Value) Index(key Value) Value { + if val.IsMarked() || key.IsMarked() { + val, valMarks := val.Unmark() + key, keyMarks := key.Unmark() + return val.Index(key).WithMarks(valMarks, keyMarks) + } + + if val.ty == DynamicPseudoType { + return DynamicVal + } + + switch { + case val.Type().IsListType(): + elty := val.Type().ElementType() + if key.Type() == DynamicPseudoType { + return UnknownVal(elty) + } + + if key.Type() != Number { + panic("element key for list must be number") + } + if !key.IsKnown() { + return UnknownVal(elty) + } + + if !val.IsKnown() { + return UnknownVal(elty) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + panic("element key for list must be non-negative integer") + } + + return Value{ + ty: elty, + v: val.v.([]interface{})[index], + } + case val.Type().IsMapType(): + elty := val.Type().ElementType() + if key.Type() == DynamicPseudoType { + return UnknownVal(elty) + } + + if key.Type() != String { + panic("element key for map must be string") + } + if !key.IsKnown() { + return UnknownVal(elty) + } + + if !val.IsKnown() { + return UnknownVal(elty) + } + + keyStr := key.v.(string) + + return Value{ + ty: elty, + v: val.v.(map[string]interface{})[keyStr], + } + case val.Type().IsTupleType(): + if key.Type() == DynamicPseudoType { + return DynamicVal + } + + if key.Type() != Number { + panic("element key for tuple must be number") + } + if !key.IsKnown() { + return DynamicVal + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + panic("element key for list must be non-negative integer") + } + + eltys := val.Type().TupleElementTypes() + + if !val.IsKnown() { + return UnknownVal(eltys[index]) + } + + return Value{ + ty: eltys[index], + v: val.v.([]interface{})[index], + } + default: + panic("not a list, map, or tuple type") + } +} + +// HasIndex returns True if the receiver (which must be supported for Index) +// has an element with the given index key, or False if it does not.
+// +// The result will be UnknownVal(Bool) if either the collection or the +// key value are unknown. +// +// This method will panic if the receiver is not indexable, but does not +// impose any panic-causing type constraints on the key. +func (val Value) HasIndex(key Value) Value { + if val.IsMarked() || key.IsMarked() { + val, valMarks := val.Unmark() + key, keyMarks := key.Unmark() + return val.HasIndex(key).WithMarks(valMarks, keyMarks) + } + + if val.ty == DynamicPseudoType { + return UnknownVal(Bool) + } + + switch { + case val.Type().IsListType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0) + case val.Type().IsMapType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != String { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + if !val.IsKnown() { + return UnknownVal(Bool) + } + + keyStr := key.v.(string) + _, exists := val.v.(map[string]interface{})[keyStr] + + return BoolVal(exists) + case val.Type().IsTupleType(): + if key.Type() == DynamicPseudoType { + return UnknownVal(Bool) + } + + if key.Type() != Number { + return False + } + if !key.IsKnown() { + return UnknownVal(Bool) + } + + index, accuracy := key.v.(*big.Float).Int64() + if accuracy != big.Exact || index < 0 { + return False + } + + length := val.Type().Length() + return BoolVal(int(index) < length && index >= 0) + default: + panic("not a list, map, or tuple type") + } +} + +// HasElement returns True if the receiver (which must be of a set type) +// has the given value as an element, or False if it does not. +// +// The result will be UnknownVal(Bool) if either the set or the +// given value are unknown. +// +// This method will panic if the receiver is not a set, or if it is a null set. +func (val Value) HasElement(elem Value) Value { + if val.IsMarked() || elem.IsMarked() { + val, valMarks := val.Unmark() + elem, elemMarks := elem.Unmark() + return val.HasElement(elem).WithMarks(valMarks, elemMarks) + } + + ty := val.Type() + + if !ty.IsSetType() { + panic("not a set type") + } + if !val.IsKnown() || !elem.IsKnown() { + return UnknownVal(Bool) + } + if val.IsNull() { + panic("can't call HasElement on a nil value") + } + if !ty.ElementType().Equals(elem.Type()) { + return False + } + + s := val.v.(set.Set) + return BoolVal(s.Has(elem.v)) +} + +// Length returns the length of the receiver, which must be a collection type +// or tuple type, as a number value. If the receiver is not a compatible type +// then this method will panic. +// +// If the receiver is unknown then the result is also unknown. +// +// If the receiver is null then this function will panic. +// +// Note that Length is not supported for strings. To determine the length +// of a string, call AsString and take the length of the native Go string +// that is returned. +func (val Value) Length() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Length().WithMarks(valMarks) + } + + if val.Type().IsTupleType() { + // For tuples, we can return the length even if the value is not known. 
+ return NumberIntVal(int64(val.Type().Length())) + } + + if !val.IsKnown() { + return UnknownVal(Number) + } + + return NumberIntVal(int64(val.LengthInt())) +} + +// LengthInt is like Length except that it returns a native int, and it will +// panic if the receiver is unknown. +// +// This is an integration method provided for the convenience of code bridging +// into Go's type system. +func (val Value) LengthInt() int { + val.assertUnmarked() + if val.Type().IsTupleType() { + // For tuples, we can return the length even if the value is not known. + return val.Type().Length() + } + if val.Type().IsObjectType() { + // For objects, the length is the number of attributes associated with the type. + return len(val.Type().AttributeTypes()) + } + if !val.IsKnown() { + panic("value is not known") + } + if val.IsNull() { + panic("value is null") + } + + switch { + + case val.ty.IsListType(): + return len(val.v.([]interface{})) + + case val.ty.IsSetType(): + return val.v.(set.Set).Length() + + case val.ty.IsMapType(): + return len(val.v.(map[string]interface{})) + + default: + panic("value is not a collection") + } +} + +// ElementIterator returns an ElementIterator for iterating the elements +// of the receiver, which must be a collection type, a tuple type, or an object +// type. If called on a value of any other type, this method will panic. +// +// The value must be Known and non-Null, or this method will panic. +// +// If the receiver is of a list type, the returned keys will be of type Number +// and the values will be of the list's element type. +// +// If the receiver is of a map type, the returned keys will be of type String +// and the value will be of the map's element type. Elements are passed in +// ascending lexicographical order by key. +// +// If the receiver is of a set type, each element is returned as both the +// key and the value, since set members are their own identity. +// +// If the receiver is of a tuple type, the returned keys will be of type Number +// and the value will be of the corresponding element's type. +// +// If the receiver is of an object type, the returned keys will be of type +// String and the value will be of the corresponding attribute's type. +// +// ElementIterator is an integration method, so it cannot handle Unknown +// values. This method will panic if the receiver is Unknown. +func (val Value) ElementIterator() ElementIterator { + val.assertUnmarked() + if !val.IsKnown() { + panic("can't use ElementIterator on unknown value") + } + if val.IsNull() { + panic("can't use ElementIterator on null value") + } + return elementIterator(val) +} + +// CanIterateElements returns true if the receiver can support the +// ElementIterator method (and by extension, ForEachElement) without panic. +func (val Value) CanIterateElements() bool { + return canElementIterator(val) +} + +// ForEachElement executes a given callback function for each element of +// the receiver, which must be a collection type or tuple type, or this method +// will panic. +// +// ForEachElement uses ElementIterator internally, and so the values passed +// to the callback are as described for ElementIterator. +// +// Returns true if the iteration exited early due to the callback function +// returning true, or false if the loop ran to completion. +// +// ForEachElement is an integration method, so it cannot handle Unknown +// values. This method will panic if the receiver is Unknown.
+func (val Value) ForEachElement(cb ElementCallback) bool { + val.assertUnmarked() + it := val.ElementIterator() + for it.Next() { + key, val := it.Element() + stop := cb(key, val) + if stop { + return true + } + } + return false +} + +// Not returns the logical inverse of the receiver, which must be of type +// Bool or this method will panic. +func (val Value) Not() Value { + if val.IsMarked() { + val, valMarks := val.Unmark() + return val.Not().WithMarks(valMarks) + } + + if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(!val.v.(bool)) +} + +// And returns the result of logical AND with the receiver and the other given +// value, which must both be of type Bool or this method will panic. +func (val Value) And(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.And(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) && other.v.(bool)) +} + +// Or returns the result of logical OR with the receiver and the other given +// value, which must both be of type Bool or this method will panic. +func (val Value) Or(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.Or(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(bool) || other.v.(bool)) +} + +// LessThan returns True if the receiver is less than the other given value, +// which must both be numbers or this method will panic. +func (val Value) LessThan(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.LessThan(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0) +} + +// GreaterThan returns True if the receiver is greater than the other given +// value, which must both be numbers or this method will panic. +func (val Value) GreaterThan(other Value) Value { + if val.IsMarked() || other.IsMarked() { + val, valMarks := val.Unmark() + other, otherMarks := other.Unmark() + return val.GreaterThan(other).WithMarks(valMarks, otherMarks) + } + + if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil { + shortCircuit = forceShortCircuitType(shortCircuit, Bool) + return *shortCircuit + } + + return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0) +} + +// LessThanOrEqualTo is equivalent to LessThan and Equal combined with Or. +func (val Value) LessThanOrEqualTo(other Value) Value { + return val.LessThan(other).Or(val.Equals(other)) +} + +// GreaterThanOrEqualTo is equivalent to GreaterThan and Equal combined with Or. 
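+//
+// Editor's note: a minimal illustrative sketch (not part of the upstream
+// go-cty source); comparisons yield cty.Bool values and propagate unknowns:
+//
+//	cty.NumberIntVal(1).LessThan(cty.NumberIntVal(2))         // cty.True
+//	cty.UnknownVal(cty.Number).GreaterThanOrEqualTo(cty.Zero) // cty.UnknownVal(cty.Bool)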
+func (val Value) GreaterThanOrEqualTo(other Value) Value { + return val.GreaterThan(other).Or(val.Equals(other)) +} + +// AsString returns the native string from a non-null, non-unknown cty.String +// value, or panics if called on any other value. +func (val Value) AsString() string { + val.assertUnmarked() + if val.ty != String { + panic("not a string") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + return val.v.(string) +} + +// AsBigFloat returns a big.Float representation of a non-null, non-unknown +// cty.Number value, or panics if called on any other value. +// +// For more convenient conversions to other native numeric types, use the +// "gocty" package. +func (val Value) AsBigFloat() *big.Float { + val.assertUnmarked() + if val.ty != Number { + panic("not a number") + } + if val.IsNull() { + panic("value is null") + } + if !val.IsKnown() { + panic("value is unknown") + } + + // Copy the float so that callers can't mutate our internal state + ret := *(val.v.(*big.Float)) + + return &ret +} + +// AsValueSlice returns a []cty.Value representation of a non-null, non-unknown +// value of any type that CanIterateElements, or panics if called on +// any other value. +// +// For more convenient conversions to slices of more specific types, use +// the "gocty" package. +func (val Value) AsValueSlice() []Value { + val.assertUnmarked() + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret = append(ret, v) + } + return ret +} + +// AsValueMap returns a map[string]cty.Value representation of a non-null, +// non-unknown value of any type that CanIterateElements, or panics if called +// on any other value. +// +// For more convenient conversions to maps of more specific types, use +// the "gocty" package. +func (val Value) AsValueMap() map[string]Value { + val.assertUnmarked() + l := val.LengthInt() + if l == 0 { + return nil + } + + ret := make(map[string]Value, l) + for it := val.ElementIterator(); it.Next(); { + k, v := it.Element() + ret[k.AsString()] = v + } + return ret +} + +// AsValueSet returns a ValueSet representation of a non-null, +// non-unknown value of any collection type, or panics if called +// on any other value. +// +// Unlike AsValueSlice and AsValueMap, this method requires specifically a +// collection type (list, set or map) and does not allow structural types +// (tuple or object), because the ValueSet type requires homogenous +// element types. +// +// The returned ValueSet can store only values of the receiver's element type. +func (val Value) AsValueSet() ValueSet { + val.assertUnmarked() + if !val.Type().IsCollectionType() { + panic("not a collection type") + } + + // We don't give the caller our own set.Set (assuming we're a cty.Set value) + // because then the caller could mutate our internals, which is forbidden. + // Instead, we will construct a new set and append our elements into it. + ret := NewValueSet(val.Type().ElementType()) + for it := val.ElementIterator(); it.Next(); { + _, v := it.Element() + ret.Add(v) + } + return ret +} + +// EncapsulatedValue returns the native value encapsulated in a non-null, +// non-unknown capsule-typed value, or panics if called on any other value. +// +// The result is the same pointer that was passed to CapsuleVal to create +// the value. 
Since cty considers values to be immutable, it is strongly +// recommended to treat the encapsulated value itself as immutable too. +func (val Value) EncapsulatedValue() interface{} { + val.assertUnmarked() + if !val.Type().IsCapsuleType() { + panic("not a capsule-typed value") + } + + return val.v +} diff --git a/vendor/github.com/hashicorp/go-cty/cty/walk.go b/vendor/github.com/hashicorp/go-cty/cty/walk.go new file mode 100644 index 000000000..a6943babe --- /dev/null +++ b/vendor/github.com/hashicorp/go-cty/cty/walk.go @@ -0,0 +1,182 @@ +package cty + +// Walk visits all of the values in a possibly-complex structure, calling +// a given function for each value. +// +// For example, given a list of strings the callback would first be called +// with the whole list and then called once for each element of the list. +// +// The callback function may prevent recursive visits to child values by +// returning false. The callback function may halt the walk altogether by +// returning a non-nil error. If the returned error is about the element +// currently being visited, it is recommended to use the provided path +// value to produce a PathError describing that context. +// +// The path passed to the given function may not be used after that function +// returns, since its backing array is re-used for other calls. +func Walk(val Value, cb func(Path, Value) (bool, error)) error { + var path Path + return walk(path, val, cb) +} + +func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error { + deeper, err := cb(path, val) + if err != nil { + return err + } + if !deeper { + return nil + } + + if val.IsNull() || !val.IsKnown() { + // Can't recurse into null or unknown values, regardless of type + return nil + } + + ty := val.Type() + switch { + case ty.IsObjectType(): + for it := val.ElementIterator(); it.Next(); { + nameVal, av := it.Element() + path := append(path, GetAttrStep{ + Name: nameVal.AsString(), + }) + err := walk(path, av, cb) + if err != nil { + return err + } + } + case val.CanIterateElements(): + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + err := walk(path, ev, cb) + if err != nil { + return err + } + } + } + return nil +} + +// Transform visits all of the values in a possibly-complex structure, +// calling a given function for each value which has an opportunity to +// replace that value. +// +// Unlike Walk, Transform visits child nodes first, so for a list of strings +// it would first visit the strings and then the _new_ list constructed +// from the transformed values of the list items. +// +// This is useful for creating the effect of being able to make deep mutations +// to a value even though values are immutable. However, it's the responsibility +// of the given function to preserve expected invariants, such as homogeneity of +// element types in collections; this function can panic if such invariants +// are violated, just as if new values were constructed directly using the +// value constructor functions. An easy way to preserve invariants is to +// ensure that the transform function never changes the value type. +// +// The callback function may halt the walk altogether by +// returning a non-nil error. If the returned error is about the element +// currently being visited, it is recommended to use the provided path +// value to produce a PathError describing that context.
+// +// The path passed to the given function may not be used after that function +// returns, since its backing array is re-used for other calls. +func Transform(val Value, cb func(Path, Value) (Value, error)) (Value, error) { + var path Path + return transform(path, val, cb) +} + +func transform(path Path, val Value, cb func(Path, Value) (Value, error)) (Value, error) { + ty := val.Type() + var newVal Value + + switch { + + case val.IsNull() || !val.IsKnown(): + // Can't recurse into null or unknown values, regardless of type + newVal = val + + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty sequence + newVal = val + default: + elems := make([]Value, 0, l) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems = append(elems, newEv) + } + switch { + case ty.IsListType(): + newVal = ListVal(elems) + case ty.IsSetType(): + newVal = SetVal(elems) + case ty.IsTupleType(): + newVal = TupleVal(elems) + default: + panic("unknown sequence type") // should never happen because of the case we are in + } + } + + case ty.IsMapType(): + l := val.LengthInt() + switch l { + case 0: + // No deep transform for an empty map + newVal = val + default: + elems := make(map[string]Value) + for it := val.ElementIterator(); it.Next(); { + kv, ev := it.Element() + path := append(path, IndexStep{ + Key: kv, + }) + newEv, err := transform(path, ev, cb) + if err != nil { + return DynamicVal, err + } + elems[kv.AsString()] = newEv + } + newVal = MapVal(elems) + } + + case ty.IsObjectType(): + switch { + case ty.Equals(EmptyObject): + // No deep transform for an empty object + newVal = val + default: + atys := ty.AttributeTypes() + newAVs := make(map[string]Value) + for name := range atys { + av := val.GetAttr(name) + path := append(path, GetAttrStep{ + Name: name, + }) + newAV, err := transform(path, av, cb) + if err != nil { + return DynamicVal, err + } + newAVs[name] = newAV + } + newVal = ObjectVal(newAVs) + } + + default: + newVal = val + } + + return cb(path, newVal) +} diff --git a/vendor/github.com/hashicorp/go-getter/.travis.yml b/vendor/github.com/hashicorp/go-getter/.travis.yml deleted file mode 100644 index 4fe9176aa..000000000 --- a/vendor/github.com/hashicorp/go-getter/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -sudo: false - -addons: - apt: - sources: - - sourceline: 'ppa:git-core/ppa' - packages: - - git - -language: go - -os: - - linux - - osx - -go: - - "1.11.x" - -before_script: - - go build ./cmd/go-getter - -branches: - only: - - master diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md index 3de23c709..bbcd15de9 100644 --- a/vendor/github.com/hashicorp/go-getter/README.md +++ b/vendor/github.com/hashicorp/go-getter/README.md @@ -1,10 +1,10 @@ # go-getter -[![Build Status](http://img.shields.io/travis/hashicorp/go-getter.svg?style=flat-square)][travis] +[![CircleCI](https://circleci.com/gh/hashicorp/go-getter/tree/master.svg?style=svg)][circleci] [![Build status](https://ci.appveyor.com/api/projects/status/ulq3qr43n62croyq/branch/master?svg=true)][appveyor] [![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] -[travis]: http://travis-ci.org/hashicorp/go-getter +[circleci]: https://circleci.com/gh/hashicorp/go-getter/tree/master [godocs]: 
http://godoc.org/github.com/hashicorp/go-getter [appveyor]: https://ci.appveyor.com/project/hashicorp/go-getter/branch/master @@ -356,3 +356,7 @@ In order to access to GCS, authentication credentials should be provided. More i - gcs::https://www.googleapis.com/storage/v1/bucket - gcs::https://www.googleapis.com/storage/v1/bucket/foo.zip - www.googleapis.com/storage/v1/bucket/foo + +#### GCS Testing + +The tests for `get_gcs.go` require you to have GCP credentials set in your environment. These credentials can have any level of permissions to any project, they just need to exist. This means setting `GOOGLE_APPLICATION_CREDENTIALS="~/path/to/credentials.json"` or `GOOGLE_CREDENTIALS="{stringified-credentials-json}"`. Due to this configuration, `get_gcs_test.go` will fail for external contributors in CircleCI. diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go index bb1ec316d..1b9f4be81 100644 --- a/vendor/github.com/hashicorp/go-getter/get_git.go +++ b/vendor/github.com/hashicorp/go-getter/get_git.go @@ -1,6 +1,7 @@ package getter import ( + "bytes" "context" "encoding/base64" "fmt" @@ -9,6 +10,7 @@ import ( "os" "os/exec" "path/filepath" + "regexp" "runtime" "strconv" "strings" @@ -24,6 +26,8 @@ type GitGetter struct { getter } +var defaultBranchRegexp = regexp.MustCompile(`\s->\sorigin/(.*)`) + func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { return ClientModeDir, nil } @@ -182,10 +186,10 @@ func (g *GitGetter) update(ctx context.Context, dst, sshKeyFile, ref string, dep cmd.Dir = dst if getRunCommand(cmd) != nil { - // Not a branch, switch to master. This will also catch non-existent - // branches, in which case we want to switch to master and then - // checkout the proper branch later. - ref = "master" + // Not a branch, switch to default branch. This will also catch + // non-existent branches, in which case we want to switch to default + // and then checkout the proper branch later. + ref = findDefaultBranch(dst) } // We have to be on a branch to pull @@ -216,6 +220,22 @@ func (g *GitGetter) fetchSubmodules(ctx context.Context, dst, sshKeyFile string, return getRunCommand(cmd) } +// findDefaultBranch checks the repo's origin remote for its default branch +// (generally "master"). "master" is returned if an origin default branch +// can't be determined. +func findDefaultBranch(dst string) string { + var stdoutbuf bytes.Buffer + cmd := exec.Command("git", "branch", "-r", "--points-at", "refs/remotes/origin/HEAD") + cmd.Dir = dst + cmd.Stdout = &stdoutbuf + err := cmd.Run() + matches := defaultBranchRegexp.FindStringSubmatch(stdoutbuf.String()) + if err != nil || matches == nil { + return "master" + } + return matches[len(matches)-1] +} + // setupGitEnv sets up the environment for the given command. This is used to // pass configuration data to git and ssh and enables advanced cloning methods. 
func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go index 7c4541c6e..9ffdba78a 100644 --- a/vendor/github.com/hashicorp/go-getter/get_http.go +++ b/vendor/github.com/hashicorp/go-getter/get_http.go @@ -9,7 +9,6 @@ import ( "net/url" "os" "path/filepath" - "strconv" "strings" safetemp "github.com/hashicorp/go-safetemp" @@ -88,7 +87,10 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { return err } - req.Header = g.Header + if g.Header != nil { + req.Header = g.Header + } + resp, err := g.Client.Do(req) if err != nil { return err @@ -128,6 +130,12 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { return g.getSubdir(ctx, dst, source, subDir) } +// GetFile fetches the file from src and stores it at dst. +// If the server supports Accept-Range, HttpGetter will attempt a range +// request. This means it is the caller's responsibility to ensure that an +// older version of the destination file does not exist, else it will be either +// falsely identified as being replaced, or corrupted with extra bytes +// appended. func (g *HttpGetter) GetFile(dst string, src *url.URL) error { ctx := g.Context() if g.Netrc { @@ -136,7 +144,6 @@ func (g *HttpGetter) GetFile(dst string, src *url.URL) error { return err } } - // Create all the parent directories if needed if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { return err @@ -165,18 +172,17 @@ func (g *HttpGetter) GetFile(dst string, src *url.URL) error { req.Header = g.Header } headResp, err := g.Client.Do(req) - if err == nil && headResp != nil { + if err == nil { headResp.Body.Close() if headResp.StatusCode == 200 { // If the HEAD request succeeded, then attempt to set the range // query if we can. - if headResp.Header.Get("Accept-Ranges") == "bytes" { + if headResp.Header.Get("Accept-Ranges") == "bytes" && headResp.ContentLength >= 0 { if fi, err := f.Stat(); err == nil { - if _, err = f.Seek(0, os.SEEK_END); err == nil { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) + if _, err = f.Seek(0, io.SeekEnd); err == nil { currentFileSize = fi.Size() - totalFileSize, _ := strconv.ParseInt(headResp.Header.Get("Content-Length"), 10, 64) - if currentFileSize >= totalFileSize { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", currentFileSize)) + if currentFileSize >= headResp.ContentLength { // file already present return nil } diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md index fe305ad59..46ee09fc0 100644 --- a/vendor/github.com/hashicorp/go-plugin/README.md +++ b/vendor/github.com/hashicorp/go-plugin/README.md @@ -141,11 +141,6 @@ This plugin system will give host processes a system for constraining versions. This is in addition to the protocol versioning already present which is more for larger underlying changes. -**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter) -to support automatic download + install of plugins. Paired with cryptographically -secure plugins (above), we can make this a safe operation for an amazing -user experience. - ## What About Shared Libraries? 
When we started using plugins (late 2012, early 2013), plugins over RPC diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go index bc56559c6..780a3121d 100644 --- a/vendor/github.com/hashicorp/go-plugin/client.go +++ b/vendor/github.com/hashicorp/go-plugin/client.go @@ -159,11 +159,8 @@ type ClientConfig struct { // SyncStdout, SyncStderr can be set to override the // respective os.Std* values in the plugin. Care should be taken to - // avoid races here. If these are nil, then this will automatically be - // hooked up to os.Stdin, Stdout, and Stderr, respectively. - // - // If the default values (nil) are used, then this package will not - // sync any of these streams. + // avoid races here. If these are nil, then this will be set to + // ioutil.Discard. SyncStdout io.Writer SyncStderr io.Writer @@ -215,6 +212,12 @@ type ReattachConfig struct { Protocol Protocol Addr net.Addr Pid int + + // Test is set to true if this is reattaching to to a plugin in "test mode" + // (see ServeConfig.Test). In this mode, client.Kill will NOT kill the + // process and instead will rely on the plugin to terminate itself. This + // should not be used in non-test environments. + Test bool } // SecureConfig is used to configure a client to verify the integrity of an @@ -690,14 +693,14 @@ func (c *Client) Start() (addr net.Addr, err error) { // Check the core protocol. Wrapped in a {} for scoping. { - var coreProtocol int64 - coreProtocol, err = strconv.ParseInt(parts[0], 10, 0) + var coreProtocol int + coreProtocol, err = strconv.Atoi(parts[0]) if err != nil { err = fmt.Errorf("Error parsing core protocol version: %s", err) return } - if int(coreProtocol) != CoreProtocolVersion { + if coreProtocol != CoreProtocolVersion { err = fmt.Errorf("Incompatible core API version with plugin. "+ "Plugin version: %s, Core version: %d\n\n"+ "To fix this, the plugin usually only needs to be recompiled.\n"+ @@ -788,7 +791,10 @@ func (c *Client) reattach() (net.Addr, error) { // Verify the process still exists. If not, then it is an error p, err := os.FindProcess(c.config.Reattach.Pid) if err != nil { - return nil, err + // On Unix systems, FindProcess never returns an error. + // On Windows, for non-existent pids it returns: + // os.SyscallError - 'OpenProcess: the paremter is incorrect' + return nil, ErrProcessNotFound } // Attempt to connect to the addr since on Unix systems FindProcess @@ -825,15 +831,21 @@ func (c *Client) reattach() (net.Addr, error) { c.exited = true }(p.Pid) - // Set the address and process + // Set the address and protocol c.address = c.config.Reattach.Addr - c.process = p c.protocol = c.config.Reattach.Protocol if c.protocol == "" { // Default the protocol to net/rpc for backwards compatibility c.protocol = ProtocolNetRPC } + // If we're in test mode, we do NOT set the process. This avoids the + // process being killed (the only purpose we have for c.process), since + // in test mode the process is responsible for exiting on its own. 
+ if !c.config.Reattach.Test { + c.process = p + } + return c.address, nil } diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod index f3ddf44e4..f0115b782 100644 --- a/vendor/github.com/hashicorp/go-plugin/go.mod +++ b/vendor/github.com/hashicorp/go-plugin/go.mod @@ -1,17 +1,15 @@ module github.com/hashicorp/go-plugin +go 1.13 + require ( - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect - github.com/golang/protobuf v1.2.0 + github.com/golang/protobuf v1.3.4 github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb + github.com/jhump/protoreflect v1.6.0 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 github.com/oklog/run v1.0.0 github.com/stretchr/testify v1.3.0 // indirect - golang.org/x/net v0.0.0-20180826012351-8a410e7b638d - golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect - golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc // indirect - golang.org/x/text v0.3.0 // indirect - google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 // indirect - google.golang.org/grpc v1.14.0 + golang.org/x/net v0.0.0-20190311183353-d8887717615a + google.golang.org/grpc v1.27.1 ) diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum index 21b14e998..5d497615f 100644 --- a/vendor/github.com/hashicorp/go-plugin/go.sum +++ b/vendor/github.com/hashicorp/go-plugin/go.sum @@ -1,31 +1,74 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod 
h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= +github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ= -golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go index 959df46d0..978121913 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -61,6 +61,13 @@ func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { go broker.Run() go brokerGRPCClient.StartStream() + // Start the stdio client + stdioClient, err := newGRPCStdioClient(doneCtx, c.logger.Named("stdio"), conn) + if err != nil { + return nil, err + } + go stdioClient.Run(c.config.SyncStdout, c.config.SyncStderr) + cl := &GRPCClient{ Conn: conn, Plugins: c.config.Plugins, diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go index d3dbf1ced..387628bf4 100644 --- a/vendor/github.com/hashicorp/go-plugin/grpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -14,6 +14,7 @@ import ( 
"google.golang.org/grpc/credentials" "google.golang.org/grpc/health" "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/reflection" ) // GRPCServiceName is the name of the service that the health check should @@ -51,9 +52,10 @@ type GRPCServer struct { Stdout io.Reader Stderr io.Reader - config GRPCServerConfig - server *grpc.Server - broker *GRPCBroker + config GRPCServerConfig + server *grpc.Server + broker *GRPCBroker + stdioServer *grpcStdioServer logger hclog.Logger } @@ -73,6 +75,9 @@ func (s *GRPCServer) Init() error { GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) grpc_health_v1.RegisterHealthServer(s.server, healthCheck) + // Register the reflection service + reflection.Register(s.server) + // Register the broker service brokerServer := newGRPCBrokerServer() plugin.RegisterGRPCBrokerServer(s.server, brokerServer) @@ -80,11 +85,13 @@ func (s *GRPCServer) Init() error { go s.broker.Run() // Register the controller - controllerServer := &grpcControllerServer{ - server: s, - } + controllerServer := &grpcControllerServer{server: s} plugin.RegisterGRPCControllerServer(s.server, controllerServer) + // Register the stdio service + s.stdioServer = newGRPCStdioServer(s.logger, s.Stdout, s.Stderr) + plugin.RegisterGRPCStdioServer(s.server, s.stdioServer) + // Register all our plugins onto the gRPC server. for k, raw := range s.Plugins { p, ok := raw.(GRPCPlugin) diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go new file mode 100644 index 000000000..6231a9fd6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go @@ -0,0 +1,207 @@ +package plugin + +import ( + "bufio" + "bytes" + "context" + "io" + + empty "github.com/golang/protobuf/ptypes/empty" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// grpcStdioBuffer is the buffer size we try to fill when sending a chunk of +// stdio data. This is currently 1 KB for no reason other than that seems like +// enough (stdio data isn't that common) and is fairly low. +const grpcStdioBuffer = 1 * 1024 + +// grpcStdioServer implements the Stdio service and streams stdiout/stderr. +type grpcStdioServer struct { + stdoutCh <-chan []byte + stderrCh <-chan []byte +} + +// newGRPCStdioServer creates a new grpcStdioServer and starts the stream +// copying for the given out and err readers. +// +// This must only be called ONCE per srcOut, srcErr. +func newGRPCStdioServer(log hclog.Logger, srcOut, srcErr io.Reader) *grpcStdioServer { + stdoutCh := make(chan []byte) + stderrCh := make(chan []byte) + + // Begin copying the streams + go copyChan(log, stdoutCh, srcOut) + go copyChan(log, stderrCh, srcErr) + + // Construct our server + return &grpcStdioServer{ + stdoutCh: stdoutCh, + stderrCh: stderrCh, + } +} + +// StreamStdio streams our stdout/err as the response. +func (s *grpcStdioServer) StreamStdio( + _ *empty.Empty, + srv plugin.GRPCStdio_StreamStdioServer, +) error { + // Share the same data value between runs. Sending this over the wire + // marshals it so we can reuse this. 
+ var data plugin.StdioData + + for { + // Read our data + select { + case data.Data = <-s.stdoutCh: + data.Channel = plugin.StdioData_STDOUT + + case data.Data = <-s.stderrCh: + data.Channel = plugin.StdioData_STDERR + + case <-srv.Context().Done(): + return nil + } + + // Not sure if this is possible, but if we somehow got here and + // we didn't populate any data at all, then just continue. + if len(data.Data) == 0 { + continue + } + + // Send our data to the client. + if err := srv.Send(&data); err != nil { + return err + } + } +} + +// grpcStdioClient wraps the stdio service as a client to copy +// the stdio data to output writers. +type grpcStdioClient struct { + log hclog.Logger + stdioClient plugin.GRPCStdio_StreamStdioClient +} + +// newGRPCStdioClient creates a grpcStdioClient. This will perform the +// initial connection to the stdio service. If the stdio service is unavailable +// then this will be a no-op. This allows this to work without error for +// plugins that don't support this. +func newGRPCStdioClient( + ctx context.Context, + log hclog.Logger, + conn *grpc.ClientConn, +) (*grpcStdioClient, error) { + client := plugin.NewGRPCStdioClient(conn) + + // Connect immediately to the endpoint + stdioClient, err := client.StreamStdio(ctx, &empty.Empty{}) + + // If we get an Unavailable or Unimplemented error, this means that the plugin isn't + // updated and linking to the latest version of go-plugin that supports + // this. We fall back to the previous behavior of just not syncing anything. + if status.Code(err) == codes.Unavailable || status.Code(err) == codes.Unimplemented { + log.Warn("stdio service not available, stdout/stderr syncing unavailable") + stdioClient = nil + err = nil + } + if err != nil { + return nil, err + } + + return &grpcStdioClient{ + log: log, + stdioClient: stdioClient, + }, nil +} + +// Run starts the loop that receives stdio data and writes it to the given +// writers. This blocks and should be run in a goroutine. +func (c *grpcStdioClient) Run(stdout, stderr io.Writer) { + // This will be nil if stdio is not supported by the plugin + if c.stdioClient == nil { + c.log.Warn("stdio service unavailable, run will do nothing") + return + } + + for { + c.log.Trace("waiting for stdio data") + data, err := c.stdioClient.Recv() + if err != nil { + if err == io.EOF || + status.Code(err) == codes.Unavailable || + status.Code(err) == codes.Canceled || + status.Code(err) == codes.Unimplemented || + err == context.Canceled { + c.log.Warn("received EOF, stopping recv loop", "err", err) + return + } + + c.log.Error("error receiving data", "err", err) + continue + } + + // Determine our output writer based on channel + var w io.Writer + switch data.Channel { + case plugin.StdioData_STDOUT: + w = stdout + + case plugin.StdioData_STDERR: + w = stderr + + default: + c.log.Warn("unknown channel, dropping", "channel", data.Channel) + continue + } + + // Write! In the event of an error we just continue. + if c.log.IsTrace() { + c.log.Trace("received data", "channel", data.Channel.String(), "len", len(data.Data)) + } + if _, err := io.Copy(w, bytes.NewReader(data.Data)); err != nil { + c.log.Error("failed to copy all bytes", "err", err) + } + } +} + +// copyChan copies an io.Reader into a channel. +func copyChan(log hclog.Logger, dst chan<- []byte, src io.Reader) { + bufsrc := bufio.NewReader(src) + + for { + // Make our data buffer. We allocate a new one per loop iteration + // so that we can send it over the channel. 
+ var data [1024]byte + + // Read the data, this will block until data is available + n, err := bufsrc.Read(data[:]) + + // We have to check if we have data BEFORE err != nil. The bufio + // docs guarantee n == 0 on EOF but its better to be safe here. + if n > 0 { + // We have data! Send it on the channel. This will block if there + // is no reader on the other side. We expect that go-plugin will + // connect immediately to the stdio server to drain this so we want + // this block to happen for backpressure. + dst <- data[:n] + } + + // If we hit EOF we're done copying + if err == io.EOF { + log.Debug("stdio EOF, exiting copy loop") + return + } + + // Any other error we just exit the loop. We don't expect there to + // be errors since our use case for this is reading/writing from + // a in-process pipe (os.Pipe). + if err != nil { + log.Warn("error copying stdio data, stopping copy", "err", err) + return + } + } +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go index aa2fdc813..fb9d41525 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go @@ -1,3 +1,3 @@ -//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto --go_out=plugins=grpc:. +//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto ./grpc_stdio.proto --go_out=plugins=grpc:. package plugin diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go index 1c2834d97..6bf103859 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -3,11 +3,11 @@ package plugin -import ( - fmt "fmt" - math "math" +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" - proto "github.com/golang/protobuf/proto" +import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) @@ -36,17 +36,16 @@ func (m *ConnInfo) Reset() { *m = ConnInfo{} } func (m *ConnInfo) String() string { return proto.CompactTextString(m) } func (*ConnInfo) ProtoMessage() {} func (*ConnInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_802e9beed3ec3b28, []int{0} + return fileDescriptor_grpc_broker_3322b07398605250, []int{0} } - func (m *ConnInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ConnInfo.Unmarshal(m, b) } func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) } -func (m *ConnInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnInfo.Merge(m, src) +func (dst *ConnInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnInfo.Merge(dst, src) } func (m *ConnInfo) XXX_Size() int { return xxx_messageInfo_ConnInfo.Size(m) @@ -82,23 +81,6 @@ func init() { proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") } -func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_802e9beed3ec3b28) } - -var fileDescriptor_802e9beed3ec3b28 = []byte{ - // 175 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, - 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, - 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, - 0xcb, 0x17, 0x92, 0xe5, 
0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, - 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, - 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, - 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, - 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, - 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, - 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, - 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, -} - // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn @@ -202,3 +184,20 @@ var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ }, Metadata: "grpc_broker.proto", } + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_grpc_broker_3322b07398605250) } + +var fileDescriptor_grpc_broker_3322b07398605250 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, + 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, + 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, + 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, + 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, + 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, + 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, + 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, + 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto index 3fa79e8ac..aa3df4630 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -11,5 +11,3 @@ message ConnInfo { service GRPCBroker { rpc StartStream(stream ConnInfo) returns (stream ConnInfo); } - - diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go index 6366140e6..3e39da95a 100644 --- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -3,11 +3,11 @@ package plugin -import ( - fmt "fmt" - math "math" +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" - proto "github.com/golang/protobuf/proto" +import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) @@ -33,17 +33,16 @@ func (m *Empty) Reset() { *m = Empty{} } func (m *Empty) String() string { return proto.CompactTextString(m) } func (*Empty) ProtoMessage() {} func 
(*Empty) Descriptor() ([]byte, []int) { - return fileDescriptor_23c2c7e42feab570, []int{0} + return fileDescriptor_grpc_controller_08f8296ef6d80436, []int{0} } - func (m *Empty) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Empty.Unmarshal(m, b) } func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Empty.Marshal(b, m, deterministic) } -func (m *Empty) XXX_Merge(src proto.Message) { - xxx_messageInfo_Empty.Merge(m, src) +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) } func (m *Empty) XXX_Size() int { return xxx_messageInfo_Empty.Size(m) @@ -58,19 +57,6 @@ func init() { proto.RegisterType((*Empty)(nil), "plugin.Empty") } -func init() { proto.RegisterFile("grpc_controller.proto", fileDescriptor_23c2c7e42feab570) } - -var fileDescriptor_23c2c7e42feab570 = []byte{ - // 108 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, - 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, - 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, - 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, - 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, - 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, -} - // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn @@ -142,3 +128,18 @@ var _GRPCController_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "grpc_controller.proto", } + +func init() { + proto.RegisterFile("grpc_controller.proto", fileDescriptor_grpc_controller_08f8296ef6d80436) +} + +var fileDescriptor_grpc_controller_08f8296ef6d80436 = []byte{ + // 108 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, + 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, + 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, + 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go new file mode 100644 index 000000000..c8f94921b --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go @@ -0,0 +1,233 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_stdio.proto + +package plugin + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import empty "github.com/golang/protobuf/ptypes/empty" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StdioData_Channel int32 + +const ( + StdioData_INVALID StdioData_Channel = 0 + StdioData_STDOUT StdioData_Channel = 1 + StdioData_STDERR StdioData_Channel = 2 +) + +var StdioData_Channel_name = map[int32]string{ + 0: "INVALID", + 1: "STDOUT", + 2: "STDERR", +} +var StdioData_Channel_value = map[string]int32{ + "INVALID": 0, + "STDOUT": 1, + "STDERR": 2, +} + +func (x StdioData_Channel) String() string { + return proto.EnumName(StdioData_Channel_name, int32(x)) +} +func (StdioData_Channel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0, 0} +} + +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. +type StdioData struct { + Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StdioData) Reset() { *m = StdioData{} } +func (m *StdioData) String() string { return proto.CompactTextString(m) } +func (*StdioData) ProtoMessage() {} +func (*StdioData) Descriptor() ([]byte, []int) { + return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0} +} +func (m *StdioData) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StdioData.Unmarshal(m, b) +} +func (m *StdioData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StdioData.Marshal(b, m, deterministic) +} +func (dst *StdioData) XXX_Merge(src proto.Message) { + xxx_messageInfo_StdioData.Merge(dst, src) +} +func (m *StdioData) XXX_Size() int { + return xxx_messageInfo_StdioData.Size(m) +} +func (m *StdioData) XXX_DiscardUnknown() { + xxx_messageInfo_StdioData.DiscardUnknown(m) +} + +var xxx_messageInfo_StdioData proto.InternalMessageInfo + +func (m *StdioData) GetChannel() StdioData_Channel { + if m != nil { + return m.Channel + } + return StdioData_INVALID +} + +func (m *StdioData) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*StdioData)(nil), "plugin.StdioData") + proto.RegisterEnum("plugin.StdioData_Channel", StdioData_Channel_name, StdioData_Channel_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCStdioClient is the client API for GRPCStdio service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCStdioClient interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. 
+ StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) +} + +type gRPCStdioClient struct { + cc *grpc.ClientConn +} + +func NewGRPCStdioClient(cc *grpc.ClientConn) GRPCStdioClient { + return &gRPCStdioClient{cc} +} + +func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCStdio_serviceDesc.Streams[0], "/plugin.GRPCStdio/StreamStdio", opts...) + if err != nil { + return nil, err + } + x := &gRPCStdioStreamStdioClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type GRPCStdio_StreamStdioClient interface { + Recv() (*StdioData, error) + grpc.ClientStream +} + +type gRPCStdioStreamStdioClient struct { + grpc.ClientStream +} + +func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) { + m := new(StdioData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCStdioServer is the server API for GRPCStdio service. +type GRPCStdioServer interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + StreamStdio(*empty.Empty, GRPCStdio_StreamStdioServer) error +} + +func RegisterGRPCStdioServer(s *grpc.Server, srv GRPCStdioServer) { + s.RegisterService(&_GRPCStdio_serviceDesc, srv) +} + +func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(empty.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream}) +} + +type GRPCStdio_StreamStdioServer interface { + Send(*StdioData) error + grpc.ServerStream +} + +type gRPCStdioStreamStdioServer struct { + grpc.ServerStream +} + +func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error { + return x.ServerStream.SendMsg(m) +} + +var _GRPCStdio_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCStdio", + HandlerType: (*GRPCStdioServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamStdio", + Handler: _GRPCStdio_StreamStdio_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc_stdio.proto", +} + +func init() { proto.RegisterFile("grpc_stdio.proto", fileDescriptor_grpc_stdio_db2934322ca63bd5) } + +var fileDescriptor_grpc_stdio_db2934322ca63bd5 = []byte{ + // 221 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x2f, 0x2a, 0x48, + 0x8e, 0x2f, 0x2e, 0x49, 0xc9, 0xcc, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0xc8, + 0x29, 0x4d, 0xcf, 0xcc, 0x93, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x07, 0x8b, 0x26, + 0x95, 0xa6, 0xe9, 0xa7, 0xe6, 0x16, 0x94, 0x54, 0x42, 0x14, 0x29, 0xb5, 0x30, 0x72, 0x71, 0x06, + 0x83, 0x34, 0xb9, 0x24, 0x96, 0x24, 0x0a, 0x19, 0x73, 0xb1, 0x27, 0x67, 0x24, 0xe6, 0xe5, 0xa5, + 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x19, 0x49, 0xea, 0x41, 0x0c, 0xd1, 0x83, 0xab, 0xd1, + 0x73, 0x86, 0x28, 0x08, 0x82, 0xa9, 0x14, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, 0x49, 0x94, 0x60, + 0x52, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0xf4, 0xb8, 0xd8, 0xa1, 0xea, 0x84, 0xb8, 0xb9, + 0xd8, 0x3d, 0xfd, 
0xc2, 0x1c, 0x7d, 0x3c, 0x5d, 0x04, 0x18, 0x84, 0xb8, 0xb8, 0xd8, 0x82, 0x43, + 0x5c, 0xfc, 0x43, 0x43, 0x04, 0x18, 0xa1, 0x6c, 0xd7, 0xa0, 0x20, 0x01, 0x26, 0x23, 0x77, 0x2e, + 0x4e, 0xf7, 0xa0, 0x00, 0x67, 0xb0, 0x2d, 0x42, 0x56, 0x5c, 0xdc, 0xc1, 0x25, 0x45, 0xa9, 0x89, + 0xb9, 0x10, 0xae, 0x98, 0x1e, 0xc4, 0x03, 0x7a, 0x30, 0x0f, 0xe8, 0xb9, 0x82, 0x3c, 0x20, 0x25, + 0x88, 0xe1, 0x36, 0x03, 0x46, 0x27, 0x8e, 0x28, 0xa8, 0xb7, 0x93, 0xd8, 0xc0, 0xca, 0x8d, 0x01, + 0x01, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xbb, 0xe0, 0x69, 0x19, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto new file mode 100644 index 000000000..ce1a12230 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +import "google/protobuf/empty.proto"; + +// GRPCStdio is a service that is automatically run by the plugin process +// to stream any stdout/err data so that it can be mirrored on the plugin +// host side. +service GRPCStdio { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + rpc StreamStdio(google.protobuf.Empty) returns (stream StdioData); +} + +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. +message StdioData { + enum Channel { + INVALID = 0; + STDOUT = 1; + STDERR = 2; + } + + Channel channel = 1; + bytes data = 2; +} diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index 4c230e3ab..002d6080d 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -1,11 +1,13 @@ package plugin import ( + "context" "crypto/tls" "crypto/x509" "encoding/base64" "errors" "fmt" + "io" "io/ioutil" "log" "net" @@ -15,10 +17,8 @@ import ( "sort" "strconv" "strings" - "sync/atomic" - - "github.com/hashicorp/go-hclog" + hclog "github.com/hashicorp/go-hclog" "google.golang.org/grpc" ) @@ -85,6 +85,51 @@ type ServeConfig struct { // Logger is used to pass a logger into the server. If none is provided the // server will create a default logger. Logger hclog.Logger + + // Test, if non-nil, will put plugin serving into "test mode". This is + // meant to be used as part of `go test` within a plugin's codebase to + // launch the plugin in-process and output a ReattachConfig. + // + // This changes the behavior of the server in a number of ways to + // accomodate the expectation of running in-process: + // + // * The handshake cookie is not validated. + // * Stdout/stderr will receive plugin reads and writes + // * Connection information will not be sent to stdout + // + Test *ServeTestConfig +} + +// ServeTestConfig configures plugin serving for test mode. See ServeConfig.Test. +type ServeTestConfig struct { + // Context, if set, will force the plugin serving to end when cancelled. + // This is only a test configuration because the non-test configuration + // expects to take over the process and therefore end on an interrupt or + // kill signal. For tests, we need to kill the plugin serving routinely + // and this provides a way to do so. 
+ // + // If you want to wait for the plugin process to close before moving on, + // you can wait on CloseCh. + Context context.Context + + // If this channel is non-nil, we will send the ReattachConfig via + // this channel. This can be encoded (via JSON recommended) to the + // plugin client to attach to this plugin. + ReattachConfigCh chan<- *ReattachConfig + + // CloseCh, if non-nil, will be closed when serving exits. This can be + // used along with Context to determine when the server is fully shut down. + // If this is not set, you can still use Context on its own, but note there + // may be a period of time between canceling the context and the plugin + // server being shut down. + CloseCh chan<- struct{} + + // SyncStdio, if true, will enable the client side "SyncStdout/Stderr" + // functionality to work. This defaults to false because the implementation + // of making this work within test environments is particularly messy + // and SyncStdio functionality is fairly rare, so we default to the simple + // scenario. + SyncStdio bool } // protocolVersion determines the protocol version and plugin set to be used by @@ -169,26 +214,46 @@ func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { // Serve serves the plugins given by ServeConfig. // // Serve doesn't return until the plugin is done being executed. Any -// errors will be outputted to os.Stderr. +// fixable errors will be output to os.Stderr and the process will +// exit with a status code of 1. Serve will panic for unexpected +// conditions where a user's fix is unknown. // // This is the method that plugins should call in their main() functions. func Serve(opts *ServeConfig) { - // Validate the handshake config - if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { - fmt.Fprintf(os.Stderr, - "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ - "key or value was set. Please notify the plugin author and report\n"+ - "this as a bug.\n") - os.Exit(1) - } + exitCode := -1 + // We use this to trigger an `os.Exit` so that we can execute our other + // deferred functions. In test mode, we just output the err to stderr + // and return. + defer func() { + if opts.Test == nil && exitCode >= 0 { + os.Exit(exitCode) + } - // First check the cookie - if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { - fmt.Fprintf(os.Stderr, - "This binary is a plugin. These are not meant to be executed directly.\n"+ - "Please execute the program that consumes these plugins, which will\n"+ - "load any plugins automatically\n") - os.Exit(1) + if opts.Test != nil && opts.Test.CloseCh != nil { + close(opts.Test.CloseCh) + } + }() + + if opts.Test == nil { + // Validate the handshake config + if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" { + fmt.Fprintf(os.Stderr, + "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+ + "key or value was set. Please notify the plugin author and report\n"+ + "this as a bug.\n") + exitCode = 1 + return + } + + // First check the cookie + if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue { + fmt.Fprintf(os.Stderr, + "This binary is a plugin. These are not meant to be executed directly.\n"+ + "Please execute the program that consumes these plugins, which will\n"+ + "load any plugins automatically\n") + exitCode = 1 + return + } } // negotiate the version and plugins @@ -208,19 +273,6 @@ func Serve(opts *ServeConfig) { }) } - // Create our new stdout, stderr files. 
These will override our built-in - // stdout/stderr so that it works across the stream boundary. - stdout_r, stdout_w, err := os.Pipe() - if err != nil { - fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) - os.Exit(1) - } - stderr_r, stderr_w, err := os.Pipe() - if err != nil { - fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) - os.Exit(1) - } - // Register a listener so we can accept a connection listener, err := serverListener() if err != nil { @@ -281,6 +333,33 @@ func Serve(opts *ServeConfig) { // Create the channel to tell us when we're done doneCh := make(chan struct{}) + // Create our new stdout, stderr files. These will override our built-in + // stdout/stderr so that it works across the stream boundary. + var stdout_r, stderr_r io.Reader + stdout_r, stdout_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + stderr_r, stderr_w, err := os.Pipe() + if err != nil { + fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err) + os.Exit(1) + } + + // If we're in test mode, we tee off the reader and write the data + // as-is to our normal Stdout and Stderr so that they continue working + // while stdio works. This is because in test mode, we assume we're running + // in `go test` or some equivalent and we want output to go to standard + // locations. + if opts.Test != nil { + // TODO(mitchellh): This isn't super ideal because a TeeReader + // only works if the reader side is actively read. If we never + // connect via a plugin client, the output still gets swallowed. + stdout_r = io.TeeReader(stdout_r, os.Stdout) + stderr_r = io.TeeReader(stderr_r, os.Stderr) + } + // Build the server type var server ServerProtocol switch protoType { @@ -323,35 +402,96 @@ func Serve(opts *ServeConfig) { logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) - // Output the address and service name to stdout so that the client can bring it up. - fmt.Printf("%d|%d|%s|%s|%s|%s\n", - CoreProtocolVersion, - protoVersion, - listener.Addr().Network(), - listener.Addr().String(), - protoType, - serverCert) - os.Stdout.Sync() - - // Eat the interrupts - ch := make(chan os.Signal, 1) - signal.Notify(ch, os.Interrupt) - go func() { - var count int32 = 0 - for { - <-ch - newCount := atomic.AddInt32(&count, 1) - logger.Debug("plugin received interrupt signal, ignoring", "count", newCount) + // Output the address and service name to stdout so that the client can + // bring it up. In test mode, we don't do this because clients will + // attach via a reattach config. + if opts.Test == nil { + fmt.Printf("%d|%d|%s|%s|%s|%s\n", + CoreProtocolVersion, + protoVersion, + listener.Addr().Network(), + listener.Addr().String(), + protoType, + serverCert) + os.Stdout.Sync() + } else if ch := opts.Test.ReattachConfigCh; ch != nil { + // Send back the reattach config that can be used. This isn't + // quite ready if they connect immediately but the client should + // retry a few times. + ch <- &ReattachConfig{ + Protocol: protoType, + Addr: listener.Addr(), + Pid: os.Getpid(), + Test: true, } - }() + } + + // Eat the interrupts. In test mode we disable this so that go test + // can be cancelled properly. 
+ if opts.Test == nil { + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt) + go func() { + count := 0 + for { + <-ch + count++ + logger.Trace("plugin received interrupt signal, ignoring", "count", count) + } + }() + } - // Set our new out, err - os.Stdout = stdout_w - os.Stderr = stderr_w + // Set our stdout, stderr to the stdio stream that clients can retrieve + // using ClientConfig.SyncStdout/err. We only do this for non-test mode + // or if the test mode explicitly requests it. + // + // In test mode, we use a multiwriter so that the data continues going + // to the normal stdout/stderr so output can show up in test logs. We + // also send to the stdio stream so that clients can continue working + // if they depend on that. + if opts.Test == nil || opts.Test.SyncStdio { + if opts.Test != nil { + // In test mode we need to maintain the original values so we can + // reset it. + defer func(out, err *os.File) { + os.Stdout = out + os.Stderr = err + }(os.Stdout, os.Stderr) + } + os.Stdout = stdout_w + os.Stderr = stderr_w + } // Accept connections and wait for completion go server.Serve(listener) - <-doneCh + + ctx := context.Background() + if opts.Test != nil && opts.Test.Context != nil { + ctx = opts.Test.Context + } + select { + case <-ctx.Done(): + // Cancellation. We can stop the server by closing the listener. + // This isn't graceful at all but this is currently only used by + // tests and its our only way to stop. + listener.Close() + + // If this is a grpc server, then we also ask the server itself to + // end which will kill all connections. There isn't an easy way to do + // this for net/rpc currently but net/rpc is more and more unused. + if s, ok := server.(*GRPCServer); ok { + s.Stop() + } + + // Wait for the server itself to shut down + <-doneCh + + case <-doneCh: + // Note that given the documentation of Serve we should probably be + // setting exitCode = 0 and using os.Exit here. That's how it used to + // work before extracting this library. However, for years we've done + // this so we'll keep this functionality. 
+ } } func serverListener() (net.Listener, error) { @@ -390,7 +530,7 @@ func serverListener_tcp() (net.Listener, error) { } if minPort > maxPort { - return nil, fmt.Errorf("ENV_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) + return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) } for port := minPort; port <= maxPort; port++ { diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go index 608996ede..911227f61 100644 --- a/vendor/github.com/hashicorp/go-uuid/uuid.go +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -40,7 +40,7 @@ func FormatUUID(buf []byte) (string, error) { } func ParseUUID(uuid string) ([]byte, error) { - if len(uuid) != 2*uuidLen+4 { + if len(uuid) != 2 * uuidLen + 4 { return nil, fmt.Errorf("uuid string is wrong length") } diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go deleted file mode 100644 index 5673773b2..000000000 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ /dev/null @@ -1,161 +0,0 @@ -package simplelru - -import ( - "container/list" - "errors" -) - -// EvictCallback is used to get a callback when a cache entry is evicted -type EvictCallback func(key interface{}, value interface{}) - -// LRU implements a non-thread safe fixed size LRU cache -type LRU struct { - size int - evictList *list.List - items map[interface{}]*list.Element - onEvict EvictCallback -} - -// entry is used to hold a value in the evictList -type entry struct { - key interface{} - value interface{} -} - -// NewLRU constructs an LRU of the given size -func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { - if size <= 0 { - return nil, errors.New("Must provide a positive size") - } - c := &LRU{ - size: size, - evictList: list.New(), - items: make(map[interface{}]*list.Element), - onEvict: onEvict, - } - return c, nil -} - -// Purge is used to completely clear the cache. -func (c *LRU) Purge() { - for k, v := range c.items { - if c.onEvict != nil { - c.onEvict(k, v.Value.(*entry).value) - } - delete(c.items, k) - } - c.evictList.Init() -} - -// Add adds a value to the cache. Returns true if an eviction occurred. -func (c *LRU) Add(key, value interface{}) (evicted bool) { - // Check for existing item - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - ent.Value.(*entry).value = value - return false - } - - // Add new item - ent := &entry{key, value} - entry := c.evictList.PushFront(ent) - c.items[key] = entry - - evict := c.evictList.Len() > c.size - // Verify size not exceeded - if evict { - c.removeOldest() - } - return evict -} - -// Get looks up a key's value from the cache. -func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { - if ent, ok := c.items[key]; ok { - c.evictList.MoveToFront(ent) - return ent.Value.(*entry).value, true - } - return -} - -// Contains checks if a key is in the cache, without updating the recent-ness -// or deleting it for being stale. -func (c *LRU) Contains(key interface{}) (ok bool) { - _, ok = c.items[key] - return ok -} - -// Peek returns the key value (or undefined if not found) without updating -// the "recently used"-ness of the key. 
-func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { - var ent *list.Element - if ent, ok = c.items[key]; ok { - return ent.Value.(*entry).value, true - } - return nil, ok -} - -// Remove removes the provided key from the cache, returning if the -// key was contained. -func (c *LRU) Remove(key interface{}) (present bool) { - if ent, ok := c.items[key]; ok { - c.removeElement(ent) - return true - } - return false -} - -// RemoveOldest removes the oldest item from the cache. -func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - kv := ent.Value.(*entry) - return kv.key, kv.value, true - } - return nil, nil, false -} - -// GetOldest returns the oldest entry -func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { - ent := c.evictList.Back() - if ent != nil { - kv := ent.Value.(*entry) - return kv.key, kv.value, true - } - return nil, nil, false -} - -// Keys returns a slice of the keys in the cache, from oldest to newest. -func (c *LRU) Keys() []interface{} { - keys := make([]interface{}, len(c.items)) - i := 0 - for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { - keys[i] = ent.Value.(*entry).key - i++ - } - return keys -} - -// Len returns the number of items in the cache. -func (c *LRU) Len() int { - return c.evictList.Len() -} - -// removeOldest removes the oldest item from the cache. -func (c *LRU) removeOldest() { - ent := c.evictList.Back() - if ent != nil { - c.removeElement(ent) - } -} - -// removeElement is used to remove a given list element from the cache -func (c *LRU) removeElement(e *list.Element) { - c.evictList.Remove(e) - kv := e.Value.(*entry) - delete(c.items, kv.key) - if c.onEvict != nil { - c.onEvict(kv.key, kv.value) - } -} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go deleted file mode 100644 index 74c707744..000000000 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ /dev/null @@ -1,36 +0,0 @@ -package simplelru - -// LRUCache is the interface for simple LRU cache. -type LRUCache interface { - // Adds a value to the cache, returns true if an eviction occurred and - // updates the "recently used"-ness of the key. - Add(key, value interface{}) bool - - // Returns key's value from the cache and - // updates the "recently used"-ness of the key. #value, isFound - Get(key interface{}) (value interface{}, ok bool) - - // Check if a key exsists in cache without updating the recent-ness. - Contains(key interface{}) (ok bool) - - // Returns key's value without updating the "recently used"-ness of the key. - Peek(key interface{}) (value interface{}, ok bool) - - // Removes a key from the cache. - Remove(key interface{}) bool - - // Removes the oldest entry from cache. - RemoveOldest() (interface{}, interface{}, bool) - - // Returns the oldest entry from the cache. #key, value, isFound - GetOldest() (interface{}, interface{}, bool) - - // Returns a slice of the keys in the cache, from oldest to newest. - Keys() []interface{} - - // Returns the number of items in the cache. 
- Len() int - - // Clear all cache entries - Purge() -} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go index a2ff7df18..9cb3aeef1 100644 --- a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go @@ -7,8 +7,8 @@ import ( "github.com/hashicorp/hcl/v2/hclsyntax" - "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hclparse" ctyjson "github.com/zclconf/go-cty/cty/json" ) diff --git a/vendor/github.com/hashicorp/terraform-exec/LICENSE b/vendor/github.com/hashicorp/terraform-exec/LICENSE new file mode 100644 index 000000000..a612ad981 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. 
"You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. 
You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/pubkey.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/pubkey.go new file mode 100644 index 000000000..a14f88c91 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfinstall/pubkey.go @@ -0,0 +1,32 @@ +package tfinstall + +const hashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f +W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq +fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA +3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca +KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k +SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 +cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JAU4EEwEKADgWIQSRpuf4XQXG +VjC+8YlRhS2HNI/8TAUCXn0BIQIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK +CRBRhS2HNI/8TJITCACT2Zu2l8Jo/YLQMs+iYsC3gn5qJE/qf60VWpOnP0LG24rj +k3j4ET5P2ow/o9lQNCM/fJrEB2CwhnlvbrLbNBbt2e35QVWvvxwFZwVcoBQXTXdT ++G2cKS2Snc0bhNF7jcPX1zau8gxLurxQBaRdoL38XQ41aKfdOjEico4ZxQYSrOoC +RbF6FODXj+ZL8CzJFa2Sd0rHAROHoF7WhKOvTrg1u8JvHrSgvLYGBHQZUV23cmXH +yvzITl5jFzORf9TUdSv8tnuAnNsOV4vOA6lj61Z3/0Vgor+ZByfiznonPHQtKYtY +kac1M/Dq2xZYiSf0tDFywgUDIF/IyS348wKmnDGjuQENBFMORM0BCADWj1GNOP4O +wJmJDjI2gmeok6fYQeUbI/+Hnv5Z/cAK80Tvft3noy1oedxaDdazvrLu7YlyQOWA +M1curbqJa6ozPAwc7T8XSwWxIuFfo9rStHQE3QUARxIdziQKTtlAbXI2mQU99c6x +vSueQ/gq3ICFRBwCmPAm+JCwZG+cDLJJ/g6wEilNATSFdakbMX4lHUB2X0qradNO +J66pdZWxTCxRLomPBWa5JEPanbosaJk0+n9+P6ImPiWpt8wiu0Qzfzo7loXiDxo/ +0G8fSbjYsIF+skY+zhNbY1MenfIPctB9X5iyW291mWW7rhhZyuqqxN2xnmPPgFmi +QGd+8KVodadHABEBAAGJATwEGAECACYCGwwWIQSRpuf4XQXGVjC+8YlRhS2HNI/8 +TAUCXn0BRAUJEvOKdwAKCRBRhS2HNI/8TEzUB/9pEHVwtTxL8+VRq559Q0tPOIOb +h3b+GroZRQGq/tcQDVbYOO6cyRMR9IohVJk0b9wnnUHoZpoA4H79UUfIB4sZngma +enL/9magP1uAHxPxEa5i/yYqR0MYfz4+PGdvqyj91NrkZm3WIpwzqW/KZp8YnD77 +VzGVodT8xqAoHW+bHiza9Jmm9Rkf5/0i0JY7GXoJgk4QBG/Fcp0OR5NUWxN3PEM0 +dpeiU4GI5wOz5RAIOvSv7u1h0ZxMnJG4B4MKniIAr4yD7WYYZh/VxEPeiS/E1CVx +qHV5VVCoEIoYVHIuFIyFu1lIcei53VD6V690rmn0bp4A5hs+kErhThvkok3c +=+mCN +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/tfinstall.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/tfinstall.go new file mode 100644 index 000000000..0330f03b6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfinstall/tfinstall.go @@ -0,0 +1,287 @@ +package tfinstall + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + "github.com/hashicorp/go-checkpoint" + "github.com/hashicorp/go-getter" + "github.com/hashicorp/go-version" + "golang.org/x/crypto/openpgp" +) + +const baseUrl = "https://releases.hashicorp.com/terraform" + +type ExecPathFinder interface { + ExecPath() (string, error) +} + +type ExactPathOption struct { + execPath string +} + +func ExactPath(execPath string) *ExactPathOption { + opt := &ExactPathOption{ + execPath: execPath, + } + return opt +} + +func (opt *ExactPathOption) ExecPath() (string, error) { + if _, err := os.Stat(opt.execPath); err != nil { + // fall through to the next strategy if the local path does not exist + return "", nil + } + return opt.execPath, nil +} + +type LookPathOption struct { +} + +func LookPath() *LookPathOption { + opt := &LookPathOption{} + + return opt +} + +func (opt *LookPathOption) ExecPath() (string, error) { + p, err := exec.LookPath("terraform") + if err != nil { + if notFoundErr, ok := err.(*exec.Error); ok && notFoundErr.Err 
== exec.ErrNotFound { + log.Printf("[WARN] could not locate a terraform executable on system path; continuing") + return "", nil + } + return "", err + } + return p, nil +} + +type LatestVersionOption struct { + forceCheckpoint bool + installDir string +} + +func LatestVersion(installDir string, forceCheckpoint bool) *LatestVersionOption { + opt := &LatestVersionOption{ + forceCheckpoint: forceCheckpoint, + installDir: installDir, + } + + return opt +} + +func (opt *LatestVersionOption) ExecPath() (string, error) { + v, err := latestVersion(opt.forceCheckpoint) + if err != nil { + return "", err + } + + return downloadWithVerification(v, opt.installDir) +} + +type ExactVersionOption struct { + tfVersion string + installDir string +} + +func ExactVersion(tfVersion string, installDir string) *ExactVersionOption { + opt := &ExactVersionOption{ + tfVersion: tfVersion, + installDir: installDir, + } + + return opt +} + +func (opt *ExactVersionOption) ExecPath() (string, error) { + // validate version + _, err := version.NewVersion(opt.tfVersion) + if err != nil { + return "", err + } + + return downloadWithVerification(opt.tfVersion, opt.installDir) +} + +func Find(opts ...ExecPathFinder) (string, error) { + var terraformPath string + + // go through the options in order + // until a valid terraform executable is found + for _, opt := range opts { + p, err := opt.ExecPath() + if err != nil { + return "", fmt.Errorf("unexpected error: %s", err) + } + + if p == "" { + // strategy did not locate an executable - fall through to next + continue + } else { + terraformPath = p + break + } + } + + err := runTerraformVersion(terraformPath) + if err != nil { + return "", fmt.Errorf("executable found at path %s is not terraform: %s", terraformPath, err) + } + + if terraformPath == "" { + return "", fmt.Errorf("could not find terraform executable") + } + + return terraformPath, nil +} + +func downloadWithVerification(tfVersion string, installDir string) (string, error) { + osName := runtime.GOOS + archName := runtime.GOARCH + + // setup: ensure we have a place to put our downloaded terraform binary + var tfDir string + var err error + if installDir == "" { + tfDir, err = ioutil.TempDir("", "tfexec") + if err != nil { + return "", fmt.Errorf("failed to create temp dir: %s", err) + } + } else { + if _, err := os.Stat(installDir); err != nil { + return "", fmt.Errorf("could not access directory %s for installing Terraform: %s", installDir, err) + } + tfDir = installDir + + } + + // setup: getter client + httpHeader := make(http.Header) + httpHeader.Set("User-Agent", "HashiCorp-tfinstall/"+Version) + httpGetter := &getter.HttpGetter{ + Netrc: true, + } + client := getter.Client{ + Getters: map[string]getter.Getter{ + "https": httpGetter, + }, + } + client.Mode = getter.ClientModeAny + + // firstly, download and verify the signature of the checksum file + + sumsTmpDir, err := ioutil.TempDir("", "tfinstall") + if err != nil { + return "", err + } + defer os.RemoveAll(sumsTmpDir) + + sumsFilename := "terraform_" + tfVersion + "_SHA256SUMS" + sumsSigFilename := sumsFilename + ".sig" + + sumsUrl := fmt.Sprintf("%s/%s/%s", + baseUrl, tfVersion, sumsFilename) + sumsSigUrl := fmt.Sprintf("%s/%s/%s", + baseUrl, tfVersion, sumsSigFilename) + + client.Src = sumsUrl + client.Dst = sumsTmpDir + err = client.Get() + if err != nil { + return "", fmt.Errorf("error fetching checksums: %s", err) + } + + client.Src = sumsSigUrl + err = client.Get() + if err != nil { + return "", fmt.Errorf("error fetching checksums signature: 
%s", err) + } + + sumsPath := filepath.Join(sumsTmpDir, sumsFilename) + sumsSigPath := filepath.Join(sumsTmpDir, sumsSigFilename) + + err = verifySumsSignature(sumsPath, sumsSigPath) + if err != nil { + return "", err + } + + // secondly, download Terraform itself, verifying the checksum + url := tfUrl(tfVersion, osName, archName) + client.Src = url + client.Dst = tfDir + client.Mode = getter.ClientModeDir + err = client.Get() + if err != nil { + return "", err + } + + return filepath.Join(tfDir, "terraform"), nil +} + +func tfUrl(tfVersion, osName, archName string) string { + sumsFilename := "terraform_" + tfVersion + "_SHA256SUMS" + sumsUrl := fmt.Sprintf("%s/%s/%s", + baseUrl, tfVersion, sumsFilename) + return fmt.Sprintf( + "%s/%s/terraform_%s_%s_%s.zip?checksum=file:%s", + baseUrl, tfVersion, tfVersion, osName, archName, sumsUrl, + ) +} + +func latestVersion(forceCheckpoint bool) (string, error) { + resp, err := checkpoint.Check(&checkpoint.CheckParams{ + Product: "terraform", + Force: forceCheckpoint, + }) + if err != nil { + return "", err + } + + if resp.CurrentVersion == "" { + return "", fmt.Errorf("could not determine latest version of terraform using checkpoint: CHECKPOINT_DISABLE may be set") + } + + return resp.CurrentVersion, nil +} + +// verifySumsSignature downloads SHA256SUMS and SHA256SUMS.sig and verifies +// the signature using the HashiCorp public key. +func verifySumsSignature(sumsPath, sumsSigPath string) error { + el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(hashicorpPublicKey)) + if err != nil { + return err + } + data, err := os.Open(sumsPath) + if err != nil { + return err + } + sig, err := os.Open(sumsSigPath) + if err != nil { + return err + } + _, err = openpgp.CheckDetachedSignature(el, data, sig) + + return err +} + +func runTerraformVersion(execPath string) error { + cmd := exec.Command(execPath, "version") + + out, err := cmd.Output() + if err != nil { + return err + } + + if !strings.HasPrefix(string(out), "Terraform v") { + return fmt.Errorf("located executable at %s, but output of `terraform version` was:\n%s", execPath, out) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-exec/tfinstall/version.go b/vendor/github.com/hashicorp/terraform-exec/tfinstall/version.go new file mode 100644 index 000000000..d13844aa0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-exec/tfinstall/version.go @@ -0,0 +1,4 @@ +package tfinstall + +// Version is the tfinstall package version, used in user agent headers +const Version = "0.3.0" diff --git a/vendor/github.com/hashicorp/terraform-json/schemas.go b/vendor/github.com/hashicorp/terraform-json/schemas.go index e025fbe04..5dd430a55 100644 --- a/vendor/github.com/hashicorp/terraform-json/schemas.go +++ b/vendor/github.com/hashicorp/terraform-json/schemas.go @@ -83,6 +83,18 @@ type Schema struct { Block *SchemaBlock `json:"block,omitempty"` } +// SchemaDescriptionKind describes the format type for a particular description's field. +type SchemaDescriptionKind string + +const ( + // SchemaDescriptionKindPlain indicates a string in plain text format. + SchemaDescriptionKindPlain SchemaDescriptionKind = "plaintext" + + // SchemaDescriptionKindMarkdown indicates a Markdown string and may need to be + // processed prior to presentation. + SchemaDescriptionKindMarkdown SchemaDescriptionKind = "markdown" +) + // SchemaBlock represents a nested block within a particular schema. type SchemaBlock struct { // The attributes defined at the particular level of this block. 
@@ -90,6 +102,14 @@ type SchemaBlock struct { // Any nested blocks within this particular block. NestedBlocks map[string]*SchemaBlockType `json:"block_types,omitempty"` + + // The description for this block and format of the description. If + // no kind is provided, it can be assumed to be plain text. + Description string `json:"description,omitempty"` + DescriptionKind SchemaDescriptionKind `json:"description_kind,omitempty"` + + // If true, this block is deprecated. + Deprecated bool `json:"deprecated,omitempty"` } // SchemaNestingMode is the nesting mode for a particular nested @@ -145,8 +165,13 @@ type SchemaAttribute struct { // The attribute type. AttributeType cty.Type `json:"type,omitempty"` - // The description field for this attribute. - Description string `json:"description,omitempty"` + // The description field for this attribute. If no kind is + // provided, it can be assumed to be plain text. + Description string `json:"description,omitempty"` + DescriptionKind SchemaDescriptionKind `json:"description_kind,omitempty"` + + // If true, this attribute is deprecated. + Deprecated bool `json:"deprecated,omitempty"` // If true, this attribute is required - it has to be entered in // configuration. diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go deleted file mode 100644 index b09199953..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/compose.go +++ /dev/null @@ -1,72 +0,0 @@ -package customdiff - -import ( - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// All returns a CustomizeDiffFunc that runs all of the given -// CustomizeDiffFuncs and returns all of the errors produced. -// -// If one function produces an error, functions after it are still run. -// If this is not desirable, use function Sequence instead. -// -// If multiple functions returns errors, the result is a multierror. -// -// For example: -// -// &schema.Resource{ -// // ... -// CustomizeDiff: customdiff.All( -// customdiff.ValidateChange("size", func (old, new, meta interface{}) error { -// // If we are increasing "size" then the new value must be -// // a multiple of the old value. -// if new.(int) <= old.(int) { -// return nil -// } -// if (new.(int) % old.(int)) != 0 { -// return fmt.Errorf("new size value must be an integer multiple of old value %d", old.(int)) -// } -// return nil -// }), -// customdiff.ForceNewIfChange("size", func (old, new, meta interface{}) bool { -// // "size" can only increase in-place, so we must create a new resource -// // if it is decreased. -// return new.(int) < old.(int) -// }), -// customdiff.ComputedIf("version_id", func (d *schema.ResourceDiff, meta interface{}) bool { -// // Any change to "content" causes a new "version_id" to be allocated. -// return d.HasChange("content") -// }), -// ), -// } -// -func All(funcs ...schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - var err error - for _, f := range funcs { - thisErr := f(d, meta) - if thisErr != nil { - err = multierror.Append(err, thisErr) - } - } - return err - } -} - -// Sequence returns a CustomizeDiffFunc that runs all of the given -// CustomizeDiffFuncs in sequence, stopping at the first one that returns -// an error and returning that error. -// -// If all functions succeed, the combined function also succeeds. 
-func Sequence(funcs ...schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - for _, f := range funcs { - err := f(d, meta) - if err != nil { - return err - } - } - return nil - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go deleted file mode 100644 index 54ea5c402..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/computed.go +++ /dev/null @@ -1,16 +0,0 @@ -package customdiff - -import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// ComputedIf returns a CustomizeDiffFunc that sets the given key's new value -// as computed if the given condition function returns true. -func ComputedIf(key string, f ResourceConditionFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - if f(d, meta) { - d.SetNewComputed(key) - } - return nil - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go deleted file mode 100644 index 1d8e2bfd6..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/condition.go +++ /dev/null @@ -1,60 +0,0 @@ -package customdiff - -import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// ResourceConditionFunc is a function type that makes a boolean decision based -// on an entire resource diff. -type ResourceConditionFunc func(d *schema.ResourceDiff, meta interface{}) bool - -// ValueChangeConditionFunc is a function type that makes a boolean decision -// by comparing two values. -type ValueChangeConditionFunc func(old, new, meta interface{}) bool - -// ValueConditionFunc is a function type that makes a boolean decision based -// on a given value. -type ValueConditionFunc func(value, meta interface{}) bool - -// If returns a CustomizeDiffFunc that calls the given condition -// function and then calls the given CustomizeDiffFunc only if the condition -// function returns true. -// -// This can be used to include conditional customizations when composing -// customizations using All and Sequence, but should generally be used only in -// simple scenarios. Prefer directly writing a CustomizeDiffFunc containing -// a conditional branch if the given CustomizeDiffFunc is already a -// locally-defined function, since this avoids obscuring the control flow. -func If(cond ResourceConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - if cond(d, meta) { - return f(d, meta) - } - return nil - } -} - -// IfValueChange returns a CustomizeDiffFunc that calls the given condition -// function with the old and new values of the given key and then calls the -// given CustomizeDiffFunc only if the condition function returns true. -func IfValueChange(key string, cond ValueChangeConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - old, new := d.GetChange(key) - if cond(old, new, meta) { - return f(d, meta) - } - return nil - } -} - -// IfValue returns a CustomizeDiffFunc that calls the given condition -// function with the new values of the given key and then calls the -// given CustomizeDiffFunc only if the condition function returns true. 
-func IfValue(key string, cond ValueConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - if cond(d.Get(key), meta) { - return f(d, meta) - } - return nil - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/doc.go deleted file mode 100644 index c6ad1199c..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -// Package customdiff provides a set of reusable and composable functions -// to enable more "declarative" use of the CustomizeDiff mechanism available -// for resources in package helper/schema. -// -// The intent of these helpers is to make the intent of a set of diff -// customizations easier to see, rather than lost in a sea of Go function -// boilerplate. They should _not_ be used in situations where they _obscure_ -// intent, e.g. by over-using the composition functions where a single -// function containing normal Go control flow statements would be more -// straightforward. -package customdiff diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go deleted file mode 100644 index 26afa8cb6..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/force_new.go +++ /dev/null @@ -1,40 +0,0 @@ -package customdiff - -import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// ForceNewIf returns a CustomizeDiffFunc that flags the given key as -// requiring a new resource if the given condition function returns true. -// -// The return value of the condition function is ignored if the old and new -// values of the field compare equal, since no attribute diff is generated in -// that case. -func ForceNewIf(key string, f ResourceConditionFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - if f(d, meta) { - d.ForceNew(key) - } - return nil - } -} - -// ForceNewIfChange returns a CustomizeDiffFunc that flags the given key as -// requiring a new resource if the given condition function returns true. -// -// The return value of the condition function is ignored if the old and new -// values compare equal, since no attribute diff is generated in that case. -// -// This function is similar to ForceNewIf but provides the condition function -// only the old and new values of the given key, which leads to more compact -// and explicit code in the common case where the decision can be made with -// only the specific field value. 
-func ForceNewIfChange(key string, f ValueChangeConditionFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - old, new := d.GetChange(key) - if f(old, new, meta) { - d.ForceNew(key) - } - return nil - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go deleted file mode 100644 index 0bc2c6950..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/customdiff/validate.go +++ /dev/null @@ -1,38 +0,0 @@ -package customdiff - -import ( - "github.com/hashicorp/terraform-plugin-sdk/helper/schema" -) - -// ValueChangeValidationFunc is a function type that validates the difference -// (or lack thereof) between two values, returning an error if the change -// is invalid. -type ValueChangeValidationFunc func(old, new, meta interface{}) error - -// ValueValidationFunc is a function type that validates a particular value, -// returning an error if the value is invalid. -type ValueValidationFunc func(value, meta interface{}) error - -// ValidateChange returns a CustomizeDiffFunc that applies the given validation -// function to the change for the given key, returning any error produced. -func ValidateChange(key string, f ValueChangeValidationFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - old, new := d.GetChange(key) - return f(old, new, meta) - } -} - -// ValidateValue returns a CustomizeDiffFunc that applies the given validation -// function to value of the given key, returning any error produced. -// -// This should generally not be used since it is functionally equivalent to -// a validation function applied directly to the schema attribute in question, -// but is provided for situations where composing multiple CustomizeDiffFuncs -// together makes intent clearer than spreading that validation across the -// schema. -func ValidateValue(key string, f ValueValidationFunc) schema.CustomizeDiffFunc { - return func(d *schema.ResourceDiff, meta interface{}) error { - val := d.Get(key) - return f(val, meta) - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/encryption/encryption.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/encryption/encryption.go deleted file mode 100644 index 110ed18cd..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/encryption/encryption.go +++ /dev/null @@ -1,40 +0,0 @@ -package encryption - -import ( - "encoding/base64" - "fmt" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys" -) - -// RetrieveGPGKey returns the PGP key specified as the pgpKey parameter, or queries -// the public key from the keybase service if the parameter is a keybase username -// prefixed with the phrase "keybase:" -func RetrieveGPGKey(pgpKey string) (string, error) { - const keybasePrefix = "keybase:" - - encryptionKey := pgpKey - if strings.HasPrefix(pgpKey, keybasePrefix) { - publicKeys, err := pgpkeys.FetchKeybasePubkeys([]string{pgpKey}) - if err != nil { - return "", errwrap.Wrapf(fmt.Sprintf("Error retrieving Public Key for %s: {{err}}", pgpKey), err) - } - encryptionKey = publicKeys[pgpKey] - } - - return encryptionKey, nil -} - -// EncryptValue encrypts the given value with the given encryption key. Description -// should be set such that errors return a meaningful user-facing response. 
-func EncryptValue(encryptionKey, value, description string) (string, string, error) { - fingerprints, encryptedValue, err := - pgpkeys.EncryptShares([][]byte{[]byte(value)}, []string{encryptionKey}) - if err != nil { - return "", "", errwrap.Wrapf(fmt.Sprintf("Error encrypting %s: {{err}}", description), err) - } - - return fingerprints[0], base64.StdEncoding.EncodeToString(encryptedValue[0]), nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/mutexkv/mutexkv.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/mutexkv/mutexkv.go deleted file mode 100644 index 6917f2142..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/helper/mutexkv/mutexkv.go +++ /dev/null @@ -1,51 +0,0 @@ -package mutexkv - -import ( - "log" - "sync" -) - -// MutexKV is a simple key/value store for arbitrary mutexes. It can be used to -// serialize changes across arbitrary collaborators that share knowledge of the -// keys they must serialize on. -// -// The initial use case is to let aws_security_group_rule resources serialize -// their access to individual security groups based on SG ID. -type MutexKV struct { - lock sync.Mutex - store map[string]*sync.Mutex -} - -// Locks the mutex for the given key. Caller is responsible for calling Unlock -// for the same key -func (m *MutexKV) Lock(key string) { - log.Printf("[DEBUG] Locking %q", key) - m.get(key).Lock() - log.Printf("[DEBUG] Locked %q", key) -} - -// Unlock the mutex for the given key. Caller must have called Lock for the same key first -func (m *MutexKV) Unlock(key string) { - log.Printf("[DEBUG] Unlocking %q", key) - m.get(key).Unlock() - log.Printf("[DEBUG] Unlocked %q", key) -} - -// Returns a mutex for the given key, no guarantee of its lock status -func (m *MutexKV) get(key string) *sync.Mutex { - m.lock.Lock() - defer m.lock.Unlock() - mutex, ok := m.store[key] - if !ok { - mutex = &sync.Mutex{} - m.store[key] = mutex - } - return mutex -} - -// Returns a properly initalized MutexKV -func NewMutexKV() *MutexKV { - return &MutexKV{ - store: make(map[string]*sync.Mutex), - } -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go index 57914a49f..641e71dec 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/initwd/from_module.go @@ -2,6 +2,7 @@ package initwd import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig" "io/ioutil" "log" "os" @@ -9,8 +10,6 @@ import ( "sort" "strings" - "github.com/hashicorp/terraform-plugin-sdk/internal/earlyconfig" - version "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-config-inspect/tfconfig" "github.com/hashicorp/terraform-plugin-sdk/internal/modsdir" diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go index 7a72467da..4ef22052c 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/client.go @@ -16,7 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc" "github.com/hashicorp/terraform-plugin-sdk/internal/registry/response" "github.com/hashicorp/terraform-plugin-sdk/internal/version" - svchost "github.com/hashicorp/terraform-svchost" + 
"github.com/hashicorp/terraform-svchost" "github.com/hashicorp/terraform-svchost/disco" ) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go index 4ac22db87..c9bc40bee 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/friendly_host.go @@ -4,7 +4,7 @@ import ( "regexp" "strings" - svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost" ) var ( diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go index f3175e6a3..7205d03b8 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/registry/regsrc/terraform_provider.go @@ -5,7 +5,7 @@ import ( "runtime" "strings" - svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost" ) var ( diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go index c6902cb62..86fd21e41 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/tfplugin5/tfplugin5.pb.go @@ -6,12 +6,11 @@ package tfplugin5 import ( context "context" fmt "fmt" - math "math" - proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/encrypt_decrypt.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/encrypt_decrypt.go deleted file mode 100644 index 8f6be2bf0..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/encrypt_decrypt.go +++ /dev/null @@ -1,85 +0,0 @@ -package pgpkeys - -import ( - "bytes" - "encoding/base64" - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/keybase/go-crypto/openpgp" - "github.com/keybase/go-crypto/openpgp/packet" -) - -// EncryptShares takes an ordered set of byte slices to encrypt and the -// corresponding base64-encoded public keys to encrypt them with, encrypts each -// byte slice with the corresponding public key. 
-// -// Note: There is no corresponding test function; this functionality is -// thoroughly tested in the init and rekey command unit tests -func EncryptShares(input [][]byte, pgpKeys []string) ([]string, [][]byte, error) { - if len(input) != len(pgpKeys) { - return nil, nil, fmt.Errorf("mismatch between number items to encrypt and number of PGP keys") - } - encryptedShares := make([][]byte, 0, len(pgpKeys)) - entities, err := GetEntities(pgpKeys) - if err != nil { - return nil, nil, err - } - for i, entity := range entities { - ctBuf := bytes.NewBuffer(nil) - pt, err := openpgp.Encrypt(ctBuf, []*openpgp.Entity{entity}, nil, nil, nil) - if err != nil { - return nil, nil, errwrap.Wrapf("error setting up encryption for PGP message: {{err}}", err) - } - _, err = pt.Write(input[i]) - if err != nil { - return nil, nil, errwrap.Wrapf("error encrypting PGP message: {{err}}", err) - } - pt.Close() - encryptedShares = append(encryptedShares, ctBuf.Bytes()) - } - - fingerprints, err := GetFingerprints(nil, entities) - if err != nil { - return nil, nil, err - } - - return fingerprints, encryptedShares, nil -} - -// GetFingerprints takes in a list of openpgp Entities and returns the -// fingerprints. If entities is nil, it will instead parse both entities and -// fingerprints from the pgpKeys string slice. -func GetFingerprints(pgpKeys []string, entities []*openpgp.Entity) ([]string, error) { - if entities == nil { - var err error - entities, err = GetEntities(pgpKeys) - - if err != nil { - return nil, err - } - } - ret := make([]string, 0, len(entities)) - for _, entity := range entities { - ret = append(ret, fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint)) - } - return ret, nil -} - -// GetEntities takes in a string array of base64-encoded PGP keys and returns -// the openpgp Entities -func GetEntities(pgpKeys []string) ([]*openpgp.Entity, error) { - ret := make([]*openpgp.Entity, 0, len(pgpKeys)) - for _, keystring := range pgpKeys { - data, err := base64.StdEncoding.DecodeString(keystring) - if err != nil { - return nil, errwrap.Wrapf("error decoding given PGP key: {{err}}", err) - } - entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data))) - if err != nil { - return nil, errwrap.Wrapf("error parsing given PGP key: {{err}}", err) - } - ret = append(ret, entity) - } - return ret, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/flag.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/flag.go deleted file mode 100644 index e83dac99b..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/flag.go +++ /dev/null @@ -1,112 +0,0 @@ -package pgpkeys - -import ( - "bytes" - "encoding/base64" - "errors" - "fmt" - "os" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/keybase/go-crypto/openpgp" -) - -// PGPPubKeyFiles implements the flag.Value interface and allows parsing and -// reading a list of PGP public key files. -type PubKeyFilesFlag []string - -func (p *PubKeyFilesFlag) String() string { - return fmt.Sprint(*p) -} - -func (p *PubKeyFilesFlag) Set(val string) error { - if len(*p) > 0 { - return errors.New("can only be specified once") - } - - keys, err := ParsePGPKeys(strings.Split(val, ",")) - if err != nil { - return err - } - - *p = PubKeyFilesFlag(keys) - return nil -} - -// ParsePGPKeys takes a list of PGP keys and parses them either using keybase -// or reading them from disk and returns the "expanded" list of pgp keys in -// the same order. 
-func ParsePGPKeys(keyfiles []string) ([]string, error) { - keys := make([]string, len(keyfiles)) - - keybaseMap, err := FetchKeybasePubkeys(keyfiles) - if err != nil { - return nil, err - } - - for i, keyfile := range keyfiles { - keyfile = strings.TrimSpace(keyfile) - - if strings.HasPrefix(keyfile, kbPrefix) { - key, ok := keybaseMap[keyfile] - if !ok || key == "" { - return nil, fmt.Errorf("keybase user %q not found", strings.TrimPrefix(keyfile, kbPrefix)) - } - keys[i] = key - continue - } - - pgpStr, err := ReadPGPFile(keyfile) - if err != nil { - return nil, err - } - keys[i] = pgpStr - } - - return keys, nil -} - -// ReadPGPFile reads the given PGP file from disk. -func ReadPGPFile(path string) (string, error) { - if path[0] == '@' { - path = path[1:] - } - f, err := os.Open(path) - if err != nil { - return "", err - } - defer f.Close() - buf := bytes.NewBuffer(nil) - _, err = buf.ReadFrom(f) - if err != nil { - return "", err - } - - // First parse as an armored keyring file, if that doesn't work, treat it as a straight binary/b64 string - keyReader := bytes.NewReader(buf.Bytes()) - entityList, err := openpgp.ReadArmoredKeyRing(keyReader) - if err == nil { - if len(entityList) != 1 { - return "", fmt.Errorf("more than one key found in file %q", path) - } - if entityList[0] == nil { - return "", fmt.Errorf("primary key was nil for file %q", path) - } - - serializedEntity := bytes.NewBuffer(nil) - err = entityList[0].Serialize(serializedEntity) - if err != nil { - return "", errwrap.Wrapf(fmt.Sprintf("error serializing entity for file %q: {{err}}", path), err) - } - - return base64.StdEncoding.EncodeToString(serializedEntity.Bytes()), nil - } - - _, err = base64.StdEncoding.DecodeString(buf.String()) - if err == nil { - return buf.String(), nil - } - return base64.StdEncoding.EncodeToString(buf.Bytes()), nil - -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/keybase.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/keybase.go deleted file mode 100644 index 7d153346a..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/helper/pgpkeys/keybase.go +++ /dev/null @@ -1,117 +0,0 @@ -package pgpkeys - -import ( - "bytes" - "encoding/base64" - "fmt" - "strings" - - "github.com/hashicorp/errwrap" - cleanhttp "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/jsonutil" - "github.com/keybase/go-crypto/openpgp" -) - -const ( - kbPrefix = "keybase:" -) - -// FetchKeybasePubkeys fetches public keys from Keybase given a set of -// usernames, which are derived from correctly formatted input entries. It -// doesn't use their client code due to both the API and the fact that it is -// considered alpha and probably best not to rely on it. The keys are returned -// as base64-encoded strings. 
-func FetchKeybasePubkeys(input []string) (map[string]string, error) { - client := cleanhttp.DefaultClient() - if client == nil { - return nil, fmt.Errorf("unable to create an http client") - } - - if len(input) == 0 { - return nil, nil - } - - usernames := make([]string, 0, len(input)) - for _, v := range input { - if strings.HasPrefix(v, kbPrefix) { - usernames = append(usernames, strings.TrimPrefix(v, kbPrefix)) - } - } - - if len(usernames) == 0 { - return nil, nil - } - - ret := make(map[string]string, len(usernames)) - url := fmt.Sprintf("https://keybase.io/_/api/1.0/user/lookup.json?usernames=%s&fields=public_keys", strings.Join(usernames, ",")) - resp, err := client.Get(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - type PublicKeys struct { - Primary struct { - Bundle string - } - } - - type LThem struct { - PublicKeys `json:"public_keys"` - } - - type KbResp struct { - Status struct { - Name string - } - Them []LThem - } - - out := &KbResp{ - Them: []LThem{}, - } - - if err := jsonutil.DecodeJSONFromReader(resp.Body, out); err != nil { - return nil, err - } - - if out.Status.Name != "OK" { - return nil, fmt.Errorf("got non-OK response: %q", out.Status.Name) - } - - missingNames := make([]string, 0, len(usernames)) - var keyReader *bytes.Reader - serializedEntity := bytes.NewBuffer(nil) - for i, themVal := range out.Them { - if themVal.Primary.Bundle == "" { - missingNames = append(missingNames, usernames[i]) - continue - } - keyReader = bytes.NewReader([]byte(themVal.Primary.Bundle)) - entityList, err := openpgp.ReadArmoredKeyRing(keyReader) - if err != nil { - return nil, err - } - if len(entityList) != 1 { - return nil, fmt.Errorf("primary key could not be parsed for user %q", usernames[i]) - } - if entityList[0] == nil { - return nil, fmt.Errorf("primary key was nil for user %q", usernames[i]) - } - - serializedEntity.Reset() - err = entityList[0].Serialize(serializedEntity) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("error serializing entity for user %q: {{err}}", usernames[i]), err) - } - - // The API returns values in the same ordering requested, so this should properly match - ret[kbPrefix+usernames[i]] = base64.StdEncoding.EncodeToString(serializedEntity.Bytes()) - } - - if len(missingNames) > 0 { - return nil, fmt.Errorf("unable to fetch keys for user(s) %q from keybase", strings.Join(missingNames, ",")) - } - - return ret, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/compressutil/compress.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/compressutil/compress.go deleted file mode 100644 index 356d4548f..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/compressutil/compress.go +++ /dev/null @@ -1,207 +0,0 @@ -package compressutil - -import ( - "bytes" - "compress/gzip" - "compress/lzw" - "fmt" - "io" - - "github.com/golang/snappy" - "github.com/hashicorp/errwrap" - "github.com/pierrec/lz4" -) - -const ( - // A byte value used as a canary prefix for the compressed information - // which is used to distinguish if a JSON input is compressed or not. - // The value of this constant should not be a first character of any - // valid JSON string. 
- - CompressionTypeGzip = "gzip" - CompressionCanaryGzip byte = 'G' - - CompressionTypeLZW = "lzw" - CompressionCanaryLZW byte = 'L' - - CompressionTypeSnappy = "snappy" - CompressionCanarySnappy byte = 'S' - - CompressionTypeLZ4 = "lz4" - CompressionCanaryLZ4 byte = '4' -) - -// SnappyReadCloser embeds the snappy reader which implements the io.Reader -// interface. The decompress procedure in this utility expects an -// io.ReadCloser. This type implements the io.Closer interface to retain the -// generic way of decompression. -type CompressUtilReadCloser struct { - io.Reader -} - -// Close is a noop method implemented only to satisfy the io.Closer interface -func (c *CompressUtilReadCloser) Close() error { - return nil -} - -// CompressionConfig is used to select a compression type to be performed by -// Compress and Decompress utilities. -// Supported types are: -// * CompressionTypeLZW -// * CompressionTypeGzip -// * CompressionTypeSnappy -// * CompressionTypeLZ4 -// -// When using CompressionTypeGzip, the compression levels can also be chosen: -// * gzip.DefaultCompression -// * gzip.BestSpeed -// * gzip.BestCompression -type CompressionConfig struct { - // Type of the compression algorithm to be used - Type string - - // When using Gzip format, the compression level to employ - GzipCompressionLevel int -} - -// Compress places the canary byte in a buffer and uses the same buffer to fill -// in the compressed information of the given input. The configuration supports -// two type of compression: LZW and Gzip. When using Gzip compression format, -// if GzipCompressionLevel is not specified, the 'gzip.DefaultCompression' will -// be assumed. -func Compress(data []byte, config *CompressionConfig) ([]byte, error) { - var buf bytes.Buffer - var writer io.WriteCloser - var err error - - if config == nil { - return nil, fmt.Errorf("config is nil") - } - - // Write the canary into the buffer and create writer to compress the - // input data based on the configured type - switch config.Type { - case CompressionTypeLZW: - buf.Write([]byte{CompressionCanaryLZW}) - writer = lzw.NewWriter(&buf, lzw.LSB, 8) - - case CompressionTypeGzip: - buf.Write([]byte{CompressionCanaryGzip}) - - switch { - case config.GzipCompressionLevel == gzip.BestCompression, - config.GzipCompressionLevel == gzip.BestSpeed, - config.GzipCompressionLevel == gzip.DefaultCompression: - // These are valid compression levels - default: - // If compression level is set to NoCompression or to - // any invalid value, fallback to Defaultcompression - config.GzipCompressionLevel = gzip.DefaultCompression - } - writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel) - - case CompressionTypeSnappy: - buf.Write([]byte{CompressionCanarySnappy}) - writer = snappy.NewBufferedWriter(&buf) - - case CompressionTypeLZ4: - buf.Write([]byte{CompressionCanaryLZ4}) - writer = lz4.NewWriter(&buf) - - default: - return nil, fmt.Errorf("unsupported compression type") - } - - if err != nil { - return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err) - } - - if writer == nil { - return nil, fmt.Errorf("failed to create a compression writer") - } - - // Compress the input and place it in the same buffer containing the - // canary byte. 
- if _, err = writer.Write(data); err != nil { - return nil, errwrap.Wrapf("failed to compress input data: err: {{err}}", err) - } - - // Close the io.WriteCloser - if err = writer.Close(); err != nil { - return nil, err - } - - // Return the compressed bytes with canary byte at the start - return buf.Bytes(), nil -} - -// Decompress checks if the first byte in the input matches the canary byte. -// If the first byte is a canary byte, then the input past the canary byte -// will be decompressed using the method specified in the given configuration. -// If the first byte isn't a canary byte, then the utility returns a boolean -// value indicating that the input was not compressed. -func Decompress(data []byte) ([]byte, bool, error) { - var err error - var reader io.ReadCloser - if data == nil || len(data) == 0 { - return nil, false, fmt.Errorf("'data' being decompressed is empty") - } - - canary := data[0] - cData := data[1:] - - switch canary { - // If the first byte matches the canary byte, remove the canary - // byte and try to decompress the data that is after the canary. - case CompressionCanaryGzip: - if len(data) < 2 { - return nil, false, fmt.Errorf("invalid 'data' after the canary") - } - reader, err = gzip.NewReader(bytes.NewReader(cData)) - - case CompressionCanaryLZW: - if len(data) < 2 { - return nil, false, fmt.Errorf("invalid 'data' after the canary") - } - reader = lzw.NewReader(bytes.NewReader(cData), lzw.LSB, 8) - - case CompressionCanarySnappy: - if len(data) < 2 { - return nil, false, fmt.Errorf("invalid 'data' after the canary") - } - reader = &CompressUtilReadCloser{ - Reader: snappy.NewReader(bytes.NewReader(cData)), - } - - case CompressionCanaryLZ4: - if len(data) < 2 { - return nil, false, fmt.Errorf("invalid 'data' after the canary") - } - reader = &CompressUtilReadCloser{ - Reader: lz4.NewReader(bytes.NewReader(cData)), - } - - default: - // If the first byte doesn't match the canary byte, it means - // that the content was not compressed at all. Indicate the - // caller that the input was not compressed. 
- return nil, true, nil - } - if err != nil { - return nil, false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err) - } - if reader == nil { - return nil, false, fmt.Errorf("failed to create a compression reader") - } - - // Close the io.ReadCloser - defer reader.Close() - - // Read all the compressed data into a buffer - var buf bytes.Buffer - if _, err = io.Copy(&buf, reader); err != nil { - return nil, false, err - } - - return buf.Bytes(), false, nil -} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/jsonutil/json.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/jsonutil/json.go deleted file mode 100644 index b5dbca4f8..000000000 --- a/vendor/github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/jsonutil/json.go +++ /dev/null @@ -1,100 +0,0 @@ -package jsonutil - -import ( - "bytes" - "compress/gzip" - "encoding/json" - "fmt" - "io" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/internal/vault/sdk/helper/compressutil" -) - -// Encodes/Marshals the given object into JSON -func EncodeJSON(in interface{}) ([]byte, error) { - if in == nil { - return nil, fmt.Errorf("input for encoding is nil") - } - var buf bytes.Buffer - enc := json.NewEncoder(&buf) - if err := enc.Encode(in); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// EncodeJSONAndCompress encodes the given input into JSON and compresses the -// encoded value (using Gzip format BestCompression level, by default). A -// canary byte is placed at the beginning of the returned bytes for the logic -// in decompression method to identify compressed input. -func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) { - if in == nil { - return nil, fmt.Errorf("input for encoding is nil") - } - - // First JSON encode the given input - encodedBytes, err := EncodeJSON(in) - if err != nil { - return nil, err - } - - if config == nil { - config = &compressutil.CompressionConfig{ - Type: compressutil.CompressionTypeGzip, - GzipCompressionLevel: gzip.BestCompression, - } - } - - return compressutil.Compress(encodedBytes, config) -} - -// DecodeJSON tries to decompress the given data. The call to decompress, fails -// if the content was not compressed in the first place, which is identified by -// a canary byte before the compressed data. If the data is not compressed, it -// is JSON decoded directly. Otherwise the decompressed data will be JSON -// decoded. -func DecodeJSON(data []byte, out interface{}) error { - if data == nil || len(data) == 0 { - return fmt.Errorf("'data' being decoded is nil") - } - if out == nil { - return fmt.Errorf("output parameter 'out' is nil") - } - - // Decompress the data if it was compressed in the first place - decompressedBytes, uncompressed, err := compressutil.Decompress(data) - if err != nil { - return errwrap.Wrapf("failed to decompress JSON: {{err}}", err) - } - if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) { - return fmt.Errorf("decompressed data being decoded is invalid") - } - - // If the input supplied failed to contain the compression canary, it - // will be notified by the compression utility. Decode the decompressed - // input. 
- if !uncompressed { - data = decompressedBytes - } - - return DecodeJSONFromReader(bytes.NewReader(data), out) -} - -// Decodes/Unmarshals the given io.Reader pointing to a JSON, into a desired object -func DecodeJSONFromReader(r io.Reader, out interface{}) error { - if r == nil { - return fmt.Errorf("'io.Reader' being decoded is nil") - } - if out == nil { - return fmt.Errorf("output parameter 'out' is nil") - } - - dec := json.NewDecoder(r) - - // While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`. - dec.UseNumber() - - // Since 'out' is an interface representing a pointer, pass it to the decoder without an '&' - return dec.Decode(out) -} diff --git a/vendor/github.com/hashicorp/vault/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/LICENSE similarity index 50% rename from vendor/github.com/hashicorp/vault/LICENSE rename to vendor/github.com/hashicorp/terraform-plugin-sdk/v2/LICENSE index e87a115e4..c33dcc7c9 100644 --- a/vendor/github.com/hashicorp/vault/LICENSE +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/LICENSE @@ -2,89 +2,89 @@ Mozilla Public License, version 2.0 1. Definitions -1.1. "Contributor" +1.1. “Contributor” means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -1.2. "Contributor Version" +1.2. “Contributor Version” means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. + Contributor and that particular Contributor’s Contribution. -1.3. "Contribution" +1.3. “Contribution” means Covered Software of a particular Contributor. -1.4. "Covered Software" +1.4. “Covered Software” means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -1.5. "Incompatible With Secondary Licenses" +1.5. “Incompatible With Secondary Licenses” means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. -1.6. "Executable Form" +1.6. “Executable Form” means any form of the work other than Source Code Form. -1.7. "Larger Work" +1.7. “Larger Work” - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. -1.8. "License" +1.8. “License” means this document. -1.9. "Licensable" +1.9. “Licensable” - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. -1.10. "Modifications" +1.10. “Modifications” means any of the following: - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or + a. 
any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. -1.11. "Patent Claims" of a Contributor +1.11. “Patent Claims” of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. -1.12. "Secondary License" +1.12. “Secondary License” means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -1.13. "Source Code Form" +1.13. “Source Code Form” means the form of the work preferred for making modifications. -1.14. "You" (or "Your") +1.14. “You” (or “Your”) means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is + License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause + definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. @@ -100,59 +100,57 @@ Mozilla Public License, version 2.0 a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. 2.2. Effective Date - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. 2.3. Limitations on Grant Scope - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. 
- Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: a. for any code that a Contributor has removed from Covered Software; or - b. for infringements caused by: (i) Your and any other third party's + b. for infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). 2.5. Representation - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. 2.6. Fair Use - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions @@ -165,12 +163,11 @@ Mozilla Public License, version 2.0 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. 3.2. 
Distribution of Executable Form @@ -182,40 +179,39 @@ Mozilla Public License, version 2.0 reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). 3.4. Notices - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any @@ -224,14 +220,14 @@ Mozilla Public License, version 2.0 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. 5. Termination @@ -239,22 +235,21 @@ Mozilla Public License, version 2.0 fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. 
+ infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been @@ -263,16 +258,16 @@ Mozilla Public License, version 2.0 6. Disclaimer of Warranty - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. 7. Limitation of Liability @@ -284,29 +279,27 @@ Mozilla Public License, version 2.0 goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. 8. Litigation - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. 
+ Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. 9. Miscellaneous - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. 10. Versions of the License @@ -320,24 +313,23 @@ Mozilla Public License, version 2.0 10.2. Effect of New Versions - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice @@ -348,16 +340,15 @@ Exhibit A - Source Code Form License Notice obtain one at http://mozilla.org/MPL/2.0/. -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. 
+If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. -Exhibit B - "Incompatible With Secondary Licenses" Notice +Exhibit B - “Incompatible With Secondary Licenses” Notice - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go new file mode 100644 index 000000000..3b1ced344 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/diagnostic.go @@ -0,0 +1,104 @@ +package diag + +import ( + "errors" + "fmt" + + "github.com/hashicorp/go-cty/cty" +) + +// Diagnostics is a collection of Diagnostic. +// +// Developers should append and build the list of diagnostics up until a fatal +// error is reached, at which point they should return the Diagnostics to the +// SDK. +type Diagnostics []Diagnostic + +// HasError returns true is Diagnostics contains an instance of +// Severity == Error. +// +// This helper aims to mimic the go error practices of if err != nil. After any +// operation that returns Diagnostics, check that it HasError and bubble up the +// stack. +func (diags Diagnostics) HasError() bool { + for i := range diags { + if diags[i].Severity == Error { + return true + } + } + return false +} + +// Diagnostic is a contextual message intended at outlining problems in user +// configuration. +// +// It supports multiple levels of severity (Error or Warning), a short Summary +// of the problem, an optional longer Detail message that can assist the user in +// fixing the problem, as well as an AttributePath representation which +// Terraform uses to indicate where the issue took place in the user's +// configuration. +// +// A Diagnostic will typically be used to pinpoint a problem with user +// configuration, however it can still be used to present warnings or errors +// to the user without any AttributePath set. +type Diagnostic struct { + // Severity indicates the level of the Diagnostic. Currently can be set to + // either Error or Warning + Severity Severity + + // Summary is a short description of the problem, rendered above location + // information + Summary string + + // Detail is an optional second message rendered below location information + // typically used to communicate a potential fix to the user. + Detail string + + // AttributePath is a representation of the path starting from the root of + // block (resource, datasource, provider) under evaluation by the SDK, to + // the attribute that the Diagnostic should be associated to. Terraform will + // use this information to render information on where the problem took + // place in the user's configuration. + // + // It is represented with cty.Path, which is a list of steps of either + // cty.GetAttrStep (an actual attribute) or cty.IndexStep (a step with Key + // of cty.StringVal for map indexes, and cty.NumberVal for list indexes). + // + // PLEASE NOTE: While cty can support indexing into sets, the SDK and + // protocol currently do not. 
For any Diagnostic related to a schema.TypeSet + // or a child of that type, please terminate the path at the schema.TypeSet + // and opt for more verbose Summary and Detail to help guide the user. + // + // Validity of the AttributePath is currently the responsibility of the + // developer, Terraform should render the root block (provider, resource, + // datasource) in cases where the attribute path is invalid. + AttributePath cty.Path +} + +// Validate ensures a valid Severity and a non-empty Summary are set. +func (d Diagnostic) Validate() error { + var validSev bool + for _, sev := range severities { + if d.Severity == sev { + validSev = true + break + } + } + if !validSev { + return fmt.Errorf("invalid severity: %v", d.Severity) + } + if d.Summary == "" { + return errors.New("empty summary") + } + return nil +} + +// Severity is an enum type marking the severity level of a Diagnostic +type Severity int + +const ( + Error Severity = iota + Warning +) + +var severities = []Severity{Error, Warning} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go new file mode 100644 index 000000000..8037b1301 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/diag/helpers.go @@ -0,0 +1,36 @@ +package diag + +import "fmt" + +// FromErr will convert an error into a Diagnostics. This returns Diagnostics +// as the most common use case in Go will be handling a single error +// returned from a function. +// +// if err != nil { +// return diag.FromErr(err) +// } +func FromErr(err error) Diagnostics { + return Diagnostics{ + Diagnostic{ + Severity: Error, + Summary: err.Error(), + }, + } +} + +// Errorf creates a Diagnostics with a single Error level Diagnostic entry. +// The summary is populated by performing a fmt.Sprintf with the supplied +// values. This returns a single error in a Diagnostics as errors typically +// do not occur in multiples as warnings may. +// +// if unexpectedCondition { +// return diag.Errorf("unexpected: %s", someValue) +// } +func Errorf(format string, a ...interface{}) Diagnostics { + return Diagnostics{ + Diagnostic{ + Severity: Error, + Summary: fmt.Sprintf(format, a...), + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go new file mode 100644 index 000000000..546c5e1cf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/logging.go @@ -0,0 +1,116 @@ +package logging + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "os" + "strings" + "syscall" + + "github.com/hashicorp/logutils" + testing "github.com/mitchellh/go-testing-interface" +) + +// These are the environmental variables that determine if we log, and if +// we log whether or not the log should go to a file. +const ( + EnvLog = "TF_LOG" // Set to True + EnvLogFile = "TF_LOG_PATH" // Set to a file + // EnvLogPathMask splits test log files by name. + EnvLogPathMask = "TF_LOG_PATH_MASK" +) + +var ValidLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} + +// LogOutput determines where we should send logs (if anywhere) and the log level. 
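+//
+// Editor's illustrative sketch (not part of the upstream SDK source): a
+// typical caller wires the returned writer into the standard logger, e.g.
+//
+//	out, err := LogOutput(t)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	log.SetOutput(out)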
+func LogOutput(t testing.T) (logOutput io.Writer, err error) { + logOutput = ioutil.Discard + + logLevel := LogLevel() + if logLevel == "" { + return + } + + logOutput = os.Stderr + if logPath := os.Getenv(EnvLogFile); logPath != "" { + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" { + // Escape special characters which may appear if we have subtests + testName := strings.Replace(t.Name(), "/", "__", -1) + + logPath := fmt.Sprintf(logPathMask, testName) + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + // This was the default since the beginning + logOutput = &logutils.LevelFilter{ + Levels: ValidLevels, + MinLevel: logutils.LogLevel(logLevel), + Writer: logOutput, + } + + return +} + +// SetOutput checks for a log destination with LogOutput, and calls +// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses +// ioutil.Discard. Any error from LogOutout is fatal. +func SetOutput(t testing.T) { + out, err := LogOutput(t) + if err != nil { + log.Fatal(err) + } + + if out == nil { + out = ioutil.Discard + } + + log.SetOutput(out) +} + +// LogLevel returns the current log level string based the environment vars +func LogLevel() string { + envLevel := os.Getenv(EnvLog) + if envLevel == "" { + return "" + } + + logLevel := "TRACE" + if isValidLogLevel(envLevel) { + // allow following for better ux: info, Info or INFO + logLevel = strings.ToUpper(envLevel) + } else { + log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v", + envLevel, ValidLevels) + } + + return logLevel +} + +// IsDebugOrHigher returns whether or not the current log level is debug or trace +func IsDebugOrHigher() bool { + level := string(LogLevel()) + return level == "DEBUG" || level == "TRACE" +} + +func isValidLogLevel(level string) bool { + for _, l := range ValidLevels { + if strings.ToUpper(level) == string(l) { + return true + } + } + + return false +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go new file mode 100644 index 000000000..bddabe647 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging/transport.go @@ -0,0 +1,70 @@ +package logging + +import ( + "bytes" + "encoding/json" + "log" + "net/http" + "net/http/httputil" + "strings" +) + +type transport struct { + name string + transport http.RoundTripper +} + +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { + if IsDebugOrHigher() { + reqData, err := httputil.DumpRequestOut(req, true) + if err == nil { + log.Printf("[DEBUG] "+logReqMsg, t.name, prettyPrintJsonLines(reqData)) + } else { + log.Printf("[ERROR] %s API Request error: %#v", t.name, err) + } + } + + resp, err := t.transport.RoundTrip(req) + if err != nil { + return resp, err + } + + if IsDebugOrHigher() { + respData, err := httputil.DumpResponse(resp, true) + if err == nil { + log.Printf("[DEBUG] "+logRespMsg, t.name, prettyPrintJsonLines(respData)) + } else { + log.Printf("[ERROR] %s API Response error: %#v", t.name, err) + } + } + + return resp, nil +} + +func NewTransport(name string, t http.RoundTripper) *transport { + return &transport{name, t} +} + +// prettyPrintJsonLines iterates 
through a []byte line-by-line, +// transforming any lines that are complete json into pretty-printed json. +func prettyPrintJsonLines(b []byte) string { + parts := strings.Split(string(b), "\n") + for i, p := range parts { + if b := []byte(p); json.Valid(b) { + var out bytes.Buffer + json.Indent(&out, b, "", " ") + parts[i] = out.String() + } + } + return strings.Join(parts, "\n") +} + +const logReqMsg = `%s API Request Details: +---[ REQUEST ]--------------------------------------- +%s +-----------------------------------------------------` + +const logRespMsg = `%s API Response Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go new file mode 100644 index 000000000..7ee21614b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/error.go @@ -0,0 +1,79 @@ +package resource + +import ( + "fmt" + "strings" + "time" +) + +type NotFoundError struct { + LastError error + LastRequest interface{} + LastResponse interface{} + Message string + Retries int +} + +func (e *NotFoundError) Error() string { + if e.Message != "" { + return e.Message + } + + if e.Retries > 0 { + return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries) + } + + return "couldn't find resource" +} + +// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending +type UnexpectedStateError struct { + LastError error + State string + ExpectedState []string +} + +func (e *UnexpectedStateError) Error() string { + return fmt.Sprintf( + "unexpected state '%s', wanted target '%s'. last error: %s", + e.State, + strings.Join(e.ExpectedState, ", "), + e.LastError, + ) +} + +// TimeoutError is returned when WaitForState times out +type TimeoutError struct { + LastError error + LastState string + Timeout time.Duration + ExpectedState []string +} + +func (e *TimeoutError) Error() string { + expectedState := "resource to be gone" + if len(e.ExpectedState) > 0 { + expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", ")) + } + + extraInfo := make([]string, 0) + if e.LastState != "" { + extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState)) + } + if e.Timeout > 0 { + extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String())) + } + + suffix := "" + if len(extraInfo) > 0 { + suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", ")) + } + + if e.LastError != nil { + return fmt.Sprintf("timeout while waiting for %s%s: %s", + expectedState, suffix, e.LastError) + } + + return fmt.Sprintf("timeout while waiting for %s%s", + expectedState, suffix) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/id.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/id.go new file mode 100644 index 000000000..44949550e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/id.go @@ -0,0 +1,45 @@ +package resource + +import ( + "fmt" + "strings" + "sync" + "time" +) + +const UniqueIdPrefix = `terraform-` + +// idCounter is a monotonic counter for generating ordered unique ids. 
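+//
+// Editor's note (illustrative, not upstream text): PrefixedUniqueId below
+// yields prefix + UTC timestamp (four fractional-second digits, dot removed)
+// + an 8-hex-digit counter, e.g. a hypothetical
+// "terraform-20060102150405000000000001"; the suffix is 26 characters,
+// matching UniqueIDSuffixLength.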
+var idMutex sync.Mutex +var idCounter uint32 + +// Helper for a resource to generate a unique identifier w/ default prefix +func UniqueId() string { + return PrefixedUniqueId(UniqueIdPrefix) +} + +// UniqueIDSuffixLength is the string length of the suffix generated by +// PrefixedUniqueId. This can be used by length validation functions to +// ensure prefixes are the correct length for the target field. +const UniqueIDSuffixLength = 26 + +// Helper for a resource to generate a unique identifier w/ given prefix +// +// After the prefix, the ID consists of an incrementing 26 digit value (to match +// previous timestamp output). After the prefix, the ID consists of a timestamp +// and an incrementing 8 hex digit value The timestamp means that multiple IDs +// created with the same prefix will sort in the order of their creation, even +// across multiple terraform executions, as long as the clock is not turned back +// between calls, and as long as any given terraform execution generates fewer +// than 4 billion IDs. +func PrefixedUniqueId(prefix string) string { + // Be precise to 4 digits of fractional seconds, but remove the dot before the + // fractional seconds. + timestamp := strings.Replace( + time.Now().UTC().Format("20060102150405.0000"), ".", "", 1) + + idMutex.Lock() + defer idMutex.Unlock() + idCounter++ + return fmt.Sprintf("%s%s%08x", prefix, timestamp, idCounter) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go new file mode 100644 index 000000000..6d25e4527 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/plugin.go @@ -0,0 +1,154 @@ +package resource + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "sync" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + grpcplugin "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin" + proto "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" + tftest "github.com/hashicorp/terraform-plugin-test/v2" + testing "github.com/mitchellh/go-testing-interface" +) + +func runProviderCommand(t testing.T, f func() error, wd *tftest.WorkingDir, factories map[string]func() (*schema.Provider, error)) error { + // don't point to this as a test failure location + // point to whatever called it + t.Helper() + + // Run the providers in the same process as the test runner using the + // reattach behavior in Terraform. This ensures we get test coverage + // and enables the use of delve as a debugger. + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // this is needed so Terraform doesn't default to expecting protocol 4; + // we're skipping the handshake because Terraform didn't launch the + // plugins. + os.Setenv("PLUGIN_PROTOCOL_VERSIONS", "5") + + // Terraform 0.12.X and 0.13.X+ treat namespaceless providers + // differently in terms of what namespace they default to. So we're + // going to set both variations, as we don't know which version of + // Terraform we're talking to. We're also going to allow overriding + // the host or namespace using environment variables. 
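+	// Editor's note (illustrative): for example, setting
+	// TF_ACC_PROVIDER_NAMESPACE=hashicorp and
+	// TF_ACC_PROVIDER_HOST=registry.terraform.io in the test environment
+	// would override the defaults chosen below.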
+ var namespaces []string + host := "registry.terraform.io" + if v := os.Getenv("TF_ACC_PROVIDER_NAMESPACE"); v != "" { + namespaces = append(namespaces, v) + } else { + namespaces = append(namespaces, "-", "hashicorp") + } + if v := os.Getenv("TF_ACC_PROVIDER_HOST"); v != "" { + host = v + } + + // Spin up gRPC servers for every provider factory, start a + // WaitGroup to listen for all of the close channels. + var wg sync.WaitGroup + reattachInfo := map[string]plugin.ReattachConfig{} + for providerName, factory := range factories { + // providerName may be returned as terraform-provider-foo, and + // we need just foo. So let's fix that. + providerName = strings.TrimPrefix(providerName, "terraform-provider-") + + provider, err := factory() + if err != nil { + return fmt.Errorf("unable to create provider %q from factory: %w", providerName, err) + } + + // keep track of the running factory, so we can make sure it's + // shut down. + wg.Add(1) + + // configure the settings our plugin will be served with + // the GRPCProviderFunc wraps a non-gRPC provider server + // into a gRPC interface, and the logger just discards logs + // from go-plugin. + opts := &plugin.ServeOpts{ + GRPCProviderFunc: func() proto.ProviderServer { + return grpcplugin.NewGRPCProviderServer(provider) + }, + Logger: hclog.New(&hclog.LoggerOptions{ + Name: "plugintest", + Level: hclog.Trace, + Output: ioutil.Discard, + }), + } + + // let's actually start the provider server + config, closeCh, err := plugin.DebugServe(ctx, opts) + if err != nil { + return fmt.Errorf("unable to serve provider %q: %w", providerName, err) + } + + // plugin.DebugServe hijacks our log output location, so let's + // reset it + logging.SetOutput(t) + + // when the provider exits, remove one from the waitgroup + // so we can track when everything is done + go func(c <-chan struct{}) { + <-c + wg.Done() + }(closeCh) + + // set our provider's reattachinfo in our map, once + // for every namespace that different Terraform versions + // may expect. + for _, ns := range namespaces { + reattachInfo[strings.TrimSuffix(host, "/")+"/"+ + strings.TrimSuffix(ns, "/")+"/"+ + providerName] = config + } + } + + // set the environment variable that will tell Terraform how to + // connect to our various running servers. + reattachStr, err := json.Marshal(reattachInfo) + if err != nil { + return err + } + wd.Setenv("TF_REATTACH_PROVIDERS", string(reattachStr)) + + // ok, let's call whatever Terraform command the test was trying to + // call, now that we know it'll attach back to those servers we just + // started. + err = f() + if err != nil { + log.Printf("[WARN] Got error running Terraform: %s", err) + } + + // cancel the servers so they'll return. Otherwise, this closeCh won't + // get closed, and we'll hang here. + cancel() + + // wait for the servers to actually shut down; it may take a moment for + // them to clean up, or whatever. + // TODO: add a timeout here? + // PC: do we need one? The test will time out automatically... + wg.Wait() + + // once we've run the Terraform command, let's remove the reattach + // information from the WorkingDir's environment. The WorkingDir will + // persist until the next call, but the server in the reattach info + // doesn't exist anymore at this point, so the reattach info is no + // longer valid. In theory it should be overwritten in the next call, + // but just to avoid any confusing bug reports, let's just unset the + // environment variable altogether. 
+ wd.Unsetenv("TF_REATTACH_PROVIDERS") + + // return any error returned from the orchestration code running + // Terraform commands + return err +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state.go new file mode 100644 index 000000000..6eda1993f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state.go @@ -0,0 +1,280 @@ +package resource + +import ( + "context" + "log" + "time" +) + +var refreshGracePeriod = 30 * time.Second + +// StateRefreshFunc is a function type used for StateChangeConf that is +// responsible for refreshing the item being watched for a state change. +// +// It returns three results. `result` is any object that will be returned +// as the final object after waiting for state change. This allows you to +// return the final updated object, for example an EC2 instance after refreshing +// it. A nil result represents not found. +// +// `state` is the latest state of that object. And `err` is any error that +// may have happened while refreshing the state. +type StateRefreshFunc func() (result interface{}, state string, err error) + +// StateChangeConf is the configuration struct used for `WaitForState`. +type StateChangeConf struct { + Delay time.Duration // Wait this time before starting checks + Pending []string // States that are "allowed" and will continue trying + Refresh StateRefreshFunc // Refreshes the current state + Target []string // Target state + Timeout time.Duration // The amount of time to wait before timeout + MinTimeout time.Duration // Smallest time to wait before refreshes + PollInterval time.Duration // Override MinTimeout/backoff and only poll this often + NotFoundChecks int // Number of times to allow not found (nil result from Refresh) + + // This is to work around inconsistent APIs + ContinuousTargetOccurence int // Number of times the Target state has to occur continuously +} + +// WaitForStateContext watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. +// +// If the Refresh function returns an error, exit immediately with that error. +// +// If the Refresh function returns a state other than the Target state or one +// listed in Pending, return immediately with an error. +// +// If the Timeout is exceeded before reaching the Target state, return an +// error. +// +// Otherwise, the result is the result of the first call to the Refresh function to +// reach the target state. +// +// Cancellation from the passed in context will cancel the refresh loop +func (conf *StateChangeConf) WaitForStateContext(ctx context.Context) (interface{}, error) { + log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) + + notfoundTick := 0 + targetOccurence := 0 + + // Set a default for times to check for not found + if conf.NotFoundChecks == 0 { + conf.NotFoundChecks = 20 + } + + if conf.ContinuousTargetOccurence == 0 { + conf.ContinuousTargetOccurence = 1 + } + + type Result struct { + Result interface{} + State string + Error error + Done bool + } + + // Read every result from the refresh loop, waiting for a positive result.Done. 
+ resCh := make(chan Result, 1) + // cancellation channel for the refresh loop + cancelCh := make(chan struct{}) + + result := Result{} + + go func() { + defer close(resCh) + + select { + case <-time.After(conf.Delay): + case <-cancelCh: + return + } + + // start with 0 delay for the first loop + var wait time.Duration + + for { + // store the last result + resCh <- result + + // wait and watch for cancellation + select { + case <-cancelCh: + return + case <-time.After(wait): + // first round had no wait + if wait == 0 { + wait = 100 * time.Millisecond + } + } + + res, currentState, err := conf.Refresh() + result = Result{ + Result: res, + State: currentState, + Error: err, + } + + if err != nil { + resCh <- result + return + } + + // If we're waiting for the absence of a thing, then return + if res == nil && len(conf.Target) == 0 { + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + + if res == nil { + // If we didn't find the resource, check if we have been + // not finding it for awhile, and if so, report an error. + notfoundTick++ + if notfoundTick > conf.NotFoundChecks { + result.Error = &NotFoundError{ + LastError: err, + Retries: notfoundTick, + } + resCh <- result + return + } + } else { + // Reset the counter for when a resource isn't found + notfoundTick = 0 + found := false + + for _, allowed := range conf.Target { + if currentState == allowed { + found = true + targetOccurence++ + if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result + return + } + continue + } + } + + for _, allowed := range conf.Pending { + if currentState == allowed { + found = true + targetOccurence = 0 + break + } + } + + if !found && len(conf.Pending) > 0 { + result.Error = &UnexpectedStateError{ + LastError: err, + State: result.State, + ExpectedState: conf.Target, + } + resCh <- result + return + } + } + + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if targetOccurence == 0 { + wait *= 2 + } + + // If a poll interval has been specified, choose that interval. + // Otherwise bound the default value. + if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { + wait = conf.PollInterval + } else { + if wait < conf.MinTimeout { + wait = conf.MinTimeout + } else if wait > 10*time.Second { + wait = 10 * time.Second + } + } + + log.Printf("[TRACE] Waiting %s before next try", wait) + } + }() + + // store the last value result from the refresh loop + lastResult := Result{} + + timeout := time.After(conf.Timeout) + for { + select { + case r, ok := <-resCh: + // channel closed, so return the last result + if !ok { + return lastResult.Result, lastResult.Error + } + + // we reached the intended state + if r.Done { + return r.Result, r.Error + } + + // still waiting, store the last result + lastResult = r + case <-ctx.Done(): + close(cancelCh) + return nil, ctx.Err() + case <-timeout: + log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) + log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) + + // cancel the goroutine and start our grace period timer + close(cancelCh) + timeout := time.After(refreshGracePeriod) + + // we need a for loop and a label to break on, because we may have + // an extra response value to read, but still want to wait for the + // channel to close. 
+ forSelect: + for { + select { + case r, ok := <-resCh: + if r.Done { + // the last refresh loop reached the desired state + return r.Result, r.Error + } + + if !ok { + // the goroutine returned + break forSelect + } + + // target state not reached, save the result for the + // TimeoutError and wait for the channel to close + lastResult = r + case <-ctx.Done(): + log.Println("[ERROR] Context cancelation detected, abandoning grace period") + break forSelect + case <-timeout: + log.Println("[ERROR] WaitForState exceeded refresh grace period") + break forSelect + } + } + + return nil, &TimeoutError{ + LastError: lastResult.Error, + LastState: lastResult.State, + Timeout: conf.Timeout, + ExpectedState: conf.Target, + } + } + } +} + +// WaitForState watches an object and waits for it to achieve the state +// specified in the configuration using the specified Refresh() func, +// waiting the number of seconds specified in the timeout configuration. +// +// Deprecated: Please use WaitForStateContext to ensure proper plugin shutdown +func (conf *StateChangeConf) WaitForState() (interface{}, error) { + return conf.WaitForStateContext(context.Background()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go new file mode 100644 index 000000000..42c7b6408 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/state_shim.go @@ -0,0 +1,288 @@ +package resource + +import ( + "fmt" + "strconv" + + tfjson "github.com/hashicorp/terraform-json" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +type shimmedState struct { + state *terraform.State +} + +func shimStateFromJson(jsonState *tfjson.State) (*terraform.State, error) { + state := terraform.NewState() + state.TFVersion = jsonState.TerraformVersion + + if jsonState.Values == nil { + // the state is empty + return state, nil + } + + for key, output := range jsonState.Values.Outputs { + os, err := shimOutputState(output) + if err != nil { + return nil, err + } + state.RootModule().Outputs[key] = os + } + + ss := &shimmedState{state} + err := ss.shimStateModule(jsonState.Values.RootModule) + if err != nil { + return nil, err + } + + return state, nil +} + +func shimOutputState(so *tfjson.StateOutput) (*terraform.OutputState, error) { + os := &terraform.OutputState{ + Sensitive: so.Sensitive, + } + + switch v := so.Value.(type) { + case string: + os.Type = "string" + os.Value = v + return os, nil + case []interface{}: + os.Type = "list" + if len(v) == 0 { + os.Value = v + return os, nil + } + switch firstElem := v[0].(type) { + case string: + elements := make([]interface{}, len(v)) + for i, el := range v { + elements[i] = el.(string) + } + os.Value = elements + case bool: + elements := make([]interface{}, len(v)) + for i, el := range v { + elements[i] = el.(bool) + } + os.Value = elements + // unmarshalled number from JSON will always be float64 + case float64: + elements := make([]interface{}, len(v)) + for i, el := range v { + elements[i] = el.(float64) + } + os.Value = elements + case []interface{}: + os.Value = v + case map[string]interface{}: + os.Value = v + default: + return nil, fmt.Errorf("unexpected output list element type: %T", firstElem) + } + return os, nil + case map[string]interface{}: + os.Type = "map" + os.Value = v + return os, nil + 
case bool: + os.Type = "string" + os.Value = strconv.FormatBool(v) + return os, nil + // unmarshalled number from JSON will always be float64 + case float64: + os.Type = "string" + os.Value = strconv.FormatFloat(v, 'f', -1, 64) + return os, nil + } + + return nil, fmt.Errorf("unexpected output type: %T", so.Value) +} + +func (ss *shimmedState) shimStateModule(sm *tfjson.StateModule) error { + var path addrs.ModuleInstance + + if sm.Address == "" { + path = addrs.RootModuleInstance + } else { + var diags tfdiags.Diagnostics + path, diags = addrs.ParseModuleInstanceStr(sm.Address) + if diags.HasErrors() { + return diags.Err() + } + } + + mod := ss.state.AddModule(path) + for _, res := range sm.Resources { + resourceState, err := shimResourceState(res) + if err != nil { + return err + } + + key, err := shimResourceStateKey(res) + if err != nil { + return err + } + + mod.Resources[key] = resourceState + } + + if len(sm.ChildModules) > 0 { + return fmt.Errorf("Modules are not supported. Found %d modules.", + len(sm.ChildModules)) + } + return nil +} + +func shimResourceStateKey(res *tfjson.StateResource) (string, error) { + if res.Index == nil { + return res.Address, nil + } + + var mode terraform.ResourceMode + switch res.Mode { + case tfjson.DataResourceMode: + mode = terraform.DataResourceMode + case tfjson.ManagedResourceMode: + mode = terraform.ManagedResourceMode + default: + return "", fmt.Errorf("unexpected resource mode for %q", res.Address) + } + + var index int + switch idx := res.Index.(type) { + case float64: + index = int(idx) + default: + return "", fmt.Errorf("unexpected index type (%T) for %q, "+ + "for_each is not supported", res.Index, res.Address) + } + + rsk := &terraform.ResourceStateKey{ + Mode: mode, + Type: res.Type, + Name: res.Name, + Index: index, + } + + return rsk.String(), nil +} + +func shimResourceState(res *tfjson.StateResource) (*terraform.ResourceState, error) { + sf := &shimmedFlatmap{} + err := sf.FromMap(res.AttributeValues) + if err != nil { + return nil, err + } + attributes := sf.Flatmap() + + if _, ok := attributes["id"]; !ok { + return nil, fmt.Errorf("no %q found in attributes", "id") + } + + return &terraform.ResourceState{ + Provider: res.ProviderName, + Type: res.Type, + Primary: &terraform.InstanceState{ + ID: attributes["id"], + Attributes: attributes, + Meta: map[string]interface{}{ + "schema_version": int(res.SchemaVersion), + }, + Tainted: res.Tainted, + }, + Dependencies: res.DependsOn, + }, nil +} + +type shimmedFlatmap struct { + m map[string]string +} + +func (sf *shimmedFlatmap) FromMap(attributes map[string]interface{}) error { + if sf.m == nil { + sf.m = make(map[string]string, len(attributes)) + } + + return sf.AddMap("", attributes) +} + +func (sf *shimmedFlatmap) AddMap(prefix string, m map[string]interface{}) error { + for key, value := range m { + k := key + if prefix != "" { + k = fmt.Sprintf("%s.%s", prefix, key) + } + + err := sf.AddEntry(k, value) + if err != nil { + return err + } + } + + mapLength := "%" + if prefix != "" { + mapLength = fmt.Sprintf("%s.%s", prefix, "%") + } + + sf.AddEntry(mapLength, strconv.Itoa(len(m))) + + return nil +} + +func (sf *shimmedFlatmap) AddSlice(name string, elements []interface{}) error { + for i, elem := range elements { + key := fmt.Sprintf("%s.%d", name, i) + err := sf.AddEntry(key, elem) + if err != nil { + return err + } + } + + sliceLength := fmt.Sprintf("%s.#", name) + sf.AddEntry(sliceLength, strconv.Itoa(len(elements))) + + return nil +} + +func (sf *shimmedFlatmap) AddEntry(key 
string, value interface{}) error { + switch el := value.(type) { + case nil: + // omit the entry + return nil + case bool: + sf.m[key] = strconv.FormatBool(el) + case float64: + sf.m[key] = strconv.FormatFloat(el, 'f', -1, 64) + case string: + sf.m[key] = el + case map[string]interface{}: + err := sf.AddMap(key, el) + if err != nil { + return err + } + case []interface{}: + err := sf.AddSlice(key, el) + if err != nil { + return err + } + default: + // This should never happen unless terraform-json + // changes how attributes (types) are represented. + // + // We handle all types which the JSON unmarshaler + // can possibly produce + // https://golang.org/pkg/encoding/json/#Unmarshal + + return fmt.Errorf("%q: unexpected type (%T)", key, el) + } + return nil +} + +func (sf *shimmedFlatmap) Flatmap() map[string]string { + return sf.m +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go new file mode 100644 index 000000000..0787141be --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing.go @@ -0,0 +1,1040 @@ +package resource + +import ( + "errors" + "flag" + "fmt" + "log" + "os" + "regexp" + "strconv" + "strings" + + "github.com/hashicorp/go-multierror" + tftest "github.com/hashicorp/terraform-plugin-test/v2" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// flagSweep is a flag available when running tests on the command line. It +// contains a comma seperated list of regions to for the sweeper functions to +// run in. This flag bypasses the normal Test path and instead runs functions designed to +// clean up any leaked resources a testing environment could have created. It is +// a best effort attempt, and relies on Provider authors to implement "Sweeper" +// methods for resources. + +// Adding Sweeper methods with AddTestSweepers will +// construct a list of sweeper funcs to be called here. We iterate through +// regions provided by the sweep flag, and for each region we iterate through the +// tests, and exit on any errors. At time of writing, sweepers are ran +// sequentially, however they can list dependencies to be ran first. We track +// the sweepers that have been ran, so as to not run a sweeper twice for a given +// region. +// +// WARNING: +// Sweepers are designed to be destructive. You should not use the -sweep flag +// in any environment that is not strictly a test environment. Resources will be +// destroyed. + +var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers") +var flagSweepAllowFailures = flag.Bool("sweep-allow-failures", false, "Enable to allow Sweeper Tests to continue after failures") +var flagSweepRun = flag.String("sweep-run", "", "Comma seperated list of Sweeper Tests to run") +var sweeperFuncs map[string]*Sweeper + +// type SweeperFunc is a signature for a function that acts as a sweeper. It +// accepts a string for the region that the sweeper is to be ran in. This +// function must be able to construct a valid client for that region. +type SweeperFunc func(r string) error + +type Sweeper struct { + // Name for sweeper. 
Must be unique to be ran by the Sweeper Runner + Name string + + // Dependencies list the const names of other Sweeper functions that must be ran + // prior to running this Sweeper. This is an ordered list that will be invoked + // recursively at the helper/resource level + Dependencies []string + + // Sweeper function that when invoked sweeps the Provider of specific + // resources + F SweeperFunc +} + +func init() { + sweeperFuncs = make(map[string]*Sweeper) +} + +// AddTestSweepers function adds a given name and Sweeper configuration +// pair to the internal sweeperFuncs map. Invoke this function to register a +// resource sweeper to be available for running when the -sweep flag is used +// with `go test`. Sweeper names must be unique to help ensure a given sweeper +// is only ran once per run. +func AddTestSweepers(name string, s *Sweeper) { + if _, ok := sweeperFuncs[name]; ok { + log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name) + } + + sweeperFuncs[name] = s +} + +func TestMain(m interface { + Run() int +}) { + flag.Parse() + if *flagSweep != "" { + // parse flagSweep contents for regions to run + regions := strings.Split(*flagSweep, ",") + + // get filtered list of sweepers to run based on sweep-run flag + sweepers := filterSweepers(*flagSweepRun, sweeperFuncs) + + if _, err := runSweepers(regions, sweepers, *flagSweepAllowFailures); err != nil { + os.Exit(1) + } + } else { + exitCode := m.Run() + os.Exit(exitCode) + } +} + +func runSweepers(regions []string, sweepers map[string]*Sweeper, allowFailures bool) (map[string]map[string]error, error) { + var sweeperErrorFound bool + sweeperRunList := make(map[string]map[string]error) + + for _, region := range regions { + region = strings.TrimSpace(region) + + var regionSweeperErrorFound bool + regionSweeperRunList := make(map[string]error) + + log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region) + for _, sweeper := range sweepers { + if err := runSweeperWithRegion(region, sweeper, sweepers, regionSweeperRunList, allowFailures); err != nil { + if allowFailures { + continue + } + + sweeperRunList[region] = regionSweeperRunList + return sweeperRunList, fmt.Errorf("sweeper (%s) for region (%s) failed: %s", sweeper.Name, region, err) + } + } + + log.Printf("Sweeper Tests ran successfully:\n") + for sweeper, sweeperErr := range regionSweeperRunList { + if sweeperErr == nil { + fmt.Printf("\t- %s\n", sweeper) + } else { + regionSweeperErrorFound = true + } + } + + if regionSweeperErrorFound { + sweeperErrorFound = true + log.Printf("Sweeper Tests ran unsuccessfully:\n") + for sweeper, sweeperErr := range regionSweeperRunList { + if sweeperErr != nil { + fmt.Printf("\t- %s: %s\n", sweeper, sweeperErr) + } + } + } + + sweeperRunList[region] = regionSweeperRunList + } + + if sweeperErrorFound { + return sweeperRunList, errors.New("at least one sweeper failed") + } + + return sweeperRunList, nil +} + +// filterSweepers takes a comma seperated string listing the names of sweepers +// to be ran, and returns a filtered set from the list of all of sweepers to +// run based on the names given. 
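+// Editor's illustrative example (sweeper name is hypothetical): with
+// f = "aws_example_thing", any registered sweeper whose name contains that
+// string is selected, together with its Dependencies; an empty f returns
+// the full source map unchanged.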
+func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper { + filterSlice := strings.Split(strings.ToLower(f), ",") + if len(filterSlice) == 1 && filterSlice[0] == "" { + // if the filter slice is a single element of "" then no sweeper list was + // given, so just return the full list + return source + } + + sweepers := make(map[string]*Sweeper) + for name := range source { + for _, s := range filterSlice { + if strings.Contains(strings.ToLower(name), s) { + for foundName, foundSweeper := range filterSweeperWithDependencies(name, source) { + sweepers[foundName] = foundSweeper + } + } + } + } + return sweepers +} + +// filterSweeperWithDependencies recursively returns sweeper and all dependencies. +// Since filterSweepers performs fuzzy matching, this function is used +// to perform exact sweeper and dependency lookup. +func filterSweeperWithDependencies(name string, source map[string]*Sweeper) map[string]*Sweeper { + result := make(map[string]*Sweeper) + + currentSweeper, ok := source[name] + if !ok { + log.Printf("[WARN] Sweeper has dependency (%s), but that sweeper was not found", name) + return result + } + + result[name] = currentSweeper + + for _, dependency := range currentSweeper.Dependencies { + for foundName, foundSweeper := range filterSweeperWithDependencies(dependency, source) { + result[foundName] = foundSweeper + } + } + + return result +} + +// runSweeperWithRegion recieves a sweeper and a region, and recursively calls +// itself with that region for every dependency found for that sweeper. If there +// are no dependencies, invoke the contained sweeper fun with the region, and +// add the success/fail status to the sweeperRunList. +func runSweeperWithRegion(region string, s *Sweeper, sweepers map[string]*Sweeper, sweeperRunList map[string]error, allowFailures bool) error { + for _, dep := range s.Dependencies { + depSweeper, ok := sweepers[dep] + + if !ok { + return fmt.Errorf("sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) + } + + log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running..", s.Name, dep) + err := runSweeperWithRegion(region, depSweeper, sweepers, sweeperRunList, allowFailures) + + if err != nil { + if allowFailures { + log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", depSweeper.Name, region, err) + continue + } + + return err + } + } + + if _, ok := sweeperRunList[s.Name]; ok { + log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region) + return nil + } + + log.Printf("[DEBUG] Running Sweeper (%s) in region (%s)", s.Name, region) + + runE := s.F(region) + + sweeperRunList[s.Name] = runE + + if runE != nil { + log.Printf("[ERROR] Error running Sweeper (%s) in region (%s): %s", s.Name, region, runE) + } + + return runE +} + +const TestEnvVar = "TF_ACC" + +// TestCheckFunc is the callback type used with acceptance tests to check +// the state of a resource. The state passed in is the latest state known, +// or in the case of being after a destroy, it is the last known state when +// it was created. +type TestCheckFunc func(*terraform.State) error + +// ImportStateCheckFunc is the check function for ImportState tests +type ImportStateCheckFunc func([]*terraform.InstanceState) error + +// ImportStateIdFunc is an ID generation function to help with complex ID +// generation for ImportState tests. 
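+//
+// Editor's illustrative sketch (resource address is hypothetical):
+//
+//	ImportStateIdFunc: func(s *terraform.State) (string, error) {
+//		rs, ok := s.RootModule().Resources["aws_quicksight_group_membership.example"]
+//		if !ok {
+//			return "", fmt.Errorf("resource not found in state")
+//		}
+//		return rs.Primary.ID, nil
+//	},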
+type ImportStateIdFunc func(*terraform.State) (string, error) + +// TestCase is a single acceptance test case used to test the apply/destroy +// lifecycle of a resource in a specific configuration. +// +// When the destroy plan is executed, the config from the last TestStep +// is used to plan it. +type TestCase struct { + // IsUnitTest allows a test to run regardless of the TF_ACC + // environment variable. This should be used with care - only for + // fast tests on local resources (e.g. remote state with a local + // backend) but can be used to increase confidence in correct + // operation of Terraform without waiting for a full acctest run. + IsUnitTest bool + + // PreCheck, if non-nil, will be called before any test steps are + // executed. It will only be executed in the case that the steps + // would run, so it can be used for some validation before running + // acceptance tests, such as verifying that keys are setup. + PreCheck func() + + // ProviderFactories can be specified for the providers that are valid. + // + // These are the providers that can be referenced within the test. Each key + // is an individually addressable provider. Typically you will only pass a + // single value here for the provider you are testing. Aliases are not + // supported by the test framework, so to use multiple provider instances, + // you should add additional copies to this map with unique names. To set + // their configuration, you would reference them similar to the following: + // + // provider "my_factory_key" { + // # ... + // } + // + // resource "my_resource" "mr" { + // provider = my_factory_key + // + // # ... + // } + ProviderFactories map[string]func() (*schema.Provider, error) + + // Providers is the ResourceProvider that will be under test. + // + // Deprecated: Providers is deprecated, please use ProviderFactories + Providers map[string]*schema.Provider + + // ExternalProviders are providers the TestCase relies on that should + // be downloaded from the registry during init. This is only really + // necessary to set if you're using import, as providers in your config + // will be automatically retrieved during init. Import doesn't use a + // config, however, so we allow manually specifying them here to be + // downloaded for import tests. + ExternalProviders map[string]ExternalProvider + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // CheckDestroy is called after the resource is finally destroyed + // to allow the tester to test that the resource is truly gone. + CheckDestroy TestCheckFunc + + // Steps are the apply sequences done within the context of the + // same state. Each step can have its own check to verify correctness. + Steps []TestStep + + // The settings below control the "ID-only refresh test." This is + // an enabled-by-default test that tests that a refresh can be + // refreshed with only an ID to result in the same attributes. + // This validates completeness of Refresh. + // + // IDRefreshName is the name of the resource to check. This will + // default to the first non-nil primary resource in the state. + // + // IDRefreshIgnore is a list of configuration keys that will be ignored. + IDRefreshName string + IDRefreshIgnore []string +} + +// ExternalProvider holds information about third-party providers that should +// be downloaded by Terraform as part of running the test step. 
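+//
+// Editor's illustrative example (the provider source and version constraint
+// are assumptions, not upstream text):
+//
+//	ExternalProviders: map[string]ExternalProvider{
+//		"random": {
+//			Source:            "hashicorp/random",
+//			VersionConstraint: "~> 3.0",
+//		},
+//	},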
+type ExternalProvider struct { + VersionConstraint string // the version constraint for the provider + Source string // the provider source +} + +// TestStep is a single apply sequence of a test, done within the +// context of a state. +// +// Multiple TestSteps can be sequenced in a Test to allow testing +// potentially complex update logic. In general, simply create/destroy +// tests will only need one step. +type TestStep struct { + // ResourceName should be set to the name of the resource + // that is being tested. Example: "aws_instance.foo". Various test + // modes use this to auto-detect state information. + // + // This is only required if the test mode settings below say it is + // for the mode you're using. + ResourceName string + + // PreConfig is called before the Config is applied to perform any per-step + // setup that needs to happen. This is called regardless of "test mode" + // below. + PreConfig func() + + // Taint is a list of resource addresses to taint prior to the execution of + // the step. Be sure to only include this at a step where the referenced + // address will be present in state, as it will fail the test if the resource + // is missing. + // + // This option is ignored on ImportState tests, and currently only works for + // resources in the root module path. + Taint []string + + //--------------------------------------------------------------- + // Test modes. One of the following groups of settings must be + // set to determine what the test step will do. Ideally we would've + // used Go interfaces here but there are now hundreds of tests we don't + // want to re-type so instead we just determine which step logic + // to run based on what settings below are set. + //--------------------------------------------------------------- + + //--------------------------------------------------------------- + // Plan, Apply testing + //--------------------------------------------------------------- + + // Config a string of the configuration to give to Terraform. If this + // is set, then the TestCase will execute this step with the same logic + // as a `terraform apply`. + Config string + + // Check is called after the Config is applied. Use this step to + // make your own API calls to check the status of things, and to + // inspect the format of the ResourceState itself. + // + // If an error is returned, the test will fail. In this case, a + // destroy plan will still be attempted. + // + // If this is nil, no check is done on this step. + Check TestCheckFunc + + // Destroy will create a destroy plan if set to true. + Destroy bool + + // ExpectNonEmptyPlan can be set to true for specific types of tests that are + // looking to verify that a diff occurs + ExpectNonEmptyPlan bool + + // ExpectError allows the construction of test cases that we expect to fail + // with an error. The specified regexp must match against the error for the + // test to pass. + ExpectError *regexp.Regexp + + // PlanOnly can be set to only run `plan` with this configuration, and not + // actually apply it. This is useful for ensuring config changes result in + // no-op plans + PlanOnly bool + + // PreventDiskCleanup can be set to true for testing terraform modules which + // require access to disk at runtime. 
Note that this will leave files in the + // temp folder + PreventDiskCleanup bool + + // PreventPostDestroyRefresh can be set to true for cases where data sources + // are tested alongside real resources + PreventPostDestroyRefresh bool + + // SkipFunc is called before applying config, but after PreConfig + // This is useful for defining test steps with platform-dependent checks + SkipFunc func() (bool, error) + + //--------------------------------------------------------------- + // ImportState testing + //--------------------------------------------------------------- + + // ImportState, if true, will test the functionality of ImportState + // by importing the resource with ResourceName (must be set) and the + // ID of that resource. + ImportState bool + + // ImportStateId is the ID to perform an ImportState operation with. + // This is optional. If it isn't set, then the resource ID is automatically + // determined by inspecting the state for ResourceName's ID. + ImportStateId string + + // ImportStateIdPrefix is the prefix added in front of ImportStateId. + // This can be useful in complex import cases, where more than one + // attribute needs to be passed on as the Import ID. Mainly in cases + // where the ID is not known, and a known prefix needs to be added to + // the unset ImportStateId field. + ImportStateIdPrefix string + + // ImportStateIdFunc is a function that can be used to dynamically generate + // the ID for the ImportState tests. It is sent the state, which can be + // checked to derive the attributes necessary and generate the string in the + // desired format. + ImportStateIdFunc ImportStateIdFunc + + // ImportStateCheck checks the results of ImportState. It should be + // used to verify that the resulting value of ImportState has the + // proper resources, IDs, and attributes. + ImportStateCheck ImportStateCheckFunc + + // ImportStateVerify, if true, will also check that the state values + // that are finally put into the state after import match for all the + // IDs returned by the Import. Note that this checks for strict equality + // and does not respect DiffSuppressFunc or CustomizeDiff. + // + // ImportStateVerifyIgnore is a list of prefixes of fields that should + // not be verified to be equal. These can be set to ephemeral fields or + // fields that can't be refreshed and don't matter. + ImportStateVerify bool + ImportStateVerifyIgnore []string +} + +// ParallelTest performs an acceptance test on a resource, allowing concurrency +// with other ParallelTest. +// +// Tests will fail if they do not properly handle conditions to allow multiple +// tests to occur against the same resource or service (e.g. random naming). +// All other requirements of the Test function also apply to this function. +func ParallelTest(t testing.T, c TestCase) { + t.Helper() + t.Parallel() + Test(t, c) +} + +// Test performs an acceptance test on a resource. +// +// Tests are not run unless an environmental variable "TF_ACC" is +// set to some non-empty value. This is to avoid test cases surprising +// a user by creating real resources. +// +// Tests will fail unless the verbose flag (`go test -v`, or explicitly +// the "-test.v" flag) is set. Because some acceptance tests take quite +// long, we require the verbose flag so users are able to see progress +// output. +func Test(t testing.T, c TestCase) { + t.Helper() + + // We only run acceptance tests if an env var is set because they're + // slow and generally require some outside configuration. 
You can opt out
+	// of this with IsUnitTest on individual TestCases.
+	if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest {
+		t.Skip(fmt.Sprintf(
+			"Acceptance tests skipped unless env '%s' set",
+			TestEnvVar))
+		return
+	}
+
+	logging.SetOutput(t)
+
+	// Copy any explicitly passed providers to factories; this is for backwards compatibility.
+	if len(c.Providers) > 0 {
+		c.ProviderFactories = map[string]func() (*schema.Provider, error){}
+
+		for name, p := range c.Providers {
+			if _, ok := c.ProviderFactories[name]; ok {
+				t.Fatalf("ProviderFactory for %q already exists, cannot overwrite with Provider", name)
+			}
+			prov := p
+			c.ProviderFactories[name] = func() (*schema.Provider, error) {
+				return prov, nil
+			}
+		}
+	}
+
+	// Run the PreCheck if we have it.
+	// This is done after the auto-configure to allow providers
+	// to override the default auto-configure parameters.
+	if c.PreCheck != nil {
+		c.PreCheck()
+	}
+
+	sourceDir, err := os.Getwd()
+	if err != nil {
+		t.Fatalf("Error getting working dir: %s", err)
+	}
+	helper := tftest.AutoInitProviderHelper(sourceDir)
+	defer func(helper *tftest.Helper) {
+		err := helper.Close()
+		if err != nil {
+			log.Printf("Error cleaning up temporary test files: %s", err)
+		}
+	}(helper)
+
+	runNewTest(t, c, helper)
+}
+
+// testProviderConfig takes the list of Providers in a TestCase and returns a
+// config with only empty provider blocks. This is useful for Import, where no
+// config is provided, but the providers must be defined.
+func testProviderConfig(c TestCase) (string, error) {
+	var lines []string
+	var requiredProviders []string
+	for p := range c.Providers {
+		lines = append(lines, fmt.Sprintf("provider %q {}\n", p))
+	}
+	for p, v := range c.ExternalProviders {
+		if _, ok := c.Providers[p]; ok {
+			return "", fmt.Errorf("Provider %q set in both Providers and ExternalProviders for TestCase. Must be set in only one.", p)
+		}
+		if _, ok := c.ProviderFactories[p]; ok {
+			return "", fmt.Errorf("Provider %q set in both ProviderFactories and ExternalProviders for TestCase. Must be set in only one.", p)
+		}
+		lines = append(lines, fmt.Sprintf("provider %q {}\n", p))
+		var providerBlock string
+		if v.VersionConstraint != "" {
+			providerBlock = fmt.Sprintf("%s\nversion = %q", providerBlock, v.VersionConstraint)
+		}
+		if v.Source != "" {
+			providerBlock = fmt.Sprintf("%s\nsource = %q", providerBlock, v.Source)
+		}
+		if providerBlock != "" {
+			providerBlock = fmt.Sprintf("%s = {%s\n}\n", p, providerBlock)
+		}
+		requiredProviders = append(requiredProviders, providerBlock)
+	}
+
+	if len(requiredProviders) > 0 {
+		lines = append([]string{fmt.Sprintf("terraform {\nrequired_providers {\n%s}\n}\n\n", strings.Join(requiredProviders, ""))}, lines...)
+	}
+
+	return strings.Join(lines, ""), nil
+}
+
+// UnitTest is a helper to force the acceptance testing harness to run in the
+// normal unit test suite. This should only be used for resources that don't
+// have any external dependencies.
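+//
+// An illustrative call (the provider factories, config string, and resource
+// address below are placeholders assumed to be defined by the provider under
+// test):
+//
+//   resource.UnitTest(t, resource.TestCase{
+//       ProviderFactories: testAccProviderFactories,
+//       Steps: []resource.TestStep{
+//           {
+//               Config: testAccExampleThingConfig,
+//               Check:  resource.TestCheckResourceAttr("example_thing.test", "name", "foo"),
+//           },
+//       },
+//   })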
+func UnitTest(t testing.T, c TestCase) {
+	t.Helper()
+
+	c.IsUnitTest = true
+	Test(t, c)
+}
+
+func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) {
+	if c.ResourceName == "" {
+		return nil, fmt.Errorf("ResourceName must be set in TestStep")
+	}
+
+	for _, m := range state.Modules {
+		if len(m.Resources) > 0 {
+			if v, ok := m.Resources[c.ResourceName]; ok {
+				return v, nil
+			}
+		}
+	}
+
+	return nil, fmt.Errorf(
+		"Resource specified by ResourceName couldn't be found: %s", c.ResourceName)
+}
+
+// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into
+// a single TestCheckFunc.
+//
+// As a user testing their provider, this lets you decompose your checks
+// into smaller pieces more easily.
+func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
+	return func(s *terraform.State) error {
+		for i, f := range fs {
+			if err := f(s); err != nil {
+				return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)
+			}
+		}
+
+		return nil
+	}
+}
+
+// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into
+// a single TestCheckFunc.
+//
+// As a user testing their provider, this lets you decompose your checks
+// into smaller pieces more easily.
+//
+// Unlike ComposeTestCheckFunc, ComposeAggregateTestCheckFunc runs _all_ of the
+// TestCheckFuncs and aggregates failures.
+func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
+	return func(s *terraform.State) error {
+		var result *multierror.Error
+
+		for i, f := range fs {
+			if err := f(s); err != nil {
+				result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err))
+			}
+		}
+
+		return result.ErrorOrNil()
+	}
+}
+
+// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value
+// exists in state for the given name/key combination. It is useful when
+// testing that computed values were set, when it is not possible to
+// know ahead of time what the values will be.
+func TestCheckResourceAttrSet(name, key string) TestCheckFunc {
+	return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error {
+		is, err := primaryInstanceState(s, name)
+		if err != nil {
+			return err
+		}
+
+		return testCheckResourceAttrSet(is, name, key)
+	})
+}
+
+// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with
+// support for non-root modules
+func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc {
+	mpt := addrs.Module(mp).UnkeyedInstanceShim()
+	return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error {
+		is, err := modulePathPrimaryInstanceState(s, mpt, name)
+		if err != nil {
+			return err
+		}
+
+		return testCheckResourceAttrSet(is, name, key)
+	})
+}
+
+func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error {
+	if val, ok := is.Attributes[key]; !ok || val == "" {
+		return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key)
+	}
+
+	return nil
+}
+
+// TestCheckResourceAttr is a TestCheckFunc which validates
+// the value in state for the given name/key combination.
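+//
+// A short illustrative usage (the resource address, key, and value are
+// placeholders):
+//
+//   Check: resource.ComposeTestCheckFunc(
+//       resource.TestCheckResourceAttr("example_thing.test", "name", "foo"),
+//       resource.TestCheckResourceAttrSet("example_thing.test", "id"),
+//   ),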
+func TestCheckResourceAttr(name, key, value string) TestCheckFunc { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckResourceAttr(is, name, key, value) + }) +} + +// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with +// support for non-root modules +func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckResourceAttr(is, name, key, value) + }) +} + +func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { + // Empty containers may be elided from the state. + // If the intent here is to check for an empty container, allow the key to + // also be non-existent. + emptyCheck := false + if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + emptyCheck = true + } + + if v, ok := is.Attributes[key]; !ok || v != value { + if emptyCheck && !ok { + return nil + } + + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", name, key) + } + + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + name, + key, + value, + v) + } + return nil +} + +// TestCheckNoResourceAttr is a TestCheckFunc which ensures that +// NO value exists in state for the given name/key combination. +func TestCheckNoResourceAttr(name, key string) TestCheckFunc { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testCheckNoResourceAttr(is, name, key) + }) +} + +// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with +// support for non-root modules +func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testCheckNoResourceAttr(is, name, key) + }) +} + +func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { + // Empty containers may sometimes be included in the state. + // If the intent here is to check for an empty container, allow the value to + // also be "0". + emptyCheck := false + if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") { + emptyCheck = true + } + + val, exists := is.Attributes[key] + if emptyCheck && val == "0" { + return nil + } + + if exists { + return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) + } + + return nil +} + +// TestMatchResourceAttr is a TestCheckFunc which checks that the value +// in state for the given name/key combination matches the given regex. 
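+//
+// For example (the address and pattern are placeholders):
+//
+//   resource.TestMatchResourceAttr("example_thing.test", "arn",
+//       regexp.MustCompile(`^arn:`)),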
+func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := primaryInstanceState(s, name) + if err != nil { + return err + } + + return testMatchResourceAttr(is, name, key, r) + }) +} + +// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with +// support for non-root modules +func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSet(key, func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err + } + + return testMatchResourceAttr(is, name, key, r) + }) +} + +func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { + if !r.MatchString(is.Attributes[key]) { + return fmt.Errorf( + "%s: Attribute '%s' didn't match %q, got %#v", + name, + key, + r.String(), + is.Attributes[key]) + } + + return nil +} + +// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the +// value is a pointer so that it can be updated while the test is running. +// It will only be dereferenced at the point this step is run. +func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckResourceAttr(name, key, *value)(s) + } +} + +// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with +// support for non-root modules +func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckModuleResourceAttr(mp, name, key, *value)(s) + } +} + +// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values +// in state for a pair of name/key combinations are equal. 
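+//
+// For example (the addresses are placeholders), asserting that a data source
+// reports the same id as the managed resource it reads:
+//
+//   resource.TestCheckResourceAttrPair(
+//       "data.example_thing.test", "id",
+//       "example_thing.test", "id",
+//   ),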
+func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { + return checkIfIndexesIntoTypeSetPair(keyFirst, keySecond, func(s *terraform.State) error { + isFirst, err := primaryInstanceState(s, nameFirst) + if err != nil { + return err + } + + isSecond, err := primaryInstanceState(s, nameSecond) + if err != nil { + return err + } + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + }) +} + +// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with +// support for non-root modules +func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { + mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() + mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() + return checkIfIndexesIntoTypeSetPair(keyFirst, keySecond, func(s *terraform.State) error { + isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) + if err != nil { + return err + } + + isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond) + if err != nil { + return err + } + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + }) +} + +func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { + if nameFirst == nameSecond && keyFirst == keySecond { + return fmt.Errorf( + "comparing self: resource %s attribute %s", + nameFirst, + keyFirst, + ) + } + + vFirst, okFirst := isFirst.Attributes[keyFirst] + vSecond, okSecond := isSecond.Attributes[keySecond] + + // Container count values of 0 should not be relied upon, and not reliably + // maintained by helper/schema. For the purpose of tests, consider unset and + // 0 to be equal. + if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] && + (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) { + // they have the same suffix, and it is a collection count key. + if vFirst == "0" || vFirst == "" { + okFirst = false + } + if vSecond == "0" || vSecond == "" { + okSecond = false + } + } + + if okFirst != okSecond { + if !okFirst { + return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond) + } + return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond) + } + if !(okFirst || okSecond) { + // If they both don't exist then they are equally unset, so that's okay. 
+ return nil + } + + if vFirst != vSecond { + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + nameFirst, + keyFirst, + vSecond, + vFirst) + } + + return nil +} + +// TestCheckOutput checks an output in the Terraform configuration +func TestCheckOutput(name, value string) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if rs.Value != value { + return fmt.Errorf( + "Output '%s': expected %#v, got %#v", + name, + value, + rs) + } + + return nil + } +} + +func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc { + return func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Outputs[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + if !r.MatchString(rs.Value.(string)) { + return fmt.Errorf( + "Output '%s': %#v didn't match %q", + name, + rs, + r.String()) + } + + return nil + } +} + +// modulePrimaryInstanceState returns the instance state for the given resource +// name in a ModuleState +func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { + rs, ok := ms.Resources[name] + if !ok { + return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) + } + + is := rs.Primary + if is == nil { + return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) + } + + return is, nil +} + +// modulePathPrimaryInstanceState returns the primary instance state for the +// given resource name in a given module path. +func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { + ms := s.ModuleByPath(mp) + if ms == nil { + return nil, fmt.Errorf("No module found at: %s", mp) + } + + return modulePrimaryInstanceState(s, ms, name) +} + +// primaryInstanceState returns the primary instance state for the given +// resource name in the root module. 
+func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { + ms := s.RootModule() + return modulePrimaryInstanceState(s, ms, name) +} + +// indexesIntoTypeSet is a heuristic to try and identify if a flatmap style +// string address uses a precalculated TypeSet hash, which are integers and +// typically are large and obviously not a list index +func indexesIntoTypeSet(key string) bool { + for _, part := range strings.Split(key, ".") { + if i, err := strconv.Atoi(part); err == nil && i > 100 { + return true + } + } + return false +} + +func checkIfIndexesIntoTypeSet(key string, f TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + err := f(s) + if err != nil && s.IsBinaryDrivenTest && indexesIntoTypeSet(key) { + return fmt.Errorf("Error in test check: %s\nTest check address %q likely indexes into TypeSet\nThis is currently not possible in the SDK", err, key) + } + return err + } +} + +func checkIfIndexesIntoTypeSetPair(keyFirst, keySecond string, f TestCheckFunc) TestCheckFunc { + return func(s *terraform.State) error { + err := f(s) + if err != nil && s.IsBinaryDrivenTest && (indexesIntoTypeSet(keyFirst) || indexesIntoTypeSet(keySecond)) { + return fmt.Errorf("Error in test check: %s\nTest check address %q or %q likely indexes into TypeSet\nThis is currently not possible in the SDK", err, keyFirst, keySecond) + } + return err + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go new file mode 100644 index 000000000..1e39294fd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_config.go @@ -0,0 +1,25 @@ +package resource + +import ( + "errors" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testStepTaint(state *terraform.State, step TestStep) error { + for _, p := range step.Taint { + m := state.RootModule() + if m == nil { + return errors.New("no state") + } + rs, ok := m.Resources[p] + if !ok { + return fmt.Errorf("resource %q not found in state", p) + } + log.Printf("[WARN] Test: Explicitly tainting resource %q", p) + rs.Taint() + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go new file mode 100644 index 000000000..b4aff32f1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new.go @@ -0,0 +1,242 @@ +package resource + +import ( + "fmt" + "log" + "reflect" + "strings" + + "github.com/davecgh/go-spew/spew" + tfjson "github.com/hashicorp/terraform-json" + tftest "github.com/hashicorp/terraform-plugin-test/v2" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func runPostTestDestroy(t testing.T, c TestCase, wd *tftest.WorkingDir, factories map[string]func() (*schema.Provider, error)) error { + t.Helper() + + err := runProviderCommand(t, func() error { + wd.RequireDestroy(t) + return nil + }, wd, factories) + if err != nil { + return err + } + + if c.CheckDestroy != nil { + var statePostDestroy *terraform.State + err := runProviderCommand(t, func() error { + statePostDestroy = getState(t, wd) + return nil + }, wd, factories) + if err != nil { + return err + } + + if err := 
c.CheckDestroy(statePostDestroy); err != nil { + return err + } + } + + return nil +} + +func runNewTest(t testing.T, c TestCase, helper *tftest.Helper) { + t.Helper() + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + wd := helper.RequireNewWorkingDir(t) + + defer func() { + var statePreDestroy *terraform.State + err := runProviderCommand(t, func() error { + statePreDestroy = getState(t, wd) + return nil + }, wd, c.ProviderFactories) + if err != nil { + t.Fatalf("Error retrieving state, there may be dangling resources: %s", err.Error()) + return + } + + if !stateIsEmpty(statePreDestroy) { + runPostTestDestroy(t, c, wd, c.ProviderFactories) + } + + wd.Close() + }() + + providerCfg, err := testProviderConfig(c) + if err != nil { + t.Fatal(err) + } + + wd.RequireSetConfig(t, providerCfg) + err = runProviderCommand(t, func() error { + wd.RequireInit(t) + return nil + }, wd, c.ProviderFactories) + if err != nil { + t.Fatalf("Error running init: %s", err.Error()) + return + } + + // use this to track last step succesfully applied + // acts as default for import tests + var appliedCfg string + + for i, step := range c.Steps { + if step.PreConfig != nil { + step.PreConfig() + } + + if step.SkipFunc != nil { + skip, err := step.SkipFunc() + if err != nil { + t.Fatal(err) + } + if skip { + log.Printf("[WARN] Skipping step %d/%d", i+1, len(c.Steps)) + continue + } + } + + if step.ImportState { + err := testStepNewImportState(t, c, helper, wd, step, appliedCfg) + if err != nil { + t.Fatal(err) + } + continue + } + + if step.Config != "" { + err := testStepNewConfig(t, c, wd, step) + if step.ExpectError != nil { + if err == nil { + t.Fatalf("Step %d/%d, expected an error but got none", i+1, len(c.Steps)) + } + if !step.ExpectError.MatchString(err.Error()) { + t.Fatalf("Step %d/%d, expected an error with pattern, no match on: %s", i+1, len(c.Steps), err) + } + } else { + if err != nil { + t.Fatalf("Step %d/%d error: %s", i+1, len(c.Steps), err) + } + } + appliedCfg = step.Config + continue + } + + t.Fatal("Unsupported test mode") + } +} + +func getState(t testing.T, wd *tftest.WorkingDir) *terraform.State { + t.Helper() + + jsonState := wd.RequireState(t) + state, err := shimStateFromJson(jsonState) + if err != nil { + t.Fatal(err) + } + return state +} + +func stateIsEmpty(state *terraform.State) bool { + return state.Empty() || !state.HasResources() +} + +func planIsEmpty(plan *tfjson.Plan) bool { + for _, rc := range plan.ResourceChanges { + if rc.Mode == tfjson.DataResourceMode { + // Skip data sources as the current implementation ignores + // existing state and they are all re-read every time + continue + } + + for _, a := range rc.Change.Actions { + if a != tfjson.ActionNoop { + return false + } + } + } + return true +} + +func testIDRefresh(c TestCase, t testing.T, wd *tftest.WorkingDir, step TestStep, r *terraform.ResourceState) error { + t.Helper() + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + + // Build the state. The state is just the resource with an ID. There + // are no attributes. We only set what is needed to perform a refresh. + state := terraform.NewState() + state.RootModule().Resources = make(map[string]*terraform.ResourceState) + state.RootModule().Resources[c.IDRefreshName] = &terraform.ResourceState{} + + // Temporarily set the config to a minimal provider config for the refresh + // test. After the refresh we can reset it. 
+ cfg, err := testProviderConfig(c) + if err != nil { + return err + } + wd.RequireSetConfig(t, cfg) + defer wd.RequireSetConfig(t, step.Config) + + // Refresh! + err = runProviderCommand(t, func() error { + wd.RequireRefresh(t) + state = getState(t, wd) + return nil + }, wd, c.ProviderFactories) + if err != nil { + return err + } + + // Verify attribute equivalence. + actualR := state.RootModule().Resources[c.IDRefreshName] + if actualR == nil { + return fmt.Errorf("Resource gone!") + } + if actualR.Primary == nil { + return fmt.Errorf("Resource has no primary instance") + } + actual := actualR.Primary.Attributes + expected := r.Primary.Attributes + // Remove fields we're ignoring + for _, v := range c.IDRefreshIgnore { + for k := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + return fmt.Errorf( + "Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ + "\n\n%s\n\n%s", + spewConf.Sdump(actual), spewConf.Sdump(expected)) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go new file mode 100644 index 000000000..c847cf455 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_config.go @@ -0,0 +1,182 @@ +package resource + +import ( + tfjson "github.com/hashicorp/terraform-json" + tftest "github.com/hashicorp/terraform-plugin-test/v2" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testStepNewConfig(t testing.T, c TestCase, wd *tftest.WorkingDir, step TestStep) error { + t.Helper() + + var idRefreshCheck *terraform.ResourceState + idRefresh := c.IDRefreshName != "" + + if !step.Destroy { + var state *terraform.State + err := runProviderCommand(t, func() error { + state = getState(t, wd) + return nil + }, wd, c.ProviderFactories) + if err != nil { + return err + } + if err := testStepTaint(state, step); err != nil { + t.Fatalf("Error when tainting resources: %s", err) + } + } + + wd.RequireSetConfig(t, step.Config) + + // require a refresh before applying + // failing to do this will result in data sources not being updated + err := runProviderCommand(t, func() error { + return wd.Refresh() + }, wd, c.ProviderFactories) + if err != nil { + return err + } + + if !step.PlanOnly { + err := runProviderCommand(t, func() error { + return wd.Apply() + }, wd, c.ProviderFactories) + if err != nil { + return err + } + + var state *terraform.State + err = runProviderCommand(t, func() error { + state = getState(t, wd) + return nil + }, wd, c.ProviderFactories) + if err != nil { + return err + } + if step.Check != nil { + state.IsBinaryDrivenTest = true + if err := step.Check(state); err != nil { + t.Fatal(err) + } + } + } + + // Test for perpetual diffs by performing a plan, a refresh, and another plan + + // do a plan + err = runProviderCommand(t, func() error { + return wd.CreatePlan() + }, wd, c.ProviderFactories) + if err != nil { + return err + } + + var plan *tfjson.Plan + err = 
runProviderCommand(t, func() error { + plan = wd.RequireSavedPlan(t) + return nil + }, wd, c.ProviderFactories) + if err != nil { + return err + } + + if !planIsEmpty(plan) { + if step.ExpectNonEmptyPlan { + t.Log("[INFO] Got non-empty plan, as expected") + } else { + var stdout string + err = runProviderCommand(t, func() error { + stdout = wd.RequireSavedPlanStdout(t) + return nil + }, wd, c.ProviderFactories) + if err != nil { + return err + } + t.Fatalf("After applying this test step, the plan was not empty.\nstdout:\n\n%s", stdout) + } + } + + // do a refresh + if !c.PreventPostDestroyRefresh { + err := runProviderCommand(t, func() error { + return wd.Refresh() + }, wd, c.ProviderFactories) + if err != nil { + return err + } + } + + // do another plan + err = runProviderCommand(t, func() error { + return wd.CreatePlan() + }, wd, c.ProviderFactories) + if err != nil { + return err + } + + err = runProviderCommand(t, func() error { + plan = wd.RequireSavedPlan(t) + return nil + }, wd, c.ProviderFactories) + if err != nil { + return err + } + + // check if plan is empty + if !planIsEmpty(plan) { + if step.ExpectNonEmptyPlan { + t.Log("[INFO] Got non-empty plan, as expected") + } else { + var stdout string + err = runProviderCommand(t, func() error { + stdout = wd.RequireSavedPlanStdout(t) + return nil + }, wd, c.ProviderFactories) + if err != nil { + return err + } + t.Fatalf("After applying this test step and performing a `terraform refresh`, the plan was not empty.\nstdout\n\n%s", stdout) + } + } + + // ID-ONLY REFRESH + // If we've never checked an id-only refresh and our state isn't + // empty, find the first resource and test it. + var state *terraform.State + err = runProviderCommand(t, func() error { + state = getState(t, wd) + return nil + }, wd, c.ProviderFactories) + if err != nil { + return err + } + if idRefresh && idRefreshCheck == nil && !state.Empty() { + // Find the first non-nil resource in the state + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[c.IDRefreshName]; ok { + idRefreshCheck = v + } + + break + } + } + + // If we have an instance to check for refreshes, do it + // immediately. We do it in the middle of another test + // because it shouldn't affect the overall state (refresh + // is read-only semantically) and we want to fail early if + // this fails. If refresh isn't read-only, then this will have + // caught a different bug. 
+ if idRefreshCheck != nil { + if err := testIDRefresh(c, t, wd, step, idRefreshCheck); err != nil { + t.Fatalf( + "[ERROR] Test: ID-only test failed: %s", err) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go new file mode 100644 index 000000000..5ef8d722b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/testing_new_import_state.go @@ -0,0 +1,185 @@ +package resource + +import ( + "reflect" + "strings" + + "github.com/davecgh/go-spew/spew" + tftest "github.com/hashicorp/terraform-plugin-test/v2" + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func testStepNewImportState(t testing.T, c TestCase, helper *tftest.Helper, wd *tftest.WorkingDir, step TestStep, cfg string) error { + t.Helper() + + spewConf := spew.NewDefaultConfig() + spewConf.SortKeys = true + + if step.ResourceName == "" { + t.Fatal("ResourceName is required for an import state test") + } + + // get state from check sequence + var state *terraform.State + err := runProviderCommand(t, func() error { + state = getState(t, wd) + return nil + }, wd, c.ProviderFactories) + if err != nil { + t.Fatalf("Error getting state: %s", err) + } + + // Determine the ID to import + var importId string + switch { + case step.ImportStateIdFunc != nil: + var err error + importId, err = step.ImportStateIdFunc(state) + if err != nil { + t.Fatal(err) + } + case step.ImportStateId != "": + importId = step.ImportStateId + default: + resource, err := testResource(step, state) + if err != nil { + t.Fatal(err) + } + importId = resource.Primary.ID + } + importId = step.ImportStateIdPrefix + importId + + // Create working directory for import tests + if step.Config == "" { + step.Config = cfg + if step.Config == "" { + t.Fatal("Cannot import state with no specified config") + } + } + importWd := helper.RequireNewWorkingDir(t) + defer importWd.Close() + importWd.RequireSetConfig(t, step.Config) + + err = runProviderCommand(t, func() error { + importWd.RequireInit(t) + return nil + }, importWd, c.ProviderFactories) + if err != nil { + t.Fatalf("Error running init: %s", err) + } + + err = runProviderCommand(t, func() error { + importWd.RequireImport(t, step.ResourceName, importId) + return nil + }, importWd, c.ProviderFactories) + if err != nil { + t.Fatalf("Error running import: %s", err) + } + + var importState *terraform.State + err = runProviderCommand(t, func() error { + importState = getState(t, wd) + return nil + }, wd, c.ProviderFactories) + if err != nil { + t.Fatalf("Error getting state: %s", err) + } + + // Go through the imported state and verify + if step.ImportStateCheck != nil { + var states []*terraform.InstanceState + for _, r := range importState.RootModule().Resources { + if r.Primary != nil { + is := r.Primary.DeepCopy() + is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type + states = append(states, is) + } + } + if err := step.ImportStateCheck(states); err != nil { + t.Fatal(err) + } + } + + // Verify that all the states match + if step.ImportStateVerify { + new := importState.RootModule().Resources + old := state.RootModule().Resources + + for _, r := range new { + // Find the existing resource + var oldR *terraform.ResourceState + for _, r2 := range old { + if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && 
r2.Type == r.Type { + oldR = r2 + break + } + } + if oldR == nil { + t.Fatalf( + "Failed state verification, resource with ID %s not found", + r.Primary.ID) + } + + // don't add empty flatmapped containers, so we can more easily + // compare the attributes + skipEmpty := func(k, v string) bool { + if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { + if v == "0" { + return true + } + } + return false + } + + // Compare their attributes + actual := make(map[string]string) + for k, v := range r.Primary.Attributes { + if skipEmpty(k, v) { + continue + } + actual[k] = v + } + + expected := make(map[string]string) + for k, v := range oldR.Primary.Attributes { + if skipEmpty(k, v) { + continue + } + expected[k] = v + } + + // Remove fields we're ignoring + for _, v := range step.ImportStateVerifyIgnore { + for k := range actual { + if strings.HasPrefix(k, v) { + delete(actual, k) + } + } + for k := range expected { + if strings.HasPrefix(k, v) { + delete(expected, k) + } + } + } + + if !reflect.DeepEqual(actual, expected) { + // Determine only the different attributes + for k, v := range expected { + if av, ok := actual[k]; ok && v == av { + delete(expected, k) + delete(actual, k) + } + } + + t.Fatalf( + "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+ + "\n\n%s\n\n%s", + spewConf.Sdump(actual), spewConf.Sdump(expected)) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go new file mode 100644 index 000000000..b625feae0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource/wait.go @@ -0,0 +1,109 @@ +package resource + +import ( + "context" + "errors" + "sync" + "time" +) + +// RetryContext is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. +// +// Cancellation from the passed in context will propagate through to the +// underlying StateChangeConf +func RetryContext(ctx context.Context, timeout time.Duration, f RetryFunc) error { + // These are used to pull the error out of the function; need a mutex to + // avoid a data race. + var resultErr error + var resultErrMu sync.Mutex + + c := &StateChangeConf{ + Pending: []string{"retryableerror"}, + Target: []string{"success"}, + Timeout: timeout, + MinTimeout: 500 * time.Millisecond, + Refresh: func() (interface{}, string, error) { + rerr := f() + + resultErrMu.Lock() + defer resultErrMu.Unlock() + + if rerr == nil { + resultErr = nil + return 42, "success", nil + } + + resultErr = rerr.Err + + if rerr.Retryable { + return 42, "retryableerror", nil + } + return nil, "quit", rerr.Err + }, + } + + _, waitErr := c.WaitForStateContext(ctx) + + // Need to acquire the lock here to be able to avoid race using resultErr as + // the return value + resultErrMu.Lock() + defer resultErrMu.Unlock() + + // resultErr may be nil because the wait timed out and resultErr was never + // set; this is still an error + if resultErr == nil { + return waitErr + } + // resultErr takes precedence over waitErr if both are set because it is + // more likely to be useful + return resultErr +} + +// Retry is a basic wrapper around StateChangeConf that will just retry +// a function until it no longer returns an error. 
+// +// Deprecated: Please use RetryContext to ensure proper plugin shutdown +func Retry(timeout time.Duration, f RetryFunc) error { + return RetryContext(context.Background(), timeout, f) +} + +// RetryFunc is the function retried until it succeeds. +type RetryFunc func() *RetryError + +// RetryError is the required return type of RetryFunc. It forces client code +// to choose whether or not a given error is retryable. +type RetryError struct { + Err error + Retryable bool +} + +// RetryableError is a helper to create a RetryError that's retryable from a +// given error. To prevent logic errors, will return an error when passed a +// nil error. +func RetryableError(err error) *RetryError { + if err == nil { + return &RetryError{ + Err: errors.New("empty retryable error received. " + + "This is a bug with the Terraform provider and should be " + + "reported as a GitHub issue in the provider repository."), + Retryable: false, + } + } + return &RetryError{Err: err, Retryable: true} +} + +// NonRetryableError is a helper to create a RetryError that's _not_ retryable +// from a given error. To prevent logic errors, will return an error when +// passed a nil error. +func NonRetryableError(err error) *RetryError { + if err == nil { + return &RetryError{ + Err: errors.New("empty non-retryable error received. " + + "This is a bug with the Terraform provider and should be " + + "reported as a GitHub issue in the provider repository."), + Retryable: false, + } + } + return &RetryError{Err: err, Retryable: false} +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/README.md b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/README.md new file mode 100644 index 000000000..28c83628e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/README.md @@ -0,0 +1,11 @@ +# Terraform Helper Lib: schema + +The `schema` package provides a high-level interface for writing resource +providers for Terraform. + +If you're writing a resource provider, we recommend you use this package. + +The interface exposed by this package is much friendlier than trying to +write to the Terraform API directly. The core Terraform API is low-level +and built for maximum flexibility and control, whereas this library is built +as a framework around that to more easily write common providers. diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go new file mode 100644 index 000000000..bb1fc1dbe --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/core_schema.go @@ -0,0 +1,368 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// StringKind represents the format a string is in. +type StringKind configschema.StringKind + +const ( + // StringPlain indicates a string is plain-text and requires no processing for display. + StringPlain = StringKind(configschema.StringPlain) + + // StringMarkdown indicates a string is in markdown format and may + // require additional processing to display. + StringMarkdown = StringKind(configschema.StringMarkdown) +) + +var ( + // DescriptionKind is the default StringKind of descriptions in this provider. + // It defaults to StringPlain but can be globally switched to StringMarkdown. 
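+	//
+	// For illustration (assumed usage, not defined in this package), a provider
+	// could opt in to markdown rendering of its descriptions from an init
+	// function:
+	//
+	//   func init() {
+	//       schema.DescriptionKind = schema.StringMarkdown
+	//   }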
+ DescriptionKind = StringPlain + + // SchemaDescriptionBuilder converts helper/schema.Schema Descriptions to configschema.Attribute + // and Block Descriptions. This method can be used to modify the description text prior to it + // being returned in the schema. + SchemaDescriptionBuilder = func(s *Schema) string { + return s.Description + } + + // ResourceDescriptionBuilder converts helper/schema.Resource Descriptions to configschema.Block + // Descriptions at the resource top level. This method can be used to modify the description prior + // to it being returned in the schema. + ResourceDescriptionBuilder = func(r *Resource) string { + return r.Description + } +) + +// The functions and methods in this file are concerned with the conversion +// of this package's schema model into the slightly-lower-level schema model +// used by Terraform core for configuration parsing. + +// CoreConfigSchema lowers the receiver to the schema model expected by +// Terraform core. +// +// This lower-level model has fewer features than the schema in this package, +// describing only the basic structure of configuration and state values we +// expect. The full schemaMap from this package is still required for full +// validation, handling of default values, etc. +// +// This method presumes a schema that passes InternalValidate, and so may +// panic or produce an invalid result if given an invalid schemaMap. +func (m schemaMap) CoreConfigSchema() *configschema.Block { + if len(m) == 0 { + // We return an actual (empty) object here, rather than a nil, + // because a nil result would mean that we don't have a schema at + // all, rather than that we have an empty one. + return &configschema.Block{} + } + + ret := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + BlockTypes: map[string]*configschema.NestedBlock{}, + } + + for name, schema := range m { + if schema.Elem == nil { + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + continue + } + if schema.Type == TypeMap { + // For TypeMap in particular, it isn't valid for Elem to be a + // *Resource (since that would be ambiguous in flatmap) and + // so Elem is treated as a TypeString schema if so. This matches + // how the field readers treat this situation, for compatibility + // with configurations targeting Terraform 0.11 and earlier. + if _, isResource := schema.Elem.(*Resource); isResource { + sch := *schema // shallow copy + sch.Elem = &Schema{ + Type: TypeString, + } + ret.Attributes[name] = sch.coreConfigSchemaAttribute() + continue + } + } + switch schema.ConfigMode { + case SchemaConfigModeAttr: + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + case SchemaConfigModeBlock: + ret.BlockTypes[name] = schema.coreConfigSchemaBlock() + default: // SchemaConfigModeAuto, or any other invalid value + if schema.Computed && !schema.Optional { + // Computed-only schemas are always handled as attributes, + // because they never appear in configuration. + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + continue + } + switch schema.Elem.(type) { + case *Schema, ValueType: + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + case *Resource: + ret.BlockTypes[name] = schema.coreConfigSchemaBlock() + default: + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem)) + } + } + } + + return ret +} + +// coreConfigSchemaAttribute prepares a configschema.Attribute representation +// of a schema. 
This is appropriate only for primitives or collections whose
+// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections
+// whose elem is a whole resource.
+func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute {
+	// The Schema.DefaultFunc capability adds some extra weirdness here since
+	// it can be combined with "Required: true" to create a situation where
+	// required-ness is conditional. Terraform Core doesn't share this concept,
+	// so we must sniff for this possibility here and conditionally turn
+	// off the "Required" flag if it looks like the DefaultFunc is going
+	// to provide a value.
+	// This is not 100% true to the original interface of DefaultFunc but
+	// works well enough for the EnvDefaultFunc and MultiEnvDefaultFunc
+	// situations, which are the main cases we care about.
+	//
+	// Note that this also has a consequence for commands that return schema
+	// information for documentation purposes: running those for certain
+	// providers will produce different results depending on which environment
+	// variables are set. We accept that weirdness in order to keep this
+	// interface to core otherwise simple.
+	reqd := s.Required
+	opt := s.Optional
+	if reqd && s.DefaultFunc != nil {
+		v, err := s.DefaultFunc()
+		// We can't report errors from here, so we'll instead just force
+		// "Required" to false and let the provider try calling its
+		// DefaultFunc again during the validate step, where it can then
+		// return the error.
+		if err != nil || (err == nil && v != nil) {
+			reqd = false
+			opt = true
+		}
+	}
+
+	desc := SchemaDescriptionBuilder(s)
+	descKind := configschema.StringKind(DescriptionKind)
+	if desc == "" {
+		// fallback to plain text if empty
+		descKind = configschema.StringPlain
+	}
+
+	return &configschema.Attribute{
+		Type:            s.coreConfigSchemaType(),
+		Optional:        opt,
+		Required:        reqd,
+		Computed:        s.Computed,
+		Sensitive:       s.Sensitive,
+		Description:     desc,
+		DescriptionKind: descKind,
+		Deprecated:      s.Deprecated != "",
+	}
+}
+
+// coreConfigSchemaBlock prepares a configschema.NestedBlock representation of
+// a schema. This is appropriate only for collections whose Elem is an instance
+// of Resource, and will panic otherwise.
+func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock {
+	ret := &configschema.NestedBlock{}
+	if nested := s.Elem.(*Resource).coreConfigSchema(); nested != nil {
+		ret.Block = *nested
+
+		desc := SchemaDescriptionBuilder(s)
+		descKind := configschema.StringKind(DescriptionKind)
+		if desc == "" {
+			// fallback to plain text if empty
+			descKind = configschema.StringPlain
+		}
+		// set these on the block from the attribute Schema
+		ret.Block.Description = desc
+		ret.Block.DescriptionKind = descKind
+		ret.Block.Deprecated = s.Deprecated != ""
+	}
+	switch s.Type {
+	case TypeList:
+		ret.Nesting = configschema.NestingList
+	case TypeSet:
+		ret.Nesting = configschema.NestingSet
+	case TypeMap:
+		ret.Nesting = configschema.NestingMap
+	default:
+		// Should never happen for a valid schema
+		panic(fmt.Errorf("invalid s.Type %s for s.Elem being resource", s.Type))
+	}
+
+	ret.MinItems = s.MinItems
+	ret.MaxItems = s.MaxItems
+
+	if s.Required && s.MinItems == 0 {
+		// configschema doesn't have a "required" representation for nested
+		// blocks, but we can fake it by requiring at least one item.
+ ret.MinItems = 1 + } + if s.Optional && s.MinItems > 0 { + // Historically helper/schema would ignore MinItems if Optional were + // set, so we must mimic this behavior here to ensure that providers + // relying on that undocumented behavior can continue to operate as + // they did before. + ret.MinItems = 0 + } + if s.Computed && !s.Optional { + // MinItems/MaxItems are meaningless for computed nested blocks, since + // they are never set by the user anyway. This ensures that we'll never + // generate weird errors about them. + ret.MinItems = 0 + ret.MaxItems = 0 + } + + return ret +} + +// coreConfigSchemaType determines the core config schema type that corresponds +// to a particular schema's type. +func (s *Schema) coreConfigSchemaType() cty.Type { + switch s.Type { + case TypeString: + return cty.String + case TypeBool: + return cty.Bool + case TypeInt, TypeFloat: + // configschema doesn't distinguish int and float, so helper/schema + // will deal with this as an additional validation step after + // configuration has been parsed and decoded. + return cty.Number + case TypeList, TypeSet, TypeMap: + var elemType cty.Type + switch set := s.Elem.(type) { + case *Schema: + elemType = set.coreConfigSchemaType() + case ValueType: + // This represents a mistake in the provider code, but it's a + // common one so we'll just shim it. + elemType = (&Schema{Type: set}).coreConfigSchemaType() + case *Resource: + // By default we construct a NestedBlock in this case, but this + // behavior is selected either for computed-only schemas or + // when ConfigMode is explicitly SchemaConfigModeBlock. + // See schemaMap.CoreConfigSchema for the exact rules. + elemType = set.coreConfigSchema().ImpliedType() + default: + if set != nil { + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", s.Elem)) + } + // Some pre-existing schemas assume string as default, so we need + // to be compatible with them. + elemType = cty.String + } + switch s.Type { + case TypeList: + return cty.List(elemType) + case TypeSet: + return cty.Set(elemType) + case TypeMap: + return cty.Map(elemType) + default: + // can never get here in practice, due to the case we're inside + panic("invalid collection type") + } + default: + // should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Type %s", s.Type)) + } +} + +// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema on +// the resource's schema. CoreConfigSchema adds the implicitly required "id" +// attribute for top level resources if it doesn't exist. 
+func (r *Resource) CoreConfigSchema() *configschema.Block { + block := r.coreConfigSchema() + + desc := ResourceDescriptionBuilder(r) + descKind := configschema.StringKind(DescriptionKind) + if desc == "" { + // fallback to plain text if empty + descKind = configschema.StringPlain + } + + // Only apply Resource Description, Kind, Deprecation at top level + block.Description = desc + block.DescriptionKind = descKind + block.Deprecated = r.DeprecationMessage != "" + + if block.Attributes == nil { + block.Attributes = map[string]*configschema.Attribute{} + } + + // Add the implicitly required "id" field if it doesn't exist + if block.Attributes["id"] == nil { + block.Attributes["id"] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + Computed: true, + } + } + + _, timeoutsAttr := block.Attributes[TimeoutsConfigKey] + _, timeoutsBlock := block.BlockTypes[TimeoutsConfigKey] + + // Insert configured timeout values into the schema, as long as the schema + // didn't define anything else by that name. + if r.Timeouts != nil && !timeoutsAttr && !timeoutsBlock { + timeouts := configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + } + + if r.Timeouts.Create != nil { + timeouts.Attributes[TimeoutCreate] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Read != nil { + timeouts.Attributes[TimeoutRead] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Update != nil { + timeouts.Attributes[TimeoutUpdate] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Delete != nil { + timeouts.Attributes[TimeoutDelete] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Default != nil { + timeouts.Attributes[TimeoutDefault] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + block.BlockTypes[TimeoutsConfigKey] = &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: timeouts, + } + } + + return block +} + +func (r *Resource) coreConfigSchema() *configschema.Block { + return schemaMap(r.Schema).CoreConfigSchema() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/data_source_resource_shim.go new file mode 100644 index 000000000..8d93750ae --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/data_source_resource_shim.go @@ -0,0 +1,59 @@ +package schema + +import ( + "fmt" +) + +// DataSourceResourceShim takes a Resource instance describing a data source +// (with a Read implementation and a Schema, at least) and returns a new +// Resource instance with additional Create and Delete implementations that +// allow the data source to be used as a resource. +// +// This is a backward-compatibility layer for data sources that were formerly +// read-only resources before the data source concept was added. It should not +// be used for any *new* data sources. +// +// The Read function for the data source *must* call d.SetId with a non-empty +// id in order for this shim to function as expected. +// +// The provided Resource instance, and its schema, will be modified in-place +// to make it suitable for use as a full resource. +func DataSourceResourceShim(name string, dataSource *Resource) *Resource { + // Recursively, in-place adjust the schema so that it has ForceNew + // on any user-settable resource. 
+ dataSourceResourceShimAdjustSchema(dataSource.Schema) + + dataSource.Create = CreateFunc(dataSource.Read) + dataSource.Delete = func(d *ResourceData, meta interface{}) error { + d.SetId("") + return nil + } + dataSource.Update = nil // should already be nil, but let's make sure + + // FIXME: Link to some further docs either on the website or in the + // changelog, once such a thing exists. + dataSource.DeprecationMessage = fmt.Sprintf( + "using %s as a resource is deprecated; consider using the data source instead", + name, + ) + + return dataSource +} + +func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) { + for _, s := range schema { + // If the attribute is configurable then it must be ForceNew, + // since we have no Update implementation. + if s.Required || s.Optional { + s.ForceNew = true + } + + // If the attribute is a nested resource, we need to recursively + // apply these same adjustments to it. + if s.Elem != nil { + if r, ok := s.Elem.(*Resource); ok { + dataSourceResourceShimAdjustSchema(r.Schema) + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/equal.go new file mode 100644 index 000000000..d5e20e038 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/equal.go @@ -0,0 +1,6 @@ +package schema + +// Equal is an interface that checks for deep equality between two objects. +type Equal interface { + Equal(interface{}) bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go new file mode 100644 index 000000000..c7721bcd3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader.go @@ -0,0 +1,335 @@ +package schema + +import ( + "fmt" + "strconv" +) + +// FieldReaders are responsible for decoding fields out of data into +// the proper typed representation. ResourceData uses this to query data +// out of multiple sources: config, state, diffs, etc. +type FieldReader interface { + ReadField([]string) (FieldReadResult, error) +} + +// FieldReadResult encapsulates all the resulting data from reading +// a field. +type FieldReadResult struct { + // Value is the actual read value. NegValue is the _negative_ value + // or the items that should be removed (if they existed). NegValue + // doesn't make sense for primitives but is important for any + // container types such as maps, sets, lists. + Value interface{} + ValueProcessed interface{} + + // Exists is true if the field was found in the data. False means + // it wasn't found if there was no error. + Exists bool + + // Computed is true if the field was found but the value + // is computed. + Computed bool +} + +// ValueOrZero returns the value of this result or the zero value of the +// schema type, ensuring a consistent non-nil return value. +func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} { + if r.Value != nil { + return r.Value + } + + return s.ZeroValue() +} + +// addrToSchema finds the final element schema for the given address +// and the given schema. It returns all the schemas that led to the final +// schema. These are in order of the address (out to in). 
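As a rough illustration of the address walk described above (a fragment only: addrToSchema is unexported, so this would have to live inside package schema, and the fmt import is assumed):

func sketchAddrToSchema() {
	m := map[string]*Schema{
		"ports": {Type: TypeList, Elem: &Schema{Type: TypeInt}},
	}

	// "ports"   resolves to [TypeList]
	// "ports.#" resolves to [TypeList, TypeInt] (the count is read as an int)
	// "ports.0" resolves to [TypeList, TypeInt] (the list element schema)
	schemas := addrToSchema([]string{"ports", "0"}, m)
	fmt.Println(len(schemas)) // 2, and the last entry has Type == TypeInt
}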
+func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema { + current := &Schema{ + Type: typeObject, + Elem: schemaMap, + } + + // If we aren't given an address, then the user is requesting the + // full object, so we return the special value which is the full object. + if len(addr) == 0 { + return []*Schema{current} + } + + result := make([]*Schema, 0, len(addr)) + for len(addr) > 0 { + k := addr[0] + addr = addr[1:] + + REPEAT: + // We want to trim off the first "typeObject" since its not a + // real lookup that people do. i.e. []string{"foo"} in a structure + // isn't {typeObject, typeString}, its just a {typeString}. + if len(result) > 0 || current.Type != typeObject { + result = append(result, current) + } + + switch t := current.Type; t { + case TypeBool, TypeInt, TypeFloat, TypeString: + if len(addr) > 0 { + return nil + } + case TypeList, TypeSet: + isIndex := len(addr) > 0 && addr[0] == "#" + + switch v := current.Elem.(type) { + case *Resource: + current = &Schema{ + Type: typeObject, + Elem: v.Schema, + } + case *Schema: + current = v + case ValueType: + current = &Schema{Type: v} + default: + // we may not know the Elem type and are just looking for the + // index + if isIndex { + break + } + + if len(addr) == 0 { + // we've processed the address, so return what we've + // collected + return result + } + + if len(addr) == 1 { + if _, err := strconv.Atoi(addr[0]); err == nil { + // we're indexing a value without a schema. This can + // happen if the list is nested in another schema type. + // Default to a TypeString like we do with a map + current = &Schema{Type: TypeString} + break + } + } + + return nil + } + + // If we only have one more thing and the next thing + // is a #, then we're accessing the index which is always + // an int. + if isIndex { + current = &Schema{Type: TypeInt} + break + } + + case TypeMap: + if len(addr) > 0 { + switch v := current.Elem.(type) { + case ValueType: + current = &Schema{Type: v} + case *Schema: + current, _ = current.Elem.(*Schema) + default: + // maps default to string values. This is all we can have + // if this is nested in another list or map. + current = &Schema{Type: TypeString} + } + } + case typeObject: + // If we're already in the object, then we want to handle Sets + // and Lists specially. Basically, their next key is the lookup + // key (the set value or the list element). For these scenarios, + // we just want to skip it and move to the next element if there + // is one. + if len(result) > 0 { + lastType := result[len(result)-2].Type + if lastType == TypeSet || lastType == TypeList { + if len(addr) == 0 { + break + } + + k = addr[0] + addr = addr[1:] + } + } + + m := current.Elem.(map[string]*Schema) + val, ok := m[k] + if !ok { + return nil + } + + current = val + goto REPEAT + } + } + + return result +} + +// readListField is a generic method for reading a list field out of a +// a FieldReader. It does this based on the assumption that there is a key +// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc. +// after that point. 
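The flatmap layout that readListField expects ("foo.#" plus "foo.0", "foo.1", ...) can be exercised end to end with the exported MapFieldReader and BasicMapReader defined later in this package; a self-contained sketch with made-up keys:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func main() {
	r := &schema.MapFieldReader{
		Schema: map[string]*schema.Schema{
			"ports": {Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeInt}},
		},
		Map: schema.BasicMapReader(map[string]string{
			"ports.#": "2", // list length
			"ports.0": "80",
			"ports.1": "443",
		}),
	}

	res, err := r.ReadField([]string{"ports"})
	fmt.Println(res.Value, res.Exists, err) // [80 443] true <nil>
}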
+func readListField( + r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) { + addrPadded := make([]string, len(addr)+1) + copy(addrPadded, addr) + addrPadded[len(addrPadded)-1] = "#" + + // Get the number of elements in the list + countResult, err := r.ReadField(addrPadded) + if err != nil { + return FieldReadResult{}, err + } + if !countResult.Exists { + // No count, means we have no list + countResult.Value = 0 + } + + // If we have an empty list, then return an empty list + if countResult.Computed || countResult.Value.(int) == 0 { + return FieldReadResult{ + Value: []interface{}{}, + Exists: countResult.Exists, + Computed: countResult.Computed, + }, nil + } + + // Go through each count, and get the item value out of it + result := make([]interface{}, countResult.Value.(int)) + for i := range result { + is := strconv.FormatInt(int64(i), 10) + addrPadded[len(addrPadded)-1] = is + rawResult, err := r.ReadField(addrPadded) + if err != nil { + return FieldReadResult{}, err + } + if !rawResult.Exists { + // This should never happen, because by the time the data + // gets to the FieldReaders, all the defaults should be set by + // Schema. + rawResult.Value = nil + } + + result[i] = rawResult.Value + } + + return FieldReadResult{ + Value: result, + Exists: true, + }, nil +} + +// readObjectField is a generic method for reading objects out of FieldReaders +// based on the assumption that building an address of []string{k, FIELD} +// will result in the proper field data. +func readObjectField( + r FieldReader, + addr []string, + schema map[string]*Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + exists := false + for field, s := range schema { + addrRead := make([]string, len(addr), len(addr)+1) + copy(addrRead, addr) + addrRead = append(addrRead, field) + rawResult, err := r.ReadField(addrRead) + if err != nil { + return FieldReadResult{}, err + } + if rawResult.Exists { + exists = true + } + + result[field] = rawResult.ValueOrZero(s) + } + + return FieldReadResult{ + Value: result, + Exists: exists, + }, nil +} + +// convert map values to the proper primitive type based on schema.Elem +func mapValuesToPrimitive(k string, m map[string]interface{}, schema *Schema) error { + elemType, err := getValueType(k, schema) + if err != nil { + return err + } + + switch elemType { + case TypeInt, TypeFloat, TypeBool: + for k, v := range m { + vs, ok := v.(string) + if !ok { + continue + } + + v, err := stringToPrimitive(vs, false, &Schema{Type: elemType}) + if err != nil { + return err + } + + m[k] = v + } + } + return nil +} + +func stringToPrimitive( + value string, computed bool, schema *Schema) (interface{}, error) { + var returnVal interface{} + switch schema.Type { + case TypeBool: + if value == "" { + returnVal = false + break + } + if computed { + break + } + + v, err := strconv.ParseBool(value) + if err != nil { + return nil, err + } + + returnVal = v + case TypeFloat: + if value == "" { + returnVal = 0.0 + break + } + if computed { + break + } + + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return nil, err + } + + returnVal = v + case TypeInt: + if value == "" { + returnVal = 0 + break + } + if computed { + break + } + + v, err := strconv.ParseInt(value, 0, 0) + if err != nil { + return nil, err + } + + returnVal = int(v) + case TypeString: + returnVal = value + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } + + return returnVal, nil +} diff --git 
a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go new file mode 100644 index 000000000..3f1f5e8ab --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_config.go @@ -0,0 +1,328 @@ +package schema + +import ( + "fmt" + "log" + "strconv" + "strings" + "sync" + + "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// ConfigFieldReader reads fields out of an untyped map[string]string to the +// best of its ability. It also applies defaults from the Schema. (The other +// field readers do not need default handling because they source fully +// populated data structures.) +type ConfigFieldReader struct { + Config *terraform.ResourceConfig + Schema map[string]*Schema + + indexMaps map[string]map[string]int + once sync.Once +} + +func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) { + r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) }) + return r.readField(address, false) +} + +func (r *ConfigFieldReader) readField( + address []string, nested bool) (FieldReadResult, error) { + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + return FieldReadResult{}, nil + } + + if !nested { + // If we have a set anywhere in the address, then we need to + // read that set out in order and actually replace that part of + // the address with the real list index. i.e. set.50 might actually + // map to set.12 in the config, since it is in list order in the + // config, not indexed by set value. + for i, v := range schemaList { + // Sets are the only thing that cause this issue. + if v.Type != TypeSet { + continue + } + + // If we're at the end of the list, then we don't have to worry + // about this because we're just requesting the whole set. + if i == len(schemaList)-1 { + continue + } + + // If we're looking for the count, then ignore... + if address[i+1] == "#" { + continue + } + + indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")] + if !ok { + // Get the set so we can get the index map that tells us the + // mapping of the hash code to the list index + _, err := r.readSet(address[:i+1], v) + if err != nil { + return FieldReadResult{}, err + } + indexMap = r.indexMaps[strings.Join(address[:i+1], ".")] + } + + index, ok := indexMap[address[i+1]] + if !ok { + return FieldReadResult{}, nil + } + + address[i+1] = strconv.FormatInt(int64(index), 10) + } + } + + k := strings.Join(address, ".") + schema := schemaList[len(schemaList)-1] + + switch schema.Type { + case TypeList, TypeSet, TypeMap, typeObject: + // Check if the value itself is unknown. + // The new protocol shims will add unknown values to this list of + // ComputedKeys. 
This is the only way we have to indicate that a + // collection is unknown in the config + for _, unknown := range r.Config.ComputedKeys { + if k == unknown { + log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k) + return FieldReadResult{Computed: true, Exists: true}, nil + } + } + } + + switch schema.Type { + case TypeBool, TypeFloat, TypeInt, TypeString: + return r.readPrimitive(k, schema) + case TypeList: + return readListField(&nestedConfigFieldReader{r}, address, schema) + case TypeMap: + return r.readMap(k, schema) + case TypeSet: + return r.readSet(address, schema) + case typeObject: + return readObjectField( + &nestedConfigFieldReader{r}, + address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } +} + +func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) { + // We want both the raw value and the interpolated. We use the interpolated + // to store actual values and we use the raw one to check for + // computed keys. Actual values are obtained in the switch, depending on + // the type of the raw value. + mraw, ok := r.Config.GetRaw(k) + if !ok { + // check if this is from an interpolated field by seeing if it exists + // in the config + _, ok := r.Config.Get(k) + if !ok { + // this really doesn't exist + return FieldReadResult{}, nil + } + + // We couldn't fetch the value from a nested data structure, so treat the + // raw value as an interpolation string. The mraw value is only used + // for the type switch below. + mraw = "${INTERPOLATED}" + } + + result := make(map[string]interface{}) + computed := false + switch m := mraw.(type) { + case string: + // This is a map which has come out of an interpolated variable, so we + // can just get the value directly from config. Values cannot be computed + // currently. + v, _ := r.Config.Get(k) + + // If this isn't a map[string]interface, it must be computed. + mapV, ok := v.(map[string]interface{}) + if !ok { + return FieldReadResult{ + Exists: true, + Computed: true, + }, nil + } + + // Otherwise we can proceed as usual. 
+ for i, iv := range mapV { + result[i] = iv + } + case []interface{}: + for i, innerRaw := range m { + for ik := range innerRaw.(map[string]interface{}) { + key := fmt.Sprintf("%s.%d.%s", k, i, ik) + if r.Config.IsComputed(key) { + computed = true + break + } + + v, _ := r.Config.Get(key) + result[ik] = v + } + } + case []map[string]interface{}: + for i, innerRaw := range m { + for ik := range innerRaw { + key := fmt.Sprintf("%s.%d.%s", k, i, ik) + if r.Config.IsComputed(key) { + computed = true + break + } + + v, _ := r.Config.Get(key) + result[ik] = v + } + } + case map[string]interface{}: + for ik := range m { + key := fmt.Sprintf("%s.%s", k, ik) + if r.Config.IsComputed(key) { + computed = true + break + } + + v, _ := r.Config.Get(key) + result[ik] = v + } + case nil: + // the map may have been empty on the configuration, so we leave the + // empty result + default: + panic(fmt.Sprintf("unknown type: %#v", mraw)) + } + + err := mapValuesToPrimitive(k, result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var value interface{} + if !computed { + value = result + } + + return FieldReadResult{ + Value: value, + Exists: true, + Computed: computed, + }, nil +} + +func (r *ConfigFieldReader) readPrimitive( + k string, schema *Schema) (FieldReadResult, error) { + raw, ok := r.Config.Get(k) + if !ok { + // Nothing in config, but we might still have a default from the schema + var err error + raw, err = schema.DefaultValue() + if err != nil { + return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err) + } + + if raw == nil { + return FieldReadResult{}, nil + } + } + + var result string + if err := mapstructure.WeakDecode(raw, &result); err != nil { + return FieldReadResult{}, err + } + + computed := r.Config.IsComputed(k) + returnVal, err := stringToPrimitive(result, computed, schema) + if err != nil { + return FieldReadResult{}, err + } + + return FieldReadResult{ + Value: returnVal, + Exists: true, + Computed: computed, + }, nil +} + +func (r *ConfigFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + indexMap := make(map[string]int) + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + raw, err := readListField(&nestedConfigFieldReader{r}, address, schema) + if err != nil { + return FieldReadResult{}, err + } + if !raw.Exists { + return FieldReadResult{Value: set}, nil + } + + // If the list is computed, the set is necessarilly computed + if raw.Computed { + return FieldReadResult{ + Value: set, + Exists: true, + Computed: raw.Computed, + }, nil + } + + // Build up the set from the list elements + for i, v := range raw.Value.([]interface{}) { + // Check if any of the keys in this item are computed + computed := r.hasComputedSubKeys( + fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema) + + code := set.add(v, computed) + indexMap[code] = i + } + + r.indexMaps[strings.Join(address, ".")] = indexMap + + return FieldReadResult{ + Value: set, + Exists: true, + }, nil +} + +// hasComputedSubKeys walks through a schema and returns whether or not the +// given key contains any subkeys that are computed. +func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool { + prefix := key + "." 
+ + switch t := schema.Elem.(type) { + case *Resource: + for k, schema := range t.Schema { + if r.Config.IsComputed(prefix + k) { + return true + } + + if r.hasComputedSubKeys(prefix+k, schema) { + return true + } + } + } + + return false +} + +// nestedConfigFieldReader is a funny little thing that just wraps a +// ConfigFieldReader to call readField when ReadField is called so that +// we don't recalculate the set rewrites in the address, which leads to +// an infinite loop. +type nestedConfigFieldReader struct { + Reader *ConfigFieldReader +} + +func (r *nestedConfigFieldReader) ReadField( + address []string) (FieldReadResult, error) { + return r.Reader.readField(address, true) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go new file mode 100644 index 000000000..642e7f32e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_diff.go @@ -0,0 +1,245 @@ +package schema + +import ( + "fmt" + "strings" + + "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// DiffFieldReader reads fields out of a diff structures. +// +// It also requires access to a Reader that reads fields from the structure +// that the diff was derived from. This is usually the state. This is required +// because a diff on its own doesn't have complete data about full objects +// such as maps. +// +// The Source MUST be the data that the diff was derived from. If it isn't, +// the behavior of this struct is undefined. +// +// Reading fields from a DiffFieldReader is identical to reading from +// Source except the diff will be applied to the end result. +// +// The "Exists" field on the result will be set to true if the complete +// field exists whether its from the source, diff, or a combination of both. +// It cannot be determined whether a retrieved value is composed of +// diff elements. +type DiffFieldReader struct { + Diff *terraform.InstanceDiff + Source FieldReader + Schema map[string]*Schema + + // cache for memoizing ReadField calls. + cache map[string]cachedFieldReadResult +} + +type cachedFieldReadResult struct { + val FieldReadResult + err error +} + +func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) { + if r.cache == nil { + r.cache = make(map[string]cachedFieldReadResult) + } + + // Create the cache key by joining around a value that isn't a valid part + // of an address. This assumes that the Source and Schema are not changed + // for the life of this DiffFieldReader. 
+ cacheKey := strings.Join(address, "|") + if cached, ok := r.cache[cacheKey]; ok { + return cached.val, cached.err + } + + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + r.cache[cacheKey] = cachedFieldReadResult{} + return FieldReadResult{}, nil + } + + var res FieldReadResult + var err error + + schema := schemaList[len(schemaList)-1] + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + res, err = r.readPrimitive(address, schema) + case TypeList: + res, err = readListField(r, address, schema) + case TypeMap: + res, err = r.readMap(address, schema) + case TypeSet: + res, err = r.readSet(address, schema) + case typeObject: + res, err = readObjectField(r, address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) + } + + r.cache[cacheKey] = cachedFieldReadResult{ + val: res, + err: err, + } + return res, err +} + +func (r *DiffFieldReader) readMap( + address []string, schema *Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + resultSet := false + + // First read the map from the underlying source + source, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + if source.Exists { + // readMap may return a nil value, or an unknown value placeholder in + // some cases, causing the type assertion to panic if we don't assign the ok value + result, _ = source.Value.(map[string]interface{}) + resultSet = true + } + + // Next, read all the elements we have in our diff, and apply + // the diff to our result. + prefix := strings.Join(address, ".") + "." + for k, v := range r.Diff.Attributes { + if !strings.HasPrefix(k, prefix) { + continue + } + if strings.HasPrefix(k, prefix+"%") { + // Ignore the count field + continue + } + + resultSet = true + + k = k[len(prefix):] + if v.NewRemoved { + delete(result, k) + continue + } + + result[k] = v.New + } + + key := address[len(address)-1] + err = mapValuesToPrimitive(key, result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var resultVal interface{} + if resultSet { + resultVal = result + } + + return FieldReadResult{ + Value: resultVal, + Exists: resultSet, + }, nil +} + +func (r *DiffFieldReader) readPrimitive( + address []string, schema *Schema) (FieldReadResult, error) { + result, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + + attrD, ok := r.Diff.Attributes[strings.Join(address, ".")] + if !ok { + return result, nil + } + + var resultVal string + if !attrD.NewComputed { + resultVal = attrD.New + if attrD.NewExtra != nil { + result.ValueProcessed = resultVal + if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil { + return FieldReadResult{}, err + } + } + } + + result.Computed = attrD.NewComputed + result.Exists = true + result.Value, err = stringToPrimitive(resultVal, false, schema) + if err != nil { + return FieldReadResult{}, err + } + + return result, nil +} + +func (r *DiffFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + // copy address to ensure we don't modify the argument + address = append([]string(nil), address...) + + prefix := strings.Join(address, ".") + "." 
+ + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + // Go through the map and find all the set items + for k, d := range r.Diff.Attributes { + if d.NewRemoved { + // If the field is removed, we always ignore it + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + if strings.HasSuffix(k, "#") { + // Ignore any count field + continue + } + + // Split the key, since it might be a sub-object like "idx.field" + parts := strings.Split(k[len(prefix):], ".") + idx := parts[0] + + raw, err := r.ReadField(append(address, idx)) + if err != nil { + return FieldReadResult{}, err + } + if !raw.Exists { + // This shouldn't happen because we just verified it does exist + panic("missing field in set: " + k + "." + idx) + } + + set.Add(raw.Value) + } + + // Determine if the set "exists". It exists if there are items or if + // the diff explicitly wanted it empty. + exists := set.Len() > 0 + if !exists { + // We could check if the diff value is "0" here but I think the + // existence of "#" on its own is enough to show it existed. This + // protects us in the future from the zero value changing from + // "0" to "" breaking us (if that were to happen). + if _, ok := r.Diff.Attributes[prefix+"#"]; ok { + exists = true + } + } + + if !exists { + result, err := r.Source.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + if result.Exists { + return result, nil + } + } + + return FieldReadResult{ + Value: set, + Exists: exists, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go new file mode 100644 index 000000000..092dd7f68 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_map.go @@ -0,0 +1,202 @@ +package schema + +import ( + "fmt" + "strings" +) + +// MapFieldReader reads fields out of an untyped map[string]string to +// the best of its ability. +type MapFieldReader struct { + Map MapReader + Schema map[string]*Schema +} + +func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) { + k := strings.Join(address, ".") + schemaList := addrToSchema(address, r.Schema) + if len(schemaList) == 0 { + return FieldReadResult{}, nil + } + + schema := schemaList[len(schemaList)-1] + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + return r.readPrimitive(address, schema) + case TypeList: + return readListField(r, address, schema) + case TypeMap: + return r.readMap(k, schema) + case TypeSet: + return r.readSet(address, schema) + case typeObject: + return readObjectField(r, address, schema.Elem.(map[string]*Schema)) + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } +} + +func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) { + result := make(map[string]interface{}) + resultSet := false + + // If the name of the map field is directly in the map with an + // empty string, it means that the map is being deleted, so mark + // that is is set. + if v, ok := r.Map.Access(k); ok && v == "" { + resultSet = true + } + + prefix := k + "." 
+ r.Map.Range(func(k, v string) bool { + if strings.HasPrefix(k, prefix) { + resultSet = true + + key := k[len(prefix):] + if key != "%" && key != "#" { + result[key] = v + } + } + + return true + }) + + err := mapValuesToPrimitive(k, result, schema) + if err != nil { + return FieldReadResult{}, nil + } + + var resultVal interface{} + if resultSet { + resultVal = result + } + + return FieldReadResult{ + Value: resultVal, + Exists: resultSet, + }, nil +} + +func (r *MapFieldReader) readPrimitive( + address []string, schema *Schema) (FieldReadResult, error) { + k := strings.Join(address, ".") + result, ok := r.Map.Access(k) + if !ok { + return FieldReadResult{}, nil + } + + returnVal, err := stringToPrimitive(result, false, schema) + if err != nil { + return FieldReadResult{}, err + } + + return FieldReadResult{ + Value: returnVal, + Exists: true, + }, nil +} + +func (r *MapFieldReader) readSet( + address []string, schema *Schema) (FieldReadResult, error) { + // copy address to ensure we don't modify the argument + address = append([]string(nil), address...) + + // Get the number of elements in the list + countRaw, err := r.readPrimitive( + append(address, "#"), &Schema{Type: TypeInt}) + if err != nil { + return FieldReadResult{}, err + } + if !countRaw.Exists { + // No count, means we have no list + countRaw.Value = 0 + } + + // Create the set that will be our result + set := schema.ZeroValue().(*Set) + + // If we have an empty list, then return an empty list + if countRaw.Computed || countRaw.Value.(int) == 0 { + return FieldReadResult{ + Value: set, + Exists: countRaw.Exists, + Computed: countRaw.Computed, + }, nil + } + + // Go through the map and find all the set items + prefix := strings.Join(address, ".") + "." + countExpected := countRaw.Value.(int) + countActual := make(map[string]struct{}) + completed := r.Map.Range(func(k, _ string) bool { + if !strings.HasPrefix(k, prefix) { + return true + } + if strings.HasPrefix(k, prefix+"#") { + // Ignore the count field + return true + } + + // Split the key, since it might be a sub-object like "idx.field" + parts := strings.Split(k[len(prefix):], ".") + idx := parts[0] + + var raw FieldReadResult + raw, err = r.ReadField(append(address, idx)) + if err != nil { + return false + } + if !raw.Exists { + // This shouldn't happen because we just verified it does exist + panic("missing field in set: " + k + "." + idx) + } + + set.Add(raw.Value) + + // Due to the way multimap readers work, if we've seen the number + // of fields we expect, then exit so that we don't read later values. + // For example: the "set" map might have "ports.#", "ports.0", and + // "ports.1", but the "state" map might have those plus "ports.2". + // We don't want "ports.2" + countActual[idx] = struct{}{} + if len(countActual) >= countExpected { + return false + } + + return true + }) + if !completed && err != nil { + return FieldReadResult{}, err + } + + return FieldReadResult{ + Value: set, + Exists: true, + }, nil +} + +// MapReader is an interface that is given to MapFieldReader for accessing +// a "map". This can be used to have alternate implementations. For a basic +// map[string]string, use BasicMapReader. +type MapReader interface { + Access(string) (string, bool) + Range(func(string, string) bool) bool +} + +// BasicMapReader implements MapReader for a single map. 
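Because MapReader is deliberately small, alternate implementations are easy to write; a hypothetical one backed by ordered key/value pairs, shown only to illustrate the Access/Range contract that MapFieldReader relies on:

// pairReader is an illustrative MapReader over a slice of key/value pairs.
type pairReader [][2]string

func (p pairReader) Access(k string) (string, bool) {
	for _, kv := range p {
		if kv[0] == k {
			return kv[1], true
		}
	}
	return "", false
}

func (p pairReader) Range(f func(string, string) bool) bool {
	for _, kv := range p {
		if !f(kv[0], kv[1]) {
			return false // the callback asked to stop early
		}
	}
	return true
}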
+type BasicMapReader map[string]string + +func (r BasicMapReader) Access(k string) (string, bool) { + v, ok := r[k] + return v, ok +} + +func (r BasicMapReader) Range(f func(string, string) bool) bool { + for k, v := range r { + if cont := f(k, v); !cont { + return false + } + } + + return true +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_multi.go new file mode 100644 index 000000000..89ad3a86f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_reader_multi.go @@ -0,0 +1,63 @@ +package schema + +import ( + "fmt" +) + +// MultiLevelFieldReader reads from other field readers, +// merging their results along the way in a specific order. You can specify +// "levels" and name them in order to read only an exact level or up to +// a specific level. +// +// This is useful for saying things such as "read the field from the state +// and config and merge them" or "read the latest value of the field". +type MultiLevelFieldReader struct { + Readers map[string]FieldReader + Levels []string +} + +func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) { + return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1]) +} + +func (r *MultiLevelFieldReader) ReadFieldExact( + address []string, level string) (FieldReadResult, error) { + reader, ok := r.Readers[level] + if !ok { + return FieldReadResult{}, fmt.Errorf( + "Unknown reader level: %s", level) + } + + result, err := reader.ReadField(address) + if err != nil { + return FieldReadResult{}, fmt.Errorf( + "Error reading level %s: %s", level, err) + } + + return result, nil +} + +func (r *MultiLevelFieldReader) ReadFieldMerge( + address []string, level string) (FieldReadResult, error) { + var result FieldReadResult + for _, l := range r.Levels { + if r, ok := r.Readers[l]; ok { + out, err := r.ReadField(address) + if err != nil { + return FieldReadResult{}, fmt.Errorf( + "Error reading level %s: %s", l, err) + } + + // TODO: computed + if out.Exists { + result = out + } + } + + if l == level { + break + } + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer.go new file mode 100644 index 000000000..9abc41b54 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer.go @@ -0,0 +1,8 @@ +package schema + +// FieldWriters are responsible for writing fields by address into +// a proper typed representation. ResourceData uses this to write new data +// into existing sources. +type FieldWriter interface { + WriteField([]string, interface{}) error +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go new file mode 100644 index 000000000..85d05be4c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/field_writer_map.go @@ -0,0 +1,357 @@ +package schema + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + + "github.com/mitchellh/mapstructure" +) + +// MapFieldWriter writes data into a single map[string]string structure. +type MapFieldWriter struct { + Schema map[string]*Schema + + lock sync.Mutex + result map[string]string +} + +// Map returns the underlying map that is being written to. 
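A short sketch (not part of the patch; the schema key is invented) of MapFieldWriter producing the same flatmap form that the readers above consume:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func main() {
	w := &schema.MapFieldWriter{
		Schema: map[string]*schema.Schema{
			"ports": {Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeInt}},
		},
	}

	if err := w.WriteField([]string{"ports"}, []interface{}{80, 443}); err != nil {
		panic(err)
	}

	// Prints the flatmap mirror of the list:
	// map[ports.#:2 ports.0:80 ports.1:443]
	fmt.Println(w.Map())
}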
+func (w *MapFieldWriter) Map() map[string]string { + w.lock.Lock() + defer w.lock.Unlock() + if w.result == nil { + w.result = make(map[string]string) + } + + return w.result +} + +func (w *MapFieldWriter) unsafeWriteField(addr string, value string) { + w.lock.Lock() + defer w.lock.Unlock() + if w.result == nil { + w.result = make(map[string]string) + } + + w.result[addr] = value +} + +// clearTree clears a field and any sub-fields of the given address out of the +// map. This should be used to reset some kind of complex structures (namely +// sets) before writing to make sure that any conflicting data is removed (for +// example, if the set was previously written to the writer's layer). +func (w *MapFieldWriter) clearTree(addr []string) { + prefix := strings.Join(addr, ".") + "." + for k := range w.result { + if strings.HasPrefix(k, prefix) { + delete(w.result, k) + } + } +} + +func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error { + w.lock.Lock() + defer w.lock.Unlock() + if w.result == nil { + w.result = make(map[string]string) + } + + schemaList := addrToSchema(addr, w.Schema) + if len(schemaList) == 0 { + return fmt.Errorf("Invalid address to set: %#v", addr) + } + + // If we're setting anything other than a list root or set root, + // then disallow it. + for _, schema := range schemaList[:len(schemaList)-1] { + if schema.Type == TypeList { + return fmt.Errorf( + "%s: can only set full list", + strings.Join(addr, ".")) + } + + if schema.Type == TypeMap { + return fmt.Errorf( + "%s: can only set full map", + strings.Join(addr, ".")) + } + + if schema.Type == TypeSet { + return fmt.Errorf( + "%s: can only set full set", + strings.Join(addr, ".")) + } + } + + return w.set(addr, value) +} + +func (w *MapFieldWriter) set(addr []string, value interface{}) error { + schemaList := addrToSchema(addr, w.Schema) + if len(schemaList) == 0 { + return fmt.Errorf("Invalid address to set: %#v", addr) + } + + schema := schemaList[len(schemaList)-1] + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + return w.setPrimitive(addr, value, schema) + case TypeList: + return w.setList(addr, value, schema) + case TypeMap: + return w.setMap(addr, value, schema) + case TypeSet: + return w.setSet(addr, value, schema) + case typeObject: + return w.setObject(addr, value, schema) + default: + panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) + } +} + +func (w *MapFieldWriter) setList( + addr []string, + v interface{}, + schema *Schema) error { + k := strings.Join(addr, ".") + setElement := func(idx string, value interface{}) error { + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + return w.set(append(addrCopy, idx), value) + } + + var vs []interface{} + if err := mapstructure.Decode(v, &vs); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + // Wipe the set from the current writer prior to writing if it exists. + // Multiple writes to the same layer is a lot safer for lists than sets due + // to the fact that indexes are always deterministic and the length will + // always be updated with the current length on the last write, but making + // sure we have a clean namespace removes any chance for edge cases to pop up + // and ensures that the last write to the set is the correct value. + w.clearTree(addr) + + // Set the entire list. 
+ var err error + for i, elem := range vs { + is := strconv.FormatInt(int64(i), 10) + err = setElement(is, elem) + if err != nil { + break + } + } + if err != nil { + for i := range vs { + is := strconv.FormatInt(int64(i), 10) + setElement(is, nil) + } + + return err + } + + w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10) + return nil +} + +func (w *MapFieldWriter) setMap( + addr []string, + value interface{}, + schema *Schema) error { + k := strings.Join(addr, ".") + v := reflect.ValueOf(value) + vs := make(map[string]interface{}) + + if value == nil { + // The empty string here means the map is removed. + w.result[k] = "" + return nil + } + + if v.Kind() != reflect.Map { + return fmt.Errorf("%s: must be a map", k) + } + if v.Type().Key().Kind() != reflect.String { + return fmt.Errorf("%s: keys must strings", k) + } + for _, mk := range v.MapKeys() { + mv := v.MapIndex(mk) + vs[mk.String()] = mv.Interface() + } + + // Wipe this address tree. The contents of the map should always reflect the + // last write made to it. + w.clearTree(addr) + + // Remove the pure key since we're setting the full map value + delete(w.result, k) + + // Set each subkey + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + for subKey, v := range vs { + if err := w.set(append(addrCopy, subKey), v); err != nil { + return err + } + } + + // Set the count + w.result[k+".%"] = strconv.Itoa(len(vs)) + + return nil +} + +func (w *MapFieldWriter) setObject( + addr []string, + value interface{}, + schema *Schema) error { + // Set the entire object. First decode into a proper structure + var v map[string]interface{} + if err := mapstructure.Decode(value, &v); err != nil { + return fmt.Errorf("%s: %s", strings.Join(addr, "."), err) + } + + // Make space for additional elements in the address + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + + // Set each element in turn + var err error + for k1, v1 := range v { + if err = w.set(append(addrCopy, k1), v1); err != nil { + break + } + } + if err != nil { + for k1 := range v { + w.set(append(addrCopy, k1), nil) + } + } + + return err +} + +func (w *MapFieldWriter) setPrimitive( + addr []string, + v interface{}, + schema *Schema) error { + k := strings.Join(addr, ".") + + if v == nil { + // The empty string here means the value is removed. + w.result[k] = "" + return nil + } + + var set string + switch schema.Type { + case TypeBool: + var b bool + if err := mapstructure.Decode(v, &b); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + set = strconv.FormatBool(b) + case TypeString: + if err := mapstructure.Decode(v, &set); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + case TypeInt: + var n int + if err := mapstructure.Decode(v, &n); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + set = strconv.FormatInt(int64(n), 10) + case TypeFloat: + var n float64 + if err := mapstructure.Decode(v, &n); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + set = strconv.FormatFloat(float64(n), 'G', -1, 64) + default: + return fmt.Errorf("Unknown type: %#v", schema.Type) + } + + w.result[k] = set + return nil +} + +func (w *MapFieldWriter) setSet( + addr []string, + value interface{}, + schema *Schema) error { + addrCopy := make([]string, len(addr), len(addr)+1) + copy(addrCopy, addr) + k := strings.Join(addr, ".") + + if value == nil { + w.result[k+".#"] = "0" + return nil + } + + // If it is a slice, then we have to turn it into a *Set so that + // we get the proper order back based on the hash code. 
+ if v := reflect.ValueOf(value); v.Kind() == reflect.Slice { + // Build a temp *ResourceData to use for the conversion + tempAddr := addr[len(addr)-1:] + tempSchema := *schema + tempSchema.Type = TypeList + tempSchemaMap := map[string]*Schema{tempAddr[0]: &tempSchema} + tempW := &MapFieldWriter{Schema: tempSchemaMap} + + // Set the entire list, this lets us get sane values out of it + if err := tempW.WriteField(tempAddr, value); err != nil { + return err + } + + // Build the set by going over the list items in order and + // hashing them into the set. The reason we go over the list and + // not the `value` directly is because this forces all types + // to become []interface{} (generic) instead of []string, which + // most hash functions are expecting. + s := schema.ZeroValue().(*Set) + tempR := &MapFieldReader{ + Map: BasicMapReader(tempW.Map()), + Schema: tempSchemaMap, + } + for i := 0; i < v.Len(); i++ { + is := strconv.FormatInt(int64(i), 10) + result, err := tempR.ReadField(append(tempAddr, is)) + if err != nil { + return err + } + if !result.Exists { + panic("set item just set doesn't exist") + } + + s.Add(result.Value) + } + + value = s + } + + // Clear any keys that match the set address first. This is necessary because + // it's always possible and sometimes may be necessary to write to a certain + // writer layer more than once with different set data each time, which will + // lead to different keys being inserted, which can lead to determinism + // problems when the old data isn't wiped first. + w.clearTree(addr) + + if value.(*Set) == nil { + w.result[k+".#"] = "0" + return nil + } + + for code, elem := range value.(*Set).m { + if err := w.set(append(addrCopy, code), elem); err != nil { + return err + } + } + + w.result[k+".#"] = strconv.Itoa(value.(*Set).Len()) + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/getsource_string.go new file mode 100644 index 000000000..0184d7b08 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/getsource_string.go @@ -0,0 +1,46 @@ +// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT. + +package schema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[getSourceState-1] + _ = x[getSourceConfig-2] + _ = x[getSourceDiff-4] + _ = x[getSourceSet-8] + _ = x[getSourceExact-16] + _ = x[getSourceLevelMask-15] +} + +const ( + _getSource_name_0 = "getSourceStategetSourceConfig" + _getSource_name_1 = "getSourceDiff" + _getSource_name_2 = "getSourceSet" + _getSource_name_3 = "getSourceLevelMaskgetSourceExact" +) + +var ( + _getSource_index_0 = [...]uint8{0, 14, 29} + _getSource_index_3 = [...]uint8{0, 18, 32} +) + +func (i getSource) String() string { + switch { + case 1 <= i && i <= 2: + i -= 1 + return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]] + case i == 4: + return _getSource_name_1 + case i == 8: + return _getSource_name_2 + case 15 <= i && i <= 16: + i -= 15 + return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]] + default: + return "getSource(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go new file mode 100644 index 000000000..209f3691d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/provider.go @@ -0,0 +1,469 @@ +package schema + +import ( + "context" + "errors" + "fmt" + "log" + "os" + "sort" + "strings" + + "github.com/hashicorp/go-multierror" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + grpcpluginctx "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/context" + "github.com/hashicorp/terraform-plugin-sdk/v2/meta" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +const uaEnvVar = "TF_APPEND_USER_AGENT" + +var ReservedProviderFields = []string{ + "alias", + "version", +} + +// StopContext returns a context safe for global use that will cancel +// when Terraform requests a stop. This function should only be called +// within a ConfigureContextFunc, passing in the request scoped context +// received in that method. +// +// Deprecated: The use of a global context is discouraged. Please use the new +// context aware CRUD methods. +func StopContext(ctx context.Context) (context.Context, bool) { + stopContext, ok := ctx.Value(grpcpluginctx.StopContextKey).(context.Context) + return stopContext, ok +} + +// Provider represents a resource provider in Terraform, and properly +// implements all of the ResourceProvider API. +// +// By defining a schema for the configuration of the provider, the +// map of supporting resources, and a configuration function, the schema +// framework takes over and handles all the provider operations for you. +// +// After defining the provider structure, it is unlikely that you'll require any +// of the methods on Provider itself. +type Provider struct { + // Schema is the schema for the configuration of this provider. If this + // provider has no configuration, this can be omitted. + // + // The keys of this map are the configuration keys, and the value is + // the schema describing the value of the configuration. + Schema map[string]*Schema + + // ResourcesMap is the list of available resources that this provider + // can manage, along with their Resource structure defining their + // own schemas and CRUD operations. + // + // Provider automatically handles routing operations such as Apply, + // Diff, etc. to the proper resource. 
+ ResourcesMap map[string]*Resource + + // DataSourcesMap is the collection of available data sources that + // this provider implements, with a Resource instance defining + // the schema and Read operation of each. + // + // Resource instances for data sources must have a Read function + // and must *not* implement Create, Update or Delete. + DataSourcesMap map[string]*Resource + + // ProviderMetaSchema is the schema for the configuration of the meta + // information for this provider. If this provider has no meta info, + // this can be omitted. This functionality is currently experimental + // and subject to change or break without warning; it should only be + // used by providers that are collaborating on its use with the + // Terraform team. + ProviderMetaSchema map[string]*Schema + + // ConfigureFunc is a function for configuring the provider. If the + // provider doesn't need to be configured, this can be omitted. + // + // Deprecated: Please use ConfigureContextFunc instead. + ConfigureFunc ConfigureFunc + + // ConfigureContextFunc is a function for configuring the provider. If the + // provider doesn't need to be configured, this can be omitted. This function + // receives a context.Context that will cancel when Terraform sends a + // cancellation signal. This function can yield Diagnostics. + ConfigureContextFunc ConfigureContextFunc + + meta interface{} + + TerraformVersion string +} + +// ConfigureFunc is the function used to configure a Provider. +// +// Deprecated: Please use ConfigureContextFunc +type ConfigureFunc func(*ResourceData) (interface{}, error) + +// ConfigureContextFunc is the function used to configure a Provider. +// +// The interface{} value returned by this function is stored and passed into +// the subsequent resources as the meta parameter. This return value is +// usually used to pass along a configured API client, a configuration +// structure, etc. +type ConfigureContextFunc func(context.Context, *ResourceData) (interface{}, diag.Diagnostics) + +// InternalValidate should be called to validate the structure +// of the provider. +// +// This should be called in a unit test for any provider to verify +// before release that a provider is properly configured for use with +// this library. 
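The unit test recommended above might look like this minimal sketch, where Provider() is a hypothetical constructor returning the fully assembled *schema.Provider:

package myprovider

import "testing"

func TestProviderInternalValidate(t *testing.T) {
	// Provider() is assumed to build the *schema.Provider under test.
	if err := Provider().InternalValidate(); err != nil {
		t.Fatalf("provider failed InternalValidate: %s", err)
	}
}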
+func (p *Provider) InternalValidate() error { + if p == nil { + return errors.New("provider is nil") + } + + if p.ConfigureFunc != nil && p.ConfigureContextFunc != nil { + return errors.New("ConfigureFunc and ConfigureContextFunc must not both be set") + } + + var validationErrors error + sm := schemaMap(p.Schema) + if err := sm.InternalValidate(sm); err != nil { + validationErrors = multierror.Append(validationErrors, err) + } + + // Provider-specific checks + for k := range sm { + if isReservedProviderFieldName(k) { + return fmt.Errorf("%s is a reserved field name for a provider", k) + } + } + + for k, r := range p.ResourcesMap { + if err := r.InternalValidate(nil, true); err != nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err)) + } + } + + for k, r := range p.DataSourcesMap { + if err := r.InternalValidate(nil, false); err != nil { + validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err)) + } + } + + return validationErrors +} + +func isReservedProviderFieldName(name string) bool { + for _, reservedName := range ReservedProviderFields { + if name == reservedName { + return true + } + } + return false +} + +// Meta returns the metadata associated with this provider that was +// returned by the Configure call. It will be nil until Configure is called. +func (p *Provider) Meta() interface{} { + return p.meta +} + +// SetMeta can be used to forcefully set the Meta object of the provider. +// Note that if Configure is called the return value will override anything +// set here. +func (p *Provider) SetMeta(v interface{}) { + p.meta = v +} + +// GetSchema returns the config schema for the main provider +// configuration, as would appear in a "provider" block in the +// configuration files. +// +// Currently not all providers support schema. Callers must therefore +// first call Resources and DataSources and ensure that at least one +// resource or data source has the SchemaAvailable flag set. +func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + resourceTypes := map[string]*configschema.Block{} + dataSources := map[string]*configschema.Block{} + + for _, name := range req.ResourceTypes { + if r, exists := p.ResourcesMap[name]; exists { + resourceTypes[name] = r.CoreConfigSchema() + } + } + for _, name := range req.DataSources { + if r, exists := p.DataSourcesMap[name]; exists { + dataSources[name] = r.CoreConfigSchema() + } + } + + return &terraform.ProviderSchema{ + Provider: schemaMap(p.Schema).CoreConfigSchema(), + ResourceTypes: resourceTypes, + DataSources: dataSources, + }, nil +} + +// Validate is called once at the beginning with the raw configuration +// (no interpolation done) and can return diagnostics +// +// This is called once with the provider configuration only. It may not +// be called at all if no provider configuration is given. +// +// This should not assume that any values of the configurations are valid. +// The primary use case of this call is to check that required keys are +// set. +func (p *Provider) Validate(c *terraform.ResourceConfig) diag.Diagnostics { + if err := p.InternalValidate(); err != nil { + return []diag.Diagnostic{ + { + Severity: diag.Error, + Summary: "InternalValidate", + Detail: fmt.Sprintf("Internal validation of the provider failed! This is always a bug\n"+ + "with the provider itself, and not a user issue. 
Please report\n"+ + "this bug:\n\n%s", err), + }, + } + } + + return schemaMap(p.Schema).Validate(c) +} + +// ValidateResource is called once at the beginning with the raw +// configuration (no interpolation done) and can return diagnostics. +// +// This is called once per resource. +// +// This should not assume any of the values in the resource configuration +// are valid since it is possible they have to be interpolated still. +// The primary use case of this call is to check that the required keys +// are set and that the general structure is correct. +func (p *Provider) ValidateResource( + t string, c *terraform.ResourceConfig) diag.Diagnostics { + r, ok := p.ResourcesMap[t] + if !ok { + return []diag.Diagnostic{ + { + Severity: diag.Error, + Summary: fmt.Sprintf("Provider doesn't support resource: %s", t), + }, + } + } + + return r.Validate(c) +} + +// Configure configures the provider itself with the configuration +// given. This is useful for setting things like access keys. +// +// This won't be called at all if no provider configuration is given. +func (p *Provider) Configure(ctx context.Context, c *terraform.ResourceConfig) diag.Diagnostics { + // No configuration + if p.ConfigureFunc == nil && p.ConfigureContextFunc == nil { + return nil + } + + sm := schemaMap(p.Schema) + + // Get a ResourceData for this configuration. To do this, we actually + // generate an intermediary "diff" although that is never exposed. + diff, err := sm.Diff(ctx, nil, c, nil, p.meta, true) + if err != nil { + return diag.FromErr(err) + } + + data, err := sm.Data(nil, diff) + if err != nil { + return diag.FromErr(err) + } + + if p.ConfigureFunc != nil { + meta, err := p.ConfigureFunc(data) + if err != nil { + return diag.FromErr(err) + } + p.meta = meta + } + if p.ConfigureContextFunc != nil { + meta, diags := p.ConfigureContextFunc(ctx, data) + if diags.HasError() { + return diags + } + p.meta = meta + } + + return nil +} + +// Resources returns all the available resource types that this provider +// knows how to manage. +func (p *Provider) Resources() []terraform.ResourceType { + keys := make([]string, 0, len(p.ResourcesMap)) + for k := range p.ResourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.ResourceType, 0, len(keys)) + for _, k := range keys { + resource := p.ResourcesMap[k] + + // This isn't really possible (it'd fail InternalValidate), but + // we do it anyways to avoid a panic. + if resource == nil { + resource = &Resource{} + } + + result = append(result, terraform.ResourceType{ + Name: k, + Importable: resource.Importer != nil, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, + }) + } + + return result +} + +// ImportState requests that the given resource be imported. +// +// The returned InstanceState only requires ID be set. Importing +// will always call Refresh after the state to complete it. +// +// IMPORTANT: InstanceState doesn't have the resource type attached +// to it. A type must be specified on the state via the Ephemeral +// field on the state. +// +// This function can return multiple states. Normally, an import +// will map 1:1 to a physical resource. However, some resources map +// to multiple. For example, an AWS security group may contain many rules. +// Each rule is represented by a separate resource in Terraform, +// therefore multiple states are returned. 
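For reference, a resource opts into this import path by setting an importer; a minimal sketch (the resource and its surrounding schema are hypothetical, and the usual context/schema imports are assumed) using a StateContext function of the shape called below:

func resourceExampleThing() *schema.Resource {
	return &schema.Resource{
		// Schema and CRUD functions omitted for brevity.
		Importer: &schema.ResourceImporter{
			StateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
				// d already carries the user-supplied ID; a 1:1 import simply
				// returns it and lets the follow-up Refresh populate the rest.
				return []*schema.ResourceData{d}, nil
			},
		},
	}
}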
+func (p *Provider) ImportState( + ctx context.Context, + info *terraform.InstanceInfo, + id string) ([]*terraform.InstanceState, error) { + // Find the resource + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + // If it doesn't support import, error + if r.Importer == nil { + return nil, fmt.Errorf("resource %s doesn't support import", info.Type) + } + + // Create the data + data := r.Data(nil) + data.SetId(id) + data.SetType(info.Type) + + // Call the import function + results := []*ResourceData{data} + if r.Importer.State != nil || r.Importer.StateContext != nil { + var err error + if r.Importer.StateContext != nil { + results, err = r.Importer.StateContext(ctx, data, p.meta) + } else { + results, err = r.Importer.State(data, p.meta) + } + if err != nil { + return nil, err + } + } + + // Convert the results to InstanceState values and return it + states := make([]*terraform.InstanceState, len(results)) + for i, r := range results { + states[i] = r.State() + } + + // Verify that all are non-nil. If there are any nil the error + // isn't obvious so we circumvent that with a friendlier error. + for _, s := range states { + if s == nil { + return nil, fmt.Errorf( + "nil entry in ImportState results. This is always a bug with\n" + + "the resource that is being imported. Please report this as\n" + + "a bug to Terraform.") + } + } + + return states, nil +} + +// ValidateDataSource is called once at the beginning with the raw +// configuration (no interpolation done) and can return diagnostics. +// +// This is called once per data source instance. +// +// This should not assume any of the values in the resource configuration +// are valid since it is possible they have to be interpolated still. +// The primary use case of this call is to check that the required keys +// are set and that the general structure is correct. +func (p *Provider) ValidateDataSource( + t string, c *terraform.ResourceConfig) diag.Diagnostics { + r, ok := p.DataSourcesMap[t] + if !ok { + return []diag.Diagnostic{ + { + Severity: diag.Error, + Summary: fmt.Sprintf("Provider doesn't support data source: %s", t), + }, + } + } + + return r.Validate(c) +} + +// DataSources returns all of the available data sources that this +// provider implements. +func (p *Provider) DataSources() []terraform.DataSource { + keys := make([]string, 0, len(p.DataSourcesMap)) + for k := range p.DataSourcesMap { + keys = append(keys, k) + } + sort.Strings(keys) + + result := make([]terraform.DataSource, 0, len(keys)) + for _, k := range keys { + result = append(result, terraform.DataSource{ + Name: k, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, + }) + } + + return result +} + +// UserAgent returns a string suitable for use in the User-Agent header of +// requests generated by the provider. The generated string contains the +// version of Terraform, the Plugin SDK, and the provider used to generate the +// request. `name` should be the hyphen-separated reporting name of the +// provider, and `version` should be the version of the provider. +// +// If TF_APPEND_USER_AGENT is set, its value will be appended to the returned +// string. 
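A sketch of typical wiring for the UserAgent method that follows (the provider name and version are placeholders, the usual context/diag/schema imports are assumed, and a real ConfigureContextFunc would return a configured API client rather than the string):

func New(version string) *schema.Provider {
	p := &schema.Provider{ /* Schema, ResourcesMap, DataSourcesMap ... */ }
	p.ConfigureContextFunc = func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) {
		ua := p.UserAgent("terraform-provider-example", version)
		// Hand ua to the underlying API client configuration here.
		return ua, nil
	}
	return p
}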
+func (p *Provider) UserAgent(name, version string) string { + ua := fmt.Sprintf("Terraform/%s (+https://www.terraform.io) Terraform-Plugin-SDK/%s", p.TerraformVersion, meta.SDKVersionString()) + if name != "" { + ua += " " + name + if version != "" { + ua += "/" + version + } + } + + if add := os.Getenv(uaEnvVar); add != "" { + add = strings.TrimSpace(add) + if len(add) > 0 { + ua += " " + add + log.Printf("[DEBUG] Using modified User-Agent: %s", ua) + } + } + + return ua +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go new file mode 100644 index 000000000..f295cab7e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource.go @@ -0,0 +1,809 @@ +package schema + +import ( + "context" + "errors" + "fmt" + "log" + "strconv" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +var ReservedDataSourceFields = []string{ + "connection", + "count", + "depends_on", + "lifecycle", + "provider", + "provisioner", +} + +var ReservedResourceFields = []string{ + "connection", + "count", + "depends_on", + "id", + "lifecycle", + "provider", + "provisioner", +} + +// Resource represents a thing in Terraform that has a set of configurable +// attributes and a lifecycle (create, read, update, delete). +// +// The Resource schema is an abstraction that allows provider writers to +// worry only about CRUD operations while off-loading validation, diff +// generation, etc. to this higher level library. +// +// In spite of the name, this struct is not used only for terraform resources, +// but also for data sources. In the case of data sources, the Create, +// Update and Delete functions must not be provided. +type Resource struct { + // Schema is the schema for the configuration of this resource. + // + // The keys of this map are the configuration keys, and the values + // describe the schema of the configuration value. + // + // The schema is used to represent both configurable data as well + // as data that might be computed in the process of creating this + // resource. + Schema map[string]*Schema + + // SchemaVersion is the version number for this resource's Schema + // definition. The current SchemaVersion stored in the state for each + // resource. Provider authors can increment this version number + // when Schema semantics change. If the State's SchemaVersion is less than + // the current SchemaVersion, the InstanceState is yielded to the + // MigrateState callback, where the provider can make whatever changes it + // needs to update the state to be compatible to the latest version of the + // Schema. + // + // When unset, SchemaVersion defaults to 0, so provider authors can start + // their Versioning at any integer >= 1 + SchemaVersion int + + // MigrateState is responsible for updating an InstanceState with an old + // version to the format expected by the current version of the Schema. + // + // It is called during Refresh if the State's stored SchemaVersion is less + // than the current SchemaVersion of the Resource. + // + // The function is yielded the state's stored SchemaVersion and a pointer to + // the InstanceState that needs updating, as well as the configured + // provider's configured meta interface{}, in case the migration process + // needs to make any remote API calls. 
+ // + // Deprecated: MigrateState is deprecated and any new changes to a resource's schema + // should be handled by StateUpgraders. Existing MigrateState implementations + // should remain for compatibility with existing state. MigrateState will + // still be called if the stored SchemaVersion is less than the + // first version of the StateUpgraders. + MigrateState StateMigrateFunc + + // StateUpgraders contains the functions responsible for upgrading an + // existing state with an old schema version to a newer schema. It is + // called specifically by Terraform when the stored schema version is less + // than the current SchemaVersion of the Resource. + // + // StateUpgraders map specific schema versions to a StateUpgrader + // function. The registered versions are expected to be ordered, + // consecutive values. The initial value may be greater than 0 to account + // for legacy schemas that weren't recorded and can be handled by + // MigrateState. + StateUpgraders []StateUpgrader + + // The functions below are the CRUD operations for this resource. + // + // Deprecated: Please use the context aware equivalents instead. Only one of + // the operations or context aware equivalent can be set, not both. + Create CreateFunc + Read ReadFunc + Update UpdateFunc + Delete DeleteFunc + + // Exists is a function that is called to check if a resource still + // exists. If this returns false, then this will affect the diff + // accordingly. If this function isn't set, it will not be called. You + // can also signal existence in the Read method by calling d.SetId("") + // if the Resource is no longer present and should be removed from state. + // The *ResourceData passed to Exists should _not_ be modified. + // + // Deprecated: ReadContext should be able to encapsulate the logic of Exists + Exists ExistsFunc + + // The functions below are the CRUD operations for this resource. + // + // The only optional operation is Update. If Update is not + // implemented, then updates will not be supported for this resource. + // + // The ResourceData parameter in the functions below are used to + // query configuration and changes for the resource as well as to set + // the ID, computed data, etc. + // + // The interface{} parameter is the result of the ConfigureFunc in + // the provider for this resource. If the provider does not define + // a ConfigureFunc, this will be nil. This parameter should be used + // to store API clients, configuration structures, etc. + // + // These functions are passed a context configured to timeout with whatever + // was set as the timeout for this operation. Useful for forwarding on to + // backend SDK's that accept context. The context will also cancel if + // Terraform sends a cancellation signal. + // + // These functions return diagnostics, allowing developers to build + // a list of warnings and errors to be presented to the Terraform user. + // The AttributePath of those diagnostics should be built within these + // functions, please consult go-cty documentation for building a cty.Path + CreateContext CreateContextFunc + ReadContext ReadContextFunc + UpdateContext UpdateContextFunc + DeleteContext DeleteContextFunc + + // CustomizeDiff is a custom function for working with the diff that + // Terraform has created for this resource - it can be used to customize the + // diff that has been created, diff values not controlled by configuration, + // or even veto the diff altogether and abort the plan. 
It is passed a + // *ResourceDiff, a structure similar to ResourceData but lacking most write + // functions like Set, while introducing new functions that work with the + // diff such as SetNew, SetNewComputed, and ForceNew. + // + // The phases Terraform runs this in, and the state available via functions + // like Get and GetChange, are as follows: + // + // * New resource: One run with no state + // * Existing resource: One run with state + // * Existing resource, forced new: One run with state (before ForceNew), + // then one run without state (as if new resource) + // * Tainted resource: No runs (custom diff logic is skipped) + // * Destroy: No runs (standard diff logic is skipped on destroy diffs) + // + // This function needs to be resilient to support all scenarios. + // + // For the most part, only computed fields can be customized by this + // function. + // + // This function is only allowed on regular resources (not data sources). + CustomizeDiff CustomizeDiffFunc + + // Importer is the ResourceImporter implementation for this resource. + // If this is nil, then this resource does not support importing. If + // this is non-nil, then it supports importing and ResourceImporter + // must be validated. The validity of ResourceImporter is verified + // by InternalValidate on Resource. + Importer *ResourceImporter + + // If non-empty, this string is emitted as a warning during Validate. + DeprecationMessage string + + // Timeouts allow users to specify specific time durations in which an + // operation should time out, to allow them to extend an action to suit their + // usage. For example, a user may specify a large Creation timeout for their + // AWS RDS Instance due to it's size, or restoring from a snapshot. + // Resource implementors must enable Timeout support by adding the allowed + // actions (Create, Read, Update, Delete, Default) to the Resource struct, and + // accessing them in the matching methods. + Timeouts *ResourceTimeout + + // Description is used as the description for docs, the language server and + // other user facing usage. It can be plain-text or markdown depending on the + // global DescriptionKind setting. + Description string +} + +// ShimInstanceStateFromValue converts a cty.Value to a +// terraform.InstanceState. +func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) { + // Get the raw shimmed value. While this is correct, the set hashes don't + // match those from the Schema. + s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion) + + // We now rebuild the state through the ResourceData, so that the set indexes + // match what helper/schema expects. + data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return nil, err + } + + s = data.State() + if s == nil { + s = &terraform.InstanceState{} + } + return s, nil +} + +// The following function types are of the legacy CRUD operations. +// +// Deprecated: Please use the context aware equivalents instead. +type CreateFunc func(*ResourceData, interface{}) error +type ReadFunc func(*ResourceData, interface{}) error +type UpdateFunc func(*ResourceData, interface{}) error +type DeleteFunc func(*ResourceData, interface{}) error +type ExistsFunc func(*ResourceData, interface{}) (bool, error) + +// See Resource documentation. +type CreateContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics + +// See Resource documentation. 
+type ReadContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics + +// See Resource documentation. +type UpdateContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics + +// See Resource documentation. +type DeleteContextFunc func(context.Context, *ResourceData, interface{}) diag.Diagnostics + +// See Resource documentation. +type StateMigrateFunc func( + int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) + +type StateUpgrader struct { + // Version is the version schema that this Upgrader will handle, converting + // it to Version+1. + Version int + + // Type describes the schema that this function can upgrade. Type is + // required to decode the schema if the state was stored in a legacy + // flatmap format. + Type cty.Type + + // Upgrade takes the JSON encoded state and the provider meta value, and + // upgrades the state one single schema version. The provided state is + // deocded into the default json types using a map[string]interface{}. It + // is up to the StateUpgradeFunc to ensure that the returned value can be + // encoded using the new schema. + Upgrade StateUpgradeFunc +} + +// See StateUpgrader +type StateUpgradeFunc func(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) + +// See Resource documentation. +type CustomizeDiffFunc func(context.Context, *ResourceDiff, interface{}) error + +func (r *Resource) create(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Create != nil { + if err := r.Create(d, meta); err != nil { + return diag.FromErr(err) + } + return nil + } + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutCreate)) + defer cancel() + return r.CreateContext(ctx, d, meta) +} + +func (r *Resource) read(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Read != nil { + if err := r.Read(d, meta); err != nil { + return diag.FromErr(err) + } + return nil + } + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutRead)) + defer cancel() + return r.ReadContext(ctx, d, meta) +} + +func (r *Resource) update(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Update != nil { + if err := r.Update(d, meta); err != nil { + return diag.FromErr(err) + } + return nil + } + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutUpdate)) + defer cancel() + return r.UpdateContext(ctx, d, meta) +} + +func (r *Resource) delete(ctx context.Context, d *ResourceData, meta interface{}) diag.Diagnostics { + if r.Delete != nil { + if err := r.Delete(d, meta); err != nil { + return diag.FromErr(err) + } + return nil + } + ctx, cancel := context.WithTimeout(ctx, d.Timeout(TimeoutDelete)) + defer cancel() + return r.DeleteContext(ctx, d, meta) +} + +// Apply creates, updates, and/or deletes a resource. 
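// Illustrative sketch, not part of the vendored SDK code above. It shows the
// SchemaVersion/StateUpgraders pattern described earlier: version 0 state
// stored a "name" attribute that version 1 renames to "display_name". The
// resource and attribute names are hypothetical, and the CRUD functions are
// omitted for brevity.
package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// resourceExampleV0 describes the schema as it looked at version 0; its
// implied type lets the SDK decode legacy flatmap state for the upgrader.
func resourceExampleV0() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Optional: true},
		},
	}
}

func resourceExample() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		StateUpgraders: []schema.StateUpgrader{
			{
				Version: 0,
				Type:    resourceExampleV0().CoreConfigSchema().ImpliedType(),
				Upgrade: upgradeExampleV0ToV1,
			},
		},
		Schema: map[string]*schema.Schema{
			"display_name": {Type: schema.TypeString, Optional: true},
		},
		// CreateContext, ReadContext, etc. omitted.
	}
}

func upgradeExampleV0ToV1(ctx context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
	rawState["display_name"] = rawState["name"]
	delete(rawState, "name")
	return rawState, nil
}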
+func (r *Resource) Apply( + ctx context.Context, + s *terraform.InstanceState, + d *terraform.InstanceDiff, + meta interface{}) (*terraform.InstanceState, diag.Diagnostics) { + data, err := schemaMap(r.Schema).Data(s, d) + if err != nil { + return s, diag.FromErr(err) + } + + if s != nil && data != nil { + data.providerMeta = s.ProviderMeta + } + + // Instance Diff shoould have the timeout info, need to copy it over to the + // ResourceData meta + rt := ResourceTimeout{} + if _, ok := d.Meta[TimeoutKey]; ok { + if err := rt.DiffDecode(d); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } else if s != nil { + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + } else { + log.Printf("[DEBUG] No meta timeoutkey found in Apply()") + } + data.timeouts = &rt + + if s == nil { + // The Terraform API dictates that this should never happen, but + // it doesn't hurt to be safe in this case. + s = new(terraform.InstanceState) + } + + var diags diag.Diagnostics + + if d.Destroy || d.RequiresNew() { + if s.ID != "" { + // Destroy the resource since it is created + diags = append(diags, r.delete(ctx, data, meta)...) + if diags.HasError() { + return r.recordCurrentSchemaVersion(data.State()), diags + } + + // Make sure the ID is gone. + data.SetId("") + } + + // If we're only destroying, and not creating, then return + // now since we're done! + if !d.RequiresNew() { + return nil, diags + } + + // Reset the data to be stateless since we just destroyed + data, err = schemaMap(r.Schema).Data(nil, d) + if err != nil { + return nil, append(diags, diag.FromErr(err)...) + } + + // data was reset, need to re-apply the parsed timeouts + data.timeouts = &rt + } + + if data.Id() == "" { + // We're creating, it is a new resource. + data.MarkNewResource() + diags = append(diags, r.create(ctx, data, meta)...) + } else { + if !r.updateFuncSet() { + return s, append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "doesn't support update", + }) + } + diags = append(diags, r.update(ctx, data, meta)...) + } + + return r.recordCurrentSchemaVersion(data.State()), diags +} + +// Diff returns a diff of this resource. +func (r *Resource) Diff( + ctx context.Context, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + t := &ResourceTimeout{} + err := t.ConfigDecode(r, c) + + if err != nil { + return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) + } + + instanceDiff, err := schemaMap(r.Schema).Diff(ctx, s, c, r.CustomizeDiff, meta, true) + if err != nil { + return instanceDiff, err + } + + if instanceDiff != nil { + if err := t.DiffEncode(instanceDiff); err != nil { + log.Printf("[ERR] Error encoding timeout to instance diff: %s", err) + } + } else { + log.Printf("[DEBUG] Instance Diff is nil in Diff()") + } + + return instanceDiff, err +} + +func (r *Resource) SimpleDiff( + ctx context.Context, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + instanceDiff, err := schemaMap(r.Schema).Diff(ctx, s, c, r.CustomizeDiff, meta, false) + if err != nil { + return instanceDiff, err + } + + if instanceDiff == nil { + instanceDiff = terraform.NewInstanceDiff() + } + + // Make sure the old value is set in each of the instance diffs. + // This was done by the RequiresNew logic in the full legacy Diff. 
+ for k, attr := range instanceDiff.Attributes { + if attr == nil { + continue + } + if s != nil { + attr.Old = s.Attributes[k] + } + } + + return instanceDiff, nil +} + +// Validate validates the resource configuration against the schema. +func (r *Resource) Validate(c *terraform.ResourceConfig) diag.Diagnostics { + diags := schemaMap(r.Schema).Validate(c) + + if r.DeprecationMessage != "" { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Deprecated Resource", + Detail: r.DeprecationMessage, + }) + } + + return diags +} + +// ReadDataApply loads the data for a data source, given a diff that +// describes the configuration arguments and desired computed attributes. +func (r *Resource) ReadDataApply( + ctx context.Context, + d *terraform.InstanceDiff, + meta interface{}, +) (*terraform.InstanceState, diag.Diagnostics) { + // Data sources are always built completely from scratch + // on each read, so the source state is always nil. + data, err := schemaMap(r.Schema).Data(nil, d) + if err != nil { + return nil, diag.FromErr(err) + } + + diags := r.read(ctx, data, meta) + state := data.State() + if state != nil && state.ID == "" { + // Data sources can set an ID if they want, but they aren't + // required to; we'll provide a placeholder if they don't, + // to preserve the invariant that all resources have non-empty + // ids. + state.ID = "-" + } + + return r.recordCurrentSchemaVersion(state), diags +} + +// RefreshWithoutUpgrade reads the instance state, but does not call +// MigrateState or the StateUpgraders, since those are now invoked in a +// separate API call. +// RefreshWithoutUpgrade is part of the new plugin shims. +func (r *Resource) RefreshWithoutUpgrade( + ctx context.Context, + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, diag.Diagnostics) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. + data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return s, diag.FromErr(err) + } + data.timeouts = &rt + + if s != nil { + data.providerMeta = s.ProviderMeta + } + + exists, err := r.Exists(data, meta) + if err != nil { + return s, diag.FromErr(err) + } + + if !exists { + return nil, nil + } + } + + data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return s, diag.FromErr(err) + } + data.timeouts = &rt + + if s != nil { + data.providerMeta = s.ProviderMeta + } + + diags := r.read(ctx, data, meta) + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + return r.recordCurrentSchemaVersion(state), diags +} + +func (r *Resource) createFuncSet() bool { + return (r.Create != nil || r.CreateContext != nil) +} + +func (r *Resource) readFuncSet() bool { + return (r.Read != nil || r.ReadContext != nil) +} + +func (r *Resource) updateFuncSet() bool { + return (r.Update != nil || r.UpdateContext != nil) +} + +func (r *Resource) deleteFuncSet() bool { + return (r.Delete != nil || r.DeleteContext != nil) +} + +// InternalValidate should be called to validate the structure +// of the resource. +// +// This should be called in a unit test for any resource to verify +// before release that a resource is properly configured for use with +// this library. 
+// +// Provider.InternalValidate() will automatically call this for all of +// the resources it manages, so you don't need to call this manually if it +// is part of a Provider. +func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error { + if r == nil { + return errors.New("resource is nil") + } + + if !writable { + if r.createFuncSet() || r.updateFuncSet() || r.deleteFuncSet() { + return fmt.Errorf("must not implement Create, Update or Delete") + } + + // CustomizeDiff cannot be defined for read-only resources + if r.CustomizeDiff != nil { + return fmt.Errorf("cannot implement CustomizeDiff") + } + } + + tsm := topSchemaMap + + if r.isTopLevel() && writable { + // All non-Computed attributes must be ForceNew if Update is not defined + if !r.updateFuncSet() { + nonForceNewAttrs := make([]string, 0) + for k, v := range r.Schema { + if !v.ForceNew && !v.Computed { + nonForceNewAttrs = append(nonForceNewAttrs, k) + } + } + if len(nonForceNewAttrs) > 0 { + return fmt.Errorf( + "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs) + } + } else { + nonUpdateableAttrs := make([]string, 0) + for k, v := range r.Schema { + if v.ForceNew || v.Computed && !v.Optional { + nonUpdateableAttrs = append(nonUpdateableAttrs, k) + } + } + updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs) + if updateableAttrs == 0 { + return fmt.Errorf( + "All fields are ForceNew or Computed w/out Optional, Update is superfluous") + } + } + + tsm = schemaMap(r.Schema) + + // Destroy, and Read are required + if !r.readFuncSet() { + return fmt.Errorf("Read must be implemented") + } + if !r.deleteFuncSet() { + return fmt.Errorf("Delete must be implemented") + } + + // If we have an importer, we need to verify the importer. + if r.Importer != nil { + if err := r.Importer.InternalValidate(); err != nil { + return err + } + } + + for k, f := range tsm { + if isReservedResourceFieldName(k, f) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + lastVersion := -1 + for _, u := range r.StateUpgraders { + if lastVersion >= 0 && u.Version-lastVersion > 1 { + return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version) + } + + if u.Version >= r.SchemaVersion { + return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion) + } + + if !u.Type.IsObjectType() { + return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version) + } + + if u.Upgrade == nil { + return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version) + } + + lastVersion = u.Version + } + + if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 { + return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion) + } + + // Data source + if r.isTopLevel() && !writable { + tsm = schemaMap(r.Schema) + for k := range tsm { + if isReservedDataSourceFieldName(k) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + // check context funcs are not set alongside their nonctx counterparts + if r.CreateContext != nil && r.Create != nil { + return fmt.Errorf("CreateContext and Create should not both be set") + } + if r.ReadContext != nil && r.Read != nil { + return fmt.Errorf("ReadContext and Read should not both be set") + } + if r.UpdateContext != nil && r.Update != nil { + return fmt.Errorf("UpdateContext and Update should not both be set") + } + if r.DeleteContext != nil && r.Delete != nil { + return fmt.Errorf("DeleteContext and Delete should not both be set") + } + + return 
schemaMap(r.Schema).InternalValidate(tsm) +} + +func isReservedDataSourceFieldName(name string) bool { + for _, reservedName := range ReservedDataSourceFields { + if name == reservedName { + return true + } + } + return false +} + +func isReservedResourceFieldName(name string, s *Schema) bool { + // Allow phasing out "id" + // See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415 + if name == "id" && s.Deprecated != "" { + return false + } + + for _, reservedName := range ReservedResourceFields { + if name == reservedName { + return true + } + } + return false +} + +// Data returns a ResourceData struct for this Resource. Each return value +// is a separate copy and can be safely modified differently. +// +// The data returned from this function has no actual affect on the Resource +// itself (including the state given to this function). +// +// This function is useful for unit tests and ResourceImporter functions. +func (r *Resource) Data(s *terraform.InstanceState) *ResourceData { + result, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + // At the time of writing, this isn't possible (Data never returns + // non-nil errors). We panic to find this in the future if we have to. + // I don't see a reason for Data to ever return an error. + panic(err) + } + + // load the Resource timeouts + result.timeouts = r.Timeouts + if result.timeouts == nil { + result.timeouts = &ResourceTimeout{} + } + + // Set the schema version to latest by default + result.meta = map[string]interface{}{ + "schema_version": strconv.Itoa(r.SchemaVersion), + } + + return result +} + +// TestResourceData Yields a ResourceData filled with this resource's schema for use in unit testing +// +// TODO: May be able to be removed with the above ResourceData function. +func (r *Resource) TestResourceData() *ResourceData { + return &ResourceData{ + schema: r.Schema, + } +} + +// Returns true if the resource is "top level" i.e. not a sub-resource. +func (r *Resource) isTopLevel() bool { + // TODO: This is a heuristic; replace with a definitive attribute? + return (r.createFuncSet() || r.readFuncSet()) +} + +func (r *Resource) recordCurrentSchemaVersion( + state *terraform.InstanceState) *terraform.InstanceState { + if state != nil && r.SchemaVersion > 0 { + if state.Meta == nil { + state.Meta = make(map[string]interface{}) + } + state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion) + } + return state +} + +// Noop is a convenience implementation of resource function which takes +// no action and returns no error. +func Noop(*ResourceData, interface{}) error { + return nil +} + +// NoopContext is a convenience implementation of context aware resource function which takes +// no action and returns no error. +func NoopContext(context.Context, *ResourceData, interface{}) diag.Diagnostics { + return nil +} + +// RemoveFromState is a convenience implementation of a resource function +// which sets the resource ID to empty string (to remove it from state) +// and returns no error. 
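// Illustrative sketch, not part of the vendored SDK code above. It shows the
// unit-test use of InternalValidate recommended earlier: validating every
// resource of a provider before release. The exampleProvider constructor and
// the "example_widget" resource are hypothetical stand-ins.
package example

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func exampleProvider() *schema.Provider {
	return &schema.Provider{
		ResourcesMap: map[string]*schema.Resource{
			"example_widget": {
				CreateContext: schema.NoopContext,
				ReadContext:   schema.NoopContext,
				DeleteContext: schema.NoopContext,
				Schema: map[string]*schema.Schema{
					"name": {Type: schema.TypeString, Required: true, ForceNew: true},
				},
			},
		},
	}
}

// Provider.InternalValidate calls Resource.InternalValidate for every managed
// resource, catching reserved field names, missing Read/Delete, bad
// StateUpgraders, and similar mistakes before release.
func TestExampleProvider_InternalValidate(t *testing.T) {
	if err := exampleProvider().InternalValidate(); err != nil {
		t.Fatalf("err: %s", err)
	}
}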
+func RemoveFromState(d *ResourceData, _ interface{}) error { + d.SetId("") + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go new file mode 100644 index 000000000..1906343d7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data.go @@ -0,0 +1,540 @@ +package schema + +import ( + "log" + "reflect" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/gocty" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// ResourceData is used to query and set the attributes of a resource. +// +// ResourceData is the primary argument received for CRUD operations on +// a resource as well as configuration of a provider. It is a powerful +// structure that can be used to not only query data, but also check for changes +// +// The most relevant methods to take a look at are Get and Set. +type ResourceData struct { + // Settable (internally) + schema map[string]*Schema + config *terraform.ResourceConfig + state *terraform.InstanceState + diff *terraform.InstanceDiff + meta map[string]interface{} + timeouts *ResourceTimeout + providerMeta cty.Value + + // Don't set + multiReader *MultiLevelFieldReader + setWriter *MapFieldWriter + newState *terraform.InstanceState + partial bool + once sync.Once + isNew bool + + panicOnError bool +} + +// getResult is the internal structure that is generated when a Get +// is called that contains some extra data that might be used. +type getResult struct { + Value interface{} + ValueProcessed interface{} + Computed bool + Exists bool + Schema *Schema +} + +// Get returns the data for the given key, or nil if the key doesn't exist +// in the schema. +// +// If the key does exist in the schema but doesn't exist in the configuration, +// then the default value for that type will be returned. For strings, this is +// "", for numbers it is 0, etc. +// +// If you want to test if something is set at all in the configuration, +// use GetOk. +func (d *ResourceData) Get(key string) interface{} { + v, _ := d.GetOk(key) + return v +} + +// GetChange returns the old and new value for a given key. +// +// HasChange should be used to check if a change exists. It is possible +// that both the old and new value are the same if the old value was not +// set and the new value is. This is common, for example, for boolean +// fields which have a zero value of false. +func (d *ResourceData) GetChange(key string) (interface{}, interface{}) { + o, n := d.getChange(key, getSourceState, getSourceDiff) + return o.Value, n.Value +} + +// GetOk returns the data for the given key and whether or not the key +// has been set to a non-zero value at some point. +// +// The first result will not necessarilly be nil if the value doesn't exist. +// The second result should be checked to determine this information. +func (d *ResourceData) GetOk(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + if exists { + // If it exists, we also want to verify it is not the zero-value. + value := r.Value + zero := r.Schema.Type.Zero() + + if eq, ok := value.(Equal); ok { + exists = !eq.Equal(zero) + } else { + exists = !reflect.DeepEqual(value, zero) + } + } + + return r.Value, exists +} + +// GetOkExists can check if TypeBool attributes that are Optional with +// no Default value have been set. 
+// +// Deprecated: usage is discouraged due to undefined behaviors and may be +// removed in a future version of the SDK +func (d *ResourceData) GetOkExists(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + return r.Value, exists +} + +func (d *ResourceData) getRaw(key string, level getSource) getResult { + var parts []string + if key != "" { + parts = strings.Split(key, ".") + } + + return d.get(parts, level) +} + +// HasChanges returns whether or not any of the given keys has been changed. +func (d *ResourceData) HasChanges(keys ...string) bool { + for _, key := range keys { + if d.HasChange(key) { + return true + } + } + return false +} + +// HasChange returns whether or not the given key has been changed. +func (d *ResourceData) HasChange(key string) bool { + o, n := d.GetChange(key) + + // If the type implements the Equal interface, then call that + // instead of just doing a reflect.DeepEqual. An example where this is + // needed is *Set + if eq, ok := o.(Equal); ok { + return !eq.Equal(n) + } + + return !reflect.DeepEqual(o, n) +} + +// Partial is a legacy function that was used for capturing state of specific +// attributes if an update only partially worked. Enabling this flag without +// setting any specific keys with the now removed SetPartial has a useful side +// effect of preserving all of the resource's previous state. Although confusing, +// it has been discovered that during an update when an error is returned, the +// proposed config is set into state, even without any calls to d.Set. +// +// In practice this default behavior goes mostly unnoticed since Terraform +// refreshes between operations by default. The state situation discussed is +// subject to further investigation and potential change. Until then, this +// function has been preserved for the specific usecase. +func (d *ResourceData) Partial(on bool) { + d.partial = on +} + +// Set sets the value for the given key. +// +// If the key is invalid or the value is not a correct type, an error +// will be returned. +func (d *ResourceData) Set(key string, value interface{}) error { + d.once.Do(d.init) + + // If the value is a pointer to a non-struct, get its value and + // use that. This allows Set to take a pointer to primitives to + // simplify the interface. + reflectVal := reflect.ValueOf(value) + if reflectVal.Kind() == reflect.Ptr { + if reflectVal.IsNil() { + // If the pointer is nil, then the value is just nil + value = nil + } else { + // Otherwise, we dereference the pointer as long as its not + // a pointer to a struct, since struct pointers are allowed. + reflectVal = reflect.Indirect(reflectVal) + if reflectVal.Kind() != reflect.Struct { + value = reflectVal.Interface() + } + } + } + + err := d.setWriter.WriteField(strings.Split(key, "."), value) + if err != nil { + if d.panicOnError { + panic(err) + } else { + log.Printf("[ERROR] setting state: %s", err) + } + } + return err +} + +func (d *ResourceData) MarkNewResource() { + d.isNew = true +} + +func (d *ResourceData) IsNewResource() bool { + return d.isNew +} + +// Id returns the ID of the resource. +func (d *ResourceData) Id() string { + var result string + + if d.state != nil { + result = d.state.ID + if result == "" { + result = d.state.Attributes["id"] + } + } + + if d.newState != nil { + result = d.newState.ID + if result == "" { + result = d.newState.Attributes["id"] + } + } + + return result +} + +// ConnInfo returns the connection info for this resource. 
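// Illustrative sketch, not part of the vendored SDK code above. It shows the
// Get/GetOk/GetChange/HasChange/Set calls documented earlier in the shape of
// typical update and read functions. The "description" attribute and the
// updateDescription helper are hypothetical.
package example

import (
	"context"
	"log"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// updateDescription stands in for a real API call.
func updateDescription(ctx context.Context, id, description string) error { return nil }

func resourceExampleUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	if d.HasChange("description") {
		// GetChange is useful when the API also wants the previous value.
		oldDesc, newDesc := d.GetChange("description")
		log.Printf("[DEBUG] description: %q -> %q", oldDesc, newDesc)

		if err := updateDescription(ctx, d.Id(), newDesc.(string)); err != nil {
			return diag.FromErr(err)
		}
	}
	return nil
}

func resourceExampleRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	// GetOk reports whether the attribute is set to a non-zero value, the usual
	// check for optional attributes before they are sent to an API.
	if v, ok := d.GetOk("description"); ok {
		log.Printf("[DEBUG] description is configured: %s", v.(string))
	}

	if err := d.Set("description", "value-fetched-from-api"); err != nil {
		return diag.FromErr(err)
	}
	return nil
}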
+func (d *ResourceData) ConnInfo() map[string]string { + if d.newState != nil { + return d.newState.Ephemeral.ConnInfo + } + + if d.state != nil { + return d.state.Ephemeral.ConnInfo + } + + return nil +} + +// SetId sets the ID of the resource. If the value is blank, then the +// resource is destroyed. +func (d *ResourceData) SetId(v string) { + d.once.Do(d.init) + d.newState.ID = v + + // once we transition away from the legacy state types, "id" will no longer + // be a special field, and will become a normal attribute. + // set the attribute normally + d.setWriter.unsafeWriteField("id", v) + + // Make sure the newState is also set, otherwise the old value + // may get precedence. + if d.newState.Attributes == nil { + d.newState.Attributes = map[string]string{} + } + d.newState.Attributes["id"] = v +} + +// SetConnInfo sets the connection info for a resource. +func (d *ResourceData) SetConnInfo(v map[string]string) { + d.once.Do(d.init) + d.newState.Ephemeral.ConnInfo = v +} + +// SetType sets the ephemeral type for the data. This is only required +// for importing. +func (d *ResourceData) SetType(t string) { + d.once.Do(d.init) + d.newState.Ephemeral.Type = t +} + +// State returns the new InstanceState after the diff and any Set +// calls. +func (d *ResourceData) State() *terraform.InstanceState { + var result terraform.InstanceState + result.ID = d.Id() + result.Meta = d.meta + + // If we have no ID, then this resource doesn't exist and we just + // return nil. + if result.ID == "" { + return nil + } + + if d.timeouts != nil { + if err := d.timeouts.StateEncode(&result); err != nil { + log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err) + } + } + + // Look for a magic key in the schema that determines we skip the + // integrity check of fields existing in the schema, allowing dynamic + // keys to be created. + hasDynamicAttributes := false + for k := range d.schema { + if k == "__has_dynamic_attributes" { + hasDynamicAttributes = true + log.Printf("[INFO] Resource %s has dynamic attributes", result.ID) + } + } + + // In order to build the final state attributes, we read the full + // attribute set as a map[string]interface{}, write it to a MapFieldWriter, + // and then use that map. + rawMap := make(map[string]interface{}) + for k := range d.schema { + source := getSourceSet + if d.partial { + source = getSourceState + } + raw := d.get([]string{k}, source) + if raw.Exists && !raw.Computed { + rawMap[k] = raw.Value + if raw.ValueProcessed != nil { + rawMap[k] = raw.ValueProcessed + } + } + } + + mapW := &MapFieldWriter{Schema: d.schema} + if err := mapW.WriteField(nil, rawMap); err != nil { + log.Printf("[ERR] Error writing fields: %s", err) + return nil + } + + result.Attributes = mapW.Map() + + if hasDynamicAttributes { + // If we have dynamic attributes, just copy the attributes map + // one for one into the result attributes. + for k, v := range d.setWriter.Map() { + // Don't clobber schema values. This limits usage of dynamic + // attributes to names which _do not_ conflict with schema + // keys! + if _, ok := result.Attributes[k]; !ok { + result.Attributes[k] = v + } + } + } + + if d.newState != nil { + result.Ephemeral = d.newState.Ephemeral + } + + // TODO: This is hacky and we can remove this when we have a proper + // state writer. We should instead have a proper StateFieldWriter + // and use that. 
+ for k, schema := range d.schema { + if schema.Type != TypeMap { + continue + } + + if result.Attributes[k] == "" { + delete(result.Attributes, k) + } + } + + if v := d.Id(); v != "" { + result.Attributes["id"] = d.Id() + } + + if d.state != nil { + result.Tainted = d.state.Tainted + } + + return &result +} + +// Timeout returns the data for the given timeout key +// Returns a duration of 20 minutes for any key not found, or not found and no default. +func (d *ResourceData) Timeout(key string) time.Duration { + key = strings.ToLower(key) + + // System default of 20 minutes + defaultTimeout := 20 * time.Minute + + if d.timeouts == nil { + return defaultTimeout + } + + var timeout *time.Duration + switch key { + case TimeoutCreate: + timeout = d.timeouts.Create + case TimeoutRead: + timeout = d.timeouts.Read + case TimeoutUpdate: + timeout = d.timeouts.Update + case TimeoutDelete: + timeout = d.timeouts.Delete + } + + if timeout != nil { + return *timeout + } + + if d.timeouts.Default != nil { + return *d.timeouts.Default + } + + return defaultTimeout +} + +func (d *ResourceData) init() { + // Initialize the field that will store our new state + var copyState terraform.InstanceState + if d.state != nil { + copyState = *d.state.DeepCopy() + } + d.newState = ©State + + // Initialize the map for storing set data + d.setWriter = &MapFieldWriter{Schema: d.schema} + + // Initialize the reader for getting data from the + // underlying sources (config, diff, etc.) + readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["set"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.setWriter.Map()), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "set", + }, + + Readers: readers, + } +} + +func (d *ResourceData) diffChange( + k string) (interface{}, interface{}, bool, bool, bool) { + // Get the change between the state and the config. 
+ o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) + if !o.Exists { + o.Value = nil + } + if !n.Exists { + n.Value = nil + } + + // Return the old, new, and whether there is a change + return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false +} + +func (d *ResourceData) getChange( + k string, + oldLevel getSource, + newLevel getSource) (getResult, getResult) { + var parts, parts2 []string + if k != "" { + parts = strings.Split(k, ".") + parts2 = strings.Split(k, ".") + } + + o := d.get(parts, oldLevel) + n := d.get(parts2, newLevel) + return o, n +} + +func (d *ResourceData) get(addr []string, source getSource) getResult { + d.once.Do(d.init) + + level := "set" + flags := source & ^getSourceLevelMask + exact := flags&getSourceExact != 0 + source = source & getSourceLevelMask + if source >= getSourceSet { + level = "set" + } else if source >= getSourceDiff { + level = "diff" + } else if source >= getSourceConfig { + level = "config" + } else { + level = "state" + } + + var result FieldReadResult + var err error + if exact { + result, err = d.multiReader.ReadFieldExact(addr, level) + } else { + result, err = d.multiReader.ReadFieldMerge(addr, level) + } + if err != nil { + panic(err) + } + + // If the result doesn't exist, then we set the value to the zero value + var schema *Schema + if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + + if result.Value == nil && schema != nil { + result.Value = result.ValueOrZero(schema) + } + + // Transform the FieldReadResult into a getResult. It might be worth + // merging these two structures one day. + return getResult{ + Value: result.Value, + ValueProcessed: result.ValueProcessed, + Computed: result.Computed, + Exists: result.Exists, + Schema: schema, + } +} + +func (d *ResourceData) GetProviderMeta(dst interface{}) error { + if d.providerMeta.IsNull() { + return nil + } + return gocty.FromCtyValue(d.providerMeta, &dst) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data_get_source.go new file mode 100644 index 000000000..8bfb079be --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_data_get_source.go @@ -0,0 +1,17 @@ +package schema + +//go:generate go run golang.org/x/tools/cmd/stringer -type=getSource resource_data_get_source.go + +// getSource represents the level we want to get for a value (internally). +// Any source less than or equal to the level will be loaded (whichever +// has a value first). 
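// Illustrative sketch, not part of the vendored SDK code above. It shows
// ResourceData.Timeout (documented earlier, including its 20-minute fallback)
// bounding a polling loop during create. The widgetReady helper and the
// 10-second poll interval are hypothetical.
package example

import (
	"context"
	"time"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// widgetReady stands in for a real "is it ready yet?" API call.
func widgetReady(ctx context.Context, id string) (bool, error) { return true, nil }

func waitForWidget(ctx context.Context, d *schema.ResourceData) diag.Diagnostics {
	deadline := time.Now().Add(d.Timeout(schema.TimeoutCreate))

	for {
		ready, err := widgetReady(ctx, d.Id())
		if err != nil {
			return diag.FromErr(err)
		}
		if ready {
			return nil
		}
		if time.Now().After(deadline) {
			return diag.Errorf("timed out waiting for widget %s", d.Id())
		}

		select {
		case <-ctx.Done(): // the SDK cancels ctx on timeout or interrupt
			return diag.FromErr(ctx.Err())
		case <-time.After(10 * time.Second):
		}
	}
}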
+type getSource byte + +const ( + getSourceState getSource = 1 << iota + getSourceConfig + getSourceDiff + getSourceSet + getSourceExact // Only get from the _exact_ level + getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go new file mode 100644 index 000000000..984929df7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_diff.go @@ -0,0 +1,559 @@ +package schema + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// newValueWriter is a minor re-implementation of MapFieldWriter to include +// keys that should be marked as computed, to represent the new part of a +// pseudo-diff. +type newValueWriter struct { + *MapFieldWriter + + // A list of keys that should be marked as computed. + computedKeys map[string]bool + + // A lock to prevent races on writes. The underlying writer will have one as + // well - this is for computed keys. + lock sync.Mutex + + // To be used with init. + once sync.Once +} + +// init performs any initialization tasks for the newValueWriter. +func (w *newValueWriter) init() { + if w.computedKeys == nil { + w.computedKeys = make(map[string]bool) + } +} + +// WriteField overrides MapValueWriter's WriteField, adding the ability to flag +// the address as computed. +func (w *newValueWriter) WriteField(address []string, value interface{}, computed bool) error { + // Fail the write if we have a non-nil value and computed is true. + // NewComputed values should not have a value when written. + if value != nil && computed { + return errors.New("Non-nil value with computed set") + } + + if err := w.MapFieldWriter.WriteField(address, value); err != nil { + return err + } + + w.once.Do(w.init) + + w.lock.Lock() + defer w.lock.Unlock() + if computed { + w.computedKeys[strings.Join(address, ".")] = true + } + return nil +} + +// ComputedKeysMap returns the underlying computed keys map. +func (w *newValueWriter) ComputedKeysMap() map[string]bool { + w.once.Do(w.init) + return w.computedKeys +} + +// newValueReader is a minor re-implementation of MapFieldReader and is the +// read counterpart to MapValueWriter, allowing the read of keys flagged as +// computed to accommodate the diff override logic in ResourceDiff. +type newValueReader struct { + *MapFieldReader + + // The list of computed keys from a newValueWriter. + computedKeys map[string]bool +} + +// ReadField reads the values from the underlying writer, returning the +// computed value if it is found as well. +func (r *newValueReader) ReadField(address []string) (FieldReadResult, error) { + addrKey := strings.Join(address, ".") + v, err := r.MapFieldReader.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + for computedKey := range r.computedKeys { + if childAddrOf(addrKey, computedKey) { + if strings.HasSuffix(addrKey, ".#") { + // This is a count value for a list or set that has been marked as + // computed, or a sub-list/sub-set of a complex resource that has + // been marked as computed. We need to pass through to other readers + // so that an accurate previous count can be fetched for the diff. + v.Exists = false + } + v.Computed = true + } + } + + return v, nil +} + +// ResourceDiff is used to query and make custom changes to an in-flight diff. 
+// It can be used to veto particular changes in the diff, customize the diff +// that has been created, or diff values not controlled by config. +// +// The object functions similar to ResourceData, however most notably lacks +// Set, SetPartial, and Partial, as it should be used to change diff values +// only. Most other first-class ResourceData functions exist, namely Get, +// GetOk, HasChange, and GetChange exist. +// +// All functions in ResourceDiff, save for ForceNew, can only be used on +// computed fields. +type ResourceDiff struct { + // The schema for the resource being worked on. + schema map[string]*Schema + + // The current config for this resource. + config *terraform.ResourceConfig + + // The state for this resource as it exists post-refresh, after the initial + // diff. + state *terraform.InstanceState + + // The diff created by Terraform. This diff is used, along with state, + // config, and custom-set diff data, to provide a multi-level reader + // experience similar to ResourceData. + diff *terraform.InstanceDiff + + // The internal reader structure that contains the state, config, the default + // diff, and the new diff. + multiReader *MultiLevelFieldReader + + // A writer that writes overridden new fields. + newWriter *newValueWriter + + // Tracks which keys have been updated by ResourceDiff to ensure that the + // diff does not get re-run on keys that were not touched, or diffs that were + // just removed (re-running on the latter would just roll back the removal). + updatedKeys map[string]bool + + // Tracks which keys were flagged as forceNew. These keys are not saved in + // newWriter, but we need to track them so that they can be re-diffed later. + forcedNewKeys map[string]bool +} + +// newResourceDiff creates a new ResourceDiff instance. +func newResourceDiff(schema map[string]*Schema, config *terraform.ResourceConfig, state *terraform.InstanceState, diff *terraform.InstanceDiff) *ResourceDiff { + d := &ResourceDiff{ + config: config, + state: state, + diff: diff, + schema: schema, + } + + d.newWriter = &newValueWriter{ + MapFieldWriter: &MapFieldWriter{Schema: d.schema}, + } + readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["newDiff"] = &newValueReader{ + MapFieldReader: &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.newWriter.Map()), + }, + computedKeys: d.newWriter.ComputedKeysMap(), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "newDiff", + }, + + Readers: readers, + } + + d.updatedKeys = make(map[string]bool) + d.forcedNewKeys = make(map[string]bool) + + return d +} + +// UpdatedKeys returns the keys that were updated by this ResourceDiff run. +// These are the only keys that a diff should be re-calculated for. +// +// This is the combined result of both keys for which diff values were updated +// for or cleared, and also keys that were flagged to be re-diffed as a result +// of ForceNew. 
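// Illustrative sketch, not part of the vendored SDK code above. It shows the
// provider-author side of ResourceDiff: a CustomizeDiff function that marks a
// Computed attribute as unknown whenever the attribute it is derived from
// changes. The "content" and "etag" attribute names are hypothetical; the
// function would be wired up via the Resource's CustomizeDiff field.
package example

import (
	"context"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func exampleCustomizeDiff(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error {
	// "etag" is Computed and derived from "content". When "content" changes,
	// the etag currently in state is no longer a valid prediction, so blank it
	// out and let the apply fill in the real value.
	if d.HasChange("content") {
		if err := d.SetNewComputed("etag"); err != nil {
			return err
		}
	}
	return nil
}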
+func (d *ResourceDiff) UpdatedKeys() []string { + var s []string + for k := range d.updatedKeys { + s = append(s, k) + } + for k := range d.forcedNewKeys { + for _, l := range s { + if k == l { + break + } + } + s = append(s, k) + } + return s +} + +// Clear wipes the diff for a particular key. It is called by ResourceDiff's +// functionality to remove any possibility of conflicts, but can be called on +// its own to just remove a specific key from the diff completely. +// +// Note that this does not wipe an override. This function is only allowed on +// computed keys. +func (d *ResourceDiff) Clear(key string) error { + if err := d.checkKey(key, "Clear", true); err != nil { + return err + } + + return d.clear(key) +} + +func (d *ResourceDiff) clear(key string) error { + // Check the schema to make sure that this key exists first. + schemaL := addrToSchema(strings.Split(key, "."), d.schema) + if len(schemaL) == 0 { + return fmt.Errorf("%s is not a valid key", key) + } + + for k := range d.diff.Attributes { + if strings.HasPrefix(k, key) { + delete(d.diff.Attributes, k) + } + } + return nil +} + +// GetChangedKeysPrefix helps to implement Resource.CustomizeDiff +// where we need to act on all nested fields +// without calling out each one separately +func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string { + keys := make([]string, 0) + for k := range d.diff.Attributes { + if strings.HasPrefix(k, prefix) { + keys = append(keys, k) + } + } + return keys +} + +// diffChange helps to implement resourceDiffer and derives its change values +// from ResourceDiff's own change data, in addition to existing diff, config, and state. +func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) { + old, new, customized := d.getChange(key) + + if !old.Exists { + old.Value = nil + } + if !new.Exists || d.removed(key) { + new.Value = nil + } + + return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized +} + +// SetNew is used to set a new diff value for the mentioned key. The value must +// be correct for the attribute's schema (mostly relevant for maps, lists, and +// sets). The original value from the state is used as the old value. +// +// This function is only allowed on computed attributes. +func (d *ResourceDiff) SetNew(key string, value interface{}) error { + if err := d.checkKey(key, "SetNew", false); err != nil { + return err + } + + return d.setDiff(key, value, false) +} + +// SetNewComputed functions like SetNew, except that it blanks out a new value +// and marks it as computed. +// +// This function is only allowed on computed attributes. +func (d *ResourceDiff) SetNewComputed(key string) error { + if err := d.checkKey(key, "SetNewComputed", false); err != nil { + return err + } + + return d.setDiff(key, nil, true) +} + +// setDiff performs common diff setting behaviour. +func (d *ResourceDiff) setDiff(key string, new interface{}, computed bool) error { + if err := d.clear(key); err != nil { + return err + } + + if err := d.newWriter.WriteField(strings.Split(key, "."), new, computed); err != nil { + return fmt.Errorf("Cannot set new diff value for key %s: %s", key, err) + } + + d.updatedKeys[key] = true + + return nil +} + +// ForceNew force-flags ForceNew in the schema for a specific key, and +// re-calculates its diff, effectively causing this attribute to force a new +// resource. 
+// +// Keep in mind that forcing a new resource will force a second run of the +// resource's CustomizeDiff function (with a new ResourceDiff) once the current +// one has completed. This second run is performed without state. This behavior +// will be the same as if a new resource is being created and is performed to +// ensure that the diff looks like the diff for a new resource as much as +// possible. CustomizeDiff should expect such a scenario and act correctly. +// +// This function is a no-op/error if there is no diff. +// +// Note that the change to schema is permanent for the lifecycle of this +// specific ResourceDiff instance. +func (d *ResourceDiff) ForceNew(key string) error { + if !d.HasChange(key) { + return fmt.Errorf("ForceNew: No changes for %s", key) + } + + keyParts := strings.Split(key, ".") + var schema *Schema + schemaL := addrToSchema(keyParts, d.schema) + if len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } else { + return fmt.Errorf("ForceNew: %s is not a valid key", key) + } + + schema.ForceNew = true + + // Flag this for a re-diff. Don't save any values to guarantee that existing + // diffs aren't messed with, as this gets messy when dealing with complex + // structures, zero values, etc. + d.forcedNewKeys[keyParts[0]] = true + + return nil +} + +// Get hands off to ResourceData.Get. +func (d *ResourceDiff) Get(key string) interface{} { + r, _ := d.GetOk(key) + return r +} + +// GetChange gets the change between the state and diff, checking first to see +// if an overridden diff exists. +// +// This implementation differs from ResourceData's in the way that we first get +// results from the exact levels for the new diff, then from state and diff as +// per normal. +func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) { + old, new, _ := d.getChange(key) + return old.Value, new.Value +} + +// GetOk functions the same way as ResourceData.GetOk, but it also checks the +// new diff levels to provide data consistent with the current state of the +// customized diff. +func (d *ResourceDiff) GetOk(key string) (interface{}, bool) { + r := d.get(strings.Split(key, "."), "newDiff") + exists := r.Exists && !r.Computed + if exists { + // If it exists, we also want to verify it is not the zero-value. + value := r.Value + zero := r.Schema.Type.Zero() + + if eq, ok := value.(Equal); ok { + exists = !eq.Equal(zero) + } else { + exists = !reflect.DeepEqual(value, zero) + } + } + + return r.Value, exists +} + +// GetOkExists functions the same way as GetOkExists within ResourceData, but +// it also checks the new diff levels to provide data consistent with the +// current state of the customized diff. +// +// This is nearly the same function as GetOk, yet it does not check +// for the zero value of the attribute's type. This allows for attributes +// without a default, to fully check for a literal assignment, regardless +// of the zero-value for that type. +func (d *ResourceDiff) GetOkExists(key string) (interface{}, bool) { + r := d.get(strings.Split(key, "."), "newDiff") + exists := r.Exists && !r.Computed + return r.Value, exists +} + +// NewValueKnown returns true if the new value for the given key is available +// as its final value at diff time. If the return value is false, this means +// either the value is based of interpolation that was unavailable at diff +// time, or that the value was explicitly marked as computed by SetNewComputed. 
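// Illustrative sketch, not part of the vendored SDK code above. It shows
// NewValueKnown guarding a plan-time check, as described earlier: the diff is
// only vetoed when the new value is actually known at diff time. The
// "retention_days" attribute and the 365-day limit are hypothetical.
package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func validateRetentionDiff(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error {
	// Skip the check while "retention_days" still depends on an unknown value.
	if !d.NewValueKnown("retention_days") {
		return nil
	}
	if v, ok := d.GetOk("retention_days"); ok && v.(int) > 365 {
		// Returning an error from CustomizeDiff vetoes the diff and aborts the plan.
		return fmt.Errorf("retention_days may not exceed 365, got %d", v.(int))
	}
	return nil
}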
+func (d *ResourceDiff) NewValueKnown(key string) bool { + r := d.get(strings.Split(key, "."), "newDiff") + return !r.Computed +} + +// HasChange checks to see if there is a change between state and the diff, or +// in the overridden diff. +func (d *ResourceDiff) HasChange(key string) bool { + old, new := d.GetChange(key) + + // If the type implements the Equal interface, then call that + // instead of just doing a reflect.DeepEqual. An example where this is + // needed is *Set + if eq, ok := old.(Equal); ok { + return !eq.Equal(new) + } + + return !reflect.DeepEqual(old, new) +} + +// Id returns the ID of this resource. +// +// Note that technically, ID does not change during diffs (it either has +// already changed in the refresh, or will change on update), hence we do not +// support updating the ID or fetching it from anything else other than state. +func (d *ResourceDiff) Id() string { + var result string + + if d.state != nil { + result = d.state.ID + } + return result +} + +// getChange gets values from two different levels, designed for use in +// diffChange, HasChange, and GetChange. +// +// This implementation differs from ResourceData's in the way that we first get +// results from the exact levels for the new diff, then from state and diff as +// per normal. +func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) { + old := d.get(strings.Split(key, "."), "state") + var new getResult + for p := range d.updatedKeys { + if childAddrOf(key, p) { + new = d.getExact(strings.Split(key, "."), "newDiff") + return old, new, true + } + } + new = d.get(strings.Split(key, "."), "newDiff") + return old, new, false +} + +// removed checks to see if the key is present in the existing, pre-customized +// diff and if it was marked as NewRemoved. +func (d *ResourceDiff) removed(k string) bool { + diff, ok := d.diff.Attributes[k] + if !ok { + return false + } + return diff.NewRemoved +} + +// get performs the appropriate multi-level reader logic for ResourceDiff, +// starting at source. Refer to newResourceDiff for the level order. +func (d *ResourceDiff) get(addr []string, source string) getResult { + result, err := d.multiReader.ReadFieldMerge(addr, source) + if err != nil { + panic(err) + } + + return d.finalizeResult(addr, result) +} + +// getExact gets an attribute from the exact level referenced by source. +func (d *ResourceDiff) getExact(addr []string, source string) getResult { + result, err := d.multiReader.ReadFieldExact(addr, source) + if err != nil { + panic(err) + } + + return d.finalizeResult(addr, result) +} + +// finalizeResult does some post-processing of the result produced by get and getExact. +func (d *ResourceDiff) finalizeResult(addr []string, result FieldReadResult) getResult { + // If the result doesn't exist, then we set the value to the zero value + var schema *Schema + if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + + if result.Value == nil && schema != nil { + result.Value = result.ValueOrZero(schema) + } + + // Transform the FieldReadResult into a getResult. It might be worth + // merging these two structures one day. + return getResult{ + Value: result.Value, + ValueProcessed: result.ValueProcessed, + Computed: result.Computed, + Exists: result.Exists, + Schema: schema, + } +} + +// childAddrOf does a comparison of two addresses to see if one is the child of +// the other. 
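// Illustrative sketch, not part of the vendored SDK code above. It shows the
// dotted flatmap addresses the helpers above operate on ("block.index.attr",
// plus ".#" count keys), via GetChangedKeysPrefix in a CustomizeDiff function.
// The "rule" list block name is hypothetical.
package example

import (
	"context"
	"log"
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func logChangedRules(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error {
	// Keys come back in flatmap form, e.g. "rule.0.port", "rule.1.protocol".
	for _, key := range d.GetChangedKeysPrefix("rule") {
		if strings.HasSuffix(key, ".#") {
			continue // skip the list-count key
		}
		oldV, newV := d.GetChange(key)
		log.Printf("[DEBUG] %s changed: %#v -> %#v", key, oldV, newV)
	}
	return nil
}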
+func childAddrOf(child, parent string) bool { + cs := strings.Split(child, ".") + ps := strings.Split(parent, ".") + if len(ps) > len(cs) { + return false + } + return reflect.DeepEqual(ps, cs[:len(ps)]) +} + +// checkKey checks the key to make sure it exists and is computed. +func (d *ResourceDiff) checkKey(key, caller string, nested bool) error { + var schema *Schema + if nested { + keyParts := strings.Split(key, ".") + schemaL := addrToSchema(keyParts, d.schema) + if len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + } else { + s, ok := d.schema[key] + if ok { + schema = s + } + } + if schema == nil { + return fmt.Errorf("%s: invalid key: %s", caller, key) + } + if !schema.Computed { + return fmt.Errorf("%s only operates on computed keys - %s is not one", caller, key) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go new file mode 100644 index 000000000..3b17c8e96 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_importer.go @@ -0,0 +1,79 @@ +package schema + +import ( + "context" + "errors" +) + +// ResourceImporter defines how a resource is imported in Terraform. This +// can be set onto a Resource struct to make it Importable. Not all resources +// have to be importable; if a Resource doesn't have a ResourceImporter then +// it won't be importable. +// +// "Importing" in Terraform is the process of taking an already-created +// resource and bringing it under Terraform management. This can include +// updating Terraform state, generating Terraform configuration, etc. +type ResourceImporter struct { + // State is called to convert an ID to one or more InstanceState to + // insert into the Terraform state. + // + // Deprecated: State is deprecated in favor of StateContext. + // Only one of the two functions can bet set. + State StateFunc + + // StateContext is called to convert an ID to one or more InstanceState to + // insert into the Terraform state. If this isn't specified, then + // the ID is passed straight through. This function receives a context + // that will cancel if Terraform sends a cancellation signal. + StateContext StateContextFunc +} + +// StateFunc is the function called to import a resource into the Terraform state. +// +// Deprecated: Please use the context aware equivalent StateContextFunc. +type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error) + +// StateContextFunc is the function called to import a resource into the +// Terraform state. It is given a ResourceData with only ID set. This +// ID is going to be an arbitrary value given by the user and may not map +// directly to the ID format that the resource expects, so that should +// be validated. +// +// This should return a slice of ResourceData that turn into the state +// that was imported. This might be as simple as returning only the argument +// that was given to the function. In other cases (such as AWS security groups), +// an import may fan out to multiple resources and this will have to return +// multiple. +// +// To create the ResourceData structures for other resource types (if +// you have to), instantiate your resource and call the Data function. +type StateContextFunc func(context.Context, *ResourceData, interface{}) ([]*ResourceData, error) + +// InternalValidate should be called to validate the structure of this +// importer. This should be called in a unit test. 
+// +// Resource.InternalValidate() will automatically call this, so this doesn't +// need to be called manually. Further, Resource.InternalValidate() is +// automatically called by Provider.InternalValidate(), so you only need +// to internal validate the provider. +func (r *ResourceImporter) InternalValidate() error { + if r.State != nil && r.StateContext != nil { + return errors.New("Both State and StateContext cannot be set.") + } + return nil +} + +// ImportStatePassthrough is an implementation of StateFunc that can be +// used to simply pass the ID directly through. +// +// Deprecated: Please use the context aware ImportStatePassthroughContext instead +func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) { + return []*ResourceData{d}, nil +} + +// ImportStatePassthroughContext is an implementation of StateContextFunc that can be +// used to simply pass the ID directly through. This should be used only +// in the case that an ID-only refresh is possible. +func ImportStatePassthroughContext(ctx context.Context, d *ResourceData, m interface{}) ([]*ResourceData, error) { + return []*ResourceData{d}, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go new file mode 100644 index 000000000..cee0b6781 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/resource_timeout.go @@ -0,0 +1,264 @@ +package schema + +import ( + "fmt" + "log" + "time" + + "github.com/mitchellh/copystructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0" +const TimeoutsConfigKey = "timeouts" + +const ( + TimeoutCreate = "create" + TimeoutRead = "read" + TimeoutUpdate = "update" + TimeoutDelete = "delete" + TimeoutDefault = "default" +) + +func timeoutKeys() []string { + return []string{ + TimeoutCreate, + TimeoutRead, + TimeoutUpdate, + TimeoutDelete, + TimeoutDefault, + } +} + +// could be time.Duration, int64 or float64 +func DefaultTimeout(tx interface{}) *time.Duration { + var td time.Duration + switch raw := tx.(type) { + case time.Duration: + return &raw + case int64: + td = time.Duration(raw) + case float64: + td = time.Duration(int64(raw)) + default: + log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx) + } + return &td +} + +type ResourceTimeout struct { + Create, Read, Update, Delete, Default *time.Duration +} + +// ConfigDecode takes a schema and the configuration (available in Diff) and +// validates, parses the timeouts into `t` +func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) error { + if s.Timeouts != nil { + raw, err := copystructure.Copy(s.Timeouts) + if err != nil { + log.Printf("[DEBUG] Error with deep copy: %s", err) + } + *t = *raw.(*ResourceTimeout) + } + + if raw, ok := c.Config[TimeoutsConfigKey]; ok { + var rawTimeouts []map[string]interface{} + switch raw := raw.(type) { + case map[string]interface{}: + rawTimeouts = append(rawTimeouts, raw) + case []map[string]interface{}: + rawTimeouts = raw + case string: + if raw == hcl2shim.UnknownVariableValue { + // Timeout is not defined in the config + // Defaults will be used instead + return nil + } else { + log.Printf("[ERROR] Invalid timeout value: %q", raw) + return fmt.Errorf("Invalid Timeout value found") + } + case []interface{}: + for _, r := 
range raw { + if rMap, ok := r.(map[string]interface{}); ok { + rawTimeouts = append(rawTimeouts, rMap) + } else { + // Go will not allow a fallthrough + log.Printf("[ERROR] Invalid timeout structure: %#v", raw) + return fmt.Errorf("Invalid Timeout structure found") + } + } + default: + log.Printf("[ERROR] Invalid timeout structure: %#v", raw) + return fmt.Errorf("Invalid Timeout structure found") + } + + for _, timeoutValues := range rawTimeouts { + for timeKey, timeValue := range timeoutValues { + // validate that we're dealing with the normal CRUD actions + var found bool + for _, key := range timeoutKeys() { + if timeKey == key { + found = true + break + } + } + + if !found { + return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey) + } + + // Get timeout + rt, err := time.ParseDuration(timeValue.(string)) + if err != nil { + return fmt.Errorf("Error parsing %q timeout: %s", timeKey, err) + } + + var timeout *time.Duration + switch timeKey { + case TimeoutCreate: + timeout = t.Create + case TimeoutUpdate: + timeout = t.Update + case TimeoutRead: + timeout = t.Read + case TimeoutDelete: + timeout = t.Delete + case TimeoutDefault: + timeout = t.Default + } + + // If the resource has not delcared this in the definition, then error + // with an unsupported message + if timeout == nil { + return unsupportedTimeoutKeyError(timeKey) + } + + *timeout = rt + } + return nil + } + } + + return nil +} + +func unsupportedTimeoutKeyError(key string) error { + return fmt.Errorf("Timeout Key (%s) is not supported", key) +} + +// DiffEncode, StateEncode, and MetaDecode are analogous to the Go stdlib JSONEncoder +// interface: they encode/decode a timeouts struct from an instance diff, which is +// where the timeout data is stored after a diff to pass into Apply. +// +// StateEncode encodes the timeout into the ResourceData's InstanceState for +// saving to state +// +func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error { + return t.metaEncode(id) +} + +func (t *ResourceTimeout) StateEncode(is *terraform.InstanceState) error { + return t.metaEncode(is) +} + +// metaEncode encodes the ResourceTimeout into a map[string]interface{} format +// and stores it in the Meta field of the interface it's given. 
+// Assumes the interface is either *terraform.InstanceState or +// *terraform.InstanceDiff, returns an error otherwise +func (t *ResourceTimeout) metaEncode(ids interface{}) error { + m := make(map[string]interface{}) + + if t.Create != nil { + m[TimeoutCreate] = t.Create.Nanoseconds() + } + if t.Read != nil { + m[TimeoutRead] = t.Read.Nanoseconds() + } + if t.Update != nil { + m[TimeoutUpdate] = t.Update.Nanoseconds() + } + if t.Delete != nil { + m[TimeoutDelete] = t.Delete.Nanoseconds() + } + if t.Default != nil { + m[TimeoutDefault] = t.Default.Nanoseconds() + // for any key above that is nil, if default is specified, we need to + // populate it with the default + for _, k := range timeoutKeys() { + if _, ok := m[k]; !ok { + m[k] = t.Default.Nanoseconds() + } + } + } + + // only add the Timeout to the Meta if we have values + if len(m) > 0 { + switch instance := ids.(type) { + case *terraform.InstanceDiff: + if instance.Meta == nil { + instance.Meta = make(map[string]interface{}) + } + instance.Meta[TimeoutKey] = m + case *terraform.InstanceState: + if instance.Meta == nil { + instance.Meta = make(map[string]interface{}) + } + instance.Meta[TimeoutKey] = m + default: + return fmt.Errorf("Error matching type for Diff Encode") + } + } + + return nil +} + +func (t *ResourceTimeout) StateDecode(id *terraform.InstanceState) error { + return t.metaDecode(id) +} +func (t *ResourceTimeout) DiffDecode(is *terraform.InstanceDiff) error { + return t.metaDecode(is) +} + +func (t *ResourceTimeout) metaDecode(ids interface{}) error { + var rawMeta interface{} + var ok bool + switch rawInstance := ids.(type) { + case *terraform.InstanceDiff: + rawMeta, ok = rawInstance.Meta[TimeoutKey] + if !ok { + return nil + } + case *terraform.InstanceState: + rawMeta, ok = rawInstance.Meta[TimeoutKey] + if !ok { + return nil + } + default: + return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids) + } + + times := rawMeta.(map[string]interface{}) + if len(times) == 0 { + return nil + } + + if v, ok := times[TimeoutCreate]; ok { + t.Create = DefaultTimeout(v) + } + if v, ok := times[TimeoutRead]; ok { + t.Read = DefaultTimeout(v) + } + if v, ok := times[TimeoutUpdate]; ok { + t.Update = DefaultTimeout(v) + } + if v, ok := times[TimeoutDelete]; ok { + t.Delete = DefaultTimeout(v) + } + if v, ok := times[TimeoutDefault]; ok { + t.Default = DefaultTimeout(v) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go new file mode 100644 index 000000000..24befc9d7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/schema.go @@ -0,0 +1,2121 @@ +// schema is a high-level framework for easily writing new providers +// for Terraform. Usage of schema is recommended over attempting to write +// to the low-level plugin interfaces manually. +// +// schema breaks down provider creation into simple CRUD operations for +// resources. The logic of diffing, destroying before creating, updating +// or creating, etc. is all handled by the framework. The plugin author +// only needs to implement a configuration schema and the CRUD operations and +// everything else is meant to just work. +// +// A good starting point is to view the Provider structure. 
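+//
+// As an illustrative sketch only (the attribute names are hypothetical, and
+// only fields defined on Schema in this file are shown), a resource's schema
+// map might look like:
+//
+//	map[string]*Schema{
+//		"name": {
+//			Type:     TypeString,
+//			Required: true,
+//			ForceNew: true,
+//		},
+//		"tags": {
+//			Type:     TypeMap,
+//			Optional: true,
+//			Elem:     &Schema{Type: TypeString},
+//		},
+//		"arn": {
+//			Type:     TypeString,
+//			Computed: true,
+//		},
+//	}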
+package schema + +import ( + "context" + "fmt" + "log" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/go-cty/cty" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// Schema is used to describe the structure of a value. +// +// Read the documentation of the struct elements for important details. +type Schema struct { + // Type is the type of the value and must be one of the ValueType values. + // + // This type not only determines what type is expected/valid in configuring + // this value, but also what type is returned when ResourceData.Get is + // called. The types returned by Get are: + // + // TypeBool - bool + // TypeInt - int + // TypeFloat - float64 + // TypeString - string + // TypeList - []interface{} + // TypeMap - map[string]interface{} + // TypeSet - *schema.Set + // + Type ValueType + + // ConfigMode allows for overriding the default behaviors for mapping + // schema entries onto configuration constructs. + // + // By default, the Elem field is used to choose whether a particular + // schema is represented in configuration as an attribute or as a nested + // block; if Elem is a *schema.Resource then it's a block and it's an + // attribute otherwise. + // + // If Elem is *schema.Resource then setting ConfigMode to + // SchemaConfigModeAttr will force it to be represented in configuration + // as an attribute, which means that the Computed flag can be used to + // provide default elements when the argument isn't set at all, while still + // allowing the user to force zero elements by explicitly assigning an + // empty list. + // + // When Computed is set without Optional, the attribute is not settable + // in configuration at all and so SchemaConfigModeAttr is the automatic + // behavior, and SchemaConfigModeBlock is not permitted. + ConfigMode SchemaConfigMode + + // If one of these is set, then this item can come from the configuration. + // Both cannot be set. If Optional is set, the value is optional. If + // Required is set, the value is required. + // + // One of these must be set if the value is not computed. That is: + // value either comes from the config, is computed, or is both. + Optional bool + Required bool + + // If this is non-nil, the provided function will be used during diff + // of this field. If this is nil, a default diff for the type of the + // schema will be used. + // + // This allows comparison based on something other than primitive, list + // or map equality - for example SSH public keys may be considered + // equivalent regardless of trailing whitespace. + DiffSuppressFunc SchemaDiffSuppressFunc + + // If this is non-nil, then this will be a default value that is used + // when this item is not set in the configuration. + // + // DefaultFunc can be specified to compute a dynamic default. + // Only one of Default or DefaultFunc can be set. If DefaultFunc is + // used then its return value should be stable to avoid generating + // confusing/perpetual diffs. + // + // Changing either Default or the return value of DefaultFunc can be + // a breaking change, especially if the attribute in question has + // ForceNew set. 
If a default needs to change to align with changing + // assumptions in an upstream API then it may be necessary to also use + // the MigrateState function on the resource to change the state to match, + // or have the Read function adjust the state value to align with the + // new default. + // + // If Required is true above, then Default cannot be set. DefaultFunc + // can be set with Required. If the DefaultFunc returns nil, then there + // will be no default and the user will be asked to fill it in. + // + // If either of these is set, then the user won't be asked for input + // for this key if the default is not nil. + Default interface{} + DefaultFunc SchemaDefaultFunc + + // Description is used as the description for docs, the language server and + // other user facing usage. It can be plain-text or markdown depending on the + // global DescriptionKind setting. + Description string + + // InputDefault is the default value to use for when inputs are requested. + // This differs from Default in that if Default is set, no input is + // asked for. If Input is asked, this will be the default value offered. + InputDefault string + + // The fields below relate to diffs. + // + // If Computed is true, then the result of this value is computed + // (unless specified by config) on creation. + // + // If ForceNew is true, then a change in this resource necessitates + // the creation of a new resource. + // + // StateFunc is a function called to change the value of this before + // storing it in the state (and likewise before comparing for diffs). + // The use for this is for example with large strings, you may want + // to simply store the hash of it. + Computed bool + ForceNew bool + StateFunc SchemaStateFunc + + // The following fields are only set for a TypeList, TypeSet, or TypeMap. + // + // Elem represents the element type. For a TypeMap, it must be a *Schema + // with a Type that is one of the primitives: TypeString, TypeBool, + // TypeInt, or TypeFloat. Otherwise it may be either a *Schema or a + // *Resource. If it is *Schema, the element type is just a simple value. + // If it is *Resource, the element type is a complex structure, + // potentially managed via its own CRUD actions on the API. + Elem interface{} + + // The following fields are only set for a TypeList or TypeSet. + // + // MaxItems defines a maximum amount of items that can exist within a + // TypeSet or TypeList. Specific use cases would be if a TypeSet is being + // used to wrap a complex structure, however more than one instance would + // cause instability. + // + // MinItems defines a minimum amount of items that can exist within a + // TypeSet or TypeList. Specific use cases would be if a TypeSet is being + // used to wrap a complex structure, however less than one instance would + // cause instability. + // + // If the field Optional is set to true then MinItems is ignored and thus + // effectively zero. + MaxItems int + MinItems int + + // The following fields are only valid for a TypeSet type. + // + // Set defines a function to determine the unique ID of an item so that + // a proper set can be built. + Set SchemaSetFunc + + // ComputedWhen is a set of queries on the configuration. Whenever any + // of these things is changed, it will require a recompute (this requires + // that Computed is set to true). + // + // NOTE: This currently does not work. + ComputedWhen []string + + // ConflictsWith is a set of schema keys that conflict with this schema. + // This will only check that they're set in the _config_. 
This will not + // raise an error for a malfunctioning resource that sets a conflicting + // key. + // + // ExactlyOneOf is a set of schema keys that, when set, only one of the + // keys in that list can be specified. It will error if none are + // specified as well. + // + // AtLeastOneOf is a set of schema keys that, when set, at least one of + // the keys in that list must be specified. + // + // RequiredWith is a set of schema keys that must be set simultaneously. + ConflictsWith []string + ExactlyOneOf []string + AtLeastOneOf []string + RequiredWith []string + + // When Deprecated is set, this attribute is deprecated. + // + // A deprecated field still works, but will probably stop working in near + // future. This string is the message shown to the user with instructions on + // how to address the deprecation. + Deprecated string + + // ValidateFunc allows individual fields to define arbitrary validation + // logic. It is yielded the provided config value as an interface{} that is + // guaranteed to be of the proper Schema type, and it can yield warnings or + // errors based on inspection of that value. + // + // ValidateFunc is honored only when the schema's Type is set to TypeInt, + // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. + // + // Deprecated: please use ValidateDiagFunc + ValidateFunc SchemaValidateFunc + + // ValidateDiagFunc allows individual fields to define arbitrary validation + // logic. It is yielded the provided config value as an interface{} that is + // guaranteed to be of the proper Schema type, and it can yield diagnostics + // based on inspection of that value. + // + // ValidateDiagFunc is honored only when the schema's Type is set to TypeInt, + // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. + // + // ValidateDiagFunc is also yielded the cty.Path the SDK has built up to this + // attribute. The SDK will automatically set the AttributePath of any returned + // Diagnostics to this path. Therefore the developer does not need to set + // the AttributePath for primitive types. + // + // In the case of TypeMap to provide the most precise information, please + // set an AttributePath with the additional cty.IndexStep: + // + // AttributePath: cty.IndexStringPath("key_name") + // + // Or alternatively use the passed in path to create the absolute path: + // + // AttributePath: append(path, cty.IndexStep{Key: cty.StringVal("key_name")}) + ValidateDiagFunc SchemaValidateDiagFunc + + // Sensitive ensures that the attribute's value does not get displayed in + // logs or regular output. It should be used for passwords or other + // secret fields. Future versions of Terraform may encrypt these + // values. + Sensitive bool +} + +// SchemaConfigMode is used to influence how a schema item is mapped into a +// corresponding configuration construct, using the ConfigMode field of +// Schema. +type SchemaConfigMode int + +const ( + SchemaConfigModeAuto SchemaConfigMode = iota + SchemaConfigModeAttr + SchemaConfigModeBlock +) + +// SchemaDiffSuppressFunc is a function which can be used to determine +// whether a detected diff on a schema element is "valid" or not, and +// suppress it from the plan if necessary. +// +// Return true if the diff should be suppressed, false to retain it. +type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool + +// SchemaDefaultFunc is a function called to return a default value for +// a field. 
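+//
+// For example (the attribute and environment variable names are illustrative
+// only), a field can source its default from an environment variable using
+// the EnvDefaultFunc helper defined below:
+//
+//	"region": {
+//		Type:        TypeString,
+//		Optional:    true,
+//		DefaultFunc: EnvDefaultFunc("EXAMPLE_REGION", "us-east-1"),
+//	},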
+type SchemaDefaultFunc func() (interface{}, error) + +// EnvDefaultFunc is a helper function that returns the value of the +// given environment variable, if one exists, or the default value +// otherwise. +func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return dv, nil + } +} + +// MultiEnvDefaultFunc is a helper function that returns the value of the first +// environment variable in the given list that returns a non-empty value. If +// none of the environment variables return a value, the default value is +// returned. +func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc { + return func() (interface{}, error) { + for _, k := range ks { + if v := os.Getenv(k); v != "" { + return v, nil + } + } + return dv, nil + } +} + +// SchemaSetFunc is a function that must return a unique ID for the given +// element. This unique ID is used to store the element in a hash. +type SchemaSetFunc func(interface{}) int + +// SchemaStateFunc is a function used to convert some type to a string +// to be stored in the state. +type SchemaStateFunc func(interface{}) string + +// SchemaValidateFunc is a function used to validate a single field in the +// schema. +// +// Deprecated: please use SchemaValidateDiagFunc +type SchemaValidateFunc func(interface{}, string) ([]string, []error) + +// SchemaValidateDiagFunc is a function used to validate a single field in the +// schema and has Diagnostic support. +type SchemaValidateDiagFunc func(interface{}, cty.Path) diag.Diagnostics + +func (s *Schema) GoString() string { + return fmt.Sprintf("*%#v", *s) +} + +// Returns a default value for this schema by either reading Default or +// evaluating DefaultFunc. If neither of these are defined, returns nil. +func (s *Schema) DefaultValue() (interface{}, error) { + if s.Default != nil { + return s.Default, nil + } + + if s.DefaultFunc != nil { + defaultValue, err := s.DefaultFunc() + if err != nil { + return nil, fmt.Errorf("error loading default: %s", err) + } + return defaultValue, nil + } + + return nil, nil +} + +// Returns a zero value for the schema. +func (s *Schema) ZeroValue() interface{} { + // If it's a set then we'll do a bit of extra work to provide the + // right hashing function in our empty value. + if s.Type == TypeSet { + setFunc := s.Set + if setFunc == nil { + // Default set function uses the schema to hash the whole value + elem := s.Elem + switch t := elem.(type) { + case *Schema: + setFunc = HashSchema(t) + case *Resource: + setFunc = HashResource(t) + default: + panic("invalid set element type") + } + } + return &Set{F: setFunc} + } else { + return s.Type.Zero() + } +} + +func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { + if d == nil { + return d + } + + if s.Type == TypeBool { + normalizeBoolString := func(s string) string { + switch s { + case "0": + return "false" + case "1": + return "true" + } + return s + } + d.Old = normalizeBoolString(d.Old) + d.New = normalizeBoolString(d.New) + } + + if s.Computed && !d.NewRemoved && d.New == "" { + // Computed attribute without a new value set + d.NewComputed = true + } + + if s.ForceNew { + // ForceNew, mark that this field is requiring new under the + // following conditions, explained below: + // + // * Old != New - There is a change in value. This field + // is therefore causing a new resource. 
+ // + // * NewComputed - This field is being computed, hence a + // potential change in value, mark as causing a new resource. + d.RequiresNew = d.Old != d.New || d.NewComputed + } + + if d.NewRemoved { + return d + } + + if s.Computed { + // FIXME: This is where the customized bool from getChange finally + // comes into play. It allows the previously incorrect behavior + // of an empty string being used as "unset" when the value is + // computed. This should be removed once we can properly + // represent an unset/nil value from the configuration. + if !customized { + if d.Old != "" && d.New == "" { + // This is a computed value with an old value set already, + // just let it go. + return nil + } + } + + if d.New == "" && !d.NewComputed { + // Computed attribute without a new value set + d.NewComputed = true + } + } + + if s.Sensitive { + // Set the Sensitive flag so output is hidden in the UI + d.Sensitive = true + } + + return d +} + +func (s *Schema) validateFunc(decoded interface{}, k string, path cty.Path) diag.Diagnostics { + var diags diag.Diagnostics + + if s.ValidateDiagFunc != nil { + diags = s.ValidateDiagFunc(decoded, path) + for i := range diags { + if !diags[i].AttributePath.HasPrefix(path) { + diags[i].AttributePath = append(path, diags[i].AttributePath...) + } + } + } else if s.ValidateFunc != nil { + ws, es := s.ValidateFunc(decoded, k) + for _, w := range ws { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: w, + AttributePath: path, + }) + } + for _, e := range es { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: e.Error(), + AttributePath: path, + }) + } + } + + return diags +} + +// InternalMap is used to aid in the transition to the new schema types and +// protocol. The name is not meant to convey any usefulness, as this is not to +// be used directly by any providers. +type InternalMap = schemaMap + +// schemaMap is a wrapper that adds nice functions on top of schemas. +type schemaMap map[string]*Schema + +func (m schemaMap) panicOnError() bool { + return os.Getenv("TF_ACC") != "" +} + +// Data returns a ResourceData for the given schema, state, and diff. +// +// The diff is optional. +func (m schemaMap) Data( + s *terraform.InstanceState, + d *terraform.InstanceDiff) (*ResourceData, error) { + return &ResourceData{ + schema: m, + state: s, + diff: d, + panicOnError: m.panicOnError(), + }, nil +} + +// DeepCopy returns a copy of this schemaMap. The copy can be safely modified +// without affecting the original. +func (m *schemaMap) DeepCopy() schemaMap { + copy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + return *copy.(*schemaMap) +} + +// Diff returns the diff for a resource given the schema map, +// state, and configuration. 
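+//
+// In outline: the schema-driven diff is computed first; if a CustomizeDiffFunc
+// is supplied (and the resource is not tainted for destruction) it is then
+// invoked, and any keys it updated are re-diffed. When handleRequiresNew is
+// true and the resulting diff requires replacement, the diff is recomputed
+// without state so that it describes the replacement resource, and the
+// RequiresNew markers from the first pass are re-applied. A nil diff is
+// returned when the result is empty.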
+func (m schemaMap) Diff( + ctx context.Context, + s *terraform.InstanceState, + c *terraform.ResourceConfig, + customizeDiff CustomizeDiffFunc, + meta interface{}, + handleRequiresNew bool) (*terraform.InstanceDiff, error) { + result := new(terraform.InstanceDiff) + result.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Make sure to mark if the resource is tainted + if s != nil { + result.DestroyTainted = s.Tainted + } + + d := &ResourceData{ + schema: m, + state: s, + config: c, + panicOnError: m.panicOnError(), + } + + for k, schema := range m { + err := m.diff(k, schema, result, d, false) + if err != nil { + return nil, err + } + } + + // Remove any nil diffs just to keep things clean + for k, v := range result.Attributes { + if v == nil { + delete(result.Attributes, k) + } + } + + // If this is a non-destroy diff, call any custom diff logic that has been + // defined. + if !result.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, s, result) + if err := customizeDiff(ctx, rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result, rd, false) + if err != nil { + return nil, err + } + } + } + + if handleRequiresNew { + // If the diff requires a new resource, then we recompute the diff + // so we have the complete new resource diff, and preserve the + // RequiresNew fields where necessary so the user knows exactly what + // caused that. + if result.RequiresNew() { + // Create the new diff + result2 := new(terraform.InstanceDiff) + result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + // Preserve the DestroyTainted flag + result2.DestroyTainted = result.DestroyTainted + + // Reset the data to not contain state. We have to call init() + // again in order to reset the FieldReaders. + d.state = nil + d.init() + + // Perform the diff again + for k, schema := range m { + err := m.diff(k, schema, result2, d, false) + if err != nil { + return nil, err + } + } + + // Re-run customization + if !result2.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, d.state, result2) + if err := customizeDiff(ctx, rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result2, rd, false) + if err != nil { + return nil, err + } + } + } + + // Force all the fields to not force a new since we know what we + // want to force new. + for k, attr := range result2.Attributes { + if attr == nil { + continue + } + + if attr.RequiresNew { + attr.RequiresNew = false + } + + if s != nil { + attr.Old = s.Attributes[k] + } + } + + // Now copy in all the requires new diffs... + for k, attr := range result.Attributes { + if attr == nil { + continue + } + + newAttr, ok := result2.Attributes[k] + if !ok { + newAttr = attr + } + + if attr.RequiresNew { + newAttr.RequiresNew = true + } + + result2.Attributes[k] = newAttr + } + + // And set the diff! + result = result2 + } + + } + + // Go through and detect all of the ComputedWhens now that we've + // finished the diff. + // TODO + + if result.Empty() { + // If we don't have any diff elements, just return nil + return nil, nil + } + + return result, nil +} + +// Validate validates the configuration against this schema mapping. +func (m schemaMap) Validate(c *terraform.ResourceConfig) diag.Diagnostics { + return m.validateObject("", m, c, cty.Path{}) +} + +// InternalValidate validates the format of this schema. 
This should be called +// from a unit test (and not in user-path code) to verify that a schema +// is properly built. +func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { + return m.internalValidate(topSchemaMap, false) +} + +func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error { + if topSchemaMap == nil { + topSchemaMap = m + } + for k, v := range m { + if v.Type == TypeInvalid { + return fmt.Errorf("%s: Type must be specified", k) + } + + if v.Optional && v.Required { + return fmt.Errorf("%s: Optional or Required must be set, not both", k) + } + + if v.Required && v.Computed { + return fmt.Errorf("%s: Cannot be both Required and Computed", k) + } + + if !v.Required && !v.Optional && !v.Computed { + return fmt.Errorf("%s: One of optional, required, or computed must be set", k) + } + + computedOnly := v.Computed && !v.Optional + + switch v.ConfigMode { + case SchemaConfigModeBlock: + if _, ok := v.Elem.(*Resource); !ok { + return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k) + } + if attrsOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k) + } + if computedOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k) + } + case SchemaConfigModeAttr: + // anything goes + case SchemaConfigModeAuto: + // Since "Auto" for Elem: *Resource would create a nested block, + // and that's impossible inside an attribute, we require it to be + // explicitly overridden as mode "Attr" for clarity. + if _, ok := v.Elem.(*Resource); ok { + if attrsOnly { + return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k) + } + } + default: + return fmt.Errorf("%s: invalid ConfigMode value", k) + } + + if v.Computed && v.Default != nil { + return fmt.Errorf("%s: Default must be nil if computed", k) + } + + if v.Required && v.Default != nil { + return fmt.Errorf("%s: Default cannot be set with Required", k) + } + + if len(v.ComputedWhen) > 0 && !v.Computed { + return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k) + } + + if len(v.ConflictsWith) > 0 && v.Required { + return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k) + } + + if len(v.ExactlyOneOf) > 0 && v.Required { + return fmt.Errorf("%s: ExactlyOneOf cannot be set with Required", k) + } + + if len(v.AtLeastOneOf) > 0 && v.Required { + return fmt.Errorf("%s: AtLeastOneOf cannot be set with Required", k) + } + + if len(v.ConflictsWith) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.ConflictsWith, topSchemaMap, v, false) + if err != nil { + return fmt.Errorf("ConflictsWith: %+v", err) + } + } + + if len(v.RequiredWith) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.RequiredWith, topSchemaMap, v, true) + if err != nil { + return fmt.Errorf("RequiredWith: %+v", err) + } + } + + if len(v.ExactlyOneOf) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.ExactlyOneOf, topSchemaMap, v, true) + if err != nil { + return fmt.Errorf("ExactlyOneOf: %+v", err) + } + } + + if len(v.AtLeastOneOf) > 0 { + err := checkKeysAgainstSchemaFlags(k, v.AtLeastOneOf, topSchemaMap, v, true) + if err != nil { + return fmt.Errorf("AtLeastOneOf: %+v", err) + } + } + + if v.Type == TypeList || v.Type == TypeSet { + if v.Elem == nil { + return fmt.Errorf("%s: Elem must be set for lists", k) + } + + if v.Default != nil { + return fmt.Errorf("%s: Default is not valid for lists or sets", k) + } + + if v.Type != TypeSet && 
v.Set != nil { + return fmt.Errorf("%s: Set can only be set for TypeSet", k) + } + + switch t := v.Elem.(type) { + case *Resource: + attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr + + if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil { + return err + } + case *Schema: + bad := t.Computed || t.Optional || t.Required + if bad { + return fmt.Errorf( + "%s: Elem must have only Type set", k) + } + } + } else { + if v.MaxItems > 0 || v.MinItems > 0 { + return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k) + } + } + + if v.Type == TypeMap && v.Elem != nil { + switch v.Elem.(type) { + case *Resource: + return fmt.Errorf("%s: TypeMap with Elem *Resource not supported,"+ + "use TypeList/TypeSet with Elem *Resource or TypeMap with Elem *Schema", k) + } + } + + if computedOnly { + if len(v.AtLeastOneOf) > 0 { + return fmt.Errorf("%s: AtLeastOneOf is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if len(v.ConflictsWith) > 0 { + return fmt.Errorf("%s: ConflictsWith is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.Default != nil { + return fmt.Errorf("%s: Default is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.DefaultFunc != nil { + return fmt.Errorf("%s: DefaultFunc is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.DiffSuppressFunc != nil { + return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ + " between config and state representation. "+ + "There is no config for computed-only field, nothing to compare.", k) + } + if len(v.ExactlyOneOf) > 0 { + return fmt.Errorf("%s: ExactlyOneOf is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.InputDefault != "" { + return fmt.Errorf("%s: InputDefault is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.MaxItems > 0 { + return fmt.Errorf("%s: MaxItems is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.MinItems > 0 { + return fmt.Errorf("%s: MinItems is for configurable attributes,"+ + "there's nothing to configure on computed-only field", k) + } + if v.StateFunc != nil { + return fmt.Errorf("%s: StateFunc is extraneous, "+ + "value should just be changed before setting on computed-only field", k) + } + if v.ValidateFunc != nil { + return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } + if v.ValidateDiagFunc != nil { + return fmt.Errorf("%s: ValidateDiagFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } + } + + if v.ValidateFunc != nil || v.ValidateDiagFunc != nil { + switch v.Type { + case TypeList, TypeSet: + return fmt.Errorf("%s: ValidateFunc and ValidateDiagFunc are not yet supported on lists or sets.", k) + } + } + + if v.ValidateFunc != nil && v.ValidateDiagFunc != nil { + return fmt.Errorf("%s: ValidateFunc and ValidateDiagFunc cannot both be set", k) + } + + if v.Deprecated == "" { + if !isValidFieldName(k) { + return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k) + } + } + } + + return nil +} + +func checkKeysAgainstSchemaFlags(k string, keys []string, topSchemaMap schemaMap, self *Schema, allowSelfReference 
bool) error { + for _, key := range keys { + parts := strings.Split(key, ".") + sm := topSchemaMap + var target *Schema + for idx, part := range parts { + // Skip index fields if 0 + partInt, err := strconv.Atoi(part) + + if err == nil { + if partInt != 0 { + return fmt.Errorf("%s configuration block reference (%s) can only use the .0. index for TypeList and MaxItems: 1 configuration blocks", k, key) + } + + continue + } + + var ok bool + if target, ok = sm[part]; !ok { + return fmt.Errorf("%s references unknown attribute (%s) at part (%s)", k, key, part) + } + + subResource, ok := target.Elem.(*Resource) + + if !ok { + continue + } + + // Skip Type/MaxItems check if not the last element + if (target.Type == TypeSet || target.MaxItems != 1) && idx+1 != len(parts) { + return fmt.Errorf("%s configuration block reference (%s) can only be used with TypeList and MaxItems: 1 configuration blocks", k, key) + } + + sm = schemaMap(subResource.Schema) + } + + if target == nil { + return fmt.Errorf("%s cannot find target attribute (%s), sm: %#v", k, key, sm) + } + + if target == self && !allowSelfReference { + return fmt.Errorf("%s cannot reference self (%s)", k, key) + } + + if target.Required { + return fmt.Errorf("%s cannot contain Required attribute (%s)", k, key) + } + + if len(target.ComputedWhen) > 0 { + return fmt.Errorf("%s cannot contain Computed(When) attribute (%s)", k, key) + } + } + + return nil +} + +func isValidFieldName(name string) bool { + re := regexp.MustCompile("^[a-z0-9_]+$") + return re.MatchString(name) +} + +// resourceDiffer is an interface that is used by the private diff functions. +// This helps facilitate diff logic for both ResourceData and ResoureDiff with +// minimal divergence in code. +type resourceDiffer interface { + diffChange(string) (interface{}, interface{}, bool, bool, bool) + Get(string) interface{} + GetChange(string) (interface{}, interface{}) + GetOk(string) (interface{}, bool) + HasChange(string) bool + Id() string +} + +func (m schemaMap) diff( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + + unsupressedDiff := new(terraform.InstanceDiff) + unsupressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff) + + var err error + switch schema.Type { + case TypeBool, TypeInt, TypeFloat, TypeString: + err = m.diffString(k, schema, unsupressedDiff, d, all) + case TypeList: + err = m.diffList(k, schema, unsupressedDiff, d, all) + case TypeMap: + err = m.diffMap(k, schema, unsupressedDiff, d, all) + case TypeSet: + err = m.diffSet(k, schema, unsupressedDiff, d, all) + default: + err = fmt.Errorf("%s: unknown type %#v", k, schema.Type) + } + + for attrK, attrV := range unsupressedDiff.Attributes { + switch rd := d.(type) { + case *ResourceData: + if schema.DiffSuppressFunc != nil && attrV != nil && + schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { + // If this attr diff is suppressed, we may still need it in the + // overall diff if it's contained within a set. Rather than + // dropping the diff, make it a NOOP. 
+ if !all { + continue + } + + attrV = &terraform.ResourceAttrDiff{ + Old: attrV.Old, + New: attrV.Old, + } + } + } + diff.Attributes[attrK] = attrV + } + + return err +} + +func (m schemaMap) diffList( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + o, n, _, computedList, customized := d.diffChange(k) + if computedList { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. + if o != nil && n == nil && !computedList && schema.Computed { + return nil + } + + if o == nil { + o = []interface{}{} + } + if n == nil { + n = []interface{}{} + } + if s, ok := o.(*Set); ok { + o = s.List() + } + if s, ok := n.(*Set); ok { + n = s.List() + } + os := o.([]interface{}) + vs := n.([]interface{}) + + // If the new value was set, and the two are equal, then we're done. + // We have to do this check here because sets might be NOT + // reflect.DeepEqual so we need to wait until we get the []interface{} + if !all && nSet && reflect.DeepEqual(os, vs) { + return nil + } + + // Get the counts + oldLen := len(os) + newLen := len(vs) + oldStr := strconv.FormatInt(int64(oldLen), 10) + + // If the whole list is computed, then say that the # is computed + if computedList { + diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{ + Old: oldStr, + NewComputed: true, + RequiresNew: schema.ForceNew, + } + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + computed := oldLen == 0 && newLen == 0 && schema.Computed + if changed || computed || all { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + newStr := "" + if !computed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // Figure out the maximum + maxLen := oldLen + if newLen > maxLen { + maxLen = newLen + } + + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for i := 0; i < maxLen; i++ { + for k2, schema := range t.Schema { + subK := fmt.Sprintf("%s.%d.%s", k, i, k2) + err := m.diff(subK, schema, diff, d, all) + if err != nil { + return err + } + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeList). + t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + for i := 0; i < maxLen; i++ { + subK := fmt.Sprintf("%s.%d", k, i) + err := m.diff(subK, &t2, diff, d, all) + if err != nil { + return err + } + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + + return nil +} + +func (m schemaMap) diffMap( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + prefix := k + "." 
+ + // First get all the values from the state + var stateMap, configMap map[string]string + o, n, _, nComputed, customized := d.diffChange(k) + if err := mapstructure.WeakDecode(o, &stateMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + if err := mapstructure.WeakDecode(n, &configMap); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + // Keep track of whether the state _exists_ at all prior to clearing it + stateExists := o != nil + + // Delete any count values, since we don't use those + delete(configMap, "%") + delete(stateMap, "%") + + // Check if the number of elements has changed. + oldLen, newLen := len(stateMap), len(configMap) + changed := oldLen != newLen + if oldLen != 0 && newLen == 0 && schema.Computed { + changed = false + } + + // It is computed if we have no old value, no new value, the schema + // says it is computed, and it didn't exist in the state before. The + // last point means: if it existed in the state, even empty, then it + // has already been computed. + computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists + + // If the count has changed or we're computed, then add a diff for the + // count. "nComputed" means that the new value _contains_ a value that + // is computed. We don't do granular diffs for this yet, so we mark the + // whole map as computed. + if changed || computed || nComputed { + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed || nComputed, + ForceNew: schema.ForceNew, + } + + oldStr := strconv.FormatInt(int64(oldLen), 10) + newStr := "" + if !computed && !nComputed { + newStr = strconv.FormatInt(int64(newLen), 10) + } else { + oldStr = "" + } + + diff.Attributes[k+".%"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // If the new map is nil and we're computed, then ignore it. + if n == nil && schema.Computed { + return nil + } + + // Now we compare, preferring values from the config map + for k, v := range configMap { + old, ok := stateMap[k] + delete(stateMap, k) + + if old == v && ok && !all { + continue + } + + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: old, + New: v, + }, + customized, + ) + } + for k, v := range stateMap { + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: v, + NewRemoved: true, + }, + customized, + ) + } + + return nil +} + +func (m schemaMap) diffSet( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + + o, n, _, computedSet, customized := d.diffChange(k) + if computedSet { + n = nil + } + nSet := n != nil + + // If we have an old value and no new value is set or will be + // computed once all variables can be interpolated and we're + // computed, then nothing has changed. + if o != nil && n == nil && !computedSet && schema.Computed { + return nil + } + + if o == nil { + o = schema.ZeroValue().(*Set) + } + if n == nil { + n = schema.ZeroValue().(*Set) + } + os := o.(*Set) + ns := n.(*Set) + + // If the new value was set, compare the listCode's to determine if + // the two are equal. Comparing listCode's instead of the actual values + // is needed because there could be computed values in the set which + // would result in false positives while comparing. 
+ if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) { + return nil + } + + // Get the counts + oldLen := os.Len() + newLen := ns.Len() + oldStr := strconv.Itoa(oldLen) + newStr := strconv.Itoa(newLen) + + // Build a schema for our count + countSchema := &Schema{ + Type: TypeInt, + Computed: schema.Computed, + ForceNew: schema.ForceNew, + } + + // If the set computed then say that the # is computed + if computedSet || schema.Computed && !nSet { + // If # already exists, equals 0 and no new set is supplied, there + // is nothing to record in the diff + count, ok := d.GetOk(k + ".#") + if ok && count.(int) == 0 && !nSet && !computedSet { + return nil + } + + // Set the count but make sure that if # does not exist, we don't + // use the zeroed value + countStr := strconv.Itoa(count.(int)) + if !ok { + countStr = "" + } + + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: countStr, + NewComputed: true, + }, + customized, + ) + return nil + } + + // If the counts are not the same, then record that diff + changed := oldLen != newLen + if changed || all { + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) + } + + // Build the list of codes that will make up our set. This is the + // removed codes as well as all the codes in the new codes. + codes := make([][]string, 2) + codes[0] = os.Difference(ns).listCode() + codes[1] = ns.listCode() + for _, list := range codes { + for _, code := range list { + switch t := schema.Elem.(type) { + case *Resource: + // This is a complex resource + for k2, schema := range t.Schema { + subK := fmt.Sprintf("%s.%s.%s", k, code, k2) + err := m.diff(subK, schema, diff, d, true) + if err != nil { + return err + } + } + case *Schema: + // Copy the schema so that we can set Computed/ForceNew from + // the parent schema (the TypeSet). + t2 := *t + t2.ForceNew = schema.ForceNew + + // This is just a primitive element, so go through each and + // just diff each. + subK := fmt.Sprintf("%s.%s", k, code) + err := m.diff(subK, &t2, diff, d, true) + if err != nil { + return err + } + default: + return fmt.Errorf("%s: unknown element type (internal)", k) + } + } + } + + return nil +} + +func (m schemaMap) diffString( + k string, + schema *Schema, + diff *terraform.InstanceDiff, + d resourceDiffer, + all bool) error { + var originalN interface{} + var os, ns string + o, n, _, computed, customized := d.diffChange(k) + if schema.StateFunc != nil && n != nil { + originalN = n + n = schema.StateFunc(n) + } + nraw := n + if nraw == nil && o != nil { + nraw = schema.Type.Zero() + } + if err := mapstructure.WeakDecode(o, &os); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + if err := mapstructure.WeakDecode(nraw, &ns); err != nil { + return fmt.Errorf("%s: %s", k, err) + } + + if os == ns && !all && !computed { + // They're the same value. If there old value is not blank or we + // have an ID, then return right away since we're already setup. 
+ if os != "" || d.Id() != "" { + return nil + } + + // Otherwise, only continue if we're computed + if !schema.Computed { + return nil + } + } + + removed := false + if o != nil && n == nil && !computed { + removed = true + } + if removed && schema.Computed { + return nil + } + + diff.Attributes[k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: os, + New: ns, + NewExtra: originalN, + NewRemoved: removed, + NewComputed: computed, + }, + customized, + ) + + return nil +} + +func (m schemaMap) validate( + k string, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + raw, ok := c.Get(k) + if !ok && schema.DefaultFunc != nil { + // We have a dynamic default. Check if we have a value. + var err error + raw, err = schema.DefaultFunc() + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Loading Default", + Detail: err.Error(), + AttributePath: path, + }) + } + + // We're okay as long as we had a value set + ok = raw != nil + } + + err := validateExactlyOneAttribute(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "ExactlyOne", + Detail: err.Error(), + AttributePath: path, + }) + } + + err = validateAtLeastOneAttribute(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "AtLeastOne", + Detail: err.Error(), + AttributePath: path, + }) + } + + if !ok { + if schema.Required { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Required attribute is not set", + AttributePath: path, + }) + } + return diags + } + + if !schema.Required && !schema.Optional { + // This is a computed-only field + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Computed attribute cannot be set", + AttributePath: path, + }) + } + + err = validateRequiredWithAttribute(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "RequiredWith", + Detail: err.Error(), + AttributePath: path, + }) + } + + // If the value is unknown then we can't validate it yet. + // In particular, this avoids spurious type errors where downstream + // validation code sees UnknownVariableValue as being just a string. + // The SDK has to allow the unknown value through initially, so that + // Required fields set via an interpolated value are accepted. 
+ if !isWhollyKnown(raw) { + if schema.Deprecated != "" { + return append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Attribute is deprecated", + Detail: schema.Deprecated, + AttributePath: path, + }) + } + return diags + } + + err = validateConflictingAttributes(k, schema, c) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "ConflictsWith", + Detail: err.Error(), + AttributePath: path, + }) + } + + return m.validateType(k, raw, schema, c, path) +} + +// isWhollyKnown returns false if the argument contains an UnknownVariableValue +func isWhollyKnown(raw interface{}) bool { + switch raw := raw.(type) { + case string: + if raw == hcl2shim.UnknownVariableValue { + return false + } + case []interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + case map[string]interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + } + return true +} +func validateConflictingAttributes( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.ConflictsWith) == 0 { + return nil + } + + for _, conflictingKey := range schema.ConflictsWith { + if raw, ok := c.Get(conflictingKey); ok { + if raw == hcl2shim.UnknownVariableValue { + // An unknown value might become unset (null) once known, so + // we must defer validation until it's known. + continue + } + return fmt.Errorf( + "%q: conflicts with %s", k, conflictingKey) + } + } + + return nil +} + +func removeDuplicates(elements []string) []string { + encountered := make(map[string]struct{}, 0) + result := []string{} + + for v := range elements { + if _, ok := encountered[elements[v]]; !ok { + encountered[elements[v]] = struct{}{} + result = append(result, elements[v]) + } + } + + return result +} + +func validateRequiredWithAttribute( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.RequiredWith) == 0 { + return nil + } + + allKeys := removeDuplicates(append(schema.RequiredWith, k)) + sort.Strings(allKeys) + + for _, key := range allKeys { + if _, ok := c.Get(key); !ok { + return fmt.Errorf("%q: all of `%s` must be specified", k, strings.Join(allKeys, ",")) + } + } + + return nil +} + +func validateExactlyOneAttribute( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.ExactlyOneOf) == 0 { + return nil + } + + allKeys := removeDuplicates(append(schema.ExactlyOneOf, k)) + sort.Strings(allKeys) + specified := make([]string, 0) + unknownVariableValueCount := 0 + for _, exactlyOneOfKey := range allKeys { + if c.IsComputed(exactlyOneOfKey) { + unknownVariableValueCount++ + continue + } + + _, ok := c.Get(exactlyOneOfKey) + if ok { + specified = append(specified, exactlyOneOfKey) + } + } + + if len(specified) == 0 && unknownVariableValueCount == 0 { + return fmt.Errorf("%q: one of `%s` must be specified", k, strings.Join(allKeys, ",")) + } + + if len(specified) > 1 { + return fmt.Errorf("%q: only one of `%s` can be specified, but `%s` were specified.", k, strings.Join(allKeys, ","), strings.Join(specified, ",")) + } + + return nil +} + +func validateAtLeastOneAttribute( + k string, + schema *Schema, + c *terraform.ResourceConfig) error { + + if len(schema.AtLeastOneOf) == 0 { + return nil + } + + allKeys := removeDuplicates(append(schema.AtLeastOneOf, k)) + sort.Strings(allKeys) + + for _, atLeastOneOfKey := range allKeys { + if _, ok := c.Get(atLeastOneOfKey); ok { + // We can ignore hcl2shim.UnknownVariable by assuming it's been set and 
additional validation elsewhere + // will uncover this if it is in fact null. + return nil + } + } + + return fmt.Errorf("%q: one of `%s` must be specified", k, strings.Join(allKeys, ",")) +} + +func (m schemaMap) validateList( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return diags + } + } + + // schemaMap can't validate nil + if raw == nil { + return diags + } + + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + + if rawV.Kind() != reflect.Slice { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute should be a list", + AttributePath: path, + }) + } + + // We can't validate list length if this came from a dynamic block. + // Since there's no way to determine if something was from a dynamic block + // at this point, we're going to skip validation in the new protocol if + // there are any unknowns. Validate will eventually be called again once + // all values are known. + if !isWhollyKnown(raw) { + return diags + } + + // Validate length + if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "List longer than MaxItems", + Detail: fmt.Sprintf("Attribute supports %d item maximum, config has %d declared", schema.MaxItems, rawV.Len()), + AttributePath: path, + }) + } + + if schema.MinItems > 0 && rawV.Len() < schema.MinItems { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "List shorter than MinItems", + Detail: fmt.Sprintf("Attribute supports %d item minimum, config has %d declared", schema.MinItems, rawV.Len()), + AttributePath: path, + }) + } + + // Now build the []interface{} + raws := make([]interface{}, rawV.Len()) + for i := range raws { + raws[i] = rawV.Index(i).Interface() + } + + for i, raw := range raws { + key := fmt.Sprintf("%s.%d", k, i) + + // Reify the key value from the ResourceConfig. + // If the list was computed we have all raw values, but some of these + // may be known in the config, and aren't individually marked as Computed. + if r, ok := c.Get(key); ok { + raw = r + } + + p := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + + switch t := schema.Elem.(type) { + case *Resource: + // This is a sub-resource + diags = append(diags, m.validateObject(key, t.Schema, c, p)...) + case *Schema: + diags = append(diags, m.validateType(key, raw, t, c, p)...) + } + + } + + return diags +} + +func (m schemaMap) validateMap( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == hcl2shim.UnknownVariableValue { + return diags + } + } + + // schemaMap can't validate nil + if raw == nil { + return diags + } + // We use reflection to verify the slice because you can't + // case to []interface{} unless the slice is exactly that type. + rawV := reflect.ValueOf(raw) + switch rawV.Kind() { + case reflect.String: + // If raw and reified are equal, this is a string and should + // be rejected. 
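The list validation above enforces MinItems/MaxItems and then dispatches each element either to validateObject (when Elem is a *Resource) or validateType (when Elem is a *Schema). A rough sketch of schemas that would exercise both paths (names are hypothetical):

package example

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// Hypothetical TypeList attributes covering both Elem variants handled
// by validateList.
var listExamples = map[string]*schema.Schema{
    // Elem is a *Schema: each element is validated with validateType.
    "availability_zones": {
        Type:     schema.TypeList,
        Optional: true,
        MinItems: 1,
        MaxItems: 3,
        Elem:     &schema.Schema{Type: schema.TypeString},
    },
    // Elem is a *Resource: each element is validated with validateObject.
    "ingress": {
        Type:     schema.TypeList,
        Optional: true,
        Elem: &schema.Resource{
            Schema: map[string]*schema.Schema{
                "port": {Type: schema.TypeInt, Required: true},
            },
        },
    },
}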
+ reified, reifiedOk := c.Get(k) + if reifiedOk && raw == reified && !c.IsComputed(k) { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute should be a map", + AttributePath: path, + }) + } + // Otherwise it's likely raw is an interpolation. + return diags + case reflect.Map: + case reflect.Slice: + default: + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute should be a map", + AttributePath: path, + }) + } + + // If it is not a slice, validate directly + if rawV.Kind() != reflect.Slice { + mapIface := rawV.Interface() + diags = append(diags, validateMapValues(k, mapIface.(map[string]interface{}), schema, path)...) + if diags.HasError() { + return diags + } + + return schema.validateFunc(mapIface, k, path) + } + + // It is a slice, verify that all the elements are maps + raws := make([]interface{}, rawV.Len()) + for i := range raws { + raws[i] = rawV.Index(i).Interface() + } + + for _, raw := range raws { + v := reflect.ValueOf(raw) + if v.Kind() != reflect.Map { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute should be a map", + AttributePath: path, + }) + } + mapIface := v.Interface() + diags = append(diags, validateMapValues(k, mapIface.(map[string]interface{}), schema, path)...) + if diags.HasError() { + return diags + } + } + + validatableMap := make(map[string]interface{}) + for _, raw := range raws { + for k, v := range raw.(map[string]interface{}) { + validatableMap[k] = v + } + } + + return schema.validateFunc(validatableMap, k, path) +} + +func validateMapValues(k string, m map[string]interface{}, schema *Schema, path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + for key, raw := range m { + valueType, err := getValueType(k, schema) + p := append(path, cty.IndexStep{Key: cty.StringVal(key)}) + if err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + + switch valueType { + case TypeBool: + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + case TypeInt: + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + case TypeFloat: + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + case TypeString: + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: p, + }) + } + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + } + return diags +} + +func getValueType(k string, schema *Schema) (ValueType, error) { + if schema.Elem == nil { + return TypeString, nil + } + if vt, ok := schema.Elem.(ValueType); ok { + return vt, nil + } + + // If a Schema is provided to a Map, we use the Type of that schema + // as the type for each element in the Map. 
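In practice this means a provider chooses the per-value type of a TypeMap through Elem, which is what validateMapValues and getValueType act on. A brief sketch (attribute names are hypothetical):

package example

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// Hypothetical TypeMap attributes showing how Elem drives the per-value type.
var mapExamples = map[string]*schema.Schema{
    // No Elem: getValueType falls back to TypeString for every value.
    "labels": {
        Type:     schema.TypeMap,
        Optional: true,
    },
    // Elem as *Schema: every value must weak-decode to an int.
    "port_overrides": {
        Type:     schema.TypeMap,
        Optional: true,
        Elem:     &schema.Schema{Type: schema.TypeInt},
    },
}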
+ if s, ok := schema.Elem.(*Schema); ok { + return s.Type, nil + } + + if _, ok := schema.Elem.(*Resource); ok { + // TODO: We don't actually support this (yet) + // but silently pass the validation, until we decide + // how to handle nested structures in maps + return TypeString, nil + } + return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem) +} + +func (m schemaMap) validateObject( + k string, + schema map[string]*Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + raw, _ := c.Get(k) + + // schemaMap can't validate nil + if raw == nil { + return diags + } + + if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Expected Object Type", + Detail: fmt.Sprintf("Expected object, got %s", reflect.ValueOf(raw).Kind()), + AttributePath: path, + }) + } + + for subK, s := range schema { + key := subK + if k != "" { + key = fmt.Sprintf("%s.%s", k, subK) + } + diags = append(diags, m.validate(key, s, c, append(path, cty.GetAttrStep{Name: subK}))...) + } + + // Detect any extra/unknown keys and report those as errors. + if m, ok := raw.(map[string]interface{}); ok { + for subk := range m { + if _, ok := schema[subk]; !ok { + if subk == TimeoutsConfigKey { + continue + } + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Invalid or unknown key", + AttributePath: append(path, cty.GetAttrStep{Name: subk}), + }) + } + } + } + + return diags +} + +func (m schemaMap) validatePrimitive( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + + // a nil value shouldn't happen in the old protocol, and in the new + // protocol the types have already been validated. Either way, we can't + // reflect on nil, so don't panic. + if raw == nil { + return diags + } + + // Catch if the user gave a complex type where a primitive was + // expected, so we can return a friendly error message that + // doesn't contain Go type system terminology. + switch reflect.ValueOf(raw).Type().Kind() { + case reflect.Slice: + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute must be a single value, not a list", + AttributePath: path, + }) + case reflect.Map: + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "Attribute must be a single value, not a map", + AttributePath: path, + }) + default: // ok + } + + if c.IsComputed(k) { + // If the key is being computed, then it is not an error as + // long as it's not a slice or map. + return diags + } + + var decoded interface{} + switch schema.Type { + case TypeBool: + // Verify that we can parse this as the correct type + var n bool + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) + } + decoded = n + case TypeInt: + // We need to verify the type precisely, because WeakDecode will + // decode a float as an integer. 
+ + // the config shims only use int for integral number values + if v, ok := raw.(int); ok { + decoded = v + } else { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: fmt.Sprintf("Attribute must be a whole number, got %v", raw), + AttributePath: path, + }) + } + case TypeFloat: + // Verify that we can parse this as an int + var n float64 + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) + } + decoded = n + case TypeString: + // Verify that we can parse this as a string + var n string + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: err.Error(), + AttributePath: path, + }) + } + decoded = n + default: + panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type)) + } + + return append(diags, schema.validateFunc(decoded, k, path)...) +} + +func (m schemaMap) validateType( + k string, + raw interface{}, + schema *Schema, + c *terraform.ResourceConfig, + path cty.Path) diag.Diagnostics { + + var diags diag.Diagnostics + switch schema.Type { + case TypeList: + diags = m.validateList(k, raw, schema, c, path) + case TypeSet: + // indexing into sets is not representable in the current protocol + // best we can do is associate the path up to this attribute. + diags = m.validateList(k, raw, schema, c, path) + log.Printf("[WARN] Truncating attribute path of %d diagnostics for TypeSet", len(diags)) + for i := range diags { + diags[i].AttributePath = path + } + case TypeMap: + diags = m.validateMap(k, raw, schema, c, path) + default: + diags = m.validatePrimitive(k, raw, schema, c, path) + } + + if schema.Deprecated != "" { + diags = append(diags, diag.Diagnostic{ + Severity: diag.Warning, + Summary: "Deprecated Attribute", + Detail: schema.Deprecated, + AttributePath: path, + }) + } + + return diags +} + +// Zero returns the zero value for a type. 
+func (t ValueType) Zero() interface{} { + switch t { + case TypeInvalid: + return nil + case TypeBool: + return false + case TypeInt: + return 0 + case TypeFloat: + return 0.0 + case TypeString: + return "" + case TypeList: + return []interface{}{} + case TypeMap: + return map[string]interface{}{} + case TypeSet: + return new(Set) + case typeObject: + return map[string]interface{}{} + default: + panic(fmt.Sprintf("unknown type %s", t)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go new file mode 100644 index 000000000..0e0e3cca9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/serialize.go @@ -0,0 +1,130 @@ +package schema + +import ( + "bytes" + "fmt" + "sort" + "strconv" +) + +func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) { + if val == nil { + buf.WriteRune(';') + return + } + + switch schema.Type { + case TypeBool: + if val.(bool) { + buf.WriteRune('1') + } else { + buf.WriteRune('0') + } + case TypeInt: + buf.WriteString(strconv.Itoa(val.(int))) + case TypeFloat: + buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64)) + case TypeString: + buf.WriteString(val.(string)) + case TypeList: + buf.WriteRune('(') + l := val.([]interface{}) + for _, innerVal := range l { + serializeCollectionMemberForHash(buf, innerVal, schema.Elem) + } + buf.WriteRune(')') + case TypeMap: + + m := val.(map[string]interface{}) + var keys []string + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + buf.WriteRune('[') + for _, k := range keys { + innerVal := m[k] + if innerVal == nil { + continue + } + buf.WriteString(k) + buf.WriteRune(':') + + switch innerVal := innerVal.(type) { + case int: + buf.WriteString(strconv.Itoa(innerVal)) + case float64: + buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64)) + case string: + buf.WriteString(innerVal) + default: + panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal)) + } + + buf.WriteRune(';') + } + buf.WriteRune(']') + case TypeSet: + buf.WriteRune('{') + s := val.(*Set) + for _, innerVal := range s.List() { + serializeCollectionMemberForHash(buf, innerVal, schema.Elem) + } + buf.WriteRune('}') + default: + panic("unknown schema type to serialize") + } + buf.WriteRune(';') +} + +// SerializeValueForHash appends a serialization of the given resource config +// to the given buffer, guaranteeing deterministic results given the same value +// and schema. +// +// Its primary purpose is as input into a hashing function in order +// to hash complex substructures when used in sets, and so the serialization +// is not reversible. +func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) { + if val == nil { + return + } + sm := resource.Schema + m := val.(map[string]interface{}) + var keys []string + allComputed := true + for k, v := range sm { + if v.Optional || v.Required { + allComputed = false + } + + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + innerSchema := sm[k] + // Skip attributes that are not user-provided. Computed attributes + // do not contribute to the hash since their ultimate value cannot + // be known at plan/diff time. 
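To make the serialization above concrete: the deterministic string written into the buffer is what the set hashing helpers ultimately hash, rather than the Go value itself. A hypothetical illustration (the exact output shown in the comment is approximate):

package example

import (
    "bytes"
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Hypothetical sketch: serialize a simple list value the way TypeSet
// elements are serialized before hashing.
func exampleSerialize() {
    s := &schema.Schema{
        Type: schema.TypeList,
        Elem: &schema.Schema{Type: schema.TypeString},
    }

    var buf bytes.Buffer
    schema.SerializeValueForHash(&buf, []interface{}{"a", "b"}, s)
    fmt.Println(buf.String()) // roughly "(a;b;);"
}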
+ if !allComputed && !(innerSchema.Required || innerSchema.Optional) { + continue + } + + buf.WriteString(k) + buf.WriteRune(':') + innerVal := m[k] + SerializeValueForHash(buf, innerVal, innerSchema) + } +} + +func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) { + switch tElem := elem.(type) { + case *Schema: + SerializeValueForHash(buf, val, tElem) + case *Resource: + buf.WriteRune('<') + SerializeResourceForHash(buf, val, tElem) + buf.WriteString(">;") + default: + panic(fmt.Sprintf("invalid element type: %T", tElem)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go new file mode 100644 index 000000000..a510e60ff --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/set.go @@ -0,0 +1,276 @@ +package schema + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" + "sync" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode" +) + +// HashString hashes strings. If you want a Set of strings, this is the +// SchemaSetFunc you want. +func HashString(v interface{}) int { + return hashcode.String(v.(string)) +} + +// HashInt hashes integers. If you want a Set of integers, this is the +// SchemaSetFunc you want. +func HashInt(v interface{}) int { + return hashcode.String(strconv.Itoa(v.(int))) +} + +// HashResource hashes complex structures that are described using +// a *Resource. This is the default set implementation used when a set's +// element type is a full resource. +func HashResource(resource *Resource) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeResourceForHash(&buf, v, resource) + return hashcode.String(buf.String()) + } +} + +// HashSchema hashes values that are described using a *Schema. This is the +// default set implementation used when a set's element type is a single +// schema. +func HashSchema(schema *Schema) SchemaSetFunc { + return func(v interface{}) int { + var buf bytes.Buffer + SerializeValueForHash(&buf, v, schema) + return hashcode.String(buf.String()) + } +} + +// Set is a set data structure that is returned for elements of type +// TypeSet. +type Set struct { + F SchemaSetFunc + + m map[string]interface{} + once sync.Once +} + +// NewSet is a convenience method for creating a new set with the given +// items. +func NewSet(f SchemaSetFunc, items []interface{}) *Set { + s := &Set{F: f} + for _, i := range items { + s.Add(i) + } + + return s +} + +// CopySet returns a copy of another set. +func CopySet(otherSet *Set) *Set { + return NewSet(otherSet.F, otherSet.List()) +} + +// Add adds an item to the set if it isn't already in the set. +func (s *Set) Add(item interface{}) { + s.add(item, false) +} + +// Remove removes an item if it's already in the set. Idempotent. +func (s *Set) Remove(item interface{}) { + s.remove(item) +} + +// Contains checks if the set has the given item. +func (s *Set) Contains(item interface{}) bool { + _, ok := s.m[s.hash(item)] + return ok +} + +// Len returns the amount of items in the set. +func (s *Set) Len() int { + return len(s.m) +} + +// List returns the elements of this set in slice format. +// +// The order of the returned elements is deterministic. Given the same +// set, the order of this will always be the same. 
+func (s *Set) List() []interface{} { + result := make([]interface{}, len(s.m)) + for i, k := range s.listCode() { + result[i] = s.m[k] + } + + return result +} + +// Difference performs a set difference of the two sets, returning +// a new third set that has only the elements unique to this set. +func (s *Set) Difference(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; !ok { + result.m[k] = v + } + } + + return result +} + +// Intersection performs the set intersection of the two sets +// and returns a new third set. +func (s *Set) Intersection(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + if _, ok := other.m[k]; ok { + result.m[k] = v + } + } + + return result +} + +// Union performs the set union of the two sets and returns a new third +// set. +func (s *Set) Union(other *Set) *Set { + result := &Set{F: s.F} + result.once.Do(result.init) + + for k, v := range s.m { + result.m[k] = v + } + for k, v := range other.m { + result.m[k] = v + } + + return result +} + +func checkSetMapEqual(m1, m2 map[string]interface{}) bool { + if (m1 == nil) != (m2 == nil) { + return false + } + if len(m1) != len(m2) { + return false + } + for k := range m1 { + v1 := m1[k] + v2, ok := m2[k] + if !ok { + return false + } + switch v1.(type) { + case map[string]interface{}: + same := checkSetMapEqual(v1.(map[string]interface{}), v2.(map[string]interface{})) + if !same { + return false + } + case *Set: + same := v1.(*Set).Equal(v2) + if !same { + return false + } + default: + same := reflect.DeepEqual(v1, v2) + if !same { + return false + } + } + } + return true +} + +func (s *Set) Equal(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + return checkSetMapEqual(s.m, other.m) +} + +// HashEqual simply checks to the keys the top-level map to the keys in the +// other set's top-level map to see if they are equal. This obviously assumes +// you have a properly working hash function - use HashResource if in doubt. +func (s *Set) HashEqual(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + + ks1 := make([]string, 0) + ks2 := make([]string, 0) + + for k := range s.m { + ks1 = append(ks1, k) + } + for k := range other.m { + ks2 = append(ks2, k) + } + + sort.Strings(ks1) + sort.Strings(ks2) + + return reflect.DeepEqual(ks1, ks2) +} + +func (s *Set) GoString() string { + return fmt.Sprintf("*Set(%#v)", s.m) +} + +func (s *Set) init() { + s.m = make(map[string]interface{}) +} + +func (s *Set) add(item interface{}, computed bool) string { + s.once.Do(s.init) + + code := s.hash(item) + if computed { + code = "~" + code + tmpCode := code + count := 0 + for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] { + count++ + tmpCode = fmt.Sprintf("%s%d", code, count) + } + code = tmpCode + } + + if _, ok := s.m[code]; !ok { + s.m[code] = item + } + + return code +} + +func (s *Set) hash(item interface{}) string { + code := s.F(item) + // Always return a nonnegative hashcode. 
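A short, hypothetical sketch of how the Set type above is typically used together with HashString:

package example

import (
    "fmt"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Hypothetical illustration of basic Set operations.
func exampleSet() {
    a := schema.NewSet(schema.HashString, []interface{}{"us-east-1a", "us-east-1b"})
    b := schema.NewSet(schema.HashString, []interface{}{"us-east-1b", "us-east-1c"})

    fmt.Println(a.Contains("us-east-1a")) // true
    fmt.Println(a.Union(b).Len())         // 3
    fmt.Println(a.Intersection(b).Len())  // 1
    fmt.Println(a.Difference(b).Len())    // 1
}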
+ if code < 0 { + code = -code + } + return strconv.Itoa(code) +} + +func (s *Set) remove(item interface{}) string { + s.once.Do(s.init) + + code := s.hash(item) + delete(s.m, code) + + return code +} + +func (s *Set) listCode() []string { + // Sort the hash codes so the order of the list is deterministic + keys := make([]string, 0, len(s.m)) + for k := range s.m { + keys = append(keys, k) + } + sort.Sort(sort.StringSlice(keys)) + return keys +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go new file mode 100644 index 000000000..40a5abc0d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/shims.go @@ -0,0 +1,116 @@ +package schema + +import ( + "context" + "encoding/json" + + "github.com/hashicorp/go-cty/cty" + ctyjson "github.com/hashicorp/go-cty/cty/json" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// DiffFromValues takes the current state and desired state as cty.Values and +// derives a terraform.InstanceDiff to give to the legacy providers. This is +// used to take the states provided by the new ApplyResourceChange method and +// convert them to a state+diff required for the legacy Apply method. +func DiffFromValues(ctx context.Context, prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) { + return diffFromValues(ctx, prior, planned, res, nil) +} + +// diffFromValues takes an additional CustomizeDiffFunc, so we can generate our +// test fixtures from the legacy tests. In the new provider protocol the diff +// only needs to be created for the apply operation, and any customizations +// have already been done. +func diffFromValues(ctx context.Context, prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) { + instanceState, err := res.ShimInstanceStateFromValue(prior) + if err != nil { + return nil, err + } + + configSchema := res.CoreConfigSchema() + + cfg := terraform.NewResourceConfigShimmed(planned, configSchema) + removeConfigUnknowns(cfg.Config) + removeConfigUnknowns(cfg.Raw) + + diff, err := schemaMap(res.Schema).Diff(ctx, instanceState, cfg, cust, nil, false) + if err != nil { + return nil, err + } + + return diff, err +} + +// During apply the only unknown values are those which are to be computed by +// the resource itself. These may have been marked as unknown config values, and +// need to be removed to prevent the UnknownVariableValue from appearing the diff. +func removeConfigUnknowns(cfg map[string]interface{}) { + for k, v := range cfg { + switch v := v.(type) { + case string: + if v == hcl2shim.UnknownVariableValue { + delete(cfg, k) + } + case []interface{}: + for _, i := range v { + if m, ok := i.(map[string]interface{}); ok { + removeConfigUnknowns(m) + } + } + case map[string]interface{}: + removeConfigUnknowns(v) + } + } +} + +// ApplyDiff takes a cty.Value state and applies a terraform.InstanceDiff to +// get a new cty.Value state. This is used to convert the diff returned from +// the legacy provider Diff method to the state required for the new +// PlanResourceChange method. 
+func ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.Block) (cty.Value, error) { + return d.ApplyToValue(base, schema) +} + +// StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON +// encoding. +func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) { + js, err := ctyjson.Marshal(val, ty) + if err != nil { + return nil, err + } + + var m map[string]interface{} + if err := json.Unmarshal(js, &m); err != nil { + return nil, err + } + + return m, nil +} + +// JSONMapToStateValue takes a generic json map[string]interface{} and converts it +// to the specific type, ensuring that the values conform to the schema. +func JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) { + var val cty.Value + + js, err := json.Marshal(m) + if err != nil { + return val, err + } + + val, err = ctyjson.Unmarshal(js, block.ImpliedType()) + if err != nil { + return val, err + } + + return block.CoerceValue(val) +} + +// StateValueFromInstanceState converts a terraform.InstanceState to a +// cty.Value as described by the provided cty.Type, and maintains the resource +// ID as the "id" attribute. +func StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) { + return is.AttrsAsObjectValue(ty) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go new file mode 100644 index 000000000..f345f8326 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/testing.go @@ -0,0 +1,29 @@ +package schema + +import ( + "context" + + testing "github.com/mitchellh/go-testing-interface" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +// TestResourceDataRaw creates a ResourceData from a raw configuration map. +func TestResourceDataRaw(t testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { + t.Helper() + + c := terraform.NewResourceConfigRaw(raw) + + sm := schemaMap(schema) + diff, err := sm.Diff(context.Background(), nil, c, nil, nil, true) + if err != nil { + t.Fatalf("err: %s", err) + } + + result, err := sm.Data(nil, diff) + if err != nil { + t.Fatalf("err: %s", err) + } + + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype.go new file mode 100644 index 000000000..0f65d692f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype.go @@ -0,0 +1,21 @@ +package schema + +//go:generate go run golang.org/x/tools/cmd/stringer -type=ValueType valuetype.go + +// ValueType is an enum of the type that can be represented by a schema. +type ValueType int + +const ( + TypeInvalid ValueType = iota + TypeBool + TypeInt + TypeFloat + TypeString + TypeList + TypeMap + TypeSet + typeObject +) + +// NOTE: ValueType has more functions defined on it in schema.go. We can't +// put them here because we reference other files. 
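As a usage illustration, TestResourceDataRaw (added above) is commonly called from provider unit tests to build a *ResourceData directly from a raw config map, without going through Terraform core. A minimal sketch with a hypothetical one-attribute schema:

package example

import (
    "testing"

    "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Hypothetical provider unit test: TestResourceDataRaw builds the diff and
// ResourceData from the raw map in one step.
func TestNameAttribute(t *testing.T) {
    s := map[string]*schema.Schema{
        "name": {Type: schema.TypeString, Required: true},
    }

    d := schema.TestResourceDataRaw(t, s, map[string]interface{}{"name": "example"})

    if got := d.Get("name").(string); got != "example" {
        t.Fatalf("unexpected name: %q", got)
    }
}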
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype_string.go new file mode 100644 index 000000000..914ca32cb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/valuetype_string.go @@ -0,0 +1,31 @@ +// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT. + +package schema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TypeInvalid-0] + _ = x[TypeBool-1] + _ = x[TypeInt-2] + _ = x[TypeFloat-3] + _ = x[TypeString-4] + _ = x[TypeList-5] + _ = x[TypeMap-6] + _ = x[TypeSet-7] + _ = x[typeObject-8] +} + +const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" + +var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} + +func (i ValueType) String() string { + if i < 0 || i >= ValueType(len(_ValueType_index)-1) { + return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/doc.go new file mode 100644 index 000000000..46093314f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/doc.go @@ -0,0 +1,17 @@ +// Package addrs contains types that represent "addresses", which are +// references to specific objects within a Terraform configuration or +// state. +// +// All addresses have string representations based on HCL traversal syntax +// which should be used in the user-interface, and also in-memory +// representations that can be used internally. +// +// For object types that exist within Terraform modules a pair of types is +// used. The "local" part of the address is represented by a type, and then +// an absolute path to that object in the context of its module is represented +// by a type of the same name with an "Abs" prefix added, for "absolute". +// +// All types within this package should be treated as immutable, even if this +// is not enforced by the Go compiler. It is always an implementation error +// to modify an address object in-place after it is initially constructed. +package addrs diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go new file mode 100644 index 000000000..064aeda28 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/instance_key.go @@ -0,0 +1,47 @@ +package addrs + +import ( + "fmt" +) + +// instanceKey represents the key of an instance within an object that +// contains multiple instances due to using "count" or "for_each" arguments +// in configuration. +// +// intKey and stringKey are the two implementations of this type. No other +// implementations are allowed. The single instance of an object that _isn't_ +// using "count" or "for_each" is represented by NoKey, which is a nil +// InstanceKey. +type instanceKey interface { + instanceKeySigil() + String() string +} + +// NoKey represents the absense of an instanceKey, for the single instance +// of a configuration object that does not use "count" or "for_each" at all. 
+var NoKey instanceKey + +// intKey is the InstanceKey representation representing integer indices, as +// used when the "count" argument is specified or if for_each is used with +// a sequence type. +type intKey int + +func (k intKey) instanceKeySigil() { +} + +func (k intKey) String() string { + return fmt.Sprintf("[%d]", int(k)) +} + +// stringKey is the InstanceKey representation representing string indices, as +// used when the "for_each" argument is specified with a map or object type. +type stringKey string + +func (k stringKey) instanceKeySigil() { +} + +func (k stringKey) String() string { + // FIXME: This isn't _quite_ right because Go's quoted string syntax is + // slightly different than HCL's, but we'll accept it for now. + return fmt.Sprintf("[%q]", string(k)) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go new file mode 100644 index 000000000..98699f46e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module.go @@ -0,0 +1,13 @@ +package addrs + +// Module is an address for a module call within configuration. This is +// the static counterpart of ModuleInstance, representing a traversal through +// the static module call tree in configuration and does not take into account +// the potentially-multiple instances of a module that might be created by +// "count" and "for_each" arguments within those calls. +// +// This type should be used only in very specialized cases when working with +// the static module call tree. Type ModuleInstance is appropriate in more cases. +// +// Although Module is a slice, it should be treated as immutable after creation. +type Module []string diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go new file mode 100644 index 000000000..f31d833d2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/module_instance.go @@ -0,0 +1,241 @@ +package addrs + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl/v2" + "github.com/hashicorp/hcl/v2/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/gocty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags" +) + +// ModuleInstance is an address for a particular module instance within the +// dynamic module tree. This is an extension of the static traversals +// represented by type Module that deals with the possibility of a single +// module call producing multiple instances via the "count" and "for_each" +// arguments. +// +// Although ModuleInstance is a slice, it should be treated as immutable after +// creation. 
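Since intKey and stringKey are unexported, the following is only an in-package illustration of how the two key kinds render when embedded in an address (variable names are hypothetical):

package addrs

// Hypothetical, in-package illustration: intKey and stringKey only differ in
// how they render inside an address string.
var (
    countIndex   instanceKey = intKey(2)       // String() yields [2]
    forEachIndex instanceKey = stringKey("eu") // String() yields ["eu"]
    noIndex      instanceKey                   // NoKey (nil); callers skip rendering it entirely
)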
+type ModuleInstance []ModuleInstanceStep + +func parseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) { + mi, remain, diags := parseModuleInstancePrefix(traversal) + if len(remain) != 0 { + if len(remain) == len(traversal) { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid module instance address", + "A module instance address must begin with \"module.\".", + )) + } else { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid module instance address", + "The module instance address is followed by additional invalid content.", + )) + } + } + return mi, diags +} + +// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance +// that takes a string and parses it with the HCL native syntax traversal parser +// before interpreting it. +// +// This should be used only in specialized situations since it will cause the +// created references to not have any meaningful source location information. +// If a reference string is coming from a source that should be identified in +// error messages then the caller should instead parse it directly using a +// suitable function from the HCL API and pass the traversal itself to +// ParseProviderConfigCompact. +// +// Error diagnostics are returned if either the parsing fails or the analysis +// of the traversal fails. There is no way for the caller to distinguish the +// two kinds of diagnostics programmatically. If error diagnostics are returned +// then the returned address is invalid. +func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1}) + for _, err := range parseDiags.Errs() { + // ignore warnings, they don't matter in this case + diags = append(diags, tfdiags.FromError(err)) + } + if parseDiags.HasErrors() { + return nil, diags + } + + addr, addrDiags := parseModuleInstance(traversal) + diags = append(diags, addrDiags...) + return addr, diags +} + +func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) { + remain := traversal + var mi ModuleInstance + var diags tfdiags.Diagnostics + + for len(remain) > 0 { + var next string + switch tt := remain[0].(type) { + case hcl.TraverseRoot: + next = tt.Name + case hcl.TraverseAttr: + next = tt.Name + default: + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Module address prefix must be followed by dot and then a name.", + )) + break + } + + if next != "module" { + break + } + + remain = remain[1:] + // If we have the prefix "module" then we should be followed by an + // module call name, as an attribute, and then optionally an index step + // giving the instance key. 
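A small, hypothetical in-package sketch of ParseModuleInstanceStr (the addrs package is internal to the SDK, so provider code cannot import it directly):

package addrs

import "fmt"

// Hypothetical, in-package sketch of parsing a module instance address string.
func exampleParseModuleInstance() {
    mi, diags := ParseModuleInstanceStr(`module.network[0].module.subnet["a"]`)
    if len(diags) > 0 {
        return // invalid address; the diagnostics describe why
    }
    fmt.Println(mi.String()) // module.network[0].module.subnet["a"]
}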
+ if len(remain) == 0 { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Prefix \"module.\" must be followed by a module name.", + )) + break + } + + var moduleName string + switch tt := remain[0].(type) { + case hcl.TraverseAttr: + moduleName = tt.Name + default: + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Prefix \"module.\" must be followed by a module name.", + )) + break + } + remain = remain[1:] + step := ModuleInstanceStep{ + Name: moduleName, + } + + if len(remain) > 0 { + if idx, ok := remain[0].(hcl.TraverseIndex); ok { + remain = remain[1:] + + switch idx.Key.Type() { + case cty.String: + step.InstanceKey = stringKey(idx.Key.AsString()) + case cty.Number: + var idxInt int + err := gocty.FromCtyValue(idx.Key, &idxInt) + if err == nil { + step.InstanceKey = intKey(idxInt) + } else { + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + fmt.Sprintf("Invalid module index: %s.", err), + )) + } + default: + // Should never happen, because no other types are allowed in traversal indices. + diags = append(diags, tfdiags.Diag( + tfdiags.Error, + "Invalid address operator", + "Invalid module key: must be either a string or an integer.", + )) + } + } + } + + mi = append(mi, step) + } + + var retRemain hcl.Traversal + if len(remain) > 0 { + retRemain = make(hcl.Traversal, len(remain)) + copy(retRemain, remain) + // The first element here might be either a TraverseRoot or a + // TraverseAttr, depending on whether we had a module address on the + // front. To make life easier for callers, we'll normalize to always + // start with a TraverseRoot. + if tt, ok := retRemain[0].(hcl.TraverseAttr); ok { + retRemain[0] = hcl.TraverseRoot{ + Name: tt.Name, + SrcRange: tt.SrcRange, + } + } + } + + return mi, retRemain, diags +} + +// UnkeyedInstanceShim is a shim method for converting a Module address to the +// equivalent ModuleInstance address that assumes that no modules have +// keyed instances. +// +// This is a temporary allowance for the fact that Terraform does not presently +// support "count" and "for_each" on modules, and thus graph building code that +// derives graph nodes from configuration must just assume unkeyed modules +// in order to construct the graph. At a later time when "count" and "for_each" +// support is added for modules, all callers of this method will need to be +// reworked to allow for keyed module instances. +func (m Module) UnkeyedInstanceShim() ModuleInstance { + path := make(ModuleInstance, len(m)) + for i, name := range m { + path[i] = ModuleInstanceStep{Name: name} + } + return path +} + +// ModuleInstanceStep is a single traversal step through the dynamic module +// tree. It is used only as part of ModuleInstance. +type ModuleInstanceStep struct { + Name string + InstanceKey instanceKey +} + +// RootModuleInstance is the module instance address representing the root +// module, which is also the zero value of ModuleInstance. +var RootModuleInstance ModuleInstance + +// Child returns the address of a child module instance of the receiver, +// identified by the given name and key. +func (m ModuleInstance) Child(name string, key instanceKey) ModuleInstance { + ret := make(ModuleInstance, 0, len(m)+1) + ret = append(ret, m...) + return append(ret, ModuleInstanceStep{ + Name: name, + InstanceKey: key, + }) +} + +// String returns a string representation of the receiver, in the format used +// within e.g. user-provided resource addresses. 
+// +// The address of the root module has the empty string as its representation. +func (m ModuleInstance) String() string { + var buf bytes.Buffer + sep := "" + for _, step := range m { + buf.WriteString(sep) + buf.WriteString("module.") + buf.WriteString(step.Name) + if step.InstanceKey != NoKey { + buf.WriteString(step.InstanceKey.String()) + } + sep = "." + } + return buf.String() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resource.go new file mode 100644 index 000000000..f56032b5d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resource.go @@ -0,0 +1,130 @@ +package addrs + +import ( + "fmt" +) + +// resource is an address for a resource block within configuration, which +// contains potentially-multiple resource instances if that configuration +// block uses "count" or "for_each". +type resource struct { + Mode resourceMode + Type string + Name string +} + +func (r resource) String() string { + switch r.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", r.Type, r.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", r.Type, r.Name) + default: + // Should never happen, but we'll return a string here rather than + // crashing just in case it does. + return fmt.Sprintf(".%s.%s", r.Type, r.Name) + } +} + +// resourceInstance is an address for a specific instance of a resource. +// When a resource is defined in configuration with "count" or "for_each" it +// produces zero or more instances, which can be addressed using this type. +type resourceInstance struct { + Resource resource + Key instanceKey +} + +func (r resourceInstance) ContainingResource() resource { + return r.Resource +} + +func (r resourceInstance) String() string { + if r.Key == NoKey { + return r.Resource.String() + } + return r.Resource.String() + r.Key.String() +} + +// absResource is an absolute address for a resource under a given module path. +type absResource struct { + Module ModuleInstance + Resource resource +} + +// Resource returns the address of a particular resource within the receiver. +func (m ModuleInstance) Resource(mode resourceMode, typeName string, name string) absResource { + return absResource{ + Module: m, + Resource: resource{ + Mode: mode, + Type: typeName, + Name: name, + }, + } +} + +func (r absResource) String() string { + if len(r.Module) == 0 { + return r.Resource.String() + } + return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) +} + +// absResourceInstance is an absolute address for a resource instance under a +// given module path. +type absResourceInstance struct { + Module ModuleInstance + Resource resourceInstance +} + +// ResourceInstance returns the address of a particular resource instance within the receiver. +func (m ModuleInstance) ResourceInstance(mode resourceMode, typeName string, name string, key instanceKey) absResourceInstance { + return absResourceInstance{ + Module: m, + Resource: resourceInstance{ + Resource: resource{ + Mode: mode, + Type: typeName, + Name: name, + }, + Key: key, + }, + } +} + +// ContainingResource returns the address of the resource that contains the +// receving resource instance. In other words, it discards the key portion +// of the address to produce an absResource value. 
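A hypothetical, in-package sketch of composing and rendering absolute resource addresses with the types above (the concrete names and values are illustrative):

package addrs

import "fmt"

// Hypothetical, in-package sketch of building absolute resource addresses.
func exampleResourceAddress() {
    mod := RootModuleInstance.Child("app", NoKey) // module.app

    inst := mod.ResourceInstance(ManagedResourceMode, "aws_instance", "web", intKey(0))
    fmt.Println(inst.String()) // module.app.aws_instance.web[0]

    data := mod.Resource(DataResourceMode, "aws_ami", "ubuntu")
    fmt.Println(data.String()) // module.app.data.aws_ami.ubuntu
}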
+func (r absResourceInstance) ContainingResource() absResource { + return absResource{ + Module: r.Module, + Resource: r.Resource.ContainingResource(), + } +} + +func (r absResourceInstance) String() string { + if len(r.Module) == 0 { + return r.Resource.String() + } + return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) +} + +// resourceMode defines which lifecycle applies to a given resource. Each +// resource lifecycle has a slightly different address format. +type resourceMode rune + +//go:generate go run golang.org/x/tools/cmd/stringer -type resourceMode + +const ( + // InvalidResourceMode is the zero value of ResourceMode and is not + // a valid resource mode. + InvalidResourceMode resourceMode = 0 + + // ManagedResourceMode indicates a managed resource, as defined by + // "resource" blocks in configuration. + ManagedResourceMode resourceMode = 'M' + + // DataResourceMode indicates a data resource, as defined by + // "data" blocks in configuration. + DataResourceMode resourceMode = 'D' +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go new file mode 100644 index 000000000..3b2e65c3a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs/resourcemode_string.go @@ -0,0 +1,33 @@ +// Code generated by "stringer -type resourceMode"; DO NOT EDIT. + +package addrs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[InvalidResourceMode-0] + _ = x[ManagedResourceMode-77] + _ = x[DataResourceMode-68] +} + +const ( + _resourceMode_name_0 = "InvalidResourceMode" + _resourceMode_name_1 = "DataResourceMode" + _resourceMode_name_2 = "ManagedResourceMode" +) + +func (i resourceMode) String() string { + switch { + case i == 0: + return _resourceMode_name_0 + case i == 68: + return _resourceMode_name_1 + case i == 77: + return _resourceMode_name_2 + default: + return "resourceMode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go new file mode 100644 index 000000000..48278abed --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/coerce_value.go @@ -0,0 +1,250 @@ +package configschema + +import ( + "fmt" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" +) + +// CoerceValue attempts to force the given value to conform to the type +// implied by the receiever. +// +// This is useful in situations where a configuration must be derived from +// an already-decoded value. It is always better to decode directly from +// configuration where possible since then source location information is +// still available to produce diagnostics, but in special situations this +// function allows a compatible result to be obtained even if the +// configuration objects are not available. +// +// If the given value cannot be converted to conform to the receiving schema +// then an error is returned describing one of possibly many problems. This +// error may be a cty.PathError indicating a position within the nested +// data structure where the problem applies. 
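A hypothetical, in-package sketch of CoerceValue as described above: a missing optional attribute is filled with a typed null, while a missing required one would yield an error instead (attribute names are illustrative):

package configschema

import (
    "fmt"

    "github.com/hashicorp/go-cty/cty"
)

// Hypothetical, in-package sketch of coercing a partially-populated object
// to conform to a Block schema.
func exampleCoerce() {
    block := &Block{
        Attributes: map[string]*Attribute{
            "name":    {Type: cty.String, Required: true},
            "retries": {Type: cty.Number, Optional: true},
        },
    }

    got, err := block.CoerceValue(cty.ObjectVal(map[string]cty.Value{
        "name": cty.StringVal("example"),
    }))
    if err != nil {
        fmt.Println("coercion error:", err)
        return
    }
    fmt.Printf("%#v\n", got) // object with name="example" and retries=null
}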
+func (b *Block) CoerceValue(in cty.Value) (cty.Value, error) { + var path cty.Path + return b.coerceValue(in, path) +} + +func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + switch { + case in.IsNull(): + return cty.NullVal(b.ImpliedType()), nil + case !in.IsKnown(): + return cty.UnknownVal(b.ImpliedType()), nil + } + + ty := in.Type() + if !ty.IsObjectType() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("an object is required") + } + + for name := range ty.AttributeTypes() { + if _, defined := b.Attributes[name]; defined { + continue + } + if _, defined := b.BlockTypes[name]; defined { + continue + } + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("unexpected attribute %q", name) + } + + attrs := make(map[string]cty.Value) + + for name, attrS := range b.Attributes { + var val cty.Value + switch { + case ty.HasAttribute(name): + val = in.GetAttr(name) + case attrS.Computed || attrS.Optional: + val = cty.NullVal(attrS.Type) + default: + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", name) + } + + val, err := attrS.coerceValue(val, append(path, cty.GetAttrStep{Name: name})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + + attrs[name] = val + } + for typeName, blockS := range b.BlockTypes { + switch blockS.Nesting { + + case NestingSingle, NestingGroup: + switch { + case ty.HasAttribute(typeName): + var err error + val := in.GetAttr(typeName) + attrs[typeName], err = blockS.coerceValue(val, append(path, cty.GetAttrStep{Name: typeName})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + default: + attrs[typeName] = blockS.EmptyValue() + } + + case NestingList: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.List(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.List(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list") + } + l := coll.LengthInt() + + if l == 0 { + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.ListVal(elems) + default: + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + } + + case NestingSet: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Set(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.Set(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set") + } + l := coll.LengthInt() + + if l == 0 { + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return 
cty.UnknownVal(b.ImpliedType()), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.SetVal(elems) + default: + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + } + + case NestingMap: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Map(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.Map(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") + } + l := coll.LengthInt() + if l == 0 { + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + continue + } + elems := make(map[string]cty.Value) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + key, val := it.Element() + if key.Type() != cty.String || key.IsNull() || !key.IsKnown() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") + } + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: key})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems[key.AsString()] = val + } + } + + // If the attribute values here contain any DynamicPseudoTypes, + // the concrete type must be an object. + useObject := false + switch { + case coll.Type().IsObjectType(): + useObject = true + default: + // It's possible that we were given a map, and need to coerce it to an object + ety := coll.Type().ElementType() + for _, v := range elems { + if !v.Type().Equals(ety) { + useObject = true + break + } + } + } + + if useObject { + attrs[typeName] = cty.ObjectVal(elems) + } else { + attrs[typeName] = cty.MapVal(elems) + } + default: + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + } + + default: + // should never happen because above is exhaustive + panic(fmt.Errorf("unsupported nesting mode %#v", blockS.Nesting)) + } + } + + return cty.ObjectVal(attrs), nil +} + +func (a *Attribute) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + val, err := convert.Convert(in, a.Type) + if err != nil { + return cty.UnknownVal(a.Type), path.NewError(err) + } + return val, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/doc.go new file mode 100644 index 000000000..caf8d730c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/doc.go @@ -0,0 +1,14 @@ +// Package configschema contains types for describing the expected structure +// of a configuration block whose shape is not known until runtime. +// +// For example, this is used to describe the expected contents of a resource +// configuration block, which is defined by the corresponding provider plugin +// and thus not compiled into Terraform core. +// +// A configschema primarily describes the shape of configuration, but it is +// also suitable for use with other structures derived from the configuration, +// such as the cached state of a resource or a resource diff. +// +// This package should not be confused with the package helper/schema, which +// is the higher-level helper library used to implement providers themselves. 
+package configschema diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go new file mode 100644 index 000000000..51b8c5d24 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/empty_value.go @@ -0,0 +1,59 @@ +package configschema + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// EmptyValue returns the "empty value" for the recieving block, which for +// a block type is a non-null object where all of the attribute values are +// the empty values of the block's attributes and nested block types. +// +// In other words, it returns the value that would be returned if an empty +// block were decoded against the recieving schema, assuming that no required +// attribute or block constraints were honored. +func (b *Block) EmptyValue() cty.Value { + vals := make(map[string]cty.Value) + for name, attrS := range b.Attributes { + vals[name] = attrS.EmptyValue() + } + for name, blockS := range b.BlockTypes { + vals[name] = blockS.EmptyValue() + } + return cty.ObjectVal(vals) +} + +// EmptyValue returns the "empty value" for the receiving attribute, which is +// the value that would be returned if there were no definition of the attribute +// at all, ignoring any required constraint. +func (a *Attribute) EmptyValue() cty.Value { + return cty.NullVal(a.Type) +} + +// EmptyValue returns the "empty value" for when there are zero nested blocks +// present of the receiving type. +func (b *NestedBlock) EmptyValue() cty.Value { + switch b.Nesting { + case NestingSingle: + return cty.NullVal(b.Block.ImpliedType()) + case NestingGroup: + return b.Block.EmptyValue() + case NestingList: + if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { + return cty.EmptyTupleVal + } else { + return cty.ListValEmpty(ty) + } + case NestingMap: + if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() { + return cty.EmptyObjectVal + } else { + return cty.MapValEmpty(ty) + } + case NestingSet: + return cty.SetValEmpty(b.Block.ImpliedType()) + default: + // Should never get here because the above is intended to be exhaustive, + // but we'll be robust and return a result nonetheless. + return cty.NullVal(cty.DynamicPseudoType) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go new file mode 100644 index 000000000..edc9dadcc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/implied_type.go @@ -0,0 +1,68 @@ +package configschema + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// ImpliedType returns the cty.Type that would result from decoding a +// configuration block using the receiving block schema. +// +// ImpliedType always returns a result, even if the given schema is +// inconsistent. 
+func (b *Block) ImpliedType() cty.Type { + if b == nil { + return cty.EmptyObject + } + + atys := make(map[string]cty.Type) + + for name, attrS := range b.Attributes { + atys[name] = attrS.Type + } + + for name, blockS := range b.BlockTypes { + if _, exists := atys[name]; exists { + panic("invalid schema, blocks and attributes cannot have the same name") + } + + childType := blockS.Block.ImpliedType() + + switch blockS.Nesting { + case NestingSingle, NestingGroup: + atys[name] = childType + case NestingList: + // We prefer to use a list where possible, since it makes our + // implied type more complete, but if there are any + // dynamically-typed attributes inside we must use a tuple + // instead, which means our type _constraint_ must be + // cty.DynamicPseudoType to allow the tuple type to be decided + // separately for each value. + if childType.HasDynamicTypes() { + atys[name] = cty.DynamicPseudoType + } else { + atys[name] = cty.List(childType) + } + case NestingSet: + if childType.HasDynamicTypes() { + panic("can't use cty.DynamicPseudoType inside a block type with NestingSet") + } + atys[name] = cty.Set(childType) + case NestingMap: + // We prefer to use a map where possible, since it makes our + // implied type more complete, but if there are any + // dynamically-typed attributes inside we must use an object + // instead, which means our type _constraint_ must be + // cty.DynamicPseudoType to allow the tuple type to be decided + // separately for each value. + if childType.HasDynamicTypes() { + atys[name] = cty.DynamicPseudoType + } else { + atys[name] = cty.Map(childType) + } + default: + panic("invalid nesting type") + } + } + + return cty.Object(atys) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/nestingmode_string.go new file mode 100644 index 000000000..febe743e1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/nestingmode_string.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=NestingMode"; DO NOT EDIT. + +package configschema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[nestingModeInvalid-0] + _ = x[NestingSingle-1] + _ = x[NestingGroup-2] + _ = x[NestingList-3] + _ = x[NestingSet-4] + _ = x[NestingMap-5] +} + +const _NestingMode_name = "nestingModeInvalidNestingSingleNestingGroupNestingListNestingSetNestingMap" + +var _NestingMode_index = [...]uint8{0, 18, 31, 43, 54, 64, 74} + +func (i NestingMode) String() string { + if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) { + return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go new file mode 100644 index 000000000..6cddb9c63 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema/schema.go @@ -0,0 +1,155 @@ +package configschema + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// StringKind represents the format a string is in. 
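The degradation to cty.DynamicPseudoType in the NestingList and NestingMap branches above comes down to cty.Type.HasDynamicTypes: a homogeneous collection type can only be built when the child object type is fully concrete. A small illustration using go-cty directly (listTypeFor is a hypothetical helper, not part of this package):

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

// listTypeFor mirrors the NestingList branch: a concrete child type can be
// wrapped in a list, while a child containing DynamicPseudoType forces the
// whole attribute down to DynamicPseudoType.
func listTypeFor(child cty.Type) cty.Type {
	if child.HasDynamicTypes() {
		return cty.DynamicPseudoType
	}
	return cty.List(child)
}

func main() {
	concrete := cty.Object(map[string]cty.Type{"port": cty.Number})
	dynamic := cty.Object(map[string]cty.Type{"value": cty.DynamicPseudoType})

	fmt.Printf("%#v\n", listTypeFor(concrete)) // cty.List(cty.Object(...))
	fmt.Printf("%#v\n", listTypeFor(dynamic))  // cty.DynamicPseudoType
}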
+type StringKind int + +const ( + // StringPlain indicates a string is plain-text and requires no processing for display. + StringPlain StringKind = iota + // StringMarkdown indicates a string is in markdown format and may + // require additional processing to display. + StringMarkdown +) + +// Block represents a configuration block. +// +// "Block" here is a logical grouping construct, though it happens to map +// directly onto the physical block syntax of Terraform's native configuration +// syntax. It may be a more a matter of convention in other syntaxes, such as +// JSON. +// +// When converted to a value, a Block always becomes an instance of an object +// type derived from its defined attributes and nested blocks +type Block struct { + // Attributes describes any attributes that may appear directly inside + // the block. + Attributes map[string]*Attribute + + // BlockTypes describes any nested block types that may appear directly + // inside the block. + BlockTypes map[string]*NestedBlock + + // Description and DescriptionKind contain a user facing description of the block + // and the format of that string. + Description string + DescriptionKind StringKind + + // Deprecated indicates whether the block has been marked as deprecated in the + // provider and usage should be discouraged. + Deprecated bool +} + +// Attribute represents a configuration attribute, within a block. +type Attribute struct { + // Type is a type specification that the attribute's value must conform to. + Type cty.Type + + // Description is an English-language description of the purpose and + // usage of the attribute. A description should be concise and use only + // one or two sentences, leaving full definition to longer-form + // documentation defined elsewhere. + Description string + DescriptionKind StringKind + + // Required, if set to true, specifies that an omitted or null value is + // not permitted. + Required bool + + // Optional, if set to true, specifies that an omitted or null value is + // permitted. This field conflicts with Required. + Optional bool + + // Computed, if set to true, specifies that the value comes from the + // provider rather than from configuration. If combined with Optional, + // then the config may optionally provide an overridden value. + Computed bool + + // Sensitive, if set to true, indicates that an attribute may contain + // sensitive information. + // + // At present nothing is done with this information, but callers are + // encouraged to set it where appropriate so that it may be used in the + // future to help Terraform mask sensitive information. (Terraform + // currently achieves this in a limited sense via other mechanisms.) + Sensitive bool + + // Deprecated indicates whether the attribute has been marked as deprecated in the + // provider and usage should be discouraged. + Deprecated bool +} + +// NestedBlock represents the embedding of one block within another. +type NestedBlock struct { + // Block is the description of the block that's nested. + Block + + // Nesting provides the nesting mode for the child block, which determines + // how many instances of the block are allowed, how many labels it expects, + // and how the resulting data will be converted into a data structure. + Nesting NestingMode + + // MinItems and MaxItems set, for the NestingList and NestingSet nesting + // modes, lower and upper limits on the number of child blocks allowed + // of the given type. If both are left at zero, no limit is applied. 
+ // + // As a special case, both values can be set to 1 for NestingSingle in + // order to indicate that a particular single block is required. + // + // These fields are ignored for other nesting modes and must both be left + // at zero. + MinItems, MaxItems int +} + +// NestingMode is an enumeration of modes for nesting blocks inside other +// blocks. +type NestingMode int + +//go:generate go run golang.org/x/tools/cmd/stringer -type=NestingMode + +const ( + nestingModeInvalid NestingMode = iota + + // NestingSingle indicates that only a single instance of a given + // block type is permitted, with no labels, and its content should be + // provided directly as an object value. + NestingSingle + + // NestingGroup is similar to NestingSingle in that it calls for only a + // single instance of a given block type with no labels, but it additonally + // guarantees that its result will never be null, even if the block is + // absent, and instead the nested attributes and blocks will be treated + // as absent in that case. (Any required attributes or blocks within the + // nested block are not enforced unless the block is explicitly present + // in the configuration, so they are all effectively optional when the + // block is not present.) + // + // This is useful for the situation where a remote API has a feature that + // is always enabled but has a group of settings related to that feature + // that themselves have default values. By using NestingGroup instead of + // NestingSingle in that case, generated plans will show the block as + // present even when not present in configuration, thus allowing any + // default values within to be displayed to the user. + NestingGroup + + // NestingList indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a list. + NestingList + + // NestingSet indicates that multiple blocks of the given type are + // permitted, with no labels, and that their corresponding objects should + // be provided in a set. + NestingSet + + // NestingMap indicates that multiple blocks of the given type are + // permitted, each with a single label, and that their corresponding + // objects should be provided in a map whose keys are the labels. + // + // It's an error, therefore, to use the same label value on multiple + // blocks. + NestingMap +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go new file mode 100644 index 000000000..e620e76a9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/flatmap.go @@ -0,0 +1,423 @@ +package hcl2shim + +import ( + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/go-cty/cty/convert" +) + +// FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a map compatible with what would be +// produced by the "flatmap" package. +// +// The type of the given value informs the structure of the resulting map. +// The value must be of an object type or this function will panic. +// +// Flatmap values can only represent maps when they are of primitive types, +// so the given value must not have any maps of complex types or the result +// is undefined. 
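To make the flatmap shape concrete, this sketch round-trips a small object value through FlatmapValueFromHCL2 and HCL2ValueFromFlatmap (defined later in this file). hcl2shim is an SDK-internal package, so this only builds inside the SDK module, and the value is hypothetical:

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"

	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name":  cty.String,
		"tags":  cty.Map(cty.String),
		"cidrs": cty.List(cty.String),
	})

	v := cty.ObjectVal(map[string]cty.Value{
		"name":  cty.StringVal("web"),
		"tags":  cty.MapVal(map[string]cty.Value{"env": cty.StringVal("dev")}),
		"cidrs": cty.ListVal([]cty.Value{cty.StringVal("10.0.0.0/16")}),
	})

	// Produces keys such as "name", "tags.%", "tags.env", "cidrs.#", "cidrs.0".
	flat := hcl2shim.FlatmapValueFromHCL2(v)
	fmt.Println(flat)

	// And back again, guided by the object type.
	back, err := hcl2shim.HCL2ValueFromFlatmap(flat, ty)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", back)
}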
+func FlatmapValueFromHCL2(v cty.Value) map[string]string { + if v.IsNull() { + return nil + } + + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", v.Type())) + } + + m := make(map[string]string) + flatmapValueFromHCL2Map(m, "", v) + return m +} + +func flatmapValueFromHCL2Value(m map[string]string, key string, val cty.Value) { + ty := val.Type() + switch { + case ty.IsPrimitiveType() || ty == cty.DynamicPseudoType: + flatmapValueFromHCL2Primitive(m, key, val) + case ty.IsObjectType() || ty.IsMapType(): + flatmapValueFromHCL2Map(m, key+".", val) + case ty.IsTupleType() || ty.IsListType() || ty.IsSetType(): + flatmapValueFromHCL2Seq(m, key+".", val) + default: + panic(fmt.Sprintf("cannot encode %s to flatmap", ty.FriendlyName())) + } +} + +func flatmapValueFromHCL2Primitive(m map[string]string, key string, val cty.Value) { + if !val.IsKnown() { + m[key] = UnknownVariableValue + return + } + if val.IsNull() { + // Omit entirely + return + } + + var err error + val, err = convert.Convert(val, cty.String) + if err != nil { + // Should not be possible, since all primitive types can convert to string. + panic(fmt.Sprintf("invalid primitive encoding to flatmap: %s", err)) + } + m[key] = val.AsString() +} + +func flatmapValueFromHCL2Map(m map[string]string, prefix string, val cty.Value) { + if val.IsNull() { + // Omit entirely + return + } + if !val.IsKnown() { + switch { + case val.Type().IsObjectType(): + // Whole objects can't be unknown in flatmap, so instead we'll + // just write all of the attribute values out as unknown. + for name, aty := range val.Type().AttributeTypes() { + flatmapValueFromHCL2Value(m, prefix+name, cty.UnknownVal(aty)) + } + default: + m[prefix+"%"] = UnknownVariableValue + } + return + } + + len := 0 + for it := val.ElementIterator(); it.Next(); { + ak, av := it.Element() + name := ak.AsString() + flatmapValueFromHCL2Value(m, prefix+name, av) + len++ + } + if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed + m[prefix+"%"] = strconv.Itoa(len) + } +} + +func flatmapValueFromHCL2Seq(m map[string]string, prefix string, val cty.Value) { + if val.IsNull() { + // Omit entirely + return + } + if !val.IsKnown() { + m[prefix+"#"] = UnknownVariableValue + return + } + + // For sets this won't actually generate exactly what helper/schema would've + // generated, because we don't have access to the set key function it + // would've used. However, in practice it doesn't actually matter what the + // keys are as long as they are unique, so we'll just generate sequential + // indexes for them as if it were a list. + // + // An important implication of this, however, is that the set ordering will + // not be consistent across mutations and so different keys may be assigned + // to the same value when round-tripping. Since this shim is intended to + // be short-lived and not used for round-tripping, we accept this. + i := 0 + for it := val.ElementIterator(); it.Next(); { + _, av := it.Element() + key := prefix + strconv.Itoa(i) + flatmapValueFromHCL2Value(m, key, av) + i++ + } + m[prefix+"#"] = strconv.Itoa(i) +} + +// HCL2ValueFromFlatmap converts a map compatible with what would be produced +// by the "flatmap" package to a HCL2 (really, the cty dynamic types library +// that HCL2 uses) object type. +// +// The intended result type must be provided in order to guide how the +// map contents are decoded. This must be an object type or this function +// will panic. 
+// +// Flatmap values can only represent maps when they are of primitive types, +// so the given type must not have any maps of complex types or the result +// is undefined. +// +// The result may contain null values if the given map does not contain keys +// for all of the different key paths implied by the given type. +func HCL2ValueFromFlatmap(m map[string]string, ty cty.Type) (cty.Value, error) { + if m == nil { + return cty.NullVal(ty), nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", ty)) + } + + return hcl2ValueFromFlatmapObject(m, "", ty.AttributeTypes()) +} + +func hcl2ValueFromFlatmapValue(m map[string]string, key string, ty cty.Type) (cty.Value, error) { + var val cty.Value + var err error + switch { + case ty.IsPrimitiveType(): + val, err = hcl2ValueFromFlatmapPrimitive(m, key, ty) + case ty.IsObjectType(): + val, err = hcl2ValueFromFlatmapObject(m, key+".", ty.AttributeTypes()) + case ty.IsTupleType(): + val, err = hcl2ValueFromFlatmapTuple(m, key+".", ty.TupleElementTypes()) + case ty.IsMapType(): + val, err = hcl2ValueFromFlatmapMap(m, key+".", ty) + case ty.IsListType(): + val, err = hcl2ValueFromFlatmapList(m, key+".", ty) + case ty.IsSetType(): + val, err = hcl2ValueFromFlatmapSet(m, key+".", ty) + default: + err = fmt.Errorf("cannot decode %s from flatmap", ty.FriendlyName()) + } + + if err != nil { + return cty.DynamicVal, err + } + return val, nil +} + +func hcl2ValueFromFlatmapPrimitive(m map[string]string, key string, ty cty.Type) (cty.Value, error) { + rawVal, exists := m[key] + if !exists { + return cty.NullVal(ty), nil + } + if rawVal == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + var err error + val := cty.StringVal(rawVal) + val, err = convert.Convert(val, ty) + if err != nil { + // This should never happen for _valid_ input, but flatmap data might + // be tampered with by the user and become invalid. 
+ return cty.DynamicVal, fmt.Errorf("invalid value for %q in state: %s", key, err) + } + + return val, nil +} + +func hcl2ValueFromFlatmapObject(m map[string]string, prefix string, atys map[string]cty.Type) (cty.Value, error) { + vals := make(map[string]cty.Value) + for name, aty := range atys { + val, err := hcl2ValueFromFlatmapValue(m, prefix+name, aty) + if err != nil { + return cty.DynamicVal, err + } + vals[name] = val + } + return cty.ObjectVal(vals), nil +} + +func hcl2ValueFromFlatmapTuple(m map[string]string, prefix string, etys []cty.Type) (cty.Value, error) { + var vals []cty.Value + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(cty.Tuple(etys)), nil + } + + countStr, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(cty.Tuple(etys)), nil + } + if countStr == UnknownVariableValue { + return cty.UnknownVal(cty.Tuple(etys)), nil + } + + count, err := strconv.Atoi(countStr) + if err != nil { + return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err) + } + if count != len(etys) { + return cty.DynamicVal, fmt.Errorf("wrong number of values for %q in state: got %d, but need %d", prefix, count, len(etys)) + } + + vals = make([]cty.Value, len(etys)) + for i, ety := range etys { + key := prefix + strconv.Itoa(i) + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[i] = val + } + return cty.TupleVal(vals), nil +} + +func hcl2ValueFromFlatmapMap(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + vals := make(map[string]cty.Value) + ety := ty.ElementType() + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + // We actually don't really care about the "count" of a map for our + // purposes here, but we do need to check if it _exists_ in order to + // recognize the difference between null (not set at all) and empty. + if strCount, exists := m[prefix+"%"]; !exists { + return cty.NullVal(ty), nil + } else if strCount == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + for fullKey := range m { + if !strings.HasPrefix(fullKey, prefix) { + continue + } + + // The flatmap format doesn't allow us to distinguish between keys + // that contain periods and nested objects, so by convention a + // map is only ever of primitive type in flatmap, and we just assume + // that the remainder of the raw key (dots and all) is the key we + // want in the result value. 
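		// For example, with prefix "tags." a raw key of
		// "tags.kubernetes.io/name" yields the single map key
		// "kubernetes.io/name", dots and all, rather than a nested value.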
+ key := fullKey[len(prefix):] + if key == "%" { + // Ignore the "count" key + continue + } + + val, err := hcl2ValueFromFlatmapValue(m, fullKey, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[key] = val + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + return cty.MapVal(vals), nil +} + +func hcl2ValueFromFlatmapList(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + var vals []cty.Value + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + countStr, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(ty), nil + } + if countStr == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + count, err := strconv.Atoi(countStr) + if err != nil { + return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err) + } + + ety := ty.ElementType() + if count == 0 { + return cty.ListValEmpty(ety), nil + } + + vals = make([]cty.Value, count) + for i := 0; i < count; i++ { + key := prefix + strconv.Itoa(i) + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[i] = val + } + + return cty.ListVal(vals), nil +} + +func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + var vals []cty.Value + ety := ty.ElementType() + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + strCount, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(ty), nil + } else if strCount == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + // Keep track of keys we've seen, se we don't add the same set value + // multiple times. The cty.Set will normally de-duplicate values, but we may + // have unknown values that would not show as equivalent. + seen := map[string]bool{} + + for fullKey := range m { + if !strings.HasPrefix(fullKey, prefix) { + continue + } + subKey := fullKey[len(prefix):] + if subKey == "#" { + // Ignore the "count" key + continue + } + key := fullKey + if dot := strings.IndexByte(subKey, '.'); dot != -1 { + key = fullKey[:dot+len(prefix)] + } + + if seen[key] { + continue + } + + seen[key] = true + + // The flatmap format doesn't allow us to distinguish between keys + // that contain periods and nested objects, so by convention a + // map is only ever of primitive type in flatmap, and we just assume + // that the remainder of the raw key (dots and all) is the key we + // want in the result value. + + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals = append(vals, val) + } + + if len(vals) == 0 && strCount == "1" { + // An empty set wouldn't be represented in the flatmap, so this must be + // a single empty object since the count is actually 1. + // Add an appropriately typed null value to the set. 
+ var val cty.Value + switch { + case ety.IsMapType(): + val = cty.MapValEmpty(ety) + case ety.IsListType(): + val = cty.ListValEmpty(ety) + case ety.IsSetType(): + val = cty.SetValEmpty(ety) + case ety.IsObjectType(): + // TODO: cty.ObjectValEmpty + objectMap := map[string]cty.Value{} + for attr, ty := range ety.AttributeTypes() { + objectMap[attr] = cty.NullVal(ty) + } + val = cty.ObjectVal(objectMap) + default: + val = cty.NullVal(ety) + } + vals = append(vals, val) + + } else if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go new file mode 100644 index 000000000..e557845a7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/paths.go @@ -0,0 +1,276 @@ +package hcl2shim + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/hashicorp/go-cty/cty" +) + +// RequiresReplace takes a list of flatmapped paths from a +// InstanceDiff.Attributes along with the corresponding cty.Type, and returns +// the list of the cty.Paths that are flagged as causing the resource +// replacement (RequiresNew). +// This will filter out redundant paths, paths that refer to flatmapped indexes +// (e.g. "#", "%"), and will return any changes within a set as the path to the +// set itself. +func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) { + var paths []cty.Path + + for _, attr := range attrs { + p, err := requiresReplacePath(attr, ty) + if err != nil { + return nil, err + } + + paths = append(paths, p) + } + + // now trim off any trailing paths that aren't GetAttrSteps, since only an + // attribute itself can require replacement + paths = trimPaths(paths) + + // There may be redundant paths due to set elements or index attributes + // Do some ugly n^2 filtering, but these are always fairly small sets. + for i := 0; i < len(paths)-1; i++ { + for j := i + 1; j < len(paths); j++ { + if reflect.DeepEqual(paths[i], paths[j]) { + // swap the tail and slice it off + paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j] + paths = paths[:len(paths)-1] + j-- + } + } + } + + return paths, nil +} + +// trimPaths removes any trailing steps that aren't of type GetAttrSet, since +// only an attribute itself can require replacement +func trimPaths(paths []cty.Path) []cty.Path { + var trimmed []cty.Path + for _, path := range paths { + path = trimPath(path) + if len(path) > 0 { + trimmed = append(trimmed, path) + } + } + return trimmed +} + +func trimPath(path cty.Path) cty.Path { + for len(path) > 0 { + _, isGetAttr := path[len(path)-1].(cty.GetAttrStep) + if isGetAttr { + break + } + path = path[:len(path)-1] + } + return path +} + +// requiresReplacePath takes a key from a flatmap along with the cty.Type +// describing the structure, and returns the cty.Path that would be used to +// reference the nested value in the data structure. +// This is used specifically to record the RequiresReplace attributes from a +// ResourceInstanceDiff. 
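A sketch of RequiresReplace in use, mapping flatmap-style attribute keys from an InstanceDiff onto cty paths (hcl2shim is SDK-internal, and both the type and the attribute keys below are hypothetical):

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"

	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim"
)

func main() {
	ty := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"tags": cty.Map(cty.String),
		"rule": cty.List(cty.Object(map[string]cty.Type{
			"port": cty.Number,
		})),
	})

	// Keys as they might appear in InstanceDiff.Attributes with RequiresNew set.
	attrs := []string{"name", "tags.%", "rule.0.port"}

	paths, err := hcl2shim.RequiresReplace(attrs, ty)
	if err != nil {
		panic(err)
	}
	for _, p := range paths {
		// Prints "name", "tags" (the "%" count key is dropped) and "rule.0.port".
		fmt.Println(hcl2shim.FlatmapKeyFromPath(p))
	}
}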
+func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) { + if k == "" { + return nil, nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty)) + } + + path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes()) + if err != nil { + return path, fmt.Errorf("[%s] %s", k, err) + } + return path, nil +} + +func pathSplit(p string) (string, string) { + parts := strings.SplitN(p, ".", 2) + head := parts[0] + rest := "" + if len(parts) > 1 { + rest = parts[1] + } + return head, rest +} + +func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) { + k, rest := pathSplit(key) + + path := cty.Path{cty.GetAttrStep{Name: k}} + + ty, ok := atys[k] + if !ok { + return path, fmt.Errorf("attribute %q not found", k) + } + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + switch { + case ty.IsPrimitiveType(): + err = fmt.Errorf("invalid step %q with type %#v", key, ty) + case ty.IsObjectType(): + path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes()) + case ty.IsTupleType(): + path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes()) + case ty.IsMapType(): + path, err = pathFromFlatmapKeyMap(key, ty) + case ty.IsListType(): + path, err = pathFromFlatmapKeyList(key, ty) + case ty.IsSetType(): + path, err = pathFromFlatmapKeySet(key, ty) + default: + err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName()) + } + + if err != nil { + return path, err + } + + return path, nil +} + +func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if k == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if idx >= len(etys) { + return path, fmt.Errorf("index %s out of range in %#v", key, etys) + } + + if rest == "" { + return path, nil + } + + ty := etys[idx] + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := key, "" + if !ty.ElementType().IsPrimitiveType() { + k, rest = pathSplit(key) + } + + // we don't need to convert the index keys to paths + if k == "%" { + return path, nil + } + + path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if key == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func 
pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) { + // once we hit a set, we can't return consistent paths, so just mark the + // set as a whole changed. + return nil, nil +} + +// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for +// use in generating legacy style diffs. +func FlatmapKeyFromPath(path cty.Path) string { + var parts []string + + for _, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + parts = append(parts, step.Name) + case cty.IndexStep: + switch ty := step.Key.Type(); { + case ty == cty.String: + parts = append(parts, step.Key.AsString()) + case ty == cty.Number: + i, _ := step.Key.AsBigFloat().Int64() + parts = append(parts, strconv.Itoa(int(i))) + } + } + } + + return strings.Join(parts, ".") +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go new file mode 100644 index 000000000..68f48da8f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/single_attr_body.go @@ -0,0 +1,85 @@ +package hcl2shim + +import ( + "fmt" + + hcl2 "github.com/hashicorp/hcl/v2" +) + +// SingleAttrBody is a weird implementation of hcl2.Body that acts as if +// it has a single attribute whose value is the given expression. +// +// This is used to shim Resource.RawCount and Output.RawConfig to behave +// more like they do in the old HCL loader. +type SingleAttrBody struct { + Name string + Expr hcl2.Expression +} + +var _ hcl2.Body = SingleAttrBody{} + +func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + if !all { + // This should never happen because this body implementation should only + // be used by code that is aware that it's using a single-attr body. + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid attribute", + Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + return content, diags +} + +func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + var remain hcl2.Body + if all { + // If the request matched the one attribute we represent, then the + // remaining body is empty. 
+ remain = hcl2.EmptyBody() + } else { + remain = b + } + return content, remain, diags +} + +func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) { + ret := &hcl2.BodyContent{} + all := false + var diags hcl2.Diagnostics + + for _, attrS := range schema.Attributes { + if attrS.Name == b.Name { + attrs, _ := b.JustAttributes() + ret.Attributes = attrs + all = true + } else if attrS.Required { + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Missing attribute", + Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + } + + return ret, all, diags +} + +func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) { + return hcl2.Attributes{ + b.Name: { + Expr: b.Expr, + Name: b.Name, + NameRange: b.Expr.Range(), + Range: b.Expr.Range(), + }, + }, nil +} + +func (b SingleAttrBody) MissingItemRange() hcl2.Range { + return b.Expr.Range() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go new file mode 100644 index 000000000..91e91547a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values.go @@ -0,0 +1,230 @@ +package hcl2shim + +import ( + "fmt" + "math/big" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. +// RawConfig uses this information to build up data about +// unknown keys. +const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for +// known object values and uses the provided block schema to perform some +// additional normalization to better mimic the shape of value that the old +// HCL1/HIL-based codepaths would've produced. +// +// In particular, it discards the collections that we use to represent nested +// blocks (other than NestingSingle) if they are empty, which better mimics +// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't +// know that an unspecified block _could_ exist. +// +// The given object value must conform to the schema's implied type or this +// function will panic or produce incorrect results. +// +// This is primarily useful for the final transition from new-style values to +// terraform.ResourceConfig before calling to a legacy provider, since +// helper/schema (the old provider SDK) is particularly sensitive to these +// subtle differences within its validation code. 
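A sketch of the normalization described above: a null attribute and an empty nested-block list are both dropped from the result, mimicking what the old HCL1-era decoder would have produced. Both imports are SDK-internal packages, and the schema and value are hypothetical:

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"

	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim"
)

func main() {
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"name":        {Type: cty.String, Optional: true},
			"description": {Type: cty.String, Optional: true},
		},
		BlockTypes: map[string]*configschema.NestedBlock{
			"rule": {
				Nesting: configschema.NestingList,
				Block: configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"port": {Type: cty.Number, Required: true},
					},
				},
			},
		},
	}

	val := cty.ObjectVal(map[string]cty.Value{
		"name":        cty.StringVal("web"),
		"description": cty.NullVal(cty.String),
		"rule":        cty.ListValEmpty(cty.Object(map[string]cty.Type{"port": cty.Number})),
	})

	// Only "name" survives; the null attribute and the empty block list are
	// skipped entirely, so the result is map[name:web].
	fmt.Println(hcl2shim.ConfigValueFromHCL2Block(val, schema))
}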
+func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} { + if v.IsNull() { + return nil + } + if !v.IsKnown() { + panic("ConfigValueFromHCL2Block used with unknown value") + } + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v)) + } + + atys := v.Type().AttributeTypes() + ret := make(map[string]interface{}) + + for name := range schema.Attributes { + if _, exists := atys[name]; !exists { + continue + } + + av := v.GetAttr(name) + if av.IsNull() { + // Skip nulls altogether, to better mimic how HCL1 would behave + continue + } + ret[name] = ConfigValueFromHCL2(av) + } + + for name, blockS := range schema.BlockTypes { + if _, exists := atys[name]; !exists { + continue + } + bv := v.GetAttr(name) + if !bv.IsKnown() { + ret[name] = UnknownVariableValue + continue + } + if bv.IsNull() { + continue + } + + switch blockS.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block) + + case configschema.NestingList, configschema.NestingSet: + l := bv.LengthInt() + if l == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make([]interface{}, 0, l) + for it := bv.ElementIterator(); it.Next(); { + _, ev := it.Element() + if !ev.IsKnown() { + elems = append(elems, UnknownVariableValue) + continue + } + elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block)) + } + ret[name] = elems + + case configschema.NestingMap: + if bv.LengthInt() == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make(map[string]interface{}) + for it := bv.ElementIterator(); it.Next(); { + ek, ev := it.Element() + if !ev.IsKnown() { + elems[ek.AsString()] = UnknownVariableValue + continue + } + elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block) + } + ret[name] = elems + } + } + + return ret +} + +// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a value type that matches what would've +// been produced from the HCL-based interpolator for an equivalent structure. +// +// This function will transform a cty null value into a Go nil value, which +// isn't a possible outcome of the HCL/HIL-based decoder and so callers may +// need to detect and reject any null values. +func ConfigValueFromHCL2(v cty.Value) interface{} { + if !v.IsKnown() { + return UnknownVariableValue + } + if v.IsNull() { + return nil + } + + switch v.Type() { + case cty.Bool: + return v.True() // like HCL.BOOL + case cty.String: + return v.AsString() // like HCL token.STRING or token.HEREDOC + case cty.Number: + // We can't match HCL _exactly_ here because it distinguishes between + // int and float values, but we'll get as close as we can by using + // an int if the number is exactly representable, and a float if not. + // The conversion to float will force precision to that of a float64, + // which is potentially losing information from the specific number + // given, but no worse than what HCL would've done in its own conversion + // to float. + + f := v.AsBigFloat() + if i, acc := f.Int64(); acc == big.Exact { + // if we're on a 32-bit system and the number is too big for 32-bit + // int then we'll fall through here and use a float64. 
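			// ^uint(0) is a word-sized value with every bit set; shifting it
			// right by one and converting to int yields the largest value an
			// int can hold on this platform, so MaxInt and MinInt below are
			// the 32- or 64-bit limits depending on the architecture.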
+ const MaxInt = int(^uint(0) >> 1) + const MinInt = -MaxInt - 1 + if i <= int64(MaxInt) && i >= int64(MinInt) { + return int(i) // Like HCL token.NUMBER + } + } + + f64, _ := f.Float64() + return f64 // like HCL token.FLOAT + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]interface{}, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, ConfigValueFromHCL2(ev)) + } + return l + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]interface{}) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + cv := ConfigValueFromHCL2(ev) + if cv != nil { + l[ek.AsString()] = cv + } + } + return l + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to config value", v)) +} + +// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes +// a value as would be returned from the old interpolator and turns it into +// a cty.Value so it can be used within, for example, an HCL2 EvalContext. +func HCL2ValueFromConfigValue(v interface{}) cty.Value { + if v == nil { + return cty.NullVal(cty.DynamicPseudoType) + } + if v == UnknownVariableValue { + return cty.DynamicVal + } + + switch tv := v.(type) { + case bool: + return cty.BoolVal(tv) + case string: + return cty.StringVal(tv) + case int: + return cty.NumberIntVal(int64(tv)) + case float64: + return cty.NumberFloatVal(tv) + case []interface{}: + vals := make([]cty.Value, len(tv)) + for i, ev := range tv { + vals[i] = HCL2ValueFromConfigValue(ev) + } + return cty.TupleVal(vals) + case map[string]interface{}: + vals := map[string]cty.Value{} + for k, ev := range tv { + vals[k] = HCL2ValueFromConfigValue(ev) + } + return cty.ObjectVal(vals) + default: + // HCL/HIL should never generate anything that isn't caught by + // the above, so if we get here something has gone very wrong. + panic(fmt.Errorf("can't convert %#v to cty.Value", v)) + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go new file mode 100644 index 000000000..87638b4e4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim/values_equiv.go @@ -0,0 +1,214 @@ +package hcl2shim + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// ValuesSDKEquivalent returns true if both of the given values seem equivalent +// as far as the legacy SDK diffing code would be concerned. +// +// Since SDK diffing is a fuzzy, inexact operation, this function is also +// fuzzy and inexact. It will err on the side of returning false if it +// encounters an ambiguous situation. Ambiguity is most common in the presence +// of sets because in practice it is impossible to exactly correlate +// nonequal-but-equivalent set elements because they have no identity separate +// from their value. +// +// This must be used _only_ for comparing values for equivalence within the +// SDK planning code. It is only meaningful to compare the "prior state" +// provided by Terraform Core with the "planned new state" produced by the +// legacy SDK code via shims. 
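ConfigValueFromHCL2 and HCL2ValueFromConfigValue above are designed to round-trip, although type information is flattened along the way. A minimal sketch (hcl2shim is SDK-internal and the value is hypothetical):

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"

	"github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim"
)

func main() {
	v := cty.ObjectVal(map[string]cty.Value{
		"name":  cty.StringVal("web"),
		"count": cty.NumberIntVal(3),
		"tags":  cty.MapVal(map[string]cty.Value{"env": cty.StringVal("dev")}),
	})

	// Down to plain Go values: a map[string]interface{} holding a string,
	// an int and a nested map.
	native := hcl2shim.ConfigValueFromHCL2(v)
	fmt.Printf("%#v\n", native)

	// And back up to cty. The nested map comes back as an object value,
	// since the native form no longer carries element types (lists likewise
	// come back as tuples).
	fmt.Printf("%#v\n", hcl2shim.HCL2ValueFromConfigValue(native))
}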
In particular it is not valid to use this +// function with their the config value or the "proposed new state" value +// because they contain only the subset of data that Terraform Core itself is +// able to determine. +func ValuesSDKEquivalent(a, b cty.Value) bool { + if a == cty.NilVal || b == cty.NilVal { + // We don't generally expect nils to appear, but we'll allow them + // for robustness since the data structures produced by legacy SDK code + // can sometimes be non-ideal. + return a == b // equivalent if they are _both_ nil + } + if a.RawEquals(b) { + // Easy case. We use RawEquals because we want two unknowns to be + // considered equal here, whereas "Equals" would return unknown. + return true + } + if !a.IsKnown() || !b.IsKnown() { + // Two unknown values are equivalent regardless of type. A known is + // never equivalent to an unknown. + return a.IsKnown() == b.IsKnown() + } + if aZero, bZero := valuesSDKEquivalentIsNullOrZero(a), valuesSDKEquivalentIsNullOrZero(b); aZero || bZero { + // Two null/zero values are equivalent regardless of type. A non-zero is + // never equivalent to a zero. + return aZero == bZero + } + + // If we get down here then we are guaranteed that both a and b are known, + // non-null values. + + aTy := a.Type() + bTy := b.Type() + switch { + case aTy.IsSetType() && bTy.IsSetType(): + return valuesSDKEquivalentSets(a, b) + case aTy.IsListType() && bTy.IsListType(): + return valuesSDKEquivalentSequences(a, b) + case aTy.IsTupleType() && bTy.IsTupleType(): + return valuesSDKEquivalentSequences(a, b) + case aTy.IsMapType() && bTy.IsMapType(): + return valuesSDKEquivalentMappings(a, b) + case aTy.IsObjectType() && bTy.IsObjectType(): + return valuesSDKEquivalentMappings(a, b) + case aTy == cty.Number && bTy == cty.Number: + return valuesSDKEquivalentNumbers(a, b) + default: + // We've now covered all the interesting cases, so anything that falls + // down here cannot be equivalent. + return false + } +} + +// valuesSDKEquivalentIsNullOrZero returns true if the given value is either +// null or is the "zero value" (in the SDK/Go sense) for its type. +func valuesSDKEquivalentIsNullOrZero(v cty.Value) bool { + if v == cty.NilVal { + return true + } + + ty := v.Type() + switch { + case !v.IsKnown(): + return false + case v.IsNull(): + return true + + // After this point, v is always known and non-null + case ty.IsListType() || ty.IsSetType() || ty.IsMapType() || ty.IsObjectType() || ty.IsTupleType(): + return v.LengthInt() == 0 + case ty == cty.String: + return v.RawEquals(cty.StringVal("")) + case ty == cty.Number: + return v.RawEquals(cty.Zero) + case ty == cty.Bool: + return v.RawEquals(cty.False) + default: + // The above is exhaustive, but for robustness we'll consider anything + // else to _not_ be zero unless it is null. + return false + } +} + +// valuesSDKEquivalentSets returns true only if each of the elements in a can +// be correlated with at least one equivalent element in b and vice-versa. +// This is a fuzzy operation that prefers to signal non-equivalence if it cannot +// be certain that all elements are accounted for. +func valuesSDKEquivalentSets(a, b cty.Value) bool { + if aLen, bLen := a.LengthInt(), b.LengthInt(); aLen != bLen { + return false + } + + // Our methodology here is a little tricky, to deal with the fact that + // it's impossible to directly correlate two non-equal set elements because + // they don't have identities separate from their values. 
+ // The approach is to count the number of equivalent elements each element + // of a has in b and vice-versa, and then return true only if each element + // in both sets has at least one equivalent. + as := a.AsValueSlice() + bs := b.AsValueSlice() + aeqs := make([]bool, len(as)) + beqs := make([]bool, len(bs)) + for ai, av := range as { + for bi, bv := range bs { + if ValuesSDKEquivalent(av, bv) { + aeqs[ai] = true + beqs[bi] = true + } + } + } + + for _, eq := range aeqs { + if !eq { + return false + } + } + for _, eq := range beqs { + if !eq { + return false + } + } + return true +} + +// valuesSDKEquivalentSequences decides equivalence for two sequence values +// (lists or tuples). +func valuesSDKEquivalentSequences(a, b cty.Value) bool { + as := a.AsValueSlice() + bs := b.AsValueSlice() + if len(as) != len(bs) { + return false + } + + for i := range as { + if !ValuesSDKEquivalent(as[i], bs[i]) { + return false + } + } + return true +} + +// valuesSDKEquivalentMappings decides equivalence for two mapping values +// (maps or objects). +func valuesSDKEquivalentMappings(a, b cty.Value) bool { + as := a.AsValueMap() + bs := b.AsValueMap() + if len(as) != len(bs) { + return false + } + + for k, av := range as { + bv, ok := bs[k] + if !ok { + return false + } + if !ValuesSDKEquivalent(av, bv) { + return false + } + } + return true +} + +// valuesSDKEquivalentNumbers decides equivalence for two number values based +// on the fact that the SDK uses int and float64 representations while +// cty (and thus Terraform Core) uses big.Float, and so we expect to lose +// precision in the round-trip. +// +// This does _not_ attempt to allow for an epsilon difference that may be +// caused by accumulated innacuracy in a float calculation, under the +// expectation that providers generally do not actually do compuations on +// floats and instead just pass string representations of them on verbatim +// to remote APIs. A remote API _itself_ may introduce inaccuracy, but that's +// a problem for the provider itself to deal with, based on its knowledge of +// the remote system, e.g. using DiffSuppressFunc. +func valuesSDKEquivalentNumbers(a, b cty.Value) bool { + if a.RawEquals(b) { + return true // easy + } + + af := a.AsBigFloat() + bf := b.AsBigFloat() + + if af.IsInt() != bf.IsInt() { + return false + } + if af.IsInt() && bf.IsInt() { + return false // a.RawEquals(b) test above is good enough for integers + } + + // The SDK supports only int and float64, so if it's not an integer + // we know that only a float64-level of precision can possibly be + // significant. + af64, _ := af.Float64() + bf64, _ := bf.Float64() + return af64 == bf64 +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode/hashcode.go new file mode 100644 index 000000000..6ccc52318 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/hashcode/hashcode.go @@ -0,0 +1,35 @@ +package hashcode + +import ( + "bytes" + "fmt" + "hash/crc32" +) + +// String hashes a string to a unique hashcode. +// +// crc32 returns a uint32, but for our use we need +// and non negative integer. Here we cast to an integer +// and invert it if the result is negative. +func String(s string) int { + v := int(crc32.ChecksumIEEE([]byte(s))) + if v >= 0 { + return v + } + if -v >= 0 { + return -v + } + // v == MinInt + return 0 +} + +// Strings hashes a list of strings to a unique hashcode. 
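The sign handling described above for String can be sketched with the standard library alone; this mirrors the helper rather than calling it, since the hashcode package is internal to the SDK:

package main

import (
	"fmt"
	"hash/crc32"
)

// nonNegativeHash mirrors hashcode.String: take the IEEE CRC-32 of the input,
// reinterpret it as a signed int, and flip the sign if it came out negative,
// returning 0 for the one unrepresentable MinInt case.
func nonNegativeHash(s string) int {
	v := int(crc32.ChecksumIEEE([]byte(s)))
	if v >= 0 {
		return v
	}
	if -v >= 0 {
		return -v
	}
	return 0
}

func main() {
	// Stable and non-negative, suitable for use as a set element index.
	fmt.Println(nonNegativeHash("tf-test-bucket"))
}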
+func Strings(strings []string) string { + var buf bytes.Buffer + + for _, s := range strings { + buf.WriteString(fmt.Sprintf("%s-", s)) + } + + return fmt.Sprintf("%d", String(buf.String())) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/context/context.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/context/context.go new file mode 100644 index 000000000..0939edeff --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/context/context.go @@ -0,0 +1,7 @@ +package context + +type Key string + +var ( + StopContextKey = Key("StopContext") +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/doc.go new file mode 100644 index 000000000..82b5937bf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/doc.go @@ -0,0 +1,6 @@ +// Package plugin contains types and functions to help Terraform plugins +// implement the plugin rpc interface. +// The primary Provider type will be responsible for converting from the grpc +// wire protocol to the types and methods known to the provider +// implementations. +package plugin diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/grpc_provider.go new file mode 100644 index 000000000..6eb20a061 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/grpc_provider.go @@ -0,0 +1,1464 @@ +package plugin + +import ( + "context" + "encoding/json" + "fmt" + "log" + "strconv" + "sync" + + "github.com/hashicorp/go-cty/cty" + ctyconvert "github.com/hashicorp/go-cty/cty/convert" + "github.com/hashicorp/go-cty/cty/msgpack" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" + c "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/context" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert" + proto "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +const newExtraKey = "_new_extra_shim" + +func NewGRPCProviderServer(p *schema.Provider) *GRPCProviderServer { + return &GRPCProviderServer{ + provider: p, + stopCh: make(chan struct{}), + } +} + +// GRPCProviderServer handles the server, or plugin side of the rpc connection. +type GRPCProviderServer struct { + provider *schema.Provider + stopCh chan struct{} + stopMu sync.Mutex +} + +// mergeStop is called in a goroutine and waits for the global stop signal +// and propagates cancellation to the passed in ctx/cancel func. The ctx is +// also passed to this function and waited upon so no goroutine leak is caused. +func mergeStop(ctx context.Context, cancel context.CancelFunc, stopCh chan struct{}) { + select { + case <-ctx.Done(): + return + case <-stopCh: + cancel() + } +} + +// StopContext derives a new context from the passed in grpc context. +// It creates a goroutine to wait for the server stop and propagates +// cancellation to the derived grpc context. 
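The stop-merging pattern described above can be reproduced with the standard library alone. This sketch derives a cancellable context that ends either when the parent context ends or when a stop channel is closed; the names are illustrative, not the SDK's:

package main

import (
	"context"
	"fmt"
	"time"
)

// withStopCh returns a context that is cancelled when either the parent is
// done or stopCh is closed. The goroutine selects on both, so it exits in
// either case and does not leak.
func withStopCh(parent context.Context, stopCh <-chan struct{}) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(parent)
	go func() {
		select {
		case <-ctx.Done():
		case <-stopCh:
			cancel()
		}
	}()
	return ctx, cancel
}

func main() {
	stopCh := make(chan struct{})
	ctx, cancel := withStopCh(context.Background(), stopCh)
	defer cancel()

	go func() {
		time.Sleep(10 * time.Millisecond)
		close(stopCh) // simulate the provider server's global stop signal
	}()

	<-ctx.Done()
	fmt.Println("stopped:", ctx.Err())
}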
+func (s *GRPCProviderServer) StopContext(ctx context.Context) context.Context { + s.stopMu.Lock() + defer s.stopMu.Unlock() + + stoppable, cancel := context.WithCancel(ctx) + go mergeStop(stoppable, cancel, s.stopCh) + return stoppable +} + +func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProviderSchema_Request) (*proto.GetProviderSchema_Response, error) { + + resp := &proto.GetProviderSchema_Response{ + ResourceSchemas: make(map[string]*proto.Schema), + DataSourceSchemas: make(map[string]*proto.Schema), + } + + resp.Provider = &proto.Schema{ + Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()), + } + + resp.ProviderMeta = &proto.Schema{ + Block: convert.ConfigSchemaToProto(s.getProviderMetaSchemaBlock()), + } + + for typ, res := range s.provider.ResourcesMap { + resp.ResourceSchemas[typ] = &proto.Schema{ + Version: int64(res.SchemaVersion), + Block: convert.ConfigSchemaToProto(res.CoreConfigSchema()), + } + } + + for typ, dat := range s.provider.DataSourcesMap { + resp.DataSourceSchemas[typ] = &proto.Schema{ + Version: int64(dat.SchemaVersion), + Block: convert.ConfigSchemaToProto(dat.CoreConfigSchema()), + } + } + + return resp, nil +} + +func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block { + return schema.InternalMap(s.provider.Schema).CoreConfigSchema() +} + +func (s *GRPCProviderServer) getProviderMetaSchemaBlock() *configschema.Block { + return schema.InternalMap(s.provider.ProviderMetaSchema).CoreConfigSchema() +} + +func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block { + res := s.provider.ResourcesMap[name] + return res.CoreConfigSchema() +} + +func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema.Block { + dat := s.provider.DataSourcesMap[name] + return dat.CoreConfigSchema() +} + +func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto.PrepareProviderConfig_Request) (*proto.PrepareProviderConfig_Response, error) { + resp := &proto.PrepareProviderConfig_Response{} + + schemaBlock := s.getProviderSchemaBlock() + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // lookup any required, top-level attributes that are Null, and see if we + // have a Default value available. 
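	// cty.Transform visits every nested value in configVal and rebuilds the
	// structure from whatever the callback returns, which is why the closure
	// below returns early for any path that is not exactly one GetAttr step
	// deep: only top-level provider attributes receive defaults here.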
+ configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := s.provider.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) + return val, err + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + } + + return val, err + }) + if err != nil { + // any error here was already added to the diagnostics + return resp, nil + } + + configVal, err = schemaBlock.CoerceValue(configVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. 
+ if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.Validate(config)) + + preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.PreparedConfig = &proto.DynamicValue{Msgpack: preparedConfigMP} + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *proto.ValidateResourceTypeConfig_Request) (*proto.ValidateResourceTypeConfig_Response, error) { + resp := &proto.ValidateResourceTypeConfig_Response{} + + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.ValidateResource(req.TypeName, config)) + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *proto.ValidateDataSourceConfig_Request) (*proto.ValidateDataSourceConfig_Response, error) { + resp := &proto.ValidateDataSourceConfig_Response{} + + schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, s.provider.ValidateDataSource(req.TypeName, config)) + + return resp, nil +} + +func (s *GRPCProviderServer) UpgradeResourceState(ctx context.Context, req *proto.UpgradeResourceState_Request) (*proto.UpgradeResourceState_Response, error) { + resp := &proto.UpgradeResourceState_Response{} + + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + version := int(req.Version) + + jsonMap := map[string]interface{}{} + var err error + + switch { + // We first need to upgrade a flatmap state if it exists. + // There should never be both a JSON and Flatmap state in the request. + case len(req.RawState.Flatmap) > 0: + jsonMap, version, err = s.upgradeFlatmapState(ctx, version, req.RawState.Flatmap, res) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + // if there's a JSON state, we need to decode it. 
+ case len(req.RawState.Json) > 0: + err = json.Unmarshal(req.RawState.Json, &jsonMap) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + default: + log.Println("[DEBUG] no state provided to upgrade") + return resp, nil + } + + // complete the upgrade of the JSON states + jsonMap, err = s.upgradeJSONState(ctx, version, jsonMap, res) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // The provider isn't required to clean out removed fields + s.removeAttributes(jsonMap, schemaBlock.ImpliedType()) + + // now we need to turn the state into the default json representation, so + // that it can be re-decoded using the actual schema. + val, err := schema.JSONMapToStateValue(jsonMap, schemaBlock) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Now we need to make sure blocks are represented correctly, which means + // that missing blocks are empty collections, rather than null. + // First we need to CoerceValue to ensure that all object types match. + val, err = schemaBlock.CoerceValue(val) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + // Normalize the value and fill in any missing blocks. + val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock) + + // encode the final state to the expected msgpack format + newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.UpgradedState = &proto.DynamicValue{Msgpack: newStateMP} + return resp, nil +} + +// upgradeFlatmapState takes a legacy flatmap state, upgrades it using Migrate +// state if necessary, and converts it to the new JSON state format decoded as a +// map[string]interface{}. +// upgradeFlatmapState returns the json map along with the corresponding schema +// version. +func (s *GRPCProviderServer) upgradeFlatmapState(ctx context.Context, version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) { + // this will be the version we've upgraded so, defaulting to the given + // version in case no migration was called. + upgradedVersion := version + + // first determine if we need to call the legacy MigrateState func + requiresMigrate := version < res.SchemaVersion + + schemaType := res.CoreConfigSchema().ImpliedType() + + // if there are any StateUpgraders, then we need to only compare + // against the first version there + if len(res.StateUpgraders) > 0 { + requiresMigrate = version < res.StateUpgraders[0].Version + } + + if requiresMigrate && res.MigrateState == nil { + // Providers were previously allowed to bump the version + // without declaring MigrateState. + // If there are further upgraders, then we've only updated that far. 
+ if len(res.StateUpgraders) > 0 { + schemaType = res.StateUpgraders[0].Type + upgradedVersion = res.StateUpgraders[0].Version + } + } else if requiresMigrate { + is := &terraform.InstanceState{ + ID: m["id"], + Attributes: m, + Meta: map[string]interface{}{ + "schema_version": strconv.Itoa(version), + }, + } + is, err := res.MigrateState(version, is, s.provider.Meta()) + if err != nil { + return nil, 0, err + } + + // re-assign the map in case there was a copy made, making sure to keep + // the ID + m := is.Attributes + m["id"] = is.ID + + // if there are further upgraders, then we've only updated that far + if len(res.StateUpgraders) > 0 { + schemaType = res.StateUpgraders[0].Type + upgradedVersion = res.StateUpgraders[0].Version + } + } else { + // the schema version may be newer than the MigrateState functions + // handled and older than the current, but still stored in the flatmap + // form. If that's the case, we need to find the correct schema type to + // convert the state. + for _, upgrader := range res.StateUpgraders { + if upgrader.Version == version { + schemaType = upgrader.Type + break + } + } + } + + // now we know the state is up to the latest version that handled the + // flatmap format state. Now we can upgrade the format and continue from + // there. + newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType) + if err != nil { + return nil, 0, err + } + + jsonMap, err := schema.StateValueToJSONMap(newConfigVal, schemaType) + return jsonMap, upgradedVersion, err +} + +func (s *GRPCProviderServer) upgradeJSONState(ctx context.Context, version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) { + var err error + + for _, upgrader := range res.StateUpgraders { + if version != upgrader.Version { + continue + } + + m, err = upgrader.Upgrade(ctx, m, s.provider.Meta()) + if err != nil { + return nil, err + } + version++ + } + + return m, nil +} + +// Remove any attributes no longer present in the schema, so that the json can +// be correctly decoded. +func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) { + // we're only concerned with finding maps that corespond to object + // attributes + switch v := v.(type) { + case []interface{}: + // If these aren't blocks the next call will be a noop + if ty.IsListType() || ty.IsSetType() { + eTy := ty.ElementType() + for _, eV := range v { + s.removeAttributes(eV, eTy) + } + } + return + case map[string]interface{}: + // map blocks aren't yet supported, but handle this just in case + if ty.IsMapType() { + eTy := ty.ElementType() + for _, eV := range v { + s.removeAttributes(eV, eTy) + } + return + } + + if ty == cty.DynamicPseudoType { + log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v) + return + } + + if !ty.IsObjectType() { + // This shouldn't happen, and will fail to decode further on, so + // there's no need to handle it here. 
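upgradeFlatmapState and upgradeJSONState above apply the resource's StateUpgraders in version order: each upgrader transforms the raw JSON state map and advances the version by one until the current schema version is reached. A stand-alone sketch of that chaining, with a hypothetical upgrader that renames a field:

package main

import "fmt"

type stateUpgrader struct {
	Version int
	Upgrade func(map[string]interface{}) (map[string]interface{}, error)
}

// upgradeJSONState mirrors the loop above: run each upgrader whose version
// matches the state's current version, then bump the version.
func upgradeJSONState(version int, m map[string]interface{}, upgraders []stateUpgrader) (map[string]interface{}, error) {
	for _, u := range upgraders {
		if version != u.Version {
			continue
		}
		var err error
		if m, err = u.Upgrade(m); err != nil {
			return nil, err
		}
		version++
	}
	return m, nil
}

func main() {
	upgraders := []stateUpgrader{
		{Version: 0, Upgrade: func(m map[string]interface{}) (map[string]interface{}, error) {
			m["name"] = m["display_name"] // hypothetical schema change: rename a field
			delete(m, "display_name")
			return m, nil
		}},
	}

	out, err := upgradeJSONState(0, map[string]interface{}{"display_name": "example"}, upgraders)
	fmt.Println(out, err)
}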
+ log.Printf("[WARN] unexpected type %#v for map in json state", ty) + return + } + + attrTypes := ty.AttributeTypes() + for attr, attrV := range v { + attrTy, ok := attrTypes[attr] + if !ok { + log.Printf("[DEBUG] attribute %q no longer present in schema", attr) + delete(v, attr) + continue + } + + s.removeAttributes(attrV, attrTy) + } + } +} + +func (s *GRPCProviderServer) Stop(_ context.Context, _ *proto.Stop_Request) (*proto.Stop_Response, error) { + s.stopMu.Lock() + defer s.stopMu.Unlock() + + // stop + close(s.stopCh) + // reset the stop signal + s.stopCh = make(chan struct{}) + + return &proto.Stop_Response{}, nil +} + +func (s *GRPCProviderServer) Configure(ctx context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) { + resp := &proto.Configure_Response{} + + schemaBlock := s.getProviderSchemaBlock() + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + s.provider.TerraformVersion = req.TerraformVersion + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + // TODO: remove global stop context hack + // This attaches a global stop synchro'd context onto the provider.Configure + // request scoped context. This provides a substitute for the removed provider.StopContext() + // function. Ideally a provider should migrate to the context aware API that receives + // request scoped contexts, however this is a large undertaking for very large providers. + ctxHack := context.WithValue(ctx, c.StopContextKey, s.StopContext(context.Background())) + diags := s.provider.Configure(ctxHack, config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + + return resp, nil +} + +func (s *GRPCProviderServer) ReadResource(ctx context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) { + resp := &proto.ReadResource_Response{ + // helper/schema did previously handle private data during refresh, but + // core is now going to expect this to be maintained in order to + // persist it in the state. 
+ Private: req.Private, + } + + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + stateVal, err := msgpack.Unmarshal(req.CurrentState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + instanceState, err := res.ShimInstanceStateFromValue(stateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + private := make(map[string]interface{}) + if len(req.Private) > 0 { + if err := json.Unmarshal(req.Private, &private); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + instanceState.Meta = private + + pmSchemaBlock := s.getProviderMetaSchemaBlock() + if pmSchemaBlock != nil && req.ProviderMeta != nil { + providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.Msgpack, pmSchemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + instanceState.ProviderMeta = providerSchemaVal + } + + newInstanceState, diags := res.RefreshWithoutUpgrade(ctx, instanceState, s.provider.Meta()) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + if diags.HasError() { + return resp, nil + } + + if newInstanceState == nil || newInstanceState.ID == "" { + // The old provider API used an empty id to signal that the remote + // object appears to have been deleted, but our new protocol expects + // to see a null value (in the cty sense) in that case. + newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil + } + + // helper/schema should always copy the ID over, but do it again just to be safe + newInstanceState.Attributes["id"] = newInstanceState.ID + + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, stateVal, false) + newStateVal = copyTimeoutValues(newStateVal, stateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + + return resp, nil +} + +func (s *GRPCProviderServer) PlanResourceChange(ctx context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) { + resp := &proto.PlanResourceChange_Response{} + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors. 
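ReadResource above translates the legacy SDK's empty-ID convention for a deleted remote object into what the protocol expects: a cty null of the resource's full implied type, msgpack-encoded. A sketch of just that encoding step, with a hypothetical two-attribute implied type:

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/go-cty/cty/msgpack"
)

func main() {
	// hypothetical implied type for a tiny resource schema
	ty := cty.Object(map[string]cty.Type{
		"id":  cty.String,
		"arn": cty.String,
	})

	// an empty ID from the legacy refresh means the remote object is gone;
	// the protocol expects a null value of the full object type in that case
	stateVal := cty.NullVal(ty)

	b, err := msgpack.Marshal(stateVal, ty)
	if err != nil {
		panic(err)
	}

	decoded, err := msgpack.Unmarshal(b, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println("null state round-trips:", decoded.IsNull()) // true
}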
+ resp.LegacyTypeSystem = true + + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + create := priorStateVal.IsNull() + + proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // We don't usually plan destroys, but this can return early in any case. + if proposedNewStateVal.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp, nil + } + + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorPrivate := make(map[string]interface{}) + if len(req.PriorPrivate) > 0 { + if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + priorState.Meta = priorPrivate + + pmSchemaBlock := s.getProviderMetaSchemaBlock() + if pmSchemaBlock != nil && req.ProviderMeta != nil { + providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.Msgpack, pmSchemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorState.ProviderMeta = providerSchemaVal + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(proposedNewStateVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // turn the proposed state into a legacy configuration + cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock) + + diff, err := res.SimpleDiff(ctx, priorState, cfg, s.provider.Meta()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // if this is a new instance, we need to make sure ID is going to be computed + if create { + if diff == nil { + diff = terraform.NewInstanceDiff() + } + + diff.Attributes["id"] = &terraform.ResourceAttrDiff{ + NewComputed: true, + } + } + + if diff == nil || len(diff.Attributes) == 0 { + // schema.Provider.Diff returns nil if it ends up making a diff with no + // changes, but our new interface wants us to return an actual change + // description that _shows_ there are no changes. This is always the + // prior state, because we force a diff above if this is a new instance. 
+ resp.PlannedState = req.PriorState + resp.PlannedPrivate = req.PriorPrivate + return resp, nil + } + + if priorState == nil { + priorState = &terraform.InstanceState{} + } + + // now we need to apply the diff to the prior state, so get the planned state + plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock) + + plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false) + + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal) + + // The old SDK code has some imprecisions that cause it to sometimes + // generate differences that the SDK itself does not consider significant + // but Terraform Core would. To avoid producing weird do-nothing diffs + // in that case, we'll check if the provider as produced something we + // think is "equivalent" to the prior state and just return the prior state + // itself if so, thus ensuring that Terraform Core will treat this as + // a no-op. See the docs for ValuesSDKEquivalent for some caveats on its + // accuracy. + forceNoChanges := false + if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) { + plannedStateVal = priorStateVal + forceNoChanges = true + } + + // if this was creating the resource, we need to set any remaining computed + // fields + if create { + plannedStateVal = SetUnknowns(plannedStateVal, schemaBlock) + } + + plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.PlannedState = &proto.DynamicValue{ + Msgpack: plannedMP, + } + + // encode any timeouts into the diff Meta + t := &schema.ResourceTimeout{} + if err := t.ConfigDecode(res, cfg); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + if err := t.DiffEncode(diff); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Now we need to store any NewExtra values, which are where any actual + // StateFunc modified config fields are hidden. + privateMap := diff.Meta + if privateMap == nil { + privateMap = map[string]interface{}{} + } + + newExtra := map[string]interface{}{} + + for k, v := range diff.Attributes { + if v.NewExtra != nil { + newExtra[k] = v.NewExtra + } + } + privateMap[newExtraKey] = newExtra + + // the Meta field gets encoded into PlannedPrivate + plannedPrivate, err := json.Marshal(privateMap) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.PlannedPrivate = plannedPrivate + + // collect the attributes that require instance replacement, and convert + // them to cty.Paths. 
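The NewExtra values and timeout metadata collected above travel between plan and apply as an opaque JSON blob: the diff's Meta map is marshaled into PlannedPrivate here, and ApplyResourceChange unmarshals PlannedPrivate back into a map before rebuilding the diff. A minimal sketch of that round trip; the key name and StateFunc output below are hypothetical (the real code uses the newExtraKey constant):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// plan side: stash per-attribute NewExtra values under a well-known key
	private := map[string]interface{}{
		"new_extra": map[string]interface{}{ // hypothetical key; see newExtraKey above
			"password": "hashed-by-statefunc", // hypothetical StateFunc output
		},
	}

	plannedPrivate, err := json.Marshal(private)
	if err != nil {
		panic(err)
	}

	// apply side: decode the blob and restore the NewExtra values onto the diff
	decoded := map[string]interface{}{}
	if err := json.Unmarshal(plannedPrivate, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", decoded["new_extra"])
}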
+ var requiresNew []string + if !forceNoChanges { + for attr, d := range diff.Attributes { + if d.RequiresNew { + requiresNew = append(requiresNew, attr) + } + } + } + + // If anything requires a new resource already, or the "id" field indicates + // that we will be creating a new resource, then we need to add that to + // RequiresReplace so that core can tell if the instance is being replaced + // even if changes are being suppressed via "ignore_changes". + id := plannedStateVal.GetAttr("id") + if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() { + requiresNew = append(requiresNew, "id") + } + + requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // convert these to the protocol structures + for _, p := range requiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p)) + } + + return resp, nil +} + +func (s *GRPCProviderServer) ApplyResourceChange(ctx context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) { + resp := &proto.ApplyResourceChange_Response{ + // Start with the existing state as a fallback + NewState: req.PriorState, + } + + res, ok := s.provider.ResourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown resource type: %s", req.TypeName)) + return resp, nil + } + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + private := make(map[string]interface{}) + if len(req.PlannedPrivate) > 0 { + if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + var diff *terraform.InstanceDiff + destroy := false + + // a null state means we are destroying the instance + if plannedStateVal.IsNull() { + destroy = true + diff = &terraform.InstanceDiff{ + Attributes: make(map[string]*terraform.ResourceAttrDiff), + Meta: make(map[string]interface{}), + Destroy: true, + } + } else { + diff, err = schema.DiffFromValues(ctx, priorStateVal, plannedStateVal, stripResourceModifiers(res)) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + if diff == nil { + diff = &terraform.InstanceDiff{ + Attributes: make(map[string]*terraform.ResourceAttrDiff), + Meta: make(map[string]interface{}), + } + } + + // add NewExtra Fields that may have been stored in the private data + if newExtra := private[newExtraKey]; newExtra != nil { + for k, v := range newExtra.(map[string]interface{}) { + d := diff.Attributes[k] + + if d == nil { + d = &terraform.ResourceAttrDiff{} + } + + d.NewExtra = v + diff.Attributes[k] = d + } + } + + if private != nil { + diff.Meta = private + } + + for k, d := range diff.Attributes { + // We need to turn off any RequiresNew. 
There could be attributes + // without changes in here inserted by helper/schema, but if they have + // RequiresNew then the state will be dropped from the ResourceData. + d.RequiresNew = false + + // Check that any "removed" attributes that don't actually exist in the + // prior state, or helper/schema will confuse itself + if d.NewRemoved { + if _, ok := priorState.Attributes[k]; !ok { + delete(diff.Attributes, k) + } + } + } + + pmSchemaBlock := s.getProviderMetaSchemaBlock() + if pmSchemaBlock != nil && req.ProviderMeta != nil { + providerSchemaVal, err := msgpack.Unmarshal(req.ProviderMeta.Msgpack, pmSchemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorState.ProviderMeta = providerSchemaVal + } + + newInstanceState, diags := res.Apply(ctx, priorState, diff, s.provider.Meta()) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + + newStateVal := cty.NullVal(schemaBlock.ImpliedType()) + + // Always return a null value for destroy. + // While this is usually indicated by a nil state, check for missing ID or + // attributes in the case of a provider failure. + if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" { + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil + } + + // We keep the null val if we destroyed the resource, otherwise build the + // entire object, even if the new state was nil. + newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, plannedStateVal, true) + + newStateVal = copyTimeoutValues(newStateVal, plannedStateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + + meta, err := json.Marshal(newInstanceState.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.Private = meta + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors. 
+ resp.LegacyTypeSystem = true + + return resp, nil +} + +func (s *GRPCProviderServer) ImportResourceState(ctx context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) { + resp := &proto.ImportResourceState_Response{} + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + newInstanceStates, err := s.provider.ImportState(ctx, info, req.Id) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + for _, is := range newInstanceStates { + // copy the ID again just to be sure it wasn't missed + is.Attributes["id"] = is.ID + + resourceType := is.Ephemeral.Type + if resourceType == "" { + resourceType = req.TypeName + } + + schemaBlock := s.getResourceSchemaBlock(resourceType) + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Normalize the value and fill in any missing blocks. + newStateVal = objchange.NormalizeObjectFromLegacySDK(newStateVal, schemaBlock) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + meta, err := json.Marshal(is.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + importedResource := &proto.ImportResourceState_ImportedResource{ + TypeName: resourceType, + State: &proto.DynamicValue{ + Msgpack: newStateMP, + }, + Private: meta, + } + + resp.ImportedResources = append(resp.ImportedResources, importedResource) + } + + return resp, nil +} + +func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) { + resp := &proto.ReadDataSource_Response{} + + schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. 
+ if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + // we need to still build the diff separately with the Read method to match + // the old behavior + res, ok := s.provider.DataSourcesMap[req.TypeName] + if !ok { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("unknown data source: %s", req.TypeName)) + return resp, nil + } + diff, err := res.Diff(ctx, nil, config, s.provider.Meta()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // now we can get the new complete data source + newInstanceState, diags := res.ReadDataApply(ctx, diff, s.provider.Meta()) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, diags) + if diags.HasError() { + return resp, nil + } + + newStateVal, err := schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = copyTimeoutValues(newStateVal, configVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.State = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil +} + +func pathToAttributePath(path cty.Path) *proto.AttributePath { + var steps []*proto.AttributePath_Step + + for _, step := range path { + switch s := step.(type) { + case cty.GetAttrStep: + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: s.Name, + }, + }) + case cty.IndexStep: + ty := s.Key.Type() + switch ty { + case cty.Number: + i, _ := s.Key.AsBigFloat().Int64() + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: i, + }, + }) + case cty.String: + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: s.Key.AsString(), + }, + }) + } + } + } + + return &proto.AttributePath{Steps: steps} +} + +// helper/schema throws away timeout values from the config and stores them in +// the Private/Meta fields. we need to copy those values into the planned state +// so that core doesn't see a perpetual diff with the timeout block. +func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value { + // if `to` is null we are planning to remove it altogether. + if to.IsNull() { + return to + } + toAttrs := to.AsValueMap() + // We need to remove the key since the hcl2shims will add a non-null block + // because we can't determine if a single block was null from the flatmapped + // values. This needs to conform to the correct schema for marshaling, so + // change the value to null rather than deleting it from the object map. 
+ timeouts, ok := toAttrs[schema.TimeoutsConfigKey] + if ok { + toAttrs[schema.TimeoutsConfigKey] = cty.NullVal(timeouts.Type()) + } + + // if from is null then there are no timeouts to copy + if from.IsNull() { + return cty.ObjectVal(toAttrs) + } + + fromAttrs := from.AsValueMap() + timeouts, ok = fromAttrs[schema.TimeoutsConfigKey] + + // timeouts shouldn't be unknown, but don't copy possibly invalid values either + if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() { + // no timeouts block to copy + return cty.ObjectVal(toAttrs) + } + + toAttrs[schema.TimeoutsConfigKey] = timeouts + + return cty.ObjectVal(toAttrs) +} + +// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all +// StateFuncs and CustomizeDiffs removed. This will be used during apply to +// create a diff from a planned state where the diff modifications have already +// been applied. +func stripResourceModifiers(r *schema.Resource) *schema.Resource { + if r == nil { + return nil + } + // start with a shallow copy + newResource := new(schema.Resource) + *newResource = *r + + newResource.CustomizeDiff = nil + newResource.Schema = map[string]*schema.Schema{} + + for k, s := range r.Schema { + newResource.Schema[k] = stripSchema(s) + } + + return newResource +} + +func stripSchema(s *schema.Schema) *schema.Schema { + if s == nil { + return nil + } + // start with a shallow copy + newSchema := new(schema.Schema) + *newSchema = *s + + newSchema.StateFunc = nil + + switch e := newSchema.Elem.(type) { + case *schema.Schema: + newSchema.Elem = stripSchema(e) + case *schema.Resource: + newSchema.Elem = stripResourceModifiers(e) + } + + return newSchema +} + +// Zero values and empty containers may be interchanged by the apply process. +// When there is a discrepency between src and dst value being null or empty, +// prefer the src value. This takes a little more liberty with set types, since +// we can't correlate modified set values. In the case of sets, if the src set +// was wholly known we assume the value was correctly applied and copy that +// entirely to the new value. +// While apply prefers the src value, during plan we prefer dst whenever there +// is an unknown or a set is involved, since the plan can alter the value +// however it sees fit. This however means that a CustomizeDiffFunction may not +// be able to change a null to an empty value or vice versa, but that should be +// very uncommon nor was it reliable before 0.12 either. +func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value { + ty := dst.Type() + if !src.IsNull() && !src.IsKnown() { + // Return src during plan to retain unknown interpolated placeholders, + // which could be lost if we're only updating a resource. If this is a + // read scenario, then there shouldn't be any unknowns at all. + if dst.IsNull() && !apply { + return src + } + return dst + } + + // Handle null/empty changes for collections during apply. + // A change between null and empty values prefers src to make sure the state + // is consistent between plan and apply. + if ty.IsCollectionType() && apply { + dstEmpty := !dst.IsNull() && dst.IsKnown() && dst.LengthInt() == 0 + srcEmpty := !src.IsNull() && src.IsKnown() && src.LengthInt() == 0 + + if (src.IsNull() && dstEmpty) || (srcEmpty && dst.IsNull()) { + return src + } + } + + // check the invariants that we need below, to ensure we are working with + // non-null and known values. 
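copyTimeoutValues above works by exploding both objects into value maps with AsValueMap, copying the timeouts attribute across when it is present and wholly known, and rebuilding the result with ObjectVal. The core go-cty moves, isolated into a sketch (the attribute names are hypothetical; the real key is schema.TimeoutsConfigKey):

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	planned := cty.ObjectVal(map[string]cty.Value{
		"name":     cty.StringVal("example"),
		"timeouts": cty.NullVal(cty.Map(cty.String)),
	})
	config := cty.ObjectVal(map[string]cty.Value{
		"name":     cty.StringVal("example"),
		"timeouts": cty.MapVal(map[string]cty.Value{"create": cty.StringVal("10m")}),
	})

	toAttrs := planned.AsValueMap()
	fromAttrs := config.AsValueMap()

	// copy the timeouts block from the config into the planned value so core
	// doesn't see a perpetual diff on it
	if t, ok := fromAttrs["timeouts"]; ok && !t.IsNull() && t.IsWhollyKnown() {
		toAttrs["timeouts"] = t
	}

	fmt.Println(cty.ObjectVal(toAttrs).GoString())
}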
+ if src.IsNull() || !src.IsKnown() || !dst.IsKnown() { + return dst + } + + switch { + case ty.IsMapType(), ty.IsObjectType(): + var dstMap map[string]cty.Value + if !dst.IsNull() { + dstMap = dst.AsValueMap() + } + if dstMap == nil { + dstMap = map[string]cty.Value{} + } + + srcMap := src.AsValueMap() + for key, v := range srcMap { + dstVal, ok := dstMap[key] + if !ok && apply && ty.IsMapType() { + // don't transfer old map values to dst during apply + continue + } + + if dstVal == cty.NilVal { + if !apply && ty.IsMapType() { + // let plan shape this map however it wants + continue + } + dstVal = cty.NullVal(v.Type()) + } + + dstMap[key] = normalizeNullValues(dstVal, v, apply) + } + + // you can't call MapVal/ObjectVal with empty maps, but nothing was + // copied in anyway. If the dst is nil, and the src is known, assume the + // src is correct. + if len(dstMap) == 0 { + if dst.IsNull() && src.IsWhollyKnown() && apply { + return src + } + return dst + } + + if ty.IsMapType() { + // helper/schema will populate an optional+computed map with + // unknowns which we have to fixup here. + // It would be preferable to simply prevent any known value from + // becoming unknown, but concessions have to be made to retain the + // broken legacy behavior when possible. + for k, srcVal := range srcMap { + if !srcVal.IsNull() && srcVal.IsKnown() { + dstVal, ok := dstMap[k] + if !ok { + continue + } + + if !dstVal.IsNull() && !dstVal.IsKnown() { + dstMap[k] = srcVal + } + } + } + + return cty.MapVal(dstMap) + } + + return cty.ObjectVal(dstMap) + + case ty.IsSetType(): + // If the original was wholly known, then we expect that is what the + // provider applied. The apply process loses too much information to + // reliably re-create the set. + if src.IsWhollyKnown() && apply { + return src + } + + case ty.IsListType(), ty.IsTupleType(): + // If the dst is null, and the src is known, then we lost an empty value + // so take the original. + if dst.IsNull() { + if src.IsWhollyKnown() && src.LengthInt() == 0 && apply { + return src + } + + // if dst is null and src only contains unknown values, then we lost + // those during a read or plan. + if !apply && !src.IsNull() { + allUnknown := true + for _, v := range src.AsValueSlice() { + if v.IsKnown() { + allUnknown = false + break + } + } + if allUnknown { + return src + } + } + + return dst + } + + // if the lengths are identical, then iterate over each element in succession. + srcLen := src.LengthInt() + dstLen := dst.LengthInt() + if srcLen == dstLen && srcLen > 0 { + srcs := src.AsValueSlice() + dsts := dst.AsValueSlice() + + for i := 0; i < srcLen; i++ { + dsts[i] = normalizeNullValues(dsts[i], srcs[i], apply) + } + + if ty.IsTupleType() { + return cty.TupleVal(dsts) + } + return cty.ListVal(dsts) + } + + case ty == cty.String: + // The legacy SDK should not be able to remove a value during plan or + // apply, however we are only going to overwrite this if the source was + // an empty string, since that is what is often equated with unset and + // lost in the diff process. + if dst.IsNull() && src.AsString() == "" { + return src + } + } + + return dst +} + +// validateConfigNulls checks a config value for unsupported nulls before +// attempting to shim the value. While null values can mostly be ignored in the +// configuration, since they're not supported in HCL1, the case where a null +// appears in a list-like attribute (list, set, tuple) will present a nil value +// to helper/schema which can panic. 
Return an error to the user in this case, +// indicating the attribute with the null value. +func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic { + var diags []*proto.Diagnostic + if v.IsNull() || !v.IsKnown() { + return diags + } + + switch { + case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + if ev.IsNull() { + // if this is a set, the kv is also going to be null which + // isn't a valid path element, so we can't append it to the + // diagnostic. + p := path + if !kv.IsNull() { + p = append(p, cty.IndexStep{Key: kv}) + } + + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "Null value found in list", + Detail: "Null values are not allowed for this attribute value.", + Attribute: convert.PathToAttributePath(p), + }) + continue + } + + d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv})) + diags = convert.AppendProtoDiag(diags, d) + } + + case v.Type().IsMapType() || v.Type().IsObjectType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + var step cty.PathStep + switch { + case v.Type().IsMapType(): + step = cty.IndexStep{Key: kv} + case v.Type().IsObjectType(): + step = cty.GetAttrStep{Name: kv.AsString()} + } + d := validateConfigNulls(ev, append(path, step)) + diags = convert.AppendProtoDiag(diags, d) + } + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/unknown.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/unknown.go new file mode 100644 index 000000000..fcf156b2d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin/unknown.go @@ -0,0 +1,132 @@ +package plugin + +import ( + "fmt" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// SetUnknowns takes a cty.Value, and compares it to the schema setting any null +// values which are computed to unknown. +func SetUnknowns(val cty.Value, schema *configschema.Block) cty.Value { + if !val.IsKnown() { + return val + } + + // If the object was null, we still need to handle the top level attributes + // which might be computed, but we don't need to expand the blocks. + if val.IsNull() { + objMap := map[string]cty.Value{} + allNull := true + for name, attr := range schema.Attributes { + switch { + case attr.Computed: + objMap[name] = cty.UnknownVal(attr.Type) + allNull = false + default: + objMap[name] = cty.NullVal(attr.Type) + } + } + + // If this object has no unknown attributes, then we can leave it null. + if allNull { + return val + } + + return cty.ObjectVal(objMap) + } + + valMap := val.AsValueMap() + newVals := make(map[string]cty.Value) + + for name, attr := range schema.Attributes { + v := valMap[name] + + if attr.Computed && v.IsNull() { + newVals[name] = cty.UnknownVal(attr.Type) + continue + } + + newVals[name] = v + } + + for name, blockS := range schema.BlockTypes { + blockVal := valMap[name] + if blockVal.IsNull() || !blockVal.IsKnown() { + newVals[name] = blockVal + continue + } + + blockValType := blockVal.Type() + blockElementType := blockS.Block.ImpliedType() + + // This switches on the value type here, so we can correctly switch + // between Tuples/Lists and Maps/Objects. 
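validateConfigNulls above only needs to flag nulls inside list-like values, since a null element would reach helper/schema as a nil and panic. Detecting that case with go-cty's ElementIterator looks roughly like this, for a hypothetical list attribute:

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	// hypothetical attribute value: a list with an explicit null element
	val := cty.ListVal([]cty.Value{
		cty.StringVal("subnet-a"),
		cty.NullVal(cty.String),
	})

	it := val.ElementIterator()
	for it.Next() {
		key, elem := it.Element()
		if elem.IsNull() {
			idx, _ := key.AsBigFloat().Int64()
			fmt.Printf("null value found at index %d\n", idx)
		}
	}
}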
+ switch { + case blockS.Nesting == configschema.NestingSingle || blockS.Nesting == configschema.NestingGroup: + // NestingSingle is the only exception here, where we treat the + // block directly as an object + newVals[name] = SetUnknowns(blockVal, &blockS.Block) + + case blockValType.IsSetType(), blockValType.IsListType(), blockValType.IsTupleType(): + listVals := blockVal.AsValueSlice() + newListVals := make([]cty.Value, 0, len(listVals)) + + for _, v := range listVals { + newListVals = append(newListVals, SetUnknowns(v, &blockS.Block)) + } + + switch { + case blockValType.IsSetType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.SetValEmpty(blockElementType) + default: + newVals[name] = cty.SetVal(newListVals) + } + case blockValType.IsListType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.ListValEmpty(blockElementType) + default: + newVals[name] = cty.ListVal(newListVals) + } + case blockValType.IsTupleType(): + newVals[name] = cty.TupleVal(newListVals) + } + + case blockValType.IsMapType(), blockValType.IsObjectType(): + mapVals := blockVal.AsValueMap() + newMapVals := make(map[string]cty.Value) + + for k, v := range mapVals { + newMapVals[k] = SetUnknowns(v, &blockS.Block) + } + + switch { + case blockValType.IsMapType(): + switch len(newMapVals) { + case 0: + newVals[name] = cty.MapValEmpty(blockElementType) + default: + newVals[name] = cty.MapVal(newMapVals) + } + case blockValType.IsObjectType(): + if len(newMapVals) == 0 { + // We need to populate empty values to make a valid object. + for attr, ty := range blockElementType.AttributeTypes() { + newMapVals[attr] = cty.NullVal(ty) + } + } + newVals[name] = cty.ObjectVal(newMapVals) + } + + default: + panic(fmt.Sprintf("failed to set unknown values for nested block %q:%#v", name, blockValType)) + } + } + + return cty.ObjectVal(newVals) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go new file mode 100644 index 000000000..0189190ed --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plans/objchange/normalize_obj.go @@ -0,0 +1,133 @@ +package objchange + +import ( + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// NormalizeObjectFromLegacySDK takes an object that may have been generated +// by the legacy Terraform SDK (i.e. returned from a provider with the +// LegacyTypeSystem opt-out set) and does its best to normalize it for the +// assumptions we would normally enforce if the provider had not opted out. +// +// In particular, this function guarantees that a value representing a nested +// block will never itself be unknown or null, instead representing that as +// a non-null value that may contain null/unknown values. +// +// The input value must still conform to the implied type of the given schema, +// or else this function may produce garbage results or panic. This is usually +// okay because type consistency is enforced when deserializing the value +// returned from the provider over the RPC wire protocol anyway. +func NormalizeObjectFromLegacySDK(val cty.Value, schema *configschema.Block) cty.Value { + if val == cty.NilVal || val.IsNull() { + // This should never happen in reasonable use, but we'll allow it + // and normalize to a null of the expected type rather than panicking + // below. 
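The attribute handling in SetUnknowns above turns a null value for a Computed attribute into an unknown, which is how the legacy shim expresses "known after apply". That per-attribute step, isolated with hypothetical schema flags:

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
)

func main() {
	// hypothetical attribute metadata: "arn" is computed, "name" is not
	computed := map[string]bool{"arn": true, "name": false}

	val := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("example"),
		"arn":  cty.NullVal(cty.String),
	})

	newVals := make(map[string]cty.Value)
	for name, v := range val.AsValueMap() {
		if computed[name] && v.IsNull() {
			// a computed attribute with no value yet becomes unknown
			newVals[name] = cty.UnknownVal(v.Type())
			continue
		}
		newVals[name] = v
	}

	out := cty.ObjectVal(newVals)
	fmt.Println(out.GetAttr("arn").IsKnown()) // false: known after apply
}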
+ return cty.NullVal(schema.ImpliedType()) + } + + vals := make(map[string]cty.Value) + for name := range schema.Attributes { + // No normalization for attributes, since them being type-conformant + // is all that we require. + vals[name] = val.GetAttr(name) + } + for name, blockS := range schema.BlockTypes { + lv := val.GetAttr(name) + + // Legacy SDK never generates dynamically-typed attributes and so our + // normalization code doesn't deal with them, but we need to make sure + // we still pass them through properly so that we don't interfere with + // objects generated by other SDKs. + if ty := blockS.Block.ImpliedType(); ty.HasDynamicTypes() { + vals[name] = lv + continue + } + + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + if lv.IsKnown() { + if lv.IsNull() && blockS.Nesting == configschema.NestingGroup { + vals[name] = blockS.EmptyValue() + } else { + vals[name] = NormalizeObjectFromLegacySDK(lv, &blockS.Block) + } + } else { + vals[name] = unknownBlockStub(&blockS.Block) + } + case configschema.NestingList: + switch { + case !lv.IsKnown(): + vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case lv.IsNull() || lv.LengthInt() == 0: + vals[name] = cty.ListValEmpty(blockS.Block.ImpliedType()) + default: + subVals := make([]cty.Value, 0, lv.LengthInt()) + for it := lv.ElementIterator(); it.Next(); { + _, subVal := it.Element() + subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block)) + } + vals[name] = cty.ListVal(subVals) + } + case configschema.NestingSet: + switch { + case !lv.IsKnown(): + vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case lv.IsNull() || lv.LengthInt() == 0: + vals[name] = cty.SetValEmpty(blockS.Block.ImpliedType()) + default: + subVals := make([]cty.Value, 0, lv.LengthInt()) + for it := lv.ElementIterator(); it.Next(); { + _, subVal := it.Element() + subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block)) + } + vals[name] = cty.SetVal(subVals) + } + default: + // The legacy SDK doesn't support NestingMap, so we just assume + // maps are always okay. (If not, we would've detected and returned + // an error to the user before we got here.) + vals[name] = lv + } + } + return cty.ObjectVal(vals) +} + +// unknownBlockStub constructs an object value that approximates an unknown +// block by producing a known block object with all of its leaf attribute +// values set to unknown. +// +// Blocks themselves cannot be unknown, so if the legacy SDK tries to return +// such a thing, we'll use this result instead. This convention mimics how +// the dynamic block feature deals with being asked to iterate over an unknown +// value, because our value-checking functions already accept this convention +// as a special case. +func unknownBlockStub(schema *configschema.Block) cty.Value { + vals := make(map[string]cty.Value) + for name, attrS := range schema.Attributes { + vals[name] = cty.UnknownVal(attrS.Type) + } + for name, blockS := range schema.BlockTypes { + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + vals[name] = unknownBlockStub(&blockS.Block) + case configschema.NestingList: + // In principle we may be expected to produce a tuple value here, + // if there are any dynamically-typed attributes in our nested block, + // but the legacy SDK doesn't support that, so we just assume it'll + // never be necessary to normalize those. 
(Incorrect usage in any + // other SDK would be caught and returned as an error before we + // get here.) + vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case configschema.NestingSet: + vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case configschema.NestingMap: + // A nesting map can never be unknown since we then wouldn't know + // what the keys are. (Legacy SDK doesn't support NestingMap anyway, + // so this should never arise.) + vals[name] = cty.MapValEmpty(blockS.Block.ImpliedType()) + } + } + return cty.ObjectVal(vals) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go new file mode 100644 index 000000000..d7bfb0676 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/diagnostics.go @@ -0,0 +1,142 @@ +package convert + +import ( + "fmt" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + proto "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5" +) + +// AppendProtoDiag appends a new diagnostic from a warning string or an error. +// This panics if d is not a string or error. +func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic { + switch d := d.(type) { + case cty.PathError: + ap := PathToAttributePath(d.Path) + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: d.Error(), + Attribute: ap, + }) + case diag.Diagnostics: + diags = append(diags, DiagsToProto(d)...) + case error: + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: d.Error(), + }) + case string: + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_WARNING, + Summary: d, + }) + case *proto.Diagnostic: + diags = append(diags, d) + case []*proto.Diagnostic: + diags = append(diags, d...) + } + return diags +} + +// ProtoToDiags converts a list of proto.Diagnostics to a diag.Diagnostics. +func ProtoToDiags(ds []*proto.Diagnostic) diag.Diagnostics { + var diags diag.Diagnostics + for _, d := range ds { + var severity diag.Severity + + switch d.Severity { + case proto.Diagnostic_ERROR: + severity = diag.Error + case proto.Diagnostic_WARNING: + severity = diag.Warning + } + + diags = append(diags, diag.Diagnostic{ + Severity: severity, + Summary: d.Summary, + Detail: d.Detail, + AttributePath: AttributePathToPath(d.Attribute), + }) + } + + return diags +} + +func DiagsToProto(diags diag.Diagnostics) []*proto.Diagnostic { + var ds []*proto.Diagnostic + for _, d := range diags { + if err := d.Validate(); err != nil { + panic(fmt.Errorf("Invalid diagnostic: %s. 
This is always a bug in the provider implementation", err)) + } + protoDiag := &proto.Diagnostic{ + Summary: d.Summary, + Detail: d.Detail, + Attribute: PathToAttributePath(d.AttributePath), + } + if d.Severity == diag.Error { + protoDiag.Severity = proto.Diagnostic_ERROR + } else if d.Severity == diag.Warning { + protoDiag.Severity = proto.Diagnostic_WARNING + } + ds = append(ds, protoDiag) + } + return ds +} + +// AttributePathToPath takes the proto encoded path and converts it to a cty.Path +func AttributePathToPath(ap *proto.AttributePath) cty.Path { + var p cty.Path + if ap == nil { + return p + } + for _, step := range ap.Steps { + switch selector := step.Selector.(type) { + case *proto.AttributePath_Step_AttributeName: + p = p.GetAttr(selector.AttributeName) + case *proto.AttributePath_Step_ElementKeyString: + p = p.Index(cty.StringVal(selector.ElementKeyString)) + case *proto.AttributePath_Step_ElementKeyInt: + p = p.Index(cty.NumberIntVal(selector.ElementKeyInt)) + } + } + return p +} + +// PathToAttributePath takes a cty.Path and converts it to a proto-encoded path. +func PathToAttributePath(p cty.Path) *proto.AttributePath { + ap := &proto.AttributePath{} + for _, step := range p { + switch selector := step.(type) { + case cty.GetAttrStep: + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: selector.Name, + }, + }) + case cty.IndexStep: + key := selector.Key + switch key.Type() { + case cty.String: + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: key.AsString(), + }, + }) + case cty.Number: + v, _ := key.AsBigFloat().Int64() + ap.Steps = append(ap.Steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: v, + }, + }) + default: + // We'll bail early if we encounter anything else, and just + // return the valid prefix. + return ap + } + } + } + return ap +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go new file mode 100644 index 000000000..4921ebab2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/plugin/convert/schema.go @@ -0,0 +1,183 @@ +package convert + +import ( + "encoding/json" + "log" + "reflect" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + proto "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5" +) + +// ConfigSchemaToProto takes a *configschema.Block and converts it to a +// proto.Schema_Block for a grpc response. 
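DiagsToProto and the path converters above are what carry a provider's diag.Diagnostics, including their cty.Path attribute references, over the wire. From the provider author's side this uses the public diag package; a sketch with hypothetical resource attribute names:

package main

import (
	"fmt"

	"github.com/hashicorp/go-cty/cty"
	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
)

func main() {
	diags := diag.Diagnostics{
		{
			Severity:      diag.Error,
			Summary:       "Invalid CIDR block",
			Detail:        "The value is not a valid IPv4 CIDR.",
			AttributePath: cty.Path{}.GetAttr("ingress").Index(cty.NumberIntVal(0)).GetAttr("cidr_block"),
		},
	}

	for _, d := range diags {
		fmt.Printf("%s: %s (%d path steps)\n", d.Summary, d.Detail, len(d.AttributePath))
	}
}

The convert layer above turns that AttributePath into proto steps with PathToAttributePath and reconstructs it on the other side with AttributePathToPath.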
+func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block { + block := &proto.Schema_Block{ + Description: b.Description, + DescriptionKind: protoStringKind(b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, name := range sortedKeys(b.Attributes) { + a := b.Attributes[name] + + attr := &proto.Schema_Attribute{ + Name: name, + Description: a.Description, + DescriptionKind: protoStringKind(a.DescriptionKind), + Optional: a.Optional, + Computed: a.Computed, + Required: a.Required, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + ty, err := json.Marshal(a.Type) + if err != nil { + panic(err) + } + + attr.Type = ty + + block.Attributes = append(block.Attributes, attr) + } + + for _, name := range sortedKeys(b.BlockTypes) { + b := b.BlockTypes[name] + block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b)) + } + + return block +} + +func protoStringKind(k configschema.StringKind) proto.StringKind { + switch k { + default: + log.Printf("[TRACE] unexpected configschema.StringKind: %d", k) + return proto.StringKind_PLAIN + case configschema.StringPlain: + return proto.StringKind_PLAIN + case configschema.StringMarkdown: + return proto.StringKind_MARKDOWN + } +} + +func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock { + var nesting proto.Schema_NestedBlock_NestingMode + switch b.Nesting { + case configschema.NestingSingle: + nesting = proto.Schema_NestedBlock_SINGLE + case configschema.NestingGroup: + nesting = proto.Schema_NestedBlock_GROUP + case configschema.NestingList: + nesting = proto.Schema_NestedBlock_LIST + case configschema.NestingSet: + nesting = proto.Schema_NestedBlock_SET + case configschema.NestingMap: + nesting = proto.Schema_NestedBlock_MAP + default: + nesting = proto.Schema_NestedBlock_INVALID + } + return &proto.Schema_NestedBlock{ + TypeName: name, + Block: ConfigSchemaToProto(&b.Block), + Nesting: nesting, + MinItems: int64(b.MinItems), + MaxItems: int64(b.MaxItems), + } +} + +// ProtoToConfigSchema takes the GetSchcema_Block from a grpc response and converts it +// to a terraform *configschema.Block. 
+func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block { + block := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + BlockTypes: make(map[string]*configschema.NestedBlock), + + Description: b.Description, + DescriptionKind: schemaStringKind(b.DescriptionKind), + Deprecated: b.Deprecated, + } + + for _, a := range b.Attributes { + attr := &configschema.Attribute{ + Description: a.Description, + DescriptionKind: schemaStringKind(a.DescriptionKind), + Required: a.Required, + Optional: a.Optional, + Computed: a.Computed, + Sensitive: a.Sensitive, + Deprecated: a.Deprecated, + } + + if err := json.Unmarshal(a.Type, &attr.Type); err != nil { + panic(err) + } + + block.Attributes[a.Name] = attr + } + + for _, b := range b.BlockTypes { + block.BlockTypes[b.TypeName] = schemaNestedBlock(b) + } + + return block +} + +func schemaStringKind(k proto.StringKind) configschema.StringKind { + switch k { + default: + log.Printf("[TRACE] unexpected proto.StringKind: %d", k) + return configschema.StringPlain + case proto.StringKind_PLAIN: + return configschema.StringPlain + case proto.StringKind_MARKDOWN: + return configschema.StringMarkdown + } +} + +func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch b.Nesting { + case proto.Schema_NestedBlock_SINGLE: + nesting = configschema.NestingSingle + case proto.Schema_NestedBlock_GROUP: + nesting = configschema.NestingGroup + case proto.Schema_NestedBlock_LIST: + nesting = configschema.NestingList + case proto.Schema_NestedBlock_MAP: + nesting = configschema.NestingMap + case proto.Schema_NestedBlock_SET: + nesting = configschema.NestingSet + default: + // In all other cases we'll leave it as the zero value (invalid) and + // let the caller validate it and deal with this. + } + + nb := &configschema.NestedBlock{ + Nesting: nesting, + MinItems: int(b.MinItems), + MaxItems: int(b.MaxItems), + } + + nested := ProtoToConfigSchema(b.Block) + nb.Block = *nested + return nb +} + +// sortedKeys returns the lexically sorted keys from the given map. This is +// used to make schema conversions are deterministic. This panics if map keys +// are not a string. +func sortedKeys(m interface{}) []string { + v := reflect.ValueOf(m) + keys := make([]string, v.Len()) + + mapKeys := v.MapKeys() + for i, k := range mapKeys { + keys[i] = k.Interface().(string) + } + + sort.Strings(keys) + return keys +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go new file mode 100644 index 000000000..2201d0c0b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/config_traversals.go @@ -0,0 +1,56 @@ +package tfdiags + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/hashicorp/go-cty/cty" +) + +// FormatCtyPath is a helper function to produce a user-friendly string +// representation of a cty.Path. The result uses a syntax similar to the +// HCL expression language in the hope of it being familiar to users. 
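sortedKeys above exists because Go randomizes map iteration order, while the proto schema conversion needs attributes and nested blocks emitted deterministically. The same idea without reflection, for a concretely typed map:

package main

import (
	"fmt"
	"sort"
)

func main() {
	attrs := map[string]string{
		"name": "required",
		"arn":  "computed",
		"tags": "optional",
	}

	keys := make([]string, 0, len(attrs))
	for k := range attrs {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	// deterministic output regardless of map iteration order
	for _, k := range keys {
		fmt.Println(k, "=>", attrs[k])
	}
}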
+func FormatCtyPath(path cty.Path) string { + var buf bytes.Buffer + for _, step := range path { + switch ts := step.(type) { + case cty.GetAttrStep: + fmt.Fprintf(&buf, ".%s", ts.Name) + case cty.IndexStep: + buf.WriteByte('[') + key := ts.Key + keyTy := key.Type() + switch { + case key.IsNull(): + buf.WriteString("null") + case !key.IsKnown(): + buf.WriteString("(not yet known)") + case keyTy == cty.Number: + bf := key.AsBigFloat() + buf.WriteString(bf.Text('g', -1)) + case keyTy == cty.String: + buf.WriteString(strconv.Quote(key.AsString())) + default: + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +// FormatError is a helper function to produce a user-friendly string +// representation of certain special error types that we might want to +// include in diagnostic messages. +// +// This currently has special behavior only for cty.PathError, where a +// non-empty path is rendered in a HCL-like syntax as context. +func FormatError(err error) string { + perr, ok := err.(cty.PathError) + if !ok || len(perr.Path) == 0 { + return err.Error() + } + + return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error()) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go new file mode 100644 index 000000000..b063bc1de --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/contextual.go @@ -0,0 +1,81 @@ +package tfdiags + +import ( + "github.com/hashicorp/go-cty/cty" +) + +// AttributeValue returns a diagnostic about an attribute value in an implied current +// configuration context. This should be returned only from functions whose +// interface specifies a clear configuration context that this will be +// resolved in. +// +// The given path is relative to the implied configuration context. To describe +// a top-level attribute, it should be a single-element cty.Path with a +// cty.GetAttrStep. It's assumed that the path is returning into a structure +// that would be produced by our conventions in the configschema package; it +// may return unexpected results for structures that can't be represented by +// configschema. +// +// Since mapping attribute paths back onto configuration is an imprecise +// operation (e.g. dynamic block generation may cause the same block to be +// evaluated multiple times) the diagnostic detail should include the attribute +// name and other context required to help the user understand what is being +// referenced in case the identified source range is not unique. +// +// The returned attribute will not have source location information until +// context is applied to the containing diagnostics using diags.InConfigBody. +// After context is applied, the source location is the value assigned to the +// named attribute, or the containing body's "missing item range" if no +// value is present. +func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic { + return &attributeDiagnostic{ + diagnosticBase: diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + }, + attrPath: attrPath, + } +} + +// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains +// one. Normally this is not accessed directly, and instead the config body is +// added to the Diagnostic to create a more complete message for the user. 
In
+// some cases however, we may want to know just the name of the attribute that
+// generated the Diagnostic message.
+// This returns a nil cty.Path if it does not exist in the Diagnostic.
+func GetAttribute(d Diagnostic) cty.Path {
+	if d, ok := d.(*attributeDiagnostic); ok {
+		return d.attrPath
+	}
+	return nil
+}
+
+type attributeDiagnostic struct {
+	diagnosticBase
+	attrPath cty.Path
+}
+
+// WholeContainingBody returns a diagnostic about the body that is an implied
+// current configuration context. This should be returned only from
+// functions whose interface specifies a clear configuration context that this
+// will be resolved in.
+//
+// The returned diagnostic will not have source location information until
+// context is applied to the containing diagnostics using diags.InConfigBody.
+// After context is applied, the source location is currently the missing item
+// range of the body. In future, this may change to some other suitable
+// part of the containing body.
+func WholeContainingBody(severity Severity, summary, detail string) Diagnostic {
+	return &wholeBodyDiagnostic{
+		diagnosticBase: diagnosticBase{
+			severity: severity,
+			summary:  summary,
+			detail:   detail,
+		},
+	}
+}
+
+type wholeBodyDiagnostic struct {
+	diagnosticBase
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go
new file mode 100644
index 000000000..a36da370f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic.go
@@ -0,0 +1,20 @@
+package tfdiags
+
+type Diagnostic interface {
+	Severity() Severity
+	Description() Description
+}
+
+type Severity rune
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=Severity
+
+const (
+	Error   Severity = 'E'
+	Warning Severity = 'W'
+)
+
+type Description struct {
+	Summary string
+	Detail  string
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go
new file mode 100644
index 000000000..348162087
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostic_base.go
@@ -0,0 +1,31 @@
+package tfdiags
+
+// diagnosticBase can be embedded in other diagnostic structs to get
+// default implementations of Severity and Description. This type also
+// has default implementations of Source that return no source
+// location or expression-related information, so embedders should generally
+// override those methods to return more useful results where possible.
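+//
+// For example (illustrative only), a custom diagnostic needs nothing more
+// than the embedded base plus its own fields, as attributeDiagnostic in
+// contextual.go does:
+//
+//	type exampleDiagnostic struct {
+//		diagnosticBase
+//		extra string
+//	}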
+type diagnosticBase struct {
+	severity Severity
+	summary  string
+	detail   string
+}
+
+func (d diagnosticBase) Severity() Severity {
+	return d.severity
+}
+
+func (d diagnosticBase) Description() Description {
+	return Description{
+		Summary: d.summary,
+		Detail:  d.detail,
+	}
+}
+
+func Diag(sev Severity, summary, detail string) Diagnostic {
+	return &diagnosticBase{
+		severity: sev,
+		summary:  summary,
+		detail:   detail,
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go
new file mode 100644
index 000000000..925c14fa7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/diagnostics.go
@@ -0,0 +1,211 @@
+package tfdiags
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+)
+
+// Diagnostics is a list of diagnostics. Diagnostics is intended to be used
+// where a Go "error" might normally be used, allowing richer information
+// to be conveyed (more context, support for warnings).
+//
+// A nil Diagnostics is a valid, empty diagnostics list, thus allowing
+// heap allocation to be avoided in the common case where there are no
+// diagnostics to report at all.
+type Diagnostics []Diagnostic
+
+// HasErrors returns true if any of the diagnostics in the list have
+// a severity of Error.
+func (diags Diagnostics) HasErrors() bool {
+	for _, diag := range diags {
+		if diag.Severity() == Error {
+			return true
+		}
+	}
+	return false
+}
+
+// ForRPC returns a version of the receiver that has been simplified so that
+// it is friendly to RPC protocols.
+//
+// Currently this means that it can be serialized with encoding/gob and
+// subsequently re-inflated. It may later grow to include other serialization
+// formats.
+//
+// Note that this loses information about the original objects used to
+// construct the diagnostics, so e.g. the errwrap API will not work as
+// expected on an error-wrapped Diagnostics that came from ForRPC.
+func (diags Diagnostics) ForRPC() Diagnostics {
+	ret := make(Diagnostics, len(diags))
+	for i := range diags {
+		ret[i] = makeRPCFriendlyDiag(diags[i])
+	}
+	return ret
+}
+
+// Err flattens a diagnostics list into a single Go error, or to nil
+// if the diagnostics list does not include any error-level diagnostics.
+//
+// This can be used to smuggle diagnostics through an API that deals in
+// native errors, but unfortunately it will lose naked warnings (warnings
+// that aren't accompanied by at least one error) since such APIs have no
+// mechanism through which to report these.
+//
+// return result, diags.Err()
+func (diags Diagnostics) Err() error {
+	if !diags.HasErrors() {
+		return nil
+	}
+	return diagnosticsAsError{diags}
+}
+
+// ErrWithWarnings is similar to Err except that it will also return a non-nil
+// error if the receiver contains only warnings.
+//
+// In the warnings-only situation, the result is guaranteed to be of dynamic
+// type NonFatalError, allowing diagnostics-aware callers to type-assert
+// and unwrap it, treating it as non-fatal.
+//
+// This should be used only in contexts where the caller is able to recognize
+// and handle NonFatalError. For normal callers that expect a lack of errors
+// to be signaled by nil, use just Diagnostics.Err.
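+//
+// A caller-side sketch (illustrative, not from the original file):
+//
+//	if err := diags.ErrWithWarnings(); err != nil {
+//		if nonFatal, ok := err.(NonFatalError); ok {
+//			// warnings only: report nonFatal.Error() and continue
+//		} else {
+//			return err // at least one error-level diagnostic
+//		}
+//	}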
+func (diags Diagnostics) ErrWithWarnings() error { + if len(diags) == 0 { + return nil + } + if diags.HasErrors() { + return diags.Err() + } + return NonFatalError{diags} +} + +// NonFatalErr is similar to Err except that it always returns either nil +// (if there are no diagnostics at all) or NonFatalError. +// +// This allows diagnostics to be returned over an error return channel while +// being explicit that the diagnostics should not halt processing. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. +func (diags Diagnostics) NonFatalErr() error { + if len(diags) == 0 { + return nil + } + return NonFatalError{diags} +} + +type diagnosticsAsError struct { + Diagnostics +} + +func (dae diagnosticsAsError) Error() string { + diags := dae.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. + return "no errors" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + for _, diag := range dae.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped +// diagnostics object can be picked apart by errwrap-aware code. +func (dae diagnosticsAsError) WrappedErrors() []error { + var errs []error + for _, diag := range dae.Diagnostics { + if wrapper, isErr := diag.(nativeError); isErr { + errs = append(errs, wrapper.err) + } + } + return errs +} + +// NonFatalError is a special error type, returned by +// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr, +// that indicates that the wrapped diagnostics should be treated as non-fatal. +// Callers can conditionally type-assert an error to this type in order to +// detect the non-fatal scenario and handle it in a different way. +type NonFatalError struct { + Diagnostics +} + +func (woe NonFatalError) Error() string { + diags := woe.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. 
+ return "no errors or warnings" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + if diags.HasErrors() { + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + } else { + fmt.Fprintf(&ret, "%d warnings:\n", len(diags)) + } + for _, diag := range woe.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// sortDiagnostics is an implementation of sort.Interface +type sortDiagnostics []Diagnostic + +var _ sort.Interface = sortDiagnostics(nil) + +func (sd sortDiagnostics) Len() int { + return len(sd) +} + +func (sd sortDiagnostics) Less(i, j int) bool { + iD, jD := sd[i], sd[j] + iSev, jSev := iD.Severity(), jD.Severity() + + switch { + case iSev != jSev: + return iSev == Warning + default: + // The remaining properties do not have a defined ordering, so + // we'll leave it unspecified. Since we use sort.Stable in + // the caller of this, the ordering of remaining items will + // be preserved. + return false + } +} + +func (sd sortDiagnostics) Swap(i, j int) { + sd[i], sd[j] = sd[j], sd[i] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/doc.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/doc.go new file mode 100644 index 000000000..c427879eb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/doc.go @@ -0,0 +1,16 @@ +// Package tfdiags is a utility package for representing errors and +// warnings in a manner that allows us to produce good messages for the +// user. +// +// "diag" is short for "diagnostics", and is meant as a general word for +// feedback to a user about potential or actual problems. +// +// A design goal for this package is for it to be able to provide rich +// messaging where possible but to also be pragmatic about dealing with +// generic errors produced by system components that _can't_ provide +// such rich messaging. As a consequence, the main types in this package -- +// Diagnostics and Diagnostic -- are designed so that they can be "smuggled" +// over an error channel and then be unpacked at the other end, so that +// error diagnostics (at least) can transit through APIs that are not +// aware of this package. 
+package tfdiags diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go new file mode 100644 index 000000000..b63c5d741 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/error.go @@ -0,0 +1,24 @@ +package tfdiags + +// nativeError is a Diagnostic implementation that wraps a normal Go error +type nativeError struct { + err error +} + +var _ Diagnostic = nativeError{} + +func (e nativeError) Severity() Severity { + return Error +} + +func (e nativeError) Description() Description { + return Description{ + Summary: FormatError(e.err), + } +} + +func FromError(err error) Diagnostic { + return &nativeError{ + err: err, + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go new file mode 100644 index 000000000..1b78887eb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/rpc_friendly.go @@ -0,0 +1,41 @@ +package tfdiags + +import ( + "encoding/gob" +) + +type rpcFriendlyDiag struct { + Severity_ Severity + Summary_ string + Detail_ string +} + +// rpcFriendlyDiag transforms a given diagnostic so that is more friendly to +// RPC. +// +// In particular, it currently returns an object that can be serialized and +// later re-inflated using gob. This definition may grow to include other +// serializations later. +func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic { + desc := diag.Description() + return &rpcFriendlyDiag{ + Severity_: diag.Severity(), + Summary_: desc.Summary, + Detail_: desc.Detail, + } +} + +func (d *rpcFriendlyDiag) Severity() Severity { + return d.Severity_ +} + +func (d *rpcFriendlyDiag) Description() Description { + return Description{ + Summary: d.Summary_, + Detail: d.Detail_, + } +} + +func init() { + gob.Register((*rpcFriendlyDiag)(nil)) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/severity_string.go new file mode 100644 index 000000000..78a721068 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/severity_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type=Severity"; DO NOT EDIT. + +package tfdiags + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Error-69] + _ = x[Warning-87] +} + +const ( + _Severity_name_0 = "Error" + _Severity_name_1 = "Warning" +) + +func (i Severity) String() string { + switch { + case i == 69: + return _Severity_name_0 + case i == 87: + return _Severity_name_1 + default: + return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go new file mode 100644 index 000000000..091917045 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags/simple_warning.go @@ -0,0 +1,20 @@ +package tfdiags + +type simpleWarning string + +var _ Diagnostic = simpleWarning("") + +// SimpleWarning constructs a simple (summary-only) warning diagnostic. 
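+// For example, SimpleWarning("deprecated attribute used") produces a
+// Diagnostic whose Severity is Warning and whose Description carries only
+// that summary text.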
+func SimpleWarning(msg string) Diagnostic { + return simpleWarning(msg) +} + +func (e simpleWarning) Severity() Severity { + return Warning +} + +func (e simpleWarning) Description() Description { + return Description{ + Summary: string(e), + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/generate.sh b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/generate.sh new file mode 100644 index 000000000..de1d693ca --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/generate.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# We do not run protoc under go:generate because we want to ensure that all +# dependencies of go:generate are "go get"-able for general dev environment +# usability. To compile all protobuf files in this repository, run +# "make protobuf" at the top-level. + +set -eu + +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + +cd "$DIR" + +protoc -I ./ tfplugin5.proto --go_out=plugins=grpc:./ diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/tfplugin5.pb.go new file mode 100644 index 000000000..84179725d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/tfplugin5.pb.go @@ -0,0 +1,3634 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tfplugin5.proto + +package tfplugin5 + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type StringKind int32 + +const ( + StringKind_PLAIN StringKind = 0 + StringKind_MARKDOWN StringKind = 1 +) + +var StringKind_name = map[int32]string{ + 0: "PLAIN", + 1: "MARKDOWN", +} + +var StringKind_value = map[string]int32{ + "PLAIN": 0, + "MARKDOWN": 1, +} + +func (x StringKind) String() string { + return proto.EnumName(StringKind_name, int32(x)) +} + +func (StringKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{0} +} + +type Diagnostic_Severity int32 + +const ( + Diagnostic_INVALID Diagnostic_Severity = 0 + Diagnostic_ERROR Diagnostic_Severity = 1 + Diagnostic_WARNING Diagnostic_Severity = 2 +) + +var Diagnostic_Severity_name = map[int32]string{ + 0: "INVALID", + 1: "ERROR", + 2: "WARNING", +} + +var Diagnostic_Severity_value = map[string]int32{ + "INVALID": 0, + "ERROR": 1, + "WARNING": 2, +} + +func (x Diagnostic_Severity) String() string { + return proto.EnumName(Diagnostic_Severity_name, int32(x)) +} + +func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{1, 0} +} + +type Schema_NestedBlock_NestingMode int32 + +const ( + Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0 + Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1 + Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2 + Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3 + Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4 + Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5 +) + +var Schema_NestedBlock_NestingMode_name = map[int32]string{ + 0: "INVALID", + 1: "SINGLE", + 2: "LIST", + 3: "SET", + 4: "MAP", + 5: "GROUP", +} + +var Schema_NestedBlock_NestingMode_value = map[string]int32{ + "INVALID": 0, + "SINGLE": 1, + "LIST": 2, + "SET": 3, + "MAP": 4, + "GROUP": 5, +} + +func (x Schema_NestedBlock_NestingMode) String() string { + return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x)) +} + +func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 2, 0} +} + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. 
+type DynamicValue struct { + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` + Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DynamicValue) Reset() { *m = DynamicValue{} } +func (m *DynamicValue) String() string { return proto.CompactTextString(m) } +func (*DynamicValue) ProtoMessage() {} +func (*DynamicValue) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{0} +} + +func (m *DynamicValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DynamicValue.Unmarshal(m, b) +} +func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic) +} +func (m *DynamicValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DynamicValue.Merge(m, src) +} +func (m *DynamicValue) XXX_Size() int { + return xxx_messageInfo_DynamicValue.Size(m) +} +func (m *DynamicValue) XXX_DiscardUnknown() { + xxx_messageInfo_DynamicValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DynamicValue proto.InternalMessageInfo + +func (m *DynamicValue) GetMsgpack() []byte { + if m != nil { + return m.Msgpack + } + return nil +} + +func (m *DynamicValue) GetJson() []byte { + if m != nil { + return m.Json + } + return nil +} + +type Diagnostic struct { + Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` + Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Diagnostic) Reset() { *m = Diagnostic{} } +func (m *Diagnostic) String() string { return proto.CompactTextString(m) } +func (*Diagnostic) ProtoMessage() {} +func (*Diagnostic) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{1} +} + +func (m *Diagnostic) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Diagnostic.Unmarshal(m, b) +} +func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic) +} +func (m *Diagnostic) XXX_Merge(src proto.Message) { + xxx_messageInfo_Diagnostic.Merge(m, src) +} +func (m *Diagnostic) XXX_Size() int { + return xxx_messageInfo_Diagnostic.Size(m) +} +func (m *Diagnostic) XXX_DiscardUnknown() { + xxx_messageInfo_Diagnostic.DiscardUnknown(m) +} + +var xxx_messageInfo_Diagnostic proto.InternalMessageInfo + +func (m *Diagnostic) GetSeverity() Diagnostic_Severity { + if m != nil { + return m.Severity + } + return Diagnostic_INVALID +} + +func (m *Diagnostic) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Diagnostic) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +func (m *Diagnostic) GetAttribute() *AttributePath { + if m != nil { + return m.Attribute + } + return nil +} + +type AttributePath struct { + Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*AttributePath) Reset() { *m = AttributePath{} } +func (m *AttributePath) String() string { return proto.CompactTextString(m) } +func (*AttributePath) ProtoMessage() {} +func (*AttributePath) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{2} +} + +func (m *AttributePath) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributePath.Unmarshal(m, b) +} +func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic) +} +func (m *AttributePath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributePath.Merge(m, src) +} +func (m *AttributePath) XXX_Size() int { + return xxx_messageInfo_AttributePath.Size(m) +} +func (m *AttributePath) XXX_DiscardUnknown() { + xxx_messageInfo_AttributePath.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributePath proto.InternalMessageInfo + +func (m *AttributePath) GetSteps() []*AttributePath_Step { + if m != nil { + return m.Steps + } + return nil +} + +type AttributePath_Step struct { + // Types that are valid to be assigned to Selector: + // *AttributePath_Step_AttributeName + // *AttributePath_Step_ElementKeyString + // *AttributePath_Step_ElementKeyInt + Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributePath_Step) Reset() { *m = AttributePath_Step{} } +func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) } +func (*AttributePath_Step) ProtoMessage() {} +func (*AttributePath_Step) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{2, 0} +} + +func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b) +} +func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic) +} +func (m *AttributePath_Step) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributePath_Step.Merge(m, src) +} +func (m *AttributePath_Step) XXX_Size() int { + return xxx_messageInfo_AttributePath_Step.Size(m) +} +func (m *AttributePath_Step) XXX_DiscardUnknown() { + xxx_messageInfo_AttributePath_Step.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributePath_Step proto.InternalMessageInfo + +type isAttributePath_Step_Selector interface { + isAttributePath_Step_Selector() +} + +type AttributePath_Step_AttributeName struct { + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyString struct { + ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyInt struct { + ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` +} + +func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} + +func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (m *AttributePath_Step) GetAttributeName() string { + if x, ok := m.GetSelector().(*AttributePath_Step_AttributeName); ok { + return x.AttributeName + } + return "" +} + +func (m 
*AttributePath_Step) GetElementKeyString() string { + if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyString); ok { + return x.ElementKeyString + } + return "" +} + +func (m *AttributePath_Step) GetElementKeyInt() int64 { + if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyInt); ok { + return x.ElementKeyInt + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AttributePath_Step) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AttributePath_Step_AttributeName)(nil), + (*AttributePath_Step_ElementKeyString)(nil), + (*AttributePath_Step_ElementKeyInt)(nil), + } +} + +type Stop struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop) Reset() { *m = Stop{} } +func (m *Stop) String() string { return proto.CompactTextString(m) } +func (*Stop) ProtoMessage() {} +func (*Stop) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{3} +} + +func (m *Stop) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop.Unmarshal(m, b) +} +func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop.Marshal(b, m, deterministic) +} +func (m *Stop) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop.Merge(m, src) +} +func (m *Stop) XXX_Size() int { + return xxx_messageInfo_Stop.Size(m) +} +func (m *Stop) XXX_DiscardUnknown() { + xxx_messageInfo_Stop.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop proto.InternalMessageInfo + +type Stop_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop_Request) Reset() { *m = Stop_Request{} } +func (m *Stop_Request) String() string { return proto.CompactTextString(m) } +func (*Stop_Request) ProtoMessage() {} +func (*Stop_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{3, 0} +} + +func (m *Stop_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop_Request.Unmarshal(m, b) +} +func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic) +} +func (m *Stop_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop_Request.Merge(m, src) +} +func (m *Stop_Request) XXX_Size() int { + return xxx_messageInfo_Stop_Request.Size(m) +} +func (m *Stop_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Stop_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop_Request proto.InternalMessageInfo + +type Stop_Response struct { + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop_Response) Reset() { *m = Stop_Response{} } +func (m *Stop_Response) String() string { return proto.CompactTextString(m) } +func (*Stop_Response) ProtoMessage() {} +func (*Stop_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{3, 1} +} + +func (m *Stop_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop_Response.Unmarshal(m, b) +} +func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic) +} +func (m *Stop_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop_Response.Merge(m, src) +} +func (m *Stop_Response) XXX_Size() 
int { + return xxx_messageInfo_Stop_Response.Size(m) +} +func (m *Stop_Response) XXX_DiscardUnknown() { + xxx_messageInfo_Stop_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop_Response proto.InternalMessageInfo + +func (m *Stop_Response) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. +type RawState struct { + Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` + Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawState) Reset() { *m = RawState{} } +func (m *RawState) String() string { return proto.CompactTextString(m) } +func (*RawState) ProtoMessage() {} +func (*RawState) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{4} +} + +func (m *RawState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RawState.Unmarshal(m, b) +} +func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RawState.Marshal(b, m, deterministic) +} +func (m *RawState) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawState.Merge(m, src) +} +func (m *RawState) XXX_Size() int { + return xxx_messageInfo_RawState.Size(m) +} +func (m *RawState) XXX_DiscardUnknown() { + xxx_messageInfo_RawState.DiscardUnknown(m) +} + +var xxx_messageInfo_RawState proto.InternalMessageInfo + +func (m *RawState) GetJson() []byte { + if m != nil { + return m.Json + } + return nil +} + +func (m *RawState) GetFlatmap() map[string]string { + if m != nil { + return m.Flatmap + } + return nil +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. +type Schema struct { + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Block is the top level configuration block for this schema. 
+ Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5} +} + +func (m *Schema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema.Unmarshal(m, b) +} +func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema.Marshal(b, m, deterministic) +} +func (m *Schema) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema.Merge(m, src) +} +func (m *Schema) XXX_Size() int { + return xxx_messageInfo_Schema.Size(m) +} +func (m *Schema) XXX_DiscardUnknown() { + xxx_messageInfo_Schema.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema proto.InternalMessageInfo + +func (m *Schema) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Schema) GetBlock() *Schema_Block { + if m != nil { + return m.Block + } + return nil +} + +type Schema_Block struct { + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + DescriptionKind StringKind `protobuf:"varint,5,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` + Deprecated bool `protobuf:"varint,6,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_Block) Reset() { *m = Schema_Block{} } +func (m *Schema_Block) String() string { return proto.CompactTextString(m) } +func (*Schema_Block) ProtoMessage() {} +func (*Schema_Block) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 0} +} + +func (m *Schema_Block) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_Block.Unmarshal(m, b) +} +func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic) +} +func (m *Schema_Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_Block.Merge(m, src) +} +func (m *Schema_Block) XXX_Size() int { + return xxx_messageInfo_Schema_Block.Size(m) +} +func (m *Schema_Block) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_Block proto.InternalMessageInfo + +func (m *Schema_Block) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Schema_Block) GetAttributes() []*Schema_Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Schema_Block) GetBlockTypes() []*Schema_NestedBlock { + if m != nil { + return m.BlockTypes + } + return nil +} + +func (m *Schema_Block) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema_Block) GetDescriptionKind() StringKind { + if m != nil { + return m.DescriptionKind + } + return StringKind_PLAIN +} + +func (m 
*Schema_Block) GetDeprecated() bool { + if m != nil { + return m.Deprecated + } + return false +} + +type Schema_Attribute struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"` + Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" json:"sensitive,omitempty"` + DescriptionKind StringKind `protobuf:"varint,8,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` + Deprecated bool `protobuf:"varint,9,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_Attribute) Reset() { *m = Schema_Attribute{} } +func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) } +func (*Schema_Attribute) ProtoMessage() {} +func (*Schema_Attribute) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 1} +} + +func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b) +} +func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic) +} +func (m *Schema_Attribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_Attribute.Merge(m, src) +} +func (m *Schema_Attribute) XXX_Size() int { + return xxx_messageInfo_Schema_Attribute.Size(m) +} +func (m *Schema_Attribute) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_Attribute.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_Attribute proto.InternalMessageInfo + +func (m *Schema_Attribute) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Schema_Attribute) GetType() []byte { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema_Attribute) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema_Attribute) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *Schema_Attribute) GetOptional() bool { + if m != nil { + return m.Optional + } + return false +} + +func (m *Schema_Attribute) GetComputed() bool { + if m != nil { + return m.Computed + } + return false +} + +func (m *Schema_Attribute) GetSensitive() bool { + if m != nil { + return m.Sensitive + } + return false +} + +func (m *Schema_Attribute) GetDescriptionKind() StringKind { + if m != nil { + return m.DescriptionKind + } + return StringKind_PLAIN +} + +func (m *Schema_Attribute) GetDeprecated() bool { + if m != nil { + return m.Deprecated + } + return false +} + +type Schema_NestedBlock struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` + MinItems int64 
`protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_NestedBlock) Reset() { *m = Schema_NestedBlock{} } +func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) } +func (*Schema_NestedBlock) ProtoMessage() {} +func (*Schema_NestedBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 2} +} + +func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b) +} +func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic) +} +func (m *Schema_NestedBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_NestedBlock.Merge(m, src) +} +func (m *Schema_NestedBlock) XXX_Size() int { + return xxx_messageInfo_Schema_NestedBlock.Size(m) +} +func (m *Schema_NestedBlock) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_NestedBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_NestedBlock proto.InternalMessageInfo + +func (m *Schema_NestedBlock) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *Schema_NestedBlock) GetBlock() *Schema_Block { + if m != nil { + return m.Block + } + return nil +} + +func (m *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode { + if m != nil { + return m.Nesting + } + return Schema_NestedBlock_INVALID +} + +func (m *Schema_NestedBlock) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema_NestedBlock) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +type GetProviderSchema struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema) Reset() { *m = GetProviderSchema{} } +func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema) ProtoMessage() {} +func (*GetProviderSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{6} +} + +func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b) +} +func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic) +} +func (m *GetProviderSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema.Merge(m, src) +} +func (m *GetProviderSchema) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema.Size(m) +} +func (m *GetProviderSchema) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema proto.InternalMessageInfo + +type GetProviderSchema_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema_Request) Reset() { *m = GetProviderSchema_Request{} } +func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema_Request) ProtoMessage() {} +func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{6, 0} 
+} + +func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b) +} +func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic) +} +func (m *GetProviderSchema_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema_Request.Merge(m, src) +} +func (m *GetProviderSchema_Request) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema_Request.Size(m) +} +func (m *GetProviderSchema_Request) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema_Request proto.InternalMessageInfo + +type GetProviderSchema_Response struct { + Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema_Response) Reset() { *m = GetProviderSchema_Response{} } +func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema_Response) ProtoMessage() {} +func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{6, 1} +} + +func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b) +} +func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic) +} +func (m *GetProviderSchema_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema_Response.Merge(m, src) +} +func (m *GetProviderSchema_Response) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema_Response.Size(m) +} +func (m *GetProviderSchema_Response) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema_Response proto.InternalMessageInfo + +func (m *GetProviderSchema_Response) GetProvider() *Schema { + if m != nil { + return m.Provider + } + return nil +} + +func (m *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema { + if m != nil { + return m.ResourceSchemas + } + return nil +} + +func (m *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema { + if m != nil { + return m.DataSourceSchemas + } + return nil +} + +func (m *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *GetProviderSchema_Response) GetProviderMeta() *Schema { + if m != nil { + return m.ProviderMeta + } + return nil +} + +type 
PrepareProviderConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig) Reset() { *m = PrepareProviderConfig{} } +func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig) ProtoMessage() {} +func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{7} +} + +func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b) +} +func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic) +} +func (m *PrepareProviderConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig.Merge(m, src) +} +func (m *PrepareProviderConfig) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig.Size(m) +} +func (m *PrepareProviderConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig proto.InternalMessageInfo + +type PrepareProviderConfig_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig_Request) Reset() { *m = PrepareProviderConfig_Request{} } +func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig_Request) ProtoMessage() {} +func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{7, 0} +} + +func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b) +} +func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic) +} +func (m *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig_Request.Merge(m, src) +} +func (m *PrepareProviderConfig_Request) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig_Request.Size(m) +} +func (m *PrepareProviderConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig_Request proto.InternalMessageInfo + +func (m *PrepareProviderConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type PrepareProviderConfig_Response struct { + PreparedConfig *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig_Response) Reset() { *m = PrepareProviderConfig_Response{} } +func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig_Response) ProtoMessage() {} +func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{7, 1} +} + +func (m 
*PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b) +} +func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic) +} +func (m *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig_Response.Merge(m, src) +} +func (m *PrepareProviderConfig_Response) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig_Response.Size(m) +} +func (m *PrepareProviderConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig_Response proto.InternalMessageInfo + +func (m *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue { + if m != nil { + return m.PreparedConfig + } + return nil +} + +func (m *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type UpgradeResourceState struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState) Reset() { *m = UpgradeResourceState{} } +func (m *UpgradeResourceState) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState) ProtoMessage() {} +func (*UpgradeResourceState) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{8} +} + +func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b) +} +func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic) +} +func (m *UpgradeResourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState.Merge(m, src) +} +func (m *UpgradeResourceState) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState.Size(m) +} +func (m *UpgradeResourceState) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState proto.InternalMessageInfo + +type UpgradeResourceState_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + // version is the schema_version number recorded in the state file + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + // raw_state is the raw states as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. 
+ RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState_Request) Reset() { *m = UpgradeResourceState_Request{} } +func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState_Request) ProtoMessage() {} +func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{8, 0} +} + +func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b) +} +func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic) +} +func (m *UpgradeResourceState_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState_Request.Merge(m, src) +} +func (m *UpgradeResourceState_Request) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState_Request.Size(m) +} +func (m *UpgradeResourceState_Request) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState_Request proto.InternalMessageInfo + +func (m *UpgradeResourceState_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *UpgradeResourceState_Request) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *UpgradeResourceState_Request) GetRawState() *RawState { + if m != nil { + return m.RawState + } + return nil +} + +type UpgradeResourceState_Response struct { + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw. + UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"` + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. 
+ Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState_Response) Reset() { *m = UpgradeResourceState_Response{} } +func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState_Response) ProtoMessage() {} +func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{8, 1} +} + +func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b) +} +func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic) +} +func (m *UpgradeResourceState_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState_Response.Merge(m, src) +} +func (m *UpgradeResourceState_Response) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState_Response.Size(m) +} +func (m *UpgradeResourceState_Response) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState_Response proto.InternalMessageInfo + +func (m *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { + if m != nil { + return m.UpgradedState + } + return nil +} + +func (m *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ValidateResourceTypeConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig) Reset() { *m = ValidateResourceTypeConfig{} } +func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig) ProtoMessage() {} +func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{9} +} + +func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic) +} +func (m *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig.Merge(m, src) +} +func (m *ValidateResourceTypeConfig) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig.Size(m) +} +func (m *ValidateResourceTypeConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig proto.InternalMessageInfo + +type ValidateResourceTypeConfig_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig_Request) Reset() { *m = ValidateResourceTypeConfig_Request{} } +func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) } +func 
(*ValidateResourceTypeConfig_Request) ProtoMessage() {} +func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{9, 0} +} + +func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(m, src) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m) +} +func (m *ValidateResourceTypeConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig_Request proto.InternalMessageInfo + +func (m *ValidateResourceTypeConfig_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateResourceTypeConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig_Response) Reset() { *m = ValidateResourceTypeConfig_Response{} } +func (m *ValidateResourceTypeConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} +func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{9, 1} +} + +func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(m, src) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m) +} +func (m *ValidateResourceTypeConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig_Response proto.InternalMessageInfo + +func (m *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ValidateDataSourceConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig) Reset() { *m = ValidateDataSourceConfig{} } +func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig) ProtoMessage() {} +func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{10} +} + +func (m *ValidateDataSourceConfig) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic) +} +func (m *ValidateDataSourceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig.Merge(m, src) +} +func (m *ValidateDataSourceConfig) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig.Size(m) +} +func (m *ValidateDataSourceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig proto.InternalMessageInfo + +type ValidateDataSourceConfig_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig_Request) Reset() { *m = ValidateDataSourceConfig_Request{} } +func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig_Request) ProtoMessage() {} +func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{10, 0} +} + +func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic) +} +func (m *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(m, src) +} +func (m *ValidateDataSourceConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m) +} +func (m *ValidateDataSourceConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig_Request proto.InternalMessageInfo + +func (m *ValidateDataSourceConfig_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateDataSourceConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig_Response) Reset() { *m = ValidateDataSourceConfig_Response{} } +func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig_Response) ProtoMessage() {} +func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{10, 1} +} + +func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic) +} +func (m *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(m, src) +} +func (m *ValidateDataSourceConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m) +} +func (m *ValidateDataSourceConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig_Response proto.InternalMessageInfo + +func (m *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type Configure struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure) Reset() { *m = Configure{} } +func (m *Configure) String() string { return proto.CompactTextString(m) } +func (*Configure) ProtoMessage() {} +func (*Configure) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{11} +} + +func (m *Configure) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure.Unmarshal(m, b) +} +func (m *Configure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure.Marshal(b, m, deterministic) +} +func (m *Configure) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure.Merge(m, src) +} +func (m *Configure) XXX_Size() int { + return xxx_messageInfo_Configure.Size(m) +} +func (m *Configure) XXX_DiscardUnknown() { + xxx_messageInfo_Configure.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure proto.InternalMessageInfo + +type Configure_Request struct { + TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure_Request) Reset() { *m = Configure_Request{} } +func (m *Configure_Request) String() string { return proto.CompactTextString(m) } +func (*Configure_Request) ProtoMessage() {} +func (*Configure_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{11, 0} +} + +func (m *Configure_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure_Request.Unmarshal(m, b) +} +func (m *Configure_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure_Request.Marshal(b, m, deterministic) +} +func (m *Configure_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure_Request.Merge(m, src) +} +func (m *Configure_Request) XXX_Size() int { + return xxx_messageInfo_Configure_Request.Size(m) +} +func (m *Configure_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Configure_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure_Request proto.InternalMessageInfo + +func (m *Configure_Request) GetTerraformVersion() string { + if m != nil { + return m.TerraformVersion + } + return "" +} + +func (m *Configure_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type Configure_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` 
+ XXX_sizecache int32 `json:"-"` +} + +func (m *Configure_Response) Reset() { *m = Configure_Response{} } +func (m *Configure_Response) String() string { return proto.CompactTextString(m) } +func (*Configure_Response) ProtoMessage() {} +func (*Configure_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{11, 1} +} + +func (m *Configure_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure_Response.Unmarshal(m, b) +} +func (m *Configure_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure_Response.Marshal(b, m, deterministic) +} +func (m *Configure_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure_Response.Merge(m, src) +} +func (m *Configure_Response) XXX_Size() int { + return xxx_messageInfo_Configure_Response.Size(m) +} +func (m *Configure_Response) XXX_DiscardUnknown() { + xxx_messageInfo_Configure_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure_Response proto.InternalMessageInfo + +func (m *Configure_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ReadResource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource) Reset() { *m = ReadResource{} } +func (m *ReadResource) String() string { return proto.CompactTextString(m) } +func (*ReadResource) ProtoMessage() {} +func (*ReadResource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{12} +} + +func (m *ReadResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource.Unmarshal(m, b) +} +func (m *ReadResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResource.Marshal(b, m, deterministic) +} +func (m *ReadResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource.Merge(m, src) +} +func (m *ReadResource) XXX_Size() int { + return xxx_messageInfo_ReadResource.Size(m) +} +func (m *ReadResource) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource proto.InternalMessageInfo + +type ReadResource_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,4,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource_Request) Reset() { *m = ReadResource_Request{} } +func (m *ReadResource_Request) String() string { return proto.CompactTextString(m) } +func (*ReadResource_Request) ProtoMessage() {} +func (*ReadResource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{12, 0} +} + +func (m *ReadResource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource_Request.Unmarshal(m, b) +} +func (m *ReadResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResource_Request.Marshal(b, m, deterministic) +} +func (m *ReadResource_Request) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ReadResource_Request.Merge(m, src) +} +func (m *ReadResource_Request) XXX_Size() int { + return xxx_messageInfo_ReadResource_Request.Size(m) +} +func (m *ReadResource_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource_Request proto.InternalMessageInfo + +func (m *ReadResource_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ReadResource_Request) GetCurrentState() *DynamicValue { + if m != nil { + return m.CurrentState + } + return nil +} + +func (m *ReadResource_Request) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +func (m *ReadResource_Request) GetProviderMeta() *DynamicValue { + if m != nil { + return m.ProviderMeta + } + return nil +} + +type ReadResource_Response struct { + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource_Response) Reset() { *m = ReadResource_Response{} } +func (m *ReadResource_Response) String() string { return proto.CompactTextString(m) } +func (*ReadResource_Response) ProtoMessage() {} +func (*ReadResource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{12, 1} +} + +func (m *ReadResource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource_Response.Unmarshal(m, b) +} +func (m *ReadResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResource_Response.Marshal(b, m, deterministic) +} +func (m *ReadResource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource_Response.Merge(m, src) +} +func (m *ReadResource_Response) XXX_Size() int { + return xxx_messageInfo_ReadResource_Response.Size(m) +} +func (m *ReadResource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource_Response proto.InternalMessageInfo + +func (m *ReadResource_Response) GetNewState() *DynamicValue { + if m != nil { + return m.NewState + } + return nil +} + +func (m *ReadResource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *ReadResource_Response) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +type PlanResourceChange struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange) Reset() { *m = PlanResourceChange{} } +func (m *PlanResourceChange) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange) ProtoMessage() {} +func (*PlanResourceChange) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{13} +} + +func (m *PlanResourceChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange.Unmarshal(m, b) +} +func (m *PlanResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange.Marshal(b, m, deterministic) +} +func (m *PlanResourceChange) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_PlanResourceChange.Merge(m, src) +} +func (m *PlanResourceChange) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange.Size(m) +} +func (m *PlanResourceChange) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange proto.InternalMessageInfo + +type PlanResourceChange_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange_Request) Reset() { *m = PlanResourceChange_Request{} } +func (m *PlanResourceChange_Request) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange_Request) ProtoMessage() {} +func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{13, 0} +} + +func (m *PlanResourceChange_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange_Request.Unmarshal(m, b) +} +func (m *PlanResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange_Request.Marshal(b, m, deterministic) +} +func (m *PlanResourceChange_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange_Request.Merge(m, src) +} +func (m *PlanResourceChange_Request) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange_Request.Size(m) +} +func (m *PlanResourceChange_Request) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange_Request proto.InternalMessageInfo + +func (m *PlanResourceChange_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *PlanResourceChange_Request) GetPriorState() *DynamicValue { + if m != nil { + return m.PriorState + } + return nil +} + +func (m *PlanResourceChange_Request) GetProposedNewState() *DynamicValue { + if m != nil { + return m.ProposedNewState + } + return nil +} + +func (m *PlanResourceChange_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *PlanResourceChange_Request) GetPriorPrivate() []byte { + if m != nil { + return m.PriorPrivate + } + return nil +} + +func (m *PlanResourceChange_Request) GetProviderMeta() *DynamicValue { + if m != nil { + return m.ProviderMeta + } + return nil +} + +type PlanResourceChange_Response struct { + PlannedState *DynamicValue `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" 
json:"planned_private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange_Response) Reset() { *m = PlanResourceChange_Response{} } +func (m *PlanResourceChange_Response) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange_Response) ProtoMessage() {} +func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{13, 1} +} + +func (m *PlanResourceChange_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange_Response.Unmarshal(m, b) +} +func (m *PlanResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange_Response.Marshal(b, m, deterministic) +} +func (m *PlanResourceChange_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange_Response.Merge(m, src) +} +func (m *PlanResourceChange_Response) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange_Response.Size(m) +} +func (m *PlanResourceChange_Response) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange_Response proto.InternalMessageInfo + +func (m *PlanResourceChange_Response) GetPlannedState() *DynamicValue { + if m != nil { + return m.PlannedState + } + return nil +} + +func (m *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath { + if m != nil { + return m.RequiresReplace + } + return nil +} + +func (m *PlanResourceChange_Response) GetPlannedPrivate() []byte { + if m != nil { + return m.PlannedPrivate + } + return nil +} + +func (m *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *PlanResourceChange_Response) GetLegacyTypeSystem() bool { + if m != nil { + return m.LegacyTypeSystem + } + return false +} + +type ApplyResourceChange struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange) Reset() { *m = ApplyResourceChange{} } +func (m *ApplyResourceChange) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange) ProtoMessage() {} +func (*ApplyResourceChange) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{14} +} + +func (m *ApplyResourceChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange.Unmarshal(m, b) +} +func (m *ApplyResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_ApplyResourceChange.Marshal(b, m, deterministic) +} +func (m *ApplyResourceChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange.Merge(m, src) +} +func (m *ApplyResourceChange) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange.Size(m) +} +func (m *ApplyResourceChange) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange proto.InternalMessageInfo + +type ApplyResourceChange_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange_Request) Reset() { *m = ApplyResourceChange_Request{} } +func (m *ApplyResourceChange_Request) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange_Request) ProtoMessage() {} +func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{14, 0} +} + +func (m *ApplyResourceChange_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange_Request.Unmarshal(m, b) +} +func (m *ApplyResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyResourceChange_Request.Marshal(b, m, deterministic) +} +func (m *ApplyResourceChange_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange_Request.Merge(m, src) +} +func (m *ApplyResourceChange_Request) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange_Request.Size(m) +} +func (m *ApplyResourceChange_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange_Request proto.InternalMessageInfo + +func (m *ApplyResourceChange_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ApplyResourceChange_Request) GetPriorState() *DynamicValue { + if m != nil { + return m.PriorState + } + return nil +} + +func (m *ApplyResourceChange_Request) GetPlannedState() *DynamicValue { + if m != nil { + return m.PlannedState + } + return nil +} + +func (m *ApplyResourceChange_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *ApplyResourceChange_Request) GetPlannedPrivate() []byte { + if m != nil { + return m.PlannedPrivate + } + return nil +} + +func (m *ApplyResourceChange_Request) GetProviderMeta() *DynamicValue { + if m != nil { + return m.ProviderMeta + } + return nil +} + +type ApplyResourceChange_Response struct { + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"` + Diagnostics []*Diagnostic 
`protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange_Response) Reset() { *m = ApplyResourceChange_Response{} } +func (m *ApplyResourceChange_Response) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange_Response) ProtoMessage() {} +func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{14, 1} +} + +func (m *ApplyResourceChange_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange_Response.Unmarshal(m, b) +} +func (m *ApplyResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyResourceChange_Response.Marshal(b, m, deterministic) +} +func (m *ApplyResourceChange_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange_Response.Merge(m, src) +} +func (m *ApplyResourceChange_Response) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange_Response.Size(m) +} +func (m *ApplyResourceChange_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange_Response proto.InternalMessageInfo + +func (m *ApplyResourceChange_Response) GetNewState() *DynamicValue { + if m != nil { + return m.NewState + } + return nil +} + +func (m *ApplyResourceChange_Response) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +func (m *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { + if m != nil { + return m.LegacyTypeSystem + } + return false +} + +type ImportResourceState struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState) Reset() { *m = ImportResourceState{} } +func (m *ImportResourceState) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState) ProtoMessage() {} +func (*ImportResourceState) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15} +} + +func (m *ImportResourceState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState.Unmarshal(m, b) +} +func (m *ImportResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState.Marshal(b, m, deterministic) +} +func (m *ImportResourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState.Merge(m, src) +} +func (m *ImportResourceState) XXX_Size() 
int { + return xxx_messageInfo_ImportResourceState.Size(m) +} +func (m *ImportResourceState) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState proto.InternalMessageInfo + +type ImportResourceState_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState_Request) Reset() { *m = ImportResourceState_Request{} } +func (m *ImportResourceState_Request) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_Request) ProtoMessage() {} +func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15, 0} +} + +func (m *ImportResourceState_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_Request.Unmarshal(m, b) +} +func (m *ImportResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_Request.Marshal(b, m, deterministic) +} +func (m *ImportResourceState_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_Request.Merge(m, src) +} +func (m *ImportResourceState_Request) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_Request.Size(m) +} +func (m *ImportResourceState_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_Request proto.InternalMessageInfo + +func (m *ImportResourceState_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ImportResourceState_Request) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type ImportResourceState_ImportedResource struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState_ImportedResource) Reset() { *m = ImportResourceState_ImportedResource{} } +func (m *ImportResourceState_ImportedResource) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_ImportedResource) ProtoMessage() {} +func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15, 1} +} + +func (m *ImportResourceState_ImportedResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_ImportedResource.Unmarshal(m, b) +} +func (m *ImportResourceState_ImportedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_ImportedResource.Marshal(b, m, deterministic) +} +func (m *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_ImportedResource.Merge(m, src) +} +func (m *ImportResourceState_ImportedResource) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_ImportedResource.Size(m) +} +func (m *ImportResourceState_ImportedResource) XXX_DiscardUnknown() { + 
xxx_messageInfo_ImportResourceState_ImportedResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_ImportedResource proto.InternalMessageInfo + +func (m *ImportResourceState_ImportedResource) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ImportResourceState_ImportedResource) GetState() *DynamicValue { + if m != nil { + return m.State + } + return nil +} + +func (m *ImportResourceState_ImportedResource) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +type ImportResourceState_Response struct { + ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState_Response) Reset() { *m = ImportResourceState_Response{} } +func (m *ImportResourceState_Response) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_Response) ProtoMessage() {} +func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15, 2} +} + +func (m *ImportResourceState_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_Response.Unmarshal(m, b) +} +func (m *ImportResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_Response.Marshal(b, m, deterministic) +} +func (m *ImportResourceState_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_Response.Merge(m, src) +} +func (m *ImportResourceState_Response) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_Response.Size(m) +} +func (m *ImportResourceState_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_Response proto.InternalMessageInfo + +func (m *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { + if m != nil { + return m.ImportedResources + } + return nil +} + +func (m *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ReadDataSource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource) Reset() { *m = ReadDataSource{} } +func (m *ReadDataSource) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource) ProtoMessage() {} +func (*ReadDataSource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{16} +} + +func (m *ReadDataSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource.Unmarshal(m, b) +} +func (m *ReadDataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource.Marshal(b, m, deterministic) +} +func (m *ReadDataSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource.Merge(m, src) +} +func (m *ReadDataSource) XXX_Size() int { + return xxx_messageInfo_ReadDataSource.Size(m) +} +func (m *ReadDataSource) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource proto.InternalMessageInfo + +type 
ReadDataSource_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + ProviderMeta *DynamicValue `protobuf:"bytes,3,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource_Request) Reset() { *m = ReadDataSource_Request{} } +func (m *ReadDataSource_Request) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource_Request) ProtoMessage() {} +func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{16, 0} +} + +func (m *ReadDataSource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource_Request.Unmarshal(m, b) +} +func (m *ReadDataSource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource_Request.Marshal(b, m, deterministic) +} +func (m *ReadDataSource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource_Request.Merge(m, src) +} +func (m *ReadDataSource_Request) XXX_Size() int { + return xxx_messageInfo_ReadDataSource_Request.Size(m) +} +func (m *ReadDataSource_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource_Request proto.InternalMessageInfo + +func (m *ReadDataSource_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ReadDataSource_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *ReadDataSource_Request) GetProviderMeta() *DynamicValue { + if m != nil { + return m.ProviderMeta + } + return nil +} + +type ReadDataSource_Response struct { + State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource_Response) Reset() { *m = ReadDataSource_Response{} } +func (m *ReadDataSource_Response) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource_Response) ProtoMessage() {} +func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{16, 1} +} + +func (m *ReadDataSource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource_Response.Unmarshal(m, b) +} +func (m *ReadDataSource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource_Response.Marshal(b, m, deterministic) +} +func (m *ReadDataSource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource_Response.Merge(m, src) +} +func (m *ReadDataSource_Response) XXX_Size() int { + return xxx_messageInfo_ReadDataSource_Response.Size(m) +} +func (m *ReadDataSource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource_Response proto.InternalMessageInfo + +func (m *ReadDataSource_Response) GetState() *DynamicValue { + if m != nil { + return m.State + } + return nil +} + +func (m *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return 
m.Diagnostics + } + return nil +} + +type GetProvisionerSchema struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema) Reset() { *m = GetProvisionerSchema{} } +func (m *GetProvisionerSchema) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema) ProtoMessage() {} +func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{17} +} + +func (m *GetProvisionerSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema.Unmarshal(m, b) +} +func (m *GetProvisionerSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema.Marshal(b, m, deterministic) +} +func (m *GetProvisionerSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema.Merge(m, src) +} +func (m *GetProvisionerSchema) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema.Size(m) +} +func (m *GetProvisionerSchema) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema proto.InternalMessageInfo + +type GetProvisionerSchema_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema_Request) Reset() { *m = GetProvisionerSchema_Request{} } +func (m *GetProvisionerSchema_Request) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema_Request) ProtoMessage() {} +func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{17, 0} +} + +func (m *GetProvisionerSchema_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema_Request.Unmarshal(m, b) +} +func (m *GetProvisionerSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema_Request.Marshal(b, m, deterministic) +} +func (m *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema_Request.Merge(m, src) +} +func (m *GetProvisionerSchema_Request) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema_Request.Size(m) +} +func (m *GetProvisionerSchema_Request) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema_Request proto.InternalMessageInfo + +type GetProvisionerSchema_Response struct { + Provisioner *Schema `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema_Response) Reset() { *m = GetProvisionerSchema_Response{} } +func (m *GetProvisionerSchema_Response) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema_Response) ProtoMessage() {} +func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{17, 1} +} + +func (m *GetProvisionerSchema_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema_Response.Unmarshal(m, b) +} +func (m *GetProvisionerSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_GetProvisionerSchema_Response.Marshal(b, m, deterministic) +} +func (m *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema_Response.Merge(m, src) +} +func (m *GetProvisionerSchema_Response) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema_Response.Size(m) +} +func (m *GetProvisionerSchema_Response) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema_Response proto.InternalMessageInfo + +func (m *GetProvisionerSchema_Response) GetProvisioner() *Schema { + if m != nil { + return m.Provisioner + } + return nil +} + +func (m *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ValidateProvisionerConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig) Reset() { *m = ValidateProvisionerConfig{} } +func (m *ValidateProvisionerConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig) ProtoMessage() {} +func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{18} +} + +func (m *ValidateProvisionerConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig.Marshal(b, m, deterministic) +} +func (m *ValidateProvisionerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig.Merge(m, src) +} +func (m *ValidateProvisionerConfig) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig.Size(m) +} +func (m *ValidateProvisionerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig proto.InternalMessageInfo + +type ValidateProvisionerConfig_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig_Request) Reset() { *m = ValidateProvisionerConfig_Request{} } +func (m *ValidateProvisionerConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig_Request) ProtoMessage() {} +func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{18, 0} +} + +func (m *ValidateProvisionerConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Marshal(b, m, deterministic) +} +func (m *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(m, src) +} +func (m *ValidateProvisionerConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Size(m) +} +func (m *ValidateProvisionerConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig_Request.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ValidateProvisionerConfig_Request proto.InternalMessageInfo + +func (m *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateProvisionerConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig_Response) Reset() { *m = ValidateProvisionerConfig_Response{} } +func (m *ValidateProvisionerConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig_Response) ProtoMessage() {} +func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{18, 1} +} + +func (m *ValidateProvisionerConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Marshal(b, m, deterministic) +} +func (m *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(m, src) +} +func (m *ValidateProvisionerConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Size(m) +} +func (m *ValidateProvisionerConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig_Response proto.InternalMessageInfo + +func (m *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ProvisionResource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource) Reset() { *m = ProvisionResource{} } +func (m *ProvisionResource) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource) ProtoMessage() {} +func (*ProvisionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{19} +} + +func (m *ProvisionResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource.Unmarshal(m, b) +} +func (m *ProvisionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource.Marshal(b, m, deterministic) +} +func (m *ProvisionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource.Merge(m, src) +} +func (m *ProvisionResource) XXX_Size() int { + return xxx_messageInfo_ProvisionResource.Size(m) +} +func (m *ProvisionResource) XXX_DiscardUnknown() { + xxx_messageInfo_ProvisionResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource proto.InternalMessageInfo + +type ProvisionResource_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Connection *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource_Request) Reset() { *m = ProvisionResource_Request{} } +func (m *ProvisionResource_Request) String() string { return proto.CompactTextString(m) } +func 
(*ProvisionResource_Request) ProtoMessage() {} +func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{19, 0} +} + +func (m *ProvisionResource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource_Request.Unmarshal(m, b) +} +func (m *ProvisionResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource_Request.Marshal(b, m, deterministic) +} +func (m *ProvisionResource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource_Request.Merge(m, src) +} +func (m *ProvisionResource_Request) XXX_Size() int { + return xxx_messageInfo_ProvisionResource_Request.Size(m) +} +func (m *ProvisionResource_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ProvisionResource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource_Request proto.InternalMessageInfo + +func (m *ProvisionResource_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *ProvisionResource_Request) GetConnection() *DynamicValue { + if m != nil { + return m.Connection + } + return nil +} + +type ProvisionResource_Response struct { + Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource_Response) Reset() { *m = ProvisionResource_Response{} } +func (m *ProvisionResource_Response) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource_Response) ProtoMessage() {} +func (*ProvisionResource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{19, 1} +} + +func (m *ProvisionResource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource_Response.Unmarshal(m, b) +} +func (m *ProvisionResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource_Response.Marshal(b, m, deterministic) +} +func (m *ProvisionResource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource_Response.Merge(m, src) +} +func (m *ProvisionResource_Response) XXX_Size() int { + return xxx_messageInfo_ProvisionResource_Response.Size(m) +} +func (m *ProvisionResource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ProvisionResource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource_Response proto.InternalMessageInfo + +func (m *ProvisionResource_Response) GetOutput() string { + if m != nil { + return m.Output + } + return "" +} + +func (m *ProvisionResource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func init() { + proto.RegisterEnum("tfplugin5.StringKind", StringKind_name, StringKind_value) + proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value) + proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value) + proto.RegisterType((*DynamicValue)(nil), "tfplugin5.DynamicValue") + proto.RegisterType((*Diagnostic)(nil), "tfplugin5.Diagnostic") + proto.RegisterType((*AttributePath)(nil), "tfplugin5.AttributePath") + proto.RegisterType((*AttributePath_Step)(nil), "tfplugin5.AttributePath.Step") + 
proto.RegisterType((*Stop)(nil), "tfplugin5.Stop") + proto.RegisterType((*Stop_Request)(nil), "tfplugin5.Stop.Request") + proto.RegisterType((*Stop_Response)(nil), "tfplugin5.Stop.Response") + proto.RegisterType((*RawState)(nil), "tfplugin5.RawState") + proto.RegisterMapType((map[string]string)(nil), "tfplugin5.RawState.FlatmapEntry") + proto.RegisterType((*Schema)(nil), "tfplugin5.Schema") + proto.RegisterType((*Schema_Block)(nil), "tfplugin5.Schema.Block") + proto.RegisterType((*Schema_Attribute)(nil), "tfplugin5.Schema.Attribute") + proto.RegisterType((*Schema_NestedBlock)(nil), "tfplugin5.Schema.NestedBlock") + proto.RegisterType((*GetProviderSchema)(nil), "tfplugin5.GetProviderSchema") + proto.RegisterType((*GetProviderSchema_Request)(nil), "tfplugin5.GetProviderSchema.Request") + proto.RegisterType((*GetProviderSchema_Response)(nil), "tfplugin5.GetProviderSchema.Response") + proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry") + proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry") + proto.RegisterType((*PrepareProviderConfig)(nil), "tfplugin5.PrepareProviderConfig") + proto.RegisterType((*PrepareProviderConfig_Request)(nil), "tfplugin5.PrepareProviderConfig.Request") + proto.RegisterType((*PrepareProviderConfig_Response)(nil), "tfplugin5.PrepareProviderConfig.Response") + proto.RegisterType((*UpgradeResourceState)(nil), "tfplugin5.UpgradeResourceState") + proto.RegisterType((*UpgradeResourceState_Request)(nil), "tfplugin5.UpgradeResourceState.Request") + proto.RegisterType((*UpgradeResourceState_Response)(nil), "tfplugin5.UpgradeResourceState.Response") + proto.RegisterType((*ValidateResourceTypeConfig)(nil), "tfplugin5.ValidateResourceTypeConfig") + proto.RegisterType((*ValidateResourceTypeConfig_Request)(nil), "tfplugin5.ValidateResourceTypeConfig.Request") + proto.RegisterType((*ValidateResourceTypeConfig_Response)(nil), "tfplugin5.ValidateResourceTypeConfig.Response") + proto.RegisterType((*ValidateDataSourceConfig)(nil), "tfplugin5.ValidateDataSourceConfig") + proto.RegisterType((*ValidateDataSourceConfig_Request)(nil), "tfplugin5.ValidateDataSourceConfig.Request") + proto.RegisterType((*ValidateDataSourceConfig_Response)(nil), "tfplugin5.ValidateDataSourceConfig.Response") + proto.RegisterType((*Configure)(nil), "tfplugin5.Configure") + proto.RegisterType((*Configure_Request)(nil), "tfplugin5.Configure.Request") + proto.RegisterType((*Configure_Response)(nil), "tfplugin5.Configure.Response") + proto.RegisterType((*ReadResource)(nil), "tfplugin5.ReadResource") + proto.RegisterType((*ReadResource_Request)(nil), "tfplugin5.ReadResource.Request") + proto.RegisterType((*ReadResource_Response)(nil), "tfplugin5.ReadResource.Response") + proto.RegisterType((*PlanResourceChange)(nil), "tfplugin5.PlanResourceChange") + proto.RegisterType((*PlanResourceChange_Request)(nil), "tfplugin5.PlanResourceChange.Request") + proto.RegisterType((*PlanResourceChange_Response)(nil), "tfplugin5.PlanResourceChange.Response") + proto.RegisterType((*ApplyResourceChange)(nil), "tfplugin5.ApplyResourceChange") + proto.RegisterType((*ApplyResourceChange_Request)(nil), "tfplugin5.ApplyResourceChange.Request") + proto.RegisterType((*ApplyResourceChange_Response)(nil), "tfplugin5.ApplyResourceChange.Response") + proto.RegisterType((*ImportResourceState)(nil), "tfplugin5.ImportResourceState") + proto.RegisterType((*ImportResourceState_Request)(nil), "tfplugin5.ImportResourceState.Request") + 
proto.RegisterType((*ImportResourceState_ImportedResource)(nil), "tfplugin5.ImportResourceState.ImportedResource") + proto.RegisterType((*ImportResourceState_Response)(nil), "tfplugin5.ImportResourceState.Response") + proto.RegisterType((*ReadDataSource)(nil), "tfplugin5.ReadDataSource") + proto.RegisterType((*ReadDataSource_Request)(nil), "tfplugin5.ReadDataSource.Request") + proto.RegisterType((*ReadDataSource_Response)(nil), "tfplugin5.ReadDataSource.Response") + proto.RegisterType((*GetProvisionerSchema)(nil), "tfplugin5.GetProvisionerSchema") + proto.RegisterType((*GetProvisionerSchema_Request)(nil), "tfplugin5.GetProvisionerSchema.Request") + proto.RegisterType((*GetProvisionerSchema_Response)(nil), "tfplugin5.GetProvisionerSchema.Response") + proto.RegisterType((*ValidateProvisionerConfig)(nil), "tfplugin5.ValidateProvisionerConfig") + proto.RegisterType((*ValidateProvisionerConfig_Request)(nil), "tfplugin5.ValidateProvisionerConfig.Request") + proto.RegisterType((*ValidateProvisionerConfig_Response)(nil), "tfplugin5.ValidateProvisionerConfig.Response") + proto.RegisterType((*ProvisionResource)(nil), "tfplugin5.ProvisionResource") + proto.RegisterType((*ProvisionResource_Request)(nil), "tfplugin5.ProvisionResource.Request") + proto.RegisterType((*ProvisionResource_Response)(nil), "tfplugin5.ProvisionResource.Response") +} + +func init() { + proto.RegisterFile("tfplugin5.proto", fileDescriptor_17ae6090ff270234) +} + +var fileDescriptor_17ae6090ff270234 = []byte{ + // 2010 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x23, 0x49, + 0x15, 0x9f, 0x6e, 0xdb, 0x89, 0xfd, 0xec, 0x49, 0x3a, 0x35, 0x1f, 0x98, 0xde, 0x0f, 0x82, 0x61, + 0x49, 0x96, 0xdd, 0xf1, 0xac, 0x32, 0x30, 0xbb, 0x84, 0xd1, 0x6a, 0xb3, 0x49, 0xc8, 0x44, 0x33, + 0xf1, 0x84, 0xf2, 0xcc, 0x04, 0x09, 0x69, 0xad, 0x1a, 0x77, 0xc5, 0xd3, 0xc4, 0xee, 0xee, 0xad, + 0x2e, 0x67, 0x62, 0x71, 0x44, 0x70, 0x46, 0xa0, 0x85, 0x03, 0x70, 0x81, 0x03, 0xe2, 0xc4, 0x0d, + 0xf1, 0x75, 0xe1, 0xce, 0x81, 0x3b, 0xdc, 0x56, 0x1c, 0xb9, 0xf0, 0x17, 0xa0, 0xaa, 0xae, 0xee, + 0x2e, 0xdb, 0xed, 0xa4, 0x93, 0xec, 0x0a, 0xed, 0xad, 0xeb, 0xbd, 0x5f, 0xbd, 0xf7, 0xea, 0xbd, + 0x5f, 0xbd, 0xaa, 0xb2, 0x61, 0x91, 0x1f, 0x06, 0xfd, 0x61, 0xcf, 0xf5, 0xbe, 0xde, 0x0c, 0x98, + 0xcf, 0x7d, 0x54, 0x49, 0x04, 0x8d, 0x7b, 0x50, 0xdb, 0x1a, 0x79, 0x64, 0xe0, 0x76, 0x9f, 0x92, + 0xfe, 0x90, 0xa2, 0x3a, 0xcc, 0x0f, 0xc2, 0x5e, 0x40, 0xba, 0x47, 0x75, 0x63, 0xd9, 0x58, 0xad, + 0xe1, 0x78, 0x88, 0x10, 0x14, 0xbf, 0x17, 0xfa, 0x5e, 0xdd, 0x94, 0x62, 0xf9, 0xdd, 0xf8, 0xd8, + 0x00, 0xd8, 0x72, 0x49, 0xcf, 0xf3, 0x43, 0xee, 0x76, 0xd1, 0x3a, 0x94, 0x43, 0x7a, 0x4c, 0x99, + 0xcb, 0x47, 0x72, 0xf6, 0xc2, 0xda, 0xab, 0xcd, 0xd4, 0x77, 0x0a, 0x6c, 0xb6, 0x15, 0x0a, 0x27, + 0x78, 0xe1, 0x38, 0x1c, 0x0e, 0x06, 0x84, 0x8d, 0xa4, 0x87, 0x0a, 0x8e, 0x87, 0xe8, 0x26, 0xcc, + 0x39, 0x94, 0x13, 0xb7, 0x5f, 0x2f, 0x48, 0x85, 0x1a, 0xa1, 0xbb, 0x50, 0x21, 0x9c, 0x33, 0xf7, + 0xd9, 0x90, 0xd3, 0x7a, 0x71, 0xd9, 0x58, 0xad, 0xae, 0xd5, 0x35, 0x77, 0x1b, 0xb1, 0x6e, 0x9f, + 0xf0, 0xe7, 0x38, 0x85, 0x36, 0x6e, 0x43, 0x39, 0xf6, 0x8f, 0xaa, 0x30, 0xbf, 0xdb, 0x7a, 0xba, + 0xf1, 0x70, 0x77, 0xcb, 0xba, 0x82, 0x2a, 0x50, 0xda, 0xc6, 0xf8, 0x11, 0xb6, 0x0c, 0x21, 0x3f, + 0xd8, 0xc0, 0xad, 0xdd, 0xd6, 0x8e, 0x65, 0x36, 0xfe, 0x65, 0xc0, 0xd5, 0x31, 0x6b, 0xe8, 0x0e, + 0x94, 0x42, 0x4e, 0x83, 0xb0, 0x6e, 0x2c, 0x17, 0x56, 0xab, 0x6b, 0xaf, 0xcc, 0x72, 0xdb, 0x6c, + 0x73, 0x1a, 0xe0, 0x08, 0x6b, 0x7f, 0x64, 0x40, 
0x51, 0x8c, 0xd1, 0x0a, 0x2c, 0x24, 0xd1, 0x74, + 0x3c, 0x32, 0xa0, 0x32, 0x59, 0x95, 0xfb, 0x57, 0xf0, 0xd5, 0x44, 0xde, 0x22, 0x03, 0x8a, 0x9a, + 0x80, 0x68, 0x9f, 0x0e, 0xa8, 0xc7, 0x3b, 0x47, 0x74, 0xd4, 0x09, 0x39, 0x73, 0xbd, 0x5e, 0x94, + 0x9e, 0xfb, 0x57, 0xb0, 0xa5, 0x74, 0x0f, 0xe8, 0xa8, 0x2d, 0x35, 0x68, 0x15, 0x16, 0x75, 0xbc, + 0xeb, 0x71, 0x99, 0xb2, 0x82, 0xb0, 0x9c, 0x82, 0x77, 0x3d, 0xfe, 0x3e, 0x88, 0x4a, 0xf5, 0x69, + 0x97, 0xfb, 0xac, 0x71, 0x47, 0x84, 0xe5, 0x07, 0x76, 0x05, 0xe6, 0x31, 0xfd, 0x70, 0x48, 0x43, + 0x6e, 0x2f, 0x43, 0x19, 0xd3, 0x30, 0xf0, 0xbd, 0x90, 0xa2, 0xeb, 0x50, 0xda, 0x66, 0xcc, 0x67, + 0x51, 0x90, 0x38, 0x1a, 0x34, 0x7e, 0x66, 0x40, 0x19, 0x93, 0x17, 0x6d, 0x4e, 0x38, 0x4d, 0xa8, + 0x61, 0xa4, 0xd4, 0x40, 0xeb, 0x30, 0x7f, 0xd8, 0x27, 0x7c, 0x40, 0x82, 0xba, 0x29, 0x93, 0xb4, + 0xac, 0x25, 0x29, 0x9e, 0xd9, 0xfc, 0x56, 0x04, 0xd9, 0xf6, 0x38, 0x1b, 0xe1, 0x78, 0x82, 0xbd, + 0x0e, 0x35, 0x5d, 0x81, 0x2c, 0x28, 0x1c, 0xd1, 0x91, 0x0a, 0x40, 0x7c, 0x8a, 0xa0, 0x8e, 0x05, + 0x5f, 0x15, 0x57, 0xa2, 0xc1, 0xba, 0xf9, 0x8e, 0xd1, 0xf8, 0xfb, 0x3c, 0xcc, 0xb5, 0xbb, 0xcf, + 0xe9, 0x80, 0x08, 0x4a, 0x1d, 0x53, 0x16, 0xba, 0x2a, 0xb2, 0x02, 0x8e, 0x87, 0xe8, 0x16, 0x94, + 0x9e, 0xf5, 0xfd, 0xee, 0x91, 0x9c, 0x5e, 0x5d, 0xfb, 0x9c, 0x16, 0x5a, 0x34, 0xb7, 0xf9, 0xbe, + 0x50, 0xe3, 0x08, 0x65, 0xff, 0xda, 0x84, 0x92, 0x14, 0x9c, 0x62, 0xf2, 0x9b, 0x00, 0x49, 0xf1, + 0x42, 0xb5, 0xe4, 0x97, 0xa6, 0xed, 0x26, 0xf4, 0xc0, 0x1a, 0x1c, 0xbd, 0x0b, 0x55, 0xe9, 0xa9, + 0xc3, 0x47, 0x01, 0x0d, 0xeb, 0x85, 0x29, 0x56, 0xa9, 0xd9, 0x2d, 0x1a, 0x72, 0xea, 0x44, 0xb1, + 0x81, 0x9c, 0xf1, 0x58, 0x4c, 0x40, 0xcb, 0x50, 0x75, 0x68, 0xd8, 0x65, 0x6e, 0xc0, 0x45, 0x68, + 0x45, 0x99, 0x14, 0x5d, 0x84, 0xde, 0x03, 0x4b, 0x1b, 0x76, 0x8e, 0x5c, 0xcf, 0xa9, 0x97, 0xe4, + 0x16, 0xbd, 0xa1, 0xbb, 0x91, 0x3c, 0x7a, 0xe0, 0x7a, 0x0e, 0x5e, 0xd4, 0xe0, 0x42, 0x80, 0x5e, + 0x05, 0x70, 0x68, 0xc0, 0x68, 0x97, 0x70, 0xea, 0xd4, 0xe7, 0x96, 0x8d, 0xd5, 0x32, 0xd6, 0x24, + 0xf6, 0xef, 0x4c, 0xa8, 0x24, 0xab, 0x13, 0x94, 0x48, 0x99, 0x8d, 0xe5, 0xb7, 0x90, 0x89, 0xf5, + 0xc5, 0x1d, 0x44, 0x7c, 0x4f, 0x46, 0x5e, 0x98, 0x8e, 0xdc, 0x86, 0x32, 0xa3, 0x1f, 0x0e, 0x5d, + 0x46, 0x1d, 0xb9, 0xb0, 0x32, 0x4e, 0xc6, 0x42, 0xe7, 0x4b, 0x14, 0xe9, 0xcb, 0xd5, 0x94, 0x71, + 0x32, 0x16, 0xba, 0xae, 0x3f, 0x08, 0x86, 0x69, 0xb4, 0xc9, 0x18, 0xbd, 0x0c, 0x95, 0x90, 0x7a, + 0xa1, 0xcb, 0xdd, 0x63, 0x5a, 0x9f, 0x97, 0xca, 0x54, 0x90, 0x99, 0xab, 0xf2, 0x25, 0x72, 0x55, + 0x99, 0xca, 0xd5, 0x6f, 0x4d, 0xa8, 0x6a, 0xb5, 0x44, 0x2f, 0x41, 0x45, 0x64, 0x43, 0x6b, 0x06, + 0xb8, 0x2c, 0x04, 0xb2, 0x0b, 0x9c, 0x8f, 0xac, 0x68, 0x13, 0xe6, 0x3d, 0x1a, 0x72, 0xd1, 0x29, + 0x0a, 0x32, 0xe8, 0xd7, 0x4f, 0xe5, 0x91, 0xfc, 0x76, 0xbd, 0xde, 0x9e, 0xef, 0x50, 0x1c, 0xcf, + 0x14, 0x01, 0x0d, 0x5c, 0xaf, 0xe3, 0x72, 0x3a, 0x08, 0x65, 0xd6, 0x0b, 0xb8, 0x3c, 0x70, 0xbd, + 0x5d, 0x31, 0x96, 0x4a, 0x72, 0xa2, 0x94, 0x25, 0xa5, 0x24, 0x27, 0x52, 0xd9, 0xd8, 0x8b, 0x56, + 0xa6, 0x2c, 0x8e, 0x37, 0x58, 0x80, 0xb9, 0xf6, 0x6e, 0x6b, 0xe7, 0xe1, 0xb6, 0x65, 0xa0, 0x32, + 0x14, 0x1f, 0xee, 0xb6, 0x1f, 0x5b, 0x26, 0x9a, 0x87, 0x42, 0x7b, 0xfb, 0xb1, 0x55, 0x10, 0x1f, + 0x7b, 0x1b, 0xfb, 0x56, 0x51, 0x34, 0xe2, 0x1d, 0xfc, 0xe8, 0xc9, 0xbe, 0x55, 0x6a, 0xfc, 0xa3, + 0x08, 0x4b, 0x3b, 0x94, 0xef, 0x33, 0xff, 0xd8, 0x75, 0x28, 0x8b, 0xe2, 0xd7, 0x5b, 0xd5, 0xef, + 0x8b, 0x5a, 0xaf, 0xba, 0x05, 0xe5, 0x40, 0x21, 0x65, 0x1a, 0xab, 0x6b, 0x4b, 0x53, 0x8b, 0xc7, + 0x09, 0x04, 0x51, 0xb0, 0x18, 0x0d, 0xfd, 0x21, 0xeb, 0xd2, 0x4e, 0x28, 
0x95, 0xf1, 0xce, 0x5d, + 0xd7, 0xa6, 0x4d, 0xb9, 0x6f, 0xc6, 0xfe, 0xc4, 0x87, 0x9c, 0x1d, 0xc9, 0xc3, 0xa8, 0x8d, 0x2d, + 0xb2, 0x71, 0x29, 0xea, 0xc3, 0x35, 0x87, 0x70, 0xd2, 0x99, 0xf0, 0x14, 0xed, 0xf2, 0x7b, 0xf9, + 0x3c, 0x6d, 0x11, 0x4e, 0xda, 0xd3, 0xbe, 0x96, 0x9c, 0x49, 0x39, 0x7a, 0x1b, 0xaa, 0x4e, 0x72, + 0xd2, 0x8a, 0xe2, 0x09, 0x2f, 0x37, 0x32, 0xcf, 0x61, 0xac, 0x23, 0xd1, 0x5d, 0xb8, 0x1a, 0x67, + 0xa6, 0x33, 0xa0, 0x9c, 0xc8, 0xd2, 0x66, 0x66, 0xb0, 0x16, 0xe3, 0xf6, 0x28, 0x27, 0xf6, 0x13, + 0xb8, 0x9e, 0x95, 0x87, 0x8c, 0xae, 0xbd, 0xa2, 0x77, 0xed, 0x4c, 0xcb, 0x69, 0x23, 0xb7, 0x0f, + 0xe0, 0x66, 0xf6, 0xa2, 0x2f, 0x69, 0xb8, 0xf1, 0x4f, 0x03, 0x6e, 0xec, 0x33, 0x1a, 0x10, 0x46, + 0xe3, 0x6c, 0x6f, 0xfa, 0xde, 0xa1, 0xdb, 0xb3, 0xd7, 0x13, 0x5a, 0xa1, 0xdb, 0x30, 0xd7, 0x95, + 0x42, 0xc5, 0x23, 0x7d, 0xd7, 0xe9, 0x17, 0x26, 0xac, 0x60, 0xf6, 0x0f, 0x0d, 0x8d, 0x87, 0xef, + 0xc1, 0x62, 0x10, 0x79, 0x70, 0x3a, 0xf9, 0xcc, 0x2c, 0xc4, 0xf8, 0x28, 0x94, 0xc9, 0x2a, 0x9a, + 0x79, 0xab, 0xd8, 0xf8, 0xb1, 0x09, 0xd7, 0x9f, 0x04, 0x3d, 0x46, 0x1c, 0x9a, 0x54, 0x45, 0x1c, + 0xb5, 0x36, 0x4b, 0x17, 0x77, 0x6a, 0xbb, 0xd1, 0x8e, 0x38, 0x73, 0xfc, 0x88, 0x7b, 0x0b, 0x2a, + 0x8c, 0xbc, 0xe8, 0x84, 0xc2, 0x9c, 0xec, 0x2d, 0xd5, 0xb5, 0x6b, 0x19, 0x87, 0x3a, 0x2e, 0x33, + 0xf5, 0x65, 0xff, 0x40, 0x4f, 0xca, 0xbb, 0xb0, 0x30, 0x8c, 0x02, 0x73, 0x94, 0x8d, 0x33, 0x72, + 0x72, 0x35, 0x86, 0x47, 0xb7, 0x8c, 0x0b, 0xa7, 0xe4, 0xcf, 0x06, 0xd8, 0x4f, 0x49, 0xdf, 0x75, + 0x44, 0x70, 0x2a, 0x27, 0xe2, 0xdc, 0x54, 0x55, 0x3f, 0xc8, 0x99, 0x98, 0x94, 0x12, 0x66, 0x3e, + 0x4a, 0x6c, 0x6a, 0x8b, 0x9f, 0x08, 0xde, 0xc8, 0x1d, 0xfc, 0x1f, 0x0d, 0xa8, 0xc7, 0xc1, 0xa7, + 0xfb, 0xe1, 0x33, 0x11, 0xfa, 0x9f, 0x0c, 0xa8, 0x44, 0x81, 0x0e, 0x19, 0xb5, 0x7b, 0x69, 0xac, + 0x6f, 0xc0, 0x12, 0xa7, 0x8c, 0x91, 0x43, 0x9f, 0x0d, 0x3a, 0xfa, 0x7d, 0xaa, 0x82, 0xad, 0x44, + 0xf1, 0x54, 0xb1, 0xee, 0xff, 0x13, 0xfb, 0xc7, 0x26, 0xd4, 0x30, 0x25, 0x4e, 0xcc, 0x17, 0xfb, + 0xaf, 0x46, 0xce, 0x5c, 0xdf, 0x83, 0xab, 0xdd, 0x21, 0x63, 0xe2, 0x12, 0x1e, 0xb1, 0xfc, 0x8c, + 0xb0, 0x6b, 0x0a, 0x1d, 0x91, 0xbc, 0x0e, 0xf3, 0x01, 0x73, 0x8f, 0xe3, 0x1d, 0x56, 0xc3, 0xf1, + 0x50, 0xd8, 0x1d, 0x6f, 0xcf, 0xc5, 0x33, 0xec, 0x8e, 0x35, 0xe9, 0x9f, 0xea, 0x3b, 0xf1, 0x6b, + 0x50, 0xf1, 0xe8, 0x8b, 0x7c, 0x9b, 0xb0, 0xec, 0xd1, 0x17, 0x97, 0xdb, 0x7f, 0xb3, 0xd7, 0xd4, + 0xf8, 0x6f, 0x11, 0xd0, 0x7e, 0x9f, 0x78, 0x71, 0x96, 0x37, 0x9f, 0x13, 0xaf, 0x47, 0xed, 0xbf, + 0x98, 0x39, 0x73, 0xfd, 0x0e, 0x54, 0x03, 0xe6, 0xfa, 0x2c, 0x5f, 0xa6, 0x41, 0x62, 0xa3, 0xc5, + 0x6c, 0x03, 0x0a, 0x98, 0x1f, 0xf8, 0x21, 0x75, 0x3a, 0x69, 0x2e, 0x0a, 0xa7, 0x1b, 0xb0, 0xe2, + 0x29, 0xad, 0x38, 0x27, 0x29, 0x39, 0x8b, 0xb9, 0xc8, 0x89, 0xbe, 0x24, 0xaa, 0x28, 0x22, 0x8e, + 0x33, 0x52, 0x92, 0x19, 0xa9, 0x49, 0xe1, 0xfe, 0xac, 0x52, 0xcf, 0x9d, 0xa7, 0xd4, 0xbf, 0x32, + 0xb5, 0x52, 0x0b, 0x53, 0x7d, 0xe2, 0x79, 0x79, 0x7b, 0x6e, 0x4d, 0xa1, 0xa3, 0xe5, 0x6d, 0x8a, + 0x0b, 0x92, 0xbc, 0x6b, 0x87, 0x1d, 0x46, 0x83, 0x3e, 0xe9, 0x52, 0x55, 0xf7, 0xd9, 0x2f, 0xed, + 0xc5, 0x78, 0x06, 0x8e, 0x26, 0xa0, 0x15, 0x58, 0x8c, 0x43, 0x18, 0xa7, 0xc1, 0x82, 0x12, 0xc7, + 0xcb, 0xbe, 0xf0, 0xcd, 0xe5, 0x4d, 0x40, 0x7d, 0xda, 0x23, 0xdd, 0x91, 0x7c, 0x3f, 0x75, 0xc2, + 0x51, 0xc8, 0xe9, 0x40, 0x3d, 0x08, 0xac, 0x48, 0x23, 0xfa, 0x7d, 0x5b, 0xca, 0x1b, 0x3f, 0x29, + 0xc2, 0xb5, 0x8d, 0x20, 0xe8, 0x8f, 0x26, 0x58, 0xf7, 0x87, 0x4f, 0x9f, 0x75, 0x53, 0xd5, 0x28, + 0x9c, 0xa7, 0x1a, 0xe7, 0x26, 0x5b, 0x46, 0xe6, 0x4b, 0x99, 0x99, 0xbf, 0x1c, 0xe1, 0xfe, 0x76, + 
0xf9, 0xde, 0xa2, 0xb5, 0x08, 0x73, 0xbc, 0xed, 0x4d, 0x90, 0xa2, 0x70, 0x49, 0x52, 0x14, 0x67, + 0x90, 0xe2, 0x3f, 0x26, 0x5c, 0xdb, 0x1d, 0x04, 0x3e, 0xe3, 0xe3, 0xb7, 0xa6, 0xbb, 0x39, 0x39, + 0xb1, 0x00, 0xa6, 0xeb, 0xa8, 0x5f, 0x23, 0x4c, 0xd7, 0xb1, 0x4f, 0xc0, 0x8a, 0xcc, 0xd1, 0xe4, + 0x08, 0x39, 0xf3, 0x95, 0x97, 0x8b, 0x4e, 0x11, 0x6a, 0x76, 0x4f, 0xb5, 0x7f, 0xa3, 0x57, 0xe3, + 0x03, 0x40, 0xae, 0x0a, 0xa3, 0x13, 0x3f, 0x4b, 0xe2, 0x63, 0xf0, 0xb6, 0xe6, 0x22, 0x63, 0xe9, + 0xcd, 0xc9, 0xf8, 0xf1, 0x92, 0x3b, 0x21, 0x09, 0x2f, 0x7e, 0x27, 0xfb, 0xa5, 0x09, 0x0b, 0xe2, + 0x7c, 0x4d, 0xaf, 0x34, 0xf6, 0x47, 0xc6, 0xa7, 0x74, 0x9b, 0x99, 0xa6, 0x77, 0xe1, 0x3c, 0xf4, + 0x66, 0x63, 0x0f, 0xcc, 0x52, 0x2e, 0x66, 0xab, 0x2a, 0x5d, 0x38, 0x3d, 0xbf, 0x30, 0xe0, 0x7a, + 0xfc, 0x1a, 0x14, 0xb7, 0xa0, 0xac, 0x97, 0xef, 0x89, 0x16, 0xd7, 0x1d, 0xd1, 0x92, 0x12, 0xec, + 0xec, 0xb7, 0xaf, 0x8e, 0xba, 0x44, 0xf1, 0x0c, 0xf8, 0x7c, 0x7c, 0x27, 0xd5, 0x42, 0xfc, 0x04, + 0x5e, 0x51, 0x9f, 0xc8, 0xdd, 0xed, 0xdf, 0x06, 0x2c, 0x25, 0x61, 0x25, 0x17, 0xb8, 0xf0, 0xe2, + 0x61, 0xa1, 0xb7, 0x01, 0xba, 0xbe, 0xe7, 0xd1, 0x2e, 0x8f, 0x9f, 0x45, 0xa7, 0x35, 0xfc, 0x14, + 0x6a, 0x7f, 0x57, 0x5b, 0xcf, 0x4d, 0x98, 0xf3, 0x87, 0x3c, 0x18, 0x72, 0x45, 0x68, 0x35, 0xba, + 0x70, 0x19, 0xbe, 0xfa, 0x1a, 0x40, 0xfa, 0x23, 0x14, 0xaa, 0x40, 0x69, 0xff, 0xe1, 0xc6, 0x6e, + 0xcb, 0xba, 0x82, 0x6a, 0x50, 0xde, 0xdb, 0xc0, 0x0f, 0xb6, 0x1e, 0x1d, 0xb4, 0x2c, 0x63, 0xed, + 0xe7, 0x15, 0x28, 0xc7, 0x0f, 0x5d, 0xf4, 0x1d, 0xa8, 0xec, 0x50, 0xae, 0x7e, 0x20, 0xfd, 0xf2, + 0x19, 0xbf, 0x3d, 0x44, 0x3c, 0x7b, 0x2d, 0xd7, 0x2f, 0x14, 0xa8, 0x3f, 0xe3, 0x55, 0x8d, 0x56, + 0xb5, 0xf9, 0x99, 0x88, 0xc4, 0xd3, 0xeb, 0x39, 0x90, 0xca, 0xdb, 0xf7, 0x4f, 0x7b, 0xd2, 0xa1, + 0x5b, 0x9a, 0xa1, 0xd9, 0xb0, 0xc4, 0x6f, 0x33, 0x2f, 0x5c, 0x39, 0x1f, 0xce, 0x7e, 0x92, 0xa1, + 0x37, 0x32, 0x6c, 0x4d, 0x82, 0x12, 0xc7, 0x6f, 0xe6, 0x03, 0x2b, 0xb7, 0x6e, 0xf6, 0xcb, 0x1e, + 0xad, 0x68, 0x56, 0xb2, 0x00, 0x89, 0xbb, 0xd5, 0xb3, 0x81, 0xca, 0xd5, 0x7d, 0xed, 0xe5, 0x86, + 0x5e, 0xd6, 0xa6, 0x25, 0xd2, 0xc4, 0xe8, 0x2b, 0x33, 0xb4, 0xca, 0xd2, 0xb7, 0xc7, 0xdf, 0x51, + 0xe8, 0x0b, 0xfa, 0x2f, 0x06, 0x9a, 0x22, 0xb1, 0xb7, 0x3c, 0x1b, 0xa0, 0x4c, 0x76, 0xb3, 0x1e, + 0x0d, 0x48, 0xa7, 0xe9, 0xb4, 0x3a, 0x31, 0xff, 0x95, 0xb3, 0x60, 0xca, 0xc9, 0x61, 0xe6, 0x25, + 0x11, 0xe9, 0xd3, 0x33, 0xf4, 0x89, 0x9b, 0x95, 0x33, 0x71, 0xa9, 0x9f, 0x8c, 0xc3, 0x77, 0xcc, + 0x4f, 0xd6, 0xe1, 0x9c, 0xe5, 0x27, 0x1b, 0xa7, 0xfc, 0x1c, 0x4c, 0x9e, 0xb7, 0xe8, 0x8b, 0x13, + 0x89, 0x4e, 0x55, 0x89, 0xf5, 0xc6, 0x69, 0x10, 0x65, 0xf8, 0x1b, 0xd1, 0xdf, 0x47, 0x68, 0xec, + 0x77, 0x69, 0xee, 0x07, 0x89, 0x91, 0xfa, 0xb4, 0x22, 0x9a, 0xba, 0xf6, 0xa3, 0x02, 0x54, 0xb5, + 0xf3, 0x03, 0x7d, 0xa0, 0x37, 0xa7, 0x95, 0x8c, 0xb6, 0xa3, 0x1f, 0x85, 0x99, 0xac, 0x9e, 0x01, + 0x54, 0xa1, 0x9e, 0x9c, 0x72, 0x6c, 0xa1, 0xac, 0xbd, 0x38, 0x85, 0x4a, 0x9c, 0xde, 0xca, 0x89, + 0x56, 0x9e, 0x9f, 0x65, 0x9c, 0x48, 0x63, 0xed, 0x77, 0x4a, 0x9b, 0xd9, 0x7e, 0xb3, 0x50, 0x91, + 0x87, 0xb7, 0x8c, 0x4b, 0x14, 0xe2, 0xd9, 0x9c, 0xfc, 0x5f, 0xf8, 0xce, 0xff, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xe3, 0x8e, 0xe1, 0x22, 0x2a, 0x1e, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion6 + +// ProviderClient is the client API for Provider service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ProviderClient interface { + //////// Information about what a provider supports/expects + GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) + PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) + //////// One-time initialization, called before other functions below + Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) + //////// Managed Resource Lifecycle + ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) + PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) + ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) + ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) + ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) + //////// Graceful Shutdown + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type providerClient struct { + cc grpc.ClientConnInterface +} + +func NewProviderClient(cc grpc.ClientConnInterface) ProviderClient { + return &providerClient{cc} +} + +func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { + out := new(GetProviderSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) { + out := new(PrepareProviderConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PrepareProviderConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) { + out := new(ValidateResourceTypeConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateResourceTypeConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) { + out := new(ValidateDataSourceConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateDataSourceConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { + out := new(UpgradeResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/UpgradeResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) { + out := new(Configure_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Configure", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { + out := new(ReadResource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadResource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { + out := new(PlanResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PlanResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { + out := new(ApplyResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ApplyResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { + out := new(ImportResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ImportResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { + out := new(ReadDataSource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadDataSource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProviderServer is the server API for Provider service. 
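+// It is implemented by provider plugins and attached to a grpc.Server via RegisterProviderServer below; Terraform Core invokes these methods over the plugin's gRPC connection.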
+type ProviderServer interface { + //////// Information about what a provider supports/expects + GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) + PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) + //////// One-time initialization, called before other functions below + Configure(context.Context, *Configure_Request) (*Configure_Response, error) + //////// Managed Resource Lifecycle + ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) + PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) + ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) + ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) + ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) + //////// Graceful Shutdown + Stop(context.Context, *Stop_Request) (*Stop_Response, error) +} + +// UnimplementedProviderServer can be embedded to have forward compatible implementations. +type UnimplementedProviderServer struct { +} + +func (*UnimplementedProviderServer) GetSchema(ctx context.Context, req *GetProviderSchema_Request) (*GetProviderSchema_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (*UnimplementedProviderServer) PrepareProviderConfig(ctx context.Context, req *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PrepareProviderConfig not implemented") +} +func (*UnimplementedProviderServer) ValidateResourceTypeConfig(ctx context.Context, req *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateResourceTypeConfig not implemented") +} +func (*UnimplementedProviderServer) ValidateDataSourceConfig(ctx context.Context, req *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateDataSourceConfig not implemented") +} +func (*UnimplementedProviderServer) UpgradeResourceState(ctx context.Context, req *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpgradeResourceState not implemented") +} +func (*UnimplementedProviderServer) Configure(ctx context.Context, req *Configure_Request) (*Configure_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Configure not implemented") +} +func (*UnimplementedProviderServer) ReadResource(ctx context.Context, req *ReadResource_Request) (*ReadResource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadResource not implemented") +} +func (*UnimplementedProviderServer) PlanResourceChange(ctx context.Context, req *PlanResourceChange_Request) (*PlanResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method PlanResourceChange not 
implemented") +} +func (*UnimplementedProviderServer) ApplyResourceChange(ctx context.Context, req *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplyResourceChange not implemented") +} +func (*UnimplementedProviderServer) ImportResourceState(ctx context.Context, req *ImportResourceState_Request) (*ImportResourceState_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImportResourceState not implemented") +} +func (*UnimplementedProviderServer) ReadDataSource(ctx context.Context, req *ReadDataSource_Request) (*ReadDataSource_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") +} +func (*UnimplementedProviderServer) Stop(ctx context.Context, req *Stop_Request) (*Stop_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} + +func RegisterProviderServer(s *grpc.Server, srv ProviderServer) { + s.RegisterService(&_Provider_serviceDesc, srv) +} + +func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProviderSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PrepareProviderConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PrepareProviderConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PrepareProviderConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateResourceTypeConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateResourceTypeConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateDataSourceConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateDataSourceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpgradeResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).UpgradeResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/UpgradeResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Configure_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Configure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Configure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadResource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlanResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PlanResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PlanResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ApplyResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ApplyResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { + return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ImportResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ImportResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDataSource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadDataSource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadDataSource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provider_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provider", + HandlerType: (*ProviderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provider_GetSchema_Handler, + }, + { + MethodName: "PrepareProviderConfig", + Handler: _Provider_PrepareProviderConfig_Handler, + }, + { + MethodName: "ValidateResourceTypeConfig", + Handler: _Provider_ValidateResourceTypeConfig_Handler, + }, + { + MethodName: "ValidateDataSourceConfig", + Handler: _Provider_ValidateDataSourceConfig_Handler, + }, + { + MethodName: "UpgradeResourceState", + Handler: _Provider_UpgradeResourceState_Handler, + }, + { + MethodName: "Configure", + Handler: _Provider_Configure_Handler, + }, + { + MethodName: "ReadResource", + Handler: _Provider_ReadResource_Handler, + }, + { + MethodName: "PlanResourceChange", + Handler: _Provider_PlanResourceChange_Handler, + }, + { + MethodName: "ApplyResourceChange", + Handler: _Provider_ApplyResourceChange_Handler, + }, + { + MethodName: "ImportResourceState", + Handler: _Provider_ImportResourceState_Handler, + }, + { + MethodName: "ReadDataSource", + Handler: _Provider_ReadDataSource_Handler, + }, + { + MethodName: "Stop", + Handler: _Provider_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tfplugin5.proto", +} + +// ProvisionerClient is the client API for Provisioner service. 
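+// Unlike the Provider service, ProvisionResource is a server-streaming RPC: its client method returns a Provisioner_ProvisionResourceClient whose Recv method yields the streamed responses.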
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ProvisionerClient interface { + GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type provisionerClient struct { + cc grpc.ClientConnInterface +} + +func NewProvisionerClient(cc grpc.ClientConnInterface) ProvisionerClient { + return &provisionerClient{cc} +} + +func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) { + out := new(GetProvisionerSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) { + out := new(ValidateProvisionerConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/ValidateProvisionerConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) { + stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) + if err != nil { + return nil, err + } + x := &provisionerProvisionResourceClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Provisioner_ProvisionResourceClient interface { + Recv() (*ProvisionResource_Response, error) + grpc.ClientStream +} + +type provisionerProvisionResourceClient struct { + grpc.ClientStream +} + +func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response, error) { + m := new(ProvisionResource_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProvisionerServer is the server API for Provisioner service. +type ProvisionerServer interface { + GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error + Stop(context.Context, *Stop_Request) (*Stop_Response, error) +} + +// UnimplementedProvisionerServer can be embedded to have forward compatible implementations. 
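+// Each method returns a codes.Unimplemented error until overridden by the embedding implementation.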
+type UnimplementedProvisionerServer struct { +} + +func (*UnimplementedProvisionerServer) GetSchema(ctx context.Context, req *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSchema not implemented") +} +func (*UnimplementedProvisionerServer) ValidateProvisionerConfig(ctx context.Context, req *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateProvisionerConfig not implemented") +} +func (*UnimplementedProvisionerServer) ProvisionResource(req *ProvisionResource_Request, srv Provisioner_ProvisionResourceServer) error { + return status.Errorf(codes.Unimplemented, "method ProvisionResource not implemented") +} +func (*UnimplementedProvisionerServer) Stop(ctx context.Context, req *Stop_Request) (*Stop_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} + +func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) { + s.RegisterService(&_Provisioner_serviceDesc, srv) +} + +func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProvisionerSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateProvisionerConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/ValidateProvisionerConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ProvisionResource_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ProvisionResource_Request) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ProvisionerServer).ProvisionResource(m, &provisionerProvisionResourceServer{stream}) +} + +type Provisioner_ProvisionResourceServer interface { + Send(*ProvisionResource_Response) error + grpc.ServerStream +} + +type provisionerProvisionResourceServer struct { + grpc.ServerStream +} + +func (x *provisionerProvisionResourceServer) Send(m *ProvisionResource_Response) error { + return x.ServerStream.SendMsg(m) +} + +func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/tfplugin5.Provisioner/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provisioner_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provisioner", + HandlerType: (*ProvisionerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provisioner_GetSchema_Handler, + }, + { + MethodName: "ValidateProvisionerConfig", + Handler: _Provisioner_ValidateProvisionerConfig_Handler, + }, + { + MethodName: "Stop", + Handler: _Provisioner_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ProvisionResource", + Handler: _Provisioner_ProvisionResource_Handler, + ServerStreams: true, + }, + }, + Metadata: "tfplugin5.proto", +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/tfplugin5.proto new file mode 100644 index 000000000..4f365697a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5/tfplugin5.proto @@ -0,0 +1,368 @@ +// Terraform Plugin RPC protocol version 5.2 +// +// This file defines version 5.2 of the RPC protocol. To implement a plugin +// against this protocol, copy this definition into your own codebase and +// use protoc to generate stubs for your target language. +// +// This file will not be updated. Any minor versions of protocol 5 to follow +// should copy this file and modify the copy while maintaing backwards +// compatibility. Breaking changes, if any are required, will come +// in a subsequent major version with its own separate proto definition. +// +// Note that only the proto files included in a release tag of Terraform are +// official protocol releases. Proto files taken from other commits may include +// incomplete changes or features that did not make it into a final release. +// In all reasonable cases, plugin developers should take the proto file from +// the tag of the most recent release of Terraform, and not from the master +// branch or any other development branch. +// +syntax = "proto3"; + +package tfplugin5; + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. +message DynamicValue { + bytes msgpack = 1; + bytes json = 2; +} + +message Diagnostic { + enum Severity { + INVALID = 0; + ERROR = 1; + WARNING = 2; + } + Severity severity = 1; + string summary = 2; + string detail = 3; + AttributePath attribute = 4; +} + +message AttributePath { + message Step { + oneof selector { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + string attribute_name = 1; + // Set "element_key_*" to represent looking up an element in + // an indexable collection type. + string element_key_string = 2; + int64 element_key_int = 3; + } + } + repeated Step steps = 1; +} + +message Stop { + message Request { + } + message Response { + string Error = 1; + } +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. +message RawState { + bytes json = 1; + map flatmap = 2; +} + +enum StringKind { + PLAIN = 0; + MARKDOWN = 1; +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. 
+message Schema { + message Block { + int64 version = 1; + repeated Attribute attributes = 2; + repeated NestedBlock block_types = 3; + string description = 4; + StringKind description_kind = 5; + bool deprecated = 6; + } + + message Attribute { + string name = 1; + bytes type = 2; + string description = 3; + bool required = 4; + bool optional = 5; + bool computed = 6; + bool sensitive = 7; + StringKind description_kind = 8; + bool deprecated = 9; + } + + message NestedBlock { + enum NestingMode { + INVALID = 0; + SINGLE = 1; + LIST = 2; + SET = 3; + MAP = 4; + GROUP = 5; + } + + string type_name = 1; + Block block = 2; + NestingMode nesting = 3; + int64 min_items = 4; + int64 max_items = 5; + } + + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + int64 version = 1; + + // Block is the top level configuration block for this schema. + Block block = 2; +} + +service Provider { + //////// Information about what a provider supports/expects + rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response); + rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response); + rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response); + rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response); + rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response); + + //////// One-time initialization, called before other functions below + rpc Configure(Configure.Request) returns (Configure.Response); + + //////// Managed Resource Lifecycle + rpc ReadResource(ReadResource.Request) returns (ReadResource.Response); + rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response); + rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response); + rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response); + + rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response); + + //////// Graceful Shutdown + rpc Stop(Stop.Request) returns (Stop.Response); +} + +message GetProviderSchema { + message Request { + } + message Response { + Schema provider = 1; + map<string, Schema> resource_schemas = 2; + map<string, Schema> data_source_schemas = 3; + repeated Diagnostic diagnostics = 4; + Schema provider_meta = 5; + } +} + +message PrepareProviderConfig { + message Request { + DynamicValue config = 1; + } + message Response { + DynamicValue prepared_config = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message UpgradeResourceState { + message Request { + string type_name = 1; + + // version is the schema_version number recorded in the state file + int64 version = 2; + + // raw_state is the raw states as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. + RawState raw_state = 3; + } + message Response { + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw.
+ DynamicValue upgraded_state = 1; + + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. + repeated Diagnostic diagnostics = 2; + } +} + +message ValidateResourceTypeConfig { + message Request { + string type_name = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ValidateDataSourceConfig { + message Request { + string type_name = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message Configure { + message Request { + string terraform_version = 1; + DynamicValue config = 2; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ReadResource { + message Request { + string type_name = 1; + DynamicValue current_state = 2; + bytes private = 3; + DynamicValue provider_meta = 4; + } + message Response { + DynamicValue new_state = 1; + repeated Diagnostic diagnostics = 2; + bytes private = 3; + } +} + +message PlanResourceChange { + message Request { + string type_name = 1; + DynamicValue prior_state = 2; + DynamicValue proposed_new_state = 3; + DynamicValue config = 4; + bytes prior_private = 5; + DynamicValue provider_meta = 6; + } + + message Response { + DynamicValue planned_state = 1; + repeated AttributePath requires_replace = 2; + bytes planned_private = 3; + repeated Diagnostic diagnostics = 4; + + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 5; + } +} + +message ApplyResourceChange { + message Request { + string type_name = 1; + DynamicValue prior_state = 2; + DynamicValue planned_state = 3; + DynamicValue config = 4; + bytes planned_private = 5; + DynamicValue provider_meta = 6; + } + message Response { + DynamicValue new_state = 1; + bytes private = 2; + repeated Diagnostic diagnostics = 3; + + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. 
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + bool legacy_type_system = 4; + } +} + +message ImportResourceState { + message Request { + string type_name = 1; + string id = 2; + } + + message ImportedResource { + string type_name = 1; + DynamicValue state = 2; + bytes private = 3; + } + + message Response { + repeated ImportedResource imported_resources = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message ReadDataSource { + message Request { + string type_name = 1; + DynamicValue config = 2; + DynamicValue provider_meta = 3; + } + message Response { + DynamicValue state = 1; + repeated Diagnostic diagnostics = 2; + } +} + +service Provisioner { + rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response); + rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response); + rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response); + rpc Stop(Stop.Request) returns (Stop.Response); +} + +message GetProvisionerSchema { + message Request { + } + message Response { + Schema provisioner = 1; + repeated Diagnostic diagnostics = 2; + } +} + +message ValidateProvisionerConfig { + message Request { + DynamicValue config = 1; + } + message Response { + repeated Diagnostic diagnostics = 1; + } +} + +message ProvisionResource { + message Request { + DynamicValue config = 1; + DynamicValue connection = 2; + } + message Response { + string output = 1; + repeated Diagnostic diagnostics = 2; + } +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go new file mode 100644 index 000000000..e626faa3a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go @@ -0,0 +1,36 @@ +// The meta package provides a location to set the release version +// and any other relevant metadata for the SDK. +// +// This package should not import any other SDK packages. +package meta + +import ( + "fmt" + + version "github.com/hashicorp/go-version" +) + +// The main version number that is being run at the moment. +var SDKVersion = "2.0.1" + +// A pre-release marker for the version. If this is "" (empty string) +// then it means that it is a final release. Otherwise, this is a pre-release +// such as "dev" (in development), "beta", "rc1", etc. +var SDKPrerelease = "" + +// SemVer is an instance of version.Version. This has the secondary +// benefit of verifying during tests and init time that our version is a +// proper semantic version, which should always be the case. 
+var SemVer *version.Version + +func init() { + SemVer = version.Must(version.NewVersion(SDKVersion)) +} + +// VersionString returns the complete version string, including prerelease +func SDKVersionString() string { + if SDKPrerelease != "" { + return fmt.Sprintf("%s-%s", SDKVersion, SDKPrerelease) + } + return SDKVersion +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go new file mode 100644 index 000000000..6f30f1d1e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/debug.go @@ -0,0 +1,102 @@ +package plugin + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/signal" + "time" + + "github.com/hashicorp/go-plugin" +) + +// ReattachConfig holds the information Terraform needs to be able to attach +// itself to a provider process, so it can drive the process. +type ReattachConfig struct { + Protocol string + Pid int + Test bool + Addr ReattachConfigAddr +} + +// ReattachConfigAddr is a JSON-encoding friendly version of net.Addr. +type ReattachConfigAddr struct { + Network string + String string +} + +// DebugServe starts a plugin server in debug mode; this should only be used +// when the provider will manage its own lifecycle. It is not recommended for +// normal usage; Serve is the correct function for that. +func DebugServe(ctx context.Context, opts *ServeOpts) (ReattachConfig, <-chan struct{}, error) { + reattachCh := make(chan *plugin.ReattachConfig) + closeCh := make(chan struct{}) + + opts.TestConfig = &plugin.ServeTestConfig{ + Context: ctx, + ReattachConfigCh: reattachCh, + CloseCh: closeCh, + } + + go Serve(opts) + + var config *plugin.ReattachConfig + select { + case config = <-reattachCh: + case <-time.After(2 * time.Second): + return ReattachConfig{}, closeCh, errors.New("timeout waiting on reattach config") + } + + if config == nil { + return ReattachConfig{}, closeCh, errors.New("nil reattach config received") + } + + return ReattachConfig{ + Protocol: string(config.Protocol), + Pid: config.Pid, + Test: config.Test, + Addr: ReattachConfigAddr{ + Network: config.Addr.Network(), + String: config.Addr.String(), + }, + }, closeCh, nil +} + +// Debug starts a debug server and controls its lifecycle, printing the +// information needed for Terraform to connect to the provider to stdout. +// os.Interrupt will be captured and used to stop the server. 
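// Illustrative sketch, not part of the vendored file: a provider main() that
// opts into the debug mode described above and implemented below. The aws
// provider import, the -debug flag name, and the provider address are
// assumptions for the example; it also assumes the usual context, flag, and
// log imports alongside this plugin package.
func exampleMain() {
	debugMode := flag.Bool("debug", false, "run the provider with support for debuggers")
	flag.Parse()

	opts := &plugin.ServeOpts{ProviderFunc: aws.Provider}

	if *debugMode {
		// Debug blocks until Ctrl-C and prints the TF_REATTACH_PROVIDERS value
		// Terraform needs in order to talk to this process.
		if err := plugin.Debug(context.Background(), "registry.terraform.io/hashicorp/aws", opts); err != nil {
			log.Fatal(err)
		}
		return
	}

	plugin.Serve(opts)
}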
+func Debug(ctx context.Context, providerAddr string, opts *ServeOpts) error { + ctx, cancel := context.WithCancel(ctx) + // Ctrl-C will stop the server + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt) + defer func() { + signal.Stop(sigCh) + cancel() + }() + config, closeCh, err := DebugServe(ctx, opts) + if err != nil { + return fmt.Errorf("Error launching debug server: %w", err) + } + go func() { + select { + case <-sigCh: + cancel() + case <-ctx.Done(): + } + }() + reattachStr, err := json.Marshal(map[string]ReattachConfig{ + providerAddr: config, + }) + if err != nil { + return fmt.Errorf("Error building reattach string: %w", err) + } + + fmt.Printf("Provider server started; to attach Terraform, set TF_REATTACH_PROVIDERS to the following:\n%s\n", string(reattachStr)) + + // wait for the server to be done + <-closeCh + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/grpc_provider.go new file mode 100644 index 000000000..4a605ce3a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/grpc_provider.go @@ -0,0 +1,41 @@ +package plugin + +import ( + "context" + "errors" + "net/rpc" + + plugin "github.com/hashicorp/go-plugin" + "google.golang.org/grpc" + + proto "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5" +) + +var ( + _ plugin.GRPCPlugin = (*gRPCProviderPlugin)(nil) + _ plugin.Plugin = (*gRPCProviderPlugin)(nil) +) + +// gRPCProviderPlugin implements plugin.GRPCPlugin and plugin.Plugin for the go-plugin package. +// the only real implementation is GRPCSServer, the other methods are only satisfied +// for compatibility with go-plugin +type gRPCProviderPlugin struct { + GRPCProvider func() proto.ProviderServer +} + +func (p *gRPCProviderPlugin) Server(*plugin.MuxBroker) (interface{}, error) { + return nil, errors.New("terraform-plugin-sdk only implements grpc servers") +} + +func (p *gRPCProviderPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("terraform-plugin-sdk only implements grpc servers") +} + +func (p *gRPCProviderPlugin) GRPCClient(context.Context, *plugin.GRPCBroker, *grpc.ClientConn) (interface{}, error) { + return nil, errors.New("terraform-plugin-sdk only implements grpc servers") +} + +func (p *gRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterProviderServer(s, p.GRPCProvider()) + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go new file mode 100644 index 000000000..e5649d070 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/plugin/serve.go @@ -0,0 +1,82 @@ +package plugin + +import ( + "context" + + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" + "google.golang.org/grpc" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + grpcplugin "github.com/hashicorp/terraform-plugin-sdk/v2/internal/helper/plugin" + proto "github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfplugin5" +) + +const ( + // The constants below are the names of the plugins that can be dispensed + // from the plugin server. + ProviderPluginName = "provider" +) + +// Handshake is the HandshakeConfig used to configure clients and servers. +var Handshake = plugin.HandshakeConfig{ + // The magic cookie values should NEVER be changed. 
+ MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", + MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2", +} + +type ProviderFunc func() *schema.Provider +type GRPCProviderFunc func() proto.ProviderServer + +// ServeOpts are the configurations to serve a plugin. +type ServeOpts struct { + ProviderFunc ProviderFunc + + // Wrapped versions of the above plugins will automatically shimmed and + // added to the GRPC functions when possible. + GRPCProviderFunc GRPCProviderFunc + + // Logger is the logger that go-plugin will use. + Logger hclog.Logger + + // TestConfig should only be set when the provider is being tested; it + // will opt out of go-plugin's lifecycle management and other features, + // and will use the supplied configuration options to control the + // plugin's lifecycle and communicate connection information. See the + // go-plugin GoDoc for more information. + TestConfig *plugin.ServeTestConfig +} + +// Serve serves a plugin. This function never returns and should be the final +// function called in the main function of the plugin. +func Serve(opts *ServeOpts) { + // since the plugins may not yet be aware of the new protocol, we + // automatically wrap the plugins in the grpc shims. + if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil { + opts.GRPCProviderFunc = func() proto.ProviderServer { + return grpcplugin.NewGRPCProviderServer(opts.ProviderFunc()) + } + } + + provider := opts.GRPCProviderFunc() + plugin.Serve(&plugin.ServeConfig{ + HandshakeConfig: Handshake, + VersionedPlugins: map[int]plugin.PluginSet{ + 5: { + ProviderPluginName: &gRPCProviderPlugin{ + GRPCProvider: func() proto.ProviderServer { + return provider + }, + }, + }, + }, + GRPCServer: func(opts []grpc.ServerOption) *grpc.Server { + return grpc.NewServer(append(opts, grpc.UnaryInterceptor(func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + ctx = provider.(*grpcplugin.GRPCProviderServer).StopContext(ctx) + return handler(ctx, req) + }))...) + }, + Logger: opts.Logger, + Test: opts.TestConfig, + }) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go new file mode 100644 index 000000000..c7d82cf3d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/diff.go @@ -0,0 +1,990 @@ +package terraform + +import ( + "fmt" + "log" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/go-cty/cty" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" +) + +// diffChangeType is an enum with the kind of changes a diff has planned. +type diffChangeType byte + +const ( + diffInvalid diffChangeType = iota + diffNone + diffCreate + diffUpdate + diffDestroy + diffDestroyCreate +) + +// multiVal matches the index key to a flatmapped set, list or map +var multiVal = regexp.MustCompile(`\.(#|%)$`) + +// InstanceDiff is the diff of a resource from some state to another. +type InstanceDiff struct { + mu sync.Mutex + Attributes map[string]*ResourceAttrDiff + Destroy bool + DestroyDeposed bool + DestroyTainted bool + + // Meta is a simple K/V map that is stored in a diff and persisted to + // plans but otherwise is completely ignored by Terraform core. It is + // meant to be used for additional data a resource may want to pass through. 
+ // The value here must only contain Go primitives and collections. + Meta map[string]interface{} +} + +func (d *InstanceDiff) Lock() { d.mu.Lock() } +func (d *InstanceDiff) Unlock() { d.mu.Unlock() } + +// ApplyToValue merges the receiver into the given base value, returning a +// new value that incorporates the planned changes. The given value must +// conform to the given schema, or this method will panic. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) { + // Create an InstanceState attributes from our existing state. + // We can use this to more easily apply the diff changes. + attrs := hcl2shim.FlatmapValueFromHCL2(base) + applied, err := d.Apply(attrs, schema) + if err != nil { + return base, err + } + + val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType()) + if err != nil { + return base, err + } + + return schema.CoerceValue(val) +} + +// Apply applies the diff to the provided flatmapped attributes, +// returning the new instance attributes. +// +// This method is intended for shimming old subsystems that still use this +// legacy diff type to work with the new-style types. +func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + // We always build a new value here, even if the given diff is "empty", + // because we might be planning to create a new instance that happens + // to have no attributes set, and so we want to produce an empty object + // rather than just echoing back the null old value. + if attrs == nil { + attrs = map[string]string{} + } + + // Rather applying the diff to mutate the attrs, we'll copy new values into + // here to avoid the possibility of leaving stale values. + result := map[string]string{} + + if d.Destroy || d.DestroyDeposed || d.DestroyTainted { + return result, nil + } + + return d.applyBlockDiff(nil, attrs, schema) +} + +func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) { + result := map[string]string{} + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + // localPrefix is used to build the local result map + localPrefix := "" + if name != "" { + localPrefix = name + "." + } + + // iterate over the schema rather than the attributes, so we can handle + // different block types separately from plain attributes + for n, attrSchema := range schema.Attributes { + var err error + newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema) + + if err != nil { + return result, err + } + + for k, v := range newAttrs { + result[localPrefix+k] = v + } + } + + blockPrefix := strings.Join(path, ".") + if blockPrefix != "" { + blockPrefix += "." + } + for n, block := range schema.BlockTypes { + // we need to find the set of all keys that traverse this block + candidateKeys := map[string]bool{} + blockKey := blockPrefix + n + "." + localBlockPrefix := localPrefix + n + "." + + // we can only trust the diff for sets, since the path changes, so don't + // count existing values as candidate keys. If it turns out we're + // keeping the attributes, we will catch it down below with "keepBlock" + // after we check the set count. 
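// Illustrative sketch, not part of the vendored file: the flatmap encoding the
// apply helpers here operate on. A list block is addressed by a ".#" count and
// ordinal indexes, a map attribute by ".%" and its keys, while a set element is
// addressed by its hash, which is why existing keys cannot be trusted as
// candidates for NestingSet blocks. Attribute names and values are hypothetical.
var exampleFlatmapState = map[string]string{
	"ingress.#":           "1", // list block with one element
	"ingress.0.from_port": "443",
	"ingress.0.to_port":   "443",
	"tags.%":              "1", // map attribute with one key
	"tags.Name":           "web",
	"egress.#":            "1",
	"egress.1234567.cidr": "0.0.0.0/0", // set element keyed by hash, not position
}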
+ if block.Nesting != configschema.NestingSet { + for k := range attrs { + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + } + + for k, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. + if diff == nil { + continue + } + + if strings.HasPrefix(k, blockKey) { + nextDot := strings.Index(k[len(blockKey):], ".") + if nextDot < 0 { + continue + } + + if diff.NewRemoved { + continue + } + + nextDot += len(blockKey) + candidateKeys[k[len(blockKey):nextDot]] = true + } + } + + // check each set candidate to see if it was removed. + // we need to do this, because when entire sets are removed, they may + // have the wrong key, and ony show diffs going to "" + if block.Nesting == configschema.NestingSet { + for k := range candidateKeys { + indexPrefix := strings.Join(append(path, n, k), ".") + "." + keep := false + // now check each set element to see if it's a new diff, or one + // that we're dropping. Since we're only applying the "New" + // portion of the set, we can ignore diffs that only contain "Old" + for attr, diff := range d.Attributes { + // helper/schema should not insert nil diff values, but don't panic + // if it does. + if diff == nil { + continue + } + + if !strings.HasPrefix(attr, indexPrefix) { + continue + } + + // check for empty "count" keys + if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" { + continue + } + + // removed items don't count either + if diff.NewRemoved { + continue + } + + // this must be a diff to keep + keep = true + break + } + if !keep { + delete(candidateKeys, k) + } + } + } + + for k := range candidateKeys { + newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block) + if err != nil { + return result, err + } + + for attr, v := range newAttrs { + result[localBlockPrefix+attr] = v + } + } + + keepBlock := true + // check this block's count diff directly first, since we may not + // have candidates because it was removed and only set to "0" + if diff, ok := d.Attributes[blockKey+"#"]; ok { + if diff.New == "0" || diff.NewRemoved { + keepBlock = false + } + } + + // if there was no diff at all, then we need to keep the block attributes + if len(candidateKeys) == 0 && keepBlock { + for k, v := range attrs { + if strings.HasPrefix(k, blockKey) { + // we need the key relative to this block, so remove the + // entire prefix, then re-insert the block name. + localKey := localBlockPrefix + k[len(blockKey):] + result[localKey] = v + } + } + } + + countAddr := strings.Join(append(path, n, "#"), ".") + if countDiff, ok := d.Attributes[countAddr]; ok { + if countDiff.NewComputed { + result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue + } else { + result[localBlockPrefix+"#"] = countDiff.New + + // While sets are complete, list are not, and we may not have all the + // information to track removals. If the list was truncated, we need to + // remove the extra items from the result. 
+ if block.Nesting == configschema.NestingList && + countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue { + length, _ := strconv.Atoi(countDiff.New) + for k := range result { + if !strings.HasPrefix(k, localBlockPrefix) { + continue + } + + index := k[len(localBlockPrefix):] + nextDot := strings.Index(index, ".") + if nextDot < 1 { + continue + } + index = index[:nextDot] + i, err := strconv.Atoi(index) + if err != nil { + // this shouldn't happen since we added these + // ourself, but make note of it just in case. + log.Printf("[ERROR] bad list index in %q: %s", k, err) + continue + } + if i >= length { + delete(result, k) + } + } + } + } + } else if origCount, ok := attrs[countAddr]; ok && keepBlock { + result[localBlockPrefix+"#"] = origCount + } else { + result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) + } + } + + return result, nil +} + +func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + ty := attrSchema.Type + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): + return d.applyCollectionDiff(path, attrs, attrSchema) + case ty.IsSetType(): + return d.applySetDiff(path, attrs, attrSchema) + default: + return d.applySingleAttrDiff(path, attrs, attrSchema) + } +} + +func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + currentKey := strings.Join(path, ".") + + attr := path[len(path)-1] + + result := map[string]string{} + diff := d.Attributes[currentKey] + old, exists := attrs[currentKey] + + if diff != nil && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + // "id" must exist and not be an empty string, or it must be unknown. + // This only applied to top-level "id" fields. + if attr == "id" && len(path) == 1 { + if old == "" { + result[attr] = hcl2shim.UnknownVariableValue + } else { + result[attr] = old + } + return result, nil + } + + // attribute diffs are sometimes missed, so assume no diff means keep the + // old value + if diff == nil { + if exists { + result[attr] = old + } else { + // We need required values, so set those with an empty value. It + // must be set in the config, since if it were missing it would have + // failed validation. + if attrSchema.Required { + // we only set a missing string here, since bool or number types + // would have distinct zero value which shouldn't have been + // lost. 
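// Illustrative sketch, not part of the vendored file: a minimal end-to-end use
// of the Apply method above, assuming package-internal access to the internal
// configschema types imported by this file. Attribute names and values are
// hypothetical.
func exampleApply() {
	d := &InstanceDiff{Attributes: map[string]*ResourceAttrDiff{
		"instance_type": {Old: "t2.micro", New: "t3.micro"},
	}}
	schema := &configschema.Block{Attributes: map[string]*configschema.Attribute{
		"id":            {Type: cty.String, Computed: true},
		"instance_type": {Type: cty.String, Optional: true},
	}}
	attrs, err := d.Apply(map[string]string{"id": "i-0abc", "instance_type": "t2.micro"}, schema)
	// err == nil; attrs["instance_type"] == "t3.micro"; attrs["id"] is carried over as "i-0abc"
	_, _ = attrs, err
}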
+ if attrSchema.Type == cty.String { + result[attr] = "" + } + } + } + return result, nil + } + + // check for missmatched diff values + if exists && + old != diff.Old && + old != hcl2shim.UnknownVariableValue && + diff.Old != hcl2shim.UnknownVariableValue { + return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) + } + + if diff.NewRemoved { + // don't set anything in the new value + return map[string]string{}, nil + } + + if diff.Old == diff.New && diff.New == "" { + // this can only be a valid empty string + if attrSchema.Type == cty.String { + result[attr] = "" + } + return result, nil + } + + if attrSchema.Computed && diff.NewComputed { + result[attr] = hcl2shim.UnknownVariableValue + return result, nil + } + + result[attr] = diff.New + + return result, nil +} + +func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + result := map[string]string{} + + prefix := "" + if len(path) > 1 { + prefix = strings.Join(path[:len(path)-1], ".") + "." + } + + name := "" + if len(path) > 0 { + name = path[len(path)-1] + } + + currentKey := prefix + name + + // check the index first for special handling + for k, diff := range d.Attributes { + // check the index value, which can be set, and 0 + if k == currentKey+".#" || k == currentKey+".%" || k == currentKey { + if diff.NewRemoved { + return result, nil + } + + if diff.NewComputed { + result[k[len(prefix):]] = hcl2shim.UnknownVariableValue + return result, nil + } + + // do what the diff tells us to here, so that it's consistent with applies + if diff.New == "0" { + result[k[len(prefix):]] = "0" + return result, nil + } + } + } + + // collect all the keys from the diff and the old state + noDiff := true + keys := map[string]bool{} + for k := range d.Attributes { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noDiff = false + keys[k] = true + } + + noAttrs := true + for k := range attrs { + if !strings.HasPrefix(k, currentKey+".") { + continue + } + noAttrs = false + keys[k] = true + } + + // If there's no diff and no attrs, then there's no value at all. + // This prevents an unexpected zero-count attribute in the attributes. + if noDiff && noAttrs { + return result, nil + } + + idx := "#" + if attrSchema.Type.IsMapType() { + idx = "%" + } + + for k := range keys { + // generate an schema placeholder for the values + elSchema := &configschema.Attribute{ + Type: attrSchema.Type.ElementType(), + } + + res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema) + if err != nil { + return result, err + } + + for k, v := range res { + result[name+"."+k] = v + } + } + + // Just like in nested list blocks, for simple lists we may need to fill in + // missing empty strings. + countKey := name + "." 
+ idx + count := result[countKey] + length, _ := strconv.Atoi(count) + + if count != "" && count != hcl2shim.UnknownVariableValue && + attrSchema.Type.Equals(cty.List(cty.String)) { + // insert empty strings into missing indexes + for i := 0; i < length; i++ { + key := fmt.Sprintf("%s.%d", name, i) + if _, ok := result[key]; !ok { + result[key] = "" + } + } + } + + // now check for truncation in any type of list + if attrSchema.Type.IsListType() { + for key := range result { + if key == countKey { + continue + } + + if len(key) <= len(name)+1 { + // not sure what this is, but don't panic + continue + } + + index := key[len(name)+1:] + + // It is possible to have nested sets or maps, so look for another dot + dot := strings.Index(index, ".") + if dot > 0 { + index = index[:dot] + } + + // This shouldn't have any more dots, since the element type is only string. + num, err := strconv.Atoi(index) + if err != nil { + log.Printf("[ERROR] bad list index in %q: %s", currentKey, err) + continue + } + + if num >= length { + delete(result, key) + } + } + } + + // Fill in the count value if it wasn't present in the diff for some reason, + // or if there is no count at all. + _, countDiff := d.Attributes[countKey] + if result[countKey] == "" || (!countDiff && len(keys) != len(result)) { + result[countKey] = countFlatmapContainerValues(countKey, result) + } + + return result, nil +} + +func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + // We only need this special behavior for sets of object. + if !attrSchema.Type.ElementType().IsObjectType() { + // The normal collection apply behavior will work okay for this one, then. + return d.applyCollectionDiff(path, attrs, attrSchema) + } + + // When we're dealing with a set of an object type we actually want to + // use our normal _block type_ apply behaviors, so we'll construct ourselves + // a synthetic schema that treats the object type as a block type and + // then delegate to our block apply method. + synthSchema := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute), + } + + for name, ty := range attrSchema.Type.ElementType().AttributeTypes() { + // We can safely make everything into an attribute here because in the + // event that there are nested set attributes we'll end up back in + // here again recursively and can then deal with the next level of + // expansion. + synthSchema.Attributes[name] = &configschema.Attribute{ + Type: ty, + Optional: true, + } + } + + parentPath := path[:len(path)-1] + childName := path[len(path)-1] + containerSchema := &configschema.Block{ + BlockTypes: map[string]*configschema.NestedBlock{ + childName: { + Nesting: configschema.NestingSet, + Block: *synthSchema, + }, + }, + } + + return d.applyBlockDiff(parentPath, attrs, containerSchema) +} + +// countFlatmapContainerValues returns the number of values in the flatmapped container +// (set, map, list) indexed by key. The key argument is expected to include the +// trailing ".#", or ".%". 
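// Illustrative sketch, not part of the vendored file: the helper documented
// above counts the distinct child keys under the container prefix. The keys
// shown are hypothetical.
func exampleCountFlatmap() {
	attrs := map[string]string{
		"tags.%":           "2",
		"tags.Name":        "web",
		"tags.Environment": "prod",
	}
	n := countFlatmapContainerValues("tags.%", attrs)
	_ = n // "2"
}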
+func countFlatmapContainerValues(key string, attrs map[string]string) string { + if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + panic(fmt.Sprintf("invalid index value %q", key)) + } + + prefix := key[:len(key)-1] + items := map[string]int{} + + for k := range attrs { + if k == key { + continue + } + if !strings.HasPrefix(k, prefix) { + continue + } + + suffix := k[len(prefix):] + dot := strings.Index(suffix, ".") + if dot > 0 { + suffix = suffix[:dot] + } + + items[suffix]++ + } + return strconv.Itoa(len(items)) +} + +// ResourceAttrDiff is the diff of a single attribute of a resource. +type ResourceAttrDiff struct { + Old string // Old Value + New string // New Value + NewComputed bool // True if new value is computed (unknown currently) + NewRemoved bool // True if this attribute is being removed + NewExtra interface{} // Extra information for the provider + RequiresNew bool // True if change requires new resource + Sensitive bool // True if the data should not be displayed in UI output + Type diffAttrType +} + +func (d *ResourceAttrDiff) GoString() string { + return fmt.Sprintf("*%#v", *d) +} + +// DiffAttrType is an enum type that says whether a resource attribute +// diff is an input attribute (comes from the configuration) or an +// output attribute (comes as a result of applying the configuration). An +// example input would be "ami" for AWS and an example output would be +// "private_ip". +type diffAttrType byte + +func NewInstanceDiff() *InstanceDiff { + return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} +} + +// ChangeType returns the diffChangeType represented by the diff +// for this single instance. +func (d *InstanceDiff) ChangeType() diffChangeType { + if d.Empty() { + return diffNone + } + + if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { + return diffDestroyCreate + } + + if d.GetDestroy() || d.GetDestroyDeposed() { + return diffDestroy + } + + if d.RequiresNew() { + return diffCreate + } + + return diffUpdate +} + +// Empty returns true if this diff encapsulates no changes. +func (d *InstanceDiff) Empty() bool { + if d == nil { + return true + } + + d.mu.Lock() + defer d.mu.Unlock() + return !d.Destroy && + !d.DestroyTainted && + !d.DestroyDeposed && + len(d.Attributes) == 0 +} + +// Equal compares two diffs for exact equality. +// +// This is different from the Same comparison that is supported which +// checks for operation equality taking into account computed values. Equal +// instead checks for exact equality. +// TODO: investigate why removing this unused method causes panic in tests +func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool { + // If one is nil, they must both be nil + if d == nil || d2 == nil { + return d == d2 + } + + // Use DeepEqual + return reflect.DeepEqual(d, d2) +} + +func (d *InstanceDiff) GoString() string { + return fmt.Sprintf("*%#v", InstanceDiff{ + Attributes: d.Attributes, + Destroy: d.Destroy, + DestroyTainted: d.DestroyTainted, + DestroyDeposed: d.DestroyDeposed, + }) +} + +// RequiresNew returns true if the diff requires the creation of a new +// resource (implying the destruction of the old). 
+func (d *InstanceDiff) RequiresNew() bool { + if d == nil { + return false + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.requiresNew() +} + +func (d *InstanceDiff) requiresNew() bool { + if d == nil { + return false + } + + if d.DestroyTainted { + return true + } + + for _, rd := range d.Attributes { + if rd != nil && rd.RequiresNew { + return true + } + } + + return false +} + +func (d *InstanceDiff) GetDestroyDeposed() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyDeposed +} + +func (d *InstanceDiff) GetDestroyTainted() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyTainted +} + +func (d *InstanceDiff) GetDestroy() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.Destroy +} + +func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { + d.mu.Lock() + defer d.mu.Unlock() + + attr, ok := d.Attributes[key] + return attr, ok +} + +// Safely copies the Attributes map +func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { + d.mu.Lock() + defer d.mu.Unlock() + + attrs := make(map[string]*ResourceAttrDiff) + for k, v := range d.Attributes { + attrs[k] = v + } + + return attrs +} + +// Same checks whether or not two InstanceDiff's are the "same". When +// we say "same", it is not necessarily exactly equal. Instead, it is +// just checking that the same attributes are changing, a destroy +// isn't suddenly happening, etc. +func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { + // we can safely compare the pointers without a lock + switch { + case d == nil && d2 == nil: + return true, "" + case d == nil || d2 == nil: + return false, "one nil" + case d == d2: + return true, "" + } + + d.mu.Lock() + defer d.mu.Unlock() + + // If we're going from requiring new to NOT requiring new, then we have + // to see if all required news were computed. If so, it is allowed since + // computed may also mean "same value and therefore not new". + oldNew := d.requiresNew() + newNew := d2.RequiresNew() + if oldNew && !newNew { + oldNew = false + + // This section builds a list of ignorable attributes for requiresNew + // by removing off any elements of collections going to zero elements. + // For collections going to zero, they may not exist at all in the + // new diff (and hence RequiresNew == false). + ignoreAttrs := make(map[string]struct{}) + for k, diffOld := range d.Attributes { + if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") { + continue + } + + // This case is in here as a protection measure. The bug that this + // code originally fixed (GH-11349) didn't have to deal with computed + // so I'm not 100% sure what the correct behavior is. Best to leave + // the old behavior. + if diffOld.NewComputed { + continue + } + + // We're looking for the case a map goes to exactly 0. + if diffOld.New != "0" { + continue + } + + // Found it! Ignore all of these. The prefix here is stripping + // off the "%" so it is just "k." + prefix := k[:len(k)-1] + for k2 := range d.Attributes { + if strings.HasPrefix(k2, prefix) { + ignoreAttrs[k2] = struct{}{} + } + } + } + + for k, rd := range d.Attributes { + if _, ok := ignoreAttrs[k]; ok { + continue + } + + // If the field is requires new and NOT computed, then what + // we have is a diff mismatch for sure. We set that the old + // diff does REQUIRE a ForceNew. 
+ if rd != nil && rd.RequiresNew && !rd.NewComputed { + oldNew = true + break + } + } + } + + if oldNew != newNew { + return false, fmt.Sprintf( + "diff RequiresNew; old: %t, new: %t", oldNew, newNew) + } + + // Verify that destroy matches. The second boolean here allows us to + // have mismatching Destroy if we're moving from RequiresNew true + // to false above. Therefore, the second boolean will only pass if + // we're moving from Destroy: true to false as well. + if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew { + return false, fmt.Sprintf( + "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) + } + + // Go through the old diff and make sure the new diff has all the + // same attributes. To start, build up the check map to be all the keys. + checkOld := make(map[string]struct{}) + checkNew := make(map[string]struct{}) + for k := range d.Attributes { + checkOld[k] = struct{}{} + } + for k := range d2.CopyAttributes() { + checkNew[k] = struct{}{} + } + + // Make an ordered list so we are sure the approximated hashes are left + // to process at the end of the loop + keys := make([]string, 0, len(d.Attributes)) + for k := range d.Attributes { + keys = append(keys, k) + } + sort.StringSlice(keys).Sort() + + for _, k := range keys { + diffOld := d.Attributes[k] + + if _, ok := checkOld[k]; !ok { + // We're not checking this key for whatever reason (see where + // check is modified). + continue + } + + // Remove this key since we'll never hit it again + delete(checkOld, k) + delete(checkNew, k) + + _, ok := d2.GetAttribute(k) + if !ok { + // If there's no new attribute, and the old diff expected the attribute + // to be removed, that's just fine. + if diffOld.NewRemoved { + continue + } + + // If the last diff was a computed value then the absense of + // that value is allowed since it may mean the value ended up + // being the same. + if diffOld.NewComputed { + ok = true + } + + // No exact match, but maybe this is a set containing computed + // values. So check if there is an approximate hash in the key + // and if so, try to match the key. + if strings.Contains(k, "~") { + parts := strings.Split(k, ".") + parts2 := append([]string(nil), parts...) + + re := regexp.MustCompile(`^~\d+$`) + for i, part := range parts { + if re.MatchString(part) { + // we're going to consider this the base of a + // computed hash, and remove all longer matching fields + ok = true + + parts2[i] = `\d+` + parts2 = parts2[:i+1] + break + } + } + + re, err := regexp.Compile("^" + strings.Join(parts2, `\.`)) + if err != nil { + return false, fmt.Sprintf("regexp failed to compile; err: %#v", err) + } + + for k2 := range checkNew { + if re.MatchString(k2) { + delete(checkNew, k2) + } + } + } + + // This is a little tricky, but when a diff contains a computed + // list, set, or map that can only be interpolated after the apply + // command has created the dependent resources, it could turn out + // that the result is actually the same as the existing state which + // would remove the key from the diff. + if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + // Similarly, in a RequiresNew scenario, a list that shows up in the plan + // diff can disappear from the apply diff, which is calculated from an + // empty state. 
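// Illustrative sketch, not part of the vendored file: the special cases above
// mean a plan-time diff whose computed count key vanished at apply time still
// counts as the "same" diff. The attribute name is hypothetical.
func exampleSame() {
	planned := &InstanceDiff{Attributes: map[string]*ResourceAttrDiff{
		"security_groups.#": {NewComputed: true},
	}}
	applied := NewInstanceDiff() // the count resolved and produced no attribute diffs
	same, reason := planned.Same(applied)
	_, _ = same, reason // same == true, reason == ""
}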
+ if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + ok = true + } + + if !ok { + return false, fmt.Sprintf("attribute mismatch: %s", k) + } + } + + // search for the suffix of the base of a [computed] map, list or set. + match := multiVal.FindStringSubmatch(k) + + if diffOld.NewComputed && len(match) == 2 { + matchLen := len(match[1]) + + // This is a computed list, set, or map, so remove any keys with + // this prefix from the check list. + kprefix := k[:len(k)-matchLen] + for k2 := range checkOld { + if strings.HasPrefix(k2, kprefix) { + delete(checkOld, k2) + } + } + for k2 := range checkNew { + if strings.HasPrefix(k2, kprefix) { + delete(checkNew, k2) + } + } + } + + // We don't compare the values because we can't currently actually + // guarantee to generate the same value two two diffs created from + // the same state+config: we have some pesky interpolation functions + // that do not behave as pure functions (uuid, timestamp) and so they + // can be different each time a diff is produced. + // FIXME: Re-organize our config handling so that we don't re-evaluate + // expressions when we produce a second comparison diff during + // apply (for EvalCompareDiff). + } + + // Check for leftover attributes + if len(checkNew) > 0 { + extras := make([]string, 0, len(checkNew)) + for attr := range checkNew { + extras = append(extras, attr) + } + return false, + fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", ")) + } + + return true, "" +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go new file mode 100644 index 000000000..b01e5a48d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype.go @@ -0,0 +1,13 @@ +package terraform + +//go:generate go run golang.org/x/tools/cmd/stringer -type=instanceType instancetype.go + +// instanceType is an enum of the various types of instances store in the State +type instanceType int + +const ( + typeInvalid instanceType = iota + typePrimary + typeTainted + typeDeposed +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go new file mode 100644 index 000000000..782ef90c0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/instancetype_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=instanceType instancetype.go"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[typeInvalid-0] + _ = x[typePrimary-1] + _ = x[typeTainted-2] + _ = x[typeDeposed-3] +} + +const _instanceType_name = "typeInvalidtypePrimarytypeTaintedtypeDeposed" + +var _instanceType_index = [...]uint8{0, 11, 22, 33, 44} + +func (i instanceType) String() string { + if i < 0 || i >= instanceType(len(_instanceType_index)-1) { + return "instanceType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _instanceType_name[_instanceType_index[i]:_instanceType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go new file mode 100644 index 000000000..11b63de8a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource.go @@ -0,0 +1,333 @@ +package terraform + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/go-cty/cty" + "github.com/mitchellh/copystructure" + "github.com/mitchellh/reflectwalk" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" +) + +// InstanceInfo is used to hold information about the instance and/or +// resource being modified. +type InstanceInfo struct { + // Id is a unique name to represent this instance. This is not related + // to InstanceState.ID in any way. + Id string + + // ModulePath is the complete path of the module containing this + // instance. + ModulePath []string + + // Type is the resource type of this instance + Type string +} + +// ResourceConfig is a legacy type that was formerly used to represent +// interpolatable configuration blocks. It is now only used to shim to old +// APIs that still use this type, via NewResourceConfigShimmed. +type ResourceConfig struct { + ComputedKeys []string + Raw map[string]interface{} + Config map[string]interface{} +} + +// NewResourceConfigRaw constructs a ResourceConfig whose content is exactly +// the given value. +// +// The given value may contain hcl2shim.UnknownVariableValue to signal that +// something is computed, but it must not contain unprocessed interpolation +// sequences as we might've seen in Terraform v0.11 and prior. +func NewResourceConfigRaw(raw map[string]interface{}) *ResourceConfig { + v := hcl2shim.HCL2ValueFromConfigValue(raw) + + // This is a little weird but we round-trip the value through the hcl2shim + // package here for two reasons: firstly, because that reduces the risk + // of it including something unlike what NewResourceConfigShimmed would + // produce, and secondly because it creates a copy of "raw" just in case + // something is relying on the fact that in the old world the raw and + // config maps were always distinct, and thus you could in principle mutate + // one without affecting the other. (I sure hope nobody was doing that, though!) + cfg := hcl2shim.ConfigValueFromHCL2(v).(map[string]interface{}) + + return &ResourceConfig{ + Raw: raw, + Config: cfg, + + ComputedKeys: newResourceConfigShimmedComputedKeys(v, ""), + } +} + +// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy +// ResourceConfig object, so that it can be passed to older APIs that expect +// this wrapping. +// +// The returned ResourceConfig is already interpolated and cannot be +// re-interpolated. It is, therefore, useful only to functions that expect +// an already-populated ResourceConfig which they then treat as read-only. 
+// +// If the given value is not of an object type that conforms to the given +// schema then this function will panic. +func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig { + if !val.Type().IsObjectType() { + panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type())) + } + ret := &ResourceConfig{} + + legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema) + if legacyVal != nil { + ret.Config = legacyVal + + // Now we need to walk through our structure and find any unknown values, + // producing the separate list ComputedKeys to represent these. We use the + // schema here so that we can preserve the expected invariant + // that an attribute is always either wholly known or wholly unknown, while + // a child block can be partially unknown. + ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "") + } else { + ret.Config = make(map[string]interface{}) + } + ret.Raw = ret.Config + + return ret +} + +// Record the any config values in ComputedKeys. This field had been unused in +// helper/schema, but in the new protocol we're using this so that the SDK can +// now handle having an unknown collection. The legacy diff code doesn't +// properly handle the unknown, because it can't be expressed in the same way +// between the config and diff. +func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string { + var ret []string + ty := val.Type() + + if val.IsNull() { + return ret + } + + if !val.IsKnown() { + // we shouldn't have an entirely unknown resource, but prevent empty + // strings just in case + if len(path) > 0 { + ret = append(ret, path) + } + return ret + } + + if path != "" { + path += "." + } + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsSetType(): + i := 0 + for it := val.ElementIterator(); it.Next(); i++ { + _, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i)) + ret = append(ret, keys...) + } + + case ty.IsMapType(), ty.IsObjectType(): + for it := val.ElementIterator(); it.Next(); { + subK, subVal := it.Element() + keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString())) + ret = append(ret, keys...) + } + } + + return ret +} + +// DeepCopy performs a deep copy of the configuration. This makes it safe +// to modify any of the structures that are part of the resource config without +// affecting the original configuration. +func (c *ResourceConfig) DeepCopy() *ResourceConfig { + // DeepCopying a nil should return a nil to avoid panics + if c == nil { + return nil + } + + // Copy, this will copy all the exported attributes + copy, err := copystructure.Config{Lock: true}.Copy(c) + if err != nil { + panic(err) + } + + // Force the type + result := copy.(*ResourceConfig) + + return result +} + +// Equal checks the equality of two resource configs. +func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool { + // If either are nil, then they're only equal if they're both nil + if c == nil || c2 == nil { + return c == c2 + } + + // Sort the computed keys so they're deterministic + sort.Strings(c.ComputedKeys) + sort.Strings(c2.ComputedKeys) + + // Two resource configs if their exported properties are equal. + // We don't compare "raw" because it is never used again after + // initialization and for all intents and purposes they are equal + // if the exported properties are equal. 
+ check := [][2]interface{}{ + {c.ComputedKeys, c2.ComputedKeys}, + {c.Raw, c2.Raw}, + {c.Config, c2.Config}, + } + for _, pair := range check { + if !reflect.DeepEqual(pair[0], pair[1]) { + return false + } + } + + return true +} + +// Get looks up a configuration value by key and returns the value. +// +// The second return value is true if the get was successful. Get will +// return the raw value if the key is computed, so you should pair this +// with IsComputed. +func (c *ResourceConfig) Get(k string) (interface{}, bool) { + // We aim to get a value from the configuration. If it is computed, + // then we return the pure raw value. + source := c.Config + if c.IsComputed(k) { + source = c.Raw + } + + return c.get(k, source) +} + +// GetRaw looks up a configuration value by key and returns the value, +// from the raw, uninterpolated config. +// +// The second return value is true if the get was successful. Get will +// not succeed if the value is being computed. +func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) { + return c.get(k, c.Raw) +} + +// IsComputed returns whether the given key is computed or not. +func (c *ResourceConfig) IsComputed(k string) bool { + // The next thing we do is check the config if we get a computed + // value out of it. + v, ok := c.get(k, c.Config) + if !ok { + return false + } + + // If value is nil, then it isn't computed + if v == nil { + return false + } + + // Test if the value contains an unknown value + var w unknownCheckWalker + if err := reflectwalk.Walk(v, &w); err != nil { + panic(err) + } + + return w.Unknown +} + +func (c *ResourceConfig) get( + k string, raw map[string]interface{}) (interface{}, bool) { + parts := strings.Split(k, ".") + if len(parts) == 1 && parts[0] == "" { + parts = nil + } + + var current interface{} = raw + var previous interface{} = nil + for i, part := range parts { + if current == nil { + return nil, false + } + + cv := reflect.ValueOf(current) + switch cv.Kind() { + case reflect.Map: + previous = current + v := cv.MapIndex(reflect.ValueOf(part)) + if !v.IsValid() { + if i > 0 && i != (len(parts)-1) { + tryKey := strings.Join(parts[i:], ".") + v := cv.MapIndex(reflect.ValueOf(tryKey)) + if !v.IsValid() { + return nil, false + } + + return v.Interface(), true + } + + return nil, false + } + + current = v.Interface() + case reflect.Slice: + previous = current + + if part == "#" { + // If any value in a list is computed, this whole thing + // is computed and we can't read any part of it. + for i := 0; i < cv.Len(); i++ { + if v := cv.Index(i).Interface(); v == hcl2shim.UnknownVariableValue { + return v, true + } + } + + current = cv.Len() + } else { + i, err := strconv.ParseInt(part, 0, 0) + if err != nil { + return nil, false + } + if int(i) < 0 || int(i) >= cv.Len() { + return nil, false + } + current = cv.Index(int(i)).Interface() + } + case reflect.String: + // This happens when map keys contain "." and have a common + // prefix so were split as path components above. 
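// Illustrative sketch, not part of the vendored file: dotted-key lookups
// against the shimmed config, including the "#" length convention handled in
// the slice case above. Keys and values are hypothetical.
func exampleGet() {
	rc := NewResourceConfigRaw(map[string]interface{}{
		"tags":  map[string]interface{}{"Name": "web"},
		"ports": []interface{}{"80", "443"},
	})
	name, ok := rc.Get("tags.Name") // "web", true
	count, _ := rc.Get("ports.#")   // 2, the list length
	_, _, _ = name, ok, count
}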
+ actualKey := strings.Join(parts[i-1:], ".") + if prevMap, ok := previous.(map[string]interface{}); ok { + v, ok := prevMap[actualKey] + return v, ok + } + + return nil, false + default: + panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) + } + } + + return current, true +} + +// unknownCheckWalker +type unknownCheckWalker struct { + Unknown bool +} + +// TODO: investigate why deleting this causes odd runtime test failures +// must be some kind of interface implementation +func (w *unknownCheckWalker) Primitive(v reflect.Value) error { + if v.Interface() == hcl2shim.UnknownVariableValue { + w.Unknown = true + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go new file mode 100644 index 000000000..ec2665d31 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_address.go @@ -0,0 +1,226 @@ +package terraform + +import ( + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// resourceAddress is a way of identifying an individual resource (or, +// eventually, a subset of resources) within the state. It is used for Targets. +type resourceAddress struct { + // Addresses a resource falling somewhere in the module path + // When specified alone, addresses all resources within a module path + Path []string + + // Addresses a specific resource that occurs in a list + Index int + + InstanceType instanceType + InstanceTypeSet bool + Name string + Type string + Mode ResourceMode // significant only if InstanceTypeSet +} + +// String outputs the address that parses into this address. +func (r *resourceAddress) String() string { + var result []string + for _, p := range r.Path { + result = append(result, "module", p) + } + + switch r.Mode { + case ManagedResourceMode: + // nothing to do + case DataResourceMode: + result = append(result, "data") + default: + panic(fmt.Errorf("unsupported resource mode %s", r.Mode)) + } + + if r.Type != "" { + result = append(result, r.Type) + } + + if r.Name != "" { + name := r.Name + if r.InstanceTypeSet { + switch r.InstanceType { + case typePrimary: + name += ".primary" + case typeDeposed: + name += ".deposed" + case typeTainted: + name += ".tainted" + } + } + + if r.Index >= 0 { + name += fmt.Sprintf("[%d]", r.Index) + } + result = append(result, name) + } + + return strings.Join(result, ".") +} + +func parseResourceAddress(s string) (*resourceAddress, error) { + matches, err := tokenizeResourceAddress(s) + if err != nil { + return nil, err + } + mode := ManagedResourceMode + if matches["data_prefix"] != "" { + mode = DataResourceMode + } + resourceIndex, err := parseResourceIndex(matches["index"]) + if err != nil { + return nil, err + } + instanceType, err := parseInstanceType(matches["instance_type"]) + if err != nil { + return nil, err + } + path := parseResourcePath(matches["path"]) + + // not allowed to say "data." 
without a type following + if mode == DataResourceMode && matches["type"] == "" { + return nil, fmt.Errorf( + "invalid resource address %q: must target specific data instance", + s, + ) + } + + return &resourceAddress{ + Path: path, + Index: resourceIndex, + InstanceType: instanceType, + InstanceTypeSet: matches["instance_type"] != "", + Name: matches["name"], + Type: matches["type"], + Mode: mode, + }, nil +} + +// Less returns true if and only if the receiver should be sorted before +// the given address when presenting a list of resource addresses to +// an end-user. +// +// This sort uses lexicographic sorting for most components, but uses +// numeric sort for indices, thus causing index 10 to sort after +// index 9, rather than after index 1. +func (addr *resourceAddress) Less(other *resourceAddress) bool { + + switch { + + case len(addr.Path) != len(other.Path): + return len(addr.Path) < len(other.Path) + + case !reflect.DeepEqual(addr.Path, other.Path): + // If the two paths are the same length but don't match, we'll just + // cheat and compare the string forms since it's easier than + // comparing all of the path segments in turn, and lexicographic + // comparison is correct for the module path portion. + addrStr := addr.String() + otherStr := other.String() + return addrStr < otherStr + + case addr.Mode != other.Mode: + return addr.Mode == DataResourceMode + + case addr.Type != other.Type: + return addr.Type < other.Type + + case addr.Name != other.Name: + return addr.Name < other.Name + + case addr.Index != other.Index: + // Since "Index" is -1 for an un-indexed address, this also conveniently + // sorts unindexed addresses before indexed ones, should they both + // appear for some reason. + return addr.Index < other.Index + + case addr.InstanceTypeSet != other.InstanceTypeSet: + return !addr.InstanceTypeSet + + case addr.InstanceType != other.InstanceType: + // InstanceType is actually an enum, so this is just an arbitrary + // sort based on the enum numeric values, and thus not particularly + // meaningful. 
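// Illustrative sketch, not part of the vendored file: round-tripping a target
// address through parseResourceAddress (above) and String. The module and
// resource names are hypothetical.
func exampleResourceAddress() {
	addr, err := parseResourceAddress("module.network.aws_instance.web.tainted[1]")
	if err != nil {
		panic(err)
	}
	// addr.Path == []string{"network"}, addr.Type == "aws_instance",
	// addr.Name == "web", addr.InstanceType == typeTainted, addr.Index == 1
	fmt.Println(addr.String()) // module.network.aws_instance.web.tainted[1]
}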
+ return addr.InstanceType < other.InstanceType
+
+ default:
+ return false
+
+ }
+}
+
+func parseResourceIndex(s string) (int, error) {
+ if s == "" {
+ return -1, nil
+ }
+ return strconv.Atoi(s)
+}
+
+func parseResourcePath(s string) []string {
+ if s == "" {
+ return nil
+ }
+ parts := strings.Split(s, ".")
+ path := make([]string, 0, len(parts))
+ for _, s := range parts {
+ // Due to the limitations of the regexp match below, the path match has
+ // some noise in it we have to filter out :|
+ if s == "" || s == "module" {
+ continue
+ }
+ path = append(path, s)
+ }
+ return path
+}
+
+func parseInstanceType(s string) (instanceType, error) {
+ switch s {
+ case "", "primary":
+ return typePrimary, nil
+ case "deposed":
+ return typeDeposed, nil
+ case "tainted":
+ return typeTainted, nil
+ default:
+ return typeInvalid, fmt.Errorf("Unexpected value for instanceType field: %q", s)
+ }
+}
+
+func tokenizeResourceAddress(s string) (map[string]string, error) {
+ // Example of portions of the regexp below using the
+ // string "aws_instance.web.tainted[1]"
+ re := regexp.MustCompile(`\A` +
+ // "module.foo.module.bar" (optional)
+ `(?P<path>(?:module\.(?P<module_name>[^.]+)\.?)*)` +
+ // possibly "data.", if targeting is a data resource
+ `(?P<data_prefix>(?:data\.)?)` +
+ // "aws_instance.web" (optional when module path specified)
+ `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
+ // "tainted" (optional, omission implies: "primary")
+ `(?:\.(?P<instance_type>\w+))?` +
+ // "1" (optional, omission implies: "0")
+ `(?:\[(?P<index>\d+)\])?` +
+ `\z`)
+
+ groupNames := re.SubexpNames()
+ rawMatches := re.FindAllStringSubmatch(s, -1)
+ if len(rawMatches) != 1 {
+ return nil, fmt.Errorf("invalid resource address %q", s)
+ }
+
+ matches := make(map[string]string)
+ for i, m := range rawMatches[0] {
+ matches[groupNames[i]] = m
+ }
+
+ return matches, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go
new file mode 100644
index 000000000..c83643a65
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode.go
@@ -0,0 +1,12 @@
+package terraform
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
+
+// ResourceMode is deprecated, use addrs.ResourceMode instead.
+// It has been preserved for backwards compatibility.
+type ResourceMode int
+
+const (
+ ManagedResourceMode ResourceMode = iota
+ DataResourceMode
+)
diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode_string.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode_string.go
new file mode 100644
index 000000000..ba84346a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_mode_string.go
@@ -0,0 +1,24 @@
+// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT.
+
+package terraform
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[ManagedResourceMode-0] + _ = x[DataResourceMode-1] +} + +const _ResourceMode_name = "ManagedResourceModeDataResourceMode" + +var _ResourceMode_index = [...]uint8{0, 19, 35} + +func (i ResourceMode) String() string { + if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { + return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go new file mode 100644 index 000000000..ece8fc660 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/resource_provider.go @@ -0,0 +1,26 @@ +package terraform + +// ResourceType is a type of resource that a resource provider can manage. +type ResourceType struct { + Name string // Name of the resource, example "instance" (no provider prefix) + Importable bool // Whether this resource supports importing + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} + +// DataSource is a data source that a resource provider implements. +type DataSource struct { + Name string + + // SchemaAvailable is set if the provider supports the ProviderSchema, + // ResourceTypeSchema and DataSourceSchema methods. Although it is + // included on each resource type, it's actually a provider-wide setting + // that's smuggled here only because that avoids a breaking change to + // the plugin protocol. + SchemaAvailable bool +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go new file mode 100644 index 000000000..07e5a84fa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/schemas.go @@ -0,0 +1,26 @@ +package terraform + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/configschema" +) + +// ProviderSchema represents the schema for a provider's own configuration +// and the configuration for some or all of its resources and data sources. +// +// The completeness of this structure depends on how it was constructed. +// When constructed for a configuration, it will generally include only +// resource types and data sources used by that configuration. +type ProviderSchema struct { + Provider *configschema.Block + ResourceTypes map[string]*configschema.Block + DataSources map[string]*configschema.Block + + ResourceTypeSchemaVersions map[string]uint64 +} + +// ProviderSchemaRequest is used to describe to a ResourceProvider which +// aspects of schema are required, when calling the GetSchema method. 
+type ProviderSchemaRequest struct { + ResourceTypes []string + DataSources []string +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go new file mode 100644 index 000000000..87f7610a9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state.go @@ -0,0 +1,1685 @@ +package terraform + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" + + "github.com/hashicorp/go-cty/cty" + multierror "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + "github.com/mitchellh/copystructure" + + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/addrs" + "github.com/hashicorp/terraform-plugin-sdk/v2/internal/configs/hcl2shim" +) + +const ( + // StateVersion is the current version for our state file + stateVersion = 3 +) + +// rootModulePath is the path of the root module +var rootModulePath = []string{"root"} + +// normalizeModulePath transforms a legacy module path (which may or may not +// have a redundant "root" label at the start of it) into an +// addrs.ModuleInstance representing the same module. +// +// For legacy reasons, different parts of Terraform disagree about whether the +// root module has the path []string{} or []string{"root"}, and so this +// function accepts both and trims off the "root". An implication of this is +// that it's not possible to actually have a module call in the root module +// that is itself named "root", since that would be ambiguous. +// +// normalizeModulePath takes a raw module path and returns a path that +// has the rootModulePath prepended to it. If I could go back in time I +// would've never had a rootModulePath (empty path would be root). We can +// still fix this but thats a big refactor that my branch doesn't make sense +// for. Instead, this function normalizes paths. +func normalizeModulePath(p []string) addrs.ModuleInstance { + // FIXME: Remove this once everyone is using addrs.ModuleInstance. + + if len(p) > 0 && p[0] == "root" { + p = p[1:] + } + + ret := make(addrs.ModuleInstance, len(p)) + for i, name := range p { + // For now we don't actually support modules with multiple instances + // identified by keys, so we just treat every path element as a + // step with no key. + ret[i] = addrs.ModuleInstanceStep{ + Name: name, + } + } + return ret +} + +// State keeps track of a snapshot state-of-the-world that Terraform +// can use to keep track of what real world resources it is actually +// managing. +type State struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. 
+ Remote *RemoteState `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *BackendState `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*ModuleState `json:"modules"` + + mu sync.Mutex + + // IsBinaryDrivenTest is a special flag that assists with a binary driver + // heuristic, it should not be set externally + IsBinaryDrivenTest bool +} + +func (s *State) Lock() { s.mu.Lock() } +func (s *State) Unlock() { s.mu.Unlock() } + +// NewState is used to initialize a blank state +func NewState() *State { + s := &State{} + s.init() + return s +} + +// Children returns the ModuleStates that are direct children of +// the given path. If the path is "root", for example, then children +// returned might be "root.child", but not "root.child.grandchild". +func (s *State) Children(path []string) []*ModuleState { + s.Lock() + defer s.Unlock() + // TODO: test + + return s.children(path) +} + +func (s *State) children(path []string) []*ModuleState { + result := make([]*ModuleState, 0) + for _, m := range s.Modules { + if m == nil { + continue + } + + if len(m.Path) != len(path)+1 { + continue + } + if !reflect.DeepEqual(path, m.Path[:len(path)]) { + continue + } + + result = append(result, m) + } + + return result +} + +// AddModule adds the module with the given path to the state. +// +// This should be the preferred method to add module states since it +// allows us to optimize lookups later as well as control sorting. +func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState { + s.Lock() + defer s.Unlock() + + return s.addModule(path) +} + +func (s *State) addModule(path addrs.ModuleInstance) *ModuleState { + // check if the module exists first + m := s.moduleByPath(path) + if m != nil { + return m + } + + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + // For the purposes of state, the legacy address format also includes + // a redundant extra prefix element "root". It is important to include + // this because the "prune" method will remove any module that has a + // path length less than one, and other parts of the state code will + // trim off the first element indiscriminately. + legacyPath := make([]string, len(path)+1) + legacyPath[0] = "root" + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. + panic("state cannot represent modules with count or for_each keys") + } + + legacyPath[i+1] = step.Name + } + + m = &ModuleState{Path: legacyPath} + m.init() + s.Modules = append(s.Modules, m) + s.sort() + return m +} + +// ModuleByPath is used to lookup the module state for the given path. +// This should be the preferred lookup mechanism as it allows for future +// lookup optimizations. 
+func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState { + if s == nil { + return nil + } + s.Lock() + defer s.Unlock() + + return s.moduleByPath(path) +} + +func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState { + for _, mod := range s.Modules { + if mod == nil { + continue + } + if mod.Path == nil { + panic("missing module path") + } + modPath := normalizeModulePath(mod.Path) + if modPath.String() == path.String() { + return mod + } + } + return nil +} + +// Empty returns true if the state is empty. +func (s *State) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return len(s.Modules) == 0 +} + +// HasResources returns true if the state contains any resources. +// +// This is similar to !s.Empty, but returns true also in the case where the +// state has modules but all of them are devoid of resources. +func (s *State) HasResources() bool { + if s.Empty() { + return false + } + + for _, mod := range s.Modules { + if len(mod.Resources) > 0 { + return true + } + } + + return false +} + +// IsRemote returns true if State represents a state that exists and is +// remote. +func (s *State) IsRemote() bool { + if s == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Remote == nil { + return false + } + if s.Remote.Type == "" { + return false + } + + return true +} + +// Validate validates the integrity of this state file. +// +// Certain properties of the statefile are expected by Terraform in order +// to behave properly. The core of Terraform will assume that once it +// receives a State structure that it has been validated. This validation +// check should be called to ensure that. +// +// If this returns an error, then the user should be notified. The error +// response will include detailed information on the nature of the error. +func (s *State) Validate() error { + s.Lock() + defer s.Unlock() + + var result error + + // !!!! FOR DEVELOPERS !!!! + // + // Any errors returned from this Validate function will BLOCK TERRAFORM + // from loading a state file. Therefore, this should only contain checks + // that are only resolvable through manual intervention. + // + // !!!! FOR DEVELOPERS !!!! + + // Make sure there are no duplicate module states. We open a new + // block here so we can use basic variable names and future validations + // can do the same. + { + found := make(map[string]struct{}) + for _, ms := range s.Modules { + if ms == nil { + continue + } + + key := strings.Join(ms.Path, ".") + if _, ok := found[key]; ok { + result = multierror.Append(result, fmt.Errorf( + strings.TrimSpace(stateValidateErrMultiModule), key)) + continue + } + + found[key] = struct{}{} + } + } + + return result +} + +// Remove removes the item in the state at the given address, returning +// any errors that may have occurred. +// +// If the address references a module state or resource, it will delete +// all children as well. To check what will be deleted, use a StateFilter +// first. +func (s *State) Remove(addr ...string) error { + s.Lock() + defer s.Unlock() + + // Filter out what we need to delete + filter := &stateFilter{State: s} + results, err := filter.filter(addr...) + if err != nil { + return err + } + + // If we have no results, just exit early, we're not going to do anything. + // While what happens below is fairly fast, this is an important early + // exit since the prune below might modify the state more and we don't + // want to modify the state if we don't have to. 
+ if len(results) == 0 { + return nil + } + + // Go through each result and grab what we need + removed := make(map[interface{}]struct{}) + for _, r := range results { + // Convert the path to our own type + path := append([]string{"root"}, r.Path...) + + // If we removed this already, then ignore + if _, ok := removed[r.Value]; ok { + continue + } + + // If we removed the parent already, then ignore + if r.Parent != nil { + if _, ok := removed[r.Parent.Value]; ok { + continue + } + } + + // Add this to the removed list + removed[r.Value] = struct{}{} + + switch v := r.Value.(type) { + case *ModuleState: + s.removeModule(path, v) + case *ResourceState: + s.removeResource(path, v) + case *InstanceState: + s.removeInstance(path, r.Parent.Value.(*ResourceState), v) + default: + return fmt.Errorf("unknown type to delete: %T", r.Value) + } + } + + // Prune since the removal functions often do the bare minimum to + // remove a thing and may leave around dangling empty modules, resources, + // etc. Prune will clean that all up. + s.prune() + + return nil +} + +func (s *State) removeModule(path []string, v *ModuleState) { + for i, m := range s.Modules { + if m == v { + s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil + return + } + } +} + +func (s *State) removeResource(path []string, v *ResourceState) { + // Get the module this resource lives in. If it doesn't exist, we're done. + mod := s.moduleByPath(normalizeModulePath(path)) + if mod == nil { + return + } + + // Find this resource. This is a O(N) lookup when if we had the key + // it could be O(1) but even with thousands of resources this shouldn't + // matter right now. We can easily up performance here when the time comes. + for k, r := range mod.Resources { + if r == v { + // Found it + delete(mod.Resources, k) + return + } + } +} + +func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) { + // Go through the resource and find the instance that matches this + // (if any) and remove it. + + // Check primary + if r.Primary == v { + r.Primary = nil + return + } + + // Check lists + lists := [][]*InstanceState{r.Deposed} + for _, is := range lists { + for i, instance := range is { + if instance == v { + // Found it, remove it + is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil + + // Done + return + } + } + } +} + +// RootModule returns the ModuleState for the root module +func (s *State) RootModule() *ModuleState { + root := s.ModuleByPath(addrs.RootModuleInstance) + if root == nil { + panic("missing root module") + } + return root +} + +// Equal tests if one state is equal to another. +func (s *State) Equal(other *State) bool { + // If one is nil, we do a direct check + if s == nil || other == nil { + return s == other + } + + s.Lock() + defer s.Unlock() + return s.equal(other) +} + +func (s *State) equal(other *State) bool { + if s == nil || other == nil { + return s == other + } + + // If the versions are different, they're certainly not equal + if s.Version != other.Version { + return false + } + + // If any of the modules are not equal, then this state isn't equal + if len(s.Modules) != len(other.Modules) { + return false + } + for _, m := range s.Modules { + // This isn't very optimal currently but works. + otherM := other.moduleByPath(normalizeModulePath(m.Path)) + if otherM == nil { + return false + } + + // If they're not equal, then we're not equal! 
+ if !m.Equal(otherM) { + return false + } + } + + return true +} + +type StateAgeComparison int + +const ( + StateAgeEqual StateAgeComparison = 0 + StateAgeReceiverNewer StateAgeComparison = 1 + StateAgeReceiverOlder StateAgeComparison = -1 +) + +// CompareAges compares one state with another for which is "older". +// +// This is a simple check using the state's serial, and is thus only as +// reliable as the serial itself. In the normal case, only one state +// exists for a given combination of lineage/serial, but Terraform +// does not guarantee this and so the result of this method should be +// used with care. +// +// Returns an integer that is negative if the receiver is older than +// the argument, positive if the converse, and zero if they are equal. +// An error is returned if the two states are not of the same lineage, +// in which case the integer returned has no meaning. +func (s *State) CompareAges(other *State) (StateAgeComparison, error) { + // nil states are "older" than actual states + switch { + case s != nil && other == nil: + return StateAgeReceiverNewer, nil + case s == nil && other != nil: + return StateAgeReceiverOlder, nil + case s == nil && other == nil: + return StateAgeEqual, nil + } + + if !s.SameLineage(other) { + return StateAgeEqual, fmt.Errorf( + "can't compare two states of differing lineage", + ) + } + + s.Lock() + defer s.Unlock() + + switch { + case s.Serial < other.Serial: + return StateAgeReceiverOlder, nil + case s.Serial > other.Serial: + return StateAgeReceiverNewer, nil + default: + return StateAgeEqual, nil + } +} + +// SameLineage returns true only if the state given in argument belongs +// to the same "lineage" of states as the receiver. +func (s *State) SameLineage(other *State) bool { + s.Lock() + defer s.Unlock() + + // If one of the states has no lineage then it is assumed to predate + // this concept, and so we'll accept it as belonging to any lineage + // so that a lineage string can be assigned to newer versions + // without breaking compatibility with older versions. + if s.Lineage == "" || other.Lineage == "" { + return true + } + + return s.Lineage == other.Lineage +} + +// DeepCopy performs a deep copy of the state structure and returns +// a new structure. 
+func (s *State) DeepCopy() *State { + if s == nil { + return nil + } + + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*State) +} + +func (s *State) Init() { + s.Lock() + defer s.Unlock() + s.init() +} + +func (s *State) init() { + if s.Version == 0 { + s.Version = stateVersion + } + + if s.moduleByPath(addrs.RootModuleInstance) == nil { + s.addModule(addrs.RootModuleInstance) + } + s.ensureHasLineage() + + for _, mod := range s.Modules { + if mod != nil { + mod.init() + } + } + + if s.Remote != nil { + s.Remote.init() + } + +} + +func (s *State) EnsureHasLineage() { + s.Lock() + defer s.Unlock() + + s.ensureHasLineage() +} + +func (s *State) ensureHasLineage() { + if s.Lineage == "" { + lineage, err := uuid.GenerateUUID() + if err != nil { + panic(fmt.Errorf("Failed to generate lineage: %v", err)) + } + s.Lineage = lineage + if os.Getenv("TF_ACC") == "" || os.Getenv("TF_ACC_STATE_LINEAGE") == "1" { + log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage) + } + } else { + if os.Getenv("TF_ACC") == "" || os.Getenv("TF_ACC_STATE_LINEAGE") == "1" { + log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage) + } + } +} + +// AddModuleState insert this module state and override any existing ModuleState +func (s *State) AddModuleState(mod *ModuleState) { + mod.init() + s.Lock() + defer s.Unlock() + + s.addModuleState(mod) +} + +func (s *State) addModuleState(mod *ModuleState) { + for i, m := range s.Modules { + if reflect.DeepEqual(m.Path, mod.Path) { + s.Modules[i] = mod + return + } + } + + s.Modules = append(s.Modules, mod) + s.sort() +} + +// prune is used to remove any resources that are no longer required +func (s *State) prune() { + if s == nil { + return + } + + // Filter out empty modules. + // A module is always assumed to have a path, and it's length isn't always + // bounds checked later on. Modules may be "emptied" during destroy, but we + // never want to store those in the state. + for i := 0; i < len(s.Modules); i++ { + if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 { + s.Modules = append(s.Modules[:i], s.Modules[i+1:]...) + i-- + } + } + + for _, mod := range s.Modules { + mod.prune() + } + if s.Remote != nil && s.Remote.Empty() { + s.Remote = nil + } +} + +// sort sorts the modules +func (s *State) sort() { + sort.Sort(moduleStateSort(s.Modules)) + + // Allow modules to be sorted + for _, m := range s.Modules { + if m != nil { + m.sort() + } + } +} + +func (s *State) String() string { + if s == nil { + return "<nil>" + } + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + for _, m := range s.Modules { + mStr := m.String() + + // If we're the root module, we just write the output directly. + if reflect.DeepEqual(m.Path, rootModulePath) { + buf.WriteString(mStr + "\n") + continue + } + + buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], "."))) + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + text := s.Text() + if text != "" { + text = " " + text + } + + buf.WriteString(fmt.Sprintf("%s\n", text)) + } + } + + return strings.TrimSpace(buf.String()) +} + +// BackendState stores the configuration to connect to a remote backend. 
+type BackendState struct { + Type string `json:"type"` // Backend type + ConfigRaw json.RawMessage `json:"config"` // Backend raw config + Hash uint64 `json:"hash"` // Hash of portion of configuration from config files +} + +// RemoteState is used to track the information about a remote +// state store that we push/pull state to. +type RemoteState struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` + + mu sync.Mutex +} + +func (s *RemoteState) Lock() { s.mu.Lock() } +func (s *RemoteState) Unlock() { s.mu.Unlock() } + +func (r *RemoteState) init() { + r.Lock() + defer r.Unlock() + + if r.Config == nil { + r.Config = make(map[string]string) + } +} + +func (r *RemoteState) Empty() bool { + if r == nil { + return true + } + r.Lock() + defer r.Unlock() + + return r.Type == "" +} + +// OutputState is used to track the state relevant to a single output. +type OutputState struct { + // Sensitive describes whether the output is considered sensitive, + // which may lead to masking the value on screen in some cases. + Sensitive bool `json:"sensitive"` + // Type describes the structure of Value. Valid values are "string", + // "map" and "list" + Type string `json:"type"` + // Value contains the value of the output, in the structure described + // by the Type field. + Value interface{} `json:"value"` + + mu sync.Mutex +} + +func (s *OutputState) Lock() { s.mu.Lock() } +func (s *OutputState) Unlock() { s.mu.Unlock() } + +func (s *OutputState) String() string { + return fmt.Sprintf("%#v", s.Value) +} + +// Equal compares two OutputState structures for equality. nil values are +// considered equal. +func (s *OutputState) Equal(other *OutputState) bool { + if s == nil && other == nil { + return true + } + + if s == nil || other == nil { + return false + } + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Sensitive != other.Sensitive { + return false + } + + if !reflect.DeepEqual(s.Value, other.Value) { + return false + } + + return true +} + +// ModuleState is used to track all the state relevant to a single +// module. Previous to Terraform 0.3, all state belonged to the "root" +// module. +type ModuleState struct { + // Path is the import path from the root module. Modules imports are + // always disjoint, so the path represents amodule tree + Path []string `json:"path"` + + // Locals are kept only transiently in-memory, because we can always + // re-compute them. + Locals map[string]interface{} `json:"-"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]*OutputState `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*ResourceState `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: an module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. 
+ // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + mu sync.Mutex +} + +func (s *ModuleState) Lock() { s.mu.Lock() } +func (s *ModuleState) Unlock() { s.mu.Unlock() } + +// Equal tests whether one module state is equal to another. +func (m *ModuleState) Equal(other *ModuleState) bool { + m.Lock() + defer m.Unlock() + + // Paths must be equal + if !reflect.DeepEqual(m.Path, other.Path) { + return false + } + + // Outputs must be equal + if len(m.Outputs) != len(other.Outputs) { + return false + } + for k, v := range m.Outputs { + if !other.Outputs[k].Equal(v) { + return false + } + } + + // Dependencies must be equal. This sorts these in place but + // this shouldn't cause any problems. + sort.Strings(m.Dependencies) + sort.Strings(other.Dependencies) + if len(m.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range m.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // Resources must be equal + if len(m.Resources) != len(other.Resources) { + return false + } + for k, r := range m.Resources { + otherR, ok := other.Resources[k] + if !ok { + return false + } + + if !r.Equal(otherR) { + return false + } + } + + return true +} + +func (m *ModuleState) init() { + m.Lock() + defer m.Unlock() + + if m.Path == nil { + m.Path = []string{} + } + if m.Outputs == nil { + m.Outputs = make(map[string]*OutputState) + } + if m.Resources == nil { + m.Resources = make(map[string]*ResourceState) + } + + if m.Dependencies == nil { + m.Dependencies = make([]string, 0) + } + + for _, rs := range m.Resources { + rs.init() + } +} + +// prune is used to remove any resources that are no longer required +func (m *ModuleState) prune() { + m.Lock() + defer m.Unlock() + + for k, v := range m.Resources { + if v == nil || (v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0 { + delete(m.Resources, k) + continue + } + + v.prune() + } + + for k, v := range m.Outputs { + if v.Value == hcl2shim.UnknownVariableValue { + delete(m.Outputs, k) + } + } + + m.Dependencies = uniqueStrings(m.Dependencies) +} + +func (m *ModuleState) sort() { + for _, v := range m.Resources { + v.sort() + } +} + +func (m *ModuleState) String() string { + m.Lock() + defer m.Unlock() + + var buf bytes.Buffer + + if len(m.Resources) == 0 { + buf.WriteString("<no state>") + } + + names := make([]string, 0, len(m.Resources)) + for name := range m.Resources { + names = append(names, name) + } + + sort.Sort(resourceNameSort(names)) + + for _, k := range names { + rs := m.Resources[k] + var id string + if rs.Primary != nil { + id = rs.Primary.ID + } + if id == "" { + id = "<not created>" + } + + taintStr := "" + if rs.Primary.Tainted { + taintStr = " (tainted)" + } + + deposedStr := "" + if len(rs.Deposed) > 0 { + deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed)) + } + + buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) + buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) + if rs.Provider != "" { + buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.Provider)) + } + + var attributes map[string]string + if rs.Primary != nil { + attributes = rs.Primary.Attributes + } + attrKeys := make([]string, 0, len(attributes)) + for ak := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := 
attributes[ak] + buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + } + + for idx, t := range rs.Deposed { + taintStr := "" + if t.Tainted { + taintStr = " (tainted)" + } + buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr)) + } + + if len(rs.Dependencies) > 0 { + buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + for _, dep := range rs.Dependencies { + buf.WriteString(fmt.Sprintf(" %s\n", dep)) + } + } + } + + if len(m.Outputs) > 0 { + buf.WriteString("\nOutputs:\n\n") + + ks := make([]string, 0, len(m.Outputs)) + for k := range m.Outputs { + ks = append(ks, k) + } + + sort.Strings(ks) + + for _, k := range ks { + v := m.Outputs[k] + switch vTyped := v.Value.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key := range vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + } + } + } + + return buf.String() +} + +// ResourceStateKey is a structured representation of the key used for the +// ModuleState.Resources mapping +type ResourceStateKey struct { + Name string + Type string + Mode ResourceMode + Index int +} + +// Equal determines whether two ResourceStateKeys are the same +func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool { + if rsk == nil || other == nil { + return false + } + if rsk.Mode != other.Mode { + return false + } + if rsk.Type != other.Type { + return false + } + if rsk.Name != other.Name { + return false + } + if rsk.Index != other.Index { + return false + } + return true +} + +func (rsk *ResourceStateKey) String() string { + if rsk == nil { + return "" + } + var prefix string + switch rsk.Mode { + case ManagedResourceMode: + prefix = "" + case DataResourceMode: + prefix = "data." + default: + panic(fmt.Errorf("unknown resource mode %s", rsk.Mode)) + } + if rsk.Index == -1 { + return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name) + } + return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index) +} + +// ParseResourceStateKey accepts a key in the format used by +// ModuleState.Resources and returns a resource name and resource index. In the +// state, a resource has the format "type.name.index" or "type.name". In the +// latter case, the index is returned as -1. +func parseResourceStateKey(k string) (*ResourceStateKey, error) { + parts := strings.Split(k, ".") + mode := ManagedResourceMode + if len(parts) > 0 && parts[0] == "data" { + mode = DataResourceMode + // Don't need the constant "data" prefix for parsing + // now that we've figured out the mode. + parts = parts[1:] + } + if len(parts) < 2 || len(parts) > 3 { + return nil, fmt.Errorf("Malformed resource state key: %s", k) + } + rsk := &ResourceStateKey{ + Mode: mode, + Type: parts[0], + Name: parts[1], + Index: -1, + } + if len(parts) == 3 { + index, err := strconv.Atoi(parts[2]) + if err != nil { + return nil, fmt.Errorf("Malformed resource state key index: %s", k) + } + rsk.Index = index + } + return rsk, nil +} + +// ResourceState holds the state of a resource that is used so that +// a provider can find and manage an existing resource as well as for +// storing attributes that are used to populate variables of child +// resources. 
+// +// Attributes has attributes about the created resource that are +// queryable in interpolation: "${type.id.attr}" +// +// Extra is just extra data that a provider can return that we store +// for later, but is not exposed in any way to the user. +// +type ResourceState struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *InstanceState `json:"primary"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. + // + // If there were problems creating the replacement Primary, the Deposed + // instance and the (now tainted) replacement Primary will be swapped so the + // tainted replacement will be cleaned up instead. + // + // An instance will remain in the Deposed list until it is successfully + // destroyed and purged. + Deposed []*InstanceState `json:"deposed"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider"` + + mu sync.Mutex +} + +func (s *ResourceState) Lock() { s.mu.Lock() } +func (s *ResourceState) Unlock() { s.mu.Unlock() } + +// Equal tests whether two ResourceStates are equal. +func (s *ResourceState) Equal(other *ResourceState) bool { + s.Lock() + defer s.Unlock() + + if s.Type != other.Type { + return false + } + + if s.Provider != other.Provider { + return false + } + + // Dependencies must be equal + sort.Strings(s.Dependencies) + sort.Strings(other.Dependencies) + if len(s.Dependencies) != len(other.Dependencies) { + return false + } + for i, d := range s.Dependencies { + if other.Dependencies[i] != d { + return false + } + } + + // States must be equal + if !s.Primary.Equal(other.Primary) { + return false + } + + return true +} + +// Taint marks a resource as tainted. +func (s *ResourceState) Taint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = true + } +} + +// Untaint unmarks a resource as tainted. 
+func (s *ResourceState) Untaint() { + s.Lock() + defer s.Unlock() + + if s.Primary != nil { + s.Primary.Tainted = false + } +} + +func (s *ResourceState) init() { + s.Lock() + defer s.Unlock() + + if s.Primary == nil { + s.Primary = &InstanceState{} + } + s.Primary.init() + + if s.Dependencies == nil { + s.Dependencies = []string{} + } + + if s.Deposed == nil { + s.Deposed = make([]*InstanceState, 0) + } +} + +// prune is used to remove any instances that are no longer required +func (s *ResourceState) prune() { + s.Lock() + defer s.Unlock() + + n := len(s.Deposed) + for i := 0; i < n; i++ { + inst := s.Deposed[i] + if inst == nil || inst.ID == "" { + copy(s.Deposed[i:], s.Deposed[i+1:]) + s.Deposed[n-1] = nil + n-- + i-- + } + } + s.Deposed = s.Deposed[:n] + + s.Dependencies = uniqueStrings(s.Dependencies) +} + +func (s *ResourceState) sort() { + s.Lock() + defer s.Unlock() + + sort.Strings(s.Dependencies) +} + +func (s *ResourceState) String() string { + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("Type = %s", s.Type)) + return buf.String() +} + +// InstanceState is used to track the unique state information belonging +// to a given instance. +type InstanceState struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Ephemeral is used to store any state associated with this instance + // that is necessary for the Terraform run to complete, but is not + // persisted to a state file. + Ephemeral EphemeralState `json:"-"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + ProviderMeta cty.Value + + // Tainted is used to mark a resource for recreation. + Tainted bool `json:"tainted"` + + mu sync.Mutex +} + +func (s *InstanceState) Lock() { s.mu.Lock() } +func (s *InstanceState) Unlock() { s.mu.Unlock() } + +func (s *InstanceState) init() { + s.Lock() + defer s.Unlock() + + if s.Attributes == nil { + s.Attributes = make(map[string]string) + } + if s.Meta == nil { + s.Meta = make(map[string]interface{}) + } + s.Ephemeral.init() +} + +// NewInstanceStateShimmedFromValue is a shim method to lower a new-style +// object value representing the attributes of an instance object into the +// legacy InstanceState representation. +// +// This is for shimming to old components only and should not be used in new code. +func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState { + attrs := hcl2shim.FlatmapValueFromHCL2(state) + return &InstanceState{ + ID: attrs["id"], + Attributes: attrs, + Meta: map[string]interface{}{ + "schema_version": schemaVersion, + }, + } +} + +// AttrsAsObjectValue shims from the legacy InstanceState representation to +// a new-style cty object value representation of the state attributes, using +// the given type for guidance. +// +// The given type must be the implied type of the schema of the resource type +// of the object whose state is being converted, or the result is undefined. 
+// +// This is for shimming from old components only and should not be used in +// new code. +func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) { + if s == nil { + // if the state is nil, we need to construct a complete cty.Value with + // null attributes, rather than a single cty.NullVal(ty) + s = &InstanceState{} + } + + if s.Attributes == nil { + s.Attributes = map[string]string{} + } + + // make sure ID is included in the attributes. The InstanceState.ID value + // takes precedence. + if s.ID != "" { + s.Attributes["id"] = s.ID + } + + return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty) +} + +// Copy all the Fields from another InstanceState +func (s *InstanceState) Set(from *InstanceState) { + s.Lock() + defer s.Unlock() + + from.Lock() + defer from.Unlock() + + s.ID = from.ID + s.Attributes = from.Attributes + s.Ephemeral = from.Ephemeral + s.Meta = from.Meta + s.Tainted = from.Tainted +} + +func (s *InstanceState) DeepCopy() *InstanceState { + copy, err := copystructure.Config{Lock: true}.Copy(s) + if err != nil { + panic(err) + } + + return copy.(*InstanceState) +} + +func (s *InstanceState) Empty() bool { + if s == nil { + return true + } + s.Lock() + defer s.Unlock() + + return s.ID == "" +} + +func (s *InstanceState) Equal(other *InstanceState) bool { + // Short circuit some nil checks + if s == nil || other == nil { + return s == other + } + s.Lock() + defer s.Unlock() + + // IDs must be equal + if s.ID != other.ID { + return false + } + + // Attributes must be equal + if len(s.Attributes) != len(other.Attributes) { + return false + } + for k, v := range s.Attributes { + otherV, ok := other.Attributes[k] + if !ok { + return false + } + + if v != otherV { + return false + } + } + + // Meta must be equal + if len(s.Meta) != len(other.Meta) { + return false + } + if s.Meta != nil && other.Meta != nil { + // We only do the deep check if both are non-nil. If one is nil + // we treat it as equal since their lengths are both zero (check + // above). + // + // Since this can contain numeric values that may change types during + // serialization, let's compare the serialized values. + sMeta, err := json.Marshal(s.Meta) + if err != nil { + // marshaling primitives shouldn't ever error out + panic(err) + } + otherMeta, err := json.Marshal(other.Meta) + if err != nil { + panic(err) + } + + if !bytes.Equal(sMeta, otherMeta) { + return false + } + } + + if s.Tainted != other.Tainted { + return false + } + + return true +} + +// MergeDiff takes a ResourceDiff and merges the attributes into +// this resource state in order to generate a new state. This new +// state can be used to provide updated attribute lookups for +// variable interpolation. +// +// If the diff attribute requires computing the value, and hence +// won't be available until apply, the value is replaced with the +// computeID. 
+func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { + result := s.DeepCopy() + if result == nil { + result = new(InstanceState) + } + result.init() + + if s != nil { + s.Lock() + defer s.Unlock() + for k, v := range s.Attributes { + result.Attributes[k] = v + } + } + if d != nil { + for k, diff := range d.CopyAttributes() { + if diff.NewRemoved { + delete(result.Attributes, k) + continue + } + if diff.NewComputed { + result.Attributes[k] = hcl2shim.UnknownVariableValue + continue + } + + result.Attributes[k] = diff.New + } + } + + return result +} + +func (s *InstanceState) String() string { + notCreated := "<not created>" + + if s == nil { + return notCreated + } + + s.Lock() + defer s.Unlock() + + var buf bytes.Buffer + + if s.ID == "" { + return notCreated + } + + buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) + + attributes := s.Attributes + attrKeys := make([]string, 0, len(attributes)) + for ak := range attributes { + if ak == "id" { + continue + } + + attrKeys = append(attrKeys, ak) + } + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av)) + } + + buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted)) + + return buf.String() +} + +// EphemeralState is used for transient state that is only kept in-memory +type EphemeralState struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` + + // Type is used to specify the resource type for this instance. This is only + // required for import operations (as documented). If the documentation + // doesn't state that you need to set this, then don't worry about + // setting it. + Type string `json:"-"` +} + +func (e *EphemeralState) init() { + if e.ConnInfo == nil { + e.ConnInfo = make(map[string]string) + } +} + +// resourceNameSort implements the sort.Interface to sort name parts lexically for +// strings and numerically for integer indexes. 
+type resourceNameSort []string + +func (r resourceNameSort) Len() int { return len(r) } +func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] } + +func (r resourceNameSort) Less(i, j int) bool { + iParts := strings.Split(r[i], ".") + jParts := strings.Split(r[j], ".") + + end := len(iParts) + if len(jParts) < end { + end = len(jParts) + } + + for idx := 0; idx < end; idx++ { + if iParts[idx] == jParts[idx] { + continue + } + + // sort on the first non-matching part + iInt, iIntErr := strconv.Atoi(iParts[idx]) + jInt, jIntErr := strconv.Atoi(jParts[idx]) + + switch { + case iIntErr == nil && jIntErr == nil: + // sort numerically if both parts are integers + return iInt < jInt + case iIntErr == nil: + // numbers sort before strings + return true + case jIntErr == nil: + return false + default: + return iParts[idx] < jParts[idx] + } + } + + return r[i] < r[j] +} + +// moduleStateSort implements sort.Interface to sort module states +type moduleStateSort []*ModuleState + +func (s moduleStateSort) Len() int { + return len(s) +} + +func (s moduleStateSort) Less(i, j int) bool { + a := s[i] + b := s[j] + + // If either is nil, then the nil one is "less" than + if a == nil || b == nil { + return a == nil + } + + // If the lengths are different, then the shorter one always wins + if len(a.Path) != len(b.Path) { + return len(a.Path) < len(b.Path) + } + + // Otherwise, compare lexically + return strings.Join(a.Path, ".") < strings.Join(b.Path, ".") +} + +func (s moduleStateSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +const stateValidateErrMultiModule = ` +Multiple modules with the same path: %s + +This means that there are multiple entries in the "modules" field +in your state file that point to the same module. This will cause Terraform +to behave in unexpected and error prone ways and is invalid. Please back up +and modify your state file manually to resolve this. +` diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go new file mode 100644 index 000000000..01d039272 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/state_filter.go @@ -0,0 +1,267 @@ +package terraform + +import ( + "fmt" + "sort" +) + +// stateFilter is responsible for filtering and searching a state. +// +// This is a separate struct from State rather than a method on State +// because StateFilter might create sidecar data structures to optimize +// filtering on the state. +// +// If you change the State, the filter created is invalid and either +// Reset should be called or a new one should be allocated. StateFilter +// will not watch State for changes and do this for you. If you filter after +// changing the State without calling Reset, the behavior is not defined. +type stateFilter struct { + State *State +} + +// Filter takes the addresses specified by fs and finds all the matches. +// The values of fs are resource addressing syntax that can be parsed by +// parseResourceAddress. +func (f *stateFilter) filter(fs ...string) ([]*stateFilterResult, error) { + // Parse all the addresses + as := make([]*resourceAddress, len(fs)) + for i, v := range fs { + a, err := parseResourceAddress(v) + if err != nil { + return nil, fmt.Errorf("Error parsing address '%s': %s", v, err) + } + + as[i] = a + } + + // If we weren't given any filters, then we list all + if len(fs) == 0 { + as = append(as, &resourceAddress{Index: -1}) + } + + // Filter each of the address. 
We keep track of this in a map to + // strip duplicates. + resultSet := make(map[string]*stateFilterResult) + for _, a := range as { + for _, r := range f.filterSingle(a) { + resultSet[r.String()] = r + } + } + + // Make the result list + results := make([]*stateFilterResult, 0, len(resultSet)) + for _, v := range resultSet { + results = append(results, v) + } + + // Sort them and return + sort.Sort(stateFilterResultSlice(results)) + return results, nil +} + +func (f *stateFilter) filterSingle(a *resourceAddress) []*stateFilterResult { + // The slice to keep track of results + var results []*stateFilterResult + + // Go through modules first. + modules := make([]*ModuleState, 0, len(f.State.Modules)) + for _, m := range f.State.Modules { + if f.relevant(a, m) { + modules = append(modules, m) + + // Only add the module to the results if we haven't specified a type. + // We also ignore the root module. + if a.Type == "" && len(m.Path) > 1 { + results = append(results, &stateFilterResult{ + Path: m.Path[1:], + Address: (&resourceAddress{Path: m.Path[1:]}).String(), + Value: m, + }) + } + } + } + + // With the modules set, go through all the resources within + // the modules to find relevant resources. + for _, m := range modules { + for n, r := range m.Resources { + // The name in the state contains valuable information. Parse. + key, err := parseResourceStateKey(n) + if err != nil { + // If we get an error parsing, then just ignore it + // out of the state. + continue + } + + // Older states and test fixtures often don't contain the + // type directly on the ResourceState. We add this so StateFilter + // is a bit more robust. + if r.Type == "" { + r.Type = key.Type + } + + if f.relevant(a, r) { + if a.Name != "" && a.Name != key.Name { + // Name doesn't match + continue + } + + if a.Index >= 0 && key.Index != a.Index { + // Index doesn't match + continue + } + + if a.Name != "" && a.Name != key.Name { + continue + } + + // Build the address for this resource + addr := &resourceAddress{ + Path: m.Path[1:], + Name: key.Name, + Type: key.Type, + Index: key.Index, + } + + // Add the resource level result + resourceResult := &stateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Value: r, + } + if !a.InstanceTypeSet { + results = append(results, resourceResult) + } + + // Add the instances + if r.Primary != nil { + addr.InstanceType = typePrimary + addr.InstanceTypeSet = false + results = append(results, &stateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: r.Primary, + }) + } + + for _, instance := range r.Deposed { + if f.relevant(a, instance) { + addr.InstanceType = typeDeposed + addr.InstanceTypeSet = true + results = append(results, &stateFilterResult{ + Path: addr.Path, + Address: addr.String(), + Parent: resourceResult, + Value: instance, + }) + } + } + } + } + } + + return results +} + +// relevant checks for relevance of this address against the given value. +func (f *stateFilter) relevant(addr *resourceAddress, raw interface{}) bool { + switch v := raw.(type) { + case *ModuleState: + path := v.Path[1:] + + if len(addr.Path) > len(path) { + // Longer path in address means there is no way we match. + return false + } + + // Check for a prefix match + for i, p := range addr.Path { + if path[i] != p { + // Any mismatches don't match. + return false + } + } + + return true + case *ResourceState: + if addr.Type == "" { + // If we have no resource type, then we're interested in all! 
+ return true + } + + // If the type doesn't match we fail immediately + if v.Type != addr.Type { + return false + } + + return true + default: + // If we don't know about it, let's just say no + return false + } +} + +// stateFilterResult is a single result from a filter operation. Filter +// can match multiple things within a state (module, resource, instance, etc.) +// and this unifies that. +type stateFilterResult struct { + // Module path of the result + Path []string + + // Address is the address that can be used to reference this exact result. + Address string + + // Parent, if non-nil, is a parent of this result. For instances, the + // parent would be a resource. For resources, the parent would be + // a module. For modules, this is currently nil. + Parent *stateFilterResult + + // Value is the actual value. This must be type switched on. It can be + // any data structures that `State` can hold: `ModuleState`, + // `ResourceState`, `InstanceState`. + Value interface{} +} + +func (r *stateFilterResult) String() string { + return fmt.Sprintf("%T: %s", r.Value, r.Address) +} + +func (r *stateFilterResult) sortedType() int { + switch r.Value.(type) { + case *ModuleState: + return 0 + case *ResourceState: + return 1 + case *InstanceState: + return 2 + default: + return 50 + } +} + +// stateFilterResultSlice is a slice of results that implements +// sort.Interface. The sorting goal is what is most appealing to +// human output. +type stateFilterResultSlice []*stateFilterResult + +func (s stateFilterResultSlice) Len() int { return len(s) } +func (s stateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s stateFilterResultSlice) Less(i, j int) bool { + a, b := s[i], s[j] + + // if these address contain an index, we want to sort by index rather than name + addrA, errA := parseResourceAddress(a.Address) + addrB, errB := parseResourceAddress(b.Address) + if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index { + return addrA.Index < addrB.Index + } + + // If the addresses are different it is just lexographic sorting + if a.Address != b.Address { + return a.Address < b.Address + } + + // Addresses are the same, which means it matters on the type + return a.sortedType() < b.sortedType() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go new file mode 100644 index 000000000..01ac810f1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/terraform/util.go @@ -0,0 +1,22 @@ +package terraform + +import ( + "sort" +) + +// deduplicate a slice of strings +func uniqueStrings(s []string) []string { + if len(s) < 2 { + return s + } + + sort.Strings(s) + result := make([]string, 1, len(s)) + result[0] = s[0] + for i := 1; i < len(s); i++ { + if s[i] != result[len(result)-1] { + result = append(result, s[i]) + } + } + return result +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/terraform.go b/vendor/github.com/hashicorp/terraform-plugin-test/terraform.go index b5051cf2b..d2f118e15 100644 --- a/vendor/github.com/hashicorp/terraform-plugin-test/terraform.go +++ b/vendor/github.com/hashicorp/terraform-plugin-test/terraform.go @@ -4,14 +4,13 @@ import ( "bytes" "encoding/json" "fmt" + getter "github.com/hashicorp/go-getter" "io/ioutil" "os" "os/exec" "path/filepath" "runtime" "strings" - - getter "github.com/hashicorp/go-getter" ) const releaseHost = "https://releases.hashicorp.com" diff --git 
a/vendor/github.com/hashicorp/terraform-plugin-test/v2/CHANGELOG.md b/vendor/github.com/hashicorp/terraform-plugin-test/v2/CHANGELOG.md new file mode 100644 index 000000000..b1ed72131 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/CHANGELOG.md @@ -0,0 +1,30 @@ +# 2.0.0 (August 10, 2020) + +FEATURES: + + - Simplified API signatures to reflect no longer needing provider name ([#32](https://github.com/hashicorp/terraform-plugin-test/pull/32)) + - Implement SavedPlanStdout which captures a non-json stdout run of `terraform show` of a planfile ([#34](https://github.com/hashicorp/terraform-plugin-test/pull/34)) + +# 1.4.4 (July 10, 2020) + +BUG FIXES: + + - Fix Windows bug in versions of Terraform below 0.13.0-beta2 ([#30](https://github.com/hashicorp/terraform-plugin-test/pull/30)) + +# 1.4.3 (July 7, 2020) + +DEPENDENCIES: + + - `github.com/hashicorp/go-getter@v1.4.0` ([#29](https://github.com/hashicorp/terraform-plugin-test/pull/29)) + +# 1.4.2 (July 7, 2020) + +DEPENDENCIES: + + - `github.com/hashicorp/terraform-exec@v0.1.1` ([#28](https://github.com/hashicorp/terraform-plugin-test/pull/28)) + +# 1.4.1 (July 7, 2020) + +BUG FIXES: + + - Fix auto-install Terraform feature ([#26](https://github.com/hashicorp/terraform-plugin-test/pull/26)) diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-test/v2/LICENSE new file mode 100644 index 000000000..82b4de97c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
“Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/README.md b/vendor/github.com/hashicorp/terraform-plugin-test/v2/README.md new file mode 100644 index 000000000..48abe71e9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/README.md @@ -0,0 +1,4 @@ +# Terraform Plugin Test Helper Library + +This is an **experimental** library for testing Terraform plugins in their +natural habitat as child processes of a real `terraform` executable. diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/config.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/config.go new file mode 100644 index 000000000..9f72c3e8a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/config.go @@ -0,0 +1,53 @@ +package tftest + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/hashicorp/terraform-exec/tfinstall" +) + +// Config is used to configure the test helper. In most normal test programs +// the configuration is discovered automatically by an Init* function using +// DiscoverConfig, but this is exposed so that more complex scenarios can be +// implemented by direct configuration. +type Config struct { + SourceDir string + TerraformExec string + execTempDir string + PreviousPluginExec string +} + +// DiscoverConfig uses environment variables and other means to automatically +// discover a reasonable test helper configuration. +func DiscoverConfig(sourceDir string) (*Config, error) { + tfVersion := os.Getenv("TF_ACC_TERRAFORM_VERSION") + tfPath := os.Getenv("TF_ACC_TERRAFORM_PATH") + + tempDir := os.Getenv("TF_ACC_TEMP_DIR") + tfDir, err := ioutil.TempDir(tempDir, "tftest-terraform") + if err != nil { + return nil, fmt.Errorf("failed to create temp dir: %w", err) + } + + finders := []tfinstall.ExecPathFinder{} + switch { + case tfPath != "": + finders = append(finders, tfinstall.ExactPath(tfPath)) + case tfVersion != "": + finders = append(finders, tfinstall.ExactVersion(tfVersion, tfDir)) + default: + finders = append(finders, tfinstall.LookPath(), tfinstall.LatestVersion(tfDir, true)) + } + tfExec, err := tfinstall.Find(finders...) + if err != nil { + return nil, err + } + + return &Config{ + SourceDir: sourceDir, + TerraformExec: tfExec, + execTempDir: tfDir, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/doc.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/doc.go new file mode 100644 index 000000000..3b120c679 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/doc.go @@ -0,0 +1,7 @@ +// Package tftest contains utilities to help with writing tests for +// Terraform plugins. +// +// This is not a package for testing configurations or modules written in the +// Terraform language. It is for testing the plugins that allow Terraform to +// manage various cloud services and other APIs. 
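+//
+// Illustrative, editor-added sketch (not part of the upstream file): a test
+// binary can discover its configuration directly through DiscoverConfig,
+// honoring the TF_ACC_TERRAFORM_PATH, TF_ACC_TERRAFORM_VERSION and
+// TF_ACC_TEMP_DIR environment variables described in config.go. The
+// "./testdata" source directory and the log calls are placeholders.
+//
+//	cfg, err := tftest.DiscoverConfig("./testdata")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Printf("acceptance tests will run against %s", cfg.TerraformExec)
+//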
+package tftest diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/go.mod b/vendor/github.com/hashicorp/terraform-plugin-test/v2/go.mod new file mode 100644 index 000000000..857517efb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/go.mod @@ -0,0 +1,9 @@ +module github.com/hashicorp/terraform-plugin-test/v2 + +go 1.12 + +require ( + github.com/hashicorp/go-getter v1.4.0 + github.com/hashicorp/terraform-exec v0.3.0 + github.com/hashicorp/terraform-json v0.5.0 +) diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/go.sum b/vendor/github.com/hashicorp/terraform-plugin-test/v2/go.sum new file mode 100644 index 000000000..3151061c0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/go.sum @@ -0,0 +1,177 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/aws/aws-sdk-go v1.15.78 h1:LaXy6lWR0YK7LKyuU0QWy2ws/LWTPfYV/UgfiBu4tvY= +github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= 
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= +github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-getter v1.4.0 h1:ENHNi8494porjD0ZhIrjlAHnveSFhY7hvOJrV/fsKkw= +github.com/hashicorp/go-getter v1.4.0/go.mod h1:7qxyCd8rBfcShwsvxgIguu4KbS3l8bUCwg2Umn7RjeY= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/terraform-exec v0.3.0 h1:5WLBsnv9BoEUGlHJZETROZZxw+qO3/TFQEh6JMP2uaY= +github.com/hashicorp/terraform-exec v0.3.0/go.mod h1:yKWvMPtkTaHpeAmllw+1qdHZ7E5u+pAZ+x8e2jQF6gM= +github.com/hashicorp/terraform-json v0.5.0 h1:7TV3/F3y7QVSuN4r9BEXqnWqrAyeOtON8f0wvREtyzs= +github.com/hashicorp/terraform-json v0.5.0/go.mod h1:eAbqb4w0pSlRmdvl8fOyHAi/+8jnkVYN28gJkSJrLhU= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= 
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= +github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/zclconf/go-cty v1.2.1 h1:vGMsygfmeCl4Xb6OA5U5XVAaQZ69FvoG7X2jUtQujb8= +github.com/zclconf/go-cty v1.2.1/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod 
h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/guard.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/guard.go
new file mode 100644
index 000000000..819937b38
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/guard.go
@@ -0,0 +1,94 @@
+package tftest
+
+import (
+	"fmt"
+	"os"
+	"testing"
+)
+
+// AcceptanceTest is a test guard that will produce a log and call SkipNow on
+// the given TestControl if the environment variable TF_ACC isn't set to
+// indicate that the caller wants to run acceptance tests.
+//
+// Call this immediately at the start of each acceptance test function to
+// signal that it may cost money and thus requires this opt-in environment
+// variable.
+//
+// For the purpose of this function, an "acceptance test" is any test that
+// reaches out to services that are not directly controlled by the test program
+// itself, particularly if those requests may lead to service charges. For any
+// system where it is possible and realistic to run a local instance of the
+// service for testing (e.g. in a daemon launched by the test program itself),
+// prefer to do this and _don't_ call AcceptanceTest, thus allowing tests to be
+// run more easily and without external cost by contributors.
+func AcceptanceTest(t TestControl) {
+	t.Helper()
+	if os.Getenv("TF_ACC") == "" {
+		t.Log("TF_ACC is not set")
+		t.SkipNow()
+	}
+}
+
+// LongTest is a test guard that will produce a log and call SkipNow on the
+// given TestControl if the test harness is currently running in "short mode".
+//
+// What is considered a "long test" will always be pretty subjective, but test
+// implementers should think of this in terms of what seems like it'd be
+// inconvenient to run repeatedly for quick feedback while testing a new feature
+// under development.
+//
+// When testing resource types that always take several minutes to complete
+// operations, consider having a single general test that covers the basic
+// functionality and then mark any other more specific tests as long tests so
+// that developers can quickly smoke-test a particular feature when needed
+// but can still run the full set of tests for a feature when needed.
+func LongTest(t TestControl) {
+	t.Helper()
+	if testing.Short() {
+		t.Log("skipping long test because of short mode")
+		t.SkipNow()
+	}
+}
+
+// TestControl is an interface requiring a subset of *testing.T which is used
+// by the test guards and helpers in this package. Most callers can simply
+// pass their *testing.T value here, but the interface allows other
+// implementations to potentially be provided instead, for example to allow
+// meta-testing (testing of the test utilities themselves).
+//
+// This interface also describes the subset of normal test functionality the
+// guards and helpers can perform: they can only create log lines, fail tests,
+// and skip tests. All other test control is the responsibility of the main
+// test code.
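+//
+// Illustrative, editor-added sketch (not part of the upstream file): a typical
+// acceptance test guards itself with AcceptanceTest, and optionally LongTest,
+// before doing any real work. The test name and body are hypothetical.
+//
+//	func TestAccWidget_basic(t *testing.T) {
+//		tftest.AcceptanceTest(t) // skips unless TF_ACC is set
+//		tftest.LongTest(t)       // skips under "go test -short"
+//
+//		// ... create a working directory and exercise the provider ...
+//	}
+//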
+type TestControl interface { + Helper() + Log(args ...interface{}) + FailNow() + SkipNow() +} + +// testingT wraps a TestControl to recover some of the convenience behaviors +// that would normally come from a real *testing.T, so we can keep TestControl +// small while still having these conveniences. This is an abstraction +// inversion, but accepted because it makes the public API more convenient +// without any considerable disadvantage. +type testingT struct { + TestControl +} + +func (t testingT) Logf(f string, args ...interface{}) { + t.Helper() + t.Log(fmt.Sprintf(f, args...)) +} + +func (t testingT) Fatalf(f string, args ...interface{}) { + t.Helper() + t.Log(fmt.Sprintf(f, args...)) + t.FailNow() +} + +func (t testingT) Skipf(f string, args ...interface{}) { + t.Helper() + t.Log(fmt.Sprintf(f, args...)) + t.SkipNow() +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/helper.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/helper.go new file mode 100644 index 000000000..a9878899a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/helper.go @@ -0,0 +1,222 @@ +package tftest + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + getter "github.com/hashicorp/go-getter" +) + +const subprocessCurrentSigil = "4acd63807899403ca4859f5bb948d2c6" +const subprocessPreviousSigil = "2279afb8cf71423996be1fd65d32f13b" + +// AutoInitProviderHelper is the main entrypoint for testing provider plugins +// using this package. It is intended to be called during TestMain to prepare +// for provider testing. +// +// AutoInitProviderHelper will discover the location of a current Terraform CLI +// executable to test against, detect whether a prior version of the plugin is +// available for upgrade tests, and then will return an object containing the +// results of that initialization which can then be stored in a global variable +// for use in other tests. +func AutoInitProviderHelper(sourceDir string) *Helper { + helper, err := AutoInitHelper(sourceDir) + if err != nil { + fmt.Fprintf(os.Stderr, "cannot run Terraform provider tests: %s\n", err) + os.Exit(1) + } + return helper +} + +// Helper is intended as a per-package singleton created in TestMain which +// other tests in a package can use to create Terraform execution contexts +type Helper struct { + baseDir string + + // sourceDir is the dir containing the provider source code, needed + // for tests that use fixture files. + sourceDir string + terraformExec string + + // execTempDir is created during DiscoverConfig to store any downloaded + // binaries + execTempDir string +} + +// AutoInitHelper uses the auto-discovery behavior of DiscoverConfig to prepare +// a configuration and then calls InitHelper with it. This is a convenient +// way to get the standard init behavior based on environment variables, and +// callers should use this unless they have an unusual requirement that calls +// for constructing a config in a different way. +func AutoInitHelper(sourceDir string) (*Helper, error) { + config, err := DiscoverConfig(sourceDir) + if err != nil { + return nil, err + } + + return InitHelper(config) +} + +// InitHelper prepares a testing helper with the given configuration. +// +// For most callers it is sufficient to call AutoInitHelper instead, which +// will construct a configuration automatically based on certain environment +// variables. 
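+//
+// Illustrative, editor-added sketch (not part of the upstream file): the usual
+// pattern is to build a single package-level Helper in TestMain through the
+// AutoInitProviderHelper wrapper and close it before the process exits. The
+// ".." source directory is a placeholder.
+//
+//	var testHelper *tftest.Helper
+//
+//	func TestMain(m *testing.M) {
+//		testHelper = tftest.AutoInitProviderHelper("..")
+//		code := m.Run()
+//		testHelper.Close()
+//		os.Exit(code)
+//	}
+//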
+// +// If this function returns an error then it may have left some temporary files +// behind in the system's temporary directory. There is currently no way to +// automatically clean those up. +func InitHelper(config *Config) (*Helper, error) { + tempDir := os.Getenv("TF_ACC_TEMP_DIR") + baseDir, err := ioutil.TempDir(tempDir, "tftest") + if err != nil { + return nil, fmt.Errorf("failed to create temporary directory for test helper: %s", err) + } + + return &Helper{ + baseDir: baseDir, + sourceDir: config.SourceDir, + terraformExec: config.TerraformExec, + execTempDir: config.execTempDir, + }, nil +} + +// symlinkAuxiliaryProviders discovers auxiliary provider binaries, used in +// multi-provider tests, and symlinks them to the plugin directory. +// +// Auxiliary provider binaries should be included in the provider source code +// directory, under the path terraform.d/plugins/$GOOS_$GOARCH/provider-name. +// +// The environment variable TF_ACC_PROVIDER_ROOT_DIR must be set to the path of +// the provider source code directory root in order to use this feature. +func symlinkAuxiliaryProviders(pluginDir string) error { + providerRootDir := os.Getenv("TF_ACC_PROVIDER_ROOT_DIR") + if providerRootDir == "" { + // common case; assume intentional and do not log + return nil + } + + _, err := os.Stat(filepath.Join(providerRootDir, "terraform.d", "plugins")) + if os.IsNotExist(err) { + fmt.Printf("No terraform.d/plugins directory found: continuing. Unset TF_ACC_PROVIDER_ROOT_DIR or supply provider binaries in terraform.d/plugins/$GOOS_$GOARCH to disable this message.") + return nil + } else if err != nil { + return fmt.Errorf("Unexpected error: %s", err) + } + + auxiliaryProviderDir := filepath.Join(providerRootDir, "terraform.d", "plugins", runtime.GOOS+"_"+runtime.GOARCH) + + // If we can't os.Stat() terraform.d/plugins/$GOOS_$GOARCH, however, + // assume the omission was unintentional, and error. + _, err = os.Stat(auxiliaryProviderDir) + if os.IsNotExist(err) { + return fmt.Errorf("error finding auxiliary provider dir %s: %s", auxiliaryProviderDir, err) + } else if err != nil { + return fmt.Errorf("Unexpected error: %s", err) + } + + // now find all the providers in that dir and symlink them to the plugin dir + providers, err := ioutil.ReadDir(auxiliaryProviderDir) + if err != nil { + return fmt.Errorf("error reading auxiliary providers: %s", err) + } + + zipDecompressor := new(getter.ZipDecompressor) + + for _, provider := range providers { + filename := provider.Name() + filenameExt := filepath.Ext(filename) + name := strings.TrimSuffix(filename, filenameExt) + path := filepath.Join(auxiliaryProviderDir, name) + symlinkPath := filepath.Join(pluginDir, name) + + // exit early if we have already symlinked this provider + _, err := os.Stat(symlinkPath) + if err == nil { + continue + } + + // if filename ends in .zip, assume it is a zip and extract it + // otherwise assume it is a provider binary + if filenameExt == ".zip" { + _, err = os.Stat(path) + if os.IsNotExist(err) { + zipDecompressor.Decompress(path, filepath.Join(auxiliaryProviderDir, filename), false) + } else if err != nil { + return fmt.Errorf("Unexpected error: %s", err) + } + } + + err = symlinkFile(path, symlinkPath) + if err != nil { + return fmt.Errorf("error symlinking auxiliary provider %s: %s", name, err) + } + } + + return nil +} + +// Close cleans up temporary files and directories created to support this +// helper, returning an error if any of the cleanup fails. 
+// +// Call this before returning from TestMain to minimize the amount of detritus +// left behind in the filesystem after the tests complete. +func (h *Helper) Close() error { + if h.execTempDir != "" { + err := os.RemoveAll(h.execTempDir) + if err != nil { + return err + } + } + return os.RemoveAll(h.baseDir) +} + +// NewWorkingDir creates a new working directory for use in the implementation +// of a single test, returning a WorkingDir object representing that directory. +// +// If the working directory object is not itself closed by the time the test +// program exits, the Close method on the helper itself will attempt to +// delete it. +func (h *Helper) NewWorkingDir() (*WorkingDir, error) { + dir, err := ioutil.TempDir(h.baseDir, "work") + if err != nil { + return nil, err + } + + // symlink the provider source files into the base directory + err = symlinkDirectoriesOnly(h.sourceDir, dir) + if err != nil { + return nil, err + } + + return &WorkingDir{ + h: h, + baseArgs: []string{"-no-color"}, + baseDir: dir, + }, nil +} + +// RequireNewWorkingDir is a variant of NewWorkingDir that takes a TestControl +// object and will immediately fail the running test if the creation of the +// working directory fails. +func (h *Helper) RequireNewWorkingDir(t TestControl) *WorkingDir { + t.Helper() + + wd, err := h.NewWorkingDir() + if err != nil { + t := testingT{t} + t.Fatalf("failed to create new working directory: %s", err) + return nil + } + return wd +} + +// TerraformExecPath returns the location of the Terraform CLI executable that +// should be used when running tests. +func (h *Helper) TerraformExecPath() string { + return h.terraformExec +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/plugin.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/plugin.go new file mode 100644 index 000000000..4764c6b4c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/plugin.go @@ -0,0 +1,15 @@ +package tftest + +import ( + "os" +) + +// RunningAsPlugin returns true if it detects the usual Terraform plugin +// detection environment variables, suggesting that the current process is +// being launched as a plugin server. +func RunningAsPlugin() bool { + const cookieVar = "TF_PLUGIN_MAGIC_COOKIE" + const cookieVal = "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2" + + return os.Getenv(cookieVar) == cookieVal +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/terraform.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/terraform.go new file mode 100644 index 000000000..124c9e44c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/terraform.go @@ -0,0 +1,106 @@ +package tftest + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "strings" +) + +// getTerraformEnv returns the appropriate Env for the Terraform command. +func (wd *WorkingDir) getTerraformEnv() []string { + var env []string + for _, e := range os.Environ() { + env = append(env, e) + } + + env = append(env, "TF_DISABLE_PLUGIN_TLS=1") + env = append(env, "TF_SKIP_PROVIDER_VERIFY=1") + + // FIXME: Ideally in testing.Verbose mode we'd turn on Terraform DEBUG + // logging, perhaps redirected to a separate fd other than stderr to avoid + // polluting it, and then propagate the log lines out into t.Log so that + // they are visible to the person running the test. Currently though, + // Terraform CLI is able to send logs only to either an on-disk file or + // to stderr. 
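+	// Editor's note (not in the upstream file): ordering matters below. When
+	// exec.Cmd sees duplicate keys it uses the last value in the slice, so the
+	// per-WorkingDir overrides from wd.env, appended at the end, win over both
+	// the inherited process environment and the defaults set here.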
+ env = append(env, "TF_LOG=") // so logging can't pollute our stderr output + env = append(env, "TF_INPUT=0") + + // don't propagate the magic cookie + env = append(env, "TF_PLUGIN_MAGIC_COOKIE=") + + if p := os.Getenv("TF_ACC_LOG_PATH"); p != "" { + env = append(env, "TF_LOG=TRACE") + env = append(env, "TF_LOG_PATH="+p) + } + + for k, v := range wd.env { + env = append(env, k+"="+v) + } + return env +} + +// runTerraform runs the configured Terraform CLI executable with the given +// arguments, returning an error if it produces a non-successful exit status. +// if captureStdout is non-nil, the process will write it's stdout to the +// provided io.Writer +func (wd *WorkingDir) runTerraform(captureStdout io.Writer, args ...string) error { + allArgs := []string{"terraform"} + allArgs = append(allArgs, args...) + + env := wd.getTerraformEnv() + + var errBuf strings.Builder + + cmd := &exec.Cmd{ + Path: wd.h.TerraformExecPath(), + Args: allArgs, + Dir: wd.baseDir, + Stderr: &errBuf, + Env: env, + } + + if captureStdout != nil { + cmd.Stdout = captureStdout + } + + err := cmd.Run() + if tErr, ok := err.(*exec.ExitError); ok { + err = fmt.Errorf("terraform failed: %s\n\nstderr:\n%s", tErr.ProcessState.String(), errBuf.String()) + } + return err +} + +// runTerraformJSON runs the configured Terraform CLI executable with the given +// arguments and tries to decode its stdout into the given target value (which +// must be a non-nil pointer) as JSON. +func (wd *WorkingDir) runTerraformJSON(target interface{}, args ...string) error { + allArgs := []string{"terraform"} + allArgs = append(allArgs, args...) + + env := wd.getTerraformEnv() + + var outBuf bytes.Buffer + var errBuf strings.Builder + + cmd := &exec.Cmd{ + Path: wd.h.TerraformExecPath(), + Args: allArgs, + Dir: wd.baseDir, + Stderr: &errBuf, + Stdout: &outBuf, + Env: env, + } + err := cmd.Run() + if err != nil { + if tErr, ok := err.(*exec.ExitError); ok { + err = fmt.Errorf("terraform failed: %s\n\nstderr:\n%s", tErr.ProcessState.String(), errBuf.String()) + } + return err + } + + return json.Unmarshal(outBuf.Bytes(), target) +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/util.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/util.go new file mode 100644 index 000000000..57bc84f2d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/util.go @@ -0,0 +1,95 @@ +package tftest + +import ( + "os" + "path/filepath" +) + +func symlinkFile(src string, dest string) (err error) { + err = os.Symlink(src, dest) + if err == nil { + srcInfo, err := os.Stat(src) + if err != nil { + err = os.Chmod(dest, srcInfo.Mode()) + } + } + + return +} + +// symlinkDir is a simplistic function for recursively symlinking all files in a directory to a new path. +// It is intended only for limited internal use and does not cover all edge cases. 
+func symlinkDir(srcDir string, destDir string) (err error) { + srcInfo, err := os.Stat(srcDir) + if err != nil { + return err + } + + err = os.MkdirAll(destDir, srcInfo.Mode()) + if err != nil { + return err + } + + directory, _ := os.Open(srcDir) + defer directory.Close() + objects, err := directory.Readdir(-1) + + for _, obj := range objects { + srcPath := filepath.Join(srcDir, obj.Name()) + destPath := filepath.Join(destDir, obj.Name()) + + if obj.IsDir() { + err = symlinkDir(srcPath, destPath) + if err != nil { + return err + } + } else { + err = symlinkFile(srcPath, destPath) + if err != nil { + return err + } + } + + } + return +} + +// symlinkDirectoriesOnly finds only the first-level child directories in srcDir +// and symlinks them into destDir. +// Unlike symlinkDir, this is done non-recursively in order to limit the number +// of file descriptors used. +func symlinkDirectoriesOnly(srcDir string, destDir string) (err error) { + srcInfo, err := os.Stat(srcDir) + if err != nil { + return err + } + + err = os.MkdirAll(destDir, srcInfo.Mode()) + if err != nil { + return err + } + + directory, err := os.Open(srcDir) + if err != nil { + return err + } + defer directory.Close() + objects, err := directory.Readdir(-1) + if err != nil { + return err + } + + for _, obj := range objects { + srcPath := filepath.Join(srcDir, obj.Name()) + destPath := filepath.Join(destDir, obj.Name()) + + if obj.IsDir() { + err = symlinkFile(srcPath, destPath) + if err != nil { + return err + } + } + + } + return +} diff --git a/vendor/github.com/hashicorp/terraform-plugin-test/v2/working_dir.go b/vendor/github.com/hashicorp/terraform-plugin-test/v2/working_dir.go new file mode 100644 index 000000000..e91792359 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-plugin-test/v2/working_dir.go @@ -0,0 +1,426 @@ +package tftest + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + tfjson "github.com/hashicorp/terraform-json" +) + +// WorkingDir represents a distinct working directory that can be used for +// running tests. Each test should construct its own WorkingDir by calling +// NewWorkingDir or RequireNewWorkingDir on its package's singleton +// tftest.Helper. +type WorkingDir struct { + h *Helper + + // baseDir is the root of the working directory tree + baseDir string + + // baseArgs is arguments that should be appended to all commands + baseArgs []string + + // configDir contains the singular config file generated for each test + configDir string + + env map[string]string +} + +// Close deletes the directories and files created to represent the receiving +// working directory. After this method is called, the working directory object +// is invalid and may no longer be used. +func (wd *WorkingDir) Close() error { + return os.RemoveAll(wd.baseDir) +} + +// Setenv sets an environment variable on the WorkingDir. +func (wd *WorkingDir) Setenv(envVar, val string) { + if wd.env == nil { + wd.env = map[string]string{} + } + wd.env[envVar] = val +} + +// Unsetenv removes an environment variable from the WorkingDir. +func (wd *WorkingDir) Unsetenv(envVar string) { + delete(wd.env, envVar) +} + +// GetHelper returns the Helper set on the WorkingDir. 
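+//
+// Illustrative, editor-added sketch (not part of the upstream file, and not
+// specific to GetHelper): environment variables set through Setenv are passed
+// to every Terraform command run in this working directory until Unsetenv
+// removes them. The variable name and value are examples only.
+//
+//	wd := testHelper.RequireNewWorkingDir(t)
+//	defer wd.Close()
+//	wd.Setenv("AWS_DEFAULT_REGION", "us-east-1")
+//	defer wd.Unsetenv("AWS_DEFAULT_REGION")
+//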
+func (wd *WorkingDir) GetHelper() *Helper { + return wd.h +} + +func (wd *WorkingDir) relativeConfigDir() (string, error) { + relPath, err := filepath.Rel(wd.baseDir, wd.configDir) + if err != nil { + return "", fmt.Errorf("Error determining relative path of configuration directory: %w", err) + } + return relPath, nil +} + +// SetConfig sets a new configuration for the working directory. +// +// This must be called at least once before any call to Init, Plan, Apply, or +// Destroy to establish the configuration. Any previously-set configuration is +// discarded and any saved plan is cleared. +func (wd *WorkingDir) SetConfig(cfg string) error { + // Each call to SetConfig creates a new directory under our baseDir. + // We create them within so that our final cleanup step will delete them + // automatically without any additional tracking. + configDir, err := ioutil.TempDir(wd.baseDir, "config") + if err != nil { + return err + } + configFilename := filepath.Join(configDir, "terraform_plugin_test.tf") + err = ioutil.WriteFile(configFilename, []byte(cfg), 0700) + if err != nil { + return err + } + + wd.configDir = configDir + + // Changing configuration invalidates any saved plan. + err = wd.ClearPlan() + if err != nil { + return err + } + return nil +} + +// RequireSetConfig is a variant of SetConfig that will fail the test via the +// given TestControl if the configuration cannot be set. +func (wd *WorkingDir) RequireSetConfig(t TestControl, cfg string) { + t.Helper() + if err := wd.SetConfig(cfg); err != nil { + t := testingT{t} + t.Fatalf("failed to set config: %s", err) + } +} + +// ClearState deletes any Terraform state present in the working directory. +// +// Any remote objects tracked by the state are not destroyed first, so this +// will leave them dangling in the remote system. +func (wd *WorkingDir) ClearState() error { + err := os.Remove(filepath.Join(wd.baseDir, "terraform.tfstate")) + if os.IsNotExist(err) { + return nil + } + return err +} + +// RequireClearState is a variant of ClearState that will fail the test via the +// given TestControl if the state cannot be cleared. +func (wd *WorkingDir) RequireClearState(t TestControl) { + t.Helper() + if err := wd.ClearState(); err != nil { + t := testingT{t} + t.Fatalf("failed to clear state: %s", err) + } +} + +// ClearPlan deletes any saved plan present in the working directory. +func (wd *WorkingDir) ClearPlan() error { + err := os.Remove(wd.planFilename()) + if os.IsNotExist(err) { + return nil + } + return err +} + +// RequireClearPlan is a variant of ClearPlan that will fail the test via the +// given TestControl if the plan cannot be cleared. +func (wd *WorkingDir) RequireClearPlan(t TestControl) { + t.Helper() + if err := wd.ClearPlan(); err != nil { + t := testingT{t} + t.Fatalf("failed to clear plan: %s", err) + } +} + +func (wd *WorkingDir) init() error { + args := []string{"init", wd.configDir} + args = append(args, wd.baseArgs...) + return wd.runTerraform(nil, args...) +} + +// Init runs "terraform init" for the given working directory, forcing Terraform +// to use the current version of the plugin under test. +func (wd *WorkingDir) Init() error { + if wd.configDir == "" { + return fmt.Errorf("must call SetConfig before Init") + } + return wd.init() +} + +// RequireInit is a variant of Init that will fail the test via the given +// TestControl if init fails. 
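+//
+// Illustrative, editor-added sketch (not part of the upstream file): a minimal
+// end-to-end flow through a working directory. The HCL string is a stand-in
+// for whatever configuration a given test needs.
+//
+//	wd := testHelper.RequireNewWorkingDir(t)
+//	defer wd.Close()
+//
+//	wd.RequireSetConfig(t, `resource "null_resource" "example" {}`)
+//	wd.RequireInit(t)
+//	defer wd.RequireDestroy(t)
+//	wd.RequireApply(t)
+//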
+func (wd *WorkingDir) RequireInit(t TestControl) { + t.Helper() + if err := wd.Init(); err != nil { + t := testingT{t} + t.Fatalf("init failed: %s", err) + } +} + +func (wd *WorkingDir) planFilename() string { + return filepath.Join(wd.baseDir, "tfplan") +} + +// CreatePlan runs "terraform plan" to create a saved plan file, which if successful +// will then be used for the next call to Apply. +func (wd *WorkingDir) CreatePlan() error { + args := []string{"plan", "-refresh=false"} + args = append(args, wd.baseArgs...) + args = append(args, "-out=tfplan", wd.configDir) + return wd.runTerraform(nil, args...) +} + +// RequireCreatePlan is a variant of CreatePlan that will fail the test via +// the given TestControl if plan creation fails. +func (wd *WorkingDir) RequireCreatePlan(t TestControl) { + t.Helper() + if err := wd.CreatePlan(); err != nil { + t := testingT{t} + t.Fatalf("failed to create plan: %s", err) + } +} + +// Apply runs "terraform apply". If CreatePlan has previously completed +// successfully and the saved plan has not been cleared in the meantime then +// this will apply the saved plan. Otherwise, it will implicitly create a new +// plan and apply it. +func (wd *WorkingDir) Apply() error { + args := []string{"apply", "-refresh=false"} + args = append(args, wd.baseArgs...) + + if wd.HasSavedPlan() { + args = append(args, "tfplan") + } else { + // we need to use a relative config dir here or we get an + // error about Terraform not having any configuration. See + // https://github.com/hashicorp/terraform-plugin-sdk/issues/495 + // for more info. + configDir, err := wd.relativeConfigDir() + if err != nil { + return err + } + args = append(args, "-auto-approve") + args = append(args, configDir) + } + + return wd.runTerraform(nil, args...) +} + +// RequireApply is a variant of Apply that will fail the test via +// the given TestControl if the apply operation fails. +func (wd *WorkingDir) RequireApply(t TestControl) { + t.Helper() + if err := wd.Apply(); err != nil { + t := testingT{t} + t.Fatalf("failed to apply: %s", err) + } +} + +// Destroy runs "terraform destroy". It does not consider or modify any saved +// plan, and is primarily for cleaning up at the end of a test run. +// +// If destroy fails then remote objects might still exist, and continue to +// exist after a particular test is concluded. +func (wd *WorkingDir) Destroy() error { + args := []string{"destroy", "-refresh=false"} + args = append(args, wd.baseArgs...) + + args = append(args, "-auto-approve", wd.configDir) + return wd.runTerraform(nil, args...) +} + +// RequireDestroy is a variant of Destroy that will fail the test via +// the given TestControl if the destroy operation fails. +// +// If destroy fails then remote objects might still exist, and continue to +// exist after a particular test is concluded. +func (wd *WorkingDir) RequireDestroy(t TestControl) { + t.Helper() + if err := wd.Destroy(); err != nil { + t := testingT{t} + t.Logf("WARNING: destroy failed, so remote objects may still exist and be subject to billing") + t.Fatalf("failed to destroy: %s", err) + } +} + +// HasSavedPlan returns true if there is a saved plan in the working directory. If +// so, a subsequent call to Apply will apply that saved plan. +func (wd *WorkingDir) HasSavedPlan() bool { + _, err := os.Stat(wd.planFilename()) + return err == nil +} + +// SavedPlan returns an object describing the current saved plan file, if any. +// +// If no plan is saved or if the plan file cannot be read, SavedPlan returns +// an error. 
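+//
+// Illustrative, editor-added sketch (not part of the upstream file): after
+// CreatePlan succeeds, the saved plan can be decoded and inspected through the
+// terraform-json Plan type.
+//
+//	wd.RequireCreatePlan(t)
+//	plan := wd.RequireSavedPlan(t)
+//	for _, rc := range plan.ResourceChanges {
+//		t.Logf("planned %v for %s", rc.Change.Actions, rc.Address)
+//	}
+//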
+func (wd *WorkingDir) SavedPlan() (*tfjson.Plan, error) {
+	if !wd.HasSavedPlan() {
+		return nil, fmt.Errorf("there is no current saved plan")
+	}
+
+	var ret tfjson.Plan
+
+	args := []string{"show"}
+	args = append(args, wd.baseArgs...)
+	args = append(args, "-json", wd.planFilename())
+
+	err := wd.runTerraformJSON(&ret, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ret, nil
+}
+
+// RequireSavedPlan is a variant of SavedPlan that will fail the test via
+// the given TestControl if the plan cannot be read.
+func (wd *WorkingDir) RequireSavedPlan(t TestControl) *tfjson.Plan {
+	t.Helper()
+	ret, err := wd.SavedPlan()
+	if err != nil {
+		t := testingT{t}
+		t.Fatalf("failed to read saved plan: %s", err)
+	}
+	return ret
+}
+
+// SavedPlanStdout returns a stdout capture of the current saved plan file, if any.
+//
+// If no plan is saved or if the plan file cannot be read, SavedPlanStdout returns
+// an error.
+func (wd *WorkingDir) SavedPlanStdout() (string, error) {
+	if !wd.HasSavedPlan() {
+		return "", fmt.Errorf("there is no current saved plan")
+	}
+
+	var ret bytes.Buffer
+
+	args := []string{"show"}
+	args = append(args, wd.baseArgs...)
+	args = append(args, wd.planFilename())
+
+	err := wd.runTerraform(&ret, args...)
+	if err != nil {
+		return "", err
+	}
+
+	return ret.String(), nil
+}
+
+// RequireSavedPlanStdout is a variant of SavedPlanStdout that will fail the test via
+// the given TestControl if the plan cannot be read.
+func (wd *WorkingDir) RequireSavedPlanStdout(t TestControl) string {
+	t.Helper()
+	ret, err := wd.SavedPlanStdout()
+	if err != nil {
+		t := testingT{t}
+		t.Fatalf("failed to read saved plan: %s", err)
+	}
+	return ret
+}
+
+// State returns an object describing the current state.
+//
+// If the state cannot be read, State returns an error.
+func (wd *WorkingDir) State() (*tfjson.State, error) {
+	var ret tfjson.State
+
+	args := []string{"show"}
+	args = append(args, wd.baseArgs...)
+	args = append(args, "-json")
+
+	err := wd.runTerraformJSON(&ret, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &ret, nil
+}
+
+// RequireState is a variant of State that will fail the test via
+// the given TestControl if the state cannot be read.
+func (wd *WorkingDir) RequireState(t TestControl) *tfjson.State {
+	t.Helper()
+	ret, err := wd.State()
+	if err != nil {
+		t := testingT{t}
+		t.Fatalf("failed to read state: %s", err)
+	}
+	return ret
+}
+
+// Import runs "terraform import".
+func (wd *WorkingDir) Import(resource, id string) error {
+	args := []string{"import"}
+	args = append(args, wd.baseArgs...)
+	args = append(args, "-config="+wd.configDir, resource, id)
+	return wd.runTerraform(nil, args...)
+}
+
+// RequireImport is a variant of Import that will fail the test via
+// the given TestControl if the import is unsuccessful.
+func (wd *WorkingDir) RequireImport(t TestControl, resource, id string) {
+	t.Helper()
+	if err := wd.Import(resource, id); err != nil {
+		t := testingT{t}
+		t.Fatalf("failed to import: %s", err)
+	}
+}
+
+// Refresh runs "terraform refresh".
+func (wd *WorkingDir) Refresh() error {
+	args := []string{"refresh"}
+	args = append(args, wd.baseArgs...)
+	args = append(args, "-state="+filepath.Join(wd.baseDir, "terraform.tfstate"))
+	args = append(args, wd.configDir)
+	return wd.runTerraform(nil, args...)
+}
+
+// RequireRefresh is a variant of Refresh that will fail the test via
+// the given TestControl if the refresh is unsuccessful.
+func (wd *WorkingDir) RequireRefresh(t TestControl) { + t.Helper() + if err := wd.Refresh(); err != nil { + t := testingT{t} + t.Fatalf("failed to refresh: %s", err) + } +} + +// Schemas returns an object describing the provider schemas. +// +// If the schemas cannot be read, Schemas returns an error. +func (wd *WorkingDir) Schemas() (*tfjson.ProviderSchemas, error) { + args := []string{"providers", wd.configDir, "schema"} + + var ret tfjson.ProviderSchemas + err := wd.runTerraformJSON(&ret, args...) + if err != nil { + return nil, err + } + + return &ret, nil +} + +// RequireSchemas is a variant of Schemas that will fail the test via +// the given TestControl if the schemas cannot be read. +func (wd *WorkingDir) RequireSchemas(t TestControl) *tfjson.ProviderSchemas { + t.Helper() + + ret, err := wd.Schemas() + if err != nil { + t := testingT{t} + t.Fatalf("failed to read schemas: %s", err) + } + return ret +} diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go b/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go index 09c57a330..0dae567db 100644 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go +++ b/vendor/github.com/hashicorp/terraform-svchost/auth/cache.go @@ -1,7 +1,7 @@ package auth import ( - svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost" ) // CachingCredentialsSource creates a new credentials source that wraps another diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go b/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go index fc40e4cc4..36441cd11 100644 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go +++ b/vendor/github.com/hashicorp/terraform-svchost/auth/credentials.go @@ -6,8 +6,9 @@ import ( "fmt" "net/http" - svchost "github.com/hashicorp/terraform-svchost" "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform-svchost" ) // Credentials is a list of CredentialsSource objects that can be tried in diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go b/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go index 02d5dd0f5..76505f209 100644 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go +++ b/vendor/github.com/hashicorp/terraform-svchost/auth/helper_program.go @@ -7,8 +7,9 @@ import ( "os/exec" "path/filepath" - svchost "github.com/hashicorp/terraform-svchost" ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform-svchost" ) type helperProgramCredentialsSource struct { diff --git a/vendor/github.com/hashicorp/terraform-svchost/auth/static.go b/vendor/github.com/hashicorp/terraform-svchost/auth/static.go index df4b6eff6..f8b0b076e 100644 --- a/vendor/github.com/hashicorp/terraform-svchost/auth/static.go +++ b/vendor/github.com/hashicorp/terraform-svchost/auth/static.go @@ -3,7 +3,7 @@ package auth import ( "fmt" - svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost" ) // StaticCredentialsSource is a credentials source that retrieves credentials diff --git a/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go b/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go index 97fb4b0b5..978313633 100644 --- a/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go +++ b/vendor/github.com/hashicorp/terraform-svchost/disco/disco.go @@ -17,7 +17,7 @@ import ( "net/url" "time" - svchost "github.com/hashicorp/terraform-svchost" + "github.com/hashicorp/terraform-svchost" 
"github.com/hashicorp/terraform-svchost/auth" ) diff --git a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go deleted file mode 100644 index a7fb87bcf..000000000 --- a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go +++ /dev/null @@ -1,192 +0,0 @@ -package compressutil - -import ( - "bytes" - "compress/gzip" - "compress/lzw" - "fmt" - "io" - - "github.com/golang/snappy" - "github.com/hashicorp/errwrap" -) - -const ( - // A byte value used as a canary prefix for the compressed information - // which is used to distinguish if a JSON input is compressed or not. - // The value of this constant should not be a first character of any - // valid JSON string. - - // Byte value used as canary when using Gzip format - CompressionCanaryGzip byte = 'G' - - // Byte value used as canary when using Lzw format - CompressionCanaryLzw byte = 'L' - - // Byte value used as canary when using Snappy format - CompressionCanarySnappy byte = 'S' - - CompressionTypeLzw = "lzw" - - CompressionTypeGzip = "gzip" - - CompressionTypeSnappy = "snappy" -) - -// SnappyReadCloser embeds the snappy reader which implements the io.Reader -// interface. The decompress procedure in this utility expects an -// io.ReadCloser. This type implements the io.Closer interface to retain the -// generic way of decompression. -type SnappyReadCloser struct { - *snappy.Reader -} - -// Close is a noop method implemented only to satisfy the io.Closer interface -func (s *SnappyReadCloser) Close() error { - return nil -} - -// CompressionConfig is used to select a compression type to be performed by -// Compress and Decompress utilities. -// Supported types are: -// * CompressionTypeLzw -// * CompressionTypeGzip -// * CompressionTypeSnappy -// -// When using CompressionTypeGzip, the compression levels can also be chosen: -// * gzip.DefaultCompression -// * gzip.BestSpeed -// * gzip.BestCompression -type CompressionConfig struct { - // Type of the compression algorithm to be used - Type string - - // When using Gzip format, the compression level to employ - GzipCompressionLevel int -} - -// Compress places the canary byte in a buffer and uses the same buffer to fill -// in the compressed information of the given input. The configuration supports -// two type of compression: LZW and Gzip. When using Gzip compression format, -// if GzipCompressionLevel is not specified, the 'gzip.DefaultCompression' will -// be assumed. 
-func Compress(data []byte, config *CompressionConfig) ([]byte, error) { - var buf bytes.Buffer - var writer io.WriteCloser - var err error - - if config == nil { - return nil, fmt.Errorf("config is nil") - } - - // Write the canary into the buffer and create writer to compress the - // input data based on the configured type - switch config.Type { - case CompressionTypeLzw: - buf.Write([]byte{CompressionCanaryLzw}) - - writer = lzw.NewWriter(&buf, lzw.LSB, 8) - case CompressionTypeGzip: - buf.Write([]byte{CompressionCanaryGzip}) - - switch { - case config.GzipCompressionLevel == gzip.BestCompression, - config.GzipCompressionLevel == gzip.BestSpeed, - config.GzipCompressionLevel == gzip.DefaultCompression: - // These are valid compression levels - default: - // If compression level is set to NoCompression or to - // any invalid value, fallback to Defaultcompression - config.GzipCompressionLevel = gzip.DefaultCompression - } - writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel) - case CompressionTypeSnappy: - buf.Write([]byte{CompressionCanarySnappy}) - writer = snappy.NewBufferedWriter(&buf) - default: - return nil, fmt.Errorf("unsupported compression type") - } - - if err != nil { - return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err) - } - - if writer == nil { - return nil, fmt.Errorf("failed to create a compression writer") - } - - // Compress the input and place it in the same buffer containing the - // canary byte. - if _, err = writer.Write(data); err != nil { - return nil, errwrap.Wrapf("failed to compress input data: err: {{err}}", err) - } - - // Close the io.WriteCloser - if err = writer.Close(); err != nil { - return nil, err - } - - // Return the compressed bytes with canary byte at the start - return buf.Bytes(), nil -} - -// Decompress checks if the first byte in the input matches the canary byte. -// If the first byte is a canary byte, then the input past the canary byte -// will be decompressed using the method specified in the given configuration. -// If the first byte isn't a canary byte, then the utility returns a boolean -// value indicating that the input was not compressed. -func Decompress(data []byte) ([]byte, bool, error) { - var err error - var reader io.ReadCloser - if data == nil || len(data) == 0 { - return nil, false, fmt.Errorf("'data' being decompressed is empty") - } - - switch { - // If the first byte matches the canary byte, remove the canary - // byte and try to decompress the data that is after the canary. - case data[0] == CompressionCanaryGzip: - if len(data) < 2 { - return nil, false, fmt.Errorf("invalid 'data' after the canary") - } - data = data[1:] - reader, err = gzip.NewReader(bytes.NewReader(data)) - case data[0] == CompressionCanaryLzw: - if len(data) < 2 { - return nil, false, fmt.Errorf("invalid 'data' after the canary") - } - data = data[1:] - reader = lzw.NewReader(bytes.NewReader(data), lzw.LSB, 8) - - case data[0] == CompressionCanarySnappy: - if len(data) < 2 { - return nil, false, fmt.Errorf("invalid 'data' after the canary") - } - data = data[1:] - reader = &SnappyReadCloser{ - Reader: snappy.NewReader(bytes.NewReader(data)), - } - default: - // If the first byte doesn't match the canary byte, it means - // that the content was not compressed at all. Indicate the - // caller that the input was not compressed. 
- return nil, true, nil - } - if err != nil { - return nil, false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err) - } - if reader == nil { - return nil, false, fmt.Errorf("failed to create a compression reader") - } - - // Close the io.ReadCloser - defer reader.Close() - - // Read all the compressed data into a buffer - var buf bytes.Buffer - if _, err = io.Copy(&buf, reader); err != nil { - return nil, false, err - } - - return buf.Bytes(), false, nil -} diff --git a/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go b/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go deleted file mode 100644 index d03ddef5f..000000000 --- a/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go +++ /dev/null @@ -1,100 +0,0 @@ -package jsonutil - -import ( - "bytes" - "compress/gzip" - "encoding/json" - "fmt" - "io" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/vault/helper/compressutil" -) - -// Encodes/Marshals the given object into JSON -func EncodeJSON(in interface{}) ([]byte, error) { - if in == nil { - return nil, fmt.Errorf("input for encoding is nil") - } - var buf bytes.Buffer - enc := json.NewEncoder(&buf) - if err := enc.Encode(in); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// EncodeJSONAndCompress encodes the given input into JSON and compresses the -// encoded value (using Gzip format BestCompression level, by default). A -// canary byte is placed at the beginning of the returned bytes for the logic -// in decompression method to identify compressed input. -func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) { - if in == nil { - return nil, fmt.Errorf("input for encoding is nil") - } - - // First JSON encode the given input - encodedBytes, err := EncodeJSON(in) - if err != nil { - return nil, err - } - - if config == nil { - config = &compressutil.CompressionConfig{ - Type: compressutil.CompressionTypeGzip, - GzipCompressionLevel: gzip.BestCompression, - } - } - - return compressutil.Compress(encodedBytes, config) -} - -// DecodeJSON tries to decompress the given data. The call to decompress, fails -// if the content was not compressed in the first place, which is identified by -// a canary byte before the compressed data. If the data is not compressed, it -// is JSON decoded directly. Otherwise the decompressed data will be JSON -// decoded. -func DecodeJSON(data []byte, out interface{}) error { - if data == nil || len(data) == 0 { - return fmt.Errorf("'data' being decoded is nil") - } - if out == nil { - return fmt.Errorf("output parameter 'out' is nil") - } - - // Decompress the data if it was compressed in the first place - decompressedBytes, uncompressed, err := compressutil.Decompress(data) - if err != nil { - return errwrap.Wrapf("failed to decompress JSON: {{err}}", err) - } - if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) { - return fmt.Errorf("decompressed data being decoded is invalid") - } - - // If the input supplied failed to contain the compression canary, it - // will be notified by the compression utility. Decode the decompressed - // input. 
- if !uncompressed { - data = decompressedBytes - } - - return DecodeJSONFromReader(bytes.NewReader(data), out) -} - -// Decodes/Unmarshals the given io.Reader pointing to a JSON, into a desired object -func DecodeJSONFromReader(r io.Reader, out interface{}) error { - if r == nil { - return fmt.Errorf("'io.Reader' being decoded is nil") - } - if out == nil { - return fmt.Errorf("output parameter 'out' is nil") - } - - dec := json.NewDecoder(r) - - // While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`. - dec.UseNumber() - - // Since 'out' is an interface representing a pointer, pass it to the decoder without an '&' - return dec.Decode(out) -} diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go deleted file mode 100644 index eef4c5ed0..000000000 --- a/vendor/github.com/hashicorp/vault/helper/pgpkeys/encrypt_decrypt.go +++ /dev/null @@ -1,118 +0,0 @@ -package pgpkeys - -import ( - "bytes" - "encoding/base64" - "fmt" - - "github.com/hashicorp/errwrap" - "github.com/keybase/go-crypto/openpgp" - "github.com/keybase/go-crypto/openpgp/packet" -) - -// EncryptShares takes an ordered set of byte slices to encrypt and the -// corresponding base64-encoded public keys to encrypt them with, encrypts each -// byte slice with the corresponding public key. -// -// Note: There is no corresponding test function; this functionality is -// thoroughly tested in the init and rekey command unit tests -func EncryptShares(input [][]byte, pgpKeys []string) ([]string, [][]byte, error) { - if len(input) != len(pgpKeys) { - return nil, nil, fmt.Errorf("mismatch between number items to encrypt and number of PGP keys") - } - encryptedShares := make([][]byte, 0, len(pgpKeys)) - entities, err := GetEntities(pgpKeys) - if err != nil { - return nil, nil, err - } - for i, entity := range entities { - ctBuf := bytes.NewBuffer(nil) - pt, err := openpgp.Encrypt(ctBuf, []*openpgp.Entity{entity}, nil, nil, nil) - if err != nil { - return nil, nil, errwrap.Wrapf("error setting up encryption for PGP message: {{err}}", err) - } - _, err = pt.Write(input[i]) - if err != nil { - return nil, nil, errwrap.Wrapf("error encrypting PGP message: {{err}}", err) - } - pt.Close() - encryptedShares = append(encryptedShares, ctBuf.Bytes()) - } - - fingerprints, err := GetFingerprints(nil, entities) - if err != nil { - return nil, nil, err - } - - return fingerprints, encryptedShares, nil -} - -// GetFingerprints takes in a list of openpgp Entities and returns the -// fingerprints. If entities is nil, it will instead parse both entities and -// fingerprints from the pgpKeys string slice. 
-func GetFingerprints(pgpKeys []string, entities []*openpgp.Entity) ([]string, error) { - if entities == nil { - var err error - entities, err = GetEntities(pgpKeys) - - if err != nil { - return nil, err - } - } - ret := make([]string, 0, len(entities)) - for _, entity := range entities { - ret = append(ret, fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint)) - } - return ret, nil -} - -// GetEntities takes in a string array of base64-encoded PGP keys and returns -// the openpgp Entities -func GetEntities(pgpKeys []string) ([]*openpgp.Entity, error) { - ret := make([]*openpgp.Entity, 0, len(pgpKeys)) - for _, keystring := range pgpKeys { - data, err := base64.StdEncoding.DecodeString(keystring) - if err != nil { - return nil, errwrap.Wrapf("error decoding given PGP key: {{err}}", err) - } - entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(data))) - if err != nil { - return nil, errwrap.Wrapf("error parsing given PGP key: {{err}}", err) - } - ret = append(ret, entity) - } - return ret, nil -} - -// DecryptBytes takes in base64-encoded encrypted bytes and the base64-encoded -// private key and decrypts it. A bytes.Buffer is returned to allow the caller -// to do useful thing with it (get it as a []byte, get it as a string, use it -// as an io.Reader, etc), and also because this function doesn't know if what -// comes out is binary data or a string, so let the caller decide. -func DecryptBytes(encodedCrypt, privKey string) (*bytes.Buffer, error) { - privKeyBytes, err := base64.StdEncoding.DecodeString(privKey) - if err != nil { - return nil, errwrap.Wrapf("error decoding base64 private key: {{err}}", err) - } - - cryptBytes, err := base64.StdEncoding.DecodeString(encodedCrypt) - if err != nil { - return nil, errwrap.Wrapf("error decoding base64 crypted bytes: {{err}}", err) - } - - entity, err := openpgp.ReadEntity(packet.NewReader(bytes.NewBuffer(privKeyBytes))) - if err != nil { - return nil, errwrap.Wrapf("error parsing private key: {{err}}", err) - } - - entityList := &openpgp.EntityList{entity} - md, err := openpgp.ReadMessage(bytes.NewBuffer(cryptBytes), entityList, nil, nil) - if err != nil { - return nil, errwrap.Wrapf("error decrypting the messages: {{err}}", err) - } - - ptBuf := bytes.NewBuffer(nil) - ptBuf.ReadFrom(md.UnverifiedBody) - - return ptBuf, nil -} diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go deleted file mode 100644 index bb0f367d6..000000000 --- a/vendor/github.com/hashicorp/vault/helper/pgpkeys/flag.go +++ /dev/null @@ -1,140 +0,0 @@ -package pgpkeys - -import ( - "bytes" - "encoding/base64" - "errors" - "fmt" - "os" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/keybase/go-crypto/openpgp" -) - -// PubKeyFileFlag implements flag.Value and command.Example to receive exactly -// one PGP or keybase key via a flag. -type PubKeyFileFlag string - -func (p *PubKeyFileFlag) String() string { return string(*p) } - -func (p *PubKeyFileFlag) Set(val string) error { - if p != nil && *p != "" { - return errors.New("can only be specified once") - } - - keys, err := ParsePGPKeys(strings.Split(val, ",")) - if err != nil { - return err - } - - if len(keys) > 1 { - return errors.New("can only specify one pgp key") - } - - *p = PubKeyFileFlag(keys[0]) - return nil -} - -func (p *PubKeyFileFlag) Example() string { return "keybase:user" } - -// PGPPubKeyFiles implements the flag.Value interface and allows parsing and -// reading a list of PGP public key files. 
-type PubKeyFilesFlag []string - -func (p *PubKeyFilesFlag) String() string { - return fmt.Sprint(*p) -} - -func (p *PubKeyFilesFlag) Set(val string) error { - if len(*p) > 0 { - return errors.New("can only be specified once") - } - - keys, err := ParsePGPKeys(strings.Split(val, ",")) - if err != nil { - return err - } - - *p = PubKeyFilesFlag(keys) - return nil -} - -func (p *PubKeyFilesFlag) Example() string { return "keybase:user1, keybase:user2, ..." } - -// ParsePGPKeys takes a list of PGP keys and parses them either using keybase -// or reading them from disk and returns the "expanded" list of pgp keys in -// the same order. -func ParsePGPKeys(keyfiles []string) ([]string, error) { - keys := make([]string, len(keyfiles)) - - keybaseMap, err := FetchKeybasePubkeys(keyfiles) - if err != nil { - return nil, err - } - - for i, keyfile := range keyfiles { - keyfile = strings.TrimSpace(keyfile) - - if strings.HasPrefix(keyfile, kbPrefix) { - key, ok := keybaseMap[keyfile] - if !ok || key == "" { - return nil, fmt.Errorf("keybase user %q not found", strings.TrimPrefix(keyfile, kbPrefix)) - } - keys[i] = key - continue - } - - pgpStr, err := ReadPGPFile(keyfile) - if err != nil { - return nil, err - } - keys[i] = pgpStr - } - - return keys, nil -} - -// ReadPGPFile reads the given PGP file from disk. -func ReadPGPFile(path string) (string, error) { - if path[0] == '@' { - path = path[1:] - } - f, err := os.Open(path) - if err != nil { - return "", err - } - defer f.Close() - buf := bytes.NewBuffer(nil) - _, err = buf.ReadFrom(f) - if err != nil { - return "", err - } - - // First parse as an armored keyring file, if that doesn't work, treat it as a straight binary/b64 string - keyReader := bytes.NewReader(buf.Bytes()) - entityList, err := openpgp.ReadArmoredKeyRing(keyReader) - if err == nil { - if len(entityList) != 1 { - return "", fmt.Errorf("more than one key found in file %q", path) - } - if entityList[0] == nil { - return "", fmt.Errorf("primary key was nil for file %q", path) - } - - serializedEntity := bytes.NewBuffer(nil) - err = entityList[0].Serialize(serializedEntity) - if err != nil { - return "", errwrap.Wrapf(fmt.Sprintf("error serializing entity for file %q: {{err}}", path), err) - } - - return base64.StdEncoding.EncodeToString(serializedEntity.Bytes()), nil - } - - _, err = base64.StdEncoding.DecodeString(buf.String()) - if err == nil { - return buf.String(), nil - } - return base64.StdEncoding.EncodeToString(buf.Bytes()), nil - -} diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go deleted file mode 100644 index eba067762..000000000 --- a/vendor/github.com/hashicorp/vault/helper/pgpkeys/keybase.go +++ /dev/null @@ -1,117 +0,0 @@ -package pgpkeys - -import ( - "bytes" - "encoding/base64" - "fmt" - "strings" - - "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/vault/helper/jsonutil" - "github.com/keybase/go-crypto/openpgp" -) - -const ( - kbPrefix = "keybase:" -) - -// FetchKeybasePubkeys fetches public keys from Keybase given a set of -// usernames, which are derived from correctly formatted input entries. It -// doesn't use their client code due to both the API and the fact that it is -// considered alpha and probably best not to rely on it. The keys are returned -// as base64-encoded strings. 
-func FetchKeybasePubkeys(input []string) (map[string]string, error) { - client := cleanhttp.DefaultClient() - if client == nil { - return nil, fmt.Errorf("unable to create an http client") - } - - if len(input) == 0 { - return nil, nil - } - - usernames := make([]string, 0, len(input)) - for _, v := range input { - if strings.HasPrefix(v, kbPrefix) { - usernames = append(usernames, strings.TrimPrefix(v, kbPrefix)) - } - } - - if len(usernames) == 0 { - return nil, nil - } - - ret := make(map[string]string, len(usernames)) - url := fmt.Sprintf("https://keybase.io/_/api/1.0/user/lookup.json?usernames=%s&fields=public_keys", strings.Join(usernames, ",")) - resp, err := client.Get(url) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - type PublicKeys struct { - Primary struct { - Bundle string - } - } - - type LThem struct { - PublicKeys `json:"public_keys"` - } - - type KbResp struct { - Status struct { - Name string - } - Them []LThem - } - - out := &KbResp{ - Them: []LThem{}, - } - - if err := jsonutil.DecodeJSONFromReader(resp.Body, out); err != nil { - return nil, err - } - - if out.Status.Name != "OK" { - return nil, fmt.Errorf("got non-OK response: %q", out.Status.Name) - } - - missingNames := make([]string, 0, len(usernames)) - var keyReader *bytes.Reader - serializedEntity := bytes.NewBuffer(nil) - for i, themVal := range out.Them { - if themVal.Primary.Bundle == "" { - missingNames = append(missingNames, usernames[i]) - continue - } - keyReader = bytes.NewReader([]byte(themVal.Primary.Bundle)) - entityList, err := openpgp.ReadArmoredKeyRing(keyReader) - if err != nil { - return nil, err - } - if len(entityList) != 1 { - return nil, fmt.Errorf("primary key could not be parsed for user %q", usernames[i]) - } - if entityList[0] == nil { - return nil, fmt.Errorf("primary key was nil for user %q", usernames[i]) - } - - serializedEntity.Reset() - err = entityList[0].Serialize(serializedEntity) - if err != nil { - return nil, errwrap.Wrapf(fmt.Sprintf("error serializing entity for user %q: {{err}}", usernames[i]), err) - } - - // The API returns values in the same ordering requested, so this should properly match - ret[kbPrefix+usernames[i]] = base64.StdEncoding.EncodeToString(serializedEntity.Bytes()) - } - - if len(missingNames) > 0 { - return nil, fmt.Errorf("unable to fetch keys for user(s) %q from keybase", strings.Join(missingNames, ",")) - } - - return ret, nil -} diff --git a/vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go b/vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go deleted file mode 100644 index c10a9055e..000000000 --- a/vendor/github.com/hashicorp/vault/helper/pgpkeys/test_keys.go +++ /dev/null @@ -1,271 +0,0 @@ -package pgpkeys - -const ( - TestPrivKey1 = `lQOYBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da -rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/ -063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f -sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg -8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAEAB/wL+KX0mdeISEpX -oDgt766Key1Kthe8nbEs5dOXIsP7OR7ZPcnE2hy6gftgVFnBGEZnWVN70vmJd6Z5y9d1mI+GecXj -UL0EpI0EmohyYDJsHUnght/5ecRNFA+VeNmGPYNQGCeHJyZOiFunGGENpHU7BbubAht8delz37Mx -JQgvMyR6AKvg8HKBoQeqV1uMWNJE/vKwV/z1dh1sjK/GFxu05Qaq0GTfAjVLuFOyJTS95yq6gblD -jUdbHLp7tBeqIKo9voWCJF5mGOlq3973vVoWETy9b0YYPCE/M7fXmK9dJITHqkROLMW6TgcFeIw4 -yL5KOBCHk+QGPSvyQN7R7Fd5BADwuT1HZmvg7Y9GjarKXDjxdNemUiHtba2rUzfH6uNmKNQvwQek 
-nma5palNUJ4/dz1aPB21FUBXJF5yWwXEdApl+lIDU0J5m4UD26rqEVRq9Kx3GsX+yfcwObkrSzW6 -kmnQSB5KI0fIuegMTM+Jxo3pB/mIRwDTMmk+vfzIGyW+7QQA8aFwFLMdKdfLgSGbl5Z6etmOAVQ2 -Oe2ebegU9z/ewi/Rdt2s9yQiAdGVM8+q15Saz8a+kyS/l1CjNPzr3VpYx1OdZ3gb7i2xoy9GdMYR -ZpTq3TuST95kx/9DqA97JrP23G47U0vwF/cg8ixCYF8Fz5dG4DEsxgMwKqhGdW58wMMD/iytkfMk -Vk6Z958Rpy7lhlC6L3zpO38767bSeZ8gRRi/NMFVOSGYepKFarnfxcTiNa+EoSVA6hUo1N64nALE -sJBpyOoTfKIpz7WwTF1+WogkiYrfM6lHon1+3qlziAcRW0IohM3g2C1i3GWdON4Cl8/PDO3R0E52 -N6iG/ctNNeMiPe60EFZhdWx0IFRlc3QgS2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUI -AgkKCwQWAgMBAh4BAheAAAoJEOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d -4hIHsG7kmJRTJfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C -Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF3 -9jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poe -o+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeUR -BRWdA5gEVduM9QEIAL53hJ5bZJ7oEDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkf -Rqnv981fFwGnh2+I1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a -9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu -9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/z -bfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOleYwxk+FoTqXEAEQEAAQAH+wVyQXaNwnjQ -xfW+M8SJNo0C7e+0d7HsuBTA/d/eP4bj6+X8RaRFVwiMvSAoxsqBNCLJP00qzzKfRQWJseD1H35z -UjM7rNVUEL2k1yppyp61S0qj0TdhVUfJDYZqRYonVgRMvzfDTB1ryKrefKenQYL/jGd9VYMnKmWZ -6GVk4WWXXx61iOt2HNcmSXKetMM1Mg67woPZkA3fJaXZ+zW0zMu4lTSB7yl3+vLGIFYILkCFnREr -drQ+pmIMwozUAt+pBq8dylnkHh6g/FtRfWmLIMDqM1NlyuHRp3dyLDFdTA93osLG0QJblfX54W34 -byX7a4HASelGi3nPjjOAsTFDkuEEANV2viaWk1CV4ryDrXGmy4Xo32Md+laGPRcVfbJ0mjZjhQsO -gWC1tjMs1qZMPhcrKIBCjjdAcAIrGV9h3CXc0uGuez4XxLO+TPBKaS0B8rKhnKph1YZuf+HrOhzS -astDnOjNIT+qucCL/qSbdYpj9of3yY61S59WphPOBjoVM3BFBADka6ZCk81gx8jA2E1e9UqQDmdM -FZaVA1E7++kqVSFRDJGnq+5GrBTwCJ+sevi+Rvf8Nx4AXvpCdtMBPX9RogsUFcR0pMrKBrgRo/Vg -EpuodY2Ef1VtqXR24OxtRf1UwvHKydIsU05rzMAy5uGgQvTzRTXxZFLGUY31wjWqmo9VPQP+PnwA -K83EV2kk2bsXwZ9MXg05iXqGQYR4bEc/12v04BtaNaDS53hBDO4JIa3Bnz+5oUoYhb8FgezUKA9I -n6RdKTTP1BLAu8titeozpNF07V++dPiSE2wrIVsaNHL1pUwW0ql50titVwe+EglWiCKPtJBcCPUA -3oepSPchiDjPqrNCYIkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZAQIABgUCVduM -9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYulEimOPzLUX/Z -XZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr -9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEc -ZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4 -EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY+XsKVYRf -NIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcIPXFv3m3WfUln -G/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O9uK3lQozbw2g -H9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKf -PRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h -7u2CfYyFPu3AlUaGNMBlvy6PEpU=` - - TestPrivKey2 = `lQOYBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG -Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4 -0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e -Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk -Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAEAB/oCBqTIsxlUgLtz -HRpWW5MJ+93xvmVV0JHhRK/ygKghq+zpC6S+cn7dwrEj1JTPh+17lyemYQK+RMeiBEduoWNKuHUd 
-WX353w2411rrc/VuGTglzhd8Ir2BdJlPesCzw4JQnrWqcBqN52W+iwhnE7PWVhnvItWnx6APK5Se -q7dzFWy8Z8tNIHm0pBQbeyo6x2rHHSWkr2fs7V02qFQhii1ayFRMcgdOWSNX6CaZJuYhk/DyjApN -9pVhi3P1pNMpFeV0Pt8Gl1f/9o6/HpAYYEt/6vtVRhFUGgtNi95oc0oyzIJxliRvd6+Z236osigQ -QEBwj1ImRK8TKyWPlykiJWc5BADfldgOCA55o3Qz/z/oVE1mm+a3FmPPTQlHBXotNEsrWV2wmJHe -lNQPI6ZwMtLrBSg8PUpG2Rvao6XJ4ZBl/VcDwfcLgCnALPCcL0L0Z3vH3Sc9Ta/bQWJODG7uSaI1 -iVJ7ArKNtVzTqRQWK967mol9CCqh4A0jRrH0aVEFbrqQ/QQA58iEJaFhzFZjufjC9N8Isn3Ky7xu -h+dk001RNCb1GnNZcx4Ld2IB+uXyYjtg7dNaUhGgGuCBo9nax89bMsBzzUukx3SHq1pxopMg6Dm8 -ImBoIAicuQWgEkaP2T0rlwCozUalJZaG1gyrzkPhkeY7CglpJycHLHfY2MIb46c8+58D/iJ83Q5j -Y4x+sqW2QeUYFwqCcOW8Urg64UxEkgXZXiNMwTAJCaxp/Pz7cgeUDwgv+6CXEdnT1910+byzK9ha -V1Q/65+/JYuCeyHxcoAb4Wtpdl7GALGd/1G0UAmq47yrefEr/b00uS35i1qUUhOzo1NmEZch/bvF -kmJ+WtAHunZcOCu0EFZhdWx0IFRlc3QgS2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUI -AgkKCwQWAgMBAh4BAheAAAoJEOuDLGfrXolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHip -ZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABq -hb5ojexdnAYRswaHV201ZCclj9rnJN1PAg0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmG -kdrg8K8ARmRILjmwuBAgJM0eXBZHNGWXelk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0 -vDttB+ZXqF88W9jAYlvdgbTtajNF5IDYDjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlx -k4edA5gEVduQkQEIAOjZV5tbpfIh5QefpIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe -4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg+YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/t -GF5xE3e5CoZRsHV/c92h3t1LdJNOnC5mUKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBH -yt0tdHtIWuQv6joTJzujqViRhlCwQYzQSKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1r -ENO8JOuPu6tMS+znFu67skq2gFFZwCQWIjdHm+2ukE+PE580WAWudyMAEQEAAQAH/i7ndRPI+t0T -AdEu0dTIdyrrg3g7gd471kQtIVZZwTYSy2yhNY/Ciu72s3ab8QNCxY8dNL5bRk8FKjHslAoNSFdO -8iZSLiDgIHOZOcjYe6pqdgQaeTHodm1Otrn2SbB+K/3oX6W/y1xe18aSojGba/nHMj5PeJbIN9Pi -jmh0WMLD/0rmkTTxR7qQ5+kMV4O29xY4qjdYRD5O0adeZX0mNncmlmQ+rX9yxrtSgFROu1jwVtfP -hcNetifTTshJnTwND8hux5ECEadlIVBHypW28Hth9TRBXmddTmv7L7mdtUO6DybgkpWpw4k4LPsk -uZ6aY4wcGRp7EVfWGr9NHbq/n+0EAOlhDXIGdylkQsndjBMyhPsXZa5fFBmOyHjXj733195Jgr1v -ZjaIomrA9cvYrmN75oKrG1jJsMEl6HfC/ZPzEj6E51/p1PRdHP7CdUUA+DG8x4M3jn+e43psVuAR -a1XbN+8/bOa0ubt7ljVPjAEvWRSvU9dRaQz93w3fduAuM07dBAD/ayK3e0d6JMJMrU50lNOXQBgL -rFbg4rWzPO9BJQdhjOhmOZQiUa1Q+EV+s95yIUg1OAfaMP9KRIljr5RCdGNS6WoMNBAQOSrZpelf -jW4NpzphNfWDGVkUoPoskVtJz/nu9d860dGd3Al0kSmtUpMu5QKlo+sSxXUPbWLUn8V9/wP/ScCW -H+0gtL4R7SFazPeTIP+Cu5oR7A/DlFVLJKa3vo+atkhSvwxHGbg04vb/W4mKhGGVtMBtlhRmaWOe -PhUulU5FdaYsdlpN/Yd+hhgU6NHlyImPGVEHWD8c6CG8qoZfpR33j2sqshs4i/MtJZeBvl62vxPn -9bDN7KAjFNll9axAjIkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZAQIABgUCVduQ -kQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDDhnV3bXQsCvn/ -6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQe3l4CqJvkn6j -ybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4KBIrp/bhG6Pdn -igKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eYENtyOmEMWOFC -LLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H/1trYUtJjXQK -HmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7PkUZTfpaP/L6 -DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0UPEnjvtZTp5yO -hTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQdw/2epIewH0L/ -FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4MFOMVRn1dc3q -dXlg3mimA+iK7tABQfG0RJ9YzWs=` - - TestPrivKey3 = `lQOXBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj -6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4 
-Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH -CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy -resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAEAB/dQbElFIa0VklZa -39ZLhtbBxACSWH3ql3EtRZaB2Mh4zSALbFyJDQfScOy8AZHmv66Ozxit9X9WsYr9OzcHujgl/2da -A3lybF6iLw1YDNaL11G6kuyn5sFP6lYGMRGOIWSik9oSVF6slo8m8ujRLdBsdMXVcElHKzCJiWmt -JZHEnUkl9X96fIPajMBfWjHHwcaeMOc77nvjwqy5wC4EY8TSVYzxeZHL7DADQ0EHBcThlmfizpCq -26LMVb6ju8STH7uDDFyKmhr/hC2vOkt+PKsvBCmW8/ESanO1zKPD9cvSsOWr2rZWNnkDRftqzOU5 -OCrI+3o9E74+toNb07bPntEEAMEStOzSvqZ6NKdh7EZYPA4mkkFC+EiHYIoinP1sd9V8O2Hq+dzx -yFHtWu0LmP6uWXk45vsP9y1UMJcEa33ew5JJa7zgucI772/BNvd/Oys/PqwIAl6uNIY8uYLgmn4L -1IPatp7vDiXzZSivPZd4yN4S4zCypZp9cnpO3qv8q7CtBADW87IA0TabdoxiN+m4XL7sYDRIfglr -MRPAlfrkAUaGDBx/t1xb6IaKk7giFdwHpTI6+g9XNkqKqogMe4Fp+nsd1xtfsNUBn6iKZavm5kXe -Lp9QgE+K6mvIreOTe2PKQqXqgPRG6+SRGatoKeY76fIpd8AxOJyWERxcq2lUHLn45QP/UXDTcYB7 -gzJtZrfpXN0GqQ0lYXMzbQfLnkUsu3mYzArfNy0otzEmKTkwmKclNY1/EJSzSdHfgmeA260a0nLK -64C0wPgSmOqw90qwi5odAYSjSFBapDbyGF86JpHrLxyEEpGoXanRPwWfbiWp19Nwg6nknA87AtaM -3+AHjbWzwCpHL7QQVmF1bHQgVGVzdCBLZXkgM4kBOAQTAQIAIgUCVduSIwIbLwYLCQgHAwIGFQgC -CQoLBBYCAwECHgECF4AACgkQ9HlLVvwtxt1aMQf/aaGoL1rRWTUjM6DEShXFhWpV29rEjSdNk5N+ -ZwVifgdCVD5IsSjI1Z7mO2SHHiTm4eKnHAofM6/TZgzXg1YLpu8rDYJARMsM8bgK/xgxSamGjm2c -wN220jOnwePIlG0drNTW5N6zb/K6qHoscJ6NUkjS5JPdGJuq7B0bdCM8/xSbG75gL34U5bYqK38B -DwmW4UMl2rf/BJfxV9hmsZ2Cat4TspgyiWEKTMZI+PugXKDDwuoqgm+320K4EqFkwG4y/WwHkKgk -hZ0+io5lzhTsvVd2p8q8VlH9GG5eA3WWQj0yqucsOmKQvcuT5y0vFY6NQJbyuioqgdlgEXtc+p0B -+Z0DmARV25IjAQgA49yN3hCBsuWoiTezoE9FHJXOCVOBR1/4jStQPJtoMl8mhtl3xTp7iGQ+9GhD -y0l5+fP+qcP/rfBq0BslhxVOZ7jQjdUoM6ZUZzJoPGIo/V2KwqpwQl3tdCIjvagCJeYQfTL7lTCc -4ySz+XBoAYMwZVGMcRcjp+JE8Wx9Ovzuq8wnelbU6I5dVJ7O4E1OWbIkLuytDX+fDEvfft6/oPXN -Bl3cm6FzEuQetQQss3DOG9xnvS+DrjmMCbPwR2a++ioQ8+geoqA/kB4cAI6xOb3ncoeGDHc1i4Y9 -T9Ggi+6Aq3girmfDtNYVOM8cZUXcZNCvLkJn8DNeIvnuFUSEO+a5PwARAQABAAf/TPd98CmRNdV/ -VUI8aYT9Kkervdi4DVzsfvrHcoFn88PSJrCkVTmI6qw526Kwa6VZD0YMmll7LszLt5nD1lorDrwN -rir3FmMzlVwge20IvXRwX4rkunYxtA2oFvL+LsEEhtXGx0ERbWRDapk+eGxQ15hxIO4Y/Cdg9E+a -CWfQUrTSnC6qMVfVYMGfnM1yNX3OWattEFfmxQas5XqQk/0FgjCZALixdanjN/r1tjp5/2MiSD8N -Wkemzsr6yPicnc3+BOZc5YOOnH8FqBvVHcDlSJI6pCOCEiO3Pq2QEk/1evONulbF116mLnQoGrpp -W77l+5O42VUpZfjROCPd5DYyMQQA492CFXZpDIJ2emB9/nK8X6IzdVRK3oof8btbSNnme5afIzhs -wR1ruX30O7ThfB+5ezpbgK1C988CWkr9SNSTy43omarafGig6/Y1RzdiITILuIGfbChoSpc70jXx -U0nzJ/1i9yZ/vDgP3EC2miRhlDcp5w0Bu0oMBlgG/1uhj0cEAP/+7aFGP0fo2MZPhyl5feHKWj4k -85XoAIpMBnzF6HTGU3ljAE56a+4sVw3bWB755DPhvpZvDkX60I9iIJxio8TK5ITdfjlLhxuskXyt -ycwWI/4J+soeq4meoxK9jxZJuDl/qvoGfyzNg1oy2OBehX8+6erW46kr6Z/MQutS3zJJBACmJHrK -VR40qD7a8KbvfuM3ruwlm5JqT/Ykq1gfKKxHjWDIUIeyBX/axGQvAGNYeuuQCzZ0+QsEWur3C4kN -U+Pb5K1WGyOKkhJzivSI56AG3d8TA/Q0JhqST6maY0fvUoahWSCcpd7MULa3n1zx5Wsvi8mkVtup -Js/IDi/kqneqM0XviQI+BBgBAgAJBQJV25IjAhsuASkJEPR5S1b8LcbdwF0gBBkBAgAGBQJV25Ij -AAoJEAUj/03Hcrkg84UIAKxn9nizYtwSgDnVNb5PnD5h6+Ui6r7ffYm2o0im4YhakbFTHIPI9PRh -BavRI5sE5Fg2vtE/x38jattoUrJoNoq9Gh9iv5PBfL3amEGjul0RRqYGl+ub+yv7YGAAHbHcdZen -4gx15VWGpB7y3hycWbdzV8h3EAPKIm5XmB7YyXmArnI3CoJA+HtTZGoL6WZWUwka9YichGfaZ/oD -umENg1l87Pp2RqvjLKHmv2tGCtnDzyv/IiWur9zopFQiCc8ysVgRq6CA5x5nzbv6MqRspYUS4e2I -LFbuREA3blR+caw9oX41IYzarW8IbgeIXJ3HqUyhczRKF/z5nDKtX/kHMCqlbAgAnfu0TALnwVuj -KeXLo4Y7OA9LTEqfORcw62q5OjSoQf/VsRSwGSefv3kGZk5N/igELluU3qpG/twZI/TSL6zGqXU2 -FOMlyMm1849TOB9b4B//4dHrjzPhztzowKMMUqeTxmSgYtFTshKN6eQ0XO+7ZuOXEmSKXS4kOUs9 -ttfzSiPNXUZL2D5nFU9H7rw3VAuXYVTrOx+Dfi6mYsscbxUbi8THODI2Q7B9Ni92DJE1OOe4+57o 
-fXZ9ln24I14bna/uVHd6hBwLEE6eLCCKkHxQnnZFZduXDHMK0a0OL8RYHfMtNSem4pyC5wDQui1u -KFIzGEPKVoBF9U7VBXpyxpsz+A==` - - TestPubKey1 = `mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzzwiMwBS5cD0da -rGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7H+/mhfFvKmgr0Y5kDCF1j0T/ -063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0f -sF5St9jhO7mbZU9EFkv9O3t3EaURfHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg -8hQssKeVGpuskTdz5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3Qg -S2V5IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOfLr44B -HbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRTJfjECi+AuTGeDwBy84TD -cRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3CEe8cMwIPqPT2kajJVdOyrvkyuFOdPFOE -A7bdCH0MqgIdM2SdF8t40k/ATfuD2K1ZmumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlB -C0yoArn+0QA3cf2B9T4zJ2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXa -QKfOt5x+7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7oEDCn -aY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I1Ktm698UAZS9Jt8y -jak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okjh5o/3d4cBt1yZPUJJyLKY43Wvptb -6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTjOleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5N -ZLGnED3lq+hQNbe+8UI5tD2oP/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu -9p315E87DOleYwxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ -AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVhEGipBmpDGRYu -lEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHkGRHG0/DGum0l4sKTta3OPGHN -C1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRdtPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0 -YwKoz3h9+QEcZHvsjSZjgydKvfLYcm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJi -oPn2jVMnXCm4EKc7fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH -/AtY+XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7moViAAcI -PXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWkojHqyob3cyLgy6z9Q557O -9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJiEBt8UZGypQ/E56/343epmYAe0a87sHx -8iDV+dNtDVKfPRENiLOOc19MmS+phmUyrbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKd -OIu60YPNE4+h7u2CfYyFPu3AlUaGNMBlvy6PEpU=` - - TestPubKey2 = `mQENBFXbkJEBCADKb1ZvlT14XrJa2rTOe5924LQr2PTZlRv+651TXy33yEhelZ+V4sMrELN8fKEG -Zy1kNixmbq3MCF/671k3LigHA7VrOaH9iiQgr6IIq2MeIkUYKZ27C992vQkYLjbYUG8+zl5h69S4 -0Ixm0yL0M54XOJ0gm+maEK1ZESKTUlDNkIS7l0jLZSYwfUeGXSEt6FWs8OgbyRTaHw4PDHrDEE9e -Q67K6IZ3YMhPOL4fVk4Jwrp5R/RwiklT+lNozWEyFVwPFH4MeQMs9nMbt+fWlTzEA7tI4acI9yDk -Cm1yN2R9rmY0UjODRiJw6z6sLV2T+Pf32n3MNSUOYczOjZa4VBwjABEBAAG0EFZhdWx0IFRlc3Qg -S2V5IDKJATgEEwECACIFAlXbkJECGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOuDLGfr -XolXqz4H/28IuoRxGKoJ064YHjPkkpoddW6zdzzNfHipZnNfEUiTEls4qF1IB81M2xqfiXIFRIdO -2kaLkRPFhO0hRxbtI6VuZYLgG3QCaXhxW6GyFa5zKABqhb5ojexdnAYRswaHV201ZCclj9rnJN1P -Ag0Rz6MdX/w1euEWktQxWKo42oZKyx8oT9p6lrv5KRmGkdrg8K8ARmRILjmwuBAgJM0eXBZHNGWX -elk4YmOgnAAcZA6ZAo1G+8Pg6pKKP61ETewuCg3/u7N0vDttB+ZXqF88W9jAYlvdgbTtajNF5IDY -DjTzWfeCaIB18F9gOzXq15SwWeDDI+CU9Nmq358IzXlxk4e5AQ0EVduQkQEIAOjZV5tbpfIh5Qef -pIp2dpGMVfpgPj4RNc15CyFnb8y6dhCrdybkY9GveXJe4F3GNYnSfB42cgxrfhizX3LakmZQ/SAg -+YO5KxfCIN7Q9LPNeTgPsZZT6h8lVuXUxOFKXfRaR3/tGF5xE3e5CoZRsHV/c92h3t1LdJNOnC5m -UKIPO4zDxiw/C2T2q3rP1kmIMaOH724kEH5A+xcp1cBHyt0tdHtIWuQv6joTJzujqViRhlCwQYzQ -SKpSBxwhBsorPvyinZI/ZXA4XXZc5RoMqV9rikedrb1rENO8JOuPu6tMS+znFu67skq2gFFZwCQW -IjdHm+2ukE+PE580WAWudyMAEQEAAYkCPgQYAQIACQUCVduQkQIbLgEpCRDrgyxn616JV8BdIAQZ -AQIABgUCVduQkQAKCRArYtevdF38xtzgB/4zVzozBpVOnagRkA7FDsHo36xX60Lik+ew0m28ueDD -hnV3bXQsCvn/6wiCVWqLOTDeYCPlyTTpEMyk8zwdCICW6MgSkVHWcEDOrRqIrqm86rirjTGjJSgQ 
-e3l4CqJvkn6jybShYoBk1OZZV6vVv9hPTXXv9E6dLKoEW5YZBrrF+VC0w1iOIvaAQ+QXph20eV4K -BIrp/bhG6PdnigKxuBZ79cdqDnXIzT9UiIa6LYpR0rbeg+7BmuZTTPS8t+41hIiKS+UZFdKa67eY -ENtyOmEMWOFCLLRJGxkleukchiMJ70rknloZXsvJIweXBzSZ6m7mJQBgaig/L/dXyjv6+j2pNB4H -/1trYUtJjXQKHmqlgCmpCkHt3g7JoxWvglnDNmE6q3hIWuVIYQpnzZy1g05+X9Egwc1WVpBB02H7 -PkUZTfpaP/L6DLneMmSKPhZE3I+lPIPjwrxqh6xy5uQezcWkJTNKvPWF4FJzrVvx7XTPjfGvOB0U -PEnjvtZTp5yOhTeZK7DgIEtb/Wcrqs+iRArQKboM930ORSZhwvGK3F9V/gMDpIrvge5vDFsTEYQd -w/2epIewH0L/FUb/6jBRcVEpGo9Ayg+Jnhq14GOGcd1y9oMZ48kYVLVBTA9tQ+82WE8Bch7uFPj4 -MFOMVRn1dc3qdXlg3mimA+iK7tABQfG0RJ9YzWs=` - - TestPubKey3 = `mQENBFXbkiMBCACiHW4/VI2JkfvSEINddS7vE6wEu5e1leNQDaLUh6PrATQZS2a4Q6kRE6WlJumj -6wCeN753Cm93UGQl2Bi3USIEeArIZnPTcocrckOVXxtoLBNKXgqKvEsDXgfw8A+doSfXoDm/3Js4 -Wy3WsYKNR9LaPuJZHnpjsFAJhvRVyhH4UFD+1RTSSefq1mozPfDdMoZeZNEpfhwt3DuTJs7RqcTH -CgR2CqhEHnOOE5jJUljHKYLCglE2+8dth1bZlQi4xly/VHZzP3Bn7wKeolK/ROP6VZz/e0xq/BKy -resmxvlBWZ1zWwqGIrV9b0uwYvGrh2hOd5C5+5oGaA2MGcjxwaLBABEBAAG0EFZhdWx0IFRlc3Qg -S2V5IDOJATgEEwECACIFAlXbkiMCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEPR5S1b8 -LcbdWjEH/2mhqC9a0Vk1IzOgxEoVxYVqVdvaxI0nTZOTfmcFYn4HQlQ+SLEoyNWe5jtkhx4k5uHi -pxwKHzOv02YM14NWC6bvKw2CQETLDPG4Cv8YMUmpho5tnMDdttIzp8HjyJRtHazU1uTes2/yuqh6 -LHCejVJI0uST3RibquwdG3QjPP8Umxu+YC9+FOW2Kit/AQ8JluFDJdq3/wSX8VfYZrGdgmreE7KY -MolhCkzGSPj7oFygw8LqKoJvt9tCuBKhZMBuMv1sB5CoJIWdPoqOZc4U7L1XdqfKvFZR/RhuXgN1 -lkI9MqrnLDpikL3Lk+ctLxWOjUCW8roqKoHZYBF7XPqdAfm5AQ0EVduSIwEIAOPcjd4QgbLlqIk3 -s6BPRRyVzglTgUdf+I0rUDybaDJfJobZd8U6e4hkPvRoQ8tJefnz/qnD/63watAbJYcVTme40I3V -KDOmVGcyaDxiKP1disKqcEJd7XQiI72oAiXmEH0y+5UwnOMks/lwaAGDMGVRjHEXI6fiRPFsfTr8 -7qvMJ3pW1OiOXVSezuBNTlmyJC7srQ1/nwxL337ev6D1zQZd3JuhcxLkHrUELLNwzhvcZ70vg645 -jAmz8EdmvvoqEPPoHqKgP5AeHACOsTm953KHhgx3NYuGPU/RoIvugKt4Iq5nw7TWFTjPHGVF3GTQ -ry5CZ/AzXiL57hVEhDvmuT8AEQEAAYkCPgQYAQIACQUCVduSIwIbLgEpCRD0eUtW/C3G3cBdIAQZ -AQIABgUCVduSIwAKCRAFI/9Nx3K5IPOFCACsZ/Z4s2LcEoA51TW+T5w+YevlIuq+332JtqNIpuGI -WpGxUxyDyPT0YQWr0SObBORYNr7RP8d/I2rbaFKyaDaKvRofYr+TwXy92phBo7pdEUamBpfrm/sr -+2BgAB2x3HWXp+IMdeVVhqQe8t4cnFm3c1fIdxADyiJuV5ge2Ml5gK5yNwqCQPh7U2RqC+lmVlMJ -GvWInIRn2mf6A7phDYNZfOz6dkar4yyh5r9rRgrZw88r/yIlrq/c6KRUIgnPMrFYEauggOceZ827 -+jKkbKWFEuHtiCxW7kRAN25UfnGsPaF+NSGM2q1vCG4HiFydx6lMoXM0Shf8+ZwyrV/5BzAqpWwI -AJ37tEwC58Fboynly6OGOzgPS0xKnzkXMOtquTo0qEH/1bEUsBknn795BmZOTf4oBC5blN6qRv7c -GSP00i+sxql1NhTjJcjJtfOPUzgfW+Af/+HR648z4c7c6MCjDFKnk8ZkoGLRU7ISjenkNFzvu2bj -lxJkil0uJDlLPbbX80ojzV1GS9g+ZxVPR+68N1QLl2FU6zsfg34upmLLHG8VG4vExzgyNkOwfTYv -dgyRNTjnuPue6H12fZZ9uCNeG52v7lR3eoQcCxBOniwgipB8UJ52RWXblwxzCtGtDi/EWB3zLTUn -puKcgucA0LotbihSMxhDylaARfVO1QV6csabM/g=` - - TestAAPubKey1 = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mQENBFXbjPUBCADjNjCUQwfxKL+RR2GA6pv/1K+zJZ8UWIF9S0lk7cVIEfJiprzz -wiMwBS5cD0darGin1FHvIWOZxujA7oW0O2TUuatqI3aAYDTfRYurh6iKLC+VS+F7 -H+/mhfFvKmgr0Y5kDCF1j0T/063QZ84IRGucR/X43IY7kAtmxGXH0dYOCzOe5UBX -1fTn3mXGe2ImCDWBH7gOViynXmb6XNvXkP0fsF5St9jhO7mbZU9EFkv9O3t3EaUR -fHopsCVDOlCkFCw5ArY+DUORHRzoMX0PnkyQb5OzibkChzpg8hQssKeVGpuskTdz -5Q7PtdW71jXd4fFVzoNH8fYwRpziD2xNvi6HABEBAAG0EFZhdWx0IFRlc3QgS2V5 -IDGJATgEEwECACIFAlXbjPUCGy8GCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJ -EOfLr44BHbeTo+sH/i7bapIgPnZsJ81hmxPj4W12uvunksGJiC7d4hIHsG7kmJRT -JfjECi+AuTGeDwBy84TDcRaOB6e79fj65Fg6HgSahDUtKJbGxj/lWzmaBuTzlN3C -Ee8cMwIPqPT2kajJVdOyrvkyuFOdPFOEA7bdCH0MqgIdM2SdF8t40k/ATfuD2K1Z -mumJ508I3gF39jgTnPzD4C8quswrMQ3bzfvKC3klXRlBC0yoArn+0QA3cf2B9T4z -J2qnvgotVbeK/b1OJRNj6Poeo+SsWNc/A5mw7lGScnDgL3yfwCm1gQXaQKfOt5x+ -7GqhWDw10q+bJpJlI10FfzAnhMF9etSqSeURBRW5AQ0EVduM9QEIAL53hJ5bZJ7o 
-EDCnaY+SCzt9QsAfnFTAnZJQrvkvusJzrTQ088eUQmAjvxkfRqnv981fFwGnh2+I -1Ktm698UAZS9Jt8yjak9wWUICKQO5QUt5k8cHwldQXNXVXFa+TpQWQR5yW1a9okj -h5o/3d4cBt1yZPUJJyLKY43Wvptb6EuEsScO2DnRkh5wSMDQ7dTooddJCmaq3LTj -OleRFQbu9ij386Do6jzK69mJU56TfdcydkxkWF5NZLGnED3lq+hQNbe+8UI5tD2o -P/3r5tXKgMy1R/XPvR/zbfwvx4FAKFOP01awLq4P3d/2xOkMu4Lu9p315E87DOle -Ywxk+FoTqXEAEQEAAYkCPgQYAQIACQUCVduM9QIbLgEpCRDny6+OAR23k8BdIAQZ -AQIABgUCVduM9QAKCRAID0JGyHtSGmqYB/4m4rJbbWa7dBJ8VqRU7ZKnNRDR9CVh -EGipBmpDGRYulEimOPzLUX/ZXZmTZzgemeXLBaJJlWnopVUWuAsyjQuZAfdd8nHk -GRHG0/DGum0l4sKTta3OPGHNC1z1dAcQ1RCr9bTD3PxjLBczdGqhzw71trkQRBRd -tPiUchltPMIyjUHqVJ0xmg0hPqFic0fICsr0YwKoz3h9+QEcZHvsjSZjgydKvfLY -cm+4DDMCCqcHuJrbXJKUWmJcXR0y/+HQONGrGJ5xWdO+6eJioPn2jVMnXCm4EKc7 -fcLFrz/LKmJ8seXhxjM3EdFtylBGCrx3xdK0f+JDNQaC/rhUb5V2XuX6VwoH/AtY -+XsKVYRfNIupLOUcf/srsm3IXT4SXWVomOc9hjGQiJ3rraIbADsc+6bCAr4XNZS7 -moViAAcIPXFv3m3WfUlnG/om78UjQqyVACRZqqAGmuPq+TSkRUCpt9h+A39LQWko -jHqyob3cyLgy6z9Q557O9uK3lQozbw2gH9zC0RqnePl+rsWIUU/ga16fH6pWc1uJ -iEBt8UZGypQ/E56/343epmYAe0a87sHx8iDV+dNtDVKfPRENiLOOc19MmS+phmUy -rbHqI91c0pmysYcJZCD3a502X1gpjFbPZcRtiTmGnUKdOIu60YPNE4+h7u2CfYyF -Pu3AlUaGNMBlvy6PEpU= -=NUTS ------END PGP PUBLIC KEY BLOCK-----` -) diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml index 1f9807757..c56f37c0c 100644 --- a/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -3,7 +3,26 @@ language: go sudo: false go: - - 1.4 + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + - 1.15.x + - tip -install: go get -v -t ./... -script: make test +allow_failures: + - go: tip + +script: make build + +matrix: + include: + - language: go + go: 1.15.x + script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile index a828d2848..fb38ec276 100644 --- a/vendor/github.com/jmespath/go-jmespath/Makefile +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -1,6 +1,8 @@ CMD = jpgo +SRC_PKGS=./ ./cmd/... ./fuzz/... + help: @echo "Please use \`make ' where is one of" @echo " test to run all the tests" @@ -9,21 +11,22 @@ help: generate: - go generate ./... + go generate ${SRC_PKGS} build: rm -f $(CMD) - go build ./... + go build ${SRC_PKGS} rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... mv cmd/$(CMD)/$(CMD) . -test: - go test -v ./... +test: test-internal-testify + echo "making tests ${SRC_PKGS}" + go test -v ${SRC_PKGS} check: - go vet ./... - @echo "golint ./..." - @lint=`golint ./...`; \ + go vet ${SRC_PKGS} + @echo "golint ${SRC_PKGS}" + @lint=`golint ${SRC_PKGS}`; \ lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ echo "$$lint"; \ if [ "$$lint" != "" ]; then exit 1; fi @@ -42,3 +45,7 @@ bench: pprof-cpu: go tool pprof ./go-jmespath.test ./cpu.out + +test-internal-testify: + cd internal/testify && go test ./... + diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md index 187ef676d..110ad7999 100644 --- a/vendor/github.com/jmespath/go-jmespath/README.md +++ b/vendor/github.com/jmespath/go-jmespath/README.md @@ -4,4 +4,84 @@ -See http://jmespath.org for more info. +go-jmespath is a GO implementation of JMESPath, +which is a query language for JSON. It will take a JSON +document and transform it into another JSON document +through a JMESPath expression. + +Using go-jmespath is really easy. 
There's a single function
+you use, `jmespath.Search`:
+
+
+```go
+> import "github.com/jmespath/go-jmespath"
+>
+> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo.bar.baz[2]", data)
+result = 2
+```
+
+In the example we gave the ``Search`` function input data of
+`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}` as well as the JMESPath
+expression `foo.bar.baz[2]`, and the `Search` function evaluated
+the expression against the input data to produce the result ``2``.
+
+The JMESPath language can do a lot more than select an element
+from a list. Here are a few more examples:
+
+```go
+> var jsondata = []byte(`{"foo": {"bar": {"baz": [0, 1, 2, 3, 4]}}}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo.bar", data)
+result = { "baz": [ 0, 1, 2, 3, 4 ] }
+
+
+> var jsondata = []byte(`{"foo": [{"first": "a", "last": "b"},
+                              {"first": "c", "last": "d"}]}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo[*].first", data)
+result = [ 'a', 'c' ]
+
+
+> var jsondata = []byte(`{"foo": [{"age": 20}, {"age": 25},
+                               {"age": 30}, {"age": 35},
+                               {"age": 40}]}`) // your data
+> var data interface{}
+> err := json.Unmarshal(jsondata, &data)
+> result, err := jmespath.Search("foo[?age > `30`]", data)
+result = [ { age: 35 }, { age: 40 } ]
+```
+
+You can also pre-compile your query. This is useful if
+you are going to run multiple searches with it:
+
+```go
+ > var jsondata = []byte(`{"foo": "bar"}`)
+ > var data interface{}
+ > err := json.Unmarshal(jsondata, &data)
+ > precompiled, err := jmespath.Compile("foo")
+ > if err != nil {
+ >   // ... handle the error
+ > }
+ > result, err := precompiled.Search(data)
+ result = "bar"
+```
+
+## More Resources
+
+The examples above only show a small amount of what
+a JMESPath expression can do. If you want to take a
+tour of the language, the *best* place to go is the
+[JMESPath Tutorial](http://jmespath.org/tutorial.html).
+
+One of the best things about JMESPath is that it is
+implemented in many different programming languages including
+Python, Ruby, PHP, Lua, etc. To see a complete list of libraries,
+check out the [JMESPath libraries page](http://jmespath.org/libraries.html).
+
+And finally, the full JMESPath specification can be found
+on the [JMESPath site](http://jmespath.org/specification.html).
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
index 8e26ffeec..010efe9bf 100644
--- a/vendor/github.com/jmespath/go-jmespath/api.go
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -2,7 +2,7 @@ package jmespath
 
 import "strconv"
 
-// JMESPath is the epresentation of a compiled JMES path query. A JMESPath is
+// JMESPath is the representation of a compiled JMES path query. A JMESPath is
 // safe for concurrent use by multiple goroutines.
type JMESPath struct { ast ASTNode diff --git a/vendor/github.com/jmespath/go-jmespath/go.mod b/vendor/github.com/jmespath/go-jmespath/go.mod new file mode 100644 index 000000000..4d448e88b --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/go.mod @@ -0,0 +1,5 @@ +module github.com/jmespath/go-jmespath + +go 1.14 + +require github.com/jmespath/go-jmespath/internal/testify v1.5.1 diff --git a/vendor/github.com/jmespath/go-jmespath/go.sum b/vendor/github.com/jmespath/go-jmespath/go.sum new file mode 100644 index 000000000..d2db411e5 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go index 1240a1755..4abc303ab 100644 --- a/vendor/github.com/jmespath/go-jmespath/parser.go +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -137,7 +137,7 @@ func (p *Parser) Parse(expression string) (ASTNode, error) { } if p.current() != tEOF { return ASTNode{}, p.syntaxError(fmt.Sprintf( - "Unexpected token at the end of the expresssion: %s", p.current())) + "Unexpected token at the end of the expression: %s", p.current())) } return parsed, nil } diff --git a/vendor/github.com/jstemmer/go-junit-report/.gitignore b/vendor/github.com/jstemmer/go-junit-report/.gitignore new file mode 100644 index 000000000..720bda607 --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/.gitignore @@ -0,0 +1 @@ +go-junit-report diff --git a/vendor/github.com/jstemmer/go-junit-report/.travis.yml b/vendor/github.com/jstemmer/go-junit-report/.travis.yml new file mode 100644 index 000000000..d0dff3ef8 --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - tip + - "1.13.x" + - "1.12.x" + - "1.11.x" + - "1.10.x" + - "1.9.x" + - "1.8.x" + - "1.7.x" + - "1.6.x" + - "1.5.x" + - "1.4.x" + - "1.3.x" + - "1.2.x" diff --git a/vendor/github.com/jstemmer/go-junit-report/LICENSE b/vendor/github.com/jstemmer/go-junit-report/LICENSE new file mode 100644 index 000000000..f346564ce --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012 Joel Stemmer + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the 
Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/jstemmer/go-junit-report/README.md b/vendor/github.com/jstemmer/go-junit-report/README.md new file mode 100644 index 000000000..5b5f608be --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/README.md @@ -0,0 +1,49 @@ +# go-junit-report + +Converts `go test` output to an XML report, suitable for applications that +expect JUnit XML reports (e.g. [Jenkins](http://jenkins-ci.org)). + +[![Build Status][travis-badge]][travis-link] +[![Report Card][report-badge]][report-link] + +## Installation + +Go version 1.2 or higher is required. Install or update using the `go get` +command: + +```bash +go get -u github.com/jstemmer/go-junit-report +``` + +## Usage + +go-junit-report reads verbose `go test` output from standard input and writes +JUnit-compatible XML to standard output. + +```bash +go test -v 2>&1 | go-junit-report > report.xml +``` + +Note that it can also parse benchmark output produced with the `-bench` flag: +```bash +go test -v -bench . -count 5 2>&1 | go-junit-report > report.xml +``` + +## Contribution + +Create an issue and discuss the fix or feature, then fork the package. +Clone it to github.com/jstemmer/go-junit-report; this is necessary because Go imports use this path. +Fix the bug or implement the feature, then test and commit the change. +Reference the issue number and describe the change in the commit message. +Create a pull request; the owner or an administrator can then merge it. + +### Run Tests + +```bash +go test +``` + +[travis-badge]: https://travis-ci.org/jstemmer/go-junit-report.svg +[travis-link]: https://travis-ci.org/jstemmer/go-junit-report +[report-badge]: https://goreportcard.com/badge/github.com/jstemmer/go-junit-report +[report-link]: https://goreportcard.com/report/github.com/jstemmer/go-junit-report diff --git a/vendor/github.com/jstemmer/go-junit-report/formatter/formatter.go b/vendor/github.com/jstemmer/go-junit-report/formatter/formatter.go new file mode 100644 index 000000000..6e1a0f31d --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/formatter/formatter.go @@ -0,0 +1,182 @@ +package formatter + +import ( + "bufio" + "encoding/xml" + "fmt" + "io" + "runtime" + "strings" + "time" + + "github.com/jstemmer/go-junit-report/parser" +) + +// JUnitTestSuites is a collection of JUnit test suites. +type JUnitTestSuites struct { + XMLName xml.Name `xml:"testsuites"` + Suites []JUnitTestSuite `xml:"testsuite"` +} + +// JUnitTestSuite is a single JUnit test suite which may contain many +// testcases.
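For reference, the two vendored packages added here can also be driven programmatically rather than through the CLI shown in the README above; a minimal sketch (mirroring what the `go-junit-report` main further below does, with error handling reduced to panics for brevity):

```go
package main

import (
	"os"

	"github.com/jstemmer/go-junit-report/formatter"
	"github.com/jstemmer/go-junit-report/parser"
)

// Pipe `go test -v` output into this program to get JUnit XML on stdout.
func main() {
	// Parse verbose test output from stdin; the second argument is a fallback
	// package name used when no package result line is present.
	report, err := parser.Parse(os.Stdin, "")
	if err != nil {
		panic(err)
	}

	// Write JUnit-compatible XML, keeping the XML header and letting the
	// formatter fall back to runtime.Version() for the go.version property.
	if err := formatter.JUnitReportXML(report, false, "", os.Stdout); err != nil {
		panic(err)
	}
}
```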
+type JUnitTestSuite struct { + XMLName xml.Name `xml:"testsuite"` + Tests int `xml:"tests,attr"` + Failures int `xml:"failures,attr"` + Time string `xml:"time,attr"` + Name string `xml:"name,attr"` + Properties []JUnitProperty `xml:"properties>property,omitempty"` + TestCases []JUnitTestCase `xml:"testcase"` +} + +// JUnitTestCase is a single test case with its result. +type JUnitTestCase struct { + XMLName xml.Name `xml:"testcase"` + Classname string `xml:"classname,attr"` + Name string `xml:"name,attr"` + Time string `xml:"time,attr"` + SkipMessage *JUnitSkipMessage `xml:"skipped,omitempty"` + Failure *JUnitFailure `xml:"failure,omitempty"` +} + +// JUnitSkipMessage contains the reason why a testcase was skipped. +type JUnitSkipMessage struct { + Message string `xml:"message,attr"` +} + +// JUnitProperty represents a key/value pair used to define properties. +type JUnitProperty struct { + Name string `xml:"name,attr"` + Value string `xml:"value,attr"` +} + +// JUnitFailure contains data related to a failed test. +type JUnitFailure struct { + Message string `xml:"message,attr"` + Type string `xml:"type,attr"` + Contents string `xml:",chardata"` +} + +// JUnitReportXML writes a JUnit xml representation of the given report to w +// in the format described at http://windyroad.org/dl/Open%20Source/JUnit.xsd +func JUnitReportXML(report *parser.Report, noXMLHeader bool, goVersion string, w io.Writer) error { + suites := JUnitTestSuites{} + + // convert Report to JUnit test suites + for _, pkg := range report.Packages { + pkg.Benchmarks = mergeBenchmarks(pkg.Benchmarks) + ts := JUnitTestSuite{ + Tests: len(pkg.Tests) + len(pkg.Benchmarks), + Failures: 0, + Time: formatTime(pkg.Duration), + Name: pkg.Name, + Properties: []JUnitProperty{}, + TestCases: []JUnitTestCase{}, + } + + classname := pkg.Name + if idx := strings.LastIndex(classname, "/"); idx > -1 && idx < len(pkg.Name) { + classname = pkg.Name[idx+1:] + } + + // properties + if goVersion == "" { + // if goVersion was not specified as a flag, fall back to version reported by runtime + goVersion = runtime.Version() + } + ts.Properties = append(ts.Properties, JUnitProperty{"go.version", goVersion}) + if pkg.CoveragePct != "" { + ts.Properties = append(ts.Properties, JUnitProperty{"coverage.statements.pct", pkg.CoveragePct}) + } + + // individual test cases + for _, test := range pkg.Tests { + testCase := JUnitTestCase{ + Classname: classname, + Name: test.Name, + Time: formatTime(test.Duration), + Failure: nil, + } + + if test.Result == parser.FAIL { + ts.Failures++ + testCase.Failure = &JUnitFailure{ + Message: "Failed", + Type: "", + Contents: strings.Join(test.Output, "\n"), + } + } + + if test.Result == parser.SKIP { + testCase.SkipMessage = &JUnitSkipMessage{strings.Join(test.Output, "\n")} + } + + ts.TestCases = append(ts.TestCases, testCase) + } + + // individual benchmarks + for _, benchmark := range pkg.Benchmarks { + benchmarkCase := JUnitTestCase{ + Classname: classname, + Name: benchmark.Name, + Time: formatBenchmarkTime(benchmark.Duration), + } + + ts.TestCases = append(ts.TestCases, benchmarkCase) + } + + suites.Suites = append(suites.Suites, ts) + } + + // to xml + bytes, err := xml.MarshalIndent(suites, "", "\t") + if err != nil { + return err + } + + writer := bufio.NewWriter(w) + + if !noXMLHeader { + writer.WriteString(xml.Header) + } + + writer.Write(bytes) + writer.WriteByte('\n') + writer.Flush() + + return nil +} + +func mergeBenchmarks(benchmarks []*parser.Benchmark) []*parser.Benchmark { + var merged 
[]*parser.Benchmark + benchmap := make(map[string][]*parser.Benchmark) + for _, bm := range benchmarks { + if _, ok := benchmap[bm.Name]; !ok { + merged = append(merged, &parser.Benchmark{Name: bm.Name}) + } + benchmap[bm.Name] = append(benchmap[bm.Name], bm) + } + + for _, bm := range merged { + for _, b := range benchmap[bm.Name] { + bm.Allocs += b.Allocs + bm.Bytes += b.Bytes + bm.Duration += b.Duration + } + n := len(benchmap[bm.Name]) + bm.Allocs /= n + bm.Bytes /= n + bm.Duration /= time.Duration(n) + } + + return merged +} + +func formatTime(d time.Duration) string { + return fmt.Sprintf("%.3f", d.Seconds()) +} + +func formatBenchmarkTime(d time.Duration) string { + return fmt.Sprintf("%.9f", d.Seconds()) +} diff --git a/vendor/github.com/jstemmer/go-junit-report/go-junit-report.go b/vendor/github.com/jstemmer/go-junit-report/go-junit-report.go new file mode 100644 index 000000000..1332f3b65 --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/go-junit-report.go @@ -0,0 +1,45 @@ +package main + +import ( + "flag" + "fmt" + "os" + + "github.com/jstemmer/go-junit-report/formatter" + "github.com/jstemmer/go-junit-report/parser" +) + +var ( + noXMLHeader = flag.Bool("no-xml-header", false, "do not print xml header") + packageName = flag.String("package-name", "", "specify a package name (compiled test have no package name in output)") + goVersionFlag = flag.String("go-version", "", "specify the value to use for the go.version property in the generated XML") + setExitCode = flag.Bool("set-exit-code", false, "set exit code to 1 if tests failed") +) + +func main() { + flag.Parse() + + if flag.NArg() != 0 { + fmt.Fprintf(os.Stderr, "%s does not accept positional arguments\n", os.Args[0]) + flag.Usage() + os.Exit(1) + } + + // Read input + report, err := parser.Parse(os.Stdin, *packageName) + if err != nil { + fmt.Printf("Error reading input: %s\n", err) + os.Exit(1) + } + + // Write xml + err = formatter.JUnitReportXML(report, *noXMLHeader, *goVersionFlag, os.Stdout) + if err != nil { + fmt.Printf("Error writing XML: %s\n", err) + os.Exit(1) + } + + if *setExitCode && report.Failures() > 0 { + os.Exit(1) + } +} diff --git a/vendor/github.com/jstemmer/go-junit-report/go.mod b/vendor/github.com/jstemmer/go-junit-report/go.mod new file mode 100644 index 000000000..de52369ac --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/go.mod @@ -0,0 +1,3 @@ +module github.com/jstemmer/go-junit-report + +go 1.2 diff --git a/vendor/github.com/jstemmer/go-junit-report/parser/parser.go b/vendor/github.com/jstemmer/go-junit-report/parser/parser.go new file mode 100644 index 000000000..e268128a2 --- /dev/null +++ b/vendor/github.com/jstemmer/go-junit-report/parser/parser.go @@ -0,0 +1,319 @@ +package parser + +import ( + "bufio" + "io" + "regexp" + "strconv" + "strings" + "time" +) + +// Result represents a test result. +type Result int + +// Test result constants +const ( + PASS Result = iota + FAIL + SKIP +) + +// Report is a collection of package tests. +type Report struct { + Packages []Package +} + +// Package contains the test results of a single package. +type Package struct { + Name string + Duration time.Duration + Tests []*Test + Benchmarks []*Benchmark + CoveragePct string + + // Time is deprecated, use Duration instead. + Time int // in milliseconds +} + +// Test contains the results of a single test. +type Test struct { + Name string + Duration time.Duration + Result Result + Output []string + + SubtestIndent string + + // Time is deprecated, use Duration instead. 
+ Time int // in milliseconds +} + +// Benchmark contains the results of a single benchmark. +type Benchmark struct { + Name string + Duration time.Duration + // number of B/op + Bytes int + // number of allocs/op + Allocs int +} + +var ( + regexStatus = regexp.MustCompile(`--- (PASS|FAIL|SKIP): (.+) \((\d+\.\d+)(?: seconds|s)\)`) + regexIndent = regexp.MustCompile(`^([ \t]+)---`) + regexCoverage = regexp.MustCompile(`^coverage:\s+(\d+\.\d+)%\s+of\s+statements(?:\sin\s.+)?$`) + regexResult = regexp.MustCompile(`^(ok|FAIL)\s+([^ ]+)\s+(?:(\d+\.\d+)s|\(cached\)|(\[\w+ failed]))(?:\s+coverage:\s+(\d+\.\d+)%\sof\sstatements(?:\sin\s.+)?)?$`) + // regexBenchmark captures 3-5 groups: benchmark name, number of times ran, ns/op (with or without decimal), B/op (optional), and allocs/op (optional). + regexBenchmark = regexp.MustCompile(`^(Benchmark[^ -]+)(?:-\d+\s+|\s+)(\d+)\s+(\d+|\d+\.\d+)\sns/op(?:\s+(\d+)\sB/op)?(?:\s+(\d+)\sallocs/op)?`) + regexOutput = regexp.MustCompile(`( )*\t(.*)`) + regexSummary = regexp.MustCompile(`^(PASS|FAIL|SKIP)$`) + regexPackageWithTest = regexp.MustCompile(`^# ([^\[\]]+) \[[^\]]+\]$`) +) + +// Parse parses go test output from reader r and returns a report with the +// results. An optional pkgName can be given, which is used in case a package +// result line is missing. +func Parse(r io.Reader, pkgName string) (*Report, error) { + reader := bufio.NewReader(r) + + report := &Report{make([]Package, 0)} + + // keep track of tests we find + var tests []*Test + + // keep track of benchmarks we find + var benchmarks []*Benchmark + + // sum of tests' time, use this if current test has no result line (when it is compiled test) + var testsTime time.Duration + + // current test + var cur string + + // coverage percentage report for current package + var coveragePct string + + // stores mapping between package name and output of build failures + var packageCaptures = map[string][]string{} + + // the name of the package which it's build failure output is being captured + var capturedPackage string + + // capture any non-test output + var buffers = map[string][]string{} + + // parse lines + for { + l, _, err := reader.ReadLine() + if err != nil && err == io.EOF { + break + } else if err != nil { + return nil, err + } + + line := string(l) + + if strings.HasPrefix(line, "=== RUN ") { + // new test + cur = strings.TrimSpace(line[8:]) + tests = append(tests, &Test{ + Name: cur, + Result: FAIL, + Output: make([]string, 0), + }) + + // clear the current build package, so output lines won't be added to that build + capturedPackage = "" + } else if matches := regexBenchmark.FindStringSubmatch(line); len(matches) == 6 { + bytes, _ := strconv.Atoi(matches[4]) + allocs, _ := strconv.Atoi(matches[5]) + + benchmarks = append(benchmarks, &Benchmark{ + Name: matches[1], + Duration: parseNanoseconds(matches[3]), + Bytes: bytes, + Allocs: allocs, + }) + } else if strings.HasPrefix(line, "=== PAUSE ") { + continue + } else if strings.HasPrefix(line, "=== CONT ") { + cur = strings.TrimSpace(line[8:]) + continue + } else if matches := regexResult.FindStringSubmatch(line); len(matches) == 6 { + if matches[5] != "" { + coveragePct = matches[5] + } + if strings.HasSuffix(matches[4], "failed]") { + // the build of the package failed, inject a dummy test into the package + // which indicate about the failure and contain the failure description. 
+ tests = append(tests, &Test{ + Name: matches[4], + Result: FAIL, + Output: packageCaptures[matches[2]], + }) + } else if matches[1] == "FAIL" && !containsFailures(tests) && len(buffers[cur]) > 0 { + // This package didn't have any failing tests, but still it + // failed with some output. Create a dummy test with the + // output. + tests = append(tests, &Test{ + Name: "Failure", + Result: FAIL, + Output: buffers[cur], + }) + buffers[cur] = buffers[cur][0:0] + } + + // all tests in this package are finished + report.Packages = append(report.Packages, Package{ + Name: matches[2], + Duration: parseSeconds(matches[3]), + Tests: tests, + Benchmarks: benchmarks, + CoveragePct: coveragePct, + + Time: int(parseSeconds(matches[3]) / time.Millisecond), // deprecated + }) + + buffers[cur] = buffers[cur][0:0] + tests = make([]*Test, 0) + benchmarks = make([]*Benchmark, 0) + coveragePct = "" + cur = "" + testsTime = 0 + } else if matches := regexStatus.FindStringSubmatch(line); len(matches) == 4 { + cur = matches[2] + test := findTest(tests, cur) + if test == nil { + continue + } + + // test status + if matches[1] == "PASS" { + test.Result = PASS + } else if matches[1] == "SKIP" { + test.Result = SKIP + } else { + test.Result = FAIL + } + + if matches := regexIndent.FindStringSubmatch(line); len(matches) == 2 { + test.SubtestIndent = matches[1] + } + + test.Output = buffers[cur] + + test.Name = matches[2] + test.Duration = parseSeconds(matches[3]) + testsTime += test.Duration + + test.Time = int(test.Duration / time.Millisecond) // deprecated + } else if matches := regexCoverage.FindStringSubmatch(line); len(matches) == 2 { + coveragePct = matches[1] + } else if matches := regexOutput.FindStringSubmatch(line); capturedPackage == "" && len(matches) == 3 { + // Sub-tests start with one or more series of 4-space indents, followed by a hard tab, + // followed by the test output + // Top-level tests start with a hard tab. + test := findTest(tests, cur) + if test == nil { + continue + } + test.Output = append(test.Output, matches[2]) + } else if strings.HasPrefix(line, "# ") { + // indicates a capture of build output of a package. set the current build package. + packageWithTestBinary := regexPackageWithTest.FindStringSubmatch(line) + if packageWithTestBinary != nil { + // Sometimes, the text after "# " shows the name of the test binary + // (".test") in addition to the package + // e.g.: "# package/name [package/name.test]" + capturedPackage = packageWithTestBinary[1] + } else { + capturedPackage = line[2:] + } + } else if capturedPackage != "" { + // current line is build failure capture for the current built package + packageCaptures[capturedPackage] = append(packageCaptures[capturedPackage], line) + } else if regexSummary.MatchString(line) { + // unset current test name so any additional output after the + // summary is captured separately. 
+ cur = "" + } else { + // buffer anything else that we didn't recognize + buffers[cur] = append(buffers[cur], line) + + // if we have a current test, also append to its output + test := findTest(tests, cur) + if test != nil { + if strings.HasPrefix(line, test.SubtestIndent+" ") { + test.Output = append(test.Output, strings.TrimPrefix(line, test.SubtestIndent+" ")) + } + } + } + } + + if len(tests) > 0 { + // no result line found + report.Packages = append(report.Packages, Package{ + Name: pkgName, + Duration: testsTime, + Time: int(testsTime / time.Millisecond), + Tests: tests, + Benchmarks: benchmarks, + CoveragePct: coveragePct, + }) + } + + return report, nil +} + +func parseSeconds(t string) time.Duration { + if t == "" { + return time.Duration(0) + } + // ignore error + d, _ := time.ParseDuration(t + "s") + return d +} + +func parseNanoseconds(t string) time.Duration { + // note: if input < 1 ns precision, result will be 0s. + if t == "" { + return time.Duration(0) + } + // ignore error + d, _ := time.ParseDuration(t + "ns") + return d +} + +func findTest(tests []*Test, name string) *Test { + for i := len(tests) - 1; i >= 0; i-- { + if tests[i].Name == name { + return tests[i] + } + } + return nil +} + +func containsFailures(tests []*Test) bool { + for _, test := range tests { + if test.Result == FAIL { + return true + } + } + return false +} + +// Failures counts the number of failed tests in this report +func (r *Report) Failures() int { + count := 0 + + for _, p := range r.Packages { + for _, t := range p.Tests { + if t.Result == FAIL { + count++ + } + } + } + + return count +} diff --git a/vendor/github.com/keybase/go-crypto/brainpool/brainpool.go b/vendor/github.com/keybase/go-crypto/brainpool/brainpool.go deleted file mode 100644 index 77fb8b9a0..000000000 --- a/vendor/github.com/keybase/go-crypto/brainpool/brainpool.go +++ /dev/null @@ -1,134 +0,0 @@ -// Package brainpool implements Brainpool elliptic curves. -// Implementation of rcurves is from github.com/ebfe/brainpool -// Note that these curves are implemented with naive, non-constant time operations -// and are likely not suitable for enviroments where timing attacks are a concern. 
-package brainpool - -import ( - "crypto/elliptic" - "math/big" - "sync" -) - -var ( - once sync.Once - p256t1, p384t1, p512t1 *elliptic.CurveParams - p256r1, p384r1, p512r1 *rcurve -) - -func initAll() { - initP256t1() - initP384t1() - initP512t1() - initP256r1() - initP384r1() - initP512r1() -} - -func initP256t1() { - p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"} - p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16) - p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16) - p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16) - p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16) - p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16) - p256t1.BitSize = 256 -} - -func initP256r1() { - twisted := p256t1 - params := &elliptic.CurveParams{ - Name: "brainpoolP256r1", - P: twisted.P, - N: twisted.N, - BitSize: twisted.BitSize, - } - params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16) - params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16) - z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16) - p256r1 = newrcurve(twisted, params, z) -} - -func initP384t1() { - p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"} - p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16) - p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16) - p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16) - p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16) - p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16) - p384t1.BitSize = 384 -} - -func initP384r1() { - twisted := p384t1 - params := &elliptic.CurveParams{ - Name: "brainpoolP384r1", - P: twisted.P, - N: twisted.N, - BitSize: twisted.BitSize, - } - params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16) - params.Gy, _ = new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16) - z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16) - p384r1 = newrcurve(twisted, params, z) -} - -func initP512t1() { - p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"} - p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16) - p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16) - p512t1.B, _ = 
new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16) - p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16) - p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16) - p512t1.BitSize = 512 -} - -func initP512r1() { - twisted := p512t1 - params := &elliptic.CurveParams{ - Name: "brainpoolP512r1", - P: twisted.P, - N: twisted.N, - BitSize: twisted.BitSize, - } - params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16) - params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16) - z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16) - p512r1 = newrcurve(twisted, params, z) -} - -// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4) -func P256t1() elliptic.Curve { - once.Do(initAll) - return p256t1 -} - -// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4) -func P256r1() elliptic.Curve { - once.Do(initAll) - return p256r1 -} - -// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6) -func P384t1() elliptic.Curve { - once.Do(initAll) - return p384t1 -} - -// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6) -func P384r1() elliptic.Curve { - once.Do(initAll) - return p384r1 -} - -// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7) -func P512t1() elliptic.Curve { - once.Do(initAll) - return p512t1 -} - -// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7) -func P512r1() elliptic.Curve { - once.Do(initAll) - return p512r1 -} diff --git a/vendor/github.com/keybase/go-crypto/brainpool/rcurve.go b/vendor/github.com/keybase/go-crypto/brainpool/rcurve.go deleted file mode 100644 index 7e291d6aa..000000000 --- a/vendor/github.com/keybase/go-crypto/brainpool/rcurve.go +++ /dev/null @@ -1,83 +0,0 @@ -package brainpool - -import ( - "crypto/elliptic" - "math/big" -) - -var _ elliptic.Curve = (*rcurve)(nil) - -type rcurve struct { - twisted elliptic.Curve - params *elliptic.CurveParams - z *big.Int - zinv *big.Int - z2 *big.Int - z3 *big.Int - zinv2 *big.Int - zinv3 *big.Int -} - -var ( - two = big.NewInt(2) - three = big.NewInt(3) -) - -func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve { - zinv := new(big.Int).ModInverse(z, params.P) - return &rcurve{ - twisted: twisted, - params: params, - z: z, - zinv: zinv, - z2: new(big.Int).Exp(z, two, params.P), - z3: new(big.Int).Exp(z, three, params.P), - zinv2: new(big.Int).Exp(zinv, two, params.P), - zinv3: new(big.Int).Exp(zinv, three, params.P), - } -} - -func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) { - var tx, ty big.Int - tx.Mul(x, curve.z2) - tx.Mod(&tx, curve.params.P) - ty.Mul(y, curve.z3) - ty.Mod(&ty, curve.params.P) - return &tx, &ty -} - -func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) { - 
var x, y big.Int - x.Mul(tx, curve.zinv2) - x.Mod(&x, curve.params.P) - y.Mul(ty, curve.zinv3) - y.Mod(&y, curve.params.P) - return &x, &y -} - -func (curve *rcurve) Params() *elliptic.CurveParams { - return curve.params -} - -func (curve *rcurve) IsOnCurve(x, y *big.Int) bool { - return curve.twisted.IsOnCurve(curve.toTwisted(x, y)) -} - -func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) { - tx1, ty1 := curve.toTwisted(x1, y1) - tx2, ty2 := curve.toTwisted(x2, y2) - return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2)) -} - -func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) { - return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1))) -} - -func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { - tx1, ty1 := curve.toTwisted(x1, y1) - return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar)) -} - -func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { - return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar)) -} diff --git a/vendor/github.com/keybase/go-crypto/cast5/cast5.go b/vendor/github.com/keybase/go-crypto/cast5/cast5.go deleted file mode 100644 index 8c1b299bf..000000000 --- a/vendor/github.com/keybase/go-crypto/cast5/cast5.go +++ /dev/null @@ -1,526 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common -// OpenPGP cipher. -package cast5 - -import "errors" - -const BlockSize = 8 -const KeySize = 16 - -type Cipher struct { - masking [16]uint32 - rotate [16]uint8 -} - -func NewCipher(key []byte) (c *Cipher, err error) { - if len(key) != KeySize { - return nil, errors.New("CAST5: keys must be 16 bytes") - } - - c = new(Cipher) - c.keySchedule(key) - return -} - -func (c *Cipher) BlockSize() int { - return BlockSize -} - -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) 
- - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -type keyScheduleA [4][7]uint8 -type keyScheduleB [4][5]uint8 - -// keyScheduleRound contains the magic values for a round of the key schedule. -// The keyScheduleA deals with the lines like: -// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] -// Conceptually, both x and z are in the same array, x first. The first -// element describes which word of this array gets written to and the -// second, which word gets read. So, for the line above, it's "4, 0", because -// it's writing to the first word of z, which, being after x, is word 4, and -// reading from the first word of x: word 0. -// -// Next are the indexes into the S-boxes. Now the array is treated as bytes. So -// "xD" is 0xd. The first byte of z is written as "16 + 0", just to be clear -// that it's z that we're indexing. -// -// keyScheduleB deals with lines like: -// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] -// "K1" is ignored because key words are always written in order. So the five -// elements are the S-box indexes. They use the same form as in keyScheduleA, -// above. 
- -type keyScheduleRound struct{} -type keySchedule []keyScheduleRound - -var schedule = []struct { - a keyScheduleA - b keyScheduleB -}{ - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, - {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, - {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, - {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {3, 2, 0xc, 0xd, 8}, - {1, 0, 0xe, 0xf, 0xd}, - {7, 6, 8, 9, 3}, - {5, 4, 0xa, 0xb, 7}, - }, - }, - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, - {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, - {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, - {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {8, 9, 7, 6, 3}, - {0xa, 0xb, 5, 4, 7}, - {0xc, 0xd, 3, 2, 8}, - {0xe, 0xf, 1, 0, 0xd}, - }, - }, -} - -func (c *Cipher) keySchedule(in []byte) { - var t [8]uint32 - var k [32]uint32 - - for i := 0; i < 4; i++ { - j := i * 4 - t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) - } - - x := []byte{6, 7, 4, 5} - ki := 0 - - for half := 0; half < 2; half++ { - for _, round := range schedule { - for j := 0; j < 4; j++ { - var a [7]uint8 - copy(a[:], round.a[j][:]) - w := t[a[1]] - w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] - w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] - w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] - w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] - w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] - t[a[0]] = w - } - - for j := 0; j < 4; j++ { - var b [5]uint8 - copy(b[:], round.b[j][:]) - w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] - w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] - w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] - w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] - w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] - k[ki] = w - ki++ - } - } - } - - for i := 0; i < 16; i++ { - c.masking[i] = k[i] - c.rotate[i] = uint8(k[16+i] & 0x1f) - } -} - -// These are the three 'f' functions. See RFC 2144, section 2.2. 
-func f1(d, m uint32, r uint8) uint32 { - t := m + d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] -} - -func f2(d, m uint32, r uint8) uint32 { - t := m ^ d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] -} - -func f3(d, m uint32, r uint8) uint32 { - t := m - d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] -} - -var sBox = [8][256]uint32{ - { - 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, - 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, - 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, - 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, - 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, - 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, - 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, - 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, - 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, - 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, - 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, - 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, - 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, - 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, - 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, - 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, - 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, - 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, - 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, - 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, - 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, - 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, - 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, - 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, - 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, - 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, - 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, - 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, - 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, - 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, - 0xbd91e046, 0x9a56456e, 
0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, - 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, - }, - { - 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, - 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, - 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, - 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, - 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, - 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, - 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, - 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, - 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, - 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, - 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, - 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, - 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, - 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, - 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, - 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, - 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, - 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, - 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, - 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, - 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, - 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, - 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, - 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, - 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, - 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, - 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, - 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, - 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, - 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, - 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, - 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, - }, - { - 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, - 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, - 0x11107d9f, 0x07647db9, 
0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, - 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, - 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, - 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, - 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, - 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, - 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, - 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, - 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, - 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, - 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, - 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, - 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, - 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, - 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, - 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, - 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, - 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, - 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, - 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, - 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, - 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, - 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, - 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, - 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, - 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, - 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, - 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, - 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, - 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, - }, - { - 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, - 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, - 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, - 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, - 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, - 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, - 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 
0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, - 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, - 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, - 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, - 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, - 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, - 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, - 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, - 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, - 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, - 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, - 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, - 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, - 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, - 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, - 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, - 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, - 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, - 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, - 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, - 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, - 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, - 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, - 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, - 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, - 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, - }, - { - 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, - 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, - 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, - 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, - 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, - 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, - 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, - 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, - 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, - 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, - 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 
0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, - 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, - 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, - 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, - 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, - 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, - 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, - 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, - 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, - 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, - 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, - 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, - 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, - 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, - 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, - 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, - 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, - 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, - 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, - 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, - 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, - 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, - }, - { - 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, - 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, - 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, - 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, - 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, - 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, - 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, - 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, - 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, - 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, - 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, - 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, - 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, - 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, - 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 
0xa8dc8af0, 0x7345c106, 0xf41e232f, - 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, - 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, - 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, - 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, - 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, - 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, - 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, - 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, - 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, - 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, - 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, - 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, - 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, - 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, - 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, - 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, - 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, - }, - { - 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, - 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, - 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, - 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, - 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, - 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, - 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, - 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, - 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, - 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, - 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, - 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, - 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, - 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, - 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, - 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, - 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, - 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, - 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 
0x488dcf25, 0x36c9d566, - 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, - 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, - 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, - 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, - 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, - 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, - 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, - 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, - 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, - 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, - 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, - 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, - 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, - }, - { - 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, - 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, - 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, - 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, - 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, - 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, - 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, - 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, - 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, - 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, - 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, - 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, - 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, - 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, - 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, - 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, - 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, - 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, - 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, - 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, - 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, - 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, - 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 
0x2eda7fa4, - 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, - 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, - 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, - 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, - 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, - 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, - 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, - 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, - 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, - }, -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go b/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go deleted file mode 100644 index d7d81bac0..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/armor/armor.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is -// very similar to PEM except that it has an additional CRC checksum. -package armor // import "github.com/keybase/go-crypto/openpgp/armor" - -import ( - "bufio" - "bytes" - "encoding/base64" - "io" - "strings" - - "github.com/keybase/go-crypto/openpgp/errors" -) - -// A Block represents an OpenPGP armored structure. -// -// The encoded form is: -// -----BEGIN Type----- -// Headers -// -// base64-encoded Bytes -// '=' base64 encoded checksum -// -----END Type----- -// where Headers is a possibly empty sequence of Key: Value lines. -// -// Since the armored data can be very large, this package presents a streaming -// interface. -type Block struct { - Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). - Header map[string]string // Optional headers. - Body io.Reader // A Reader from which the contents can be read - lReader lineReader - oReader openpgpReader -} - -var ArmorCorrupt error = errors.StructuralError("armor invalid") - -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - -var armorStart = []byte("-----BEGIN ") -var armorEnd = []byte("-----END ") -var armorEndOfLine = []byte("-----") - -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. 
-type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 -} - -func (l *lineReader) Read(p []byte) (n int, err error) { - if l.eof { - return 0, io.EOF - } - - if len(l.buf) > 0 { - n = copy(p, l.buf) - l.buf = l.buf[n:] - return - } - - line, isPrefix, err := l.in.ReadLine() - if err != nil { - return - } - if isPrefix { - return 0, ArmorCorrupt - } - - if len(line) == 5 && line[0] == '=' { - // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - for { - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if len(strings.TrimSpace(string(line))) > 0 { - break - } - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } - - l.eof = true - return 0, io.EOF - } - - if len(line) > 96 { - return 0, ArmorCorrupt - } - - n = copy(p, line) - bytesToSave := len(line) - n - if bytesToSave > 0 { - if cap(l.buf) < bytesToSave { - l.buf = make([]byte, 0, bytesToSave) - } - l.buf = l.buf[0:bytesToSave] - copy(l.buf, line[n:]) - } - - return -} - -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. -type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 -} - -func (r *openpgpReader) Read(p []byte) (n int, err error) { - n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF { - if r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } - } - - return -} - -// Decode reads a PGP armored block from the given Reader. It will ignore -// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The -// given Reader is not usable after calling this function: an arbitrary amount -// of data may have been read past the end of the block. 
-func Decode(in io.Reader) (p *Block, err error) { - r := bufio.NewReaderSize(in, 100) - var line []byte - ignoreNext := false - -TryNextBlock: - p = nil - - // Skip leading garbage - for { - ignoreThis := ignoreNext - line, ignoreNext, err = r.ReadLine() - if err != nil { - return - } - if ignoreNext || ignoreThis { - continue - } - line = bytes.TrimSpace(line) - if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { - break - } - } - - p = new(Block) - p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) - p.Header = make(map[string]string) - nextIsContinuation := false - var lastKey string - - // Read headers - for { - isContinuation := nextIsContinuation - line, nextIsContinuation, err = r.ReadLine() - if err != nil { - p = nil - return - } - if isContinuation { - p.Header[lastKey] += string(line) - continue - } - line = bytes.TrimSpace(line) - if len(line) == 0 { - break - } - - i := bytes.Index(line, []byte(": ")) - if i == -1 { - goto TryNextBlock - } - lastKey = string(line[:i]) - p.Header[lastKey] = string(line[i+2:]) - } - - p.lReader.in = r - p.oReader.currentCRC = crc24Init - p.oReader.lReader = &p.lReader - p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) - p.Body = &p.oReader - - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go b/vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go deleted file mode 100644 index 075a1978e..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/armor/encode.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package armor - -import ( - "encoding/base64" - "io" -) - -var armorHeaderSep = []byte(": ") -var blockEnd = []byte("\n=") -var newline = []byte("\n") -var armorEndOfLineOut = []byte("-----\n") - -// writeSlices writes its arguments to the given Writer. -func writeSlices(out io.Writer, slices ...[]byte) (err error) { - for _, s := range slices { - _, err = out.Write(s) - if err != nil { - return err - } - } - return -} - -// lineBreaker breaks data across several lines, all of the same byte length -// (except possibly the last). Lines are broken with a single '\n'. -type lineBreaker struct { - lineLength int - line []byte - used int - out io.Writer - haveWritten bool -} - -func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { - return &lineBreaker{ - lineLength: lineLength, - line: make([]byte, lineLength), - used: 0, - out: out, - } -} - -func (l *lineBreaker) Write(b []byte) (n int, err error) { - n = len(b) - - if n == 0 { - return - } - - if l.used == 0 && l.haveWritten { - _, err = l.out.Write([]byte{'\n'}) - if err != nil { - return - } - } - - if l.used+len(b) < l.lineLength { - l.used += copy(l.line[l.used:], b) - return - } - - l.haveWritten = true - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - excess := l.lineLength - l.used - l.used = 0 - - _, err = l.out.Write(b[0:excess]) - if err != nil { - return - } - - _, err = l.Write(b[excess:]) - return -} - -func (l *lineBreaker) Close() (err error) { - if l.used > 0 { - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - } - - return -} - -// encoding keeps track of a running CRC24 over the data which has been written -// to it and outputs a OpenPGP checksum when closed, followed by an armor -// trailer. 
-// -// It's built into a stack of io.Writers: -// encoding -> base64 encoder -> lineBreaker -> out -type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte -} - -func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) - return e.b64.Write(data) -} - -func (e *encoding) Close() (err error) { - err = e.b64.Close() - if err != nil { - return - } - e.breaker.Close() - - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) - - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine, []byte{'\n'}) -} - -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. -func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { - bType := []byte(blockType) - err = writeSlices(out, armorStart, bType, armorEndOfLineOut) - if err != nil { - return - } - - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) - if err != nil { - return - } - } - - _, err = out.Write(newline) - if err != nil { - return - } - - e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, - } - e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) - return e, nil -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go b/vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go deleted file mode 100644 index e601e389f..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/canonical_text.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import "hash" - -// NewCanonicalTextHash reformats text written to it into the canonical -// form and then applies the hash h. See RFC 4880, section 5.2.1. -func NewCanonicalTextHash(h hash.Hash) hash.Hash { - return &canonicalTextHash{h, 0} -} - -type canonicalTextHash struct { - h hash.Hash - s int -} - -var newline = []byte{'\r', '\n'} - -func (cth *canonicalTextHash) Write(buf []byte) (int, error) { - start := 0 - - for i, c := range buf { - switch cth.s { - case 0: - if c == '\r' { - cth.s = 1 - } else if c == '\n' { - cth.h.Write(buf[start:i]) - cth.h.Write(newline) - start = i + 1 - } - case 1: - cth.s = 0 - } - } - - cth.h.Write(buf[start:]) - return len(buf), nil -} - -func (cth *canonicalTextHash) Sum(in []byte) []byte { - return cth.h.Sum(in) -} - -func (cth *canonicalTextHash) Reset() { - cth.h.Reset() - cth.s = 0 -} - -func (cth *canonicalTextHash) Size() int { - return cth.h.Size() -} - -func (cth *canonicalTextHash) BlockSize() int { - return cth.h.BlockSize() -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go b/vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go deleted file mode 100644 index 15dafc556..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/elgamal/elgamal.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package elgamal implements ElGamal encryption, suitable for OpenPGP, -// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on -// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, -// n. 4, 1985, pp. 469-472. -// -// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it -// unsuitable for other protocols. RSA should be used in preference in any -// case. -package elgamal // import "github.com/keybase/go-crypto/openpgp/elgamal" - -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" -) - -// PublicKey represents an ElGamal public key. -type PublicKey struct { - G, P, Y *big.Int -} - -// PrivateKey represents an ElGamal private key. -type PrivateKey struct { - PublicKey - X *big.Int -} - -// Encrypt encrypts the given message to the given public key. The result is a -// pair of integers. Errors can result from reading random, or because msg is -// too large to be encrypted to the public key. -func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { - pLen := (pub.P.BitLen() + 7) / 8 - if len(msg) > pLen-11 { - err = errors.New("elgamal: message too long") - return - } - - // EM = 0x02 || PS || 0x00 || M - em := make([]byte, pLen-1) - em[0] = 2 - ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] - err = nonZeroRandomBytes(ps, random) - if err != nil { - return - } - em[len(em)-len(msg)-1] = 0 - copy(mm, msg) - - m := new(big.Int).SetBytes(em) - - k, err := rand.Int(random, pub.P) - if err != nil { - return - } - - c1 = new(big.Int).Exp(pub.G, k, pub.P) - s := new(big.Int).Exp(pub.Y, k, pub.P) - c2 = s.Mul(s, m) - c2.Mod(c2, pub.P) - - return -} - -// Decrypt takes two integers, resulting from an ElGamal encryption, and -// returns the plaintext of the message. An error can result only if the -// ciphertext is invalid. Users should keep in mind that this is a padding -// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can -// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks -// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel -// Bleichenbacher, Advances in Cryptology (Crypto '98), -func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { - s := new(big.Int).Exp(c1, priv.X, priv.P) - s.ModInverse(s, priv.P) - s.Mul(s, c2) - s.Mod(s, priv.P) - em := s.Bytes() - - firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) - - // The remainder of the plaintext must be a string of non-zero random - // octets, followed by a 0, followed by the message. - // lookingForIndex: 1 iff we are still looking for the zero. - // index: the offset of the first zero byte. - var lookingForIndex, index int - lookingForIndex = 1 - - for i := 1; i < len(em); i++ { - equals0 := subtle.ConstantTimeByteEq(em[i], 0) - index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) - lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) - } - - if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { - return nil, errors.New("elgamal: decryption error") - } - return em[index+1:], nil -} - -// nonZeroRandomBytes fills the given slice with non-zero random octets. 
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { - _, err = io.ReadFull(rand, s) - if err != nil { - return - } - - for i := 0; i < len(s); i++ { - for s[i] == 0 { - _, err = io.ReadFull(rand, s[i:i+1]) - if err != nil { - return - } - } - } - - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go b/vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go deleted file mode 100644 index d48c6c9b6..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/errors/errors.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errors contains common error types for the OpenPGP packages. -package errors // import "github.com/keybase/go-crypto/openpgp/errors" - -import ( - "strconv" -) - -// A StructuralError is returned when OpenPGP data is found to be syntactically -// invalid. -type StructuralError string - -func (s StructuralError) Error() string { - return "openpgp: invalid data: " + string(s) -} - -// UnsupportedError indicates that, although the OpenPGP data is valid, it -// makes use of currently unimplemented features. -type UnsupportedError string - -func (s UnsupportedError) Error() string { - return "openpgp: unsupported feature: " + string(s) -} - -// InvalidArgumentError indicates that the caller is in error and passed an -// incorrect value. -type InvalidArgumentError string - -func (i InvalidArgumentError) Error() string { - return "openpgp: invalid argument: " + string(i) -} - -// SignatureError indicates that a syntactically valid signature failed to -// validate. -type SignatureError string - -func (b SignatureError) Error() string { - return "openpgp: invalid signature: " + string(b) -} - -type keyIncorrectError int - -func (ki keyIncorrectError) Error() string { - return "openpgp: incorrect key" -} - -var ErrKeyIncorrect error = keyIncorrectError(0) - -type unknownIssuerError int - -func (unknownIssuerError) Error() string { - return "openpgp: signature made by unknown entity" -} - -var ErrUnknownIssuer error = unknownIssuerError(0) - -type keyRevokedError int - -func (keyRevokedError) Error() string { - return "openpgp: signature made by revoked key" -} - -var ErrKeyRevoked error = keyRevokedError(0) - -type UnknownPacketTypeError uint8 - -func (upte UnknownPacketTypeError) Error() string { - return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/keys.go b/vendor/github.com/keybase/go-crypto/openpgp/keys.go deleted file mode 100644 index c0177b3aa..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/keys.go +++ /dev/null @@ -1,813 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "io" - "time" - - "github.com/keybase/go-crypto/openpgp/armor" - "github.com/keybase/go-crypto/openpgp/errors" - "github.com/keybase/go-crypto/openpgp/packet" - "github.com/keybase/go-crypto/rsa" -) - -// PublicKeyType is the armor type for a PGP public key. -var PublicKeyType = "PGP PUBLIC KEY BLOCK" - -// PrivateKeyType is the armor type for a PGP private key. 
-var PrivateKeyType = "PGP PRIVATE KEY BLOCK" - -// An Entity represents the components of an OpenPGP key: a primary public key -// (which must be a signing key), one or more identities claimed by that key, -// and zero or more subkeys, which may be encryption keys. -type Entity struct { - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey - BadSubkeys []BadSubkey -} - -// An Identity represents an identity claimed by an Entity and zero or more -// assertions by other entities about that claim. -type Identity struct { - Name string // by convention, has the form "Full Name (comment) " - UserId *packet.UserId - SelfSignature *packet.Signature - Signatures []*packet.Signature -} - -// A Subkey is an additional public key in an Entity. Subkeys can be used for -// encryption. -type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature - Revocation *packet.Signature -} - -// BadSubkey is one that failed reconstruction, but we'll keep it around for -// informational purposes. -type BadSubkey struct { - Subkey - Err error -} - -// A Key identifies a specific public key in an Entity. This is either the -// Entity's primary key or a subkey. -type Key struct { - Entity *Entity - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - SelfSignature *packet.Signature -} - -// A KeyRing provides access to public and private keys. -type KeyRing interface { - // KeysById returns the set of keys that have the given key id. - KeysById(id uint64) []Key - // KeysByIdAndUsage returns the set of keys with the given id - // that also meet the key usage given by requiredUsage. - // The requiredUsage is expressed as the bitwise-OR of - // packet.KeyFlag* values. - KeysByIdUsage(id uint64, requiredUsage byte) []Key - // DecryptionKeys returns all private keys that are valid for - // decryption. - DecryptionKeys() []Key -} - -// primaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. -func (e *Entity) primaryIdentity() *Identity { - var firstIdentity *Identity - for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident - } - } - return firstIdentity -} - -// encryptionKey returns the best candidate Key for encrypting a message to the -// given Entity. -func (e *Entity) encryptionKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - // Iterate the keys to find the newest key - var maxTime time.Time - for i, subkey := range e.Subkeys { - - // NOTE(maxtaco) - // If there is a Flags subpacket, then we have to follow it, and only - // use keys that are marked for Encryption of Communication. If there - // isn't a Flags subpacket, and this is an Encrypt-Only key (right now only ElGamal - // suffices), then we implicitly use it. The check for primary below is a little - // more open-ended, but for now, let's be strict and potentially open up - // if we see bugs in the wild. - // - // One more note: old DSA/ElGamal keys tend not to have the Flags subpacket, - // so this sort of thing is pretty important for encrypting to older keys. 
- // - if ((subkey.Sig.FlagsValid && subkey.Sig.FlagEncryptCommunications) || - (!subkey.Sig.FlagsValid && subkey.PublicKey.PubKeyAlgo == packet.PubKeyAlgoElGamal)) && - subkey.PublicKey.PubKeyAlgo.CanEncrypt() && - !subkey.Sig.KeyExpired(now) && - subkey.Revocation == nil && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { - candidateSubkey = i - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we don't have any candidate subkeys for encryption and - // the primary key doesn't have any usage metadata then we - // assume that the primary key is ok. Or, if the primary key is - // marked as ok to encrypt to, then we can obviously use it. - // - // NOTE(maxtaco) - see note above, how this policy is a little too open-ended - // for my liking, but leave it for now. - i := e.primaryIdentity() - if (!i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications) && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - // This Entity appears to be signing only. - return Key{}, false -} - -// signingKey return the best candidate Key for signing a message with this -// Entity. -func (e *Entity) signingKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - for i, subkey := range e.Subkeys { - if (!subkey.Sig.FlagsValid || subkey.Sig.FlagSign) && - subkey.PrivateKey.PrivateKey != nil && - subkey.PublicKey.PubKeyAlgo.CanSign() && - subkey.Revocation == nil && - !subkey.Sig.KeyExpired(now) { - candidateSubkey = i - break - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we have no candidate subkey then we assume that it's ok to sign - // with the primary key. - i := e.primaryIdentity() - if (!i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign) && - e.PrimaryKey.PubKeyAlgo.CanSign() && - !i.SelfSignature.KeyExpired(now) && - e.PrivateKey.PrivateKey != nil { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - return Key{}, false -} - -// An EntityList contains one or more Entities. -type EntityList []*Entity - -// KeysById returns the set of keys that have the given key id. -func (el EntityList) KeysById(id uint64) (keys []Key) { - for _, e := range el { - if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) - } - - for _, subKey := range e.Subkeys { - if subKey.PublicKey.KeyId == id { - - // If there's both a a revocation and a sig, then take the - // revocation. Otherwise, we can proceed with the sig. - sig := subKey.Revocation - if sig == nil { - sig = subKey.Sig - } - - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, sig}) - } - } - } - return -} - -// KeysByIdAndUsage returns the set of keys with the given id that also meet -// the key usage given by requiredUsage. The requiredUsage is expressed as -// the bitwise-OR of packet.KeyFlag* values. 
-func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { - for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if requiredUsage != 0 { - var usage byte - - switch { - case key.SelfSignature.FlagsValid: - if key.SelfSignature.FlagCertify { - usage |= packet.KeyFlagCertify - } - if key.SelfSignature.FlagSign { - usage |= packet.KeyFlagSign - } - if key.SelfSignature.FlagEncryptCommunications { - usage |= packet.KeyFlagEncryptCommunications - } - if key.SelfSignature.FlagEncryptStorage { - usage |= packet.KeyFlagEncryptStorage - } - - case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoElGamal: - // We also need to handle the case where, although the sig's - // flags aren't valid, the key can is implicitly usable for - // encryption by virtue of being ElGamal. See also the comment - // in encryptionKey() above. - usage |= packet.KeyFlagEncryptCommunications - usage |= packet.KeyFlagEncryptStorage - - case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoDSA: - usage |= packet.KeyFlagSign - - // For a primary RSA key without any key flags, be as permissiable - // as possible. - case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoRSA && - key.Entity.PrimaryKey.KeyId == id: - usage = (packet.KeyFlagCertify | packet.KeyFlagSign | - packet.KeyFlagEncryptCommunications | packet.KeyFlagEncryptStorage) - } - - if usage&requiredUsage != requiredUsage { - continue - } - } - - keys = append(keys, key) - } - return -} - -// DecryptionKeys returns all private keys that are valid for decryption. -func (el EntityList) DecryptionKeys() (keys []Key) { - for _, e := range el { - for _, subKey := range e.Subkeys { - if subKey.PrivateKey != nil && subKey.PrivateKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. -func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { - block, err := armor.Decode(r) - if err == io.EOF { - return nil, errors.InvalidArgumentError("no armored data found") - } - if err != nil { - return nil, err - } - if block.Type != PublicKeyType && block.Type != PrivateKeyType { - return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) - } - - return ReadKeyRing(block.Body) -} - -// ReadKeyRing reads one or more public/private keys. Unsupported keys are -// ignored as long as at least a single valid key is found. 
-func ReadKeyRing(r io.Reader) (el EntityList, err error) { - packets := packet.NewReader(r) - var lastUnsupportedError error - - for { - var e *Entity - e, err = ReadEntity(packets) - if err != nil { - // TODO: warn about skipped unsupported/unreadable keys - if _, ok := err.(errors.UnsupportedError); ok { - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } else if _, ok := err.(errors.StructuralError); ok { - // Skip unreadable, badly-formatted keys - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } - if err == io.EOF { - err = nil - break - } - if err != nil { - el = nil - break - } - } else { - el = append(el, e) - } - } - - if len(el) == 0 && err == nil { - err = lastUnsupportedError - } - return -} - -// readToNextPublicKey reads packets until the start of the entity and leaves -// the first packet of the new entity in the Reader. -func readToNextPublicKey(packets *packet.Reader) (err error) { - var p packet.Packet - for { - p, err = packets.Next() - if err == io.EOF { - return - } else if err != nil { - if _, ok := err.(errors.UnsupportedError); ok { - err = nil - continue - } - return - } - - if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { - packets.Unread(p) - return - } - } - - panic("unreachable") -} - -// ReadEntity reads an entity (public key, identities, subkeys etc) from the -// given Reader. -func ReadEntity(packets *packet.Reader) (*Entity, error) { - e := new(Entity) - e.Identities = make(map[string]*Identity) - - p, err := packets.Next() - if err != nil { - return nil, err - } - - var ok bool - if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { - if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { - packets.Unread(p) - return nil, errors.StructuralError("first packet was not a public/private key") - } else { - e.PrimaryKey = &e.PrivateKey.PublicKey - } - } - - if !e.PrimaryKey.PubKeyAlgo.CanSign() { - return nil, errors.StructuralError("primary key cannot be used for signatures") - } - - var current *Identity - var revocations []*packet.Signature -EachPacket: - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - switch pkt := p.(type) { - case *packet.UserId: - - // Make a new Identity object, that we might wind up throwing away. - // We'll only add it if we get a valid self-signature over this - // userID. - current = new(Identity) - current.Name = pkt.Id - current.UserId = pkt - case *packet.Signature: - - // These are signatures by other people on this key. Let's just ignore them - // from the beginning, since they shouldn't affect our key decoding one way - // or the other. - if pkt.IssuerKeyId != nil && *pkt.IssuerKeyId != e.PrimaryKey.KeyId { - continue - } - - // If this is a signature made by the keyholder, and the signature has stubbed out - // critical packets, then *now* we need to bail out. - if e := pkt.StubbedOutCriticalError; e != nil { - return nil, e - } - - // Next handle the case of a self-signature. According to RFC8440, - // Section 5.2.3.3, if there are several self-signatures, - // we should take the newer one. If they were both created - // at the same time, but one of them has keyflags specified and the - // other doesn't, keep the one with the keyflags. We have actually - // seen this in the wild (see the 'Yield' test in read_test.go). - // If there is a tie, and both have the same value for FlagsValid, - // then "last writer wins." - // - // HOWEVER! 
We have seen yet more keys in the wild (see the 'Spiros' - // test in read_test.go), in which the later self-signature is a bunch - // of junk, and doesn't even specify key flags. Does it really make - // sense to overwrite reasonable key flags with the empty set? I'm not - // sure what that would be trying to achieve, and plus GPG seems to be - // ok with this situation, and ignores the later (empty) keyflag set. - // So further tighten our overwrite rules, and only allow the later - // signature to overwrite the earlier signature if so doing won't - // trash the key flags. - if current != nil && - (current.SelfSignature == nil || - (!pkt.CreationTime.Before(current.SelfSignature.CreationTime) && - (pkt.FlagsValid || !current.SelfSignature.FlagsValid))) && - (pkt.SigType == packet.SigTypePositiveCert || pkt.SigType == packet.SigTypeGenericCert) && - pkt.IssuerKeyId != nil && - *pkt.IssuerKeyId == e.PrimaryKey.KeyId { - - if err = e.PrimaryKey.VerifyUserIdSignature(current.Name, e.PrimaryKey, pkt); err == nil { - - current.SelfSignature = pkt - - // NOTE(maxtaco) 2016.01.11 - // Only register an identity once we've gotten a valid self-signature. - // It's possible therefore for us to throw away `current` in the case - // no valid self-signatures were found. That's OK as long as there are - // other identies that make sense. - // - // NOTE! We might later see a revocation for this very same UID, and it - // won't be undone. We've preserved this feature from the original - // Google OpenPGP we forked from. - e.Identities[current.Name] = current - } else { - // We really should warn that there was a failure here. Not raise an error - // since this really shouldn't be a fail-stop error. - } - } else if pkt.SigType == packet.SigTypeKeyRevocation { - // These revocations won't revoke UIDs as handled above, so lookout! - revocations = append(revocations, pkt) - } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). - } else if current == nil { - // NOTE(maxtaco) - // - // See https://github.com/keybase/client/issues/2666 - // - // There might have been a user attribute picture before this signature, - // in which case this is still a valid PGP key. In the future we might - // not ignore user attributes (like picture). But either way, it doesn't - // make sense to bail out here. Keep looking for other valid signatures. - // - // Used to be: - // return nil, errors.StructuralError("signature packet found before user id packet") - } else { - current.Signatures = append(current.Signatures, pkt) - } - case *packet.PrivateKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, &pkt.PublicKey, pkt) - if err != nil { - return nil, err - } - case *packet.PublicKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, pkt, nil) - if err != nil { - return nil, err - } - default: - // we ignore unknown packets - } - } - - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") - } - - for _, revocation := range revocations { - err = e.PrimaryKey.VerifyRevocationSignature(revocation) - if err == nil { - e.Revocations = append(e.Revocations, revocation) - } else { - // TODO: RFC 4880 5.2.3.15 defines revocation keys. 
- return nil, errors.StructuralError("revocation signature signed by alternate key") - } - } - - return e, nil -} - -func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { - var subKey Subkey - subKey.PublicKey = pub - subKey.PrivateKey = priv - var lastErr error - for { - p, err := packets.Next() - if err == io.EOF { - break - } - if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - sig, ok := p.(*packet.Signature) - if !ok { - // Hit a non-signature packet, so assume we're up to the next key - packets.Unread(p) - break - } - if st := sig.SigType; st != packet.SigTypeSubkeyBinding && st != packet.SigTypeSubkeyRevocation { - - // Note(maxtaco): - // We used to error out here, but instead, let's fast-forward past - // packets that are in the wrong place (like misplaced 0x13 signatures) - // until we get to one that works. For a test case, - // see TestWithBadSubkeySignaturePackets. - - continue - } - err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig) - if err != nil { - // Non valid signature, so again, no need to abandon all hope, just continue; - // make a note of the error we hit. - lastErr = errors.StructuralError("subkey signature invalid: " + err.Error()) - continue - } - switch sig.SigType { - case packet.SigTypeSubkeyBinding: - // First writer wins - if subKey.Sig == nil { - subKey.Sig = sig - } - case packet.SigTypeSubkeyRevocation: - // First writer wins - if subKey.Revocation == nil { - subKey.Revocation = sig - } - } - } - if subKey.Sig != nil { - e.Subkeys = append(e.Subkeys, subKey) - } else { - if lastErr == nil { - lastErr = errors.StructuralError("Subkey wasn't signed; expected a 'binding' signature") - } - e.BadSubkeys = append(e.BadSubkeys, BadSubkey{Subkey: subKey, Err: lastErr}) - } - return nil -} - -const defaultRSAKeyBits = 2048 - -// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a -// single identity composed of the given full name, comment and email, any of -// which may be empty but must not contain any of "()<>\x00". -// If config is nil, sensible defaults will be used. 
-func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { - currentTime := config.Now() - - bits := defaultRSAKeyBits - if config != nil && config.RSABits != 0 { - bits = config.RSABits - } - - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return nil, errors.InvalidArgumentError("user id field contained invalid characters") - } - signingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - - e := &Entity{ - PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv), - Identities: make(map[string]*Identity), - } - isPrimaryId := true - e.Identities[uid.Id] = &Identity{ - Name: uid.Name, - UserId: uid, - SelfSignature: &packet.Signature{ - CreationTime: currentTime, - SigType: packet.SigTypePositiveCert, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - IsPrimaryId: &isPrimaryId, - FlagsValid: true, - FlagSign: true, - FlagCertify: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - - e.Subkeys = make([]Subkey, 1) - e.Subkeys[0] = Subkey{ - PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv), - Sig: &packet.Signature{ - CreationTime: currentTime, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - e.Subkeys[0].PublicKey.IsSubkey = true - e.Subkeys[0].PrivateKey.IsSubkey = true - - return e, nil -} - -// SerializePrivate serializes an Entity, including private key material, to -// the given Writer. For now, it must only be used on an Entity returned from -// NewEntity. -// If config is nil, sensible defaults will be used. -func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { - err = e.PrivateKey.Serialize(w) - if err != nil { - return - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return - } - if e.PrivateKey.PrivateKey != nil { - err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return - } - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return - } - } - for _, subkey := range e.Subkeys { - err = subkey.PrivateKey.Serialize(w) - if err != nil { - return - } - // Workaround shortcoming of SignKey(), which doesn't work to reverse-sign - // sub-signing keys. So if requested, just reuse the signatures already - // available to us (if we read this key from a keyring). - if e.PrivateKey.PrivateKey != nil && !config.ReuseSignatures() { - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return - } - } - - if subkey.Revocation != nil { - err = subkey.Revocation.Serialize(w) - if err != nil { - return - } - } - - err = subkey.Sig.Serialize(w) - if err != nil { - return - } - } - return nil -} - -// Serialize writes the public part of the given Entity to w. (No private -// key material will be output). 
-func (e *Entity) Serialize(w io.Writer) error { - err := e.PrimaryKey.Serialize(w) - if err != nil { - return err - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return err - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return err - } - for _, sig := range ident.Signatures { - err = sig.Serialize(w) - if err != nil { - return err - } - } - } - for _, subkey := range e.Subkeys { - err = subkey.PublicKey.Serialize(w) - if err != nil { - return err - } - - if subkey.Revocation != nil { - err = subkey.Revocation.Serialize(w) - if err != nil { - return err - } - } - err = subkey.Sig.Serialize(w) - if err != nil { - return err - } - } - return nil -} - -// SignIdentity adds a signature to e, from signer, attesting that identity is -// associated with e. The provided identity must already be an element of -// e.Identities and the private key of signer must have been decrypted if -// necessary. -// If config is nil, sensible defaults will be used. -func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing Entity must have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing Entity's private key must be decrypted") - } - ident, ok := e.Identities[identity] - if !ok { - return errors.InvalidArgumentError("given identity string not found in Entity") - } - - sig := &packet.Signature{ - SigType: packet.SigTypeGenericCert, - PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: config.Now(), - IssuerKeyId: &signer.PrivateKey.KeyId, - } - if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { - return err - } - ident.Signatures = append(ident.Signatures, sig) - return nil -} - -// CopySubkeyRevocations copies subkey revocations from the src Entity over -// to the receiver entity. We need this because `gpg --export-secret-key` does -// not appear to output subkey revocations. In this case we need to manually -// merge with the output of `gpg --export`. -func (e *Entity) CopySubkeyRevocations(src *Entity) { - m := make(map[[20]byte]*packet.Signature) - for _, subkey := range src.Subkeys { - if subkey.Revocation != nil { - m[subkey.PublicKey.Fingerprint] = subkey.Revocation - } - } - for i, subkey := range e.Subkeys { - if r := m[subkey.PublicKey.Fingerprint]; r != nil { - e.Subkeys[i].Revocation = r - } - } -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go deleted file mode 100644 index f023fe533..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/compressed.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "compress/bzip2" - "compress/flate" - "compress/zlib" - "io" - "strconv" - - "github.com/keybase/go-crypto/openpgp/errors" -) - -// Compressed represents a compressed OpenPGP packet. The decompressed contents -// will contain more OpenPGP packets. See RFC 4880, section 5.6. 
-type Compressed struct { - Body io.Reader -} - -const ( - NoCompression = flate.NoCompression - BestSpeed = flate.BestSpeed - BestCompression = flate.BestCompression - DefaultCompression = flate.DefaultCompression -) - -// CompressionConfig contains compressor configuration settings. -type CompressionConfig struct { - // Level is the compression level to use. It must be set to - // between -1 and 9, with -1 causing the compressor to use the - // default compression level, 0 causing the compressor to use - // no compression and 1 to 9 representing increasing (better, - // slower) compression levels. If Level is less than -1 or - // more then 9, a non-nil error will be returned during - // encryption. See the constants above for convenient common - // settings for Level. - Level int -} - -func (c *Compressed) parse(r io.Reader) error { - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - - switch buf[0] { - case 1: - c.Body = flate.NewReader(r) - case 2: - c.Body, err = zlib.NewReader(r) - case 3: - c.Body = bzip2.NewReader(r) - default: - err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) - } - - return err -} - -// compressedWriterCloser represents the serialized compression stream -// header and the compressor. Its Close() method ensures that both the -// compressor and serialized stream header are closed. Its Write() -// method writes to the compressor. -type compressedWriteCloser struct { - sh io.Closer // Stream Header - c io.WriteCloser // Compressor -} - -func (cwc compressedWriteCloser) Write(p []byte) (int, error) { - return cwc.c.Write(p) -} - -func (cwc compressedWriteCloser) Close() (err error) { - err = cwc.c.Close() - if err != nil { - return err - } - - return cwc.sh.Close() -} - -// SerializeCompressed serializes a compressed data packet to w and -// returns a WriteCloser to which the literal data packets themselves -// can be written and which MUST be closed on completion. If cc is -// nil, sensible defaults will be used to configure the compression -// algorithm. -func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { - compressed, err := serializeStreamHeader(w, packetTypeCompressed) - if err != nil { - return - } - - _, err = compressed.Write([]byte{uint8(algo)}) - if err != nil { - return - } - - level := DefaultCompression - if cc != nil { - level = cc.Level - } - - var compressor io.WriteCloser - switch algo { - case CompressionZIP: - compressor, err = flate.NewWriter(compressed, level) - case CompressionZLIB: - compressor, err = zlib.NewWriterLevel(compressed, level) - default: - s := strconv.Itoa(int(algo)) - err = errors.UnsupportedError("Unsupported compression algorithm: " + s) - } - if err != nil { - return - } - - literaldata = compressedWriteCloser{compressed, compressor} - - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/config.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/config.go deleted file mode 100644 index f4125e189..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/config.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rand" - "io" - "time" -) - -// Config collects a number of parameters along with sensible defaults. 
-// A nil *Config is valid and results in all default values. -type Config struct { - // Rand provides the source of entropy. - // If nil, the crypto/rand Reader is used. - Rand io.Reader - // DefaultHash is the default hash function to be used. - // If zero, SHA-256 is used. - DefaultHash crypto.Hash - // DefaultCipher is the cipher to be used. - // If zero, AES-128 is used. - DefaultCipher CipherFunction - // Time returns the current time as the number of seconds since the - // epoch. If Time is nil, time.Now is used. - Time func() time.Time - // DefaultCompressionAlgo is the compression algorithm to be - // applied to the plaintext before encryption. If zero, no - // compression is done. - DefaultCompressionAlgo CompressionAlgo - // CompressionConfig configures the compression settings. - CompressionConfig *CompressionConfig - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encrouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int - // RSABits is the number of bits in new RSA keys made with NewEntity. - // If zero, then 2048 bit keys are created. - RSABits int - // ReuseSignatures tells us to reuse existing Signatures - // on serialized output. - ReuseSignaturesOnSerialize bool -} - -func (c *Config) Random() io.Reader { - if c == nil || c.Rand == nil { - return rand.Reader - } - return c.Rand -} - -func (c *Config) Hash() crypto.Hash { - if c == nil || uint(c.DefaultHash) == 0 { - return crypto.SHA256 - } - return c.DefaultHash -} - -func (c *Config) Cipher() CipherFunction { - if c == nil || uint8(c.DefaultCipher) == 0 { - return CipherAES128 - } - return c.DefaultCipher -} - -func (c *Config) Now() time.Time { - if c == nil || c.Time == nil { - return time.Now() - } - return c.Time() -} - -func (c *Config) Compression() CompressionAlgo { - if c == nil { - return CompressionNone - } - return c.DefaultCompressionAlgo -} - -func (c *Config) PasswordHashIterations() int { - if c == nil || c.S2KCount == 0 { - return 0 - } - return c.S2KCount -} - -func (c *Config) ReuseSignatures() bool { - return c != nil && c.ReuseSignaturesOnSerialize -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go deleted file mode 100644 index 687e43453..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/ecdh.go +++ /dev/null @@ -1,18 +0,0 @@ -package packet - -import ( - "crypto/ecdsa" - "errors" - "math/big" -) - -type ecdhPrivateKey struct { - ecdsa.PublicKey - x *big.Int -} - -func (e *ecdhPrivateKey) Decrypt(b []byte) ([]byte, error) { - // TODO(maxtaco): compute the shared secret, run the KDF and - // recover the decrypted shard key. - return nil, errors.New("ECDH decrypt unimplemented") -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go deleted file mode 100644 index e06448549..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/encrypted_key.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2011 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "encoding/binary" - "io" - "math/big" - "strconv" - - "github.com/keybase/go-crypto/openpgp/elgamal" - "github.com/keybase/go-crypto/openpgp/errors" - "github.com/keybase/go-crypto/rsa" -) - -const encryptedKeyVersion = 3 - -// EncryptedKey represents a public-key encrypted session key. See RFC 4880, -// section 5.1. -type EncryptedKey struct { - KeyId uint64 - Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt - Key []byte // only valid after a successful Decrypt - - encryptedMPI1, encryptedMPI2 parsedMPI -} - -func (e *EncryptedKey) parse(r io.Reader) (err error) { - var buf [10]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != encryptedKeyVersion { - return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) - } - e.KeyId = binary.BigEndian.Uint64(buf[1:9]) - e.Algo = PublicKeyAlgorithm(buf[9]) - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - case PubKeyAlgoElGamal: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) - } - _, err = consumeAll(r) - return -} - -func checksumKeyMaterial(key []byte) uint16 { - var checksum uint16 - for _, v := range key { - checksum += uint16(v) - } - return checksum -} - -// Decrypt decrypts an encrypted session key with the given private key. The -// private key must have been decrypted first. -// If config is nil, sensible defaults will be used. -func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { - var err error - var b []byte - - // TODO(agl): use session key decryption routines here to avoid - // padding oracle attacks. - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - b, err = rsa.DecryptPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1.bytes) - case PubKeyAlgoElGamal: - c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) - c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) - b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) - case PubKeyAlgoECDH: - ecdh, ok := priv.PrivateKey.(*ecdhPrivateKey) - if !ok { - return errors.InvalidArgumentError("bad internal ECDH key") - } - b, err = ecdh.Decrypt(e.encryptedMPI1.bytes) - default: - err = errors.InvalidArgumentError("cannot decrypted encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) - } - - if err != nil { - return err - } - - e.CipherFunc = CipherFunction(b[0]) - e.Key = b[1 : len(b)-2] - expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) - checksum := checksumKeyMaterial(e.Key) - if checksum != expectedChecksum { - return errors.StructuralError("EncryptedKey checksum incorrect") - } - - return nil -} - -// Serialize writes the encrypted key packet, e, to w. 
-func (e *EncryptedKey) Serialize(w io.Writer) error { - var mpiLen int - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - mpiLen = 2 + len(e.encryptedMPI1.bytes) - case PubKeyAlgoElGamal: - mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) - default: - return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) - } - - serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) - - w.Write([]byte{encryptedKeyVersion}) - binary.Write(w, binary.BigEndian, e.KeyId) - w.Write([]byte{byte(e.Algo)}) - - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - writeMPIs(w, e.encryptedMPI1) - case PubKeyAlgoElGamal: - writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) - default: - panic("internal error") - } - - return nil -} - -// SerializeEncryptedKey serializes an encrypted key packet to w that contains -// key, encrypted to pub. -// If config is nil, sensible defaults will be used. -func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) - - switch pub.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) - case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) - case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: - return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) - } - - return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) -} - -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { - cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - return writeMPI(w, 8*uint16(len(cipherText)), cipherText) -} - -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { - c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ - packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 - packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - err = writeBig(w, c1) - if err != nil { - return err - } - return writeBig(w, c2) -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go 
deleted file mode 100644 index 1a9ec6e51..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/literal.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "encoding/binary" - "io" -) - -// LiteralData represents an encrypted file. See RFC 4880, section 5.9. -type LiteralData struct { - IsBinary bool - FileName string - Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. - Body io.Reader -} - -// ForEyesOnly returns whether the contents of the LiteralData have been marked -// as especially sensitive. -func (l *LiteralData) ForEyesOnly() bool { - return l.FileName == "_CONSOLE" -} - -func (l *LiteralData) parse(r io.Reader) (err error) { - var buf [256]byte - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - - l.IsBinary = buf[0] == 'b' - fileNameLen := int(buf[1]) - - _, err = readFull(r, buf[:fileNameLen]) - if err != nil { - return - } - - l.FileName = string(buf[:fileNameLen]) - - _, err = readFull(r, buf[:4]) - if err != nil { - return - } - - l.Time = binary.BigEndian.Uint32(buf[:4]) - l.Body = r - return -} - -// SerializeLiteral serializes a literal data packet to w and returns a -// WriteCloser to which the data itself can be written and which MUST be closed -// on completion. The fileName is truncated to 255 bytes. -func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { - var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' - } - if len(fileName) > 255 { - fileName = fileName[:255] - } - buf[1] = byte(len(fileName)) - - inner, err := serializeStreamHeader(w, packetTypeLiteralData) - if err != nil { - return - } - - _, err = inner.Write(buf[:2]) - if err != nil { - return - } - _, err = inner.Write([]byte(fileName)) - if err != nil { - return - } - binary.BigEndian.PutUint32(buf[:], time) - _, err = inner.Write(buf[:]) - if err != nil { - return - } - - plaintext = inner - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go deleted file mode 100644 index ce2a33a54..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/ocfb.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 - -package packet - -import ( - "crypto/cipher" -) - -type ocfbEncrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// An OCFBResyncOption determines if the "resynchronization step" of OCFB is -// performed. -type OCFBResyncOption bool - -const ( - OCFBResync OCFBResyncOption = true - OCFBNoResync OCFBResyncOption = false -) - -// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block, and an initial amount of -// ciphertext. randData must be random bytes and be the same length as the -// cipher.Block's block size. Resync determines if the "resynchronization step" -// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on -// this point. 
-func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { - blockSize := block.BlockSize() - if len(randData) != blockSize { - return nil, nil - } - - x := &ocfbEncrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefix := make([]byte, blockSize+2) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefix[i] = randData[i] ^ x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] - prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - return x, prefix -} - -func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - x.fre[x.outUsed] ^= src[i] - dst[i] = x.fre[x.outUsed] - x.outUsed++ - } -} - -type ocfbDecrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block. Prefix must be the first -// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's -// block size. If an incorrect key is detected then nil is returned. On -// successful exit, blockSize+2 bytes of decrypted data are written into -// prefix. Resync determines if the "resynchronization step" from RFC 4880, -// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. -func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { - blockSize := block.BlockSize() - if len(prefix) != blockSize+2 { - return nil - } - - x := &ocfbDecrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefixCopy := make([]byte, len(prefix)) - copy(prefixCopy, prefix) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefixCopy[i] ^= x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefixCopy[blockSize] ^= x.fre[0] - prefixCopy[blockSize+1] ^= x.fre[1] - - if prefixCopy[blockSize-2] != prefixCopy[blockSize] || - prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { - return nil - } - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - copy(prefix, prefixCopy) - return x -} - -func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - c := src[i] - dst[i] = x.fre[x.outUsed] ^ src[i] - x.fre[x.outUsed] = c - x.outUsed++ - } -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go deleted file mode 100644 index af404bb10..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/one_pass_signature.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "io" - "strconv" - - "github.com/keybase/go-crypto/openpgp/errors" - "github.com/keybase/go-crypto/openpgp/s2k" -) - -// OnePassSignature represents a one-pass signature packet. See RFC 4880, -// section 5.4. 
-type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool -} - -const onePassSignatureVersion = 3 - -func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) - } - - var ok bool - ops.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) - } - - ops.SigType = SignatureType(buf[1]) - ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 - return -} - -// Serialize marshals the given OnePassSignature to w. -func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion - buf[1] = uint8(ops.SigType) - var ok bool - buf[2], ok = s2k.HashToHashId(ops.Hash) - if !ok { - return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) - } - buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { - return err - } - _, err := w.Write(buf[:]) - return err -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go deleted file mode 100644 index cdeea012f..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/opaque.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "io" - "io/ioutil" - - "github.com/keybase/go-crypto/openpgp/errors" -) - -// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is -// useful for splitting and storing the original packet contents separately, -// handling unsupported packet types or accessing parts of the packet not yet -// implemented by this package. -type OpaquePacket struct { - // Packet type - Tag uint8 - // Reason why the packet was parsed opaquely - Reason error - // Binary contents of the packet data - Contents []byte -} - -func (op *OpaquePacket) parse(r io.Reader) (err error) { - op.Contents, err = ioutil.ReadAll(r) - return -} - -// Serialize marshals the packet to a writer in its original form, including -// the packet header. -func (op *OpaquePacket) Serialize(w io.Writer) (err error) { - err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) - if err == nil { - _, err = w.Write(op.Contents) - } - return -} - -// Parse attempts to parse the opaque contents into a structure supported by -// this package. If the packet is not known then the result will be another -// OpaquePacket. -func (op *OpaquePacket) Parse() (p Packet, err error) { - hdr := bytes.NewBuffer(nil) - err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) - if err != nil { - op.Reason = err - return op, err - } - p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) - if err != nil { - op.Reason = err - p = op - } - return -} - -// OpaqueReader reads OpaquePackets from an io.Reader. 
-type OpaqueReader struct { - r io.Reader -} - -func NewOpaqueReader(r io.Reader) *OpaqueReader { - return &OpaqueReader{r: r} -} - -// Read the next OpaquePacket. -func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { - tag, _, contents, err := readHeader(or.r) - if err != nil { - return - } - op = &OpaquePacket{Tag: uint8(tag), Reason: err} - err = op.parse(contents) - if err != nil { - consumeAll(contents) - } - return -} - -// OpaqueSubpacket represents an unparsed OpenPGP subpacket, -// as found in signature and user attribute packets. -type OpaqueSubpacket struct { - SubType uint8 - Contents []byte -} - -// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from -// their byte representation. -func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { - var ( - subHeaderLen int - subPacket *OpaqueSubpacket - ) - for len(contents) > 0 { - subHeaderLen, subPacket, err = nextSubpacket(contents) - if err != nil { - break - } - result = append(result, subPacket) - contents = contents[subHeaderLen+len(subPacket.Contents):] - } - return -} - -func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { - // RFC 4880, section 5.2.3.1 - var subLen uint32 - if len(contents) < 1 { - goto Truncated - } - subPacket = &OpaqueSubpacket{} - switch { - case contents[0] < 192: - subHeaderLen = 2 // 1 length byte, 1 subtype byte - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]) - contents = contents[1:] - case contents[0] < 255: - subHeaderLen = 3 // 2 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 - contents = contents[2:] - default: - subHeaderLen = 6 // 5 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[1])<<24 | - uint32(contents[2])<<16 | - uint32(contents[3])<<8 | - uint32(contents[4]) - contents = contents[5:] - } - if subLen > uint32(len(contents)) || subLen == 0 { - goto Truncated - } - subPacket.SubType = contents[0] - subPacket.Contents = contents[1:subLen] - return -Truncated: - err = errors.StructuralError("subpacket truncated") - return -} - -func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { - buf := make([]byte, 6) - n := serializeSubpacketLength(buf, len(osp.Contents)+1) - buf[n] = osp.SubType - if _, err = w.Write(buf[:n+1]); err != nil { - return - } - _, err = w.Write(osp.Contents) - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go deleted file mode 100644 index feba7bcd3..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/packet.go +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packet implements parsing and serialization of OpenPGP packets, as -// specified in RFC 4880. -package packet // import "github.com/keybase/go-crypto/openpgp/packet" - -import ( - "bufio" - "crypto/aes" - "crypto/cipher" - "crypto/des" - "io" - "math/big" - - "github.com/keybase/go-crypto/cast5" - "github.com/keybase/go-crypto/openpgp/errors" -) - -// readFull is the same as io.ReadFull except that reading zero bytes returns -// ErrUnexpectedEOF rather than EOF. 
-func readFull(r io.Reader, buf []byte) (n int, err error) { - n, err = io.ReadFull(r, buf) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. -func readLength(r io.Reader) (length int64, isPartial bool, err error) { - var buf [4]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - switch { - case buf[0] < 192: - length = int64(buf[0]) - case buf[0] < 224: - length = int64(buf[0]-192) << 8 - _, err = readFull(r, buf[0:1]) - if err != nil { - return - } - length += int64(buf[0]) + 192 - case buf[0] < 255: - length = int64(1) << (buf[0] & 0x1f) - isPartial = true - default: - _, err = readFull(r, buf[0:4]) - if err != nil { - return - } - length = int64(buf[0])<<24 | - int64(buf[1])<<16 | - int64(buf[2])<<8 | - int64(buf[3]) - } - return -} - -// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. -// The continuation lengths are parsed and removed from the stream and EOF is -// returned at the end of the packet. See RFC 4880, section 4.2.2.4. -type partialLengthReader struct { - r io.Reader - remaining int64 - isPartial bool -} - -func (r *partialLengthReader) Read(p []byte) (n int, err error) { - for r.remaining == 0 { - if !r.isPartial { - return 0, io.EOF - } - r.remaining, r.isPartial, err = readLength(r.r) - if err != nil { - return 0, err - } - } - - toRead := int64(len(p)) - if toRead > r.remaining { - toRead = r.remaining - } - - n, err = r.r.Read(p[:int(toRead)]) - r.remaining -= int64(n) - if n < int(toRead) && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// partialLengthWriter writes a stream of data using OpenPGP partial lengths. -// See RFC 4880, section 4.2.2.4. -type partialLengthWriter struct { - w io.WriteCloser - lengthByte [1]byte -} - -func (w *partialLengthWriter) Write(p []byte) (n int, err error) { - for len(p) > 0 { - for power := uint(14); power < 32; power-- { - l := 1 << power - if len(p) >= l { - w.lengthByte[0] = 224 + uint8(power) - _, err = w.w.Write(w.lengthByte[:]) - if err != nil { - return - } - var m int - m, err = w.w.Write(p[:l]) - n += m - if err != nil { - return - } - p = p[l:] - break - } - } - } - return -} - -func (w *partialLengthWriter) Close() error { - w.lengthByte[0] = 0 - _, err := w.w.Write(w.lengthByte[:]) - if err != nil { - return err - } - return w.w.Close() -} - -// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the -// underlying Reader returns EOF before the limit has been reached. -type spanReader struct { - r io.Reader - n int64 -} - -func (l *spanReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > l.n { - p = p[0:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - if l.n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readHeader parses a packet header and returns an io.Reader which will return -// the contents of the packet. See RFC 4880, section 4.2. 
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { - var buf [4]byte - _, err = io.ReadFull(r, buf[:1]) - if err != nil { - return - } - if buf[0]&0x80 == 0 { - err = errors.StructuralError("tag byte does not have MSB set") - return - } - if buf[0]&0x40 == 0 { - // Old format packet - tag = packetType((buf[0] & 0x3f) >> 2) - lengthType := buf[0] & 3 - if lengthType == 3 { - length = -1 - contents = r - return - } - lengthBytes := 1 << lengthType - _, err = readFull(r, buf[0:lengthBytes]) - if err != nil { - return - } - for i := 0; i < lengthBytes; i++ { - length <<= 8 - length |= int64(buf[i]) - } - contents = &spanReader{r, length} - return - } - - // New format packet - tag = packetType(buf[0] & 0x3f) - length, isPartial, err := readLength(r) - if err != nil { - return - } - if isPartial { - contents = &partialLengthReader{ - remaining: length, - isPartial: true, - r: r, - } - length = -1 - } else { - contents = &spanReader{r, length} - } - return -} - -// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section -// 4.2. -func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { - var buf [6]byte - var n int - - buf[0] = 0x80 | 0x40 | byte(ptype) - if length < 192 { - buf[1] = byte(length) - n = 2 - } else if length < 8384 { - length -= 192 - buf[1] = 192 + byte(length>>8) - buf[2] = byte(length) - n = 3 - } else { - buf[1] = 255 - buf[2] = byte(length >> 24) - buf[3] = byte(length >> 16) - buf[4] = byte(length >> 8) - buf[5] = byte(length) - n = 6 - } - - _, err = w.Write(buf[:n]) - return -} - -// serializeStreamHeader writes an OpenPGP packet header to w where the -// length of the packet is unknown. It returns a io.WriteCloser which can be -// used to write the contents of the packet. See RFC 4880, section 4.2. -func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { - var buf [1]byte - buf[0] = 0x80 | 0x40 | byte(ptype) - _, err = w.Write(buf[:]) - if err != nil { - return - } - out = &partialLengthWriter{w: w} - return -} - -// Packet represents an OpenPGP packet. Users are expected to try casting -// instances of this interface to specific packet types. -type Packet interface { - parse(io.Reader) error -} - -// consumeAll reads from the given Reader until error, returning the number of -// bytes read. -func consumeAll(r io.Reader) (n int64, err error) { - var m int - var buf [1024]byte - - for { - m, err = r.Read(buf[:]) - n += int64(m) - if err == io.EOF { - err = nil - return - } - if err != nil { - return - } - } - - panic("unreachable") -} - -// packetType represents the numeric ids of the different OpenPGP packet types. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 -type packetType uint8 - -const ( - packetTypeEncryptedKey packetType = 1 - packetTypeSignature packetType = 2 - packetTypeSymmetricKeyEncrypted packetType = 3 - packetTypeOnePassSignature packetType = 4 - packetTypePrivateKey packetType = 5 - packetTypePublicKey packetType = 6 - packetTypePrivateSubkey packetType = 7 - packetTypeCompressed packetType = 8 - packetTypeSymmetricallyEncrypted packetType = 9 - packetTypeLiteralData packetType = 11 - packetTypeUserId packetType = 13 - packetTypePublicSubkey packetType = 14 - packetTypeUserAttribute packetType = 17 - packetTypeSymmetricallyEncryptedMDC packetType = 18 -) - -// peekVersion detects the version of a public key packet about to -// be read. 
A bufio.Reader at the original position of the io.Reader -// is returned. -func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { - bufr = bufio.NewReader(r) - var verBuf []byte - if verBuf, err = bufr.Peek(1); err != nil { - return - } - ver = verBuf[0] - return -} - -// Read reads a single OpenPGP packet from the given io.Reader. If there is an -// error parsing a packet, the whole packet is consumed from the input. -func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) - if err != nil { - return - } - - switch tag { - case packetTypeEncryptedKey: - p = new(EncryptedKey) - case packetTypeSignature: - var version byte - // Detect signature version - if contents, version, err = peekVersion(contents); err != nil { - return - } - if version < 4 { - p = new(SignatureV3) - } else { - p = new(Signature) - } - case packetTypeSymmetricKeyEncrypted: - p = new(SymmetricKeyEncrypted) - case packetTypeOnePassSignature: - p = new(OnePassSignature) - case packetTypePrivateKey, packetTypePrivateSubkey: - pk := new(PrivateKey) - if tag == packetTypePrivateSubkey { - pk.IsSubkey = true - } - p = pk - case packetTypePublicKey, packetTypePublicSubkey: - var version byte - if contents, version, err = peekVersion(contents); err != nil { - return - } - isSubkey := tag == packetTypePublicSubkey - if version < 4 { - p = &PublicKeyV3{IsSubkey: isSubkey} - } else { - p = &PublicKey{IsSubkey: isSubkey} - } - case packetTypeCompressed: - p = new(Compressed) - case packetTypeSymmetricallyEncrypted: - p = new(SymmetricallyEncrypted) - case packetTypeLiteralData: - p = new(LiteralData) - case packetTypeUserId: - p = new(UserId) - case packetTypeUserAttribute: - p = new(UserAttribute) - case packetTypeSymmetricallyEncryptedMDC: - se := new(SymmetricallyEncrypted) - se.MDC = true - p = se - default: - err = errors.UnknownPacketTypeError(tag) - } - if p != nil { - err = p.parse(contents) - } - if err != nil { - consumeAll(contents) - } - return -} - -// SignatureType represents the different semantic meanings of an OpenPGP -// signature. See RFC 4880, section 5.2.1. -type SignatureType uint8 - -const ( - SigTypeBinary SignatureType = 0 - SigTypeText = 1 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 -) - -// PublicKeyAlgorithm represents the different public key system specified for -// OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 -type PublicKeyAlgorithm uint8 - -const ( - PubKeyAlgoRSA PublicKeyAlgorithm = 1 - PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 - PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 - PubKeyAlgoElGamal PublicKeyAlgorithm = 16 - PubKeyAlgoDSA PublicKeyAlgorithm = 17 - // RFC 6637, Section 5. - PubKeyAlgoECDH PublicKeyAlgorithm = 18 - PubKeyAlgoECDSA PublicKeyAlgorithm = 19 - // RFC -1 - PubKeyAlgoEdDSA PublicKeyAlgorithm = 22 -) - -// CanEncrypt returns true if it's possible to encrypt a message to a public -// key of the given type. -func (pka PublicKeyAlgorithm) CanEncrypt() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: - return true - } - return false -} - -// CanSign returns true if it's possible for a public key of the given type to -// sign a message. 
-func (pka PublicKeyAlgorithm) CanSign() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: - return true - } - return false -} - -// CipherFunction represents the different block ciphers specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -type CipherFunction uint8 - -const ( - Cipher3DES CipherFunction = 2 - CipherCAST5 CipherFunction = 3 - CipherAES128 CipherFunction = 7 - CipherAES192 CipherFunction = 8 - CipherAES256 CipherFunction = 9 -) - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - switch cipher { - case Cipher3DES: - return 24 - case CipherCAST5: - return cast5.KeySize - case CipherAES128: - return 16 - case CipherAES192: - return 24 - case CipherAES256: - return 32 - } - return 0 -} - -// blockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) blockSize() int { - switch cipher { - case Cipher3DES: - return des.BlockSize - case CipherCAST5: - return 8 - case CipherAES128, CipherAES192, CipherAES256: - return 16 - } - return 0 -} - -// new returns a fresh instance of the given cipher. -func (cipher CipherFunction) new(key []byte) (block cipher.Block) { - switch cipher { - case Cipher3DES: - block, _ = des.NewTripleDESCipher(key) - case CipherCAST5: - block, _ = cast5.NewCipher(key) - case CipherAES128, CipherAES192, CipherAES256: - block, _ = aes.NewCipher(key) - } - return -} - -// readMPI reads a big integer from r. The bit length returned is the bit -// length that was specified in r. This is preserved so that the integer can be -// reserialized exactly. -func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { - var buf [2]byte - _, err = readFull(r, buf[0:]) - if err != nil { - return - } - bitLength = uint16(buf[0])<<8 | uint16(buf[1]) - numBytes := (int(bitLength) + 7) / 8 - mpi = make([]byte, numBytes) - _, err = readFull(r, mpi) - return -} - -// mpiLength returns the length of the given *big.Int when serialized as an -// MPI. -func mpiLength(n *big.Int) (mpiLengthInBytes int) { - mpiLengthInBytes = 2 /* MPI length */ - mpiLengthInBytes += (n.BitLen() + 7) / 8 - return -} - -// writeMPI serializes a big integer to w. -func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { - _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) - if err == nil { - _, err = w.Write(mpiBytes) - } - return -} - -// writeBig serializes a *big.Int to w. -func writeBig(w io.Writer, i *big.Int) error { - return writeMPI(w, uint16(i.BitLen()), i.Bytes()) -} - -// CompressionAlgo Represents the different compression algorithms -// supported by OpenPGP (except for BZIP2, which is not currently -// supported). See Section 9.3 of RFC 4880. -type CompressionAlgo uint8 - -const ( - CompressionNone CompressionAlgo = 0 - CompressionZIP CompressionAlgo = 1 - CompressionZLIB CompressionAlgo = 2 -) diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go deleted file mode 100644 index 78d233cf6..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/private_key.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "bytes" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/sha1" - "io" - "io/ioutil" - "math/big" - "strconv" - "time" - - "github.com/keybase/go-crypto/openpgp/elgamal" - "github.com/keybase/go-crypto/openpgp/errors" - "github.com/keybase/go-crypto/openpgp/s2k" - "github.com/keybase/go-crypto/rsa" -) - -// PrivateKey represents a possibly encrypted private key. See RFC 4880, -// section 5.5.3. -type PrivateKey struct { - PublicKey - Encrypted bool // if true then the private key is unavailable until Decrypt has been called. - encryptedData []byte - cipher CipherFunction - s2k func(out, in []byte) - PrivateKey interface{} // An *rsa.PrivateKey or *dsa.PrivateKey. - sha1Checksum bool - iv []byte -} - -func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func (pk *PrivateKey) parse(r io.Reader) (err error) { - err = (&pk.PublicKey).parse(r) - if err != nil { - return - } - var buf [1]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - - s2kType := buf[0] - - switch s2kType { - case 0: - pk.s2k = nil - pk.Encrypted = false - case 254, 255: - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.cipher = CipherFunction(buf[0]) - pk.Encrypted = true - pk.s2k, err = s2k.Parse(r) - if err != nil { - return - } - if s2kType == 254 { - pk.sha1Checksum = true - } - // S2K == nil implies that we got a "GNU Dummy" S2K. For instance, - // because our master secret key is on a USB key in a vault somewhere. - // In that case, there is no further data to consume here. 
- if pk.s2k == nil { - pk.Encrypted = false - return - } - default: - return errors.UnsupportedError("deprecated s2k function in private key") - } - if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { - return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) - } - pk.iv = make([]byte, blockSize) - _, err = readFull(r, pk.iv) - if err != nil { - return - } - } - - pk.encryptedData, err = ioutil.ReadAll(r) - if err != nil { - return - } - - if !pk.Encrypted { - return pk.parsePrivateKey(pk.encryptedData) - } - - return -} - -func mod64kHash(d []byte) uint16 { - var h uint16 - for _, b := range d { - h += uint16(b) - } - return h -} - -func (pk *PrivateKey) Serialize(w io.Writer) (err error) { - // TODO(agl): support encrypted private keys - buf := bytes.NewBuffer(nil) - err = pk.PublicKey.serializeWithoutHeaders(buf) - if err != nil { - return - } - - privateKeyBuf := bytes.NewBuffer(nil) - - if pk.PrivateKey == nil { - _, err = buf.Write([]byte{ - 254, // SHA-1 Convention - 9, // Encryption scheme (AES256) - 101, // GNU Extensions - 2, // Hash value (SHA1) - 'G', 'N', 'U', // "GNU" as a string - 1, // Extension type 1001 (minus 1000) - }) - } else { - buf.WriteByte(0 /* no encryption */) - - switch priv := pk.PrivateKey.(type) { - case *rsa.PrivateKey: - err = serializeRSAPrivateKey(privateKeyBuf, priv) - case *dsa.PrivateKey: - err = serializeDSAPrivateKey(privateKeyBuf, priv) - case *elgamal.PrivateKey: - err = serializeElGamalPrivateKey(privateKeyBuf, priv) - case *ecdsa.PrivateKey: - err = serializeECDSAPrivateKey(privateKeyBuf, priv) - case *ecdhPrivateKey: - err = serializeECDHPrivateKey(privateKeyBuf, priv) - default: - err = errors.InvalidArgumentError("unknown private key type") - } - if err != nil { - return - } - } - - ptype := packetTypePrivateKey - contents := buf.Bytes() - privateKeyBytes := privateKeyBuf.Bytes() - if pk.IsSubkey { - ptype = packetTypePrivateSubkey - } - err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) - if err != nil { - return - } - _, err = w.Write(contents) - if err != nil { - return - } - _, err = w.Write(privateKeyBytes) - if err != nil { - return - } - - if len(privateKeyBytes) > 0 { - checksum := mod64kHash(privateKeyBytes) - var checksumBytes [2]byte - checksumBytes[0] = byte(checksum >> 8) - checksumBytes[1] = byte(checksum) - _, err = w.Write(checksumBytes[:]) - } - - return -} - -func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { - err := writeBig(w, priv.D) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[1]) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[0]) - if err != nil { - return err - } - return writeBig(w, priv.Precomputed.Qinv) -} - -func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { - return writeBig(w, priv.X) -} - -func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { - return writeBig(w, priv.X) -} - -func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { - return writeBig(w, priv.D) -} - -func serializeECDHPrivateKey(w io.Writer, priv *ecdhPrivateKey) error { - return writeBig(w, priv.x) -} - -// Decrypt decrypts an encrypted private key using a passphrase. -func (pk *PrivateKey) Decrypt(passphrase []byte) error { - if !pk.Encrypted { - return nil - } - // For GNU Dummy S2K, there's no key here, so don't do anything. 
- if pk.s2k == nil { - return nil - } - - key := make([]byte, pk.cipher.KeySize()) - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") - } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") - } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) - } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-2] - } - - return pk.parsePrivateKey(data) -} - -func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { - switch pk.PublicKey.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: - return pk.parseRSAPrivateKey(data) - case PubKeyAlgoDSA: - return pk.parseDSAPrivateKey(data) - case PubKeyAlgoElGamal: - return pk.parseElGamalPrivateKey(data) - case PubKeyAlgoECDSA: - return pk.parseECDSAPrivateKey(data) - case PubKeyAlgoECDH: - return pk.parseECDHPrivateKey(data) - } - panic("impossible") -} - -func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { - rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) - rsaPriv := new(rsa.PrivateKey) - rsaPriv.PublicKey = *rsaPub - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - p, _, err := readMPI(buf) - if err != nil { - return - } - q, _, err := readMPI(buf) - if err != nil { - return - } - - rsaPriv.D = new(big.Int).SetBytes(d) - rsaPriv.Primes = make([]*big.Int, 2) - rsaPriv.Primes[0] = new(big.Int).SetBytes(p) - rsaPriv.Primes[1] = new(big.Int).SetBytes(q) - if err := rsaPriv.Validate(); err != nil { - return err - } - rsaPriv.Precompute() - pk.PrivateKey = rsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { - dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) - dsaPriv := new(dsa.PrivateKey) - dsaPriv.PublicKey = *dsaPub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - dsaPriv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = dsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { - pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) - priv := new(elgamal.PrivateKey) - priv.PublicKey = *pub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - priv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = priv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { - pub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - priv := new(ecdhPrivateKey) - priv.PublicKey = *pub - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - - priv.x = new(big.Int).SetBytes(d) - pk.PrivateKey = priv - pk.Encrypted = false - pk.encryptedData = nil - return nil -} - -func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { - 
ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - ecdsaPriv := new(ecdsa.PrivateKey) - ecdsaPriv.PublicKey = *ecdsaPub - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - - ecdsaPriv.D = new(big.Int).SetBytes(d) - pk.PrivateKey = ecdsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go deleted file mode 100644 index ff82a5be2..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go +++ /dev/null @@ -1,870 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "github.com/agl/ed25519" - "github.com/keybase/go-crypto/brainpool" - "github.com/keybase/go-crypto/openpgp/elgamal" - "github.com/keybase/go-crypto/openpgp/errors" - "github.com/keybase/go-crypto/rsa" -) - -var ( - // NIST curve P-256 - oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} - // NIST curve P-384 - oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} - // NIST curve P-521 - oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} - // Brainpool curve P-256r1 - oidCurveP256r1 []byte = []byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07} - // Brainpool curve P-384r1 - oidCurveP384r1 []byte = []byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B} - // Brainpool curve P-512r1 - oidCurveP512r1 []byte = []byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D} - // EdDSA - oidEdDSA []byte = []byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01} -) - -const maxOIDLength = 10 - -// ecdsaKey stores the algorithm-specific fields for ECDSA keys. -// as defined in RFC 6637, Section 9. -type ecdsaKey struct { - // oid contains the OID byte sequence identifying the elliptic curve used - oid []byte - // p contains the elliptic curve point that represents the public key - p parsedMPI -} - -type edDSAkey struct { - ecdsaKey -} - -func (e *edDSAkey) Verify(payload []byte, r parsedMPI, s parsedMPI) bool { - var key [ed25519.PublicKeySize]byte - var sig [ed25519.SignatureSize]byte - - // NOTE(maxtaco): I'm not entirely sure why we need to ignore the first byte. - copy(key[:], e.p.bytes[1:]) - n := copy(sig[:], r.bytes) - copy(sig[n:], s.bytes) - - return ed25519.Verify(&key, payload, &sig) -} - -// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. 
-func parseOID(r io.Reader) (oid []byte, err error) { - buf := make([]byte, maxOIDLength) - if _, err = readFull(r, buf[:1]); err != nil { - return - } - oidLen := buf[0] - if int(oidLen) > len(buf) { - err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) - return - } - oid = buf[:oidLen] - _, err = readFull(r, oid) - return -} - -func (f *ecdsaKey) parse(r io.Reader) (err error) { - if f.oid, err = parseOID(r); err != nil { - return err - } - f.p.bytes, f.p.bitLength, err = readMPI(r) - return -} - -func (f *ecdsaKey) serialize(w io.Writer) (err error) { - buf := make([]byte, maxOIDLength+1) - buf[0] = byte(len(f.oid)) - copy(buf[1:], f.oid) - if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { - return - } - return writeMPIs(w, f.p) -} - -func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { - var c elliptic.Curve - if bytes.Equal(f.oid, oidCurveP256) { - c = elliptic.P256() - } else if bytes.Equal(f.oid, oidCurveP384) { - c = elliptic.P384() - } else if bytes.Equal(f.oid, oidCurveP521) { - c = elliptic.P521() - } else if bytes.Equal(f.oid, oidCurveP256r1) { - c = brainpool.P256r1() - } else if bytes.Equal(f.oid, oidCurveP384r1) { - c = brainpool.P384r1() - } else if bytes.Equal(f.oid, oidCurveP512r1) { - c = brainpool.P512r1() - } else { - return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) - } - x, y := elliptic.Unmarshal(c, f.p.bytes) - if x == nil { - return nil, errors.UnsupportedError("failed to parse EC point") - } - return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil -} - -func (f *ecdsaKey) byteLen() int { - return 1 + len(f.oid) + 2 + len(f.p.bytes) -} - -type kdfHashFunction byte -type kdfAlgorithm byte - -// ecdhKdf stores key derivation function parameters -// used for ECDH encryption. See RFC 6637, Section 9. -type ecdhKdf struct { - KdfHash kdfHashFunction - KdfAlgo kdfAlgorithm -} - -func (f *ecdhKdf) parse(r io.Reader) (err error) { - buf := make([]byte, 1) - if _, err = readFull(r, buf); err != nil { - return - } - kdfLen := int(buf[0]) - if kdfLen < 3 { - return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) - } - buf = make([]byte, kdfLen) - if _, err = readFull(r, buf); err != nil { - return - } - reserved := int(buf[0]) - f.KdfHash = kdfHashFunction(buf[1]) - f.KdfAlgo = kdfAlgorithm(buf[2]) - if reserved != 0x01 { - return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) - } - return -} - -func (f *ecdhKdf) serialize(w io.Writer) (err error) { - buf := make([]byte, 4) - // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. - buf[0] = byte(0x03) // Length of the following fields - buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now - buf[2] = byte(f.KdfHash) - buf[3] = byte(f.KdfAlgo) - _, err = w.Write(buf[:]) - return -} - -func (f *ecdhKdf) byteLen() int { - return 4 -} - -// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. -type PublicKey struct { - CreationTime time.Time - PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey - Fingerprint [20]byte - KeyId uint64 - IsSubkey bool - - n, e, p, q, g, y parsedMPI - - // RFC 6637 fields - ec *ecdsaKey - ecdh *ecdhKdf - - // EdDSA fields (no RFC available), uses ecdsa scaffolding - edk *edDSAkey -} - -// signingKey provides a convenient abstraction over signature verification -// for v3 and v4 public keys. 
-type signingKey interface { - SerializeSignaturePrefix(io.Writer) - serializeWithoutHeaders(io.Writer) error -} - -func FromBig(n *big.Int) parsedMPI { - return parsedMPI{ - bytes: n.Bytes(), - bitLength: uint16(n.BitLen()), - } -} - -func FromBytes(bytes []byte) parsedMPI { - return parsedMPI{ - bytes: bytes, - bitLength: uint16(8 * len(bytes)), - } -} - -// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. -func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoRSA, - PublicKey: pub, - n: FromBig(pub.N), - e: FromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. -func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoDSA, - PublicKey: pub, - p: FromBig(pub.P), - q: FromBig(pub.Q), - g: FromBig(pub.G), - y: FromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// check EdDSA public key material. -// There is currently no RFC for it, but it doesn't mean it's not -// implemented or in use. -func (e *edDSAkey) check() error { - if !bytes.Equal(e.oid, oidEdDSA) { - return errors.UnsupportedError(fmt.Sprintf("Bad OID for EdDSA key: %v", e.oid)) - } - return nil -} - -// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. -func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoElGamal, - PublicKey: pub, - p: FromBig(pub.P), - g: FromBig(pub.G), - y: FromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDSA, - PublicKey: pub, - ec: new(ecdsaKey), - } - switch pub.Curve { - case elliptic.P256(): - pk.ec.oid = oidCurveP256 - case elliptic.P384(): - pk.ec.oid = oidCurveP384 - case elliptic.P521(): - pk.ec.oid = oidCurveP521 - case brainpool.P256r1(): - pk.ec.oid = oidCurveP256r1 - case brainpool.P384r1(): - pk.ec.oid = oidCurveP384r1 - case brainpool.P512r1(): - pk.ec.oid = oidCurveP512r1 - } - pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - pk.ec.p.bitLength = uint16(8 * len(pk.ec.p.bytes)) - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKey) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [6]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != 4 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - case PubKeyAlgoDSA: - err = pk.parseDSA(r) - case PubKeyAlgoElGamal: - err = pk.parseElGamal(r) - case PubKeyAlgoEdDSA: - pk.edk = &edDSAkey{} - if err = pk.edk.parse(r); err != nil { - return err - } - err = pk.edk.check() - case PubKeyAlgoECDSA: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return err - } - pk.PublicKey, err = pk.ec.newECDSA() - case PubKeyAlgoECDH: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return - } - pk.ecdh = new(ecdhKdf) - if err = 
pk.ecdh.parse(r); err != nil { - return - } - // The ECDH key is stored in an ecdsa.PublicKey for convenience. - pk.PublicKey, err = pk.ec.newECDSA() - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKey) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := sha1.New() - pk.SerializeSignaturePrefix(fingerPrint) - pk.serializeWithoutHeaders(fingerPrint) - copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseRSA(r io.Reader) (err error) { - pk.n.bytes, pk.n.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.e.bytes, pk.e.bitLength, err = readMPI(r) - if err != nil { - return - } - - if len(pk.e.bytes) > 7 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{ - N: new(big.Int).SetBytes(pk.n.bytes), - E: 0, - } - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int64(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// parseDSA parses DSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseDSA(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.q.bytes, pk.q.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - dsa := new(dsa.PublicKey) - dsa.P = new(big.Int).SetBytes(pk.p.bytes) - dsa.Q = new(big.Int).SetBytes(pk.q.bytes) - dsa.G = new(big.Int).SetBytes(pk.g.bytes) - dsa.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = dsa - return -} - -// parseElGamal parses ElGamal public key material from the given Reader. See -// RFC 4880, section 5.5.2. -func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - elgamal := new(elgamal.PublicKey) - elgamal.P = new(big.Int).SetBytes(pk.p.bytes) - elgamal.G = new(big.Int).SetBytes(pk.g.bytes) - elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = elgamal - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. 
-func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - case PubKeyAlgoDSA: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.q.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoElGamal: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoECDSA: - pLength += uint16(pk.ec.byteLen()) - case PubKeyAlgoECDH: - pLength += uint16(pk.ec.byteLen()) - pLength += uint16(pk.ecdh.byteLen()) - case PubKeyAlgoEdDSA: - pLength += uint16(pk.edk.byteLen()) - default: - panic("unknown public key algorithm") - } - pLength += 6 - h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - case PubKeyAlgoDSA: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.q.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoElGamal: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoECDSA: - length += pk.ec.byteLen() - case PubKeyAlgoECDH: - length += pk.ec.byteLen() - length += pk.ecdh.byteLen() - case PubKeyAlgoEdDSA: - length += pk.edk.byteLen() - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - err = serializeHeader(w, packetType, length) - if err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. -func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [6]byte - buf[0] = 4 - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - buf[5] = byte(pk.PubKeyAlgo) - - _, err = w.Write(buf[:]) - if err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - case PubKeyAlgoDSA: - return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) - case PubKeyAlgoElGamal: - return writeMPIs(w, pk.p, pk.g, pk.y) - case PubKeyAlgoECDSA: - return pk.ec.serialize(w) - case PubKeyAlgoEdDSA: - return pk.edk.serialize(w) - case PubKeyAlgoECDH: - if err = pk.ec.serialize(w); err != nil { - return - } - return pk.ecdh.serialize(w) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKey) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal -} - -// VerifySignature returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. 
-func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - signed.Write(sig.HashSuffix) - hashBytes := signed.Sum(nil) - - // NOTE(maxtaco) 2016-08-22 - // - // We used to do this: - // - // if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - // return errors.SignatureError("hash tag doesn't match") - // } - // - // But don't do anything in this case. Some GPGs generate bad - // 2-byte hash prefixes, but GPG also doesn't seem to care on - // import. See BrentMaxwell's key. I think it's safe to disable - // this check! - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes) - if err != nil { - return errors.SignatureError("RSA verification failure") - } - return nil - case PubKeyAlgoDSA: - dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - case PubKeyAlgoECDSA: - ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) - if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { - return errors.SignatureError("ECDSA verification failure") - } - return nil - case PubKeyAlgoEdDSA: - if !pk.edk.Verify(hashBytes, sig.EdDSASigR, sig.EdDSASigS) { - return errors.SignatureError("EdDSA verification failure") - } - return nil - default: - return errors.SignatureError("Unsupported public key algorithm used in signature") - } - panic("unreachable") -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) - if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - case PubKeyAlgoDSA: - dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
- subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - default: - panic("shouldn't happen") - } - panic("unreachable") -} - -// keySignatureHash returns a Hash of the message that needs to be signed for -// pk to assert a subkey relationship to signed. -func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - updateKeySignatureHash(pk, signed, h) - - return -} - -// updateKeySignatureHash does the actual hash updates for keySignatureHash. -func updateKeySignatureHash(pk, signed signingKey, h hash.Hash) { - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - signed.SerializeSignaturePrefix(h) - signed.serializeWithoutHeaders(h) -} - -// VerifyKeySignature returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - if err = pk.VerifySignature(h, sig); err != nil { - return err - } - - if sig.FlagSign { - - // BUG(maxtaco) - // - // We should check for more than FlagsSign here, because if - // you read keys.go, we can sometimes use signing subkeys even if they're - // not explicitly flagged as such. However, so doing fails lots of currently - // working tests, so I'm not going to do much here. - // - // In other words, we should have this disjunction in the condition above: - // - // || (!sig.FlagsValid && pk.PubKeyAlgo.CanSign()) { - // - - // Signing subkeys must be cross-signed. See - // https://www.gnupg.org/faq/subkey-cross-certify.html. - if sig.EmbeddedSignature == nil { - return errors.StructuralError("signing subkey is missing cross-signature") - } - // Verify the cross-signature. This is calculated over the same - // data as the main signature, so we cannot just recursively - // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { - return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) - } - if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { - return errors.StructuralError("error while verifying cross-signature: " + err.Error()) - } - } - - return nil -} - -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - return -} - -// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this -// public key. 
-func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -type teeHash struct { - h hash.Hash -} - -func (t teeHash) Write(b []byte) (n int, err error) { - fmt.Printf("hash -> %s %+v\n", string(b), b) - return t.h.Write(b) -} -func (t teeHash) Sum(b []byte) []byte { return t.h.Sum(b) } -func (t teeHash) Reset() { t.h.Reset() } -func (t teeHash) Size() int { return t.h.Size() } -func (t teeHash) BlockSize() int { return t.h.BlockSize() } - -// userIdSignatureHash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - updateUserIdSignatureHash(id, pk, h) - - return -} - -// updateUserIdSignatureHash does the actual hash updates for -// userIdSignatureHash. -func updateUserIdSignatureHash(id string, pk *PublicKey, h hash.Hash) { - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - var buf [5]byte - buf[0] = 0xb4 - buf[1] = byte(len(id) >> 24) - buf[2] = byte(len(id) >> 16) - buf[3] = byte(len(id) >> 8) - buf[4] = byte(len(id)) - h.Write(buf[:]) - h.Write([]byte(id)) - - return -} - -// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKey) KeyIdString() string { - return fmt.Sprintf("%X", pk.Fingerprint[12:20]) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKey) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.Fingerprint[16:20]) -} - -// A parsedMPI is used to store the contents of a big integer, along with the -// bit length that was specified in the original input. This allows the MPI to -// be reserialized exactly. -type parsedMPI struct { - bytes []byte - bitLength uint16 -} - -// writeMPIs is a utility function for serializing several big integers to the -// given Writer. -func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { - for _, mpi := range mpis { - err = writeMPI(w, mpi.bitLength, mpi.bytes) - if err != nil { - return - } - } - return -} - -// BitLength returns the bit length for the given public key. 
-func (pk *PublicKey) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - case PubKeyAlgoDSA: - bitLength = pk.p.bitLength - case PubKeyAlgoElGamal: - bitLength = pk.p.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go deleted file mode 100644 index 52474677b..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/public_key_v3.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/md5" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "github.com/keybase/go-crypto/openpgp/errors" - "github.com/keybase/go-crypto/rsa" -) - -// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and -// should not be used for signing or encrypting. They are supported here only for -// parsing version 3 key material and validating signatures. -// See RFC 4880, section 5.5.2. -type PublicKeyV3 struct { - CreationTime time.Time - DaysToExpire uint16 - PubKeyAlgo PublicKeyAlgorithm - PublicKey *rsa.PublicKey - Fingerprint [16]byte - KeyId uint64 - IsSubkey bool - - n, e parsedMPI -} - -// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. -// Included here for testing purposes only. RFC 4880, section 5.5.2: -// "an implementation MUST NOT generate a V3 key, but MAY accept it." -func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { - pk := &PublicKeyV3{ - CreationTime: creationTime, - PublicKey: pub, - n: FromBig(pub.N), - e: FromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKeyV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [8]byte - if _, err = readFull(r, buf[:]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKeyV3) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := md5.New() - fingerPrint.Write(pk.n.bytes) - fingerPrint.Write(pk.e.bytes) - fingerPrint.Sum(pk.Fingerprint[:0]) - pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { - if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { - return - } - if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { - return - } - - // RFC 4880 Section 12.2 requires the low 8 bytes of the - // modulus to form the key id. 
- if len(pk.n.bytes) < 8 { - return errors.StructuralError("v3 public key modulus is too short") - } - if len(pk.e.bytes) > 7 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int64(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. -func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - default: - panic("unknown public key algorithm") - } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { - length := 8 // 8 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - if err = serializeHeader(w, packetType, length); err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. -func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [8]byte - // Version 3 - buf[0] = 3 - // Creation time - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - // Days to expire - buf[5] = byte(pk.DaysToExpire >> 8) - buf[6] = byte(pk.DaysToExpire) - // Public key algorithm - buf[7] = byte(pk.PubKeyAlgo) - - if _, err = w.Write(buf[:]); err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKeyV3) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. 
-func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - default: - // V3 public keys only support RSA. - panic("shouldn't happen") - } - panic("unreachable") -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// userIdSignatureV3Hash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { - if !hfn.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hfn.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - h.Write([]byte(id)) - - return -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKeyV3) KeyIdString() string { - return fmt.Sprintf("%X", pk.KeyId) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKeyV3) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go deleted file mode 100644 index 957b3b897..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/reader.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "io" - - "github.com/keybase/go-crypto/openpgp/errors" -) - -// Reader reads packets from an io.Reader and allows packets to be 'unread' so -// that they result from the next call to Next. -type Reader struct { - q []Packet - readers []io.Reader -} - -// New io.Readers are pushed when a compressed or encrypted packet is processed -// and recursively treated as a new source of packets. However, a carefully -// crafted packet can trigger an infinite recursive sequence of packets. See -// http://mumble.net/~campbell/misc/pgp-quine -// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 -// This constant limits the number of recursive packets that may be pushed. -const maxReaders = 32 - -// Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. -func (r *Reader) Next() (p Packet, err error) { - if len(r.q) > 0 { - p = r.q[len(r.q)-1] - r.q = r.q[:len(r.q)-1] - return - } - - for len(r.readers) > 0 { - p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } - if err == io.EOF { - r.readers = r.readers[:len(r.readers)-1] - continue - } - if _, ok := err.(errors.UnknownPacketTypeError); !ok { - return nil, err - } - } - return nil, io.EOF -} - -// Push causes the Reader to start reading from a new io.Reader. When an EOF -// error is seen from the new io.Reader, it is popped and the Reader continues -// to read from the next most recent io.Reader. Push returns a StructuralError -// if pushing the reader would exceed the maximum recursion level, otherwise it -// returns nil. -func (r *Reader) Push(reader io.Reader) (err error) { - if len(r.readers) >= maxReaders { - return errors.StructuralError("too many layers of packets") - } - r.readers = append(r.readers, reader) - return nil -} - -// Unread causes the given Packet to be returned from the next call to Next. -func (r *Reader) Unread(p Packet) { - r.q = append(r.q, p) -} - -func NewReader(r io.Reader) *Reader { - return &Reader{ - q: nil, - readers: []io.Reader{r}, - } -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go deleted file mode 100644 index 994c49787..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go +++ /dev/null @@ -1,781 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "encoding/binary" - "hash" - "io" - "strconv" - "time" - - "github.com/keybase/go-crypto/openpgp/errors" - "github.com/keybase/go-crypto/openpgp/s2k" - "github.com/keybase/go-crypto/rsa" -) - -const ( - // See RFC 4880, section 5.2.3.21 for details. - KeyFlagCertify = 1 << iota - KeyFlagSign - KeyFlagEncryptCommunications - KeyFlagEncryptStorage -) - -// Signer can be implemented by application code to do actual signing. -type Signer interface { - hash.Hash - Sign(sig *Signature) error - KeyId() uint64 - PublicKeyAlgo() PublicKeyAlgorithm -} - -// Signature represents a signature. See RFC 4880, section 5.2. -type Signature struct { - SigType SignatureType - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - - // HashSuffix is extra data that is hashed in after the signed data. - HashSuffix []byte - // HashTag contains the first two bytes of the hash for fast rejection - // of bad signed data. 
- HashTag [2]byte - CreationTime time.Time - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI - ECDSASigR, ECDSASigS parsedMPI - EdDSASigR, EdDSASigS parsedMPI - - // rawSubpackets contains the unparsed subpackets, in order. - rawSubpackets []outputSubpacket - - // The following are optional so are nil when not included in the - // signature. - - SigLifetimeSecs, KeyLifetimeSecs *uint32 - PreferredSymmetric, PreferredHash, PreferredCompression []uint8 - PreferredKeyServer string - IssuerKeyId *uint64 - IsPrimaryId *bool - - // FlagsValid is set if any flags were given. See RFC 4880, section - // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool - - // RevocationReason is set if this signature has been revoked. - // See RFC 4880, section 5.2.3.23 for details. - RevocationReason *uint8 - RevocationReasonText string - - // PolicyURI is optional. See RFC 4880, Section 5.2.3.20 for details - PolicyURI string - - // Regex is a regex that can match a PGP UID. See RFC 4880, 5.2.3.14 for details - Regex string - - // MDC is set if this signature has a feature packet that indicates - // support for MDC subpackets. - MDC bool - - // EmbeddedSignature, if non-nil, is a signature of the parent key, by - // this key. This prevents an attacker from claiming another's signing - // subkey as their own. - EmbeddedSignature *Signature - - // StubbedOutCriticalError is not fail-stop, since it shouldn't break key parsing - // when appearing in WoT-style cross signatures. But it should prevent a signature - // from being applied to a primary or subkey. - StubbedOutCriticalError error - - outSubpackets []outputSubpacket -} - -func (sig *Signature) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.3 - var buf [5]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - if buf[0] != 4 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - - _, err = readFull(r, buf[:5]) - if err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - - var ok bool - sig.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) - l := 6 + hashedSubpacketsLength - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - copy(sig.HashSuffix[1:], buf[:5]) - hashedSubpackets := sig.HashSuffix[6:l] - _, err = readFull(r, hashedSubpackets) - if err != nil { - return - } - // See RFC 4880, section 5.2.4 - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = uint8(l >> 24) - trailer[3] = uint8(l >> 16) - trailer[4] = uint8(l >> 8) - trailer[5] = uint8(l) - - err = parseSignatureSubpackets(sig, hashedSubpackets, true) - if err != nil { - return - } - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) - unhashedSubpackets := make([]byte, unhashedSubpacketsLength) - _, err = readFull(r, unhashedSubpackets) - if err != nil { - return - } - err = parseSignatureSubpackets(sig, unhashedSubpackets, false) - if err != nil { - return - } - - _, err = readFull(r, sig.HashTag[:2]) - if err != nil 
{ - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - } - case PubKeyAlgoEdDSA: - sig.EdDSASigR.bytes, sig.EdDSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.EdDSASigS.bytes, sig.EdDSASigS.bitLength, err = readMPI(r) - } - case PubKeyAlgoECDSA: - sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) - } - default: - panic("unreachable") - } - return -} - -// parseSignatureSubpackets parses subpackets of the main signature packet. See -// RFC 4880, section 5.2.3.1. -func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { - for len(subpackets) > 0 { - subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) - if err != nil { - return - } - } - - if sig.CreationTime.IsZero() { - err = errors.StructuralError("no creation time in signature") - } - - return -} - -type signatureSubpacketType uint8 - -const ( - creationTimeSubpacket signatureSubpacketType = 2 - signatureExpirationSubpacket signatureSubpacketType = 3 - regularExpressionSubpacket signatureSubpacketType = 6 - keyExpirationSubpacket signatureSubpacketType = 9 - prefSymmetricAlgosSubpacket signatureSubpacketType = 11 - issuerSubpacket signatureSubpacketType = 16 - prefHashAlgosSubpacket signatureSubpacketType = 21 - prefCompressionSubpacket signatureSubpacketType = 22 - prefKeyServerSubpacket signatureSubpacketType = 24 - primaryUserIdSubpacket signatureSubpacketType = 25 - policyURISubpacket signatureSubpacketType = 26 - keyFlagsSubpacket signatureSubpacketType = 27 - reasonForRevocationSubpacket signatureSubpacketType = 29 - featuresSubpacket signatureSubpacketType = 30 - embeddedSignatureSubpacket signatureSubpacketType = 32 -) - -// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. 
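// Illustrative sketch (editor's note, not part of the vendored file): the
// variable-length subpacket length encoding from RFC 4880 section 5.2.3.1 that
// parseSignatureSubpacket decodes below. A first octet below 192 is the length
// itself, 192..254 introduces a two-octet form, and 255 introduces a
// five-octet big-endian form.
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

func decodeSubpacketLength(b []byte) (length uint32, rest []byte, err error) {
	switch {
	case len(b) == 0:
		return 0, nil, errors.New("truncated subpacket")
	case b[0] < 192:
		return uint32(b[0]), b[1:], nil
	case b[0] < 255:
		if len(b) < 2 {
			return 0, nil, errors.New("truncated subpacket")
		}
		return uint32(b[0]-192)<<8 + uint32(b[1]) + 192, b[2:], nil
	default:
		if len(b) < 5 {
			return 0, nil, errors.New("truncated subpacket")
		}
		return binary.BigEndian.Uint32(b[1:5]), b[5:], nil
	}
}

func main() {
	l, _, _ := decodeSubpacketLength([]byte{0xc0, 0x00}) // two-octet form
	fmt.Println(l)                                       // 192
}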
-func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { - // RFC 4880, section 5.2.3.1 - var ( - length uint32 - packetType signatureSubpacketType - isCritical bool - ) - switch { - case subpacket[0] < 192: - length = uint32(subpacket[0]) - subpacket = subpacket[1:] - case subpacket[0] < 255: - if len(subpacket) < 2 { - goto Truncated - } - length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 - subpacket = subpacket[2:] - default: - if len(subpacket) < 5 { - goto Truncated - } - length = uint32(subpacket[1])<<24 | - uint32(subpacket[2])<<16 | - uint32(subpacket[3])<<8 | - uint32(subpacket[4]) - subpacket = subpacket[5:] - } - if length > uint32(len(subpacket)) { - goto Truncated - } - rest = subpacket[length:] - subpacket = subpacket[:length] - if len(subpacket) == 0 { - err = errors.StructuralError("zero length signature subpacket") - return - } - packetType = signatureSubpacketType(subpacket[0] & 0x7f) - isCritical = subpacket[0]&0x80 == 0x80 - subpacket = subpacket[1:] - sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) - switch packetType { - case creationTimeSubpacket: - if !isHashed { - err = errors.StructuralError("signature creation time in non-hashed area") - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("signature creation time not four bytes") - return - } - t := binary.BigEndian.Uint32(subpacket) - sig.CreationTime = time.Unix(int64(t), 0) - case signatureExpirationSubpacket: - // Signature expiration time, section 5.2.3.10 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("expiration subpacket with bad length") - return - } - sig.SigLifetimeSecs = new(uint32) - *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case keyExpirationSubpacket: - // Key expiration time, section 5.2.3.6 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("key expiration subpacket with bad length") - return - } - sig.KeyLifetimeSecs = new(uint32) - *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case prefSymmetricAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.7 - if !isHashed { - return - } - sig.PreferredSymmetric = make([]byte, len(subpacket)) - copy(sig.PreferredSymmetric, subpacket) - case issuerSubpacket: - // Issuer, section 5.2.3.5 - if len(subpacket) != 8 { - err = errors.StructuralError("issuer subpacket with bad length") - return - } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) - case prefHashAlgosSubpacket: - // Preferred hash algorithms, section 5.2.3.8 - if !isHashed { - return - } - sig.PreferredHash = make([]byte, len(subpacket)) - copy(sig.PreferredHash, subpacket) - case prefCompressionSubpacket: - // Preferred compression algorithms, section 5.2.3.9 - if !isHashed { - return - } - sig.PreferredCompression = make([]byte, len(subpacket)) - copy(sig.PreferredCompression, subpacket) - case primaryUserIdSubpacket: - // Primary User ID, section 5.2.3.19 - if !isHashed { - return - } - if len(subpacket) != 1 { - err = errors.StructuralError("primary user id subpacket with bad length") - return - } - sig.IsPrimaryId = new(bool) - if subpacket[0] > 0 { - *sig.IsPrimaryId = true - } - case keyFlagsSubpacket: - // Key flags, section 5.2.3.21 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty key flags subpacket") - return - } - sig.FlagsValid = true - if 
subpacket[0]&KeyFlagCertify != 0 { - sig.FlagCertify = true - } - if subpacket[0]&KeyFlagSign != 0 { - sig.FlagSign = true - } - if subpacket[0]&KeyFlagEncryptCommunications != 0 { - sig.FlagEncryptCommunications = true - } - if subpacket[0]&KeyFlagEncryptStorage != 0 { - sig.FlagEncryptStorage = true - } - case reasonForRevocationSubpacket: - // Reason For Revocation, section 5.2.3.23 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty revocation reason subpacket") - return - } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] - sig.RevocationReasonText = string(subpacket[1:]) - case featuresSubpacket: - // Features subpacket, section 5.2.3.24 specifies a very general - // mechanism for OpenPGP implementations to signal support for new - // features. In practice, the subpacket is used exclusively to - // indicate support for MDC-protected encryption. - sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 - case embeddedSignatureSubpacket: - // Only usage is in signatures that cross-certify - // signing subkeys. section 5.2.3.26 describes the - // format, with its usage described in section 11.1 - if sig.EmbeddedSignature != nil { - err = errors.StructuralError("Cannot have multiple embedded signatures") - return - } - sig.EmbeddedSignature = new(Signature) - // Embedded signatures are required to be v4 signatures see - // section 12.1. However, we only parse v4 signatures in this - // file anyway. - if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { - return nil, err - } - if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { - return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) - } - case policyURISubpacket: - // See RFC 4880, Section 5.2.3.20 - sig.PolicyURI = string(subpacket[:]) - case regularExpressionSubpacket: - sig.Regex = string(subpacket[:]) - if isCritical { - sig.StubbedOutCriticalError = errors.UnsupportedError("regex support is stubbed out") - } - case prefKeyServerSubpacket: - sig.PreferredKeyServer = string(subpacket[:]) - default: - if isCritical { - err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) - return - } - } - return - -Truncated: - err = errors.StructuralError("signature subpacket truncated") - return -} - -// subpacketLengthLength returns the length, in bytes, of an encoded length value. -func subpacketLengthLength(length int) int { - if length < 192 { - return 1 - } - if length < 16320 { - return 2 - } - return 5 -} - -// serializeSubpacketLength marshals the given length into to. -func serializeSubpacketLength(to []byte, length int) int { - // RFC 4880, Section 4.2.2. - if length < 192 { - to[0] = byte(length) - return 1 - } - if length < 16320 { - length -= 192 - to[0] = byte((length >> 8) + 192) - to[1] = byte(length) - return 2 - } - to[0] = 255 - to[1] = byte(length >> 24) - to[2] = byte(length >> 16) - to[3] = byte(length >> 8) - to[4] = byte(length) - return 5 -} - -// subpacketsLength returns the serialized length, in bytes, of the given -// subpackets. -func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - length += subpacketLengthLength(len(subpacket.contents) + 1) - length += 1 // type byte - length += len(subpacket.contents) - } - } - return -} - -// serializeSubpackets marshals the given subpackets into to. 
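// Illustrative sketch (editor's note, not part of the vendored file): the
// encoder side of the subpacket length rules that serializeSubpacketLength
// above implements (RFC 4880 section 4.2.2): one octet up to 191, two octets
// up to 16319, otherwise 0xff plus a four-octet big-endian length.
package main

import "fmt"

func encodeSubpacketLength(length int) []byte {
	switch {
	case length < 192:
		return []byte{byte(length)}
	case length < 16320:
		length -= 192
		return []byte{byte((length >> 8) + 192), byte(length)}
	default:
		return []byte{255, byte(length >> 24), byte(length >> 16), byte(length >> 8), byte(length)}
	}
}

func main() {
	fmt.Printf("% x\n", encodeSubpacketLength(100))   // 64
	fmt.Printf("% x\n", encodeSubpacketLength(1000))  // c3 28
	fmt.Printf("% x\n", encodeSubpacketLength(70000)) // ff 00 01 11 70
}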
-func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - n := serializeSubpacketLength(to, len(subpacket.contents)+1) - to[n] = byte(subpacket.subpacketType) - to = to[1+n:] - n = copy(to, subpacket.contents) - to = to[n:] - } - } - return -} - -// KeyExpired returns whether sig is a self-signature of a key that has -// expired. -func (sig *Signature) KeyExpired(currentTime time.Time) bool { - if sig.KeyLifetimeSecs == nil { - return false - } - expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} - -// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. -func (sig *Signature) buildHashSuffix() (err error) { - hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) - - var ok bool - l := 6 + hashedSubpacketsLen - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - sig.HashSuffix[1] = uint8(sig.SigType) - sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) - sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) - if !ok { - sig.HashSuffix = nil - return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) - } - sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) - sig.HashSuffix[5] = byte(hashedSubpacketsLen) - serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = byte(l >> 24) - trailer[3] = byte(l >> 16) - trailer[4] = byte(l >> 8) - trailer[5] = byte(l) - return -} - -func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { - err = sig.buildHashSuffix() - if err != nil { - return - } - - h.Write(sig.HashSuffix) - digest = h.Sum(nil) - copy(sig.HashTag[:], digest) - return -} - -// Sign signs a message with a private key. The hash, h, must contain -// the hash of the message to be signed and will be mutated by this function. -// On success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { - signer, hashIsSigner := h.(Signer) - - if !hashIsSigner && (priv == nil || priv.PrivateKey == nil) { - err = errors.InvalidArgumentError("attempting to sign with nil PrivateKey") - return - } - - sig.outSubpackets = sig.buildSubpackets() - digest, err := sig.signPrepareHash(h) - if err != nil { - return - } - - if hashIsSigner { - err = signer.Sign(sig) - return - } - - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, err = rsa.SignPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), sig.Hash, digest) - sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) - case PubKeyAlgoDSA: - dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) - - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
- subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 - if len(digest) > subgroupSize { - digest = digest[:subgroupSize] - } - r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) - if err == nil { - sig.DSASigR.bytes = r.Bytes() - sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) - sig.DSASigS.bytes = s.Bytes() - sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) - } - case PubKeyAlgoECDSA: - r, s, err := ecdsa.Sign(config.Random(), priv.PrivateKey.(*ecdsa.PrivateKey), digest) - if err == nil { - sig.ECDSASigR = FromBig(r) - sig.ECDSASigS = FromBig(s) - } - default: - err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) - } - - return -} - -// SignUserId computes a signature from priv, asserting that pub is a valid -// key for the identity id. On success, the signature is stored in sig. Call -// Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return nil - } - return sig.Sign(h, priv, config) -} - -// SignUserIdWithSigner computes a signature from priv, asserting that pub is a -// valid key for the identity id. On success, the signature is stored in sig. -// Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignUserIdWithSigner(id string, pub *PublicKey, s Signer, config *Config) error { - updateUserIdSignatureHash(id, pub, s) - - return sig.Sign(s, nil, config) -} - -// SignKey computes a signature from priv, asserting that pub is a subkey. On -// success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// SignKeyWithSigner computes a signature using s, asserting that -// signeePubKey is a subkey. On success, the signature is stored in sig. Call -// Serialize to write it out. If config is nil, sensible defaults will be used. -func (sig *Signature) SignKeyWithSigner(signeePubKey *PublicKey, signerPubKey *PublicKey, s Signer, config *Config) error { - updateKeySignatureHash(signerPubKey, signeePubKey, s) - - return sig.Sign(s, nil, config) -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. 
-func (sig *Signature) Serialize(w io.Writer) (err error) { - if len(sig.outSubpackets) == 0 { - sig.outSubpackets = sig.rawSubpackets - } - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - sigLength := 0 - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sigLength = 2 + len(sig.RSASignature.bytes) - case PubKeyAlgoDSA: - sigLength = 2 + len(sig.DSASigR.bytes) - sigLength += 2 + len(sig.DSASigS.bytes) - case PubKeyAlgoEdDSA: - sigLength = 2 + len(sig.EdDSASigR.bytes) - sigLength += 2 + len(sig.EdDSASigS.bytes) - case PubKeyAlgoECDSA: - sigLength = 2 + len(sig.ECDSASigR.bytes) - sigLength += 2 + len(sig.ECDSASigS.bytes) - default: - panic("impossible") - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + - 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + - 2 /* hash tag */ + sigLength - err = serializeHeader(w, packetTypeSignature, length) - if err != nil { - return - } - - _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) - if err != nil { - return - } - - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) - - _, err = w.Write(unhashedSubpackets) - if err != nil { - return - } - _, err = w.Write(sig.HashTag[:]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - case PubKeyAlgoEdDSA: - err = writeMPIs(w, sig.EdDSASigR, sig.EdDSASigS) - case PubKeyAlgoECDSA: - err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) - default: - panic("impossible") - } - return -} - -// outputSubpacket represents a subpacket to be marshaled. -type outputSubpacket struct { - hashed bool // true if this subpacket is in the hashed area. - subpacketType signatureSubpacketType - isCritical bool - contents []byte -} - -func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { - creationTime := make([]byte, 4) - binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) - subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) - - if sig.IssuerKeyId != nil { - keyId := make([]byte, 8) - binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) - } - - if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { - sigLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) - } - - // Key flags may only appear in self-signatures or certification signatures. 
- - if sig.FlagsValid { - var flags byte - if sig.FlagCertify { - flags |= KeyFlagCertify - } - if sig.FlagSign { - flags |= KeyFlagSign - } - if sig.FlagEncryptCommunications { - flags |= KeyFlagEncryptCommunications - } - if sig.FlagEncryptStorage { - flags |= KeyFlagEncryptStorage - } - subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) - } - - // The following subpackets may only appear in self-signatures - - if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { - keyLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) - } - - if sig.IsPrimaryId != nil && *sig.IsPrimaryId { - subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) - } - - if len(sig.PreferredSymmetric) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) - } - - if len(sig.PreferredHash) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) - } - - if len(sig.PreferredCompression) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) - } - - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go deleted file mode 100644 index dfca651be..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/signature_v3.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "fmt" - "io" - "strconv" - "time" - - "github.com/keybase/go-crypto/openpgp/errors" - "github.com/keybase/go-crypto/openpgp/s2k" -) - -// SignatureV3 represents older version 3 signatures. These signatures are less secure -// than version 4 and should not be used to create new signatures. They are included -// here for backwards compatibility to read and validate with older key material. -// See RFC 4880, section 5.2.2. -type SignatureV3 struct { - SigType SignatureType - CreationTime time.Time - IssuerKeyId uint64 - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - HashTag [2]byte - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI -} - -func (sig *SignatureV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.2 - var buf [8]byte - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] != 5 { - err = errors.UnsupportedError( - "invalid hashed material length " + strconv.Itoa(int(buf[0]))) - return - } - - // Read hashed material: signature type + creation time - if _, err = readFull(r, buf[:5]); err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - t := binary.BigEndian.Uint32(buf[1:5]) - sig.CreationTime = time.Unix(int64(t), 0) - - // Eight-octet Key ID of signer. 
- if _, err = readFull(r, buf[:8]); err != nil { - return - } - sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:]) - - // Public-key and hash algorithm - if _, err = readFull(r, buf[:2]); err != nil { - return - } - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - var ok bool - if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - // Two-octet field holding left 16 bits of signed hash value. - if _, err = readFull(r, sig.HashTag[:2]); err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil { - return - } - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - default: - panic("unreachable") - } - return -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. -func (sig *SignatureV3) Serialize(w io.Writer) (err error) { - buf := make([]byte, 8) - - // Write the sig type and creation time - buf[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix())) - if _, err = w.Write(buf[:5]); err != nil { - return - } - - // Write the issuer long key ID - binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId) - if _, err = w.Write(buf[:8]); err != nil { - return - } - - // Write public key algorithm, hash ID, and hash value - buf[0] = byte(sig.PubKeyAlgo) - hashId, ok := s2k.HashToHashId(sig.Hash) - if !ok { - return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash)) - } - buf[1] = hashId - copy(buf[2:4], sig.HashTag[:]) - if _, err = w.Write(buf[:4]); err != nil { - return - } - - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - default: - panic("impossible") - } - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go deleted file mode 100644 index d2bef0ce5..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetric_key_encrypted.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto/cipher" - "io" - "strconv" - - "github.com/keybase/go-crypto/openpgp/errors" - "github.com/keybase/go-crypto/openpgp/s2k" -) - -// This is the largest session key that we'll support. Since no 512-bit cipher -// has even been seriously used, this is comfortably large. -const maxSessionKeySizeInBytes = 64 - -// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC -// 4880, section 5.3. 
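// Illustrative sketch (editor's note, not part of the vendored file): the
// fixed part of a version 4 symmetric-key encrypted session key packet that
// the parse method below reads: a version octet (4) and a symmetric cipher
// octet, followed by an S2K specifier and an optional encrypted session key.
// The cipher IDs are the RFC 4880 section 9.2 values; the helper name and the
// example input are hypothetical.
package main

import (
	"errors"
	"fmt"
)

var cipherNames = map[byte]string{7: "AES-128", 8: "AES-192", 9: "AES-256"}

func parseSKESKHeader(b []byte) (cipher string, err error) {
	if len(b) < 2 {
		return "", errors.New("truncated packet")
	}
	if b[0] != 4 {
		return "", fmt.Errorf("unsupported SKESK version %d", b[0])
	}
	name, ok := cipherNames[b[1]]
	if !ok {
		return "", fmt.Errorf("unknown cipher id %d", b[1])
	}
	return name, nil
}

func main() {
	fmt.Println(parseSKESKHeader([]byte{4, 9})) // AES-256 <nil>
}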
-type SymmetricKeyEncrypted struct { - CipherFunc CipherFunction - s2k func(out, in []byte) - encryptedKey []byte -} - -const symmetricKeyEncryptedVersion = 4 - -func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { - // RFC 4880, section 5.3. - var buf [2]byte - if _, err := readFull(r, buf[:]); err != nil { - return err - } - if buf[0] != symmetricKeyEncryptedVersion { - return errors.UnsupportedError("SymmetricKeyEncrypted version") - } - ske.CipherFunc = CipherFunction(buf[1]) - - if ske.CipherFunc.KeySize() == 0 { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) - } - - var err error - ske.s2k, err = s2k.Parse(r) - if err != nil { - return err - } - if ske.s2k == nil { - return errors.UnsupportedError("can't use dummy S2K for symmetric key encryption") - } - - encryptedKey := make([]byte, maxSessionKeySizeInBytes) - // The session key may follow. We just have to try and read to find - // out. If it exists then we limit it to maxSessionKeySizeInBytes. - n, err := readFull(r, encryptedKey) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if n != 0 { - if n == maxSessionKeySizeInBytes { - return errors.UnsupportedError("oversized encrypted session key") - } - ske.encryptedKey = encryptedKey[:n] - } - - return nil -} - -// Decrypt attempts to decrypt an encrypted session key and returns the key and -// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data -// packet. -func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { - key := make([]byte, ske.CipherFunc.KeySize()) - ske.s2k(key, passphrase) - - if len(ske.encryptedKey) == 0 { - return key, ske.CipherFunc, nil - } - - // the IV is all zeros - iv := make([]byte, ske.CipherFunc.blockSize()) - c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) - plaintextKey := make([]byte, len(ske.encryptedKey)) - c.XORKeyStream(plaintextKey, ske.encryptedKey) - cipherFunc := CipherFunction(plaintextKey[0]) - if cipherFunc.blockSize() == 0 { - return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - plaintextKey = plaintextKey[1:] - if l := len(plaintextKey); l == 0 || l%cipherFunc.blockSize() != 0 { - return nil, cipherFunc, errors.StructuralError("length of decrypted key not a multiple of block size") - } - - return plaintextKey, cipherFunc, nil -} - -// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The -// packet contains a random session key, encrypted by a key derived from the -// given passphrase. The session key is returned and must be passed to -// SerializeSymmetricallyEncrypted. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { - cipherFunc := config.Cipher() - keySize := cipherFunc.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - - s2kBuf := new(bytes.Buffer) - keyEncryptingKey := make([]byte, keySize) - // s2k.Serialize salts and stretches the passphrase, and writes the - // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) - if err != nil { - return - } - s2kBytes := s2kBuf.Bytes() - - packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) - if err != nil { - return - } - - var buf [2]byte - buf[0] = symmetricKeyEncryptedVersion - buf[1] = byte(cipherFunc) - _, err = w.Write(buf[:]) - if err != nil { - return - } - _, err = w.Write(s2kBytes) - if err != nil { - return - } - - sessionKey := make([]byte, keySize) - _, err = io.ReadFull(config.Random(), sessionKey) - if err != nil { - return - } - iv := make([]byte, cipherFunc.blockSize()) - c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) - encryptedCipherAndKey := make([]byte, keySize+1) - c.XORKeyStream(encryptedCipherAndKey, buf[1:]) - c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) - _, err = w.Write(encryptedCipherAndKey) - if err != nil { - return - } - - key = sessionKey - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go deleted file mode 100644 index fd4f8f015..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/symmetrically_encrypted.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto/cipher" - "crypto/sha1" - "crypto/subtle" - "hash" - "io" - "strconv" - - "github.com/keybase/go-crypto/openpgp/errors" -) - -// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The -// encrypted contents will consist of more OpenPGP packets. See RFC 4880, -// sections 5.7 and 5.13. -type SymmetricallyEncrypted struct { - MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. - contents io.Reader - prefix []byte -} - -const symmetricallyEncryptedVersion = 1 - -func (se *SymmetricallyEncrypted) parse(r io.Reader) error { - if se.MDC { - // See RFC 4880, section 5.13. - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - if buf[0] != symmetricallyEncryptedVersion { - return errors.UnsupportedError("unknown SymmetricallyEncrypted version") - } - } - se.contents = r - return nil -} - -// Decrypt returns a ReadCloser, from which the decrypted contents of the -// packet can be read. An incorrect key can, with high probability, be detected -// immediately and this will result in a KeyIncorrect error being returned. -func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { - keySize := c.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) - } - if len(key) != keySize { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") - } - - if se.prefix == nil { - se.prefix = make([]byte, c.blockSize()+2) - _, err := readFull(se.contents, se.prefix) - if err != nil { - return nil, err - } - } else if len(se.prefix) != c.blockSize()+2 { - return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") - } - - ocfbResync := OCFBResync - if se.MDC { - // MDC packets use a different form of OCFB mode. 
- ocfbResync = OCFBNoResync - } - - s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) - if s == nil { - return nil, errors.ErrKeyIncorrect - } - - plaintext := cipher.StreamReader{S: s, R: se.contents} - - if se.MDC { - // MDC packets have an embedded hash that we need to check. - h := sha1.New() - h.Write(se.prefix) - return &seMDCReader{in: plaintext, h: h}, nil - } - - // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. - return seReader{plaintext}, nil -} - -// seReader wraps an io.Reader with a no-op Close method. -type seReader struct { - in io.Reader -} - -func (ser seReader) Read(buf []byte) (int, error) { - return ser.in.Read(buf) -} - -func (ser seReader) Close() error { - return nil -} - -const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size - -// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold -// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an -// MDC packet containing a hash of the previous contents which is checked -// against the running hash. See RFC 4880, section 5.13. -type seMDCReader struct { - in io.Reader - h hash.Hash - trailer [mdcTrailerSize]byte - scratch [mdcTrailerSize]byte - trailerUsed int - error bool - eof bool -} - -func (ser *seMDCReader) Read(buf []byte) (n int, err error) { - if ser.error { - err = io.ErrUnexpectedEOF - return - } - if ser.eof { - err = io.EOF - return - } - - // If we haven't yet filled the trailer buffer then we must do that - // first. - for ser.trailerUsed < mdcTrailerSize { - n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) - ser.trailerUsed += n - if err == io.EOF { - if ser.trailerUsed != mdcTrailerSize { - n = 0 - err = io.ErrUnexpectedEOF - ser.error = true - return - } - ser.eof = true - n = 0 - return - } - - if err != nil { - n = 0 - return - } - } - - // If it's a short read then we read into a temporary buffer and shift - // the data into the caller's buffer. - if len(buf) <= mdcTrailerSize { - n, err = readFull(ser.in, ser.scratch[:len(buf)]) - copy(buf, ser.trailer[:n]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], ser.trailer[n:]) - copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) - if n < len(buf) { - ser.eof = true - err = io.EOF - } - return - } - - n, err = ser.in.Read(buf[mdcTrailerSize:]) - copy(buf, ser.trailer[:]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], buf[n:]) - - if err == io.EOF { - ser.eof = true - } - return -} - -// This is a new-format packet tag byte for a type 19 (MDC) packet. -const mdcPacketTagByte = byte(0x80) | 0x40 | 19 - -func (ser *seMDCReader) Close() error { - if ser.error { - return errors.SignatureError("error during reading") - } - - for !ser.eof { - // We haven't seen EOF so we need to read to the end - var buf [1024]byte - _, err := ser.Read(buf[:]) - if err == io.EOF { - break - } - if err != nil { - return errors.SignatureError("error during reading") - } - } - - if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { - return errors.SignatureError("MDC packet not found") - } - ser.h.Write(ser.trailer[:2]) - - final := ser.h.Sum(nil) - if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { - return errors.SignatureError("hash mismatch") - } - return nil -} - -// An seMDCWriter writes through to an io.WriteCloser while maintains a running -// hash of the data written. On close, it emits an MDC packet containing the -// running hash. 
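// Illustrative sketch (editor's note, not part of the vendored file): the
// 22-byte MDC trailer that seMDCWriter.Close emits below. It consists of a
// new-format packet tag for type 19 (0x80|0x40|19 = 0xd3), a length octet of
// 20, and a SHA-1 digest of everything written so far, including those two
// bytes (RFC 4880, section 5.13). The plaintext value is a hypothetical
// example payload.
package main

import (
	"crypto/sha1"
	"fmt"
)

func main() {
	plaintext := []byte("example plaintext")
	h := sha1.New()
	h.Write(plaintext)

	trailer := make([]byte, 2+sha1.Size)
	trailer[0] = 0x80 | 0x40 | 19 // new-format tag byte for an MDC (type 19) packet
	trailer[1] = sha1.Size        // the MDC body is always a 20-byte SHA-1 hash
	h.Write(trailer[:2])          // the tag and length octets are themselves hashed
	copy(trailer[2:], h.Sum(nil))

	fmt.Printf("% x\n", trailer)
}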
-type seMDCWriter struct { - w io.WriteCloser - h hash.Hash -} - -func (w *seMDCWriter) Write(buf []byte) (n int, err error) { - w.h.Write(buf) - return w.w.Write(buf) -} - -func (w *seMDCWriter) Close() (err error) { - var buf [mdcTrailerSize]byte - - buf[0] = mdcPacketTagByte - buf[1] = sha1.Size - w.h.Write(buf[:2]) - digest := w.h.Sum(nil) - copy(buf[2:], digest) - - _, err = w.w.Write(buf[:]) - if err != nil { - return - } - return w.w.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet -// to w and returns a WriteCloser to which the to-be-encrypted packets can be -// written. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { - if c.KeySize() != len(key) { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") - } - writeCloser := noOpCloser{w} - ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) - if err != nil { - return - } - - _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) - if err != nil { - return - } - - block := c.new(key) - blockSize := block.BlockSize() - iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) - if err != nil { - return - } - s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) - _, err = ciphertext.Write(prefix) - if err != nil { - return - } - plaintext := cipher.StreamWriter{S: s, W: ciphertext} - - h := sha1.New() - h.Write(iv) - h.Write(iv[blockSize-2:]) - contents = &seMDCWriter{w: plaintext, h: h} - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go deleted file mode 100644 index 96a2b382a..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/userattribute.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "image" - "image/jpeg" - "io" - "io/ioutil" -) - -const UserAttrImageSubpacket = 1 - -// UserAttribute is capable of storing other types of data about a user -// beyond name, email and a text comment. In practice, user attributes are typically used -// to store a signed thumbnail photo JPEG image of the user. -// See RFC 4880, section 5.12. -type UserAttribute struct { - Contents []*OpaqueSubpacket -} - -// NewUserAttributePhoto creates a user attribute packet -// containing the given images. -func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { - uat = new(UserAttribute) - for _, photo := range photos { - var buf bytes.Buffer - // RFC 4880, Section 5.12.1. - data := []byte{ - 0x10, 0x00, // Little-endian image header length (16 bytes) - 0x01, // Image header version 1 - 0x01, // JPEG - 0, 0, 0, 0, // 12 reserved octets, must be all zero. 
- 0, 0, 0, 0, - 0, 0, 0, 0} - if _, err = buf.Write(data); err != nil { - return - } - if err = jpeg.Encode(&buf, photo, nil); err != nil { - return - } - uat.Contents = append(uat.Contents, &OpaqueSubpacket{ - SubType: UserAttrImageSubpacket, - Contents: buf.Bytes()}) - } - return -} - -// NewUserAttribute creates a new user attribute packet containing the given subpackets. -func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { - return &UserAttribute{Contents: contents} -} - -func (uat *UserAttribute) parse(r io.Reader) (err error) { - // RFC 4880, section 5.13 - b, err := ioutil.ReadAll(r) - if err != nil { - return - } - uat.Contents, err = OpaqueSubpackets(b) - return -} - -// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including -// header. -func (uat *UserAttribute) Serialize(w io.Writer) (err error) { - var buf bytes.Buffer - for _, sp := range uat.Contents { - sp.Serialize(&buf) - } - if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { - return err - } - _, err = w.Write(buf.Bytes()) - return -} - -// ImageData returns zero or more byte slices, each containing -// JPEG File Interchange Format (JFIF), for each photo in the -// the user attribute packet. -func (uat *UserAttribute) ImageData() (imageData [][]byte) { - for _, sp := range uat.Contents { - if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { - imageData = append(imageData, sp.Contents[16:]) - } - } - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go b/vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go deleted file mode 100644 index d6bea7d4a..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/packet/userid.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "io" - "io/ioutil" - "strings" -) - -// UserId contains text that is intended to represent the name and email -// address of the key holder. See RFC 4880, section 5.11. By convention, this -// takes the form "Full Name (Comment) " -type UserId struct { - Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below. - - Name, Comment, Email string -} - -func hasInvalidCharacters(s string) bool { - for _, c := range s { - switch c { - case '(', ')', '<', '>', 0: - return true - } - } - return false -} - -// NewUserId returns a UserId or nil if any of the arguments contain invalid -// characters. The invalid characters are '\x00', '(', ')', '<' and '>' -func NewUserId(name, comment, email string) *UserId { - // RFC 4880 doesn't deal with the structure of userid strings; the - // name, comment and email form is just a convention. However, there's - // no convention about escaping the metacharacters and GPG just refuses - // to create user ids where, say, the name contains a '('. We mirror - // this behaviour. 
- - if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { - return nil - } - - uid := new(UserId) - uid.Name, uid.Comment, uid.Email = name, comment, email - uid.Id = name - if len(comment) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "(" - uid.Id += comment - uid.Id += ")" - } - if len(email) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "<" - uid.Id += email - uid.Id += ">" - } - return uid -} - -func (uid *UserId) parse(r io.Reader) (err error) { - // RFC 4880, section 5.11 - b, err := ioutil.ReadAll(r) - if err != nil { - return - } - uid.Id = string(b) - uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) - return -} - -// Serialize marshals uid to w in the form of an OpenPGP packet, including -// header. -func (uid *UserId) Serialize(w io.Writer) error { - err := serializeHeader(w, packetTypeUserId, len(uid.Id)) - if err != nil { - return err - } - _, err = w.Write([]byte(uid.Id)) - return err -} - -// parseUserId extracts the name, comment and email from a user id string that -// is formatted as "Full Name (Comment) ". -func parseUserId(id string) (name, comment, email string) { - var n, c, e struct { - start, end int - } - var state int - - for offset, rune := range id { - switch state { - case 0: - // Entering name - n.start = offset - state = 1 - fallthrough - case 1: - // In name - if rune == '(' { - state = 2 - n.end = offset - } else if rune == '<' { - state = 5 - n.end = offset - } - case 2: - // Entering comment - c.start = offset - state = 3 - fallthrough - case 3: - // In comment - if rune == ')' { - state = 4 - c.end = offset - } - case 4: - // Between comment and email - if rune == '<' { - state = 5 - } - case 5: - // Entering email - e.start = offset - state = 6 - fallthrough - case 6: - // In email - if rune == '>' { - state = 7 - e.end = offset - } - default: - // After email - } - } - switch state { - case 1: - // ended in the name - n.end = len(id) - case 3: - // ended in comment - c.end = len(id) - case 6: - // ended in email - e.end = len(id) - } - - name = strings.TrimSpace(id[n.start:n.end]) - comment = strings.TrimSpace(id[c.start:c.end]) - email = strings.TrimSpace(id[e.start:e.end]) - return -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/patch.sh b/vendor/github.com/keybase/go-crypto/openpgp/patch.sh deleted file mode 100644 index 23cacc83d..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/patch.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/sh - -patch < sig-v3.patch -patch < s2k-gnu-dummy.patch -find . -type f -name '*.go' -exec sed -i'' -e 's/golang.org\/x\/crypto\/openpgp/github.com\/keybase\/go-crypto\/openpgp/' {} \; -find . -type f -name '*.go-e' -exec rm {} \; -go test ./... diff --git a/vendor/github.com/keybase/go-crypto/openpgp/read.go b/vendor/github.com/keybase/go-crypto/openpgp/read.go deleted file mode 100644 index 766d9f57f..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/read.go +++ /dev/null @@ -1,451 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package openpgp implements high level operations on OpenPGP messages. 
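As an aside on the user id convention parsed above: the "Full Name (Comment) <email>" form that parseUserId walks with a state machine can also be illustrated with a much simpler split. splitUserId below is a hypothetical helper, less careful about malformed input than the original:

package main

import (
	"fmt"
	"strings"
)

// splitUserId illustrates the convention: text before '(' is the name,
// the parenthesised part is the comment, the angle-bracketed part is
// the email address.
func splitUserId(id string) (name, comment, email string) {
	if i := strings.Index(id, "<"); i >= 0 {
		if j := strings.Index(id[i:], ">"); j > 0 {
			email = id[i+1 : i+j]
		}
		id = id[:i]
	}
	if i := strings.Index(id, "("); i >= 0 {
		if j := strings.Index(id[i:], ")"); j > 0 {
			comment = id[i+1 : i+j]
		}
		id = id[:i]
	}
	return strings.TrimSpace(id), strings.TrimSpace(comment), strings.TrimSpace(email)
}

func main() {
	n, c, e := splitUserId("Alice Example (work key) <alice@example.com>")
	fmt.Println(n, "|", c, "|", e) // Alice Example | work key | alice@example.com
}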
-package openpgp // import "github.com/keybase/go-crypto/openpgp" - -import ( - "crypto" - _ "crypto/sha256" - "hash" - "io" - "strconv" - - "github.com/keybase/go-crypto/openpgp/armor" - "github.com/keybase/go-crypto/openpgp/errors" - "github.com/keybase/go-crypto/openpgp/packet" -) - -// SignatureType is the armor type for a PGP signature. -var SignatureType = "PGP SIGNATURE" - -// readArmored reads an armored block with the given type. -func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { - block, err := armor.Decode(r) - if err != nil { - return - } - - if block.Type != expectedType { - return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) - } - - return block.Body, nil -} - -// MessageDetails contains the result of parsing an OpenPGP encrypted and/or -// signed message. -type MessageDetails struct { - IsEncrypted bool // true if the message was encrypted. - EncryptedToKeyIds []uint64 // the list of recipient key ids. - IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. - DecryptedWith Key // the private key used to decrypt the message, if any. - IsSigned bool // true if the message is signed. - SignedByKeyId uint64 // the key id of the signer, if any. - SignedBy *Key // the key of the signer, if available. - LiteralData *packet.LiteralData // the metadata of the contents - UnverifiedBody io.Reader // the contents of the message. - - // If IsSigned is true and SignedBy is non-zero then the signature will - // be verified as UnverifiedBody is read. The signature cannot be - // checked until the whole of UnverifiedBody is read so UnverifiedBody - // must be consumed until EOF before the data can trusted. Even if a - // message isn't signed (or the signer is unknown) the data may contain - // an authentication code that is only checked once UnverifiedBody has - // been consumed. Once EOF has been seen, the following fields are - // valid. (An authentication code failure is reported as a - // SignatureError error when reading from UnverifiedBody.) - SignatureError error // nil if the signature is good. - Signature *packet.Signature // the signature packet itself, if v4 (default) - SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature - - decrypted io.ReadCloser -} - -// A PromptFunction is used as a callback by functions that may need to decrypt -// a private key, or prompt for a passphrase. It is called with a list of -// acceptable, encrypted private keys and a boolean that indicates whether a -// passphrase is usable. It should either decrypt a private key or return a -// passphrase to try. If the decrypted private key or given passphrase isn't -// correct, the function will be called again, forever. Any error returned will -// be passed up. -type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) - -// A keyEnvelopePair is used to store a private key with the envelope that -// contains a symmetric key, encrypted with that key. -type keyEnvelopePair struct { - key Key - encryptedKey *packet.EncryptedKey -} - -// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. -// The given KeyRing should contain both public keys (for signature -// verification) and, possibly encrypted, private keys for decrypting. -// If config is nil, sensible defaults will be used. 
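A usage sketch for ReadMessage and MessageDetails may help here. File names are hypothetical, and ReadArmoredKeyRing is assumed from the package's keys.go (not shown in this hunk); the important point is that UnverifiedBody must be read to EOF before SignatureError is meaningful:

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/keybase/go-crypto/openpgp"
)

func main() {
	// Hypothetical inputs: an armored keyring export and an encrypted,
	// signed message in binary (unarmored) form.
	kr, err := os.Open("keyring.asc")
	if err != nil {
		panic(err)
	}
	keyring, err := openpgp.ReadArmoredKeyRing(kr)
	if err != nil {
		panic(err)
	}
	msg, err := os.Open("message.gpg")
	if err != nil {
		panic(err)
	}

	md, err := openpgp.ReadMessage(msg, keyring, nil, nil)
	if err != nil {
		panic(err)
	}

	// The signature (and any MDC) is only checked once UnverifiedBody
	// has been consumed to EOF, so read it all before trusting anything.
	body, err := ioutil.ReadAll(md.UnverifiedBody)
	if err != nil {
		panic(err)
	}
	if md.IsSigned && md.SignatureError != nil {
		panic(md.SignatureError)
	}
	fmt.Printf("%d plaintext bytes; signed by key id %x\n", len(body), md.SignedByKeyId)
}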
-func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { - var p packet.Packet - - var symKeys []*packet.SymmetricKeyEncrypted - var pubKeys []keyEnvelopePair - var se *packet.SymmetricallyEncrypted - - packets := packet.NewReader(r) - md = new(MessageDetails) - md.IsEncrypted = true - - // The message, if encrypted, starts with a number of packets - // containing an encrypted decryption key. The decryption key is either - // encrypted to a public key, or with a passphrase. This loop - // collects these packets. -ParsePackets: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.SymmetricKeyEncrypted: - // This packet contains the decryption key encrypted with a passphrase. - md.IsSymmetricallyEncrypted = true - symKeys = append(symKeys, p) - case *packet.EncryptedKey: - // This packet contains the decryption key encrypted to a public key. - md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) - switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: - break - default: - continue - } - var keys []Key - if p.KeyId == 0 { - keys = keyring.DecryptionKeys() - } else { - keys = keyring.KeysById(p.KeyId) - } - for _, k := range keys { - pubKeys = append(pubKeys, keyEnvelopePair{k, p}) - } - case *packet.SymmetricallyEncrypted: - se = p - break ParsePackets - case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: - // This message isn't encrypted. - if len(symKeys) != 0 || len(pubKeys) != 0 { - return nil, errors.StructuralError("key material not followed by encrypted message") - } - packets.Unread(p) - return readSignedMessage(packets, nil, keyring) - } - } - - var candidates []Key - var decrypted io.ReadCloser - - // Now that we have the list of encrypted keys we need to decrypt at - // least one of them or, if we cannot, we need to call the prompt - // function so that it can decrypt a key or give us a passphrase. 
-FindKey: - for { - // See if any of the keys already have a private key available - candidates = candidates[:0] - candidateFingerprints := make(map[string]bool) - - for _, pk := range pubKeys { - if pk.key.PrivateKey == nil { - continue - } - if !pk.key.PrivateKey.Encrypted { - if len(pk.encryptedKey.Key) == 0 { - pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) - } - if len(pk.encryptedKey.Key) == 0 { - continue - } - decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - md.DecryptedWith = pk.key - break FindKey - } - } else { - fpr := string(pk.key.PublicKey.Fingerprint[:]) - if v := candidateFingerprints[fpr]; v { - continue - } - candidates = append(candidates, pk.key) - candidateFingerprints[fpr] = true - } - } - - if len(candidates) == 0 && len(symKeys) == 0 { - return nil, errors.ErrKeyIncorrect - } - - if prompt == nil { - return nil, errors.ErrKeyIncorrect - } - - passphrase, err := prompt(candidates, len(symKeys) != 0) - if err != nil { - return nil, err - } - - // Try the symmetric passphrase first - if len(symKeys) != 0 && passphrase != nil { - for _, s := range symKeys { - key, cipherFunc, err := s.Decrypt(passphrase) - if err == nil { - decrypted, err = se.Decrypt(cipherFunc, key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - break FindKey - } - } - - } - } - } - - md.decrypted = decrypted - if err := packets.Push(decrypted); err != nil { - return nil, err - } - return readSignedMessage(packets, md, keyring) -} - -// readSignedMessage reads a possibly signed message if mdin is non-zero then -// that structure is updated and returned. Otherwise a fresh MessageDetails is -// used. -func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { - if mdin == nil { - mdin = new(MessageDetails) - } - md = mdin - - var p packet.Packet - var h hash.Hash - var wrappedHash hash.Hash -FindLiteralData: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.Compressed: - if err := packets.Push(p.Body); err != nil { - return nil, err - } - case *packet.OnePassSignature: - if !p.IsLast { - return nil, errors.UnsupportedError("nested signatures") - } - - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) - if err != nil { - md = nil - return - } - - md.IsSigned = true - md.SignedByKeyId = p.KeyId - keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) - if len(keys) > 0 { - md.SignedBy = &keys[0] - } - case *packet.LiteralData: - md.LiteralData = p - break FindLiteralData - } - } - - if md.SignedBy != nil { - md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} - } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} - } else { - md.UnverifiedBody = md.LiteralData.Body - } - - return md, nil -} - -// hashForSignature returns a pair of hashes that can be used to verify a -// signature. The signature may specify that the contents of the signed message -// should be preprocessed (i.e. to normalize line endings). Thus this function -// returns two hashes. The second should be used to hash the message itself and -// performs any needed preprocessing. 
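The text-mode wrapped hash returned by this function normalizes line endings to CRLF before hashing, per RFC 4880. A rough standalone illustration of that preprocessing; canonicalizeLineEndings is a hypothetical helper, simpler than the package's writer-based NewCanonicalTextHash:

package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// canonicalizeLineEndings rewrites bare "\n" as "\r\n" so that text-mode
// signatures hash the same bytes regardless of platform line endings.
func canonicalizeLineEndings(s string) string {
	s = strings.ReplaceAll(s, "\r\n", "\n") // avoid doubling existing CRLFs
	return strings.ReplaceAll(s, "\n", "\r\n")
}

func main() {
	unix := "hello\nworld\n"
	dos := "hello\r\nworld\r\n"
	a := sha256.Sum256([]byte(canonicalizeLineEndings(unix)))
	b := sha256.Sum256([]byte(canonicalizeLineEndings(dos)))
	fmt.Println("digests equal:", a == b) // true
}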
-func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { - if !hashId.Available() { - return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) - } - h := hashId.New() - - switch sigType { - case packet.SigTypeBinary: - return h, h, nil - case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil - } - - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) -} - -// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF -// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger -// MDC checks. -type checkReader struct { - md *MessageDetails -} - -func (cr checkReader) Read(buf []byte) (n int, err error) { - n, err = cr.md.LiteralData.Body.Read(buf) - if err == io.EOF { - mdcErr := cr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - return -} - -// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes -// the data as it is read. When it sees an EOF from the underlying io.Reader -// it parses and checks a trailing Signature packet and triggers any MDC checks. -type signatureCheckReader struct { - packets *packet.Reader - h, wrappedHash hash.Hash - md *MessageDetails -} - -func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { - n, err = scr.md.LiteralData.Body.Read(buf) - scr.wrappedHash.Write(buf[:n]) - if err == io.EOF { - var p packet.Packet - p, scr.md.SignatureError = scr.packets.Next() - if scr.md.SignatureError != nil { - return - } - - var ok bool - if scr.md.Signature, ok = p.(*packet.Signature); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) - } else { - scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") - return - } - - // The SymmetricallyEncrypted packet, if any, might have an - // unsigned hash of its own. In order to check this we need to - // close that Reader. - if scr.md.decrypted != nil { - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - } - return -} - -// CheckDetachedSignature takes a signed file and a detached signature and -// returns the signer if the signature is valid. If the signer isn't known, -// ErrUnknownIssuer is returned. 
-func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - signer, _, err = checkDetachedSignature(keyring, signed, signature) - return signer, err -} - -func checkDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, issuer *uint64, err error) { - var issuerKeyId uint64 - var hashFunc crypto.Hash - var sigType packet.SignatureType - var keys []Key - var p packet.Packet - - packets := packet.NewReader(signature) - for { - p, err = packets.Next() - if err == io.EOF { - return nil, nil, errors.ErrUnknownIssuer - } - if err != nil { - return nil, nil, err - } - - switch sig := p.(type) { - case *packet.Signature: - if sig.IssuerKeyId == nil { - return nil, nil, errors.StructuralError("signature doesn't have an issuer") - } - issuerKeyId = *sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - case *packet.SignatureV3: - issuerKeyId = sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - default: - return nil, nil, errors.StructuralError("non signature packet found") - } - - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) - if len(keys) > 0 { - break - } - } - - if len(keys) == 0 { - panic("unreachable") - } - - h, wrappedHash, err := hashForSignature(hashFunc, sigType) - if err != nil { - return nil, nil, err - } - - if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { - return nil, nil, err - } - - for _, key := range keys { - switch sig := p.(type) { - case *packet.Signature: - err = key.PublicKey.VerifySignature(h, sig) - case *packet.SignatureV3: - err = key.PublicKey.VerifySignatureV3(h, sig) - default: - panic("unreachable") - } - - if err == nil { - return key.Entity, &issuerKeyId, nil - } - } - - return nil, nil, err -} - -// CheckArmoredDetachedSignature performs the same actions as -// CheckDetachedSignature but expects the signature to be armored. -func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - signer, _, err = checkArmoredDetachedSignature(keyring, signed, signature) - return signer, err -} - -func checkArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, issuer *uint64, err error) { - body, err := readArmored(signature, SignatureType) - if err != nil { - return - } - return checkDetachedSignature(keyring, signed, body) -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go deleted file mode 100644 index 01bb67852..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/s2k/s2k.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package s2k implements the various OpenPGP string-to-key transforms as -// specified in RFC 4800 section 3.7.1. -package s2k // import "github.com/keybase/go-crypto/openpgp/s2k" - -import ( - "crypto" - "hash" - "io" - "strconv" - - "github.com/keybase/go-crypto/openpgp/errors" -) - -// Config collects configuration parameters for s2k key-stretching -// transformatioms. A nil *Config is valid and results in all default -// values. Currently, Config is used only by the Serialize function in -// this package. -type Config struct { - // Hash is the default hash function to be used. If - // nil, SHA1 is used. - Hash crypto.Hash - // S2KCount is only used for symmetric encryption. 
It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encrouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int -} - -func (c *Config) hash() crypto.Hash { - if c == nil || uint(c.Hash) == 0 { - // SHA1 is the historical default in this package. - return crypto.SHA1 - } - - return c.Hash -} - -func (c *Config) encodedCount() uint8 { - if c == nil || c.S2KCount == 0 { - return 96 // The common case. Correspoding to 65536 - } - - i := c.S2KCount - switch { - // Behave like GPG. Should we make 65536 the lowest value used? - case i < 1024: - i = 1024 - case i > 65011712: - i = 65011712 - } - - return encodeCount(i) -} - -// encodeCount converts an iterative "count" in the range 1024 to -// 65011712, inclusive, to an encoded count. The return value is the -// octet that is actually stored in the GPG file. encodeCount panics -// if i is not in the above range (encodedCount above takes care to -// pass i in the correct range). See RFC 4880 Section 3.7.7.1. -func encodeCount(i int) uint8 { - if i < 1024 || i > 65011712 { - panic("count arg i outside the required range") - } - - for encoded := 0; encoded < 256; encoded++ { - count := decodeCount(uint8(encoded)) - if count >= i { - return uint8(encoded) - } - } - - return 255 -} - -// decodeCount returns the s2k mode 3 iterative "count" corresponding to -// the encoded octet c. -func decodeCount(c uint8) int { - return (16 + int(c&15)) << (uint32(c>>4) + 6) -} - -// Simple writes to out the result of computing the Simple S2K function (RFC -// 4880, section 3.7.1.1) using the given hash and input passphrase. -func Simple(out []byte, h hash.Hash, in []byte) { - Salted(out, h, in, nil) -} - -var zero [1]byte - -// Salted writes to out the result of computing the Salted S2K function (RFC -// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. -func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { - done := 0 - var digest []byte - - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - h.Write(salt) - h.Write(in) - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Iterated writes to out the result of computing the Iterated and Salted S2K -// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, -// salt and iteration count. 
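A compact, standalone restatement of the same mode-3 computation may help when reading the loop below. iteratedS2K is illustrative only (SHA-1 hard-coded, no count-octet decoding, not the package function):

package main

import (
	"crypto/sha1"
	"fmt"
)

// iteratedS2K hashes `count` bytes of salt||passphrase, repeated, and for
// each additional hash pass needed to fill the output it preloads one more
// zero byte, exactly as the Iterated function below does.
func iteratedS2K(out, passphrase, salt []byte, count int) {
	combined := append(append([]byte{}, salt...), passphrase...)
	if count < len(combined) {
		count = len(combined)
	}
	done := 0
	for i := 0; done < len(out); i++ {
		h := sha1.New()
		for j := 0; j < i; j++ {
			h.Write([]byte{0})
		}
		written := 0
		for written < count {
			todo := count - written
			if todo > len(combined) {
				todo = len(combined)
			}
			h.Write(combined[:todo])
			written += todo
		}
		done += copy(out[done:], h.Sum(nil))
	}
}

func main() {
	key := make([]byte, 16) // e.g. an AES-128 session key
	iteratedS2K(key, []byte("correct horse"), []byte("12345678"), 65536)
	fmt.Printf("%x\n", key)
}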
-func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { - combined := make([]byte, len(in)+len(salt)) - copy(combined, salt) - copy(combined[len(salt):], in) - - if count < len(combined) { - count = len(combined) - } - - done := 0 - var digest []byte - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - written := 0 - for written < count { - if written+len(combined) > count { - todo := count - written - h.Write(combined[:todo]) - written = count - } else { - h.Write(combined) - written += len(combined) - } - } - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -func parseGNUExtensions(r io.Reader) (f func(out, in []byte), err error) { - var buf [9]byte - - // A three-byte string identifier - _, err = io.ReadFull(r, buf[:3]) - if err != nil { - return - } - gnuExt := string(buf[:3]) - - if gnuExt != "GNU" { - return nil, errors.UnsupportedError("Malformed GNU extension: " + gnuExt) - } - _, err = io.ReadFull(r, buf[:1]) - if err != nil { - return - } - gnuExtType := int(buf[0]) - switch gnuExtType { - case 1: - return nil, nil - case 2: - // Read a serial number, which is prefixed by a 1-byte length. - // The maximum length is 16. - var lenBuf [1]byte - _, err = io.ReadFull(r, lenBuf[:]) - if err != nil { - return - } - - maxLen := 16 - ivLen := int(lenBuf[0]) - if ivLen > maxLen { - ivLen = maxLen - } - ivBuf := make([]byte, ivLen) - // For now we simply discard the IV - _, err = io.ReadFull(r, ivBuf) - if err != nil { - return - } - return nil, nil - default: - return nil, errors.UnsupportedError("unknown S2K GNU protection mode: " + strconv.Itoa(int(gnuExtType))) - } -} - -// Parse reads a binary specification for a string-to-key transformation from r -// and returns a function which performs that transform. -func Parse(r io.Reader) (f func(out, in []byte), err error) { - var buf [9]byte - - _, err = io.ReadFull(r, buf[:2]) - if err != nil { - return - } - - // GNU Extensions; handle them before we try to look for a hash, which won't - // be needed in most cases anyway. - if buf[0] == 101 { - return parseGNUExtensions(r) - } - - hash, ok := HashIdToHash(buf[1]) - if !ok { - return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) - } - if !hash.Available() { - return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) - } - h := hash.New() - - switch buf[0] { - case 0: - f := func(out, in []byte) { - Simple(out, h, in) - } - return f, nil - case 1: - _, err = io.ReadFull(r, buf[:8]) - if err != nil { - return - } - f := func(out, in []byte) { - Salted(out, h, in, buf[:8]) - } - return f, nil - case 3: - _, err = io.ReadFull(r, buf[:9]) - if err != nil { - return - } - count := decodeCount(buf[8]) - f := func(out, in []byte) { - Iterated(out, h, in, buf[:8], count) - } - return f, nil - } - - return nil, errors.UnsupportedError("S2K function") -} - -// Serialize salts and stretches the given passphrase and writes the -// resulting key into key. It also serializes an S2K descriptor to -// w. The key stretching can be configured with c, which may be -// nil. In that case, sensible defaults will be used. 
-func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { - var buf [11]byte - buf[0] = 3 /* iterated and salted */ - buf[1], _ = HashToHashId(c.hash()) - salt := buf[2:10] - if _, err := io.ReadFull(rand, salt); err != nil { - return err - } - encodedCount := c.encodedCount() - count := decodeCount(encodedCount) - buf[10] = encodedCount - if _, err := w.Write(buf[:]); err != nil { - return err - } - - Iterated(key, c.hash().New(), passphrase, salt, count) - return nil -} - -// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with -// Go's crypto.Hash type. See RFC 4880, section 9.4. -var hashToHashIdMapping = []struct { - id byte - hash crypto.Hash - name string -}{ - {1, crypto.MD5, "MD5"}, - {2, crypto.SHA1, "SHA1"}, - {3, crypto.RIPEMD160, "RIPEMD160"}, - {8, crypto.SHA256, "SHA256"}, - {9, crypto.SHA384, "SHA384"}, - {10, crypto.SHA512, "SHA512"}, - {11, crypto.SHA224, "SHA224"}, -} - -// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP -// hash id. -func HashIdToHash(id byte) (h crypto.Hash, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.hash, true - } - } - return 0, false -} - -// HashIdToString returns the name of the hash function corresponding to the -// given OpenPGP hash id, or panics if id is unknown. -func HashIdToString(id byte) (name string, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.name, true - } - } - - return "", false -} - -// HashIdToHash returns an OpenPGP hash id which corresponds the given Hash. -func HashToHashId(h crypto.Hash) (id byte, ok bool) { - for _, m := range hashToHashIdMapping { - if m.hash == h { - return m.id, true - } - } - return 0, false -} diff --git a/vendor/github.com/keybase/go-crypto/openpgp/sig-v3.patch b/vendor/github.com/keybase/go-crypto/openpgp/sig-v3.patch deleted file mode 100644 index bfd764afe..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/sig-v3.patch +++ /dev/null @@ -1,135 +0,0 @@ -diff --git a/openpgp/read.go b/openpgp/read.go -index a6cecc5..0c9397b 100644 ---- a/openpgp/read.go -+++ b/openpgp/read.go -@@ -56,8 +56,9 @@ type MessageDetails struct { - // been consumed. Once EOF has been seen, the following fields are - // valid. (An authentication code failure is reported as a - // SignatureError error when reading from UnverifiedBody.) -- SignatureError error // nil if the signature is good. -- Signature *packet.Signature // the signature packet itself. -+ SignatureError error // nil if the signature is good. 
-+ Signature *packet.Signature // the signature packet itself, if v4 (default) -+ SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature - - decrypted io.ReadCloser - } -@@ -334,13 +335,15 @@ func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { - } - - var ok bool -- if scr.md.Signature, ok = p.(*packet.Signature); !ok { -+ if scr.md.Signature, ok = p.(*packet.Signature); ok { -+ scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) -+ } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { -+ scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) -+ } else { - scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") - return - } - -- scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) -- - // The SymmetricallyEncrypted packet, if any, might have an - // unsigned hash of its own. In order to check this we need to - // close that Reader. -diff --git a/openpgp/read_test.go b/openpgp/read_test.go -index 52f942c..abe8d7b 100644 ---- a/openpgp/read_test.go -+++ b/openpgp/read_test.go -@@ -13,6 +13,7 @@ import ( - "strings" - "testing" - -+ "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - ) - -@@ -411,6 +412,50 @@ func TestIssue11504(t *testing.T) { - testReadMessageError(t, "9303000130303030303030303030983002303030303030030000000130") - } - -+// TestSignatureV3Message tests the verification of V3 signature, generated -+// with a modern V4-style key. Some people have their clients set to generate -+// V3 signatures, so it's useful to be able to verify them. -+func TestSignatureV3Message(t *testing.T) { -+ sig, err := armor.Decode(strings.NewReader(signedMessageV3)) -+ if err != nil { -+ t.Error(err) -+ return -+ } -+ key, err := ReadArmoredKeyRing(strings.NewReader(keyV4forVerifyingSignedMessageV3)) -+ if err != nil { -+ t.Error(err) -+ return -+ } -+ md, err := ReadMessage(sig.Body, key, nil, nil) -+ if err != nil { -+ t.Error(err) -+ return -+ } -+ -+ _, err = ioutil.ReadAll(md.UnverifiedBody) -+ if err != nil { -+ t.Error(err) -+ return -+ } -+ -+ // We'll see a sig error here after reading in the UnverifiedBody above, -+ // if there was one to see. 
-+ if err = md.SignatureError; err != nil { -+ t.Error(err) -+ return -+ } -+ -+ if md.SignatureV3 == nil { -+ t.Errorf("No available signature after checking signature") -+ return -+ } -+ if md.Signature != nil { -+ t.Errorf("Did not expect a signature V4 back") -+ return -+ } -+ return -+} -+ - const testKey1KeyId = 0xA34D7E18C20C31BB - const testKey3KeyId = 0x338934250CCC0360 - -@@ -504,3 +549,36 @@ const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6 - const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` - - const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` -+ -+const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK----- -+Comment: GPGTools - https://gpgtools.org -+ -+mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY -+BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z -+tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0 -+JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV -+/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+ -+K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H -+JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx -+YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1 -+b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi -+UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M -+pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM -+AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz -+786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd -+EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB -+=RZia -+-----END PGP PUBLIC KEY BLOCK----- -+` -+ -+const signedMessageV3 = `-----BEGIN PGP MESSAGE----- -+Comment: GPGTools - https://gpgtools.org -+ -+owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP -+q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka -+uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka -+DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d -+iT57d/OhWwA= -+=hG7R -+-----END PGP MESSAGE----- -+` diff --git a/vendor/github.com/keybase/go-crypto/openpgp/write.go b/vendor/github.com/keybase/go-crypto/openpgp/write.go deleted file mode 100644 index 03b019e78..000000000 --- a/vendor/github.com/keybase/go-crypto/openpgp/write.go +++ /dev/null @@ -1,495 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto" - "hash" - "io" - "strconv" - "time" - - "github.com/keybase/go-crypto/openpgp/armor" - "github.com/keybase/go-crypto/openpgp/errors" - "github.com/keybase/go-crypto/openpgp/packet" - "github.com/keybase/go-crypto/openpgp/s2k" -) - -// DetachSign signs message with the private key from signer (which must -// already have been decrypted) and writes the signature to w. -// If config is nil, sensible defaults will be used. 
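A usage sketch for the detached-signature helpers declared below. NewEntity and EntityList are assumed from the package's keys.go (not shown in this hunk); in real use the signer would come from an existing keyring rather than a throwaway key:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/keybase/go-crypto/openpgp"
)

func main() {
	// Generate a throwaway signing key for the example.
	signer, err := openpgp.NewEntity("Alice", "test", "alice@example.com", nil)
	if err != nil {
		panic(err)
	}

	msg := "hello, world\n"
	var sig bytes.Buffer
	if err := openpgp.ArmoredDetachSign(&sig, signer, strings.NewReader(msg), nil); err != nil {
		panic(err)
	}

	// Verify against a keyring containing only the signer.
	keyring := openpgp.EntityList{signer}
	_, err = openpgp.CheckArmoredDetachedSignature(keyring, strings.NewReader(msg), &sig)
	fmt.Println("signature valid:", err == nil)
}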
-func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// ArmoredDetachSign signs message with the private key from signer (which -// must already have been decrypted) and writes an armored signature to w. -// If config is nil, sensible defaults will be used. -func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { - return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// DetachSignText signs message (after canonicalising the line endings) with -// the private key from signer (which must already have been decrypted) and -// writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeText, config) -} - -// ArmoredDetachSignText signs message (after canonicalising the line endings) -// with the private key from signer (which must already have been decrypted) -// and writes an armored signature to w. -// If config is nil, sensible defaults will be used. -func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return armoredDetachSign(w, signer, message, packet.SigTypeText, config) -} - -func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - out, err := armor.Encode(w, SignatureType, nil) - if err != nil { - return - } - err = detachSign(out, signer, message, sigType, config) - if err != nil { - return - } - return out.Close() -} - -// SignWithSigner signs the message of type sigType with s and writes the -// signature to w. -// If config is nil, sensible defaults will be used. -func SignWithSigner(s packet.Signer, w io.Writer, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - keyId := s.KeyId() - sig := new(packet.Signature) - sig.SigType = sigType - sig.PubKeyAlgo = s.PublicKeyAlgo() - sig.Hash = config.Hash() - sig.CreationTime = config.Now() - sig.IssuerKeyId = &keyId - - s.Reset() - - wrapped := s.(hash.Hash) - - if sigType == packet.SigTypeText { - wrapped = NewCanonicalTextHash(s) - } - - io.Copy(wrapped, message) - - err = sig.Sign(s, nil, config) - if err != nil { - return - } - - err = sig.Serialize(w) - - return -} - -func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - signerSubkey, ok := signer.signingKey(config.Now()) - if !ok { - err = errors.InvalidArgumentError("no valid signing keys") - return - } - if signerSubkey.PrivateKey == nil { - return errors.InvalidArgumentError("signing key doesn't have a private key") - } - if signerSubkey.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing key is encrypted") - } - - sig := new(packet.Signature) - sig.SigType = sigType - sig.PubKeyAlgo = signerSubkey.PrivateKey.PubKeyAlgo - sig.Hash = config.Hash() - sig.CreationTime = config.Now() - sig.IssuerKeyId = &signerSubkey.PrivateKey.KeyId - - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) - if err != nil { - return - } - io.Copy(wrappedHash, message) - - err = sig.Sign(h, signerSubkey.PrivateKey, config) - if err != nil { - return - } - - return sig.Serialize(w) -} - -// FileHints contains metadata about encrypted files. 
This metadata is, itself, -// encrypted. -type FileHints struct { - // IsBinary can be set to hint that the contents are binary data. - IsBinary bool - // FileName hints at the name of the file that should be written. It's - // truncated to 255 bytes if longer. It may be empty to suggest that the - // file should not be written to disk. It may be equal to "_CONSOLE" to - // suggest the data should not be written to disk. - FileName string - // ModTime contains the modification time of the file, or the zero time if not applicable. - ModTime time.Time -} - -// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. -// The resulting WriteCloser must be closed after the contents of the file have -// been written. -// If config is nil, sensible defaults will be used. -func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if hints == nil { - hints = &FileHints{} - } - - key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) - if err != nil { - return - } - w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) - if err != nil { - return - } - - literaldata := w - if algo := config.Compression(); algo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - literaldata, err = packet.SerializeCompressed(w, algo, compConfig) - if err != nil { - return - } - } - - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) -} - -// intersectPreferences mutates and returns a prefix of a that contains only -// the values in the intersection of a and b. The order of a is preserved. -func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { - var j int - for _, v := range a { - for _, v2 := range b { - if v == v2 { - a[j] = v - j++ - break - } - } - } - - return a[:j] -} - -func hashToHashId(h crypto.Hash) uint8 { - v, ok := s2k.HashToHashId(h) - if !ok { - panic("tried to convert unknown hash") - } - return v -} - -// Encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. -func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - var signer *packet.PrivateKey - if signed != nil { - signKey, ok := signed.signingKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("no valid signing keys") - } - signer = signKey.PrivateKey - if signer == nil { - return nil, errors.InvalidArgumentError("no private key in signing key") - } - if signer.Encrypted { - return nil, errors.InvalidArgumentError("signing key must be decrypted") - } - } - - // These are the possible ciphers that we'll use for the message. - candidateCiphers := []uint8{ - uint8(packet.CipherAES128), - uint8(packet.CipherAES256), - uint8(packet.CipherCAST5), - } - // These are the possible hash functions that we'll use for the signature. 
- candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - - // If no preferences were specified, assume something safe and reasonable. - defaultCiphers := []uint8{ - uint8(packet.CipherAES128), - uint8(packet.CipherAES192), - uint8(packet.CipherAES256), - uint8(packet.CipherCAST5), - } - - defaultHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.RIPEMD160), - } - - encryptKeys := make([]Key, len(to)) - for i := range to { - var ok bool - encryptKeys[i], ok = to[i].encryptionKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") - } - - sig := to[i].primaryIdentity().SelfSignature - - preferredSymmetric := sig.PreferredSymmetric - if len(preferredSymmetric) == 0 { - preferredSymmetric = defaultCiphers - } - preferredHashes := sig.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - } - - if len(candidateCiphers) == 0 { - return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common ciphers") - } - if len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common hashes") - } - - cipher := packet.CipherFunction(candidateCiphers[0]) - // If the cipher specifed by config is a candidate, we'll use that. - configuredCipher := config.Cipher() - for _, c := range candidateCiphers { - cipherFunc := packet.CipherFunction(c) - if cipherFunc == configuredCipher { - cipher = cipherFunc - break - } - } - - var hash crypto.Hash - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { - hash = h - break - } - } - - // If the hash specified by config is a candidate, we'll use that. - if configuredHash := config.Hash(); configuredHash.Available() { - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { - hash = h - break - } - } - } - - if hash == 0 { - hashId := candidateHashes[0] - name, ok := s2k.HashIdToString(hashId) - if !ok { - name = "#" + strconv.Itoa(int(hashId)) - } - return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. 
(Wanted " + name + " in this case.)") - } - - symKey := make([]byte, cipher.KeySize()) - if _, err := io.ReadFull(config.Random(), symKey); err != nil { - return nil, err - } - - for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { - return nil, err - } - } - - encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) - if err != nil { - return - } - - if signer != nil { - ops := &packet.OnePassSignature{ - SigType: packet.SigTypeBinary, - Hash: hash, - PubKeyAlgo: signer.PubKeyAlgo, - KeyId: signer.KeyId, - IsLast: true, - } - if err := ops.Serialize(encryptedData); err != nil { - return nil, err - } - } - - if hints == nil { - hints = &FileHints{} - } - - w := encryptedData - if signer != nil { - // If we need to write a signature packet after the literal - // data then we need to stop literalData from closing - // encryptedData. - w = noOpCloser{encryptedData} - - } - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) - if err != nil { - return nil, err - } - - if signer != nil { - return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil - } - return literalData, nil -} - -// signatureWriter hashes the contents of a message while passing it along to -// literalData. When closed, it closes literalData, writes a signature packet -// to encryptedData and then also closes encryptedData. -type signatureWriter struct { - encryptedData io.WriteCloser - literalData io.WriteCloser - hashType crypto.Hash - h hash.Hash - signer *packet.PrivateKey - config *packet.Config -} - -func (s signatureWriter) Write(data []byte) (int, error) { - s.h.Write(data) - return s.literalData.Write(data) -} - -func (s signatureWriter) Close() error { - sig := &packet.Signature{ - SigType: packet.SigTypeBinary, - PubKeyAlgo: s.signer.PubKeyAlgo, - Hash: s.hashType, - CreationTime: s.config.Now(), - IssuerKeyId: &s.signer.KeyId, - } - - if err := sig.Sign(s.h, s.signer, s.config); err != nil { - return err - } - if err := s.literalData.Close(); err != nil { - return err - } - if err := sig.Serialize(s.encryptedData); err != nil { - return err - } - return s.encryptedData.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -// TODO: we have two of these in OpenPGP packages alone. This probably needs -// to be promoted somewhere more common. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -// AttachedSign is like openpgp.Encrypt (as in p.crypto/openpgp/write.go), but -// don't encrypt at all, just sign the literal unencrypted data. 
-// Unfortunately we need to duplicate some code here that's already -// in write.go -func AttachedSign(out io.WriteCloser, signed Entity, hints *FileHints, - config *packet.Config) (in io.WriteCloser, err error) { - - if hints == nil { - hints = &FileHints{} - } - - if config == nil { - config = &packet.Config{} - } - - var signer *packet.PrivateKey - - signKey, ok := signed.signingKey(config.Now()) - if !ok { - err = errors.InvalidArgumentError("no valid signing keys") - return - } - signer = signKey.PrivateKey - if signer == nil { - err = errors.InvalidArgumentError("no valid signing keys") - return - } - if signer.Encrypted { - err = errors.InvalidArgumentError("signing key must be decrypted") - return - } - - hasher := crypto.SHA512 - - ops := &packet.OnePassSignature{ - SigType: packet.SigTypeBinary, - Hash: hasher, - PubKeyAlgo: signer.PubKeyAlgo, - KeyId: signer.KeyId, - IsLast: true, - } - - if err = ops.Serialize(out); err != nil { - return - } - - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - - // We don't want the literal serializer to closer the output stream - // since we're going to need to write to it when we finish up the - // signature stuff. - in, err = packet.SerializeLiteral(noOpCloser{out}, hints.IsBinary, hints.FileName, epochSeconds) - - if err != nil { - return - } - - // If we need to write a signature packet after the literal - // data then we need to stop literalData from closing - // encryptedData. - in = signatureWriter{out, in, hasher, hasher.New(), signer, config} - - return -} diff --git a/vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go b/vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go deleted file mode 100644 index 5c5f415c8..000000000 --- a/vendor/github.com/keybase/go-crypto/rsa/pkcs1v15.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package rsa - -import ( - "crypto" - "crypto/subtle" - "errors" - "io" - "math/big" -) - -// This file implements encryption and decryption using PKCS#1 v1.5 padding. - -// PKCS1v15DecrypterOpts is for passing options to PKCS#1 v1.5 decryption using -// the crypto.Decrypter interface. -type PKCS1v15DecryptOptions struct { - // SessionKeyLen is the length of the session key that is being - // decrypted. If not zero, then a padding error during decryption will - // cause a random plaintext of this length to be returned rather than - // an error. These alternatives happen in constant time. - SessionKeyLen int -} - -// EncryptPKCS1v15 encrypts the given message with RSA and the padding scheme from PKCS#1 v1.5. -// The message must be no longer than the length of the public modulus minus 11 bytes. -// -// The rand parameter is used as a source of entropy to ensure that encrypting -// the same message twice doesn't result in the same ciphertext. -// -// WARNING: use of this function to encrypt plaintexts other than session keys -// is dangerous. Use RSA OAEP in new protocols. 
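Since this vendored file tracks the standard library's crypto/rsa, the same primitive can be exercised with the stock package. A minimal encrypt/decrypt round trip with a throwaway 2048-bit key, purely for illustration:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	msg := []byte("session key material")
	ct, err := rsa.EncryptPKCS1v15(rand.Reader, &priv.PublicKey, msg)
	if err != nil {
		panic(err)
	}

	pt, err := rsa.DecryptPKCS1v15(rand.Reader, priv, ct)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pt))
}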
-func EncryptPKCS1v15(rand io.Reader, pub *PublicKey, msg []byte) (out []byte, err error) { - if err := checkPub(pub); err != nil { - return nil, err - } - k := (pub.N.BitLen() + 7) / 8 - if len(msg) > k-11 { - err = ErrMessageTooLong - return - } - - // EM = 0x00 || 0x02 || PS || 0x00 || M - em := make([]byte, k) - em[1] = 2 - ps, mm := em[2:len(em)-len(msg)-1], em[len(em)-len(msg):] - err = nonZeroRandomBytes(ps, rand) - if err != nil { - return - } - em[len(em)-len(msg)-1] = 0 - copy(mm, msg) - - m := new(big.Int).SetBytes(em) - c := encrypt(new(big.Int), pub, m) - - copyWithLeftPad(em, c.Bytes()) - out = em - return -} - -// DecryptPKCS1v15 decrypts a plaintext using RSA and the padding scheme from PKCS#1 v1.5. -// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks. -// -// Note that whether this function returns an error or not discloses secret -// information. If an attacker can cause this function to run repeatedly and -// learn whether each instance returned an error then they can decrypt and -// forge signatures as if they had the private key. See -// DecryptPKCS1v15SessionKey for a way of solving this problem. -func DecryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (out []byte, err error) { - if err := checkPub(&priv.PublicKey); err != nil { - return nil, err - } - valid, out, index, err := decryptPKCS1v15(rand, priv, ciphertext) - if err != nil { - return - } - if valid == 0 { - return nil, ErrDecryption - } - out = out[index:] - return -} - -// DecryptPKCS1v15SessionKey decrypts a session key using RSA and the padding scheme from PKCS#1 v1.5. -// If rand != nil, it uses RSA blinding to avoid timing side-channel attacks. -// It returns an error if the ciphertext is the wrong length or if the -// ciphertext is greater than the public modulus. Otherwise, no error is -// returned. If the padding is valid, the resulting plaintext message is copied -// into key. Otherwise, key is unchanged. These alternatives occur in constant -// time. It is intended that the user of this function generate a random -// session key beforehand and continue the protocol with the resulting value. -// This will remove any possibility that an attacker can learn any information -// about the plaintext. -// See ``Chosen Ciphertext Attacks Against Protocols Based on the RSA -// Encryption Standard PKCS #1'', Daniel Bleichenbacher, Advances in Cryptology -// (Crypto '98). -// -// Note that if the session key is too small then it may be possible for an -// attacker to brute-force it. If they can do that then they can learn whether -// a random value was used (because it'll be different for the same ciphertext) -// and thus whether the padding was correct. This defeats the point of this -// function. Using at least a 16-byte key will protect against this attack. -func DecryptPKCS1v15SessionKey(rand io.Reader, priv *PrivateKey, ciphertext []byte, key []byte) (err error) { - if err := checkPub(&priv.PublicKey); err != nil { - return err - } - k := (priv.N.BitLen() + 7) / 8 - if k-(len(key)+3+8) < 0 { - return ErrDecryption - } - - valid, em, index, err := decryptPKCS1v15(rand, priv, ciphertext) - if err != nil { - return - } - - if len(em) != k { - // This should be impossible because decryptPKCS1v15 always - // returns the full slice. 
- return ErrDecryption - } - - valid &= subtle.ConstantTimeEq(int32(len(em)-index), int32(len(key))) - subtle.ConstantTimeCopy(valid, key, em[len(em)-len(key):]) - return -} - -// decryptPKCS1v15 decrypts ciphertext using priv and blinds the operation if -// rand is not nil. It returns one or zero in valid that indicates whether the -// plaintext was correctly structured. In either case, the plaintext is -// returned in em so that it may be read independently of whether it was valid -// in order to maintain constant memory access patterns. If the plaintext was -// valid then index contains the index of the original message in em. -func decryptPKCS1v15(rand io.Reader, priv *PrivateKey, ciphertext []byte) (valid int, em []byte, index int, err error) { - k := (priv.N.BitLen() + 7) / 8 - if k < 11 { - err = ErrDecryption - return - } - - c := new(big.Int).SetBytes(ciphertext) - m, err := decrypt(rand, priv, c) - if err != nil { - return - } - - em = leftPad(m.Bytes(), k) - firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0) - secondByteIsTwo := subtle.ConstantTimeByteEq(em[1], 2) - - // The remainder of the plaintext must be a string of non-zero random - // octets, followed by a 0, followed by the message. - // lookingForIndex: 1 iff we are still looking for the zero. - // index: the offset of the first zero byte. - lookingForIndex := 1 - - for i := 2; i < len(em); i++ { - equals0 := subtle.ConstantTimeByteEq(em[i], 0) - index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) - lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) - } - - // The PS padding must be at least 8 bytes long, and it starts two - // bytes into em. - validPS := subtle.ConstantTimeLessOrEq(2+8, index) - - valid = firstByteIsZero & secondByteIsTwo & (^lookingForIndex & 1) & validPS - index = subtle.ConstantTimeSelect(valid, index+1, 0) - return valid, em, index, nil -} - -// nonZeroRandomBytes fills the given slice with non-zero random octets. -func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { - _, err = io.ReadFull(rand, s) - if err != nil { - return - } - - for i := 0; i < len(s); i++ { - for s[i] == 0 { - _, err = io.ReadFull(rand, s[i:i+1]) - if err != nil { - return - } - // In tests, the PRNG may return all zeros so we do - // this to break the loop. - s[i] ^= 0x42 - } - } - - return -} - -// These are ASN1 DER structures: -// DigestInfo ::= SEQUENCE { -// digestAlgorithm AlgorithmIdentifier, -// digest OCTET STRING -// } -// For performance, we don't use the generic ASN1 encoder. Rather, we -// precompute a prefix of the digest value that makes a valid ASN1 DER string -// with the correct contents. 
-var hashPrefixes = map[crypto.Hash][]byte{ - crypto.MD5: {0x30, 0x20, 0x30, 0x0c, 0x06, 0x08, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, 0x05, 0x00, 0x04, 0x10}, - crypto.SHA1: {0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03, 0x02, 0x1a, 0x05, 0x00, 0x04, 0x14}, - crypto.SHA224: {0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04, 0x05, 0x00, 0x04, 0x1c}, - crypto.SHA256: {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20}, - crypto.SHA384: {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30}, - crypto.SHA512: {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40}, - crypto.MD5SHA1: {}, // A special TLS case which doesn't use an ASN1 prefix. - crypto.RIPEMD160: {0x30, 0x20, 0x30, 0x08, 0x06, 0x06, 0x28, 0xcf, 0x06, 0x03, 0x00, 0x31, 0x04, 0x14}, -} - -// SignPKCS1v15 calculates the signature of hashed using RSASSA-PKCS1-V1_5-SIGN from RSA PKCS#1 v1.5. -// Note that hashed must be the result of hashing the input message using the -// given hash function. If hash is zero, hashed is signed directly. This isn't -// advisable except for interoperability. -// -// If rand is not nil then RSA blinding will be used to avoid timing side-channel attacks. -// -// This function is deterministic. Thus, if the set of possible messages is -// small, an attacker may be able to build a map from messages to signatures -// and identify the signed messages. As ever, signatures provide authenticity, -// not confidentiality. -func SignPKCS1v15(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte) (s []byte, err error) { - hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed)) - if err != nil { - return - } - - tLen := len(prefix) + hashLen - k := (priv.N.BitLen() + 7) / 8 - if k < tLen+11 { - return nil, ErrMessageTooLong - } - - // EM = 0x00 || 0x01 || PS || 0x00 || T - em := make([]byte, k) - em[1] = 1 - for i := 2; i < k-tLen-1; i++ { - em[i] = 0xff - } - copy(em[k-tLen:k-hashLen], prefix) - copy(em[k-hashLen:k], hashed) - - m := new(big.Int).SetBytes(em) - c, err := decryptAndCheck(rand, priv, m) - if err != nil { - return - } - - copyWithLeftPad(em, c.Bytes()) - s = em - return -} - -// VerifyPKCS1v15 verifies an RSA PKCS#1 v1.5 signature. -// hashed is the result of hashing the input message using the given hash -// function and sig is the signature. A valid signature is indicated by -// returning a nil error. If hash is zero then hashed is used directly. This -// isn't advisable except for interoperability. 
-func VerifyPKCS1v15(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte) (err error) { - hashLen, prefix, err := pkcs1v15HashInfo(hash, len(hashed)) - if err != nil { - return - } - - tLen := len(prefix) + hashLen - k := (pub.N.BitLen() + 7) / 8 - if k < tLen+11 { - err = ErrVerification - return - } - - c := new(big.Int).SetBytes(sig) - m := encrypt(new(big.Int), pub, c) - em := leftPad(m.Bytes(), k) - // EM = 0x00 || 0x01 || PS || 0x00 || T - - ok := subtle.ConstantTimeByteEq(em[0], 0) - ok &= subtle.ConstantTimeByteEq(em[1], 1) - ok &= subtle.ConstantTimeCompare(em[k-hashLen:k], hashed) - ok &= subtle.ConstantTimeCompare(em[k-tLen:k-hashLen], prefix) - ok &= subtle.ConstantTimeByteEq(em[k-tLen-1], 0) - - for i := 2; i < k-tLen-1; i++ { - ok &= subtle.ConstantTimeByteEq(em[i], 0xff) - } - - if ok != 1 { - return ErrVerification - } - - return nil -} - -func pkcs1v15HashInfo(hash crypto.Hash, inLen int) (hashLen int, prefix []byte, err error) { - // Special case: crypto.Hash(0) is used to indicate that the data is - // signed directly. - if hash == 0 { - return inLen, nil, nil - } - - hashLen = hash.Size() - if inLen != hashLen { - return 0, nil, errors.New("crypto/rsa: input must be hashed message") - } - prefix, ok := hashPrefixes[hash] - if !ok { - return 0, nil, errors.New("crypto/rsa: unsupported hash function") - } - return -} - -// copyWithLeftPad copies src to the end of dest, padding with zero bytes as -// needed. -func copyWithLeftPad(dest, src []byte) { - numPaddingBytes := len(dest) - len(src) - for i := 0; i < numPaddingBytes; i++ { - dest[i] = 0 - } - copy(dest[numPaddingBytes:], src) -} diff --git a/vendor/github.com/keybase/go-crypto/rsa/pss.go b/vendor/github.com/keybase/go-crypto/rsa/pss.go deleted file mode 100644 index 8a94589b1..000000000 --- a/vendor/github.com/keybase/go-crypto/rsa/pss.go +++ /dev/null @@ -1,297 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package rsa - -// This file implements the PSS signature scheme [1]. -// -// [1] http://www.rsa.com/rsalabs/pkcs/files/h11300-wp-pkcs-1v2-2-rsa-cryptography-standard.pdf - -import ( - "bytes" - "crypto" - "errors" - "hash" - "io" - "math/big" -) - -func emsaPSSEncode(mHash []byte, emBits int, salt []byte, hash hash.Hash) ([]byte, error) { - // See [1], section 9.1.1 - hLen := hash.Size() - sLen := len(salt) - emLen := (emBits + 7) / 8 - - // 1. If the length of M is greater than the input limitation for the - // hash function (2^61 - 1 octets for SHA-1), output "message too - // long" and stop. - // - // 2. Let mHash = Hash(M), an octet string of length hLen. - - if len(mHash) != hLen { - return nil, errors.New("crypto/rsa: input must be hashed message") - } - - // 3. If emLen < hLen + sLen + 2, output "encoding error" and stop. - - if emLen < hLen+sLen+2 { - return nil, errors.New("crypto/rsa: encoding error") - } - - em := make([]byte, emLen) - db := em[:emLen-sLen-hLen-2+1+sLen] - h := em[emLen-sLen-hLen-2+1+sLen : emLen-1] - - // 4. Generate a random octet string salt of length sLen; if sLen = 0, - // then salt is the empty string. - // - // 5. Let - // M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt; - // - // M' is an octet string of length 8 + hLen + sLen with eight - // initial zero octets. - // - // 6. Let H = Hash(M'), an octet string of length hLen. 
-
-	var prefix [8]byte
-
-	hash.Write(prefix[:])
-	hash.Write(mHash)
-	hash.Write(salt)
-
-	h = hash.Sum(h[:0])
-	hash.Reset()
-
-	// 7. Generate an octet string PS consisting of emLen - sLen - hLen - 2
-	//    zero octets. The length of PS may be 0.
-	//
-	// 8. Let DB = PS || 0x01 || salt; DB is an octet string of length
-	//    emLen - hLen - 1.
-
-	db[emLen-sLen-hLen-2] = 0x01
-	copy(db[emLen-sLen-hLen-1:], salt)
-
-	// 9. Let dbMask = MGF(H, emLen - hLen - 1).
-	//
-	// 10. Let maskedDB = DB \xor dbMask.
-
-	mgf1XOR(db, hash, h)
-
-	// 11. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in
-	//     maskedDB to zero.
-
-	db[0] &= (0xFF >> uint(8*emLen-emBits))
-
-	// 12. Let EM = maskedDB || H || 0xbc.
-	em[emLen-1] = 0xBC
-
-	// 13. Output EM.
-	return em, nil
-}
-
-func emsaPSSVerify(mHash, em []byte, emBits, sLen int, hash hash.Hash) error {
-	// 1. If the length of M is greater than the input limitation for the
-	//    hash function (2^61 - 1 octets for SHA-1), output "inconsistent"
-	//    and stop.
-	//
-	// 2. Let mHash = Hash(M), an octet string of length hLen.
-	hLen := hash.Size()
-	if hLen != len(mHash) {
-		return ErrVerification
-	}
-
-	// 3. If emLen < hLen + sLen + 2, output "inconsistent" and stop.
-	emLen := (emBits + 7) / 8
-	if emLen < hLen+sLen+2 {
-		return ErrVerification
-	}
-
-	// 4. If the rightmost octet of EM does not have hexadecimal value
-	//    0xbc, output "inconsistent" and stop.
-	if em[len(em)-1] != 0xBC {
-		return ErrVerification
-	}
-
-	// 5. Let maskedDB be the leftmost emLen - hLen - 1 octets of EM, and
-	//    let H be the next hLen octets.
-	db := em[:emLen-hLen-1]
-	h := em[emLen-hLen-1 : len(em)-1]
-
-	// 6. If the leftmost 8 * emLen - emBits bits of the leftmost octet in
-	//    maskedDB are not all equal to zero, output "inconsistent" and
-	//    stop.
-	if em[0]&(0xFF<<uint(8-8*emLen+emBits)) != 0 {
-		return ErrVerification
-	}
-
-	// 7. Let dbMask = MGF(H, emLen - hLen - 1).
-	//
-	// 8. Let DB = maskedDB \xor dbMask.
-	mgf1XOR(db, hash, h)
-
-	// 9. Set the leftmost 8 * emLen - emBits bits of the leftmost octet in DB
-	//    to zero.
-	db[0] &= (0xFF >> uint(8*emLen-emBits))
-
-	if sLen == PSSSaltLengthAuto {
-	FindSaltLength:
-		for sLen = emLen - (hLen + 2); sLen >= 0; sLen-- {
-			switch db[emLen-hLen-sLen-2] {
-			case 1:
-				break FindSaltLength
-			case 0:
-				continue
-			default:
-				return ErrVerification
-			}
-		}
-		if sLen < 0 {
-			return ErrVerification
-		}
-	} else {
-		// 10. If the emLen - hLen - sLen - 2 leftmost octets of DB are not zero
-		//     or if the octet at position emLen - hLen - sLen - 1 (the leftmost
-		//     position is "position 1") does not have hexadecimal value 0x01,
-		//     output "inconsistent" and stop.
-		for _, e := range db[:emLen-hLen-sLen-2] {
-			if e != 0x00 {
-				return ErrVerification
-			}
-		}
-		if db[emLen-hLen-sLen-2] != 0x01 {
-			return ErrVerification
-		}
-	}
-
-	// 11. Let salt be the last sLen octets of DB.
-	salt := db[len(db)-sLen:]
-
-	// 12. Let
-	//     M' = (0x)00 00 00 00 00 00 00 00 || mHash || salt ;
-	//     M' is an octet string of length 8 + hLen + sLen with eight
-	//     initial zero octets.
-	//
-	// 13. Let H' = Hash(M'), an octet string of length hLen.
-	var prefix [8]byte
-	hash.Write(prefix[:])
-	hash.Write(mHash)
-	hash.Write(salt)
-
-	h0 := hash.Sum(nil)
-
-	// 14. If H = H', output "consistent." Otherwise, output "inconsistent."
-	if !bytes.Equal(h0, h) {
-		return ErrVerification
-	}
-	return nil
-}
-
-// signPSSWithSalt calculates the signature of hashed using PSS [1] with specified salt.
-// Note that hashed must be the result of hashing the input message using the
-// given hash function. salt is a random sequence of bytes whose length will be
-// later used to verify the signature.
-func signPSSWithSalt(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed, salt []byte) (s []byte, err error) { - nBits := priv.N.BitLen() - em, err := emsaPSSEncode(hashed, nBits-1, salt, hash.New()) - if err != nil { - return - } - m := new(big.Int).SetBytes(em) - c, err := decryptAndCheck(rand, priv, m) - if err != nil { - return - } - s = make([]byte, (nBits+7)/8) - copyWithLeftPad(s, c.Bytes()) - return -} - -const ( - // PSSSaltLengthAuto causes the salt in a PSS signature to be as large - // as possible when signing, and to be auto-detected when verifying. - PSSSaltLengthAuto = 0 - // PSSSaltLengthEqualsHash causes the salt length to equal the length - // of the hash used in the signature. - PSSSaltLengthEqualsHash = -1 -) - -// PSSOptions contains options for creating and verifying PSS signatures. -type PSSOptions struct { - // SaltLength controls the length of the salt used in the PSS - // signature. It can either be a number of bytes, or one of the special - // PSSSaltLength constants. - SaltLength int - - // Hash, if not zero, overrides the hash function passed to SignPSS. - // This is the only way to specify the hash function when using the - // crypto.Signer interface. - Hash crypto.Hash -} - -// HashFunc returns pssOpts.Hash so that PSSOptions implements -// crypto.SignerOpts. -func (pssOpts *PSSOptions) HashFunc() crypto.Hash { - return pssOpts.Hash -} - -func (opts *PSSOptions) saltLength() int { - if opts == nil { - return PSSSaltLengthAuto - } - return opts.SaltLength -} - -// SignPSS calculates the signature of hashed using RSASSA-PSS [1]. -// Note that hashed must be the result of hashing the input message using the -// given hash function. The opts argument may be nil, in which case sensible -// defaults are used. -func SignPSS(rand io.Reader, priv *PrivateKey, hash crypto.Hash, hashed []byte, opts *PSSOptions) (s []byte, err error) { - saltLength := opts.saltLength() - switch saltLength { - case PSSSaltLengthAuto: - saltLength = (priv.N.BitLen()+7)/8 - 2 - hash.Size() - case PSSSaltLengthEqualsHash: - saltLength = hash.Size() - } - - if opts != nil && opts.Hash != 0 { - hash = opts.Hash - } - - salt := make([]byte, saltLength) - if _, err = io.ReadFull(rand, salt); err != nil { - return - } - return signPSSWithSalt(rand, priv, hash, hashed, salt) -} - -// VerifyPSS verifies a PSS signature. -// hashed is the result of hashing the input message using the given hash -// function and sig is the signature. A valid signature is indicated by -// returning a nil error. The opts argument may be nil, in which case sensible -// defaults are used. -func VerifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, opts *PSSOptions) error { - return verifyPSS(pub, hash, hashed, sig, opts.saltLength()) -} - -// verifyPSS verifies a PSS signature with the given salt length. 
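For reference, a minimal sketch of how the SignPSS/VerifyPSS pair documented above is typically driven, using the standard library's crypto/rsa (same signatures as this vendored fork); the 2048-bit key and SHA-256 digest are arbitrary example choices.

```go
package main

import (
	"crypto"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	digest := sha256.Sum256([]byte("message to sign"))

	// PSSSaltLengthAuto: the largest salt that fits when signing,
	// auto-detected (the FindSaltLength loop above) when verifying.
	opts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthAuto, Hash: crypto.SHA256}

	sig, err := rsa.SignPSS(rand.Reader, priv, crypto.SHA256, digest[:], opts)
	if err != nil {
		panic(err)
	}
	err = rsa.VerifyPSS(&priv.PublicKey, crypto.SHA256, digest[:], sig, opts)
	fmt.Println("signature valid:", err == nil)
}
```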
-func verifyPSS(pub *PublicKey, hash crypto.Hash, hashed []byte, sig []byte, saltLen int) error { - nBits := pub.N.BitLen() - if len(sig) != (nBits+7)/8 { - return ErrVerification - } - s := new(big.Int).SetBytes(sig) - m := encrypt(new(big.Int), pub, s) - emBits := nBits - 1 - emLen := (emBits + 7) / 8 - if emLen < len(m.Bytes()) { - return ErrVerification - } - em := make([]byte, emLen) - copyWithLeftPad(em, m.Bytes()) - if saltLen == PSSSaltLengthEqualsHash { - saltLen = hash.Size() - } - return emsaPSSVerify(hashed, em, emBits, saltLen, hash.New()) -} diff --git a/vendor/github.com/keybase/go-crypto/rsa/rsa.go b/vendor/github.com/keybase/go-crypto/rsa/rsa.go deleted file mode 100644 index ff6b11b3e..000000000 --- a/vendor/github.com/keybase/go-crypto/rsa/rsa.go +++ /dev/null @@ -1,646 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package rsa implements RSA encryption as specified in PKCS#1. -// -// RSA is a single, fundamental operation that is used in this package to -// implement either public-key encryption or public-key signatures. -// -// The original specification for encryption and signatures with RSA is PKCS#1 -// and the terms "RSA encryption" and "RSA signatures" by default refer to -// PKCS#1 version 1.5. However, that specification has flaws and new designs -// should use version two, usually called by just OAEP and PSS, where -// possible. -// -// Two sets of interfaces are included in this package. When a more abstract -// interface isn't neccessary, there are functions for encrypting/decrypting -// with v1.5/OAEP and signing/verifying with v1.5/PSS. If one needs to abstract -// over the public-key primitive, the PrivateKey struct implements the -// Decrypter and Signer interfaces from the crypto package. -package rsa - -import ( - "crypto" - "crypto/rand" - "crypto/subtle" - "errors" - "hash" - "io" - "math/big" -) - -var bigZero = big.NewInt(0) -var bigOne = big.NewInt(1) - -// A PublicKey represents the public part of an RSA key. -type PublicKey struct { - N *big.Int // modulus - E int64 // public exponent -} - -// OAEPOptions is an interface for passing options to OAEP decryption using the -// crypto.Decrypter interface. -type OAEPOptions struct { - // Hash is the hash function that will be used when generating the mask. - Hash crypto.Hash - // Label is an arbitrary byte string that must be equal to the value - // used when encrypting. - Label []byte -} - -var ( - errPublicModulus = errors.New("crypto/rsa: missing public modulus") - errPublicExponentSmall = errors.New("crypto/rsa: public exponent too small") - errPublicExponentLarge = errors.New("crypto/rsa: public exponent too large") -) - -// checkPub sanity checks the public key before we use it. -// We require pub.E to fit into a 32-bit integer so that we -// do not have different behavior depending on whether -// int is 32 or 64 bits. See also -// http://www.imperialviolet.org/2012/03/16/rsae.html. -func checkPub(pub *PublicKey) error { - if pub.N == nil { - return errPublicModulus - } - if pub.E < 2 { - return errPublicExponentSmall - } - if pub.E > 1<<63-1 { - return errPublicExponentLarge - } - return nil -} - -// A PrivateKey represents an RSA key -type PrivateKey struct { - PublicKey // public part. - D *big.Int // private exponent - Primes []*big.Int // prime factors of N, has >= 2 elements. 
- - // Precomputed contains precomputed values that speed up private - // operations, if available. - Precomputed PrecomputedValues -} - -// Public returns the public key corresponding to priv. -func (priv *PrivateKey) Public() crypto.PublicKey { - return &priv.PublicKey -} - -// Sign signs msg with priv, reading randomness from rand. If opts is a -// *PSSOptions then the PSS algorithm will be used, otherwise PKCS#1 v1.5 will -// be used. This method is intended to support keys where the private part is -// kept in, for example, a hardware module. Common uses should use the Sign* -// functions in this package. -func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) { - if pssOpts, ok := opts.(*PSSOptions); ok { - return SignPSS(rand, priv, pssOpts.Hash, msg, pssOpts) - } - - return SignPKCS1v15(rand, priv, opts.HashFunc(), msg) -} - -// Decrypt decrypts ciphertext with priv. If opts is nil or of type -// *PKCS1v15DecryptOptions then PKCS#1 v1.5 decryption is performed. Otherwise -// opts must have type *OAEPOptions and OAEP decryption is done. -func (priv *PrivateKey) Decrypt(rand io.Reader, ciphertext []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) { - if opts == nil { - return DecryptPKCS1v15(rand, priv, ciphertext) - } - - switch opts := opts.(type) { - case *OAEPOptions: - return DecryptOAEP(opts.Hash.New(), rand, priv, ciphertext, opts.Label) - - case *PKCS1v15DecryptOptions: - if l := opts.SessionKeyLen; l > 0 { - plaintext = make([]byte, l) - if _, err := io.ReadFull(rand, plaintext); err != nil { - return nil, err - } - if err := DecryptPKCS1v15SessionKey(rand, priv, ciphertext, plaintext); err != nil { - return nil, err - } - return plaintext, nil - } else { - return DecryptPKCS1v15(rand, priv, ciphertext) - } - - default: - return nil, errors.New("crypto/rsa: invalid options for Decrypt") - } -} - -type PrecomputedValues struct { - Dp, Dq *big.Int // D mod (P-1) (or mod Q-1) - Qinv *big.Int // Q^-1 mod P - - // CRTValues is used for the 3rd and subsequent primes. Due to a - // historical accident, the CRT for the first two primes is handled - // differently in PKCS#1 and interoperability is sufficiently - // important that we mirror this. - CRTValues []CRTValue -} - -// CRTValue contains the precomputed Chinese remainder theorem values. -type CRTValue struct { - Exp *big.Int // D mod (prime-1). - Coeff *big.Int // R·Coeff ≡ 1 mod Prime. - R *big.Int // product of primes prior to this (inc p and q). -} - -// Validate performs basic sanity checks on the key. -// It returns nil if the key is valid, or else an error describing a problem. -func (priv *PrivateKey) Validate() error { - if err := checkPub(&priv.PublicKey); err != nil { - return err - } - - // Check that Πprimes == n. - modulus := new(big.Int).Set(bigOne) - for _, prime := range priv.Primes { - // Any primes ≤ 1 will cause divide-by-zero panics later. - if prime.Cmp(bigOne) <= 0 { - return errors.New("crypto/rsa: invalid prime value") - } - modulus.Mul(modulus, prime) - } - if modulus.Cmp(priv.N) != 0 { - return errors.New("crypto/rsa: invalid modulus") - } - - // Check that de ≡ 1 mod p-1, for each prime. - // This implies that e is coprime to each p-1 as e has a multiplicative - // inverse. Therefore e is coprime to lcm(p-1,q-1,r-1,...) = - // exponent(ℤ/nℤ). It also implies that a^de ≡ a mod p as a^(p-1) ≡ 1 - // mod p. Thus a^de ≡ a mod n for all a coprime to n, as required. 
- congruence := new(big.Int) - de := new(big.Int).SetInt64(int64(priv.E)) - de.Mul(de, priv.D) - for _, prime := range priv.Primes { - pminus1 := new(big.Int).Sub(prime, bigOne) - congruence.Mod(de, pminus1) - if congruence.Cmp(bigOne) != 0 { - return errors.New("crypto/rsa: invalid exponents") - } - } - return nil -} - -// GenerateKey generates an RSA keypair of the given bit size using the -// random source random (for example, crypto/rand.Reader). -func GenerateKey(random io.Reader, bits int) (priv *PrivateKey, err error) { - return GenerateMultiPrimeKey(random, 2, bits) -} - -// GenerateMultiPrimeKey generates a multi-prime RSA keypair of the given bit -// size and the given random source, as suggested in [1]. Although the public -// keys are compatible (actually, indistinguishable) from the 2-prime case, -// the private keys are not. Thus it may not be possible to export multi-prime -// private keys in certain formats or to subsequently import them into other -// code. -// -// Table 1 in [2] suggests maximum numbers of primes for a given size. -// -// [1] US patent 4405829 (1972, expired) -// [2] http://www.cacr.math.uwaterloo.ca/techreports/2006/cacr2006-16.pdf -func GenerateMultiPrimeKey(random io.Reader, nprimes int, bits int) (priv *PrivateKey, err error) { - priv = new(PrivateKey) - priv.E = 65537 - - if nprimes < 2 { - return nil, errors.New("crypto/rsa: GenerateMultiPrimeKey: nprimes must be >= 2") - } - - primes := make([]*big.Int, nprimes) - -NextSetOfPrimes: - for { - todo := bits - // crypto/rand should set the top two bits in each prime. - // Thus each prime has the form - // p_i = 2^bitlen(p_i) × 0.11... (in base 2). - // And the product is: - // P = 2^todo × α - // where α is the product of nprimes numbers of the form 0.11... - // - // If α < 1/2 (which can happen for nprimes > 2), we need to - // shift todo to compensate for lost bits: the mean value of 0.11... - // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2 - // will give good results. - if nprimes >= 7 { - todo += (nprimes - 2) / 5 - } - for i := 0; i < nprimes; i++ { - primes[i], err = rand.Prime(random, todo/(nprimes-i)) - if err != nil { - return nil, err - } - todo -= primes[i].BitLen() - } - - // Make sure that primes is pairwise unequal. - for i, prime := range primes { - for j := 0; j < i; j++ { - if prime.Cmp(primes[j]) == 0 { - continue NextSetOfPrimes - } - } - } - - n := new(big.Int).Set(bigOne) - totient := new(big.Int).Set(bigOne) - pminus1 := new(big.Int) - for _, prime := range primes { - n.Mul(n, prime) - pminus1.Sub(prime, bigOne) - totient.Mul(totient, pminus1) - } - if n.BitLen() != bits { - // This should never happen for nprimes == 2 because - // crypto/rand should set the top two bits in each prime. - // For nprimes > 2 we hope it does not happen often. - continue NextSetOfPrimes - } - - g := new(big.Int) - priv.D = new(big.Int) - y := new(big.Int) - e := big.NewInt(int64(priv.E)) - g.GCD(priv.D, y, e, totient) - - if g.Cmp(bigOne) == 0 { - if priv.D.Sign() < 0 { - priv.D.Add(priv.D, totient) - } - priv.Primes = primes - priv.N = n - - break - } - } - - priv.Precompute() - return -} - -// incCounter increments a four byte, big-endian counter. -func incCounter(c *[4]byte) { - if c[3]++; c[3] != 0 { - return - } - if c[2]++; c[2] != 0 { - return - } - if c[1]++; c[1] != 0 { - return - } - c[0]++ -} - -// mgf1XOR XORs the bytes in out with a mask generated using the MGF1 function -// specified in PKCS#1 v2.1. 
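A short usage sketch for the key-generation and validation entry points documented above, again written against the standard library's identical API; the key size is an arbitrary example value.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

func main() {
	// GenerateKey is the two-prime case of GenerateMultiPrimeKey.
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// GenerateKey already precomputes CRT values and returns a valid key;
	// the explicit calls below only illustrate the API surface.
	priv.Precompute()
	if err := priv.Validate(); err != nil {
		panic(err)
	}
	fmt.Println("modulus bits:", priv.N.BitLen(), "primes:", len(priv.Primes))
}
```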
-func mgf1XOR(out []byte, hash hash.Hash, seed []byte) { - var counter [4]byte - var digest []byte - - done := 0 - for done < len(out) { - hash.Write(seed) - hash.Write(counter[0:4]) - digest = hash.Sum(digest[:0]) - hash.Reset() - - for i := 0; i < len(digest) && done < len(out); i++ { - out[done] ^= digest[i] - done++ - } - incCounter(&counter) - } -} - -// ErrMessageTooLong is returned when attempting to encrypt a message which is -// too large for the size of the public key. -var ErrMessageTooLong = errors.New("crypto/rsa: message too long for RSA public key size") - -func encrypt(c *big.Int, pub *PublicKey, m *big.Int) *big.Int { - e := big.NewInt(int64(pub.E)) - c.Exp(m, e, pub.N) - return c -} - -// EncryptOAEP encrypts the given message with RSA-OAEP. -// -// OAEP is parameterised by a hash function that is used as a random oracle. -// Encryption and decryption of a given message must use the same hash function -// and sha256.New() is a reasonable choice. -// -// The random parameter is used as a source of entropy to ensure that -// encrypting the same message twice doesn't result in the same ciphertext. -// -// The label parameter may contain arbitrary data that will not be encrypted, -// but which gives important context to the message. For example, if a given -// public key is used to decrypt two types of messages then distinct label -// values could be used to ensure that a ciphertext for one purpose cannot be -// used for another by an attacker. If not required it can be empty. -// -// The message must be no longer than the length of the public modulus less -// twice the hash length plus 2. -func EncryptOAEP(hash hash.Hash, random io.Reader, pub *PublicKey, msg []byte, label []byte) (out []byte, err error) { - if err := checkPub(pub); err != nil { - return nil, err - } - hash.Reset() - k := (pub.N.BitLen() + 7) / 8 - if len(msg) > k-2*hash.Size()-2 { - err = ErrMessageTooLong - return - } - - hash.Write(label) - lHash := hash.Sum(nil) - hash.Reset() - - em := make([]byte, k) - seed := em[1 : 1+hash.Size()] - db := em[1+hash.Size():] - - copy(db[0:hash.Size()], lHash) - db[len(db)-len(msg)-1] = 1 - copy(db[len(db)-len(msg):], msg) - - _, err = io.ReadFull(random, seed) - if err != nil { - return - } - - mgf1XOR(db, hash, seed) - mgf1XOR(seed, hash, db) - - m := new(big.Int) - m.SetBytes(em) - c := encrypt(new(big.Int), pub, m) - out = c.Bytes() - - if len(out) < k { - // If the output is too small, we need to left-pad with zeros. - t := make([]byte, k) - copy(t[k-len(out):], out) - out = t - } - - return -} - -// ErrDecryption represents a failure to decrypt a message. -// It is deliberately vague to avoid adaptive attacks. -var ErrDecryption = errors.New("crypto/rsa: decryption error") - -// ErrVerification represents a failure to verify a signature. -// It is deliberately vague to avoid adaptive attacks. -var ErrVerification = errors.New("crypto/rsa: verification error") - -// modInverse returns ia, the inverse of a in the multiplicative group of prime -// order n. It requires that a be a member of the group (i.e. less than n). -func modInverse(a, n *big.Int) (ia *big.Int, ok bool) { - g := new(big.Int) - x := new(big.Int) - y := new(big.Int) - g.GCD(x, y, a, n) - if g.Cmp(bigOne) != 0 { - // In this case, a and n aren't coprime and we cannot calculate - // the inverse. This happens because the values of n are nearly - // prime (being the product of two primes) rather than truly - // prime. 
- return - } - - if x.Cmp(bigOne) < 0 { - // 0 is not the multiplicative inverse of any element so, if x - // < 1, then x is negative. - x.Add(x, n) - } - - return x, true -} - -// Precompute performs some calculations that speed up private key operations -// in the future. -func (priv *PrivateKey) Precompute() { - if priv.Precomputed.Dp != nil { - return - } - - priv.Precomputed.Dp = new(big.Int).Sub(priv.Primes[0], bigOne) - priv.Precomputed.Dp.Mod(priv.D, priv.Precomputed.Dp) - - priv.Precomputed.Dq = new(big.Int).Sub(priv.Primes[1], bigOne) - priv.Precomputed.Dq.Mod(priv.D, priv.Precomputed.Dq) - - priv.Precomputed.Qinv = new(big.Int).ModInverse(priv.Primes[1], priv.Primes[0]) - - r := new(big.Int).Mul(priv.Primes[0], priv.Primes[1]) - priv.Precomputed.CRTValues = make([]CRTValue, len(priv.Primes)-2) - for i := 2; i < len(priv.Primes); i++ { - prime := priv.Primes[i] - values := &priv.Precomputed.CRTValues[i-2] - - values.Exp = new(big.Int).Sub(prime, bigOne) - values.Exp.Mod(priv.D, values.Exp) - - values.R = new(big.Int).Set(r) - values.Coeff = new(big.Int).ModInverse(r, prime) - - r.Mul(r, prime) - } -} - -// decrypt performs an RSA decryption, resulting in a plaintext integer. If a -// random source is given, RSA blinding is used. -func decrypt(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) { - // TODO(agl): can we get away with reusing blinds? - if c.Cmp(priv.N) > 0 { - err = ErrDecryption - return - } - - var ir *big.Int - if random != nil { - // Blinding enabled. Blinding involves multiplying c by r^e. - // Then the decryption operation performs (m^e * r^e)^d mod n - // which equals mr mod n. The factor of r can then be removed - // by multiplying by the multiplicative inverse of r. - - var r *big.Int - - for { - r, err = rand.Int(random, priv.N) - if err != nil { - return - } - if r.Cmp(bigZero) == 0 { - r = bigOne - } - var ok bool - ir, ok = modInverse(r, priv.N) - if ok { - break - } - } - bigE := big.NewInt(int64(priv.E)) - rpowe := new(big.Int).Exp(r, bigE, priv.N) - cCopy := new(big.Int).Set(c) - cCopy.Mul(cCopy, rpowe) - cCopy.Mod(cCopy, priv.N) - c = cCopy - } - - if priv.Precomputed.Dp == nil { - m = new(big.Int).Exp(c, priv.D, priv.N) - } else { - // We have the precalculated values needed for the CRT. - m = new(big.Int).Exp(c, priv.Precomputed.Dp, priv.Primes[0]) - m2 := new(big.Int).Exp(c, priv.Precomputed.Dq, priv.Primes[1]) - m.Sub(m, m2) - if m.Sign() < 0 { - m.Add(m, priv.Primes[0]) - } - m.Mul(m, priv.Precomputed.Qinv) - m.Mod(m, priv.Primes[0]) - m.Mul(m, priv.Primes[1]) - m.Add(m, m2) - - for i, values := range priv.Precomputed.CRTValues { - prime := priv.Primes[2+i] - m2.Exp(c, values.Exp, prime) - m2.Sub(m2, m) - m2.Mul(m2, values.Coeff) - m2.Mod(m2, prime) - if m2.Sign() < 0 { - m2.Add(m2, prime) - } - m2.Mul(m2, values.R) - m.Add(m, m2) - } - } - - if ir != nil { - // Unblind. - m.Mul(m, ir) - m.Mod(m, priv.N) - } - - return -} - -func decryptAndCheck(random io.Reader, priv *PrivateKey, c *big.Int) (m *big.Int, err error) { - m, err = decrypt(random, priv, c) - if err != nil { - return nil, err - } - - // In order to defend against errors in the CRT computation, m^e is - // calculated, which should match the original ciphertext. - check := encrypt(new(big.Int), &priv.PublicKey, m) - if c.Cmp(check) != 0 { - return nil, errors.New("rsa: internal error") - } - return m, nil -} - -// DecryptOAEP decrypts ciphertext using RSA-OAEP. - -// OAEP is parameterised by a hash function that is used as a random oracle. 
-// Encryption and decryption of a given message must use the same hash function -// and sha256.New() is a reasonable choice. -// -// The random parameter, if not nil, is used to blind the private-key operation -// and avoid timing side-channel attacks. Blinding is purely internal to this -// function – the random data need not match that used when encrypting. -// -// The label parameter must match the value given when encrypting. See -// EncryptOAEP for details. -func DecryptOAEP(hash hash.Hash, random io.Reader, priv *PrivateKey, ciphertext []byte, label []byte) (msg []byte, err error) { - if err := checkPub(&priv.PublicKey); err != nil { - return nil, err - } - k := (priv.N.BitLen() + 7) / 8 - if len(ciphertext) > k || - k < hash.Size()*2+2 { - err = ErrDecryption - return - } - - c := new(big.Int).SetBytes(ciphertext) - - m, err := decrypt(random, priv, c) - if err != nil { - return - } - - hash.Write(label) - lHash := hash.Sum(nil) - hash.Reset() - - // Converting the plaintext number to bytes will strip any - // leading zeros so we may have to left pad. We do this unconditionally - // to avoid leaking timing information. (Although we still probably - // leak the number of leading zeros. It's not clear that we can do - // anything about this.) - em := leftPad(m.Bytes(), k) - - firstByteIsZero := subtle.ConstantTimeByteEq(em[0], 0) - - seed := em[1 : hash.Size()+1] - db := em[hash.Size()+1:] - - mgf1XOR(seed, hash, db) - mgf1XOR(db, hash, seed) - - lHash2 := db[0:hash.Size()] - - // We have to validate the plaintext in constant time in order to avoid - // attacks like: J. Manger. A Chosen Ciphertext Attack on RSA Optimal - // Asymmetric Encryption Padding (OAEP) as Standardized in PKCS #1 - // v2.0. In J. Kilian, editor, Advances in Cryptology. - lHash2Good := subtle.ConstantTimeCompare(lHash, lHash2) - - // The remainder of the plaintext must be zero or more 0x00, followed - // by 0x01, followed by the message. - // lookingForIndex: 1 iff we are still looking for the 0x01 - // index: the offset of the first 0x01 byte - // invalid: 1 iff we saw a non-zero byte before the 0x01. - var lookingForIndex, index, invalid int - lookingForIndex = 1 - rest := db[hash.Size():] - - for i := 0; i < len(rest); i++ { - equals0 := subtle.ConstantTimeByteEq(rest[i], 0) - equals1 := subtle.ConstantTimeByteEq(rest[i], 1) - index = subtle.ConstantTimeSelect(lookingForIndex&equals1, i, index) - lookingForIndex = subtle.ConstantTimeSelect(equals1, 0, lookingForIndex) - invalid = subtle.ConstantTimeSelect(lookingForIndex&^equals0, 1, invalid) - } - - if firstByteIsZero&lHash2Good&^invalid&^lookingForIndex != 1 { - err = ErrDecryption - return - } - - msg = rest[index+1:] - return -} - -// leftPad returns a new slice of length size. The contents of input are right -// aligned in the new slice. 
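A minimal OAEP round-trip sketch matching the EncryptOAEP/DecryptOAEP contract described above (same hash and label on both sides); it uses the standard library's crypto/rsa, and the label value is an arbitrary example.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	label := []byte("example-label") // must be identical for encrypt and decrypt

	ct, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, &priv.PublicKey, []byte("attack at dawn"), label)
	if err != nil {
		panic(err)
	}
	// Passing rand.Reader here enables blinding of the private-key operation;
	// the hash and label must match the ones used when encrypting.
	pt, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, priv, ct, label)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt)
}
```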
-func leftPad(input []byte, size int) (out []byte) { - n := len(input) - if n > size { - n = size - } - out = make([]byte, size) - copy(out[len(out)-n:], input) - return -} diff --git a/vendor/github.com/mitchellh/cli/cli.go b/vendor/github.com/mitchellh/cli/cli.go index ba0cc6dbb..c2dbe55aa 100644 --- a/vendor/github.com/mitchellh/cli/cli.go +++ b/vendor/github.com/mitchellh/cli/cli.go @@ -404,8 +404,8 @@ func (c *CLI) initAutocomplete() { cmd.Flags = map[string]complete.Predictor{ "-" + c.AutocompleteInstall: complete.PredictNothing, "-" + c.AutocompleteUninstall: complete.PredictNothing, - "-help": complete.PredictNothing, - "-version": complete.PredictNothing, + "-help": complete.PredictNothing, + "-version": complete.PredictNothing, } } cmd.GlobalFlags = c.AutocompleteGlobalFlags diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing.go b/vendor/github.com/mitchellh/go-testing-interface/testing.go index 204afb420..b05a49a69 100644 --- a/vendor/github.com/mitchellh/go-testing-interface/testing.go +++ b/vendor/github.com/mitchellh/go-testing-interface/testing.go @@ -22,6 +22,7 @@ type T interface { Log(args ...interface{}) Logf(format string, args ...interface{}) Name() string + Parallel() Skip(args ...interface{}) SkipNow() Skipf(format string, args ...interface{}) @@ -31,10 +32,12 @@ type T interface { // RuntimeT implements T and can be instantiated and run at runtime to // mimic *testing.T behavior. Unlike *testing.T, this will simply panic // for calls to Fatal. For calls to Error, you'll have to check the errors -// list to determine whether to exit yourself. Name and Skip methods are -// unimplemented noops. +// list to determine whether to exit yourself. +// +// Parallel does not do anything. type RuntimeT struct { - failed bool + failed bool + skipped bool } func (t *RuntimeT) Error(args ...interface{}) { @@ -77,8 +80,26 @@ func (t *RuntimeT) Logf(format string, args ...interface{}) { log.Println(fmt.Sprintf(format, args...)) } -func (t *RuntimeT) Name() string { return "" } -func (t *RuntimeT) Skip(args ...interface{}) {} -func (t *RuntimeT) SkipNow() {} -func (t *RuntimeT) Skipf(format string, args ...interface{}) {} -func (t *RuntimeT) Skipped() bool { return false } +func (t *RuntimeT) Name() string { + return "" +} + +func (t *RuntimeT) Parallel() {} + +func (t *RuntimeT) Skip(args ...interface{}) { + log.Print(args...) + t.SkipNow() +} + +func (t *RuntimeT) SkipNow() { + t.skipped = true +} + +func (t *RuntimeT) Skipf(format string, args ...interface{}) { + log.Printf(format, args...) + t.SkipNow() +} + +func (t *RuntimeT) Skipped() bool { + return t.skipped +} diff --git a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go index 31b42cadf..f09c066a4 100644 --- a/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go +++ b/vendor/github.com/mitchellh/go-testing-interface/testing_go19.go @@ -27,6 +27,7 @@ type T interface { Log(args ...interface{}) Logf(format string, args ...interface{}) Name() string + Parallel() Skip(args ...interface{}) SkipNow() Skipf(format string, args ...interface{}) @@ -38,9 +39,11 @@ type T interface { // mimic *testing.T behavior. Unlike *testing.T, this will simply panic // for calls to Fatal. For calls to Error, you'll have to check the errors // list to determine whether to exit yourself. +// +// Parallel does not do anything. 
type RuntimeT struct { - skipped bool failed bool + skipped bool } func (t *RuntimeT) Error(args ...interface{}) { @@ -87,6 +90,8 @@ func (t *RuntimeT) Name() string { return "" } +func (t *RuntimeT) Parallel() {} + func (t *RuntimeT) Skip(args ...interface{}) { log.Print(args...) t.SkipNow() diff --git a/vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go b/vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go index 6d127c490..8f57ea0a4 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go +++ b/vendor/github.com/nbutton23/zxcvbn-go/entropy/entropyCalculator.go @@ -1,13 +1,12 @@ package entropy import ( + "github.com/nbutton23/zxcvbn-go/adjacency" + "github.com/nbutton23/zxcvbn-go/match" + "github.com/nbutton23/zxcvbn-go/utils/math" "math" "regexp" "unicode" - - "github.com/nbutton23/zxcvbn-go/adjacency" - "github.com/nbutton23/zxcvbn-go/match" - zxcvbnmath "github.com/nbutton23/zxcvbn-go/utils/math" ) const ( diff --git a/vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go b/vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go index cd013bb3d..4f68a6dca 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go +++ b/vendor/github.com/nbutton23/zxcvbn-go/scoring/scoring.go @@ -2,12 +2,11 @@ package scoring import ( "fmt" - "math" - "sort" - "github.com/nbutton23/zxcvbn-go/entropy" "github.com/nbutton23/zxcvbn-go/match" - zxcvbnmath "github.com/nbutton23/zxcvbn-go/utils/math" + "github.com/nbutton23/zxcvbn-go/utils/math" + "math" + "sort" ) const ( diff --git a/vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go b/vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go index 49d1fdbcb..9c34b1c8c 100644 --- a/vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go +++ b/vendor/github.com/nbutton23/zxcvbn-go/zxcvbn.go @@ -6,7 +6,7 @@ import ( "github.com/nbutton23/zxcvbn-go/match" "github.com/nbutton23/zxcvbn-go/matching" "github.com/nbutton23/zxcvbn-go/scoring" - zxcvbnmath "github.com/nbutton23/zxcvbn-go/utils/math" + "github.com/nbutton23/zxcvbn-go/utils/math" ) // PasswordStrength takes a password, userInputs and optional filters and returns a MinEntropyMatch diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore deleted file mode 100644 index e48bab32a..000000000 --- a/vendor/github.com/pierrec/lz4/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -# Created by https://www.gitignore.io/api/macos - -### macOS ### -*.DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon - - -# Thumbnails -._* - -# Files that might appear in the root of a volume -.DocumentRevisions-V100 -.fseventsd -.Spotlight-V100 -.TemporaryItems -.Trashes -.VolumeIcon.icns -.com.apple.timemachine.donotpresent - -# Directories potentially created on remote AFP share -.AppleDB -.AppleDesktop -Network Trash Folder -Temporary Items -.apdisk - -# End of https://www.gitignore.io/api/macos - -lz4c/lz4c diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml deleted file mode 100644 index b2c806d57..000000000 --- a/vendor/github.com/pierrec/lz4/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go: - - 1.8.x - - 1.9.x - - 1.10.x - - master - -matrix: - fast_finish: true - allow_failures: - - go: master - -sudo: false - -script: - - go test -v -cpu=2 - - go test -v -cpu=2 -race diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE deleted file mode 100644 index bd899d835..000000000 --- a/vendor/github.com/pierrec/lz4/LICENSE +++ 
/dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2015, Pierre Curto -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of xxHash nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md deleted file mode 100644 index 50a10ee16..000000000 --- a/vendor/github.com/pierrec/lz4/README.md +++ /dev/null @@ -1,24 +0,0 @@ -[![godoc](https://godoc.org/github.com/pierrec/lz4?status.png)](https://godoc.org/github.com/pierrec/lz4) - -# lz4 -LZ4 compression and decompression in pure Go. - -## Usage - -```go -import "github.com/pierrec/lz4" -``` - -## Description -Package lz4 implements reading and writing lz4 compressed data (a frame), -as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. - -This package is **compatible with the LZ4 frame format** although the block level compression -and decompression functions are exposed and are fully compatible with the lz4 block format -definition, they are low level and should not be used directly. - -For a complete description of an lz4 compressed block, see: -http://fastcompression.blogspot.fr/2011/05/lz4-explained.html - -See https://github.com/Cyan4973/lz4 for the reference C implementation. - diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go deleted file mode 100644 index ef24f17e5..000000000 --- a/vendor/github.com/pierrec/lz4/block.go +++ /dev/null @@ -1,397 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "errors" -) - -var ( - // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed - // block is corrupted or the destination buffer is not large enough for the uncompressed data. - ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short") - // ErrInvalid is returned when reading an invalid LZ4 archive. - ErrInvalid = errors.New("lz4: bad magic number") -) - -// blockHash hashes 4 bytes into a value < winSize. -func blockHash(x uint32) uint32 { - const hasher uint32 = 2654435761 // Knuth multiplicative hash. 
- return x * hasher >> hashShift -} - -// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible. -func CompressBlockBound(n int) int { - return n + n/255 + 16 -} - -// UncompressBlock uncompresses the source buffer into the destination one, -// and returns the uncompressed size. -// -// The destination buffer must be sized appropriately. -// -// An error is returned if the source data is invalid or the destination buffer is too small. -func UncompressBlock(src, dst []byte) (si int, err error) { - defer func() { - // It is now faster to let the runtime panic and recover on out of bound slice access - // than checking indices as we go along. - if recover() != nil { - err = ErrInvalidSourceShortBuffer - } - }() - sn := len(src) - if sn == 0 { - return 0, nil - } - var di int - - for { - // Literals and match lengths (token). - b := int(src[si]) - si++ - - // Literals. - if lLen := b >> 4; lLen > 0 { - if lLen == 0xF { - for src[si] == 0xFF { - lLen += 0xFF - si++ - } - lLen += int(src[si]) - si++ - } - i := si - si += lLen - di += copy(dst[di:], src[i:si]) - - if si >= sn { - return di, nil - } - } - - si++ - _ = src[si] // Bound check elimination. - offset := int(src[si-1]) | int(src[si])<<8 - si++ - - // Match. - mLen := b & 0xF - if mLen == 0xF { - for src[si] == 0xFF { - mLen += 0xFF - si++ - } - mLen += int(src[si]) - si++ - } - mLen += minMatch - - // Copy the match. - i := di - offset - if offset > 0 && mLen >= offset { - // Efficiently copy the match dst[di-offset:di] into the dst slice. - bytesToCopy := offset * (mLen / offset) - expanded := dst[i:] - for n := offset; n <= bytesToCopy+offset; n *= 2 { - copy(expanded[n:], expanded[:n]) - } - di += bytesToCopy - mLen -= bytesToCopy - } - di += copy(dst[di:], dst[i:i+mLen]) - } -} - -// CompressBlock compresses the source buffer into the destination one. -// This is the fast version of LZ4 compression and also the default one. -// The size of hashTable must be at least 64Kb. -// -// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible. -// -// An error is returned if the destination buffer is too small. -func CompressBlock(src, dst []byte, hashTable []int) (di int, err error) { - defer func() { - if recover() != nil { - err = ErrInvalidSourceShortBuffer - } - }() - - sn, dn := len(src)-mfLimit, len(dst) - if sn <= 0 || dn == 0 { - return 0, nil - } - var si int - - // Fast scan strategy: the hash table only stores the last 4 bytes sequences. - // const accInit = 1 << skipStrength - - anchor := si // Position of the current literals. - // acc := accInit // Variable step: improves performance on non-compressible data. - - for si < sn { - // Hash the next 4 bytes (sequence)... - match := binary.LittleEndian.Uint32(src[si:]) - h := blockHash(match) - - ref := hashTable[h] - hashTable[h] = si - if ref >= sn { // Invalid reference (dirty hashtable). - si++ - continue - } - offset := si - ref - if offset <= 0 || offset >= winSize || // Out of window. - match != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches. - // si += acc >> skipStrength - // acc++ - si++ - continue - } - - // Match found. - // acc = accInit - lLen := si - anchor // Literal length. - - // Encode match length part 1. - si += minMatch - mLen := si // Match length has minMatch already. - // Find the longest match, first looking by batches of 8 bytes. 
-		for si < sn && binary.LittleEndian.Uint64(src[si:]) == binary.LittleEndian.Uint64(src[si-offset:]) {
-			si += 8
-		}
-		// Then byte by byte.
-		for si < sn && src[si] == src[si-offset] {
-			si++
-		}
-
-		mLen = si - mLen
-		if mLen < 0xF {
-			dst[di] = byte(mLen)
-		} else {
-			dst[di] = 0xF
-		}
-
-		// Encode literals length.
-		if lLen < 0xF {
-			dst[di] |= byte(lLen << 4)
-		} else {
-			dst[di] |= 0xF0
-			di++
-			l := lLen - 0xF
-			for ; l >= 0xFF; l -= 0xFF {
-				dst[di] = 0xFF
-				di++
-			}
-			dst[di] = byte(l)
-		}
-		di++
-
-		// Literals.
-		copy(dst[di:], src[anchor:anchor+lLen])
-		di += lLen + 2
-		anchor = si
-
-		// Encode offset.
-		_ = dst[di] // Bound check elimination.
-		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
-
-		// Encode match length part 2.
-		if mLen >= 0xF {
-			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
-				dst[di] = 0xFF
-				di++
-			}
-			dst[di] = byte(mLen)
-			di++
-		}
-	}
-
-	if anchor == 0 {
-		// Incompressible.
-		return 0, nil
-	}
-
-	// Last literals.
-	lLen := len(src) - anchor
-	if lLen < 0xF {
-		dst[di] = byte(lLen << 4)
-	} else {
-		dst[di] = 0xF0
-		di++
-		for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
-			dst[di] = 0xFF
-			di++
-		}
-		dst[di] = byte(lLen)
-	}
-	di++
-
-	// Write the last literals.
-	if di >= anchor {
-		// Incompressible.
-		return 0, nil
-	}
-	di += copy(dst[di:], src[anchor:])
-	return di, nil
-}
-
-// CompressBlockHC compresses the source buffer src into the destination dst
-// with max search depth (use 0 or negative value for no max).
-//
-// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
-//
-// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
-//
-// An error is returned if the destination buffer is too small.
-func CompressBlockHC(src, dst []byte, depth int) (di int, err error) {
-	defer func() {
-		if recover() != nil {
-			err = ErrInvalidSourceShortBuffer
-		}
-	}()
-
-	sn, dn := len(src)-mfLimit, len(dst)
-	if sn <= 0 || dn == 0 {
-		return 0, nil
-	}
-	var si int
-
-	// hashTable: stores the last position found for a given hash
-	// chaingTable: stores previous positions for a given hash
-	var hashTable, chainTable [winSize]int
-
-	if depth <= 0 {
-		depth = winSize
-	}
-
-	anchor := si
-	for si < sn {
-		// Hash the next 4 bytes (sequence).
-		match := binary.LittleEndian.Uint32(src[si:])
-		h := blockHash(match)
-
-		// Follow the chain until out of window and give the longest match.
-		mLen := 0
-		offset := 0
-		for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
-			// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
-			// must match to improve on the match length.
-			if src[next+mLen] != src[si+mLen] {
-				continue
-			}
-			ml := 0
-			// Compare the current position with a previous with the same hash.
-			for ml < sn-si && binary.LittleEndian.Uint64(src[next+ml:]) == binary.LittleEndian.Uint64(src[si+ml:]) {
-				ml += 8
-			}
-			for ml < sn-si && src[next+ml] == src[si+ml] {
-				ml++
-			}
-			if ml+1 < minMatch || ml <= mLen {
-				// Match too small (<minMatch) or smaller than the current match.
-				continue
-			}
-			// Found a longer match, keep its position and length.
-			mLen = ml
-			offset = si - next
-			// Try another previous position with the same hash.
-			try--
-		}
-		chainTable[si&winMask] = hashTable[h]
-		hashTable[h] = si
-
-		// No match found.
-		if mLen == 0 {
-			si++
-			continue
-		}
-
-		// Match found.
-		// Update hash/chain tables with overlapping bytes:
-		// si already hashed, add everything from si+1 up to the match length.
-		winStart := si + 1
-		if ws := si + mLen - winSize; ws > winStart {
-			winStart = ws
-		}
-		for si, ml := winStart, si+mLen; si < ml; {
-			match >>= 8
-			match |= uint32(src[si+3]) << 24
-			h := blockHash(match)
-			chainTable[si&winMask] = hashTable[h]
-			hashTable[h] = si
-			si++
-		}
-
-		lLen := si - anchor
-		si += mLen
-		mLen -= minMatch // Match length does not include minMatch.
-
-		if mLen < 0xF {
-			dst[di] = byte(mLen)
-		} else {
-			dst[di] = 0xF
-		}
-
-		// Encode literals length.
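A minimal sketch of the block-level API removed in this file (CompressBlockBound, CompressBlock, UncompressBlock), following the signatures of this vendored lz4 version; the sample input is arbitrary and the caller is assumed to track the uncompressed size out of band.

```go
package main

import (
	"fmt"

	"github.com/pierrec/lz4"
)

func main() {
	src := []byte("hello hello hello hello hello hello lz4 block example")

	// CompressBlock wants a 64Kb hash table (1<<16 ints) and a destination
	// of at least CompressBlockBound(len(src)) bytes.
	hashTable := make([]int, 1<<16)
	dst := make([]byte, lz4.CompressBlockBound(len(src)))
	n, err := lz4.CompressBlock(src, dst, hashTable)
	if err != nil {
		panic(err)
	}
	if n == 0 {
		fmt.Println("incompressible input, store it raw")
		return
	}

	// The block format does not carry the uncompressed size, so the caller
	// must size the output buffer itself.
	out := make([]byte, len(src))
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d -> %q\n", len(src), n, out[:m])
}
```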
- if lLen < 0xF { - dst[di] |= byte(lLen << 4) - } else { - dst[di] |= 0xF0 - di++ - l := lLen - 0xF - for ; l >= 0xFF; l -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(l) - } - di++ - - // Literals. - copy(dst[di:], src[anchor:anchor+lLen]) - di += lLen - anchor = si - - // Encode offset. - di += 2 - dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) - - // Encode match length part 2. - if mLen >= 0xF { - for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(mLen) - di++ - } - } - - if anchor == 0 { - // Incompressible. - return 0, nil - } - - // Last literals. - lLen := len(src) - anchor - if lLen < 0xF { - dst[di] = byte(lLen << 4) - } else { - dst[di] = 0xF0 - di++ - lLen -= 0xF - for ; lLen >= 0xFF; lLen -= 0xFF { - dst[di] = 0xFF - di++ - } - dst[di] = byte(lLen) - } - di++ - - // Write the last literals. - if di >= anchor { - // Incompressible. - return 0, nil - } - di += copy(dst[di:], src[anchor:]) - return di, nil -} diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go deleted file mode 100644 index bc5e78d40..000000000 --- a/vendor/github.com/pierrec/lz4/debug.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build lz4debug - -package lz4 - -import ( - "fmt" - "os" - "path/filepath" - "runtime" -) - -const debugFlag = true - -func debug(args ...interface{}) { - _, file, line, _ := runtime.Caller(1) - file = filepath.Base(file) - - f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0]) - if f[len(f)-1] != '\n' { - f += "\n" - } - fmt.Fprintf(os.Stderr, f, args[1:]...) -} diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go deleted file mode 100644 index 44211ad96..000000000 --- a/vendor/github.com/pierrec/lz4/debug_stub.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !lz4debug - -package lz4 - -const debugFlag = false - -func debug(args ...interface{}) {} diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go deleted file mode 100644 index 850a6fdf6..000000000 --- a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go +++ /dev/null @@ -1,222 +0,0 @@ -// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version). -// (https://github.com/Cyan4973/XXH/) -package xxh32 - -import ( - "encoding/binary" -) - -const ( - prime32_1 uint32 = 2654435761 - prime32_2 uint32 = 2246822519 - prime32_3 uint32 = 3266489917 - prime32_4 uint32 = 668265263 - prime32_5 uint32 = 374761393 - - prime32_1plus2 uint32 = 606290984 - prime32_minus1 uint32 = 1640531535 -) - -// XXHZero represents an xxhash32 object with seed 0. -type XXHZero struct { - v1 uint32 - v2 uint32 - v3 uint32 - v4 uint32 - totalLen uint64 - buf [16]byte - bufused int -} - -// Sum appends the current hash to b and returns the resulting slice. -// It does not change the underlying hash state. -func (xxh XXHZero) Sum(b []byte) []byte { - h32 := xxh.Sum32() - return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) -} - -// Reset resets the Hash to its initial state. -func (xxh *XXHZero) Reset() { - xxh.v1 = prime32_1plus2 - xxh.v2 = prime32_2 - xxh.v3 = 0 - xxh.v4 = prime32_minus1 - xxh.totalLen = 0 - xxh.bufused = 0 -} - -// Size returns the number of bytes returned by Sum(). -func (xxh *XXHZero) Size() int { - return 4 -} - -// BlockSize gives the minimum number of bytes accepted by Write(). -func (xxh *XXHZero) BlockSize() int { - return 1 -} - -// Write adds input bytes to the Hash. 
-// It never returns an error. -func (xxh *XXHZero) Write(input []byte) (int, error) { - if xxh.totalLen == 0 { - xxh.Reset() - } - n := len(input) - m := xxh.bufused - - xxh.totalLen += uint64(n) - - r := len(xxh.buf) - m - if n < r { - copy(xxh.buf[m:], input) - xxh.bufused += len(input) - return n, nil - } - - p := 0 - // Causes compiler to work directly from registers instead of stack: - v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 - if m > 0 { - // some data left from previous update - copy(xxh.buf[xxh.bufused:], input[:r]) - xxh.bufused += len(input) - r - - // fast rotl(13) - buf := xxh.buf[:16] // BCE hint. - v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime32_2) * prime32_1 - v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime32_2) * prime32_1 - v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime32_2) * prime32_1 - v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime32_2) * prime32_1 - p = r - xxh.bufused = 0 - } - - for n := n - 16; p <= n; p += 16 { - sub := input[p:][:16] //BCE hint for compiler - v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime32_2) * prime32_1 - v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime32_2) * prime32_1 - v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime32_2) * prime32_1 - v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime32_2) * prime32_1 - } - xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 - - copy(xxh.buf[xxh.bufused:], input[p:]) - xxh.bufused += len(input) - p - - return n, nil -} - -// Sum32 returns the 32 bits Hash value. -func (xxh *XXHZero) Sum32() uint32 { - h32 := uint32(xxh.totalLen) - if h32 >= 16 { - h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) - } else { - h32 += prime32_5 - } - - p := 0 - n := xxh.bufused - buf := xxh.buf - for n := n - 4; p <= n; p += 4 { - h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime32_3 - h32 = rol17(h32) * prime32_4 - } - for ; p < n; p++ { - h32 += uint32(buf[p]) * prime32_5 - h32 = rol11(h32) * prime32_1 - } - - h32 ^= h32 >> 15 - h32 *= prime32_2 - h32 ^= h32 >> 13 - h32 *= prime32_3 - h32 ^= h32 >> 16 - - return h32 -} - -// ChecksumZero returns the 32bits Hash value. -func ChecksumZero(input []byte) uint32 { - n := len(input) - h32 := uint32(n) - - if n < 16 { - h32 += prime32_5 - } else { - v1 := prime32_1plus2 - v2 := prime32_2 - v3 := uint32(0) - v4 := prime32_minus1 - p := 0 - for n := n - 16; p <= n; p += 16 { - sub := input[p:][:16] //BCE hint for compiler - v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime32_2) * prime32_1 - v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime32_2) * prime32_1 - v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime32_2) * prime32_1 - v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime32_2) * prime32_1 - } - input = input[p:] - n -= p - h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - } - - p := 0 - for n := n - 4; p <= n; p += 4 { - h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime32_3 - h32 = rol17(h32) * prime32_4 - } - for p < n { - h32 += uint32(input[p]) * prime32_5 - h32 = rol11(h32) * prime32_1 - p++ - } - - h32 ^= h32 >> 15 - h32 *= prime32_2 - h32 ^= h32 >> 13 - h32 *= prime32_3 - h32 ^= h32 >> 16 - - return h32 -} - -// Uint32Zero hashes x with seed 0. 
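A small sketch of the xxh32 API removed here, showing that the one-shot ChecksumZero and the streaming XXHZero agree on the same input; since the package lives under internal/, this would only compile as a test inside the github.com/pierrec/lz4 module, and the package name and sample data are hypothetical.

```go
// Hypothetical in-module example test; internal/ packages cannot be imported
// from outside the github.com/pierrec/lz4 module.
package xxh32_test

import (
	"fmt"

	"github.com/pierrec/lz4/internal/xxh32"
)

func ExampleChecksumZero() {
	data := []byte("xxhash32 with seed 0")

	// One-shot checksum.
	oneShot := xxh32.ChecksumZero(data)

	// Streaming: feeding the same bytes in pieces yields the same value.
	var h xxh32.XXHZero
	h.Write(data[:8])
	h.Write(data[8:])

	fmt.Println(oneShot == h.Sum32())
	// Output: true
}
```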
-func Uint32Zero(x uint32) uint32 { - h := prime32_5 + 4 + x*prime32_3 - h = rol17(h) * prime32_4 - h ^= h >> 15 - h *= prime32_2 - h ^= h >> 13 - h *= prime32_3 - h ^= h >> 16 - return h -} - -func rol1(u uint32) uint32 { - return u<<1 | u>>31 -} - -func rol7(u uint32) uint32 { - return u<<7 | u>>25 -} - -func rol11(u uint32) uint32 { - return u<<11 | u>>21 -} - -func rol12(u uint32) uint32 { - return u<<12 | u>>20 -} - -func rol13(u uint32) uint32 { - return u<<13 | u>>19 -} - -func rol17(u uint32) uint32 { - return u<<17 | u>>15 -} - -func rol18(u uint32) uint32 { - return u<<18 | u>>14 -} diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go deleted file mode 100644 index 35802756c..000000000 --- a/vendor/github.com/pierrec/lz4/lz4.go +++ /dev/null @@ -1,68 +0,0 @@ -// Package lz4 implements reading and writing lz4 compressed data (a frame), -// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html. -// -// Although the block level compression and decompression functions are exposed and are fully compatible -// with the lz4 block format definition, they are low level and should not be used directly. -// For a complete description of an lz4 compressed block, see: -// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html -// -// See https://github.com/Cyan4973/lz4 for the reference C implementation. -// -package lz4 - -const ( - // Extension is the LZ4 frame file name extension - Extension = ".lz4" - // Version is the LZ4 frame format version - Version = 1 - - frameMagic uint32 = 0x184D2204 - frameSkipMagic uint32 = 0x184D2A50 - - // The following constants are used to setup the compression algorithm. - minMatch = 4 // the minimum size of the match sequence size (4 bytes) - winSizeLog = 16 // LZ4 64Kb window size limit - winSize = 1 << winSizeLog - winMask = winSize - 1 // 64Kb window of previous data for dependent blocks - compressedBlockFlag = 1 << 31 - compressedBlockMask = compressedBlockFlag - 1 - - // hashLog determines the size of the hash table used to quickly find a previous match position. - // Its value influences the compression speed and memory usage, the lower the faster, - // but at the expense of the compression ratio. - // 16 seems to be the best compromise. - hashLog = 16 - hashTableSize = 1 << hashLog - hashShift = uint((minMatch * 8) - hashLog) - - mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes. - skipStrength = 6 // variable step for fast scan -) - -// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb. -var ( - bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20} - bsMapValue = make(map[int]byte, len(bsMapID)) -) - -// Reversed. -func init() { - for i, v := range bsMapID { - bsMapValue[v] = i - } -} - -// Header describes the various flags that can be set on a Writer or obtained from a Reader. -// The default values match those of the LZ4 frame format definition -// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html). -// -// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls. -// It is the caller responsibility to check them if necessary. -type Header struct { - BlockChecksum bool // Compressed blocks checksum flag. - NoChecksum bool // Frame checksum flag. - BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB. - Size uint64 // Frame total size. It is _not_ computed by the Writer. 
- CompressionLevel int // Compression level (higher is better, use 0 for fastest compression). - done bool // Header processed flag (Read or Write and checked). -} diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go deleted file mode 100644 index 9a0fb0070..000000000 --- a/vendor/github.com/pierrec/lz4/lz4_go1.10.go +++ /dev/null @@ -1,29 +0,0 @@ -//+build go1.10 - -package lz4 - -import ( - "fmt" - "strings" -) - -func (h Header) String() string { - var s strings.Builder - - s.WriteString(fmt.Sprintf("%T{", h)) - if h.BlockChecksum { - s.WriteString("BlockChecksum: true ") - } - if h.NoChecksum { - s.WriteString("NoChecksum: true ") - } - if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { - s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) - } - if l := h.CompressionLevel; l != 0 { - s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) - } - s.WriteByte('}') - - return s.String() -} diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go deleted file mode 100644 index 12c761a2e..000000000 --- a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go +++ /dev/null @@ -1,29 +0,0 @@ -//+build !go1.10 - -package lz4 - -import ( - "bytes" - "fmt" -) - -func (h Header) String() string { - var s bytes.Buffer - - s.WriteString(fmt.Sprintf("%T{", h)) - if h.BlockChecksum { - s.WriteString("BlockChecksum: true ") - } - if h.NoChecksum { - s.WriteString("NoChecksum: true ") - } - if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 { - s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs)) - } - if l := h.CompressionLevel; l != 0 { - s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l)) - } - s.WriteByte('}') - - return s.String() -} diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go deleted file mode 100644 index f08db47df..000000000 --- a/vendor/github.com/pierrec/lz4/reader.go +++ /dev/null @@ -1,295 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "fmt" - "io" - "io/ioutil" - - "github.com/pierrec/lz4/internal/xxh32" -) - -// Reader implements the LZ4 frame decoder. -// The Header is set after the first call to Read(). -// The Header may change between Read() calls in case of concatenated frames. -type Reader struct { - Header - - buf [8]byte // Scrap buffer. - pos int64 // Current position in src. - src io.Reader // Source. - zdata []byte // Compressed data. - data []byte // Uncompressed data. - idx int // Index of unread bytes into data. - checksum xxh32.XXHZero // Frame hash. -} - -// NewReader returns a new LZ4 frame decoder. -// No access to the underlying io.Reader is performed. -func NewReader(src io.Reader) *Reader { - r := &Reader{src: src} - return r -} - -// readHeader checks the frame magic number and parses the frame descriptoz. -// Skippable frames are supported even as a first frame although the LZ4 -// specifications recommends skippable frames not to be used as first frames. -func (z *Reader) readHeader(first bool) error { - defer z.checksum.Reset() - - buf := z.buf[:] - for { - magic, err := z.readUint32() - if err != nil { - z.pos += 4 - if !first && err == io.ErrUnexpectedEOF { - return io.EOF - } - return err - } - if magic == frameMagic { - break - } - if magic>>8 != frameSkipMagic>>8 { - return ErrInvalid - } - skipSize, err := z.readUint32() - if err != nil { - return err - } - z.pos += 4 - m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) - if err != nil { - return err - } - z.pos += m - } - - // Header. 
- if _, err := io.ReadFull(z.src, buf[:2]); err != nil { - return err - } - z.pos += 8 - - b := buf[0] - if v := b >> 6; v != Version { - return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version) - } - if b>>5&1 == 0 { - return fmt.Errorf("lz4: block dependency not supported") - } - z.BlockChecksum = b>>4&1 > 0 - frameSize := b>>3&1 > 0 - z.NoChecksum = b>>2&1 == 0 - - bmsID := buf[1] >> 4 & 0x7 - bSize, ok := bsMapID[bmsID] - if !ok { - return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID) - } - z.BlockMaxSize = bSize - - // Allocate the compressed/uncompressed buffers. - // The compressed buffer cannot exceed the uncompressed one. - if n := 2 * bSize; cap(z.zdata) < n { - z.zdata = make([]byte, n, n) - } - if debugFlag { - debug("header block max size id=%d size=%d", bmsID, bSize) - } - z.zdata = z.zdata[:bSize] - z.data = z.zdata[:cap(z.zdata)][bSize:] - z.idx = len(z.data) - - z.checksum.Write(buf[0:2]) - - if frameSize { - buf := buf[:8] - if _, err := io.ReadFull(z.src, buf); err != nil { - return err - } - z.Size = binary.LittleEndian.Uint64(buf) - z.pos += 8 - z.checksum.Write(buf) - } - - // Header checksum. - if _, err := io.ReadFull(z.src, buf[:1]); err != nil { - return err - } - z.pos++ - if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { - return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h) - } - - z.Header.done = true - if debugFlag { - debug("header read: %v", z.Header) - } - - return nil -} - -// Read decompresses data from the underlying source into the supplied buffer. -// -// Since there can be multiple streams concatenated, Header values may -// change between calls to Read(). If that is the case, no data is actually read from -// the underlying io.Reader, to allow for potential input buffer resizing. -func (z *Reader) Read(buf []byte) (int, error) { - if debugFlag { - debug("Read buf len=%d", len(buf)) - } - if !z.Header.done { - if err := z.readHeader(true); err != nil { - return 0, err - } - if debugFlag { - debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d", - len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx) - } - } - - if len(buf) == 0 { - return 0, nil - } - - if z.idx == len(z.data) { - // No data ready for reading, process the next block. - if debugFlag { - debug("reading block from writer") - } - // Block length: 0 = end of frame, highest bit set: uncompressed. - bLen, err := z.readUint32() - if err != nil { - return 0, err - } - z.pos += 4 - - if bLen == 0 { - // End of frame reached. - if !z.NoChecksum { - // Validate the frame checksum. - checksum, err := z.readUint32() - if err != nil { - return 0, err - } - if debugFlag { - debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum) - } - z.pos += 4 - if h := z.checksum.Sum32(); checksum != h { - return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum) - } - } - - // Get ready for the next concatenated frame and keep the position. - pos := z.pos - z.Reset(z.src) - z.pos = pos - - // Since multiple frames can be concatenated, check for more. - return 0, z.readHeader(false) - } - - if debugFlag { - debug("raw block size %d", bLen) - } - if bLen&compressedBlockFlag > 0 { - // Uncompressed block. 
- bLen &= compressedBlockMask - if debugFlag { - debug("uncompressed block size %d", bLen) - } - if int(bLen) > cap(z.data) { - return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) - } - z.data = z.data[:bLen] - if _, err := io.ReadFull(z.src, z.data); err != nil { - return 0, err - } - z.pos += int64(bLen) - - if z.BlockChecksum { - checksum, err := z.readUint32() - if err != nil { - return 0, err - } - z.pos += 4 - - if h := xxh32.ChecksumZero(z.data); h != checksum { - return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) - } - } - - } else { - // Compressed block. - if debugFlag { - debug("compressed block size %d", bLen) - } - if int(bLen) > cap(z.data) { - return 0, fmt.Errorf("lz4: invalid block size: %d", bLen) - } - zdata := z.zdata[:bLen] - if _, err := io.ReadFull(z.src, zdata); err != nil { - return 0, err - } - z.pos += int64(bLen) - - if z.BlockChecksum { - checksum, err := z.readUint32() - if err != nil { - return 0, err - } - z.pos += 4 - - if h := xxh32.ChecksumZero(zdata); h != checksum { - return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum) - } - } - - n, err := UncompressBlock(zdata, z.data) - if err != nil { - return 0, err - } - z.data = z.data[:n] - } - - if !z.NoChecksum { - z.checksum.Write(z.data) - if debugFlag { - debug("current frame checksum %x", z.checksum.Sum32()) - } - } - z.idx = 0 - } - - n := copy(buf, z.data[z.idx:]) - z.idx += n - if debugFlag { - debug("copied %d bytes to input", n) - } - - return n, nil -} - -// Reset discards the Reader's state and makes it equivalent to the -// result of its original state from NewReader, but reading from r instead. -// This permits reusing a Reader rather than allocating a new one. -func (z *Reader) Reset(r io.Reader) { - z.Header = Header{} - z.pos = 0 - z.src = r - z.zdata = z.zdata[:0] - z.data = z.data[:0] - z.idx = 0 - z.checksum.Reset() -} - -// readUint32 reads an uint32 into the supplied buffer. -// The idea is to make use of the already allocated buffers avoiding additional allocations. -func (z *Reader) readUint32() (uint32, error) { - buf := z.buf[:4] - _, err := io.ReadFull(z.src, buf) - x := binary.LittleEndian.Uint32(buf) - return x, err -} diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go deleted file mode 100644 index 012043802..000000000 --- a/vendor/github.com/pierrec/lz4/writer.go +++ /dev/null @@ -1,267 +0,0 @@ -package lz4 - -import ( - "encoding/binary" - "fmt" - "io" - - "github.com/pierrec/lz4/internal/xxh32" -) - -// Writer implements the LZ4 frame encoder. -type Writer struct { - Header - - buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes - dst io.Writer // Destination. - checksum xxh32.XXHZero // Frame checksum. - zdata []byte // Compressed data. - data []byte // Data to be compressed. - idx int // Index into data. - hashtable [winSize]int // Hash table used in CompressBlock(). -} - -// NewWriter returns a new LZ4 frame encoder. -// No access to the underlying io.Writer is performed. -// The supplied Header is checked at the first Write. -// It is ok to change it before the first Write but then not until a Reset() is performed. -func NewWriter(dst io.Writer) *Writer { - return &Writer{dst: dst} -} - -// writeHeader builds and writes the header (magic+header) to the underlying io.Writer. -func (z *Writer) writeHeader() error { - // Default to 4Mb if BlockMaxSize is not set. 
- if z.Header.BlockMaxSize == 0 { - z.Header.BlockMaxSize = bsMapID[7] - } - // The only option that needs to be validated. - bSize := z.Header.BlockMaxSize - bSizeID, ok := bsMapValue[bSize] - if !ok { - return fmt.Errorf("lz4: invalid block max size: %d", bSize) - } - // Allocate the compressed/uncompressed buffers. - // The compressed buffer cannot exceed the uncompressed one. - if n := 2 * bSize; cap(z.zdata) < n { - z.zdata = make([]byte, n, n) - } - z.zdata = z.zdata[:bSize] - z.data = z.zdata[:cap(z.zdata)][bSize:] - z.idx = 0 - - // Size is optional. - buf := z.buf[:] - - // Set the fixed size data: magic number, block max size and flags. - binary.LittleEndian.PutUint32(buf[0:], frameMagic) - flg := byte(Version << 6) - flg |= 1 << 5 // No block dependency. - if z.Header.BlockChecksum { - flg |= 1 << 4 - } - if z.Header.Size > 0 { - flg |= 1 << 3 - } - if !z.Header.NoChecksum { - flg |= 1 << 2 - } - buf[4] = flg - buf[5] = bSizeID << 4 - - // Current buffer size: magic(4) + flags(1) + block max size (1). - n := 6 - // Optional items. - if z.Header.Size > 0 { - binary.LittleEndian.PutUint64(buf[n:], z.Header.Size) - n += 8 - } - - // The header checksum includes the flags, block max size and optional Size. - buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF) - z.checksum.Reset() - - // Header ready, write it out. - if _, err := z.dst.Write(buf[0 : n+1]); err != nil { - return err - } - z.Header.done = true - if debugFlag { - debug("wrote header %v", z.Header) - } - - return nil -} - -// Write compresses data from the supplied buffer into the underlying io.Writer. -// Write does not return until the data has been written. -func (z *Writer) Write(buf []byte) (int, error) { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return 0, err - } - } - if debugFlag { - debug("input buffer len=%d index=%d", len(buf), z.idx) - } - - zn := len(z.data) - var n int - for len(buf) > 0 { - if z.idx == 0 && len(buf) >= zn { - // Avoid a copy as there is enough data for a block. - if err := z.compressBlock(buf[:zn]); err != nil { - return n, err - } - n += zn - buf = buf[zn:] - continue - } - // Accumulate the data to be compressed. - m := copy(z.data[z.idx:], buf) - n += m - z.idx += m - buf = buf[m:] - if debugFlag { - debug("%d bytes copied to buf, current index %d", n, z.idx) - } - - if z.idx < len(z.data) { - // Buffer not filled. - if debugFlag { - debug("need more data for compression") - } - return n, nil - } - - // Buffer full. - if err := z.compressBlock(z.data); err != nil { - return n, err - } - z.idx = 0 - } - - return n, nil -} - -// compressBlock compresses a block. -func (z *Writer) compressBlock(data []byte) error { - if !z.NoChecksum { - z.checksum.Write(data) - } - - // The compressed block size cannot exceed the input's. - var zn int - var err error - - if level := z.Header.CompressionLevel; level != 0 { - zn, err = CompressBlockHC(data, z.zdata, level) - } else { - zn, err = CompressBlock(data, z.zdata, z.hashtable[:]) - } - - var zdata []byte - var bLen uint32 - if debugFlag { - debug("block compression %d => %d", len(data), zn) - } - if err == nil && zn > 0 && zn < len(data) { - // Compressible and compressed size smaller than uncompressed: ok! - bLen = uint32(zn) - zdata = z.zdata[:zn] - } else { - // Uncompressed block. - bLen = uint32(len(data)) | compressedBlockFlag - zdata = data - } - if debugFlag { - debug("block compression to be written len=%d data len=%d", bLen, len(zdata)) - } - - // Write the block. 
- if err := z.writeUint32(bLen); err != nil { - return err - } - if _, err := z.dst.Write(zdata); err != nil { - return err - } - - if z.BlockChecksum { - checksum := xxh32.ChecksumZero(zdata) - if debugFlag { - debug("block checksum %x", checksum) - } - if err := z.writeUint32(checksum); err != nil { - return err - } - } - if debugFlag { - debug("current frame checksum %x", z.checksum.Sum32()) - } - - return nil -} - -// Flush flushes any pending compressed data to the underlying writer. -// Flush does not return until the data has been written. -// If the underlying writer returns an error, Flush returns that error. -func (z *Writer) Flush() error { - if debugFlag { - debug("flush with index %d", z.idx) - } - if z.idx == 0 { - return nil - } - - return z.compressBlock(z.data[:z.idx]) -} - -// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer. -func (z *Writer) Close() error { - if !z.Header.done { - if err := z.writeHeader(); err != nil { - return err - } - } - - if err := z.Flush(); err != nil { - return err - } - - if debugFlag { - debug("writing last empty block") - } - if err := z.writeUint32(0); err != nil { - return err - } - if !z.NoChecksum { - checksum := z.checksum.Sum32() - if debugFlag { - debug("stream checksum %x", checksum) - } - if err := z.writeUint32(checksum); err != nil { - return err - } - } - return nil -} - -// Reset clears the state of the Writer z such that it is equivalent to its -// initial state from NewWriter, but instead writing to w. -// No access to the underlying io.Writer is performed. -func (z *Writer) Reset(w io.Writer) { - z.Header = Header{} - z.dst = w - z.checksum.Reset() - z.zdata = z.zdata[:0] - z.data = z.data[:0] - z.idx = 0 -} - -// writeUint32 writes a uint32 to the underlying writer. -func (z *Writer) writeUint32(x uint32) error { - buf := z.buf[:4] - binary.LittleEndian.PutUint32(buf, x) - _, err := z.dst.Write(buf) - return err -} diff --git a/vendor/github.com/pquerna/otp/.travis.yml b/vendor/github.com/pquerna/otp/.travis.yml deleted file mode 100644 index 5a9ed93af..000000000 --- a/vendor/github.com/pquerna/otp/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -env: - - GO111MODULE=on - -go: - - "1.12" diff --git a/vendor/github.com/pquerna/otp/NOTICE b/vendor/github.com/pquerna/otp/NOTICE deleted file mode 100644 index 50e2e7501..000000000 --- a/vendor/github.com/pquerna/otp/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -otp -Copyright (c) 2014, Paul Querna - -This product includes software developed by -Paul Querna (http://paul.querna.org/). diff --git a/vendor/github.com/pquerna/otp/README.md b/vendor/github.com/pquerna/otp/README.md deleted file mode 100644 index 148e8980d..000000000 --- a/vendor/github.com/pquerna/otp/README.md +++ /dev/null @@ -1,60 +0,0 @@ -# otp: One Time Password utilities Go / Golang - -[![GoDoc](https://godoc.org/github.com/pquerna/otp?status.svg)](https://godoc.org/github.com/pquerna/otp) [![Build Status](https://travis-ci.org/pquerna/otp.svg?branch=master)](https://travis-ci.org/pquerna/otp) - -# Why One Time Passwords? - -One Time Passwords (OTPs) are an mechanism to improve security over passwords alone. When a Time-based OTP (TOTP) is stored on a user's phone, and combined with something the user knows (Password), you have an easy on-ramp to [Multi-factor authentication](http://en.wikipedia.org/wiki/Multi-factor_authentication) without adding a dependency on a SMS provider. 
This Password and TOTP combination is used by many popular websites including Google, Github, Facebook, Salesforce and many others. - -The `otp` library enables you to easily add TOTPs to your own application, increasing your user's security against mass-password breaches and malware. - -Because TOTP is standardized and widely deployed, there are many [mobile clients and software implementations](http://en.wikipedia.org/wiki/Time-based_One-time_Password_Algorithm#Client_implementations). - -## `otp` Supports: - -* Generating QR Code images for easy user enrollment. -* Time-based One-time Password Algorithm (TOTP) (RFC 6238): Time based OTP, the most commonly used method. -* HMAC-based One-time Password Algorithm (HOTP) (RFC 4226): Counter based OTP, which TOTP is based upon. -* Generation and Validation of codes for either algorithm. - -## Implementing TOTP in your application: - -### User Enrollment - -For an example of a working enrollment work flow, [Github has documented theirs](https://help.github.com/articles/configuring-two-factor-authentication-via-a-totp-mobile-app/ -), but the basics are: - -1. Generate new TOTP Key for a User. `key,_ := totp.Generate(...)`. -1. Display the Key's Secret and QR-Code for the User. `key.Secret()` and `key.Image(...)`. -1. Test that the user can successfully use their TOTP. `totp.Validate(...)`. -1. Store TOTP Secret for the User in your backend. `key.Secret()` -1. Provide the user with "recovery codes". (See Recovery Codes bellow) - -### Code Generation - -* In either TOTP or HOTP cases, use the `GenerateCode` function and a counter or - `time.Time` struct to generate a valid code compatible with most implementations. -* For uncommon or custom settings, or to catch unlikely errors, use `GenerateCodeCustom` - in either module. - -### Validation - -1. Prompt and validate User's password as normal. -1. If the user has TOTP enabled, prompt for TOTP passcode. -1. Retrieve the User's TOTP Secret from your backend. -1. Validate the user's passcode. `totp.Validate(...)` - - -### Recovery Codes - -When a user loses access to their TOTP device, they would no longer have access to their account. Because TOTPs are often configured on mobile devices that can be lost, stolen or damaged, this is a common problem. For this reason many providers give their users "backup codes" or "recovery codes". These are a set of one time use codes that can be used instead of the TOTP. These can simply be randomly generated strings that you store in your backend. [Github's documentation provides an overview of the user experience]( -https://help.github.com/articles/downloading-your-two-factor-authentication-recovery-codes/). - - -## Improvements, bugs, adding feature, etc: - -Please [open issues in Github](https://github.com/pquerna/otp/issues) for ideas, bugs, and general thoughts. Pull requests are of course preferred :) - -## License - -`otp` is licensed under the [Apache License, Version 2.0](./LICENSE) diff --git a/vendor/github.com/pquerna/otp/doc.go b/vendor/github.com/pquerna/otp/doc.go deleted file mode 100644 index b8b4c8cc1..000000000 --- a/vendor/github.com/pquerna/otp/doc.go +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2014 Paul Querna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package otp implements both HOTP and TOTP based -// one time passcodes in a Google Authenticator compatible manner. -// -// When adding a TOTP for a user, you must store the "secret" value -// persistently. It is recommend to store the secret in an encrypted field in your -// datastore. Due to how TOTP works, it is not possible to store a hash -// for the secret value like you would a password. -// -// To enroll a user, you must first generate an OTP for them. Google -// Authenticator supports using a QR code as an enrollment method: -// -// import ( -// "github.com/pquerna/otp/totp" -// -// "bytes" -// "image/png" -// ) -// -// key, err := totp.Generate(totp.GenerateOpts{ -// Issuer: "Example.com", -// AccountName: "alice@example.com", -// }) -// -// // Convert TOTP key into a QR code encoded as a PNG image. -// var buf bytes.Buffer -// img, err := key.Image(200, 200) -// png.Encode(&buf, img) -// -// // display the QR code to the user. -// display(buf.Bytes()) -// -// // Now Validate that the user's successfully added the passcode. -// passcode := promptForPasscode() -// valid := totp.Validate(passcode, key.Secret()) -// -// if valid { -// // User successfully used their TOTP, save it to your backend! -// storeSecret("alice@example.com", key.Secret()) -// } -// -// Validating a TOTP passcode is very easy, just prompt the user for a passcode -// and retrieve the associated user's previously stored secret. -// import "github.com/pquerna/otp/totp" -// -// passcode := promptForPasscode() -// secret := getSecret("alice@example.com") -// -// valid := totp.Validate(passcode, secret) -// -// if valid { -// // Success! continue login process. 
-// } -package otp diff --git a/vendor/github.com/pquerna/otp/go.mod b/vendor/github.com/pquerna/otp/go.mod deleted file mode 100644 index 77f2d6ccf..000000000 --- a/vendor/github.com/pquerna/otp/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/pquerna/otp - -go 1.12 - -require ( - github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc - github.com/stretchr/testify v1.3.0 -) diff --git a/vendor/github.com/pquerna/otp/go.sum b/vendor/github.com/pquerna/otp/go.sum deleted file mode 100644 index 6848b56f6..000000000 --- a/vendor/github.com/pquerna/otp/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/boombuler/barcode v1.0.0 h1:s1TvRnXwL2xJRaccrdcBQMZxq6X7DvsMogtmJeHDdrc= -github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= -github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/pquerna/otp/hotp/hotp.go b/vendor/github.com/pquerna/otp/hotp/hotp.go deleted file mode 100644 index 5e99e2218..000000000 --- a/vendor/github.com/pquerna/otp/hotp/hotp.go +++ /dev/null @@ -1,204 +0,0 @@ -/** - * Copyright 2014 Paul Querna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package hotp - -import ( - "github.com/pquerna/otp" - - "crypto/hmac" - "crypto/rand" - "crypto/subtle" - "encoding/base32" - "encoding/binary" - "fmt" - "math" - "net/url" - "strings" -) - -const debug = false - -// Validate a HOTP passcode given a counter and secret. -// This is a shortcut for ValidateCustom, with parameters that -// are compataible with Google-Authenticator. -func Validate(passcode string, counter uint64, secret string) bool { - rv, _ := ValidateCustom( - passcode, - counter, - secret, - ValidateOpts{ - Digits: otp.DigitsSix, - Algorithm: otp.AlgorithmSHA1, - }, - ) - return rv -} - -// ValidateOpts provides options for ValidateCustom(). -type ValidateOpts struct { - // Digits as part of the input. Defaults to 6. - Digits otp.Digits - // Algorithm to use for HMAC. Defaults to SHA1. - Algorithm otp.Algorithm -} - -// GenerateCode creates a HOTP passcode given a counter and secret. -// This is a shortcut for GenerateCodeCustom, with parameters that -// are compataible with Google-Authenticator. 
-func GenerateCode(secret string, counter uint64) (string, error) { - return GenerateCodeCustom(secret, counter, ValidateOpts{ - Digits: otp.DigitsSix, - Algorithm: otp.AlgorithmSHA1, - }) -} - -// GenerateCodeCustom uses a counter and secret value and options struct to -// create a passcode. -func GenerateCodeCustom(secret string, counter uint64, opts ValidateOpts) (passcode string, err error) { - // As noted in issue #10 and #17 this adds support for TOTP secrets that are - // missing their padding. - secret = strings.TrimSpace(secret) - if n := len(secret) % 8; n != 0 { - secret = secret + strings.Repeat("=", 8-n) - } - - // As noted in issue #24 Google has started producing base32 in lower case, - // but the StdEncoding (and the RFC), expect a dictionary of only upper case letters. - secret = strings.ToUpper(secret) - - secretBytes, err := base32.StdEncoding.DecodeString(secret) - if err != nil { - return "", otp.ErrValidateSecretInvalidBase32 - } - - buf := make([]byte, 8) - mac := hmac.New(opts.Algorithm.Hash, secretBytes) - binary.BigEndian.PutUint64(buf, counter) - if debug { - fmt.Printf("counter=%v\n", counter) - fmt.Printf("buf=%v\n", buf) - } - - mac.Write(buf) - sum := mac.Sum(nil) - - // "Dynamic truncation" in RFC 4226 - // http://tools.ietf.org/html/rfc4226#section-5.4 - offset := sum[len(sum)-1] & 0xf - value := int64(((int(sum[offset]) & 0x7f) << 24) | - ((int(sum[offset+1] & 0xff)) << 16) | - ((int(sum[offset+2] & 0xff)) << 8) | - (int(sum[offset+3]) & 0xff)) - - l := opts.Digits.Length() - mod := int32(value % int64(math.Pow10(l))) - - if debug { - fmt.Printf("offset=%v\n", offset) - fmt.Printf("value=%v\n", value) - fmt.Printf("mod'ed=%v\n", mod) - } - - return opts.Digits.Format(mod), nil -} - -// ValidateCustom validates an HOTP with customizable options. Most users should -// use Validate(). -func ValidateCustom(passcode string, counter uint64, secret string, opts ValidateOpts) (bool, error) { - passcode = strings.TrimSpace(passcode) - - if len(passcode) != opts.Digits.Length() { - return false, otp.ErrValidateInputInvalidLength - } - - otpstr, err := GenerateCodeCustom(secret, counter, opts) - if err != nil { - return false, err - } - - if subtle.ConstantTimeCompare([]byte(otpstr), []byte(passcode)) == 1 { - return true, nil - } - - return false, nil -} - -// GenerateOpts provides options for .Generate() -type GenerateOpts struct { - // Name of the issuing Organization/Company. - Issuer string - // Name of the User's Account (eg, email address) - AccountName string - // Size in size of the generated Secret. Defaults to 10 bytes. - SecretSize uint - // Secret to store. Defaults to a randomly generated secret of SecretSize. You should generally leave this empty. - Secret []byte - // Digits to request. Defaults to 6. - Digits otp.Digits - // Algorithm to use for HMAC. Defaults to SHA1. - Algorithm otp.Algorithm -} - -var b32NoPadding = base32.StdEncoding.WithPadding(base32.NoPadding) - -// Generate creates a new HOTP Key. 
-func Generate(opts GenerateOpts) (*otp.Key, error) { - // url encode the Issuer/AccountName - if opts.Issuer == "" { - return nil, otp.ErrGenerateMissingIssuer - } - - if opts.AccountName == "" { - return nil, otp.ErrGenerateMissingAccountName - } - - if opts.SecretSize == 0 { - opts.SecretSize = 10 - } - - if opts.Digits == 0 { - opts.Digits = otp.DigitsSix - } - - // otpauth://totp/Example:alice@google.com?secret=JBSWY3DPEHPK3PXP&issuer=Example - - v := url.Values{} - if len(opts.Secret) != 0 { - v.Set("secret", b32NoPadding.EncodeToString(opts.Secret)) - } else { - secret := make([]byte, opts.SecretSize) - _, err := rand.Read(secret) - if err != nil { - return nil, err - } - v.Set("secret", b32NoPadding.EncodeToString(secret)) - } - - v.Set("issuer", opts.Issuer) - v.Set("algorithm", opts.Algorithm.String()) - v.Set("digits", opts.Digits.String()) - - u := url.URL{ - Scheme: "otpauth", - Host: "hotp", - Path: "/" + opts.Issuer + ":" + opts.AccountName, - RawQuery: v.Encode(), - } - - return otp.NewKeyFromURL(u.String()) -} diff --git a/vendor/github.com/pquerna/otp/otp.go b/vendor/github.com/pquerna/otp/otp.go deleted file mode 100644 index 5db93029c..000000000 --- a/vendor/github.com/pquerna/otp/otp.go +++ /dev/null @@ -1,207 +0,0 @@ -/** - * Copyright 2014 Paul Querna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package otp - -import ( - "github.com/boombuler/barcode" - "github.com/boombuler/barcode/qr" - - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "errors" - "fmt" - "hash" - "image" - "net/url" - "strings" -) - -// Error when attempting to convert the secret from base32 to raw bytes. -var ErrValidateSecretInvalidBase32 = errors.New("Decoding of secret as base32 failed.") - -// The user provided passcode length was not expected. -var ErrValidateInputInvalidLength = errors.New("Input length unexpected") - -// When generating a Key, the Issuer must be set. -var ErrGenerateMissingIssuer = errors.New("Issuer must be set") - -// When generating a Key, the Account Name must be set. -var ErrGenerateMissingAccountName = errors.New("AccountName must be set") - -// Key represents an TOTP or HTOP key. -type Key struct { - orig string - url *url.URL -} - -// NewKeyFromURL creates a new Key from an TOTP or HOTP url. -// -// The URL format is documented here: -// https://github.com/google/google-authenticator/wiki/Key-Uri-Format -// -func NewKeyFromURL(orig string) (*Key, error) { - s := strings.TrimSpace(orig) - - u, err := url.Parse(s) - - if err != nil { - return nil, err - } - - return &Key{ - orig: s, - url: u, - }, nil -} - -func (k *Key) String() string { - return k.orig -} - -// Image returns an QR-Code image of the specified width and height, -// suitable for use by many clients like Google-Authenricator -// to enroll a user's TOTP/HOTP key. 
-func (k *Key) Image(width int, height int) (image.Image, error) { - b, err := qr.Encode(k.orig, qr.M, qr.Auto) - - if err != nil { - return nil, err - } - - b, err = barcode.Scale(b, width, height) - - if err != nil { - return nil, err - } - - return b, nil -} - -// Type returns "hotp" or "totp". -func (k *Key) Type() string { - return k.url.Host -} - -// Issuer returns the name of the issuing organization. -func (k *Key) Issuer() string { - q := k.url.Query() - - issuer := q.Get("issuer") - - if issuer != "" { - return issuer - } - - p := strings.TrimPrefix(k.url.Path, "/") - i := strings.Index(p, ":") - - if i == -1 { - return "" - } - - return p[:i] -} - -// AccountName returns the name of the user's account. -func (k *Key) AccountName() string { - p := strings.TrimPrefix(k.url.Path, "/") - i := strings.Index(p, ":") - - if i == -1 { - return p - } - - return p[i+1:] -} - -// Secret returns the opaque secret for this Key. -func (k *Key) Secret() string { - q := k.url.Query() - - return q.Get("secret") -} - -// URL returns the OTP URL as a string -func (k *Key) URL() string { - return k.url.String() -} - -// Algorithm represents the hashing function to use in the HMAC -// operation needed for OTPs. -type Algorithm int - -const ( - AlgorithmSHA1 Algorithm = iota - AlgorithmSHA256 - AlgorithmSHA512 - AlgorithmMD5 -) - -func (a Algorithm) String() string { - switch a { - case AlgorithmSHA1: - return "SHA1" - case AlgorithmSHA256: - return "SHA256" - case AlgorithmSHA512: - return "SHA512" - case AlgorithmMD5: - return "MD5" - } - panic("unreached") -} - -func (a Algorithm) Hash() hash.Hash { - switch a { - case AlgorithmSHA1: - return sha1.New() - case AlgorithmSHA256: - return sha256.New() - case AlgorithmSHA512: - return sha512.New() - case AlgorithmMD5: - return md5.New() - } - panic("unreached") -} - -// Digits represents the number of digits present in the -// user's OTP passcode. Six and Eight are the most common values. -type Digits int - -const ( - DigitsSix Digits = 6 - DigitsEight Digits = 8 -) - -// Format converts an integer into the zero-filled size for this Digits. -func (d Digits) Format(in int32) string { - f := fmt.Sprintf("%%0%dd", d) - return fmt.Sprintf(f, in) -} - -// Length returns the number of characters for this Digits. -func (d Digits) Length() int { - return int(d) -} - -func (d Digits) String() string { - return fmt.Sprintf("%d", d) -} diff --git a/vendor/github.com/pquerna/otp/totp/totp.go b/vendor/github.com/pquerna/otp/totp/totp.go deleted file mode 100644 index b46fa567e..000000000 --- a/vendor/github.com/pquerna/otp/totp/totp.go +++ /dev/null @@ -1,199 +0,0 @@ -/** - * Copyright 2014 Paul Querna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package totp - -import ( - "github.com/pquerna/otp" - "github.com/pquerna/otp/hotp" - - "crypto/rand" - "encoding/base32" - "math" - "net/url" - "strconv" - "time" -) - -// Validate a TOTP using the current time. 
-// A shortcut for ValidateCustom, Validate uses a configuration -// that is compatible with Google-Authenticator and most clients. -func Validate(passcode string, secret string) bool { - rv, _ := ValidateCustom( - passcode, - secret, - time.Now().UTC(), - ValidateOpts{ - Period: 30, - Skew: 1, - Digits: otp.DigitsSix, - Algorithm: otp.AlgorithmSHA1, - }, - ) - return rv -} - -// GenerateCode creates a TOTP token using the current time. -// A shortcut for GenerateCodeCustom, GenerateCode uses a configuration -// that is compatible with Google-Authenticator and most clients. -func GenerateCode(secret string, t time.Time) (string, error) { - return GenerateCodeCustom(secret, t, ValidateOpts{ - Period: 30, - Skew: 1, - Digits: otp.DigitsSix, - Algorithm: otp.AlgorithmSHA1, - }) -} - -// ValidateOpts provides options for ValidateCustom(). -type ValidateOpts struct { - // Number of seconds a TOTP hash is valid for. Defaults to 30 seconds. - Period uint - // Periods before or after the current time to allow. Value of 1 allows up to Period - // of either side of the specified time. Defaults to 0 allowed skews. Values greater - // than 1 are likely sketchy. - Skew uint - // Digits as part of the input. Defaults to 6. - Digits otp.Digits - // Algorithm to use for HMAC. Defaults to SHA1. - Algorithm otp.Algorithm -} - -// GenerateCodeCustom takes a timepoint and produces a passcode using a -// secret and the provided opts. (Under the hood, this is making an adapted -// call to hotp.GenerateCodeCustom) -func GenerateCodeCustom(secret string, t time.Time, opts ValidateOpts) (passcode string, err error) { - if opts.Period == 0 { - opts.Period = 30 - } - counter := uint64(math.Floor(float64(t.Unix()) / float64(opts.Period))) - passcode, err = hotp.GenerateCodeCustom(secret, counter, hotp.ValidateOpts{ - Digits: opts.Digits, - Algorithm: opts.Algorithm, - }) - if err != nil { - return "", err - } - return passcode, nil -} - -// ValidateCustom validates a TOTP given a user specified time and custom options. -// Most users should use Validate() to provide an interpolatable TOTP experience. -func ValidateCustom(passcode string, secret string, t time.Time, opts ValidateOpts) (bool, error) { - if opts.Period == 0 { - opts.Period = 30 - } - - counters := []uint64{} - counter := int64(math.Floor(float64(t.Unix()) / float64(opts.Period))) - - counters = append(counters, uint64(counter)) - for i := 1; i <= int(opts.Skew); i++ { - counters = append(counters, uint64(counter+int64(i))) - counters = append(counters, uint64(counter-int64(i))) - } - - for _, counter := range counters { - rv, err := hotp.ValidateCustom(passcode, counter, secret, hotp.ValidateOpts{ - Digits: opts.Digits, - Algorithm: opts.Algorithm, - }) - - if err != nil { - return false, err - } - - if rv == true { - return true, nil - } - } - - return false, nil -} - -// GenerateOpts provides options for Generate(). The default values -// are compatible with Google-Authenticator. -type GenerateOpts struct { - // Name of the issuing Organization/Company. - Issuer string - // Name of the User's Account (eg, email address) - AccountName string - // Number of seconds a TOTP hash is valid for. Defaults to 30 seconds. - Period uint - // Size in size of the generated Secret. Defaults to 20 bytes. - SecretSize uint - // Secret to store. Defaults to a randomly generated secret of SecretSize. You should generally leave this empty. - Secret []byte - // Digits to request. Defaults to 6. - Digits otp.Digits - // Algorithm to use for HMAC. Defaults to SHA1. 
- Algorithm otp.Algorithm -} - -var b32NoPadding = base32.StdEncoding.WithPadding(base32.NoPadding) - -// Generate a new TOTP Key. -func Generate(opts GenerateOpts) (*otp.Key, error) { - // url encode the Issuer/AccountName - if opts.Issuer == "" { - return nil, otp.ErrGenerateMissingIssuer - } - - if opts.AccountName == "" { - return nil, otp.ErrGenerateMissingAccountName - } - - if opts.Period == 0 { - opts.Period = 30 - } - - if opts.SecretSize == 0 { - opts.SecretSize = 20 - } - - if opts.Digits == 0 { - opts.Digits = otp.DigitsSix - } - - // otpauth://totp/Example:alice@google.com?secret=JBSWY3DPEHPK3PXP&issuer=Example - - v := url.Values{} - if len(opts.Secret) != 0 { - v.Set("secret", b32NoPadding.EncodeToString(opts.Secret)) - } else { - secret := make([]byte, opts.SecretSize) - _, err := rand.Read(secret) - if err != nil { - return nil, err - } - v.Set("secret", b32NoPadding.EncodeToString(secret)) - } - - v.Set("issuer", opts.Issuer) - v.Set("period", strconv.FormatUint(uint64(opts.Period), 10)) - v.Set("algorithm", opts.Algorithm.String()) - v.Set("digits", opts.Digits.String()) - - u := url.URL{ - Scheme: "otpauth", - Host: "totp", - Path: "/" + opts.Issuer + ":" + opts.AccountName, - RawQuery: v.Encode(), - } - - return otp.NewKeyFromURL(u.String()) -} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go index ff6ff7b99..3c4f43f91 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -10,3 +10,4 @@ func isTerminal(fd int) bool { _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) return err == nil } + diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go index 163c468d5..355dc966f 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -10,3 +10,4 @@ func isTerminal(fd int) bool { _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) return err == nil } + diff --git a/vendor/github.com/sourcegraph/go-diff/diff/diff.pb.go b/vendor/github.com/sourcegraph/go-diff/diff/diff.pb.go index 0ca6759e2..2e7c27fb4 100644 --- a/vendor/github.com/sourcegraph/go-diff/diff/diff.pb.go +++ b/vendor/github.com/sourcegraph/go-diff/diff/diff.pb.go @@ -15,19 +15,14 @@ */ package diff -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - proto "github.com/gogo/protobuf/proto" +// discarding unused import gogoproto "github.com/gogo/protobuf/gogoproto" +import pbtypes "sourcegraph.com/sqs/pbtypes" - math "math" - - // discarding unused import gogoproto "github.com/gogo/protobuf/gogoproto" - - pbtypes "sourcegraph.com/sqs/pbtypes" - - io "io" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go index 9914c68ae..7af2fb56f 100644 --- a/vendor/github.com/spf13/afero/mem/file.go +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -22,9 +22,10 @@ import ( "path/filepath" "sync" "sync/atomic" - "time" ) +import "time" + const FilePathSeparator = string(filepath.Separator) type File struct { diff --git a/vendor/github.com/ulikunitz/xz/LICENSE b/vendor/github.com/ulikunitz/xz/LICENSE index 58ebdc162..d32149979 100644 --- a/vendor/github.com/ulikunitz/xz/LICENSE +++ b/vendor/github.com/ulikunitz/xz/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2014-2016 Ulrich Kunitz +Copyright (c) 2014-2020 Ulrich Kunitz All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/ulikunitz/xz/TODO.md b/vendor/github.com/ulikunitz/xz/TODO.md index c10e51b9a..a4224ce14 100644 --- a/vendor/github.com/ulikunitz/xz/TODO.md +++ b/vendor/github.com/ulikunitz/xz/TODO.md @@ -1,5 +1,9 @@ # TODO list +## Release v0.5.x + +1. Support check flag in gxz command. + ## Release v0.6 1. Review encoder and check for lzma improvements under xz. @@ -86,6 +90,15 @@ ## Log +### 2020-02-24 + +Release v0.5.7 supports the check-ID None and fixes +[issue #27](https://github.com/ulikunitz/xz/issues/27). + +### 2019-02-20 + +Release v0.5.6 supports the go.mod file. + ### 2018-10-28 Release v0.5.5 fixes issues #19 observing ErrLimit outputs. diff --git a/vendor/github.com/ulikunitz/xz/bits.go b/vendor/github.com/ulikunitz/xz/bits.go index fadc1a594..364213dd9 100644 --- a/vendor/github.com/ulikunitz/xz/bits.go +++ b/vendor/github.com/ulikunitz/xz/bits.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/crc.go b/vendor/github.com/ulikunitz/xz/crc.go index b44dca96e..638774ada 100644 --- a/vendor/github.com/ulikunitz/xz/crc.go +++ b/vendor/github.com/ulikunitz/xz/crc.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/format.go b/vendor/github.com/ulikunitz/xz/format.go index 798159c6c..edfec9a94 100644 --- a/vendor/github.com/ulikunitz/xz/format.go +++ b/vendor/github.com/ulikunitz/xz/format.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -46,7 +46,8 @@ const HeaderLen = 12 // Constants for the checksum methods supported by xz. const ( - CRC32 byte = 0x1 + None byte = 0x0 + CRC32 = 0x1 CRC64 = 0x4 SHA256 = 0xa ) @@ -58,7 +59,7 @@ var errInvalidFlags = errors.New("xz: invalid flags") // invalid. func verifyFlags(flags byte) error { switch flags { - case CRC32, CRC64, SHA256: + case None, CRC32, CRC64, SHA256: return nil default: return errInvalidFlags @@ -67,6 +68,7 @@ func verifyFlags(flags byte) error { // flagstrings maps flag values to strings. 
var flagstrings = map[byte]string{ + None: "None", CRC32: "CRC-32", CRC64: "CRC-64", SHA256: "SHA-256", @@ -85,6 +87,8 @@ func flagString(flags byte) string { // hash method encoded in flags. func newHashFunc(flags byte) (newHash func() hash.Hash, err error) { switch flags { + case None: + newHash = newNoneHash case CRC32: newHash = newCRC32 case CRC64: diff --git a/vendor/github.com/ulikunitz/xz/fox-check-none.xz b/vendor/github.com/ulikunitz/xz/fox-check-none.xz new file mode 100644 index 0000000000000000000000000000000000000000..46043f7dc89b610dc3badb9db3426620c4c97462 GIT binary patch literal 96 zcmexsUKJ6=z`*cd=%ynRgCe6CkX@qxbTK1?PDnLRM*R tL9s%9S!$6&2~avGv8qxbB|lw{3#g5Ofzej?!NQIFY(?{`7{LOOQ2>-O93KDx literal 0 HcmV?d00001 diff --git a/vendor/github.com/ulikunitz/xz/go.mod b/vendor/github.com/ulikunitz/xz/go.mod new file mode 100644 index 000000000..330b675bd --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/go.mod @@ -0,0 +1,3 @@ +module github.com/ulikunitz/xz + +go 1.12 diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go index a32887872..f2861ba3f 100644 --- a/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go +++ b/vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go index f99ec2206..e28d23be4 100644 --- a/vendor/github.com/ulikunitz/xz/internal/hash/doc.go +++ b/vendor/github.com/ulikunitz/xz/internal/hash/doc.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go index 58635b113..b8e66d972 100644 --- a/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go +++ b/vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go index ab6a19ca4..34c81b38a 100644 --- a/vendor/github.com/ulikunitz/xz/internal/hash/roller.go +++ b/vendor/github.com/ulikunitz/xz/internal/hash/roller.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go index 0ba45e8ff..678b5a058 100644 --- a/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go +++ b/vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/bintree.go b/vendor/github.com/ulikunitz/xz/lzma/bintree.go index a781bd195..58d6a92a7 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bintree.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bintree.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/bitops.go b/vendor/github.com/ulikunitz/xz/lzma/bitops.go index e9bab0199..2784ec6ba 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bitops.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bitops.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/breader.go b/vendor/github.com/ulikunitz/xz/lzma/breader.go index 5350d814f..4ad09a14e 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/breader.go +++ b/vendor/github.com/ulikunitz/xz/lzma/breader.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/buffer.go b/vendor/github.com/ulikunitz/xz/lzma/buffer.go index 50e0b6d57..9cb7838ac 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/buffer.go +++ b/vendor/github.com/ulikunitz/xz/lzma/buffer.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go index a3696ba08..290606ddc 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go +++ b/vendor/github.com/ulikunitz/xz/lzma/bytewriter.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoder.go b/vendor/github.com/ulikunitz/xz/lzma/decoder.go index 16e14db39..e5a760a50 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/decoder.go +++ b/vendor/github.com/ulikunitz/xz/lzma/decoder.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go index 564a12b83..ba06712b0 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go +++ b/vendor/github.com/ulikunitz/xz/lzma/decoderdict.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go index e08eb989f..e6e0c6ddf 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/directcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/directcodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go index b053a2dce..69871c04a 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/distcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/distcodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoder.go b/vendor/github.com/ulikunitz/xz/lzma/encoder.go index fe1900a66..59055eb64 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/encoder.go +++ b/vendor/github.com/ulikunitz/xz/lzma/encoder.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go index 9d0fbc703..40f3d3f64 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go +++ b/vendor/github.com/ulikunitz/xz/lzma/encoderdict.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go index d786a9745..e82970eac 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/hashtable.go +++ b/vendor/github.com/ulikunitz/xz/lzma/hashtable.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/header.go b/vendor/github.com/ulikunitz/xz/lzma/header.go index bc708969f..cda39462c 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/header.go +++ b/vendor/github.com/ulikunitz/xz/lzma/header.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/header2.go b/vendor/github.com/ulikunitz/xz/lzma/header2.go index ac6a71a5a..cd148812c 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/header2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/header2.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go index e51773092..927395bd8 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/lengthcodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go index c949d6ebd..ca31530fd 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/literalcodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go index 4a244eb1a..7d03ec0dc 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go +++ b/vendor/github.com/ulikunitz/xz/lzma/matchalgorithm.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/operation.go b/vendor/github.com/ulikunitz/xz/lzma/operation.go index 733bb99da..a75c9b46c 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/operation.go +++ b/vendor/github.com/ulikunitz/xz/lzma/operation.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/prob.go b/vendor/github.com/ulikunitz/xz/lzma/prob.go index 24d50ec68..6987a166f 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/prob.go +++ b/vendor/github.com/ulikunitz/xz/lzma/prob.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/properties.go b/vendor/github.com/ulikunitz/xz/lzma/properties.go index 23418e25d..662feba87 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/properties.go +++ b/vendor/github.com/ulikunitz/xz/lzma/properties.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go index 6361c5e7c..7189a0377 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go +++ b/vendor/github.com/ulikunitz/xz/lzma/rangecodec.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader.go b/vendor/github.com/ulikunitz/xz/lzma/reader.go index 2ef3dcaaa..7b7eef31f 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/reader.go +++ b/vendor/github.com/ulikunitz/xz/lzma/reader.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/reader2.go b/vendor/github.com/ulikunitz/xz/lzma/reader2.go index a55cfaa4e..33074e624 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/reader2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/reader2.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/state.go b/vendor/github.com/ulikunitz/xz/lzma/state.go index 502351052..03f061cf1 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/state.go +++ b/vendor/github.com/ulikunitz/xz/lzma/state.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go index 504b3d78e..1cb3596fe 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go +++ b/vendor/github.com/ulikunitz/xz/lzma/treecodecs.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer.go b/vendor/github.com/ulikunitz/xz/lzma/writer.go index efe34fb6b..5803ecca9 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/writer.go +++ b/vendor/github.com/ulikunitz/xz/lzma/writer.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzma/writer2.go b/vendor/github.com/ulikunitz/xz/lzma/writer2.go index 7c1afe157..c263b0666 100644 --- a/vendor/github.com/ulikunitz/xz/lzma/writer2.go +++ b/vendor/github.com/ulikunitz/xz/lzma/writer2.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/github.com/ulikunitz/xz/lzmafilter.go b/vendor/github.com/ulikunitz/xz/lzmafilter.go index 69cf5f7c2..6f4aa2c09 100644 --- a/vendor/github.com/ulikunitz/xz/lzmafilter.go +++ b/vendor/github.com/ulikunitz/xz/lzmafilter.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/vendor/github.com/ulikunitz/xz/none-check.go b/vendor/github.com/ulikunitz/xz/none-check.go new file mode 100644 index 000000000..e12d8e476 --- /dev/null +++ b/vendor/github.com/ulikunitz/xz/none-check.go @@ -0,0 +1,23 @@ +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xz + +import "hash" + +type noneHash struct{} + +func (h noneHash) Write(p []byte) (n int, err error) { return len(p), nil } + +func (h noneHash) Sum(b []byte) []byte { return b } + +func (h noneHash) Reset() {} + +func (h noneHash) Size() int { return 0 } + +func (h noneHash) BlockSize() int { return 0 } + +func newNoneHash() hash.Hash { + return &noneHash{} +} diff --git a/vendor/github.com/ulikunitz/xz/reader.go b/vendor/github.com/ulikunitz/xz/reader.go index 0634c6bcc..22cd6d500 100644 --- a/vendor/github.com/ulikunitz/xz/reader.go +++ b/vendor/github.com/ulikunitz/xz/reader.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -283,7 +283,11 @@ func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader, if err != nil { return nil, err } - br.r = io.TeeReader(fr, br.hash) + if br.hash.Size() != 0 { + br.r = io.TeeReader(fr, br.hash) + } else { + br.r = fr + } return br, nil } diff --git a/vendor/github.com/ulikunitz/xz/writer.go b/vendor/github.com/ulikunitz/xz/writer.go index c126f7099..aec10dfa6 100644 --- a/vendor/github.com/ulikunitz/xz/writer.go +++ b/vendor/github.com/ulikunitz/xz/writer.go @@ -1,4 +1,4 @@ -// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. +// Copyright 2014-2019 Ulrich Kunitz. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -18,8 +18,10 @@ type WriterConfig struct { DictCap int BufSize int BlockSize int64 - // checksum method: CRC32, CRC64 or SHA256 + // checksum method: CRC32, CRC64 or SHA256 (default: CRC64) CheckSum byte + // Forces NoChecksum (default: false) + NoCheckSum bool // match algorithm Matcher lzma.MatchAlgorithm } @@ -41,6 +43,9 @@ func (c *WriterConfig) fill() { if c.CheckSum == 0 { c.CheckSum = CRC64 } + if c.NoCheckSum { + c.CheckSum = None + } } // Verify checks the configuration for errors. Zero values will be @@ -284,7 +289,11 @@ func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWr if err != nil { return nil, err } - bw.mw = io.MultiWriter(bw.w, bw.hash) + if bw.hash.Size() != 0 { + bw.mw = io.MultiWriter(bw.w, bw.hash) + } else { + bw.mw = bw.w + } return bw, nil } diff --git a/vendor/github.com/zclconf/go-cty-yaml/readerc.go b/vendor/github.com/zclconf/go-cty-yaml/readerc.go index b0c436c4a..7c1f5fac3 100644 --- a/vendor/github.com/zclconf/go-cty-yaml/readerc.go +++ b/vendor/github.com/zclconf/go-cty-yaml/readerc.go @@ -95,7 +95,7 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { // [Go] This function was changed to guarantee the requested length size at EOF. // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests + // for that to be the case, and there are tests // If the EOF flag is set and the raw buffer is empty, do nothing. 
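The xz hunks above add a no-op `noneHash` check plus a `WriterConfig.NoCheckSum` switch, and teach the block reader/writer to skip the `TeeReader`/`MultiWriter` when the configured hash has zero size. As a rough usage sketch (not part of the patch; the buffer variable and string contents are illustrative, and it assumes the vendored xz version wires the `None` check through verification), a consumer of the library might disable block checksums like this:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ulikunitz/xz"
)

func main() {
	var compressed bytes.Buffer

	// NoCheckSum makes fill() override CheckSum with None, so blocks carry
	// no integrity check.
	cfg := xz.WriterConfig{NoCheckSum: true}
	w, err := cfg.NewWriter(&compressed)
	if err != nil {
		panic(err)
	}
	if _, err := io.WriteString(w, "hello from an xz stream without block checksums"); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// The matching reader-side hunk skips the TeeReader when the hash size
	// is zero, so decompression should work as before.
	r, err := xz.NewReader(&compressed)
	if err != nil {
		panic(err)
	}
	var out bytes.Buffer
	if _, err := out.ReadFrom(r); err != nil {
		panic(err)
	}
	fmt.Println(out.String())
}
```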
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { diff --git a/vendor/go.opencensus.io/Gopkg.lock b/vendor/go.opencensus.io/Gopkg.lock deleted file mode 100644 index 3be12ac8f..000000000 --- a/vendor/go.opencensus.io/Gopkg.lock +++ /dev/null @@ -1,231 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c" - name = "git.apache.org/thrift.git" - packages = ["lib/go/thrift"] - pruneopts = "UT" - revision = "6e67faa92827ece022380b211c2caaadd6145bf5" - source = "github.com/apache/thrift" - -[[projects]] - branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "UT" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" - version = "v1.2.0" - -[[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1" - name = "github.com/openzipkin/zipkin-go" - packages = [ - ".", - "idgenerator", - "model", - "propagation", - "reporter", - "reporter/http", - ] - pruneopts = "UT" - revision = "d455a5674050831c1e187644faa4046d653433c2" - version = "v0.1.1" - -[[projects]] - digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/promhttp", - ] - pruneopts = "UT" - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "UT" - revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "UT" - revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" - -[[projects]] - branch = "master" - digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "UT" - revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" - -[[projects]] - branch = "master" - digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9" - name = "golang.org/x/net" - packages = [ - "context", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "trace", - ] - pruneopts = "UT" - revision = "922f4815f713f213882e8ef45e0d315b164d705c" - -[[projects]] - branch = "master" - digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a" - name = "golang.org/x/sync" - packages = ["semaphore"] - 
pruneopts = "UT" - revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" - -[[projects]] - branch = "master" - digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "UT" - revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec" - -[[projects]] - digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - branch = "master" - digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530" - name = "google.golang.org/api" - packages = ["support/bundler"] - pruneopts = "UT" - revision = "e21acd801f91da814261b938941d193bb036441a" - -[[projects]] - branch = "master" - digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - pruneopts = "UT" - revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4" - -[[projects]] - digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d" - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "codes", - "connectivity", - "credentials", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/channelz", - "internal/envconfig", - "internal/grpcrand", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - ] - pruneopts = "UT" - revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455" - version = "v1.14.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "git.apache.org/thrift.git/lib/go/thrift", - "github.com/golang/protobuf/proto", - "github.com/openzipkin/zipkin-go", - "github.com/openzipkin/zipkin-go/model", - "github.com/openzipkin/zipkin-go/reporter", - "github.com/openzipkin/zipkin-go/reporter/http", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "golang.org/x/net/context", - "golang.org/x/net/http2", - "google.golang.org/api/support/bundler", - "google.golang.org/grpc", - "google.golang.org/grpc/codes", - "google.golang.org/grpc/grpclog", - "google.golang.org/grpc/metadata", - "google.golang.org/grpc/stats", - "google.golang.org/grpc/status", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/go.opencensus.io/Gopkg.toml b/vendor/go.opencensus.io/Gopkg.toml deleted file mode 100644 index a9f3cd68e..000000000 --- a/vendor/go.opencensus.io/Gopkg.toml +++ /dev/null @@ -1,36 +0,0 @@ -# For v0.x.y dependencies, prefer adding a constraints of the form: version=">= 0.x.y" -# to avoid locking to a particular minor version which can cause dep to not be -# able to find a satisfying dependency graph. 
- -[[constraint]] - branch = "master" - name = "git.apache.org/thrift.git" - source = "github.com/apache/thrift" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "1.0.0" - -[[constraint]] - name = "github.com/openzipkin/zipkin-go" - version = ">=0.1.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = ">=0.8.0" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - branch = "master" - name = "google.golang.org/api" - -[[constraint]] - name = "google.golang.org/grpc" - version = "1.11.3" - -[prune] - go-tests = true - unused-packages = true diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile index 457866cb1..b3ce3df30 100644 --- a/vendor/go.opencensus.io/Makefile +++ b/vendor/go.opencensus.io/Makefile @@ -8,7 +8,7 @@ ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) GOTEST_OPT?=-v -race -timeout 30s GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic GOTEST=go test -GOFMT=gofmt +GOIMPORTS=goimports GOLINT=golint GOVET=go vet EMBEDMD=embedmd @@ -17,14 +17,14 @@ TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packag TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ') -.DEFAULT_GOAL := fmt-lint-vet-embedmd-test +.DEFAULT_GOAL := imports-lint-vet-embedmd-test -.PHONY: fmt-lint-vet-embedmd-test -fmt-lint-vet-embedmd-test: fmt lint vet embedmd test +.PHONY: imports-lint-vet-embedmd-test +imports-lint-vet-embedmd-test: imports lint vet embedmd test # TODO enable test-with-coverage in tavis .PHONY: travis-ci -travis-ci: fmt lint vet embedmd test test-386 +travis-ci: imports lint vet embedmd test test-386 all-pkgs: @echo $(ALL_PKGS) | tr ' ' '\n' | sort @@ -44,15 +44,15 @@ test-386: test-with-coverage: $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) -.PHONY: fmt -fmt: - @FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \ - if [ "$$FMTOUT" ]; then \ - echo "$(GOFMT) FAILED => gofmt the following files:\n"; \ - echo "$$FMTOUT\n"; \ +.PHONY: imports +imports: + @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \ + if [ "$$IMPORTSOUT" ]; then \ + echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \ + echo "$$IMPORTSOUT\n"; \ exit 1; \ else \ - echo "Fmt finished successfully"; \ + echo "Imports finished successfully"; \ fi .PHONY: lint @@ -91,6 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/tools/cmd/cover go get -u golang.org/x/lint/golint + go get -u golang.org/x/tools/cmd/cover + go get -u golang.org/x/tools/cmd/goimports go get -u github.com/rakyll/embedmd diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md index fabab2e06..1d7e83711 100644 --- a/vendor/go.opencensus.io/README.md +++ b/vendor/go.opencensus.io/README.md @@ -9,6 +9,8 @@ OpenCensus Go is a Go implementation of OpenCensus, a toolkit for collecting application performance and behavior monitoring data. Currently it consists of three major components: tags, stats and tracing. +#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289). 
+ ## Installation ``` @@ -57,6 +59,7 @@ can implement their own exporters by implementing the exporter interfaces * [Datadog][exporter-datadog] for stats and traces * [Graphite][exporter-graphite] for stats * [Honeycomb][exporter-honeycomb] for traces +* [New Relic][exporter-newrelic] for stats and traces ## Overview @@ -78,7 +81,7 @@ Package `tag` allows adding or modifying tags in the current context. [embedmd]:# (internal/readme/tags.go new) ```go -ctx, err = tag.New(ctx, +ctx, err := tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), tag.Upsert(userIDKey, "cde36753ed"), ) @@ -261,3 +264,4 @@ release in which the functionality was marked *Deprecated*. [exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog [exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite [exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter +[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml index 12bd7c4c7..d08f0edaf 100644 --- a/vendor/go.opencensus.io/appveyor.yml +++ b/vendor/go.opencensus.io/appveyor.yml @@ -6,13 +6,12 @@ clone_folder: c:\gopath\src\go.opencensus.io environment: GOPATH: 'c:\gopath' - GOVERSION: '1.11' GO111MODULE: 'on' CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - choco upgrade golang --version 1.11.5 # Temporary fix because of a go.sum bug in 1.11 +stack: go 1.11 + +before_test: - go version - go env diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod index cb4de80f3..c867df5f5 100644 --- a/vendor/go.opencensus.io/go.mod +++ b/vendor/go.opencensus.io/go.mod @@ -1,12 +1,15 @@ module go.opencensus.io require ( + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 github.com/golang/protobuf v1.3.1 github.com/google/go-cmp v0.3.0 - github.com/hashicorp/golang-lru v0.5.1 - golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 + github.com/stretchr/testify v1.4.0 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect golang.org/x/text v0.3.2 // indirect google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect google.golang.org/grpc v1.20.1 ) + +go 1.13 diff --git a/vendor/go.opencensus.io/go.sum b/vendor/go.opencensus.io/go.sum index 0b948c2b4..01c02972c 100644 --- a/vendor/go.opencensus.io/go.sum +++ b/vendor/go.opencensus.io/go.sum @@ -1,8 +1,12 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -10,20 +14,24 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -45,6 +53,7 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd 
h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -58,4 +67,8 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go index 9a638781c..81dc7183e 100644 --- a/vendor/go.opencensus.io/internal/internal.go +++ b/vendor/go.opencensus.io/internal/internal.go @@ -33,5 +33,5 @@ var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) // end as a monotonic time. // See https://golang.org/pkg/time/#hdr-Monotonic_Clocks func MonotonicEndTime(start time.Time) time.Time { - return start.Add(time.Now().Sub(start)) + return start.Add(time.Since(start)) } diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index 626d73645..e5e4b4368 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.22.0" + return "0.23.0" } diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go index 2f1c7f006..9ad885219 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -68,7 +68,7 @@ func ParseTraceID(tid string) (trace.TraceID, bool) { return trace.TraceID{}, false } b, err := hex.DecodeString(tid) - if err != nil { + if err != nil || len(b) > 16 { return trace.TraceID{}, false } var traceID trace.TraceID @@ -90,7 +90,7 @@ func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { return trace.SpanID{}, false } b, err := hex.DecodeString(sid) - if err != nil { + if err != nil || len(b) > 8 { return trace.SpanID{}, false } start := 8 - len(b) diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index 4f6404fa7..c7ea64235 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -70,6 +70,12 @@ type Handler struct { // from the information found in the incoming HTTP Request. By default the // name equals the URL Path. FormatSpanName func(*http.Request) string + + // IsHealthEndpoint holds the function to use for determining if the + // incoming HTTP request should be considered a health check. 
This is in + // addition to the private isHealthEndpoint func which may also indicate + // tracing should be skipped. + IsHealthEndpoint func(*http.Request) bool } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -87,7 +93,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - if isHealthEndpoint(r.URL.Path) { + if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) { return r, func() {} } var name string @@ -128,7 +134,7 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ // TODO: Handle cases where ContentLength is not set. } else if r.ContentLength > 0 { span.AddMessageReceiveEvent(0, /* TODO: messageID */ - int64(r.ContentLength), -1) + r.ContentLength, -1) } return r.WithContext(ctx), span.End } @@ -174,8 +180,6 @@ type trackingResponseWriter struct { // Compile time assertion for ResponseWriter interface var _ http.ResponseWriter = (*trackingResponseWriter)(nil) -var logTagsErrorOnce sync.Once - func (t *trackingResponseWriter) end(tags *addedTags) { t.endOnce.Do(func() { if t.statusCode == 0 { diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go index 63bbcda5e..ee3729040 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -92,38 +92,38 @@ var ( // The value of this tag can be controlled by the HTTP client, so you need // to watch out for potentially generating high-cardinality labels in your // metrics backend if you use this tag in views. - Host, _ = tag.NewKey("http.host") + Host = tag.MustNewKey("http.host") // StatusCode is the numeric HTTP response status code, // or "error" if a transport error occurred and no status code was read. - StatusCode, _ = tag.NewKey("http.status") + StatusCode = tag.MustNewKey("http.status") // Path is the URL path (not including query string) in the request. // // The value of this tag can be controlled by the HTTP client, so you need // to watch out for potentially generating high-cardinality labels in your // metrics backend if you use this tag in views. - Path, _ = tag.NewKey("http.path") + Path = tag.MustNewKey("http.path") // Method is the HTTP method of the request, capitalized (GET, POST, etc.). - Method, _ = tag.NewKey("http.method") + Method = tag.MustNewKey("http.method") // KeyServerRoute is a low cardinality string representing the logical // handler of the request. This is usually the pattern registered on the a // ServeMux (or similar string). - KeyServerRoute, _ = tag.NewKey("http_server_route") + KeyServerRoute = tag.MustNewKey("http_server_route") ) // Client tag keys. var ( // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). - KeyClientMethod, _ = tag.NewKey("http_client_method") + KeyClientMethod = tag.MustNewKey("http_client_method") // KeyClientPath is the URL path (not including query string). - KeyClientPath, _ = tag.NewKey("http_client_path") + KeyClientPath = tag.MustNewKey("http_client_path") // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. - KeyClientStatus, _ = tag.NewKey("http_client_status") + KeyClientStatus = tag.MustNewKey("http_client_status") // KeyClientHost is the value of the request Host header. 
- KeyClientHost, _ = tag.NewKey("http_client_host") + KeyClientHost = tag.MustNewKey("http_client_host") ) // Default distributions used by views in this package. diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go index c23b97fb1..ed3a5db56 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -186,6 +186,8 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status { code = trace.StatusCodeCancelled case http.StatusBadRequest: code = trace.StatusCodeInvalidArgument + case http.StatusUnprocessableEntity: + code = trace.StatusCodeInvalidArgument case http.StatusGatewayTimeout: code = trace.StatusCodeDeadlineExceeded case http.StatusNotFound: @@ -202,7 +204,10 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status { code = trace.StatusCodeUnavailable case http.StatusOK: code = trace.StatusCodeOK + case http.StatusConflict: + code = trace.StatusCodeAlreadyExists } + return trace.Status{Code: code, Message: codeToStr[code]} } diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index ad4691184..2b9728346 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -31,10 +31,19 @@ func init() { } } +// Recorder provides an interface for exporting measurement information from +// the static Record method by using the WithRecorder option. +type Recorder interface { + // Record records a set of measurements associated with the given tags and attachments. + // The second argument is a `[]Measurement`. + Record(*tag.Map, interface{}, map[string]interface{}) +} + type recordOptions struct { attachments metricdata.Attachments mutators []tag.Mutator measurements []Measurement + recorder Recorder } // WithAttachments applies provided exemplar attachments. @@ -58,6 +67,14 @@ func WithMeasurements(measurements ...Measurement) Options { } } +// WithRecorder records the measurements to the specified `Recorder`, rather +// than to the global metrics recorder. +func WithRecorder(meter Recorder) Options { + return func(ro *recordOptions) { + ro.recorder = meter + } +} + // Options apply changes to recordOptions. type Options func(*recordOptions) @@ -93,6 +110,9 @@ func RecordWithOptions(ctx context.Context, ros ...Options) error { return nil } recorder := internal.DefaultRecorder + if o.recorder != nil { + recorder = o.recorder.Record + } if recorder == nil { return nil } diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go index 6931a5f29..736399652 100644 --- a/vendor/go.opencensus.io/stats/units.go +++ b/vendor/go.opencensus.io/stats/units.go @@ -22,4 +22,5 @@ const ( UnitDimensionless = "1" UnitBytes = "By" UnitMilliseconds = "ms" + UnitSeconds = "s" ) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go index b7f169b4a..9d7093728 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -82,7 +82,7 @@ func Sum() *Aggregation { // Distribution indicates that the desired aggregation is // a histogram distribution. // -// An distribution aggregation may contain a histogram of the values in the +// A distribution aggregation may contain a histogram of the values in the // population. The bucket boundaries for that histogram are described // by the bounds. This defines len(bounds)+1 buckets. 
// @@ -99,13 +99,14 @@ func Sum() *Aggregation { // If len(bounds) is 1 then there is no finite buckets, and that single // element is the common boundary of the overflow and underflow buckets. func Distribution(bounds ...float64) *Aggregation { - return &Aggregation{ + agg := &Aggregation{ Type: AggTypeDistribution, Buckets: bounds, - newData: func() AggregationData { - return newDistributionData(bounds) - }, } + agg.newData = func() AggregationData { + return newDistributionData(agg) + } + return agg } // LastValue only reports the last value recorded using this diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go index d500e67f7..f331d456e 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -128,12 +128,12 @@ type DistributionData struct { bounds []float64 // histogram distribution of the values } -func newDistributionData(bounds []float64) *DistributionData { - bucketCount := len(bounds) + 1 +func newDistributionData(agg *Aggregation) *DistributionData { + bucketCount := len(agg.Buckets) + 1 return &DistributionData{ CountPerBucket: make([]int64, bucketCount), ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), - bounds: bounds, + bounds: agg.Buckets, Min: math.MaxFloat64, Max: math.SmallestNonzeroFloat64, } diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go index dced225c3..7bbedfe1f 100644 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -29,7 +29,7 @@ // LastValue just keeps track of the most recently recorded measurement value. // All aggregations are cumulative. // -// Views can be registerd and unregistered at any time during program execution. +// Views can be registered and unregistered at any time during program execution. // // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go index 7cb59718f..73ba11f5b 100644 --- a/vendor/go.opencensus.io/stats/view/export.go +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -14,13 +14,6 @@ package view -import "sync" - -var ( - exportersMu sync.RWMutex // guards exporters - exporters = make(map[Exporter]struct{}) -) - // Exporter exports the collected records as view data. // // The ExportView method should return quickly; if an @@ -43,16 +36,10 @@ type Exporter interface { // // Binaries can register exporters, libraries shouldn't register exporters. func RegisterExporter(e Exporter) { - exportersMu.Lock() - defer exportersMu.Unlock() - - exporters[e] = struct{}{} + defaultWorker.RegisterExporter(e) } // UnregisterExporter unregisters an exporter. func UnregisterExporter(e Exporter) { - exportersMu.Lock() - defer exportersMu.Unlock() - - delete(exporters, e) + defaultWorker.UnregisterExporter(e) } diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go index 37f88e1d9..293b54ecb 100644 --- a/vendor/go.opencensus.io/stats/view/view.go +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -30,7 +30,7 @@ import ( ) // View allows users to aggregate the recorded stats.Measurements. -// Views need to be passed to the Register function to be before data will be +// Views need to be passed to the Register function before data will be // collected and sent to Exporters. 
type View struct { Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. @@ -43,7 +43,7 @@ type View struct { // Measure is a stats.Measure to aggregate in this view. Measure stats.Measure - // Aggregation is the aggregation function tp apply to the set of Measurements. + // Aggregation is the aggregation function to apply to the set of Measurements. Aggregation *Aggregation } @@ -189,7 +189,7 @@ func (r *Row) String() string { } // Equal returns true if both rows are equal. Tags are expected to be ordered -// by the key name. Even both rows have the same tags but the tags appear in +// by the key name. Even if both rows have the same tags but the tags appear in // different orders it will return false. func (r *Row) Equal(other *Row) bool { if r == other { diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go index f67b5c464..5e1656a1f 100644 --- a/vendor/go.opencensus.io/stats/view/view_to_metric.go +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -18,6 +18,8 @@ package view import ( "time" + "go.opencensus.io/resource" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" ) @@ -85,12 +87,21 @@ func viewToMetricDescriptor(v *View) *metricdata.Descriptor { return &metricdata.Descriptor{ Name: v.Name, Description: v.Description, - Unit: getUnit(v.Measure.Unit()), + Unit: convertUnit(v), Type: getType(v), LabelKeys: getLabelKeys(v), } } +func convertUnit(v *View) metricdata.Unit { + switch v.Aggregation.Type { + case AggTypeCount: + return metricdata.UnitDimensionless + default: + return getUnit(v.Measure.Unit()) + } +} + func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { labelValues := []metricdata.LabelValue{} tagMap := make(map[string]string) @@ -116,7 +127,7 @@ func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Ti } } -func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { +func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time, startTime time.Time) *metricdata.Metric { if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { startTime = time.Time{} @@ -135,6 +146,7 @@ func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricda m := &metricdata.Metric{ Descriptor: *v.metricDescriptor, TimeSeries: ts, + Resource: r, } return m } diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 2f3c018af..ab8bfd46d 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -20,6 +20,8 @@ import ( "sync" "time" + "go.opencensus.io/resource" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" "go.opencensus.io/stats" @@ -28,7 +30,7 @@ import ( ) func init() { - defaultWorker = newWorker() + defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record } @@ -47,8 +49,69 @@ type worker struct { c chan command quit, done chan bool mu sync.RWMutex + r *resource.Resource + + exportersMu sync.RWMutex + exporters map[Exporter]struct{} +} + +// Meter defines an interface which allows a single process to maintain +// multiple sets of metrics exports (intended for the advanced case where a +// single process wants to report metrics about multiple objects, such as +// multiple databases or HTTP services). 
+// +// Note that this is an advanced use case, and the static functions in this +// module should cover the common use cases. +type Meter interface { + stats.Recorder + // Find returns a registered view associated with this name. + // If no registered view is found, nil is returned. + Find(name string) *View + // Register begins collecting data for the given views. + // Once a view is registered, it reports data to the registered exporters. + Register(views ...*View) error + // Unregister the given views. Data will not longer be exported for these views + // after Unregister returns. + // It is not necessary to unregister from views you expect to collect for the + // duration of your program execution. + Unregister(views ...*View) + // SetReportingPeriod sets the interval between reporting aggregated views in + // the program. If duration is less than or equal to zero, it enables the + // default behavior. + // + // Note: each exporter makes different promises about what the lowest supported + // duration is. For example, the Stackdriver exporter recommends a value no + // lower than 1 minute. Consult each exporter per your needs. + SetReportingPeriod(time.Duration) + + // RegisterExporter registers an exporter. + // Collected data will be reported via all the + // registered exporters. Once you no longer + // want data to be exported, invoke UnregisterExporter + // with the previously registered exporter. + // + // Binaries can register exporters, libraries shouldn't register exporters. + RegisterExporter(Exporter) + // UnregisterExporter unregisters an exporter. + UnregisterExporter(Exporter) + // SetResource may be used to set the Resource associated with this registry. + // This is intended to be used in cases where a single process exports metrics + // for multiple Resources, typically in a multi-tenant situation. + SetResource(*resource.Resource) + + // Start causes the Meter to start processing Record calls and aggregating + // statistics as well as exporting data. + Start() + // Stop causes the Meter to stop processing calls and terminate data export. + Stop() + + // RetrieveData gets a snapshot of the data collected for the the view registered + // with the given name. It is intended for testing only. + RetrieveData(viewName string) ([]*Row, error) } +var _ Meter = (*worker)(nil) + var defaultWorker *worker var defaultReportingDuration = 10 * time.Second @@ -56,11 +119,17 @@ var defaultReportingDuration = 10 * time.Second // Find returns a registered view associated with this name. // If no registered view is found, nil is returned. func Find(name string) (v *View) { + return defaultWorker.Find(name) +} + +// Find returns a registered view associated with this name. +// If no registered view is found, nil is returned. +func (w *worker) Find(name string) (v *View) { req := &getViewByNameReq{ name: name, c: make(chan *getViewByNameResp), } - defaultWorker.c <- req + w.c <- req resp := <-req.c return resp.v } @@ -68,11 +137,17 @@ func Find(name string) (v *View) { // Register begins collecting data for the given views. // Once a view is registered, it reports data to the registered exporters. func Register(views ...*View) error { + return defaultWorker.Register(views...) +} + +// Register begins collecting data for the given views. +// Once a view is registered, it reports data to the registered exporters. 
+func (w *worker) Register(views ...*View) error { req := ®isterViewReq{ views: views, err: make(chan error), } - defaultWorker.c <- req + w.c <- req return <-req.err } @@ -81,6 +156,14 @@ func Register(views ...*View) error { // It is not necessary to unregister from views you expect to collect for the // duration of your program execution. func Unregister(views ...*View) { + defaultWorker.Unregister(views...) +} + +// Unregister the given views. Data will not longer be exported for these views +// after Unregister returns. +// It is not necessary to unregister from views you expect to collect for the +// duration of your program execution. +func (w *worker) Unregister(views ...*View) { names := make([]string, len(views)) for i := range views { names[i] = views[i].Name @@ -89,31 +172,42 @@ func Unregister(views ...*View) { views: names, done: make(chan struct{}), } - defaultWorker.c <- req + w.c <- req <-req.done } // RetrieveData gets a snapshot of the data collected for the the view registered // with the given name. It is intended for testing only. func RetrieveData(viewName string) ([]*Row, error) { + return defaultWorker.RetrieveData(viewName) +} + +// RetrieveData gets a snapshot of the data collected for the the view registered +// with the given name. It is intended for testing only. +func (w *worker) RetrieveData(viewName string) ([]*Row, error) { req := &retrieveDataReq{ now: time.Now(), v: viewName, c: make(chan *retrieveDataResp), } - defaultWorker.c <- req + w.c <- req resp := <-req.c return resp.rows, resp.err } func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + defaultWorker.Record(tags, ms, attachments) +} + +// Record records a set of measurements ms associated with the given tags and attachments. +func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { req := &recordReq{ tm: tags, ms: ms.([]stats.Measurement), attachments: attachments, t: time.Now(), } - defaultWorker.c <- req + w.c <- req } // SetReportingPeriod sets the interval between reporting aggregated views in @@ -124,17 +218,31 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { // duration is. For example, the Stackdriver exporter recommends a value no // lower than 1 minute. Consult each exporter per your needs. func SetReportingPeriod(d time.Duration) { + defaultWorker.SetReportingPeriod(d) +} + +// SetReportingPeriod sets the interval between reporting aggregated views in +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. +func (w *worker) SetReportingPeriod(d time.Duration) { // TODO(acetechnologist): ensure that the duration d is more than a certain // value. e.g. 1s req := &setReportingPeriodReq{ d: d, c: make(chan bool), } - defaultWorker.c <- req + w.c <- req <-req.c // don't return until the timer is set to the new duration. } -func newWorker() *worker { +// NewMeter constructs a Meter instance. You should only need to use this if +// you need to separate out Measurement recordings and View aggregations within +// a single process. 
+func NewMeter() Meter { return &worker{ measures: make(map[string]*measureRef), views: make(map[string]*viewInternal), @@ -143,9 +251,23 @@ func newWorker() *worker { c: make(chan command, 1024), quit: make(chan bool), done: make(chan bool), + + exporters: make(map[Exporter]struct{}), } } +// SetResource associates all data collected by this Meter with the specified +// resource. This resource is reported when using metricexport.ReadAndExport; +// it is not provided when used with ExportView/RegisterExporter, because that +// interface does not provide a means for reporting the Resource. +func (w *worker) SetResource(r *resource.Resource) { + w.r = r +} + +func (w *worker) Start() { + go w.start() +} + func (w *worker) start() { prodMgr := metricproducer.GlobalManager() prodMgr.AddProducer(w) @@ -155,7 +277,7 @@ func (w *worker) start() { case cmd := <-w.c: cmd.handleCommand(w) case <-w.timer.C: - w.reportUsage(time.Now()) + w.reportUsage() case <-w.quit: w.timer.Stop() close(w.c) @@ -165,7 +287,7 @@ func (w *worker) start() { } } -func (w *worker) stop() { +func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) @@ -202,44 +324,45 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { return x, nil } w.views[vi.view.Name] = vi + w.startTimes[vi] = time.Now() ref := w.getMeasureRef(vi.view.Measure.Name()) ref.views[vi] = struct{}{} return vi, nil } -func (w *worker) unregisterView(viewName string) { +func (w *worker) unregisterView(v *viewInternal) { w.mu.Lock() defer w.mu.Unlock() - delete(w.views, viewName) + delete(w.views, v.view.Name) + delete(w.startTimes, v) + if measure := w.measures[v.view.Measure.Name()]; measure != nil { + delete(measure.views, v) + } } -func (w *worker) reportView(v *viewInternal, now time.Time) { +func (w *worker) reportView(v *viewInternal) { if !v.isSubscribed() { return } rows := v.collectedRows() - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } viewData := &Data{ View: v.view, Start: w.startTimes[v], End: time.Now(), Rows: rows, } - exportersMu.Lock() - for e := range exporters { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + for e := range w.exporters { e.ExportView(viewData) } - exportersMu.Unlock() } -func (w *worker) reportUsage(now time.Time) { +func (w *worker) reportUsage() { w.mu.Lock() defer w.mu.Unlock() for _, v := range w.views { - w.reportView(v, now) + w.reportView(v) } } @@ -248,11 +371,6 @@ func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { return nil } - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } - var startTime time.Time if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { @@ -261,7 +379,7 @@ func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { startTime = w.startTimes[v] } - return viewToMetric(v, now, startTime) + return viewToMetric(v, w.r, now, startTime) } // Read reads all view data and returns them as metrics. 
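The worker.go hunks above promote the package-level worker into a public `Meter` (`view.NewMeter`), which also satisfies the new `stats.Recorder` interface from the record.go hunk, so individual measurements can be routed to a specific Meter via `stats.WithRecorder` instead of the global recorder. A hedged end-to-end sketch (not part of the patch; the measure name, view name, and bucket bounds are illustrative):

```go
package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var latencyMs = stats.Float64("example.org/latency", "request latency", stats.UnitMilliseconds)

func main() {
	// NewMeter returns a worker with its own views, exporters, and Resource,
	// independent of the package-level defaultWorker.
	meter := view.NewMeter()
	meter.Start()
	defer meter.Stop()

	v := &view.View{
		Name:        "example.org/latency_distribution",
		Measure:     latencyMs,
		Aggregation: view.Distribution(0, 10, 100, 1000),
	}
	if err := meter.Register(v); err != nil {
		log.Fatal(err)
	}

	// WithRecorder sends the measurement to this Meter rather than the
	// global metrics recorder.
	err := stats.RecordWithOptions(context.Background(),
		stats.WithMeasurements(latencyMs.M(42)),
		stats.WithRecorder(meter),
	)
	if err != nil {
		log.Fatal(err)
	}

	rows, err := meter.RetrieveData(v.Name)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("collected %d row(s)", len(rows))
}
```

Since commands are processed in order on the worker's channel, the recorded measurement is aggregated before `RetrieveData` returns in this sketch.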
@@ -279,3 +397,17 @@ func (w *worker) Read() []*metricdata.Metric { } return metrics } + +func (w *worker) RegisterExporter(e Exporter) { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + + w.exporters[e] = struct{}{} +} + +func (w *worker) UnregisterExporter(e Exporter) { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + + delete(w.exporters, e) +} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go index 0267e179a..9ac4cc059 100644 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -95,7 +95,7 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { } // Report pending data for this view before removing it. - w.reportView(vi, time.Now()) + w.reportView(vi) vi.unsubscribe() if !vi.isSubscribed() { @@ -103,7 +103,7 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { // The collected data can be cleared. vi.clearRows() } - w.unregisterView(name) + w.unregisterView(vi) } cmd.done <- struct{}{} } @@ -163,7 +163,7 @@ func (cmd *recordReq) handleCommand(w *worker) { } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) + v.addSample(cmd.tm, m.Value(), cmd.attachments, cmd.t) } } } diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go index 4e63d08c9..71ec91365 100644 --- a/vendor/go.opencensus.io/tag/key.go +++ b/vendor/go.opencensus.io/tag/key.go @@ -21,7 +21,7 @@ type Key struct { } // NewKey creates or retrieves a string key identified by name. -// Calling NewKey consequently with the same name returns the same key. +// Calling NewKey more than once with the same name returns the same key. func NewKey(name string) (Key, error) { if !checkKeyName(name) { return Key{}, errInvalidKeyName @@ -29,8 +29,7 @@ func NewKey(name string) (Key, error) { return Key{name: name}, nil } -// MustNewKey creates or retrieves a string key identified by name. -// An invalid key name raises a panic. +// MustNewKey returns a key with the given name, and panics if name is an invalid key name. func MustNewKey(name string) Key { k, err := NewKey(name) if err != nil { diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go index f8b582761..c242e695c 100644 --- a/vendor/go.opencensus.io/tag/map_codec.go +++ b/vendor/go.opencensus.io/tag/map_codec.go @@ -168,7 +168,7 @@ func Encode(m *Map) []byte { eg := &encoderGRPC{ buf: make([]byte, len(m.m)), } - eg.writeByte(byte(tagsVersionID)) + eg.writeByte(tagsVersionID) for k, v := range m.m { if v.m.ttl.ttl == valueTTLUnlimitedPropagation { eg.writeByte(byte(keyTypeString)) diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go index 3f80a3368..908c2497e 100644 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -15,23 +15,47 @@ package trace import ( - "github.com/hashicorp/golang-lru/simplelru" + "github.com/golang/groupcache/lru" ) +// A simple lru.Cache wrapper that tracks the keys of the current contents and +// the cumulative number of evicted items. 
type lruMap struct { - simpleLruMap *simplelru.LRU + cacheKeys map[lru.Key]bool + cache *lru.Cache droppedCount int } func newLruMap(size int) *lruMap { - lm := &lruMap{} - lm.simpleLruMap, _ = simplelru.NewLRU(size, nil) + lm := &lruMap{ + cacheKeys: make(map[lru.Key]bool), + cache: lru.New(size), + droppedCount: 0, + } + lm.cache.OnEvicted = func(key lru.Key, value interface{}) { + delete(lm.cacheKeys, key) + lm.droppedCount++ + } return lm } -func (lm *lruMap) add(key, value interface{}) { - evicted := lm.simpleLruMap.Add(key, value) - if evicted { - lm.droppedCount++ +func (lm lruMap) len() int { + return lm.cache.Len() +} + +func (lm lruMap) keys() []interface{} { + keys := make([]interface{}, len(lm.cacheKeys)) + for k := range lm.cacheKeys { + keys = append(keys, k) } + return keys +} + +func (lm *lruMap) add(key, value interface{}) { + lm.cacheKeys[lru.Key(key)] = true + lm.cache.Add(lru.Key(key), value) +} + +func (lm *lruMap) get(key interface{}) (interface{}, bool) { + return lm.cache.Get(key) } diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index 38ead7bf0..125e2cd90 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -296,7 +296,7 @@ func (s *Span) makeSpanData() *SpanData { var sd SpanData s.mu.Lock() sd = *s.data - if s.lruAttributes.simpleLruMap.Len() > 0 { + if s.lruAttributes.len() > 0 { sd.Attributes = s.lruAttributesToAttributeMap() sd.DroppedAttributeCount = s.lruAttributes.droppedCount } @@ -345,7 +345,7 @@ func (s *Span) SetStatus(status Status) { } func (s *Span) interfaceArrayToLinksArray() []Link { - linksArr := make([]Link, 0) + linksArr := make([]Link, 0, len(s.links.queue)) for _, value := range s.links.queue { linksArr = append(linksArr, value.(Link)) } @@ -353,7 +353,7 @@ func (s *Span) interfaceArrayToLinksArray() []Link { } func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { - messageEventArr := make([]MessageEvent, 0) + messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) for _, value := range s.messageEvents.queue { messageEventArr = append(messageEventArr, value.(MessageEvent)) } @@ -361,7 +361,7 @@ func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { } func (s *Span) interfaceArrayToAnnotationArray() []Annotation { - annotationArr := make([]Annotation, 0) + annotationArr := make([]Annotation, 0, len(s.annotations.queue)) for _, value := range s.annotations.queue { annotationArr = append(annotationArr, value.(Annotation)) } @@ -369,9 +369,9 @@ func (s *Span) interfaceArrayToAnnotationArray() []Annotation { } func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { - attributes := make(map[string]interface{}) - for _, key := range s.lruAttributes.simpleLruMap.Keys() { - value, ok := s.lruAttributes.simpleLruMap.Get(key) + attributes := make(map[string]interface{}, s.lruAttributes.len()) + for _, key := range s.lruAttributes.keys() { + value, ok := s.lruAttributes.get(key) if ok { keyStr := key.(string) attributes[keyStr] = value @@ -420,7 +420,7 @@ func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...in var m map[string]interface{} s.mu.Lock() if len(attributes) != 0 { - m = make(map[string]interface{}) + m = make(map[string]interface{}, len(attributes)) copyAttributes(m, attributes) } s.annotations.add(Annotation{ @@ -436,7 +436,7 @@ func (s *Span) printStringInternal(attributes []Attribute, str string) { var a map[string]interface{} s.mu.Lock() if len(attributes) != 0 { - a = 
make(map[string]interface{}) + a = make(map[string]interface{}, len(attributes)) copyAttributes(a, attributes) } s.annotations.add(Annotation{ diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go new file mode 100644 index 000000000..b799e440b --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -0,0 +1,16 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11,!gccgo,!purego + +package chacha20 + +const bufSize = 256 + +//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s similarity index 99% rename from vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s rename to vendor/golang.org/x/crypto/chacha20/chacha_arm64.s index b3a16ef75..891481539 100644 --- a/vendor/golang.org/x/crypto/internal/chacha20/asm_arm64.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.11 -// +build !gccgo,!appengine +// +build go1.11,!gccgo,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go new file mode 100644 index 000000000..a2ecf5c32 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go @@ -0,0 +1,398 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms +// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01. +package chacha20 + +import ( + "crypto/cipher" + "encoding/binary" + "errors" + "math/bits" + + "golang.org/x/crypto/internal/subtle" +) + +const ( + // KeySize is the size of the key used by this cipher, in bytes. + KeySize = 32 + + // NonceSize is the size of the nonce used with the standard variant of this + // cipher, in bytes. + // + // Note that this is too short to be safely generated at random if the same + // key is reused more than 2³² times. + NonceSize = 12 + + // NonceSizeX is the size of the nonce used with the XChaCha20 variant of + // this cipher, in bytes. + NonceSizeX = 24 +) + +// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key +// and nonce. A *Cipher implements the cipher.Stream interface. +type Cipher struct { + // The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter + // (incremented after each block), and 3 of nonce. + key [8]uint32 + counter uint32 + nonce [3]uint32 + + // The last len bytes of buf are leftover key stream bytes from the previous + // XORKeyStream invocation. The size of buf depends on how many blocks are + // computed at a time by xorKeyStreamBlocks. + buf [bufSize]byte + len int + + // overflow is set when the counter overflowed, no more blocks can be + // generated, and the next XORKeyStream call should panic. + overflow bool + + // The counter-independent results of the first round are cached after they + // are computed the first time. 
+ precompDone bool + p1, p5, p9, p13 uint32 + p2, p6, p10, p14 uint32 + p3, p7, p11, p15 uint32 +} + +var _ cipher.Stream = (*Cipher)(nil) + +// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given +// 32 bytes key and a 12 or 24 bytes nonce. If a nonce of 24 bytes is provided, +// the XChaCha20 construction will be used. It returns an error if key or nonce +// have any other length. +// +// Note that ChaCha20, like all stream ciphers, is not authenticated and allows +// attackers to silently tamper with the plaintext. For this reason, it is more +// appropriate as a building block than as a standalone encryption mechanism. +// Instead, consider using package golang.org/x/crypto/chacha20poly1305. +func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) { + // This function is split into a wrapper so that the Cipher allocation will + // be inlined, and depending on how the caller uses the return value, won't + // escape to the heap. + c := &Cipher{} + return newUnauthenticatedCipher(c, key, nonce) +} + +func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20: wrong key size") + } + if len(nonce) == NonceSizeX { + // XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a + // derived key, allowing it to operate on a nonce of 24 bytes. See + // draft-irtf-cfrg-xchacha-01, Section 2.3. + key, _ = HChaCha20(key, nonce[0:16]) + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + nonce = cNonce + } else if len(nonce) != NonceSize { + return nil, errors.New("chacha20: wrong nonce size") + } + + key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint + c.key = [8]uint32{ + binary.LittleEndian.Uint32(key[0:4]), + binary.LittleEndian.Uint32(key[4:8]), + binary.LittleEndian.Uint32(key[8:12]), + binary.LittleEndian.Uint32(key[12:16]), + binary.LittleEndian.Uint32(key[16:20]), + binary.LittleEndian.Uint32(key[20:24]), + binary.LittleEndian.Uint32(key[24:28]), + binary.LittleEndian.Uint32(key[28:32]), + } + c.nonce = [3]uint32{ + binary.LittleEndian.Uint32(nonce[0:4]), + binary.LittleEndian.Uint32(nonce[4:8]), + binary.LittleEndian.Uint32(nonce[8:12]), + } + return c, nil +} + +// The constant first 4 words of the ChaCha20 state. +const ( + j0 uint32 = 0x61707865 // expa + j1 uint32 = 0x3320646e // nd 3 + j2 uint32 = 0x79622d32 // 2-by + j3 uint32 = 0x6b206574 // te k +) + +const blockSize = 64 + +// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words. +// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16 +// words each round, in columnar or diagonal groups of 4 at a time. +func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { + a += b + d ^= a + d = bits.RotateLeft32(d, 16) + c += d + b ^= c + b = bits.RotateLeft32(b, 12) + a += b + d ^= a + d = bits.RotateLeft32(d, 8) + c += d + b ^= c + b = bits.RotateLeft32(b, 7) + return a, b, c, d +} + +// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will +// behave as if (64 * counter) bytes had been encrypted so far. +// +// To prevent accidental counter reuse, SetCounter panics if counter is less +// than the current value. +// +// Note that the execution time of XORKeyStream is not independent of the +// counter value. +func (s *Cipher) SetCounter(counter uint32) { + // Internally, s may buffer multiple blocks, which complicates this + // implementation slightly. 
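
[Editorial aside, not part of the patch: the hunks above add the public surface of the relocated chacha20 package — KeySize, NonceSize, NonceSizeX, the Cipher type and NewUnauthenticatedCipher. A minimal, hedged usage sketch of that API for consumers of the vendored module follows; the key/nonce handling is illustrative only, and as the package doc says, real callers should normally prefer chacha20poly1305.]

    package main

    import (
        "bytes"
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/chacha20"
    )

    func main() {
        key := make([]byte, chacha20.KeySize)
        if _, err := rand.Read(key); err != nil {
            panic(err)
        }
        // A 12-byte nonce selects plain ChaCha20; a 24-byte (NonceSizeX) nonce
        // would select the XChaCha20 construction handled in newUnauthenticatedCipher.
        nonce := make([]byte, chacha20.NonceSize)

        c, err := chacha20.NewUnauthenticatedCipher(key, nonce)
        if err != nil {
            panic(err)
        }
        plaintext := []byte("vendored chacha20 smoke test")
        ciphertext := make([]byte, len(plaintext))
        c.XORKeyStream(ciphertext, plaintext)

        // Decryption is the same XOR against a fresh Cipher over the same key/nonce.
        d, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
        recovered := make([]byte, len(ciphertext))
        d.XORKeyStream(recovered, ciphertext)
        fmt.Println(bytes.Equal(plaintext, recovered)) // true
    }
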
When checking whether the counter has rolled + // back, we must use both s.counter and s.len to determine how many blocks + // we have already output. + outputCounter := s.counter - uint32(s.len)/blockSize + if s.overflow || counter < outputCounter { + panic("chacha20: SetCounter attempted to rollback counter") + } + + // In the general case, we set the new counter value and reset s.len to 0, + // causing the next call to XORKeyStream to refill the buffer. However, if + // we're advancing within the existing buffer, we can save work by simply + // setting s.len. + if counter < s.counter { + s.len = int(s.counter-counter) * blockSize + } else { + s.counter = counter + s.len = 0 + } +} + +// XORKeyStream XORs each byte in the given slice with a byte from the +// cipher's key stream. Dst and src must overlap entirely or not at all. +// +// If len(dst) < len(src), XORKeyStream will panic. It is acceptable +// to pass a dst bigger than src, and in that case, XORKeyStream will +// only update dst[:len(src)] and will not touch the rest of dst. +// +// Multiple calls to XORKeyStream behave as if the concatenation of +// the src buffers was passed in a single run. That is, Cipher +// maintains state and does not reset at each XORKeyStream call. +func (s *Cipher) XORKeyStream(dst, src []byte) { + if len(src) == 0 { + return + } + if len(dst) < len(src) { + panic("chacha20: output smaller than input") + } + dst = dst[:len(src)] + if subtle.InexactOverlap(dst, src) { + panic("chacha20: invalid buffer overlap") + } + + // First, drain any remaining key stream from a previous XORKeyStream. + if s.len != 0 { + keyStream := s.buf[bufSize-s.len:] + if len(src) < len(keyStream) { + keyStream = keyStream[:len(src)] + } + _ = src[len(keyStream)-1] // bounds check elimination hint + for i, b := range keyStream { + dst[i] = src[i] ^ b + } + s.len -= len(keyStream) + dst, src = dst[len(keyStream):], src[len(keyStream):] + } + if len(src) == 0 { + return + } + + // If we'd need to let the counter overflow and keep generating output, + // panic immediately. If instead we'd only reach the last block, remember + // not to generate any more output after the buffer is drained. + numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize + if s.overflow || uint64(s.counter)+numBlocks > 1<<32 { + panic("chacha20: counter overflow") + } else if uint64(s.counter)+numBlocks == 1<<32 { + s.overflow = true + } + + // xorKeyStreamBlocks implementations expect input lengths that are a + // multiple of bufSize. Platform-specific ones process multiple blocks at a + // time, so have bufSizes that are a multiple of blockSize. + + full := len(src) - len(src)%bufSize + if full > 0 { + s.xorKeyStreamBlocks(dst[:full], src[:full]) + } + dst, src = dst[full:], src[full:] + + // If using a multi-block xorKeyStreamBlocks would overflow, use the generic + // one that does one block at a time. + const blocksPerBuf = bufSize / blockSize + if uint64(s.counter)+blocksPerBuf > 1<<32 { + s.buf = [bufSize]byte{} + numBlocks := (len(src) + blockSize - 1) / blockSize + buf := s.buf[bufSize-numBlocks*blockSize:] + copy(buf, src) + s.xorKeyStreamBlocksGeneric(buf, buf) + s.len = len(buf) - copy(dst, buf) + return + } + + // If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and + // keep the leftover keystream for the next XORKeyStream invocation. 
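
[Editorial aside, not part of the patch: XORKeyStream above is documented as stateful — multiple calls behave like one concatenated call — and SetCounter allows seeking forward in 64-byte blocks. A hedged sketch of both properties, with `key` and `nonce` as in the previous sketch and `msg` any plaintext longer than ten bytes.]

    // Chunked calls produce the same key stream as a single call.
    one, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
    a := make([]byte, len(msg))
    one.XORKeyStream(a, msg)

    two, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
    b := make([]byte, len(msg))
    two.XORKeyStream(b[:10], msg[:10]) // assumes len(msg) > 10
    two.XORKeyStream(b[10:], msg[10:])
    // bytes.Equal(a, b) == true

    // SetCounter(2) behaves as if 2*64 = 128 key stream bytes had already been
    // produced; it panics if asked to roll the counter backwards.
    seek, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
    seek.SetCounter(2)
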
+ if len(src) > 0 { + s.buf = [bufSize]byte{} + copy(s.buf[:], src) + s.xorKeyStreamBlocks(s.buf[:], s.buf[:]) + s.len = bufSize - copy(dst, s.buf[:]) + } +} + +func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { + if len(dst) != len(src) || len(dst)%blockSize != 0 { + panic("chacha20: internal error: wrong dst and/or src length") + } + + // To generate each block of key stream, the initial cipher state + // (represented below) is passed through 20 rounds of shuffling, + // alternatively applying quarterRounds by columns (like 1, 5, 9, 13) + // or by diagonals (like 1, 6, 11, 12). + // + // 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc + // 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk + // 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk + // 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn + // + // c=constant k=key b=blockcount n=nonce + var ( + c0, c1, c2, c3 = j0, j1, j2, j3 + c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3] + c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7] + _, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2] + ) + + // Three quarters of the first round don't depend on the counter, so we can + // calculate them here, and reuse them for multiple blocks in the loop, and + // for future XORKeyStream invocations. + if !s.precompDone { + s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13) + s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14) + s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15) + s.precompDone = true + } + + // A condition of len(src) > 0 would be sufficient, but this also + // acts as a bounds check elimination hint. + for len(src) >= 64 && len(dst) >= 64 { + // The remainder of the first column round. + fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter) + + // The second diagonal round. + x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15) + x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12) + x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13) + x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14) + + // The remaining 18 rounds. + for i := 0; i < 9; i++ { + // Column round. + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + // Diagonal round. + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + // Add back the initial state to generate the key stream, then + // XOR the key stream with the source and write out the result. + addXor(dst[0:4], src[0:4], x0, c0) + addXor(dst[4:8], src[4:8], x1, c1) + addXor(dst[8:12], src[8:12], x2, c2) + addXor(dst[12:16], src[12:16], x3, c3) + addXor(dst[16:20], src[16:20], x4, c4) + addXor(dst[20:24], src[20:24], x5, c5) + addXor(dst[24:28], src[24:28], x6, c6) + addXor(dst[28:32], src[28:32], x7, c7) + addXor(dst[32:36], src[32:36], x8, c8) + addXor(dst[36:40], src[36:40], x9, c9) + addXor(dst[40:44], src[40:44], x10, c10) + addXor(dst[44:48], src[44:48], x11, c11) + addXor(dst[48:52], src[48:52], x12, s.counter) + addXor(dst[52:56], src[52:56], x13, c13) + addXor(dst[56:60], src[56:60], x14, c14) + addXor(dst[60:64], src[60:64], x15, c15) + + s.counter += 1 + + src, dst = src[blockSize:], dst[blockSize:] + } +} + +// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32 bytes +// key and a 16 bytes nonce. 
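
[Editorial aside, not part of the patch: HChaCha20, introduced just above, is the key-derivation half of XChaCha20. A hedged sketch of how it composes with NewUnauthenticatedCipher, mirroring what newUnauthenticatedCipher does internally for 24-byte nonces; `nonce24` is an assumed 24-byte value.]

    // Derive the XChaCha20 subkey from the first 16 nonce bytes...
    subKey, err := chacha20.HChaCha20(key, nonce24[:16])
    if err != nil {
        panic(err)
    }
    // ...then run plain ChaCha20 with a 12-byte nonce whose last 8 bytes are
    // the remaining 8 bytes of the 24-byte nonce (first 4 bytes left zero).
    innerNonce := make([]byte, chacha20.NonceSize)
    copy(innerNonce[4:], nonce24[16:])
    inner, err := chacha20.NewUnauthenticatedCipher(subKey, innerNonce)
    if err != nil {
        panic(err)
    }
    _ = inner // equivalent to NewUnauthenticatedCipher(key, nonce24)
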
It returns an error if key or nonce have any other +// length. It is used as part of the XChaCha20 construction. +func HChaCha20(key, nonce []byte) ([]byte, error) { + // This function is split into a wrapper so that the slice allocation will + // be inlined, and depending on how the caller uses the return value, won't + // escape to the heap. + out := make([]byte, 32) + return hChaCha20(out, key, nonce) +} + +func hChaCha20(out, key, nonce []byte) ([]byte, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20: wrong HChaCha20 key size") + } + if len(nonce) != 16 { + return nil, errors.New("chacha20: wrong HChaCha20 nonce size") + } + + x0, x1, x2, x3 := j0, j1, j2, j3 + x4 := binary.LittleEndian.Uint32(key[0:4]) + x5 := binary.LittleEndian.Uint32(key[4:8]) + x6 := binary.LittleEndian.Uint32(key[8:12]) + x7 := binary.LittleEndian.Uint32(key[12:16]) + x8 := binary.LittleEndian.Uint32(key[16:20]) + x9 := binary.LittleEndian.Uint32(key[20:24]) + x10 := binary.LittleEndian.Uint32(key[24:28]) + x11 := binary.LittleEndian.Uint32(key[28:32]) + x12 := binary.LittleEndian.Uint32(nonce[0:4]) + x13 := binary.LittleEndian.Uint32(nonce[4:8]) + x14 := binary.LittleEndian.Uint32(nonce[8:12]) + x15 := binary.LittleEndian.Uint32(nonce[12:16]) + + for i := 0; i < 10; i++ { + // Diagonal round. + x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) + x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) + x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) + x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) + + // Column round. + x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) + x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) + x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) + x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) + } + + _ = out[31] // bounds check elimination hint + binary.LittleEndian.PutUint32(out[0:4], x0) + binary.LittleEndian.PutUint32(out[4:8], x1) + binary.LittleEndian.PutUint32(out[8:12], x2) + binary.LittleEndian.PutUint32(out[12:16], x3) + binary.LittleEndian.PutUint32(out[16:20], x12) + binary.LittleEndian.PutUint32(out[20:24], x13) + binary.LittleEndian.PutUint32(out[24:28], x14) + binary.LittleEndian.PutUint32(out[28:32], x15) + return out, nil +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go similarity index 50% rename from vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go rename to vendor/golang.org/x/crypto/chacha20/chacha_noasm.go index fc2682528..4635307b8 100644 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -2,15 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !arm64,!s390x,!ppc64le arm64,!go1.11 gccgo appengine +// +build !arm64,!s390x,!ppc64le arm64,!go1.11 gccgo purego package chacha20 -const ( - bufSize = 64 - haveAsm = false -) +const bufSize = blockSize -func (*Cipher) xorKeyStreamAsm(dst, src []byte) { - panic("not implemented") +func (s *Cipher) xorKeyStreamBlocks(dst, src []byte) { + s.xorKeyStreamBlocksGeneric(dst, src) } diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go new file mode 100644 index 000000000..b79933034 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!purego + +package chacha20 + +const bufSize = 256 + +//go:noescape +func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter) +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s similarity index 95% rename from vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s rename to vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s index 544185229..23c602164 100644 --- a/vendor/golang.org/x/crypto/internal/chacha20/asm_ppc64le.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s @@ -19,7 +19,7 @@ // The differences in this and the original implementation are // due to the calling conventions and initialization of constants. -// +build ppc64le,!gccgo,!appengine +// +build !gccgo,!purego #include "textflag.h" @@ -31,24 +31,7 @@ #define TMP R15 #define CONSTBASE R16 - -#define X0 R11 -#define X1 R12 -#define X2 R14 -#define X3 R15 -#define X4 R16 -#define X5 R17 -#define X6 R18 -#define X7 R19 -#define X8 R20 -#define X9 R21 -#define X10 R22 -#define X11 R23 -#define X12 R24 -#define X13 R25 -#define X14 R26 -#define X15 R27 - +#define BLOCKS R17 DATA consts<>+0x00(SB)/8, $0x3320646e61707865 DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 @@ -72,13 +55,13 @@ DATA consts<>+0x90(SB)/8, $0x0000000100000000 DATA consts<>+0x98(SB)/8, $0x0000000300000002 GLOBL consts<>(SB), RODATA, $0xa0 -//func chaCha20_ctr32_vsx(out, inp []byte, len int, key *[32]byte, counter *[16]byte) +//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD out+0(FP), OUT MOVD inp+8(FP), INP MOVD len+16(FP), LEN MOVD key+24(FP), KEY - MOVD cnt+32(FP), CNT + MOVD counter+32(FP), CNT // Addressing for constants MOVD $consts<>+0x00(SB), CONSTBASE @@ -86,6 +69,7 @@ TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 MOVD $32, R9 MOVD $48, R10 MOVD $64, R11 + SRD $6, LEN, BLOCKS // V16 LXVW4X (CONSTBASE)(R0), VS48 ADD $80,CONSTBASE @@ -429,9 +413,9 @@ loop_vsx: BNE loop_outer_vsx done_vsx: - // Increment counter by 4 + // Increment counter by number of 64 byte blocks MOVD (CNT), R14 - ADD $4, R14 + ADD BLOCKS, R14 MOVD R14, (CNT) RET diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go new file mode 100644 index 000000000..a9244bdf4 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go @@ -0,0 +1,26 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo,!purego + +package chacha20 + +import "golang.org/x/sys/cpu" + +var haveAsm = cpu.S390X.HasVX + +const bufSize = 256 + +// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only +// be called when the vector facility is available. Implementation in asm_s390x.s. 
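
[Editorial aside, not part of the patch: across these renamed assembly files the `appengine` build constraint is replaced by `purego`, the tag these x/crypto packages use as the opt-out switch for assembly. If the assembly paths ever need to be excluded while debugging the vendored build, the pure-Go implementations can be selected with that tag; the exact invocation depends on the build setup, e.g.:]

    go build -tags purego ./...
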
+//go:noescape +func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) + +func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { + if cpu.S390X.HasVX { + xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) + } else { + c.xorKeyStreamBlocksGeneric(dst, src) + } +} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s similarity index 87% rename from vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s rename to vendor/golang.org/x/crypto/chacha20/chacha_s390x.s index 57df40446..89c658c41 100644 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build s390x,!gccgo,!appengine +// +build !gccgo,!purego #include "go_asm.h" #include "textflag.h" @@ -24,15 +24,6 @@ DATA ·constants<>+0x14(SB)/4, $0x3320646e DATA ·constants<>+0x18(SB)/4, $0x79622d32 DATA ·constants<>+0x1c(SB)/4, $0x6b206574 -// EXRL targets: -TEXT ·mvcSrcToBuf(SB), NOFRAME|NOSPLIT, $0 - MVC $1, (R1), (R8) - RET - -TEXT ·mvcBufToDst(SB), NOFRAME|NOSPLIT, $0 - MVC $1, (R8), (R9) - RET - #define BSWAP V5 #define J0 V6 #define KEY0 V7 @@ -144,7 +135,7 @@ TEXT ·mvcBufToDst(SB), NOFRAME|NOSPLIT, $0 VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]} VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]} -// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int) +// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 MOVD $·constants<>(SB), R1 MOVD dst+0(FP), R2 // R2=&dst[0] @@ -152,25 +143,10 @@ TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 MOVD key+48(FP), R5 // R5=key MOVD nonce+56(FP), R6 // R6=nonce MOVD counter+64(FP), R7 // R7=counter - MOVD buf+72(FP), R8 // R8=buf - MOVD len+80(FP), R9 // R9=len // load BSWAP and J0 VLM (R1), BSWAP, J0 - // set up tail buffer - ADD $-1, R4, R12 - MOVBZ R12, R12 - CMPUBEQ R12, $255, aligned - MOVD R4, R1 - AND $~255, R1 - MOVD $(R3)(R1*1), R1 - EXRL $·mvcSrcToBuf(SB), R12 - MOVD $255, R0 - SUB R12, R0 - MOVD R0, (R9) // update len - -aligned: // setup MOVD $95, R0 VLM (R5), KEY0, KEY1 @@ -217,9 +193,7 @@ loop: // decrement length ADD $-256, R4 - BLT tail -continue: // rearrange vectors SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3) ADDV(J0, X0, X1, X2, X3) @@ -245,16 +219,6 @@ continue: MOVD $256(R3), R3 CMPBNE R4, $0, chacha - CMPUBEQ R12, $255, return - EXRL $·mvcBufToDst(SB), R12 // len was updated during setup -return: VSTEF $0, CTR, (R7) RET - -tail: - MOVD R2, R9 - MOVD R8, R2 - MOVD R8, R3 - MOVD $0, R4 - JMP continue diff --git a/vendor/golang.org/x/crypto/internal/chacha20/xor.go b/vendor/golang.org/x/crypto/chacha20/xor.go similarity index 73% rename from vendor/golang.org/x/crypto/internal/chacha20/xor.go rename to vendor/golang.org/x/crypto/chacha20/xor.go index 9c5ba0b33..c2d04851e 100644 --- a/vendor/golang.org/x/crypto/internal/chacha20/xor.go +++ b/vendor/golang.org/x/crypto/chacha20/xor.go @@ -4,9 +4,7 @@ package chacha20 -import ( - "runtime" -) +import "runtime" // Platforms that have fast unaligned 32-bit little endian accesses. 
const unaligned = runtime.GOARCH == "386" || @@ -15,10 +13,10 @@ const unaligned = runtime.GOARCH == "386" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x" -// xor reads a little endian uint32 from src, XORs it with u and +// addXor reads a little endian uint32 from src, XORs it with (a + b) and // places the result in little endian byte order in dst. -func xor(dst, src []byte, u uint32) { - _, _ = src[3], dst[3] // eliminate bounds checks +func addXor(dst, src []byte, a, b uint32) { + _, _ = src[3], dst[3] // bounds check elimination hint if unaligned { // The compiler should optimize this code into // 32-bit unaligned little endian loads and stores. @@ -29,15 +27,16 @@ func xor(dst, src []byte, u uint32) { v |= uint32(src[1]) << 8 v |= uint32(src[2]) << 16 v |= uint32(src[3]) << 24 - v ^= u + v ^= a + b dst[0] = byte(v) dst[1] = byte(v >> 8) dst[2] = byte(v >> 16) dst[3] = byte(v >> 24) } else { - dst[0] = src[0] ^ byte(u) - dst[1] = src[1] ^ byte(u>>8) - dst[2] = src[2] ^ byte(u>>16) - dst[3] = src[3] ^ byte(u>>24) + a += b + dst[0] = src[0] ^ byte(a) + dst[1] = src[1] ^ byte(a>>8) + dst[2] = src[2] ^ byte(a>>16) + dst[3] = src[3] ^ byte(a>>24) } } diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h deleted file mode 100644 index b3f74162f..000000000 --- a/vendor/golang.org/x/crypto/curve25519/const_amd64.h +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s deleted file mode 100644 index ee7b4bd5f..000000000 --- a/vendor/golang.org/x/crypto/curve25519/const_amd64.s +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -// These constants cannot be encoded in non-MOVQ immediates. -// We access them directly from memory instead. - -DATA ·_121666_213(SB)/8, $996687872 -GLOBL ·_121666_213(SB), 8, $8 - -DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA -GLOBL ·_2P0(SB), 8, $8 - -DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE -GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s deleted file mode 100644 index cd793a5b5..000000000 --- a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build amd64,!gccgo,!appengine - -// func cswap(inout *[4][5]uint64, v uint64) -TEXT ·cswap(SB),7,$0 - MOVQ inout+0(FP),DI - MOVQ v+8(FP),SI - - SUBQ $1, SI - NOTQ SI - MOVQ SI, X15 - PSHUFD $0x44, X15, X15 - - MOVOU 0(DI), X0 - MOVOU 16(DI), X2 - MOVOU 32(DI), X4 - MOVOU 48(DI), X6 - MOVOU 64(DI), X8 - MOVOU 80(DI), X1 - MOVOU 96(DI), X3 - MOVOU 112(DI), X5 - MOVOU 128(DI), X7 - MOVOU 144(DI), X9 - - MOVO X1, X10 - MOVO X3, X11 - MOVO X5, X12 - MOVO X7, X13 - MOVO X9, X14 - - PXOR X0, X10 - PXOR X2, X11 - PXOR X4, X12 - PXOR X6, X13 - PXOR X8, X14 - PAND X15, X10 - PAND X15, X11 - PAND X15, X12 - PAND X15, X13 - PAND X15, X14 - PXOR X10, X0 - PXOR X10, X1 - PXOR X11, X2 - PXOR X11, X3 - PXOR X12, X4 - PXOR X12, X5 - PXOR X13, X6 - PXOR X13, X7 - PXOR X14, X8 - PXOR X14, X9 - - MOVOU X0, 0(DI) - MOVOU X2, 16(DI) - MOVOU X4, 32(DI) - MOVOU X6, 48(DI) - MOVOU X8, 64(DI) - MOVOU X1, 80(DI) - MOVOU X3, 96(DI) - MOVOU X5, 112(DI) - MOVOU X7, 128(DI) - MOVOU X9, 144(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go index 75f24babb..4b9a655d1 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -1,834 +1,95 @@ -// Copyright 2013 The Go Authors. All rights reserved. +// Copyright 2019 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// We have an implementation in amd64 assembly so this code is only run on -// non-amd64 platforms. The amd64 assembly does not support gccgo. -// +build !amd64 gccgo appengine - -package curve25519 +// Package curve25519 provides an implementation of the X25519 function, which +// performs scalar multiplication on the elliptic curve known as Curve25519. +// See RFC 7748. +package curve25519 // import "golang.org/x/crypto/curve25519" import ( - "encoding/binary" + "crypto/subtle" + "fmt" ) -// This code is a port of the public domain, "ref10" implementation of -// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. - -// fieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type fieldElement [10]int32 - -func feZero(fe *fieldElement) { - for i := range fe { - fe[i] = 0 - } -} - -func feOne(fe *fieldElement) { - feZero(fe) - fe[0] = 1 -} - -func feAdd(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] + b[i] - } -} - -func feSub(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] - b[i] - } -} - -func feCopy(dst, src *fieldElement) { - for i := range dst { - dst[i] = src[i] - } -} - -// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func feCSwap(f, g *fieldElement, b int32) { - b = -b - for i := range f { - t := b & (f[i] ^ g[i]) - f[i] ^= t - g[i] ^= t - } -} - -// load3 reads a 24-bit, little-endian value from in. -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -// load4 reads a 32-bit, little-endian value from in. 
-func load4(in []byte) int64 { - return int64(binary.LittleEndian.Uint32(in)) -} - -func feFromBytes(dst *fieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 0x7fffff) << 2 - - var carry [10]int64 - carry[9] = (h9 + 1<<24) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + 1<<24) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + 1<<24) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + 1<<24) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + 1<<24) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + 1<<25) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + 1<<25) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + 1<<25) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + 1<<25) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + 1<<25) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - dst[0] = int32(h0) - dst[1] = int32(h1) - dst[2] = int32(h2) - dst[3] = int32(h3) - dst[4] = int32(h4) - dst[5] = int32(h5) - dst[6] = int32(h6) - dst[7] = int32(h7) - dst[8] = int32(h8) - dst[9] = int32(h9) -} - -// feToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. - - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. 
- - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) +// Deprecated: when provided a low-order point, ScalarMult will set dst to all +// zeroes, irrespective of the scalar. Instead, use the X25519 function, which +// will return an error. +func ScalarMult(dst, scalar, point *[32]byte) { + scalarMult(dst, scalar, point) } -// feMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. +// ScalarBaseMult sets dst to the product scalar * base where base is the +// standard generator. // -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. 
-func feMul(h, f, g *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - g0 := g[0] - g1 := g[1] - g2 := g[2] - g3 := g[3] - g4 := g[4] - g5 := g[5] - g6 := g[6] - g7 := g[7] - g8 := g[8] - g9 := g[9] - g1_19 := 19 * g1 // 1.4*2^29 - g2_19 := 19 * g2 // 1.4*2^30; still ok - g3_19 := 19 * g3 - g4_19 := 19 * g4 - g5_19 := 19 * g5 - g6_19 := 19 * g6 - g7_19 := 19 * g7 - g8_19 := 19 * g8 - g9_19 := 19 * g9 - f1_2 := 2 * f1 - f3_2 := 2 * f3 - f5_2 := 2 * f5 - f7_2 := 2 * f7 - f9_2 := 2 * f9 - f0g0 := int64(f0) * int64(g0) - f0g1 := int64(f0) * int64(g1) - f0g2 := int64(f0) * int64(g2) - f0g3 := int64(f0) * int64(g3) - f0g4 := int64(f0) * int64(g4) - f0g5 := int64(f0) * int64(g5) - f0g6 := int64(f0) * int64(g6) - f0g7 := int64(f0) * int64(g7) - f0g8 := int64(f0) * int64(g8) - f0g9 := int64(f0) * int64(g9) - f1g0 := int64(f1) * int64(g0) - f1g1_2 := int64(f1_2) * int64(g1) - f1g2 := int64(f1) * int64(g2) - f1g3_2 := int64(f1_2) * int64(g3) - f1g4 := int64(f1) * int64(g4) - f1g5_2 := int64(f1_2) * int64(g5) - f1g6 := int64(f1) * int64(g6) - f1g7_2 := int64(f1_2) * int64(g7) - f1g8 := int64(f1) * int64(g8) - f1g9_38 := int64(f1_2) * int64(g9_19) - f2g0 := int64(f2) * int64(g0) - f2g1 := int64(f2) * int64(g1) - f2g2 := int64(f2) * int64(g2) - f2g3 := int64(f2) * int64(g3) - f2g4 := int64(f2) * int64(g4) - f2g5 := int64(f2) * int64(g5) - f2g6 := int64(f2) * int64(g6) - f2g7 := int64(f2) * int64(g7) - f2g8_19 := int64(f2) * int64(g8_19) - f2g9_19 := int64(f2) * int64(g9_19) - f3g0 := int64(f3) * int64(g0) - f3g1_2 := int64(f3_2) * int64(g1) - f3g2 := int64(f3) * int64(g2) - f3g3_2 := int64(f3_2) * int64(g3) - f3g4 := int64(f3) * int64(g4) - f3g5_2 := int64(f3_2) * int64(g5) - f3g6 := int64(f3) * int64(g6) - f3g7_38 := int64(f3_2) * int64(g7_19) - f3g8_19 := int64(f3) * int64(g8_19) - f3g9_38 := int64(f3_2) * int64(g9_19) - f4g0 := int64(f4) * int64(g0) - f4g1 := int64(f4) * int64(g1) - f4g2 := int64(f4) * int64(g2) - f4g3 := int64(f4) * int64(g3) - f4g4 := int64(f4) * int64(g4) - f4g5 := int64(f4) * int64(g5) - f4g6_19 := int64(f4) * int64(g6_19) - f4g7_19 := int64(f4) * int64(g7_19) - f4g8_19 := int64(f4) * int64(g8_19) - f4g9_19 := int64(f4) * int64(g9_19) - f5g0 := int64(f5) * int64(g0) - f5g1_2 := int64(f5_2) * int64(g1) - f5g2 := int64(f5) * int64(g2) - f5g3_2 := int64(f5_2) * int64(g3) - f5g4 := int64(f5) * int64(g4) - f5g5_38 := int64(f5_2) * int64(g5_19) - f5g6_19 := int64(f5) * int64(g6_19) - f5g7_38 := int64(f5_2) * int64(g7_19) - f5g8_19 := int64(f5) * int64(g8_19) - f5g9_38 := int64(f5_2) * int64(g9_19) - f6g0 := int64(f6) * int64(g0) - f6g1 := int64(f6) * int64(g1) - f6g2 := int64(f6) * int64(g2) - f6g3 := int64(f6) * int64(g3) - f6g4_19 := int64(f6) * int64(g4_19) - f6g5_19 := int64(f6) * int64(g5_19) - f6g6_19 := int64(f6) * int64(g6_19) - f6g7_19 := int64(f6) * int64(g7_19) - f6g8_19 := int64(f6) * int64(g8_19) - f6g9_19 := int64(f6) * int64(g9_19) - f7g0 := int64(f7) * int64(g0) - f7g1_2 := int64(f7_2) * int64(g1) - f7g2 := int64(f7) * int64(g2) - f7g3_38 := int64(f7_2) * int64(g3_19) - f7g4_19 := int64(f7) * int64(g4_19) - f7g5_38 := int64(f7_2) * int64(g5_19) - f7g6_19 := int64(f7) * int64(g6_19) - f7g7_38 := int64(f7_2) * int64(g7_19) - f7g8_19 := int64(f7) * int64(g8_19) - f7g9_38 := int64(f7_2) * int64(g9_19) - f8g0 := int64(f8) * int64(g0) - f8g1 := int64(f8) * int64(g1) - f8g2_19 := int64(f8) * int64(g2_19) - f8g3_19 := int64(f8) * int64(g3_19) - f8g4_19 := int64(f8) * int64(g4_19) - 
f8g5_19 := int64(f8) * int64(g5_19) - f8g6_19 := int64(f8) * int64(g6_19) - f8g7_19 := int64(f8) * int64(g7_19) - f8g8_19 := int64(f8) * int64(g8_19) - f8g9_19 := int64(f8) * int64(g9_19) - f9g0 := int64(f9) * int64(g0) - f9g1_38 := int64(f9_2) * int64(g1_19) - f9g2_19 := int64(f9) * int64(g2_19) - f9g3_38 := int64(f9_2) * int64(g3_19) - f9g4_19 := int64(f9) * int64(g4_19) - f9g5_38 := int64(f9_2) * int64(g5_19) - f9g6_19 := int64(f9) * int64(g6_19) - f9g7_38 := int64(f9_2) * int64(g7_19) - f9g8_19 := int64(f9) * int64(g8_19) - f9g9_38 := int64(f9_2) * int64(g9_19) - h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 - h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 - h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 - h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 - h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 - h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 - h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 - h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 - h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 - h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 - var carry [10]int64 - - // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - // |h0| <= 2^25 - // |h4| <= 2^25 - // |h1| <= 1.51*2^58 - // |h5| <= 1.51*2^58 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - // |h1| <= 2^24; from now on fits into int32 - // |h5| <= 2^24; from now on fits into int32 - // |h2| <= 1.21*2^59 - // |h6| <= 1.21*2^59 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - // |h2| <= 2^25; from now on fits into int32 unchanged - // |h6| <= 2^25; from now on fits into int32 unchanged - // |h3| <= 1.51*2^58 - // |h7| <= 1.51*2^58 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - // |h3| <= 2^24; from now on fits into int32 unchanged - // |h7| <= 2^24; from now on fits into int32 unchanged - // |h4| <= 1.52*2^33 - // |h8| <= 1.52*2^33 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - // |h4| <= 2^25; from now on fits into int32 unchanged - // |h8| <= 2^25; from now on fits into int32 unchanged - // |h5| <= 1.01*2^24 - // |h9| <= 1.51*2^58 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - // |h9| <= 2^24; from now on fits into int32 unchanged - // |h0| <= 1.8*2^37 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - // |h0| <= 2^25; from now on fits into int32 unchanged - // |h1| <= 
1.01*2^24 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) +// It is recommended to use the X25519 function with Basepoint instead, as +// copying into fixed size arrays can lead to unexpected bugs. +func ScalarBaseMult(dst, scalar *[32]byte) { + ScalarMult(dst, scalar, &basePoint) } -// feSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feSquare(h, f *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) * int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + 
f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 +const ( + // ScalarSize is the size of the scalar input to X25519. + ScalarSize = 32 + // PointSize is the size of the point input to X25519. + PointSize = 32 +) - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 +// Basepoint is the canonical Curve25519 generator. +var Basepoint []byte - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 +var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 +func init() { Basepoint = basePoint[:] } - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) +func checkBasepoint() { + if subtle.ConstantTimeCompare(Basepoint, []byte{ + 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }) != 1 { + panic("curve25519: global Basepoint value was modified") + } } -// feMul121666 calculates h = f * 121666. Can overlap h with f. +// X25519 returns the result of the scalar multiplication (scalar * point), +// according to RFC 7748, Section 5. scalar, point and the return value are +// slices of 32 bytes. // -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// scalar can be generated at random, for example with crypto/rand. point should +// be either Basepoint or the output of another X25519 call. // -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
-func feMul121666(h, f *fieldElement) { - h0 := int64(f[0]) * 121666 - h1 := int64(f[1]) * 121666 - h2 := int64(f[2]) * 121666 - h3 := int64(f[3]) * 121666 - h4 := int64(f[4]) * 121666 - h5 := int64(f[5]) * 121666 - h6 := int64(f[6]) * 121666 - h7 := int64(f[7]) * 121666 - h8 := int64(f[8]) * 121666 - h9 := int64(f[9]) * 121666 - var carry [10]int64 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feInvert sets out = z^-1. -func feInvert(out, z *fieldElement) { - var t0, t1, t2, t3 fieldElement - var i int - - feSquare(&t0, z) - for i = 1; i < 1; i++ { - feSquare(&t0, &t0) - } - feSquare(&t1, &t0) - for i = 1; i < 2; i++ { - feSquare(&t1, &t1) - } - feMul(&t1, z, &t1) - feMul(&t0, &t0, &t1) - feSquare(&t2, &t0) - for i = 1; i < 1; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t1, &t2) - feSquare(&t2, &t1) - for i = 1; i < 5; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 20; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 100; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t1, &t1) - for i = 1; i < 5; i++ { - feSquare(&t1, &t1) - } - feMul(out, &t1, &t0) +// If point is Basepoint (but not if it's a different slice with the same +// contents) a precomputed implementation might be used for performance. +func X25519(scalar, point []byte) ([]byte, error) { + // Outline the body of function, to let the allocation be inlined in the + // caller, and possibly avoid escaping to the heap. 
+ var dst [32]byte + return x25519(&dst, scalar, point) } -func scalarMult(out, in, base *[32]byte) { - var e [32]byte - - copy(e[:], in[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement - feFromBytes(&x1, base) - feOne(&x2) - feCopy(&x3, &x1) - feOne(&z3) - - swap := int32(0) - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int32(b) - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - swap = int32(b) - - feSub(&tmp0, &x3, &z3) - feSub(&tmp1, &x2, &z2) - feAdd(&x2, &x2, &z2) - feAdd(&z2, &x3, &z3) - feMul(&z3, &tmp0, &x2) - feMul(&z2, &z2, &tmp1) - feSquare(&tmp0, &tmp1) - feSquare(&tmp1, &x2) - feAdd(&x3, &z3, &z2) - feSub(&z2, &z3, &z2) - feMul(&x2, &tmp1, &tmp0) - feSub(&tmp1, &tmp1, &tmp0) - feSquare(&z2, &z2) - feMul121666(&z3, &tmp1) - feSquare(&x3, &x3) - feAdd(&tmp0, &tmp0, &z3) - feMul(&z3, &x1, &z2) - feMul(&z2, &tmp1, &tmp0) - } - - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - - feInvert(&z2, &z2) - feMul(&x2, &x2, &z2) - feToBytes(out, &x2) +func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { + var in [32]byte + if l := len(scalar); l != 32 { + return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32) + } + if l := len(point); l != 32 { + return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32) + } + copy(in[:], scalar) + if &point[0] == &Basepoint[0] { + checkBasepoint() + ScalarBaseMult(dst, &in) + } else { + var base, zero [32]byte + copy(base[:], point) + ScalarMult(dst, &in, &base) + if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { + return nil, fmt.Errorf("bad input point: low order point") + } + } + return dst[:], nil } diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go similarity index 99% rename from vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go rename to vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go index 5822bd533..5120b779b 100644 --- a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,!gccgo,!appengine +// +build amd64,!gccgo,!appengine,!purego package curve25519 diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s similarity index 76% rename from vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s rename to vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s index e0ac30c70..0250c8885 100644 --- a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s @@ -5,9 +5,84 @@ // This code was translated into a form compatible with 6a from the public // domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html -// +build amd64,!gccgo,!appengine +// +build amd64,!gccgo,!appengine,!purego -#include "const_amd64.h" +#define REDMASK51 0x0007FFFFFFFFFFFF + +// These constants cannot be encoded in non-MOVQ immediates. +// We access them directly from memory instead. 
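
[Editorial aside, not part of the patch: the curve25519.go rewrite above replaces the removed ref10 internals with the RFC 7748 X25519 entry point, Basepoint, ScalarSize, and a deprecated ScalarMult. A hedged sketch of a typical Diffie-Hellman exchange against the new API follows.]

    package main

    import (
        "bytes"
        "crypto/rand"
        "fmt"

        "golang.org/x/crypto/curve25519"
    )

    func main() {
        // Each side draws a random 32-byte scalar as its private key; X25519
        // clamps the scalar internally.
        alicePriv := make([]byte, curve25519.ScalarSize)
        bobPriv := make([]byte, curve25519.ScalarSize)
        if _, err := rand.Read(alicePriv); err != nil {
            panic(err)
        }
        if _, err := rand.Read(bobPriv); err != nil {
            panic(err)
        }

        // Public keys: scalar * base point.
        alicePub, err := curve25519.X25519(alicePriv, curve25519.Basepoint)
        if err != nil {
            panic(err)
        }
        bobPub, err := curve25519.X25519(bobPriv, curve25519.Basepoint)
        if err != nil {
            panic(err)
        }

        // Shared secret. Unlike the deprecated ScalarMult, which silently
        // returns all zeroes for a low-order peer point, X25519 reports an error.
        shared1, err := curve25519.X25519(alicePriv, bobPub)
        if err != nil {
            panic(err)
        }
        shared2, err := curve25519.X25519(bobPriv, alicePub)
        if err != nil {
            panic(err)
        }
        fmt.Println(bytes.Equal(shared1, shared2)) // true
    }
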
+ +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$0-8 + MOVQ inout+0(FP), DI + + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ $REDMASK51,AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + RET // func ladderstep(inout *[5][5]uint64) TEXT ·ladderstep(SB),0,$296-8 @@ -1375,3 +1450,344 @@ TEXT ·ladderstep(SB),0,$296-8 MOVQ AX,104(DI) MOVQ R10,112(DI) RET + +// func cswap(inout *[4][5]uint64, v uint64) +TEXT ·cswap(SB),7,$0 + MOVQ inout+0(FP),DI + MOVQ v+8(FP),SI + + SUBQ $1, SI + NOTQ SI + MOVQ SI, X15 + PSHUFD $0x44, X15, X15 + + MOVOU 0(DI), X0 + MOVOU 16(DI), X2 + MOVOU 32(DI), X4 + MOVOU 48(DI), X6 + MOVOU 64(DI), X8 + MOVOU 80(DI), X1 + MOVOU 96(DI), X3 + MOVOU 112(DI), X5 + MOVOU 128(DI), X7 + MOVOU 144(DI), X9 + + MOVO X1, X10 + MOVO X3, X11 + MOVO X5, X12 + MOVO X7, X13 + MOVO X9, X14 + + PXOR X0, X10 + PXOR X2, X11 + PXOR X4, X12 + PXOR X6, X13 + PXOR X8, X14 + PAND X15, X10 + PAND X15, X11 + PAND X15, X12 + PAND X15, X13 + PAND X15, X14 + PXOR X10, X0 + PXOR X10, X1 + PXOR X11, X2 + PXOR X11, X3 + PXOR X12, X4 + PXOR X12, X5 + PXOR X13, X6 + PXOR X13, X7 + PXOR X14, X8 + PXOR X14, X9 + + MOVOU X0, 0(DI) + MOVOU X2, 16(DI) + MOVOU X4, 32(DI) + MOVOU X6, 48(DI) + MOVOU X8, 64(DI) + MOVOU X1, 80(DI) + MOVOU X3, 96(DI) + MOVOU X5, 112(DI) + MOVOU X7, 128(DI) + MOVOU X9, 144(DI) + RET + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$16-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,0(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + 
MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 0(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 0(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ $REDMASK51,SI + SHLQ $13,R8,R9 + ANDQ SI,R8 + SHLQ $13,R10,R11 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R12,R13 + ANDQ SI,R12 + ADDQ R11,R12 + SHLQ $13,R14,R15 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BX,BP + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$0-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ $REDMASK51,SI + SHLQ $13,CX,R8 + ANDQ SI,CX + SHLQ $13,R9,R10 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R11,R12 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R13,R14 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,R15,BX + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go b/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go new file mode 100644 index 000000000..c43b13fc8 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go @@ -0,0 +1,828 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package curve25519 + +import "encoding/binary" + +// This code is a port of the public domain, "ref10" implementation of +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. + +// fieldElement represents an element of the field GF(2^255 - 19). 
An element
+// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
+// context.
+type fieldElement [10]int32
+
+func feZero(fe *fieldElement) {
+ for i := range fe {
+ fe[i] = 0
+ }
+}
+
+func feOne(fe *fieldElement) {
+ feZero(fe)
+ fe[0] = 1
+}
+
+func feAdd(dst, a, b *fieldElement) {
+ for i := range dst {
+ dst[i] = a[i] + b[i]
+ }
+}
+
+func feSub(dst, a, b *fieldElement) {
+ for i := range dst {
+ dst[i] = a[i] - b[i]
+ }
+}
+
+func feCopy(dst, src *fieldElement) {
+ for i := range dst {
+ dst[i] = src[i]
+ }
+}
+
+// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
+//
+// Preconditions: b in {0,1}.
+func feCSwap(f, g *fieldElement, b int32) {
+ b = -b
+ for i := range f {
+ t := b & (f[i] ^ g[i])
+ f[i] ^= t
+ g[i] ^= t
+ }
+}
+
+// load3 reads a 24-bit, little-endian value from in.
+func load3(in []byte) int64 {
+ var r int64
+ r = int64(in[0])
+ r |= int64(in[1]) << 8
+ r |= int64(in[2]) << 16
+ return r
+}
+
+// load4 reads a 32-bit, little-endian value from in.
+func load4(in []byte) int64 {
+ return int64(binary.LittleEndian.Uint32(in))
+}
+
+func feFromBytes(dst *fieldElement, src *[32]byte) {
+ h0 := load4(src[:])
+ h1 := load3(src[4:]) << 6
+ h2 := load3(src[7:]) << 5
+ h3 := load3(src[10:]) << 3
+ h4 := load3(src[13:]) << 2
+ h5 := load4(src[16:])
+ h6 := load3(src[20:]) << 7
+ h7 := load3(src[23:]) << 5
+ h8 := load3(src[26:]) << 4
+ h9 := (load3(src[29:]) & 0x7fffff) << 2
+
+ var carry [10]int64
+ carry[9] = (h9 + 1<<24) >> 25
+ h0 += carry[9] * 19
+ h9 -= carry[9] << 25
+ carry[1] = (h1 + 1<<24) >> 25
+ h2 += carry[1]
+ h1 -= carry[1] << 25
+ carry[3] = (h3 + 1<<24) >> 25
+ h4 += carry[3]
+ h3 -= carry[3] << 25
+ carry[5] = (h5 + 1<<24) >> 25
+ h6 += carry[5]
+ h5 -= carry[5] << 25
+ carry[7] = (h7 + 1<<24) >> 25
+ h8 += carry[7]
+ h7 -= carry[7] << 25
+
+ carry[0] = (h0 + 1<<25) >> 26
+ h1 += carry[0]
+ h0 -= carry[0] << 26
+ carry[2] = (h2 + 1<<25) >> 26
+ h3 += carry[2]
+ h2 -= carry[2] << 26
+ carry[4] = (h4 + 1<<25) >> 26
+ h5 += carry[4]
+ h4 -= carry[4] << 26
+ carry[6] = (h6 + 1<<25) >> 26
+ h7 += carry[6]
+ h6 -= carry[6] << 26
+ carry[8] = (h8 + 1<<25) >> 26
+ h9 += carry[8]
+ h8 -= carry[8] << 26
+
+ dst[0] = int32(h0)
+ dst[1] = int32(h1)
+ dst[2] = int32(h2)
+ dst[3] = int32(h3)
+ dst[4] = int32(h4)
+ dst[5] = int32(h5)
+ dst[6] = int32(h6)
+ dst[7] = int32(h7)
+ dst[8] = int32(h8)
+ dst[9] = int32(h9)
+}
+
+// feToBytes marshals h to s.
+// Preconditions:
+// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+// Then 0<y<1.
+//
+// Write r=h-pq.
+// Have 0<=r<=p-1=2^255-20.
+// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+// Write x=r+19(2^-255)r+y.
+// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func feToBytes(s *[32]byte, h *fieldElement) {
+ carry := make([]int32, 10)
+
+ q := (19*h[9] + (1 << 24)) >> 25
+ q = (h[0] + q) >> 26
+ q = (h[1] + q) >> 25
+ q = (h[2] + q) >> 26
+ q = (h[3] + q) >> 25
+ q = (h[4] + q) >> 26
+ q = (h[5] + q) >> 25
+ q = (h[6] + q) >> 26
+ q = (h[7] + q) >> 25
+ q = (h[8] + q) >> 26
+ q = (h[9] + q) >> 25
+
+ // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+ h[0] += 19 * q
+ // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
+func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + 
f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 
1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] 
= (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. 
+func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMultGeneric(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go b/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go new file mode 100644 index 000000000..047d49afc --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 gccgo appengine purego + +package curve25519 + +func scalarMult(out, in, base *[32]byte) { + scalarMultGeneric(out, in, base) +} diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go deleted file mode 100644 index da9b10d9c..000000000 --- a/vendor/golang.org/x/crypto/curve25519/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package curve25519 provides an implementation of scalar multiplication on -// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html -package curve25519 // import "golang.org/x/crypto/curve25519" - -// basePoint is the x coordinate of the generator of the curve. 
-var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - -// ScalarMult sets dst to the product in*base where dst and base are the x -// coordinates of group points and all values are in little-endian form. -func ScalarMult(dst, in, base *[32]byte) { - scalarMult(dst, in, base) -} - -// ScalarBaseMult sets dst to the product in*base where dst and base are the x -// coordinates of group points, base is the standard generator and all values -// are in little-endian form. -func ScalarBaseMult(dst, in *[32]byte) { - ScalarMult(dst, in, &basePoint) -} diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s deleted file mode 100644 index 390816106..000000000 --- a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -#include "const_amd64.h" - -// func freeze(inout *[5]uint64) -TEXT ·freeze(SB),7,$0-8 - MOVQ inout+0(FP), DI - - MOVQ 0(DI),SI - MOVQ 8(DI),DX - MOVQ 16(DI),CX - MOVQ 24(DI),R8 - MOVQ 32(DI),R9 - MOVQ $REDMASK51,AX - MOVQ AX,R10 - SUBQ $18,R10 - MOVQ $3,R11 -REDUCELOOP: - MOVQ SI,R12 - SHRQ $51,R12 - ANDQ AX,SI - ADDQ R12,DX - MOVQ DX,R12 - SHRQ $51,R12 - ANDQ AX,DX - ADDQ R12,CX - MOVQ CX,R12 - SHRQ $51,R12 - ANDQ AX,CX - ADDQ R12,R8 - MOVQ R8,R12 - SHRQ $51,R12 - ANDQ AX,R8 - ADDQ R12,R9 - MOVQ R9,R12 - SHRQ $51,R12 - ANDQ AX,R9 - IMUL3Q $19,R12,R12 - ADDQ R12,SI - SUBQ $1,R11 - JA REDUCELOOP - MOVQ $1,R12 - CMPQ R10,SI - CMOVQLT R11,R12 - CMPQ AX,DX - CMOVQNE R11,R12 - CMPQ AX,CX - CMOVQNE R11,R12 - CMPQ AX,R8 - CMOVQNE R11,R12 - CMPQ AX,R9 - CMOVQNE R11,R12 - NEGQ R12 - ANDQ R12,AX - ANDQ R12,R10 - SUBQ R10,SI - SUBQ AX,DX - SUBQ AX,CX - SUBQ AX,R8 - SUBQ AX,R9 - MOVQ SI,0(DI) - MOVQ DX,8(DI) - MOVQ CX,16(DI) - MOVQ R8,24(DI) - MOVQ R9,32(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s deleted file mode 100644 index 1f76d1a3f..000000000 --- a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -#include "const_amd64.h" - -// func mul(dest, a, b *[5]uint64) -TEXT ·mul(SB),0,$16-24 - MOVQ dest+0(FP), DI - MOVQ a+8(FP), SI - MOVQ b+16(FP), DX - - MOVQ DX,CX - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,0(SP) - MULQ 16(CX) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 8(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 0(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 8(CX) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 0(SI),AX - MULQ 16(CX) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 0(SI),AX - MULQ 24(CX) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 0(SI),AX - MULQ 32(CX) - MOVQ AX,BX - MOVQ DX,BP - MOVQ 8(SI),AX - MULQ 0(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SI),AX - MULQ 8(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SI),AX - MULQ 16(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SI),AX - MULQ 24(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),AX - MULQ 0(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 16(SI),AX - MULQ 8(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 16(SI),AX - MULQ 16(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 24(SI),AX - MULQ 0(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 24(SI),AX - MULQ 8(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 0(SP),AX - MULQ 24(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 0(SP),AX - MULQ 32(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 32(SI),AX - MULQ 0(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SP),AX - MULQ 16(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 24(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SP),AX - MULQ 32(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ $REDMASK51,SI - SHLQ $13,R8,R9 - ANDQ SI,R8 - SHLQ $13,R10,R11 - ANDQ SI,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ SI,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ SI,R14 - ADDQ R13,R14 - SHLQ $13,BX,BP - ANDQ SI,BX - ADDQ R15,BX - IMUL3Q $19,BP,DX - ADDQ DX,R8 - MOVQ R8,DX - SHRQ $51,DX - ADDQ R10,DX - MOVQ DX,CX - SHRQ $51,DX - ANDQ SI,R8 - ADDQ R12,DX - MOVQ DX,R9 - SHRQ $51,DX - ANDQ SI,CX - ADDQ R14,DX - MOVQ DX,AX - SHRQ $51,DX - ANDQ SI,R9 - ADDQ BX,DX - MOVQ DX,R10 - SHRQ $51,DX - ANDQ SI,AX - IMUL3Q $19,DX,DX - ADDQ DX,R8 - ANDQ SI,R10 - MOVQ R8,0(DI) - MOVQ CX,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s deleted file mode 100644 index 07511a45a..000000000 --- a/vendor/golang.org/x/crypto/curve25519/square_amd64.s +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -#include "const_amd64.h" - -// func square(out, in *[5]uint64) -TEXT ·square(SB),7,$0-16 - MOVQ out+0(FP), DI - MOVQ in+8(FP), SI - - MOVQ 0(SI),AX - MULQ 0(SI) - MOVQ AX,CX - MOVQ DX,R8 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 8(SI) - MOVQ AX,R9 - MOVQ DX,R10 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 16(SI) - MOVQ AX,R11 - MOVQ DX,R12 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 24(SI) - MOVQ AX,R13 - MOVQ DX,R14 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 32(SI) - MOVQ AX,R15 - MOVQ DX,BX - MOVQ 8(SI),AX - MULQ 8(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 16(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 24(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 8(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),AX - MULQ 16(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 24(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ $REDMASK51,SI - SHLQ $13,CX,R8 - ANDQ SI,CX - SHLQ $13,R9,R10 - ANDQ SI,R9 - ADDQ R8,R9 - SHLQ $13,R11,R12 - ANDQ SI,R11 - ADDQ R10,R11 - SHLQ $13,R13,R14 - ANDQ SI,R13 - ADDQ R12,R13 - SHLQ $13,R15,BX - ANDQ SI,R15 - ADDQ R14,R15 - IMUL3Q $19,BX,DX - ADDQ DX,CX - MOVQ CX,DX - SHRQ $51,DX - ADDQ R9,DX - ANDQ SI,CX - MOVQ DX,R8 - SHRQ $51,DX - ADDQ R11,DX - ANDQ SI,R8 - MOVQ DX,R9 - SHRQ $51,DX - ADDQ R13,DX - ANDQ SI,R9 - MOVQ DX,AX - SHRQ $51,DX - ADDQ R15,DX - ANDQ SI,AX - MOVQ DX,R10 - SHRQ $51,DX - IMUL3Q $19,DX,DX - ADDQ DX,CX - ANDQ SI,R10 - MOVQ CX,0(DI) - MOVQ R8,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - RET diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go deleted file mode 100644 index ad74e23ae..000000000 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_arm64.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.11 -// +build !gccgo - -package chacha20 - -const ( - haveAsm = true - bufSize = 256 -) - -//go:noescape -func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { - - if len(src) >= bufSize { - xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) - } - - if len(src)%bufSize != 0 { - i := len(src) - len(src)%bufSize - c.buf = [bufSize]byte{} - copy(c.buf[:], src[i:]) - xorKeyStreamVX(c.buf[:], c.buf[:], &c.key, &c.nonce, &c.counter) - c.len = bufSize - copy(dst[i:], c.buf[:len(src)%bufSize]) - } -} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go deleted file mode 100644 index 6570847f5..000000000 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_generic.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package ChaCha20 implements the core ChaCha20 function as specified -// in https://tools.ietf.org/html/rfc7539#section-2.3. -package chacha20 - -import ( - "crypto/cipher" - "encoding/binary" - - "golang.org/x/crypto/internal/subtle" -) - -// assert that *Cipher implements cipher.Stream -var _ cipher.Stream = (*Cipher)(nil) - -// Cipher is a stateful instance of ChaCha20 using a particular key -// and nonce. A *Cipher implements the cipher.Stream interface. -type Cipher struct { - key [8]uint32 - counter uint32 // incremented after each block - nonce [3]uint32 - buf [bufSize]byte // buffer for unused keystream bytes - len int // number of unused keystream bytes at end of buf -} - -// New creates a new ChaCha20 stream cipher with the given key and nonce. -// The initial counter value is set to 0. -func New(key [8]uint32, nonce [3]uint32) *Cipher { - return &Cipher{key: key, nonce: nonce} -} - -// ChaCha20 constants spelling "expand 32-byte k" -const ( - j0 uint32 = 0x61707865 - j1 uint32 = 0x3320646e - j2 uint32 = 0x79622d32 - j3 uint32 = 0x6b206574 -) - -func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { - a += b - d ^= a - d = (d << 16) | (d >> 16) - c += d - b ^= c - b = (b << 12) | (b >> 20) - a += b - d ^= a - d = (d << 8) | (d >> 24) - c += d - b ^= c - b = (b << 7) | (b >> 25) - return a, b, c, d -} - -// XORKeyStream XORs each byte in the given slice with a byte from the -// cipher's key stream. Dst and src must overlap entirely or not at all. -// -// If len(dst) < len(src), XORKeyStream will panic. It is acceptable -// to pass a dst bigger than src, and in that case, XORKeyStream will -// only update dst[:len(src)] and will not touch the rest of dst. -// -// Multiple calls to XORKeyStream behave as if the concatenation of -// the src buffers was passed in a single run. That is, Cipher -// maintains state and does not reset at each XORKeyStream call. 
-func (s *Cipher) XORKeyStream(dst, src []byte) { - if len(dst) < len(src) { - panic("chacha20: output smaller than input") - } - if subtle.InexactOverlap(dst[:len(src)], src) { - panic("chacha20: invalid buffer overlap") - } - - // xor src with buffered keystream first - if s.len != 0 { - buf := s.buf[len(s.buf)-s.len:] - if len(src) < len(buf) { - buf = buf[:len(src)] - } - td, ts := dst[:len(buf)], src[:len(buf)] // BCE hint - for i, b := range buf { - td[i] = ts[i] ^ b - } - s.len -= len(buf) - if s.len != 0 { - return - } - s.buf = [len(s.buf)]byte{} // zero the empty buffer - src = src[len(buf):] - dst = dst[len(buf):] - } - - if len(src) == 0 { - return - } - if haveAsm { - if uint64(len(src))+uint64(s.counter)*64 > (1<<38)-64 { - panic("chacha20: counter overflow") - } - s.xorKeyStreamAsm(dst, src) - return - } - - // set up a 64-byte buffer to pad out the final block if needed - // (hoisted out of the main loop to avoid spills) - rem := len(src) % 64 // length of final block - fin := len(src) - rem // index of final block - if rem > 0 { - copy(s.buf[len(s.buf)-64:], src[fin:]) - } - - // pre-calculate most of the first round - s1, s5, s9, s13 := quarterRound(j1, s.key[1], s.key[5], s.nonce[0]) - s2, s6, s10, s14 := quarterRound(j2, s.key[2], s.key[6], s.nonce[1]) - s3, s7, s11, s15 := quarterRound(j3, s.key[3], s.key[7], s.nonce[2]) - - n := len(src) - src, dst = src[:n:n], dst[:n:n] // BCE hint - for i := 0; i < n; i += 64 { - // calculate the remainder of the first round - s0, s4, s8, s12 := quarterRound(j0, s.key[0], s.key[4], s.counter) - - // execute the second round - x0, x5, x10, x15 := quarterRound(s0, s5, s10, s15) - x1, x6, x11, x12 := quarterRound(s1, s6, s11, s12) - x2, x7, x8, x13 := quarterRound(s2, s7, s8, s13) - x3, x4, x9, x14 := quarterRound(s3, s4, s9, s14) - - // execute the remaining 18 rounds - for i := 0; i < 9; i++ { - x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) - x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) - x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) - x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) - - x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) - x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) - x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) - x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) - } - - x0 += j0 - x1 += j1 - x2 += j2 - x3 += j3 - - x4 += s.key[0] - x5 += s.key[1] - x6 += s.key[2] - x7 += s.key[3] - x8 += s.key[4] - x9 += s.key[5] - x10 += s.key[6] - x11 += s.key[7] - - x12 += s.counter - x13 += s.nonce[0] - x14 += s.nonce[1] - x15 += s.nonce[2] - - // increment the counter - s.counter += 1 - if s.counter == 0 { - panic("chacha20: counter overflow") - } - - // pad to 64 bytes if needed - in, out := src[i:], dst[i:] - if i == fin { - // src[fin:] has already been copied into s.buf before - // the main loop - in, out = s.buf[len(s.buf)-64:], s.buf[len(s.buf)-64:] - } - in, out = in[:64], out[:64] // BCE hint - - // XOR the key stream with the source and write out the result - xor(out[0:], in[0:], x0) - xor(out[4:], in[4:], x1) - xor(out[8:], in[8:], x2) - xor(out[12:], in[12:], x3) - xor(out[16:], in[16:], x4) - xor(out[20:], in[20:], x5) - xor(out[24:], in[24:], x6) - xor(out[28:], in[28:], x7) - xor(out[32:], in[32:], x8) - xor(out[36:], in[36:], x9) - xor(out[40:], in[40:], x10) - xor(out[44:], in[44:], x11) - xor(out[48:], in[48:], x12) - xor(out[52:], in[52:], x13) - xor(out[56:], in[56:], x14) - xor(out[60:], in[60:], x15) - } - // copy any trailing bytes out of the buffer and into dst - if rem != 0 { - s.len = 64 
- rem - copy(dst[fin:], s.buf[len(s.buf)-64:]) - } -} - -// Advance discards bytes in the key stream until the next 64 byte block -// boundary is reached and updates the counter accordingly. If the key -// stream is already at a block boundary no bytes will be discarded and -// the counter will be unchanged. -func (s *Cipher) Advance() { - s.len -= s.len % 64 - if s.len == 0 { - s.buf = [len(s.buf)]byte{} - } -} - -// XORKeyStream crypts bytes from in to out using the given key and counters. -// In and out must overlap entirely or not at all. Counter contains the raw -// ChaCha20 counter bytes (i.e. block counter followed by nonce). -func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { - s := Cipher{ - key: [8]uint32{ - binary.LittleEndian.Uint32(key[0:4]), - binary.LittleEndian.Uint32(key[4:8]), - binary.LittleEndian.Uint32(key[8:12]), - binary.LittleEndian.Uint32(key[12:16]), - binary.LittleEndian.Uint32(key[16:20]), - binary.LittleEndian.Uint32(key[20:24]), - binary.LittleEndian.Uint32(key[24:28]), - binary.LittleEndian.Uint32(key[28:32]), - }, - nonce: [3]uint32{ - binary.LittleEndian.Uint32(counter[4:8]), - binary.LittleEndian.Uint32(counter[8:12]), - binary.LittleEndian.Uint32(counter[12:16]), - }, - counter: binary.LittleEndian.Uint32(counter[0:4]), - } - s.XORKeyStream(out, in) -} - -// HChaCha20 uses the ChaCha20 core to generate a derived key from a key and a -// nonce. It should only be used as part of the XChaCha20 construction. -func HChaCha20(key *[8]uint32, nonce *[4]uint32) [8]uint32 { - x0, x1, x2, x3 := j0, j1, j2, j3 - x4, x5, x6, x7 := key[0], key[1], key[2], key[3] - x8, x9, x10, x11 := key[4], key[5], key[6], key[7] - x12, x13, x14, x15 := nonce[0], nonce[1], nonce[2], nonce[3] - - for i := 0; i < 10; i++ { - x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) - x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) - x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) - x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) - - x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) - x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) - x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) - x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) - } - - var out [8]uint32 - out[0], out[1], out[2], out[3] = x0, x1, x2, x3 - out[4], out[5], out[6], out[7] = x12, x13, x14, x15 - return out -} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go deleted file mode 100644 index d38a7d380..000000000 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_ppc64le.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ppc64le,!gccgo,!appengine - -package chacha20 - -import ( - "encoding/binary" -) - -var haveAsm = true - -const bufSize = 256 - -//go:noescape -func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { - // This implementation can handle buffers that aren't multiples of - // 256. - if len(src) >= bufSize { - chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter) - } else if len(src)%bufSize != 0 { - chaCha20_ctr32_vsx(&c.buf[0], &c.buf[0], bufSize, &c.key, &c.counter) - start := len(src) - len(src)%bufSize - ts, td, tb := src[start:], dst[start:], c.buf[:] - // Unroll loop to XOR 32 bytes per iteration. 
- for i := 0; i < len(ts)-32; i += 32 { - td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination - s0 := binary.LittleEndian.Uint64(ts[0:8]) - s1 := binary.LittleEndian.Uint64(ts[8:16]) - s2 := binary.LittleEndian.Uint64(ts[16:24]) - s3 := binary.LittleEndian.Uint64(ts[24:32]) - b0 := binary.LittleEndian.Uint64(tb[0:8]) - b1 := binary.LittleEndian.Uint64(tb[8:16]) - b2 := binary.LittleEndian.Uint64(tb[16:24]) - b3 := binary.LittleEndian.Uint64(tb[24:32]) - binary.LittleEndian.PutUint64(td[0:8], s0^b0) - binary.LittleEndian.PutUint64(td[8:16], s1^b1) - binary.LittleEndian.PutUint64(td[16:24], s2^b2) - binary.LittleEndian.PutUint64(td[24:32], s3^b3) - ts, td, tb = ts[32:], td[32:], tb[32:] - } - td, tb = td[:len(ts)], tb[:len(ts)] // bounds check elimination - for i, v := range ts { - td[i] = tb[i] ^ v - } - c.len = bufSize - (len(src) % bufSize) - } - -} diff --git a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go deleted file mode 100644 index aad645b44..000000000 --- a/vendor/golang.org/x/crypto/internal/chacha20/chacha_s390x.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x,!gccgo,!appengine - -package chacha20 - -import ( - "golang.org/x/sys/cpu" -) - -var haveAsm = cpu.S390X.HasVX - -const bufSize = 256 - -// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only -// be called when the vector facility is available. -// Implementation in asm_s390x.s. -//go:noescape -func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32, buf *[256]byte, len *int) - -func (c *Cipher) xorKeyStreamAsm(dst, src []byte) { - xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter, &c.buf, &c.len) -} - -// EXRL targets, DO NOT CALL! -func mvcSrcToBuf() -func mvcBufToDst() diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go index ab86e8c07..36a680436 100644 --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ b/vendor/golang.org/x/crypto/openpgp/armor/armor.go @@ -10,9 +10,8 @@ import ( "bufio" "bytes" "encoding/base64" - "io" - "golang.org/x/crypto/openpgp/errors" + "io" ) // A Block represents an OpenPGP armored structure. @@ -63,10 +62,11 @@ var armorEndOfLine = []byte("-----") // lineReader wraps a line based reader. It watches for the end of an armor // block and records the expected CRC value. 
type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 + in *bufio.Reader + buf []byte + eof bool + crc uint32 + crcSet bool } func (l *lineReader) Read(p []byte) (n int, err error) { @@ -88,6 +88,11 @@ func (l *lineReader) Read(p []byte) (n int, err error) { return 0, ArmorCorrupt } + if bytes.HasPrefix(line, armorEnd) { + l.eof = true + return 0, io.EOF + } + if len(line) == 5 && line[0] == '=' { // This is the checksum line var expectedBytes [3]byte @@ -109,6 +114,7 @@ func (l *lineReader) Read(p []byte) (n int, err error) { } l.eof = true + l.crcSet = true return 0, io.EOF } @@ -142,10 +148,8 @@ func (r *openpgpReader) Read(p []byte) (n int, err error) { n, err = r.b64Reader.Read(p) r.currentCRC = crc24(r.currentCRC, p[:n]) - if err == io.EOF { - if r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } + if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt } return diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go index 73f4fe378..72a6a7394 100644 --- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go @@ -76,7 +76,9 @@ func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err // Bleichenbacher, Advances in Cryptology (Crypto '98), func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { s := new(big.Int).Exp(c1, priv.X, priv.P) - s.ModInverse(s, priv.P) + if s.ModInverse(s, priv.P) == nil { + return nil, errors.New("elgamal: invalid private key") + } s.Mul(s, c2) s.Mod(s, priv.P) em := s.Bytes() diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go index 6a719dfce..e8f0b5caa 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go @@ -8,10 +8,9 @@ import ( "compress/bzip2" "compress/flate" "compress/zlib" + "golang.org/x/crypto/openpgp/errors" "io" "strconv" - - "golang.org/x/crypto/openpgp/errors" ) // Compressed represents a compressed OpenPGP packet. The decompressed contents diff --git a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go index eb6a31bc6..171350339 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go @@ -7,11 +7,10 @@ package packet import ( "crypto" "encoding/binary" - "io" - "strconv" - "golang.org/x/crypto/openpgp/errors" "golang.org/x/crypto/openpgp/s2k" + "io" + "strconv" ) // OnePassSignature represents a one-pass signature packet. See RFC 4880, diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go index 5af64c542..9728d61d7 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go @@ -14,6 +14,7 @@ import ( "crypto/rsa" "io" "math/big" + "math/bits" "golang.org/x/crypto/cast5" "golang.org/x/crypto/openpgp/errors" @@ -100,33 +101,65 @@ func (r *partialLengthReader) Read(p []byte) (n int, err error) { type partialLengthWriter struct { w io.WriteCloser lengthByte [1]byte + sentFirst bool + buf []byte } +// RFC 4880 4.2.2.4: the first partial length MUST be at least 512 octets long. 
+const minFirstPartialWrite = 512 + func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + off := 0 + if !w.sentFirst { + if len(w.buf) > 0 || len(p) < minFirstPartialWrite { + off = len(w.buf) + w.buf = append(w.buf, p...) + if len(w.buf) < minFirstPartialWrite { + return len(p), nil + } + p = w.buf + w.buf = nil + } + w.sentFirst = true + } + + power := uint8(30) for len(p) > 0 { - for power := uint(14); power < 32; power-- { - l := 1 << power - if len(p) >= l { - w.lengthByte[0] = 224 + uint8(power) - _, err = w.w.Write(w.lengthByte[:]) - if err != nil { - return - } - var m int - m, err = w.w.Write(p[:l]) - n += m - if err != nil { - return - } - p = p[l:] - break + l := 1 << power + if len(p) < l { + power = uint8(bits.Len32(uint32(len(p)))) - 1 + l = 1 << power + } + w.lengthByte[0] = 224 + power + _, err = w.w.Write(w.lengthByte[:]) + if err == nil { + var m int + m, err = w.w.Write(p[:l]) + n += m + } + if err != nil { + if n < off { + return 0, err } + return n - off, err } + p = p[l:] } - return + return n - off, nil } func (w *partialLengthWriter) Close() error { + if len(w.buf) > 0 { + // In this case we can't send a 512 byte packet. + // Just send what we have. + p := w.buf + w.sentFirst = true + w.buf = nil + if _, err := w.Write(p); err != nil { + return err + } + } + w.lengthByte[0] = 0 _, err := w.w.Write(w.lengthByte[:]) if err != nil { diff --git a/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/vendor/golang.org/x/crypto/openpgp/packet/reader.go index 2a4ac8500..34bc7c613 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/reader.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/reader.go @@ -5,9 +5,8 @@ package packet import ( - "io" - "golang.org/x/crypto/openpgp/errors" + "io" ) // Reader reads packets from an io.Reader and allows packets to be 'unread' so diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go index 789a27dca..6126030eb 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go @@ -8,11 +8,10 @@ import ( "crypto/cipher" "crypto/sha1" "crypto/subtle" + "golang.org/x/crypto/openpgp/errors" "hash" "io" "strconv" - - "golang.org/x/crypto/openpgp/errors" ) // SymmetricallyEncrypted represents a symmetrically encrypted byte string. The diff --git a/vendor/golang.org/x/crypto/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/poly1305/bits_compat.go new file mode 100644 index 000000000..157a69f61 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/bits_compat.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +package poly1305 + +// Generic fallbacks for the math/bits intrinsics, copied from +// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had +// variable time fallbacks until Go 1.13. 
+ +func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { + sum = x + y + carry + carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 + return +} + +func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { + diff = x - y - borrow + borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 + return +} + +func bitsMul64(x, y uint64) (hi, lo uint64) { + const mask32 = 1<<32 - 1 + x0 := x & mask32 + x1 := x >> 32 + y0 := y & mask32 + y1 := y >> 32 + w0 := x0 * y0 + t := x1*y0 + w0>>32 + w1 := t & mask32 + w2 := t >> 32 + w1 += x0 * y1 + hi = x1*y1 + w2 + w1>>32 + lo = x * y + return +} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go new file mode 100644 index 000000000..a0a185f0f --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package poly1305 + +import "math/bits" + +func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { + return bits.Add64(x, y, carry) +} + +func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { + return bits.Sub64(x, y, borrow) +} + +func bitsMul64(x, y uint64) (hi, lo uint64) { + return bits.Mul64(x, y) +} diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go index a8dd589ae..d118f30ed 100644 --- a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go +++ b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go @@ -2,10 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64,!ppc64le gccgo appengine +// +build !amd64,!ppc64le,!s390x gccgo purego package poly1305 type mac struct{ macGeneric } - -func newMAC(key *[32]byte) mac { return mac{newMACGeneric(key)} } diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go index d076a5623..9d7a6af09 100644 --- a/vendor/golang.org/x/crypto/poly1305/poly1305.go +++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go @@ -22,8 +22,16 @@ import "crypto/subtle" // TagSize is the size, in bytes, of a poly1305 authenticator. const TagSize = 16 -// Verify returns true if mac is a valid authenticator for m with the given -// key. +// Sum generates an authenticator for msg using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + h := New(key) + h.Write(m) + h.Sum(out[:0]) +} + +// Verify returns true if mac is a valid authenticator for m with the given key. func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { var tmp [16]byte Sum(&tmp, m, key) @@ -40,10 +48,9 @@ func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { // two different messages with the same key allows an attacker // to forge messages at will. func New(key *[32]byte) *MAC { - return &MAC{ - mac: newMAC(key), - finalized: false, - } + m := &MAC{} + initialize(key, &m.macState) + return m } // MAC is an io.Writer computing an authentication tag @@ -52,7 +59,7 @@ func New(key *[32]byte) *MAC { // MAC cannot be used like common hash.Hash implementations, // because using a poly1305 key twice breaks its security. // Therefore writing data to a running MAC after calling -// Sum causes it to panic. +// Sum or Verify causes it to panic. 
type MAC struct { mac // platform-dependent implementation @@ -65,10 +72,10 @@ func (h *MAC) Size() int { return TagSize } // Write adds more data to the running message authentication code. // It never returns an error. // -// It must not be called after the first call of Sum. +// It must not be called after the first call of Sum or Verify. func (h *MAC) Write(p []byte) (n int, err error) { if h.finalized { - panic("poly1305: write to MAC after Sum") + panic("poly1305: write to MAC after Sum or Verify") } return h.mac.Write(p) } @@ -81,3 +88,12 @@ func (h *MAC) Sum(b []byte) []byte { h.finalized = true return append(b, mac[:]...) } + +// Verify returns whether the authenticator of all data written to +// the message authentication code matches the expected value. +func (h *MAC) Verify(expected []byte) bool { + var mac [TagSize]byte + h.mac.Sum(&mac) + h.finalized = true + return subtle.ConstantTimeCompare(expected, mac[:]) == 1 +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go index 2dbf42aa5..99e5a1d50 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -2,67 +2,46 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,!gccgo,!appengine +// +build !gccgo,!purego package poly1305 //go:noescape -func initialize(state *[7]uint64, key *[32]byte) +func update(state *macState, msg []byte) -//go:noescape -func update(state *[7]uint64, msg []byte) - -//go:noescape -func finalize(tag *[TagSize]byte, state *[7]uint64) - -// Sum generates an authenticator for m using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. -func Sum(out *[16]byte, m []byte, key *[32]byte) { - h := newMAC(key) - h.Write(m) - h.Sum(out) -} - -func newMAC(key *[32]byte) (h mac) { - initialize(&h.state, key) - return -} - -type mac struct { - state [7]uint64 // := uint64{ h0, h1, h2, r0, r1, pad0, pad1 } - - buffer [TagSize]byte - offset int -} +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. 
+type mac struct{ macGeneric } -func (h *mac) Write(p []byte) (n int, err error) { - n = len(p) +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) if h.offset > 0 { - remaining := TagSize - h.offset - if n < remaining { - h.offset += copy(h.buffer[h.offset:], p) - return n, nil + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil } - copy(h.buffer[h.offset:], p[:remaining]) - p = p[remaining:] + p = p[n:] h.offset = 0 - update(&h.state, h.buffer[:]) + update(&h.macState, h.buffer[:]) } - if nn := len(p) - (len(p) % TagSize); nn > 0 { - update(&h.state, p[:nn]) - p = p[nn:] + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] } if len(p) > 0 { h.offset += copy(h.buffer[h.offset:], p) } - return n, nil + return nn, nil } func (h *mac) Sum(out *[16]byte) { - state := h.state + state := h.macState if h.offset > 0 { update(&state, h.buffer[:h.offset]) } - finalize(out, &state) + finalize(out, &state.h, &state.s) } diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s index 7d600f13c..8d394a212 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,!gccgo,!appengine +// +build !gccgo,!purego #include "textflag.h" @@ -54,10 +54,6 @@ ADCQ t3, h1; \ ADCQ $0, h2 -DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -GLOBL ·poly1305Mask<>(SB), RODATA, $16 - // func update(state *[7]uint64, msg []byte) TEXT ·update(SB), $0-32 MOVQ state+0(FP), DI @@ -110,39 +106,3 @@ done: MOVQ R9, 8(DI) MOVQ R10, 16(DI) RET - -// func initialize(state *[7]uint64, key *[32]byte) -TEXT ·initialize(SB), $0-16 - MOVQ state+0(FP), DI - MOVQ key+8(FP), SI - - // state[0...7] is initialized with zero - MOVOU 0(SI), X0 - MOVOU 16(SI), X1 - MOVOU ·poly1305Mask<>(SB), X2 - PAND X2, X0 - MOVOU X0, 24(DI) - MOVOU X1, 40(DI) - RET - -// func finalize(tag *[TagSize]byte, state *[7]uint64) -TEXT ·finalize(SB), $0-16 - MOVQ tag+0(FP), DI - MOVQ state+8(FP), SI - - MOVQ 0(SI), AX - MOVQ 8(SI), BX - MOVQ 16(SI), CX - MOVQ AX, R8 - MOVQ BX, R9 - SUBQ $0xFFFFFFFFFFFFFFFB, AX - SBBQ $0xFFFFFFFFFFFFFFFF, BX - SBBQ $3, CX - CMOVQCS R8, AX - CMOVQCS R9, BX - ADDQ 40(SI), AX - ADCQ 48(SI), BX - - MOVQ AX, 0(DI) - MOVQ BX, 8(DI) - RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/vendor/golang.org/x/crypto/poly1305/sum_arm.go deleted file mode 100644 index 5dc321c2f..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_arm.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build arm,!gccgo,!appengine,!nacl - -package poly1305 - -// This function is implemented in sum_arm.s -//go:noescape -func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte) - -// Sum generates an authenticator for m using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. 
-func Sum(out *[16]byte, m []byte, key *[32]byte) { - var mPtr *byte - if len(m) > 0 { - mPtr = &m[0] - } - poly1305_auth_armv6(out, mPtr, uint32(len(m)), key) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.s b/vendor/golang.org/x/crypto/poly1305/sum_arm.s deleted file mode 100644 index f70b4ac48..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_arm.s +++ /dev/null @@ -1,427 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build arm,!gccgo,!appengine,!nacl - -#include "textflag.h" - -// This code was translated into a form compatible with 5a from the public -// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305. - -DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff -DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03 -DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff -DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff -DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff -GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20 - -// Warning: the linker may use R11 to synthesize certain instructions. Please -// take care and verify that no synthetic instructions use it. - -TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0 - // Needs 16 bytes of stack and 64 bytes of space pointed to by R0. (It - // might look like it's only 60 bytes of space but the final four bytes - // will be written by another function.) We need to skip over four - // bytes of stack because that's saving the value of 'g'. - ADD $4, R13, R8 - MOVM.IB [R4-R7], (R8) - MOVM.IA.W (R1), [R2-R5] - MOVW $·poly1305_init_constants_armv6<>(SB), R7 - MOVW R2, R8 - MOVW R2>>26, R9 - MOVW R3>>20, g - MOVW R4>>14, R11 - MOVW R5>>8, R12 - ORR R3<<6, R9, R9 - ORR R4<<12, g, g - ORR R5<<18, R11, R11 - MOVM.IA (R7), [R2-R6] - AND R8, R2, R2 - AND R9, R3, R3 - AND g, R4, R4 - AND R11, R5, R5 - AND R12, R6, R6 - MOVM.IA.W [R2-R6], (R0) - EOR R2, R2, R2 - EOR R3, R3, R3 - EOR R4, R4, R4 - EOR R5, R5, R5 - EOR R6, R6, R6 - MOVM.IA.W [R2-R6], (R0) - MOVM.IA.W (R1), [R2-R5] - MOVM.IA [R2-R6], (R0) - ADD $20, R13, R0 - MOVM.DA (R0), [R4-R7] - RET - -#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \ - MOVBU (offset+0)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+0)(Rdst); \ - MOVBU (offset+1)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+1)(Rdst); \ - MOVBU (offset+2)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+2)(Rdst); \ - MOVBU (offset+3)(Rsrc), Rtmp; \ - MOVBU Rtmp, (offset+3)(Rdst) - -TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0 - // Needs 24 bytes of stack for saved registers and then 88 bytes of - // scratch space after that. We assume that 24 bytes at (R13) have - // already been used: four bytes for the link register saved in the - // prelude of poly1305_auth_armv6, four bytes for saving the value of g - // in that function and 16 bytes of scratch space used around - // poly1305_finish_ext_armv6_skip1. 
- ADD $24, R13, R12 - MOVM.IB [R4-R8, R14], (R12) - MOVW R0, 88(R13) - MOVW R1, 92(R13) - MOVW R2, 96(R13) - MOVW R1, R14 - MOVW R2, R12 - MOVW 56(R0), R8 - WORD $0xe1180008 // TST R8, R8 not working see issue 5921 - EOR R6, R6, R6 - MOVW.EQ $(1<<24), R6 - MOVW R6, 84(R13) - ADD $116, R13, g - MOVM.IA (R0), [R0-R9] - MOVM.IA [R0-R4], (g) - CMP $16, R12 - BLO poly1305_blocks_armv6_done - -poly1305_blocks_armv6_mainloop: - WORD $0xe31e0003 // TST R14, #3 not working see issue 5921 - BEQ poly1305_blocks_armv6_mainloop_aligned - ADD $100, R13, g - MOVW_UNALIGNED(R14, g, R0, 0) - MOVW_UNALIGNED(R14, g, R0, 4) - MOVW_UNALIGNED(R14, g, R0, 8) - MOVW_UNALIGNED(R14, g, R0, 12) - MOVM.IA (g), [R0-R3] - ADD $16, R14 - B poly1305_blocks_armv6_mainloop_loaded - -poly1305_blocks_armv6_mainloop_aligned: - MOVM.IA.W (R14), [R0-R3] - -poly1305_blocks_armv6_mainloop_loaded: - MOVW R0>>26, g - MOVW R1>>20, R11 - MOVW R2>>14, R12 - MOVW R14, 92(R13) - MOVW R3>>8, R4 - ORR R1<<6, g, g - ORR R2<<12, R11, R11 - ORR R3<<18, R12, R12 - BIC $0xfc000000, R0, R0 - BIC $0xfc000000, g, g - MOVW 84(R13), R3 - BIC $0xfc000000, R11, R11 - BIC $0xfc000000, R12, R12 - ADD R0, R5, R5 - ADD g, R6, R6 - ORR R3, R4, R4 - ADD R11, R7, R7 - ADD $116, R13, R14 - ADD R12, R8, R8 - ADD R4, R9, R9 - MOVM.IA (R14), [R0-R4] - MULLU R4, R5, (R11, g) - MULLU R3, R5, (R14, R12) - MULALU R3, R6, (R11, g) - MULALU R2, R6, (R14, R12) - MULALU R2, R7, (R11, g) - MULALU R1, R7, (R14, R12) - ADD R4<<2, R4, R4 - ADD R3<<2, R3, R3 - MULALU R1, R8, (R11, g) - MULALU R0, R8, (R14, R12) - MULALU R0, R9, (R11, g) - MULALU R4, R9, (R14, R12) - MOVW g, 76(R13) - MOVW R11, 80(R13) - MOVW R12, 68(R13) - MOVW R14, 72(R13) - MULLU R2, R5, (R11, g) - MULLU R1, R5, (R14, R12) - MULALU R1, R6, (R11, g) - MULALU R0, R6, (R14, R12) - MULALU R0, R7, (R11, g) - MULALU R4, R7, (R14, R12) - ADD R2<<2, R2, R2 - ADD R1<<2, R1, R1 - MULALU R4, R8, (R11, g) - MULALU R3, R8, (R14, R12) - MULALU R3, R9, (R11, g) - MULALU R2, R9, (R14, R12) - MOVW g, 60(R13) - MOVW R11, 64(R13) - MOVW R12, 52(R13) - MOVW R14, 56(R13) - MULLU R0, R5, (R11, g) - MULALU R4, R6, (R11, g) - MULALU R3, R7, (R11, g) - MULALU R2, R8, (R11, g) - MULALU R1, R9, (R11, g) - ADD $52, R13, R0 - MOVM.IA (R0), [R0-R7] - MOVW g>>26, R12 - MOVW R4>>26, R14 - ORR R11<<6, R12, R12 - ORR R5<<6, R14, R14 - BIC $0xfc000000, g, g - BIC $0xfc000000, R4, R4 - ADD.S R12, R0, R0 - ADC $0, R1, R1 - ADD.S R14, R6, R6 - ADC $0, R7, R7 - MOVW R0>>26, R12 - MOVW R6>>26, R14 - ORR R1<<6, R12, R12 - ORR R7<<6, R14, R14 - BIC $0xfc000000, R0, R0 - BIC $0xfc000000, R6, R6 - ADD R14<<2, R14, R14 - ADD.S R12, R2, R2 - ADC $0, R3, R3 - ADD R14, g, g - MOVW R2>>26, R12 - MOVW g>>26, R14 - ORR R3<<6, R12, R12 - BIC $0xfc000000, g, R5 - BIC $0xfc000000, R2, R7 - ADD R12, R4, R4 - ADD R14, R0, R0 - MOVW R4>>26, R12 - BIC $0xfc000000, R4, R8 - ADD R12, R6, R9 - MOVW 96(R13), R12 - MOVW 92(R13), R14 - MOVW R0, R6 - CMP $32, R12 - SUB $16, R12, R12 - MOVW R12, 96(R13) - BHS poly1305_blocks_armv6_mainloop - -poly1305_blocks_armv6_done: - MOVW 88(R13), R12 - MOVW R5, 20(R12) - MOVW R6, 24(R12) - MOVW R7, 28(R12) - MOVW R8, 32(R12) - MOVW R9, 36(R12) - ADD $48, R13, R0 - MOVM.DA (R0), [R4-R8, R14] - RET - -#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \ - MOVBU.P 1(Rsrc), Rtmp; \ - MOVBU.P Rtmp, 1(Rdst); \ - MOVBU.P 1(Rsrc), Rtmp; \ - MOVBU.P Rtmp, 1(Rdst) - -#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \ - MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \ - MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) - -// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen 
uint32, key *[32]key) -TEXT ·poly1305_auth_armv6(SB), $196-16 - // The value 196, just above, is the sum of 64 (the size of the context - // structure) and 132 (the amount of stack needed). - // - // At this point, the stack pointer (R13) has been moved down. It - // points to the saved link register and there's 196 bytes of free - // space above it. - // - // The stack for this function looks like: - // - // +--------------------- - // | - // | 64 bytes of context structure - // | - // +--------------------- - // | - // | 112 bytes for poly1305_blocks_armv6 - // | - // +--------------------- - // | 16 bytes of final block, constructed at - // | poly1305_finish_ext_armv6_skip8 - // +--------------------- - // | four bytes of saved 'g' - // +--------------------- - // | lr, saved by prelude <- R13 points here - // +--------------------- - MOVW g, 4(R13) - - MOVW out+0(FP), R4 - MOVW m+4(FP), R5 - MOVW mlen+8(FP), R6 - MOVW key+12(FP), R7 - - ADD $136, R13, R0 // 136 = 4 + 4 + 16 + 112 - MOVW R7, R1 - - // poly1305_init_ext_armv6 will write to the stack from R13+4, but - // that's ok because none of the other values have been written yet. - BL poly1305_init_ext_armv6<>(SB) - BIC.S $15, R6, R2 - BEQ poly1305_auth_armv6_noblocks - ADD $136, R13, R0 - MOVW R5, R1 - ADD R2, R5, R5 - SUB R2, R6, R6 - BL poly1305_blocks_armv6<>(SB) - -poly1305_auth_armv6_noblocks: - ADD $136, R13, R0 - MOVW R5, R1 - MOVW R6, R2 - MOVW R4, R3 - - MOVW R0, R5 - MOVW R1, R6 - MOVW R2, R7 - MOVW R3, R8 - AND.S R2, R2, R2 - BEQ poly1305_finish_ext_armv6_noremaining - EOR R0, R0 - ADD $8, R13, R9 // 8 = offset to 16 byte scratch space - MOVW R0, (R9) - MOVW R0, 4(R9) - MOVW R0, 8(R9) - MOVW R0, 12(R9) - WORD $0xe3110003 // TST R1, #3 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_aligned - WORD $0xe3120008 // TST R2, #8 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip8 - MOVWP_UNALIGNED(R1, R9, g) - MOVWP_UNALIGNED(R1, R9, g) - -poly1305_finish_ext_armv6_skip8: - WORD $0xe3120004 // TST $4, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip4 - MOVWP_UNALIGNED(R1, R9, g) - -poly1305_finish_ext_armv6_skip4: - WORD $0xe3120002 // TST $2, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip2 - MOVHUP_UNALIGNED(R1, R9, g) - B poly1305_finish_ext_armv6_skip2 - -poly1305_finish_ext_armv6_aligned: - WORD $0xe3120008 // TST R2, #8 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip8_aligned - MOVM.IA.W (R1), [g-R11] - MOVM.IA.W [g-R11], (R9) - -poly1305_finish_ext_armv6_skip8_aligned: - WORD $0xe3120004 // TST $4, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip4_aligned - MOVW.P 4(R1), g - MOVW.P g, 4(R9) - -poly1305_finish_ext_armv6_skip4_aligned: - WORD $0xe3120002 // TST $2, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip2 - MOVHU.P 2(R1), g - MOVH.P g, 2(R9) - -poly1305_finish_ext_armv6_skip2: - WORD $0xe3120001 // TST $1, R2 not working see issue 5921 - BEQ poly1305_finish_ext_armv6_skip1 - MOVBU.P 1(R1), g - MOVBU.P g, 1(R9) - -poly1305_finish_ext_armv6_skip1: - MOVW $1, R11 - MOVBU R11, 0(R9) - MOVW R11, 56(R5) - MOVW R5, R0 - ADD $8, R13, R1 - MOVW $16, R2 - BL poly1305_blocks_armv6<>(SB) - -poly1305_finish_ext_armv6_noremaining: - MOVW 20(R5), R0 - MOVW 24(R5), R1 - MOVW 28(R5), R2 - MOVW 32(R5), R3 - MOVW 36(R5), R4 - MOVW R4>>26, R12 - BIC $0xfc000000, R4, R4 - ADD R12<<2, R12, R12 - ADD R12, R0, R0 - MOVW R0>>26, R12 - BIC $0xfc000000, R0, R0 - ADD R12, R1, R1 - MOVW R1>>26, R12 - BIC $0xfc000000, R1, R1 - ADD 
R12, R2, R2 - MOVW R2>>26, R12 - BIC $0xfc000000, R2, R2 - ADD R12, R3, R3 - MOVW R3>>26, R12 - BIC $0xfc000000, R3, R3 - ADD R12, R4, R4 - ADD $5, R0, R6 - MOVW R6>>26, R12 - BIC $0xfc000000, R6, R6 - ADD R12, R1, R7 - MOVW R7>>26, R12 - BIC $0xfc000000, R7, R7 - ADD R12, R2, g - MOVW g>>26, R12 - BIC $0xfc000000, g, g - ADD R12, R3, R11 - MOVW $-(1<<26), R12 - ADD R11>>26, R12, R12 - BIC $0xfc000000, R11, R11 - ADD R12, R4, R9 - MOVW R9>>31, R12 - SUB $1, R12 - AND R12, R6, R6 - AND R12, R7, R7 - AND R12, g, g - AND R12, R11, R11 - AND R12, R9, R9 - MVN R12, R12 - AND R12, R0, R0 - AND R12, R1, R1 - AND R12, R2, R2 - AND R12, R3, R3 - AND R12, R4, R4 - ORR R6, R0, R0 - ORR R7, R1, R1 - ORR g, R2, R2 - ORR R11, R3, R3 - ORR R9, R4, R4 - ORR R1<<26, R0, R0 - MOVW R1>>6, R1 - ORR R2<<20, R1, R1 - MOVW R2>>12, R2 - ORR R3<<14, R2, R2 - MOVW R3>>18, R3 - ORR R4<<8, R3, R3 - MOVW 40(R5), R6 - MOVW 44(R5), R7 - MOVW 48(R5), g - MOVW 52(R5), R11 - ADD.S R6, R0, R0 - ADC.S R7, R1, R1 - ADC.S g, R2, R2 - ADC.S R11, R3, R3 - MOVM.IA [R0-R3], (R8) - MOVW R5, R12 - EOR R0, R0, R0 - EOR R1, R1, R1 - EOR R2, R2, R2 - EOR R3, R3, R3 - EOR R4, R4, R4 - EOR R5, R5, R5 - EOR R6, R6, R6 - EOR R7, R7, R7 - MOVM.IA.W [R0-R7], (R12) - MOVM.IA [R0-R7], (R12) - MOVW 4(R13), g - RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go index bab76ef0d..c942a6590 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_generic.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_generic.go @@ -2,171 +2,309 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This file provides the generic implementation of Sum and MAC. Other files +// might provide optimized assembly implementations of some of this code. + package poly1305 import "encoding/binary" -const ( - msgBlock = uint32(1 << 24) - finalBlock = uint32(0) -) +// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag +// for a 64 bytes message is approximately +// +// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5 +// +// for some secret r and s. It can be computed sequentially like +// +// for len(msg) > 0: +// h += read(msg, 16) +// h *= r +// h %= 2¹³⁰ - 5 +// return h + s +// +// All the complexity is about doing performant constant-time math on numbers +// larger than any available numeric type. -// sumGeneric generates an authenticator for msg using a one-time key and -// puts the 16-byte result into out. This is the generic implementation of -// Sum and should be called if no assembly implementation is available. func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { h := newMACGeneric(key) h.Write(msg) h.Sum(out) } -func newMACGeneric(key *[32]byte) (h macGeneric) { - h.r[0] = binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff - h.r[1] = (binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03 - h.r[2] = (binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff - h.r[3] = (binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff - h.r[4] = (binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff - - h.s[0] = binary.LittleEndian.Uint32(key[16:]) - h.s[1] = binary.LittleEndian.Uint32(key[20:]) - h.s[2] = binary.LittleEndian.Uint32(key[24:]) - h.s[3] = binary.LittleEndian.Uint32(key[28:]) - return +func newMACGeneric(key *[32]byte) macGeneric { + m := macGeneric{} + initialize(key, &m.macState) + return m +} + +// macState holds numbers in saturated 64-bit little-endian limbs. 
That is, +// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸. +type macState struct { + // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but + // can grow larger during and after rounds. It must, however, remain below + // 2 * (2¹³⁰ - 5). + h [3]uint64 + // r and s are the private key components. + r [2]uint64 + s [2]uint64 } type macGeneric struct { - h, r [5]uint32 - s [4]uint32 + macState buffer [TagSize]byte offset int } -func (h *macGeneric) Write(p []byte) (n int, err error) { - n = len(p) +// Write splits the incoming message into TagSize chunks, and passes them to +// update. It buffers incomplete chunks. +func (h *macGeneric) Write(p []byte) (int, error) { + nn := len(p) if h.offset > 0 { - remaining := TagSize - h.offset - if n < remaining { - h.offset += copy(h.buffer[h.offset:], p) - return n, nil + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil } - copy(h.buffer[h.offset:], p[:remaining]) - p = p[remaining:] + p = p[n:] h.offset = 0 - updateGeneric(h.buffer[:], msgBlock, &(h.h), &(h.r)) + updateGeneric(&h.macState, h.buffer[:]) } - if nn := len(p) - (len(p) % TagSize); nn > 0 { - updateGeneric(p, msgBlock, &(h.h), &(h.r)) - p = p[nn:] + if n := len(p) - (len(p) % TagSize); n > 0 { + updateGeneric(&h.macState, p[:n]) + p = p[n:] } if len(p) > 0 { h.offset += copy(h.buffer[h.offset:], p) } - return n, nil + return nn, nil } -func (h *macGeneric) Sum(out *[16]byte) { - H, R := h.h, h.r +// Sum flushes the last incomplete chunk from the buffer, if any, and generates +// the MAC output. It does not modify its state, in order to allow for multiple +// calls to Sum, even if no Write is allowed after Sum. +func (h *macGeneric) Sum(out *[TagSize]byte) { + state := h.macState if h.offset > 0 { - var buffer [TagSize]byte - copy(buffer[:], h.buffer[:h.offset]) - buffer[h.offset] = 1 // invariant: h.offset < TagSize - updateGeneric(buffer[:], finalBlock, &H, &R) + updateGeneric(&state, h.buffer[:h.offset]) } - finalizeGeneric(out, &H, &(h.s)) + finalize(out, &state.h, &state.s) +} + +// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It +// clears some bits of the secret coefficient to make it possible to implement +// multiplication more efficiently. +const ( + rMask0 = 0x0FFFFFFC0FFFFFFF + rMask1 = 0x0FFFFFFC0FFFFFFC +) + +// initialize loads the 256-bit key into the two 128-bit secret values r and s. +func initialize(key *[32]byte, m *macState) { + m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 + m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 + m.s[0] = binary.LittleEndian.Uint64(key[16:24]) + m.s[1] = binary.LittleEndian.Uint64(key[24:32]) +} + +// uint128 holds a 128-bit number as two 64-bit limbs, for use with the +// bits.Mul64 and bits.Add64 intrinsics. 
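As a cross-check on the representation just described (a value stored as x[0] + x[1]·2⁶⁴ + x[2]·2¹²⁸) and on the clamping applied by initialize, here is a small sketch that rebuilds 2¹³⁰ - 5 from three limbs with math/big; the helper name and test values are invented, only the limb layout and the rMask constants come from the vendored code.

// Illustrative only. limbsToBig interprets three little-endian 64-bit limbs
// the same way macState does: value = l[0] + l[1]<<64 + l[2]<<128.
package main

import (
	"fmt"
	"math/big"
)

const (
	rMask0 = 0x0FFFFFFC0FFFFFFF // same clamping masks as initialize
	rMask1 = 0x0FFFFFFC0FFFFFFC
)

func limbsToBig(l [3]uint64) *big.Int {
	v := new(big.Int)
	for i := 2; i >= 0; i-- {
		v.Lsh(v, 64)
		v.Or(v, new(big.Int).SetUint64(l[i]))
	}
	return v
}

func main() {
	// The prime 2¹³⁰ - 5 as limbs: only two bits spill into the third limb.
	p := limbsToBig([3]uint64{0xFFFFFFFFFFFFFFFB, 0xFFFFFFFFFFFFFFFF, 3})
	want := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))
	fmt.Println(p.Cmp(want) == 0) // true: the limbs encode 2¹³⁰ - 5

	// Clamping clears, among others, the top four bits of each r limb, which
	// is why updateGeneric can ignore the high halves of the h2 products.
	fmt.Printf("r0=%016x r1=%016x\n", ^uint64(0)&rMask0, ^uint64(0)&rMask1)
}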
+type uint128 struct { + lo, hi uint64 +} + +func mul64(a, b uint64) uint128 { + hi, lo := bitsMul64(a, b) + return uint128{lo, hi} } -func updateGeneric(msg []byte, flag uint32, h, r *[5]uint32) { - h0, h1, h2, h3, h4 := h[0], h[1], h[2], h[3], h[4] - r0, r1, r2, r3, r4 := uint64(r[0]), uint64(r[1]), uint64(r[2]), uint64(r[3]), uint64(r[4]) - R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5 - - for len(msg) >= TagSize { - // h += msg - h0 += binary.LittleEndian.Uint32(msg[0:]) & 0x3ffffff - h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff - h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff - h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff - h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | flag - - // h *= r - d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1) - d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2) - d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3) - d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4) - d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0) - - // h %= p - h0 = uint32(d0) & 0x3ffffff - h1 = uint32(d1) & 0x3ffffff - h2 = uint32(d2) & 0x3ffffff - h3 = uint32(d3) & 0x3ffffff - h4 = uint32(d4) & 0x3ffffff - - h0 += uint32(d4>>26) * 5 - h1 += h0 >> 26 - h0 = h0 & 0x3ffffff - - msg = msg[TagSize:] +func add128(a, b uint128) uint128 { + lo, c := bitsAdd64(a.lo, b.lo, 0) + hi, c := bitsAdd64(a.hi, b.hi, c) + if c != 0 { + panic("poly1305: unexpected overflow") } + return uint128{lo, hi} +} - h[0], h[1], h[2], h[3], h[4] = h0, h1, h2, h3, h4 +func shiftRightBy2(a uint128) uint128 { + a.lo = a.lo>>2 | (a.hi&3)<<62 + a.hi = a.hi >> 2 + return a } -func finalizeGeneric(out *[TagSize]byte, h *[5]uint32, s *[4]uint32) { - h0, h1, h2, h3, h4 := h[0], h[1], h[2], h[3], h[4] - - // h %= p reduction - h2 += h1 >> 26 - h1 &= 0x3ffffff - h3 += h2 >> 26 - h2 &= 0x3ffffff - h4 += h3 >> 26 - h3 &= 0x3ffffff - h0 += 5 * (h4 >> 26) - h4 &= 0x3ffffff - h1 += h0 >> 26 - h0 &= 0x3ffffff - - // h - p - t0 := h0 + 5 - t1 := h1 + (t0 >> 26) - t2 := h2 + (t1 >> 26) - t3 := h3 + (t2 >> 26) - t4 := h4 + (t3 >> 26) - (1 << 26) - t0 &= 0x3ffffff - t1 &= 0x3ffffff - t2 &= 0x3ffffff - t3 &= 0x3ffffff - - // select h if h < p else h - p - t_mask := (t4 >> 31) - 1 - h_mask := ^t_mask - h0 = (h0 & h_mask) | (t0 & t_mask) - h1 = (h1 & h_mask) | (t1 & t_mask) - h2 = (h2 & h_mask) | (t2 & t_mask) - h3 = (h3 & h_mask) | (t3 & t_mask) - h4 = (h4 & h_mask) | (t4 & t_mask) - - // h %= 2^128 - h0 |= h1 << 26 - h1 = ((h1 >> 6) | (h2 << 20)) - h2 = ((h2 >> 12) | (h3 << 14)) - h3 = ((h3 >> 18) | (h4 << 8)) - - // s: the s part of the key - // tag = (h + s) % (2^128) - t := uint64(h0) + uint64(s[0]) - h0 = uint32(t) - t = uint64(h1) + uint64(s[1]) + (t >> 32) - h1 = uint32(t) - t = uint64(h2) + uint64(s[2]) + (t >> 32) - h2 = uint32(t) - t = uint64(h3) + uint64(s[3]) + (t >> 32) - h3 = uint32(t) - - binary.LittleEndian.PutUint32(out[0:], h0) - binary.LittleEndian.PutUint32(out[4:], h1) - binary.LittleEndian.PutUint32(out[8:], h2) - binary.LittleEndian.PutUint32(out[12:], h3) +// updateGeneric absorbs msg into the state.h accumulator. 
For each chunk m of +// 128 bits of message, it computes +// +// h₊ = (h + m) * r mod 2¹³⁰ - 5 +// +// If the msg length is not a multiple of TagSize, it assumes the last +// incomplete chunk is the final one. +func updateGeneric(state *macState, msg []byte) { + h0, h1, h2 := state.h[0], state.h[1], state.h[2] + r0, r1 := state.r[0], state.r[1] + + for len(msg) > 0 { + var c uint64 + + // For the first step, h + m, we use a chain of bits.Add64 intrinsics. + // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially + // reduced at the end of the multiplication below. + // + // The spec requires us to set a bit just above the message size, not to + // hide leading zeroes. For full chunks, that's 1 << 128, so we can just + // add 1 to the most significant (2¹²⁸) limb, h2. + if len(msg) >= TagSize { + h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h2 += c + 1 + + msg = msg[TagSize:] + } else { + var buf [TagSize]byte + copy(buf[:], msg) + buf[len(msg)] = 1 + + h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h2 += c + + msg = nil + } + + // Multiplication of big number limbs is similar to elementary school + // columnar multiplication. Instead of digits, there are 64-bit limbs. + // + // We are multiplying a 3 limbs number, h, by a 2 limbs number, r. + // + // h2 h1 h0 x + // r1 r0 = + // ---------------- + // h2r0 h1r0 h0r0 <-- individual 128-bit products + // + h2r1 h1r1 h0r1 + // ------------------------ + // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs + // ------------------------ + // m3.hi m2.hi m1.hi m0.hi <-- carry propagation + // + m3.lo m2.lo m1.lo m0.lo + // ------------------------------- + // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs + // + // The main difference from pen-and-paper multiplication is that we do + // carry propagation in a separate step, as if we wrote two digit sums + // at first (the 128-bit limbs), and then carried the tens all at once. + + h0r0 := mul64(h0, r0) + h1r0 := mul64(h1, r0) + h2r0 := mul64(h2, r0) + h0r1 := mul64(h0, r1) + h1r1 := mul64(h1, r1) + h2r1 := mul64(h2, r1) + + // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their + // top 4 bits cleared by rMask{0,1}, we know that their product is not going + // to overflow 64 bits, so we can ignore the high part of the products. + // + // This also means that the product doesn't have a fifth limb (t4). + if h2r0.hi != 0 { + panic("poly1305: unexpected overflow") + } + if h2r1.hi != 0 { + panic("poly1305: unexpected overflow") + } + + m0 := h0r0 + m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again + m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1. + m3 := h2r1 + + t0 := m0.lo + t1, c := bitsAdd64(m1.lo, m0.hi, 0) + t2, c := bitsAdd64(m2.lo, m1.hi, c) + t3, _ := bitsAdd64(m3.lo, m2.hi, c) + + // Now we have the result as 4 64-bit limbs, and we need to reduce it + // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do + // a cheap partial reduction according to the reduction identity + // + // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5 + // + // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is + // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the + // assumptions we make about h in the rest of the code. 
+ // + // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23 + + // We split the final result at the 2¹³⁰ mark into h and cc, the carry. + // Note that the carry bits are effectively shifted left by 2, in other + // words, cc = c * 4 for the c in the reduction identity. + h0, h1, h2 = t0, t1, t2&maskLow2Bits + cc := uint128{t2 & maskNotLow2Bits, t3} + + // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. + + h0, c = bitsAdd64(h0, cc.lo, 0) + h1, c = bitsAdd64(h1, cc.hi, c) + h2 += c + + cc = shiftRightBy2(cc) + + h0, c = bitsAdd64(h0, cc.lo, 0) + h1, c = bitsAdd64(h1, cc.hi, c) + h2 += c + + // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most + // + // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1 + } + + state.h[0], state.h[1], state.h[2] = h0, h1, h2 +} + +const ( + maskLow2Bits uint64 = 0x0000000000000003 + maskNotLow2Bits uint64 = ^maskLow2Bits +) + +// select64 returns x if v == 1 and y if v == 0, in constant time. +func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y } + +// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order. +const ( + p0 = 0xFFFFFFFFFFFFFFFB + p1 = 0xFFFFFFFFFFFFFFFF + p2 = 0x0000000000000003 +) + +// finalize completes the modular reduction of h and computes +// +// out = h + s mod 2¹²⁸ +// +func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { + h0, h1, h2 := h[0], h[1], h[2] + + // After the partial reduction in updateGeneric, h might be more than + // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction + // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the + // result if the subtraction underflows, and t otherwise. + + hMinusP0, b := bitsSub64(h0, p0, 0) + hMinusP1, b := bitsSub64(h1, p1, b) + _, b = bitsSub64(h2, p2, b) + + // h = h if h < p else h - p + h0 = select64(b, h0, hMinusP0) + h1 = select64(b, h1, hMinusP1) + + // Finally, we compute the last Poly1305 step + // + // tag = h + s mod 2¹²⁸ + // + // by just doing a wide addition with the 128 low bits of h and discarding + // the overflow. + h0, c := bitsAdd64(h0, s[0], 0) + h1, _ = bitsAdd64(h1, s[1], c) + + binary.LittleEndian.PutUint64(out[0:8], h0) + binary.LittleEndian.PutUint64(out[8:16], h1) } diff --git a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go b/vendor/golang.org/x/crypto/poly1305/sum_noasm.go deleted file mode 100644 index 8a9c2070b..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_noasm.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x,!go1.11 !arm,!amd64,!s390x,!ppc64le gccgo appengine nacl - -package poly1305 - -// Sum generates an authenticator for msg using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. -func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) { - h := newMAC(key) - h.Write(msg) - h.Sum(out) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go index 2402b6371..2e7a120b1 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go @@ -2,67 +2,46 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
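Before moving on to the assembly ports, here is a standalone sketch (not part of the patch) of the constant-time trick finalize uses above: subtract 2¹³⁰ - 5 through a borrow chain and keep the original h only if the subtraction borrowed. It calls math/bits directly, which the vendored code reaches through its bitsSub64 wrapper; the test values are invented.

// Illustrative only; the constants and the select64 formula are copied from
// sum_generic.go, the rest is a made-up harness.
package main

import (
	"fmt"
	"math/bits"
)

const (
	p0 = 0xFFFFFFFFFFFFFFFB // [p0, p1, p2] is 2¹³⁰ - 5 in little-endian limbs
	p1 = 0xFFFFFFFFFFFFFFFF
	p2 = 0x0000000000000003
)

// select64 returns x if v == 1 and y if v == 0, in constant time.
func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y }

// reduce returns the low 128 bits of h mod 2¹³⁰ - 5, assuming h < 2·(2¹³⁰ - 5).
func reduce(h [3]uint64) [2]uint64 {
	hMinusP0, b := bits.Sub64(h[0], p0, 0)
	hMinusP1, b := bits.Sub64(h[1], p1, b)
	_, b = bits.Sub64(h[2], p2, b)
	// A final borrow (b == 1) means h < p, so keep h; otherwise keep h - p.
	return [2]uint64{select64(b, h[0], hMinusP0), select64(b, h[1], hMinusP1)}
}

func main() {
	// h = 2¹³⁰ + 2 = (2¹³⁰ - 5) + 7, just above p, the range finalize handles.
	fmt.Println(reduce([3]uint64{2, 0, 4})) // [7 0]
	// h = 7 is already below p and is kept unchanged.
	fmt.Println(reduce([3]uint64{7, 0, 0})) // [7 0]
}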
-// +build ppc64le,!gccgo,!appengine +// +build !gccgo,!purego package poly1305 //go:noescape -func initialize(state *[7]uint64, key *[32]byte) +func update(state *macState, msg []byte) -//go:noescape -func update(state *[7]uint64, msg []byte) - -//go:noescape -func finalize(tag *[TagSize]byte, state *[7]uint64) - -// Sum generates an authenticator for m using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. -func Sum(out *[16]byte, m []byte, key *[32]byte) { - h := newMAC(key) - h.Write(m) - h.Sum(out) -} - -func newMAC(key *[32]byte) (h mac) { - initialize(&h.state, key) - return -} - -type mac struct { - state [7]uint64 // := uint64{ h0, h1, h2, r0, r1, pad0, pad1 } - - buffer [TagSize]byte - offset int -} +// mac is a wrapper for macGeneric that redirects calls that would have gone to +// updateGeneric to update. +// +// Its Write and Sum methods are otherwise identical to the macGeneric ones, but +// using function pointers would carry a major performance cost. +type mac struct{ macGeneric } -func (h *mac) Write(p []byte) (n int, err error) { - n = len(p) +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) if h.offset > 0 { - remaining := TagSize - h.offset - if n < remaining { - h.offset += copy(h.buffer[h.offset:], p) - return n, nil + n := copy(h.buffer[h.offset:], p) + if h.offset+n < TagSize { + h.offset += n + return nn, nil } - copy(h.buffer[h.offset:], p[:remaining]) - p = p[remaining:] + p = p[n:] h.offset = 0 - update(&h.state, h.buffer[:]) + update(&h.macState, h.buffer[:]) } - if nn := len(p) - (len(p) % TagSize); nn > 0 { - update(&h.state, p[:nn]) - p = p[nn:] + if n := len(p) - (len(p) % TagSize); n > 0 { + update(&h.macState, p[:n]) + p = p[n:] } if len(p) > 0 { h.offset += copy(h.buffer[h.offset:], p) } - return n, nil + return nn, nil } func (h *mac) Sum(out *[16]byte) { - state := h.state + state := h.macState if h.offset > 0 { update(&state, h.buffer[:h.offset]) } - finalize(out, &state) + finalize(out, &state.h, &state.s) } diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s index 55c7167ec..4e0281387 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build ppc64le,!gccgo,!appengine +// +build !gccgo,!purego #include "textflag.h" @@ -58,7 +58,6 @@ DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC GLOBL ·poly1305Mask<>(SB), RODATA, $16 // func update(state *[7]uint64, msg []byte) - TEXT ·update(SB), $0-32 MOVD state+0(FP), R3 MOVD msg_base+8(FP), R4 @@ -180,68 +179,3 @@ done: MOVD R9, 8(R3) MOVD R10, 16(R3) RET - -// func initialize(state *[7]uint64, key *[32]byte) -TEXT ·initialize(SB), $0-16 - MOVD state+0(FP), R3 - MOVD key+8(FP), R4 - - // state[0...7] is initialized with zero - // Load key - MOVD 0(R4), R5 - MOVD 8(R4), R6 - MOVD 16(R4), R7 - MOVD 24(R4), R8 - - // Address of key mask - MOVD $·poly1305Mask<>(SB), R9 - - // Save original key in state - MOVD R7, 40(R3) - MOVD R8, 48(R3) - - // Get mask - MOVD (R9), R7 - MOVD 8(R9), R8 - - // And with key - AND R5, R7, R5 - AND R6, R8, R6 - - // Save masked key in state - MOVD R5, 24(R3) - MOVD R6, 32(R3) - RET - -// func finalize(tag *[TagSize]byte, state *[7]uint64) -TEXT ·finalize(SB), $0-16 - MOVD tag+0(FP), R3 - MOVD state+8(FP), R4 - - // Get h0, h1, h2 from state - MOVD 0(R4), R5 - MOVD 8(R4), R6 - MOVD 16(R4), R7 - - // Save h0, h1 - MOVD R5, R8 - MOVD R6, R9 - MOVD $3, R20 - MOVD $-1, R21 - SUBC $-5, R5 - SUBE R21, R6 - SUBE R20, R7 - MOVD $0, R21 - SUBZE R21 - - // Check for carry - CMP $0, R21 - ISEL $2, R5, R8, R5 - ISEL $2, R6, R9, R6 - MOVD 40(R4), R8 - MOVD 48(R4), R9 - ADDC R8, R5 - ADDE R9, R6 - MOVD R5, 0(R3) - MOVD R6, 8(R3) - RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go index ec99e07e9..958fedc07 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build s390x,go1.11,!gccgo,!appengine +// +build !gccgo,!purego package poly1305 @@ -10,33 +10,66 @@ import ( "golang.org/x/sys/cpu" ) -// poly1305vx is an assembly implementation of Poly1305 that uses vector +// updateVX is an assembly implementation of Poly1305 that uses vector // instructions. It must only be called if the vector facility (vx) is // available. //go:noescape -func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte) +func updateVX(state *macState, msg []byte) -// poly1305vmsl is an assembly implementation of Poly1305 that uses vector -// instructions, including VMSL. It must only be called if the vector facility (vx) is -// available and if VMSL is supported. -//go:noescape -func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte) - -// Sum generates an authenticator for m using a one-time key and puts the -// 16-byte result into out. Authenticating two different messages with the same -// key allows an attacker to forge messages at will. -func Sum(out *[16]byte, m []byte, key *[32]byte) { - if cpu.S390X.HasVX { - var mPtr *byte - if len(m) > 0 { - mPtr = &m[0] +// mac is a replacement for macGeneric that uses a larger buffer and redirects +// calls that would have gone to updateGeneric to updateVX if the vector +// facility is installed. +// +// A larger buffer is required for good performance because the vector +// implementation has a higher fixed cost per call than the generic +// implementation. 
+type mac struct { + macState + + buffer [16 * TagSize]byte // size must be a multiple of block size (16) + offset int +} + +func (h *mac) Write(p []byte) (int, error) { + nn := len(p) + if h.offset > 0 { + n := copy(h.buffer[h.offset:], p) + if h.offset+n < len(h.buffer) { + h.offset += n + return nn, nil + } + p = p[n:] + h.offset = 0 + if cpu.S390X.HasVX { + updateVX(&h.macState, h.buffer[:]) + } else { + updateGeneric(&h.macState, h.buffer[:]) } - if cpu.S390X.HasVXE && len(m) > 256 { - poly1305vmsl(out, mPtr, uint64(len(m)), key) + } + + tail := len(p) % len(h.buffer) // number of bytes to copy into buffer + body := len(p) - tail // number of bytes to process now + if body > 0 { + if cpu.S390X.HasVX { + updateVX(&h.macState, p[:body]) } else { - poly1305vx(out, mPtr, uint64(len(m)), key) + updateGeneric(&h.macState, p[:body]) } - } else { - sumGeneric(out, m, key) } + h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 + return nn, nil +} + +func (h *mac) Sum(out *[TagSize]byte) { + state := h.macState + remainder := h.buffer[:h.offset] + + // Use the generic implementation if we have 2 or fewer blocks left + // to sum. The vector implementation has a higher startup time. + if cpu.S390X.HasVX && len(remainder) > 2*TagSize { + updateVX(&state, remainder) + } else if len(remainder) > 0 { + updateGeneric(&state, remainder) + } + finalize(out, &state.h, &state.s) } diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s index ca5a309d8..0fa9ee6e0 100644 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s +++ b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s @@ -2,115 +2,187 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build s390x,go1.11,!gccgo,!appengine +// +build !gccgo,!purego #include "textflag.h" -// Implementation of Poly1305 using the vector facility (vx). - -// constants -#define MOD26 V0 -#define EX0 V1 -#define EX1 V2 -#define EX2 V3 - -// temporaries -#define T_0 V4 -#define T_1 V5 -#define T_2 V6 -#define T_3 V7 -#define T_4 V8 - -// key (r) -#define R_0 V9 -#define R_1 V10 -#define R_2 V11 -#define R_3 V12 -#define R_4 V13 -#define R5_1 V14 -#define R5_2 V15 -#define R5_3 V16 -#define R5_4 V17 -#define RSAVE_0 R5 -#define RSAVE_1 R6 -#define RSAVE_2 R7 -#define RSAVE_3 R8 -#define RSAVE_4 R9 -#define R5SAVE_1 V28 -#define R5SAVE_2 V29 -#define R5SAVE_3 V30 -#define R5SAVE_4 V31 - -// message block -#define F_0 V18 -#define F_1 V19 -#define F_2 V20 -#define F_3 V21 -#define F_4 V22 - -// accumulator -#define H_0 V23 -#define H_1 V24 -#define H_2 V25 -#define H_3 V26 -#define H_4 V27 - -GLOBL ·keyMask<>(SB), RODATA, $16 -DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f -DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f - -GLOBL ·bswapMask<>(SB), RODATA, $16 -DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908 -DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100 - -GLOBL ·constants<>(SB), RODATA, $64 -// MOD26 -DATA ·constants<>+0(SB)/8, $0x3ffffff -DATA ·constants<>+8(SB)/8, $0x3ffffff +// This implementation of Poly1305 uses the vector facility (vx) +// to process up to 2 blocks (32 bytes) per iteration using an +// algorithm based on the one described in: +// +// NEON crypto, Daniel J. Bernstein & Peter Schwabe +// https://cryptojedi.org/papers/neoncrypto-20120320.pdf +// +// This algorithm uses 5 26-bit limbs to represent a 130-bit +// value. These limbs are, for the most part, zero extended and +// placed into 64-bit vector register elements. 
Each vector +// register is 128-bits wide and so holds 2 of these elements. +// Using 26-bit limbs allows us plenty of headroom to accomodate +// accumulations before and after multiplication without +// overflowing either 32-bits (before multiplication) or 64-bits +// (after multiplication). +// +// In order to parallelise the operations required to calculate +// the sum we use two separate accumulators and then sum those +// in an extra final step. For compatibility with the generic +// implementation we perform this summation at the end of every +// updateVX call. +// +// To use two accumulators we must multiply the message blocks +// by r² rather than r. Only the final message block should be +// multiplied by r. +// +// Example: +// +// We want to calculate the sum (h) for a 64 byte message (m): +// +// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r +// +// To do this we split the calculation into the even indices +// and odd indices of the message. These form our SIMD 'lanes': +// +// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0 +// m[16:32]r³ + m[48:64]r <- lane 1 +// +// To calculate this iteratively we refactor so that both lanes +// are written in terms of r² and r: +// +// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0 +// (m[16:32]r² + m[48:64])r <- lane 1 +// ^ ^ +// | coefficients for second iteration +// coefficients for first iteration +// +// So in this case we would have two iterations. In the first +// both lanes are multiplied by r². In the second only the +// first lane is multiplied by r² and the second lane is +// instead multiplied by r. This gives use the odd and even +// powers of r that we need from the original equation. +// +// Notation: +// +// h - accumulator +// r - key +// m - message +// +// [a, b] - SIMD register holding two 64-bit values +// [a, b, c, d] - SIMD register holding four 32-bit values +// xᵢ[n] - limb n of variable x with bit width i +// +// Limbs are expressed in little endian order, so for 26-bit +// limbs x₂₆[4] will be the most significant limb and x₂₆[0] +// will be the least significant limb. 
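The even/odd lane refactoring described above is just a regrouping of the same polynomial, so it can be checked independently of the vector code. The following math/big sketch (not part of the vendored assembly; arbitrary small integers stand in for the 16-byte blocks and the clamped key, and the modular reduction and appended 1 bits are ignored since they do not affect the regrouping) confirms that the two-accumulator form agrees with the sequential h = (h + m)·r evaluation used by the generic implementation.

// Illustrative only: the identity below also holds modulo 2¹³⁰ - 5.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	m := []*big.Int{big.NewInt(11), big.NewInt(22), big.NewInt(33), big.NewInt(44)}
	r := big.NewInt(12345)
	r2 := new(big.Int).Mul(r, r)

	// Sequential form, as the generic code computes it: h = (h + mᵢ) * r.
	direct := new(big.Int)
	for _, mi := range m {
		direct.Add(direct, mi)
		direct.Mul(direct, r)
	}
	// direct is now m0·r⁴ + m1·r³ + m2·r² + m3·r.

	// Two-lane form from the comment above:
	//   lane 0: (m0·r² + m2)·r²      lane 1: (m1·r² + m3)·r
	lane0 := new(big.Int).Mul(m[0], r2)
	lane0.Add(lane0, m[2])
	lane0.Mul(lane0, r2)

	lane1 := new(big.Int).Mul(m[1], r2)
	lane1.Add(lane1, m[3])
	lane1.Mul(lane1, r)

	sum := new(big.Int).Add(lane0, lane1)
	fmt.Println(sum.Cmp(direct) == 0) // true: the lanes together equal the direct sum
}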
+ +// masking constants +#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits +#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits + +// expansion constants (see EXPAND macro) +#define EX0 V2 +#define EX1 V3 +#define EX2 V4 + +// key (r², r or 1 depending on context) +#define R_0 V5 +#define R_1 V6 +#define R_2 V7 +#define R_3 V8 +#define R_4 V9 + +// precalculated coefficients (5r², 5r or 0 depending on context) +#define R5_1 V10 +#define R5_2 V11 +#define R5_3 V12 +#define R5_4 V13 + +// message block (m) +#define M_0 V14 +#define M_1 V15 +#define M_2 V16 +#define M_3 V17 +#define M_4 V18 + +// accumulator (h) +#define H_0 V19 +#define H_1 V20 +#define H_2 V21 +#define H_3 V22 +#define H_4 V23 + +// temporary registers (for short-lived values) +#define T_0 V24 +#define T_1 V25 +#define T_2 V26 +#define T_3 V27 +#define T_4 V28 + +GLOBL ·constants<>(SB), RODATA, $0x30 // EX0 -DATA ·constants<>+16(SB)/8, $0x0006050403020100 -DATA ·constants<>+24(SB)/8, $0x1016151413121110 +DATA ·constants<>+0x00(SB)/8, $0x0006050403020100 +DATA ·constants<>+0x08(SB)/8, $0x1016151413121110 // EX1 -DATA ·constants<>+32(SB)/8, $0x060c0b0a09080706 -DATA ·constants<>+40(SB)/8, $0x161c1b1a19181716 +DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706 +DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716 // EX2 -DATA ·constants<>+48(SB)/8, $0x0d0d0d0d0d0f0e0d -DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d - -// h = (f*g) % (2**130-5) [partial reduction] +DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d +DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d + +// MULTIPLY multiplies each lane of f and g, partially reduced +// modulo 2¹³⁰ - 5. The result, h, consists of partial products +// in each lane that need to be reduced further to produce the +// final result. +// +// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰ +// +// Note that the multiplication by 5 of the high bits is +// achieved by precalculating the multiplication of four of the +// g coefficients by 5. These are g51-g54. 
#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ VMLOF f0, g0, h0 \ - VMLOF f0, g1, h1 \ - VMLOF f0, g2, h2 \ VMLOF f0, g3, h3 \ + VMLOF f0, g1, h1 \ VMLOF f0, g4, h4 \ + VMLOF f0, g2, h2 \ VMLOF f1, g54, T_0 \ - VMLOF f1, g0, T_1 \ - VMLOF f1, g1, T_2 \ VMLOF f1, g2, T_3 \ + VMLOF f1, g0, T_1 \ VMLOF f1, g3, T_4 \ + VMLOF f1, g1, T_2 \ VMALOF f2, g53, h0, h0 \ - VMALOF f2, g54, h1, h1 \ - VMALOF f2, g0, h2, h2 \ VMALOF f2, g1, h3, h3 \ + VMALOF f2, g54, h1, h1 \ VMALOF f2, g2, h4, h4 \ + VMALOF f2, g0, h2, h2 \ VMALOF f3, g52, T_0, T_0 \ - VMALOF f3, g53, T_1, T_1 \ - VMALOF f3, g54, T_2, T_2 \ VMALOF f3, g0, T_3, T_3 \ + VMALOF f3, g53, T_1, T_1 \ VMALOF f3, g1, T_4, T_4 \ + VMALOF f3, g54, T_2, T_2 \ VMALOF f4, g51, h0, h0 \ - VMALOF f4, g52, h1, h1 \ - VMALOF f4, g53, h2, h2 \ VMALOF f4, g54, h3, h3 \ + VMALOF f4, g52, h1, h1 \ VMALOF f4, g0, h4, h4 \ + VMALOF f4, g53, h2, h2 \ VAG T_0, h0, h0 \ - VAG T_1, h1, h1 \ - VAG T_2, h2, h2 \ VAG T_3, h3, h3 \ - VAG T_4, h4, h4 - -// carry h0->h1 h3->h4, h1->h2 h4->h0, h0->h1 h2->h3, h3->h4 + VAG T_1, h1, h1 \ + VAG T_4, h4, h4 \ + VAG T_2, h2, h2 + +// REDUCE performs the following carry operations in four +// stages, as specified in Bernstein & Schwabe: +// +// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] +// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] +// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] +// 4: h₂₆[3]->h₂₆[4] +// +// The result is that all of the limbs are limited to 26-bits +// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. +// +// Note that although each limb is aligned at 26-bit intervals +// they may contain values that exceed 2²⁶ - 1, hence the need +// to carry the excess bits in each limb. #define REDUCE(h0, h1, h2, h3, h4) \ VESRLG $26, h0, T_0 \ VESRLG $26, h3, T_1 \ @@ -136,144 +208,155 @@ DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d VN MOD26, h3, h3 \ VAG T_2, h4, h4 -// expand in0 into d[0] and in1 into d[1] +// EXPAND splits the 128-bit little-endian values in0 and in1 +// into 26-bit big-endian limbs and places the results into +// the first and second lane of d₂₆[0:4] respectively. +// +// The EX0, EX1 and EX2 constants are arrays of byte indices +// for permutation. The permutation both reverses the bytes +// in the input and ensures the bytes are copied into the +// destination limb ready to be shifted into their final +// position. 
#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ - VGBM $0x0707, d1 \ // d1=tmp - VPERM in0, in1, EX2, d4 \ VPERM in0, in1, EX0, d0 \ VPERM in0, in1, EX1, d2 \ - VN d1, d4, d4 \ + VPERM in0, in1, EX2, d4 \ VESRLG $26, d0, d1 \ VESRLG $30, d2, d3 \ VESRLG $4, d2, d2 \ - VN MOD26, d0, d0 \ - VN MOD26, d1, d1 \ - VN MOD26, d2, d2 \ - VN MOD26, d3, d3 - -// pack h4:h0 into h1:h0 (no carry) -#define PACK(h0, h1, h2, h3, h4) \ - VESLG $26, h1, h1 \ - VESLG $26, h3, h3 \ - VO h0, h1, h0 \ - VO h2, h3, h2 \ - VESLG $4, h2, h2 \ - VLEIB $7, $48, h1 \ - VSLB h1, h2, h2 \ - VO h0, h2, h0 \ - VLEIB $7, $104, h1 \ - VSLB h1, h4, h3 \ - VO h3, h0, h0 \ - VLEIB $7, $24, h1 \ - VSRLB h1, h4, h1 - -// if h > 2**130-5 then h -= 2**130-5 -#define MOD(h0, h1, t0, t1, t2) \ - VZERO t0 \ - VLEIG $1, $5, t0 \ - VACCQ h0, t0, t1 \ - VAQ h0, t0, t0 \ - VONE t2 \ - VLEIG $1, $-4, t2 \ - VAQ t2, t1, t1 \ - VACCQ h1, t1, t1 \ - VONE t2 \ - VAQ t2, t1, t1 \ - VN h0, t1, t2 \ - VNC t0, t1, t1 \ - VO t1, t2, h0 - -// func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]key) -TEXT ·poly1305vx(SB), $0-32 - // This code processes up to 2 blocks (32 bytes) per iteration - // using the algorithm described in: - // NEON crypto, Daniel J. Bernstein & Peter Schwabe - // https://cryptojedi.org/papers/neoncrypto-20120320.pdf - LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key - - // load MOD26, EX0, EX1 and EX2 + VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]] + VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]] + VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]] + VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]] + VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]] + +// func updateVX(state *macState, msg []byte) +TEXT ·updateVX(SB), NOSPLIT, $0 + MOVD state+0(FP), R1 + LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len + + // load EX0, EX1 and EX2 MOVD $·constants<>(SB), R5 - VLM (R5), MOD26, EX2 - - // setup r - VL (R4), T_0 - MOVD $·keyMask<>(SB), R6 - VL (R6), T_1 - VN T_0, T_1, T_0 - EXPAND(T_0, T_0, R_0, R_1, R_2, R_3, R_4) - - // setup r*5 - VLEIG $0, $5, T_0 - VLEIG $1, $5, T_0 - - // store r (for final block) - VMLOF T_0, R_1, R5SAVE_1 - VMLOF T_0, R_2, R5SAVE_2 - VMLOF T_0, R_3, R5SAVE_3 - VMLOF T_0, R_4, R5SAVE_4 - VLGVG $0, R_0, RSAVE_0 - VLGVG $0, R_1, RSAVE_1 - VLGVG $0, R_2, RSAVE_2 - VLGVG $0, R_3, RSAVE_3 - VLGVG $0, R_4, RSAVE_4 - - // skip r**2 calculation + VLM (R5), EX0, EX2 + + // generate masks + VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff] + VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff] + + // load h (accumulator) and r (key) from state + VZERO T_1 // [0, 0] + VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]] + VLEG $0, 16(R1), T_1 // [h₆₄[2], 0] + VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]] + VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]] + VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]] + + // unpack h and r into 26-bit limbs + // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value + VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]] + VZERO H_1 // [0, 0] + VZERO H_3 // [0, 0] + VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out + VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0] + VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]] + VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only + VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]] + VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only + VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete + VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete + + // replicate r across all 4 vector elements + VREPF $3, H_0, R_0 // [r₂₆[0], 
r₂₆[0], r₂₆[0], r₂₆[0]] + VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]] + VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]] + VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]] + VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]] + + // zero out lane 1 of h + VLEIG $1, $0, H_0 // [h₂₆[0], 0] + VLEIG $1, $0, H_1 // [h₂₆[1], 0] + VLEIG $1, $0, H_2 // [h₂₆[2], 0] + VLEIG $1, $0, H_3 // [h₂₆[3], 0] + VLEIG $1, $0, H_4 // [h₂₆[4], 0] + + // calculate 5r (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]] + VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] + + // skip r² calculation if we are only calculating one block CMPBLE R3, $16, skip - // calculate r**2 - MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5SAVE_1, R5SAVE_2, R5SAVE_3, R5SAVE_4, H_0, H_1, H_2, H_3, H_4) - REDUCE(H_0, H_1, H_2, H_3, H_4) - VLEIG $0, $5, T_0 - VLEIG $1, $5, T_0 - VMLOF T_0, H_1, R5_1 - VMLOF T_0, H_2, R5_2 - VMLOF T_0, H_3, R5_3 - VMLOF T_0, H_4, R5_4 - VLR H_0, R_0 - VLR H_1, R_1 - VLR H_2, R_2 - VLR H_3, R_3 - VLR H_4, R_4 - - // initialize h - VZERO H_0 - VZERO H_1 - VZERO H_2 - VZERO H_3 - VZERO H_4 + // calculate r² + MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) + REDUCE(M_0, M_1, M_2, M_3, M_4) + VGBM $0x0f0f, T_0 + VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] + VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] + VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] + VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] + VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] + + // calculate 5r² (ignore least significant limb) + VREPIF $5, T_0 + VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] + VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] + VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] + VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] loop: - CMPBLE R3, $32, b2 - VLM (R2), T_0, T_1 - SUB $32, R3 - MOVD $32(R2), R2 - EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) - VLEIB $4, $1, F_4 - VLEIB $12, $1, F_4 + CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients + + // load next 2 blocks from message + VLM (R2), T_0, T_1 + + // update message slice + SUB $32, R3 + MOVD $32(R2), R2 + + // unpack message blocks into 26-bit big-endian limbs + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // add 2¹²⁸ to each message block value + VLEIB $4, $1, M_4 + VLEIB $12, $1, M_4 multiply: - VAG H_0, F_0, F_0 - VAG H_1, F_1, F_1 - VAG H_2, F_2, F_2 - VAG H_3, F_3, F_3 - VAG H_4, F_4, F_4 - MULTIPLY(F_0, F_1, F_2, F_3, F_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + // accumulate the incoming message + VAG H_0, M_0, M_0 + VAG H_3, M_3, M_3 + VAG H_1, M_1, M_1 + VAG H_4, M_4, M_4 + VAG H_2, M_2, M_2 + + // multiply the accumulator by the key coefficient + MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) + + // carry and partially reduce the partial products REDUCE(H_0, H_1, H_2, H_3, H_4) + CMPBNE R3, $0, loop finish: - // sum vectors + // sum lane 0 and lane 1 and put the result in lane 1 VZERO T_0 VSUMQG H_0, T_0, H_0 - VSUMQG H_1, T_0, H_1 - VSUMQG H_2, T_0, H_2 VSUMQG H_3, T_0, H_3 + VSUMQG H_1, T_0, H_1 
VSUMQG H_4, T_0, H_4 + VSUMQG H_2, T_0, H_2 - // h may be >= 2*(2**130-5) so we need to reduce it again + // reduce again after summation + // TODO(mundaym): there might be a more efficient way to do this + // now that we only have 1 active lane. For example, we could + // simultaneously pack the values as we reduce them. REDUCE(H_0, H_1, H_2, H_3, H_4) - // carry h1->h4 + // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1 + // TODO(mundaym): in testing this final carry was unnecessary. + // Needs a proof before it can be removed though. VESRLG $26, H_1, T_1 VN MOD26, H_1, H_1 VAQ T_1, H_2, H_2 @@ -284,95 +367,137 @@ finish: VN MOD26, H_3, H_3 VAQ T_3, H_4, H_4 - // h is now < 2*(2**130-5) - // pack h into h1 (hi) and h0 (lo) - PACK(H_0, H_1, H_2, H_3, H_4) - - // if h > 2**130-5 then h -= 2**130-5 - MOD(H_0, H_1, T_0, T_1, T_2) - - // h += s - MOVD $·bswapMask<>(SB), R5 - VL (R5), T_1 - VL 16(R4), T_0 - VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big) - VAQ T_0, H_0, H_0 - VPERM H_0, H_0, T_1, H_0 // reverse bytes (to little) - VST H_0, (R1) - + // h is now < 2(2¹³⁰ - 5) + // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1]. + VESLG $26, H_1, H_1 + VESLG $26, H_3, H_3 + VO H_0, H_1, H_0 + VO H_2, H_3, H_2 + VESLG $4, H_2, H_2 + VLEIB $7, $48, H_1 + VSLB H_1, H_2, H_2 + VO H_0, H_2, H_0 + VLEIB $7, $104, H_1 + VSLB H_1, H_4, H_3 + VO H_3, H_0, H_0 + VLEIB $7, $24, H_1 + VSRLB H_1, H_4, H_1 + + // update state + VSTEG $1, H_0, 0(R1) + VSTEG $0, H_0, 8(R1) + VSTEG $1, H_1, 16(R1) RET -b2: +b2: // 2 or fewer blocks remaining CMPBLE R3, $16, b1 - // 2 blocks remaining - SUB $17, R3 - VL (R2), T_0 - VLL R3, 16(R2), T_1 - ADD $1, R3 + // Load the 2 remaining blocks (17-32 bytes remaining). + MOVD $-17(R3), R0 // index of final byte to load modulo 16 + VL (R2), T_0 // load full 16 byte block + VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, T_1 - EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) + MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16) + CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long + VLVGB R3, R0, T_1 // insert 1 into the byte at index R3 + + // Split both blocks into 26-bit limbs in the appropriate lanes. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the second to last block. + VLEIB $4, $1, M_4 + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. CMPBNE R3, $16, 2(PC) - VLEIB $12, $1, F_4 - VLEIB $4, $1, F_4 - - // setup [r²,r] - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, RSAVE_3, R_3 - VLVGG $1, RSAVE_4, R_4 - VPDI $0, R5_1, R5SAVE_1, R5_1 - VPDI $0, R5_2, R5SAVE_2, R5_2 - VPDI $0, R5_3, R5SAVE_3, R5_3 - VPDI $0, R5_4, R5SAVE_4, R5_4 + VLEIB $12, $1, M_4 + + // Finally, set up the coefficients for the final multiplication. + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. + // + // We want lane 0 to be multiplied by r² so that can be kept the + // same. 
We want lane 1 to be multiplied by r so we need to move + // the saved r value into the 32-bit odd index in lane 1 by + // rotating the 64-bit lane by 32. + VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only + VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]] + VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]] + VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]] + VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]] + VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]] + VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]] + VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]] + VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]] + VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]] MOVD $0, R3 BR multiply skip: - VZERO H_0 - VZERO H_1 - VZERO H_2 - VZERO H_3 - VZERO H_4 - CMPBEQ R3, $0, finish -b1: - // 1 block remaining - SUB $1, R3 - VLL R3, (R2), T_0 - ADD $1, R3 +b1: // 1 block remaining + + // Load the final block (1-16 bytes). This will be placed into + // lane 0. + MOVD $-1(R3), R0 + VLL R0, (R2), T_0 // pad to 16 bytes with zeros + + // The Poly1305 algorithm requires that a 1 bit be appended to + // each message block. If the final block is less than 16 bytes + // long then it is easiest to insert the 1 before the message + // block is split into 26-bit limbs. If, on the other hand, the + // final message block is 16 bytes long then we append the 1 bit + // after expansion as normal. MOVBZ $1, R0 CMPBEQ R3, $16, 2(PC) VLVGB R3, R0, T_0 - VZERO T_1 - EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4) + + // Set the message block in lane 1 to the value 0 so that it + // can be accumulated without affecting the final result. + VZERO T_1 + + // Split the final message block into 26-bit limbs in lane 0. + // Lane 1 will be contain 0. + EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) + + // Append a 1 byte to the end of the last block only if it is a + // full 16 byte block. CMPBNE R3, $16, 2(PC) - VLEIB $4, $1, F_4 - VLEIG $1, $1, R_0 - VZERO R_1 - VZERO R_2 - VZERO R_3 - VZERO R_4 - VZERO R5_1 - VZERO R5_2 - VZERO R5_3 - VZERO R5_4 - - // setup [r, 1] - VLVGG $0, RSAVE_0, R_0 - VLVGG $0, RSAVE_1, R_1 - VLVGG $0, RSAVE_2, R_2 - VLVGG $0, RSAVE_3, R_3 - VLVGG $0, RSAVE_4, R_4 - VPDI $0, R5SAVE_1, R5_1, R5_1 - VPDI $0, R5SAVE_2, R5_2, R5_2 - VPDI $0, R5SAVE_3, R5_3, R5_3 - VPDI $0, R5SAVE_4, R5_4, R5_4 + VLEIB $4, $1, M_4 + + // We have previously saved r and 5r in the 32-bit even indexes + // of the R_[0-4] and R5_[1-4] coefficient registers. + // + // We want lane 0 to be multiplied by r so we need to move the + // saved r value into the 32-bit odd index in lane 0. We want + // lane 1 to be set to the value 1. This makes multiplication + // a no-op. We do this by setting lane 1 in every register to 0 + // and then just setting the 32-bit index 3 in R_0 to 1. + VZERO T_0 + MOVD $0, R0 + MOVD $0x10111213, R12 + VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] + VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] + VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] + VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] + VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] + VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] + VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] + VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] + VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] + VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] + + // Set the value of lane 1 to be 1. 
+ VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] MOVD $0, R3 BR multiply diff --git a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s deleted file mode 100644 index e60bbc1d7..000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s +++ /dev/null @@ -1,909 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build s390x,go1.11,!gccgo,!appengine - -#include "textflag.h" - -// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction. - -// constants -#define EX0 V1 -#define EX1 V2 -#define EX2 V3 - -// temporaries -#define T_0 V4 -#define T_1 V5 -#define T_2 V6 -#define T_3 V7 -#define T_4 V8 -#define T_5 V9 -#define T_6 V10 -#define T_7 V11 -#define T_8 V12 -#define T_9 V13 -#define T_10 V14 - -// r**2 & r**4 -#define R_0 V15 -#define R_1 V16 -#define R_2 V17 -#define R5_1 V18 -#define R5_2 V19 -// key (r) -#define RSAVE_0 R7 -#define RSAVE_1 R8 -#define RSAVE_2 R9 -#define R5SAVE_1 R10 -#define R5SAVE_2 R11 - -// message block -#define M0 V20 -#define M1 V21 -#define M2 V22 -#define M3 V23 -#define M4 V24 -#define M5 V25 - -// accumulator -#define H0_0 V26 -#define H1_0 V27 -#define H2_0 V28 -#define H0_1 V29 -#define H1_1 V30 -#define H2_1 V31 - -GLOBL ·keyMask<>(SB), RODATA, $16 -DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f -DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f - -GLOBL ·bswapMask<>(SB), RODATA, $16 -DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908 -DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100 - -GLOBL ·constants<>(SB), RODATA, $48 -// EX0 -DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f -DATA ·constants<>+8(SB)/8, $0x0000050403020100 -// EX1 -DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f -DATA ·constants<>+24(SB)/8, $0x00000a0908070605 -// EX2 -DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f -DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b - -GLOBL ·c<>(SB), RODATA, $48 -// EX0 -DATA ·c<>+0(SB)/8, $0x0000050403020100 -DATA ·c<>+8(SB)/8, $0x0000151413121110 -// EX1 -DATA ·c<>+16(SB)/8, $0x00000a0908070605 -DATA ·c<>+24(SB)/8, $0x00001a1918171615 -// EX2 -DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b -DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b - -GLOBL ·reduce<>(SB), RODATA, $32 -// 44 bit -DATA ·reduce<>+0(SB)/8, $0x0 -DATA ·reduce<>+8(SB)/8, $0xfffffffffff -// 42 bit -DATA ·reduce<>+16(SB)/8, $0x0 -DATA ·reduce<>+24(SB)/8, $0x3ffffffffff - -// h = (f*g) % (2**130-5) [partial reduction] -// uses T_0...T_9 temporary registers -// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2 -// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9 -// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2 -#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \ - \ // Eliminate the dependency for the last 2 VMSLs - VMSLG m02_0, r_2, m4_2, m4_2 \ - VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined - VMSLG m02_0, r_0, m4_0, m4_0 \ - VMSLG m02_1, r5_2, V0, T_0 \ - VMSLG m02_0, r_1, m4_1, m4_1 \ - VMSLG m02_1, r_0, V0, T_1 \ - VMSLG m02_1, r_1, V0, T_2 \ - VMSLG m02_2, r5_1, V0, T_3 \ - VMSLG m02_2, r5_2, V0, T_4 \ - VMSLG m13_0, r_0, m5_0, m5_0 \ - VMSLG m13_1, r5_2, V0, T_5 \ - VMSLG m13_0, r_1, m5_1, m5_1 \ - VMSLG m13_1, r_0, V0, T_6 \ - VMSLG m13_1, r_1, V0, T_7 \ - VMSLG m13_2, r5_1, V0, T_8 \ - VMSLG m13_2, r5_2, V0, T_9 \ - VMSLG 
m02_2, r_0, m4_2, m4_2 \ - VMSLG m13_2, r_0, m5_2, m5_2 \ - VAQ m4_0, T_0, m02_0 \ - VAQ m4_1, T_1, m02_1 \ - VAQ m5_0, T_5, m13_0 \ - VAQ m5_1, T_6, m13_1 \ - VAQ m02_0, T_3, m02_0 \ - VAQ m02_1, T_4, m02_1 \ - VAQ m13_0, T_8, m13_0 \ - VAQ m13_1, T_9, m13_1 \ - VAQ m4_2, T_2, m02_2 \ - VAQ m5_2, T_7, m13_2 \ - -// SQUARE uses three limbs of r and r_2*5 to output square of r -// uses T_1, T_5 and T_7 temporary registers -// input: r_0, r_1, r_2, r5_2 -// temp: TEMP0, TEMP1, TEMP2 -// output: p0, p1, p2 -#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \ - VMSLG r_0, r_0, p0, p0 \ - VMSLG r_1, r5_2, V0, TEMP0 \ - VMSLG r_2, r5_2, p1, p1 \ - VMSLG r_0, r_1, V0, TEMP1 \ - VMSLG r_1, r_1, p2, p2 \ - VMSLG r_0, r_2, V0, TEMP2 \ - VAQ TEMP0, p0, p0 \ - VAQ TEMP1, p1, p1 \ - VAQ TEMP2, p2, p2 \ - VAQ TEMP0, p0, p0 \ - VAQ TEMP1, p1, p1 \ - VAQ TEMP2, p2, p2 \ - -// carry h0->h1->h2->h0 || h3->h4->h5->h3 -// uses T_2, T_4, T_5, T_7, T_8, T_9 -// t6, t7, t8, t9, t10, t11 -// input: h0, h1, h2, h3, h4, h5 -// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11 -// output: h0, h1, h2, h3, h4, h5 -#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \ - VLM (R12), t6, t7 \ // 44 and 42 bit clear mask - VLEIB $7, $0x28, t10 \ // 5 byte shift mask - VREPIB $4, t8 \ // 4 bit shift mask - VREPIB $2, t11 \ // 2 bit shift mask - VSRLB t10, h0, t0 \ // h0 byte shift - VSRLB t10, h1, t1 \ // h1 byte shift - VSRLB t10, h2, t2 \ // h2 byte shift - VSRLB t10, h3, t3 \ // h3 byte shift - VSRLB t10, h4, t4 \ // h4 byte shift - VSRLB t10, h5, t5 \ // h5 byte shift - VSRL t8, t0, t0 \ // h0 bit shift - VSRL t8, t1, t1 \ // h2 bit shift - VSRL t11, t2, t2 \ // h2 bit shift - VSRL t8, t3, t3 \ // h3 bit shift - VSRL t8, t4, t4 \ // h4 bit shift - VESLG $2, t2, t9 \ // h2 carry x5 - VSRL t11, t5, t5 \ // h5 bit shift - VN t6, h0, h0 \ // h0 clear carry - VAQ t2, t9, t2 \ // h2 carry x5 - VESLG $2, t5, t9 \ // h5 carry x5 - VN t6, h1, h1 \ // h1 clear carry - VN t7, h2, h2 \ // h2 clear carry - VAQ t5, t9, t5 \ // h5 carry x5 - VN t6, h3, h3 \ // h3 clear carry - VN t6, h4, h4 \ // h4 clear carry - VN t7, h5, h5 \ // h5 clear carry - VAQ t0, h1, h1 \ // h0->h1 - VAQ t3, h4, h4 \ // h3->h4 - VAQ t1, h2, h2 \ // h1->h2 - VAQ t4, h5, h5 \ // h4->h5 - VAQ t2, h0, h0 \ // h2->h0 - VAQ t5, h3, h3 \ // h5->h3 - VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves - VREPG $1, t7, t7 \ - VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5] - VSLDB $8, h1, h1, h1 \ - VSLDB $8, h2, h2, h2 \ - VO h0, h3, h3 \ - VO h1, h4, h4 \ - VO h2, h5, h5 \ - VESRLG $44, h3, t0 \ // 44 bit shift right - VESRLG $44, h4, t1 \ - VESRLG $42, h5, t2 \ - VN t6, h3, h3 \ // clear carry bits - VN t6, h4, h4 \ - VN t7, h5, h5 \ - VESLG $2, t2, t9 \ // multiply carry by 5 - VAQ t9, t2, t2 \ - VAQ t0, h4, h4 \ - VAQ t1, h5, h5 \ - VAQ t2, h3, h3 \ - -// carry h0->h1->h2->h0 -// input: h0, h1, h2 -// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8 -// output: h0, h1, h2 -#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \ - VLEIB $7, $0x28, t3 \ // 5 byte shift mask - VREPIB $4, t4 \ // 4 bit shift mask - VREPIB $2, t7 \ // 2 bit shift mask - VGBM $0x003F, t5 \ // mask to clear carry bits - VSRLB t3, h0, t0 \ - VSRLB t3, h1, t1 \ - VSRLB t3, h2, t2 \ - VESRLG $4, t5, t5 \ // 44 bit clear mask - VSRL t4, t0, t0 \ - VSRL t4, t1, t1 \ - VSRL t7, t2, t2 \ - VESRLG $2, t5, t6 \ // 42 bit clear mask - VESLG $2, t2, t8 \ - VAQ t8, t2, t2 \ - VN t5, h0, h0 \ - VN t5, h1, h1 \ - VN t6, h2, h2 \ 
- VAQ t0, h1, h1 \ - VAQ t1, h2, h2 \ - VAQ t2, h0, h0 \ - VSRLB t3, h0, t0 \ - VSRLB t3, h1, t1 \ - VSRLB t3, h2, t2 \ - VSRL t4, t0, t0 \ - VSRL t4, t1, t1 \ - VSRL t7, t2, t2 \ - VN t5, h0, h0 \ - VN t5, h1, h1 \ - VESLG $2, t2, t8 \ - VN t6, h2, h2 \ - VAQ t0, h1, h1 \ - VAQ t8, t2, t2 \ - VAQ t1, h2, h2 \ - VAQ t2, h0, h0 \ - -// expands two message blocks into the lower halfs of the d registers -// moves the contents of the d registers into upper halfs -// input: in1, in2, d0, d1, d2, d3, d4, d5 -// temp: TEMP0, TEMP1, TEMP2, TEMP3 -// output: d0, d1, d2, d3, d4, d5 -#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, TEMP3) \ - VGBM $0xff3f, TEMP0 \ - VGBM $0xff1f, TEMP1 \ - VESLG $4, d1, TEMP2 \ - VESLG $4, d4, TEMP3 \ - VESRLG $4, TEMP0, TEMP0 \ - VPERM in1, d0, EX0, d0 \ - VPERM in2, d3, EX0, d3 \ - VPERM in1, d2, EX2, d2 \ - VPERM in2, d5, EX2, d5 \ - VPERM in1, TEMP2, EX1, d1 \ - VPERM in2, TEMP3, EX1, d4 \ - VN TEMP0, d0, d0 \ - VN TEMP0, d3, d3 \ - VESRLG $4, d1, d1 \ - VESRLG $4, d4, d4 \ - VN TEMP1, d2, d2 \ - VN TEMP1, d5, d5 \ - VN TEMP0, d1, d1 \ - VN TEMP0, d4, d4 \ - -// expands one message block into the lower halfs of the d registers -// moves the contents of the d registers into upper halfs -// input: in, d0, d1, d2 -// temp: TEMP0, TEMP1, TEMP2 -// output: d0, d1, d2 -#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \ - VGBM $0xff3f, TEMP0 \ - VESLG $4, d1, TEMP2 \ - VGBM $0xff1f, TEMP1 \ - VPERM in, d0, EX0, d0 \ - VESRLG $4, TEMP0, TEMP0 \ - VPERM in, d2, EX2, d2 \ - VPERM in, TEMP2, EX1, d1 \ - VN TEMP0, d0, d0 \ - VN TEMP1, d2, d2 \ - VESRLG $4, d1, d1 \ - VN TEMP0, d1, d1 \ - -// pack h2:h0 into h1:h0 (no carry) -// input: h0, h1, h2 -// output: h0, h1, h2 -#define PACK(h0, h1, h2) \ - VMRLG h1, h2, h2 \ // copy h1 to upper half h2 - VESLG $44, h1, h1 \ // shift limb 1 44 bits, leaving 20 - VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1 - VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1 - VLEIG $1, $0, h1 \ // clear h2 stuff from lower half of h1 - VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1) - VLEIG $0, $0, h2 \ // clear upper half of h2 - VESRLG $40, h2, h1 \ // h1 now has upper two bits of result - VLEIB $7, $88, h1 \ // for byte shift (11 bytes) - VSLB h1, h2, h2 \ // shift h2 11 bytes to the left - VO h0, h2, h0 \ // combine h0 with 20 bits from limb 1 - VLEIG $0, $0, h1 \ // clear upper half of h1 - -// if h > 2**130-5 then h -= 2**130-5 -// input: h0, h1 -// temp: t0, t1, t2 -// output: h0 -#define MOD(h0, h1, t0, t1, t2) \ - VZERO t0 \ - VLEIG $1, $5, t0 \ - VACCQ h0, t0, t1 \ - VAQ h0, t0, t0 \ - VONE t2 \ - VLEIG $1, $-4, t2 \ - VAQ t2, t1, t1 \ - VACCQ h1, t1, t1 \ - VONE t2 \ - VAQ t2, t1, t1 \ - VN h0, t1, t2 \ - VNC t0, t1, t1 \ - VO t1, t2, h0 \ - -// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]key) -TEXT ·poly1305vmsl(SB), $0-32 - // This code processes 6 + up to 4 blocks (32 bytes) per iteration - // using the algorithm described in: - // NEON crypto, Daniel J. 
Bernstein & Peter Schwabe - // https://cryptojedi.org/papers/neoncrypto-20120320.pdf - // And as moddified for VMSL as described in - // Accelerating Poly1305 Cryptographic Message Authentication on the z14 - // O'Farrell et al, CASCON 2017, p48-55 - // https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht - - LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key - VZERO V0 // c - - // load EX0, EX1 and EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 // c - - // setup r - VL (R4), T_0 - MOVD $·keyMask<>(SB), R6 - VL (R6), T_1 - VN T_0, T_1, T_0 - VZERO T_2 // limbs for r - VZERO T_3 - VZERO T_4 - EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7) - - // T_2, T_3, T_4: [0, r] - - // setup r*20 - VLEIG $0, $0, T_0 - VLEIG $1, $20, T_0 // T_0: [0, 20] - VZERO T_5 - VZERO T_6 - VMSLG T_0, T_3, T_5, T_5 - VMSLG T_0, T_4, T_6, T_6 - - // store r for final block in GR - VLGVG $1, T_2, RSAVE_0 // c - VLGVG $1, T_3, RSAVE_1 // c - VLGVG $1, T_4, RSAVE_2 // c - VLGVG $1, T_5, R5SAVE_1 // c - VLGVG $1, T_6, R5SAVE_2 // c - - // initialize h - VZERO H0_0 - VZERO H1_0 - VZERO H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - // initialize pointer for reduce constants - MOVD $·reduce<>(SB), R12 - - // calculate r**2 and 20*(r**2) - VZERO R_0 - VZERO R_1 - VZERO R_2 - SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7) - REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1) - VZERO R5_1 - VZERO R5_2 - VMSLG T_0, R_1, R5_1, R5_1 - VMSLG T_0, R_2, R5_2, R5_2 - - // skip r**4 calculation if 3 blocks or less - CMPBLE R3, $48, b4 - - // calculate r**4 and 20*(r**4) - VZERO T_8 - VZERO T_9 - VZERO T_10 - SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7) - REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1) - VZERO T_2 - VZERO T_3 - VMSLG T_0, T_9, T_2, T_2 - VMSLG T_0, T_10, T_3, T_3 - - // put r**2 to the right and r**4 to the left of R_0, R_1, R_2 - VSLDB $8, T_8, T_8, T_8 - VSLDB $8, T_9, T_9, T_9 - VSLDB $8, T_10, T_10, T_10 - VSLDB $8, T_2, T_2, T_2 - VSLDB $8, T_3, T_3, T_3 - - VO T_8, R_0, R_0 - VO T_9, R_1, R_1 - VO T_10, R_2, R_2 - VO T_2, R5_1, R5_1 - VO T_3, R5_2, R5_2 - - CMPBLE R3, $80, load // less than or equal to 5 blocks in message - - // 6(or 5+1) blocks - SUB $81, R3 - VLM (R2), M0, M4 - VLL R3, 80(R2), M5 - ADD $1, R3 - MOVBZ $1, R0 - CMPBGE R3, $16, 2(PC) - VLVGB R3, R0, M5 - MOVD $96(R2), R2 - EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) - EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) - VLEIB $2, $1, H2_0 - VLEIB $2, $1, H2_1 - VLEIB $10, $1, H2_0 - VLEIB $10, $1, H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO T_4 - VZERO T_10 - EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3) - VLR T_4, M4 - VLEIB $10, $1, M2 - CMPBLT R3, $16, 2(PC) - VLEIB $10, $1, T_10 - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - - SUB $16, R3 - CMPBLE R3, $0, square - -load: - // load EX0, EX1 and EX2 - MOVD $·c<>(SB), R5 - VLM (R5), EX0, EX2 - -loop: - CMPBLE R3, $64, add // b4 // last 4 or less blocks left - - // next 4 full blocks - VLM (R2), M2, M5 - SUB $64, R3 - MOVD $64(R2), R2 - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, 
T_2, T_7, T_8, T_9) - - // expacc in-lined to create [m2, m3] limbs - VGBM $0x3f3f, T_0 // 44 bit clear mask - VGBM $0x1f1f, T_1 // 40 bit clear mask - VPERM M2, M3, EX0, T_3 - VESRLG $4, T_0, T_0 // 44 bit clear mask ready - VPERM M2, M3, EX1, T_4 - VPERM M2, M3, EX2, T_5 - VN T_0, T_3, T_3 - VESRLG $4, T_4, T_4 - VN T_1, T_5, T_5 - VN T_0, T_4, T_4 - VMRHG H0_1, T_3, H0_0 - VMRHG H1_1, T_4, H1_0 - VMRHG H2_1, T_5, H2_0 - VMRLG H0_1, T_3, H0_1 - VMRLG H1_1, T_4, H1_1 - VMRLG H2_1, T_5, H2_1 - VLEIB $10, $1, H2_0 - VLEIB $10, $1, H2_1 - VPERM M4, M5, EX0, T_3 - VPERM M4, M5, EX1, T_4 - VPERM M4, M5, EX2, T_5 - VN T_0, T_3, T_3 - VESRLG $4, T_4, T_4 - VN T_1, T_5, T_5 - VN T_0, T_4, T_4 - VMRHG V0, T_3, M0 - VMRHG V0, T_4, M1 - VMRHG V0, T_5, M2 - VMRLG V0, T_3, M3 - VMRLG V0, T_4, M4 - VMRLG V0, T_5, M5 - VLEIB $10, $1, M2 - VLEIB $10, $1, M5 - - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - CMPBNE R3, $0, loop - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - - // load EX0, EX1, EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 - - // sum vectors - VAQ H0_0, H0_1, H0_0 - VAQ H1_0, H1_1, H1_0 - VAQ H2_0, H2_1, H2_0 - - // h may be >= 2*(2**130-5) so we need to reduce it again - // M0...M4 are used as temps here - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) - -next: // carry h1->h2 - VLEIB $7, $0x28, T_1 - VREPIB $4, T_2 - VGBM $0x003F, T_3 - VESRLG $4, T_3 - - // byte shift - VSRLB T_1, H1_0, T_4 - - // bit shift - VSRL T_2, T_4, T_4 - - // clear h1 carry bits - VN T_3, H1_0, H1_0 - - // add carry - VAQ T_4, H2_0, H2_0 - - // h is now < 2*(2**130-5) - // pack h into h1 (hi) and h0 (lo) - PACK(H0_0, H1_0, H2_0) - - // if h > 2**130-5 then h -= 2**130-5 - MOD(H0_0, H1_0, T_0, T_1, T_2) - - // h += s - MOVD $·bswapMask<>(SB), R5 - VL (R5), T_1 - VL 16(R4), T_0 - VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big) - VAQ T_0, H0_0, H0_0 - VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little) - VST H0_0, (R1) - RET - -add: - // load EX0, EX1, EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 - - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - CMPBLE R3, $64, b4 - -b4: - CMPBLE R3, $48, b3 // 3 blocks or less - - // 4(3+1) blocks remaining - SUB $49, R3 - VLM (R2), M0, M2 - VLL R3, 48(R2), M3 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, M3 - MOVD $64(R2), R2 - EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3) - VLEIB $10, $1, H2_0 - VLEIB $10, $1, H2_1 - VZERO M0 - VZERO M1 - VZERO M4 - VZERO M5 - VZERO T_4 - VZERO T_10 - EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3) - VLR T_4, M2 - VLEIB $10, $1, M4 - CMPBNE R3, $16, 2(PC) - VLEIB $10, $1, T_10 - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - 
SUB $16, R3 - CMPBLE R3, $0, square // this condition must always hold true! - -b3: - CMPBLE R3, $32, b2 - - // 3 blocks remaining - - // setup [r²,r] - VSLDB $8, R_0, R_0, R_0 - VSLDB $8, R_1, R_1, R_1 - VSLDB $8, R_2, R_2, R_2 - VSLDB $8, R5_1, R5_1, R5_1 - VSLDB $8, R5_2, R5_2, R5_2 - - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, R5SAVE_1, R5_1 - VLVGG $1, R5SAVE_2, R5_2 - - // setup [h0, h1] - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - VO H0_1, H0_0, H0_0 - VO H1_1, H1_0, H1_0 - VO H2_1, H2_0, H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - - // H*[r**2, r] - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5) - - SUB $33, R3 - VLM (R2), M0, M1 - VLL R3, 32(R2), M2 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, M2 - - // H += m0 - VZERO T_1 - VZERO T_2 - VZERO T_3 - EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6) - VLEIB $10, $1, T_3 - VAG H0_0, T_1, H0_0 - VAG H1_0, T_2, H1_0 - VAG H2_0, T_3, H2_0 - - VZERO M0 - VZERO M3 - VZERO M4 - VZERO M5 - VZERO T_10 - - // (H+m0)*r - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9) - - // H += m1 - VZERO V0 - VZERO T_1 - VZERO T_2 - VZERO T_3 - EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6) - VLEIB $10, $1, T_3 - VAQ H0_0, T_1, H0_0 - VAQ H1_0, T_2, H1_0 - VAQ H2_0, T_3, H2_0 - REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10) - - // [H, m2] * [r**2, r] - EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3) - CMPBNE R3, $16, 2(PC) - VLEIB $10, $1, H2_0 - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10) - SUB $16, R3 - CMPBLE R3, $0, next // this condition must always hold true! 
- -b2: - CMPBLE R3, $16, b1 - - // 2 blocks remaining - - // setup [r²,r] - VSLDB $8, R_0, R_0, R_0 - VSLDB $8, R_1, R_1, R_1 - VSLDB $8, R_2, R_2, R_2 - VSLDB $8, R5_1, R5_1, R5_1 - VSLDB $8, R5_2, R5_2, R5_2 - - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, R5SAVE_1, R5_1 - VLVGG $1, R5SAVE_2, R5_2 - - // setup [h0, h1] - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - VO H0_1, H0_0, H0_0 - VO H1_1, H1_0, H1_0 - VO H2_1, H2_0, H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - - // H*[r**2, r] - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9) - VMRHG V0, H0_1, H0_0 - VMRHG V0, H1_1, H1_0 - VMRHG V0, H2_1, H2_0 - VMRLG V0, H0_1, H0_1 - VMRLG V0, H1_1, H1_1 - VMRLG V0, H2_1, H2_1 - - // move h to the left and 0s at the right - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - - // get message blocks and append 1 to start - SUB $17, R3 - VL (R2), M0 - VLL R3, 16(R2), M1 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, M1 - VZERO T_6 - VZERO T_7 - VZERO T_8 - EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3) - EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3) - VLEIB $2, $1, T_8 - CMPBNE R3, $16, 2(PC) - VLEIB $10, $1, T_8 - - // add [m0, m1] to h - VAG H0_0, T_6, H0_0 - VAG H1_0, T_7, H1_0 - VAG H2_0, T_8, H2_0 - - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - VZERO T_10 - VZERO M0 - - // at this point R_0 .. R5_2 look like [r**2, r] - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10) - SUB $16, R3, R3 - CMPBLE R3, $0, next - -b1: - CMPBLE R3, $0, next - - // 1 block remaining - - // setup [r²,r] - VSLDB $8, R_0, R_0, R_0 - VSLDB $8, R_1, R_1, R_1 - VSLDB $8, R_2, R_2, R_2 - VSLDB $8, R5_1, R5_1, R5_1 - VSLDB $8, R5_2, R5_2, R5_2 - - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, R5SAVE_1, R5_1 - VLVGG $1, R5SAVE_2, R5_2 - - // setup [h0, h1] - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - VO H0_1, H0_0, H0_0 - VO H1_1, H1_0, H1_0 - VO H2_1, H2_0, H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - - // H*[r**2, r] - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) - - // set up [0, m0] limbs - SUB $1, R3 - VLL R3, (R2), M0 - ADD $1, R3 - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, M0 - VZERO T_1 - VZERO T_2 - VZERO T_3 - EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)// limbs: [0, m] - CMPBNE R3, $16, 2(PC) - VLEIB $10, $1, T_3 - - // h+m0 - VAQ H0_0, T_1, H0_0 - VAQ H1_0, T_2, H1_0 - VAQ H2_0, T_3, H2_0 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) - - BR next - -square: - // setup [r²,r] - VSLDB $8, R_0, R_0, 
R_0 - VSLDB $8, R_1, R_1, R_1 - VSLDB $8, R_2, R_2, R_2 - VSLDB $8, R5_1, R5_1, R5_1 - VSLDB $8, R5_2, R5_2, R5_2 - - VLVGG $1, RSAVE_0, R_0 - VLVGG $1, RSAVE_1, R_1 - VLVGG $1, RSAVE_2, R_2 - VLVGG $1, R5SAVE_1, R5_1 - VLVGG $1, R5SAVE_2, R5_2 - - // setup [h0, h1] - VSLDB $8, H0_0, H0_0, H0_0 - VSLDB $8, H1_0, H1_0, H1_0 - VSLDB $8, H2_0, H2_0, H2_0 - VO H0_1, H0_0, H0_0 - VO H1_1, H1_0, H1_0 - VO H2_1, H2_0, H2_0 - VZERO H0_1 - VZERO H1_1 - VZERO H2_1 - - VZERO M0 - VZERO M1 - VZERO M2 - VZERO M3 - VZERO M4 - VZERO M5 - - // (h0*r**2) + (h1*r) - MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9) - REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5) - BR next diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index 00ed9923e..916c840b6 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -17,12 +17,14 @@ import ( // These constants from [PROTOCOL.certkeys] represent the algorithm names // for certificate types supported by this package. const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" + CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" + CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" + CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" + CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" + CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" + CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" + CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" + CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" ) // Certificate types distinguish between host and user @@ -37,6 +39,7 @@ const ( type Signature struct { Format string Blob []byte + Rest []byte `ssh:"rest"` } // CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that @@ -411,8 +414,8 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { return nil } -// SignCert sets c.SignatureKey to the authority's public key and stores a -// Signature, by authority, in the certificate. +// SignCert signs the certificate with an authority, setting the Nonce, +// SignatureKey, and Signature fields. func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { c.Nonce = make([]byte, 32) if _, err := io.ReadFull(rand, c.Nonce); err != nil { @@ -429,12 +432,14 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { } var certAlgoNames = map[string]string{ - KeyAlgoRSA: CertAlgoRSAv01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoED25519: CertAlgoED25519v01, + KeyAlgoRSA: CertAlgoRSAv01, + KeyAlgoDSA: CertAlgoDSAv01, + KeyAlgoECDSA256: CertAlgoECDSA256v01, + KeyAlgoECDSA384: CertAlgoECDSA384v01, + KeyAlgoECDSA521: CertAlgoECDSA521v01, + KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01, + KeyAlgoED25519: CertAlgoED25519v01, + KeyAlgoSKED25519: CertAlgoSKED25519v01, } // certToPrivAlgo returns the underlying algorithm for a certificate algorithm. 
@@ -518,6 +523,12 @@ func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { return } + switch out.Format { + case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01: + out.Rest = in + return out, nil, ok + } + return out, in, ok } diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go index a65a923be..8bd6b3daf 100644 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -16,9 +16,8 @@ import ( "hash" "io" "io/ioutil" - "math/bits" - "golang.org/x/crypto/internal/chacha20" + "golang.org/x/crypto/chacha20" "golang.org/x/crypto/poly1305" ) @@ -120,7 +119,7 @@ var cipherModes = map[string]*cipherMode{ chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, // CBC mode is insecure and so is not included in the default config. - // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely + // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely // needed, it's possible to specify a custom Config to enable it. // You should expect that an active attacker can recover plaintext if // you do. @@ -642,8 +641,8 @@ const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" // the methods here also implement padding, which RFC4253 Section 6 // also requires of stream ciphers. type chacha20Poly1305Cipher struct { - lengthKey [8]uint32 - contentKey [8]uint32 + lengthKey [32]byte + contentKey [32]byte buf []byte } @@ -656,21 +655,21 @@ func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionA buf: make([]byte, 256), } - for i := range c.contentKey { - c.contentKey[i] = binary.LittleEndian.Uint32(key[i*4 : (i+1)*4]) - } - for i := range c.lengthKey { - c.lengthKey[i] = binary.LittleEndian.Uint32(key[(i+8)*4 : (i+9)*4]) - } + copy(c.contentKey[:], key[:32]) + copy(c.lengthKey[:], key[32:]) return c, nil } func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)} - s := chacha20.New(c.contentKey, nonce) - var polyKey [32]byte + nonce := make([]byte, 12) + binary.BigEndian.PutUint32(nonce[8:], seqNum) + s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) + if err != nil { + return nil, err + } + var polyKey, discardBuf [32]byte s.XORKeyStream(polyKey[:], polyKey[:]) - s.Advance() // skip next 32 bytes + s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes encryptedLength := c.buf[:4] if _, err := io.ReadFull(r, encryptedLength); err != nil { @@ -678,7 +677,11 @@ func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([ } var lenBytes [4]byte - chacha20.New(c.lengthKey, nonce).XORKeyStream(lenBytes[:], encryptedLength) + ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) + if err != nil { + return nil, err + } + ls.XORKeyStream(lenBytes[:], encryptedLength) length := binary.BigEndian.Uint32(lenBytes[:]) if length > maxPacket { @@ -724,11 +727,15 @@ func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([ } func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { - nonce := [3]uint32{0, 0, bits.ReverseBytes32(seqNum)} - s := chacha20.New(c.contentKey, nonce) - var polyKey [32]byte + nonce := make([]byte, 12) + binary.BigEndian.PutUint32(nonce[8:], seqNum) + s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) + if err != nil { + return err + } + var polyKey, 
discardBuf [32]byte s.XORKeyStream(polyKey[:], polyKey[:]) - s.Advance() // skip next 32 bytes + s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes // There is no blocksize, so fall back to multiple of 8 byte // padding, as described in RFC 4253, Sec 6. @@ -748,7 +755,11 @@ func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, r } binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) - chacha20.New(c.lengthKey, nonce).XORKeyStream(c.buf, c.buf[:4]) + ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) + if err != nil { + return err + } + ls.XORKeyStream(c.buf, c.buf[:4]) c.buf[4] = byte(padding) copy(c.buf[5:], payload) packetEnd := 5 + len(payload) + padding diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index 0590070e2..f3265655e 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -36,7 +36,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { // during the authentication phase the client first attempts the "none" method // then any untried methods suggested by the server. - tried := make(map[string]bool) + var tried []string var lastMethods []string sessionID := c.transport.getSessionID() @@ -49,7 +49,9 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { // success return nil } else if ok == authFailure { - tried[auth.method()] = true + if m := auth.method(); !contains(tried, m) { + tried = append(tried, m) + } } if methods == nil { methods = lastMethods @@ -61,7 +63,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { findNext: for _, a := range config.Auth { candidateMethod := a.method() - if tried[candidateMethod] { + if contains(tried, candidateMethod) { continue } for _, meth := range methods { @@ -72,16 +74,16 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { } } } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) + return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) } -func keys(m map[string]bool) []string { - s := make([]string, 0, len(m)) - - for key := range m { - s = append(s, key) +func contains(list []string, e string) bool { + for _, s := range list { + if s == e { + return true + } } - return s + return false } // An AuthMethod represents an instance of an RFC 4252 authentication method. diff --git a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go new file mode 100644 index 000000000..af81d2665 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bcrypt_pbkdf implements bcrypt_pbkdf(3) from OpenBSD. +// +// See https://flak.tedunangst.com/post/bcrypt-pbkdf and +// https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c. +package bcrypt_pbkdf + +import ( + "crypto/sha512" + "errors" + "golang.org/x/crypto/blowfish" +) + +const blockSize = 32 + +// Key derives a key from the password, salt and rounds count, returning a +// []byte of length keyLen that can be used as cryptographic key. 
+func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { + if rounds < 1 { + return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") + } + if len(password) == 0 { + return nil, errors.New("bcrypt_pbkdf: empty password") + } + if len(salt) == 0 || len(salt) > 1<<20 { + return nil, errors.New("bcrypt_pbkdf: bad salt length") + } + if keyLen > 1024 { + return nil, errors.New("bcrypt_pbkdf: keyLen is too large") + } + + numBlocks := (keyLen + blockSize - 1) / blockSize + key := make([]byte, numBlocks*blockSize) + + h := sha512.New() + h.Write(password) + shapass := h.Sum(nil) + + shasalt := make([]byte, 0, sha512.Size) + cnt, tmp := make([]byte, 4), make([]byte, blockSize) + for block := 1; block <= numBlocks; block++ { + h.Reset() + h.Write(salt) + cnt[0] = byte(block >> 24) + cnt[1] = byte(block >> 16) + cnt[2] = byte(block >> 8) + cnt[3] = byte(block) + h.Write(cnt) + bcryptHash(tmp, shapass, h.Sum(shasalt)) + + out := make([]byte, blockSize) + copy(out, tmp) + for i := 2; i <= rounds; i++ { + h.Reset() + h.Write(tmp) + bcryptHash(tmp, shapass, h.Sum(shasalt)) + for j := 0; j < len(out); j++ { + out[j] ^= tmp[j] + } + } + + for i, v := range out { + key[i*numBlocks+(block-1)] = v + } + } + return key[:keyLen], nil +} + +var magic = []byte("OxychromaticBlowfishSwatDynamite") + +func bcryptHash(out, shapass, shasalt []byte) { + c, err := blowfish.NewSaltedCipher(shapass, shasalt) + if err != nil { + panic(err) + } + for i := 0; i < 64; i++ { + blowfish.ExpandKey(shasalt, c) + blowfish.ExpandKey(shapass, c) + } + copy(out, magic) + for i := 0; i < 32; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(out[i:i+8], out[i:i+8]) + } + } + // Swap bytes due to different endianness. + for i := 0; i < 32; i += 4 { + out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] + } +} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go index 16072004b..7eedb209f 100644 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -212,7 +212,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha HostKey: hostKeyBytes, Signature: sig, Hash: crypto.SHA1, - }, nil + }, err } // ecdh performs Elliptic Curve Diffie-Hellman key exchange as @@ -572,7 +572,7 @@ func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, e return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil } -func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { +func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { // Send GexRequest kexDHGexRequest := kexDHGexRequestMsg{ MinBits: dhGroupExchangeMinimumBits, @@ -677,7 +677,7 @@ func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshak // Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. // // This is a minimal implementation to satisfy the automated tests. 
-func (gex *dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { +func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { // Receive GexRequest packet, err := c.readPacket() if err != nil { diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index 969804794..31f26349a 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -7,6 +7,8 @@ package ssh import ( "bytes" "crypto" + "crypto/aes" + "crypto/cipher" "crypto/dsa" "crypto/ecdsa" "crypto/elliptic" @@ -25,17 +27,20 @@ import ( "strings" "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" ) // These constants represent the algorithm names for key types supported by this // package. const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" + KeyAlgoRSA = "ssh-rsa" + KeyAlgoDSA = "ssh-dss" + KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" + KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" + KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" + KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" + KeyAlgoED25519 = "ssh-ed25519" + KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" ) // These constants represent non-default signature algorithms that are supported @@ -58,9 +63,13 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err return parseDSA(in) case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: return parseECDSA(in) + case KeyAlgoSKECDSA256: + return parseSKECDSA(in) case KeyAlgoED25519: return parseED25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: + case KeyAlgoSKED25519: + return parseSKEd25519(in) + case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: cert, err := parseCert(in, certToPrivAlgo(algo)) if err != nil { return nil, nil, err @@ -553,9 +562,11 @@ func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { return nil, nil, err } - key := ed25519.PublicKey(w.KeyBytes) + if l := len(w.KeyBytes); l != ed25519.PublicKeySize { + return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) + } - return (ed25519PublicKey)(key), w.Rest, nil + return ed25519PublicKey(w.KeyBytes), w.Rest, nil } func (k ed25519PublicKey) Marshal() []byte { @@ -573,9 +584,11 @@ func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { if sig.Format != k.Type() { return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) } + if l := len(k); l != ed25519.PublicKeySize { + return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) + } - edKey := (ed25519.PublicKey)(k) - if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { + if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok { return errors.New("ssh: signature did not verify") } @@ -685,6 +698,224 @@ func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { return (*ecdsa.PublicKey)(k) } +// skFields holds the additional fields present in U2F/FIDO2 signatures. +// See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details. 
+type skFields struct { + // Flags contains U2F/FIDO2 flags such as 'user present' + Flags byte + // Counter is a monotonic signature counter which can be + // used to detect concurrent use of a private key, should + // it be extracted from hardware. + Counter uint32 +} + +type skECDSAPublicKey struct { + // application is a URL-like string, typically "ssh:" for SSH. + // see openssh/PROTOCOL.u2f for details. + application string + ecdsa.PublicKey +} + +func (k *skECDSAPublicKey) Type() string { + return KeyAlgoSKECDSA256 +} + +func (k *skECDSAPublicKey) nistID() string { + return "nistp256" +} + +func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Application string + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(skECDSAPublicKey) + key.application = w.Application + + if w.Curve != "nistp256" { + return nil, nil, errors.New("ssh: unsupported curve") + } + key.Curve = elliptic.P256() + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + + return key, w.Rest, nil +} + +func (k *skECDSAPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. + keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) + w := struct { + Name string + ID string + Key []byte + Application string + }{ + k.Type(), + k.nistID(), + keyBytes, + k.application, + } + + return Marshal(&w) +} + +func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + + h := ecHash(k.Curve).New() + h.Write([]byte(k.application)) + appDigest := h.Sum(nil) + + h.Reset() + h.Write(data) + dataDigest := h.Sum(nil) + + var ecSig struct { + R *big.Int + S *big.Int + } + if err := Unmarshal(sig.Blob, &ecSig); err != nil { + return err + } + + var skf skFields + if err := Unmarshal(sig.Rest, &skf); err != nil { + return err + } + + blob := struct { + ApplicationDigest []byte `ssh:"rest"` + Flags byte + Counter uint32 + MessageDigest []byte `ssh:"rest"` + }{ + appDigest, + skf.Flags, + skf.Counter, + dataDigest, + } + + original := Marshal(blob) + + h.Reset() + h.Write(original) + digest := h.Sum(nil) + + if ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +type skEd25519PublicKey struct { + // application is a URL-like string, typically "ssh:" for SSH. + // see openssh/PROTOCOL.u2f for details. 
+ application string + ed25519.PublicKey +} + +func (k *skEd25519PublicKey) Type() string { + return KeyAlgoSKED25519 +} + +func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Application string + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if l := len(w.KeyBytes); l != ed25519.PublicKeySize { + return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) + } + + key := new(skEd25519PublicKey) + key.application = w.Application + key.PublicKey = ed25519.PublicKey(w.KeyBytes) + + return key, w.Rest, nil +} + +func (k *skEd25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + Application string + }{ + KeyAlgoSKED25519, + []byte(k.PublicKey), + k.application, + } + return Marshal(&w) +} + +func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + if l := len(k.PublicKey); l != ed25519.PublicKeySize { + return fmt.Errorf("invalid size %d for Ed25519 public key", l) + } + + h := sha256.New() + h.Write([]byte(k.application)) + appDigest := h.Sum(nil) + + h.Reset() + h.Write(data) + dataDigest := h.Sum(nil) + + var edSig struct { + Signature []byte `ssh:"rest"` + } + + if err := Unmarshal(sig.Blob, &edSig); err != nil { + return err + } + + var skf skFields + if err := Unmarshal(sig.Rest, &skf); err != nil { + return err + } + + blob := struct { + ApplicationDigest []byte `ssh:"rest"` + Flags byte + Counter uint32 + MessageDigest []byte `ssh:"rest"` + }{ + appDigest, + skf.Flags, + skf.Counter, + dataDigest, + } + + original := Marshal(blob) + + if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + // NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, // *ecdsa.PrivateKey or any other crypto.Signer and returns a // corresponding Signer instance. ECDSA keys must use P-256, P-384 or @@ -830,14 +1061,18 @@ func NewPublicKey(key interface{}) (PublicKey, error) { case *dsa.PublicKey: return (*dsaPublicKey)(key), nil case ed25519.PublicKey: - return (ed25519PublicKey)(key), nil + if l := len(key); l != ed25519.PublicKeySize { + return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) + } + return ed25519PublicKey(key), nil default: return nil, fmt.Errorf("ssh: unsupported key type %T", key) } } // ParsePrivateKey returns a Signer from a PEM encoded private key. It supports -// the same keys as ParseRawPrivateKey. +// the same keys as ParseRawPrivateKey. If the private key is encrypted, it +// will return a PassphraseMissingError. func ParsePrivateKey(pemBytes []byte) (Signer, error) { key, err := ParseRawPrivateKey(pemBytes) if err != nil { @@ -850,8 +1085,8 @@ func ParsePrivateKey(pemBytes []byte) (Signer, error) { // ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private // key and passphrase. It supports the same keys as // ParseRawPrivateKeyWithPassphrase. 
-func ParsePrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (Signer, error) { - key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase) +func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) { + key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase) if err != nil { return nil, err } @@ -867,8 +1102,21 @@ func encryptedBlock(block *pem.Block) bool { return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") } +// A PassphraseMissingError indicates that parsing this private key requires a +// passphrase. Use ParsePrivateKeyWithPassphrase. +type PassphraseMissingError struct { + // PublicKey will be set if the private key format includes an unencrypted + // public key along with the encrypted private key. + PublicKey PublicKey +} + +func (*PassphraseMissingError) Error() string { + return "ssh: this private key is passphrase protected" +} + // ParseRawPrivateKey returns a private key from a PEM encoded private key. It -// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. +// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the +// private key is encrypted, it will return a PassphraseMissingError. func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { block, _ := pem.Decode(pemBytes) if block == nil { @@ -876,7 +1124,7 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { } if encryptedBlock(block) { - return nil, errors.New("ssh: cannot decode encrypted private keys") + return nil, &PassphraseMissingError{} } switch block.Type { @@ -890,33 +1138,35 @@ func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { case "DSA PRIVATE KEY": return ParseDSAPrivateKey(block.Bytes) case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(block.Bytes) + return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey) default: return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) } } // ParseRawPrivateKeyWithPassphrase returns a private key decrypted with -// passphrase from a PEM encoded private key. If wrong passphrase, return -// x509.IncorrectPasswordError. -func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, error) { +// passphrase from a PEM encoded private key. If the passphrase is wrong, it +// will return x509.IncorrectPasswordError. 
+func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) { block, _ := pem.Decode(pemBytes) if block == nil { return nil, errors.New("ssh: no key found") } - buf := block.Bytes - if encryptedBlock(block) { - if x509.IsEncryptedPEMBlock(block) { - var err error - buf, err = x509.DecryptPEMBlock(block, passPhrase) - if err != nil { - if err == x509.IncorrectPasswordError { - return nil, err - } - return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) - } + if block.Type == "OPENSSH PRIVATE KEY" { + return parseOpenSSHPrivateKey(block.Bytes, passphraseProtectedOpenSSHKey(passphrase)) + } + + if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) { + return nil, errors.New("ssh: not an encrypted key") + } + + buf, err := x509.DecryptPEMBlock(block, passphrase) + if err != nil { + if err == x509.IncorrectPasswordError { + return nil, err } + return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) } switch block.Type { @@ -926,8 +1176,6 @@ func ParseRawPrivateKeyWithPassphrase(pemBytes, passPhrase []byte) (interface{}, return x509.ParseECPrivateKey(buf) case "DSA PRIVATE KEY": return ParseDSAPrivateKey(buf) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(buf) default: return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) } @@ -965,9 +1213,68 @@ func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { }, nil } -// Implemented based on the documentation at -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key -func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { +func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { + if kdfName != "none" || cipherName != "none" { + return nil, &PassphraseMissingError{} + } + if kdfOpts != "" { + return nil, errors.New("ssh: invalid openssh private key") + } + return privKeyBlock, nil +} + +func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { + return func(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { + if kdfName == "none" || cipherName == "none" { + return nil, errors.New("ssh: key is not password protected") + } + if kdfName != "bcrypt" { + return nil, fmt.Errorf("ssh: unknown KDF %q, only supports %q", kdfName, "bcrypt") + } + + var opts struct { + Salt string + Rounds uint32 + } + if err := Unmarshal([]byte(kdfOpts), &opts); err != nil { + return nil, err + } + + k, err := bcrypt_pbkdf.Key(passphrase, []byte(opts.Salt), int(opts.Rounds), 32+16) + if err != nil { + return nil, err + } + key, iv := k[:32], k[32:] + + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + switch cipherName { + case "aes256-ctr": + ctr := cipher.NewCTR(c, iv) + ctr.XORKeyStream(privKeyBlock, privKeyBlock) + case "aes256-cbc": + if len(privKeyBlock)%c.BlockSize() != 0 { + return nil, fmt.Errorf("ssh: invalid encrypted private key length, not a multiple of the block size") + } + cbc := cipher.NewCBCDecrypter(c, iv) + cbc.CryptBlocks(privKeyBlock, privKeyBlock) + default: + return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q or %q", cipherName, "aes256-ctr", "aes256-cbc") + } + + return privKeyBlock, nil + } +} + +type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) + +// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt +// function to unwrap the encrypted portion. 
unencryptedOpenSSHKey can be used +// as the decrypt function to parse an unencrypted private key. See +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. +func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { const magic = "openssh-key-v1\x00" if len(key) < len(magic) || string(key[:len(magic)]) != magic { return nil, errors.New("ssh: invalid openssh private key format") @@ -986,9 +1293,22 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { if err := Unmarshal(remaining, &w); err != nil { return nil, err } + if w.NumKeys != 1 { + // We only support single key files, and so does OpenSSH. + // https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171 + return nil, errors.New("ssh: multi-key files are not supported") + } - if w.KdfName != "none" || w.CipherName != "none" { - return nil, errors.New("ssh: cannot decode encrypted private keys") + privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock) + if err != nil { + if err, ok := err.(*PassphraseMissingError); ok { + pub, errPub := ParsePublicKey(w.PubKey) + if errPub != nil { + return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub) + } + err.PublicKey = pub + } + return nil, err } pk1 := struct { @@ -998,15 +1318,13 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { Rest []byte `ssh:"rest"` }{} - if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { - return nil, err - } - - if pk1.Check1 != pk1.Check2 { - return nil, errors.New("ssh: checkint mismatch") + if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { + if w.CipherName != "none" { + return nil, x509.IncorrectPasswordError + } + return nil, errors.New("ssh: malformed OpenSSH key") } - // we only handle ed25519 and rsa keys currently switch pk1.Keytype { case KeyAlgoRSA: // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 @@ -1025,10 +1343,8 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { return nil, err } - for i, b := range key.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err } pk := &rsa.PrivateKey{ @@ -1063,20 +1379,78 @@ func parseOpenSSHPrivateKey(key []byte) (crypto.PrivateKey, error) { return nil, errors.New("ssh: private key unexpected length") } - for i, b := range key.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err } pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) copy(pk, key.Priv) return &pk, nil + case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: + key := struct { + Curve string + Pub []byte + D *big.Int + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(pk1.Rest, &key); err != nil { + return nil, err + } + + if err := checkOpenSSHKeyPadding(key.Pad); err != nil { + return nil, err + } + + var curve elliptic.Curve + switch key.Curve { + case "nistp256": + curve = elliptic.P256() + case "nistp384": + curve = elliptic.P384() + case "nistp521": + curve = elliptic.P521() + default: + return nil, errors.New("ssh: unhandled elliptic curve: " + key.Curve) + } + + X, Y := elliptic.Unmarshal(curve, key.Pub) + if X == nil || Y == nil { + return nil, errors.New("ssh: failed to unmarshal public key") + } + + if key.D.Cmp(curve.Params().N) >= 0 { + return nil, 
errors.New("ssh: scalar is out of range") + } + + x, y := curve.ScalarBaseMult(key.D.Bytes()) + if x.Cmp(X) != 0 || y.Cmp(Y) != 0 { + return nil, errors.New("ssh: public key does not match private key") + } + + return &ecdsa.PrivateKey{ + PublicKey: ecdsa.PublicKey{ + Curve: curve, + X: X, + Y: Y, + }, + D: key.D, + }, nil default: return nil, errors.New("ssh: unhandled key type") } } +func checkOpenSSHKeyPadding(pad []byte) error { + for i, b := range pad { + if int(b) != i+1 { + return errors.New("ssh: padding not as expected") + } + } + return nil +} + // FingerprintLegacyMD5 returns the user presentation of the key's // fingerprint as described by RFC 4716 section 4. func FingerprintLegacyMD5(pubKey PublicKey) string { diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go index f19016270..9654c0186 100644 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ b/vendor/golang.org/x/crypto/ssh/mux.go @@ -240,7 +240,7 @@ func (m *mux) onePacket() error { id := binary.BigEndian.Uint32(packet[1:]) ch := m.chanList.getChan(id) if ch == nil { - return fmt.Errorf("ssh: invalid channel %d", id) + return m.handleUnknownChannelPacket(id, packet) } return ch.handlePacket(packet) @@ -328,3 +328,24 @@ func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) } } + +func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + // RFC 4254 section 5.4 says unrecognized channel requests should + // receive a failure response. + case *channelRequestMsg: + if msg.WantReply { + return m.sendMessage(channelRequestFailureMsg{ + PeersID: msg.PeersID, + }) + } + return nil + default: + return fmt.Errorf("ssh: invalid channel %d", id) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 7a5a1d7ad..7d42a8c88 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -284,8 +284,8 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) func isAcceptableAlgo(algo string) bool { switch algo { - case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: + case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: return true } return false diff --git a/vendor/golang.org/x/lint/.travis.yml b/vendor/golang.org/x/lint/.travis.yml new file mode 100644 index 000000000..50553ebd0 --- /dev/null +++ b/vendor/golang.org/x/lint/.travis.yml @@ -0,0 +1,19 @@ +sudo: false +language: go +go: + - 1.10.x + - 1.11.x + - master + +go_import_path: golang.org/x/lint + +install: + - go get -t -v ./... + +script: + - go test -v -race ./... 
+ +matrix: + allow_failures: + - go: master + fast_finish: true diff --git a/vendor/honnef.co/go/tools/simple/CONTRIBUTING.md b/vendor/golang.org/x/lint/CONTRIBUTING.md similarity index 54% rename from vendor/honnef.co/go/tools/simple/CONTRIBUTING.md rename to vendor/golang.org/x/lint/CONTRIBUTING.md index c54c6c50a..1fadda62d 100644 --- a/vendor/honnef.co/go/tools/simple/CONTRIBUTING.md +++ b/vendor/golang.org/x/lint/CONTRIBUTING.md @@ -1,15 +1,15 @@ -# Contributing to gosimple +# Contributing to Golint ## Before filing an issue: -### Are you having trouble building gosimple? +### Are you having trouble building golint? Check you have the latest version of its dependencies. Run ``` -go get -u honnef.co/go/tools/simple +go get -u golang.org/x/lint/golint ``` If you still have problems, consider searching for existing issues before filing a new issue. ## Before sending a pull request: -Have you understood the purpose of gosimple? Make sure to carefully read `README`. +Have you understood the purpose of golint? Make sure to carefully read `README`. diff --git a/vendor/github.com/agl/ed25519/LICENSE b/vendor/golang.org/x/lint/LICENSE similarity index 96% rename from vendor/github.com/agl/ed25519/LICENSE rename to vendor/golang.org/x/lint/LICENSE index 744875676..65d761bc9 100644 --- a/vendor/github.com/agl/ed25519/LICENSE +++ b/vendor/golang.org/x/lint/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2013 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/golang.org/x/lint/README.md b/vendor/golang.org/x/lint/README.md new file mode 100644 index 000000000..4968b13ae --- /dev/null +++ b/vendor/golang.org/x/lint/README.md @@ -0,0 +1,88 @@ +Golint is a linter for Go source code. + +[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint) + +## Installation + +Golint requires a +[supported release of Go](https://golang.org/doc/devel/release.html#policy). + + go get -u golang.org/x/lint/golint + +To find out where `golint` was installed you can run `go list -f {{.Target}} golang.org/x/lint/golint`. For `golint` to be used globally add that directory to the `$PATH` environment setting. + +## Usage + +Invoke `golint` with one or more filenames, directories, or packages named +by its import path. Golint uses the same +[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as +the `go` command and therefore +also supports relative import paths like `./...`. Additionally the `...` +wildcard can be used as suffix on relative and absolute file paths to recurse +into them. + +The output of this tool is a list of suggestions in Vim quickfix format, +which is accepted by lots of different editors. + +## Purpose + +Golint differs from gofmt. Gofmt reformats Go source code, whereas +golint prints out style mistakes. + +Golint differs from govet. Govet is concerned with correctness, whereas +golint is concerned with coding style. Golint is in use at Google, and it +seeks to match the accepted style of the open source Go project. + +The suggestions made by golint are exactly that: suggestions. +Golint is not perfect, and has both false positives and false negatives. +Do not treat its output as a gold standard. We will not be adding pragmas +or other knobs to suppress specific warnings, so do not expect or require +code to be completely "lint-free". 
+In short, this tool is not, and will never be, trustworthy enough for its +suggestions to be enforced automatically, for example as part of a build process. +Golint makes suggestions for many of the mechanically checkable items listed in +[Effective Go](https://golang.org/doc/effective_go.html) and the +[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments). + +## Scope + +Golint is meant to carry out the stylistic conventions put forth in +[Effective Go](https://golang.org/doc/effective_go.html) and +[CodeReviewComments](https://golang.org/wiki/CodeReviewComments). +Changes that are not aligned with those documents will not be considered. + +## Contributions + +Contributions to this project are welcome provided they are [in scope](#scope), +though please send mail before starting work on anything major. +Contributors retain their copyright, so we need you to fill out +[a short form](https://developers.google.com/open-source/cla/individual) +before we can accept your contribution. + +## Vim + +Add this to your ~/.vimrc: + + set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim + +If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. + +Running `:Lint` will run golint on the current file and populate the quickfix list. + +Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w` + + autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow + + +## Emacs + +Add this to your `.emacs` file: + + (add-to-list 'load-path (concat (getenv "GOPATH") "/src/golang.org/x/lint/misc/emacs/")) + (require 'golint) + +If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. + +Running M-x golint will run golint on the current file. + +For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html). 
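The README above describes the golint command line; the same checks can also be driven from Go through the lint package vendored later in this patch (see the Linter and Problem types in lint.go below). A small sketch using the 0.8 confidence threshold that golint.go applies by default; the input file name is a placeholder:

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        "golang.org/x/lint"
    )

    func main() {
        const filename = "example.go" // placeholder input file
        src, err := ioutil.ReadFile(filename)
        if err != nil {
            log.Fatal(err)
        }

        l := new(lint.Linter)
        problems, err := l.Lint(filename, src)
        if err != nil {
            log.Fatal(err)
        }
        // Mirror golint's default -min_confidence of 0.8.
        for _, p := range problems {
            if p.Confidence >= 0.8 {
                fmt.Printf("%v: %s\n", p.Position, p.Text)
            }
        }
    }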
diff --git a/vendor/golang.org/x/lint/go.mod b/vendor/golang.org/x/lint/go.mod new file mode 100644 index 000000000..b32309c45 --- /dev/null +++ b/vendor/golang.org/x/lint/go.mod @@ -0,0 +1,5 @@ +module golang.org/x/lint + +go 1.11 + +require golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 diff --git a/vendor/golang.org/x/lint/go.sum b/vendor/golang.org/x/lint/go.sum new file mode 100644 index 000000000..2ad45cae2 --- /dev/null +++ b/vendor/golang.org/x/lint/go.sum @@ -0,0 +1,12 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/golang.org/x/lint/golint/golint.go b/vendor/golang.org/x/lint/golint/golint.go new file mode 100644 index 000000000..ac024b6d2 --- /dev/null +++ b/vendor/golang.org/x/lint/golint/golint.go @@ -0,0 +1,159 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// golint lints the Go source files named on its command line. +package main + +import ( + "flag" + "fmt" + "go/build" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "golang.org/x/lint" +) + +var ( + minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it") + setExitStatus = flag.Bool("set_exit_status", false, "set exit status to 1 if any issues are found") + suggestions int +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, "\tgolint [flags] # runs on package in current directory\n") + fmt.Fprintf(os.Stderr, "\tgolint [flags] [packages]\n") + fmt.Fprintf(os.Stderr, "\tgolint [flags] [directories] # where a '/...' suffix includes all sub-directories\n") + fmt.Fprintf(os.Stderr, "\tgolint [flags] [files] # all must belong to a single package\n") + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() +} + +func main() { + flag.Usage = usage + flag.Parse() + + if flag.NArg() == 0 { + lintDir(".") + } else { + // dirsRun, filesRun, and pkgsRun indicate whether golint is applied to + // directory, file or package targets. The distinction affects which + // checks are run. It is no valid to mix target types. 
+ var dirsRun, filesRun, pkgsRun int + var args []string + for _, arg := range flag.Args() { + if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) { + dirsRun = 1 + for _, dirname := range allPackagesInFS(arg) { + args = append(args, dirname) + } + } else if isDir(arg) { + dirsRun = 1 + args = append(args, arg) + } else if exists(arg) { + filesRun = 1 + args = append(args, arg) + } else { + pkgsRun = 1 + args = append(args, arg) + } + } + + if dirsRun+filesRun+pkgsRun != 1 { + usage() + os.Exit(2) + } + switch { + case dirsRun == 1: + for _, dir := range args { + lintDir(dir) + } + case filesRun == 1: + lintFiles(args...) + case pkgsRun == 1: + for _, pkg := range importPaths(args) { + lintPackage(pkg) + } + } + } + + if *setExitStatus && suggestions > 0 { + fmt.Fprintf(os.Stderr, "Found %d lint suggestions; failing.\n", suggestions) + os.Exit(1) + } +} + +func isDir(filename string) bool { + fi, err := os.Stat(filename) + return err == nil && fi.IsDir() +} + +func exists(filename string) bool { + _, err := os.Stat(filename) + return err == nil +} + +func lintFiles(filenames ...string) { + files := make(map[string][]byte) + for _, filename := range filenames { + src, err := ioutil.ReadFile(filename) + if err != nil { + fmt.Fprintln(os.Stderr, err) + continue + } + files[filename] = src + } + + l := new(lint.Linter) + ps, err := l.LintFiles(files) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return + } + for _, p := range ps { + if p.Confidence >= *minConfidence { + fmt.Printf("%v: %s\n", p.Position, p.Text) + suggestions++ + } + } +} + +func lintDir(dirname string) { + pkg, err := build.ImportDir(dirname, 0) + lintImportedPackage(pkg, err) +} + +func lintPackage(pkgname string) { + pkg, err := build.Import(pkgname, ".", 0) + lintImportedPackage(pkg, err) +} + +func lintImportedPackage(pkg *build.Package, err error) { + if err != nil { + if _, nogo := err.(*build.NoGoError); nogo { + // Don't complain if the failure is due to no Go source files. + return + } + fmt.Fprintln(os.Stderr, err) + return + } + + var files []string + files = append(files, pkg.GoFiles...) + files = append(files, pkg.CgoFiles...) + files = append(files, pkg.TestGoFiles...) + if pkg.Dir != "." { + for i, f := range files { + files[i] = filepath.Join(pkg.Dir, f) + } + } + // TODO(dsymonds): Do foo_test too (pkg.XTestGoFiles) + + lintFiles(files...) +} diff --git a/vendor/golang.org/x/lint/golint/import.go b/vendor/golang.org/x/lint/golint/import.go new file mode 100644 index 000000000..2ba9dea77 --- /dev/null +++ b/vendor/golang.org/x/lint/golint/import.go @@ -0,0 +1,309 @@ +package main + +/* + +This file holds a direct copy of the import path matching code of +https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be +replaced when https://golang.org/issue/8768 is resolved. + +It has been updated to follow upstream changes in a few ways. + +*/ + +import ( + "fmt" + "go/build" + "log" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" +) + +var ( + buildContext = build.Default + goroot = filepath.Clean(runtime.GOROOT()) + gorootSrc = filepath.Join(goroot, "src") +) + +// importPathsNoDotExpansion returns the import paths to use for the given +// command line, but it does no ... expansion. 
+func importPathsNoDotExpansion(args []string) []string { + if len(args) == 0 { + return []string{"."} + } + var out []string + for _, a := range args { + // Arguments are supposed to be import paths, but + // as a courtesy to Windows developers, rewrite \ to / + // in command-line arguments. Handles .\... and so on. + if filepath.Separator == '\\' { + a = strings.Replace(a, `\`, `/`, -1) + } + + // Put argument in canonical form, but preserve leading ./. + if strings.HasPrefix(a, "./") { + a = "./" + path.Clean(a) + if a == "./." { + a = "." + } + } else { + a = path.Clean(a) + } + if a == "all" || a == "std" { + out = append(out, allPackages(a)...) + continue + } + out = append(out, a) + } + return out +} + +// importPaths returns the import paths to use for the given command line. +func importPaths(args []string) []string { + args = importPathsNoDotExpansion(args) + var out []string + for _, a := range args { + if strings.Contains(a, "...") { + if build.IsLocalImport(a) { + out = append(out, allPackagesInFS(a)...) + } else { + out = append(out, allPackages(a)...) + } + continue + } + out = append(out, a) + } + return out +} + +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +func matchPattern(pattern string) func(name string) bool { + re := regexp.QuoteMeta(pattern) + re = strings.Replace(re, `\.\.\.`, `.*`, -1) + // Special case: foo/... matches foo too. + if strings.HasSuffix(re, `/.*`) { + re = re[:len(re)-len(`/.*`)] + `(/.*)?` + } + reg := regexp.MustCompile(`^` + re + `$`) + return func(name string) bool { + return reg.MatchString(name) + } +} + +// hasPathPrefix reports whether the path s begins with the +// elements in prefix. +func hasPathPrefix(s, prefix string) bool { + switch { + default: + return false + case len(s) == len(prefix): + return s == prefix + case len(s) > len(prefix): + if prefix != "" && prefix[len(prefix)-1] == '/' { + return strings.HasPrefix(s, prefix) + } + return s[len(prefix)] == '/' && s[:len(prefix)] == prefix + } +} + +// treeCanMatchPattern(pattern)(name) reports whether +// name or children of name can possibly match pattern. +// Pattern is the same limited glob accepted by matchPattern. +func treeCanMatchPattern(pattern string) func(name string) bool { + wildCard := false + if i := strings.Index(pattern, "..."); i >= 0 { + wildCard = true + pattern = pattern[:i] + } + return func(name string) bool { + return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || + wildCard && strings.HasPrefix(name, pattern) + } +} + +// allPackages returns all the packages that can be found +// under the $GOPATH directories and $GOROOT matching pattern. +// The pattern is either "all" (all packages), "std" (standard packages) +// or a path including "...". 
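The limited glob accepted by matchPattern above ("..." means "any string", and a trailing "/..." also matches the bare prefix) is easiest to see on a few inputs. The helper below simply mirrors that transformation so the behaviour can be checked standalone; it is an illustration, not part of the vendored code:

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // limitedGlob mirrors matchPattern from import.go: quote the pattern,
    // turn "..." into ".*", and let "foo/..." also match "foo" itself.
    func limitedGlob(pattern string) func(name string) bool {
        re := regexp.QuoteMeta(pattern)
        re = strings.Replace(re, `\.\.\.`, `.*`, -1)
        if strings.HasSuffix(re, `/.*`) {
            re = re[:len(re)-len(`/.*`)] + `(/.*)?`
        }
        return regexp.MustCompile(`^` + re + `$`).MatchString
    }

    func main() {
        match := limitedGlob("encoding/...")
        fmt.Println(match("encoding/json")) // true
        fmt.Println(match("encoding"))      // true, via the special case for a trailing "/..."
        fmt.Println(match("encodings"))     // false
    }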
+func allPackages(pattern string) []string { + pkgs := matchPackages(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +func matchPackages(pattern string) []string { + match := func(string) bool { return true } + treeCanMatch := func(string) bool { return true } + if pattern != "all" && pattern != "std" { + match = matchPattern(pattern) + treeCanMatch = treeCanMatchPattern(pattern) + } + + have := map[string]bool{ + "builtin": true, // ignore pseudo-package that exists only for documentation + } + if !buildContext.CgoEnabled { + have["runtime/cgo"] = true // ignore during walk + } + var pkgs []string + + // Commands + cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator) + filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == cmd { + return nil + } + name := path[len(cmd):] + if !treeCanMatch(name) { + return filepath.SkipDir + } + // Commands are all in cmd/, not in subdirectories. + if strings.Contains(name, string(filepath.Separator)) { + return filepath.SkipDir + } + + // We use, e.g., cmd/gofmt as the pseudo import path for gofmt. + name = "cmd/" + name + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = buildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + + for _, src := range buildContext.SrcDirs() { + if (pattern == "std" || pattern == "cmd") && src != gorootSrc { + continue + } + src = filepath.Clean(src) + string(filepath.Separator) + root := src + if pattern == "cmd" { + root += "cmd" + string(filepath.Separator) + } + filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == src { + return nil + } + + // Avoid .foo, _foo, and testdata directory trees. + _, elem := filepath.Split(path) + if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := filepath.ToSlash(path[len(src):]) + if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") { + // The name "std" is only the standard library. + // If the name is cmd, it's the root of the command tree. + return filepath.SkipDir + } + if !treeCanMatch(name) { + return filepath.SkipDir + } + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = buildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); noGo { + return nil + } + } + pkgs = append(pkgs, name) + return nil + }) + } + return pkgs +} + +// allPackagesInFS is like allPackages but is passed a pattern +// beginning ./ or ../, meaning it should scan the tree rooted +// at the given directory. There are ... in the pattern too. +func allPackagesInFS(pattern string) []string { + pkgs := matchPackagesInFS(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +func matchPackagesInFS(pattern string) []string { + // Find directory to begin the scan. + // Could be smarter but this one optimization + // is enough for now, since ... is usually at the + // end of a path. + i := strings.Index(pattern, "...") + dir, _ := path.Split(pattern[:i]) + + // pattern begins with ./ or ../. + // path.Clean will discard the ./ but not the ../. 
+ // We need to preserve the ./ for pattern matching + // and in the returned import paths. + prefix := "" + if strings.HasPrefix(pattern, "./") { + prefix = "./" + } + match := matchPattern(pattern) + + var pkgs []string + filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() { + return nil + } + if path == dir { + // filepath.Walk starts at dir and recurses. For the recursive case, + // the path is the result of filepath.Join, which calls filepath.Clean. + // The initial case is not Cleaned, though, so we do this explicitly. + // + // This converts a path like "./io/" to "io". Without this step, running + // "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io + // package, because prepending the prefix "./" to the unclean path would + // result in "././io", and match("././io") returns false. + path = filepath.Clean(path) + } + + // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". + _, elem := filepath.Split(path) + dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." + if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := prefix + filepath.ToSlash(path) + if !match(name) { + return nil + } + if _, err = build.ImportDir(path, 0); err != nil { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + return pkgs +} diff --git a/vendor/golang.org/x/lint/golint/importcomment.go b/vendor/golang.org/x/lint/golint/importcomment.go new file mode 100644 index 000000000..d5b32f734 --- /dev/null +++ b/vendor/golang.org/x/lint/golint/importcomment.go @@ -0,0 +1,13 @@ +// Copyright (c) 2018 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// +build go1.12 + +// Require use of the correct import path only for Go 1.12+ users, so +// any breakages coincide with people updating their CI configs or +// whatnot. + +package main // import "golang.org/x/lint/golint" diff --git a/vendor/golang.org/x/lint/lint.go b/vendor/golang.org/x/lint/lint.go new file mode 100644 index 000000000..7d813e061 --- /dev/null +++ b/vendor/golang.org/x/lint/lint.go @@ -0,0 +1,1615 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package lint contains a linter for Go source code. +package lint // import "golang.org/x/lint" + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "regexp" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/gcexportdata" +) + +const styleGuideBase = "https://golang.org/wiki/CodeReviewComments" + +// A Linter lints Go source code. +type Linter struct { +} + +// Problem represents a problem in some source code. 
+type Problem struct { + Position token.Position // position in source file + Text string // the prose that describes the problem + Link string // (optional) the link to the style guide for the problem + Confidence float64 // a value in (0,1] estimating the confidence in this problem's correctness + LineText string // the source line + Category string // a short name for the general category of the problem + + // If the problem has a suggested fix (the minority case), + // ReplacementLine is a full replacement for the relevant line of the source file. + ReplacementLine string +} + +func (p *Problem) String() string { + if p.Link != "" { + return p.Text + "\n\n" + p.Link + } + return p.Text +} + +type byPosition []Problem + +func (p byPosition) Len() int { return len(p) } +func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p byPosition) Less(i, j int) bool { + pi, pj := p[i].Position, p[j].Position + + if pi.Filename != pj.Filename { + return pi.Filename < pj.Filename + } + if pi.Line != pj.Line { + return pi.Line < pj.Line + } + if pi.Column != pj.Column { + return pi.Column < pj.Column + } + + return p[i].Text < p[j].Text +} + +// Lint lints src. +func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) { + return l.LintFiles(map[string][]byte{filename: src}) +} + +// LintFiles lints a set of files of a single package. +// The argument is a map of filename to source. +func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) { + pkg := &pkg{ + fset: token.NewFileSet(), + files: make(map[string]*file), + } + var pkgName string + for filename, src := range files { + if isGenerated(src) { + continue // See issue #239 + } + f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments) + if err != nil { + return nil, err + } + if pkgName == "" { + pkgName = f.Name.Name + } else if f.Name.Name != pkgName { + return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName) + } + pkg.files[filename] = &file{ + pkg: pkg, + f: f, + fset: pkg.fset, + src: src, + filename: filename, + } + } + if len(pkg.files) == 0 { + return nil, nil + } + return pkg.lint(), nil +} + +var ( + genHdr = []byte("// Code generated ") + genFtr = []byte(" DO NOT EDIT.") +) + +// isGenerated reports whether the source file is generated code +// according the rules from https://golang.org/s/generatedcode. +func isGenerated(src []byte) bool { + sc := bufio.NewScanner(bytes.NewReader(src)) + for sc.Scan() { + b := sc.Bytes() + if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) { + return true + } + } + return false +} + +// pkg represents a package being linted. +type pkg struct { + fset *token.FileSet + files map[string]*file + + typesPkg *types.Package + typesInfo *types.Info + + // sortable is the set of types in the package that implement sort.Interface. + sortable map[string]bool + // main is whether this is a "main" package. + main bool + + problems []Problem +} + +func (p *pkg) lint() []Problem { + if err := p.typeCheck(); err != nil { + /* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages. + if e, ok := err.(types.Error); ok { + pos := p.fset.Position(e.Pos) + conf := 1.0 + if strings.Contains(e.Msg, "can't find import: ") { + // Golint is probably being run in a context that doesn't support + // typechecking (e.g. package files aren't found), so don't warn about it. 
+ conf = 0 + } + if conf > 0 { + p.errorfAt(pos, conf, category("typechecking"), e.Msg) + } + + // TODO(dsymonds): Abort if !e.Soft? + } + */ + } + + p.scanSortable() + p.main = p.isMain() + + for _, f := range p.files { + f.lint() + } + + sort.Sort(byPosition(p.problems)) + + return p.problems +} + +// file represents a file being linted. +type file struct { + pkg *pkg + f *ast.File + fset *token.FileSet + src []byte + filename string +} + +func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") } + +func (f *file) lint() { + f.lintPackageComment() + f.lintImports() + f.lintBlankImports() + f.lintExported() + f.lintNames() + f.lintElses() + f.lintRanges() + f.lintErrorf() + f.lintErrors() + f.lintErrorStrings() + f.lintReceiverNames() + f.lintIncDec() + f.lintErrorReturn() + f.lintUnexportedReturn() + f.lintTimeNames() + f.lintContextKeyTypes() + f.lintContextArgs() +} + +type link string +type category string + +// The variadic arguments may start with link and category types, +// and must end with a format string and any arguments. +// It returns the new Problem. +func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem { + pos := f.fset.Position(n.Pos()) + if pos.Filename == "" { + pos.Filename = f.filename + } + return f.pkg.errorfAt(pos, confidence, args...) +} + +func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem { + problem := Problem{ + Position: pos, + Confidence: confidence, + } + if pos.Filename != "" { + // The file might not exist in our mapping if a //line directive was encountered. + if f, ok := p.files[pos.Filename]; ok { + problem.LineText = srcLine(f.src, pos) + } + } + +argLoop: + for len(args) > 1 { // always leave at least the format string in args + switch v := args[0].(type) { + case link: + problem.Link = string(v) + case category: + problem.Category = string(v) + default: + break argLoop + } + args = args[1:] + } + + problem.Text = fmt.Sprintf(args[0].(string), args[1:]...) + + p.problems = append(p.problems, problem) + return &p.problems[len(p.problems)-1] +} + +var newImporter = func(fset *token.FileSet) types.ImporterFrom { + return gcexportdata.NewImporter(fset, make(map[string]*types.Package)) +} + +func (p *pkg) typeCheck() error { + config := &types.Config{ + // By setting a no-op error reporter, the type checker does as much work as possible. + Error: func(error) {}, + Importer: newImporter(p.fset), + } + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + } + var anyFile *file + var astFiles []*ast.File + for _, f := range p.files { + anyFile = f + astFiles = append(astFiles, f.f) + } + pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info) + // Remember the typechecking info, even if config.Check failed, + // since we will get partial information. + p.typesPkg = pkg + p.typesInfo = info + return err +} + +func (p *pkg) typeOf(expr ast.Expr) types.Type { + if p.typesInfo == nil { + return nil + } + return p.typesInfo.TypeOf(expr) +} + +func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool { + n, ok := typ.(*types.Named) + if !ok { + return false + } + tn := n.Obj() + return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name +} + +// scopeOf returns the tightest scope encompassing id. 
+func (p *pkg) scopeOf(id *ast.Ident) *types.Scope { + var scope *types.Scope + if obj := p.typesInfo.ObjectOf(id); obj != nil { + scope = obj.Parent() + } + if scope == p.typesPkg.Scope() { + // We were given a top-level identifier. + // Use the file-level scope instead of the package-level scope. + pos := id.Pos() + for _, f := range p.files { + if f.f.Pos() <= pos && pos < f.f.End() { + scope = p.typesInfo.Scopes[f.f] + break + } + } + } + return scope +} + +func (p *pkg) scanSortable() { + p.sortable = make(map[string]bool) + + // bitfield for which methods exist on each type. + const ( + Len = 1 << iota + Less + Swap + ) + nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap} + has := make(map[string]int) + for _, f := range p.files { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return true + } + // TODO(dsymonds): We could check the signature to be more precise. + recv := receiverType(fn) + if i, ok := nmap[fn.Name.Name]; ok { + has[recv] |= i + } + return false + }) + } + for typ, ms := range has { + if ms == Len|Less|Swap { + p.sortable[typ] = true + } + } +} + +func (p *pkg) isMain() bool { + for _, f := range p.files { + if f.isMain() { + return true + } + } + return false +} + +func (f *file) isMain() bool { + if f.f.Name.Name == "main" { + return true + } + return false +} + +// lintPackageComment checks package comments. It complains if +// there is no package comment, or if it is not of the right form. +// This has a notable false positive in that a package comment +// could rightfully appear in a different file of the same package, +// but that's not easy to fix since this linter is file-oriented. +func (f *file) lintPackageComment() { + if f.isTest() { + return + } + + const ref = styleGuideBase + "#package-comments" + prefix := "Package " + f.f.Name.Name + " " + + // Look for a detached package comment. + // First, scan for the last comment that occurs before the "package" keyword. + var lastCG *ast.CommentGroup + for _, cg := range f.f.Comments { + if cg.Pos() > f.f.Package { + // Gone past "package" keyword. + break + } + lastCG = cg + } + if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) { + endPos := f.fset.Position(lastCG.End()) + pkgPos := f.fset.Position(f.f.Package) + if endPos.Line+1 < pkgPos.Line { + // There isn't a great place to anchor this error; + // the start of the blank lines between the doc and the package statement + // is at least pointing at the location of the problem. + pos := token.Position{ + Filename: endPos.Filename, + // Offset not set; it is non-trivial, and doesn't appear to be needed. + Line: endPos.Line + 1, + Column: 1, + } + f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement") + return + } + } + + if f.f.Doc == nil { + f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package") + return + } + s := f.f.Doc.Text() + if ts := strings.TrimLeft(s, " \t"); ts != s { + f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space") + s = ts + } + // Only non-main packages need to keep to this form. 
+ if !f.pkg.main && !strings.HasPrefix(s, prefix) { + f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix) + } +} + +// lintBlankImports complains if a non-main package has blank imports that are +// not documented. +func (f *file) lintBlankImports() { + // In package main and in tests, we don't complain about blank imports. + if f.pkg.main || f.isTest() { + return + } + + // The first element of each contiguous group of blank imports should have + // an explanatory comment of some kind. + for i, imp := range f.f.Imports { + pos := f.fset.Position(imp.Pos()) + + if !isBlank(imp.Name) { + continue // Ignore non-blank imports. + } + if i > 0 { + prev := f.f.Imports[i-1] + prevPos := f.fset.Position(prev.Pos()) + if isBlank(prev.Name) && prevPos.Line+1 == pos.Line { + continue // A subsequent blank in a group. + } + } + + // This is the first blank import of a group. + if imp.Doc == nil && imp.Comment == nil { + ref := "" + f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it") + } + } +} + +// lintImports examines import blocks. +func (f *file) lintImports() { + for i, is := range f.f.Imports { + _ = i + if is.Name != nil && is.Name.Name == "." && !f.isTest() { + f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports") + } + + } +} + +const docCommentsLink = styleGuideBase + "#doc-comments" + +// lintExported examines the exported names. +// It complains if any required doc comments are missing, +// or if they are not of the right form. The exact rules are in +// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function +// also tracks the GenDecl structure being traversed to permit +// doc comments for constants to be on top of the const block. +// It also complains if the names stutter when combined with +// the package name. +func (f *file) lintExported() { + if f.isTest() { + return + } + + var lastGen *ast.GenDecl // last GenDecl entered. + + // Set of GenDecls that have already had missing comments flagged. + genDeclMissingComments := make(map[*ast.GenDecl]bool) + + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return false + } + // token.CONST, token.TYPE or token.VAR + lastGen = v + return true + case *ast.FuncDecl: + f.lintFuncDoc(v) + if v.Recv == nil { + // Only check for stutter on functions, not methods. + // Method names are not used package-qualified. + f.checkStutter(v.Name, "func") + } + // Don't proceed inside funcs. + return false + case *ast.TypeSpec: + // inside a GenDecl, which usually has the doc + doc := v.Doc + if doc == nil { + doc = lastGen.Doc + } + f.lintTypeDoc(v, doc) + f.checkStutter(v.Name, "type") + // Don't proceed inside types. + return false + case *ast.ValueSpec: + f.lintValueSpecDoc(v, lastGen, genDeclMissingComments) + return false + } + return true + }) +} + +var ( + allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`) + anyCapsRE = regexp.MustCompile(`[A-Z]`) +) + +// knownNameExceptions is a set of names that are known to be exempt from naming checks. +// This is usually because they are constrained by having to match names in the +// standard library. 
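As a concrete illustration of lintImports and lintBlankImports above, the contrived file below would be expected to draw the two warnings quoted at the end. Note that giving the blank import a justifying comment is exactly what silences the second check, so the annotations are kept outside the import block:

    // Package demo is a contrived input for the import checks above.
    package demo

    import (
        . "strings"

        _ "net/http/pprof"
    )

    // Reference the dot import so the file still compiles.
    var _ = ToUpper

    // Expected diagnostics (paraphrased):
    //   should not use dot imports
    //   a blank import should be only in a main or test package, or have a comment justifying it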
+var knownNameExceptions = map[string]bool{ + "LastInsertId": true, // must match database/sql + "kWh": true, +} + +func isInTopLevel(f *ast.File, ident *ast.Ident) bool { + path, _ := astutil.PathEnclosingInterval(f, ident.Pos(), ident.End()) + for _, f := range path { + switch f.(type) { + case *ast.File, *ast.GenDecl, *ast.ValueSpec, *ast.Ident: + continue + } + return false + } + return true +} + +// lintNames examines all names in the file. +// It complains if any use underscores or incorrect known initialisms. +func (f *file) lintNames() { + // Package names need slightly different handling than other names. + if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") { + f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name") + } + if anyCapsRE.MatchString(f.f.Name.Name) { + f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("mixed-caps"), "don't use MixedCaps in package name; %s should be %s", f.f.Name.Name, strings.ToLower(f.f.Name.Name)) + } + + check := func(id *ast.Ident, thing string) { + if id.Name == "_" { + return + } + if knownNameExceptions[id.Name] { + return + } + + // Handle two common styles from other languages that don't belong in Go. + if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") { + capCount := 0 + for _, c := range id.Name { + if 'A' <= c && c <= 'Z' { + capCount++ + } + } + if capCount >= 2 { + f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase") + return + } + } + if thing == "const" || (thing == "var" && isInTopLevel(f.f, id)) { + if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' { + should := string(id.Name[1]+'a'-'A') + id.Name[2:] + f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should) + } + } + + should := lintName(id.Name) + if id.Name == should { + return + } + + if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") { + f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should) + return + } + f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should) + } + checkList := func(fl *ast.FieldList, thing string) { + if fl == nil { + return + } + for _, f := range fl.List { + for _, id := range f.Names { + check(id, thing) + } + } + } + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.AssignStmt: + if v.Tok == token.ASSIGN { + return true + } + for _, exp := range v.Lhs { + if id, ok := exp.(*ast.Ident); ok { + check(id, "var") + } + } + case *ast.FuncDecl: + if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { + return true + } + + thing := "func" + if v.Recv != nil { + thing = "method" + } + + // Exclude naming warnings for functions that are exported to C but + // not exported in the Go API. + // See https://github.com/golang/lint/issues/144. 
+ if ast.IsExported(v.Name.Name) || !isCgoExported(v) { + check(v.Name, thing) + } + + checkList(v.Type.Params, thing+" parameter") + checkList(v.Type.Results, thing+" result") + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return true + } + var thing string + switch v.Tok { + case token.CONST: + thing = "const" + case token.TYPE: + thing = "type" + case token.VAR: + thing = "var" + } + for _, spec := range v.Specs { + switch s := spec.(type) { + case *ast.TypeSpec: + check(s.Name, thing) + case *ast.ValueSpec: + for _, id := range s.Names { + check(id, thing) + } + } + } + case *ast.InterfaceType: + // Do not check interface method names. + // They are often constrainted by the method names of concrete types. + for _, x := range v.Methods.List { + ft, ok := x.Type.(*ast.FuncType) + if !ok { // might be an embedded interface name + continue + } + checkList(ft.Params, "interface method parameter") + checkList(ft.Results, "interface method result") + } + case *ast.RangeStmt: + if v.Tok == token.ASSIGN { + return true + } + if id, ok := v.Key.(*ast.Ident); ok { + check(id, "range var") + } + if id, ok := v.Value.(*ast.Ident); ok { + check(id, "range var") + } + case *ast.StructType: + for _, f := range v.Fields.List { + for _, id := range f.Names { + check(id, "struct field") + } + } + } + return true + }) +} + +// lintName returns a different name if it should be different. +func lintName(name string) (should string) { + // Fast path for simple cases: "_" and all lowercase. + if name == "_" { + return name + } + allLower := true + for _, r := range name { + if !unicode.IsLower(r) { + allLower = false + break + } + } + if allLower { + return name + } + + // Split camelCase at any lower->upper transition, and split on underscores. + // Check each word for common initialisms. + runes := []rune(name) + w, i := 0, 0 // index of start of word, scan + for i+1 <= len(runes) { + eow := false // whether we hit the end of a word + if i+1 == len(runes) { + eow = true + } else if runes[i+1] == '_' { + // underscore; shift the remainder forward over any run of underscores + eow = true + n := 1 + for i+n+1 < len(runes) && runes[i+n+1] == '_' { + n++ + } + + // Leave at most one underscore if the underscore is between two digits + if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) { + n-- + } + + copy(runes[i+1:], runes[i+n+1:]) + runes = runes[:len(runes)-n] + } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) { + // lower->non-lower + eow = true + } + i++ + if !eow { + continue + } + + // [w,i) is a word. + word := string(runes[w:i]) + if u := strings.ToUpper(word); commonInitialisms[u] { + // Keep consistent case, which is lowercase only at the start. + if w == 0 && unicode.IsLower(runes[w]) { + u = strings.ToLower(u) + } + // All the common initialisms are ASCII, + // so we can replace the bytes exactly. + copy(runes[w:], []rune(u)) + } else if w > 0 && strings.ToLower(word) == word { + // already all lowercase, and not the first word, so uppercase the first character. + runes[w] = unicode.ToUpper(runes[w]) + } + w = i + } + return string(runes) +} + +// commonInitialisms is a set of common initialisms. +// Only add entries that are highly unlikely to be non-initialisms. +// For instance, "ID" is fine (Freudian code is rare), but "AND" is not. 
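To make lintName and the initialisms table above concrete, here are a few invented declarations with the renames golint would be expected to suggest noted alongside:

    // Package demo holds contrived names for the naming checks above.
    package demo

    // Expected suggestions (paraphrased):
    var (
        HttpUrl  string // var HttpUrl should be HTTPURL
        user_id  int    // don't use underscores in Go names; var user_id should be userID
        MAX_SIZE = 10   // don't use ALL_CAPS in Go names; use CamelCase
    )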
+var commonInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, +} + +// lintTypeDoc examines the doc comment on a type. +// It complains if they are missing from an exported type, +// or if they are not of the standard form. +func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { + if !ast.IsExported(t.Name.Name) { + return + } + if doc == nil { + f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name) + return + } + + s := doc.Text() + articles := [...]string{"A", "An", "The"} + for _, a := range articles { + if strings.HasPrefix(s, a+" ") { + s = s[len(a)+1:] + break + } + } + if !strings.HasPrefix(s, t.Name.Name+" ") { + f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name) + } +} + +var commonMethods = map[string]bool{ + "Error": true, + "Read": true, + "ServeHTTP": true, + "String": true, + "Write": true, + "Unwrap": true, +} + +// lintFuncDoc examines doc comments on functions and methods. +// It complains if they are missing, or not of the right form. +// It has specific exclusions for well-known methods (see commonMethods above). +func (f *file) lintFuncDoc(fn *ast.FuncDecl) { + if !ast.IsExported(fn.Name.Name) { + // func is unexported + return + } + kind := "function" + name := fn.Name.Name + if fn.Recv != nil && len(fn.Recv.List) > 0 { + // method + kind = "method" + recv := receiverType(fn) + if !ast.IsExported(recv) { + // receiver is unexported + return + } + if commonMethods[name] { + return + } + switch name { + case "Len", "Less", "Swap": + if f.pkg.sortable[recv] { + return + } + } + name = recv + "." + name + } + if fn.Doc == nil { + f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name) + return + } + s := fn.Doc.Text() + prefix := fn.Name.Name + " " + if !strings.HasPrefix(s, prefix) { + f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) + } +} + +// lintValueSpecDoc examines package-global variables and constants. +// It complains if they are not individually declared, +// or if they are not suitably documented in the right form (unless they are in a block that is commented). +func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) { + kind := "var" + if gd.Tok == token.CONST { + kind = "const" + } + + if len(vs.Names) > 1 { + // Check that none are exported except for the first. + for _, n := range vs.Names[1:] { + if ast.IsExported(n.Name) { + f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name) + return + } + } + } + + // Only one name. 
+ name := vs.Names[0].Name + if !ast.IsExported(name) { + return + } + + if vs.Doc == nil && gd.Doc == nil { + if genDeclMissingComments[gd] { + return + } + block := "" + if kind == "const" && gd.Lparen.IsValid() { + block = " (or a comment on this block)" + } + f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block) + genDeclMissingComments[gd] = true + return + } + // If this GenDecl has parens and a comment, we don't check its comment form. + if gd.Lparen.IsValid() && gd.Doc != nil { + return + } + // The relevant text to check will be on either vs.Doc or gd.Doc. + // Use vs.Doc preferentially. + doc := vs.Doc + if doc == nil { + doc = gd.Doc + } + prefix := name + " " + if !strings.HasPrefix(doc.Text(), prefix) { + f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) + } +} + +func (f *file) checkStutter(id *ast.Ident, thing string) { + pkg, name := f.f.Name.Name, id.Name + if !ast.IsExported(name) { + // unexported name + return + } + // A name stutters if the package name is a strict prefix + // and the next character of the name starts a new word. + if len(name) <= len(pkg) { + // name is too short to stutter. + // This permits the name to be the same as the package name. + return + } + if !strings.EqualFold(pkg, name[:len(pkg)]) { + return + } + // We can assume the name is well-formed UTF-8. + // If the next rune after the package name is uppercase or an underscore + // the it's starting a new word and thus this name stutters. + rem := name[len(pkg):] + if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) { + f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem) + } +} + +// zeroLiteral is a set of ast.BasicLit values that are zero values. +// It is not exhaustive. +var zeroLiteral = map[string]bool{ + "false": true, // bool + // runes + `'\x00'`: true, + `'\000'`: true, + // strings + `""`: true, + "``": true, + // numerics + "0": true, + "0.": true, + "0.0": true, + "0i": true, +} + +// lintElses examines else blocks. It complains about any else block whose if block ends in a return. +func (f *file) lintElses() { + // We don't want to flag if { } else if { } else { } constructions. + // They will appear as an IfStmt whose Else field is also an IfStmt. + // Record such a node so we ignore it when we visit it. + ignore := make(map[*ast.IfStmt]bool) + + f.walk(func(node ast.Node) bool { + ifStmt, ok := node.(*ast.IfStmt) + if !ok || ifStmt.Else == nil { + return true + } + if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { + ignore[elseif] = true + return true + } + if ignore[ifStmt] { + return true + } + if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { + // only care about elses without conditions + return true + } + if len(ifStmt.Body.List) == 0 { + return true + } + shortDecl := false // does the if statement have a ":=" initialization statement? 
+ if ifStmt.Init != nil { + if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { + shortDecl = true + } + } + lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] + if _, ok := lastStmt.(*ast.ReturnStmt); ok { + extra := "" + if shortDecl { + extra = " (move short variable declaration to its own line if necessary)" + } + f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra) + } + return true + }) +} + +// lintRanges examines range clauses. It complains about redundant constructions. +func (f *file) lintRanges() { + f.walk(func(node ast.Node) bool { + rs, ok := node.(*ast.RangeStmt) + if !ok { + return true + } + + if isIdent(rs.Key, "_") && (rs.Value == nil || isIdent(rs.Value, "_")) { + p := f.errorf(rs.Key, 1, category("range-loop"), "should omit values from range; this loop is equivalent to `for range ...`") + + newRS := *rs // shallow copy + newRS.Value = nil + newRS.Key = nil + p.ReplacementLine = f.firstLineOf(&newRS, rs) + + return true + } + + if isIdent(rs.Value, "_") { + p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok) + + newRS := *rs // shallow copy + newRS.Value = nil + p.ReplacementLine = f.firstLineOf(&newRS, rs) + } + + return true + }) +} + +// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation. +func (f *file) lintErrorf() { + f.walk(func(node ast.Node) bool { + ce, ok := node.(*ast.CallExpr) + if !ok || len(ce.Args) != 1 { + return true + } + isErrorsNew := isPkgDot(ce.Fun, "errors", "New") + var isTestingError bool + se, ok := ce.Fun.(*ast.SelectorExpr) + if ok && se.Sel.Name == "Error" { + if typ := f.pkg.typeOf(se.X); typ != nil { + isTestingError = typ.String() == "*testing.T" + } + } + if !isErrorsNew && !isTestingError { + return true + } + if !f.imports("errors") { + return true + } + arg := ce.Args[0] + ce, ok = arg.(*ast.CallExpr) + if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") { + return true + } + errorfPrefix := "fmt" + if isTestingError { + errorfPrefix = f.render(se.X) + } + p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix) + + m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`) + if m != nil { + p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3] + } + + return true + }) +} + +// lintErrors examines global error vars. It complains if they aren't named in the standard way. 
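The two checks above are easiest to read off an example. In this invented snippet, lintRanges and lintErrorf would be expected to suggest the rewrites shown in the comments:

    package demo

    import (
        "errors"
        "fmt"
    )

    func sum(xs []int) (int, error) {
        total := 0
        // Expected: should omit 2nd value from range; this loop is equivalent to `for i := range ...`
        for i, _ := range xs {
            total += xs[i]
        }
        if total < 0 {
            // Expected: should replace errors.New(fmt.Sprintf(...)) with fmt.Errorf(...)
            return 0, errors.New(fmt.Sprintf("bad total: %d", total))
        }
        return total, nil
    }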
+func (f *file) lintErrors() { + for _, decl := range f.f.Decls { + gd, ok := decl.(*ast.GenDecl) + if !ok || gd.Tok != token.VAR { + continue + } + for _, spec := range gd.Specs { + spec := spec.(*ast.ValueSpec) + if len(spec.Names) != 1 || len(spec.Values) != 1 { + continue + } + ce, ok := spec.Values[0].(*ast.CallExpr) + if !ok { + continue + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + continue + } + + id := spec.Names[0] + prefix := "err" + if id.IsExported() { + prefix = "Err" + } + if !strings.HasPrefix(id.Name, prefix) { + f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix) + } + } + } +} + +func lintErrorString(s string) (isClean bool, conf float64) { + const basicConfidence = 0.8 + const capConfidence = basicConfidence - 0.2 + first, firstN := utf8.DecodeRuneInString(s) + last, _ := utf8.DecodeLastRuneInString(s) + if last == '.' || last == ':' || last == '!' || last == '\n' { + return false, basicConfidence + } + if unicode.IsUpper(first) { + // People use proper nouns and exported Go identifiers in error strings, + // so decrease the confidence of warnings for capitalization. + if len(s) <= firstN { + return false, capConfidence + } + // Flag strings starting with something that doesn't look like an initialism. + if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) { + return false, capConfidence + } + } + return true, 0 +} + +// lintErrorStrings examines error strings. +// It complains if they are capitalized or end in punctuation or a newline. +func (f *file) lintErrorStrings() { + f.walk(func(node ast.Node) bool { + ce, ok := node.(*ast.CallExpr) + if !ok { + return true + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + return true + } + if len(ce.Args) < 1 { + return true + } + str, ok := ce.Args[0].(*ast.BasicLit) + if !ok || str.Kind != token.STRING { + return true + } + s, _ := strconv.Unquote(str.Value) // can assume well-formed Go + if s == "" { + return true + } + clean, conf := lintErrorString(s) + if clean { + return true + } + + f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"), + "error strings should not be capitalized or end with punctuation or a newline") + return true + }) +} + +// lintReceiverNames examines receiver names. It complains about inconsistent +// names used for the same type and names such as "this". 
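A short illustration of lintErrors and lintErrorStrings above, again with an invented name; the comments paraphrase the expected diagnostics:

    package demo

    import "errors"

    // NotFoundError reports a missing record.
    // Expected: error var NotFoundError should have name of the form ErrFoo
    // Expected: error strings should not be capitalized or end with punctuation or a newline
    var NotFoundError = errors.New("Record was not found.")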
+func (f *file) lintReceiverNames() { + typeReceiver := map[string]string{} + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return true + } + names := fn.Recv.List[0].Names + if len(names) < 1 { + return true + } + name := names[0].Name + const ref = styleGuideBase + "#receiver-names" + if name == "_" { + f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`) + return true + } + if name == "this" || name == "self" { + f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`) + return true + } + recv := receiverType(fn) + if prev, ok := typeReceiver[recv]; ok && prev != name { + f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv) + return true + } + typeReceiver[recv] = name + return true + }) +} + +// lintIncDec examines statements that increment or decrement a variable. +// It complains if they don't use x++ or x--. +func (f *file) lintIncDec() { + f.walk(func(n ast.Node) bool { + as, ok := n.(*ast.AssignStmt) + if !ok { + return true + } + if len(as.Lhs) != 1 { + return true + } + if !isOne(as.Rhs[0]) { + return true + } + var suffix string + switch as.Tok { + case token.ADD_ASSIGN: + suffix = "++" + case token.SUB_ASSIGN: + suffix = "--" + default: + return true + } + f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix) + return true + }) +} + +// lintErrorReturn examines function declarations that return an error. +// It complains if the error isn't the last parameter. +func (f *file) lintErrorReturn() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Type.Results == nil { + return true + } + ret := fn.Type.Results.List + if len(ret) <= 1 { + return true + } + if isIdent(ret[len(ret)-1].Type, "error") { + return true + } + // An error return parameter should be the last parameter. + // Flag any error parameters found before the last. + for _, r := range ret[:len(ret)-1] { + if isIdent(r.Type, "error") { + f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items") + break // only flag one + } + } + return true + }) +} + +// lintUnexportedReturn examines exported function declarations. +// It complains if any return an unexported type. +func (f *file) lintUnexportedReturn() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok { + return true + } + if fn.Type.Results == nil { + return false + } + if !fn.Name.IsExported() { + return false + } + thing := "func" + if fn.Recv != nil && len(fn.Recv.List) > 0 { + thing = "method" + if !ast.IsExported(receiverType(fn)) { + // Don't report exported methods of unexported types, + // such as private implementations of sort.Interface. + return false + } + } + for _, ret := range fn.Type.Results.List { + typ := f.pkg.typeOf(ret.Type) + if exportedType(typ) { + continue + } + f.errorf(ret.Type, 0.8, category("unexported-type-in-api"), + "exported %s %s returns unexported type %s, which can be annoying to use", + thing, fn.Name.Name, typ) + break // only flag one + } + return false + }) +} + +// exportedType reports whether typ is an exported type. +// It is imprecise, and will err on the side of returning true, +// such as for composite types. 
+func exportedType(typ types.Type) bool { + switch T := typ.(type) { + case *types.Named: + // Builtin types have no package. + return T.Obj().Pkg() == nil || T.Obj().Exported() + case *types.Map: + return exportedType(T.Key()) && exportedType(T.Elem()) + case interface { + Elem() types.Type + }: // array, slice, pointer, chan + return exportedType(T.Elem()) + } + // Be conservative about other types, such as struct, interface, etc. + return true +} + +// timeSuffixes is a list of name suffixes that imply a time unit. +// This is not an exhaustive list. +var timeSuffixes = []string{ + "Sec", "Secs", "Seconds", + "Msec", "Msecs", + "Milli", "Millis", "Milliseconds", + "Usec", "Usecs", "Microseconds", + "MS", "Ms", +} + +func (f *file) lintTimeNames() { + f.walk(func(node ast.Node) bool { + v, ok := node.(*ast.ValueSpec) + if !ok { + return true + } + for _, name := range v.Names { + origTyp := f.pkg.typeOf(name) + // Look for time.Duration or *time.Duration; + // the latter is common when using flag.Duration. + typ := origTyp + if pt, ok := typ.(*types.Pointer); ok { + typ = pt.Elem() + } + if !f.pkg.isNamedType(typ, "time", "Duration") { + continue + } + suffix := "" + for _, suf := range timeSuffixes { + if strings.HasSuffix(name.Name, suf) { + suffix = suf + break + } + } + if suffix == "" { + continue + } + f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix) + } + return true + }) +} + +// lintContextKeyTypes checks for call expressions to context.WithValue with +// basic types used for the key argument. +// See: https://golang.org/issue/17293 +func (f *file) lintContextKeyTypes() { + f.walk(func(node ast.Node) bool { + switch node := node.(type) { + case *ast.CallExpr: + f.checkContextKeyType(node) + } + + return true + }) +} + +// checkContextKeyType reports an error if the call expression calls +// context.WithValue with a key argument of basic type. +func (f *file) checkContextKeyType(x *ast.CallExpr) { + sel, ok := x.Fun.(*ast.SelectorExpr) + if !ok { + return + } + pkg, ok := sel.X.(*ast.Ident) + if !ok || pkg.Name != "context" { + return + } + if sel.Sel.Name != "WithValue" { + return + } + + // key is second argument to context.WithValue + if len(x.Args) != 3 { + return + } + key := f.pkg.typesInfo.Types[x.Args[1]] + + if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid { + f.errorf(x, 1.0, category("context"), fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type)) + } +} + +// lintContextArgs examines function declarations that contain an +// argument with a type of context.Context +// It complains if that argument isn't the first parameter. +func (f *file) lintContextArgs() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || len(fn.Type.Params.List) <= 1 { + return true + } + // A context.Context should be the first parameter of a function. + // Flag any that show up after the first. + for _, arg := range fn.Type.Params.List[1:] { + if isPkgDot(arg.Type, "context", "Context") { + f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function") + break // only flag one + } + } + return true + }) +} + +// containsComments returns whether the interval [start, end) contains any +// comments without "// MATCH " prefix. 
+func (f *file) containsComments(start, end token.Pos) bool { + for _, cgroup := range f.f.Comments { + comments := cgroup.List + if comments[0].Slash >= end { + // All comments starting with this group are after end pos. + return false + } + if comments[len(comments)-1].Slash < start { + // Comments group ends before start pos. + continue + } + for _, c := range comments { + if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") { + return true + } + } + } + return false +} + +// receiverType returns the named type of the method receiver, sans "*", +// or "invalid-type" if fn.Recv is ill formed. +func receiverType(fn *ast.FuncDecl) string { + switch e := fn.Recv.List[0].Type.(type) { + case *ast.Ident: + return e.Name + case *ast.StarExpr: + if id, ok := e.X.(*ast.Ident); ok { + return id.Name + } + } + // The parser accepts much more than just the legal forms. + return "invalid-type" +} + +func (f *file) walk(fn func(ast.Node) bool) { + ast.Walk(walker(fn), f.f) +} + +func (f *file) render(x interface{}) string { + var buf bytes.Buffer + if err := printer.Fprint(&buf, f.fset, x); err != nil { + panic(err) + } + return buf.String() +} + +func (f *file) debugRender(x interface{}) string { + var buf bytes.Buffer + if err := ast.Fprint(&buf, f.fset, x, nil); err != nil { + panic(err) + } + return buf.String() +} + +// walker adapts a function to satisfy the ast.Visitor interface. +// The function return whether the walk should proceed into the node's children. +type walker func(ast.Node) bool + +func (w walker) Visit(node ast.Node) ast.Visitor { + if w(node) { + return w + } + return nil +} + +func isIdent(expr ast.Expr, ident string) bool { + id, ok := expr.(*ast.Ident) + return ok && id.Name == ident +} + +// isBlank returns whether id is the blank identifier "_". +// If id == nil, the answer is false. +func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" } + +func isPkgDot(expr ast.Expr, pkg, name string) bool { + sel, ok := expr.(*ast.SelectorExpr) + return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name) +} + +func isOne(expr ast.Expr) bool { + lit, ok := expr.(*ast.BasicLit) + return ok && lit.Kind == token.INT && lit.Value == "1" +} + +func isCgoExported(f *ast.FuncDecl) bool { + if f.Recv != nil || f.Doc == nil { + return false + } + + cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name))) + for _, c := range f.Doc.List { + if cgoExport.MatchString(c.Text) { + return true + } + } + return false +} + +var basicTypeKinds = map[types.BasicKind]string{ + types.UntypedBool: "bool", + types.UntypedInt: "int", + types.UntypedRune: "rune", + types.UntypedFloat: "float64", + types.UntypedComplex: "complex128", + types.UntypedString: "string", +} + +// isUntypedConst reports whether expr is an untyped constant, +// and indicates what its default type is. +// scope may be nil. +func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) { + // Re-evaluate expr outside of its context to see if it's untyped. + // (An expr evaluated within, for example, an assignment context will get the type of the LHS.) + exprStr := f.render(expr) + tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr) + if err != nil { + return "", false + } + if b, ok := tv.Type.(*types.Basic); ok { + if dt, ok := basicTypeKinds[b.Kind()]; ok { + return dt, true + } + } + + return "", false +} + +// firstLineOf renders the given node and returns its first line. +// It will also match the indentation of another node. 
+func (f *file) firstLineOf(node, match ast.Node) string { + line := f.render(node) + if i := strings.Index(line, "\n"); i >= 0 { + line = line[:i] + } + return f.indentOf(match) + line +} + +func (f *file) indentOf(node ast.Node) string { + line := srcLine(f.src, f.fset.Position(node.Pos())) + for i, r := range line { + switch r { + case ' ', '\t': + default: + return line[:i] + } + } + return line // unusual or empty line +} + +func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) { + line := srcLine(f.src, f.fset.Position(node.Pos())) + line = strings.TrimSuffix(line, "\n") + rx := regexp.MustCompile(pattern) + return rx.FindStringSubmatch(line) +} + +// imports returns true if the current file imports the specified package path. +func (f *file) imports(importPath string) bool { + all := astutil.Imports(f.fset, f.f) + for _, p := range all { + for _, i := range p { + uq, err := strconv.Unquote(i.Path.Value) + if err == nil && importPath == uq { + return true + } + } + } + return false +} + +// srcLine returns the complete line at p, including the terminating newline. +func srcLine(src []byte, p token.Position) string { + // Run to end of line in both directions if not at line start/end. + lo, hi := p.Offset, p.Offset+1 + for lo > 0 && src[lo-1] != '\n' { + lo-- + } + for hi < len(src) && src[hi-1] != '\n' { + hi++ + } + return string(src[lo:hi]) +} diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go index 21f123957..6cd37280a 100644 --- a/vendor/golang.org/x/mod/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -123,8 +123,12 @@ type Version struct { Version string `json:",omitempty"` } -// String returns the module version syntax Path@Version. +// String returns a representation of the Version suitable for logging +// (Path@Version, or just Path if Version is empty). func (m Version) String() string { + if m.Version == "" { + return m.Path + } return m.Path + "@" + m.Version } diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index f4d9b5ece..3a67636fe 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -107,6 +107,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis // dialCall is an in-flight Transport dial call to a host. type dialCall struct { + _ incomparable p *clientConnPool done chan struct{} // closed when done res *ClientConn // valid after done is closed @@ -180,6 +181,7 @@ func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) } type addConnCall struct { + _ incomparable p *clientConnPool done chan struct{} // closed when done err error @@ -200,12 +202,6 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { close(c.done) } -func (p *clientConnPool) addConn(key string, cc *ClientConn) { - p.mu.Lock() - p.addConnLocked(key, cc) - p.mu.Unlock() -} - // p.mu must be held func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { for _, v := range p.conns[key] { diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index cea601fcd..b51f0e0cf 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -8,6 +8,8 @@ package http2 // flow is the flow control window's size. type flow struct { + _ incomparable + // n is the number of DATA bytes we're allowed to send. 
// A flow is kept both on a conn and a per-stream. n int32 diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index b412a96c5..a1ab2f056 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -105,7 +105,14 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { return nil } +// incomparable is a zero-width, non-comparable type. Adding it to a struct +// makes that struct also non-comparable, and generally doesn't add +// any size (as long as it's first). +type incomparable [0]func() + type node struct { + _ incomparable + // children is non-nil for internal nodes children *[256]*node diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index bdaba1d46..5571ccfd2 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -19,7 +19,6 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" "crypto/tls" - "errors" "fmt" "io" "net/http" @@ -173,11 +172,6 @@ func (s SettingID) String() string { return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) } -var ( - errInvalidHeaderFieldName = errors.New("http2: invalid header field name") - errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") -) - // validWireHeaderFieldName reports whether v is a valid header field // name (key). See httpguts.ValidHeaderName for the base rules. // @@ -247,6 +241,7 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. type bufferedWriter struct { + _ incomparable w io.Writer // immutable bw *bufio.Writer // non-nil when data is buffered } @@ -319,6 +314,7 @@ func bodyAllowedForStatus(status int) bool { } type httpError struct { + _ incomparable msg string timeout bool } @@ -382,3 +378,8 @@ func (s *sorter) SortStrings(ss []string) { func validPseudoPath(v string) bool { return (len(v) > 0 && v[0] == '/') || v == "*" } + +// incomparable is a zero-width, non-comparable type. Adding it to a struct +// makes that struct also non-comparable, and generally doesn't add +// any size (as long as it's first). +type incomparable [0]func() diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index de31d72b2..345b7cd85 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -581,13 +581,10 @@ type stream struct { cancelCtx func() // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us - parent *stream // or nil - numTrailerValues int64 - weight uint8 + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us state streamState resetQueued bool // RST_STREAM queued for write; set by sc.resetStream gotTrailerHeader bool // HEADER frame for trailers was seen @@ -764,6 +761,7 @@ func (sc *serverConn) readFrames() { // frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. 
type frameWriteResult struct { + _ incomparable wr FrameWriteRequest // what was written (or attempted) err error // result of the writeFrame call } @@ -774,7 +772,7 @@ type frameWriteResult struct { // serverConn. func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { err := wr.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wr, err} + sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err} } func (sc *serverConn) closeAllStreamsOnConnClose() { @@ -1164,7 +1162,7 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { if wr.write.staysWithinBuffer(sc.bw.Available()) { sc.writingFrameAsync = false err := wr.write.writeFrame(sc) - sc.wroteFrame(frameWriteResult{wr, err}) + sc.wroteFrame(frameWriteResult{wr: wr, err: err}) } else { sc.writingFrameAsync = true go sc.writeFrameAsync(wr) @@ -2060,7 +2058,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r var trailer http.Header for _, v := range rp.header["Trailer"] { for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + key = http.CanonicalHeaderKey(textproto.TrimString(key)) switch key { case "Transfer-Encoding", "Trailer", "Content-Length": // Bogus. (copy of http1 rules) @@ -2278,6 +2276,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { // requestBody is the Handler's Request.Body type. // Read and Close may be called concurrently. type requestBody struct { + _ incomparable stream *stream conn *serverConn closed bool // for use by Close only diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index d948e96ee..76a92e0ca 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -108,6 +108,19 @@ type Transport struct { // waiting for their turn. StrictMaxConcurrentStreams bool + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // Note that a ping response is considered a received frame, so if + // there is no other traffic on the connection, the health check will + // be performed every ReadIdleTimeout interval. + // If zero, no health check is performed. + ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to Ping is not received. + // Defaults to 15s. + PingTimeout time.Duration + // t1, if non-nil, is the standard library Transport using // this transport. Its settings are used (but not its // RoundTrip method, etc). @@ -131,6 +144,14 @@ func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } +func (t *Transport) pingTimeout() time.Duration { + if t.PingTimeout == 0 { + return 15 * time.Second + } + return t.PingTimeout + +} + // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. func ConfigureTransport(t1 *http.Transport) error { @@ -675,6 +696,20 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro return cc, nil } +func (cc *ClientConn) healthCheck() { + pingTimeout := cc.t.pingTimeout() + // We don't need to periodically ping in the health check, because the readLoop of ClientConn will + // trigger the healthCheck again if there is no frame received.
+ ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + defer cancel() + err := cc.Ping(ctx) + if err != nil { + cc.closeForLostPing() + cc.t.connPool().MarkDead(cc) + return + } +} + func (cc *ClientConn) setGoAway(f *GoAwayFrame) { cc.mu.Lock() defer cc.mu.Unlock() @@ -846,14 +881,12 @@ func (cc *ClientConn) sendGoAway() error { return nil } -// Close closes the client connection immediately. -// -// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. -func (cc *ClientConn) Close() error { +// closes the client connection immediately. In-flight requests are interrupted. +// err is sent to streams. +func (cc *ClientConn) closeForError(err error) error { cc.mu.Lock() defer cc.cond.Broadcast() defer cc.mu.Unlock() - err := errors.New("http2: client connection force closed via ClientConn.Close") for id, cs := range cc.streams { select { case cs.resc <- resAndError{err: err}: @@ -866,6 +899,20 @@ func (cc *ClientConn) Close() error { return cc.tconn.Close() } +// Close closes the client connection immediately. +// +// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. +func (cc *ClientConn) Close() error { + err := errors.New("http2: client connection force closed via ClientConn.Close") + return cc.closeForError(err) +} + +// closes the client connection immediately. In-flight requests are interrupted. +func (cc *ClientConn) closeForLostPing() error { + err := errors.New("http2: client connection lost") + return cc.closeForError(err) +} + const maxAllocFrameSize = 512 << 10 // frameBuffer returns a scratch buffer suitable for writing DATA frames. @@ -916,7 +963,7 @@ func commaSeparatedTrailers(req *http.Request) (string, error) { k = http.CanonicalHeaderKey(k) switch k { case "Transfer-Encoding", "Trailer", "Content-Length": - return "", &badStringError{"invalid Trailer key", k} + return "", fmt.Errorf("invalid Trailer key %q", k) } keys = append(keys, k) } @@ -1394,13 +1441,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - // requires cc.mu be held. func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { cc.hbuf.Reset() @@ -1616,6 +1656,7 @@ func (cc *ClientConn) writeHeader(name, value string) { } type resAndError struct { + _ incomparable res *http.Response err error } @@ -1663,6 +1704,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. 
type clientConnReadLoop struct { + _ incomparable cc *ClientConn closeWhenIdle bool } @@ -1742,8 +1784,17 @@ func (rl *clientConnReadLoop) run() error { rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse gotReply := false // ever saw a HEADERS reply gotSettings := false + readIdleTimeout := cc.t.ReadIdleTimeout + var t *time.Timer + if readIdleTimeout != 0 { + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) + defer t.Stop() + } for { f, err := cc.fr.ReadFrame() + if t != nil { + t.Reset(readIdleTimeout) + } if err != nil { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } @@ -1892,7 +1943,9 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") } - header := make(http.Header) + regularFields := f.RegularFields() + strs := make([]string, len(regularFields)) + header := make(http.Header, len(regularFields)) res := &http.Response{ Proto: "HTTP/2.0", ProtoMajor: 2, @@ -1900,7 +1953,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra StatusCode: statusCode, Status: status + " " + http.StatusText(statusCode), } - for _, hf := range f.RegularFields() { + for _, hf := range regularFields { key := http.CanonicalHeaderKey(hf.Name) if key == "Trailer" { t := res.Trailer @@ -1912,7 +1965,18 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra t[http.CanonicalHeaderKey(v)] = nil }) } else { - header[key] = append(header[key], hf.Value) + vv := header[key] + if vv == nil && len(strs) > 0 { + // More than likely this will be a single-element key. + // Most headers aren't multi-valued. + // Set the capacity on strs[0] to 1, so any future append + // won't extend the slice into the other strings. + vv, strs = strs[:1:1], strs[1:] + vv[0] = hf.Value + header[key] = vv + } else { + header[key] = append(vv, hf.Value) + } } } @@ -2198,8 +2262,6 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { return nil } -var errInvalidTrailers = errors.New("http2: invalid trailers") - func (rl *clientConnReadLoop) endStream(cs *clientStream) { // TODO: check that any declared content-length matches, like // server.go's (*stream).endStream method. @@ -2430,7 +2492,6 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") - errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") ) func (cc *ClientConn) logf(format string, args ...interface{}) { @@ -2469,6 +2530,7 @@ func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { // gzipReader wraps a response body so it can lazily // call gzip.NewReader on the first call to Read type gzipReader struct { + _ incomparable body io.ReadCloser // underlying Response.Body zr *gzip.Reader // lazily-initialized gzip reader zerr error // sticky error diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 0f443e693..8cfd6063e 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -16,15 +16,16 @@ Or you can manually git clone the repository to See godoc for further documentation and examples. 
-* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) +* [godoc.org/golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google) ## Policy for new packages -We no longer accept new provider-specific packages in this repo. For -defining provider endpoints and provider-specific OAuth2 behavior, we -encourage you to create packages elsewhere. We'll keep the existing -packages for compatibility. +We no longer accept new provider-specific packages in this repo if all +they do is add a single endpoint variable. If you just want to add a +single endpoint, add it to the +[godoc.org/golang.org/x/oauth2/endpoints](https://godoc.org/golang.org/x/oauth2/endpoints) +package. ## Report Issues / Send Patches diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index aa0d34f1e..90657915f 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -6,7 +6,7 @@ package oauth2 import ( "errors" - "io" + "log" "net/http" "sync" ) @@ -25,9 +25,6 @@ type Transport struct { // Base is the base RoundTripper used to make HTTP requests. // If nil, http.DefaultTransport is used. Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified } // RoundTrip authorizes and authenticates the request with an @@ -52,35 +49,22 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { req2 := cloneRequest(req) // per RoundTripper contract token.SetAuthHeader(req2) - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - // req.Body is assumed to have been closed by the base RoundTripper. + // req.Body is assumed to be closed by the base RoundTripper. reqBodyClosed = true - - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil + return t.base().RoundTrip(req2) } -// CancelRequest cancels an in-flight request by closing its connection. +var cancelOnce sync.Once + +// CancelRequest does nothing. It used to be a legacy cancellation mechanism +// but now it only logs on first use to warn that it's deprecated. +// +// Deprecated: use contexts for cancellation instead. func (t *Transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } + cancelOnce.Do(func() { + log.Printf("deprecated: golang.org/x/oauth2: Transport.CancelRequest no longer does anything; use contexts") + }) } func (t *Transport) base() http.RoundTripper { @@ -90,19 +74,6 @@ func (t *Transport) base() http.RoundTripper { return http.DefaultTransport } -func (t *Transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - // cloneRequest returns a clone of the provided *http.Request. // The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request { @@ -116,29 +87,3 @@ func cloneRequest(r *http.Request) *http.Request { } return r2 } - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index b4e6ecb2d..e44deb757 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -114,6 +114,15 @@ var ARM struct { _ CacheLinePad } +// MIPS64X contains the supported CPU features of the current mips64/mips64le +// platforms. If the current platform is not mips64/mips64le or the current +// operating system is not Linux then all feature flags are false. +var MIPS64X struct { + _ CacheLinePad + HasMSA bool // MIPS SIMD architecture + _ CacheLinePad +} + // PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. // If the current platform is not ppc64/ppc64le then all feature flags are false. // diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go b/vendor/golang.org/x/sys/cpu/cpu_aix.go similarity index 96% rename from vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go rename to vendor/golang.org/x/sys/cpu/cpu_aix.go index be6027224..da2989668 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_aix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix,ppc64 +// +build aix package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go new file mode 100644 index 000000000..7bcb36c7b --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go @@ -0,0 +1,144 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import "runtime" + +const cacheLineSize = 64 + +func init() { + switch runtime.GOOS { + case "android", "darwin", "netbsd": + // Android and iOS don't seem to allow reading these registers. + // + // NetBSD: + // ID_AA64ISAR0_EL1 is a privileged register and cannot be read from EL0. + // It can be read via sysctl(3). Example for future implementers: + // https://nxr.netbsd.org/xref/src/usr.sbin/cpuctl/arch/aarch64.c + // + // Fake the minimal features expected by + // TestARM64minimalFeatures. 
+ ARM64.HasASIMD = true + ARM64.HasFP = true + case "linux": + doinit() + default: + readARM64Registers() + } +} + +func readARM64Registers() { + Initialized = true + + // ID_AA64ISAR0_EL1 + isar0 := getisar0() + + switch extractBits(isar0, 4, 7) { + case 1: + ARM64.HasAES = true + case 2: + ARM64.HasAES = true + ARM64.HasPMULL = true + } + + switch extractBits(isar0, 8, 11) { + case 1: + ARM64.HasSHA1 = true + } + + switch extractBits(isar0, 12, 15) { + case 1: + ARM64.HasSHA2 = true + case 2: + ARM64.HasSHA2 = true + ARM64.HasSHA512 = true + } + + switch extractBits(isar0, 16, 19) { + case 1: + ARM64.HasCRC32 = true + } + + switch extractBits(isar0, 20, 23) { + case 2: + ARM64.HasATOMICS = true + } + + switch extractBits(isar0, 28, 31) { + case 1: + ARM64.HasASIMDRDM = true + } + + switch extractBits(isar0, 32, 35) { + case 1: + ARM64.HasSHA3 = true + } + + switch extractBits(isar0, 36, 39) { + case 1: + ARM64.HasSM3 = true + } + + switch extractBits(isar0, 40, 43) { + case 1: + ARM64.HasSM4 = true + } + + switch extractBits(isar0, 44, 47) { + case 1: + ARM64.HasASIMDDP = true + } + + // ID_AA64ISAR1_EL1 + isar1 := getisar1() + + switch extractBits(isar1, 0, 3) { + case 1: + ARM64.HasDCPOP = true + } + + switch extractBits(isar1, 12, 15) { + case 1: + ARM64.HasJSCVT = true + } + + switch extractBits(isar1, 16, 19) { + case 1: + ARM64.HasFCMA = true + } + + switch extractBits(isar1, 20, 23) { + case 1: + ARM64.HasLRCPC = true + } + + // ID_AA64PFR0_EL1 + pfr0 := getpfr0() + + switch extractBits(pfr0, 16, 19) { + case 0: + ARM64.HasFP = true + case 1: + ARM64.HasFP = true + ARM64.HasFPHP = true + } + + switch extractBits(pfr0, 20, 23) { + case 0: + ARM64.HasASIMD = true + case 1: + ARM64.HasASIMD = true + ARM64.HasASIMDHP = true + } + + switch extractBits(pfr0, 32, 35) { + case 1: + ARM64.HasSVE = true + } +} + +func extractBits(data uint64, start, end uint) uint { + return (uint)(data>>start) & ((1 << (end - start + 1)) - 1) +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.s b/vendor/golang.org/x/sys/cpu/cpu_arm64.s new file mode 100644 index 000000000..a54436e39 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.s @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +#include "textflag.h" + +// func getisar0() uint64 +TEXT ·getisar0(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 0 into x0 + // mrs x0, ID_AA64ISAR0_EL1 = d5380600 + WORD $0xd5380600 + MOVD R0, ret+0(FP) + RET + +// func getisar1() uint64 +TEXT ·getisar1(SB),NOSPLIT,$0-8 + // get Instruction Set Attributes 1 into x0 + // mrs x0, ID_AA64ISAR1_EL1 = d5380620 + WORD $0xd5380620 + MOVD R0, ret+0(FP) + RET + +// func getpfr0() uint64 +TEXT ·getpfr0(SB),NOSPLIT,$0-8 + // get Processor Feature Register 0 into x0 + // mrs x0, ID_AA64PFR0_EL1 = d5380400 + WORD $0xd5380400 + MOVD R0, ret+0(FP) + RET diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go new file mode 100644 index 000000000..7b88e865a --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !gccgo + +package cpu + +func getisar0() uint64 +func getisar1() uint64 +func getpfr0() uint64 diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go new file mode 100644 index 000000000..53ca8d65c --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gccgo + +package cpu + +func getisar0() uint64 { return 0 } +func getisar1() uint64 { return 0 } +func getpfr0() uint64 { return 0 } diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c similarity index 100% rename from vendor/golang.org/x/sys/cpu/cpu_gccgo.c rename to vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go similarity index 100% rename from vendor/golang.org/x/sys/cpu/cpu_gccgo.go rename to vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go index 10e712dc5..fe139182c 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go @@ -2,58 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64,!amd64p32,!386 +// +build !386,!amd64,!amd64p32,!arm64 package cpu -import ( - "io/ioutil" -) - -const ( - _AT_HWCAP = 16 - _AT_HWCAP2 = 26 - - procAuxv = "/proc/self/auxv" - - uintSize = int(32 << (^uint(0) >> 63)) -) - -// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 -// These are initialized in cpu_$GOARCH.go -// and should not be changed after they are initialized. -var hwCap uint -var hwCap2 uint - func init() { - buf, err := ioutil.ReadFile(procAuxv) - if err != nil { - // e.g. on android /proc/self/auxv is not accessible, so silently - // ignore the error and leave Initialized = false + if err := readHWCAP(); err != nil { return } - - bo := hostByteOrder() - for len(buf) >= 2*(uintSize/8) { - var tag, val uint - switch uintSize { - case 32: - tag = uint(bo.Uint32(buf[0:])) - val = uint(bo.Uint32(buf[4:])) - buf = buf[8:] - case 64: - tag = uint(bo.Uint64(buf[0:])) - val = uint(bo.Uint64(buf[8:])) - buf = buf[16:] - } - switch tag { - case _AT_HWCAP: - hwCap = val - case _AT_HWCAP2: - hwCap2 = val - } - } doinit() - Initialized = true } diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go index fa7fb1bd7..79a38a0b9 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go @@ -4,8 +4,6 @@ package cpu -const cacheLineSize = 64 - // HWCAP/HWCAP2 bits. These are exposed by Linux. const ( hwcap_FP = 1 << 0 @@ -35,6 +33,12 @@ const ( ) func doinit() { + if err := readHWCAP(); err != nil { + // failed to read /proc/self/auxv, try reading registers directly + readARM64Registers() + return + } + // HWCAP feature bits ARM64.HasFP = isSet(hwCap, hwcap_FP) ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go new file mode 100644 index 000000000..eb24e5073 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build mips64 mips64le + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel 5.4. +const ( + // CPU features + hwcap_MIPS_MSA = 1 << 1 +) + +func doinit() { + // HWCAP feature bits + MIPS64X.HasMSA = isSet(hwCap, hwcap_MIPS_MSA) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go new file mode 100644 index 000000000..42b5d33cb --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!arm,!arm64,!mips64,!mips64le,!ppc64,!ppc64le,!s390x + +package cpu + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go index f55e0c82c..6165f1212 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go @@ -7,5 +7,3 @@ package cpu const cacheLineSize = 32 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go index cda87b1a1..1269eee88 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go +++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go @@ -7,5 +7,3 @@ package cpu const cacheLineSize = 32 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go index dd1e76dc9..3ffc4afa0 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go @@ -6,6 +6,4 @@ package cpu -const cacheLineSize = 64 - func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go new file mode 100644 index 000000000..efe2b7a84 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build riscv64 + +package cpu + +const cacheLineSize = 32 diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go index bd9bbda0c..8681e876a 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_wasm.go +++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go @@ -11,5 +11,3 @@ package cpu // rules are good enough. const cacheLineSize = 0 - -func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/vendor/golang.org/x/sys/cpu/hwcap_linux.go new file mode 100644 index 000000000..f3baa3793 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/hwcap_linux.go @@ -0,0 +1,56 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "io/ioutil" +) + +const ( + _AT_HWCAP = 16 + _AT_HWCAP2 = 26 + + procAuxv = "/proc/self/auxv" + + uintSize = int(32 << (^uint(0) >> 63)) +) + +// For those platforms don't have a 'cpuid' equivalent we use HWCAP/HWCAP2 +// These are initialized in cpu_$GOARCH.go +// and should not be changed after they are initialized. +var hwCap uint +var hwCap2 uint + +func readHWCAP() error { + buf, err := ioutil.ReadFile(procAuxv) + if err != nil { + // e.g. 
on android /proc/self/auxv is not accessible, so silently + // ignore the error and leave Initialized = false. On some + // architectures (e.g. arm64) doinit() implements a fallback + // readout and will set Initialized = true again. + return err + } + bo := hostByteOrder() + for len(buf) >= 2*(uintSize/8) { + var tag, val uint + switch uintSize { + case 32: + tag = uint(bo.Uint32(buf[0:])) + val = uint(bo.Uint32(buf[4:])) + buf = buf[8:] + case 64: + tag = uint(bo.Uint64(buf[0:])) + val = uint(bo.Uint64(buf[8:])) + buf = buf[16:] + } + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil +} diff --git a/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go new file mode 100644 index 000000000..76fbe40b7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go @@ -0,0 +1,27 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Recreate a getsystemcfg syscall handler instead of +// using the one provided by x/sys/unix to avoid having +// the dependency between them. (See golang.org/issue/32102) +// Moreover, this file will be used during the building of +// gccgo's libgo and thus must not use a CGo method. + +// +build aix +// +build gccgo + +package cpu + +import ( + "syscall" +) + +//extern getsystemcfg +func gccgoGetsystemcfg(label uint32) (r uint64) + +func callgetsystemcfg(label int) (r1 uintptr, e1 syscall.Errno) { + r1 = uintptr(gccgoGetsystemcfg(uint32(label))) + e1 = syscall.GetErrno() + return +} diff --git a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go new file mode 100644 index 000000000..e07899b90 --- /dev/null +++ b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package unsafeheader contains header declarations for the Go runtime's +// slice and string implementations. +// +// This package allows x/sys to use types equivalent to +// reflect.SliceHeader and reflect.StringHeader without introducing +// a dependency on the (relatively heavy) "reflect" package. +package unsafeheader + +import ( + "unsafe" +) + +// Slice is the runtime representation of a slice. +// It cannot be used safely or portably and its representation may change in a later release. +type Slice struct { + Data unsafe.Pointer + Len int + Cap int +} + +// String is the runtime representation of a string. +// It cannot be used safely or portably and its representation may change in a later release. +type String struct { + Data unsafe.Pointer + Len int +} diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index eb2f78ae2..579d2d735 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -89,7 +89,7 @@ constants. Adding new syscall numbers is mostly done by running the build on a sufficiently new installation of the target OS (or updating the source checkouts for the -new build system). However, depending on the OS, you make need to update the +new build system). However, depending on the OS, you may need to update the parsing in mksysnum.
### mksyscall.go @@ -149,10 +149,21 @@ To add a constant, add the header that includes it to the appropriate variable. Then, edit the regex (if necessary) to match the desired constant. Avoid making the regex too broad to avoid matching unintended constants. +### mkmerge.go + +This program is used to extract duplicate const, func, and type declarations +from the generated architecture-specific files listed below, and merge these +into a common file for each OS. + +The merge is performed in the following steps: +1. Construct the set of common code that is idential in all architecture-specific files. +2. Write this common code to the merged file. +3. Remove the common code from all architecture-specific files. + ## Generated files -### `zerror_${GOOS}_${GOARCH}.go` +### `zerrors_${GOOS}_${GOARCH}.go` A file containing all of the system's generated error numbers, error strings, signal numbers, and constants. Generated by `mkerrors.sh` (see above). diff --git a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s index 6db717de5..3cfefed2e 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_riscv64.s @@ -23,10 +23,6 @@ TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 MOV a1+8(FP), A0 MOV a2+16(FP), A1 MOV a3+24(FP), A2 - MOV $0, A3 - MOV $0, A4 - MOV $0, A5 - MOV $0, A6 MOV trap+0(FP), A7 // syscall entry ECALL MOV A0, r1+32(FP) // r1 @@ -44,9 +40,6 @@ TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48 MOV a1+8(FP), A0 MOV a2+16(FP), A1 MOV a3+24(FP), A2 - MOV ZERO, A3 - MOV ZERO, A4 - MOV ZERO, A5 MOV trap+0(FP), A7 // syscall entry ECALL MOV A0, r1+32(FP) diff --git a/vendor/golang.org/x/sys/unix/bluetooth_linux.go b/vendor/golang.org/x/sys/unix/bluetooth_linux.go index 6e3229697..a178a6149 100644 --- a/vendor/golang.org/x/sys/unix/bluetooth_linux.go +++ b/vendor/golang.org/x/sys/unix/bluetooth_linux.go @@ -23,6 +23,7 @@ const ( HCI_CHANNEL_USER = 1 HCI_CHANNEL_MONITOR = 2 HCI_CHANNEL_CONTROL = 3 + HCI_CHANNEL_LOGGING = 4 ) // Socketoption Level diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go index c56bc8b05..761db66ef 100644 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/errors_freebsd_386.go @@ -8,6 +8,7 @@ package unix const ( + DLT_HHDLC = 0x79 IFF_SMART = 0x20 IFT_1822 = 0x2 IFT_A12MPPSWITCH = 0x82 @@ -210,13 +211,18 @@ const ( IFT_XETHER = 0x1a IPPROTO_MAXID = 0x34 IPV6_FAITH = 0x1d + IPV6_MIN_MEMBERSHIPS = 0x1f IP_FAITH = 0x16 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MIN_MEMBERSHIPS = 0x1f MAP_NORESERVE = 0x40 MAP_RENAME = 0x20 NET_RT_MAXID = 0x6 RTF_PRCLONING = 0x10000 RTM_OLDADD = 0x9 RTM_OLDDEL = 0xa + RT_CACHING_CONTEXT = 0x1 + RT_NORTREF = 0x2 SIOCADDRT = 0x8030720a SIOCALIFADDR = 0x8118691b SIOCDELRT = 0x8030720b diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go index 3e9771175..070f44b65 100644 --- a/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/errors_freebsd_amd64.go @@ -8,6 +8,7 @@ package unix const ( + DLT_HHDLC = 0x79 IFF_SMART = 0x20 IFT_1822 = 0x2 IFT_A12MPPSWITCH = 0x82 @@ -210,13 +211,18 @@ const ( IFT_XETHER = 0x1a IPPROTO_MAXID = 0x34 IPV6_FAITH = 0x1d + IPV6_MIN_MEMBERSHIPS = 0x1f IP_FAITH = 0x16 + IP_MAX_SOURCE_FILTER = 0x400 + IP_MIN_MEMBERSHIPS = 0x1f MAP_NORESERVE = 0x40 MAP_RENAME = 0x20 NET_RT_MAXID = 0x6 RTF_PRCLONING = 0x10000 RTM_OLDADD = 0x9 RTM_OLDDEL = 
0xa + RT_CACHING_CONTEXT = 0x1 + RT_NORTREF = 0x2 SIOCADDRT = 0x8040720a SIOCALIFADDR = 0x8118691b SIOCDELRT = 0x8040720b diff --git a/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go new file mode 100644 index 000000000..946dcf3fc --- /dev/null +++ b/vendor/golang.org/x/sys/unix/errors_freebsd_arm64.go @@ -0,0 +1,17 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Constants that were deprecated or moved to enums in the FreeBSD headers. Keep +// them here for backwards compatibility. + +package unix + +const ( + DLT_HHDLC = 0x79 + IPV6_MIN_MEMBERSHIPS = 0x1f + IP_MAX_SOURCE_FILTER = 0x400 + IP_MIN_MEMBERSHIPS = 0x1f + RT_CACHING_CONTEXT = 0x1 + RT_NORTREF = 0x2 +) diff --git a/vendor/golang.org/x/sys/unix/fcntl.go b/vendor/golang.org/x/sys/unix/fcntl.go index 39c03f1ef..4dc534864 100644 --- a/vendor/golang.org/x/sys/unix/fcntl.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -9,12 +9,11 @@ package unix import "unsafe" // fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux -// systems by flock_linux_32bit.go to be SYS_FCNTL64. +// systems by fcntl_linux_32bit.go to be SYS_FCNTL64. var fcntl64Syscall uintptr = SYS_FCNTL -// FcntlInt performs a fcntl syscall on fd with the provided command and argument. -func FcntlInt(fd uintptr, cmd, arg int) (int, error) { - valptr, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg)) +func fcntl(fd int, cmd, arg int) (int, error) { + valptr, _, errno := Syscall(fcntl64Syscall, uintptr(fd), uintptr(cmd), uintptr(arg)) var err error if errno != 0 { err = errno @@ -22,6 +21,11 @@ func FcntlInt(fd uintptr, cmd, arg int) (int, error) { return int(valptr), err } +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return fcntl(int(fd), cmd, arg) +} + // FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk))) diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go new file mode 100644 index 000000000..b27be0a01 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/fdset.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris + +package unix + +// Set adds fd to the set fds. +func (fds *FdSet) Set(fd int) { + fds.Bits[fd/NFDBITS] |= (1 << (uintptr(fd) % NFDBITS)) +} + +// Clear removes fd from the set fds. +func (fds *FdSet) Clear(fd int) { + fds.Bits[fd/NFDBITS] &^= (1 << (uintptr(fd) % NFDBITS)) +} + +// IsSet returns whether fd is in the set fds. +func (fds *FdSet) IsSet(fd int) bool { + return fds.Bits[fd/NFDBITS]&(1<<(uintptr(fd)%NFDBITS)) != 0 +} + +// Zero clears the set fds. 
+func (fds *FdSet) Zero() { + for i := range fds.Bits { + fds.Bits[i] = 0 + } +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 890ec464c..ece31e9dc 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then # Use the Docker-based build system # Files generated through docker (use $cmd so you can Ctl-C the build or run) $cmd docker build --tag generate:$GOOS $GOOS - $cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS + $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS exit fi @@ -124,7 +124,7 @@ freebsd_arm) freebsd_arm64) mkerrors="$mkerrors -m64" mksysnum="go run mksysnum.go 'https://svn.freebsd.org/base/stable/11/sys/kern/syscalls.master'" - mktypes="GOARCH=$GOARCH go tool cgo -godefs" + mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char" ;; netbsd_386) mkerrors="$mkerrors -m32" @@ -190,6 +190,12 @@ solaris_amd64) mksysnum= mktypes="GOARCH=$GOARCH go tool cgo -godefs" ;; +illumos_amd64) + mksyscall="go run mksyscall_solaris.go" + mkerrors= + mksysnum= + mktypes= + ;; *) echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2 exit 1 @@ -217,6 +223,11 @@ esac echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; # 1.13 and later, syscalls via libSystem (including syscallPtr) echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go"; + elif [ "$GOOS" == "illumos" ]; then + # illumos code generation requires a --illumos switch + echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go"; + # illumos implies solaris, so solaris code generation is also required + echo "$mksyscall -tags solaris,$GOARCH syscall_solaris.go syscall_solaris_$GOARCH.go |gofmt >zsyscall_solaris_$GOARCH.go"; else echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 67b84828a..780e387e3 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -44,6 +44,7 @@ includes_AIX=' #include #include #include +#include #include #include #include @@ -104,6 +105,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -185,16 +187,21 @@ struct ltchars { #include #include #include +#include +#include #include #include #include #include #include +#include #include #include #include #include #include +#include +#include #include #include #include @@ -276,6 +283,11 @@ struct ltchars { // for the tipc_subscr timeout __u32 field. #undef TIPC_WAIT_FOREVER #define TIPC_WAIT_FOREVER 0xffffffff + +// Copied from linux/l2tp.h +// Including linux/l2tp.h here causes conflicts between linux/in.h +// and netinet/in.h included via net/route.h above. 
+#define IPPROTO_L2TP 115 ' includes_NetBSD=' @@ -469,12 +481,13 @@ ccflags="$@" $2 ~ /^(MS|MNT|UMOUNT)_/ || $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || - $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT)_/ || + $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ || $2 ~ /^KEXEC_/ || $2 ~ /^LINUX_REBOOT_CMD_/ || $2 ~ /^LINUX_REBOOT_MAGIC[12]$/ || $2 ~ /^MODULE_INIT_/ || $2 !~ "NLA_TYPE_MASK" && + $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -482,8 +495,9 @@ ccflags="$@" $2 ~ /^TCSET/ || $2 ~ /^TC(FLSH|SBRKP?|XONC)$/ || $2 !~ "RTF_BITS" && - $2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ || + $2 ~ /^(IFF|IFT|NET_RT|RTM(GRP)?|RTF|RTV|RTA|RTAX)_/ || $2 ~ /^BIOC/ || + $2 ~ /^DIOC/ || $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ || $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || @@ -494,7 +508,10 @@ ccflags="$@" $2 ~ /^CAN_/ || $2 ~ /^CAP_/ || $2 ~ /^ALG_/ || - $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE|IOC_(GET|SET)_ENCRYPTION)/ || + $2 ~ /^FS_(POLICY_FLAGS|KEY_DESC|ENCRYPTION_MODE|[A-Z0-9_]+_KEY_SIZE)/ || + $2 ~ /^FS_IOC_.*(ENCRYPTION|VERITY|GETFLAGS)/ || + $2 ~ /^FS_VERITY_/ || + $2 ~ /^FSCRYPT_/ || $2 ~ /^GRND_/ || $2 ~ /^RND/ || $2 ~ /^KEY_(SPEC|REQKEY_DEFL)_/ || @@ -521,9 +538,11 @@ ccflags="$@" $2 ~ /^WDIOC_/ || $2 ~ /^NFN/ || $2 ~ /^XDP_/ || + $2 ~ /^RWF_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || $2 ~ /^CRYPTO_/ || $2 ~ /^TIPC_/ || + $2 ~ /^DEVLINK_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~/^PPPIOC/ || diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go b/vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go new file mode 100644 index 000000000..5144deecc --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +// Round the length of a raw sockaddr up to align it properly. +func cmsgAlignOf(salen int) int { + salign := SizeofPtr + if SizeofPtr == 8 && !supportsABI(_dragonflyABIChangeVersion) { + // 64-bit Dragonfly before the September 2019 ABI changes still requires + // 32-bit aligned access to network subsystem. + salign = 4 + } + return (salen + salign - 1) & ^(salign - 1) +} diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go index 6079eb4ac..8bf457059 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go @@ -17,7 +17,7 @@ func UnixCredentials(ucred *Ucred) []byte { h.Level = SOL_SOCKET h.Type = SCM_CREDENTIALS h.SetLen(CmsgLen(SizeofUcred)) - *((*Ucred)(cmsgData(h))) = *ucred + *(*Ucred)(h.data(0)) = *ucred return b } diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 062bcabab..003916ed7 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -9,35 +9,9 @@ package unix import ( - "runtime" "unsafe" ) -// Round the length of a raw sockaddr up to align it properly. -func cmsgAlignOf(salen int) int { - salign := SizeofPtr - - switch runtime.GOOS { - case "aix": - // There is no alignment on AIX. 
- salign = 1 - case "darwin", "dragonfly", "solaris", "illumos": - // NOTE: It seems like 64-bit Darwin, DragonFly BSD, - // illumos, and Solaris kernels still require 32-bit - // aligned access to network subsystem. - if SizeofPtr == 8 { - salign = 4 - } - case "netbsd", "openbsd": - // NetBSD and OpenBSD armv7 require 64-bit alignment. - if runtime.GOARCH == "arm" { - salign = 8 - } - } - - return (salen + salign - 1) & ^(salign - 1) -} - // CmsgLen returns the value to store in the Len field of the Cmsghdr // structure, taking into account any necessary alignment. func CmsgLen(datalen int) int { @@ -50,8 +24,8 @@ func CmsgSpace(datalen int) int { return cmsgAlignOf(SizeofCmsghdr) + cmsgAlignOf(datalen) } -func cmsgData(h *Cmsghdr) unsafe.Pointer { - return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(SizeofCmsghdr))) +func (h *Cmsghdr) data(offset uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(SizeofCmsghdr)) + offset) } // SocketControlMessage represents a socket control message. @@ -94,10 +68,8 @@ func UnixRights(fds ...int) []byte { h.Level = SOL_SOCKET h.Type = SCM_RIGHTS h.SetLen(CmsgLen(datalen)) - data := cmsgData(h) - for _, fd := range fds { - *(*int32)(data) = int32(fd) - data = unsafe.Pointer(uintptr(data) + 4) + for i, fd := range fds { + *(*int32)(h.data(4 * uintptr(i))) = int32(fd) } return b } diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go new file mode 100644 index 000000000..7d08dae5b --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin freebsd linux netbsd openbsd solaris + +package unix + +import ( + "runtime" +) + +// Round the length of a raw sockaddr up to align it properly. +func cmsgAlignOf(salen int) int { + salign := SizeofPtr + + // dragonfly needs to check ABI version at runtime, see cmsgAlignOf in + // sockcmsg_dragonfly.go + switch runtime.GOOS { + case "aix": + // There is no alignment on AIX. + salign = 1 + case "darwin", "illumos", "solaris": + // NOTE: It seems like 64-bit Darwin, Illumos and Solaris + // kernels still require 32-bit aligned access to network + // subsystem. + if SizeofPtr == 8 { + salign = 4 + } + case "netbsd", "openbsd": + // NetBSD and OpenBSD armv7 require 64-bit alignment. 
+ if runtime.GOARCH == "arm" { + salign = 8 + } + } + + return (salen + salign - 1) & ^(salign - 1) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 3e6671426..68605db62 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -237,7 +237,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] sa.Name = string(bytes) return sa, nil @@ -510,6 +510,23 @@ func SysctlRaw(name string, args ...int) ([]byte, error) { return buf[:n], nil } +func SysctlClockinfo(name string) (*Clockinfo, error) { + mib, err := sysctlmib(name) + if err != nil { + return nil, err + } + + n := uintptr(SizeofClockinfo) + var ci Clockinfo + if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil { + return nil, err + } + if n != SizeofClockinfo { + return nil, EIO + } + return &ci, nil +} + //sys utimes(path string, timeval *[2]Timeval) (err error) func Utimes(path string, tv []Timeval) error { @@ -577,8 +594,6 @@ func Futimes(fd int, tv []Timeval) error { return futimes(fd, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) } -//sys fcntl(fd int, cmd int, arg int) (val int, err error) - //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go index 24960c38b..dc0befee3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go @@ -6,7 +6,11 @@ package unix -import "unsafe" +import ( + "unsafe" + + "golang.org/x/sys/internal/unsafeheader" +) //sys closedir(dir uintptr) (err error) //sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) @@ -27,8 +31,6 @@ func libc_fdopendir_trampoline() func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // Simulate Getdirentries using fdopendir/readdir_r/closedir. - const ptrSize = unsafe.Sizeof(uintptr(0)) - // We store the number of entries to skip in the seek // offset of fd. See issue #31368. // It's not the full required semantics, but should handle the case @@ -73,6 +75,7 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { cnt++ continue } + reclen := int(entry.Reclen) if reclen > len(buf) { // Not enough room. Return for now. @@ -81,13 +84,15 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // restarting is O(n^2) in the length of the directory. Oh well. break } + // Copy entry into return buffer. 
- s := struct { - ptr unsafe.Pointer - siz int - cap int - }{ptr: unsafe.Pointer(&entry), siz: reclen, cap: reclen} - copy(buf, *(*[]byte)(unsafe.Pointer(&s))) + var s []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&s)) + hdr.Data = unsafe.Pointer(&entry) + hdr.Cap = reclen + hdr.Len = reclen + copy(buf, s) + buf = buf[reclen:] n += reclen cnt++ diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index c5018a385..0cf31acf0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -155,23 +155,6 @@ func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) ( //sys getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) -func SysctlClockinfo(name string) (*Clockinfo, error) { - mib, err := sysctlmib(name) - if err != nil { - return nil, err - } - - n := uintptr(SizeofClockinfo) - var ci Clockinfo - if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil { - return nil, err - } - if n != SizeofClockinfo { - return nil, EIO - } - return &ci, nil -} - //sysnb pipe() (r int, w int, err error) func Pipe(p []int) (err error) { @@ -333,12 +316,16 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) error { * Wrapped */ +//sys fcntl(fd int, cmd int, arg int) (val int, err error) + //sys kill(pid int, signum int, posix int) (err error) func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL + func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) @@ -436,6 +423,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Getrlimit(which int, lim *Rlimit) (err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) +//sysnb Gettimeofday(tp *Timeval) (err error) //sysnb Getuid() (uid int) //sysnb Issetugid() (tainted bool) //sys Kqueue() (fd int, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index dd756e708..2724e3a51 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -10,7 +10,6 @@ import ( "syscall" ) -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func setTimespec(sec, nsec int64) Timespec { @@ -21,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. 
- sec, usec, err := gettimeofday(tv) - tv.Sec = int32(sec) - tv.Usec = int32(usec) - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint32(fd) k.Filter = int16(mode) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 7f148c428..ce2e0d249 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -10,7 +10,6 @@ import ( "syscall" ) -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func setTimespec(sec, nsec int64) Timespec { @@ -21,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. - sec, usec, err := gettimeofday(tv) - tv.Sec = sec - tv.Usec = usec - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint64(fd) k.Filter = int16(mode) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go index c81510da2..0e3f25aca 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build darwin,386,!go1.12 +// +build darwin,arm,!go1.12 package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index 58be02e71..fc17a3f23 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -12,10 +12,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ENOTSUP } -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { - return ENOTSUP -} - func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } @@ -24,17 +20,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int32, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. 
- sec, usec, err := gettimeofday(tv) - tv.Sec = int32(sec) - tv.Usec = int32(usec) - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint32(fd) k.Filter = int16(mode) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 1ee931f97..1e91ddf32 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -14,10 +14,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ENOTSUP } -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { - return ENOTSUP -} - func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } @@ -26,17 +22,6 @@ func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: sec, Usec: int32(usec)} } -//sysnb gettimeofday(tp *Timeval) (sec int64, usec int32, err error) -func Gettimeofday(tv *Timeval) (err error) { - // The tv passed to gettimeofday must be non-nil - // but is otherwise unused. The answers come back - // in the two registers. - sec, usec, err := gettimeofday(tv) - tv.Sec = sec - tv.Usec = usec - return err -} - func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint64(fd) k.Filter = int16(mode) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 8c8d50297..8a195ae58 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -12,9 +12,25 @@ package unix -import "unsafe" - -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +import ( + "sync" + "unsafe" +) + +// See version list in https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/param.h +var ( + osreldateOnce sync.Once + osreldate uint32 +) + +// First __DragonFly_version after September 2019 ABI changes +// http://lists.dragonflybsd.org/pipermail/users/2019-September/358280.html +const _dragonflyABIChangeVersion = 500705 + +func supportsABI(ver uint32) bool { + osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) + return osreldate >= ver +} // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. 
type SockaddrDatalink struct { @@ -152,6 +168,8 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + func sysctlUname(mib []_C_int, old *byte, oldlen *uintptr) error { err := sysctl(mib, old, oldlen, nil, 0) if err != nil { diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 25ac9340c..6932e7c2c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -36,8 +36,6 @@ var ( // INO64_FIRST from /usr/src/lib/libc/sys/compat-ino64.h const _ino64First = 1200031 -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL - func supportsABI(ver uint32) bool { osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) return osreldate >= ver @@ -203,6 +201,8 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) @@ -462,8 +462,12 @@ func convertFromDirents11(buf []byte, old []byte) int { dstPos := 0 srcPos := 0 for dstPos+fixedSize < len(buf) && srcPos+oldFixedSize < len(old) { - dstDirent := (*Dirent)(unsafe.Pointer(&buf[dstPos])) - srcDirent := (*dirent_freebsd11)(unsafe.Pointer(&old[srcPos])) + var dstDirent Dirent + var srcDirent dirent_freebsd11 + + // If multiple direntries are written, sometimes when we reach the final one, + // we may have cap of old less than size of dirent_freebsd11. 
+ copy((*[unsafe.Sizeof(srcDirent)]byte)(unsafe.Pointer(&srcDirent))[:], old[srcPos:]) reclen := roundup(fixedSize+int(srcDirent.Namlen)+1, 8) if dstPos+reclen > len(buf) { @@ -479,6 +483,7 @@ func convertFromDirents11(buf []byte, old []byte) int { dstDirent.Pad1 = 0 copy(dstDirent.Name[:], srcDirent.Name[:srcDirent.Namlen]) + copy(buf[dstPos:], (*[unsafe.Sizeof(dstDirent)]byte)(unsafe.Pointer(&dstDirent))[:]) padding := buf[dstPos+fixedSize+int(dstDirent.Namlen) : dstPos+reclen] for i := range padding { padding[i] = 0 @@ -516,20 +521,10 @@ func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { return ptrace(PTRACE_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) } -func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - func PtraceGetRegs(pid int, regsout *Reg) (err error) { return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) } -func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint(countin)} - err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} - func PtraceLwpEvents(pid int, enable int) (err error) { return ptrace(PTRACE_LWPEVENTS, pid, 0, enable) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index dcc56457a..72a506ddc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -54,3 +54,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func PtraceGetFsBase(pid int, fsbase *int64) (err error) { + return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) +} + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} + err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 321c3bace..d5e376aca 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -54,3 +54,13 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func PtraceGetFsBase(pid int, fsbase *int64) (err error) { + return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) +} + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} + err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 697700831..4ea45bce5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ 
b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -54,3 +54,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} + err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index dbbbfd603..aa5326db1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -54,3 +54,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) + +func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)} + err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) + return int(ioDesc.Len), err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_illumos.go b/vendor/golang.org/x/sys/unix/syscall_illumos.go new file mode 100644 index 000000000..99e62dcd8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_illumos.go @@ -0,0 +1,57 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// illumos system calls not present on Solaris. 
+ +// +build amd64,illumos + +package unix + +import "unsafe" + +func bytes2iovec(bs [][]byte) []Iovec { + iovecs := make([]Iovec, len(bs)) + for i, b := range bs { + iovecs[i].SetLen(len(b)) + if len(b) > 0 { + // somehow Iovec.Base on illumos is (*int8), not (*byte) + iovecs[i].Base = (*int8)(unsafe.Pointer(&b[0])) + } else { + iovecs[i].Base = (*int8)(unsafe.Pointer(&_zero)) + } + } + return iovecs +} + +//sys readv(fd int, iovs []Iovec) (n int, err error) + +func Readv(fd int, iovs [][]byte) (n int, err error) { + iovecs := bytes2iovec(iovs) + n, err = readv(fd, iovecs) + return n, err +} + +//sys preadv(fd int, iovs []Iovec, off int64) (n int, err error) + +func Preadv(fd int, iovs [][]byte, off int64) (n int, err error) { + iovecs := bytes2iovec(iovs) + n, err = preadv(fd, iovecs, off) + return n, err +} + +//sys writev(fd int, iovs []Iovec) (n int, err error) + +func Writev(fd int, iovs [][]byte) (n int, err error) { + iovecs := bytes2iovec(iovs) + n, err = writev(fd, iovecs) + return n, err +} + +//sys pwritev(fd int, iovs []Iovec, off int64) (n int, err error) + +func Pwritev(fd int, iovs [][]byte, off int64) (n int, err error) { + iovecs := bytes2iovec(iovs) + n, err = pwritev(fd, iovecs, off) + return n, err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index ebf3195b6..942a4bbf7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -839,6 +839,40 @@ func (sa *SockaddrTIPC) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrTIPC, nil } +// SockaddrL2TPIP implements the Sockaddr interface for IPPROTO_L2TP/AF_INET sockets. +type SockaddrL2TPIP struct { + Addr [4]byte + ConnId uint32 + raw RawSockaddrL2TPIP +} + +func (sa *SockaddrL2TPIP) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_INET + sa.raw.Conn_id = sa.ConnId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP, nil +} + +// SockaddrL2TPIP6 implements the Sockaddr interface for IPPROTO_L2TP/AF_INET6 sockets. 
+type SockaddrL2TPIP6 struct { + Addr [16]byte + ZoneId uint32 + ConnId uint32 + raw RawSockaddrL2TPIP6 +} + +func (sa *SockaddrL2TPIP6) sockaddr() (unsafe.Pointer, _Socklen, error) { + sa.raw.Family = AF_INET6 + sa.raw.Conn_id = sa.ConnId + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP6, nil +} + func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: @@ -884,30 +918,63 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] sa.Name = string(bytes) return sa, nil case AF_INET: - pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) - sa := new(SockaddrInet4) - p := (*[2]byte)(unsafe.Pointer(&pp.Port)) - sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] + proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) + if err != nil { + return nil, err + } + + switch proto { + case IPPROTO_L2TP: + pp := (*RawSockaddrL2TPIP)(unsafe.Pointer(rsa)) + sa := new(SockaddrL2TPIP) + sa.ConnId = pp.Conn_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + default: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil } - return sa, nil case AF_INET6: - pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) - sa := new(SockaddrInet6) - p := (*[2]byte)(unsafe.Pointer(&pp.Port)) - sa.Port = int(p[0])<<8 + int(p[1]) - sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] + proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) + if err != nil { + return nil, err + } + + switch proto { + case IPPROTO_L2TP: + pp := (*RawSockaddrL2TPIP6)(unsafe.Pointer(rsa)) + sa := new(SockaddrL2TPIP6) + sa.ConnId = pp.Conn_id + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + default: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil } - return sa, nil case AF_VSOCK: pp := (*RawSockaddrVM)(unsafe.Pointer(rsa)) @@ -1555,8 +1622,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Acct(path string) (err error) //sys AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) //sys Adjtimex(buf *Timex) (state int, err error) -//sys Capget(hdr *CapUserHeader, data *CapUserData) (err error) -//sys Capset(hdr *CapUserHeader, data *CapUserData) (err error) +//sysnb Capget(hdr *CapUserHeader, data *CapUserData) (err error) +//sysnb Capset(hdr *CapUserHeader, data *CapUserData) (err error) //sys Chdir(path string) (err error) //sys Chroot(path string) (err error) //sys ClockGetres(clockid int32, res *Timespec) (err error) @@ -1566,6 +1633,15 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys DeleteModule(name string, 
flags int) (err error) //sys Dup(oldfd int) (fd int, err error) + +func Dup2(oldfd, newfd int) error { + // Android O and newer blocks dup2; riscv and arm64 don't implement dup2. + if runtime.GOOS == "android" || runtime.GOARCH == "riscv64" || runtime.GOARCH == "arm64" { + return Dup3(oldfd, newfd, 0) + } + return dup2(oldfd, newfd) +} + //sys Dup3(oldfd int, newfd int, flags int) (err error) //sysnb EpollCreate1(flag int) (fd int, err error) //sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) @@ -1575,7 +1651,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fchdir(fd int) (err error) //sys Fchmod(fd int, mode uint32) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) -//sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys Fdatasync(fd int) (err error) //sys Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) //sys FinitModule(fd int, params string, flags int) (err error) @@ -1631,6 +1706,17 @@ func Getpgrp() (pid int) { //sysnb Settimeofday(tv *Timeval) (err error) //sys Setns(fd int, nstype int) (err error) +// PrctlRetInt performs a prctl operation specified by option and further +// optional arguments arg2 through arg5 depending on option. It returns a +// non-negative integer that is returned by the prctl syscall. +func PrctlRetInt(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (int, error) { + ret, _, err := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if err != 0 { + return 0, err + } + return int(ret), nil +} + // issue 1435. // On linux Setuid and Setgid only affects the current thread, not the process. // This does not match what most callers expect so we must return an error @@ -1644,6 +1730,30 @@ func Setgid(uid int) (err error) { return EOPNOTSUPP } +// SetfsgidRetGid sets fsgid for current thread and returns previous fsgid set. +// setfsgid(2) will return a non-nil error only if its caller lacks CAP_SETUID capability. +// If the call fails due to other reasons, current fsgid will be returned. +func SetfsgidRetGid(gid int) (int, error) { + return setfsgid(gid) +} + +// SetfsuidRetUid sets fsuid for current thread and returns previous fsuid set. +// setfsgid(2) will return a non-nil error only if its caller lacks CAP_SETUID capability +// If the call fails due to other reasons, current fsuid will be returned. 
+func SetfsuidRetUid(uid int) (int, error) { + return setfsuid(uid) +} + +func Setfsgid(gid int) error { + _, err := setfsgid(gid) + return err +} + +func Setfsuid(uid int) error { + _, err := setfsuid(uid) + return err +} + func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { return signalfd(fd, sigmask, _C__NSIG/8, flags) } @@ -1656,6 +1766,9 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys Syncfs(fd int) (err error) //sysnb Sysinfo(info *Sysinfo_t) (err error) //sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error) +//sysnb TimerfdCreate(clockid int, flags int) (fd int, err error) +//sysnb TimerfdGettime(fd int, currValue *ItimerSpec) (err error) +//sysnb TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) //sysnb Tgkill(tgid int, tid int, sig syscall.Signal) (err error) //sysnb Times(tms *Tms) (ticks uintptr, err error) //sysnb Umask(mask int) (oldmask int) @@ -1666,6 +1779,123 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys exitThread(code int) (err error) = SYS_EXIT //sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ //sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE +//sys readv(fd int, iovs []Iovec) (n int, err error) = SYS_READV +//sys writev(fd int, iovs []Iovec) (n int, err error) = SYS_WRITEV +//sys preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PREADV +//sys pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PWRITEV +//sys preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PREADV2 +//sys pwritev2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) = SYS_PWRITEV2 + +func bytes2iovec(bs [][]byte) []Iovec { + iovecs := make([]Iovec, len(bs)) + for i, b := range bs { + iovecs[i].SetLen(len(b)) + if len(b) > 0 { + iovecs[i].Base = &b[0] + } else { + iovecs[i].Base = (*byte)(unsafe.Pointer(&_zero)) + } + } + return iovecs +} + +// offs2lohi splits offs into its lower and upper unsigned long. On 64-bit +// systems, hi will always be 0. On 32-bit systems, offs will be split in half. +// preadv/pwritev chose this calling convention so they don't need to add a +// padding-register for alignment on ARM. 
+func offs2lohi(offs int64) (lo, hi uintptr) { + return uintptr(offs), uintptr(uint64(offs) >> SizeofLong) +} + +func Readv(fd int, iovs [][]byte) (n int, err error) { + iovecs := bytes2iovec(iovs) + n, err = readv(fd, iovecs) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { + iovecs := bytes2iovec(iovs) + lo, hi := offs2lohi(offset) + n, err = preadv(fd, iovecs, lo, hi) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Preadv2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { + iovecs := bytes2iovec(iovs) + lo, hi := offs2lohi(offset) + n, err = preadv2(fd, iovecs, lo, hi, flags) + readvRacedetect(iovecs, n, err) + return n, err +} + +func readvRacedetect(iovecs []Iovec, n int, err error) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) + } + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } +} + +func Writev(fd int, iovs [][]byte) (n int, err error) { + iovecs := bytes2iovec(iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = writev(fd, iovecs) + writevRacedetect(iovecs, n) + return n, err +} + +func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { + iovecs := bytes2iovec(iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + lo, hi := offs2lohi(offset) + n, err = pwritev(fd, iovecs, lo, hi) + writevRacedetect(iovecs, n) + return n, err +} + +func Pwritev2(fd int, iovs [][]byte, offset int64, flags int) (n int, err error) { + iovecs := bytes2iovec(iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + lo, hi := offs2lohi(offset) + n, err = pwritev2(fd, iovecs, lo, hi, flags) + writevRacedetect(iovecs, n) + return n, err +} + +func writevRacedetect(iovecs []Iovec, n int) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceReadRange(unsafe.Pointer(iovecs[i].Base), m) + } + } +} // mmap varies by architecture; see syscall_linux_*.go. //sys munmap(addr uintptr, length uintptr) (err error) @@ -1960,7 +2190,6 @@ func Klogset(typ int, arg int) (err error) { // TimerGetoverrun // TimerGettime // TimerSettime -// Timerfd // Tkill (obsolete) // Tuxcall // Umount2 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index e7fa665e6..048d18e3c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -49,7 +49,7 @@ func Pipe2(p []int, flags int) (err error) { // 64-bit file system and 32-bit uid calls // (386 default is 32-bit file system and 16-bit uid). 
-//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64_64 @@ -70,8 +70,8 @@ func Pipe2(p []int, flags int) (err error) { //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 -//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32 -//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32 +//sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 +//sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32 //sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 //sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 //sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 088ce0f93..72efe86ed 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -6,7 +6,7 @@ package unix -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 @@ -55,8 +55,8 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index 11930fc8f..e1913e2c9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -80,7 +80,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // 64-bit file system and 32-bit uid calls // (16-bit uid calls are not always supported in newer kernels) -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fchown(fd int, uid int, gid int) (err error) = SYS_FCHOWN32 @@ -98,8 +98,8 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT -//sys Setfsgid(gid int) (err error) = SYS_SETFSGID32 -//sys Setfsuid(uid int) (err error) = SYS_SETFSUID32 +//sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32 +//sys 
setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32 //sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32 //sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32 //sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index 251e2d971..c6de6b913 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -25,7 +25,7 @@ func EpollCreate(size int) (fd int, err error) { //sysnb Getegid() (egid int) //sysnb Geteuid() (euid int) //sysnb Getgid() (gid int) -//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) +//sysnb getrlimit(resource int, rlim *Rlimit) (err error) //sysnb Getuid() (uid int) //sys Listen(s int, n int) (err error) //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 @@ -42,12 +42,12 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) -//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) +//sysnb setrlimit(resource int, rlim *Rlimit) (err error) //sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) @@ -168,6 +168,24 @@ func Pipe2(p []int, flags int) (err error) { return } +// Getrlimit prefers the prlimit64 system call. See issue 38604. +func Getrlimit(resource int, rlim *Rlimit) error { + err := prlimit(0, resource, nil, rlim) + if err != ENOSYS { + return err + } + return getrlimit(resource, rlim) +} + +// Setrlimit prefers the prlimit64 system call. See issue 38604. +func Setrlimit(resource int, rlim *Rlimit) error { + err := prlimit(0, resource, rlim, nil) + if err != ENOSYS { + return err + } + return setrlimit(resource, rlim) +} + func (r *PtraceRegs) PC() uint64 { return r.Pc } func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc } @@ -192,9 +210,9 @@ func InotifyInit() (fd int, err error) { return InotifyInit1(0) } -func Dup2(oldfd int, newfd int) (err error) { - return Dup3(oldfd, newfd, 0) -} +// dup2 exists because func Dup3 in syscall_linux.go references +// it in an unreachable path. dup2 isn't available on arm64. 
+func dup2(oldfd int, newfd int) error func Pause() error { _, err := ppoll(nil, 0, nil, nil) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 7562fe97b..f0287476c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -7,7 +7,7 @@ package unix -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 @@ -36,8 +36,8 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) @@ -216,6 +216,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } +func InotifyInit() (fd int, err error) { + return InotifyInit1(0) +} + //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) func Poll(fds []PollFd, timeout int) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index a939ff8f2..c11328111 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -14,7 +14,7 @@ import ( func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 @@ -31,8 +31,8 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64 -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 28d6d0f22..349374409 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -7,7 +7,7 @@ package unix -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = 
SYS_FADVISE64 @@ -34,8 +34,8 @@ package unix //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6798c2625..b0b150556 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -41,8 +41,8 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err } //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) @@ -191,10 +191,6 @@ func InotifyInit() (fd int, err error) { return InotifyInit1(0) } -func Dup2(oldfd int, newfd int) (err error) { - return Dup3(oldfd, newfd, 0) -} - func Pause() error { _, err := ppoll(nil, 0, nil, nil) return err @@ -228,3 +224,7 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +// dup2 exists because func Dup3 in syscall_linux.go references +// it in an unreachable path. dup2 isn't available on arm64. 
+func dup2(oldfd int, newfd int) error diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index eb5cb1a71..2363f7499 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -10,7 +10,7 @@ import ( "unsafe" ) -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sysnb EpollCreate(size int) (fd int, err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 @@ -34,8 +34,8 @@ import ( //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index 37321c12e..d389f1518 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -8,7 +8,7 @@ package unix //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 -//sys Dup2(oldfd int, newfd int) (err error) +//sys dup2(oldfd int, newfd int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 @@ -30,8 +30,8 @@ package unix //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) -//sys Setfsgid(gid int) (err error) -//sys Setfsuid(uid int) (err error) +//sys setfsgid(gid int) (prev int, err error) +//sys setfsuid(uid int) (prev int, err error) //sysnb Setregid(rgid int, egid int) (err error) //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index f95463ee2..45b50a610 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -18,8 +18,6 @@ import ( "unsafe" ) -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL - // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. 
type SockaddrDatalink struct { Len uint8 @@ -108,23 +106,6 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -func SysctlClockinfo(name string) (*Clockinfo, error) { - mib, err := sysctlmib(name) - if err != nil { - return nil, err - } - - n := uintptr(SizeofClockinfo) - var ci Clockinfo - if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil { - return nil, err - } - if n != SizeofClockinfo { - return nil, EIO - } - return &ci, nil -} - //sysnb pipe() (fd1 int, fd2 int, err error) func Pipe(p []int) (err error) { if len(p) != 2 { @@ -189,6 +170,8 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) { var value Ptmget err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) @@ -249,6 +232,14 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e return sendfile(outfd, infd, offset, count) } +func Fstatvfs(fd int, buf *Statvfs_t) (err error) { + return Fstatvfs1(fd, buf, ST_WAIT) +} + +func Statvfs(path string, buf *Statvfs_t) (err error) { + return Statvfs1(path, buf, ST_WAIT) +} + /* * Exposed directly */ @@ -262,6 +253,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) +//sys Dup3(from int, to int, flags int) (err error) //sys Exit(code int) //sys ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) //sys ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error) @@ -287,6 +279,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) +//sys Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) = SYS_FSTATVFS1 //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) //sysnb Getegid() (egid int) @@ -343,6 +336,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) //sys Stat(path string, stat *Stat_t) (err error) +//sys Statvfs1(path string, buf *Statvfs_t, flags int) (err error) = SYS_STATVFS1 //sys Symlink(path string, link string) (err error) //sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error) //sys Sync() (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 7fe65ef75..a266e92a9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -18,8 +18,6 @@ import ( "unsafe" ) -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL - // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. 
type SockaddrDatalink struct { Len uint8 @@ -57,23 +55,6 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -func SysctlClockinfo(name string) (*Clockinfo, error) { - mib, err := sysctlmib(name) - if err != nil { - return nil, err - } - - n := uintptr(SizeofClockinfo) - var ci Clockinfo - if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil { - return nil, err - } - if n != SizeofClockinfo { - return nil, EIO - } - return &ci, nil -} - func SysctlUvmexp(name string) (*Uvmexp, error) { mib, err := sysctlmib(name) if err != nil { @@ -91,16 +72,20 @@ func SysctlUvmexp(name string) (*Uvmexp, error) { return &u, nil } -//sysnb pipe(p *[2]_C_int) (err error) func Pipe(p []int) (err error) { + return Pipe2(p, 0) +} + +//sysnb pipe2(p *[2]_C_int, flags int) (err error) +func Pipe2(p []int, flags int) error { if len(p) != 2 { return EINVAL } var pp [2]_C_int - err = pipe(&pp) + err := pipe2(&pp, flags) p[0] = int(pp[0]) p[1] = int(pp[1]) - return + return err } //sys Getdents(fd int, buf []byte) (n int, err error) @@ -180,6 +165,8 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { @@ -248,6 +235,7 @@ func Uname(uname *Utsname) error { //sys Close(fd int) (err error) //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) +//sys Dup3(from int, to int, flags int) (err error) //sys Exit(code int) //sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchdir(fd int) (err error) @@ -352,7 +340,6 @@ func Uname(uname *Utsname) error { // clock_settime // closefrom // execve -// fcntl // fhopen // fhstat // fhstatfs diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 62f968c7f..0e2a696ad 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -391,7 +391,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] sa.Name = string(bytes) return sa, nil diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 3de37566c..400ba9fbc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -12,6 +12,8 @@ import ( "sync" "syscall" "unsafe" + + "golang.org/x/sys/internal/unsafeheader" ) var ( @@ -76,7 +78,7 @@ func SignalName(s syscall.Signal) string { // The signal name should start with "SIG". 
func SignalNum(s string) syscall.Signal { signalNameMapOnce.Do(func() { - signalNameMap = make(map[string]syscall.Signal) + signalNameMap = make(map[string]syscall.Signal, len(signalList)) for _, signal := range signalList { signalNameMap[signal.name] = signal.num } @@ -113,15 +115,12 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d return nil, errno } - // Slice memory layout - var sl = struct { - addr uintptr - len int - cap int - }{addr, length, length} - - // Use unsafe to turn sl into a []byte. - b := *(*[]byte)(unsafe.Pointer(&sl)) + // Use unsafe to convert addr into a []byte. + var b []byte + hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b)) + hdr.Data = unsafe.Pointer(addr) + hdr.Cap = length + hdr.Len = length // Register mapping in m and return it. p := &b[cap(b)-1] diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go index 1def8a581..104994bc6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc.go @@ -459,6 +459,15 @@ const ( MAP_SHARED = 0x1 MAP_TYPE = 0xf0 MAP_VARIABLE = 0x0 + MCAST_BLOCK_SOURCE = 0x40 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x3e + MCAST_JOIN_SOURCE_GROUP = 0x42 + MCAST_LEAVE_GROUP = 0x3f + MCAST_LEAVE_SOURCE_GROUP = 0x43 + MCAST_SOURCE_FILTER = 0x49 + MCAST_UNBLOCK_SOURCE = 0x41 MCL_CURRENT = 0x100 MCL_FUTURE = 0x200 MSG_ANY = 0x4 @@ -483,6 +492,7 @@ const ( MS_INVALIDATE = 0x40 MS_PER_SEC = 0x3e8 MS_SYNC = 0x20 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x4000 NL2 = 0x8000 @@ -688,7 +698,7 @@ const ( SIOCGHIWAT = 0x40047301 SIOCGIFADDR = -0x3fd796df SIOCGIFADDRS = 0x2000698c - SIOCGIFBAUDRATE = -0x3fd79693 + SIOCGIFBAUDRATE = -0x3fdf9669 SIOCGIFBRDADDR = -0x3fd796dd SIOCGIFCONF = -0x3ff796bb SIOCGIFCONFGLOB = -0x3ff79670 diff --git a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go index 03187dea9..4fc8d3064 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_aix_ppc64.go @@ -459,6 +459,15 @@ const ( MAP_SHARED = 0x1 MAP_TYPE = 0xf0 MAP_VARIABLE = 0x0 + MCAST_BLOCK_SOURCE = 0x40 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x3e + MCAST_JOIN_SOURCE_GROUP = 0x42 + MCAST_LEAVE_GROUP = 0x3f + MCAST_LEAVE_SOURCE_GROUP = 0x43 + MCAST_SOURCE_FILTER = 0x49 + MCAST_UNBLOCK_SOURCE = 0x41 MCL_CURRENT = 0x100 MCL_FUTURE = 0x200 MSG_ANY = 0x4 @@ -483,6 +492,7 @@ const ( MS_INVALIDATE = 0x40 MS_PER_SEC = 0x3e8 MS_SYNC = 0x20 + NFDBITS = 0x40 NL0 = 0x0 NL1 = 0x4000 NL2 = 0x8000 @@ -688,7 +698,7 @@ const ( SIOCGHIWAT = 0x40047301 SIOCGIFADDR = -0x3fd796df SIOCGIFADDRS = 0x2000698c - SIOCGIFBAUDRATE = -0x3fd79693 + SIOCGIFBAUDRATE = -0x3fdf9669 SIOCGIFBRDADDR = -0x3fd796dd SIOCGIFCONF = -0x3fef96bb SIOCGIFCONFGLOB = -0x3fef9670 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index b72544fcd..848245873 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -355,6 +355,22 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 + DIOCGATTR = 0xc144648e + DIOCGDELETE = 0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFRONTSTUFF = 0x40086486 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + 
DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x804c6490 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCZONECMD = 0xc06c648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 @@ -379,11 +395,14 @@ const ( DLT_CHAOS = 0x5 DLT_CHDLC = 0x68 DLT_CISCO_IOS = 0x76 + DLT_CLASS_NETBSD_RAWAF = 0x2240000 DLT_C_HDLC = 0x68 DLT_C_HDLC_WITH_DIR = 0xcd DLT_DBUS = 0xe7 DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 @@ -393,6 +412,7 @@ const ( DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 DLT_FC_2 = 0xe0 DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa @@ -406,7 +426,6 @@ const ( DLT_GPRS_LLC = 0xa9 DLT_GSMTAP_ABIS = 0xda DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 DLT_IBM_SN = 0x92 DLT_IBM_SP = 0x91 DLT_IEEE802 = 0x6 @@ -429,6 +448,7 @@ const ( DLT_IPV4 = 0xe4 DLT_IPV6 = 0xe5 DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 DLT_JUNIPER_ATM1 = 0x89 DLT_JUNIPER_ATM2 = 0x87 DLT_JUNIPER_ATM_CEMIC = 0xee @@ -461,8 +481,9 @@ const ( DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x104 + DLT_MATCHING_MAX = 0x113 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -478,14 +499,16 @@ const ( DLT_NFC_LLCP = 0xf5 DLT_NFLOG = 0xef DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x79 DLT_PKTAP = 0x102 DLT_PPI = 0xc0 DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 + DLT_PPP_BSDOS = 0xe DLT_PPP_ETHER = 0x33 DLT_PPP_PPPD = 0xa6 DLT_PPP_SERIAL = 0x32 @@ -496,19 +519,25 @@ const ( DLT_PRONET = 0x4 DLT_RAIF1 = 0xc6 DLT_RAW = 0xc + DLT_RDS = 0x109 + DLT_REDBACK_SMARTEDGE = 0x20 DLT_RIO = 0x7c DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c DLT_SITA = 0xc4 DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf + DLT_SLIP_BSDOS = 0xd DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d DLT_TZSP = 0x80 DLT_USB = 0xba DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a + DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc DLT_USER0 = 0x93 @@ -527,10 +556,14 @@ const ( DLT_USER7 = 0x9a DLT_USER8 = 0x9b DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f + DLT_WATTSTOPPER_DLM = 0x107 DLT_WIHART = 0xdf DLT_WIRESHARK_UPPER_PDU = 0xfc DLT_X2E_SERIAL = 0xd5 DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -548,6 +581,7 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd EVFILT_FS = -0x9 EVFILT_LIO = -0xa EVFILT_PROC = -0x5 @@ -555,11 +589,12 @@ const ( EVFILT_READ = -0x1 EVFILT_SENDFILE = -0xc EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc + EVFILT_SYSCOUNT = 0xd EVFILT_TIMER = -0x7 EVFILT_USER = -0xb EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 @@ -576,6 +611,7 @@ const ( EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff EXTATTR_NAMESPACE_EMPTY = 0x0 EXTATTR_NAMESPACE_SYSTEM = 0x2 EXTATTR_NAMESPACE_USER = 0x1 @@ -617,6 +653,7 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 @@ -633,6 +670,7 @@ const ( IFF_MONITOR = 0x40000 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PPROMISC = 0x20000 
@@ -807,6 +845,7 @@ const ( IPV6_DSTOPTS = 0x32 IPV6_FLOWID = 0x43 IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWTYPE = 0x44 IPV6_FRAGTTL = 0x78 @@ -827,13 +866,13 @@ const ( IPV6_MAX_GROUP_SRC_FILTER = 0x200 IPV6_MAX_MEMBERSHIPS = 0xfff IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 IPV6_PATHMTU = 0x2c IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe @@ -845,6 +884,7 @@ const ( IPV6_RECVFLOWID = 0x46 IPV6_RECVHOPLIMIT = 0x25 IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 IPV6_RECVPATHMTU = 0x2b IPV6_RECVPKTINFO = 0x24 IPV6_RECVRSSBUCKETID = 0x47 @@ -905,10 +945,8 @@ const ( IP_MAX_MEMBERSHIPS = 0xfff IP_MAX_SOCK_MUTE_FILTER = 0x80 IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 IP_MF = 0x2000 IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f IP_MSFILTER = 0x4a IP_MSS = 0x240 IP_MULTICAST_IF = 0x9 @@ -918,6 +956,7 @@ const ( IP_OFFMASK = 0x1fff IP_ONESBCAST = 0x17 IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b IP_PORTRANGE = 0x13 IP_PORTRANGE_DEFAULT = 0x0 IP_PORTRANGE_HIGH = 0x1 @@ -926,6 +965,7 @@ const ( IP_RECVFLOWID = 0x5d IP_RECVIF = 0x14 IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b IP_RECVRETOPTS = 0x6 IP_RECVRSSBUCKETID = 0x5e IP_RECVTOS = 0x44 @@ -975,6 +1015,7 @@ const ( MAP_EXCL = 0x4000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_GUARD = 0x2000 MAP_HASSEMAPHORE = 0x200 MAP_NOCORE = 0x20000 MAP_NOSYNC = 0x800 @@ -986,6 +1027,15 @@ const ( MAP_RESERVED0100 = 0x100 MAP_SHARED = 0x1 MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ACLS = 0x8000000 @@ -1026,10 +1076,12 @@ const ( MNT_SUSPEND = 0x4 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 MNT_UPDATE = 0x10000 - MNT_UPDATEMASK = 0x2d8d0807e + MNT_UPDATEMASK = 0xad8d0807e MNT_USER = 0x8000 - MNT_VISFLAGMASK = 0x3fef0ffff + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 @@ -1058,6 +1110,7 @@ const ( NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 NOTE_CLOSE = 0x100 @@ -1212,7 +1265,6 @@ const ( RTV_WEIGHT = 0x100 RT_ALL_FIBS = -0x1 RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 RT_DEFAULT_FIB = 0x0 RT_HAS_GW = 0x80 RT_HAS_HEADER = 0x10 @@ -1222,15 +1274,17 @@ const ( RT_LLE_CACHE = 0x100 RT_MAY_LOOP = 0x8 RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 RT_REJECT = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_BINTIME = 0x4 SCM_CREDS = 0x3 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1246,6 +1300,7 @@ const ( SIOCGETSGCNT = 0xc0147210 SIOCGETVIFCNT = 0xc014720f SIOCGHIWAT = 0x40047301 + SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 SIOCGIFBRDADDR = 0xc0206923 @@ -1267,8 +1322,11 @@ const ( SIOCGIFPDSTADDR = 0xc0206948 SIOCGIFPHYS = 0xc0206935 SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 SIOCGIFSTATUS = 0xc331693b SIOCGIFXMEDIA = 0xc028698b + SIOCGLANPCP = 0xc0206998 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 
SIOCGPRIVATE_0 = 0xc0206950 @@ -1299,6 +1357,7 @@ const ( SIOCSIFPHYS = 0x80206936 SIOCSIFRVNET = 0xc020695b SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 SIOCSTUNFIB = 0x8020695f @@ -1317,6 +1376,7 @@ const ( SO_BINTIME = 0x2000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1325,6 +1385,7 @@ const ( SO_LISTENINCQLEN = 0x1013 SO_LISTENQLEN = 0x1012 SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 SO_NOSIGPIPE = 0x800 SO_NO_DDP = 0x8000 SO_NO_OFFLOAD = 0x4000 @@ -1337,11 +1398,19 @@ const ( SO_RCVTIMEO = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 SO_SETFIB = 0x1014 SO_SNDBUF = 0x1001 SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 @@ -1385,10 +1454,45 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DELACK = 0x48 TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 TCP_FUNCTION_BLK = 0x2000 TCP_FUNCTION_NAME_LEN_MAX = 0x20 TCP_INFO = 0x20 @@ -1396,6 +1500,12 @@ const ( TCP_KEEPIDLE = 0x100 TCP_KEEPINIT = 0x80 TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOG_ID_LEN = 0x40 TCP_MAXBURST = 0x4 TCP_MAXHLEN = 0x3c TCP_MAXOLEN = 0x28 @@ -1411,8 +1521,30 @@ const ( TCP_NOPUSH = 0x4 TCP_PCAP_IN = 0x1000 TCP_PCAP_OUT = 0x800 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_SESS_CWV = 0x42a + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 TCP_VENDOR = 0x80000000 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1476,6 +1608,8 @@ const ( TIOCTIMESTAMP = 0x40087459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 
+ UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1487,6 +1621,8 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_BCACHE_SIZE_MAX = 0x70e0000 + VM_SWZONE_SIZE_MAX = 0x2280000 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 9f382678e..4acd101c3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -355,6 +355,22 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 + DIOCGATTR = 0xc148648e + DIOCGDELETE = 0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFRONTSTUFF = 0x40086486 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x80506490 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCZONECMD = 0xc080648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 @@ -379,11 +395,14 @@ const ( DLT_CHAOS = 0x5 DLT_CHDLC = 0x68 DLT_CISCO_IOS = 0x76 + DLT_CLASS_NETBSD_RAWAF = 0x2240000 DLT_C_HDLC = 0x68 DLT_C_HDLC_WITH_DIR = 0xcd DLT_DBUS = 0xe7 DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 @@ -393,6 +412,7 @@ const ( DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 DLT_FC_2 = 0xe0 DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa @@ -406,7 +426,6 @@ const ( DLT_GPRS_LLC = 0xa9 DLT_GSMTAP_ABIS = 0xda DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 DLT_IBM_SN = 0x92 DLT_IBM_SP = 0x91 DLT_IEEE802 = 0x6 @@ -429,6 +448,7 @@ const ( DLT_IPV4 = 0xe4 DLT_IPV6 = 0xe5 DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 0x108 DLT_JUNIPER_ATM1 = 0x89 DLT_JUNIPER_ATM2 = 0x87 DLT_JUNIPER_ATM_CEMIC = 0xee @@ -461,8 +481,9 @@ const ( DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x104 + DLT_MATCHING_MAX = 0x113 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -478,14 +499,16 @@ const ( DLT_NFC_LLCP = 0xf5 DLT_NFLOG = 0xef DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x79 DLT_PKTAP = 0x102 DLT_PPI = 0xc0 DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 + DLT_PPP_BSDOS = 0xe DLT_PPP_ETHER = 0x33 DLT_PPP_PPPD = 0xa6 DLT_PPP_SERIAL = 0x32 @@ -496,19 +519,25 @@ const ( DLT_PRONET = 0x4 DLT_RAIF1 = 0xc6 DLT_RAW = 0xc + DLT_RDS = 0x109 + DLT_REDBACK_SMARTEDGE = 0x20 DLT_RIO = 0x7c DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c DLT_SITA = 0xc4 DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf + DLT_SLIP_BSDOS = 0xd DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d DLT_TZSP = 0x80 DLT_USB = 0xba DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a + DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc DLT_USER0 = 0x93 @@ -527,10 +556,14 @@ const ( DLT_USER7 = 0x9a DLT_USER8 = 0x9b DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f + DLT_WATTSTOPPER_DLM = 0x107 DLT_WIHART = 0xdf DLT_WIRESHARK_UPPER_PDU = 0xfc DLT_X2E_SERIAL = 0xd5 DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -548,6 +581,7 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 
+ EVFILT_EMPTY = -0xd EVFILT_FS = -0x9 EVFILT_LIO = -0xa EVFILT_PROC = -0x5 @@ -555,11 +589,12 @@ const ( EVFILT_READ = -0x1 EVFILT_SENDFILE = -0xc EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc + EVFILT_SYSCOUNT = 0xd EVFILT_TIMER = -0x7 EVFILT_USER = -0xb EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 @@ -576,6 +611,7 @@ const ( EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff EXTATTR_NAMESPACE_EMPTY = 0x0 EXTATTR_NAMESPACE_SYSTEM = 0x2 EXTATTR_NAMESPACE_USER = 0x1 @@ -617,6 +653,7 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 @@ -633,6 +670,7 @@ const ( IFF_MONITOR = 0x40000 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PPROMISC = 0x20000 @@ -807,6 +845,7 @@ const ( IPV6_DSTOPTS = 0x32 IPV6_FLOWID = 0x43 IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWTYPE = 0x44 IPV6_FRAGTTL = 0x78 @@ -827,13 +866,13 @@ const ( IPV6_MAX_GROUP_SRC_FILTER = 0x200 IPV6_MAX_MEMBERSHIPS = 0xfff IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 IPV6_PATHMTU = 0x2c IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe @@ -845,6 +884,7 @@ const ( IPV6_RECVFLOWID = 0x46 IPV6_RECVHOPLIMIT = 0x25 IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 IPV6_RECVPATHMTU = 0x2b IPV6_RECVPKTINFO = 0x24 IPV6_RECVRSSBUCKETID = 0x47 @@ -905,10 +945,8 @@ const ( IP_MAX_MEMBERSHIPS = 0xfff IP_MAX_SOCK_MUTE_FILTER = 0x80 IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 IP_MF = 0x2000 IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f IP_MSFILTER = 0x4a IP_MSS = 0x240 IP_MULTICAST_IF = 0x9 @@ -918,6 +956,7 @@ const ( IP_OFFMASK = 0x1fff IP_ONESBCAST = 0x17 IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b IP_PORTRANGE = 0x13 IP_PORTRANGE_DEFAULT = 0x0 IP_PORTRANGE_HIGH = 0x1 @@ -926,6 +965,7 @@ const ( IP_RECVFLOWID = 0x5d IP_RECVIF = 0x14 IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b IP_RECVRETOPTS = 0x6 IP_RECVRSSBUCKETID = 0x5e IP_RECVTOS = 0x44 @@ -976,6 +1016,7 @@ const ( MAP_EXCL = 0x4000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_GUARD = 0x2000 MAP_HASSEMAPHORE = 0x200 MAP_NOCORE = 0x20000 MAP_NOSYNC = 0x800 @@ -987,6 +1028,15 @@ const ( MAP_RESERVED0100 = 0x100 MAP_SHARED = 0x1 MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ACLS = 0x8000000 @@ -1027,10 +1077,12 @@ const ( MNT_SUSPEND = 0x4 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 MNT_UPDATE = 0x10000 - MNT_UPDATEMASK = 0x2d8d0807e + MNT_UPDATEMASK = 0xad8d0807e MNT_USER = 0x8000 - MNT_VISFLAGMASK = 0x3fef0ffff + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 @@ -1059,6 +1111,7 @@ const ( NFDBITS = 0x40 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 NOTE_CLOSE = 0x100 @@ -1213,7 +1266,6 @@ const ( RTV_WEIGHT = 0x100 RT_ALL_FIBS = -0x1 RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 RT_DEFAULT_FIB = 
0x0 RT_HAS_GW = 0x80 RT_HAS_HEADER = 0x10 @@ -1223,15 +1275,17 @@ const ( RT_LLE_CACHE = 0x100 RT_MAY_LOOP = 0x8 RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 RT_REJECT = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_BINTIME = 0x4 SCM_CREDS = 0x3 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1247,6 +1301,7 @@ const ( SIOCGETSGCNT = 0xc0207210 SIOCGETVIFCNT = 0xc028720f SIOCGHIWAT = 0x40047301 + SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 SIOCGIFBRDADDR = 0xc0206923 @@ -1268,8 +1323,11 @@ const ( SIOCGIFPDSTADDR = 0xc0206948 SIOCGIFPHYS = 0xc0206935 SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 SIOCGIFSTATUS = 0xc331693b SIOCGIFXMEDIA = 0xc030698b + SIOCGLANPCP = 0xc0206998 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCGPRIVATE_0 = 0xc0206950 @@ -1300,6 +1358,7 @@ const ( SIOCSIFPHYS = 0x80206936 SIOCSIFRVNET = 0xc020695b SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 SIOCSTUNFIB = 0x8020695f @@ -1318,6 +1377,7 @@ const ( SO_BINTIME = 0x2000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1326,6 +1386,7 @@ const ( SO_LISTENINCQLEN = 0x1013 SO_LISTENQLEN = 0x1012 SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 SO_NOSIGPIPE = 0x800 SO_NO_DDP = 0x8000 SO_NO_OFFLOAD = 0x4000 @@ -1338,11 +1399,19 @@ const ( SO_RCVTIMEO = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 SO_SETFIB = 0x1014 SO_SNDBUF = 0x1001 SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 @@ -1386,10 +1455,45 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DELACK = 0x48 TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 TCP_FUNCTION_BLK = 0x2000 TCP_FUNCTION_NAME_LEN_MAX = 0x20 TCP_INFO = 0x20 @@ -1397,6 +1501,12 @@ const ( TCP_KEEPIDLE = 0x100 TCP_KEEPINIT = 0x80 TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOG_ID_LEN = 0x40 TCP_MAXBURST = 0x4 TCP_MAXHLEN = 0x3c TCP_MAXOLEN = 0x28 @@ -1412,8 
+1522,30 @@ const ( TCP_NOPUSH = 0x4 TCP_PCAP_IN = 0x1000 TCP_PCAP_OUT = 0x800 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_SESS_CWV = 0x42a + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 TCP_VENDOR = 0x80000000 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1477,6 +1609,8 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 16db56abc..e4719873b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -355,6 +355,22 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 + DIOCGATTR = 0xc144648e + DIOCGDELETE = 0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFRONTSTUFF = 0x40086486 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x804c6490 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCZONECMD = 0xc06c648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index 1a1de3454..5e49769d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -355,6 +355,22 @@ const ( CTL_KERN = 0x1 CTL_MAXNAME = 0x18 CTL_NET = 0x4 + DIOCGATTR = 0xc148648e + DIOCGDELETE = 0x80106488 + DIOCGFLUSH = 0x20006487 + DIOCGFRONTSTUFF = 0x40086486 + DIOCGFWHEADS = 0x40046483 + DIOCGFWSECTORS = 0x40046482 + DIOCGIDENT = 0x41006489 + DIOCGMEDIASIZE = 0x40086481 + DIOCGPHYSPATH = 0x4400648d + DIOCGPROVIDERNAME = 0x4400648a + DIOCGSECTORSIZE = 0x40046480 + DIOCGSTRIPEOFFSET = 0x4008648c + DIOCGSTRIPESIZE = 0x4008648b + DIOCSKERNELDUMP = 0x80506490 + DIOCSKERNELDUMP_FREEBSD11 = 0x80046485 + DIOCZONECMD = 0xc080648f DLT_A429 = 0xb8 DLT_A653_ICM = 0xb9 DLT_AIRONET_HEADER = 0x78 @@ -379,11 +395,14 @@ const ( DLT_CHAOS = 0x5 DLT_CHDLC = 0x68 DLT_CISCO_IOS = 0x76 + DLT_CLASS_NETBSD_RAWAF = 0x2240000 DLT_C_HDLC = 0x68 DLT_C_HDLC_WITH_DIR = 0xcd DLT_DBUS = 0xe7 DLT_DECT = 0xdd + DLT_DISPLAYPORT_AUX = 0x113 DLT_DOCSIS = 0x8f + DLT_DOCSIS31_XRA31 = 0x111 DLT_DVB_CI = 0xeb DLT_ECONET = 0x73 DLT_EN10MB = 0x1 @@ -393,6 +412,7 @@ const ( DLT_ERF = 0xc5 DLT_ERF_ETH = 0xaf DLT_ERF_POS = 0xb0 + DLT_ETHERNET_MPACKET = 0x112 DLT_FC_2 = 0xe0 DLT_FC_2_WITH_FRAME_DELIMS = 0xe1 DLT_FDDI = 0xa @@ -406,7 +426,6 @@ const ( DLT_GPRS_LLC = 0xa9 DLT_GSMTAP_ABIS = 0xda DLT_GSMTAP_UM = 0xd9 - DLT_HHDLC = 0x79 DLT_IBM_SN = 0x92 DLT_IBM_SP = 0x91 DLT_IEEE802 = 0x6 @@ -429,6 +448,7 @@ const ( DLT_IPV4 = 0xe4 DLT_IPV6 = 0xe5 DLT_IP_OVER_FC = 0x7a + DLT_ISO_14443 = 
0x108 DLT_JUNIPER_ATM1 = 0x89 DLT_JUNIPER_ATM2 = 0x87 DLT_JUNIPER_ATM_CEMIC = 0xee @@ -461,8 +481,9 @@ const ( DLT_LINUX_PPP_WITHDIRECTION = 0xa6 DLT_LINUX_SLL = 0x71 DLT_LOOP = 0x6c + DLT_LORATAP = 0x10e DLT_LTALK = 0x72 - DLT_MATCHING_MAX = 0x104 + DLT_MATCHING_MAX = 0x113 DLT_MATCHING_MIN = 0x68 DLT_MFR = 0xb6 DLT_MOST = 0xd3 @@ -478,14 +499,16 @@ const ( DLT_NFC_LLCP = 0xf5 DLT_NFLOG = 0xef DLT_NG40 = 0xf4 + DLT_NORDIC_BLE = 0x110 DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PCI_EXP = 0x7d DLT_PFLOG = 0x75 DLT_PFSYNC = 0x79 DLT_PKTAP = 0x102 DLT_PPI = 0xc0 DLT_PPP = 0x9 - DLT_PPP_BSDOS = 0x10 + DLT_PPP_BSDOS = 0xe DLT_PPP_ETHER = 0x33 DLT_PPP_PPPD = 0xa6 DLT_PPP_SERIAL = 0x32 @@ -496,19 +519,25 @@ const ( DLT_PRONET = 0x4 DLT_RAIF1 = 0xc6 DLT_RAW = 0xc + DLT_RDS = 0x109 + DLT_REDBACK_SMARTEDGE = 0x20 DLT_RIO = 0x7c DLT_RTAC_SERIAL = 0xfa DLT_SCCP = 0x8e DLT_SCTP = 0xf8 + DLT_SDLC = 0x10c DLT_SITA = 0xc4 DLT_SLIP = 0x8 - DLT_SLIP_BSDOS = 0xf + DLT_SLIP_BSDOS = 0xd DLT_STANAG_5066_D_PDU = 0xed DLT_SUNATM = 0x7b DLT_SYMANTEC_FIREWALL = 0x63 + DLT_TI_LLN_SNIFFER = 0x10d DLT_TZSP = 0x80 DLT_USB = 0xba DLT_USBPCAP = 0xf9 + DLT_USB_DARWIN = 0x10a + DLT_USB_FREEBSD = 0xba DLT_USB_LINUX = 0xbd DLT_USB_LINUX_MMAPPED = 0xdc DLT_USER0 = 0x93 @@ -527,10 +556,14 @@ const ( DLT_USER7 = 0x9a DLT_USER8 = 0x9b DLT_USER9 = 0x9c + DLT_VSOCK = 0x10f + DLT_WATTSTOPPER_DLM = 0x107 DLT_WIHART = 0xdf DLT_WIRESHARK_UPPER_PDU = 0xfc DLT_X2E_SERIAL = 0xd5 DLT_X2E_XORAYA = 0xd6 + DLT_ZWAVE_R1_R2 = 0x105 + DLT_ZWAVE_R3 = 0x106 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -548,6 +581,7 @@ const ( ECHONL = 0x10 ECHOPRT = 0x20 EVFILT_AIO = -0x3 + EVFILT_EMPTY = -0xd EVFILT_FS = -0x9 EVFILT_LIO = -0xa EVFILT_PROC = -0x5 @@ -555,11 +589,12 @@ const ( EVFILT_READ = -0x1 EVFILT_SENDFILE = -0xc EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0xc + EVFILT_SYSCOUNT = 0xd EVFILT_TIMER = -0x7 EVFILT_USER = -0xb EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVNAMEMAP_NAME_SIZE = 0x40 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 @@ -576,6 +611,7 @@ const ( EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 + EXTATTR_MAXNAMELEN = 0xff EXTATTR_NAMESPACE_EMPTY = 0x0 EXTATTR_NAMESPACE_SYSTEM = 0x2 EXTATTR_NAMESPACE_USER = 0x1 @@ -617,6 +653,7 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 + IFCAP_WOL_MAGIC = 0x2000 IFF_ALLMULTI = 0x200 IFF_ALTPHYS = 0x4000 IFF_BROADCAST = 0x2 @@ -633,6 +670,7 @@ const ( IFF_MONITOR = 0x40000 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 + IFF_NOGROUP = 0x800000 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PPROMISC = 0x20000 @@ -807,6 +845,7 @@ const ( IPV6_DSTOPTS = 0x32 IPV6_FLOWID = 0x43 IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_LEN = 0x14 IPV6_FLOWLABEL_MASK = 0xffff0f00 IPV6_FLOWTYPE = 0x44 IPV6_FRAGTTL = 0x78 @@ -827,13 +866,13 @@ const ( IPV6_MAX_GROUP_SRC_FILTER = 0x200 IPV6_MAX_MEMBERSHIPS = 0xfff IPV6_MAX_SOCK_SRC_FILTER = 0x80 - IPV6_MIN_MEMBERSHIPS = 0x1f IPV6_MMTU = 0x500 IPV6_MSFILTER = 0x4a IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 IPV6_MULTICAST_LOOP = 0xb IPV6_NEXTHOP = 0x30 + IPV6_ORIGDSTADDR = 0x48 IPV6_PATHMTU = 0x2c IPV6_PKTINFO = 0x2e IPV6_PORTRANGE = 0xe @@ -845,6 +884,7 @@ const ( IPV6_RECVFLOWID = 0x46 IPV6_RECVHOPLIMIT = 0x25 IPV6_RECVHOPOPTS = 0x27 + IPV6_RECVORIGDSTADDR = 0x48 IPV6_RECVPATHMTU = 0x2b IPV6_RECVPKTINFO = 0x24 IPV6_RECVRSSBUCKETID = 0x47 @@ -905,10 +945,8 @@ const ( IP_MAX_MEMBERSHIPS = 0xfff IP_MAX_SOCK_MUTE_FILTER = 0x80 IP_MAX_SOCK_SRC_FILTER = 0x80 - IP_MAX_SOURCE_FILTER = 0x400 IP_MF = 0x2000 IP_MINTTL = 0x42 - IP_MIN_MEMBERSHIPS = 0x1f 
IP_MSFILTER = 0x4a IP_MSS = 0x240 IP_MULTICAST_IF = 0x9 @@ -918,6 +956,7 @@ const ( IP_OFFMASK = 0x1fff IP_ONESBCAST = 0x17 IP_OPTIONS = 0x1 + IP_ORIGDSTADDR = 0x1b IP_PORTRANGE = 0x13 IP_PORTRANGE_DEFAULT = 0x0 IP_PORTRANGE_HIGH = 0x1 @@ -926,6 +965,7 @@ const ( IP_RECVFLOWID = 0x5d IP_RECVIF = 0x14 IP_RECVOPTS = 0x5 + IP_RECVORIGDSTADDR = 0x1b IP_RECVRETOPTS = 0x6 IP_RECVRSSBUCKETID = 0x5e IP_RECVTOS = 0x44 @@ -976,6 +1016,7 @@ const ( MAP_EXCL = 0x4000 MAP_FILE = 0x0 MAP_FIXED = 0x10 + MAP_GUARD = 0x2000 MAP_HASSEMAPHORE = 0x200 MAP_NOCORE = 0x20000 MAP_NOSYNC = 0x800 @@ -987,6 +1028,15 @@ const ( MAP_RESERVED0100 = 0x100 MAP_SHARED = 0x1 MAP_STACK = 0x400 + MCAST_BLOCK_SOURCE = 0x54 + MCAST_EXCLUDE = 0x2 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x50 + MCAST_JOIN_SOURCE_GROUP = 0x52 + MCAST_LEAVE_GROUP = 0x51 + MCAST_LEAVE_SOURCE_GROUP = 0x53 + MCAST_UNBLOCK_SOURCE = 0x55 + MCAST_UNDEFINED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ACLS = 0x8000000 @@ -1027,10 +1077,12 @@ const ( MNT_SUSPEND = 0x4 MNT_SYNCHRONOUS = 0x2 MNT_UNION = 0x20 + MNT_UNTRUSTED = 0x800000000 MNT_UPDATE = 0x10000 - MNT_UPDATEMASK = 0x2d8d0807e + MNT_UPDATEMASK = 0xad8d0807e MNT_USER = 0x8000 - MNT_VISFLAGMASK = 0x3fef0ffff + MNT_VERIFIED = 0x400000000 + MNT_VISFLAGMASK = 0xffef0ffff MNT_WAIT = 0x1 MSG_CMSG_CLOEXEC = 0x40000 MSG_COMPAT = 0x8000 @@ -1059,6 +1111,7 @@ const ( NFDBITS = 0x40 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 + NOTE_ABSTIME = 0x10 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 NOTE_CLOSE = 0x100 @@ -1213,7 +1266,6 @@ const ( RTV_WEIGHT = 0x100 RT_ALL_FIBS = -0x1 RT_BLACKHOLE = 0x40 - RT_CACHING_CONTEXT = 0x1 RT_DEFAULT_FIB = 0x0 RT_HAS_GW = 0x80 RT_HAS_HEADER = 0x10 @@ -1223,15 +1275,17 @@ const ( RT_LLE_CACHE = 0x100 RT_MAY_LOOP = 0x8 RT_MAY_LOOP_BIT = 0x3 - RT_NORTREF = 0x2 RT_REJECT = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 SCM_BINTIME = 0x4 SCM_CREDS = 0x3 + SCM_MONOTONIC = 0x6 + SCM_REALTIME = 0x5 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 + SCM_TIME_INFO = 0x7 SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1247,6 +1301,7 @@ const ( SIOCGETSGCNT = 0xc0207210 SIOCGETVIFCNT = 0xc028720f SIOCGHIWAT = 0x40047301 + SIOCGHWADDR = 0xc020693e SIOCGI2C = 0xc020693d SIOCGIFADDR = 0xc0206921 SIOCGIFBRDADDR = 0xc0206923 @@ -1268,8 +1323,11 @@ const ( SIOCGIFPDSTADDR = 0xc0206948 SIOCGIFPHYS = 0xc0206935 SIOCGIFPSRCADDR = 0xc0206947 + SIOCGIFRSSHASH = 0xc0186997 + SIOCGIFRSSKEY = 0xc0946996 SIOCGIFSTATUS = 0xc331693b SIOCGIFXMEDIA = 0xc030698b + SIOCGLANPCP = 0xc0206998 SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCGPRIVATE_0 = 0xc0206950 @@ -1300,6 +1358,7 @@ const ( SIOCSIFPHYS = 0x80206936 SIOCSIFRVNET = 0xc020695b SIOCSIFVNET = 0xc020695a + SIOCSLANPCP = 0x80206999 SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 SIOCSTUNFIB = 0x8020695f @@ -1318,6 +1377,7 @@ const ( SO_BINTIME = 0x2000 SO_BROADCAST = 0x20 SO_DEBUG = 0x1 + SO_DOMAIN = 0x1019 SO_DONTROUTE = 0x10 SO_ERROR = 0x1007 SO_KEEPALIVE = 0x8 @@ -1326,6 +1386,7 @@ const ( SO_LISTENINCQLEN = 0x1013 SO_LISTENQLEN = 0x1012 SO_LISTENQLIMIT = 0x1011 + SO_MAX_PACING_RATE = 0x1018 SO_NOSIGPIPE = 0x800 SO_NO_DDP = 0x8000 SO_NO_OFFLOAD = 0x4000 @@ -1338,11 +1399,19 @@ const ( SO_RCVTIMEO = 0x1006 SO_REUSEADDR = 0x4 SO_REUSEPORT = 0x200 + SO_REUSEPORT_LB = 0x10000 SO_SETFIB = 0x1014 SO_SNDBUF = 0x1001 SO_SNDLOWAT = 0x1003 SO_SNDTIMEO = 0x1005 SO_TIMESTAMP = 0x400 + SO_TS_BINTIME = 0x1 + SO_TS_CLOCK = 0x1017 + SO_TS_CLOCK_MAX = 0x3 + SO_TS_DEFAULT = 0x0 + SO_TS_MONOTONIC = 0x3 + SO_TS_REALTIME = 0x2 + SO_TS_REALTIME_MICRO = 0x0 SO_TYPE = 
0x1008 SO_USELOOPBACK = 0x40 SO_USER_COOKIE = 0x1015 @@ -1386,10 +1455,45 @@ const ( TCOFLUSH = 0x2 TCOOFF = 0x1 TCOON = 0x2 + TCP_BBR_ACK_COMP_ALG = 0x448 + TCP_BBR_DRAIN_INC_EXTRA = 0x43c + TCP_BBR_DRAIN_PG = 0x42e + TCP_BBR_EXTRA_GAIN = 0x449 + TCP_BBR_IWINTSO = 0x42b + TCP_BBR_LOWGAIN_FD = 0x436 + TCP_BBR_LOWGAIN_HALF = 0x435 + TCP_BBR_LOWGAIN_THRESH = 0x434 + TCP_BBR_MAX_RTO = 0x439 + TCP_BBR_MIN_RTO = 0x438 + TCP_BBR_ONE_RETRAN = 0x431 + TCP_BBR_PACE_CROSS = 0x442 + TCP_BBR_PACE_DEL_TAR = 0x43f + TCP_BBR_PACE_PER_SEC = 0x43e + TCP_BBR_PACE_SEG_MAX = 0x440 + TCP_BBR_PACE_SEG_MIN = 0x441 + TCP_BBR_PROBE_RTT_GAIN = 0x44d + TCP_BBR_PROBE_RTT_INT = 0x430 + TCP_BBR_PROBE_RTT_LEN = 0x44e + TCP_BBR_RACK_RTT_USE = 0x44a + TCP_BBR_RECFORCE = 0x42c + TCP_BBR_REC_OVER_HPTS = 0x43a + TCP_BBR_RETRAN_WTSO = 0x44b + TCP_BBR_RWND_IS_APP = 0x42f + TCP_BBR_STARTUP_EXIT_EPOCH = 0x43d + TCP_BBR_STARTUP_LOSS_EXIT = 0x432 + TCP_BBR_STARTUP_PG = 0x42d + TCP_BBR_UNLIMITED = 0x43b + TCP_BBR_USEDEL_RATE = 0x437 + TCP_BBR_USE_LOWGAIN = 0x433 TCP_CA_NAME_MAX = 0x10 TCP_CCALGOOPT = 0x41 TCP_CONGESTION = 0x40 + TCP_DATA_AFTER_CLOSE = 0x44c + TCP_DELACK = 0x48 TCP_FASTOPEN = 0x401 + TCP_FASTOPEN_MAX_COOKIE_LEN = 0x10 + TCP_FASTOPEN_MIN_COOKIE_LEN = 0x4 + TCP_FASTOPEN_PSK_LEN = 0x10 TCP_FUNCTION_BLK = 0x2000 TCP_FUNCTION_NAME_LEN_MAX = 0x20 TCP_INFO = 0x20 @@ -1397,6 +1501,12 @@ const ( TCP_KEEPIDLE = 0x100 TCP_KEEPINIT = 0x80 TCP_KEEPINTVL = 0x200 + TCP_LOG = 0x22 + TCP_LOGBUF = 0x23 + TCP_LOGDUMP = 0x25 + TCP_LOGDUMPID = 0x26 + TCP_LOGID = 0x24 + TCP_LOG_ID_LEN = 0x40 TCP_MAXBURST = 0x4 TCP_MAXHLEN = 0x3c TCP_MAXOLEN = 0x28 @@ -1412,8 +1522,30 @@ const ( TCP_NOPUSH = 0x4 TCP_PCAP_IN = 0x1000 TCP_PCAP_OUT = 0x800 + TCP_RACK_EARLY_RECOV = 0x423 + TCP_RACK_EARLY_SEG = 0x424 + TCP_RACK_IDLE_REDUCE_HIGH = 0x444 + TCP_RACK_MIN_PACE = 0x445 + TCP_RACK_MIN_PACE_SEG = 0x446 + TCP_RACK_MIN_TO = 0x422 + TCP_RACK_PACE_ALWAYS = 0x41f + TCP_RACK_PACE_MAX_SEG = 0x41e + TCP_RACK_PACE_REDUCE = 0x41d + TCP_RACK_PKT_DELAY = 0x428 + TCP_RACK_PROP = 0x41b + TCP_RACK_PROP_RATE = 0x420 + TCP_RACK_PRR_SENDALOT = 0x421 + TCP_RACK_REORD_FADE = 0x426 + TCP_RACK_REORD_THRESH = 0x425 + TCP_RACK_SESS_CWV = 0x42a + TCP_RACK_TLP_INC_VAR = 0x429 + TCP_RACK_TLP_REDUCE = 0x41c + TCP_RACK_TLP_THRESH = 0x427 + TCP_RACK_TLP_USE = 0x447 TCP_VENDOR = 0x80000000 TCSAFLUSH = 0x2 + TIMER_ABSTIME = 0x1 + TIMER_RELTIME = 0x0 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 TIOCCONS = 0x80047462 @@ -1477,6 +1609,8 @@ const ( TIOCTIMESTAMP = 0x40107459 TIOCUCNTL = 0x80047466 TOSTOP = 0x400000 + UTIME_NOW = -0x1 + UTIME_OMIT = -0x2 VDISCARD = 0xf VDSUSP = 0xb VEOF = 0x0 @@ -1488,6 +1622,7 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_BCACHE_SIZE_MAX = 0x19000000 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go new file mode 100644 index 000000000..6e3cfec46 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -0,0 +1,2471 @@ +// Code generated by mkmerge.go; DO NOT EDIT. 
+ +// +build linux + +package unix + +import "syscall" + +const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f + AF_ALG = 0x26 + AF_APPLETALK = 0x5 + AF_ASH = 0x12 + AF_ATMPVC = 0x8 + AF_ATMSVC = 0x14 + AF_AX25 = 0x3 + AF_BLUETOOTH = 0x1f + AF_BRIDGE = 0x7 + AF_CAIF = 0x25 + AF_CAN = 0x1d + AF_DECnet = 0xc + AF_ECONET = 0x13 + AF_FILE = 0x1 + AF_IB = 0x1b + AF_IEEE802154 = 0x24 + AF_INET = 0x2 + AF_INET6 = 0xa + AF_IPX = 0x4 + AF_IRDA = 0x17 + AF_ISDN = 0x22 + AF_IUCV = 0x20 + AF_KCM = 0x29 + AF_KEY = 0xf + AF_LLC = 0x1a + AF_LOCAL = 0x1 + AF_MAX = 0x2d + AF_MPLS = 0x1c + AF_NETBEUI = 0xd + AF_NETLINK = 0x10 + AF_NETROM = 0x6 + AF_NFC = 0x27 + AF_PACKET = 0x11 + AF_PHONET = 0x23 + AF_PPPOX = 0x18 + AF_QIPCRTR = 0x2a + AF_RDS = 0x15 + AF_ROSE = 0xb + AF_ROUTE = 0x10 + AF_RXRPC = 0x21 + AF_SECURITY = 0xe + AF_SMC = 0x2b + AF_SNA = 0x16 + AF_TIPC = 0x1e + AF_UNIX = 0x1 + AF_UNSPEC = 0x0 + AF_VSOCK = 0x28 + AF_WANPIPE = 0x19 + AF_X25 = 0x9 + AF_XDP = 0x2c + ALG_OP_DECRYPT = 0x0 + ALG_OP_ENCRYPT = 0x1 + ALG_SET_AEAD_ASSOCLEN = 0x4 + ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_IV = 0x2 + ALG_SET_KEY = 0x1 + ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 + ARPHRD_6LOWPAN = 0x339 + ARPHRD_ADAPT = 0x108 + ARPHRD_APPLETLK = 0x8 + ARPHRD_ARCNET = 0x7 + ARPHRD_ASH = 0x30d + ARPHRD_ATM = 0x13 + ARPHRD_AX25 = 0x3 + ARPHRD_BIF = 0x307 + ARPHRD_CAIF = 0x336 + ARPHRD_CAN = 0x118 + ARPHRD_CHAOS = 0x5 + ARPHRD_CISCO = 0x201 + ARPHRD_CSLIP = 0x101 + ARPHRD_CSLIP6 = 0x103 + ARPHRD_DDCMP = 0x205 + ARPHRD_DLCI = 0xf + ARPHRD_ECONET = 0x30e + ARPHRD_EETHER = 0x2 + ARPHRD_ETHER = 0x1 + ARPHRD_EUI64 = 0x1b + ARPHRD_FCAL = 0x311 + ARPHRD_FCFABRIC = 0x313 + ARPHRD_FCPL = 0x312 + ARPHRD_FCPP = 0x310 + ARPHRD_FDDI = 0x306 + ARPHRD_FRAD = 0x302 + ARPHRD_HDLC = 0x201 + ARPHRD_HIPPI = 0x30c + ARPHRD_HWX25 = 0x110 + ARPHRD_IEEE1394 = 0x18 + ARPHRD_IEEE802 = 0x6 + ARPHRD_IEEE80211 = 0x321 + ARPHRD_IEEE80211_PRISM = 0x322 + ARPHRD_IEEE80211_RADIOTAP = 0x323 + ARPHRD_IEEE802154 = 0x324 + ARPHRD_IEEE802154_MONITOR = 0x325 + ARPHRD_IEEE802_TR = 0x320 + ARPHRD_INFINIBAND = 0x20 + ARPHRD_IP6GRE = 0x337 + ARPHRD_IPDDP = 0x309 + ARPHRD_IPGRE = 0x30a + ARPHRD_IRDA = 0x30f + ARPHRD_LAPB = 0x204 + ARPHRD_LOCALTLK = 0x305 + ARPHRD_LOOPBACK = 0x304 + ARPHRD_METRICOM = 0x17 + ARPHRD_NETLINK = 0x338 + ARPHRD_NETROM = 0x0 + ARPHRD_NONE = 0xfffe + ARPHRD_PHONET = 0x334 + ARPHRD_PHONET_PIPE = 0x335 + ARPHRD_PIMREG = 0x30b + ARPHRD_PPP = 0x200 + ARPHRD_PRONET = 0x4 + ARPHRD_RAWHDLC = 0x206 + ARPHRD_RAWIP = 0x207 + ARPHRD_ROSE = 0x10e + ARPHRD_RSRVD = 0x104 + ARPHRD_SIT = 0x308 + ARPHRD_SKIP = 0x303 + ARPHRD_SLIP = 0x100 + ARPHRD_SLIP6 = 0x102 + ARPHRD_TUNNEL = 0x300 + ARPHRD_TUNNEL6 = 0x301 + ARPHRD_VOID = 0xffff + ARPHRD_VSOCKMON = 0x33a + ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 + B0 = 0x0 + B110 = 0x3 + B1200 = 0x9 + B134 = 0x4 + B150 = 0x5 + B1800 = 0xa + B19200 = 0xe + B200 = 0x6 + B2400 = 0xb + B300 = 0x7 + B38400 = 0xf + B4800 = 0xc + B50 = 0x1 + B600 = 0x8 + B75 = 0x2 + B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINDERFS_SUPER_MAGIC = 0x6c6f6f70 + BINFMTFS_MAGIC = 0x42494e4d + BPF_A = 0x10 + BPF_ABS = 0x20 + BPF_ADD = 0x0 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 + BPF_ALU = 0x4 + BPF_ALU64 = 0x7 + BPF_AND = 0x50 + BPF_ANY = 0x0 + BPF_ARSH = 0xc0 + BPF_B = 0x10 + BPF_BUILD_ID_SIZE = 0x14 + BPF_CALL = 0x80 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + 
BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 + BPF_DIV = 0x30 + BPF_DW = 0x18 + BPF_END = 0xd0 + BPF_EXIST = 0x2 + BPF_EXIT = 0x90 + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1 + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4 + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2 + BPF_FROM_BE = 0x8 + BPF_FROM_LE = 0x0 + BPF_FS_MAGIC = 0xcafe4a11 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ALLOW_MULTI = 0x2 + BPF_F_ALLOW_OVERRIDE = 0x1 + BPF_F_ANY_ALIGNMENT = 0x2 + BPF_F_CLONE = 0x200 + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CURRENT_NETNS = -0x1 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_INGRESS = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_MMAPABLE = 0x400 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NUMA_NODE = 0x4 + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_QUERY_EFFECTIVE = 0x1 + BPF_F_RDONLY = 0x8 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_REPLACE = 0x4 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_STRICT_ALIGNMENT = 0x1 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 + BPF_F_TEST_STATE_FREQ = 0x8 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_USER_STACK = 0x100 + BPF_F_WRONLY = 0x10 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_ZERO_SEED = 0x40 + BPF_H = 0x8 + BPF_IMM = 0x0 + BPF_IND = 0x40 + BPF_JA = 0x0 + BPF_JEQ = 0x10 + BPF_JGE = 0x30 + BPF_JGT = 0x20 + BPF_JLE = 0xb0 + BPF_JLT = 0xa0 + BPF_JMP = 0x5 + BPF_JMP32 = 0x6 + BPF_JNE = 0x50 + BPF_JSET = 0x40 + BPF_JSGE = 0x70 + BPF_JSGT = 0x60 + BPF_JSLE = 0xd0 + BPF_JSLT = 0xc0 + BPF_K = 0x0 + BPF_LD = 0x0 + BPF_LDX = 0x1 + BPF_LEN = 0x80 + BPF_LL_OFF = -0x200000 + BPF_LSH = 0x60 + BPF_MAJOR_VERSION = 0x1 + BPF_MAXINSNS = 0x1000 + BPF_MEM = 0x60 + BPF_MEMWORDS = 0x10 + BPF_MINOR_VERSION = 0x1 + BPF_MISC = 0x7 + BPF_MOD = 0x90 + BPF_MOV = 0xb0 + BPF_MSH = 0xa0 + BPF_MUL = 0x20 + BPF_NEG = 0x80 + BPF_NET_OFF = -0x100000 + BPF_NOEXIST = 0x1 + BPF_OBJ_NAME_LEN = 0x10 + BPF_OR = 0x40 + BPF_PSEUDO_CALL = 0x1 + BPF_PSEUDO_MAP_FD = 0x1 + BPF_PSEUDO_MAP_VALUE = 0x2 + BPF_RET = 0x6 + BPF_RSH = 0x70 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 + BPF_ST = 0x2 + BPF_STX = 0x3 + BPF_SUB = 0x10 + BPF_TAG_SIZE = 0x8 + BPF_TAX = 0x0 + BPF_TO_BE = 0x8 + BPF_TO_LE = 0x0 + BPF_TXA = 0x80 + BPF_W = 0x0 + BPF_X = 0x8 + BPF_XADD = 0xc0 + BPF_XOR = 0xa0 + BRKINT = 0x2 + BS0 = 0x0 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 + CAN_BCM = 0x2 + CAN_EFF_FLAG = 0x80000000 + CAN_EFF_ID_BITS = 0x1d + CAN_EFF_MASK = 0x1fffffff + CAN_ERR_FLAG = 0x20000000 + CAN_ERR_MASK = 0x1fffffff + CAN_INV_FILTER = 0x20000000 + CAN_ISOTP = 0x6 + CAN_J1939 = 0x7 + CAN_MAX_DLC = 0x8 + CAN_MAX_DLEN = 0x8 + CAN_MCNET = 0x5 + CAN_MTU = 0x10 + CAN_NPROTO = 0x8 + CAN_RAW = 0x1 + CAN_RAW_FILTER_MAX = 0x200 + CAN_RTR_FLAG = 0x40000000 + CAN_SFF_ID_BITS = 0xb + CAN_SFF_MASK = 0x7ff + CAN_TP16 = 0x3 + CAN_TP20 = 0x4 + CAP_AUDIT_CONTROL = 0x1e + CAP_AUDIT_READ = 0x25 + CAP_AUDIT_WRITE = 0x1d + 
CAP_BLOCK_SUSPEND = 0x24 + CAP_CHOWN = 0x0 + CAP_DAC_OVERRIDE = 0x1 + CAP_DAC_READ_SEARCH = 0x2 + CAP_FOWNER = 0x3 + CAP_FSETID = 0x4 + CAP_IPC_LOCK = 0xe + CAP_IPC_OWNER = 0xf + CAP_KILL = 0x5 + CAP_LAST_CAP = 0x25 + CAP_LEASE = 0x1c + CAP_LINUX_IMMUTABLE = 0x9 + CAP_MAC_ADMIN = 0x21 + CAP_MAC_OVERRIDE = 0x20 + CAP_MKNOD = 0x1b + CAP_NET_ADMIN = 0xc + CAP_NET_BIND_SERVICE = 0xa + CAP_NET_BROADCAST = 0xb + CAP_NET_RAW = 0xd + CAP_SETFCAP = 0x1f + CAP_SETGID = 0x6 + CAP_SETPCAP = 0x8 + CAP_SETUID = 0x7 + CAP_SYSLOG = 0x22 + CAP_SYS_ADMIN = 0x15 + CAP_SYS_BOOT = 0x16 + CAP_SYS_CHROOT = 0x12 + CAP_SYS_MODULE = 0x10 + CAP_SYS_NICE = 0x17 + CAP_SYS_PACCT = 0x14 + CAP_SYS_PTRACE = 0x13 + CAP_SYS_RAWIO = 0x11 + CAP_SYS_RESOURCE = 0x18 + CAP_SYS_TIME = 0x19 + CAP_SYS_TTY_CONFIG = 0x1a + CAP_WAKE_ALARM = 0x23 + CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb + CLOCK_BOOTTIME = 0x7 + CLOCK_BOOTTIME_ALARM = 0x9 + CLOCK_DEFAULT = 0x0 + CLOCK_EXT = 0x1 + CLOCK_INT = 0x2 + CLOCK_MONOTONIC = 0x1 + CLOCK_MONOTONIC_COARSE = 0x6 + CLOCK_MONOTONIC_RAW = 0x4 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_REALTIME_ALARM = 0x8 + CLOCK_REALTIME_COARSE = 0x5 + CLOCK_TAI = 0xb + CLOCK_THREAD_CPUTIME_ID = 0x3 + CLOCK_TXFROMRX = 0x4 + CLOCK_TXINT = 0x3 + CLONE_ARGS_SIZE_VER0 = 0x40 + CLONE_ARGS_SIZE_VER1 = 0x50 + CLONE_CHILD_CLEARTID = 0x200000 + CLONE_CHILD_SETTID = 0x1000000 + CLONE_CLEAR_SIGHAND = 0x100000000 + CLONE_DETACHED = 0x400000 + CLONE_FILES = 0x400 + CLONE_FS = 0x200 + CLONE_IO = 0x80000000 + CLONE_NEWCGROUP = 0x2000000 + CLONE_NEWIPC = 0x8000000 + CLONE_NEWNET = 0x40000000 + CLONE_NEWNS = 0x20000 + CLONE_NEWPID = 0x20000000 + CLONE_NEWTIME = 0x80 + CLONE_NEWUSER = 0x10000000 + CLONE_NEWUTS = 0x4000000 + CLONE_PARENT = 0x8000 + CLONE_PARENT_SETTID = 0x100000 + CLONE_PIDFD = 0x1000 + CLONE_PTRACE = 0x2000 + CLONE_SETTLS = 0x80000 + CLONE_SIGHAND = 0x800 + CLONE_SYSVSEM = 0x40000 + CLONE_THREAD = 0x10000 + CLONE_UNTRACED = 0x800000 + CLONE_VFORK = 0x4000 + CLONE_VM = 0x100 + CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 + CR0 = 0x0 + CRAMFS_MAGIC = 0x28cd3d45 + CRTSCTS = 0x80000000 + CRYPTO_MAX_NAME = 0x40 + CRYPTO_MSG_MAX = 0x15 + CRYPTO_NR_MSGTYPES = 0x6 + CRYPTO_REPORT_MAXSIZE = 0x160 + CS5 = 0x0 + CSIGNAL = 0xff + CSTART = 0x11 + CSTATUS = 0x0 + CSTOP = 0x13 + CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVLINK_CMD_ESWITCH_MODE_GET = 0x1d + DEVLINK_CMD_ESWITCH_MODE_SET = 0x1e + DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" + DEVLINK_GENL_NAME = "devlink" + DEVLINK_GENL_VERSION = 0x1 + DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 + DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 + DT_BLK = 0x6 + DT_CHR = 0x2 + DT_DIR = 0x4 + DT_FIFO = 0x1 + DT_LNK = 0xa + DT_REG = 0x8 + DT_SOCK = 0xc + DT_UNKNOWN = 0x0 + DT_WHT = 0xe + ECHO = 0x8 + ECRYPTFS_SUPER_MAGIC = 0xf15f + EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 + ENCODING_DEFAULT = 0x0 + ENCODING_FM_MARK = 0x3 + ENCODING_FM_SPACE = 0x4 + ENCODING_MANCHESTER = 0x5 + ENCODING_NRZ = 0x1 + ENCODING_NRZI = 0x2 + EPOLLERR = 0x8 + EPOLLET = 0x80000000 + EPOLLEXCLUSIVE = 0x10000000 + EPOLLHUP = 0x10 + EPOLLIN = 0x1 + EPOLLMSG = 0x400 + EPOLLONESHOT = 0x40000000 + EPOLLOUT = 0x4 + EPOLLPRI = 0x2 + EPOLLRDBAND = 0x80 + EPOLLRDHUP = 0x2000 + EPOLLRDNORM = 0x40 + EPOLLWAKEUP = 0x20000000 + EPOLLWRBAND = 0x200 + EPOLLWRNORM = 0x100 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 + ETH_P_1588 = 0x88f7 
+ ETH_P_8021AD = 0x88a8 + ETH_P_8021AH = 0x88e7 + ETH_P_8021Q = 0x8100 + ETH_P_80221 = 0x8917 + ETH_P_802_2 = 0x4 + ETH_P_802_3 = 0x1 + ETH_P_802_3_MIN = 0x600 + ETH_P_802_EX1 = 0x88b5 + ETH_P_AARP = 0x80f3 + ETH_P_AF_IUCV = 0xfbfb + ETH_P_ALL = 0x3 + ETH_P_AOE = 0x88a2 + ETH_P_ARCNET = 0x1a + ETH_P_ARP = 0x806 + ETH_P_ATALK = 0x809b + ETH_P_ATMFATE = 0x8884 + ETH_P_ATMMPOA = 0x884c + ETH_P_AX25 = 0x2 + ETH_P_BATMAN = 0x4305 + ETH_P_BPQ = 0x8ff + ETH_P_CAIF = 0xf7 + ETH_P_CAN = 0xc + ETH_P_CANFD = 0xd + ETH_P_CONTROL = 0x16 + ETH_P_CUST = 0x6006 + ETH_P_DDCMP = 0x6 + ETH_P_DEC = 0x6000 + ETH_P_DIAG = 0x6005 + ETH_P_DNA_DL = 0x6001 + ETH_P_DNA_RC = 0x6002 + ETH_P_DNA_RT = 0x6003 + ETH_P_DSA = 0x1b + ETH_P_DSA_8021Q = 0xdadb + ETH_P_ECONET = 0x18 + ETH_P_EDSA = 0xdada + ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb + ETH_P_FCOE = 0x8906 + ETH_P_FIP = 0x8914 + ETH_P_HDLC = 0x19 + ETH_P_HSR = 0x892f + ETH_P_IBOE = 0x8915 + ETH_P_IEEE802154 = 0xf6 + ETH_P_IEEEPUP = 0xa00 + ETH_P_IEEEPUPAT = 0xa01 + ETH_P_IFE = 0xed3e + ETH_P_IP = 0x800 + ETH_P_IPV6 = 0x86dd + ETH_P_IPX = 0x8137 + ETH_P_IRDA = 0x17 + ETH_P_LAT = 0x6004 + ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc + ETH_P_LOCALTALK = 0x9 + ETH_P_LOOP = 0x60 + ETH_P_LOOPBACK = 0x9000 + ETH_P_MACSEC = 0x88e5 + ETH_P_MAP = 0xf9 + ETH_P_MOBITEX = 0x15 + ETH_P_MPLS_MC = 0x8848 + ETH_P_MPLS_UC = 0x8847 + ETH_P_MVRP = 0x88f5 + ETH_P_NCSI = 0x88f8 + ETH_P_NSH = 0x894f + ETH_P_PAE = 0x888e + ETH_P_PAUSE = 0x8808 + ETH_P_PHONET = 0xf5 + ETH_P_PPPTALK = 0x10 + ETH_P_PPP_DISC = 0x8863 + ETH_P_PPP_MP = 0x8 + ETH_P_PPP_SES = 0x8864 + ETH_P_PREAUTH = 0x88c7 + ETH_P_PRP = 0x88fb + ETH_P_PUP = 0x200 + ETH_P_PUPAT = 0x201 + ETH_P_QINQ1 = 0x9100 + ETH_P_QINQ2 = 0x9200 + ETH_P_QINQ3 = 0x9300 + ETH_P_RARP = 0x8035 + ETH_P_SCA = 0x6007 + ETH_P_SLOW = 0x8809 + ETH_P_SNAP = 0x5 + ETH_P_TDLS = 0x890d + ETH_P_TEB = 0x6558 + ETH_P_TIPC = 0x88ca + ETH_P_TRAILER = 0x1c + ETH_P_TR_802_2 = 0x11 + ETH_P_TSN = 0x22f0 + ETH_P_WAN_PPP = 0x7 + ETH_P_WCCP = 0x883e + ETH_P_X25 = 0x805 + ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 + EXTA = 0xe + EXTB = 0xf + F2FS_SUPER_MAGIC = 0xf2f52010 + FALLOC_FL_COLLAPSE_RANGE = 0x8 + FALLOC_FL_INSERT_RANGE = 0x20 + FALLOC_FL_KEEP_SIZE = 0x1 + FALLOC_FL_NO_HIDE_STALE = 0x4 + FALLOC_FL_PUNCH_HOLE = 0x2 + FALLOC_FL_UNSHARE_RANGE = 0x40 + FALLOC_FL_ZERO_RANGE = 0x10 + FANOTIFY_METADATA_VERSION = 0x3 + FAN_ACCESS = 0x1 + FAN_ACCESS_PERM = 0x20000 + FAN_ALLOW = 0x1 + FAN_ALL_CLASS_BITS = 0xc + FAN_ALL_EVENTS = 0x3b + FAN_ALL_INIT_FLAGS = 0x3f + FAN_ALL_MARK_FLAGS = 0xff + FAN_ALL_OUTGOING_EVENTS = 0x3403b + FAN_ALL_PERM_EVENTS = 0x30000 + FAN_ATTRIB = 0x4 + FAN_AUDIT = 0x10 + FAN_CLASS_CONTENT = 0x4 + FAN_CLASS_NOTIF = 0x0 + FAN_CLASS_PRE_CONTENT = 0x8 + FAN_CLOEXEC = 0x1 + FAN_CLOSE = 0x18 + FAN_CLOSE_NOWRITE = 0x10 + FAN_CLOSE_WRITE = 0x8 + FAN_CREATE = 0x100 + FAN_DELETE = 0x200 + FAN_DELETE_SELF = 0x400 + FAN_DENY = 0x2 + FAN_ENABLE_AUDIT = 0x40 + FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_METADATA_LEN = 0x18 + FAN_EVENT_ON_CHILD = 0x8000000 + FAN_MARK_ADD = 0x1 + FAN_MARK_DONT_FOLLOW = 0x4 + FAN_MARK_FILESYSTEM = 0x100 + FAN_MARK_FLUSH = 0x80 + FAN_MARK_IGNORED_MASK = 0x20 + FAN_MARK_IGNORED_SURV_MODIFY = 0x40 + FAN_MARK_INODE = 0x0 + FAN_MARK_MOUNT = 0x10 + FAN_MARK_ONLYDIR = 0x8 + FAN_MARK_REMOVE = 0x2 + FAN_MODIFY = 0x2 + FAN_MOVE = 0xc0 + FAN_MOVED_FROM = 0x40 + FAN_MOVED_TO = 0x80 + FAN_MOVE_SELF = 0x800 + FAN_NOFD = -0x1 + FAN_NONBLOCK = 0x2 + FAN_ONDIR 
= 0x40000000 + FAN_OPEN = 0x20 + FAN_OPEN_EXEC = 0x1000 + FAN_OPEN_EXEC_PERM = 0x40000 + FAN_OPEN_PERM = 0x10000 + FAN_Q_OVERFLOW = 0x4000 + FAN_REPORT_FID = 0x200 + FAN_REPORT_TID = 0x100 + FAN_UNLIMITED_MARKS = 0x20 + FAN_UNLIMITED_QUEUE = 0x10 + FD_CLOEXEC = 0x1 + FD_SETSIZE = 0x400 + FF0 = 0x0 + FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 + FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" + FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 + FSCRYPT_KEY_IDENTIFIER_SIZE = 0x10 + FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY = 0x1 + FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS = 0x2 + FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR = 0x1 + FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER = 0x2 + FSCRYPT_KEY_STATUS_ABSENT = 0x1 + FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF = 0x1 + FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED = 0x3 + FSCRYPT_KEY_STATUS_PRESENT = 0x2 + FSCRYPT_MAX_KEY_SIZE = 0x40 + FSCRYPT_MODE_ADIANTUM = 0x9 + FSCRYPT_MODE_AES_128_CBC = 0x5 + FSCRYPT_MODE_AES_128_CTS = 0x6 + FSCRYPT_MODE_AES_256_CTS = 0x4 + FSCRYPT_MODE_AES_256_XTS = 0x1 + FSCRYPT_POLICY_FLAGS_PAD_16 = 0x2 + FSCRYPT_POLICY_FLAGS_PAD_32 = 0x3 + FSCRYPT_POLICY_FLAGS_PAD_4 = 0x0 + FSCRYPT_POLICY_FLAGS_PAD_8 = 0x1 + FSCRYPT_POLICY_FLAGS_PAD_MASK = 0x3 + FSCRYPT_POLICY_FLAGS_VALID = 0xf + FSCRYPT_POLICY_FLAG_DIRECT_KEY = 0x4 + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 = 0x8 + FSCRYPT_POLICY_V1 = 0x0 + FSCRYPT_POLICY_V2 = 0x2 + FS_ENCRYPTION_MODE_ADIANTUM = 0x9 + FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 + FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 + FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 + FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 + FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 + FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 + FS_ENCRYPTION_MODE_INVALID = 0x0 + FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 + FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 + FS_IOC_ADD_ENCRYPTION_KEY = 0xc0506617 + FS_IOC_GET_ENCRYPTION_KEY_STATUS = 0xc080661a + FS_IOC_GET_ENCRYPTION_POLICY_EX = 0xc0096616 + FS_IOC_MEASURE_VERITY = 0xc0046686 + FS_IOC_REMOVE_ENCRYPTION_KEY = 0xc0406618 + FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS = 0xc0406619 + FS_KEY_DESCRIPTOR_SIZE = 0x8 + FS_KEY_DESC_PREFIX = "fscrypt:" + FS_KEY_DESC_PREFIX_SIZE = 0x8 + FS_MAX_KEY_SIZE = 0x40 + FS_POLICY_FLAGS_PAD_16 = 0x2 + FS_POLICY_FLAGS_PAD_32 = 0x3 + FS_POLICY_FLAGS_PAD_4 = 0x0 + FS_POLICY_FLAGS_PAD_8 = 0x1 + FS_POLICY_FLAGS_PAD_MASK = 0x3 + FS_POLICY_FLAGS_VALID = 0xf + FS_VERITY_FL = 0x100000 + FS_VERITY_HASH_ALG_SHA256 = 0x1 + FS_VERITY_HASH_ALG_SHA512 = 0x2 + FUTEXFS_SUPER_MAGIC = 0xbad1dea + F_ADD_SEALS = 0x409 + F_DUPFD = 0x0 + F_DUPFD_CLOEXEC = 0x406 + F_EXLCK = 0x4 + F_GETFD = 0x1 + F_GETFL = 0x3 + F_GETLEASE = 0x401 + F_GETOWN_EX = 0x10 + F_GETPIPE_SZ = 0x408 + F_GETSIG = 0xb + F_GET_FILE_RW_HINT = 0x40d + F_GET_RW_HINT = 0x40b + F_GET_SEALS = 0x40a + F_LOCK = 0x1 + F_NOTIFY = 0x402 + F_OFD_GETLK = 0x24 + F_OFD_SETLK = 0x25 + F_OFD_SETLKW = 0x26 + F_OK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 + F_SEAL_GROW = 0x4 + F_SEAL_SEAL = 0x1 + F_SEAL_SHRINK = 0x2 + F_SEAL_WRITE = 0x8 + F_SETFD = 0x2 + F_SETFL = 0x4 + F_SETLEASE = 0x400 + F_SETOWN_EX = 0xf + F_SETPIPE_SZ = 0x407 + F_SETSIG = 0xa + F_SET_FILE_RW_HINT = 0x40e + F_SET_RW_HINT = 0x40c + F_SHLCK = 0x8 + F_TEST = 0x3 + F_TLOCK = 0x2 + F_ULOCK = 0x0 + GENL_ADMIN_PERM = 0x1 + GENL_CMD_CAP_DO = 0x2 + GENL_CMD_CAP_DUMP = 0x4 + GENL_CMD_CAP_HASPOL = 0x8 + GENL_HDRLEN = 0x4 + GENL_ID_CTRL = 0x10 + GENL_ID_PMCRAID = 0x12 + GENL_ID_VFS_DQUOT = 0x11 + GENL_MAX_ID = 0x3ff + GENL_MIN_ID = 0x10 + GENL_NAMSIZ = 0x10 + GENL_START_ALLOC = 0x13 + GENL_UNS_ADMIN_PERM = 0x10 + GRND_INSECURE = 0x4 + GRND_NONBLOCK = 0x1 + GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 
0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 + IBSHIFT = 0x10 + ICMPV6_FILTER = 0x1 + ICRNL = 0x100 + IFA_F_DADFAILED = 0x8 + IFA_F_DEPRECATED = 0x20 + IFA_F_HOMEADDRESS = 0x10 + IFA_F_MANAGETEMPADDR = 0x100 + IFA_F_MCAUTOJOIN = 0x400 + IFA_F_NODAD = 0x2 + IFA_F_NOPREFIXROUTE = 0x200 + IFA_F_OPTIMISTIC = 0x4 + IFA_F_PERMANENT = 0x80 + IFA_F_SECONDARY = 0x1 + IFA_F_STABLE_PRIVACY = 0x800 + IFA_F_TEMPORARY = 0x1 + IFA_F_TENTATIVE = 0x40 + IFA_MAX = 0xa + IFF_ALLMULTI = 0x200 + IFF_ATTACH_QUEUE = 0x200 + IFF_AUTOMEDIA = 0x4000 + IFF_BROADCAST = 0x2 + IFF_DEBUG = 0x4 + IFF_DETACH_QUEUE = 0x400 + IFF_DORMANT = 0x20000 + IFF_DYNAMIC = 0x8000 + IFF_ECHO = 0x40000 + IFF_LOOPBACK = 0x8 + IFF_LOWER_UP = 0x10000 + IFF_MASTER = 0x400 + IFF_MULTICAST = 0x1000 + IFF_MULTI_QUEUE = 0x100 + IFF_NAPI = 0x10 + IFF_NAPI_FRAGS = 0x20 + IFF_NOARP = 0x80 + IFF_NOFILTER = 0x1000 + IFF_NOTRAILERS = 0x20 + IFF_NO_PI = 0x1000 + IFF_ONE_QUEUE = 0x2000 + IFF_PERSIST = 0x800 + IFF_POINTOPOINT = 0x10 + IFF_PORTSEL = 0x2000 + IFF_PROMISC = 0x100 + IFF_RUNNING = 0x40 + IFF_SLAVE = 0x800 + IFF_TAP = 0x2 + IFF_TUN = 0x1 + IFF_TUN_EXCL = 0x8000 + IFF_UP = 0x1 + IFF_VNET_HDR = 0x4000 + IFF_VOLATILE = 0x70c5a + IFNAMSIZ = 0x10 + IGNBRK = 0x1 + IGNCR = 0x80 + IGNPAR = 0x4 + IMAXBEL = 0x2000 + INLCR = 0x40 + INPCK = 0x10 + IN_ACCESS = 0x1 + IN_ALL_EVENTS = 0xfff + IN_ATTRIB = 0x4 + IN_CLASSA_HOST = 0xffffff + IN_CLASSA_MAX = 0x80 + IN_CLASSA_NET = 0xff000000 + IN_CLASSA_NSHIFT = 0x18 + IN_CLASSB_HOST = 0xffff + IN_CLASSB_MAX = 0x10000 + IN_CLASSB_NET = 0xffff0000 + IN_CLASSB_NSHIFT = 0x10 + IN_CLASSC_HOST = 0xff + IN_CLASSC_NET = 0xffffff00 + IN_CLASSC_NSHIFT = 0x8 + IN_CLOSE = 0x18 + IN_CLOSE_NOWRITE = 0x10 + IN_CLOSE_WRITE = 0x8 + IN_CREATE = 0x100 + IN_DELETE = 0x200 + IN_DELETE_SELF = 0x400 + IN_DONT_FOLLOW = 0x2000000 + IN_EXCL_UNLINK = 0x4000000 + IN_IGNORED = 0x8000 + IN_ISDIR = 0x40000000 + IN_LOOPBACKNET = 0x7f + IN_MASK_ADD = 0x20000000 + IN_MASK_CREATE = 0x10000000 + IN_MODIFY = 0x2 + IN_MOVE = 0xc0 + IN_MOVED_FROM = 0x40 + IN_MOVED_TO = 0x80 + IN_MOVE_SELF = 0x800 + IN_ONESHOT = 0x80000000 + IN_ONLYDIR = 0x1000000 + IN_OPEN = 0x20 + IN_Q_OVERFLOW = 0x4000 + IN_UNMOUNT = 0x2000 + IPPROTO_AH = 0x33 + IPPROTO_BEETPH = 0x5e + IPPROTO_COMP = 0x6c + IPPROTO_DCCP = 0x21 + IPPROTO_DSTOPTS = 0x3c + IPPROTO_EGP = 0x8 + IPPROTO_ENCAP = 0x62 + IPPROTO_ESP = 0x32 + IPPROTO_FRAGMENT = 0x2c + IPPROTO_GRE = 0x2f + 
IPPROTO_HOPOPTS = 0x0 + IPPROTO_ICMP = 0x1 + IPPROTO_ICMPV6 = 0x3a + IPPROTO_IDP = 0x16 + IPPROTO_IGMP = 0x2 + IPPROTO_IP = 0x0 + IPPROTO_IPIP = 0x4 + IPPROTO_IPV6 = 0x29 + IPPROTO_L2TP = 0x73 + IPPROTO_MH = 0x87 + IPPROTO_MPLS = 0x89 + IPPROTO_MTP = 0x5c + IPPROTO_NONE = 0x3b + IPPROTO_PIM = 0x67 + IPPROTO_PUP = 0xc + IPPROTO_RAW = 0xff + IPPROTO_ROUTING = 0x2b + IPPROTO_RSVP = 0x2e + IPPROTO_SCTP = 0x84 + IPPROTO_TCP = 0x6 + IPPROTO_TP = 0x1d + IPPROTO_UDP = 0x11 + IPPROTO_UDPLITE = 0x88 + IPV6_2292DSTOPTS = 0x4 + IPV6_2292HOPLIMIT = 0x8 + IPV6_2292HOPOPTS = 0x3 + IPV6_2292PKTINFO = 0x2 + IPV6_2292PKTOPTIONS = 0x6 + IPV6_2292RTHDR = 0x5 + IPV6_ADDRFORM = 0x1 + IPV6_ADDR_PREFERENCES = 0x48 + IPV6_ADD_MEMBERSHIP = 0x14 + IPV6_AUTHHDR = 0xa + IPV6_AUTOFLOWLABEL = 0x46 + IPV6_CHECKSUM = 0x7 + IPV6_DONTFRAG = 0x3e + IPV6_DROP_MEMBERSHIP = 0x15 + IPV6_DSTOPTS = 0x3b + IPV6_FREEBIND = 0x4e + IPV6_HDRINCL = 0x24 + IPV6_HOPLIMIT = 0x34 + IPV6_HOPOPTS = 0x36 + IPV6_IPSEC_POLICY = 0x22 + IPV6_JOIN_ANYCAST = 0x1b + IPV6_JOIN_GROUP = 0x14 + IPV6_LEAVE_ANYCAST = 0x1c + IPV6_LEAVE_GROUP = 0x15 + IPV6_MINHOPCOUNT = 0x49 + IPV6_MTU = 0x18 + IPV6_MTU_DISCOVER = 0x17 + IPV6_MULTICAST_ALL = 0x1d + IPV6_MULTICAST_HOPS = 0x12 + IPV6_MULTICAST_IF = 0x11 + IPV6_MULTICAST_LOOP = 0x13 + IPV6_NEXTHOP = 0x9 + IPV6_ORIGDSTADDR = 0x4a + IPV6_PATHMTU = 0x3d + IPV6_PKTINFO = 0x32 + IPV6_PMTUDISC_DO = 0x2 + IPV6_PMTUDISC_DONT = 0x0 + IPV6_PMTUDISC_INTERFACE = 0x4 + IPV6_PMTUDISC_OMIT = 0x5 + IPV6_PMTUDISC_PROBE = 0x3 + IPV6_PMTUDISC_WANT = 0x1 + IPV6_RECVDSTOPTS = 0x3a + IPV6_RECVERR = 0x19 + IPV6_RECVFRAGSIZE = 0x4d + IPV6_RECVHOPLIMIT = 0x33 + IPV6_RECVHOPOPTS = 0x35 + IPV6_RECVORIGDSTADDR = 0x4a + IPV6_RECVPATHMTU = 0x3c + IPV6_RECVPKTINFO = 0x31 + IPV6_RECVRTHDR = 0x38 + IPV6_RECVTCLASS = 0x42 + IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e + IPV6_RTHDR = 0x39 + IPV6_RTHDRDSTOPTS = 0x37 + IPV6_RTHDR_LOOSE = 0x0 + IPV6_RTHDR_STRICT = 0x1 + IPV6_RTHDR_TYPE_0 = 0x0 + IPV6_RXDSTOPTS = 0x3b + IPV6_RXHOPOPTS = 0x36 + IPV6_TCLASS = 0x43 + IPV6_TRANSPARENT = 0x4b + IPV6_UNICAST_HOPS = 0x10 + IPV6_UNICAST_IF = 0x4c + IPV6_V6ONLY = 0x1a + IPV6_XFRM_POLICY = 0x23 + IP_ADD_MEMBERSHIP = 0x23 + IP_ADD_SOURCE_MEMBERSHIP = 0x27 + IP_BIND_ADDRESS_NO_PORT = 0x18 + IP_BLOCK_SOURCE = 0x26 + IP_CHECKSUM = 0x17 + IP_DEFAULT_MULTICAST_LOOP = 0x1 + IP_DEFAULT_MULTICAST_TTL = 0x1 + IP_DF = 0x4000 + IP_DROP_MEMBERSHIP = 0x24 + IP_DROP_SOURCE_MEMBERSHIP = 0x28 + IP_FREEBIND = 0xf + IP_HDRINCL = 0x3 + IP_IPSEC_POLICY = 0x10 + IP_MAXPACKET = 0xffff + IP_MAX_MEMBERSHIPS = 0x14 + IP_MF = 0x2000 + IP_MINTTL = 0x15 + IP_MSFILTER = 0x29 + IP_MSS = 0x240 + IP_MTU = 0xe + IP_MTU_DISCOVER = 0xa + IP_MULTICAST_ALL = 0x31 + IP_MULTICAST_IF = 0x20 + IP_MULTICAST_LOOP = 0x22 + IP_MULTICAST_TTL = 0x21 + IP_NODEFRAG = 0x16 + IP_OFFMASK = 0x1fff + IP_OPTIONS = 0x4 + IP_ORIGDSTADDR = 0x14 + IP_PASSSEC = 0x12 + IP_PKTINFO = 0x8 + IP_PKTOPTIONS = 0x9 + IP_PMTUDISC = 0xa + IP_PMTUDISC_DO = 0x2 + IP_PMTUDISC_DONT = 0x0 + IP_PMTUDISC_INTERFACE = 0x4 + IP_PMTUDISC_OMIT = 0x5 + IP_PMTUDISC_PROBE = 0x3 + IP_PMTUDISC_WANT = 0x1 + IP_RECVERR = 0xb + IP_RECVFRAGSIZE = 0x19 + IP_RECVOPTS = 0x6 + IP_RECVORIGDSTADDR = 0x14 + IP_RECVRETOPTS = 0x7 + IP_RECVTOS = 0xd + IP_RECVTTL = 0xc + IP_RETOPTS = 0x7 + IP_RF = 0x8000 + IP_ROUTER_ALERT = 0x5 + IP_TOS = 0x1 + IP_TRANSPARENT = 0x13 + IP_TTL = 0x2 + IP_UNBLOCK_SOURCE = 0x25 + IP_UNICAST_IF = 0x32 + IP_XFRM_POLICY = 0x11 + ISOFS_SUPER_MAGIC = 0x9660 + ISTRIP = 0x20 + IUTF8 = 0x4000 + IXANY = 0x800 + 
JFFS2_SUPER_MAGIC = 0x72b6 + KEXEC_ARCH_386 = 0x30000 + KEXEC_ARCH_68K = 0x40000 + KEXEC_ARCH_AARCH64 = 0xb70000 + KEXEC_ARCH_ARM = 0x280000 + KEXEC_ARCH_DEFAULT = 0x0 + KEXEC_ARCH_IA_64 = 0x320000 + KEXEC_ARCH_MASK = 0xffff0000 + KEXEC_ARCH_MIPS = 0x80000 + KEXEC_ARCH_MIPS_LE = 0xa0000 + KEXEC_ARCH_PARISC = 0xf0000 + KEXEC_ARCH_PPC = 0x140000 + KEXEC_ARCH_PPC64 = 0x150000 + KEXEC_ARCH_S390 = 0x160000 + KEXEC_ARCH_SH = 0x2a0000 + KEXEC_ARCH_X86_64 = 0x3e0000 + KEXEC_FILE_NO_INITRAMFS = 0x4 + KEXEC_FILE_ON_CRASH = 0x2 + KEXEC_FILE_UNLOAD = 0x1 + KEXEC_ON_CRASH = 0x1 + KEXEC_PRESERVE_CONTEXT = 0x2 + KEXEC_SEGMENT_MAX = 0x10 + KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 + KEYCTL_CHOWN = 0x4 + KEYCTL_CLEAR = 0x7 + KEYCTL_DESCRIBE = 0x6 + KEYCTL_DH_COMPUTE = 0x17 + KEYCTL_GET_KEYRING_ID = 0x0 + KEYCTL_GET_PERSISTENT = 0x16 + KEYCTL_GET_SECURITY = 0x11 + KEYCTL_INSTANTIATE = 0xc + KEYCTL_INSTANTIATE_IOV = 0x14 + KEYCTL_INVALIDATE = 0x15 + KEYCTL_JOIN_SESSION_KEYRING = 0x1 + KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 + KEYCTL_NEGATE = 0xd + KEYCTL_PKEY_DECRYPT = 0x1a + KEYCTL_PKEY_ENCRYPT = 0x19 + KEYCTL_PKEY_QUERY = 0x18 + KEYCTL_PKEY_SIGN = 0x1b + KEYCTL_PKEY_VERIFY = 0x1c + KEYCTL_READ = 0xb + KEYCTL_REJECT = 0x13 + KEYCTL_RESTRICT_KEYRING = 0x1d + KEYCTL_REVOKE = 0x3 + KEYCTL_SEARCH = 0xa + KEYCTL_SESSION_TO_PARENT = 0x12 + KEYCTL_SETPERM = 0x5 + KEYCTL_SET_REQKEY_KEYRING = 0xe + KEYCTL_SET_TIMEOUT = 0xf + KEYCTL_SUPPORTS_DECRYPT = 0x2 + KEYCTL_SUPPORTS_ENCRYPT = 0x1 + KEYCTL_SUPPORTS_SIGN = 0x4 + KEYCTL_SUPPORTS_VERIFY = 0x8 + KEYCTL_UNLINK = 0x9 + KEYCTL_UPDATE = 0x2 + KEY_REQKEY_DEFL_DEFAULT = 0x0 + KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 + KEY_REQKEY_DEFL_NO_CHANGE = -0x1 + KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 + KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 + KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 + KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 + KEY_REQKEY_DEFL_USER_KEYRING = 0x4 + KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 + KEY_SPEC_GROUP_KEYRING = -0x6 + KEY_SPEC_PROCESS_KEYRING = -0x2 + KEY_SPEC_REQKEY_AUTH_KEY = -0x7 + KEY_SPEC_REQUESTOR_KEYRING = -0x8 + KEY_SPEC_SESSION_KEYRING = -0x3 + KEY_SPEC_THREAD_KEYRING = -0x1 + KEY_SPEC_USER_KEYRING = -0x4 + KEY_SPEC_USER_SESSION_KEYRING = -0x5 + LINUX_REBOOT_CMD_CAD_OFF = 0x0 + LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef + LINUX_REBOOT_CMD_HALT = 0xcdef0123 + LINUX_REBOOT_CMD_KEXEC = 0x45584543 + LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc + LINUX_REBOOT_CMD_RESTART = 0x1234567 + LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 + LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 + LINUX_REBOOT_MAGIC1 = 0xfee1dead + LINUX_REBOOT_MAGIC2 = 0x28121969 + LOCK_EX = 0x2 + LOCK_NB = 0x4 + LOCK_SH = 0x1 + LOCK_UN = 0x8 + LOOP_CLR_FD = 0x4c01 + LOOP_CTL_ADD = 0x4c80 + LOOP_CTL_GET_FREE = 0x4c82 + LOOP_CTL_REMOVE = 0x4c81 + LOOP_GET_STATUS = 0x4c03 + LOOP_GET_STATUS64 = 0x4c05 + LOOP_SET_BLOCK_SIZE = 0x4c09 + LOOP_SET_CAPACITY = 0x4c07 + LOOP_SET_DIRECT_IO = 0x4c08 + LOOP_SET_FD = 0x4c00 + LOOP_SET_STATUS = 0x4c02 + LOOP_SET_STATUS64 = 0x4c04 + LO_KEY_SIZE = 0x20 + LO_NAME_SIZE = 0x40 + MADV_COLD = 0x14 + MADV_DODUMP = 0x11 + MADV_DOFORK = 0xb + MADV_DONTDUMP = 0x10 + MADV_DONTFORK = 0xa + MADV_DONTNEED = 0x4 + MADV_FREE = 0x8 
+ MADV_HUGEPAGE = 0xe + MADV_HWPOISON = 0x64 + MADV_KEEPONFORK = 0x13 + MADV_MERGEABLE = 0xc + MADV_NOHUGEPAGE = 0xf + MADV_NORMAL = 0x0 + MADV_PAGEOUT = 0x15 + MADV_RANDOM = 0x1 + MADV_REMOVE = 0x9 + MADV_SEQUENTIAL = 0x2 + MADV_UNMERGEABLE = 0xd + MADV_WILLNEED = 0x3 + MADV_WIPEONFORK = 0x12 + MAP_FILE = 0x0 + MAP_FIXED = 0x10 + MAP_FIXED_NOREPLACE = 0x100000 + MAP_HUGE_MASK = 0x3f + MAP_HUGE_SHIFT = 0x1a + MAP_PRIVATE = 0x2 + MAP_SHARED = 0x1 + MAP_SHARED_VALIDATE = 0x3 + MAP_TYPE = 0xf + MCAST_BLOCK_SOURCE = 0x2b + MCAST_EXCLUDE = 0x0 + MCAST_INCLUDE = 0x1 + MCAST_JOIN_GROUP = 0x2a + MCAST_JOIN_SOURCE_GROUP = 0x2e + MCAST_LEAVE_GROUP = 0x2d + MCAST_LEAVE_SOURCE_GROUP = 0x2f + MCAST_MSFILTER = 0x30 + MCAST_UNBLOCK_SOURCE = 0x2c + MFD_ALLOW_SEALING = 0x2 + MFD_CLOEXEC = 0x1 + MFD_HUGETLB = 0x4 + MFD_HUGE_16GB = -0x78000000 + MFD_HUGE_16MB = 0x60000000 + MFD_HUGE_1GB = 0x78000000 + MFD_HUGE_1MB = 0x50000000 + MFD_HUGE_256MB = 0x70000000 + MFD_HUGE_2GB = 0x7c000000 + MFD_HUGE_2MB = 0x54000000 + MFD_HUGE_32MB = 0x64000000 + MFD_HUGE_512KB = 0x4c000000 + MFD_HUGE_512MB = 0x74000000 + MFD_HUGE_64KB = 0x40000000 + MFD_HUGE_8MB = 0x5c000000 + MFD_HUGE_MASK = 0x3f + MFD_HUGE_SHIFT = 0x1a + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f + MNT_DETACH = 0x2 + MNT_EXPIRE = 0x4 + MNT_FORCE = 0x1 + MODULE_INIT_IGNORE_MODVERSIONS = 0x1 + MODULE_INIT_IGNORE_VERMAGIC = 0x2 + MSDOS_SUPER_MAGIC = 0x4d44 + MSG_BATCH = 0x40000 + MSG_CMSG_CLOEXEC = 0x40000000 + MSG_CONFIRM = 0x800 + MSG_CTRUNC = 0x8 + MSG_DONTROUTE = 0x4 + MSG_DONTWAIT = 0x40 + MSG_EOR = 0x80 + MSG_ERRQUEUE = 0x2000 + MSG_FASTOPEN = 0x20000000 + MSG_FIN = 0x200 + MSG_MORE = 0x8000 + MSG_NOSIGNAL = 0x4000 + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_PROXY = 0x10 + MSG_RST = 0x1000 + MSG_SYN = 0x400 + MSG_TRUNC = 0x20 + MSG_TRYHARD = 0x4 + MSG_WAITALL = 0x100 + MSG_WAITFORONE = 0x10000 + MSG_ZEROCOPY = 0x4000000 + MS_ACTIVE = 0x40000000 + MS_ASYNC = 0x1 + MS_BIND = 0x1000 + MS_BORN = 0x20000000 + MS_DIRSYNC = 0x80 + MS_INVALIDATE = 0x2 + MS_I_VERSION = 0x800000 + MS_KERNMOUNT = 0x400000 + MS_LAZYTIME = 0x2000000 + MS_MANDLOCK = 0x40 + MS_MGC_MSK = 0xffff0000 + MS_MGC_VAL = 0xc0ed0000 + MS_MOVE = 0x2000 + MS_NOATIME = 0x400 + MS_NODEV = 0x4 + MS_NODIRATIME = 0x800 + MS_NOEXEC = 0x8 + MS_NOREMOTELOCK = 0x8000000 + MS_NOSEC = 0x10000000 + MS_NOSUID = 0x2 + MS_NOUSER = -0x80000000 + MS_POSIXACL = 0x10000 + MS_PRIVATE = 0x40000 + MS_RDONLY = 0x1 + MS_REC = 0x4000 + MS_RELATIME = 0x200000 + MS_REMOUNT = 0x20 + MS_RMT_MASK = 0x2800051 + MS_SHARED = 0x100000 + MS_SILENT = 0x8000 + MS_SLAVE = 0x80000 + MS_STRICTATIME = 0x1000000 + MS_SUBMOUNT = 0x4000000 + MS_SYNC = 0x4 + MS_SYNCHRONOUS = 0x10 + MS_UNBINDABLE = 0x20000 + MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 + NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c + NETLINK_ADD_MEMBERSHIP = 0x1 + NETLINK_AUDIT = 0x9 + NETLINK_BROADCAST_ERROR = 0x4 + NETLINK_CAP_ACK = 0xa + NETLINK_CONNECTOR = 0xb + NETLINK_CRYPTO = 0x15 + NETLINK_DNRTMSG = 0xe + NETLINK_DROP_MEMBERSHIP = 0x2 + NETLINK_ECRYPTFS = 0x13 + NETLINK_EXT_ACK = 0xb + NETLINK_FIB_LOOKUP = 0xa + NETLINK_FIREWALL = 0x3 + NETLINK_GENERIC = 0x10 + NETLINK_GET_STRICT_CHK = 0xc + NETLINK_INET_DIAG = 0x4 + NETLINK_IP6_FW = 0xd + NETLINK_ISCSI = 0x8 + NETLINK_KOBJECT_UEVENT = 0xf + NETLINK_LISTEN_ALL_NSID = 0x8 + NETLINK_LIST_MEMBERSHIPS = 0x9 + NETLINK_NETFILTER = 0xc + NETLINK_NFLOG = 0x5 + NETLINK_NO_ENOBUFS = 0x5 + NETLINK_PKTINFO = 0x3 + NETLINK_RDMA 
= 0x14 + NETLINK_ROUTE = 0x0 + NETLINK_RX_RING = 0x6 + NETLINK_SCSITRANSPORT = 0x12 + NETLINK_SELINUX = 0x7 + NETLINK_SMC = 0x16 + NETLINK_SOCK_DIAG = 0x4 + NETLINK_TX_RING = 0x7 + NETLINK_UNUSED = 0x1 + NETLINK_USERSOCK = 0x2 + NETLINK_XFRM = 0x6 + NETNSA_MAX = 0x5 + NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 + NL0 = 0x0 + NL1 = 0x100 + NLA_ALIGNTO = 0x4 + NLA_F_NESTED = 0x8000 + NLA_F_NET_BYTEORDER = 0x4000 + NLA_HDRLEN = 0x4 + NLMSG_ALIGNTO = 0x4 + NLMSG_DONE = 0x3 + NLMSG_ERROR = 0x2 + NLMSG_HDRLEN = 0x10 + NLMSG_MIN_TYPE = 0x10 + NLMSG_NOOP = 0x1 + NLMSG_OVERRUN = 0x4 + NLM_F_ACK = 0x4 + NLM_F_ACK_TLVS = 0x200 + NLM_F_APPEND = 0x800 + NLM_F_ATOMIC = 0x400 + NLM_F_CAPPED = 0x100 + NLM_F_CREATE = 0x400 + NLM_F_DUMP = 0x300 + NLM_F_DUMP_FILTERED = 0x20 + NLM_F_DUMP_INTR = 0x10 + NLM_F_ECHO = 0x8 + NLM_F_EXCL = 0x200 + NLM_F_MATCH = 0x200 + NLM_F_MULTI = 0x2 + NLM_F_NONREC = 0x100 + NLM_F_REPLACE = 0x100 + NLM_F_REQUEST = 0x1 + NLM_F_ROOT = 0x100 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f + OCRNL = 0x8 + OFDEL = 0x80 + OFILL = 0x40 + ONLRET = 0x20 + ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 + OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 + O_ACCMODE = 0x3 + O_RDONLY = 0x0 + O_RDWR = 0x2 + O_WRONLY = 0x1 + PACKET_ADD_MEMBERSHIP = 0x1 + PACKET_AUXDATA = 0x8 + PACKET_BROADCAST = 0x1 + PACKET_COPY_THRESH = 0x7 + PACKET_DROP_MEMBERSHIP = 0x2 + PACKET_FANOUT = 0x12 + PACKET_FANOUT_CBPF = 0x6 + PACKET_FANOUT_CPU = 0x2 + PACKET_FANOUT_DATA = 0x16 + PACKET_FANOUT_EBPF = 0x7 + PACKET_FANOUT_FLAG_DEFRAG = 0x8000 + PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 + PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 + PACKET_FANOUT_HASH = 0x0 + PACKET_FANOUT_LB = 0x1 + PACKET_FANOUT_QM = 0x5 + PACKET_FANOUT_RND = 0x4 + PACKET_FANOUT_ROLLOVER = 0x3 + PACKET_FASTROUTE = 0x6 + PACKET_HDRLEN = 0xb + PACKET_HOST = 0x0 + PACKET_IGNORE_OUTGOING = 0x17 + PACKET_KERNEL = 0x7 + PACKET_LOOPBACK = 0x5 + PACKET_LOSS = 0xe + PACKET_MR_ALLMULTI = 0x2 + PACKET_MR_MULTICAST = 0x0 + PACKET_MR_PROMISC = 0x1 + PACKET_MR_UNICAST = 0x3 + PACKET_MULTICAST = 0x2 + PACKET_ORIGDEV = 0x9 + PACKET_OTHERHOST = 0x3 + PACKET_OUTGOING = 0x4 + PACKET_QDISC_BYPASS = 0x14 + PACKET_RECV_OUTPUT = 0x3 + PACKET_RESERVE = 0xc + PACKET_ROLLOVER_STATS = 0x15 + PACKET_RX_RING = 0x5 + PACKET_STATISTICS = 0x6 + PACKET_TIMESTAMP = 0x11 + PACKET_TX_HAS_OFF = 0x13 + PACKET_TX_RING = 0xd + PACKET_TX_TIMESTAMP = 0x10 + PACKET_USER = 0x6 + PACKET_VERSION = 0xa + PACKET_VNET_HDR = 0xf + PARITY_CRC16_PR0 = 0x2 + PARITY_CRC16_PR0_CCITT = 0x4 + PARITY_CRC16_PR1 = 0x3 + PARITY_CRC16_PR1_CCITT = 0x5 + PARITY_CRC32_PR0_CCITT = 0x6 + PARITY_CRC32_PR1_CCITT = 0x7 + PARITY_DEFAULT = 0x0 + PARITY_NONE = 0x1 + PARMRK = 
0x8 + PIPEFS_MAGIC = 0x50495045 + PPC_CMM_MAGIC = 0xc7571590 + PPPIOCGNPMODE = 0xc008744c + PPPIOCNEWUNIT = 0xc004743e + PRIO_PGRP = 0x1 + PRIO_PROCESS = 0x0 + PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 + PROT_EXEC = 0x4 + PROT_GROWSDOWN = 0x1000000 + PROT_GROWSUP = 0x2000000 + PROT_NONE = 0x0 + PROT_READ = 0x1 + PROT_WRITE = 0x2 + PR_CAPBSET_DROP = 0x18 + PR_CAPBSET_READ = 0x17 + PR_CAP_AMBIENT = 0x2f + PR_CAP_AMBIENT_CLEAR_ALL = 0x4 + PR_CAP_AMBIENT_IS_SET = 0x1 + PR_CAP_AMBIENT_LOWER = 0x3 + PR_CAP_AMBIENT_RAISE = 0x2 + PR_ENDIAN_BIG = 0x0 + PR_ENDIAN_LITTLE = 0x1 + PR_ENDIAN_PPC_LITTLE = 0x2 + PR_FPEMU_NOPRINT = 0x1 + PR_FPEMU_SIGFPE = 0x2 + PR_FP_EXC_ASYNC = 0x2 + PR_FP_EXC_DISABLED = 0x0 + PR_FP_EXC_DIV = 0x10000 + PR_FP_EXC_INV = 0x100000 + PR_FP_EXC_NONRECOV = 0x1 + PR_FP_EXC_OVF = 0x20000 + PR_FP_EXC_PRECISE = 0x3 + PR_FP_EXC_RES = 0x80000 + PR_FP_EXC_SW_ENABLE = 0x80 + PR_FP_EXC_UND = 0x40000 + PR_FP_MODE_FR = 0x1 + PR_FP_MODE_FRE = 0x2 + PR_GET_CHILD_SUBREAPER = 0x25 + PR_GET_DUMPABLE = 0x3 + PR_GET_ENDIAN = 0x13 + PR_GET_FPEMU = 0x9 + PR_GET_FPEXC = 0xb + PR_GET_FP_MODE = 0x2e + PR_GET_IO_FLUSHER = 0x3a + PR_GET_KEEPCAPS = 0x7 + PR_GET_NAME = 0x10 + PR_GET_NO_NEW_PRIVS = 0x27 + PR_GET_PDEATHSIG = 0x2 + PR_GET_SECCOMP = 0x15 + PR_GET_SECUREBITS = 0x1b + PR_GET_SPECULATION_CTRL = 0x34 + PR_GET_TAGGED_ADDR_CTRL = 0x38 + PR_GET_THP_DISABLE = 0x2a + PR_GET_TID_ADDRESS = 0x28 + PR_GET_TIMERSLACK = 0x1e + PR_GET_TIMING = 0xd + PR_GET_TSC = 0x19 + PR_GET_UNALIGN = 0x5 + PR_MCE_KILL = 0x21 + PR_MCE_KILL_CLEAR = 0x0 + PR_MCE_KILL_DEFAULT = 0x2 + PR_MCE_KILL_EARLY = 0x1 + PR_MCE_KILL_GET = 0x22 + PR_MCE_KILL_LATE = 0x0 + PR_MCE_KILL_SET = 0x1 + PR_MPX_DISABLE_MANAGEMENT = 0x2c + PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_PAC_APDAKEY = 0x4 + PR_PAC_APDBKEY = 0x8 + PR_PAC_APGAKEY = 0x10 + PR_PAC_APIAKEY = 0x1 + PR_PAC_APIBKEY = 0x2 + PR_PAC_RESET_KEYS = 0x36 + PR_SET_CHILD_SUBREAPER = 0x24 + PR_SET_DUMPABLE = 0x4 + PR_SET_ENDIAN = 0x14 + PR_SET_FPEMU = 0xa + PR_SET_FPEXC = 0xc + PR_SET_FP_MODE = 0x2d + PR_SET_IO_FLUSHER = 0x39 + PR_SET_KEEPCAPS = 0x8 + PR_SET_MM = 0x23 + PR_SET_MM_ARG_END = 0x9 + PR_SET_MM_ARG_START = 0x8 + PR_SET_MM_AUXV = 0xc + PR_SET_MM_BRK = 0x7 + PR_SET_MM_END_CODE = 0x2 + PR_SET_MM_END_DATA = 0x4 + PR_SET_MM_ENV_END = 0xb + PR_SET_MM_ENV_START = 0xa + PR_SET_MM_EXE_FILE = 0xd + PR_SET_MM_MAP = 0xe + PR_SET_MM_MAP_SIZE = 0xf + PR_SET_MM_START_BRK = 0x6 + PR_SET_MM_START_CODE = 0x1 + PR_SET_MM_START_DATA = 0x3 + PR_SET_MM_START_STACK = 0x5 + PR_SET_NAME = 0xf + PR_SET_NO_NEW_PRIVS = 0x26 + PR_SET_PDEATHSIG = 0x1 + PR_SET_PTRACER = 0x59616d61 + PR_SET_SECCOMP = 0x16 + PR_SET_SECUREBITS = 0x1c + PR_SET_SPECULATION_CTRL = 0x35 + PR_SET_TAGGED_ADDR_CTRL = 0x37 + PR_SET_THP_DISABLE = 0x29 + PR_SET_TIMERSLACK = 0x1d + PR_SET_TIMING = 0xe + PR_SET_TSC = 0x1a + PR_SET_UNALIGN = 0x6 + PR_SPEC_DISABLE = 0x4 + PR_SPEC_DISABLE_NOEXEC = 0x10 + PR_SPEC_ENABLE = 0x2 + PR_SPEC_FORCE_DISABLE = 0x8 + PR_SPEC_INDIRECT_BRANCH = 0x1 + PR_SPEC_NOT_AFFECTED = 0x0 + PR_SPEC_PRCTL = 0x1 + PR_SPEC_STORE_BYPASS = 0x0 + PR_SVE_GET_VL = 0x33 + PR_SVE_SET_VL = 0x32 + PR_SVE_SET_VL_ONEXEC = 0x40000 + PR_SVE_VL_INHERIT = 0x20000 + PR_SVE_VL_LEN_MASK = 0xffff + PR_TAGGED_ADDR_ENABLE = 0x1 + PR_TASK_PERF_EVENTS_DISABLE = 0x1f + PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMING_STATISTICAL = 0x0 + PR_TIMING_TIMESTAMP = 0x1 + PR_TSC_ENABLE = 0x1 + PR_TSC_SIGSEGV = 0x2 + PR_UNALIGN_NOPRINT = 0x1 + PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c + PTRACE_ATTACH = 0x10 + PTRACE_CONT = 0x7 + PTRACE_DETACH = 0x11 
+ PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 + PTRACE_EVENT_CLONE = 0x3 + PTRACE_EVENT_EXEC = 0x4 + PTRACE_EVENT_EXIT = 0x6 + PTRACE_EVENT_FORK = 0x1 + PTRACE_EVENT_SECCOMP = 0x7 + PTRACE_EVENT_STOP = 0x80 + PTRACE_EVENT_VFORK = 0x2 + PTRACE_EVENT_VFORK_DONE = 0x5 + PTRACE_GETEVENTMSG = 0x4201 + PTRACE_GETREGS = 0xc + PTRACE_GETREGSET = 0x4204 + PTRACE_GETSIGINFO = 0x4202 + PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e + PTRACE_INTERRUPT = 0x4207 + PTRACE_KILL = 0x8 + PTRACE_LISTEN = 0x4208 + PTRACE_O_EXITKILL = 0x100000 + PTRACE_O_MASK = 0x3000ff + PTRACE_O_SUSPEND_SECCOMP = 0x200000 + PTRACE_O_TRACECLONE = 0x8 + PTRACE_O_TRACEEXEC = 0x10 + PTRACE_O_TRACEEXIT = 0x40 + PTRACE_O_TRACEFORK = 0x2 + PTRACE_O_TRACESECCOMP = 0x80 + PTRACE_O_TRACESYSGOOD = 0x1 + PTRACE_O_TRACEVFORK = 0x4 + PTRACE_O_TRACEVFORKDONE = 0x20 + PTRACE_PEEKDATA = 0x2 + PTRACE_PEEKSIGINFO = 0x4209 + PTRACE_PEEKSIGINFO_SHARED = 0x1 + PTRACE_PEEKTEXT = 0x1 + PTRACE_PEEKUSR = 0x3 + PTRACE_POKEDATA = 0x5 + PTRACE_POKETEXT = 0x4 + PTRACE_POKEUSR = 0x6 + PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d + PTRACE_SEIZE = 0x4206 + PTRACE_SETOPTIONS = 0x4200 + PTRACE_SETREGS = 0xd + PTRACE_SETREGSET = 0x4205 + PTRACE_SETSIGINFO = 0x4203 + PTRACE_SETSIGMASK = 0x420b + PTRACE_SINGLESTEP = 0x9 + PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 + PTRACE_TRACEME = 0x0 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 + RENAME_EXCHANGE = 0x2 + RENAME_NOREPLACE = 0x1 + RENAME_WHITEOUT = 0x4 + RLIMIT_CORE = 0x4 + RLIMIT_CPU = 0x0 + RLIMIT_DATA = 0x2 + RLIMIT_FSIZE = 0x1 + RLIMIT_LOCKS = 0xa + RLIMIT_MSGQUEUE = 0xc + RLIMIT_NICE = 0xd + RLIMIT_RTPRIO = 0xe + RLIMIT_RTTIME = 0xf + RLIMIT_SIGPENDING = 0xb + RLIMIT_STACK = 0x3 + RLIM_INFINITY = 0xffffffffffffffff + RTAX_ADVMSS = 0x8 + RTAX_CC_ALGO = 0x10 + RTAX_CWND = 0x7 + RTAX_FASTOPEN_NO_COOKIE = 0x11 + RTAX_FEATURES = 0xc + RTAX_FEATURE_ALLFRAG = 0x8 + RTAX_FEATURE_ECN = 0x1 + RTAX_FEATURE_MASK = 0xf + RTAX_FEATURE_SACK = 0x2 + RTAX_FEATURE_TIMESTAMP = 0x4 + RTAX_HOPLIMIT = 0xa + RTAX_INITCWND = 0xb + RTAX_INITRWND = 0xe + RTAX_LOCK = 0x1 + RTAX_MAX = 0x11 + RTAX_MTU = 0x2 + RTAX_QUICKACK = 0xf + RTAX_REORDERING = 0x9 + RTAX_RTO_MIN = 0xd + RTAX_RTT = 0x4 + RTAX_RTTVAR = 0x5 + RTAX_SSTHRESH = 0x6 + RTAX_UNSPEC = 0x0 + RTAX_WINDOW = 0x3 + RTA_ALIGNTO = 0x4 + RTA_MAX = 0x1e + RTCF_DIRECTSRC = 0x4000000 + RTCF_DOREDIRECT = 0x1000000 + RTCF_LOG = 0x2000000 + RTCF_MASQ = 0x400000 + RTCF_NAT = 0x800000 + RTCF_VALVE = 0x200000 + RTC_AF = 0x20 + RTC_IRQF = 0x80 + RTC_MAX_FREQ = 0x2000 + RTC_PF = 0x40 + RTC_UF = 0x10 + RTF_ADDRCLASSMASK = 0xf8000000 + RTF_ADDRCONF = 0x40000 + RTF_ALLONLINK = 0x20000 + RTF_BROADCAST = 0x10000000 + RTF_CACHE = 0x1000000 + RTF_DEFAULT = 0x10000 + RTF_DYNAMIC = 0x10 + RTF_FLOW = 0x2000000 + RTF_GATEWAY = 0x2 + RTF_HOST = 0x4 + RTF_INTERFACE = 0x40000000 + RTF_IRTT = 0x100 + RTF_LINKRT = 0x100000 + RTF_LOCAL = 0x80000000 + RTF_MODIFIED = 0x20 + RTF_MSS = 0x40 + RTF_MTU = 0x40 + RTF_MULTICAST = 0x20000000 + RTF_NAT = 0x8000000 + RTF_NOFORWARD = 0x1000 + RTF_NONEXTHOP = 0x200000 + RTF_NOPMTUDISC = 0x4000 + RTF_POLICY = 0x4000000 + RTF_REINSTATE = 0x8 + RTF_REJECT = 0x200 + RTF_STATIC = 0x400 + RTF_THROW = 0x2000 + RTF_UP = 0x1 + RTF_WINDOW = 0x80 + RTF_XRESOLVE = 0x800 + 
RTMGRP_DECnet_IFADDR = 0x1000 + RTMGRP_DECnet_ROUTE = 0x4000 + RTMGRP_IPV4_IFADDR = 0x10 + RTMGRP_IPV4_MROUTE = 0x20 + RTMGRP_IPV4_ROUTE = 0x40 + RTMGRP_IPV4_RULE = 0x80 + RTMGRP_IPV6_IFADDR = 0x100 + RTMGRP_IPV6_IFINFO = 0x800 + RTMGRP_IPV6_MROUTE = 0x200 + RTMGRP_IPV6_PREFIX = 0x20000 + RTMGRP_IPV6_ROUTE = 0x400 + RTMGRP_LINK = 0x1 + RTMGRP_NEIGH = 0x4 + RTMGRP_NOTIFY = 0x2 + RTMGRP_TC = 0x8 + RTM_BASE = 0x10 + RTM_DELACTION = 0x31 + RTM_DELADDR = 0x15 + RTM_DELADDRLABEL = 0x49 + RTM_DELCHAIN = 0x65 + RTM_DELLINK = 0x11 + RTM_DELLINKPROP = 0x6d + RTM_DELMDB = 0x55 + RTM_DELNEIGH = 0x1d + RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 + RTM_DELNSID = 0x59 + RTM_DELQDISC = 0x25 + RTM_DELROUTE = 0x19 + RTM_DELRULE = 0x21 + RTM_DELTCLASS = 0x29 + RTM_DELTFILTER = 0x2d + RTM_DELVLAN = 0x71 + RTM_F_CLONED = 0x200 + RTM_F_EQUALIZE = 0x400 + RTM_F_FIB_MATCH = 0x2000 + RTM_F_LOOKUP_TABLE = 0x1000 + RTM_F_NOTIFY = 0x100 + RTM_F_OFFLOAD = 0x4000 + RTM_F_PREFIX = 0x800 + RTM_F_TRAP = 0x8000 + RTM_GETACTION = 0x32 + RTM_GETADDR = 0x16 + RTM_GETADDRLABEL = 0x4a + RTM_GETANYCAST = 0x3e + RTM_GETCHAIN = 0x66 + RTM_GETDCB = 0x4e + RTM_GETLINK = 0x12 + RTM_GETLINKPROP = 0x6e + RTM_GETMDB = 0x56 + RTM_GETMULTICAST = 0x3a + RTM_GETNEIGH = 0x1e + RTM_GETNEIGHTBL = 0x42 + RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a + RTM_GETNSID = 0x5a + RTM_GETQDISC = 0x26 + RTM_GETROUTE = 0x1a + RTM_GETRULE = 0x22 + RTM_GETSTATS = 0x5e + RTM_GETTCLASS = 0x2a + RTM_GETTFILTER = 0x2e + RTM_GETVLAN = 0x72 + RTM_MAX = 0x73 + RTM_NEWACTION = 0x30 + RTM_NEWADDR = 0x14 + RTM_NEWADDRLABEL = 0x48 + RTM_NEWCACHEREPORT = 0x60 + RTM_NEWCHAIN = 0x64 + RTM_NEWLINK = 0x10 + RTM_NEWLINKPROP = 0x6c + RTM_NEWMDB = 0x54 + RTM_NEWNDUSEROPT = 0x44 + RTM_NEWNEIGH = 0x1c + RTM_NEWNEIGHTBL = 0x40 + RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 + RTM_NEWNSID = 0x58 + RTM_NEWNVLAN = 0x70 + RTM_NEWPREFIX = 0x34 + RTM_NEWQDISC = 0x24 + RTM_NEWROUTE = 0x18 + RTM_NEWRULE = 0x20 + RTM_NEWSTATS = 0x5c + RTM_NEWTCLASS = 0x28 + RTM_NEWTFILTER = 0x2c + RTM_NR_FAMILIES = 0x19 + RTM_NR_MSGTYPES = 0x64 + RTM_SETDCB = 0x4f + RTM_SETLINK = 0x13 + RTM_SETNEIGHTBL = 0x43 + RTNH_ALIGNTO = 0x4 + RTNH_COMPARE_MASK = 0x19 + RTNH_F_DEAD = 0x1 + RTNH_F_LINKDOWN = 0x10 + RTNH_F_OFFLOAD = 0x8 + RTNH_F_ONLINK = 0x4 + RTNH_F_PERVASIVE = 0x2 + RTNH_F_UNRESOLVED = 0x20 + RTN_MAX = 0xb + RTPROT_BABEL = 0x2a + RTPROT_BGP = 0xba + RTPROT_BIRD = 0xc + RTPROT_BOOT = 0x3 + RTPROT_DHCP = 0x10 + RTPROT_DNROUTED = 0xd + RTPROT_EIGRP = 0xc0 + RTPROT_GATED = 0x8 + RTPROT_ISIS = 0xbb + RTPROT_KERNEL = 0x2 + RTPROT_MROUTED = 0x11 + RTPROT_MRT = 0xa + RTPROT_NTK = 0xf + RTPROT_OSPF = 0xbc + RTPROT_RA = 0x9 + RTPROT_REDIRECT = 0x1 + RTPROT_RIP = 0xbd + RTPROT_STATIC = 0x4 + RTPROT_UNSPEC = 0x0 + RTPROT_XORP = 0xe + RTPROT_ZEBRA = 0xb + RT_CLASS_DEFAULT = 0xfd + RT_CLASS_LOCAL = 0xff + RT_CLASS_MAIN = 0xfe + RT_CLASS_MAX = 0xff + RT_CLASS_UNSPEC = 0x0 + RUSAGE_CHILDREN = -0x1 + RUSAGE_SELF = 0x0 + RUSAGE_THREAD = 0x1 + RWF_APPEND = 0x10 + RWF_DSYNC = 0x2 + RWF_HIPRI = 0x1 + RWF_NOWAIT = 0x8 + RWF_SUPPORTED = 0x1f + RWF_SYNC = 0x4 + RWF_WRITE_LIFE_NOT_SET = 0x0 + SCM_CREDENTIALS = 0x2 + SCM_RIGHTS = 0x1 + SCM_TIMESTAMP = 0x1d + SC_LOG_FLUSH = 0x100000 + SECCOMP_MODE_DISABLED = 0x0 + SECCOMP_MODE_FILTER = 0x2 + SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c + SHUT_RD = 0x0 + SHUT_RDWR = 0x2 + SHUT_WR = 0x1 + SIOCADDDLCI = 0x8980 + SIOCADDMULTI = 0x8931 + SIOCADDRT = 0x890b + SIOCBONDCHANGEACTIVE = 0x8995 + SIOCBONDENSLAVE = 0x8990 + SIOCBONDINFOQUERY = 
0x8994 + SIOCBONDRELEASE = 0x8991 + SIOCBONDSETHWADDR = 0x8992 + SIOCBONDSLAVEINFOQUERY = 0x8993 + SIOCBRADDBR = 0x89a0 + SIOCBRADDIF = 0x89a2 + SIOCBRDELBR = 0x89a1 + SIOCBRDELIF = 0x89a3 + SIOCDARP = 0x8953 + SIOCDELDLCI = 0x8981 + SIOCDELMULTI = 0x8932 + SIOCDELRT = 0x890c + SIOCDEVPRIVATE = 0x89f0 + SIOCDIFADDR = 0x8936 + SIOCDRARP = 0x8960 + SIOCETHTOOL = 0x8946 + SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 + SIOCGHWTSTAMP = 0x89b1 + SIOCGIFADDR = 0x8915 + SIOCGIFBR = 0x8940 + SIOCGIFBRDADDR = 0x8919 + SIOCGIFCONF = 0x8912 + SIOCGIFCOUNT = 0x8938 + SIOCGIFDSTADDR = 0x8917 + SIOCGIFENCAP = 0x8925 + SIOCGIFFLAGS = 0x8913 + SIOCGIFHWADDR = 0x8927 + SIOCGIFINDEX = 0x8933 + SIOCGIFMAP = 0x8970 + SIOCGIFMEM = 0x891f + SIOCGIFMETRIC = 0x891d + SIOCGIFMTU = 0x8921 + SIOCGIFNAME = 0x8910 + SIOCGIFNETMASK = 0x891b + SIOCGIFPFLAGS = 0x8935 + SIOCGIFSLAVE = 0x8929 + SIOCGIFTXQLEN = 0x8942 + SIOCGIFVLAN = 0x8982 + SIOCGMIIPHY = 0x8947 + SIOCGMIIREG = 0x8948 + SIOCGPPPCSTATS = 0x89f2 + SIOCGPPPSTATS = 0x89f0 + SIOCGPPPVER = 0x89f1 + SIOCGRARP = 0x8961 + SIOCGSKNS = 0x894c + SIOCGSTAMP = 0x8906 + SIOCGSTAMPNS = 0x8907 + SIOCGSTAMPNS_OLD = 0x8907 + SIOCGSTAMP_OLD = 0x8906 + SIOCOUTQNSD = 0x894b + SIOCPROTOPRIVATE = 0x89e0 + SIOCRTMSG = 0x890d + SIOCSARP = 0x8955 + SIOCSHWTSTAMP = 0x89b0 + SIOCSIFADDR = 0x8916 + SIOCSIFBR = 0x8941 + SIOCSIFBRDADDR = 0x891a + SIOCSIFDSTADDR = 0x8918 + SIOCSIFENCAP = 0x8926 + SIOCSIFFLAGS = 0x8914 + SIOCSIFHWADDR = 0x8924 + SIOCSIFHWBROADCAST = 0x8937 + SIOCSIFLINK = 0x8911 + SIOCSIFMAP = 0x8971 + SIOCSIFMEM = 0x8920 + SIOCSIFMETRIC = 0x891e + SIOCSIFMTU = 0x8922 + SIOCSIFNAME = 0x8923 + SIOCSIFNETMASK = 0x891c + SIOCSIFPFLAGS = 0x8934 + SIOCSIFSLAVE = 0x8930 + SIOCSIFTXQLEN = 0x8943 + SIOCSIFVLAN = 0x8983 + SIOCSMIIREG = 0x8949 + SIOCSRARP = 0x8962 + SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b + SOCK_DCCP = 0x6 + SOCK_IOC_TYPE = 0x89 + SOCK_PACKET = 0xa + SOCK_RAW = 0x3 + SOCK_RDM = 0x4 + SOCK_SEQPACKET = 0x5 + SOL_AAL = 0x109 + SOL_ALG = 0x117 + SOL_ATM = 0x108 + SOL_CAIF = 0x116 + SOL_CAN_BASE = 0x64 + SOL_DCCP = 0x10d + SOL_DECNET = 0x105 + SOL_ICMPV6 = 0x3a + SOL_IP = 0x0 + SOL_IPV6 = 0x29 + SOL_IRDA = 0x10a + SOL_IUCV = 0x115 + SOL_KCM = 0x119 + SOL_LLC = 0x10c + SOL_NETBEUI = 0x10b + SOL_NETLINK = 0x10e + SOL_NFC = 0x118 + SOL_PACKET = 0x107 + SOL_PNPIPE = 0x113 + SOL_PPPOL2TP = 0x111 + SOL_RAW = 0xff + SOL_RDS = 0x114 + SOL_RXRPC = 0x110 + SOL_TCP = 0x6 + SOL_TIPC = 0x10f + SOL_TLS = 0x11a + SOL_X25 = 0x106 + SOL_XDP = 0x11b + SOMAXCONN = 0x1000 + SO_ATTACH_FILTER = 0x1a + SO_DEBUG = 0x1 + SO_DETACH_BPF = 0x1b + SO_DETACH_FILTER = 0x1b + SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 + SO_EE_CODE_TXTIME_MISSED = 0x2 + SO_EE_CODE_ZEROCOPY_COPIED = 0x1 + SO_EE_ORIGIN_ICMP = 0x2 + SO_EE_ORIGIN_ICMP6 = 0x3 + SO_EE_ORIGIN_LOCAL = 0x1 + SO_EE_ORIGIN_NONE = 0x0 + SO_EE_ORIGIN_TIMESTAMPING = 0x4 + SO_EE_ORIGIN_TXSTATUS = 0x4 + SO_EE_ORIGIN_TXTIME = 0x6 + SO_EE_ORIGIN_ZEROCOPY = 0x5 + SO_GET_FILTER = 0x1a + SO_NO_CHECK = 0xb + SO_PEERNAME = 0x1c + SO_PRIORITY = 0xc + SO_TIMESTAMP = 0x1d + SO_TIMESTAMP_OLD = 0x1d + 
SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 + SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 + SO_VM_SOCKETS_BUFFER_SIZE = 0x0 + SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 + SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 + SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 + SO_VM_SOCKETS_TRUSTED = 0x5 + SPLICE_F_GIFT = 0x8 + SPLICE_F_MORE = 0x4 + SPLICE_F_MOVE = 0x1 + SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_ATTR_VERITY = 0x100000 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 + SYNC_FILE_RANGE_WAIT_AFTER = 0x4 + SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 + SYNC_FILE_RANGE_WRITE = 0x2 + SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 + SYSFS_MAGIC = 0x62656572 + S_BLKSIZE = 0x200 + S_IEXEC = 0x40 + S_IFBLK = 0x6000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFIFO = 0x1000 + S_IFLNK = 0xa000 + S_IFMT = 0xf000 + S_IFREG = 0x8000 + S_IFSOCK = 0xc000 + S_IREAD = 0x100 + S_IRGRP = 0x20 + S_IROTH = 0x4 + S_IRUSR = 0x100 + S_IRWXG = 0x38 + S_IRWXO = 0x7 + S_IRWXU = 0x1c0 + S_ISGID = 0x400 + S_ISUID = 0x800 + S_ISVTX = 0x200 + S_IWGRP = 0x10 + S_IWOTH = 0x2 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXGRP = 0x8 + S_IXOTH = 0x1 + S_IXUSR = 0x40 + TAB0 = 0x0 + TASKSTATS_CMD_ATTR_MAX = 0x4 + TASKSTATS_CMD_MAX = 0x2 + TASKSTATS_GENL_NAME = "TASKSTATS" + TASKSTATS_GENL_VERSION = 0x1 + TASKSTATS_TYPE_MAX = 0x6 + TASKSTATS_VERSION = 0xa + TCIFLUSH = 0x0 + TCIOFF = 0x2 + TCIOFLUSH = 0x2 + TCION = 0x3 + TCOFLUSH = 0x1 + TCOOFF = 0x0 + TCOON = 0x1 + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea + TCP_CC_INFO = 0x1a + TCP_CM_INQ = 0x24 + TCP_CONGESTION = 0xd + TCP_COOKIE_IN_ALWAYS = 0x1 + TCP_COOKIE_MAX = 0x10 + TCP_COOKIE_MIN = 0x8 + TCP_COOKIE_OUT_NEVER = 0x2 + TCP_COOKIE_PAIR_SIZE = 0x20 + TCP_COOKIE_TRANSACTIONS = 0xf + TCP_CORK = 0x3 + TCP_DEFER_ACCEPT = 0x9 + TCP_FASTOPEN = 0x17 + TCP_FASTOPEN_CONNECT = 0x1e + TCP_FASTOPEN_KEY = 0x21 + TCP_FASTOPEN_NO_COOKIE = 0x22 + TCP_INFO = 0xb + TCP_INQ = 0x24 + TCP_KEEPCNT = 0x6 + TCP_KEEPIDLE = 0x4 + TCP_KEEPINTVL = 0x5 + TCP_LINGER2 = 0x8 + TCP_MAXSEG = 0x2 + TCP_MAXWIN = 0xffff + TCP_MAX_WINSHIFT = 0xe + TCP_MD5SIG = 0xe + TCP_MD5SIG_EXT = 0x20 + TCP_MD5SIG_FLAG_PREFIX = 0x1 + TCP_MD5SIG_MAXKEYLEN = 0x50 + TCP_MSS = 0x200 + TCP_MSS_DEFAULT = 0x218 + TCP_MSS_DESIRED = 0x4c4 + TCP_NODELAY = 0x1 + TCP_NOTSENT_LOWAT = 0x19 + TCP_QUEUE_SEQ = 0x15 + TCP_QUICKACK = 0xc + TCP_REPAIR = 0x13 + TCP_REPAIR_OFF = 0x0 + TCP_REPAIR_OFF_NO_WP = -0x1 + TCP_REPAIR_ON = 0x1 + TCP_REPAIR_OPTIONS = 0x16 + TCP_REPAIR_QUEUE = 0x14 + TCP_REPAIR_WINDOW = 0x1d + TCP_SAVED_SYN = 0x1c + TCP_SAVE_SYN = 0x1b + TCP_SYNCNT = 0x7 + TCP_S_DATA_IN = 0x4 + TCP_S_DATA_OUT = 0x8 + TCP_THIN_DUPACK = 0x11 + TCP_THIN_LINEAR_TIMEOUTS = 0x10 + TCP_TIMESTAMP = 0x18 + TCP_TX_DELAY = 0x25 + TCP_ULP = 0x1f + TCP_USER_TIMEOUT = 0x12 + TCP_WINDOW_CLAMP = 0xa + TCP_ZEROCOPY_RECEIVE = 0x23 + TFD_TIMER_ABSTIME = 0x1 + TFD_TIMER_CANCEL_ON_SET = 0x2 + TIMER_ABSTIME = 0x1 + TIOCM_DTR = 0x2 + TIOCM_LE = 0x1 + TIOCM_RTS = 0x4 + TIOCPKT_DATA = 0x0 + TIOCPKT_DOSTOP = 0x20 + TIOCPKT_FLUSHREAD = 0x1 + TIOCPKT_FLUSHWRITE = 0x2 + TIOCPKT_IOCTL = 0x40 + TIOCPKT_NOSTOP = 0x10 + TIOCPKT_START = 0x8 + 
TIOCPKT_STOP = 0x4 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_AEAD_ALG_NAME = 0x20 + TIPC_AEAD_KEYLEN_MAX = 0x24 + TIPC_AEAD_KEYLEN_MIN = 0x14 + TIPC_AEAD_KEY_SIZE_MAX = 0x48 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODELAY = 0x8a + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff + TMPFS_MAGIC = 0x1021994 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 + TS_COMM_LEN = 0x20 + UDF_SUPER_MAGIC = 0x15013346 + UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 + UTIME_NOW = 0x3fffffff + UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 + VERASE = 0x2 + VINTR = 0x0 + VKILL = 0x3 + VLNEXT = 0xf + VMADDR_CID_ANY = 0xffffffff + VMADDR_CID_HOST = 0x2 + VMADDR_CID_HYPERVISOR = 0x0 + VMADDR_CID_LOCAL = 0x1 + VMADDR_PORT_ANY = 0xffffffff + VM_SOCKETS_INVALID_VERSION = 0xffffffff + VQUIT = 0x1 + VT0 = 0x0 + WALL = 0x40000000 + WCLONE = 0x80000000 + WCONTINUED = 0x8 + WDIOC_SETPRETIMEOUT = 0xc0045708 + WDIOC_SETTIMEOUT = 0xc0045706 + WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + 
WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c + WNOHANG = 0x1 + WNOTHREAD = 0x20000000 + WNOWAIT = 0x1000000 + WSTOPPED = 0x2 + WUNTRACED = 0x2 + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + XDP_COPY = 0x2 + XDP_FLAGS_DRV_MODE = 0x4 + XDP_FLAGS_HW_MODE = 0x8 + XDP_FLAGS_MASK = 0xf + XDP_FLAGS_MODES = 0xe + XDP_FLAGS_SKB_MODE = 0x2 + XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 + XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 + XDP_PACKET_HEADROOM = 0x100 + XDP_PGOFF_RX_RING = 0x0 + XDP_PGOFF_TX_RING = 0x80000000 + XDP_RING_NEED_WAKEUP = 0x1 + XDP_RX_RING = 0x2 + XDP_SHARED_UMEM = 0x1 + XDP_STATISTICS = 0x7 + XDP_TX_RING = 0x3 + XDP_UMEM_COMPLETION_RING = 0x6 + XDP_UMEM_FILL_RING = 0x5 + XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 + XDP_UMEM_PGOFF_FILL_RING = 0x100000000 + XDP_UMEM_REG = 0x4 + XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 + XDP_USE_NEED_WAKEUP = 0x8 + XDP_ZEROCOPY = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 + XFS_SUPER_MAGIC = 0x58465342 + Z3FOLD_MAGIC = 0x33 + ZONEFS_MAGIC = 0x5a4f4653 + ZSMALLOC_MAGIC = 0x58295829 +) + +// Errors +const ( + E2BIG = syscall.Errno(0x7) + EACCES = syscall.Errno(0xd) + EAGAIN = syscall.Errno(0xb) + EBADF = syscall.Errno(0x9) + EBUSY = syscall.Errno(0x10) + ECHILD = syscall.Errno(0xa) + EDOM = syscall.Errno(0x21) + EEXIST = syscall.Errno(0x11) + EFAULT = syscall.Errno(0xe) + EFBIG = syscall.Errno(0x1b) + EINTR = syscall.Errno(0x4) + EINVAL = syscall.Errno(0x16) + EIO = syscall.Errno(0x5) + EISDIR = syscall.Errno(0x15) + EMFILE = syscall.Errno(0x18) + EMLINK = syscall.Errno(0x1f) + ENFILE = syscall.Errno(0x17) + ENODEV = syscall.Errno(0x13) + ENOENT = syscall.Errno(0x2) + ENOEXEC = syscall.Errno(0x8) + ENOMEM = syscall.Errno(0xc) + ENOSPC = syscall.Errno(0x1c) + ENOTBLK = syscall.Errno(0xf) + ENOTDIR = syscall.Errno(0x14) + ENOTTY = syscall.Errno(0x19) + ENXIO = syscall.Errno(0x6) + EPERM = syscall.Errno(0x1) + EPIPE = syscall.Errno(0x20) + ERANGE = syscall.Errno(0x22) + EROFS = syscall.Errno(0x1e) + ESPIPE = syscall.Errno(0x1d) + ESRCH = syscall.Errno(0x3) + ETXTBSY = syscall.Errno(0x1a) + EWOULDBLOCK = syscall.Errno(0xb) 
+ EXDEV = syscall.Errno(0x12) +) + +// Signals +const ( + SIGABRT = syscall.Signal(0x6) + SIGALRM = syscall.Signal(0xe) + SIGFPE = syscall.Signal(0x8) + SIGHUP = syscall.Signal(0x1) + SIGILL = syscall.Signal(0x4) + SIGINT = syscall.Signal(0x2) + SIGIOT = syscall.Signal(0x6) + SIGKILL = syscall.Signal(0x9) + SIGPIPE = syscall.Signal(0xd) + SIGQUIT = syscall.Signal(0x3) + SIGSEGV = syscall.Signal(0xb) + SIGTERM = syscall.Signal(0xf) + SIGTRAP = syscall.Signal(0x5) +) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index fcf5796a0..5e974110d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -11,2784 +11,489 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a 
- ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x80041270 - BLKBSZSET = 0x40041271 - BLKFLSBUF = 0x1261 - BLKFRAGET = 0x1265 - BLKFRASET = 0x1264 - BLKGETSIZE = 0x1260 - BLKGETSIZE64 = 0x80041272 - BLKPBSZGET = 0x127b - BLKRAGET = 0x1263 - BLKRASET = 0x1262 - BLKROGET = 0x125e - BLKROSET = 0x125d - BLKRRPART = 0x125f - BLKSECTGET = 0x1267 - BLKSECTSET = 0x1266 - BLKSSZGET = 0x1268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TEST_RND_HI32 = 0x4 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - 
BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 
0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DMA_BUF_MAGIC = 0x444d4142 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LLDP = 0x88cc - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 
- FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x1000 - FP_XSTATE_MAGIC2 = 0x46505845 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0xc - F_GETLK64 = 0xc - F_GETOWN = 0x9 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_FUTURE_WRITE = 0x10 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0xd - F_SETLK64 = 0xd - F_SETLKW = 0xe - F_SETLKW64 = 0xe - F_SETOWN = 0x8 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - 
GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x800 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - 
IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_ROUTER_ALERT_ISOLATE = 0x1e - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 
0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CAPABILITIES = 0x1f - KEYCTL_CAPS0_BIG_KEY = 0x10 - KEYCTL_CAPS0_CAPABILITIES = 0x1 - KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 - KEYCTL_CAPS0_INVALIDATE = 0x20 - KEYCTL_CAPS0_MOVE = 0x80 - KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 - KEYCTL_CAPS0_PUBLIC_KEY = 0x8 - KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 - KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 - KEYCTL_CAPS1_NS_KEY_TAG = 0x2 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_MOVE = 0x1e - KEYCTL_MOVE_EXCL = 0x1 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - 
LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_32BIT = 0x40 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x100 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x2000 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x4000 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x20000 - MAP_SYNC = 0x80000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - 
NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFDBITS = 0x20 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - NS_GET_NSTYPE = 0xb703 - NS_GET_OWNER_UID = 0xb704 - NS_GET_PARENT = 0xb702 - NS_GET_USERNS = 0xb701 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x400 - O_ASYNC = 0x2000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x40 - O_DIRECT = 0x4000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x1000 - O_EXCL = 0x80 - O_FSYNC = 0x101000 - O_LARGEFILE = 0x8000 - O_NDELAY = 0x800 - O_NOATIME = 0x40000 - O_NOCTTY = 0x100 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x800 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x101000 - O_SYNC = 0x101000 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 
0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x2401 - PERF_EVENT_IOC_ENABLE = 0x2400 - PERF_EVENT_IOC_ID = 0x80042407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4004240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 - PERF_EVENT_IOC_PERIOD = 0x40082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc004240a - PERF_EVENT_IOC_REFRESH = 0x2402 - PERF_EVENT_IOC_RESET = 0x2403 - PERF_EVENT_IOC_SET_BPF = 0x40042408 - PERF_EVENT_IOC_SET_FILTER = 0x40042406 - PERF_EVENT_IOC_SET_OUTPUT = 0x2405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x4004743d - PPPIOCATTCHAN = 0x40047438 - PPPIOCCONNECT = 0x4004743a - PPPIOCDETACH = 0x4004743c - PPPIOCDISCONN = 0x7439 - PPPIOCGASYNCMAP = 0x80047458 - PPPIOCGCHAN = 0x80047437 - PPPIOCGDEBUG = 0x80047441 - PPPIOCGFLAGS = 0x8004745a - PPPIOCGIDLE = 0x8008743f - PPPIOCGL2TPSTATS = 0x80487436 - PPPIOCGMRU = 0x80047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x80047455 - PPPIOCGUNIT = 0x80047456 - PPPIOCGXASYNCMAP = 0x80207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x40087446 - PPPIOCSASYNCMAP = 0x40047457 - PPPIOCSCOMPRESS = 0x400c744d - PPPIOCSDEBUG = 0x40047440 - PPPIOCSFLAGS = 0x40047459 - PPPIOCSMAXCID = 0x40047451 - PPPIOCSMRRU = 0x4004743b - PPPIOCSMRU = 0x40047452 - PPPIOCSNPMODE = 0x4008744b - PPPIOCSPASS = 0x40087447 - PPPIOCSRASYNCMAP = 0x40047454 - PPPIOCSXASYNCMAP = 0x4020744f - PPPIOCXFERUNIT = 0x744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - 
PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 - PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETFPXREGS = 0x12 - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_SYSCALL_INFO = 0x420e - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKETEXT = 0x4 - 
PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETFPXREGS = 0x13 - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SINGLEBLOCK = 0x21 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_SYSCALL_INFO_ENTRY = 0x1 - PTRACE_SYSCALL_INFO_EXIT = 0x2 - PTRACE_SYSCALL_INFO_NONE = 0x0 - PTRACE_SYSCALL_INFO_SECCOMP = 0x3 - PTRACE_SYSEMU = 0x1f - PTRACE_SYSEMU_SINGLESTEP = 0x20 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x40085203 - RNDADDTOENTCNT = 0x40045201 - RNDCLEARPOOL = 0x5206 - RNDGETENTCNT = 0x80045200 - RNDGETPOOL = 0x80085202 - RNDRESEEDCRNG = 0x5207 - RNDZAPENTCNT = 0x5204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x7002 - RTC_AIE_ON = 0x7001 - RTC_ALM_READ = 0x80247008 - RTC_ALM_SET = 0x40247007 - RTC_EPOCH_READ = 0x8004700d - RTC_EPOCH_SET = 0x4004700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x8004700b - RTC_IRQP_SET = 0x4004700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x7006 - RTC_PIE_ON = 0x7005 - RTC_PLL_GET = 0x801c7011 - RTC_PLL_SET = 0x401c7012 - RTC_RD_TIME = 0x80247009 - RTC_SET_TIME = 0x4024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x7004 - RTC_UIE_ON = 0x7003 - RTC_VL_CLR = 0x7014 - RTC_VL_READ = 0x80047013 - RTC_WIE_OFF = 0x7010 - RTC_WIE_ON = 0x700f - RTC_WKALM_RD = 0x80287010 - RTC_WKALM_SET = 0x4028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - 
RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNEXTHOP = 0x69 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNEXTHOP = 0x6a - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNEXTHOP = 0x68 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x17 - RTM_NR_MSGTYPES = 0x5c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x800 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGETLINKNAME = 0x89e0 - SIOCGETNODEID = 0x89e1 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - 
SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x80108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x80108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x800 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0x1 - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1e - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x6 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DETACH_REUSEPORT_BPF = 0x44 - SO_DOMAIN = 0x27 - SO_DONTROUTE = 0x5 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x4 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - 
SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x9 - SO_LINGER = 0xd - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0xa - SO_PASSCRED = 0x10 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1f - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x26 - SO_RCVBUF = 0x8 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x12 - SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 - SO_REUSEADDR = 0x2 - SO_REUSEPORT = 0xf - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x7 - SO_SNDBUFFORCE = 0x20 - SO_SNDLOWAT = 0x13 - SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x3 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 
0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - TCSETXW = 0x5435 - TCXONC = 0x540a - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x541d - TIOCEXCL = 0x540c - TIOCGDEV = 0x80045432 - TIOCGETD = 0x5424 - TIOCGEXCL = 0x80045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x80285442 - TIOCGLCKTRMIOS = 0x5456 - TIOCGPGRP = 0x540f - TIOCGPKT = 0x80045438 - TIOCGPTLCK = 0x80045439 - TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 - TIOCGRS485 = 0x542e - TIOCGSERIAL = 0x541e - TIOCGSID = 0x5429 - TIOCGSOFTCAR = 0x5419 - TIOCGWINSZ = 0x5413 - TIOCINQ = 0x541b - TIOCLINUX = 0x541c - TIOCMBIC = 0x5417 - TIOCMBIS = 0x5416 - TIOCMGET = 0x5415 - TIOCMIWAIT = 0x545c - TIOCMSET = 0x5418 - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x5422 - TIOCNXCL = 0x540d - TIOCOUTQ = 0x5411 - TIOCPKT = 0x5420 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x540e - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x5423 - TIOCSIG = 0x40045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x5457 - TIOCSPGRP = 0x5410 - TIOCSPTLCK = 0x40045431 - TIOCSRS485 = 0x542f - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x541a - TIOCSTI = 0x5412 - TIOCSWINSZ = 0x5414 - TIOCVHANGUP = 0x5437 - TIPC_ADDR_ID = 0x3 - TIPC_ADDR_MCAST = 0x1 - TIPC_ADDR_NAME = 0x2 - TIPC_ADDR_NAMESEQ = 0x1 - TIPC_CFG_SRV = 0x0 - TIPC_CLUSTER_BITS = 0xc - TIPC_CLUSTER_MASK = 0xfff000 - TIPC_CLUSTER_OFFSET = 0xc - TIPC_CLUSTER_SIZE = 0xfff - TIPC_CONN_SHUTDOWN = 0x5 - TIPC_CONN_TIMEOUT = 0x82 - TIPC_CRITICAL_IMPORTANCE = 0x3 - TIPC_DESTNAME = 0x3 - TIPC_DEST_DROPPABLE = 0x81 - TIPC_ERRINFO = 0x1 - TIPC_ERR_NO_NAME = 0x1 - TIPC_ERR_NO_NODE = 0x3 - TIPC_ERR_NO_PORT = 0x2 - TIPC_ERR_OVERLOAD = 0x4 - TIPC_GROUP_JOIN = 0x87 - TIPC_GROUP_LEAVE = 0x88 - TIPC_GROUP_LOOPBACK = 0x1 - TIPC_GROUP_MEMBER_EVTS = 0x2 - TIPC_HIGH_IMPORTANCE = 0x2 - TIPC_IMPORTANCE = 0x7f 
- TIPC_LINK_STATE = 0x2 - TIPC_LOW_IMPORTANCE = 0x0 - TIPC_MAX_BEARER_NAME = 0x20 - TIPC_MAX_IF_NAME = 0x10 - TIPC_MAX_LINK_NAME = 0x44 - TIPC_MAX_MEDIA_NAME = 0x10 - TIPC_MAX_USER_MSG_SIZE = 0x101d0 - TIPC_MCAST_BROADCAST = 0x85 - TIPC_MCAST_REPLICAST = 0x86 - TIPC_MEDIUM_IMPORTANCE = 0x1 - TIPC_NODEID_LEN = 0x10 - TIPC_NODE_BITS = 0xc - TIPC_NODE_MASK = 0xfff - TIPC_NODE_OFFSET = 0x0 - TIPC_NODE_RECVQ_DEPTH = 0x83 - TIPC_NODE_SIZE = 0xfff - TIPC_NODE_STATE = 0x0 - TIPC_OK = 0x0 - TIPC_PUBLISHED = 0x1 - TIPC_RESERVED_TYPES = 0x40 - TIPC_RETDATA = 0x2 - TIPC_SERVICE_ADDR = 0x2 - TIPC_SERVICE_RANGE = 0x1 - TIPC_SOCKET_ADDR = 0x3 - TIPC_SOCK_RECVQ_DEPTH = 0x84 - TIPC_SOCK_RECVQ_USED = 0x89 - TIPC_SRC_DROPPABLE = 0x80 - TIPC_SUBSCR_TIMEOUT = 0x3 - TIPC_SUB_CANCEL = 0x4 - TIPC_SUB_PORTS = 0x1 - TIPC_SUB_SERVICE = 0x2 - TIPC_TOP_SRV = 0x1 - TIPC_WAIT_FOREVER = 0xffffffff - TIPC_WITHDRAWN = 0x2 - TIPC_ZONE_BITS = 0x8 - TIPC_ZONE_CLUSTER_MASK = 0xfffff000 - TIPC_ZONE_MASK = 0xff000000 - TIPC_ZONE_OFFSET = 0x18 - TIPC_ZONE_SCOPE = 0x1 - TIPC_ZONE_SIZE = 0xff - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x100 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = 0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x400854d5 - TUNDETACHFILTER = 0x400854d6 - TUNGETDEVNETNS = 0x54e3 - TUNGETFEATURES = 0x800454cf - TUNGETFILTER = 0x800854db - TUNGETIFF = 0x800454d2 - TUNGETSNDBUF = 0x800454d3 - TUNGETVNETBE = 0x800454df - TUNGETVNETHDRSZ = 0x800454d7 - TUNGETVNETLE = 0x800454dd - TUNSETCARRIER = 0x400454e2 - TUNSETDEBUG = 0x400454c9 - TUNSETFILTEREBPF = 0x800454e1 - TUNSETGROUP = 0x400454ce - TUNSETIFF = 0x400454ca - TUNSETIFINDEX = 0x400454da - TUNSETLINK = 0x400454cd - TUNSETNOCSUM = 0x400454c8 - TUNSETOFFLOAD = 0x400454d0 - TUNSETOWNER = 0x400454cc - TUNSETPERSIST = 0x400454cb - TUNSETQUEUE = 0x400454d9 - TUNSETSNDBUF = 0x400454d4 - TUNSETSTEERINGEBPF = 0x800454e0 - TUNSETTXFILTER = 0x400454d1 - TUNSETVNETBE = 0x400454de - TUNSETVNETHDRSZ = 0x400454d8 - TUNSETVNETLE = 0x400454dc - UBI_IOCATT = 0x40186f40 - UBI_IOCDET = 0x40046f41 - UBI_IOCEBCH = 0x40044f02 - UBI_IOCEBER = 0x40044f01 - UBI_IOCEBISMAP = 0x80044f05 - UBI_IOCEBMAP = 0x40084f03 - UBI_IOCEBUNMAP = 0x40044f04 - UBI_IOCMKVOL = 0x40986f00 - UBI_IOCRMVOL = 0x40046f01 - UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 - UBI_IOCRSVOL = 0x400c6f02 - UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 - UBI_IOCVOLCRBLK = 0x40804f07 - UBI_IOCVOLRMBLK = 0x4f08 - UBI_IOCVOLUP = 0x40084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x4 - VEOL = 0xb - VEOL2 = 0x10 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x6 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - 
VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x80045702 - WDIOC_GETPRETIMEOUT = 0x80045709 - WDIOC_GETSTATUS = 0x80045701 - WDIOC_GETSUPPORT = 0x80285700 - WDIOC_GETTEMP = 0x80045703 - WDIOC_GETTIMELEFT = 0x8004570a - WDIOC_GETTIMEOUT = 0x80045707 - WDIOC_KEEPALIVE = 0x80045705 - WDIOC_SETOPTIONS = 0x80045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x20 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - X86_FXSR_MAGIC = 0x0 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_OPTIONS = 0x8 - XDP_OPTIONS_ZEROCOPY = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - Z3FOLD_MAGIC = 0x33 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 
0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80041270 + BLKBSZSET = 0x40041271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80041272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FP_XSTATE_MAGIC2 = 0x46505845 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80046601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0xc + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0xd + F_SETLK64 = 0xd + F_SETLKW = 0xe + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_32BIT = 0x40 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x20 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x8000 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4004240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc004240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8008743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 
0x80207450 + PPPIOCSACTIVE = 0x40087446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x400c744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40087447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffff + PTRACE_GETFPREGS = 0xe + PTRACE_GETFPXREGS = 0x12 + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETFPXREGS = 0x13 + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SINGLEBLOCK = 0x21 + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 + RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8004700d + RTC_EPOCH_SET = 0x4004700e + RTC_IRQP_READ = 0x8004700b + RTC_IRQP_SET = 0x4004700c + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x801c7011 + RTC_PLL_SET = 0x401c7012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 
0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x400854d5 + TUNDETACHFILTER = 0x400854d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x800854db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 
0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x20 + X86_FXSR_MAGIC = 0x0 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x62) EADDRNOTAVAIL = syscall.Errno(0x63) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x61) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x72) EBADE = syscall.Errno(0x34) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x4d) EBADMSG = syscall.Errno(0x4a) EBADR = syscall.Errno(0x35) EBADRQC = syscall.Errno(0x38) EBADSLT = syscall.Errno(0x39) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7d) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x2c) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x67) @@ -2797,23 +502,15 @@ const ( EDEADLK = syscall.Errno(0x23) EDEADLOCK = syscall.Errno(0x23) EDESTADDRREQ = syscall.Errno(0x59) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x7a) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x70) EHOSTUNREACH = syscall.Errno(0x71) EHWPOISON = syscall.Errno(0x85) EIDRM = syscall.Errno(0x2b) EILSEQ = syscall.Errno(0x54) EINPROGRESS = syscall.Errno(0x73) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x6a) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x7f) EKEYREJECTED = syscall.Errno(0x81) @@ -2830,8 +527,6 @@ const ( ELNRNG = syscall.Errno(0x30) ELOOP = syscall.Errno(0x28) EMEDIUMTYPE = syscall.Errno(0x7c) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x5a) EMULTIHOP = syscall.Errno(0x48) ENAMETOOLONG = syscall.Errno(0x24) @@ -2839,99 +534,67 @@ const ( ENETDOWN = syscall.Errno(0x64) ENETRESET = syscall.Errno(0x66) ENETUNREACH = syscall.Errno(0x65) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x37) ENOBUFS = syscall.Errno(0x69) ENOCSI = syscall.Errno(0x32) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x7e) ENOLCK = syscall.Errno(0x25) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x2a) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x6b) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x27) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x83) ENOTSOCK = syscall.Errno(0x58) ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x5f) EOVERFLOW = syscall.Errno(0x4b) EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x5d) EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x4e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x55) ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x6c) ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = 
syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x74) ESTRPIPE = syscall.Errno(0x56) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x6e) ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x31) EUSERS = syscall.Errno(0x57) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x36) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0x7) SIGCHLD = syscall.Signal(0x11) SIGCLD = syscall.Signal(0x11) SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x1d) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x1d) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1e) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTKFLT = syscall.Signal(0x10) SIGSTOP = syscall.Signal(0x13) SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 5bcf3dbd7..47a57fe46 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -11,2784 +11,489 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 
- ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x80081270 - BLKBSZSET = 0x40081271 - BLKFLSBUF = 0x1261 - BLKFRAGET = 0x1265 - BLKFRASET = 0x1264 - BLKGETSIZE = 0x1260 - BLKGETSIZE64 = 0x80081272 - BLKPBSZGET = 0x127b - BLKRAGET = 0x1263 - BLKRASET = 0x1262 - BLKROGET = 0x125e - BLKROSET = 0x125d - BLKRRPART = 0x125f - BLKSECTGET = 0x1267 - BLKSECTSET = 0x1266 - BLKSSZGET = 0x1268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TEST_RND_HI32 = 0x4 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - 
BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 
0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DMA_BUF_MAGIC = 0x444d4142 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LLDP = 0x88cc - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 
0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x1000 - FP_XSTATE_MAGIC2 = 0x46505845 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x5 - F_GETLK64 = 0x5 - F_GETOWN = 0x9 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 
0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_FUTURE_WRITE = 0x10 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0x6 - F_SETLKW = 0x7 - F_SETLKW64 = 0x7 - F_SETOWN = 0x8 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 
0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x800 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_ROUTER_ALERT_ISOLATE = 0x1e - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - 
IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CAPABILITIES = 0x1f - KEYCTL_CAPS0_BIG_KEY = 0x10 - KEYCTL_CAPS0_CAPABILITIES = 0x1 - KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 - KEYCTL_CAPS0_INVALIDATE = 0x20 - KEYCTL_CAPS0_MOVE = 0x80 - KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 - KEYCTL_CAPS0_PUBLIC_KEY = 0x8 - KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 - KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 - KEYCTL_CAPS1_NS_KEY_TAG = 0x2 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_MOVE = 0x1e - KEYCTL_MOVE_EXCL = 0x1 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = 
-0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_32BIT = 0x40 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x100 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x2000 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x4000 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x20000 - MAP_SYNC = 0x80000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - 
MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFDBITS = 0x40 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - NS_GET_NSTYPE = 0xb703 - NS_GET_OWNER_UID = 0xb704 - NS_GET_PARENT = 0xb702 - NS_GET_USERNS = 0xb701 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 
0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x400 - O_ASYNC = 0x2000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x40 - O_DIRECT = 0x4000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x1000 - O_EXCL = 0x80 - O_FSYNC = 0x101000 - O_LARGEFILE = 0x0 - O_NDELAY = 0x800 - O_NOATIME = 0x40000 - O_NOCTTY = 0x100 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x800 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x101000 - O_SYNC = 0x101000 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x2401 - PERF_EVENT_IOC_ENABLE = 0x2400 - PERF_EVENT_IOC_ID = 0x80082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 - PERF_EVENT_IOC_PERIOD = 0x40082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x2402 - PERF_EVENT_IOC_RESET = 0x2403 - PERF_EVENT_IOC_SET_BPF = 0x40042408 - PERF_EVENT_IOC_SET_FILTER = 0x40082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x2405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x4004743d - PPPIOCATTCHAN = 0x40047438 - PPPIOCCONNECT = 0x4004743a - PPPIOCDETACH = 0x4004743c - PPPIOCDISCONN = 0x7439 - PPPIOCGASYNCMAP = 0x80047458 - PPPIOCGCHAN = 0x80047437 - PPPIOCGDEBUG = 0x80047441 - PPPIOCGFLAGS = 0x8004745a - PPPIOCGIDLE = 0x8010743f - PPPIOCGL2TPSTATS = 0x80487436 - PPPIOCGMRU = 0x80047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x80047455 - PPPIOCGUNIT = 0x80047456 - PPPIOCGXASYNCMAP = 0x80207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x40107446 - PPPIOCSASYNCMAP = 0x40047457 - PPPIOCSCOMPRESS = 0x4010744d - PPPIOCSDEBUG = 0x40047440 - PPPIOCSFLAGS = 0x40047459 - PPPIOCSMAXCID = 0x40047451 - PPPIOCSMRRU = 0x4004743b - PPPIOCSMRU = 0x40047452 - PPPIOCSNPMODE = 0x4008744b - PPPIOCSPASS = 0x40107447 - PPPIOCSRASYNCMAP = 0x40047454 - PPPIOCSXASYNCMAP = 0x4020744f - PPPIOCXFERUNIT = 0x744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - 
PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ARCH_PRCTL = 0x1e - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 - PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - 
PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETFPXREGS = 0x12 - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_SYSCALL_INFO = 0x420e - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKETEXT = 0x4 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETFPXREGS = 0x13 - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SINGLEBLOCK = 0x21 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_SYSCALL_INFO_ENTRY = 0x1 - PTRACE_SYSCALL_INFO_EXIT = 0x2 - PTRACE_SYSCALL_INFO_NONE = 0x0 - PTRACE_SYSCALL_INFO_SECCOMP = 0x3 - PTRACE_SYSEMU = 0x1f - PTRACE_SYSEMU_SINGLESTEP = 0x20 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x40085203 - RNDADDTOENTCNT = 0x40045201 - RNDCLEARPOOL = 0x5206 - RNDGETENTCNT = 0x80045200 - RNDGETPOOL = 0x80085202 - RNDRESEEDCRNG = 0x5207 - RNDZAPENTCNT = 0x5204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x7002 - RTC_AIE_ON = 0x7001 - RTC_ALM_READ = 0x80247008 - RTC_ALM_SET = 0x40247007 - RTC_EPOCH_READ = 0x8008700d - RTC_EPOCH_SET = 0x4008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x8008700b - RTC_IRQP_SET = 0x4008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x7006 - RTC_PIE_ON = 0x7005 - RTC_PLL_GET = 0x80207011 - RTC_PLL_SET = 0x40207012 - RTC_RD_TIME = 0x80247009 - RTC_SET_TIME = 0x4024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x7004 - RTC_UIE_ON = 0x7003 - RTC_VL_CLR = 0x7014 - 
RTC_VL_READ = 0x80047013 - RTC_WIE_OFF = 0x7010 - RTC_WIE_ON = 0x700f - RTC_WKALM_RD = 0x80287010 - RTC_WKALM_SET = 0x4028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNEXTHOP = 0x69 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNEXTHOP = 0x6a - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNEXTHOP = 0x68 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x17 - RTM_NR_MSGTYPES = 0x5c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - 
SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x800 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGETLINKNAME = 0x89e0 - SIOCGETNODEID = 0x89e1 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x80108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x80108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x800 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0x1 - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - 
SO_ACCEPTCONN = 0x1e - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x6 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DETACH_REUSEPORT_BPF = 0x44 - SO_DOMAIN = 0x27 - SO_DONTROUTE = 0x5 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x4 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x9 - SO_LINGER = 0xd - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0xa - SO_PASSCRED = 0x10 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1f - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x26 - SO_RCVBUF = 0x8 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x12 - SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 - SO_REUSEADDR = 0x2 - SO_REUSEPORT = 0xf - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x7 - SO_SNDBUFFORCE = 0x20 - SO_SNDLOWAT = 0x13 - SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x3 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - 
S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - TCSETXW = 0x5435 - TCXONC = 0x540a - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x541d - TIOCEXCL = 0x540c - TIOCGDEV = 0x80045432 - TIOCGETD = 0x5424 - TIOCGEXCL = 0x80045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x80285442 - TIOCGLCKTRMIOS = 0x5456 - TIOCGPGRP = 0x540f - TIOCGPKT = 0x80045438 - TIOCGPTLCK = 0x80045439 - TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 - TIOCGRS485 = 0x542e - TIOCGSERIAL = 0x541e - TIOCGSID = 0x5429 - TIOCGSOFTCAR = 0x5419 - TIOCGWINSZ = 0x5413 - TIOCINQ = 0x541b - TIOCLINUX = 0x541c - TIOCMBIC = 0x5417 - TIOCMBIS = 0x5416 - TIOCMGET = 0x5415 - TIOCMIWAIT = 0x545c - TIOCMSET = 0x5418 - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x5422 - TIOCNXCL = 0x540d - TIOCOUTQ = 0x5411 - TIOCPKT = 0x5420 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x540e - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x5423 - TIOCSIG = 0x40045436 - TIOCSISO7816 = 
0xc0285443 - TIOCSLCKTRMIOS = 0x5457 - TIOCSPGRP = 0x5410 - TIOCSPTLCK = 0x40045431 - TIOCSRS485 = 0x542f - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x541a - TIOCSTI = 0x5412 - TIOCSWINSZ = 0x5414 - TIOCVHANGUP = 0x5437 - TIPC_ADDR_ID = 0x3 - TIPC_ADDR_MCAST = 0x1 - TIPC_ADDR_NAME = 0x2 - TIPC_ADDR_NAMESEQ = 0x1 - TIPC_CFG_SRV = 0x0 - TIPC_CLUSTER_BITS = 0xc - TIPC_CLUSTER_MASK = 0xfff000 - TIPC_CLUSTER_OFFSET = 0xc - TIPC_CLUSTER_SIZE = 0xfff - TIPC_CONN_SHUTDOWN = 0x5 - TIPC_CONN_TIMEOUT = 0x82 - TIPC_CRITICAL_IMPORTANCE = 0x3 - TIPC_DESTNAME = 0x3 - TIPC_DEST_DROPPABLE = 0x81 - TIPC_ERRINFO = 0x1 - TIPC_ERR_NO_NAME = 0x1 - TIPC_ERR_NO_NODE = 0x3 - TIPC_ERR_NO_PORT = 0x2 - TIPC_ERR_OVERLOAD = 0x4 - TIPC_GROUP_JOIN = 0x87 - TIPC_GROUP_LEAVE = 0x88 - TIPC_GROUP_LOOPBACK = 0x1 - TIPC_GROUP_MEMBER_EVTS = 0x2 - TIPC_HIGH_IMPORTANCE = 0x2 - TIPC_IMPORTANCE = 0x7f - TIPC_LINK_STATE = 0x2 - TIPC_LOW_IMPORTANCE = 0x0 - TIPC_MAX_BEARER_NAME = 0x20 - TIPC_MAX_IF_NAME = 0x10 - TIPC_MAX_LINK_NAME = 0x44 - TIPC_MAX_MEDIA_NAME = 0x10 - TIPC_MAX_USER_MSG_SIZE = 0x101d0 - TIPC_MCAST_BROADCAST = 0x85 - TIPC_MCAST_REPLICAST = 0x86 - TIPC_MEDIUM_IMPORTANCE = 0x1 - TIPC_NODEID_LEN = 0x10 - TIPC_NODE_BITS = 0xc - TIPC_NODE_MASK = 0xfff - TIPC_NODE_OFFSET = 0x0 - TIPC_NODE_RECVQ_DEPTH = 0x83 - TIPC_NODE_SIZE = 0xfff - TIPC_NODE_STATE = 0x0 - TIPC_OK = 0x0 - TIPC_PUBLISHED = 0x1 - TIPC_RESERVED_TYPES = 0x40 - TIPC_RETDATA = 0x2 - TIPC_SERVICE_ADDR = 0x2 - TIPC_SERVICE_RANGE = 0x1 - TIPC_SOCKET_ADDR = 0x3 - TIPC_SOCK_RECVQ_DEPTH = 0x84 - TIPC_SOCK_RECVQ_USED = 0x89 - TIPC_SRC_DROPPABLE = 0x80 - TIPC_SUBSCR_TIMEOUT = 0x3 - TIPC_SUB_CANCEL = 0x4 - TIPC_SUB_PORTS = 0x1 - TIPC_SUB_SERVICE = 0x2 - TIPC_TOP_SRV = 0x1 - TIPC_WAIT_FOREVER = 0xffffffff - TIPC_WITHDRAWN = 0x2 - TIPC_ZONE_BITS = 0x8 - TIPC_ZONE_CLUSTER_MASK = 0xfffff000 - TIPC_ZONE_MASK = 0xff000000 - TIPC_ZONE_OFFSET = 0x18 - TIPC_ZONE_SCOPE = 0x1 - TIPC_ZONE_SIZE = 0xff - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x100 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = 0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x401054d5 - TUNDETACHFILTER = 0x401054d6 - TUNGETDEVNETNS = 0x54e3 - TUNGETFEATURES = 0x800454cf - TUNGETFILTER = 0x801054db - TUNGETIFF = 0x800454d2 - TUNGETSNDBUF = 0x800454d3 - TUNGETVNETBE = 0x800454df - TUNGETVNETHDRSZ = 0x800454d7 - TUNGETVNETLE = 0x800454dd - TUNSETCARRIER = 0x400454e2 - TUNSETDEBUG = 0x400454c9 - TUNSETFILTEREBPF = 0x800454e1 - TUNSETGROUP = 0x400454ce - TUNSETIFF = 0x400454ca - TUNSETIFINDEX = 0x400454da - TUNSETLINK = 0x400454cd - TUNSETNOCSUM = 0x400454c8 - TUNSETOFFLOAD = 0x400454d0 - TUNSETOWNER = 0x400454cc - TUNSETPERSIST = 0x400454cb - TUNSETQUEUE = 0x400454d9 - TUNSETSNDBUF = 0x400454d4 - TUNSETSTEERINGEBPF = 0x800454e0 - TUNSETTXFILTER = 0x400454d1 - TUNSETVNETBE = 0x400454de - TUNSETVNETHDRSZ = 0x400454d8 - TUNSETVNETLE = 0x400454dc - UBI_IOCATT = 0x40186f40 - UBI_IOCDET = 0x40046f41 - UBI_IOCEBCH = 0x40044f02 - UBI_IOCEBER = 0x40044f01 - UBI_IOCEBISMAP = 0x80044f05 - UBI_IOCEBMAP = 0x40084f03 - UBI_IOCEBUNMAP = 
0x40044f04 - UBI_IOCMKVOL = 0x40986f00 - UBI_IOCRMVOL = 0x40046f01 - UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 - UBI_IOCRSVOL = 0x400c6f02 - UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 - UBI_IOCVOLCRBLK = 0x40804f07 - UBI_IOCVOLRMBLK = 0x4f08 - UBI_IOCVOLUP = 0x40084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x4 - VEOL = 0xb - VEOL2 = 0x10 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x6 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x80045702 - WDIOC_GETPRETIMEOUT = 0x80045709 - WDIOC_GETSTATUS = 0x80045701 - WDIOC_GETSUPPORT = 0x80285700 - WDIOC_GETTEMP = 0x80045703 - WDIOC_GETTIMELEFT = 0x8004570a - WDIOC_GETTIMEOUT = 0x80045707 - WDIOC_KEEPALIVE = 0x80045705 - WDIOC_SETOPTIONS = 0x80045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - 
XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_OPTIONS = 0x8 - XDP_OPTIONS_ZEROCOPY = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - Z3FOLD_MAGIC = 0x33 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FP_XSTATE_MAGIC2 = 0x46505845 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_32BIT = 0x40 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 
0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_ARCH_PRCTL = 0x1e + PTRACE_GETFPREGS = 0xe + PTRACE_GETFPXREGS = 0x12 + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETFPXREGS = 0x13 + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SINGLEBLOCK = 0x21 + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 + RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + 
SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + 
UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x62) EADDRNOTAVAIL = syscall.Errno(0x63) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x61) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x72) EBADE = syscall.Errno(0x34) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x4d) EBADMSG = syscall.Errno(0x4a) EBADR = syscall.Errno(0x35) EBADRQC = syscall.Errno(0x38) EBADSLT = syscall.Errno(0x39) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7d) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x2c) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x67) @@ -2797,23 +502,15 @@ const ( EDEADLK = syscall.Errno(0x23) EDEADLOCK = syscall.Errno(0x23) EDESTADDRREQ = syscall.Errno(0x59) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x7a) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x70) EHOSTUNREACH = syscall.Errno(0x71) EHWPOISON = syscall.Errno(0x85) EIDRM = syscall.Errno(0x2b) EILSEQ = syscall.Errno(0x54) EINPROGRESS = syscall.Errno(0x73) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x6a) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x7f) EKEYREJECTED = syscall.Errno(0x81) @@ -2830,8 +527,6 @@ const ( ELNRNG = syscall.Errno(0x30) ELOOP = syscall.Errno(0x28) EMEDIUMTYPE = syscall.Errno(0x7c) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x5a) EMULTIHOP = syscall.Errno(0x48) ENAMETOOLONG = syscall.Errno(0x24) @@ -2839,99 +534,67 @@ const ( ENETDOWN = syscall.Errno(0x64) ENETRESET = syscall.Errno(0x66) ENETUNREACH = syscall.Errno(0x65) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x37) ENOBUFS = syscall.Errno(0x69) ENOCSI = syscall.Errno(0x32) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x7e) ENOLCK = syscall.Errno(0x25) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x2a) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x6b) - 
ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x27) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x83) ENOTSOCK = syscall.Errno(0x58) ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x5f) EOVERFLOW = syscall.Errno(0x4b) EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x5d) EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x4e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x55) ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x6c) ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x74) ESTRPIPE = syscall.Errno(0x56) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x6e) ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x31) EUSERS = syscall.Errno(0x57) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x36) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0x7) SIGCHLD = syscall.Signal(0x11) SIGCLD = syscall.Signal(0x11) SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x1d) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x1d) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1e) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTKFLT = syscall.Signal(0x10) SIGSTOP = syscall.Signal(0x13) SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3e02dcff8..df2eea4bb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -11,2790 +11,495 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - 
ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x80041270 - BLKBSZSET = 0x40041271 - BLKFLSBUF = 0x1261 - BLKFRAGET = 0x1265 - BLKFRASET = 0x1264 - BLKGETSIZE = 0x1260 - BLKGETSIZE64 = 0x80041272 - BLKPBSZGET = 0x127b - BLKRAGET = 0x1263 - BLKRASET = 0x1262 - BLKROGET = 0x125e - BLKROSET = 0x125d - BLKRRPART = 0x125f - BLKSECTGET = 0x1267 - BLKSECTSET = 0x1266 - BLKSSZGET = 0x1268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - 
BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TEST_RND_HI32 = 0x4 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - 
CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DMA_BUF_MAGIC = 0x444d4142 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 
0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LLDP = 0x88cc - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x1000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - 
FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0xc - F_GETLK64 = 0xc - F_GETOWN = 0x9 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_FUTURE_WRITE = 0x10 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0xd - F_SETLK64 = 0xd - F_SETLKW = 0xe - F_SETLKW64 = 0xe - F_SETOWN = 0x8 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 
0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x800 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - 
IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_ROUTER_ALERT_ISOLATE = 0x1e - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CAPABILITIES = 0x1f - KEYCTL_CAPS0_BIG_KEY = 0x10 - KEYCTL_CAPS0_CAPABILITIES = 0x1 - KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 - KEYCTL_CAPS0_INVALIDATE = 0x20 - KEYCTL_CAPS0_MOVE = 0x80 - KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 - KEYCTL_CAPS0_PUBLIC_KEY = 0x8 - KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 - KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 - KEYCTL_CAPS1_NS_KEY_TAG = 0x2 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_MOVE = 0x1e - KEYCTL_MOVE_EXCL = 0x1 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - 
KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x100 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x2000 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x4000 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x20000 - MAP_SYNC = 0x80000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - 
MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFDBITS = 0x20 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 
0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - NS_GET_NSTYPE = 0xb703 - NS_GET_OWNER_UID = 0xb704 - NS_GET_PARENT = 0xb702 - NS_GET_USERNS = 0xb701 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x400 - O_ASYNC = 0x2000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x40 - O_DIRECT = 0x10000 - O_DIRECTORY = 0x4000 - O_DSYNC = 0x1000 - O_EXCL = 0x80 - O_FSYNC = 0x101000 - O_LARGEFILE = 0x20000 - O_NDELAY = 0x800 - O_NOATIME = 0x40000 - O_NOCTTY = 0x100 - O_NOFOLLOW = 0x8000 - O_NONBLOCK = 0x800 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x101000 - O_SYNC = 0x101000 - O_TMPFILE = 0x404000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x2401 - PERF_EVENT_IOC_ENABLE = 0x2400 - PERF_EVENT_IOC_ID = 0x80042407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4004240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 - PERF_EVENT_IOC_PERIOD = 0x40082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc004240a - PERF_EVENT_IOC_REFRESH = 0x2402 - PERF_EVENT_IOC_RESET = 0x2403 - PERF_EVENT_IOC_SET_BPF = 0x40042408 - PERF_EVENT_IOC_SET_FILTER = 0x40042406 - PERF_EVENT_IOC_SET_OUTPUT = 0x2405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x4004743d - PPPIOCATTCHAN = 0x40047438 - PPPIOCCONNECT = 0x4004743a - PPPIOCDETACH = 0x4004743c - 
PPPIOCDISCONN = 0x7439 - PPPIOCGASYNCMAP = 0x80047458 - PPPIOCGCHAN = 0x80047437 - PPPIOCGDEBUG = 0x80047441 - PPPIOCGFLAGS = 0x8004745a - PPPIOCGIDLE = 0x8008743f - PPPIOCGL2TPSTATS = 0x80487436 - PPPIOCGMRU = 0x80047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x80047455 - PPPIOCGUNIT = 0x80047456 - PPPIOCGXASYNCMAP = 0x80207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x40087446 - PPPIOCSASYNCMAP = 0x40047457 - PPPIOCSCOMPRESS = 0x400c744d - PPPIOCSDEBUG = 0x40047440 - PPPIOCSFLAGS = 0x40047459 - PPPIOCSMAXCID = 0x40047451 - PPPIOCSMRRU = 0x4004743b - PPPIOCSMRU = 0x40047452 - PPPIOCSNPMODE = 0x4008744b - PPPIOCSPASS = 0x40087447 - PPPIOCSRASYNCMAP = 0x40047454 - PPPIOCSXASYNCMAP = 0x4020744f - PPPIOCXFERUNIT = 0x744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - 
PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 - PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETCRUNCHREGS = 0x19 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFDPIC = 0x1f - PTRACE_GETFDPIC_EXEC = 0x0 - PTRACE_GETFDPIC_INTERP = 0x1 - PTRACE_GETFPREGS = 0xe - PTRACE_GETHBPREGS = 0x1d - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GETVFPREGS = 0x1b - PTRACE_GETWMMXREGS = 0x12 - PTRACE_GET_SYSCALL_INFO = 0x420e - PTRACE_GET_THREAD_AREA = 0x16 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKETEXT = 0x4 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETCRUNCHREGS = 0x1a - PTRACE_SETFPREGS = 0xf - PTRACE_SETHBPREGS = 0x1e - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SETVFPREGS = 0x1c - PTRACE_SETWMMXREGS = 0x13 - PTRACE_SET_SYSCALL = 0x17 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_SYSCALL_INFO_ENTRY = 0x1 - PTRACE_SYSCALL_INFO_EXIT = 0x2 - PTRACE_SYSCALL_INFO_NONE = 0x0 - PTRACE_SYSCALL_INFO_SECCOMP = 0x3 - PTRACE_TRACEME = 0x0 - PT_DATA_ADDR = 0x10004 - PT_TEXT_ADDR = 0x10000 - PT_TEXT_END_ADDR = 0x10008 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x40085203 - RNDADDTOENTCNT = 0x40045201 - RNDCLEARPOOL = 0x5206 - RNDGETENTCNT = 0x80045200 - RNDGETPOOL = 0x80085202 - RNDRESEEDCRNG = 0x5207 - RNDZAPENTCNT = 0x5204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - 
RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x7002 - RTC_AIE_ON = 0x7001 - RTC_ALM_READ = 0x80247008 - RTC_ALM_SET = 0x40247007 - RTC_EPOCH_READ = 0x8004700d - RTC_EPOCH_SET = 0x4004700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x8004700b - RTC_IRQP_SET = 0x4004700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x7006 - RTC_PIE_ON = 0x7005 - RTC_PLL_GET = 0x801c7011 - RTC_PLL_SET = 0x401c7012 - RTC_RD_TIME = 0x80247009 - RTC_SET_TIME = 0x4024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x7004 - RTC_UIE_ON = 0x7003 - RTC_VL_CLR = 0x7014 - RTC_VL_READ = 0x80047013 - RTC_WIE_OFF = 0x7010 - RTC_WIE_ON = 0x700f - RTC_WKALM_RD = 0x80287010 - RTC_WKALM_SET = 0x4028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNEXTHOP = 0x69 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNEXTHOP = 0x6a - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNEXTHOP = 0x68 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x17 - RTM_NR_MSGTYPES = 0x5c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - 
RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x800 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGETLINKNAME = 0x89e0 - SIOCGETNODEID = 0x89e1 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x80108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x80108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - 
SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x800 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0x1 - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1e - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x6 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DETACH_REUSEPORT_BPF = 0x44 - SO_DOMAIN = 0x27 - SO_DONTROUTE = 0x5 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x4 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x9 - SO_LINGER = 0xd - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0xa - SO_PASSCRED = 0x10 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1f - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x26 - SO_RCVBUF = 0x8 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x12 - SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 - SO_REUSEADDR = 0x2 - SO_REUSEPORT = 0xf - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x7 - SO_SNDBUFFORCE = 0x20 - SO_SNDLOWAT = 0x13 - SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x3 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - 
STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - TCSETXW = 0x5435 - TCXONC = 0x540a - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x541d - TIOCEXCL = 0x540c - TIOCGDEV = 0x80045432 - TIOCGETD = 0x5424 - TIOCGEXCL = 0x80045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x80285442 - TIOCGLCKTRMIOS = 0x5456 - TIOCGPGRP = 0x540f - TIOCGPKT = 0x80045438 - TIOCGPTLCK = 
0x80045439 - TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 - TIOCGRS485 = 0x542e - TIOCGSERIAL = 0x541e - TIOCGSID = 0x5429 - TIOCGSOFTCAR = 0x5419 - TIOCGWINSZ = 0x5413 - TIOCINQ = 0x541b - TIOCLINUX = 0x541c - TIOCMBIC = 0x5417 - TIOCMBIS = 0x5416 - TIOCMGET = 0x5415 - TIOCMIWAIT = 0x545c - TIOCMSET = 0x5418 - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x5422 - TIOCNXCL = 0x540d - TIOCOUTQ = 0x5411 - TIOCPKT = 0x5420 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x540e - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x5423 - TIOCSIG = 0x40045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x5457 - TIOCSPGRP = 0x5410 - TIOCSPTLCK = 0x40045431 - TIOCSRS485 = 0x542f - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x541a - TIOCSTI = 0x5412 - TIOCSWINSZ = 0x5414 - TIOCVHANGUP = 0x5437 - TIPC_ADDR_ID = 0x3 - TIPC_ADDR_MCAST = 0x1 - TIPC_ADDR_NAME = 0x2 - TIPC_ADDR_NAMESEQ = 0x1 - TIPC_CFG_SRV = 0x0 - TIPC_CLUSTER_BITS = 0xc - TIPC_CLUSTER_MASK = 0xfff000 - TIPC_CLUSTER_OFFSET = 0xc - TIPC_CLUSTER_SIZE = 0xfff - TIPC_CONN_SHUTDOWN = 0x5 - TIPC_CONN_TIMEOUT = 0x82 - TIPC_CRITICAL_IMPORTANCE = 0x3 - TIPC_DESTNAME = 0x3 - TIPC_DEST_DROPPABLE = 0x81 - TIPC_ERRINFO = 0x1 - TIPC_ERR_NO_NAME = 0x1 - TIPC_ERR_NO_NODE = 0x3 - TIPC_ERR_NO_PORT = 0x2 - TIPC_ERR_OVERLOAD = 0x4 - TIPC_GROUP_JOIN = 0x87 - TIPC_GROUP_LEAVE = 0x88 - TIPC_GROUP_LOOPBACK = 0x1 - TIPC_GROUP_MEMBER_EVTS = 0x2 - TIPC_HIGH_IMPORTANCE = 0x2 - TIPC_IMPORTANCE = 0x7f - TIPC_LINK_STATE = 0x2 - TIPC_LOW_IMPORTANCE = 0x0 - TIPC_MAX_BEARER_NAME = 0x20 - TIPC_MAX_IF_NAME = 0x10 - TIPC_MAX_LINK_NAME = 0x44 - TIPC_MAX_MEDIA_NAME = 0x10 - TIPC_MAX_USER_MSG_SIZE = 0x101d0 - TIPC_MCAST_BROADCAST = 0x85 - TIPC_MCAST_REPLICAST = 0x86 - TIPC_MEDIUM_IMPORTANCE = 0x1 - TIPC_NODEID_LEN = 0x10 - TIPC_NODE_BITS = 0xc - TIPC_NODE_MASK = 0xfff - TIPC_NODE_OFFSET = 0x0 - TIPC_NODE_RECVQ_DEPTH = 0x83 - TIPC_NODE_SIZE = 0xfff - TIPC_NODE_STATE = 0x0 - TIPC_OK = 0x0 - TIPC_PUBLISHED = 0x1 - TIPC_RESERVED_TYPES = 0x40 - TIPC_RETDATA = 0x2 - TIPC_SERVICE_ADDR = 0x2 - TIPC_SERVICE_RANGE = 0x1 - TIPC_SOCKET_ADDR = 0x3 - TIPC_SOCK_RECVQ_DEPTH = 0x84 - TIPC_SOCK_RECVQ_USED = 0x89 - TIPC_SRC_DROPPABLE = 0x80 - TIPC_SUBSCR_TIMEOUT = 0x3 - TIPC_SUB_CANCEL = 0x4 - TIPC_SUB_PORTS = 0x1 - TIPC_SUB_SERVICE = 0x2 - TIPC_TOP_SRV = 0x1 - TIPC_WAIT_FOREVER = 0xffffffff - TIPC_WITHDRAWN = 0x2 - TIPC_ZONE_BITS = 0x8 - TIPC_ZONE_CLUSTER_MASK = 0xfffff000 - TIPC_ZONE_MASK = 0xff000000 - TIPC_ZONE_OFFSET = 0x18 - TIPC_ZONE_SCOPE = 0x1 - TIPC_ZONE_SIZE = 0xff - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x100 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = 0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - 
TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x400854d5 - TUNDETACHFILTER = 0x400854d6 - TUNGETDEVNETNS = 0x54e3 - TUNGETFEATURES = 0x800454cf - TUNGETFILTER = 0x800854db - TUNGETIFF = 0x800454d2 - TUNGETSNDBUF = 0x800454d3 - TUNGETVNETBE = 0x800454df - TUNGETVNETHDRSZ = 0x800454d7 - TUNGETVNETLE = 0x800454dd - TUNSETCARRIER = 0x400454e2 - TUNSETDEBUG = 0x400454c9 - TUNSETFILTEREBPF = 0x800454e1 - TUNSETGROUP = 0x400454ce - TUNSETIFF = 0x400454ca - TUNSETIFINDEX = 0x400454da - TUNSETLINK = 0x400454cd - TUNSETNOCSUM = 0x400454c8 - TUNSETOFFLOAD = 0x400454d0 - TUNSETOWNER = 0x400454cc - TUNSETPERSIST = 0x400454cb - TUNSETQUEUE = 0x400454d9 - TUNSETSNDBUF = 0x400454d4 - TUNSETSTEERINGEBPF = 0x800454e0 - TUNSETTXFILTER = 0x400454d1 - TUNSETVNETBE = 0x400454de - TUNSETVNETHDRSZ = 0x400454d8 - TUNSETVNETLE = 0x400454dc - UBI_IOCATT = 0x40186f40 - UBI_IOCDET = 0x40046f41 - UBI_IOCEBCH = 0x40044f02 - UBI_IOCEBER = 0x40044f01 - UBI_IOCEBISMAP = 0x80044f05 - UBI_IOCEBMAP = 0x40084f03 - UBI_IOCEBUNMAP = 0x40044f04 - UBI_IOCMKVOL = 0x40986f00 - UBI_IOCRMVOL = 0x40046f01 - UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 - UBI_IOCRSVOL = 0x400c6f02 - UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 - UBI_IOCVOLCRBLK = 0x40804f07 - UBI_IOCVOLRMBLK = 0x4f08 - UBI_IOCVOLUP = 0x40084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x4 - VEOL = 0xb - VEOL2 = 0x10 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x6 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x80045702 - WDIOC_GETPRETIMEOUT = 0x80045709 - WDIOC_GETSTATUS = 0x80045701 - WDIOC_GETSUPPORT = 0x80285700 - WDIOC_GETTEMP = 0x80045703 - WDIOC_GETTIMELEFT = 0x8004570a - WDIOC_GETTIMEOUT = 0x80045707 - WDIOC_KEEPALIVE = 0x80045705 - WDIOC_SETOPTIONS = 0x80045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE 
= 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x20 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_OPTIONS = 0x8 - XDP_OPTIONS_ZEROCOPY = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - Z3FOLD_MAGIC = 0x33 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80041270 + BLKBSZSET = 0x40041271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80041272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80046601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0xc + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0xd + F_SETLK64 = 0xd + F_SETLKW = 0xe + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + 
MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x20 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x20000 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x404000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4004240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc004240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8008743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCSACTIVE = 0x40087446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x400c744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40087447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffff + PTRACE_GETCRUNCHREGS = 0x19 + PTRACE_GETFDPIC = 0x1f + PTRACE_GETFDPIC_EXEC = 0x0 + PTRACE_GETFDPIC_INTERP = 0x1 + PTRACE_GETFPREGS = 0xe + PTRACE_GETHBPREGS = 0x1d + PTRACE_GETVFPREGS = 0x1b + PTRACE_GETWMMXREGS = 0x12 + PTRACE_GET_THREAD_AREA = 0x16 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_SETCRUNCHREGS = 0x1a + PTRACE_SETFPREGS = 0xf + PTRACE_SETHBPREGS = 0x1e + PTRACE_SETVFPREGS = 0x1c + PTRACE_SETWMMXREGS = 0x13 + PTRACE_SET_SYSCALL = 0x17 + PT_DATA_ADDR = 0x10004 + PT_TEXT_ADDR = 0x10000 + PT_TEXT_END_ADDR = 0x10008 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 + RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8004700d + RTC_EPOCH_SET = 0x4004700e + RTC_IRQP_READ = 0x8004700b + RTC_IRQP_SET = 0x4004700c + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x801c7011 + RTC_PLL_SET = 0x401c7012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + 
RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + 
TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x400854d5 + TUNDETACHFILTER = 0x400854d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x800854db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x20 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x62) EADDRNOTAVAIL = syscall.Errno(0x63) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x61) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x72) EBADE = syscall.Errno(0x34) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x4d) EBADMSG = syscall.Errno(0x4a) EBADR = syscall.Errno(0x35) EBADRQC = syscall.Errno(0x38) EBADSLT = syscall.Errno(0x39) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7d) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x2c) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x67) @@ -2803,23 +508,15 @@ const ( EDEADLK = syscall.Errno(0x23) EDEADLOCK = syscall.Errno(0x23) EDESTADDRREQ = syscall.Errno(0x59) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x7a) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x70) EHOSTUNREACH = syscall.Errno(0x71) EHWPOISON = syscall.Errno(0x85) EIDRM = syscall.Errno(0x2b) EILSEQ = syscall.Errno(0x54) EINPROGRESS = syscall.Errno(0x73) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = 
syscall.Errno(0x6a) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x7f) EKEYREJECTED = syscall.Errno(0x81) @@ -2836,8 +533,6 @@ const ( ELNRNG = syscall.Errno(0x30) ELOOP = syscall.Errno(0x28) EMEDIUMTYPE = syscall.Errno(0x7c) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x5a) EMULTIHOP = syscall.Errno(0x48) ENAMETOOLONG = syscall.Errno(0x24) @@ -2845,99 +540,67 @@ const ( ENETDOWN = syscall.Errno(0x64) ENETRESET = syscall.Errno(0x66) ENETUNREACH = syscall.Errno(0x65) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x37) ENOBUFS = syscall.Errno(0x69) ENOCSI = syscall.Errno(0x32) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x7e) ENOLCK = syscall.Errno(0x25) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x2a) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x6b) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x27) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x83) ENOTSOCK = syscall.Errno(0x58) ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x5f) EOVERFLOW = syscall.Errno(0x4b) EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x5d) EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x4e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x55) ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x6c) ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x74) ESTRPIPE = syscall.Errno(0x56) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x6e) ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x31) EUSERS = syscall.Errno(0x57) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x36) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0x7) SIGCHLD = syscall.Signal(0x11) SIGCLD = syscall.Signal(0x11) SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x1d) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x1d) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1e) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTKFLT = syscall.Signal(0x10) SIGSTOP = syscall.Signal(0x13) SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 
2293f8bb8..4e1214217 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -11,2777 +11,482 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 
0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x80081270 - BLKBSZSET = 0x40081271 - BLKFLSBUF = 0x1261 - BLKFRAGET = 0x1265 - BLKFRASET = 0x1264 - BLKGETSIZE = 0x1260 - BLKGETSIZE64 = 0x80081272 - BLKPBSZGET = 0x127b - BLKRAGET = 0x1263 - BLKRASET = 0x1262 - BLKROGET = 0x125e - BLKROSET = 0x125d - BLKRRPART = 0x125f - BLKSECTGET = 0x1267 - BLKSECTSET = 0x1266 - BLKSSZGET = 0x1268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TEST_RND_HI32 = 0x4 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 
0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DMA_BUF_MAGIC = 0x444d4142 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 
- ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ESR_MAGIC = 0x45535201 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LLDP = 0x88cc - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - EXTRA_MAGIC = 0x45585401 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - 
FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x1000 - FPSIMD_MAGIC = 0x46508001 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x5 - F_GETLK64 = 0x5 - F_GETOWN = 0x9 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_FUTURE_WRITE = 0x10 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0x6 - F_SETLKW = 0x7 - F_SETLKW64 = 0x7 - F_SETOWN = 0x8 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - 
HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x800 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - 
IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_ROUTER_ALERT_ISOLATE = 0x1e - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - 
KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CAPABILITIES = 0x1f - KEYCTL_CAPS0_BIG_KEY = 0x10 - KEYCTL_CAPS0_CAPABILITIES = 0x1 - KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 - KEYCTL_CAPS0_INVALIDATE = 0x20 - KEYCTL_CAPS0_MOVE = 0x80 - KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 - KEYCTL_CAPS0_PUBLIC_KEY = 0x8 - KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 - KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 - KEYCTL_CAPS1_NS_KEY_TAG = 0x2 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_MOVE = 0x1e - KEYCTL_MOVE_EXCL = 0x1 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - 
MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x100 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x2000 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x4000 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x20000 - MAP_SYNC = 0x80000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - 
NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFDBITS = 0x40 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - NS_GET_NSTYPE = 0xb703 - NS_GET_OWNER_UID = 0xb704 - NS_GET_PARENT = 0xb702 - NS_GET_USERNS = 0xb701 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x400 - O_ASYNC = 0x2000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x40 - O_DIRECT = 0x10000 - O_DIRECTORY = 0x4000 - O_DSYNC = 0x1000 - O_EXCL = 0x80 - O_FSYNC = 0x101000 - O_LARGEFILE = 0x0 - O_NDELAY = 0x800 - O_NOATIME = 0x40000 - O_NOCTTY = 0x100 - O_NOFOLLOW = 0x8000 - O_NONBLOCK = 0x800 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x101000 - O_SYNC = 0x101000 - O_TMPFILE = 0x404000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - 
PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x2401 - PERF_EVENT_IOC_ENABLE = 0x2400 - PERF_EVENT_IOC_ID = 0x80082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 - PERF_EVENT_IOC_PERIOD = 0x40082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x2402 - PERF_EVENT_IOC_RESET = 0x2403 - PERF_EVENT_IOC_SET_BPF = 0x40042408 - PERF_EVENT_IOC_SET_FILTER = 0x40082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x2405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x4004743d - PPPIOCATTCHAN = 0x40047438 - PPPIOCCONNECT = 0x4004743a - PPPIOCDETACH = 0x4004743c - PPPIOCDISCONN = 0x7439 - PPPIOCGASYNCMAP = 0x80047458 - PPPIOCGCHAN = 0x80047437 - PPPIOCGDEBUG = 0x80047441 - PPPIOCGFLAGS = 0x8004745a - PPPIOCGIDLE = 0x8010743f - PPPIOCGL2TPSTATS = 0x80487436 - PPPIOCGMRU = 0x80047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x80047455 - PPPIOCGUNIT = 0x80047456 - PPPIOCGXASYNCMAP = 0x80207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x40107446 - PPPIOCSASYNCMAP = 0x40047457 - PPPIOCSCOMPRESS = 0x4010744d - PPPIOCSDEBUG = 0x40047440 - PPPIOCSFLAGS = 0x40047459 - PPPIOCSMAXCID = 0x40047451 - PPPIOCSMRRU = 0x4004743b - PPPIOCSMRU = 0x40047452 - PPPIOCSNPMODE = 0x4008744b - PPPIOCSPASS = 0x40107447 - PPPIOCSRASYNCMAP = 0x40047454 - PPPIOCSXASYNCMAP = 0x4020744f - PPPIOCXFERUNIT = 0x744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - 
PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 - PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_SYSCALL_INFO = 0x420e - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKETEXT = 0x4 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_SYSCALL_INFO_ENTRY = 0x1 - PTRACE_SYSCALL_INFO_EXIT = 0x2 - PTRACE_SYSCALL_INFO_NONE = 0x0 - PTRACE_SYSCALL_INFO_SECCOMP = 0x3 - PTRACE_SYSEMU = 0x1f - PTRACE_SYSEMU_SINGLESTEP = 0x20 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - 
REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x40085203 - RNDADDTOENTCNT = 0x40045201 - RNDCLEARPOOL = 0x5206 - RNDGETENTCNT = 0x80045200 - RNDGETPOOL = 0x80085202 - RNDRESEEDCRNG = 0x5207 - RNDZAPENTCNT = 0x5204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x7002 - RTC_AIE_ON = 0x7001 - RTC_ALM_READ = 0x80247008 - RTC_ALM_SET = 0x40247007 - RTC_EPOCH_READ = 0x8008700d - RTC_EPOCH_SET = 0x4008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x8008700b - RTC_IRQP_SET = 0x4008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x7006 - RTC_PIE_ON = 0x7005 - RTC_PLL_GET = 0x80207011 - RTC_PLL_SET = 0x40207012 - RTC_RD_TIME = 0x80247009 - RTC_SET_TIME = 0x4024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x7004 - RTC_UIE_ON = 0x7003 - RTC_VL_CLR = 0x7014 - RTC_VL_READ = 0x80047013 - RTC_WIE_OFF = 0x7010 - RTC_WIE_ON = 0x700f - RTC_WKALM_RD = 0x80287010 - RTC_WKALM_SET = 0x4028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNEXTHOP = 0x69 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNEXTHOP = 0x6a - 
RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNEXTHOP = 0x68 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x17 - RTM_NR_MSGTYPES = 0x5c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x800 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGETLINKNAME = 0x89e0 - SIOCGETNODEID = 0x89e1 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x80108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x80108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 
0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x800 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0x1 - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1e - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x6 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DETACH_REUSEPORT_BPF = 0x44 - SO_DOMAIN = 0x27 - SO_DONTROUTE = 0x5 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x4 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x9 - SO_LINGER = 0xd - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0xa - SO_PASSCRED = 0x10 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1f - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x26 - SO_RCVBUF = 0x8 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x12 - SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 - SO_REUSEADDR = 0x2 - SO_REUSEPORT = 0xf - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x7 
- SO_SNDBUFFORCE = 0x20 - SO_SNDLOWAT = 0x13 - SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x3 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SVE_MAGIC = 0x53564501 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - 
TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - TCSETXW = 0x5435 - TCXONC = 0x540a - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x541d - TIOCEXCL = 0x540c - TIOCGDEV = 0x80045432 - TIOCGETD = 0x5424 - TIOCGEXCL = 0x80045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x80285442 - TIOCGLCKTRMIOS = 0x5456 - TIOCGPGRP = 0x540f - TIOCGPKT = 0x80045438 - TIOCGPTLCK = 0x80045439 - TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 - TIOCGRS485 = 0x542e - TIOCGSERIAL = 0x541e - TIOCGSID = 0x5429 - TIOCGSOFTCAR = 0x5419 - TIOCGWINSZ = 0x5413 - TIOCINQ = 0x541b - TIOCLINUX = 0x541c - TIOCMBIC = 0x5417 - TIOCMBIS = 0x5416 - TIOCMGET = 0x5415 - TIOCMIWAIT = 0x545c - TIOCMSET = 0x5418 - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x5422 - TIOCNXCL = 0x540d - TIOCOUTQ = 0x5411 - TIOCPKT = 0x5420 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x540e - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x5423 - TIOCSIG = 0x40045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x5457 - TIOCSPGRP = 0x5410 - TIOCSPTLCK = 0x40045431 - TIOCSRS485 = 0x542f - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x541a - TIOCSTI = 0x5412 - TIOCSWINSZ = 0x5414 - TIOCVHANGUP = 0x5437 - TIPC_ADDR_ID = 0x3 - TIPC_ADDR_MCAST = 0x1 - TIPC_ADDR_NAME = 0x2 - TIPC_ADDR_NAMESEQ = 0x1 - TIPC_CFG_SRV = 0x0 - TIPC_CLUSTER_BITS = 0xc - TIPC_CLUSTER_MASK = 0xfff000 - TIPC_CLUSTER_OFFSET = 0xc - TIPC_CLUSTER_SIZE = 0xfff - TIPC_CONN_SHUTDOWN = 0x5 - TIPC_CONN_TIMEOUT = 0x82 - TIPC_CRITICAL_IMPORTANCE = 0x3 - TIPC_DESTNAME = 0x3 - TIPC_DEST_DROPPABLE = 0x81 - TIPC_ERRINFO = 0x1 - TIPC_ERR_NO_NAME = 0x1 - TIPC_ERR_NO_NODE = 0x3 - TIPC_ERR_NO_PORT = 0x2 - TIPC_ERR_OVERLOAD = 0x4 - TIPC_GROUP_JOIN = 0x87 - TIPC_GROUP_LEAVE = 0x88 - TIPC_GROUP_LOOPBACK = 0x1 - TIPC_GROUP_MEMBER_EVTS = 0x2 - TIPC_HIGH_IMPORTANCE = 0x2 - TIPC_IMPORTANCE = 0x7f - TIPC_LINK_STATE = 0x2 - TIPC_LOW_IMPORTANCE = 0x0 - TIPC_MAX_BEARER_NAME = 0x20 - TIPC_MAX_IF_NAME = 0x10 - TIPC_MAX_LINK_NAME = 0x44 - TIPC_MAX_MEDIA_NAME = 0x10 - TIPC_MAX_USER_MSG_SIZE = 0x101d0 - TIPC_MCAST_BROADCAST = 0x85 - TIPC_MCAST_REPLICAST = 0x86 - TIPC_MEDIUM_IMPORTANCE = 0x1 - TIPC_NODEID_LEN = 0x10 - TIPC_NODE_BITS = 0xc - TIPC_NODE_MASK = 0xfff - TIPC_NODE_OFFSET = 0x0 - TIPC_NODE_RECVQ_DEPTH = 0x83 - TIPC_NODE_SIZE = 0xfff - TIPC_NODE_STATE = 0x0 - TIPC_OK = 0x0 - TIPC_PUBLISHED = 0x1 - TIPC_RESERVED_TYPES = 0x40 - TIPC_RETDATA = 0x2 - TIPC_SERVICE_ADDR = 0x2 - TIPC_SERVICE_RANGE = 0x1 - TIPC_SOCKET_ADDR = 0x3 - TIPC_SOCK_RECVQ_DEPTH = 0x84 - TIPC_SOCK_RECVQ_USED = 0x89 - TIPC_SRC_DROPPABLE = 0x80 - 
TIPC_SUBSCR_TIMEOUT = 0x3 - TIPC_SUB_CANCEL = 0x4 - TIPC_SUB_PORTS = 0x1 - TIPC_SUB_SERVICE = 0x2 - TIPC_TOP_SRV = 0x1 - TIPC_WAIT_FOREVER = 0xffffffff - TIPC_WITHDRAWN = 0x2 - TIPC_ZONE_BITS = 0x8 - TIPC_ZONE_CLUSTER_MASK = 0xfffff000 - TIPC_ZONE_MASK = 0xff000000 - TIPC_ZONE_OFFSET = 0x18 - TIPC_ZONE_SCOPE = 0x1 - TIPC_ZONE_SIZE = 0xff - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x100 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = 0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x401054d5 - TUNDETACHFILTER = 0x401054d6 - TUNGETDEVNETNS = 0x54e3 - TUNGETFEATURES = 0x800454cf - TUNGETFILTER = 0x801054db - TUNGETIFF = 0x800454d2 - TUNGETSNDBUF = 0x800454d3 - TUNGETVNETBE = 0x800454df - TUNGETVNETHDRSZ = 0x800454d7 - TUNGETVNETLE = 0x800454dd - TUNSETCARRIER = 0x400454e2 - TUNSETDEBUG = 0x400454c9 - TUNSETFILTEREBPF = 0x800454e1 - TUNSETGROUP = 0x400454ce - TUNSETIFF = 0x400454ca - TUNSETIFINDEX = 0x400454da - TUNSETLINK = 0x400454cd - TUNSETNOCSUM = 0x400454c8 - TUNSETOFFLOAD = 0x400454d0 - TUNSETOWNER = 0x400454cc - TUNSETPERSIST = 0x400454cb - TUNSETQUEUE = 0x400454d9 - TUNSETSNDBUF = 0x400454d4 - TUNSETSTEERINGEBPF = 0x800454e0 - TUNSETTXFILTER = 0x400454d1 - TUNSETVNETBE = 0x400454de - TUNSETVNETHDRSZ = 0x400454d8 - TUNSETVNETLE = 0x400454dc - UBI_IOCATT = 0x40186f40 - UBI_IOCDET = 0x40046f41 - UBI_IOCEBCH = 0x40044f02 - UBI_IOCEBER = 0x40044f01 - UBI_IOCEBISMAP = 0x80044f05 - UBI_IOCEBMAP = 0x40084f03 - UBI_IOCEBUNMAP = 0x40044f04 - UBI_IOCMKVOL = 0x40986f00 - UBI_IOCRMVOL = 0x40046f01 - UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 - UBI_IOCRSVOL = 0x400c6f02 - UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 - UBI_IOCVOLCRBLK = 0x40804f07 - UBI_IOCVOLRMBLK = 0x4f08 - UBI_IOCVOLUP = 0x40084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x4 - VEOL = 0xb - VEOL2 = 0x10 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x6 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x80045702 - WDIOC_GETPRETIMEOUT = 0x80045709 - WDIOC_GETSTATUS = 0x80045701 - WDIOC_GETSUPPORT = 0x80285700 - WDIOC_GETTEMP = 0x80045703 - WDIOC_GETTIMELEFT = 0x8004570a - WDIOC_GETTIMEOUT = 0x80045707 - WDIOC_KEEPALIVE = 0x80045705 - WDIOC_SETOPTIONS = 0x80045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 
0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_OPTIONS = 0x8 - XDP_OPTIONS_ZEROCOPY = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - Z3FOLD_MAGIC = 0x33 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + 
CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + ESR_MAGIC = 0x45535201 + EXTPROC = 0x10000 + EXTRA_MAGIC = 0x45585401 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FPSIMD_MAGIC = 0x46508001 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x10000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x404000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 
+ RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW = 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + SVE_MAGIC = 0x53564501 + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 
0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x62) EADDRNOTAVAIL = syscall.Errno(0x63) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x61) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x72) EBADE = syscall.Errno(0x34) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x4d) EBADMSG = syscall.Errno(0x4a) EBADR = syscall.Errno(0x35) EBADRQC = syscall.Errno(0x38) EBADSLT = syscall.Errno(0x39) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7d) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x2c) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x67) @@ -2790,23 +495,15 @@ const ( EDEADLK = syscall.Errno(0x23) EDEADLOCK = syscall.Errno(0x23) EDESTADDRREQ = 
syscall.Errno(0x59) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x7a) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x70) EHOSTUNREACH = syscall.Errno(0x71) EHWPOISON = syscall.Errno(0x85) EIDRM = syscall.Errno(0x2b) EILSEQ = syscall.Errno(0x54) EINPROGRESS = syscall.Errno(0x73) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x6a) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x7f) EKEYREJECTED = syscall.Errno(0x81) @@ -2823,8 +520,6 @@ const ( ELNRNG = syscall.Errno(0x30) ELOOP = syscall.Errno(0x28) EMEDIUMTYPE = syscall.Errno(0x7c) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x5a) EMULTIHOP = syscall.Errno(0x48) ENAMETOOLONG = syscall.Errno(0x24) @@ -2832,99 +527,67 @@ const ( ENETDOWN = syscall.Errno(0x64) ENETRESET = syscall.Errno(0x66) ENETUNREACH = syscall.Errno(0x65) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x37) ENOBUFS = syscall.Errno(0x69) ENOCSI = syscall.Errno(0x32) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x7e) ENOLCK = syscall.Errno(0x25) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x2a) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x6b) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x27) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x83) ENOTSOCK = syscall.Errno(0x58) ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x5f) EOVERFLOW = syscall.Errno(0x4b) EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x5d) EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x4e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x55) ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x6c) ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x74) ESTRPIPE = syscall.Errno(0x56) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x6e) ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x31) EUSERS = syscall.Errno(0x57) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x36) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0x7) SIGCHLD = syscall.Signal(0x11) SIGCLD = syscall.Signal(0x11) SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x1d) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x1d) SIGPROF = 
syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1e) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTKFLT = syscall.Signal(0x10) SIGSTOP = syscall.Signal(0x13) SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 57742ea27..a23b08029 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -11,2786 +11,491 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - 
B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x40041270 - BLKBSZSET = 0x80041271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40041272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TEST_RND_HI32 = 0x4 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 
0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a 
- DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DMA_BUF_MAGIC = 0x444d4142 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x80 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LLDP = 0x88cc - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - 
FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x2000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x21 - F_GETLK64 = 0x21 - F_GETOWN = 0x17 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_FUTURE_WRITE = 0x10 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x22 - F_SETLK64 = 0x22 - F_SETLKW = 0x23 - F_SETLKW64 = 0x23 - F_SETOWN = 0x18 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC 
= 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x100 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x80 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - 
IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_ROUTER_ALERT_ISOLATE = 0x1e - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - 
IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CAPABILITIES = 0x1f - KEYCTL_CAPS0_BIG_KEY = 0x10 - KEYCTL_CAPS0_CAPABILITIES = 0x1 - KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 - KEYCTL_CAPS0_INVALIDATE = 0x20 - KEYCTL_CAPS0_MOVE = 0x80 - KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 - KEYCTL_CAPS0_PUBLIC_KEY = 0x8 - KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 - KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 - KEYCTL_CAPS1_NS_KEY_TAG = 0x2 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_MOVE = 0x1e - KEYCTL_MOVE_EXCL = 0x1 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 
0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x800 - MAP_ANONYMOUS = 0x800 - MAP_DENYWRITE = 0x2000 - MAP_EXECUTABLE = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x1000 - MAP_HUGETLB = 0x80000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x8000 - MAP_NONBLOCK = 0x20000 - MAP_NORESERVE = 0x400 - MAP_POPULATE = 0x10000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x800 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x40000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 
0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFDBITS = 0x20 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - NS_GET_NSTYPE = 0x2000b703 - NS_GET_OWNER_UID = 0x2000b704 - NS_GET_PARENT = 0x2000b702 - NS_GET_USERNS = 0x2000b701 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x1000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x100 - O_DIRECT = 0x8000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x10 - O_EXCL = 0x400 - O_FSYNC = 0x4010 - O_LARGEFILE = 0x2000 - O_NDELAY = 0x80 - O_NOATIME = 0x40000 - O_NOCTTY = 0x800 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x80 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x4010 - O_SYNC = 0x4010 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - 
PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40042407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc004240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80042406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4008743f - PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80087446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x800c744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80087447 - PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - 
PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 - PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_SYSCALL_INFO = 0x420e - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_GET_THREAD_AREA_3264 = 0xc4 - PTRACE_GET_WATCH_REGS = 0xd0 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKDATA_3264 = 0xc1 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKTEXT_3264 = 0xc0 - PTRACE_PEEKUSR = 0x3 - 
PTRACE_POKEDATA = 0x5 - PTRACE_POKEDATA_3264 = 0xc3 - PTRACE_POKETEXT = 0x4 - PTRACE_POKETEXT_3264 = 0xc2 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SET_WATCH_REGS = 0xd1 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_SYSCALL_INFO_ENTRY = 0x1 - PTRACE_SYSCALL_INFO_EXIT = 0x2 - PTRACE_SYSCALL_INFO_NONE = 0x0 - PTRACE_SYSCALL_INFO_SECCOMP = 0x3 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x6 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x9 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x5 - RLIMIT_NPROC = 0x8 - RLIMIT_RSS = 0x7 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4004700d - RTC_EPOCH_SET = 0x8004700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x4004700b - RTC_IRQP_SET = 0x8004700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x401c7011 - RTC_PLL_SET = 0x801c7012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - 
RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNEXTHOP = 0x69 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNEXTHOP = 0x6a - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNEXTHOP = 0x68 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x17 - RTM_NR_MSGTYPES = 0x5c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x80 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x40047307 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGETLINKNAME = 0x89e0 - SIOCGETNODEID = 0x89e1 - SIOCGHWTSTAMP = 0x89b1 - 
SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x40047309 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x467f - SIOCOUTQ = 0x7472 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x80047308 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x1 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x80 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x2 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1009 - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DETACH_REUSEPORT_BPF = 0x44 - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - 
SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x11 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x12 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x1f - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x1005 - SO_STYLE = 0x1008 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x1008 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x5407 - TCGETA = 0x5401 - TCGETS = 0x540d - TCGETS2 = 0x4030542a - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 
0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x5410 - TCSBRK = 0x5405 - TCSBRKP = 0x5486 - TCSETA = 0x5402 - TCSETAF = 0x5404 - TCSETAW = 0x5403 - TCSETS = 0x540e - TCSETS2 = 0x8030542b - TCSETSF = 0x5410 - TCSETSF2 = 0x8030542d - TCSETSW = 0x540f - TCSETSW2 = 0x8030542c - TCXONC = 0x5406 - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x80047478 - TIOCEXCL = 0x740d - TIOCGDEV = 0x40045432 - TIOCGETD = 0x7400 - TIOCGETP = 0x7408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x5492 - TIOCGISO7816 = 0x40285442 - TIOCGLCKTRMIOS = 0x548b - TIOCGLTC = 0x7474 - TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 - TIOCGRS485 = 0x4020542e - TIOCGSERIAL = 0x5484 - TIOCGSID = 0x7416 - TIOCGSOFTCAR = 0x5481 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x467f - TIOCLINUX = 0x5483 - TIOCMBIC = 0x741c - TIOCMBIS = 0x741b - TIOCMGET = 0x741d - TIOCMIWAIT = 0x5491 - TIOCMSET = 0x741a - TIOCM_CAR = 0x100 - TIOCM_CD = 0x100 - TIOCM_CTS = 0x40 - TIOCM_DSR = 0x400 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x200 - TIOCM_RNG = 0x200 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x20 - TIOCM_ST = 0x10 - TIOCNOTTY = 0x5471 - TIOCNXCL = 0x740e - TIOCOUTQ = 0x7472 - TIOCPKT = 0x5470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x5480 - TIOCSERCONFIG = 0x5488 - TIOCSERGETLSR = 0x548e - TIOCSERGETMULTI = 0x548f - TIOCSERGSTRUCT = 0x548d - TIOCSERGWILD = 0x5489 - TIOCSERSETMULTI = 0x5490 - TIOCSERSWILD = 0x548a - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x7401 - TIOCSETN = 0x740a - TIOCSETP = 0x7409 - TIOCSIG = 0x80045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x548c - TIOCSLTC = 0x7475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0xc020542f - TIOCSSERIAL = 0x5485 - TIOCSSOFTCAR = 0x5482 - TIOCSTI = 0x5472 - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TIPC_ADDR_ID = 0x3 - TIPC_ADDR_MCAST = 0x1 - TIPC_ADDR_NAME = 0x2 - TIPC_ADDR_NAMESEQ = 0x1 - TIPC_CFG_SRV = 0x0 - TIPC_CLUSTER_BITS = 0xc - TIPC_CLUSTER_MASK = 0xfff000 - TIPC_CLUSTER_OFFSET = 0xc - TIPC_CLUSTER_SIZE = 0xfff - TIPC_CONN_SHUTDOWN = 0x5 - TIPC_CONN_TIMEOUT = 0x82 - TIPC_CRITICAL_IMPORTANCE = 0x3 - TIPC_DESTNAME = 0x3 - TIPC_DEST_DROPPABLE = 0x81 - TIPC_ERRINFO = 0x1 - TIPC_ERR_NO_NAME = 0x1 - TIPC_ERR_NO_NODE = 0x3 - 
TIPC_ERR_NO_PORT = 0x2 - TIPC_ERR_OVERLOAD = 0x4 - TIPC_GROUP_JOIN = 0x87 - TIPC_GROUP_LEAVE = 0x88 - TIPC_GROUP_LOOPBACK = 0x1 - TIPC_GROUP_MEMBER_EVTS = 0x2 - TIPC_HIGH_IMPORTANCE = 0x2 - TIPC_IMPORTANCE = 0x7f - TIPC_LINK_STATE = 0x2 - TIPC_LOW_IMPORTANCE = 0x0 - TIPC_MAX_BEARER_NAME = 0x20 - TIPC_MAX_IF_NAME = 0x10 - TIPC_MAX_LINK_NAME = 0x44 - TIPC_MAX_MEDIA_NAME = 0x10 - TIPC_MAX_USER_MSG_SIZE = 0x101d0 - TIPC_MCAST_BROADCAST = 0x85 - TIPC_MCAST_REPLICAST = 0x86 - TIPC_MEDIUM_IMPORTANCE = 0x1 - TIPC_NODEID_LEN = 0x10 - TIPC_NODE_BITS = 0xc - TIPC_NODE_MASK = 0xfff - TIPC_NODE_OFFSET = 0x0 - TIPC_NODE_RECVQ_DEPTH = 0x83 - TIPC_NODE_SIZE = 0xfff - TIPC_NODE_STATE = 0x0 - TIPC_OK = 0x0 - TIPC_PUBLISHED = 0x1 - TIPC_RESERVED_TYPES = 0x40 - TIPC_RETDATA = 0x2 - TIPC_SERVICE_ADDR = 0x2 - TIPC_SERVICE_RANGE = 0x1 - TIPC_SOCKET_ADDR = 0x3 - TIPC_SOCK_RECVQ_DEPTH = 0x84 - TIPC_SOCK_RECVQ_USED = 0x89 - TIPC_SRC_DROPPABLE = 0x80 - TIPC_SUBSCR_TIMEOUT = 0x3 - TIPC_SUB_CANCEL = 0x4 - TIPC_SUB_PORTS = 0x1 - TIPC_SUB_SERVICE = 0x2 - TIPC_TOP_SRV = 0x1 - TIPC_WAIT_FOREVER = 0xffffffff - TIPC_WITHDRAWN = 0x2 - TIPC_ZONE_BITS = 0x8 - TIPC_ZONE_CLUSTER_MASK = 0xfffff000 - TIPC_ZONE_MASK = 0xff000000 - TIPC_ZONE_OFFSET = 0x18 - TIPC_ZONE_SCOPE = 0x1 - TIPC_ZONE_SIZE = 0xff - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x8000 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = 0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x800854d5 - TUNDETACHFILTER = 0x800854d6 - TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x400854db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x10 - VEOL = 0x11 - VEOL2 = 0x6 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR 
= 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x4 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VSWTCH = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x20 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_OPTIONS = 0x8 - XDP_OPTIONS_ZEROCOPY = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 
0x58465342 - XTABS = 0x1800 - Z3FOLD_MAGIC = 0x33 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x40041270 + BLKBSZSET = 0x80041271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40041272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x80 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40046601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0x21 + F_GETLK64 = 0x21 + F_GETOWN = 0x17 + F_RDLCK = 0x0 + F_SETLK = 0x22 + F_SETLK64 = 0x22 + F_SETLKW = 0x23 + F_SETLKW64 = 0x23 + F_SETOWN = 0x18 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x100 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x80 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x800 + MAP_ANONYMOUS = 0x800 + MAP_DENYWRITE = 0x2000 + MAP_EXECUTABLE = 0x4000 + MAP_GROWSDOWN = 0x1000 + MAP_HUGETLB = 0x80000 + MAP_LOCKED = 0x8000 + MAP_NONBLOCK = 0x20000 + MAP_NORESERVE = 0x400 + MAP_POPULATE = 0x10000 + MAP_RENAME = 0x800 + MAP_STACK = 0x40000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x20 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x8 + O_ASYNC = 0x1000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x100 + O_DIRECT = 0x8000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x10 + O_EXCL = 0x400 + O_FSYNC = 0x4010 + O_LARGEFILE = 0x2000 + O_NDELAY = 0x80 + O_NOATIME = 0x40000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x80 + O_PATH = 0x200000 + O_RSYNC = 0x4010 + O_SYNC = 0x4010 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc004240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 
0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4008743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80087446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x800c744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80087447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e + PR_SET_PTRACER_ANY = 0xffffffff + PTRACE_GETFPREGS = 0xe + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_GET_THREAD_AREA_3264 = 0xc4 + PTRACE_GET_WATCH_REGS = 0xd0 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_PEEKDATA_3264 = 0xc1 + PTRACE_PEEKTEXT_3264 = 0xc0 + PTRACE_POKEDATA_3264 = 0xc3 + PTRACE_POKETEXT_3264 = 0xc2 + PTRACE_SETFPREGS = 0xf + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SET_WATCH_REGS = 0xd1 + RLIMIT_AS = 0x6 + RLIMIT_MEMLOCK = 0x9 + RLIMIT_NOFILE = 0x5 + RLIMIT_NPROC = 0x8 + RLIMIT_RSS = 0x7 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4004700d + RTC_EPOCH_SET = 0x8004700e + RTC_IRQP_READ = 0x4004700b + RTC_IRQP_SET = 0x8004700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x401c7011 + RTC_PLL_SET = 0x801c7012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x80 + SIOCATMARK = 0x40047307 + SIOCGPGRP = 0x40047309 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x1 + SOCK_NONBLOCK = 0x80 + SOCK_STREAM = 0x2 + SOL_SOCKET = 0xffff + SO_ACCEPTCONN = 0x1009 + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x11 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1e + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + 
SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x1f + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 + SO_STYLE = 0x1008 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x1008 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCGETS2 = 0x4030542a + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSBRKP = 0x5486 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETS2 = 0x8030542b + TCSETSF = 0x5410 + TCSETSF2 = 0x8030542d + TCSETSW = 0x540f + TCSETSW2 = 0x8030542c + TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 + TIOCCBRK = 0x5428 + TIOCCONS = 0x80047478 + TIOCEXCL = 0x740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x5492 + TIOCGISO7816 = 0x40285442 + TIOCGLCKTRMIOS = 0x548b + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x4020542e + TIOCGSERIAL = 0x5484 + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5481 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x467f + TIOCLINUX = 0x5483 + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMIWAIT = 0x5491 + TIOCMSET = 0x741a + TIOCM_CAR = 0x100 + TIOCM_CD = 0x100 + TIOCM_CTS = 0x40 + TIOCM_DSR = 0x400 + TIOCM_RI = 0x200 + TIOCM_RNG = 0x200 + TIOCM_SR = 0x20 + TIOCM_ST = 0x10 + TIOCNOTTY = 0x5471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7472 + TIOCPKT = 0x5470 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x5480 + TIOCSERCONFIG = 0x5488 + TIOCSERGETLSR = 0x548e + TIOCSERGETMULTI = 0x548f + TIOCSERGSTRUCT = 0x548d + TIOCSERGWILD = 0x5489 + TIOCSERSETMULTI = 0x5490 + TIOCSERSWILD = 0x548a + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIG = 0x80045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x548c + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0xc020542f + TIOCSSERIAL = 0x5485 + TIOCSSOFTCAR = 0x5482 + TIOCSTI = 0x5472 + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x8000 + TUNATTACHFILTER = 0x800854d5 + TUNDETACHFILTER = 0x800854d6 + TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x400854db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 
0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0xd + VEOF = 0x10 + VEOL = 0x11 + VEOL2 = 0x6 + VMIN = 0x4 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VSWTCH = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x20 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x7d) EADDRNOTAVAIL = syscall.Errno(0x7e) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x7c) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x95) EBADE = syscall.Errno(0x32) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x51) EBADMSG = syscall.Errno(0x4d) EBADR = syscall.Errno(0x33) EBADRQC = syscall.Errno(0x36) EBADSLT = syscall.Errno(0x37) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x9e) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x25) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x82) @@ -2799,12 +504,8 @@ const ( EDEADLK = syscall.Errno(0x2d) EDEADLOCK = syscall.Errno(0x38) EDESTADDRREQ = syscall.Errno(0x60) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x46d) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x93) EHOSTUNREACH = syscall.Errno(0x94) EHWPOISON = syscall.Errno(0xa8) @@ -2812,11 +513,7 @@ const ( EILSEQ = syscall.Errno(0x58) EINIT = syscall.Errno(0x8d) EINPROGRESS = syscall.Errno(0x96) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x85) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x8b) EKEYEXPIRED = syscall.Errno(0xa2) EKEYREJECTED = syscall.Errno(0xa4) @@ -2833,8 +530,6 @@ const ( ELNRNG = syscall.Errno(0x29) ELOOP = syscall.Errno(0x5a) EMEDIUMTYPE = syscall.Errno(0xa0) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x61) EMULTIHOP = syscall.Errno(0x4a) ENAMETOOLONG = syscall.Errno(0x4e) @@ -2842,100 +537,68 @@ const ( ENETDOWN = syscall.Errno(0x7f) ENETRESET = syscall.Errno(0x81) ENETUNREACH = syscall.Errno(0x80) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x35) ENOBUFS = syscall.Errno(0x84) ENOCSI = syscall.Errno(0x2b) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0xa1) ENOLCK = syscall.Errno(0x2e) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x9f) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x23) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x63) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x59) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x86) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x5d) ENOTNAM = syscall.Errno(0x89) ENOTRECOVERABLE = syscall.Errno(0xa6) ENOTSOCK = syscall.Errno(0x5f) ENOTSUP = syscall.Errno(0x7a) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x50) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = 
syscall.Errno(0x7a) EOVERFLOW = syscall.Errno(0x4f) EOWNERDEAD = syscall.Errno(0xa5) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x7b) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x78) EPROTOTYPE = syscall.Errno(0x62) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x52) EREMDEV = syscall.Errno(0x8e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x8c) ERESTART = syscall.Errno(0x5b) ERFKILL = syscall.Errno(0xa7) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x8f) ESOCKTNOSUPPORT = syscall.Errno(0x79) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x97) ESTRPIPE = syscall.Errno(0x5c) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x91) ETOOMANYREFS = syscall.Errno(0x90) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x87) EUNATCH = syscall.Errno(0x2a) EUSERS = syscall.Errno(0x5e) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x34) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0xa) SIGCHLD = syscall.Signal(0x12) SIGCLD = syscall.Signal(0x12) SIGCONT = syscall.Signal(0x19) SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x16) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x16) SIGPROF = syscall.Signal(0x1d) SIGPWR = syscall.Signal(0x13) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTOP = syscall.Signal(0x17) SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x18) SIGTTIN = syscall.Signal(0x1a) SIGTTOU = syscall.Signal(0x1b) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 33bfa6cbf..a5a921e43 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -11,2786 +11,491 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - 
ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x40081270 - BLKBSZSET = 0x80081271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40081272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - 
BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TEST_RND_HI32 = 0x4 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 
0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DMA_BUF_MAGIC = 0x444d4142 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x80 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - 
ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LLDP = 0x88cc - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x2000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - 
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0xe - F_GETLK64 = 0xe - F_GETOWN = 0x17 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_FUTURE_WRITE = 0x10 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0x6 - F_SETLKW = 0x7 - F_SETLKW64 = 0x7 - F_SETOWN = 0x18 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x100 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 
0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x80 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - 
IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_ROUTER_ALERT_ISOLATE = 0x1e - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CAPABILITIES = 0x1f - KEYCTL_CAPS0_BIG_KEY = 0x10 - KEYCTL_CAPS0_CAPABILITIES = 0x1 - KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 - KEYCTL_CAPS0_INVALIDATE = 0x20 - KEYCTL_CAPS0_MOVE = 0x80 - KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 - KEYCTL_CAPS0_PUBLIC_KEY = 0x8 - KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 - KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 - KEYCTL_CAPS1_NS_KEY_TAG = 0x2 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_MOVE = 0x1e - KEYCTL_MOVE_EXCL = 0x1 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - 
KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x800 - MAP_ANONYMOUS = 0x800 - MAP_DENYWRITE = 0x2000 - MAP_EXECUTABLE = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x1000 - MAP_HUGETLB = 0x80000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x8000 - MAP_NONBLOCK = 0x20000 - MAP_NORESERVE = 0x400 - MAP_POPULATE = 0x10000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x800 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x40000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 
0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFDBITS = 0x40 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - 
NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - NS_GET_NSTYPE = 0x2000b703 - NS_GET_OWNER_UID = 0x2000b704 - NS_GET_PARENT = 0x2000b702 - NS_GET_USERNS = 0x2000b701 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x1000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x100 - O_DIRECT = 0x8000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x10 - O_EXCL = 0x400 - O_FSYNC = 0x4010 - O_LARGEFILE = 0x0 - O_NDELAY = 0x80 - O_NOATIME = 0x40000 - O_NOCTTY = 0x800 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x80 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x4010 - O_SYNC = 0x4010 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4010743f - 
PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80107446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x8010744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80107447 - PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 
0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 - PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_SYSCALL_INFO = 0x420e - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_GET_THREAD_AREA_3264 = 0xc4 - PTRACE_GET_WATCH_REGS = 0xd0 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKDATA_3264 = 0xc1 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKTEXT_3264 = 0xc0 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKEDATA_3264 = 0xc3 - PTRACE_POKETEXT = 0x4 - PTRACE_POKETEXT_3264 = 0xc2 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SET_WATCH_REGS = 0xd1 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_SYSCALL_INFO_ENTRY = 0x1 - PTRACE_SYSCALL_INFO_EXIT = 0x2 - PTRACE_SYSCALL_INFO_NONE = 0x0 - PTRACE_SYSCALL_INFO_SECCOMP = 0x3 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x6 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x9 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x5 - RLIMIT_NPROC = 0x8 - RLIMIT_RSS = 0x7 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - 
RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4008700d - RTC_EPOCH_SET = 0x8008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x4008700b - RTC_IRQP_SET = 0x8008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x40207011 - RTC_PLL_SET = 0x80207012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNEXTHOP = 0x69 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNEXTHOP = 0x6a - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNEXTHOP = 0x68 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x17 - RTM_NR_MSGTYPES = 0x5c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 
0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x80 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x40047307 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGETLINKNAME = 0x89e0 - SIOCGETNODEID = 0x89e1 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x40047309 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x467f - SIOCOUTQ = 0x7472 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x80047308 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - 
SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x1 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x80 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x2 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1009 - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DETACH_REUSEPORT_BPF = 0x44 - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x11 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x12 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x1f - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x1005 - SO_STYLE = 0x1008 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x1008 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - 
STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x5407 - TCGETA = 0x5401 - TCGETS = 0x540d - TCGETS2 = 0x4030542a - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x5410 - TCSBRK = 0x5405 - TCSBRKP = 0x5486 - TCSETA = 0x5402 - TCSETAF = 0x5404 - TCSETAW = 0x5403 - TCSETS = 0x540e - TCSETS2 = 0x8030542b - TCSETSF = 0x5410 - TCSETSF2 = 0x8030542d - TCSETSW = 0x540f - TCSETSW2 = 0x8030542c - TCXONC = 0x5406 - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x80047478 - TIOCEXCL = 0x740d - TIOCGDEV = 0x40045432 - TIOCGETD = 0x7400 - TIOCGETP = 0x7408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x5492 - TIOCGISO7816 = 0x40285442 - TIOCGLCKTRMIOS = 0x548b - TIOCGLTC = 0x7474 - TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 - TIOCGRS485 = 0x4020542e - TIOCGSERIAL = 0x5484 - TIOCGSID = 0x7416 - TIOCGSOFTCAR = 0x5481 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x467f - TIOCLINUX = 0x5483 - TIOCMBIC = 0x741c 
- TIOCMBIS = 0x741b - TIOCMGET = 0x741d - TIOCMIWAIT = 0x5491 - TIOCMSET = 0x741a - TIOCM_CAR = 0x100 - TIOCM_CD = 0x100 - TIOCM_CTS = 0x40 - TIOCM_DSR = 0x400 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x200 - TIOCM_RNG = 0x200 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x20 - TIOCM_ST = 0x10 - TIOCNOTTY = 0x5471 - TIOCNXCL = 0x740e - TIOCOUTQ = 0x7472 - TIOCPKT = 0x5470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x5480 - TIOCSERCONFIG = 0x5488 - TIOCSERGETLSR = 0x548e - TIOCSERGETMULTI = 0x548f - TIOCSERGSTRUCT = 0x548d - TIOCSERGWILD = 0x5489 - TIOCSERSETMULTI = 0x5490 - TIOCSERSWILD = 0x548a - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x7401 - TIOCSETN = 0x740a - TIOCSETP = 0x7409 - TIOCSIG = 0x80045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x548c - TIOCSLTC = 0x7475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0xc020542f - TIOCSSERIAL = 0x5485 - TIOCSSOFTCAR = 0x5482 - TIOCSTI = 0x5472 - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TIPC_ADDR_ID = 0x3 - TIPC_ADDR_MCAST = 0x1 - TIPC_ADDR_NAME = 0x2 - TIPC_ADDR_NAMESEQ = 0x1 - TIPC_CFG_SRV = 0x0 - TIPC_CLUSTER_BITS = 0xc - TIPC_CLUSTER_MASK = 0xfff000 - TIPC_CLUSTER_OFFSET = 0xc - TIPC_CLUSTER_SIZE = 0xfff - TIPC_CONN_SHUTDOWN = 0x5 - TIPC_CONN_TIMEOUT = 0x82 - TIPC_CRITICAL_IMPORTANCE = 0x3 - TIPC_DESTNAME = 0x3 - TIPC_DEST_DROPPABLE = 0x81 - TIPC_ERRINFO = 0x1 - TIPC_ERR_NO_NAME = 0x1 - TIPC_ERR_NO_NODE = 0x3 - TIPC_ERR_NO_PORT = 0x2 - TIPC_ERR_OVERLOAD = 0x4 - TIPC_GROUP_JOIN = 0x87 - TIPC_GROUP_LEAVE = 0x88 - TIPC_GROUP_LOOPBACK = 0x1 - TIPC_GROUP_MEMBER_EVTS = 0x2 - TIPC_HIGH_IMPORTANCE = 0x2 - TIPC_IMPORTANCE = 0x7f - TIPC_LINK_STATE = 0x2 - TIPC_LOW_IMPORTANCE = 0x0 - TIPC_MAX_BEARER_NAME = 0x20 - TIPC_MAX_IF_NAME = 0x10 - TIPC_MAX_LINK_NAME = 0x44 - TIPC_MAX_MEDIA_NAME = 0x10 - TIPC_MAX_USER_MSG_SIZE = 0x101d0 - TIPC_MCAST_BROADCAST = 0x85 - TIPC_MCAST_REPLICAST = 0x86 - TIPC_MEDIUM_IMPORTANCE = 0x1 - TIPC_NODEID_LEN = 0x10 - TIPC_NODE_BITS = 0xc - TIPC_NODE_MASK = 0xfff - TIPC_NODE_OFFSET = 0x0 - TIPC_NODE_RECVQ_DEPTH = 0x83 - TIPC_NODE_SIZE = 0xfff - TIPC_NODE_STATE = 0x0 - TIPC_OK = 0x0 - TIPC_PUBLISHED = 0x1 - TIPC_RESERVED_TYPES = 0x40 - TIPC_RETDATA = 0x2 - TIPC_SERVICE_ADDR = 0x2 - TIPC_SERVICE_RANGE = 0x1 - TIPC_SOCKET_ADDR = 0x3 - TIPC_SOCK_RECVQ_DEPTH = 0x84 - TIPC_SOCK_RECVQ_USED = 0x89 - TIPC_SRC_DROPPABLE = 0x80 - TIPC_SUBSCR_TIMEOUT = 0x3 - TIPC_SUB_CANCEL = 0x4 - TIPC_SUB_PORTS = 0x1 - TIPC_SUB_SERVICE = 0x2 - TIPC_TOP_SRV = 0x1 - TIPC_WAIT_FOREVER = 0xffffffff - TIPC_WITHDRAWN = 0x2 - TIPC_ZONE_BITS = 0x8 - TIPC_ZONE_CLUSTER_MASK = 0xfffff000 - TIPC_ZONE_MASK = 0xff000000 - TIPC_ZONE_OFFSET = 0x18 - TIPC_ZONE_SCOPE = 0x1 - TIPC_ZONE_SIZE = 0xff - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x8000 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = 0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x801054d5 - TUNDETACHFILTER = 0x801054d6 
- TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x401054db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x10 - VEOL = 0x11 - VEOL2 = 0x6 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x4 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VSWTCH = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 
0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_OPTIONS = 0x8 - XDP_OPTIONS_ZEROCOPY = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - Z3FOLD_MAGIC = 0x33 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x40081270 + BLKBSZSET = 0x80081271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x80 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0xe + F_GETLK64 = 0xe + F_GETOWN = 0x17 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x18 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x100 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x80 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x800 + MAP_ANONYMOUS = 0x800 + MAP_DENYWRITE = 0x2000 + MAP_EXECUTABLE = 0x4000 + 
MAP_GROWSDOWN = 0x1000 + MAP_HUGETLB = 0x80000 + MAP_LOCKED = 0x8000 + MAP_NONBLOCK = 0x20000 + MAP_NORESERVE = 0x400 + MAP_POPULATE = 0x10000 + MAP_RENAME = 0x800 + MAP_STACK = 0x40000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x8 + O_ASYNC = 0x1000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x100 + O_DIRECT = 0x8000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x10 + O_EXCL = 0x400 + O_FSYNC = 0x4010 + O_LARGEFILE = 0x0 + O_NDELAY = 0x80 + O_NOATIME = 0x40000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x80 + O_PATH = 0x200000 + O_RSYNC = 0x4010 + O_SYNC = 0x4010 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x8010744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80107447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFPREGS = 0xe + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_GET_THREAD_AREA_3264 = 0xc4 + PTRACE_GET_WATCH_REGS = 0xd0 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_PEEKDATA_3264 = 0xc1 + PTRACE_PEEKTEXT_3264 = 0xc0 + PTRACE_POKEDATA_3264 = 0xc3 + PTRACE_POKETEXT_3264 = 0xc2 + PTRACE_SETFPREGS = 0xf + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SET_WATCH_REGS = 0xd1 + RLIMIT_AS = 0x6 + RLIMIT_MEMLOCK = 0x9 + RLIMIT_NOFILE = 0x5 + RLIMIT_NPROC = 0x8 + RLIMIT_RSS = 0x7 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 
0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x80 + SIOCATMARK = 0x40047307 + SIOCGPGRP = 0x40047309 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x1 + SOCK_NONBLOCK = 0x80 + SOCK_STREAM = 0x2 + SOL_SOCKET = 0xffff + SO_ACCEPTCONN = 0x1009 + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x11 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1e + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x1f + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 + SO_STYLE = 0x1008 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x1008 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCGETS2 = 0x4030542a + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSBRKP = 0x5486 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETS2 = 0x8030542b + TCSETSF = 0x5410 + TCSETSF2 = 0x8030542d + TCSETSW = 0x540f + TCSETSW2 = 0x8030542c + TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 + TIOCCBRK = 0x5428 + TIOCCONS = 0x80047478 + TIOCEXCL = 0x740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x5492 + TIOCGISO7816 = 0x40285442 + TIOCGLCKTRMIOS = 0x548b + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x4020542e + TIOCGSERIAL = 0x5484 + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5481 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x467f + TIOCLINUX = 0x5483 + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMIWAIT = 0x5491 + TIOCMSET = 0x741a + TIOCM_CAR = 0x100 + TIOCM_CD = 0x100 + TIOCM_CTS = 0x40 + TIOCM_DSR = 0x400 + TIOCM_RI = 0x200 + TIOCM_RNG = 0x200 + TIOCM_SR = 0x20 + TIOCM_ST = 0x10 + TIOCNOTTY = 0x5471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7472 + TIOCPKT = 0x5470 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x5480 + TIOCSERCONFIG = 0x5488 + TIOCSERGETLSR = 0x548e + TIOCSERGETMULTI = 0x548f + TIOCSERGSTRUCT = 0x548d + TIOCSERGWILD = 0x5489 + TIOCSERSETMULTI = 0x5490 + 
TIOCSERSWILD = 0x548a + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIG = 0x80045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x548c + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0xc020542f + TIOCSSERIAL = 0x5485 + TIOCSSOFTCAR = 0x5482 + TIOCSTI = 0x5472 + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x8000 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0xd + VEOF = 0x10 + VEOL = 0x11 + VEOL2 = 0x6 + VMIN = 0x4 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VSWTCH = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x7d) EADDRNOTAVAIL = syscall.Errno(0x7e) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x7c) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x95) EBADE = syscall.Errno(0x32) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x51) EBADMSG = syscall.Errno(0x4d) EBADR = syscall.Errno(0x33) EBADRQC = syscall.Errno(0x36) EBADSLT = syscall.Errno(0x37) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x9e) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x25) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x82) @@ -2799,12 +504,8 @@ const ( EDEADLK = syscall.Errno(0x2d) EDEADLOCK = syscall.Errno(0x38) EDESTADDRREQ = syscall.Errno(0x60) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x46d) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x93) EHOSTUNREACH = syscall.Errno(0x94) EHWPOISON = syscall.Errno(0xa8) @@ -2812,11 +513,7 @@ const ( EILSEQ = syscall.Errno(0x58) EINIT = syscall.Errno(0x8d) EINPROGRESS = syscall.Errno(0x96) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = 
syscall.Errno(0x5) EISCONN = syscall.Errno(0x85) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x8b) EKEYEXPIRED = syscall.Errno(0xa2) EKEYREJECTED = syscall.Errno(0xa4) @@ -2833,8 +530,6 @@ const ( ELNRNG = syscall.Errno(0x29) ELOOP = syscall.Errno(0x5a) EMEDIUMTYPE = syscall.Errno(0xa0) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x61) EMULTIHOP = syscall.Errno(0x4a) ENAMETOOLONG = syscall.Errno(0x4e) @@ -2842,100 +537,68 @@ const ( ENETDOWN = syscall.Errno(0x7f) ENETRESET = syscall.Errno(0x81) ENETUNREACH = syscall.Errno(0x80) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x35) ENOBUFS = syscall.Errno(0x84) ENOCSI = syscall.Errno(0x2b) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0xa1) ENOLCK = syscall.Errno(0x2e) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x9f) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x23) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x63) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x59) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x86) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x5d) ENOTNAM = syscall.Errno(0x89) ENOTRECOVERABLE = syscall.Errno(0xa6) ENOTSOCK = syscall.Errno(0x5f) ENOTSUP = syscall.Errno(0x7a) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x50) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x7a) EOVERFLOW = syscall.Errno(0x4f) EOWNERDEAD = syscall.Errno(0xa5) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x7b) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x78) EPROTOTYPE = syscall.Errno(0x62) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x52) EREMDEV = syscall.Errno(0x8e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x8c) ERESTART = syscall.Errno(0x5b) ERFKILL = syscall.Errno(0xa7) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x8f) ESOCKTNOSUPPORT = syscall.Errno(0x79) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x97) ESTRPIPE = syscall.Errno(0x5c) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x91) ETOOMANYREFS = syscall.Errno(0x90) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x87) EUNATCH = syscall.Errno(0x2a) EUSERS = syscall.Errno(0x5e) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x34) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0xa) SIGCHLD = syscall.Signal(0x12) SIGCLD = syscall.Signal(0x12) SIGCONT = syscall.Signal(0x19) SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x16) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x16) SIGPROF = syscall.Signal(0x1d) SIGPWR = syscall.Signal(0x13) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTOP = syscall.Signal(0x17) SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x18) SIGTTIN = syscall.Signal(0x1a) SIGTTOU = syscall.Signal(0x1b) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 89fd414e6..d088e197b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -11,2786 +11,491 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 
0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x40081270 - BLKBSZSET = 0x80081271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40081272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TEST_RND_HI32 = 0x4 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 
0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DMA_BUF_MAGIC = 0x444d4142 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 
- EFD_NONBLOCK = 0x80 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LLDP = 0x88cc - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 
- FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x2000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0xe - F_GETLK64 = 0xe - F_GETOWN = 0x17 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_FUTURE_WRITE = 0x10 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0x6 - F_SETLKW = 0x7 - F_SETLKW64 = 0x7 - F_SETOWN = 0x18 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - 
HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x100 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x80 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - 
IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_ROUTER_ALERT_ISOLATE = 0x1e - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 
0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CAPABILITIES = 0x1f - KEYCTL_CAPS0_BIG_KEY = 0x10 - KEYCTL_CAPS0_CAPABILITIES = 0x1 - KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 - KEYCTL_CAPS0_INVALIDATE = 0x20 - KEYCTL_CAPS0_MOVE = 0x80 - KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 - KEYCTL_CAPS0_PUBLIC_KEY = 0x8 - KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 - KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 - KEYCTL_CAPS1_NS_KEY_TAG = 0x2 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_MOVE = 0x1e - KEYCTL_MOVE_EXCL = 0x1 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - 
MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x800 - MAP_ANONYMOUS = 0x800 - MAP_DENYWRITE = 0x2000 - MAP_EXECUTABLE = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x1000 - MAP_HUGETLB = 0x80000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x8000 - MAP_NONBLOCK = 0x20000 - MAP_NORESERVE = 0x400 - MAP_POPULATE = 0x10000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x800 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x40000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 
0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFDBITS = 0x40 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - NS_GET_NSTYPE = 0x2000b703 - NS_GET_OWNER_UID = 0x2000b704 - NS_GET_PARENT = 0x2000b702 - NS_GET_USERNS = 0x2000b701 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x1000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x100 - O_DIRECT = 0x8000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x10 - O_EXCL = 0x400 - O_FSYNC = 0x4010 - O_LARGEFILE = 0x0 - O_NDELAY = 0x80 - O_NOATIME = 0x40000 - O_NOCTTY = 0x800 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x80 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x4010 - O_SYNC = 0x4010 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - 
PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4010743f - PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80107446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x8010744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80107447 - PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - 
PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 - PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_SYSCALL_INFO = 0x420e - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_GET_THREAD_AREA_3264 = 0xc4 - PTRACE_GET_WATCH_REGS = 0xd0 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKDATA_3264 = 0xc1 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKTEXT_3264 = 0xc0 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKEDATA_3264 = 0xc3 - PTRACE_POKETEXT = 0x4 - PTRACE_POKETEXT_3264 = 0xc2 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SET_WATCH_REGS = 
0xd1 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_SYSCALL_INFO_ENTRY = 0x1 - PTRACE_SYSCALL_INFO_EXIT = 0x2 - PTRACE_SYSCALL_INFO_NONE = 0x0 - PTRACE_SYSCALL_INFO_SECCOMP = 0x3 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x6 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x9 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x5 - RLIMIT_NPROC = 0x8 - RLIMIT_RSS = 0x7 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4008700d - RTC_EPOCH_SET = 0x8008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x4008700b - RTC_IRQP_SET = 0x8008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x40207011 - RTC_PLL_SET = 0x80207012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNEXTHOP = 0x69 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 
- RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNEXTHOP = 0x6a - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNEXTHOP = 0x68 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x17 - RTM_NR_MSGTYPES = 0x5c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x80 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x40047307 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGETLINKNAME = 0x89e0 - SIOCGETNODEID = 0x89e1 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 
- SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x40047309 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x467f - SIOCOUTQ = 0x7472 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x80047308 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x1 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x80 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x2 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1009 - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DETACH_REUSEPORT_BPF = 0x44 - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x11 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x12 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - 
SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x1f - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x1005 - SO_STYLE = 0x1008 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x1008 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x5407 - TCGETA = 0x5401 - TCGETS = 0x540d - TCGETS2 = 0x4030542a - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - 
TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x5410 - TCSBRK = 0x5405 - TCSBRKP = 0x5486 - TCSETA = 0x5402 - TCSETAF = 0x5404 - TCSETAW = 0x5403 - TCSETS = 0x540e - TCSETS2 = 0x8030542b - TCSETSF = 0x5410 - TCSETSF2 = 0x8030542d - TCSETSW = 0x540f - TCSETSW2 = 0x8030542c - TCXONC = 0x5406 - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x80047478 - TIOCEXCL = 0x740d - TIOCGDEV = 0x40045432 - TIOCGETD = 0x7400 - TIOCGETP = 0x7408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x5492 - TIOCGISO7816 = 0x40285442 - TIOCGLCKTRMIOS = 0x548b - TIOCGLTC = 0x7474 - TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 - TIOCGRS485 = 0x4020542e - TIOCGSERIAL = 0x5484 - TIOCGSID = 0x7416 - TIOCGSOFTCAR = 0x5481 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x467f - TIOCLINUX = 0x5483 - TIOCMBIC = 0x741c - TIOCMBIS = 0x741b - TIOCMGET = 0x741d - TIOCMIWAIT = 0x5491 - TIOCMSET = 0x741a - TIOCM_CAR = 0x100 - TIOCM_CD = 0x100 - TIOCM_CTS = 0x40 - TIOCM_DSR = 0x400 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x200 - TIOCM_RNG = 0x200 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x20 - TIOCM_ST = 0x10 - TIOCNOTTY = 0x5471 - TIOCNXCL = 0x740e - TIOCOUTQ = 0x7472 - TIOCPKT = 0x5470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x5480 - TIOCSERCONFIG = 0x5488 - TIOCSERGETLSR = 0x548e - TIOCSERGETMULTI = 0x548f - TIOCSERGSTRUCT = 0x548d - TIOCSERGWILD = 0x5489 - TIOCSERSETMULTI = 0x5490 - TIOCSERSWILD = 0x548a - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x7401 - TIOCSETN = 0x740a - TIOCSETP = 0x7409 - TIOCSIG = 0x80045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x548c - TIOCSLTC = 0x7475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0xc020542f - TIOCSSERIAL = 0x5485 - TIOCSSOFTCAR = 0x5482 - TIOCSTI = 0x5472 - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TIPC_ADDR_ID = 0x3 - TIPC_ADDR_MCAST = 0x1 - TIPC_ADDR_NAME = 0x2 - TIPC_ADDR_NAMESEQ = 0x1 - TIPC_CFG_SRV = 0x0 - TIPC_CLUSTER_BITS = 0xc - TIPC_CLUSTER_MASK = 0xfff000 - TIPC_CLUSTER_OFFSET = 0xc - TIPC_CLUSTER_SIZE = 0xfff - TIPC_CONN_SHUTDOWN = 0x5 - TIPC_CONN_TIMEOUT = 0x82 - TIPC_CRITICAL_IMPORTANCE = 0x3 - TIPC_DESTNAME = 0x3 - TIPC_DEST_DROPPABLE = 0x81 - TIPC_ERRINFO = 0x1 - TIPC_ERR_NO_NAME = 0x1 - TIPC_ERR_NO_NODE = 0x3 - TIPC_ERR_NO_PORT = 0x2 - TIPC_ERR_OVERLOAD = 0x4 - TIPC_GROUP_JOIN = 0x87 - TIPC_GROUP_LEAVE = 0x88 - TIPC_GROUP_LOOPBACK = 0x1 - TIPC_GROUP_MEMBER_EVTS = 0x2 - TIPC_HIGH_IMPORTANCE = 0x2 - TIPC_IMPORTANCE = 0x7f - TIPC_LINK_STATE = 0x2 - TIPC_LOW_IMPORTANCE = 0x0 - TIPC_MAX_BEARER_NAME = 0x20 - TIPC_MAX_IF_NAME = 0x10 - TIPC_MAX_LINK_NAME = 0x44 - TIPC_MAX_MEDIA_NAME = 0x10 - TIPC_MAX_USER_MSG_SIZE = 0x101d0 - TIPC_MCAST_BROADCAST = 0x85 - 
TIPC_MCAST_REPLICAST = 0x86 - TIPC_MEDIUM_IMPORTANCE = 0x1 - TIPC_NODEID_LEN = 0x10 - TIPC_NODE_BITS = 0xc - TIPC_NODE_MASK = 0xfff - TIPC_NODE_OFFSET = 0x0 - TIPC_NODE_RECVQ_DEPTH = 0x83 - TIPC_NODE_SIZE = 0xfff - TIPC_NODE_STATE = 0x0 - TIPC_OK = 0x0 - TIPC_PUBLISHED = 0x1 - TIPC_RESERVED_TYPES = 0x40 - TIPC_RETDATA = 0x2 - TIPC_SERVICE_ADDR = 0x2 - TIPC_SERVICE_RANGE = 0x1 - TIPC_SOCKET_ADDR = 0x3 - TIPC_SOCK_RECVQ_DEPTH = 0x84 - TIPC_SOCK_RECVQ_USED = 0x89 - TIPC_SRC_DROPPABLE = 0x80 - TIPC_SUBSCR_TIMEOUT = 0x3 - TIPC_SUB_CANCEL = 0x4 - TIPC_SUB_PORTS = 0x1 - TIPC_SUB_SERVICE = 0x2 - TIPC_TOP_SRV = 0x1 - TIPC_WAIT_FOREVER = 0xffffffff - TIPC_WITHDRAWN = 0x2 - TIPC_ZONE_BITS = 0x8 - TIPC_ZONE_CLUSTER_MASK = 0xfffff000 - TIPC_ZONE_MASK = 0xff000000 - TIPC_ZONE_OFFSET = 0x18 - TIPC_ZONE_SCOPE = 0x1 - TIPC_ZONE_SIZE = 0xff - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x8000 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = 0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x801054d5 - TUNDETACHFILTER = 0x801054d6 - TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x401054db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x10 - VEOL = 0x11 - VEOL2 = 0x6 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x4 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VSWTCH = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - 
WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_OPTIONS = 0x8 - XDP_OPTIONS_ZEROCOPY = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - Z3FOLD_MAGIC = 0x33 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x40081270 + BLKBSZSET = 0x80081271 + 
BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x80 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0xe + F_GETLK64 = 0xe + F_GETOWN = 0x17 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x18 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x100 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x80 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x800 + MAP_ANONYMOUS = 0x800 + MAP_DENYWRITE = 0x2000 + MAP_EXECUTABLE = 0x4000 + MAP_GROWSDOWN = 0x1000 + MAP_HUGETLB = 0x80000 + MAP_LOCKED = 0x8000 + MAP_NONBLOCK = 0x20000 + MAP_NORESERVE = 0x400 + MAP_POPULATE = 0x10000 + MAP_RENAME = 0x800 + MAP_STACK = 0x40000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x8 + O_ASYNC = 0x1000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x100 + O_DIRECT = 0x8000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x10 + O_EXCL = 0x400 + O_FSYNC = 0x4010 + O_LARGEFILE = 0x0 + O_NDELAY = 0x80 + O_NOATIME = 0x40000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x80 + O_PATH = 0x200000 + O_RSYNC = 0x4010 + O_SYNC = 0x4010 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x8010744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 
0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80107447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFPREGS = 0xe + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_GET_THREAD_AREA_3264 = 0xc4 + PTRACE_GET_WATCH_REGS = 0xd0 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_PEEKDATA_3264 = 0xc1 + PTRACE_PEEKTEXT_3264 = 0xc0 + PTRACE_POKEDATA_3264 = 0xc3 + PTRACE_POKETEXT_3264 = 0xc2 + PTRACE_SETFPREGS = 0xf + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SET_WATCH_REGS = 0xd1 + RLIMIT_AS = 0x6 + RLIMIT_MEMLOCK = 0x9 + RLIMIT_NOFILE = 0x5 + RLIMIT_NPROC = 0x8 + RLIMIT_RSS = 0x7 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x80 + SIOCATMARK = 0x40047307 + SIOCGPGRP = 0x40047309 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x1 + SOCK_NONBLOCK = 0x80 + SOCK_STREAM = 0x2 + SOL_SOCKET = 0xffff + SO_ACCEPTCONN = 0x1009 + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x11 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1e + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x1f + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 + SO_STYLE = 0x1008 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x1008 + SO_WIFI_STATUS = 
0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCGETS2 = 0x4030542a + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSBRKP = 0x5486 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETS2 = 0x8030542b + TCSETSF = 0x5410 + TCSETSF2 = 0x8030542d + TCSETSW = 0x540f + TCSETSW2 = 0x8030542c + TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 + TIOCCBRK = 0x5428 + TIOCCONS = 0x80047478 + TIOCEXCL = 0x740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x5492 + TIOCGISO7816 = 0x40285442 + TIOCGLCKTRMIOS = 0x548b + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x4020542e + TIOCGSERIAL = 0x5484 + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5481 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x467f + TIOCLINUX = 0x5483 + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMIWAIT = 0x5491 + TIOCMSET = 0x741a + TIOCM_CAR = 0x100 + TIOCM_CD = 0x100 + TIOCM_CTS = 0x40 + TIOCM_DSR = 0x400 + TIOCM_RI = 0x200 + TIOCM_RNG = 0x200 + TIOCM_SR = 0x20 + TIOCM_ST = 0x10 + TIOCNOTTY = 0x5471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7472 + TIOCPKT = 0x5470 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x5480 + TIOCSERCONFIG = 0x5488 + TIOCSERGETLSR = 0x548e + TIOCSERGETMULTI = 0x548f + TIOCSERGSTRUCT = 0x548d + TIOCSERGWILD = 0x5489 + TIOCSERSETMULTI = 0x5490 + TIOCSERSWILD = 0x548a + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIG = 0x80045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x548c + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0xc020542f + TIOCSSERIAL = 0x5485 + TIOCSSOFTCAR = 0x5482 + TIOCSTI = 0x5472 + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x8000 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0xd + VEOF = 0x10 + VEOL = 0x11 + VEOL2 = 0x6 + VMIN = 0x4 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VSWTCH = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 
0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x7d) EADDRNOTAVAIL = syscall.Errno(0x7e) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x7c) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x95) EBADE = syscall.Errno(0x32) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x51) EBADMSG = syscall.Errno(0x4d) EBADR = syscall.Errno(0x33) EBADRQC = syscall.Errno(0x36) EBADSLT = syscall.Errno(0x37) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x9e) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x25) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x82) @@ -2799,12 +504,8 @@ const ( EDEADLK = syscall.Errno(0x2d) EDEADLOCK = syscall.Errno(0x38) EDESTADDRREQ = syscall.Errno(0x60) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x46d) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x93) EHOSTUNREACH = syscall.Errno(0x94) EHWPOISON = syscall.Errno(0xa8) @@ -2812,11 +513,7 @@ const ( EILSEQ = syscall.Errno(0x58) EINIT = syscall.Errno(0x8d) EINPROGRESS = syscall.Errno(0x96) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x85) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x8b) EKEYEXPIRED = syscall.Errno(0xa2) EKEYREJECTED = syscall.Errno(0xa4) @@ -2833,8 +530,6 @@ const ( ELNRNG = syscall.Errno(0x29) ELOOP = syscall.Errno(0x5a) EMEDIUMTYPE = syscall.Errno(0xa0) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x61) EMULTIHOP = syscall.Errno(0x4a) ENAMETOOLONG = syscall.Errno(0x4e) @@ -2842,100 +537,68 @@ const ( ENETDOWN = syscall.Errno(0x7f) ENETRESET = syscall.Errno(0x81) ENETUNREACH = syscall.Errno(0x80) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x35) ENOBUFS = syscall.Errno(0x84) ENOCSI = syscall.Errno(0x2b) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0xa1) ENOLCK = syscall.Errno(0x2e) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x9f) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x23) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x63) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x59) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x86) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x5d) ENOTNAM = syscall.Errno(0x89) ENOTRECOVERABLE = syscall.Errno(0xa6) ENOTSOCK = syscall.Errno(0x5f) ENOTSUP = syscall.Errno(0x7a) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x50) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x7a) EOVERFLOW = syscall.Errno(0x4f) EOWNERDEAD = syscall.Errno(0xa5) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x7b) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x78) EPROTOTYPE = syscall.Errno(0x62) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x52) EREMDEV = syscall.Errno(0x8e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x8c) ERESTART = 
syscall.Errno(0x5b) ERFKILL = syscall.Errno(0xa7) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x8f) ESOCKTNOSUPPORT = syscall.Errno(0x79) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x97) ESTRPIPE = syscall.Errno(0x5c) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x91) ETOOMANYREFS = syscall.Errno(0x90) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x87) EUNATCH = syscall.Errno(0x2a) EUSERS = syscall.Errno(0x5e) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x34) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0xa) SIGCHLD = syscall.Signal(0x12) SIGCLD = syscall.Signal(0x12) SIGCONT = syscall.Signal(0x19) SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x16) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x16) SIGPROF = syscall.Signal(0x1d) SIGPWR = syscall.Signal(0x13) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTOP = syscall.Signal(0x17) SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x18) SIGTTIN = syscall.Signal(0x1a) SIGTTOU = syscall.Signal(0x1b) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index aabe5e424..0ddf9d5fe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -11,2786 +11,491 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - 
ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x40041270 - BLKBSZSET = 0x80041271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40041272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - 
BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TEST_RND_HI32 = 0x4 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 
- CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DMA_BUF_MAGIC = 0x444d4142 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x80 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LLDP = 0x88cc - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 
- ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x2000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL 
= 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x21 - F_GETLK64 = 0x21 - F_GETOWN = 0x17 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_FUTURE_WRITE = 0x10 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x22 - F_SETLK64 = 0x22 - F_SETLKW = 0x23 - F_SETLKW64 = 0x23 - F_SETOWN = 0x18 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x100 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - 
IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x80 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_ROUTER_ALERT_ISOLATE = 0x1e - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT 
= 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CAPABILITIES = 0x1f - KEYCTL_CAPS0_BIG_KEY = 0x10 - KEYCTL_CAPS0_CAPABILITIES = 0x1 - KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 - KEYCTL_CAPS0_INVALIDATE = 0x20 - KEYCTL_CAPS0_MOVE = 0x80 - KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 - KEYCTL_CAPS0_PUBLIC_KEY = 0x8 - KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 - KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 - KEYCTL_CAPS1_NS_KEY_TAG = 0x2 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_MOVE = 0x1e - KEYCTL_MOVE_EXCL = 0x1 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - 
KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x800 - MAP_ANONYMOUS = 0x800 - MAP_DENYWRITE = 0x2000 - MAP_EXECUTABLE = 0x4000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x1000 - MAP_HUGETLB = 0x80000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x8000 - MAP_NONBLOCK = 0x20000 - MAP_NORESERVE = 0x400 - MAP_POPULATE = 0x10000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x800 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x40000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x1 - MCL_FUTURE = 0x2 - MCL_ONFAULT = 0x4 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - 
MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFDBITS = 0x20 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - NS_GET_NSTYPE = 0x2000b703 - 
NS_GET_OWNER_UID = 0x2000b704 - NS_GET_PARENT = 0x2000b702 - NS_GET_USERNS = 0x2000b701 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x1000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x100 - O_DIRECT = 0x8000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x10 - O_EXCL = 0x400 - O_FSYNC = 0x4010 - O_LARGEFILE = 0x2000 - O_NDELAY = 0x80 - O_NOATIME = 0x40000 - O_NOCTTY = 0x800 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x80 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x4010 - O_SYNC = 0x4010 - O_TMPFILE = 0x410000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40042407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc004240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80042406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4008743f - PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80087446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x800c744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80087447 - 
PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 - PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - 
PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_SYSCALL_INFO = 0x420e - PTRACE_GET_THREAD_AREA = 0x19 - PTRACE_GET_THREAD_AREA_3264 = 0xc4 - PTRACE_GET_WATCH_REGS = 0xd0 - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_OLDSETOPTIONS = 0x15 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKDATA_3264 = 0xc1 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKTEXT_3264 = 0xc0 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKEDATA_3264 = 0xc3 - PTRACE_POKETEXT = 0x4 - PTRACE_POKETEXT_3264 = 0xc2 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SET_THREAD_AREA = 0x1a - PTRACE_SET_WATCH_REGS = 0xd1 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_SYSCALL_INFO_ENTRY = 0x1 - PTRACE_SYSCALL_INFO_EXIT = 0x2 - PTRACE_SYSCALL_INFO_NONE = 0x0 - PTRACE_SYSCALL_INFO_SECCOMP = 0x3 - PTRACE_TRACEME = 0x0 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x6 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x9 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x5 - RLIMIT_NPROC = 0x8 - RLIMIT_RSS = 0x7 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4004700d - RTC_EPOCH_SET = 0x8004700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x4004700b - RTC_IRQP_SET = 0x8004700c - 
RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x401c7011 - RTC_PLL_SET = 0x801c7012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNEXTHOP = 0x69 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNEXTHOP = 0x6a - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNEXTHOP = 0x68 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x17 - RTM_NR_MSGTYPES = 0x5c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - 
SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x80 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x40047307 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGETLINKNAME = 0x89e0 - SIOCGETNODEID = 0x89e1 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x40047309 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x467f - SIOCOUTQ = 0x7472 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x80047308 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x1 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x80 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x2 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c 
- SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1009 - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DETACH_REUSEPORT_BPF = 0x44 - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x11 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x12 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x1004 - SO_RCVTIMEO = 0x1006 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x1006 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x1f - SO_SNDLOWAT = 0x1003 - SO_SNDTIMEO = 0x1005 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x1005 - SO_STYLE = 0x1008 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x1008 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 
0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x5407 - TCGETA = 0x5401 - TCGETS = 0x540d - TCGETS2 = 0x4030542a - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x5410 - TCSBRK = 0x5405 - TCSBRKP = 0x5486 - TCSETA = 0x5402 - TCSETAF = 0x5404 - TCSETAW = 0x5403 - TCSETS = 0x540e - TCSETS2 = 0x8030542b - TCSETSF = 0x5410 - TCSETSF2 = 0x8030542d - TCSETSW = 0x540f - TCSETSW2 = 0x8030542c - TCXONC = 0x5406 - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x80047478 - TIOCEXCL = 0x740d - TIOCGDEV = 0x40045432 - TIOCGETD = 0x7400 - TIOCGETP = 0x7408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x5492 - TIOCGISO7816 = 0x40285442 - TIOCGLCKTRMIOS = 0x548b - TIOCGLTC = 0x7474 - TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 - TIOCGRS485 = 0x4020542e - TIOCGSERIAL = 0x5484 - TIOCGSID = 0x7416 - TIOCGSOFTCAR = 0x5481 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x467f - TIOCLINUX = 0x5483 - TIOCMBIC = 0x741c - TIOCMBIS = 0x741b - TIOCMGET = 0x741d - TIOCMIWAIT = 0x5491 - TIOCMSET = 0x741a - TIOCM_CAR = 0x100 - TIOCM_CD = 0x100 - TIOCM_CTS = 0x40 - TIOCM_DSR = 0x400 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x200 - TIOCM_RNG = 0x200 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x20 - TIOCM_ST = 0x10 - TIOCNOTTY = 0x5471 - TIOCNXCL = 0x740e - TIOCOUTQ = 0x7472 - TIOCPKT = 0x5470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - 
TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x5480 - TIOCSERCONFIG = 0x5488 - TIOCSERGETLSR = 0x548e - TIOCSERGETMULTI = 0x548f - TIOCSERGSTRUCT = 0x548d - TIOCSERGWILD = 0x5489 - TIOCSERSETMULTI = 0x5490 - TIOCSERSWILD = 0x548a - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x7401 - TIOCSETN = 0x740a - TIOCSETP = 0x7409 - TIOCSIG = 0x80045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x548c - TIOCSLTC = 0x7475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0xc020542f - TIOCSSERIAL = 0x5485 - TIOCSSOFTCAR = 0x5482 - TIOCSTI = 0x5472 - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TIPC_ADDR_ID = 0x3 - TIPC_ADDR_MCAST = 0x1 - TIPC_ADDR_NAME = 0x2 - TIPC_ADDR_NAMESEQ = 0x1 - TIPC_CFG_SRV = 0x0 - TIPC_CLUSTER_BITS = 0xc - TIPC_CLUSTER_MASK = 0xfff000 - TIPC_CLUSTER_OFFSET = 0xc - TIPC_CLUSTER_SIZE = 0xfff - TIPC_CONN_SHUTDOWN = 0x5 - TIPC_CONN_TIMEOUT = 0x82 - TIPC_CRITICAL_IMPORTANCE = 0x3 - TIPC_DESTNAME = 0x3 - TIPC_DEST_DROPPABLE = 0x81 - TIPC_ERRINFO = 0x1 - TIPC_ERR_NO_NAME = 0x1 - TIPC_ERR_NO_NODE = 0x3 - TIPC_ERR_NO_PORT = 0x2 - TIPC_ERR_OVERLOAD = 0x4 - TIPC_GROUP_JOIN = 0x87 - TIPC_GROUP_LEAVE = 0x88 - TIPC_GROUP_LOOPBACK = 0x1 - TIPC_GROUP_MEMBER_EVTS = 0x2 - TIPC_HIGH_IMPORTANCE = 0x2 - TIPC_IMPORTANCE = 0x7f - TIPC_LINK_STATE = 0x2 - TIPC_LOW_IMPORTANCE = 0x0 - TIPC_MAX_BEARER_NAME = 0x20 - TIPC_MAX_IF_NAME = 0x10 - TIPC_MAX_LINK_NAME = 0x44 - TIPC_MAX_MEDIA_NAME = 0x10 - TIPC_MAX_USER_MSG_SIZE = 0x101d0 - TIPC_MCAST_BROADCAST = 0x85 - TIPC_MCAST_REPLICAST = 0x86 - TIPC_MEDIUM_IMPORTANCE = 0x1 - TIPC_NODEID_LEN = 0x10 - TIPC_NODE_BITS = 0xc - TIPC_NODE_MASK = 0xfff - TIPC_NODE_OFFSET = 0x0 - TIPC_NODE_RECVQ_DEPTH = 0x83 - TIPC_NODE_SIZE = 0xfff - TIPC_NODE_STATE = 0x0 - TIPC_OK = 0x0 - TIPC_PUBLISHED = 0x1 - TIPC_RESERVED_TYPES = 0x40 - TIPC_RETDATA = 0x2 - TIPC_SERVICE_ADDR = 0x2 - TIPC_SERVICE_RANGE = 0x1 - TIPC_SOCKET_ADDR = 0x3 - TIPC_SOCK_RECVQ_DEPTH = 0x84 - TIPC_SOCK_RECVQ_USED = 0x89 - TIPC_SRC_DROPPABLE = 0x80 - TIPC_SUBSCR_TIMEOUT = 0x3 - TIPC_SUB_CANCEL = 0x4 - TIPC_SUB_PORTS = 0x1 - TIPC_SUB_SERVICE = 0x2 - TIPC_TOP_SRV = 0x1 - TIPC_WAIT_FOREVER = 0xffffffff - TIPC_WITHDRAWN = 0x2 - TIPC_ZONE_BITS = 0x8 - TIPC_ZONE_CLUSTER_MASK = 0xfffff000 - TIPC_ZONE_MASK = 0xff000000 - TIPC_ZONE_OFFSET = 0x18 - TIPC_ZONE_SCOPE = 0x1 - TIPC_ZONE_SIZE = 0xff - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x8000 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = 0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x800854d5 - TUNDETACHFILTER = 0x800854d6 - TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x400854db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - 
TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x10 - VEOL = 0x11 - VEOL2 = 0x6 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x4 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VSWTCH = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - 
WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x20 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_OPTIONS = 0x8 - XDP_OPTIONS_ZEROCOPY = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - Z3FOLD_MAGIC = 0x33 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x40041270 + BLKBSZSET = 0x80041271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40041272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x80 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x2000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40046601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0x21 + F_GETLK64 = 0x21 + F_GETOWN = 0x17 + F_RDLCK = 0x0 + F_SETLK = 0x22 + F_SETLK64 = 0x22 + F_SETLKW = 0x23 + F_SETLKW64 = 0x23 + F_SETOWN = 0x18 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x100 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x80 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x800 + MAP_ANONYMOUS = 0x800 + MAP_DENYWRITE = 0x2000 + MAP_EXECUTABLE = 0x4000 + MAP_GROWSDOWN = 0x1000 + MAP_HUGETLB = 0x80000 + MAP_LOCKED = 0x8000 + MAP_NONBLOCK = 0x20000 + MAP_NORESERVE = 0x400 + MAP_POPULATE = 0x10000 + MAP_RENAME = 0x800 + MAP_STACK = 0x40000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x20 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x8 + O_ASYNC = 
0x1000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x100 + O_DIRECT = 0x8000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x10 + O_EXCL = 0x400 + O_FSYNC = 0x4010 + O_LARGEFILE = 0x2000 + O_NDELAY = 0x80 + O_NOATIME = 0x40000 + O_NOCTTY = 0x800 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x80 + O_PATH = 0x200000 + O_RSYNC = 0x4010 + O_SYNC = 0x4010 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40042407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8004240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc004240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80042406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4008743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80087446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x800c744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80087447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e + PR_SET_PTRACER_ANY = 0xffffffff + PTRACE_GETFPREGS = 0xe + PTRACE_GET_THREAD_AREA = 0x19 + PTRACE_GET_THREAD_AREA_3264 = 0xc4 + PTRACE_GET_WATCH_REGS = 0xd0 + PTRACE_OLDSETOPTIONS = 0x15 + PTRACE_PEEKDATA_3264 = 0xc1 + PTRACE_PEEKTEXT_3264 = 0xc0 + PTRACE_POKEDATA_3264 = 0xc3 + PTRACE_POKETEXT_3264 = 0xc2 + PTRACE_SETFPREGS = 0xf + PTRACE_SET_THREAD_AREA = 0x1a + PTRACE_SET_WATCH_REGS = 0xd1 + RLIMIT_AS = 0x6 + RLIMIT_MEMLOCK = 0x9 + RLIMIT_NOFILE = 0x5 + RLIMIT_NPROC = 0x8 + RLIMIT_RSS = 0x7 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4004700d + RTC_EPOCH_SET = 0x8004700e + RTC_IRQP_READ = 0x4004700b + RTC_IRQP_SET = 0x8004700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x401c7011 + RTC_PLL_SET = 0x801c7012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x80 + SIOCATMARK = 0x40047307 + SIOCGPGRP = 0x40047309 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x467f + SIOCOUTQ = 0x7472 + SIOCSPGRP = 0x80047308 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x1 + SOCK_NONBLOCK = 0x80 + SOCK_STREAM = 0x2 + SOL_SOCKET = 
0xffff + SO_ACCEPTCONN = 0x1009 + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x11 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x12 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1e + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x1004 + SO_RCVTIMEO = 0x1006 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x1006 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x1f + SO_SNDLOWAT = 0x1003 + SO_SNDTIMEO = 0x1005 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x1005 + SO_STYLE = 0x1008 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x1008 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x5407 + TCGETA = 0x5401 + TCGETS = 0x540d + TCGETS2 = 0x4030542a + TCSAFLUSH = 0x5410 + TCSBRK = 0x5405 + TCSBRKP = 0x5486 + TCSETA = 0x5402 + TCSETAF = 0x5404 + TCSETAW = 0x5403 + TCSETS = 0x540e + TCSETS2 = 0x8030542b + TCSETSF = 0x5410 + TCSETSF2 = 0x8030542d + TCSETSW = 0x540f + TCSETSW2 = 0x8030542c + TCXONC = 0x5406 + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x80 + TIOCCBRK = 0x5428 + TIOCCONS = 0x80047478 + TIOCEXCL = 0x740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x7400 + TIOCGETP = 0x7408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x5492 + TIOCGISO7816 = 0x40285442 + TIOCGLCKTRMIOS = 0x548b + TIOCGLTC = 0x7474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x4020542e + TIOCGSERIAL = 0x5484 + TIOCGSID = 0x7416 + TIOCGSOFTCAR = 0x5481 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x467f + TIOCLINUX = 0x5483 + TIOCMBIC = 0x741c + TIOCMBIS = 0x741b + TIOCMGET = 0x741d + TIOCMIWAIT = 0x5491 + TIOCMSET = 0x741a + TIOCM_CAR = 0x100 + TIOCM_CD = 0x100 + TIOCM_CTS = 0x40 + TIOCM_DSR = 0x400 + TIOCM_RI = 0x200 + TIOCM_RNG = 0x200 + TIOCM_SR = 0x20 + TIOCM_ST = 0x10 + TIOCNOTTY = 0x5471 + TIOCNXCL = 0x740e + TIOCOUTQ = 0x7472 + TIOCPKT = 0x5470 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x5480 + TIOCSERCONFIG = 0x5488 + TIOCSERGETLSR = 0x548e + TIOCSERGETMULTI = 0x548f + TIOCSERGSTRUCT = 0x548d + TIOCSERGWILD = 0x5489 + TIOCSERSETMULTI = 0x5490 + TIOCSERSWILD = 0x548a + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x7401 + TIOCSETN = 0x740a + TIOCSETP = 0x7409 + TIOCSIG = 0x80045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x548c + TIOCSLTC = 0x7475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0xc020542f + TIOCSSERIAL = 0x5485 + TIOCSSOFTCAR = 0x5482 + TIOCSTI = 0x5472 + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x8000 + TUNATTACHFILTER = 0x800854d5 + TUNDETACHFILTER = 0x800854d6 + 
TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x400854db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0xd + VEOF = 0x10 + VEOL = 0x11 + VEOL2 = 0x6 + VMIN = 0x4 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VSWTCH = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x20 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x7d) EADDRNOTAVAIL = syscall.Errno(0x7e) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x7c) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x95) EBADE = syscall.Errno(0x32) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x51) EBADMSG = syscall.Errno(0x4d) EBADR = syscall.Errno(0x33) EBADRQC = syscall.Errno(0x36) EBADSLT = syscall.Errno(0x37) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x9e) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x25) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x82) @@ -2799,12 +504,8 @@ const ( EDEADLK = syscall.Errno(0x2d) EDEADLOCK = syscall.Errno(0x38) EDESTADDRREQ = syscall.Errno(0x60) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x46d) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x93) EHOSTUNREACH = syscall.Errno(0x94) EHWPOISON = syscall.Errno(0xa8) @@ -2812,11 +513,7 @@ const ( EILSEQ = syscall.Errno(0x58) EINIT = syscall.Errno(0x8d) EINPROGRESS = syscall.Errno(0x96) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x85) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x8b) EKEYEXPIRED = syscall.Errno(0xa2) EKEYREJECTED = syscall.Errno(0xa4) @@ -2833,8 +530,6 @@ const ( ELNRNG = syscall.Errno(0x29) ELOOP = syscall.Errno(0x5a) EMEDIUMTYPE = syscall.Errno(0xa0) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x61) EMULTIHOP = syscall.Errno(0x4a) ENAMETOOLONG = syscall.Errno(0x4e) @@ -2842,100 +537,68 @@ 
const ( ENETDOWN = syscall.Errno(0x7f) ENETRESET = syscall.Errno(0x81) ENETUNREACH = syscall.Errno(0x80) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x35) ENOBUFS = syscall.Errno(0x84) ENOCSI = syscall.Errno(0x2b) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0xa1) ENOLCK = syscall.Errno(0x2e) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x9f) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x23) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x63) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x59) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x86) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x5d) ENOTNAM = syscall.Errno(0x89) ENOTRECOVERABLE = syscall.Errno(0xa6) ENOTSOCK = syscall.Errno(0x5f) ENOTSUP = syscall.Errno(0x7a) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x50) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x7a) EOVERFLOW = syscall.Errno(0x4f) EOWNERDEAD = syscall.Errno(0xa5) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x7b) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x78) EPROTOTYPE = syscall.Errno(0x62) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x52) EREMDEV = syscall.Errno(0x8e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x8c) ERESTART = syscall.Errno(0x5b) ERFKILL = syscall.Errno(0xa7) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x8f) ESOCKTNOSUPPORT = syscall.Errno(0x79) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x97) ESTRPIPE = syscall.Errno(0x5c) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x91) ETOOMANYREFS = syscall.Errno(0x90) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x87) EUNATCH = syscall.Errno(0x2a) EUSERS = syscall.Errno(0x5e) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x34) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0xa) SIGCHLD = syscall.Signal(0x12) SIGCLD = syscall.Signal(0x12) SIGCONT = syscall.Signal(0x19) SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x16) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x16) SIGPROF = syscall.Signal(0x1d) SIGPWR = syscall.Signal(0x13) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTOP = syscall.Signal(0x17) SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x18) SIGTTIN = syscall.Signal(0x1a) SIGTTOU = syscall.Signal(0x1b) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 272279128..a93ffc180 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -11,2845 +11,551 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC 
= 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x17 - B110 = 0x3 - B115200 = 0x11 - B1152000 = 0x18 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x19 - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x1a - B230400 = 0x12 - B2400 = 0xb - B2500000 = 0x1b - B300 = 0x7 - B3000000 = 0x1c - B3500000 = 0x1d - B38400 = 0xf - B4000000 = 0x1e - B460800 = 0x13 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x14 - B57600 = 0x10 - B576000 = 0x15 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x16 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x40081270 - BLKBSZSET = 0x80081271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40081272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - 
BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1f - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TEST_RND_HI32 = 0x4 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - 
CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0xff - CBAUDEX = 0x0 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0xff0000 - CLOCAL = 0x8000 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x3000 - CREAD = 0x800 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIGNAL = 0xff - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DMA_BUF_MAGIC = 0x444d4142 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 
0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LLDP = 0x88cc - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - 
FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 - FLUSHO = 0x800000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x5 - F_GETLK64 = 0xc - F_GETOWN = 0x9 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_FUTURE_WRITE = 0x10 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0xd - F_SETLKW = 0x7 - F_SETLKW64 = 0xe - F_SETOWN = 0x8 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - 
HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x4000 - IBSHIFT = 0x10 - ICANON = 0x100 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x400 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x800 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - 
IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_ROUTER_ALERT_ISOLATE = 0x1e - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x80 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x1000 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CAPABILITIES = 0x1f - 
KEYCTL_CAPS0_BIG_KEY = 0x10 - KEYCTL_CAPS0_CAPABILITIES = 0x1 - KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 - KEYCTL_CAPS0_INVALIDATE = 0x20 - KEYCTL_CAPS0_MOVE = 0x80 - KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 - KEYCTL_CAPS0_PUBLIC_KEY = 0x8 - KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 - KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 - KEYCTL_CAPS1_NS_KEY_TAG = 0x2 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_MOVE = 0x1e - KEYCTL_MOVE_EXCL = 0x1 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x100 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x80 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x40 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x20000 - 
MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x2000 - MCL_FUTURE = 0x4000 - MCL_ONFAULT = 0x8000 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFDBITS = 0x40 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - 
NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x300 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80000000 - NSFS_MAGIC = 0x6e736673 - NS_GET_NSTYPE = 0x2000b703 - NS_GET_OWNER_UID = 0x2000b704 - NS_GET_PARENT = 0x2000b702 - NS_GET_USERNS = 0x2000b701 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x4 - ONLCR = 0x2 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x400 - O_ASYNC = 0x2000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x40 - O_DIRECT = 0x20000 - O_DIRECTORY = 0x4000 - O_DSYNC = 0x1000 - O_EXCL = 0x80 - O_FSYNC = 0x101000 - O_LARGEFILE = 0x0 - O_NDELAY = 0x800 - O_NOATIME = 0x40000 - O_NOCTTY = 0x100 - O_NOFOLLOW = 0x8000 - O_NONBLOCK = 0x800 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x101000 - O_SYNC = 0x101000 - O_TMPFILE = 0x404000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf 
- PARENB = 0x1000 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4010743f - PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80107446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x8010744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80107447 - PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_SAO = 0x10 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - 
PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 - PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETEVRREGS = 0x14 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - PTRACE_GETREGS64 = 0x16 - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GETVRREGS = 0x12 - PTRACE_GETVSRREGS = 0x1b - PTRACE_GET_DEBUGREG = 0x19 - PTRACE_GET_SYSCALL_INFO = 0x420e - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKETEXT = 0x4 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETEVRREGS = 0x15 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGS64 = 0x17 - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SETVRREGS = 0x13 - PTRACE_SETVSRREGS = 0x1c - PTRACE_SET_DEBUGREG = 0x1a - PTRACE_SINGLEBLOCK = 0x100 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_SYSCALL_INFO_ENTRY = 0x1 - PTRACE_SYSCALL_INFO_EXIT = 0x2 - PTRACE_SYSCALL_INFO_NONE = 0x0 - PTRACE_SYSCALL_INFO_SECCOMP = 0x3 - PTRACE_SYSEMU = 0x1d - PTRACE_SYSEMU_SINGLESTEP = 0x1e - PTRACE_TRACEME = 0x0 - PT_CCR = 0x26 - PT_CTR = 0x23 - PT_DAR = 0x29 - PT_DSCR = 0x2c - PT_DSISR = 0x2a - PT_FPR0 = 0x30 - PT_FPSCR = 0x50 - PT_LNK = 0x24 - PT_MSR = 0x21 - PT_NIP = 0x20 - 
PT_ORIG_R3 = 0x22 - PT_R0 = 0x0 - PT_R1 = 0x1 - PT_R10 = 0xa - PT_R11 = 0xb - PT_R12 = 0xc - PT_R13 = 0xd - PT_R14 = 0xe - PT_R15 = 0xf - PT_R16 = 0x10 - PT_R17 = 0x11 - PT_R18 = 0x12 - PT_R19 = 0x13 - PT_R2 = 0x2 - PT_R20 = 0x14 - PT_R21 = 0x15 - PT_R22 = 0x16 - PT_R23 = 0x17 - PT_R24 = 0x18 - PT_R25 = 0x19 - PT_R26 = 0x1a - PT_R27 = 0x1b - PT_R28 = 0x1c - PT_R29 = 0x1d - PT_R3 = 0x3 - PT_R30 = 0x1e - PT_R31 = 0x1f - PT_R4 = 0x4 - PT_R5 = 0x5 - PT_R6 = 0x6 - PT_R7 = 0x7 - PT_R8 = 0x8 - PT_R9 = 0x9 - PT_REGS_COUNT = 0x2c - PT_RESULT = 0x2b - PT_SOFTE = 0x27 - PT_TRAP = 0x28 - PT_VR0 = 0x52 - PT_VRSAVE = 0x94 - PT_VSCR = 0x93 - PT_VSR0 = 0x96 - PT_VSR31 = 0xd4 - PT_XER = 0x25 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4008700d - RTC_EPOCH_SET = 0x8008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x4008700b - RTC_IRQP_SET = 0x8008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x40207011 - RTC_PLL_SET = 0x80207012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - 
RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNEXTHOP = 0x69 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNEXTHOP = 0x6a - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNEXTHOP = 0x68 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x17 - RTM_NR_MSGTYPES = 0x5c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x800 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGETLINKNAME = 0x89e0 - SIOCGETNODEID = 0x89e1 - 
SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x4004667f - SIOCOUTQ = 0x40047473 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x800 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0x1 - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1e - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x6 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DETACH_REUSEPORT_BPF = 0x44 - SO_DOMAIN = 0x27 - SO_DONTROUTE = 0x5 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 
0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x4 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x9 - SO_LINGER = 0xd - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0xa - SO_PASSCRED = 0x14 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x15 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1f - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x26 - SO_RCVBUF = 0x8 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x10 - SO_RCVTIMEO = 0x12 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x12 - SO_REUSEADDR = 0x2 - SO_REUSEPORT = 0xf - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x7 - SO_SNDBUFFORCE = 0x20 - SO_SNDLOWAT = 0x11 - SO_SNDTIMEO = 0x13 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x13 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x3 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x400 - TAB2 = 0x800 - TAB3 = 0xc00 - TABDLY = 0xc00 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x2000741f - TCGETA = 0x40147417 - TCGETS = 0x402c7413 - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - 
TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x2000741d - TCSBRKP = 0x5425 - TCSETA = 0x80147418 - TCSETAF = 0x8014741c - TCSETAW = 0x80147419 - TCSETS = 0x802c7414 - TCSETSF = 0x802c7416 - TCSETSW = 0x802c7415 - TCXONC = 0x2000741e - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x541d - TIOCEXCL = 0x540c - TIOCGDEV = 0x40045432 - TIOCGETC = 0x40067412 - TIOCGETD = 0x5424 - TIOCGETP = 0x40067408 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x40285442 - TIOCGLCKTRMIOS = 0x5456 - TIOCGLTC = 0x40067474 - TIOCGPGRP = 0x40047477 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40045430 - TIOCGPTPEER = 0x20005441 - TIOCGRS485 = 0x542e - TIOCGSERIAL = 0x541e - TIOCGSID = 0x5429 - TIOCGSOFTCAR = 0x5419 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x4004667f - TIOCLINUX = 0x541c - TIOCMBIC = 0x5417 - TIOCMBIS = 0x5416 - TIOCMGET = 0x5415 - TIOCMIWAIT = 0x545c - TIOCMSET = 0x5418 - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_LOOP = 0x8000 - TIOCM_OUT1 = 0x2000 - TIOCM_OUT2 = 0x4000 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x5422 - TIOCNXCL = 0x540d - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x5420 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x540e - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETC = 0x80067411 - TIOCSETD = 0x5423 - TIOCSETN = 0x8006740a - TIOCSETP = 0x80067409 - TIOCSIG = 0x80045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x5457 - TIOCSLTC = 0x80067475 - TIOCSPGRP = 0x80047476 - TIOCSPTLCK = 0x80045431 - TIOCSRS485 = 0x542f - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x541a - TIOCSTART = 0x2000746e - TIOCSTI = 0x5412 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x5437 - TIPC_ADDR_ID = 0x3 - TIPC_ADDR_MCAST = 0x1 - TIPC_ADDR_NAME = 0x2 - TIPC_ADDR_NAMESEQ = 0x1 - TIPC_CFG_SRV = 0x0 - TIPC_CLUSTER_BITS = 0xc - TIPC_CLUSTER_MASK = 0xfff000 - TIPC_CLUSTER_OFFSET = 0xc - TIPC_CLUSTER_SIZE = 0xfff - TIPC_CONN_SHUTDOWN = 0x5 - TIPC_CONN_TIMEOUT = 0x82 - TIPC_CRITICAL_IMPORTANCE = 0x3 - TIPC_DESTNAME = 0x3 - 
TIPC_DEST_DROPPABLE = 0x81 - TIPC_ERRINFO = 0x1 - TIPC_ERR_NO_NAME = 0x1 - TIPC_ERR_NO_NODE = 0x3 - TIPC_ERR_NO_PORT = 0x2 - TIPC_ERR_OVERLOAD = 0x4 - TIPC_GROUP_JOIN = 0x87 - TIPC_GROUP_LEAVE = 0x88 - TIPC_GROUP_LOOPBACK = 0x1 - TIPC_GROUP_MEMBER_EVTS = 0x2 - TIPC_HIGH_IMPORTANCE = 0x2 - TIPC_IMPORTANCE = 0x7f - TIPC_LINK_STATE = 0x2 - TIPC_LOW_IMPORTANCE = 0x0 - TIPC_MAX_BEARER_NAME = 0x20 - TIPC_MAX_IF_NAME = 0x10 - TIPC_MAX_LINK_NAME = 0x44 - TIPC_MAX_MEDIA_NAME = 0x10 - TIPC_MAX_USER_MSG_SIZE = 0x101d0 - TIPC_MCAST_BROADCAST = 0x85 - TIPC_MCAST_REPLICAST = 0x86 - TIPC_MEDIUM_IMPORTANCE = 0x1 - TIPC_NODEID_LEN = 0x10 - TIPC_NODE_BITS = 0xc - TIPC_NODE_MASK = 0xfff - TIPC_NODE_OFFSET = 0x0 - TIPC_NODE_RECVQ_DEPTH = 0x83 - TIPC_NODE_SIZE = 0xfff - TIPC_NODE_STATE = 0x0 - TIPC_OK = 0x0 - TIPC_PUBLISHED = 0x1 - TIPC_RESERVED_TYPES = 0x40 - TIPC_RETDATA = 0x2 - TIPC_SERVICE_ADDR = 0x2 - TIPC_SERVICE_RANGE = 0x1 - TIPC_SOCKET_ADDR = 0x3 - TIPC_SOCK_RECVQ_DEPTH = 0x84 - TIPC_SOCK_RECVQ_USED = 0x89 - TIPC_SRC_DROPPABLE = 0x80 - TIPC_SUBSCR_TIMEOUT = 0x3 - TIPC_SUB_CANCEL = 0x4 - TIPC_SUB_PORTS = 0x1 - TIPC_SUB_SERVICE = 0x2 - TIPC_TOP_SRV = 0x1 - TIPC_WAIT_FOREVER = 0xffffffff - TIPC_WITHDRAWN = 0x2 - TIPC_ZONE_BITS = 0x8 - TIPC_ZONE_CLUSTER_MASK = 0xfffff000 - TIPC_ZONE_MASK = 0xff000000 - TIPC_ZONE_OFFSET = 0x18 - TIPC_ZONE_SCOPE = 0x1 - TIPC_ZONE_SIZE = 0xff - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x400000 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = 0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x801054d5 - TUNDETACHFILTER = 0x801054d6 - TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x401054db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0x10 - VEOF = 0x4 - VEOL = 0x6 - VEOL2 = 0x8 - VERASE = 0x2 - VINTR = 0x0 - 
VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x5 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xb - VSTART = 0xd - VSTOP = 0xe - VSUSP = 0xc - VSWTC = 0x9 - VT0 = 0x0 - VT1 = 0x10000 - VTDLY = 0x10000 - VTIME = 0x7 - VWERASE = 0xa - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4000 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_OPTIONS = 0x8 - XDP_OPTIONS_ZEROCOPY = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 
- XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0xc00 - Z3FOLD_MAGIC = 0x33 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x17 + B115200 = 0x11 + B1152000 = 0x18 + B1500000 = 0x19 + B2000000 = 0x1a + B230400 = 0x12 + B2500000 = 0x1b + B3000000 = 0x1c + B3500000 = 0x1d + B4000000 = 0x1e + B460800 = 0x13 + B500000 = 0x14 + B57600 = 0x10 + B576000 = 0x15 + B921600 = 0x16 + BLKBSZGET = 0x40081270 + BLKBSZSET = 0x80081271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1f + BS1 = 0x8000 + BSDLY = 0x8000 + CBAUD = 0xff + CBAUDEX = 0x0 + CIBAUD = 0xff0000 + CLOCAL = 0x8000 + CR1 = 0x1000 + CR2 = 0x2000 + CR3 = 0x3000 + CRDLY = 0x3000 + CREAD = 0x800 + CS6 = 0x100 + CS7 = 0x200 + CS8 = 0x300 + CSIZE = 0x300 + CSTOPB = 0x400 + ECHOCTL = 0x40 + ECHOE = 0x2 + ECHOK = 0x4 + ECHOKE = 0x1 + ECHONL = 0x10 + ECHOPRT = 0x20 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000000 + FF1 = 0x4000 + FFDLY = 0x4000 + FLUSHO = 0x800000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0x5 + F_GETLK64 = 0xc + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0xd + F_SETLKW = 0x7 + F_SETLKW64 = 0xe + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x4000 + ICANON = 0x100 + IEXTEN = 0x400 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x80 + IUCLC = 0x1000 + IXOFF = 0x400 + IXON = 0x200 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x80 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x40 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x2000 + MCL_FUTURE = 0x4000 + MCL_ONFAULT = 0x8000 + NFDBITS = 0x40 + NL2 = 0x200 + NL3 = 0x300 + NLDLY = 0x300 + NOFLSH = 0x80000000 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x4 + ONLCR = 0x2 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x20000 + O_DIRECTORY = 0x4000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x8000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x404000 + O_TRUNC = 0x200 + PARENB = 0x1000 + PARODD = 0x2000 + PENDIN = 0x20000000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c 
+ PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP = 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x8010744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80107447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e + PROT_SAO = 0x10 + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETEVRREGS = 0x14 + PTRACE_GETFPREGS = 0xe + PTRACE_GETREGS64 = 0x16 + PTRACE_GETVRREGS = 0x12 + PTRACE_GETVSRREGS = 0x1b + PTRACE_GET_DEBUGREG = 0x19 + PTRACE_SETEVRREGS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETREGS64 = 0x17 + PTRACE_SETVRREGS = 0x13 + PTRACE_SETVSRREGS = 0x1c + PTRACE_SET_DEBUGREG = 0x1a + PTRACE_SINGLEBLOCK = 0x100 + PTRACE_SYSEMU = 0x1d + PTRACE_SYSEMU_SINGLESTEP = 0x1e + PT_CCR = 0x26 + PT_CTR = 0x23 + PT_DAR = 0x29 + PT_DSCR = 0x2c + PT_DSISR = 0x2a + PT_FPR0 = 0x30 + PT_FPSCR = 0x50 + PT_LNK = 0x24 + PT_MSR = 0x21 + PT_NIP = 0x20 + PT_ORIG_R3 = 0x22 + PT_R0 = 0x0 + PT_R1 = 0x1 + PT_R10 = 0xa + PT_R11 = 0xb + PT_R12 = 0xc + PT_R13 = 0xd + PT_R14 = 0xe + PT_R15 = 0xf + PT_R16 = 0x10 + PT_R17 = 0x11 + PT_R18 = 0x12 + PT_R19 = 0x13 + PT_R2 = 0x2 + PT_R20 = 0x14 + PT_R21 = 0x15 + PT_R22 = 0x16 + PT_R23 = 0x17 + PT_R24 = 0x18 + PT_R25 = 0x19 + PT_R26 = 0x1a + PT_R27 = 0x1b + PT_R28 = 0x1c + PT_R29 = 0x1d + PT_R3 = 0x3 + PT_R30 = 0x1e + PT_R31 = 0x1f + PT_R4 = 0x4 + PT_R5 = 0x5 + PT_R6 = 0x6 + PT_R7 = 0x7 + PT_R8 = 0x8 + PT_R9 = 0x9 + PT_REGS_COUNT = 0x2c + PT_RESULT = 0x2b + PT_SOFTE = 0x27 + PT_TRAP = 0x28 + PT_VR0 = 0x52 + PT_VRSAVE = 0x94 + PT_VSCR = 0x93 + PT_VSR0 = 0x96 + PT_VSR31 = 0xd4 + PT_XER = 0x25 + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x4004667f + SIOCOUTQ = 0x40047473 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + 
SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x14 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x15 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x10 + SO_RCVTIMEO = 0x12 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x12 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x11 + SO_SNDTIMEO = 0x13 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x13 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x400 + TAB2 = 0x800 + TAB3 = 0xc00 + TABDLY = 0xc00 + TCFLSH = 0x2000741f + TCGETA = 0x40147417 + TCGETS = 0x402c7413 + TCSAFLUSH = 0x2 + TCSBRK = 0x2000741d + TCSBRKP = 0x5425 + TCSETA = 0x80147418 + TCSETAF = 0x8014741c + TCSETAW = 0x80147419 + TCSETS = 0x802c7414 + TCSETSF = 0x802c7416 + TCSETSW = 0x802c7415 + TCXONC = 0x2000741e + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x40045432 + TIOCGETC = 0x40067412 + TIOCGETD = 0x5424 + TIOCGETP = 0x40067408 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x40285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGLTC = 0x40067474 + TIOCGPGRP = 0x40047477 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40045430 + TIOCGPTPEER = 0x20005441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x4004667f + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_LOOP = 0x8000 + TIOCM_OUT1 = 0x2000 + TIOCM_OUT2 = 0x4000 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETC = 0x80067411 + TIOCSETD = 0x5423 + TIOCSETN = 0x8006740a + TIOCSETP = 0x80067409 + TIOCSIG = 0x80045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSLTC = 0x80067475 + TIOCSPGRP = 0x80047476 + TIOCSPTLCK = 0x80045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTART = 0x2000746e + TIOCSTI = 0x5412 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x400000 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 
0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD = 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0x10 + VEOF = 0x4 + VEOL = 0x6 + VEOL2 = 0x8 + VMIN = 0x5 + VREPRINT = 0xb + VSTART = 0xd + VSTOP = 0xe + VSUSP = 0xc + VSWTC = 0x9 + VT1 = 0x10000 + VTDLY = 0x10000 + VTIME = 0x7 + VWERASE = 0xa + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x40 + XCASE = 0x4000 + XTABS = 0xc00 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x62) EADDRNOTAVAIL = syscall.Errno(0x63) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x61) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x72) EBADE = syscall.Errno(0x34) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x4d) EBADMSG = syscall.Errno(0x4a) EBADR = syscall.Errno(0x35) EBADRQC = syscall.Errno(0x38) EBADSLT = syscall.Errno(0x39) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7d) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x2c) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x67) @@ -2858,23 +564,15 @@ const ( EDEADLK = syscall.Errno(0x23) EDEADLOCK = syscall.Errno(0x3a) EDESTADDRREQ = syscall.Errno(0x59) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x7a) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x70) EHOSTUNREACH = syscall.Errno(0x71) EHWPOISON = syscall.Errno(0x85) EIDRM = syscall.Errno(0x2b) EILSEQ = syscall.Errno(0x54) EINPROGRESS = syscall.Errno(0x73) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x6a) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x7f) EKEYREJECTED = syscall.Errno(0x81) @@ -2891,8 +589,6 @@ const ( ELNRNG = syscall.Errno(0x30) ELOOP = syscall.Errno(0x28) EMEDIUMTYPE = syscall.Errno(0x7c) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x5a) EMULTIHOP = syscall.Errno(0x48) ENAMETOOLONG = syscall.Errno(0x24) @@ -2900,99 +596,67 @@ const ( ENETDOWN = syscall.Errno(0x64) ENETRESET = 
syscall.Errno(0x66) ENETUNREACH = syscall.Errno(0x65) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x37) ENOBUFS = syscall.Errno(0x69) ENOCSI = syscall.Errno(0x32) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x7e) ENOLCK = syscall.Errno(0x25) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x2a) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x6b) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x27) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x83) ENOTSOCK = syscall.Errno(0x58) ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x5f) EOVERFLOW = syscall.Errno(0x4b) EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x5d) EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x4e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x55) ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x6c) ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x74) ESTRPIPE = syscall.Errno(0x56) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x6e) ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x31) EUSERS = syscall.Errno(0x57) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x36) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0x7) SIGCHLD = syscall.Signal(0x11) SIGCLD = syscall.Signal(0x11) SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x1d) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x1d) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1e) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTKFLT = syscall.Signal(0x10) SIGSTOP = syscall.Signal(0x13) SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e33be416c..c1ea48b95 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -11,2845 +11,551 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - 
AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x17 - B110 = 0x3 - B115200 = 0x11 - B1152000 = 0x18 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x19 - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x1a - B230400 = 0x12 - B2400 = 0xb - B2500000 = 0x1b - B300 = 0x7 - B3000000 = 0x1c - B3500000 = 0x1d - B38400 = 0xf - B4000000 = 0x1e - B460800 = 0x13 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x14 - B57600 = 0x10 - B576000 = 0x15 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x16 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x40081270 - BLKBSZSET = 0x80081271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40081272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1f - BPF_A = 
0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TEST_RND_HI32 = 0x4 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x8000 - BSDLY = 0x8000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 
- CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 - CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0xff - CBAUDEX = 0x0 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0xff0000 - CLOCAL = 0x8000 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x1000 - CR2 = 0x2000 - CR3 = 0x3000 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x3000 - CREAD = 0x800 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x100 - CS7 = 0x200 - CS8 = 0x300 - CSIGNAL = 0xff - CSIZE = 0x300 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x400 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DMA_BUF_MAGIC = 0x444d4142 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x40 - ECHOE = 0x2 - ECHOK = 0x4 - ECHOKE = 0x1 - ECHONL = 0x10 - ECHOPRT = 0x20 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x80000 - EFD_NONBLOCK = 0x800 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x80000 - 
EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LLDP = 0x88cc - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - 
FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x4000 - FFDLY = 0x4000 - FLUSHO = 0x800000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x5 - F_GETLK64 = 0xc - F_GETOWN = 0x9 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x0 - F_SEAL_FUTURE_WRITE = 0x10 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x6 - F_SETLK64 = 0xd - F_SETLKW = 0x7 - F_SETLKW64 = 0xe - F_SETOWN = 0x8 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x2 - F_WRLCK = 0x1 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 
0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x4000 - IBSHIFT = 0x10 - ICANON = 0x100 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x400 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x80000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x800 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - 
IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_ROUTER_ALERT_ISOLATE = 0x1e - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x80 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x1000 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x400 - IXON = 0x200 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CAPABILITIES = 0x1f - KEYCTL_CAPS0_BIG_KEY = 0x10 - KEYCTL_CAPS0_CAPABILITIES = 0x1 - KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 - 
KEYCTL_CAPS0_INVALIDATE = 0x20 - KEYCTL_CAPS0_MOVE = 0x80 - KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 - KEYCTL_CAPS0_PUBLIC_KEY = 0x8 - KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 - KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 - KEYCTL_CAPS1_NS_KEY_TAG = 0x2 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_MOVE = 0x1e - KEYCTL_MOVE_EXCL = 0x1 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x100 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x80 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x40 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x20000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - 
MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE = 0x2c - MCL_CURRENT = 0x2000 - MCL_FUTURE = 0x4000 - MCL_ONFAULT = 0x8000 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFDBITS = 0x40 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - 
NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NL2 = 0x200 - NL3 = 0x300 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x300 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80000000 - NSFS_MAGIC = 0x6e736673 - NS_GET_NSTYPE = 0x2000b703 - NS_GET_OWNER_UID = 0x2000b704 - NS_GET_PARENT = 0x2000b702 - NS_GET_USERNS = 0x2000b701 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x4 - ONLCR = 0x2 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x400 - O_ASYNC = 0x2000 - O_CLOEXEC = 0x80000 - O_CREAT = 0x40 - O_DIRECT = 0x20000 - O_DIRECTORY = 0x4000 - O_DSYNC = 0x1000 - O_EXCL = 0x80 - O_FSYNC = 0x101000 - O_LARGEFILE = 0x0 - O_NDELAY = 0x800 - O_NOATIME = 0x40000 - O_NOCTTY = 0x100 - O_NOFOLLOW = 0x8000 - O_NONBLOCK = 0x800 - O_PATH = 0x200000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x101000 - O_SYNC = 0x101000 - O_TMPFILE = 0x404000 - O_TRUNC = 0x200 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x1000 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 
0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x2000 - PENDIN = 0x20000000 - PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4010743f - PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80107446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x8010744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80107447 - PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_SAO = 0x10 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 
- PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 - PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETEVRREGS = 0x14 - PTRACE_GETFPREGS = 0xe - PTRACE_GETREGS = 0xc - PTRACE_GETREGS64 = 0x16 - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GETVRREGS = 0x12 - PTRACE_GETVSRREGS = 0x1b - PTRACE_GET_DEBUGREG = 0x19 - PTRACE_GET_SYSCALL_INFO = 0x420e - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKETEXT = 0x4 - PTRACE_POKEUSR = 0x6 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETEVRREGS = 0x15 - PTRACE_SETFPREGS = 0xf - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGS64 = 0x17 - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SETVRREGS = 0x13 - PTRACE_SETVSRREGS = 0x1c - PTRACE_SET_DEBUGREG = 0x1a - PTRACE_SINGLEBLOCK = 0x100 - PTRACE_SINGLESTEP = 0x9 - PTRACE_SYSCALL = 0x18 - PTRACE_SYSCALL_INFO_ENTRY = 0x1 - PTRACE_SYSCALL_INFO_EXIT = 0x2 - PTRACE_SYSCALL_INFO_NONE = 0x0 - PTRACE_SYSCALL_INFO_SECCOMP = 0x3 - PTRACE_SYSEMU = 0x1d - PTRACE_SYSEMU_SINGLESTEP = 0x1e - PTRACE_TRACEME = 0x0 - PT_CCR = 0x26 - PT_CTR = 0x23 - PT_DAR = 0x29 - PT_DSCR = 0x2c - PT_DSISR = 0x2a - PT_FPR0 = 0x30 - PT_FPSCR = 0x50 - PT_LNK = 0x24 - PT_MSR = 0x21 - PT_NIP = 0x20 - PT_ORIG_R3 = 0x22 - PT_R0 = 0x0 - PT_R1 = 0x1 - PT_R10 = 0xa - PT_R11 = 0xb 
- PT_R12 = 0xc - PT_R13 = 0xd - PT_R14 = 0xe - PT_R15 = 0xf - PT_R16 = 0x10 - PT_R17 = 0x11 - PT_R18 = 0x12 - PT_R19 = 0x13 - PT_R2 = 0x2 - PT_R20 = 0x14 - PT_R21 = 0x15 - PT_R22 = 0x16 - PT_R23 = 0x17 - PT_R24 = 0x18 - PT_R25 = 0x19 - PT_R26 = 0x1a - PT_R27 = 0x1b - PT_R28 = 0x1c - PT_R29 = 0x1d - PT_R3 = 0x3 - PT_R30 = 0x1e - PT_R31 = 0x1f - PT_R4 = 0x4 - PT_R5 = 0x5 - PT_R6 = 0x6 - PT_R7 = 0x7 - PT_R8 = 0x8 - PT_R9 = 0x9 - PT_REGS_COUNT = 0x2c - PT_RESULT = 0x2b - PT_SOFTE = 0x27 - PT_TRAP = 0x28 - PT_VR0 = 0x52 - PT_VRSAVE = 0x94 - PT_VSCR = 0x93 - PT_VSR0 = 0x96 - PT_VSR31 = 0xd4 - PT_XER = 0x25 - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x7 - RLIMIT_NPROC = 0x6 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4008700d - RTC_EPOCH_SET = 0x8008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x4008700b - RTC_IRQP_SET = 0x8008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x40207011 - RTC_PLL_SET = 0x80207012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION 
= 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNEXTHOP = 0x69 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNEXTHOP = 0x6a - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNEXTHOP = 0x68 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x17 - RTM_NR_MSGTYPES = 0x5c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x800 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGETLINKNAME = 0x89e0 - SIOCGETNODEID = 0x89e1 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 
0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x4004667f - SIOCOUTQ = 0x40047473 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x800 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0x1 - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1e - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x6 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DETACH_REUSEPORT_BPF = 0x44 - SO_DOMAIN = 0x27 - SO_DONTROUTE = 0x5 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x4 - SO_GET_FILTER = 0x1a - 
[vendored golang.org/x/sys/unix: remainder of the regenerated constant, Errors, and Signals tables for the preceding zerrors_linux file]
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index b9908d309..7def950ba 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -11,2771 +11,476 @@ package unix
 import "syscall"
 const (
[vendored golang.org/x/sys/unix: regenerated constant table for zerrors_linux_riscv64.go]
0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x7002 - RTC_AIE_ON = 0x7001 - RTC_ALM_READ = 0x80247008 - RTC_ALM_SET = 0x40247007 - RTC_EPOCH_READ = 0x8008700d - RTC_EPOCH_SET = 0x4008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x8008700b - RTC_IRQP_SET = 0x4008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x7006 - RTC_PIE_ON = 0x7005 - RTC_PLL_GET = 0x80207011 - RTC_PLL_SET = 0x40207012 - RTC_RD_TIME = 0x80247009 - RTC_SET_TIME = 0x4024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x7004 - RTC_UIE_ON = 0x7003 - RTC_VL_CLR = 0x7014 - RTC_VL_READ = 0x80047013 - RTC_WIE_OFF = 0x7010 - RTC_WIE_ON = 0x700f - RTC_WKALM_RD = 0x80287010 - RTC_WKALM_SET = 0x4028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNEXTHOP = 0x69 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNEXTHOP = 0x6a - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNEXTHOP = 0x68 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x17 - RTM_NR_MSGTYPES = 0x5c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 
0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x25 - SCM_TIMESTAMPING_OPT_STATS = 0x36 - SCM_TIMESTAMPING_PKTINFO = 0x3a - SCM_TIMESTAMPNS = 0x23 - SCM_TXTIME = 0x3d - SCM_WIFI_STATUS = 0x29 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x80000 - SFD_NONBLOCK = 0x800 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGETLINKNAME = 0x89e0 - SIOCGETNODEID = 0x89e1 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x80108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x80108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x541b - SIOCOUTQ = 0x5411 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE 
= 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x80000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x800 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0x1 - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x1e - SO_ATTACH_BPF = 0x32 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x33 - SO_ATTACH_REUSEPORT_EBPF = 0x34 - SO_BINDTODEVICE = 0x19 - SO_BINDTOIFINDEX = 0x3e - SO_BPF_EXTENSIONS = 0x30 - SO_BROADCAST = 0x6 - SO_BSDCOMPAT = 0xe - SO_BUSY_POLL = 0x2e - SO_CNX_ADVICE = 0x35 - SO_COOKIE = 0x39 - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DETACH_REUSEPORT_BPF = 0x44 - SO_DOMAIN = 0x27 - SO_DONTROUTE = 0x5 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x4 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x31 - SO_INCOMING_NAPI_ID = 0x38 - SO_KEEPALIVE = 0x9 - SO_LINGER = 0xd - SO_LOCK_FILTER = 0x2c - SO_MARK = 0x24 - SO_MAX_PACING_RATE = 0x2f - SO_MEMINFO = 0x37 - SO_NOFCS = 0x2b - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0xa - SO_PASSCRED = 0x10 - SO_PASSSEC = 0x22 - SO_PEEK_OFF = 0x2a - SO_PEERCRED = 0x11 - SO_PEERGROUPS = 0x3b - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1f - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x26 - SO_RCVBUF = 0x8 - SO_RCVBUFFORCE = 0x21 - SO_RCVLOWAT = 0x12 - SO_RCVTIMEO = 0x14 - SO_RCVTIMEO_NEW = 0x42 - SO_RCVTIMEO_OLD = 0x14 - SO_REUSEADDR = 0x2 - SO_REUSEPORT = 0xf - SO_RXQ_OVFL = 0x28 - SO_SECURITY_AUTHENTICATION = 0x16 - SO_SECURITY_ENCRYPTION_NETWORK = 0x18 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 - SO_SELECT_ERR_QUEUE = 0x2d - SO_SNDBUF = 0x7 - SO_SNDBUFFORCE = 0x20 - SO_SNDLOWAT = 0x13 - SO_SNDTIMEO = 0x15 - SO_SNDTIMEO_NEW = 0x43 - SO_SNDTIMEO_OLD = 0x15 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x25 - SO_TIMESTAMPING_NEW = 0x41 - SO_TIMESTAMPING_OLD = 0x25 - SO_TIMESTAMPNS = 0x23 - SO_TIMESTAMPNS_NEW = 0x40 - SO_TIMESTAMPNS_OLD = 0x23 - SO_TIMESTAMP_NEW = 0x3f - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3d - SO_TYPE = 0x3 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x29 - SO_ZEROCOPY = 0x3c - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - 
SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x540b - TCGETA = 0x5405 - TCGETS = 0x5401 - TCGETS2 = 0x802c542a - TCGETX = 0x5432 - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x5409 - TCSBRKP = 0x5425 - TCSETA = 0x5406 - TCSETAF = 0x5408 - TCSETAW = 0x5407 - TCSETS = 0x5402 - TCSETS2 = 0x402c542b - TCSETSF = 0x5404 - TCSETSF2 = 0x402c542d - TCSETSW = 0x5403 - TCSETSW2 = 0x402c542c - TCSETX = 0x5433 - TCSETXF = 0x5434 - TCSETXW = 0x5435 - TCXONC = 0x540a - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x5428 - TIOCCONS = 0x541d - TIOCEXCL = 0x540c - TIOCGDEV = 0x80045432 - TIOCGETD = 0x5424 - TIOCGEXCL = 0x80045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x80285442 
- TIOCGLCKTRMIOS = 0x5456 - TIOCGPGRP = 0x540f - TIOCGPKT = 0x80045438 - TIOCGPTLCK = 0x80045439 - TIOCGPTN = 0x80045430 - TIOCGPTPEER = 0x5441 - TIOCGRS485 = 0x542e - TIOCGSERIAL = 0x541e - TIOCGSID = 0x5429 - TIOCGSOFTCAR = 0x5419 - TIOCGWINSZ = 0x5413 - TIOCINQ = 0x541b - TIOCLINUX = 0x541c - TIOCMBIC = 0x5417 - TIOCMBIS = 0x5416 - TIOCMGET = 0x5415 - TIOCMIWAIT = 0x545c - TIOCMSET = 0x5418 - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x5422 - TIOCNXCL = 0x540d - TIOCOUTQ = 0x5411 - TIOCPKT = 0x5420 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x5427 - TIOCSCTTY = 0x540e - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSER_TEMT = 0x1 - TIOCSETD = 0x5423 - TIOCSIG = 0x40045436 - TIOCSISO7816 = 0xc0285443 - TIOCSLCKTRMIOS = 0x5457 - TIOCSPGRP = 0x5410 - TIOCSPTLCK = 0x40045431 - TIOCSRS485 = 0x542f - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x541a - TIOCSTI = 0x5412 - TIOCSWINSZ = 0x5414 - TIOCVHANGUP = 0x5437 - TIPC_ADDR_ID = 0x3 - TIPC_ADDR_MCAST = 0x1 - TIPC_ADDR_NAME = 0x2 - TIPC_ADDR_NAMESEQ = 0x1 - TIPC_CFG_SRV = 0x0 - TIPC_CLUSTER_BITS = 0xc - TIPC_CLUSTER_MASK = 0xfff000 - TIPC_CLUSTER_OFFSET = 0xc - TIPC_CLUSTER_SIZE = 0xfff - TIPC_CONN_SHUTDOWN = 0x5 - TIPC_CONN_TIMEOUT = 0x82 - TIPC_CRITICAL_IMPORTANCE = 0x3 - TIPC_DESTNAME = 0x3 - TIPC_DEST_DROPPABLE = 0x81 - TIPC_ERRINFO = 0x1 - TIPC_ERR_NO_NAME = 0x1 - TIPC_ERR_NO_NODE = 0x3 - TIPC_ERR_NO_PORT = 0x2 - TIPC_ERR_OVERLOAD = 0x4 - TIPC_GROUP_JOIN = 0x87 - TIPC_GROUP_LEAVE = 0x88 - TIPC_GROUP_LOOPBACK = 0x1 - TIPC_GROUP_MEMBER_EVTS = 0x2 - TIPC_HIGH_IMPORTANCE = 0x2 - TIPC_IMPORTANCE = 0x7f - TIPC_LINK_STATE = 0x2 - TIPC_LOW_IMPORTANCE = 0x0 - TIPC_MAX_BEARER_NAME = 0x20 - TIPC_MAX_IF_NAME = 0x10 - TIPC_MAX_LINK_NAME = 0x44 - TIPC_MAX_MEDIA_NAME = 0x10 - TIPC_MAX_USER_MSG_SIZE = 0x101d0 - TIPC_MCAST_BROADCAST = 0x85 - TIPC_MCAST_REPLICAST = 0x86 - TIPC_MEDIUM_IMPORTANCE = 0x1 - TIPC_NODEID_LEN = 0x10 - TIPC_NODE_BITS = 0xc - TIPC_NODE_MASK = 0xfff - TIPC_NODE_OFFSET = 0x0 - TIPC_NODE_RECVQ_DEPTH = 0x83 - TIPC_NODE_SIZE = 0xfff - TIPC_NODE_STATE = 0x0 - TIPC_OK = 0x0 - TIPC_PUBLISHED = 0x1 - TIPC_RESERVED_TYPES = 0x40 - TIPC_RETDATA = 0x2 - TIPC_SERVICE_ADDR = 0x2 - TIPC_SERVICE_RANGE = 0x1 - TIPC_SOCKET_ADDR = 0x3 - TIPC_SOCK_RECVQ_DEPTH = 0x84 - TIPC_SOCK_RECVQ_USED = 0x89 - TIPC_SRC_DROPPABLE = 0x80 - TIPC_SUBSCR_TIMEOUT = 0x3 - TIPC_SUB_CANCEL = 0x4 - TIPC_SUB_PORTS = 0x1 - TIPC_SUB_SERVICE = 0x2 - TIPC_TOP_SRV = 0x1 - TIPC_WAIT_FOREVER = 0xffffffff - TIPC_WITHDRAWN = 0x2 - TIPC_ZONE_BITS = 0x8 - TIPC_ZONE_CLUSTER_MASK = 0xfffff000 - TIPC_ZONE_MASK = 0xff000000 - TIPC_ZONE_OFFSET = 0x18 - TIPC_ZONE_SCOPE = 0x1 - TIPC_ZONE_SIZE = 0xff - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x100 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = 0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 
0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x401054d5 - TUNDETACHFILTER = 0x401054d6 - TUNGETDEVNETNS = 0x54e3 - TUNGETFEATURES = 0x800454cf - TUNGETFILTER = 0x801054db - TUNGETIFF = 0x800454d2 - TUNGETSNDBUF = 0x800454d3 - TUNGETVNETBE = 0x800454df - TUNGETVNETHDRSZ = 0x800454d7 - TUNGETVNETLE = 0x800454dd - TUNSETCARRIER = 0x400454e2 - TUNSETDEBUG = 0x400454c9 - TUNSETFILTEREBPF = 0x800454e1 - TUNSETGROUP = 0x400454ce - TUNSETIFF = 0x400454ca - TUNSETIFINDEX = 0x400454da - TUNSETLINK = 0x400454cd - TUNSETNOCSUM = 0x400454c8 - TUNSETOFFLOAD = 0x400454d0 - TUNSETOWNER = 0x400454cc - TUNSETPERSIST = 0x400454cb - TUNSETQUEUE = 0x400454d9 - TUNSETSNDBUF = 0x400454d4 - TUNSETSTEERINGEBPF = 0x800454e0 - TUNSETTXFILTER = 0x400454d1 - TUNSETVNETBE = 0x400454de - TUNSETVNETHDRSZ = 0x400454d8 - TUNSETVNETLE = 0x400454dc - UBI_IOCATT = 0x40186f40 - UBI_IOCDET = 0x40046f41 - UBI_IOCEBCH = 0x40044f02 - UBI_IOCEBER = 0x40044f01 - UBI_IOCEBISMAP = 0x80044f05 - UBI_IOCEBMAP = 0x40084f03 - UBI_IOCEBUNMAP = 0x40044f04 - UBI_IOCMKVOL = 0x40986f00 - UBI_IOCRMVOL = 0x40046f01 - UBI_IOCRNVOL = 0x51106f03 - UBI_IOCRPEB = 0x40046f04 - UBI_IOCRSVOL = 0x400c6f02 - UBI_IOCSETVOLPROP = 0x40104f06 - UBI_IOCSPEB = 0x40046f05 - UBI_IOCVOLCRBLK = 0x40804f07 - UBI_IOCVOLRMBLK = 0x4f08 - UBI_IOCVOLUP = 0x40084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x4 - VEOL = 0xb - VEOL2 = 0x10 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x6 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - WDIOC_GETBOOTSTATUS = 0x80045702 - WDIOC_GETPRETIMEOUT = 0x80045709 - WDIOC_GETSTATUS = 0x80045701 - WDIOC_GETSUPPORT = 0x80285700 - WDIOC_GETTEMP = 0x80045703 - WDIOC_GETTIMELEFT = 0x8004570a - WDIOC_GETTIMEOUT = 0x80045707 - WDIOC_KEEPALIVE = 0x80045705 - WDIOC_SETOPTIONS = 0x80045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - 
WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_OPTIONS = 0x8 - XDP_OPTIONS_ZEROCOPY = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - Z3FOLD_MAGIC = 0x33 - ZSMALLOC_MAGIC = 0x58295829 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x80081270 + BLKBSZSET = 0x40081271 + BLKFLSBUF = 0x1261 + BLKFRAGET = 0x1265 + BLKFRASET = 0x1264 + BLKGETSIZE = 0x1260 + BLKGETSIZE64 = 0x80081272 + BLKPBSZGET = 0x127b + BLKRAGET = 0x1263 + BLKRASET = 0x1262 + BLKROGET = 0x125e + BLKROSET = 0x125d + BLKRRPART = 0x125f + BLKSECTGET = 0x1267 + BLKSECTSET = 0x1266 + BLKSSZGET = 0x1268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x80000 + EFD_NONBLOCK = 0x800 + EPOLL_CLOEXEC = 0x80000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x40806685 + FS_IOC_GETFLAGS = 0x80086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x400c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x40106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x800c6613 + F_GETLK = 0x5 + F_GETLK64 = 0x5 + F_GETOWN = 0x9 + F_RDLCK = 0x0 + F_SETLK = 0x6 + F_SETLK64 = 0x6 + F_SETLKW = 0x7 + F_SETLKW64 = 0x7 + F_SETOWN = 0x8 + F_UNLCK = 0x2 + F_WRLCK = 0x1 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x80000 + IN_NONBLOCK = 0x800 + IOCTL_VM_SOCKETS_GET_LOCAL_CID 
= 0x7b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x100 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x2000 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x4000 + MAP_POPULATE = 0x8000 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x1 + MCL_FUTURE = 0x2 + MCL_ONFAULT = 0x4 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x400 + O_ASYNC = 0x2000 + O_CLOEXEC = 0x80000 + O_CREAT = 0x40 + O_DIRECT = 0x4000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x1000 + O_EXCL = 0x80 + O_FSYNC = 0x101000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x800 + O_NOATIME = 0x40000 + O_NOCTTY = 0x100 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x800 + O_PATH = 0x200000 + O_RSYNC = 0x101000 + O_SYNC = 0x101000 + O_TMPFILE = 0x410000 + O_TRUNC = 0x200 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x2401 + PERF_EVENT_IOC_ENABLE = 0x2400 + PERF_EVENT_IOC_ID = 0x80082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x4008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 + PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x2402 + PERF_EVENT_IOC_RESET = 0x2403 + PERF_EVENT_IOC_SET_BPF = 0x40042408 + PERF_EVENT_IOC_SET_FILTER = 0x40082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PPPIOCATTACH = 0x4004743d + PPPIOCATTCHAN = 0x40047438 + PPPIOCCONNECT = 0x4004743a + PPPIOCDETACH = 0x4004743c + PPPIOCDISCONN = 0x7439 + PPPIOCGASYNCMAP = 0x80047458 + PPPIOCGCHAN = 0x80047437 + PPPIOCGDEBUG = 0x80047441 + PPPIOCGFLAGS = 0x8004745a + PPPIOCGIDLE = 0x8010743f + PPPIOCGIDLE32 = 0x8008743f + PPPIOCGIDLE64 = 0x8010743f + PPPIOCGL2TPSTATS = 0x80487436 + PPPIOCGMRU = 0x80047453 + PPPIOCGRASYNCMAP = 0x80047455 + PPPIOCGUNIT = 0x80047456 + PPPIOCGXASYNCMAP = 0x80207450 + PPPIOCSACTIVE = 0x40107446 + PPPIOCSASYNCMAP = 0x40047457 + PPPIOCSCOMPRESS = 0x4010744d + PPPIOCSDEBUG = 0x40047440 + PPPIOCSFLAGS = 0x40047459 + PPPIOCSMAXCID = 0x40047451 + PPPIOCSMRRU = 0x4004743b + PPPIOCSMRU = 0x40047452 + PPPIOCSNPMODE = 0x4008744b + PPPIOCSPASS = 0x40107447 + PPPIOCSRASYNCMAP = 0x40047454 + PPPIOCSXASYNCMAP = 0x4020744f + PPPIOCXFERUNIT = 0x744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x7 + RLIMIT_NPROC = 0x6 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x40085203 + RNDADDTOENTCNT = 0x40045201 + RNDCLEARPOOL = 0x5206 + RNDGETENTCNT = 0x80045200 + RNDGETPOOL = 0x80085202 + RNDRESEEDCRNG = 0x5207 + RNDZAPENTCNT = 0x5204 + RTC_AIE_OFF = 0x7002 + RTC_AIE_ON = 0x7001 + RTC_ALM_READ = 0x80247008 + RTC_ALM_SET = 0x40247007 + RTC_EPOCH_READ = 0x8008700d + RTC_EPOCH_SET = 0x4008700e + RTC_IRQP_READ = 0x8008700b + RTC_IRQP_SET = 0x4008700c + RTC_PIE_OFF = 0x7006 + RTC_PIE_ON = 0x7005 + RTC_PLL_GET = 0x80207011 + RTC_PLL_SET = 0x40207012 + RTC_RD_TIME = 0x80247009 + RTC_SET_TIME = 0x4024700a + RTC_UIE_OFF = 0x7004 + RTC_UIE_ON = 0x7003 + RTC_VL_CLR = 0x7014 + RTC_VL_READ = 0x80047013 + RTC_WIE_OFF = 0x7010 + RTC_WIE_ON = 0x700f + RTC_WKALM_RD = 0x80287010 + RTC_WKALM_SET = 0x4028700f + SCM_TIMESTAMPING = 0x25 + SCM_TIMESTAMPING_OPT_STATS = 0x36 + SCM_TIMESTAMPING_PKTINFO = 0x3a + SCM_TIMESTAMPNS = 0x23 + SCM_TXTIME = 0x3d + SCM_WIFI_STATUS = 0x29 + SFD_CLOEXEC = 0x80000 + SFD_NONBLOCK = 0x800 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x80108907 + SIOCGSTAMP_NEW 
= 0x80108906 + SIOCINQ = 0x541b + SIOCOUTQ = 0x5411 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x80000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x800 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0x1 + SO_ACCEPTCONN = 0x1e + SO_ATTACH_BPF = 0x32 + SO_ATTACH_REUSEPORT_CBPF = 0x33 + SO_ATTACH_REUSEPORT_EBPF = 0x34 + SO_BINDTODEVICE = 0x19 + SO_BINDTOIFINDEX = 0x3e + SO_BPF_EXTENSIONS = 0x30 + SO_BROADCAST = 0x6 + SO_BSDCOMPAT = 0xe + SO_BUSY_POLL = 0x2e + SO_CNX_ADVICE = 0x35 + SO_COOKIE = 0x39 + SO_DETACH_REUSEPORT_BPF = 0x44 + SO_DOMAIN = 0x27 + SO_DONTROUTE = 0x5 + SO_ERROR = 0x4 + SO_INCOMING_CPU = 0x31 + SO_INCOMING_NAPI_ID = 0x38 + SO_KEEPALIVE = 0x9 + SO_LINGER = 0xd + SO_LOCK_FILTER = 0x2c + SO_MARK = 0x24 + SO_MAX_PACING_RATE = 0x2f + SO_MEMINFO = 0x37 + SO_NOFCS = 0x2b + SO_OOBINLINE = 0xa + SO_PASSCRED = 0x10 + SO_PASSSEC = 0x22 + SO_PEEK_OFF = 0x2a + SO_PEERCRED = 0x11 + SO_PEERGROUPS = 0x3b + SO_PEERSEC = 0x1f + SO_PROTOCOL = 0x26 + SO_RCVBUF = 0x8 + SO_RCVBUFFORCE = 0x21 + SO_RCVLOWAT = 0x12 + SO_RCVTIMEO = 0x14 + SO_RCVTIMEO_NEW = 0x42 + SO_RCVTIMEO_OLD = 0x14 + SO_REUSEADDR = 0x2 + SO_REUSEPORT = 0xf + SO_RXQ_OVFL = 0x28 + SO_SECURITY_AUTHENTICATION = 0x16 + SO_SECURITY_ENCRYPTION_NETWORK = 0x18 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17 + SO_SELECT_ERR_QUEUE = 0x2d + SO_SNDBUF = 0x7 + SO_SNDBUFFORCE = 0x20 + SO_SNDLOWAT = 0x13 + SO_SNDTIMEO = 0x15 + SO_SNDTIMEO_NEW = 0x43 + SO_SNDTIMEO_OLD = 0x15 + SO_TIMESTAMPING = 0x25 + SO_TIMESTAMPING_NEW = 0x41 + SO_TIMESTAMPING_OLD = 0x25 + SO_TIMESTAMPNS = 0x23 + SO_TIMESTAMPNS_NEW = 0x40 + SO_TIMESTAMPNS_OLD = 0x23 + SO_TIMESTAMP_NEW = 0x3f + SO_TXTIME = 0x3d + SO_TYPE = 0x3 + SO_WIFI_STATUS = 0x29 + SO_ZEROCOPY = 0x3c + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x540b + TCGETA = 0x5405 + TCGETS = 0x5401 + TCGETS2 = 0x802c542a + TCGETX = 0x5432 + TCSAFLUSH = 0x2 + TCSBRK = 0x5409 + TCSBRKP = 0x5425 + TCSETA = 0x5406 + TCSETAF = 0x5408 + TCSETAW = 0x5407 + TCSETS = 0x5402 + TCSETS2 = 0x402c542b + TCSETSF = 0x5404 + TCSETSF2 = 0x402c542d + TCSETSW = 0x5403 + TCSETSW2 = 0x402c542c + TCSETX = 0x5433 + TCSETXF = 0x5434 + TCSETXW = 0x5435 + TCXONC = 0x540a + TFD_CLOEXEC = 0x80000 + TFD_NONBLOCK = 0x800 + TIOCCBRK = 0x5428 + TIOCCONS = 0x541d + TIOCEXCL = 0x540c + TIOCGDEV = 0x80045432 + TIOCGETD = 0x5424 + TIOCGEXCL = 0x80045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x80285442 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x540f + TIOCGPKT = 0x80045438 + TIOCGPTLCK = 0x80045439 + TIOCGPTN = 0x80045430 + TIOCGPTPEER = 0x5441 + TIOCGRS485 = 0x542e + TIOCGSERIAL = 0x541e + TIOCGSID = 0x5429 + TIOCGSOFTCAR = 0x5419 + TIOCGWINSZ = 0x5413 + TIOCINQ = 0x541b + TIOCLINUX = 0x541c + TIOCMBIC = 0x5417 + TIOCMBIS = 0x5416 + TIOCMGET = 0x5415 + TIOCMIWAIT = 0x545c + TIOCMSET = 0x5418 + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x5422 + TIOCNXCL = 0x540d + TIOCOUTQ = 0x5411 + TIOCPKT = 0x5420 + TIOCSBRK = 0x5427 + TIOCSCTTY = 0x540e + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSER_TEMT = 0x1 + TIOCSETD = 0x5423 + TIOCSIG = 0x40045436 + TIOCSISO7816 = 0xc0285443 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x5410 + TIOCSPTLCK = 0x40045431 + TIOCSRS485 = 0x542f + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x541a + TIOCSTI = 0x5412 + TIOCSWINSZ = 0x5414 + TIOCVHANGUP = 0x5437 + TOSTOP = 0x100 + 
TUNATTACHFILTER = 0x401054d5 + TUNDETACHFILTER = 0x401054d6 + TUNGETDEVNETNS = 0x54e3 + TUNGETFEATURES = 0x800454cf + TUNGETFILTER = 0x801054db + TUNGETIFF = 0x800454d2 + TUNGETSNDBUF = 0x800454d3 + TUNGETVNETBE = 0x800454df + TUNGETVNETHDRSZ = 0x800454d7 + TUNGETVNETLE = 0x800454dd + TUNSETCARRIER = 0x400454e2 + TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 + TUNSETGROUP = 0x400454ce + TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x62) EADDRNOTAVAIL = syscall.Errno(0x63) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x61) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x72) EBADE = syscall.Errno(0x34) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x4d) EBADMSG = syscall.Errno(0x4a) EBADR = syscall.Errno(0x35) EBADRQC = syscall.Errno(0x38) EBADSLT = syscall.Errno(0x39) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7d) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x2c) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x67) @@ -2784,23 +489,15 @@ const ( EDEADLK = syscall.Errno(0x23) EDEADLOCK = syscall.Errno(0x23) EDESTADDRREQ = syscall.Errno(0x59) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x7a) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x70) EHOSTUNREACH = syscall.Errno(0x71) EHWPOISON = syscall.Errno(0x85) EIDRM = syscall.Errno(0x2b) EILSEQ = syscall.Errno(0x54) EINPROGRESS = syscall.Errno(0x73) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x6a) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x7f) EKEYREJECTED = syscall.Errno(0x81) @@ -2817,8 +514,6 @@ const ( ELNRNG = syscall.Errno(0x30) ELOOP = syscall.Errno(0x28) EMEDIUMTYPE = syscall.Errno(0x7c) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x5a) EMULTIHOP = syscall.Errno(0x48) ENAMETOOLONG = syscall.Errno(0x24) @@ -2826,99 +521,67 
@@ const ( ENETDOWN = syscall.Errno(0x64) ENETRESET = syscall.Errno(0x66) ENETUNREACH = syscall.Errno(0x65) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x37) ENOBUFS = syscall.Errno(0x69) ENOCSI = syscall.Errno(0x32) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x7e) ENOLCK = syscall.Errno(0x25) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x2a) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x6b) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x27) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x83) ENOTSOCK = syscall.Errno(0x58) ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x5f) EOVERFLOW = syscall.Errno(0x4b) EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x5d) EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x4e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x55) ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x6c) ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x74) ESTRPIPE = syscall.Errno(0x56) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x6e) ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x31) EUSERS = syscall.Errno(0x57) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x36) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0x7) SIGCHLD = syscall.Signal(0x11) SIGCLD = syscall.Signal(0x11) SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x1d) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x1d) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1e) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTKFLT = syscall.Signal(0x10) SIGSTOP = syscall.Signal(0x13) SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 85647f4f7..d39293c87 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -11,2844 +11,549 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 
0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x80081270 - BLKBSZSET = 0x40081271 - BLKFLSBUF = 0x1261 - BLKFRAGET = 0x1265 - BLKFRASET = 0x1264 - BLKGETSIZE = 0x1260 - BLKGETSIZE64 = 0x80081272 - BLKPBSZGET = 0x127b - BLKRAGET = 0x1263 - BLKRASET = 0x1262 - BLKROGET = 0x125e - BLKROSET = 0x125d - BLKRRPART = 0x125f - BLKSECTGET = 0x1267 - BLKSECTSET = 0x1266 - BLKSSZGET = 0x1268 - 
BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TEST_RND_HI32 = 0x4 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - 
[vendor hunk: removes the previous block of generated Linux syscall/ioctl constants ("-" lines, CAN_TP16 = 0x3 through ZSMALLOC_MAGIC = 0x58295829) and adds a regenerated block of baud-rate, block-device, termios, FS_IOC_*, PERF_EVENT_IOC_*, PPPIOC*, PTRACE_*/PT_*, RTC_*, SO_*, TC*/TIOC*, and TUN* constants ("+" lines, beginning B1000000 = 0x1008, B115200 = 0x1002, ... through TUNSETGROUP = 0x400454ce); the added "+" block continues below]
TUNSETIFF = 0x400454ca + TUNSETIFINDEX = 0x400454da + TUNSETLINK = 0x400454cd + TUNSETNOCSUM = 0x400454c8 + TUNSETOFFLOAD = 0x400454d0 + TUNSETOWNER = 0x400454cc + TUNSETPERSIST = 0x400454cb + TUNSETQUEUE = 0x400454d9 + TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 + TUNSETTXFILTER = 0x400454d1 + TUNSETVNETBE = 0x400454de + TUNSETVNETHDRSZ = 0x400454d8 + TUNSETVNETLE = 0x400454dc + UBI_IOCATT = 0x40186f40 + UBI_IOCDET = 0x40046f41 + UBI_IOCEBCH = 0x40044f02 + UBI_IOCEBER = 0x40044f01 + UBI_IOCEBISMAP = 0x80044f05 + UBI_IOCEBMAP = 0x40084f03 + UBI_IOCEBUNMAP = 0x40044f04 + UBI_IOCMKVOL = 0x40986f00 + UBI_IOCRMVOL = 0x40046f01 + UBI_IOCRNVOL = 0x51106f03 + UBI_IOCRPEB = 0x40046f04 + UBI_IOCRSVOL = 0x400c6f02 + UBI_IOCSETVOLPROP = 0x40104f06 + UBI_IOCSPEB = 0x40046f05 + UBI_IOCVOLCRBLK = 0x40804f07 + UBI_IOCVOLRMBLK = 0x4f08 + UBI_IOCVOLUP = 0x40084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x80045702 + WDIOC_GETPRETIMEOUT = 0x80045709 + WDIOC_GETSTATUS = 0x80045701 + WDIOC_GETSUPPORT = 0x80285700 + WDIOC_GETTEMP = 0x80045703 + WDIOC_GETTIMELEFT = 0x8004570a + WDIOC_GETTIMEOUT = 0x80045707 + WDIOC_KEEPALIVE = 0x80045705 + WDIOC_SETOPTIONS = 0x80045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x62) EADDRNOTAVAIL = syscall.Errno(0x63) EADV = syscall.Errno(0x44) EAFNOSUPPORT = syscall.Errno(0x61) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x72) EBADE = syscall.Errno(0x34) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x4d) EBADMSG = syscall.Errno(0x4a) EBADR = syscall.Errno(0x35) EBADRQC = syscall.Errno(0x38) EBADSLT = syscall.Errno(0x39) EBFONT = syscall.Errno(0x3b) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7d) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x2c) ECOMM = syscall.Errno(0x46) ECONNABORTED = syscall.Errno(0x67) @@ -2857,23 +562,15 @@ const ( EDEADLK = syscall.Errno(0x23) EDEADLOCK = syscall.Errno(0x23) EDESTADDRREQ = syscall.Errno(0x59) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x49) EDQUOT = syscall.Errno(0x7a) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x70) EHOSTUNREACH = syscall.Errno(0x71) EHWPOISON = syscall.Errno(0x85) EIDRM = syscall.Errno(0x2b) EILSEQ = syscall.Errno(0x54) EINPROGRESS = syscall.Errno(0x73) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x6a) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x7f) EKEYREJECTED = syscall.Errno(0x81) @@ -2890,8 +587,6 @@ const ( ELNRNG = syscall.Errno(0x30) ELOOP = syscall.Errno(0x28) EMEDIUMTYPE = syscall.Errno(0x7c) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x5a) EMULTIHOP = syscall.Errno(0x48) ENAMETOOLONG = syscall.Errno(0x24) @@ -2899,99 +594,67 @@ const ( ENETDOWN = syscall.Errno(0x64) ENETRESET = syscall.Errno(0x66) ENETUNREACH = syscall.Errno(0x65) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x37) ENOBUFS = syscall.Errno(0x69) ENOCSI = syscall.Errno(0x32) ENODATA = syscall.Errno(0x3d) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x7e) ENOLCK = 
syscall.Errno(0x25) ENOLINK = syscall.Errno(0x43) ENOMEDIUM = syscall.Errno(0x7b) - ENOMEM = syscall.Errno(0xc) ENOMSG = syscall.Errno(0x2a) ENONET = syscall.Errno(0x40) ENOPKG = syscall.Errno(0x41) ENOPROTOOPT = syscall.Errno(0x5c) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x3f) ENOSTR = syscall.Errno(0x3c) ENOSYS = syscall.Errno(0x26) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x6b) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x27) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x83) ENOTSOCK = syscall.Errno(0x58) ENOTSUP = syscall.Errno(0x5f) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x4c) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x5f) EOVERFLOW = syscall.Errno(0x4b) EOWNERDEAD = syscall.Errno(0x82) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x60) - EPIPE = syscall.Errno(0x20) EPROTO = syscall.Errno(0x47) EPROTONOSUPPORT = syscall.Errno(0x5d) EPROTOTYPE = syscall.Errno(0x5b) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x4e) EREMOTE = syscall.Errno(0x42) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x55) ERFKILL = syscall.Errno(0x84) - EROFS = syscall.Errno(0x1e) ESHUTDOWN = syscall.Errno(0x6c) ESOCKTNOSUPPORT = syscall.Errno(0x5e) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x45) ESTALE = syscall.Errno(0x74) ESTRPIPE = syscall.Errno(0x56) ETIME = syscall.Errno(0x3e) ETIMEDOUT = syscall.Errno(0x6e) ETOOMANYREFS = syscall.Errno(0x6d) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x31) EUSERS = syscall.Errno(0x57) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x36) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0x7) SIGCHLD = syscall.Signal(0x11) SIGCLD = syscall.Signal(0x11) SIGCONT = syscall.Signal(0x12) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x1d) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x1d) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1e) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTKFLT = syscall.Signal(0x10) SIGSTOP = syscall.Signal(0x13) SIGSYS = syscall.Signal(0x1f) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x14) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c0095a543..3ff3ec681 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -11,2834 +11,540 @@ package unix import "syscall" const ( - AAFS_MAGIC = 0x5a3c69f0 - ADFS_SUPER_MAGIC = 0xadf5 - AFFS_SUPER_MAGIC = 0xadff - AFS_FS_MAGIC = 0x6b414653 - AFS_SUPER_MAGIC = 0x5346414f - AF_ALG = 0x26 - AF_APPLETALK = 0x5 - AF_ASH = 0x12 - AF_ATMPVC = 0x8 - AF_ATMSVC = 0x14 - AF_AX25 = 0x3 - AF_BLUETOOTH = 0x1f - AF_BRIDGE = 0x7 - AF_CAIF = 0x25 - AF_CAN = 0x1d - AF_DECnet = 0xc - AF_ECONET = 0x13 - AF_FILE = 0x1 - AF_IB = 0x1b - AF_IEEE802154 = 0x24 - AF_INET = 0x2 - AF_INET6 = 0xa - AF_IPX = 0x4 - AF_IRDA = 0x17 - AF_ISDN = 0x22 - AF_IUCV = 0x20 - AF_KCM = 0x29 - AF_KEY = 0xf - AF_LLC = 0x1a - AF_LOCAL = 0x1 - AF_MAX = 0x2d - AF_MPLS = 0x1c - AF_NETBEUI = 
0xd - AF_NETLINK = 0x10 - AF_NETROM = 0x6 - AF_NFC = 0x27 - AF_PACKET = 0x11 - AF_PHONET = 0x23 - AF_PPPOX = 0x18 - AF_QIPCRTR = 0x2a - AF_RDS = 0x15 - AF_ROSE = 0xb - AF_ROUTE = 0x10 - AF_RXRPC = 0x21 - AF_SECURITY = 0xe - AF_SMC = 0x2b - AF_SNA = 0x16 - AF_TIPC = 0x1e - AF_UNIX = 0x1 - AF_UNSPEC = 0x0 - AF_VSOCK = 0x28 - AF_WANPIPE = 0x19 - AF_X25 = 0x9 - AF_XDP = 0x2c - ALG_OP_DECRYPT = 0x0 - ALG_OP_ENCRYPT = 0x1 - ALG_SET_AEAD_ASSOCLEN = 0x4 - ALG_SET_AEAD_AUTHSIZE = 0x5 - ALG_SET_IV = 0x2 - ALG_SET_KEY = 0x1 - ALG_SET_OP = 0x3 - ANON_INODE_FS_MAGIC = 0x9041934 - ARPHRD_6LOWPAN = 0x339 - ARPHRD_ADAPT = 0x108 - ARPHRD_APPLETLK = 0x8 - ARPHRD_ARCNET = 0x7 - ARPHRD_ASH = 0x30d - ARPHRD_ATM = 0x13 - ARPHRD_AX25 = 0x3 - ARPHRD_BIF = 0x307 - ARPHRD_CAIF = 0x336 - ARPHRD_CAN = 0x118 - ARPHRD_CHAOS = 0x5 - ARPHRD_CISCO = 0x201 - ARPHRD_CSLIP = 0x101 - ARPHRD_CSLIP6 = 0x103 - ARPHRD_DDCMP = 0x205 - ARPHRD_DLCI = 0xf - ARPHRD_ECONET = 0x30e - ARPHRD_EETHER = 0x2 - ARPHRD_ETHER = 0x1 - ARPHRD_EUI64 = 0x1b - ARPHRD_FCAL = 0x311 - ARPHRD_FCFABRIC = 0x313 - ARPHRD_FCPL = 0x312 - ARPHRD_FCPP = 0x310 - ARPHRD_FDDI = 0x306 - ARPHRD_FRAD = 0x302 - ARPHRD_HDLC = 0x201 - ARPHRD_HIPPI = 0x30c - ARPHRD_HWX25 = 0x110 - ARPHRD_IEEE1394 = 0x18 - ARPHRD_IEEE802 = 0x6 - ARPHRD_IEEE80211 = 0x321 - ARPHRD_IEEE80211_PRISM = 0x322 - ARPHRD_IEEE80211_RADIOTAP = 0x323 - ARPHRD_IEEE802154 = 0x324 - ARPHRD_IEEE802154_MONITOR = 0x325 - ARPHRD_IEEE802_TR = 0x320 - ARPHRD_INFINIBAND = 0x20 - ARPHRD_IP6GRE = 0x337 - ARPHRD_IPDDP = 0x309 - ARPHRD_IPGRE = 0x30a - ARPHRD_IRDA = 0x30f - ARPHRD_LAPB = 0x204 - ARPHRD_LOCALTLK = 0x305 - ARPHRD_LOOPBACK = 0x304 - ARPHRD_METRICOM = 0x17 - ARPHRD_NETLINK = 0x338 - ARPHRD_NETROM = 0x0 - ARPHRD_NONE = 0xfffe - ARPHRD_PHONET = 0x334 - ARPHRD_PHONET_PIPE = 0x335 - ARPHRD_PIMREG = 0x30b - ARPHRD_PPP = 0x200 - ARPHRD_PRONET = 0x4 - ARPHRD_RAWHDLC = 0x206 - ARPHRD_RAWIP = 0x207 - ARPHRD_ROSE = 0x10e - ARPHRD_RSRVD = 0x104 - ARPHRD_SIT = 0x308 - ARPHRD_SKIP = 0x303 - ARPHRD_SLIP = 0x100 - ARPHRD_SLIP6 = 0x102 - ARPHRD_TUNNEL = 0x300 - ARPHRD_TUNNEL6 = 0x301 - ARPHRD_VOID = 0xffff - ARPHRD_VSOCKMON = 0x33a - ARPHRD_X25 = 0x10f - ASI_LEON_DFLUSH = 0x11 - ASI_LEON_IFLUSH = 0x10 - ASI_LEON_MMUFLUSH = 0x18 - AUTOFS_SUPER_MAGIC = 0x187 - B0 = 0x0 - B1000000 = 0x1008 - B110 = 0x3 - B115200 = 0x1002 - B1152000 = 0x1009 - B1200 = 0x9 - B134 = 0x4 - B150 = 0x5 - B1500000 = 0x100a - B1800 = 0xa - B19200 = 0xe - B200 = 0x6 - B2000000 = 0x100b - B230400 = 0x1003 - B2400 = 0xb - B2500000 = 0x100c - B300 = 0x7 - B3000000 = 0x100d - B3500000 = 0x100e - B38400 = 0xf - B4000000 = 0x100f - B460800 = 0x1004 - B4800 = 0xc - B50 = 0x1 - B500000 = 0x1005 - B57600 = 0x1001 - B576000 = 0x1006 - B600 = 0x8 - B75 = 0x2 - B921600 = 0x1007 - B9600 = 0xd - BALLOON_KVM_MAGIC = 0x13661366 - BDEVFS_MAGIC = 0x62646576 - BINDERFS_SUPER_MAGIC = 0x6c6f6f70 - BINFMTFS_MAGIC = 0x42494e4d - BLKBSZGET = 0x40081270 - BLKBSZSET = 0x80081271 - BLKFLSBUF = 0x20001261 - BLKFRAGET = 0x20001265 - BLKFRASET = 0x20001264 - BLKGETSIZE = 0x20001260 - BLKGETSIZE64 = 0x40081272 - BLKPBSZGET = 0x2000127b - BLKRAGET = 0x20001263 - BLKRASET = 0x20001262 - BLKROGET = 0x2000125e - BLKROSET = 0x2000125d - BLKRRPART = 0x2000125f - BLKSECTGET = 0x20001267 - BLKSECTSET = 0x20001266 - BLKSSZGET = 0x20001268 - BOTHER = 0x1000 - BPF_A = 0x10 - BPF_ABS = 0x20 - BPF_ADD = 0x0 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_ALU = 0x4 - BPF_ALU64 = 0x7 - BPF_AND = 0x50 - BPF_ANY = 0x0 - BPF_ARSH = 0xc0 - BPF_B = 0x10 - 
BPF_BUILD_ID_SIZE = 0x14 - BPF_CALL = 0x80 - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_DIV = 0x30 - BPF_DW = 0x18 - BPF_END = 0xd0 - BPF_EXIST = 0x2 - BPF_EXIT = 0x90 - BPF_FROM_BE = 0x8 - BPF_FROM_LE = 0x0 - BPF_FS_MAGIC = 0xcafe4a11 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ALLOW_MULTI = 0x2 - BPF_F_ALLOW_OVERRIDE = 0x1 - BPF_F_ANY_ALIGNMENT = 0x2 - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CURRENT_NETNS = -0x1 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_INGRESS = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NUMA_NODE = 0x4 - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_QUERY_EFFECTIVE = 0x1 - BPF_F_RDONLY = 0x8 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_STRICT_ALIGNMENT = 0x1 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_F_TEST_RND_HI32 = 0x4 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_USER_STACK = 0x100 - BPF_F_WRONLY = 0x10 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_ZERO_SEED = 0x40 - BPF_H = 0x8 - BPF_IMM = 0x0 - BPF_IND = 0x40 - BPF_JA = 0x0 - BPF_JEQ = 0x10 - BPF_JGE = 0x30 - BPF_JGT = 0x20 - BPF_JLE = 0xb0 - BPF_JLT = 0xa0 - BPF_JMP = 0x5 - BPF_JMP32 = 0x6 - BPF_JNE = 0x50 - BPF_JSET = 0x40 - BPF_JSGE = 0x70 - BPF_JSGT = 0x60 - BPF_JSLE = 0xd0 - BPF_JSLT = 0xc0 - BPF_K = 0x0 - BPF_LD = 0x0 - BPF_LDX = 0x1 - BPF_LEN = 0x80 - BPF_LL_OFF = -0x200000 - BPF_LSH = 0x60 - BPF_MAJOR_VERSION = 0x1 - BPF_MAXINSNS = 0x1000 - BPF_MEM = 0x60 - BPF_MEMWORDS = 0x10 - BPF_MINOR_VERSION = 0x1 - BPF_MISC = 0x7 - BPF_MOD = 0x90 - BPF_MOV = 0xb0 - BPF_MSH = 0xa0 - BPF_MUL = 0x20 - BPF_NEG = 0x80 - BPF_NET_OFF = -0x100000 - BPF_NOEXIST = 0x1 - BPF_OBJ_NAME_LEN = 0x10 - BPF_OR = 0x40 - BPF_PSEUDO_CALL = 0x1 - BPF_PSEUDO_MAP_FD = 0x1 - BPF_PSEUDO_MAP_VALUE = 0x2 - BPF_RET = 0x6 - BPF_RSH = 0x70 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_ST = 0x2 - BPF_STX = 0x3 - BPF_SUB = 0x10 - BPF_TAG_SIZE = 0x8 - BPF_TAX = 0x0 - BPF_TO_BE = 0x8 - BPF_TO_LE = 0x0 - BPF_TXA = 0x80 - BPF_W = 0x0 - BPF_X = 0x8 - BPF_XADD = 0xc0 - BPF_XOR = 0xa0 - BRKINT = 0x2 - BS0 = 0x0 - BS1 = 0x2000 - BSDLY = 0x2000 - BTRFS_SUPER_MAGIC = 0x9123683e - BTRFS_TEST_MAGIC = 0x73727279 - CAN_BCM = 0x2 - CAN_EFF_FLAG = 0x80000000 - CAN_EFF_ID_BITS = 0x1d - CAN_EFF_MASK = 0x1fffffff - CAN_ERR_FLAG = 0x20000000 - CAN_ERR_MASK = 0x1fffffff - CAN_INV_FILTER = 0x20000000 - CAN_ISOTP = 0x6 - CAN_MAX_DLC = 0x8 - CAN_MAX_DLEN = 0x8 - CAN_MCNET = 0x5 - CAN_MTU = 0x10 - CAN_NPROTO = 0x7 - CAN_RAW = 0x1 - CAN_RAW_FILTER_MAX = 0x200 - CAN_RTR_FLAG = 0x40000000 - CAN_SFF_ID_BITS = 0xb - CAN_SFF_MASK = 0x7ff - CAN_TP16 = 0x3 - CAN_TP20 = 0x4 - CAP_AUDIT_CONTROL = 0x1e - CAP_AUDIT_READ = 0x25 - CAP_AUDIT_WRITE = 0x1d - CAP_BLOCK_SUSPEND = 0x24 - CAP_CHOWN = 0x0 - CAP_DAC_OVERRIDE = 0x1 - CAP_DAC_READ_SEARCH = 0x2 - CAP_FOWNER = 0x3 - CAP_FSETID = 0x4 
- CAP_IPC_LOCK = 0xe - CAP_IPC_OWNER = 0xf - CAP_KILL = 0x5 - CAP_LAST_CAP = 0x25 - CAP_LEASE = 0x1c - CAP_LINUX_IMMUTABLE = 0x9 - CAP_MAC_ADMIN = 0x21 - CAP_MAC_OVERRIDE = 0x20 - CAP_MKNOD = 0x1b - CAP_NET_ADMIN = 0xc - CAP_NET_BIND_SERVICE = 0xa - CAP_NET_BROADCAST = 0xb - CAP_NET_RAW = 0xd - CAP_SETFCAP = 0x1f - CAP_SETGID = 0x6 - CAP_SETPCAP = 0x8 - CAP_SETUID = 0x7 - CAP_SYSLOG = 0x22 - CAP_SYS_ADMIN = 0x15 - CAP_SYS_BOOT = 0x16 - CAP_SYS_CHROOT = 0x12 - CAP_SYS_MODULE = 0x10 - CAP_SYS_NICE = 0x17 - CAP_SYS_PACCT = 0x14 - CAP_SYS_PTRACE = 0x13 - CAP_SYS_RAWIO = 0x11 - CAP_SYS_RESOURCE = 0x18 - CAP_SYS_TIME = 0x19 - CAP_SYS_TTY_CONFIG = 0x1a - CAP_WAKE_ALARM = 0x23 - CBAUD = 0x100f - CBAUDEX = 0x1000 - CFLUSH = 0xf - CGROUP2_SUPER_MAGIC = 0x63677270 - CGROUP_SUPER_MAGIC = 0x27e0eb - CIBAUD = 0x100f0000 - CLOCAL = 0x800 - CLOCK_BOOTTIME = 0x7 - CLOCK_BOOTTIME_ALARM = 0x9 - CLOCK_DEFAULT = 0x0 - CLOCK_EXT = 0x1 - CLOCK_INT = 0x2 - CLOCK_MONOTONIC = 0x1 - CLOCK_MONOTONIC_COARSE = 0x6 - CLOCK_MONOTONIC_RAW = 0x4 - CLOCK_PROCESS_CPUTIME_ID = 0x2 - CLOCK_REALTIME = 0x0 - CLOCK_REALTIME_ALARM = 0x8 - CLOCK_REALTIME_COARSE = 0x5 - CLOCK_TAI = 0xb - CLOCK_THREAD_CPUTIME_ID = 0x3 - CLOCK_TXFROMRX = 0x4 - CLOCK_TXINT = 0x3 - CLONE_CHILD_CLEARTID = 0x200000 - CLONE_CHILD_SETTID = 0x1000000 - CLONE_DETACHED = 0x400000 - CLONE_FILES = 0x400 - CLONE_FS = 0x200 - CLONE_IO = 0x80000000 - CLONE_NEWCGROUP = 0x2000000 - CLONE_NEWIPC = 0x8000000 - CLONE_NEWNET = 0x40000000 - CLONE_NEWNS = 0x20000 - CLONE_NEWPID = 0x20000000 - CLONE_NEWUSER = 0x10000000 - CLONE_NEWUTS = 0x4000000 - CLONE_PARENT = 0x8000 - CLONE_PARENT_SETTID = 0x100000 - CLONE_PIDFD = 0x1000 - CLONE_PTRACE = 0x2000 - CLONE_SETTLS = 0x80000 - CLONE_SIGHAND = 0x800 - CLONE_SYSVSEM = 0x40000 - CLONE_THREAD = 0x10000 - CLONE_UNTRACED = 0x800000 - CLONE_VFORK = 0x4000 - CLONE_VM = 0x100 - CMSPAR = 0x40000000 - CODA_SUPER_MAGIC = 0x73757245 - CR0 = 0x0 - CR1 = 0x200 - CR2 = 0x400 - CR3 = 0x600 - CRAMFS_MAGIC = 0x28cd3d45 - CRDLY = 0x600 - CREAD = 0x80 - CRTSCTS = 0x80000000 - CRYPTO_MAX_NAME = 0x40 - CRYPTO_MSG_MAX = 0x15 - CRYPTO_NR_MSGTYPES = 0x6 - CRYPTO_REPORT_MAXSIZE = 0x160 - CS5 = 0x0 - CS6 = 0x10 - CS7 = 0x20 - CS8 = 0x30 - CSIGNAL = 0xff - CSIZE = 0x30 - CSTART = 0x11 - CSTATUS = 0x0 - CSTOP = 0x13 - CSTOPB = 0x40 - CSUSP = 0x1a - DAXFS_MAGIC = 0x64646178 - DEBUGFS_MAGIC = 0x64626720 - DEVPTS_SUPER_MAGIC = 0x1cd1 - DMA_BUF_MAGIC = 0x444d4142 - DT_BLK = 0x6 - DT_CHR = 0x2 - DT_DIR = 0x4 - DT_FIFO = 0x1 - DT_LNK = 0xa - DT_REG = 0x8 - DT_SOCK = 0xc - DT_UNKNOWN = 0x0 - DT_WHT = 0xe - ECHO = 0x8 - ECHOCTL = 0x200 - ECHOE = 0x10 - ECHOK = 0x20 - ECHOKE = 0x800 - ECHONL = 0x40 - ECHOPRT = 0x400 - ECRYPTFS_SUPER_MAGIC = 0xf15f - EFD_CLOEXEC = 0x400000 - EFD_NONBLOCK = 0x4000 - EFD_SEMAPHORE = 0x1 - EFIVARFS_MAGIC = 0xde5e81e4 - EFS_SUPER_MAGIC = 0x414a53 - EMT_TAGOVF = 0x1 - ENCODING_DEFAULT = 0x0 - ENCODING_FM_MARK = 0x3 - ENCODING_FM_SPACE = 0x4 - ENCODING_MANCHESTER = 0x5 - ENCODING_NRZ = 0x1 - ENCODING_NRZI = 0x2 - EPOLLERR = 0x8 - EPOLLET = 0x80000000 - EPOLLEXCLUSIVE = 0x10000000 - EPOLLHUP = 0x10 - EPOLLIN = 0x1 - EPOLLMSG = 0x400 - EPOLLONESHOT = 0x40000000 - EPOLLOUT = 0x4 - EPOLLPRI = 0x2 - EPOLLRDBAND = 0x80 - EPOLLRDHUP = 0x2000 - EPOLLRDNORM = 0x40 - EPOLLWAKEUP = 0x20000000 - EPOLLWRBAND = 0x200 - EPOLLWRNORM = 0x100 - EPOLL_CLOEXEC = 0x400000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 - ETH_P_1588 = 0x88f7 - ETH_P_8021AD = 0x88a8 - ETH_P_8021AH = 0x88e7 - ETH_P_8021Q = 0x8100 - ETH_P_80221 = 0x8917 - 
ETH_P_802_2 = 0x4 - ETH_P_802_3 = 0x1 - ETH_P_802_3_MIN = 0x600 - ETH_P_802_EX1 = 0x88b5 - ETH_P_AARP = 0x80f3 - ETH_P_AF_IUCV = 0xfbfb - ETH_P_ALL = 0x3 - ETH_P_AOE = 0x88a2 - ETH_P_ARCNET = 0x1a - ETH_P_ARP = 0x806 - ETH_P_ATALK = 0x809b - ETH_P_ATMFATE = 0x8884 - ETH_P_ATMMPOA = 0x884c - ETH_P_AX25 = 0x2 - ETH_P_BATMAN = 0x4305 - ETH_P_BPQ = 0x8ff - ETH_P_CAIF = 0xf7 - ETH_P_CAN = 0xc - ETH_P_CANFD = 0xd - ETH_P_CONTROL = 0x16 - ETH_P_CUST = 0x6006 - ETH_P_DDCMP = 0x6 - ETH_P_DEC = 0x6000 - ETH_P_DIAG = 0x6005 - ETH_P_DNA_DL = 0x6001 - ETH_P_DNA_RC = 0x6002 - ETH_P_DNA_RT = 0x6003 - ETH_P_DSA = 0x1b - ETH_P_DSA_8021Q = 0xdadb - ETH_P_ECONET = 0x18 - ETH_P_EDSA = 0xdada - ETH_P_ERSPAN = 0x88be - ETH_P_ERSPAN2 = 0x22eb - ETH_P_FCOE = 0x8906 - ETH_P_FIP = 0x8914 - ETH_P_HDLC = 0x19 - ETH_P_HSR = 0x892f - ETH_P_IBOE = 0x8915 - ETH_P_IEEE802154 = 0xf6 - ETH_P_IEEEPUP = 0xa00 - ETH_P_IEEEPUPAT = 0xa01 - ETH_P_IFE = 0xed3e - ETH_P_IP = 0x800 - ETH_P_IPV6 = 0x86dd - ETH_P_IPX = 0x8137 - ETH_P_IRDA = 0x17 - ETH_P_LAT = 0x6004 - ETH_P_LINK_CTL = 0x886c - ETH_P_LLDP = 0x88cc - ETH_P_LOCALTALK = 0x9 - ETH_P_LOOP = 0x60 - ETH_P_LOOPBACK = 0x9000 - ETH_P_MACSEC = 0x88e5 - ETH_P_MAP = 0xf9 - ETH_P_MOBITEX = 0x15 - ETH_P_MPLS_MC = 0x8848 - ETH_P_MPLS_UC = 0x8847 - ETH_P_MVRP = 0x88f5 - ETH_P_NCSI = 0x88f8 - ETH_P_NSH = 0x894f - ETH_P_PAE = 0x888e - ETH_P_PAUSE = 0x8808 - ETH_P_PHONET = 0xf5 - ETH_P_PPPTALK = 0x10 - ETH_P_PPP_DISC = 0x8863 - ETH_P_PPP_MP = 0x8 - ETH_P_PPP_SES = 0x8864 - ETH_P_PREAUTH = 0x88c7 - ETH_P_PRP = 0x88fb - ETH_P_PUP = 0x200 - ETH_P_PUPAT = 0x201 - ETH_P_QINQ1 = 0x9100 - ETH_P_QINQ2 = 0x9200 - ETH_P_QINQ3 = 0x9300 - ETH_P_RARP = 0x8035 - ETH_P_SCA = 0x6007 - ETH_P_SLOW = 0x8809 - ETH_P_SNAP = 0x5 - ETH_P_TDLS = 0x890d - ETH_P_TEB = 0x6558 - ETH_P_TIPC = 0x88ca - ETH_P_TRAILER = 0x1c - ETH_P_TR_802_2 = 0x11 - ETH_P_TSN = 0x22f0 - ETH_P_WAN_PPP = 0x7 - ETH_P_WCCP = 0x883e - ETH_P_X25 = 0x805 - ETH_P_XDSA = 0xf8 - EXABYTE_ENABLE_NEST = 0xf0 - EXT2_SUPER_MAGIC = 0xef53 - EXT3_SUPER_MAGIC = 0xef53 - EXT4_SUPER_MAGIC = 0xef53 - EXTA = 0xe - EXTB = 0xf - EXTPROC = 0x10000 - F2FS_SUPER_MAGIC = 0xf2f52010 - FALLOC_FL_COLLAPSE_RANGE = 0x8 - FALLOC_FL_INSERT_RANGE = 0x20 - FALLOC_FL_KEEP_SIZE = 0x1 - FALLOC_FL_NO_HIDE_STALE = 0x4 - FALLOC_FL_PUNCH_HOLE = 0x2 - FALLOC_FL_UNSHARE_RANGE = 0x40 - FALLOC_FL_ZERO_RANGE = 0x10 - FANOTIFY_METADATA_VERSION = 0x3 - FAN_ACCESS = 0x1 - FAN_ACCESS_PERM = 0x20000 - FAN_ALLOW = 0x1 - FAN_ALL_CLASS_BITS = 0xc - FAN_ALL_EVENTS = 0x3b - FAN_ALL_INIT_FLAGS = 0x3f - FAN_ALL_MARK_FLAGS = 0xff - FAN_ALL_OUTGOING_EVENTS = 0x3403b - FAN_ALL_PERM_EVENTS = 0x30000 - FAN_ATTRIB = 0x4 - FAN_AUDIT = 0x10 - FAN_CLASS_CONTENT = 0x4 - FAN_CLASS_NOTIF = 0x0 - FAN_CLASS_PRE_CONTENT = 0x8 - FAN_CLOEXEC = 0x1 - FAN_CLOSE = 0x18 - FAN_CLOSE_NOWRITE = 0x10 - FAN_CLOSE_WRITE = 0x8 - FAN_CREATE = 0x100 - FAN_DELETE = 0x200 - FAN_DELETE_SELF = 0x400 - FAN_DENY = 0x2 - FAN_ENABLE_AUDIT = 0x40 - FAN_EVENT_INFO_TYPE_FID = 0x1 - FAN_EVENT_METADATA_LEN = 0x18 - FAN_EVENT_ON_CHILD = 0x8000000 - FAN_MARK_ADD = 0x1 - FAN_MARK_DONT_FOLLOW = 0x4 - FAN_MARK_FILESYSTEM = 0x100 - FAN_MARK_FLUSH = 0x80 - FAN_MARK_IGNORED_MASK = 0x20 - FAN_MARK_IGNORED_SURV_MODIFY = 0x40 - FAN_MARK_INODE = 0x0 - FAN_MARK_MOUNT = 0x10 - FAN_MARK_ONLYDIR = 0x8 - FAN_MARK_REMOVE = 0x2 - FAN_MODIFY = 0x2 - FAN_MOVE = 0xc0 - FAN_MOVED_FROM = 0x40 - FAN_MOVED_TO = 0x80 - FAN_MOVE_SELF = 0x800 - FAN_NOFD = -0x1 - FAN_NONBLOCK = 0x2 - FAN_ONDIR = 0x40000000 - FAN_OPEN = 0x20 - FAN_OPEN_EXEC = 0x1000 - 
FAN_OPEN_EXEC_PERM = 0x40000 - FAN_OPEN_PERM = 0x10000 - FAN_Q_OVERFLOW = 0x4000 - FAN_REPORT_FID = 0x200 - FAN_REPORT_TID = 0x100 - FAN_UNLIMITED_MARKS = 0x20 - FAN_UNLIMITED_QUEUE = 0x10 - FD_CLOEXEC = 0x1 - FD_SETSIZE = 0x400 - FF0 = 0x0 - FF1 = 0x8000 - FFDLY = 0x8000 - FLUSHO = 0x1000 - FS_ENCRYPTION_MODE_ADIANTUM = 0x9 - FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 - FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 - FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 - FS_ENCRYPTION_MODE_AES_256_CTS = 0x4 - FS_ENCRYPTION_MODE_AES_256_GCM = 0x2 - FS_ENCRYPTION_MODE_AES_256_XTS = 0x1 - FS_ENCRYPTION_MODE_INVALID = 0x0 - FS_ENCRYPTION_MODE_SPECK128_256_CTS = 0x8 - FS_ENCRYPTION_MODE_SPECK128_256_XTS = 0x7 - FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 - FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 - FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 - FS_KEY_DESCRIPTOR_SIZE = 0x8 - FS_KEY_DESC_PREFIX = "fscrypt:" - FS_KEY_DESC_PREFIX_SIZE = 0x8 - FS_MAX_KEY_SIZE = 0x40 - FS_POLICY_FLAGS_PAD_16 = 0x2 - FS_POLICY_FLAGS_PAD_32 = 0x3 - FS_POLICY_FLAGS_PAD_4 = 0x0 - FS_POLICY_FLAGS_PAD_8 = 0x1 - FS_POLICY_FLAGS_PAD_MASK = 0x3 - FS_POLICY_FLAGS_VALID = 0x7 - FUTEXFS_SUPER_MAGIC = 0xbad1dea - F_ADD_SEALS = 0x409 - F_DUPFD = 0x0 - F_DUPFD_CLOEXEC = 0x406 - F_EXLCK = 0x4 - F_GETFD = 0x1 - F_GETFL = 0x3 - F_GETLEASE = 0x401 - F_GETLK = 0x7 - F_GETLK64 = 0x7 - F_GETOWN = 0x5 - F_GETOWN_EX = 0x10 - F_GETPIPE_SZ = 0x408 - F_GETSIG = 0xb - F_GET_FILE_RW_HINT = 0x40d - F_GET_RW_HINT = 0x40b - F_GET_SEALS = 0x40a - F_LOCK = 0x1 - F_NOTIFY = 0x402 - F_OFD_GETLK = 0x24 - F_OFD_SETLK = 0x25 - F_OFD_SETLKW = 0x26 - F_OK = 0x0 - F_RDLCK = 0x1 - F_SEAL_FUTURE_WRITE = 0x10 - F_SEAL_GROW = 0x4 - F_SEAL_SEAL = 0x1 - F_SEAL_SHRINK = 0x2 - F_SEAL_WRITE = 0x8 - F_SETFD = 0x2 - F_SETFL = 0x4 - F_SETLEASE = 0x400 - F_SETLK = 0x8 - F_SETLK64 = 0x8 - F_SETLKW = 0x9 - F_SETLKW64 = 0x9 - F_SETOWN = 0x6 - F_SETOWN_EX = 0xf - F_SETPIPE_SZ = 0x407 - F_SETSIG = 0xa - F_SET_FILE_RW_HINT = 0x40e - F_SET_RW_HINT = 0x40c - F_SHLCK = 0x8 - F_TEST = 0x3 - F_TLOCK = 0x2 - F_ULOCK = 0x0 - F_UNLCK = 0x3 - F_WRLCK = 0x2 - GENL_ADMIN_PERM = 0x1 - GENL_CMD_CAP_DO = 0x2 - GENL_CMD_CAP_DUMP = 0x4 - GENL_CMD_CAP_HASPOL = 0x8 - GENL_HDRLEN = 0x4 - GENL_ID_CTRL = 0x10 - GENL_ID_PMCRAID = 0x12 - GENL_ID_VFS_DQUOT = 0x11 - GENL_MAX_ID = 0x3ff - GENL_MIN_ID = 0x10 - GENL_NAMSIZ = 0x10 - GENL_START_ALLOC = 0x13 - GENL_UNS_ADMIN_PERM = 0x10 - GRND_NONBLOCK = 0x1 - GRND_RANDOM = 0x2 - HDIO_DRIVE_CMD = 0x31f - HDIO_DRIVE_CMD_AEB = 0x31e - HDIO_DRIVE_CMD_HDR_SIZE = 0x4 - HDIO_DRIVE_HOB_HDR_SIZE = 0x8 - HDIO_DRIVE_RESET = 0x31c - HDIO_DRIVE_TASK = 0x31e - HDIO_DRIVE_TASKFILE = 0x31d - HDIO_DRIVE_TASK_HDR_SIZE = 0x8 - HDIO_GETGEO = 0x301 - HDIO_GET_32BIT = 0x309 - HDIO_GET_ACOUSTIC = 0x30f - HDIO_GET_ADDRESS = 0x310 - HDIO_GET_BUSSTATE = 0x31a - HDIO_GET_DMA = 0x30b - HDIO_GET_IDENTITY = 0x30d - HDIO_GET_KEEPSETTINGS = 0x308 - HDIO_GET_MULTCOUNT = 0x304 - HDIO_GET_NICE = 0x30c - HDIO_GET_NOWERR = 0x30a - HDIO_GET_QDMA = 0x305 - HDIO_GET_UNMASKINTR = 0x302 - HDIO_GET_WCACHE = 0x30e - HDIO_OBSOLETE_IDENTITY = 0x307 - HDIO_SCAN_HWIF = 0x328 - HDIO_SET_32BIT = 0x324 - HDIO_SET_ACOUSTIC = 0x32c - HDIO_SET_ADDRESS = 0x32f - HDIO_SET_BUSSTATE = 0x32d - HDIO_SET_DMA = 0x326 - HDIO_SET_KEEPSETTINGS = 0x323 - HDIO_SET_MULTCOUNT = 0x321 - HDIO_SET_NICE = 0x329 - HDIO_SET_NOWERR = 0x325 - HDIO_SET_PIO_MODE = 0x327 - HDIO_SET_QDMA = 0x32e - HDIO_SET_UNMASKINTR = 0x322 - HDIO_SET_WCACHE = 0x32b - HDIO_SET_XFER = 0x306 - HDIO_TRISTATE_HWIF = 0x31b - HDIO_UNREGISTER_HWIF = 0x32a - HOSTFS_SUPER_MAGIC = 0xc0ffee - 
HPFS_SUPER_MAGIC = 0xf995e849 - HUGETLBFS_MAGIC = 0x958458f6 - HUPCL = 0x400 - IBSHIFT = 0x10 - ICANON = 0x2 - ICMPV6_FILTER = 0x1 - ICRNL = 0x100 - IEXTEN = 0x8000 - IFA_F_DADFAILED = 0x8 - IFA_F_DEPRECATED = 0x20 - IFA_F_HOMEADDRESS = 0x10 - IFA_F_MANAGETEMPADDR = 0x100 - IFA_F_MCAUTOJOIN = 0x400 - IFA_F_NODAD = 0x2 - IFA_F_NOPREFIXROUTE = 0x200 - IFA_F_OPTIMISTIC = 0x4 - IFA_F_PERMANENT = 0x80 - IFA_F_SECONDARY = 0x1 - IFA_F_STABLE_PRIVACY = 0x800 - IFA_F_TEMPORARY = 0x1 - IFA_F_TENTATIVE = 0x40 - IFA_MAX = 0xa - IFF_ALLMULTI = 0x200 - IFF_ATTACH_QUEUE = 0x200 - IFF_AUTOMEDIA = 0x4000 - IFF_BROADCAST = 0x2 - IFF_DEBUG = 0x4 - IFF_DETACH_QUEUE = 0x400 - IFF_DORMANT = 0x20000 - IFF_DYNAMIC = 0x8000 - IFF_ECHO = 0x40000 - IFF_LOOPBACK = 0x8 - IFF_LOWER_UP = 0x10000 - IFF_MASTER = 0x400 - IFF_MULTICAST = 0x1000 - IFF_MULTI_QUEUE = 0x100 - IFF_NAPI = 0x10 - IFF_NAPI_FRAGS = 0x20 - IFF_NOARP = 0x80 - IFF_NOFILTER = 0x1000 - IFF_NOTRAILERS = 0x20 - IFF_NO_PI = 0x1000 - IFF_ONE_QUEUE = 0x2000 - IFF_PERSIST = 0x800 - IFF_POINTOPOINT = 0x10 - IFF_PORTSEL = 0x2000 - IFF_PROMISC = 0x100 - IFF_RUNNING = 0x40 - IFF_SLAVE = 0x800 - IFF_TAP = 0x2 - IFF_TUN = 0x1 - IFF_TUN_EXCL = 0x8000 - IFF_UP = 0x1 - IFF_VNET_HDR = 0x4000 - IFF_VOLATILE = 0x70c5a - IFNAMSIZ = 0x10 - IGNBRK = 0x1 - IGNCR = 0x80 - IGNPAR = 0x4 - IMAXBEL = 0x2000 - INLCR = 0x40 - INPCK = 0x10 - IN_ACCESS = 0x1 - IN_ALL_EVENTS = 0xfff - IN_ATTRIB = 0x4 - IN_CLASSA_HOST = 0xffffff - IN_CLASSA_MAX = 0x80 - IN_CLASSA_NET = 0xff000000 - IN_CLASSA_NSHIFT = 0x18 - IN_CLASSB_HOST = 0xffff - IN_CLASSB_MAX = 0x10000 - IN_CLASSB_NET = 0xffff0000 - IN_CLASSB_NSHIFT = 0x10 - IN_CLASSC_HOST = 0xff - IN_CLASSC_NET = 0xffffff00 - IN_CLASSC_NSHIFT = 0x8 - IN_CLOEXEC = 0x400000 - IN_CLOSE = 0x18 - IN_CLOSE_NOWRITE = 0x10 - IN_CLOSE_WRITE = 0x8 - IN_CREATE = 0x100 - IN_DELETE = 0x200 - IN_DELETE_SELF = 0x400 - IN_DONT_FOLLOW = 0x2000000 - IN_EXCL_UNLINK = 0x4000000 - IN_IGNORED = 0x8000 - IN_ISDIR = 0x40000000 - IN_LOOPBACKNET = 0x7f - IN_MASK_ADD = 0x20000000 - IN_MASK_CREATE = 0x10000000 - IN_MODIFY = 0x2 - IN_MOVE = 0xc0 - IN_MOVED_FROM = 0x40 - IN_MOVED_TO = 0x80 - IN_MOVE_SELF = 0x800 - IN_NONBLOCK = 0x4000 - IN_ONESHOT = 0x80000000 - IN_ONLYDIR = 0x1000000 - IN_OPEN = 0x20 - IN_Q_OVERFLOW = 0x4000 - IN_UNMOUNT = 0x2000 - IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 - IPPROTO_AH = 0x33 - IPPROTO_BEETPH = 0x5e - IPPROTO_COMP = 0x6c - IPPROTO_DCCP = 0x21 - IPPROTO_DSTOPTS = 0x3c - IPPROTO_EGP = 0x8 - IPPROTO_ENCAP = 0x62 - IPPROTO_ESP = 0x32 - IPPROTO_FRAGMENT = 0x2c - IPPROTO_GRE = 0x2f - IPPROTO_HOPOPTS = 0x0 - IPPROTO_ICMP = 0x1 - IPPROTO_ICMPV6 = 0x3a - IPPROTO_IDP = 0x16 - IPPROTO_IGMP = 0x2 - IPPROTO_IP = 0x0 - IPPROTO_IPIP = 0x4 - IPPROTO_IPV6 = 0x29 - IPPROTO_MH = 0x87 - IPPROTO_MPLS = 0x89 - IPPROTO_MTP = 0x5c - IPPROTO_NONE = 0x3b - IPPROTO_PIM = 0x67 - IPPROTO_PUP = 0xc - IPPROTO_RAW = 0xff - IPPROTO_ROUTING = 0x2b - IPPROTO_RSVP = 0x2e - IPPROTO_SCTP = 0x84 - IPPROTO_TCP = 0x6 - IPPROTO_TP = 0x1d - IPPROTO_UDP = 0x11 - IPPROTO_UDPLITE = 0x88 - IPV6_2292DSTOPTS = 0x4 - IPV6_2292HOPLIMIT = 0x8 - IPV6_2292HOPOPTS = 0x3 - IPV6_2292PKTINFO = 0x2 - IPV6_2292PKTOPTIONS = 0x6 - IPV6_2292RTHDR = 0x5 - IPV6_ADDRFORM = 0x1 - IPV6_ADDR_PREFERENCES = 0x48 - IPV6_ADD_MEMBERSHIP = 0x14 - IPV6_AUTHHDR = 0xa - IPV6_AUTOFLOWLABEL = 0x46 - IPV6_CHECKSUM = 0x7 - IPV6_DONTFRAG = 0x3e - IPV6_DROP_MEMBERSHIP = 0x15 - IPV6_DSTOPTS = 0x3b - IPV6_FREEBIND = 0x4e - IPV6_HDRINCL = 0x24 - IPV6_HOPLIMIT = 0x34 - IPV6_HOPOPTS = 0x36 - IPV6_IPSEC_POLICY = 0x22 - 
IPV6_JOIN_ANYCAST = 0x1b - IPV6_JOIN_GROUP = 0x14 - IPV6_LEAVE_ANYCAST = 0x1c - IPV6_LEAVE_GROUP = 0x15 - IPV6_MINHOPCOUNT = 0x49 - IPV6_MTU = 0x18 - IPV6_MTU_DISCOVER = 0x17 - IPV6_MULTICAST_ALL = 0x1d - IPV6_MULTICAST_HOPS = 0x12 - IPV6_MULTICAST_IF = 0x11 - IPV6_MULTICAST_LOOP = 0x13 - IPV6_NEXTHOP = 0x9 - IPV6_ORIGDSTADDR = 0x4a - IPV6_PATHMTU = 0x3d - IPV6_PKTINFO = 0x32 - IPV6_PMTUDISC_DO = 0x2 - IPV6_PMTUDISC_DONT = 0x0 - IPV6_PMTUDISC_INTERFACE = 0x4 - IPV6_PMTUDISC_OMIT = 0x5 - IPV6_PMTUDISC_PROBE = 0x3 - IPV6_PMTUDISC_WANT = 0x1 - IPV6_RECVDSTOPTS = 0x3a - IPV6_RECVERR = 0x19 - IPV6_RECVFRAGSIZE = 0x4d - IPV6_RECVHOPLIMIT = 0x33 - IPV6_RECVHOPOPTS = 0x35 - IPV6_RECVORIGDSTADDR = 0x4a - IPV6_RECVPATHMTU = 0x3c - IPV6_RECVPKTINFO = 0x31 - IPV6_RECVRTHDR = 0x38 - IPV6_RECVTCLASS = 0x42 - IPV6_ROUTER_ALERT = 0x16 - IPV6_ROUTER_ALERT_ISOLATE = 0x1e - IPV6_RTHDR = 0x39 - IPV6_RTHDRDSTOPTS = 0x37 - IPV6_RTHDR_LOOSE = 0x0 - IPV6_RTHDR_STRICT = 0x1 - IPV6_RTHDR_TYPE_0 = 0x0 - IPV6_RXDSTOPTS = 0x3b - IPV6_RXHOPOPTS = 0x36 - IPV6_TCLASS = 0x43 - IPV6_TRANSPARENT = 0x4b - IPV6_UNICAST_HOPS = 0x10 - IPV6_UNICAST_IF = 0x4c - IPV6_V6ONLY = 0x1a - IPV6_XFRM_POLICY = 0x23 - IP_ADD_MEMBERSHIP = 0x23 - IP_ADD_SOURCE_MEMBERSHIP = 0x27 - IP_BIND_ADDRESS_NO_PORT = 0x18 - IP_BLOCK_SOURCE = 0x26 - IP_CHECKSUM = 0x17 - IP_DEFAULT_MULTICAST_LOOP = 0x1 - IP_DEFAULT_MULTICAST_TTL = 0x1 - IP_DF = 0x4000 - IP_DROP_MEMBERSHIP = 0x24 - IP_DROP_SOURCE_MEMBERSHIP = 0x28 - IP_FREEBIND = 0xf - IP_HDRINCL = 0x3 - IP_IPSEC_POLICY = 0x10 - IP_MAXPACKET = 0xffff - IP_MAX_MEMBERSHIPS = 0x14 - IP_MF = 0x2000 - IP_MINTTL = 0x15 - IP_MSFILTER = 0x29 - IP_MSS = 0x240 - IP_MTU = 0xe - IP_MTU_DISCOVER = 0xa - IP_MULTICAST_ALL = 0x31 - IP_MULTICAST_IF = 0x20 - IP_MULTICAST_LOOP = 0x22 - IP_MULTICAST_TTL = 0x21 - IP_NODEFRAG = 0x16 - IP_OFFMASK = 0x1fff - IP_OPTIONS = 0x4 - IP_ORIGDSTADDR = 0x14 - IP_PASSSEC = 0x12 - IP_PKTINFO = 0x8 - IP_PKTOPTIONS = 0x9 - IP_PMTUDISC = 0xa - IP_PMTUDISC_DO = 0x2 - IP_PMTUDISC_DONT = 0x0 - IP_PMTUDISC_INTERFACE = 0x4 - IP_PMTUDISC_OMIT = 0x5 - IP_PMTUDISC_PROBE = 0x3 - IP_PMTUDISC_WANT = 0x1 - IP_RECVERR = 0xb - IP_RECVFRAGSIZE = 0x19 - IP_RECVOPTS = 0x6 - IP_RECVORIGDSTADDR = 0x14 - IP_RECVRETOPTS = 0x7 - IP_RECVTOS = 0xd - IP_RECVTTL = 0xc - IP_RETOPTS = 0x7 - IP_RF = 0x8000 - IP_ROUTER_ALERT = 0x5 - IP_TOS = 0x1 - IP_TRANSPARENT = 0x13 - IP_TTL = 0x2 - IP_UNBLOCK_SOURCE = 0x25 - IP_UNICAST_IF = 0x32 - IP_XFRM_POLICY = 0x11 - ISIG = 0x1 - ISOFS_SUPER_MAGIC = 0x9660 - ISTRIP = 0x20 - IUCLC = 0x200 - IUTF8 = 0x4000 - IXANY = 0x800 - IXOFF = 0x1000 - IXON = 0x400 - JFFS2_SUPER_MAGIC = 0x72b6 - KEXEC_ARCH_386 = 0x30000 - KEXEC_ARCH_68K = 0x40000 - KEXEC_ARCH_AARCH64 = 0xb70000 - KEXEC_ARCH_ARM = 0x280000 - KEXEC_ARCH_DEFAULT = 0x0 - KEXEC_ARCH_IA_64 = 0x320000 - KEXEC_ARCH_MASK = 0xffff0000 - KEXEC_ARCH_MIPS = 0x80000 - KEXEC_ARCH_MIPS_LE = 0xa0000 - KEXEC_ARCH_PPC = 0x140000 - KEXEC_ARCH_PPC64 = 0x150000 - KEXEC_ARCH_S390 = 0x160000 - KEXEC_ARCH_SH = 0x2a0000 - KEXEC_ARCH_X86_64 = 0x3e0000 - KEXEC_FILE_NO_INITRAMFS = 0x4 - KEXEC_FILE_ON_CRASH = 0x2 - KEXEC_FILE_UNLOAD = 0x1 - KEXEC_ON_CRASH = 0x1 - KEXEC_PRESERVE_CONTEXT = 0x2 - KEXEC_SEGMENT_MAX = 0x10 - KEYCTL_ASSUME_AUTHORITY = 0x10 - KEYCTL_CAPABILITIES = 0x1f - KEYCTL_CAPS0_BIG_KEY = 0x10 - KEYCTL_CAPS0_CAPABILITIES = 0x1 - KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 - KEYCTL_CAPS0_INVALIDATE = 0x20 - KEYCTL_CAPS0_MOVE = 0x80 - KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 - KEYCTL_CAPS0_PUBLIC_KEY = 0x8 - KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 - 
KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 - KEYCTL_CAPS1_NS_KEY_TAG = 0x2 - KEYCTL_CHOWN = 0x4 - KEYCTL_CLEAR = 0x7 - KEYCTL_DESCRIBE = 0x6 - KEYCTL_DH_COMPUTE = 0x17 - KEYCTL_GET_KEYRING_ID = 0x0 - KEYCTL_GET_PERSISTENT = 0x16 - KEYCTL_GET_SECURITY = 0x11 - KEYCTL_INSTANTIATE = 0xc - KEYCTL_INSTANTIATE_IOV = 0x14 - KEYCTL_INVALIDATE = 0x15 - KEYCTL_JOIN_SESSION_KEYRING = 0x1 - KEYCTL_LINK = 0x8 - KEYCTL_MOVE = 0x1e - KEYCTL_MOVE_EXCL = 0x1 - KEYCTL_NEGATE = 0xd - KEYCTL_PKEY_DECRYPT = 0x1a - KEYCTL_PKEY_ENCRYPT = 0x19 - KEYCTL_PKEY_QUERY = 0x18 - KEYCTL_PKEY_SIGN = 0x1b - KEYCTL_PKEY_VERIFY = 0x1c - KEYCTL_READ = 0xb - KEYCTL_REJECT = 0x13 - KEYCTL_RESTRICT_KEYRING = 0x1d - KEYCTL_REVOKE = 0x3 - KEYCTL_SEARCH = 0xa - KEYCTL_SESSION_TO_PARENT = 0x12 - KEYCTL_SETPERM = 0x5 - KEYCTL_SET_REQKEY_KEYRING = 0xe - KEYCTL_SET_TIMEOUT = 0xf - KEYCTL_SUPPORTS_DECRYPT = 0x2 - KEYCTL_SUPPORTS_ENCRYPT = 0x1 - KEYCTL_SUPPORTS_SIGN = 0x4 - KEYCTL_SUPPORTS_VERIFY = 0x8 - KEYCTL_UNLINK = 0x9 - KEYCTL_UPDATE = 0x2 - KEY_REQKEY_DEFL_DEFAULT = 0x0 - KEY_REQKEY_DEFL_GROUP_KEYRING = 0x6 - KEY_REQKEY_DEFL_NO_CHANGE = -0x1 - KEY_REQKEY_DEFL_PROCESS_KEYRING = 0x2 - KEY_REQKEY_DEFL_REQUESTOR_KEYRING = 0x7 - KEY_REQKEY_DEFL_SESSION_KEYRING = 0x3 - KEY_REQKEY_DEFL_THREAD_KEYRING = 0x1 - KEY_REQKEY_DEFL_USER_KEYRING = 0x4 - KEY_REQKEY_DEFL_USER_SESSION_KEYRING = 0x5 - KEY_SPEC_GROUP_KEYRING = -0x6 - KEY_SPEC_PROCESS_KEYRING = -0x2 - KEY_SPEC_REQKEY_AUTH_KEY = -0x7 - KEY_SPEC_REQUESTOR_KEYRING = -0x8 - KEY_SPEC_SESSION_KEYRING = -0x3 - KEY_SPEC_THREAD_KEYRING = -0x1 - KEY_SPEC_USER_KEYRING = -0x4 - KEY_SPEC_USER_SESSION_KEYRING = -0x5 - LINUX_REBOOT_CMD_CAD_OFF = 0x0 - LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef - LINUX_REBOOT_CMD_HALT = 0xcdef0123 - LINUX_REBOOT_CMD_KEXEC = 0x45584543 - LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc - LINUX_REBOOT_CMD_RESTART = 0x1234567 - LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4 - LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2 - LINUX_REBOOT_MAGIC1 = 0xfee1dead - LINUX_REBOOT_MAGIC2 = 0x28121969 - LOCK_EX = 0x2 - LOCK_NB = 0x4 - LOCK_SH = 0x1 - LOCK_UN = 0x8 - LOOP_CLR_FD = 0x4c01 - LOOP_CTL_ADD = 0x4c80 - LOOP_CTL_GET_FREE = 0x4c82 - LOOP_CTL_REMOVE = 0x4c81 - LOOP_GET_STATUS = 0x4c03 - LOOP_GET_STATUS64 = 0x4c05 - LOOP_SET_BLOCK_SIZE = 0x4c09 - LOOP_SET_CAPACITY = 0x4c07 - LOOP_SET_DIRECT_IO = 0x4c08 - LOOP_SET_FD = 0x4c00 - LOOP_SET_STATUS = 0x4c02 - LOOP_SET_STATUS64 = 0x4c04 - LO_KEY_SIZE = 0x20 - LO_NAME_SIZE = 0x40 - MADV_DODUMP = 0x11 - MADV_DOFORK = 0xb - MADV_DONTDUMP = 0x10 - MADV_DONTFORK = 0xa - MADV_DONTNEED = 0x4 - MADV_FREE = 0x8 - MADV_HUGEPAGE = 0xe - MADV_HWPOISON = 0x64 - MADV_KEEPONFORK = 0x13 - MADV_MERGEABLE = 0xc - MADV_NOHUGEPAGE = 0xf - MADV_NORMAL = 0x0 - MADV_RANDOM = 0x1 - MADV_REMOVE = 0x9 - MADV_SEQUENTIAL = 0x2 - MADV_UNMERGEABLE = 0xd - MADV_WILLNEED = 0x3 - MADV_WIPEONFORK = 0x12 - MAP_ANON = 0x20 - MAP_ANONYMOUS = 0x20 - MAP_DENYWRITE = 0x800 - MAP_EXECUTABLE = 0x1000 - MAP_FILE = 0x0 - MAP_FIXED = 0x10 - MAP_FIXED_NOREPLACE = 0x100000 - MAP_GROWSDOWN = 0x200 - MAP_HUGETLB = 0x40000 - MAP_HUGE_MASK = 0x3f - MAP_HUGE_SHIFT = 0x1a - MAP_LOCKED = 0x100 - MAP_NONBLOCK = 0x10000 - MAP_NORESERVE = 0x40 - MAP_POPULATE = 0x8000 - MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 - MAP_SHARED = 0x1 - MAP_SHARED_VALIDATE = 0x3 - MAP_STACK = 0x20000 - MAP_TYPE = 0xf - MCAST_BLOCK_SOURCE = 0x2b - MCAST_EXCLUDE = 0x0 - MCAST_INCLUDE = 0x1 - MCAST_JOIN_GROUP = 0x2a - MCAST_JOIN_SOURCE_GROUP = 0x2e - MCAST_LEAVE_GROUP = 0x2d - MCAST_LEAVE_SOURCE_GROUP = 0x2f - MCAST_MSFILTER = 0x30 - MCAST_UNBLOCK_SOURCE 
= 0x2c - MCL_CURRENT = 0x2000 - MCL_FUTURE = 0x4000 - MCL_ONFAULT = 0x8000 - MFD_ALLOW_SEALING = 0x2 - MFD_CLOEXEC = 0x1 - MFD_HUGETLB = 0x4 - MFD_HUGE_16GB = -0x78000000 - MFD_HUGE_16MB = 0x60000000 - MFD_HUGE_1GB = 0x78000000 - MFD_HUGE_1MB = 0x50000000 - MFD_HUGE_256MB = 0x70000000 - MFD_HUGE_2GB = 0x7c000000 - MFD_HUGE_2MB = 0x54000000 - MFD_HUGE_32MB = 0x64000000 - MFD_HUGE_512KB = 0x4c000000 - MFD_HUGE_512MB = 0x74000000 - MFD_HUGE_64KB = 0x40000000 - MFD_HUGE_8MB = 0x5c000000 - MFD_HUGE_MASK = 0x3f - MFD_HUGE_SHIFT = 0x1a - MINIX2_SUPER_MAGIC = 0x2468 - MINIX2_SUPER_MAGIC2 = 0x2478 - MINIX3_SUPER_MAGIC = 0x4d5a - MINIX_SUPER_MAGIC = 0x137f - MINIX_SUPER_MAGIC2 = 0x138f - MNT_DETACH = 0x2 - MNT_EXPIRE = 0x4 - MNT_FORCE = 0x1 - MODULE_INIT_IGNORE_MODVERSIONS = 0x1 - MODULE_INIT_IGNORE_VERMAGIC = 0x2 - MSDOS_SUPER_MAGIC = 0x4d44 - MSG_BATCH = 0x40000 - MSG_CMSG_CLOEXEC = 0x40000000 - MSG_CONFIRM = 0x800 - MSG_CTRUNC = 0x8 - MSG_DONTROUTE = 0x4 - MSG_DONTWAIT = 0x40 - MSG_EOR = 0x80 - MSG_ERRQUEUE = 0x2000 - MSG_FASTOPEN = 0x20000000 - MSG_FIN = 0x200 - MSG_MORE = 0x8000 - MSG_NOSIGNAL = 0x4000 - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_PROXY = 0x10 - MSG_RST = 0x1000 - MSG_SYN = 0x400 - MSG_TRUNC = 0x20 - MSG_TRYHARD = 0x4 - MSG_WAITALL = 0x100 - MSG_WAITFORONE = 0x10000 - MSG_ZEROCOPY = 0x4000000 - MS_ACTIVE = 0x40000000 - MS_ASYNC = 0x1 - MS_BIND = 0x1000 - MS_BORN = 0x20000000 - MS_DIRSYNC = 0x80 - MS_INVALIDATE = 0x2 - MS_I_VERSION = 0x800000 - MS_KERNMOUNT = 0x400000 - MS_LAZYTIME = 0x2000000 - MS_MANDLOCK = 0x40 - MS_MGC_MSK = 0xffff0000 - MS_MGC_VAL = 0xc0ed0000 - MS_MOVE = 0x2000 - MS_NOATIME = 0x400 - MS_NODEV = 0x4 - MS_NODIRATIME = 0x800 - MS_NOEXEC = 0x8 - MS_NOREMOTELOCK = 0x8000000 - MS_NOSEC = 0x10000000 - MS_NOSUID = 0x2 - MS_NOUSER = -0x80000000 - MS_POSIXACL = 0x10000 - MS_PRIVATE = 0x40000 - MS_RDONLY = 0x1 - MS_REC = 0x4000 - MS_RELATIME = 0x200000 - MS_REMOUNT = 0x20 - MS_RMT_MASK = 0x2800051 - MS_SHARED = 0x100000 - MS_SILENT = 0x8000 - MS_SLAVE = 0x80000 - MS_STRICTATIME = 0x1000000 - MS_SUBMOUNT = 0x4000000 - MS_SYNC = 0x4 - MS_SYNCHRONOUS = 0x10 - MS_UNBINDABLE = 0x20000 - MS_VERBOSE = 0x8000 - MTD_INODE_FS_MAGIC = 0x11307854 - NAME_MAX = 0xff - NCP_SUPER_MAGIC = 0x564c - NETLINK_ADD_MEMBERSHIP = 0x1 - NETLINK_AUDIT = 0x9 - NETLINK_BROADCAST_ERROR = 0x4 - NETLINK_CAP_ACK = 0xa - NETLINK_CONNECTOR = 0xb - NETLINK_CRYPTO = 0x15 - NETLINK_DNRTMSG = 0xe - NETLINK_DROP_MEMBERSHIP = 0x2 - NETLINK_ECRYPTFS = 0x13 - NETLINK_EXT_ACK = 0xb - NETLINK_FIB_LOOKUP = 0xa - NETLINK_FIREWALL = 0x3 - NETLINK_GENERIC = 0x10 - NETLINK_GET_STRICT_CHK = 0xc - NETLINK_INET_DIAG = 0x4 - NETLINK_IP6_FW = 0xd - NETLINK_ISCSI = 0x8 - NETLINK_KOBJECT_UEVENT = 0xf - NETLINK_LISTEN_ALL_NSID = 0x8 - NETLINK_LIST_MEMBERSHIPS = 0x9 - NETLINK_NETFILTER = 0xc - NETLINK_NFLOG = 0x5 - NETLINK_NO_ENOBUFS = 0x5 - NETLINK_PKTINFO = 0x3 - NETLINK_RDMA = 0x14 - NETLINK_ROUTE = 0x0 - NETLINK_RX_RING = 0x6 - NETLINK_SCSITRANSPORT = 0x12 - NETLINK_SELINUX = 0x7 - NETLINK_SMC = 0x16 - NETLINK_SOCK_DIAG = 0x4 - NETLINK_TX_RING = 0x7 - NETLINK_UNUSED = 0x1 - NETLINK_USERSOCK = 0x2 - NETLINK_XFRM = 0x6 - NETNSA_MAX = 0x5 - NETNSA_NSID_NOT_ASSIGNED = -0x1 - NFDBITS = 0x40 - NFNETLINK_V0 = 0x0 - NFNLGRP_ACCT_QUOTA = 0x8 - NFNLGRP_CONNTRACK_DESTROY = 0x3 - NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 - NFNLGRP_CONNTRACK_EXP_NEW = 0x4 - NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 - NFNLGRP_CONNTRACK_NEW = 0x1 - NFNLGRP_CONNTRACK_UPDATE = 0x2 - NFNLGRP_MAX = 0x9 - NFNLGRP_NFTABLES = 0x7 - NFNLGRP_NFTRACE = 0x9 - NFNLGRP_NONE = 0x0 - 
NFNL_BATCH_MAX = 0x1 - NFNL_MSG_BATCH_BEGIN = 0x10 - NFNL_MSG_BATCH_END = 0x11 - NFNL_NFA_NEST = 0x8000 - NFNL_SUBSYS_ACCT = 0x7 - NFNL_SUBSYS_COUNT = 0xc - NFNL_SUBSYS_CTHELPER = 0x9 - NFNL_SUBSYS_CTNETLINK = 0x1 - NFNL_SUBSYS_CTNETLINK_EXP = 0x2 - NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 - NFNL_SUBSYS_IPSET = 0x6 - NFNL_SUBSYS_NFTABLES = 0xa - NFNL_SUBSYS_NFT_COMPAT = 0xb - NFNL_SUBSYS_NONE = 0x0 - NFNL_SUBSYS_OSF = 0x5 - NFNL_SUBSYS_QUEUE = 0x3 - NFNL_SUBSYS_ULOG = 0x4 - NFS_SUPER_MAGIC = 0x6969 - NILFS_SUPER_MAGIC = 0x3434 - NL0 = 0x0 - NL1 = 0x100 - NLA_ALIGNTO = 0x4 - NLA_F_NESTED = 0x8000 - NLA_F_NET_BYTEORDER = 0x4000 - NLA_HDRLEN = 0x4 - NLDLY = 0x100 - NLMSG_ALIGNTO = 0x4 - NLMSG_DONE = 0x3 - NLMSG_ERROR = 0x2 - NLMSG_HDRLEN = 0x10 - NLMSG_MIN_TYPE = 0x10 - NLMSG_NOOP = 0x1 - NLMSG_OVERRUN = 0x4 - NLM_F_ACK = 0x4 - NLM_F_ACK_TLVS = 0x200 - NLM_F_APPEND = 0x800 - NLM_F_ATOMIC = 0x400 - NLM_F_CAPPED = 0x100 - NLM_F_CREATE = 0x400 - NLM_F_DUMP = 0x300 - NLM_F_DUMP_FILTERED = 0x20 - NLM_F_DUMP_INTR = 0x10 - NLM_F_ECHO = 0x8 - NLM_F_EXCL = 0x200 - NLM_F_MATCH = 0x200 - NLM_F_MULTI = 0x2 - NLM_F_NONREC = 0x100 - NLM_F_REPLACE = 0x100 - NLM_F_REQUEST = 0x1 - NLM_F_ROOT = 0x100 - NOFLSH = 0x80 - NSFS_MAGIC = 0x6e736673 - NS_GET_NSTYPE = 0x2000b703 - NS_GET_OWNER_UID = 0x2000b704 - NS_GET_PARENT = 0x2000b702 - NS_GET_USERNS = 0x2000b701 - OCFS2_SUPER_MAGIC = 0x7461636f - OCRNL = 0x8 - OFDEL = 0x80 - OFILL = 0x40 - OLCUC = 0x2 - ONLCR = 0x4 - ONLRET = 0x20 - ONOCR = 0x10 - OPENPROM_SUPER_MAGIC = 0x9fa1 - OPOST = 0x1 - OVERLAYFS_SUPER_MAGIC = 0x794c7630 - O_ACCMODE = 0x3 - O_APPEND = 0x8 - O_ASYNC = 0x40 - O_CLOEXEC = 0x400000 - O_CREAT = 0x200 - O_DIRECT = 0x100000 - O_DIRECTORY = 0x10000 - O_DSYNC = 0x2000 - O_EXCL = 0x800 - O_FSYNC = 0x802000 - O_LARGEFILE = 0x0 - O_NDELAY = 0x4004 - O_NOATIME = 0x200000 - O_NOCTTY = 0x8000 - O_NOFOLLOW = 0x20000 - O_NONBLOCK = 0x4000 - O_PATH = 0x1000000 - O_RDONLY = 0x0 - O_RDWR = 0x2 - O_RSYNC = 0x802000 - O_SYNC = 0x802000 - O_TMPFILE = 0x2010000 - O_TRUNC = 0x400 - O_WRONLY = 0x1 - PACKET_ADD_MEMBERSHIP = 0x1 - PACKET_AUXDATA = 0x8 - PACKET_BROADCAST = 0x1 - PACKET_COPY_THRESH = 0x7 - PACKET_DROP_MEMBERSHIP = 0x2 - PACKET_FANOUT = 0x12 - PACKET_FANOUT_CBPF = 0x6 - PACKET_FANOUT_CPU = 0x2 - PACKET_FANOUT_DATA = 0x16 - PACKET_FANOUT_EBPF = 0x7 - PACKET_FANOUT_FLAG_DEFRAG = 0x8000 - PACKET_FANOUT_FLAG_ROLLOVER = 0x1000 - PACKET_FANOUT_FLAG_UNIQUEID = 0x2000 - PACKET_FANOUT_HASH = 0x0 - PACKET_FANOUT_LB = 0x1 - PACKET_FANOUT_QM = 0x5 - PACKET_FANOUT_RND = 0x4 - PACKET_FANOUT_ROLLOVER = 0x3 - PACKET_FASTROUTE = 0x6 - PACKET_HDRLEN = 0xb - PACKET_HOST = 0x0 - PACKET_IGNORE_OUTGOING = 0x17 - PACKET_KERNEL = 0x7 - PACKET_LOOPBACK = 0x5 - PACKET_LOSS = 0xe - PACKET_MR_ALLMULTI = 0x2 - PACKET_MR_MULTICAST = 0x0 - PACKET_MR_PROMISC = 0x1 - PACKET_MR_UNICAST = 0x3 - PACKET_MULTICAST = 0x2 - PACKET_ORIGDEV = 0x9 - PACKET_OTHERHOST = 0x3 - PACKET_OUTGOING = 0x4 - PACKET_QDISC_BYPASS = 0x14 - PACKET_RECV_OUTPUT = 0x3 - PACKET_RESERVE = 0xc - PACKET_ROLLOVER_STATS = 0x15 - PACKET_RX_RING = 0x5 - PACKET_STATISTICS = 0x6 - PACKET_TIMESTAMP = 0x11 - PACKET_TX_HAS_OFF = 0x13 - PACKET_TX_RING = 0xd - PACKET_TX_TIMESTAMP = 0x10 - PACKET_USER = 0x6 - PACKET_VERSION = 0xa - PACKET_VNET_HDR = 0xf - PARENB = 0x100 - PARITY_CRC16_PR0 = 0x2 - PARITY_CRC16_PR0_CCITT = 0x4 - PARITY_CRC16_PR1 = 0x3 - PARITY_CRC16_PR1_CCITT = 0x5 - PARITY_CRC32_PR0_CCITT = 0x6 - PARITY_CRC32_PR1_CCITT = 0x7 - PARITY_DEFAULT = 0x0 - PARITY_NONE = 0x1 - PARMRK = 0x8 - PARODD = 0x200 - PENDIN = 0x4000 - 
PERF_EVENT_IOC_DISABLE = 0x20002401 - PERF_EVENT_IOC_ENABLE = 0x20002400 - PERF_EVENT_IOC_ID = 0x40082407 - PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b - PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 - PERF_EVENT_IOC_PERIOD = 0x80082404 - PERF_EVENT_IOC_QUERY_BPF = 0xc008240a - PERF_EVENT_IOC_REFRESH = 0x20002402 - PERF_EVENT_IOC_RESET = 0x20002403 - PERF_EVENT_IOC_SET_BPF = 0x80042408 - PERF_EVENT_IOC_SET_FILTER = 0x80082406 - PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 - PIPEFS_MAGIC = 0x50495045 - PPPIOCATTACH = 0x8004743d - PPPIOCATTCHAN = 0x80047438 - PPPIOCCONNECT = 0x8004743a - PPPIOCDETACH = 0x8004743c - PPPIOCDISCONN = 0x20007439 - PPPIOCGASYNCMAP = 0x40047458 - PPPIOCGCHAN = 0x40047437 - PPPIOCGDEBUG = 0x40047441 - PPPIOCGFLAGS = 0x4004745a - PPPIOCGIDLE = 0x4010743f - PPPIOCGL2TPSTATS = 0x40487436 - PPPIOCGMRU = 0x40047453 - PPPIOCGNPMODE = 0xc008744c - PPPIOCGRASYNCMAP = 0x40047455 - PPPIOCGUNIT = 0x40047456 - PPPIOCGXASYNCMAP = 0x40207450 - PPPIOCNEWUNIT = 0xc004743e - PPPIOCSACTIVE = 0x80107446 - PPPIOCSASYNCMAP = 0x80047457 - PPPIOCSCOMPRESS = 0x8010744d - PPPIOCSDEBUG = 0x80047440 - PPPIOCSFLAGS = 0x80047459 - PPPIOCSMAXCID = 0x80047451 - PPPIOCSMRRU = 0x8004743b - PPPIOCSMRU = 0x80047452 - PPPIOCSNPMODE = 0x8008744b - PPPIOCSPASS = 0x80107447 - PPPIOCSRASYNCMAP = 0x80047454 - PPPIOCSXASYNCMAP = 0x8020744f - PPPIOCXFERUNIT = 0x2000744e - PRIO_PGRP = 0x1 - PRIO_PROCESS = 0x0 - PRIO_USER = 0x2 - PROC_SUPER_MAGIC = 0x9fa0 - PROT_EXEC = 0x4 - PROT_GROWSDOWN = 0x1000000 - PROT_GROWSUP = 0x2000000 - PROT_NONE = 0x0 - PROT_READ = 0x1 - PROT_WRITE = 0x2 - PR_CAPBSET_DROP = 0x18 - PR_CAPBSET_READ = 0x17 - PR_CAP_AMBIENT = 0x2f - PR_CAP_AMBIENT_CLEAR_ALL = 0x4 - PR_CAP_AMBIENT_IS_SET = 0x1 - PR_CAP_AMBIENT_LOWER = 0x3 - PR_CAP_AMBIENT_RAISE = 0x2 - PR_ENDIAN_BIG = 0x0 - PR_ENDIAN_LITTLE = 0x1 - PR_ENDIAN_PPC_LITTLE = 0x2 - PR_FPEMU_NOPRINT = 0x1 - PR_FPEMU_SIGFPE = 0x2 - PR_FP_EXC_ASYNC = 0x2 - PR_FP_EXC_DISABLED = 0x0 - PR_FP_EXC_DIV = 0x10000 - PR_FP_EXC_INV = 0x100000 - PR_FP_EXC_NONRECOV = 0x1 - PR_FP_EXC_OVF = 0x20000 - PR_FP_EXC_PRECISE = 0x3 - PR_FP_EXC_RES = 0x80000 - PR_FP_EXC_SW_ENABLE = 0x80 - PR_FP_EXC_UND = 0x40000 - PR_FP_MODE_FR = 0x1 - PR_FP_MODE_FRE = 0x2 - PR_GET_CHILD_SUBREAPER = 0x25 - PR_GET_DUMPABLE = 0x3 - PR_GET_ENDIAN = 0x13 - PR_GET_FPEMU = 0x9 - PR_GET_FPEXC = 0xb - PR_GET_FP_MODE = 0x2e - PR_GET_KEEPCAPS = 0x7 - PR_GET_NAME = 0x10 - PR_GET_NO_NEW_PRIVS = 0x27 - PR_GET_PDEATHSIG = 0x2 - PR_GET_SECCOMP = 0x15 - PR_GET_SECUREBITS = 0x1b - PR_GET_SPECULATION_CTRL = 0x34 - PR_GET_THP_DISABLE = 0x2a - PR_GET_TID_ADDRESS = 0x28 - PR_GET_TIMERSLACK = 0x1e - PR_GET_TIMING = 0xd - PR_GET_TSC = 0x19 - PR_GET_UNALIGN = 0x5 - PR_MCE_KILL = 0x21 - PR_MCE_KILL_CLEAR = 0x0 - PR_MCE_KILL_DEFAULT = 0x2 - PR_MCE_KILL_EARLY = 0x1 - PR_MCE_KILL_GET = 0x22 - PR_MCE_KILL_LATE = 0x0 - PR_MCE_KILL_SET = 0x1 - PR_MPX_DISABLE_MANAGEMENT = 0x2c - PR_MPX_ENABLE_MANAGEMENT = 0x2b - PR_PAC_APDAKEY = 0x4 - PR_PAC_APDBKEY = 0x8 - PR_PAC_APGAKEY = 0x10 - PR_PAC_APIAKEY = 0x1 - PR_PAC_APIBKEY = 0x2 - PR_PAC_RESET_KEYS = 0x36 - PR_SET_CHILD_SUBREAPER = 0x24 - PR_SET_DUMPABLE = 0x4 - PR_SET_ENDIAN = 0x14 - PR_SET_FPEMU = 0xa - PR_SET_FPEXC = 0xc - PR_SET_FP_MODE = 0x2d - PR_SET_KEEPCAPS = 0x8 - PR_SET_MM = 0x23 - PR_SET_MM_ARG_END = 0x9 - PR_SET_MM_ARG_START = 0x8 - PR_SET_MM_AUXV = 0xc - PR_SET_MM_BRK = 0x7 - PR_SET_MM_END_CODE = 0x2 - PR_SET_MM_END_DATA = 0x4 - PR_SET_MM_ENV_END = 0xb - PR_SET_MM_ENV_START = 0xa - PR_SET_MM_EXE_FILE = 0xd - PR_SET_MM_MAP = 0xe - PR_SET_MM_MAP_SIZE = 0xf - 
PR_SET_MM_START_BRK = 0x6 - PR_SET_MM_START_CODE = 0x1 - PR_SET_MM_START_DATA = 0x3 - PR_SET_MM_START_STACK = 0x5 - PR_SET_NAME = 0xf - PR_SET_NO_NEW_PRIVS = 0x26 - PR_SET_PDEATHSIG = 0x1 - PR_SET_PTRACER = 0x59616d61 - PR_SET_PTRACER_ANY = 0xffffffffffffffff - PR_SET_SECCOMP = 0x16 - PR_SET_SECUREBITS = 0x1c - PR_SET_SPECULATION_CTRL = 0x35 - PR_SET_THP_DISABLE = 0x29 - PR_SET_TIMERSLACK = 0x1d - PR_SET_TIMING = 0xe - PR_SET_TSC = 0x1a - PR_SET_UNALIGN = 0x6 - PR_SPEC_DISABLE = 0x4 - PR_SPEC_DISABLE_NOEXEC = 0x10 - PR_SPEC_ENABLE = 0x2 - PR_SPEC_FORCE_DISABLE = 0x8 - PR_SPEC_INDIRECT_BRANCH = 0x1 - PR_SPEC_NOT_AFFECTED = 0x0 - PR_SPEC_PRCTL = 0x1 - PR_SPEC_STORE_BYPASS = 0x0 - PR_SVE_GET_VL = 0x33 - PR_SVE_SET_VL = 0x32 - PR_SVE_SET_VL_ONEXEC = 0x40000 - PR_SVE_VL_INHERIT = 0x20000 - PR_SVE_VL_LEN_MASK = 0xffff - PR_TASK_PERF_EVENTS_DISABLE = 0x1f - PR_TASK_PERF_EVENTS_ENABLE = 0x20 - PR_TIMING_STATISTICAL = 0x0 - PR_TIMING_TIMESTAMP = 0x1 - PR_TSC_ENABLE = 0x1 - PR_TSC_SIGSEGV = 0x2 - PR_UNALIGN_NOPRINT = 0x1 - PR_UNALIGN_SIGBUS = 0x2 - PSTOREFS_MAGIC = 0x6165676c - PTRACE_ATTACH = 0x10 - PTRACE_CONT = 0x7 - PTRACE_DETACH = 0x11 - PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 - PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 - PTRACE_EVENT_CLONE = 0x3 - PTRACE_EVENT_EXEC = 0x4 - PTRACE_EVENT_EXIT = 0x6 - PTRACE_EVENT_FORK = 0x1 - PTRACE_EVENT_SECCOMP = 0x7 - PTRACE_EVENT_STOP = 0x80 - PTRACE_EVENT_VFORK = 0x2 - PTRACE_EVENT_VFORK_DONE = 0x5 - PTRACE_GETEVENTMSG = 0x4201 - PTRACE_GETFPAREGS = 0x14 - PTRACE_GETFPREGS = 0xe - PTRACE_GETFPREGS64 = 0x19 - PTRACE_GETREGS = 0xc - PTRACE_GETREGS64 = 0x16 - PTRACE_GETREGSET = 0x4204 - PTRACE_GETSIGINFO = 0x4202 - PTRACE_GETSIGMASK = 0x420a - PTRACE_GET_SYSCALL_INFO = 0x420e - PTRACE_INTERRUPT = 0x4207 - PTRACE_KILL = 0x8 - PTRACE_LISTEN = 0x4208 - PTRACE_O_EXITKILL = 0x100000 - PTRACE_O_MASK = 0x3000ff - PTRACE_O_SUSPEND_SECCOMP = 0x200000 - PTRACE_O_TRACECLONE = 0x8 - PTRACE_O_TRACEEXEC = 0x10 - PTRACE_O_TRACEEXIT = 0x40 - PTRACE_O_TRACEFORK = 0x2 - PTRACE_O_TRACESECCOMP = 0x80 - PTRACE_O_TRACESYSGOOD = 0x1 - PTRACE_O_TRACEVFORK = 0x4 - PTRACE_O_TRACEVFORKDONE = 0x20 - PTRACE_PEEKDATA = 0x2 - PTRACE_PEEKSIGINFO = 0x4209 - PTRACE_PEEKSIGINFO_SHARED = 0x1 - PTRACE_PEEKTEXT = 0x1 - PTRACE_PEEKUSR = 0x3 - PTRACE_POKEDATA = 0x5 - PTRACE_POKETEXT = 0x4 - PTRACE_POKEUSR = 0x6 - PTRACE_READDATA = 0x10 - PTRACE_READTEXT = 0x12 - PTRACE_SECCOMP_GET_FILTER = 0x420c - PTRACE_SECCOMP_GET_METADATA = 0x420d - PTRACE_SEIZE = 0x4206 - PTRACE_SETFPAREGS = 0x15 - PTRACE_SETFPREGS = 0xf - PTRACE_SETFPREGS64 = 0x1a - PTRACE_SETOPTIONS = 0x4200 - PTRACE_SETREGS = 0xd - PTRACE_SETREGS64 = 0x17 - PTRACE_SETREGSET = 0x4205 - PTRACE_SETSIGINFO = 0x4203 - PTRACE_SETSIGMASK = 0x420b - PTRACE_SINGLESTEP = 0x9 - PTRACE_SPARC_DETACH = 0xb - PTRACE_SYSCALL = 0x18 - PTRACE_SYSCALL_INFO_ENTRY = 0x1 - PTRACE_SYSCALL_INFO_EXIT = 0x2 - PTRACE_SYSCALL_INFO_NONE = 0x0 - PTRACE_SYSCALL_INFO_SECCOMP = 0x3 - PTRACE_TRACEME = 0x0 - PTRACE_WRITEDATA = 0x11 - PTRACE_WRITETEXT = 0x13 - PT_FP = 0x48 - PT_G0 = 0x10 - PT_G1 = 0x14 - PT_G2 = 0x18 - PT_G3 = 0x1c - PT_G4 = 0x20 - PT_G5 = 0x24 - PT_G6 = 0x28 - PT_G7 = 0x2c - PT_I0 = 0x30 - PT_I1 = 0x34 - PT_I2 = 0x38 - PT_I3 = 0x3c - PT_I4 = 0x40 - PT_I5 = 0x44 - PT_I6 = 0x48 - PT_I7 = 0x4c - PT_NPC = 0x8 - PT_PC = 0x4 - PT_PSR = 0x0 - PT_REGS_MAGIC = 0x57ac6c00 - PT_TNPC = 0x90 - PT_TPC = 0x88 - PT_TSTATE = 0x80 - PT_V9_FP = 0x70 - PT_V9_G0 = 0x0 - PT_V9_G1 = 0x8 - PT_V9_G2 = 0x10 - PT_V9_G3 = 0x18 - PT_V9_G4 = 0x20 - PT_V9_G5 = 0x28 - PT_V9_G6 = 0x30 - PT_V9_G7 = 0x38 
- PT_V9_I0 = 0x40 - PT_V9_I1 = 0x48 - PT_V9_I2 = 0x50 - PT_V9_I3 = 0x58 - PT_V9_I4 = 0x60 - PT_V9_I5 = 0x68 - PT_V9_I6 = 0x70 - PT_V9_I7 = 0x78 - PT_V9_MAGIC = 0x9c - PT_V9_TNPC = 0x90 - PT_V9_TPC = 0x88 - PT_V9_TSTATE = 0x80 - PT_V9_Y = 0x98 - PT_WIM = 0x10 - PT_Y = 0xc - QNX4_SUPER_MAGIC = 0x2f - QNX6_SUPER_MAGIC = 0x68191122 - RAMFS_MAGIC = 0x858458f6 - RDTGROUP_SUPER_MAGIC = 0x7655821 - REISERFS_SUPER_MAGIC = 0x52654973 - RENAME_EXCHANGE = 0x2 - RENAME_NOREPLACE = 0x1 - RENAME_WHITEOUT = 0x4 - RLIMIT_AS = 0x9 - RLIMIT_CORE = 0x4 - RLIMIT_CPU = 0x0 - RLIMIT_DATA = 0x2 - RLIMIT_FSIZE = 0x1 - RLIMIT_LOCKS = 0xa - RLIMIT_MEMLOCK = 0x8 - RLIMIT_MSGQUEUE = 0xc - RLIMIT_NICE = 0xd - RLIMIT_NOFILE = 0x6 - RLIMIT_NPROC = 0x7 - RLIMIT_RSS = 0x5 - RLIMIT_RTPRIO = 0xe - RLIMIT_RTTIME = 0xf - RLIMIT_SIGPENDING = 0xb - RLIMIT_STACK = 0x3 - RLIM_INFINITY = 0xffffffffffffffff - RNDADDENTROPY = 0x80085203 - RNDADDTOENTCNT = 0x80045201 - RNDCLEARPOOL = 0x20005206 - RNDGETENTCNT = 0x40045200 - RNDGETPOOL = 0x40085202 - RNDRESEEDCRNG = 0x20005207 - RNDZAPENTCNT = 0x20005204 - RTAX_ADVMSS = 0x8 - RTAX_CC_ALGO = 0x10 - RTAX_CWND = 0x7 - RTAX_FASTOPEN_NO_COOKIE = 0x11 - RTAX_FEATURES = 0xc - RTAX_FEATURE_ALLFRAG = 0x8 - RTAX_FEATURE_ECN = 0x1 - RTAX_FEATURE_MASK = 0xf - RTAX_FEATURE_SACK = 0x2 - RTAX_FEATURE_TIMESTAMP = 0x4 - RTAX_HOPLIMIT = 0xa - RTAX_INITCWND = 0xb - RTAX_INITRWND = 0xe - RTAX_LOCK = 0x1 - RTAX_MAX = 0x11 - RTAX_MTU = 0x2 - RTAX_QUICKACK = 0xf - RTAX_REORDERING = 0x9 - RTAX_RTO_MIN = 0xd - RTAX_RTT = 0x4 - RTAX_RTTVAR = 0x5 - RTAX_SSTHRESH = 0x6 - RTAX_UNSPEC = 0x0 - RTAX_WINDOW = 0x3 - RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1e - RTCF_DIRECTSRC = 0x4000000 - RTCF_DOREDIRECT = 0x1000000 - RTCF_LOG = 0x2000000 - RTCF_MASQ = 0x400000 - RTCF_NAT = 0x800000 - RTCF_VALVE = 0x200000 - RTC_AF = 0x20 - RTC_AIE_OFF = 0x20007002 - RTC_AIE_ON = 0x20007001 - RTC_ALM_READ = 0x40247008 - RTC_ALM_SET = 0x80247007 - RTC_EPOCH_READ = 0x4008700d - RTC_EPOCH_SET = 0x8008700e - RTC_IRQF = 0x80 - RTC_IRQP_READ = 0x4008700b - RTC_IRQP_SET = 0x8008700c - RTC_MAX_FREQ = 0x2000 - RTC_PF = 0x40 - RTC_PIE_OFF = 0x20007006 - RTC_PIE_ON = 0x20007005 - RTC_PLL_GET = 0x40207011 - RTC_PLL_SET = 0x80207012 - RTC_RD_TIME = 0x40247009 - RTC_SET_TIME = 0x8024700a - RTC_UF = 0x10 - RTC_UIE_OFF = 0x20007004 - RTC_UIE_ON = 0x20007003 - RTC_VL_CLR = 0x20007014 - RTC_VL_READ = 0x40047013 - RTC_WIE_OFF = 0x20007010 - RTC_WIE_ON = 0x2000700f - RTC_WKALM_RD = 0x40287010 - RTC_WKALM_SET = 0x8028700f - RTF_ADDRCLASSMASK = 0xf8000000 - RTF_ADDRCONF = 0x40000 - RTF_ALLONLINK = 0x20000 - RTF_BROADCAST = 0x10000000 - RTF_CACHE = 0x1000000 - RTF_DEFAULT = 0x10000 - RTF_DYNAMIC = 0x10 - RTF_FLOW = 0x2000000 - RTF_GATEWAY = 0x2 - RTF_HOST = 0x4 - RTF_INTERFACE = 0x40000000 - RTF_IRTT = 0x100 - RTF_LINKRT = 0x100000 - RTF_LOCAL = 0x80000000 - RTF_MODIFIED = 0x20 - RTF_MSS = 0x40 - RTF_MTU = 0x40 - RTF_MULTICAST = 0x20000000 - RTF_NAT = 0x8000000 - RTF_NOFORWARD = 0x1000 - RTF_NONEXTHOP = 0x200000 - RTF_NOPMTUDISC = 0x4000 - RTF_POLICY = 0x4000000 - RTF_REINSTATE = 0x8 - RTF_REJECT = 0x200 - RTF_STATIC = 0x400 - RTF_THROW = 0x2000 - RTF_UP = 0x1 - RTF_WINDOW = 0x80 - RTF_XRESOLVE = 0x800 - RTM_BASE = 0x10 - RTM_DELACTION = 0x31 - RTM_DELADDR = 0x15 - RTM_DELADDRLABEL = 0x49 - RTM_DELCHAIN = 0x65 - RTM_DELLINK = 0x11 - RTM_DELMDB = 0x55 - RTM_DELNEIGH = 0x1d - RTM_DELNETCONF = 0x51 - RTM_DELNEXTHOP = 0x69 - RTM_DELNSID = 0x59 - RTM_DELQDISC = 0x25 - RTM_DELROUTE = 0x19 - RTM_DELRULE = 0x21 - RTM_DELTCLASS = 0x29 - RTM_DELTFILTER = 0x2d - RTM_F_CLONED = 
0x200 - RTM_F_EQUALIZE = 0x400 - RTM_F_FIB_MATCH = 0x2000 - RTM_F_LOOKUP_TABLE = 0x1000 - RTM_F_NOTIFY = 0x100 - RTM_F_PREFIX = 0x800 - RTM_GETACTION = 0x32 - RTM_GETADDR = 0x16 - RTM_GETADDRLABEL = 0x4a - RTM_GETANYCAST = 0x3e - RTM_GETCHAIN = 0x66 - RTM_GETDCB = 0x4e - RTM_GETLINK = 0x12 - RTM_GETMDB = 0x56 - RTM_GETMULTICAST = 0x3a - RTM_GETNEIGH = 0x1e - RTM_GETNEIGHTBL = 0x42 - RTM_GETNETCONF = 0x52 - RTM_GETNEXTHOP = 0x6a - RTM_GETNSID = 0x5a - RTM_GETQDISC = 0x26 - RTM_GETROUTE = 0x1a - RTM_GETRULE = 0x22 - RTM_GETSTATS = 0x5e - RTM_GETTCLASS = 0x2a - RTM_GETTFILTER = 0x2e - RTM_MAX = 0x6b - RTM_NEWACTION = 0x30 - RTM_NEWADDR = 0x14 - RTM_NEWADDRLABEL = 0x48 - RTM_NEWCACHEREPORT = 0x60 - RTM_NEWCHAIN = 0x64 - RTM_NEWLINK = 0x10 - RTM_NEWMDB = 0x54 - RTM_NEWNDUSEROPT = 0x44 - RTM_NEWNEIGH = 0x1c - RTM_NEWNEIGHTBL = 0x40 - RTM_NEWNETCONF = 0x50 - RTM_NEWNEXTHOP = 0x68 - RTM_NEWNSID = 0x58 - RTM_NEWPREFIX = 0x34 - RTM_NEWQDISC = 0x24 - RTM_NEWROUTE = 0x18 - RTM_NEWRULE = 0x20 - RTM_NEWSTATS = 0x5c - RTM_NEWTCLASS = 0x28 - RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x17 - RTM_NR_MSGTYPES = 0x5c - RTM_SETDCB = 0x4f - RTM_SETLINK = 0x13 - RTM_SETNEIGHTBL = 0x43 - RTNH_ALIGNTO = 0x4 - RTNH_COMPARE_MASK = 0x19 - RTNH_F_DEAD = 0x1 - RTNH_F_LINKDOWN = 0x10 - RTNH_F_OFFLOAD = 0x8 - RTNH_F_ONLINK = 0x4 - RTNH_F_PERVASIVE = 0x2 - RTNH_F_UNRESOLVED = 0x20 - RTN_MAX = 0xb - RTPROT_BABEL = 0x2a - RTPROT_BGP = 0xba - RTPROT_BIRD = 0xc - RTPROT_BOOT = 0x3 - RTPROT_DHCP = 0x10 - RTPROT_DNROUTED = 0xd - RTPROT_EIGRP = 0xc0 - RTPROT_GATED = 0x8 - RTPROT_ISIS = 0xbb - RTPROT_KERNEL = 0x2 - RTPROT_MROUTED = 0x11 - RTPROT_MRT = 0xa - RTPROT_NTK = 0xf - RTPROT_OSPF = 0xbc - RTPROT_RA = 0x9 - RTPROT_REDIRECT = 0x1 - RTPROT_RIP = 0xbd - RTPROT_STATIC = 0x4 - RTPROT_UNSPEC = 0x0 - RTPROT_XORP = 0xe - RTPROT_ZEBRA = 0xb - RT_CLASS_DEFAULT = 0xfd - RT_CLASS_LOCAL = 0xff - RT_CLASS_MAIN = 0xfe - RT_CLASS_MAX = 0xff - RT_CLASS_UNSPEC = 0x0 - RUSAGE_CHILDREN = -0x1 - RUSAGE_SELF = 0x0 - RUSAGE_THREAD = 0x1 - SCM_CREDENTIALS = 0x2 - SCM_RIGHTS = 0x1 - SCM_TIMESTAMP = 0x1d - SCM_TIMESTAMPING = 0x23 - SCM_TIMESTAMPING_OPT_STATS = 0x38 - SCM_TIMESTAMPING_PKTINFO = 0x3c - SCM_TIMESTAMPNS = 0x21 - SCM_TXTIME = 0x3f - SCM_WIFI_STATUS = 0x25 - SC_LOG_FLUSH = 0x100000 - SECCOMP_MODE_DISABLED = 0x0 - SECCOMP_MODE_FILTER = 0x2 - SECCOMP_MODE_STRICT = 0x1 - SECURITYFS_MAGIC = 0x73636673 - SELINUX_MAGIC = 0xf97cff8c - SFD_CLOEXEC = 0x400000 - SFD_NONBLOCK = 0x4000 - SHUT_RD = 0x0 - SHUT_RDWR = 0x2 - SHUT_WR = 0x1 - SIOCADDDLCI = 0x8980 - SIOCADDMULTI = 0x8931 - SIOCADDRT = 0x890b - SIOCATMARK = 0x8905 - SIOCBONDCHANGEACTIVE = 0x8995 - SIOCBONDENSLAVE = 0x8990 - SIOCBONDINFOQUERY = 0x8994 - SIOCBONDRELEASE = 0x8991 - SIOCBONDSETHWADDR = 0x8992 - SIOCBONDSLAVEINFOQUERY = 0x8993 - SIOCBRADDBR = 0x89a0 - SIOCBRADDIF = 0x89a2 - SIOCBRDELBR = 0x89a1 - SIOCBRDELIF = 0x89a3 - SIOCDARP = 0x8953 - SIOCDELDLCI = 0x8981 - SIOCDELMULTI = 0x8932 - SIOCDELRT = 0x890c - SIOCDEVPRIVATE = 0x89f0 - SIOCDIFADDR = 0x8936 - SIOCDRARP = 0x8960 - SIOCETHTOOL = 0x8946 - SIOCGARP = 0x8954 - SIOCGETLINKNAME = 0x89e0 - SIOCGETNODEID = 0x89e1 - SIOCGHWTSTAMP = 0x89b1 - SIOCGIFADDR = 0x8915 - SIOCGIFBR = 0x8940 - SIOCGIFBRDADDR = 0x8919 - SIOCGIFCONF = 0x8912 - SIOCGIFCOUNT = 0x8938 - SIOCGIFDSTADDR = 0x8917 - SIOCGIFENCAP = 0x8925 - SIOCGIFFLAGS = 0x8913 - SIOCGIFHWADDR = 0x8927 - SIOCGIFINDEX = 0x8933 - SIOCGIFMAP = 0x8970 - SIOCGIFMEM = 0x891f - SIOCGIFMETRIC = 0x891d - SIOCGIFMTU = 0x8921 - SIOCGIFNAME = 0x8910 - SIOCGIFNETMASK = 0x891b - SIOCGIFPFLAGS = 
0x8935 - SIOCGIFSLAVE = 0x8929 - SIOCGIFTXQLEN = 0x8942 - SIOCGIFVLAN = 0x8982 - SIOCGMIIPHY = 0x8947 - SIOCGMIIREG = 0x8948 - SIOCGPGRP = 0x8904 - SIOCGPPPCSTATS = 0x89f2 - SIOCGPPPSTATS = 0x89f0 - SIOCGPPPVER = 0x89f1 - SIOCGRARP = 0x8961 - SIOCGSKNS = 0x894c - SIOCGSTAMP = 0x8906 - SIOCGSTAMPNS = 0x8907 - SIOCGSTAMPNS_NEW = 0x40108907 - SIOCGSTAMPNS_OLD = 0x8907 - SIOCGSTAMP_NEW = 0x40108906 - SIOCGSTAMP_OLD = 0x8906 - SIOCINQ = 0x4004667f - SIOCOUTQ = 0x40047473 - SIOCOUTQNSD = 0x894b - SIOCPROTOPRIVATE = 0x89e0 - SIOCRTMSG = 0x890d - SIOCSARP = 0x8955 - SIOCSHWTSTAMP = 0x89b0 - SIOCSIFADDR = 0x8916 - SIOCSIFBR = 0x8941 - SIOCSIFBRDADDR = 0x891a - SIOCSIFDSTADDR = 0x8918 - SIOCSIFENCAP = 0x8926 - SIOCSIFFLAGS = 0x8914 - SIOCSIFHWADDR = 0x8924 - SIOCSIFHWBROADCAST = 0x8937 - SIOCSIFLINK = 0x8911 - SIOCSIFMAP = 0x8971 - SIOCSIFMEM = 0x8920 - SIOCSIFMETRIC = 0x891e - SIOCSIFMTU = 0x8922 - SIOCSIFNAME = 0x8923 - SIOCSIFNETMASK = 0x891c - SIOCSIFPFLAGS = 0x8934 - SIOCSIFSLAVE = 0x8930 - SIOCSIFTXQLEN = 0x8943 - SIOCSIFVLAN = 0x8983 - SIOCSMIIREG = 0x8949 - SIOCSPGRP = 0x8902 - SIOCSRARP = 0x8962 - SIOCWANDEV = 0x894a - SMACK_MAGIC = 0x43415d53 - SMART_AUTOSAVE = 0xd2 - SMART_AUTO_OFFLINE = 0xdb - SMART_DISABLE = 0xd9 - SMART_ENABLE = 0xd8 - SMART_HCYL_PASS = 0xc2 - SMART_IMMEDIATE_OFFLINE = 0xd4 - SMART_LCYL_PASS = 0x4f - SMART_READ_LOG_SECTOR = 0xd5 - SMART_READ_THRESHOLDS = 0xd1 - SMART_READ_VALUES = 0xd0 - SMART_SAVE = 0xd3 - SMART_STATUS = 0xda - SMART_WRITE_LOG_SECTOR = 0xd6 - SMART_WRITE_THRESHOLDS = 0xd7 - SMB_SUPER_MAGIC = 0x517b - SOCKFS_MAGIC = 0x534f434b - SOCK_CLOEXEC = 0x400000 - SOCK_DCCP = 0x6 - SOCK_DGRAM = 0x2 - SOCK_IOC_TYPE = 0x89 - SOCK_NONBLOCK = 0x4000 - SOCK_PACKET = 0xa - SOCK_RAW = 0x3 - SOCK_RDM = 0x4 - SOCK_SEQPACKET = 0x5 - SOCK_STREAM = 0x1 - SOL_AAL = 0x109 - SOL_ALG = 0x117 - SOL_ATM = 0x108 - SOL_CAIF = 0x116 - SOL_CAN_BASE = 0x64 - SOL_DCCP = 0x10d - SOL_DECNET = 0x105 - SOL_ICMPV6 = 0x3a - SOL_IP = 0x0 - SOL_IPV6 = 0x29 - SOL_IRDA = 0x10a - SOL_IUCV = 0x115 - SOL_KCM = 0x119 - SOL_LLC = 0x10c - SOL_NETBEUI = 0x10b - SOL_NETLINK = 0x10e - SOL_NFC = 0x118 - SOL_PACKET = 0x107 - SOL_PNPIPE = 0x113 - SOL_PPPOL2TP = 0x111 - SOL_RAW = 0xff - SOL_RDS = 0x114 - SOL_RXRPC = 0x110 - SOL_SOCKET = 0xffff - SOL_TCP = 0x6 - SOL_TIPC = 0x10f - SOL_TLS = 0x11a - SOL_X25 = 0x106 - SOL_XDP = 0x11b - SOMAXCONN = 0x80 - SO_ACCEPTCONN = 0x8000 - SO_ATTACH_BPF = 0x34 - SO_ATTACH_FILTER = 0x1a - SO_ATTACH_REUSEPORT_CBPF = 0x35 - SO_ATTACH_REUSEPORT_EBPF = 0x36 - SO_BINDTODEVICE = 0xd - SO_BINDTOIFINDEX = 0x41 - SO_BPF_EXTENSIONS = 0x32 - SO_BROADCAST = 0x20 - SO_BSDCOMPAT = 0x400 - SO_BUSY_POLL = 0x30 - SO_CNX_ADVICE = 0x37 - SO_COOKIE = 0x3b - SO_DEBUG = 0x1 - SO_DETACH_BPF = 0x1b - SO_DETACH_FILTER = 0x1b - SO_DETACH_REUSEPORT_BPF = 0x47 - SO_DOMAIN = 0x1029 - SO_DONTROUTE = 0x10 - SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 - SO_EE_CODE_TXTIME_MISSED = 0x2 - SO_EE_CODE_ZEROCOPY_COPIED = 0x1 - SO_EE_ORIGIN_ICMP = 0x2 - SO_EE_ORIGIN_ICMP6 = 0x3 - SO_EE_ORIGIN_LOCAL = 0x1 - SO_EE_ORIGIN_NONE = 0x0 - SO_EE_ORIGIN_TIMESTAMPING = 0x4 - SO_EE_ORIGIN_TXSTATUS = 0x4 - SO_EE_ORIGIN_TXTIME = 0x6 - SO_EE_ORIGIN_ZEROCOPY = 0x5 - SO_ERROR = 0x1007 - SO_GET_FILTER = 0x1a - SO_INCOMING_CPU = 0x33 - SO_INCOMING_NAPI_ID = 0x3a - SO_KEEPALIVE = 0x8 - SO_LINGER = 0x80 - SO_LOCK_FILTER = 0x28 - SO_MARK = 0x22 - SO_MAX_PACING_RATE = 0x31 - SO_MEMINFO = 0x39 - SO_NOFCS = 0x27 - SO_NO_CHECK = 0xb - SO_OOBINLINE = 0x100 - SO_PASSCRED = 0x2 - SO_PASSSEC = 0x1f - SO_PEEK_OFF = 0x26 - SO_PEERCRED = 0x40 - 
SO_PEERGROUPS = 0x3d - SO_PEERNAME = 0x1c - SO_PEERSEC = 0x1e - SO_PRIORITY = 0xc - SO_PROTOCOL = 0x1028 - SO_RCVBUF = 0x1002 - SO_RCVBUFFORCE = 0x100b - SO_RCVLOWAT = 0x800 - SO_RCVTIMEO = 0x2000 - SO_RCVTIMEO_NEW = 0x44 - SO_RCVTIMEO_OLD = 0x2000 - SO_REUSEADDR = 0x4 - SO_REUSEPORT = 0x200 - SO_RXQ_OVFL = 0x24 - SO_SECURITY_AUTHENTICATION = 0x5001 - SO_SECURITY_ENCRYPTION_NETWORK = 0x5004 - SO_SECURITY_ENCRYPTION_TRANSPORT = 0x5002 - SO_SELECT_ERR_QUEUE = 0x29 - SO_SNDBUF = 0x1001 - SO_SNDBUFFORCE = 0x100a - SO_SNDLOWAT = 0x1000 - SO_SNDTIMEO = 0x4000 - SO_SNDTIMEO_NEW = 0x45 - SO_SNDTIMEO_OLD = 0x4000 - SO_TIMESTAMP = 0x1d - SO_TIMESTAMPING = 0x23 - SO_TIMESTAMPING_NEW = 0x43 - SO_TIMESTAMPING_OLD = 0x23 - SO_TIMESTAMPNS = 0x21 - SO_TIMESTAMPNS_NEW = 0x42 - SO_TIMESTAMPNS_OLD = 0x21 - SO_TIMESTAMP_NEW = 0x46 - SO_TIMESTAMP_OLD = 0x1d - SO_TXTIME = 0x3f - SO_TYPE = 0x1008 - SO_VM_SOCKETS_BUFFER_MAX_SIZE = 0x2 - SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1 - SO_VM_SOCKETS_BUFFER_SIZE = 0x0 - SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6 - SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7 - SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3 - SO_VM_SOCKETS_TRUSTED = 0x5 - SO_WIFI_STATUS = 0x25 - SO_ZEROCOPY = 0x3e - SPLICE_F_GIFT = 0x8 - SPLICE_F_MORE = 0x4 - SPLICE_F_MOVE = 0x1 - SPLICE_F_NONBLOCK = 0x2 - SQUASHFS_MAGIC = 0x73717368 - STACK_END_MAGIC = 0x57ac6e9d - STATX_ALL = 0xfff - STATX_ATIME = 0x20 - STATX_ATTR_APPEND = 0x20 - STATX_ATTR_AUTOMOUNT = 0x1000 - STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_ENCRYPTED = 0x800 - STATX_ATTR_IMMUTABLE = 0x10 - STATX_ATTR_NODUMP = 0x40 - STATX_BASIC_STATS = 0x7ff - STATX_BLOCKS = 0x400 - STATX_BTIME = 0x800 - STATX_CTIME = 0x80 - STATX_GID = 0x10 - STATX_INO = 0x100 - STATX_MODE = 0x2 - STATX_MTIME = 0x40 - STATX_NLINK = 0x4 - STATX_SIZE = 0x200 - STATX_TYPE = 0x1 - STATX_UID = 0x8 - STATX__RESERVED = 0x80000000 - SYNC_FILE_RANGE_WAIT_AFTER = 0x4 - SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 - SYNC_FILE_RANGE_WRITE = 0x2 - SYNC_FILE_RANGE_WRITE_AND_WAIT = 0x7 - SYSFS_MAGIC = 0x62656572 - S_BLKSIZE = 0x200 - S_IEXEC = 0x40 - S_IFBLK = 0x6000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFIFO = 0x1000 - S_IFLNK = 0xa000 - S_IFMT = 0xf000 - S_IFREG = 0x8000 - S_IFSOCK = 0xc000 - S_IREAD = 0x100 - S_IRGRP = 0x20 - S_IROTH = 0x4 - S_IRUSR = 0x100 - S_IRWXG = 0x38 - S_IRWXO = 0x7 - S_IRWXU = 0x1c0 - S_ISGID = 0x400 - S_ISUID = 0x800 - S_ISVTX = 0x200 - S_IWGRP = 0x10 - S_IWOTH = 0x2 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXGRP = 0x8 - S_IXOTH = 0x1 - S_IXUSR = 0x40 - TAB0 = 0x0 - TAB1 = 0x800 - TAB2 = 0x1000 - TAB3 = 0x1800 - TABDLY = 0x1800 - TASKSTATS_CMD_ATTR_MAX = 0x4 - TASKSTATS_CMD_MAX = 0x2 - TASKSTATS_GENL_NAME = "TASKSTATS" - TASKSTATS_GENL_VERSION = 0x1 - TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0x9 - TCFLSH = 0x20005407 - TCGETA = 0x40125401 - TCGETS = 0x40245408 - TCGETS2 = 0x402c540c - TCIFLUSH = 0x0 - TCIOFF = 0x2 - TCIOFLUSH = 0x2 - TCION = 0x3 - TCOFLUSH = 0x1 - TCOOFF = 0x0 - TCOON = 0x1 - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - TCP_CC_INFO = 0x1a - TCP_CM_INQ = 0x24 - TCP_CONGESTION = 0xd - TCP_COOKIE_IN_ALWAYS = 0x1 - TCP_COOKIE_MAX = 0x10 - TCP_COOKIE_MIN = 0x8 - TCP_COOKIE_OUT_NEVER = 0x2 - TCP_COOKIE_PAIR_SIZE = 0x20 - TCP_COOKIE_TRANSACTIONS = 0xf - TCP_CORK = 0x3 - TCP_DEFER_ACCEPT = 0x9 - TCP_FASTOPEN = 0x17 - TCP_FASTOPEN_CONNECT = 0x1e - TCP_FASTOPEN_KEY = 0x21 - TCP_FASTOPEN_NO_COOKIE = 0x22 - TCP_INFO = 0xb - TCP_INQ = 0x24 - TCP_KEEPCNT = 0x6 - TCP_KEEPIDLE = 0x4 - TCP_KEEPINTVL = 0x5 - TCP_LINGER2 = 0x8 - TCP_MAXSEG = 0x2 - TCP_MAXWIN = 0xffff - TCP_MAX_WINSHIFT = 
0xe - TCP_MD5SIG = 0xe - TCP_MD5SIG_EXT = 0x20 - TCP_MD5SIG_FLAG_PREFIX = 0x1 - TCP_MD5SIG_MAXKEYLEN = 0x50 - TCP_MSS = 0x200 - TCP_MSS_DEFAULT = 0x218 - TCP_MSS_DESIRED = 0x4c4 - TCP_NODELAY = 0x1 - TCP_NOTSENT_LOWAT = 0x19 - TCP_QUEUE_SEQ = 0x15 - TCP_QUICKACK = 0xc - TCP_REPAIR = 0x13 - TCP_REPAIR_OFF = 0x0 - TCP_REPAIR_OFF_NO_WP = -0x1 - TCP_REPAIR_ON = 0x1 - TCP_REPAIR_OPTIONS = 0x16 - TCP_REPAIR_QUEUE = 0x14 - TCP_REPAIR_WINDOW = 0x1d - TCP_SAVED_SYN = 0x1c - TCP_SAVE_SYN = 0x1b - TCP_SYNCNT = 0x7 - TCP_S_DATA_IN = 0x4 - TCP_S_DATA_OUT = 0x8 - TCP_THIN_DUPACK = 0x11 - TCP_THIN_LINEAR_TIMEOUTS = 0x10 - TCP_TIMESTAMP = 0x18 - TCP_ULP = 0x1f - TCP_USER_TIMEOUT = 0x12 - TCP_WINDOW_CLAMP = 0xa - TCP_ZEROCOPY_RECEIVE = 0x23 - TCSAFLUSH = 0x2 - TCSBRK = 0x20005405 - TCSBRKP = 0x5425 - TCSETA = 0x80125402 - TCSETAF = 0x80125404 - TCSETAW = 0x80125403 - TCSETS = 0x80245409 - TCSETS2 = 0x802c540d - TCSETSF = 0x8024540b - TCSETSF2 = 0x802c540f - TCSETSW = 0x8024540a - TCSETSW2 = 0x802c540e - TCXONC = 0x20005406 - TIMER_ABSTIME = 0x1 - TIOCCBRK = 0x2000747a - TIOCCONS = 0x20007424 - TIOCEXCL = 0x2000740d - TIOCGDEV = 0x40045432 - TIOCGETD = 0x40047400 - TIOCGEXCL = 0x40045440 - TIOCGICOUNT = 0x545d - TIOCGISO7816 = 0x40285443 - TIOCGLCKTRMIOS = 0x5456 - TIOCGPGRP = 0x40047483 - TIOCGPKT = 0x40045438 - TIOCGPTLCK = 0x40045439 - TIOCGPTN = 0x40047486 - TIOCGPTPEER = 0x20007489 - TIOCGRS485 = 0x40205441 - TIOCGSERIAL = 0x541e - TIOCGSID = 0x40047485 - TIOCGSOFTCAR = 0x40047464 - TIOCGWINSZ = 0x40087468 - TIOCINQ = 0x4004667f - TIOCLINUX = 0x541c - TIOCMBIC = 0x8004746b - TIOCMBIS = 0x8004746c - TIOCMGET = 0x4004746a - TIOCMIWAIT = 0x545c - TIOCMSET = 0x8004746d - TIOCM_CAR = 0x40 - TIOCM_CD = 0x40 - TIOCM_CTS = 0x20 - TIOCM_DSR = 0x100 - TIOCM_DTR = 0x2 - TIOCM_LE = 0x1 - TIOCM_RI = 0x80 - TIOCM_RNG = 0x80 - TIOCM_RTS = 0x4 - TIOCM_SR = 0x10 - TIOCM_ST = 0x8 - TIOCNOTTY = 0x20007471 - TIOCNXCL = 0x2000740e - TIOCOUTQ = 0x40047473 - TIOCPKT = 0x80047470 - TIOCPKT_DATA = 0x0 - TIOCPKT_DOSTOP = 0x20 - TIOCPKT_FLUSHREAD = 0x1 - TIOCPKT_FLUSHWRITE = 0x2 - TIOCPKT_IOCTL = 0x40 - TIOCPKT_NOSTOP = 0x10 - TIOCPKT_START = 0x8 - TIOCPKT_STOP = 0x4 - TIOCSBRK = 0x2000747b - TIOCSCTTY = 0x20007484 - TIOCSERCONFIG = 0x5453 - TIOCSERGETLSR = 0x5459 - TIOCSERGETMULTI = 0x545a - TIOCSERGSTRUCT = 0x5458 - TIOCSERGWILD = 0x5454 - TIOCSERSETMULTI = 0x545b - TIOCSERSWILD = 0x5455 - TIOCSETD = 0x80047401 - TIOCSIG = 0x80047488 - TIOCSISO7816 = 0xc0285444 - TIOCSLCKTRMIOS = 0x5457 - TIOCSPGRP = 0x80047482 - TIOCSPTLCK = 0x80047487 - TIOCSRS485 = 0xc0205442 - TIOCSSERIAL = 0x541f - TIOCSSOFTCAR = 0x80047465 - TIOCSTART = 0x2000746e - TIOCSTI = 0x80017472 - TIOCSTOP = 0x2000746f - TIOCSWINSZ = 0x80087467 - TIOCVHANGUP = 0x20005437 - TIPC_ADDR_ID = 0x3 - TIPC_ADDR_MCAST = 0x1 - TIPC_ADDR_NAME = 0x2 - TIPC_ADDR_NAMESEQ = 0x1 - TIPC_CFG_SRV = 0x0 - TIPC_CLUSTER_BITS = 0xc - TIPC_CLUSTER_MASK = 0xfff000 - TIPC_CLUSTER_OFFSET = 0xc - TIPC_CLUSTER_SIZE = 0xfff - TIPC_CONN_SHUTDOWN = 0x5 - TIPC_CONN_TIMEOUT = 0x82 - TIPC_CRITICAL_IMPORTANCE = 0x3 - TIPC_DESTNAME = 0x3 - TIPC_DEST_DROPPABLE = 0x81 - TIPC_ERRINFO = 0x1 - TIPC_ERR_NO_NAME = 0x1 - TIPC_ERR_NO_NODE = 0x3 - TIPC_ERR_NO_PORT = 0x2 - TIPC_ERR_OVERLOAD = 0x4 - TIPC_GROUP_JOIN = 0x87 - TIPC_GROUP_LEAVE = 0x88 - TIPC_GROUP_LOOPBACK = 0x1 - TIPC_GROUP_MEMBER_EVTS = 0x2 - TIPC_HIGH_IMPORTANCE = 0x2 - TIPC_IMPORTANCE = 0x7f - TIPC_LINK_STATE = 0x2 - TIPC_LOW_IMPORTANCE = 0x0 - TIPC_MAX_BEARER_NAME = 0x20 - TIPC_MAX_IF_NAME = 0x10 - TIPC_MAX_LINK_NAME = 0x44 - 
TIPC_MAX_MEDIA_NAME = 0x10 - TIPC_MAX_USER_MSG_SIZE = 0x101d0 - TIPC_MCAST_BROADCAST = 0x85 - TIPC_MCAST_REPLICAST = 0x86 - TIPC_MEDIUM_IMPORTANCE = 0x1 - TIPC_NODEID_LEN = 0x10 - TIPC_NODE_BITS = 0xc - TIPC_NODE_MASK = 0xfff - TIPC_NODE_OFFSET = 0x0 - TIPC_NODE_RECVQ_DEPTH = 0x83 - TIPC_NODE_SIZE = 0xfff - TIPC_NODE_STATE = 0x0 - TIPC_OK = 0x0 - TIPC_PUBLISHED = 0x1 - TIPC_RESERVED_TYPES = 0x40 - TIPC_RETDATA = 0x2 - TIPC_SERVICE_ADDR = 0x2 - TIPC_SERVICE_RANGE = 0x1 - TIPC_SOCKET_ADDR = 0x3 - TIPC_SOCK_RECVQ_DEPTH = 0x84 - TIPC_SOCK_RECVQ_USED = 0x89 - TIPC_SRC_DROPPABLE = 0x80 - TIPC_SUBSCR_TIMEOUT = 0x3 - TIPC_SUB_CANCEL = 0x4 - TIPC_SUB_PORTS = 0x1 - TIPC_SUB_SERVICE = 0x2 - TIPC_TOP_SRV = 0x1 - TIPC_WAIT_FOREVER = 0xffffffff - TIPC_WITHDRAWN = 0x2 - TIPC_ZONE_BITS = 0x8 - TIPC_ZONE_CLUSTER_MASK = 0xfffff000 - TIPC_ZONE_MASK = 0xff000000 - TIPC_ZONE_OFFSET = 0x18 - TIPC_ZONE_SCOPE = 0x1 - TIPC_ZONE_SIZE = 0xff - TMPFS_MAGIC = 0x1021994 - TOSTOP = 0x100 - TPACKET_ALIGNMENT = 0x10 - TPACKET_HDRLEN = 0x34 - TP_STATUS_AVAILABLE = 0x0 - TP_STATUS_BLK_TMO = 0x20 - TP_STATUS_COPY = 0x2 - TP_STATUS_CSUMNOTREADY = 0x8 - TP_STATUS_CSUM_VALID = 0x80 - TP_STATUS_KERNEL = 0x0 - TP_STATUS_LOSING = 0x4 - TP_STATUS_SENDING = 0x2 - TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = 0x80000000 - TP_STATUS_TS_SOFTWARE = 0x20000000 - TP_STATUS_TS_SYS_HARDWARE = 0x40000000 - TP_STATUS_USER = 0x1 - TP_STATUS_VLAN_TPID_VALID = 0x40 - TP_STATUS_VLAN_VALID = 0x10 - TP_STATUS_WRONG_FORMAT = 0x4 - TRACEFS_MAGIC = 0x74726163 - TS_COMM_LEN = 0x20 - TUNATTACHFILTER = 0x801054d5 - TUNDETACHFILTER = 0x801054d6 - TUNGETDEVNETNS = 0x200054e3 - TUNGETFEATURES = 0x400454cf - TUNGETFILTER = 0x401054db - TUNGETIFF = 0x400454d2 - TUNGETSNDBUF = 0x400454d3 - TUNGETVNETBE = 0x400454df - TUNGETVNETHDRSZ = 0x400454d7 - TUNGETVNETLE = 0x400454dd - TUNSETCARRIER = 0x800454e2 - TUNSETDEBUG = 0x800454c9 - TUNSETFILTEREBPF = 0x400454e1 - TUNSETGROUP = 0x800454ce - TUNSETIFF = 0x800454ca - TUNSETIFINDEX = 0x800454da - TUNSETLINK = 0x800454cd - TUNSETNOCSUM = 0x800454c8 - TUNSETOFFLOAD = 0x800454d0 - TUNSETOWNER = 0x800454cc - TUNSETPERSIST = 0x800454cb - TUNSETQUEUE = 0x800454d9 - TUNSETSNDBUF = 0x800454d4 - TUNSETSTEERINGEBPF = 0x400454e0 - TUNSETTXFILTER = 0x800454d1 - TUNSETVNETBE = 0x800454de - TUNSETVNETHDRSZ = 0x800454d8 - TUNSETVNETLE = 0x800454dc - UBI_IOCATT = 0x80186f40 - UBI_IOCDET = 0x80046f41 - UBI_IOCEBCH = 0x80044f02 - UBI_IOCEBER = 0x80044f01 - UBI_IOCEBISMAP = 0x40044f05 - UBI_IOCEBMAP = 0x80084f03 - UBI_IOCEBUNMAP = 0x80044f04 - UBI_IOCMKVOL = 0x80986f00 - UBI_IOCRMVOL = 0x80046f01 - UBI_IOCRNVOL = 0x91106f03 - UBI_IOCRPEB = 0x80046f04 - UBI_IOCRSVOL = 0x800c6f02 - UBI_IOCSETVOLPROP = 0x80104f06 - UBI_IOCSPEB = 0x80046f05 - UBI_IOCVOLCRBLK = 0x80804f07 - UBI_IOCVOLRMBLK = 0x20004f08 - UBI_IOCVOLUP = 0x80084f00 - UDF_SUPER_MAGIC = 0x15013346 - UMOUNT_NOFOLLOW = 0x8 - USBDEVICE_SUPER_MAGIC = 0x9fa2 - UTIME_NOW = 0x3fffffff - UTIME_OMIT = 0x3ffffffe - V9FS_MAGIC = 0x1021997 - VDISCARD = 0xd - VEOF = 0x4 - VEOL = 0xb - VEOL2 = 0x10 - VERASE = 0x2 - VINTR = 0x0 - VKILL = 0x3 - VLNEXT = 0xf - VMADDR_CID_ANY = 0xffffffff - VMADDR_CID_HOST = 0x2 - VMADDR_CID_HYPERVISOR = 0x0 - VMADDR_CID_RESERVED = 0x1 - VMADDR_PORT_ANY = 0xffffffff - VMIN = 0x6 - VM_SOCKETS_INVALID_VERSION = 0xffffffff - VQUIT = 0x1 - VREPRINT = 0xc - VSTART = 0x8 - VSTOP = 0x9 - VSUSP = 0xa - VSWTC = 0x7 - VT0 = 0x0 - VT1 = 0x4000 - VTDLY = 0x4000 - VTIME = 0x5 - VWERASE = 0xe - WALL = 0x40000000 - WCLONE = 0x80000000 - WCONTINUED = 0x8 - 
WDIOC_GETBOOTSTATUS = 0x40045702 - WDIOC_GETPRETIMEOUT = 0x40045709 - WDIOC_GETSTATUS = 0x40045701 - WDIOC_GETSUPPORT = 0x40285700 - WDIOC_GETTEMP = 0x40045703 - WDIOC_GETTIMELEFT = 0x4004570a - WDIOC_GETTIMEOUT = 0x40045707 - WDIOC_KEEPALIVE = 0x40045705 - WDIOC_SETOPTIONS = 0x40045704 - WDIOC_SETPRETIMEOUT = 0xc0045708 - WDIOC_SETTIMEOUT = 0xc0045706 - WEXITED = 0x4 - WIN_ACKMEDIACHANGE = 0xdb - WIN_CHECKPOWERMODE1 = 0xe5 - WIN_CHECKPOWERMODE2 = 0x98 - WIN_DEVICE_RESET = 0x8 - WIN_DIAGNOSE = 0x90 - WIN_DOORLOCK = 0xde - WIN_DOORUNLOCK = 0xdf - WIN_DOWNLOAD_MICROCODE = 0x92 - WIN_FLUSH_CACHE = 0xe7 - WIN_FLUSH_CACHE_EXT = 0xea - WIN_FORMAT = 0x50 - WIN_GETMEDIASTATUS = 0xda - WIN_IDENTIFY = 0xec - WIN_IDENTIFY_DMA = 0xee - WIN_IDLEIMMEDIATE = 0xe1 - WIN_INIT = 0x60 - WIN_MEDIAEJECT = 0xed - WIN_MULTREAD = 0xc4 - WIN_MULTREAD_EXT = 0x29 - WIN_MULTWRITE = 0xc5 - WIN_MULTWRITE_EXT = 0x39 - WIN_NOP = 0x0 - WIN_PACKETCMD = 0xa0 - WIN_PIDENTIFY = 0xa1 - WIN_POSTBOOT = 0xdc - WIN_PREBOOT = 0xdd - WIN_QUEUED_SERVICE = 0xa2 - WIN_READ = 0x20 - WIN_READDMA = 0xc8 - WIN_READDMA_EXT = 0x25 - WIN_READDMA_ONCE = 0xc9 - WIN_READDMA_QUEUED = 0xc7 - WIN_READDMA_QUEUED_EXT = 0x26 - WIN_READ_BUFFER = 0xe4 - WIN_READ_EXT = 0x24 - WIN_READ_LONG = 0x22 - WIN_READ_LONG_ONCE = 0x23 - WIN_READ_NATIVE_MAX = 0xf8 - WIN_READ_NATIVE_MAX_EXT = 0x27 - WIN_READ_ONCE = 0x21 - WIN_RECAL = 0x10 - WIN_RESTORE = 0x10 - WIN_SECURITY_DISABLE = 0xf6 - WIN_SECURITY_ERASE_PREPARE = 0xf3 - WIN_SECURITY_ERASE_UNIT = 0xf4 - WIN_SECURITY_FREEZE_LOCK = 0xf5 - WIN_SECURITY_SET_PASS = 0xf1 - WIN_SECURITY_UNLOCK = 0xf2 - WIN_SEEK = 0x70 - WIN_SETFEATURES = 0xef - WIN_SETIDLE1 = 0xe3 - WIN_SETIDLE2 = 0x97 - WIN_SETMULT = 0xc6 - WIN_SET_MAX = 0xf9 - WIN_SET_MAX_EXT = 0x37 - WIN_SLEEPNOW1 = 0xe6 - WIN_SLEEPNOW2 = 0x99 - WIN_SMART = 0xb0 - WIN_SPECIFY = 0x91 - WIN_SRST = 0x8 - WIN_STANDBY = 0xe2 - WIN_STANDBY2 = 0x96 - WIN_STANDBYNOW1 = 0xe0 - WIN_STANDBYNOW2 = 0x94 - WIN_VERIFY = 0x40 - WIN_VERIFY_EXT = 0x42 - WIN_VERIFY_ONCE = 0x41 - WIN_WRITE = 0x30 - WIN_WRITEDMA = 0xca - WIN_WRITEDMA_EXT = 0x35 - WIN_WRITEDMA_ONCE = 0xcb - WIN_WRITEDMA_QUEUED = 0xcc - WIN_WRITEDMA_QUEUED_EXT = 0x36 - WIN_WRITE_BUFFER = 0xe8 - WIN_WRITE_EXT = 0x34 - WIN_WRITE_LONG = 0x32 - WIN_WRITE_LONG_ONCE = 0x33 - WIN_WRITE_ONCE = 0x31 - WIN_WRITE_SAME = 0xe9 - WIN_WRITE_VERIFY = 0x3c - WNOHANG = 0x1 - WNOTHREAD = 0x20000000 - WNOWAIT = 0x1000000 - WORDSIZE = 0x40 - WSTOPPED = 0x2 - WUNTRACED = 0x2 - XATTR_CREATE = 0x1 - XATTR_REPLACE = 0x2 - XCASE = 0x4 - XDP_COPY = 0x2 - XDP_FLAGS_DRV_MODE = 0x4 - XDP_FLAGS_HW_MODE = 0x8 - XDP_FLAGS_MASK = 0xf - XDP_FLAGS_MODES = 0xe - XDP_FLAGS_SKB_MODE = 0x2 - XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 - XDP_MMAP_OFFSETS = 0x1 - XDP_OPTIONS = 0x8 - XDP_OPTIONS_ZEROCOPY = 0x1 - XDP_PACKET_HEADROOM = 0x100 - XDP_PGOFF_RX_RING = 0x0 - XDP_PGOFF_TX_RING = 0x80000000 - XDP_RX_RING = 0x2 - XDP_SHARED_UMEM = 0x1 - XDP_STATISTICS = 0x7 - XDP_TX_RING = 0x3 - XDP_UMEM_COMPLETION_RING = 0x6 - XDP_UMEM_FILL_RING = 0x5 - XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 - XDP_UMEM_PGOFF_FILL_RING = 0x100000000 - XDP_UMEM_REG = 0x4 - XDP_ZEROCOPY = 0x4 - XENFS_SUPER_MAGIC = 0xabba1974 - XFS_SUPER_MAGIC = 0x58465342 - XTABS = 0x1800 - Z3FOLD_MAGIC = 0x33 - ZSMALLOC_MAGIC = 0x58295829 - __TIOCFLUSH = 0x80047410 + ASI_LEON_DFLUSH = 0x11 + ASI_LEON_IFLUSH = 0x10 + ASI_LEON_MMUFLUSH = 0x18 + B1000000 = 0x1008 + B115200 = 0x1002 + B1152000 = 0x1009 + B1500000 = 0x100a + B2000000 = 0x100b + B230400 = 0x1003 + B2500000 = 0x100c + B3000000 = 0x100d + B3500000 = 
0x100e + B4000000 = 0x100f + B460800 = 0x1004 + B500000 = 0x1005 + B57600 = 0x1001 + B576000 = 0x1006 + B921600 = 0x1007 + BLKBSZGET = 0x40081270 + BLKBSZSET = 0x80081271 + BLKFLSBUF = 0x20001261 + BLKFRAGET = 0x20001265 + BLKFRASET = 0x20001264 + BLKGETSIZE = 0x20001260 + BLKGETSIZE64 = 0x40081272 + BLKPBSZGET = 0x2000127b + BLKRAGET = 0x20001263 + BLKRASET = 0x20001262 + BLKROGET = 0x2000125e + BLKROSET = 0x2000125d + BLKRRPART = 0x2000125f + BLKSECTGET = 0x20001267 + BLKSECTSET = 0x20001266 + BLKSSZGET = 0x20001268 + BOTHER = 0x1000 + BS1 = 0x2000 + BSDLY = 0x2000 + CBAUD = 0x100f + CBAUDEX = 0x1000 + CIBAUD = 0x100f0000 + CLOCAL = 0x800 + CR1 = 0x200 + CR2 = 0x400 + CR3 = 0x600 + CRDLY = 0x600 + CREAD = 0x80 + CS6 = 0x10 + CS7 = 0x20 + CS8 = 0x30 + CSIZE = 0x30 + CSTOPB = 0x40 + ECHOCTL = 0x200 + ECHOE = 0x10 + ECHOK = 0x20 + ECHOKE = 0x800 + ECHONL = 0x40 + ECHOPRT = 0x400 + EFD_CLOEXEC = 0x400000 + EFD_NONBLOCK = 0x4000 + EMT_TAGOVF = 0x1 + EPOLL_CLOEXEC = 0x400000 + EXTPROC = 0x10000 + FF1 = 0x8000 + FFDLY = 0x8000 + FLUSHO = 0x1000 + FS_IOC_ENABLE_VERITY = 0x80806685 + FS_IOC_GETFLAGS = 0x40086601 + FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615 + FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614 + FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613 + F_GETLK = 0x7 + F_GETLK64 = 0x7 + F_GETOWN = 0x5 + F_RDLCK = 0x1 + F_SETLK = 0x8 + F_SETLK64 = 0x8 + F_SETLKW = 0x9 + F_SETLKW64 = 0x9 + F_SETOWN = 0x6 + F_UNLCK = 0x3 + F_WRLCK = 0x2 + HUPCL = 0x400 + ICANON = 0x2 + IEXTEN = 0x8000 + IN_CLOEXEC = 0x400000 + IN_NONBLOCK = 0x4000 + IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + ISIG = 0x1 + IUCLC = 0x200 + IXOFF = 0x1000 + IXON = 0x400 + MAP_ANON = 0x20 + MAP_ANONYMOUS = 0x20 + MAP_DENYWRITE = 0x800 + MAP_EXECUTABLE = 0x1000 + MAP_GROWSDOWN = 0x200 + MAP_HUGETLB = 0x40000 + MAP_LOCKED = 0x100 + MAP_NONBLOCK = 0x10000 + MAP_NORESERVE = 0x40 + MAP_POPULATE = 0x8000 + MAP_RENAME = 0x20 + MAP_STACK = 0x20000 + MAP_SYNC = 0x80000 + MCL_CURRENT = 0x2000 + MCL_FUTURE = 0x4000 + MCL_ONFAULT = 0x8000 + NFDBITS = 0x40 + NLDLY = 0x100 + NOFLSH = 0x80 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 + OLCUC = 0x2 + ONLCR = 0x4 + O_APPEND = 0x8 + O_ASYNC = 0x40 + O_CLOEXEC = 0x400000 + O_CREAT = 0x200 + O_DIRECT = 0x100000 + O_DIRECTORY = 0x10000 + O_DSYNC = 0x2000 + O_EXCL = 0x800 + O_FSYNC = 0x802000 + O_LARGEFILE = 0x0 + O_NDELAY = 0x4004 + O_NOATIME = 0x200000 + O_NOCTTY = 0x8000 + O_NOFOLLOW = 0x20000 + O_NONBLOCK = 0x4000 + O_PATH = 0x1000000 + O_RSYNC = 0x802000 + O_SYNC = 0x802000 + O_TMPFILE = 0x2010000 + O_TRUNC = 0x400 + PARENB = 0x100 + PARODD = 0x200 + PENDIN = 0x4000 + PERF_EVENT_IOC_DISABLE = 0x20002401 + PERF_EVENT_IOC_ENABLE = 0x20002400 + PERF_EVENT_IOC_ID = 0x40082407 + PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b + PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 + PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a + PERF_EVENT_IOC_REFRESH = 0x20002402 + PERF_EVENT_IOC_RESET = 0x20002403 + PERF_EVENT_IOC_SET_BPF = 0x80042408 + PERF_EVENT_IOC_SET_FILTER = 0x80082406 + PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PPPIOCATTACH = 0x8004743d + PPPIOCATTCHAN = 0x80047438 + PPPIOCCONNECT = 0x8004743a + PPPIOCDETACH = 0x8004743c + PPPIOCDISCONN = 0x20007439 + PPPIOCGASYNCMAP = 0x40047458 + PPPIOCGCHAN = 0x40047437 + PPPIOCGDEBUG = 0x40047441 + PPPIOCGFLAGS = 0x4004745a + PPPIOCGIDLE = 0x4010743f + PPPIOCGIDLE32 = 0x4008743f + PPPIOCGIDLE64 = 0x4010743f + PPPIOCGL2TPSTATS = 0x40487436 + PPPIOCGMRU = 0x40047453 + PPPIOCGRASYNCMAP 
= 0x40047455 + PPPIOCGUNIT = 0x40047456 + PPPIOCGXASYNCMAP = 0x40207450 + PPPIOCSACTIVE = 0x80107446 + PPPIOCSASYNCMAP = 0x80047457 + PPPIOCSCOMPRESS = 0x8010744d + PPPIOCSDEBUG = 0x80047440 + PPPIOCSFLAGS = 0x80047459 + PPPIOCSMAXCID = 0x80047451 + PPPIOCSMRRU = 0x8004743b + PPPIOCSMRU = 0x80047452 + PPPIOCSNPMODE = 0x8008744b + PPPIOCSPASS = 0x80107447 + PPPIOCSRASYNCMAP = 0x80047454 + PPPIOCSXASYNCMAP = 0x8020744f + PPPIOCXFERUNIT = 0x2000744e + PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_GETFPAREGS = 0x14 + PTRACE_GETFPREGS = 0xe + PTRACE_GETFPREGS64 = 0x19 + PTRACE_GETREGS64 = 0x16 + PTRACE_READDATA = 0x10 + PTRACE_READTEXT = 0x12 + PTRACE_SETFPAREGS = 0x15 + PTRACE_SETFPREGS = 0xf + PTRACE_SETFPREGS64 = 0x1a + PTRACE_SETREGS64 = 0x17 + PTRACE_SPARC_DETACH = 0xb + PTRACE_WRITEDATA = 0x11 + PTRACE_WRITETEXT = 0x13 + PT_FP = 0x48 + PT_G0 = 0x10 + PT_G1 = 0x14 + PT_G2 = 0x18 + PT_G3 = 0x1c + PT_G4 = 0x20 + PT_G5 = 0x24 + PT_G6 = 0x28 + PT_G7 = 0x2c + PT_I0 = 0x30 + PT_I1 = 0x34 + PT_I2 = 0x38 + PT_I3 = 0x3c + PT_I4 = 0x40 + PT_I5 = 0x44 + PT_I6 = 0x48 + PT_I7 = 0x4c + PT_NPC = 0x8 + PT_PC = 0x4 + PT_PSR = 0x0 + PT_REGS_MAGIC = 0x57ac6c00 + PT_TNPC = 0x90 + PT_TPC = 0x88 + PT_TSTATE = 0x80 + PT_V9_FP = 0x70 + PT_V9_G0 = 0x0 + PT_V9_G1 = 0x8 + PT_V9_G2 = 0x10 + PT_V9_G3 = 0x18 + PT_V9_G4 = 0x20 + PT_V9_G5 = 0x28 + PT_V9_G6 = 0x30 + PT_V9_G7 = 0x38 + PT_V9_I0 = 0x40 + PT_V9_I1 = 0x48 + PT_V9_I2 = 0x50 + PT_V9_I3 = 0x58 + PT_V9_I4 = 0x60 + PT_V9_I5 = 0x68 + PT_V9_I6 = 0x70 + PT_V9_I7 = 0x78 + PT_V9_MAGIC = 0x9c + PT_V9_TNPC = 0x90 + PT_V9_TPC = 0x88 + PT_V9_TSTATE = 0x80 + PT_V9_Y = 0x98 + PT_WIM = 0x10 + PT_Y = 0xc + RLIMIT_AS = 0x9 + RLIMIT_MEMLOCK = 0x8 + RLIMIT_NOFILE = 0x6 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 + RNDADDENTROPY = 0x80085203 + RNDADDTOENTCNT = 0x80045201 + RNDCLEARPOOL = 0x20005206 + RNDGETENTCNT = 0x40045200 + RNDGETPOOL = 0x40085202 + RNDRESEEDCRNG = 0x20005207 + RNDZAPENTCNT = 0x20005204 + RTC_AIE_OFF = 0x20007002 + RTC_AIE_ON = 0x20007001 + RTC_ALM_READ = 0x40247008 + RTC_ALM_SET = 0x80247007 + RTC_EPOCH_READ = 0x4008700d + RTC_EPOCH_SET = 0x8008700e + RTC_IRQP_READ = 0x4008700b + RTC_IRQP_SET = 0x8008700c + RTC_PIE_OFF = 0x20007006 + RTC_PIE_ON = 0x20007005 + RTC_PLL_GET = 0x40207011 + RTC_PLL_SET = 0x80207012 + RTC_RD_TIME = 0x40247009 + RTC_SET_TIME = 0x8024700a + RTC_UIE_OFF = 0x20007004 + RTC_UIE_ON = 0x20007003 + RTC_VL_CLR = 0x20007014 + RTC_VL_READ = 0x40047013 + RTC_WIE_OFF = 0x20007010 + RTC_WIE_ON = 0x2000700f + RTC_WKALM_RD = 0x40287010 + RTC_WKALM_SET = 0x8028700f + SCM_TIMESTAMPING = 0x23 + SCM_TIMESTAMPING_OPT_STATS = 0x38 + SCM_TIMESTAMPING_PKTINFO = 0x3c + SCM_TIMESTAMPNS = 0x21 + SCM_TXTIME = 0x3f + SCM_WIFI_STATUS = 0x25 + SFD_CLOEXEC = 0x400000 + SFD_NONBLOCK = 0x4000 + SIOCATMARK = 0x8905 + SIOCGPGRP = 0x8904 + SIOCGSTAMPNS_NEW = 0x40108907 + SIOCGSTAMP_NEW = 0x40108906 + SIOCINQ = 0x4004667f + SIOCOUTQ = 0x40047473 + SIOCSPGRP = 0x8902 + SOCK_CLOEXEC = 0x400000 + SOCK_DGRAM = 0x2 + SOCK_NONBLOCK = 0x4000 + SOCK_STREAM = 0x1 + SOL_SOCKET = 0xffff + SO_ACCEPTCONN = 0x8000 + SO_ATTACH_BPF = 0x34 + SO_ATTACH_REUSEPORT_CBPF = 0x35 + SO_ATTACH_REUSEPORT_EBPF = 0x36 + SO_BINDTODEVICE = 0xd + SO_BINDTOIFINDEX = 0x41 + SO_BPF_EXTENSIONS = 0x32 + SO_BROADCAST = 0x20 + SO_BSDCOMPAT = 0x400 + SO_BUSY_POLL = 0x30 + SO_CNX_ADVICE = 0x37 + SO_COOKIE = 0x3b + SO_DETACH_REUSEPORT_BPF = 0x47 + SO_DOMAIN = 0x1029 + SO_DONTROUTE = 0x10 + SO_ERROR = 0x1007 + SO_INCOMING_CPU = 0x33 + SO_INCOMING_NAPI_ID = 0x3a + SO_KEEPALIVE = 0x8 + SO_LINGER = 0x80 + 
SO_LOCK_FILTER = 0x28 + SO_MARK = 0x22 + SO_MAX_PACING_RATE = 0x31 + SO_MEMINFO = 0x39 + SO_NOFCS = 0x27 + SO_OOBINLINE = 0x100 + SO_PASSCRED = 0x2 + SO_PASSSEC = 0x1f + SO_PEEK_OFF = 0x26 + SO_PEERCRED = 0x40 + SO_PEERGROUPS = 0x3d + SO_PEERSEC = 0x1e + SO_PROTOCOL = 0x1028 + SO_RCVBUF = 0x1002 + SO_RCVBUFFORCE = 0x100b + SO_RCVLOWAT = 0x800 + SO_RCVTIMEO = 0x2000 + SO_RCVTIMEO_NEW = 0x44 + SO_RCVTIMEO_OLD = 0x2000 + SO_REUSEADDR = 0x4 + SO_REUSEPORT = 0x200 + SO_RXQ_OVFL = 0x24 + SO_SECURITY_AUTHENTICATION = 0x5001 + SO_SECURITY_ENCRYPTION_NETWORK = 0x5004 + SO_SECURITY_ENCRYPTION_TRANSPORT = 0x5002 + SO_SELECT_ERR_QUEUE = 0x29 + SO_SNDBUF = 0x1001 + SO_SNDBUFFORCE = 0x100a + SO_SNDLOWAT = 0x1000 + SO_SNDTIMEO = 0x4000 + SO_SNDTIMEO_NEW = 0x45 + SO_SNDTIMEO_OLD = 0x4000 + SO_TIMESTAMPING = 0x23 + SO_TIMESTAMPING_NEW = 0x43 + SO_TIMESTAMPING_OLD = 0x23 + SO_TIMESTAMPNS = 0x21 + SO_TIMESTAMPNS_NEW = 0x42 + SO_TIMESTAMPNS_OLD = 0x21 + SO_TIMESTAMP_NEW = 0x46 + SO_TXTIME = 0x3f + SO_TYPE = 0x1008 + SO_WIFI_STATUS = 0x25 + SO_ZEROCOPY = 0x3e + TAB1 = 0x800 + TAB2 = 0x1000 + TAB3 = 0x1800 + TABDLY = 0x1800 + TCFLSH = 0x20005407 + TCGETA = 0x40125401 + TCGETS = 0x40245408 + TCGETS2 = 0x402c540c + TCSAFLUSH = 0x2 + TCSBRK = 0x20005405 + TCSBRKP = 0x5425 + TCSETA = 0x80125402 + TCSETAF = 0x80125404 + TCSETAW = 0x80125403 + TCSETS = 0x80245409 + TCSETS2 = 0x802c540d + TCSETSF = 0x8024540b + TCSETSF2 = 0x802c540f + TCSETSW = 0x8024540a + TCSETSW2 = 0x802c540e + TCXONC = 0x20005406 + TFD_CLOEXEC = 0x400000 + TFD_NONBLOCK = 0x4000 + TIOCCBRK = 0x2000747a + TIOCCONS = 0x20007424 + TIOCEXCL = 0x2000740d + TIOCGDEV = 0x40045432 + TIOCGETD = 0x40047400 + TIOCGEXCL = 0x40045440 + TIOCGICOUNT = 0x545d + TIOCGISO7816 = 0x40285443 + TIOCGLCKTRMIOS = 0x5456 + TIOCGPGRP = 0x40047483 + TIOCGPKT = 0x40045438 + TIOCGPTLCK = 0x40045439 + TIOCGPTN = 0x40047486 + TIOCGPTPEER = 0x20007489 + TIOCGRS485 = 0x40205441 + TIOCGSERIAL = 0x541e + TIOCGSID = 0x40047485 + TIOCGSOFTCAR = 0x40047464 + TIOCGWINSZ = 0x40087468 + TIOCINQ = 0x4004667f + TIOCLINUX = 0x541c + TIOCMBIC = 0x8004746b + TIOCMBIS = 0x8004746c + TIOCMGET = 0x4004746a + TIOCMIWAIT = 0x545c + TIOCMSET = 0x8004746d + TIOCM_CAR = 0x40 + TIOCM_CD = 0x40 + TIOCM_CTS = 0x20 + TIOCM_DSR = 0x100 + TIOCM_RI = 0x80 + TIOCM_RNG = 0x80 + TIOCM_SR = 0x10 + TIOCM_ST = 0x8 + TIOCNOTTY = 0x20007471 + TIOCNXCL = 0x2000740e + TIOCOUTQ = 0x40047473 + TIOCPKT = 0x80047470 + TIOCSBRK = 0x2000747b + TIOCSCTTY = 0x20007484 + TIOCSERCONFIG = 0x5453 + TIOCSERGETLSR = 0x5459 + TIOCSERGETMULTI = 0x545a + TIOCSERGSTRUCT = 0x5458 + TIOCSERGWILD = 0x5454 + TIOCSERSETMULTI = 0x545b + TIOCSERSWILD = 0x5455 + TIOCSETD = 0x80047401 + TIOCSIG = 0x80047488 + TIOCSISO7816 = 0xc0285444 + TIOCSLCKTRMIOS = 0x5457 + TIOCSPGRP = 0x80047482 + TIOCSPTLCK = 0x80047487 + TIOCSRS485 = 0xc0205442 + TIOCSSERIAL = 0x541f + TIOCSSOFTCAR = 0x80047465 + TIOCSTART = 0x2000746e + TIOCSTI = 0x80017472 + TIOCSTOP = 0x2000746f + TIOCSWINSZ = 0x80087467 + TIOCVHANGUP = 0x20005437 + TOSTOP = 0x100 + TUNATTACHFILTER = 0x801054d5 + TUNDETACHFILTER = 0x801054d6 + TUNGETDEVNETNS = 0x200054e3 + TUNGETFEATURES = 0x400454cf + TUNGETFILTER = 0x401054db + TUNGETIFF = 0x400454d2 + TUNGETSNDBUF = 0x400454d3 + TUNGETVNETBE = 0x400454df + TUNGETVNETHDRSZ = 0x400454d7 + TUNGETVNETLE = 0x400454dd + TUNSETCARRIER = 0x800454e2 + TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 + TUNSETGROUP = 0x800454ce + TUNSETIFF = 0x800454ca + TUNSETIFINDEX = 0x800454da + TUNSETLINK = 0x800454cd + TUNSETNOCSUM = 0x800454c8 + TUNSETOFFLOAD 
= 0x800454d0 + TUNSETOWNER = 0x800454cc + TUNSETPERSIST = 0x800454cb + TUNSETQUEUE = 0x800454d9 + TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 + TUNSETTXFILTER = 0x800454d1 + TUNSETVNETBE = 0x800454de + TUNSETVNETHDRSZ = 0x800454d8 + TUNSETVNETLE = 0x800454dc + UBI_IOCATT = 0x80186f40 + UBI_IOCDET = 0x80046f41 + UBI_IOCEBCH = 0x80044f02 + UBI_IOCEBER = 0x80044f01 + UBI_IOCEBISMAP = 0x40044f05 + UBI_IOCEBMAP = 0x80084f03 + UBI_IOCEBUNMAP = 0x80044f04 + UBI_IOCMKVOL = 0x80986f00 + UBI_IOCRMVOL = 0x80046f01 + UBI_IOCRNVOL = 0x91106f03 + UBI_IOCRPEB = 0x80046f04 + UBI_IOCRSVOL = 0x800c6f02 + UBI_IOCSETVOLPROP = 0x80104f06 + UBI_IOCSPEB = 0x80046f05 + UBI_IOCVOLCRBLK = 0x80804f07 + UBI_IOCVOLRMBLK = 0x20004f08 + UBI_IOCVOLUP = 0x80084f00 + VDISCARD = 0xd + VEOF = 0x4 + VEOL = 0xb + VEOL2 = 0x10 + VMIN = 0x6 + VREPRINT = 0xc + VSTART = 0x8 + VSTOP = 0x9 + VSUSP = 0xa + VSWTC = 0x7 + VT1 = 0x4000 + VTDLY = 0x4000 + VTIME = 0x5 + VWERASE = 0xe + WDIOC_GETBOOTSTATUS = 0x40045702 + WDIOC_GETPRETIMEOUT = 0x40045709 + WDIOC_GETSTATUS = 0x40045701 + WDIOC_GETSUPPORT = 0x40285700 + WDIOC_GETTEMP = 0x40045703 + WDIOC_GETTIMELEFT = 0x4004570a + WDIOC_GETTIMEOUT = 0x40045707 + WDIOC_KEEPALIVE = 0x40045705 + WDIOC_SETOPTIONS = 0x40045704 + WORDSIZE = 0x40 + XCASE = 0x4 + XTABS = 0x1800 + __TIOCFLUSH = 0x80047410 ) // Errors const ( - E2BIG = syscall.Errno(0x7) - EACCES = syscall.Errno(0xd) EADDRINUSE = syscall.Errno(0x30) EADDRNOTAVAIL = syscall.Errno(0x31) EADV = syscall.Errno(0x53) EAFNOSUPPORT = syscall.Errno(0x2f) - EAGAIN = syscall.Errno(0xb) EALREADY = syscall.Errno(0x25) EBADE = syscall.Errno(0x66) - EBADF = syscall.Errno(0x9) EBADFD = syscall.Errno(0x5d) EBADMSG = syscall.Errno(0x4c) EBADR = syscall.Errno(0x67) EBADRQC = syscall.Errno(0x6a) EBADSLT = syscall.Errno(0x6b) EBFONT = syscall.Errno(0x6d) - EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x7f) - ECHILD = syscall.Errno(0xa) ECHRNG = syscall.Errno(0x5e) ECOMM = syscall.Errno(0x55) ECONNABORTED = syscall.Errno(0x35) @@ -2847,23 +553,15 @@ const ( EDEADLK = syscall.Errno(0x4e) EDEADLOCK = syscall.Errno(0x6c) EDESTADDRREQ = syscall.Errno(0x27) - EDOM = syscall.Errno(0x21) EDOTDOT = syscall.Errno(0x58) EDQUOT = syscall.Errno(0x45) - EEXIST = syscall.Errno(0x11) - EFAULT = syscall.Errno(0xe) - EFBIG = syscall.Errno(0x1b) EHOSTDOWN = syscall.Errno(0x40) EHOSTUNREACH = syscall.Errno(0x41) EHWPOISON = syscall.Errno(0x87) EIDRM = syscall.Errno(0x4d) EILSEQ = syscall.Errno(0x7a) EINPROGRESS = syscall.Errno(0x24) - EINTR = syscall.Errno(0x4) - EINVAL = syscall.Errno(0x16) - EIO = syscall.Errno(0x5) EISCONN = syscall.Errno(0x38) - EISDIR = syscall.Errno(0x15) EISNAM = syscall.Errno(0x78) EKEYEXPIRED = syscall.Errno(0x81) EKEYREJECTED = syscall.Errno(0x83) @@ -2880,8 +578,6 @@ const ( ELNRNG = syscall.Errno(0x62) ELOOP = syscall.Errno(0x3e) EMEDIUMTYPE = syscall.Errno(0x7e) - EMFILE = syscall.Errno(0x18) - EMLINK = syscall.Errno(0x1f) EMSGSIZE = syscall.Errno(0x28) EMULTIHOP = syscall.Errno(0x57) ENAMETOOLONG = syscall.Errno(0x3f) @@ -2889,102 +585,70 @@ const ( ENETDOWN = syscall.Errno(0x32) ENETRESET = syscall.Errno(0x34) ENETUNREACH = syscall.Errno(0x33) - ENFILE = syscall.Errno(0x17) ENOANO = syscall.Errno(0x69) ENOBUFS = syscall.Errno(0x37) ENOCSI = syscall.Errno(0x64) ENODATA = syscall.Errno(0x6f) - ENODEV = syscall.Errno(0x13) - ENOENT = syscall.Errno(0x2) - ENOEXEC = syscall.Errno(0x8) ENOKEY = syscall.Errno(0x80) ENOLCK = syscall.Errno(0x4f) ENOLINK = syscall.Errno(0x52) ENOMEDIUM = syscall.Errno(0x7d) - ENOMEM = 
syscall.Errno(0xc) ENOMSG = syscall.Errno(0x4b) ENONET = syscall.Errno(0x50) ENOPKG = syscall.Errno(0x71) ENOPROTOOPT = syscall.Errno(0x2a) - ENOSPC = syscall.Errno(0x1c) ENOSR = syscall.Errno(0x4a) ENOSTR = syscall.Errno(0x48) ENOSYS = syscall.Errno(0x5a) - ENOTBLK = syscall.Errno(0xf) ENOTCONN = syscall.Errno(0x39) - ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x42) ENOTNAM = syscall.Errno(0x76) ENOTRECOVERABLE = syscall.Errno(0x85) ENOTSOCK = syscall.Errno(0x26) ENOTSUP = syscall.Errno(0x2d) - ENOTTY = syscall.Errno(0x19) ENOTUNIQ = syscall.Errno(0x73) - ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x2d) EOVERFLOW = syscall.Errno(0x5c) EOWNERDEAD = syscall.Errno(0x84) - EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x2e) - EPIPE = syscall.Errno(0x20) EPROCLIM = syscall.Errno(0x43) EPROTO = syscall.Errno(0x56) EPROTONOSUPPORT = syscall.Errno(0x2b) EPROTOTYPE = syscall.Errno(0x29) - ERANGE = syscall.Errno(0x22) EREMCHG = syscall.Errno(0x59) EREMOTE = syscall.Errno(0x47) EREMOTEIO = syscall.Errno(0x79) ERESTART = syscall.Errno(0x74) ERFKILL = syscall.Errno(0x86) - EROFS = syscall.Errno(0x1e) ERREMOTE = syscall.Errno(0x51) ESHUTDOWN = syscall.Errno(0x3a) ESOCKTNOSUPPORT = syscall.Errno(0x2c) - ESPIPE = syscall.Errno(0x1d) - ESRCH = syscall.Errno(0x3) ESRMNT = syscall.Errno(0x54) ESTALE = syscall.Errno(0x46) ESTRPIPE = syscall.Errno(0x5b) ETIME = syscall.Errno(0x49) ETIMEDOUT = syscall.Errno(0x3c) ETOOMANYREFS = syscall.Errno(0x3b) - ETXTBSY = syscall.Errno(0x1a) EUCLEAN = syscall.Errno(0x75) EUNATCH = syscall.Errno(0x63) EUSERS = syscall.Errno(0x44) - EWOULDBLOCK = syscall.Errno(0xb) - EXDEV = syscall.Errno(0x12) EXFULL = syscall.Errno(0x68) ) // Signals const ( - SIGABRT = syscall.Signal(0x6) - SIGALRM = syscall.Signal(0xe) SIGBUS = syscall.Signal(0xa) SIGCHLD = syscall.Signal(0x14) SIGCLD = syscall.Signal(0x14) SIGCONT = syscall.Signal(0x13) SIGEMT = syscall.Signal(0x7) - SIGFPE = syscall.Signal(0x8) - SIGHUP = syscall.Signal(0x1) - SIGILL = syscall.Signal(0x4) - SIGINT = syscall.Signal(0x2) SIGIO = syscall.Signal(0x17) - SIGIOT = syscall.Signal(0x6) - SIGKILL = syscall.Signal(0x9) SIGLOST = syscall.Signal(0x1d) - SIGPIPE = syscall.Signal(0xd) SIGPOLL = syscall.Signal(0x17) SIGPROF = syscall.Signal(0x1b) SIGPWR = syscall.Signal(0x1d) - SIGQUIT = syscall.Signal(0x3) - SIGSEGV = syscall.Signal(0xb) SIGSTOP = syscall.Signal(0x11) SIGSYS = syscall.Signal(0xc) - SIGTERM = syscall.Signal(0xf) - SIGTRAP = syscall.Signal(0x5) SIGTSTP = syscall.Signal(0x12) SIGTTIN = syscall.Signal(0x15) SIGTTOU = syscall.Signal(0x16) diff --git a/vendor/golang.org/x/sys/unix/zptracearm_linux.go b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go similarity index 93% rename from vendor/golang.org/x/sys/unix/zptracearm_linux.go rename to vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go index faf23bbed..89c5920e0 100644 --- a/vendor/golang.org/x/sys/unix/zptracearm_linux.go +++ b/vendor/golang.org/x/sys/unix/zptrace_armnn_linux.go @@ -1,4 +1,4 @@ -// Code generated by linux/mkall.go generatePtracePair(arm, arm64). DO NOT EDIT. +// Code generated by linux/mkall.go generatePtracePair("arm", "arm64"). DO NOT EDIT. // +build linux // +build arm arm64 diff --git a/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go b/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go new file mode 100644 index 000000000..6cb6d688a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zptrace_linux_arm64.go @@ -0,0 +1,17 @@ +// Code generated by linux/mkall.go generatePtraceRegSet("arm64"). DO NOT EDIT. 
+
+package unix
+
+import "unsafe"
+
+// PtraceGetRegSetArm64 fetches the registers used by arm64 binaries.
+func PtraceGetRegSetArm64(pid, addr int, regsout *PtraceRegsArm64) error {
+	iovec := Iovec{(*byte)(unsafe.Pointer(regsout)), uint64(unsafe.Sizeof(*regsout))}
+	return ptrace(PTRACE_GETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec)))
+}
+
+// PtraceSetRegSetArm64 sets the registers used by arm64 binaries.
+func PtraceSetRegSetArm64(pid, addr int, regs *PtraceRegsArm64) error {
+	iovec := Iovec{(*byte)(unsafe.Pointer(regs)), uint64(unsafe.Sizeof(*regs))}
+	return ptrace(PTRACE_SETREGSET, pid, uintptr(addr), uintptr(unsafe.Pointer(&iovec)))
+}
diff --git a/vendor/golang.org/x/sys/unix/zptracemips_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go
similarity index 93%
rename from vendor/golang.org/x/sys/unix/zptracemips_linux.go
rename to vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go
index c431131e6..24b841eec 100644
--- a/vendor/golang.org/x/sys/unix/zptracemips_linux.go
+++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnn_linux.go
@@ -1,4 +1,4 @@
-// Code generated by linux/mkall.go generatePtracePair(mips, mips64). DO NOT EDIT.
+// Code generated by linux/mkall.go generatePtracePair("mips", "mips64"). DO NOT EDIT.
 
 // +build linux
 // +build mips mips64
diff --git a/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go
similarity index 93%
rename from vendor/golang.org/x/sys/unix/zptracemipsle_linux.go
rename to vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go
index dc3d6d373..47b048956 100644
--- a/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go
+++ b/vendor/golang.org/x/sys/unix/zptrace_mipsnnle_linux.go
@@ -1,4 +1,4 @@
-// Code generated by linux/mkall.go generatePtracePair(mipsle, mips64le). DO NOT EDIT.
+// Code generated by linux/mkall.go generatePtracePair("mipsle", "mips64le"). DO NOT EDIT.
 
 // +build linux
 // +build mipsle mips64le
diff --git a/vendor/golang.org/x/sys/unix/zptrace386_linux.go b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go
similarity index 95%
rename from vendor/golang.org/x/sys/unix/zptrace386_linux.go
rename to vendor/golang.org/x/sys/unix/zptrace_x86_linux.go
index 2d21c49e1..ea5d9cb53 100644
--- a/vendor/golang.org/x/sys/unix/zptrace386_linux.go
+++ b/vendor/golang.org/x/sys/unix/zptrace_x86_linux.go
@@ -1,4 +1,4 @@
-// Code generated by linux/mkall.go generatePtracePair(386, amd64). DO NOT EDIT.
+// Code generated by linux/mkall.go generatePtracePair("386", "amd64"). DO NOT EDIT.
// +build linux // +build 386 amd64 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go index b42c1cbad..23e94d366 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -527,6 +516,17 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -547,6 +547,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) if e1 != 0 { @@ -950,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1683,22 +1709,6 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -1709,18 +1719,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 603c9f6eb..e2ffb3bed 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -339,22 +339,6 @@ func libc_futimes_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:linkname libc_fcntl libc_fcntl -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -727,6 +711,22 @@ func libc_setattrlist_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -757,6 +757,27 @@ func libc_ioctl_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sysctl_trampoline() + +//go:linkname libc_sysctl libc_sysctl +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := 
syscall_syscall9(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) if e1 != 0 { @@ -1355,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -2321,27 +2357,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -2357,23 +2372,6 @@ func libc_ptrace_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s index ece6f67c2..6836a4129 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s @@ -44,8 +44,6 @@ TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 JMP libc_poll(SB) TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 @@ -84,10 +82,14 @@ TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) +TEXT 
·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 @@ -262,8 +264,6 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go index 38b7cbab3..102561730 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -527,6 +516,17 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -547,6 +547,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -950,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) 
uid = int(r0) @@ -1683,22 +1709,6 @@ func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -1709,18 +1719,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index fda478e8b..c67e336e2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -339,22 +339,6 @@ func libc_futimes_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:linkname libc_fcntl libc_fcntl -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -727,6 +711,22 @@ func libc_setattrlist_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -757,6 +757,27 @@ func libc_ioctl_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = 
unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sysctl_trampoline() + +//go:linkname libc_sysctl libc_sysctl +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -1355,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -2321,27 +2357,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { @@ -2357,23 +2372,6 @@ func libc_ptrace_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat64_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 7c4d59016..a3fdf099d 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -44,8 +44,6 @@ TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 JMP libc_poll(SB) TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 @@ -84,10 +82,14 @@ TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 @@ -262,8 +264,6 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go index abb69183a..d34e6df2f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -527,6 +516,17 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -547,6 +547,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err 
error) { _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) if e1 != 0 { @@ -950,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1666,18 +1692,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 163b3912d..b759757a7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -339,22 +339,6 @@ func libc_futimes_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:linkname libc_fcntl libc_fcntl -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -727,6 +711,22 @@ func libc_setattrlist_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -757,6 +757,27 @@ func libc_ioctl_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), 
uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sysctl_trampoline() + +//go:linkname libc_sysctl libc_sysctl +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall9(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(offset>>32), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags), 0, 0) if e1 != 0 { @@ -1355,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -2321,23 +2357,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int32(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s index 5bebb1bbd..b67f518fa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s @@ -44,8 +44,6 @@ TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 JMP libc_poll(SB) TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 @@ -84,10 +82,14 @@ TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go index b75c11d41..8d39a09f7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go 
@@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -527,6 +516,17 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -547,6 +547,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := Syscall6(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -950,6 +966,16 @@ func Getsid(pid int) (sid int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) @@ -1666,18 +1692,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 7c5bd510e..b28861260 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -339,22 +339,6 @@ func libc_futimes_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
-func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_fcntl_trampoline() - -//go:linkname libc_fcntl libc_fcntl -//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_poll_trampoline), uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -727,6 +711,22 @@ func libc_setattrlist_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (val int, err error) { + r0, _, e1 := syscall_syscall(funcPC(libc_fcntl_trampoline), uintptr(fd), uintptr(cmd), uintptr(arg)) + val = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fcntl_trampoline() + +//go:linkname libc_fcntl libc_fcntl +//go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_kill_trampoline), uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -757,6 +757,27 @@ func libc_ioctl_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc_sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_sysctl_trampoline() + +//go:linkname libc_sysctl libc_sysctl +//go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_sendfile_trampoline), uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { @@ -1355,6 +1376,21 @@ func libc_getsid_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Gettimeofday(tp *Timeval) (err error) { + _, _, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_gettimeofday_trampoline() + +//go:linkname libc_gettimeofday libc_gettimeofday +//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getuid() (uid int) { r0, _, _ := syscall_rawSyscall(funcPC(libc_getuid_trampoline), 0, 0, 0) uid = int(r0) @@ -2321,23 +2357,6 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { - r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) - sec = int64(r0) - usec = int32(r1) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -func libc_gettimeofday_trampoline() - -//go:linkname libc_gettimeofday libc_gettimeofday -//go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_fstat_trampoline), uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 96ab9877e..40cce1bb2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -44,8 +44,6 @@ TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) -TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 - JMP libc_fcntl(SB) TEXT ·libc_poll_trampoline(SB),NOSPLIT,$0-0 JMP libc_poll(SB) TEXT ·libc_madvise_trampoline(SB),NOSPLIT,$0-0 @@ -84,10 +82,14 @@ TEXT ·libc_flistxattr_trampoline(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) TEXT ·libc_setattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_setattrlist(SB) +TEXT ·libc_fcntl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) TEXT ·libc_kill_trampoline(SB),NOSPLIT,$0-0 JMP libc_kill(SB) TEXT ·libc_ioctl_trampoline(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) +TEXT ·libc_sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc_sysctl(SB) TEXT ·libc_sendfile_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) TEXT ·libc_access_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index df199b345..fe1fdd78d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -255,17 +255,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index e68185f1e..600f1d26d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -255,17 
+239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -387,8 +360,15 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data int) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -397,15 +377,24 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getcwd(buf []byte) (n int, err error) { +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) } else { _p0 = unsafe.Pointer(&_zero) } - r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -414,8 +403,8 @@ func Getcwd(buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1363,7 +1352,7 @@ func mknodat_freebsd12(fd int, path string, mode uint32, dev uint64) (err error) if err != nil { return } - _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + _, _, e1 := Syscall6(SYS_MKNODAT_FREEBSD12, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), uintptr(dev>>32), 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 2f77f93c4..064934b0d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg 
int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -361,22 +350,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -414,6 +387,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data int) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index e9a12c9d9..31d2c4616 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 27ab0fbda..4adaaa561 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags freebsd,arm64 -- syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm64.go +// go run mksyscall.go -tags freebsd,arm64 syscall_bsd.go syscall_freebsd.go syscall_freebsd_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build freebsd,arm64 @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -361,22 +350,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -414,6 +387,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data int) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go new file mode 100644 index 000000000..92efa1da3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -0,0 +1,87 @@ +// go run mksyscall_solaris.go -illumos -tags illumos,amd64 syscall_illumos.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build illumos,amd64 + +package unix + +import ( + "unsafe" +) + +//go:cgo_import_dynamic libc_readv readv "libc.so" +//go:cgo_import_dynamic libc_preadv preadv "libc.so" +//go:cgo_import_dynamic libc_writev writev "libc.so" +//go:cgo_import_dynamic libc_pwritev pwritev "libc.so" + +//go:linkname procreadv libc_readv +//go:linkname procpreadv libc_preadv +//go:linkname procwritev libc_writev +//go:linkname procpwritev libc_pwritev + +var ( + procreadv, + procpreadv, + procwritev, + procpwritev syscallFunc +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 *Iovec + if len(iovs) > 0 { + _p0 = &iovs[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procreadv)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, off int64) (n int, err error) { + var _p0 *Iovec + if len(iovs) > 0 { + _p0 = &iovs[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpreadv)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 *Iovec + if len(iovs) > 0 { + _p0 = &iovs[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwritev)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, off int64) (n int, err error) { + var _p0 *Iovec + if len(iovs) > 0 { + _p0 = &iovs[0] + } + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwritev)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) + n = int(r0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go new file mode 100644 index 000000000..df217825f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -0,0 +1,1856 @@ +// Code generated by mkmerge.go; DO NOT EDIT. 
+ +// +build linux + +package unix + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { + r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fchmodat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(buf) > 0 { + _p1 = unsafe.Pointer(&buf[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unlinkat(dirfd int, path string, flags int) (err error) { + var _p0 *byte + _p0, err = 
BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getcwd(buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { + r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) + wpid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlJoin(cmd int, arg2 string) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg2) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg3) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(arg4) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { + var _p0 unsafe.Pointer + if len(payload) > 0 { + _p0 = unsafe.Pointer(&payload[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE 
IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) + ret = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(arg) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(source) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(target) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(fstype) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Acct(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(payload) > 0 { + _p2 = unsafe.Pointer(&payload[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) + id = int(r0) + if e1 != 0 { 
+ err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Adjtimex(buf *Timex) (state int, err error) { + r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) + state = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := RawSyscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { + _, _, e1 := RawSyscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chdir(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Chroot(path string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGetres(clockid int32, res *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { + _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Close(fd int) (err error) { + _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func DeleteModule(name string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup(oldfd int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_DUP, 
uintptr(oldfd), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Dup3(oldfd int, newfd int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCreate1(flag int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { + _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Eventfd(initval uint, flags int) (fd int, err error) { + r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Exit(code int) { + SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchdir(fd int) (err error) { + _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchmod(fd int, mode uint32) (err error) { + _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fdatasync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func FinitModule(fd int, params string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flistxattr(fd int, dest []byte) (sz int, err error) { + var _p0 unsafe.Pointer + if len(dest) > 0 { + _p0 = unsafe.Pointer(&dest[0]) + } 
else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Flock(fd int, how int) (err error) { + _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fremovexattr(fd int, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Fsync(fd int) (err error) { + _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getdents(fd int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpgid(pid int) (pgid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) + pgid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpid() (pid int) { + r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getppid() (ppid int) { + r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) + ppid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getpriority(which int, who int) (prio int, err error) { + r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) + prio = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrandom(buf []byte, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getrusage(who int, rusage *Rusage) (err error) { + _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getsid(pid int) 
(sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Gettid() (tid int) { + r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) + tid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InitModule(moduleImage []byte, params string) (err error) { + var _p0 unsafe.Pointer + if len(moduleImage) > 0 { + _p0 = unsafe.Pointer(&moduleImage[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + var _p1 *byte + _p1, err = BytePtrFromString(params) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) + watchdesc = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyInit1(flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { + r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) + success = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Kill(pid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Klogctl(typ int, buf []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + 
r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Listxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Llistxattr(path string, dest []byte) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lremovexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func MemfdCreate(name string, flags int) (fd int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(name) + if err != nil { + return + } + r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mkdirat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func 
Nanosleep(time *Timespec, leftover *Timespec) (err error) { + _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func PivotRoot(newroot string, putold string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(newroot) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(putold) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { + r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func read(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Removexattr(path string, attr string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(oldpath) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(newpath) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(description) + if err != nil { + return + } + var _p2 *byte + _p2, err = BytePtrFromString(callback) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) + id = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setdomainname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sethostname(p []byte) (err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpgid(pid int, pgid int) (err error) { + _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setsid() (pid int, err error) { + r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) + pid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Settimeofday(tv *Timeval) (err error) { + _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setns(fd int, nstype int) (err error) { + _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setpriority(which int, who int, prio int) (err error) { + _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { + r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 
0, 0) + newfd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sync() { + SyscallNoError(SYS_SYNC, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Syncfs(fd int) (err error) { + _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Sysinfo(info *Sysinfo_t) (err error) { + _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func TimerfdCreate(clockid int, flags int) (fd int, err error) { + r0, _, e1 := RawSyscall(SYS_TIMERFD_CREATE, uintptr(clockid), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func TimerfdGettime(fd int, currValue *ItimerSpec) (err error) { + _, _, e1 := RawSyscall(SYS_TIMERFD_GETTIME, uintptr(fd), uintptr(unsafe.Pointer(currValue)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func TimerfdSettime(fd int, flags int, newValue *ItimerSpec, oldValue *ItimerSpec) (err error) { + _, _, e1 := RawSyscall6(SYS_TIMERFD_SETTIME, uintptr(fd), uintptr(flags), uintptr(unsafe.Pointer(newValue)), uintptr(unsafe.Pointer(oldValue)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { + _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Times(tms *Tms) (ticks uintptr, err error) { + r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) + ticks = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Umask(mask int) (oldmask int) { + r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) + oldmask = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Uname(buf *Utsname) (err error) { + _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unmount(target string, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(target) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Unshare(flags int) (err error) { + _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func write(fd int, p []byte) (n int, err error) { + var _p0 unsafe.Pointer + if len(p) > 0 { + _p0 = unsafe.Pointer(&p[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func exitThread(code int) (err error) { + _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readlen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writelen(fd int, p *byte, np int) (n int, err error) { + r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readv(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_READV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall(SYS_WRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv2(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PREADV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev2(fd int, iovs []Iovec, offs_l uintptr, 
offs_h uintptr, flags int) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovs) > 0 { + _p0 = unsafe.Pointer(&iovs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_PWRITEV2, uintptr(fd), uintptr(_p0), uintptr(len(iovs)), uintptr(offs_l), uintptr(offs_h), uintptr(flags)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func munmap(addr uintptr, length uintptr) (err error) { + _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Madvise(b []byte, advice int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mprotect(b []byte, prot int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Mlockall(flags int) (err error) { + _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Msync(b []byte, flags int) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlock(b []byte) (err error) { + var _p0 unsafe.Pointer + if len(b) > 0 { + _p0 = unsafe.Pointer(&b[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func Munlockall() (err error) { + _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func faccessat(dirfd int, path string, mode uint32) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(pathname) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) {
+	r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags))
+	fd = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe2(p *[2]_C_int, flags int) (err error) {
+	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
index fe5d462e4..19ebd3ff7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
@@ -14,1458 +14,8 @@ var _ syscall.Errno
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
-	r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
-	fd = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
-	_, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fchmodat(dirfd int, path string, mode uint32) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func ioctl(fd int, req uint, arg uintptr) (err error) {
-	_, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(oldpath)
-	if err != nil {
-		return
-	}
-	var _p1 *byte
-	_p1, err = BytePtrFromString(newpath)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
-	fd = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
-	r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds),
uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(restriction) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = 
BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) 
(err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path 
string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) 
(n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) 
(watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, 
uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 
{ - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) if e1 != 0 { err = errnoErr(e1) } @@ -1495,270 +45,6 @@ func Tee(rfd int, wfd int, 
len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) 
- } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1769,17 +55,7 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, 
uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -2030,8 +306,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2040,8 +317,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID32, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID32, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 536abcea3..5c562182a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -14,1458 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask 
*Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 
0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(restriction) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 
*byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := 
Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 
uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := 
RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1495,271 +45,7 @@ func 
Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = 
unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -2046,8 +332,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } 
@@ -2056,8 +343,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2432,16 +720,6 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 37823cd6b..dc69d99c6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -14,1458 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - 
-func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), 
uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(restriction) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, 
fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, 
uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := 
Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), 
uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, 
_, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) if e1 != 0 { err = errnoErr(e1) } @@ -1495,270 +45,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) 
- } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1769,16 +55,6 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) @@ -1958,7 +234,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -2166,8 +442,9 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID32, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2176,8 +453,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID32, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID32, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 794f61264..1b897dee0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -14,1458 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := 
Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(restriction) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), 
uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - 
return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = 
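// The xattr wrappers above return the byte count the kernel wrote (sz), which is
// why callers usually probe once for the required size and then call again with a
// buffer. A minimal sketch, assuming the exported unix.Listxattr helper from the
// vendored golang.org/x/sys/unix (the path is made up for illustration):
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	const path = "/tmp/example.txt" // hypothetical file

	// First call with an empty buffer to learn how much space the names need.
	sz, err := unix.Listxattr(path, nil)
	if err != nil || sz == 0 {
		fmt.Println("no extended attributes:", err)
		return
	}
	buf := make([]byte, sz)
	if _, err := unix.Listxattr(path, buf); err != nil {
		fmt.Println("listxattr:", err)
		return
	}
	fmt.Printf("attribute names (NUL separated): %q\n", buf)
}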
unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, 
uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), 
uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1495,270 +45,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - 
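// The hunk above adds the 64-bit Fallocate wrapper, which passes off and len
// straight through as single registers (the 32-bit MIPS variant further down
// splits them into high/low halves instead). A caller-side sketch, assuming the
// exported unix.Fallocate and unix.FALLOC_FL_KEEP_SIZE from the vendored
// golang.org/x/sys/unix; the file name is illustrative:
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.OpenFile("/tmp/prealloc.dat", os.O_CREATE|os.O_RDWR, 0o644)
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer f.Close()

	// Reserve 1 MiB of space without changing the visible file size.
	if err := unix.Fallocate(int(f.Fd()), unix.FALLOC_FL_KEEP_SIZE, 0, 1<<20); err != nil {
		fmt.Println("fallocate:", err)
	}
}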
err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, 
events []EpollEvent, msec int) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(events) > 0 {
@@ -1865,7 +151,7 @@ func Getgid() (gid int) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func Getrlimit(resource int, rlim *Rlimit) (err error) {
+func getrlimit(resource int, rlim *Rlimit) (err error) {
 	_, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -1969,8 +255,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func Setfsgid(gid int) (err error) {
-	_, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
+func setfsgid(gid int) (prev int, err error) {
+	r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
+	prev = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -1979,8 +266,9 @@ func Setfsgid(gid int) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func Setfsuid(uid int) (err error) {
-	_, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
+func setfsuid(uid int) (prev int, err error) {
+	r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
+	prev = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -2019,7 +307,7 @@ func Setresuid(ruid int, euid int, suid int) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func Setrlimit(resource int, rlim *Rlimit) (err error) {
+func setrlimit(resource int, rlim *Rlimit) (err error) {
 	_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2300,16 +588,6 @@ func Gettimeofday(tv *Timeval) (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func pipe2(p *[2]_C_int, flags int) (err error) {
-	_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(cmdline)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
index 1b34b550c..49186843a 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
@@ -14,1458 +14,8 @@ var _ syscall.Errno

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) {
-	r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0)
-	fd = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) {
-	_, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask>>32), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func fchmodat(dirfd int, path string, mode uint32) (err error) {
-	var _p0 *byte
-	_p0, err = BytePtrFromString(path)
-	if err != nil {
-		return
-	}
-	_, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT
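// The hunks above unexport the raw getrlimit/setrlimit and setfsgid/setfsuid
// wrappers (the setfs* pair now also reports the previous ID); callers keep using
// the package's exported front-ends. A minimal sketch of that caller-facing API,
// assuming the exported unix.Getrlimit/unix.Setrlimit and RLIMIT_NOFILE from the
// vendored golang.org/x/sys/unix:
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var rl unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rl); err != nil {
		fmt.Println("getrlimit:", err)
		return
	}
	fmt.Printf("open files: cur=%d max=%d\n", rl.Cur, rl.Max)

	// Raise the soft limit up to the hard limit; under the hood this reaches the
	// renamed setrlimit wrapper (or a prlimit-based fallback) shown above.
	rl.Cur = rl.Max
	if err := unix.Setrlimit(unix.RLIMIT_NOFILE, &rl); err != nil {
		fmt.Println("setrlimit:", err)
	}
}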
THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = 
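// The *at wrappers above (openat, Readlinkat, Unlinkat, ...) all take a directory
// file descriptor plus a relative path; passing AT_FDCWD makes them behave like
// their plain counterparts. A small sketch, assuming the exported unix.Openat,
// unix.Readlinkat and unix.Unlinkat from the vendored golang.org/x/sys/unix
// (paths are illustrative):
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Resolve a symlink relative to the current working directory.
	buf := make([]byte, 256)
	n, err := unix.Readlinkat(unix.AT_FDCWD, "/proc/self/exe", buf)
	if err != nil {
		fmt.Println("readlinkat:", err)
		return
	}
	fmt.Println("running binary:", string(buf[:n]))

	// Open a directory once and address entries relative to it.
	dirfd, err := unix.Openat(unix.AT_FDCWD, "/tmp", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		fmt.Println("openat:", err)
		return
	}
	defer unix.Close(dirfd)
	_ = unix.Unlinkat(dirfd, "stale.lock", 0) // ignore the error if the file is absent
}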
unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(restriction) - if err != nil { - return - } - _, 
_, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), 
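// AddKey, RequestKey and the keyctl* helpers above wrap the kernel key-retention
// service. A minimal sketch, assuming the exported unix.AddKey and
// unix.KeyctlGetKeyringID plus KEY_SPEC_SESSION_KEYRING from the vendored
// golang.org/x/sys/unix (the "user" key type and description are illustrative):
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Resolve the caller's session keyring, creating it if needed.
	ring, err := unix.KeyctlGetKeyringID(unix.KEY_SPEC_SESSION_KEYRING, true)
	if err != nil {
		fmt.Println("keyctl get keyring:", err)
		return
	}
	// Add a user key; the payload travels through the same BytePtrFromString /
	// slice-pointer plumbing visible in the generated wrappers above.
	id, err := unix.AddKey("user", "example-secret", []byte("hunter2"), ring)
	if err != nil {
		fmt.Println("add_key:", err)
		return
	}
	fmt.Println("key id:", id)
}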
uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, 
uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off>>32), uintptr(off), uintptr(len>>32), uintptr(len)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
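// Getrandom and the other slice-taking wrappers above share the _zero idiom: when
// the slice is empty they still hand the kernel a valid non-nil pointer (to the
// package-level _zero byte) so the syscall sees (ptr, 0) rather than (NULL, 0).
// Caller-side it is just a []byte. A sketch, assuming the exported unix.Getrandom
// from the vendored golang.org/x/sys/unix:
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	buf := make([]byte, 16)
	n, err := unix.Getrandom(buf, 0) // default blocking source, no GRND_* flags
	if err != nil {
		fmt.Println("getrandom:", err)
		return
	}
	fmt.Printf("%d random bytes: %x\n", n, buf[:n])
}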
COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err 
= errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, 
err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, 
mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask>>32), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off>>32), uintptr(off), uintptr(len>>32), uintptr(len)) if e1 != 0 { err = errnoErr(e1) } @@ -1495,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { 
- err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, 
pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -1960,8 +246,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1970,8 +257,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2417,16 +705,6 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (p1 int, p2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) p1 = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 5714e2592..9171d3bd2 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -14,1458 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) 
- } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, 
err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = 
BytePtrFromString(restriction) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 
:= Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - 
_, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) 
- } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err 
= errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, 
err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, 
mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1495,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID 
*_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -1990,8 +276,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2000,8 +287,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2376,16 +664,6 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func fstat(fd int, st *stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(st)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 88a6b3362..82286f04f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -14,1458 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - 
if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - 
-func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - 
} - var _p1 *byte - _p1, err = BytePtrFromString(restriction) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data 
*CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd 
int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = 
int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - 
} - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) 
- sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), 
uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, 
description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1495,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) 
- if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -1990,8 +276,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2000,8 +287,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2376,16 +664,6 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func fstat(fd int, st *stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(st)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index c09dbe345..15920621c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -14,1458 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := 
Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - 
} - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { 
- var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(restriction) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = 
unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err 
error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := 
Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time 
*Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = 
int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(mask>>32), uintptr(dirFd), uintptr(unsafe.Pointer(pathname))) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(off>>32), uintptr(len), uintptr(len>>32)) if e1 != 0 { err = errnoErr(e1) } @@ -1495,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - 
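The regenerated stubs in the `+` hunks above are for a 32-bit target, so each 64-bit syscall argument is passed as two machine words (for example `uintptr(off), uintptr(off>>32)` in Fallocate and the two mask words in fanotifyMark), and setfsgid/setfsuid now also surface the raw return value as `prev`. As a rough, self-contained sketch of the word-splitting convention only (not taken from this patch; the helper names are invented and the word order the kernel expects can differ by architecture and endianness):

package main

import "fmt"

// splitWords mirrors how the regenerated 32-bit stubs pass a 64-bit
// syscall argument: one word carries the low 32 bits, the other carries
// value >> 32.
func splitWords(v uint64) (lo, hi uintptr) {
	return uintptr(uint32(v)), uintptr(v >> 32)
}

// joinWords reassembles the value, as the kernel side does.
func joinWords(lo, hi uintptr) uint64 {
	return uint64(hi)<<32 | uint64(uint32(lo))
}

func main() {
	off := uint64(6)<<32 | 0x1234 // an offset that does not fit in 32 bits
	lo, hi := splitWords(off)
	fmt.Printf("lo=%#x hi=%#x rejoined=%#x\n", lo, hi, joinWords(lo, hi))
}

On 64-bit targets such as ppc64 (the next file in this diff), the same arguments fit in a single word, which is why each architecture carries its own generated zsyscall file.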
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -1960,8 +246,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1970,8 +257,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2417,16 +705,6 @@ func Pause() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (p1 int, p2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) p1 = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 42f6c2103..73a42e2cc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -14,1458 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, 
err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), 
uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(restriction) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), 
uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, 
uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - 
if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = 
unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), 
uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, 
uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1495,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - 
if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := 
Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -2072,8 +358,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2082,8 +369,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2484,16 +772,6 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index de2cd8db9..6b8559536 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -14,1458 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = 
int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(restriction) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func 
Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, 
uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - 
_p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), 
uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - 
return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, 
sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1495,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = 
unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) 
(err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -2072,8 +358,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2082,8 +369,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2484,16 +772,6 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index d51bf07fc..b76133447 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -14,1458 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), 
uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd 
int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, 
uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(restriction) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - 
return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 
!= 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) 
- if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who 
int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err 
= BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - 
return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - 
_p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1495,270 +45,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { 
- _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - 
-func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1949,8 +235,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1959,8 +246,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2280,16 +568,6 @@ func Gettimeofday(tv *Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func kexecFileLoad(kernelFd int, initrdFd int, cmdlineLen int, cmdline string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(cmdline) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 1e3a3cb73..d7032ab1e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -14,1458 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - 
if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, 
flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), 
uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(restriction) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { 
- err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage 
*Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = 
BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - 
return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - 
_p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1495,271 +45,7 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { 
- _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - 
-func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -2042,8 +328,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2052,8 +339,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2264,16 +552,6 @@ func utimes(path string, times *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 3c97008cd..bcbbdd906 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -14,1458 +14,8 @@ var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func FanotifyInit(flags uint, event_f_flags uint) (fd int, err error) { - r0, _, e1 := Syscall(SYS_FANOTIFY_INIT, uintptr(flags), uintptr(event_f_flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { - _, _, e1 := 
Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fchmodat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(buf) > 0 { - _p1 = unsafe.Pointer(&buf[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unlinkat(dirfd int, path string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - 
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getcwd(buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { - r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) - wpid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlInt(cmd int, arg2 int, arg3 int, arg4 int, arg5 int) (ret int, err error) { - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func KeyctlBuffer(cmd int, arg2 int, buf []byte, arg5 int) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(buf)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlJoin(cmd int, arg2 string) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg2) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlSearch(cmd int, arg2 int, arg3 string, arg4 string, arg5 int) (ret int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg3) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(arg4) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(arg5), 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlIOV(cmd int, arg2 int, payload []Iovec, arg5 int) (err error) { - var _p0 unsafe.Pointer - if len(payload) > 0 { - _p0 = unsafe.Pointer(&payload[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(_p0), uintptr(len(payload)), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = 
unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(unsafe.Pointer(arg2)), uintptr(_p0), uintptr(len(buf)), 0, 0) - ret = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(restriction) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { - _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(arg) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(source) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(target) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(fstype) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Acct(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func AddKey(keyType string, description string, payload []byte, ringid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(payload) > 0 { - _p2 = unsafe.Pointer(&payload[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_ADD_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(payload)), uintptr(ringid), 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Adjtimex(buf *Timex) (state int, err error) { - r0, _, e1 := 
Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0) - state = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capget(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPGET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Capset(hdr *CapUserHeader, data *CapUserData) (err error) { - _, _, e1 := Syscall(SYS_CAPSET, uintptr(unsafe.Pointer(hdr)), uintptr(unsafe.Pointer(data)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chdir(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Chroot(path string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGetres(clockid int32, res *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETRES, uintptr(clockid), uintptr(unsafe.Pointer(res)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockGettime(clockid int32, time *Timespec) (err error) { - _, _, e1 := Syscall(SYS_CLOCK_GETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) { - _, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Close(fd int) (err error) { - _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func CopyFileRange(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { - r0, _, e1 := Syscall6(SYS_COPY_FILE_RANGE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func DeleteModule(name string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_DELETE_MODULE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup(oldfd int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Dup3(oldfd int, newfd 
int, flags int) (err error) { - _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCreate1(flag int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) { - _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Eventfd(initval uint, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Exit(code int) { - SyscallNoError(SYS_EXIT_GROUP, uintptr(code), 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { - _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchdir(fd int) (err error) { - _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchmod(fd int, mode uint32) (err error) { - _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fdatasync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fgetxattr(fd int, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_FGETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func FinitModule(fd int, params string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(params) - if err != 
nil { - return - } - _, _, e1 := Syscall(SYS_FINIT_MODULE, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flistxattr(fd int, dest []byte) (sz int, err error) { - var _p0 unsafe.Pointer - if len(dest) > 0 { - _p0 = unsafe.Pointer(&dest[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_FLISTXATTR, uintptr(fd), uintptr(_p0), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Flock(fd int, how int) (err error) { - _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fremovexattr(fd int, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FREMOVEXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsetxattr(fd int, attr string, dest []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_FSETXATTR, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Fsync(fd int) (err error) { - _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getdents(fd int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETDENTS64, uintptr(fd), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpgid(pid int) (pgid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) - pgid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpid() (pid int) { - r0, _ := RawSyscallNoError(SYS_GETPID, 0, 0, 0) - pid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getppid() (ppid int) { - r0, _ := RawSyscallNoError(SYS_GETPPID, 0, 0, 0) - ppid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getpriority(which int, who int) (prio int, err error) { - r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) - prio = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = 
errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getrusage(who int, rusage *Rusage) (err error) { - _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getsid(pid int) (sid int, err error) { - r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) - sid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Gettid() (tid int) { - r0, _ := RawSyscallNoError(SYS_GETTID, 0, 0, 0) - tid = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Getxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InitModule(moduleImage []byte, params string) (err error) { - var _p0 unsafe.Pointer - if len(moduleImage) > 0 { - _p0 = unsafe.Pointer(&moduleImage[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - var _p1 *byte - _p1, err = BytePtrFromString(params) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_INIT_MODULE, uintptr(_p0), uintptr(len(moduleImage)), uintptr(unsafe.Pointer(_p1))) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask)) - watchdesc = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyInit1(flags int) (fd int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) { - r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0) - success = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Kill(pid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Klogctl(typ int, buf []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT - -func Lgetxattr(path string, attr string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(dest) > 0 { - _p2 = unsafe.Pointer(&dest[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Listxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Llistxattr(path string, dest []byte) (sz int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 unsafe.Pointer - if len(dest) > 0 { - _p1 = unsafe.Pointer(&dest[0]) - } else { - _p1 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest))) - sz = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lremovexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Lsetxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func MemfdCreate(name string, flags int) (fd int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(name) - if err != nil { - return - } - r0, _, e1 := Syscall(SYS_MEMFD_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mkdirat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mknodat(dirfd int, 
path string, mode uint32, dev int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Nanosleep(time *Timespec, leftover *Timespec) (err error) { - _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { - r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func PivotRoot(newroot string, putold string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(newroot) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(putold) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PRCTL, uintptr(option), uintptr(arg2), uintptr(arg3), uintptr(arg4), uintptr(arg5), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { - r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func read(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Removexattr(path string, attr string) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Renameat2(olddirfd int, oldpath string, newdirfd int, newpath string, flags 
uint) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(oldpath) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(newpath) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_RENAMEAT2, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func RequestKey(keyType string, description string, callback string, destRingid int) (id int, err error) { - var _p0 *byte - _p0, err = BytePtrFromString(keyType) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(description) - if err != nil { - return - } - var _p2 *byte - _p2, err = BytePtrFromString(callback) - if err != nil { - return - } - r0, _, e1 := Syscall6(SYS_REQUEST_KEY, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(destRingid), 0, 0) - id = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setdomainname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sethostname(p []byte) (err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpgid(pid int, pgid int) (err error) { - _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setsid() (pid int, err error) { - r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) - pid = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Settimeofday(tv *Timeval) (err error) { - _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setns(fd int, nstype int) (err error) { - _, _, e1 := Syscall(SYS_SETNS, uintptr(fd), uintptr(nstype), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setpriority(which int, who int, prio int) (err error) { - _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - var _p1 *byte - _p1, err = BytePtrFromString(attr) - if err != nil { - return - } - var _p2 unsafe.Pointer - if len(data) > 0 { - _p2 = unsafe.Pointer(&data[0]) - } else { - _p2 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), 
uintptr(len(data)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func signalfd(fd int, sigmask *Sigset_t, maskSize uintptr, flags int) (newfd int, err error) { - r0, _, e1 := Syscall6(SYS_SIGNALFD4, uintptr(fd), uintptr(unsafe.Pointer(sigmask)), uintptr(maskSize), uintptr(flags), 0, 0) - newfd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Sync() { - SyscallNoError(SYS_SYNC, 0, 0, 0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Syncfs(fd int) (err error) { - _, _, e1 := Syscall(SYS_SYNCFS, uintptr(fd), 0, 0) +func fanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname *byte) (err error) { + _, _, e1 := Syscall6(SYS_FANOTIFY_MARK, uintptr(fd), uintptr(flags), uintptr(mask), uintptr(dirFd), uintptr(unsafe.Pointer(pathname)), 0) if e1 != 0 { err = errnoErr(e1) } @@ -1474,8 +24,8 @@ func Syncfs(fd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Sysinfo(info *Sysinfo_t) (err error) { - _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0) +func Fallocate(fd int, mode uint32, off int64, len int64) (err error) { + _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1495,270 +45,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Tgkill(tgid int, tid int, sig syscall.Signal) (err error) { - _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Times(tms *Tms) (ticks uintptr, err error) { - r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0) - ticks = uintptr(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Umask(mask int) (oldmask int) { - r0, _ := RawSyscallNoError(SYS_UMASK, uintptr(mask), 0, 0) - oldmask = int(r0) - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Uname(buf *Utsname) (err error) { - _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unmount(target string, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(target) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Unshare(flags int) (err error) { - _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func write(fd int, p []byte) (n int, err error) { - var _p0 unsafe.Pointer - if len(p) > 0 { - _p0 = unsafe.Pointer(&p[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func exitThread(code int) (err error) { - _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func munmap(addr uintptr, length uintptr) (err error) { - _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Madvise(b []byte, advice int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mprotect(b []byte, prot int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Mlockall(flags int) (err error) { - _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Msync(b []byte, flags int) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlock(b []byte) (err error) { - var _p0 unsafe.Pointer - if len(b) > 0 { - _p0 = unsafe.Pointer(&b[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func Munlockall() (err error) { - _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 
0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func faccessat(dirfd int, path string, mode uint32) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func nameToHandleAt(dirFD int, pathname string, fh *fileHandle, mountID *_C_int, flags int) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(pathname) - if err != nil { - return - } - _, _, e1 := Syscall6(SYS_NAME_TO_HANDLE_AT, uintptr(dirFD), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(fh)), uintptr(unsafe.Pointer(mountID)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func openByHandleAt(mountFD int, fh *fileHandle, flags int) (fd int, err error) { - r0, _, e1 := Syscall(SYS_OPEN_BY_HANDLE_AT, uintptr(mountFD), uintptr(unsafe.Pointer(fh)), uintptr(flags)) - fd = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { var _p0 unsafe.Pointer if len(events) > 0 { @@ -1786,7 +72,7 @@ func Fadvise(fd int, offset int64, length int64, advice int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Dup2(oldfd int, newfd int) (err error) { +func dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { err = errnoErr(e1) @@ -2041,8 +327,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsgid(gid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) +func setfsgid(gid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2051,8 +338,9 @@ func Setfsgid(gid int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Setfsuid(uid int) (err error) { - _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) +func setfsuid(uid int) (prev int, err error) { + r0, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0) + prev = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2442,16 +730,6 @@ func pipe(p *[2]_C_int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 5ade42cce..3bbd9e39c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func 
fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -361,22 +350,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -433,6 +406,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -564,6 +553,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return @@ -926,6 +925,16 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) { + _, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1635,6 +1644,21 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statvfs1(path string, buf *Statvfs_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = 
BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 3e0bbc5f1..d8cf5012c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -361,22 +350,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -433,6 +406,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -564,6 +553,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return @@ -926,6 +925,16 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) { + _, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1635,6 +1644,21 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statvfs1(path string, buf 
*Statvfs_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index cb0af13a3..1153fe69b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -361,22 +350,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -433,6 +406,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -564,6 +553,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return @@ -926,6 +925,16 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) { + _, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), 
uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1635,6 +1644,21 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statvfs1(path string, buf *Statvfs_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 6fd48d3dc..24b4ebb41 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -361,22 +350,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -433,6 +406,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -564,6 +553,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = 
errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return @@ -926,6 +925,16 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatvfs1(fd int, buf *Statvfs_t, flags int) (err error) { + _, _, e1 := Syscall(SYS_FSTATVFS1, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { @@ -1635,6 +1644,21 @@ func Stat(path string, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statvfs1(path string, buf *Statvfs_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_STATVFS1, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 2938e4124..b44b31aeb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -361,24 +350,8 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -431,6 +404,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), 
uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -573,6 +562,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 22b79ab0e..67f93ee76 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -361,24 +350,8 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -431,6 +404,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), 
uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -573,6 +562,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index cb921f37a..d7c878b1d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -361,24 +350,8 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -431,6 +404,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -573,6 +562,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return 
+} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 5a7438035..8facd695d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -239,17 +239,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func fcntl(fd int, cmd int, arg int) (val int, err error) { - r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg)) - val = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout)) n = int(r0) @@ -361,24 +350,8 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func pipe(p *[2]_C_int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -431,6 +404,22 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) @@ -573,6 +562,16 @@ func Dup2(from int, to int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Dup3(from int, to int, flags int) (err error) { + _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go index 37dcc74c2..102f1ab47 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go +++ 
b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
@@ -1,4 +1,4 @@
-// mksysctl_openbsd.pl
+// go run mksysctl_openbsd.go
 // Code generated by the command above; DO NOT EDIT.
 
 // +build 386,openbsd
@@ -30,6 +30,7 @@ var sysctlMib = []mibentry{
 	{"hw.model", []_C_int{6, 2}},
 	{"hw.ncpu", []_C_int{6, 3}},
 	{"hw.ncpufound", []_C_int{6, 21}},
+	{"hw.ncpuonline", []_C_int{6, 25}},
 	{"hw.pagesize", []_C_int{6, 7}},
 	{"hw.physmem", []_C_int{6, 19}},
 	{"hw.product", []_C_int{6, 15}},
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
index fe6caa6eb..4866fced8 100644
--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
@@ -31,6 +31,7 @@ var sysctlMib = []mibentry{
 	{"hw.model", []_C_int{6, 2}},
 	{"hw.ncpu", []_C_int{6, 3}},
 	{"hw.ncpufound", []_C_int{6, 21}},
+	{"hw.ncpuonline", []_C_int{6, 25}},
 	{"hw.pagesize", []_C_int{6, 7}},
 	{"hw.perfpolicy", []_C_int{6, 23}},
 	{"hw.physmem", []_C_int{6, 19}},
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
index 6eb8c0b08..d3801eb24 100644
--- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
@@ -30,6 +30,7 @@ var sysctlMib = []mibentry{
 	{"hw.model", []_C_int{6, 2}},
 	{"hw.ncpu", []_C_int{6, 3}},
 	{"hw.ncpufound", []_C_int{6, 21}},
+	{"hw.ncpuonline", []_C_int{6, 25}},
 	{"hw.pagesize", []_C_int{6, 7}},
 	{"hw.physmem", []_C_int{6, 19}},
 	{"hw.product", []_C_int{6, 15}},
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index 7aae554f2..54559a895 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -431,4 +431,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index 7968439a9..054a741b7 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -353,4 +353,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 3c663c69d..307f2ba12 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -395,4 +395,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 753def987..e9404dd54 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -297,4 +297,7 @@ const (
 	SYS_FSMOUNT = 432
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
+	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index ac86bd544..68bb6d29b 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -415,4 +415,7 @@ const (
 	SYS_FSMOUNT = 4432
 	SYS_FSPICK = 4433
 	SYS_PIDFD_OPEN = 4434
+	SYS_CLONE3 = 4435
+	SYS_OPENAT2 = 4437
+	SYS_PIDFD_GETFD = 4438
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 1f5705b58..4e5251185 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -345,4 +345,7 @@ const (
 	SYS_FSMOUNT = 5432
 	SYS_FSPICK = 5433
 	SYS_PIDFD_OPEN = 5434
+	SYS_CLONE3 = 5435
+	SYS_OPENAT2 = 5437
+	SYS_PIDFD_GETFD = 5438
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index d9ed95326..4d9aa3003 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -345,4 +345,7 @@ const (
 	SYS_FSMOUNT = 5432
 	SYS_FSPICK = 5433
 	SYS_PIDFD_OPEN = 5434
+	SYS_CLONE3 = 5435
+	SYS_OPENAT2 = 5437
+	SYS_PIDFD_GETFD = 5438
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 94266b65a..64af0707d 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -415,4 +415,7 @@ const (
 	SYS_FSMOUNT = 4432
 	SYS_FSPICK = 4433
 	SYS_PIDFD_OPEN = 4434
+	SYS_CLONE3 = 4435
+	SYS_OPENAT2 = 4437
+	SYS_PIDFD_GETFD = 4438
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index 52e3da649..cc3c067ba 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -395,4 +395,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index 6141f90a8..4050ff983 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -395,4 +395,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 4f7261a88..529abb6a7 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -297,4 +297,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index f47014ac0..276650010 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -360,4 +360,6 @@ const (
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
 	SYS_CLONE3 = 435
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index dd78abb0d..4dc82bb24 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -374,4 +374,6 @@ const (
 	SYS_FSMOUNT = 432
 	SYS_FSPICK = 433
 	SYS_PIDFD_OPEN = 434
+	SYS_OPENAT2 = 437
+	SYS_PIDFD_GETFD = 438
 )
diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
index c206f2b05..71ea1d6d2 100644
---
a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -467,3 +467,13 @@ type Utsname struct { Version [32]byte Machine [32]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Tickadj int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 7312e95ff..2a3ec615f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -128,9 +128,9 @@ type Statfs_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [1024]int8 - Mntonname [1024]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte } type statfs_freebsd11_t struct { @@ -153,9 +153,9 @@ type statfs_freebsd11_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [88]int8 - Mntonname [88]int8 + Fstypename [16]byte + Mntfromname [88]byte + Mntonname [88]byte } type Flock_t struct { @@ -375,15 +375,15 @@ type PtraceLwpInfoStruct struct { } type __Siginfo struct { - Signo int32 - Errno int32 - Code int32 - Pid int32 - Uid uint32 - Status int32 - Addr *byte - Value [4]byte - X_reason [32]byte + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [4]byte + _ [32]byte } type Sigset_t struct { @@ -423,7 +423,7 @@ type PtraceIoDesc struct { Op int32 Offs *byte Addr *byte - Len uint + Len uint32 } type Kevent_t struct { @@ -458,7 +458,7 @@ type ifMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Data ifData } @@ -469,7 +469,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -536,7 +535,7 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Metric int32 } @@ -547,7 +546,7 @@ type IfmaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 } type IfAnnounceMsghdr struct { @@ -564,7 +563,7 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte + _ uint16 Flags int32 Addrs int32 Pid int32 @@ -698,3 +697,13 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 29ba2f5bf..e11e95499 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -123,9 +123,9 @@ type Statfs_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [1024]int8 - Mntonname [1024]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte } type statfs_freebsd11_t struct { @@ -148,9 +148,9 @@ type statfs_freebsd11_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [88]int8 - Mntonname [88]int8 + Fstypename [16]byte + Mntfromname [88]byte + Mntonname [88]byte } type Flock_t struct { @@ -275,10 +275,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -428,7 +426,7 @@ type PtraceIoDesc struct { Op int32 Offs *byte Addr *byte - Len uint + Len uint64 } type Kevent_t struct { @@ -463,7 +461,7 @@ type ifMsghdr 
struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Data ifData } @@ -474,7 +472,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -541,7 +538,7 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Metric int32 } @@ -552,7 +549,7 @@ type IfmaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 } type IfAnnounceMsghdr struct { @@ -569,7 +566,7 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte + _ uint16 Flags int32 Addrs int32 Pid int32 @@ -623,7 +620,6 @@ type BpfZbuf struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } @@ -704,3 +700,13 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index b4090ef31..6f79227d7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -405,7 +405,7 @@ type PtraceIoDesc struct { Op int32 Offs *byte Addr *byte - Len uint + Len uint32 } type Kevent_t struct { @@ -681,3 +681,13 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 1542a8773..c6fe1d097 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs types_freebsd.go | go run mkpost.go +// cgo -godefs -- -fsigned-char types_freebsd.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build arm64,freebsd @@ -123,9 +123,9 @@ type Statfs_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [1024]int8 - Mntonname [1024]int8 + Fstypename [16]byte + Mntfromname [1024]byte + Mntonname [1024]byte } type statfs_freebsd11_t struct { @@ -148,9 +148,9 @@ type statfs_freebsd11_t struct { Owner uint32 Fsid Fsid Charspare [80]int8 - Fstypename [16]int8 - Mntfromname [88]int8 - Mntonname [88]int8 + Fstypename [16]byte + Mntfromname [88]byte + Mntonname [88]byte } type Flock_t struct { @@ -275,10 +275,8 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - _ [4]byte Iov *Iovec Iovlen int32 - _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -326,11 +324,9 @@ const ( PTRACE_CONT = 0x7 PTRACE_DETACH = 0xb PTRACE_GETFPREGS = 0x23 - PTRACE_GETFSBASE = 0x47 PTRACE_GETLWPLIST = 0xf PTRACE_GETNUMLWPS = 0xe PTRACE_GETREGS = 0x21 - PTRACE_GETXSTATE = 0x45 PTRACE_IO = 0xc PTRACE_KILL = 0x8 PTRACE_LWPEVENTS = 0x18 @@ -373,15 +369,15 @@ type PtraceLwpInfoStruct struct { } type __Siginfo struct { - Signo int32 - Errno int32 - Code int32 - Pid int32 - Uid uint32 - Status int32 - Addr *byte - Value [8]byte - X_reason [40]byte + Signo int32 + Errno int32 + Code int32 + Pid int32 + Uid uint32 + Status int32 + Addr *byte + Value [8]byte + _ [40]byte } type Sigset_t struct { @@ -394,19 +390,21 @@ type Reg struct { Sp uint64 Elr uint64 Spsr uint32 + _ [4]byte } type FpReg struct { - Fp_q [32]uint128 - Fp_sr uint32 - Fp_cr uint32 + Q [32][16]uint8 + Sr uint32 + Cr uint32 + _ [8]byte } type PtraceIoDesc struct { Op int32 Offs *byte Addr *byte - Len uint + Len uint64 } type Kevent_t struct { @@ -441,7 +439,7 @@ type ifMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Data ifData } @@ -452,7 +450,6 @@ type IfMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte Data IfData } @@ -519,7 +516,7 @@ type IfaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 Metric int32 } @@ -530,7 +527,7 @@ type IfmaMsghdr struct { Addrs int32 Flags int32 Index uint16 - _ [2]byte + _ uint16 } type IfAnnounceMsghdr struct { @@ -547,7 +544,7 @@ type RtMsghdr struct { Version uint8 Type uint8 Index uint16 - _ [2]byte + _ uint16 Flags int32 Addrs int32 Pid int32 @@ -601,7 +598,6 @@ type BpfZbuf struct { type BpfProgram struct { Len uint32 - _ [4]byte Insns *BpfInsn } @@ -682,3 +678,13 @@ type Utsname struct { Version [256]byte Machine [256]byte } + +const SizeofClockinfo = 0x14 + +type Clockinfo struct { + Hz int32 + Tick int32 + Spare int32 + Stathz int32 + Profhz int32 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go new file mode 100644 index 000000000..416f7767e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -0,0 +1,2345 @@ +// Code generated by mkmerge.go; DO NOT EDIT. 
+ +// +build linux + +package unix + +const ( + SizeofShort = 0x2 + SizeofInt = 0x4 + SizeofLongLong = 0x8 + PathMax = 0x1000 +) + +type ( + _C_short int16 + _C_int int32 + + _C_long_long int64 +) + +type ItimerSpec struct { + Interval Timespec + Value Timespec +} + +const ( + TIME_OK = 0x0 + TIME_INS = 0x1 + TIME_DEL = 0x2 + TIME_OOP = 0x3 + TIME_WAIT = 0x4 + TIME_ERROR = 0x5 + TIME_BAD = 0x5 +) + +type Rlimit struct { + Cur uint64 + Max uint64 +} + +type _Gid_t uint32 + +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + _ int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + +type Fsid struct { + Val [2]int32 +} + +type FscryptPolicy struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptKey struct { + Mode uint32 + Raw [64]uint8 + Size uint32 +} + +type FscryptPolicyV1 struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + Master_key_descriptor [8]uint8 +} + +type FscryptPolicyV2 struct { + Version uint8 + Contents_encryption_mode uint8 + Filenames_encryption_mode uint8 + Flags uint8 + _ [4]uint8 + Master_key_identifier [16]uint8 +} + +type FscryptGetPolicyExArg struct { + Size uint64 + Policy [24]byte +} + +type FscryptKeySpecifier struct { + Type uint32 + _ uint32 + U [32]byte +} + +type FscryptAddKeyArg struct { + Key_spec FscryptKeySpecifier + Raw_size uint32 + Key_id uint32 + _ [8]uint32 +} + +type FscryptRemoveKeyArg struct { + Key_spec FscryptKeySpecifier + Removal_status_flags uint32 + _ [5]uint32 +} + +type FscryptGetKeyStatusArg struct { + Key_spec FscryptKeySpecifier + _ [6]uint32 + Status uint32 + Status_flags uint32 + User_count uint32 + _ [13]uint32 +} + +type KeyctlDHParams struct { + Private int32 + Prime int32 + Base int32 +} + +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 +) + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddrUnix struct { + Family uint16 + Path [108]int8 +} + +type RawSockaddrLinklayer struct { + Family uint16 + Protocol uint16 + Ifindex int32 + Hatype uint16 + Pkttype uint8 + Halen uint8 + Addr [8]uint8 +} + +type RawSockaddrNetlink struct { + Family uint16 + Pad uint16 + Pid uint32 + Groups uint32 +} + +type RawSockaddrHCI struct { + Family uint16 + Dev uint16 + Channel uint16 +} + +type RawSockaddrL2 struct { + Family uint16 + Psm uint16 + Bdaddr [6]uint8 + Cid uint16 + Bdaddr_type uint8 + _ [1]byte +} + +type RawSockaddrRFCOMM struct { + Family uint16 + Bdaddr [6]uint8 + Channel uint8 + _ [1]byte +} + +type RawSockaddrCAN struct { + Family uint16 + Ifindex int32 + Addr [16]byte +} + +type RawSockaddrALG struct { + Family uint16 + Type [14]uint8 + Feat uint32 + Mask uint32 + Name [64]uint8 +} + +type RawSockaddrVM struct { + Family uint16 + Reserved1 uint16 + Port uint32 + Cid uint32 + Zero [4]uint8 +} + +type RawSockaddrXDP struct { + Family uint16 + Flags uint16 + Ifindex uint32 
+ Queue_id uint32 + Shared_umem_fd uint32 +} + +type RawSockaddrPPPoX [0x1e]byte + +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + +type RawSockaddrL2TPIP struct { + Family uint16 + Unused uint16 + Addr [4]byte /* in_addr */ + Conn_id uint32 + _ [4]uint8 +} + +type RawSockaddrL2TPIP6 struct { + Family uint16 + Unused uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + Conn_id uint32 +} + +type _Socklen uint32 + +type Linger struct { + Onoff int32 + Linger int32 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type PacketMreq struct { + Ifindex int32 + Type uint16 + Alen uint16 + Address [8]uint8 +} + +type Inet4Pktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type Inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type IPv6MTUInfo struct { + Addr RawSockaddrInet6 + Mtu uint32 +} + +type ICMPv6Filter struct { + Data [8]uint32 +} + +type Ucred struct { + Pid int32 + Uid uint32 + Gid uint32 +} + +type TCPInfo struct { + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 +} + +type CanFilter struct { + Id uint32 + Mask uint32 +} + +const ( + SizeofSockaddrInet4 = 0x10 + SizeofSockaddrInet6 = 0x1c + SizeofSockaddrAny = 0x70 + SizeofSockaddrUnix = 0x6e + SizeofSockaddrLinklayer = 0x14 + SizeofSockaddrNetlink = 0xc + SizeofSockaddrHCI = 0x6 + SizeofSockaddrL2 = 0xe + SizeofSockaddrRFCOMM = 0xa + SizeofSockaddrCAN = 0x18 + SizeofSockaddrALG = 0x58 + SizeofSockaddrVM = 0x10 + SizeofSockaddrXDP = 0x10 + SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 + SizeofSockaddrL2TPIP = 0x10 + SizeofSockaddrL2TPIP6 = 0x20 + SizeofLinger = 0x8 + SizeofIPMreq = 0x8 + SizeofIPMreqn = 0xc + SizeofIPv6Mreq = 0x14 + SizeofPacketMreq = 0x10 + SizeofInet4Pktinfo = 0xc + SizeofInet6Pktinfo = 0x14 + SizeofIPv6MTUInfo = 0x20 + SizeofICMPv6Filter = 0x20 + SizeofUcred = 0xc + SizeofTCPInfo = 0x68 + SizeofCanFilter = 0x8 +) + +const ( + NDA_UNSPEC = 0x0 + NDA_DST = 0x1 + NDA_LLADDR = 0x2 + NDA_CACHEINFO = 0x3 + NDA_PROBES = 0x4 + NDA_VLAN = 0x5 + NDA_PORT = 0x6 + NDA_VNI = 0x7 + NDA_IFINDEX = 0x8 + NDA_MASTER = 0x9 + NDA_LINK_NETNSID = 0xa + NDA_SRC_VNI = 0xb + NTF_USE = 0x1 + NTF_SELF = 0x2 + NTF_MASTER = 0x4 + NTF_PROXY = 0x8 + NTF_EXT_LEARNED = 0x10 + NTF_OFFLOADED = 0x20 + NTF_ROUTER = 0x80 + NUD_INCOMPLETE = 0x1 + NUD_REACHABLE = 0x2 + NUD_STALE = 0x4 + NUD_DELAY = 0x8 + NUD_PROBE = 0x10 + NUD_FAILED = 0x20 + NUD_NOARP = 0x40 + NUD_PERMANENT = 0x80 + NUD_NONE = 0x0 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFA_FLAGS = 0x8 + IFA_RT_PRIORITY = 0x9 + IFA_TARGET_NETNSID = 0xa + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 
+ IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_TARGET_NETNSID = 0x2e + IFLA_CARRIER_UP_COUNT = 0x2f + IFLA_CARRIER_DOWN_COUNT = 0x30 + IFLA_NEW_IFINDEX = 0x31 + IFLA_MIN_MTU = 0x32 + IFLA_MAX_MTU = 0x33 + IFLA_MAX = 0x36 + IFLA_INFO_KIND = 0x1 + IFLA_INFO_DATA = 0x2 + IFLA_INFO_XSTATS = 0x3 + IFLA_INFO_SLAVE_KIND = 0x4 + IFLA_INFO_SLAVE_DATA = 0x5 + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTA_MARK = 0x10 + RTA_MFC_STATS = 0x11 + RTA_VIA = 0x12 + RTA_NEWDST = 0x13 + RTA_PREF = 0x14 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 + RTA_EXPIRES = 0x17 + RTA_PAD = 0x18 + RTA_UID = 0x19 + RTA_TTL_PROPAGATE = 0x1a + RTA_IP_PROTO = 0x1b + RTA_SPORT = 0x1c + RTA_DPORT = 0x1d + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 + SizeofNdUseroptmsg = 0x10 + SizeofNdMsg = 0xc +) + +type NlMsghdr struct { + Len uint32 + Type uint16 + Flags uint16 + Seq uint32 + Pid uint32 +} + +type NlMsgerr struct { + Error int32 + Msg NlMsghdr +} + +type RtGenmsg struct { + Family uint8 +} + +type NlAttr struct { + Len uint16 + Type uint16 +} + +type RtAttr struct { + Len uint16 + Type uint16 +} + +type IfInfomsg struct { + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 +} + +type IfAddrmsg struct { + Family uint8 + Prefixlen uint8 + Flags uint8 + Scope uint8 + Index uint32 +} + +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + +type RtMsg struct { + Family uint8 + Dst_len uint8 + Src_len uint8 + Tos uint8 + Table uint8 + Protocol uint8 + Scope uint8 + Type uint8 + Flags uint32 +} + +type RtNexthop struct { + Len uint16 + Flags uint8 + Hops uint8 + Ifindex int32 +} + +type NdUseroptmsg struct { + Family uint8 
+ Pad1 uint8 + Opts_len uint16 + Ifindex int32 + Icmp_type uint8 + Icmp_code uint8 + Pad2 uint16 + Pad3 uint32 +} + +type NdMsg struct { + Family uint8 + Pad1 uint8 + Pad2 uint16 + Ifindex int32 + State uint16 + Flags uint8 + Type uint8 +} + +const ( + SizeofSockFilter = 0x8 +) + +type SockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} + +type SockFprog struct { + Len uint16 + Filter *SockFilter +} + +type InotifyEvent struct { + Wd int32 + Mask uint32 + Cookie uint32 + Len uint32 +} + +const SizeofInotifyEvent = 0x10 + +type Utsname struct { + Sysname [65]byte + Nodename [65]byte + Release [65]byte + Version [65]byte + Machine [65]byte + Domainname [65]byte +} + +const ( + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + + AT_SYMLINK_FOLLOW = 0x400 + AT_SYMLINK_NOFOLLOW = 0x100 + + AT_EACCESS = 0x200 +) + +type PollFd struct { + Fd int32 + Events int16 + Revents int16 +} + +const ( + POLLIN = 0x1 + POLLPRI = 0x2 + POLLOUT = 0x4 + POLLERR = 0x8 + POLLHUP = 0x10 + POLLNVAL = 0x20 +) + +type SignalfdSiginfo struct { + Signo uint32 + Errno int32 + Code int32 + Pid uint32 + Uid uint32 + Fd int32 + Tid uint32 + Band uint32 + Overrun uint32 + Trapno uint32 + Status int32 + Int int32 + Ptr uint64 + Utime uint64 + Stime uint64 + Addr uint64 + Addr_lsb uint16 + _ uint16 + Syscall int32 + Call_addr uint64 + Arch uint32 + _ [28]uint8 +} + +const PERF_IOC_FLAG_GROUP = 0x1 + +type Winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +const ( + TASKSTATS_CMD_UNSPEC = 0x0 + TASKSTATS_CMD_GET = 0x1 + TASKSTATS_CMD_NEW = 0x2 + TASKSTATS_TYPE_UNSPEC = 0x0 + TASKSTATS_TYPE_PID = 0x1 + TASKSTATS_TYPE_TGID = 0x2 + TASKSTATS_TYPE_STATS = 0x3 + TASKSTATS_TYPE_AGGR_PID = 0x4 + TASKSTATS_TYPE_AGGR_TGID = 0x5 + TASKSTATS_TYPE_NULL = 0x6 + TASKSTATS_CMD_ATTR_UNSPEC = 0x0 + TASKSTATS_CMD_ATTR_PID = 0x1 + TASKSTATS_CMD_ATTR_TGID = 0x2 + TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 + TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 +) + +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + +type Genlmsghdr struct { + Cmd uint8 + Version uint8 + Reserved uint16 +} + +const ( + CTRL_CMD_UNSPEC = 0x0 + CTRL_CMD_NEWFAMILY = 0x1 + CTRL_CMD_DELFAMILY = 0x2 + CTRL_CMD_GETFAMILY = 0x3 + CTRL_CMD_NEWOPS = 0x4 + CTRL_CMD_DELOPS = 0x5 + CTRL_CMD_GETOPS = 0x6 + CTRL_CMD_NEWMCAST_GRP = 0x7 + CTRL_CMD_DELMCAST_GRP = 0x8 + CTRL_CMD_GETMCAST_GRP = 0x9 + CTRL_ATTR_UNSPEC = 0x0 + CTRL_ATTR_FAMILY_ID = 0x1 + CTRL_ATTR_FAMILY_NAME = 0x2 + CTRL_ATTR_VERSION = 0x3 + CTRL_ATTR_HDRSIZE = 0x4 + CTRL_ATTR_MAXATTR = 0x5 + CTRL_ATTR_OPS = 0x6 + CTRL_ATTR_MCAST_GROUPS = 0x7 + CTRL_ATTR_OP_UNSPEC = 0x0 + CTRL_ATTR_OP_ID = 0x1 + CTRL_ATTR_OP_FLAGS = 0x2 + CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 + CTRL_ATTR_MCAST_GRP_NAME = 0x1 + CTRL_ATTR_MCAST_GRP_ID = 0x2 +) + +const ( + _CPU_SETSIZE = 0x400 +) + +const ( + BDADDR_BREDR = 0x0 + BDADDR_LE_PUBLIC = 0x1 + BDADDR_LE_RANDOM = 0x2 +) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 
uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + Sample_max_stack uint16 + _ uint16 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + PERF_COUNT_SW_BPF_OUTPUT = 0xa + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + 
PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 + PERF_SAMPLE_BRANCH_IN_TX = 0x100 + PERF_SAMPLE_BRANCH_NO_TX = 0x200 + PERF_SAMPLE_BRANCH_COND = 0x400 + PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 + PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 + PERF_SAMPLE_BRANCH_CALL = 0x2000 + PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 + PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 + PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + PERF_RECORD_MMAP2 = 0xa + PERF_RECORD_AUX = 0xb + PERF_RECORD_ITRACE_START = 0xc + PERF_RECORD_LOST_SAMPLES = 0xd + PERF_RECORD_SWITCH = 0xe + PERF_RECORD_SWITCH_CPU_WIDE = 0xf + PERF_RECORD_NAMESPACES = 0x10 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 + PERF_FLAG_FD_CLOEXEC = 0x8 +) + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + 
Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketBDTS struct { + Sec uint32 + Usec uint32 +} + +type TpacketHdrV1 struct { + Block_status uint32 + Num_pkts uint32 + Offset_to_first_pkt uint32 + Blk_len uint32 + Seq_num uint64 + Ts_first_pkt TpacketBDTS + Ts_last_pkt TpacketBDTS +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 + + SizeofTpacketStats = 0x8 + SizeofTpacketStatsV3 = 0xc +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + 
NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 
0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 
0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) + +type RTCTime struct { + Sec int32 + Min int32 + Hour int32 + Mday int32 + Mon int32 + Year int32 + Wday int32 + Yday int32 + Isdst int32 +} + +type RTCWkAlrm struct { + Enabled uint8 + Pending uint8 + Time RTCTime +} + +type BlkpgIoctlArg struct { + Op int32 + Flags int32 + Datalen int32 + Data *byte +} + +const ( + BLKPG_ADD_PARTITION = 0x1 + BLKPG_DEL_PARTITION = 0x2 + BLKPG_RESIZE_PARTITION = 0x3 +) + +const ( + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 +) + +type XDPRingOffset struct { + Producer uint64 + Consumer uint64 + Desc uint64 + Flags uint64 +} + +type XDPMmapOffsets struct { + Rx XDPRingOffset + Tx XDPRingOffset + Fr XDPRingOffset + Cr XDPRingOffset +} + +type XDPStatistics struct { + Rx_dropped uint64 + Rx_invalid_descs uint64 + Tx_invalid_descs uint64 +} + +type XDPDesc struct { + Addr uint64 + Len uint32 + Options uint32 +} + +const ( + NCSI_CMD_UNSPEC = 0x0 + NCSI_CMD_PKG_INFO = 0x1 + NCSI_CMD_SET_INTERFACE = 0x2 + NCSI_CMD_CLEAR_INTERFACE = 0x3 + NCSI_ATTR_UNSPEC = 0x0 + NCSI_ATTR_IFINDEX = 0x1 + NCSI_ATTR_PACKAGE_LIST = 0x2 + NCSI_ATTR_PACKAGE_ID = 0x3 + NCSI_ATTR_CHANNEL_ID = 0x4 + NCSI_PKG_ATTR_UNSPEC = 0x0 + NCSI_PKG_ATTR = 0x1 + NCSI_PKG_ATTR_ID = 0x2 + NCSI_PKG_ATTR_FORCED = 0x3 + NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 + NCSI_CHANNEL_ATTR_UNSPEC = 0x0 + NCSI_CHANNEL_ATTR = 0x1 + NCSI_CHANNEL_ATTR_ID = 0x2 + NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 + NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 + NCSI_CHANNEL_ATTR_VERSION_STR = 
0x5 + NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 + NCSI_CHANNEL_ATTR_ACTIVE = 0x7 + NCSI_CHANNEL_ATTR_FORCED = 0x8 + NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 + NCSI_CHANNEL_ATTR_VLAN_ID = 0xa +) + +type ScmTimestamping struct { + Ts [3]Timespec +} + +const ( + SOF_TIMESTAMPING_TX_HARDWARE = 0x1 + SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 + SOF_TIMESTAMPING_RX_HARDWARE = 0x4 + SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 + SOF_TIMESTAMPING_SOFTWARE = 0x10 + SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 + SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 + SOF_TIMESTAMPING_OPT_ID = 0x80 + SOF_TIMESTAMPING_TX_SCHED = 0x100 + SOF_TIMESTAMPING_TX_ACK = 0x200 + SOF_TIMESTAMPING_OPT_CMSG = 0x400 + SOF_TIMESTAMPING_OPT_TSONLY = 0x800 + SOF_TIMESTAMPING_OPT_STATS = 0x1000 + SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 + SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + + SOF_TIMESTAMPING_LAST = 0x4000 + SOF_TIMESTAMPING_MASK = 0x7fff + + SCM_TSTAMP_SND = 0x0 + SCM_TSTAMP_SCHED = 0x1 + SCM_TSTAMP_ACK = 0x2 +) + +type SockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type FanotifyEventMetadata struct { + Event_len uint32 + Vers uint8 + Reserved uint8 + Metadata_len uint16 + Mask uint64 + Fd int32 + Pid int32 +} + +type FanotifyResponse struct { + Fd int32 + Response uint32 +} + +const ( + CRYPTO_MSG_BASE = 0x10 + CRYPTO_MSG_NEWALG = 0x10 + CRYPTO_MSG_DELALG = 0x11 + CRYPTO_MSG_UPDATEALG = 0x12 + CRYPTO_MSG_GETALG = 0x13 + CRYPTO_MSG_DELRNG = 0x14 + CRYPTO_MSG_GETSTAT = 0x15 +) + +const ( + CRYPTOCFGA_UNSPEC = 0x0 + CRYPTOCFGA_PRIORITY_VAL = 0x1 + CRYPTOCFGA_REPORT_LARVAL = 0x2 + CRYPTOCFGA_REPORT_HASH = 0x3 + CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 + CRYPTOCFGA_REPORT_AEAD = 0x5 + CRYPTOCFGA_REPORT_COMPRESS = 0x6 + CRYPTOCFGA_REPORT_RNG = 0x7 + CRYPTOCFGA_REPORT_CIPHER = 0x8 + CRYPTOCFGA_REPORT_AKCIPHER = 0x9 + CRYPTOCFGA_REPORT_KPP = 0xa + CRYPTOCFGA_REPORT_ACOMP = 0xb + CRYPTOCFGA_STAT_LARVAL = 0xc + CRYPTOCFGA_STAT_HASH = 0xd + CRYPTOCFGA_STAT_BLKCIPHER = 0xe + CRYPTOCFGA_STAT_AEAD = 0xf + CRYPTOCFGA_STAT_COMPRESS = 0x10 + CRYPTOCFGA_STAT_RNG = 0x11 + CRYPTOCFGA_STAT_CIPHER = 0x12 + CRYPTOCFGA_STAT_AKCIPHER = 0x13 + CRYPTOCFGA_STAT_KPP = 0x14 + CRYPTOCFGA_STAT_ACOMP = 0x15 +) + +const ( + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_FREEZE = 0x16 + BPF_BTF_GET_NEXT_ID = 0x17 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + 
BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_MAP_TYPE_SK_STORAGE = 0x18 + BPF_MAP_TYPE_DEVMAP_HASH = 0x19 + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_PROG_TYPE_CGROUP_SYSCTL = 0x17 + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 0x18 + BPF_PROG_TYPE_CGROUP_SOCKOPT = 0x19 + BPF_PROG_TYPE_TRACING = 0x1a + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_CGROUP_SYSCTL = 0x12 + BPF_CGROUP_UDP4_RECVMSG = 0x13 + BPF_CGROUP_UDP6_RECVMSG = 0x14 + BPF_CGROUP_GETSOCKOPT = 0x15 + BPF_CGROUP_SETSOCKOPT = 0x16 + BPF_TRACE_RAW_TP = 0x17 + BPF_TRACE_FENTRY = 0x18 + BPF_TRACE_FEXIT = 0x19 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_ADJ_ROOM_MAC = 0x1 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_LWT_ENCAP_IP = 0x2 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_LWT_REROUTE = 0x80 + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_SOCK_OPS_RTT_CB = 0xc + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + 
BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 +) + +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + +type CapUserHeader struct { + Version uint32 + Pid int32 +} + +type CapUserData struct { + Effective uint32 + Permitted uint32 + Inheritable uint32 +} + +const ( + LINUX_CAPABILITY_VERSION_1 = 0x19980330 + LINUX_CAPABILITY_VERSION_2 = 0x20071026 + LINUX_CAPABILITY_VERSION_3 = 0x20080522 +) + +const ( + LO_FLAGS_READ_ONLY = 0x1 + LO_FLAGS_AUTOCLEAR = 0x4 + LO_FLAGS_PARTSCAN = 0x8 + LO_FLAGS_DIRECT_IO = 0x10 +) + +type LoopInfo64 struct { + Device uint64 + Inode uint64 + Rdevice uint64 + Offset uint64 + Sizelimit uint64 + Number uint32 + Encrypt_type uint32 + Encrypt_key_size uint32 + Flags uint32 + File_name [64]uint8 + Crypt_name [64]uint8 + Encrypt_key [32]uint8 + Init [2]uint64 +} + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) + +const ( + DEVLINK_CMD_UNSPEC = 0x0 + DEVLINK_CMD_GET = 0x1 + DEVLINK_CMD_SET = 0x2 + DEVLINK_CMD_NEW = 0x3 + DEVLINK_CMD_DEL = 0x4 + DEVLINK_CMD_PORT_GET = 0x5 + DEVLINK_CMD_PORT_SET = 0x6 + DEVLINK_CMD_PORT_NEW = 0x7 + DEVLINK_CMD_PORT_DEL = 0x8 + DEVLINK_CMD_PORT_SPLIT = 0x9 + DEVLINK_CMD_PORT_UNSPLIT = 0xa + DEVLINK_CMD_SB_GET = 0xb + DEVLINK_CMD_SB_SET = 0xc + DEVLINK_CMD_SB_NEW = 0xd + DEVLINK_CMD_SB_DEL = 0xe + DEVLINK_CMD_SB_POOL_GET = 0xf + DEVLINK_CMD_SB_POOL_SET = 0x10 + DEVLINK_CMD_SB_POOL_NEW = 0x11 + DEVLINK_CMD_SB_POOL_DEL = 0x12 + DEVLINK_CMD_SB_PORT_POOL_GET = 0x13 + DEVLINK_CMD_SB_PORT_POOL_SET = 0x14 + DEVLINK_CMD_SB_PORT_POOL_NEW = 0x15 + DEVLINK_CMD_SB_PORT_POOL_DEL = 0x16 + DEVLINK_CMD_SB_TC_POOL_BIND_GET = 0x17 + DEVLINK_CMD_SB_TC_POOL_BIND_SET = 0x18 + DEVLINK_CMD_SB_TC_POOL_BIND_NEW = 0x19 + DEVLINK_CMD_SB_TC_POOL_BIND_DEL = 0x1a + DEVLINK_CMD_SB_OCC_SNAPSHOT = 0x1b + DEVLINK_CMD_SB_OCC_MAX_CLEAR = 0x1c + DEVLINK_CMD_ESWITCH_GET = 0x1d + 
DEVLINK_CMD_ESWITCH_SET = 0x1e + DEVLINK_CMD_DPIPE_TABLE_GET = 0x1f + DEVLINK_CMD_DPIPE_ENTRIES_GET = 0x20 + DEVLINK_CMD_DPIPE_HEADERS_GET = 0x21 + DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 0x22 + DEVLINK_CMD_MAX = 0x44 + DEVLINK_PORT_TYPE_NOTSET = 0x0 + DEVLINK_PORT_TYPE_AUTO = 0x1 + DEVLINK_PORT_TYPE_ETH = 0x2 + DEVLINK_PORT_TYPE_IB = 0x3 + DEVLINK_SB_POOL_TYPE_INGRESS = 0x0 + DEVLINK_SB_POOL_TYPE_EGRESS = 0x1 + DEVLINK_SB_THRESHOLD_TYPE_STATIC = 0x0 + DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC = 0x1 + DEVLINK_ESWITCH_MODE_LEGACY = 0x0 + DEVLINK_ESWITCH_MODE_SWITCHDEV = 0x1 + DEVLINK_ESWITCH_INLINE_MODE_NONE = 0x0 + DEVLINK_ESWITCH_INLINE_MODE_LINK = 0x1 + DEVLINK_ESWITCH_INLINE_MODE_NETWORK = 0x2 + DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT = 0x3 + DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0x0 + DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 0x1 + DEVLINK_ATTR_UNSPEC = 0x0 + DEVLINK_ATTR_BUS_NAME = 0x1 + DEVLINK_ATTR_DEV_NAME = 0x2 + DEVLINK_ATTR_PORT_INDEX = 0x3 + DEVLINK_ATTR_PORT_TYPE = 0x4 + DEVLINK_ATTR_PORT_DESIRED_TYPE = 0x5 + DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 0x6 + DEVLINK_ATTR_PORT_NETDEV_NAME = 0x7 + DEVLINK_ATTR_PORT_IBDEV_NAME = 0x8 + DEVLINK_ATTR_PORT_SPLIT_COUNT = 0x9 + DEVLINK_ATTR_PORT_SPLIT_GROUP = 0xa + DEVLINK_ATTR_SB_INDEX = 0xb + DEVLINK_ATTR_SB_SIZE = 0xc + DEVLINK_ATTR_SB_INGRESS_POOL_COUNT = 0xd + DEVLINK_ATTR_SB_EGRESS_POOL_COUNT = 0xe + DEVLINK_ATTR_SB_INGRESS_TC_COUNT = 0xf + DEVLINK_ATTR_SB_EGRESS_TC_COUNT = 0x10 + DEVLINK_ATTR_SB_POOL_INDEX = 0x11 + DEVLINK_ATTR_SB_POOL_TYPE = 0x12 + DEVLINK_ATTR_SB_POOL_SIZE = 0x13 + DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE = 0x14 + DEVLINK_ATTR_SB_THRESHOLD = 0x15 + DEVLINK_ATTR_SB_TC_INDEX = 0x16 + DEVLINK_ATTR_SB_OCC_CUR = 0x17 + DEVLINK_ATTR_SB_OCC_MAX = 0x18 + DEVLINK_ATTR_ESWITCH_MODE = 0x19 + DEVLINK_ATTR_ESWITCH_INLINE_MODE = 0x1a + DEVLINK_ATTR_DPIPE_TABLES = 0x1b + DEVLINK_ATTR_DPIPE_TABLE = 0x1c + DEVLINK_ATTR_DPIPE_TABLE_NAME = 0x1d + DEVLINK_ATTR_DPIPE_TABLE_SIZE = 0x1e + DEVLINK_ATTR_DPIPE_TABLE_MATCHES = 0x1f + DEVLINK_ATTR_DPIPE_TABLE_ACTIONS = 0x20 + DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED = 0x21 + DEVLINK_ATTR_DPIPE_ENTRIES = 0x22 + DEVLINK_ATTR_DPIPE_ENTRY = 0x23 + DEVLINK_ATTR_DPIPE_ENTRY_INDEX = 0x24 + DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES = 0x25 + DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES = 0x26 + DEVLINK_ATTR_DPIPE_ENTRY_COUNTER = 0x27 + DEVLINK_ATTR_DPIPE_MATCH = 0x28 + DEVLINK_ATTR_DPIPE_MATCH_VALUE = 0x29 + DEVLINK_ATTR_DPIPE_MATCH_TYPE = 0x2a + DEVLINK_ATTR_DPIPE_ACTION = 0x2b + DEVLINK_ATTR_DPIPE_ACTION_VALUE = 0x2c + DEVLINK_ATTR_DPIPE_ACTION_TYPE = 0x2d + DEVLINK_ATTR_DPIPE_VALUE = 0x2e + DEVLINK_ATTR_DPIPE_VALUE_MASK = 0x2f + DEVLINK_ATTR_DPIPE_VALUE_MAPPING = 0x30 + DEVLINK_ATTR_DPIPE_HEADERS = 0x31 + DEVLINK_ATTR_DPIPE_HEADER = 0x32 + DEVLINK_ATTR_DPIPE_HEADER_NAME = 0x33 + DEVLINK_ATTR_DPIPE_HEADER_ID = 0x34 + DEVLINK_ATTR_DPIPE_HEADER_FIELDS = 0x35 + DEVLINK_ATTR_DPIPE_HEADER_GLOBAL = 0x36 + DEVLINK_ATTR_DPIPE_HEADER_INDEX = 0x37 + DEVLINK_ATTR_DPIPE_FIELD = 0x38 + DEVLINK_ATTR_DPIPE_FIELD_NAME = 0x39 + DEVLINK_ATTR_DPIPE_FIELD_ID = 0x3a + DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH = 0x3b + DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 0x3c + DEVLINK_ATTR_PAD = 0x3d + DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 0x3e + DEVLINK_ATTR_MAX = 0x8c + DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 + DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 + DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 + DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY = 0x0 + DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC = 0x0 + DEVLINK_DPIPE_FIELD_IPV4_DST_IP = 0x0 + DEVLINK_DPIPE_FIELD_IPV6_DST_IP = 0x0 + 
DEVLINK_DPIPE_HEADER_ETHERNET = 0x0 + DEVLINK_DPIPE_HEADER_IPV4 = 0x1 + DEVLINK_DPIPE_HEADER_IPV6 = 0x2 +) + +type FsverityDigest struct { + Algorithm uint16 + Size uint16 +} + +type FsverityEnableArg struct { + Version uint32 + Hash_algorithm uint32 + Block_size uint32 + Salt_size uint32 + Salt_ptr uint64 + Sig_size uint32 + _ uint32 + Sig_ptr uint64 + _ [11]uint64 +} + +type Nhmsg struct { + Family uint8 + Scope uint8 + Protocol uint8 + Resvd uint8 + Flags uint32 +} + +type NexthopGrp struct { + Id uint32 + Weight uint8 + Resvd1 uint8 + Resvd2 uint16 +} + +const ( + NHA_UNSPEC = 0x0 + NHA_ID = 0x1 + NHA_GROUP = 0x2 + NHA_GROUP_TYPE = 0x3 + NHA_BLACKHOLE = 0x4 + NHA_OIF = 0x5 + NHA_GATEWAY = 0x6 + NHA_ENCAP_TYPE = 0x7 + NHA_ENCAP = 0x8 + NHA_GROUPS = 0x9 + NHA_MASTER = 0xa +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 74d42bb5d..761b67c86 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x4 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x4 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x4 + SizeofLong = 0x4 ) type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 + _C_long int32 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int32 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 _ uint16 @@ -114,36 +100,6 @@ type Stat_t struct { Ino uint64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -153,10 +109,6 @@ type Dirent struct { _ [1]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -165,133 +117,11 @@ type Flock_t struct { Pid int32 } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ 
[1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - -type RawSockaddrTIPC struct { - Family uint16 - Addrtype uint8 - Scope int8 - Addr [12]byte -} - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -302,41 +132,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint32 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -353,399 +153,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofSockaddrTIPC = 0x10 - SizeofLinger = 0x8 - SizeofIovec = 0x8 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 -) - const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - 
NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofIovec = 
0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type IfaCacheinfo struct { - Prefered uint32 - Valid uint32 - Cstamp uint32 - Tstamp uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x8 + SizeofSockFprog = 0x8 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Ebx int32 Ecx int32 @@ -787,15 +204,6 @@ type Sysinfo_t struct { _ [8]int8 } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint32 @@ -810,35 +218,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -847,33 +227,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -885,13 +238,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -941,279 +287,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - 
TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint32 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x20 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - 
PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x20 ) const ( @@ -1289,22 +369,6 @@ type SockaddrStorage struct { _ uint32 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1312,88 +376,6 @@ type HDGeometry struct { Start uint32 } -type HDDriveID 
struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int32 Bsize int32 @@ -1409,18 +391,6 @@ type Statfs_t struct { Spare [4]int32 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint32 Len uint32 @@ -1431,589 +401,10 @@ type TpacketHdr struct { Usec uint32 } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x18 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - 
SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 + SizeofTpacketHdr = 0x18 ) -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - 
NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 
0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - 
NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2024,13 +415,6 @@ type RTCPLLInfo struct { Clock int32 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2040,168 +424,17 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x1269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x1269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info 
uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2332,182 +565,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - 
BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint16 @@ -2522,38 +579,6 @@ type LoopInfo struct { Init [2]uint32 Reserved [4]int8 } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 -} - -type TIPCSocketAddr struct { - Ref uint32 - Node uint32 -} - -type TIPCServiceRange struct { - Type uint32 - Lower uint32 - Upper uint32 -} - -type TIPCServiceName struct { - Type uint32 - Instance uint32 - Domain uint32 -} type TIPCSubscr struct { Seq TIPCServiceRange @@ -2562,21 +587,6 @@ type TIPCSubscr struct { Handle [8]int8 } -type TIPCEvent struct { - Event uint32 - Lower uint32 - Upper uint32 - Port TIPCSocketAddr - S TIPCSubscr -} - -type TIPCGroupReq struct { - Type uint32 - Instance uint32 - Scope uint32 - Flags uint32 -} - type TIPCSIOCLNReq struct { Peer 
uint32 Id uint32 @@ -2587,22 +597,3 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } - -const ( - TIPC_CLUSTER_SCOPE = 0x2 - TIPC_NODE_SCOPE = 0x3 -) - -const ( - SYSLOG_ACTION_CLOSE = 0 - SYSLOG_ACTION_OPEN = 1 - SYSLOG_ACTION_READ = 2 - SYSLOG_ACTION_READ_ALL = 3 - SYSLOG_ACTION_READ_CLEAR = 4 - SYSLOG_ACTION_CLEAR = 5 - SYSLOG_ACTION_CONSOLE_OFF = 6 - SYSLOG_ACTION_CONSOLE_ON = 7 - SYSLOG_ACTION_CONSOLE_LEVEL = 8 - SYSLOG_ACTION_SIZE_UNREAD = 9 - SYSLOG_ACTION_SIZE_BUFFER = 10 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 8debef94a..201fb3482 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 Ino uint64 @@ -113,36 +99,6 @@ type Stat_t struct { _ [3]int64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -152,10 +108,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -165,133 +117,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask 
uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - -type RawSockaddrTIPC struct { - Family uint16 - Addrtype uint8 - Scope int8 - Addr [12]byte -} - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -302,41 +132,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -354,399 +154,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofSockaddrTIPC = 0x10 - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - 
IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofSockFprog = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type 
NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type IfaCacheinfo struct { - Prefered uint32 - Valid uint32 - Cstamp uint32 - Tstamp uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 -) - -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { R15 uint64 R14 uint64 @@ -799,15 +216,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -823,35 +231,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -860,33 +240,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -898,13 +251,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -952,279 +298,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - 
Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - 
PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1300,22 +380,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1323,88 +387,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - 
Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1420,18 +402,6 @@ type Statfs_t struct { Spare [4]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1443,589 +413,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) 
- const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd + SizeofTpacketHdr = 0x20 ) -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - 
NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - 
NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - 
NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2036,13 +427,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2053,168 +437,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x1269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x1269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - 
CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2345,182 +579,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - 
BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint64 @@ -2536,38 +594,6 @@ type LoopInfo struct { Reserved [4]int8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 -} - -type TIPCSocketAddr struct { - Ref uint32 - Node uint32 -} - -type TIPCServiceRange struct { - Type uint32 - Lower uint32 - Upper uint32 -} - -type TIPCServiceName struct { - Type uint32 - Instance uint32 - Domain uint32 -} type TIPCSubscr struct { Seq TIPCServiceRange @@ -2576,21 +602,6 @@ type TIPCSubscr struct { Handle [8]int8 } -type TIPCEvent struct { - Event uint32 - Lower uint32 - Upper uint32 - Port TIPCSocketAddr - S TIPCSubscr -} - -type TIPCGroupReq struct { - Type uint32 - Instance uint32 - Scope uint32 - Flags uint32 -} - type TIPCSIOCLNReq struct { Peer uint32 Id uint32 @@ -2601,22 +612,3 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } - -const ( - TIPC_CLUSTER_SCOPE = 0x2 - TIPC_NODE_SCOPE = 0x3 -) - -const ( - SYSLOG_ACTION_CLOSE = 0 - SYSLOG_ACTION_OPEN = 1 - SYSLOG_ACTION_READ = 2 - SYSLOG_ACTION_READ_ALL 
= 3 - SYSLOG_ACTION_READ_CLEAR = 4 - SYSLOG_ACTION_CLEAR = 5 - SYSLOG_ACTION_CONSOLE_OFF = 6 - SYSLOG_ACTION_CONSOLE_ON = 7 - SYSLOG_ACTION_CONSOLE_LEVEL = 8 - SYSLOG_ACTION_SIZE_UNREAD = 9 - SYSLOG_ACTION_SIZE_BUFFER = 10 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index feb7d8370..8051b5610 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x4 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x4 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x4 + SizeofLong = 0x4 ) type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 + _C_long int32 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int32 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 _ uint16 @@ -116,36 +102,6 @@ type Stat_t struct { Ino uint64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -155,10 +111,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -169,133 +121,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type 
RawSockaddrPPPoX [0x1e]byte - -type RawSockaddrTIPC struct { - Family uint16 - Addrtype uint8 - Scope int8 - Addr [12]byte -} - type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -306,41 +136,11 @@ type RawSockaddrAny struct { Pad [96]uint8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint32 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -357,399 +157,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofSockaddrTIPC = 0x10 - SizeofLinger = 0x8 - SizeofIovec = 0x8 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 -) - const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - 
IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - 
Scope uint8 - Index uint32 -} - -type IfaCacheinfo struct { - Prefered uint32 - Valid uint32 - Cstamp uint32 - Tstamp uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x8 + SizeofSockFprog = 0x8 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Uregs [18]uint32 } @@ -775,15 +192,6 @@ type Sysinfo_t struct { _ [8]uint8 } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint32 @@ -799,35 +207,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -836,33 +216,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -874,13 +227,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -930,279 +276,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 
-) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint32 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x20 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - 
PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x20 ) const ( @@ -1278,22 +358,6 @@ type SockaddrStorage struct { _ uint32 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1301,88 +365,6 @@ type HDGeometry struct { Start uint32 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - 
Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int32 Bsize int32 @@ -1399,18 +381,6 @@ type Statfs_t struct { _ [4]byte } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint32 Len uint32 @@ -1421,589 +391,10 @@ type TpacketHdr struct { Usec uint32 } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x18 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id 
uint16 -} - const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 + SizeofTpacketHdr = 0x18 ) -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 
- NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 
0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - 
NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2014,13 +405,6 @@ type RTCPLLInfo struct { Clock int32 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2031,168 +415,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x1269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x1269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 
0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 @@ -2323,182 +557,6 @@ type CryptoReportAcomp struct { Type [64]uint8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - 
BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint16 @@ -2513,38 +571,6 @@ type LoopInfo struct { Init [2]uint32 Reserved [4]uint8 } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 -} - -type TIPCSocketAddr struct { - Ref uint32 - Node uint32 -} - -type TIPCServiceRange struct { - Type uint32 - Lower uint32 - Upper uint32 -} - -type TIPCServiceName struct { - Type uint32 - Instance uint32 - Domain uint32 -} type TIPCSubscr struct { Seq TIPCServiceRange @@ -2553,21 +579,6 @@ type TIPCSubscr struct { Handle [8]uint8 } -type TIPCEvent struct { - Event uint32 - Lower uint32 - Upper uint32 - Port TIPCSocketAddr - S TIPCSubscr -} - -type TIPCGroupReq struct { - Type uint32 - Instance uint32 - Scope uint32 - Flags uint32 -} - type TIPCSIOCLNReq struct { Peer uint32 Id uint32 @@ -2578,22 +589,3 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]uint8 } - -const ( - TIPC_CLUSTER_SCOPE = 0x2 - TIPC_NODE_SCOPE = 0x3 -) - -const ( - SYSLOG_ACTION_CLOSE = 0 - SYSLOG_ACTION_OPEN = 1 - SYSLOG_ACTION_READ = 2 - SYSLOG_ACTION_READ_ALL = 3 - SYSLOG_ACTION_READ_CLEAR = 4 - SYSLOG_ACTION_CLEAR = 5 - SYSLOG_ACTION_CONSOLE_OFF = 6 - SYSLOG_ACTION_CONSOLE_ON = 7 - SYSLOG_ACTION_CONSOLE_LEVEL = 8 - SYSLOG_ACTION_SIZE_UNREAD = 9 - SYSLOG_ACTION_SIZE_BUFFER = 10 -) diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6da217837..a936f2169 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 Ino uint64 @@ -114,36 +100,6 @@ type Stat_t struct { _ [2]int32 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -153,10 +109,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -166,133 +118,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - -type RawSockaddrTIPC struct { - Family uint16 - Addrtype uint8 - Scope int8 - Addr [12]byte -} - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -303,41 +133,11 @@ type RawSockaddrAny struct { Pad 
[96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -355,399 +155,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofSockaddrTIPC = 0x10 - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - 
IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofSockFprog = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type IfaCacheinfo struct { - Prefered uint32 - Valid uint32 - Cstamp uint32 - Tstamp uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table 
uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 -) - -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Regs [31]uint64 Sp uint64 @@ -777,15 +194,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -802,35 +210,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -839,33 +219,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -877,13 +230,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -931,279 +277,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - 
CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - 
PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1279,22 +359,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1302,88 +366,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - 
Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1399,18 +381,6 @@ type Statfs_t struct { Spare [4]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1422,589 +392,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 + SizeofTpacketHdr = 0x20 ) -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - 
NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - 
NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - 
NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year 
int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2015,13 +406,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2032,168 +416,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x1269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 + BLKPG = 0x1269 ) -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 -) - -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - 
CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2324,182 +558,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET 
= 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2515,38 +573,6 @@ type LoopInfo struct { Reserved [4]int8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 -} - -type TIPCSocketAddr struct { - Ref uint32 - Node uint32 -} - -type TIPCServiceRange struct { - Type uint32 - Lower uint32 - Upper uint32 -} - -type TIPCServiceName struct { - Type uint32 - Instance uint32 - Domain uint32 -} type TIPCSubscr struct { Seq TIPCServiceRange @@ -2555,21 +581,6 @@ type TIPCSubscr struct { Handle [8]int8 } -type TIPCEvent struct { - Event uint32 - Lower uint32 - Upper uint32 - Port TIPCSocketAddr - S TIPCSubscr -} - -type TIPCGroupReq struct { - Type uint32 - Instance uint32 - Scope uint32 - Flags uint32 -} - type TIPCSIOCLNReq struct { Peer uint32 Id uint32 @@ -2580,22 +591,3 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } - -const ( - TIPC_CLUSTER_SCOPE = 0x2 - TIPC_NODE_SCOPE = 0x3 -) - -const ( - SYSLOG_ACTION_CLOSE = 0 - SYSLOG_ACTION_OPEN = 1 - SYSLOG_ACTION_READ = 2 - SYSLOG_ACTION_READ_ALL = 3 - SYSLOG_ACTION_READ_CLEAR = 4 - SYSLOG_ACTION_CLEAR = 5 - SYSLOG_ACTION_CONSOLE_OFF = 6 - SYSLOG_ACTION_CONSOLE_ON = 7 - SYSLOG_ACTION_CONSOLE_LEVEL = 8 - SYSLOG_ACTION_SIZE_UNREAD = 9 - SYSLOG_ACTION_SIZE_BUFFER = 10 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 14b1dea68..aaca03dd7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x4 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x4 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x4 + SizeofLong = 0x4 ) type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 + _C_long int32 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int32 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint32 Pad1 [3]int32 @@ -115,36 +101,6 @@ type Stat_t struct { Pad5 [14]int32 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -154,10 +110,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -168,133 +120,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - -type RawSockaddrTIPC struct { - Family uint16 - Addrtype uint8 - Scope int8 - Addr [12]byte -} - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -305,41 +135,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint32 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - 
Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -356,399 +156,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofSockaddrTIPC = 0x10 - SizeofLinger = 0x8 - SizeofIovec = 0x8 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 -) - const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - 
IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type IfaCacheinfo struct { - Prefered uint32 - Valid uint32 - Cstamp uint32 - Tstamp uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - 
Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x8 + SizeofSockFprog = 0x8 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Regs [32]uint64 Lo uint64 @@ -780,15 +197,6 @@ type Sysinfo_t struct { _ [8]int8 } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint32 @@ -804,35 +212,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -841,33 +221,6 @@ type Sigset_t struct { const _C__NSIG = 0x80 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -879,13 +232,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -935,279 +281,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - 
CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint32 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x20 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS 
= 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x20 ) const ( @@ -1283,22 +363,6 @@ type SockaddrStorage struct { _ uint32 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1306,88 +370,6 @@ type HDGeometry struct { Start uint32 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 
uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int32 Bsize int32 @@ -1405,18 +387,6 @@ type Statfs_t struct { _ [4]byte } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint32 Len uint32 @@ -1427,589 +397,10 @@ type TpacketHdr struct { Usec uint32 } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x18 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 + SizeofTpacketHdr = 0x18 ) -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - 
NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - 
NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - 
NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2020,13 +411,6 @@ type RTCPLLInfo struct 
{ Clock int32 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2037,168 +421,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x20001269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - 
CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2329,182 +563,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - 
BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2519,38 +577,6 @@ type LoopInfo struct { Init [2]uint32 Reserved [4]int8 } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 -} - -type TIPCSocketAddr struct { - Ref uint32 - Node uint32 -} - -type TIPCServiceRange struct { - Type uint32 - Lower uint32 - Upper uint32 -} - -type TIPCServiceName struct { - Type uint32 - Instance uint32 - Domain uint32 -} type TIPCSubscr struct { Seq TIPCServiceRange @@ -2559,21 +585,6 @@ type TIPCSubscr struct { Handle [8]int8 } -type TIPCEvent struct { - Event uint32 - Lower uint32 - Upper uint32 - Port TIPCSocketAddr - S TIPCSubscr -} - -type TIPCGroupReq struct { - Type uint32 - Instance uint32 - Scope uint32 - Flags uint32 -} - type TIPCSIOCLNReq struct { Peer uint32 Id uint32 @@ -2584,22 +595,3 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } - -const ( - TIPC_CLUSTER_SCOPE = 0x2 - TIPC_NODE_SCOPE = 0x3 -) - -const ( - SYSLOG_ACTION_CLOSE = 0 - SYSLOG_ACTION_OPEN = 1 - SYSLOG_ACTION_READ = 2 - SYSLOG_ACTION_READ_ALL = 3 - SYSLOG_ACTION_READ_CLEAR = 4 - SYSLOG_ACTION_CLEAR = 5 - SYSLOG_ACTION_CONSOLE_OFF = 6 - SYSLOG_ACTION_CONSOLE_ON = 7 - SYSLOG_ACTION_CONSOLE_LEVEL = 8 - SYSLOG_ACTION_SIZE_UNREAD = 9 - SYSLOG_ACTION_SIZE_BUFFER = 10 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 0fb94a768..2e7f3b8ca 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + 
SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint32 Pad1 [3]uint32 @@ -114,36 +100,6 @@ type Stat_t struct { Blocks int64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -153,10 +109,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -166,133 +118,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - -type RawSockaddrTIPC struct { - Family uint16 - Addrtype uint8 - Scope int8 - Addr [12]byte -} - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -303,41 +133,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - 
-type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -355,399 +155,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofSockaddrTIPC = 0x10 - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - 
IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofSockFprog = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type IfaCacheinfo struct { - Prefered uint32 - Valid uint32 - Cstamp uint32 - Tstamp uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - 
Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 -) - -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Regs [32]uint64 Lo uint64 @@ -780,15 +197,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -799,40 +207,13 @@ type Ustat_t struct { type EpollEvent struct { Events uint32 + _ int32 Fd int32 Pad int32 } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -841,33 +222,6 @@ type Sigset_t struct { const _C__NSIG = 0x80 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -879,13 +233,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -933,279 +280,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - 
CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - 
PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1281,22 +362,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1304,88 +369,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - 
Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1401,18 +384,6 @@ type Statfs_t struct { Spare [5]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1424,589 +395,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 + SizeofTpacketHdr = 0x20 ) -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 
- NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - 
NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - 
NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2017,13 +409,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2034,168 +419,18 @@ 
type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 + BLKPG = 0x20001269 ) -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 -) - -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - 
type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2326,182 +561,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN 
= 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2517,38 +576,6 @@ type LoopInfo struct { Reserved [4]int8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 -} - -type TIPCSocketAddr struct { - Ref uint32 - Node uint32 -} - -type TIPCServiceRange struct { - Type uint32 - Lower uint32 - Upper uint32 -} - -type TIPCServiceName struct { - Type uint32 - Instance uint32 - Domain uint32 -} type TIPCSubscr struct { Seq TIPCServiceRange @@ -2557,21 +584,6 @@ type TIPCSubscr struct { Handle [8]int8 } -type TIPCEvent struct { - Event uint32 - Lower uint32 - Upper uint32 - Port TIPCSocketAddr - S TIPCSubscr -} - -type TIPCGroupReq struct { - Type uint32 - Instance uint32 - Scope uint32 - Flags uint32 -} - type TIPCSIOCLNReq struct { Peer uint32 Id uint32 @@ -2582,22 +594,3 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } - -const ( - TIPC_CLUSTER_SCOPE = 0x2 - TIPC_NODE_SCOPE = 0x3 -) - -const ( - SYSLOG_ACTION_CLOSE = 0 - SYSLOG_ACTION_OPEN = 1 - SYSLOG_ACTION_READ = 2 - SYSLOG_ACTION_READ_ALL = 3 - SYSLOG_ACTION_READ_CLEAR = 4 - SYSLOG_ACTION_CLEAR = 5 - SYSLOG_ACTION_CONSOLE_OFF = 6 - SYSLOG_ACTION_CONSOLE_ON = 7 - SYSLOG_ACTION_CONSOLE_LEVEL = 8 - SYSLOG_ACTION_SIZE_UNREAD = 9 - SYSLOG_ACTION_SIZE_BUFFER = 10 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 7ffc7bbc5..16add5a25 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit 
struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint32 Pad1 [3]uint32 @@ -114,36 +100,6 @@ type Stat_t struct { Blocks int64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -153,10 +109,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -166,133 +118,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - -type RawSockaddrTIPC struct { - Family uint16 - Addrtype uint8 - Scope int8 - Addr [12]byte -} - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -303,41 +133,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -355,399 +155,16 @@ type Cmsghdr struct { Type int32 } -type 
Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofSockaddrTIPC = 0x10 - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS 
= 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofSockFprog = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type IfaCacheinfo struct { - Prefered uint32 - Valid uint32 - Cstamp uint32 - Tstamp uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 -) - -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter 
*SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Regs [32]uint64 Lo uint64 @@ -780,15 +197,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -799,40 +207,13 @@ type Ustat_t struct { type EpollEvent struct { Events uint32 + _ int32 Fd int32 Pad int32 } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -841,33 +222,6 @@ type Sigset_t struct { const _C__NSIG = 0x80 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -879,13 +233,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -933,279 +280,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - 
_CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 
- PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1281,22 +362,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1304,88 +369,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo 
uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1401,18 +384,6 @@ type Statfs_t struct { Spare [5]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1424,589 +395,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 + SizeofTpacketHdr = 0x20 ) -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - 
NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - 
NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - 
NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2017,13 +409,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2034,168 +419,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 + BLKPG = 0x20001269 ) -const ( - NETNSA_NONE = 
0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 -) - -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2326,182 +561,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 
- BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - 
BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2517,38 +576,6 @@ type LoopInfo struct { Reserved [4]int8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 -} - -type TIPCSocketAddr struct { - Ref uint32 - Node uint32 -} - -type TIPCServiceRange struct { - Type uint32 - Lower uint32 - Upper uint32 -} - -type TIPCServiceName struct { - Type uint32 - Instance uint32 - Domain uint32 -} type TIPCSubscr struct { Seq TIPCServiceRange @@ -2557,21 +584,6 @@ type TIPCSubscr struct { Handle [8]int8 } -type TIPCEvent struct { - Event uint32 - Lower uint32 - Upper uint32 - Port TIPCSocketAddr - S TIPCSubscr -} - -type TIPCGroupReq struct { - Type uint32 - Instance uint32 - Scope uint32 - Flags uint32 -} - type TIPCSIOCLNReq struct { Peer uint32 Id uint32 @@ -2582,22 +594,3 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } - -const ( - TIPC_CLUSTER_SCOPE = 0x2 - TIPC_NODE_SCOPE = 0x3 -) - -const ( - SYSLOG_ACTION_CLOSE = 0 - SYSLOG_ACTION_OPEN = 1 - SYSLOG_ACTION_READ = 2 - SYSLOG_ACTION_READ_ALL = 3 - SYSLOG_ACTION_READ_CLEAR = 4 - SYSLOG_ACTION_CLEAR = 5 - SYSLOG_ACTION_CONSOLE_OFF = 6 - SYSLOG_ACTION_CONSOLE_ON = 7 - SYSLOG_ACTION_CONSOLE_LEVEL = 8 - SYSLOG_ACTION_SIZE_UNREAD = 9 - SYSLOG_ACTION_SIZE_BUFFER = 10 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 12ef8eb45..4ed2c8e54 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x4 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x4 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x4 + SizeofLong = 0x4 ) type ( - _C_short int16 - _C_int int32 - _C_long int32 - _C_long_long int64 + _C_long int32 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int32 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint32 Pad1 [3]int32 @@ -115,36 +101,6 @@ type Stat_t struct { Pad5 [14]int32 } -type StatxTimestamp struct { - Sec 
int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -154,10 +110,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -168,133 +120,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - -type RawSockaddrTIPC struct { - Family uint16 - Addrtype uint8 - Scope int8 - Addr [12]byte -} - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -305,41 +135,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint32 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -356,399 +156,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo 
struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - -const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofSockaddrTIPC = 0x10 - SizeofLinger = 0x8 - SizeofIovec = 0x8 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x1c - SizeofCmsghdr = 0xc - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 -) - const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - 
IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofIovec = 0x8 + SizeofMsghdr = 0x1c + SizeofCmsghdr = 0xc ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type IfaCacheinfo struct { - Prefered uint32 - Valid uint32 - Cstamp uint32 - Tstamp uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x8 + SizeofSockFprog = 0x8 ) -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Regs [32]uint64 Lo uint64 @@ -780,15 +197,6 
@@ type Sysinfo_t struct { _ [8]int8 } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint32 @@ -804,35 +212,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -841,33 +221,6 @@ type Sigset_t struct { const _C__NSIG = 0x80 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -879,13 +232,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -935,279 +281,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint32 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x20 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup 
uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 
0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x20 ) const ( @@ -1283,22 +363,6 @@ type SockaddrStorage struct { _ uint32 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1306,88 +370,6 @@ type HDGeometry struct { Start uint32 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int32 Bsize int32 @@ -1405,18 +387,6 @@ type Statfs_t 
struct { _ [4]byte } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint32 Len uint32 @@ -1427,589 +397,10 @@ type TpacketHdr struct { Usec uint32 } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x18 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 + SizeofTpacketHdr = 0x18 ) -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 
0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - 
NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - 
NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2020,13 +411,6 @@ type RTCPLLInfo struct { Clock int32 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2037,168 +421,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x20001269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg 
struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2329,182 +563,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 
0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - 
BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2519,38 +577,6 @@ type LoopInfo struct { Init [2]uint32 Reserved [4]int8 } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 -} - -type TIPCSocketAddr struct { - Ref uint32 - Node uint32 -} - -type TIPCServiceRange struct { - Type uint32 - Lower uint32 - Upper uint32 -} - -type TIPCServiceName struct { - Type uint32 - Instance uint32 - Domain uint32 -} type TIPCSubscr struct { Seq TIPCServiceRange @@ -2559,21 +585,6 @@ type TIPCSubscr struct { Handle [8]int8 } -type TIPCEvent struct { - Event uint32 - Lower uint32 - Upper uint32 - Port TIPCSocketAddr - S TIPCSubscr -} - -type TIPCGroupReq struct { - Type uint32 - Instance uint32 - Scope uint32 - Flags uint32 -} - type TIPCSIOCLNReq struct { Peer uint32 Id uint32 @@ -2584,22 +595,3 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } - -const ( - TIPC_CLUSTER_SCOPE = 0x2 - TIPC_NODE_SCOPE = 0x3 -) - -const ( - SYSLOG_ACTION_CLOSE = 0 - SYSLOG_ACTION_OPEN = 1 - SYSLOG_ACTION_READ = 2 - SYSLOG_ACTION_READ_ALL = 3 - SYSLOG_ACTION_READ_CLEAR = 4 - SYSLOG_ACTION_CLEAR = 5 - SYSLOG_ACTION_CONSOLE_OFF = 6 - SYSLOG_ACTION_CONSOLE_ON = 7 - SYSLOG_ACTION_CONSOLE_LEVEL = 8 - SYSLOG_ACTION_SIZE_UNREAD = 9 - SYSLOG_ACTION_SIZE_BUFFER = 10 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index cb89d8a1e..741519099 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 Ino uint64 @@ -115,36 +101,6 @@ type Stat_t struct { _ uint64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime 
StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -154,10 +110,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -167,133 +119,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - -type RawSockaddrTIPC struct { - Family uint16 - Addrtype uint8 - Scope int8 - Addr [12]byte -} - type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -304,41 +134,11 @@ type RawSockaddrAny struct { Pad [96]uint8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -356,399 +156,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - 
Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofSockaddrTIPC = 0x10 - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 
- RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofSockFprog = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type IfaCacheinfo struct { - Prefered uint32 - Valid uint32 - Cstamp uint32 - Tstamp uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 -) - -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Gpr [32]uint64 Nip uint64 @@ -787,15 +204,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -812,35 +220,7 @@ type EpollEvent 
struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -849,33 +229,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -887,13 +240,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -941,279 +287,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct 
{ - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - 
PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1289,22 +369,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1312,88 +376,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1409,18 +391,6 @@ type Statfs_t struct { Spare [4]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { 
Status uint64 Len uint32 @@ -1432,589 +402,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - -const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 -) - const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd + SizeofTpacketHdr = 0x20 ) -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 
0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - 
NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - 
NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2025,13 +416,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2042,168 +426,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 -) - -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + BLKPG = 0x20001269 ) -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - 
Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 @@ -2334,182 +568,6 @@ type CryptoReportAcomp struct { Type [64]uint8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - 
BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 
0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint64 @@ -2525,38 +583,6 @@ type LoopInfo struct { Reserved [4]uint8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 -} - -type TIPCSocketAddr struct { - Ref uint32 - Node uint32 -} - -type TIPCServiceRange struct { - Type uint32 - Lower uint32 - Upper uint32 -} - -type TIPCServiceName struct { - Type uint32 - Instance uint32 - Domain uint32 -} type TIPCSubscr struct { Seq TIPCServiceRange @@ -2565,21 +591,6 @@ type TIPCSubscr struct { Handle [8]uint8 } -type TIPCEvent struct { - Event uint32 - Lower uint32 - Upper uint32 - Port TIPCSocketAddr - S TIPCSubscr -} - -type TIPCGroupReq struct { - Type uint32 - Instance uint32 - Scope uint32 - Flags uint32 -} - type TIPCSIOCLNReq struct { Peer uint32 Id uint32 @@ -2590,22 +601,3 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]uint8 } - -const ( - TIPC_CLUSTER_SCOPE = 0x2 - TIPC_NODE_SCOPE = 0x3 -) - -const ( - SYSLOG_ACTION_CLOSE = 0 - SYSLOG_ACTION_OPEN = 1 - SYSLOG_ACTION_READ = 2 - SYSLOG_ACTION_READ_ALL = 3 - SYSLOG_ACTION_READ_CLEAR = 4 - SYSLOG_ACTION_CLEAR = 5 - SYSLOG_ACTION_CONSOLE_OFF = 6 - SYSLOG_ACTION_CONSOLE_ON = 7 - SYSLOG_ACTION_CONSOLE_LEVEL = 8 - SYSLOG_ACTION_SIZE_UNREAD = 9 - SYSLOG_ACTION_SIZE_BUFFER = 10 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index d9c93affb..046c2debd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 Ino uint64 @@ -115,36 +101,6 @@ type Stat_t struct { _ uint64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -154,10 +110,6 @@ type Dirent 
struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -167,133 +119,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - -type RawSockaddrTIPC struct { - Family uint16 - Addrtype uint8 - Scope int8 - Addr [12]byte -} - type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -304,41 +134,11 @@ type RawSockaddrAny struct { Pad [96]uint8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -356,399 +156,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - 
[vendor/golang.org/x/sys/unix: remaining diff hunks for the preceding per-architecture ztypes_linux file, followed by the diff for ztypes_linux_riscv64.go (index a198cc529..0f2f61a6a). In both generated files the vendored update deletes the large blocks of architecture-duplicated declarations: Sizeof* socket constants; netlink NDA_*, IFA_*, IFLA_*, RTA_* and RTNLGRP_* constants and message types; SockFilter, InotifyEvent, Utsname, PollFd, SignalfdSiginfo and Winsize; taskstats, cgroupstats and genetlink CTRL_* constants; perf_event types and PERF_* constants; TCPMD5Sig, HDDriveID and ST_* statfs flags; tpacket, netfilter NFT_*/NFTA_*, NETNSA_*, XDP, NCSI, timestamping, fanotify and crypto CRYPTOCFGA_* constants; BPF_*, capability, loop-device, TIPC and SYSLOG_ACTION_* definitions; and, in the riscv64 file, Rlimit, Statx_t, Fscrypt*, extra FADV_* values and the RawSockaddr* family. Each file keeps its per-architecture remainder (PtraceRegs, Sysinfo_t, Ustat_t, EpollEvent, Sigset_t, Termios, Taskstats, SockaddrStorage, HDGeometry, Statfs_t, TpacketHdr, RTCPLLInfo, BlkpgPartition, XDPUmemReg, CryptoUser*/CryptoReport*, LoopInfo and the TIPC request types), adds Ac_btime64 to Taskstats and a Flags field with trailing padding to XDPUmemReg, and trims the Sizeof constant groups down to the handful still used. The section is cut off mid-hunk at NFTA_REDIR_REG_PROTO_MIN.]
0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2043,13 +434,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2060,168 +444,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x1269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 + BLKPG = 0x1269 ) -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 -) - -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - 
NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]uint8 Driver_name [64]uint8 @@ -2352,182 +586,6 @@ type CryptoReportAcomp struct { Type [64]uint8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - 
BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - 
LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2543,38 +601,6 @@ type LoopInfo struct { Reserved [4]uint8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 -} - -type TIPCSocketAddr struct { - Ref uint32 - Node uint32 -} - -type TIPCServiceRange struct { - Type uint32 - Lower uint32 - Upper uint32 -} - -type TIPCServiceName struct { - Type uint32 - Instance uint32 - Domain uint32 -} type TIPCSubscr struct { Seq TIPCServiceRange @@ -2583,21 +609,6 @@ type TIPCSubscr struct { Handle [8]uint8 } -type TIPCEvent struct { - Event uint32 - Lower uint32 - Upper uint32 - Port TIPCSocketAddr - S TIPCSubscr -} - -type TIPCGroupReq struct { - Type uint32 - Instance uint32 - Scope uint32 - Flags uint32 -} - type TIPCSIOCLNReq struct { Peer uint32 Id uint32 @@ -2608,22 +619,3 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]uint8 } - -const ( - TIPC_CLUSTER_SCOPE = 0x2 - TIPC_NODE_SCOPE = 0x3 -) - -const ( - SYSLOG_ACTION_CLOSE = 0 - SYSLOG_ACTION_OPEN = 1 - SYSLOG_ACTION_READ = 2 - SYSLOG_ACTION_READ_ALL = 3 - SYSLOG_ACTION_READ_CLEAR = 4 - SYSLOG_ACTION_CLEAR = 5 - SYSLOG_ACTION_CONSOLE_OFF = 6 - SYSLOG_ACTION_CONSOLE_ON = 7 - SYSLOG_ACTION_CONSOLE_LEVEL = 8 - SYSLOG_ACTION_SIZE_UNREAD = 9 - SYSLOG_ACTION_SIZE_BUFFER = 10 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index f1e26c565..cca1b6be2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -88,13 +81,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 Ino uint64 @@ -113,36 +99,6 @@ type Stat_t struct { _ [3]int64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -152,10 +108,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -165,133 +117,11 @@ type Flock_t struct { _ [4]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - 
FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x6 - FADV_NOREUSE = 0x7 + FADV_DONTNEED = 0x6 + FADV_NOREUSE = 0x7 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - -type RawSockaddrTIPC struct { - Family uint16 - Addrtype uint8 - Scope int8 - Addr [12]byte -} - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -302,41 +132,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -354,399 +154,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - 
SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofSockaddrTIPC = 0x10 - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 
0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofSockFprog = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type IfaCacheinfo struct { - Prefered uint32 - Valid uint32 - Cstamp uint32 - Tstamp uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 -) - -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Psw PtracePsw Gprs [16]uint64 @@ -800,15 +217,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -825,35 +233,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x2000 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -862,33 +242,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno 
uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 @@ -900,13 +253,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -954,279 +300,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - 
PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 
0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = -0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1302,22 +382,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1325,88 +389,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type uint32 Bsize uint32 @@ -1423,18 +405,6 @@ type Statfs_t struct { _ [4]byte } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1446,589 +416,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - 
Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 + SizeofTpacketHdr = 0x20 ) -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - 
NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS 
= 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - 
NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2039,13 +430,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2056,168 +440,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x1269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 + BLKPG = 0x1269 ) -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 -) - -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST 
= 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2348,182 +582,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - 
BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint16 @@ -2539,38 
+597,6 @@ type LoopInfo struct { Reserved [4]int8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name [64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 -} - -type TIPCSocketAddr struct { - Ref uint32 - Node uint32 -} - -type TIPCServiceRange struct { - Type uint32 - Lower uint32 - Upper uint32 -} - -type TIPCServiceName struct { - Type uint32 - Instance uint32 - Domain uint32 -} type TIPCSubscr struct { Seq TIPCServiceRange @@ -2579,21 +605,6 @@ type TIPCSubscr struct { Handle [8]int8 } -type TIPCEvent struct { - Event uint32 - Lower uint32 - Upper uint32 - Port TIPCSocketAddr - S TIPCSubscr -} - -type TIPCGroupReq struct { - Type uint32 - Instance uint32 - Scope uint32 - Flags uint32 -} - type TIPCSIOCLNReq struct { Peer uint32 Id uint32 @@ -2604,22 +615,3 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } - -const ( - TIPC_CLUSTER_SCOPE = 0x2 - TIPC_NODE_SCOPE = 0x3 -) - -const ( - SYSLOG_ACTION_CLOSE = 0 - SYSLOG_ACTION_OPEN = 1 - SYSLOG_ACTION_READ = 2 - SYSLOG_ACTION_READ_ALL = 3 - SYSLOG_ACTION_READ_CLEAR = 4 - SYSLOG_ACTION_CLEAR = 5 - SYSLOG_ACTION_CONSOLE_OFF = 6 - SYSLOG_ACTION_CONSOLE_ON = 7 - SYSLOG_ACTION_CONSOLE_LEVEL = 8 - SYSLOG_ACTION_SIZE_UNREAD = 9 - SYSLOG_ACTION_SIZE_BUFFER = 10 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index d28248040..33a73bf18 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -6,19 +6,12 @@ package unix const ( - SizeofPtr = 0x8 - SizeofShort = 0x2 - SizeofInt = 0x4 - SizeofLong = 0x8 - SizeofLongLong = 0x8 - PathMax = 0x1000 + SizeofPtr = 0x8 + SizeofLong = 0x8 ) type ( - _C_short int16 - _C_int int32 - _C_long int64 - _C_long_long int64 + _C_long int64 ) type Timespec struct { @@ -89,13 +82,6 @@ type Rusage struct { Nivcsw int64 } -type Rlimit struct { - Cur uint64 - Max uint64 -} - -type _Gid_t uint32 - type Stat_t struct { Dev uint64 _ uint16 @@ -116,36 +102,6 @@ type Stat_t struct { _ uint64 } -type StatxTimestamp struct { - Sec int64 - Nsec uint32 - _ int32 -} - -type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - _ [14]uint64 -} - type Dirent struct { Ino uint64 Off int64 @@ -155,10 +111,6 @@ type Dirent struct { _ [5]byte } -type Fsid struct { - Val [2]int32 -} - type Flock_t struct { Type int16 Whence int16 @@ -169,133 +121,11 @@ type Flock_t struct { _ [2]byte } -type FscryptPolicy struct { - Version uint8 - Contents_encryption_mode uint8 - Filenames_encryption_mode uint8 - Flags uint8 - Master_key_descriptor [8]uint8 -} - -type FscryptKey struct { - Mode uint32 - Raw [64]uint8 - Size uint32 -} - -type KeyctlDHParams struct { - Private int32 - Prime int32 - Base int32 -} - const ( - FADV_NORMAL = 0x0 - FADV_RANDOM = 0x1 - FADV_SEQUENTIAL = 0x2 - FADV_WILLNEED = 0x3 - FADV_DONTNEED = 0x4 - FADV_NOREUSE = 0x5 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 ) -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type 
RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddrUnix struct { - Family uint16 - Path [108]int8 -} - -type RawSockaddrLinklayer struct { - Family uint16 - Protocol uint16 - Ifindex int32 - Hatype uint16 - Pkttype uint8 - Halen uint8 - Addr [8]uint8 -} - -type RawSockaddrNetlink struct { - Family uint16 - Pad uint16 - Pid uint32 - Groups uint32 -} - -type RawSockaddrHCI struct { - Family uint16 - Dev uint16 - Channel uint16 -} - -type RawSockaddrL2 struct { - Family uint16 - Psm uint16 - Bdaddr [6]uint8 - Cid uint16 - Bdaddr_type uint8 - _ [1]byte -} - -type RawSockaddrRFCOMM struct { - Family uint16 - Bdaddr [6]uint8 - Channel uint8 - _ [1]byte -} - -type RawSockaddrCAN struct { - Family uint16 - Ifindex int32 - Addr [8]byte -} - -type RawSockaddrALG struct { - Family uint16 - Type [14]uint8 - Feat uint32 - Mask uint32 - Name [64]uint8 -} - -type RawSockaddrVM struct { - Family uint16 - Reserved1 uint16 - Port uint32 - Cid uint32 - Zero [4]uint8 -} - -type RawSockaddrXDP struct { - Family uint16 - Flags uint16 - Ifindex uint32 - Queue_id uint32 - Shared_umem_fd uint32 -} - -type RawSockaddrPPPoX [0x1e]byte - -type RawSockaddrTIPC struct { - Family uint16 - Addrtype uint8 - Scope int8 - Addr [12]byte -} - type RawSockaddr struct { Family uint16 Data [14]int8 @@ -306,41 +136,11 @@ type RawSockaddrAny struct { Pad [96]int8 } -type _Socklen uint32 - -type Linger struct { - Onoff int32 - Linger int32 -} - type Iovec struct { Base *byte Len uint64 } -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPMreqn struct { - Multiaddr [4]byte /* in_addr */ - Address [4]byte /* in_addr */ - Ifindex int32 -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -type PacketMreq struct { - Ifindex int32 - Type uint16 - Alen uint16 - Address [8]uint8 -} - type Msghdr struct { Name *byte Namelen uint32 @@ -358,399 +158,16 @@ type Cmsghdr struct { Type int32 } -type Inet4Pktinfo struct { - Ifindex int32 - Spec_dst [4]byte /* in_addr */ - Addr [4]byte /* in_addr */ -} - -type Inet6Pktinfo struct { - Addr [16]byte /* in6_addr */ - Ifindex uint32 -} - -type IPv6MTUInfo struct { - Addr RawSockaddrInet6 - Mtu uint32 -} - -type ICMPv6Filter struct { - Data [8]uint32 -} - -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 -} - -type CanFilter struct { - Id uint32 - Mask uint32 -} - const ( - SizeofSockaddrInet4 = 0x10 - SizeofSockaddrInet6 = 0x1c - SizeofSockaddrAny = 0x70 - SizeofSockaddrUnix = 0x6e - SizeofSockaddrLinklayer = 0x14 - SizeofSockaddrNetlink = 0xc - SizeofSockaddrHCI = 0x6 - SizeofSockaddrL2 = 0xe - SizeofSockaddrRFCOMM = 0xa - SizeofSockaddrCAN = 0x10 - SizeofSockaddrALG = 0x58 - SizeofSockaddrVM = 0x10 - SizeofSockaddrXDP = 0x10 - SizeofSockaddrPPPoX = 0x1e - SizeofSockaddrTIPC = 0x10 - SizeofLinger = 0x8 - SizeofIovec = 0x10 - SizeofIPMreq = 0x8 - SizeofIPMreqn = 
0xc - SizeofIPv6Mreq = 0x14 - SizeofPacketMreq = 0x10 - SizeofMsghdr = 0x38 - SizeofCmsghdr = 0x10 - SizeofInet4Pktinfo = 0xc - SizeofInet6Pktinfo = 0x14 - SizeofIPv6MTUInfo = 0x20 - SizeofICMPv6Filter = 0x20 - SizeofUcred = 0xc - SizeofTCPInfo = 0x68 - SizeofCanFilter = 0x8 + SizeofIovec = 0x10 + SizeofMsghdr = 0x38 + SizeofCmsghdr = 0x10 ) const ( - NDA_UNSPEC = 0x0 - NDA_DST = 0x1 - NDA_LLADDR = 0x2 - NDA_CACHEINFO = 0x3 - NDA_PROBES = 0x4 - NDA_VLAN = 0x5 - NDA_PORT = 0x6 - NDA_VNI = 0x7 - NDA_IFINDEX = 0x8 - NDA_MASTER = 0x9 - NDA_LINK_NETNSID = 0xa - NDA_SRC_VNI = 0xb - NTF_USE = 0x1 - NTF_SELF = 0x2 - NTF_MASTER = 0x4 - NTF_PROXY = 0x8 - NTF_EXT_LEARNED = 0x10 - NTF_OFFLOADED = 0x20 - NTF_ROUTER = 0x80 - NUD_INCOMPLETE = 0x1 - NUD_REACHABLE = 0x2 - NUD_STALE = 0x4 - NUD_DELAY = 0x8 - NUD_PROBE = 0x10 - NUD_FAILED = 0x20 - NUD_NOARP = 0x40 - NUD_PERMANENT = 0x80 - NUD_NONE = 0x0 - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFA_FLAGS = 0x8 - IFA_RT_PRIORITY = 0x9 - IFA_TARGET_NETNSID = 0xa - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_NUM_VF = 0x15 - IFLA_VFINFO_LIST = 0x16 - IFLA_STATS64 = 0x17 - IFLA_VF_PORTS = 0x18 - IFLA_PORT_SELF = 0x19 - IFLA_AF_SPEC = 0x1a - IFLA_GROUP = 0x1b - IFLA_NET_NS_FD = 0x1c - IFLA_EXT_MASK = 0x1d - IFLA_PROMISCUITY = 0x1e - IFLA_NUM_TX_QUEUES = 0x1f - IFLA_NUM_RX_QUEUES = 0x20 - IFLA_CARRIER = 0x21 - IFLA_PHYS_PORT_ID = 0x22 - IFLA_CARRIER_CHANGES = 0x23 - IFLA_PHYS_SWITCH_ID = 0x24 - IFLA_LINK_NETNSID = 0x25 - IFLA_PHYS_PORT_NAME = 0x26 - IFLA_PROTO_DOWN = 0x27 - IFLA_GSO_MAX_SEGS = 0x28 - IFLA_GSO_MAX_SIZE = 0x29 - IFLA_PAD = 0x2a - IFLA_XDP = 0x2b - IFLA_EVENT = 0x2c - IFLA_NEW_NETNSID = 0x2d - IFLA_IF_NETNSID = 0x2e - IFLA_TARGET_NETNSID = 0x2e - IFLA_CARRIER_UP_COUNT = 0x2f - IFLA_CARRIER_DOWN_COUNT = 0x30 - IFLA_NEW_IFINDEX = 0x31 - IFLA_MIN_MTU = 0x32 - IFLA_MAX_MTU = 0x33 - IFLA_MAX = 0x33 - IFLA_INFO_KIND = 0x1 - IFLA_INFO_DATA = 0x2 - IFLA_INFO_XSTATS = 0x3 - IFLA_INFO_SLAVE_KIND = 0x4 - IFLA_INFO_SLAVE_DATA = 0x5 - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTA_MARK = 0x10 - RTA_MFC_STATS = 0x11 - RTA_VIA = 0x12 - RTA_NEWDST = 0x13 - RTA_PREF = 0x14 - RTA_ENCAP_TYPE = 0x15 - RTA_ENCAP = 0x16 - RTA_EXPIRES = 0x17 - RTA_PAD = 0x18 - RTA_UID = 0x19 - RTA_TTL_PROPAGATE = 0x1a - RTA_IP_PROTO = 0x1b - RTA_SPORT = 0x1c - RTA_DPORT = 0x1d - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - 
RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofIfaCacheinfo = 0x10 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 - SizeofNdUseroptmsg = 0x10 - SizeofNdMsg = 0xc + SizeofSockFprog = 0x10 ) -type NlMsghdr struct { - Len uint32 - Type uint16 - Flags uint16 - Seq uint32 - Pid uint32 -} - -type NlMsgerr struct { - Error int32 - Msg NlMsghdr -} - -type RtGenmsg struct { - Family uint8 -} - -type NlAttr struct { - Len uint16 - Type uint16 -} - -type RtAttr struct { - Len uint16 - Type uint16 -} - -type IfInfomsg struct { - Family uint8 - _ uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 -} - -type IfAddrmsg struct { - Family uint8 - Prefixlen uint8 - Flags uint8 - Scope uint8 - Index uint32 -} - -type IfaCacheinfo struct { - Prefered uint32 - Valid uint32 - Cstamp uint32 - Tstamp uint32 -} - -type RtMsg struct { - Family uint8 - Dst_len uint8 - Src_len uint8 - Tos uint8 - Table uint8 - Protocol uint8 - Scope uint8 - Type uint8 - Flags uint32 -} - -type RtNexthop struct { - Len uint16 - Flags uint8 - Hops uint8 - Ifindex int32 -} - -type NdUseroptmsg struct { - Family uint8 - Pad1 uint8 - Opts_len uint16 - Ifindex int32 - Icmp_type uint8 - Icmp_code uint8 - Pad2 uint16 - Pad3 uint32 -} - -type NdMsg struct { - Family uint8 - Pad1 uint8 - Pad2 uint16 - Ifindex int32 - State uint16 - Flags uint8 - Type uint8 -} - -const ( - SizeofSockFilter = 0x8 - SizeofSockFprog = 0x10 -) - -type SockFilter struct { - Code uint16 - Jt uint8 - Jf uint8 - K uint32 -} - -type SockFprog struct { - Len uint16 - Filter *SockFilter -} - -type InotifyEvent struct { - Wd int32 - Mask uint32 - Cookie uint32 - Len uint32 -} - -const SizeofInotifyEvent = 0x10 - type PtraceRegs struct { Regs [16]uint64 Tstate uint64 @@ -782,15 +199,6 @@ type Sysinfo_t struct { _ [4]byte } -type Utsname struct { - Sysname [65]byte - Nodename [65]byte - Release [65]byte - Version [65]byte - Machine [65]byte - Domainname [65]byte -} - type Ustat_t struct { Tfree int32 Tinode uint64 @@ -807,35 +215,7 @@ type EpollEvent struct { } const ( - AT_EMPTY_PATH = 0x1000 - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 - - AT_STATX_SYNC_AS_STAT = 0x0 - AT_STATX_FORCE_SYNC = 0x2000 - AT_STATX_DONT_SYNC = 0x4000 - - AT_SYMLINK_FOLLOW = 0x400 - AT_SYMLINK_NOFOLLOW = 0x100 - - AT_EACCESS = 0x200 -) - -type PollFd struct { - Fd int32 - Events int16 - Revents int16 -} - -const ( - POLLIN = 0x1 - POLLPRI = 0x2 - POLLOUT = 0x4 POLLRDHUP = 0x800 - POLLERR = 0x8 - POLLHUP = 0x10 - POLLNVAL = 0x20 ) type Sigset_t struct { @@ -844,33 +224,6 @@ type Sigset_t struct { const _C__NSIG = 0x41 -type SignalfdSiginfo struct { - Signo uint32 - Errno int32 - Code int32 - Pid uint32 - Uid uint32 - Fd int32 - Tid uint32 - Band uint32 - Overrun uint32 - Trapno uint32 - Status int32 - Int int32 - Ptr uint64 - Utime uint64 - Stime uint64 - Addr uint64 - Addr_lsb uint16 - _ uint16 - Syscall int32 - Call_addr uint64 - Arch uint32 - _ [28]uint8 -} - -const PERF_IOC_FLAG_GROUP = 0x1 - type Termios struct { Iflag uint32 Oflag uint32 
@@ -882,13 +235,6 @@ type Termios struct { Ospeed uint32 } -type Winsize struct { - Row uint16 - Col uint16 - Xpixel uint16 - Ypixel uint16 -} - type Taskstats struct { Version uint16 Ac_exitcode uint32 @@ -936,279 +282,13 @@ type Taskstats struct { Freepages_delay_total uint64 Thrashing_count uint64 Thrashing_delay_total uint64 + Ac_btime64 uint64 } -const ( - TASKSTATS_CMD_UNSPEC = 0x0 - TASKSTATS_CMD_GET = 0x1 - TASKSTATS_CMD_NEW = 0x2 - TASKSTATS_TYPE_UNSPEC = 0x0 - TASKSTATS_TYPE_PID = 0x1 - TASKSTATS_TYPE_TGID = 0x2 - TASKSTATS_TYPE_STATS = 0x3 - TASKSTATS_TYPE_AGGR_PID = 0x4 - TASKSTATS_TYPE_AGGR_TGID = 0x5 - TASKSTATS_TYPE_NULL = 0x6 - TASKSTATS_CMD_ATTR_UNSPEC = 0x0 - TASKSTATS_CMD_ATTR_PID = 0x1 - TASKSTATS_CMD_ATTR_TGID = 0x2 - TASKSTATS_CMD_ATTR_REGISTER_CPUMASK = 0x3 - TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 -) - -type CGroupStats struct { - Sleeping uint64 - Running uint64 - Stopped uint64 - Uninterruptible uint64 - Io_wait uint64 -} - -const ( - CGROUPSTATS_CMD_UNSPEC = 0x3 - CGROUPSTATS_CMD_GET = 0x4 - CGROUPSTATS_CMD_NEW = 0x5 - CGROUPSTATS_TYPE_UNSPEC = 0x0 - CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 - CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 - CGROUPSTATS_CMD_ATTR_FD = 0x1 -) - -type Genlmsghdr struct { - Cmd uint8 - Version uint8 - Reserved uint16 -} - -const ( - CTRL_CMD_UNSPEC = 0x0 - CTRL_CMD_NEWFAMILY = 0x1 - CTRL_CMD_DELFAMILY = 0x2 - CTRL_CMD_GETFAMILY = 0x3 - CTRL_CMD_NEWOPS = 0x4 - CTRL_CMD_DELOPS = 0x5 - CTRL_CMD_GETOPS = 0x6 - CTRL_CMD_NEWMCAST_GRP = 0x7 - CTRL_CMD_DELMCAST_GRP = 0x8 - CTRL_CMD_GETMCAST_GRP = 0x9 - CTRL_ATTR_UNSPEC = 0x0 - CTRL_ATTR_FAMILY_ID = 0x1 - CTRL_ATTR_FAMILY_NAME = 0x2 - CTRL_ATTR_VERSION = 0x3 - CTRL_ATTR_HDRSIZE = 0x4 - CTRL_ATTR_MAXATTR = 0x5 - CTRL_ATTR_OPS = 0x6 - CTRL_ATTR_MCAST_GROUPS = 0x7 - CTRL_ATTR_OP_UNSPEC = 0x0 - CTRL_ATTR_OP_ID = 0x1 - CTRL_ATTR_OP_FLAGS = 0x2 - CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0 - CTRL_ATTR_MCAST_GRP_NAME = 0x1 - CTRL_ATTR_MCAST_GRP_ID = 0x2 -) - type cpuMask uint64 const ( - _CPU_SETSIZE = 0x400 - _NCPUBITS = 0x40 -) - -const ( - BDADDR_BREDR = 0x0 - BDADDR_LE_PUBLIC = 0x1 - BDADDR_LE_RANDOM = 0x2 -) - -type PerfEventAttr struct { - Type uint32 - Size uint32 - Config uint64 - Sample uint64 - Sample_type uint64 - Read_format uint64 - Bits uint64 - Wakeup uint32 - Bp_type uint32 - Ext1 uint64 - Ext2 uint64 - Branch_sample_type uint64 - Sample_regs_user uint64 - Sample_stack_user uint32 - Clockid int32 - Sample_regs_intr uint64 - Aux_watermark uint32 - Sample_max_stack uint16 - _ uint16 -} - -type PerfEventMmapPage struct { - Version uint32 - Compat_version uint32 - Lock uint32 - Index uint32 - Offset int64 - Time_enabled uint64 - Time_running uint64 - Capabilities uint64 - Pmc_width uint16 - Time_shift uint16 - Time_mult uint32 - Time_offset uint64 - Time_zero uint64 - Size uint32 - _ [948]uint8 - Data_head uint64 - Data_tail uint64 - Data_offset uint64 - Data_size uint64 - Aux_head uint64 - Aux_tail uint64 - Aux_offset uint64 - Aux_size uint64 -} - -const ( - PerfBitDisabled uint64 = CBitFieldMaskBit0 - PerfBitInherit = CBitFieldMaskBit1 - PerfBitPinned = CBitFieldMaskBit2 - PerfBitExclusive = CBitFieldMaskBit3 - PerfBitExcludeUser = CBitFieldMaskBit4 - PerfBitExcludeKernel = CBitFieldMaskBit5 - PerfBitExcludeHv = CBitFieldMaskBit6 - PerfBitExcludeIdle = CBitFieldMaskBit7 - PerfBitMmap = CBitFieldMaskBit8 - PerfBitComm = CBitFieldMaskBit9 - PerfBitFreq = CBitFieldMaskBit10 - PerfBitInheritStat = CBitFieldMaskBit11 - PerfBitEnableOnExec = CBitFieldMaskBit12 - PerfBitTask = CBitFieldMaskBit13 - PerfBitWatermark = 
CBitFieldMaskBit14 - PerfBitPreciseIPBit1 = CBitFieldMaskBit15 - PerfBitPreciseIPBit2 = CBitFieldMaskBit16 - PerfBitMmapData = CBitFieldMaskBit17 - PerfBitSampleIDAll = CBitFieldMaskBit18 - PerfBitExcludeHost = CBitFieldMaskBit19 - PerfBitExcludeGuest = CBitFieldMaskBit20 - PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 - PerfBitExcludeCallchainUser = CBitFieldMaskBit22 - PerfBitMmap2 = CBitFieldMaskBit23 - PerfBitCommExec = CBitFieldMaskBit24 - PerfBitUseClockID = CBitFieldMaskBit25 - PerfBitContextSwitch = CBitFieldMaskBit26 -) - -const ( - PERF_TYPE_HARDWARE = 0x0 - PERF_TYPE_SOFTWARE = 0x1 - PERF_TYPE_TRACEPOINT = 0x2 - PERF_TYPE_HW_CACHE = 0x3 - PERF_TYPE_RAW = 0x4 - PERF_TYPE_BREAKPOINT = 0x5 - - PERF_COUNT_HW_CPU_CYCLES = 0x0 - PERF_COUNT_HW_INSTRUCTIONS = 0x1 - PERF_COUNT_HW_CACHE_REFERENCES = 0x2 - PERF_COUNT_HW_CACHE_MISSES = 0x3 - PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 - PERF_COUNT_HW_BRANCH_MISSES = 0x5 - PERF_COUNT_HW_BUS_CYCLES = 0x6 - PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 - PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 - PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 - - PERF_COUNT_HW_CACHE_L1D = 0x0 - PERF_COUNT_HW_CACHE_L1I = 0x1 - PERF_COUNT_HW_CACHE_LL = 0x2 - PERF_COUNT_HW_CACHE_DTLB = 0x3 - PERF_COUNT_HW_CACHE_ITLB = 0x4 - PERF_COUNT_HW_CACHE_BPU = 0x5 - PERF_COUNT_HW_CACHE_NODE = 0x6 - - PERF_COUNT_HW_CACHE_OP_READ = 0x0 - PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 - PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 - - PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 - PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 - - PERF_COUNT_SW_CPU_CLOCK = 0x0 - PERF_COUNT_SW_TASK_CLOCK = 0x1 - PERF_COUNT_SW_PAGE_FAULTS = 0x2 - PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 - PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 - PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 - PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 - PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 - PERF_COUNT_SW_EMULATION_FAULTS = 0x8 - PERF_COUNT_SW_DUMMY = 0x9 - PERF_COUNT_SW_BPF_OUTPUT = 0xa - - PERF_SAMPLE_IP = 0x1 - PERF_SAMPLE_TID = 0x2 - PERF_SAMPLE_TIME = 0x4 - PERF_SAMPLE_ADDR = 0x8 - PERF_SAMPLE_READ = 0x10 - PERF_SAMPLE_CALLCHAIN = 0x20 - PERF_SAMPLE_ID = 0x40 - PERF_SAMPLE_CPU = 0x80 - PERF_SAMPLE_PERIOD = 0x100 - PERF_SAMPLE_STREAM_ID = 0x200 - PERF_SAMPLE_RAW = 0x400 - PERF_SAMPLE_BRANCH_STACK = 0x800 - - PERF_SAMPLE_BRANCH_USER = 0x1 - PERF_SAMPLE_BRANCH_KERNEL = 0x2 - PERF_SAMPLE_BRANCH_HV = 0x4 - PERF_SAMPLE_BRANCH_ANY = 0x8 - PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 - PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 - PERF_SAMPLE_BRANCH_IND_CALL = 0x40 - PERF_SAMPLE_BRANCH_ABORT_TX = 0x80 - PERF_SAMPLE_BRANCH_IN_TX = 0x100 - PERF_SAMPLE_BRANCH_NO_TX = 0x200 - PERF_SAMPLE_BRANCH_COND = 0x400 - PERF_SAMPLE_BRANCH_CALL_STACK = 0x800 - PERF_SAMPLE_BRANCH_IND_JUMP = 0x1000 - PERF_SAMPLE_BRANCH_CALL = 0x2000 - PERF_SAMPLE_BRANCH_NO_FLAGS = 0x4000 - PERF_SAMPLE_BRANCH_NO_CYCLES = 0x8000 - PERF_SAMPLE_BRANCH_TYPE_SAVE = 0x10000 - - PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 - PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 - PERF_FORMAT_ID = 0x4 - PERF_FORMAT_GROUP = 0x8 - - PERF_RECORD_MMAP = 0x1 - PERF_RECORD_LOST = 0x2 - PERF_RECORD_COMM = 0x3 - PERF_RECORD_EXIT = 0x4 - PERF_RECORD_THROTTLE = 0x5 - PERF_RECORD_UNTHROTTLE = 0x6 - PERF_RECORD_FORK = 0x7 - PERF_RECORD_READ = 0x8 - PERF_RECORD_SAMPLE = 0x9 - PERF_RECORD_MMAP2 = 0xa - PERF_RECORD_AUX = 0xb - PERF_RECORD_ITRACE_START = 0xc - PERF_RECORD_LOST_SAMPLES = 0xd - PERF_RECORD_SWITCH = 0xe - PERF_RECORD_SWITCH_CPU_WIDE = 0xf - PERF_RECORD_NAMESPACES = 0x10 - - PERF_CONTEXT_HV = -0x20 - PERF_CONTEXT_KERNEL = -0x80 - PERF_CONTEXT_USER = -0x200 - - PERF_CONTEXT_GUEST = 
-0x800 - PERF_CONTEXT_GUEST_KERNEL = -0x880 - PERF_CONTEXT_GUEST_USER = -0xa00 - - PERF_FLAG_FD_NO_GROUP = 0x1 - PERF_FLAG_FD_OUTPUT = 0x2 - PERF_FLAG_PID_CGROUP = 0x4 - PERF_FLAG_FD_CLOEXEC = 0x8 + _NCPUBITS = 0x40 ) const ( @@ -1284,22 +364,6 @@ type SockaddrStorage struct { _ uint64 } -type TCPMD5Sig struct { - Addr SockaddrStorage - Flags uint8 - Prefixlen uint8 - Keylen uint16 - _ uint32 - Key [80]uint8 -} - -type HDDriveCmdHdr struct { - Command uint8 - Number uint8 - Feature uint8 - Count uint8 -} - type HDGeometry struct { Heads uint8 Sectors uint8 @@ -1307,88 +371,6 @@ type HDGeometry struct { Start uint64 } -type HDDriveID struct { - Config uint16 - Cyls uint16 - Reserved2 uint16 - Heads uint16 - Track_bytes uint16 - Sector_bytes uint16 - Sectors uint16 - Vendor0 uint16 - Vendor1 uint16 - Vendor2 uint16 - Serial_no [20]uint8 - Buf_type uint16 - Buf_size uint16 - Ecc_bytes uint16 - Fw_rev [8]uint8 - Model [40]uint8 - Max_multsect uint8 - Vendor3 uint8 - Dword_io uint16 - Vendor4 uint8 - Capability uint8 - Reserved50 uint16 - Vendor5 uint8 - TPIO uint8 - Vendor6 uint8 - TDMA uint8 - Field_valid uint16 - Cur_cyls uint16 - Cur_heads uint16 - Cur_sectors uint16 - Cur_capacity0 uint16 - Cur_capacity1 uint16 - Multsect uint8 - Multsect_valid uint8 - Lba_capacity uint32 - Dma_1word uint16 - Dma_mword uint16 - Eide_pio_modes uint16 - Eide_dma_min uint16 - Eide_dma_time uint16 - Eide_pio uint16 - Eide_pio_iordy uint16 - Words69_70 [2]uint16 - Words71_74 [4]uint16 - Queue_depth uint16 - Words76_79 [4]uint16 - Major_rev_num uint16 - Minor_rev_num uint16 - Command_set_1 uint16 - Command_set_2 uint16 - Cfsse uint16 - Cfs_enable_1 uint16 - Cfs_enable_2 uint16 - Csf_default uint16 - Dma_ultra uint16 - Trseuc uint16 - TrsEuc uint16 - CurAPMvalues uint16 - Mprc uint16 - Hw_config uint16 - Acoustic uint16 - Msrqs uint16 - Sxfert uint16 - Sal uint16 - Spg uint32 - Lba_capacity_2 uint64 - Words104_125 [22]uint16 - Last_lun uint16 - Word127 uint16 - Dlf uint16 - Csfo uint16 - Words130_155 [26]uint16 - Word156 uint16 - Words157_159 [3]uint16 - Cfa_power uint16 - Words161_175 [15]uint16 - Words176_205 [30]uint16 - Words206_254 [49]uint16 - Integrity_word uint16 -} - type Statfs_t struct { Type int64 Bsize int64 @@ -1404,18 +386,6 @@ type Statfs_t struct { Spare [4]int64 } -const ( - ST_MANDLOCK = 0x40 - ST_NOATIME = 0x400 - ST_NODEV = 0x4 - ST_NODIRATIME = 0x800 - ST_NOEXEC = 0x8 - ST_NOSUID = 0x2 - ST_RDONLY = 0x1 - ST_RELATIME = 0x1000 - ST_SYNCHRONOUS = 0x10 -) - type TpacketHdr struct { Status uint64 Len uint32 @@ -1427,589 +397,10 @@ type TpacketHdr struct { _ [4]byte } -type Tpacket2Hdr struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Sec uint32 - Nsec uint32 - Vlan_tci uint16 - Vlan_tpid uint16 - _ [4]uint8 -} - -type Tpacket3Hdr struct { - Next_offset uint32 - Sec uint32 - Nsec uint32 - Snaplen uint32 - Len uint32 - Status uint32 - Mac uint16 - Net uint16 - Hv1 TpacketHdrVariant1 - _ [8]uint8 -} - -type TpacketHdrVariant1 struct { - Rxhash uint32 - Vlan_tci uint32 - Vlan_tpid uint16 - _ uint16 -} - -type TpacketBlockDesc struct { - Version uint32 - To_priv uint32 - Hdr [40]byte -} - -type TpacketBDTS struct { - Sec uint32 - Usec uint32 -} - -type TpacketHdrV1 struct { - Block_status uint32 - Num_pkts uint32 - Offset_to_first_pkt uint32 - Blk_len uint32 - Seq_num uint64 - Ts_first_pkt TpacketBDTS - Ts_last_pkt TpacketBDTS -} - -type TpacketReq struct { - Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 -} - -type TpacketReq3 struct { - 
Block_size uint32 - Block_nr uint32 - Frame_size uint32 - Frame_nr uint32 - Retire_blk_tov uint32 - Sizeof_priv uint32 - Feature_req_word uint32 -} - -type TpacketStats struct { - Packets uint32 - Drops uint32 -} - -type TpacketStatsV3 struct { - Packets uint32 - Drops uint32 - Freeze_q_cnt uint32 -} - -type TpacketAuxdata struct { - Status uint32 - Len uint32 - Snaplen uint32 - Mac uint16 - Net uint16 - Vlan_tci uint16 - Vlan_tpid uint16 -} - -const ( - TPACKET_V1 = 0x0 - TPACKET_V2 = 0x1 - TPACKET_V3 = 0x2 -) - -const ( - SizeofTpacketHdr = 0x20 - SizeofTpacket2Hdr = 0x20 - SizeofTpacket3Hdr = 0x30 - - SizeofTpacketStats = 0x8 - SizeofTpacketStatsV3 = 0xc -) - -const ( - NF_INET_PRE_ROUTING = 0x0 - NF_INET_LOCAL_IN = 0x1 - NF_INET_FORWARD = 0x2 - NF_INET_LOCAL_OUT = 0x3 - NF_INET_POST_ROUTING = 0x4 - NF_INET_NUMHOOKS = 0x5 -) - const ( - NF_NETDEV_INGRESS = 0x0 - NF_NETDEV_NUMHOOKS = 0x1 + SizeofTpacketHdr = 0x20 ) -const ( - NFPROTO_UNSPEC = 0x0 - NFPROTO_INET = 0x1 - NFPROTO_IPV4 = 0x2 - NFPROTO_ARP = 0x3 - NFPROTO_NETDEV = 0x5 - NFPROTO_BRIDGE = 0x7 - NFPROTO_IPV6 = 0xa - NFPROTO_DECNET = 0xc - NFPROTO_NUMPROTO = 0xd -) - -type Nfgenmsg struct { - Nfgen_family uint8 - Version uint8 - Res_id uint16 -} - -const ( - NFNL_BATCH_UNSPEC = 0x0 - NFNL_BATCH_GENID = 0x1 -) - -const ( - NFT_REG_VERDICT = 0x0 - NFT_REG_1 = 0x1 - NFT_REG_2 = 0x2 - NFT_REG_3 = 0x3 - NFT_REG_4 = 0x4 - NFT_REG32_00 = 0x8 - NFT_REG32_01 = 0x9 - NFT_REG32_02 = 0xa - NFT_REG32_03 = 0xb - NFT_REG32_04 = 0xc - NFT_REG32_05 = 0xd - NFT_REG32_06 = 0xe - NFT_REG32_07 = 0xf - NFT_REG32_08 = 0x10 - NFT_REG32_09 = 0x11 - NFT_REG32_10 = 0x12 - NFT_REG32_11 = 0x13 - NFT_REG32_12 = 0x14 - NFT_REG32_13 = 0x15 - NFT_REG32_14 = 0x16 - NFT_REG32_15 = 0x17 - NFT_CONTINUE = -0x1 - NFT_BREAK = -0x2 - NFT_JUMP = -0x3 - NFT_GOTO = -0x4 - NFT_RETURN = -0x5 - NFT_MSG_NEWTABLE = 0x0 - NFT_MSG_GETTABLE = 0x1 - NFT_MSG_DELTABLE = 0x2 - NFT_MSG_NEWCHAIN = 0x3 - NFT_MSG_GETCHAIN = 0x4 - NFT_MSG_DELCHAIN = 0x5 - NFT_MSG_NEWRULE = 0x6 - NFT_MSG_GETRULE = 0x7 - NFT_MSG_DELRULE = 0x8 - NFT_MSG_NEWSET = 0x9 - NFT_MSG_GETSET = 0xa - NFT_MSG_DELSET = 0xb - NFT_MSG_NEWSETELEM = 0xc - NFT_MSG_GETSETELEM = 0xd - NFT_MSG_DELSETELEM = 0xe - NFT_MSG_NEWGEN = 0xf - NFT_MSG_GETGEN = 0x10 - NFT_MSG_TRACE = 0x11 - NFT_MSG_NEWOBJ = 0x12 - NFT_MSG_GETOBJ = 0x13 - NFT_MSG_DELOBJ = 0x14 - NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 - NFTA_LIST_UNPEC = 0x0 - NFTA_LIST_ELEM = 0x1 - NFTA_HOOK_UNSPEC = 0x0 - NFTA_HOOK_HOOKNUM = 0x1 - NFTA_HOOK_PRIORITY = 0x2 - NFTA_HOOK_DEV = 0x3 - NFT_TABLE_F_DORMANT = 0x1 - NFTA_TABLE_UNSPEC = 0x0 - NFTA_TABLE_NAME = 0x1 - NFTA_TABLE_FLAGS = 0x2 - NFTA_TABLE_USE = 0x3 - NFTA_CHAIN_UNSPEC = 0x0 - NFTA_CHAIN_TABLE = 0x1 - NFTA_CHAIN_HANDLE = 0x2 - NFTA_CHAIN_NAME = 0x3 - NFTA_CHAIN_HOOK = 0x4 - NFTA_CHAIN_POLICY = 0x5 - NFTA_CHAIN_USE = 0x6 - NFTA_CHAIN_TYPE = 0x7 - NFTA_CHAIN_COUNTERS = 0x8 - NFTA_CHAIN_PAD = 0x9 - NFTA_RULE_UNSPEC = 0x0 - NFTA_RULE_TABLE = 0x1 - NFTA_RULE_CHAIN = 0x2 - NFTA_RULE_HANDLE = 0x3 - NFTA_RULE_EXPRESSIONS = 0x4 - NFTA_RULE_COMPAT = 0x5 - NFTA_RULE_POSITION = 0x6 - NFTA_RULE_USERDATA = 0x7 - NFTA_RULE_PAD = 0x8 - NFTA_RULE_ID = 0x9 - NFT_RULE_COMPAT_F_INV = 0x2 - NFT_RULE_COMPAT_F_MASK = 0x2 - NFTA_RULE_COMPAT_UNSPEC = 0x0 - NFTA_RULE_COMPAT_PROTO = 0x1 - NFTA_RULE_COMPAT_FLAGS = 0x2 - NFT_SET_ANONYMOUS = 0x1 - NFT_SET_CONSTANT = 0x2 - NFT_SET_INTERVAL = 0x4 - NFT_SET_MAP = 0x8 - NFT_SET_TIMEOUT = 0x10 - NFT_SET_EVAL = 0x20 - NFT_SET_OBJECT = 0x40 - NFT_SET_POL_PERFORMANCE = 0x0 - NFT_SET_POL_MEMORY = 
0x1 - NFTA_SET_DESC_UNSPEC = 0x0 - NFTA_SET_DESC_SIZE = 0x1 - NFTA_SET_UNSPEC = 0x0 - NFTA_SET_TABLE = 0x1 - NFTA_SET_NAME = 0x2 - NFTA_SET_FLAGS = 0x3 - NFTA_SET_KEY_TYPE = 0x4 - NFTA_SET_KEY_LEN = 0x5 - NFTA_SET_DATA_TYPE = 0x6 - NFTA_SET_DATA_LEN = 0x7 - NFTA_SET_POLICY = 0x8 - NFTA_SET_DESC = 0x9 - NFTA_SET_ID = 0xa - NFTA_SET_TIMEOUT = 0xb - NFTA_SET_GC_INTERVAL = 0xc - NFTA_SET_USERDATA = 0xd - NFTA_SET_PAD = 0xe - NFTA_SET_OBJ_TYPE = 0xf - NFT_SET_ELEM_INTERVAL_END = 0x1 - NFTA_SET_ELEM_UNSPEC = 0x0 - NFTA_SET_ELEM_KEY = 0x1 - NFTA_SET_ELEM_DATA = 0x2 - NFTA_SET_ELEM_FLAGS = 0x3 - NFTA_SET_ELEM_TIMEOUT = 0x4 - NFTA_SET_ELEM_EXPIRATION = 0x5 - NFTA_SET_ELEM_USERDATA = 0x6 - NFTA_SET_ELEM_EXPR = 0x7 - NFTA_SET_ELEM_PAD = 0x8 - NFTA_SET_ELEM_OBJREF = 0x9 - NFTA_SET_ELEM_LIST_UNSPEC = 0x0 - NFTA_SET_ELEM_LIST_TABLE = 0x1 - NFTA_SET_ELEM_LIST_SET = 0x2 - NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 - NFTA_SET_ELEM_LIST_SET_ID = 0x4 - NFT_DATA_VALUE = 0x0 - NFT_DATA_VERDICT = 0xffffff00 - NFTA_DATA_UNSPEC = 0x0 - NFTA_DATA_VALUE = 0x1 - NFTA_DATA_VERDICT = 0x2 - NFTA_VERDICT_UNSPEC = 0x0 - NFTA_VERDICT_CODE = 0x1 - NFTA_VERDICT_CHAIN = 0x2 - NFTA_EXPR_UNSPEC = 0x0 - NFTA_EXPR_NAME = 0x1 - NFTA_EXPR_DATA = 0x2 - NFTA_IMMEDIATE_UNSPEC = 0x0 - NFTA_IMMEDIATE_DREG = 0x1 - NFTA_IMMEDIATE_DATA = 0x2 - NFTA_BITWISE_UNSPEC = 0x0 - NFTA_BITWISE_SREG = 0x1 - NFTA_BITWISE_DREG = 0x2 - NFTA_BITWISE_LEN = 0x3 - NFTA_BITWISE_MASK = 0x4 - NFTA_BITWISE_XOR = 0x5 - NFT_BYTEORDER_NTOH = 0x0 - NFT_BYTEORDER_HTON = 0x1 - NFTA_BYTEORDER_UNSPEC = 0x0 - NFTA_BYTEORDER_SREG = 0x1 - NFTA_BYTEORDER_DREG = 0x2 - NFTA_BYTEORDER_OP = 0x3 - NFTA_BYTEORDER_LEN = 0x4 - NFTA_BYTEORDER_SIZE = 0x5 - NFT_CMP_EQ = 0x0 - NFT_CMP_NEQ = 0x1 - NFT_CMP_LT = 0x2 - NFT_CMP_LTE = 0x3 - NFT_CMP_GT = 0x4 - NFT_CMP_GTE = 0x5 - NFTA_CMP_UNSPEC = 0x0 - NFTA_CMP_SREG = 0x1 - NFTA_CMP_OP = 0x2 - NFTA_CMP_DATA = 0x3 - NFT_RANGE_EQ = 0x0 - NFT_RANGE_NEQ = 0x1 - NFTA_RANGE_UNSPEC = 0x0 - NFTA_RANGE_SREG = 0x1 - NFTA_RANGE_OP = 0x2 - NFTA_RANGE_FROM_DATA = 0x3 - NFTA_RANGE_TO_DATA = 0x4 - NFT_LOOKUP_F_INV = 0x1 - NFTA_LOOKUP_UNSPEC = 0x0 - NFTA_LOOKUP_SET = 0x1 - NFTA_LOOKUP_SREG = 0x2 - NFTA_LOOKUP_DREG = 0x3 - NFTA_LOOKUP_SET_ID = 0x4 - NFTA_LOOKUP_FLAGS = 0x5 - NFT_DYNSET_OP_ADD = 0x0 - NFT_DYNSET_OP_UPDATE = 0x1 - NFT_DYNSET_F_INV = 0x1 - NFTA_DYNSET_UNSPEC = 0x0 - NFTA_DYNSET_SET_NAME = 0x1 - NFTA_DYNSET_SET_ID = 0x2 - NFTA_DYNSET_OP = 0x3 - NFTA_DYNSET_SREG_KEY = 0x4 - NFTA_DYNSET_SREG_DATA = 0x5 - NFTA_DYNSET_TIMEOUT = 0x6 - NFTA_DYNSET_EXPR = 0x7 - NFTA_DYNSET_PAD = 0x8 - NFTA_DYNSET_FLAGS = 0x9 - NFT_PAYLOAD_LL_HEADER = 0x0 - NFT_PAYLOAD_NETWORK_HEADER = 0x1 - NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 - NFT_PAYLOAD_CSUM_NONE = 0x0 - NFT_PAYLOAD_CSUM_INET = 0x1 - NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 - NFTA_PAYLOAD_UNSPEC = 0x0 - NFTA_PAYLOAD_DREG = 0x1 - NFTA_PAYLOAD_BASE = 0x2 - NFTA_PAYLOAD_OFFSET = 0x3 - NFTA_PAYLOAD_LEN = 0x4 - NFTA_PAYLOAD_SREG = 0x5 - NFTA_PAYLOAD_CSUM_TYPE = 0x6 - NFTA_PAYLOAD_CSUM_OFFSET = 0x7 - NFTA_PAYLOAD_CSUM_FLAGS = 0x8 - NFT_EXTHDR_F_PRESENT = 0x1 - NFT_EXTHDR_OP_IPV6 = 0x0 - NFT_EXTHDR_OP_TCPOPT = 0x1 - NFTA_EXTHDR_UNSPEC = 0x0 - NFTA_EXTHDR_DREG = 0x1 - NFTA_EXTHDR_TYPE = 0x2 - NFTA_EXTHDR_OFFSET = 0x3 - NFTA_EXTHDR_LEN = 0x4 - NFTA_EXTHDR_FLAGS = 0x5 - NFTA_EXTHDR_OP = 0x6 - NFTA_EXTHDR_SREG = 0x7 - NFT_META_LEN = 0x0 - NFT_META_PROTOCOL = 0x1 - NFT_META_PRIORITY = 0x2 - NFT_META_MARK = 0x3 - NFT_META_IIF = 0x4 - NFT_META_OIF = 0x5 - NFT_META_IIFNAME = 0x6 - NFT_META_OIFNAME = 0x7 - NFT_META_IIFTYPE = 0x8 - 
NFT_META_OIFTYPE = 0x9 - NFT_META_SKUID = 0xa - NFT_META_SKGID = 0xb - NFT_META_NFTRACE = 0xc - NFT_META_RTCLASSID = 0xd - NFT_META_SECMARK = 0xe - NFT_META_NFPROTO = 0xf - NFT_META_L4PROTO = 0x10 - NFT_META_BRI_IIFNAME = 0x11 - NFT_META_BRI_OIFNAME = 0x12 - NFT_META_PKTTYPE = 0x13 - NFT_META_CPU = 0x14 - NFT_META_IIFGROUP = 0x15 - NFT_META_OIFGROUP = 0x16 - NFT_META_CGROUP = 0x17 - NFT_META_PRANDOM = 0x18 - NFT_RT_CLASSID = 0x0 - NFT_RT_NEXTHOP4 = 0x1 - NFT_RT_NEXTHOP6 = 0x2 - NFT_RT_TCPMSS = 0x3 - NFT_HASH_JENKINS = 0x0 - NFT_HASH_SYM = 0x1 - NFTA_HASH_UNSPEC = 0x0 - NFTA_HASH_SREG = 0x1 - NFTA_HASH_DREG = 0x2 - NFTA_HASH_LEN = 0x3 - NFTA_HASH_MODULUS = 0x4 - NFTA_HASH_SEED = 0x5 - NFTA_HASH_OFFSET = 0x6 - NFTA_HASH_TYPE = 0x7 - NFTA_META_UNSPEC = 0x0 - NFTA_META_DREG = 0x1 - NFTA_META_KEY = 0x2 - NFTA_META_SREG = 0x3 - NFTA_RT_UNSPEC = 0x0 - NFTA_RT_DREG = 0x1 - NFTA_RT_KEY = 0x2 - NFT_CT_STATE = 0x0 - NFT_CT_DIRECTION = 0x1 - NFT_CT_STATUS = 0x2 - NFT_CT_MARK = 0x3 - NFT_CT_SECMARK = 0x4 - NFT_CT_EXPIRATION = 0x5 - NFT_CT_HELPER = 0x6 - NFT_CT_L3PROTOCOL = 0x7 - NFT_CT_SRC = 0x8 - NFT_CT_DST = 0x9 - NFT_CT_PROTOCOL = 0xa - NFT_CT_PROTO_SRC = 0xb - NFT_CT_PROTO_DST = 0xc - NFT_CT_LABELS = 0xd - NFT_CT_PKTS = 0xe - NFT_CT_BYTES = 0xf - NFT_CT_AVGPKT = 0x10 - NFT_CT_ZONE = 0x11 - NFT_CT_EVENTMASK = 0x12 - NFTA_CT_UNSPEC = 0x0 - NFTA_CT_DREG = 0x1 - NFTA_CT_KEY = 0x2 - NFTA_CT_DIRECTION = 0x3 - NFTA_CT_SREG = 0x4 - NFT_LIMIT_PKTS = 0x0 - NFT_LIMIT_PKT_BYTES = 0x1 - NFT_LIMIT_F_INV = 0x1 - NFTA_LIMIT_UNSPEC = 0x0 - NFTA_LIMIT_RATE = 0x1 - NFTA_LIMIT_UNIT = 0x2 - NFTA_LIMIT_BURST = 0x3 - NFTA_LIMIT_TYPE = 0x4 - NFTA_LIMIT_FLAGS = 0x5 - NFTA_LIMIT_PAD = 0x6 - NFTA_COUNTER_UNSPEC = 0x0 - NFTA_COUNTER_BYTES = 0x1 - NFTA_COUNTER_PACKETS = 0x2 - NFTA_COUNTER_PAD = 0x3 - NFTA_LOG_UNSPEC = 0x0 - NFTA_LOG_GROUP = 0x1 - NFTA_LOG_PREFIX = 0x2 - NFTA_LOG_SNAPLEN = 0x3 - NFTA_LOG_QTHRESHOLD = 0x4 - NFTA_LOG_LEVEL = 0x5 - NFTA_LOG_FLAGS = 0x6 - NFTA_QUEUE_UNSPEC = 0x0 - NFTA_QUEUE_NUM = 0x1 - NFTA_QUEUE_TOTAL = 0x2 - NFTA_QUEUE_FLAGS = 0x3 - NFTA_QUEUE_SREG_QNUM = 0x4 - NFT_QUOTA_F_INV = 0x1 - NFT_QUOTA_F_DEPLETED = 0x2 - NFTA_QUOTA_UNSPEC = 0x0 - NFTA_QUOTA_BYTES = 0x1 - NFTA_QUOTA_FLAGS = 0x2 - NFTA_QUOTA_PAD = 0x3 - NFTA_QUOTA_CONSUMED = 0x4 - NFT_REJECT_ICMP_UNREACH = 0x0 - NFT_REJECT_TCP_RST = 0x1 - NFT_REJECT_ICMPX_UNREACH = 0x2 - NFT_REJECT_ICMPX_NO_ROUTE = 0x0 - NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 - NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 - NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 - NFTA_REJECT_UNSPEC = 0x0 - NFTA_REJECT_TYPE = 0x1 - NFTA_REJECT_ICMP_CODE = 0x2 - NFT_NAT_SNAT = 0x0 - NFT_NAT_DNAT = 0x1 - NFTA_NAT_UNSPEC = 0x0 - NFTA_NAT_TYPE = 0x1 - NFTA_NAT_FAMILY = 0x2 - NFTA_NAT_REG_ADDR_MIN = 0x3 - NFTA_NAT_REG_ADDR_MAX = 0x4 - NFTA_NAT_REG_PROTO_MIN = 0x5 - NFTA_NAT_REG_PROTO_MAX = 0x6 - NFTA_NAT_FLAGS = 0x7 - NFTA_MASQ_UNSPEC = 0x0 - NFTA_MASQ_FLAGS = 0x1 - NFTA_MASQ_REG_PROTO_MIN = 0x2 - NFTA_MASQ_REG_PROTO_MAX = 0x3 - NFTA_REDIR_UNSPEC = 0x0 - NFTA_REDIR_REG_PROTO_MIN = 0x1 - NFTA_REDIR_REG_PROTO_MAX = 0x2 - NFTA_REDIR_FLAGS = 0x3 - NFTA_DUP_UNSPEC = 0x0 - NFTA_DUP_SREG_ADDR = 0x1 - NFTA_DUP_SREG_DEV = 0x2 - NFTA_FWD_UNSPEC = 0x0 - NFTA_FWD_SREG_DEV = 0x1 - NFTA_OBJREF_UNSPEC = 0x0 - NFTA_OBJREF_IMM_TYPE = 0x1 - NFTA_OBJREF_IMM_NAME = 0x2 - NFTA_OBJREF_SET_SREG = 0x3 - NFTA_OBJREF_SET_NAME = 0x4 - NFTA_OBJREF_SET_ID = 0x5 - NFTA_GEN_UNSPEC = 0x0 - NFTA_GEN_ID = 0x1 - NFTA_GEN_PROC_PID = 0x2 - NFTA_GEN_PROC_NAME = 0x3 - NFTA_FIB_UNSPEC = 0x0 - NFTA_FIB_DREG = 0x1 - NFTA_FIB_RESULT = 0x2 - 
NFTA_FIB_FLAGS = 0x3 - NFT_FIB_RESULT_UNSPEC = 0x0 - NFT_FIB_RESULT_OIF = 0x1 - NFT_FIB_RESULT_OIFNAME = 0x2 - NFT_FIB_RESULT_ADDRTYPE = 0x3 - NFTA_FIB_F_SADDR = 0x1 - NFTA_FIB_F_DADDR = 0x2 - NFTA_FIB_F_MARK = 0x4 - NFTA_FIB_F_IIF = 0x8 - NFTA_FIB_F_OIF = 0x10 - NFTA_FIB_F_PRESENT = 0x20 - NFTA_CT_HELPER_UNSPEC = 0x0 - NFTA_CT_HELPER_NAME = 0x1 - NFTA_CT_HELPER_L3PROTO = 0x2 - NFTA_CT_HELPER_L4PROTO = 0x3 - NFTA_OBJ_UNSPEC = 0x0 - NFTA_OBJ_TABLE = 0x1 - NFTA_OBJ_NAME = 0x2 - NFTA_OBJ_TYPE = 0x3 - NFTA_OBJ_DATA = 0x4 - NFTA_OBJ_USE = 0x5 - NFTA_TRACE_UNSPEC = 0x0 - NFTA_TRACE_TABLE = 0x1 - NFTA_TRACE_CHAIN = 0x2 - NFTA_TRACE_RULE_HANDLE = 0x3 - NFTA_TRACE_TYPE = 0x4 - NFTA_TRACE_VERDICT = 0x5 - NFTA_TRACE_ID = 0x6 - NFTA_TRACE_LL_HEADER = 0x7 - NFTA_TRACE_NETWORK_HEADER = 0x8 - NFTA_TRACE_TRANSPORT_HEADER = 0x9 - NFTA_TRACE_IIF = 0xa - NFTA_TRACE_IIFTYPE = 0xb - NFTA_TRACE_OIF = 0xc - NFTA_TRACE_OIFTYPE = 0xd - NFTA_TRACE_MARK = 0xe - NFTA_TRACE_NFPROTO = 0xf - NFTA_TRACE_POLICY = 0x10 - NFTA_TRACE_PAD = 0x11 - NFT_TRACETYPE_UNSPEC = 0x0 - NFT_TRACETYPE_POLICY = 0x1 - NFT_TRACETYPE_RETURN = 0x2 - NFT_TRACETYPE_RULE = 0x3 - NFTA_NG_UNSPEC = 0x0 - NFTA_NG_DREG = 0x1 - NFTA_NG_MODULUS = 0x2 - NFTA_NG_TYPE = 0x3 - NFTA_NG_OFFSET = 0x4 - NFT_NG_INCREMENTAL = 0x0 - NFT_NG_RANDOM = 0x1 -) - -type RTCTime struct { - Sec int32 - Min int32 - Hour int32 - Mday int32 - Mon int32 - Year int32 - Wday int32 - Yday int32 - Isdst int32 -} - -type RTCWkAlrm struct { - Enabled uint8 - Pending uint8 - Time RTCTime -} - type RTCPLLInfo struct { Ctrl int32 Value int32 @@ -2020,13 +411,6 @@ type RTCPLLInfo struct { Clock int64 } -type BlkpgIoctlArg struct { - Op int32 - Flags int32 - Datalen int32 - Data *byte -} - type BlkpgPartition struct { Start int64 Length int64 @@ -2037,168 +421,18 @@ type BlkpgPartition struct { } const ( - BLKPG = 0x20001269 - BLKPG_ADD_PARTITION = 0x1 - BLKPG_DEL_PARTITION = 0x2 - BLKPG_RESIZE_PARTITION = 0x3 + BLKPG = 0x20001269 ) -const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 -) - -type XDPRingOffset struct { - Producer uint64 - Consumer uint64 - Desc uint64 -} - -type XDPMmapOffsets struct { - Rx XDPRingOffset - Tx XDPRingOffset - Fr XDPRingOffset - Cr XDPRingOffset -} - type XDPUmemReg struct { Addr uint64 Len uint64 Size uint32 Headroom uint32 + Flags uint32 + _ [4]byte } -type XDPStatistics struct { - Rx_dropped uint64 - Rx_invalid_descs uint64 - Tx_invalid_descs uint64 -} - -type XDPDesc struct { - Addr uint64 - Len uint32 - Options uint32 -} - -const ( - NCSI_CMD_UNSPEC = 0x0 - NCSI_CMD_PKG_INFO = 0x1 - NCSI_CMD_SET_INTERFACE = 0x2 - NCSI_CMD_CLEAR_INTERFACE = 0x3 - NCSI_ATTR_UNSPEC = 0x0 - NCSI_ATTR_IFINDEX = 0x1 - NCSI_ATTR_PACKAGE_LIST = 0x2 - NCSI_ATTR_PACKAGE_ID = 0x3 - NCSI_ATTR_CHANNEL_ID = 0x4 - NCSI_PKG_ATTR_UNSPEC = 0x0 - NCSI_PKG_ATTR = 0x1 - NCSI_PKG_ATTR_ID = 0x2 - NCSI_PKG_ATTR_FORCED = 0x3 - NCSI_PKG_ATTR_CHANNEL_LIST = 0x4 - NCSI_CHANNEL_ATTR_UNSPEC = 0x0 - NCSI_CHANNEL_ATTR = 0x1 - NCSI_CHANNEL_ATTR_ID = 0x2 - NCSI_CHANNEL_ATTR_VERSION_MAJOR = 0x3 - NCSI_CHANNEL_ATTR_VERSION_MINOR = 0x4 - NCSI_CHANNEL_ATTR_VERSION_STR = 0x5 - NCSI_CHANNEL_ATTR_LINK_STATE = 0x6 - NCSI_CHANNEL_ATTR_ACTIVE = 0x7 - NCSI_CHANNEL_ATTR_FORCED = 0x8 - NCSI_CHANNEL_ATTR_VLAN_LIST = 0x9 - NCSI_CHANNEL_ATTR_VLAN_ID = 0xa -) - -type ScmTimestamping struct { - Ts [3]Timespec -} - -const ( - SOF_TIMESTAMPING_TX_HARDWARE = 0x1 - SOF_TIMESTAMPING_TX_SOFTWARE = 0x2 - SOF_TIMESTAMPING_RX_HARDWARE = 0x4 - SOF_TIMESTAMPING_RX_SOFTWARE = 0x8 - 
SOF_TIMESTAMPING_SOFTWARE = 0x10 - SOF_TIMESTAMPING_SYS_HARDWARE = 0x20 - SOF_TIMESTAMPING_RAW_HARDWARE = 0x40 - SOF_TIMESTAMPING_OPT_ID = 0x80 - SOF_TIMESTAMPING_TX_SCHED = 0x100 - SOF_TIMESTAMPING_TX_ACK = 0x200 - SOF_TIMESTAMPING_OPT_CMSG = 0x400 - SOF_TIMESTAMPING_OPT_TSONLY = 0x800 - SOF_TIMESTAMPING_OPT_STATS = 0x1000 - SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 - SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 - - SOF_TIMESTAMPING_LAST = 0x4000 - SOF_TIMESTAMPING_MASK = 0x7fff - - SCM_TSTAMP_SND = 0x0 - SCM_TSTAMP_SCHED = 0x1 - SCM_TSTAMP_ACK = 0x2 -) - -type SockExtendedErr struct { - Errno uint32 - Origin uint8 - Type uint8 - Code uint8 - Pad uint8 - Info uint32 - Data uint32 -} - -type FanotifyEventMetadata struct { - Event_len uint32 - Vers uint8 - Reserved uint8 - Metadata_len uint16 - Mask uint64 - Fd int32 - Pid int32 -} - -type FanotifyResponse struct { - Fd int32 - Response uint32 -} - -const ( - CRYPTO_MSG_BASE = 0x10 - CRYPTO_MSG_NEWALG = 0x10 - CRYPTO_MSG_DELALG = 0x11 - CRYPTO_MSG_UPDATEALG = 0x12 - CRYPTO_MSG_GETALG = 0x13 - CRYPTO_MSG_DELRNG = 0x14 - CRYPTO_MSG_GETSTAT = 0x15 -) - -const ( - CRYPTOCFGA_UNSPEC = 0x0 - CRYPTOCFGA_PRIORITY_VAL = 0x1 - CRYPTOCFGA_REPORT_LARVAL = 0x2 - CRYPTOCFGA_REPORT_HASH = 0x3 - CRYPTOCFGA_REPORT_BLKCIPHER = 0x4 - CRYPTOCFGA_REPORT_AEAD = 0x5 - CRYPTOCFGA_REPORT_COMPRESS = 0x6 - CRYPTOCFGA_REPORT_RNG = 0x7 - CRYPTOCFGA_REPORT_CIPHER = 0x8 - CRYPTOCFGA_REPORT_AKCIPHER = 0x9 - CRYPTOCFGA_REPORT_KPP = 0xa - CRYPTOCFGA_REPORT_ACOMP = 0xb - CRYPTOCFGA_STAT_LARVAL = 0xc - CRYPTOCFGA_STAT_HASH = 0xd - CRYPTOCFGA_STAT_BLKCIPHER = 0xe - CRYPTOCFGA_STAT_AEAD = 0xf - CRYPTOCFGA_STAT_COMPRESS = 0x10 - CRYPTOCFGA_STAT_RNG = 0x11 - CRYPTOCFGA_STAT_CIPHER = 0x12 - CRYPTOCFGA_STAT_AKCIPHER = 0x13 - CRYPTOCFGA_STAT_KPP = 0x14 - CRYPTOCFGA_STAT_ACOMP = 0x15 -) - type CryptoUserAlg struct { Name [64]int8 Driver_name [64]int8 @@ -2329,182 +563,6 @@ type CryptoReportAcomp struct { Type [64]int8 } -const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_PROG_TYPE_UNSPEC = 
0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_ADJ_ROOM_NET = 0x0 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 -) - -type CapUserHeader struct { - Version uint32 - Pid int32 -} - -type CapUserData struct { - Effective uint32 - Permitted uint32 - Inheritable uint32 -} - -const ( - LINUX_CAPABILITY_VERSION_1 = 0x19980330 - LINUX_CAPABILITY_VERSION_2 = 0x20071026 - LINUX_CAPABILITY_VERSION_3 = 0x20080522 -) - -const ( - LO_FLAGS_READ_ONLY = 0x1 - LO_FLAGS_AUTOCLEAR = 0x4 - LO_FLAGS_PARTSCAN = 0x8 - LO_FLAGS_DIRECT_IO = 0x10 -) - type LoopInfo struct { Number int32 Device uint32 @@ -2520,38 +578,6 @@ type LoopInfo struct { Reserved [4]int8 _ [4]byte } -type LoopInfo64 struct { - Device uint64 - Inode uint64 - Rdevice uint64 - Offset uint64 - Sizelimit uint64 - Number uint32 - Encrypt_type uint32 - Encrypt_key_size uint32 - Flags uint32 - File_name 
[64]uint8 - Crypt_name [64]uint8 - Encrypt_key [32]uint8 - Init [2]uint64 -} - -type TIPCSocketAddr struct { - Ref uint32 - Node uint32 -} - -type TIPCServiceRange struct { - Type uint32 - Lower uint32 - Upper uint32 -} - -type TIPCServiceName struct { - Type uint32 - Instance uint32 - Domain uint32 -} type TIPCSubscr struct { Seq TIPCServiceRange @@ -2560,21 +586,6 @@ type TIPCSubscr struct { Handle [8]int8 } -type TIPCEvent struct { - Event uint32 - Lower uint32 - Upper uint32 - Port TIPCSocketAddr - S TIPCSubscr -} - -type TIPCGroupReq struct { - Type uint32 - Instance uint32 - Scope uint32 - Flags uint32 -} - type TIPCSIOCLNReq struct { Peer uint32 Id uint32 @@ -2585,22 +596,3 @@ type TIPCSIOCNodeIDReq struct { Peer uint32 Id [16]int8 } - -const ( - TIPC_CLUSTER_SCOPE = 0x2 - TIPC_NODE_SCOPE = 0x3 -) - -const ( - SYSLOG_ACTION_CLOSE = 0 - SYSLOG_ACTION_OPEN = 1 - SYSLOG_ACTION_READ = 2 - SYSLOG_ACTION_READ_ALL = 3 - SYSLOG_ACTION_READ_CLEAR = 4 - SYSLOG_ACTION_CLEAR = 5 - SYSLOG_ACTION_CONSOLE_OFF = 6 - SYSLOG_ACTION_CONSOLE_ON = 7 - SYSLOG_ACTION_CONSOLE_LEVEL = 8 - SYSLOG_ACTION_SIZE_UNREAD = 9 - SYSLOG_ACTION_SIZE_BUFFER = 10 -) diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index 86736ab6e..a89100c08 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -78,6 +78,33 @@ type Stat_t struct { type Statfs_t [0]byte +type Statvfs_t struct { + Flag uint32 + Bsize uint32 + Frsize uint32 + Iosize uint32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx Fsid + Fsid uint32 + Namemax uint32 + Owner uint32 + Spare [4]uint32 + Fstypename [32]byte + Mntonname [1024]byte + Mntfromname [1024]byte +} + type Flock_t struct { Start int64 Len int64 @@ -103,6 +130,11 @@ const ( PathMax = 0x400 ) +const ( + ST_WAIT = 0x1 + ST_NOWAIT = 0x2 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 3427811f9..289184e0b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -82,6 +82,34 @@ type Stat_t struct { type Statfs_t [0]byte +type Statvfs_t struct { + Flag uint64 + Bsize uint64 + Frsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx Fsid + Fsid uint64 + Namemax uint64 + Owner uint32 + Spare [4]uint32 + Fstypename [32]byte + Mntonname [1024]byte + Mntfromname [1024]byte + _ [4]byte +} + type Flock_t struct { Start int64 Len int64 @@ -107,6 +135,11 @@ const ( PathMax = 0x400 ) +const ( + ST_WAIT = 0x1 + ST_NOWAIT = 0x2 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 399f37a43..428c450e4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -83,6 +83,33 @@ type Stat_t struct { type Statfs_t [0]byte +type Statvfs_t struct { + Flag uint32 + Bsize uint32 + Frsize uint32 + Iosize uint32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files 
uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx Fsid + Fsid uint32 + Namemax uint32 + Owner uint32 + Spare [4]uint32 + Fstypename [32]byte + Mntonname [1024]byte + Mntfromname [1024]byte +} + type Flock_t struct { Start int64 Len int64 @@ -108,6 +135,11 @@ const ( PathMax = 0x400 ) +const ( + ST_WAIT = 0x1 + ST_NOWAIT = 0x2 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 32f0c15d9..6f1f2842c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -82,6 +82,34 @@ type Stat_t struct { type Statfs_t [0]byte +type Statvfs_t struct { + Flag uint64 + Bsize uint64 + Frsize uint64 + Iosize uint64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Bresvd uint64 + Files uint64 + Ffree uint64 + Favail uint64 + Fresvd uint64 + Syncreads uint64 + Syncwrites uint64 + Asyncreads uint64 + Asyncwrites uint64 + Fsidx Fsid + Fsid uint64 + Namemax uint64 + Owner uint32 + Spare [4]uint32 + Fstypename [32]byte + Mntonname [1024]byte + Mntfromname [1024]byte + _ [4]byte +} + type Flock_t struct { Start int64 Len int64 @@ -107,6 +135,11 @@ const ( PathMax = 0x400 ) +const ( + ST_WAIT = 0x1 + ST_NOWAIT = 0x2 +) + const ( FADV_NORMAL = 0x0 FADV_RANDOM = 0x1 diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index 8531a190f..23ed9fe51 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -211,6 +211,12 @@ type Cmsghdr struct { Type int32 } +type Inet4Pktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + type Inet6Pktinfo struct { Addr [16]byte /* in6_addr */ Ifindex uint32 @@ -236,6 +242,7 @@ const ( SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 SizeofCmsghdr = 0xc + SizeofInet4Pktinfo = 0xc SizeofInet6Pktinfo = 0x14 SizeofIPv6MTUInfo = 0x24 SizeofICMPv6Filter = 0x20 diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go index 520b9ada0..48ec64b40 100644 --- a/vendor/golang.org/x/text/transform/transform.go +++ b/vendor/golang.org/x/text/transform/transform.go @@ -648,7 +648,8 @@ func String(t Transformer, s string) (result string, n int, err error) { // Transform the remaining input, growing dst and src buffers as necessary. for { n := copy(src, s[pSrc:]) - nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s)) + atEOF := pSrc+n == len(s) + nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], atEOF) pDst += nDst pSrc += nSrc @@ -659,6 +660,9 @@ func String(t Transformer, s string) (result string, n int, err error) { dst = grow(dst, pDst) } } else if err == ErrShortSrc { + if atEOF { + return string(dst[:pDst]), pSrc, err + } if nSrc == 0 { src = grow(src, 0) } diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index 48d144008..50deb6600 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -480,15 +480,15 @@ func (s *isolatingRunSequence) resolveWeakTypes() { // Rule W1. // Changes all NSMs. 
- preceedingCharacterType := s.sos + precedingCharacterType := s.sos for i, t := range s.types { if t == NSM { - s.types[i] = preceedingCharacterType + s.types[i] = precedingCharacterType } else { if t.in(LRI, RLI, FSI, PDI) { - preceedingCharacterType = ON + precedingCharacterType = ON } - preceedingCharacterType = t + precedingCharacterType = t } } diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go index 022e3c690..16b11db53 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.13 +// +build go1.13,!go1.14 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go new file mode 100644 index 000000000..7ffa36512 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/tables12.0.0.go @@ -0,0 +1,1923 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.14 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "12.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 16896 bytes (16.50 KiB). Checksum: 6f0927067913dc6d. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 240 blocks, 15360 entries, 15360 bytes +// The third block is the zero block. 
+var bidiValues = [15360]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 
0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 
0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 
0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 
0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 
0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + 0x77e: 0x000c, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, 0x944: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 
0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa8a: 0x000c, + 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, + 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, + 0xaff: 0x0004, + // Block 0x2c, offset 0xb00 + 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, + 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, + // Block 0x2d, offset 0xb40 + 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, + 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7a: 0x000c, 0xb7b: 0x000c, + 0xb7c: 0x000c, + // Block 0x2e, offset 0xb80 + 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, + 0xb8c: 0x000c, 0xb8d: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbd8: 0x000c, 0xbd9: 0x000c, + 0xbf5: 0x000c, + 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, + 0xbfc: 0x003a, 0xbfd: 0x002a, + // Block 0x30, offset 0xc00 + 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, + 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, + 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, + // Block 0x31, offset 0xc40 + 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, + 0xc46: 0x000c, 0xc47: 0x000c, + 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, + 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, + 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, + 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, + 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, + 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, + 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, + // Block 0x32, offset 0xc80 + 0xc86: 0x000c, + // Block 0x33, offset 0xcc0 + 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, + 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, + 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c, + 0xcfd: 0x000c, 0xcfe: 0x000c, + // Block 0x34, offset 0xd00 + 0xd18: 0x000c, 0xd19: 0x000c, + 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, + 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, + // Block 0x35, offset 0xd40 + 0xd42: 0x000c, 0xd45: 0x000c, + 0xd46: 0x000c, + 0xd4d: 0x000c, + 0xd5d: 0x000c, + // Block 0x36, offset 0xd80 + 0xd9d: 0x000c, + 0xd9e: 0x000c, 0xd9f: 0x000c, + // Block 0x37, offset 0xdc0 + 0xdd0: 0x000a, 0xdd1: 0x000a, + 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, + 0xdd8: 0x000a, 0xdd9: 0x000a, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0009, + 0xe5b: 0x007a, 0xe5c: 0x006a, + // Block 0x3a, offset 0xe80 + 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, + 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, + 
0xef2: 0x000c, 0xef3: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf34: 0x000c, 0xf35: 0x000c, + 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, + 0xf3c: 0x000c, 0xf3d: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, + 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, + 0xf52: 0x000c, 0xf53: 0x000c, + 0xf5b: 0x0004, 0xf5d: 0x000c, + 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, + 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, + // Block 0x3e, offset 0xf80 + 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, + 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, + // Block 0x3f, offset 0xfc0 + 0xfc5: 0x000c, + 0xfc6: 0x000c, + 0xfe9: 0x000c, + // Block 0x40, offset 0x1000 + 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, + 0x1027: 0x000c, 0x1028: 0x000c, + 0x1032: 0x000c, + 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, + // Block 0x41, offset 0x1040 + 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, + // Block 0x42, offset 0x1080 + 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, + 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, + 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, + 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, + 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, + 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10d7: 0x000c, + 0x10d8: 0x000c, 0x10db: 0x000c, + // Block 0x44, offset 0x1100 + 0x1116: 0x000c, + 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, + 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, + 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, + 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, + 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, + 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, + 0x113c: 0x000c, 0x113f: 0x000c, + // Block 0x45, offset 0x1140 + 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c, + // Block 0x46, offset 0x1180 + 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, + 0x11b4: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, + 0x11bc: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c2: 0x000c, + 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, + 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, + 0x1222: 0x000c, 0x1223: 0x000c, + 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, + 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, + // Block 0x49, offset 0x1240 + 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, + 0x126d: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, + // Block 0x4a, offset 0x1280 + 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c, + 0x12b0: 
0x000c, 0x12b1: 0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, + 0x12b6: 0x000c, 0x12b7: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x000c, 0x12d1: 0x000c, + 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, + 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, + 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, + 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, + 0x12ed: 0x000c, + 0x12f4: 0x000c, + 0x12f8: 0x000c, 0x12f9: 0x000c, + // Block 0x4c, offset 0x1300 + 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, + 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, + 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, + 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, + 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, + 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, + 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, + 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, 0x1338: 0x000c, 0x1339: 0x000c, 0x133b: 0x000c, + 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, + // Block 0x4d, offset 0x1340 + 0x137d: 0x000a, 0x137f: 0x000a, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000a, 0x1381: 0x000a, + 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, + 0x139d: 0x000a, + 0x139e: 0x000a, 0x139f: 0x000a, + 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a, + 0x13bd: 0x000a, 0x13be: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, + 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, + 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, + 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, + 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, + 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007, + 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, + 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, + 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, + 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, + 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a, + 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 
0x0009, 0x1420: 0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, + 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, + 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b, + 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, + 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, + 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, + 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, + 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, + 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, + 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, + 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, + 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, + 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, + // Block 0x52, offset 0x1480 + 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, + 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, + 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, + 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, + 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, + 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c, + 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, + 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, + 0x14b0: 0x000c, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, + 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, + 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, + 0x14d8: 0x000a, + 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, + 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, + 0x14ee: 0x0004, + 0x14fa: 0x000a, 0x14fb: 0x000a, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, + 0x150a: 0x000a, 0x150b: 0x000a, + 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, + 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, + // Block 0x55, offset 0x1540 + 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, + 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, + 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 
0x000a, 0x156f: 0x000a, + 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, + 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, + 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, + 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, + 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + // Block 0x59, offset 0x1640 + 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 
0x1683: 0x000a, 0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, + 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, + 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a, + 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a, + 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a, + 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a, + 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x0002, 0x1749: 0x0002, 0x174a: 0x0002, 0x174b: 0x0002, + 0x174c: 0x0002, 0x174d: 0x0002, 0x174e: 0x0002, 0x174f: 0x0002, 0x1750: 0x0002, 0x1751: 0x0002, + 0x1752: 0x0002, 0x1753: 0x0002, 0x1754: 0x0002, 0x1755: 0x0002, 0x1756: 0x0002, 0x1757: 0x0002, + 0x1758: 0x0002, 0x1759: 0x0002, 0x175a: 0x0002, 0x175b: 0x0002, + // Block 0x5e, offset 0x1780 + 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a, + 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a, + 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a, + 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 0x17c4: 0x000a, 0x17c5: 0x000a, + 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x000a, 0x17c9: 0x000a, 0x17ca: 0x000a, 0x17cb: 0x000a, + 0x17cc: 0x000a, 0x17cd: 0x000a, 0x17ce: 0x000a, 0x17cf: 
0x000a, 0x17d0: 0x000a, 0x17d1: 0x000a, + 0x17d2: 0x000a, 0x17d3: 0x000a, 0x17d4: 0x000a, 0x17d5: 0x000a, 0x17d6: 0x000a, 0x17d7: 0x000a, + 0x17d8: 0x000a, 0x17d9: 0x000a, 0x17da: 0x000a, 0x17db: 0x000a, 0x17dc: 0x000a, 0x17dd: 0x000a, + 0x17de: 0x000a, 0x17df: 0x000a, 0x17e0: 0x000a, 0x17e1: 0x000a, 0x17e2: 0x000a, 0x17e3: 0x000a, + 0x17e4: 0x000a, 0x17e5: 0x000a, 0x17e6: 0x000a, 0x17e7: 0x000a, 0x17e8: 0x000a, 0x17e9: 0x000a, + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x003a, 0x1829: 0x002a, + 0x182a: 0x003a, 0x182b: 0x002a, 0x182c: 0x003a, 0x182d: 0x002a, 0x182e: 0x003a, 0x182f: 0x002a, + 0x1830: 0x003a, 0x1831: 0x002a, 0x1832: 0x003a, 0x1833: 0x002a, 0x1834: 0x003a, 0x1835: 0x002a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x009a, + 0x1846: 0x008a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x003a, 0x1867: 0x002a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x007a, 0x1884: 0x006a, 0x1885: 0x009a, + 0x1886: 0x008a, 0x1887: 0x00ba, 0x1888: 0x00aa, 0x1889: 0x009a, 0x188a: 0x008a, 0x188b: 0x007a, + 0x188c: 0x006a, 0x188d: 0x00da, 0x188e: 0x002a, 0x188f: 0x003a, 0x1890: 0x00ca, 0x1891: 0x009a, + 0x1892: 0x008a, 0x1893: 0x007a, 0x1894: 0x006a, 0x1895: 0x009a, 0x1896: 0x008a, 0x1897: 0x00ba, + 0x1898: 0x00aa, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 
0x18a4: 0x000a, 0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x000a, 0x18a9: 0x000a, + 0x18aa: 0x000a, 0x18ab: 0x000a, 0x18ac: 0x000a, 0x18ad: 0x000a, 0x18ae: 0x000a, 0x18af: 0x000a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x000a, + 0x18c6: 0x000a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a, + 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a, + 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a, + 0x18d8: 0x003a, 0x18d9: 0x002a, 0x18da: 0x003a, 0x18db: 0x002a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x003a, 0x18fd: 0x002a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x000a, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 
0x000a, + 0x197c: 0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19af: 0x000c, + 0x19b0: 0x000c, 0x19b1: 0x000c, + 0x19b9: 0x000a, 0x19ba: 0x000a, 0x19bb: 0x000a, + 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, 0x19bf: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19ff: 0x000c, + // Block 0x68, offset 0x1a00 + 0x1a20: 0x000c, 0x1a21: 0x000c, 0x1a22: 0x000c, 0x1a23: 0x000c, + 0x1a24: 0x000c, 0x1a25: 0x000c, 0x1a26: 0x000c, 0x1a27: 0x000c, 0x1a28: 0x000c, 0x1a29: 0x000c, + 0x1a2a: 0x000c, 0x1a2b: 0x000c, 0x1a2c: 0x000c, 0x1a2d: 0x000c, 0x1a2e: 0x000c, 0x1a2f: 0x000c, + 0x1a30: 0x000c, 0x1a31: 0x000c, 0x1a32: 0x000c, 0x1a33: 0x000c, 0x1a34: 0x000c, 0x1a35: 0x000c, + 0x1a36: 0x000c, 0x1a37: 0x000c, 0x1a38: 0x000c, 0x1a39: 0x000c, 0x1a3a: 0x000c, 0x1a3b: 0x000c, + 0x1a3c: 0x000c, 0x1a3d: 0x000c, 0x1a3e: 0x000c, 0x1a3f: 0x000c, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x000a, 0x1a41: 0x000a, 0x1a42: 0x000a, 0x1a43: 0x000a, 0x1a44: 0x000a, 0x1a45: 0x000a, + 0x1a46: 0x000a, 0x1a47: 0x000a, 0x1a48: 0x000a, 0x1a49: 0x000a, 0x1a4a: 0x000a, 0x1a4b: 0x000a, + 0x1a4c: 0x000a, 0x1a4d: 0x000a, 0x1a4e: 0x000a, 0x1a4f: 0x000a, 0x1a50: 0x000a, 0x1a51: 0x000a, + 0x1a52: 0x000a, 0x1a53: 0x000a, 0x1a54: 0x000a, 0x1a55: 0x000a, 0x1a56: 0x000a, 0x1a57: 0x000a, + 0x1a58: 0x000a, 0x1a59: 0x000a, 0x1a5a: 0x000a, 0x1a5b: 0x000a, 0x1a5c: 0x000a, 0x1a5d: 0x000a, + 0x1a5e: 0x000a, 0x1a5f: 0x000a, 0x1a60: 0x000a, 0x1a61: 0x000a, 0x1a62: 0x003a, 0x1a63: 0x002a, + 0x1a64: 0x003a, 0x1a65: 0x002a, 0x1a66: 0x003a, 0x1a67: 0x002a, 0x1a68: 0x003a, 0x1a69: 0x002a, + 0x1a6a: 0x000a, 0x1a6b: 0x000a, 0x1a6c: 0x000a, 0x1a6d: 0x000a, 0x1a6e: 0x000a, 0x1a6f: 0x000a, + 0x1a70: 0x000a, 0x1a71: 0x000a, 0x1a72: 0x000a, 0x1a73: 0x000a, 0x1a74: 0x000a, 0x1a75: 0x000a, + 0x1a76: 0x000a, 0x1a77: 0x000a, 0x1a78: 0x000a, 0x1a79: 0x000a, 0x1a7a: 0x000a, 0x1a7b: 0x000a, + 0x1a7c: 0x000a, 0x1a7d: 0x000a, 0x1a7e: 0x000a, 0x1a7f: 0x000a, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x000a, 0x1a81: 0x000a, 0x1a82: 0x000a, 0x1a83: 0x000a, 0x1a84: 0x000a, 0x1a85: 0x000a, + 0x1a86: 0x000a, 0x1a87: 0x000a, 0x1a88: 0x000a, 0x1a89: 0x000a, 0x1a8a: 0x000a, 0x1a8b: 0x000a, + 0x1a8c: 0x000a, 0x1a8d: 0x000a, 0x1a8e: 0x000a, 0x1a8f: 0x000a, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, 0x1acf: 0x000a, 0x1ad0: 0x000a, 0x1ad1: 0x000a, + 0x1ad2: 0x000a, 0x1ad3: 0x000a, 0x1ad4: 0x000a, 0x1ad5: 0x000a, 0x1ad6: 0x000a, 0x1ad7: 0x000a, + 0x1ad8: 0x000a, 0x1ad9: 0x000a, 0x1adb: 0x000a, 0x1adc: 0x000a, 0x1add: 0x000a, + 0x1ade: 0x000a, 0x1adf: 0x000a, 0x1ae0: 0x000a, 0x1ae1: 0x000a, 0x1ae2: 0x000a, 0x1ae3: 0x000a, + 0x1ae4: 0x000a, 0x1ae5: 0x000a, 0x1ae6: 0x000a, 0x1ae7: 0x000a, 0x1ae8: 0x000a, 0x1ae9: 0x000a, + 0x1aea: 0x000a, 0x1aeb: 0x000a, 0x1aec: 0x000a, 0x1aed: 0x000a, 0x1aee: 0x000a, 0x1aef: 0x000a, + 0x1af0: 0x000a, 0x1af1: 0x000a, 0x1af2: 0x000a, 0x1af3: 0x000a, 0x1af4: 0x000a, 0x1af5: 0x000a, + 0x1af6: 0x000a, 0x1af7: 0x000a, 0x1af8: 0x000a, 0x1af9: 0x000a, 0x1afa: 0x000a, 0x1afb: 0x000a, + 0x1afc: 0x000a, 0x1afd: 0x000a, 0x1afe: 0x000a, 0x1aff: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 
0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, 0x1b13: 0x000a, 0x1b14: 0x000a, 0x1b15: 0x000a, 0x1b16: 0x000a, 0x1b17: 0x000a, + 0x1b18: 0x000a, 0x1b19: 0x000a, 0x1b1a: 0x000a, 0x1b1b: 0x000a, 0x1b1c: 0x000a, 0x1b1d: 0x000a, + 0x1b1e: 0x000a, 0x1b1f: 0x000a, 0x1b20: 0x000a, 0x1b21: 0x000a, 0x1b22: 0x000a, 0x1b23: 0x000a, + 0x1b24: 0x000a, 0x1b25: 0x000a, 0x1b26: 0x000a, 0x1b27: 0x000a, 0x1b28: 0x000a, 0x1b29: 0x000a, + 0x1b2a: 0x000a, 0x1b2b: 0x000a, 0x1b2c: 0x000a, 0x1b2d: 0x000a, 0x1b2e: 0x000a, 0x1b2f: 0x000a, + 0x1b30: 0x000a, 0x1b31: 0x000a, 0x1b32: 0x000a, 0x1b33: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, 0x1b74: 0x000a, 0x1b75: 0x000a, + 0x1b76: 0x000a, 0x1b77: 0x000a, 0x1b78: 0x000a, 0x1b79: 0x000a, 0x1b7a: 0x000a, 0x1b7b: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x0009, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, + 0x1b88: 0x003a, 0x1b89: 0x002a, 0x1b8a: 0x003a, 0x1b8b: 0x002a, + 0x1b8c: 0x003a, 0x1b8d: 0x002a, 0x1b8e: 0x003a, 0x1b8f: 0x002a, 0x1b90: 0x003a, 0x1b91: 0x002a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x003a, 0x1b95: 0x002a, 0x1b96: 0x003a, 0x1b97: 0x002a, + 0x1b98: 0x003a, 0x1b99: 0x002a, 0x1b9a: 0x003a, 0x1b9b: 0x002a, 0x1b9c: 0x000a, 0x1b9d: 0x000a, + 0x1b9e: 0x000a, 0x1b9f: 0x000a, 0x1ba0: 0x000a, + 0x1baa: 0x000c, 0x1bab: 0x000c, 0x1bac: 0x000c, 0x1bad: 0x000c, + 0x1bb0: 0x000a, + 0x1bb6: 0x000a, 0x1bb7: 0x000a, + 0x1bbd: 0x000a, 0x1bbe: 0x000a, 0x1bbf: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bd9: 0x000c, 0x1bda: 0x000c, 0x1bdb: 0x000a, 0x1bdc: 0x000a, + 0x1be0: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c3b: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c40: 0x000a, 0x1c41: 0x000a, 0x1c42: 0x000a, 0x1c43: 0x000a, 0x1c44: 0x000a, 0x1c45: 0x000a, + 0x1c46: 0x000a, 0x1c47: 0x000a, 0x1c48: 0x000a, 0x1c49: 0x000a, 0x1c4a: 0x000a, 0x1c4b: 0x000a, + 0x1c4c: 0x000a, 0x1c4d: 0x000a, 0x1c4e: 0x000a, 0x1c4f: 0x000a, 0x1c50: 0x000a, 0x1c51: 0x000a, + 0x1c52: 0x000a, 0x1c53: 0x000a, 0x1c54: 0x000a, 0x1c55: 0x000a, 0x1c56: 0x000a, 0x1c57: 0x000a, + 0x1c58: 0x000a, 0x1c59: 0x000a, 0x1c5a: 0x000a, 0x1c5b: 0x000a, 0x1c5c: 0x000a, 0x1c5d: 0x000a, + 0x1c5e: 0x000a, 0x1c5f: 0x000a, 0x1c60: 0x000a, 0x1c61: 0x000a, 0x1c62: 0x000a, 0x1c63: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1c9d: 0x000a, + 0x1c9e: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cd0: 0x000a, 0x1cd1: 0x000a, + 0x1cd2: 0x000a, 0x1cd3: 0x000a, 0x1cd4: 0x000a, 0x1cd5: 0x000a, 0x1cd6: 0x000a, 0x1cd7: 0x000a, + 0x1cd8: 0x000a, 0x1cd9: 0x000a, 0x1cda: 0x000a, 0x1cdb: 0x000a, 0x1cdc: 0x000a, 0x1cdd: 0x000a, + 0x1cde: 0x000a, 0x1cdf: 0x000a, + 0x1cfc: 0x000a, 0x1cfd: 0x000a, 0x1cfe: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d31: 0x000a, 0x1d32: 0x000a, 0x1d33: 0x000a, 0x1d34: 0x000a, 0x1d35: 0x000a, + 0x1d36: 0x000a, 0x1d37: 0x000a, 0x1d38: 0x000a, 0x1d39: 0x000a, 0x1d3a: 0x000a, 0x1d3b: 0x000a, + 0x1d3c: 0x000a, 0x1d3d: 0x000a, 0x1d3e: 0x000a, 0x1d3f: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d4c: 0x000a, 0x1d4d: 0x000a, 
0x1d4e: 0x000a, 0x1d4f: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1db7: 0x000a, 0x1db8: 0x000a, 0x1db9: 0x000a, 0x1dba: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1dde: 0x000a, 0x1ddf: 0x000a, + 0x1dff: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e10: 0x000a, 0x1e11: 0x000a, + 0x1e12: 0x000a, 0x1e13: 0x000a, 0x1e14: 0x000a, 0x1e15: 0x000a, 0x1e16: 0x000a, 0x1e17: 0x000a, + 0x1e18: 0x000a, 0x1e19: 0x000a, 0x1e1a: 0x000a, 0x1e1b: 0x000a, 0x1e1c: 0x000a, 0x1e1d: 0x000a, + 0x1e1e: 0x000a, 0x1e1f: 0x000a, 0x1e20: 0x000a, 0x1e21: 0x000a, 0x1e22: 0x000a, 0x1e23: 0x000a, + 0x1e24: 0x000a, 0x1e25: 0x000a, 0x1e26: 0x000a, 0x1e27: 0x000a, 0x1e28: 0x000a, 0x1e29: 0x000a, + 0x1e2a: 0x000a, 0x1e2b: 0x000a, 0x1e2c: 0x000a, 0x1e2d: 0x000a, 0x1e2e: 0x000a, 0x1e2f: 0x000a, + 0x1e30: 0x000a, 0x1e31: 0x000a, 0x1e32: 0x000a, 0x1e33: 0x000a, 0x1e34: 0x000a, 0x1e35: 0x000a, + 0x1e36: 0x000a, 0x1e37: 0x000a, 0x1e38: 0x000a, 0x1e39: 0x000a, 0x1e3a: 0x000a, 0x1e3b: 0x000a, + 0x1e3c: 0x000a, 0x1e3d: 0x000a, 0x1e3e: 0x000a, 0x1e3f: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e40: 0x000a, 0x1e41: 0x000a, 0x1e42: 0x000a, 0x1e43: 0x000a, 0x1e44: 0x000a, 0x1e45: 0x000a, + 0x1e46: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e8d: 0x000a, 0x1e8e: 0x000a, 0x1e8f: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1eef: 0x000c, + 0x1ef0: 0x000c, 0x1ef1: 0x000c, 0x1ef2: 0x000c, 0x1ef3: 0x000a, 0x1ef4: 0x000c, 0x1ef5: 0x000c, + 0x1ef6: 0x000c, 0x1ef7: 0x000c, 0x1ef8: 0x000c, 0x1ef9: 0x000c, 0x1efa: 0x000c, 0x1efb: 0x000c, + 0x1efc: 0x000c, 0x1efd: 0x000c, 0x1efe: 0x000a, 0x1eff: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f1e: 0x000c, 0x1f1f: 0x000c, + // Block 0x7d, offset 0x1f40 + 0x1f70: 0x000c, 0x1f71: 0x000c, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0x000a, 0x1f81: 0x000a, 0x1f82: 0x000a, 0x1f83: 0x000a, 0x1f84: 0x000a, 0x1f85: 0x000a, + 0x1f86: 0x000a, 0x1f87: 0x000a, 0x1f88: 0x000a, 0x1f89: 0x000a, 0x1f8a: 0x000a, 0x1f8b: 0x000a, + 0x1f8c: 0x000a, 0x1f8d: 0x000a, 0x1f8e: 0x000a, 0x1f8f: 0x000a, 0x1f90: 0x000a, 0x1f91: 0x000a, + 0x1f92: 0x000a, 0x1f93: 0x000a, 0x1f94: 0x000a, 0x1f95: 0x000a, 0x1f96: 0x000a, 0x1f97: 0x000a, + 0x1f98: 0x000a, 0x1f99: 0x000a, 0x1f9a: 0x000a, 0x1f9b: 0x000a, 0x1f9c: 0x000a, 0x1f9d: 0x000a, + 0x1f9e: 0x000a, 0x1f9f: 0x000a, 0x1fa0: 0x000a, 0x1fa1: 0x000a, + // Block 0x7f, offset 0x1fc0 + 0x1fc8: 0x000a, + // Block 0x80, offset 0x2000 + 0x2002: 0x000c, + 0x2006: 0x000c, 0x200b: 0x000c, + 0x2025: 0x000c, 0x2026: 0x000c, 0x2028: 0x000a, 0x2029: 0x000a, + 0x202a: 0x000a, 0x202b: 0x000a, + 0x2038: 0x0004, 0x2039: 0x0004, + // Block 0x81, offset 0x2040 + 0x2074: 0x000a, 0x2075: 0x000a, + 0x2076: 0x000a, 0x2077: 0x000a, + // Block 0x82, offset 0x2080 + 0x2084: 0x000c, 0x2085: 0x000c, + 0x20a0: 0x000c, 0x20a1: 0x000c, 0x20a2: 0x000c, 0x20a3: 0x000c, + 0x20a4: 0x000c, 0x20a5: 0x000c, 0x20a6: 0x000c, 0x20a7: 0x000c, 0x20a8: 0x000c, 0x20a9: 0x000c, + 0x20aa: 0x000c, 0x20ab: 0x000c, 0x20ac: 0x000c, 0x20ad: 0x000c, 0x20ae: 0x000c, 0x20af: 0x000c, + 0x20b0: 0x000c, 0x20b1: 0x000c, + 0x20bf: 0x000c, + // Block 0x83, offset 0x20c0 + 0x20e6: 0x000c, 0x20e7: 0x000c, 0x20e8: 0x000c, 0x20e9: 0x000c, + 0x20ea: 0x000c, 0x20eb: 0x000c, 0x20ec: 0x000c, 0x20ed: 0x000c, + // Block 0x84, offset 0x2100 + 0x2107: 0x000c, 0x2108: 0x000c, 0x2109: 0x000c, 0x210a: 0x000c, 0x210b: 0x000c, + 0x210c: 0x000c, 0x210d: 0x000c, 0x210e: 0x000c, 0x210f: 0x000c, 0x2110: 0x000c, 0x2111: 0x000c, + // Block 0x85, offset 0x2140 + 0x2140: 0x000c, 0x2141: 0x000c, 0x2142: 0x000c, + 0x2173: 0x000c, + 0x2176: 0x000c, 0x2177: 0x000c, 0x2178: 0x000c, 
0x2179: 0x000c, + 0x217c: 0x000c, 0x217d: 0x000c, + // Block 0x86, offset 0x2180 + 0x21a5: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21e9: 0x000c, + 0x21ea: 0x000c, 0x21eb: 0x000c, 0x21ec: 0x000c, 0x21ed: 0x000c, 0x21ee: 0x000c, + 0x21f1: 0x000c, 0x21f2: 0x000c, 0x21f5: 0x000c, + 0x21f6: 0x000c, + // Block 0x88, offset 0x2200 + 0x2203: 0x000c, + 0x220c: 0x000c, + 0x223c: 0x000c, + // Block 0x89, offset 0x2240 + 0x2270: 0x000c, 0x2272: 0x000c, 0x2273: 0x000c, 0x2274: 0x000c, + 0x2277: 0x000c, 0x2278: 0x000c, + 0x227e: 0x000c, 0x227f: 0x000c, + // Block 0x8a, offset 0x2280 + 0x2281: 0x000c, + 0x22ac: 0x000c, 0x22ad: 0x000c, + 0x22b6: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22e5: 0x000c, 0x22e8: 0x000c, + 0x22ed: 0x000c, + // Block 0x8c, offset 0x2300 + 0x231d: 0x0001, + 0x231e: 0x000c, 0x231f: 0x0001, 0x2320: 0x0001, 0x2321: 0x0001, 0x2322: 0x0001, 0x2323: 0x0001, + 0x2324: 0x0001, 0x2325: 0x0001, 0x2326: 0x0001, 0x2327: 0x0001, 0x2328: 0x0001, 0x2329: 0x0003, + 0x232a: 0x0001, 0x232b: 0x0001, 0x232c: 0x0001, 0x232d: 0x0001, 0x232e: 0x0001, 0x232f: 0x0001, + 0x2330: 0x0001, 0x2331: 0x0001, 0x2332: 0x0001, 0x2333: 0x0001, 0x2334: 0x0001, 0x2335: 0x0001, + 0x2336: 0x0001, 0x2337: 0x0001, 0x2338: 0x0001, 0x2339: 0x0001, 0x233a: 0x0001, 0x233b: 0x0001, + 0x233c: 0x0001, 0x233d: 0x0001, 0x233e: 0x0001, 0x233f: 0x0001, + // Block 0x8d, offset 0x2340 + 0x2340: 0x0001, 0x2341: 0x0001, 0x2342: 0x0001, 0x2343: 0x0001, 0x2344: 0x0001, 0x2345: 0x0001, + 0x2346: 0x0001, 0x2347: 0x0001, 0x2348: 0x0001, 0x2349: 0x0001, 0x234a: 0x0001, 0x234b: 0x0001, + 0x234c: 0x0001, 0x234d: 0x0001, 0x234e: 0x0001, 0x234f: 0x0001, 0x2350: 0x000d, 0x2351: 0x000d, + 0x2352: 0x000d, 0x2353: 0x000d, 0x2354: 0x000d, 0x2355: 0x000d, 0x2356: 0x000d, 0x2357: 0x000d, + 0x2358: 0x000d, 0x2359: 0x000d, 0x235a: 0x000d, 0x235b: 0x000d, 0x235c: 0x000d, 0x235d: 0x000d, + 0x235e: 0x000d, 0x235f: 0x000d, 0x2360: 0x000d, 0x2361: 0x000d, 0x2362: 0x000d, 0x2363: 0x000d, + 0x2364: 0x000d, 0x2365: 0x000d, 0x2366: 0x000d, 0x2367: 0x000d, 0x2368: 0x000d, 0x2369: 0x000d, + 0x236a: 0x000d, 0x236b: 0x000d, 0x236c: 0x000d, 0x236d: 0x000d, 0x236e: 0x000d, 0x236f: 0x000d, + 0x2370: 0x000d, 0x2371: 0x000d, 0x2372: 0x000d, 0x2373: 0x000d, 0x2374: 0x000d, 0x2375: 0x000d, + 0x2376: 0x000d, 0x2377: 0x000d, 0x2378: 0x000d, 0x2379: 0x000d, 0x237a: 0x000d, 0x237b: 0x000d, + 0x237c: 0x000d, 0x237d: 0x000d, 0x237e: 0x000d, 0x237f: 0x000d, + // Block 0x8e, offset 0x2380 + 0x2380: 0x000d, 0x2381: 0x000d, 0x2382: 0x000d, 0x2383: 0x000d, 0x2384: 0x000d, 0x2385: 0x000d, + 0x2386: 0x000d, 0x2387: 0x000d, 0x2388: 0x000d, 0x2389: 0x000d, 0x238a: 0x000d, 0x238b: 0x000d, + 0x238c: 0x000d, 0x238d: 0x000d, 0x238e: 0x000d, 0x238f: 0x000d, 0x2390: 0x000d, 0x2391: 0x000d, + 0x2392: 0x000d, 0x2393: 0x000d, 0x2394: 0x000d, 0x2395: 0x000d, 0x2396: 0x000d, 0x2397: 0x000d, + 0x2398: 0x000d, 0x2399: 0x000d, 0x239a: 0x000d, 0x239b: 0x000d, 0x239c: 0x000d, 0x239d: 0x000d, + 0x239e: 0x000d, 0x239f: 0x000d, 0x23a0: 0x000d, 0x23a1: 0x000d, 0x23a2: 0x000d, 0x23a3: 0x000d, + 0x23a4: 0x000d, 0x23a5: 0x000d, 0x23a6: 0x000d, 0x23a7: 0x000d, 0x23a8: 0x000d, 0x23a9: 0x000d, + 0x23aa: 0x000d, 0x23ab: 0x000d, 0x23ac: 0x000d, 0x23ad: 0x000d, 0x23ae: 0x000d, 0x23af: 0x000d, + 0x23b0: 0x000d, 0x23b1: 0x000d, 0x23b2: 0x000d, 0x23b3: 0x000d, 0x23b4: 0x000d, 0x23b5: 0x000d, + 0x23b6: 0x000d, 0x23b7: 0x000d, 0x23b8: 0x000d, 0x23b9: 0x000d, 0x23ba: 0x000d, 0x23bb: 0x000d, + 0x23bc: 0x000d, 0x23bd: 0x000d, 0x23be: 0x000a, 0x23bf: 0x000a, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x000d, 0x23c1: 
0x000d, 0x23c2: 0x000d, 0x23c3: 0x000d, 0x23c4: 0x000d, 0x23c5: 0x000d, + 0x23c6: 0x000d, 0x23c7: 0x000d, 0x23c8: 0x000d, 0x23c9: 0x000d, 0x23ca: 0x000d, 0x23cb: 0x000d, + 0x23cc: 0x000d, 0x23cd: 0x000d, 0x23ce: 0x000d, 0x23cf: 0x000d, 0x23d0: 0x000b, 0x23d1: 0x000b, + 0x23d2: 0x000b, 0x23d3: 0x000b, 0x23d4: 0x000b, 0x23d5: 0x000b, 0x23d6: 0x000b, 0x23d7: 0x000b, + 0x23d8: 0x000b, 0x23d9: 0x000b, 0x23da: 0x000b, 0x23db: 0x000b, 0x23dc: 0x000b, 0x23dd: 0x000b, + 0x23de: 0x000b, 0x23df: 0x000b, 0x23e0: 0x000b, 0x23e1: 0x000b, 0x23e2: 0x000b, 0x23e3: 0x000b, + 0x23e4: 0x000b, 0x23e5: 0x000b, 0x23e6: 0x000b, 0x23e7: 0x000b, 0x23e8: 0x000b, 0x23e9: 0x000b, + 0x23ea: 0x000b, 0x23eb: 0x000b, 0x23ec: 0x000b, 0x23ed: 0x000b, 0x23ee: 0x000b, 0x23ef: 0x000b, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000a, 0x23fe: 0x000d, 0x23ff: 0x000d, + // Block 0x90, offset 0x2400 + 0x2400: 0x000c, 0x2401: 0x000c, 0x2402: 0x000c, 0x2403: 0x000c, 0x2404: 0x000c, 0x2405: 0x000c, + 0x2406: 0x000c, 0x2407: 0x000c, 0x2408: 0x000c, 0x2409: 0x000c, 0x240a: 0x000c, 0x240b: 0x000c, + 0x240c: 0x000c, 0x240d: 0x000c, 0x240e: 0x000c, 0x240f: 0x000c, 0x2410: 0x000a, 0x2411: 0x000a, + 0x2412: 0x000a, 0x2413: 0x000a, 0x2414: 0x000a, 0x2415: 0x000a, 0x2416: 0x000a, 0x2417: 0x000a, + 0x2418: 0x000a, 0x2419: 0x000a, + 0x2420: 0x000c, 0x2421: 0x000c, 0x2422: 0x000c, 0x2423: 0x000c, + 0x2424: 0x000c, 0x2425: 0x000c, 0x2426: 0x000c, 0x2427: 0x000c, 0x2428: 0x000c, 0x2429: 0x000c, + 0x242a: 0x000c, 0x242b: 0x000c, 0x242c: 0x000c, 0x242d: 0x000c, 0x242e: 0x000c, 0x242f: 0x000c, + 0x2430: 0x000a, 0x2431: 0x000a, 0x2432: 0x000a, 0x2433: 0x000a, 0x2434: 0x000a, 0x2435: 0x000a, + 0x2436: 0x000a, 0x2437: 0x000a, 0x2438: 0x000a, 0x2439: 0x000a, 0x243a: 0x000a, 0x243b: 0x000a, + 0x243c: 0x000a, 0x243d: 0x000a, 0x243e: 0x000a, 0x243f: 0x000a, + // Block 0x91, offset 0x2440 + 0x2440: 0x000a, 0x2441: 0x000a, 0x2442: 0x000a, 0x2443: 0x000a, 0x2444: 0x000a, 0x2445: 0x000a, + 0x2446: 0x000a, 0x2447: 0x000a, 0x2448: 0x000a, 0x2449: 0x000a, 0x244a: 0x000a, 0x244b: 0x000a, + 0x244c: 0x000a, 0x244d: 0x000a, 0x244e: 0x000a, 0x244f: 0x000a, 0x2450: 0x0006, 0x2451: 0x000a, + 0x2452: 0x0006, 0x2454: 0x000a, 0x2455: 0x0006, 0x2456: 0x000a, 0x2457: 0x000a, + 0x2458: 0x000a, 0x2459: 0x009a, 0x245a: 0x008a, 0x245b: 0x007a, 0x245c: 0x006a, 0x245d: 0x009a, + 0x245e: 0x008a, 0x245f: 0x0004, 0x2460: 0x000a, 0x2461: 0x000a, 0x2462: 0x0003, 0x2463: 0x0003, + 0x2464: 0x000a, 0x2465: 0x000a, 0x2466: 0x000a, 0x2468: 0x000a, 0x2469: 0x0004, + 0x246a: 0x0004, 0x246b: 0x000a, + 0x2470: 0x000d, 0x2471: 0x000d, 0x2472: 0x000d, 0x2473: 0x000d, 0x2474: 0x000d, 0x2475: 0x000d, + 0x2476: 0x000d, 0x2477: 0x000d, 0x2478: 0x000d, 0x2479: 0x000d, 0x247a: 0x000d, 0x247b: 0x000d, + 0x247c: 0x000d, 0x247d: 0x000d, 0x247e: 0x000d, 0x247f: 0x000d, + // Block 0x92, offset 0x2480 + 0x2480: 0x000d, 0x2481: 0x000d, 0x2482: 0x000d, 0x2483: 0x000d, 0x2484: 0x000d, 0x2485: 0x000d, + 0x2486: 0x000d, 0x2487: 0x000d, 0x2488: 0x000d, 0x2489: 0x000d, 0x248a: 0x000d, 0x248b: 0x000d, + 0x248c: 0x000d, 0x248d: 0x000d, 0x248e: 0x000d, 0x248f: 0x000d, 0x2490: 0x000d, 0x2491: 0x000d, + 0x2492: 0x000d, 0x2493: 0x000d, 0x2494: 0x000d, 0x2495: 0x000d, 0x2496: 0x000d, 0x2497: 0x000d, + 0x2498: 0x000d, 0x2499: 0x000d, 0x249a: 0x000d, 0x249b: 0x000d, 0x249c: 0x000d, 0x249d: 0x000d, + 0x249e: 0x000d, 0x249f: 0x000d, 0x24a0: 0x000d, 
0x24a1: 0x000d, 0x24a2: 0x000d, 0x24a3: 0x000d, + 0x24a4: 0x000d, 0x24a5: 0x000d, 0x24a6: 0x000d, 0x24a7: 0x000d, 0x24a8: 0x000d, 0x24a9: 0x000d, + 0x24aa: 0x000d, 0x24ab: 0x000d, 0x24ac: 0x000d, 0x24ad: 0x000d, 0x24ae: 0x000d, 0x24af: 0x000d, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000d, 0x24be: 0x000d, 0x24bf: 0x000b, + // Block 0x93, offset 0x24c0 + 0x24c1: 0x000a, 0x24c2: 0x000a, 0x24c3: 0x0004, 0x24c4: 0x0004, 0x24c5: 0x0004, + 0x24c6: 0x000a, 0x24c7: 0x000a, 0x24c8: 0x003a, 0x24c9: 0x002a, 0x24ca: 0x000a, 0x24cb: 0x0003, + 0x24cc: 0x0006, 0x24cd: 0x0003, 0x24ce: 0x0006, 0x24cf: 0x0006, 0x24d0: 0x0002, 0x24d1: 0x0002, + 0x24d2: 0x0002, 0x24d3: 0x0002, 0x24d4: 0x0002, 0x24d5: 0x0002, 0x24d6: 0x0002, 0x24d7: 0x0002, + 0x24d8: 0x0002, 0x24d9: 0x0002, 0x24da: 0x0006, 0x24db: 0x000a, 0x24dc: 0x000a, 0x24dd: 0x000a, + 0x24de: 0x000a, 0x24df: 0x000a, 0x24e0: 0x000a, + 0x24fb: 0x005a, + 0x24fc: 0x000a, 0x24fd: 0x004a, 0x24fe: 0x000a, 0x24ff: 0x000a, + // Block 0x94, offset 0x2500 + 0x2500: 0x000a, + 0x251b: 0x005a, 0x251c: 0x000a, 0x251d: 0x004a, + 0x251e: 0x000a, 0x251f: 0x00fa, 0x2520: 0x00ea, 0x2521: 0x000a, 0x2522: 0x003a, 0x2523: 0x002a, + 0x2524: 0x000a, 0x2525: 0x000a, + // Block 0x95, offset 0x2540 + 0x2560: 0x0004, 0x2561: 0x0004, 0x2562: 0x000a, 0x2563: 0x000a, + 0x2564: 0x000a, 0x2565: 0x0004, 0x2566: 0x0004, 0x2568: 0x000a, 0x2569: 0x000a, + 0x256a: 0x000a, 0x256b: 0x000a, 0x256c: 0x000a, 0x256d: 0x000a, 0x256e: 0x000a, + 0x2570: 0x000b, 0x2571: 0x000b, 0x2572: 0x000b, 0x2573: 0x000b, 0x2574: 0x000b, 0x2575: 0x000b, + 0x2576: 0x000b, 0x2577: 0x000b, 0x2578: 0x000b, 0x2579: 0x000a, 0x257a: 0x000a, 0x257b: 0x000a, + 0x257c: 0x000a, 0x257d: 0x000a, 0x257e: 0x000b, 0x257f: 0x000b, + // Block 0x96, offset 0x2580 + 0x2581: 0x000a, + // Block 0x97, offset 0x25c0 + 0x25c0: 0x000a, 0x25c1: 0x000a, 0x25c2: 0x000a, 0x25c3: 0x000a, 0x25c4: 0x000a, 0x25c5: 0x000a, + 0x25c6: 0x000a, 0x25c7: 0x000a, 0x25c8: 0x000a, 0x25c9: 0x000a, 0x25ca: 0x000a, 0x25cb: 0x000a, + 0x25cc: 0x000a, 0x25d0: 0x000a, 0x25d1: 0x000a, + 0x25d2: 0x000a, 0x25d3: 0x000a, 0x25d4: 0x000a, 0x25d5: 0x000a, 0x25d6: 0x000a, 0x25d7: 0x000a, + 0x25d8: 0x000a, 0x25d9: 0x000a, 0x25da: 0x000a, 0x25db: 0x000a, + 0x25e0: 0x000a, + // Block 0x98, offset 0x2600 + 0x263d: 0x000c, + // Block 0x99, offset 0x2640 + 0x2660: 0x000c, 0x2661: 0x0002, 0x2662: 0x0002, 0x2663: 0x0002, + 0x2664: 0x0002, 0x2665: 0x0002, 0x2666: 0x0002, 0x2667: 0x0002, 0x2668: 0x0002, 0x2669: 0x0002, + 0x266a: 0x0002, 0x266b: 0x0002, 0x266c: 0x0002, 0x266d: 0x0002, 0x266e: 0x0002, 0x266f: 0x0002, + 0x2670: 0x0002, 0x2671: 0x0002, 0x2672: 0x0002, 0x2673: 0x0002, 0x2674: 0x0002, 0x2675: 0x0002, + 0x2676: 0x0002, 0x2677: 0x0002, 0x2678: 0x0002, 0x2679: 0x0002, 0x267a: 0x0002, 0x267b: 0x0002, + // Block 0x9a, offset 0x2680 + 0x26b6: 0x000c, 0x26b7: 0x000c, 0x26b8: 0x000c, 0x26b9: 0x000c, 0x26ba: 0x000c, + // Block 0x9b, offset 0x26c0 + 0x26c0: 0x0001, 0x26c1: 0x0001, 0x26c2: 0x0001, 0x26c3: 0x0001, 0x26c4: 0x0001, 0x26c5: 0x0001, + 0x26c6: 0x0001, 0x26c7: 0x0001, 0x26c8: 0x0001, 0x26c9: 0x0001, 0x26ca: 0x0001, 0x26cb: 0x0001, + 0x26cc: 0x0001, 0x26cd: 0x0001, 0x26ce: 0x0001, 0x26cf: 0x0001, 0x26d0: 0x0001, 0x26d1: 0x0001, + 0x26d2: 0x0001, 0x26d3: 0x0001, 0x26d4: 0x0001, 0x26d5: 0x0001, 0x26d6: 0x0001, 0x26d7: 0x0001, + 0x26d8: 0x0001, 0x26d9: 0x0001, 0x26da: 0x0001, 0x26db: 0x0001, 0x26dc: 
0x0001, 0x26dd: 0x0001, + 0x26de: 0x0001, 0x26df: 0x0001, 0x26e0: 0x0001, 0x26e1: 0x0001, 0x26e2: 0x0001, 0x26e3: 0x0001, + 0x26e4: 0x0001, 0x26e5: 0x0001, 0x26e6: 0x0001, 0x26e7: 0x0001, 0x26e8: 0x0001, 0x26e9: 0x0001, + 0x26ea: 0x0001, 0x26eb: 0x0001, 0x26ec: 0x0001, 0x26ed: 0x0001, 0x26ee: 0x0001, 0x26ef: 0x0001, + 0x26f0: 0x0001, 0x26f1: 0x0001, 0x26f2: 0x0001, 0x26f3: 0x0001, 0x26f4: 0x0001, 0x26f5: 0x0001, + 0x26f6: 0x0001, 0x26f7: 0x0001, 0x26f8: 0x0001, 0x26f9: 0x0001, 0x26fa: 0x0001, 0x26fb: 0x0001, + 0x26fc: 0x0001, 0x26fd: 0x0001, 0x26fe: 0x0001, 0x26ff: 0x0001, + // Block 0x9c, offset 0x2700 + 0x2700: 0x0001, 0x2701: 0x0001, 0x2702: 0x0001, 0x2703: 0x0001, 0x2704: 0x0001, 0x2705: 0x0001, + 0x2706: 0x0001, 0x2707: 0x0001, 0x2708: 0x0001, 0x2709: 0x0001, 0x270a: 0x0001, 0x270b: 0x0001, + 0x270c: 0x0001, 0x270d: 0x0001, 0x270e: 0x0001, 0x270f: 0x0001, 0x2710: 0x0001, 0x2711: 0x0001, + 0x2712: 0x0001, 0x2713: 0x0001, 0x2714: 0x0001, 0x2715: 0x0001, 0x2716: 0x0001, 0x2717: 0x0001, + 0x2718: 0x0001, 0x2719: 0x0001, 0x271a: 0x0001, 0x271b: 0x0001, 0x271c: 0x0001, 0x271d: 0x0001, + 0x271e: 0x0001, 0x271f: 0x000a, 0x2720: 0x0001, 0x2721: 0x0001, 0x2722: 0x0001, 0x2723: 0x0001, + 0x2724: 0x0001, 0x2725: 0x0001, 0x2726: 0x0001, 0x2727: 0x0001, 0x2728: 0x0001, 0x2729: 0x0001, + 0x272a: 0x0001, 0x272b: 0x0001, 0x272c: 0x0001, 0x272d: 0x0001, 0x272e: 0x0001, 0x272f: 0x0001, + 0x2730: 0x0001, 0x2731: 0x0001, 0x2732: 0x0001, 0x2733: 0x0001, 0x2734: 0x0001, 0x2735: 0x0001, + 0x2736: 0x0001, 0x2737: 0x0001, 0x2738: 0x0001, 0x2739: 0x0001, 0x273a: 0x0001, 0x273b: 0x0001, + 0x273c: 0x0001, 0x273d: 0x0001, 0x273e: 0x0001, 0x273f: 0x0001, + // Block 0x9d, offset 0x2740 + 0x2740: 0x0001, 0x2741: 0x000c, 0x2742: 0x000c, 0x2743: 0x000c, 0x2744: 0x0001, 0x2745: 0x000c, + 0x2746: 0x000c, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, + 0x274c: 0x000c, 0x274d: 0x000c, 0x274e: 0x000c, 0x274f: 0x000c, 0x2750: 0x0001, 0x2751: 0x0001, + 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, + 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, + 0x275e: 0x0001, 0x275f: 0x0001, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, + 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, + 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, + 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, + 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x000c, 0x2779: 0x000c, 0x277a: 0x000c, 0x277b: 0x0001, + 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x000c, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x0001, 0x2782: 0x0001, 0x2783: 0x0001, 0x2784: 0x0001, 0x2785: 0x0001, + 0x2786: 0x0001, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x0001, 0x278d: 0x0001, 0x278e: 0x0001, 0x278f: 0x0001, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x000c, 0x27a6: 0x000c, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 
0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x0001, 0x27b9: 0x0001, 0x27ba: 0x0001, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x0001, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x0001, 0x27e6: 0x0001, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x000a, 0x27fa: 0x000a, 0x27fb: 0x000a, + 0x27fc: 0x000a, 0x27fd: 0x000a, 0x27fe: 0x000a, 0x27ff: 0x000a, + // Block 0xa0, offset 0x2800 + 0x2800: 0x000d, 0x2801: 0x000d, 0x2802: 0x000d, 0x2803: 0x000d, 0x2804: 0x000d, 0x2805: 0x000d, + 0x2806: 0x000d, 0x2807: 0x000d, 0x2808: 0x000d, 0x2809: 0x000d, 0x280a: 0x000d, 0x280b: 0x000d, + 0x280c: 0x000d, 0x280d: 0x000d, 0x280e: 0x000d, 0x280f: 0x000d, 0x2810: 0x000d, 0x2811: 0x000d, + 0x2812: 0x000d, 0x2813: 0x000d, 0x2814: 0x000d, 0x2815: 0x000d, 0x2816: 0x000d, 0x2817: 0x000d, + 0x2818: 0x000d, 0x2819: 0x000d, 0x281a: 0x000d, 0x281b: 0x000d, 0x281c: 0x000d, 0x281d: 0x000d, + 0x281e: 0x000d, 0x281f: 0x000d, 0x2820: 0x000d, 0x2821: 0x000d, 0x2822: 0x000d, 0x2823: 0x000d, + 0x2824: 0x000c, 0x2825: 0x000c, 0x2826: 0x000c, 0x2827: 0x000c, 0x2828: 0x000d, 0x2829: 0x000d, + 0x282a: 0x000d, 0x282b: 0x000d, 0x282c: 0x000d, 0x282d: 0x000d, 0x282e: 0x000d, 0x282f: 0x000d, + 0x2830: 0x0005, 0x2831: 0x0005, 0x2832: 0x0005, 0x2833: 0x0005, 0x2834: 0x0005, 0x2835: 0x0005, + 0x2836: 0x0005, 0x2837: 0x0005, 0x2838: 0x0005, 0x2839: 0x0005, 0x283a: 0x000d, 0x283b: 0x000d, + 0x283c: 0x000d, 0x283d: 0x000d, 0x283e: 0x000d, 0x283f: 0x000d, + // Block 0xa1, offset 0x2840 + 0x2840: 0x0001, 0x2841: 0x0001, 0x2842: 0x0001, 0x2843: 0x0001, 0x2844: 0x0001, 0x2845: 0x0001, + 0x2846: 0x0001, 0x2847: 0x0001, 0x2848: 0x0001, 0x2849: 0x0001, 0x284a: 0x0001, 0x284b: 0x0001, + 0x284c: 0x0001, 0x284d: 0x0001, 0x284e: 0x0001, 0x284f: 0x0001, 0x2850: 0x0001, 0x2851: 0x0001, + 0x2852: 0x0001, 0x2853: 0x0001, 0x2854: 0x0001, 0x2855: 0x0001, 0x2856: 0x0001, 0x2857: 0x0001, + 0x2858: 0x0001, 0x2859: 0x0001, 0x285a: 0x0001, 0x285b: 0x0001, 0x285c: 0x0001, 0x285d: 0x0001, + 0x285e: 0x0001, 0x285f: 0x0001, 0x2860: 0x0005, 0x2861: 0x0005, 0x2862: 0x0005, 0x2863: 0x0005, + 0x2864: 0x0005, 0x2865: 0x0005, 0x2866: 0x0005, 0x2867: 0x0005, 0x2868: 0x0005, 0x2869: 0x0005, + 0x286a: 0x0005, 0x286b: 0x0005, 0x286c: 0x0005, 0x286d: 0x0005, 0x286e: 0x0005, 0x286f: 0x0005, + 0x2870: 0x0005, 0x2871: 0x0005, 0x2872: 0x0005, 0x2873: 0x0005, 0x2874: 0x0005, 0x2875: 0x0005, + 0x2876: 0x0005, 0x2877: 0x0005, 0x2878: 0x0005, 0x2879: 0x0005, 0x287a: 0x0005, 0x287b: 0x0005, + 0x287c: 0x0005, 0x287d: 0x0005, 0x287e: 0x0005, 0x287f: 0x0001, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 
0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0001, 0x28a1: 0x0001, 0x28a2: 0x0001, 0x28a3: 0x0001, + 0x28a4: 0x0001, 0x28a5: 0x0001, 0x28a6: 0x0001, 0x28a7: 0x0001, 0x28a8: 0x0001, 0x28a9: 0x0001, + 0x28aa: 0x0001, 0x28ab: 0x0001, 0x28ac: 0x0001, 0x28ad: 0x0001, 0x28ae: 0x0001, 0x28af: 0x0001, + 0x28b0: 0x000d, 0x28b1: 0x000d, 0x28b2: 0x000d, 0x28b3: 0x000d, 0x28b4: 0x000d, 0x28b5: 0x000d, + 0x28b6: 0x000d, 0x28b7: 0x000d, 0x28b8: 0x000d, 0x28b9: 0x000d, 0x28ba: 0x000d, 0x28bb: 0x000d, + 0x28bc: 0x000d, 0x28bd: 0x000d, 0x28be: 0x000d, 0x28bf: 0x000d, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x000d, 0x28c1: 0x000d, 0x28c2: 0x000d, 0x28c3: 0x000d, 0x28c4: 0x000d, 0x28c5: 0x000d, + 0x28c6: 0x000c, 0x28c7: 0x000c, 0x28c8: 0x000c, 0x28c9: 0x000c, 0x28ca: 0x000c, 0x28cb: 0x000c, + 0x28cc: 0x000c, 0x28cd: 0x000c, 0x28ce: 0x000c, 0x28cf: 0x000c, 0x28d0: 0x000c, 0x28d1: 0x000d, + 0x28d2: 0x000d, 0x28d3: 0x000d, 0x28d4: 0x000d, 0x28d5: 0x000d, 0x28d6: 0x000d, 0x28d7: 0x000d, + 0x28d8: 0x000d, 0x28d9: 0x000d, 0x28da: 0x000d, 0x28db: 0x000d, 0x28dc: 0x000d, 0x28dd: 0x000d, + 0x28de: 0x000d, 0x28df: 0x000d, 0x28e0: 0x000d, 0x28e1: 0x000d, 0x28e2: 0x000d, 0x28e3: 0x000d, + 0x28e4: 0x000d, 0x28e5: 0x000d, 0x28e6: 0x000d, 0x28e7: 0x000d, 0x28e8: 0x000d, 0x28e9: 0x000d, + 0x28ea: 0x000d, 0x28eb: 0x000d, 0x28ec: 0x000d, 0x28ed: 0x000d, 0x28ee: 0x000d, 0x28ef: 0x000d, + 0x28f0: 0x0001, 0x28f1: 0x0001, 0x28f2: 0x0001, 0x28f3: 0x0001, 0x28f4: 0x0001, 0x28f5: 0x0001, + 0x28f6: 0x0001, 0x28f7: 0x0001, 0x28f8: 0x0001, 0x28f9: 0x0001, 0x28fa: 0x0001, 0x28fb: 0x0001, + 0x28fc: 0x0001, 0x28fd: 0x0001, 0x28fe: 0x0001, 0x28ff: 0x0001, + // Block 0xa4, offset 0x2900 + 0x2901: 0x000c, + 0x2938: 0x000c, 0x2939: 0x000c, 0x293a: 0x000c, 0x293b: 0x000c, + 0x293c: 0x000c, 0x293d: 0x000c, 0x293e: 0x000c, 0x293f: 0x000c, + // Block 0xa5, offset 0x2940 + 0x2940: 0x000c, 0x2941: 0x000c, 0x2942: 0x000c, 0x2943: 0x000c, 0x2944: 0x000c, 0x2945: 0x000c, + 0x2946: 0x000c, + 0x2952: 0x000a, 0x2953: 0x000a, 0x2954: 0x000a, 0x2955: 0x000a, 0x2956: 0x000a, 0x2957: 0x000a, + 0x2958: 0x000a, 0x2959: 0x000a, 0x295a: 0x000a, 0x295b: 0x000a, 0x295c: 0x000a, 0x295d: 0x000a, + 0x295e: 0x000a, 0x295f: 0x000a, 0x2960: 0x000a, 0x2961: 0x000a, 0x2962: 0x000a, 0x2963: 0x000a, + 0x2964: 0x000a, 0x2965: 0x000a, + 0x297f: 0x000c, + // Block 0xa6, offset 0x2980 + 0x2980: 0x000c, 0x2981: 0x000c, + 0x29b3: 0x000c, 0x29b4: 0x000c, 0x29b5: 0x000c, + 0x29b6: 0x000c, 0x29b9: 0x000c, 0x29ba: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x000c, 0x29c1: 0x000c, 0x29c2: 0x000c, + 0x29e7: 0x000c, 0x29e8: 0x000c, 0x29e9: 0x000c, + 0x29ea: 0x000c, 0x29eb: 0x000c, 0x29ed: 0x000c, 0x29ee: 0x000c, 0x29ef: 0x000c, + 0x29f0: 0x000c, 0x29f1: 0x000c, 0x29f2: 0x000c, 0x29f3: 0x000c, 0x29f4: 0x000c, + // Block 0xa8, offset 0x2a00 + 0x2a33: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a40: 0x000c, 0x2a41: 0x000c, + 0x2a76: 0x000c, 0x2a77: 0x000c, 0x2a78: 0x000c, 0x2a79: 0x000c, 0x2a7a: 0x000c, 0x2a7b: 0x000c, + 0x2a7c: 0x000c, 0x2a7d: 0x000c, 0x2a7e: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2a89: 0x000c, 0x2a8a: 
0x000c, 0x2a8b: 0x000c, + 0x2a8c: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2aef: 0x000c, + 0x2af0: 0x000c, 0x2af1: 0x000c, 0x2af4: 0x000c, + 0x2af6: 0x000c, 0x2af7: 0x000c, + 0x2afe: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b1f: 0x000c, 0x2b23: 0x000c, + 0x2b24: 0x000c, 0x2b25: 0x000c, 0x2b26: 0x000c, 0x2b27: 0x000c, 0x2b28: 0x000c, 0x2b29: 0x000c, + 0x2b2a: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b40: 0x000c, + 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, + 0x2b6a: 0x000c, 0x2b6b: 0x000c, 0x2b6c: 0x000c, + 0x2b70: 0x000c, 0x2b71: 0x000c, 0x2b72: 0x000c, 0x2b73: 0x000c, 0x2b74: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2bb8: 0x000c, 0x2bb9: 0x000c, 0x2bba: 0x000c, 0x2bbb: 0x000c, + 0x2bbc: 0x000c, 0x2bbd: 0x000c, 0x2bbe: 0x000c, 0x2bbf: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bc2: 0x000c, 0x2bc3: 0x000c, 0x2bc4: 0x000c, + 0x2bc6: 0x000c, + 0x2bde: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c33: 0x000c, 0x2c34: 0x000c, 0x2c35: 0x000c, + 0x2c36: 0x000c, 0x2c37: 0x000c, 0x2c38: 0x000c, 0x2c3a: 0x000c, + 0x2c3f: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c40: 0x000c, 0x2c42: 0x000c, 0x2c43: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2cb2: 0x000c, 0x2cb3: 0x000c, 0x2cb4: 0x000c, 0x2cb5: 0x000c, + 0x2cbc: 0x000c, 0x2cbd: 0x000c, 0x2cbf: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cc0: 0x000c, + 0x2cdc: 0x000c, 0x2cdd: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d33: 0x000c, 0x2d34: 0x000c, 0x2d35: 0x000c, + 0x2d36: 0x000c, 0x2d37: 0x000c, 0x2d38: 0x000c, 0x2d39: 0x000c, 0x2d3a: 0x000c, + 0x2d3d: 0x000c, 0x2d3f: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d40: 0x000c, + 0x2d60: 0x000a, 0x2d61: 0x000a, 0x2d62: 0x000a, 0x2d63: 0x000a, + 0x2d64: 0x000a, 0x2d65: 0x000a, 0x2d66: 0x000a, 0x2d67: 0x000a, 0x2d68: 0x000a, 0x2d69: 0x000a, + 0x2d6a: 0x000a, 0x2d6b: 0x000a, 0x2d6c: 0x000a, + // Block 0xb6, offset 0x2d80 + 0x2dab: 0x000c, 0x2dad: 0x000c, + 0x2db0: 0x000c, 0x2db1: 0x000c, 0x2db2: 0x000c, 0x2db3: 0x000c, 0x2db4: 0x000c, 0x2db5: 0x000c, + 0x2db7: 0x000c, + // Block 0xb7, offset 0x2dc0 + 0x2ddd: 0x000c, + 0x2dde: 0x000c, 0x2ddf: 0x000c, 0x2de2: 0x000c, 0x2de3: 0x000c, + 0x2de4: 0x000c, 0x2de5: 0x000c, 0x2de7: 0x000c, 0x2de8: 0x000c, 0x2de9: 0x000c, + 0x2dea: 0x000c, 0x2deb: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e2f: 0x000c, + 0x2e30: 0x000c, 0x2e31: 0x000c, 0x2e32: 0x000c, 0x2e33: 0x000c, 0x2e34: 0x000c, 0x2e35: 0x000c, + 0x2e36: 0x000c, 0x2e37: 0x000c, 0x2e39: 0x000c, 0x2e3a: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e54: 0x000c, 0x2e55: 0x000c, 0x2e56: 0x000c, 0x2e57: 0x000c, + 0x2e5a: 0x000c, 0x2e5b: 0x000c, + 0x2e60: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2e81: 0x000c, 0x2e82: 0x000c, 0x2e83: 0x000c, 0x2e84: 0x000c, 0x2e85: 0x000c, + 0x2e86: 0x000c, 0x2e89: 0x000c, 0x2e8a: 0x000c, + 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb6: 0x000c, 0x2eb7: 0x000c, 0x2eb8: 0x000c, 0x2ebb: 0x000c, + 0x2ebc: 0x000c, 0x2ebd: 0x000c, 0x2ebe: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ec7: 0x000c, + 0x2ed1: 0x000c, + 0x2ed2: 0x000c, 0x2ed3: 0x000c, 0x2ed4: 0x000c, 0x2ed5: 0x000c, 0x2ed6: 0x000c, + 0x2ed9: 0x000c, 0x2eda: 0x000c, 0x2edb: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f0a: 0x000c, 0x2f0b: 0x000c, + 0x2f0c: 0x000c, 0x2f0d: 0x000c, 0x2f0e: 0x000c, 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c, + 0x2f12: 0x000c, 0x2f13: 0x000c, 0x2f14: 0x000c, 0x2f15: 0x000c, 0x2f16: 0x000c, + 0x2f18: 0x000c, 0x2f19: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f70: 0x000c, 0x2f71: 0x000c, 0x2f72: 0x000c, 0x2f73: 0x000c, 0x2f74: 0x000c, 0x2f75: 0x000c, + 
0x2f76: 0x000c, 0x2f78: 0x000c, 0x2f79: 0x000c, 0x2f7a: 0x000c, 0x2f7b: 0x000c, + 0x2f7c: 0x000c, 0x2f7d: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2f92: 0x000c, 0x2f93: 0x000c, 0x2f94: 0x000c, 0x2f95: 0x000c, 0x2f96: 0x000c, 0x2f97: 0x000c, + 0x2f98: 0x000c, 0x2f99: 0x000c, 0x2f9a: 0x000c, 0x2f9b: 0x000c, 0x2f9c: 0x000c, 0x2f9d: 0x000c, + 0x2f9e: 0x000c, 0x2f9f: 0x000c, 0x2fa0: 0x000c, 0x2fa1: 0x000c, 0x2fa2: 0x000c, 0x2fa3: 0x000c, + 0x2fa4: 0x000c, 0x2fa5: 0x000c, 0x2fa6: 0x000c, 0x2fa7: 0x000c, + 0x2faa: 0x000c, 0x2fab: 0x000c, 0x2fac: 0x000c, 0x2fad: 0x000c, 0x2fae: 0x000c, 0x2faf: 0x000c, + 0x2fb0: 0x000c, 0x2fb2: 0x000c, 0x2fb3: 0x000c, 0x2fb5: 0x000c, + 0x2fb6: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2ff1: 0x000c, 0x2ff2: 0x000c, 0x2ff3: 0x000c, 0x2ff4: 0x000c, 0x2ff5: 0x000c, + 0x2ff6: 0x000c, 0x2ffa: 0x000c, + 0x2ffc: 0x000c, 0x2ffd: 0x000c, 0x2fff: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3000: 0x000c, 0x3001: 0x000c, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000c, + 0x3007: 0x000c, + // Block 0xc1, offset 0x3040 + 0x3050: 0x000c, 0x3051: 0x000c, + 0x3055: 0x000c, 0x3057: 0x000c, + // Block 0xc2, offset 0x3080 + 0x30b3: 0x000c, 0x30b4: 0x000c, + // Block 0xc3, offset 0x30c0 + 0x30d5: 0x000a, 0x30d6: 0x000a, 0x30d7: 0x000a, + 0x30d8: 0x000a, 0x30d9: 0x000a, 0x30da: 0x000a, 0x30db: 0x000a, 0x30dc: 0x000a, 0x30dd: 0x0004, + 0x30de: 0x0004, 0x30df: 0x0004, 0x30e0: 0x0004, 0x30e1: 0x000a, 0x30e2: 0x000a, 0x30e3: 0x000a, + 0x30e4: 0x000a, 0x30e5: 0x000a, 0x30e6: 0x000a, 0x30e7: 0x000a, 0x30e8: 0x000a, 0x30e9: 0x000a, + 0x30ea: 0x000a, 0x30eb: 0x000a, 0x30ec: 0x000a, 0x30ed: 0x000a, 0x30ee: 0x000a, 0x30ef: 0x000a, + 0x30f0: 0x000a, 0x30f1: 0x000a, + // Block 0xc4, offset 0x3100 + 0x3130: 0x000c, 0x3131: 0x000c, 0x3132: 0x000c, 0x3133: 0x000c, 0x3134: 0x000c, + // Block 0xc5, offset 0x3140 + 0x3170: 0x000c, 0x3171: 0x000c, 0x3172: 0x000c, 0x3173: 0x000c, 0x3174: 0x000c, 0x3175: 0x000c, + 0x3176: 0x000c, + // Block 0xc6, offset 0x3180 + 0x318f: 0x000c, + // Block 0xc7, offset 0x31c0 + 0x31cf: 0x000c, 0x31d0: 0x000c, 0x31d1: 0x000c, + 0x31d2: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3222: 0x000a, + // Block 0xc9, offset 0x3240 + 0x325d: 0x000c, + 0x325e: 0x000c, 0x3260: 0x000b, 0x3261: 0x000b, 0x3262: 0x000b, 0x3263: 0x000b, + // Block 0xca, offset 0x3280 + 0x32a7: 0x000c, 0x32a8: 0x000c, 0x32a9: 0x000c, + 0x32b3: 0x000b, 0x32b4: 0x000b, 0x32b5: 0x000b, + 0x32b6: 0x000b, 0x32b7: 0x000b, 0x32b8: 0x000b, 0x32b9: 0x000b, 0x32ba: 0x000b, 0x32bb: 0x000c, + 0x32bc: 0x000c, 0x32bd: 0x000c, 0x32be: 0x000c, 0x32bf: 0x000c, + // Block 0xcb, offset 0x32c0 + 0x32c0: 0x000c, 0x32c1: 0x000c, 0x32c2: 0x000c, 0x32c5: 0x000c, + 0x32c6: 0x000c, 0x32c7: 0x000c, 0x32c8: 0x000c, 0x32c9: 0x000c, 0x32ca: 0x000c, 0x32cb: 0x000c, + 0x32ea: 0x000c, 0x32eb: 0x000c, 0x32ec: 0x000c, 0x32ed: 0x000c, + // Block 0xcc, offset 0x3300 + 0x3300: 0x000a, 0x3301: 0x000a, 0x3302: 0x000c, 0x3303: 0x000c, 0x3304: 0x000c, 0x3305: 0x000a, + // Block 0xcd, offset 0x3340 + 0x3340: 0x000a, 0x3341: 0x000a, 0x3342: 0x000a, 0x3343: 0x000a, 0x3344: 0x000a, 0x3345: 0x000a, + 0x3346: 0x000a, 0x3347: 0x000a, 0x3348: 0x000a, 0x3349: 0x000a, 0x334a: 0x000a, 0x334b: 0x000a, + 0x334c: 0x000a, 0x334d: 0x000a, 0x334e: 0x000a, 0x334f: 0x000a, 0x3350: 0x000a, 0x3351: 0x000a, + 0x3352: 0x000a, 0x3353: 0x000a, 0x3354: 0x000a, 0x3355: 0x000a, 0x3356: 0x000a, + // Block 0xce, offset 0x3380 + 0x339b: 0x000a, + // Block 0xcf, offset 0x33c0 + 0x33d5: 0x000a, + // Block 0xd0, offset 0x3400 + 0x340f: 0x000a, + // Block 0xd1, offset 
0x3440 + 0x3449: 0x000a, + // Block 0xd2, offset 0x3480 + 0x3483: 0x000a, + 0x348e: 0x0002, 0x348f: 0x0002, 0x3490: 0x0002, 0x3491: 0x0002, + 0x3492: 0x0002, 0x3493: 0x0002, 0x3494: 0x0002, 0x3495: 0x0002, 0x3496: 0x0002, 0x3497: 0x0002, + 0x3498: 0x0002, 0x3499: 0x0002, 0x349a: 0x0002, 0x349b: 0x0002, 0x349c: 0x0002, 0x349d: 0x0002, + 0x349e: 0x0002, 0x349f: 0x0002, 0x34a0: 0x0002, 0x34a1: 0x0002, 0x34a2: 0x0002, 0x34a3: 0x0002, + 0x34a4: 0x0002, 0x34a5: 0x0002, 0x34a6: 0x0002, 0x34a7: 0x0002, 0x34a8: 0x0002, 0x34a9: 0x0002, + 0x34aa: 0x0002, 0x34ab: 0x0002, 0x34ac: 0x0002, 0x34ad: 0x0002, 0x34ae: 0x0002, 0x34af: 0x0002, + 0x34b0: 0x0002, 0x34b1: 0x0002, 0x34b2: 0x0002, 0x34b3: 0x0002, 0x34b4: 0x0002, 0x34b5: 0x0002, + 0x34b6: 0x0002, 0x34b7: 0x0002, 0x34b8: 0x0002, 0x34b9: 0x0002, 0x34ba: 0x0002, 0x34bb: 0x0002, + 0x34bc: 0x0002, 0x34bd: 0x0002, 0x34be: 0x0002, 0x34bf: 0x0002, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000c, 0x34c1: 0x000c, 0x34c2: 0x000c, 0x34c3: 0x000c, 0x34c4: 0x000c, 0x34c5: 0x000c, + 0x34c6: 0x000c, 0x34c7: 0x000c, 0x34c8: 0x000c, 0x34c9: 0x000c, 0x34ca: 0x000c, 0x34cb: 0x000c, + 0x34cc: 0x000c, 0x34cd: 0x000c, 0x34ce: 0x000c, 0x34cf: 0x000c, 0x34d0: 0x000c, 0x34d1: 0x000c, + 0x34d2: 0x000c, 0x34d3: 0x000c, 0x34d4: 0x000c, 0x34d5: 0x000c, 0x34d6: 0x000c, 0x34d7: 0x000c, + 0x34d8: 0x000c, 0x34d9: 0x000c, 0x34da: 0x000c, 0x34db: 0x000c, 0x34dc: 0x000c, 0x34dd: 0x000c, + 0x34de: 0x000c, 0x34df: 0x000c, 0x34e0: 0x000c, 0x34e1: 0x000c, 0x34e2: 0x000c, 0x34e3: 0x000c, + 0x34e4: 0x000c, 0x34e5: 0x000c, 0x34e6: 0x000c, 0x34e7: 0x000c, 0x34e8: 0x000c, 0x34e9: 0x000c, + 0x34ea: 0x000c, 0x34eb: 0x000c, 0x34ec: 0x000c, 0x34ed: 0x000c, 0x34ee: 0x000c, 0x34ef: 0x000c, + 0x34f0: 0x000c, 0x34f1: 0x000c, 0x34f2: 0x000c, 0x34f3: 0x000c, 0x34f4: 0x000c, 0x34f5: 0x000c, + 0x34f6: 0x000c, 0x34fb: 0x000c, + 0x34fc: 0x000c, 0x34fd: 0x000c, 0x34fe: 0x000c, 0x34ff: 0x000c, + // Block 0xd4, offset 0x3500 + 0x3500: 0x000c, 0x3501: 0x000c, 0x3502: 0x000c, 0x3503: 0x000c, 0x3504: 0x000c, 0x3505: 0x000c, + 0x3506: 0x000c, 0x3507: 0x000c, 0x3508: 0x000c, 0x3509: 0x000c, 0x350a: 0x000c, 0x350b: 0x000c, + 0x350c: 0x000c, 0x350d: 0x000c, 0x350e: 0x000c, 0x350f: 0x000c, 0x3510: 0x000c, 0x3511: 0x000c, + 0x3512: 0x000c, 0x3513: 0x000c, 0x3514: 0x000c, 0x3515: 0x000c, 0x3516: 0x000c, 0x3517: 0x000c, + 0x3518: 0x000c, 0x3519: 0x000c, 0x351a: 0x000c, 0x351b: 0x000c, 0x351c: 0x000c, 0x351d: 0x000c, + 0x351e: 0x000c, 0x351f: 0x000c, 0x3520: 0x000c, 0x3521: 0x000c, 0x3522: 0x000c, 0x3523: 0x000c, + 0x3524: 0x000c, 0x3525: 0x000c, 0x3526: 0x000c, 0x3527: 0x000c, 0x3528: 0x000c, 0x3529: 0x000c, + 0x352a: 0x000c, 0x352b: 0x000c, 0x352c: 0x000c, + 0x3535: 0x000c, + // Block 0xd5, offset 0x3540 + 0x3544: 0x000c, + 0x355b: 0x000c, 0x355c: 0x000c, 0x355d: 0x000c, + 0x355e: 0x000c, 0x355f: 0x000c, 0x3561: 0x000c, 0x3562: 0x000c, 0x3563: 0x000c, + 0x3564: 0x000c, 0x3565: 0x000c, 0x3566: 0x000c, 0x3567: 0x000c, 0x3568: 0x000c, 0x3569: 0x000c, + 0x356a: 0x000c, 0x356b: 0x000c, 0x356c: 0x000c, 0x356d: 0x000c, 0x356e: 0x000c, 0x356f: 0x000c, + // Block 0xd6, offset 0x3580 + 0x3580: 0x000c, 0x3581: 0x000c, 0x3582: 0x000c, 0x3583: 0x000c, 0x3584: 0x000c, 0x3585: 0x000c, + 0x3586: 0x000c, 0x3588: 0x000c, 0x3589: 0x000c, 0x358a: 0x000c, 0x358b: 0x000c, + 0x358c: 0x000c, 0x358d: 0x000c, 0x358e: 0x000c, 0x358f: 0x000c, 0x3590: 0x000c, 0x3591: 0x000c, + 0x3592: 0x000c, 0x3593: 0x000c, 0x3594: 0x000c, 0x3595: 0x000c, 0x3596: 0x000c, 0x3597: 0x000c, + 0x3598: 0x000c, 0x359b: 0x000c, 0x359c: 0x000c, 0x359d: 0x000c, + 0x359e: 
0x000c, 0x359f: 0x000c, 0x35a0: 0x000c, 0x35a1: 0x000c, 0x35a3: 0x000c, + 0x35a4: 0x000c, 0x35a6: 0x000c, 0x35a7: 0x000c, 0x35a8: 0x000c, 0x35a9: 0x000c, + 0x35aa: 0x000c, + // Block 0xd7, offset 0x35c0 + 0x35ec: 0x000c, 0x35ed: 0x000c, 0x35ee: 0x000c, 0x35ef: 0x000c, + 0x35ff: 0x0004, + // Block 0xd8, offset 0x3600 + 0x3600: 0x0001, 0x3601: 0x0001, 0x3602: 0x0001, 0x3603: 0x0001, 0x3604: 0x0001, 0x3605: 0x0001, + 0x3606: 0x0001, 0x3607: 0x0001, 0x3608: 0x0001, 0x3609: 0x0001, 0x360a: 0x0001, 0x360b: 0x0001, + 0x360c: 0x0001, 0x360d: 0x0001, 0x360e: 0x0001, 0x360f: 0x0001, 0x3610: 0x000c, 0x3611: 0x000c, + 0x3612: 0x000c, 0x3613: 0x000c, 0x3614: 0x000c, 0x3615: 0x000c, 0x3616: 0x000c, 0x3617: 0x0001, + 0x3618: 0x0001, 0x3619: 0x0001, 0x361a: 0x0001, 0x361b: 0x0001, 0x361c: 0x0001, 0x361d: 0x0001, + 0x361e: 0x0001, 0x361f: 0x0001, 0x3620: 0x0001, 0x3621: 0x0001, 0x3622: 0x0001, 0x3623: 0x0001, + 0x3624: 0x0001, 0x3625: 0x0001, 0x3626: 0x0001, 0x3627: 0x0001, 0x3628: 0x0001, 0x3629: 0x0001, + 0x362a: 0x0001, 0x362b: 0x0001, 0x362c: 0x0001, 0x362d: 0x0001, 0x362e: 0x0001, 0x362f: 0x0001, + 0x3630: 0x0001, 0x3631: 0x0001, 0x3632: 0x0001, 0x3633: 0x0001, 0x3634: 0x0001, 0x3635: 0x0001, + 0x3636: 0x0001, 0x3637: 0x0001, 0x3638: 0x0001, 0x3639: 0x0001, 0x363a: 0x0001, 0x363b: 0x0001, + 0x363c: 0x0001, 0x363d: 0x0001, 0x363e: 0x0001, 0x363f: 0x0001, + // Block 0xd9, offset 0x3640 + 0x3640: 0x0001, 0x3641: 0x0001, 0x3642: 0x0001, 0x3643: 0x0001, 0x3644: 0x000c, 0x3645: 0x000c, + 0x3646: 0x000c, 0x3647: 0x000c, 0x3648: 0x000c, 0x3649: 0x000c, 0x364a: 0x000c, 0x364b: 0x0001, + 0x364c: 0x0001, 0x364d: 0x0001, 0x364e: 0x0001, 0x364f: 0x0001, 0x3650: 0x0001, 0x3651: 0x0001, + 0x3652: 0x0001, 0x3653: 0x0001, 0x3654: 0x0001, 0x3655: 0x0001, 0x3656: 0x0001, 0x3657: 0x0001, + 0x3658: 0x0001, 0x3659: 0x0001, 0x365a: 0x0001, 0x365b: 0x0001, 0x365c: 0x0001, 0x365d: 0x0001, + 0x365e: 0x0001, 0x365f: 0x0001, 0x3660: 0x0001, 0x3661: 0x0001, 0x3662: 0x0001, 0x3663: 0x0001, + 0x3664: 0x0001, 0x3665: 0x0001, 0x3666: 0x0001, 0x3667: 0x0001, 0x3668: 0x0001, 0x3669: 0x0001, + 0x366a: 0x0001, 0x366b: 0x0001, 0x366c: 0x0001, 0x366d: 0x0001, 0x366e: 0x0001, 0x366f: 0x0001, + 0x3670: 0x0001, 0x3671: 0x0001, 0x3672: 0x0001, 0x3673: 0x0001, 0x3674: 0x0001, 0x3675: 0x0001, + 0x3676: 0x0001, 0x3677: 0x0001, 0x3678: 0x0001, 0x3679: 0x0001, 0x367a: 0x0001, 0x367b: 0x0001, + 0x367c: 0x0001, 0x367d: 0x0001, 0x367e: 0x0001, 0x367f: 0x0001, + // Block 0xda, offset 0x3680 + 0x3680: 0x000d, 0x3681: 0x000d, 0x3682: 0x000d, 0x3683: 0x000d, 0x3684: 0x000d, 0x3685: 0x000d, + 0x3686: 0x000d, 0x3687: 0x000d, 0x3688: 0x000d, 0x3689: 0x000d, 0x368a: 0x000d, 0x368b: 0x000d, + 0x368c: 0x000d, 0x368d: 0x000d, 0x368e: 0x000d, 0x368f: 0x000d, 0x3690: 0x0001, 0x3691: 0x0001, + 0x3692: 0x0001, 0x3693: 0x0001, 0x3694: 0x0001, 0x3695: 0x0001, 0x3696: 0x0001, 0x3697: 0x0001, + 0x3698: 0x0001, 0x3699: 0x0001, 0x369a: 0x0001, 0x369b: 0x0001, 0x369c: 0x0001, 0x369d: 0x0001, + 0x369e: 0x0001, 0x369f: 0x0001, 0x36a0: 0x0001, 0x36a1: 0x0001, 0x36a2: 0x0001, 0x36a3: 0x0001, + 0x36a4: 0x0001, 0x36a5: 0x0001, 0x36a6: 0x0001, 0x36a7: 0x0001, 0x36a8: 0x0001, 0x36a9: 0x0001, + 0x36aa: 0x0001, 0x36ab: 0x0001, 0x36ac: 0x0001, 0x36ad: 0x0001, 0x36ae: 0x0001, 0x36af: 0x0001, + 0x36b0: 0x0001, 0x36b1: 0x0001, 0x36b2: 0x0001, 0x36b3: 0x0001, 0x36b4: 0x0001, 0x36b5: 0x0001, + 0x36b6: 0x0001, 0x36b7: 0x0001, 0x36b8: 0x0001, 0x36b9: 0x0001, 0x36ba: 0x0001, 0x36bb: 0x0001, + 0x36bc: 0x0001, 0x36bd: 0x0001, 0x36be: 0x0001, 0x36bf: 0x0001, + // Block 0xdb, offset 0x36c0 + 
0x36c0: 0x000d, 0x36c1: 0x000d, 0x36c2: 0x000d, 0x36c3: 0x000d, 0x36c4: 0x000d, 0x36c5: 0x000d, + 0x36c6: 0x000d, 0x36c7: 0x000d, 0x36c8: 0x000d, 0x36c9: 0x000d, 0x36ca: 0x000d, 0x36cb: 0x000d, + 0x36cc: 0x000d, 0x36cd: 0x000d, 0x36ce: 0x000d, 0x36cf: 0x000d, 0x36d0: 0x000d, 0x36d1: 0x000d, + 0x36d2: 0x000d, 0x36d3: 0x000d, 0x36d4: 0x000d, 0x36d5: 0x000d, 0x36d6: 0x000d, 0x36d7: 0x000d, + 0x36d8: 0x000d, 0x36d9: 0x000d, 0x36da: 0x000d, 0x36db: 0x000d, 0x36dc: 0x000d, 0x36dd: 0x000d, + 0x36de: 0x000d, 0x36df: 0x000d, 0x36e0: 0x000d, 0x36e1: 0x000d, 0x36e2: 0x000d, 0x36e3: 0x000d, + 0x36e4: 0x000d, 0x36e5: 0x000d, 0x36e6: 0x000d, 0x36e7: 0x000d, 0x36e8: 0x000d, 0x36e9: 0x000d, + 0x36ea: 0x000d, 0x36eb: 0x000d, 0x36ec: 0x000d, 0x36ed: 0x000d, 0x36ee: 0x000d, 0x36ef: 0x000d, + 0x36f0: 0x000a, 0x36f1: 0x000a, 0x36f2: 0x000d, 0x36f3: 0x000d, 0x36f4: 0x000d, 0x36f5: 0x000d, + 0x36f6: 0x000d, 0x36f7: 0x000d, 0x36f8: 0x000d, 0x36f9: 0x000d, 0x36fa: 0x000d, 0x36fb: 0x000d, + 0x36fc: 0x000d, 0x36fd: 0x000d, 0x36fe: 0x000d, 0x36ff: 0x000d, + // Block 0xdc, offset 0x3700 + 0x3700: 0x000a, 0x3701: 0x000a, 0x3702: 0x000a, 0x3703: 0x000a, 0x3704: 0x000a, 0x3705: 0x000a, + 0x3706: 0x000a, 0x3707: 0x000a, 0x3708: 0x000a, 0x3709: 0x000a, 0x370a: 0x000a, 0x370b: 0x000a, + 0x370c: 0x000a, 0x370d: 0x000a, 0x370e: 0x000a, 0x370f: 0x000a, 0x3710: 0x000a, 0x3711: 0x000a, + 0x3712: 0x000a, 0x3713: 0x000a, 0x3714: 0x000a, 0x3715: 0x000a, 0x3716: 0x000a, 0x3717: 0x000a, + 0x3718: 0x000a, 0x3719: 0x000a, 0x371a: 0x000a, 0x371b: 0x000a, 0x371c: 0x000a, 0x371d: 0x000a, + 0x371e: 0x000a, 0x371f: 0x000a, 0x3720: 0x000a, 0x3721: 0x000a, 0x3722: 0x000a, 0x3723: 0x000a, + 0x3724: 0x000a, 0x3725: 0x000a, 0x3726: 0x000a, 0x3727: 0x000a, 0x3728: 0x000a, 0x3729: 0x000a, + 0x372a: 0x000a, 0x372b: 0x000a, + 0x3730: 0x000a, 0x3731: 0x000a, 0x3732: 0x000a, 0x3733: 0x000a, 0x3734: 0x000a, 0x3735: 0x000a, + 0x3736: 0x000a, 0x3737: 0x000a, 0x3738: 0x000a, 0x3739: 0x000a, 0x373a: 0x000a, 0x373b: 0x000a, + 0x373c: 0x000a, 0x373d: 0x000a, 0x373e: 0x000a, 0x373f: 0x000a, + // Block 0xdd, offset 0x3740 + 0x3740: 0x000a, 0x3741: 0x000a, 0x3742: 0x000a, 0x3743: 0x000a, 0x3744: 0x000a, 0x3745: 0x000a, + 0x3746: 0x000a, 0x3747: 0x000a, 0x3748: 0x000a, 0x3749: 0x000a, 0x374a: 0x000a, 0x374b: 0x000a, + 0x374c: 0x000a, 0x374d: 0x000a, 0x374e: 0x000a, 0x374f: 0x000a, 0x3750: 0x000a, 0x3751: 0x000a, + 0x3752: 0x000a, 0x3753: 0x000a, + 0x3760: 0x000a, 0x3761: 0x000a, 0x3762: 0x000a, 0x3763: 0x000a, + 0x3764: 0x000a, 0x3765: 0x000a, 0x3766: 0x000a, 0x3767: 0x000a, 0x3768: 0x000a, 0x3769: 0x000a, + 0x376a: 0x000a, 0x376b: 0x000a, 0x376c: 0x000a, 0x376d: 0x000a, 0x376e: 0x000a, + 0x3771: 0x000a, 0x3772: 0x000a, 0x3773: 0x000a, 0x3774: 0x000a, 0x3775: 0x000a, + 0x3776: 0x000a, 0x3777: 0x000a, 0x3778: 0x000a, 0x3779: 0x000a, 0x377a: 0x000a, 0x377b: 0x000a, + 0x377c: 0x000a, 0x377d: 0x000a, 0x377e: 0x000a, 0x377f: 0x000a, + // Block 0xde, offset 0x3780 + 0x3781: 0x000a, 0x3782: 0x000a, 0x3783: 0x000a, 0x3784: 0x000a, 0x3785: 0x000a, + 0x3786: 0x000a, 0x3787: 0x000a, 0x3788: 0x000a, 0x3789: 0x000a, 0x378a: 0x000a, 0x378b: 0x000a, + 0x378c: 0x000a, 0x378d: 0x000a, 0x378e: 0x000a, 0x378f: 0x000a, 0x3791: 0x000a, + 0x3792: 0x000a, 0x3793: 0x000a, 0x3794: 0x000a, 0x3795: 0x000a, 0x3796: 0x000a, 0x3797: 0x000a, + 0x3798: 0x000a, 0x3799: 0x000a, 0x379a: 0x000a, 0x379b: 0x000a, 0x379c: 0x000a, 0x379d: 0x000a, + 0x379e: 0x000a, 0x379f: 0x000a, 0x37a0: 0x000a, 0x37a1: 0x000a, 0x37a2: 0x000a, 0x37a3: 0x000a, + 0x37a4: 0x000a, 0x37a5: 0x000a, 0x37a6: 0x000a, 0x37a7: 
0x000a, 0x37a8: 0x000a, 0x37a9: 0x000a, + 0x37aa: 0x000a, 0x37ab: 0x000a, 0x37ac: 0x000a, 0x37ad: 0x000a, 0x37ae: 0x000a, 0x37af: 0x000a, + 0x37b0: 0x000a, 0x37b1: 0x000a, 0x37b2: 0x000a, 0x37b3: 0x000a, 0x37b4: 0x000a, 0x37b5: 0x000a, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x0002, 0x37c1: 0x0002, 0x37c2: 0x0002, 0x37c3: 0x0002, 0x37c4: 0x0002, 0x37c5: 0x0002, + 0x37c6: 0x0002, 0x37c7: 0x0002, 0x37c8: 0x0002, 0x37c9: 0x0002, 0x37ca: 0x0002, 0x37cb: 0x000a, + 0x37cc: 0x000a, + 0x37ef: 0x000a, + // Block 0xe0, offset 0x3800 + 0x382a: 0x000a, 0x382b: 0x000a, 0x382c: 0x000a, + // Block 0xe1, offset 0x3840 + 0x3860: 0x000a, 0x3861: 0x000a, 0x3862: 0x000a, 0x3863: 0x000a, + 0x3864: 0x000a, 0x3865: 0x000a, + // Block 0xe2, offset 0x3880 + 0x3880: 0x000a, 0x3881: 0x000a, 0x3882: 0x000a, 0x3883: 0x000a, 0x3884: 0x000a, 0x3885: 0x000a, + 0x3886: 0x000a, 0x3887: 0x000a, 0x3888: 0x000a, 0x3889: 0x000a, 0x388a: 0x000a, 0x388b: 0x000a, + 0x388c: 0x000a, 0x388d: 0x000a, 0x388e: 0x000a, 0x388f: 0x000a, 0x3890: 0x000a, 0x3891: 0x000a, + 0x3892: 0x000a, 0x3893: 0x000a, 0x3894: 0x000a, 0x3895: 0x000a, + 0x38a0: 0x000a, 0x38a1: 0x000a, 0x38a2: 0x000a, 0x38a3: 0x000a, + 0x38a4: 0x000a, 0x38a5: 0x000a, 0x38a6: 0x000a, 0x38a7: 0x000a, 0x38a8: 0x000a, 0x38a9: 0x000a, + 0x38aa: 0x000a, 0x38ab: 0x000a, 0x38ac: 0x000a, + 0x38b0: 0x000a, 0x38b1: 0x000a, 0x38b2: 0x000a, 0x38b3: 0x000a, 0x38b4: 0x000a, 0x38b5: 0x000a, + 0x38b6: 0x000a, 0x38b7: 0x000a, 0x38b8: 0x000a, 0x38b9: 0x000a, 0x38ba: 0x000a, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x000a, 0x38c1: 0x000a, 0x38c2: 0x000a, 0x38c3: 0x000a, 0x38c4: 0x000a, 0x38c5: 0x000a, + 0x38c6: 0x000a, 0x38c7: 0x000a, 0x38c8: 0x000a, 0x38c9: 0x000a, 0x38ca: 0x000a, 0x38cb: 0x000a, + 0x38cc: 0x000a, 0x38cd: 0x000a, 0x38ce: 0x000a, 0x38cf: 0x000a, 0x38d0: 0x000a, 0x38d1: 0x000a, + 0x38d2: 0x000a, 0x38d3: 0x000a, 0x38d4: 0x000a, 0x38d5: 0x000a, 0x38d6: 0x000a, 0x38d7: 0x000a, + 0x38d8: 0x000a, + 0x38e0: 0x000a, 0x38e1: 0x000a, 0x38e2: 0x000a, 0x38e3: 0x000a, + 0x38e4: 0x000a, 0x38e5: 0x000a, 0x38e6: 0x000a, 0x38e7: 0x000a, 0x38e8: 0x000a, 0x38e9: 0x000a, + 0x38ea: 0x000a, 0x38eb: 0x000a, + // Block 0xe4, offset 0x3900 + 0x3900: 0x000a, 0x3901: 0x000a, 0x3902: 0x000a, 0x3903: 0x000a, 0x3904: 0x000a, 0x3905: 0x000a, + 0x3906: 0x000a, 0x3907: 0x000a, 0x3908: 0x000a, 0x3909: 0x000a, 0x390a: 0x000a, 0x390b: 0x000a, + 0x3910: 0x000a, 0x3911: 0x000a, + 0x3912: 0x000a, 0x3913: 0x000a, 0x3914: 0x000a, 0x3915: 0x000a, 0x3916: 0x000a, 0x3917: 0x000a, + 0x3918: 0x000a, 0x3919: 0x000a, 0x391a: 0x000a, 0x391b: 0x000a, 0x391c: 0x000a, 0x391d: 0x000a, + 0x391e: 0x000a, 0x391f: 0x000a, 0x3920: 0x000a, 0x3921: 0x000a, 0x3922: 0x000a, 0x3923: 0x000a, + 0x3924: 0x000a, 0x3925: 0x000a, 0x3926: 0x000a, 0x3927: 0x000a, 0x3928: 0x000a, 0x3929: 0x000a, + 0x392a: 0x000a, 0x392b: 0x000a, 0x392c: 0x000a, 0x392d: 0x000a, 0x392e: 0x000a, 0x392f: 0x000a, + 0x3930: 0x000a, 0x3931: 0x000a, 0x3932: 0x000a, 0x3933: 0x000a, 0x3934: 0x000a, 0x3935: 0x000a, + 0x3936: 0x000a, 0x3937: 0x000a, 0x3938: 0x000a, 0x3939: 0x000a, 0x393a: 0x000a, 0x393b: 0x000a, + 0x393c: 0x000a, 0x393d: 0x000a, 0x393e: 0x000a, 0x393f: 0x000a, + // Block 0xe5, offset 0x3940 + 0x3940: 0x000a, 0x3941: 0x000a, 0x3942: 0x000a, 0x3943: 0x000a, 0x3944: 0x000a, 0x3945: 0x000a, + 0x3946: 0x000a, 0x3947: 0x000a, + 0x3950: 0x000a, 0x3951: 0x000a, + 0x3952: 0x000a, 0x3953: 0x000a, 0x3954: 0x000a, 0x3955: 0x000a, 0x3956: 0x000a, 0x3957: 0x000a, + 0x3958: 0x000a, 0x3959: 0x000a, + 0x3960: 0x000a, 0x3961: 0x000a, 0x3962: 0x000a, 0x3963: 0x000a, + 0x3964: 
0x000a, 0x3965: 0x000a, 0x3966: 0x000a, 0x3967: 0x000a, 0x3968: 0x000a, 0x3969: 0x000a, + 0x396a: 0x000a, 0x396b: 0x000a, 0x396c: 0x000a, 0x396d: 0x000a, 0x396e: 0x000a, 0x396f: 0x000a, + 0x3970: 0x000a, 0x3971: 0x000a, 0x3972: 0x000a, 0x3973: 0x000a, 0x3974: 0x000a, 0x3975: 0x000a, + 0x3976: 0x000a, 0x3977: 0x000a, 0x3978: 0x000a, 0x3979: 0x000a, 0x397a: 0x000a, 0x397b: 0x000a, + 0x397c: 0x000a, 0x397d: 0x000a, 0x397e: 0x000a, 0x397f: 0x000a, + // Block 0xe6, offset 0x3980 + 0x3980: 0x000a, 0x3981: 0x000a, 0x3982: 0x000a, 0x3983: 0x000a, 0x3984: 0x000a, 0x3985: 0x000a, + 0x3986: 0x000a, 0x3987: 0x000a, + 0x3990: 0x000a, 0x3991: 0x000a, + 0x3992: 0x000a, 0x3993: 0x000a, 0x3994: 0x000a, 0x3995: 0x000a, 0x3996: 0x000a, 0x3997: 0x000a, + 0x3998: 0x000a, 0x3999: 0x000a, 0x399a: 0x000a, 0x399b: 0x000a, 0x399c: 0x000a, 0x399d: 0x000a, + 0x399e: 0x000a, 0x399f: 0x000a, 0x39a0: 0x000a, 0x39a1: 0x000a, 0x39a2: 0x000a, 0x39a3: 0x000a, + 0x39a4: 0x000a, 0x39a5: 0x000a, 0x39a6: 0x000a, 0x39a7: 0x000a, 0x39a8: 0x000a, 0x39a9: 0x000a, + 0x39aa: 0x000a, 0x39ab: 0x000a, 0x39ac: 0x000a, 0x39ad: 0x000a, + // Block 0xe7, offset 0x39c0 + 0x39c0: 0x000a, 0x39c1: 0x000a, 0x39c2: 0x000a, 0x39c3: 0x000a, 0x39c4: 0x000a, 0x39c5: 0x000a, + 0x39c6: 0x000a, 0x39c7: 0x000a, 0x39c8: 0x000a, 0x39c9: 0x000a, 0x39ca: 0x000a, 0x39cb: 0x000a, + 0x39cd: 0x000a, 0x39ce: 0x000a, 0x39cf: 0x000a, 0x39d0: 0x000a, 0x39d1: 0x000a, + 0x39d2: 0x000a, 0x39d3: 0x000a, 0x39d4: 0x000a, 0x39d5: 0x000a, 0x39d6: 0x000a, 0x39d7: 0x000a, + 0x39d8: 0x000a, 0x39d9: 0x000a, 0x39da: 0x000a, 0x39db: 0x000a, 0x39dc: 0x000a, 0x39dd: 0x000a, + 0x39de: 0x000a, 0x39df: 0x000a, 0x39e0: 0x000a, 0x39e1: 0x000a, 0x39e2: 0x000a, 0x39e3: 0x000a, + 0x39e4: 0x000a, 0x39e5: 0x000a, 0x39e6: 0x000a, 0x39e7: 0x000a, 0x39e8: 0x000a, 0x39e9: 0x000a, + 0x39ea: 0x000a, 0x39eb: 0x000a, 0x39ec: 0x000a, 0x39ed: 0x000a, 0x39ee: 0x000a, 0x39ef: 0x000a, + 0x39f0: 0x000a, 0x39f1: 0x000a, 0x39f2: 0x000a, 0x39f3: 0x000a, 0x39f4: 0x000a, 0x39f5: 0x000a, + 0x39f6: 0x000a, 0x39f7: 0x000a, 0x39f8: 0x000a, 0x39f9: 0x000a, 0x39fa: 0x000a, 0x39fb: 0x000a, + 0x39fc: 0x000a, 0x39fd: 0x000a, 0x39fe: 0x000a, 0x39ff: 0x000a, + // Block 0xe8, offset 0x3a00 + 0x3a00: 0x000a, 0x3a01: 0x000a, 0x3a02: 0x000a, 0x3a03: 0x000a, 0x3a04: 0x000a, 0x3a05: 0x000a, + 0x3a06: 0x000a, 0x3a07: 0x000a, 0x3a08: 0x000a, 0x3a09: 0x000a, 0x3a0a: 0x000a, 0x3a0b: 0x000a, + 0x3a0c: 0x000a, 0x3a0d: 0x000a, 0x3a0e: 0x000a, 0x3a0f: 0x000a, 0x3a10: 0x000a, 0x3a11: 0x000a, + 0x3a12: 0x000a, 0x3a13: 0x000a, 0x3a14: 0x000a, 0x3a15: 0x000a, 0x3a16: 0x000a, 0x3a17: 0x000a, + 0x3a18: 0x000a, 0x3a19: 0x000a, 0x3a1a: 0x000a, 0x3a1b: 0x000a, 0x3a1c: 0x000a, 0x3a1d: 0x000a, + 0x3a1e: 0x000a, 0x3a1f: 0x000a, 0x3a20: 0x000a, 0x3a21: 0x000a, 0x3a22: 0x000a, 0x3a23: 0x000a, + 0x3a24: 0x000a, 0x3a25: 0x000a, 0x3a26: 0x000a, 0x3a27: 0x000a, 0x3a28: 0x000a, 0x3a29: 0x000a, + 0x3a2a: 0x000a, 0x3a2b: 0x000a, 0x3a2c: 0x000a, 0x3a2d: 0x000a, 0x3a2e: 0x000a, 0x3a2f: 0x000a, + 0x3a30: 0x000a, 0x3a31: 0x000a, 0x3a33: 0x000a, 0x3a34: 0x000a, 0x3a35: 0x000a, + 0x3a36: 0x000a, 0x3a3a: 0x000a, 0x3a3b: 0x000a, + 0x3a3c: 0x000a, 0x3a3d: 0x000a, 0x3a3e: 0x000a, 0x3a3f: 0x000a, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x000a, 0x3a41: 0x000a, 0x3a42: 0x000a, 0x3a43: 0x000a, 0x3a44: 0x000a, 0x3a45: 0x000a, + 0x3a46: 0x000a, 0x3a47: 0x000a, 0x3a48: 0x000a, 0x3a49: 0x000a, 0x3a4a: 0x000a, 0x3a4b: 0x000a, + 0x3a4c: 0x000a, 0x3a4d: 0x000a, 0x3a4e: 0x000a, 0x3a4f: 0x000a, 0x3a50: 0x000a, 0x3a51: 0x000a, + 0x3a52: 0x000a, 0x3a53: 0x000a, 0x3a54: 0x000a, 
0x3a55: 0x000a, 0x3a56: 0x000a, 0x3a57: 0x000a, + 0x3a58: 0x000a, 0x3a59: 0x000a, 0x3a5a: 0x000a, 0x3a5b: 0x000a, 0x3a5c: 0x000a, 0x3a5d: 0x000a, + 0x3a5e: 0x000a, 0x3a5f: 0x000a, 0x3a60: 0x000a, 0x3a61: 0x000a, 0x3a62: 0x000a, + 0x3a65: 0x000a, 0x3a66: 0x000a, 0x3a67: 0x000a, 0x3a68: 0x000a, 0x3a69: 0x000a, + 0x3a6a: 0x000a, 0x3a6e: 0x000a, 0x3a6f: 0x000a, + 0x3a70: 0x000a, 0x3a71: 0x000a, 0x3a72: 0x000a, 0x3a73: 0x000a, 0x3a74: 0x000a, 0x3a75: 0x000a, + 0x3a76: 0x000a, 0x3a77: 0x000a, 0x3a78: 0x000a, 0x3a79: 0x000a, 0x3a7a: 0x000a, 0x3a7b: 0x000a, + 0x3a7c: 0x000a, 0x3a7d: 0x000a, 0x3a7e: 0x000a, 0x3a7f: 0x000a, + // Block 0xea, offset 0x3a80 + 0x3a80: 0x000a, 0x3a81: 0x000a, 0x3a82: 0x000a, 0x3a83: 0x000a, 0x3a84: 0x000a, 0x3a85: 0x000a, + 0x3a86: 0x000a, 0x3a87: 0x000a, 0x3a88: 0x000a, 0x3a89: 0x000a, 0x3a8a: 0x000a, + 0x3a8d: 0x000a, 0x3a8e: 0x000a, 0x3a8f: 0x000a, 0x3a90: 0x000a, 0x3a91: 0x000a, + 0x3a92: 0x000a, 0x3a93: 0x000a, 0x3a94: 0x000a, 0x3a95: 0x000a, 0x3a96: 0x000a, 0x3a97: 0x000a, + 0x3a98: 0x000a, 0x3a99: 0x000a, 0x3a9a: 0x000a, 0x3a9b: 0x000a, 0x3a9c: 0x000a, 0x3a9d: 0x000a, + 0x3a9e: 0x000a, 0x3a9f: 0x000a, 0x3aa0: 0x000a, 0x3aa1: 0x000a, 0x3aa2: 0x000a, 0x3aa3: 0x000a, + 0x3aa4: 0x000a, 0x3aa5: 0x000a, 0x3aa6: 0x000a, 0x3aa7: 0x000a, 0x3aa8: 0x000a, 0x3aa9: 0x000a, + 0x3aaa: 0x000a, 0x3aab: 0x000a, 0x3aac: 0x000a, 0x3aad: 0x000a, 0x3aae: 0x000a, 0x3aaf: 0x000a, + 0x3ab0: 0x000a, 0x3ab1: 0x000a, 0x3ab2: 0x000a, 0x3ab3: 0x000a, 0x3ab4: 0x000a, 0x3ab5: 0x000a, + 0x3ab6: 0x000a, 0x3ab7: 0x000a, 0x3ab8: 0x000a, 0x3ab9: 0x000a, 0x3aba: 0x000a, 0x3abb: 0x000a, + 0x3abc: 0x000a, 0x3abd: 0x000a, 0x3abe: 0x000a, 0x3abf: 0x000a, + // Block 0xeb, offset 0x3ac0 + 0x3ac0: 0x000a, 0x3ac1: 0x000a, 0x3ac2: 0x000a, 0x3ac3: 0x000a, 0x3ac4: 0x000a, 0x3ac5: 0x000a, + 0x3ac6: 0x000a, 0x3ac7: 0x000a, 0x3ac8: 0x000a, 0x3ac9: 0x000a, 0x3aca: 0x000a, 0x3acb: 0x000a, + 0x3acc: 0x000a, 0x3acd: 0x000a, 0x3ace: 0x000a, 0x3acf: 0x000a, 0x3ad0: 0x000a, 0x3ad1: 0x000a, + 0x3ad2: 0x000a, 0x3ad3: 0x000a, + 0x3ae0: 0x000a, 0x3ae1: 0x000a, 0x3ae2: 0x000a, 0x3ae3: 0x000a, + 0x3ae4: 0x000a, 0x3ae5: 0x000a, 0x3ae6: 0x000a, 0x3ae7: 0x000a, 0x3ae8: 0x000a, 0x3ae9: 0x000a, + 0x3aea: 0x000a, 0x3aeb: 0x000a, 0x3aec: 0x000a, 0x3aed: 0x000a, + 0x3af0: 0x000a, 0x3af1: 0x000a, 0x3af2: 0x000a, 0x3af3: 0x000a, + 0x3af8: 0x000a, 0x3af9: 0x000a, 0x3afa: 0x000a, + // Block 0xec, offset 0x3b00 + 0x3b00: 0x000a, 0x3b01: 0x000a, 0x3b02: 0x000a, + 0x3b10: 0x000a, 0x3b11: 0x000a, + 0x3b12: 0x000a, 0x3b13: 0x000a, 0x3b14: 0x000a, 0x3b15: 0x000a, + // Block 0xed, offset 0x3b40 + 0x3b7e: 0x000b, 0x3b7f: 0x000b, + // Block 0xee, offset 0x3b80 + 0x3b80: 0x000b, 0x3b81: 0x000b, 0x3b82: 0x000b, 0x3b83: 0x000b, 0x3b84: 0x000b, 0x3b85: 0x000b, + 0x3b86: 0x000b, 0x3b87: 0x000b, 0x3b88: 0x000b, 0x3b89: 0x000b, 0x3b8a: 0x000b, 0x3b8b: 0x000b, + 0x3b8c: 0x000b, 0x3b8d: 0x000b, 0x3b8e: 0x000b, 0x3b8f: 0x000b, 0x3b90: 0x000b, 0x3b91: 0x000b, + 0x3b92: 0x000b, 0x3b93: 0x000b, 0x3b94: 0x000b, 0x3b95: 0x000b, 0x3b96: 0x000b, 0x3b97: 0x000b, + 0x3b98: 0x000b, 0x3b99: 0x000b, 0x3b9a: 0x000b, 0x3b9b: 0x000b, 0x3b9c: 0x000b, 0x3b9d: 0x000b, + 0x3b9e: 0x000b, 0x3b9f: 0x000b, 0x3ba0: 0x000b, 0x3ba1: 0x000b, 0x3ba2: 0x000b, 0x3ba3: 0x000b, + 0x3ba4: 0x000b, 0x3ba5: 0x000b, 0x3ba6: 0x000b, 0x3ba7: 0x000b, 0x3ba8: 0x000b, 0x3ba9: 0x000b, + 0x3baa: 0x000b, 0x3bab: 0x000b, 0x3bac: 0x000b, 0x3bad: 0x000b, 0x3bae: 0x000b, 0x3baf: 0x000b, + 0x3bb0: 0x000b, 0x3bb1: 0x000b, 0x3bb2: 0x000b, 0x3bb3: 0x000b, 0x3bb4: 0x000b, 0x3bb5: 0x000b, + 0x3bb6: 0x000b, 
0x3bb7: 0x000b, 0x3bb8: 0x000b, 0x3bb9: 0x000b, 0x3bba: 0x000b, 0x3bbb: 0x000b, + 0x3bbc: 0x000b, 0x3bbd: 0x000b, 0x3bbe: 0x000b, 0x3bbf: 0x000b, + // Block 0xef, offset 0x3bc0 + 0x3bc0: 0x000c, 0x3bc1: 0x000c, 0x3bc2: 0x000c, 0x3bc3: 0x000c, 0x3bc4: 0x000c, 0x3bc5: 0x000c, + 0x3bc6: 0x000c, 0x3bc7: 0x000c, 0x3bc8: 0x000c, 0x3bc9: 0x000c, 0x3bca: 0x000c, 0x3bcb: 0x000c, + 0x3bcc: 0x000c, 0x3bcd: 0x000c, 0x3bce: 0x000c, 0x3bcf: 0x000c, 0x3bd0: 0x000c, 0x3bd1: 0x000c, + 0x3bd2: 0x000c, 0x3bd3: 0x000c, 0x3bd4: 0x000c, 0x3bd5: 0x000c, 0x3bd6: 0x000c, 0x3bd7: 0x000c, + 0x3bd8: 0x000c, 0x3bd9: 0x000c, 0x3bda: 0x000c, 0x3bdb: 0x000c, 0x3bdc: 0x000c, 0x3bdd: 0x000c, + 0x3bde: 0x000c, 0x3bdf: 0x000c, 0x3be0: 0x000c, 0x3be1: 0x000c, 0x3be2: 0x000c, 0x3be3: 0x000c, + 0x3be4: 0x000c, 0x3be5: 0x000c, 0x3be6: 0x000c, 0x3be7: 0x000c, 0x3be8: 0x000c, 0x3be9: 0x000c, + 0x3bea: 0x000c, 0x3beb: 0x000c, 0x3bec: 0x000c, 0x3bed: 0x000c, 0x3bee: 0x000c, 0x3bef: 0x000c, + 0x3bf0: 0x000b, 0x3bf1: 0x000b, 0x3bf2: 0x000b, 0x3bf3: 0x000b, 0x3bf4: 0x000b, 0x3bf5: 0x000b, + 0x3bf6: 0x000b, 0x3bf7: 0x000b, 0x3bf8: 0x000b, 0x3bf9: 0x000b, 0x3bfa: 0x000b, 0x3bfb: 0x000b, + 0x3bfc: 0x000b, 0x3bfd: 0x000b, 0x3bfe: 0x000b, 0x3bff: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. +var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, + 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, + // Block 0x5, offset 0x140 + 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, + 0x14d: 0x34, 0x14e: 0x35, + 0x150: 0x36, + 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, + 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, + 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, + 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, + 0x17e: 0x4b, 0x17f: 0x4c, + // Block 0x6, offset 0x180 + 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, + 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x54, + 0x190: 0x59, 0x191: 0x5a, 0x192: 0x5b, 0x193: 0x5c, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, + 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5d, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5e, 0x19e: 0x54, 0x19f: 0x5f, + 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x60, 0x1a7: 0x61, + 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x62, 0x1ae: 0x63, 0x1af: 0x54, + 0x1b3: 0x64, 0x1b5: 0x65, 0x1b7: 0x66, + 0x1b8: 0x67, 0x1b9: 0x68, 0x1ba: 0x69, 0x1bb: 0x6a, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6b, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6c, 0x1c2: 0x6d, 0x1c3: 0x6e, 0x1c7: 0x6f, + 0x1c8: 0x70, 0x1c9: 
0x71, 0x1ca: 0x72, 0x1cb: 0x73, 0x1cd: 0x74, 0x1cf: 0x75, + // Block 0x8, offset 0x200 + 0x237: 0x54, + // Block 0x9, offset 0x240 + 0x252: 0x76, 0x253: 0x77, + 0x258: 0x78, 0x259: 0x79, 0x25a: 0x7a, 0x25b: 0x7b, 0x25c: 0x7c, 0x25e: 0x7d, + 0x260: 0x7e, 0x261: 0x7f, 0x263: 0x80, 0x264: 0x81, 0x265: 0x82, 0x266: 0x83, 0x267: 0x84, + 0x268: 0x85, 0x269: 0x86, 0x26a: 0x87, 0x26b: 0x88, 0x26f: 0x89, + // Block 0xa, offset 0x280 + 0x2ac: 0x8a, 0x2ad: 0x8b, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8c, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8d, + 0x2b8: 0x8e, 0x2b9: 0x8f, 0x2ba: 0x0e, 0x2bb: 0x90, 0x2bc: 0x91, 0x2bd: 0x92, 0x2bf: 0x93, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x94, 0x2c5: 0x54, 0x2c6: 0x95, 0x2c7: 0x96, + 0x2cb: 0x97, 0x2cd: 0x98, + 0x2e0: 0x99, 0x2e1: 0x99, 0x2e2: 0x99, 0x2e3: 0x99, 0x2e4: 0x9a, 0x2e5: 0x99, 0x2e6: 0x99, 0x2e7: 0x99, + 0x2e8: 0x9b, 0x2e9: 0x99, 0x2ea: 0x99, 0x2eb: 0x9c, 0x2ec: 0x9d, 0x2ed: 0x99, 0x2ee: 0x99, 0x2ef: 0x99, + 0x2f0: 0x99, 0x2f1: 0x99, 0x2f2: 0x99, 0x2f3: 0x99, 0x2f4: 0x9e, 0x2f5: 0x99, 0x2f6: 0x99, 0x2f7: 0x99, + 0x2f8: 0x99, 0x2f9: 0x9f, 0x2fa: 0x99, 0x2fb: 0x99, 0x2fc: 0xa0, 0x2fd: 0xa1, 0x2fe: 0x99, 0x2ff: 0x99, + // Block 0xc, offset 0x300 + 0x300: 0xa2, 0x301: 0xa3, 0x302: 0xa4, 0x304: 0xa5, 0x305: 0xa6, 0x306: 0xa7, 0x307: 0xa8, + 0x308: 0xa9, 0x30b: 0xaa, 0x30c: 0x26, 0x30d: 0xab, + 0x310: 0xac, 0x311: 0xad, 0x312: 0xae, 0x313: 0xaf, 0x316: 0xb0, 0x317: 0xb1, + 0x318: 0xb2, 0x319: 0xb3, 0x31a: 0xb4, 0x31c: 0xb5, + 0x320: 0xb6, 0x327: 0xb7, + 0x328: 0xb8, 0x329: 0xb9, 0x32a: 0xba, + 0x330: 0xbb, 0x332: 0xbc, 0x334: 0xbd, 0x335: 0xbe, 0x336: 0xbf, + 0x33b: 0xc0, 0x33f: 0xc1, + // Block 0xd, offset 0x340 + 0x36b: 0xc2, 0x36c: 0xc3, + 0x37d: 0xc4, 0x37e: 0xc5, 0x37f: 0xc6, + // Block 0xe, offset 0x380 + 0x3b2: 0xc7, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xc8, 0x3c6: 0xc9, + 0x3c8: 0x54, 0x3c9: 0xca, 0x3cc: 0x54, 0x3cd: 0xcb, + 0x3db: 0xcc, 0x3dc: 0xcd, 0x3dd: 0xce, 0x3de: 0xcf, 0x3df: 0xd0, + 0x3e8: 0xd1, 0x3e9: 0xd2, 0x3ea: 0xd3, + // Block 0x10, offset 0x400 + 0x400: 0xd4, 0x404: 0xc3, + 0x40b: 0xd5, + 0x420: 0x99, 0x421: 0x99, 0x422: 0x99, 0x423: 0xd6, 0x424: 0x99, 0x425: 0xd7, 0x426: 0x99, 0x427: 0x99, + 0x428: 0x99, 0x429: 0x99, 0x42a: 0x99, 0x42b: 0x99, 0x42c: 0x99, 0x42d: 0x99, 0x42e: 0x99, 0x42f: 0x99, + 0x430: 0x99, 0x431: 0xa0, 0x432: 0x0e, 0x433: 0x99, 0x434: 0x0e, 0x435: 0xd8, 0x436: 0x99, 0x437: 0x99, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xd9, 0x43c: 0x99, 0x43d: 0x99, 0x43e: 0x99, 0x43f: 0x99, + // Block 0x11, offset 0x440 + 0x440: 0xda, 0x441: 0x54, 0x442: 0xdb, 0x443: 0xdc, 0x444: 0xdd, 0x445: 0xde, + 0x449: 0xdf, 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, + 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, + 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xe0, 0x45c: 0x54, 0x45d: 0x6a, 0x45e: 0x54, 0x45f: 0xe1, + 0x460: 0xe2, 0x461: 0xe3, 0x462: 0xe4, 0x464: 0xe5, 0x465: 0xe6, 0x466: 0xe7, 0x467: 0xe8, + 0x468: 0x54, 0x469: 0xe9, 0x46a: 0xea, + 0x47f: 0xeb, + // Block 0x12, offset 0x480 + 0x4bf: 0xeb, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xec, 0x541: 0xec, 0x542: 0xec, 0x543: 0xec, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xed, + 0x548: 0xec, 0x549: 0xec, 0x54a: 0xec, 
0x54b: 0xec, 0x54c: 0xec, 0x54d: 0xec, 0x54e: 0xec, 0x54f: 0xec, + 0x550: 0xec, 0x551: 0xec, 0x552: 0xec, 0x553: 0xec, 0x554: 0xec, 0x555: 0xec, 0x556: 0xec, 0x557: 0xec, + 0x558: 0xec, 0x559: 0xec, 0x55a: 0xec, 0x55b: 0xec, 0x55c: 0xec, 0x55d: 0xec, 0x55e: 0xec, 0x55f: 0xec, + 0x560: 0xec, 0x561: 0xec, 0x562: 0xec, 0x563: 0xec, 0x564: 0xec, 0x565: 0xec, 0x566: 0xec, 0x567: 0xec, + 0x568: 0xec, 0x569: 0xec, 0x56a: 0xec, 0x56b: 0xec, 0x56c: 0xec, 0x56d: 0xec, 0x56e: 0xec, 0x56f: 0xec, + 0x570: 0xec, 0x571: 0xec, 0x572: 0xec, 0x573: 0xec, 0x574: 0xec, 0x575: 0xec, 0x576: 0xec, 0x577: 0xec, + 0x578: 0xec, 0x579: 0xec, 0x57a: 0xec, 0x57b: 0xec, 0x57c: 0xec, 0x57d: 0xec, 0x57e: 0xec, 0x57f: 0xec, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 16952 bytes (16KiB); checksum: F50EF68C diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go index 7297cce32..2c58f09ba 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.13 +// +build go1.13,!go1.14 package norm diff --git a/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go new file mode 100644 index 000000000..10f5202c6 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/tables12.0.0.go @@ -0,0 +1,7710 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.14 + +package norm + +import "sync" + +const ( + // Version is the Unicode edition from which the tables are derived. + Version = "12.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. 
+ MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [55]uint8{ + 0, 1, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 84, 91, 103, 107, 118, 122, 129, 130, + 132, 202, 214, 216, 218, 220, 222, 224, + 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x186D + firstCCC = 0x2CA1 + endMulti = 0x2F63 + firstLeadingCCC = 0x49B1 + firstCCCZeroExcept = 0x4A7B + firstStarterWithNLead = 0x4AA2 + lastDecomp = 0x4AA4 + maxDecomp = 0x8000 +) + +// decomps: 19108 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42, + 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, + 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, + 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, + 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, + 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, + // Bytes 100 - 13f + 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42, + 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F, + 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9, + 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42, + 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB, + 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, + 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42, + 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, 0x42, + 0xCA, 0x95, 0x42, 0xCA, 0x9D, 0x42, 0xCA, 0x9F, + 0x42, 0xCA, 0xB9, 0x42, 0xCE, 0x91, 0x42, 0xCE, + 0x92, 0x42, 0xCE, 0x93, 0x42, 0xCE, 0x94, 0x42, + // Bytes 180 - 1bf + 0xCE, 0x95, 0x42, 0xCE, 0x96, 0x42, 0xCE, 0x97, + 0x42, 0xCE, 0x98, 0x42, 0xCE, 0x99, 0x42, 0xCE, + 0x9A, 0x42, 0xCE, 0x9B, 0x42, 0xCE, 0x9C, 0x42, + 0xCE, 0x9D, 0x42, 0xCE, 0x9E, 0x42, 0xCE, 0x9F, + 0x42, 0xCE, 0xA0, 0x42, 0xCE, 0xA1, 0x42, 0xCE, + 0xA3, 0x42, 0xCE, 0xA4, 0x42, 0xCE, 0xA5, 0x42, + 0xCE, 0xA6, 0x42, 0xCE, 0xA7, 0x42, 0xCE, 0xA8, + 0x42, 0xCE, 0xA9, 0x42, 0xCE, 0xB1, 0x42, 0xCE, + // Bytes 1c0 - 1ff + 
0xB2, 0x42, 0xCE, 0xB3, 0x42, 0xCE, 0xB4, 0x42, + 0xCE, 0xB5, 0x42, 0xCE, 0xB6, 0x42, 0xCE, 0xB7, + 0x42, 0xCE, 0xB8, 0x42, 0xCE, 0xB9, 0x42, 0xCE, + 0xBA, 0x42, 0xCE, 0xBB, 0x42, 0xCE, 0xBC, 0x42, + 0xCE, 0xBD, 0x42, 0xCE, 0xBE, 0x42, 0xCE, 0xBF, + 0x42, 0xCF, 0x80, 0x42, 0xCF, 0x81, 0x42, 0xCF, + 0x82, 0x42, 0xCF, 0x83, 0x42, 0xCF, 0x84, 0x42, + 0xCF, 0x85, 0x42, 0xCF, 0x86, 0x42, 0xCF, 0x87, + // Bytes 200 - 23f + 0x42, 0xCF, 0x88, 0x42, 0xCF, 0x89, 0x42, 0xCF, + 0x9C, 0x42, 0xCF, 0x9D, 0x42, 0xD0, 0xBD, 0x42, + 0xD1, 0x8A, 0x42, 0xD1, 0x8C, 0x42, 0xD7, 0x90, + 0x42, 0xD7, 0x91, 0x42, 0xD7, 0x92, 0x42, 0xD7, + 0x93, 0x42, 0xD7, 0x94, 0x42, 0xD7, 0x9B, 0x42, + 0xD7, 0x9C, 0x42, 0xD7, 0x9D, 0x42, 0xD7, 0xA2, + 0x42, 0xD7, 0xA8, 0x42, 0xD7, 0xAA, 0x42, 0xD8, + 0xA1, 0x42, 0xD8, 0xA7, 0x42, 0xD8, 0xA8, 0x42, + // Bytes 240 - 27f + 0xD8, 0xA9, 0x42, 0xD8, 0xAA, 0x42, 0xD8, 0xAB, + 0x42, 0xD8, 0xAC, 0x42, 0xD8, 0xAD, 0x42, 0xD8, + 0xAE, 0x42, 0xD8, 0xAF, 0x42, 0xD8, 0xB0, 0x42, + 0xD8, 0xB1, 0x42, 0xD8, 0xB2, 0x42, 0xD8, 0xB3, + 0x42, 0xD8, 0xB4, 0x42, 0xD8, 0xB5, 0x42, 0xD8, + 0xB6, 0x42, 0xD8, 0xB7, 0x42, 0xD8, 0xB8, 0x42, + 0xD8, 0xB9, 0x42, 0xD8, 0xBA, 0x42, 0xD9, 0x81, + 0x42, 0xD9, 0x82, 0x42, 0xD9, 0x83, 0x42, 0xD9, + // Bytes 280 - 2bf + 0x84, 0x42, 0xD9, 0x85, 0x42, 0xD9, 0x86, 0x42, + 0xD9, 0x87, 0x42, 0xD9, 0x88, 0x42, 0xD9, 0x89, + 0x42, 0xD9, 0x8A, 0x42, 0xD9, 0xAE, 0x42, 0xD9, + 0xAF, 0x42, 0xD9, 0xB1, 0x42, 0xD9, 0xB9, 0x42, + 0xD9, 0xBA, 0x42, 0xD9, 0xBB, 0x42, 0xD9, 0xBE, + 0x42, 0xD9, 0xBF, 0x42, 0xDA, 0x80, 0x42, 0xDA, + 0x83, 0x42, 0xDA, 0x84, 0x42, 0xDA, 0x86, 0x42, + 0xDA, 0x87, 0x42, 0xDA, 0x88, 0x42, 0xDA, 0x8C, + // Bytes 2c0 - 2ff + 0x42, 0xDA, 0x8D, 0x42, 0xDA, 0x8E, 0x42, 0xDA, + 0x91, 0x42, 0xDA, 0x98, 0x42, 0xDA, 0xA1, 0x42, + 0xDA, 0xA4, 0x42, 0xDA, 0xA6, 0x42, 0xDA, 0xA9, + 0x42, 0xDA, 0xAD, 0x42, 0xDA, 0xAF, 0x42, 0xDA, + 0xB1, 0x42, 0xDA, 0xB3, 0x42, 0xDA, 0xBA, 0x42, + 0xDA, 0xBB, 0x42, 0xDA, 0xBE, 0x42, 0xDB, 0x81, + 0x42, 0xDB, 0x85, 0x42, 0xDB, 0x86, 0x42, 0xDB, + 0x87, 0x42, 0xDB, 0x88, 0x42, 0xDB, 0x89, 0x42, + // Bytes 300 - 33f + 0xDB, 0x8B, 0x42, 0xDB, 0x8C, 0x42, 0xDB, 0x90, + 0x42, 0xDB, 0x92, 0x43, 0xE0, 0xBC, 0x8B, 0x43, + 0xE1, 0x83, 0x9C, 0x43, 0xE1, 0x84, 0x80, 0x43, + 0xE1, 0x84, 0x81, 0x43, 0xE1, 0x84, 0x82, 0x43, + 0xE1, 0x84, 0x83, 0x43, 0xE1, 0x84, 0x84, 0x43, + 0xE1, 0x84, 0x85, 0x43, 0xE1, 0x84, 0x86, 0x43, + 0xE1, 0x84, 0x87, 0x43, 0xE1, 0x84, 0x88, 0x43, + 0xE1, 0x84, 0x89, 0x43, 0xE1, 0x84, 0x8A, 0x43, + // Bytes 340 - 37f + 0xE1, 0x84, 0x8B, 0x43, 0xE1, 0x84, 0x8C, 0x43, + 0xE1, 0x84, 0x8D, 0x43, 0xE1, 0x84, 0x8E, 0x43, + 0xE1, 0x84, 0x8F, 0x43, 0xE1, 0x84, 0x90, 0x43, + 0xE1, 0x84, 0x91, 0x43, 0xE1, 0x84, 0x92, 0x43, + 0xE1, 0x84, 0x94, 0x43, 0xE1, 0x84, 0x95, 0x43, + 0xE1, 0x84, 0x9A, 0x43, 0xE1, 0x84, 0x9C, 0x43, + 0xE1, 0x84, 0x9D, 0x43, 0xE1, 0x84, 0x9E, 0x43, + 0xE1, 0x84, 0xA0, 0x43, 0xE1, 0x84, 0xA1, 0x43, + // Bytes 380 - 3bf + 0xE1, 0x84, 0xA2, 0x43, 0xE1, 0x84, 0xA3, 0x43, + 0xE1, 0x84, 0xA7, 0x43, 0xE1, 0x84, 0xA9, 0x43, + 0xE1, 0x84, 0xAB, 0x43, 0xE1, 0x84, 0xAC, 0x43, + 0xE1, 0x84, 0xAD, 0x43, 0xE1, 0x84, 0xAE, 0x43, + 0xE1, 0x84, 0xAF, 0x43, 0xE1, 0x84, 0xB2, 0x43, + 0xE1, 0x84, 0xB6, 0x43, 0xE1, 0x85, 0x80, 0x43, + 0xE1, 0x85, 0x87, 0x43, 0xE1, 0x85, 0x8C, 0x43, + 0xE1, 0x85, 0x97, 0x43, 0xE1, 0x85, 0x98, 0x43, + // Bytes 3c0 - 3ff + 0xE1, 0x85, 0x99, 0x43, 0xE1, 0x85, 0xA0, 0x43, + 0xE1, 0x86, 0x84, 0x43, 0xE1, 0x86, 0x85, 0x43, + 0xE1, 0x86, 0x88, 0x43, 0xE1, 0x86, 0x91, 0x43, + 0xE1, 0x86, 0x92, 0x43, 0xE1, 0x86, 
0x94, 0x43, + 0xE1, 0x86, 0x9E, 0x43, 0xE1, 0x86, 0xA1, 0x43, + 0xE1, 0x87, 0x87, 0x43, 0xE1, 0x87, 0x88, 0x43, + 0xE1, 0x87, 0x8C, 0x43, 0xE1, 0x87, 0x8E, 0x43, + 0xE1, 0x87, 0x93, 0x43, 0xE1, 0x87, 0x97, 0x43, + // Bytes 400 - 43f + 0xE1, 0x87, 0x99, 0x43, 0xE1, 0x87, 0x9D, 0x43, + 0xE1, 0x87, 0x9F, 0x43, 0xE1, 0x87, 0xB1, 0x43, + 0xE1, 0x87, 0xB2, 0x43, 0xE1, 0xB4, 0x82, 0x43, + 0xE1, 0xB4, 0x96, 0x43, 0xE1, 0xB4, 0x97, 0x43, + 0xE1, 0xB4, 0x9C, 0x43, 0xE1, 0xB4, 0x9D, 0x43, + 0xE1, 0xB4, 0xA5, 0x43, 0xE1, 0xB5, 0xBB, 0x43, + 0xE1, 0xB6, 0x85, 0x43, 0xE2, 0x80, 0x82, 0x43, + 0xE2, 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43, + // Bytes 440 - 47f + 0xE2, 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43, + 0xE2, 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43, + 0xE2, 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43, + 0xE2, 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43, + 0xE2, 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43, + 0xE2, 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43, + 0xE2, 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43, + 0xE2, 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43, + // Bytes 480 - 4bf + 0xE2, 0xB5, 0xA1, 0x43, 0xE3, 0x80, 0x81, 0x43, + 0xE3, 0x80, 0x82, 0x43, 0xE3, 0x80, 0x88, 0x43, + 0xE3, 0x80, 0x89, 0x43, 0xE3, 0x80, 0x8A, 0x43, + 0xE3, 0x80, 0x8B, 0x43, 0xE3, 0x80, 0x8C, 0x43, + 0xE3, 0x80, 0x8D, 0x43, 0xE3, 0x80, 0x8E, 0x43, + 0xE3, 0x80, 0x8F, 0x43, 0xE3, 0x80, 0x90, 0x43, + 0xE3, 0x80, 0x91, 0x43, 0xE3, 0x80, 0x92, 0x43, + 0xE3, 0x80, 0x94, 0x43, 0xE3, 0x80, 0x95, 0x43, + // Bytes 4c0 - 4ff + 0xE3, 0x80, 0x96, 0x43, 0xE3, 0x80, 0x97, 0x43, + 0xE3, 0x82, 0xA1, 0x43, 0xE3, 0x82, 0xA2, 0x43, + 0xE3, 0x82, 0xA3, 0x43, 0xE3, 0x82, 0xA4, 0x43, + 0xE3, 0x82, 0xA5, 0x43, 0xE3, 0x82, 0xA6, 0x43, + 0xE3, 0x82, 0xA7, 0x43, 0xE3, 0x82, 0xA8, 0x43, + 0xE3, 0x82, 0xA9, 0x43, 0xE3, 0x82, 0xAA, 0x43, + 0xE3, 0x82, 0xAB, 0x43, 0xE3, 0x82, 0xAD, 0x43, + 0xE3, 0x82, 0xAF, 0x43, 0xE3, 0x82, 0xB1, 0x43, + // Bytes 500 - 53f + 0xE3, 0x82, 0xB3, 0x43, 0xE3, 0x82, 0xB5, 0x43, + 0xE3, 0x82, 0xB7, 0x43, 0xE3, 0x82, 0xB9, 0x43, + 0xE3, 0x82, 0xBB, 0x43, 0xE3, 0x82, 0xBD, 0x43, + 0xE3, 0x82, 0xBF, 0x43, 0xE3, 0x83, 0x81, 0x43, + 0xE3, 0x83, 0x83, 0x43, 0xE3, 0x83, 0x84, 0x43, + 0xE3, 0x83, 0x86, 0x43, 0xE3, 0x83, 0x88, 0x43, + 0xE3, 0x83, 0x8A, 0x43, 0xE3, 0x83, 0x8B, 0x43, + 0xE3, 0x83, 0x8C, 0x43, 0xE3, 0x83, 0x8D, 0x43, + // Bytes 540 - 57f + 0xE3, 0x83, 0x8E, 0x43, 0xE3, 0x83, 0x8F, 0x43, + 0xE3, 0x83, 0x92, 0x43, 0xE3, 0x83, 0x95, 0x43, + 0xE3, 0x83, 0x98, 0x43, 0xE3, 0x83, 0x9B, 0x43, + 0xE3, 0x83, 0x9E, 0x43, 0xE3, 0x83, 0x9F, 0x43, + 0xE3, 0x83, 0xA0, 0x43, 0xE3, 0x83, 0xA1, 0x43, + 0xE3, 0x83, 0xA2, 0x43, 0xE3, 0x83, 0xA3, 0x43, + 0xE3, 0x83, 0xA4, 0x43, 0xE3, 0x83, 0xA5, 0x43, + 0xE3, 0x83, 0xA6, 0x43, 0xE3, 0x83, 0xA7, 0x43, + // Bytes 580 - 5bf + 0xE3, 0x83, 0xA8, 0x43, 0xE3, 0x83, 0xA9, 0x43, + 0xE3, 0x83, 0xAA, 0x43, 0xE3, 0x83, 0xAB, 0x43, + 0xE3, 0x83, 0xAC, 0x43, 0xE3, 0x83, 0xAD, 0x43, + 0xE3, 0x83, 0xAF, 0x43, 0xE3, 0x83, 0xB0, 0x43, + 0xE3, 0x83, 0xB1, 0x43, 0xE3, 0x83, 0xB2, 0x43, + 0xE3, 0x83, 0xB3, 0x43, 0xE3, 0x83, 0xBB, 0x43, + 0xE3, 0x83, 0xBC, 0x43, 0xE3, 0x92, 0x9E, 0x43, + 0xE3, 0x92, 0xB9, 0x43, 0xE3, 0x92, 0xBB, 0x43, + // Bytes 5c0 - 5ff + 0xE3, 0x93, 0x9F, 0x43, 0xE3, 0x94, 0x95, 0x43, + 0xE3, 0x9B, 0xAE, 0x43, 0xE3, 0x9B, 0xBC, 0x43, + 0xE3, 0x9E, 0x81, 0x43, 0xE3, 0xA0, 0xAF, 0x43, + 0xE3, 0xA1, 0xA2, 0x43, 0xE3, 0xA1, 0xBC, 0x43, + 0xE3, 0xA3, 0x87, 0x43, 0xE3, 0xA3, 0xA3, 0x43, + 0xE3, 0xA4, 0x9C, 0x43, 0xE3, 0xA4, 0xBA, 0x43, + 0xE3, 0xA8, 0xAE, 0x43, 0xE3, 0xA9, 0xAC, 0x43, + 0xE3, 0xAB, 0xA4, 
0x43, 0xE3, 0xAC, 0x88, 0x43, + // Bytes 600 - 63f + 0xE3, 0xAC, 0x99, 0x43, 0xE3, 0xAD, 0x89, 0x43, + 0xE3, 0xAE, 0x9D, 0x43, 0xE3, 0xB0, 0x98, 0x43, + 0xE3, 0xB1, 0x8E, 0x43, 0xE3, 0xB4, 0xB3, 0x43, + 0xE3, 0xB6, 0x96, 0x43, 0xE3, 0xBA, 0xAC, 0x43, + 0xE3, 0xBA, 0xB8, 0x43, 0xE3, 0xBC, 0x9B, 0x43, + 0xE3, 0xBF, 0xBC, 0x43, 0xE4, 0x80, 0x88, 0x43, + 0xE4, 0x80, 0x98, 0x43, 0xE4, 0x80, 0xB9, 0x43, + 0xE4, 0x81, 0x86, 0x43, 0xE4, 0x82, 0x96, 0x43, + // Bytes 640 - 67f + 0xE4, 0x83, 0xA3, 0x43, 0xE4, 0x84, 0xAF, 0x43, + 0xE4, 0x88, 0x82, 0x43, 0xE4, 0x88, 0xA7, 0x43, + 0xE4, 0x8A, 0xA0, 0x43, 0xE4, 0x8C, 0x81, 0x43, + 0xE4, 0x8C, 0xB4, 0x43, 0xE4, 0x8D, 0x99, 0x43, + 0xE4, 0x8F, 0x95, 0x43, 0xE4, 0x8F, 0x99, 0x43, + 0xE4, 0x90, 0x8B, 0x43, 0xE4, 0x91, 0xAB, 0x43, + 0xE4, 0x94, 0xAB, 0x43, 0xE4, 0x95, 0x9D, 0x43, + 0xE4, 0x95, 0xA1, 0x43, 0xE4, 0x95, 0xAB, 0x43, + // Bytes 680 - 6bf + 0xE4, 0x97, 0x97, 0x43, 0xE4, 0x97, 0xB9, 0x43, + 0xE4, 0x98, 0xB5, 0x43, 0xE4, 0x9A, 0xBE, 0x43, + 0xE4, 0x9B, 0x87, 0x43, 0xE4, 0xA6, 0x95, 0x43, + 0xE4, 0xA7, 0xA6, 0x43, 0xE4, 0xA9, 0xAE, 0x43, + 0xE4, 0xA9, 0xB6, 0x43, 0xE4, 0xAA, 0xB2, 0x43, + 0xE4, 0xAC, 0xB3, 0x43, 0xE4, 0xAF, 0x8E, 0x43, + 0xE4, 0xB3, 0x8E, 0x43, 0xE4, 0xB3, 0xAD, 0x43, + 0xE4, 0xB3, 0xB8, 0x43, 0xE4, 0xB5, 0x96, 0x43, + // Bytes 6c0 - 6ff + 0xE4, 0xB8, 0x80, 0x43, 0xE4, 0xB8, 0x81, 0x43, + 0xE4, 0xB8, 0x83, 0x43, 0xE4, 0xB8, 0x89, 0x43, + 0xE4, 0xB8, 0x8A, 0x43, 0xE4, 0xB8, 0x8B, 0x43, + 0xE4, 0xB8, 0x8D, 0x43, 0xE4, 0xB8, 0x99, 0x43, + 0xE4, 0xB8, 0xA6, 0x43, 0xE4, 0xB8, 0xA8, 0x43, + 0xE4, 0xB8, 0xAD, 0x43, 0xE4, 0xB8, 0xB2, 0x43, + 0xE4, 0xB8, 0xB6, 0x43, 0xE4, 0xB8, 0xB8, 0x43, + 0xE4, 0xB8, 0xB9, 0x43, 0xE4, 0xB8, 0xBD, 0x43, + // Bytes 700 - 73f + 0xE4, 0xB8, 0xBF, 0x43, 0xE4, 0xB9, 0x81, 0x43, + 0xE4, 0xB9, 0x99, 0x43, 0xE4, 0xB9, 0x9D, 0x43, + 0xE4, 0xBA, 0x82, 0x43, 0xE4, 0xBA, 0x85, 0x43, + 0xE4, 0xBA, 0x86, 0x43, 0xE4, 0xBA, 0x8C, 0x43, + 0xE4, 0xBA, 0x94, 0x43, 0xE4, 0xBA, 0xA0, 0x43, + 0xE4, 0xBA, 0xA4, 0x43, 0xE4, 0xBA, 0xAE, 0x43, + 0xE4, 0xBA, 0xBA, 0x43, 0xE4, 0xBB, 0x80, 0x43, + 0xE4, 0xBB, 0x8C, 0x43, 0xE4, 0xBB, 0xA4, 0x43, + // Bytes 740 - 77f + 0xE4, 0xBC, 0x81, 0x43, 0xE4, 0xBC, 0x91, 0x43, + 0xE4, 0xBD, 0xA0, 0x43, 0xE4, 0xBE, 0x80, 0x43, + 0xE4, 0xBE, 0x86, 0x43, 0xE4, 0xBE, 0x8B, 0x43, + 0xE4, 0xBE, 0xAE, 0x43, 0xE4, 0xBE, 0xBB, 0x43, + 0xE4, 0xBE, 0xBF, 0x43, 0xE5, 0x80, 0x82, 0x43, + 0xE5, 0x80, 0xAB, 0x43, 0xE5, 0x81, 0xBA, 0x43, + 0xE5, 0x82, 0x99, 0x43, 0xE5, 0x83, 0x8F, 0x43, + 0xE5, 0x83, 0x9A, 0x43, 0xE5, 0x83, 0xA7, 0x43, + // Bytes 780 - 7bf + 0xE5, 0x84, 0xAA, 0x43, 0xE5, 0x84, 0xBF, 0x43, + 0xE5, 0x85, 0x80, 0x43, 0xE5, 0x85, 0x85, 0x43, + 0xE5, 0x85, 0x8D, 0x43, 0xE5, 0x85, 0x94, 0x43, + 0xE5, 0x85, 0xA4, 0x43, 0xE5, 0x85, 0xA5, 0x43, + 0xE5, 0x85, 0xA7, 0x43, 0xE5, 0x85, 0xA8, 0x43, + 0xE5, 0x85, 0xA9, 0x43, 0xE5, 0x85, 0xAB, 0x43, + 0xE5, 0x85, 0xAD, 0x43, 0xE5, 0x85, 0xB7, 0x43, + 0xE5, 0x86, 0x80, 0x43, 0xE5, 0x86, 0x82, 0x43, + // Bytes 7c0 - 7ff + 0xE5, 0x86, 0x8D, 0x43, 0xE5, 0x86, 0x92, 0x43, + 0xE5, 0x86, 0x95, 0x43, 0xE5, 0x86, 0x96, 0x43, + 0xE5, 0x86, 0x97, 0x43, 0xE5, 0x86, 0x99, 0x43, + 0xE5, 0x86, 0xA4, 0x43, 0xE5, 0x86, 0xAB, 0x43, + 0xE5, 0x86, 0xAC, 0x43, 0xE5, 0x86, 0xB5, 0x43, + 0xE5, 0x86, 0xB7, 0x43, 0xE5, 0x87, 0x89, 0x43, + 0xE5, 0x87, 0x8C, 0x43, 0xE5, 0x87, 0x9C, 0x43, + 0xE5, 0x87, 0x9E, 0x43, 0xE5, 0x87, 0xA0, 0x43, + // Bytes 800 - 83f + 0xE5, 0x87, 0xB5, 0x43, 0xE5, 0x88, 0x80, 0x43, + 0xE5, 0x88, 0x83, 0x43, 0xE5, 0x88, 0x87, 0x43, + 0xE5, 0x88, 0x97, 0x43, 0xE5, 
0x88, 0x9D, 0x43, + 0xE5, 0x88, 0xA9, 0x43, 0xE5, 0x88, 0xBA, 0x43, + 0xE5, 0x88, 0xBB, 0x43, 0xE5, 0x89, 0x86, 0x43, + 0xE5, 0x89, 0x8D, 0x43, 0xE5, 0x89, 0xB2, 0x43, + 0xE5, 0x89, 0xB7, 0x43, 0xE5, 0x8A, 0x89, 0x43, + 0xE5, 0x8A, 0x9B, 0x43, 0xE5, 0x8A, 0xA3, 0x43, + // Bytes 840 - 87f + 0xE5, 0x8A, 0xB3, 0x43, 0xE5, 0x8A, 0xB4, 0x43, + 0xE5, 0x8B, 0x87, 0x43, 0xE5, 0x8B, 0x89, 0x43, + 0xE5, 0x8B, 0x92, 0x43, 0xE5, 0x8B, 0x9E, 0x43, + 0xE5, 0x8B, 0xA4, 0x43, 0xE5, 0x8B, 0xB5, 0x43, + 0xE5, 0x8B, 0xB9, 0x43, 0xE5, 0x8B, 0xBA, 0x43, + 0xE5, 0x8C, 0x85, 0x43, 0xE5, 0x8C, 0x86, 0x43, + 0xE5, 0x8C, 0x95, 0x43, 0xE5, 0x8C, 0x97, 0x43, + 0xE5, 0x8C, 0x9A, 0x43, 0xE5, 0x8C, 0xB8, 0x43, + // Bytes 880 - 8bf + 0xE5, 0x8C, 0xBB, 0x43, 0xE5, 0x8C, 0xBF, 0x43, + 0xE5, 0x8D, 0x81, 0x43, 0xE5, 0x8D, 0x84, 0x43, + 0xE5, 0x8D, 0x85, 0x43, 0xE5, 0x8D, 0x89, 0x43, + 0xE5, 0x8D, 0x91, 0x43, 0xE5, 0x8D, 0x94, 0x43, + 0xE5, 0x8D, 0x9A, 0x43, 0xE5, 0x8D, 0x9C, 0x43, + 0xE5, 0x8D, 0xA9, 0x43, 0xE5, 0x8D, 0xB0, 0x43, + 0xE5, 0x8D, 0xB3, 0x43, 0xE5, 0x8D, 0xB5, 0x43, + 0xE5, 0x8D, 0xBD, 0x43, 0xE5, 0x8D, 0xBF, 0x43, + // Bytes 8c0 - 8ff + 0xE5, 0x8E, 0x82, 0x43, 0xE5, 0x8E, 0xB6, 0x43, + 0xE5, 0x8F, 0x83, 0x43, 0xE5, 0x8F, 0x88, 0x43, + 0xE5, 0x8F, 0x8A, 0x43, 0xE5, 0x8F, 0x8C, 0x43, + 0xE5, 0x8F, 0x9F, 0x43, 0xE5, 0x8F, 0xA3, 0x43, + 0xE5, 0x8F, 0xA5, 0x43, 0xE5, 0x8F, 0xAB, 0x43, + 0xE5, 0x8F, 0xAF, 0x43, 0xE5, 0x8F, 0xB1, 0x43, + 0xE5, 0x8F, 0xB3, 0x43, 0xE5, 0x90, 0x86, 0x43, + 0xE5, 0x90, 0x88, 0x43, 0xE5, 0x90, 0x8D, 0x43, + // Bytes 900 - 93f + 0xE5, 0x90, 0x8F, 0x43, 0xE5, 0x90, 0x9D, 0x43, + 0xE5, 0x90, 0xB8, 0x43, 0xE5, 0x90, 0xB9, 0x43, + 0xE5, 0x91, 0x82, 0x43, 0xE5, 0x91, 0x88, 0x43, + 0xE5, 0x91, 0xA8, 0x43, 0xE5, 0x92, 0x9E, 0x43, + 0xE5, 0x92, 0xA2, 0x43, 0xE5, 0x92, 0xBD, 0x43, + 0xE5, 0x93, 0xB6, 0x43, 0xE5, 0x94, 0x90, 0x43, + 0xE5, 0x95, 0x8F, 0x43, 0xE5, 0x95, 0x93, 0x43, + 0xE5, 0x95, 0x95, 0x43, 0xE5, 0x95, 0xA3, 0x43, + // Bytes 940 - 97f + 0xE5, 0x96, 0x84, 0x43, 0xE5, 0x96, 0x87, 0x43, + 0xE5, 0x96, 0x99, 0x43, 0xE5, 0x96, 0x9D, 0x43, + 0xE5, 0x96, 0xAB, 0x43, 0xE5, 0x96, 0xB3, 0x43, + 0xE5, 0x96, 0xB6, 0x43, 0xE5, 0x97, 0x80, 0x43, + 0xE5, 0x97, 0x82, 0x43, 0xE5, 0x97, 0xA2, 0x43, + 0xE5, 0x98, 0x86, 0x43, 0xE5, 0x99, 0x91, 0x43, + 0xE5, 0x99, 0xA8, 0x43, 0xE5, 0x99, 0xB4, 0x43, + 0xE5, 0x9B, 0x97, 0x43, 0xE5, 0x9B, 0x9B, 0x43, + // Bytes 980 - 9bf + 0xE5, 0x9B, 0xB9, 0x43, 0xE5, 0x9C, 0x96, 0x43, + 0xE5, 0x9C, 0x97, 0x43, 0xE5, 0x9C, 0x9F, 0x43, + 0xE5, 0x9C, 0xB0, 0x43, 0xE5, 0x9E, 0x8B, 0x43, + 0xE5, 0x9F, 0x8E, 0x43, 0xE5, 0x9F, 0xB4, 0x43, + 0xE5, 0xA0, 0x8D, 0x43, 0xE5, 0xA0, 0xB1, 0x43, + 0xE5, 0xA0, 0xB2, 0x43, 0xE5, 0xA1, 0x80, 0x43, + 0xE5, 0xA1, 0x9A, 0x43, 0xE5, 0xA1, 0x9E, 0x43, + 0xE5, 0xA2, 0xA8, 0x43, 0xE5, 0xA2, 0xAC, 0x43, + // Bytes 9c0 - 9ff + 0xE5, 0xA2, 0xB3, 0x43, 0xE5, 0xA3, 0x98, 0x43, + 0xE5, 0xA3, 0x9F, 0x43, 0xE5, 0xA3, 0xAB, 0x43, + 0xE5, 0xA3, 0xAE, 0x43, 0xE5, 0xA3, 0xB0, 0x43, + 0xE5, 0xA3, 0xB2, 0x43, 0xE5, 0xA3, 0xB7, 0x43, + 0xE5, 0xA4, 0x82, 0x43, 0xE5, 0xA4, 0x86, 0x43, + 0xE5, 0xA4, 0x8A, 0x43, 0xE5, 0xA4, 0x95, 0x43, + 0xE5, 0xA4, 0x9A, 0x43, 0xE5, 0xA4, 0x9C, 0x43, + 0xE5, 0xA4, 0xA2, 0x43, 0xE5, 0xA4, 0xA7, 0x43, + // Bytes a00 - a3f + 0xE5, 0xA4, 0xA9, 0x43, 0xE5, 0xA5, 0x84, 0x43, + 0xE5, 0xA5, 0x88, 0x43, 0xE5, 0xA5, 0x91, 0x43, + 0xE5, 0xA5, 0x94, 0x43, 0xE5, 0xA5, 0xA2, 0x43, + 0xE5, 0xA5, 0xB3, 0x43, 0xE5, 0xA7, 0x98, 0x43, + 0xE5, 0xA7, 0xAC, 0x43, 0xE5, 0xA8, 0x9B, 0x43, + 0xE5, 0xA8, 0xA7, 0x43, 0xE5, 0xA9, 0xA2, 0x43, + 0xE5, 0xA9, 
0xA6, 0x43, 0xE5, 0xAA, 0xB5, 0x43, + 0xE5, 0xAC, 0x88, 0x43, 0xE5, 0xAC, 0xA8, 0x43, + // Bytes a40 - a7f + 0xE5, 0xAC, 0xBE, 0x43, 0xE5, 0xAD, 0x90, 0x43, + 0xE5, 0xAD, 0x97, 0x43, 0xE5, 0xAD, 0xA6, 0x43, + 0xE5, 0xAE, 0x80, 0x43, 0xE5, 0xAE, 0x85, 0x43, + 0xE5, 0xAE, 0x97, 0x43, 0xE5, 0xAF, 0x83, 0x43, + 0xE5, 0xAF, 0x98, 0x43, 0xE5, 0xAF, 0xA7, 0x43, + 0xE5, 0xAF, 0xAE, 0x43, 0xE5, 0xAF, 0xB3, 0x43, + 0xE5, 0xAF, 0xB8, 0x43, 0xE5, 0xAF, 0xBF, 0x43, + 0xE5, 0xB0, 0x86, 0x43, 0xE5, 0xB0, 0x8F, 0x43, + // Bytes a80 - abf + 0xE5, 0xB0, 0xA2, 0x43, 0xE5, 0xB0, 0xB8, 0x43, + 0xE5, 0xB0, 0xBF, 0x43, 0xE5, 0xB1, 0xA0, 0x43, + 0xE5, 0xB1, 0xA2, 0x43, 0xE5, 0xB1, 0xA4, 0x43, + 0xE5, 0xB1, 0xA5, 0x43, 0xE5, 0xB1, 0xAE, 0x43, + 0xE5, 0xB1, 0xB1, 0x43, 0xE5, 0xB2, 0x8D, 0x43, + 0xE5, 0xB3, 0x80, 0x43, 0xE5, 0xB4, 0x99, 0x43, + 0xE5, 0xB5, 0x83, 0x43, 0xE5, 0xB5, 0x90, 0x43, + 0xE5, 0xB5, 0xAB, 0x43, 0xE5, 0xB5, 0xAE, 0x43, + // Bytes ac0 - aff + 0xE5, 0xB5, 0xBC, 0x43, 0xE5, 0xB6, 0xB2, 0x43, + 0xE5, 0xB6, 0xBA, 0x43, 0xE5, 0xB7, 0x9B, 0x43, + 0xE5, 0xB7, 0xA1, 0x43, 0xE5, 0xB7, 0xA2, 0x43, + 0xE5, 0xB7, 0xA5, 0x43, 0xE5, 0xB7, 0xA6, 0x43, + 0xE5, 0xB7, 0xB1, 0x43, 0xE5, 0xB7, 0xBD, 0x43, + 0xE5, 0xB7, 0xBE, 0x43, 0xE5, 0xB8, 0xA8, 0x43, + 0xE5, 0xB8, 0xBD, 0x43, 0xE5, 0xB9, 0xA9, 0x43, + 0xE5, 0xB9, 0xB2, 0x43, 0xE5, 0xB9, 0xB4, 0x43, + // Bytes b00 - b3f + 0xE5, 0xB9, 0xBA, 0x43, 0xE5, 0xB9, 0xBC, 0x43, + 0xE5, 0xB9, 0xBF, 0x43, 0xE5, 0xBA, 0xA6, 0x43, + 0xE5, 0xBA, 0xB0, 0x43, 0xE5, 0xBA, 0xB3, 0x43, + 0xE5, 0xBA, 0xB6, 0x43, 0xE5, 0xBB, 0x89, 0x43, + 0xE5, 0xBB, 0x8A, 0x43, 0xE5, 0xBB, 0x92, 0x43, + 0xE5, 0xBB, 0x93, 0x43, 0xE5, 0xBB, 0x99, 0x43, + 0xE5, 0xBB, 0xAC, 0x43, 0xE5, 0xBB, 0xB4, 0x43, + 0xE5, 0xBB, 0xBE, 0x43, 0xE5, 0xBC, 0x84, 0x43, + // Bytes b40 - b7f + 0xE5, 0xBC, 0x8B, 0x43, 0xE5, 0xBC, 0x93, 0x43, + 0xE5, 0xBC, 0xA2, 0x43, 0xE5, 0xBD, 0x90, 0x43, + 0xE5, 0xBD, 0x93, 0x43, 0xE5, 0xBD, 0xA1, 0x43, + 0xE5, 0xBD, 0xA2, 0x43, 0xE5, 0xBD, 0xA9, 0x43, + 0xE5, 0xBD, 0xAB, 0x43, 0xE5, 0xBD, 0xB3, 0x43, + 0xE5, 0xBE, 0x8B, 0x43, 0xE5, 0xBE, 0x8C, 0x43, + 0xE5, 0xBE, 0x97, 0x43, 0xE5, 0xBE, 0x9A, 0x43, + 0xE5, 0xBE, 0xA9, 0x43, 0xE5, 0xBE, 0xAD, 0x43, + // Bytes b80 - bbf + 0xE5, 0xBF, 0x83, 0x43, 0xE5, 0xBF, 0x8D, 0x43, + 0xE5, 0xBF, 0x97, 0x43, 0xE5, 0xBF, 0xB5, 0x43, + 0xE5, 0xBF, 0xB9, 0x43, 0xE6, 0x80, 0x92, 0x43, + 0xE6, 0x80, 0x9C, 0x43, 0xE6, 0x81, 0xB5, 0x43, + 0xE6, 0x82, 0x81, 0x43, 0xE6, 0x82, 0x94, 0x43, + 0xE6, 0x83, 0x87, 0x43, 0xE6, 0x83, 0x98, 0x43, + 0xE6, 0x83, 0xA1, 0x43, 0xE6, 0x84, 0x88, 0x43, + 0xE6, 0x85, 0x84, 0x43, 0xE6, 0x85, 0x88, 0x43, + // Bytes bc0 - bff + 0xE6, 0x85, 0x8C, 0x43, 0xE6, 0x85, 0x8E, 0x43, + 0xE6, 0x85, 0xA0, 0x43, 0xE6, 0x85, 0xA8, 0x43, + 0xE6, 0x85, 0xBA, 0x43, 0xE6, 0x86, 0x8E, 0x43, + 0xE6, 0x86, 0x90, 0x43, 0xE6, 0x86, 0xA4, 0x43, + 0xE6, 0x86, 0xAF, 0x43, 0xE6, 0x86, 0xB2, 0x43, + 0xE6, 0x87, 0x9E, 0x43, 0xE6, 0x87, 0xB2, 0x43, + 0xE6, 0x87, 0xB6, 0x43, 0xE6, 0x88, 0x80, 0x43, + 0xE6, 0x88, 0x88, 0x43, 0xE6, 0x88, 0x90, 0x43, + // Bytes c00 - c3f + 0xE6, 0x88, 0x9B, 0x43, 0xE6, 0x88, 0xAE, 0x43, + 0xE6, 0x88, 0xB4, 0x43, 0xE6, 0x88, 0xB6, 0x43, + 0xE6, 0x89, 0x8B, 0x43, 0xE6, 0x89, 0x93, 0x43, + 0xE6, 0x89, 0x9D, 0x43, 0xE6, 0x8A, 0x95, 0x43, + 0xE6, 0x8A, 0xB1, 0x43, 0xE6, 0x8B, 0x89, 0x43, + 0xE6, 0x8B, 0x8F, 0x43, 0xE6, 0x8B, 0x93, 0x43, + 0xE6, 0x8B, 0x94, 0x43, 0xE6, 0x8B, 0xBC, 0x43, + 0xE6, 0x8B, 0xBE, 0x43, 0xE6, 0x8C, 0x87, 0x43, + // Bytes c40 - c7f + 0xE6, 0x8C, 0xBD, 0x43, 0xE6, 0x8D, 0x90, 0x43, + 0xE6, 0x8D, 0x95, 0x43, 
0xE6, 0x8D, 0xA8, 0x43, + 0xE6, 0x8D, 0xBB, 0x43, 0xE6, 0x8E, 0x83, 0x43, + 0xE6, 0x8E, 0xA0, 0x43, 0xE6, 0x8E, 0xA9, 0x43, + 0xE6, 0x8F, 0x84, 0x43, 0xE6, 0x8F, 0x85, 0x43, + 0xE6, 0x8F, 0xA4, 0x43, 0xE6, 0x90, 0x9C, 0x43, + 0xE6, 0x90, 0xA2, 0x43, 0xE6, 0x91, 0x92, 0x43, + 0xE6, 0x91, 0xA9, 0x43, 0xE6, 0x91, 0xB7, 0x43, + // Bytes c80 - cbf + 0xE6, 0x91, 0xBE, 0x43, 0xE6, 0x92, 0x9A, 0x43, + 0xE6, 0x92, 0x9D, 0x43, 0xE6, 0x93, 0x84, 0x43, + 0xE6, 0x94, 0xAF, 0x43, 0xE6, 0x94, 0xB4, 0x43, + 0xE6, 0x95, 0x8F, 0x43, 0xE6, 0x95, 0x96, 0x43, + 0xE6, 0x95, 0xAC, 0x43, 0xE6, 0x95, 0xB8, 0x43, + 0xE6, 0x96, 0x87, 0x43, 0xE6, 0x96, 0x97, 0x43, + 0xE6, 0x96, 0x99, 0x43, 0xE6, 0x96, 0xA4, 0x43, + 0xE6, 0x96, 0xB0, 0x43, 0xE6, 0x96, 0xB9, 0x43, + // Bytes cc0 - cff + 0xE6, 0x97, 0x85, 0x43, 0xE6, 0x97, 0xA0, 0x43, + 0xE6, 0x97, 0xA2, 0x43, 0xE6, 0x97, 0xA3, 0x43, + 0xE6, 0x97, 0xA5, 0x43, 0xE6, 0x98, 0x93, 0x43, + 0xE6, 0x98, 0xA0, 0x43, 0xE6, 0x99, 0x89, 0x43, + 0xE6, 0x99, 0xB4, 0x43, 0xE6, 0x9A, 0x88, 0x43, + 0xE6, 0x9A, 0x91, 0x43, 0xE6, 0x9A, 0x9C, 0x43, + 0xE6, 0x9A, 0xB4, 0x43, 0xE6, 0x9B, 0x86, 0x43, + 0xE6, 0x9B, 0xB0, 0x43, 0xE6, 0x9B, 0xB4, 0x43, + // Bytes d00 - d3f + 0xE6, 0x9B, 0xB8, 0x43, 0xE6, 0x9C, 0x80, 0x43, + 0xE6, 0x9C, 0x88, 0x43, 0xE6, 0x9C, 0x89, 0x43, + 0xE6, 0x9C, 0x97, 0x43, 0xE6, 0x9C, 0x9B, 0x43, + 0xE6, 0x9C, 0xA1, 0x43, 0xE6, 0x9C, 0xA8, 0x43, + 0xE6, 0x9D, 0x8E, 0x43, 0xE6, 0x9D, 0x93, 0x43, + 0xE6, 0x9D, 0x96, 0x43, 0xE6, 0x9D, 0x9E, 0x43, + 0xE6, 0x9D, 0xBB, 0x43, 0xE6, 0x9E, 0x85, 0x43, + 0xE6, 0x9E, 0x97, 0x43, 0xE6, 0x9F, 0xB3, 0x43, + // Bytes d40 - d7f + 0xE6, 0x9F, 0xBA, 0x43, 0xE6, 0xA0, 0x97, 0x43, + 0xE6, 0xA0, 0x9F, 0x43, 0xE6, 0xA0, 0xAA, 0x43, + 0xE6, 0xA1, 0x92, 0x43, 0xE6, 0xA2, 0x81, 0x43, + 0xE6, 0xA2, 0x85, 0x43, 0xE6, 0xA2, 0x8E, 0x43, + 0xE6, 0xA2, 0xA8, 0x43, 0xE6, 0xA4, 0x94, 0x43, + 0xE6, 0xA5, 0x82, 0x43, 0xE6, 0xA6, 0xA3, 0x43, + 0xE6, 0xA7, 0xAA, 0x43, 0xE6, 0xA8, 0x82, 0x43, + 0xE6, 0xA8, 0x93, 0x43, 0xE6, 0xAA, 0xA8, 0x43, + // Bytes d80 - dbf + 0xE6, 0xAB, 0x93, 0x43, 0xE6, 0xAB, 0x9B, 0x43, + 0xE6, 0xAC, 0x84, 0x43, 0xE6, 0xAC, 0xA0, 0x43, + 0xE6, 0xAC, 0xA1, 0x43, 0xE6, 0xAD, 0x94, 0x43, + 0xE6, 0xAD, 0xA2, 0x43, 0xE6, 0xAD, 0xA3, 0x43, + 0xE6, 0xAD, 0xB2, 0x43, 0xE6, 0xAD, 0xB7, 0x43, + 0xE6, 0xAD, 0xB9, 0x43, 0xE6, 0xAE, 0x9F, 0x43, + 0xE6, 0xAE, 0xAE, 0x43, 0xE6, 0xAE, 0xB3, 0x43, + 0xE6, 0xAE, 0xBA, 0x43, 0xE6, 0xAE, 0xBB, 0x43, + // Bytes dc0 - dff + 0xE6, 0xAF, 0x8B, 0x43, 0xE6, 0xAF, 0x8D, 0x43, + 0xE6, 0xAF, 0x94, 0x43, 0xE6, 0xAF, 0x9B, 0x43, + 0xE6, 0xB0, 0x8F, 0x43, 0xE6, 0xB0, 0x94, 0x43, + 0xE6, 0xB0, 0xB4, 0x43, 0xE6, 0xB1, 0x8E, 0x43, + 0xE6, 0xB1, 0xA7, 0x43, 0xE6, 0xB2, 0x88, 0x43, + 0xE6, 0xB2, 0xBF, 0x43, 0xE6, 0xB3, 0x8C, 0x43, + 0xE6, 0xB3, 0x8D, 0x43, 0xE6, 0xB3, 0xA5, 0x43, + 0xE6, 0xB3, 0xA8, 0x43, 0xE6, 0xB4, 0x96, 0x43, + // Bytes e00 - e3f + 0xE6, 0xB4, 0x9B, 0x43, 0xE6, 0xB4, 0x9E, 0x43, + 0xE6, 0xB4, 0xB4, 0x43, 0xE6, 0xB4, 0xBE, 0x43, + 0xE6, 0xB5, 0x81, 0x43, 0xE6, 0xB5, 0xA9, 0x43, + 0xE6, 0xB5, 0xAA, 0x43, 0xE6, 0xB5, 0xB7, 0x43, + 0xE6, 0xB5, 0xB8, 0x43, 0xE6, 0xB6, 0x85, 0x43, + 0xE6, 0xB7, 0x8B, 0x43, 0xE6, 0xB7, 0x9A, 0x43, + 0xE6, 0xB7, 0xAA, 0x43, 0xE6, 0xB7, 0xB9, 0x43, + 0xE6, 0xB8, 0x9A, 0x43, 0xE6, 0xB8, 0xAF, 0x43, + // Bytes e40 - e7f + 0xE6, 0xB9, 0xAE, 0x43, 0xE6, 0xBA, 0x80, 0x43, + 0xE6, 0xBA, 0x9C, 0x43, 0xE6, 0xBA, 0xBA, 0x43, + 0xE6, 0xBB, 0x87, 0x43, 0xE6, 0xBB, 0x8B, 0x43, + 0xE6, 0xBB, 0x91, 0x43, 0xE6, 0xBB, 0x9B, 0x43, + 0xE6, 0xBC, 0x8F, 0x43, 0xE6, 0xBC, 0x94, 0x43, + 0xE6, 
0xBC, 0xA2, 0x43, 0xE6, 0xBC, 0xA3, 0x43, + 0xE6, 0xBD, 0xAE, 0x43, 0xE6, 0xBF, 0x86, 0x43, + 0xE6, 0xBF, 0xAB, 0x43, 0xE6, 0xBF, 0xBE, 0x43, + // Bytes e80 - ebf + 0xE7, 0x80, 0x9B, 0x43, 0xE7, 0x80, 0x9E, 0x43, + 0xE7, 0x80, 0xB9, 0x43, 0xE7, 0x81, 0x8A, 0x43, + 0xE7, 0x81, 0xAB, 0x43, 0xE7, 0x81, 0xB0, 0x43, + 0xE7, 0x81, 0xB7, 0x43, 0xE7, 0x81, 0xBD, 0x43, + 0xE7, 0x82, 0x99, 0x43, 0xE7, 0x82, 0xAD, 0x43, + 0xE7, 0x83, 0x88, 0x43, 0xE7, 0x83, 0x99, 0x43, + 0xE7, 0x84, 0xA1, 0x43, 0xE7, 0x85, 0x85, 0x43, + 0xE7, 0x85, 0x89, 0x43, 0xE7, 0x85, 0xAE, 0x43, + // Bytes ec0 - eff + 0xE7, 0x86, 0x9C, 0x43, 0xE7, 0x87, 0x8E, 0x43, + 0xE7, 0x87, 0x90, 0x43, 0xE7, 0x88, 0x90, 0x43, + 0xE7, 0x88, 0x9B, 0x43, 0xE7, 0x88, 0xA8, 0x43, + 0xE7, 0x88, 0xAA, 0x43, 0xE7, 0x88, 0xAB, 0x43, + 0xE7, 0x88, 0xB5, 0x43, 0xE7, 0x88, 0xB6, 0x43, + 0xE7, 0x88, 0xBB, 0x43, 0xE7, 0x88, 0xBF, 0x43, + 0xE7, 0x89, 0x87, 0x43, 0xE7, 0x89, 0x90, 0x43, + 0xE7, 0x89, 0x99, 0x43, 0xE7, 0x89, 0x9B, 0x43, + // Bytes f00 - f3f + 0xE7, 0x89, 0xA2, 0x43, 0xE7, 0x89, 0xB9, 0x43, + 0xE7, 0x8A, 0x80, 0x43, 0xE7, 0x8A, 0x95, 0x43, + 0xE7, 0x8A, 0xAC, 0x43, 0xE7, 0x8A, 0xAF, 0x43, + 0xE7, 0x8B, 0x80, 0x43, 0xE7, 0x8B, 0xBC, 0x43, + 0xE7, 0x8C, 0xAA, 0x43, 0xE7, 0x8D, 0xB5, 0x43, + 0xE7, 0x8D, 0xBA, 0x43, 0xE7, 0x8E, 0x84, 0x43, + 0xE7, 0x8E, 0x87, 0x43, 0xE7, 0x8E, 0x89, 0x43, + 0xE7, 0x8E, 0x8B, 0x43, 0xE7, 0x8E, 0xA5, 0x43, + // Bytes f40 - f7f + 0xE7, 0x8E, 0xB2, 0x43, 0xE7, 0x8F, 0x9E, 0x43, + 0xE7, 0x90, 0x86, 0x43, 0xE7, 0x90, 0x89, 0x43, + 0xE7, 0x90, 0xA2, 0x43, 0xE7, 0x91, 0x87, 0x43, + 0xE7, 0x91, 0x9C, 0x43, 0xE7, 0x91, 0xA9, 0x43, + 0xE7, 0x91, 0xB1, 0x43, 0xE7, 0x92, 0x85, 0x43, + 0xE7, 0x92, 0x89, 0x43, 0xE7, 0x92, 0x98, 0x43, + 0xE7, 0x93, 0x8A, 0x43, 0xE7, 0x93, 0x9C, 0x43, + 0xE7, 0x93, 0xA6, 0x43, 0xE7, 0x94, 0x86, 0x43, + // Bytes f80 - fbf + 0xE7, 0x94, 0x98, 0x43, 0xE7, 0x94, 0x9F, 0x43, + 0xE7, 0x94, 0xA4, 0x43, 0xE7, 0x94, 0xA8, 0x43, + 0xE7, 0x94, 0xB0, 0x43, 0xE7, 0x94, 0xB2, 0x43, + 0xE7, 0x94, 0xB3, 0x43, 0xE7, 0x94, 0xB7, 0x43, + 0xE7, 0x94, 0xBB, 0x43, 0xE7, 0x94, 0xBE, 0x43, + 0xE7, 0x95, 0x99, 0x43, 0xE7, 0x95, 0xA5, 0x43, + 0xE7, 0x95, 0xB0, 0x43, 0xE7, 0x96, 0x8B, 0x43, + 0xE7, 0x96, 0x92, 0x43, 0xE7, 0x97, 0xA2, 0x43, + // Bytes fc0 - fff + 0xE7, 0x98, 0x90, 0x43, 0xE7, 0x98, 0x9D, 0x43, + 0xE7, 0x98, 0x9F, 0x43, 0xE7, 0x99, 0x82, 0x43, + 0xE7, 0x99, 0xA9, 0x43, 0xE7, 0x99, 0xB6, 0x43, + 0xE7, 0x99, 0xBD, 0x43, 0xE7, 0x9A, 0xAE, 0x43, + 0xE7, 0x9A, 0xBF, 0x43, 0xE7, 0x9B, 0x8A, 0x43, + 0xE7, 0x9B, 0x9B, 0x43, 0xE7, 0x9B, 0xA3, 0x43, + 0xE7, 0x9B, 0xA7, 0x43, 0xE7, 0x9B, 0xAE, 0x43, + 0xE7, 0x9B, 0xB4, 0x43, 0xE7, 0x9C, 0x81, 0x43, + // Bytes 1000 - 103f + 0xE7, 0x9C, 0x9E, 0x43, 0xE7, 0x9C, 0x9F, 0x43, + 0xE7, 0x9D, 0x80, 0x43, 0xE7, 0x9D, 0x8A, 0x43, + 0xE7, 0x9E, 0x8B, 0x43, 0xE7, 0x9E, 0xA7, 0x43, + 0xE7, 0x9F, 0x9B, 0x43, 0xE7, 0x9F, 0xA2, 0x43, + 0xE7, 0x9F, 0xB3, 0x43, 0xE7, 0xA1, 0x8E, 0x43, + 0xE7, 0xA1, 0xAB, 0x43, 0xE7, 0xA2, 0x8C, 0x43, + 0xE7, 0xA2, 0x91, 0x43, 0xE7, 0xA3, 0x8A, 0x43, + 0xE7, 0xA3, 0x8C, 0x43, 0xE7, 0xA3, 0xBB, 0x43, + // Bytes 1040 - 107f + 0xE7, 0xA4, 0xAA, 0x43, 0xE7, 0xA4, 0xBA, 0x43, + 0xE7, 0xA4, 0xBC, 0x43, 0xE7, 0xA4, 0xBE, 0x43, + 0xE7, 0xA5, 0x88, 0x43, 0xE7, 0xA5, 0x89, 0x43, + 0xE7, 0xA5, 0x90, 0x43, 0xE7, 0xA5, 0x96, 0x43, + 0xE7, 0xA5, 0x9D, 0x43, 0xE7, 0xA5, 0x9E, 0x43, + 0xE7, 0xA5, 0xA5, 0x43, 0xE7, 0xA5, 0xBF, 0x43, + 0xE7, 0xA6, 0x81, 0x43, 0xE7, 0xA6, 0x8D, 0x43, + 0xE7, 0xA6, 0x8E, 0x43, 0xE7, 0xA6, 0x8F, 0x43, + // Bytes 1080 - 10bf + 0xE7, 0xA6, 
0xAE, 0x43, 0xE7, 0xA6, 0xB8, 0x43, + 0xE7, 0xA6, 0xBE, 0x43, 0xE7, 0xA7, 0x8A, 0x43, + 0xE7, 0xA7, 0x98, 0x43, 0xE7, 0xA7, 0xAB, 0x43, + 0xE7, 0xA8, 0x9C, 0x43, 0xE7, 0xA9, 0x80, 0x43, + 0xE7, 0xA9, 0x8A, 0x43, 0xE7, 0xA9, 0x8F, 0x43, + 0xE7, 0xA9, 0xB4, 0x43, 0xE7, 0xA9, 0xBA, 0x43, + 0xE7, 0xAA, 0x81, 0x43, 0xE7, 0xAA, 0xB1, 0x43, + 0xE7, 0xAB, 0x8B, 0x43, 0xE7, 0xAB, 0xAE, 0x43, + // Bytes 10c0 - 10ff + 0xE7, 0xAB, 0xB9, 0x43, 0xE7, 0xAC, 0xA0, 0x43, + 0xE7, 0xAE, 0x8F, 0x43, 0xE7, 0xAF, 0x80, 0x43, + 0xE7, 0xAF, 0x86, 0x43, 0xE7, 0xAF, 0x89, 0x43, + 0xE7, 0xB0, 0xBE, 0x43, 0xE7, 0xB1, 0xA0, 0x43, + 0xE7, 0xB1, 0xB3, 0x43, 0xE7, 0xB1, 0xBB, 0x43, + 0xE7, 0xB2, 0x92, 0x43, 0xE7, 0xB2, 0xBE, 0x43, + 0xE7, 0xB3, 0x92, 0x43, 0xE7, 0xB3, 0x96, 0x43, + 0xE7, 0xB3, 0xA3, 0x43, 0xE7, 0xB3, 0xA7, 0x43, + // Bytes 1100 - 113f + 0xE7, 0xB3, 0xA8, 0x43, 0xE7, 0xB3, 0xB8, 0x43, + 0xE7, 0xB4, 0x80, 0x43, 0xE7, 0xB4, 0x90, 0x43, + 0xE7, 0xB4, 0xA2, 0x43, 0xE7, 0xB4, 0xAF, 0x43, + 0xE7, 0xB5, 0x82, 0x43, 0xE7, 0xB5, 0x9B, 0x43, + 0xE7, 0xB5, 0xA3, 0x43, 0xE7, 0xB6, 0xA0, 0x43, + 0xE7, 0xB6, 0xBE, 0x43, 0xE7, 0xB7, 0x87, 0x43, + 0xE7, 0xB7, 0xB4, 0x43, 0xE7, 0xB8, 0x82, 0x43, + 0xE7, 0xB8, 0x89, 0x43, 0xE7, 0xB8, 0xB7, 0x43, + // Bytes 1140 - 117f + 0xE7, 0xB9, 0x81, 0x43, 0xE7, 0xB9, 0x85, 0x43, + 0xE7, 0xBC, 0xB6, 0x43, 0xE7, 0xBC, 0xBE, 0x43, + 0xE7, 0xBD, 0x91, 0x43, 0xE7, 0xBD, 0xB2, 0x43, + 0xE7, 0xBD, 0xB9, 0x43, 0xE7, 0xBD, 0xBA, 0x43, + 0xE7, 0xBE, 0x85, 0x43, 0xE7, 0xBE, 0x8A, 0x43, + 0xE7, 0xBE, 0x95, 0x43, 0xE7, 0xBE, 0x9A, 0x43, + 0xE7, 0xBE, 0xBD, 0x43, 0xE7, 0xBF, 0xBA, 0x43, + 0xE8, 0x80, 0x81, 0x43, 0xE8, 0x80, 0x85, 0x43, + // Bytes 1180 - 11bf + 0xE8, 0x80, 0x8C, 0x43, 0xE8, 0x80, 0x92, 0x43, + 0xE8, 0x80, 0xB3, 0x43, 0xE8, 0x81, 0x86, 0x43, + 0xE8, 0x81, 0xA0, 0x43, 0xE8, 0x81, 0xAF, 0x43, + 0xE8, 0x81, 0xB0, 0x43, 0xE8, 0x81, 0xBE, 0x43, + 0xE8, 0x81, 0xBF, 0x43, 0xE8, 0x82, 0x89, 0x43, + 0xE8, 0x82, 0x8B, 0x43, 0xE8, 0x82, 0xAD, 0x43, + 0xE8, 0x82, 0xB2, 0x43, 0xE8, 0x84, 0x83, 0x43, + 0xE8, 0x84, 0xBE, 0x43, 0xE8, 0x87, 0x98, 0x43, + // Bytes 11c0 - 11ff + 0xE8, 0x87, 0xA3, 0x43, 0xE8, 0x87, 0xA8, 0x43, + 0xE8, 0x87, 0xAA, 0x43, 0xE8, 0x87, 0xAD, 0x43, + 0xE8, 0x87, 0xB3, 0x43, 0xE8, 0x87, 0xBC, 0x43, + 0xE8, 0x88, 0x81, 0x43, 0xE8, 0x88, 0x84, 0x43, + 0xE8, 0x88, 0x8C, 0x43, 0xE8, 0x88, 0x98, 0x43, + 0xE8, 0x88, 0x9B, 0x43, 0xE8, 0x88, 0x9F, 0x43, + 0xE8, 0x89, 0xAE, 0x43, 0xE8, 0x89, 0xAF, 0x43, + 0xE8, 0x89, 0xB2, 0x43, 0xE8, 0x89, 0xB8, 0x43, + // Bytes 1200 - 123f + 0xE8, 0x89, 0xB9, 0x43, 0xE8, 0x8A, 0x8B, 0x43, + 0xE8, 0x8A, 0x91, 0x43, 0xE8, 0x8A, 0x9D, 0x43, + 0xE8, 0x8A, 0xB1, 0x43, 0xE8, 0x8A, 0xB3, 0x43, + 0xE8, 0x8A, 0xBD, 0x43, 0xE8, 0x8B, 0xA5, 0x43, + 0xE8, 0x8B, 0xA6, 0x43, 0xE8, 0x8C, 0x9D, 0x43, + 0xE8, 0x8C, 0xA3, 0x43, 0xE8, 0x8C, 0xB6, 0x43, + 0xE8, 0x8D, 0x92, 0x43, 0xE8, 0x8D, 0x93, 0x43, + 0xE8, 0x8D, 0xA3, 0x43, 0xE8, 0x8E, 0xAD, 0x43, + // Bytes 1240 - 127f + 0xE8, 0x8E, 0xBD, 0x43, 0xE8, 0x8F, 0x89, 0x43, + 0xE8, 0x8F, 0x8A, 0x43, 0xE8, 0x8F, 0x8C, 0x43, + 0xE8, 0x8F, 0x9C, 0x43, 0xE8, 0x8F, 0xA7, 0x43, + 0xE8, 0x8F, 0xAF, 0x43, 0xE8, 0x8F, 0xB1, 0x43, + 0xE8, 0x90, 0xBD, 0x43, 0xE8, 0x91, 0x89, 0x43, + 0xE8, 0x91, 0x97, 0x43, 0xE8, 0x93, 0xAE, 0x43, + 0xE8, 0x93, 0xB1, 0x43, 0xE8, 0x93, 0xB3, 0x43, + 0xE8, 0x93, 0xBC, 0x43, 0xE8, 0x94, 0x96, 0x43, + // Bytes 1280 - 12bf + 0xE8, 0x95, 0xA4, 0x43, 0xE8, 0x97, 0x8D, 0x43, + 0xE8, 0x97, 0xBA, 0x43, 0xE8, 0x98, 0x86, 0x43, + 0xE8, 0x98, 0x92, 0x43, 0xE8, 0x98, 0xAD, 0x43, + 0xE8, 0x98, 0xBF, 0x43, 0xE8, 
0x99, 0x8D, 0x43, + 0xE8, 0x99, 0x90, 0x43, 0xE8, 0x99, 0x9C, 0x43, + 0xE8, 0x99, 0xA7, 0x43, 0xE8, 0x99, 0xA9, 0x43, + 0xE8, 0x99, 0xAB, 0x43, 0xE8, 0x9A, 0x88, 0x43, + 0xE8, 0x9A, 0xA9, 0x43, 0xE8, 0x9B, 0xA2, 0x43, + // Bytes 12c0 - 12ff + 0xE8, 0x9C, 0x8E, 0x43, 0xE8, 0x9C, 0xA8, 0x43, + 0xE8, 0x9D, 0xAB, 0x43, 0xE8, 0x9D, 0xB9, 0x43, + 0xE8, 0x9E, 0x86, 0x43, 0xE8, 0x9E, 0xBA, 0x43, + 0xE8, 0x9F, 0xA1, 0x43, 0xE8, 0xA0, 0x81, 0x43, + 0xE8, 0xA0, 0x9F, 0x43, 0xE8, 0xA1, 0x80, 0x43, + 0xE8, 0xA1, 0x8C, 0x43, 0xE8, 0xA1, 0xA0, 0x43, + 0xE8, 0xA1, 0xA3, 0x43, 0xE8, 0xA3, 0x82, 0x43, + 0xE8, 0xA3, 0x8F, 0x43, 0xE8, 0xA3, 0x97, 0x43, + // Bytes 1300 - 133f + 0xE8, 0xA3, 0x9E, 0x43, 0xE8, 0xA3, 0xA1, 0x43, + 0xE8, 0xA3, 0xB8, 0x43, 0xE8, 0xA3, 0xBA, 0x43, + 0xE8, 0xA4, 0x90, 0x43, 0xE8, 0xA5, 0x81, 0x43, + 0xE8, 0xA5, 0xA4, 0x43, 0xE8, 0xA5, 0xBE, 0x43, + 0xE8, 0xA6, 0x86, 0x43, 0xE8, 0xA6, 0x8B, 0x43, + 0xE8, 0xA6, 0x96, 0x43, 0xE8, 0xA7, 0x92, 0x43, + 0xE8, 0xA7, 0xA3, 0x43, 0xE8, 0xA8, 0x80, 0x43, + 0xE8, 0xAA, 0xA0, 0x43, 0xE8, 0xAA, 0xAA, 0x43, + // Bytes 1340 - 137f + 0xE8, 0xAA, 0xBF, 0x43, 0xE8, 0xAB, 0x8B, 0x43, + 0xE8, 0xAB, 0x92, 0x43, 0xE8, 0xAB, 0x96, 0x43, + 0xE8, 0xAB, 0xAD, 0x43, 0xE8, 0xAB, 0xB8, 0x43, + 0xE8, 0xAB, 0xBE, 0x43, 0xE8, 0xAC, 0x81, 0x43, + 0xE8, 0xAC, 0xB9, 0x43, 0xE8, 0xAD, 0x98, 0x43, + 0xE8, 0xAE, 0x80, 0x43, 0xE8, 0xAE, 0x8A, 0x43, + 0xE8, 0xB0, 0xB7, 0x43, 0xE8, 0xB1, 0x86, 0x43, + 0xE8, 0xB1, 0x88, 0x43, 0xE8, 0xB1, 0x95, 0x43, + // Bytes 1380 - 13bf + 0xE8, 0xB1, 0xB8, 0x43, 0xE8, 0xB2, 0x9D, 0x43, + 0xE8, 0xB2, 0xA1, 0x43, 0xE8, 0xB2, 0xA9, 0x43, + 0xE8, 0xB2, 0xAB, 0x43, 0xE8, 0xB3, 0x81, 0x43, + 0xE8, 0xB3, 0x82, 0x43, 0xE8, 0xB3, 0x87, 0x43, + 0xE8, 0xB3, 0x88, 0x43, 0xE8, 0xB3, 0x93, 0x43, + 0xE8, 0xB4, 0x88, 0x43, 0xE8, 0xB4, 0x9B, 0x43, + 0xE8, 0xB5, 0xA4, 0x43, 0xE8, 0xB5, 0xB0, 0x43, + 0xE8, 0xB5, 0xB7, 0x43, 0xE8, 0xB6, 0xB3, 0x43, + // Bytes 13c0 - 13ff + 0xE8, 0xB6, 0xBC, 0x43, 0xE8, 0xB7, 0x8B, 0x43, + 0xE8, 0xB7, 0xAF, 0x43, 0xE8, 0xB7, 0xB0, 0x43, + 0xE8, 0xBA, 0xAB, 0x43, 0xE8, 0xBB, 0x8A, 0x43, + 0xE8, 0xBB, 0x94, 0x43, 0xE8, 0xBC, 0xA6, 0x43, + 0xE8, 0xBC, 0xAA, 0x43, 0xE8, 0xBC, 0xB8, 0x43, + 0xE8, 0xBC, 0xBB, 0x43, 0xE8, 0xBD, 0xA2, 0x43, + 0xE8, 0xBE, 0x9B, 0x43, 0xE8, 0xBE, 0x9E, 0x43, + 0xE8, 0xBE, 0xB0, 0x43, 0xE8, 0xBE, 0xB5, 0x43, + // Bytes 1400 - 143f + 0xE8, 0xBE, 0xB6, 0x43, 0xE9, 0x80, 0xA3, 0x43, + 0xE9, 0x80, 0xB8, 0x43, 0xE9, 0x81, 0x8A, 0x43, + 0xE9, 0x81, 0xA9, 0x43, 0xE9, 0x81, 0xB2, 0x43, + 0xE9, 0x81, 0xBC, 0x43, 0xE9, 0x82, 0x8F, 0x43, + 0xE9, 0x82, 0x91, 0x43, 0xE9, 0x82, 0x94, 0x43, + 0xE9, 0x83, 0x8E, 0x43, 0xE9, 0x83, 0x9E, 0x43, + 0xE9, 0x83, 0xB1, 0x43, 0xE9, 0x83, 0xBD, 0x43, + 0xE9, 0x84, 0x91, 0x43, 0xE9, 0x84, 0x9B, 0x43, + // Bytes 1440 - 147f + 0xE9, 0x85, 0x89, 0x43, 0xE9, 0x85, 0x8D, 0x43, + 0xE9, 0x85, 0xAA, 0x43, 0xE9, 0x86, 0x99, 0x43, + 0xE9, 0x86, 0xB4, 0x43, 0xE9, 0x87, 0x86, 0x43, + 0xE9, 0x87, 0x8C, 0x43, 0xE9, 0x87, 0x8F, 0x43, + 0xE9, 0x87, 0x91, 0x43, 0xE9, 0x88, 0xB4, 0x43, + 0xE9, 0x88, 0xB8, 0x43, 0xE9, 0x89, 0xB6, 0x43, + 0xE9, 0x89, 0xBC, 0x43, 0xE9, 0x8B, 0x97, 0x43, + 0xE9, 0x8B, 0x98, 0x43, 0xE9, 0x8C, 0x84, 0x43, + // Bytes 1480 - 14bf + 0xE9, 0x8D, 0x8A, 0x43, 0xE9, 0x8F, 0xB9, 0x43, + 0xE9, 0x90, 0x95, 0x43, 0xE9, 0x95, 0xB7, 0x43, + 0xE9, 0x96, 0x80, 0x43, 0xE9, 0x96, 0x8B, 0x43, + 0xE9, 0x96, 0xAD, 0x43, 0xE9, 0x96, 0xB7, 0x43, + 0xE9, 0x98, 0x9C, 0x43, 0xE9, 0x98, 0xAE, 0x43, + 0xE9, 0x99, 0x8B, 0x43, 0xE9, 0x99, 0x8D, 0x43, + 0xE9, 0x99, 0xB5, 0x43, 0xE9, 0x99, 0xB8, 0x43, + 
0xE9, 0x99, 0xBC, 0x43, 0xE9, 0x9A, 0x86, 0x43, + // Bytes 14c0 - 14ff + 0xE9, 0x9A, 0xA3, 0x43, 0xE9, 0x9A, 0xB6, 0x43, + 0xE9, 0x9A, 0xB7, 0x43, 0xE9, 0x9A, 0xB8, 0x43, + 0xE9, 0x9A, 0xB9, 0x43, 0xE9, 0x9B, 0x83, 0x43, + 0xE9, 0x9B, 0xA2, 0x43, 0xE9, 0x9B, 0xA3, 0x43, + 0xE9, 0x9B, 0xA8, 0x43, 0xE9, 0x9B, 0xB6, 0x43, + 0xE9, 0x9B, 0xB7, 0x43, 0xE9, 0x9C, 0xA3, 0x43, + 0xE9, 0x9C, 0xB2, 0x43, 0xE9, 0x9D, 0x88, 0x43, + 0xE9, 0x9D, 0x91, 0x43, 0xE9, 0x9D, 0x96, 0x43, + // Bytes 1500 - 153f + 0xE9, 0x9D, 0x9E, 0x43, 0xE9, 0x9D, 0xA2, 0x43, + 0xE9, 0x9D, 0xA9, 0x43, 0xE9, 0x9F, 0x8B, 0x43, + 0xE9, 0x9F, 0x9B, 0x43, 0xE9, 0x9F, 0xA0, 0x43, + 0xE9, 0x9F, 0xAD, 0x43, 0xE9, 0x9F, 0xB3, 0x43, + 0xE9, 0x9F, 0xBF, 0x43, 0xE9, 0xA0, 0x81, 0x43, + 0xE9, 0xA0, 0x85, 0x43, 0xE9, 0xA0, 0x8B, 0x43, + 0xE9, 0xA0, 0x98, 0x43, 0xE9, 0xA0, 0xA9, 0x43, + 0xE9, 0xA0, 0xBB, 0x43, 0xE9, 0xA1, 0x9E, 0x43, + // Bytes 1540 - 157f + 0xE9, 0xA2, 0xA8, 0x43, 0xE9, 0xA3, 0x9B, 0x43, + 0xE9, 0xA3, 0x9F, 0x43, 0xE9, 0xA3, 0xA2, 0x43, + 0xE9, 0xA3, 0xAF, 0x43, 0xE9, 0xA3, 0xBC, 0x43, + 0xE9, 0xA4, 0xA8, 0x43, 0xE9, 0xA4, 0xA9, 0x43, + 0xE9, 0xA6, 0x96, 0x43, 0xE9, 0xA6, 0x99, 0x43, + 0xE9, 0xA6, 0xA7, 0x43, 0xE9, 0xA6, 0xAC, 0x43, + 0xE9, 0xA7, 0x82, 0x43, 0xE9, 0xA7, 0xB1, 0x43, + 0xE9, 0xA7, 0xBE, 0x43, 0xE9, 0xA9, 0xAA, 0x43, + // Bytes 1580 - 15bf + 0xE9, 0xAA, 0xA8, 0x43, 0xE9, 0xAB, 0x98, 0x43, + 0xE9, 0xAB, 0x9F, 0x43, 0xE9, 0xAC, 0x92, 0x43, + 0xE9, 0xAC, 0xA5, 0x43, 0xE9, 0xAC, 0xAF, 0x43, + 0xE9, 0xAC, 0xB2, 0x43, 0xE9, 0xAC, 0xBC, 0x43, + 0xE9, 0xAD, 0x9A, 0x43, 0xE9, 0xAD, 0xAF, 0x43, + 0xE9, 0xB1, 0x80, 0x43, 0xE9, 0xB1, 0x97, 0x43, + 0xE9, 0xB3, 0xA5, 0x43, 0xE9, 0xB3, 0xBD, 0x43, + 0xE9, 0xB5, 0xA7, 0x43, 0xE9, 0xB6, 0xB4, 0x43, + // Bytes 15c0 - 15ff + 0xE9, 0xB7, 0xBA, 0x43, 0xE9, 0xB8, 0x9E, 0x43, + 0xE9, 0xB9, 0xB5, 0x43, 0xE9, 0xB9, 0xBF, 0x43, + 0xE9, 0xBA, 0x97, 0x43, 0xE9, 0xBA, 0x9F, 0x43, + 0xE9, 0xBA, 0xA5, 0x43, 0xE9, 0xBA, 0xBB, 0x43, + 0xE9, 0xBB, 0x83, 0x43, 0xE9, 0xBB, 0x8D, 0x43, + 0xE9, 0xBB, 0x8E, 0x43, 0xE9, 0xBB, 0x91, 0x43, + 0xE9, 0xBB, 0xB9, 0x43, 0xE9, 0xBB, 0xBD, 0x43, + 0xE9, 0xBB, 0xBE, 0x43, 0xE9, 0xBC, 0x85, 0x43, + // Bytes 1600 - 163f + 0xE9, 0xBC, 0x8E, 0x43, 0xE9, 0xBC, 0x8F, 0x43, + 0xE9, 0xBC, 0x93, 0x43, 0xE9, 0xBC, 0x96, 0x43, + 0xE9, 0xBC, 0xA0, 0x43, 0xE9, 0xBC, 0xBB, 0x43, + 0xE9, 0xBD, 0x83, 0x43, 0xE9, 0xBD, 0x8A, 0x43, + 0xE9, 0xBD, 0x92, 0x43, 0xE9, 0xBE, 0x8D, 0x43, + 0xE9, 0xBE, 0x8E, 0x43, 0xE9, 0xBE, 0x9C, 0x43, + 0xE9, 0xBE, 0x9F, 0x43, 0xE9, 0xBE, 0xA0, 0x43, + 0xEA, 0x9C, 0xA7, 0x43, 0xEA, 0x9D, 0xAF, 0x43, + // Bytes 1640 - 167f + 0xEA, 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x44, + 0xF0, 0xA0, 0x84, 0xA2, 0x44, 0xF0, 0xA0, 0x94, + 0x9C, 0x44, 0xF0, 0xA0, 0x94, 0xA5, 0x44, 0xF0, + 0xA0, 0x95, 0x8B, 0x44, 0xF0, 0xA0, 0x98, 0xBA, + 0x44, 0xF0, 0xA0, 0xA0, 0x84, 0x44, 0xF0, 0xA0, + 0xA3, 0x9E, 0x44, 0xF0, 0xA0, 0xA8, 0xAC, 0x44, + 0xF0, 0xA0, 0xAD, 0xA3, 0x44, 0xF0, 0xA1, 0x93, + 0xA4, 0x44, 0xF0, 0xA1, 0x9A, 0xA8, 0x44, 0xF0, + // Bytes 1680 - 16bf + 0xA1, 0x9B, 0xAA, 0x44, 0xF0, 0xA1, 0xA7, 0x88, + 0x44, 0xF0, 0xA1, 0xAC, 0x98, 0x44, 0xF0, 0xA1, + 0xB4, 0x8B, 0x44, 0xF0, 0xA1, 0xB7, 0xA4, 0x44, + 0xF0, 0xA1, 0xB7, 0xA6, 0x44, 0xF0, 0xA2, 0x86, + 0x83, 0x44, 0xF0, 0xA2, 0x86, 0x9F, 0x44, 0xF0, + 0xA2, 0x8C, 0xB1, 0x44, 0xF0, 0xA2, 0x9B, 0x94, + 0x44, 0xF0, 0xA2, 0xA1, 0x84, 0x44, 0xF0, 0xA2, + 0xA1, 0x8A, 0x44, 0xF0, 0xA2, 0xAC, 0x8C, 0x44, + // Bytes 16c0 - 16ff + 0xF0, 0xA2, 0xAF, 0xB1, 0x44, 0xF0, 0xA3, 0x80, + 0x8A, 0x44, 0xF0, 0xA3, 0x8A, 0xB8, 0x44, 0xF0, 
+ 0xA3, 0x8D, 0x9F, 0x44, 0xF0, 0xA3, 0x8E, 0x93, + 0x44, 0xF0, 0xA3, 0x8E, 0x9C, 0x44, 0xF0, 0xA3, + 0x8F, 0x83, 0x44, 0xF0, 0xA3, 0x8F, 0x95, 0x44, + 0xF0, 0xA3, 0x91, 0xAD, 0x44, 0xF0, 0xA3, 0x9A, + 0xA3, 0x44, 0xF0, 0xA3, 0xA2, 0xA7, 0x44, 0xF0, + 0xA3, 0xAA, 0x8D, 0x44, 0xF0, 0xA3, 0xAB, 0xBA, + // Bytes 1700 - 173f + 0x44, 0xF0, 0xA3, 0xB2, 0xBC, 0x44, 0xF0, 0xA3, + 0xB4, 0x9E, 0x44, 0xF0, 0xA3, 0xBB, 0x91, 0x44, + 0xF0, 0xA3, 0xBD, 0x9E, 0x44, 0xF0, 0xA3, 0xBE, + 0x8E, 0x44, 0xF0, 0xA4, 0x89, 0xA3, 0x44, 0xF0, + 0xA4, 0x8B, 0xAE, 0x44, 0xF0, 0xA4, 0x8E, 0xAB, + 0x44, 0xF0, 0xA4, 0x98, 0x88, 0x44, 0xF0, 0xA4, + 0x9C, 0xB5, 0x44, 0xF0, 0xA4, 0xA0, 0x94, 0x44, + 0xF0, 0xA4, 0xB0, 0xB6, 0x44, 0xF0, 0xA4, 0xB2, + // Bytes 1740 - 177f + 0x92, 0x44, 0xF0, 0xA4, 0xBE, 0xA1, 0x44, 0xF0, + 0xA4, 0xBE, 0xB8, 0x44, 0xF0, 0xA5, 0x81, 0x84, + 0x44, 0xF0, 0xA5, 0x83, 0xB2, 0x44, 0xF0, 0xA5, + 0x83, 0xB3, 0x44, 0xF0, 0xA5, 0x84, 0x99, 0x44, + 0xF0, 0xA5, 0x84, 0xB3, 0x44, 0xF0, 0xA5, 0x89, + 0x89, 0x44, 0xF0, 0xA5, 0x90, 0x9D, 0x44, 0xF0, + 0xA5, 0x98, 0xA6, 0x44, 0xF0, 0xA5, 0x9A, 0x9A, + 0x44, 0xF0, 0xA5, 0x9B, 0x85, 0x44, 0xF0, 0xA5, + // Bytes 1780 - 17bf + 0xA5, 0xBC, 0x44, 0xF0, 0xA5, 0xAA, 0xA7, 0x44, + 0xF0, 0xA5, 0xAE, 0xAB, 0x44, 0xF0, 0xA5, 0xB2, + 0x80, 0x44, 0xF0, 0xA5, 0xB3, 0x90, 0x44, 0xF0, + 0xA5, 0xBE, 0x86, 0x44, 0xF0, 0xA6, 0x87, 0x9A, + 0x44, 0xF0, 0xA6, 0x88, 0xA8, 0x44, 0xF0, 0xA6, + 0x89, 0x87, 0x44, 0xF0, 0xA6, 0x8B, 0x99, 0x44, + 0xF0, 0xA6, 0x8C, 0xBE, 0x44, 0xF0, 0xA6, 0x93, + 0x9A, 0x44, 0xF0, 0xA6, 0x94, 0xA3, 0x44, 0xF0, + // Bytes 17c0 - 17ff + 0xA6, 0x96, 0xA8, 0x44, 0xF0, 0xA6, 0x9E, 0xA7, + 0x44, 0xF0, 0xA6, 0x9E, 0xB5, 0x44, 0xF0, 0xA6, + 0xAC, 0xBC, 0x44, 0xF0, 0xA6, 0xB0, 0xB6, 0x44, + 0xF0, 0xA6, 0xB3, 0x95, 0x44, 0xF0, 0xA6, 0xB5, + 0xAB, 0x44, 0xF0, 0xA6, 0xBC, 0xAC, 0x44, 0xF0, + 0xA6, 0xBE, 0xB1, 0x44, 0xF0, 0xA7, 0x83, 0x92, + 0x44, 0xF0, 0xA7, 0x8F, 0x8A, 0x44, 0xF0, 0xA7, + 0x99, 0xA7, 0x44, 0xF0, 0xA7, 0xA2, 0xAE, 0x44, + // Bytes 1800 - 183f + 0xF0, 0xA7, 0xA5, 0xA6, 0x44, 0xF0, 0xA7, 0xB2, + 0xA8, 0x44, 0xF0, 0xA7, 0xBB, 0x93, 0x44, 0xF0, + 0xA7, 0xBC, 0xAF, 0x44, 0xF0, 0xA8, 0x97, 0x92, + 0x44, 0xF0, 0xA8, 0x97, 0xAD, 0x44, 0xF0, 0xA8, + 0x9C, 0xAE, 0x44, 0xF0, 0xA8, 0xAF, 0xBA, 0x44, + 0xF0, 0xA8, 0xB5, 0xB7, 0x44, 0xF0, 0xA9, 0x85, + 0x85, 0x44, 0xF0, 0xA9, 0x87, 0x9F, 0x44, 0xF0, + 0xA9, 0x88, 0x9A, 0x44, 0xF0, 0xA9, 0x90, 0x8A, + // Bytes 1840 - 187f + 0x44, 0xF0, 0xA9, 0x92, 0x96, 0x44, 0xF0, 0xA9, + 0x96, 0xB6, 0x44, 0xF0, 0xA9, 0xAC, 0xB0, 0x44, + 0xF0, 0xAA, 0x83, 0x8E, 0x44, 0xF0, 0xAA, 0x84, + 0x85, 0x44, 0xF0, 0xAA, 0x88, 0x8E, 0x44, 0xF0, + 0xAA, 0x8A, 0x91, 0x44, 0xF0, 0xAA, 0x8E, 0x92, + 0x44, 0xF0, 0xAA, 0x98, 0x80, 0x42, 0x21, 0x21, + 0x42, 0x21, 0x3F, 0x42, 0x2E, 0x2E, 0x42, 0x30, + 0x2C, 0x42, 0x30, 0x2E, 0x42, 0x31, 0x2C, 0x42, + // Bytes 1880 - 18bf + 0x31, 0x2E, 0x42, 0x31, 0x30, 0x42, 0x31, 0x31, + 0x42, 0x31, 0x32, 0x42, 0x31, 0x33, 0x42, 0x31, + 0x34, 0x42, 0x31, 0x35, 0x42, 0x31, 0x36, 0x42, + 0x31, 0x37, 0x42, 0x31, 0x38, 0x42, 0x31, 0x39, + 0x42, 0x32, 0x2C, 0x42, 0x32, 0x2E, 0x42, 0x32, + 0x30, 0x42, 0x32, 0x31, 0x42, 0x32, 0x32, 0x42, + 0x32, 0x33, 0x42, 0x32, 0x34, 0x42, 0x32, 0x35, + 0x42, 0x32, 0x36, 0x42, 0x32, 0x37, 0x42, 0x32, + // Bytes 18c0 - 18ff + 0x38, 0x42, 0x32, 0x39, 0x42, 0x33, 0x2C, 0x42, + 0x33, 0x2E, 0x42, 0x33, 0x30, 0x42, 0x33, 0x31, + 0x42, 0x33, 0x32, 0x42, 0x33, 0x33, 0x42, 0x33, + 0x34, 0x42, 0x33, 0x35, 0x42, 0x33, 0x36, 0x42, + 0x33, 0x37, 0x42, 0x33, 0x38, 0x42, 0x33, 0x39, + 0x42, 0x34, 0x2C, 
0x42, 0x34, 0x2E, 0x42, 0x34, + 0x30, 0x42, 0x34, 0x31, 0x42, 0x34, 0x32, 0x42, + 0x34, 0x33, 0x42, 0x34, 0x34, 0x42, 0x34, 0x35, + // Bytes 1900 - 193f + 0x42, 0x34, 0x36, 0x42, 0x34, 0x37, 0x42, 0x34, + 0x38, 0x42, 0x34, 0x39, 0x42, 0x35, 0x2C, 0x42, + 0x35, 0x2E, 0x42, 0x35, 0x30, 0x42, 0x36, 0x2C, + 0x42, 0x36, 0x2E, 0x42, 0x37, 0x2C, 0x42, 0x37, + 0x2E, 0x42, 0x38, 0x2C, 0x42, 0x38, 0x2E, 0x42, + 0x39, 0x2C, 0x42, 0x39, 0x2E, 0x42, 0x3D, 0x3D, + 0x42, 0x3F, 0x21, 0x42, 0x3F, 0x3F, 0x42, 0x41, + 0x55, 0x42, 0x42, 0x71, 0x42, 0x43, 0x44, 0x42, + // Bytes 1940 - 197f + 0x44, 0x4A, 0x42, 0x44, 0x5A, 0x42, 0x44, 0x7A, + 0x42, 0x47, 0x42, 0x42, 0x47, 0x79, 0x42, 0x48, + 0x50, 0x42, 0x48, 0x56, 0x42, 0x48, 0x67, 0x42, + 0x48, 0x7A, 0x42, 0x49, 0x49, 0x42, 0x49, 0x4A, + 0x42, 0x49, 0x55, 0x42, 0x49, 0x56, 0x42, 0x49, + 0x58, 0x42, 0x4B, 0x42, 0x42, 0x4B, 0x4B, 0x42, + 0x4B, 0x4D, 0x42, 0x4C, 0x4A, 0x42, 0x4C, 0x6A, + 0x42, 0x4D, 0x42, 0x42, 0x4D, 0x43, 0x42, 0x4D, + // Bytes 1980 - 19bf + 0x44, 0x42, 0x4D, 0x52, 0x42, 0x4D, 0x56, 0x42, + 0x4D, 0x57, 0x42, 0x4E, 0x4A, 0x42, 0x4E, 0x6A, + 0x42, 0x4E, 0x6F, 0x42, 0x50, 0x48, 0x42, 0x50, + 0x52, 0x42, 0x50, 0x61, 0x42, 0x52, 0x73, 0x42, + 0x53, 0x44, 0x42, 0x53, 0x4D, 0x42, 0x53, 0x53, + 0x42, 0x53, 0x76, 0x42, 0x54, 0x4D, 0x42, 0x56, + 0x49, 0x42, 0x57, 0x43, 0x42, 0x57, 0x5A, 0x42, + 0x57, 0x62, 0x42, 0x58, 0x49, 0x42, 0x63, 0x63, + // Bytes 19c0 - 19ff + 0x42, 0x63, 0x64, 0x42, 0x63, 0x6D, 0x42, 0x64, + 0x42, 0x42, 0x64, 0x61, 0x42, 0x64, 0x6C, 0x42, + 0x64, 0x6D, 0x42, 0x64, 0x7A, 0x42, 0x65, 0x56, + 0x42, 0x66, 0x66, 0x42, 0x66, 0x69, 0x42, 0x66, + 0x6C, 0x42, 0x66, 0x6D, 0x42, 0x68, 0x61, 0x42, + 0x69, 0x69, 0x42, 0x69, 0x6A, 0x42, 0x69, 0x6E, + 0x42, 0x69, 0x76, 0x42, 0x69, 0x78, 0x42, 0x6B, + 0x41, 0x42, 0x6B, 0x56, 0x42, 0x6B, 0x57, 0x42, + // Bytes 1a00 - 1a3f + 0x6B, 0x67, 0x42, 0x6B, 0x6C, 0x42, 0x6B, 0x6D, + 0x42, 0x6B, 0x74, 0x42, 0x6C, 0x6A, 0x42, 0x6C, + 0x6D, 0x42, 0x6C, 0x6E, 0x42, 0x6C, 0x78, 0x42, + 0x6D, 0x32, 0x42, 0x6D, 0x33, 0x42, 0x6D, 0x41, + 0x42, 0x6D, 0x56, 0x42, 0x6D, 0x57, 0x42, 0x6D, + 0x62, 0x42, 0x6D, 0x67, 0x42, 0x6D, 0x6C, 0x42, + 0x6D, 0x6D, 0x42, 0x6D, 0x73, 0x42, 0x6E, 0x41, + 0x42, 0x6E, 0x46, 0x42, 0x6E, 0x56, 0x42, 0x6E, + // Bytes 1a40 - 1a7f + 0x57, 0x42, 0x6E, 0x6A, 0x42, 0x6E, 0x6D, 0x42, + 0x6E, 0x73, 0x42, 0x6F, 0x56, 0x42, 0x70, 0x41, + 0x42, 0x70, 0x46, 0x42, 0x70, 0x56, 0x42, 0x70, + 0x57, 0x42, 0x70, 0x63, 0x42, 0x70, 0x73, 0x42, + 0x73, 0x72, 0x42, 0x73, 0x74, 0x42, 0x76, 0x69, + 0x42, 0x78, 0x69, 0x43, 0x28, 0x31, 0x29, 0x43, + 0x28, 0x32, 0x29, 0x43, 0x28, 0x33, 0x29, 0x43, + 0x28, 0x34, 0x29, 0x43, 0x28, 0x35, 0x29, 0x43, + // Bytes 1a80 - 1abf + 0x28, 0x36, 0x29, 0x43, 0x28, 0x37, 0x29, 0x43, + 0x28, 0x38, 0x29, 0x43, 0x28, 0x39, 0x29, 0x43, + 0x28, 0x41, 0x29, 0x43, 0x28, 0x42, 0x29, 0x43, + 0x28, 0x43, 0x29, 0x43, 0x28, 0x44, 0x29, 0x43, + 0x28, 0x45, 0x29, 0x43, 0x28, 0x46, 0x29, 0x43, + 0x28, 0x47, 0x29, 0x43, 0x28, 0x48, 0x29, 0x43, + 0x28, 0x49, 0x29, 0x43, 0x28, 0x4A, 0x29, 0x43, + 0x28, 0x4B, 0x29, 0x43, 0x28, 0x4C, 0x29, 0x43, + // Bytes 1ac0 - 1aff + 0x28, 0x4D, 0x29, 0x43, 0x28, 0x4E, 0x29, 0x43, + 0x28, 0x4F, 0x29, 0x43, 0x28, 0x50, 0x29, 0x43, + 0x28, 0x51, 0x29, 0x43, 0x28, 0x52, 0x29, 0x43, + 0x28, 0x53, 0x29, 0x43, 0x28, 0x54, 0x29, 0x43, + 0x28, 0x55, 0x29, 0x43, 0x28, 0x56, 0x29, 0x43, + 0x28, 0x57, 0x29, 0x43, 0x28, 0x58, 0x29, 0x43, + 0x28, 0x59, 0x29, 0x43, 0x28, 0x5A, 0x29, 0x43, + 0x28, 0x61, 0x29, 0x43, 0x28, 0x62, 0x29, 0x43, + // Bytes 1b00 - 1b3f + 0x28, 0x63, 
0x29, 0x43, 0x28, 0x64, 0x29, 0x43, + 0x28, 0x65, 0x29, 0x43, 0x28, 0x66, 0x29, 0x43, + 0x28, 0x67, 0x29, 0x43, 0x28, 0x68, 0x29, 0x43, + 0x28, 0x69, 0x29, 0x43, 0x28, 0x6A, 0x29, 0x43, + 0x28, 0x6B, 0x29, 0x43, 0x28, 0x6C, 0x29, 0x43, + 0x28, 0x6D, 0x29, 0x43, 0x28, 0x6E, 0x29, 0x43, + 0x28, 0x6F, 0x29, 0x43, 0x28, 0x70, 0x29, 0x43, + 0x28, 0x71, 0x29, 0x43, 0x28, 0x72, 0x29, 0x43, + // Bytes 1b40 - 1b7f + 0x28, 0x73, 0x29, 0x43, 0x28, 0x74, 0x29, 0x43, + 0x28, 0x75, 0x29, 0x43, 0x28, 0x76, 0x29, 0x43, + 0x28, 0x77, 0x29, 0x43, 0x28, 0x78, 0x29, 0x43, + 0x28, 0x79, 0x29, 0x43, 0x28, 0x7A, 0x29, 0x43, + 0x2E, 0x2E, 0x2E, 0x43, 0x31, 0x30, 0x2E, 0x43, + 0x31, 0x31, 0x2E, 0x43, 0x31, 0x32, 0x2E, 0x43, + 0x31, 0x33, 0x2E, 0x43, 0x31, 0x34, 0x2E, 0x43, + 0x31, 0x35, 0x2E, 0x43, 0x31, 0x36, 0x2E, 0x43, + // Bytes 1b80 - 1bbf + 0x31, 0x37, 0x2E, 0x43, 0x31, 0x38, 0x2E, 0x43, + 0x31, 0x39, 0x2E, 0x43, 0x32, 0x30, 0x2E, 0x43, + 0x3A, 0x3A, 0x3D, 0x43, 0x3D, 0x3D, 0x3D, 0x43, + 0x43, 0x6F, 0x2E, 0x43, 0x46, 0x41, 0x58, 0x43, + 0x47, 0x48, 0x7A, 0x43, 0x47, 0x50, 0x61, 0x43, + 0x49, 0x49, 0x49, 0x43, 0x4C, 0x54, 0x44, 0x43, + 0x4C, 0xC2, 0xB7, 0x43, 0x4D, 0x48, 0x7A, 0x43, + 0x4D, 0x50, 0x61, 0x43, 0x4D, 0xCE, 0xA9, 0x43, + // Bytes 1bc0 - 1bff + 0x50, 0x50, 0x4D, 0x43, 0x50, 0x50, 0x56, 0x43, + 0x50, 0x54, 0x45, 0x43, 0x54, 0x45, 0x4C, 0x43, + 0x54, 0x48, 0x7A, 0x43, 0x56, 0x49, 0x49, 0x43, + 0x58, 0x49, 0x49, 0x43, 0x61, 0x2F, 0x63, 0x43, + 0x61, 0x2F, 0x73, 0x43, 0x61, 0xCA, 0xBE, 0x43, + 0x62, 0x61, 0x72, 0x43, 0x63, 0x2F, 0x6F, 0x43, + 0x63, 0x2F, 0x75, 0x43, 0x63, 0x61, 0x6C, 0x43, + 0x63, 0x6D, 0x32, 0x43, 0x63, 0x6D, 0x33, 0x43, + // Bytes 1c00 - 1c3f + 0x64, 0x6D, 0x32, 0x43, 0x64, 0x6D, 0x33, 0x43, + 0x65, 0x72, 0x67, 0x43, 0x66, 0x66, 0x69, 0x43, + 0x66, 0x66, 0x6C, 0x43, 0x67, 0x61, 0x6C, 0x43, + 0x68, 0x50, 0x61, 0x43, 0x69, 0x69, 0x69, 0x43, + 0x6B, 0x48, 0x7A, 0x43, 0x6B, 0x50, 0x61, 0x43, + 0x6B, 0x6D, 0x32, 0x43, 0x6B, 0x6D, 0x33, 0x43, + 0x6B, 0xCE, 0xA9, 0x43, 0x6C, 0x6F, 0x67, 0x43, + 0x6C, 0xC2, 0xB7, 0x43, 0x6D, 0x69, 0x6C, 0x43, + // Bytes 1c40 - 1c7f + 0x6D, 0x6D, 0x32, 0x43, 0x6D, 0x6D, 0x33, 0x43, + 0x6D, 0x6F, 0x6C, 0x43, 0x72, 0x61, 0x64, 0x43, + 0x76, 0x69, 0x69, 0x43, 0x78, 0x69, 0x69, 0x43, + 0xC2, 0xB0, 0x43, 0x43, 0xC2, 0xB0, 0x46, 0x43, + 0xCA, 0xBC, 0x6E, 0x43, 0xCE, 0xBC, 0x41, 0x43, + 0xCE, 0xBC, 0x46, 0x43, 0xCE, 0xBC, 0x56, 0x43, + 0xCE, 0xBC, 0x57, 0x43, 0xCE, 0xBC, 0x67, 0x43, + 0xCE, 0xBC, 0x6C, 0x43, 0xCE, 0xBC, 0x6D, 0x43, + // Bytes 1c80 - 1cbf + 0xCE, 0xBC, 0x73, 0x44, 0x28, 0x31, 0x30, 0x29, + 0x44, 0x28, 0x31, 0x31, 0x29, 0x44, 0x28, 0x31, + 0x32, 0x29, 0x44, 0x28, 0x31, 0x33, 0x29, 0x44, + 0x28, 0x31, 0x34, 0x29, 0x44, 0x28, 0x31, 0x35, + 0x29, 0x44, 0x28, 0x31, 0x36, 0x29, 0x44, 0x28, + 0x31, 0x37, 0x29, 0x44, 0x28, 0x31, 0x38, 0x29, + 0x44, 0x28, 0x31, 0x39, 0x29, 0x44, 0x28, 0x32, + 0x30, 0x29, 0x44, 0x30, 0xE7, 0x82, 0xB9, 0x44, + // Bytes 1cc0 - 1cff + 0x31, 0xE2, 0x81, 0x84, 0x44, 0x31, 0xE6, 0x97, + 0xA5, 0x44, 0x31, 0xE6, 0x9C, 0x88, 0x44, 0x31, + 0xE7, 0x82, 0xB9, 0x44, 0x32, 0xE6, 0x97, 0xA5, + 0x44, 0x32, 0xE6, 0x9C, 0x88, 0x44, 0x32, 0xE7, + 0x82, 0xB9, 0x44, 0x33, 0xE6, 0x97, 0xA5, 0x44, + 0x33, 0xE6, 0x9C, 0x88, 0x44, 0x33, 0xE7, 0x82, + 0xB9, 0x44, 0x34, 0xE6, 0x97, 0xA5, 0x44, 0x34, + 0xE6, 0x9C, 0x88, 0x44, 0x34, 0xE7, 0x82, 0xB9, + // Bytes 1d00 - 1d3f + 0x44, 0x35, 0xE6, 0x97, 0xA5, 0x44, 0x35, 0xE6, + 0x9C, 0x88, 0x44, 0x35, 0xE7, 0x82, 0xB9, 0x44, + 0x36, 0xE6, 0x97, 0xA5, 0x44, 0x36, 0xE6, 0x9C, + 0x88, 0x44, 0x36, 0xE7, 0x82, 
0xB9, 0x44, 0x37, + 0xE6, 0x97, 0xA5, 0x44, 0x37, 0xE6, 0x9C, 0x88, + 0x44, 0x37, 0xE7, 0x82, 0xB9, 0x44, 0x38, 0xE6, + 0x97, 0xA5, 0x44, 0x38, 0xE6, 0x9C, 0x88, 0x44, + 0x38, 0xE7, 0x82, 0xB9, 0x44, 0x39, 0xE6, 0x97, + // Bytes 1d40 - 1d7f + 0xA5, 0x44, 0x39, 0xE6, 0x9C, 0x88, 0x44, 0x39, + 0xE7, 0x82, 0xB9, 0x44, 0x56, 0x49, 0x49, 0x49, + 0x44, 0x61, 0x2E, 0x6D, 0x2E, 0x44, 0x6B, 0x63, + 0x61, 0x6C, 0x44, 0x70, 0x2E, 0x6D, 0x2E, 0x44, + 0x76, 0x69, 0x69, 0x69, 0x44, 0xD5, 0xA5, 0xD6, + 0x82, 0x44, 0xD5, 0xB4, 0xD5, 0xA5, 0x44, 0xD5, + 0xB4, 0xD5, 0xAB, 0x44, 0xD5, 0xB4, 0xD5, 0xAD, + 0x44, 0xD5, 0xB4, 0xD5, 0xB6, 0x44, 0xD5, 0xBE, + // Bytes 1d80 - 1dbf + 0xD5, 0xB6, 0x44, 0xD7, 0x90, 0xD7, 0x9C, 0x44, + 0xD8, 0xA7, 0xD9, 0xB4, 0x44, 0xD8, 0xA8, 0xD8, + 0xAC, 0x44, 0xD8, 0xA8, 0xD8, 0xAD, 0x44, 0xD8, + 0xA8, 0xD8, 0xAE, 0x44, 0xD8, 0xA8, 0xD8, 0xB1, + 0x44, 0xD8, 0xA8, 0xD8, 0xB2, 0x44, 0xD8, 0xA8, + 0xD9, 0x85, 0x44, 0xD8, 0xA8, 0xD9, 0x86, 0x44, + 0xD8, 0xA8, 0xD9, 0x87, 0x44, 0xD8, 0xA8, 0xD9, + 0x89, 0x44, 0xD8, 0xA8, 0xD9, 0x8A, 0x44, 0xD8, + // Bytes 1dc0 - 1dff + 0xAA, 0xD8, 0xAC, 0x44, 0xD8, 0xAA, 0xD8, 0xAD, + 0x44, 0xD8, 0xAA, 0xD8, 0xAE, 0x44, 0xD8, 0xAA, + 0xD8, 0xB1, 0x44, 0xD8, 0xAA, 0xD8, 0xB2, 0x44, + 0xD8, 0xAA, 0xD9, 0x85, 0x44, 0xD8, 0xAA, 0xD9, + 0x86, 0x44, 0xD8, 0xAA, 0xD9, 0x87, 0x44, 0xD8, + 0xAA, 0xD9, 0x89, 0x44, 0xD8, 0xAA, 0xD9, 0x8A, + 0x44, 0xD8, 0xAB, 0xD8, 0xAC, 0x44, 0xD8, 0xAB, + 0xD8, 0xB1, 0x44, 0xD8, 0xAB, 0xD8, 0xB2, 0x44, + // Bytes 1e00 - 1e3f + 0xD8, 0xAB, 0xD9, 0x85, 0x44, 0xD8, 0xAB, 0xD9, + 0x86, 0x44, 0xD8, 0xAB, 0xD9, 0x87, 0x44, 0xD8, + 0xAB, 0xD9, 0x89, 0x44, 0xD8, 0xAB, 0xD9, 0x8A, + 0x44, 0xD8, 0xAC, 0xD8, 0xAD, 0x44, 0xD8, 0xAC, + 0xD9, 0x85, 0x44, 0xD8, 0xAC, 0xD9, 0x89, 0x44, + 0xD8, 0xAC, 0xD9, 0x8A, 0x44, 0xD8, 0xAD, 0xD8, + 0xAC, 0x44, 0xD8, 0xAD, 0xD9, 0x85, 0x44, 0xD8, + 0xAD, 0xD9, 0x89, 0x44, 0xD8, 0xAD, 0xD9, 0x8A, + // Bytes 1e40 - 1e7f + 0x44, 0xD8, 0xAE, 0xD8, 0xAC, 0x44, 0xD8, 0xAE, + 0xD8, 0xAD, 0x44, 0xD8, 0xAE, 0xD9, 0x85, 0x44, + 0xD8, 0xAE, 0xD9, 0x89, 0x44, 0xD8, 0xAE, 0xD9, + 0x8A, 0x44, 0xD8, 0xB3, 0xD8, 0xAC, 0x44, 0xD8, + 0xB3, 0xD8, 0xAD, 0x44, 0xD8, 0xB3, 0xD8, 0xAE, + 0x44, 0xD8, 0xB3, 0xD8, 0xB1, 0x44, 0xD8, 0xB3, + 0xD9, 0x85, 0x44, 0xD8, 0xB3, 0xD9, 0x87, 0x44, + 0xD8, 0xB3, 0xD9, 0x89, 0x44, 0xD8, 0xB3, 0xD9, + // Bytes 1e80 - 1ebf + 0x8A, 0x44, 0xD8, 0xB4, 0xD8, 0xAC, 0x44, 0xD8, + 0xB4, 0xD8, 0xAD, 0x44, 0xD8, 0xB4, 0xD8, 0xAE, + 0x44, 0xD8, 0xB4, 0xD8, 0xB1, 0x44, 0xD8, 0xB4, + 0xD9, 0x85, 0x44, 0xD8, 0xB4, 0xD9, 0x87, 0x44, + 0xD8, 0xB4, 0xD9, 0x89, 0x44, 0xD8, 0xB4, 0xD9, + 0x8A, 0x44, 0xD8, 0xB5, 0xD8, 0xAD, 0x44, 0xD8, + 0xB5, 0xD8, 0xAE, 0x44, 0xD8, 0xB5, 0xD8, 0xB1, + 0x44, 0xD8, 0xB5, 0xD9, 0x85, 0x44, 0xD8, 0xB5, + // Bytes 1ec0 - 1eff + 0xD9, 0x89, 0x44, 0xD8, 0xB5, 0xD9, 0x8A, 0x44, + 0xD8, 0xB6, 0xD8, 0xAC, 0x44, 0xD8, 0xB6, 0xD8, + 0xAD, 0x44, 0xD8, 0xB6, 0xD8, 0xAE, 0x44, 0xD8, + 0xB6, 0xD8, 0xB1, 0x44, 0xD8, 0xB6, 0xD9, 0x85, + 0x44, 0xD8, 0xB6, 0xD9, 0x89, 0x44, 0xD8, 0xB6, + 0xD9, 0x8A, 0x44, 0xD8, 0xB7, 0xD8, 0xAD, 0x44, + 0xD8, 0xB7, 0xD9, 0x85, 0x44, 0xD8, 0xB7, 0xD9, + 0x89, 0x44, 0xD8, 0xB7, 0xD9, 0x8A, 0x44, 0xD8, + // Bytes 1f00 - 1f3f + 0xB8, 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD8, 0xAC, + 0x44, 0xD8, 0xB9, 0xD9, 0x85, 0x44, 0xD8, 0xB9, + 0xD9, 0x89, 0x44, 0xD8, 0xB9, 0xD9, 0x8A, 0x44, + 0xD8, 0xBA, 0xD8, 0xAC, 0x44, 0xD8, 0xBA, 0xD9, + 0x85, 0x44, 0xD8, 0xBA, 0xD9, 0x89, 0x44, 0xD8, + 0xBA, 0xD9, 0x8A, 0x44, 0xD9, 0x81, 0xD8, 0xAC, + 0x44, 0xD9, 0x81, 0xD8, 0xAD, 0x44, 0xD9, 0x81, + 
0xD8, 0xAE, 0x44, 0xD9, 0x81, 0xD9, 0x85, 0x44, + // Bytes 1f40 - 1f7f + 0xD9, 0x81, 0xD9, 0x89, 0x44, 0xD9, 0x81, 0xD9, + 0x8A, 0x44, 0xD9, 0x82, 0xD8, 0xAD, 0x44, 0xD9, + 0x82, 0xD9, 0x85, 0x44, 0xD9, 0x82, 0xD9, 0x89, + 0x44, 0xD9, 0x82, 0xD9, 0x8A, 0x44, 0xD9, 0x83, + 0xD8, 0xA7, 0x44, 0xD9, 0x83, 0xD8, 0xAC, 0x44, + 0xD9, 0x83, 0xD8, 0xAD, 0x44, 0xD9, 0x83, 0xD8, + 0xAE, 0x44, 0xD9, 0x83, 0xD9, 0x84, 0x44, 0xD9, + 0x83, 0xD9, 0x85, 0x44, 0xD9, 0x83, 0xD9, 0x89, + // Bytes 1f80 - 1fbf + 0x44, 0xD9, 0x83, 0xD9, 0x8A, 0x44, 0xD9, 0x84, + 0xD8, 0xA7, 0x44, 0xD9, 0x84, 0xD8, 0xAC, 0x44, + 0xD9, 0x84, 0xD8, 0xAD, 0x44, 0xD9, 0x84, 0xD8, + 0xAE, 0x44, 0xD9, 0x84, 0xD9, 0x85, 0x44, 0xD9, + 0x84, 0xD9, 0x87, 0x44, 0xD9, 0x84, 0xD9, 0x89, + 0x44, 0xD9, 0x84, 0xD9, 0x8A, 0x44, 0xD9, 0x85, + 0xD8, 0xA7, 0x44, 0xD9, 0x85, 0xD8, 0xAC, 0x44, + 0xD9, 0x85, 0xD8, 0xAD, 0x44, 0xD9, 0x85, 0xD8, + // Bytes 1fc0 - 1fff + 0xAE, 0x44, 0xD9, 0x85, 0xD9, 0x85, 0x44, 0xD9, + 0x85, 0xD9, 0x89, 0x44, 0xD9, 0x85, 0xD9, 0x8A, + 0x44, 0xD9, 0x86, 0xD8, 0xAC, 0x44, 0xD9, 0x86, + 0xD8, 0xAD, 0x44, 0xD9, 0x86, 0xD8, 0xAE, 0x44, + 0xD9, 0x86, 0xD8, 0xB1, 0x44, 0xD9, 0x86, 0xD8, + 0xB2, 0x44, 0xD9, 0x86, 0xD9, 0x85, 0x44, 0xD9, + 0x86, 0xD9, 0x86, 0x44, 0xD9, 0x86, 0xD9, 0x87, + 0x44, 0xD9, 0x86, 0xD9, 0x89, 0x44, 0xD9, 0x86, + // Bytes 2000 - 203f + 0xD9, 0x8A, 0x44, 0xD9, 0x87, 0xD8, 0xAC, 0x44, + 0xD9, 0x87, 0xD9, 0x85, 0x44, 0xD9, 0x87, 0xD9, + 0x89, 0x44, 0xD9, 0x87, 0xD9, 0x8A, 0x44, 0xD9, + 0x88, 0xD9, 0xB4, 0x44, 0xD9, 0x8A, 0xD8, 0xAC, + 0x44, 0xD9, 0x8A, 0xD8, 0xAD, 0x44, 0xD9, 0x8A, + 0xD8, 0xAE, 0x44, 0xD9, 0x8A, 0xD8, 0xB1, 0x44, + 0xD9, 0x8A, 0xD8, 0xB2, 0x44, 0xD9, 0x8A, 0xD9, + 0x85, 0x44, 0xD9, 0x8A, 0xD9, 0x86, 0x44, 0xD9, + // Bytes 2040 - 207f + 0x8A, 0xD9, 0x87, 0x44, 0xD9, 0x8A, 0xD9, 0x89, + 0x44, 0xD9, 0x8A, 0xD9, 0x8A, 0x44, 0xD9, 0x8A, + 0xD9, 0xB4, 0x44, 0xDB, 0x87, 0xD9, 0xB4, 0x45, + 0x28, 0xE1, 0x84, 0x80, 0x29, 0x45, 0x28, 0xE1, + 0x84, 0x82, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x83, + 0x29, 0x45, 0x28, 0xE1, 0x84, 0x85, 0x29, 0x45, + 0x28, 0xE1, 0x84, 0x86, 0x29, 0x45, 0x28, 0xE1, + 0x84, 0x87, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x89, + // Bytes 2080 - 20bf + 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8B, 0x29, 0x45, + 0x28, 0xE1, 0x84, 0x8C, 0x29, 0x45, 0x28, 0xE1, + 0x84, 0x8E, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8F, + 0x29, 0x45, 0x28, 0xE1, 0x84, 0x90, 0x29, 0x45, + 0x28, 0xE1, 0x84, 0x91, 0x29, 0x45, 0x28, 0xE1, + 0x84, 0x92, 0x29, 0x45, 0x28, 0xE4, 0xB8, 0x80, + 0x29, 0x45, 0x28, 0xE4, 0xB8, 0x83, 0x29, 0x45, + 0x28, 0xE4, 0xB8, 0x89, 0x29, 0x45, 0x28, 0xE4, + // Bytes 20c0 - 20ff + 0xB9, 0x9D, 0x29, 0x45, 0x28, 0xE4, 0xBA, 0x8C, + 0x29, 0x45, 0x28, 0xE4, 0xBA, 0x94, 0x29, 0x45, + 0x28, 0xE4, 0xBB, 0xA3, 0x29, 0x45, 0x28, 0xE4, + 0xBC, 0x81, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x91, + 0x29, 0x45, 0x28, 0xE5, 0x85, 0xAB, 0x29, 0x45, + 0x28, 0xE5, 0x85, 0xAD, 0x29, 0x45, 0x28, 0xE5, + 0x8A, 0xB4, 0x29, 0x45, 0x28, 0xE5, 0x8D, 0x81, + 0x29, 0x45, 0x28, 0xE5, 0x8D, 0x94, 0x29, 0x45, + // Bytes 2100 - 213f + 0x28, 0xE5, 0x90, 0x8D, 0x29, 0x45, 0x28, 0xE5, + 0x91, 0xBC, 0x29, 0x45, 0x28, 0xE5, 0x9B, 0x9B, + 0x29, 0x45, 0x28, 0xE5, 0x9C, 0x9F, 0x29, 0x45, + 0x28, 0xE5, 0xAD, 0xA6, 0x29, 0x45, 0x28, 0xE6, + 0x97, 0xA5, 0x29, 0x45, 0x28, 0xE6, 0x9C, 0x88, + 0x29, 0x45, 0x28, 0xE6, 0x9C, 0x89, 0x29, 0x45, + 0x28, 0xE6, 0x9C, 0xA8, 0x29, 0x45, 0x28, 0xE6, + 0xA0, 0xAA, 0x29, 0x45, 0x28, 0xE6, 0xB0, 0xB4, + // Bytes 2140 - 217f + 0x29, 0x45, 0x28, 0xE7, 0x81, 0xAB, 0x29, 0x45, + 0x28, 0xE7, 0x89, 0xB9, 0x29, 0x45, 0x28, 0xE7, 
+ 0x9B, 0xA3, 0x29, 0x45, 0x28, 0xE7, 0xA4, 0xBE, + 0x29, 0x45, 0x28, 0xE7, 0xA5, 0x9D, 0x29, 0x45, + 0x28, 0xE7, 0xA5, 0xAD, 0x29, 0x45, 0x28, 0xE8, + 0x87, 0xAA, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xB3, + 0x29, 0x45, 0x28, 0xE8, 0xB2, 0xA1, 0x29, 0x45, + 0x28, 0xE8, 0xB3, 0x87, 0x29, 0x45, 0x28, 0xE9, + // Bytes 2180 - 21bf + 0x87, 0x91, 0x29, 0x45, 0x30, 0xE2, 0x81, 0x84, + 0x33, 0x45, 0x31, 0x30, 0xE6, 0x97, 0xA5, 0x45, + 0x31, 0x30, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x30, + 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x31, 0xE6, 0x97, + 0xA5, 0x45, 0x31, 0x31, 0xE6, 0x9C, 0x88, 0x45, + 0x31, 0x31, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x32, + 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x32, 0xE6, 0x9C, + 0x88, 0x45, 0x31, 0x32, 0xE7, 0x82, 0xB9, 0x45, + // Bytes 21c0 - 21ff + 0x31, 0x33, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x33, + 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x34, 0xE6, 0x97, + 0xA5, 0x45, 0x31, 0x34, 0xE7, 0x82, 0xB9, 0x45, + 0x31, 0x35, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x35, + 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x36, 0xE6, 0x97, + 0xA5, 0x45, 0x31, 0x36, 0xE7, 0x82, 0xB9, 0x45, + 0x31, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x37, + 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x38, 0xE6, 0x97, + // Bytes 2200 - 223f + 0xA5, 0x45, 0x31, 0x38, 0xE7, 0x82, 0xB9, 0x45, + 0x31, 0x39, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x39, + 0xE7, 0x82, 0xB9, 0x45, 0x31, 0xE2, 0x81, 0x84, + 0x32, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x33, 0x45, + 0x31, 0xE2, 0x81, 0x84, 0x34, 0x45, 0x31, 0xE2, + 0x81, 0x84, 0x35, 0x45, 0x31, 0xE2, 0x81, 0x84, + 0x36, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x37, 0x45, + 0x31, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x31, 0xE2, + // Bytes 2240 - 227f + 0x81, 0x84, 0x39, 0x45, 0x32, 0x30, 0xE6, 0x97, + 0xA5, 0x45, 0x32, 0x30, 0xE7, 0x82, 0xB9, 0x45, + 0x32, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x31, + 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x32, 0xE6, 0x97, + 0xA5, 0x45, 0x32, 0x32, 0xE7, 0x82, 0xB9, 0x45, + 0x32, 0x33, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x33, + 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x34, 0xE6, 0x97, + 0xA5, 0x45, 0x32, 0x34, 0xE7, 0x82, 0xB9, 0x45, + // Bytes 2280 - 22bf + 0x32, 0x35, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x36, + 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x37, 0xE6, 0x97, + 0xA5, 0x45, 0x32, 0x38, 0xE6, 0x97, 0xA5, 0x45, + 0x32, 0x39, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0xE2, + 0x81, 0x84, 0x33, 0x45, 0x32, 0xE2, 0x81, 0x84, + 0x35, 0x45, 0x33, 0x30, 0xE6, 0x97, 0xA5, 0x45, + 0x33, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0xE2, + 0x81, 0x84, 0x34, 0x45, 0x33, 0xE2, 0x81, 0x84, + // Bytes 22c0 - 22ff + 0x35, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x38, 0x45, + 0x34, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x35, 0xE2, + 0x81, 0x84, 0x36, 0x45, 0x35, 0xE2, 0x81, 0x84, + 0x38, 0x45, 0x37, 0xE2, 0x81, 0x84, 0x38, 0x45, + 0x41, 0xE2, 0x88, 0x95, 0x6D, 0x45, 0x56, 0xE2, + 0x88, 0x95, 0x6D, 0x45, 0x6D, 0xE2, 0x88, 0x95, + 0x73, 0x46, 0x31, 0xE2, 0x81, 0x84, 0x31, 0x30, + 0x46, 0x43, 0xE2, 0x88, 0x95, 0x6B, 0x67, 0x46, + // Bytes 2300 - 233f + 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x32, 0x46, 0xD8, + 0xA8, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xA8, + 0xD8, 0xAE, 0xD9, 0x8A, 0x46, 0xD8, 0xAA, 0xD8, + 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, + 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, + 0x46, 0xD8, 0xAA, 0xD8, 0xAD, 0xD9, 0x85, 0x46, + 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD8, + // Bytes 2340 - 237f + 0xAA, 0xD8, 0xAE, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD8, 0xAE, 0xD9, 0x8A, 0x46, 0xD8, 0xAA, 0xD9, + 0x85, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, 0xD9, 0x85, + 0xD8, 0xAD, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, + 0xAE, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAA, 
0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xAC, 0xD8, 0xAD, 0xD9, 0x89, 0x46, 0xD8, + 0xAC, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xAC, + // Bytes 2380 - 23bf + 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xAC, 0xD9, + 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, + 0x8A, 0x46, 0xD8, 0xAD, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAD, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xB3, 0xD8, 0xAC, 0xD8, 0xAD, 0x46, 0xD8, + 0xB3, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD8, 0xB3, + 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xB3, 0xD8, + // Bytes 23c0 - 23ff + 0xAE, 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, + 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD9, 0x85, 0xD8, + 0xAC, 0x46, 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAD, + 0x46, 0xD8, 0xB3, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xB4, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xB4, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xB4, + 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD9, + 0x85, 0xD8, 0xAE, 0x46, 0xD8, 0xB4, 0xD9, 0x85, + // Bytes 2400 - 243f + 0xD9, 0x85, 0x46, 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, + 0xAD, 0x46, 0xD8, 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xB5, 0xD9, 0x84, 0xD9, 0x89, 0x46, + 0xD8, 0xB5, 0xD9, 0x84, 0xDB, 0x92, 0x46, 0xD8, + 0xB5, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB6, + 0xD8, 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xB6, 0xD8, + 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB6, 0xD8, 0xAE, + 0xD9, 0x85, 0x46, 0xD8, 0xB7, 0xD9, 0x85, 0xD8, + // Bytes 2440 - 247f + 0xAD, 0x46, 0xD8, 0xB7, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD8, 0xB7, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xB9, 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD8, + 0xB9, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB9, + 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xB9, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xBA, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x8A, + // Bytes 2480 - 24bf + 0x46, 0xD9, 0x81, 0xD8, 0xAE, 0xD9, 0x85, 0x46, + 0xD9, 0x81, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, + 0x82, 0xD9, 0x84, 0xDB, 0x92, 0x46, 0xD9, 0x82, + 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x82, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x82, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x83, 0xD9, 0x85, 0xD9, + 0x85, 0x46, 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD9, 0x84, 0xD8, 0xAC, 0xD8, 0xAC, 0x46, + // Bytes 24c0 - 24ff + 0xD9, 0x84, 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD9, + 0x84, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x84, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAE, 0xD9, + 0x85, 0x46, 0xD9, 0x84, 0xD9, 0x85, 0xD8, 0xAD, + 0x46, 0xD9, 0x84, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x85, 0xD8, 0xAC, 0xD8, 0xAD, 0x46, 0xD9, + // Bytes 2500 - 253f + 0x85, 0xD8, 0xAC, 0xD8, 0xAE, 0x46, 0xD9, 0x85, + 0xD8, 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, 0xAD, + 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD9, 0x85, 0xD8, 0xAE, 0xD8, 0xAC, 0x46, + 0xD9, 0x85, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, + 0x85, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, 0xD9, 0x85, + // Bytes 2540 - 257f + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x86, 0xD8, 0xAC, + 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x86, 0xD8, 0xAD, 0xD9, 0x85, 0x46, + 0xD9, 0x86, 0xD8, 0xAD, 0xD9, 0x89, 0x46, 0xD9, + 0x86, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x86, + 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD9, + // Bytes 2580 - 25bf + 0x85, 0xD9, 
0x8A, 0x46, 0xD9, 0x87, 0xD9, 0x85, + 0xD8, 0xAC, 0x46, 0xD9, 0x87, 0xD9, 0x85, 0xD9, + 0x85, 0x46, 0xD9, 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x8A, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD9, 0x8A, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x8A, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD8, 0xA7, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD8, 0xAC, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + // Bytes 25c0 - 25ff + 0xD8, 0xAD, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, + 0xAE, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xB1, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xB2, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x85, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD9, 0x86, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD9, 0x87, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD9, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD9, 0x89, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + // Bytes 2600 - 263f + 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x86, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x87, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xDB, 0x88, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xDB, 0x90, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xDB, 0x95, 0x46, 0xE0, 0xB9, 0x8D, + 0xE0, 0xB8, 0xB2, 0x46, 0xE0, 0xBA, 0xAB, 0xE0, + 0xBA, 0x99, 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, + 0xA1, 0x46, 0xE0, 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, + // Bytes 2640 - 267f + 0x46, 0xE0, 0xBD, 0x80, 0xE0, 0xBE, 0xB5, 0x46, + 0xE0, 0xBD, 0x82, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, + 0xBD, 0x8C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, + 0x91, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x96, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x9B, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0x90, 0xE0, 0xBE, + 0xB5, 0x46, 0xE0, 0xBE, 0x92, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBE, 0x9C, 0xE0, 0xBE, 0xB7, 0x46, + // Bytes 2680 - 26bf + 0xE0, 0xBE, 0xA1, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, + 0xBE, 0xA6, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0xAB, 0xE0, 0xBE, 0xB7, 0x46, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0x46, 0xE2, 0x80, 0xB5, 0xE2, + 0x80, 0xB5, 0x46, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0x46, 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, + 0x46, 0xE3, 0x81, 0xBB, 0xE3, 0x81, 0x8B, 0x46, + 0xE3, 0x82, 0x88, 0xE3, 0x82, 0x8A, 0x46, 0xE3, + // Bytes 26c0 - 26ff + 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0x46, 0xE3, 0x82, + 0xB3, 0xE3, 0x82, 0xB3, 0x46, 0xE3, 0x82, 0xB3, + 0xE3, 0x83, 0x88, 0x46, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xB3, 0x46, 0xE3, 0x83, 0x8A, 0xE3, 0x83, + 0x8E, 0x46, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xB3, + 0x46, 0xE3, 0x83, 0x9F, 0xE3, 0x83, 0xAA, 0x46, + 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xA9, 0x46, 0xE3, + 0x83, 0xAC, 0xE3, 0x83, 0xA0, 0x46, 0xE5, 0xA4, + // Bytes 2700 - 273f + 0xA7, 0xE6, 0xAD, 0xA3, 0x46, 0xE5, 0xB9, 0xB3, + 0xE6, 0x88, 0x90, 0x46, 0xE6, 0x98, 0x8E, 0xE6, + 0xB2, 0xBB, 0x46, 0xE6, 0x98, 0xAD, 0xE5, 0x92, + 0x8C, 0x47, 0x72, 0x61, 0x64, 0xE2, 0x88, 0x95, + 0x73, 0x47, 0xE3, 0x80, 0x94, 0x53, 0xE3, 0x80, + 0x95, 0x48, 0x28, 0xE1, 0x84, 0x80, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x82, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x83, + // Bytes 2740 - 277f + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x85, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8C, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8C, 0xE1, + // Bytes 2780 - 27bf + 0x85, 0xAE, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8E, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x8F, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x90, 0xE1, 0x85, 0xA1, 
0x29, 0x48, 0x28, + 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x72, 0x61, 0x64, 0xE2, 0x88, 0x95, 0x73, + 0x32, 0x48, 0xD8, 0xA7, 0xD9, 0x83, 0xD8, 0xA8, + // Bytes 27c0 - 27ff + 0xD8, 0xB1, 0x48, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, + 0x84, 0xD9, 0x87, 0x48, 0xD8, 0xB1, 0xD8, 0xB3, + 0xD9, 0x88, 0xD9, 0x84, 0x48, 0xD8, 0xB1, 0xDB, + 0x8C, 0xD8, 0xA7, 0xD9, 0x84, 0x48, 0xD8, 0xB5, + 0xD9, 0x84, 0xD8, 0xB9, 0xD9, 0x85, 0x48, 0xD8, + 0xB9, 0xD9, 0x84, 0xD9, 0x8A, 0xD9, 0x87, 0x48, + 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0xD8, 0xAF, + 0x48, 0xD9, 0x88, 0xD8, 0xB3, 0xD9, 0x84, 0xD9, + // Bytes 2800 - 283f + 0x85, 0x49, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0x49, 0xE2, 0x80, 0xB5, 0xE2, + 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x49, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x49, + 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0xE2, 0x88, + 0xAE, 0x49, 0xE3, 0x80, 0x94, 0xE4, 0xB8, 0x89, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE4, + 0xBA, 0x8C, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + // Bytes 2840 - 287f + 0x94, 0xE5, 0x8B, 0x9D, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE5, 0xAE, 0x89, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x89, 0x93, + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, + 0x95, 0x97, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, + 0x94, 0xE6, 0x9C, 0xAC, 0xE3, 0x80, 0x95, 0x49, + 0xE3, 0x80, 0x94, 0xE7, 0x82, 0xB9, 0xE3, 0x80, + 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE7, 0x9B, 0x97, + // Bytes 2880 - 28bf + 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x82, 0xA2, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0x49, + 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0xA9, 0xE3, 0x83, + 0xB3, 0x49, 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xB3, + 0xE3, 0x82, 0xB9, 0x49, 0xE3, 0x82, 0xAA, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0xA0, 0x49, 0xE3, 0x82, + 0xAB, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xAA, 0x49, + // Bytes 28c0 - 28ff + 0xE3, 0x82, 0xB1, 0xE3, 0x83, 0xBC, 0xE3, 0x82, + 0xB9, 0x49, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xAB, + 0xE3, 0x83, 0x8A, 0x49, 0xE3, 0x82, 0xBB, 0xE3, + 0x83, 0xB3, 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, + 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0x49, + 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0xE3, 0x82, + 0xB7, 0x49, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x8E, 0xE3, + // Bytes 2900 - 293f + 0x83, 0x83, 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, + 0x8F, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0x84, 0x49, + 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xAB, 0x49, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xB3, 0x49, 0xE3, 0x83, 0x95, 0xE3, + 0x83, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xBD, 0x49, + 0xE3, 0x83, 0x98, 0xE3, 0x83, 0xAB, 0xE3, 0x83, + // Bytes 2940 - 297f + 0x84, 0x49, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9B, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0xB3, 0x49, 0xE3, 0x83, + 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xAB, 0x49, + 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x8F, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0xAB, + 0xE3, 0x82, 0xAF, 0x49, 0xE3, 0x83, 0xA4, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, + // Bytes 2980 - 29bf + 0xA6, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xB3, 0x49, + 0xE3, 0x83, 0xAF, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x4C, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0x4C, 0xE2, + 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, + 0xE2, 0x88, 0xAB, 0x4C, 0xE3, 0x82, 0xA2, 0xE3, + 0x83, 0xAB, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0xA1, + 
0x4C, 0xE3, 0x82, 0xA8, 0xE3, 0x83, 0xBC, 0xE3, + // Bytes 29c0 - 29ff + 0x82, 0xAB, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, + 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAD, 0xE3, + 0x83, 0xB3, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x9E, 0x4C, + 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x82, 0xAB, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAA, 0xE3, 0x83, + 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + // Bytes 2a00 - 2a3f + 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, + 0x82, 0xAD, 0xE3, 0x83, 0xA5, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAF, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0xA0, + 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, + 0x83, 0xBC, 0xE3, 0x83, 0x8D, 0x4C, 0xE3, 0x82, + 0xB5, 0xE3, 0x82, 0xA4, 0xE3, 0x82, 0xAF, 0xE3, + 0x83, 0xAB, 0x4C, 0xE3, 0x82, 0xBF, 0xE3, 0x82, + // Bytes 2a40 - 2a7f + 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x4C, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x84, 0x4C, 0xE3, 0x83, 0x92, + 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAB, 0x4C, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0xA3, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0x4C, 0xE3, + 0x83, 0x98, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xBF, 0x4C, 0xE3, 0x83, 0x98, 0xE3, + // Bytes 2a80 - 2abf + 0x82, 0x9A, 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0x92, + 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, + 0x83, 0xB3, 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, + 0x9B, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x9E, 0xE3, 0x82, + 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0x4C, + 0xE3, 0x83, 0x9F, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAD, 0xE3, 0x83, 0xB3, 0x4C, 0xE3, 0x83, 0xA1, + // Bytes 2ac0 - 2aff + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x83, + 0xAB, 0x4C, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x83, + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0xAB, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0xE3, 0x83, 0xBC, 0x4C, 0xE6, 0xA0, 0xAA, 0xE5, + 0xBC, 0x8F, 0xE4, 0xBC, 0x9A, 0xE7, 0xA4, 0xBE, + 0x4E, 0x28, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA9, + 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xAE, 0x29, 0x4F, + // Bytes 2b00 - 2b3f + 0xD8, 0xAC, 0xD9, 0x84, 0x20, 0xD8, 0xAC, 0xD9, + 0x84, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x87, 0x4F, + 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0x8F, 0xE3, 0x82, + 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0x4F, + 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA2, 0x4F, + 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0xE3, 0x83, + 0xAF, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4F, + // Bytes 2b40 - 2b7f + 0xE3, 0x82, 0xB5, 0xE3, 0x83, 0xB3, 0xE3, 0x83, + 0x81, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xA0, 0x4F, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xAB, 0x4F, + 0xE3, 0x83, 0x98, 0xE3, 0x82, 0xAF, 0xE3, 0x82, + 0xBF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, 0x4F, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0x4F, + // Bytes 2b80 - 2bbf + 0xE3, 0x83, 0x9E, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xB7, 0xE3, 0x83, 0xA7, 0xE3, 0x83, 0xB3, 0x4F, + 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x4F, + 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x95, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x51, + 0x28, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA9, 0xE1, + 0x84, 0x8C, 0xE1, 0x85, 0xA5, 0xE1, 0x86, 0xAB, + // Bytes 2bc0 - 2bff + 0x29, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 
+ 0x99, 0xE3, 0x83, 0xBC, 0x52, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x82, 0xAF, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0x52, + 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, 0xE3, 0x83, + 0xA1, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xAB, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x82, + // Bytes 2c00 - 2c3f + 0x99, 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0xE3, + 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x52, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xBB, 0xE3, + 0x82, 0x99, 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0xAD, + 0x52, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, + 0x83, 0xBC, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0x52, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x9A, 0xE3, 0x82, 0xA2, 0xE3, 0x82, 0xB9, + // Bytes 2c40 - 2c7f + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x83, + 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0xA7, 0xE3, 0x83, + 0xAB, 0x52, 0xE3, 0x83, 0x9F, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0xAC, + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, + 0xB1, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xB3, 0x61, + // Bytes 2c80 - 2cbf + 0xD8, 0xB5, 0xD9, 0x84, 0xD9, 0x89, 0x20, 0xD8, + 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, 0x20, + 0xD8, 0xB9, 0xD9, 0x84, 0xD9, 0x8A, 0xD9, 0x87, + 0x20, 0xD9, 0x88, 0xD8, 0xB3, 0xD9, 0x84, 0xD9, + 0x85, 0x06, 0xE0, 0xA7, 0x87, 0xE0, 0xA6, 0xBE, + 0x01, 0x06, 0xE0, 0xA7, 0x87, 0xE0, 0xA7, 0x97, + 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, 0xAC, 0xBE, + 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, 0xAD, 0x96, + // Bytes 2cc0 - 2cff + 0x01, 0x06, 0xE0, 0xAD, 0x87, 0xE0, 0xAD, 0x97, + 0x01, 0x06, 0xE0, 0xAE, 0x92, 0xE0, 0xAF, 0x97, + 0x01, 0x06, 0xE0, 0xAF, 0x86, 0xE0, 0xAE, 0xBE, + 0x01, 0x06, 0xE0, 0xAF, 0x86, 0xE0, 0xAF, 0x97, + 0x01, 0x06, 0xE0, 0xAF, 0x87, 0xE0, 0xAE, 0xBE, + 0x01, 0x06, 0xE0, 0xB2, 0xBF, 0xE0, 0xB3, 0x95, + 0x01, 0x06, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x95, + 0x01, 0x06, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x96, + // Bytes 2d00 - 2d3f + 0x01, 0x06, 0xE0, 0xB5, 0x86, 0xE0, 0xB4, 0xBE, + 0x01, 0x06, 0xE0, 0xB5, 0x86, 0xE0, 0xB5, 0x97, + 0x01, 0x06, 0xE0, 0xB5, 0x87, 0xE0, 0xB4, 0xBE, + 0x01, 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x9F, + 0x01, 0x06, 0xE1, 0x80, 0xA5, 0xE1, 0x80, 0xAE, + 0x01, 0x06, 0xE1, 0xAC, 0x85, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0x87, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0x89, 0xE1, 0xAC, 0xB5, + // Bytes 2d40 - 2d7f + 0x01, 0x06, 0xE1, 0xAC, 0x8B, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0x8D, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0x91, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0xBA, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0xBC, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0xBE, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAC, 0xBF, 0xE1, 0xAC, 0xB5, + 0x01, 0x06, 0xE1, 0xAD, 0x82, 0xE1, 0xAC, 0xB5, + // Bytes 2d80 - 2dbf + 0x01, 0x08, 0xF0, 0x91, 0x84, 0xB1, 0xF0, 0x91, + 0x84, 0xA7, 0x01, 0x08, 0xF0, 0x91, 0x84, 0xB2, + 0xF0, 0x91, 0x84, 0xA7, 0x01, 0x08, 0xF0, 0x91, + 0x8D, 0x87, 0xF0, 0x91, 0x8C, 0xBE, 0x01, 0x08, + 0xF0, 0x91, 0x8D, 0x87, 0xF0, 0x91, 0x8D, 0x97, + 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, + 0x92, 0xB0, 0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, + 0xF0, 0x91, 0x92, 0xBA, 0x01, 0x08, 0xF0, 0x91, + // Bytes 2dc0 - 2dff + 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xBD, 0x01, 0x08, + 0xF0, 0x91, 0x96, 0xB8, 0xF0, 0x91, 0x96, 0xAF, + 0x01, 0x08, 0xF0, 0x91, 0x96, 0xB9, 0xF0, 0x91, + 0x96, 0xAF, 0x01, 0x09, 0xE0, 0xB3, 0x86, 0xE0, + 0xB3, 0x82, 0xE0, 0xB3, 0x95, 0x02, 0x09, 0xE0, + 0xB7, 0x99, 0xE0, 
0xB7, 0x8F, 0xE0, 0xB7, 0x8A, + 0x12, 0x44, 0x44, 0x5A, 0xCC, 0x8C, 0xC9, 0x44, + 0x44, 0x7A, 0xCC, 0x8C, 0xC9, 0x44, 0x64, 0x7A, + // Bytes 2e00 - 2e3f + 0xCC, 0x8C, 0xC9, 0x46, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x93, 0xC9, 0x46, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x94, 0xC9, 0x46, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x95, 0xB5, 0x46, 0xE1, 0x84, 0x80, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x82, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x83, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x85, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x86, 0xE1, + // Bytes 2e40 - 2e7f + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x87, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x89, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x8B, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x8B, 0xE1, + 0x85, 0xAE, 0x01, 0x46, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x8E, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x8F, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x90, 0xE1, + // Bytes 2e80 - 2ebf + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x91, 0xE1, + 0x85, 0xA1, 0x01, 0x46, 0xE1, 0x84, 0x92, 0xE1, + 0x85, 0xA1, 0x01, 0x49, 0xE3, 0x83, 0xA1, 0xE3, + 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, 0x4C, 0xE1, + 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0xE1, 0x84, 0x8B, + 0xE1, 0x85, 0xB4, 0x01, 0x4C, 0xE3, 0x82, 0xAD, + 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0x0D, 0x4C, 0xE3, 0x82, 0xB3, 0xE3, 0x83, + // Bytes 2ec0 - 2eff + 0xBC, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x0D, + 0x4C, 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE1, + 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0xE1, 0x86, 0xB7, + 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA9, 0x01, 0x4F, + 0xE3, 0x82, 0xA4, 0xE3, 0x83, 0x8B, 0xE3, 0x83, + 0xB3, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x4F, 0xE3, 0x82, 0xB7, 0xE3, 0x83, 0xAA, 0xE3, + // Bytes 2f00 - 2f3f + 0x83, 0xB3, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, + 0x0D, 0x4F, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, + 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB7, 0xE3, 0x82, + 0x99, 0x0D, 0x4F, 0xE3, 0x83, 0x9B, 0xE3, 0x82, + 0x9A, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x52, 0xE3, 0x82, 0xA8, 0xE3, + 0x82, 0xB9, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, 0x52, + // Bytes 2f40 - 2f7f + 0xE3, 0x83, 0x95, 0xE3, 0x82, 0xA1, 0xE3, 0x83, + 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x86, 0xE0, 0xB3, 0x86, 0xE0, + 0xB3, 0x82, 0x01, 0x86, 0xE0, 0xB7, 0x99, 0xE0, + 0xB7, 0x8F, 0x01, 0x03, 0x3C, 0xCC, 0xB8, 0x05, + 0x03, 0x3D, 0xCC, 0xB8, 0x05, 0x03, 0x3E, 0xCC, + 0xB8, 0x05, 0x03, 0x41, 0xCC, 0x80, 0xC9, 0x03, + 0x41, 0xCC, 0x81, 0xC9, 0x03, 0x41, 0xCC, 0x83, + // Bytes 2f80 - 2fbf + 0xC9, 0x03, 0x41, 0xCC, 0x84, 0xC9, 0x03, 0x41, + 0xCC, 0x89, 0xC9, 0x03, 0x41, 0xCC, 0x8C, 0xC9, + 0x03, 0x41, 0xCC, 0x8F, 0xC9, 0x03, 0x41, 0xCC, + 0x91, 0xC9, 0x03, 0x41, 0xCC, 0xA5, 0xB5, 0x03, + 0x41, 0xCC, 0xA8, 0xA5, 0x03, 0x42, 0xCC, 0x87, + 0xC9, 0x03, 0x42, 0xCC, 0xA3, 0xB5, 0x03, 0x42, + 0xCC, 0xB1, 0xB5, 0x03, 0x43, 0xCC, 0x81, 0xC9, + 0x03, 0x43, 0xCC, 0x82, 0xC9, 0x03, 0x43, 0xCC, + // Bytes 2fc0 - 2fff + 0x87, 0xC9, 0x03, 0x43, 0xCC, 0x8C, 0xC9, 0x03, + 0x44, 0xCC, 0x87, 0xC9, 0x03, 0x44, 0xCC, 0x8C, + 0xC9, 0x03, 0x44, 0xCC, 0xA3, 0xB5, 0x03, 0x44, + 0xCC, 0xA7, 0xA5, 0x03, 0x44, 0xCC, 0xAD, 0xB5, + 0x03, 0x44, 0xCC, 0xB1, 0xB5, 0x03, 0x45, 0xCC, + 0x80, 0xC9, 0x03, 0x45, 0xCC, 0x81, 0xC9, 0x03, + 0x45, 0xCC, 0x83, 0xC9, 0x03, 0x45, 0xCC, 0x86, + 0xC9, 0x03, 0x45, 0xCC, 0x87, 0xC9, 0x03, 0x45, + // Bytes 3000 - 303f + 0xCC, 0x88, 
0xC9, 0x03, 0x45, 0xCC, 0x89, 0xC9, + 0x03, 0x45, 0xCC, 0x8C, 0xC9, 0x03, 0x45, 0xCC, + 0x8F, 0xC9, 0x03, 0x45, 0xCC, 0x91, 0xC9, 0x03, + 0x45, 0xCC, 0xA8, 0xA5, 0x03, 0x45, 0xCC, 0xAD, + 0xB5, 0x03, 0x45, 0xCC, 0xB0, 0xB5, 0x03, 0x46, + 0xCC, 0x87, 0xC9, 0x03, 0x47, 0xCC, 0x81, 0xC9, + 0x03, 0x47, 0xCC, 0x82, 0xC9, 0x03, 0x47, 0xCC, + 0x84, 0xC9, 0x03, 0x47, 0xCC, 0x86, 0xC9, 0x03, + // Bytes 3040 - 307f + 0x47, 0xCC, 0x87, 0xC9, 0x03, 0x47, 0xCC, 0x8C, + 0xC9, 0x03, 0x47, 0xCC, 0xA7, 0xA5, 0x03, 0x48, + 0xCC, 0x82, 0xC9, 0x03, 0x48, 0xCC, 0x87, 0xC9, + 0x03, 0x48, 0xCC, 0x88, 0xC9, 0x03, 0x48, 0xCC, + 0x8C, 0xC9, 0x03, 0x48, 0xCC, 0xA3, 0xB5, 0x03, + 0x48, 0xCC, 0xA7, 0xA5, 0x03, 0x48, 0xCC, 0xAE, + 0xB5, 0x03, 0x49, 0xCC, 0x80, 0xC9, 0x03, 0x49, + 0xCC, 0x81, 0xC9, 0x03, 0x49, 0xCC, 0x82, 0xC9, + // Bytes 3080 - 30bf + 0x03, 0x49, 0xCC, 0x83, 0xC9, 0x03, 0x49, 0xCC, + 0x84, 0xC9, 0x03, 0x49, 0xCC, 0x86, 0xC9, 0x03, + 0x49, 0xCC, 0x87, 0xC9, 0x03, 0x49, 0xCC, 0x89, + 0xC9, 0x03, 0x49, 0xCC, 0x8C, 0xC9, 0x03, 0x49, + 0xCC, 0x8F, 0xC9, 0x03, 0x49, 0xCC, 0x91, 0xC9, + 0x03, 0x49, 0xCC, 0xA3, 0xB5, 0x03, 0x49, 0xCC, + 0xA8, 0xA5, 0x03, 0x49, 0xCC, 0xB0, 0xB5, 0x03, + 0x4A, 0xCC, 0x82, 0xC9, 0x03, 0x4B, 0xCC, 0x81, + // Bytes 30c0 - 30ff + 0xC9, 0x03, 0x4B, 0xCC, 0x8C, 0xC9, 0x03, 0x4B, + 0xCC, 0xA3, 0xB5, 0x03, 0x4B, 0xCC, 0xA7, 0xA5, + 0x03, 0x4B, 0xCC, 0xB1, 0xB5, 0x03, 0x4C, 0xCC, + 0x81, 0xC9, 0x03, 0x4C, 0xCC, 0x8C, 0xC9, 0x03, + 0x4C, 0xCC, 0xA7, 0xA5, 0x03, 0x4C, 0xCC, 0xAD, + 0xB5, 0x03, 0x4C, 0xCC, 0xB1, 0xB5, 0x03, 0x4D, + 0xCC, 0x81, 0xC9, 0x03, 0x4D, 0xCC, 0x87, 0xC9, + 0x03, 0x4D, 0xCC, 0xA3, 0xB5, 0x03, 0x4E, 0xCC, + // Bytes 3100 - 313f + 0x80, 0xC9, 0x03, 0x4E, 0xCC, 0x81, 0xC9, 0x03, + 0x4E, 0xCC, 0x83, 0xC9, 0x03, 0x4E, 0xCC, 0x87, + 0xC9, 0x03, 0x4E, 0xCC, 0x8C, 0xC9, 0x03, 0x4E, + 0xCC, 0xA3, 0xB5, 0x03, 0x4E, 0xCC, 0xA7, 0xA5, + 0x03, 0x4E, 0xCC, 0xAD, 0xB5, 0x03, 0x4E, 0xCC, + 0xB1, 0xB5, 0x03, 0x4F, 0xCC, 0x80, 0xC9, 0x03, + 0x4F, 0xCC, 0x81, 0xC9, 0x03, 0x4F, 0xCC, 0x86, + 0xC9, 0x03, 0x4F, 0xCC, 0x89, 0xC9, 0x03, 0x4F, + // Bytes 3140 - 317f + 0xCC, 0x8B, 0xC9, 0x03, 0x4F, 0xCC, 0x8C, 0xC9, + 0x03, 0x4F, 0xCC, 0x8F, 0xC9, 0x03, 0x4F, 0xCC, + 0x91, 0xC9, 0x03, 0x50, 0xCC, 0x81, 0xC9, 0x03, + 0x50, 0xCC, 0x87, 0xC9, 0x03, 0x52, 0xCC, 0x81, + 0xC9, 0x03, 0x52, 0xCC, 0x87, 0xC9, 0x03, 0x52, + 0xCC, 0x8C, 0xC9, 0x03, 0x52, 0xCC, 0x8F, 0xC9, + 0x03, 0x52, 0xCC, 0x91, 0xC9, 0x03, 0x52, 0xCC, + 0xA7, 0xA5, 0x03, 0x52, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 3180 - 31bf + 0x53, 0xCC, 0x82, 0xC9, 0x03, 0x53, 0xCC, 0x87, + 0xC9, 0x03, 0x53, 0xCC, 0xA6, 0xB5, 0x03, 0x53, + 0xCC, 0xA7, 0xA5, 0x03, 0x54, 0xCC, 0x87, 0xC9, + 0x03, 0x54, 0xCC, 0x8C, 0xC9, 0x03, 0x54, 0xCC, + 0xA3, 0xB5, 0x03, 0x54, 0xCC, 0xA6, 0xB5, 0x03, + 0x54, 0xCC, 0xA7, 0xA5, 0x03, 0x54, 0xCC, 0xAD, + 0xB5, 0x03, 0x54, 0xCC, 0xB1, 0xB5, 0x03, 0x55, + 0xCC, 0x80, 0xC9, 0x03, 0x55, 0xCC, 0x81, 0xC9, + // Bytes 31c0 - 31ff + 0x03, 0x55, 0xCC, 0x82, 0xC9, 0x03, 0x55, 0xCC, + 0x86, 0xC9, 0x03, 0x55, 0xCC, 0x89, 0xC9, 0x03, + 0x55, 0xCC, 0x8A, 0xC9, 0x03, 0x55, 0xCC, 0x8B, + 0xC9, 0x03, 0x55, 0xCC, 0x8C, 0xC9, 0x03, 0x55, + 0xCC, 0x8F, 0xC9, 0x03, 0x55, 0xCC, 0x91, 0xC9, + 0x03, 0x55, 0xCC, 0xA3, 0xB5, 0x03, 0x55, 0xCC, + 0xA4, 0xB5, 0x03, 0x55, 0xCC, 0xA8, 0xA5, 0x03, + 0x55, 0xCC, 0xAD, 0xB5, 0x03, 0x55, 0xCC, 0xB0, + // Bytes 3200 - 323f + 0xB5, 0x03, 0x56, 0xCC, 0x83, 0xC9, 0x03, 0x56, + 0xCC, 0xA3, 0xB5, 0x03, 0x57, 0xCC, 0x80, 0xC9, + 0x03, 0x57, 0xCC, 0x81, 0xC9, 0x03, 0x57, 0xCC, + 0x82, 0xC9, 0x03, 0x57, 0xCC, 
0x87, 0xC9, 0x03, + 0x57, 0xCC, 0x88, 0xC9, 0x03, 0x57, 0xCC, 0xA3, + 0xB5, 0x03, 0x58, 0xCC, 0x87, 0xC9, 0x03, 0x58, + 0xCC, 0x88, 0xC9, 0x03, 0x59, 0xCC, 0x80, 0xC9, + 0x03, 0x59, 0xCC, 0x81, 0xC9, 0x03, 0x59, 0xCC, + // Bytes 3240 - 327f + 0x82, 0xC9, 0x03, 0x59, 0xCC, 0x83, 0xC9, 0x03, + 0x59, 0xCC, 0x84, 0xC9, 0x03, 0x59, 0xCC, 0x87, + 0xC9, 0x03, 0x59, 0xCC, 0x88, 0xC9, 0x03, 0x59, + 0xCC, 0x89, 0xC9, 0x03, 0x59, 0xCC, 0xA3, 0xB5, + 0x03, 0x5A, 0xCC, 0x81, 0xC9, 0x03, 0x5A, 0xCC, + 0x82, 0xC9, 0x03, 0x5A, 0xCC, 0x87, 0xC9, 0x03, + 0x5A, 0xCC, 0x8C, 0xC9, 0x03, 0x5A, 0xCC, 0xA3, + 0xB5, 0x03, 0x5A, 0xCC, 0xB1, 0xB5, 0x03, 0x61, + // Bytes 3280 - 32bf + 0xCC, 0x80, 0xC9, 0x03, 0x61, 0xCC, 0x81, 0xC9, + 0x03, 0x61, 0xCC, 0x83, 0xC9, 0x03, 0x61, 0xCC, + 0x84, 0xC9, 0x03, 0x61, 0xCC, 0x89, 0xC9, 0x03, + 0x61, 0xCC, 0x8C, 0xC9, 0x03, 0x61, 0xCC, 0x8F, + 0xC9, 0x03, 0x61, 0xCC, 0x91, 0xC9, 0x03, 0x61, + 0xCC, 0xA5, 0xB5, 0x03, 0x61, 0xCC, 0xA8, 0xA5, + 0x03, 0x62, 0xCC, 0x87, 0xC9, 0x03, 0x62, 0xCC, + 0xA3, 0xB5, 0x03, 0x62, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 32c0 - 32ff + 0x63, 0xCC, 0x81, 0xC9, 0x03, 0x63, 0xCC, 0x82, + 0xC9, 0x03, 0x63, 0xCC, 0x87, 0xC9, 0x03, 0x63, + 0xCC, 0x8C, 0xC9, 0x03, 0x64, 0xCC, 0x87, 0xC9, + 0x03, 0x64, 0xCC, 0x8C, 0xC9, 0x03, 0x64, 0xCC, + 0xA3, 0xB5, 0x03, 0x64, 0xCC, 0xA7, 0xA5, 0x03, + 0x64, 0xCC, 0xAD, 0xB5, 0x03, 0x64, 0xCC, 0xB1, + 0xB5, 0x03, 0x65, 0xCC, 0x80, 0xC9, 0x03, 0x65, + 0xCC, 0x81, 0xC9, 0x03, 0x65, 0xCC, 0x83, 0xC9, + // Bytes 3300 - 333f + 0x03, 0x65, 0xCC, 0x86, 0xC9, 0x03, 0x65, 0xCC, + 0x87, 0xC9, 0x03, 0x65, 0xCC, 0x88, 0xC9, 0x03, + 0x65, 0xCC, 0x89, 0xC9, 0x03, 0x65, 0xCC, 0x8C, + 0xC9, 0x03, 0x65, 0xCC, 0x8F, 0xC9, 0x03, 0x65, + 0xCC, 0x91, 0xC9, 0x03, 0x65, 0xCC, 0xA8, 0xA5, + 0x03, 0x65, 0xCC, 0xAD, 0xB5, 0x03, 0x65, 0xCC, + 0xB0, 0xB5, 0x03, 0x66, 0xCC, 0x87, 0xC9, 0x03, + 0x67, 0xCC, 0x81, 0xC9, 0x03, 0x67, 0xCC, 0x82, + // Bytes 3340 - 337f + 0xC9, 0x03, 0x67, 0xCC, 0x84, 0xC9, 0x03, 0x67, + 0xCC, 0x86, 0xC9, 0x03, 0x67, 0xCC, 0x87, 0xC9, + 0x03, 0x67, 0xCC, 0x8C, 0xC9, 0x03, 0x67, 0xCC, + 0xA7, 0xA5, 0x03, 0x68, 0xCC, 0x82, 0xC9, 0x03, + 0x68, 0xCC, 0x87, 0xC9, 0x03, 0x68, 0xCC, 0x88, + 0xC9, 0x03, 0x68, 0xCC, 0x8C, 0xC9, 0x03, 0x68, + 0xCC, 0xA3, 0xB5, 0x03, 0x68, 0xCC, 0xA7, 0xA5, + 0x03, 0x68, 0xCC, 0xAE, 0xB5, 0x03, 0x68, 0xCC, + // Bytes 3380 - 33bf + 0xB1, 0xB5, 0x03, 0x69, 0xCC, 0x80, 0xC9, 0x03, + 0x69, 0xCC, 0x81, 0xC9, 0x03, 0x69, 0xCC, 0x82, + 0xC9, 0x03, 0x69, 0xCC, 0x83, 0xC9, 0x03, 0x69, + 0xCC, 0x84, 0xC9, 0x03, 0x69, 0xCC, 0x86, 0xC9, + 0x03, 0x69, 0xCC, 0x89, 0xC9, 0x03, 0x69, 0xCC, + 0x8C, 0xC9, 0x03, 0x69, 0xCC, 0x8F, 0xC9, 0x03, + 0x69, 0xCC, 0x91, 0xC9, 0x03, 0x69, 0xCC, 0xA3, + 0xB5, 0x03, 0x69, 0xCC, 0xA8, 0xA5, 0x03, 0x69, + // Bytes 33c0 - 33ff + 0xCC, 0xB0, 0xB5, 0x03, 0x6A, 0xCC, 0x82, 0xC9, + 0x03, 0x6A, 0xCC, 0x8C, 0xC9, 0x03, 0x6B, 0xCC, + 0x81, 0xC9, 0x03, 0x6B, 0xCC, 0x8C, 0xC9, 0x03, + 0x6B, 0xCC, 0xA3, 0xB5, 0x03, 0x6B, 0xCC, 0xA7, + 0xA5, 0x03, 0x6B, 0xCC, 0xB1, 0xB5, 0x03, 0x6C, + 0xCC, 0x81, 0xC9, 0x03, 0x6C, 0xCC, 0x8C, 0xC9, + 0x03, 0x6C, 0xCC, 0xA7, 0xA5, 0x03, 0x6C, 0xCC, + 0xAD, 0xB5, 0x03, 0x6C, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 3400 - 343f + 0x6D, 0xCC, 0x81, 0xC9, 0x03, 0x6D, 0xCC, 0x87, + 0xC9, 0x03, 0x6D, 0xCC, 0xA3, 0xB5, 0x03, 0x6E, + 0xCC, 0x80, 0xC9, 0x03, 0x6E, 0xCC, 0x81, 0xC9, + 0x03, 0x6E, 0xCC, 0x83, 0xC9, 0x03, 0x6E, 0xCC, + 0x87, 0xC9, 0x03, 0x6E, 0xCC, 0x8C, 0xC9, 0x03, + 0x6E, 0xCC, 0xA3, 0xB5, 0x03, 0x6E, 0xCC, 0xA7, + 0xA5, 0x03, 0x6E, 0xCC, 0xAD, 0xB5, 0x03, 0x6E, + 
0xCC, 0xB1, 0xB5, 0x03, 0x6F, 0xCC, 0x80, 0xC9, + // Bytes 3440 - 347f + 0x03, 0x6F, 0xCC, 0x81, 0xC9, 0x03, 0x6F, 0xCC, + 0x86, 0xC9, 0x03, 0x6F, 0xCC, 0x89, 0xC9, 0x03, + 0x6F, 0xCC, 0x8B, 0xC9, 0x03, 0x6F, 0xCC, 0x8C, + 0xC9, 0x03, 0x6F, 0xCC, 0x8F, 0xC9, 0x03, 0x6F, + 0xCC, 0x91, 0xC9, 0x03, 0x70, 0xCC, 0x81, 0xC9, + 0x03, 0x70, 0xCC, 0x87, 0xC9, 0x03, 0x72, 0xCC, + 0x81, 0xC9, 0x03, 0x72, 0xCC, 0x87, 0xC9, 0x03, + 0x72, 0xCC, 0x8C, 0xC9, 0x03, 0x72, 0xCC, 0x8F, + // Bytes 3480 - 34bf + 0xC9, 0x03, 0x72, 0xCC, 0x91, 0xC9, 0x03, 0x72, + 0xCC, 0xA7, 0xA5, 0x03, 0x72, 0xCC, 0xB1, 0xB5, + 0x03, 0x73, 0xCC, 0x82, 0xC9, 0x03, 0x73, 0xCC, + 0x87, 0xC9, 0x03, 0x73, 0xCC, 0xA6, 0xB5, 0x03, + 0x73, 0xCC, 0xA7, 0xA5, 0x03, 0x74, 0xCC, 0x87, + 0xC9, 0x03, 0x74, 0xCC, 0x88, 0xC9, 0x03, 0x74, + 0xCC, 0x8C, 0xC9, 0x03, 0x74, 0xCC, 0xA3, 0xB5, + 0x03, 0x74, 0xCC, 0xA6, 0xB5, 0x03, 0x74, 0xCC, + // Bytes 34c0 - 34ff + 0xA7, 0xA5, 0x03, 0x74, 0xCC, 0xAD, 0xB5, 0x03, + 0x74, 0xCC, 0xB1, 0xB5, 0x03, 0x75, 0xCC, 0x80, + 0xC9, 0x03, 0x75, 0xCC, 0x81, 0xC9, 0x03, 0x75, + 0xCC, 0x82, 0xC9, 0x03, 0x75, 0xCC, 0x86, 0xC9, + 0x03, 0x75, 0xCC, 0x89, 0xC9, 0x03, 0x75, 0xCC, + 0x8A, 0xC9, 0x03, 0x75, 0xCC, 0x8B, 0xC9, 0x03, + 0x75, 0xCC, 0x8C, 0xC9, 0x03, 0x75, 0xCC, 0x8F, + 0xC9, 0x03, 0x75, 0xCC, 0x91, 0xC9, 0x03, 0x75, + // Bytes 3500 - 353f + 0xCC, 0xA3, 0xB5, 0x03, 0x75, 0xCC, 0xA4, 0xB5, + 0x03, 0x75, 0xCC, 0xA8, 0xA5, 0x03, 0x75, 0xCC, + 0xAD, 0xB5, 0x03, 0x75, 0xCC, 0xB0, 0xB5, 0x03, + 0x76, 0xCC, 0x83, 0xC9, 0x03, 0x76, 0xCC, 0xA3, + 0xB5, 0x03, 0x77, 0xCC, 0x80, 0xC9, 0x03, 0x77, + 0xCC, 0x81, 0xC9, 0x03, 0x77, 0xCC, 0x82, 0xC9, + 0x03, 0x77, 0xCC, 0x87, 0xC9, 0x03, 0x77, 0xCC, + 0x88, 0xC9, 0x03, 0x77, 0xCC, 0x8A, 0xC9, 0x03, + // Bytes 3540 - 357f + 0x77, 0xCC, 0xA3, 0xB5, 0x03, 0x78, 0xCC, 0x87, + 0xC9, 0x03, 0x78, 0xCC, 0x88, 0xC9, 0x03, 0x79, + 0xCC, 0x80, 0xC9, 0x03, 0x79, 0xCC, 0x81, 0xC9, + 0x03, 0x79, 0xCC, 0x82, 0xC9, 0x03, 0x79, 0xCC, + 0x83, 0xC9, 0x03, 0x79, 0xCC, 0x84, 0xC9, 0x03, + 0x79, 0xCC, 0x87, 0xC9, 0x03, 0x79, 0xCC, 0x88, + 0xC9, 0x03, 0x79, 0xCC, 0x89, 0xC9, 0x03, 0x79, + 0xCC, 0x8A, 0xC9, 0x03, 0x79, 0xCC, 0xA3, 0xB5, + // Bytes 3580 - 35bf + 0x03, 0x7A, 0xCC, 0x81, 0xC9, 0x03, 0x7A, 0xCC, + 0x82, 0xC9, 0x03, 0x7A, 0xCC, 0x87, 0xC9, 0x03, + 0x7A, 0xCC, 0x8C, 0xC9, 0x03, 0x7A, 0xCC, 0xA3, + 0xB5, 0x03, 0x7A, 0xCC, 0xB1, 0xB5, 0x04, 0xC2, + 0xA8, 0xCC, 0x80, 0xCA, 0x04, 0xC2, 0xA8, 0xCC, + 0x81, 0xCA, 0x04, 0xC2, 0xA8, 0xCD, 0x82, 0xCA, + 0x04, 0xC3, 0x86, 0xCC, 0x81, 0xC9, 0x04, 0xC3, + 0x86, 0xCC, 0x84, 0xC9, 0x04, 0xC3, 0x98, 0xCC, + // Bytes 35c0 - 35ff + 0x81, 0xC9, 0x04, 0xC3, 0xA6, 0xCC, 0x81, 0xC9, + 0x04, 0xC3, 0xA6, 0xCC, 0x84, 0xC9, 0x04, 0xC3, + 0xB8, 0xCC, 0x81, 0xC9, 0x04, 0xC5, 0xBF, 0xCC, + 0x87, 0xC9, 0x04, 0xC6, 0xB7, 0xCC, 0x8C, 0xC9, + 0x04, 0xCA, 0x92, 0xCC, 0x8C, 0xC9, 0x04, 0xCE, + 0x91, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x91, 0xCC, + 0x81, 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x84, 0xC9, + 0x04, 0xCE, 0x91, 0xCC, 0x86, 0xC9, 0x04, 0xCE, + // Bytes 3600 - 363f + 0x91, 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0x95, 0xCC, + 0x80, 0xC9, 0x04, 0xCE, 0x95, 0xCC, 0x81, 0xC9, + 0x04, 0xCE, 0x97, 0xCC, 0x80, 0xC9, 0x04, 0xCE, + 0x97, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x97, 0xCD, + 0x85, 0xD9, 0x04, 0xCE, 0x99, 0xCC, 0x80, 0xC9, + 0x04, 0xCE, 0x99, 0xCC, 0x81, 0xC9, 0x04, 0xCE, + 0x99, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0x99, 0xCC, + 0x86, 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x88, 0xC9, + // Bytes 3640 - 367f + 0x04, 0xCE, 0x9F, 0xCC, 0x80, 0xC9, 0x04, 0xCE, + 0x9F, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA1, 0xCC, 
+ 0x94, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x80, 0xC9, + 0x04, 0xCE, 0xA5, 0xCC, 0x81, 0xC9, 0x04, 0xCE, + 0xA5, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, + 0x86, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x88, 0xC9, + 0x04, 0xCE, 0xA9, 0xCC, 0x80, 0xC9, 0x04, 0xCE, + 0xA9, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA9, 0xCD, + // Bytes 3680 - 36bf + 0x85, 0xD9, 0x04, 0xCE, 0xB1, 0xCC, 0x84, 0xC9, + 0x04, 0xCE, 0xB1, 0xCC, 0x86, 0xC9, 0x04, 0xCE, + 0xB1, 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0xB5, 0xCC, + 0x80, 0xC9, 0x04, 0xCE, 0xB5, 0xCC, 0x81, 0xC9, + 0x04, 0xCE, 0xB7, 0xCD, 0x85, 0xD9, 0x04, 0xCE, + 0xB9, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xB9, 0xCC, + 0x81, 0xC9, 0x04, 0xCE, 0xB9, 0xCC, 0x84, 0xC9, + 0x04, 0xCE, 0xB9, 0xCC, 0x86, 0xC9, 0x04, 0xCE, + // Bytes 36c0 - 36ff + 0xB9, 0xCD, 0x82, 0xC9, 0x04, 0xCE, 0xBF, 0xCC, + 0x80, 0xC9, 0x04, 0xCE, 0xBF, 0xCC, 0x81, 0xC9, + 0x04, 0xCF, 0x81, 0xCC, 0x93, 0xC9, 0x04, 0xCF, + 0x81, 0xCC, 0x94, 0xC9, 0x04, 0xCF, 0x85, 0xCC, + 0x80, 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x81, 0xC9, + 0x04, 0xCF, 0x85, 0xCC, 0x84, 0xC9, 0x04, 0xCF, + 0x85, 0xCC, 0x86, 0xC9, 0x04, 0xCF, 0x85, 0xCD, + 0x82, 0xC9, 0x04, 0xCF, 0x89, 0xCD, 0x85, 0xD9, + // Bytes 3700 - 373f + 0x04, 0xCF, 0x92, 0xCC, 0x81, 0xC9, 0x04, 0xCF, + 0x92, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x86, 0xCC, + 0x88, 0xC9, 0x04, 0xD0, 0x90, 0xCC, 0x86, 0xC9, + 0x04, 0xD0, 0x90, 0xCC, 0x88, 0xC9, 0x04, 0xD0, + 0x93, 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0x95, 0xCC, + 0x80, 0xC9, 0x04, 0xD0, 0x95, 0xCC, 0x86, 0xC9, + 0x04, 0xD0, 0x95, 0xCC, 0x88, 0xC9, 0x04, 0xD0, + 0x96, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x96, 0xCC, + // Bytes 3740 - 377f + 0x88, 0xC9, 0x04, 0xD0, 0x97, 0xCC, 0x88, 0xC9, + 0x04, 0xD0, 0x98, 0xCC, 0x80, 0xC9, 0x04, 0xD0, + 0x98, 0xCC, 0x84, 0xC9, 0x04, 0xD0, 0x98, 0xCC, + 0x86, 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x88, 0xC9, + 0x04, 0xD0, 0x9A, 0xCC, 0x81, 0xC9, 0x04, 0xD0, + 0x9E, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, + 0x84, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x86, 0xC9, + 0x04, 0xD0, 0xA3, 0xCC, 0x88, 0xC9, 0x04, 0xD0, + // Bytes 3780 - 37bf + 0xA3, 0xCC, 0x8B, 0xC9, 0x04, 0xD0, 0xA7, 0xCC, + 0x88, 0xC9, 0x04, 0xD0, 0xAB, 0xCC, 0x88, 0xC9, + 0x04, 0xD0, 0xAD, 0xCC, 0x88, 0xC9, 0x04, 0xD0, + 0xB0, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB0, 0xCC, + 0x88, 0xC9, 0x04, 0xD0, 0xB3, 0xCC, 0x81, 0xC9, + 0x04, 0xD0, 0xB5, 0xCC, 0x80, 0xC9, 0x04, 0xD0, + 0xB5, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB5, 0xCC, + 0x88, 0xC9, 0x04, 0xD0, 0xB6, 0xCC, 0x86, 0xC9, + // Bytes 37c0 - 37ff + 0x04, 0xD0, 0xB6, 0xCC, 0x88, 0xC9, 0x04, 0xD0, + 0xB7, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB8, 0xCC, + 0x80, 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x84, 0xC9, + 0x04, 0xD0, 0xB8, 0xCC, 0x86, 0xC9, 0x04, 0xD0, + 0xB8, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xBA, 0xCC, + 0x81, 0xC9, 0x04, 0xD0, 0xBE, 0xCC, 0x88, 0xC9, + 0x04, 0xD1, 0x83, 0xCC, 0x84, 0xC9, 0x04, 0xD1, + 0x83, 0xCC, 0x86, 0xC9, 0x04, 0xD1, 0x83, 0xCC, + // Bytes 3800 - 383f + 0x88, 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x8B, 0xC9, + 0x04, 0xD1, 0x87, 0xCC, 0x88, 0xC9, 0x04, 0xD1, + 0x8B, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x8D, 0xCC, + 0x88, 0xC9, 0x04, 0xD1, 0x96, 0xCC, 0x88, 0xC9, + 0x04, 0xD1, 0xB4, 0xCC, 0x8F, 0xC9, 0x04, 0xD1, + 0xB5, 0xCC, 0x8F, 0xC9, 0x04, 0xD3, 0x98, 0xCC, + 0x88, 0xC9, 0x04, 0xD3, 0x99, 0xCC, 0x88, 0xC9, + 0x04, 0xD3, 0xA8, 0xCC, 0x88, 0xC9, 0x04, 0xD3, + // Bytes 3840 - 387f + 0xA9, 0xCC, 0x88, 0xC9, 0x04, 0xD8, 0xA7, 0xD9, + 0x93, 0xC9, 0x04, 0xD8, 0xA7, 0xD9, 0x94, 0xC9, + 0x04, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, 0x04, 0xD9, + 0x88, 0xD9, 0x94, 0xC9, 0x04, 0xD9, 0x8A, 0xD9, + 0x94, 0xC9, 0x04, 0xDB, 0x81, 0xD9, 0x94, 0xC9, + 0x04, 0xDB, 0x92, 
0xD9, 0x94, 0xC9, 0x04, 0xDB, + 0x95, 0xD9, 0x94, 0xC9, 0x05, 0x41, 0xCC, 0x82, + 0xCC, 0x80, 0xCA, 0x05, 0x41, 0xCC, 0x82, 0xCC, + // Bytes 3880 - 38bf + 0x81, 0xCA, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x83, + 0xCA, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x89, 0xCA, + 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x80, 0xCA, 0x05, + 0x41, 0xCC, 0x86, 0xCC, 0x81, 0xCA, 0x05, 0x41, + 0xCC, 0x86, 0xCC, 0x83, 0xCA, 0x05, 0x41, 0xCC, + 0x86, 0xCC, 0x89, 0xCA, 0x05, 0x41, 0xCC, 0x87, + 0xCC, 0x84, 0xCA, 0x05, 0x41, 0xCC, 0x88, 0xCC, + 0x84, 0xCA, 0x05, 0x41, 0xCC, 0x8A, 0xCC, 0x81, + // Bytes 38c0 - 38ff + 0xCA, 0x05, 0x41, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, + 0x05, 0x41, 0xCC, 0xA3, 0xCC, 0x86, 0xCA, 0x05, + 0x43, 0xCC, 0xA7, 0xCC, 0x81, 0xCA, 0x05, 0x45, + 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, 0x45, 0xCC, + 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, 0x82, + 0xCC, 0x83, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, + 0x89, 0xCA, 0x05, 0x45, 0xCC, 0x84, 0xCC, 0x80, + 0xCA, 0x05, 0x45, 0xCC, 0x84, 0xCC, 0x81, 0xCA, + // Bytes 3900 - 393f + 0x05, 0x45, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, + 0x45, 0xCC, 0xA7, 0xCC, 0x86, 0xCA, 0x05, 0x49, + 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x4C, 0xCC, + 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x82, + 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, + 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x83, + 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x89, 0xCA, + 0x05, 0x4F, 0xCC, 0x83, 0xCC, 0x81, 0xCA, 0x05, + // Bytes 3940 - 397f + 0x4F, 0xCC, 0x83, 0xCC, 0x84, 0xCA, 0x05, 0x4F, + 0xCC, 0x83, 0xCC, 0x88, 0xCA, 0x05, 0x4F, 0xCC, + 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, 0x84, + 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x87, 0xCC, + 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x88, 0xCC, 0x84, + 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0x80, 0xCA, + 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0x81, 0xCA, 0x05, + 0x4F, 0xCC, 0x9B, 0xCC, 0x83, 0xCA, 0x05, 0x4F, + // Bytes 3980 - 39bf + 0xCC, 0x9B, 0xCC, 0x89, 0xCA, 0x05, 0x4F, 0xCC, + 0x9B, 0xCC, 0xA3, 0xB6, 0x05, 0x4F, 0xCC, 0xA3, + 0xCC, 0x82, 0xCA, 0x05, 0x4F, 0xCC, 0xA8, 0xCC, + 0x84, 0xCA, 0x05, 0x52, 0xCC, 0xA3, 0xCC, 0x84, + 0xCA, 0x05, 0x53, 0xCC, 0x81, 0xCC, 0x87, 0xCA, + 0x05, 0x53, 0xCC, 0x8C, 0xCC, 0x87, 0xCA, 0x05, + 0x53, 0xCC, 0xA3, 0xCC, 0x87, 0xCA, 0x05, 0x55, + 0xCC, 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x55, 0xCC, + // Bytes 39c0 - 39ff + 0x84, 0xCC, 0x88, 0xCA, 0x05, 0x55, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, + 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x84, + 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x8C, 0xCA, + 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, + 0x55, 0xCC, 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x55, + 0xCC, 0x9B, 0xCC, 0x83, 0xCA, 0x05, 0x55, 0xCC, + 0x9B, 0xCC, 0x89, 0xCA, 0x05, 0x55, 0xCC, 0x9B, + // Bytes 3a00 - 3a3f + 0xCC, 0xA3, 0xB6, 0x05, 0x61, 0xCC, 0x82, 0xCC, + 0x80, 0xCA, 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x81, + 0xCA, 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x83, 0xCA, + 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, + 0x61, 0xCC, 0x86, 0xCC, 0x80, 0xCA, 0x05, 0x61, + 0xCC, 0x86, 0xCC, 0x81, 0xCA, 0x05, 0x61, 0xCC, + 0x86, 0xCC, 0x83, 0xCA, 0x05, 0x61, 0xCC, 0x86, + 0xCC, 0x89, 0xCA, 0x05, 0x61, 0xCC, 0x87, 0xCC, + // Bytes 3a40 - 3a7f + 0x84, 0xCA, 0x05, 0x61, 0xCC, 0x88, 0xCC, 0x84, + 0xCA, 0x05, 0x61, 0xCC, 0x8A, 0xCC, 0x81, 0xCA, + 0x05, 0x61, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, + 0x61, 0xCC, 0xA3, 0xCC, 0x86, 0xCA, 0x05, 0x63, + 0xCC, 0xA7, 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, + 0x82, 0xCC, 0x80, 0xCA, 0x05, 0x65, 0xCC, 0x82, + 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, + 0x83, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x89, + // Bytes 3a80 - 3abf + 0xCA, 0x05, 
0x65, 0xCC, 0x84, 0xCC, 0x80, 0xCA, + 0x05, 0x65, 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, + 0x65, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x65, + 0xCC, 0xA7, 0xCC, 0x86, 0xCA, 0x05, 0x69, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x6C, 0xCC, 0xA3, + 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC, 0x82, 0xCC, + 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x81, + 0xCA, 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x83, 0xCA, + // Bytes 3ac0 - 3aff + 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, + 0x6F, 0xCC, 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x6F, + 0xCC, 0x83, 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC, + 0x83, 0xCC, 0x88, 0xCA, 0x05, 0x6F, 0xCC, 0x84, + 0xCC, 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x84, 0xCC, + 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x87, 0xCC, 0x84, + 0xCA, 0x05, 0x6F, 0xCC, 0x88, 0xCC, 0x84, 0xCA, + 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, + // Bytes 3b00 - 3b3f + 0x6F, 0xCC, 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x6F, + 0xCC, 0x9B, 0xCC, 0x83, 0xCA, 0x05, 0x6F, 0xCC, + 0x9B, 0xCC, 0x89, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, + 0xCC, 0xA3, 0xB6, 0x05, 0x6F, 0xCC, 0xA3, 0xCC, + 0x82, 0xCA, 0x05, 0x6F, 0xCC, 0xA8, 0xCC, 0x84, + 0xCA, 0x05, 0x72, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, + 0x05, 0x73, 0xCC, 0x81, 0xCC, 0x87, 0xCA, 0x05, + 0x73, 0xCC, 0x8C, 0xCC, 0x87, 0xCA, 0x05, 0x73, + // Bytes 3b40 - 3b7f + 0xCC, 0xA3, 0xCC, 0x87, 0xCA, 0x05, 0x75, 0xCC, + 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x75, 0xCC, 0x84, + 0xCC, 0x88, 0xCA, 0x05, 0x75, 0xCC, 0x88, 0xCC, + 0x80, 0xCA, 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x81, + 0xCA, 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x84, 0xCA, + 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x8C, 0xCA, 0x05, + 0x75, 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x75, + 0xCC, 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x75, 0xCC, + // Bytes 3b80 - 3bbf + 0x9B, 0xCC, 0x83, 0xCA, 0x05, 0x75, 0xCC, 0x9B, + 0xCC, 0x89, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + 0xA3, 0xB6, 0x05, 0xE1, 0xBE, 0xBF, 0xCC, 0x80, + 0xCA, 0x05, 0xE1, 0xBE, 0xBF, 0xCC, 0x81, 0xCA, + 0x05, 0xE1, 0xBE, 0xBF, 0xCD, 0x82, 0xCA, 0x05, + 0xE1, 0xBF, 0xBE, 0xCC, 0x80, 0xCA, 0x05, 0xE1, + 0xBF, 0xBE, 0xCC, 0x81, 0xCA, 0x05, 0xE1, 0xBF, + 0xBE, 0xCD, 0x82, 0xCA, 0x05, 0xE2, 0x86, 0x90, + // Bytes 3bc0 - 3bff + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x86, 0x92, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x86, 0x94, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x87, 0x90, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x87, 0x92, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x87, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x88, 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, + 0x88, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x8B, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0xA3, 0xCC, + // Bytes 3c00 - 3c3f + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0xA5, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x88, 0xBC, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0x83, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0x85, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0x88, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0x8D, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xA1, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xA4, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xA5, 0xCC, 0xB8, + // Bytes 3c40 - 3c7f + 0x05, 0x05, 0xE2, 0x89, 0xB2, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xB3, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xB6, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x89, 0xB7, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xBA, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBB, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBC, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBD, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0x82, 0xCC, 0xB8, 0x05, + // Bytes 3c80 - 3cbf + 0x05, 0xE2, 0x8A, 0x83, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0x86, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0x87, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0x91, 0xCC, 0xB8, 0x05, 0x05, 
0xE2, 0x8A, 0x92, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xA2, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xA9, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xAB, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3cc0 - 3cff + 0xE2, 0x8A, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0xB4, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB5, + 0xCC, 0xB8, 0x05, 0x06, 0xCE, 0x91, 0xCC, 0x93, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0x91, 0xCC, 0x94, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0x95, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x95, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x95, 0xCC, 0x94, + // Bytes 3d00 - 3d3f + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x95, 0xCC, 0x94, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x97, 0xCC, 0x93, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0x97, 0xCC, 0x94, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0x99, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x93, + 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x94, + // Bytes 3d40 - 3d7f + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x99, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0x9F, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x9F, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0x9F, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0x9F, 0xCC, 0x94, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xA5, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xA5, 0xCC, 0x94, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xA5, 0xCC, 0x94, + // Bytes 3d80 - 3dbf + 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0xA9, 0xCC, 0x93, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xA9, 0xCC, 0x94, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCC, 0x80, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCC, 0x81, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCC, 0x93, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCC, 0x94, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB1, 0xCD, 0x82, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB5, 0xCC, 0x93, + // Bytes 3dc0 - 3dff + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB5, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB5, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB5, 0xCC, 0x94, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB7, 0xCC, 0x80, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB7, 0xCC, 0x81, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB7, 0xCC, 0x93, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB7, 0xCC, 0x94, + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB7, 0xCD, 0x82, + // Bytes 3e00 - 3e3f + 0xCD, 0x85, 0xDA, 0x06, 0xCE, 0xB9, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x88, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x88, + 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x93, + 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x94, + // Bytes 3e40 - 3e7f + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xB9, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x06, 0xCE, 0xBF, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xBF, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCE, 0xBF, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCE, 0xBF, 0xCC, 0x94, + 0xCC, 0x81, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x88, + 0xCC, 0x81, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x88, + // Bytes 3e80 - 3ebf + 0xCD, 0x82, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x93, + 0xCC, 0x81, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x93, + 0xCD, 0x82, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x94, + 0xCC, 0x80, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x94, + 0xCC, 0x81, 0xCA, 0x06, 0xCF, 0x85, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x06, 0xCF, 0x89, 0xCC, 0x80, + 
0xCD, 0x85, 0xDA, 0x06, 0xCF, 0x89, 0xCC, 0x81, + // Bytes 3ec0 - 3eff + 0xCD, 0x85, 0xDA, 0x06, 0xCF, 0x89, 0xCC, 0x93, + 0xCD, 0x85, 0xDA, 0x06, 0xCF, 0x89, 0xCC, 0x94, + 0xCD, 0x85, 0xDA, 0x06, 0xCF, 0x89, 0xCD, 0x82, + 0xCD, 0x85, 0xDA, 0x06, 0xE0, 0xA4, 0xA8, 0xE0, + 0xA4, 0xBC, 0x09, 0x06, 0xE0, 0xA4, 0xB0, 0xE0, + 0xA4, 0xBC, 0x09, 0x06, 0xE0, 0xA4, 0xB3, 0xE0, + 0xA4, 0xBC, 0x09, 0x06, 0xE0, 0xB1, 0x86, 0xE0, + 0xB1, 0x96, 0x85, 0x06, 0xE0, 0xB7, 0x99, 0xE0, + // Bytes 3f00 - 3f3f + 0xB7, 0x8A, 0x11, 0x06, 0xE3, 0x81, 0x86, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x8B, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x8D, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x8F, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x91, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x93, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x95, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x97, 0xE3, + // Bytes 3f40 - 3f7f + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x99, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x9B, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x9D, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0x9F, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xA1, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xA4, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xA6, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xA8, 0xE3, + // Bytes 3f80 - 3fbf + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xAF, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xAF, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x81, 0xB2, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xB2, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x81, 0xB5, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xB5, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x81, 0xB8, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xB8, 0xE3, + // Bytes 3fc0 - 3fff + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x81, 0xBB, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x81, 0xBB, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x82, 0x9D, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xA6, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xAB, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xAD, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xAF, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB1, 0xE3, + // Bytes 4000 - 403f + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB3, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB5, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB7, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xB9, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xBB, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xBD, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x82, 0xBF, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x81, 0xE3, + // Bytes 4040 - 407f + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x84, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x86, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x8F, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x8F, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x92, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0x95, 0xE3, + // Bytes 4080 - 40bf + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0x9B, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0x9B, 0xE3, + 0x82, 0x9A, 0x0D, 0x06, 0xE3, 0x83, 0xAF, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0xB0, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0xB1, 0xE3, + // Bytes 40c0 - 40ff + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0xB2, 0xE3, + 0x82, 0x99, 0x0D, 0x06, 0xE3, 0x83, 0xBD, 0xE3, 
+ 0x82, 0x99, 0x0D, 0x08, 0xCE, 0x91, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x80, + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, + // Bytes 4100 - 413f + 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x93, + 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + // Bytes 4140 - 417f + 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x80, + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, + 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, + // Bytes 4180 - 41bf + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x93, + 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x82, + // Bytes 41c0 - 41ff + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x93, + 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, + 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, + 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCD, 0x85, + 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x80, + 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, + 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, + 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, + // Bytes 4200 - 423f + 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCD, 0x85, + 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x81, + 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x93, + 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, + 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, + 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCD, 0x85, + 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x82, + 0xCD, 0x85, 0xDB, 0x08, 0xF0, 0x91, 0x82, 0x99, + // Bytes 4240 - 427f + 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x08, 0xF0, 0x91, + 0x82, 0x9B, 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x08, + 0xF0, 0x91, 0x82, 0xA5, 0xF0, 0x91, 0x82, 0xBA, + 0x09, 0x42, 0xC2, 0xB4, 0x01, 0x43, 0x20, 0xCC, + 0x81, 0xC9, 0x43, 0x20, 0xCC, 0x83, 0xC9, 0x43, + 0x20, 0xCC, 0x84, 0xC9, 0x43, 0x20, 0xCC, 0x85, + 0xC9, 0x43, 0x20, 0xCC, 0x86, 0xC9, 0x43, 0x20, + 0xCC, 0x87, 0xC9, 0x43, 0x20, 0xCC, 0x88, 0xC9, + // Bytes 4280 - 42bf + 0x43, 0x20, 0xCC, 0x8A, 0xC9, 0x43, 0x20, 0xCC, + 0x8B, 0xC9, 0x43, 0x20, 0xCC, 0x93, 0xC9, 0x43, + 0x20, 0xCC, 0x94, 0xC9, 0x43, 0x20, 0xCC, 0xA7, + 0xA5, 0x43, 0x20, 0xCC, 0xA8, 0xA5, 0x43, 0x20, + 0xCC, 0xB3, 0xB5, 0x43, 0x20, 0xCD, 0x82, 0xC9, + 0x43, 0x20, 0xCD, 0x85, 0xD9, 0x43, 0x20, 0xD9, + 0x8B, 0x59, 0x43, 0x20, 0xD9, 0x8C, 0x5D, 0x43, + 0x20, 0xD9, 0x8D, 0x61, 0x43, 0x20, 0xD9, 0x8E, + // Bytes 42c0 - 42ff + 0x65, 0x43, 0x20, 0xD9, 0x8F, 0x69, 0x43, 0x20, + 0xD9, 0x90, 0x6D, 0x43, 0x20, 0xD9, 0x91, 0x71, + 0x43, 0x20, 0xD9, 0x92, 0x75, 0x43, 0x41, 0xCC, + 0x8A, 0xC9, 0x43, 0x73, 0xCC, 0x87, 0xC9, 0x44, + 0x20, 0xE3, 0x82, 0x99, 0x0D, 0x44, 0x20, 0xE3, + 0x82, 0x9A, 0x0D, 
0x44, 0xC2, 0xA8, 0xCC, 0x81, + 0xCA, 0x44, 0xCE, 0x91, 0xCC, 0x81, 0xC9, 0x44, + 0xCE, 0x95, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x97, + // Bytes 4300 - 433f + 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x99, 0xCC, 0x81, + 0xC9, 0x44, 0xCE, 0x9F, 0xCC, 0x81, 0xC9, 0x44, + 0xCE, 0xA5, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xA5, + 0xCC, 0x88, 0xC9, 0x44, 0xCE, 0xA9, 0xCC, 0x81, + 0xC9, 0x44, 0xCE, 0xB1, 0xCC, 0x81, 0xC9, 0x44, + 0xCE, 0xB5, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB7, + 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB9, 0xCC, 0x81, + 0xC9, 0x44, 0xCE, 0xBF, 0xCC, 0x81, 0xC9, 0x44, + // Bytes 4340 - 437f + 0xCF, 0x85, 0xCC, 0x81, 0xC9, 0x44, 0xCF, 0x89, + 0xCC, 0x81, 0xC9, 0x44, 0xD7, 0x90, 0xD6, 0xB7, + 0x31, 0x44, 0xD7, 0x90, 0xD6, 0xB8, 0x35, 0x44, + 0xD7, 0x90, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x91, + 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x91, 0xD6, 0xBF, + 0x49, 0x44, 0xD7, 0x92, 0xD6, 0xBC, 0x41, 0x44, + 0xD7, 0x93, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x94, + 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x95, 0xD6, 0xB9, + // Bytes 4380 - 43bf + 0x39, 0x44, 0xD7, 0x95, 0xD6, 0xBC, 0x41, 0x44, + 0xD7, 0x96, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x98, + 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x99, 0xD6, 0xB4, + 0x25, 0x44, 0xD7, 0x99, 0xD6, 0xBC, 0x41, 0x44, + 0xD7, 0x9A, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9B, + 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9B, 0xD6, 0xBF, + 0x49, 0x44, 0xD7, 0x9C, 0xD6, 0xBC, 0x41, 0x44, + 0xD7, 0x9E, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA0, + // Bytes 43c0 - 43ff + 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA1, 0xD6, 0xBC, + 0x41, 0x44, 0xD7, 0xA3, 0xD6, 0xBC, 0x41, 0x44, + 0xD7, 0xA4, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA4, + 0xD6, 0xBF, 0x49, 0x44, 0xD7, 0xA6, 0xD6, 0xBC, + 0x41, 0x44, 0xD7, 0xA7, 0xD6, 0xBC, 0x41, 0x44, + 0xD7, 0xA8, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA9, + 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA9, 0xD7, 0x81, + 0x4D, 0x44, 0xD7, 0xA9, 0xD7, 0x82, 0x51, 0x44, + // Bytes 4400 - 443f + 0xD7, 0xAA, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xB2, + 0xD6, 0xB7, 0x31, 0x44, 0xD8, 0xA7, 0xD9, 0x8B, + 0x59, 0x44, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, 0x44, + 0xD8, 0xA7, 0xD9, 0x94, 0xC9, 0x44, 0xD8, 0xA7, + 0xD9, 0x95, 0xB5, 0x44, 0xD8, 0xB0, 0xD9, 0xB0, + 0x79, 0x44, 0xD8, 0xB1, 0xD9, 0xB0, 0x79, 0x44, + 0xD9, 0x80, 0xD9, 0x8B, 0x59, 0x44, 0xD9, 0x80, + 0xD9, 0x8E, 0x65, 0x44, 0xD9, 0x80, 0xD9, 0x8F, + // Bytes 4440 - 447f + 0x69, 0x44, 0xD9, 0x80, 0xD9, 0x90, 0x6D, 0x44, + 0xD9, 0x80, 0xD9, 0x91, 0x71, 0x44, 0xD9, 0x80, + 0xD9, 0x92, 0x75, 0x44, 0xD9, 0x87, 0xD9, 0xB0, + 0x79, 0x44, 0xD9, 0x88, 0xD9, 0x94, 0xC9, 0x44, + 0xD9, 0x89, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x8A, + 0xD9, 0x94, 0xC9, 0x44, 0xDB, 0x92, 0xD9, 0x94, + 0xC9, 0x44, 0xDB, 0x95, 0xD9, 0x94, 0xC9, 0x45, + 0x20, 0xCC, 0x88, 0xCC, 0x80, 0xCA, 0x45, 0x20, + // Bytes 4480 - 44bf + 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC, + 0x88, 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xCC, 0x93, + 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCC, + 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCD, 0x82, + 0xCA, 0x45, 0x20, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x45, 0x20, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x45, + 0x20, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x45, 0x20, + 0xD9, 0x8C, 0xD9, 0x91, 0x72, 0x45, 0x20, 0xD9, + // Bytes 44c0 - 44ff + 0x8D, 0xD9, 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8E, + 0xD9, 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8F, 0xD9, + 0x91, 0x72, 0x45, 0x20, 0xD9, 0x90, 0xD9, 0x91, + 0x72, 0x45, 0x20, 0xD9, 0x91, 0xD9, 0xB0, 0x7A, + 0x45, 0xE2, 0xAB, 0x9D, 0xCC, 0xB8, 0x05, 0x46, + 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x46, + 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x46, + 0xD7, 0xA9, 0xD6, 0xBC, 0xD7, 0x81, 0x4E, 0x46, + // Bytes 4500 - 453f + 0xD7, 0xA9, 
0xD6, 0xBC, 0xD7, 0x82, 0x52, 0x46, + 0xD9, 0x80, 0xD9, 0x8E, 0xD9, 0x91, 0x72, 0x46, + 0xD9, 0x80, 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x46, + 0xD9, 0x80, 0xD9, 0x90, 0xD9, 0x91, 0x72, 0x46, + 0xE0, 0xA4, 0x95, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA4, 0x96, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA4, 0x97, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA4, 0x9C, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + // Bytes 4540 - 457f + 0xE0, 0xA4, 0xA1, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA4, 0xA2, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA4, 0xAB, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA4, 0xAF, 0xE0, 0xA4, 0xBC, 0x09, 0x46, + 0xE0, 0xA6, 0xA1, 0xE0, 0xA6, 0xBC, 0x09, 0x46, + 0xE0, 0xA6, 0xA2, 0xE0, 0xA6, 0xBC, 0x09, 0x46, + 0xE0, 0xA6, 0xAF, 0xE0, 0xA6, 0xBC, 0x09, 0x46, + 0xE0, 0xA8, 0x96, 0xE0, 0xA8, 0xBC, 0x09, 0x46, + // Bytes 4580 - 45bf + 0xE0, 0xA8, 0x97, 0xE0, 0xA8, 0xBC, 0x09, 0x46, + 0xE0, 0xA8, 0x9C, 0xE0, 0xA8, 0xBC, 0x09, 0x46, + 0xE0, 0xA8, 0xAB, 0xE0, 0xA8, 0xBC, 0x09, 0x46, + 0xE0, 0xA8, 0xB2, 0xE0, 0xA8, 0xBC, 0x09, 0x46, + 0xE0, 0xA8, 0xB8, 0xE0, 0xA8, 0xBC, 0x09, 0x46, + 0xE0, 0xAC, 0xA1, 0xE0, 0xAC, 0xBC, 0x09, 0x46, + 0xE0, 0xAC, 0xA2, 0xE0, 0xAC, 0xBC, 0x09, 0x46, + 0xE0, 0xBE, 0xB2, 0xE0, 0xBE, 0x80, 0x9D, 0x46, + // Bytes 45c0 - 45ff + 0xE0, 0xBE, 0xB3, 0xE0, 0xBE, 0x80, 0x9D, 0x46, + 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x0D, 0x48, + 0xF0, 0x9D, 0x85, 0x97, 0xF0, 0x9D, 0x85, 0xA5, + 0xAD, 0x48, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 0x85, 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x86, 0xB9, + 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, 0x9D, + 0x86, 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x49, + 0xE0, 0xBE, 0xB2, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, + // Bytes 4600 - 463f + 0x80, 0x9E, 0x49, 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x4C, 0xF0, 0x9D, + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + 0x85, 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, + 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB0, 0xAE, 0x4C, + 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, + // Bytes 4640 - 467f + 0xF0, 0x9D, 0x85, 0xB1, 0xAE, 0x4C, 0xF0, 0x9D, + 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + 0x85, 0xB2, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xB9, + 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, + 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, + 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x4C, + 0xF0, 0x9D, 0x86, 0xBA, 0xF0, 0x9D, 0x85, 0xA5, + 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, + // Bytes 4680 - 46bf + 0x86, 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, + 0x85, 0xAF, 0xAE, 0x83, 0x41, 0xCC, 0x82, 0xC9, + 0x83, 0x41, 0xCC, 0x86, 0xC9, 0x83, 0x41, 0xCC, + 0x87, 0xC9, 0x83, 0x41, 0xCC, 0x88, 0xC9, 0x83, + 0x41, 0xCC, 0x8A, 0xC9, 0x83, 0x41, 0xCC, 0xA3, + 0xB5, 0x83, 0x43, 0xCC, 0xA7, 0xA5, 0x83, 0x45, + 0xCC, 0x82, 0xC9, 0x83, 0x45, 0xCC, 0x84, 0xC9, + 0x83, 0x45, 0xCC, 0xA3, 0xB5, 0x83, 0x45, 0xCC, + // Bytes 46c0 - 46ff + 0xA7, 0xA5, 0x83, 0x49, 0xCC, 0x88, 0xC9, 0x83, + 0x4C, 0xCC, 0xA3, 0xB5, 0x83, 0x4F, 0xCC, 0x82, + 0xC9, 0x83, 0x4F, 0xCC, 0x83, 0xC9, 0x83, 0x4F, + 0xCC, 0x84, 0xC9, 0x83, 0x4F, 0xCC, 0x87, 0xC9, + 0x83, 0x4F, 0xCC, 0x88, 0xC9, 0x83, 0x4F, 0xCC, + 0x9B, 0xAD, 0x83, 0x4F, 0xCC, 0xA3, 0xB5, 0x83, + 0x4F, 0xCC, 0xA8, 0xA5, 0x83, 0x52, 0xCC, 0xA3, + 0xB5, 0x83, 0x53, 0xCC, 0x81, 0xC9, 0x83, 0x53, + // Bytes 4700 - 473f + 0xCC, 0x8C, 0xC9, 0x83, 0x53, 0xCC, 0xA3, 0xB5, + 0x83, 0x55, 0xCC, 0x83, 0xC9, 0x83, 0x55, 0xCC, + 0x84, 0xC9, 0x83, 0x55, 0xCC, 0x88, 0xC9, 0x83, + 0x55, 0xCC, 0x9B, 0xAD, 0x83, 
0x61, 0xCC, 0x82, + 0xC9, 0x83, 0x61, 0xCC, 0x86, 0xC9, 0x83, 0x61, + 0xCC, 0x87, 0xC9, 0x83, 0x61, 0xCC, 0x88, 0xC9, + 0x83, 0x61, 0xCC, 0x8A, 0xC9, 0x83, 0x61, 0xCC, + 0xA3, 0xB5, 0x83, 0x63, 0xCC, 0xA7, 0xA5, 0x83, + // Bytes 4740 - 477f + 0x65, 0xCC, 0x82, 0xC9, 0x83, 0x65, 0xCC, 0x84, + 0xC9, 0x83, 0x65, 0xCC, 0xA3, 0xB5, 0x83, 0x65, + 0xCC, 0xA7, 0xA5, 0x83, 0x69, 0xCC, 0x88, 0xC9, + 0x83, 0x6C, 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC, + 0x82, 0xC9, 0x83, 0x6F, 0xCC, 0x83, 0xC9, 0x83, + 0x6F, 0xCC, 0x84, 0xC9, 0x83, 0x6F, 0xCC, 0x87, + 0xC9, 0x83, 0x6F, 0xCC, 0x88, 0xC9, 0x83, 0x6F, + 0xCC, 0x9B, 0xAD, 0x83, 0x6F, 0xCC, 0xA3, 0xB5, + // Bytes 4780 - 47bf + 0x83, 0x6F, 0xCC, 0xA8, 0xA5, 0x83, 0x72, 0xCC, + 0xA3, 0xB5, 0x83, 0x73, 0xCC, 0x81, 0xC9, 0x83, + 0x73, 0xCC, 0x8C, 0xC9, 0x83, 0x73, 0xCC, 0xA3, + 0xB5, 0x83, 0x75, 0xCC, 0x83, 0xC9, 0x83, 0x75, + 0xCC, 0x84, 0xC9, 0x83, 0x75, 0xCC, 0x88, 0xC9, + 0x83, 0x75, 0xCC, 0x9B, 0xAD, 0x84, 0xCE, 0x91, + 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x91, 0xCC, 0x94, + 0xC9, 0x84, 0xCE, 0x95, 0xCC, 0x93, 0xC9, 0x84, + // Bytes 47c0 - 47ff + 0xCE, 0x95, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0x97, + 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x97, 0xCC, 0x94, + 0xC9, 0x84, 0xCE, 0x99, 0xCC, 0x93, 0xC9, 0x84, + 0xCE, 0x99, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0x9F, + 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x9F, 0xCC, 0x94, + 0xC9, 0x84, 0xCE, 0xA5, 0xCC, 0x94, 0xC9, 0x84, + 0xCE, 0xA9, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xA9, + 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x80, + // Bytes 4800 - 483f + 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x81, 0xC9, 0x84, + 0xCE, 0xB1, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB1, + 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xB1, 0xCD, 0x82, + 0xC9, 0x84, 0xCE, 0xB5, 0xCC, 0x93, 0xC9, 0x84, + 0xCE, 0xB5, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xB7, + 0xCC, 0x80, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x81, + 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x93, 0xC9, 0x84, + 0xCE, 0xB7, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xB7, + // Bytes 4840 - 487f + 0xCD, 0x82, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, 0x88, + 0xC9, 0x84, 0xCE, 0xB9, 0xCC, 0x93, 0xC9, 0x84, + 0xCE, 0xB9, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xBF, + 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xBF, 0xCC, 0x94, + 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x88, 0xC9, 0x84, + 0xCF, 0x85, 0xCC, 0x93, 0xC9, 0x84, 0xCF, 0x85, + 0xCC, 0x94, 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x80, + 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x81, 0xC9, 0x84, + // Bytes 4880 - 48bf + 0xCF, 0x89, 0xCC, 0x93, 0xC9, 0x84, 0xCF, 0x89, + 0xCC, 0x94, 0xC9, 0x84, 0xCF, 0x89, 0xCD, 0x82, + 0xC9, 0x86, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x82, + // Bytes 48c0 - 48ff + 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x93, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x94, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x81, + // Bytes 4900 - 493f + 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x93, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, + 
0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x80, + // Bytes 4940 - 497f + 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x82, + 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x80, + 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, + 0xCA, 0x86, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x82, + // Bytes 4980 - 49bf + 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x80, + 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x93, 0xCC, 0x81, + 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, + 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x80, + 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x94, 0xCC, 0x81, + 0xCA, 0x86, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x82, + 0xCA, 0x42, 0xCC, 0x80, 0xC9, 0x32, 0x42, 0xCC, + 0x81, 0xC9, 0x32, 0x42, 0xCC, 0x93, 0xC9, 0x32, + // Bytes 49c0 - 49ff + 0x43, 0xE1, 0x85, 0xA1, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA2, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA3, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA4, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA5, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xA6, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA7, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA8, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xA9, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xAA, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAB, + // Bytes 4a00 - 4a3f + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAC, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xAD, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xAE, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAF, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB0, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xB1, 0x01, 0x00, 0x43, 0xE1, + 0x85, 0xB2, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB3, + 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB4, 0x01, 0x00, + 0x43, 0xE1, 0x85, 0xB5, 0x01, 0x00, 0x43, 0xE1, + // Bytes 4a40 - 4a7f + 0x86, 0xAA, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAC, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAD, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xB0, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB2, + 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB3, 0x01, 0x00, + 0x43, 0xE1, 0x86, 0xB4, 0x01, 0x00, 0x43, 0xE1, + 0x86, 0xB5, 0x01, 0x00, 0x44, 0xCC, 0x88, 0xCC, + 0x81, 0xCA, 0x32, 0x43, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4a80 - 4abf + 0x03, 0x43, 0xE3, 0x82, 0x9A, 0x0D, 0x03, 0x46, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBD, 0xB2, 0x9E, 0x26, + 0x46, 0xE0, 0xBD, 0xB1, 0xE0, 0xBD, 0xB4, 0xA2, + 0x26, 0x46, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, + 0x9E, 0x26, 0x00, 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. 
Total size: 10610 bytes (10.36 KiB). Checksum: 95e8869a9f81e5e6. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 46: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 46 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 48 blocks, 3072 entries, 6144 bytes +// The third block is the zero block. +var nfcValues = [3072]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f72, 0xc1: 0x2f77, 0xc2: 0x468b, 0xc3: 0x2f7c, 0xc4: 0x469a, 0xc5: 0x469f, + 0xc6: 0xa000, 0xc7: 0x46a9, 0xc8: 0x2fe5, 0xc9: 0x2fea, 0xca: 0x46ae, 0xcb: 0x2ffe, + 0xcc: 0x3071, 0xcd: 0x3076, 0xce: 0x307b, 0xcf: 0x46c2, 0xd1: 0x3107, + 0xd2: 0x312a, 0xd3: 0x312f, 0xd4: 0x46cc, 0xd5: 0x46d1, 0xd6: 0x46e0, + 0xd8: 0xa000, 0xd9: 0x31b6, 0xda: 0x31bb, 0xdb: 0x31c0, 0xdc: 0x4712, 0xdd: 0x3238, + 0xe0: 0x327e, 0xe1: 0x3283, 0xe2: 0x471c, 0xe3: 0x3288, + 0xe4: 0x472b, 0xe5: 0x4730, 0xe6: 0xa000, 0xe7: 0x473a, 0xe8: 0x32f1, 0xe9: 0x32f6, + 0xea: 0x473f, 0xeb: 0x330a, 0xec: 0x3382, 0xed: 0x3387, 0xee: 0x338c, 0xef: 0x4753, + 0xf1: 0x3418, 0xf2: 0x343b, 0xf3: 0x3440, 0xf4: 0x475d, 0xf5: 0x4762, + 0xf6: 0x4771, 0xf8: 0xa000, 0xf9: 0x34cc, 0xfa: 0x34d1, 0xfb: 0x34d6, + 0xfc: 0x47a3, 0xfd: 0x3553, 0xff: 0x356c, + // Block 0x4, offset 0x100 + 0x100: 0x2f81, 0x101: 0x328d, 0x102: 0x4690, 0x103: 0x4721, 0x104: 0x2f9f, 0x105: 0x32ab, + 0x106: 0x2fb3, 0x107: 0x32bf, 0x108: 0x2fb8, 0x109: 0x32c4, 0x10a: 0x2fbd, 0x10b: 0x32c9, + 0x10c: 0x2fc2, 0x10d: 0x32ce, 0x10e: 0x2fcc, 0x10f: 0x32d8, + 0x112: 0x46b3, 0x113: 0x4744, 0x114: 0x2ff4, 0x115: 0x3300, 0x116: 0x2ff9, 0x117: 0x3305, + 0x118: 0x3017, 0x119: 0x3323, 0x11a: 0x3008, 0x11b: 0x3314, 0x11c: 0x3030, 0x11d: 0x333c, + 0x11e: 0x303a, 0x11f: 0x3346, 0x120: 0x303f, 0x121: 0x334b, 0x122: 0x3049, 0x123: 0x3355, + 0x124: 0x304e, 0x125: 0x335a, 0x128: 0x3080, 0x129: 0x3391, + 0x12a: 0x3085, 0x12b: 0x3396, 0x12c: 0x308a, 0x12d: 0x339b, 0x12e: 0x30ad, 0x12f: 0x33b9, + 0x130: 0x308f, 0x134: 0x30b7, 0x135: 0x33c3, + 0x136: 0x30cb, 0x137: 0x33dc, 0x139: 0x30d5, 0x13a: 0x33e6, 0x13b: 0x30df, + 0x13c: 0x33f0, 0x13d: 0x30da, 0x13e: 0x33eb, + // Block 0x5, offset 0x140 + 0x143: 0x3102, 0x144: 0x3413, 0x145: 0x311b, + 0x146: 0x342c, 0x147: 0x3111, 0x148: 0x3422, + 0x14c: 0x46d6, 0x14d: 0x4767, 0x14e: 0x3134, 0x14f: 0x3445, 0x150: 0x313e, 0x151: 0x344f, + 0x154: 0x315c, 0x155: 0x346d, 0x156: 0x3175, 0x157: 0x3486, + 0x158: 0x3166, 0x159: 0x3477, 0x15a: 0x46f9, 0x15b: 0x478a, 0x15c: 0x317f, 0x15d: 0x3490, + 0x15e: 0x318e, 0x15f: 0x349f, 0x160: 0x46fe, 0x161: 0x478f, 0x162: 
0x31a7, 0x163: 0x34bd, + 0x164: 0x3198, 0x165: 0x34ae, 0x168: 0x4708, 0x169: 0x4799, + 0x16a: 0x470d, 0x16b: 0x479e, 0x16c: 0x31c5, 0x16d: 0x34db, 0x16e: 0x31cf, 0x16f: 0x34e5, + 0x170: 0x31d4, 0x171: 0x34ea, 0x172: 0x31f2, 0x173: 0x3508, 0x174: 0x3215, 0x175: 0x352b, + 0x176: 0x323d, 0x177: 0x3558, 0x178: 0x3251, 0x179: 0x3260, 0x17a: 0x3580, 0x17b: 0x326a, + 0x17c: 0x358a, 0x17d: 0x326f, 0x17e: 0x358f, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x2f8b, 0x18e: 0x3297, 0x18f: 0x3099, 0x190: 0x33a5, 0x191: 0x3143, + 0x192: 0x3454, 0x193: 0x31d9, 0x194: 0x34ef, 0x195: 0x39d2, 0x196: 0x3b61, 0x197: 0x39cb, + 0x198: 0x3b5a, 0x199: 0x39d9, 0x19a: 0x3b68, 0x19b: 0x39c4, 0x19c: 0x3b53, + 0x19e: 0x38b3, 0x19f: 0x3a42, 0x1a0: 0x38ac, 0x1a1: 0x3a3b, 0x1a2: 0x35b6, 0x1a3: 0x35c8, + 0x1a6: 0x3044, 0x1a7: 0x3350, 0x1a8: 0x30c1, 0x1a9: 0x33d2, + 0x1aa: 0x46ef, 0x1ab: 0x4780, 0x1ac: 0x3993, 0x1ad: 0x3b22, 0x1ae: 0x35da, 0x1af: 0x35e0, + 0x1b0: 0x33c8, 0x1b4: 0x302b, 0x1b5: 0x3337, + 0x1b8: 0x30fd, 0x1b9: 0x340e, 0x1ba: 0x38ba, 0x1bb: 0x3a49, + 0x1bc: 0x35b0, 0x1bd: 0x35c2, 0x1be: 0x35bc, 0x1bf: 0x35ce, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f90, 0x1c1: 0x329c, 0x1c2: 0x2f95, 0x1c3: 0x32a1, 0x1c4: 0x300d, 0x1c5: 0x3319, + 0x1c6: 0x3012, 0x1c7: 0x331e, 0x1c8: 0x309e, 0x1c9: 0x33aa, 0x1ca: 0x30a3, 0x1cb: 0x33af, + 0x1cc: 0x3148, 0x1cd: 0x3459, 0x1ce: 0x314d, 0x1cf: 0x345e, 0x1d0: 0x316b, 0x1d1: 0x347c, + 0x1d2: 0x3170, 0x1d3: 0x3481, 0x1d4: 0x31de, 0x1d5: 0x34f4, 0x1d6: 0x31e3, 0x1d7: 0x34f9, + 0x1d8: 0x3189, 0x1d9: 0x349a, 0x1da: 0x31a2, 0x1db: 0x34b8, + 0x1de: 0x305d, 0x1df: 0x3369, + 0x1e6: 0x4695, 0x1e7: 0x4726, 0x1e8: 0x46bd, 0x1e9: 0x474e, + 0x1ea: 0x3962, 0x1eb: 0x3af1, 0x1ec: 0x393f, 0x1ed: 0x3ace, 0x1ee: 0x46db, 0x1ef: 0x476c, + 0x1f0: 0x395b, 0x1f1: 0x3aea, 0x1f2: 0x3247, 0x1f3: 0x3562, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49b1, 0x241: 0x49b6, 0x242: 0x9932, 0x243: 0x49bb, 0x244: 0x4a74, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 
0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x35a4, + 0x286: 0x35ec, 0x287: 0x00ce, 0x288: 0x360a, 0x289: 0x3616, 0x28a: 0x3628, + 0x28c: 0x3646, 0x28e: 0x3658, 0x28f: 0x3676, 0x290: 0x3e0b, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x363a, 0x2ab: 0x366a, 0x2ac: 0x4801, 0x2ad: 0x369a, 0x2ae: 0x482b, 0x2af: 0x36ac, + 0x2b0: 0x3e73, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3724, 0x2c1: 0x3730, 0x2c3: 0x371e, + 0x2c6: 0xa000, 0x2c7: 0x370c, + 0x2cc: 0x3760, 0x2cd: 0x3748, 0x2ce: 0x3772, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3754, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37d8, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x3736, 0x302: 0x37ba, + 0x310: 0x3712, 0x311: 0x3796, + 0x312: 0x3718, 0x313: 0x379c, 0x316: 0x372a, 0x317: 0x37ae, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x382c, 0x31b: 0x3832, 0x31c: 0x373c, 0x31d: 0x37c0, + 0x31e: 0x3742, 0x31f: 0x37c6, 0x322: 0x374e, 0x323: 0x37d2, + 0x324: 0x375a, 0x325: 0x37de, 0x326: 0x3766, 0x327: 0x37ea, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x3838, 0x32b: 0x383e, 0x32c: 0x3790, 0x32d: 0x3814, 0x32e: 0x376c, 0x32f: 0x37f0, + 0x330: 0x3778, 0x331: 0x37fc, 0x332: 0x377e, 0x333: 0x3802, 0x334: 0x3784, 0x335: 0x3808, + 0x338: 0x378a, 0x339: 0x380e, + // Block 0xd, offset 0x340 + 0x351: 0x812d, + 0x352: 0x8132, 0x353: 0x8132, 0x354: 0x8132, 0x355: 0x8132, 0x356: 0x812d, 0x357: 0x8132, + 0x358: 0x8132, 0x359: 0x8132, 0x35a: 0x812e, 0x35b: 0x812d, 0x35c: 0x8132, 0x35d: 0x8132, + 0x35e: 0x8132, 0x35f: 0x8132, 0x360: 0x8132, 0x361: 0x8132, 0x362: 0x812d, 0x363: 0x812d, + 0x364: 0x812d, 0x365: 0x812d, 0x366: 0x812d, 0x367: 0x812d, 0x368: 0x8132, 0x369: 0x8132, + 0x36a: 0x812d, 0x36b: 0x8132, 0x36c: 0x8132, 0x36d: 0x812e, 0x36e: 0x8131, 0x36f: 0x8132, + 0x370: 0x8105, 0x371: 0x8106, 0x372: 0x8107, 0x373: 0x8108, 0x374: 0x8109, 0x375: 0x810a, + 0x376: 0x810b, 0x377: 0x810c, 0x378: 0x810d, 0x379: 0x810e, 0x37a: 0x810e, 0x37b: 0x810f, + 0x37c: 0x8110, 0x37d: 0x8111, 0x37f: 0x8112, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8116, + 0x38c: 0x8117, 0x38d: 0x8118, 0x38e: 0x8119, 0x38f: 0x811a, 0x390: 0x811b, 0x391: 0x811c, + 0x392: 0x811d, 0x393: 0x9932, 0x394: 0x9932, 0x395: 0x992d, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x8132, 0x39b: 0x8132, 0x39c: 0x812d, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x812d, + 0x3b0: 0x811e, + // Block 0xf, offset 0x3c0 + 0x3d3: 0x812d, 0x3d4: 0x8132, 0x3d5: 0x8132, 0x3d6: 0x8132, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x8132, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x8132, 0x3e0: 0x8132, 0x3e1: 0x8132, 0x3e3: 0x812d, + 0x3e4: 0x8132, 0x3e5: 0x8132, 0x3e6: 0x812d, 0x3e7: 0x8132, 0x3e8: 0x8132, 0x3e9: 0x812d, + 0x3ea: 0x8132, 0x3eb: 0x8132, 0x3ec: 0x8132, 0x3ed: 0x812d, 0x3ee: 0x812d, 0x3ef: 0x812d, + 0x3f0: 0x8116, 0x3f1: 0x8117, 0x3f2: 0x8118, 0x3f3: 0x8132, 0x3f4: 0x8132, 0x3f5: 0x8132, + 0x3f6: 0x812d, 0x3f7: 0x8132, 0x3f8: 0x8132, 0x3f9: 0x812d, 0x3fa: 0x812d, 0x3fb: 0x8132, + 
0x3fc: 0x8132, 0x3fd: 0x8132, 0x3fe: 0x8132, 0x3ff: 0x8132, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2d29, 0x407: 0xa000, 0x408: 0x2d31, 0x409: 0xa000, 0x40a: 0x2d39, 0x40b: 0xa000, + 0x40c: 0x2d41, 0x40d: 0xa000, 0x40e: 0x2d49, 0x411: 0xa000, + 0x412: 0x2d51, + 0x434: 0x8102, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2d59, + 0x43c: 0xa000, 0x43d: 0x2d61, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x8132, 0x441: 0x8132, 0x442: 0x812d, 0x443: 0x8132, 0x444: 0x8132, 0x445: 0x8132, + 0x446: 0x8132, 0x447: 0x8132, 0x448: 0x8132, 0x449: 0x8132, 0x44a: 0x812d, 0x44b: 0x8132, + 0x44c: 0x8132, 0x44d: 0x8135, 0x44e: 0x812a, 0x44f: 0x812d, 0x450: 0x8129, 0x451: 0x8132, + 0x452: 0x8132, 0x453: 0x8132, 0x454: 0x8132, 0x455: 0x8132, 0x456: 0x8132, 0x457: 0x8132, + 0x458: 0x8132, 0x459: 0x8132, 0x45a: 0x8132, 0x45b: 0x8132, 0x45c: 0x8132, 0x45d: 0x8132, + 0x45e: 0x8132, 0x45f: 0x8132, 0x460: 0x8132, 0x461: 0x8132, 0x462: 0x8132, 0x463: 0x8132, + 0x464: 0x8132, 0x465: 0x8132, 0x466: 0x8132, 0x467: 0x8132, 0x468: 0x8132, 0x469: 0x8132, + 0x46a: 0x8132, 0x46b: 0x8132, 0x46c: 0x8132, 0x46d: 0x8132, 0x46e: 0x8132, 0x46f: 0x8132, + 0x470: 0x8132, 0x471: 0x8132, 0x472: 0x8132, 0x473: 0x8132, 0x474: 0x8132, 0x475: 0x8132, + 0x476: 0x8133, 0x477: 0x8131, 0x478: 0x8131, 0x479: 0x812d, 0x47b: 0x8132, + 0x47c: 0x8134, 0x47d: 0x812d, 0x47e: 0x8132, 0x47f: 0x812d, + // Block 0x12, offset 0x480 + 0x480: 0x2f9a, 0x481: 0x32a6, 0x482: 0x2fa4, 0x483: 0x32b0, 0x484: 0x2fa9, 0x485: 0x32b5, + 0x486: 0x2fae, 0x487: 0x32ba, 0x488: 0x38cf, 0x489: 0x3a5e, 0x48a: 0x2fc7, 0x48b: 0x32d3, + 0x48c: 0x2fd1, 0x48d: 0x32dd, 0x48e: 0x2fe0, 0x48f: 0x32ec, 0x490: 0x2fd6, 0x491: 0x32e2, + 0x492: 0x2fdb, 0x493: 0x32e7, 0x494: 0x38f2, 0x495: 0x3a81, 0x496: 0x38f9, 0x497: 0x3a88, + 0x498: 0x301c, 0x499: 0x3328, 0x49a: 0x3021, 0x49b: 0x332d, 0x49c: 0x3907, 0x49d: 0x3a96, + 0x49e: 0x3026, 0x49f: 0x3332, 0x4a0: 0x3035, 0x4a1: 0x3341, 0x4a2: 0x3053, 0x4a3: 0x335f, + 0x4a4: 0x3062, 0x4a5: 0x336e, 0x4a6: 0x3058, 0x4a7: 0x3364, 0x4a8: 0x3067, 0x4a9: 0x3373, + 0x4aa: 0x306c, 0x4ab: 0x3378, 0x4ac: 0x30b2, 0x4ad: 0x33be, 0x4ae: 0x390e, 0x4af: 0x3a9d, + 0x4b0: 0x30bc, 0x4b1: 0x33cd, 0x4b2: 0x30c6, 0x4b3: 0x33d7, 0x4b4: 0x30d0, 0x4b5: 0x33e1, + 0x4b6: 0x46c7, 0x4b7: 0x4758, 0x4b8: 0x3915, 0x4b9: 0x3aa4, 0x4ba: 0x30e9, 0x4bb: 0x33fa, + 0x4bc: 0x30e4, 0x4bd: 0x33f5, 0x4be: 0x30ee, 0x4bf: 0x33ff, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x30f3, 0x4c1: 0x3404, 0x4c2: 0x30f8, 0x4c3: 0x3409, 0x4c4: 0x310c, 0x4c5: 0x341d, + 0x4c6: 0x3116, 0x4c7: 0x3427, 0x4c8: 0x3125, 0x4c9: 0x3436, 0x4ca: 0x3120, 0x4cb: 0x3431, + 0x4cc: 0x3938, 0x4cd: 0x3ac7, 0x4ce: 0x3946, 0x4cf: 0x3ad5, 0x4d0: 0x394d, 0x4d1: 0x3adc, + 0x4d2: 0x3954, 0x4d3: 0x3ae3, 0x4d4: 0x3152, 0x4d5: 0x3463, 0x4d6: 0x3157, 0x4d7: 0x3468, + 0x4d8: 0x3161, 0x4d9: 0x3472, 0x4da: 0x46f4, 0x4db: 0x4785, 0x4dc: 0x399a, 0x4dd: 0x3b29, + 0x4de: 0x317a, 0x4df: 0x348b, 0x4e0: 0x3184, 0x4e1: 0x3495, 0x4e2: 0x4703, 0x4e3: 0x4794, + 0x4e4: 0x39a1, 0x4e5: 0x3b30, 0x4e6: 0x39a8, 0x4e7: 0x3b37, 0x4e8: 0x39af, 0x4e9: 0x3b3e, + 0x4ea: 0x3193, 0x4eb: 0x34a4, 0x4ec: 0x319d, 0x4ed: 0x34b3, 0x4ee: 0x31b1, 0x4ef: 0x34c7, + 0x4f0: 0x31ac, 0x4f1: 0x34c2, 0x4f2: 0x31ed, 0x4f3: 0x3503, 0x4f4: 0x31fc, 0x4f5: 0x3512, + 0x4f6: 0x31f7, 0x4f7: 0x350d, 0x4f8: 0x39b6, 0x4f9: 0x3b45, 0x4fa: 0x39bd, 0x4fb: 0x3b4c, + 0x4fc: 0x3201, 0x4fd: 0x3517, 0x4fe: 0x3206, 0x4ff: 0x351c, + // Block 0x14, offset 0x500 + 0x500: 0x320b, 0x501: 0x3521, 0x502: 0x3210, 0x503: 0x3526, 0x504: 0x321f, 0x505: 0x3535, + 
0x506: 0x321a, 0x507: 0x3530, 0x508: 0x3224, 0x509: 0x353f, 0x50a: 0x3229, 0x50b: 0x3544, + 0x50c: 0x322e, 0x50d: 0x3549, 0x50e: 0x324c, 0x50f: 0x3567, 0x510: 0x3265, 0x511: 0x3585, + 0x512: 0x3274, 0x513: 0x3594, 0x514: 0x3279, 0x515: 0x3599, 0x516: 0x337d, 0x517: 0x34a9, + 0x518: 0x353a, 0x519: 0x3576, 0x51b: 0x35d4, + 0x520: 0x46a4, 0x521: 0x4735, 0x522: 0x2f86, 0x523: 0x3292, + 0x524: 0x387b, 0x525: 0x3a0a, 0x526: 0x3874, 0x527: 0x3a03, 0x528: 0x3889, 0x529: 0x3a18, + 0x52a: 0x3882, 0x52b: 0x3a11, 0x52c: 0x38c1, 0x52d: 0x3a50, 0x52e: 0x3897, 0x52f: 0x3a26, + 0x530: 0x3890, 0x531: 0x3a1f, 0x532: 0x38a5, 0x533: 0x3a34, 0x534: 0x389e, 0x535: 0x3a2d, + 0x536: 0x38c8, 0x537: 0x3a57, 0x538: 0x46b8, 0x539: 0x4749, 0x53a: 0x3003, 0x53b: 0x330f, + 0x53c: 0x2fef, 0x53d: 0x32fb, 0x53e: 0x38dd, 0x53f: 0x3a6c, + // Block 0x15, offset 0x540 + 0x540: 0x38d6, 0x541: 0x3a65, 0x542: 0x38eb, 0x543: 0x3a7a, 0x544: 0x38e4, 0x545: 0x3a73, + 0x546: 0x3900, 0x547: 0x3a8f, 0x548: 0x3094, 0x549: 0x33a0, 0x54a: 0x30a8, 0x54b: 0x33b4, + 0x54c: 0x46ea, 0x54d: 0x477b, 0x54e: 0x3139, 0x54f: 0x344a, 0x550: 0x3923, 0x551: 0x3ab2, + 0x552: 0x391c, 0x553: 0x3aab, 0x554: 0x3931, 0x555: 0x3ac0, 0x556: 0x392a, 0x557: 0x3ab9, + 0x558: 0x398c, 0x559: 0x3b1b, 0x55a: 0x3970, 0x55b: 0x3aff, 0x55c: 0x3969, 0x55d: 0x3af8, + 0x55e: 0x397e, 0x55f: 0x3b0d, 0x560: 0x3977, 0x561: 0x3b06, 0x562: 0x3985, 0x563: 0x3b14, + 0x564: 0x31e8, 0x565: 0x34fe, 0x566: 0x31ca, 0x567: 0x34e0, 0x568: 0x39e7, 0x569: 0x3b76, + 0x56a: 0x39e0, 0x56b: 0x3b6f, 0x56c: 0x39f5, 0x56d: 0x3b84, 0x56e: 0x39ee, 0x56f: 0x3b7d, + 0x570: 0x39fc, 0x571: 0x3b8b, 0x572: 0x3233, 0x573: 0x354e, 0x574: 0x325b, 0x575: 0x357b, + 0x576: 0x3256, 0x577: 0x3571, 0x578: 0x3242, 0x579: 0x355d, + // Block 0x16, offset 0x580 + 0x580: 0x4807, 0x581: 0x480d, 0x582: 0x4921, 0x583: 0x4939, 0x584: 0x4929, 0x585: 0x4941, + 0x586: 0x4931, 0x587: 0x4949, 0x588: 0x47ad, 0x589: 0x47b3, 0x58a: 0x4891, 0x58b: 0x48a9, + 0x58c: 0x4899, 0x58d: 0x48b1, 0x58e: 0x48a1, 0x58f: 0x48b9, 0x590: 0x4819, 0x591: 0x481f, + 0x592: 0x3dbb, 0x593: 0x3dcb, 0x594: 0x3dc3, 0x595: 0x3dd3, + 0x598: 0x47b9, 0x599: 0x47bf, 0x59a: 0x3ceb, 0x59b: 0x3cfb, 0x59c: 0x3cf3, 0x59d: 0x3d03, + 0x5a0: 0x4831, 0x5a1: 0x4837, 0x5a2: 0x4951, 0x5a3: 0x4969, + 0x5a4: 0x4959, 0x5a5: 0x4971, 0x5a6: 0x4961, 0x5a7: 0x4979, 0x5a8: 0x47c5, 0x5a9: 0x47cb, + 0x5aa: 0x48c1, 0x5ab: 0x48d9, 0x5ac: 0x48c9, 0x5ad: 0x48e1, 0x5ae: 0x48d1, 0x5af: 0x48e9, + 0x5b0: 0x4849, 0x5b1: 0x484f, 0x5b2: 0x3e1b, 0x5b3: 0x3e33, 0x5b4: 0x3e23, 0x5b5: 0x3e3b, + 0x5b6: 0x3e2b, 0x5b7: 0x3e43, 0x5b8: 0x47d1, 0x5b9: 0x47d7, 0x5ba: 0x3d1b, 0x5bb: 0x3d33, + 0x5bc: 0x3d23, 0x5bd: 0x3d3b, 0x5be: 0x3d2b, 0x5bf: 0x3d43, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x4855, 0x5c1: 0x485b, 0x5c2: 0x3e4b, 0x5c3: 0x3e5b, 0x5c4: 0x3e53, 0x5c5: 0x3e63, + 0x5c8: 0x47dd, 0x5c9: 0x47e3, 0x5ca: 0x3d4b, 0x5cb: 0x3d5b, + 0x5cc: 0x3d53, 0x5cd: 0x3d63, 0x5d0: 0x4867, 0x5d1: 0x486d, + 0x5d2: 0x3e83, 0x5d3: 0x3e9b, 0x5d4: 0x3e8b, 0x5d5: 0x3ea3, 0x5d6: 0x3e93, 0x5d7: 0x3eab, + 0x5d9: 0x47e9, 0x5db: 0x3d6b, 0x5dd: 0x3d73, + 0x5df: 0x3d7b, 0x5e0: 0x487f, 0x5e1: 0x4885, 0x5e2: 0x4981, 0x5e3: 0x4999, + 0x5e4: 0x4989, 0x5e5: 0x49a1, 0x5e6: 0x4991, 0x5e7: 0x49a9, 0x5e8: 0x47ef, 0x5e9: 0x47f5, + 0x5ea: 0x48f1, 0x5eb: 0x4909, 0x5ec: 0x48f9, 0x5ed: 0x4911, 0x5ee: 0x4901, 0x5ef: 0x4919, + 0x5f0: 0x47fb, 0x5f1: 0x4321, 0x5f2: 0x3694, 0x5f3: 0x4327, 0x5f4: 0x4825, 0x5f5: 0x432d, + 0x5f6: 0x36a6, 0x5f7: 0x4333, 0x5f8: 0x36c4, 0x5f9: 0x4339, 0x5fa: 0x36dc, 0x5fb: 0x433f, + 0x5fc: 0x4873, 0x5fd: 0x4345, + // 
Block 0x18, offset 0x600 + 0x600: 0x3da3, 0x601: 0x3dab, 0x602: 0x4187, 0x603: 0x41a5, 0x604: 0x4191, 0x605: 0x41af, + 0x606: 0x419b, 0x607: 0x41b9, 0x608: 0x3cdb, 0x609: 0x3ce3, 0x60a: 0x40d3, 0x60b: 0x40f1, + 0x60c: 0x40dd, 0x60d: 0x40fb, 0x60e: 0x40e7, 0x60f: 0x4105, 0x610: 0x3deb, 0x611: 0x3df3, + 0x612: 0x41c3, 0x613: 0x41e1, 0x614: 0x41cd, 0x615: 0x41eb, 0x616: 0x41d7, 0x617: 0x41f5, + 0x618: 0x3d0b, 0x619: 0x3d13, 0x61a: 0x410f, 0x61b: 0x412d, 0x61c: 0x4119, 0x61d: 0x4137, + 0x61e: 0x4123, 0x61f: 0x4141, 0x620: 0x3ec3, 0x621: 0x3ecb, 0x622: 0x41ff, 0x623: 0x421d, + 0x624: 0x4209, 0x625: 0x4227, 0x626: 0x4213, 0x627: 0x4231, 0x628: 0x3d83, 0x629: 0x3d8b, + 0x62a: 0x414b, 0x62b: 0x4169, 0x62c: 0x4155, 0x62d: 0x4173, 0x62e: 0x415f, 0x62f: 0x417d, + 0x630: 0x3688, 0x631: 0x3682, 0x632: 0x3d93, 0x633: 0x368e, 0x634: 0x3d9b, + 0x636: 0x4813, 0x637: 0x3db3, 0x638: 0x35f8, 0x639: 0x35f2, 0x63a: 0x35e6, 0x63b: 0x42f1, + 0x63c: 0x35fe, 0x63d: 0x8100, 0x63e: 0x01d3, 0x63f: 0xa100, + // Block 0x19, offset 0x640 + 0x640: 0x8100, 0x641: 0x35aa, 0x642: 0x3ddb, 0x643: 0x36a0, 0x644: 0x3de3, + 0x646: 0x483d, 0x647: 0x3dfb, 0x648: 0x3604, 0x649: 0x42f7, 0x64a: 0x3610, 0x64b: 0x42fd, + 0x64c: 0x361c, 0x64d: 0x3b92, 0x64e: 0x3b99, 0x64f: 0x3ba0, 0x650: 0x36b8, 0x651: 0x36b2, + 0x652: 0x3e03, 0x653: 0x44e7, 0x656: 0x36be, 0x657: 0x3e13, + 0x658: 0x3634, 0x659: 0x362e, 0x65a: 0x3622, 0x65b: 0x4303, 0x65d: 0x3ba7, + 0x65e: 0x3bae, 0x65f: 0x3bb5, 0x660: 0x36ee, 0x661: 0x36e8, 0x662: 0x3e6b, 0x663: 0x44ef, + 0x664: 0x36d0, 0x665: 0x36d6, 0x666: 0x36f4, 0x667: 0x3e7b, 0x668: 0x3664, 0x669: 0x365e, + 0x66a: 0x3652, 0x66b: 0x430f, 0x66c: 0x364c, 0x66d: 0x359e, 0x66e: 0x42eb, 0x66f: 0x0081, + 0x672: 0x3eb3, 0x673: 0x36fa, 0x674: 0x3ebb, + 0x676: 0x488b, 0x677: 0x3ed3, 0x678: 0x3640, 0x679: 0x4309, 0x67a: 0x3670, 0x67b: 0x431b, + 0x67c: 0x367c, 0x67d: 0x4259, 0x67e: 0xa100, + // Block 0x1a, offset 0x680 + 0x681: 0x3c09, 0x683: 0xa000, 0x684: 0x3c10, 0x685: 0xa000, + 0x687: 0x3c17, 0x688: 0xa000, 0x689: 0x3c1e, + 0x68d: 0xa000, + 0x6a0: 0x2f68, 0x6a1: 0xa000, 0x6a2: 0x3c2c, + 0x6a4: 0xa000, 0x6a5: 0xa000, + 0x6ad: 0x3c25, 0x6ae: 0x2f63, 0x6af: 0x2f6d, + 0x6b0: 0x3c33, 0x6b1: 0x3c3a, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3c41, 0x6b5: 0x3c48, + 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3c4f, 0x6b9: 0x3c56, 0x6ba: 0xa000, 0x6bb: 0xa000, + 0x6bc: 0xa000, 0x6bd: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3c5d, 0x6c1: 0x3c64, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3c79, 0x6c5: 0x3c80, + 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3c87, 0x6c9: 0x3c8e, + 0x6d1: 0xa000, + 0x6d2: 0xa000, + 0x6e2: 0xa000, + 0x6e8: 0xa000, 0x6e9: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3ca3, 0x6ed: 0x3caa, 0x6ee: 0x3cb1, 0x6ef: 0x3cb8, + 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000, + // Block 0x1c, offset 0x700 + 0x706: 0xa000, 0x70b: 0xa000, + 0x70c: 0x3f0b, 0x70d: 0xa000, 0x70e: 0x3f13, 0x70f: 0xa000, 0x710: 0x3f1b, 0x711: 0xa000, + 0x712: 0x3f23, 0x713: 0xa000, 0x714: 0x3f2b, 0x715: 0xa000, 0x716: 0x3f33, 0x717: 0xa000, + 0x718: 0x3f3b, 0x719: 0xa000, 0x71a: 0x3f43, 0x71b: 0xa000, 0x71c: 0x3f4b, 0x71d: 0xa000, + 0x71e: 0x3f53, 0x71f: 0xa000, 0x720: 0x3f5b, 0x721: 0xa000, 0x722: 0x3f63, + 0x724: 0xa000, 0x725: 0x3f6b, 0x726: 0xa000, 0x727: 0x3f73, 0x728: 0xa000, 0x729: 0x3f7b, + 0x72f: 0xa000, + 0x730: 0x3f83, 0x731: 0x3f8b, 0x732: 0xa000, 0x733: 0x3f93, 0x734: 0x3f9b, 0x735: 0xa000, + 0x736: 0x3fa3, 0x737: 0x3fab, 0x738: 0xa000, 0x739: 0x3fb3, 0x73a: 0x3fbb, 0x73b: 0xa000, + 0x73c: 0x3fc3, 0x73d: 0x3fcb, + // Block 0x1d, offset 
0x740 + 0x754: 0x3f03, + 0x759: 0x9903, 0x75a: 0x9903, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000, + 0x75e: 0x3fd3, + 0x766: 0xa000, + 0x76b: 0xa000, 0x76c: 0x3fe3, 0x76d: 0xa000, 0x76e: 0x3feb, 0x76f: 0xa000, + 0x770: 0x3ff3, 0x771: 0xa000, 0x772: 0x3ffb, 0x773: 0xa000, 0x774: 0x4003, 0x775: 0xa000, + 0x776: 0x400b, 0x777: 0xa000, 0x778: 0x4013, 0x779: 0xa000, 0x77a: 0x401b, 0x77b: 0xa000, + 0x77c: 0x4023, 0x77d: 0xa000, 0x77e: 0x402b, 0x77f: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x4033, 0x781: 0xa000, 0x782: 0x403b, 0x784: 0xa000, 0x785: 0x4043, + 0x786: 0xa000, 0x787: 0x404b, 0x788: 0xa000, 0x789: 0x4053, + 0x78f: 0xa000, 0x790: 0x405b, 0x791: 0x4063, + 0x792: 0xa000, 0x793: 0x406b, 0x794: 0x4073, 0x795: 0xa000, 0x796: 0x407b, 0x797: 0x4083, + 0x798: 0xa000, 0x799: 0x408b, 0x79a: 0x4093, 0x79b: 0xa000, 0x79c: 0x409b, 0x79d: 0x40a3, + 0x7af: 0xa000, + 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x3fdb, + 0x7b7: 0x40ab, 0x7b8: 0x40b3, 0x7b9: 0x40bb, 0x7ba: 0x40c3, + 0x7bd: 0xa000, 0x7be: 0x40cb, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x1377, 0x7c1: 0x0cfb, 0x7c2: 0x13d3, 0x7c3: 0x139f, 0x7c4: 0x0e57, 0x7c5: 0x06eb, + 0x7c6: 0x08df, 0x7c7: 0x162b, 0x7c8: 0x162b, 0x7c9: 0x0a0b, 0x7ca: 0x145f, 0x7cb: 0x0943, + 0x7cc: 0x0a07, 0x7cd: 0x0bef, 0x7ce: 0x0fcf, 0x7cf: 0x115f, 0x7d0: 0x1297, 0x7d1: 0x12d3, + 0x7d2: 0x1307, 0x7d3: 0x141b, 0x7d4: 0x0d73, 0x7d5: 0x0dff, 0x7d6: 0x0eab, 0x7d7: 0x0f43, + 0x7d8: 0x125f, 0x7d9: 0x1447, 0x7da: 0x1573, 0x7db: 0x070f, 0x7dc: 0x08b3, 0x7dd: 0x0d87, + 0x7de: 0x0ecf, 0x7df: 0x1293, 0x7e0: 0x15c3, 0x7e1: 0x0ab3, 0x7e2: 0x0e77, 0x7e3: 0x1283, + 0x7e4: 0x1317, 0x7e5: 0x0c23, 0x7e6: 0x11bb, 0x7e7: 0x12df, 0x7e8: 0x0b1f, 0x7e9: 0x0d0f, + 0x7ea: 0x0e17, 0x7eb: 0x0f1b, 0x7ec: 0x1427, 0x7ed: 0x074f, 0x7ee: 0x07e7, 0x7ef: 0x0853, + 0x7f0: 0x0c8b, 0x7f1: 0x0d7f, 0x7f2: 0x0ecb, 0x7f3: 0x0fef, 0x7f4: 0x1177, 0x7f5: 0x128b, + 0x7f6: 0x12a3, 0x7f7: 0x13c7, 0x7f8: 0x14ef, 0x7f9: 0x15a3, 0x7fa: 0x15bf, 0x7fb: 0x102b, + 0x7fc: 0x106b, 0x7fd: 0x1123, 0x7fe: 0x1243, 0x7ff: 0x147b, + // Block 0x20, offset 0x800 + 0x800: 0x15cb, 0x801: 0x134b, 0x802: 0x09c7, 0x803: 0x0b3b, 0x804: 0x10db, 0x805: 0x119b, + 0x806: 0x0eff, 0x807: 0x1033, 0x808: 0x1397, 0x809: 0x14e7, 0x80a: 0x09c3, 0x80b: 0x0a8f, + 0x80c: 0x0d77, 0x80d: 0x0e2b, 0x80e: 0x0e5f, 0x80f: 0x1113, 0x810: 0x113b, 0x811: 0x14a7, + 0x812: 0x084f, 0x813: 0x11a7, 0x814: 0x07f3, 0x815: 0x07ef, 0x816: 0x1097, 0x817: 0x1127, + 0x818: 0x125b, 0x819: 0x14af, 0x81a: 0x1367, 0x81b: 0x0c27, 0x81c: 0x0d73, 0x81d: 0x1357, + 0x81e: 0x06f7, 0x81f: 0x0a63, 0x820: 0x0b93, 0x821: 0x0f2f, 0x822: 0x0faf, 0x823: 0x0873, + 0x824: 0x103b, 0x825: 0x075f, 0x826: 0x0b77, 0x827: 0x06d7, 0x828: 0x0deb, 0x829: 0x0ca3, + 0x82a: 0x110f, 0x82b: 0x08c7, 0x82c: 0x09b3, 0x82d: 0x0ffb, 0x82e: 0x1263, 0x82f: 0x133b, + 0x830: 0x0db7, 0x831: 0x13f7, 0x832: 0x0de3, 0x833: 0x0c37, 0x834: 0x121b, 0x835: 0x0c57, + 0x836: 0x0fab, 0x837: 0x072b, 0x838: 0x07a7, 0x839: 0x07eb, 0x83a: 0x0d53, 0x83b: 0x10fb, + 0x83c: 0x11f3, 0x83d: 0x1347, 0x83e: 0x145b, 0x83f: 0x085b, + // Block 0x21, offset 0x840 + 0x840: 0x090f, 0x841: 0x0a17, 0x842: 0x0b2f, 0x843: 0x0cbf, 0x844: 0x0e7b, 0x845: 0x103f, + 0x846: 0x1497, 0x847: 0x157b, 0x848: 0x15cf, 0x849: 0x15e7, 0x84a: 0x0837, 0x84b: 0x0cf3, + 0x84c: 0x0da3, 0x84d: 0x13eb, 0x84e: 0x0afb, 0x84f: 0x0bd7, 0x850: 0x0bf3, 0x851: 0x0c83, + 0x852: 0x0e6b, 0x853: 0x0eb7, 0x854: 0x0f67, 0x855: 0x108b, 0x856: 0x112f, 0x857: 0x1193, + 0x858: 0x13db, 0x859: 0x126b, 0x85a: 0x1403, 0x85b: 0x147f, 0x85c: 0x080f, 0x85d: 0x083b, + 0x85e: 
0x0923, 0x85f: 0x0ea7, 0x860: 0x12f3, 0x861: 0x133b, 0x862: 0x0b1b, 0x863: 0x0b8b, + 0x864: 0x0c4f, 0x865: 0x0daf, 0x866: 0x10d7, 0x867: 0x0f23, 0x868: 0x073b, 0x869: 0x097f, + 0x86a: 0x0a63, 0x86b: 0x0ac7, 0x86c: 0x0b97, 0x86d: 0x0f3f, 0x86e: 0x0f5b, 0x86f: 0x116b, + 0x870: 0x118b, 0x871: 0x1463, 0x872: 0x14e3, 0x873: 0x14f3, 0x874: 0x152f, 0x875: 0x0753, + 0x876: 0x107f, 0x877: 0x144f, 0x878: 0x14cb, 0x879: 0x0baf, 0x87a: 0x0717, 0x87b: 0x0777, + 0x87c: 0x0a67, 0x87d: 0x0a87, 0x87e: 0x0caf, 0x87f: 0x0d73, + // Block 0x22, offset 0x880 + 0x880: 0x0ec3, 0x881: 0x0fcb, 0x882: 0x1277, 0x883: 0x1417, 0x884: 0x1623, 0x885: 0x0ce3, + 0x886: 0x14a3, 0x887: 0x0833, 0x888: 0x0d2f, 0x889: 0x0d3b, 0x88a: 0x0e0f, 0x88b: 0x0e47, + 0x88c: 0x0f4b, 0x88d: 0x0fa7, 0x88e: 0x1027, 0x88f: 0x110b, 0x890: 0x153b, 0x891: 0x07af, + 0x892: 0x0c03, 0x893: 0x14b3, 0x894: 0x0767, 0x895: 0x0aab, 0x896: 0x0e2f, 0x897: 0x13df, + 0x898: 0x0b67, 0x899: 0x0bb7, 0x89a: 0x0d43, 0x89b: 0x0f2f, 0x89c: 0x14bb, 0x89d: 0x0817, + 0x89e: 0x08ff, 0x89f: 0x0a97, 0x8a0: 0x0cd3, 0x8a1: 0x0d1f, 0x8a2: 0x0d5f, 0x8a3: 0x0df3, + 0x8a4: 0x0f47, 0x8a5: 0x0fbb, 0x8a6: 0x1157, 0x8a7: 0x12f7, 0x8a8: 0x1303, 0x8a9: 0x1457, + 0x8aa: 0x14d7, 0x8ab: 0x0883, 0x8ac: 0x0e4b, 0x8ad: 0x0903, 0x8ae: 0x0ec7, 0x8af: 0x0f6b, + 0x8b0: 0x1287, 0x8b1: 0x14bf, 0x8b2: 0x15ab, 0x8b3: 0x15d3, 0x8b4: 0x0d37, 0x8b5: 0x0e27, + 0x8b6: 0x11c3, 0x8b7: 0x10b7, 0x8b8: 0x10c3, 0x8b9: 0x10e7, 0x8ba: 0x0f17, 0x8bb: 0x0e9f, + 0x8bc: 0x1363, 0x8bd: 0x0733, 0x8be: 0x122b, 0x8bf: 0x081b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x080b, 0x8c1: 0x0b0b, 0x8c2: 0x0c2b, 0x8c3: 0x10f3, 0x8c4: 0x0a53, 0x8c5: 0x0e03, + 0x8c6: 0x0cef, 0x8c7: 0x13e7, 0x8c8: 0x12e7, 0x8c9: 0x14ab, 0x8ca: 0x1323, 0x8cb: 0x0b27, + 0x8cc: 0x0787, 0x8cd: 0x095b, 0x8d0: 0x09af, + 0x8d2: 0x0cdf, 0x8d5: 0x07f7, 0x8d6: 0x0f1f, 0x8d7: 0x0fe3, + 0x8d8: 0x1047, 0x8d9: 0x1063, 0x8da: 0x1067, 0x8db: 0x107b, 0x8dc: 0x14fb, 0x8dd: 0x10eb, + 0x8de: 0x116f, 0x8e0: 0x128f, 0x8e2: 0x1353, + 0x8e5: 0x1407, 0x8e6: 0x1433, + 0x8ea: 0x154f, 0x8eb: 0x1553, 0x8ec: 0x1557, 0x8ed: 0x15bb, 0x8ee: 0x142b, 0x8ef: 0x14c7, + 0x8f0: 0x0757, 0x8f1: 0x077b, 0x8f2: 0x078f, 0x8f3: 0x084b, 0x8f4: 0x0857, 0x8f5: 0x0897, + 0x8f6: 0x094b, 0x8f7: 0x0967, 0x8f8: 0x096f, 0x8f9: 0x09ab, 0x8fa: 0x09b7, 0x8fb: 0x0a93, + 0x8fc: 0x0a9b, 0x8fd: 0x0ba3, 0x8fe: 0x0bcb, 0x8ff: 0x0bd3, + // Block 0x24, offset 0x900 + 0x900: 0x0beb, 0x901: 0x0c97, 0x902: 0x0cc7, 0x903: 0x0ce7, 0x904: 0x0d57, 0x905: 0x0e1b, + 0x906: 0x0e37, 0x907: 0x0e67, 0x908: 0x0ebb, 0x909: 0x0edb, 0x90a: 0x0f4f, 0x90b: 0x102f, + 0x90c: 0x104b, 0x90d: 0x1053, 0x90e: 0x104f, 0x90f: 0x1057, 0x910: 0x105b, 0x911: 0x105f, + 0x912: 0x1073, 0x913: 0x1077, 0x914: 0x109b, 0x915: 0x10af, 0x916: 0x10cb, 0x917: 0x112f, + 0x918: 0x1137, 0x919: 0x113f, 0x91a: 0x1153, 0x91b: 0x117b, 0x91c: 0x11cb, 0x91d: 0x11ff, + 0x91e: 0x11ff, 0x91f: 0x1267, 0x920: 0x130f, 0x921: 0x1327, 0x922: 0x135b, 0x923: 0x135f, + 0x924: 0x13a3, 0x925: 0x13a7, 0x926: 0x13ff, 0x927: 0x1407, 0x928: 0x14db, 0x929: 0x151f, + 0x92a: 0x1537, 0x92b: 0x0b9b, 0x92c: 0x171e, 0x92d: 0x11e3, + 0x930: 0x06df, 0x931: 0x07e3, 0x932: 0x07a3, 0x933: 0x074b, 0x934: 0x078b, 0x935: 0x07b7, + 0x936: 0x0847, 0x937: 0x0863, 0x938: 0x094b, 0x939: 0x0937, 0x93a: 0x0947, 0x93b: 0x0963, + 0x93c: 0x09af, 0x93d: 0x09bf, 0x93e: 0x0a03, 0x93f: 0x0a0f, + // Block 0x25, offset 0x940 + 0x940: 0x0a2b, 0x941: 0x0a3b, 0x942: 0x0b23, 0x943: 0x0b2b, 0x944: 0x0b5b, 0x945: 0x0b7b, + 0x946: 0x0bab, 0x947: 0x0bc3, 0x948: 0x0bb3, 0x949: 0x0bd3, 0x94a: 0x0bc7, 0x94b: 0x0beb, + 
0x94c: 0x0c07, 0x94d: 0x0c5f, 0x94e: 0x0c6b, 0x94f: 0x0c73, 0x950: 0x0c9b, 0x951: 0x0cdf, + 0x952: 0x0d0f, 0x953: 0x0d13, 0x954: 0x0d27, 0x955: 0x0da7, 0x956: 0x0db7, 0x957: 0x0e0f, + 0x958: 0x0e5b, 0x959: 0x0e53, 0x95a: 0x0e67, 0x95b: 0x0e83, 0x95c: 0x0ebb, 0x95d: 0x1013, + 0x95e: 0x0edf, 0x95f: 0x0f13, 0x960: 0x0f1f, 0x961: 0x0f5f, 0x962: 0x0f7b, 0x963: 0x0f9f, + 0x964: 0x0fc3, 0x965: 0x0fc7, 0x966: 0x0fe3, 0x967: 0x0fe7, 0x968: 0x0ff7, 0x969: 0x100b, + 0x96a: 0x1007, 0x96b: 0x1037, 0x96c: 0x10b3, 0x96d: 0x10cb, 0x96e: 0x10e3, 0x96f: 0x111b, + 0x970: 0x112f, 0x971: 0x114b, 0x972: 0x117b, 0x973: 0x122f, 0x974: 0x1257, 0x975: 0x12cb, + 0x976: 0x1313, 0x977: 0x131f, 0x978: 0x1327, 0x979: 0x133f, 0x97a: 0x1353, 0x97b: 0x1343, + 0x97c: 0x135b, 0x97d: 0x1357, 0x97e: 0x134f, 0x97f: 0x135f, + // Block 0x26, offset 0x980 + 0x980: 0x136b, 0x981: 0x13a7, 0x982: 0x13e3, 0x983: 0x1413, 0x984: 0x144b, 0x985: 0x146b, + 0x986: 0x14b7, 0x987: 0x14db, 0x988: 0x14fb, 0x989: 0x150f, 0x98a: 0x151f, 0x98b: 0x152b, + 0x98c: 0x1537, 0x98d: 0x158b, 0x98e: 0x162b, 0x98f: 0x16b5, 0x990: 0x16b0, 0x991: 0x16e2, + 0x992: 0x0607, 0x993: 0x062f, 0x994: 0x0633, 0x995: 0x1764, 0x996: 0x1791, 0x997: 0x1809, + 0x998: 0x1617, 0x999: 0x1627, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x06fb, 0x9c1: 0x06f3, 0x9c2: 0x0703, 0x9c3: 0x1647, 0x9c4: 0x0747, 0x9c5: 0x0757, + 0x9c6: 0x075b, 0x9c7: 0x0763, 0x9c8: 0x076b, 0x9c9: 0x076f, 0x9ca: 0x077b, 0x9cb: 0x0773, + 0x9cc: 0x05b3, 0x9cd: 0x165b, 0x9ce: 0x078f, 0x9cf: 0x0793, 0x9d0: 0x0797, 0x9d1: 0x07b3, + 0x9d2: 0x164c, 0x9d3: 0x05b7, 0x9d4: 0x079f, 0x9d5: 0x07bf, 0x9d6: 0x1656, 0x9d7: 0x07cf, + 0x9d8: 0x07d7, 0x9d9: 0x0737, 0x9da: 0x07df, 0x9db: 0x07e3, 0x9dc: 0x1831, 0x9dd: 0x07ff, + 0x9de: 0x0807, 0x9df: 0x05bf, 0x9e0: 0x081f, 0x9e1: 0x0823, 0x9e2: 0x082b, 0x9e3: 0x082f, + 0x9e4: 0x05c3, 0x9e5: 0x0847, 0x9e6: 0x084b, 0x9e7: 0x0857, 0x9e8: 0x0863, 0x9e9: 0x0867, + 0x9ea: 0x086b, 0x9eb: 0x0873, 0x9ec: 0x0893, 0x9ed: 0x0897, 0x9ee: 0x089f, 0x9ef: 0x08af, + 0x9f0: 0x08b7, 0x9f1: 0x08bb, 0x9f2: 0x08bb, 0x9f3: 0x08bb, 0x9f4: 0x166a, 0x9f5: 0x0e93, + 0x9f6: 0x08cf, 0x9f7: 0x08d7, 0x9f8: 0x166f, 0x9f9: 0x08e3, 0x9fa: 0x08eb, 0x9fb: 0x08f3, + 0x9fc: 0x091b, 0x9fd: 0x0907, 0x9fe: 0x0913, 0x9ff: 0x0917, + // Block 0x28, offset 0xa00 + 0xa00: 0x091f, 0xa01: 0x0927, 0xa02: 0x092b, 0xa03: 0x0933, 0xa04: 0x093b, 0xa05: 0x093f, + 0xa06: 0x093f, 0xa07: 0x0947, 0xa08: 0x094f, 0xa09: 0x0953, 0xa0a: 0x095f, 0xa0b: 0x0983, + 0xa0c: 0x0967, 0xa0d: 0x0987, 0xa0e: 0x096b, 0xa0f: 0x0973, 0xa10: 0x080b, 0xa11: 0x09cf, + 0xa12: 0x0997, 0xa13: 0x099b, 0xa14: 0x099f, 0xa15: 0x0993, 0xa16: 0x09a7, 0xa17: 0x09a3, + 0xa18: 0x09bb, 0xa19: 0x1674, 0xa1a: 0x09d7, 0xa1b: 0x09db, 0xa1c: 0x09e3, 0xa1d: 0x09ef, + 0xa1e: 0x09f7, 0xa1f: 0x0a13, 0xa20: 0x1679, 0xa21: 0x167e, 0xa22: 0x0a1f, 0xa23: 0x0a23, + 0xa24: 0x0a27, 0xa25: 0x0a1b, 0xa26: 0x0a2f, 0xa27: 0x05c7, 0xa28: 0x05cb, 0xa29: 0x0a37, + 0xa2a: 0x0a3f, 0xa2b: 0x0a3f, 0xa2c: 0x1683, 0xa2d: 0x0a5b, 0xa2e: 0x0a5f, 0xa2f: 0x0a63, + 0xa30: 0x0a6b, 0xa31: 0x1688, 0xa32: 0x0a73, 0xa33: 0x0a77, 0xa34: 0x0b4f, 0xa35: 0x0a7f, + 0xa36: 0x05cf, 0xa37: 0x0a8b, 0xa38: 0x0a9b, 0xa39: 0x0aa7, 0xa3a: 0x0aa3, 0xa3b: 0x1692, + 0xa3c: 0x0aaf, 0xa3d: 0x1697, 0xa3e: 0x0abb, 0xa3f: 0x0ab7, + // Block 0x29, offset 0xa40 + 0xa40: 0x0abf, 0xa41: 0x0acf, 0xa42: 0x0ad3, 0xa43: 0x05d3, 0xa44: 0x0ae3, 0xa45: 0x0aeb, + 0xa46: 0x0aef, 0xa47: 0x0af3, 0xa48: 0x05d7, 0xa49: 0x169c, 0xa4a: 0x05db, 0xa4b: 0x0b0f, + 0xa4c: 0x0b13, 0xa4d: 0x0b17, 0xa4e: 0x0b1f, 0xa4f: 0x1863, 0xa50: 0x0b37, 0xa51: 
0x16a6, + 0xa52: 0x16a6, 0xa53: 0x11d7, 0xa54: 0x0b47, 0xa55: 0x0b47, 0xa56: 0x05df, 0xa57: 0x16c9, + 0xa58: 0x179b, 0xa59: 0x0b57, 0xa5a: 0x0b5f, 0xa5b: 0x05e3, 0xa5c: 0x0b73, 0xa5d: 0x0b83, + 0xa5e: 0x0b87, 0xa5f: 0x0b8f, 0xa60: 0x0b9f, 0xa61: 0x05eb, 0xa62: 0x05e7, 0xa63: 0x0ba3, + 0xa64: 0x16ab, 0xa65: 0x0ba7, 0xa66: 0x0bbb, 0xa67: 0x0bbf, 0xa68: 0x0bc3, 0xa69: 0x0bbf, + 0xa6a: 0x0bcf, 0xa6b: 0x0bd3, 0xa6c: 0x0be3, 0xa6d: 0x0bdb, 0xa6e: 0x0bdf, 0xa6f: 0x0be7, + 0xa70: 0x0beb, 0xa71: 0x0bef, 0xa72: 0x0bfb, 0xa73: 0x0bff, 0xa74: 0x0c17, 0xa75: 0x0c1f, + 0xa76: 0x0c2f, 0xa77: 0x0c43, 0xa78: 0x16ba, 0xa79: 0x0c3f, 0xa7a: 0x0c33, 0xa7b: 0x0c4b, + 0xa7c: 0x0c53, 0xa7d: 0x0c67, 0xa7e: 0x16bf, 0xa7f: 0x0c6f, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0c63, 0xa81: 0x0c5b, 0xa82: 0x05ef, 0xa83: 0x0c77, 0xa84: 0x0c7f, 0xa85: 0x0c87, + 0xa86: 0x0c7b, 0xa87: 0x05f3, 0xa88: 0x0c97, 0xa89: 0x0c9f, 0xa8a: 0x16c4, 0xa8b: 0x0ccb, + 0xa8c: 0x0cff, 0xa8d: 0x0cdb, 0xa8e: 0x05ff, 0xa8f: 0x0ce7, 0xa90: 0x05fb, 0xa91: 0x05f7, + 0xa92: 0x07c3, 0xa93: 0x07c7, 0xa94: 0x0d03, 0xa95: 0x0ceb, 0xa96: 0x11ab, 0xa97: 0x0663, + 0xa98: 0x0d0f, 0xa99: 0x0d13, 0xa9a: 0x0d17, 0xa9b: 0x0d2b, 0xa9c: 0x0d23, 0xa9d: 0x16dd, + 0xa9e: 0x0603, 0xa9f: 0x0d3f, 0xaa0: 0x0d33, 0xaa1: 0x0d4f, 0xaa2: 0x0d57, 0xaa3: 0x16e7, + 0xaa4: 0x0d5b, 0xaa5: 0x0d47, 0xaa6: 0x0d63, 0xaa7: 0x0607, 0xaa8: 0x0d67, 0xaa9: 0x0d6b, + 0xaaa: 0x0d6f, 0xaab: 0x0d7b, 0xaac: 0x16ec, 0xaad: 0x0d83, 0xaae: 0x060b, 0xaaf: 0x0d8f, + 0xab0: 0x16f1, 0xab1: 0x0d93, 0xab2: 0x060f, 0xab3: 0x0d9f, 0xab4: 0x0dab, 0xab5: 0x0db7, + 0xab6: 0x0dbb, 0xab7: 0x16f6, 0xab8: 0x168d, 0xab9: 0x16fb, 0xaba: 0x0ddb, 0xabb: 0x1700, + 0xabc: 0x0de7, 0xabd: 0x0def, 0xabe: 0x0ddf, 0xabf: 0x0dfb, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0e0b, 0xac1: 0x0e1b, 0xac2: 0x0e0f, 0xac3: 0x0e13, 0xac4: 0x0e1f, 0xac5: 0x0e23, + 0xac6: 0x1705, 0xac7: 0x0e07, 0xac8: 0x0e3b, 0xac9: 0x0e3f, 0xaca: 0x0613, 0xacb: 0x0e53, + 0xacc: 0x0e4f, 0xacd: 0x170a, 0xace: 0x0e33, 0xacf: 0x0e6f, 0xad0: 0x170f, 0xad1: 0x1714, + 0xad2: 0x0e73, 0xad3: 0x0e87, 0xad4: 0x0e83, 0xad5: 0x0e7f, 0xad6: 0x0617, 0xad7: 0x0e8b, + 0xad8: 0x0e9b, 0xad9: 0x0e97, 0xada: 0x0ea3, 0xadb: 0x1651, 0xadc: 0x0eb3, 0xadd: 0x1719, + 0xade: 0x0ebf, 0xadf: 0x1723, 0xae0: 0x0ed3, 0xae1: 0x0edf, 0xae2: 0x0ef3, 0xae3: 0x1728, + 0xae4: 0x0f07, 0xae5: 0x0f0b, 0xae6: 0x172d, 0xae7: 0x1732, 0xae8: 0x0f27, 0xae9: 0x0f37, + 0xaea: 0x061b, 0xaeb: 0x0f3b, 0xaec: 0x061f, 0xaed: 0x061f, 0xaee: 0x0f53, 0xaef: 0x0f57, + 0xaf0: 0x0f5f, 0xaf1: 0x0f63, 0xaf2: 0x0f6f, 0xaf3: 0x0623, 0xaf4: 0x0f87, 0xaf5: 0x1737, + 0xaf6: 0x0fa3, 0xaf7: 0x173c, 0xaf8: 0x0faf, 0xaf9: 0x16a1, 0xafa: 0x0fbf, 0xafb: 0x1741, + 0xafc: 0x1746, 0xafd: 0x174b, 0xafe: 0x0627, 0xaff: 0x062b, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0ff7, 0xb01: 0x1755, 0xb02: 0x1750, 0xb03: 0x175a, 0xb04: 0x175f, 0xb05: 0x0fff, + 0xb06: 0x1003, 0xb07: 0x1003, 0xb08: 0x100b, 0xb09: 0x0633, 0xb0a: 0x100f, 0xb0b: 0x0637, + 0xb0c: 0x063b, 0xb0d: 0x1769, 0xb0e: 0x1023, 0xb0f: 0x102b, 0xb10: 0x1037, 0xb11: 0x063f, + 0xb12: 0x176e, 0xb13: 0x105b, 0xb14: 0x1773, 0xb15: 0x1778, 0xb16: 0x107b, 0xb17: 0x1093, + 0xb18: 0x0643, 0xb19: 0x109b, 0xb1a: 0x109f, 0xb1b: 0x10a3, 0xb1c: 0x177d, 0xb1d: 0x1782, + 0xb1e: 0x1782, 0xb1f: 0x10bb, 0xb20: 0x0647, 0xb21: 0x1787, 0xb22: 0x10cf, 0xb23: 0x10d3, + 0xb24: 0x064b, 0xb25: 0x178c, 0xb26: 0x10ef, 0xb27: 0x064f, 0xb28: 0x10ff, 0xb29: 0x10f7, + 0xb2a: 0x1107, 0xb2b: 0x1796, 0xb2c: 0x111f, 0xb2d: 0x0653, 0xb2e: 0x112b, 0xb2f: 0x1133, + 0xb30: 0x1143, 0xb31: 0x0657, 0xb32: 0x17a0, 
0xb33: 0x17a5, 0xb34: 0x065b, 0xb35: 0x17aa, + 0xb36: 0x115b, 0xb37: 0x17af, 0xb38: 0x1167, 0xb39: 0x1173, 0xb3a: 0x117b, 0xb3b: 0x17b4, + 0xb3c: 0x17b9, 0xb3d: 0x118f, 0xb3e: 0x17be, 0xb3f: 0x1197, + // Block 0x2d, offset 0xb40 + 0xb40: 0x16ce, 0xb41: 0x065f, 0xb42: 0x11af, 0xb43: 0x11b3, 0xb44: 0x0667, 0xb45: 0x11b7, + 0xb46: 0x0a33, 0xb47: 0x17c3, 0xb48: 0x17c8, 0xb49: 0x16d3, 0xb4a: 0x16d8, 0xb4b: 0x11d7, + 0xb4c: 0x11db, 0xb4d: 0x13f3, 0xb4e: 0x066b, 0xb4f: 0x1207, 0xb50: 0x1203, 0xb51: 0x120b, + 0xb52: 0x083f, 0xb53: 0x120f, 0xb54: 0x1213, 0xb55: 0x1217, 0xb56: 0x121f, 0xb57: 0x17cd, + 0xb58: 0x121b, 0xb59: 0x1223, 0xb5a: 0x1237, 0xb5b: 0x123b, 0xb5c: 0x1227, 0xb5d: 0x123f, + 0xb5e: 0x1253, 0xb5f: 0x1267, 0xb60: 0x1233, 0xb61: 0x1247, 0xb62: 0x124b, 0xb63: 0x124f, + 0xb64: 0x17d2, 0xb65: 0x17dc, 0xb66: 0x17d7, 0xb67: 0x066f, 0xb68: 0x126f, 0xb69: 0x1273, + 0xb6a: 0x127b, 0xb6b: 0x17f0, 0xb6c: 0x127f, 0xb6d: 0x17e1, 0xb6e: 0x0673, 0xb6f: 0x0677, + 0xb70: 0x17e6, 0xb71: 0x17eb, 0xb72: 0x067b, 0xb73: 0x129f, 0xb74: 0x12a3, 0xb75: 0x12a7, + 0xb76: 0x12ab, 0xb77: 0x12b7, 0xb78: 0x12b3, 0xb79: 0x12bf, 0xb7a: 0x12bb, 0xb7b: 0x12cb, + 0xb7c: 0x12c3, 0xb7d: 0x12c7, 0xb7e: 0x12cf, 0xb7f: 0x067f, + // Block 0x2e, offset 0xb80 + 0xb80: 0x12d7, 0xb81: 0x12db, 0xb82: 0x0683, 0xb83: 0x12eb, 0xb84: 0x12ef, 0xb85: 0x17f5, + 0xb86: 0x12fb, 0xb87: 0x12ff, 0xb88: 0x0687, 0xb89: 0x130b, 0xb8a: 0x05bb, 0xb8b: 0x17fa, + 0xb8c: 0x17ff, 0xb8d: 0x068b, 0xb8e: 0x068f, 0xb8f: 0x1337, 0xb90: 0x134f, 0xb91: 0x136b, + 0xb92: 0x137b, 0xb93: 0x1804, 0xb94: 0x138f, 0xb95: 0x1393, 0xb96: 0x13ab, 0xb97: 0x13b7, + 0xb98: 0x180e, 0xb99: 0x1660, 0xb9a: 0x13c3, 0xb9b: 0x13bf, 0xb9c: 0x13cb, 0xb9d: 0x1665, + 0xb9e: 0x13d7, 0xb9f: 0x13e3, 0xba0: 0x1813, 0xba1: 0x1818, 0xba2: 0x1423, 0xba3: 0x142f, + 0xba4: 0x1437, 0xba5: 0x181d, 0xba6: 0x143b, 0xba7: 0x1467, 0xba8: 0x1473, 0xba9: 0x1477, + 0xbaa: 0x146f, 0xbab: 0x1483, 0xbac: 0x1487, 0xbad: 0x1822, 0xbae: 0x1493, 0xbaf: 0x0693, + 0xbb0: 0x149b, 0xbb1: 0x1827, 0xbb2: 0x0697, 0xbb3: 0x14d3, 0xbb4: 0x0ac3, 0xbb5: 0x14eb, + 0xbb6: 0x182c, 0xbb7: 0x1836, 0xbb8: 0x069b, 0xbb9: 0x069f, 0xbba: 0x1513, 0xbbb: 0x183b, + 0xbbc: 0x06a3, 0xbbd: 0x1840, 0xbbe: 0x152b, 0xbbf: 0x152b, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x1533, 0xbc1: 0x1845, 0xbc2: 0x154b, 0xbc3: 0x06a7, 0xbc4: 0x155b, 0xbc5: 0x1567, + 0xbc6: 0x156f, 0xbc7: 0x1577, 0xbc8: 0x06ab, 0xbc9: 0x184a, 0xbca: 0x158b, 0xbcb: 0x15a7, + 0xbcc: 0x15b3, 0xbcd: 0x06af, 0xbce: 0x06b3, 0xbcf: 0x15b7, 0xbd0: 0x184f, 0xbd1: 0x06b7, + 0xbd2: 0x1854, 0xbd3: 0x1859, 0xbd4: 0x185e, 0xbd5: 0x15db, 0xbd6: 0x06bb, 0xbd7: 0x15ef, + 0xbd8: 0x15f7, 0xbd9: 0x15fb, 0xbda: 0x1603, 0xbdb: 0x160b, 0xbdc: 0x1613, 0xbdd: 0x1868, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32, + 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35, + 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3b, 0x121: 0x3c, 0x123: 0x0d, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40, + 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47, + 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d, + 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55, + // Block 0x5, offset 0x140 + 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b, + 0x14d: 0x5c, + 0x15c: 0x5d, 0x15f: 0x5e, + 0x162: 0x5f, 0x164: 0x60, + 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16c: 0x0e, 0x16d: 0x64, 0x16e: 0x65, 0x16f: 0x66, + 0x170: 0x67, 0x173: 0x68, 0x177: 0x0f, + 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17, + // Block 0x6, offset 0x180 + 0x180: 0x69, 0x183: 0x6a, 0x184: 0x6b, 0x186: 0x6c, 0x187: 0x6d, + 0x188: 0x6e, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x6f, 0x18c: 0x70, + 0x1ab: 0x71, + 0x1b3: 0x72, 0x1b5: 0x73, 0x1b7: 0x74, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x75, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x76, 0x1c5: 0x77, + 0x1c9: 0x78, 0x1cc: 0x79, 0x1cd: 0x7a, + // Block 0x8, offset 0x200 + 0x219: 0x7b, 0x21a: 0x7c, 0x21b: 0x7d, + 0x220: 0x7e, 0x223: 0x7f, 0x224: 0x80, 0x225: 0x81, 0x226: 0x82, 0x227: 0x83, + 0x22a: 0x84, 0x22b: 0x85, 0x22f: 0x86, + 0x230: 0x87, 0x231: 0x88, 0x232: 0x89, 0x233: 0x8a, 0x234: 0x8b, 0x235: 0x8c, 0x236: 0x8d, 0x237: 0x87, + 0x238: 0x88, 0x239: 0x89, 0x23a: 0x8a, 0x23b: 0x8b, 0x23c: 0x8c, 0x23d: 0x8d, 0x23e: 0x87, 0x23f: 0x88, + // Block 0x9, offset 0x240 + 0x240: 0x89, 0x241: 0x8a, 0x242: 0x8b, 0x243: 0x8c, 0x244: 0x8d, 0x245: 0x87, 0x246: 0x88, 0x247: 0x89, + 0x248: 0x8a, 0x249: 0x8b, 0x24a: 0x8c, 0x24b: 0x8d, 0x24c: 0x87, 0x24d: 0x88, 0x24e: 0x89, 0x24f: 0x8a, + 0x250: 0x8b, 0x251: 0x8c, 0x252: 0x8d, 0x253: 0x87, 0x254: 0x88, 0x255: 0x89, 0x256: 0x8a, 0x257: 0x8b, + 0x258: 0x8c, 0x259: 0x8d, 0x25a: 0x87, 0x25b: 0x88, 0x25c: 0x89, 0x25d: 0x8a, 0x25e: 0x8b, 0x25f: 0x8c, + 0x260: 0x8d, 0x261: 0x87, 0x262: 0x88, 0x263: 0x89, 0x264: 0x8a, 0x265: 0x8b, 0x266: 0x8c, 0x267: 0x8d, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26c: 0x8b, 0x26d: 0x8c, 0x26e: 0x8d, 0x26f: 0x87, + 0x270: 0x88, 0x271: 0x89, 0x272: 0x8a, 0x273: 0x8b, 0x274: 0x8c, 0x275: 0x8d, 0x276: 0x87, 0x277: 0x88, + 0x278: 0x89, 0x279: 0x8a, 0x27a: 0x8b, 0x27b: 0x8c, 0x27c: 0x8d, 0x27d: 0x87, 0x27e: 0x88, 0x27f: 0x89, + // Block 0xa, offset 0x280 + 0x280: 0x8a, 0x281: 0x8b, 0x282: 0x8c, 0x283: 0x8d, 0x284: 0x87, 0x285: 0x88, 0x286: 0x89, 0x287: 0x8a, + 0x288: 0x8b, 0x289: 0x8c, 0x28a: 0x8d, 0x28b: 0x87, 0x28c: 0x88, 0x28d: 0x89, 0x28e: 0x8a, 0x28f: 0x8b, + 0x290: 0x8c, 0x291: 0x8d, 0x292: 0x87, 0x293: 0x88, 0x294: 0x89, 0x295: 0x8a, 0x296: 0x8b, 0x297: 0x8c, + 0x298: 0x8d, 0x299: 0x87, 0x29a: 0x88, 0x29b: 0x89, 0x29c: 0x8a, 0x29d: 0x8b, 0x29e: 0x8c, 0x29f: 0x8d, + 0x2a0: 0x87, 0x2a1: 0x88, 0x2a2: 0x89, 
0x2a3: 0x8a, 0x2a4: 0x8b, 0x2a5: 0x8c, 0x2a6: 0x8d, 0x2a7: 0x87, + 0x2a8: 0x88, 0x2a9: 0x89, 0x2aa: 0x8a, 0x2ab: 0x8b, 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x87, 0x2af: 0x88, + 0x2b0: 0x89, 0x2b1: 0x8a, 0x2b2: 0x8b, 0x2b3: 0x8c, 0x2b4: 0x8d, 0x2b5: 0x87, 0x2b6: 0x88, 0x2b7: 0x89, + 0x2b8: 0x8a, 0x2b9: 0x8b, 0x2ba: 0x8c, 0x2bb: 0x8d, 0x2bc: 0x87, 0x2bd: 0x88, 0x2be: 0x89, 0x2bf: 0x8a, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8b, 0x2c1: 0x8c, 0x2c2: 0x8d, 0x2c3: 0x87, 0x2c4: 0x88, 0x2c5: 0x89, 0x2c6: 0x8a, 0x2c7: 0x8b, + 0x2c8: 0x8c, 0x2c9: 0x8d, 0x2ca: 0x87, 0x2cb: 0x88, 0x2cc: 0x89, 0x2cd: 0x8a, 0x2ce: 0x8b, 0x2cf: 0x8c, + 0x2d0: 0x8d, 0x2d1: 0x87, 0x2d2: 0x88, 0x2d3: 0x89, 0x2d4: 0x8a, 0x2d5: 0x8b, 0x2d6: 0x8c, 0x2d7: 0x8d, + 0x2d8: 0x87, 0x2d9: 0x88, 0x2da: 0x89, 0x2db: 0x8a, 0x2dc: 0x8b, 0x2dd: 0x8c, 0x2de: 0x8e, + // Block 0xc, offset 0x300 + 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20, + 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x8f, 0x32d: 0x90, 0x32e: 0x91, + 0x331: 0x92, 0x332: 0x93, 0x333: 0x94, 0x334: 0x95, + 0x338: 0x96, 0x339: 0x97, 0x33a: 0x98, 0x33b: 0x99, 0x33e: 0x9a, 0x33f: 0x9b, + // Block 0xd, offset 0x340 + 0x347: 0x9c, + 0x34b: 0x9d, 0x34d: 0x9e, + 0x368: 0x9f, 0x36b: 0xa0, + 0x374: 0xa1, + 0x37d: 0xa2, + // Block 0xe, offset 0x380 + 0x381: 0xa3, 0x382: 0xa4, 0x384: 0xa5, 0x385: 0x82, 0x387: 0xa6, + 0x388: 0xa7, 0x38b: 0xa8, 0x38c: 0xa9, 0x38d: 0xaa, + 0x391: 0xab, 0x392: 0xac, 0x393: 0xad, 0x396: 0xae, 0x397: 0xaf, + 0x398: 0x73, 0x39a: 0xb0, 0x39c: 0xb1, + 0x3a0: 0xb2, 0x3a7: 0xb3, + 0x3a8: 0xb4, 0x3a9: 0xb5, 0x3aa: 0xb6, + 0x3b0: 0x73, 0x3b5: 0xb7, 0x3b6: 0xb8, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xb9, 0x3ec: 0xba, + // Block 0x10, offset 0x400 + 0x432: 0xbb, + // Block 0x11, offset 0x440 + 0x445: 0xbc, 0x446: 0xbd, 0x447: 0xbe, + 0x449: 0xbf, + // Block 0x12, offset 0x480 + 0x480: 0xc0, 0x484: 0xba, + 0x48b: 0xc1, + 0x4a3: 0xc2, 0x4a5: 0xc3, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xc4, + // Block 0x14, offset 0x500 + 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c, + 0x528: 0x2d, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 151 entries, 302 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x72, 0x79, 0x7c, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x99, 0x9d, 0xa4, 0xa9, 0xac, 0xb6, 0xb9, 0xc0, 0xc8, 0xcb, 0xcd, 0xd0, 0xd2, 0xd7, 0xe8, 0xf4, 0xf6, 0xfc, 0xfe, 0x100, 0x102, 0x104, 0x106, 0x108, 0x10b, 0x10e, 0x110, 0x113, 0x116, 0x11a, 0x11f, 0x128, 0x12a, 0x12d, 0x12f, 0x13a, 0x13e, 0x14c, 0x14f, 0x155, 0x15b, 0x166, 0x16a, 0x16c, 0x16e, 0x170, 0x172, 0x174, 0x17a, 0x17e, 0x180, 0x182, 0x18a, 0x18e, 0x191, 0x193, 0x195, 0x197, 0x19a, 0x19c, 0x19e, 0x1a0, 0x1a2, 0x1a8, 0x1ab, 0x1ad, 0x1b4, 0x1ba, 0x1c0, 0x1c8, 0x1ce, 0x1d4, 0x1da, 0x1de, 0x1ec, 0x1f5, 0x1f8, 0x1fb, 0x1fd, 0x200, 0x202, 0x206, 0x20b, 0x20d, 0x20f, 0x214, 0x21a, 0x21c, 0x21e, 0x220, 0x226, 0x229, 0x22b, 0x231, 0x234, 0x23c, 0x243, 0x246, 0x249, 0x24b, 0x24e, 0x256, 0x25a, 0x261, 0x264, 0x26a, 0x26c, 0x26f, 0x271, 0x274, 0x276, 0x278, 0x27a, 0x27c, 0x27f, 0x281, 0x283, 0x285, 0x287, 0x294, 0x29e, 0x2a0, 0x2a2, 0x2a8, 0x2aa, 0x2ac, 0x2af} + +// nfcSparseValues: 689 entries, 2756 bytes +var nfcSparseValues = [689]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 
0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 0x46e5, lo: 0xa0, hi: 0xa1}, + {value: 0x4717, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4843, lo: 0x8a, hi: 0x8a}, + {value: 0x4861, lo: 0x8b, hi: 0x8b}, + {value: 0x36ca, lo: 0x8c, hi: 0x8c}, + {value: 0x36e2, lo: 0x8d, hi: 0x8d}, + {value: 0x4879, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3700, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a8, lo: 0x90, hi: 0x90}, + {value: 0x37b4, lo: 0x91, hi: 0x91}, + {value: 0x37a2, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x381a, lo: 0x97, hi: 0x97}, + {value: 0x37e4, lo: 0x9c, hi: 0x9c}, + {value: 0x37cc, lo: 0x9d, hi: 0x9d}, + {value: 0x37f6, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x3820, lo: 0xb6, hi: 0xb6}, + {value: 0x3826, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3844, lo: 0xa2, hi: 0xa2}, + {value: 0x384a, lo: 0xa3, hi: 0xa3}, + {value: 0x3856, lo: 0xa4, hi: 0xa4}, + {value: 0x3850, lo: 0xa5, hi: 0xa5}, + {value: 0x385c, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x386e, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x3862, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3868, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, 
hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0xd, offset 0x63 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x68 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x6a + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3edb, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee3, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3eeb, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x72 + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451f, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x79 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x7c + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ca1, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455f, lo: 0x9c, hi: 0x9d}, + {value: 0x456f, lo: 0x9f, hi: 0x9f}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x84 + {value: 0x0000, lo: 0x03}, + {value: 0x4597, lo: 0xb3, hi: 0xb3}, + {value: 0x459f, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0x88 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4577, lo: 0x99, hi: 0x9b}, + {value: 0x458f, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0x8c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0x8e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0x90 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb9, lo: 0x88, hi: 0x88}, + {value: 0x2cb1, lo: 0x8b, hi: 0x8b}, + {value: 0x2cc1, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a7, lo: 0x9c, hi: 0x9c}, + {value: 0x45af, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0x99 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc9, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0x9d + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cd1, lo: 0x8a, hi: 0x8a}, + {value: 0x2ce1, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd9, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xa4 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef3, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xa9 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // 
Block 0x1c, offset 0xac + {value: 0x0000, lo: 0x09}, + {value: 0x2ce9, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cf1, lo: 0x87, hi: 0x87}, + {value: 0x2cf9, lo: 0x88, hi: 0x88}, + {value: 0x2f53, lo: 0x8a, hi: 0x8a}, + {value: 0x2ddb, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xb9 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2d01, lo: 0x8a, hi: 0x8a}, + {value: 0x2d11, lo: 0x8b, hi: 0x8b}, + {value: 0x2d09, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xc0 + {value: 0x6be7, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3efb, lo: 0x9a, hi: 0x9a}, + {value: 0x2f5b, lo: 0x9c, hi: 0x9c}, + {value: 0x2de6, lo: 0x9d, hi: 0x9d}, + {value: 0x2d19, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xc8 + {value: 0x0000, lo: 0x02}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xcb + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xcd + {value: 0x0000, lo: 0x02}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x23, offset 0xd0 + {value: 0x0000, lo: 0x01}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + // Block 0x24, offset 0xd2 + {value: 0x0000, lo: 0x04}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xd7 + {value: 0x0000, lo: 0x10}, + {value: 0x2647, lo: 0x83, hi: 0x83}, + {value: 0x264e, lo: 0x8d, hi: 0x8d}, + {value: 0x2655, lo: 0x92, hi: 0x92}, + {value: 0x265c, lo: 0x97, hi: 0x97}, + {value: 0x2663, lo: 0x9c, hi: 0x9c}, + {value: 0x2640, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a87, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a90, lo: 0xb5, hi: 0xb5}, + {value: 0x45b7, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x45bf, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0xe8 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a99, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x2671, lo: 0x93, hi: 0x93}, + {value: 0x2678, lo: 0x9d, hi: 0x9d}, + {value: 0x267f, lo: 0xa2, hi: 0xa2}, + {value: 0x2686, lo: 0xa7, hi: 0xa7}, + {value: 0x268d, lo: 0xac, hi: 0xac}, + {value: 0x266a, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0xf4 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0xf6 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d21, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0xfc + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x2a, offset 0xfe + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 
0x92}, + // Block 0x2b, offset 0x100 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x102 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x104 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x108 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x10b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x10e + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x110 + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x113 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x116 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x11a + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x36, offset 0x11f + {value: 0x0000, lo: 0x08}, + {value: 0x2d69, lo: 0x80, hi: 0x80}, + {value: 0x2d71, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d79, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x37, offset 0x128 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x38, offset 0x12a + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x39, offset 0x12d + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3a, offset 0x12f + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3b, offset 0x13a + {value: 0x0004, lo: 0x03}, + {value: 0x0433, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3c, offset 0x13e + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x3d, offset 0x14c + {value: 0x427e, lo: 0x02}, + {value: 0x01b8, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x3e, offset 0x14f + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, 
hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bbc, lo: 0x9a, hi: 0x9b}, + {value: 0x3bca, lo: 0xae, hi: 0xae}, + // Block 0x3f, offset 0x155 + {value: 0x000e, lo: 0x05}, + {value: 0x3bd1, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd8, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x40, offset 0x15b + {value: 0x6405, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be6, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bed, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf4, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bfb, lo: 0xa4, hi: 0xa5}, + {value: 0x3c02, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x41, offset 0x166 + {value: 0x0007, lo: 0x03}, + {value: 0x3c6b, lo: 0xa0, hi: 0xa1}, + {value: 0x3c95, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbf, lo: 0xaa, hi: 0xad}, + // Block 0x42, offset 0x16a + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x43, offset 0x16c + {value: 0x0000, lo: 0x01}, + {value: 0x44e0, lo: 0x9c, hi: 0x9c}, + // Block 0x44, offset 0x16e + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x45, offset 0x170 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x46, offset 0x172 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x47, offset 0x174 + {value: 0x0000, lo: 0x05}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xaf}, + // Block 0x48, offset 0x17a + {value: 0x0000, lo: 0x03}, + {value: 0x4aa2, lo: 0xb3, hi: 0xb3}, + {value: 0x4aa2, lo: 0xb5, hi: 0xb6}, + {value: 0x4aa2, lo: 0xba, hi: 0xbf}, + // Block 0x49, offset 0x17e + {value: 0x0000, lo: 0x01}, + {value: 0x4aa2, lo: 0x8f, hi: 0xa3}, + // Block 0x4a, offset 0x180 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4b, offset 0x182 + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4c, offset 0x18a + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4d, offset 0x18e + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x4e, offset 0x191 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x4f, offset 0x193 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x50, offset 0x195 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x51, offset 0x197 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x52, offset 0x19a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x53, offset 0x19c + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x54, offset 0x19e + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x55, 
offset 0x1a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x56, offset 0x1a2 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x57, offset 0x1a8 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x58, offset 0x1ab + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x59, offset 0x1ad + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5a, offset 0x1b4 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5b, offset 0x1ba + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5c, offset 0x1c0 + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5d, offset 0x1c8 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x5e, offset 0x1ce + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x5f, offset 0x1d4 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x60, offset 0x1da + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x61, offset 0x1de + {value: 0x0006, lo: 0x0d}, + {value: 0x4393, lo: 0x9d, hi: 0x9d}, + {value: 0x8115, lo: 0x9e, hi: 0x9e}, + {value: 0x4405, lo: 0x9f, hi: 0x9f}, + {value: 0x43f3, lo: 0xaa, hi: 0xab}, + {value: 0x44f7, lo: 0xac, hi: 0xac}, + {value: 0x44ff, lo: 0xad, hi: 0xad}, + {value: 0x434b, lo: 0xae, hi: 0xb1}, + {value: 0x4369, lo: 0xb2, hi: 0xb4}, + {value: 0x4381, lo: 0xb5, hi: 0xb6}, + {value: 0x438d, lo: 0xb8, hi: 0xb8}, + {value: 0x4399, lo: 0xb9, hi: 0xbb}, + {value: 0x43b1, lo: 0xbc, hi: 0xbc}, + {value: 0x43b7, lo: 0xbe, hi: 0xbe}, + // Block 0x62, offset 0x1ec + {value: 0x0006, lo: 0x08}, + {value: 0x43bd, lo: 0x80, hi: 0x81}, + {value: 0x43c9, lo: 0x83, hi: 0x84}, + {value: 0x43db, lo: 0x86, hi: 0x89}, + {value: 0x43ff, lo: 0x8a, hi: 0x8a}, + {value: 0x437b, lo: 0x8b, hi: 0x8b}, + {value: 0x4363, lo: 0x8c, hi: 0x8c}, + {value: 0x43ab, lo: 0x8d, hi: 0x8d}, + {value: 
0x43d5, lo: 0x8e, hi: 0x8e}, + // Block 0x63, offset 0x1f5 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x64, offset 0x1f8 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x65, offset 0x1fb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x66, offset 0x1fd + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x67, offset 0x200 + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x68, offset 0x202 + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xa0, hi: 0xa6}, + {value: 0x812d, lo: 0xa7, hi: 0xad}, + {value: 0x8132, lo: 0xae, hi: 0xaf}, + // Block 0x69, offset 0x206 + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6a, offset 0x20b + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6b, offset 0x20d + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6c, offset 0x20f + {value: 0x0000, lo: 0x04}, + {value: 0x4aa2, lo: 0x9e, hi: 0x9f}, + {value: 0x4aa2, lo: 0xa3, hi: 0xa3}, + {value: 0x4aa2, lo: 0xa5, hi: 0xa6}, + {value: 0x4aa2, lo: 0xaa, hi: 0xaf}, + // Block 0x6d, offset 0x214 + {value: 0x0000, lo: 0x05}, + {value: 0x4aa2, lo: 0x82, hi: 0x87}, + {value: 0x4aa2, lo: 0x8a, hi: 0x8f}, + {value: 0x4aa2, lo: 0x92, hi: 0x97}, + {value: 0x4aa2, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x6e, offset 0x21a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6f, offset 0x21c + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x70, offset 0x21e + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x71, offset 0x220 + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x226 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x73, offset 0x229 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa4, hi: 0xa7}, + // Block 0x74, offset 0x22b + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x8132, lo: 0x88, hi: 0x8a}, + {value: 0x812d, lo: 0x8b, hi: 0x8b}, + {value: 0x8132, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x8d, hi: 0x90}, + // Block 0x75, offset 0x231 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x76, offset 0x234 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x423b, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4245, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424f, lo: 0xab, hi: 0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x77, offset 0x23c + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d81, lo: 0xae, hi: 0xae}, + {value: 0x2d8b, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x78, offset 0x243 + {value: 0x0000, lo: 0x02}, + 
{value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x79, offset 0x246 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x7a, offset 0x249 + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x7b, offset 0x24b + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7c, offset 0x24e + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d95, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9f, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x7d, offset 0x256 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + {value: 0x8132, lo: 0x9e, hi: 0x9e}, + // Block 0x7e, offset 0x25a + {value: 0x6b57, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db3, lo: 0xbb, hi: 0xbb}, + {value: 0x2da9, lo: 0xbc, hi: 0xbd}, + {value: 0x2dbd, lo: 0xbe, hi: 0xbe}, + // Block 0x7f, offset 0x261 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x80, offset 0x264 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc7, lo: 0xba, hi: 0xba}, + {value: 0x2dd1, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x26a + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x82, offset 0x26c + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x83, offset 0x26f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x84, offset 0x271 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb9, hi: 0xb9}, + {value: 0x8102, lo: 0xba, hi: 0xba}, + // Block 0x85, offset 0x274 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + // Block 0x86, offset 0x276 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x87, offset 0x278 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x88, offset 0x27a + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x89, offset 0x27c + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x8a, offset 0x27f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x97, hi: 0x97}, + // Block 0x8b, offset 0x281 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x8c, offset 0x283 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x8d, offset 0x285 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x8e, offset 0x287 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cf, lo: 0x9e, hi: 0x9e}, + {value: 0x45d9, lo: 0x9f, hi: 0x9f}, + {value: 0x460d, lo: 0xa0, hi: 0xa0}, + {value: 0x461b, lo: 0xa1, hi: 0xa1}, + {value: 0x4629, lo: 0xa2, hi: 0xa2}, + {value: 0x4637, lo: 0xa3, hi: 0xa3}, + {value: 0x4645, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 
0xbf}, + // Block 0x8f, offset 0x294 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e3, lo: 0xbb, hi: 0xbb}, + {value: 0x45ed, lo: 0xbc, hi: 0xbc}, + {value: 0x4653, lo: 0xbd, hi: 0xbd}, + {value: 0x466f, lo: 0xbe, hi: 0xbe}, + {value: 0x4661, lo: 0xbf, hi: 0xbf}, + // Block 0x90, offset 0x29e + {value: 0x0000, lo: 0x01}, + {value: 0x467d, lo: 0x80, hi: 0x80}, + // Block 0x91, offset 0x2a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x92, offset 0x2a2 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x93, offset 0x2a8 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xac, hi: 0xaf}, + // Block 0x94, offset 0x2aa + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x95, offset 0x2ac + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x96, offset 0x2af + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x93, hi: 0x93}, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfkcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfkcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfkcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfkcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfkcValues[c0] + } + i := nfkcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfkcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfkcTrie. Total size: 18684 bytes (18.25 KiB). Checksum: 113e23c477adfabd. +type nfkcTrie struct{} + +func newNfkcTrie(i int) *nfkcTrie { + return &nfkcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 92: + return uint16(nfkcValues[n<<6+uint32(b)]) + default: + n -= 92 + return uint16(nfkcSparse.lookup(n, b)) + } +} + +// nfkcValues: 94 blocks, 6016 entries, 12032 bytes +// The third block is the zero block. 
+var nfkcValues = [6016]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f72, 0xc1: 0x2f77, 0xc2: 0x468b, 0xc3: 0x2f7c, 0xc4: 0x469a, 0xc5: 0x469f, + 0xc6: 0xa000, 0xc7: 0x46a9, 0xc8: 0x2fe5, 0xc9: 0x2fea, 0xca: 0x46ae, 0xcb: 0x2ffe, + 0xcc: 0x3071, 0xcd: 0x3076, 0xce: 0x307b, 0xcf: 0x46c2, 0xd1: 0x3107, + 0xd2: 0x312a, 0xd3: 0x312f, 0xd4: 0x46cc, 0xd5: 0x46d1, 0xd6: 0x46e0, + 0xd8: 0xa000, 0xd9: 0x31b6, 0xda: 0x31bb, 0xdb: 0x31c0, 0xdc: 0x4712, 0xdd: 0x3238, + 0xe0: 0x327e, 0xe1: 0x3283, 0xe2: 0x471c, 0xe3: 0x3288, + 0xe4: 0x472b, 0xe5: 0x4730, 0xe6: 0xa000, 0xe7: 0x473a, 0xe8: 0x32f1, 0xe9: 0x32f6, + 0xea: 0x473f, 0xeb: 0x330a, 0xec: 0x3382, 0xed: 0x3387, 0xee: 0x338c, 0xef: 0x4753, + 0xf1: 0x3418, 0xf2: 0x343b, 0xf3: 0x3440, 0xf4: 0x475d, 0xf5: 0x4762, + 0xf6: 0x4771, 0xf8: 0xa000, 0xf9: 0x34cc, 0xfa: 0x34d1, 0xfb: 0x34d6, + 0xfc: 0x47a3, 0xfd: 0x3553, 0xff: 0x356c, + // Block 0x4, offset 0x100 + 0x100: 0x2f81, 0x101: 0x328d, 0x102: 0x4690, 0x103: 0x4721, 0x104: 0x2f9f, 0x105: 0x32ab, + 0x106: 0x2fb3, 0x107: 0x32bf, 0x108: 0x2fb8, 0x109: 0x32c4, 0x10a: 0x2fbd, 0x10b: 0x32c9, + 0x10c: 0x2fc2, 0x10d: 0x32ce, 0x10e: 0x2fcc, 0x10f: 0x32d8, + 0x112: 0x46b3, 0x113: 0x4744, 0x114: 0x2ff4, 0x115: 0x3300, 0x116: 0x2ff9, 0x117: 0x3305, + 0x118: 0x3017, 0x119: 0x3323, 0x11a: 0x3008, 0x11b: 0x3314, 0x11c: 0x3030, 0x11d: 0x333c, + 0x11e: 0x303a, 0x11f: 0x3346, 0x120: 0x303f, 0x121: 0x334b, 0x122: 0x3049, 0x123: 0x3355, + 0x124: 0x304e, 0x125: 0x335a, 0x128: 0x3080, 0x129: 0x3391, + 0x12a: 0x3085, 0x12b: 0x3396, 0x12c: 0x308a, 0x12d: 0x339b, 0x12e: 0x30ad, 0x12f: 0x33b9, + 0x130: 0x308f, 0x132: 0x195d, 0x133: 0x19ea, 0x134: 0x30b7, 0x135: 0x33c3, + 0x136: 0x30cb, 0x137: 0x33dc, 0x139: 0x30d5, 0x13a: 0x33e6, 0x13b: 0x30df, + 0x13c: 0x33f0, 0x13d: 0x30da, 0x13e: 0x33eb, 0x13f: 0x1baf, + // Block 0x5, offset 0x140 + 0x140: 0x1c37, 0x143: 0x3102, 0x144: 0x3413, 0x145: 0x311b, + 0x146: 0x342c, 0x147: 0x3111, 0x148: 0x3422, 0x149: 0x1c5f, + 0x14c: 0x46d6, 0x14d: 0x4767, 0x14e: 0x3134, 0x14f: 0x3445, 0x150: 0x313e, 0x151: 0x344f, + 0x154: 0x315c, 0x155: 0x346d, 0x156: 0x3175, 0x157: 0x3486, + 0x158: 0x3166, 0x159: 0x3477, 0x15a: 0x46f9, 0x15b: 0x478a, 0x15c: 0x317f, 0x15d: 0x3490, + 0x15e: 0x318e, 0x15f: 0x349f, 0x160: 0x46fe, 0x161: 0x478f, 0x162: 0x31a7, 0x163: 0x34bd, + 0x164: 0x3198, 0x165: 0x34ae, 0x168: 0x4708, 0x169: 0x4799, + 0x16a: 0x470d, 0x16b: 0x479e, 0x16c: 0x31c5, 0x16d: 0x34db, 0x16e: 0x31cf, 0x16f: 0x34e5, + 0x170: 0x31d4, 0x171: 0x34ea, 0x172: 0x31f2, 0x173: 0x3508, 0x174: 0x3215, 0x175: 0x352b, + 0x176: 0x323d, 0x177: 0x3558, 0x178: 0x3251, 0x179: 0x3260, 0x17a: 0x3580, 0x17b: 0x326a, + 0x17c: 0x358a, 0x17d: 0x326f, 0x17e: 0x358f, 0x17f: 0x00a7, + // Block 0x6, 
offset 0x180 + 0x184: 0x2df1, 0x185: 0x2df7, + 0x186: 0x2dfd, 0x187: 0x1972, 0x188: 0x1975, 0x189: 0x1a0b, 0x18a: 0x198a, 0x18b: 0x198d, + 0x18c: 0x1a41, 0x18d: 0x2f8b, 0x18e: 0x3297, 0x18f: 0x3099, 0x190: 0x33a5, 0x191: 0x3143, + 0x192: 0x3454, 0x193: 0x31d9, 0x194: 0x34ef, 0x195: 0x39d2, 0x196: 0x3b61, 0x197: 0x39cb, + 0x198: 0x3b5a, 0x199: 0x39d9, 0x19a: 0x3b68, 0x19b: 0x39c4, 0x19c: 0x3b53, + 0x19e: 0x38b3, 0x19f: 0x3a42, 0x1a0: 0x38ac, 0x1a1: 0x3a3b, 0x1a2: 0x35b6, 0x1a3: 0x35c8, + 0x1a6: 0x3044, 0x1a7: 0x3350, 0x1a8: 0x30c1, 0x1a9: 0x33d2, + 0x1aa: 0x46ef, 0x1ab: 0x4780, 0x1ac: 0x3993, 0x1ad: 0x3b22, 0x1ae: 0x35da, 0x1af: 0x35e0, + 0x1b0: 0x33c8, 0x1b1: 0x1942, 0x1b2: 0x1945, 0x1b3: 0x19d2, 0x1b4: 0x302b, 0x1b5: 0x3337, + 0x1b8: 0x30fd, 0x1b9: 0x340e, 0x1ba: 0x38ba, 0x1bb: 0x3a49, + 0x1bc: 0x35b0, 0x1bd: 0x35c2, 0x1be: 0x35bc, 0x1bf: 0x35ce, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f90, 0x1c1: 0x329c, 0x1c2: 0x2f95, 0x1c3: 0x32a1, 0x1c4: 0x300d, 0x1c5: 0x3319, + 0x1c6: 0x3012, 0x1c7: 0x331e, 0x1c8: 0x309e, 0x1c9: 0x33aa, 0x1ca: 0x30a3, 0x1cb: 0x33af, + 0x1cc: 0x3148, 0x1cd: 0x3459, 0x1ce: 0x314d, 0x1cf: 0x345e, 0x1d0: 0x316b, 0x1d1: 0x347c, + 0x1d2: 0x3170, 0x1d3: 0x3481, 0x1d4: 0x31de, 0x1d5: 0x34f4, 0x1d6: 0x31e3, 0x1d7: 0x34f9, + 0x1d8: 0x3189, 0x1d9: 0x349a, 0x1da: 0x31a2, 0x1db: 0x34b8, + 0x1de: 0x305d, 0x1df: 0x3369, + 0x1e6: 0x4695, 0x1e7: 0x4726, 0x1e8: 0x46bd, 0x1e9: 0x474e, + 0x1ea: 0x3962, 0x1eb: 0x3af1, 0x1ec: 0x393f, 0x1ed: 0x3ace, 0x1ee: 0x46db, 0x1ef: 0x476c, + 0x1f0: 0x395b, 0x1f1: 0x3aea, 0x1f2: 0x3247, 0x1f3: 0x3562, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49b1, 0x241: 0x49b6, 0x242: 0x9932, 0x243: 0x49bb, 0x244: 0x4a74, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x42a8, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x425d, 0x285: 0x447e, + 0x286: 0x35ec, 0x287: 0x00ce, 0x288: 0x360a, 0x289: 0x3616, 0x28a: 0x3628, + 0x28c: 0x3646, 0x28e: 0x3658, 
0x28f: 0x3676, 0x290: 0x3e0b, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x363a, 0x2ab: 0x366a, 0x2ac: 0x4801, 0x2ad: 0x369a, 0x2ae: 0x482b, 0x2af: 0x36ac, + 0x2b0: 0x3e73, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4843, 0x2cb: 0x4861, + 0x2cc: 0x36ca, 0x2cd: 0x36e2, 0x2ce: 0x4879, 0x2d0: 0x01be, 0x2d1: 0x01d0, + 0x2d2: 0x01ac, 0x2d3: 0x430f, 0x2d4: 0x4315, 0x2d5: 0x01fa, 0x2d6: 0x01e8, + 0x2f0: 0x01d6, 0x2f1: 0x01eb, 0x2f2: 0x01ee, 0x2f4: 0x0188, 0x2f5: 0x01c7, + 0x2f9: 0x01a6, + // Block 0xc, offset 0x300 + 0x300: 0x3724, 0x301: 0x3730, 0x303: 0x371e, + 0x306: 0xa000, 0x307: 0x370c, + 0x30c: 0x3760, 0x30d: 0x3748, 0x30e: 0x3772, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3754, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37d8, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x3736, 0x342: 0x37ba, + 0x350: 0x3712, 0x351: 0x3796, + 0x352: 0x3718, 0x353: 0x379c, 0x356: 0x372a, 0x357: 0x37ae, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x382c, 0x35b: 0x3832, 0x35c: 0x373c, 0x35d: 0x37c0, + 0x35e: 0x3742, 0x35f: 0x37c6, 0x362: 0x374e, 0x363: 0x37d2, + 0x364: 0x375a, 0x365: 0x37de, 0x366: 0x3766, 0x367: 0x37ea, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x3838, 0x36b: 0x383e, 0x36c: 0x3790, 0x36d: 0x3814, 0x36e: 0x376c, 0x36f: 0x37f0, + 0x370: 0x3778, 0x371: 0x37fc, 0x372: 0x377e, 0x373: 0x3802, 0x374: 0x3784, 0x375: 0x3808, + 0x378: 0x378a, 0x379: 0x380e, + // Block 0xe, offset 0x380 + 0x387: 0x1d64, + 0x391: 0x812d, + 0x392: 0x8132, 0x393: 0x8132, 0x394: 0x8132, 0x395: 0x8132, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x812e, 0x39b: 0x812d, 0x39c: 0x8132, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x8132, 0x3a0: 0x8132, 0x3a1: 0x8132, 0x3a2: 0x812d, 0x3a3: 0x812d, + 0x3a4: 0x812d, 0x3a5: 0x812d, 0x3a6: 0x812d, 0x3a7: 0x812d, 0x3a8: 0x8132, 0x3a9: 0x8132, + 0x3aa: 0x812d, 0x3ab: 0x8132, 0x3ac: 0x8132, 0x3ad: 0x812e, 0x3ae: 0x8131, 0x3af: 0x8132, + 0x3b0: 0x8105, 0x3b1: 0x8106, 0x3b2: 0x8107, 0x3b3: 0x8108, 0x3b4: 0x8109, 0x3b5: 0x810a, + 0x3b6: 0x810b, 0x3b7: 0x810c, 0x3b8: 0x810d, 0x3b9: 0x810e, 0x3ba: 0x810e, 0x3bb: 0x810f, + 0x3bc: 0x8110, 0x3bd: 0x8111, 0x3bf: 0x8112, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8116, + 0x3cc: 0x8117, 0x3cd: 0x8118, 0x3ce: 0x8119, 0x3cf: 0x811a, 0x3d0: 0x811b, 0x3d1: 0x811c, + 0x3d2: 0x811d, 0x3d3: 0x9932, 0x3d4: 0x9932, 0x3d5: 0x992d, 0x3d6: 0x812d, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x812d, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x812d, + 0x3f0: 0x811e, 0x3f5: 0x1d87, + 0x3f6: 0x2016, 0x3f7: 0x2052, 0x3f8: 0x204d, + // Block 0x10, offset 0x400 + 0x413: 0x812d, 0x414: 0x8132, 0x415: 0x8132, 0x416: 0x8132, 0x417: 0x8132, + 0x418: 0x8132, 0x419: 0x8132, 0x41a: 0x8132, 0x41b: 0x8132, 0x41c: 0x8132, 0x41d: 0x8132, + 0x41e: 0x8132, 0x41f: 0x8132, 0x420: 0x8132, 0x421: 0x8132, 0x423: 0x812d, + 0x424: 0x8132, 0x425: 0x8132, 0x426: 0x812d, 0x427: 0x8132, 0x428: 0x8132, 0x429: 0x812d, + 0x42a: 0x8132, 0x42b: 0x8132, 0x42c: 0x8132, 0x42d: 0x812d, 0x42e: 0x812d, 0x42f: 0x812d, + 0x430: 0x8116, 0x431: 0x8117, 0x432: 0x8118, 0x433: 
0x8132, 0x434: 0x8132, 0x435: 0x8132, + 0x436: 0x812d, 0x437: 0x8132, 0x438: 0x8132, 0x439: 0x812d, 0x43a: 0x812d, 0x43b: 0x8132, + 0x43c: 0x8132, 0x43d: 0x8132, 0x43e: 0x8132, 0x43f: 0x8132, + // Block 0x11, offset 0x440 + 0x445: 0xa000, + 0x446: 0x2d29, 0x447: 0xa000, 0x448: 0x2d31, 0x449: 0xa000, 0x44a: 0x2d39, 0x44b: 0xa000, + 0x44c: 0x2d41, 0x44d: 0xa000, 0x44e: 0x2d49, 0x451: 0xa000, + 0x452: 0x2d51, + 0x474: 0x8102, 0x475: 0x9900, + 0x47a: 0xa000, 0x47b: 0x2d59, + 0x47c: 0xa000, 0x47d: 0x2d61, 0x47e: 0xa000, 0x47f: 0xa000, + // Block 0x12, offset 0x480 + 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x00f5, 0x485: 0x00f8, + 0x486: 0x0413, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x0104, 0x48b: 0x0107, + 0x48c: 0x010a, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e0, + 0x492: 0x009f, 0x493: 0x00fe, 0x494: 0x0417, 0x495: 0x041b, 0x496: 0x00a1, 0x497: 0x00a9, + 0x498: 0x00ab, 0x499: 0x0423, 0x49a: 0x012b, 0x49b: 0x00ad, 0x49c: 0x0427, 0x49d: 0x01be, + 0x49e: 0x01c1, 0x49f: 0x01c4, 0x4a0: 0x01fa, 0x4a1: 0x01fd, 0x4a2: 0x0093, 0x4a3: 0x00a5, + 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x01be, 0x4a7: 0x01c1, 0x4a8: 0x01eb, 0x4a9: 0x01fa, + 0x4aa: 0x01fd, + 0x4b8: 0x020c, + // Block 0x13, offset 0x4c0 + 0x4db: 0x00fb, 0x4dc: 0x0087, 0x4dd: 0x0101, + 0x4de: 0x00d4, 0x4df: 0x010a, 0x4e0: 0x008d, 0x4e1: 0x010d, 0x4e2: 0x0110, 0x4e3: 0x0116, + 0x4e4: 0x011c, 0x4e5: 0x011f, 0x4e6: 0x0122, 0x4e7: 0x042b, 0x4e8: 0x016a, 0x4e9: 0x0128, + 0x4ea: 0x042f, 0x4eb: 0x016d, 0x4ec: 0x0131, 0x4ed: 0x012e, 0x4ee: 0x0134, 0x4ef: 0x0137, + 0x4f0: 0x013a, 0x4f1: 0x013d, 0x4f2: 0x0140, 0x4f3: 0x014c, 0x4f4: 0x014f, 0x4f5: 0x00ec, + 0x4f6: 0x0152, 0x4f7: 0x0155, 0x4f8: 0x041f, 0x4f9: 0x0158, 0x4fa: 0x015b, 0x4fb: 0x00b5, + 0x4fc: 0x015e, 0x4fd: 0x0161, 0x4fe: 0x0164, 0x4ff: 0x01d0, + // Block 0x14, offset 0x500 + 0x500: 0x8132, 0x501: 0x8132, 0x502: 0x812d, 0x503: 0x8132, 0x504: 0x8132, 0x505: 0x8132, + 0x506: 0x8132, 0x507: 0x8132, 0x508: 0x8132, 0x509: 0x8132, 0x50a: 0x812d, 0x50b: 0x8132, + 0x50c: 0x8132, 0x50d: 0x8135, 0x50e: 0x812a, 0x50f: 0x812d, 0x510: 0x8129, 0x511: 0x8132, + 0x512: 0x8132, 0x513: 0x8132, 0x514: 0x8132, 0x515: 0x8132, 0x516: 0x8132, 0x517: 0x8132, + 0x518: 0x8132, 0x519: 0x8132, 0x51a: 0x8132, 0x51b: 0x8132, 0x51c: 0x8132, 0x51d: 0x8132, + 0x51e: 0x8132, 0x51f: 0x8132, 0x520: 0x8132, 0x521: 0x8132, 0x522: 0x8132, 0x523: 0x8132, + 0x524: 0x8132, 0x525: 0x8132, 0x526: 0x8132, 0x527: 0x8132, 0x528: 0x8132, 0x529: 0x8132, + 0x52a: 0x8132, 0x52b: 0x8132, 0x52c: 0x8132, 0x52d: 0x8132, 0x52e: 0x8132, 0x52f: 0x8132, + 0x530: 0x8132, 0x531: 0x8132, 0x532: 0x8132, 0x533: 0x8132, 0x534: 0x8132, 0x535: 0x8132, + 0x536: 0x8133, 0x537: 0x8131, 0x538: 0x8131, 0x539: 0x812d, 0x53b: 0x8132, + 0x53c: 0x8134, 0x53d: 0x812d, 0x53e: 0x8132, 0x53f: 0x812d, + // Block 0x15, offset 0x540 + 0x540: 0x2f9a, 0x541: 0x32a6, 0x542: 0x2fa4, 0x543: 0x32b0, 0x544: 0x2fa9, 0x545: 0x32b5, + 0x546: 0x2fae, 0x547: 0x32ba, 0x548: 0x38cf, 0x549: 0x3a5e, 0x54a: 0x2fc7, 0x54b: 0x32d3, + 0x54c: 0x2fd1, 0x54d: 0x32dd, 0x54e: 0x2fe0, 0x54f: 0x32ec, 0x550: 0x2fd6, 0x551: 0x32e2, + 0x552: 0x2fdb, 0x553: 0x32e7, 0x554: 0x38f2, 0x555: 0x3a81, 0x556: 0x38f9, 0x557: 0x3a88, + 0x558: 0x301c, 0x559: 0x3328, 0x55a: 0x3021, 0x55b: 0x332d, 0x55c: 0x3907, 0x55d: 0x3a96, + 0x55e: 0x3026, 0x55f: 0x3332, 0x560: 0x3035, 0x561: 0x3341, 0x562: 0x3053, 0x563: 0x335f, + 0x564: 0x3062, 0x565: 0x336e, 0x566: 0x3058, 0x567: 0x3364, 0x568: 0x3067, 0x569: 0x3373, + 0x56a: 0x306c, 0x56b: 0x3378, 0x56c: 0x30b2, 0x56d: 
0x33be, 0x56e: 0x390e, 0x56f: 0x3a9d, + 0x570: 0x30bc, 0x571: 0x33cd, 0x572: 0x30c6, 0x573: 0x33d7, 0x574: 0x30d0, 0x575: 0x33e1, + 0x576: 0x46c7, 0x577: 0x4758, 0x578: 0x3915, 0x579: 0x3aa4, 0x57a: 0x30e9, 0x57b: 0x33fa, + 0x57c: 0x30e4, 0x57d: 0x33f5, 0x57e: 0x30ee, 0x57f: 0x33ff, + // Block 0x16, offset 0x580 + 0x580: 0x30f3, 0x581: 0x3404, 0x582: 0x30f8, 0x583: 0x3409, 0x584: 0x310c, 0x585: 0x341d, + 0x586: 0x3116, 0x587: 0x3427, 0x588: 0x3125, 0x589: 0x3436, 0x58a: 0x3120, 0x58b: 0x3431, + 0x58c: 0x3938, 0x58d: 0x3ac7, 0x58e: 0x3946, 0x58f: 0x3ad5, 0x590: 0x394d, 0x591: 0x3adc, + 0x592: 0x3954, 0x593: 0x3ae3, 0x594: 0x3152, 0x595: 0x3463, 0x596: 0x3157, 0x597: 0x3468, + 0x598: 0x3161, 0x599: 0x3472, 0x59a: 0x46f4, 0x59b: 0x4785, 0x59c: 0x399a, 0x59d: 0x3b29, + 0x59e: 0x317a, 0x59f: 0x348b, 0x5a0: 0x3184, 0x5a1: 0x3495, 0x5a2: 0x4703, 0x5a3: 0x4794, + 0x5a4: 0x39a1, 0x5a5: 0x3b30, 0x5a6: 0x39a8, 0x5a7: 0x3b37, 0x5a8: 0x39af, 0x5a9: 0x3b3e, + 0x5aa: 0x3193, 0x5ab: 0x34a4, 0x5ac: 0x319d, 0x5ad: 0x34b3, 0x5ae: 0x31b1, 0x5af: 0x34c7, + 0x5b0: 0x31ac, 0x5b1: 0x34c2, 0x5b2: 0x31ed, 0x5b3: 0x3503, 0x5b4: 0x31fc, 0x5b5: 0x3512, + 0x5b6: 0x31f7, 0x5b7: 0x350d, 0x5b8: 0x39b6, 0x5b9: 0x3b45, 0x5ba: 0x39bd, 0x5bb: 0x3b4c, + 0x5bc: 0x3201, 0x5bd: 0x3517, 0x5be: 0x3206, 0x5bf: 0x351c, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x320b, 0x5c1: 0x3521, 0x5c2: 0x3210, 0x5c3: 0x3526, 0x5c4: 0x321f, 0x5c5: 0x3535, + 0x5c6: 0x321a, 0x5c7: 0x3530, 0x5c8: 0x3224, 0x5c9: 0x353f, 0x5ca: 0x3229, 0x5cb: 0x3544, + 0x5cc: 0x322e, 0x5cd: 0x3549, 0x5ce: 0x324c, 0x5cf: 0x3567, 0x5d0: 0x3265, 0x5d1: 0x3585, + 0x5d2: 0x3274, 0x5d3: 0x3594, 0x5d4: 0x3279, 0x5d5: 0x3599, 0x5d6: 0x337d, 0x5d7: 0x34a9, + 0x5d8: 0x353a, 0x5d9: 0x3576, 0x5da: 0x1be3, 0x5db: 0x42da, + 0x5e0: 0x46a4, 0x5e1: 0x4735, 0x5e2: 0x2f86, 0x5e3: 0x3292, + 0x5e4: 0x387b, 0x5e5: 0x3a0a, 0x5e6: 0x3874, 0x5e7: 0x3a03, 0x5e8: 0x3889, 0x5e9: 0x3a18, + 0x5ea: 0x3882, 0x5eb: 0x3a11, 0x5ec: 0x38c1, 0x5ed: 0x3a50, 0x5ee: 0x3897, 0x5ef: 0x3a26, + 0x5f0: 0x3890, 0x5f1: 0x3a1f, 0x5f2: 0x38a5, 0x5f3: 0x3a34, 0x5f4: 0x389e, 0x5f5: 0x3a2d, + 0x5f6: 0x38c8, 0x5f7: 0x3a57, 0x5f8: 0x46b8, 0x5f9: 0x4749, 0x5fa: 0x3003, 0x5fb: 0x330f, + 0x5fc: 0x2fef, 0x5fd: 0x32fb, 0x5fe: 0x38dd, 0x5ff: 0x3a6c, + // Block 0x18, offset 0x600 + 0x600: 0x38d6, 0x601: 0x3a65, 0x602: 0x38eb, 0x603: 0x3a7a, 0x604: 0x38e4, 0x605: 0x3a73, + 0x606: 0x3900, 0x607: 0x3a8f, 0x608: 0x3094, 0x609: 0x33a0, 0x60a: 0x30a8, 0x60b: 0x33b4, + 0x60c: 0x46ea, 0x60d: 0x477b, 0x60e: 0x3139, 0x60f: 0x344a, 0x610: 0x3923, 0x611: 0x3ab2, + 0x612: 0x391c, 0x613: 0x3aab, 0x614: 0x3931, 0x615: 0x3ac0, 0x616: 0x392a, 0x617: 0x3ab9, + 0x618: 0x398c, 0x619: 0x3b1b, 0x61a: 0x3970, 0x61b: 0x3aff, 0x61c: 0x3969, 0x61d: 0x3af8, + 0x61e: 0x397e, 0x61f: 0x3b0d, 0x620: 0x3977, 0x621: 0x3b06, 0x622: 0x3985, 0x623: 0x3b14, + 0x624: 0x31e8, 0x625: 0x34fe, 0x626: 0x31ca, 0x627: 0x34e0, 0x628: 0x39e7, 0x629: 0x3b76, + 0x62a: 0x39e0, 0x62b: 0x3b6f, 0x62c: 0x39f5, 0x62d: 0x3b84, 0x62e: 0x39ee, 0x62f: 0x3b7d, + 0x630: 0x39fc, 0x631: 0x3b8b, 0x632: 0x3233, 0x633: 0x354e, 0x634: 0x325b, 0x635: 0x357b, + 0x636: 0x3256, 0x637: 0x3571, 0x638: 0x3242, 0x639: 0x355d, + // Block 0x19, offset 0x640 + 0x640: 0x4807, 0x641: 0x480d, 0x642: 0x4921, 0x643: 0x4939, 0x644: 0x4929, 0x645: 0x4941, + 0x646: 0x4931, 0x647: 0x4949, 0x648: 0x47ad, 0x649: 0x47b3, 0x64a: 0x4891, 0x64b: 0x48a9, + 0x64c: 0x4899, 0x64d: 0x48b1, 0x64e: 0x48a1, 0x64f: 0x48b9, 0x650: 0x4819, 0x651: 0x481f, + 0x652: 0x3dbb, 0x653: 0x3dcb, 0x654: 0x3dc3, 0x655: 0x3dd3, + 0x658: 0x47b9, 
0x659: 0x47bf, 0x65a: 0x3ceb, 0x65b: 0x3cfb, 0x65c: 0x3cf3, 0x65d: 0x3d03, + 0x660: 0x4831, 0x661: 0x4837, 0x662: 0x4951, 0x663: 0x4969, + 0x664: 0x4959, 0x665: 0x4971, 0x666: 0x4961, 0x667: 0x4979, 0x668: 0x47c5, 0x669: 0x47cb, + 0x66a: 0x48c1, 0x66b: 0x48d9, 0x66c: 0x48c9, 0x66d: 0x48e1, 0x66e: 0x48d1, 0x66f: 0x48e9, + 0x670: 0x4849, 0x671: 0x484f, 0x672: 0x3e1b, 0x673: 0x3e33, 0x674: 0x3e23, 0x675: 0x3e3b, + 0x676: 0x3e2b, 0x677: 0x3e43, 0x678: 0x47d1, 0x679: 0x47d7, 0x67a: 0x3d1b, 0x67b: 0x3d33, + 0x67c: 0x3d23, 0x67d: 0x3d3b, 0x67e: 0x3d2b, 0x67f: 0x3d43, + // Block 0x1a, offset 0x680 + 0x680: 0x4855, 0x681: 0x485b, 0x682: 0x3e4b, 0x683: 0x3e5b, 0x684: 0x3e53, 0x685: 0x3e63, + 0x688: 0x47dd, 0x689: 0x47e3, 0x68a: 0x3d4b, 0x68b: 0x3d5b, + 0x68c: 0x3d53, 0x68d: 0x3d63, 0x690: 0x4867, 0x691: 0x486d, + 0x692: 0x3e83, 0x693: 0x3e9b, 0x694: 0x3e8b, 0x695: 0x3ea3, 0x696: 0x3e93, 0x697: 0x3eab, + 0x699: 0x47e9, 0x69b: 0x3d6b, 0x69d: 0x3d73, + 0x69f: 0x3d7b, 0x6a0: 0x487f, 0x6a1: 0x4885, 0x6a2: 0x4981, 0x6a3: 0x4999, + 0x6a4: 0x4989, 0x6a5: 0x49a1, 0x6a6: 0x4991, 0x6a7: 0x49a9, 0x6a8: 0x47ef, 0x6a9: 0x47f5, + 0x6aa: 0x48f1, 0x6ab: 0x4909, 0x6ac: 0x48f9, 0x6ad: 0x4911, 0x6ae: 0x4901, 0x6af: 0x4919, + 0x6b0: 0x47fb, 0x6b1: 0x4321, 0x6b2: 0x3694, 0x6b3: 0x4327, 0x6b4: 0x4825, 0x6b5: 0x432d, + 0x6b6: 0x36a6, 0x6b7: 0x4333, 0x6b8: 0x36c4, 0x6b9: 0x4339, 0x6ba: 0x36dc, 0x6bb: 0x433f, + 0x6bc: 0x4873, 0x6bd: 0x4345, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3da3, 0x6c1: 0x3dab, 0x6c2: 0x4187, 0x6c3: 0x41a5, 0x6c4: 0x4191, 0x6c5: 0x41af, + 0x6c6: 0x419b, 0x6c7: 0x41b9, 0x6c8: 0x3cdb, 0x6c9: 0x3ce3, 0x6ca: 0x40d3, 0x6cb: 0x40f1, + 0x6cc: 0x40dd, 0x6cd: 0x40fb, 0x6ce: 0x40e7, 0x6cf: 0x4105, 0x6d0: 0x3deb, 0x6d1: 0x3df3, + 0x6d2: 0x41c3, 0x6d3: 0x41e1, 0x6d4: 0x41cd, 0x6d5: 0x41eb, 0x6d6: 0x41d7, 0x6d7: 0x41f5, + 0x6d8: 0x3d0b, 0x6d9: 0x3d13, 0x6da: 0x410f, 0x6db: 0x412d, 0x6dc: 0x4119, 0x6dd: 0x4137, + 0x6de: 0x4123, 0x6df: 0x4141, 0x6e0: 0x3ec3, 0x6e1: 0x3ecb, 0x6e2: 0x41ff, 0x6e3: 0x421d, + 0x6e4: 0x4209, 0x6e5: 0x4227, 0x6e6: 0x4213, 0x6e7: 0x4231, 0x6e8: 0x3d83, 0x6e9: 0x3d8b, + 0x6ea: 0x414b, 0x6eb: 0x4169, 0x6ec: 0x4155, 0x6ed: 0x4173, 0x6ee: 0x415f, 0x6ef: 0x417d, + 0x6f0: 0x3688, 0x6f1: 0x3682, 0x6f2: 0x3d93, 0x6f3: 0x368e, 0x6f4: 0x3d9b, + 0x6f6: 0x4813, 0x6f7: 0x3db3, 0x6f8: 0x35f8, 0x6f9: 0x35f2, 0x6fa: 0x35e6, 0x6fb: 0x42f1, + 0x6fc: 0x35fe, 0x6fd: 0x428a, 0x6fe: 0x01d3, 0x6ff: 0x428a, + // Block 0x1c, offset 0x700 + 0x700: 0x42a3, 0x701: 0x4485, 0x702: 0x3ddb, 0x703: 0x36a0, 0x704: 0x3de3, + 0x706: 0x483d, 0x707: 0x3dfb, 0x708: 0x3604, 0x709: 0x42f7, 0x70a: 0x3610, 0x70b: 0x42fd, + 0x70c: 0x361c, 0x70d: 0x448c, 0x70e: 0x4493, 0x70f: 0x449a, 0x710: 0x36b8, 0x711: 0x36b2, + 0x712: 0x3e03, 0x713: 0x44e7, 0x716: 0x36be, 0x717: 0x3e13, + 0x718: 0x3634, 0x719: 0x362e, 0x71a: 0x3622, 0x71b: 0x4303, 0x71d: 0x44a1, + 0x71e: 0x44a8, 0x71f: 0x44af, 0x720: 0x36ee, 0x721: 0x36e8, 0x722: 0x3e6b, 0x723: 0x44ef, + 0x724: 0x36d0, 0x725: 0x36d6, 0x726: 0x36f4, 0x727: 0x3e7b, 0x728: 0x3664, 0x729: 0x365e, + 0x72a: 0x3652, 0x72b: 0x430f, 0x72c: 0x364c, 0x72d: 0x4477, 0x72e: 0x447e, 0x72f: 0x0081, + 0x732: 0x3eb3, 0x733: 0x36fa, 0x734: 0x3ebb, + 0x736: 0x488b, 0x737: 0x3ed3, 0x738: 0x3640, 0x739: 0x4309, 0x73a: 0x3670, 0x73b: 0x431b, + 0x73c: 0x367c, 0x73d: 0x425d, 0x73e: 0x428f, + // Block 0x1d, offset 0x740 + 0x740: 0x1bdb, 0x741: 0x1bdf, 0x742: 0x0047, 0x743: 0x1c57, 0x745: 0x1beb, + 0x746: 0x1bef, 0x747: 0x00e9, 0x749: 0x1c5b, 0x74a: 0x008f, 0x74b: 0x0051, + 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 
0x74f: 0x00da, 0x750: 0x0053, 0x751: 0x0053, + 0x752: 0x0059, 0x753: 0x0099, 0x755: 0x005d, 0x756: 0x1990, + 0x759: 0x0061, 0x75a: 0x0063, 0x75b: 0x0065, 0x75c: 0x0065, 0x75d: 0x0065, + 0x760: 0x19a2, 0x761: 0x1bcb, 0x762: 0x19ab, + 0x764: 0x0075, 0x766: 0x01b8, 0x768: 0x0075, + 0x76a: 0x0057, 0x76b: 0x42d5, 0x76c: 0x0045, 0x76d: 0x0047, 0x76f: 0x008b, + 0x770: 0x004b, 0x771: 0x004d, 0x773: 0x005b, 0x774: 0x009f, 0x775: 0x0215, + 0x776: 0x0218, 0x777: 0x021b, 0x778: 0x021e, 0x779: 0x0093, 0x77b: 0x1b9b, + 0x77c: 0x01e8, 0x77d: 0x01c1, 0x77e: 0x0179, 0x77f: 0x01a0, + // Block 0x1e, offset 0x780 + 0x780: 0x0463, 0x785: 0x0049, + 0x786: 0x0089, 0x787: 0x008b, 0x788: 0x0093, 0x789: 0x0095, + 0x790: 0x2231, 0x791: 0x223d, + 0x792: 0x22f1, 0x793: 0x2219, 0x794: 0x229d, 0x795: 0x2225, 0x796: 0x22a3, 0x797: 0x22bb, + 0x798: 0x22c7, 0x799: 0x222b, 0x79a: 0x22cd, 0x79b: 0x2237, 0x79c: 0x22c1, 0x79d: 0x22d3, + 0x79e: 0x22d9, 0x79f: 0x1cbf, 0x7a0: 0x0053, 0x7a1: 0x195a, 0x7a2: 0x1ba7, 0x7a3: 0x1963, + 0x7a4: 0x006d, 0x7a5: 0x19ae, 0x7a6: 0x1bd3, 0x7a7: 0x1d4b, 0x7a8: 0x1966, 0x7a9: 0x0071, + 0x7aa: 0x19ba, 0x7ab: 0x1bd7, 0x7ac: 0x0059, 0x7ad: 0x0047, 0x7ae: 0x0049, 0x7af: 0x005b, + 0x7b0: 0x0093, 0x7b1: 0x19e7, 0x7b2: 0x1c1b, 0x7b3: 0x19f0, 0x7b4: 0x00ad, 0x7b5: 0x1a65, + 0x7b6: 0x1c4f, 0x7b7: 0x1d5f, 0x7b8: 0x19f3, 0x7b9: 0x00b1, 0x7ba: 0x1a68, 0x7bb: 0x1c53, + 0x7bc: 0x0099, 0x7bd: 0x0087, 0x7be: 0x0089, 0x7bf: 0x009b, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x3c09, 0x7c3: 0xa000, 0x7c4: 0x3c10, 0x7c5: 0xa000, + 0x7c7: 0x3c17, 0x7c8: 0xa000, 0x7c9: 0x3c1e, + 0x7cd: 0xa000, + 0x7e0: 0x2f68, 0x7e1: 0xa000, 0x7e2: 0x3c2c, + 0x7e4: 0xa000, 0x7e5: 0xa000, + 0x7ed: 0x3c25, 0x7ee: 0x2f63, 0x7ef: 0x2f6d, + 0x7f0: 0x3c33, 0x7f1: 0x3c3a, 0x7f2: 0xa000, 0x7f3: 0xa000, 0x7f4: 0x3c41, 0x7f5: 0x3c48, + 0x7f6: 0xa000, 0x7f7: 0xa000, 0x7f8: 0x3c4f, 0x7f9: 0x3c56, 0x7fa: 0xa000, 0x7fb: 0xa000, + 0x7fc: 0xa000, 0x7fd: 0xa000, + // Block 0x20, offset 0x800 + 0x800: 0x3c5d, 0x801: 0x3c64, 0x802: 0xa000, 0x803: 0xa000, 0x804: 0x3c79, 0x805: 0x3c80, + 0x806: 0xa000, 0x807: 0xa000, 0x808: 0x3c87, 0x809: 0x3c8e, + 0x811: 0xa000, + 0x812: 0xa000, + 0x822: 0xa000, + 0x828: 0xa000, 0x829: 0xa000, + 0x82b: 0xa000, 0x82c: 0x3ca3, 0x82d: 0x3caa, 0x82e: 0x3cb1, 0x82f: 0x3cb8, + 0x832: 0xa000, 0x833: 0xa000, 0x834: 0xa000, 0x835: 0xa000, + // Block 0x21, offset 0x840 + 0x860: 0x0023, 0x861: 0x0025, 0x862: 0x0027, 0x863: 0x0029, + 0x864: 0x002b, 0x865: 0x002d, 0x866: 0x002f, 0x867: 0x0031, 0x868: 0x0033, 0x869: 0x1882, + 0x86a: 0x1885, 0x86b: 0x1888, 0x86c: 0x188b, 0x86d: 0x188e, 0x86e: 0x1891, 0x86f: 0x1894, + 0x870: 0x1897, 0x871: 0x189a, 0x872: 0x189d, 0x873: 0x18a6, 0x874: 0x1a6b, 0x875: 0x1a6f, + 0x876: 0x1a73, 0x877: 0x1a77, 0x878: 0x1a7b, 0x879: 0x1a7f, 0x87a: 0x1a83, 0x87b: 0x1a87, + 0x87c: 0x1a8b, 0x87d: 0x1c83, 0x87e: 0x1c88, 0x87f: 0x1c8d, + // Block 0x22, offset 0x880 + 0x880: 0x1c92, 0x881: 0x1c97, 0x882: 0x1c9c, 0x883: 0x1ca1, 0x884: 0x1ca6, 0x885: 0x1cab, + 0x886: 0x1cb0, 0x887: 0x1cb5, 0x888: 0x187f, 0x889: 0x18a3, 0x88a: 0x18c7, 0x88b: 0x18eb, + 0x88c: 0x190f, 0x88d: 0x1918, 0x88e: 0x191e, 0x88f: 0x1924, 0x890: 0x192a, 0x891: 0x1b63, + 0x892: 0x1b67, 0x893: 0x1b6b, 0x894: 0x1b6f, 0x895: 0x1b73, 0x896: 0x1b77, 0x897: 0x1b7b, + 0x898: 0x1b7f, 0x899: 0x1b83, 0x89a: 0x1b87, 0x89b: 0x1b8b, 0x89c: 0x1af7, 0x89d: 0x1afb, + 0x89e: 0x1aff, 0x89f: 0x1b03, 0x8a0: 0x1b07, 0x8a1: 0x1b0b, 0x8a2: 0x1b0f, 0x8a3: 0x1b13, + 0x8a4: 0x1b17, 0x8a5: 0x1b1b, 0x8a6: 0x1b1f, 0x8a7: 0x1b23, 0x8a8: 0x1b27, 0x8a9: 0x1b2b, + 0x8aa: 0x1b2f, 
0x8ab: 0x1b33, 0x8ac: 0x1b37, 0x8ad: 0x1b3b, 0x8ae: 0x1b3f, 0x8af: 0x1b43, + 0x8b0: 0x1b47, 0x8b1: 0x1b4b, 0x8b2: 0x1b4f, 0x8b3: 0x1b53, 0x8b4: 0x1b57, 0x8b5: 0x1b5b, + 0x8b6: 0x0043, 0x8b7: 0x0045, 0x8b8: 0x0047, 0x8b9: 0x0049, 0x8ba: 0x004b, 0x8bb: 0x004d, + 0x8bc: 0x004f, 0x8bd: 0x0051, 0x8be: 0x0053, 0x8bf: 0x0055, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x06bf, 0x8c1: 0x06e3, 0x8c2: 0x06ef, 0x8c3: 0x06ff, 0x8c4: 0x0707, 0x8c5: 0x0713, + 0x8c6: 0x071b, 0x8c7: 0x0723, 0x8c8: 0x072f, 0x8c9: 0x0783, 0x8ca: 0x079b, 0x8cb: 0x07ab, + 0x8cc: 0x07bb, 0x8cd: 0x07cb, 0x8ce: 0x07db, 0x8cf: 0x07fb, 0x8d0: 0x07ff, 0x8d1: 0x0803, + 0x8d2: 0x0837, 0x8d3: 0x085f, 0x8d4: 0x086f, 0x8d5: 0x0877, 0x8d6: 0x087b, 0x8d7: 0x0887, + 0x8d8: 0x08a3, 0x8d9: 0x08a7, 0x8da: 0x08bf, 0x8db: 0x08c3, 0x8dc: 0x08cb, 0x8dd: 0x08db, + 0x8de: 0x0977, 0x8df: 0x098b, 0x8e0: 0x09cb, 0x8e1: 0x09df, 0x8e2: 0x09e7, 0x8e3: 0x09eb, + 0x8e4: 0x09fb, 0x8e5: 0x0a17, 0x8e6: 0x0a43, 0x8e7: 0x0a4f, 0x8e8: 0x0a6f, 0x8e9: 0x0a7b, + 0x8ea: 0x0a7f, 0x8eb: 0x0a83, 0x8ec: 0x0a9b, 0x8ed: 0x0a9f, 0x8ee: 0x0acb, 0x8ef: 0x0ad7, + 0x8f0: 0x0adf, 0x8f1: 0x0ae7, 0x8f2: 0x0af7, 0x8f3: 0x0aff, 0x8f4: 0x0b07, 0x8f5: 0x0b33, + 0x8f6: 0x0b37, 0x8f7: 0x0b3f, 0x8f8: 0x0b43, 0x8f9: 0x0b4b, 0x8fa: 0x0b53, 0x8fb: 0x0b63, + 0x8fc: 0x0b7f, 0x8fd: 0x0bf7, 0x8fe: 0x0c0b, 0x8ff: 0x0c0f, + // Block 0x24, offset 0x900 + 0x900: 0x0c8f, 0x901: 0x0c93, 0x902: 0x0ca7, 0x903: 0x0cab, 0x904: 0x0cb3, 0x905: 0x0cbb, + 0x906: 0x0cc3, 0x907: 0x0ccf, 0x908: 0x0cf7, 0x909: 0x0d07, 0x90a: 0x0d1b, 0x90b: 0x0d8b, + 0x90c: 0x0d97, 0x90d: 0x0da7, 0x90e: 0x0db3, 0x90f: 0x0dbf, 0x910: 0x0dc7, 0x911: 0x0dcb, + 0x912: 0x0dcf, 0x913: 0x0dd3, 0x914: 0x0dd7, 0x915: 0x0e8f, 0x916: 0x0ed7, 0x917: 0x0ee3, + 0x918: 0x0ee7, 0x919: 0x0eeb, 0x91a: 0x0eef, 0x91b: 0x0ef7, 0x91c: 0x0efb, 0x91d: 0x0f0f, + 0x91e: 0x0f2b, 0x91f: 0x0f33, 0x920: 0x0f73, 0x921: 0x0f77, 0x922: 0x0f7f, 0x923: 0x0f83, + 0x924: 0x0f8b, 0x925: 0x0f8f, 0x926: 0x0fb3, 0x927: 0x0fb7, 0x928: 0x0fd3, 0x929: 0x0fd7, + 0x92a: 0x0fdb, 0x92b: 0x0fdf, 0x92c: 0x0ff3, 0x92d: 0x1017, 0x92e: 0x101b, 0x92f: 0x101f, + 0x930: 0x1043, 0x931: 0x1083, 0x932: 0x1087, 0x933: 0x10a7, 0x934: 0x10b7, 0x935: 0x10bf, + 0x936: 0x10df, 0x937: 0x1103, 0x938: 0x1147, 0x939: 0x114f, 0x93a: 0x1163, 0x93b: 0x116f, + 0x93c: 0x1177, 0x93d: 0x117f, 0x93e: 0x1183, 0x93f: 0x1187, + // Block 0x25, offset 0x940 + 0x940: 0x119f, 0x941: 0x11a3, 0x942: 0x11bf, 0x943: 0x11c7, 0x944: 0x11cf, 0x945: 0x11d3, + 0x946: 0x11df, 0x947: 0x11e7, 0x948: 0x11eb, 0x949: 0x11ef, 0x94a: 0x11f7, 0x94b: 0x11fb, + 0x94c: 0x129b, 0x94d: 0x12af, 0x94e: 0x12e3, 0x94f: 0x12e7, 0x950: 0x12ef, 0x951: 0x131b, + 0x952: 0x1323, 0x953: 0x132b, 0x954: 0x1333, 0x955: 0x136f, 0x956: 0x1373, 0x957: 0x137b, + 0x958: 0x137f, 0x959: 0x1383, 0x95a: 0x13af, 0x95b: 0x13b3, 0x95c: 0x13bb, 0x95d: 0x13cf, + 0x95e: 0x13d3, 0x95f: 0x13ef, 0x960: 0x13f7, 0x961: 0x13fb, 0x962: 0x141f, 0x963: 0x143f, + 0x964: 0x1453, 0x965: 0x1457, 0x966: 0x145f, 0x967: 0x148b, 0x968: 0x148f, 0x969: 0x149f, + 0x96a: 0x14c3, 0x96b: 0x14cf, 0x96c: 0x14df, 0x96d: 0x14f7, 0x96e: 0x14ff, 0x96f: 0x1503, + 0x970: 0x1507, 0x971: 0x150b, 0x972: 0x1517, 0x973: 0x151b, 0x974: 0x1523, 0x975: 0x153f, + 0x976: 0x1543, 0x977: 0x1547, 0x978: 0x155f, 0x979: 0x1563, 0x97a: 0x156b, 0x97b: 0x157f, + 0x97c: 0x1583, 0x97d: 0x1587, 0x97e: 0x158f, 0x97f: 0x1593, + // Block 0x26, offset 0x980 + 0x986: 0xa000, 0x98b: 0xa000, + 0x98c: 0x3f0b, 0x98d: 0xa000, 0x98e: 0x3f13, 0x98f: 0xa000, 0x990: 0x3f1b, 0x991: 0xa000, + 0x992: 0x3f23, 0x993: 0xa000, 0x994: 
0x3f2b, 0x995: 0xa000, 0x996: 0x3f33, 0x997: 0xa000, + 0x998: 0x3f3b, 0x999: 0xa000, 0x99a: 0x3f43, 0x99b: 0xa000, 0x99c: 0x3f4b, 0x99d: 0xa000, + 0x99e: 0x3f53, 0x99f: 0xa000, 0x9a0: 0x3f5b, 0x9a1: 0xa000, 0x9a2: 0x3f63, + 0x9a4: 0xa000, 0x9a5: 0x3f6b, 0x9a6: 0xa000, 0x9a7: 0x3f73, 0x9a8: 0xa000, 0x9a9: 0x3f7b, + 0x9af: 0xa000, + 0x9b0: 0x3f83, 0x9b1: 0x3f8b, 0x9b2: 0xa000, 0x9b3: 0x3f93, 0x9b4: 0x3f9b, 0x9b5: 0xa000, + 0x9b6: 0x3fa3, 0x9b7: 0x3fab, 0x9b8: 0xa000, 0x9b9: 0x3fb3, 0x9ba: 0x3fbb, 0x9bb: 0xa000, + 0x9bc: 0x3fc3, 0x9bd: 0x3fcb, + // Block 0x27, offset 0x9c0 + 0x9d4: 0x3f03, + 0x9d9: 0x9903, 0x9da: 0x9903, 0x9db: 0x42df, 0x9dc: 0x42e5, 0x9dd: 0xa000, + 0x9de: 0x3fd3, 0x9df: 0x26b7, + 0x9e6: 0xa000, + 0x9eb: 0xa000, 0x9ec: 0x3fe3, 0x9ed: 0xa000, 0x9ee: 0x3feb, 0x9ef: 0xa000, + 0x9f0: 0x3ff3, 0x9f1: 0xa000, 0x9f2: 0x3ffb, 0x9f3: 0xa000, 0x9f4: 0x4003, 0x9f5: 0xa000, + 0x9f6: 0x400b, 0x9f7: 0xa000, 0x9f8: 0x4013, 0x9f9: 0xa000, 0x9fa: 0x401b, 0x9fb: 0xa000, + 0x9fc: 0x4023, 0x9fd: 0xa000, 0x9fe: 0x402b, 0x9ff: 0xa000, + // Block 0x28, offset 0xa00 + 0xa00: 0x4033, 0xa01: 0xa000, 0xa02: 0x403b, 0xa04: 0xa000, 0xa05: 0x4043, + 0xa06: 0xa000, 0xa07: 0x404b, 0xa08: 0xa000, 0xa09: 0x4053, + 0xa0f: 0xa000, 0xa10: 0x405b, 0xa11: 0x4063, + 0xa12: 0xa000, 0xa13: 0x406b, 0xa14: 0x4073, 0xa15: 0xa000, 0xa16: 0x407b, 0xa17: 0x4083, + 0xa18: 0xa000, 0xa19: 0x408b, 0xa1a: 0x4093, 0xa1b: 0xa000, 0xa1c: 0x409b, 0xa1d: 0x40a3, + 0xa2f: 0xa000, + 0xa30: 0xa000, 0xa31: 0xa000, 0xa32: 0xa000, 0xa34: 0x3fdb, + 0xa37: 0x40ab, 0xa38: 0x40b3, 0xa39: 0x40bb, 0xa3a: 0x40c3, + 0xa3d: 0xa000, 0xa3e: 0x40cb, 0xa3f: 0x26cc, + // Block 0x29, offset 0xa40 + 0xa40: 0x0367, 0xa41: 0x032b, 0xa42: 0x032f, 0xa43: 0x0333, 0xa44: 0x037b, 0xa45: 0x0337, + 0xa46: 0x033b, 0xa47: 0x033f, 0xa48: 0x0343, 0xa49: 0x0347, 0xa4a: 0x034b, 0xa4b: 0x034f, + 0xa4c: 0x0353, 0xa4d: 0x0357, 0xa4e: 0x035b, 0xa4f: 0x49c0, 0xa50: 0x49c6, 0xa51: 0x49cc, + 0xa52: 0x49d2, 0xa53: 0x49d8, 0xa54: 0x49de, 0xa55: 0x49e4, 0xa56: 0x49ea, 0xa57: 0x49f0, + 0xa58: 0x49f6, 0xa59: 0x49fc, 0xa5a: 0x4a02, 0xa5b: 0x4a08, 0xa5c: 0x4a0e, 0xa5d: 0x4a14, + 0xa5e: 0x4a1a, 0xa5f: 0x4a20, 0xa60: 0x4a26, 0xa61: 0x4a2c, 0xa62: 0x4a32, 0xa63: 0x4a38, + 0xa64: 0x03c3, 0xa65: 0x035f, 0xa66: 0x0363, 0xa67: 0x03e7, 0xa68: 0x03eb, 0xa69: 0x03ef, + 0xa6a: 0x03f3, 0xa6b: 0x03f7, 0xa6c: 0x03fb, 0xa6d: 0x03ff, 0xa6e: 0x036b, 0xa6f: 0x0403, + 0xa70: 0x0407, 0xa71: 0x036f, 0xa72: 0x0373, 0xa73: 0x0377, 0xa74: 0x037f, 0xa75: 0x0383, + 0xa76: 0x0387, 0xa77: 0x038b, 0xa78: 0x038f, 0xa79: 0x0393, 0xa7a: 0x0397, 0xa7b: 0x039b, + 0xa7c: 0x039f, 0xa7d: 0x03a3, 0xa7e: 0x03a7, 0xa7f: 0x03ab, + // Block 0x2a, offset 0xa80 + 0xa80: 0x03af, 0xa81: 0x03b3, 0xa82: 0x040b, 0xa83: 0x040f, 0xa84: 0x03b7, 0xa85: 0x03bb, + 0xa86: 0x03bf, 0xa87: 0x03c7, 0xa88: 0x03cb, 0xa89: 0x03cf, 0xa8a: 0x03d3, 0xa8b: 0x03d7, + 0xa8c: 0x03db, 0xa8d: 0x03df, 0xa8e: 0x03e3, + 0xa92: 0x06bf, 0xa93: 0x071b, 0xa94: 0x06cb, 0xa95: 0x097b, 0xa96: 0x06cf, 0xa97: 0x06e7, + 0xa98: 0x06d3, 0xa99: 0x0f93, 0xa9a: 0x0707, 0xa9b: 0x06db, 0xa9c: 0x06c3, 0xa9d: 0x09ff, + 0xa9e: 0x098f, 0xa9f: 0x072f, + // Block 0x2b, offset 0xac0 + 0xac0: 0x2057, 0xac1: 0x205d, 0xac2: 0x2063, 0xac3: 0x2069, 0xac4: 0x206f, 0xac5: 0x2075, + 0xac6: 0x207b, 0xac7: 0x2081, 0xac8: 0x2087, 0xac9: 0x208d, 0xaca: 0x2093, 0xacb: 0x2099, + 0xacc: 0x209f, 0xacd: 0x20a5, 0xace: 0x2729, 0xacf: 0x2732, 0xad0: 0x273b, 0xad1: 0x2744, + 0xad2: 0x274d, 0xad3: 0x2756, 0xad4: 0x275f, 0xad5: 0x2768, 0xad6: 0x2771, 0xad7: 0x2783, + 0xad8: 0x278c, 0xad9: 0x2795, 
0xada: 0x279e, 0xadb: 0x27a7, 0xadc: 0x277a, 0xadd: 0x2baf, + 0xade: 0x2af0, 0xae0: 0x20ab, 0xae1: 0x20c3, 0xae2: 0x20b7, 0xae3: 0x210b, + 0xae4: 0x20c9, 0xae5: 0x20e7, 0xae6: 0x20b1, 0xae7: 0x20e1, 0xae8: 0x20bd, 0xae9: 0x20f3, + 0xaea: 0x2123, 0xaeb: 0x2141, 0xaec: 0x213b, 0xaed: 0x212f, 0xaee: 0x217d, 0xaef: 0x2111, + 0xaf0: 0x211d, 0xaf1: 0x2135, 0xaf2: 0x2129, 0xaf3: 0x2153, 0xaf4: 0x20ff, 0xaf5: 0x2147, + 0xaf6: 0x2171, 0xaf7: 0x2159, 0xaf8: 0x20ed, 0xaf9: 0x20cf, 0xafa: 0x2105, 0xafb: 0x2117, + 0xafc: 0x214d, 0xafd: 0x20d5, 0xafe: 0x2177, 0xaff: 0x20f9, + // Block 0x2c, offset 0xb00 + 0xb00: 0x215f, 0xb01: 0x20db, 0xb02: 0x2165, 0xb03: 0x216b, 0xb04: 0x092f, 0xb05: 0x0b03, + 0xb06: 0x0ca7, 0xb07: 0x10c7, + 0xb10: 0x1bc7, 0xb11: 0x18a9, + 0xb12: 0x18ac, 0xb13: 0x18af, 0xb14: 0x18b2, 0xb15: 0x18b5, 0xb16: 0x18b8, 0xb17: 0x18bb, + 0xb18: 0x18be, 0xb19: 0x18c1, 0xb1a: 0x18ca, 0xb1b: 0x18cd, 0xb1c: 0x18d0, 0xb1d: 0x18d3, + 0xb1e: 0x18d6, 0xb1f: 0x18d9, 0xb20: 0x0313, 0xb21: 0x031b, 0xb22: 0x031f, 0xb23: 0x0327, + 0xb24: 0x032b, 0xb25: 0x032f, 0xb26: 0x0337, 0xb27: 0x033f, 0xb28: 0x0343, 0xb29: 0x034b, + 0xb2a: 0x034f, 0xb2b: 0x0353, 0xb2c: 0x0357, 0xb2d: 0x035b, 0xb2e: 0x2e1b, 0xb2f: 0x2e23, + 0xb30: 0x2e2b, 0xb31: 0x2e33, 0xb32: 0x2e3b, 0xb33: 0x2e43, 0xb34: 0x2e4b, 0xb35: 0x2e53, + 0xb36: 0x2e63, 0xb37: 0x2e6b, 0xb38: 0x2e73, 0xb39: 0x2e7b, 0xb3a: 0x2e83, 0xb3b: 0x2e8b, + 0xb3c: 0x2ed6, 0xb3d: 0x2e9e, 0xb3e: 0x2e5b, + // Block 0x2d, offset 0xb40 + 0xb40: 0x06bf, 0xb41: 0x071b, 0xb42: 0x06cb, 0xb43: 0x097b, 0xb44: 0x071f, 0xb45: 0x07af, + 0xb46: 0x06c7, 0xb47: 0x07ab, 0xb48: 0x070b, 0xb49: 0x0887, 0xb4a: 0x0d07, 0xb4b: 0x0e8f, + 0xb4c: 0x0dd7, 0xb4d: 0x0d1b, 0xb4e: 0x145f, 0xb4f: 0x098b, 0xb50: 0x0ccf, 0xb51: 0x0d4b, + 0xb52: 0x0d0b, 0xb53: 0x104b, 0xb54: 0x08fb, 0xb55: 0x0f03, 0xb56: 0x1387, 0xb57: 0x105f, + 0xb58: 0x0843, 0xb59: 0x108f, 0xb5a: 0x0f9b, 0xb5b: 0x0a17, 0xb5c: 0x140f, 0xb5d: 0x077f, + 0xb5e: 0x08ab, 0xb5f: 0x0df7, 0xb60: 0x1527, 0xb61: 0x0743, 0xb62: 0x07d3, 0xb63: 0x0d9b, + 0xb64: 0x06cf, 0xb65: 0x06e7, 0xb66: 0x06d3, 0xb67: 0x0adb, 0xb68: 0x08ef, 0xb69: 0x087f, + 0xb6a: 0x0a57, 0xb6b: 0x0a4b, 0xb6c: 0x0feb, 0xb6d: 0x073f, 0xb6e: 0x139b, 0xb6f: 0x089b, + 0xb70: 0x09f3, 0xb71: 0x18dc, 0xb72: 0x18df, 0xb73: 0x18e2, 0xb74: 0x18e5, 0xb75: 0x18ee, + 0xb76: 0x18f1, 0xb77: 0x18f4, 0xb78: 0x18f7, 0xb79: 0x18fa, 0xb7a: 0x18fd, 0xb7b: 0x1900, + 0xb7c: 0x1903, 0xb7d: 0x1906, 0xb7e: 0x1909, 0xb7f: 0x1912, + // Block 0x2e, offset 0xb80 + 0xb80: 0x1cc9, 0xb81: 0x1cd8, 0xb82: 0x1ce7, 0xb83: 0x1cf6, 0xb84: 0x1d05, 0xb85: 0x1d14, + 0xb86: 0x1d23, 0xb87: 0x1d32, 0xb88: 0x1d41, 0xb89: 0x218f, 0xb8a: 0x21a1, 0xb8b: 0x21b3, + 0xb8c: 0x1954, 0xb8d: 0x1c07, 0xb8e: 0x19d5, 0xb8f: 0x1bab, 0xb90: 0x04cb, 0xb91: 0x04d3, + 0xb92: 0x04db, 0xb93: 0x04e3, 0xb94: 0x04eb, 0xb95: 0x04ef, 0xb96: 0x04f3, 0xb97: 0x04f7, + 0xb98: 0x04fb, 0xb99: 0x04ff, 0xb9a: 0x0503, 0xb9b: 0x0507, 0xb9c: 0x050b, 0xb9d: 0x050f, + 0xb9e: 0x0513, 0xb9f: 0x0517, 0xba0: 0x051b, 0xba1: 0x0523, 0xba2: 0x0527, 0xba3: 0x052b, + 0xba4: 0x052f, 0xba5: 0x0533, 0xba6: 0x0537, 0xba7: 0x053b, 0xba8: 0x053f, 0xba9: 0x0543, + 0xbaa: 0x0547, 0xbab: 0x054b, 0xbac: 0x054f, 0xbad: 0x0553, 0xbae: 0x0557, 0xbaf: 0x055b, + 0xbb0: 0x055f, 0xbb1: 0x0563, 0xbb2: 0x0567, 0xbb3: 0x056f, 0xbb4: 0x0577, 0xbb5: 0x057f, + 0xbb6: 0x0583, 0xbb7: 0x0587, 0xbb8: 0x058b, 0xbb9: 0x058f, 0xbba: 0x0593, 0xbbb: 0x0597, + 0xbbc: 0x059b, 0xbbd: 0x059f, 0xbbe: 0x05a3, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x2b0f, 0xbc1: 0x29ab, 0xbc2: 0x2b1f, 0xbc3: 0x2883, 0xbc4: 
0x2ee7, 0xbc5: 0x288d, + 0xbc6: 0x2897, 0xbc7: 0x2f2b, 0xbc8: 0x29b8, 0xbc9: 0x28a1, 0xbca: 0x28ab, 0xbcb: 0x28b5, + 0xbcc: 0x29df, 0xbcd: 0x29ec, 0xbce: 0x29c5, 0xbcf: 0x29d2, 0xbd0: 0x2eac, 0xbd1: 0x29f9, + 0xbd2: 0x2a06, 0xbd3: 0x2bc1, 0xbd4: 0x26be, 0xbd5: 0x2bd4, 0xbd6: 0x2be7, 0xbd7: 0x2b2f, + 0xbd8: 0x2a13, 0xbd9: 0x2bfa, 0xbda: 0x2c0d, 0xbdb: 0x2a20, 0xbdc: 0x28bf, 0xbdd: 0x28c9, + 0xbde: 0x2eba, 0xbdf: 0x2a2d, 0xbe0: 0x2b3f, 0xbe1: 0x2ef8, 0xbe2: 0x28d3, 0xbe3: 0x28dd, + 0xbe4: 0x2a3a, 0xbe5: 0x28e7, 0xbe6: 0x28f1, 0xbe7: 0x26d3, 0xbe8: 0x26da, 0xbe9: 0x28fb, + 0xbea: 0x2905, 0xbeb: 0x2c20, 0xbec: 0x2a47, 0xbed: 0x2b4f, 0xbee: 0x2c33, 0xbef: 0x2a54, + 0xbf0: 0x2919, 0xbf1: 0x290f, 0xbf2: 0x2f3f, 0xbf3: 0x2a61, 0xbf4: 0x2c46, 0xbf5: 0x2923, + 0xbf6: 0x2b5f, 0xbf7: 0x292d, 0xbf8: 0x2a7b, 0xbf9: 0x2937, 0xbfa: 0x2a88, 0xbfb: 0x2f09, + 0xbfc: 0x2a6e, 0xbfd: 0x2b6f, 0xbfe: 0x2a95, 0xbff: 0x26e1, + // Block 0x30, offset 0xc00 + 0xc00: 0x2f1a, 0xc01: 0x2941, 0xc02: 0x294b, 0xc03: 0x2aa2, 0xc04: 0x2955, 0xc05: 0x295f, + 0xc06: 0x2969, 0xc07: 0x2b7f, 0xc08: 0x2aaf, 0xc09: 0x26e8, 0xc0a: 0x2c59, 0xc0b: 0x2e93, + 0xc0c: 0x2b8f, 0xc0d: 0x2abc, 0xc0e: 0x2ec8, 0xc0f: 0x2973, 0xc10: 0x297d, 0xc11: 0x2ac9, + 0xc12: 0x26ef, 0xc13: 0x2ad6, 0xc14: 0x2b9f, 0xc15: 0x26f6, 0xc16: 0x2c6c, 0xc17: 0x2987, + 0xc18: 0x1cba, 0xc19: 0x1cce, 0xc1a: 0x1cdd, 0xc1b: 0x1cec, 0xc1c: 0x1cfb, 0xc1d: 0x1d0a, + 0xc1e: 0x1d19, 0xc1f: 0x1d28, 0xc20: 0x1d37, 0xc21: 0x1d46, 0xc22: 0x2195, 0xc23: 0x21a7, + 0xc24: 0x21b9, 0xc25: 0x21c5, 0xc26: 0x21d1, 0xc27: 0x21dd, 0xc28: 0x21e9, 0xc29: 0x21f5, + 0xc2a: 0x2201, 0xc2b: 0x220d, 0xc2c: 0x2249, 0xc2d: 0x2255, 0xc2e: 0x2261, 0xc2f: 0x226d, + 0xc30: 0x2279, 0xc31: 0x1c17, 0xc32: 0x19c9, 0xc33: 0x1936, 0xc34: 0x1be7, 0xc35: 0x1a4a, + 0xc36: 0x1a59, 0xc37: 0x19cf, 0xc38: 0x1bff, 0xc39: 0x1c03, 0xc3a: 0x1960, 0xc3b: 0x2704, + 0xc3c: 0x2712, 0xc3d: 0x26fd, 0xc3e: 0x270b, 0xc3f: 0x2ae3, + // Block 0x31, offset 0xc40 + 0xc40: 0x1a4d, 0xc41: 0x1a35, 0xc42: 0x1c63, 0xc43: 0x1a1d, 0xc44: 0x19f6, 0xc45: 0x1969, + 0xc46: 0x1978, 0xc47: 0x1948, 0xc48: 0x1bf3, 0xc49: 0x1d55, 0xc4a: 0x1a50, 0xc4b: 0x1a38, + 0xc4c: 0x1c67, 0xc4d: 0x1c73, 0xc4e: 0x1a29, 0xc4f: 0x19ff, 0xc50: 0x1957, 0xc51: 0x1c1f, + 0xc52: 0x1bb3, 0xc53: 0x1b9f, 0xc54: 0x1bcf, 0xc55: 0x1c77, 0xc56: 0x1a2c, 0xc57: 0x19cc, + 0xc58: 0x1a02, 0xc59: 0x19e1, 0xc5a: 0x1a44, 0xc5b: 0x1c7b, 0xc5c: 0x1a2f, 0xc5d: 0x19c3, + 0xc5e: 0x1a05, 0xc5f: 0x1c3f, 0xc60: 0x1bf7, 0xc61: 0x1a17, 0xc62: 0x1c27, 0xc63: 0x1c43, + 0xc64: 0x1bfb, 0xc65: 0x1a1a, 0xc66: 0x1c2b, 0xc67: 0x22eb, 0xc68: 0x22ff, 0xc69: 0x1999, + 0xc6a: 0x1c23, 0xc6b: 0x1bb7, 0xc6c: 0x1ba3, 0xc6d: 0x1c4b, 0xc6e: 0x2719, 0xc6f: 0x27b0, + 0xc70: 0x1a5c, 0xc71: 0x1a47, 0xc72: 0x1c7f, 0xc73: 0x1a32, 0xc74: 0x1a53, 0xc75: 0x1a3b, + 0xc76: 0x1c6b, 0xc77: 0x1a20, 0xc78: 0x19f9, 0xc79: 0x1984, 0xc7a: 0x1a56, 0xc7b: 0x1a3e, + 0xc7c: 0x1c6f, 0xc7d: 0x1a23, 0xc7e: 0x19fc, 0xc7f: 0x1987, + // Block 0x32, offset 0xc80 + 0xc80: 0x1c2f, 0xc81: 0x1bbb, 0xc82: 0x1d50, 0xc83: 0x1939, 0xc84: 0x19bd, 0xc85: 0x19c0, + 0xc86: 0x22f8, 0xc87: 0x1b97, 0xc88: 0x19c6, 0xc89: 0x194b, 0xc8a: 0x19e4, 0xc8b: 0x194e, + 0xc8c: 0x19ed, 0xc8d: 0x196c, 0xc8e: 0x196f, 0xc8f: 0x1a08, 0xc90: 0x1a0e, 0xc91: 0x1a11, + 0xc92: 0x1c33, 0xc93: 0x1a14, 0xc94: 0x1a26, 0xc95: 0x1c3b, 0xc96: 0x1c47, 0xc97: 0x1993, + 0xc98: 0x1d5a, 0xc99: 0x1bbf, 0xc9a: 0x1996, 0xc9b: 0x1a5f, 0xc9c: 0x19a8, 0xc9d: 0x19b7, + 0xc9e: 0x22e5, 0xc9f: 0x22df, 0xca0: 0x1cc4, 0xca1: 0x1cd3, 0xca2: 0x1ce2, 0xca3: 0x1cf1, + 0xca4: 0x1d00, 0xca5: 0x1d0f, 
0xca6: 0x1d1e, 0xca7: 0x1d2d, 0xca8: 0x1d3c, 0xca9: 0x2189, + 0xcaa: 0x219b, 0xcab: 0x21ad, 0xcac: 0x21bf, 0xcad: 0x21cb, 0xcae: 0x21d7, 0xcaf: 0x21e3, + 0xcb0: 0x21ef, 0xcb1: 0x21fb, 0xcb2: 0x2207, 0xcb3: 0x2243, 0xcb4: 0x224f, 0xcb5: 0x225b, + 0xcb6: 0x2267, 0xcb7: 0x2273, 0xcb8: 0x227f, 0xcb9: 0x2285, 0xcba: 0x228b, 0xcbb: 0x2291, + 0xcbc: 0x2297, 0xcbd: 0x22a9, 0xcbe: 0x22af, 0xcbf: 0x1c13, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x1377, 0xcc1: 0x0cfb, 0xcc2: 0x13d3, 0xcc3: 0x139f, 0xcc4: 0x0e57, 0xcc5: 0x06eb, + 0xcc6: 0x08df, 0xcc7: 0x162b, 0xcc8: 0x162b, 0xcc9: 0x0a0b, 0xcca: 0x145f, 0xccb: 0x0943, + 0xccc: 0x0a07, 0xccd: 0x0bef, 0xcce: 0x0fcf, 0xccf: 0x115f, 0xcd0: 0x1297, 0xcd1: 0x12d3, + 0xcd2: 0x1307, 0xcd3: 0x141b, 0xcd4: 0x0d73, 0xcd5: 0x0dff, 0xcd6: 0x0eab, 0xcd7: 0x0f43, + 0xcd8: 0x125f, 0xcd9: 0x1447, 0xcda: 0x1573, 0xcdb: 0x070f, 0xcdc: 0x08b3, 0xcdd: 0x0d87, + 0xcde: 0x0ecf, 0xcdf: 0x1293, 0xce0: 0x15c3, 0xce1: 0x0ab3, 0xce2: 0x0e77, 0xce3: 0x1283, + 0xce4: 0x1317, 0xce5: 0x0c23, 0xce6: 0x11bb, 0xce7: 0x12df, 0xce8: 0x0b1f, 0xce9: 0x0d0f, + 0xcea: 0x0e17, 0xceb: 0x0f1b, 0xcec: 0x1427, 0xced: 0x074f, 0xcee: 0x07e7, 0xcef: 0x0853, + 0xcf0: 0x0c8b, 0xcf1: 0x0d7f, 0xcf2: 0x0ecb, 0xcf3: 0x0fef, 0xcf4: 0x1177, 0xcf5: 0x128b, + 0xcf6: 0x12a3, 0xcf7: 0x13c7, 0xcf8: 0x14ef, 0xcf9: 0x15a3, 0xcfa: 0x15bf, 0xcfb: 0x102b, + 0xcfc: 0x106b, 0xcfd: 0x1123, 0xcfe: 0x1243, 0xcff: 0x147b, + // Block 0x34, offset 0xd00 + 0xd00: 0x15cb, 0xd01: 0x134b, 0xd02: 0x09c7, 0xd03: 0x0b3b, 0xd04: 0x10db, 0xd05: 0x119b, + 0xd06: 0x0eff, 0xd07: 0x1033, 0xd08: 0x1397, 0xd09: 0x14e7, 0xd0a: 0x09c3, 0xd0b: 0x0a8f, + 0xd0c: 0x0d77, 0xd0d: 0x0e2b, 0xd0e: 0x0e5f, 0xd0f: 0x1113, 0xd10: 0x113b, 0xd11: 0x14a7, + 0xd12: 0x084f, 0xd13: 0x11a7, 0xd14: 0x07f3, 0xd15: 0x07ef, 0xd16: 0x1097, 0xd17: 0x1127, + 0xd18: 0x125b, 0xd19: 0x14af, 0xd1a: 0x1367, 0xd1b: 0x0c27, 0xd1c: 0x0d73, 0xd1d: 0x1357, + 0xd1e: 0x06f7, 0xd1f: 0x0a63, 0xd20: 0x0b93, 0xd21: 0x0f2f, 0xd22: 0x0faf, 0xd23: 0x0873, + 0xd24: 0x103b, 0xd25: 0x075f, 0xd26: 0x0b77, 0xd27: 0x06d7, 0xd28: 0x0deb, 0xd29: 0x0ca3, + 0xd2a: 0x110f, 0xd2b: 0x08c7, 0xd2c: 0x09b3, 0xd2d: 0x0ffb, 0xd2e: 0x1263, 0xd2f: 0x133b, + 0xd30: 0x0db7, 0xd31: 0x13f7, 0xd32: 0x0de3, 0xd33: 0x0c37, 0xd34: 0x121b, 0xd35: 0x0c57, + 0xd36: 0x0fab, 0xd37: 0x072b, 0xd38: 0x07a7, 0xd39: 0x07eb, 0xd3a: 0x0d53, 0xd3b: 0x10fb, + 0xd3c: 0x11f3, 0xd3d: 0x1347, 0xd3e: 0x145b, 0xd3f: 0x085b, + // Block 0x35, offset 0xd40 + 0xd40: 0x090f, 0xd41: 0x0a17, 0xd42: 0x0b2f, 0xd43: 0x0cbf, 0xd44: 0x0e7b, 0xd45: 0x103f, + 0xd46: 0x1497, 0xd47: 0x157b, 0xd48: 0x15cf, 0xd49: 0x15e7, 0xd4a: 0x0837, 0xd4b: 0x0cf3, + 0xd4c: 0x0da3, 0xd4d: 0x13eb, 0xd4e: 0x0afb, 0xd4f: 0x0bd7, 0xd50: 0x0bf3, 0xd51: 0x0c83, + 0xd52: 0x0e6b, 0xd53: 0x0eb7, 0xd54: 0x0f67, 0xd55: 0x108b, 0xd56: 0x112f, 0xd57: 0x1193, + 0xd58: 0x13db, 0xd59: 0x126b, 0xd5a: 0x1403, 0xd5b: 0x147f, 0xd5c: 0x080f, 0xd5d: 0x083b, + 0xd5e: 0x0923, 0xd5f: 0x0ea7, 0xd60: 0x12f3, 0xd61: 0x133b, 0xd62: 0x0b1b, 0xd63: 0x0b8b, + 0xd64: 0x0c4f, 0xd65: 0x0daf, 0xd66: 0x10d7, 0xd67: 0x0f23, 0xd68: 0x073b, 0xd69: 0x097f, + 0xd6a: 0x0a63, 0xd6b: 0x0ac7, 0xd6c: 0x0b97, 0xd6d: 0x0f3f, 0xd6e: 0x0f5b, 0xd6f: 0x116b, + 0xd70: 0x118b, 0xd71: 0x1463, 0xd72: 0x14e3, 0xd73: 0x14f3, 0xd74: 0x152f, 0xd75: 0x0753, + 0xd76: 0x107f, 0xd77: 0x144f, 0xd78: 0x14cb, 0xd79: 0x0baf, 0xd7a: 0x0717, 0xd7b: 0x0777, + 0xd7c: 0x0a67, 0xd7d: 0x0a87, 0xd7e: 0x0caf, 0xd7f: 0x0d73, + // Block 0x36, offset 0xd80 + 0xd80: 0x0ec3, 0xd81: 0x0fcb, 0xd82: 0x1277, 0xd83: 0x1417, 0xd84: 0x1623, 0xd85: 
0x0ce3, + 0xd86: 0x14a3, 0xd87: 0x0833, 0xd88: 0x0d2f, 0xd89: 0x0d3b, 0xd8a: 0x0e0f, 0xd8b: 0x0e47, + 0xd8c: 0x0f4b, 0xd8d: 0x0fa7, 0xd8e: 0x1027, 0xd8f: 0x110b, 0xd90: 0x153b, 0xd91: 0x07af, + 0xd92: 0x0c03, 0xd93: 0x14b3, 0xd94: 0x0767, 0xd95: 0x0aab, 0xd96: 0x0e2f, 0xd97: 0x13df, + 0xd98: 0x0b67, 0xd99: 0x0bb7, 0xd9a: 0x0d43, 0xd9b: 0x0f2f, 0xd9c: 0x14bb, 0xd9d: 0x0817, + 0xd9e: 0x08ff, 0xd9f: 0x0a97, 0xda0: 0x0cd3, 0xda1: 0x0d1f, 0xda2: 0x0d5f, 0xda3: 0x0df3, + 0xda4: 0x0f47, 0xda5: 0x0fbb, 0xda6: 0x1157, 0xda7: 0x12f7, 0xda8: 0x1303, 0xda9: 0x1457, + 0xdaa: 0x14d7, 0xdab: 0x0883, 0xdac: 0x0e4b, 0xdad: 0x0903, 0xdae: 0x0ec7, 0xdaf: 0x0f6b, + 0xdb0: 0x1287, 0xdb1: 0x14bf, 0xdb2: 0x15ab, 0xdb3: 0x15d3, 0xdb4: 0x0d37, 0xdb5: 0x0e27, + 0xdb6: 0x11c3, 0xdb7: 0x10b7, 0xdb8: 0x10c3, 0xdb9: 0x10e7, 0xdba: 0x0f17, 0xdbb: 0x0e9f, + 0xdbc: 0x1363, 0xdbd: 0x0733, 0xdbe: 0x122b, 0xdbf: 0x081b, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x080b, 0xdc1: 0x0b0b, 0xdc2: 0x0c2b, 0xdc3: 0x10f3, 0xdc4: 0x0a53, 0xdc5: 0x0e03, + 0xdc6: 0x0cef, 0xdc7: 0x13e7, 0xdc8: 0x12e7, 0xdc9: 0x14ab, 0xdca: 0x1323, 0xdcb: 0x0b27, + 0xdcc: 0x0787, 0xdcd: 0x095b, 0xdd0: 0x09af, + 0xdd2: 0x0cdf, 0xdd5: 0x07f7, 0xdd6: 0x0f1f, 0xdd7: 0x0fe3, + 0xdd8: 0x1047, 0xdd9: 0x1063, 0xdda: 0x1067, 0xddb: 0x107b, 0xddc: 0x14fb, 0xddd: 0x10eb, + 0xdde: 0x116f, 0xde0: 0x128f, 0xde2: 0x1353, + 0xde5: 0x1407, 0xde6: 0x1433, + 0xdea: 0x154f, 0xdeb: 0x1553, 0xdec: 0x1557, 0xded: 0x15bb, 0xdee: 0x142b, 0xdef: 0x14c7, + 0xdf0: 0x0757, 0xdf1: 0x077b, 0xdf2: 0x078f, 0xdf3: 0x084b, 0xdf4: 0x0857, 0xdf5: 0x0897, + 0xdf6: 0x094b, 0xdf7: 0x0967, 0xdf8: 0x096f, 0xdf9: 0x09ab, 0xdfa: 0x09b7, 0xdfb: 0x0a93, + 0xdfc: 0x0a9b, 0xdfd: 0x0ba3, 0xdfe: 0x0bcb, 0xdff: 0x0bd3, + // Block 0x38, offset 0xe00 + 0xe00: 0x0beb, 0xe01: 0x0c97, 0xe02: 0x0cc7, 0xe03: 0x0ce7, 0xe04: 0x0d57, 0xe05: 0x0e1b, + 0xe06: 0x0e37, 0xe07: 0x0e67, 0xe08: 0x0ebb, 0xe09: 0x0edb, 0xe0a: 0x0f4f, 0xe0b: 0x102f, + 0xe0c: 0x104b, 0xe0d: 0x1053, 0xe0e: 0x104f, 0xe0f: 0x1057, 0xe10: 0x105b, 0xe11: 0x105f, + 0xe12: 0x1073, 0xe13: 0x1077, 0xe14: 0x109b, 0xe15: 0x10af, 0xe16: 0x10cb, 0xe17: 0x112f, + 0xe18: 0x1137, 0xe19: 0x113f, 0xe1a: 0x1153, 0xe1b: 0x117b, 0xe1c: 0x11cb, 0xe1d: 0x11ff, + 0xe1e: 0x11ff, 0xe1f: 0x1267, 0xe20: 0x130f, 0xe21: 0x1327, 0xe22: 0x135b, 0xe23: 0x135f, + 0xe24: 0x13a3, 0xe25: 0x13a7, 0xe26: 0x13ff, 0xe27: 0x1407, 0xe28: 0x14db, 0xe29: 0x151f, + 0xe2a: 0x1537, 0xe2b: 0x0b9b, 0xe2c: 0x171e, 0xe2d: 0x11e3, + 0xe30: 0x06df, 0xe31: 0x07e3, 0xe32: 0x07a3, 0xe33: 0x074b, 0xe34: 0x078b, 0xe35: 0x07b7, + 0xe36: 0x0847, 0xe37: 0x0863, 0xe38: 0x094b, 0xe39: 0x0937, 0xe3a: 0x0947, 0xe3b: 0x0963, + 0xe3c: 0x09af, 0xe3d: 0x09bf, 0xe3e: 0x0a03, 0xe3f: 0x0a0f, + // Block 0x39, offset 0xe40 + 0xe40: 0x0a2b, 0xe41: 0x0a3b, 0xe42: 0x0b23, 0xe43: 0x0b2b, 0xe44: 0x0b5b, 0xe45: 0x0b7b, + 0xe46: 0x0bab, 0xe47: 0x0bc3, 0xe48: 0x0bb3, 0xe49: 0x0bd3, 0xe4a: 0x0bc7, 0xe4b: 0x0beb, + 0xe4c: 0x0c07, 0xe4d: 0x0c5f, 0xe4e: 0x0c6b, 0xe4f: 0x0c73, 0xe50: 0x0c9b, 0xe51: 0x0cdf, + 0xe52: 0x0d0f, 0xe53: 0x0d13, 0xe54: 0x0d27, 0xe55: 0x0da7, 0xe56: 0x0db7, 0xe57: 0x0e0f, + 0xe58: 0x0e5b, 0xe59: 0x0e53, 0xe5a: 0x0e67, 0xe5b: 0x0e83, 0xe5c: 0x0ebb, 0xe5d: 0x1013, + 0xe5e: 0x0edf, 0xe5f: 0x0f13, 0xe60: 0x0f1f, 0xe61: 0x0f5f, 0xe62: 0x0f7b, 0xe63: 0x0f9f, + 0xe64: 0x0fc3, 0xe65: 0x0fc7, 0xe66: 0x0fe3, 0xe67: 0x0fe7, 0xe68: 0x0ff7, 0xe69: 0x100b, + 0xe6a: 0x1007, 0xe6b: 0x1037, 0xe6c: 0x10b3, 0xe6d: 0x10cb, 0xe6e: 0x10e3, 0xe6f: 0x111b, + 0xe70: 0x112f, 0xe71: 0x114b, 0xe72: 0x117b, 0xe73: 0x122f, 0xe74: 0x1257, 
0xe75: 0x12cb, + 0xe76: 0x1313, 0xe77: 0x131f, 0xe78: 0x1327, 0xe79: 0x133f, 0xe7a: 0x1353, 0xe7b: 0x1343, + 0xe7c: 0x135b, 0xe7d: 0x1357, 0xe7e: 0x134f, 0xe7f: 0x135f, + // Block 0x3a, offset 0xe80 + 0xe80: 0x136b, 0xe81: 0x13a7, 0xe82: 0x13e3, 0xe83: 0x1413, 0xe84: 0x144b, 0xe85: 0x146b, + 0xe86: 0x14b7, 0xe87: 0x14db, 0xe88: 0x14fb, 0xe89: 0x150f, 0xe8a: 0x151f, 0xe8b: 0x152b, + 0xe8c: 0x1537, 0xe8d: 0x158b, 0xe8e: 0x162b, 0xe8f: 0x16b5, 0xe90: 0x16b0, 0xe91: 0x16e2, + 0xe92: 0x0607, 0xe93: 0x062f, 0xe94: 0x0633, 0xe95: 0x1764, 0xe96: 0x1791, 0xe97: 0x1809, + 0xe98: 0x1617, 0xe99: 0x1627, + // Block 0x3b, offset 0xec0 + 0xec0: 0x19d8, 0xec1: 0x19db, 0xec2: 0x19de, 0xec3: 0x1c0b, 0xec4: 0x1c0f, 0xec5: 0x1a62, + 0xec6: 0x1a62, + 0xed3: 0x1d78, 0xed4: 0x1d69, 0xed5: 0x1d6e, 0xed6: 0x1d7d, 0xed7: 0x1d73, + 0xedd: 0x4393, + 0xede: 0x8115, 0xedf: 0x4405, 0xee0: 0x022d, 0xee1: 0x0215, 0xee2: 0x021e, 0xee3: 0x0221, + 0xee4: 0x0224, 0xee5: 0x0227, 0xee6: 0x022a, 0xee7: 0x0230, 0xee8: 0x0233, 0xee9: 0x0017, + 0xeea: 0x43f3, 0xeeb: 0x43f9, 0xeec: 0x44f7, 0xeed: 0x44ff, 0xeee: 0x434b, 0xeef: 0x4351, + 0xef0: 0x4357, 0xef1: 0x435d, 0xef2: 0x4369, 0xef3: 0x436f, 0xef4: 0x4375, 0xef5: 0x4381, + 0xef6: 0x4387, 0xef8: 0x438d, 0xef9: 0x4399, 0xefa: 0x439f, 0xefb: 0x43a5, + 0xefc: 0x43b1, 0xefe: 0x43b7, + // Block 0x3c, offset 0xf00 + 0xf00: 0x43bd, 0xf01: 0x43c3, 0xf03: 0x43c9, 0xf04: 0x43cf, + 0xf06: 0x43db, 0xf07: 0x43e1, 0xf08: 0x43e7, 0xf09: 0x43ed, 0xf0a: 0x43ff, 0xf0b: 0x437b, + 0xf0c: 0x4363, 0xf0d: 0x43ab, 0xf0e: 0x43d5, 0xf0f: 0x1d82, 0xf10: 0x0299, 0xf11: 0x0299, + 0xf12: 0x02a2, 0xf13: 0x02a2, 0xf14: 0x02a2, 0xf15: 0x02a2, 0xf16: 0x02a5, 0xf17: 0x02a5, + 0xf18: 0x02a5, 0xf19: 0x02a5, 0xf1a: 0x02ab, 0xf1b: 0x02ab, 0xf1c: 0x02ab, 0xf1d: 0x02ab, + 0xf1e: 0x029f, 0xf1f: 0x029f, 0xf20: 0x029f, 0xf21: 0x029f, 0xf22: 0x02a8, 0xf23: 0x02a8, + 0xf24: 0x02a8, 0xf25: 0x02a8, 0xf26: 0x029c, 0xf27: 0x029c, 0xf28: 0x029c, 0xf29: 0x029c, + 0xf2a: 0x02cf, 0xf2b: 0x02cf, 0xf2c: 0x02cf, 0xf2d: 0x02cf, 0xf2e: 0x02d2, 0xf2f: 0x02d2, + 0xf30: 0x02d2, 0xf31: 0x02d2, 0xf32: 0x02b1, 0xf33: 0x02b1, 0xf34: 0x02b1, 0xf35: 0x02b1, + 0xf36: 0x02ae, 0xf37: 0x02ae, 0xf38: 0x02ae, 0xf39: 0x02ae, 0xf3a: 0x02b4, 0xf3b: 0x02b4, + 0xf3c: 0x02b4, 0xf3d: 0x02b4, 0xf3e: 0x02b7, 0xf3f: 0x02b7, + // Block 0x3d, offset 0xf40 + 0xf40: 0x02b7, 0xf41: 0x02b7, 0xf42: 0x02c0, 0xf43: 0x02c0, 0xf44: 0x02bd, 0xf45: 0x02bd, + 0xf46: 0x02c3, 0xf47: 0x02c3, 0xf48: 0x02ba, 0xf49: 0x02ba, 0xf4a: 0x02c9, 0xf4b: 0x02c9, + 0xf4c: 0x02c6, 0xf4d: 0x02c6, 0xf4e: 0x02d5, 0xf4f: 0x02d5, 0xf50: 0x02d5, 0xf51: 0x02d5, + 0xf52: 0x02db, 0xf53: 0x02db, 0xf54: 0x02db, 0xf55: 0x02db, 0xf56: 0x02e1, 0xf57: 0x02e1, + 0xf58: 0x02e1, 0xf59: 0x02e1, 0xf5a: 0x02de, 0xf5b: 0x02de, 0xf5c: 0x02de, 0xf5d: 0x02de, + 0xf5e: 0x02e4, 0xf5f: 0x02e4, 0xf60: 0x02e7, 0xf61: 0x02e7, 0xf62: 0x02e7, 0xf63: 0x02e7, + 0xf64: 0x4471, 0xf65: 0x4471, 0xf66: 0x02ed, 0xf67: 0x02ed, 0xf68: 0x02ed, 0xf69: 0x02ed, + 0xf6a: 0x02ea, 0xf6b: 0x02ea, 0xf6c: 0x02ea, 0xf6d: 0x02ea, 0xf6e: 0x0308, 0xf6f: 0x0308, + 0xf70: 0x446b, 0xf71: 0x446b, + // Block 0x3e, offset 0xf80 + 0xf93: 0x02d8, 0xf94: 0x02d8, 0xf95: 0x02d8, 0xf96: 0x02d8, 0xf97: 0x02f6, + 0xf98: 0x02f6, 0xf99: 0x02f3, 0xf9a: 0x02f3, 0xf9b: 0x02f9, 0xf9c: 0x02f9, 0xf9d: 0x2052, + 0xf9e: 0x02ff, 0xf9f: 0x02ff, 0xfa0: 0x02f0, 0xfa1: 0x02f0, 0xfa2: 0x02fc, 0xfa3: 0x02fc, + 0xfa4: 0x0305, 0xfa5: 0x0305, 0xfa6: 0x0305, 0xfa7: 0x0305, 0xfa8: 0x028d, 0xfa9: 0x028d, + 0xfaa: 0x25ad, 0xfab: 0x25ad, 0xfac: 0x261d, 0xfad: 0x261d, 0xfae: 0x25ec, 
0xfaf: 0x25ec, + 0xfb0: 0x2608, 0xfb1: 0x2608, 0xfb2: 0x2601, 0xfb3: 0x2601, 0xfb4: 0x260f, 0xfb5: 0x260f, + 0xfb6: 0x2616, 0xfb7: 0x2616, 0xfb8: 0x2616, 0xfb9: 0x25f3, 0xfba: 0x25f3, 0xfbb: 0x25f3, + 0xfbc: 0x0302, 0xfbd: 0x0302, 0xfbe: 0x0302, 0xfbf: 0x0302, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x25b4, 0xfc1: 0x25bb, 0xfc2: 0x25d7, 0xfc3: 0x25f3, 0xfc4: 0x25fa, 0xfc5: 0x1d8c, + 0xfc6: 0x1d91, 0xfc7: 0x1d96, 0xfc8: 0x1da5, 0xfc9: 0x1db4, 0xfca: 0x1db9, 0xfcb: 0x1dbe, + 0xfcc: 0x1dc3, 0xfcd: 0x1dc8, 0xfce: 0x1dd7, 0xfcf: 0x1de6, 0xfd0: 0x1deb, 0xfd1: 0x1df0, + 0xfd2: 0x1dff, 0xfd3: 0x1e0e, 0xfd4: 0x1e13, 0xfd5: 0x1e18, 0xfd6: 0x1e1d, 0xfd7: 0x1e2c, + 0xfd8: 0x1e31, 0xfd9: 0x1e40, 0xfda: 0x1e45, 0xfdb: 0x1e4a, 0xfdc: 0x1e59, 0xfdd: 0x1e5e, + 0xfde: 0x1e63, 0xfdf: 0x1e6d, 0xfe0: 0x1ea9, 0xfe1: 0x1eb8, 0xfe2: 0x1ec7, 0xfe3: 0x1ecc, + 0xfe4: 0x1ed1, 0xfe5: 0x1edb, 0xfe6: 0x1eea, 0xfe7: 0x1eef, 0xfe8: 0x1efe, 0xfe9: 0x1f03, + 0xfea: 0x1f08, 0xfeb: 0x1f17, 0xfec: 0x1f1c, 0xfed: 0x1f2b, 0xfee: 0x1f30, 0xfef: 0x1f35, + 0xff0: 0x1f3a, 0xff1: 0x1f3f, 0xff2: 0x1f44, 0xff3: 0x1f49, 0xff4: 0x1f4e, 0xff5: 0x1f53, + 0xff6: 0x1f58, 0xff7: 0x1f5d, 0xff8: 0x1f62, 0xff9: 0x1f67, 0xffa: 0x1f6c, 0xffb: 0x1f71, + 0xffc: 0x1f76, 0xffd: 0x1f7b, 0xffe: 0x1f80, 0xfff: 0x1f8a, + // Block 0x40, offset 0x1000 + 0x1000: 0x1f8f, 0x1001: 0x1f94, 0x1002: 0x1f99, 0x1003: 0x1fa3, 0x1004: 0x1fa8, 0x1005: 0x1fb2, + 0x1006: 0x1fb7, 0x1007: 0x1fbc, 0x1008: 0x1fc1, 0x1009: 0x1fc6, 0x100a: 0x1fcb, 0x100b: 0x1fd0, + 0x100c: 0x1fd5, 0x100d: 0x1fda, 0x100e: 0x1fe9, 0x100f: 0x1ff8, 0x1010: 0x1ffd, 0x1011: 0x2002, + 0x1012: 0x2007, 0x1013: 0x200c, 0x1014: 0x2011, 0x1015: 0x201b, 0x1016: 0x2020, 0x1017: 0x2025, + 0x1018: 0x2034, 0x1019: 0x2043, 0x101a: 0x2048, 0x101b: 0x4423, 0x101c: 0x4429, 0x101d: 0x445f, + 0x101e: 0x44b6, 0x101f: 0x44bd, 0x1020: 0x44c4, 0x1021: 0x44cb, 0x1022: 0x44d2, 0x1023: 0x44d9, + 0x1024: 0x25c9, 0x1025: 0x25d0, 0x1026: 0x25d7, 0x1027: 0x25de, 0x1028: 0x25f3, 0x1029: 0x25fa, + 0x102a: 0x1d9b, 0x102b: 0x1da0, 0x102c: 0x1da5, 0x102d: 0x1daa, 0x102e: 0x1db4, 0x102f: 0x1db9, + 0x1030: 0x1dcd, 0x1031: 0x1dd2, 0x1032: 0x1dd7, 0x1033: 0x1ddc, 0x1034: 0x1de6, 0x1035: 0x1deb, + 0x1036: 0x1df5, 0x1037: 0x1dfa, 0x1038: 0x1dff, 0x1039: 0x1e04, 0x103a: 0x1e0e, 0x103b: 0x1e13, + 0x103c: 0x1f3f, 0x103d: 0x1f44, 0x103e: 0x1f53, 0x103f: 0x1f58, + // Block 0x41, offset 0x1040 + 0x1040: 0x1f5d, 0x1041: 0x1f71, 0x1042: 0x1f76, 0x1043: 0x1f7b, 0x1044: 0x1f80, 0x1045: 0x1f99, + 0x1046: 0x1fa3, 0x1047: 0x1fa8, 0x1048: 0x1fad, 0x1049: 0x1fc1, 0x104a: 0x1fdf, 0x104b: 0x1fe4, + 0x104c: 0x1fe9, 0x104d: 0x1fee, 0x104e: 0x1ff8, 0x104f: 0x1ffd, 0x1050: 0x445f, 0x1051: 0x202a, + 0x1052: 0x202f, 0x1053: 0x2034, 0x1054: 0x2039, 0x1055: 0x2043, 0x1056: 0x2048, 0x1057: 0x25b4, + 0x1058: 0x25bb, 0x1059: 0x25c2, 0x105a: 0x25d7, 0x105b: 0x25e5, 0x105c: 0x1d8c, 0x105d: 0x1d91, + 0x105e: 0x1d96, 0x105f: 0x1da5, 0x1060: 0x1daf, 0x1061: 0x1dbe, 0x1062: 0x1dc3, 0x1063: 0x1dc8, + 0x1064: 0x1dd7, 0x1065: 0x1de1, 0x1066: 0x1dff, 0x1067: 0x1e18, 0x1068: 0x1e1d, 0x1069: 0x1e2c, + 0x106a: 0x1e31, 0x106b: 0x1e40, 0x106c: 0x1e4a, 0x106d: 0x1e59, 0x106e: 0x1e5e, 0x106f: 0x1e63, + 0x1070: 0x1e6d, 0x1071: 0x1ea9, 0x1072: 0x1eae, 0x1073: 0x1eb8, 0x1074: 0x1ec7, 0x1075: 0x1ecc, + 0x1076: 0x1ed1, 0x1077: 0x1edb, 0x1078: 0x1eea, 0x1079: 0x1efe, 0x107a: 0x1f03, 0x107b: 0x1f08, + 0x107c: 0x1f17, 0x107d: 0x1f1c, 0x107e: 0x1f2b, 0x107f: 0x1f30, + // Block 0x42, offset 0x1080 + 0x1080: 0x1f35, 0x1081: 0x1f3a, 0x1082: 0x1f49, 0x1083: 0x1f4e, 0x1084: 0x1f62, 0x1085: 
0x1f67, + 0x1086: 0x1f6c, 0x1087: 0x1f71, 0x1088: 0x1f76, 0x1089: 0x1f8a, 0x108a: 0x1f8f, 0x108b: 0x1f94, + 0x108c: 0x1f99, 0x108d: 0x1f9e, 0x108e: 0x1fb2, 0x108f: 0x1fb7, 0x1090: 0x1fbc, 0x1091: 0x1fc1, + 0x1092: 0x1fd0, 0x1093: 0x1fd5, 0x1094: 0x1fda, 0x1095: 0x1fe9, 0x1096: 0x1ff3, 0x1097: 0x2002, + 0x1098: 0x2007, 0x1099: 0x4453, 0x109a: 0x201b, 0x109b: 0x2020, 0x109c: 0x2025, 0x109d: 0x2034, + 0x109e: 0x203e, 0x109f: 0x25d7, 0x10a0: 0x25e5, 0x10a1: 0x1da5, 0x10a2: 0x1daf, 0x10a3: 0x1dd7, + 0x10a4: 0x1de1, 0x10a5: 0x1dff, 0x10a6: 0x1e09, 0x10a7: 0x1e6d, 0x10a8: 0x1e72, 0x10a9: 0x1e95, + 0x10aa: 0x1e9a, 0x10ab: 0x1f71, 0x10ac: 0x1f76, 0x10ad: 0x1f99, 0x10ae: 0x1fe9, 0x10af: 0x1ff3, + 0x10b0: 0x2034, 0x10b1: 0x203e, 0x10b2: 0x4507, 0x10b3: 0x450f, 0x10b4: 0x4517, 0x10b5: 0x1ef4, + 0x10b6: 0x1ef9, 0x10b7: 0x1f0d, 0x10b8: 0x1f12, 0x10b9: 0x1f21, 0x10ba: 0x1f26, 0x10bb: 0x1e77, + 0x10bc: 0x1e7c, 0x10bd: 0x1e9f, 0x10be: 0x1ea4, 0x10bf: 0x1e36, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x1e3b, 0x10c1: 0x1e22, 0x10c2: 0x1e27, 0x10c3: 0x1e4f, 0x10c4: 0x1e54, 0x10c5: 0x1ebd, + 0x10c6: 0x1ec2, 0x10c7: 0x1ee0, 0x10c8: 0x1ee5, 0x10c9: 0x1e81, 0x10ca: 0x1e86, 0x10cb: 0x1e8b, + 0x10cc: 0x1e95, 0x10cd: 0x1e90, 0x10ce: 0x1e68, 0x10cf: 0x1eb3, 0x10d0: 0x1ed6, 0x10d1: 0x1ef4, + 0x10d2: 0x1ef9, 0x10d3: 0x1f0d, 0x10d4: 0x1f12, 0x10d5: 0x1f21, 0x10d6: 0x1f26, 0x10d7: 0x1e77, + 0x10d8: 0x1e7c, 0x10d9: 0x1e9f, 0x10da: 0x1ea4, 0x10db: 0x1e36, 0x10dc: 0x1e3b, 0x10dd: 0x1e22, + 0x10de: 0x1e27, 0x10df: 0x1e4f, 0x10e0: 0x1e54, 0x10e1: 0x1ebd, 0x10e2: 0x1ec2, 0x10e3: 0x1ee0, + 0x10e4: 0x1ee5, 0x10e5: 0x1e81, 0x10e6: 0x1e86, 0x10e7: 0x1e8b, 0x10e8: 0x1e95, 0x10e9: 0x1e90, + 0x10ea: 0x1e68, 0x10eb: 0x1eb3, 0x10ec: 0x1ed6, 0x10ed: 0x1e81, 0x10ee: 0x1e86, 0x10ef: 0x1e8b, + 0x10f0: 0x1e95, 0x10f1: 0x1e72, 0x10f2: 0x1e9a, 0x10f3: 0x1eef, 0x10f4: 0x1e59, 0x10f5: 0x1e5e, + 0x10f6: 0x1e63, 0x10f7: 0x1e81, 0x10f8: 0x1e86, 0x10f9: 0x1e8b, 0x10fa: 0x1eef, 0x10fb: 0x1efe, + 0x10fc: 0x440b, 0x10fd: 0x440b, + // Block 0x44, offset 0x1100 + 0x1110: 0x2314, 0x1111: 0x2329, + 0x1112: 0x2329, 0x1113: 0x2330, 0x1114: 0x2337, 0x1115: 0x234c, 0x1116: 0x2353, 0x1117: 0x235a, + 0x1118: 0x237d, 0x1119: 0x237d, 0x111a: 0x23a0, 0x111b: 0x2399, 0x111c: 0x23b5, 0x111d: 0x23a7, + 0x111e: 0x23ae, 0x111f: 0x23d1, 0x1120: 0x23d1, 0x1121: 0x23ca, 0x1122: 0x23d8, 0x1123: 0x23d8, + 0x1124: 0x2402, 0x1125: 0x2402, 0x1126: 0x241e, 0x1127: 0x23e6, 0x1128: 0x23e6, 0x1129: 0x23df, + 0x112a: 0x23f4, 0x112b: 0x23f4, 0x112c: 0x23fb, 0x112d: 0x23fb, 0x112e: 0x2425, 0x112f: 0x2433, + 0x1130: 0x2433, 0x1131: 0x243a, 0x1132: 0x243a, 0x1133: 0x2441, 0x1134: 0x2448, 0x1135: 0x244f, + 0x1136: 0x2456, 0x1137: 0x2456, 0x1138: 0x245d, 0x1139: 0x246b, 0x113a: 0x2479, 0x113b: 0x2472, + 0x113c: 0x2480, 0x113d: 0x2480, 0x113e: 0x2495, 0x113f: 0x249c, + // Block 0x45, offset 0x1140 + 0x1140: 0x24cd, 0x1141: 0x24db, 0x1142: 0x24d4, 0x1143: 0x24b8, 0x1144: 0x24b8, 0x1145: 0x24e2, + 0x1146: 0x24e2, 0x1147: 0x24e9, 0x1148: 0x24e9, 0x1149: 0x2513, 0x114a: 0x251a, 0x114b: 0x2521, + 0x114c: 0x24f7, 0x114d: 0x2505, 0x114e: 0x2528, 0x114f: 0x252f, + 0x1152: 0x24fe, 0x1153: 0x2583, 0x1154: 0x258a, 0x1155: 0x2560, 0x1156: 0x2567, 0x1157: 0x254b, + 0x1158: 0x254b, 0x1159: 0x2552, 0x115a: 0x257c, 0x115b: 0x2575, 0x115c: 0x259f, 0x115d: 0x259f, + 0x115e: 0x230d, 0x115f: 0x2322, 0x1160: 0x231b, 0x1161: 0x2345, 0x1162: 0x233e, 0x1163: 0x2368, + 0x1164: 0x2361, 0x1165: 0x238b, 0x1166: 0x236f, 0x1167: 0x2384, 0x1168: 0x23bc, 0x1169: 0x2409, + 0x116a: 0x23ed, 0x116b: 0x242c, 0x116c: 0x24c6, 
0x116d: 0x24f0, 0x116e: 0x2598, 0x116f: 0x2591, + 0x1170: 0x25a6, 0x1171: 0x253d, 0x1172: 0x24a3, 0x1173: 0x256e, 0x1174: 0x2495, 0x1175: 0x24cd, + 0x1176: 0x2464, 0x1177: 0x24b1, 0x1178: 0x2544, 0x1179: 0x2536, 0x117a: 0x24bf, 0x117b: 0x24aa, + 0x117c: 0x24bf, 0x117d: 0x2544, 0x117e: 0x2376, 0x117f: 0x2392, + // Block 0x46, offset 0x1180 + 0x1180: 0x250c, 0x1181: 0x2487, 0x1182: 0x2306, 0x1183: 0x24aa, 0x1184: 0x244f, 0x1185: 0x241e, + 0x1186: 0x23c3, 0x1187: 0x2559, + 0x11b0: 0x2417, 0x11b1: 0x248e, 0x11b2: 0x27c2, 0x11b3: 0x27b9, 0x11b4: 0x27ef, 0x11b5: 0x27dd, + 0x11b6: 0x27cb, 0x11b7: 0x27e6, 0x11b8: 0x27f8, 0x11b9: 0x2410, 0x11ba: 0x2c7f, 0x11bb: 0x2aff, + 0x11bc: 0x27d4, + // Block 0x47, offset 0x11c0 + 0x11d0: 0x0019, 0x11d1: 0x0483, + 0x11d2: 0x0487, 0x11d3: 0x0035, 0x11d4: 0x0037, 0x11d5: 0x0003, 0x11d6: 0x003f, 0x11d7: 0x04bf, + 0x11d8: 0x04c3, 0x11d9: 0x1b5f, + 0x11e0: 0x8132, 0x11e1: 0x8132, 0x11e2: 0x8132, 0x11e3: 0x8132, + 0x11e4: 0x8132, 0x11e5: 0x8132, 0x11e6: 0x8132, 0x11e7: 0x812d, 0x11e8: 0x812d, 0x11e9: 0x812d, + 0x11ea: 0x812d, 0x11eb: 0x812d, 0x11ec: 0x812d, 0x11ed: 0x812d, 0x11ee: 0x8132, 0x11ef: 0x8132, + 0x11f0: 0x1873, 0x11f1: 0x0443, 0x11f2: 0x043f, 0x11f3: 0x007f, 0x11f4: 0x007f, 0x11f5: 0x0011, + 0x11f6: 0x0013, 0x11f7: 0x00b7, 0x11f8: 0x00bb, 0x11f9: 0x04b7, 0x11fa: 0x04bb, 0x11fb: 0x04ab, + 0x11fc: 0x04af, 0x11fd: 0x0493, 0x11fe: 0x0497, 0x11ff: 0x048b, + // Block 0x48, offset 0x1200 + 0x1200: 0x048f, 0x1201: 0x049b, 0x1202: 0x049f, 0x1203: 0x04a3, 0x1204: 0x04a7, + 0x1207: 0x0077, 0x1208: 0x007b, 0x1209: 0x426c, 0x120a: 0x426c, 0x120b: 0x426c, + 0x120c: 0x426c, 0x120d: 0x007f, 0x120e: 0x007f, 0x120f: 0x007f, 0x1210: 0x0019, 0x1211: 0x0483, + 0x1212: 0x001d, 0x1214: 0x0037, 0x1215: 0x0035, 0x1216: 0x003f, 0x1217: 0x0003, + 0x1218: 0x0443, 0x1219: 0x0011, 0x121a: 0x0013, 0x121b: 0x00b7, 0x121c: 0x00bb, 0x121d: 0x04b7, + 0x121e: 0x04bb, 0x121f: 0x0007, 0x1220: 0x000d, 0x1221: 0x0015, 0x1222: 0x0017, 0x1223: 0x001b, + 0x1224: 0x0039, 0x1225: 0x003d, 0x1226: 0x003b, 0x1228: 0x0079, 0x1229: 0x0009, + 0x122a: 0x000b, 0x122b: 0x0041, + 0x1230: 0x42ad, 0x1231: 0x442f, 0x1232: 0x42b2, 0x1234: 0x42b7, + 0x1236: 0x42bc, 0x1237: 0x4435, 0x1238: 0x42c1, 0x1239: 0x443b, 0x123a: 0x42c6, 0x123b: 0x4441, + 0x123c: 0x42cb, 0x123d: 0x4447, 0x123e: 0x42d0, 0x123f: 0x444d, + // Block 0x49, offset 0x1240 + 0x1240: 0x0236, 0x1241: 0x4411, 0x1242: 0x4411, 0x1243: 0x4417, 0x1244: 0x4417, 0x1245: 0x4459, + 0x1246: 0x4459, 0x1247: 0x441d, 0x1248: 0x441d, 0x1249: 0x4465, 0x124a: 0x4465, 0x124b: 0x4465, + 0x124c: 0x4465, 0x124d: 0x0239, 0x124e: 0x0239, 0x124f: 0x023c, 0x1250: 0x023c, 0x1251: 0x023c, + 0x1252: 0x023c, 0x1253: 0x023f, 0x1254: 0x023f, 0x1255: 0x0242, 0x1256: 0x0242, 0x1257: 0x0242, + 0x1258: 0x0242, 0x1259: 0x0245, 0x125a: 0x0245, 0x125b: 0x0245, 0x125c: 0x0245, 0x125d: 0x0248, + 0x125e: 0x0248, 0x125f: 0x0248, 0x1260: 0x0248, 0x1261: 0x024b, 0x1262: 0x024b, 0x1263: 0x024b, + 0x1264: 0x024b, 0x1265: 0x024e, 0x1266: 0x024e, 0x1267: 0x024e, 0x1268: 0x024e, 0x1269: 0x0251, + 0x126a: 0x0251, 0x126b: 0x0254, 0x126c: 0x0254, 0x126d: 0x0257, 0x126e: 0x0257, 0x126f: 0x025a, + 0x1270: 0x025a, 0x1271: 0x025d, 0x1272: 0x025d, 0x1273: 0x025d, 0x1274: 0x025d, 0x1275: 0x0260, + 0x1276: 0x0260, 0x1277: 0x0260, 0x1278: 0x0260, 0x1279: 0x0263, 0x127a: 0x0263, 0x127b: 0x0263, + 0x127c: 0x0263, 0x127d: 0x0266, 0x127e: 0x0266, 0x127f: 0x0266, + // Block 0x4a, offset 0x1280 + 0x1280: 0x0266, 0x1281: 0x0269, 0x1282: 0x0269, 0x1283: 0x0269, 0x1284: 0x0269, 0x1285: 0x026c, + 0x1286: 0x026c, 
0x1287: 0x026c, 0x1288: 0x026c, 0x1289: 0x026f, 0x128a: 0x026f, 0x128b: 0x026f, + 0x128c: 0x026f, 0x128d: 0x0272, 0x128e: 0x0272, 0x128f: 0x0272, 0x1290: 0x0272, 0x1291: 0x0275, + 0x1292: 0x0275, 0x1293: 0x0275, 0x1294: 0x0275, 0x1295: 0x0278, 0x1296: 0x0278, 0x1297: 0x0278, + 0x1298: 0x0278, 0x1299: 0x027b, 0x129a: 0x027b, 0x129b: 0x027b, 0x129c: 0x027b, 0x129d: 0x027e, + 0x129e: 0x027e, 0x129f: 0x027e, 0x12a0: 0x027e, 0x12a1: 0x0281, 0x12a2: 0x0281, 0x12a3: 0x0281, + 0x12a4: 0x0281, 0x12a5: 0x0284, 0x12a6: 0x0284, 0x12a7: 0x0284, 0x12a8: 0x0284, 0x12a9: 0x0287, + 0x12aa: 0x0287, 0x12ab: 0x0287, 0x12ac: 0x0287, 0x12ad: 0x028a, 0x12ae: 0x028a, 0x12af: 0x028d, + 0x12b0: 0x028d, 0x12b1: 0x0290, 0x12b2: 0x0290, 0x12b3: 0x0290, 0x12b4: 0x0290, 0x12b5: 0x2e03, + 0x12b6: 0x2e03, 0x12b7: 0x2e0b, 0x12b8: 0x2e0b, 0x12b9: 0x2e13, 0x12ba: 0x2e13, 0x12bb: 0x1f85, + 0x12bc: 0x1f85, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x0081, 0x12c1: 0x0083, 0x12c2: 0x0085, 0x12c3: 0x0087, 0x12c4: 0x0089, 0x12c5: 0x008b, + 0x12c6: 0x008d, 0x12c7: 0x008f, 0x12c8: 0x0091, 0x12c9: 0x0093, 0x12ca: 0x0095, 0x12cb: 0x0097, + 0x12cc: 0x0099, 0x12cd: 0x009b, 0x12ce: 0x009d, 0x12cf: 0x009f, 0x12d0: 0x00a1, 0x12d1: 0x00a3, + 0x12d2: 0x00a5, 0x12d3: 0x00a7, 0x12d4: 0x00a9, 0x12d5: 0x00ab, 0x12d6: 0x00ad, 0x12d7: 0x00af, + 0x12d8: 0x00b1, 0x12d9: 0x00b3, 0x12da: 0x00b5, 0x12db: 0x00b7, 0x12dc: 0x00b9, 0x12dd: 0x00bb, + 0x12de: 0x00bd, 0x12df: 0x0477, 0x12e0: 0x047b, 0x12e1: 0x0487, 0x12e2: 0x049b, 0x12e3: 0x049f, + 0x12e4: 0x0483, 0x12e5: 0x05ab, 0x12e6: 0x05a3, 0x12e7: 0x04c7, 0x12e8: 0x04cf, 0x12e9: 0x04d7, + 0x12ea: 0x04df, 0x12eb: 0x04e7, 0x12ec: 0x056b, 0x12ed: 0x0573, 0x12ee: 0x057b, 0x12ef: 0x051f, + 0x12f0: 0x05af, 0x12f1: 0x04cb, 0x12f2: 0x04d3, 0x12f3: 0x04db, 0x12f4: 0x04e3, 0x12f5: 0x04eb, + 0x12f6: 0x04ef, 0x12f7: 0x04f3, 0x12f8: 0x04f7, 0x12f9: 0x04fb, 0x12fa: 0x04ff, 0x12fb: 0x0503, + 0x12fc: 0x0507, 0x12fd: 0x050b, 0x12fe: 0x050f, 0x12ff: 0x0513, + // Block 0x4c, offset 0x1300 + 0x1300: 0x0517, 0x1301: 0x051b, 0x1302: 0x0523, 0x1303: 0x0527, 0x1304: 0x052b, 0x1305: 0x052f, + 0x1306: 0x0533, 0x1307: 0x0537, 0x1308: 0x053b, 0x1309: 0x053f, 0x130a: 0x0543, 0x130b: 0x0547, + 0x130c: 0x054b, 0x130d: 0x054f, 0x130e: 0x0553, 0x130f: 0x0557, 0x1310: 0x055b, 0x1311: 0x055f, + 0x1312: 0x0563, 0x1313: 0x0567, 0x1314: 0x056f, 0x1315: 0x0577, 0x1316: 0x057f, 0x1317: 0x0583, + 0x1318: 0x0587, 0x1319: 0x058b, 0x131a: 0x058f, 0x131b: 0x0593, 0x131c: 0x0597, 0x131d: 0x05a7, + 0x131e: 0x4a7b, 0x131f: 0x4a81, 0x1320: 0x03c3, 0x1321: 0x0313, 0x1322: 0x0317, 0x1323: 0x4a3e, + 0x1324: 0x031b, 0x1325: 0x4a44, 0x1326: 0x4a4a, 0x1327: 0x031f, 0x1328: 0x0323, 0x1329: 0x0327, + 0x132a: 0x4a50, 0x132b: 0x4a56, 0x132c: 0x4a5c, 0x132d: 0x4a62, 0x132e: 0x4a68, 0x132f: 0x4a6e, + 0x1330: 0x0367, 0x1331: 0x032b, 0x1332: 0x032f, 0x1333: 0x0333, 0x1334: 0x037b, 0x1335: 0x0337, + 0x1336: 0x033b, 0x1337: 0x033f, 0x1338: 0x0343, 0x1339: 0x0347, 0x133a: 0x034b, 0x133b: 0x034f, + 0x133c: 0x0353, 0x133d: 0x0357, 0x133e: 0x035b, + // Block 0x4d, offset 0x1340 + 0x1342: 0x49c0, 0x1343: 0x49c6, 0x1344: 0x49cc, 0x1345: 0x49d2, + 0x1346: 0x49d8, 0x1347: 0x49de, 0x134a: 0x49e4, 0x134b: 0x49ea, + 0x134c: 0x49f0, 0x134d: 0x49f6, 0x134e: 0x49fc, 0x134f: 0x4a02, + 0x1352: 0x4a08, 0x1353: 0x4a0e, 0x1354: 0x4a14, 0x1355: 0x4a1a, 0x1356: 0x4a20, 0x1357: 0x4a26, + 0x135a: 0x4a2c, 0x135b: 0x4a32, 0x135c: 0x4a38, + 0x1360: 0x00bf, 0x1361: 0x00c2, 0x1362: 0x00cb, 0x1363: 0x4267, + 0x1364: 0x00c8, 0x1365: 0x00c5, 0x1366: 0x0447, 0x1368: 0x046b, 0x1369: 0x044b, + 0x136a: 
0x044f, 0x136b: 0x0453, 0x136c: 0x0457, 0x136d: 0x046f, 0x136e: 0x0473, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0063, 0x1381: 0x0065, 0x1382: 0x0067, 0x1383: 0x0069, 0x1384: 0x006b, 0x1385: 0x006d, + 0x1386: 0x006f, 0x1387: 0x0071, 0x1388: 0x0073, 0x1389: 0x0075, 0x138a: 0x0083, 0x138b: 0x0085, + 0x138c: 0x0087, 0x138d: 0x0089, 0x138e: 0x008b, 0x138f: 0x008d, 0x1390: 0x008f, 0x1391: 0x0091, + 0x1392: 0x0093, 0x1393: 0x0095, 0x1394: 0x0097, 0x1395: 0x0099, 0x1396: 0x009b, 0x1397: 0x009d, + 0x1398: 0x009f, 0x1399: 0x00a1, 0x139a: 0x00a3, 0x139b: 0x00a5, 0x139c: 0x00a7, 0x139d: 0x00a9, + 0x139e: 0x00ab, 0x139f: 0x00ad, 0x13a0: 0x00af, 0x13a1: 0x00b1, 0x13a2: 0x00b3, 0x13a3: 0x00b5, + 0x13a4: 0x00dd, 0x13a5: 0x00f2, 0x13a8: 0x0173, 0x13a9: 0x0176, + 0x13aa: 0x0179, 0x13ab: 0x017c, 0x13ac: 0x017f, 0x13ad: 0x0182, 0x13ae: 0x0185, 0x13af: 0x0188, + 0x13b0: 0x018b, 0x13b1: 0x018e, 0x13b2: 0x0191, 0x13b3: 0x0194, 0x13b4: 0x0197, 0x13b5: 0x019a, + 0x13b6: 0x019d, 0x13b7: 0x01a0, 0x13b8: 0x01a3, 0x13b9: 0x0188, 0x13ba: 0x01a6, 0x13bb: 0x01a9, + 0x13bc: 0x01ac, 0x13bd: 0x01af, 0x13be: 0x01b2, 0x13bf: 0x01b5, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x01fd, 0x13c1: 0x0200, 0x13c2: 0x0203, 0x13c3: 0x045b, 0x13c4: 0x01c7, 0x13c5: 0x01d0, + 0x13c6: 0x01d6, 0x13c7: 0x01fa, 0x13c8: 0x01eb, 0x13c9: 0x01e8, 0x13ca: 0x0206, 0x13cb: 0x0209, + 0x13ce: 0x0021, 0x13cf: 0x0023, 0x13d0: 0x0025, 0x13d1: 0x0027, + 0x13d2: 0x0029, 0x13d3: 0x002b, 0x13d4: 0x002d, 0x13d5: 0x002f, 0x13d6: 0x0031, 0x13d7: 0x0033, + 0x13d8: 0x0021, 0x13d9: 0x0023, 0x13da: 0x0025, 0x13db: 0x0027, 0x13dc: 0x0029, 0x13dd: 0x002b, + 0x13de: 0x002d, 0x13df: 0x002f, 0x13e0: 0x0031, 0x13e1: 0x0033, 0x13e2: 0x0021, 0x13e3: 0x0023, + 0x13e4: 0x0025, 0x13e5: 0x0027, 0x13e6: 0x0029, 0x13e7: 0x002b, 0x13e8: 0x002d, 0x13e9: 0x002f, + 0x13ea: 0x0031, 0x13eb: 0x0033, 0x13ec: 0x0021, 0x13ed: 0x0023, 0x13ee: 0x0025, 0x13ef: 0x0027, + 0x13f0: 0x0029, 0x13f1: 0x002b, 0x13f2: 0x002d, 0x13f3: 0x002f, 0x13f4: 0x0031, 0x13f5: 0x0033, + 0x13f6: 0x0021, 0x13f7: 0x0023, 0x13f8: 0x0025, 0x13f9: 0x0027, 0x13fa: 0x0029, 0x13fb: 0x002b, + 0x13fc: 0x002d, 0x13fd: 0x002f, 0x13fe: 0x0031, 0x13ff: 0x0033, + // Block 0x50, offset 0x1400 + 0x1400: 0x0239, 0x1401: 0x023c, 0x1402: 0x0248, 0x1403: 0x0251, 0x1405: 0x028a, + 0x1406: 0x025a, 0x1407: 0x024b, 0x1408: 0x0269, 0x1409: 0x0290, 0x140a: 0x027b, 0x140b: 0x027e, + 0x140c: 0x0281, 0x140d: 0x0284, 0x140e: 0x025d, 0x140f: 0x026f, 0x1410: 0x0275, 0x1411: 0x0263, + 0x1412: 0x0278, 0x1413: 0x0257, 0x1414: 0x0260, 0x1415: 0x0242, 0x1416: 0x0245, 0x1417: 0x024e, + 0x1418: 0x0254, 0x1419: 0x0266, 0x141a: 0x026c, 0x141b: 0x0272, 0x141c: 0x0293, 0x141d: 0x02e4, + 0x141e: 0x02cc, 0x141f: 0x0296, 0x1421: 0x023c, 0x1422: 0x0248, + 0x1424: 0x0287, 0x1427: 0x024b, 0x1429: 0x0290, + 0x142a: 0x027b, 0x142b: 0x027e, 0x142c: 0x0281, 0x142d: 0x0284, 0x142e: 0x025d, 0x142f: 0x026f, + 0x1430: 0x0275, 0x1431: 0x0263, 0x1432: 0x0278, 0x1434: 0x0260, 0x1435: 0x0242, + 0x1436: 0x0245, 0x1437: 0x024e, 0x1439: 0x0266, 0x143b: 0x0272, + // Block 0x51, offset 0x1440 + 0x1442: 0x0248, + 0x1447: 0x024b, 0x1449: 0x0290, 0x144b: 0x027e, + 0x144d: 0x0284, 0x144e: 0x025d, 0x144f: 0x026f, 0x1451: 0x0263, + 0x1452: 0x0278, 0x1454: 0x0260, 0x1457: 0x024e, + 0x1459: 0x0266, 0x145b: 0x0272, 0x145d: 0x02e4, + 0x145f: 0x0296, 0x1461: 0x023c, 0x1462: 0x0248, + 0x1464: 0x0287, 0x1467: 0x024b, 0x1468: 0x0269, 0x1469: 0x0290, + 0x146a: 0x027b, 0x146c: 0x0281, 0x146d: 0x0284, 0x146e: 0x025d, 0x146f: 0x026f, + 0x1470: 0x0275, 0x1471: 0x0263, 0x1472: 0x0278, 0x1474: 
0x0260, 0x1475: 0x0242, + 0x1476: 0x0245, 0x1477: 0x024e, 0x1479: 0x0266, 0x147a: 0x026c, 0x147b: 0x0272, + 0x147c: 0x0293, 0x147e: 0x02cc, + // Block 0x52, offset 0x1480 + 0x1480: 0x0239, 0x1481: 0x023c, 0x1482: 0x0248, 0x1483: 0x0251, 0x1484: 0x0287, 0x1485: 0x028a, + 0x1486: 0x025a, 0x1487: 0x024b, 0x1488: 0x0269, 0x1489: 0x0290, 0x148b: 0x027e, + 0x148c: 0x0281, 0x148d: 0x0284, 0x148e: 0x025d, 0x148f: 0x026f, 0x1490: 0x0275, 0x1491: 0x0263, + 0x1492: 0x0278, 0x1493: 0x0257, 0x1494: 0x0260, 0x1495: 0x0242, 0x1496: 0x0245, 0x1497: 0x024e, + 0x1498: 0x0254, 0x1499: 0x0266, 0x149a: 0x026c, 0x149b: 0x0272, + 0x14a1: 0x023c, 0x14a2: 0x0248, 0x14a3: 0x0251, + 0x14a5: 0x028a, 0x14a6: 0x025a, 0x14a7: 0x024b, 0x14a8: 0x0269, 0x14a9: 0x0290, + 0x14ab: 0x027e, 0x14ac: 0x0281, 0x14ad: 0x0284, 0x14ae: 0x025d, 0x14af: 0x026f, + 0x14b0: 0x0275, 0x14b1: 0x0263, 0x14b2: 0x0278, 0x14b3: 0x0257, 0x14b4: 0x0260, 0x14b5: 0x0242, + 0x14b6: 0x0245, 0x14b7: 0x024e, 0x14b8: 0x0254, 0x14b9: 0x0266, 0x14ba: 0x026c, 0x14bb: 0x0272, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x1879, 0x14c1: 0x1876, 0x14c2: 0x187c, 0x14c3: 0x18a0, 0x14c4: 0x18c4, 0x14c5: 0x18e8, + 0x14c6: 0x190c, 0x14c7: 0x1915, 0x14c8: 0x191b, 0x14c9: 0x1921, 0x14ca: 0x1927, + 0x14d0: 0x1a8f, 0x14d1: 0x1a93, + 0x14d2: 0x1a97, 0x14d3: 0x1a9b, 0x14d4: 0x1a9f, 0x14d5: 0x1aa3, 0x14d6: 0x1aa7, 0x14d7: 0x1aab, + 0x14d8: 0x1aaf, 0x14d9: 0x1ab3, 0x14da: 0x1ab7, 0x14db: 0x1abb, 0x14dc: 0x1abf, 0x14dd: 0x1ac3, + 0x14de: 0x1ac7, 0x14df: 0x1acb, 0x14e0: 0x1acf, 0x14e1: 0x1ad3, 0x14e2: 0x1ad7, 0x14e3: 0x1adb, + 0x14e4: 0x1adf, 0x14e5: 0x1ae3, 0x14e6: 0x1ae7, 0x14e7: 0x1aeb, 0x14e8: 0x1aef, 0x14e9: 0x1af3, + 0x14ea: 0x2721, 0x14eb: 0x0047, 0x14ec: 0x0065, 0x14ed: 0x193c, 0x14ee: 0x19b4, + 0x14f0: 0x0043, 0x14f1: 0x0045, 0x14f2: 0x0047, 0x14f3: 0x0049, 0x14f4: 0x004b, 0x14f5: 0x004d, + 0x14f6: 0x004f, 0x14f7: 0x0051, 0x14f8: 0x0053, 0x14f9: 0x0055, 0x14fa: 0x0057, 0x14fb: 0x0059, + 0x14fc: 0x005b, 0x14fd: 0x005d, 0x14fe: 0x005f, 0x14ff: 0x0061, + // Block 0x54, offset 0x1500 + 0x1500: 0x26b0, 0x1501: 0x26c5, 0x1502: 0x0503, + 0x1510: 0x0c0f, 0x1511: 0x0a47, + 0x1512: 0x08d3, 0x1513: 0x45c7, 0x1514: 0x071b, 0x1515: 0x09ef, 0x1516: 0x132f, 0x1517: 0x09ff, + 0x1518: 0x0727, 0x1519: 0x0cd7, 0x151a: 0x0eaf, 0x151b: 0x0caf, 0x151c: 0x0827, 0x151d: 0x0b6b, + 0x151e: 0x07bf, 0x151f: 0x0cb7, 0x1520: 0x0813, 0x1521: 0x1117, 0x1522: 0x0f83, 0x1523: 0x138b, + 0x1524: 0x09d3, 0x1525: 0x090b, 0x1526: 0x0e63, 0x1527: 0x0c1b, 0x1528: 0x0c47, 0x1529: 0x06bf, + 0x152a: 0x06cb, 0x152b: 0x140b, 0x152c: 0x0adb, 0x152d: 0x06e7, 0x152e: 0x08ef, 0x152f: 0x0c3b, + 0x1530: 0x13b3, 0x1531: 0x0c13, 0x1532: 0x106f, 0x1533: 0x10ab, 0x1534: 0x08f7, 0x1535: 0x0e43, + 0x1536: 0x0d0b, 0x1537: 0x0d07, 0x1538: 0x0f97, 0x1539: 0x082b, 0x153a: 0x0957, 0x153b: 0x1443, + // Block 0x55, offset 0x1540 + 0x1540: 0x06fb, 0x1541: 0x06f3, 0x1542: 0x0703, 0x1543: 0x1647, 0x1544: 0x0747, 0x1545: 0x0757, + 0x1546: 0x075b, 0x1547: 0x0763, 0x1548: 0x076b, 0x1549: 0x076f, 0x154a: 0x077b, 0x154b: 0x0773, + 0x154c: 0x05b3, 0x154d: 0x165b, 0x154e: 0x078f, 0x154f: 0x0793, 0x1550: 0x0797, 0x1551: 0x07b3, + 0x1552: 0x164c, 0x1553: 0x05b7, 0x1554: 0x079f, 0x1555: 0x07bf, 0x1556: 0x1656, 0x1557: 0x07cf, + 0x1558: 0x07d7, 0x1559: 0x0737, 0x155a: 0x07df, 0x155b: 0x07e3, 0x155c: 0x1831, 0x155d: 0x07ff, + 0x155e: 0x0807, 0x155f: 0x05bf, 0x1560: 0x081f, 0x1561: 0x0823, 0x1562: 0x082b, 0x1563: 0x082f, + 0x1564: 0x05c3, 0x1565: 0x0847, 0x1566: 0x084b, 0x1567: 0x0857, 0x1568: 0x0863, 0x1569: 0x0867, + 0x156a: 0x086b, 0x156b: 
0x0873, 0x156c: 0x0893, 0x156d: 0x0897, 0x156e: 0x089f, 0x156f: 0x08af, + 0x1570: 0x08b7, 0x1571: 0x08bb, 0x1572: 0x08bb, 0x1573: 0x08bb, 0x1574: 0x166a, 0x1575: 0x0e93, + 0x1576: 0x08cf, 0x1577: 0x08d7, 0x1578: 0x166f, 0x1579: 0x08e3, 0x157a: 0x08eb, 0x157b: 0x08f3, + 0x157c: 0x091b, 0x157d: 0x0907, 0x157e: 0x0913, 0x157f: 0x0917, + // Block 0x56, offset 0x1580 + 0x1580: 0x091f, 0x1581: 0x0927, 0x1582: 0x092b, 0x1583: 0x0933, 0x1584: 0x093b, 0x1585: 0x093f, + 0x1586: 0x093f, 0x1587: 0x0947, 0x1588: 0x094f, 0x1589: 0x0953, 0x158a: 0x095f, 0x158b: 0x0983, + 0x158c: 0x0967, 0x158d: 0x0987, 0x158e: 0x096b, 0x158f: 0x0973, 0x1590: 0x080b, 0x1591: 0x09cf, + 0x1592: 0x0997, 0x1593: 0x099b, 0x1594: 0x099f, 0x1595: 0x0993, 0x1596: 0x09a7, 0x1597: 0x09a3, + 0x1598: 0x09bb, 0x1599: 0x1674, 0x159a: 0x09d7, 0x159b: 0x09db, 0x159c: 0x09e3, 0x159d: 0x09ef, + 0x159e: 0x09f7, 0x159f: 0x0a13, 0x15a0: 0x1679, 0x15a1: 0x167e, 0x15a2: 0x0a1f, 0x15a3: 0x0a23, + 0x15a4: 0x0a27, 0x15a5: 0x0a1b, 0x15a6: 0x0a2f, 0x15a7: 0x05c7, 0x15a8: 0x05cb, 0x15a9: 0x0a37, + 0x15aa: 0x0a3f, 0x15ab: 0x0a3f, 0x15ac: 0x1683, 0x15ad: 0x0a5b, 0x15ae: 0x0a5f, 0x15af: 0x0a63, + 0x15b0: 0x0a6b, 0x15b1: 0x1688, 0x15b2: 0x0a73, 0x15b3: 0x0a77, 0x15b4: 0x0b4f, 0x15b5: 0x0a7f, + 0x15b6: 0x05cf, 0x15b7: 0x0a8b, 0x15b8: 0x0a9b, 0x15b9: 0x0aa7, 0x15ba: 0x0aa3, 0x15bb: 0x1692, + 0x15bc: 0x0aaf, 0x15bd: 0x1697, 0x15be: 0x0abb, 0x15bf: 0x0ab7, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x0abf, 0x15c1: 0x0acf, 0x15c2: 0x0ad3, 0x15c3: 0x05d3, 0x15c4: 0x0ae3, 0x15c5: 0x0aeb, + 0x15c6: 0x0aef, 0x15c7: 0x0af3, 0x15c8: 0x05d7, 0x15c9: 0x169c, 0x15ca: 0x05db, 0x15cb: 0x0b0f, + 0x15cc: 0x0b13, 0x15cd: 0x0b17, 0x15ce: 0x0b1f, 0x15cf: 0x1863, 0x15d0: 0x0b37, 0x15d1: 0x16a6, + 0x15d2: 0x16a6, 0x15d3: 0x11d7, 0x15d4: 0x0b47, 0x15d5: 0x0b47, 0x15d6: 0x05df, 0x15d7: 0x16c9, + 0x15d8: 0x179b, 0x15d9: 0x0b57, 0x15da: 0x0b5f, 0x15db: 0x05e3, 0x15dc: 0x0b73, 0x15dd: 0x0b83, + 0x15de: 0x0b87, 0x15df: 0x0b8f, 0x15e0: 0x0b9f, 0x15e1: 0x05eb, 0x15e2: 0x05e7, 0x15e3: 0x0ba3, + 0x15e4: 0x16ab, 0x15e5: 0x0ba7, 0x15e6: 0x0bbb, 0x15e7: 0x0bbf, 0x15e8: 0x0bc3, 0x15e9: 0x0bbf, + 0x15ea: 0x0bcf, 0x15eb: 0x0bd3, 0x15ec: 0x0be3, 0x15ed: 0x0bdb, 0x15ee: 0x0bdf, 0x15ef: 0x0be7, + 0x15f0: 0x0beb, 0x15f1: 0x0bef, 0x15f2: 0x0bfb, 0x15f3: 0x0bff, 0x15f4: 0x0c17, 0x15f5: 0x0c1f, + 0x15f6: 0x0c2f, 0x15f7: 0x0c43, 0x15f8: 0x16ba, 0x15f9: 0x0c3f, 0x15fa: 0x0c33, 0x15fb: 0x0c4b, + 0x15fc: 0x0c53, 0x15fd: 0x0c67, 0x15fe: 0x16bf, 0x15ff: 0x0c6f, + // Block 0x58, offset 0x1600 + 0x1600: 0x0c63, 0x1601: 0x0c5b, 0x1602: 0x05ef, 0x1603: 0x0c77, 0x1604: 0x0c7f, 0x1605: 0x0c87, + 0x1606: 0x0c7b, 0x1607: 0x05f3, 0x1608: 0x0c97, 0x1609: 0x0c9f, 0x160a: 0x16c4, 0x160b: 0x0ccb, + 0x160c: 0x0cff, 0x160d: 0x0cdb, 0x160e: 0x05ff, 0x160f: 0x0ce7, 0x1610: 0x05fb, 0x1611: 0x05f7, + 0x1612: 0x07c3, 0x1613: 0x07c7, 0x1614: 0x0d03, 0x1615: 0x0ceb, 0x1616: 0x11ab, 0x1617: 0x0663, + 0x1618: 0x0d0f, 0x1619: 0x0d13, 0x161a: 0x0d17, 0x161b: 0x0d2b, 0x161c: 0x0d23, 0x161d: 0x16dd, + 0x161e: 0x0603, 0x161f: 0x0d3f, 0x1620: 0x0d33, 0x1621: 0x0d4f, 0x1622: 0x0d57, 0x1623: 0x16e7, + 0x1624: 0x0d5b, 0x1625: 0x0d47, 0x1626: 0x0d63, 0x1627: 0x0607, 0x1628: 0x0d67, 0x1629: 0x0d6b, + 0x162a: 0x0d6f, 0x162b: 0x0d7b, 0x162c: 0x16ec, 0x162d: 0x0d83, 0x162e: 0x060b, 0x162f: 0x0d8f, + 0x1630: 0x16f1, 0x1631: 0x0d93, 0x1632: 0x060f, 0x1633: 0x0d9f, 0x1634: 0x0dab, 0x1635: 0x0db7, + 0x1636: 0x0dbb, 0x1637: 0x16f6, 0x1638: 0x168d, 0x1639: 0x16fb, 0x163a: 0x0ddb, 0x163b: 0x1700, + 0x163c: 0x0de7, 0x163d: 0x0def, 0x163e: 0x0ddf, 
0x163f: 0x0dfb, + // Block 0x59, offset 0x1640 + 0x1640: 0x0e0b, 0x1641: 0x0e1b, 0x1642: 0x0e0f, 0x1643: 0x0e13, 0x1644: 0x0e1f, 0x1645: 0x0e23, + 0x1646: 0x1705, 0x1647: 0x0e07, 0x1648: 0x0e3b, 0x1649: 0x0e3f, 0x164a: 0x0613, 0x164b: 0x0e53, + 0x164c: 0x0e4f, 0x164d: 0x170a, 0x164e: 0x0e33, 0x164f: 0x0e6f, 0x1650: 0x170f, 0x1651: 0x1714, + 0x1652: 0x0e73, 0x1653: 0x0e87, 0x1654: 0x0e83, 0x1655: 0x0e7f, 0x1656: 0x0617, 0x1657: 0x0e8b, + 0x1658: 0x0e9b, 0x1659: 0x0e97, 0x165a: 0x0ea3, 0x165b: 0x1651, 0x165c: 0x0eb3, 0x165d: 0x1719, + 0x165e: 0x0ebf, 0x165f: 0x1723, 0x1660: 0x0ed3, 0x1661: 0x0edf, 0x1662: 0x0ef3, 0x1663: 0x1728, + 0x1664: 0x0f07, 0x1665: 0x0f0b, 0x1666: 0x172d, 0x1667: 0x1732, 0x1668: 0x0f27, 0x1669: 0x0f37, + 0x166a: 0x061b, 0x166b: 0x0f3b, 0x166c: 0x061f, 0x166d: 0x061f, 0x166e: 0x0f53, 0x166f: 0x0f57, + 0x1670: 0x0f5f, 0x1671: 0x0f63, 0x1672: 0x0f6f, 0x1673: 0x0623, 0x1674: 0x0f87, 0x1675: 0x1737, + 0x1676: 0x0fa3, 0x1677: 0x173c, 0x1678: 0x0faf, 0x1679: 0x16a1, 0x167a: 0x0fbf, 0x167b: 0x1741, + 0x167c: 0x1746, 0x167d: 0x174b, 0x167e: 0x0627, 0x167f: 0x062b, + // Block 0x5a, offset 0x1680 + 0x1680: 0x0ff7, 0x1681: 0x1755, 0x1682: 0x1750, 0x1683: 0x175a, 0x1684: 0x175f, 0x1685: 0x0fff, + 0x1686: 0x1003, 0x1687: 0x1003, 0x1688: 0x100b, 0x1689: 0x0633, 0x168a: 0x100f, 0x168b: 0x0637, + 0x168c: 0x063b, 0x168d: 0x1769, 0x168e: 0x1023, 0x168f: 0x102b, 0x1690: 0x1037, 0x1691: 0x063f, + 0x1692: 0x176e, 0x1693: 0x105b, 0x1694: 0x1773, 0x1695: 0x1778, 0x1696: 0x107b, 0x1697: 0x1093, + 0x1698: 0x0643, 0x1699: 0x109b, 0x169a: 0x109f, 0x169b: 0x10a3, 0x169c: 0x177d, 0x169d: 0x1782, + 0x169e: 0x1782, 0x169f: 0x10bb, 0x16a0: 0x0647, 0x16a1: 0x1787, 0x16a2: 0x10cf, 0x16a3: 0x10d3, + 0x16a4: 0x064b, 0x16a5: 0x178c, 0x16a6: 0x10ef, 0x16a7: 0x064f, 0x16a8: 0x10ff, 0x16a9: 0x10f7, + 0x16aa: 0x1107, 0x16ab: 0x1796, 0x16ac: 0x111f, 0x16ad: 0x0653, 0x16ae: 0x112b, 0x16af: 0x1133, + 0x16b0: 0x1143, 0x16b1: 0x0657, 0x16b2: 0x17a0, 0x16b3: 0x17a5, 0x16b4: 0x065b, 0x16b5: 0x17aa, + 0x16b6: 0x115b, 0x16b7: 0x17af, 0x16b8: 0x1167, 0x16b9: 0x1173, 0x16ba: 0x117b, 0x16bb: 0x17b4, + 0x16bc: 0x17b9, 0x16bd: 0x118f, 0x16be: 0x17be, 0x16bf: 0x1197, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x16ce, 0x16c1: 0x065f, 0x16c2: 0x11af, 0x16c3: 0x11b3, 0x16c4: 0x0667, 0x16c5: 0x11b7, + 0x16c6: 0x0a33, 0x16c7: 0x17c3, 0x16c8: 0x17c8, 0x16c9: 0x16d3, 0x16ca: 0x16d8, 0x16cb: 0x11d7, + 0x16cc: 0x11db, 0x16cd: 0x13f3, 0x16ce: 0x066b, 0x16cf: 0x1207, 0x16d0: 0x1203, 0x16d1: 0x120b, + 0x16d2: 0x083f, 0x16d3: 0x120f, 0x16d4: 0x1213, 0x16d5: 0x1217, 0x16d6: 0x121f, 0x16d7: 0x17cd, + 0x16d8: 0x121b, 0x16d9: 0x1223, 0x16da: 0x1237, 0x16db: 0x123b, 0x16dc: 0x1227, 0x16dd: 0x123f, + 0x16de: 0x1253, 0x16df: 0x1267, 0x16e0: 0x1233, 0x16e1: 0x1247, 0x16e2: 0x124b, 0x16e3: 0x124f, + 0x16e4: 0x17d2, 0x16e5: 0x17dc, 0x16e6: 0x17d7, 0x16e7: 0x066f, 0x16e8: 0x126f, 0x16e9: 0x1273, + 0x16ea: 0x127b, 0x16eb: 0x17f0, 0x16ec: 0x127f, 0x16ed: 0x17e1, 0x16ee: 0x0673, 0x16ef: 0x0677, + 0x16f0: 0x17e6, 0x16f1: 0x17eb, 0x16f2: 0x067b, 0x16f3: 0x129f, 0x16f4: 0x12a3, 0x16f5: 0x12a7, + 0x16f6: 0x12ab, 0x16f7: 0x12b7, 0x16f8: 0x12b3, 0x16f9: 0x12bf, 0x16fa: 0x12bb, 0x16fb: 0x12cb, + 0x16fc: 0x12c3, 0x16fd: 0x12c7, 0x16fe: 0x12cf, 0x16ff: 0x067f, + // Block 0x5c, offset 0x1700 + 0x1700: 0x12d7, 0x1701: 0x12db, 0x1702: 0x0683, 0x1703: 0x12eb, 0x1704: 0x12ef, 0x1705: 0x17f5, + 0x1706: 0x12fb, 0x1707: 0x12ff, 0x1708: 0x0687, 0x1709: 0x130b, 0x170a: 0x05bb, 0x170b: 0x17fa, + 0x170c: 0x17ff, 0x170d: 0x068b, 0x170e: 0x068f, 0x170f: 0x1337, 0x1710: 
0x134f, 0x1711: 0x136b, + 0x1712: 0x137b, 0x1713: 0x1804, 0x1714: 0x138f, 0x1715: 0x1393, 0x1716: 0x13ab, 0x1717: 0x13b7, + 0x1718: 0x180e, 0x1719: 0x1660, 0x171a: 0x13c3, 0x171b: 0x13bf, 0x171c: 0x13cb, 0x171d: 0x1665, + 0x171e: 0x13d7, 0x171f: 0x13e3, 0x1720: 0x1813, 0x1721: 0x1818, 0x1722: 0x1423, 0x1723: 0x142f, + 0x1724: 0x1437, 0x1725: 0x181d, 0x1726: 0x143b, 0x1727: 0x1467, 0x1728: 0x1473, 0x1729: 0x1477, + 0x172a: 0x146f, 0x172b: 0x1483, 0x172c: 0x1487, 0x172d: 0x1822, 0x172e: 0x1493, 0x172f: 0x0693, + 0x1730: 0x149b, 0x1731: 0x1827, 0x1732: 0x0697, 0x1733: 0x14d3, 0x1734: 0x0ac3, 0x1735: 0x14eb, + 0x1736: 0x182c, 0x1737: 0x1836, 0x1738: 0x069b, 0x1739: 0x069f, 0x173a: 0x1513, 0x173b: 0x183b, + 0x173c: 0x06a3, 0x173d: 0x1840, 0x173e: 0x152b, 0x173f: 0x152b, + // Block 0x5d, offset 0x1740 + 0x1740: 0x1533, 0x1741: 0x1845, 0x1742: 0x154b, 0x1743: 0x06a7, 0x1744: 0x155b, 0x1745: 0x1567, + 0x1746: 0x156f, 0x1747: 0x1577, 0x1748: 0x06ab, 0x1749: 0x184a, 0x174a: 0x158b, 0x174b: 0x15a7, + 0x174c: 0x15b3, 0x174d: 0x06af, 0x174e: 0x06b3, 0x174f: 0x15b7, 0x1750: 0x184f, 0x1751: 0x06b7, + 0x1752: 0x1854, 0x1753: 0x1859, 0x1754: 0x185e, 0x1755: 0x15db, 0x1756: 0x06bb, 0x1757: 0x15ef, + 0x1758: 0x15f7, 0x1759: 0x15fb, 0x175a: 0x1603, 0x175b: 0x160b, 0x175c: 0x1613, 0x175d: 0x1868, +} + +// nfkcIndex: 22 blocks, 1408 entries, 2816 bytes +// Block 0 is the zero block. +var nfkcIndex = [1408]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x5c, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x5d, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x5e, 0xcb: 0x5f, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x09, + 0xd0: 0x0a, 0xd1: 0x60, 0xd2: 0x61, 0xd3: 0x0b, 0xd6: 0x0c, 0xd7: 0x62, + 0xd8: 0x63, 0xd9: 0x0d, 0xdb: 0x64, 0xdc: 0x65, 0xdd: 0x66, 0xdf: 0x67, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x68, 0x121: 0x69, 0x123: 0x0e, 0x124: 0x6a, 0x125: 0x6b, 0x126: 0x6c, 0x127: 0x6d, + 0x128: 0x6e, 0x129: 0x6f, 0x12a: 0x70, 0x12b: 0x71, 0x12c: 0x6c, 0x12d: 0x72, 0x12e: 0x73, 0x12f: 0x74, + 0x131: 0x75, 0x132: 0x76, 0x133: 0x77, 0x134: 0x78, 0x135: 0x79, 0x137: 0x7a, + 0x138: 0x7b, 0x139: 0x7c, 0x13a: 0x7d, 0x13b: 0x7e, 0x13c: 0x7f, 0x13d: 0x80, 0x13e: 0x81, 0x13f: 0x82, + // Block 0x5, offset 0x140 + 0x140: 0x83, 0x142: 0x84, 0x143: 0x85, 0x144: 0x86, 0x145: 0x87, 0x146: 0x88, 0x147: 0x89, + 0x14d: 0x8a, + 0x15c: 0x8b, 0x15f: 0x8c, + 0x162: 0x8d, 0x164: 0x8e, + 0x168: 0x8f, 0x169: 0x90, 0x16a: 0x91, 0x16c: 0x0f, 0x16d: 0x92, 0x16e: 0x93, 0x16f: 0x94, + 0x170: 0x95, 0x173: 0x96, 0x174: 0x97, 0x175: 0x10, 0x176: 0x11, 0x177: 0x12, + 0x178: 0x13, 0x179: 0x14, 0x17a: 0x15, 0x17b: 0x16, 0x17c: 0x17, 0x17d: 0x18, 0x17e: 0x19, 0x17f: 0x1a, + // Block 0x6, offset 0x180 + 0x180: 0x98, 0x181: 0x99, 0x182: 0x9a, 0x183: 0x9b, 0x184: 0x1b, 0x185: 0x1c, 0x186: 0x9c, 0x187: 0x9d, + 0x188: 0x9e, 0x189: 0x1d, 0x18a: 0x1e, 0x18b: 0x9f, 0x18c: 0xa0, + 0x191: 0x1f, 0x192: 0x20, 0x193: 0xa1, + 0x1a8: 0xa2, 0x1a9: 0xa3, 0x1ab: 0xa4, + 0x1b1: 0xa5, 0x1b3: 0xa6, 0x1b5: 0xa7, 0x1b7: 0xa8, + 0x1ba: 0xa9, 0x1bb: 0xaa, 0x1bc: 0x21, 0x1bd: 0x22, 0x1be: 0x23, 0x1bf: 0xab, + // Block 0x7, offset 0x1c0 + 0x1c0: 0xac, 0x1c1: 0x24, 0x1c2: 0x25, 0x1c3: 0x26, 0x1c4: 0xad, 0x1c5: 0x27, 0x1c6: 0x28, + 0x1c8: 0x29, 0x1c9: 0x2a, 0x1ca: 0x2b, 0x1cb: 0x2c, 0x1cc: 0x2d, 0x1cd: 0x2e, 0x1ce: 0x2f, 0x1cf: 0x30, + // Block 0x8, offset 0x200 + 0x219: 0xae, 0x21a: 0xaf, 
0x21b: 0xb0, 0x21d: 0xb1, 0x21f: 0xb2, + 0x220: 0xb3, 0x223: 0xb4, 0x224: 0xb5, 0x225: 0xb6, 0x226: 0xb7, 0x227: 0xb8, + 0x22a: 0xb9, 0x22b: 0xba, 0x22d: 0xbb, 0x22f: 0xbc, + 0x230: 0xbd, 0x231: 0xbe, 0x232: 0xbf, 0x233: 0xc0, 0x234: 0xc1, 0x235: 0xc2, 0x236: 0xc3, 0x237: 0xbd, + 0x238: 0xbe, 0x239: 0xbf, 0x23a: 0xc0, 0x23b: 0xc1, 0x23c: 0xc2, 0x23d: 0xc3, 0x23e: 0xbd, 0x23f: 0xbe, + // Block 0x9, offset 0x240 + 0x240: 0xbf, 0x241: 0xc0, 0x242: 0xc1, 0x243: 0xc2, 0x244: 0xc3, 0x245: 0xbd, 0x246: 0xbe, 0x247: 0xbf, + 0x248: 0xc0, 0x249: 0xc1, 0x24a: 0xc2, 0x24b: 0xc3, 0x24c: 0xbd, 0x24d: 0xbe, 0x24e: 0xbf, 0x24f: 0xc0, + 0x250: 0xc1, 0x251: 0xc2, 0x252: 0xc3, 0x253: 0xbd, 0x254: 0xbe, 0x255: 0xbf, 0x256: 0xc0, 0x257: 0xc1, + 0x258: 0xc2, 0x259: 0xc3, 0x25a: 0xbd, 0x25b: 0xbe, 0x25c: 0xbf, 0x25d: 0xc0, 0x25e: 0xc1, 0x25f: 0xc2, + 0x260: 0xc3, 0x261: 0xbd, 0x262: 0xbe, 0x263: 0xbf, 0x264: 0xc0, 0x265: 0xc1, 0x266: 0xc2, 0x267: 0xc3, + 0x268: 0xbd, 0x269: 0xbe, 0x26a: 0xbf, 0x26b: 0xc0, 0x26c: 0xc1, 0x26d: 0xc2, 0x26e: 0xc3, 0x26f: 0xbd, + 0x270: 0xbe, 0x271: 0xbf, 0x272: 0xc0, 0x273: 0xc1, 0x274: 0xc2, 0x275: 0xc3, 0x276: 0xbd, 0x277: 0xbe, + 0x278: 0xbf, 0x279: 0xc0, 0x27a: 0xc1, 0x27b: 0xc2, 0x27c: 0xc3, 0x27d: 0xbd, 0x27e: 0xbe, 0x27f: 0xbf, + // Block 0xa, offset 0x280 + 0x280: 0xc0, 0x281: 0xc1, 0x282: 0xc2, 0x283: 0xc3, 0x284: 0xbd, 0x285: 0xbe, 0x286: 0xbf, 0x287: 0xc0, + 0x288: 0xc1, 0x289: 0xc2, 0x28a: 0xc3, 0x28b: 0xbd, 0x28c: 0xbe, 0x28d: 0xbf, 0x28e: 0xc0, 0x28f: 0xc1, + 0x290: 0xc2, 0x291: 0xc3, 0x292: 0xbd, 0x293: 0xbe, 0x294: 0xbf, 0x295: 0xc0, 0x296: 0xc1, 0x297: 0xc2, + 0x298: 0xc3, 0x299: 0xbd, 0x29a: 0xbe, 0x29b: 0xbf, 0x29c: 0xc0, 0x29d: 0xc1, 0x29e: 0xc2, 0x29f: 0xc3, + 0x2a0: 0xbd, 0x2a1: 0xbe, 0x2a2: 0xbf, 0x2a3: 0xc0, 0x2a4: 0xc1, 0x2a5: 0xc2, 0x2a6: 0xc3, 0x2a7: 0xbd, + 0x2a8: 0xbe, 0x2a9: 0xbf, 0x2aa: 0xc0, 0x2ab: 0xc1, 0x2ac: 0xc2, 0x2ad: 0xc3, 0x2ae: 0xbd, 0x2af: 0xbe, + 0x2b0: 0xbf, 0x2b1: 0xc0, 0x2b2: 0xc1, 0x2b3: 0xc2, 0x2b4: 0xc3, 0x2b5: 0xbd, 0x2b6: 0xbe, 0x2b7: 0xbf, + 0x2b8: 0xc0, 0x2b9: 0xc1, 0x2ba: 0xc2, 0x2bb: 0xc3, 0x2bc: 0xbd, 0x2bd: 0xbe, 0x2be: 0xbf, 0x2bf: 0xc0, + // Block 0xb, offset 0x2c0 + 0x2c0: 0xc1, 0x2c1: 0xc2, 0x2c2: 0xc3, 0x2c3: 0xbd, 0x2c4: 0xbe, 0x2c5: 0xbf, 0x2c6: 0xc0, 0x2c7: 0xc1, + 0x2c8: 0xc2, 0x2c9: 0xc3, 0x2ca: 0xbd, 0x2cb: 0xbe, 0x2cc: 0xbf, 0x2cd: 0xc0, 0x2ce: 0xc1, 0x2cf: 0xc2, + 0x2d0: 0xc3, 0x2d1: 0xbd, 0x2d2: 0xbe, 0x2d3: 0xbf, 0x2d4: 0xc0, 0x2d5: 0xc1, 0x2d6: 0xc2, 0x2d7: 0xc3, + 0x2d8: 0xbd, 0x2d9: 0xbe, 0x2da: 0xbf, 0x2db: 0xc0, 0x2dc: 0xc1, 0x2dd: 0xc2, 0x2de: 0xc4, + // Block 0xc, offset 0x300 + 0x324: 0x31, 0x325: 0x32, 0x326: 0x33, 0x327: 0x34, + 0x328: 0x35, 0x329: 0x36, 0x32a: 0x37, 0x32b: 0x38, 0x32c: 0x39, 0x32d: 0x3a, 0x32e: 0x3b, 0x32f: 0x3c, + 0x330: 0x3d, 0x331: 0x3e, 0x332: 0x3f, 0x333: 0x40, 0x334: 0x41, 0x335: 0x42, 0x336: 0x43, 0x337: 0x44, + 0x338: 0x45, 0x339: 0x46, 0x33a: 0x47, 0x33b: 0x48, 0x33c: 0xc5, 0x33d: 0x49, 0x33e: 0x4a, 0x33f: 0x4b, + // Block 0xd, offset 0x340 + 0x347: 0xc6, + 0x34b: 0xc7, 0x34d: 0xc8, + 0x368: 0xc9, 0x36b: 0xca, + 0x374: 0xcb, + 0x37d: 0xcc, + // Block 0xe, offset 0x380 + 0x381: 0xcd, 0x382: 0xce, 0x384: 0xcf, 0x385: 0xb7, 0x387: 0xd0, + 0x388: 0xd1, 0x38b: 0xd2, 0x38c: 0xd3, 0x38d: 0xd4, + 0x391: 0xd5, 0x392: 0xd6, 0x393: 0xd7, 0x396: 0xd8, 0x397: 0xd9, + 0x398: 0xda, 0x39a: 0xdb, 0x39c: 0xdc, + 0x3a0: 0xdd, 0x3a7: 0xde, + 0x3a8: 0xdf, 0x3a9: 0xe0, 0x3aa: 0xe1, + 0x3b0: 0xda, 0x3b5: 0xe2, 0x3b6: 0xe3, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xe4, 0x3ec: 0xe5, + // Block 0x10, 
offset 0x400 + 0x432: 0xe6, + // Block 0x11, offset 0x440 + 0x445: 0xe7, 0x446: 0xe8, 0x447: 0xe9, + 0x449: 0xea, + 0x450: 0xeb, 0x451: 0xec, 0x452: 0xed, 0x453: 0xee, 0x454: 0xef, 0x455: 0xf0, 0x456: 0xf1, 0x457: 0xf2, + 0x458: 0xf3, 0x459: 0xf4, 0x45a: 0x4c, 0x45b: 0xf5, 0x45c: 0xf6, 0x45d: 0xf7, 0x45e: 0xf8, 0x45f: 0x4d, + // Block 0x12, offset 0x480 + 0x480: 0xf9, 0x484: 0xe5, + 0x48b: 0xfa, + 0x4a3: 0xfb, 0x4a5: 0xfc, + 0x4b8: 0x4e, 0x4b9: 0x4f, 0x4ba: 0x50, + // Block 0x13, offset 0x4c0 + 0x4c4: 0x51, 0x4c5: 0xfd, 0x4c6: 0xfe, + 0x4c8: 0x52, 0x4c9: 0xff, + // Block 0x14, offset 0x500 + 0x520: 0x53, 0x521: 0x54, 0x522: 0x55, 0x523: 0x56, 0x524: 0x57, 0x525: 0x58, 0x526: 0x59, 0x527: 0x5a, + 0x528: 0x5b, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfkcSparseOffset: 164 entries, 328 bytes +var nfkcSparseOffset = []uint16{0x0, 0xe, 0x12, 0x1b, 0x25, 0x35, 0x37, 0x3c, 0x47, 0x56, 0x63, 0x6b, 0x70, 0x75, 0x77, 0x7f, 0x86, 0x89, 0x91, 0x95, 0x99, 0x9b, 0x9d, 0xa6, 0xaa, 0xb1, 0xb6, 0xb9, 0xc3, 0xc6, 0xcd, 0xd5, 0xd9, 0xdb, 0xdf, 0xe3, 0xe9, 0xfa, 0x106, 0x108, 0x10e, 0x110, 0x112, 0x114, 0x116, 0x118, 0x11a, 0x11c, 0x11f, 0x122, 0x124, 0x127, 0x12a, 0x12e, 0x133, 0x13c, 0x13e, 0x141, 0x143, 0x14e, 0x159, 0x167, 0x175, 0x185, 0x193, 0x19a, 0x1a0, 0x1af, 0x1b3, 0x1b5, 0x1b9, 0x1bb, 0x1be, 0x1c0, 0x1c3, 0x1c5, 0x1c8, 0x1ca, 0x1cc, 0x1ce, 0x1da, 0x1e4, 0x1ee, 0x1f1, 0x1f5, 0x1f7, 0x1f9, 0x1fb, 0x1fd, 0x200, 0x202, 0x204, 0x206, 0x208, 0x20e, 0x211, 0x215, 0x217, 0x21e, 0x224, 0x22a, 0x232, 0x238, 0x23e, 0x244, 0x248, 0x24a, 0x24c, 0x24e, 0x250, 0x256, 0x259, 0x25b, 0x261, 0x264, 0x26c, 0x273, 0x276, 0x279, 0x27b, 0x27e, 0x286, 0x28a, 0x291, 0x294, 0x29a, 0x29c, 0x29e, 0x2a1, 0x2a3, 0x2a6, 0x2a8, 0x2aa, 0x2ac, 0x2ae, 0x2b1, 0x2b3, 0x2b5, 0x2b7, 0x2b9, 0x2c6, 0x2d0, 0x2d2, 0x2d4, 0x2d8, 0x2dd, 0x2e9, 0x2ee, 0x2f7, 0x2fd, 0x302, 0x306, 0x30b, 0x30f, 0x31f, 0x32d, 0x33b, 0x349, 0x34f, 0x351, 0x353, 0x356, 0x361, 0x363} + +// nfkcSparseValues: 877 entries, 3508 bytes +var nfkcSparseValues = [877]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0002, lo: 0x0d}, + {value: 0x0001, lo: 0xa0, hi: 0xa0}, + {value: 0x427b, lo: 0xa8, hi: 0xa8}, + {value: 0x0083, lo: 0xaa, hi: 0xaa}, + {value: 0x4267, lo: 0xaf, hi: 0xaf}, + {value: 0x0025, lo: 0xb2, hi: 0xb3}, + {value: 0x425d, lo: 0xb4, hi: 0xb4}, + {value: 0x01dc, lo: 0xb5, hi: 0xb5}, + {value: 0x4294, lo: 0xb8, hi: 0xb8}, + {value: 0x0023, lo: 0xb9, hi: 0xb9}, + {value: 0x009f, lo: 0xba, hi: 0xba}, + {value: 0x221f, lo: 0xbc, hi: 0xbc}, + {value: 0x2213, lo: 0xbd, hi: 0xbd}, + {value: 0x22b5, lo: 0xbe, hi: 0xbe}, + // Block 0x1, offset 0xe + {value: 0x0091, lo: 0x03}, + {value: 0x46e5, lo: 0xa0, hi: 0xa1}, + {value: 0x4717, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x12 + {value: 0x0003, lo: 0x08}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x0091, lo: 0xb0, hi: 0xb0}, + {value: 0x0119, lo: 0xb1, hi: 0xb1}, + {value: 0x0095, lo: 0xb2, hi: 0xb2}, + {value: 0x00a5, lo: 0xb3, hi: 0xb3}, + {value: 0x0143, lo: 0xb4, hi: 0xb6}, + {value: 0x00af, lo: 0xb7, hi: 0xb7}, + {value: 0x00b3, lo: 0xb8, hi: 0xb8}, + // Block 0x3, offset 0x1b + {value: 0x000a, lo: 0x09}, + {value: 0x4271, lo: 0x98, hi: 0x98}, + {value: 0x4276, lo: 0x99, hi: 0x9a}, + {value: 0x4299, lo: 0x9b, hi: 0x9b}, + {value: 0x4262, lo: 0x9c, hi: 0x9c}, + {value: 0x4285, lo: 0x9d, hi: 0x9d}, + {value: 0x0113, lo: 0xa0, hi: 0xa0}, + {value: 0x0099, 
lo: 0xa1, hi: 0xa1}, + {value: 0x00a7, lo: 0xa2, hi: 0xa3}, + {value: 0x0167, lo: 0xa4, hi: 0xa4}, + // Block 0x4, offset 0x25 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a8, lo: 0x90, hi: 0x90}, + {value: 0x37b4, lo: 0x91, hi: 0x91}, + {value: 0x37a2, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x381a, lo: 0x97, hi: 0x97}, + {value: 0x37e4, lo: 0x9c, hi: 0x9c}, + {value: 0x37cc, lo: 0x9d, hi: 0x9d}, + {value: 0x37f6, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x3820, lo: 0xb6, hi: 0xb6}, + {value: 0x3826, lo: 0xb7, hi: 0xb7}, + // Block 0x5, offset 0x35 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x6, offset 0x37 + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x7, offset 0x3c + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3844, lo: 0xa2, hi: 0xa2}, + {value: 0x384a, lo: 0xa3, hi: 0xa3}, + {value: 0x3856, lo: 0xa4, hi: 0xa4}, + {value: 0x3850, lo: 0xa5, hi: 0xa5}, + {value: 0x385c, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x8, offset 0x47 + {value: 0x0000, lo: 0x0e}, + {value: 0x386e, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x3862, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3868, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0x9, offset 0x56 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xa, offset 0x63 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + {value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xb, offset 0x6b + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0xc, offset 0x70 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xd, offset 0x75 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xe, offset 0x77 + {value: 0x0000, lo: 0x07}, + {value: 0xa000, 
lo: 0xa8, hi: 0xa8}, + {value: 0x3edb, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee3, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3eeb, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0xf, offset 0x7f + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451f, lo: 0x98, hi: 0x9f}, + // Block 0x10, offset 0x86 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x11, offset 0x89 + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2ca1, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455f, lo: 0x9c, hi: 0x9d}, + {value: 0x456f, lo: 0x9f, hi: 0x9f}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x91 + {value: 0x0000, lo: 0x03}, + {value: 0x4597, lo: 0xb3, hi: 0xb3}, + {value: 0x459f, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x13, offset 0x95 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4577, lo: 0x99, hi: 0x9b}, + {value: 0x458f, lo: 0x9e, hi: 0x9e}, + // Block 0x14, offset 0x99 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x15, offset 0x9b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x16, offset 0x9d + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb9, lo: 0x88, hi: 0x88}, + {value: 0x2cb1, lo: 0x8b, hi: 0x8b}, + {value: 0x2cc1, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a7, lo: 0x9c, hi: 0x9c}, + {value: 0x45af, lo: 0x9d, hi: 0x9d}, + // Block 0x17, offset 0xa6 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc9, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x18, offset 0xaa + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cd1, lo: 0x8a, hi: 0x8a}, + {value: 0x2ce1, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd9, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x19, offset 0xb1 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef3, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1a, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0xb9 + {value: 0x0000, lo: 0x09}, + {value: 0x2ce9, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cf1, lo: 0x87, hi: 0x87}, + {value: 0x2cf9, lo: 0x88, hi: 0x88}, + {value: 0x2f53, lo: 0x8a, hi: 0x8a}, + {value: 0x2ddb, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1c, offset 0xc3 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1d, offset 0xc6 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2d01, lo: 0x8a, hi: 0x8a}, + {value: 0x2d11, lo: 0x8b, hi: 0x8b}, + {value: 0x2d09, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 
0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1e, offset 0xcd + {value: 0x6be7, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3efb, lo: 0x9a, hi: 0x9a}, + {value: 0x2f5b, lo: 0x9c, hi: 0x9c}, + {value: 0x2de6, lo: 0x9d, hi: 0x9d}, + {value: 0x2d19, lo: 0x9e, hi: 0x9f}, + // Block 0x1f, offset 0xd5 + {value: 0x0000, lo: 0x03}, + {value: 0x2624, lo: 0xb3, hi: 0xb3}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x20, offset 0xd9 + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x21, offset 0xdb + {value: 0x0000, lo: 0x03}, + {value: 0x2639, lo: 0xb3, hi: 0xb3}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x22, offset 0xdf + {value: 0x0000, lo: 0x03}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + {value: 0x262b, lo: 0x9c, hi: 0x9c}, + {value: 0x2632, lo: 0x9d, hi: 0x9d}, + // Block 0x23, offset 0xe3 + {value: 0x0000, lo: 0x05}, + {value: 0x030b, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x24, offset 0xe9 + {value: 0x0000, lo: 0x10}, + {value: 0x2647, lo: 0x83, hi: 0x83}, + {value: 0x264e, lo: 0x8d, hi: 0x8d}, + {value: 0x2655, lo: 0x92, hi: 0x92}, + {value: 0x265c, lo: 0x97, hi: 0x97}, + {value: 0x2663, lo: 0x9c, hi: 0x9c}, + {value: 0x2640, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a87, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a90, lo: 0xb5, hi: 0xb5}, + {value: 0x45b7, lo: 0xb6, hi: 0xb6}, + {value: 0x45f7, lo: 0xb7, hi: 0xb7}, + {value: 0x45bf, lo: 0xb8, hi: 0xb8}, + {value: 0x4602, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x25, offset 0xfa + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a99, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x2671, lo: 0x93, hi: 0x93}, + {value: 0x2678, lo: 0x9d, hi: 0x9d}, + {value: 0x267f, lo: 0xa2, hi: 0xa2}, + {value: 0x2686, lo: 0xa7, hi: 0xa7}, + {value: 0x268d, lo: 0xac, hi: 0xac}, + {value: 0x266a, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0x106 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x27, offset 0x108 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d21, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x28, offset 0x10e + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x29, offset 0x110 + {value: 0x0000, lo: 0x01}, + {value: 0x030f, lo: 0xbc, hi: 0xbc}, + // Block 0x2a, offset 0x112 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0x114 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x116 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x118 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x11a + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x11c + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 
0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x11f + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x122 + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x124 + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x127 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x12a + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x12e + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x36, offset 0x133 + {value: 0x0000, lo: 0x08}, + {value: 0x2d69, lo: 0x80, hi: 0x80}, + {value: 0x2d71, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d79, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x37, offset 0x13c + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x38, offset 0x13e + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x39, offset 0x141 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3a, offset 0x143 + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3b, offset 0x14e + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00e6, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00ef, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x159 + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x043b, lo: 0x91, hi: 0x91}, + {value: 0x429e, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x1873, lo: 0xa5, hi: 0xa5}, + {value: 0x1b5f, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x2694, lo: 0xb3, hi: 0xb3}, + {value: 0x2801, lo: 0xb4, hi: 0xb4}, + {value: 0x269b, lo: 0xb6, hi: 0xb6}, + {value: 0x280b, lo: 0xb7, hi: 0xb7}, + {value: 0x186d, lo: 0xbc, hi: 0xbc}, + {value: 0x426c, lo: 0xbe, hi: 0xbe}, + // Block 0x3d, offset 0x167 + {value: 0x0002, lo: 0x0d}, + {value: 0x1933, lo: 0x87, hi: 0x87}, + {value: 0x1930, lo: 0x88, hi: 0x88}, + {value: 0x1870, lo: 0x89, hi: 0x89}, + {value: 0x2991, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x0467, lo: 0xbb, hi: 0xbb}, + 
{value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, hi: 0xbf}, + // Block 0x3e, offset 0x175 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x0467, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 0x0104, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x199c, lo: 0xa8, hi: 0xa8}, + // Block 0x3f, offset 0x185 + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x40, offset 0x193 + {value: 0x0007, lo: 0x06}, + {value: 0x2183, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bbc, lo: 0x9a, hi: 0x9b}, + {value: 0x3bca, lo: 0xae, hi: 0xae}, + // Block 0x41, offset 0x19a + {value: 0x000e, lo: 0x05}, + {value: 0x3bd1, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd8, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x42, offset 0x1a0 + {value: 0x0173, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be6, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bed, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf4, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bfb, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3c02, lo: 0xa6, hi: 0xa6}, + {value: 0x26a2, lo: 0xac, hi: 0xad}, + {value: 0x26a9, lo: 0xaf, hi: 0xaf}, + {value: 0x281f, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x43, offset 0x1af + {value: 0x0007, lo: 0x03}, + {value: 0x3c6b, lo: 0xa0, hi: 0xa1}, + {value: 0x3c95, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbf, lo: 0xaa, hi: 0xad}, + // Block 0x44, offset 0x1b3 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x45, offset 0x1b5 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x46, offset 0x1b9 + {value: 0x0000, lo: 0x01}, + {value: 0x299e, lo: 0x8c, hi: 0x8c}, + // Block 0x47, offset 0x1bb + {value: 0x0266, lo: 0x02}, + {value: 0x1b8f, lo: 0xb4, hi: 0xb4}, + {value: 0x192d, lo: 0xb5, hi: 0xb6}, + // Block 0x48, offset 0x1be + {value: 0x0000, lo: 0x01}, + {value: 0x44e0, lo: 0x9c, hi: 0x9c}, + // Block 0x49, offset 0x1c0 + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4a, offset 0x1c3 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x4b, offset 0x1c5 + {value: 0x0000, lo: 0x02}, 
+ {value: 0x047f, lo: 0xaf, hi: 0xaf}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x4c, offset 0x1c8 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x4d, offset 0x1ca + {value: 0x0000, lo: 0x01}, + {value: 0x0dc3, lo: 0x9f, hi: 0x9f}, + // Block 0x4e, offset 0x1cc + {value: 0x0000, lo: 0x01}, + {value: 0x162f, lo: 0xb3, hi: 0xb3}, + // Block 0x4f, offset 0x1ce + {value: 0x0004, lo: 0x0b}, + {value: 0x1597, lo: 0x80, hi: 0x82}, + {value: 0x15af, lo: 0x83, hi: 0x83}, + {value: 0x15c7, lo: 0x84, hi: 0x85}, + {value: 0x15d7, lo: 0x86, hi: 0x89}, + {value: 0x15eb, lo: 0x8a, hi: 0x8c}, + {value: 0x15ff, lo: 0x8d, hi: 0x8d}, + {value: 0x1607, lo: 0x8e, hi: 0x8e}, + {value: 0x160f, lo: 0x8f, hi: 0x90}, + {value: 0x161b, lo: 0x91, hi: 0x93}, + {value: 0x162b, lo: 0x94, hi: 0x94}, + {value: 0x1633, lo: 0x95, hi: 0x95}, + // Block 0x50, offset 0x1da + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xae}, + {value: 0x812f, lo: 0xaf, hi: 0xaf}, + {value: 0x04b3, lo: 0xb6, hi: 0xb6}, + {value: 0x0887, lo: 0xb8, hi: 0xba}, + // Block 0x51, offset 0x1e4 + {value: 0x0006, lo: 0x09}, + {value: 0x0313, lo: 0xb1, hi: 0xb1}, + {value: 0x0317, lo: 0xb2, hi: 0xb2}, + {value: 0x4a3e, lo: 0xb3, hi: 0xb3}, + {value: 0x031b, lo: 0xb4, hi: 0xb4}, + {value: 0x4a44, lo: 0xb5, hi: 0xb6}, + {value: 0x031f, lo: 0xb7, hi: 0xb7}, + {value: 0x0323, lo: 0xb8, hi: 0xb8}, + {value: 0x0327, lo: 0xb9, hi: 0xb9}, + {value: 0x4a50, lo: 0xba, hi: 0xbf}, + // Block 0x52, offset 0x1ee + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x53, offset 0x1f1 + {value: 0x0000, lo: 0x03}, + {value: 0x020f, lo: 0x9c, hi: 0x9c}, + {value: 0x0212, lo: 0x9d, hi: 0x9d}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x54, offset 0x1f5 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x55, offset 0x1f7 + {value: 0x0000, lo: 0x01}, + {value: 0x163b, lo: 0xb0, hi: 0xb0}, + // Block 0x56, offset 0x1f9 + {value: 0x000c, lo: 0x01}, + {value: 0x00d7, lo: 0xb8, hi: 0xb9}, + // Block 0x57, offset 0x1fb + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x58, offset 0x1fd + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x59, offset 0x200 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x5a, offset 0x202 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x5b, offset 0x204 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x5c, offset 0x206 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x5d, offset 0x208 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x5e, offset 0x20e + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x5f, offset 0x211 + {value: 0x0008, lo: 0x03}, + {value: 0x1637, lo: 0x9c, hi: 0x9d}, + {value: 0x0125, lo: 0x9e, hi: 0x9e}, + {value: 0x1643, lo: 0x9f, hi: 0x9f}, + // Block 0x60, offset 0x215 + {value: 0x0000, lo: 0x01}, + {value: 
0x8104, lo: 0xad, hi: 0xad}, + // Block 0x61, offset 0x217 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x62, offset 0x21e + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x63, offset 0x224 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x64, offset 0x22a + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x65, offset 0x232 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x66, offset 0x238 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x67, offset 0x23e + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x68, offset 0x244 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x69, offset 0x248 + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6a, offset 0x24a + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6b, offset 0x24c + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x6c, offset 0x24e + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x6d, offset 0x250 + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x6e, offset 0x256 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x6f, offset 0x259 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa4, hi: 0xa7}, + // Block 0x70, offset 0x25b + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x8132, lo: 0x88, hi: 0x8a}, + {value: 0x812d, lo: 0x8b, hi: 0x8b}, + {value: 0x8132, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x8d, hi: 0x90}, + // Block 0x71, offset 0x261 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x264 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x423b, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + 
{value: 0x4245, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424f, lo: 0xab, hi: 0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x73, offset 0x26c + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d81, lo: 0xae, hi: 0xae}, + {value: 0x2d8b, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x74, offset 0x273 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x75, offset 0x276 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x76, offset 0x279 + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x77, offset 0x27b + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x78, offset 0x27e + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d95, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9f, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x79, offset 0x286 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + {value: 0x8132, lo: 0x9e, hi: 0x9e}, + // Block 0x7a, offset 0x28a + {value: 0x6b57, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db3, lo: 0xbb, hi: 0xbb}, + {value: 0x2da9, lo: 0xbc, hi: 0xbd}, + {value: 0x2dbd, lo: 0xbe, hi: 0xbe}, + // Block 0x7b, offset 0x291 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x7c, offset 0x294 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc7, lo: 0xba, hi: 0xba}, + {value: 0x2dd1, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7d, offset 0x29a + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x7e, offset 0x29c + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7f, offset 0x29e + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x80, offset 0x2a1 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x81, offset 0x2a3 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb9, hi: 0xb9}, + {value: 0x8102, lo: 0xba, hi: 0xba}, + // Block 0x82, offset 0x2a6 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + // Block 0x83, offset 0x2a8 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x84, offset 0x2aa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x85, offset 0x2ac + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x86, offset 0x2ae + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x87, offset 0x2b1 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x97, hi: 0x97}, + // Block 0x88, offset 0x2b3 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x89, offset 0x2b5 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 
0x8a, offset 0x2b7 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x8b, offset 0x2b9 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cf, lo: 0x9e, hi: 0x9e}, + {value: 0x45d9, lo: 0x9f, hi: 0x9f}, + {value: 0x460d, lo: 0xa0, hi: 0xa0}, + {value: 0x461b, lo: 0xa1, hi: 0xa1}, + {value: 0x4629, lo: 0xa2, hi: 0xa2}, + {value: 0x4637, lo: 0xa3, hi: 0xa3}, + {value: 0x4645, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x8c, offset 0x2c6 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e3, lo: 0xbb, hi: 0xbb}, + {value: 0x45ed, lo: 0xbc, hi: 0xbc}, + {value: 0x4653, lo: 0xbd, hi: 0xbd}, + {value: 0x466f, lo: 0xbe, hi: 0xbe}, + {value: 0x4661, lo: 0xbf, hi: 0xbf}, + // Block 0x8d, offset 0x2d0 + {value: 0x0000, lo: 0x01}, + {value: 0x467d, lo: 0x80, hi: 0x80}, + // Block 0x8e, offset 0x2d2 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x8f, offset 0x2d4 + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x90, offset 0x2d8 + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x91, offset 0x2dd + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x92, offset 0x2e9 + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x93, offset 0x2ee + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x94, offset 0x2f7 + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x95, offset 0x2fd + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x96, offset 0x302 + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0x97, offset 0x306 + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // 
Block 0x98, offset 0x30b + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0x99, offset 0x30f + {value: 0x0003, lo: 0x0f}, + {value: 0x01b8, lo: 0x80, hi: 0x80}, + {value: 0x045f, lo: 0x81, hi: 0x81}, + {value: 0x01bb, lo: 0x82, hi: 0x9a}, + {value: 0x045b, lo: 0x9b, hi: 0x9b}, + {value: 0x01c7, lo: 0x9c, hi: 0x9c}, + {value: 0x01d0, lo: 0x9d, hi: 0x9d}, + {value: 0x01d6, lo: 0x9e, hi: 0x9e}, + {value: 0x01fa, lo: 0x9f, hi: 0x9f}, + {value: 0x01eb, lo: 0xa0, hi: 0xa0}, + {value: 0x01e8, lo: 0xa1, hi: 0xa1}, + {value: 0x0173, lo: 0xa2, hi: 0xb2}, + {value: 0x0188, lo: 0xb3, hi: 0xb3}, + {value: 0x01a6, lo: 0xb4, hi: 0xba}, + {value: 0x045f, lo: 0xbb, hi: 0xbb}, + {value: 0x01bb, lo: 0xbc, hi: 0xbf}, + // Block 0x9a, offset 0x31f + {value: 0x0003, lo: 0x0d}, + {value: 0x01c7, lo: 0x80, hi: 0x94}, + {value: 0x045b, lo: 0x95, hi: 0x95}, + {value: 0x01c7, lo: 0x96, hi: 0x96}, + {value: 0x01d0, lo: 0x97, hi: 0x97}, + {value: 0x01d6, lo: 0x98, hi: 0x98}, + {value: 0x01fa, lo: 0x99, hi: 0x99}, + {value: 0x01eb, lo: 0x9a, hi: 0x9a}, + {value: 0x01e8, lo: 0x9b, hi: 0x9b}, + {value: 0x0173, lo: 0x9c, hi: 0xac}, + {value: 0x0188, lo: 0xad, hi: 0xad}, + {value: 0x01a6, lo: 0xae, hi: 0xb4}, + {value: 0x045f, lo: 0xb5, hi: 0xb5}, + {value: 0x01bb, lo: 0xb6, hi: 0xbf}, + // Block 0x9b, offset 0x32d + {value: 0x0003, lo: 0x0d}, + {value: 0x01d9, lo: 0x80, hi: 0x8e}, + {value: 0x045b, lo: 0x8f, hi: 0x8f}, + {value: 0x01c7, lo: 0x90, hi: 0x90}, + {value: 0x01d0, lo: 0x91, hi: 0x91}, + {value: 0x01d6, lo: 0x92, hi: 0x92}, + {value: 0x01fa, lo: 0x93, hi: 0x93}, + {value: 0x01eb, lo: 0x94, hi: 0x94}, + {value: 0x01e8, lo: 0x95, hi: 0x95}, + {value: 0x0173, lo: 0x96, hi: 0xa6}, + {value: 0x0188, lo: 0xa7, hi: 0xa7}, + {value: 0x01a6, lo: 0xa8, hi: 0xae}, + {value: 0x045f, lo: 0xaf, hi: 0xaf}, + {value: 0x01bb, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x33b + {value: 0x0003, lo: 0x0d}, + {value: 0x01eb, lo: 0x80, hi: 0x88}, + {value: 0x045b, lo: 0x89, hi: 0x89}, + {value: 0x01c7, lo: 0x8a, hi: 0x8a}, + {value: 0x01d0, lo: 0x8b, hi: 0x8b}, + {value: 0x01d6, lo: 0x8c, hi: 0x8c}, + {value: 0x01fa, lo: 0x8d, hi: 0x8d}, + {value: 0x01eb, lo: 0x8e, hi: 0x8e}, + {value: 0x01e8, lo: 0x8f, hi: 0x8f}, + {value: 0x0173, lo: 0x90, hi: 0xa0}, + {value: 0x0188, lo: 0xa1, hi: 0xa1}, + {value: 0x01a6, lo: 0xa2, hi: 0xa8}, + {value: 0x045f, lo: 0xa9, hi: 0xa9}, + {value: 0x01bb, lo: 0xaa, hi: 0xbf}, + // Block 0x9d, offset 0x349 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x9e, offset 0x34f + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xac, hi: 0xaf}, + // Block 0x9f, offset 0x351 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0xa0, offset 0x353 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0xa1, offset 0x356 + {value: 0x0002, lo: 0x0a}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1951, lo: 0x8a, hi: 0x8a}, + {value: 0x1984, lo: 0x8b, hi: 0x8b}, + {value: 0x199f, lo: 0x8c, hi: 0x8c}, + {value: 0x19a5, lo: 0x8d, hi: 0x8d}, + {value: 0x1bc3, lo: 0x8e, hi: 0x8e}, + {value: 0x19b1, lo: 0x8f, hi: 0x8f}, + {value: 0x197b, lo: 0xaa, hi: 0xaa}, + {value: 0x197e, lo: 0xab, hi: 0xab}, + {value: 0x1981, lo: 0xac, hi: 0xac}, + // 
Block 0xa2, offset 0x361 + {value: 0x0000, lo: 0x01}, + {value: 0x193f, lo: 0x90, hi: 0x90}, + // Block 0xa3, offset 0x363 + {value: 0x0028, lo: 0x09}, + {value: 0x2865, lo: 0x80, hi: 0x80}, + {value: 0x2829, lo: 0x81, hi: 0x81}, + {value: 0x2833, lo: 0x82, hi: 0x82}, + {value: 0x2847, lo: 0x83, hi: 0x84}, + {value: 0x2851, lo: 0x85, hi: 0x86}, + {value: 0x283d, lo: 0x87, hi: 0x87}, + {value: 0x285b, lo: 0x88, hi: 0x88}, + {value: 0x0b6f, lo: 0x90, hi: 0x90}, + {value: 0x08e7, lo: 0x91, hi: 0x91}, +} + +// recompMap: 7520 bytes (entries only) +var recompMap map[uint32]rune +var recompMapOnce sync.Once + +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + 
"\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." 
+ // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + 
"\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + 
"\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "" + // Total size of tables: 55KB (55977 bytes) diff --git a/vendor/golang.org/x/text/width/tables11.0.0.go b/vendor/golang.org/x/text/width/tables11.0.0.go index d6def0e7b..3c75e428f 100644 --- a/vendor/golang.org/x/text/width/tables11.0.0.go +++ b/vendor/golang.org/x/text/width/tables11.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
-// +build go1.13 +// +build go1.13,!go1.14 package width diff --git a/vendor/golang.org/x/text/width/tables12.0.0.go b/vendor/golang.org/x/text/width/tables12.0.0.go new file mode 100644 index 000000000..5c859677a --- /dev/null +++ b/vendor/golang.org/x/text/width/tables12.0.0.go @@ -0,0 +1,1350 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.14 + +package width + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "12.0.0" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *widthTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return widthValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := widthIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = widthIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = widthIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *widthTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return widthValues[c0] + } + i := widthIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = widthIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = widthIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// widthTrie. Total size: 14720 bytes (14.38 KiB). Checksum: 3f4f2516ded5489b. +type widthTrie struct{} + +func newWidthTrie(i int) *widthTrie { + return &widthTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *widthTrie) lookupValue(n uint32, b byte) uint16 { + switch { + default: + return uint16(widthValues[n<<6+uint32(b)]) + } +} + +// widthValues: 104 blocks, 6656 entries, 13312 bytes +// The third block is the zero block. 
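For orientation, the generated lookup/lookupString methods above dispatch on the UTF-8 lead byte to decide how many continuation bytes to read (ASCII below 0x80, 2-byte sequences from 0xC2, 3-byte from 0xE0, 4-byte from 0xF0 up to but not including 0xF8). The following is a minimal standalone sketch of that lead-byte classification only; it is not part of the vendored file or this patch, and sizeFromLeadByte is a hypothetical helper name introduced here for illustration.

package main

import "fmt"

// sizeFromLeadByte mirrors the lead-byte ranges used by the generated
// lookup/lookupString methods in tables12.0.0.go. It returns the expected
// total encoding length for a legal starter byte, or -1 for bytes that
// cannot begin a valid UTF-8 sequence (note the vendored code instead
// consumes such bytes with size 1 and a zero trie value).
func sizeFromLeadByte(c0 byte) int {
	switch {
	case c0 < 0x80:
		return 1 // ASCII
	case c0 < 0xC2:
		return -1 // continuation byte or overlong lead: not a starter
	case c0 < 0xE0:
		return 2
	case c0 < 0xF0:
		return 3
	case c0 < 0xF8:
		return 4
	default:
		return -1 // 0xF8..0xFF never occur in valid UTF-8
	}
}

func main() {
	for _, b := range []byte{'A', 0xC3, 0xE3, 0xF0, 0x80, 0xFF} {
		fmt.Printf("lead byte 0x%02X -> %d\n", b, sizeFromLeadByte(b))
	}
}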
+var widthValues = [6656]uint16{ + // Block 0x0, offset 0x0 + 0x20: 0x6001, 0x21: 0x6002, 0x22: 0x6002, 0x23: 0x6002, + 0x24: 0x6002, 0x25: 0x6002, 0x26: 0x6002, 0x27: 0x6002, 0x28: 0x6002, 0x29: 0x6002, + 0x2a: 0x6002, 0x2b: 0x6002, 0x2c: 0x6002, 0x2d: 0x6002, 0x2e: 0x6002, 0x2f: 0x6002, + 0x30: 0x6002, 0x31: 0x6002, 0x32: 0x6002, 0x33: 0x6002, 0x34: 0x6002, 0x35: 0x6002, + 0x36: 0x6002, 0x37: 0x6002, 0x38: 0x6002, 0x39: 0x6002, 0x3a: 0x6002, 0x3b: 0x6002, + 0x3c: 0x6002, 0x3d: 0x6002, 0x3e: 0x6002, 0x3f: 0x6002, + // Block 0x1, offset 0x40 + 0x40: 0x6003, 0x41: 0x6003, 0x42: 0x6003, 0x43: 0x6003, 0x44: 0x6003, 0x45: 0x6003, + 0x46: 0x6003, 0x47: 0x6003, 0x48: 0x6003, 0x49: 0x6003, 0x4a: 0x6003, 0x4b: 0x6003, + 0x4c: 0x6003, 0x4d: 0x6003, 0x4e: 0x6003, 0x4f: 0x6003, 0x50: 0x6003, 0x51: 0x6003, + 0x52: 0x6003, 0x53: 0x6003, 0x54: 0x6003, 0x55: 0x6003, 0x56: 0x6003, 0x57: 0x6003, + 0x58: 0x6003, 0x59: 0x6003, 0x5a: 0x6003, 0x5b: 0x6003, 0x5c: 0x6003, 0x5d: 0x6003, + 0x5e: 0x6003, 0x5f: 0x6003, 0x60: 0x6004, 0x61: 0x6004, 0x62: 0x6004, 0x63: 0x6004, + 0x64: 0x6004, 0x65: 0x6004, 0x66: 0x6004, 0x67: 0x6004, 0x68: 0x6004, 0x69: 0x6004, + 0x6a: 0x6004, 0x6b: 0x6004, 0x6c: 0x6004, 0x6d: 0x6004, 0x6e: 0x6004, 0x6f: 0x6004, + 0x70: 0x6004, 0x71: 0x6004, 0x72: 0x6004, 0x73: 0x6004, 0x74: 0x6004, 0x75: 0x6004, + 0x76: 0x6004, 0x77: 0x6004, 0x78: 0x6004, 0x79: 0x6004, 0x7a: 0x6004, 0x7b: 0x6004, + 0x7c: 0x6004, 0x7d: 0x6004, 0x7e: 0x6004, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xe1: 0x2000, 0xe2: 0x6005, 0xe3: 0x6005, + 0xe4: 0x2000, 0xe5: 0x6006, 0xe6: 0x6005, 0xe7: 0x2000, 0xe8: 0x2000, + 0xea: 0x2000, 0xec: 0x6007, 0xed: 0x2000, 0xee: 0x2000, 0xef: 0x6008, + 0xf0: 0x2000, 0xf1: 0x2000, 0xf2: 0x2000, 0xf3: 0x2000, 0xf4: 0x2000, + 0xf6: 0x2000, 0xf7: 0x2000, 0xf8: 0x2000, 0xf9: 0x2000, 0xfa: 0x2000, + 0xfc: 0x2000, 0xfd: 0x2000, 0xfe: 0x2000, 0xff: 0x2000, + // Block 0x4, offset 0x100 + 0x106: 0x2000, + 0x110: 0x2000, + 0x117: 0x2000, + 0x118: 0x2000, + 0x11e: 0x2000, 0x11f: 0x2000, 0x120: 0x2000, 0x121: 0x2000, + 0x126: 0x2000, 0x128: 0x2000, 0x129: 0x2000, + 0x12a: 0x2000, 0x12c: 0x2000, 0x12d: 0x2000, + 0x130: 0x2000, 0x132: 0x2000, 0x133: 0x2000, + 0x137: 0x2000, 0x138: 0x2000, 0x139: 0x2000, 0x13a: 0x2000, + 0x13c: 0x2000, 0x13e: 0x2000, + // Block 0x5, offset 0x140 + 0x141: 0x2000, + 0x151: 0x2000, + 0x153: 0x2000, + 0x15b: 0x2000, + 0x166: 0x2000, 0x167: 0x2000, + 0x16b: 0x2000, + 0x171: 0x2000, 0x172: 0x2000, 0x173: 0x2000, + 0x178: 0x2000, + 0x17f: 0x2000, + // Block 0x6, offset 0x180 + 0x180: 0x2000, 0x181: 0x2000, 0x182: 0x2000, 0x184: 0x2000, + 0x188: 0x2000, 0x189: 0x2000, 0x18a: 0x2000, 0x18b: 0x2000, + 0x18d: 0x2000, + 0x192: 0x2000, 0x193: 0x2000, + 0x1a6: 0x2000, 0x1a7: 0x2000, + 0x1ab: 0x2000, + // Block 0x7, offset 0x1c0 + 0x1ce: 0x2000, 0x1d0: 0x2000, + 0x1d2: 0x2000, 0x1d4: 0x2000, 0x1d6: 0x2000, + 0x1d8: 0x2000, 0x1da: 0x2000, 0x1dc: 0x2000, + // Block 0x8, offset 0x200 + 0x211: 0x2000, + 0x221: 0x2000, + // Block 0x9, offset 0x240 + 0x244: 0x2000, + 0x247: 0x2000, 0x249: 0x2000, 0x24a: 0x2000, 0x24b: 0x2000, + 0x24d: 0x2000, 0x250: 0x2000, + 0x258: 0x2000, 0x259: 0x2000, 0x25a: 0x2000, 0x25b: 0x2000, 0x25d: 0x2000, + 0x25f: 0x2000, + // Block 0xa, offset 0x280 + 0x280: 0x2000, 0x281: 0x2000, 0x282: 0x2000, 0x283: 0x2000, 0x284: 0x2000, 0x285: 0x2000, + 0x286: 0x2000, 0x287: 0x2000, 0x288: 0x2000, 0x289: 0x2000, 0x28a: 0x2000, 0x28b: 0x2000, + 0x28c: 0x2000, 0x28d: 0x2000, 0x28e: 0x2000, 0x28f: 0x2000, 0x290: 0x2000, 0x291: 0x2000, + 0x292: 0x2000, 0x293: 
0x2000, 0x294: 0x2000, 0x295: 0x2000, 0x296: 0x2000, 0x297: 0x2000, + 0x298: 0x2000, 0x299: 0x2000, 0x29a: 0x2000, 0x29b: 0x2000, 0x29c: 0x2000, 0x29d: 0x2000, + 0x29e: 0x2000, 0x29f: 0x2000, 0x2a0: 0x2000, 0x2a1: 0x2000, 0x2a2: 0x2000, 0x2a3: 0x2000, + 0x2a4: 0x2000, 0x2a5: 0x2000, 0x2a6: 0x2000, 0x2a7: 0x2000, 0x2a8: 0x2000, 0x2a9: 0x2000, + 0x2aa: 0x2000, 0x2ab: 0x2000, 0x2ac: 0x2000, 0x2ad: 0x2000, 0x2ae: 0x2000, 0x2af: 0x2000, + 0x2b0: 0x2000, 0x2b1: 0x2000, 0x2b2: 0x2000, 0x2b3: 0x2000, 0x2b4: 0x2000, 0x2b5: 0x2000, + 0x2b6: 0x2000, 0x2b7: 0x2000, 0x2b8: 0x2000, 0x2b9: 0x2000, 0x2ba: 0x2000, 0x2bb: 0x2000, + 0x2bc: 0x2000, 0x2bd: 0x2000, 0x2be: 0x2000, 0x2bf: 0x2000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x2000, 0x2c1: 0x2000, 0x2c2: 0x2000, 0x2c3: 0x2000, 0x2c4: 0x2000, 0x2c5: 0x2000, + 0x2c6: 0x2000, 0x2c7: 0x2000, 0x2c8: 0x2000, 0x2c9: 0x2000, 0x2ca: 0x2000, 0x2cb: 0x2000, + 0x2cc: 0x2000, 0x2cd: 0x2000, 0x2ce: 0x2000, 0x2cf: 0x2000, 0x2d0: 0x2000, 0x2d1: 0x2000, + 0x2d2: 0x2000, 0x2d3: 0x2000, 0x2d4: 0x2000, 0x2d5: 0x2000, 0x2d6: 0x2000, 0x2d7: 0x2000, + 0x2d8: 0x2000, 0x2d9: 0x2000, 0x2da: 0x2000, 0x2db: 0x2000, 0x2dc: 0x2000, 0x2dd: 0x2000, + 0x2de: 0x2000, 0x2df: 0x2000, 0x2e0: 0x2000, 0x2e1: 0x2000, 0x2e2: 0x2000, 0x2e3: 0x2000, + 0x2e4: 0x2000, 0x2e5: 0x2000, 0x2e6: 0x2000, 0x2e7: 0x2000, 0x2e8: 0x2000, 0x2e9: 0x2000, + 0x2ea: 0x2000, 0x2eb: 0x2000, 0x2ec: 0x2000, 0x2ed: 0x2000, 0x2ee: 0x2000, 0x2ef: 0x2000, + // Block 0xc, offset 0x300 + 0x311: 0x2000, + 0x312: 0x2000, 0x313: 0x2000, 0x314: 0x2000, 0x315: 0x2000, 0x316: 0x2000, 0x317: 0x2000, + 0x318: 0x2000, 0x319: 0x2000, 0x31a: 0x2000, 0x31b: 0x2000, 0x31c: 0x2000, 0x31d: 0x2000, + 0x31e: 0x2000, 0x31f: 0x2000, 0x320: 0x2000, 0x321: 0x2000, 0x323: 0x2000, + 0x324: 0x2000, 0x325: 0x2000, 0x326: 0x2000, 0x327: 0x2000, 0x328: 0x2000, 0x329: 0x2000, + 0x331: 0x2000, 0x332: 0x2000, 0x333: 0x2000, 0x334: 0x2000, 0x335: 0x2000, + 0x336: 0x2000, 0x337: 0x2000, 0x338: 0x2000, 0x339: 0x2000, 0x33a: 0x2000, 0x33b: 0x2000, + 0x33c: 0x2000, 0x33d: 0x2000, 0x33e: 0x2000, 0x33f: 0x2000, + // Block 0xd, offset 0x340 + 0x340: 0x2000, 0x341: 0x2000, 0x343: 0x2000, 0x344: 0x2000, 0x345: 0x2000, + 0x346: 0x2000, 0x347: 0x2000, 0x348: 0x2000, 0x349: 0x2000, + // Block 0xe, offset 0x380 + 0x381: 0x2000, + 0x390: 0x2000, 0x391: 0x2000, + 0x392: 0x2000, 0x393: 0x2000, 0x394: 0x2000, 0x395: 0x2000, 0x396: 0x2000, 0x397: 0x2000, + 0x398: 0x2000, 0x399: 0x2000, 0x39a: 0x2000, 0x39b: 0x2000, 0x39c: 0x2000, 0x39d: 0x2000, + 0x39e: 0x2000, 0x39f: 0x2000, 0x3a0: 0x2000, 0x3a1: 0x2000, 0x3a2: 0x2000, 0x3a3: 0x2000, + 0x3a4: 0x2000, 0x3a5: 0x2000, 0x3a6: 0x2000, 0x3a7: 0x2000, 0x3a8: 0x2000, 0x3a9: 0x2000, + 0x3aa: 0x2000, 0x3ab: 0x2000, 0x3ac: 0x2000, 0x3ad: 0x2000, 0x3ae: 0x2000, 0x3af: 0x2000, + 0x3b0: 0x2000, 0x3b1: 0x2000, 0x3b2: 0x2000, 0x3b3: 0x2000, 0x3b4: 0x2000, 0x3b5: 0x2000, + 0x3b6: 0x2000, 0x3b7: 0x2000, 0x3b8: 0x2000, 0x3b9: 0x2000, 0x3ba: 0x2000, 0x3bb: 0x2000, + 0x3bc: 0x2000, 0x3bd: 0x2000, 0x3be: 0x2000, 0x3bf: 0x2000, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x2000, 0x3c1: 0x2000, 0x3c2: 0x2000, 0x3c3: 0x2000, 0x3c4: 0x2000, 0x3c5: 0x2000, + 0x3c6: 0x2000, 0x3c7: 0x2000, 0x3c8: 0x2000, 0x3c9: 0x2000, 0x3ca: 0x2000, 0x3cb: 0x2000, + 0x3cc: 0x2000, 0x3cd: 0x2000, 0x3ce: 0x2000, 0x3cf: 0x2000, 0x3d1: 0x2000, + // Block 0x10, offset 0x400 + 0x400: 0x4000, 0x401: 0x4000, 0x402: 0x4000, 0x403: 0x4000, 0x404: 0x4000, 0x405: 0x4000, + 0x406: 0x4000, 0x407: 0x4000, 0x408: 0x4000, 0x409: 0x4000, 0x40a: 0x4000, 0x40b: 0x4000, + 0x40c: 0x4000, 
0x40d: 0x4000, 0x40e: 0x4000, 0x40f: 0x4000, 0x410: 0x4000, 0x411: 0x4000, + 0x412: 0x4000, 0x413: 0x4000, 0x414: 0x4000, 0x415: 0x4000, 0x416: 0x4000, 0x417: 0x4000, + 0x418: 0x4000, 0x419: 0x4000, 0x41a: 0x4000, 0x41b: 0x4000, 0x41c: 0x4000, 0x41d: 0x4000, + 0x41e: 0x4000, 0x41f: 0x4000, 0x420: 0x4000, 0x421: 0x4000, 0x422: 0x4000, 0x423: 0x4000, + 0x424: 0x4000, 0x425: 0x4000, 0x426: 0x4000, 0x427: 0x4000, 0x428: 0x4000, 0x429: 0x4000, + 0x42a: 0x4000, 0x42b: 0x4000, 0x42c: 0x4000, 0x42d: 0x4000, 0x42e: 0x4000, 0x42f: 0x4000, + 0x430: 0x4000, 0x431: 0x4000, 0x432: 0x4000, 0x433: 0x4000, 0x434: 0x4000, 0x435: 0x4000, + 0x436: 0x4000, 0x437: 0x4000, 0x438: 0x4000, 0x439: 0x4000, 0x43a: 0x4000, 0x43b: 0x4000, + 0x43c: 0x4000, 0x43d: 0x4000, 0x43e: 0x4000, 0x43f: 0x4000, + // Block 0x11, offset 0x440 + 0x440: 0x4000, 0x441: 0x4000, 0x442: 0x4000, 0x443: 0x4000, 0x444: 0x4000, 0x445: 0x4000, + 0x446: 0x4000, 0x447: 0x4000, 0x448: 0x4000, 0x449: 0x4000, 0x44a: 0x4000, 0x44b: 0x4000, + 0x44c: 0x4000, 0x44d: 0x4000, 0x44e: 0x4000, 0x44f: 0x4000, 0x450: 0x4000, 0x451: 0x4000, + 0x452: 0x4000, 0x453: 0x4000, 0x454: 0x4000, 0x455: 0x4000, 0x456: 0x4000, 0x457: 0x4000, + 0x458: 0x4000, 0x459: 0x4000, 0x45a: 0x4000, 0x45b: 0x4000, 0x45c: 0x4000, 0x45d: 0x4000, + 0x45e: 0x4000, 0x45f: 0x4000, + // Block 0x12, offset 0x480 + 0x490: 0x2000, + 0x493: 0x2000, 0x494: 0x2000, 0x495: 0x2000, 0x496: 0x2000, + 0x498: 0x2000, 0x499: 0x2000, 0x49c: 0x2000, 0x49d: 0x2000, + 0x4a0: 0x2000, 0x4a1: 0x2000, 0x4a2: 0x2000, + 0x4a4: 0x2000, 0x4a5: 0x2000, 0x4a6: 0x2000, 0x4a7: 0x2000, + 0x4b0: 0x2000, 0x4b2: 0x2000, 0x4b3: 0x2000, 0x4b5: 0x2000, + 0x4bb: 0x2000, + 0x4be: 0x2000, + // Block 0x13, offset 0x4c0 + 0x4f4: 0x2000, + 0x4ff: 0x2000, + // Block 0x14, offset 0x500 + 0x501: 0x2000, 0x502: 0x2000, 0x503: 0x2000, 0x504: 0x2000, + 0x529: 0xa009, + 0x52c: 0x2000, + // Block 0x15, offset 0x540 + 0x543: 0x2000, 0x545: 0x2000, + 0x549: 0x2000, + 0x553: 0x2000, 0x556: 0x2000, + 0x561: 0x2000, 0x562: 0x2000, + 0x566: 0x2000, + 0x56b: 0x2000, + // Block 0x16, offset 0x580 + 0x593: 0x2000, 0x594: 0x2000, + 0x59b: 0x2000, 0x59c: 0x2000, 0x59d: 0x2000, + 0x59e: 0x2000, 0x5a0: 0x2000, 0x5a1: 0x2000, 0x5a2: 0x2000, 0x5a3: 0x2000, + 0x5a4: 0x2000, 0x5a5: 0x2000, 0x5a6: 0x2000, 0x5a7: 0x2000, 0x5a8: 0x2000, 0x5a9: 0x2000, + 0x5aa: 0x2000, 0x5ab: 0x2000, + 0x5b0: 0x2000, 0x5b1: 0x2000, 0x5b2: 0x2000, 0x5b3: 0x2000, 0x5b4: 0x2000, 0x5b5: 0x2000, + 0x5b6: 0x2000, 0x5b7: 0x2000, 0x5b8: 0x2000, 0x5b9: 0x2000, + // Block 0x17, offset 0x5c0 + 0x5c9: 0x2000, + 0x5d0: 0x200a, 0x5d1: 0x200b, + 0x5d2: 0x200a, 0x5d3: 0x200c, 0x5d4: 0x2000, 0x5d5: 0x2000, 0x5d6: 0x2000, 0x5d7: 0x2000, + 0x5d8: 0x2000, 0x5d9: 0x2000, + 0x5f8: 0x2000, 0x5f9: 0x2000, + // Block 0x18, offset 0x600 + 0x612: 0x2000, 0x614: 0x2000, + 0x627: 0x2000, + // Block 0x19, offset 0x640 + 0x640: 0x2000, 0x642: 0x2000, 0x643: 0x2000, + 0x647: 0x2000, 0x648: 0x2000, 0x64b: 0x2000, + 0x64f: 0x2000, 0x651: 0x2000, + 0x655: 0x2000, + 0x65a: 0x2000, 0x65d: 0x2000, + 0x65e: 0x2000, 0x65f: 0x2000, 0x660: 0x2000, 0x663: 0x2000, + 0x665: 0x2000, 0x667: 0x2000, 0x668: 0x2000, 0x669: 0x2000, + 0x66a: 0x2000, 0x66b: 0x2000, 0x66c: 0x2000, 0x66e: 0x2000, + 0x674: 0x2000, 0x675: 0x2000, + 0x676: 0x2000, 0x677: 0x2000, + 0x67c: 0x2000, 0x67d: 0x2000, + // Block 0x1a, offset 0x680 + 0x688: 0x2000, + 0x68c: 0x2000, + 0x692: 0x2000, + 0x6a0: 0x2000, 0x6a1: 0x2000, + 0x6a4: 0x2000, 0x6a5: 0x2000, 0x6a6: 0x2000, 0x6a7: 0x2000, + 0x6aa: 0x2000, 0x6ab: 0x2000, 0x6ae: 0x2000, 0x6af: 0x2000, + // 
Block 0x1b, offset 0x6c0 + 0x6c2: 0x2000, 0x6c3: 0x2000, + 0x6c6: 0x2000, 0x6c7: 0x2000, + 0x6d5: 0x2000, + 0x6d9: 0x2000, + 0x6e5: 0x2000, + 0x6ff: 0x2000, + // Block 0x1c, offset 0x700 + 0x712: 0x2000, + 0x71a: 0x4000, 0x71b: 0x4000, + 0x729: 0x4000, + 0x72a: 0x4000, + // Block 0x1d, offset 0x740 + 0x769: 0x4000, + 0x76a: 0x4000, 0x76b: 0x4000, 0x76c: 0x4000, + 0x770: 0x4000, 0x773: 0x4000, + // Block 0x1e, offset 0x780 + 0x7a0: 0x2000, 0x7a1: 0x2000, 0x7a2: 0x2000, 0x7a3: 0x2000, + 0x7a4: 0x2000, 0x7a5: 0x2000, 0x7a6: 0x2000, 0x7a7: 0x2000, 0x7a8: 0x2000, 0x7a9: 0x2000, + 0x7aa: 0x2000, 0x7ab: 0x2000, 0x7ac: 0x2000, 0x7ad: 0x2000, 0x7ae: 0x2000, 0x7af: 0x2000, + 0x7b0: 0x2000, 0x7b1: 0x2000, 0x7b2: 0x2000, 0x7b3: 0x2000, 0x7b4: 0x2000, 0x7b5: 0x2000, + 0x7b6: 0x2000, 0x7b7: 0x2000, 0x7b8: 0x2000, 0x7b9: 0x2000, 0x7ba: 0x2000, 0x7bb: 0x2000, + 0x7bc: 0x2000, 0x7bd: 0x2000, 0x7be: 0x2000, 0x7bf: 0x2000, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x2000, 0x7c1: 0x2000, 0x7c2: 0x2000, 0x7c3: 0x2000, 0x7c4: 0x2000, 0x7c5: 0x2000, + 0x7c6: 0x2000, 0x7c7: 0x2000, 0x7c8: 0x2000, 0x7c9: 0x2000, 0x7ca: 0x2000, 0x7cb: 0x2000, + 0x7cc: 0x2000, 0x7cd: 0x2000, 0x7ce: 0x2000, 0x7cf: 0x2000, 0x7d0: 0x2000, 0x7d1: 0x2000, + 0x7d2: 0x2000, 0x7d3: 0x2000, 0x7d4: 0x2000, 0x7d5: 0x2000, 0x7d6: 0x2000, 0x7d7: 0x2000, + 0x7d8: 0x2000, 0x7d9: 0x2000, 0x7da: 0x2000, 0x7db: 0x2000, 0x7dc: 0x2000, 0x7dd: 0x2000, + 0x7de: 0x2000, 0x7df: 0x2000, 0x7e0: 0x2000, 0x7e1: 0x2000, 0x7e2: 0x2000, 0x7e3: 0x2000, + 0x7e4: 0x2000, 0x7e5: 0x2000, 0x7e6: 0x2000, 0x7e7: 0x2000, 0x7e8: 0x2000, 0x7e9: 0x2000, + 0x7eb: 0x2000, 0x7ec: 0x2000, 0x7ed: 0x2000, 0x7ee: 0x2000, 0x7ef: 0x2000, + 0x7f0: 0x2000, 0x7f1: 0x2000, 0x7f2: 0x2000, 0x7f3: 0x2000, 0x7f4: 0x2000, 0x7f5: 0x2000, + 0x7f6: 0x2000, 0x7f7: 0x2000, 0x7f8: 0x2000, 0x7f9: 0x2000, 0x7fa: 0x2000, 0x7fb: 0x2000, + 0x7fc: 0x2000, 0x7fd: 0x2000, 0x7fe: 0x2000, 0x7ff: 0x2000, + // Block 0x20, offset 0x800 + 0x800: 0x2000, 0x801: 0x2000, 0x802: 0x200d, 0x803: 0x2000, 0x804: 0x2000, 0x805: 0x2000, + 0x806: 0x2000, 0x807: 0x2000, 0x808: 0x2000, 0x809: 0x2000, 0x80a: 0x2000, 0x80b: 0x2000, + 0x80c: 0x2000, 0x80d: 0x2000, 0x80e: 0x2000, 0x80f: 0x2000, 0x810: 0x2000, 0x811: 0x2000, + 0x812: 0x2000, 0x813: 0x2000, 0x814: 0x2000, 0x815: 0x2000, 0x816: 0x2000, 0x817: 0x2000, + 0x818: 0x2000, 0x819: 0x2000, 0x81a: 0x2000, 0x81b: 0x2000, 0x81c: 0x2000, 0x81d: 0x2000, + 0x81e: 0x2000, 0x81f: 0x2000, 0x820: 0x2000, 0x821: 0x2000, 0x822: 0x2000, 0x823: 0x2000, + 0x824: 0x2000, 0x825: 0x2000, 0x826: 0x2000, 0x827: 0x2000, 0x828: 0x2000, 0x829: 0x2000, + 0x82a: 0x2000, 0x82b: 0x2000, 0x82c: 0x2000, 0x82d: 0x2000, 0x82e: 0x2000, 0x82f: 0x2000, + 0x830: 0x2000, 0x831: 0x2000, 0x832: 0x2000, 0x833: 0x2000, 0x834: 0x2000, 0x835: 0x2000, + 0x836: 0x2000, 0x837: 0x2000, 0x838: 0x2000, 0x839: 0x2000, 0x83a: 0x2000, 0x83b: 0x2000, + 0x83c: 0x2000, 0x83d: 0x2000, 0x83e: 0x2000, 0x83f: 0x2000, + // Block 0x21, offset 0x840 + 0x840: 0x2000, 0x841: 0x2000, 0x842: 0x2000, 0x843: 0x2000, 0x844: 0x2000, 0x845: 0x2000, + 0x846: 0x2000, 0x847: 0x2000, 0x848: 0x2000, 0x849: 0x2000, 0x84a: 0x2000, 0x84b: 0x2000, + 0x850: 0x2000, 0x851: 0x2000, + 0x852: 0x2000, 0x853: 0x2000, 0x854: 0x2000, 0x855: 0x2000, 0x856: 0x2000, 0x857: 0x2000, + 0x858: 0x2000, 0x859: 0x2000, 0x85a: 0x2000, 0x85b: 0x2000, 0x85c: 0x2000, 0x85d: 0x2000, + 0x85e: 0x2000, 0x85f: 0x2000, 0x860: 0x2000, 0x861: 0x2000, 0x862: 0x2000, 0x863: 0x2000, + 0x864: 0x2000, 0x865: 0x2000, 0x866: 0x2000, 0x867: 0x2000, 0x868: 0x2000, 0x869: 0x2000, + 0x86a: 
0x2000, 0x86b: 0x2000, 0x86c: 0x2000, 0x86d: 0x2000, 0x86e: 0x2000, 0x86f: 0x2000, + 0x870: 0x2000, 0x871: 0x2000, 0x872: 0x2000, 0x873: 0x2000, + // Block 0x22, offset 0x880 + 0x880: 0x2000, 0x881: 0x2000, 0x882: 0x2000, 0x883: 0x2000, 0x884: 0x2000, 0x885: 0x2000, + 0x886: 0x2000, 0x887: 0x2000, 0x888: 0x2000, 0x889: 0x2000, 0x88a: 0x2000, 0x88b: 0x2000, + 0x88c: 0x2000, 0x88d: 0x2000, 0x88e: 0x2000, 0x88f: 0x2000, + 0x892: 0x2000, 0x893: 0x2000, 0x894: 0x2000, 0x895: 0x2000, + 0x8a0: 0x200e, 0x8a1: 0x2000, 0x8a3: 0x2000, + 0x8a4: 0x2000, 0x8a5: 0x2000, 0x8a6: 0x2000, 0x8a7: 0x2000, 0x8a8: 0x2000, 0x8a9: 0x2000, + 0x8b2: 0x2000, 0x8b3: 0x2000, + 0x8b6: 0x2000, 0x8b7: 0x2000, + 0x8bc: 0x2000, 0x8bd: 0x2000, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x2000, 0x8c1: 0x2000, + 0x8c6: 0x2000, 0x8c7: 0x2000, 0x8c8: 0x2000, 0x8cb: 0x200f, + 0x8ce: 0x2000, 0x8cf: 0x2000, 0x8d0: 0x2000, 0x8d1: 0x2000, + 0x8e2: 0x2000, 0x8e3: 0x2000, + 0x8e4: 0x2000, 0x8e5: 0x2000, + 0x8ef: 0x2000, + 0x8fd: 0x4000, 0x8fe: 0x4000, + // Block 0x24, offset 0x900 + 0x905: 0x2000, + 0x906: 0x2000, 0x909: 0x2000, + 0x90e: 0x2000, 0x90f: 0x2000, + 0x914: 0x4000, 0x915: 0x4000, + 0x91c: 0x2000, + 0x91e: 0x2000, + // Block 0x25, offset 0x940 + 0x940: 0x2000, 0x942: 0x2000, + 0x948: 0x4000, 0x949: 0x4000, 0x94a: 0x4000, 0x94b: 0x4000, + 0x94c: 0x4000, 0x94d: 0x4000, 0x94e: 0x4000, 0x94f: 0x4000, 0x950: 0x4000, 0x951: 0x4000, + 0x952: 0x4000, 0x953: 0x4000, + 0x960: 0x2000, 0x961: 0x2000, 0x963: 0x2000, + 0x964: 0x2000, 0x965: 0x2000, 0x967: 0x2000, 0x968: 0x2000, 0x969: 0x2000, + 0x96a: 0x2000, 0x96c: 0x2000, 0x96d: 0x2000, 0x96f: 0x2000, + 0x97f: 0x4000, + // Block 0x26, offset 0x980 + 0x993: 0x4000, + 0x99e: 0x2000, 0x99f: 0x2000, 0x9a1: 0x4000, + 0x9aa: 0x4000, 0x9ab: 0x4000, + 0x9bd: 0x4000, 0x9be: 0x4000, 0x9bf: 0x2000, + // Block 0x27, offset 0x9c0 + 0x9c4: 0x4000, 0x9c5: 0x4000, + 0x9c6: 0x2000, 0x9c7: 0x2000, 0x9c8: 0x2000, 0x9c9: 0x2000, 0x9ca: 0x2000, 0x9cb: 0x2000, + 0x9cc: 0x2000, 0x9cd: 0x2000, 0x9ce: 0x4000, 0x9cf: 0x2000, 0x9d0: 0x2000, 0x9d1: 0x2000, + 0x9d2: 0x2000, 0x9d3: 0x2000, 0x9d4: 0x4000, 0x9d5: 0x2000, 0x9d6: 0x2000, 0x9d7: 0x2000, + 0x9d8: 0x2000, 0x9d9: 0x2000, 0x9da: 0x2000, 0x9db: 0x2000, 0x9dc: 0x2000, 0x9dd: 0x2000, + 0x9de: 0x2000, 0x9df: 0x2000, 0x9e0: 0x2000, 0x9e1: 0x2000, 0x9e3: 0x2000, + 0x9e8: 0x2000, 0x9e9: 0x2000, + 0x9ea: 0x4000, 0x9eb: 0x2000, 0x9ec: 0x2000, 0x9ed: 0x2000, 0x9ee: 0x2000, 0x9ef: 0x2000, + 0x9f0: 0x2000, 0x9f1: 0x2000, 0x9f2: 0x4000, 0x9f3: 0x4000, 0x9f4: 0x2000, 0x9f5: 0x4000, + 0x9f6: 0x2000, 0x9f7: 0x2000, 0x9f8: 0x2000, 0x9f9: 0x2000, 0x9fa: 0x4000, 0x9fb: 0x2000, + 0x9fc: 0x2000, 0x9fd: 0x4000, 0x9fe: 0x2000, 0x9ff: 0x2000, + // Block 0x28, offset 0xa00 + 0xa05: 0x4000, + 0xa0a: 0x4000, 0xa0b: 0x4000, + 0xa28: 0x4000, + 0xa3d: 0x2000, + // Block 0x29, offset 0xa40 + 0xa4c: 0x4000, 0xa4e: 0x4000, + 0xa53: 0x4000, 0xa54: 0x4000, 0xa55: 0x4000, 0xa57: 0x4000, + 0xa76: 0x2000, 0xa77: 0x2000, 0xa78: 0x2000, 0xa79: 0x2000, 0xa7a: 0x2000, 0xa7b: 0x2000, + 0xa7c: 0x2000, 0xa7d: 0x2000, 0xa7e: 0x2000, 0xa7f: 0x2000, + // Block 0x2a, offset 0xa80 + 0xa95: 0x4000, 0xa96: 0x4000, 0xa97: 0x4000, + 0xab0: 0x4000, + 0xabf: 0x4000, + // Block 0x2b, offset 0xac0 + 0xae6: 0x6000, 0xae7: 0x6000, 0xae8: 0x6000, 0xae9: 0x6000, + 0xaea: 0x6000, 0xaeb: 0x6000, 0xaec: 0x6000, 0xaed: 0x6000, + // Block 0x2c, offset 0xb00 + 0xb05: 0x6010, + 0xb06: 0x6011, + // Block 0x2d, offset 0xb40 + 0xb5b: 0x4000, 0xb5c: 0x4000, + // Block 0x2e, offset 0xb80 + 0xb90: 0x4000, + 0xb95: 0x4000, 0xb96: 
0x2000, 0xb97: 0x2000, + 0xb98: 0x2000, 0xb99: 0x2000, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x4000, 0xbc1: 0x4000, 0xbc2: 0x4000, 0xbc3: 0x4000, 0xbc4: 0x4000, 0xbc5: 0x4000, + 0xbc6: 0x4000, 0xbc7: 0x4000, 0xbc8: 0x4000, 0xbc9: 0x4000, 0xbca: 0x4000, 0xbcb: 0x4000, + 0xbcc: 0x4000, 0xbcd: 0x4000, 0xbce: 0x4000, 0xbcf: 0x4000, 0xbd0: 0x4000, 0xbd1: 0x4000, + 0xbd2: 0x4000, 0xbd3: 0x4000, 0xbd4: 0x4000, 0xbd5: 0x4000, 0xbd6: 0x4000, 0xbd7: 0x4000, + 0xbd8: 0x4000, 0xbd9: 0x4000, 0xbdb: 0x4000, 0xbdc: 0x4000, 0xbdd: 0x4000, + 0xbde: 0x4000, 0xbdf: 0x4000, 0xbe0: 0x4000, 0xbe1: 0x4000, 0xbe2: 0x4000, 0xbe3: 0x4000, + 0xbe4: 0x4000, 0xbe5: 0x4000, 0xbe6: 0x4000, 0xbe7: 0x4000, 0xbe8: 0x4000, 0xbe9: 0x4000, + 0xbea: 0x4000, 0xbeb: 0x4000, 0xbec: 0x4000, 0xbed: 0x4000, 0xbee: 0x4000, 0xbef: 0x4000, + 0xbf0: 0x4000, 0xbf1: 0x4000, 0xbf2: 0x4000, 0xbf3: 0x4000, 0xbf4: 0x4000, 0xbf5: 0x4000, + 0xbf6: 0x4000, 0xbf7: 0x4000, 0xbf8: 0x4000, 0xbf9: 0x4000, 0xbfa: 0x4000, 0xbfb: 0x4000, + 0xbfc: 0x4000, 0xbfd: 0x4000, 0xbfe: 0x4000, 0xbff: 0x4000, + // Block 0x30, offset 0xc00 + 0xc00: 0x4000, 0xc01: 0x4000, 0xc02: 0x4000, 0xc03: 0x4000, 0xc04: 0x4000, 0xc05: 0x4000, + 0xc06: 0x4000, 0xc07: 0x4000, 0xc08: 0x4000, 0xc09: 0x4000, 0xc0a: 0x4000, 0xc0b: 0x4000, + 0xc0c: 0x4000, 0xc0d: 0x4000, 0xc0e: 0x4000, 0xc0f: 0x4000, 0xc10: 0x4000, 0xc11: 0x4000, + 0xc12: 0x4000, 0xc13: 0x4000, 0xc14: 0x4000, 0xc15: 0x4000, 0xc16: 0x4000, 0xc17: 0x4000, + 0xc18: 0x4000, 0xc19: 0x4000, 0xc1a: 0x4000, 0xc1b: 0x4000, 0xc1c: 0x4000, 0xc1d: 0x4000, + 0xc1e: 0x4000, 0xc1f: 0x4000, 0xc20: 0x4000, 0xc21: 0x4000, 0xc22: 0x4000, 0xc23: 0x4000, + 0xc24: 0x4000, 0xc25: 0x4000, 0xc26: 0x4000, 0xc27: 0x4000, 0xc28: 0x4000, 0xc29: 0x4000, + 0xc2a: 0x4000, 0xc2b: 0x4000, 0xc2c: 0x4000, 0xc2d: 0x4000, 0xc2e: 0x4000, 0xc2f: 0x4000, + 0xc30: 0x4000, 0xc31: 0x4000, 0xc32: 0x4000, 0xc33: 0x4000, + // Block 0x31, offset 0xc40 + 0xc40: 0x4000, 0xc41: 0x4000, 0xc42: 0x4000, 0xc43: 0x4000, 0xc44: 0x4000, 0xc45: 0x4000, + 0xc46: 0x4000, 0xc47: 0x4000, 0xc48: 0x4000, 0xc49: 0x4000, 0xc4a: 0x4000, 0xc4b: 0x4000, + 0xc4c: 0x4000, 0xc4d: 0x4000, 0xc4e: 0x4000, 0xc4f: 0x4000, 0xc50: 0x4000, 0xc51: 0x4000, + 0xc52: 0x4000, 0xc53: 0x4000, 0xc54: 0x4000, 0xc55: 0x4000, + 0xc70: 0x4000, 0xc71: 0x4000, 0xc72: 0x4000, 0xc73: 0x4000, 0xc74: 0x4000, 0xc75: 0x4000, + 0xc76: 0x4000, 0xc77: 0x4000, 0xc78: 0x4000, 0xc79: 0x4000, 0xc7a: 0x4000, 0xc7b: 0x4000, + // Block 0x32, offset 0xc80 + 0xc80: 0x9012, 0xc81: 0x4013, 0xc82: 0x4014, 0xc83: 0x4000, 0xc84: 0x4000, 0xc85: 0x4000, + 0xc86: 0x4000, 0xc87: 0x4000, 0xc88: 0x4000, 0xc89: 0x4000, 0xc8a: 0x4000, 0xc8b: 0x4000, + 0xc8c: 0x4015, 0xc8d: 0x4015, 0xc8e: 0x4000, 0xc8f: 0x4000, 0xc90: 0x4000, 0xc91: 0x4000, + 0xc92: 0x4000, 0xc93: 0x4000, 0xc94: 0x4000, 0xc95: 0x4000, 0xc96: 0x4000, 0xc97: 0x4000, + 0xc98: 0x4000, 0xc99: 0x4000, 0xc9a: 0x4000, 0xc9b: 0x4000, 0xc9c: 0x4000, 0xc9d: 0x4000, + 0xc9e: 0x4000, 0xc9f: 0x4000, 0xca0: 0x4000, 0xca1: 0x4000, 0xca2: 0x4000, 0xca3: 0x4000, + 0xca4: 0x4000, 0xca5: 0x4000, 0xca6: 0x4000, 0xca7: 0x4000, 0xca8: 0x4000, 0xca9: 0x4000, + 0xcaa: 0x4000, 0xcab: 0x4000, 0xcac: 0x4000, 0xcad: 0x4000, 0xcae: 0x4000, 0xcaf: 0x4000, + 0xcb0: 0x4000, 0xcb1: 0x4000, 0xcb2: 0x4000, 0xcb3: 0x4000, 0xcb4: 0x4000, 0xcb5: 0x4000, + 0xcb6: 0x4000, 0xcb7: 0x4000, 0xcb8: 0x4000, 0xcb9: 0x4000, 0xcba: 0x4000, 0xcbb: 0x4000, + 0xcbc: 0x4000, 0xcbd: 0x4000, 0xcbe: 0x4000, + // Block 0x33, offset 0xcc0 + 0xcc1: 0x4000, 0xcc2: 0x4000, 0xcc3: 0x4000, 0xcc4: 0x4000, 0xcc5: 0x4000, + 0xcc6: 0x4000, 
0xcc7: 0x4000, 0xcc8: 0x4000, 0xcc9: 0x4000, 0xcca: 0x4000, 0xccb: 0x4000, + 0xccc: 0x4000, 0xccd: 0x4000, 0xcce: 0x4000, 0xccf: 0x4000, 0xcd0: 0x4000, 0xcd1: 0x4000, + 0xcd2: 0x4000, 0xcd3: 0x4000, 0xcd4: 0x4000, 0xcd5: 0x4000, 0xcd6: 0x4000, 0xcd7: 0x4000, + 0xcd8: 0x4000, 0xcd9: 0x4000, 0xcda: 0x4000, 0xcdb: 0x4000, 0xcdc: 0x4000, 0xcdd: 0x4000, + 0xcde: 0x4000, 0xcdf: 0x4000, 0xce0: 0x4000, 0xce1: 0x4000, 0xce2: 0x4000, 0xce3: 0x4000, + 0xce4: 0x4000, 0xce5: 0x4000, 0xce6: 0x4000, 0xce7: 0x4000, 0xce8: 0x4000, 0xce9: 0x4000, + 0xcea: 0x4000, 0xceb: 0x4000, 0xcec: 0x4000, 0xced: 0x4000, 0xcee: 0x4000, 0xcef: 0x4000, + 0xcf0: 0x4000, 0xcf1: 0x4000, 0xcf2: 0x4000, 0xcf3: 0x4000, 0xcf4: 0x4000, 0xcf5: 0x4000, + 0xcf6: 0x4000, 0xcf7: 0x4000, 0xcf8: 0x4000, 0xcf9: 0x4000, 0xcfa: 0x4000, 0xcfb: 0x4000, + 0xcfc: 0x4000, 0xcfd: 0x4000, 0xcfe: 0x4000, 0xcff: 0x4000, + // Block 0x34, offset 0xd00 + 0xd00: 0x4000, 0xd01: 0x4000, 0xd02: 0x4000, 0xd03: 0x4000, 0xd04: 0x4000, 0xd05: 0x4000, + 0xd06: 0x4000, 0xd07: 0x4000, 0xd08: 0x4000, 0xd09: 0x4000, 0xd0a: 0x4000, 0xd0b: 0x4000, + 0xd0c: 0x4000, 0xd0d: 0x4000, 0xd0e: 0x4000, 0xd0f: 0x4000, 0xd10: 0x4000, 0xd11: 0x4000, + 0xd12: 0x4000, 0xd13: 0x4000, 0xd14: 0x4000, 0xd15: 0x4000, 0xd16: 0x4000, + 0xd19: 0x4016, 0xd1a: 0x4017, 0xd1b: 0x4000, 0xd1c: 0x4000, 0xd1d: 0x4000, + 0xd1e: 0x4000, 0xd1f: 0x4000, 0xd20: 0x4000, 0xd21: 0x4018, 0xd22: 0x4019, 0xd23: 0x401a, + 0xd24: 0x401b, 0xd25: 0x401c, 0xd26: 0x401d, 0xd27: 0x401e, 0xd28: 0x401f, 0xd29: 0x4020, + 0xd2a: 0x4021, 0xd2b: 0x4022, 0xd2c: 0x4000, 0xd2d: 0x4010, 0xd2e: 0x4000, 0xd2f: 0x4023, + 0xd30: 0x4000, 0xd31: 0x4024, 0xd32: 0x4000, 0xd33: 0x4025, 0xd34: 0x4000, 0xd35: 0x4026, + 0xd36: 0x4000, 0xd37: 0x401a, 0xd38: 0x4000, 0xd39: 0x4027, 0xd3a: 0x4000, 0xd3b: 0x4028, + 0xd3c: 0x4000, 0xd3d: 0x4020, 0xd3e: 0x4000, 0xd3f: 0x4029, + // Block 0x35, offset 0xd40 + 0xd40: 0x4000, 0xd41: 0x402a, 0xd42: 0x4000, 0xd43: 0x402b, 0xd44: 0x402c, 0xd45: 0x4000, + 0xd46: 0x4017, 0xd47: 0x4000, 0xd48: 0x402d, 0xd49: 0x4000, 0xd4a: 0x402e, 0xd4b: 0x402f, + 0xd4c: 0x4030, 0xd4d: 0x4017, 0xd4e: 0x4016, 0xd4f: 0x4017, 0xd50: 0x4000, 0xd51: 0x4000, + 0xd52: 0x4031, 0xd53: 0x4000, 0xd54: 0x4000, 0xd55: 0x4031, 0xd56: 0x4000, 0xd57: 0x4000, + 0xd58: 0x4032, 0xd59: 0x4000, 0xd5a: 0x4000, 0xd5b: 0x4032, 0xd5c: 0x4000, 0xd5d: 0x4000, + 0xd5e: 0x4033, 0xd5f: 0x402e, 0xd60: 0x4034, 0xd61: 0x4035, 0xd62: 0x4034, 0xd63: 0x4036, + 0xd64: 0x4037, 0xd65: 0x4024, 0xd66: 0x4035, 0xd67: 0x4025, 0xd68: 0x4038, 0xd69: 0x4038, + 0xd6a: 0x4039, 0xd6b: 0x4039, 0xd6c: 0x403a, 0xd6d: 0x403a, 0xd6e: 0x4000, 0xd6f: 0x4035, + 0xd70: 0x4000, 0xd71: 0x4000, 0xd72: 0x403b, 0xd73: 0x403c, 0xd74: 0x4000, 0xd75: 0x4000, + 0xd76: 0x4000, 0xd77: 0x4000, 0xd78: 0x4000, 0xd79: 0x4000, 0xd7a: 0x4000, 0xd7b: 0x403d, + 0xd7c: 0x401c, 0xd7d: 0x4000, 0xd7e: 0x4000, 0xd7f: 0x4000, + // Block 0x36, offset 0xd80 + 0xd85: 0x4000, + 0xd86: 0x4000, 0xd87: 0x4000, 0xd88: 0x4000, 0xd89: 0x4000, 0xd8a: 0x4000, 0xd8b: 0x4000, + 0xd8c: 0x4000, 0xd8d: 0x4000, 0xd8e: 0x4000, 0xd8f: 0x4000, 0xd90: 0x4000, 0xd91: 0x4000, + 0xd92: 0x4000, 0xd93: 0x4000, 0xd94: 0x4000, 0xd95: 0x4000, 0xd96: 0x4000, 0xd97: 0x4000, + 0xd98: 0x4000, 0xd99: 0x4000, 0xd9a: 0x4000, 0xd9b: 0x4000, 0xd9c: 0x4000, 0xd9d: 0x4000, + 0xd9e: 0x4000, 0xd9f: 0x4000, 0xda0: 0x4000, 0xda1: 0x4000, 0xda2: 0x4000, 0xda3: 0x4000, + 0xda4: 0x4000, 0xda5: 0x4000, 0xda6: 0x4000, 0xda7: 0x4000, 0xda8: 0x4000, 0xda9: 0x4000, + 0xdaa: 0x4000, 0xdab: 0x4000, 0xdac: 0x4000, 0xdad: 0x4000, 0xdae: 0x4000, 0xdaf: 
0x4000, + 0xdb1: 0x403e, 0xdb2: 0x403e, 0xdb3: 0x403e, 0xdb4: 0x403e, 0xdb5: 0x403e, + 0xdb6: 0x403e, 0xdb7: 0x403e, 0xdb8: 0x403e, 0xdb9: 0x403e, 0xdba: 0x403e, 0xdbb: 0x403e, + 0xdbc: 0x403e, 0xdbd: 0x403e, 0xdbe: 0x403e, 0xdbf: 0x403e, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x4037, 0xdc1: 0x4037, 0xdc2: 0x4037, 0xdc3: 0x4037, 0xdc4: 0x4037, 0xdc5: 0x4037, + 0xdc6: 0x4037, 0xdc7: 0x4037, 0xdc8: 0x4037, 0xdc9: 0x4037, 0xdca: 0x4037, 0xdcb: 0x4037, + 0xdcc: 0x4037, 0xdcd: 0x4037, 0xdce: 0x4037, 0xdcf: 0x400e, 0xdd0: 0x403f, 0xdd1: 0x4040, + 0xdd2: 0x4041, 0xdd3: 0x4040, 0xdd4: 0x403f, 0xdd5: 0x4042, 0xdd6: 0x4043, 0xdd7: 0x4044, + 0xdd8: 0x4040, 0xdd9: 0x4041, 0xdda: 0x4040, 0xddb: 0x4045, 0xddc: 0x4009, 0xddd: 0x4045, + 0xdde: 0x4046, 0xddf: 0x4045, 0xde0: 0x4047, 0xde1: 0x400b, 0xde2: 0x400a, 0xde3: 0x400c, + 0xde4: 0x4048, 0xde5: 0x4000, 0xde6: 0x4000, 0xde7: 0x4000, 0xde8: 0x4000, 0xde9: 0x4000, + 0xdea: 0x4000, 0xdeb: 0x4000, 0xdec: 0x4000, 0xded: 0x4000, 0xdee: 0x4000, 0xdef: 0x4000, + 0xdf0: 0x4000, 0xdf1: 0x4000, 0xdf2: 0x4000, 0xdf3: 0x4000, 0xdf4: 0x4000, 0xdf5: 0x4000, + 0xdf6: 0x4000, 0xdf7: 0x4000, 0xdf8: 0x4000, 0xdf9: 0x4000, 0xdfa: 0x4000, 0xdfb: 0x4000, + 0xdfc: 0x4000, 0xdfd: 0x4000, 0xdfe: 0x4000, 0xdff: 0x4000, + // Block 0x38, offset 0xe00 + 0xe00: 0x4000, 0xe01: 0x4000, 0xe02: 0x4000, 0xe03: 0x4000, 0xe04: 0x4000, 0xe05: 0x4000, + 0xe06: 0x4000, 0xe07: 0x4000, 0xe08: 0x4000, 0xe09: 0x4000, 0xe0a: 0x4000, 0xe0b: 0x4000, + 0xe0c: 0x4000, 0xe0d: 0x4000, 0xe0e: 0x4000, 0xe10: 0x4000, 0xe11: 0x4000, + 0xe12: 0x4000, 0xe13: 0x4000, 0xe14: 0x4000, 0xe15: 0x4000, 0xe16: 0x4000, 0xe17: 0x4000, + 0xe18: 0x4000, 0xe19: 0x4000, 0xe1a: 0x4000, 0xe1b: 0x4000, 0xe1c: 0x4000, 0xe1d: 0x4000, + 0xe1e: 0x4000, 0xe1f: 0x4000, 0xe20: 0x4000, 0xe21: 0x4000, 0xe22: 0x4000, 0xe23: 0x4000, + 0xe24: 0x4000, 0xe25: 0x4000, 0xe26: 0x4000, 0xe27: 0x4000, 0xe28: 0x4000, 0xe29: 0x4000, + 0xe2a: 0x4000, 0xe2b: 0x4000, 0xe2c: 0x4000, 0xe2d: 0x4000, 0xe2e: 0x4000, 0xe2f: 0x4000, + 0xe30: 0x4000, 0xe31: 0x4000, 0xe32: 0x4000, 0xe33: 0x4000, 0xe34: 0x4000, 0xe35: 0x4000, + 0xe36: 0x4000, 0xe37: 0x4000, 0xe38: 0x4000, 0xe39: 0x4000, 0xe3a: 0x4000, + // Block 0x39, offset 0xe40 + 0xe40: 0x4000, 0xe41: 0x4000, 0xe42: 0x4000, 0xe43: 0x4000, 0xe44: 0x4000, 0xe45: 0x4000, + 0xe46: 0x4000, 0xe47: 0x4000, 0xe48: 0x4000, 0xe49: 0x4000, 0xe4a: 0x4000, 0xe4b: 0x4000, + 0xe4c: 0x4000, 0xe4d: 0x4000, 0xe4e: 0x4000, 0xe4f: 0x4000, 0xe50: 0x4000, 0xe51: 0x4000, + 0xe52: 0x4000, 0xe53: 0x4000, 0xe54: 0x4000, 0xe55: 0x4000, 0xe56: 0x4000, 0xe57: 0x4000, + 0xe58: 0x4000, 0xe59: 0x4000, 0xe5a: 0x4000, 0xe5b: 0x4000, 0xe5c: 0x4000, 0xe5d: 0x4000, + 0xe5e: 0x4000, 0xe5f: 0x4000, 0xe60: 0x4000, 0xe61: 0x4000, 0xe62: 0x4000, 0xe63: 0x4000, + 0xe70: 0x4000, 0xe71: 0x4000, 0xe72: 0x4000, 0xe73: 0x4000, 0xe74: 0x4000, 0xe75: 0x4000, + 0xe76: 0x4000, 0xe77: 0x4000, 0xe78: 0x4000, 0xe79: 0x4000, 0xe7a: 0x4000, 0xe7b: 0x4000, + 0xe7c: 0x4000, 0xe7d: 0x4000, 0xe7e: 0x4000, 0xe7f: 0x4000, + // Block 0x3a, offset 0xe80 + 0xe80: 0x4000, 0xe81: 0x4000, 0xe82: 0x4000, 0xe83: 0x4000, 0xe84: 0x4000, 0xe85: 0x4000, + 0xe86: 0x4000, 0xe87: 0x4000, 0xe88: 0x4000, 0xe89: 0x4000, 0xe8a: 0x4000, 0xe8b: 0x4000, + 0xe8c: 0x4000, 0xe8d: 0x4000, 0xe8e: 0x4000, 0xe8f: 0x4000, 0xe90: 0x4000, 0xe91: 0x4000, + 0xe92: 0x4000, 0xe93: 0x4000, 0xe94: 0x4000, 0xe95: 0x4000, 0xe96: 0x4000, 0xe97: 0x4000, + 0xe98: 0x4000, 0xe99: 0x4000, 0xe9a: 0x4000, 0xe9b: 0x4000, 0xe9c: 0x4000, 0xe9d: 0x4000, + 0xe9e: 0x4000, 0xea0: 0x4000, 0xea1: 0x4000, 0xea2: 0x4000, 
0xea3: 0x4000, + 0xea4: 0x4000, 0xea5: 0x4000, 0xea6: 0x4000, 0xea7: 0x4000, 0xea8: 0x4000, 0xea9: 0x4000, + 0xeaa: 0x4000, 0xeab: 0x4000, 0xeac: 0x4000, 0xead: 0x4000, 0xeae: 0x4000, 0xeaf: 0x4000, + 0xeb0: 0x4000, 0xeb1: 0x4000, 0xeb2: 0x4000, 0xeb3: 0x4000, 0xeb4: 0x4000, 0xeb5: 0x4000, + 0xeb6: 0x4000, 0xeb7: 0x4000, 0xeb8: 0x4000, 0xeb9: 0x4000, 0xeba: 0x4000, 0xebb: 0x4000, + 0xebc: 0x4000, 0xebd: 0x4000, 0xebe: 0x4000, 0xebf: 0x4000, + // Block 0x3b, offset 0xec0 + 0xec0: 0x4000, 0xec1: 0x4000, 0xec2: 0x4000, 0xec3: 0x4000, 0xec4: 0x4000, 0xec5: 0x4000, + 0xec6: 0x4000, 0xec7: 0x4000, 0xec8: 0x2000, 0xec9: 0x2000, 0xeca: 0x2000, 0xecb: 0x2000, + 0xecc: 0x2000, 0xecd: 0x2000, 0xece: 0x2000, 0xecf: 0x2000, 0xed0: 0x4000, 0xed1: 0x4000, + 0xed2: 0x4000, 0xed3: 0x4000, 0xed4: 0x4000, 0xed5: 0x4000, 0xed6: 0x4000, 0xed7: 0x4000, + 0xed8: 0x4000, 0xed9: 0x4000, 0xeda: 0x4000, 0xedb: 0x4000, 0xedc: 0x4000, 0xedd: 0x4000, + 0xede: 0x4000, 0xedf: 0x4000, 0xee0: 0x4000, 0xee1: 0x4000, 0xee2: 0x4000, 0xee3: 0x4000, + 0xee4: 0x4000, 0xee5: 0x4000, 0xee6: 0x4000, 0xee7: 0x4000, 0xee8: 0x4000, 0xee9: 0x4000, + 0xeea: 0x4000, 0xeeb: 0x4000, 0xeec: 0x4000, 0xeed: 0x4000, 0xeee: 0x4000, 0xeef: 0x4000, + 0xef0: 0x4000, 0xef1: 0x4000, 0xef2: 0x4000, 0xef3: 0x4000, 0xef4: 0x4000, 0xef5: 0x4000, + 0xef6: 0x4000, 0xef7: 0x4000, 0xef8: 0x4000, 0xef9: 0x4000, 0xefa: 0x4000, 0xefb: 0x4000, + 0xefc: 0x4000, 0xefd: 0x4000, 0xefe: 0x4000, 0xeff: 0x4000, + // Block 0x3c, offset 0xf00 + 0xf00: 0x4000, 0xf01: 0x4000, 0xf02: 0x4000, 0xf03: 0x4000, 0xf04: 0x4000, 0xf05: 0x4000, + 0xf06: 0x4000, 0xf07: 0x4000, 0xf08: 0x4000, 0xf09: 0x4000, 0xf0a: 0x4000, 0xf0b: 0x4000, + 0xf0c: 0x4000, 0xf0d: 0x4000, 0xf0e: 0x4000, 0xf0f: 0x4000, 0xf10: 0x4000, 0xf11: 0x4000, + 0xf12: 0x4000, 0xf13: 0x4000, 0xf14: 0x4000, 0xf15: 0x4000, 0xf16: 0x4000, 0xf17: 0x4000, + 0xf18: 0x4000, 0xf19: 0x4000, 0xf1a: 0x4000, 0xf1b: 0x4000, 0xf1c: 0x4000, 0xf1d: 0x4000, + 0xf1e: 0x4000, 0xf1f: 0x4000, 0xf20: 0x4000, 0xf21: 0x4000, 0xf22: 0x4000, 0xf23: 0x4000, + 0xf24: 0x4000, 0xf25: 0x4000, 0xf26: 0x4000, 0xf27: 0x4000, 0xf28: 0x4000, 0xf29: 0x4000, + 0xf2a: 0x4000, 0xf2b: 0x4000, 0xf2c: 0x4000, 0xf2d: 0x4000, 0xf2e: 0x4000, 0xf2f: 0x4000, + 0xf30: 0x4000, 0xf31: 0x4000, 0xf32: 0x4000, 0xf33: 0x4000, 0xf34: 0x4000, 0xf35: 0x4000, + 0xf36: 0x4000, 0xf37: 0x4000, 0xf38: 0x4000, 0xf39: 0x4000, 0xf3a: 0x4000, 0xf3b: 0x4000, + 0xf3c: 0x4000, 0xf3d: 0x4000, 0xf3e: 0x4000, + // Block 0x3d, offset 0xf40 + 0xf40: 0x4000, 0xf41: 0x4000, 0xf42: 0x4000, 0xf43: 0x4000, 0xf44: 0x4000, 0xf45: 0x4000, + 0xf46: 0x4000, 0xf47: 0x4000, 0xf48: 0x4000, 0xf49: 0x4000, 0xf4a: 0x4000, 0xf4b: 0x4000, + 0xf4c: 0x4000, 0xf50: 0x4000, 0xf51: 0x4000, + 0xf52: 0x4000, 0xf53: 0x4000, 0xf54: 0x4000, 0xf55: 0x4000, 0xf56: 0x4000, 0xf57: 0x4000, + 0xf58: 0x4000, 0xf59: 0x4000, 0xf5a: 0x4000, 0xf5b: 0x4000, 0xf5c: 0x4000, 0xf5d: 0x4000, + 0xf5e: 0x4000, 0xf5f: 0x4000, 0xf60: 0x4000, 0xf61: 0x4000, 0xf62: 0x4000, 0xf63: 0x4000, + 0xf64: 0x4000, 0xf65: 0x4000, 0xf66: 0x4000, 0xf67: 0x4000, 0xf68: 0x4000, 0xf69: 0x4000, + 0xf6a: 0x4000, 0xf6b: 0x4000, 0xf6c: 0x4000, 0xf6d: 0x4000, 0xf6e: 0x4000, 0xf6f: 0x4000, + 0xf70: 0x4000, 0xf71: 0x4000, 0xf72: 0x4000, 0xf73: 0x4000, 0xf74: 0x4000, 0xf75: 0x4000, + 0xf76: 0x4000, 0xf77: 0x4000, 0xf78: 0x4000, 0xf79: 0x4000, 0xf7a: 0x4000, 0xf7b: 0x4000, + 0xf7c: 0x4000, 0xf7d: 0x4000, 0xf7e: 0x4000, 0xf7f: 0x4000, + // Block 0x3e, offset 0xf80 + 0xf80: 0x4000, 0xf81: 0x4000, 0xf82: 0x4000, 0xf83: 0x4000, 0xf84: 0x4000, 0xf85: 0x4000, + 0xf86: 
0x4000, + // Block 0x3f, offset 0xfc0 + 0xfe0: 0x4000, 0xfe1: 0x4000, 0xfe2: 0x4000, 0xfe3: 0x4000, + 0xfe4: 0x4000, 0xfe5: 0x4000, 0xfe6: 0x4000, 0xfe7: 0x4000, 0xfe8: 0x4000, 0xfe9: 0x4000, + 0xfea: 0x4000, 0xfeb: 0x4000, 0xfec: 0x4000, 0xfed: 0x4000, 0xfee: 0x4000, 0xfef: 0x4000, + 0xff0: 0x4000, 0xff1: 0x4000, 0xff2: 0x4000, 0xff3: 0x4000, 0xff4: 0x4000, 0xff5: 0x4000, + 0xff6: 0x4000, 0xff7: 0x4000, 0xff8: 0x4000, 0xff9: 0x4000, 0xffa: 0x4000, 0xffb: 0x4000, + 0xffc: 0x4000, + // Block 0x40, offset 0x1000 + 0x1000: 0x4000, 0x1001: 0x4000, 0x1002: 0x4000, 0x1003: 0x4000, 0x1004: 0x4000, 0x1005: 0x4000, + 0x1006: 0x4000, 0x1007: 0x4000, 0x1008: 0x4000, 0x1009: 0x4000, 0x100a: 0x4000, 0x100b: 0x4000, + 0x100c: 0x4000, 0x100d: 0x4000, 0x100e: 0x4000, 0x100f: 0x4000, 0x1010: 0x4000, 0x1011: 0x4000, + 0x1012: 0x4000, 0x1013: 0x4000, 0x1014: 0x4000, 0x1015: 0x4000, 0x1016: 0x4000, 0x1017: 0x4000, + 0x1018: 0x4000, 0x1019: 0x4000, 0x101a: 0x4000, 0x101b: 0x4000, 0x101c: 0x4000, 0x101d: 0x4000, + 0x101e: 0x4000, 0x101f: 0x4000, 0x1020: 0x4000, 0x1021: 0x4000, 0x1022: 0x4000, 0x1023: 0x4000, + // Block 0x41, offset 0x1040 + 0x1040: 0x2000, 0x1041: 0x2000, 0x1042: 0x2000, 0x1043: 0x2000, 0x1044: 0x2000, 0x1045: 0x2000, + 0x1046: 0x2000, 0x1047: 0x2000, 0x1048: 0x2000, 0x1049: 0x2000, 0x104a: 0x2000, 0x104b: 0x2000, + 0x104c: 0x2000, 0x104d: 0x2000, 0x104e: 0x2000, 0x104f: 0x2000, 0x1050: 0x4000, 0x1051: 0x4000, + 0x1052: 0x4000, 0x1053: 0x4000, 0x1054: 0x4000, 0x1055: 0x4000, 0x1056: 0x4000, 0x1057: 0x4000, + 0x1058: 0x4000, 0x1059: 0x4000, + 0x1070: 0x4000, 0x1071: 0x4000, 0x1072: 0x4000, 0x1073: 0x4000, 0x1074: 0x4000, 0x1075: 0x4000, + 0x1076: 0x4000, 0x1077: 0x4000, 0x1078: 0x4000, 0x1079: 0x4000, 0x107a: 0x4000, 0x107b: 0x4000, + 0x107c: 0x4000, 0x107d: 0x4000, 0x107e: 0x4000, 0x107f: 0x4000, + // Block 0x42, offset 0x1080 + 0x1080: 0x4000, 0x1081: 0x4000, 0x1082: 0x4000, 0x1083: 0x4000, 0x1084: 0x4000, 0x1085: 0x4000, + 0x1086: 0x4000, 0x1087: 0x4000, 0x1088: 0x4000, 0x1089: 0x4000, 0x108a: 0x4000, 0x108b: 0x4000, + 0x108c: 0x4000, 0x108d: 0x4000, 0x108e: 0x4000, 0x108f: 0x4000, 0x1090: 0x4000, 0x1091: 0x4000, + 0x1092: 0x4000, 0x1094: 0x4000, 0x1095: 0x4000, 0x1096: 0x4000, 0x1097: 0x4000, + 0x1098: 0x4000, 0x1099: 0x4000, 0x109a: 0x4000, 0x109b: 0x4000, 0x109c: 0x4000, 0x109d: 0x4000, + 0x109e: 0x4000, 0x109f: 0x4000, 0x10a0: 0x4000, 0x10a1: 0x4000, 0x10a2: 0x4000, 0x10a3: 0x4000, + 0x10a4: 0x4000, 0x10a5: 0x4000, 0x10a6: 0x4000, 0x10a8: 0x4000, 0x10a9: 0x4000, + 0x10aa: 0x4000, 0x10ab: 0x4000, + // Block 0x43, offset 0x10c0 + 0x10c1: 0x9012, 0x10c2: 0x9012, 0x10c3: 0x9012, 0x10c4: 0x9012, 0x10c5: 0x9012, + 0x10c6: 0x9012, 0x10c7: 0x9012, 0x10c8: 0x9012, 0x10c9: 0x9012, 0x10ca: 0x9012, 0x10cb: 0x9012, + 0x10cc: 0x9012, 0x10cd: 0x9012, 0x10ce: 0x9012, 0x10cf: 0x9012, 0x10d0: 0x9012, 0x10d1: 0x9012, + 0x10d2: 0x9012, 0x10d3: 0x9012, 0x10d4: 0x9012, 0x10d5: 0x9012, 0x10d6: 0x9012, 0x10d7: 0x9012, + 0x10d8: 0x9012, 0x10d9: 0x9012, 0x10da: 0x9012, 0x10db: 0x9012, 0x10dc: 0x9012, 0x10dd: 0x9012, + 0x10de: 0x9012, 0x10df: 0x9012, 0x10e0: 0x9049, 0x10e1: 0x9049, 0x10e2: 0x9049, 0x10e3: 0x9049, + 0x10e4: 0x9049, 0x10e5: 0x9049, 0x10e6: 0x9049, 0x10e7: 0x9049, 0x10e8: 0x9049, 0x10e9: 0x9049, + 0x10ea: 0x9049, 0x10eb: 0x9049, 0x10ec: 0x9049, 0x10ed: 0x9049, 0x10ee: 0x9049, 0x10ef: 0x9049, + 0x10f0: 0x9049, 0x10f1: 0x9049, 0x10f2: 0x9049, 0x10f3: 0x9049, 0x10f4: 0x9049, 0x10f5: 0x9049, + 0x10f6: 0x9049, 0x10f7: 0x9049, 0x10f8: 0x9049, 0x10f9: 0x9049, 0x10fa: 0x9049, 0x10fb: 0x9049, + 0x10fc: 0x9049, 
0x10fd: 0x9049, 0x10fe: 0x9049, 0x10ff: 0x9049, + // Block 0x44, offset 0x1100 + 0x1100: 0x9049, 0x1101: 0x9049, 0x1102: 0x9049, 0x1103: 0x9049, 0x1104: 0x9049, 0x1105: 0x9049, + 0x1106: 0x9049, 0x1107: 0x9049, 0x1108: 0x9049, 0x1109: 0x9049, 0x110a: 0x9049, 0x110b: 0x9049, + 0x110c: 0x9049, 0x110d: 0x9049, 0x110e: 0x9049, 0x110f: 0x9049, 0x1110: 0x9049, 0x1111: 0x9049, + 0x1112: 0x9049, 0x1113: 0x9049, 0x1114: 0x9049, 0x1115: 0x9049, 0x1116: 0x9049, 0x1117: 0x9049, + 0x1118: 0x9049, 0x1119: 0x9049, 0x111a: 0x9049, 0x111b: 0x9049, 0x111c: 0x9049, 0x111d: 0x9049, + 0x111e: 0x9049, 0x111f: 0x904a, 0x1120: 0x904b, 0x1121: 0xb04c, 0x1122: 0xb04d, 0x1123: 0xb04d, + 0x1124: 0xb04e, 0x1125: 0xb04f, 0x1126: 0xb050, 0x1127: 0xb051, 0x1128: 0xb052, 0x1129: 0xb053, + 0x112a: 0xb054, 0x112b: 0xb055, 0x112c: 0xb056, 0x112d: 0xb057, 0x112e: 0xb058, 0x112f: 0xb059, + 0x1130: 0xb05a, 0x1131: 0xb05b, 0x1132: 0xb05c, 0x1133: 0xb05d, 0x1134: 0xb05e, 0x1135: 0xb05f, + 0x1136: 0xb060, 0x1137: 0xb061, 0x1138: 0xb062, 0x1139: 0xb063, 0x113a: 0xb064, 0x113b: 0xb065, + 0x113c: 0xb052, 0x113d: 0xb066, 0x113e: 0xb067, 0x113f: 0xb055, + // Block 0x45, offset 0x1140 + 0x1140: 0xb068, 0x1141: 0xb069, 0x1142: 0xb06a, 0x1143: 0xb06b, 0x1144: 0xb05a, 0x1145: 0xb056, + 0x1146: 0xb06c, 0x1147: 0xb06d, 0x1148: 0xb06b, 0x1149: 0xb06e, 0x114a: 0xb06b, 0x114b: 0xb06f, + 0x114c: 0xb06f, 0x114d: 0xb070, 0x114e: 0xb070, 0x114f: 0xb071, 0x1150: 0xb056, 0x1151: 0xb072, + 0x1152: 0xb073, 0x1153: 0xb072, 0x1154: 0xb074, 0x1155: 0xb073, 0x1156: 0xb075, 0x1157: 0xb075, + 0x1158: 0xb076, 0x1159: 0xb076, 0x115a: 0xb077, 0x115b: 0xb077, 0x115c: 0xb073, 0x115d: 0xb078, + 0x115e: 0xb079, 0x115f: 0xb067, 0x1160: 0xb07a, 0x1161: 0xb07b, 0x1162: 0xb07b, 0x1163: 0xb07b, + 0x1164: 0xb07b, 0x1165: 0xb07b, 0x1166: 0xb07b, 0x1167: 0xb07b, 0x1168: 0xb07b, 0x1169: 0xb07b, + 0x116a: 0xb07b, 0x116b: 0xb07b, 0x116c: 0xb07b, 0x116d: 0xb07b, 0x116e: 0xb07b, 0x116f: 0xb07b, + 0x1170: 0xb07c, 0x1171: 0xb07c, 0x1172: 0xb07c, 0x1173: 0xb07c, 0x1174: 0xb07c, 0x1175: 0xb07c, + 0x1176: 0xb07c, 0x1177: 0xb07c, 0x1178: 0xb07c, 0x1179: 0xb07c, 0x117a: 0xb07c, 0x117b: 0xb07c, + 0x117c: 0xb07c, 0x117d: 0xb07c, 0x117e: 0xb07c, + // Block 0x46, offset 0x1180 + 0x1182: 0xb07d, 0x1183: 0xb07e, 0x1184: 0xb07f, 0x1185: 0xb080, + 0x1186: 0xb07f, 0x1187: 0xb07e, 0x118a: 0xb081, 0x118b: 0xb082, + 0x118c: 0xb083, 0x118d: 0xb07f, 0x118e: 0xb080, 0x118f: 0xb07f, + 0x1192: 0xb084, 0x1193: 0xb085, 0x1194: 0xb084, 0x1195: 0xb086, 0x1196: 0xb084, 0x1197: 0xb087, + 0x119a: 0xb088, 0x119b: 0xb089, 0x119c: 0xb08a, + 0x11a0: 0x908b, 0x11a1: 0x908b, 0x11a2: 0x908c, 0x11a3: 0x908d, + 0x11a4: 0x908b, 0x11a5: 0x908e, 0x11a6: 0x908f, 0x11a8: 0xb090, 0x11a9: 0xb091, + 0x11aa: 0xb092, 0x11ab: 0xb091, 0x11ac: 0xb093, 0x11ad: 0xb094, 0x11ae: 0xb095, + 0x11bd: 0x2000, + // Block 0x47, offset 0x11c0 + 0x11e0: 0x4000, 0x11e1: 0x4000, 0x11e2: 0x4000, 0x11e3: 0x4000, + // Block 0x48, offset 0x1200 + 0x1200: 0x4000, 0x1201: 0x4000, 0x1202: 0x4000, 0x1203: 0x4000, 0x1204: 0x4000, 0x1205: 0x4000, + 0x1206: 0x4000, 0x1207: 0x4000, 0x1208: 0x4000, 0x1209: 0x4000, 0x120a: 0x4000, 0x120b: 0x4000, + 0x120c: 0x4000, 0x120d: 0x4000, 0x120e: 0x4000, 0x120f: 0x4000, 0x1210: 0x4000, 0x1211: 0x4000, + 0x1212: 0x4000, 0x1213: 0x4000, 0x1214: 0x4000, 0x1215: 0x4000, 0x1216: 0x4000, 0x1217: 0x4000, + 0x1218: 0x4000, 0x1219: 0x4000, 0x121a: 0x4000, 0x121b: 0x4000, 0x121c: 0x4000, 0x121d: 0x4000, + 0x121e: 0x4000, 0x121f: 0x4000, 0x1220: 0x4000, 0x1221: 0x4000, 0x1222: 0x4000, 0x1223: 0x4000, + 0x1224: 0x4000, 0x1225: 
0x4000, 0x1226: 0x4000, 0x1227: 0x4000, 0x1228: 0x4000, 0x1229: 0x4000, + 0x122a: 0x4000, 0x122b: 0x4000, 0x122c: 0x4000, 0x122d: 0x4000, 0x122e: 0x4000, 0x122f: 0x4000, + 0x1230: 0x4000, 0x1231: 0x4000, 0x1232: 0x4000, 0x1233: 0x4000, 0x1234: 0x4000, 0x1235: 0x4000, + 0x1236: 0x4000, 0x1237: 0x4000, + // Block 0x49, offset 0x1240 + 0x1240: 0x4000, 0x1241: 0x4000, 0x1242: 0x4000, 0x1243: 0x4000, 0x1244: 0x4000, 0x1245: 0x4000, + 0x1246: 0x4000, 0x1247: 0x4000, 0x1248: 0x4000, 0x1249: 0x4000, 0x124a: 0x4000, 0x124b: 0x4000, + 0x124c: 0x4000, 0x124d: 0x4000, 0x124e: 0x4000, 0x124f: 0x4000, 0x1250: 0x4000, 0x1251: 0x4000, + 0x1252: 0x4000, 0x1253: 0x4000, 0x1254: 0x4000, 0x1255: 0x4000, 0x1256: 0x4000, 0x1257: 0x4000, + 0x1258: 0x4000, 0x1259: 0x4000, 0x125a: 0x4000, 0x125b: 0x4000, 0x125c: 0x4000, 0x125d: 0x4000, + 0x125e: 0x4000, 0x125f: 0x4000, 0x1260: 0x4000, 0x1261: 0x4000, 0x1262: 0x4000, 0x1263: 0x4000, + 0x1264: 0x4000, 0x1265: 0x4000, 0x1266: 0x4000, 0x1267: 0x4000, 0x1268: 0x4000, 0x1269: 0x4000, + 0x126a: 0x4000, 0x126b: 0x4000, 0x126c: 0x4000, 0x126d: 0x4000, 0x126e: 0x4000, 0x126f: 0x4000, + 0x1270: 0x4000, 0x1271: 0x4000, 0x1272: 0x4000, + // Block 0x4a, offset 0x1280 + 0x1280: 0x4000, 0x1281: 0x4000, 0x1282: 0x4000, 0x1283: 0x4000, 0x1284: 0x4000, 0x1285: 0x4000, + 0x1286: 0x4000, 0x1287: 0x4000, 0x1288: 0x4000, 0x1289: 0x4000, 0x128a: 0x4000, 0x128b: 0x4000, + 0x128c: 0x4000, 0x128d: 0x4000, 0x128e: 0x4000, 0x128f: 0x4000, 0x1290: 0x4000, 0x1291: 0x4000, + 0x1292: 0x4000, 0x1293: 0x4000, 0x1294: 0x4000, 0x1295: 0x4000, 0x1296: 0x4000, 0x1297: 0x4000, + 0x1298: 0x4000, 0x1299: 0x4000, 0x129a: 0x4000, 0x129b: 0x4000, 0x129c: 0x4000, 0x129d: 0x4000, + 0x129e: 0x4000, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x4000, 0x12d1: 0x4000, + 0x12d2: 0x4000, + 0x12e4: 0x4000, 0x12e5: 0x4000, 0x12e6: 0x4000, 0x12e7: 0x4000, + 0x12f0: 0x4000, 0x12f1: 0x4000, 0x12f2: 0x4000, 0x12f3: 0x4000, 0x12f4: 0x4000, 0x12f5: 0x4000, + 0x12f6: 0x4000, 0x12f7: 0x4000, 0x12f8: 0x4000, 0x12f9: 0x4000, 0x12fa: 0x4000, 0x12fb: 0x4000, + 0x12fc: 0x4000, 0x12fd: 0x4000, 0x12fe: 0x4000, 0x12ff: 0x4000, + // Block 0x4c, offset 0x1300 + 0x1300: 0x4000, 0x1301: 0x4000, 0x1302: 0x4000, 0x1303: 0x4000, 0x1304: 0x4000, 0x1305: 0x4000, + 0x1306: 0x4000, 0x1307: 0x4000, 0x1308: 0x4000, 0x1309: 0x4000, 0x130a: 0x4000, 0x130b: 0x4000, + 0x130c: 0x4000, 0x130d: 0x4000, 0x130e: 0x4000, 0x130f: 0x4000, 0x1310: 0x4000, 0x1311: 0x4000, + 0x1312: 0x4000, 0x1313: 0x4000, 0x1314: 0x4000, 0x1315: 0x4000, 0x1316: 0x4000, 0x1317: 0x4000, + 0x1318: 0x4000, 0x1319: 0x4000, 0x131a: 0x4000, 0x131b: 0x4000, 0x131c: 0x4000, 0x131d: 0x4000, + 0x131e: 0x4000, 0x131f: 0x4000, 0x1320: 0x4000, 0x1321: 0x4000, 0x1322: 0x4000, 0x1323: 0x4000, + 0x1324: 0x4000, 0x1325: 0x4000, 0x1326: 0x4000, 0x1327: 0x4000, 0x1328: 0x4000, 0x1329: 0x4000, + 0x132a: 0x4000, 0x132b: 0x4000, 0x132c: 0x4000, 0x132d: 0x4000, 0x132e: 0x4000, 0x132f: 0x4000, + 0x1330: 0x4000, 0x1331: 0x4000, 0x1332: 0x4000, 0x1333: 0x4000, 0x1334: 0x4000, 0x1335: 0x4000, + 0x1336: 0x4000, 0x1337: 0x4000, 0x1338: 0x4000, 0x1339: 0x4000, 0x133a: 0x4000, 0x133b: 0x4000, + // Block 0x4d, offset 0x1340 + 0x1344: 0x4000, + // Block 0x4e, offset 0x1380 + 0x138f: 0x4000, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x2000, 0x13c1: 0x2000, 0x13c2: 0x2000, 0x13c3: 0x2000, 0x13c4: 0x2000, 0x13c5: 0x2000, + 0x13c6: 0x2000, 0x13c7: 0x2000, 0x13c8: 0x2000, 0x13c9: 0x2000, 0x13ca: 0x2000, + 0x13d0: 0x2000, 0x13d1: 0x2000, + 0x13d2: 0x2000, 0x13d3: 0x2000, 0x13d4: 0x2000, 0x13d5: 0x2000, 0x13d6: 0x2000, 
0x13d7: 0x2000, + 0x13d8: 0x2000, 0x13d9: 0x2000, 0x13da: 0x2000, 0x13db: 0x2000, 0x13dc: 0x2000, 0x13dd: 0x2000, + 0x13de: 0x2000, 0x13df: 0x2000, 0x13e0: 0x2000, 0x13e1: 0x2000, 0x13e2: 0x2000, 0x13e3: 0x2000, + 0x13e4: 0x2000, 0x13e5: 0x2000, 0x13e6: 0x2000, 0x13e7: 0x2000, 0x13e8: 0x2000, 0x13e9: 0x2000, + 0x13ea: 0x2000, 0x13eb: 0x2000, 0x13ec: 0x2000, 0x13ed: 0x2000, + 0x13f0: 0x2000, 0x13f1: 0x2000, 0x13f2: 0x2000, 0x13f3: 0x2000, 0x13f4: 0x2000, 0x13f5: 0x2000, + 0x13f6: 0x2000, 0x13f7: 0x2000, 0x13f8: 0x2000, 0x13f9: 0x2000, 0x13fa: 0x2000, 0x13fb: 0x2000, + 0x13fc: 0x2000, 0x13fd: 0x2000, 0x13fe: 0x2000, 0x13ff: 0x2000, + // Block 0x50, offset 0x1400 + 0x1400: 0x2000, 0x1401: 0x2000, 0x1402: 0x2000, 0x1403: 0x2000, 0x1404: 0x2000, 0x1405: 0x2000, + 0x1406: 0x2000, 0x1407: 0x2000, 0x1408: 0x2000, 0x1409: 0x2000, 0x140a: 0x2000, 0x140b: 0x2000, + 0x140c: 0x2000, 0x140d: 0x2000, 0x140e: 0x2000, 0x140f: 0x2000, 0x1410: 0x2000, 0x1411: 0x2000, + 0x1412: 0x2000, 0x1413: 0x2000, 0x1414: 0x2000, 0x1415: 0x2000, 0x1416: 0x2000, 0x1417: 0x2000, + 0x1418: 0x2000, 0x1419: 0x2000, 0x141a: 0x2000, 0x141b: 0x2000, 0x141c: 0x2000, 0x141d: 0x2000, + 0x141e: 0x2000, 0x141f: 0x2000, 0x1420: 0x2000, 0x1421: 0x2000, 0x1422: 0x2000, 0x1423: 0x2000, + 0x1424: 0x2000, 0x1425: 0x2000, 0x1426: 0x2000, 0x1427: 0x2000, 0x1428: 0x2000, 0x1429: 0x2000, + 0x1430: 0x2000, 0x1431: 0x2000, 0x1432: 0x2000, 0x1433: 0x2000, 0x1434: 0x2000, 0x1435: 0x2000, + 0x1436: 0x2000, 0x1437: 0x2000, 0x1438: 0x2000, 0x1439: 0x2000, 0x143a: 0x2000, 0x143b: 0x2000, + 0x143c: 0x2000, 0x143d: 0x2000, 0x143e: 0x2000, 0x143f: 0x2000, + // Block 0x51, offset 0x1440 + 0x1440: 0x2000, 0x1441: 0x2000, 0x1442: 0x2000, 0x1443: 0x2000, 0x1444: 0x2000, 0x1445: 0x2000, + 0x1446: 0x2000, 0x1447: 0x2000, 0x1448: 0x2000, 0x1449: 0x2000, 0x144a: 0x2000, 0x144b: 0x2000, + 0x144c: 0x2000, 0x144d: 0x2000, 0x144e: 0x4000, 0x144f: 0x2000, 0x1450: 0x2000, 0x1451: 0x4000, + 0x1452: 0x4000, 0x1453: 0x4000, 0x1454: 0x4000, 0x1455: 0x4000, 0x1456: 0x4000, 0x1457: 0x4000, + 0x1458: 0x4000, 0x1459: 0x4000, 0x145a: 0x4000, 0x145b: 0x2000, 0x145c: 0x2000, 0x145d: 0x2000, + 0x145e: 0x2000, 0x145f: 0x2000, 0x1460: 0x2000, 0x1461: 0x2000, 0x1462: 0x2000, 0x1463: 0x2000, + 0x1464: 0x2000, 0x1465: 0x2000, 0x1466: 0x2000, 0x1467: 0x2000, 0x1468: 0x2000, 0x1469: 0x2000, + 0x146a: 0x2000, 0x146b: 0x2000, 0x146c: 0x2000, + // Block 0x52, offset 0x1480 + 0x1480: 0x4000, 0x1481: 0x4000, 0x1482: 0x4000, + 0x1490: 0x4000, 0x1491: 0x4000, + 0x1492: 0x4000, 0x1493: 0x4000, 0x1494: 0x4000, 0x1495: 0x4000, 0x1496: 0x4000, 0x1497: 0x4000, + 0x1498: 0x4000, 0x1499: 0x4000, 0x149a: 0x4000, 0x149b: 0x4000, 0x149c: 0x4000, 0x149d: 0x4000, + 0x149e: 0x4000, 0x149f: 0x4000, 0x14a0: 0x4000, 0x14a1: 0x4000, 0x14a2: 0x4000, 0x14a3: 0x4000, + 0x14a4: 0x4000, 0x14a5: 0x4000, 0x14a6: 0x4000, 0x14a7: 0x4000, 0x14a8: 0x4000, 0x14a9: 0x4000, + 0x14aa: 0x4000, 0x14ab: 0x4000, 0x14ac: 0x4000, 0x14ad: 0x4000, 0x14ae: 0x4000, 0x14af: 0x4000, + 0x14b0: 0x4000, 0x14b1: 0x4000, 0x14b2: 0x4000, 0x14b3: 0x4000, 0x14b4: 0x4000, 0x14b5: 0x4000, + 0x14b6: 0x4000, 0x14b7: 0x4000, 0x14b8: 0x4000, 0x14b9: 0x4000, 0x14ba: 0x4000, 0x14bb: 0x4000, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x4000, 0x14c1: 0x4000, 0x14c2: 0x4000, 0x14c3: 0x4000, 0x14c4: 0x4000, 0x14c5: 0x4000, + 0x14c6: 0x4000, 0x14c7: 0x4000, 0x14c8: 0x4000, + 0x14d0: 0x4000, 0x14d1: 0x4000, + 0x14e0: 0x4000, 0x14e1: 0x4000, 0x14e2: 0x4000, 0x14e3: 0x4000, + 0x14e4: 0x4000, 0x14e5: 0x4000, + // Block 0x54, offset 0x1500 + 0x1500: 0x4000, 0x1501: 
0x4000, 0x1502: 0x4000, 0x1503: 0x4000, 0x1504: 0x4000, 0x1505: 0x4000, + 0x1506: 0x4000, 0x1507: 0x4000, 0x1508: 0x4000, 0x1509: 0x4000, 0x150a: 0x4000, 0x150b: 0x4000, + 0x150c: 0x4000, 0x150d: 0x4000, 0x150e: 0x4000, 0x150f: 0x4000, 0x1510: 0x4000, 0x1511: 0x4000, + 0x1512: 0x4000, 0x1513: 0x4000, 0x1514: 0x4000, 0x1515: 0x4000, 0x1516: 0x4000, 0x1517: 0x4000, + 0x1518: 0x4000, 0x1519: 0x4000, 0x151a: 0x4000, 0x151b: 0x4000, 0x151c: 0x4000, 0x151d: 0x4000, + 0x151e: 0x4000, 0x151f: 0x4000, 0x1520: 0x4000, + 0x152d: 0x4000, 0x152e: 0x4000, 0x152f: 0x4000, + 0x1530: 0x4000, 0x1531: 0x4000, 0x1532: 0x4000, 0x1533: 0x4000, 0x1534: 0x4000, 0x1535: 0x4000, + 0x1537: 0x4000, 0x1538: 0x4000, 0x1539: 0x4000, 0x153a: 0x4000, 0x153b: 0x4000, + 0x153c: 0x4000, 0x153d: 0x4000, 0x153e: 0x4000, 0x153f: 0x4000, + // Block 0x55, offset 0x1540 + 0x1540: 0x4000, 0x1541: 0x4000, 0x1542: 0x4000, 0x1543: 0x4000, 0x1544: 0x4000, 0x1545: 0x4000, + 0x1546: 0x4000, 0x1547: 0x4000, 0x1548: 0x4000, 0x1549: 0x4000, 0x154a: 0x4000, 0x154b: 0x4000, + 0x154c: 0x4000, 0x154d: 0x4000, 0x154e: 0x4000, 0x154f: 0x4000, 0x1550: 0x4000, 0x1551: 0x4000, + 0x1552: 0x4000, 0x1553: 0x4000, 0x1554: 0x4000, 0x1555: 0x4000, 0x1556: 0x4000, 0x1557: 0x4000, + 0x1558: 0x4000, 0x1559: 0x4000, 0x155a: 0x4000, 0x155b: 0x4000, 0x155c: 0x4000, 0x155d: 0x4000, + 0x155e: 0x4000, 0x155f: 0x4000, 0x1560: 0x4000, 0x1561: 0x4000, 0x1562: 0x4000, 0x1563: 0x4000, + 0x1564: 0x4000, 0x1565: 0x4000, 0x1566: 0x4000, 0x1567: 0x4000, 0x1568: 0x4000, 0x1569: 0x4000, + 0x156a: 0x4000, 0x156b: 0x4000, 0x156c: 0x4000, 0x156d: 0x4000, 0x156e: 0x4000, 0x156f: 0x4000, + 0x1570: 0x4000, 0x1571: 0x4000, 0x1572: 0x4000, 0x1573: 0x4000, 0x1574: 0x4000, 0x1575: 0x4000, + 0x1576: 0x4000, 0x1577: 0x4000, 0x1578: 0x4000, 0x1579: 0x4000, 0x157a: 0x4000, 0x157b: 0x4000, + 0x157c: 0x4000, 0x157e: 0x4000, 0x157f: 0x4000, + // Block 0x56, offset 0x1580 + 0x1580: 0x4000, 0x1581: 0x4000, 0x1582: 0x4000, 0x1583: 0x4000, 0x1584: 0x4000, 0x1585: 0x4000, + 0x1586: 0x4000, 0x1587: 0x4000, 0x1588: 0x4000, 0x1589: 0x4000, 0x158a: 0x4000, 0x158b: 0x4000, + 0x158c: 0x4000, 0x158d: 0x4000, 0x158e: 0x4000, 0x158f: 0x4000, 0x1590: 0x4000, 0x1591: 0x4000, + 0x1592: 0x4000, 0x1593: 0x4000, + 0x15a0: 0x4000, 0x15a1: 0x4000, 0x15a2: 0x4000, 0x15a3: 0x4000, + 0x15a4: 0x4000, 0x15a5: 0x4000, 0x15a6: 0x4000, 0x15a7: 0x4000, 0x15a8: 0x4000, 0x15a9: 0x4000, + 0x15aa: 0x4000, 0x15ab: 0x4000, 0x15ac: 0x4000, 0x15ad: 0x4000, 0x15ae: 0x4000, 0x15af: 0x4000, + 0x15b0: 0x4000, 0x15b1: 0x4000, 0x15b2: 0x4000, 0x15b3: 0x4000, 0x15b4: 0x4000, 0x15b5: 0x4000, + 0x15b6: 0x4000, 0x15b7: 0x4000, 0x15b8: 0x4000, 0x15b9: 0x4000, 0x15ba: 0x4000, 0x15bb: 0x4000, + 0x15bc: 0x4000, 0x15bd: 0x4000, 0x15be: 0x4000, 0x15bf: 0x4000, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x4000, 0x15c1: 0x4000, 0x15c2: 0x4000, 0x15c3: 0x4000, 0x15c4: 0x4000, 0x15c5: 0x4000, + 0x15c6: 0x4000, 0x15c7: 0x4000, 0x15c8: 0x4000, 0x15c9: 0x4000, 0x15ca: 0x4000, + 0x15cf: 0x4000, 0x15d0: 0x4000, 0x15d1: 0x4000, + 0x15d2: 0x4000, 0x15d3: 0x4000, + 0x15e0: 0x4000, 0x15e1: 0x4000, 0x15e2: 0x4000, 0x15e3: 0x4000, + 0x15e4: 0x4000, 0x15e5: 0x4000, 0x15e6: 0x4000, 0x15e7: 0x4000, 0x15e8: 0x4000, 0x15e9: 0x4000, + 0x15ea: 0x4000, 0x15eb: 0x4000, 0x15ec: 0x4000, 0x15ed: 0x4000, 0x15ee: 0x4000, 0x15ef: 0x4000, + 0x15f0: 0x4000, 0x15f4: 0x4000, + 0x15f8: 0x4000, 0x15f9: 0x4000, 0x15fa: 0x4000, 0x15fb: 0x4000, + 0x15fc: 0x4000, 0x15fd: 0x4000, 0x15fe: 0x4000, 0x15ff: 0x4000, + // Block 0x58, offset 0x1600 + 0x1600: 0x4000, 0x1602: 0x4000, 0x1603: 
0x4000, 0x1604: 0x4000, 0x1605: 0x4000, + 0x1606: 0x4000, 0x1607: 0x4000, 0x1608: 0x4000, 0x1609: 0x4000, 0x160a: 0x4000, 0x160b: 0x4000, + 0x160c: 0x4000, 0x160d: 0x4000, 0x160e: 0x4000, 0x160f: 0x4000, 0x1610: 0x4000, 0x1611: 0x4000, + 0x1612: 0x4000, 0x1613: 0x4000, 0x1614: 0x4000, 0x1615: 0x4000, 0x1616: 0x4000, 0x1617: 0x4000, + 0x1618: 0x4000, 0x1619: 0x4000, 0x161a: 0x4000, 0x161b: 0x4000, 0x161c: 0x4000, 0x161d: 0x4000, + 0x161e: 0x4000, 0x161f: 0x4000, 0x1620: 0x4000, 0x1621: 0x4000, 0x1622: 0x4000, 0x1623: 0x4000, + 0x1624: 0x4000, 0x1625: 0x4000, 0x1626: 0x4000, 0x1627: 0x4000, 0x1628: 0x4000, 0x1629: 0x4000, + 0x162a: 0x4000, 0x162b: 0x4000, 0x162c: 0x4000, 0x162d: 0x4000, 0x162e: 0x4000, 0x162f: 0x4000, + 0x1630: 0x4000, 0x1631: 0x4000, 0x1632: 0x4000, 0x1633: 0x4000, 0x1634: 0x4000, 0x1635: 0x4000, + 0x1636: 0x4000, 0x1637: 0x4000, 0x1638: 0x4000, 0x1639: 0x4000, 0x163a: 0x4000, 0x163b: 0x4000, + 0x163c: 0x4000, 0x163d: 0x4000, 0x163e: 0x4000, 0x163f: 0x4000, + // Block 0x59, offset 0x1640 + 0x1640: 0x4000, 0x1641: 0x4000, 0x1642: 0x4000, 0x1643: 0x4000, 0x1644: 0x4000, 0x1645: 0x4000, + 0x1646: 0x4000, 0x1647: 0x4000, 0x1648: 0x4000, 0x1649: 0x4000, 0x164a: 0x4000, 0x164b: 0x4000, + 0x164c: 0x4000, 0x164d: 0x4000, 0x164e: 0x4000, 0x164f: 0x4000, 0x1650: 0x4000, 0x1651: 0x4000, + 0x1652: 0x4000, 0x1653: 0x4000, 0x1654: 0x4000, 0x1655: 0x4000, 0x1656: 0x4000, 0x1657: 0x4000, + 0x1658: 0x4000, 0x1659: 0x4000, 0x165a: 0x4000, 0x165b: 0x4000, 0x165c: 0x4000, 0x165d: 0x4000, + 0x165e: 0x4000, 0x165f: 0x4000, 0x1660: 0x4000, 0x1661: 0x4000, 0x1662: 0x4000, 0x1663: 0x4000, + 0x1664: 0x4000, 0x1665: 0x4000, 0x1666: 0x4000, 0x1667: 0x4000, 0x1668: 0x4000, 0x1669: 0x4000, + 0x166a: 0x4000, 0x166b: 0x4000, 0x166c: 0x4000, 0x166d: 0x4000, 0x166e: 0x4000, 0x166f: 0x4000, + 0x1670: 0x4000, 0x1671: 0x4000, 0x1672: 0x4000, 0x1673: 0x4000, 0x1674: 0x4000, 0x1675: 0x4000, + 0x1676: 0x4000, 0x1677: 0x4000, 0x1678: 0x4000, 0x1679: 0x4000, 0x167a: 0x4000, 0x167b: 0x4000, + 0x167c: 0x4000, 0x167f: 0x4000, + // Block 0x5a, offset 0x1680 + 0x1680: 0x4000, 0x1681: 0x4000, 0x1682: 0x4000, 0x1683: 0x4000, 0x1684: 0x4000, 0x1685: 0x4000, + 0x1686: 0x4000, 0x1687: 0x4000, 0x1688: 0x4000, 0x1689: 0x4000, 0x168a: 0x4000, 0x168b: 0x4000, + 0x168c: 0x4000, 0x168d: 0x4000, 0x168e: 0x4000, 0x168f: 0x4000, 0x1690: 0x4000, 0x1691: 0x4000, + 0x1692: 0x4000, 0x1693: 0x4000, 0x1694: 0x4000, 0x1695: 0x4000, 0x1696: 0x4000, 0x1697: 0x4000, + 0x1698: 0x4000, 0x1699: 0x4000, 0x169a: 0x4000, 0x169b: 0x4000, 0x169c: 0x4000, 0x169d: 0x4000, + 0x169e: 0x4000, 0x169f: 0x4000, 0x16a0: 0x4000, 0x16a1: 0x4000, 0x16a2: 0x4000, 0x16a3: 0x4000, + 0x16a4: 0x4000, 0x16a5: 0x4000, 0x16a6: 0x4000, 0x16a7: 0x4000, 0x16a8: 0x4000, 0x16a9: 0x4000, + 0x16aa: 0x4000, 0x16ab: 0x4000, 0x16ac: 0x4000, 0x16ad: 0x4000, 0x16ae: 0x4000, 0x16af: 0x4000, + 0x16b0: 0x4000, 0x16b1: 0x4000, 0x16b2: 0x4000, 0x16b3: 0x4000, 0x16b4: 0x4000, 0x16b5: 0x4000, + 0x16b6: 0x4000, 0x16b7: 0x4000, 0x16b8: 0x4000, 0x16b9: 0x4000, 0x16ba: 0x4000, 0x16bb: 0x4000, + 0x16bc: 0x4000, 0x16bd: 0x4000, + // Block 0x5b, offset 0x16c0 + 0x16cb: 0x4000, + 0x16cc: 0x4000, 0x16cd: 0x4000, 0x16ce: 0x4000, 0x16d0: 0x4000, 0x16d1: 0x4000, + 0x16d2: 0x4000, 0x16d3: 0x4000, 0x16d4: 0x4000, 0x16d5: 0x4000, 0x16d6: 0x4000, 0x16d7: 0x4000, + 0x16d8: 0x4000, 0x16d9: 0x4000, 0x16da: 0x4000, 0x16db: 0x4000, 0x16dc: 0x4000, 0x16dd: 0x4000, + 0x16de: 0x4000, 0x16df: 0x4000, 0x16e0: 0x4000, 0x16e1: 0x4000, 0x16e2: 0x4000, 0x16e3: 0x4000, + 0x16e4: 0x4000, 0x16e5: 0x4000, 0x16e6: 0x4000, 
0x16e7: 0x4000, + 0x16fa: 0x4000, + // Block 0x5c, offset 0x1700 + 0x1715: 0x4000, 0x1716: 0x4000, + 0x1724: 0x4000, + // Block 0x5d, offset 0x1740 + 0x177b: 0x4000, + 0x177c: 0x4000, 0x177d: 0x4000, 0x177e: 0x4000, 0x177f: 0x4000, + // Block 0x5e, offset 0x1780 + 0x1780: 0x4000, 0x1781: 0x4000, 0x1782: 0x4000, 0x1783: 0x4000, 0x1784: 0x4000, 0x1785: 0x4000, + 0x1786: 0x4000, 0x1787: 0x4000, 0x1788: 0x4000, 0x1789: 0x4000, 0x178a: 0x4000, 0x178b: 0x4000, + 0x178c: 0x4000, 0x178d: 0x4000, 0x178e: 0x4000, 0x178f: 0x4000, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x4000, 0x17c1: 0x4000, 0x17c2: 0x4000, 0x17c3: 0x4000, 0x17c4: 0x4000, 0x17c5: 0x4000, + 0x17cc: 0x4000, 0x17d0: 0x4000, 0x17d1: 0x4000, + 0x17d2: 0x4000, 0x17d5: 0x4000, + 0x17eb: 0x4000, 0x17ec: 0x4000, + 0x17f4: 0x4000, 0x17f5: 0x4000, + 0x17f6: 0x4000, 0x17f7: 0x4000, 0x17f8: 0x4000, 0x17f9: 0x4000, 0x17fa: 0x4000, + // Block 0x60, offset 0x1800 + 0x1820: 0x4000, 0x1821: 0x4000, 0x1822: 0x4000, 0x1823: 0x4000, + 0x1824: 0x4000, 0x1825: 0x4000, 0x1826: 0x4000, 0x1827: 0x4000, 0x1828: 0x4000, 0x1829: 0x4000, + 0x182a: 0x4000, 0x182b: 0x4000, + // Block 0x61, offset 0x1840 + 0x184d: 0x4000, 0x184e: 0x4000, 0x184f: 0x4000, 0x1850: 0x4000, 0x1851: 0x4000, + 0x1852: 0x4000, 0x1853: 0x4000, 0x1854: 0x4000, 0x1855: 0x4000, 0x1856: 0x4000, 0x1857: 0x4000, + 0x1858: 0x4000, 0x1859: 0x4000, 0x185a: 0x4000, 0x185b: 0x4000, 0x185c: 0x4000, 0x185d: 0x4000, + 0x185e: 0x4000, 0x185f: 0x4000, 0x1860: 0x4000, 0x1861: 0x4000, 0x1862: 0x4000, 0x1863: 0x4000, + 0x1864: 0x4000, 0x1865: 0x4000, 0x1866: 0x4000, 0x1867: 0x4000, 0x1868: 0x4000, 0x1869: 0x4000, + 0x186a: 0x4000, 0x186b: 0x4000, 0x186c: 0x4000, 0x186d: 0x4000, 0x186e: 0x4000, 0x186f: 0x4000, + 0x1870: 0x4000, 0x1871: 0x4000, 0x1872: 0x4000, 0x1873: 0x4000, 0x1874: 0x4000, 0x1875: 0x4000, + 0x1876: 0x4000, 0x1877: 0x4000, 0x1878: 0x4000, 0x1879: 0x4000, 0x187a: 0x4000, 0x187b: 0x4000, + 0x187c: 0x4000, 0x187d: 0x4000, 0x187e: 0x4000, 0x187f: 0x4000, + // Block 0x62, offset 0x1880 + 0x1880: 0x4000, 0x1881: 0x4000, 0x1882: 0x4000, 0x1883: 0x4000, 0x1884: 0x4000, 0x1885: 0x4000, + 0x1886: 0x4000, 0x1887: 0x4000, 0x1888: 0x4000, 0x1889: 0x4000, 0x188a: 0x4000, 0x188b: 0x4000, + 0x188c: 0x4000, 0x188d: 0x4000, 0x188e: 0x4000, 0x188f: 0x4000, 0x1890: 0x4000, 0x1891: 0x4000, + 0x1892: 0x4000, 0x1893: 0x4000, 0x1894: 0x4000, 0x1895: 0x4000, 0x1896: 0x4000, 0x1897: 0x4000, + 0x1898: 0x4000, 0x1899: 0x4000, 0x189a: 0x4000, 0x189b: 0x4000, 0x189c: 0x4000, 0x189d: 0x4000, + 0x189e: 0x4000, 0x189f: 0x4000, 0x18a0: 0x4000, 0x18a1: 0x4000, 0x18a2: 0x4000, 0x18a3: 0x4000, + 0x18a4: 0x4000, 0x18a5: 0x4000, 0x18a6: 0x4000, 0x18a7: 0x4000, 0x18a8: 0x4000, 0x18a9: 0x4000, + 0x18aa: 0x4000, 0x18ab: 0x4000, 0x18ac: 0x4000, 0x18ad: 0x4000, 0x18ae: 0x4000, 0x18af: 0x4000, + 0x18b0: 0x4000, 0x18b1: 0x4000, 0x18b3: 0x4000, 0x18b4: 0x4000, 0x18b5: 0x4000, + 0x18b6: 0x4000, 0x18ba: 0x4000, 0x18bb: 0x4000, + 0x18bc: 0x4000, 0x18bd: 0x4000, 0x18be: 0x4000, 0x18bf: 0x4000, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x4000, 0x18c1: 0x4000, 0x18c2: 0x4000, 0x18c3: 0x4000, 0x18c4: 0x4000, 0x18c5: 0x4000, + 0x18c6: 0x4000, 0x18c7: 0x4000, 0x18c8: 0x4000, 0x18c9: 0x4000, 0x18ca: 0x4000, 0x18cb: 0x4000, + 0x18cc: 0x4000, 0x18cd: 0x4000, 0x18ce: 0x4000, 0x18cf: 0x4000, 0x18d0: 0x4000, 0x18d1: 0x4000, + 0x18d2: 0x4000, 0x18d3: 0x4000, 0x18d4: 0x4000, 0x18d5: 0x4000, 0x18d6: 0x4000, 0x18d7: 0x4000, + 0x18d8: 0x4000, 0x18d9: 0x4000, 0x18da: 0x4000, 0x18db: 0x4000, 0x18dc: 0x4000, 0x18dd: 0x4000, + 0x18de: 0x4000, 0x18df: 0x4000, 
0x18e0: 0x4000, 0x18e1: 0x4000, 0x18e2: 0x4000, + 0x18e5: 0x4000, 0x18e6: 0x4000, 0x18e7: 0x4000, 0x18e8: 0x4000, 0x18e9: 0x4000, + 0x18ea: 0x4000, 0x18ee: 0x4000, 0x18ef: 0x4000, + 0x18f0: 0x4000, 0x18f1: 0x4000, 0x18f2: 0x4000, 0x18f3: 0x4000, 0x18f4: 0x4000, 0x18f5: 0x4000, + 0x18f6: 0x4000, 0x18f7: 0x4000, 0x18f8: 0x4000, 0x18f9: 0x4000, 0x18fa: 0x4000, 0x18fb: 0x4000, + 0x18fc: 0x4000, 0x18fd: 0x4000, 0x18fe: 0x4000, 0x18ff: 0x4000, + // Block 0x64, offset 0x1900 + 0x1900: 0x4000, 0x1901: 0x4000, 0x1902: 0x4000, 0x1903: 0x4000, 0x1904: 0x4000, 0x1905: 0x4000, + 0x1906: 0x4000, 0x1907: 0x4000, 0x1908: 0x4000, 0x1909: 0x4000, 0x190a: 0x4000, + 0x190d: 0x4000, 0x190e: 0x4000, 0x190f: 0x4000, 0x1910: 0x4000, 0x1911: 0x4000, + 0x1912: 0x4000, 0x1913: 0x4000, 0x1914: 0x4000, 0x1915: 0x4000, 0x1916: 0x4000, 0x1917: 0x4000, + 0x1918: 0x4000, 0x1919: 0x4000, 0x191a: 0x4000, 0x191b: 0x4000, 0x191c: 0x4000, 0x191d: 0x4000, + 0x191e: 0x4000, 0x191f: 0x4000, 0x1920: 0x4000, 0x1921: 0x4000, 0x1922: 0x4000, 0x1923: 0x4000, + 0x1924: 0x4000, 0x1925: 0x4000, 0x1926: 0x4000, 0x1927: 0x4000, 0x1928: 0x4000, 0x1929: 0x4000, + 0x192a: 0x4000, 0x192b: 0x4000, 0x192c: 0x4000, 0x192d: 0x4000, 0x192e: 0x4000, 0x192f: 0x4000, + 0x1930: 0x4000, 0x1931: 0x4000, 0x1932: 0x4000, 0x1933: 0x4000, 0x1934: 0x4000, 0x1935: 0x4000, + 0x1936: 0x4000, 0x1937: 0x4000, 0x1938: 0x4000, 0x1939: 0x4000, 0x193a: 0x4000, 0x193b: 0x4000, + 0x193c: 0x4000, 0x193d: 0x4000, 0x193e: 0x4000, 0x193f: 0x4000, + // Block 0x65, offset 0x1940 + 0x1970: 0x4000, 0x1971: 0x4000, 0x1972: 0x4000, 0x1973: 0x4000, + 0x1978: 0x4000, 0x1979: 0x4000, 0x197a: 0x4000, + // Block 0x66, offset 0x1980 + 0x1980: 0x4000, 0x1981: 0x4000, 0x1982: 0x4000, + 0x1990: 0x4000, 0x1991: 0x4000, + 0x1992: 0x4000, 0x1993: 0x4000, 0x1994: 0x4000, 0x1995: 0x4000, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x2000, 0x19c1: 0x2000, 0x19c2: 0x2000, 0x19c3: 0x2000, 0x19c4: 0x2000, 0x19c5: 0x2000, + 0x19c6: 0x2000, 0x19c7: 0x2000, 0x19c8: 0x2000, 0x19c9: 0x2000, 0x19ca: 0x2000, 0x19cb: 0x2000, + 0x19cc: 0x2000, 0x19cd: 0x2000, 0x19ce: 0x2000, 0x19cf: 0x2000, 0x19d0: 0x2000, 0x19d1: 0x2000, + 0x19d2: 0x2000, 0x19d3: 0x2000, 0x19d4: 0x2000, 0x19d5: 0x2000, 0x19d6: 0x2000, 0x19d7: 0x2000, + 0x19d8: 0x2000, 0x19d9: 0x2000, 0x19da: 0x2000, 0x19db: 0x2000, 0x19dc: 0x2000, 0x19dd: 0x2000, + 0x19de: 0x2000, 0x19df: 0x2000, 0x19e0: 0x2000, 0x19e1: 0x2000, 0x19e2: 0x2000, 0x19e3: 0x2000, + 0x19e4: 0x2000, 0x19e5: 0x2000, 0x19e6: 0x2000, 0x19e7: 0x2000, 0x19e8: 0x2000, 0x19e9: 0x2000, + 0x19ea: 0x2000, 0x19eb: 0x2000, 0x19ec: 0x2000, 0x19ed: 0x2000, 0x19ee: 0x2000, 0x19ef: 0x2000, + 0x19f0: 0x2000, 0x19f1: 0x2000, 0x19f2: 0x2000, 0x19f3: 0x2000, 0x19f4: 0x2000, 0x19f5: 0x2000, + 0x19f6: 0x2000, 0x19f7: 0x2000, 0x19f8: 0x2000, 0x19f9: 0x2000, 0x19fa: 0x2000, 0x19fb: 0x2000, + 0x19fc: 0x2000, 0x19fd: 0x2000, +} + +// widthIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var widthIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, 0xc4: 0x03, 0xc5: 0x04, 0xc7: 0x05, + 0xc9: 0x06, 0xcb: 0x07, 0xcc: 0x08, 0xcd: 0x09, 0xce: 0x0a, 0xcf: 0x0b, + 0xd0: 0x0c, 0xd1: 0x0d, + 0xe1: 0x02, 0xe2: 0x03, 0xe3: 0x04, 0xe4: 0x05, 0xe5: 0x06, 0xe6: 0x06, 0xe7: 0x06, + 0xe8: 0x06, 0xe9: 0x06, 0xea: 0x07, 0xeb: 0x06, 0xec: 0x06, 0xed: 0x08, 0xee: 0x09, 0xef: 0x0a, + 0xf0: 0x0f, 0xf3: 0x12, 0xf4: 0x13, + // Block 0x4, offset 0x100 + 0x104: 0x0e, 0x105: 0x0f, + // Block 0x5, offset 0x140 + 0x140: 0x10, 0x141: 0x11, 0x142: 0x12, 0x144: 0x13, 0x145: 0x14, 0x146: 0x15, 0x147: 0x16, + 0x148: 0x17, 0x149: 0x18, 0x14a: 0x19, 0x14c: 0x1a, 0x14f: 0x1b, + 0x151: 0x1c, 0x152: 0x08, 0x153: 0x1d, 0x154: 0x1e, 0x155: 0x1f, 0x156: 0x20, 0x157: 0x21, + 0x158: 0x22, 0x159: 0x23, 0x15a: 0x24, 0x15b: 0x25, 0x15c: 0x26, 0x15d: 0x27, 0x15e: 0x28, 0x15f: 0x29, + 0x166: 0x2a, + 0x16c: 0x2b, 0x16d: 0x2c, + 0x17a: 0x2d, 0x17b: 0x2e, 0x17c: 0x0e, 0x17d: 0x0e, 0x17e: 0x0e, 0x17f: 0x2f, + // Block 0x6, offset 0x180 + 0x180: 0x30, 0x181: 0x31, 0x182: 0x32, 0x183: 0x33, 0x184: 0x34, 0x185: 0x35, 0x186: 0x36, 0x187: 0x37, + 0x188: 0x38, 0x189: 0x39, 0x18a: 0x0e, 0x18b: 0x3a, 0x18c: 0x0e, 0x18d: 0x0e, 0x18e: 0x0e, 0x18f: 0x0e, + 0x190: 0x0e, 0x191: 0x0e, 0x192: 0x0e, 0x193: 0x0e, 0x194: 0x0e, 0x195: 0x0e, 0x196: 0x0e, 0x197: 0x0e, + 0x198: 0x0e, 0x199: 0x0e, 0x19a: 0x0e, 0x19b: 0x0e, 0x19c: 0x0e, 0x19d: 0x0e, 0x19e: 0x0e, 0x19f: 0x0e, + 0x1a0: 0x0e, 0x1a1: 0x0e, 0x1a2: 0x0e, 0x1a3: 0x0e, 0x1a4: 0x0e, 0x1a5: 0x0e, 0x1a6: 0x0e, 0x1a7: 0x0e, + 0x1a8: 0x0e, 0x1a9: 0x0e, 0x1aa: 0x0e, 0x1ab: 0x0e, 0x1ac: 0x0e, 0x1ad: 0x0e, 0x1ae: 0x0e, 0x1af: 0x0e, + 0x1b0: 0x0e, 0x1b1: 0x0e, 0x1b2: 0x0e, 0x1b3: 0x0e, 0x1b4: 0x0e, 0x1b5: 0x0e, 0x1b6: 0x0e, 0x1b7: 0x0e, + 0x1b8: 0x0e, 0x1b9: 0x0e, 0x1ba: 0x0e, 0x1bb: 0x0e, 0x1bc: 0x0e, 0x1bd: 0x0e, 0x1be: 0x0e, 0x1bf: 0x0e, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0e, 0x1c1: 0x0e, 0x1c2: 0x0e, 0x1c3: 0x0e, 0x1c4: 0x0e, 0x1c5: 0x0e, 0x1c6: 0x0e, 0x1c7: 0x0e, + 0x1c8: 0x0e, 0x1c9: 0x0e, 0x1ca: 0x0e, 0x1cb: 0x0e, 0x1cc: 0x0e, 0x1cd: 0x0e, 0x1ce: 0x0e, 0x1cf: 0x0e, + 0x1d0: 0x0e, 0x1d1: 0x0e, 0x1d2: 0x0e, 0x1d3: 0x0e, 0x1d4: 0x0e, 0x1d5: 0x0e, 0x1d6: 0x0e, 0x1d7: 0x0e, + 0x1d8: 0x0e, 0x1d9: 0x0e, 0x1da: 0x0e, 0x1db: 0x0e, 0x1dc: 0x0e, 0x1dd: 0x0e, 0x1de: 0x0e, 0x1df: 0x0e, + 0x1e0: 0x0e, 0x1e1: 0x0e, 0x1e2: 0x0e, 0x1e3: 0x0e, 0x1e4: 0x0e, 0x1e5: 0x0e, 0x1e6: 0x0e, 0x1e7: 0x0e, + 0x1e8: 0x0e, 0x1e9: 0x0e, 0x1ea: 0x0e, 0x1eb: 0x0e, 0x1ec: 0x0e, 0x1ed: 0x0e, 0x1ee: 0x0e, 0x1ef: 0x0e, + 0x1f0: 0x0e, 0x1f1: 0x0e, 0x1f2: 0x0e, 0x1f3: 0x0e, 0x1f4: 0x0e, 0x1f5: 0x0e, 0x1f6: 0x0e, + 0x1f8: 0x0e, 0x1f9: 0x0e, 0x1fa: 0x0e, 0x1fb: 0x0e, 0x1fc: 0x0e, 0x1fd: 0x0e, 0x1fe: 0x0e, 0x1ff: 0x0e, + // Block 0x8, offset 0x200 + 0x200: 0x0e, 0x201: 0x0e, 0x202: 0x0e, 0x203: 0x0e, 0x204: 0x0e, 0x205: 0x0e, 0x206: 0x0e, 0x207: 0x0e, + 0x208: 0x0e, 0x209: 0x0e, 0x20a: 0x0e, 0x20b: 0x0e, 0x20c: 0x0e, 0x20d: 0x0e, 0x20e: 0x0e, 0x20f: 0x0e, + 0x210: 0x0e, 0x211: 0x0e, 0x212: 0x0e, 0x213: 0x0e, 0x214: 0x0e, 0x215: 0x0e, 0x216: 0x0e, 0x217: 0x0e, + 0x218: 0x0e, 0x219: 0x0e, 0x21a: 0x0e, 0x21b: 0x0e, 0x21c: 0x0e, 0x21d: 0x0e, 0x21e: 0x0e, 0x21f: 0x0e, + 0x220: 0x0e, 0x221: 0x0e, 0x222: 0x0e, 0x223: 0x0e, 0x224: 0x0e, 0x225: 0x0e, 0x226: 0x0e, 0x227: 0x0e, + 0x228: 0x0e, 0x229: 0x0e, 0x22a: 0x0e, 0x22b: 0x0e, 0x22c: 0x0e, 0x22d: 0x0e, 0x22e: 0x0e, 0x22f: 0x0e, + 0x230: 0x0e, 0x231: 0x0e, 0x232: 0x0e, 0x233: 0x0e, 
0x234: 0x0e, 0x235: 0x0e, 0x236: 0x0e, 0x237: 0x0e, + 0x238: 0x0e, 0x239: 0x0e, 0x23a: 0x0e, 0x23b: 0x0e, 0x23c: 0x0e, 0x23d: 0x0e, 0x23e: 0x0e, 0x23f: 0x0e, + // Block 0x9, offset 0x240 + 0x240: 0x0e, 0x241: 0x0e, 0x242: 0x0e, 0x243: 0x0e, 0x244: 0x0e, 0x245: 0x0e, 0x246: 0x0e, 0x247: 0x0e, + 0x248: 0x0e, 0x249: 0x0e, 0x24a: 0x0e, 0x24b: 0x0e, 0x24c: 0x0e, 0x24d: 0x0e, 0x24e: 0x0e, 0x24f: 0x0e, + 0x250: 0x0e, 0x251: 0x0e, 0x252: 0x3b, 0x253: 0x3c, + 0x265: 0x3d, + 0x270: 0x0e, 0x271: 0x0e, 0x272: 0x0e, 0x273: 0x0e, 0x274: 0x0e, 0x275: 0x0e, 0x276: 0x0e, 0x277: 0x0e, + 0x278: 0x0e, 0x279: 0x0e, 0x27a: 0x0e, 0x27b: 0x0e, 0x27c: 0x0e, 0x27d: 0x0e, 0x27e: 0x0e, 0x27f: 0x0e, + // Block 0xa, offset 0x280 + 0x280: 0x0e, 0x281: 0x0e, 0x282: 0x0e, 0x283: 0x0e, 0x284: 0x0e, 0x285: 0x0e, 0x286: 0x0e, 0x287: 0x0e, + 0x288: 0x0e, 0x289: 0x0e, 0x28a: 0x0e, 0x28b: 0x0e, 0x28c: 0x0e, 0x28d: 0x0e, 0x28e: 0x0e, 0x28f: 0x0e, + 0x290: 0x0e, 0x291: 0x0e, 0x292: 0x0e, 0x293: 0x0e, 0x294: 0x0e, 0x295: 0x0e, 0x296: 0x0e, 0x297: 0x0e, + 0x298: 0x0e, 0x299: 0x0e, 0x29a: 0x0e, 0x29b: 0x0e, 0x29c: 0x0e, 0x29d: 0x0e, 0x29e: 0x3e, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x08, 0x2c1: 0x08, 0x2c2: 0x08, 0x2c3: 0x08, 0x2c4: 0x08, 0x2c5: 0x08, 0x2c6: 0x08, 0x2c7: 0x08, + 0x2c8: 0x08, 0x2c9: 0x08, 0x2ca: 0x08, 0x2cb: 0x08, 0x2cc: 0x08, 0x2cd: 0x08, 0x2ce: 0x08, 0x2cf: 0x08, + 0x2d0: 0x08, 0x2d1: 0x08, 0x2d2: 0x08, 0x2d3: 0x08, 0x2d4: 0x08, 0x2d5: 0x08, 0x2d6: 0x08, 0x2d7: 0x08, + 0x2d8: 0x08, 0x2d9: 0x08, 0x2da: 0x08, 0x2db: 0x08, 0x2dc: 0x08, 0x2dd: 0x08, 0x2de: 0x08, 0x2df: 0x08, + 0x2e0: 0x08, 0x2e1: 0x08, 0x2e2: 0x08, 0x2e3: 0x08, 0x2e4: 0x08, 0x2e5: 0x08, 0x2e6: 0x08, 0x2e7: 0x08, + 0x2e8: 0x08, 0x2e9: 0x08, 0x2ea: 0x08, 0x2eb: 0x08, 0x2ec: 0x08, 0x2ed: 0x08, 0x2ee: 0x08, 0x2ef: 0x08, + 0x2f0: 0x08, 0x2f1: 0x08, 0x2f2: 0x08, 0x2f3: 0x08, 0x2f4: 0x08, 0x2f5: 0x08, 0x2f6: 0x08, 0x2f7: 0x08, + 0x2f8: 0x08, 0x2f9: 0x08, 0x2fa: 0x08, 0x2fb: 0x08, 0x2fc: 0x08, 0x2fd: 0x08, 0x2fe: 0x08, 0x2ff: 0x08, + // Block 0xc, offset 0x300 + 0x300: 0x08, 0x301: 0x08, 0x302: 0x08, 0x303: 0x08, 0x304: 0x08, 0x305: 0x08, 0x306: 0x08, 0x307: 0x08, + 0x308: 0x08, 0x309: 0x08, 0x30a: 0x08, 0x30b: 0x08, 0x30c: 0x08, 0x30d: 0x08, 0x30e: 0x08, 0x30f: 0x08, + 0x310: 0x08, 0x311: 0x08, 0x312: 0x08, 0x313: 0x08, 0x314: 0x08, 0x315: 0x08, 0x316: 0x08, 0x317: 0x08, + 0x318: 0x08, 0x319: 0x08, 0x31a: 0x08, 0x31b: 0x08, 0x31c: 0x08, 0x31d: 0x08, 0x31e: 0x08, 0x31f: 0x08, + 0x320: 0x08, 0x321: 0x08, 0x322: 0x08, 0x323: 0x08, 0x324: 0x0e, 0x325: 0x0e, 0x326: 0x0e, 0x327: 0x0e, + 0x328: 0x0e, 0x329: 0x0e, 0x32a: 0x0e, 0x32b: 0x0e, + 0x338: 0x3f, 0x339: 0x40, 0x33c: 0x41, 0x33d: 0x42, 0x33e: 0x43, 0x33f: 0x44, + // Block 0xd, offset 0x340 + 0x37f: 0x45, + // Block 0xe, offset 0x380 + 0x380: 0x0e, 0x381: 0x0e, 0x382: 0x0e, 0x383: 0x0e, 0x384: 0x0e, 0x385: 0x0e, 0x386: 0x0e, 0x387: 0x0e, + 0x388: 0x0e, 0x389: 0x0e, 0x38a: 0x0e, 0x38b: 0x0e, 0x38c: 0x0e, 0x38d: 0x0e, 0x38e: 0x0e, 0x38f: 0x0e, + 0x390: 0x0e, 0x391: 0x0e, 0x392: 0x0e, 0x393: 0x0e, 0x394: 0x0e, 0x395: 0x0e, 0x396: 0x0e, 0x397: 0x0e, + 0x398: 0x0e, 0x399: 0x0e, 0x39a: 0x0e, 0x39b: 0x0e, 0x39c: 0x0e, 0x39d: 0x0e, 0x39e: 0x0e, 0x39f: 0x46, + 0x3a0: 0x0e, 0x3a1: 0x0e, 0x3a2: 0x0e, 0x3a3: 0x0e, 0x3a4: 0x0e, 0x3a5: 0x0e, 0x3a6: 0x0e, 0x3a7: 0x0e, + 0x3a8: 0x0e, 0x3a9: 0x0e, 0x3aa: 0x0e, 0x3ab: 0x47, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0e, 0x3c1: 0x0e, 0x3c2: 0x0e, 0x3c3: 0x0e, 0x3c4: 0x48, 0x3c5: 0x49, 0x3c6: 0x0e, 0x3c7: 0x0e, + 0x3c8: 0x0e, 0x3c9: 0x0e, 0x3ca: 0x0e, 0x3cb: 0x4a, + // Block 0x10, 
offset 0x400 + 0x400: 0x4b, 0x403: 0x4c, 0x404: 0x4d, 0x405: 0x4e, 0x406: 0x4f, + 0x408: 0x50, 0x409: 0x51, 0x40c: 0x52, 0x40d: 0x53, 0x40e: 0x54, 0x40f: 0x55, + 0x410: 0x3a, 0x411: 0x56, 0x412: 0x0e, 0x413: 0x57, 0x414: 0x58, 0x415: 0x59, 0x416: 0x5a, 0x417: 0x5b, + 0x418: 0x0e, 0x419: 0x5c, 0x41a: 0x0e, 0x41b: 0x5d, 0x41f: 0x5e, + 0x424: 0x5f, 0x425: 0x60, 0x426: 0x61, 0x427: 0x62, + 0x429: 0x63, 0x42a: 0x64, + // Block 0x11, offset 0x440 + 0x456: 0x0b, 0x457: 0x06, + 0x458: 0x0c, 0x45b: 0x0d, 0x45f: 0x0e, + 0x460: 0x06, 0x461: 0x06, 0x462: 0x06, 0x463: 0x06, 0x464: 0x06, 0x465: 0x06, 0x466: 0x06, 0x467: 0x06, + 0x468: 0x06, 0x469: 0x06, 0x46a: 0x06, 0x46b: 0x06, 0x46c: 0x06, 0x46d: 0x06, 0x46e: 0x06, 0x46f: 0x06, + 0x470: 0x06, 0x471: 0x06, 0x472: 0x06, 0x473: 0x06, 0x474: 0x06, 0x475: 0x06, 0x476: 0x06, 0x477: 0x06, + 0x478: 0x06, 0x479: 0x06, 0x47a: 0x06, 0x47b: 0x06, 0x47c: 0x06, 0x47d: 0x06, 0x47e: 0x06, 0x47f: 0x06, + // Block 0x12, offset 0x480 + 0x484: 0x08, 0x485: 0x08, 0x486: 0x08, 0x487: 0x09, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x08, 0x4c1: 0x08, 0x4c2: 0x08, 0x4c3: 0x08, 0x4c4: 0x08, 0x4c5: 0x08, 0x4c6: 0x08, 0x4c7: 0x08, + 0x4c8: 0x08, 0x4c9: 0x08, 0x4ca: 0x08, 0x4cb: 0x08, 0x4cc: 0x08, 0x4cd: 0x08, 0x4ce: 0x08, 0x4cf: 0x08, + 0x4d0: 0x08, 0x4d1: 0x08, 0x4d2: 0x08, 0x4d3: 0x08, 0x4d4: 0x08, 0x4d5: 0x08, 0x4d6: 0x08, 0x4d7: 0x08, + 0x4d8: 0x08, 0x4d9: 0x08, 0x4da: 0x08, 0x4db: 0x08, 0x4dc: 0x08, 0x4dd: 0x08, 0x4de: 0x08, 0x4df: 0x08, + 0x4e0: 0x08, 0x4e1: 0x08, 0x4e2: 0x08, 0x4e3: 0x08, 0x4e4: 0x08, 0x4e5: 0x08, 0x4e6: 0x08, 0x4e7: 0x08, + 0x4e8: 0x08, 0x4e9: 0x08, 0x4ea: 0x08, 0x4eb: 0x08, 0x4ec: 0x08, 0x4ed: 0x08, 0x4ee: 0x08, 0x4ef: 0x08, + 0x4f0: 0x08, 0x4f1: 0x08, 0x4f2: 0x08, 0x4f3: 0x08, 0x4f4: 0x08, 0x4f5: 0x08, 0x4f6: 0x08, 0x4f7: 0x08, + 0x4f8: 0x08, 0x4f9: 0x08, 0x4fa: 0x08, 0x4fb: 0x08, 0x4fc: 0x08, 0x4fd: 0x08, 0x4fe: 0x08, 0x4ff: 0x65, + // Block 0x14, offset 0x500 + 0x520: 0x10, + 0x530: 0x09, 0x531: 0x09, 0x532: 0x09, 0x533: 0x09, 0x534: 0x09, 0x535: 0x09, 0x536: 0x09, 0x537: 0x09, + 0x538: 0x09, 0x539: 0x09, 0x53a: 0x09, 0x53b: 0x09, 0x53c: 0x09, 0x53d: 0x09, 0x53e: 0x09, 0x53f: 0x11, + // Block 0x15, offset 0x540 + 0x540: 0x09, 0x541: 0x09, 0x542: 0x09, 0x543: 0x09, 0x544: 0x09, 0x545: 0x09, 0x546: 0x09, 0x547: 0x09, + 0x548: 0x09, 0x549: 0x09, 0x54a: 0x09, 0x54b: 0x09, 0x54c: 0x09, 0x54d: 0x09, 0x54e: 0x09, 0x54f: 0x11, +} + +// inverseData contains 4-byte entries of the following format: +// <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8. 
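The XOR trick described in the comment above can be made concrete with a short worked example. The sketch below is illustrative only and is not part of the vendored package: it assumes the entry layout implied by the comment (byte 0 is the output length, the following bytes hold the mapped rune's UTF-8 with its final byte XOR-ed against the final byte of the original rune's UTF-8); the helper name decode is hypothetical.

package main

import "fmt"

// decode reverses an inverseData-style entry for a given original rune,
// following the scheme described in the table comment: copy the stored
// bytes, then undo the XOR on the last byte using the last byte of the
// original rune's UTF-8 encoding.
func decode(entry [4]byte, orig rune) string {
	n := int(entry[0])          // output length in bytes
	src := []byte(string(orig)) // UTF-8 of the original rune
	out := make([]byte, n)
	copy(out, entry[1:1+n])
	out[n-1] ^= src[len(src)-1] // undo the XOR on the final byte
	return string(out)
}

func main() {
	// The shared entry {0x01, 0xE0, 0x00, 0x00} from the comment above.
	entry := [4]byte{0x01, 0xE0, 0x00, 0x00}
	fmt.Println(decode(entry, 'Ａ')) // U+FF21: 0xE0 ^ 0xA1 = 0x41 -> "A"
	fmt.Println(decode(entry, 'Ｂ')) // U+FF22: 0xE0 ^ 0xA2 = 0x42 -> "B"
}

Because only the final byte differs between consecutive fullwidth letters, a single stored entry covers the whole run of mappings, which is what allows the table to stay at roughly one third of the naive size.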
+var inverseData = [150][4]byte{ + {0x00, 0x00, 0x00, 0x00}, + {0x03, 0xe3, 0x80, 0xa0}, + {0x03, 0xef, 0xbc, 0xa0}, + {0x03, 0xef, 0xbc, 0xe0}, + {0x03, 0xef, 0xbd, 0xe0}, + {0x03, 0xef, 0xbf, 0x02}, + {0x03, 0xef, 0xbf, 0x00}, + {0x03, 0xef, 0xbf, 0x0e}, + {0x03, 0xef, 0xbf, 0x0c}, + {0x03, 0xef, 0xbf, 0x0f}, + {0x03, 0xef, 0xbf, 0x39}, + {0x03, 0xef, 0xbf, 0x3b}, + {0x03, 0xef, 0xbf, 0x3f}, + {0x03, 0xef, 0xbf, 0x2a}, + {0x03, 0xef, 0xbf, 0x0d}, + {0x03, 0xef, 0xbf, 0x25}, + {0x03, 0xef, 0xbd, 0x1a}, + {0x03, 0xef, 0xbd, 0x26}, + {0x01, 0xa0, 0x00, 0x00}, + {0x03, 0xef, 0xbd, 0x25}, + {0x03, 0xef, 0xbd, 0x23}, + {0x03, 0xef, 0xbd, 0x2e}, + {0x03, 0xef, 0xbe, 0x07}, + {0x03, 0xef, 0xbe, 0x05}, + {0x03, 0xef, 0xbd, 0x06}, + {0x03, 0xef, 0xbd, 0x13}, + {0x03, 0xef, 0xbd, 0x0b}, + {0x03, 0xef, 0xbd, 0x16}, + {0x03, 0xef, 0xbd, 0x0c}, + {0x03, 0xef, 0xbd, 0x15}, + {0x03, 0xef, 0xbd, 0x0d}, + {0x03, 0xef, 0xbd, 0x1c}, + {0x03, 0xef, 0xbd, 0x02}, + {0x03, 0xef, 0xbd, 0x1f}, + {0x03, 0xef, 0xbd, 0x1d}, + {0x03, 0xef, 0xbd, 0x17}, + {0x03, 0xef, 0xbd, 0x08}, + {0x03, 0xef, 0xbd, 0x09}, + {0x03, 0xef, 0xbd, 0x0e}, + {0x03, 0xef, 0xbd, 0x04}, + {0x03, 0xef, 0xbd, 0x05}, + {0x03, 0xef, 0xbe, 0x3f}, + {0x03, 0xef, 0xbe, 0x00}, + {0x03, 0xef, 0xbd, 0x2c}, + {0x03, 0xef, 0xbe, 0x06}, + {0x03, 0xef, 0xbe, 0x0c}, + {0x03, 0xef, 0xbe, 0x0f}, + {0x03, 0xef, 0xbe, 0x0d}, + {0x03, 0xef, 0xbe, 0x0b}, + {0x03, 0xef, 0xbe, 0x19}, + {0x03, 0xef, 0xbe, 0x15}, + {0x03, 0xef, 0xbe, 0x11}, + {0x03, 0xef, 0xbe, 0x31}, + {0x03, 0xef, 0xbe, 0x33}, + {0x03, 0xef, 0xbd, 0x0f}, + {0x03, 0xef, 0xbe, 0x30}, + {0x03, 0xef, 0xbe, 0x3e}, + {0x03, 0xef, 0xbe, 0x32}, + {0x03, 0xef, 0xbe, 0x36}, + {0x03, 0xef, 0xbd, 0x14}, + {0x03, 0xef, 0xbe, 0x2e}, + {0x03, 0xef, 0xbd, 0x1e}, + {0x03, 0xef, 0xbe, 0x10}, + {0x03, 0xef, 0xbf, 0x13}, + {0x03, 0xef, 0xbf, 0x15}, + {0x03, 0xef, 0xbf, 0x17}, + {0x03, 0xef, 0xbf, 0x1f}, + {0x03, 0xef, 0xbf, 0x1d}, + {0x03, 0xef, 0xbf, 0x1b}, + {0x03, 0xef, 0xbf, 0x09}, + {0x03, 0xef, 0xbf, 0x0b}, + {0x03, 0xef, 0xbf, 0x37}, + {0x03, 0xef, 0xbe, 0x04}, + {0x01, 0xe0, 0x00, 0x00}, + {0x03, 0xe2, 0xa6, 0x1a}, + {0x03, 0xe2, 0xa6, 0x26}, + {0x03, 0xe3, 0x80, 0x23}, + {0x03, 0xe3, 0x80, 0x2e}, + {0x03, 0xe3, 0x80, 0x25}, + {0x03, 0xe3, 0x83, 0x1e}, + {0x03, 0xe3, 0x83, 0x14}, + {0x03, 0xe3, 0x82, 0x06}, + {0x03, 0xe3, 0x82, 0x0b}, + {0x03, 0xe3, 0x82, 0x0c}, + {0x03, 0xe3, 0x82, 0x0d}, + {0x03, 0xe3, 0x82, 0x02}, + {0x03, 0xe3, 0x83, 0x0f}, + {0x03, 0xe3, 0x83, 0x08}, + {0x03, 0xe3, 0x83, 0x09}, + {0x03, 0xe3, 0x83, 0x2c}, + {0x03, 0xe3, 0x83, 0x0c}, + {0x03, 0xe3, 0x82, 0x13}, + {0x03, 0xe3, 0x82, 0x16}, + {0x03, 0xe3, 0x82, 0x15}, + {0x03, 0xe3, 0x82, 0x1c}, + {0x03, 0xe3, 0x82, 0x1f}, + {0x03, 0xe3, 0x82, 0x1d}, + {0x03, 0xe3, 0x82, 0x1a}, + {0x03, 0xe3, 0x82, 0x17}, + {0x03, 0xe3, 0x82, 0x08}, + {0x03, 0xe3, 0x82, 0x09}, + {0x03, 0xe3, 0x82, 0x0e}, + {0x03, 0xe3, 0x82, 0x04}, + {0x03, 0xe3, 0x82, 0x05}, + {0x03, 0xe3, 0x82, 0x3f}, + {0x03, 0xe3, 0x83, 0x00}, + {0x03, 0xe3, 0x83, 0x06}, + {0x03, 0xe3, 0x83, 0x05}, + {0x03, 0xe3, 0x83, 0x0d}, + {0x03, 0xe3, 0x83, 0x0b}, + {0x03, 0xe3, 0x83, 0x07}, + {0x03, 0xe3, 0x83, 0x19}, + {0x03, 0xe3, 0x83, 0x15}, + {0x03, 0xe3, 0x83, 0x11}, + {0x03, 0xe3, 0x83, 0x31}, + {0x03, 0xe3, 0x83, 0x33}, + {0x03, 0xe3, 0x83, 0x30}, + {0x03, 0xe3, 0x83, 0x3e}, + {0x03, 0xe3, 0x83, 0x32}, + {0x03, 0xe3, 0x83, 0x36}, + {0x03, 0xe3, 0x83, 0x2e}, + {0x03, 0xe3, 0x82, 0x07}, + {0x03, 0xe3, 0x85, 0x04}, + {0x03, 0xe3, 0x84, 0x10}, + {0x03, 0xe3, 0x85, 0x30}, + {0x03, 0xe3, 0x85, 
0x0d}, + {0x03, 0xe3, 0x85, 0x13}, + {0x03, 0xe3, 0x85, 0x15}, + {0x03, 0xe3, 0x85, 0x17}, + {0x03, 0xe3, 0x85, 0x1f}, + {0x03, 0xe3, 0x85, 0x1d}, + {0x03, 0xe3, 0x85, 0x1b}, + {0x03, 0xe3, 0x85, 0x09}, + {0x03, 0xe3, 0x85, 0x0f}, + {0x03, 0xe3, 0x85, 0x0b}, + {0x03, 0xe3, 0x85, 0x37}, + {0x03, 0xe3, 0x85, 0x3b}, + {0x03, 0xe3, 0x85, 0x39}, + {0x03, 0xe3, 0x85, 0x3f}, + {0x02, 0xc2, 0x02, 0x00}, + {0x02, 0xc2, 0x0e, 0x00}, + {0x02, 0xc2, 0x0c, 0x00}, + {0x02, 0xc2, 0x00, 0x00}, + {0x03, 0xe2, 0x82, 0x0f}, + {0x03, 0xe2, 0x94, 0x2a}, + {0x03, 0xe2, 0x86, 0x39}, + {0x03, 0xe2, 0x86, 0x3b}, + {0x03, 0xe2, 0x86, 0x3f}, + {0x03, 0xe2, 0x96, 0x0d}, + {0x03, 0xe2, 0x97, 0x25}, +} + +// Total table size 15320 bytes (14KiB) diff --git a/vendor/golang.org/x/tools/cmd/goimports/doc.go b/vendor/golang.org/x/tools/cmd/goimports/doc.go new file mode 100644 index 000000000..7033e4d4c --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/goimports/doc.go @@ -0,0 +1,43 @@ +/* + +Command goimports updates your Go import lines, +adding missing ones and removing unreferenced ones. + + $ go get golang.org/x/tools/cmd/goimports + +In addition to fixing imports, goimports also formats +your code in the same style as gofmt so it can be used +as a replacement for your editor's gofmt-on-save hook. + +For emacs, make sure you have the latest go-mode.el: + https://github.com/dominikh/go-mode.el +Then in your .emacs file: + (setq gofmt-command "goimports") + (add-hook 'before-save-hook 'gofmt-before-save) + +For vim, set "gofmt_command" to "goimports": + https://golang.org/change/39c724dd7f252 + https://golang.org/wiki/IDEsAndTextEditorPlugins + etc + +For GoSublime, follow the steps described here: + http://michaelwhatcott.com/gosublime-goimports/ + +For other editors, you probably know what to do. + +To exclude directories in your $GOPATH from being scanned for Go +files, goimports respects a configuration file at +$GOPATH/src/.goimportsignore which may contain blank lines, comment +lines (beginning with '#'), or lines naming a directory relative to +the configuration file to ignore when scanning. No globbing or regex +patterns are allowed. Use the "-v" verbose flag to verify it's +working and see what goimports is doing. + +File bugs or feature requests at: + + https://golang.org/issues/new?title=x/tools/cmd/goimports:+ + +Happy hacking! + +*/ +package main // import "golang.org/x/tools/cmd/goimports" diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go new file mode 100644 index 000000000..27708972d --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go @@ -0,0 +1,380 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "go/scanner" + "io" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "runtime/pprof" + "strings" + + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/imports" +) + +var ( + // main operation modes + list = flag.Bool("l", false, "list files whose formatting differs from goimport's") + write = flag.Bool("w", false, "write result to (source) file instead of stdout") + doDiff = flag.Bool("d", false, "display diffs instead of rewriting files") + srcdir = flag.String("srcdir", "", "choose imports as if source code is from `dir`. 
When operating on a single file, dir may instead be the complete file name.") + + verbose bool // verbose logging + + cpuProfile = flag.String("cpuprofile", "", "CPU profile output") + memProfile = flag.String("memprofile", "", "memory profile output") + memProfileRate = flag.Int("memrate", 0, "if > 0, sets runtime.MemProfileRate") + + options = &imports.Options{ + TabWidth: 8, + TabIndent: true, + Comments: true, + Fragment: true, + Env: &imports.ProcessEnv{ + GocmdRunner: &gocommand.Runner{}, + }, + } + exitCode = 0 +) + +func init() { + flag.BoolVar(&options.AllErrors, "e", false, "report all errors (not just the first 10 on different lines)") + flag.StringVar(&options.LocalPrefix, "local", "", "put imports beginning with this string after 3rd-party packages; comma-separated list") + flag.BoolVar(&options.FormatOnly, "format-only", false, "if true, don't fix imports and only format. In this mode, goimports is effectively gofmt, with the addition that imports are grouped into sections.") +} + +func report(err error) { + scanner.PrintError(os.Stderr, err) + exitCode = 2 +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: goimports [flags] [path ...]\n") + flag.PrintDefaults() + os.Exit(2) +} + +func isGoFile(f os.FileInfo) bool { + // ignore non-Go files + name := f.Name() + return !f.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") +} + +// argumentType is which mode goimports was invoked as. +type argumentType int + +const ( + // fromStdin means the user is piping their source into goimports. + fromStdin argumentType = iota + + // singleArg is the common case from editors, when goimports is run on + // a single file. + singleArg + + // multipleArg is when the user ran "goimports file1.go file2.go" + // or ran goimports on a directory tree. + multipleArg +) + +func processFile(filename string, in io.Reader, out io.Writer, argType argumentType) error { + opt := options + if argType == fromStdin { + nopt := *options + nopt.Fragment = true + opt = &nopt + } + + if in == nil { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + in = f + } + + src, err := ioutil.ReadAll(in) + if err != nil { + return err + } + + target := filename + if *srcdir != "" { + // Determine whether the provided -srcdirc is a directory or file + // and then use it to override the target. + // + // See https://github.com/dominikh/go-mode.el/issues/146 + if isFile(*srcdir) { + if argType == multipleArg { + return errors.New("-srcdir value can't be a file when passing multiple arguments or when walking directories") + } + target = *srcdir + } else if argType == singleArg && strings.HasSuffix(*srcdir, ".go") && !isDir(*srcdir) { + // For a file which doesn't exist on disk yet, but might shortly. + // e.g. user in editor opens $DIR/newfile.go and newfile.go doesn't yet exist on disk. + // The goimports on-save hook writes the buffer to a temp file + // first and runs goimports before the actual save to newfile.go. + // The editor's buffer is named "newfile.go" so that is passed to goimports as: + // goimports -srcdir=/gopath/src/pkg/newfile.go /tmp/gofmtXXXXXXXX.go + // and then the editor reloads the result from the tmp file and writes + // it to newfile.go. + target = *srcdir + } else { + // Pretend that file is from *srcdir in order to decide + // visible imports correctly. 
+ target = filepath.Join(*srcdir, filepath.Base(filename)) + } + } + + res, err := imports.Process(target, src, opt) + if err != nil { + return err + } + + if !bytes.Equal(src, res) { + // formatting has changed + if *list { + fmt.Fprintln(out, filename) + } + if *write { + if argType == fromStdin { + // filename is "" + return errors.New("can't use -w on stdin") + } + // On Windows, we need to re-set the permissions from the file. See golang/go#38225. + var perms os.FileMode + if fi, err := os.Stat(filename); err == nil { + perms = fi.Mode() & os.ModePerm + } + err = ioutil.WriteFile(filename, res, perms) + if err != nil { + return err + } + } + if *doDiff { + if argType == fromStdin { + filename = "stdin.go" // because .orig looks silly + } + data, err := diff(src, res, filename) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Printf("diff -u %s %s\n", filepath.ToSlash(filename+".orig"), filepath.ToSlash(filename)) + out.Write(data) + } + } + + if !*list && !*write && !*doDiff { + _, err = out.Write(res) + } + + return err +} + +func visitFile(path string, f os.FileInfo, err error) error { + if err == nil && isGoFile(f) { + err = processFile(path, nil, os.Stdout, multipleArg) + } + if err != nil { + report(err) + } + return nil +} + +func walkDir(path string) { + filepath.Walk(path, visitFile) +} + +func main() { + runtime.GOMAXPROCS(runtime.NumCPU()) + + // call gofmtMain in a separate function + // so that it can use defer and have them + // run before the exit. + gofmtMain() + os.Exit(exitCode) +} + +// parseFlags parses command line flags and returns the paths to process. +// It's a var so that custom implementations can replace it in other files. +var parseFlags = func() []string { + flag.BoolVar(&verbose, "v", false, "verbose logging") + + flag.Parse() + return flag.Args() +} + +func bufferedFileWriter(dest string) (w io.Writer, close func()) { + f, err := os.Create(dest) + if err != nil { + log.Fatal(err) + } + bw := bufio.NewWriter(f) + return bw, func() { + if err := bw.Flush(); err != nil { + log.Fatalf("error flushing %v: %v", dest, err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } + } +} + +func gofmtMain() { + flag.Usage = usage + paths := parseFlags() + + if *cpuProfile != "" { + bw, flush := bufferedFileWriter(*cpuProfile) + pprof.StartCPUProfile(bw) + defer flush() + defer pprof.StopCPUProfile() + } + // doTrace is a conditionally compiled wrapper around runtime/trace. It is + // used to allow goimports to compile under gccgo, which does not support + // runtime/trace. See https://golang.org/issue/15544. 
+ defer doTrace()() + if *memProfileRate > 0 { + runtime.MemProfileRate = *memProfileRate + bw, flush := bufferedFileWriter(*memProfile) + defer func() { + runtime.GC() // materialize all statistics + if err := pprof.WriteHeapProfile(bw); err != nil { + log.Fatal(err) + } + flush() + }() + } + + if verbose { + log.SetFlags(log.LstdFlags | log.Lmicroseconds) + options.Env.Logf = log.Printf + } + if options.TabWidth < 0 { + fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth) + exitCode = 2 + return + } + + if len(paths) == 0 { + if err := processFile("", os.Stdin, os.Stdout, fromStdin); err != nil { + report(err) + } + return + } + + argType := singleArg + if len(paths) > 1 { + argType = multipleArg + } + + for _, path := range paths { + switch dir, err := os.Stat(path); { + case err != nil: + report(err) + case dir.IsDir(): + walkDir(path) + default: + if err := processFile(path, nil, os.Stdout, argType); err != nil { + report(err) + } + } + } +} + +func writeTempFile(dir, prefix string, data []byte) (string, error) { + file, err := ioutil.TempFile(dir, prefix) + if err != nil { + return "", err + } + _, err = file.Write(data) + if err1 := file.Close(); err == nil { + err = err1 + } + if err != nil { + os.Remove(file.Name()) + return "", err + } + return file.Name(), nil +} + +func diff(b1, b2 []byte, filename string) (data []byte, err error) { + f1, err := writeTempFile("", "gofmt", b1) + if err != nil { + return + } + defer os.Remove(f1) + + f2, err := writeTempFile("", "gofmt", b2) + if err != nil { + return + } + defer os.Remove(f2) + + cmd := "diff" + if runtime.GOOS == "plan9" { + cmd = "/bin/ape/diff" + } + + data, err = exec.Command(cmd, "-u", f1, f2).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + return replaceTempFilename(data, filename) + } + return +} + +// replaceTempFilename replaces temporary filenames in diff with actual one. +// +// --- /tmp/gofmt316145376 2017-02-03 19:13:00.280468375 -0500 +// +++ /tmp/gofmt617882815 2017-02-03 19:13:00.280468375 -0500 +// ... +// -> +// --- path/to/file.go.orig 2017-02-03 19:13:00.280468375 -0500 +// +++ path/to/file.go 2017-02-03 19:13:00.280468375 -0500 +// ... +func replaceTempFilename(diff []byte, filename string) ([]byte, error) { + bs := bytes.SplitN(diff, []byte{'\n'}, 3) + if len(bs) < 3 { + return nil, fmt.Errorf("got unexpected diff for %s", filename) + } + // Preserve timestamps. + var t0, t1 []byte + if i := bytes.LastIndexByte(bs[0], '\t'); i != -1 { + t0 = bs[0][i:] + } + if i := bytes.LastIndexByte(bs[1], '\t'); i != -1 { + t1 = bs[1][i:] + } + // Always print filepath with slash separator. + f := filepath.ToSlash(filename) + bs[0] = []byte(fmt.Sprintf("--- %s%s", f+".orig", t0)) + bs[1] = []byte(fmt.Sprintf("+++ %s%s", f, t1)) + return bytes.Join(bs, []byte{'\n'}), nil +} + +// isFile reports whether name is a file. +func isFile(name string) bool { + fi, err := os.Stat(name) + return err == nil && fi.Mode().IsRegular() +} + +// isDir reports whether name is a directory. +func isDir(name string) bool { + fi, err := os.Stat(name) + return err == nil && fi.IsDir() +} diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go b/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go new file mode 100644 index 000000000..21d867eaa --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports_gc.go @@ -0,0 +1,26 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build gc + +package main + +import ( + "flag" + "runtime/trace" +) + +var traceProfile = flag.String("trace", "", "trace profile output") + +func doTrace() func() { + if *traceProfile != "" { + bw, flush := bufferedFileWriter(*traceProfile) + trace.Start(bw) + return func() { + flush() + trace.Stop() + } + } + return func() {} +} diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go b/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go new file mode 100644 index 000000000..f5531ceb3 --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports_not_gc.go @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gc + +package main + +func doTrace() func() { + return func() {} +} diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go index ea605f4fd..8c9977355 100644 --- a/vendor/golang.org/x/tools/go/analysis/analysis.go +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -7,6 +7,8 @@ import ( "go/token" "go/types" "reflect" + + "golang.org/x/tools/internal/analysisinternal" ) // An Analyzer describes an analysis function and its options. @@ -69,6 +71,17 @@ type Analyzer struct { func (a *Analyzer) String() string { return a.Name } +func init() { + // Set the analysisinternal functions to be able to pass type errors + // to the Pass type without modifying the go/analysis API. + analysisinternal.SetTypeErrors = func(p interface{}, errors []types.Error) { + p.(*Pass).typeErrors = errors + } + analysisinternal.GetTypeErrors = func(p interface{}) []types.Error { + return p.(*Pass).typeErrors + } +} + // A Pass provides information to the Run function that // applies a specific analyzer to a single Go package. // @@ -138,6 +151,9 @@ type Pass struct { // WARNING: This is an experimental API and may change in the future. AllObjectFacts func() []ObjectFact + // typeErrors contains types.Errors that are associated with the pkg. + typeErrors []types.Error + /* Further fields may be added in future. */ // For example, suggested or applied refactorings. } diff --git a/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go b/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go deleted file mode 100644 index 74043c59d..000000000 --- a/vendor/golang.org/x/tools/go/analysis/analysistest/analysistest.go +++ /dev/null @@ -1,400 +0,0 @@ -// Package analysistest provides utilities for testing analyzers. -package analysistest - -import ( - "fmt" - "go/token" - "go/types" - "io/ioutil" - "log" - "os" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "text/scanner" - - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/go/analysis/internal/checker" - "golang.org/x/tools/go/packages" - "golang.org/x/tools/internal/testenv" -) - -// WriteFiles is a helper function that creates a temporary directory -// and populates it with a GOPATH-style project using filemap (which -// maps file names to contents). On success it returns the name of the -// directory and a cleanup function to delete it. 
-func WriteFiles(filemap map[string]string) (dir string, cleanup func(), err error) { - gopath, err := ioutil.TempDir("", "analysistest") - if err != nil { - return "", nil, err - } - cleanup = func() { os.RemoveAll(gopath) } - - for name, content := range filemap { - filename := filepath.Join(gopath, "src", name) - os.MkdirAll(filepath.Dir(filename), 0777) // ignore error - if err := ioutil.WriteFile(filename, []byte(content), 0666); err != nil { - cleanup() - return "", nil, err - } - } - return gopath, cleanup, nil -} - -// TestData returns the effective filename of -// the program's "testdata" directory. -// This function may be overridden by projects using -// an alternative build system (such as Blaze) that -// does not run a test in its package directory. -var TestData = func() string { - testdata, err := filepath.Abs("testdata") - if err != nil { - log.Fatal(err) - } - return testdata -} - -// Testing is an abstraction of a *testing.T. -type Testing interface { - Errorf(format string, args ...interface{}) -} - -// Run applies an analysis to the packages denoted by the "go list" patterns. -// -// It loads the packages from the specified GOPATH-style project -// directory using golang.org/x/tools/go/packages, runs the analysis on -// them, and checks that each analysis emits the expected diagnostics -// and facts specified by the contents of '// want ...' comments in the -// package's source files. -// -// An expectation of a Diagnostic is specified by a string literal -// containing a regular expression that must match the diagnostic -// message. For example: -// -// fmt.Printf("%s", 1) // want `cannot provide int 1 to %s` -// -// An expectation of a Fact associated with an object is specified by -// 'name:"pattern"', where name is the name of the object, which must be -// declared on the same line as the comment, and pattern is a regular -// expression that must match the string representation of the fact, -// fmt.Sprint(fact). For example: -// -// func panicf(format string, args interface{}) { // want panicf:"printfWrapper" -// -// Package facts are specified by the name "package" and appear on -// line 1 of the first source file of the package. -// -// A single 'want' comment may contain a mixture of diagnostic and fact -// expectations, including multiple facts about the same object: -// -// // want "diag" "diag2" x:"fact1" x:"fact2" y:"fact3" -// -// Unexpected diagnostics and facts, and unmatched expectations, are -// reported as errors to the Testing. -// -// Run reports an error to the Testing if loading or analysis failed. -// Run also returns a Result for each package for which analysis was -// attempted, even if unsuccessful. It is safe for a test to ignore all -// the results, but a test may use it to perform additional checks. -func Run(t Testing, dir string, a *analysis.Analyzer, patterns ...string) []*Result { - if t, ok := t.(testenv.Testing); ok { - testenv.NeedsGoPackages(t) - } - - pkgs, err := loadPackages(dir, patterns...) - if err != nil { - t.Errorf("loading %s: %v", patterns, err) - return nil - } - - results := checker.TestAnalyzer(a, pkgs) - for _, result := range results { - if result.Err != nil { - t.Errorf("error analyzing %s: %v", result.Pass, result.Err) - } else { - check(t, dir, result.Pass, result.Diagnostics, result.Facts) - } - } - return results -} - -// A Result holds the result of applying an analyzer to a package. 
-type Result = checker.TestAnalyzerResult - -// loadPackages uses go/packages to load a specified packages (from source, with -// dependencies) from dir, which is the root of a GOPATH-style project -// tree. It returns an error if any package had an error, or the pattern -// matched no packages. -func loadPackages(dir string, patterns ...string) ([]*packages.Package, error) { - // packages.Load loads the real standard library, not a minimal - // fake version, which would be more efficient, especially if we - // have many small tests that import, say, net/http. - // However there is no easy way to make go/packages to consume - // a list of packages we generate and then do the parsing and - // typechecking, though this feature seems to be a recurring need. - - cfg := &packages.Config{ - Mode: packages.LoadAllSyntax, - Dir: dir, - Tests: true, - Env: append(os.Environ(), "GOPATH="+dir, "GO111MODULE=off", "GOPROXY=off"), - } - pkgs, err := packages.Load(cfg, patterns...) - if err != nil { - return nil, err - } - - // Print errors but do not stop: - // some Analyzers may be disposed to RunDespiteErrors. - packages.PrintErrors(pkgs) - - if len(pkgs) == 0 { - return nil, fmt.Errorf("no packages matched %s", patterns) - } - return pkgs, nil -} - -// check inspects an analysis pass on which the analysis has already -// been run, and verifies that all reported diagnostics and facts match -// specified by the contents of "// want ..." comments in the package's -// source files, which must have been parsed with comments enabled. -func check(t Testing, gopath string, pass *analysis.Pass, diagnostics []analysis.Diagnostic, facts map[types.Object][]analysis.Fact) { - - type key struct { - file string - line int - } - - want := make(map[key][]expectation) - - // processComment parses expectations out of comments. - processComment := func(filename string, linenum int, text string) { - text = strings.TrimSpace(text) - - // Any comment starting with "want" is treated - // as an expectation, even without following whitespace. - if rest := strings.TrimPrefix(text, "want"); rest != text { - expects, err := parseExpectations(rest) - if err != nil { - t.Errorf("%s:%d: in 'want' comment: %s", filename, linenum, err) - return - } - if expects != nil { - want[key{filename, linenum}] = expects - } - } - } - - // Extract 'want' comments from Go files. - for _, f := range pass.Files { - for _, cgroup := range f.Comments { - for _, c := range cgroup.List { - - text := strings.TrimPrefix(c.Text, "//") - if text == c.Text { // not a //-comment. - text = strings.TrimPrefix(text, "/*") - text = strings.TrimSuffix(text, "*/") - } - - // Hack: treat a comment of the form "//...// want..." - // or "/*...// want... */ - // as if it starts at 'want'. - // This allows us to add comments on comments, - // as required when testing the buildtag analyzer. - if i := strings.Index(text, "// want"); i >= 0 { - text = text[i+len("// "):] - } - - // It's tempting to compute the filename - // once outside the loop, but it's - // incorrect because it can change due - // to //line directives. - posn := pass.Fset.Position(c.Pos()) - filename := sanitize(gopath, posn.Filename) - processComment(filename, posn.Line, text) - } - } - } - - // Extract 'want' comments from non-Go files. - // TODO(adonovan): we may need to handle //line directives. 
- for _, filename := range pass.OtherFiles { - data, err := ioutil.ReadFile(filename) - if err != nil { - t.Errorf("can't read '// want' comments from %s: %v", filename, err) - continue - } - filename := sanitize(gopath, filename) - linenum := 0 - for _, line := range strings.Split(string(data), "\n") { - linenum++ - if i := strings.Index(line, "//"); i >= 0 { - line = line[i+len("//"):] - processComment(filename, linenum, line) - } - } - } - - checkMessage := func(posn token.Position, kind, name, message string) { - posn.Filename = sanitize(gopath, posn.Filename) - k := key{posn.Filename, posn.Line} - expects := want[k] - var unmatched []string - for i, exp := range expects { - if exp.kind == kind && exp.name == name { - if exp.rx.MatchString(message) { - // matched: remove the expectation. - expects[i] = expects[len(expects)-1] - expects = expects[:len(expects)-1] - want[k] = expects - return - } - unmatched = append(unmatched, fmt.Sprintf("%q", exp.rx)) - } - } - if unmatched == nil { - t.Errorf("%v: unexpected %s: %v", posn, kind, message) - } else { - t.Errorf("%v: %s %q does not match pattern %s", - posn, kind, message, strings.Join(unmatched, " or ")) - } - } - - // Check the diagnostics match expectations. - for _, f := range diagnostics { - // TODO(matloob): Support ranges in analysistest. - posn := pass.Fset.Position(f.Pos) - checkMessage(posn, "diagnostic", "", f.Message) - } - - // Check the facts match expectations. - // Report errors in lexical order for determinism. - // (It's only deterministic within each file, not across files, - // because go/packages does not guarantee file.Pos is ascending - // across the files of a single compilation unit.) - var objects []types.Object - for obj := range facts { - objects = append(objects, obj) - } - sort.Slice(objects, func(i, j int) bool { - // Package facts compare less than object facts. - ip, jp := objects[i] == nil, objects[j] == nil // whether i, j is a package fact - if ip != jp { - return ip && !jp - } - return objects[i].Pos() < objects[j].Pos() - }) - for _, obj := range objects { - var posn token.Position - var name string - if obj != nil { - // Object facts are reported on the declaring line. - name = obj.Name() - posn = pass.Fset.Position(obj.Pos()) - } else { - // Package facts are reported at the start of the file. - name = "package" - posn = pass.Fset.Position(pass.Files[0].Pos()) - posn.Line = 1 - } - - for _, fact := range facts[obj] { - checkMessage(posn, "fact", name, fmt.Sprint(fact)) - } - } - - // Reject surplus expectations. - // - // Sometimes an Analyzer reports two similar diagnostics on a - // line with only one expectation. The reader may be confused by - // the error message. - // TODO(adonovan): print a better error: - // "got 2 diagnostics here; each one needs its own expectation". - var surplus []string - for key, expects := range want { - for _, exp := range expects { - err := fmt.Sprintf("%s:%d: no %s was reported matching %q", key.file, key.line, exp.kind, exp.rx) - surplus = append(surplus, err) - } - } - sort.Strings(surplus) - for _, err := range surplus { - t.Errorf("%s", err) - } -} - -type expectation struct { - kind string // either "fact" or "diagnostic" - name string // name of object to which fact belongs, or "package" ("fact" only) - rx *regexp.Regexp -} - -func (ex expectation) String() string { - return fmt.Sprintf("%s %s:%q", ex.kind, ex.name, ex.rx) // for debugging -} - -// parseExpectations parses the content of a "// want ..." 
comment -// and returns the expectations, a mixture of diagnostics ("rx") and -// facts (name:"rx"). -func parseExpectations(text string) ([]expectation, error) { - var scanErr string - sc := new(scanner.Scanner).Init(strings.NewReader(text)) - sc.Error = func(s *scanner.Scanner, msg string) { - scanErr = msg // e.g. bad string escape - } - sc.Mode = scanner.ScanIdents | scanner.ScanStrings | scanner.ScanRawStrings - - scanRegexp := func(tok rune) (*regexp.Regexp, error) { - if tok != scanner.String && tok != scanner.RawString { - return nil, fmt.Errorf("got %s, want regular expression", - scanner.TokenString(tok)) - } - pattern, _ := strconv.Unquote(sc.TokenText()) // can't fail - return regexp.Compile(pattern) - } - - var expects []expectation - for { - tok := sc.Scan() - switch tok { - case scanner.String, scanner.RawString: - rx, err := scanRegexp(tok) - if err != nil { - return nil, err - } - expects = append(expects, expectation{"diagnostic", "", rx}) - - case scanner.Ident: - name := sc.TokenText() - tok = sc.Scan() - if tok != ':' { - return nil, fmt.Errorf("got %s after %s, want ':'", - scanner.TokenString(tok), name) - } - tok = sc.Scan() - rx, err := scanRegexp(tok) - if err != nil { - return nil, err - } - expects = append(expects, expectation{"fact", name, rx}) - - case scanner.EOF: - if scanErr != "" { - return nil, fmt.Errorf("%s", scanErr) - } - return expects, nil - - default: - return nil, fmt.Errorf("unexpected %s", scanner.TokenString(tok)) - } - } -} - -// sanitize removes the GOPATH portion of the filename, -// typically a gnarly /tmp directory, and returns the rest. -func sanitize(gopath, filename string) string { - prefix := gopath + string(os.PathSeparator) + "src" + string(os.PathSeparator) - return filepath.ToSlash(strings.TrimPrefix(filename, prefix)) -} diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go index ea56b724e..fb17a0e41 100644 --- a/vendor/golang.org/x/tools/go/analysis/doc.go +++ b/vendor/golang.org/x/tools/go/analysis/doc.go @@ -170,6 +170,15 @@ Diagnostic is defined as: The optional Category field is a short identifier that classifies the kind of message when an analysis produces several kinds of diagnostic. +Many analyses want to associate diagnostics with a severity level. +Because Diagnostic does not have a severity level field, an Analyzer's +diagnostics effectively all have the same severity level. To separate which +diagnostics are high severity and which are low severity, expose multiple +Analyzers instead. Analyzers should also be separated when their +diagnostics belong in different groups, or could be tagged differently +before being shown to the end user. Analyzers should document their severity +level to help downstream tools surface diagnostics properly. + Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl and buildtag, inspect the raw text of Go source files or even non-Go files such as assembly. To report a diagnostic against a line of a diff --git a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go index 0778f4220..4b7be2d1f 100644 --- a/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go +++ b/vendor/golang.org/x/tools/go/analysis/internal/analysisflags/flags.go @@ -382,7 +382,7 @@ func (tree JSONTree) Add(fset *token.FileSet, id, name string, diags []analysis. 
func (tree JSONTree) Print() { data, err := json.MarshalIndent(tree, "", "\t") if err != nil { - log.Panicf("internal error: JSON marshalling failed: %v", err) + log.Panicf("internal error: JSON marshaling failed: %v", err) } fmt.Printf("%s\n", data) } diff --git a/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go index 671a9696a..5ccfb1637 100644 --- a/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go +++ b/vendor/golang.org/x/tools/go/analysis/internal/checker/checker.go @@ -32,6 +32,8 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/internal/analysisflags" "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/analysisinternal" + "golang.org/x/tools/internal/span" ) var ( @@ -651,6 +653,36 @@ func (act *action) execOnce() { } act.pass = pass + var errors []types.Error + // Get any type errors that are attributed to the pkg. + // This is necessary to test analyzers that provide + // suggested fixes for compiler/type errors. + for _, err := range act.pkg.Errors { + if err.Kind != packages.TypeError { + continue + } + // err.Pos is a string of form: "file:line:col" or "file:line" or "" or "-" + spn := span.Parse(err.Pos) + // Extract the token positions from the error string. + line, col, offset := spn.Start().Line(), spn.Start().Column(), -1 + act.pkg.Fset.Iterate(func(f *token.File) bool { + if f.Name() != spn.URI().Filename() { + return true + } + offset = int(f.LineStart(line)) + col - 1 + return false + }) + if offset == -1 { + continue + } + errors = append(errors, types.Error{ + Fset: act.pkg.Fset, + Msg: err.Msg, + Pos: token.Pos(offset), + }) + } + analysisinternal.SetTypeErrors(pass, errors) + var err error if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors { err = fmt.Errorf("analysis skipped due to errors in package") @@ -768,8 +800,13 @@ func exportedFrom(obj types.Object, pkg *types.Package) bool { return obj.Exported() && obj.Pkg() == pkg || obj.Type().(*types.Signature).Recv() != nil case *types.Var: - return obj.Exported() && obj.Pkg() == pkg || - obj.IsField() + if obj.IsField() { + return true + } + // we can't filter more aggressively than this because we need + // to consider function parameters exported, but have no way + // of telling apart function parameters from local variables. + return obj.Pkg() == pkg case *types.TypeName, *types.Const: return true } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go b/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go index b80271afb..384f02557 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/errorsas/errorsas.go @@ -51,7 +51,7 @@ func run(pass *analysis.Pass) (interface{}, error) { return // not enough arguments, e.g. 
called with return values of another function } if fn.FullName() == "errors.As" && !pointerToInterfaceOrError(pass, call.Args[1]) { - pass.ReportRangef(call, "second argument to errors.As must be a pointer to an interface or a type implementing error") + pass.ReportRangef(call, "second argument to errors.As must be a non-nil pointer to either a type that implements error, or to any interface type") } }) return nil, nil diff --git a/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go b/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go index 948dfe663..27b1b8400 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go @@ -11,6 +11,7 @@ package findcall import ( + "fmt" "go/ast" "go/types" @@ -48,7 +49,18 @@ func run(pass *analysis.Pass) (interface{}, error) { id = fun.Sel } if id != nil && !pass.TypesInfo.Types[id].IsType() && id.Name == name { - pass.Reportf(call.Lparen, "call of %s(...)", id.Name) + pass.Report(analysis.Diagnostic{ + Pos: call.Lparen, + Message: fmt.Sprintf("call of %s(...)", id.Name), + SuggestedFixes: []analysis.SuggestedFix{{ + Message: fmt.Sprintf("Add '_TEST_'"), + TextEdits: []analysis.TextEdit{{ + Pos: call.Lparen, + End: call.Lparen, + NewText: []byte("_TEST_"), + }}, + }}, + }) } } return true diff --git a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go index 5fe3f2673..f0d2c7edf 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/nilness/nilness.go @@ -20,10 +20,13 @@ import ( const Doc = `check for redundant or impossible nil comparisons The nilness checker inspects the control-flow graph of each function in -a package and reports nil pointer dereferences and degenerate nil -pointers. A degenerate comparison is of the form x==nil or x!=nil where x -is statically known to be nil or non-nil. These are often a mistake, -especially in control flow related to errors. +a package and reports nil pointer dereferences, degenerate nil +pointers, and panics with nil values. A degenerate comparison is of the form +x==nil or x!=nil where x is statically known to be nil or non-nil. These are +often a mistake, especially in control flow related to errors. Panics with nil +values are checked because they are not detectable by + + if r := recover(); r != nil { This check reports conditions such as: @@ -42,6 +45,12 @@ and: if p == nil { print(*p) // nil dereference } + +and: + + if p == nil { + panic(p) + } ` var Analyzer = &analysis.Analyzer{ @@ -109,7 +118,9 @@ func runFunc(pass *analysis.Pass, fn *ssa.Function) { case *ssa.Store: notNil(stack, instr, instr.Addr, "store") case *ssa.TypeAssert: - notNil(stack, instr, instr.X, "type assertion") + if !instr.CommaOk { + notNil(stack, instr, instr.X, "type assertion") + } case *ssa.UnOp: if instr.Op == token.MUL { // *X notNil(stack, instr, instr.X, "load") @@ -117,6 +128,16 @@ func runFunc(pass *analysis.Pass, fn *ssa.Function) { } } + // Look for panics with nil value + for _, instr := range b.Instrs { + switch instr := instr.(type) { + case *ssa.Panic: + if nilnessOf(stack, instr.X) == isnil { + reportf("nilpanic", instr.Pos(), "panic with nil value") + } + } + } + // For nil comparison blocks, report an error if the condition // is degenerate, and push a nilness fact on the stack when // visiting its true and false successor blocks. 
@@ -158,15 +179,15 @@ func runFunc(pass *analysis.Pass, fn *ssa.Function) { // "if x == nil" or "if nil == y" condition; x, y are unknown. if xnil == isnil || ynil == isnil { - var f fact + var newFacts facts if xnil == isnil { // x is nil, y is unknown: // t successor learns y is nil. - f = fact{binop.Y, isnil} + newFacts = expandFacts(fact{binop.Y, isnil}) } else { // x is nil, y is unknown: // t successor learns x is nil. - f = fact{binop.X, isnil} + newFacts = expandFacts(fact{binop.X, isnil}) } for _, d := range b.Dominees() { @@ -177,9 +198,9 @@ func runFunc(pass *analysis.Pass, fn *ssa.Function) { s := stack if len(d.Preds) == 1 { if d == tsucc { - s = append(s, f) + s = append(s, newFacts...) } else if d == fsucc { - s = append(s, f.negate()) + s = append(s, newFacts.negate()...) } } visit(d, s) @@ -223,6 +244,23 @@ func (n nilness) String() string { return nilnessStrings[n+1] } // nilnessOf reports whether v is definitely nil, definitely not nil, // or unknown given the dominating stack of facts. func nilnessOf(stack []fact, v ssa.Value) nilness { + switch v := v.(type) { + // unwrap ChangeInterface values recursively, to detect if underlying + // values have any facts recorded or are otherwise known with regard to nilness. + // + // This work must be in addition to expanding facts about + // ChangeInterfaces during inference/fact gathering because this covers + // cases where the nilness of a value is intrinsic, rather than based + // on inferred facts, such as a zero value interface variable. That + // said, this work alone would only inform us when facts are about + // underlying values, rather than outer values, when the analysis is + // transitive in both directions. + case *ssa.ChangeInterface: + if underlying := nilnessOf(stack, v.X); underlying != unknown { + return underlying + } + } + // Is value intrinsically nil or non-nil? switch v := v.(type) { case *ssa.Alloc, @@ -269,3 +307,48 @@ func eq(b *ssa.BasicBlock) (op *ssa.BinOp, tsucc, fsucc *ssa.BasicBlock) { } return nil, nil, nil } + +// expandFacts takes a single fact and returns the set of facts that can be +// known about it or any of its related values. Some operations, like +// ChangeInterface, have transitive nilness, such that if you know the +// underlying value is nil, you also know the value itself is nil, and vice +// versa. This operation allows callers to match on any of the related values +// in analyses, rather than just the one form of the value that happend to +// appear in a comparison. +// +// This work must be in addition to unwrapping values within nilnessOf because +// while this work helps give facts about transitively known values based on +// inferred facts, the recursive check within nilnessOf covers cases where +// nilness facts are intrinsic to the underlying value, such as a zero value +// interface variables. +// +// ChangeInterface is the only expansion currently supported, but others, like +// Slice, could be added. At this time, this tool does not check slice +// operations in a way this expansion could help. See +// https://play.golang.org/p/mGqXEp7w4fR for an example. 
+func expandFacts(f fact) []fact { + ff := []fact{f} + +Loop: + for { + switch v := f.value.(type) { + case *ssa.ChangeInterface: + f = fact{v.X, f.nilness} + ff = append(ff, f) + default: + break Loop + } + } + + return ff +} + +type facts []fact + +func (ff facts) negate() facts { + nn := make([]fact, len(ff)) + for i, f := range ff { + nn[i] = f.negate() + } + return nn +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go index ebd7f6e34..ddad4c796 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -508,9 +508,13 @@ func printfNameAndKind(pass *analysis.Pass, call *ast.CallExpr) (fn *types.Func, return fn, KindNone } -// isFormatter reports whether t satisfies fmt.Formatter. +// isFormatter reports whether t could satisfy fmt.Formatter. // The only interface method to look for is "Format(State, rune)". func isFormatter(typ types.Type) bool { + // If the type is an interface, the value it holds might satisfy fmt.Formatter. + if _, ok := typ.Underlying().(*types.Interface); ok { + return true + } obj, _, _ := types.LookupFieldOrMethod(typ, false, nil, "Format") fn, ok := obj.(*types.Func) if !ok { @@ -801,6 +805,7 @@ var printVerbs = []printVerb{ {'g', sharpNumFlag, argFloat | argComplex}, {'G', sharpNumFlag, argFloat | argComplex}, {'o', sharpNumFlag, argInt | argPointer}, + {'O', sharpNumFlag, argInt | argPointer}, {'p', "-#", argPointer}, {'q', " -+.0#", argRune | argInt | argString}, {'s', " -+.0", argString}, @@ -827,7 +832,7 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, state *formatState) (o } } - // Does current arg implement fmt.Formatter? + // Could current arg implement fmt.Formatter? formatter := false if state.argNum < len(call.Args) { if tv, ok := pass.TypesInfo.Types[call.Args[state.argNum]]; ok { @@ -891,43 +896,51 @@ func okPrintfArg(pass *analysis.Pass, call *ast.CallExpr, state *formatState) (o pass.ReportRangef(call, "%s format %s has arg %s of wrong type %s", state.name, state.format, analysisutil.Format(pass.Fset, arg), typeString) return false } - if v.typ&argString != 0 && v.verb != 'T' && !bytes.Contains(state.flags, []byte{'#'}) && recursiveStringer(pass, arg) { - pass.ReportRangef(call, "%s format %s with arg %s causes recursive String method call", state.name, state.format, analysisutil.Format(pass.Fset, arg)) - return false + if v.typ&argString != 0 && v.verb != 'T' && !bytes.Contains(state.flags, []byte{'#'}) { + if methodName, ok := recursiveStringer(pass, arg); ok { + pass.ReportRangef(call, "%s format %s with arg %s causes recursive %s method call", state.name, state.format, analysisutil.Format(pass.Fset, arg), methodName) + return false + } } return true } // recursiveStringer reports whether the argument e is a potential -// recursive call to stringer, such as t and &t in these examples: +// recursive call to stringer or is an error, such as t and &t in these examples: // // func (t *T) String() string { printf("%s", t) } -// func (t T) String() string { printf("%s", t) } +// func (t T) Error() string { printf("%s", t) } // func (t T) String() string { printf("%s", &t) } -// -func recursiveStringer(pass *analysis.Pass, e ast.Expr) bool { +func recursiveStringer(pass *analysis.Pass, e ast.Expr) (string, bool) { typ := pass.TypesInfo.Types[e].Type // It's unlikely to be a recursive stringer if it has a Format method. 
if isFormatter(typ) { - return false + return "", false } - // Does e allow e.String()? - obj, _, _ := types.LookupFieldOrMethod(typ, false, pass.Pkg, "String") - stringMethod, ok := obj.(*types.Func) - if !ok { - return false + // Does e allow e.String() or e.Error()? + strObj, _, _ := types.LookupFieldOrMethod(typ, false, pass.Pkg, "String") + strMethod, strOk := strObj.(*types.Func) + errObj, _, _ := types.LookupFieldOrMethod(typ, false, pass.Pkg, "Error") + errMethod, errOk := errObj.(*types.Func) + if !strOk && !errOk { + return "", false } - // Is the expression e within the body of that String method? - if stringMethod.Pkg() != pass.Pkg || !stringMethod.Scope().Contains(e.Pos()) { - return false + // Is the expression e within the body of that String or Error method? + var method *types.Func + if strOk && strMethod.Pkg() == pass.Pkg && strMethod.Scope().Contains(e.Pos()) { + method = strMethod + } else if errOk && errMethod.Pkg() == pass.Pkg && errMethod.Scope().Contains(e.Pos()) { + method = errMethod + } else { + return "", false } - sig := stringMethod.Type().(*types.Signature) + sig := method.Type().(*types.Signature) if !isStringer(sig) { - return false + return "", false } // Is it the receiver r, or &r? @@ -935,9 +948,11 @@ func recursiveStringer(pass *analysis.Pass, e ast.Expr) bool { e = u.X // strip off & from &r } if id, ok := e.(*ast.Ident); ok { - return pass.TypesInfo.Uses[id] == sig.Recv() + if pass.TypesInfo.Uses[id] == sig.Recv() { + return method.Name(), true + } } - return false + return "", false } // isStringer reports whether the method signature matches the String() definition in fmt.Stringer. @@ -1061,8 +1076,8 @@ func checkPrint(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) { if isFunctionValue(pass, arg) { pass.ReportRangef(call, "%s arg %s is a func value, not called", fn.Name(), analysisutil.Format(pass.Fset, arg)) } - if recursiveStringer(pass, arg) { - pass.ReportRangef(call, "%s arg %s causes recursive call to String method", fn.Name(), analysisutil.Format(pass.Fset, arg)) + if methodName, ok := recursiveStringer(pass, arg); ok { + pass.ReportRangef(call, "%s arg %s causes recursive call to %s method", fn.Name(), analysisutil.Format(pass.Fset, arg), methodName) } } } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go index 089c06483..90896dd1b 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/unreachable/unreachable.go @@ -189,7 +189,18 @@ func (d *deadState) findDead(stmt ast.Stmt) { case *ast.EmptyStmt: // do not warn about unreachable empty statements default: - d.pass.ReportRangef(stmt, "unreachable code") + d.pass.Report(analysis.Diagnostic{ + Pos: stmt.Pos(), + End: stmt.End(), + Message: "unreachable code", + SuggestedFixes: []analysis.SuggestedFix{{ + Message: "Remove", + TextEdits: []analysis.TextEdit{{ + Pos: stmt.Pos(), + End: stmt.End(), + }}, + }}, + }) d.reachable = true // silence error about next statement } } diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index 3084508b5..af5e17fee 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -150,7 +150,11 @@ func traverse(files []*ast.File) []event { extent += int(f.End() - f.Pos()) } // This estimate is based on the net/http 
package. - events := make([]event, 0, extent*33/100) + capacity := extent * 33 / 100 + if capacity > 1e6 { + capacity = 1e6 // impose some reasonable maximum + } + events := make([]event, 0, capacity) var stack []event for _, f := range files { diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index db0c9a7ea..dc6177c12 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -11,17 +11,15 @@ import ( "encoding/json" "fmt" "go/types" - "log" - "os" "os/exec" "strings" - "time" + + "golang.org/x/tools/internal/gocommand" ) var debug = false -// GetSizes returns the sizes used by the underlying driver with the given parameters. -func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) { +func GetSizes(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) { // TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver. const toolPrefix = "GOPACKAGESDRIVER=" tool := "" @@ -41,7 +39,7 @@ func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExp } if tool == "off" { - return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData) + return GetSizesGolist(ctx, buildFlags, env, gocmdRunner, dir) } req, err := json.Marshal(struct { @@ -77,98 +75,43 @@ func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExp return response.Sizes, nil } -func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) { - args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"} - args = append(args, buildFlags...) - args = append(args, "--", "unsafe") - stdout, stderr, err := invokeGo(ctx, env, dir, usesExportData, args...) +func GetSizesGolist(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) { + inv := gocommand.Invocation{ + Verb: "list", + Args: []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}, + Env: env, + BuildFlags: buildFlags, + WorkingDir: dir, + } + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) var goarch, compiler string - if err != nil { - if strings.Contains(err.Error(), "cannot find main module") { + if rawErr != nil { + if strings.Contains(rawErr.Error(), "cannot find main module") { // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. // TODO(matloob): Is this a problem in practice? 
- envout, _, enverr := invokeGo(ctx, env, dir, usesExportData, "env", "GOARCH") + inv := gocommand.Invocation{ + Verb: "env", + Args: []string{"GOARCH"}, + Env: env, + WorkingDir: dir, + } + envout, enverr := gocmdRunner.Run(ctx, inv) if enverr != nil { - return nil, err + return nil, enverr } goarch = strings.TrimSpace(envout.String()) compiler = "gc" } else { - return nil, err + return nil, friendlyErr } } else { fields := strings.Fields(stdout.String()) if len(fields) < 2 { - return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \" from stdout of go command:\n%s\ndir: %s\nstdout: <<%s>>\nstderr: <<%s>>", - cmdDebugStr(env, args...), dir, stdout.String(), stderr.String()) + return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) } goarch = fields[0] compiler = fields[1] } return types.SizesFor(compiler, goarch), nil } - -// invokeGo returns the stdout and stderr of a go command invocation. -func invokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, *bytes.Buffer, error) { - if debug { - defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now()) - } - stdout := new(bytes.Buffer) - stderr := new(bytes.Buffer) - cmd := exec.CommandContext(ctx, "go", args...) - // On darwin the cwd gets resolved to the real path, which breaks anything that - // expects the working directory to keep the original path, including the - // go command when dealing with modules. - // The Go stdlib has a special feature where if the cwd and the PWD are the - // same node then it trusts the PWD, so by setting it in the env for the child - // process we fix up all the paths returned by the go command. - cmd.Env = append(append([]string{}, env...), "PWD="+dir) - cmd.Dir = dir - cmd.Stdout = stdout - cmd.Stderr = stderr - if err := cmd.Run(); err != nil { - exitErr, ok := err.(*exec.ExitError) - if !ok { - // Catastrophic error: - // - executable not found - // - context cancellation - return nil, nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) - } - - // Export mode entails a build. - // If that build fails, errors appear on stderr - // (despite the -e flag) and the Export field is blank. - // Do not fail in that case. - if !usesExportData { - return nil, nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) - } - } - - // As of writing, go list -export prints some non-fatal compilation - // errors to stderr, even with -e set. We would prefer that it put - // them in the Package.Error JSON (see https://golang.org/issue/26319). - // In the meantime, there's nowhere good to put them, but they can - // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS - // is set. 
- if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { - fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr) - } - - // debugging - if false { - fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout) - } - - return stdout, stderr, nil -} - -func cmdDebugStr(envlist []string, args ...string) string { - env := make(map[string]string) - for _, kv := range envlist { - split := strings.Split(kv, "=") - k, v := split[0], split[1] - env[k] = v - } - - return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args) -} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index fc0b28ecf..1a5aba9f9 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -20,10 +20,11 @@ import ( "strconv" "strings" "sync" - "time" "unicode" "golang.org/x/tools/go/internal/packagesdriver" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/xerrors" ) // debug controls verbose logging. @@ -141,7 +142,7 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { sizeswg.Add(1) go func() { var sizes types.Sizes - sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) + sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, cfg.BuildFlags, cfg.Env, cfg.gocmdRunner, cfg.Dir) // types.SizesFor always returns nil or a *types.StdSizes. response.dr.Sizes, _ = sizes.(*types.StdSizes) sizeswg.Done() @@ -380,6 +381,7 @@ type jsonPackage struct { Imports []string ImportMap map[string]string Deps []string + Module *Module TestGoFiles []string TestImports []string XTestGoFiles []string @@ -500,10 +502,19 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse errkind = "use of internal package not allowed" } if errkind != "" { - if len(old.Error.ImportStack) < 2 { - return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack with fewer than two elements`, errkind) + if len(old.Error.ImportStack) < 1 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1] + if importingPkg == old.ImportPath { + // Using an older version of Go which put this package itself on top of import + // stack, instead of the importer. Look for importer in second from top + // position. + if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind) + } + importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2] } - importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-2] additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ Pos: old.Error.Pos, Msg: old.Error.Err, @@ -529,6 +540,26 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), forTest: p.ForTest, + Module: p.Module, + } + + if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { + if len(p.CompiledGoFiles) > len(p.GoFiles) { + // We need the cgo definitions, which are in the first + // CompiledGoFile after the non-cgo ones. This is a hack but there + // isn't currently a better way to find it. 
We also need the pure + // Go files and unprocessed cgo files, all of which are already + // in pkg.GoFiles. + cgoTypes := p.CompiledGoFiles[len(p.GoFiles)] + pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...) + } else { + // golang/go#38990: go list silently fails to do cgo processing + pkg.CompiledGoFiles = nil + pkg.Errors = append(pkg.Errors, Error{ + Msg: "go list failed to return CompiledGoFiles; https://golang.org/issue/38990?", + Kind: ListError, + }) + } } // Work around https://golang.org/issue/28749: @@ -604,6 +635,23 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse pkg.CompiledGoFiles = pkg.GoFiles } + // Temporary work-around for golang/go#39986. Parse filenames out of + // error messages. This happens if there are unrecoverable syntax + // errors in the source, so we can't match on a specific error message. + if err := p.Error; err != nil && len(err.ImportStack) == 0 && len(pkg.CompiledGoFiles) == 0 { + if split := strings.Split(err.Pos, ":"); len(split) > 1 { + if filename := split[0]; filename != "" { + if !filepath.IsAbs(filename) { + filename = filepath.Join(state.cfg.Dir, filename) + } + if info, _ := os.Stat(filename); info != nil { + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) + pkg.GoFiles = append(pkg.GoFiles, filename) + } + } + } + } + if p.Error != nil { msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. // Address golang.org/issue/35964 by appending import stack to error message. @@ -704,29 +752,20 @@ func golistargs(cfg *Config, words []string) []string { func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { cfg := state.cfg - stdout := new(bytes.Buffer) - stderr := new(bytes.Buffer) - goArgs := []string{verb} - if verb != "env" { - goArgs = append(goArgs, cfg.BuildFlags...) - } - goArgs = append(goArgs, args...) - cmd := exec.CommandContext(state.ctx, "go", goArgs...) - // On darwin the cwd gets resolved to the real path, which breaks anything that - // expects the working directory to keep the original path, including the - // go command when dealing with modules. - // The Go stdlib has a special feature where if the cwd and the PWD are the - // same node then it trusts the PWD, so by setting it in the env for the child - // process we fix up all the paths returned by the go command. - cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir) - cmd.Dir = cfg.Dir - cmd.Stdout = stdout - cmd.Stderr = stderr - defer func(start time.Time) { - cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, goArgs...), stderr, stdout) - }(time.Now()) - - if err := cmd.Run(); err != nil { + inv := gocommand.Invocation{ + Verb: verb, + Args: args, + BuildFlags: cfg.BuildFlags, + Env: cfg.Env, + Logf: cfg.Logf, + WorkingDir: cfg.Dir, + } + gocmdRunner := cfg.gocmdRunner + if gocmdRunner == nil { + gocmdRunner = &gocommand.Runner{} + } + stdout, stderr, _, err := gocmdRunner.RunRaw(cfg.Context, inv) + if err != nil { // Check for 'go' executable not being found. 
if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) @@ -736,7 +775,7 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, if !ok { // Catastrophic error: // - context cancellation - return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) + return nil, xerrors.Errorf("couldn't run 'go': %w", err) } // Old go version? @@ -857,16 +896,6 @@ func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) } } - - // As of writing, go list -export prints some non-fatal compilation - // errors to stderr, even with -e set. We would prefer that it put - // them in the Package.Error JSON (see https://golang.org/issue/26319). - // In the meantime, there's nowhere good to put them, but they can - // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS - // is set. - if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { - fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr) - } return stdout, nil } diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index 7974a6c9b..4eabfd98c 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -5,6 +5,7 @@ import ( "fmt" "go/parser" "go/token" + "log" "os" "path/filepath" "sort" @@ -22,10 +23,15 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif needPkgsSet := make(map[string]bool) modifiedPkgsSet := make(map[string]bool) + pkgOfDir := make(map[string][]*Package) for _, pkg := range response.dr.Packages { // This is an approximation of import path to id. This can be // wrong for tests, vendored packages, and a number of other cases. havePkgs[pkg.PkgPath] = pkg.ID + x := commonDir(pkg.GoFiles) + if x != "" { + pkgOfDir[x] = append(pkgOfDir[x], pkg) + } } // If no new imports are added, it is safe to avoid loading any needPkgs. @@ -64,6 +70,9 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif // to the overlay. continue } + // If all the overlay files belong to a different package, change the + // package name to that package. + maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir]) nextPackage: for _, p := range response.dr.Packages { if pkgName != p.Name && p.ID != "command-line-arguments" { @@ -93,8 +102,11 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif } } } - // The overlay could have included an entirely new package. - if pkg == nil { + // The overlay could have included an entirely new package or an + // ad-hoc package. An ad-hoc package is one that we have manually + // constructed from inadequate `go list` results for a file= query. + // It will have the ID command-line-arguments. + if pkg == nil || pkg.ID == "command-line-arguments" { // Try to find the module or gopath dir the file is contained in. // Then for modules, add the module opath to the beginning. 
pkgPath, ok, err := state.getPkgPath(dir) @@ -104,34 +116,53 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif if !ok { break } + var forTest string // only set for x tests isXTest := strings.HasSuffix(pkgName, "_test") if isXTest { + forTest = pkgPath pkgPath += "_test" } id := pkgPath - if isTestFile && !isXTest { - id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) - } - // Try to reclaim a package with the same id if it exists in the response. - for _, p := range response.dr.Packages { - if reclaimPackage(p, id, opath, contents) { - pkg = p - break + if isTestFile { + if isXTest { + id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) + } else { + id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) } } - // Otherwise, create a new package - if pkg == nil { - pkg = &Package{PkgPath: pkgPath, ID: id, Name: pkgName, Imports: make(map[string]*Package)} - response.addPackage(pkg) - havePkgs[pkg.PkgPath] = id - // Add the production package's sources for a test variant. - if isTestFile && !isXTest && testVariantOf != nil { - pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) - // Add the package under test and its imports to the test variant. - pkg.forTest = testVariantOf.PkgPath - for k, v := range testVariantOf.Imports { - pkg.Imports[k] = &Package{ID: v.ID} + if pkg != nil { + // TODO(rstambler): We should change the package's path and ID + // here. The only issue is that this messes with the roots. + } else { + // Try to reclaim a package with the same ID, if it exists in the response. + for _, p := range response.dr.Packages { + if reclaimPackage(p, id, opath, contents) { + pkg = p + break + } + } + // Otherwise, create a new package. + if pkg == nil { + pkg = &Package{ + PkgPath: pkgPath, + ID: id, + Name: pkgName, + Imports: make(map[string]*Package), + } + response.addPackage(pkg) + havePkgs[pkg.PkgPath] = id + // Add the production package's sources for a test variant. + if isTestFile && !isXTest && testVariantOf != nil { + pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. + pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } + } + if isXTest { + pkg.forTest = forTest } } } @@ -149,6 +180,8 @@ func (state *golistState) processGolistOverlay(response *responseDeduper) (modif continue } for _, imp := range imports { + // TODO(rstambler): If the package is an x test and the import has + // a test variant, make sure to replace it. if _, found := pkg.Imports[imp]; found { continue } @@ -282,7 +315,17 @@ func (state *golistState) determineRootDirs() (map[string]string, error) { } func (state *golistState) determineRootDirsModules() (map[string]string, error) { - out, err := state.invokeGo("list", "-m", "-json", "all") + // This will only return the root directory for the main module. + // For now we only support overlays in main modules. + // Editing files in the module cache isn't a great idea, so we don't + // plan to ever support that, but editing files in replaced modules + // is something we may want to support. 
To do that, we'll want to + // do a go list -m to determine the replaced module's module path and + // directory, and then a go list -m {{with .Replace}}{{.Dir}}{{end}} + // from the main module to determine if that module is actually a replacement. + // See bcmills's comment here: https://github.com/golang/go/issues/37629#issuecomment-594179751 + // for more information. + out, err := state.invokeGo("list", "-m", "-json") if err != nil { return nil, err } @@ -374,3 +417,57 @@ func extractPackageName(filename string, contents []byte) (string, bool) { } return f.Name.Name, true } + +func commonDir(a []string) string { + seen := make(map[string]bool) + x := append([]string{}, a...) + for _, f := range x { + seen[filepath.Dir(f)] = true + } + if len(seen) > 1 { + log.Fatalf("commonDir saw %v for %v", seen, x) + } + for k := range seen { + // len(seen) == 1 + return k + } + return "" // no files +} + +// It is possible that the files in the disk directory dir have a different package +// name from newName, which is deduced from the overlays. If they all have a different +// package name, and they all have the same package name, then that name becomes +// the package name. +// It returns true if it changes the package name, false otherwise. +func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) { + names := make(map[string]int) + for _, p := range pkgsOfDir { + names[p.Name]++ + } + if len(names) != 1 { + // some files are in different packages + return + } + var oldName string + for k := range names { + oldName = k + } + if newName == oldName { + return + } + // We might have a case where all of the package names in the directory are + // the same, but the overlay file is for an x test, which belongs to its + // own package. If the x test does not yet exist on disk, we may not yet + // have its package name on disk, but we should not rename the packages. + // + // We use a heuristic to determine if this file belongs to an x test: + // The test file should have a package name whose package name has a _test + // suffix or looks like "newName_test". + maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") + if isTestFile && maybeXTest { + return + } + for _, p := range pkgsOfDir { + p.Name = newName + } +} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index aff94a3fe..7ea37e7ee 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -38,7 +38,7 @@ var modeStrings = []string{ func (mod LoadMode) String() string { m := mod if m == 0 { - return fmt.Sprintf("LoadMode(0)") + return "LoadMode(0)" } var out []string for i, x := range allModes { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 586c714f6..04053f1e7 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -21,9 +21,12 @@ import ( "path/filepath" "strings" "sync" + "time" "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typesinternal" ) // A LoadMode controls the amount of detail to return when loading. @@ -69,6 +72,13 @@ const ( // NeedTypesSizes adds TypesSizes. NeedTypesSizes + + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. 
+ // Modifies CompiledGoFiles and Types, and has no effect on its own. + typecheckCgo + + // NeedModule adds Module. + NeedModule ) const ( @@ -127,6 +137,9 @@ type Config struct { // Env []string + // gocmdRunner guards go command calls from concurrency errors. + gocmdRunner *gocommand.Runner + // BuildFlags is a list of command-line flags to be passed through to // the build system's query tool. BuildFlags []string @@ -178,6 +191,13 @@ type driver func(cfg *Config, patterns ...string) (*driverResponse, error) // driverResponse contains the results for a driver query. type driverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the driverResponse is ignored, and go/packages will fallback + // to the next driver. If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + // Sizes, if not nil, is the types.Sizes to use when type checking. Sizes *types.StdSizes @@ -219,14 +239,22 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { return l.refine(response.Roots, response.Packages...) } -// defaultDriver is a driver that looks for an external driver binary, and if -// it does not find it falls back to the built in go list driver. +// defaultDriver is a driver that implements go/packages' fallback behavior. +// It will try to request to an external driver, if one exists. If there's +// no external driver, or the driver returns a response with NotHandled set, +// defaultDriver will fall back to the go list driver. func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { driver := findExternalDriver(cfg) if driver == nil { driver = goListDriver } - return driver(cfg, patterns...) + response, err := driver(cfg, patterns...) + if err != nil { + return response, err + } else if response.NotHandled { + return goListDriver(cfg, patterns...) + } + return response, nil } // A Package describes a loaded Go package. @@ -253,7 +281,7 @@ type Package struct { GoFiles []string // CompiledGoFiles lists the absolute file paths of the package's source - // files that were presented to the compiler. + // files that are suitable for type checking. // This may differ from GoFiles if files are processed before compilation. CompiledGoFiles []string @@ -299,12 +327,41 @@ type Package struct { // forTest is the package under test, if any. forTest string + + // module is the module information for the package if it exists. + Module *Module +} + +// Module provides module information for a package. +type Module struct { + Path string // module path + Version string // module version + Replace *Module // replaced by this module + Time *time.Time // time version was created + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module + Error *ModuleError // error loading module +} + +// ModuleError holds errors loading a module. 
+type ModuleError struct { + Err string // the error itself } func init() { packagesinternal.GetForTest = func(p interface{}) string { return p.(*Package).forTest } + packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { + return config.(*Config).gocmdRunner + } + packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { + config.(*Config).gocmdRunner = runner + } + packagesinternal.TypecheckCgo = int(typecheckCgo) } // An Error describes a problem with a package's metadata, syntax, or types. @@ -467,6 +524,9 @@ func newLoader(cfg *Config) *loader { if ld.Config.Env == nil { ld.Config.Env = os.Environ() } + if ld.Config.gocmdRunner == nil { + ld.Config.gocmdRunner = &gocommand.Runner{} + } if ld.Context == nil { ld.Context = context.Background() } @@ -684,6 +744,9 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) { if ld.requestedMode&NeedTypesSizes == 0 { ld.pkgs[i].TypesSizes = nil } + if ld.requestedMode&NeedModule == 0 { + ld.pkgs[i].Module = nil + } } return result, nil @@ -859,6 +922,15 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Error: appendError, Sizes: ld.sizes, } + if (ld.Mode & typecheckCgo) != 0 { + if !typesinternal.SetUsesCgo(tc) { + appendError(Error{ + Msg: "typecheckCgo requires Go 1.15+", + Kind: ListError, + }) + return + } + } types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 882e3b3d8..cffd7acbe 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -226,7 +226,8 @@ func For(obj types.Object) (Path, error) { // the best paths because non-types may // refer to types, but not the reverse. empty := make([]byte, 0, 48) // initial space - for _, name := range scope.Names() { + names := scope.Names() + for _, name := range names { o := scope.Lookup(name) tname, ok := o.(*types.TypeName) if !ok { @@ -253,7 +254,7 @@ func For(obj types.Object) (Path, error) { // Then inspect everything else: // non-types, and declared methods of defined types. - for _, name := range scope.Names() { + for _, name := range names { o := scope.Lookup(name) path := append(empty, name...) if _, ok := o.(*types.TypeName); !ok { diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go index b4f428767..a4e40adba 100644 --- a/vendor/golang.org/x/tools/imports/forward.go +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -3,9 +3,10 @@ package imports // import "golang.org/x/tools/imports" import ( - "go/build" - "os" + "io/ioutil" + "log" + "golang.org/x/tools/internal/gocommand" intimp "golang.org/x/tools/internal/imports" ) @@ -30,32 +31,37 @@ var Debug = false var LocalPrefix string // Process formats and adjusts imports for the provided file. -// If opt is nil the defaults are used. +// If opt is nil the defaults are used, and if src is nil the source +// is read from the filesystem. // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. // To process data ``as if'' it were in filename, pass the data as a non-nil src. 
func Process(filename string, src []byte, opt *Options) ([]byte, error) { + var err error + if src == nil { + src, err = ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + } if opt == nil { opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} } intopt := &intimp.Options{ Env: &intimp.ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, - GOFLAGS: os.Getenv("GOFLAGS"), - GO111MODULE: os.Getenv("GO111MODULE"), - GOPROXY: os.Getenv("GOPROXY"), - GOSUMDB: os.Getenv("GOSUMDB"), - Debug: Debug, - LocalPrefix: LocalPrefix, + GocmdRunner: &gocommand.Runner{}, }, - AllErrors: opt.AllErrors, - Comments: opt.Comments, - FormatOnly: opt.FormatOnly, - Fragment: opt.Fragment, - TabIndent: opt.TabIndent, - TabWidth: opt.TabWidth, + LocalPrefix: LocalPrefix, + AllErrors: opt.AllErrors, + Comments: opt.Comments, + FormatOnly: opt.FormatOnly, + Fragment: opt.Fragment, + TabIndent: opt.TabIndent, + TabWidth: opt.TabWidth, + } + if Debug { + intopt.Env.Logf = log.Printf } return intimp.Process(filename, src, intopt) } diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go new file mode 100644 index 000000000..311fbfd88 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go @@ -0,0 +1,200 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package analysisinternal exposes internal-only fields from go/analysis. +package analysisinternal + +import ( + "bytes" + "fmt" + "go/ast" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/ast/astutil" +) + +func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos { + // Get the end position for the type error. + offset, end := fset.PositionFor(start, false).Offset, start + if offset >= len(src) { + return end + } + if width := bytes.IndexAny(src[offset:], " \n,():;[]+-*"); width > 0 { + end = start + token.Pos(width) + } + return end +} + +func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { + under := typ + if n, ok := typ.(*types.Named); ok { + under = n.Underlying() + } + switch u := under.(type) { + case *types.Basic: + switch { + case u.Info()&types.IsNumeric != 0: + return &ast.BasicLit{Kind: token.INT, Value: "0"} + case u.Info()&types.IsBoolean != 0: + return &ast.Ident{Name: "false"} + case u.Info()&types.IsString != 0: + return &ast.BasicLit{Kind: token.STRING, Value: `""`} + default: + panic("unknown basic type") + } + case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice: + return ast.NewIdent("nil") + case *types.Struct: + texpr := TypeExpr(fset, f, pkg, typ) // typ because we want the name here. 
+ if texpr == nil { + return nil + } + return &ast.CompositeLit{ + Type: texpr, + } + case *types.Array: + texpr := TypeExpr(fset, f, pkg, u.Elem()) + if texpr == nil { + return nil + } + return &ast.CompositeLit{ + Type: &ast.ArrayType{ + Elt: texpr, + Len: &ast.BasicLit{Kind: token.INT, Value: fmt.Sprintf("%v", u.Len())}, + }, + } + } + return nil +} + +func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { + switch t := typ.(type) { + case *types.Basic: + switch t.Kind() { + case types.UnsafePointer: + return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")} + default: + return ast.NewIdent(t.Name()) + } + case *types.Named: + if t.Obj().Pkg() == nil { + return nil + } + if t.Obj().Pkg() == pkg { + return ast.NewIdent(t.Obj().Name()) + } + pkgName := t.Obj().Pkg().Name() + // If the file already imports the package under another name, use that. + for _, group := range astutil.Imports(fset, f) { + for _, cand := range group { + if strings.Trim(cand.Path.Value, `"`) == t.Obj().Pkg().Path() { + if cand.Name != nil && cand.Name.Name != "" { + pkgName = cand.Name.Name + } + } + } + } + if pkgName == "." { + return ast.NewIdent(t.Obj().Name()) + } + return &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: ast.NewIdent(t.Obj().Name()), + } + case *types.Pointer: + return &ast.UnaryExpr{ + Op: token.MUL, + X: TypeExpr(fset, f, pkg, t.Elem()), + } + default: + return nil // TODO: anonymous structs, but who does that + } +} + +var GetTypeErrors = func(p interface{}) []types.Error { return nil } +var SetTypeErrors = func(p interface{}, errors []types.Error) {} + +type TypeErrorPass string + +const ( + NoNewVars TypeErrorPass = "nonewvars" + NoResultValues TypeErrorPass = "noresultvalues" + UndeclaredName TypeErrorPass = "undeclaredname" +) + +// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable. +// Some examples: +// +// Basic Example: +// z := 1 +// y := z + x +// If x is undeclared, then this function would return `y := z + x`, so that we +// can insert `x := ` on the line before `y := z + x`. +// +// If stmt example: +// if z == 1 { +// } else if z == y {} +// If y is undeclared, then this function would return `if z == 1 {`, because we cannot +// insert a statement between an if and an else if statement. As a result, we need to find +// the top of the if chain to insert `y := ` before. +func StmtToInsertVarBefore(path []ast.Node) ast.Stmt { + enclosingIndex := -1 + for i, p := range path { + if _, ok := p.(ast.Stmt); ok { + enclosingIndex = i + break + } + } + if enclosingIndex == -1 { + return nil + } + enclosingStmt := path[enclosingIndex] + switch enclosingStmt.(type) { + case *ast.IfStmt: + // The enclosingStmt is inside of the if declaration, + // We need to check if we are in an else-if stmt and + // get the base if statement. + return baseIfStmt(path, enclosingIndex) + case *ast.CaseClause: + // Get the enclosing switch stmt if the enclosingStmt is + // inside of the case statement. + for i := enclosingIndex + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.SwitchStmt); ok { + return node + } else if node, ok := path[i].(*ast.TypeSwitchStmt); ok { + return node + } + } + } + if len(path) <= enclosingIndex+1 { + return enclosingStmt.(ast.Stmt) + } + // Check if the enclosing statement is inside another node. + switch expr := path[enclosingIndex+1].(type) { + case *ast.IfStmt: + // Get the base if statement. 
+ return baseIfStmt(path, enclosingIndex+1) + case *ast.ForStmt: + if expr.Init == enclosingStmt || expr.Post == enclosingStmt { + return expr + } + } + return enclosingStmt.(ast.Stmt) +} + +// baseIfStmt walks up the if/else-if chain until we get to +// the top of the current if chain. +func baseIfStmt(path []ast.Node, index int) ast.Stmt { + stmt := path[index] + for i := index + 1; i < len(path); i++ { + if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt { + stmt = node + continue + } + break + } + return stmt.(ast.Stmt) +} diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go new file mode 100644 index 000000000..e37b49491 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package core provides support for event based telemetry. +package core + +import ( + "fmt" + "time" + + "golang.org/x/tools/internal/event/label" +) + +// Event holds the information about an event of note that ocurred. +type Event struct { + at time.Time + + // As events are often on the stack, storing the first few labels directly + // in the event can avoid an allocation at all for the very common cases of + // simple events. + // The length needs to be large enough to cope with the majority of events + // but no so large as to cause undue stack pressure. + // A log message with two values will use 3 labels (one for each value and + // one for the message itself). + + static [3]label.Label // inline storage for the first few labels + dynamic []label.Label // dynamically sized storage for remaining labels +} + +// eventLabelMap implements label.Map for a the labels of an Event. +type eventLabelMap struct { + event Event +} + +func (ev Event) At() time.Time { return ev.at } + +func (ev Event) Format(f fmt.State, r rune) { + if !ev.at.IsZero() { + fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 ")) + } + for index := 0; ev.Valid(index); index++ { + if l := ev.Label(index); l.Valid() { + fmt.Fprintf(f, "\n\t%v", l) + } + } +} + +func (ev Event) Valid(index int) bool { + return index >= 0 && index < len(ev.static)+len(ev.dynamic) +} + +func (ev Event) Label(index int) label.Label { + if index < len(ev.static) { + return ev.static[index] + } + return ev.dynamic[index-len(ev.static)] +} + +func (ev Event) Find(key label.Key) label.Label { + for _, l := range ev.static { + if l.Key() == key { + return l + } + } + for _, l := range ev.dynamic { + if l.Key() == key { + return l + } + } + return label.Label{} +} + +func MakeEvent(static [3]label.Label, labels []label.Label) Event { + return Event{ + static: static, + dynamic: labels, + } +} + +// CloneEvent event returns a copy of the event with the time adjusted to at. +func CloneEvent(ev Event, at time.Time) Event { + ev.at = at + return ev +} diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go new file mode 100644 index 000000000..05f3a9a57 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/export.go @@ -0,0 +1,70 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package core + +import ( + "context" + "sync/atomic" + "time" + "unsafe" + + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, Event, label.Map) context.Context + +var ( + exporter unsafe.Pointer +) + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + p := unsafe.Pointer(&e) + if e == nil { + // &e is always valid, and so p is always valid, but for the early abort + // of ProcessEvent to be efficient it needs to make the nil check on the + // pointer without having to dereference it, so we make the nil function + // also a nil pointer + p = nil + } + atomic.StorePointer(&exporter, p) +} + +// deliver is called to deliver an event to the supplied exporter. +// it will fill in the time. +func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context { + // add the current time to the event + ev.at = time.Now() + // hand the event off to the current exporter + return exporter(ctx, ev, ev) +} + +// Export is called to deliver an event to the global exporter if set. +func Export(ctx context.Context, ev Event) context.Context { + // get the global exporter and abort early if there is not one + exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + if exporterPtr == nil { + return ctx + } + return deliver(ctx, *exporterPtr, ev) +} + +// ExportPair is called to deliver a start event to the supplied exporter. +// It also returns a function that will deliver the end event to the same +// exporter. +// It will fill in the time. +func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) { + // get the global exporter and abort early if there is not one + exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + if exporterPtr == nil { + return ctx, func() {} + } + ctx = deliver(ctx, *exporterPtr, begin) + return ctx, func() { deliver(ctx, *exporterPtr, end) } +} diff --git a/vendor/golang.org/x/tools/internal/event/core/fast.go b/vendor/golang.org/x/tools/internal/event/core/fast.go new file mode 100644 index 000000000..06c1d4615 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/fast.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Log1 takes a message and one label delivers a log event to the exporter. +// It is a customized version of Print that is faster and does no allocation. +func Log1(ctx context.Context, message string, t1 label.Label) { + Export(ctx, MakeEvent([3]label.Label{ + keys.Msg.Of(message), + t1, + }, nil)) +} + +// Log2 takes a message and two labels and delivers a log event to the exporter. +// It is a customized version of Print that is faster and does no allocation. +func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) { + Export(ctx, MakeEvent([3]label.Label{ + keys.Msg.Of(message), + t1, + t2, + }, nil)) +} + +// Metric1 sends a label event to the exporter with the supplied labels. 
+func Metric1(ctx context.Context, t1 label.Label) context.Context { + return Export(ctx, MakeEvent([3]label.Label{ + keys.Metric.New(), + t1, + }, nil)) +} + +// Metric2 sends a label event to the exporter with the supplied labels. +func Metric2(ctx context.Context, t1, t2 label.Label) context.Context { + return Export(ctx, MakeEvent([3]label.Label{ + keys.Metric.New(), + t1, + t2, + }, nil)) +} + +// Start1 sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// Start2 sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + t2, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} diff --git a/vendor/golang.org/x/tools/internal/event/doc.go b/vendor/golang.org/x/tools/internal/event/doc.go new file mode 100644 index 000000000..5dc6e6bab --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package event provides a set of packages that cover the main +// concepts of telemetry in an implementation agnostic way. +package event diff --git a/vendor/golang.org/x/tools/internal/event/event.go b/vendor/golang.org/x/tools/internal/event/event.go new file mode 100644 index 000000000..4d55e577d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/event.go @@ -0,0 +1,127 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package event + +import ( + "context" + + "golang.org/x/tools/internal/event/core" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, core.Event, label.Map) context.Context + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + core.SetExporter(core.Exporter(e)) +} + +// Log takes a message and a label list and combines them into a single event +// before delivering them to the exporter. +func Log(ctx context.Context, message string, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + }, labels)) +} + +// IsLog returns true if the event was built by the Log function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLog(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg +} + +// Error takes a message and a label list and combines them into a single event +// before delivering them to the exporter. 
It captures the error in the +// delivered event. +func Error(ctx context.Context, message string, err error, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + keys.Err.Of(err), + }, labels)) +} + +// IsError returns true if the event was built by the Error function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsError(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg && + ev.Label(1).Key() == keys.Err +} + +// Metric sends a label event to the exporter with the supplied labels. +func Metric(ctx context.Context, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Metric.New(), + }, labels)) +} + +// IsMetric returns true if the event was built by the Metric function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsMetric(ev core.Event) bool { + return ev.Label(0).Key() == keys.Metric +} + +// Label sends a label event to the exporter with the supplied labels. +func Label(ctx context.Context, labels ...label.Label) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Label.New(), + }, labels)) +} + +// IsLabel returns true if the event was built by the Label function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLabel(ev core.Event) bool { + return ev.Label(0).Key() == keys.Label +} + +// Start sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) { + return core.ExportPair(ctx, + core.MakeEvent([3]label.Label{ + keys.Start.Of(name), + }, labels), + core.MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// IsStart returns true if the event was built by the Start function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsStart(ev core.Event) bool { + return ev.Label(0).Key() == keys.Start +} + +// IsEnd returns true if the event was built by the End function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsEnd(ev core.Event) bool { + return ev.Label(0).Key() == keys.End +} + +// Detach returns a context without an associated span. +// This allows the creation of spans that are not children of the current span. +func Detach(ctx context.Context) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Detach.New(), + }, nil)) +} + +// IsDetach returns true if the event was built by the Detach function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsDetach(ev core.Event) bool { + return ev.Label(0).Key() == keys.Detach +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go new file mode 100644 index 000000000..a02206e30 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -0,0 +1,564 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package keys + +import ( + "fmt" + "io" + "math" + "strconv" + + "golang.org/x/tools/internal/event/label" +) + +// Value represents a key for untyped values. +type Value struct { + name string + description string +} + +// New creates a new Key for untyped values. +func New(name, description string) *Value { + return &Value{name: name, description: description} +} + +func (k *Value) Name() string { return k.name } +func (k *Value) Description() string { return k.description } + +func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { + fmt.Fprint(w, k.From(l)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Value) Get(lm label.Map) interface{} { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. +func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } + +// Of creates a new Label with this key and the supplied value. +func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } + +// Tag represents a key for tagging labels that have no value. +// These are used when the existence of the label is the entire information it +// carries, such as marking events to be of a specific kind, or from a specific +// package. +type Tag struct { + name string + description string +} + +// NewTag creates a new Key for tagging labels. +func NewTag(name, description string) *Tag { + return &Tag{name: name, description: description} +} + +func (k *Tag) Name() string { return k.name } +func (k *Tag) Description() string { return k.description } + +func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {} + +// New creates a new Label with this key. +func (k *Tag) New() label.Label { return label.OfValue(k, nil) } + +// Int represents a key +type Int struct { + name string + description string +} + +// NewInt creates a new Key for int values. +func NewInt(name, description string) *Int { + return &Int{name: name, description: description} +} + +func (k *Int) Name() string { return k.name } +func (k *Int) Description() string { return k.description } + +func (k *Int) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int) Get(lm label.Map) int { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int) From(t label.Label) int { return int(t.Unpack64()) } + +// Int8 represents a key +type Int8 struct { + name string + description string +} + +// NewInt8 creates a new Key for int8 values. +func NewInt8(name, description string) *Int8 { + return &Int8{name: name, description: description} +} + +func (k *Int8) Name() string { return k.name } +func (k *Int8) Description() string { return k.description } + +func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int8) Get(lm label.Map) int8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. 
+func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) } + +// Int16 represents a key +type Int16 struct { + name string + description string +} + +// NewInt16 creates a new Key for int16 values. +func NewInt16(name, description string) *Int16 { + return &Int16{name: name, description: description} +} + +func (k *Int16) Name() string { return k.name } +func (k *Int16) Description() string { return k.description } + +func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int16) Get(lm label.Map) int16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) } + +// Int32 represents a key +type Int32 struct { + name string + description string +} + +// NewInt32 creates a new Key for int32 values. +func NewInt32(name, description string) *Int32 { + return &Int32{name: name, description: description} +} + +func (k *Int32) Name() string { return k.name } +func (k *Int32) Description() string { return k.description } + +func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int32) Get(lm label.Map) int32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) } + +// Int64 represents a key +type Int64 struct { + name string + description string +} + +// NewInt64 creates a new Key for int64 values. +func NewInt64(name, description string) *Int64 { + return &Int64{name: name, description: description} +} + +func (k *Int64) Name() string { return k.name } +func (k *Int64) Description() string { return k.description } + +func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int64) Get(lm label.Map) int64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) } + +// UInt represents a key +type UInt struct { + name string + description string +} + +// NewUInt creates a new Key for uint values. +func NewUInt(name, description string) *UInt { + return &UInt{name: name, description: description} +} + +func (k *UInt) Name() string { return k.name } +func (k *UInt) Description() string { return k.description } + +func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. 
+func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt) Get(lm label.Map) uint { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) } + +// UInt8 represents a key +type UInt8 struct { + name string + description string +} + +// NewUInt8 creates a new Key for uint8 values. +func NewUInt8(name, description string) *UInt8 { + return &UInt8{name: name, description: description} +} + +func (k *UInt8) Name() string { return k.name } +func (k *UInt8) Description() string { return k.description } + +func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt8) Get(lm label.Map) uint8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) } + +// UInt16 represents a key +type UInt16 struct { + name string + description string +} + +// NewUInt16 creates a new Key for uint16 values. +func NewUInt16(name, description string) *UInt16 { + return &UInt16{name: name, description: description} +} + +func (k *UInt16) Name() string { return k.name } +func (k *UInt16) Description() string { return k.description } + +func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt16) Get(lm label.Map) uint16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) } + +// UInt32 represents a key +type UInt32 struct { + name string + description string +} + +// NewUInt32 creates a new Key for uint32 values. +func NewUInt32(name, description string) *UInt32 { + return &UInt32{name: name, description: description} +} + +func (k *UInt32) Name() string { return k.name } +func (k *UInt32) Description() string { return k.description } + +func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt32) Get(lm label.Map) uint32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) } + +// UInt64 represents a key +type UInt64 struct { + name string + description string +} + +// NewUInt64 creates a new Key for uint64 values. 
+func NewUInt64(name, description string) *UInt64 { + return &UInt64{name: name, description: description} +} + +func (k *UInt64) Name() string { return k.name } +func (k *UInt64) Description() string { return k.description } + +func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt64) Get(lm label.Map) uint64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() } + +// Float32 represents a key +type Float32 struct { + name string + description string +} + +// NewFloat32 creates a new Key for float32 values. +func NewFloat32(name, description string) *Float32 { + return &Float32{name: name, description: description} +} + +func (k *Float32) Name() string { return k.name } +func (k *Float32) Description() string { return k.description } + +func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Float32) Of(v float32) label.Label { + return label.Of64(k, uint64(math.Float32bits(v))) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Float32) Get(lm label.Map) float32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Float32) From(t label.Label) float32 { + return math.Float32frombits(uint32(t.Unpack64())) +} + +// Float64 represents a key +type Float64 struct { + name string + description string +} + +// NewFloat64 creates a new Key for int64 values. +func NewFloat64(name, description string) *Float64 { + return &Float64{name: name, description: description} +} + +func (k *Float64) Name() string { return k.name } +func (k *Float64) Description() string { return k.description } + +func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Float64) Of(v float64) label.Label { + return label.Of64(k, math.Float64bits(v)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Float64) Get(lm label.Map) float64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Float64) From(t label.Label) float64 { + return math.Float64frombits(t.Unpack64()) +} + +// String represents a key +type String struct { + name string + description string +} + +// NewString creates a new Key for int64 values. +func NewString(name, description string) *String { + return &String{name: name, description: description} +} + +func (k *String) Name() string { return k.name } +func (k *String) Description() string { return k.description } + +func (k *String) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendQuote(buf, k.From(l))) +} + +// Of creates a new Label with this key and the supplied value. +func (k *String) Of(v string) label.Label { return label.OfString(k, v) } + +// Get can be used to get a label for the key from a label.Map. 
+func (k *String) Get(lm label.Map) string { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return "" +} + +// From can be used to get a value from a Label. +func (k *String) From(t label.Label) string { return t.UnpackString() } + +// Boolean represents a key +type Boolean struct { + name string + description string +} + +// NewBoolean creates a new Key for bool values. +func NewBoolean(name, description string) *Boolean { + return &Boolean{name: name, description: description} +} + +func (k *Boolean) Name() string { return k.name } +func (k *Boolean) Description() string { return k.description } + +func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendBool(buf, k.From(l))) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Boolean) Of(v bool) label.Label { + if v { + return label.Of64(k, 1) + } + return label.Of64(k, 0) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Boolean) Get(lm label.Map) bool { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return false +} + +// From can be used to get a value from a Label. +func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 } + +// Error represents a key +type Error struct { + name string + description string +} + +// NewError creates a new Key for int64 values. +func NewError(name, description string) *Error { + return &Error{name: name, description: description} +} + +func (k *Error) Name() string { return k.name } +func (k *Error) Description() string { return k.description } + +func (k *Error) Format(w io.Writer, buf []byte, l label.Label) { + io.WriteString(w, k.From(l).Error()) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Error) Get(lm label.Map) error { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. +func (k *Error) From(t label.Label) error { + err, _ := t.UnpackValue().(error) + return err +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/standard.go b/vendor/golang.org/x/tools/internal/event/keys/standard.go new file mode 100644 index 000000000..7e9586659 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/standard.go @@ -0,0 +1,22 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +var ( + // Msg is a key used to add message strings to label lists. + Msg = NewString("message", "a readable message") + // Label is a key used to indicate an event adds labels to the context. + Label = NewTag("label", "a label context marker") + // Start is used for things like traces that have a name. + Start = NewString("start", "span start") + // Metric is a key used to indicate an event records metrics. + End = NewTag("end", "a span end marker") + // Metric is a key used to indicate an event records metrics. + Detach = NewTag("detach", "a span detach marker") + // Err is a key used to add error values to label lists. + Err = NewError("error", "an error that occurred") + // Metric is a key used to indicate an event records metrics. 
+ Metric = NewTag("metric", "a metric event marker") +) diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go new file mode 100644 index 000000000..b55c12eb2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/label/label.go @@ -0,0 +1,213 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package label + +import ( + "fmt" + "io" + "reflect" + "unsafe" +) + +// Key is used as the identity of a Label. +// Keys are intended to be compared by pointer only, the name should be unique +// for communicating with external systems, but it is not required or enforced. +type Key interface { + // Name returns the key name. + Name() string + // Description returns a string that can be used to describe the value. + Description() string + + // Format is used in formatting to append the value of the label to the + // supplied buffer. + // The formatter may use the supplied buf as a scratch area to avoid + // allocations. + Format(w io.Writer, buf []byte, l Label) +} + +// Label holds a key and value pair. +// It is normally used when passing around lists of labels. +type Label struct { + key Key + packed uint64 + untyped interface{} +} + +// Map is the interface to a collection of Labels indexed by key. +type Map interface { + // Find returns the label that matches the supplied key. + Find(key Key) Label +} + +// List is the interface to something that provides an iterable +// list of labels. +// Iteration should start from 0 and continue until Valid returns false. +type List interface { + // Valid returns true if the index is within range for the list. + // It does not imply the label at that index will itself be valid. + Valid(index int) bool + // Label returns the label at the given index. + Label(index int) Label +} + +// list implements LabelList for a list of Labels. +type list struct { + labels []Label +} + +// filter wraps a LabelList filtering out specific labels. +type filter struct { + keys []Key + underlying List +} + +// listMap implements LabelMap for a simple list of labels. +type listMap struct { + labels []Label +} + +// mapChain implements LabelMap for a list of underlying LabelMap. +type mapChain struct { + maps []Map +} + +// OfValue creates a new label from the key and value. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } + +// UnpackValue assumes the label was built using LabelOfValue and returns the value +// that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackValue() interface{} { return t.untyped } + +// Of64 creates a new label from a key and a uint64. This is often +// used for non uint64 values that can be packed into a uint64. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} } + +// Unpack64 assumes the label was built using LabelOf64 and returns the value that +// was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. 
+func (t Label) Unpack64() uint64 { return t.packed } + +// OfString creates a new label from a key and a string. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfString(k Key, v string) Label { + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + return Label{ + key: k, + packed: uint64(hdr.Len), + untyped: unsafe.Pointer(hdr.Data), + } +} + +// UnpackString assumes the label was built using LabelOfString and returns the +// value that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackString() string { + var v string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + hdr.Data = uintptr(t.untyped.(unsafe.Pointer)) + hdr.Len = int(t.packed) + return *(*string)(unsafe.Pointer(hdr)) +} + +// Valid returns true if the Label is a valid one (it has a key). +func (t Label) Valid() bool { return t.key != nil } + +// Key returns the key of this Label. +func (t Label) Key() Key { return t.key } + +// Format is used for debug printing of labels. +func (t Label) Format(f fmt.State, r rune) { + if !t.Valid() { + io.WriteString(f, `nil`) + return + } + io.WriteString(f, t.Key().Name()) + io.WriteString(f, "=") + var buf [128]byte + t.Key().Format(f, buf[:0], t) +} + +func (l *list) Valid(index int) bool { + return index >= 0 && index < len(l.labels) +} + +func (l *list) Label(index int) Label { + return l.labels[index] +} + +func (f *filter) Valid(index int) bool { + return f.underlying.Valid(index) +} + +func (f *filter) Label(index int) Label { + l := f.underlying.Label(index) + for _, f := range f.keys { + if l.Key() == f { + return Label{} + } + } + return l +} + +func (lm listMap) Find(key Key) Label { + for _, l := range lm.labels { + if l.Key() == key { + return l + } + } + return Label{} +} + +func (c mapChain) Find(key Key) Label { + for _, src := range c.maps { + l := src.Find(key) + if l.Valid() { + return l + } + } + return Label{} +} + +var emptyList = &list{} + +func NewList(labels ...Label) List { + if len(labels) == 0 { + return emptyList + } + return &list{labels: labels} +} + +func Filter(l List, keys ...Key) List { + if len(keys) == 0 { + return l + } + return &filter{keys: keys, underlying: l} +} + +func NewMap(labels ...Label) Map { + return listMap{labels: labels} +} + +func MergeMaps(srcs ...Map) Map { + var nonNil []Map + for _, src := range srcs { + if src != nil { + nonNil = append(nonNil, src) + } + } + if len(nonNil) == 1 { + return nonNil[0] + } + return mapChain{maps: nonNil} +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go index ce38fdcf8..5901a8f61 100644 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -76,8 +76,9 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e } func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { - // golang.org/issue/15653 - dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + // golang.org/issue/37269 + dirent := &syscall.Dirent{} + copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf) if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) } 
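For reviewers skimming the vendored event/label API above, a minimal sketch of how the key and label types compose. This is not part of the patch itself: these are internal packages of golang.org/x/tools, so the snippet only builds inside that module, and the "retryable" key is a hypothetical example (keys.Msg and keys.Err are the standard keys declared in standard.go).

    // Sketch only, not vendored code: illustrates the key/label API surface.
    package main

    import (
        "errors"
        "fmt"

        "golang.org/x/tools/internal/event/keys"
        "golang.org/x/tools/internal/event/label"
    )

    func main() {
        // Hypothetical ad-hoc key; real callers mostly reuse the standard
        // keys (keys.Msg, keys.Err, ...) from standard.go.
        retryable := keys.NewBoolean("retryable", "whether the failure can be retried")

        // Keys build strongly typed Labels; a Map indexes Labels by key.
        lm := label.NewMap(
            keys.Msg.Of("loading package metadata"),
            keys.Err.Of(errors.New("go.mod changed")),
            retryable.Of(true),
        )

        // Reads go back through the key; a missing key yields the zero value.
        fmt.Println(keys.Msg.Get(lm))  // loading package metadata
        fmt.Println(keys.Err.Get(lm))  // go.mod changed
        fmt.Println(retryable.Get(lm)) // true
    }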
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go new file mode 100644 index 000000000..f516e1762 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -0,0 +1,230 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gocommand is a helper for calling the go command. +package gocommand + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "strings" + "sync" + "time" + + "golang.org/x/tools/internal/event" +) + +// An Runner will run go command invocations and serialize +// them if it sees a concurrency error. +type Runner struct { + // once guards the runner initialization. + once sync.Once + + // inFlight tracks available workers. + inFlight chan struct{} + + // serialized guards the ability to run a go command serially, + // to avoid deadlocks when claiming workers. + serialized chan struct{} +} + +const maxInFlight = 10 + +func (runner *Runner) initialize() { + runner.once.Do(func() { + runner.inFlight = make(chan struct{}, maxInFlight) + runner.serialized = make(chan struct{}, 1) + }) +} + +// 1.13: go: updates to go.mod needed, but contents have changed +// 1.14: go: updating go.mod: existing contents have changed since last read +var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) + +// Run is a convenience wrapper around RunRaw. +// It returns only stdout and a "friendly" error. +func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) + return stdout, friendly +} + +// RunPiped runs the invocation serially, always waiting for any concurrent +// invocations to complete first. +func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + _, err := runner.runPiped(ctx, inv, stdout, stderr) + return err +} + +// RunRaw runs the invocation, serializing requests only if they fight over +// go.mod changes. +func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // First, try to run the go command concurrently. + stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) + + // If we encounter a load concurrency error, we need to retry serially. + if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { + return stdout, stderr, friendlyErr, err + } + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Wait for 1 worker to become available. 
+ select { + case <-ctx.Done(): + return nil, nil, nil, ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() + } + + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. + for i := 0; i < maxInFlight; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() + } + } + + return inv.runWithFriendlyError(ctx, stdout, stderr) +} + +// An Invocation represents a call to the go command. +type Invocation struct { + Verb string + Args []string + BuildFlags []string + Env []string + WorkingDir string + Logf func(format string, args ...interface{}) +} + +func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { + rawError = i.run(ctx, stdout, stderr) + if rawError != nil { + friendlyError = rawError + // Check for 'go' executable not being found. + if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + friendlyError = fmt.Errorf("go command required, not found: %v", ee) + } + if ctx.Err() != nil { + friendlyError = ctx.Err() + } + friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr) + } + return +} + +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { + log := i.Logf + if log == nil { + log = func(string, ...interface{}) {} + } + + goArgs := []string{i.Verb} + switch i.Verb { + case "mod": + // mod needs the sub-verb before build flags. + goArgs = append(goArgs, i.Args[0]) + goArgs = append(goArgs, i.BuildFlags...) + goArgs = append(goArgs, i.Args[1:]...) + case "env": + // env doesn't take build flags. + goArgs = append(goArgs, i.Args...) + default: + goArgs = append(goArgs, i.BuildFlags...) + goArgs = append(goArgs, i.Args...) + } + cmd := exec.Command("go", goArgs...) + cmd.Stdout = stdout + cmd.Stderr = stderr + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. + cmd.Env = append(os.Environ(), i.Env...) + if i.WorkingDir != "" { + cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) + cmd.Dir = i.WorkingDir + } + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + + return runCmdContext(ctx, cmd) +} + +// runCmdContext is like exec.CommandContext except it sends os.Interrupt +// before os.Kill. 
+func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { + if err := cmd.Start(); err != nil { + return err + } + resChan := make(chan error, 1) + go func() { + resChan <- cmd.Wait() + }() + + select { + case err := <-resChan: + return err + case <-ctx.Done(): + } + // Cancelled. Interrupt and see if it ends voluntarily. + cmd.Process.Signal(os.Interrupt) + select { + case err := <-resChan: + return err + case <-time.After(time.Second): + } + // Didn't shut down in response to interrupt. Kill it hard. + cmd.Process.Kill() + return <-resChan +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args) +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go new file mode 100644 index 000000000..1cd8d8473 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "golang.org/x/mod/semver" +) + +// ModuleJSON holds information about a module. +type ModuleJSON struct { + Path string // module path + Replace *ModuleJSON // replaced by this module + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file for this module, if any + GoVersion string // go version used in module +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) + if err != nil { + return nil, false, err + } + + // We check the GOFLAGS to see if there is anything overridden or not. + inv.Verb = "env" + inv.Args = []string{"GOFLAGS"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + goflags := string(bytes.TrimSpace(stdout.Bytes())) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + if modFlag != "" { + // Don't override an explicit '-mod=' argument. + return mainMod, modFlag == "vendor", nil + } + if mainMod == nil || !go114 { + return mainMod, false, nil + } + // Check 1.14's automatic vendor mode. + if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. 
+ return mainMod, true, nil + } + } + return mainMod, false, nil +} + +// getMainModuleAnd114 gets the main module's information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + + lines := strings.Split(stdout.String(), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mod := &ModuleJSON{ + Path: lines[0], + Dir: lines[1], + GoMod: lines[2], + GoVersion: lines[3], + Main: true, + } + return mod, lines[4] == "go1.14", nil +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 64309db74..390cb9db7 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -23,8 +23,10 @@ import ( // Options controls the behavior of a Walk call. type Options struct { - Debug bool // Enable debug logging - ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules. + // If Logf is non-nil, debug logging is enabled through this function. + Logf func(format string, args ...interface{}) + // Search module caches. Also disables legacy goimports ignore rules. + ModulesEnabled bool } // RootType indicates the type of a Root. @@ -80,14 +82,14 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root // walkDir creates a walker and starts fastwalk with this walker. 
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { if _, err := os.Stat(root.Path); os.IsNotExist(err) { - if opts.Debug { - log.Printf("skipping nonexistent directory: %v", root.Path) + if opts.Logf != nil { + opts.Logf("skipping nonexistent directory: %v", root.Path) } return } start := time.Now() - if opts.Debug { - log.Printf("gopathwalk: scanning %s", root.Path) + if opts.Logf != nil { + opts.Logf("gopathwalk: scanning %s", root.Path) } w := &walker{ root: root, @@ -100,8 +102,8 @@ func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err) } - if opts.Debug { - log.Printf("gopathwalk: scanned %s in %v", root.Path, time.Since(start)) + if opts.Logf != nil { + opts.Logf("gopathwalk: scanned %s in %v", root.Path, time.Since(start)) } } @@ -130,11 +132,11 @@ func (w *walker) init() { full := filepath.Join(w.root.Path, p) if fi, err := os.Stat(full); err == nil { w.ignoredDirs = append(w.ignoredDirs, fi) - if w.opts.Debug { - log.Printf("Directory added to ignore list: %s", full) + if w.opts.Logf != nil { + w.opts.Logf("Directory added to ignore list: %s", full) } - } else if w.opts.Debug { - log.Printf("Error statting ignored directory: %v", err) + } else if w.opts.Logf != nil { + w.opts.Logf("Error statting ignored directory: %v", err) } } } @@ -145,11 +147,11 @@ func (w *walker) init() { func (w *walker) getIgnoredDirs(path string) []string { file := filepath.Join(path, ".goimportsignore") slurp, err := ioutil.ReadFile(file) - if w.opts.Debug { + if w.opts.Logf != nil { if err != nil { - log.Print(err) + w.opts.Logf("%v", err) } else { - log.Printf("Read %s", file) + w.opts.Logf("Read %s", file) } } if err != nil { diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index ee01d34b1..ecd13e87a 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -7,6 +7,7 @@ package imports import ( "bytes" "context" + "encoding/json" "fmt" "go/ast" "go/build" @@ -14,7 +15,6 @@ import ( "go/token" "io/ioutil" "os" - "os/exec" "path" "path/filepath" "reflect" @@ -22,45 +22,46 @@ import ( "strconv" "strings" "sync" - "time" "unicode" "unicode/utf8" "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" ) // importToGroup is a list of functions which map from an import path to // a group number. 
-var importToGroup = []func(env *ProcessEnv, importPath string) (num int, ok bool){ - func(env *ProcessEnv, importPath string) (num int, ok bool) { - if env.LocalPrefix == "" { +var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){ + func(localPrefix, importPath string) (num int, ok bool) { + if localPrefix == "" { return } - for _, p := range strings.Split(env.LocalPrefix, ",") { + for _, p := range strings.Split(localPrefix, ",") { if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { return 3, true } } return }, - func(_ *ProcessEnv, importPath string) (num int, ok bool) { + func(_, importPath string) (num int, ok bool) { if strings.HasPrefix(importPath, "appengine") { return 2, true } return }, - func(_ *ProcessEnv, importPath string) (num int, ok bool) { - if strings.Contains(importPath, ".") { + func(_, importPath string) (num int, ok bool) { + firstComponent := strings.Split(importPath, "/")[0] + if strings.Contains(firstComponent, ".") { return 1, true } return }, } -func importGroup(env *ProcessEnv, importPath string) int { +func importGroup(localPrefix, importPath string) int { for _, fn := range importToGroup { - if n, ok := fn(env, importPath); ok { + if n, ok := fn(localPrefix, importPath); ok { return n } } @@ -263,7 +264,7 @@ type pass struct { // loadPackageNames saves the package names for everything referenced by imports. func (p *pass) loadPackageNames(imports []*ImportInfo) error { - if p.env.Debug { + if p.env.Logf != nil { p.env.Logf("loading package names for %v packages", len(imports)) defer func() { p.env.Logf("done loading package names for %v packages", len(imports)) @@ -277,7 +278,12 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error { unknown = append(unknown, imp.ImportPath) } - names, err := p.env.GetResolver().loadPackageNames(unknown, p.srcDir) + resolver, err := p.env.GetResolver() + if err != nil { + return err + } + + names, err := resolver.loadPackageNames(unknown, p.srcDir) if err != nil { return err } @@ -335,7 +341,7 @@ func (p *pass) load() ([]*ImportFix, bool) { if p.loadRealPackageNames { err := p.loadPackageNames(append(imports, p.candidates...)) if err != nil { - if p.env.Debug { + if p.env.Logf != nil { p.env.Logf("loading package names: %v", err) } return nil, false @@ -529,7 +535,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv return nil, err } srcDir := filepath.Dir(abs) - if env.Debug { + if env.Logf != nil { env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) } @@ -598,7 +604,7 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena // Start off with the standard library. 
for importPath, exports := range stdlib { p := &pkg{ - dir: filepath.Join(env.GOROOT, "src", importPath), + dir: filepath.Join(env.goroot(), "src", importPath), importPathShort: importPath, packageName: path.Base(importPath), relevance: MaxRelevance, @@ -639,15 +645,23 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena wrappedCallback.exportsLoaded(pkg, exports) }, } - return env.GetResolver().scan(ctx, scanFilter) + resolver, err := env.GetResolver() + if err != nil { + return err + } + return resolver.scan(ctx, scanFilter) } -func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) map[string]int { +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map[string]int, error) { result := make(map[string]int) + resolver, err := env.GetResolver() + if err != nil { + return nil, err + } for _, path := range paths { - result[path] = env.GetResolver().scoreImportPath(ctx, path) + result[path] = resolver.scoreImportPath(ctx, path) } - return result + return result, nil } func PrimeCache(ctx context.Context, env *ProcessEnv) error { @@ -673,8 +687,9 @@ func candidateImportName(pkg *pkg) string { return "" } -// getAllCandidates gets all of the candidates to be imported, regardless of if they are needed. -func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { +// GetAllCandidates gets all of the packages starting with prefix that can be +// imported by filename, sorted by import path. +func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { callback := &scanCallback{ rootFound: func(gopathwalk.Root) bool { return true @@ -713,7 +728,8 @@ type PackageExport struct { Exports []string } -func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { +// GetPackageExports returns all known packages with name pkg and their exports. +func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { callback := &scanCallback{ rootFound: func(gopathwalk.Root) bool { return true @@ -743,25 +759,45 @@ func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchP return getCandidatePkgs(ctx, callback, filename, filePkg, env) } +var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB"} + // ProcessEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. type ProcessEnv struct { - LocalPrefix string - Debug bool + GocmdRunner *gocommand.Runner BuildFlags []string - // If non-empty, these will be used instead of the - // process-wide values. - GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string - WorkingDir string + // Env overrides the OS environment, and can be used to specify + // GOPROXY, GO111MODULE, etc. PATH cannot be set here, because + // exec.Command will not honor it. + // Specifying all of RequiredGoEnvVars avoids a call to `go env`. + Env map[string]string + + WorkingDir string - // Logf is the default logger for the ProcessEnv. + // If Logf is non-nil, debug logging is enabled through this function. 
 	Logf func(format string, args ...interface{})
 
 	resolver Resolver
 }
 
+func (e *ProcessEnv) goroot() string {
+	return e.mustGetEnv("GOROOT")
+}
+
+func (e *ProcessEnv) gopath() string {
+	return e.mustGetEnv("GOPATH")
+}
+
+func (e *ProcessEnv) mustGetEnv(k string) string {
+	v, ok := e.Env[k]
+	if !ok {
+		panic(fmt.Sprintf("%v not set in evaluated environment", k))
+	}
+	return v
+}
+
 // CopyConfig copies the env's configuration into a new env.
 func (e *ProcessEnv) CopyConfig() *ProcessEnv {
 	copy := *e
@@ -769,42 +805,63 @@ func (e *ProcessEnv) CopyConfig() *ProcessEnv {
 	return &copy
 }
 
-func (e *ProcessEnv) env() []string {
-	env := os.Environ()
-	add := func(k, v string) {
-		if v != "" {
-			env = append(env, k+"="+v)
+func (e *ProcessEnv) init() error {
+	foundAllRequired := true
+	for _, k := range RequiredGoEnvVars {
+		if _, ok := e.Env[k]; !ok {
+			foundAllRequired = false
+			break
 		}
 	}
-	add("GOPATH", e.GOPATH)
-	add("GOROOT", e.GOROOT)
-	add("GO111MODULE", e.GO111MODULE)
-	add("GOPROXY", e.GOPROXY)
-	add("GOFLAGS", e.GOFLAGS)
-	add("GOSUMDB", e.GOSUMDB)
-	if e.WorkingDir != "" {
-		add("PWD", e.WorkingDir)
+	if foundAllRequired {
+		return nil
+	}
+
+	if e.Env == nil {
+		e.Env = map[string]string{}
+	}
+
+	goEnv := map[string]string{}
+	stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, RequiredGoEnvVars...)...)
+	if err != nil {
+		return err
+	}
+	if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil {
+		return err
+	}
+	for k, v := range goEnv {
+		e.Env[k] = v
+	}
+	return nil
+}
+
+func (e *ProcessEnv) env() []string {
+	var env []string // the gocommand package will prepend os.Environ.
+	for k, v := range e.Env {
+		env = append(env, k+"="+v)
 	}
 	return env
 }
 
-func (e *ProcessEnv) GetResolver() Resolver {
+func (e *ProcessEnv) GetResolver() (Resolver, error) {
 	if e.resolver != nil {
-		return e.resolver
+		return e.resolver, nil
 	}
-	out, err := e.invokeGo("env", "GOMOD")
-	if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 {
+	if err := e.init(); err != nil {
+		return nil, err
+	}
+	if len(e.Env["GOMOD"]) == 0 {
 		e.resolver = newGopathResolver(e)
-		return e.resolver
+		return e.resolver, nil
 	}
 	e.resolver = newModuleResolver(e)
-	return e.resolver
+	return e.resolver, nil
 }
 
 func (e *ProcessEnv) buildContext() *build.Context {
 	ctx := build.Default
-	ctx.GOROOT = e.GOROOT
-	ctx.GOPATH = e.GOPATH
+	ctx.GOROOT = e.goroot()
+	ctx.GOPATH = e.gopath()
 
 	// As of Go 1.14, build.Context has a Dir field
 	// (see golang.org/issue/34860).
@@ -823,44 +880,22 @@ func (e *ProcessEnv) buildContext() *build.Context {
 	return &ctx
 }
 
-func (e *ProcessEnv) invokeGo(verb string, args ...string) (*bytes.Buffer, error) {
-	goArgs := []string{verb}
-	if verb != "env" {
-		goArgs = append(goArgs, e.BuildFlags...)
-	}
-	goArgs = append(goArgs, args...)
-	cmd := exec.Command("go", goArgs...)
- stdout := &bytes.Buffer{} - stderr := &bytes.Buffer{} - cmd.Stdout = stdout - cmd.Stderr = stderr - cmd.Env = e.env() - cmd.Dir = e.WorkingDir - - if e.Debug { - defer func(start time.Time) { e.Logf("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) - } - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("running go: %v (stderr:\n%s)", err, stderr) - } - return stdout, nil -} - -func cmdDebugStr(cmd *exec.Cmd) string { - env := make(map[string]string) - for _, kv := range cmd.Env { - split := strings.Split(kv, "=") - k, v := split[0], split[1] - env[k] = v +func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) { + inv := gocommand.Invocation{ + Verb: verb, + Args: args, + BuildFlags: e.BuildFlags, + Env: e.env(), + Logf: e.Logf, + WorkingDir: e.WorkingDir, } - - return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args) + return e.GocmdRunner.Run(ctx, inv) } func addStdlibCandidates(pass *pass, refs references) { add := func(pkg string) { // Prevent self-imports. - if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.GOROOT, "src", pkg) == pass.srcDir { + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.goroot(), "src", pkg) == pass.srcDir { return } exports := copyExports(stdlib[pkg]) @@ -945,10 +980,13 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { return false // We'll do our own loading after we sort. }, } - err := pass.env.GetResolver().scan(context.Background(), callback) + resolver, err := pass.env.GetResolver() if err != nil { return err } + if err = resolver.scan(context.Background(), callback); err != nil { + return err + } // Search for imports matching potential package references. type result struct { @@ -1261,7 +1299,7 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error case <-r.scanSema: } defer func() { r.scanSema <- struct{}{} }() - gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false}) + gopathwalk.Walk(roots, add, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: false}) close(scanDone) }() select { @@ -1346,7 +1384,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl fullFile := filepath.Join(dir, fi.Name()) f, err := parser.ParseFile(fset, fullFile, nil, 0) if err != nil { - return "", nil, fmt.Errorf("parsing %s: %v", fullFile, err) + if env.Logf != nil { + env.Logf("error parsing %v: %v", fullFile, err) + } + continue } if f.Name.Name == "documentation" { // Special case from go/build.ImportDir, not @@ -1365,7 +1406,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } } - if env.Debug { + if env.Logf != nil { sortedExports := append([]string(nil), exports...) sort.Strings(sortedExports) env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", ")) @@ -1381,11 +1422,15 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa // ones. Note that this sorts by the de-vendored name, so // there's no "penalty" for vendoring. 
sort.Sort(byDistanceOrImportPathShortLength(candidates)) - if pass.env.Debug { + if pass.env.Logf != nil { for i, c := range candidates { pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) } } + resolver, err := pass.env.GetResolver() + if err != nil { + return nil, err + } // Collect exports for packages with matching names. rescv := make([]chan *pkg, len(candidates)) @@ -1419,14 +1464,14 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa wg.Done() }() - if pass.env.Debug { + if pass.env.Logf != nil { pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) } // If we're an x_test, load the package under test's test variant. includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir - _, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest) + _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest) if err != nil { - if pass.env.Debug { + if pass.env.Logf != nil { pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) } resc <- nil diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index 2e7a317e5..2815edc33 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -11,18 +11,13 @@ package imports import ( "bufio" "bytes" - "context" "fmt" "go/ast" - "go/build" "go/format" "go/parser" "go/printer" "go/token" "io" - "io/ioutil" - "log" - "os" "regexp" "strconv" "strings" @@ -34,6 +29,11 @@ import ( type Options struct { Env *ProcessEnv // The environment to use. Note: this contains the cached module and filesystem state. + // LocalPrefix is a comma-separated string of import path prefixes, which, if + // set, instructs Process to sort the import paths with the given prefixes + // into another group after 3rd-party packages. + LocalPrefix string + Fragment bool // Accept fragment of a source file (no package statement) AllErrors bool // Report all errors (not just the first 10 on different lines) @@ -44,13 +44,8 @@ type Options struct { FormatOnly bool // Disable the insertion and deletion of imports } -// Process implements golang.org/x/tools/imports.Process with explicit context in env. +// Process implements golang.org/x/tools/imports.Process with explicit context in opt.Env. func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { - src, opt, err = initialize(filename, src, opt) - if err != nil { - return nil, err - } - fileSet := token.NewFileSet() file, adjust, err := parse(fileSet, filename, src, opt) if err != nil { @@ -66,16 +61,12 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e } // FixImports returns a list of fixes to the imports that, when applied, -// will leave the imports in the same state as Process. +// will leave the imports in the same state as Process. src and opt must +// be specified. // // Note that filename's directory influences which imports can be chosen, // so it is important that filename be accurate. 
func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { - src, opt, err = initialize(filename, src, opt) - if err != nil { - return nil, err - } - fileSet := token.NewFileSet() file, _, err := parse(fileSet, filename, src, opt) if err != nil { @@ -86,13 +77,9 @@ func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, } // ApplyFixes applies all of the fixes to the file and formats it. extraMode -// is added in when parsing the file. +// is added in when parsing the file. src and opts must be specified, but no +// env is needed. func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { - src, opt, err = initialize(filename, src, opt) - if err != nil { - return nil, err - } - // Don't use parse() -- we don't care about fragments or statement lists // here, and we need to work with unparseable files. fileSet := token.NewFileSet() @@ -116,65 +103,9 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e return formatFile(fileSet, file, src, nil, opt) } -// GetAllCandidates gets all of the packages starting with prefix that can be -// imported by filename, sorted by import path. -func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error { - _, opt, err := initialize(filename, []byte{}, opt) - if err != nil { - return err - } - return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env) -} - -// GetPackageExports returns all known packages with name pkg and their exports. -func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error { - _, opt, err := initialize(filename, []byte{}, opt) - if err != nil { - return err - } - return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env) -} - -// initialize sets the values for opt and src. -// If they are provided, they are not changed. Otherwise opt is set to the -// default values and src is read from the file system. -func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, error) { - // Use defaults if opt is nil. - if opt == nil { - opt = &Options{Comments: true, TabIndent: true, TabWidth: 8} - } - - // Set the env if the user has not provided it. - if opt.Env == nil { - opt.Env = &ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, - GOFLAGS: os.Getenv("GOFLAGS"), - GO111MODULE: os.Getenv("GO111MODULE"), - GOPROXY: os.Getenv("GOPROXY"), - GOSUMDB: os.Getenv("GOSUMDB"), - } - } - - // Set the logger if the user has not provided it. 
- if opt.Env.Logf == nil { - opt.Env.Logf = log.Printf - } - - if src == nil { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, nil, err - } - src = b - } - - return src, opt, nil -} - func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { - mergeImports(opt.Env, fileSet, file) - sortImports(opt.Env, fileSet, file) + mergeImports(fileSet, file) + sortImports(opt.LocalPrefix, fileSet, file) imps := astutil.Imports(fileSet, file) var spacesBefore []string // import paths we need spaces before for _, impSection := range imps { @@ -185,7 +116,7 @@ func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func( lastGroup := -1 for _, importSpec := range impSection { importPath, _ := strconv.Unquote(importSpec.Path.Value) - groupNum := importGroup(opt.Env, importPath) + groupNum := importGroup(opt.LocalPrefix, importPath) if groupNum != lastGroup && lastGroup != -1 { spacesBefore = append(spacesBefore, importPath) } diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 1980f59de..664fbbf5b 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -15,7 +15,7 @@ import ( "strings" "golang.org/x/mod/module" - "golang.org/x/mod/semver" + "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" ) @@ -24,31 +24,21 @@ import ( type ModuleResolver struct { env *ProcessEnv moduleCacheDir string - dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. + dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. roots []gopathwalk.Root scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. scannedRoots map[gopathwalk.Root]bool initialized bool - main *ModuleJSON - modsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... - modsByDir []*ModuleJSON // ...or Dir. + main *gocommand.ModuleJSON + modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path... + modsByDir []*gocommand.ModuleJSON // ...or Dir. // moduleCacheCache stores information about the module cache. moduleCacheCache *dirInfoCache otherCache *dirInfoCache } -type ModuleJSON struct { - Path string // module path - Replace *ModuleJSON // replaced by this module - Main bool // is this the main module? - Indirect bool // is this module only an indirect dependency of main module? - Dir string // directory holding files for this module, if any - GoMod string // path to go.mod file for this module, if any - GoVersion string // go version used in module -} - func newModuleResolver(e *ProcessEnv) *ModuleResolver { r := &ModuleResolver{ env: e, @@ -62,7 +52,14 @@ func (r *ModuleResolver) init() error { if r.initialized { return nil } - mainMod, vendorEnabled, err := vendorEnabled(r.env) + + inv := gocommand.Invocation{ + BuildFlags: r.env.BuildFlags, + Env: r.env.env(), + Logf: r.env.Logf, + WorkingDir: r.env.WorkingDir, + } + mainMod, vendorEnabled, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner) if err != nil { return err } @@ -71,18 +68,22 @@ func (r *ModuleResolver) init() error { // Vendor mode is on, so all the non-Main modules are irrelevant, // and we need to search /vendor for everything. 
r.main = mainMod - r.dummyVendorMod = &ModuleJSON{ + r.dummyVendorMod = &gocommand.ModuleJSON{ Path: "", Dir: filepath.Join(mainMod.Dir, "vendor"), } - r.modsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod} - r.modsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByModPath = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByDir = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod} } else { // Vendor mode is off, so run go list -m ... to find everything. r.initAllMods() } - r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod") + if gmc := r.env.Env["GOMODCACHE"]; gmc != "" { + r.moduleCacheDir = gmc + } else { + r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.gopath())[0], "/pkg/mod") + } sort.Slice(r.modsByModPath, func(i, j int) bool { count := func(x int) int { @@ -98,7 +99,7 @@ func (r *ModuleResolver) init() error { }) r.roots = []gopathwalk.Root{ - {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, + {filepath.Join(r.env.goroot(), "/src"), gopathwalk.RootGOROOT}, } if r.main != nil { r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule}) @@ -106,7 +107,7 @@ func (r *ModuleResolver) init() error { if vendorEnabled { r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) } else { - addDep := func(mod *ModuleJSON) { + addDep := func(mod *gocommand.ModuleJSON) { if mod.Replace == nil { // This is redundant with the cache, but we'll skip it cheaply enough. r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache}) @@ -146,22 +147,24 @@ func (r *ModuleResolver) init() error { } func (r *ModuleResolver) initAllMods() error { - stdout, err := r.env.invokeGo("list", "-m", "-json", "...") + stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-json", "...") if err != nil { return err } for dec := json.NewDecoder(stdout); dec.More(); { - mod := &ModuleJSON{} + mod := &gocommand.ModuleJSON{} if err := dec.Decode(mod); err != nil { return err } if mod.Dir == "" { - if r.env.Debug { + if r.env.Logf != nil { r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path) } // Can't do anything with a module that's not downloaded. continue } + // golang/go#36193: the go command doesn't always clean paths. + mod.Dir = filepath.Clean(mod.Dir) r.modsByModPath = append(r.modsByModPath, mod) r.modsByDir = append(r.modsByDir, mod) if mod.Main { @@ -195,7 +198,7 @@ func (r *ModuleResolver) ClearForNewMod() { // findPackage returns the module and directory that contains the package at // the given import path, or returns nil, "" if no module is in scope. -func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) { +func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) { // This can't find packages in the stdlib, but that's harmless for all // the existing code paths. for _, m := range r.modsByModPath { @@ -281,7 +284,7 @@ func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info // findModuleByDir returns the module that contains dir, or nil if no such // module is in scope. -func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { +func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON { // This is quite tricky and may not be correct. dir could be: // - a package in the main module. // - a replace target underneath the main module's directory. 
@@ -308,7 +311,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { // dirIsNestedModule reports if dir is contained in a nested module underneath // mod, not actually in mod. -func (r *ModuleResolver) dirIsNestedModule(dir string, mod *ModuleJSON) bool { +func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON) bool { if !strings.HasPrefix(dir, mod.Dir) { return false } @@ -468,7 +471,7 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error if r.scannedRoots[root] { continue } - gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true}) + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true}) r.scannedRoots[root] = true } close(scanDone) @@ -488,7 +491,7 @@ func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { return modRelevance(mod) } -func modRelevance(mod *ModuleJSON) int { +func modRelevance(mod *gocommand.ModuleJSON) int { switch { case mod == nil: // out of scope return MaxRelevance - 4 @@ -581,7 +584,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir } modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) if err != nil { - if r.env.Debug { + if r.env.Logf != nil { r.env.Logf("decoding module cache path %q: %v", subdir, err) } return directoryPackageInfo{ @@ -654,63 +657,3 @@ func modulePath(mod []byte) string { } return "" // missing module path } - -var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) - -// vendorEnabled indicates if vendoring is enabled. -// Inspired by setDefaultBuildMod in modload/init.go -func vendorEnabled(env *ProcessEnv) (*ModuleJSON, bool, error) { - mainMod, go114, err := getMainModuleAnd114(env) - if err != nil { - return nil, false, err - } - matches := modFlagRegexp.FindStringSubmatch(env.GOFLAGS) - var modFlag string - if len(matches) != 0 { - modFlag = matches[1] - } - if modFlag != "" { - // Don't override an explicit '-mod=' argument. - return mainMod, modFlag == "vendor", nil - } - if mainMod == nil || !go114 { - return mainMod, false, nil - } - // Check 1.14's automatic vendor mode. - if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { - if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { - // The Go version is at least 1.14, and a vendor directory exists. - // Set -mod=vendor by default. - return mainMod, true, nil - } - } - return mainMod, false, nil -} - -// getMainModuleAnd114 gets the main module's information and whether the -// go command in use is 1.14+. This is the information needed to figure out -// if vendoring should be enabled. -func getMainModuleAnd114(env *ProcessEnv) (*ModuleJSON, bool, error) { - const format = `{{.Path}} -{{.Dir}} -{{.GoMod}} -{{.GoVersion}} -{{range context.ReleaseTags}}{{if eq . 
"go1.14"}}{{.}}{{end}}{{end}} -` - stdout, err := env.invokeGo("list", "-m", "-f", format) - if err != nil { - return nil, false, nil - } - lines := strings.Split(stdout.String(), "\n") - if len(lines) < 5 { - return nil, false, fmt.Errorf("unexpected stdout: %q", stdout) - } - mod := &ModuleJSON{ - Path: lines[0], - Dir: lines[1], - GoMod: lines[2], - GoVersion: lines[3], - Main: true, - } - return mod, lines[4] == "go1.14", nil -} diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go index 226279471..be8ffa25f 100644 --- a/vendor/golang.org/x/tools/internal/imports/sortimports.go +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -15,7 +15,7 @@ import ( // sortImports sorts runs of consecutive import lines in import blocks in f. // It also removes duplicate imports when it is possible to do so without data loss. -func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { +func sortImports(localPrefix string, fset *token.FileSet, f *ast.File) { for i, d := range f.Decls { d, ok := d.(*ast.GenDecl) if !ok || d.Tok != token.IMPORT { @@ -40,11 +40,11 @@ func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { for j, s := range d.Specs { if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line { // j begins a new run. End this one. - specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:j])...) + specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:j])...) i = j } } - specs = append(specs, sortSpecs(env, fset, f, d.Specs[i:])...) + specs = append(specs, sortSpecs(localPrefix, fset, f, d.Specs[i:])...) d.Specs = specs // Deduping can leave a blank line before the rparen; clean that up. @@ -60,7 +60,7 @@ func sortImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { // mergeImports merges all the import declarations into the first one. // Taken from golang.org/x/tools/ast/astutil. -func mergeImports(env *ProcessEnv, fset *token.FileSet, f *ast.File) { +func mergeImports(fset *token.FileSet, f *ast.File) { if len(f.Decls) <= 1 { return } @@ -142,7 +142,7 @@ type posSpan struct { End token.Pos } -func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { +func sortSpecs(localPrefix string, fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec { // Can't short-circuit here even if specs are already sorted, // since they might yet need deduplication. // A lone import, however, may be safely ignored. @@ -191,7 +191,7 @@ func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Sp // Reassign the import paths to have the same position sequence. // Reassign each comment to abut the end of its spec. // Sort the comments by new position. - sort.Sort(byImportSpec{env, specs}) + sort.Sort(byImportSpec{localPrefix, specs}) // Dedup. Thanks to our sorting, we can just consider // adjacent pairs of imports. 
@@ -245,8 +245,8 @@ func sortSpecs(env *ProcessEnv, fset *token.FileSet, f *ast.File, specs []ast.Sp } type byImportSpec struct { - env *ProcessEnv - specs []ast.Spec // slice of *ast.ImportSpec + localPrefix string + specs []ast.Spec // slice of *ast.ImportSpec } func (x byImportSpec) Len() int { return len(x.specs) } @@ -255,8 +255,8 @@ func (x byImportSpec) Less(i, j int) bool { ipath := importPath(x.specs[i]) jpath := importPath(x.specs[j]) - igroup := importGroup(x.env, ipath) - jgroup := importGroup(x.env, jpath) + igroup := importGroup(x.localPrefix, ipath) + jgroup := importGroup(x.localPrefix, jpath) if igroup != jgroup { return igroup < jgroup } diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go index 7e60eb04e..16252111f 100644 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ b/vendor/golang.org/x/tools/internal/imports/zstdlib.go @@ -415,6 +415,9 @@ var stdlib = map[string][]string{ "crypto/tls": []string{ "Certificate", "CertificateRequestInfo", + "CipherSuite", + "CipherSuiteName", + "CipherSuites", "Client", "ClientAuthType", "ClientHelloInfo", @@ -434,6 +437,7 @@ var stdlib = map[string][]string{ "ECDSAWithP521AndSHA512", "ECDSAWithSHA1", "Ed25519", + "InsecureCipherSuites", "Listen", "LoadX509KeyPair", "NewLRUClientSessionCache", @@ -465,6 +469,7 @@ var stdlib = map[string][]string{ "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", @@ -473,6 +478,7 @@ var stdlib = map[string][]string{ "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_RC4_128_SHA", "TLS_FALLBACK_SCSV", "TLS_RSA_WITH_3DES_EDE_CBC_SHA", @@ -698,36 +704,65 @@ var stdlib = map[string][]string{ "Attr", "AttrAbstractOrigin", "AttrAccessibility", + "AttrAddrBase", "AttrAddrClass", + "AttrAlignment", "AttrAllocated", "AttrArtificial", "AttrAssociated", "AttrBaseTypes", + "AttrBinaryScale", "AttrBitOffset", "AttrBitSize", "AttrByteSize", + "AttrCallAllCalls", + "AttrCallAllSourceCalls", + "AttrCallAllTailCalls", "AttrCallColumn", + "AttrCallDataLocation", + "AttrCallDataValue", "AttrCallFile", "AttrCallLine", + "AttrCallOrigin", + "AttrCallPC", + "AttrCallParameter", + "AttrCallReturnPC", + "AttrCallTailCall", + "AttrCallTarget", + "AttrCallTargetClobbered", + "AttrCallValue", "AttrCalling", "AttrCommonRef", "AttrCompDir", + "AttrConstExpr", "AttrConstValue", "AttrContainingType", "AttrCount", + "AttrDataBitOffset", "AttrDataLocation", "AttrDataMemberLoc", + "AttrDecimalScale", + "AttrDecimalSign", "AttrDeclColumn", "AttrDeclFile", "AttrDeclLine", "AttrDeclaration", "AttrDefaultValue", + "AttrDefaulted", + "AttrDeleted", "AttrDescription", + "AttrDigitCount", "AttrDiscr", "AttrDiscrList", "AttrDiscrValue", + "AttrDwoName", + "AttrElemental", "AttrEncoding", + "AttrEndianity", "AttrEntrypc", + "AttrEnumClass", + "AttrExplicit", + "AttrExportSymbols", "AttrExtension", "AttrExternal", "AttrFrameBase", @@ -738,27 +773,47 @@ var stdlib = map[string][]string{ "AttrInline", "AttrIsOptional", "AttrLanguage", + "AttrLinkageName", "AttrLocation", + "AttrLoclistsBase", "AttrLowerBound", "AttrLowpc", "AttrMacroInfo", + "AttrMacros", + "AttrMainSubprogram", + "AttrMutable", 
"AttrName", "AttrNamelistItem", + "AttrNoreturn", + "AttrObjectPointer", "AttrOrdering", + "AttrPictureString", "AttrPriority", "AttrProducer", "AttrPrototyped", + "AttrPure", "AttrRanges", + "AttrRank", + "AttrRecursive", + "AttrReference", "AttrReturnAddr", + "AttrRnglistsBase", + "AttrRvalueReference", "AttrSegment", "AttrSibling", + "AttrSignature", + "AttrSmall", "AttrSpecification", "AttrStartScope", "AttrStaticLink", "AttrStmtList", + "AttrStrOffsetsBase", "AttrStride", "AttrStrideSize", "AttrStringLength", + "AttrStringLengthBitSize", + "AttrStringLengthByteSize", + "AttrThreadsScaled", "AttrTrampoline", "AttrType", "AttrUpperBound", @@ -772,18 +827,23 @@ var stdlib = map[string][]string{ "BoolType", "CharType", "Class", + "ClassAddrPtr", "ClassAddress", "ClassBlock", "ClassConstant", "ClassExprLoc", "ClassFlag", "ClassLinePtr", + "ClassLocList", "ClassLocListPtr", "ClassMacPtr", "ClassRangeListPtr", "ClassReference", "ClassReferenceAlt", "ClassReferenceSig", + "ClassRngList", + "ClassRngListsPtr", + "ClassStrOffsetsPtr", "ClassString", "ClassStringAlt", "ClassUnknown", @@ -814,9 +874,13 @@ var stdlib = map[string][]string{ "Tag", "TagAccessDeclaration", "TagArrayType", + "TagAtomicType", "TagBaseType", + "TagCallSite", + "TagCallSiteParameter", "TagCatchDwarfBlock", "TagClassType", + "TagCoarrayType", "TagCommonDwarfBlock", "TagCommonInclusion", "TagCompileUnit", @@ -824,12 +888,15 @@ var stdlib = map[string][]string{ "TagConstType", "TagConstant", "TagDwarfProcedure", + "TagDynamicType", "TagEntryPoint", "TagEnumerationType", "TagEnumerator", "TagFileType", "TagFormalParameter", "TagFriend", + "TagGenericSubrange", + "TagImmutableType", "TagImportedDeclaration", "TagImportedModule", "TagImportedUnit", @@ -853,6 +920,7 @@ var stdlib = map[string][]string{ "TagRvalueReferenceType", "TagSetType", "TagSharedType", + "TagSkeletonUnit", "TagStringType", "TagStructType", "TagSubprogram", @@ -2359,6 +2427,7 @@ var stdlib = map[string][]string{ "RawValue", "StructuralError", "SyntaxError", + "TagBMPString", "TagBitString", "TagBoolean", "TagEnum", @@ -2787,6 +2856,7 @@ var stdlib = map[string][]string{ "IsPredeclared", "Mode", "New", + "NewFromFiles", "Note", "Package", "PreserveAST", @@ -3115,6 +3185,11 @@ var stdlib = map[string][]string{ "New64", "New64a", }, + "hash/maphash": []string{ + "Hash", + "MakeSeed", + "Seed", + }, "html": []string{ "EscapeString", "UnescapeString", @@ -3367,6 +3442,7 @@ var stdlib = map[string][]string{ "Ldate", "Llongfile", "Lmicroseconds", + "Lmsgprefix", "Logger", "Lshortfile", "LstdFlags", @@ -3443,6 +3519,7 @@ var stdlib = map[string][]string{ "Exp", "Exp2", "Expm1", + "FMA", "Float32bits", "Float32frombits", "Float64bits", @@ -3567,6 +3644,9 @@ var stdlib = map[string][]string{ "OnesCount32", "OnesCount64", "OnesCount8", + "Rem", + "Rem32", + "Rem64", "Reverse", "Reverse16", "Reverse32", @@ -5140,7 +5220,10 @@ var stdlib = map[string][]string{ "CTL_NET", "CTL_QUERY", "CTRL_BREAK_EVENT", + "CTRL_CLOSE_EVENT", "CTRL_C_EVENT", + "CTRL_LOGOFF_EVENT", + "CTRL_SHUTDOWN_EVENT", "CancelIo", "CancelIoEx", "CertAddCertificateContextToStore", @@ -10112,6 +10195,7 @@ var stdlib = map[string][]string{ "Duployan", "Egyptian_Hieroglyphs", "Elbasan", + "Elymaic", "Ethiopic", "Extender", "FoldCategory", @@ -10215,6 +10299,7 @@ var stdlib = map[string][]string{ "Myanmar", "N", "Nabataean", + "Nandinagari", "Nd", "New_Tai_Lue", "Newa", @@ -10224,6 +10309,7 @@ var stdlib = map[string][]string{ "Noncharacter_Code_Point", "Number", "Nushu", + "Nyiakeng_Puachue_Hmong", 
"Ogham", "Ol_Chiki", "Old_Hungarian", @@ -10331,6 +10417,7 @@ var stdlib = map[string][]string{ "Vai", "Variation_Selector", "Version", + "Wancho", "Warang_Citi", "White_Space", "Yi", diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 0c0dbb6a9..2c4527f24 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -1,4 +1,14 @@ // Package packagesinternal exposes internal-only fields from go/packages. package packagesinternal +import ( + "golang.org/x/tools/internal/gocommand" +) + var GetForTest = func(p interface{}) string { return "" } + +var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } + +var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} + +var TypecheckCgo int diff --git a/vendor/golang.org/x/tools/internal/span/parse.go b/vendor/golang.org/x/tools/internal/span/parse.go new file mode 100644 index 000000000..aa17c84ec --- /dev/null +++ b/vendor/golang.org/x/tools/internal/span/parse.go @@ -0,0 +1,100 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "strconv" + "strings" + "unicode/utf8" +) + +// Parse returns the location represented by the input. +// Only file paths are accepted, not URIs. +// The returned span will be normalized, and thus if printed may produce a +// different string. +func Parse(input string) Span { + // :0:0#0-0:0#0 + valid := input + var hold, offset int + hadCol := false + suf := rstripSuffix(input) + if suf.sep == "#" { + offset = suf.num + suf = rstripSuffix(suf.remains) + } + if suf.sep == ":" { + valid = suf.remains + hold = suf.num + hadCol = true + suf = rstripSuffix(suf.remains) + } + switch { + case suf.sep == ":": + return New(URIFromPath(suf.remains), NewPoint(suf.num, hold, offset), Point{}) + case suf.sep == "-": + // we have a span, fall out of the case to continue + default: + // separator not valid, rewind to either the : or the start + return New(URIFromPath(valid), NewPoint(hold, 0, offset), Point{}) + } + // only the span form can get here + // at this point we still don't know what the numbers we have mean + // if have not yet seen a : then we might have either a line or a column depending + // on whether start has a column or not + // we build an end point and will fix it later if needed + end := NewPoint(suf.num, hold, offset) + hold, offset = 0, 0 + suf = rstripSuffix(suf.remains) + if suf.sep == "#" { + offset = suf.num + suf = rstripSuffix(suf.remains) + } + if suf.sep != ":" { + // turns out we don't have a span after all, rewind + return New(URIFromPath(valid), end, Point{}) + } + valid = suf.remains + hold = suf.num + suf = rstripSuffix(suf.remains) + if suf.sep != ":" { + // line#offset only + return New(URIFromPath(valid), NewPoint(hold, 0, offset), end) + } + // we have a column, so if end only had one number, it is also the column + if !hadCol { + end = NewPoint(suf.num, end.v.Line, end.v.Offset) + } + return New(URIFromPath(suf.remains), NewPoint(suf.num, hold, offset), end) +} + +type suffix struct { + remains string + sep string + num int +} + +func rstripSuffix(input string) suffix { + if len(input) == 0 { + return suffix{"", "", -1} + } + remains := input + num := -1 + // first see if we have a number at the end + last := 
strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' }) + if last >= 0 && last < len(remains)-1 { + number, err := strconv.ParseInt(remains[last+1:], 10, 64) + if err == nil { + num = int(number) + remains = remains[:last+1] + } + } + // now see if we have a trailing separator + r, w := utf8.DecodeLastRuneInString(remains) + if r != ':' && r != '#' { + return suffix{input, "", -1} + } + remains = remains[:len(remains)-w] + return suffix{remains, string(r), num} +} diff --git a/vendor/golang.org/x/tools/internal/span/span.go b/vendor/golang.org/x/tools/internal/span/span.go new file mode 100644 index 000000000..4d2ad0986 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/span/span.go @@ -0,0 +1,285 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package span contains support for representing positions and ranges in +// text files. +package span + +import ( + "encoding/json" + "fmt" + "path" +) + +// Span represents a source code range in standardized form. +type Span struct { + v span +} + +// Point represents a single point within a file. +// In general this should only be used as part of a Span, as on its own it +// does not carry enough information. +type Point struct { + v point +} + +type span struct { + URI URI `json:"uri"` + Start point `json:"start"` + End point `json:"end"` +} + +type point struct { + Line int `json:"line"` + Column int `json:"column"` + Offset int `json:"offset"` +} + +// Invalid is a span that reports false from IsValid +var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}} + +var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}} + +// Converter is the interface to an object that can convert between line:column +// and offset forms for a single file. +type Converter interface { + //ToPosition converts from an offset to a line:column pair. + ToPosition(offset int) (int, int, error) + //ToOffset converts from a line:column pair to an offset.
+ ToOffset(line, col int) (int, error) +} + +func New(uri URI, start Point, end Point) Span { + s := Span{v: span{URI: uri, Start: start.v, End: end.v}} + s.v.clean() + return s +} + +func NewPoint(line, col, offset int) Point { + p := Point{v: point{Line: line, Column: col, Offset: offset}} + p.v.clean() + return p +} + +func Compare(a, b Span) int { + if r := CompareURI(a.URI(), b.URI()); r != 0 { + return r + } + if r := comparePoint(a.v.Start, b.v.Start); r != 0 { + return r + } + return comparePoint(a.v.End, b.v.End) +} + +func ComparePoint(a, b Point) int { + return comparePoint(a.v, b.v) +} + +func comparePoint(a, b point) int { + if !a.hasPosition() { + if a.Offset < b.Offset { + return -1 + } + if a.Offset > b.Offset { + return 1 + } + return 0 + } + if a.Line < b.Line { + return -1 + } + if a.Line > b.Line { + return 1 + } + if a.Column < b.Column { + return -1 + } + if a.Column > b.Column { + return 1 + } + return 0 +} + +func (s Span) HasPosition() bool { return s.v.Start.hasPosition() } +func (s Span) HasOffset() bool { return s.v.Start.hasOffset() } +func (s Span) IsValid() bool { return s.v.Start.isValid() } +func (s Span) IsPoint() bool { return s.v.Start == s.v.End } +func (s Span) URI() URI { return s.v.URI } +func (s Span) Start() Point { return Point{s.v.Start} } +func (s Span) End() Point { return Point{s.v.End} } +func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) } +func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) } + +func (p Point) HasPosition() bool { return p.v.hasPosition() } +func (p Point) HasOffset() bool { return p.v.hasOffset() } +func (p Point) IsValid() bool { return p.v.isValid() } +func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) } +func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) } +func (p Point) Line() int { + if !p.v.hasPosition() { + panic(fmt.Errorf("position not set in %v", p.v)) + } + return p.v.Line +} +func (p Point) Column() int { + if !p.v.hasPosition() { + panic(fmt.Errorf("position not set in %v", p.v)) + } + return p.v.Column +} +func (p Point) Offset() int { + if !p.v.hasOffset() { + panic(fmt.Errorf("offset not set in %v", p.v)) + } + return p.v.Offset +} + +func (p point) hasPosition() bool { return p.Line > 0 } +func (p point) hasOffset() bool { return p.Offset >= 0 } +func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() } +func (p point) isZero() bool { + return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0) +} + +func (s *span) clean() { + //this presumes the points are already clean + if !s.End.isValid() || (s.End == point{}) { + s.End = s.Start + } +} + +func (p *point) clean() { + if p.Line < 0 { + p.Line = 0 + } + if p.Column <= 0 { + if p.Line > 0 { + p.Column = 1 + } else { + p.Column = 0 + } + } + if p.Offset == 0 && (p.Line > 1 || p.Column > 1) { + p.Offset = -1 + } +} + +// Format implements fmt.Formatter to print the Location in a standard form. +// The format produced is one that can be read back in using Parse. 
+func (s Span) Format(f fmt.State, c rune) { + fullForm := f.Flag('+') + preferOffset := f.Flag('#') + // we should always have a uri, simplify if it is file format + //TODO: make sure the end of the uri is unambiguous + uri := string(s.v.URI) + if c == 'f' { + uri = path.Base(uri) + } else if !fullForm { + uri = s.v.URI.Filename() + } + fmt.Fprint(f, uri) + if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) { + return + } + // see which bits of start to write + printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition()) + printLine := s.HasPosition() && (fullForm || !printOffset) + printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1)) + fmt.Fprint(f, ":") + if printLine { + fmt.Fprintf(f, "%d", s.v.Start.Line) + } + if printColumn { + fmt.Fprintf(f, ":%d", s.v.Start.Column) + } + if printOffset { + fmt.Fprintf(f, "#%d", s.v.Start.Offset) + } + // start is written, do we need end? + if s.IsPoint() { + return + } + // we don't print the line if it did not change + printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line) + fmt.Fprint(f, "-") + if printLine { + fmt.Fprintf(f, "%d", s.v.End.Line) + } + if printColumn { + if printLine { + fmt.Fprint(f, ":") + } + fmt.Fprintf(f, "%d", s.v.End.Column) + } + if printOffset { + fmt.Fprintf(f, "#%d", s.v.End.Offset) + } +} + +func (s Span) WithPosition(c Converter) (Span, error) { + if err := s.update(c, true, false); err != nil { + return Span{}, err + } + return s, nil +} + +func (s Span) WithOffset(c Converter) (Span, error) { + if err := s.update(c, false, true); err != nil { + return Span{}, err + } + return s, nil +} + +func (s Span) WithAll(c Converter) (Span, error) { + if err := s.update(c, true, true); err != nil { + return Span{}, err + } + return s, nil +} + +func (s *Span) update(c Converter, withPos, withOffset bool) error { + if !s.IsValid() { + return fmt.Errorf("cannot add information to an invalid span") + } + if withPos && !s.HasPosition() { + if err := s.v.Start.updatePosition(c); err != nil { + return err + } + if s.v.End.Offset == s.v.Start.Offset { + s.v.End = s.v.Start + } else if err := s.v.End.updatePosition(c); err != nil { + return err + } + } + if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) { + if err := s.v.Start.updateOffset(c); err != nil { + return err + } + if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column { + s.v.End.Offset = s.v.Start.Offset + } else if err := s.v.End.updateOffset(c); err != nil { + return err + } + } + return nil +} + +func (p *point) updatePosition(c Converter) error { + line, col, err := c.ToPosition(p.Offset) + if err != nil { + return err + } + p.Line = line + p.Column = col + return nil +} + +func (p *point) updateOffset(c Converter) error { + offset, err := c.ToOffset(p.Line, p.Column) + if err != nil { + return err + } + p.Offset = offset + return nil +} diff --git a/vendor/golang.org/x/tools/internal/span/token.go b/vendor/golang.org/x/tools/internal/span/token.go new file mode 100644 index 000000000..1710b7779 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/span/token.go @@ -0,0 +1,182 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "fmt" + "go/token" +) + +// Range represents a source code range in token.Pos form. 
+// It also carries the FileSet that produced the positions, so that it is +// self contained. +type Range struct { + FileSet *token.FileSet + Start token.Pos + End token.Pos + Converter Converter +} + +// TokenConverter is a Converter backed by a token file set and file. +// It uses the file set methods to work out the conversions, which +// makes it fast and does not require the file contents. +type TokenConverter struct { + fset *token.FileSet + file *token.File +} + +// NewRange creates a new Range from a FileSet and two positions. +// To represent a point pass a 0 as the end pos. +func NewRange(fset *token.FileSet, start, end token.Pos) Range { + return Range{ + FileSet: fset, + Start: start, + End: end, + } +} + +// NewTokenConverter returns an implementation of Converter backed by a +// token.File. +func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter { + return &TokenConverter{fset: fset, file: f} +} + +// NewContentConverter returns an implementation of Converter for the +// given file content. +func NewContentConverter(filename string, content []byte) *TokenConverter { + fset := token.NewFileSet() + f := fset.AddFile(filename, -1, len(content)) + f.SetLinesForContent(content) + return &TokenConverter{fset: fset, file: f} +} + +// IsPoint returns true if the range represents a single point. +func (r Range) IsPoint() bool { + return r.Start == r.End +} + +// Span converts a Range to a Span that represents the Range. +// It will fill in all the members of the Span, calculating the line and column +// information. +func (r Range) Span() (Span, error) { + if !r.Start.IsValid() { + return Span{}, fmt.Errorf("start pos is not valid") + } + f := r.FileSet.File(r.Start) + if f == nil { + return Span{}, fmt.Errorf("file not found in FileSet") + } + var s Span + var err error + var startFilename string + startFilename, s.v.Start.Line, s.v.Start.Column, err = position(f, r.Start) + if err != nil { + return Span{}, err + } + s.v.URI = URIFromPath(startFilename) + if r.End.IsValid() { + var endFilename string + endFilename, s.v.End.Line, s.v.End.Column, err = position(f, r.End) + if err != nil { + return Span{}, err + } + // In the presence of line directives, a single File can have sections from + // multiple file names. + if endFilename != startFilename { + return Span{}, fmt.Errorf("span begins in file %q but ends in %q", startFilename, endFilename) + } + } + s.v.Start.clean() + s.v.End.clean() + s.v.clean() + if r.Converter != nil { + return s.WithOffset(r.Converter) + } + if startFilename != f.Name() { + return Span{}, fmt.Errorf("must supply Converter for file %q containing lines from %q", f.Name(), startFilename) + } + return s.WithOffset(NewTokenConverter(r.FileSet, f)) +} + +func position(f *token.File, pos token.Pos) (string, int, int, error) { + off, err := offset(f, pos) + if err != nil { + return "", 0, 0, err + } + return positionFromOffset(f, off) +} + +func positionFromOffset(f *token.File, offset int) (string, int, int, error) { + if offset > f.Size() { + return "", 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, f.Size()) + } + pos := f.Pos(offset) + p := f.Position(pos) + if offset == f.Size() { + return p.Filename, p.Line + 1, 1, nil + } + return p.Filename, p.Line, p.Column, nil +} + +// offset is a copy of the Offset function in go/token, but with the adjustment +// that it does not panic on invalid positions. 
+func offset(f *token.File, pos token.Pos) (int, error) { + if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() { + return 0, fmt.Errorf("invalid pos") + } + return int(pos) - f.Base(), nil +} + +// Range converts a Span to a Range that represents the Span for the supplied +// File. +func (s Span) Range(converter *TokenConverter) (Range, error) { + s, err := s.WithOffset(converter) + if err != nil { + return Range{}, err + } + // go/token will panic if the offset is larger than the file's size, + // so check here to avoid panicking. + if s.Start().Offset() > converter.file.Size() { + return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size()) + } + if s.End().Offset() > converter.file.Size() { + return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size()) + } + return Range{ + FileSet: converter.fset, + Start: converter.file.Pos(s.Start().Offset()), + End: converter.file.Pos(s.End().Offset()), + Converter: converter, + }, nil +} + +func (l *TokenConverter) ToPosition(offset int) (int, int, error) { + _, line, col, err := positionFromOffset(l.file, offset) + return line, col, err +} + +func (l *TokenConverter) ToOffset(line, col int) (int, error) { + if line < 0 { + return -1, fmt.Errorf("line is not valid") + } + lineMax := l.file.LineCount() + 1 + if line > lineMax { + return -1, fmt.Errorf("line is beyond end of file %v", lineMax) + } else if line == lineMax { + if col > 1 { + return -1, fmt.Errorf("column is beyond end of file") + } + // at the end of the file, allowing for a trailing eol + return l.file.Size(), nil + } + pos := lineStart(l.file, line) + if !pos.IsValid() { + return -1, fmt.Errorf("line is not in file") + } + // we assume that column is in bytes here, and that the first byte of a + // line is at column 1 + pos += token.Pos(col - 1) + return offset(l.file, pos) +} diff --git a/vendor/golang.org/x/tools/internal/span/token111.go b/vendor/golang.org/x/tools/internal/span/token111.go new file mode 100644 index 000000000..bf7a5406b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/span/token111.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.12 + +package span + +import ( + "go/token" +) + +// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go +// versions <= 1.11, we borrow logic from the analysisutil package. +// TODO(rstambler): Delete this file when we no longer support Go 1.11. +func lineStart(f *token.File, line int) token.Pos { + // Use binary search to find the start offset of this line. + + min := 0 // inclusive + max := f.Size() // exclusive + for { + offset := (min + max) / 2 + pos := f.Pos(offset) + posn := f.Position(pos) + if posn.Line == line { + return pos - (token.Pos(posn.Column) - 1) + } + + if min+1 >= max { + return token.NoPos + } + + if posn.Line < line { + min = offset + } else { + max = offset + } + } +} diff --git a/vendor/golang.org/x/tools/internal/span/token112.go b/vendor/golang.org/x/tools/internal/span/token112.go new file mode 100644 index 000000000..017aec9c1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/span/token112.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.12 + +package span + +import ( + "go/token" +) + +// TODO(rstambler): Delete this file when we no longer support Go 1.11. +func lineStart(f *token.File, line int) token.Pos { + return f.LineStart(line) +} diff --git a/vendor/golang.org/x/tools/internal/span/uri.go b/vendor/golang.org/x/tools/internal/span/uri.go new file mode 100644 index 000000000..78e71fe45 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/span/uri.go @@ -0,0 +1,169 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "fmt" + "net/url" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "unicode" +) + +const fileScheme = "file" + +// URI represents the full URI for a file. +type URI string + +func (uri URI) IsFile() bool { + return strings.HasPrefix(string(uri), "file://") +} + +// Filename returns the file path for the given URI. +// It is an error to call this on a URI that is not a valid filename. +func (uri URI) Filename() string { + filename, err := filename(uri) + if err != nil { + panic(err) + } + return filepath.FromSlash(filename) +} + +func filename(uri URI) (string, error) { + if uri == "" { + return "", nil + } + u, err := url.ParseRequestURI(string(uri)) + if err != nil { + return "", err + } + if u.Scheme != fileScheme { + return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri) + } + // If the URI is a Windows URI, we trim the leading "/" and uppercase + // the drive letter, which will never be case sensitive. + if isWindowsDriveURIPath(u.Path) { + u.Path = strings.ToUpper(string(u.Path[1])) + u.Path[2:] + } + return u.Path, nil +} + +func URIFromURI(s string) URI { + if !strings.HasPrefix(s, "file://") { + return URI(s) + } + + if !strings.HasPrefix(s, "file:///") { + // VS Code sends URLs with only two slashes, which are invalid. golang/go#39789. + s = "file:///" + s[len("file://"):] + } + // Even though the input is a URI, it may not be in canonical form. VS Code + // in particular over-escapes :, @, etc. Unescape and re-encode to canonicalize. + path, err := url.PathUnescape(s[len("file://"):]) + if err != nil { + panic(err) + } + + // File URIs from Windows may have lowercase drive letters. + // Since drive letters are guaranteed to be case insensitive, + // we change them to uppercase to remain consistent. + // For example, file:///c:/x/y/z becomes file:///C:/x/y/z. + if isWindowsDriveURIPath(path) { + path = path[:1] + strings.ToUpper(string(path[1])) + path[2:] + } + u := url.URL{Scheme: fileScheme, Path: path} + return URI(u.String()) +} + +func CompareURI(a, b URI) int { + if equalURI(a, b) { + return 0 + } + if a < b { + return -1 + } + return 1 +} + +func equalURI(a, b URI) bool { + if a == b { + return true + } + // If we have the same URI basename, we may still have the same file URIs. + if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) { + return false + } + fa, err := filename(a) + if err != nil { + return false + } + fb, err := filename(b) + if err != nil { + return false + } + // Stat the files to check if they are equal. + infoa, err := os.Stat(filepath.FromSlash(fa)) + if err != nil { + return false + } + infob, err := os.Stat(filepath.FromSlash(fb)) + if err != nil { + return false + } + return os.SameFile(infoa, infob) +} + +// URIFromPath returns a span URI for the supplied file path. +// It will always have the file scheme.
+func URIFromPath(path string) URI { + if path == "" { + return "" + } + // Handle standard library paths that contain the literal "$GOROOT". + // TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT. + const prefix = "$GOROOT" + if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) { + suffix := path[len(prefix):] + path = runtime.GOROOT() + suffix + } + if !isWindowsDrivePath(path) { + if abs, err := filepath.Abs(path); err == nil { + path = abs + } + } + // Check the file path again, in case it became absolute. + if isWindowsDrivePath(path) { + path = "/" + strings.ToUpper(string(path[0])) + path[1:] + } + path = filepath.ToSlash(path) + u := url.URL{ + Scheme: fileScheme, + Path: path, + } + return URI(u.String()) +} + +// isWindowsDrivePath returns true if the file path is of the form used by +// Windows. We check if the path begins with a drive letter, followed by a ":". +// For example: C:/x/y/z. +func isWindowsDrivePath(path string) bool { + if len(path) < 3 { + return false + } + return unicode.IsLetter(rune(path[0])) && path[1] == ':' +} + +// isWindowsDriveURIPath returns true if the file URI is of the format used by +// Windows URIs. The url.Parse package does not specially handle Windows paths +// (see golang/go#6027). We check if the URI path has a drive prefix (e.g. "/C:"). +func isWindowsDriveURIPath(uri string) bool { + if len(uri) < 4 { + return false + } + return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' +} diff --git a/vendor/golang.org/x/tools/internal/span/utf16.go b/vendor/golang.org/x/tools/internal/span/utf16.go new file mode 100644 index 000000000..561b3fa50 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/span/utf16.go @@ -0,0 +1,94 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "fmt" + "unicode/utf16" + "unicode/utf8" +) + +// ToUTF16Column calculates the utf16 column expressed by the point given the +// supplied file contents. +// This is used to convert from the native (always in bytes) column +// representation and the utf16 counts used by some editors. +func ToUTF16Column(p Point, content []byte) (int, error) { + if content == nil { + return -1, fmt.Errorf("ToUTF16Column: missing content") + } + if !p.HasPosition() { + return -1, fmt.Errorf("ToUTF16Column: point is missing position") + } + if !p.HasOffset() { + return -1, fmt.Errorf("ToUTF16Column: point is missing offset") + } + offset := p.Offset() // 0-based + colZero := p.Column() - 1 // 0-based + if colZero == 0 { + // 0-based column 0, so it must be chr 1 + return 1, nil + } else if colZero < 0 { + return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero) + } + // work out the offset at the start of the line using the column + lineOffset := offset - colZero + if lineOffset < 0 || offset > len(content) { + return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content)) + } + // Use the offset to pick out the line start. + // This cannot panic: offset > len(content) and lineOffset < offset. + start := content[lineOffset:] + + // Now, truncate down to the supplied column. + start = start[:colZero] + + // and count the number of utf16 characters + // in theory we could do this by hand more efficiently...
+ return len(utf16.Encode([]rune(string(start)))) + 1, nil +} + +// FromUTF16Column advances the point by the utf16 character offset given the +// supplied line contents. +// This is used to convert from the utf16 counts used by some editors to the +// native (always in bytes) column representation. +func FromUTF16Column(p Point, chr int, content []byte) (Point, error) { + if !p.HasOffset() { + return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset") + } + // if chr is 1 then no adjustment needed + if chr <= 1 { + return p, nil + } + if p.Offset() >= len(content) { + return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content)) + } + remains := content[p.Offset():] + // scan forward the specified number of characters + for count := 1; count < chr; count++ { + if len(remains) <= 0 { + return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content") + } + r, w := utf8.DecodeRune(remains) + if r == '\n' { + // Per the LSP spec: + // + // > If the character value is greater than the line length it + // > defaults back to the line length. + break + } + remains = remains[w:] + if r >= 0x10000 { + // a two point rune + count++ + // if we finished in a two point rune, do not advance past the first + if count >= chr { + break + } + } + p.v.Column += w + p.v.Offset += w + } + return p, nil +} diff --git a/vendor/golang.org/x/tools/internal/testenv/testenv.go b/vendor/golang.org/x/tools/internal/testenv/testenv.go deleted file mode 100644 index 0cc90d26a..000000000 --- a/vendor/golang.org/x/tools/internal/testenv/testenv.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package testenv contains helper functions for skipping tests -// based on which tools are present in the environment. -package testenv - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "runtime" - "strings" - "sync" -) - -// Testing is an abstraction of a *testing.T. -type Testing interface { - Skipf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) -} - -type helperer interface { - Helper() -} - -// packageMainIsDevel reports whether the module containing package main -// is a development version (if module information is available). -// -// Builds in GOPATH mode and builds that lack module information are assumed to -// be development versions. -var packageMainIsDevel = func() bool { return true } - -var checkGoGoroot struct { - once sync.Once - err error -} - -func hasTool(tool string) error { - _, err := exec.LookPath(tool) - if err != nil { - return err - } - - switch tool { - case "patch": - // check that the patch tools supports the -o argument - temp, err := ioutil.TempFile("", "patch-test") - if err != nil { - return err - } - temp.Close() - defer os.Remove(temp.Name()) - cmd := exec.Command(tool, "-o", temp.Name()) - if err := cmd.Run(); err != nil { - return err - } - - case "go": - checkGoGoroot.once.Do(func() { - // Ensure that the 'go' command found by exec.LookPath is from the correct - // GOROOT. Otherwise, 'some/path/go test ./...' will test against some - // version of the 'go' binary other than 'some/path/go', which is almost - // certainly not what the user intended. 
- out, err := exec.Command(tool, "env", "GOROOT").CombinedOutput() - if err != nil { - checkGoGoroot.err = err - return - } - GOROOT := strings.TrimSpace(string(out)) - if GOROOT != runtime.GOROOT() { - checkGoGoroot.err = fmt.Errorf("'go env GOROOT' does not match runtime.GOROOT:\n\tgo env: %s\n\tGOROOT: %s", GOROOT, runtime.GOROOT()) - } - }) - if checkGoGoroot.err != nil { - return checkGoGoroot.err - } - } - - return nil -} - -func allowMissingTool(tool string) bool { - if runtime.GOOS == "android" { - // Android builds generally run tests on a separate machine from the build, - // so don't expect any external tools to be available. - return true - } - - switch tool { - case "go": - if os.Getenv("GO_BUILDER_NAME") == "illumos-amd64-joyent" { - // Work around a misconfigured builder (see https://golang.org/issue/33950). - return true - } - case "diff": - if os.Getenv("GO_BUILDER_NAME") != "" { - return true - } - case "patch": - if os.Getenv("GO_BUILDER_NAME") != "" { - return true - } - } - - // If a developer is actively working on this test, we expect them to have all - // of its dependencies installed. However, if it's just a dependency of some - // other module (for example, being run via 'go test all'), we should be more - // tolerant of unusual environments. - return !packageMainIsDevel() -} - -// NeedsTool skips t if the named tool is not present in the path. -func NeedsTool(t Testing, tool string) { - if t, ok := t.(helperer); ok { - t.Helper() - } - err := hasTool(tool) - if err == nil { - return - } - if allowMissingTool(tool) { - t.Skipf("skipping because %s tool not available: %v", tool, err) - } else { - t.Fatalf("%s tool not available: %v", tool, err) - } -} - -// NeedsGoPackages skips t if the go/packages driver (or 'go' tool) implied by -// the current process environment is not present in the path. -func NeedsGoPackages(t Testing) { - if t, ok := t.(helperer); ok { - t.Helper() - } - - tool := os.Getenv("GOPACKAGESDRIVER") - switch tool { - case "off": - // "off" forces go/packages to use the go command. - tool = "go" - case "": - if _, err := exec.LookPath("gopackagesdriver"); err == nil { - tool = "gopackagesdriver" - } else { - tool = "go" - } - } - - NeedsTool(t, tool) -} - -// NeedsGoPackagesEnv skips t if the go/packages driver (or 'go' tool) implied -// by env is not present in the path. -func NeedsGoPackagesEnv(t Testing, env []string) { - if t, ok := t.(helperer); ok { - t.Helper() - } - - for _, v := range env { - if strings.HasPrefix(v, "GOPACKAGESDRIVER=") { - tool := strings.TrimPrefix(v, "GOPACKAGESDRIVER=") - if tool == "off" { - NeedsTool(t, "go") - } else { - NeedsTool(t, tool) - } - return - } - } - - NeedsGoPackages(t) -} - -// ExitIfSmallMachine emits a helpful diagnostic and calls os.Exit(0) if the -// current machine is a builder known to have scarce resources. -// -// It should be called from within a TestMain function. -func ExitIfSmallMachine() { - if os.Getenv("GO_BUILDER_NAME") == "linux-arm" { - fmt.Fprintln(os.Stderr, "skipping test: linux-arm builder lacks sufficient memory (https://golang.org/issue/32834)") - os.Exit(0) - } -} diff --git a/vendor/golang.org/x/tools/internal/testenv/testenv_112.go b/vendor/golang.org/x/tools/internal/testenv/testenv_112.go deleted file mode 100644 index b25846c20..000000000 --- a/vendor/golang.org/x/tools/internal/testenv/testenv_112.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.12 - -package testenv - -import "runtime/debug" - -func packageMainIsDevelModule() bool { - info, ok := debug.ReadBuildInfo() - if !ok { - // Most test binaries currently lack build info, but this should become more - // permissive once https://golang.org/issue/33976 is fixed. - return true - } - - // Note: info.Main.Version describes the version of the module containing - // package main, not the version of “the main module”. - // See https://golang.org/issue/33975. - return info.Main.Version == "(devel)" -} - -func init() { - packageMainIsDevel = packageMainIsDevelModule -} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go new file mode 100644 index 000000000..a5bb408e2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -0,0 +1,28 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + "reflect" + "unsafe" +) + +func SetUsesCgo(conf *types.Config) bool { + v := reflect.ValueOf(conf).Elem() + + f := v.FieldByName("go115UsesCgo") + if !f.IsValid() { + f = v.FieldByName("UsesCgo") + if !f.IsValid() { + return false + } + } + + addr := unsafe.Pointer(f.UnsafeAddr()) + *(*bool)(addr) = true + + return true +} diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go index 74c1c93ec..829862ddf 100644 --- a/vendor/golang.org/x/xerrors/fmt.go +++ b/vendor/golang.org/x/xerrors/fmt.go @@ -7,10 +7,14 @@ package xerrors import ( "fmt" "strings" + "unicode" + "unicode/utf8" "golang.org/x/xerrors/internal" ) +const percentBangString = "%!" + // Errorf formats according to a format specifier and returns the string as a // value that satisfies error. // @@ -18,29 +22,71 @@ import ( // formatted with additional detail enabled. If the last argument is an error // the returned error's Format method will return it if the format string ends // with ": %s", ": %v", or ": %w". If the last argument is an error and the -// format string ends with ": %w", the returned error implements Wrapper -// with an Unwrap method returning it. +// format string ends with ": %w", the returned error implements an Unwrap +// method returning it. +// +// If the format specifier includes a %w verb with an error operand in a +// position other than at the end, the returned error will still implement an +// Unwrap method returning the operand, but the error's Format method will not +// return the wrapped error. +// +// It is invalid to include more than one %w verb or to supply it with an +// operand that does not implement the error interface. The %w verb is otherwise +// a synonym for %v. func Errorf(format string, a ...interface{}) error { - err, wrap := lastError(format, a) format = formatPlusW(format) - if err == nil { - return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)} + // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. 
+ wrap := strings.HasSuffix(format, ": %w") + idx, format2, ok := parsePercentW(format) + percentWElsewhere := !wrap && idx >= 0 + if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) { + err := errorAt(a, len(a)-1) + if err == nil { + return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)} + } + // TODO: this is not entirely correct. The error value could be + // printed elsewhere in format if it mixes numbered with unnumbered + // substitutions. With relatively small changes to doPrintf we can + // have it optionally ignore extra arguments and pass the argument + // list in its entirety. + msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...) + frame := Frame{} + if internal.EnableTrace { + frame = Caller(1) + } + if wrap { + return &wrapError{msg, err, frame} + } + return &noWrapError{msg, err, frame} + } + // Support %w anywhere. + // TODO: don't repeat the wrapped error's message when %w occurs in the middle. + msg := fmt.Sprintf(format2, a...) + if idx < 0 { + return &noWrapError{msg, nil, Caller(1)} + } + err := errorAt(a, idx) + if !ok || err == nil { + // Too many %ws or argument of %w is not an error. Approximate the Go + // 1.13 fmt.Errorf message. + return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)} } - - // TODO: this is not entirely correct. The error value could be - // printed elsewhere in format if it mixes numbered with unnumbered - // substitutions. With relatively small changes to doPrintf we can - // have it optionally ignore extra arguments and pass the argument - // list in its entirety. - msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...) frame := Frame{} if internal.EnableTrace { frame = Caller(1) } - if wrap { - return &wrapError{msg, err, frame} + return &wrapError{msg, err, frame} +} + +func errorAt(args []interface{}, i int) error { + if i < 0 || i >= len(args) { + return nil } - return &noWrapError{msg, err, frame} + err, ok := args[i].(error) + if !ok { + return nil + } + return err } // formatPlusW is used to avoid the vet check that will barf at %w. @@ -48,24 +94,56 @@ func formatPlusW(s string) string { return s } -func lastError(format string, a []interface{}) (err error, wrap bool) { - wrap = strings.HasSuffix(format, ": %w") - if !wrap && - !strings.HasSuffix(format, ": %s") && - !strings.HasSuffix(format, ": %v") { - return nil, false - } - - if len(a) == 0 { - return nil, false +// Return the index of the only %w in format, or -1 if none. +// Also return a rewritten format string with %w replaced by %v, and +// false if there is more than one %w. +// TODO: handle "%[N]w". +func parsePercentW(format string) (idx int, newFormat string, ok bool) { + // Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go. + idx = -1 + ok = true + n := 0 + sz := 0 + var isW bool + for i := 0; i < len(format); i += sz { + if format[i] != '%' { + sz = 1 + continue + } + // "%%" is not a format directive. + if i+1 < len(format) && format[i+1] == '%' { + sz = 2 + continue + } + sz, isW = parsePrintfVerb(format[i:]) + if isW { + if idx >= 0 { + ok = false + } else { + idx = n + } + // "Replace" the last character, the 'w', with a 'v'. + p := i + sz - 1 + format = format[:p] + "v" + format[p+1:] + } + n++ } + return idx, format, ok +} - err, ok := a[len(a)-1].(error) - if !ok { - return nil, false +// Parse the printf verb starting with a % at s[0]. +// Return how many bytes it occupies and whether the verb is 'w'. 
+func parsePrintfVerb(s string) (int, bool) { + // Assume only that the directive is a sequence of non-letters followed by a single letter. + sz := 0 + var r rune + for i := 1; i < len(s); i += sz { + r, sz = utf8.DecodeRuneInString(s[i:]) + if unicode.IsLetter(r) { + return i + sz, r == 'w' + } } - - return err, wrap + return len(s), false } type noWrapError struct { diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS index f73b72574..f07029059 100644 --- a/vendor/google.golang.org/api/AUTHORS +++ b/vendor/google.golang.org/api/AUTHORS @@ -8,3 +8,4 @@ # Please keep the list sorted. Google Inc. +LightStep Inc. diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS index fe55ebff0..788677b8f 100644 --- a/vendor/google.golang.org/api/CONTRIBUTORS +++ b/vendor/google.golang.org/api/CONTRIBUTORS @@ -45,6 +45,7 @@ Jason Hall Johan Euphrosine Kostik Shtoyk Kunpei Sakai +Matthew Dolan Matthew Whisenhunt Michael McGreevy Nick Craig-Wood diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index ab5376762..d1784f1a3 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -1,4 +1,4 @@ -// Copyright 2011 Google Inc. All rights reserved. +// Copyright 2011 Google LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -16,7 +16,7 @@ import ( "net/url" "strings" - "google.golang.org/api/googleapi/internal/uritemplates" + "google.golang.org/api/internal/third_party/uritemplates" ) // ContentTyper is an interface for Readers which know (or would like @@ -54,7 +54,7 @@ const ( // DefaultUploadChunkSize is the default chunk size to use for resumable // uploads if not specified by the user. - DefaultUploadChunkSize = 8 * 1024 * 1024 + DefaultUploadChunkSize = 16 * 1024 * 1024 // MinUploadChunkSize is the minimum chunk size that can be used for // resumable uploads. All user-specified chunk sizes must be multiple of @@ -69,6 +69,8 @@ type Error struct { // Message is the server response message and is only populated when // explicitly referenced by the JSON server response. Message string `json:"message"` + // Details provide more context to an error. + Details []interface{} `json:"details"` // Body is the raw response returned by the server. // It is often but not always JSON, depending on how the request fails. Body string @@ -95,6 +97,16 @@ func (e *Error) Error() string { if e.Message != "" { fmt.Fprintf(&buf, "%s", e.Message) } + if len(e.Details) > 0 { + var detailBuf bytes.Buffer + enc := json.NewEncoder(&detailBuf) + enc.SetIndent("", " ") + if err := enc.Encode(e.Details); err == nil { + fmt.Fprint(&buf, "\nDetails:") + fmt.Fprintf(&buf, "\n%s", detailBuf.String()) + + } + } if len(e.Errors) == 0 { return strings.TrimSpace(buf.String()) } @@ -256,14 +268,22 @@ func ProcessMediaOptions(opts []MediaOption) *MediaOptions { // "http://www.golang.org/topics/myproject/mytopic". It strips all parent // references (e.g. ../..) as well as anything after the host // (e.g. /bar/gaz gets stripped out of foo.com/bar/gaz). +// +// ResolveRelative panics if either basestr or relstr is not able to be parsed. 
func ResolveRelative(basestr, relstr string) string { - u, _ := url.Parse(basestr) + u, err := url.Parse(basestr) + if err != nil { + panic(fmt.Sprintf("failed to parse %q", basestr)) + } afterColonPath := "" if i := strings.IndexRune(relstr, ':'); i > 0 { afterColonPath = relstr[i+1:] relstr = relstr[:i] } - rel, _ := url.Parse(relstr) + rel, err := url.Parse(relstr) + if err != nil { + panic(fmt.Sprintf("failed to parse %q", relstr)) + } u = u.ResolveReference(rel) us := u.String() if afterColonPath != "" { @@ -331,7 +351,7 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { } // A Field names a field to be retrieved with a partial response. -// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance // // Partial responses can dramatically reduce the amount of data that must be sent to your application. // In order to request partial responses, you can specify the full list of fields @@ -348,9 +368,6 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool { // // svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() // -// More information about field formatting can be found here: -// https://developers.google.com/+/api/#fields-syntax -// // Another way to find field names is through the Google API explorer: // https://developers.google.com/apis-explorer/#p/ type Field string diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE b/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE deleted file mode 100644 index de9c88cb6..000000000 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright (c) 2013 Joshua Tacoma - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go index eca1ea250..61720ec2e 100644 --- a/vendor/google.golang.org/api/googleapi/transport/apikey.go +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -1,9 +1,13 @@ -// Copyright 2012 Google Inc. All rights reserved. +// Copyright 2012 Google LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package transport contains HTTP transports used to make // authenticated API requests. +// +// This package is DEPRECATED. 
Users should instead use, +// +// service, err := NewService(..., option.WithAPIKey(...)) package transport import ( @@ -13,6 +17,8 @@ import ( // APIKey is an HTTP Transport which wraps an underlying transport and // appends an API Key "key" parameter to the URL of outgoing requests. +// +// Deprecated: please use NewService(..., option.WithAPIKey(...)) instead. type APIKey struct { // Key is the API Key to set on requests. Key string diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go index a280e3021..fabf74d50 100644 --- a/vendor/google.golang.org/api/googleapi/types.go +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -1,4 +1,4 @@ -// Copyright 2013 Google Inc. All rights reserved. +// Copyright 2013 Google LLC. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/vendor/google.golang.org/api/internal/conn_pool.go b/vendor/google.golang.org/api/internal/conn_pool.go new file mode 100644 index 000000000..fedcce15b --- /dev/null +++ b/vendor/google.golang.org/api/internal/conn_pool.go @@ -0,0 +1,30 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "google.golang.org/grpc" +) + +// ConnPool is a pool of grpc.ClientConns. +type ConnPool interface { + // Conn returns a ClientConn from the pool. + // + // Conns aren't returned to the pool. + Conn() *grpc.ClientConn + + // Num returns the number of connections in the pool. + // + // It will always return the same value. + Num() int + + // Close closes every ClientConn in the pool. + // + // The error returned by Close may be a single error or multiple errors. + Close() error + + // ConnPool implements grpc.ClientConnInterface to enable it to be used directly with generated proto stubs. + grpc.ClientConnInterface +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 69b8659fd..75e9445e1 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -1,16 +1,6 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package internal @@ -100,3 +90,16 @@ func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) } return google.JWTAccessTokenSourceFromJSON(data, audience) } + +// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. +// +// NOTE(cbro): consider promoting this to a field on google.Credentials. 
+func QuotaProjectFromCreds(cred *google.Credentials) string { + var v struct { + QuotaProject string `json:"quota_project_id"` + } + if err := json.Unmarshal(cred.JSON, &v); err != nil { + return "" + } + return v.QuotaProject +} diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/internal/gensupport/buffer.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/buffer.go rename to vendor/google.golang.org/api/internal/gensupport/buffer.go diff --git a/vendor/google.golang.org/api/gensupport/doc.go b/vendor/google.golang.org/api/internal/gensupport/doc.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/doc.go rename to vendor/google.golang.org/api/internal/gensupport/doc.go diff --git a/vendor/google.golang.org/api/gensupport/json.go b/vendor/google.golang.org/api/internal/gensupport/json.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/json.go rename to vendor/google.golang.org/api/internal/gensupport/json.go diff --git a/vendor/google.golang.org/api/gensupport/jsonfloat.go b/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go similarity index 65% rename from vendor/google.golang.org/api/gensupport/jsonfloat.go rename to vendor/google.golang.org/api/internal/gensupport/jsonfloat.go index 837785081..13c2f9302 100644 --- a/vendor/google.golang.org/api/gensupport/jsonfloat.go +++ b/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go @@ -1,16 +1,6 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
package gensupport diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go similarity index 98% rename from vendor/google.golang.org/api/gensupport/media.go rename to vendor/google.golang.org/api/internal/gensupport/media.go index 0ef96b3f1..0288cc304 100644 --- a/vendor/google.golang.org/api/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -290,6 +290,9 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB fb := readerFunc(body) fm := readerFunc(media) combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) + toCleanup := []io.Closer{ + combined, + } if fb != nil && fm != nil { getBody = func() (io.ReadCloser, error) { rb := ioutil.NopCloser(fb()) @@ -299,10 +302,16 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB mimeBoundary = params["boundary"] } r, _ := combineBodyMedia(rb, "application/json", rm, mi.mType, mimeBoundary) + toCleanup = append(toCleanup, r) return r, nil } } - cleanup = func() { combined.Close() } + cleanup = func() { + for _, closer := range toCleanup { + _ = closer.Close() + } + + } reqHeaders.Set("Content-Type", ctype) body = combined } diff --git a/vendor/google.golang.org/api/gensupport/params.go b/vendor/google.golang.org/api/internal/gensupport/params.go similarity index 100% rename from vendor/google.golang.org/api/gensupport/params.go rename to vendor/google.golang.org/api/internal/gensupport/params.go diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go similarity index 88% rename from vendor/google.golang.org/api/gensupport/resumable.go rename to vendor/google.golang.org/api/internal/gensupport/resumable.go index e67ccd9a6..edc87ec24 100644 --- a/vendor/google.golang.org/api/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -28,6 +28,8 @@ var ( backoff = func() Backoff { return &gax.Backoff{Initial: 100 * time.Millisecond} } + // isRetryable is a platform-specific hook, specified in retryable_linux.go + syscallRetryable func(error) bool = func(err error) bool { return false } ) const ( @@ -160,21 +162,6 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { - var shouldRetry = func(status int, err error) bool { - if 500 <= status && status <= 599 { - return true - } - if status == statusTooManyRequests { - return true - } - if err == io.ErrUnexpectedEOF { - return true - } - if err, ok := err.(interface{ Temporary() bool }); ok { - return err.Temporary() - } - return false - } // There are a couple of cases where it's possible for err and resp to both // be non-nil. However, we expose a simpler contract to our callers: exactly @@ -239,3 +226,33 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err return prepareReturn(resp, err) } } + +// shouldRetry indicates whether an error is retryable for the purposes of this +// package, following guidance from +// https://cloud.google.com/storage/docs/exponential-backoff . 
+func shouldRetry(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if status == statusTooManyRequests { + return true + } + if err == io.ErrUnexpectedEOF { + return true + } + // Transient network errors should be retried. + if syscallRetryable(err) { + return true + } + if err, ok := err.(interface{ Temporary() bool }); ok { + if err.Temporary() { + return true + } + } + // If Go 1.13 error unwrapping is available, use this to examine wrapped + // errors. + if err, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, err.Unwrap()) + } + return false +} diff --git a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go new file mode 100644 index 000000000..fed998b5d --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go @@ -0,0 +1,15 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package gensupport + +import "syscall" + +func init() { + // Initialize syscallRetryable to return true on transient socket-level + // errors. These errors are specific to Linux. + syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } +} diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go similarity index 54% rename from vendor/google.golang.org/api/gensupport/send.go rename to vendor/google.golang.org/api/internal/gensupport/send.go index 579939309..3338c8d19 100644 --- a/vendor/google.golang.org/api/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -9,6 +9,7 @@ import ( "encoding/json" "errors" "net/http" + "time" ) // Hook is the type of a function that is called once before each HTTP request @@ -77,6 +78,90 @@ func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Re return resp, err } +// SendRequestWithRetry sends a single HTTP request using the given client, +// with retries if a retryable error is returned. +// If ctx is non-nil, it calls all hooks, then sends the request with +// req.WithContext, then calls any functions returned by the hooks in +// reverse order. +func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Disallow Accept-Encoding because it interferes with the automatic gzip handling + // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. + if _, ok := req.Header["Accept-Encoding"]; ok { + return nil, errors.New("google api: custom Accept-Encoding headers not allowed") + } + if ctx == nil { + return client.Do(req) + } + // Call hooks in order of registration, store returned funcs. + post := make([]func(resp *http.Response), len(hooks)) + for i, h := range hooks { + fn := h(ctx, req) + post[i] = fn + } + + // Send request with retry. + resp, err := sendAndRetry(ctx, client, req) + + // Call returned funcs in reverse order. + for i := len(post) - 1; i >= 0; i-- { + if fn := post[i]; fn != nil { + fn(resp) + } + } + return resp, err +} + +func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + var resp *http.Response + var err error + + // Loop to retry the request, up to the context deadline. 
+ var pause time.Duration + bo := backoff() + + for { + select { + case <-ctx.Done(): + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err == nil { + err = ctx.Err() + } + return resp, err + case <-time.After(pause): + } + + resp, err = client.Do(req.WithContext(ctx)) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we can retry the request. A retry can only be done if the error + // is retryable and the request body can be re-created using GetBody (this + // will not be possible if the body was unbuffered). + if req.GetBody == nil || !shouldRetry(status, err) { + break + } + var errBody error + req.Body, errBody = req.GetBody() + if errBody != nil { + break + } + + pause = bo.Pause() + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + } + return resp, err +} + // DecodeResponse decodes the body of res into target. If there is no body, // target is unchanged. func DecodeResponse(target interface{}, res *http.Response) error { diff --git a/vendor/google.golang.org/api/internal/gensupport/version.go b/vendor/google.golang.org/api/internal/gensupport/version.go new file mode 100644 index 000000000..23f6aa24e --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/version.go @@ -0,0 +1,53 @@ +// Copyright 2020 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "runtime" + "strings" + "unicode" +) + +// GoVersion returns the Go runtime version. The returned string +// has no whitespace. +func GoVersion() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return !strings.ContainsRune("0123456789.", r) +} diff --git a/vendor/google.golang.org/api/internal/pool.go b/vendor/google.golang.org/api/internal/pool.go deleted file mode 100644 index a4426dcb7..000000000 --- a/vendor/google.golang.org/api/internal/pool.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "errors" - - "google.golang.org/grpc/naming" -) - -// PoolResolver provides a fixed list of addresses to load balance between -// and does not provide further updates. 
-type PoolResolver struct { - poolSize int - dialOpt *DialSettings - ch chan []*naming.Update -} - -// NewPoolResolver returns a PoolResolver -// This is an EXPERIMENTAL API and may be changed or removed in the future. -func NewPoolResolver(size int, o *DialSettings) *PoolResolver { - return &PoolResolver{poolSize: size, dialOpt: o} -} - -// Resolve returns a Watcher for the endpoint defined by the DialSettings -// provided to NewPoolResolver. -func (r *PoolResolver) Resolve(target string) (naming.Watcher, error) { - if r.dialOpt.Endpoint == "" { - return nil, errors.New("no endpoint configured") - } - addrs := make([]*naming.Update, 0, r.poolSize) - for i := 0; i < r.poolSize; i++ { - addrs = append(addrs, &naming.Update{Op: naming.Add, Addr: r.dialOpt.Endpoint, Metadata: i}) - } - r.ch = make(chan []*naming.Update, 1) - r.ch <- addrs - return r, nil -} - -// Next returns a static list of updates on the first call, -// and blocks indefinitely until Close is called on subsequent calls. -func (r *PoolResolver) Next() ([]*naming.Update, error) { - return <-r.ch, nil -} - -// Close releases resources associated with the pool and causes Next to unblock. -func (r *PoolResolver) Close() { - close(r.ch) -} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 062301c65..f435519de 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -1,21 +1,12 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package internal supports the options and transport packages. package internal import ( + "crypto/tls" "errors" "net/http" @@ -27,19 +18,26 @@ import ( // DialSettings holds information needed to establish a connection with a // Google API service. type DialSettings struct { - Endpoint string - Scopes []string - TokenSource oauth2.TokenSource - Credentials *google.Credentials - CredentialsFile string // if set, Token Source is ignored. - CredentialsJSON []byte - UserAgent string - APIKey string - Audiences []string - HTTPClient *http.Client - GRPCDialOpts []grpc.DialOption - GRPCConn *grpc.ClientConn - NoAuth bool + Endpoint string + DefaultEndpoint string + DefaultMTLSEndpoint string + Scopes []string + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + UserAgent string + APIKey string + Audiences []string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + GRPCConnPool ConnPool + GRPCConnPoolSize int + NoAuth bool + TelemetryDisabled bool + ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + CustomClaims map[string]interface{} // Google API system parameters. 
For more information please read: // https://cloud.google.com/apis/docs/system-parameters @@ -79,6 +77,12 @@ func (ds *DialSettings) Validate() error { if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { return errors.New("multiple credential options provided") } + if ds.GRPCConn != nil && ds.GRPCConnPool != nil { + return errors.New("WithGRPCConn is incompatible with WithConnPool") + } + if ds.HTTPClient != nil && ds.GRPCConnPool != nil { + return errors.New("WithHTTPClient is incompatible with WithConnPool") + } if ds.HTTPClient != nil && ds.GRPCConn != nil { return errors.New("WithHTTPClient is incompatible with WithGRPCConn") } @@ -91,6 +95,12 @@ func (ds *DialSettings) Validate() error { if ds.HTTPClient != nil && ds.RequestReason != "" { return errors.New("WithHTTPClient is incompatible with RequestReason") } + if ds.HTTPClient != nil && ds.ClientCertSource != nil { + return errors.New("WithHTTPClient is incompatible with WithClientCertSource") + } + if ds.ClientCertSource != nil && (ds.GRPCConn != nil || ds.GRPCConnPool != nil || ds.GRPCConnPoolSize != 0 || ds.GRPCDialOpts != nil) { + return errors.New("WithClientCertSource is currently only supported for HTTP. gRPC settings are incompatible") + } return nil } diff --git a/vendor/github.com/keybase/go-crypto/LICENSE b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE similarity index 96% rename from vendor/github.com/keybase/go-crypto/LICENSE rename to vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE index 6a66aea5e..7109c6ef9 100644 --- a/vendor/github.com/keybase/go-crypto/LICENSE +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2013 Joshua Tacoma. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA new file mode 100644 index 000000000..c7f86fcd5 --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA @@ -0,0 +1,14 @@ +name: "uritemplates" +description: + "Package uritemplates is a level 4 implementation of RFC 6570 (URI " + "Template, http://tools.ietf.org/html/rfc6570)." 
+ +third_party { + url { + type: GIT + value: "https://github.com/jtacoma/uritemplates" + } + version: "0.1" + last_upgrade_date { year: 2014 month: 8 day: 18 } + license_type: NOTICE +} diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go similarity index 98% rename from vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go rename to vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go index 63bf05383..8c27d19d7 100644 --- a/vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go @@ -191,7 +191,7 @@ func parseTerm(term string) (result templateTerm, err error) { err = errors.New("not a valid name: " + result.name) } if result.explode && result.truncate > 0 { - err = errors.New("both explode and prefix modifers on same term") + err = errors.New("both explode and prefix modifiers on same term") } return result, err } diff --git a/vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go similarity index 100% rename from vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go rename to vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go diff --git a/vendor/google.golang.org/api/iterator/iterator.go b/vendor/google.golang.org/api/iterator/iterator.go index 3c8ea7732..1799b5d9a 100644 --- a/vendor/google.golang.org/api/iterator/iterator.go +++ b/vendor/google.golang.org/api/iterator/iterator.go @@ -1,16 +1,6 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package iterator provides support for standard Google API iterators. // See https://github.com/GoogleCloudPlatform/gcloud-golang/wiki/Iterator-Guidelines. @@ -82,17 +72,23 @@ type PageInfo struct { // It is not a stable interface. var NewPageInfo = newPageInfo -// If an iterator can support paging, its iterator-creating method should call -// this (via the NewPageInfo variable above). +// newPageInfo creates and returns a PageInfo and a next func. If an iterator can +// support paging, its iterator-creating method should call this. Each time the +// iterator's Next is called, it should call the returned next fn to determine +// whether a next item exists, and if so it should pop an item from the buffer. // -// The fetch, bufLen and takeBuf arguments provide access to the -// iterator's internal slice of buffered items. They behave as described in -// PageInfo, above. +// The fetch, bufLen and takeBuf arguments provide access to the iterator's +// internal slice of buffered items. They behave as described in PageInfo, above. 
// // The return value is the PageInfo.next method bound to the returned PageInfo value. // (Returning it avoids exporting PageInfo.next.) -func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (*PageInfo, func() error) { - pi := &PageInfo{ +// +// Note: the returned PageInfo and next fn do not remove items from the buffer. +// It is up to the iterator using these to remove items from the buffer: +// typically by performing a pop in its Next. If items are not removed from the +// buffer, memory may grow unbounded. +func newPageInfo(fetch func(int, string) (string, error), bufLen func() int, takeBuf func() interface{}) (pi *PageInfo, next func() error) { + pi = &PageInfo{ fetch: fetch, bufLen: bufLen, takeBuf: takeBuf, diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go index 0636a8294..d06f918b0 100644 --- a/vendor/google.golang.org/api/option/credentials_go19.go +++ b/vendor/google.golang.org/api/option/credentials_go19.go @@ -1,16 +1,6 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // +build go1.9 diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go index 74d3a4b5b..0ce107a62 100644 --- a/vendor/google.golang.org/api/option/credentials_notgo19.go +++ b/vendor/google.golang.org/api/option/credentials_notgo19.go @@ -1,16 +1,6 @@ -// Copyright 2018 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // +build !go1.9 diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go new file mode 100644 index 000000000..ff5b530cf --- /dev/null +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -0,0 +1,40 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internaloption contains options used internally by Google client code. 
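The rewritten newPageInfo comment above pins down a contract that is easy to get wrong: the next func returned here only makes sure the buffer is non-empty, and it is the iterator's own Next that must pop items out of it. Below is a minimal sketch of that pattern against the exported iterator.NewPageInfo, with a fake two-page backend; all names in it are illustrative.

package main

import (
	"fmt"
	"strconv"

	"google.golang.org/api/iterator"
)

// stringIterator is a toy iterator over pre-canned pages of strings. Next
// first calls the func returned by NewPageInfo to make sure something is
// buffered, then pops one item itself so the buffer cannot grow without bound.
type stringIterator struct {
	pageInfo *iterator.PageInfo
	nextFunc func() error
	items    []string
	pages    [][]string // stand-in for a paginated backend
}

func newStringIterator(pages [][]string) *stringIterator {
	it := &stringIterator{pages: pages}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b },
	)
	return it
}

// fetch plays the role of one RPC: append a page to the buffer and return the
// token of the next page, or "" when there are no more pages.
func (it *stringIterator) fetch(pageSize int, pageToken string) (string, error) {
	idx := 0
	if pageToken != "" {
		idx, _ = strconv.Atoi(pageToken)
	}
	if idx >= len(it.pages) {
		return "", nil
	}
	it.items = append(it.items, it.pages[idx]...)
	if idx+1 < len(it.pages) {
		return strconv.Itoa(idx + 1), nil
	}
	return "", nil
}

// PageInfo exposes paging controls, mirroring the generated iterators.
func (it *stringIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

// Next returns the next item, or iterator.Done when the pages are exhausted.
func (it *stringIterator) Next() (string, error) {
	if err := it.nextFunc(); err != nil {
		return "", err
	}
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func main() {
	it := newStringIterator([][]string{{"a", "b"}, {"c"}})
	for {
		s, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(s)
	}
}

Running it prints a, b, c and then stops on iterator.Done.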
+package internaloption + +import ( + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +type defaultEndpointOption string + +func (o defaultEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultEndpoint = string(o) +} + +// WithDefaultEndpoint is an option that indicates the default endpoint. +// +// It should only be used internally by generated clients. +// +// This is similar to WithEndpoint, but allows us to determine whether the user has overriden the default endpoint. +func WithDefaultEndpoint(url string) option.ClientOption { + return defaultEndpointOption(url) +} + +type defaultMTLSEndpointOption string + +func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultMTLSEndpoint = string(o) +} + +// WithDefaultMTLSEndpoint is an option that indicates the default mTLS endpoint. +// +// It should only be used internally by generated clients. +// +func WithDefaultMTLSEndpoint(url string) option.ClientOption { + return defaultMTLSEndpointOption(url) +} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 0a1c2dba9..b7c40d60a 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -1,21 +1,12 @@ -// Copyright 2017 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package option contains options for Google API clients. package option import ( + "crypto/tls" "net/http" "golang.org/x/oauth2" @@ -124,7 +115,7 @@ func (w withHTTPClient) Apply(o *internal.DialSettings) { } // WithGRPCConn returns a ClientOption that specifies the gRPC client -// connection to use as the basis of communications. This option many only be +// connection to use as the basis of communications. This option may only be // used with services that support gRPC as their communication transport. When // used, the WithGRPCConn option takes precedent over all other supplied // options. @@ -152,6 +143,7 @@ func (w withGRPCDialOption) Apply(o *internal.DialSettings) { // WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC // connections that requests will be balanced between. +// // This is an EXPERIMENTAL API and may be changed or removed in the future. 
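WithDefaultEndpoint and WithDefaultMTLSEndpoint only record defaults in DialSettings; they deliberately leave the Endpoint field set by option.WithEndpoint alone, which is how the transport can tell a generated default from a user override. Here is a small sketch of the pattern the generated storage client applies later in this patch; the withDefaults helper and the use of WithoutAuthentication are illustrative, not part of the library.

package main

import (
	"context"
	"fmt"

	"google.golang.org/api/option"
	"google.golang.org/api/option/internaloption"
	storage "google.golang.org/api/storage/v1"
)

// withDefaults mirrors what storage.NewService does further down: keep the
// caller's options and append the generated default endpoint. Because
// WithDefaultEndpoint writes DialSettings.DefaultEndpoint rather than
// Endpoint, a caller's option.WithEndpoint override remains visible.
func withDefaults(basePath string, userOpts ...option.ClientOption) []option.ClientOption {
	opts := append([]option.ClientOption{}, userOpts...)
	return append(opts, internaloption.WithDefaultEndpoint(basePath))
}

func main() {
	ctx := context.Background()
	opts := withDefaults(
		"https://storage.googleapis.com/storage/v1/",
		option.WithoutAuthentication(), // keeps the sketch runnable without credentials
	)
	svc, err := storage.NewService(ctx, opts...)
	if err != nil {
		panic(err)
	}
	fmt.Println("base path:", svc.BasePath)
}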
func WithGRPCConnectionPool(size int) ClientOption { return withGRPCConnectionPool(size) @@ -160,8 +152,7 @@ func WithGRPCConnectionPool(size int) ClientOption { type withGRPCConnectionPool int func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { - balancer := grpc.RoundRobin(internal.NewPoolResolver(int(w), o)) - o.GRPCDialOpts = append(o.GRPCDialOpts, grpc.WithBalancer(balancer)) + o.GRPCConnPoolSize = int(w) } // WithAPIKey returns a ClientOption that specifies an API key to be used @@ -233,3 +224,48 @@ type withRequestReason string func (w withRequestReason) Apply(o *internal.DialSettings) { o.RequestReason = string(w) } + +// WithTelemetryDisabled returns a ClientOption that disables default telemetry (OpenCensus) +// settings on gRPC and HTTP clients. +// An example reason would be to bind custom telemetry that overrides the defaults. +func WithTelemetryDisabled() ClientOption { + return withTelemetryDisabled{} +} + +type withTelemetryDisabled struct{} + +func (w withTelemetryDisabled) Apply(o *internal.DialSettings) { + o.TelemetryDisabled = true +} + +// ClientCertSource is a function that returns a TLS client certificate to be used +// when opening TLS connections. +// +// It follows the same semantics as crypto/tls.Config.GetClientCertificate. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. +type ClientCertSource = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// WithClientCertSource returns a ClientOption that specifies a +// callback function for obtaining a TLS client certificate. +// +// This option is used for supporting mTLS authentication, where the +// server validates the client certifcate when establishing a connection. +// +// The callback function will be invoked whenever the server requests a +// certificate from the client. Implementations of the callback function +// should try to ensure that a valid certificate can be repeatedly returned +// on demand for the entire life cycle of the transport client. If a nil +// Certificate is returned (i.e. no Certificate can be obtained), an error +// should be returned. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. 
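WithClientCertSource stores a per-handshake certificate callback in DialSettings.ClientCertSource (HTTP-only for now, per the Validate additions earlier in this patch), and WithTelemetryDisabled sets TelemetryDisabled. A hedged sketch of how a caller might wire both up follows; the certificate paths client.crt and client.key are placeholders, and WithoutAuthentication is only there to keep the example self-contained.

package main

import (
	"context"
	"crypto/tls"
	"fmt"

	"google.golang.org/api/option"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()

	// Invoked whenever the server requests a client certificate, following
	// crypto/tls.Config.GetClientCertificate semantics.
	certSource := func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
		cert, err := tls.LoadX509KeyPair("client.crt", "client.key") // placeholder files
		if err != nil {
			return nil, err
		}
		return &cert, nil
	}

	svc, err := storage.NewService(ctx,
		option.WithClientCertSource(certSource),
		option.WithTelemetryDisabled(), // e.g. custom telemetry is wired up elsewhere
		option.WithoutAuthentication(),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println("client ready:", svc.BasePath)
}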
+func WithClientCertSource(s ClientCertSource) ClientOption { + return withClientCertSource{s} +} + +type withClientCertSource struct{ s ClientCertSource } + +func (w withClientCertSource) Apply(o *internal.DialSettings) { + o.ClientCertSource = w.s +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 2a0d2746f..e78776b2b 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -21,12 +21,12 @@ } }, "basePath": "/storage/v1/", - "baseUrl": "https://www.googleapis.com/storage/v1/", + "baseUrl": "https://storage.googleapis.com/storage/v1/", "batchPath": "batch/storage/v1", "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"9eZ1uxVRThTDhLJCZHhqs3eQWz4/m18VxIxuaQHJN-C1B3-yQYvta24\"", + "etag": "\"u9GIe6H63LSGq-9_t39K2Zx_EAc/5Ir-e9ddNPcr5skzvRsSnJlvTYg\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -450,6 +450,13 @@ "required": true, "type": "string" }, + "optionsRequestedPolicyVersion": { + "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + "format": "int32", + "location": "query", + "minimum": "1", + "type": "integer" + }, "provisionalUserProject": { "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", "location": "query", @@ -1774,7 +1781,7 @@ "type": "string" }, "kmsKeyName": { - "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", "location": "query", "type": "string" }, @@ -1819,6 +1826,11 @@ "required": true, "type": "string" }, + "destinationKmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query", + "type": "string" + }, "destinationObject": { "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", "location": "path", @@ -2303,6 +2315,11 @@ "location": "query", "type": "string" }, + "endOffset": { + "description": "Filter results to objects whose names are lexicographically before endOffset. 
If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, "includeTrailingDelimiter": { "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", "location": "query", @@ -2344,6 +2361,11 @@ "location": "query", "type": "string" }, + "startOffset": { + "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2875,6 +2897,11 @@ "location": "query", "type": "string" }, + "endOffset": { + "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, "includeTrailingDelimiter": { "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", "location": "query", @@ -2916,6 +2943,11 @@ "location": "query", "type": "string" }, + "startOffset": { + "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -3054,6 +3086,7 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only" ] }, @@ -3196,8 +3229,8 @@ } } }, - "revision": "20190624", - "rootUrl": "https://www.googleapis.com/", + "revision": "20200611", + "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { "description": "A bucket.", @@ -3289,7 +3322,7 @@ "description": "The bucket's IAM configuration.", "properties": { "bucketPolicyOnly": { - "description": "The bucket's Bucket Policy Only configuration.", + "description": "The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. We recommend using the uniformBucketLevelAccess field to enable and disable the feature.", "properties": { "enabled": { "description": "If set, access is controlled only by bucket-level or above IAM policies.", @@ -3372,6 +3405,21 @@ "format": "date", "type": "string" }, + "customTimeBefore": { + "description": "A timestamp in RFC 3339 format. This condition is satisfied when the custom time on an object is before this timestamp.", + "format": "date-time", + "type": "string" + }, + "daysSinceCustomTime": { + "description": "Number of days elapsed since the user-specified timestamp set on an object. The condition is satisfied if the days elapsed is at least this number. 
If no custom timestamp is specified on an object, the condition does not apply.", + "format": "int32", + "type": "integer" + }, + "daysSinceNoncurrentTime": { + "description": "Number of days elapsed since the noncurrent timestamp of an object. The condition is satisfied if the days elapsed is at least this number. This condition is relevant only for versioned objects. The value of the field must be a nonnegative integer. If it's zero, the object version will become eligible for Lifecycle action as soon as it becomes noncurrent.", + "format": "int32", + "type": "integer" + }, "isLive": { "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects.", "type": "boolean" @@ -3381,12 +3429,17 @@ "type": "string" }, "matchesStorageClass": { - "description": "Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.", + "description": "Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.", "items": { "type": "string" }, "type": "array" }, + "noncurrentTimeBefore": { + "description": "A timestamp in RFC 3339 format. This condition is satisfied when the noncurrent time on an object is before this timestamp. This condition is relevant only for versioned objects.", + "format": "date-time", + "type": "string" + }, "numNewerVersions": { "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", "format": "int32", @@ -3483,7 +3536,7 @@ "type": "string" }, "storageClass": { - "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", + "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", "type": "string" }, "timeCreated": { @@ -3519,6 +3572,17 @@ } }, "type": "object" + }, + "zoneAffinity": { + "description": "The zone or zones from which the bucket is intended to use zonal quota. Requests for data from outside the specified affinities are still allowed but won't be able to use zonal quota. 
The zone or zones need to be within the bucket location otherwise the requests will fail with a 400 Bad Request response.", + "items": { + "type": "string" + }, + "type": "array" + }, + "zoneSeparation": { + "description": "If set, objects placed in this bucket are required to be separated by disaster domain.", + "type": "boolean" } }, "type": "object" @@ -3990,6 +4054,11 @@ "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices.", "type": "string" }, + "customTime": { + "description": "A timestamp in RFC 3339 format specified by the user for an object.", + "format": "date-time", + "type": "string" + }, "customerEncryption": { "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.", "properties": { @@ -4305,6 +4374,11 @@ "resourceId": { "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", "type": "string" + }, + "version": { + "description": "The IAM policy format version.", + "format": "int32", + "type": "integer" } }, "type": "object" diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 2eff67418..dc51a22ed 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -55,9 +55,10 @@ import ( "strconv" "strings" - gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" + gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" + internaloption "google.golang.org/api/option/internaloption" htransport "google.golang.org/api/transport/http" ) @@ -74,11 +75,12 @@ var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled +var _ = internaloption.WithDefaultEndpoint const apiId = "storage:v1" const apiName = "storage" const apiVersion = "v1" -const basePath = "https://www.googleapis.com/storage/v1/" +const basePath = "https://storage.googleapis.com/storage/v1/" // OAuth2 scopes used by this API. const ( @@ -109,6 +111,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err ) // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) + opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -371,9 +374,9 @@ type Bucket struct { // storageClass is specified for a newly-created object. This defines // how objects in the bucket are stored and determines the SLA and the // cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, - // NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY. If this value - // is not specified when the bucket is created, it will default to - // STANDARD. 
For more information, see storage classes. + // NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If + // this value is not specified when the bucket is created, it will + // default to STANDARD. For more information, see storage classes. StorageClass string `json:"storageClass,omitempty"` // TimeCreated: The creation time of the bucket in RFC 3339 format. @@ -390,6 +393,17 @@ type Bucket struct { // Static Website Examples for more information. Website *BucketWebsite `json:"website,omitempty"` + // ZoneAffinity: The zone or zones from which the bucket is intended to + // use zonal quota. Requests for data from outside the specified + // affinities are still allowed but won't be able to use zonal quota. + // The zone or zones need to be within the bucket location otherwise the + // requests will fail with a 400 Bad Request response. + ZoneAffinity []string `json:"zoneAffinity,omitempty"` + + // ZoneSeparation: If set, objects placed in this bucket are required to + // be separated by disaster domain. + ZoneSeparation bool `json:"zoneSeparation,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` @@ -522,7 +536,12 @@ func (s *BucketEncryption) MarshalJSON() ([]byte, error) { // BucketIamConfiguration: The bucket's IAM configuration. type BucketIamConfiguration struct { - // BucketPolicyOnly: The bucket's Bucket Policy Only configuration. + // BucketPolicyOnly: The bucket's uniform bucket-level access + // configuration. The feature was formerly known as Bucket Policy Only. + // For backward compatibility, this field will be populated with + // identical information as the uniformBucketLevelAccess field. We + // recommend using the uniformBucketLevelAccess field to enable and + // disable the feature. BucketPolicyOnly *BucketIamConfigurationBucketPolicyOnly `json:"bucketPolicyOnly,omitempty"` // UniformBucketLevelAccess: The bucket's uniform bucket-level access @@ -553,8 +572,12 @@ func (s *BucketIamConfiguration) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// BucketIamConfigurationBucketPolicyOnly: The bucket's Bucket Policy -// Only configuration. +// BucketIamConfigurationBucketPolicyOnly: The bucket's uniform +// bucket-level access configuration. The feature was formerly known as +// Bucket Policy Only. For backward compatibility, this field will be +// populated with identical information as the uniformBucketLevelAccess +// field. We recommend using the uniformBucketLevelAccess field to +// enable and disable the feature. type BucketIamConfigurationBucketPolicyOnly struct { // Enabled: If set, access is controlled only by bucket-level or above // IAM policies. @@ -733,6 +756,24 @@ type BucketLifecycleRuleCondition struct { // is created before midnight of the specified date in UTC. CreatedBefore string `json:"createdBefore,omitempty"` + // CustomTimeBefore: A timestamp in RFC 3339 format. This condition is + // satisfied when the custom time on an object is before this timestamp. + CustomTimeBefore string `json:"customTimeBefore,omitempty"` + + // DaysSinceCustomTime: Number of days elapsed since the user-specified + // timestamp set on an object. The condition is satisfied if the days + // elapsed is at least this number. If no custom timestamp is specified + // on an object, the condition does not apply. 
+ DaysSinceCustomTime int64 `json:"daysSinceCustomTime,omitempty"` + + // DaysSinceNoncurrentTime: Number of days elapsed since the noncurrent + // timestamp of an object. The condition is satisfied if the days + // elapsed is at least this number. This condition is relevant only for + // versioned objects. The value of the field must be a nonnegative + // integer. If it's zero, the object version will become eligible for + // Lifecycle action as soon as it becomes noncurrent. + DaysSinceNoncurrentTime int64 `json:"daysSinceNoncurrentTime,omitempty"` + // IsLive: Relevant only for versioned objects. If the value is true, // this condition matches live objects; if the value is false, it // matches archived objects. @@ -748,10 +789,15 @@ type BucketLifecycleRuleCondition struct { // MatchesStorageClass: Objects having any of the storage classes // specified by this condition will be matched. Values include - // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, STANDARD, and + // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and // DURABLE_REDUCED_AVAILABILITY. MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` + // NoncurrentTimeBefore: A timestamp in RFC 3339 format. This condition + // is satisfied when the noncurrent time on an object is before this + // timestamp. This condition is relevant only for versioned objects. + NoncurrentTimeBefore string `json:"noncurrentTimeBefore,omitempty"` + // NumNewerVersions: Relevant only for versioned objects. If the value // is N, this condition is satisfied when there are at least N versions // (including the live version) newer than this version of the object. @@ -1650,6 +1696,10 @@ type Object struct { // Practices. Crc32c string `json:"crc32c,omitempty"` + // CustomTime: A timestamp in RFC 3339 format specified by the user for + // an object. + CustomTime string `json:"customTime,omitempty"` + // CustomerEncryption: Metadata of customer-supplied encryption key, if // the object is encrypted by such a key. CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` @@ -2068,6 +2118,9 @@ type Policy struct { // generation can be denoted with #0. This field is ignored on input. ResourceId string `json:"resourceId,omitempty"` + // Version: The IAM policy format version. + Version int64 `json:"version,omitempty"` + // ServerResponse contains the HTTP response code and headers from the // server. 
googleapi.ServerResponse `json:"-"` @@ -2389,7 +2442,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2537,7 +2590,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header { func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2704,7 +2757,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header { func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2877,7 +2930,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header { func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3038,7 +3091,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header { func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3212,7 +3265,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header { func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3398,7 +3451,7 @@ func (c *BucketsDeleteCall) Header() http.Header { func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3577,7 +3630,7 @@ func (c *BucketsGetCall) Header() http.Header { func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3721,6 +3774,16 @@ func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall { return c } +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": The IAM policy format version to be +// returned. 
If the optionsRequestedPolicyVersion is for an older +// version that doesn't support part of the requested IAM policy, the +// request fails. +func (c *BucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BucketsGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + // ProvisionalUserProject sets the optional parameter // "provisionalUserProject": The project to be billed for this request // if the target bucket is requester-pays bucket. @@ -3773,7 +3836,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header { func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3848,6 +3911,13 @@ func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, err // "required": true, // "type": "string" // }, + // "optionsRequestedPolicyVersion": { + // "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + // "format": "int32", + // "location": "query", + // "minimum": "1", + // "type": "integer" + // }, // "provisionalUserProject": { // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", // "location": "query", @@ -3983,7 +4053,7 @@ func (c *BucketsInsertCall) Header() http.Header { func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4240,7 +4310,7 @@ func (c *BucketsListCall) Header() http.Header { func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4450,7 +4520,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header { func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4685,7 +4755,7 @@ func (c *BucketsPatchCall) Header() http.Header { func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4914,7 +4984,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range 
c.header_ { reqHeaders[k] = v } @@ -5089,7 +5159,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header { func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5329,7 +5399,7 @@ func (c *BucketsUpdateCall) Header() http.Header { func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5541,7 +5611,7 @@ func (c *ChannelsStopCall) Header() http.Header { func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5658,7 +5728,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5806,7 +5876,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5974,7 +6044,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6164,7 +6234,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header { func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6337,7 +6407,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6511,7 +6581,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6683,7 +6753,7 @@ func (c *NotificationsDeleteCall) Header() http.Header { func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6831,7 +6901,7 @@ func (c *NotificationsGetCall) Header() http.Header { func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7001,7 +7071,7 @@ func (c *NotificationsInsertCall) Header() http.Header { func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7176,7 +7246,7 @@ func (c *NotificationsListCall) Header() http.Header { func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7349,7 +7419,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7521,7 +7591,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header { func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7712,7 +7782,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7909,7 +7979,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header { func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8094,7 +8164,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c 
*ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8292,7 +8362,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8477,11 +8547,9 @@ func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) return c } -// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of -// the Cloud KMS key, of the form -// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, -// that will be used to encrypt the object. Overrides the object -// metadata's kms_key_name value, if any. +// KmsKeyName sets the optional parameter "kmsKeyName": Not currently +// supported. Specifying the parameter causes the request to fail with +// status code 400 - Bad Request. func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { c.urlParams_.Set("kmsKeyName", kmsKeyName) return c @@ -8529,7 +8597,7 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8648,7 +8716,7 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "type": "string" // }, // "kmsKeyName": { - // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", // "location": "query", // "type": "string" // }, @@ -8705,6 +8773,17 @@ func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinat return c } +// DestinationKmsKeyName sets the optional parameter +// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the +// form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. +func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + // DestinationPredefinedAcl sets the optional parameter // "destinationPredefinedAcl": Apply a predefined set of access controls // to the destination object. 
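objects.compose's kmsKeyName parameter is now documented as unsupported, while objects.copy gains destinationKmsKeyName. Assuming Application Default Credentials are available, and with placeholder bucket, object and Cloud KMS key names, a copy that should be encrypted with a specific key could look like this sketch.

package main

import (
	"context"
	"fmt"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // assumes Application Default Credentials
	if err != nil {
		panic(err)
	}

	// Placeholder names, for illustration only.
	const (
		srcBucket = "my-src-bucket"
		dstBucket = "my-dst-bucket"
		object    = "data.txt"
		kmsKey    = "projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key"
	)

	obj, err := svc.Objects.Copy(srcBucket, object, dstBucket, object, &storage.Object{}).
		DestinationKmsKeyName(kmsKey).
		Context(ctx).
		Do()
	if err != nil {
		panic(err)
	}
	fmt.Println("copied object encrypted with:", obj.KmsKeyName)
}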
@@ -8861,7 +8940,7 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8944,6 +9023,11 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "required": true, // "type": "string" // }, + // "destinationKmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, // "destinationObject": { // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", // "location": "path", @@ -9184,7 +9268,7 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9417,7 +9501,7 @@ func (c *ObjectsGetCall) Header() http.Header { func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9667,7 +9751,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header { func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9984,7 +10068,7 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9999,7 +10083,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") if c.mediaInfo_ != nil { - urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) + urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) } if body == nil { @@ -10018,7 +10102,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) + return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.insert" call. 
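The insert call's doRequest now resolves its upload URL against the service BasePath (instead of string-replacing www.googleapis.com) and goes through SendRequestWithRetry. A minimal media upload that exercises that path; the bucket name is a placeholder and Application Default Credentials are assumed.

package main

import (
	"context"
	"fmt"
	"strings"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // assumes Application Default Credentials
	if err != nil {
		panic(err)
	}

	// Media() routes the request onto the /upload/storage/v1/... path that
	// doRequest resolves against BasePath above.
	obj, err := svc.Objects.Insert("my-bucket", &storage.Object{Name: "notes/hello.txt"}).
		Media(strings.NewReader("hello, world\n")).
		Context(ctx).
		Do()
	if err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %s (%d bytes)\n", obj.Name, obj.Size)
}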
@@ -10234,6 +10318,15 @@ func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { return c } +// EndOffset sets the optional parameter "endOffset": Filter results to +// objects whose names are lexicographically before endOffset. If +// startOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + // IncludeTrailingDelimiter sets the optional parameter // "includeTrailingDelimiter": If true, objects that end in exactly one // instance of delimiter will have their metadata included in items in @@ -10287,6 +10380,15 @@ func (c *ObjectsListCall) ProvisionalUserProject(provisionalUserProject string) return c } +// StartOffset sets the optional parameter "startOffset": Filter results +// to objects whose names are lexicographically equal to or after +// startOffset. If endOffset is also set, the objects listed will have +// names between startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsListCall) StartOffset(startOffset string) *ObjectsListCall { + c.urlParams_.Set("startOffset", startOffset) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { @@ -10339,7 +10441,7 @@ func (c *ObjectsListCall) Header() http.Header { func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10419,6 +10521,11 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // "location": "query", // "type": "string" // }, + // "endOffset": { + // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, // "includeTrailingDelimiter": { // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", // "location": "query", @@ -10460,6 +10567,11 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // "location": "query", // "type": "string" // }, + // "startOffset": { + // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", // "location": "query", @@ -10646,7 +10758,7 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11038,7 +11150,7 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11341,7 +11453,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11541,7 +11653,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header { func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11802,7 +11914,7 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12007,6 +12119,15 @@ func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { return c } +// EndOffset sets the optional parameter "endOffset": Filter results to +// objects whose names are lexicographically before endOffset. If +// startOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsWatchAllCall) EndOffset(endOffset string) *ObjectsWatchAllCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + // IncludeTrailingDelimiter sets the optional parameter // "includeTrailingDelimiter": If true, objects that end in exactly one // instance of delimiter will have their metadata included in items in @@ -12060,6 +12181,15 @@ func (c *ObjectsWatchAllCall) ProvisionalUserProject(provisionalUserProject stri return c } +// StartOffset sets the optional parameter "startOffset": Filter results +// to objects whose names are lexicographically equal to or after +// startOffset. If endOffset is also set, the objects listed will have +// names between startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsWatchAllCall) StartOffset(startOffset string) *ObjectsWatchAllCall { + c.urlParams_.Set("startOffset", startOffset) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. 
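[editor's note] The StartOffset/EndOffset parameters added to ObjectsListCall and ObjectsWatchAllCall above enable lexicographic range scans over object names: the start bound is inclusive and the end bound exclusive. A short sketch of paging through such a range with the generated client; the helper name, bucket, and offsets are placeholders, not anything defined in this patch:

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

// listRange logs objects whose names fall in [start, end), using the
// StartOffset/EndOffset parameters introduced in this revision.
func listRange(ctx context.Context, svc *storage.Service, bucket, start, end string) error {
	call := svc.Objects.List(bucket).
		StartOffset(start). // inclusive lower bound
		EndOffset(end).     // exclusive upper bound
		Context(ctx)
	return call.Pages(ctx, func(page *storage.Objects) error {
		for _, obj := range page.Items {
			log.Println(obj.Name)
		}
		return nil
	})
}

// e.g. listRange(ctx, svc, "my-bucket", "logs/2020-07-01", "logs/2020-08-01")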
func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { @@ -12102,7 +12232,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12184,6 +12314,11 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) // "location": "query", // "type": "string" // }, + // "endOffset": { + // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, // "includeTrailingDelimiter": { // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", // "location": "query", @@ -12225,6 +12360,11 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) // "location": "query", // "type": "string" // }, + // "startOffset": { + // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -12308,7 +12448,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12458,7 +12598,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12593,7 +12733,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header { func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12689,6 +12829,7 @@ func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMeta // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", // "https://www.googleapis.com/auth/devstorage.read_only" // ] // } @@ -12792,7 +12933,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header { func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12987,7 +13128,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13164,7 +13305,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header { func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/1.11.0 gdcl/20190802") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200707") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/transport/cert/default_cert.go new file mode 100644 index 000000000..c03af65fd --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/default_cert.go @@ -0,0 +1,110 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +var ( + defaultSourceOnce sync.Once + defaultSource Source + defaultSourceErr error +) + +// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. +type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// DefaultSource returns a certificate source that execs the command specified +// in the file at ~/.secureConnect/context_aware_metadata.json +// +// If that file does not exist, a nil source is returned. +func DefaultSource() (Source, error) { + defaultSourceOnce.Do(func() { + defaultSource, defaultSourceErr = newSecureConnectSource() + }) + return defaultSource, defaultSourceErr +} + +type secureConnectSource struct { + metadata secureConnectMetadata +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// newSecureConnectSource creates a secureConnectSource by reading the well-known file. +func newSecureConnectSource() (Source, error) { + user, err := user.Current() + if err != nil { + // Ignore. + return nil, nil + } + filename := filepath.Join(user.HomeDir, metadataPath, metadataFile) + file, err := ioutil.ReadFile(filename) + if os.IsNotExist(err) { + // Ignore. 
+ return nil, nil + } + if err != nil { + return nil, err + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %v", filename, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %v", filename, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + // TODO(cbro): consider caching valid certificates rather than exec'ing every time. + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + // TODO(cbro): read stderr for error message? Might contain sensitive info. + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + return &cert, nil +} diff --git a/vendor/google.golang.org/api/transport/http/default_transport_go113.go b/vendor/google.golang.org/api/transport/http/default_transport_go113.go new file mode 100644 index 000000000..924f2704d --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/default_transport_go113.go @@ -0,0 +1,20 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package http + +import "net/http" + +// clonedTransport returns the given RoundTripper as a cloned *http.Transport. +// It returns nil if the RoundTripper can't be cloned or coerced to +// *http.Transport. +func clonedTransport(rt http.RoundTripper) *http.Transport { + t, ok := rt.(*http.Transport) + if !ok { + return nil + } + return t.Clone() +} diff --git a/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go b/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go new file mode 100644 index 000000000..3cb16c6cb --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go @@ -0,0 +1,15 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +package http + +import "net/http" + +// clonedTransport returns the given RoundTripper as a cloned *http.Transport. +// For versions of Go <1.13, this is not supported, so return nil. +func clonedTransport(rt http.RoundTripper) *http.Transport { + return nil +} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index c0d8bf20b..445030141 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -1,16 +1,6 @@ -// Copyright 2015 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. // Package http supports network connections to HTTP servers. // This package is not intended for use by end developers. Use the @@ -19,17 +9,30 @@ package http import ( "context" + "crypto/tls" "errors" + "net" "net/http" + "net/url" + "os" + "strings" + "time" "go.opencensus.io/plugin/ochttp" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" "google.golang.org/api/internal" "google.golang.org/api/option" + "google.golang.org/api/transport/cert" "google.golang.org/api/transport/http/internal/propagation" ) +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" +) + // NewClient returns an HTTP client for use communicating with a Google cloud // service, configured with the given ClientOptions. It also returns the endpoint // for the service as specified in the options. @@ -38,15 +41,23 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if err != nil { return nil, "", err } + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return nil, "", err + } + endpoint, err := getEndpoint(settings, clientCertSource) + if err != nil { + return nil, "", err + } // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? if settings.HTTPClient != nil { - return settings.HTTPClient, settings.Endpoint, nil + return settings.HTTPClient, endpoint, nil } - trans, err := newTransport(ctx, defaultBaseTransport(ctx), settings) + trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource), settings) if err != nil { return nil, "", err } - return &http.Client{Transport: trans}, settings.Endpoint, nil + return &http.Client{Transport: trans}, endpoint, nil } // NewTransport creates an http.RoundTripper for use communicating with a Google @@ -63,14 +74,14 @@ func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.Cl } func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { - trans := base - trans = parameterTransport{ - base: trans, + paramTransport := ¶meterTransport{ + base: base, userAgent: settings.UserAgent, quotaProject: settings.QuotaProject, requestReason: settings.RequestReason, } - trans = addOCTransport(trans) + var trans http.RoundTripper = paramTransport + trans = addOCTransport(trans, settings) switch { case settings.NoAuth: // Do nothing. 
@@ -84,9 +95,17 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } + if paramTransport.quotaProject == "" { + paramTransport.quotaProject = internal.QuotaProjectFromCreds(creds) + } + + ts := creds.TokenSource + if settings.TokenSource != nil { + ts = settings.TokenSource + } trans = &oauth2.Transport{ Base: trans, - Source: creds.TokenSource, + Source: ts, } } return trans, nil @@ -114,21 +133,20 @@ type parameterTransport struct { base http.RoundTripper } -func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { +func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.base if rt == nil { return nil, errors.New("transport: no Transport specified") } - if t.userAgent == "" { - return rt.RoundTrip(req) - } newReq := *req newReq.Header = make(http.Header) for k, vv := range req.Header { newReq.Header[k] = vv } - // TODO(cbro): append to existing User-Agent header? - newReq.Header.Set("User-Agent", t.userAgent) + if t.userAgent != "" { + // TODO(cbro): append to existing User-Agent header? + newReq.Header.Set("User-Agent", t.userAgent) + } // Attach system parameters into the header if t.quotaProject != "" { @@ -145,17 +163,142 @@ func (t parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) var appengineUrlfetchHook func(context.Context) http.RoundTripper // defaultBaseTransport returns the base HTTP transport. -// On App Engine, this is urlfetch.Transport, otherwise it's http.DefaultTransport. -func defaultBaseTransport(ctx context.Context) http.RoundTripper { +// On App Engine, this is urlfetch.Transport. +// Otherwise, use a default transport, taking most defaults from +// http.DefaultTransport. +// If TLSCertificate is available, set TLSClientConfig as well. +func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) http.RoundTripper { if appengineUrlfetchHook != nil { return appengineUrlfetchHook(ctx) } - return http.DefaultTransport + + // Copy http.DefaultTransport except for MaxIdleConnsPerHost setting, + // which is increased due to reported performance issues under load in the GCS + // client. Transport.Clone is only available in Go 1.13 and up. 
+ trans := clonedTransport(http.DefaultTransport) + if trans == nil { + trans = fallbackBaseTransport() + } + trans.MaxIdleConnsPerHost = 100 + + if clientCertSource != nil { + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: clientCertSource, + } + } + + return trans +} + +// fallbackBaseTransport is used in google.protobuf.MethodOptions + 1, // 1: google.api.http:type_name -> google.api.HttpRule + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func init() { proto.RegisterFile("google/api/annotations.proto", fileDescriptor_c591c5aa9fb79aab) } - -var fileDescriptor_c591c5aa9fb79aab = []byte{ - // 208 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, - 0xcf, 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0xc8, 0xea, 0x25, 0x16, 0x64, - 0x4a, 0x89, 0x22, 0xa9, 0xcc, 0x28, 0x29, 0x29, 0x80, 0x28, 0x91, 0x52, 0x80, 0x0a, 0x83, 0x79, - 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa9, 0xc5, 0xc9, 0x45, 0x99, 0x05, 0x25, 0xf9, 0x45, 0x10, 0x15, - 0x56, 0xde, 0x5c, 0x2c, 0x20, 0xf5, 0x42, 0x72, 0x7a, 0x50, 0xd3, 0x60, 0x4a, 0xf5, 0x7c, 0x53, - 0x4b, 0x32, 0xf2, 0x53, 0xfc, 0x0b, 0xc0, 0x56, 0x4a, 0x6c, 0x38, 0xb5, 0x47, 0x49, 0x81, 0x51, - 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x61, 0xad, 0x9e, 0x47, 0x49, 0x49, 0x41, 0x50, 0x69, 0x4e, 0x6a, - 0x10, 0xd8, 0x10, 0xa7, 0x3c, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x24, 0x05, 0x4e, 0x02, 0x8e, 0x08, - 0x67, 0x07, 0x80, 0x4c, 0x0e, 0x60, 0x8c, 0x72, 0x84, 0xca, 0xa7, 0xe7, 0xe7, 0x24, 0xe6, 0xa5, - 0xeb, 0xe5, 0x17, 0xa5, 0xeb, 0xa7, 0xa7, 0xe6, 0x81, 0xed, 0xd5, 0x87, 0x48, 0x25, 0x16, 0x64, - 0x16, 0xa3, 0x7b, 0xda, 0x1a, 0x89, 0xbd, 0x88, 0x89, 0xc5, 0xdd, 0x31, 0xc0, 0x33, 0x89, 0x0d, - 0xac, 0xc9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x29, 0x19, 0x62, 0x28, 0x01, 0x00, 0x00, +func init() { file_google_api_annotations_proto_init() } +func file_google_api_annotations_proto_init() { + if File_google_api_annotations_proto != nil { + return + } + file_google_api_http_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_annotations_proto_goTypes, + DependencyIndexes: file_google_api_annotations_proto_depIdxs, + ExtensionInfos: file_google_api_annotations_proto_extTypes, + }.Build() + File_google_api_annotations_proto = out.File + file_google_api_annotations_proto_rawDesc = nil + file_google_api_annotations_proto_goTypes = nil + file_google_api_annotations_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 867fc0c3f..3832df0bd 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -1,79 +1,219 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/api/client.proto package annotations import ( - fmt "fmt" - math "math" + reflect "reflect" proto "github.com/golang/protobuf/proto" descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 -var E_MethodSignature = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MethodOptions)(nil), - ExtensionType: ([]string)(nil), - Field: 1051, - Name: "google.api.method_signature", - Tag: "bytes,1051,rep,name=method_signature", - Filename: "google/api/client.proto", +var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptor.MethodOptions)(nil), + ExtensionType: ([]string)(nil), + Field: 1051, + Name: "google.api.method_signature", + Tag: "bytes,1051,rep,name=method_signature", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptor.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1049, + Name: "google.api.default_host", + Tag: "bytes,1049,opt,name=default_host", + Filename: "google/api/client.proto", + }, + { + ExtendedType: (*descriptor.ServiceOptions)(nil), + ExtensionType: (*string)(nil), + Field: 1050, + Name: "google.api.oauth_scopes", + Tag: "bytes,1050,opt,name=oauth_scopes", + Filename: "google/api/client.proto", + }, } -var E_DefaultHost = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.ServiceOptions)(nil), - ExtensionType: (*string)(nil), - Field: 1049, - Name: "google.api.default_host", - Tag: "bytes,1049,opt,name=default_host", - Filename: "google/api/client.proto", -} +// Extension fields to descriptor.MethodOptions. +var ( + // A definition of a client library method signature. + // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). 
Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. + // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // * Adding this annotation to an unannotated method is backwards + // compatible. + // * Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. + // * Modifying or removing an existing method signature annotation is + // a breaking change. + // * Re-ordering existing method signature annotations is a breaking + // change. + // + // repeated string method_signature = 1051; + E_MethodSignature = &file_google_api_client_proto_extTypes[0] +) -var E_OauthScopes = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.ServiceOptions)(nil), - ExtensionType: (*string)(nil), - Field: 1050, - Name: "google.api.oauth_scopes", - Tag: "bytes,1050,opt,name=oauth_scopes", - Filename: "google/api/client.proto", -} +// Extension fields to descriptor.ServiceOptions. +var ( + // The hostname for this service. + // This should be specified with no prefix or protocol. + // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } + // + // optional string default_host = 1049; + E_DefaultHost = &file_google_api_client_proto_extTypes[1] + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... 
+ // } + // + // optional string oauth_scopes = 1050; + E_OauthScopes = &file_google_api_client_proto_extTypes[2] +) + +var File_google_api_client_proto protoreflect.FileDescriptor -func init() { - proto.RegisterExtension(E_MethodSignature) - proto.RegisterExtension(E_DefaultHost) - proto.RegisterExtension(E_OauthScopes) +var file_google_api_client_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, + 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, + 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, + 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { proto.RegisterFile("google/api/client.proto", fileDescriptor_78f2c6f7c3a942c1) } +var file_google_api_client_proto_goTypes = []interface{}{ + (*descriptor.MethodOptions)(nil), // 0: google.protobuf.MethodOptions + (*descriptor.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions +} +var file_google_api_client_proto_depIdxs = []int32{ + 0, // 0: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 1, // 1: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 1, // 2: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the 
sub-list for extension type_name + 0, // [0:3] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} -var fileDescriptor_78f2c6f7c3a942c1 = []byte{ - // 262 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x90, 0x3f, 0x4f, 0xc3, 0x30, - 0x10, 0xc5, 0x55, 0x40, 0xa8, 0x75, 0x11, 0xa0, 0x2c, 0x20, 0x06, 0xc8, 0xd8, 0xc9, 0x1e, 0xd8, - 0xca, 0xd4, 0x76, 0xe0, 0x8f, 0x84, 0x88, 0x9a, 0x8d, 0x25, 0x72, 0x9d, 0xab, 0x63, 0x29, 0xf5, - 0x59, 0xf6, 0x85, 0xef, 0x02, 0x6c, 0x7c, 0x52, 0x54, 0xc7, 0x11, 0x48, 0x0c, 0x6c, 0x27, 0xbd, - 0xf7, 0xfb, 0x9d, 0xf4, 0xd8, 0x85, 0x46, 0xd4, 0x2d, 0x08, 0xe9, 0x8c, 0x50, 0xad, 0x01, 0x4b, - 0xdc, 0x79, 0x24, 0xcc, 0x58, 0x1f, 0x70, 0xe9, 0xcc, 0x55, 0x9e, 0x4a, 0x31, 0xd9, 0x74, 0x5b, - 0x51, 0x43, 0x50, 0xde, 0x38, 0x42, 0xdf, 0xb7, 0xe7, 0x4f, 0xec, 0x7c, 0x07, 0xd4, 0x60, 0x5d, - 0x05, 0xa3, 0xad, 0xa4, 0xce, 0x43, 0x76, 0xcd, 0x93, 0x62, 0xc0, 0xf8, 0x73, 0xac, 0xbc, 0x38, - 0x32, 0x68, 0xc3, 0xe5, 0xe7, 0x38, 0x3f, 0x9c, 0x4d, 0xd6, 0x67, 0x3d, 0x58, 0x0e, 0xdc, 0x7c, - 0xc5, 0x4e, 0x6a, 0xd8, 0xca, 0xae, 0xa5, 0xaa, 0xc1, 0x40, 0xd9, 0xcd, 0x1f, 0x4f, 0x09, 0xfe, - 0xcd, 0x28, 0x18, 0x44, 0xef, 0xe3, 0x7c, 0x34, 0x9b, 0xac, 0xa7, 0x89, 0x7a, 0xc0, 0x40, 0x7b, - 0x09, 0xca, 0x8e, 0x9a, 0x2a, 0x28, 0x74, 0x10, 0xfe, 0x97, 0x7c, 0x24, 0x49, 0xa4, 0xca, 0x08, - 0x2d, 0x0d, 0x3b, 0x55, 0xb8, 0xe3, 0x3f, 0x4b, 0x2c, 0xa7, 0xab, 0xb8, 0x51, 0xb1, 0x97, 0x14, - 0xa3, 0xd7, 0x45, 0x8a, 0x34, 0xb6, 0xd2, 0x6a, 0x8e, 0x5e, 0x0b, 0x0d, 0x36, 0xbe, 0x10, 0x7d, - 0x24, 0x9d, 0x09, 0x71, 0x5c, 0x69, 0x2d, 0x92, 0x8c, 0xbf, 0xee, 0x7e, 0xdd, 0x5f, 0x07, 0x47, - 0xf7, 0x8b, 0xe2, 0x71, 0x73, 0x1c, 0xa1, 0xdb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xc2, - 0xcf, 0x71, 0x90, 0x01, 0x00, 0x00, +func init() { file_google_api_client_proto_init() } +func file_google_api_client_proto_init() { + if File_google_api_client_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_client_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 3, + NumServices: 0, + }, + GoTypes: file_google_api_client_proto_goTypes, + DependencyIndexes: file_google_api_client_proto_depIdxs, + ExtensionInfos: file_google_api_client_proto_extTypes, + }.Build() + File_google_api_client_proto = out.File + file_google_api_client_proto_rawDesc = nil + file_google_api_client_proto_goTypes = nil + file_google_api_client_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index 31f87dd00..0038b20e4 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -1,26 +1,45 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/api/field_behavior.proto package annotations import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // An indicator of the behavior of a given field (for example, that a field // is required in requests, or given as output but ignored as input). @@ -56,67 +75,164 @@ const ( FieldBehavior_IMMUTABLE FieldBehavior = 5 ) -var FieldBehavior_name = map[int32]string{ - 0: "FIELD_BEHAVIOR_UNSPECIFIED", - 1: "OPTIONAL", - 2: "REQUIRED", - 3: "OUTPUT_ONLY", - 4: "INPUT_ONLY", - 5: "IMMUTABLE", -} +// Enum value maps for FieldBehavior. +var ( + FieldBehavior_name = map[int32]string{ + 0: "FIELD_BEHAVIOR_UNSPECIFIED", + 1: "OPTIONAL", + 2: "REQUIRED", + 3: "OUTPUT_ONLY", + 4: "INPUT_ONLY", + 5: "IMMUTABLE", + } + FieldBehavior_value = map[string]int32{ + "FIELD_BEHAVIOR_UNSPECIFIED": 0, + "OPTIONAL": 1, + "REQUIRED": 2, + "OUTPUT_ONLY": 3, + "INPUT_ONLY": 4, + "IMMUTABLE": 5, + } +) -var FieldBehavior_value = map[string]int32{ - "FIELD_BEHAVIOR_UNSPECIFIED": 0, - "OPTIONAL": 1, - "REQUIRED": 2, - "OUTPUT_ONLY": 3, - "INPUT_ONLY": 4, - "IMMUTABLE": 5, +func (x FieldBehavior) Enum() *FieldBehavior { + p := new(FieldBehavior) + *p = x + return p } func (x FieldBehavior) String() string { - return proto.EnumName(FieldBehavior_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldBehavior) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_behavior_proto_enumTypes[0].Descriptor() +} + +func (FieldBehavior) Type() protoreflect.EnumType { + return &file_google_api_field_behavior_proto_enumTypes[0] } +func (x FieldBehavior) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldBehavior.Descriptor instead. 
func (FieldBehavior) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_4648f18fd5079967, []int{0} + return file_google_api_field_behavior_proto_rawDescGZIP(), []int{0} +} + +var file_google_api_field_behavior_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: ([]FieldBehavior)(nil), + Field: 1052, + Name: "google.api.field_behavior", + Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior", + Filename: "google/api/field_behavior.proto", + }, +} + +// Extension fields to descriptor.FieldOptions. +var ( + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. + // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + // + // repeated google.api.FieldBehavior field_behavior = 1052; + E_FieldBehavior = &file_google_api_field_behavior_proto_extTypes[0] +) + +var File_google_api_field_behavior_proto protoreflect.FileDescriptor + +var file_google_api_field_behavior_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, + 0x7b, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, + 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, 0x49, + 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0c, + 0x0a, 0x08, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, + 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0e, 0x0a, + 0x0a, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x12, 0x0d, 0x0a, + 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x3a, 0x60, 0x0a, 0x0e, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, + 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, + 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, + 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var E_FieldBehavior = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: ([]FieldBehavior)(nil), - Field: 1052, - Name: "google.api.field_behavior", - Tag: "varint,1052,rep,name=field_behavior,enum=google.api.FieldBehavior", - Filename: "google/api/field_behavior.proto", +var ( + file_google_api_field_behavior_proto_rawDescOnce sync.Once + file_google_api_field_behavior_proto_rawDescData = file_google_api_field_behavior_proto_rawDesc +) + +func file_google_api_field_behavior_proto_rawDescGZIP() []byte { + file_google_api_field_behavior_proto_rawDescOnce.Do(func() { + file_google_api_field_behavior_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_behavior_proto_rawDescData) + }) + return file_google_api_field_behavior_proto_rawDescData } -func init() { - proto.RegisterEnum("google.api.FieldBehavior", FieldBehavior_name, FieldBehavior_value) - proto.RegisterExtension(E_FieldBehavior) +var file_google_api_field_behavior_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_behavior_proto_goTypes = []interface{}{ + (FieldBehavior)(0), // 0: google.api.FieldBehavior + (*descriptor.FieldOptions)(nil), // 1: google.protobuf.FieldOptions +} +var file_google_api_field_behavior_proto_depIdxs = []int32{ + 1, // 0: google.api.field_behavior:extendee -> google.protobuf.FieldOptions + 0, // 1: google.api.field_behavior:type_name -> google.api.FieldBehavior + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func init() { proto.RegisterFile("google/api/field_behavior.proto", fileDescriptor_4648f18fd5079967) } - -var fileDescriptor_4648f18fd5079967 = []byte{ - // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0x4f, 0x4f, 0xb3, 0x30, - 0x1c, 0xc7, 0x9f, 0xfd, 0x79, 0xcc, 0xac, 0x0e, 0x49, 0x4f, 0xba, 0x44, 0xdd, 0xd1, 0x78, 0x28, - 0x89, 0xde, 0xf4, 0x04, 0xae, 0xd3, 0x26, 0x8c, 0x56, 0x04, 0x13, 0xbd, 0x60, 0xb7, 0xb1, 0xda, - 0x64, 0xd2, 0x06, 0xd0, 0x8b, 0x6f, 0xc5, 0x93, 0xaf, 0xd4, 0xd0, 0x31, 0x85, 0x5b, 0xbf, 0xf9, - 0x7d, 0xfa, 0xeb, 0xe7, 0x5b, 0x70, 0x2a, 0x94, 0x12, 0xeb, 0xd4, 0xe1, 0x5a, 0x3a, 0x2b, 0x99, - 0xae, 0x97, 0xc9, 0x3c, 0x7d, 0xe5, 0x1f, 0x52, 0xe5, 0x48, 0xe7, 0xaa, 0x54, 0x10, 0x6c, 0x00, - 0xc4, 0xb5, 0x1c, 0x8d, 0x6b, 0xd8, 0x4c, 0xe6, 0xef, 0x2b, 0x67, 0x99, 0x16, 0x8b, 0x5c, 0xea, - 0x72, 0x4b, 0x9f, 0x7f, 0x82, 0xe1, 0xb4, 0xda, 0xe2, 0xd5, 0x4b, 0xe0, 0x09, 0x18, 0x4d, 0x09, - 0xf6, 0x27, 0x89, 0x87, 0xef, 0xdc, 0x47, 0x42, 0xc3, 0x24, 0x0e, 0x1e, 0x18, 0xbe, 0x21, 0x53, - 0x82, 0x27, 0xf6, 0x3f, 0xb8, 0x0f, 0x06, 0x94, 0x45, 0x84, 0x06, 0xae, 0x6f, 0x77, 0xaa, 0x14, - 0xe2, 0xfb, 0x98, 0x84, 0x78, 0x62, 0x77, 0xe1, 0x01, 0xd8, 0xa3, 0x71, 0xc4, 0xe2, 0x28, 0xa1, - 0x81, 0xff, 0x64, 0xf7, 0xa0, 0x05, 0x00, 0x09, 0x7e, 0x73, 0x1f, 0x0e, 0xc1, 0x2e, 0x99, 0xcd, - 0xe2, 0xc8, 0xf5, 0x7c, 0x6c, 0xff, 0xbf, 0x7a, 0x01, 0x56, 
0xbb, 0x02, 0x3c, 0x46, 0xb5, 0xfd, - 0xd6, 0x18, 0x19, 0x3b, 0xaa, 0x4b, 0xa9, 0xb2, 0xe2, 0xf0, 0x6b, 0x30, 0xee, 0x9d, 0x59, 0x17, - 0x47, 0xe8, 0xaf, 0x23, 0x6a, 0xe9, 0x87, 0xc3, 0x55, 0x33, 0x7a, 0x1a, 0x58, 0x0b, 0xf5, 0xd6, - 0xc0, 0x3d, 0xd8, 0xe2, 0x59, 0xf5, 0x0c, 0xeb, 0x3c, 0xbb, 0x35, 0x21, 0xd4, 0x9a, 0x67, 0x02, - 0xa9, 0x5c, 0x38, 0x22, 0xcd, 0x8c, 0x84, 0xb3, 0x19, 0x71, 0x2d, 0x0b, 0xf3, 0xe9, 0x3c, 0xcb, - 0x54, 0xc9, 0x8d, 0xcf, 0x75, 0xe3, 0xfc, 0xdd, 0xed, 0xdf, 0xba, 0x8c, 0xcc, 0x77, 0xcc, 0xa5, - 0xcb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x94, 0x57, 0x94, 0xa8, 0x01, 0x00, 0x00, +func init() { file_google_api_field_behavior_proto_init() } +func file_google_api_field_behavior_proto_init() { + if File_google_api_field_behavior_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_behavior_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_behavior_proto_goTypes, + DependencyIndexes: file_google_api_field_behavior_proto_depIdxs, + EnumInfos: file_google_api_field_behavior_proto_enumTypes, + ExtensionInfos: file_google_api_field_behavior_proto_extTypes, + }.Build() + File_google_api_field_behavior_proto = out.File + file_google_api_field_behavior_proto_rawDesc = nil + file_google_api_field_behavior_proto_goTypes = nil + file_google_api_field_behavior_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index a63870374..955bc56a3 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -1,30 +1,53 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/api/http.proto package annotations import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // Defines the HTTP configuration for an API service. It contains a list of // [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method // to one or more HTTP REST API methods. type Http struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A list of HTTP configuration rules that apply to individual API methods. // // **NOTE:** All service configuration rules follow "last one wins" order. @@ -35,47 +58,51 @@ type Http struct { // // The default behavior is to not decode RFC 6570 reserved characters in multi // segment matches. - FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + FullyDecodeReservedExpansion bool `protobuf:"varint,2,opt,name=fully_decode_reserved_expansion,json=fullyDecodeReservedExpansion,proto3" json:"fully_decode_reserved_expansion,omitempty"` } -func (m *Http) Reset() { *m = Http{} } -func (m *Http) String() string { return proto.CompactTextString(m) } -func (*Http) ProtoMessage() {} -func (*Http) Descriptor() ([]byte, []int) { - return fileDescriptor_ff9994be407cdcc9, []int{0} +func (x *Http) Reset() { + *x = Http{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Http) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Http.Unmarshal(m, b) -} -func (m *Http) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Http.Marshal(b, m, deterministic) +func (x *Http) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Http) XXX_Merge(src proto.Message) { - xxx_messageInfo_Http.Merge(m, src) -} -func (m *Http) XXX_Size() int { - return xxx_messageInfo_Http.Size(m) -} -func (m *Http) XXX_DiscardUnknown() { - xxx_messageInfo_Http.DiscardUnknown(m) + +func (*Http) ProtoMessage() {} + +func (x *Http) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Http proto.InternalMessageInfo +// Deprecated: Use Http.ProtoReflect.Descriptor instead. +func (*Http) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{0} +} -func (m *Http) GetRules() []*HttpRule { - if m != nil { - return m.Rules +func (x *Http) GetRules() []*HttpRule { + if x != nil { + return x.Rules } return nil } -func (m *Http) GetFullyDecodeReservedExpansion() bool { - if m != nil { - return m.FullyDecodeReservedExpansion +func (x *Http) GetFullyDecodeReservedExpansion() bool { + if x != nil { + return x.FullyDecodeReservedExpansion } return false } @@ -350,6 +377,10 @@ func (m *Http) GetFullyDecodeReservedExpansion() bool { // the request or response body to a repeated field. However, some gRPC // Transcoding implementations may not support this feature. 
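[editor's note] The http.pb.go hunks in this file only swap the code generator (protoc-gen-go v1.22 on the protobuf APIv2 runtime); the google.api.http extension itself is unchanged, so callers keep reading and writing it through the proto extension API. A hedged sketch of round-tripping an HttpRule on MethodOptions with the new google.golang.org/protobuf/proto package; the GET path template is an arbitrary placeholder, not taken from this patch:

import (
	"log"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func httpRuleRoundTrip() {
	opts := &descriptorpb.MethodOptions{}

	// Attach a google.api.http rule to the method options.
	// The path template is a placeholder.
	proto.SetExtension(opts, annotations.E_Http, &annotations.HttpRule{
		Pattern: &annotations.HttpRule_Get{Get: "/v1/{name=shelves/*}"},
	})

	// Read it back; GetExtension returns interface{} in the APIv2 runtime.
	if proto.HasExtension(opts, annotations.E_Http) {
		rule := proto.GetExtension(opts, annotations.E_Http).(*annotations.HttpRule)
		log.Println(rule.GetGet()) // "/v1/{name=shelves/*}"
	}
}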
type HttpRule struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Selects a method to which this rule applies. // // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. @@ -358,7 +389,7 @@ type HttpRule struct { // used with any of the {get|put|post|delete|patch} methods. A custom method // can be defined using the 'custom' field. // - // Types that are valid to be assigned to Pattern: + // Types that are assignable to Pattern: // *HttpRule_Get // *HttpRule_Put // *HttpRule_Post @@ -383,84 +414,48 @@ type HttpRule struct { // Additional HTTP bindings for the selector. Nested bindings must // not contain an `additional_bindings` field themselves (that is, // the nesting may only be one level deep). - AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + AdditionalBindings []*HttpRule `protobuf:"bytes,11,rep,name=additional_bindings,json=additionalBindings,proto3" json:"additional_bindings,omitempty"` } -func (m *HttpRule) Reset() { *m = HttpRule{} } -func (m *HttpRule) String() string { return proto.CompactTextString(m) } -func (*HttpRule) ProtoMessage() {} -func (*HttpRule) Descriptor() ([]byte, []int) { - return fileDescriptor_ff9994be407cdcc9, []int{1} -} - -func (m *HttpRule) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_HttpRule.Unmarshal(m, b) -} -func (m *HttpRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_HttpRule.Marshal(b, m, deterministic) -} -func (m *HttpRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_HttpRule.Merge(m, src) -} -func (m *HttpRule) XXX_Size() int { - return xxx_messageInfo_HttpRule.Size(m) -} -func (m *HttpRule) XXX_DiscardUnknown() { - xxx_messageInfo_HttpRule.DiscardUnknown(m) -} - -var xxx_messageInfo_HttpRule proto.InternalMessageInfo - -func (m *HttpRule) GetSelector() string { - if m != nil { - return m.Selector +func (x *HttpRule) Reset() { + *x = HttpRule{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -type isHttpRule_Pattern interface { - isHttpRule_Pattern() +func (x *HttpRule) String() string { + return protoimpl.X.MessageStringOf(x) } -type HttpRule_Get struct { - Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` -} - -type HttpRule_Put struct { - Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` -} - -type HttpRule_Post struct { - Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` -} +func (*HttpRule) ProtoMessage() {} -type HttpRule_Delete struct { - Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +func (x *HttpRule) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type HttpRule_Patch struct { - Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +// Deprecated: Use HttpRule.ProtoReflect.Descriptor instead. 
+func (*HttpRule) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{1} } -type HttpRule_Custom struct { - Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +func (x *HttpRule) GetSelector() string { + if x != nil { + return x.Selector + } + return "" } -func (*HttpRule_Get) isHttpRule_Pattern() {} - -func (*HttpRule_Put) isHttpRule_Pattern() {} - -func (*HttpRule_Post) isHttpRule_Pattern() {} - -func (*HttpRule_Delete) isHttpRule_Pattern() {} - -func (*HttpRule_Patch) isHttpRule_Pattern() {} - -func (*HttpRule_Custom) isHttpRule_Pattern() {} - func (m *HttpRule) GetPattern() isHttpRule_Pattern { if m != nil { return m.Pattern @@ -468,166 +463,321 @@ func (m *HttpRule) GetPattern() isHttpRule_Pattern { return nil } -func (m *HttpRule) GetGet() string { - if x, ok := m.GetPattern().(*HttpRule_Get); ok { +func (x *HttpRule) GetGet() string { + if x, ok := x.GetPattern().(*HttpRule_Get); ok { return x.Get } return "" } -func (m *HttpRule) GetPut() string { - if x, ok := m.GetPattern().(*HttpRule_Put); ok { +func (x *HttpRule) GetPut() string { + if x, ok := x.GetPattern().(*HttpRule_Put); ok { return x.Put } return "" } -func (m *HttpRule) GetPost() string { - if x, ok := m.GetPattern().(*HttpRule_Post); ok { +func (x *HttpRule) GetPost() string { + if x, ok := x.GetPattern().(*HttpRule_Post); ok { return x.Post } return "" } -func (m *HttpRule) GetDelete() string { - if x, ok := m.GetPattern().(*HttpRule_Delete); ok { +func (x *HttpRule) GetDelete() string { + if x, ok := x.GetPattern().(*HttpRule_Delete); ok { return x.Delete } return "" } -func (m *HttpRule) GetPatch() string { - if x, ok := m.GetPattern().(*HttpRule_Patch); ok { +func (x *HttpRule) GetPatch() string { + if x, ok := x.GetPattern().(*HttpRule_Patch); ok { return x.Patch } return "" } -func (m *HttpRule) GetCustom() *CustomHttpPattern { - if x, ok := m.GetPattern().(*HttpRule_Custom); ok { +func (x *HttpRule) GetCustom() *CustomHttpPattern { + if x, ok := x.GetPattern().(*HttpRule_Custom); ok { return x.Custom } return nil } -func (m *HttpRule) GetBody() string { - if m != nil { - return m.Body +func (x *HttpRule) GetBody() string { + if x != nil { + return x.Body } return "" } -func (m *HttpRule) GetResponseBody() string { - if m != nil { - return m.ResponseBody +func (x *HttpRule) GetResponseBody() string { + if x != nil { + return x.ResponseBody } return "" } -func (m *HttpRule) GetAdditionalBindings() []*HttpRule { - if m != nil { - return m.AdditionalBindings +func (x *HttpRule) GetAdditionalBindings() []*HttpRule { + if x != nil { + return x.AdditionalBindings } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*HttpRule) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*HttpRule_Get)(nil), - (*HttpRule_Put)(nil), - (*HttpRule_Post)(nil), - (*HttpRule_Delete)(nil), - (*HttpRule_Patch)(nil), - (*HttpRule_Custom)(nil), - } +type isHttpRule_Pattern interface { + isHttpRule_Pattern() } +type HttpRule_Get struct { + // Maps to HTTP GET. Used for listing and getting information about + // resources. + Get string `protobuf:"bytes,2,opt,name=get,proto3,oneof"` +} + +type HttpRule_Put struct { + // Maps to HTTP PUT. Used for replacing a resource. + Put string `protobuf:"bytes,3,opt,name=put,proto3,oneof"` +} + +type HttpRule_Post struct { + // Maps to HTTP POST. Used for creating a resource or performing an action. 
+ Post string `protobuf:"bytes,4,opt,name=post,proto3,oneof"` +} + +type HttpRule_Delete struct { + // Maps to HTTP DELETE. Used for deleting a resource. + Delete string `protobuf:"bytes,5,opt,name=delete,proto3,oneof"` +} + +type HttpRule_Patch struct { + // Maps to HTTP PATCH. Used for updating a resource. + Patch string `protobuf:"bytes,6,opt,name=patch,proto3,oneof"` +} + +type HttpRule_Custom struct { + // The custom pattern is used for specifying an HTTP method that is not + // included in the `pattern` field, such as HEAD, or "*" to leave the + // HTTP method unspecified for this rule. The wild-card rule is useful + // for services that provide content to Web (HTML) clients. + Custom *CustomHttpPattern `protobuf:"bytes,8,opt,name=custom,proto3,oneof"` +} + +func (*HttpRule_Get) isHttpRule_Pattern() {} + +func (*HttpRule_Put) isHttpRule_Pattern() {} + +func (*HttpRule_Post) isHttpRule_Pattern() {} + +func (*HttpRule_Delete) isHttpRule_Pattern() {} + +func (*HttpRule_Patch) isHttpRule_Pattern() {} + +func (*HttpRule_Custom) isHttpRule_Pattern() {} + // A custom pattern is used for defining custom HTTP verb. type CustomHttpPattern struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The name of this custom HTTP verb. Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` // The path matched by this custom verb. - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` } -func (m *CustomHttpPattern) Reset() { *m = CustomHttpPattern{} } -func (m *CustomHttpPattern) String() string { return proto.CompactTextString(m) } -func (*CustomHttpPattern) ProtoMessage() {} -func (*CustomHttpPattern) Descriptor() ([]byte, []int) { - return fileDescriptor_ff9994be407cdcc9, []int{2} +func (x *CustomHttpPattern) Reset() { + *x = CustomHttpPattern{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_http_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *CustomHttpPattern) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CustomHttpPattern.Unmarshal(m, b) +func (x *CustomHttpPattern) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CustomHttpPattern) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CustomHttpPattern.Marshal(b, m, deterministic) -} -func (m *CustomHttpPattern) XXX_Merge(src proto.Message) { - xxx_messageInfo_CustomHttpPattern.Merge(m, src) -} -func (m *CustomHttpPattern) XXX_Size() int { - return xxx_messageInfo_CustomHttpPattern.Size(m) -} -func (m *CustomHttpPattern) XXX_DiscardUnknown() { - xxx_messageInfo_CustomHttpPattern.DiscardUnknown(m) + +func (*CustomHttpPattern) ProtoMessage() {} + +func (x *CustomHttpPattern) ProtoReflect() protoreflect.Message { + mi := &file_google_api_http_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_CustomHttpPattern proto.InternalMessageInfo +// Deprecated: Use CustomHttpPattern.ProtoReflect.Descriptor instead. 
+func (*CustomHttpPattern) Descriptor() ([]byte, []int) { + return file_google_api_http_proto_rawDescGZIP(), []int{2} +} -func (m *CustomHttpPattern) GetKind() string { - if m != nil { - return m.Kind +func (x *CustomHttpPattern) GetKind() string { + if x != nil { + return x.Kind } return "" } -func (m *CustomHttpPattern) GetPath() string { - if m != nil { - return m.Path +func (x *CustomHttpPattern) GetPath() string { + if x != nil { + return x.Path } return "" } -func init() { - proto.RegisterType((*Http)(nil), "google.api.Http") - proto.RegisterType((*HttpRule)(nil), "google.api.HttpRule") - proto.RegisterType((*CustomHttpPattern)(nil), "google.api.CustomHttpPattern") -} - -func init() { proto.RegisterFile("google/api/http.proto", fileDescriptor_ff9994be407cdcc9) } - -var fileDescriptor_ff9994be407cdcc9 = []byte{ - // 419 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x30, - 0x10, 0x86, 0x49, 0x9b, 0x76, 0xdb, 0xe9, 0x82, 0x84, 0x59, 0x90, 0x85, 0x40, 0x54, 0xe5, 0x52, - 0x71, 0x48, 0xa5, 0xe5, 0xc0, 0x61, 0x4f, 0x1b, 0xa8, 0x58, 0x6e, 0x55, 0x8e, 0x5c, 0x22, 0x37, - 0x1e, 0x52, 0x83, 0xd7, 0xb6, 0xe2, 0x09, 0xa2, 0xaf, 0xc3, 0x63, 0xf1, 0x24, 0x1c, 0x91, 0x9d, - 0x84, 0x56, 0x42, 0xe2, 0x36, 0xf3, 0xff, 0x9f, 0xa7, 0x7f, 0x27, 0x03, 0x4f, 0x6b, 0x6b, 0x6b, - 0x8d, 0x1b, 0xe1, 0xd4, 0xe6, 0x40, 0xe4, 0x32, 0xd7, 0x58, 0xb2, 0x0c, 0x3a, 0x39, 0x13, 0x4e, - 0xad, 0x8e, 0x90, 0xde, 0x11, 0x39, 0xf6, 0x06, 0x26, 0x4d, 0xab, 0xd1, 0xf3, 0x64, 0x39, 0x5e, - 0x2f, 0xae, 0xaf, 0xb2, 0x13, 0x93, 0x05, 0xa0, 0x68, 0x35, 0x16, 0x1d, 0xc2, 0xb6, 0xf0, 0xea, - 0x4b, 0xab, 0xf5, 0xb1, 0x94, 0x58, 0x59, 0x89, 0x65, 0x83, 0x1e, 0x9b, 0xef, 0x28, 0x4b, 0xfc, - 0xe1, 0x84, 0xf1, 0xca, 0x1a, 0x3e, 0x5a, 0x26, 0xeb, 0x59, 0xf1, 0x22, 0x62, 0x1f, 0x22, 0x55, - 0xf4, 0xd0, 0x76, 0x60, 0x56, 0xbf, 0x46, 0x30, 0x1b, 0x46, 0xb3, 0xe7, 0x30, 0xf3, 0xa8, 0xb1, - 0x22, 0xdb, 0xf0, 0x64, 0x99, 0xac, 0xe7, 0xc5, 0xdf, 0x9e, 0x31, 0x18, 0xd7, 0x48, 0x71, 0xe6, - 0xfc, 0xee, 0x41, 0x11, 0x9a, 0xa0, 0xb9, 0x96, 0xf8, 0x78, 0xd0, 0x5c, 0x4b, 0xec, 0x0a, 0x52, - 0x67, 0x3d, 0xf1, 0xb4, 0x17, 0x63, 0xc7, 0x38, 0x4c, 0x25, 0x6a, 0x24, 0xe4, 0x93, 0x5e, 0xef, - 0x7b, 0xf6, 0x0c, 0x26, 0x4e, 0x50, 0x75, 0xe0, 0xd3, 0xde, 0xe8, 0x5a, 0xf6, 0x0e, 0xa6, 0x55, - 0xeb, 0xc9, 0xde, 0xf3, 0xd9, 0x32, 0x59, 0x2f, 0xae, 0x5f, 0x9e, 0x2f, 0xe3, 0x7d, 0x74, 0x42, - 0xee, 0x9d, 0x20, 0xc2, 0xc6, 0x84, 0x81, 0x1d, 0xce, 0x18, 0xa4, 0x7b, 0x2b, 0x8f, 0xfc, 0x22, - 0xfe, 0x81, 0x58, 0xb3, 0xd7, 0xf0, 0xb0, 0x41, 0xef, 0xac, 0xf1, 0x58, 0x46, 0xf3, 0x32, 0x9a, - 0x97, 0x83, 0x98, 0x07, 0x68, 0x0b, 0x4f, 0x84, 0x94, 0x8a, 0x94, 0x35, 0x42, 0x97, 0x7b, 0x65, - 0xa4, 0x32, 0xb5, 0xe7, 0x8b, 0xff, 0x7c, 0x0b, 0x76, 0x7a, 0x90, 0xf7, 0x7c, 0x3e, 0x87, 0x0b, - 0xd7, 0x85, 0x5a, 0xdd, 0xc0, 0xe3, 0x7f, 0x92, 0x86, 0x7c, 0xdf, 0x94, 0x91, 0xfd, 0x82, 0x63, - 0x1d, 0x34, 0x27, 0xe8, 0xd0, 0x6d, 0xb7, 0x88, 0x75, 0xfe, 0x15, 0x1e, 0x55, 0xf6, 0xfe, 0xec, - 0x67, 0xf3, 0x79, 0x1c, 0x13, 0xae, 0x67, 0x97, 0x7c, 0xbe, 0xed, 0x8d, 0xda, 0x6a, 0x61, 0xea, - 0xcc, 0x36, 0xf5, 0xa6, 0x46, 0x13, 0x6f, 0x6b, 0xd3, 0x59, 0xc2, 0x29, 0x1f, 0xaf, 0x4e, 0x18, - 0x63, 0x49, 0x84, 0x98, 0xfe, 0xe6, 0xac, 0xfe, 0x9d, 0x24, 0x3f, 0x47, 0xe9, 0xc7, 0xdb, 0xdd, - 0xa7, 0xfd, 0x34, 0xbe, 0x7b, 0xfb, 0x27, 0x00, 0x00, 0xff, 0xff, 0xae, 0xde, 0xa1, 0xd0, 0xac, - 0x02, 0x00, 0x00, +var File_google_api_http_proto protoreflect.FileDescriptor + +var file_google_api_http_proto_rawDesc = 
[]byte{ + 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x22, 0x79, 0x0a, 0x04, 0x48, 0x74, 0x74, 0x70, 0x12, 0x2a, 0x0a, 0x05, 0x72, + 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, + 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1f, 0x66, 0x75, 0x6c, 0x6c, 0x79, + 0x5f, 0x64, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x65, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x1c, 0x66, 0x75, 0x6c, 0x6c, 0x79, 0x44, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x45, 0x78, 0x70, 0x61, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xda, + 0x02, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x03, 0x70, + 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, + 0x14, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x04, 0x70, 0x6f, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x16, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, + 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x62, 0x6f, 0x64, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x45, 0x0a, 0x13, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x12, 0x61, 0x64, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, + 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3b, 0x0a, 0x11, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, + 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_http_proto_rawDescOnce sync.Once + file_google_api_http_proto_rawDescData = file_google_api_http_proto_rawDesc +) + +func file_google_api_http_proto_rawDescGZIP() []byte { + file_google_api_http_proto_rawDescOnce.Do(func() { + file_google_api_http_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_http_proto_rawDescData) + }) + return file_google_api_http_proto_rawDescData +} + +var file_google_api_http_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_google_api_http_proto_goTypes = []interface{}{ + (*Http)(nil), // 0: google.api.Http + (*HttpRule)(nil), // 1: google.api.HttpRule + (*CustomHttpPattern)(nil), // 2: google.api.CustomHttpPattern +} +var file_google_api_http_proto_depIdxs = []int32{ + 1, // 0: google.api.Http.rules:type_name -> google.api.HttpRule + 2, // 1: google.api.HttpRule.custom:type_name -> google.api.CustomHttpPattern + 1, // 2: google.api.HttpRule.additional_bindings:type_name -> google.api.HttpRule + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_google_api_http_proto_init() } +func file_google_api_http_proto_init() { + if File_google_api_http_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_http_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Http); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_http_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpRule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_http_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomHttpPattern); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_api_http_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*HttpRule_Get)(nil), + (*HttpRule_Put)(nil), + (*HttpRule_Post)(nil), + (*HttpRule_Delete)(nil), + (*HttpRule_Patch)(nil), + (*HttpRule_Custom)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_http_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_http_proto_goTypes, + DependencyIndexes: file_google_api_http_proto_depIdxs, + MessageInfos: file_google_api_http_proto_msgTypes, + }.Build() + File_google_api_http_proto = out.File + 
file_google_api_http_proto_rawDesc = nil + file_google_api_http_proto_goTypes = nil + file_google_api_http_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index af057b90b..ff6147fe2 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -1,26 +1,45 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/api/resource.proto package annotations import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // A description of the historical or future-looking state of the // resource pattern. @@ -38,24 +57,45 @@ const ( ResourceDescriptor_FUTURE_MULTI_PATTERN ResourceDescriptor_History = 2 ) -var ResourceDescriptor_History_name = map[int32]string{ - 0: "HISTORY_UNSPECIFIED", - 1: "ORIGINALLY_SINGLE_PATTERN", - 2: "FUTURE_MULTI_PATTERN", -} +// Enum value maps for ResourceDescriptor_History. 
+var ( + ResourceDescriptor_History_name = map[int32]string{ + 0: "HISTORY_UNSPECIFIED", + 1: "ORIGINALLY_SINGLE_PATTERN", + 2: "FUTURE_MULTI_PATTERN", + } + ResourceDescriptor_History_value = map[string]int32{ + "HISTORY_UNSPECIFIED": 0, + "ORIGINALLY_SINGLE_PATTERN": 1, + "FUTURE_MULTI_PATTERN": 2, + } +) -var ResourceDescriptor_History_value = map[string]int32{ - "HISTORY_UNSPECIFIED": 0, - "ORIGINALLY_SINGLE_PATTERN": 1, - "FUTURE_MULTI_PATTERN": 2, +func (x ResourceDescriptor_History) Enum() *ResourceDescriptor_History { + p := new(ResourceDescriptor_History) + *p = x + return p } func (x ResourceDescriptor_History) String() string { - return proto.EnumName(ResourceDescriptor_History_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } +func (ResourceDescriptor_History) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_resource_proto_enumTypes[0].Descriptor() +} + +func (ResourceDescriptor_History) Type() protoreflect.EnumType { + return &file_google_api_resource_proto_enumTypes[0] +} + +func (x ResourceDescriptor_History) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ResourceDescriptor_History.Descriptor instead. func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_465e9122405d1bb5, []int{0, 0} + return file_google_api_resource_proto_rawDescGZIP(), []int{0, 0} } // A simple descriptor of a resource type. @@ -66,31 +106,111 @@ func (ResourceDescriptor_History) EnumDescriptor() ([]byte, []int) { // // Example: // -// message Topic { -// // Indicates this message defines a resource schema. -// // Declares the resource type in the format of {service}/{kind}. -// // For Kubernetes resources, the format is {api group}/{kind}. -// option (google.api.resource) = { -// type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" -// }; -// } +// message Topic { +// // Indicates this message defines a resource schema. +// // Declares the resource type in the format of {service}/{kind}. +// // For Kubernetes resources, the format is {api group}/{kind}. +// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// name_descriptor: { +// pattern: "projects/{project}/topics/{topic}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// } +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: "pubsub.googleapis.com/Topic" +// name_descriptor: +// - pattern: "projects/{project}/topics/{topic}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" // // Sometimes, resources have multiple patterns, typically because they can // live under multiple parents. 
// // Example: // -// message LogEntry { -// option (google.api.resource) = { -// type: "logging.googleapis.com/LogEntry" -// pattern: "projects/{project}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" -// }; -// } +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// name_descriptor: { +// pattern: "projects/{project}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// } +// name_descriptor: { +// pattern: "folders/{folder}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +// parent_name_extractor: "folders/{folder}" +// } +// name_descriptor: { +// pattern: "organizations/{organization}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Organization" +// parent_name_extractor: "organizations/{organization}" +// } +// name_descriptor: { +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// parent_type: "billing.googleapis.com/BillingAccount" +// parent_name_extractor: "billingAccounts/{billing_account}" +// } +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// name_descriptor: +// - pattern: "projects/{project}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// - pattern: "folders/{folder}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +// parent_name_extractor: "folders/{folder}" +// - pattern: "organizations/{organization}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Organization" +// parent_name_extractor: "organizations/{organization}" +// - pattern: "billingAccounts/{billing_account}/logs/{log}" +// parent_type: "billing.googleapis.com/BillingAccount" +// parent_name_extractor: "billingAccounts/{billing_account}" +// +// For flexible resources, the resource name doesn't contain parent names, but +// the resource itself has parents for policy evaluation. +// +// Example: +// +// message Shelf { +// option (google.api.resource) = { +// type: "library.googleapis.com/Shelf" +// name_descriptor: { +// pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// } +// name_descriptor: { +// pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +// } +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'library.googleapis.com/Shelf' +// name_descriptor: +// - pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// - pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" type ResourceDescriptor struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The resource type. It must be in the format of // {service_name}/{resource_type_kind}. The `resource_type_kind` must be // singular and must not include version numbers. @@ -102,11 +222,20 @@ type ResourceDescriptor struct { // should use PascalCase (UpperCamelCase). The maximum number of // characters allowed for the `resource_type_kind` is 100. Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // Optional. The valid resource name pattern(s) for this resource type. 
+ // Optional. The relative resource name pattern associated with this resource + // type. The DNS prefix of the full resource name shouldn't be specified here. + // + // The path pattern must follow the syntax, which aligns with HTTP binding + // syntax: + // + // Template = Segment { "/" Segment } ; + // Segment = LITERAL | Variable ; + // Variable = "{" LITERAL "}" ; // // Examples: - // - "projects/{project}/topics/{topic}" - // - "projects/{project}/knowledgeBases/{knowledge_base}" + // + // - "projects/{project}/topics/{topic}" + // - "projects/{project}/knowledgeBases/{knowledge_base}" // // The components in braces correspond to the IDs for each resource in the // hierarchy. It is expected that, if multiple patterns are provided, @@ -119,199 +248,397 @@ type ResourceDescriptor struct { // Optional. The historical or future-looking state of the resource pattern. // // Example: - // // The InspectTemplate message originally only supported resource - // // names with organization, and project was added later. - // message InspectTemplate { - // option (google.api.resource) = { - // type: "dlp.googleapis.com/InspectTemplate" - // pattern: - // "organizations/{organization}/inspectTemplates/{inspect_template}" - // pattern: "projects/{project}/inspectTemplates/{inspect_template}" - // history: ORIGINALLY_SINGLE_PATTERN - // }; - // } - History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // + // // The InspectTemplate message originally only supported resource + // // names with organization, and project was added later. + // message InspectTemplate { + // option (google.api.resource) = { + // type: "dlp.googleapis.com/InspectTemplate" + // pattern: + // "organizations/{organization}/inspectTemplates/{inspect_template}" + // pattern: "projects/{project}/inspectTemplates/{inspect_template}" + // history: ORIGINALLY_SINGLE_PATTERN + // }; + // } + History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"` + // The plural name used in the resource name and permission names, such as + // 'projects' for the resource name of 'projects/{project}' and the permission + // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same + // concept of the `plural` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // + // Note: The plural form is required even for singleton resources. See + // https://aip.dev/156 + Plural string `protobuf:"bytes,5,opt,name=plural,proto3" json:"plural,omitempty"` + // The same concept of the `singular` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // Such as "project" for the `resourcemanager.googleapis.com/Project` type. 
+ Singular string `protobuf:"bytes,6,opt,name=singular,proto3" json:"singular,omitempty"` } -func (m *ResourceDescriptor) Reset() { *m = ResourceDescriptor{} } -func (m *ResourceDescriptor) String() string { return proto.CompactTextString(m) } -func (*ResourceDescriptor) ProtoMessage() {} -func (*ResourceDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_465e9122405d1bb5, []int{0} +func (x *ResourceDescriptor) Reset() { + *x = ResourceDescriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ResourceDescriptor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResourceDescriptor.Unmarshal(m, b) -} -func (m *ResourceDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResourceDescriptor.Marshal(b, m, deterministic) -} -func (m *ResourceDescriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceDescriptor.Merge(m, src) +func (x *ResourceDescriptor) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ResourceDescriptor) XXX_Size() int { - return xxx_messageInfo_ResourceDescriptor.Size(m) -} -func (m *ResourceDescriptor) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceDescriptor.DiscardUnknown(m) + +func (*ResourceDescriptor) ProtoMessage() {} + +func (x *ResourceDescriptor) ProtoReflect() protoreflect.Message { + mi := &file_google_api_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ResourceDescriptor proto.InternalMessageInfo +// Deprecated: Use ResourceDescriptor.ProtoReflect.Descriptor instead. +func (*ResourceDescriptor) Descriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{0} +} -func (m *ResourceDescriptor) GetType() string { - if m != nil { - return m.Type +func (x *ResourceDescriptor) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *ResourceDescriptor) GetPattern() []string { - if m != nil { - return m.Pattern +func (x *ResourceDescriptor) GetPattern() []string { + if x != nil { + return x.Pattern } return nil } -func (m *ResourceDescriptor) GetNameField() string { - if m != nil { - return m.NameField +func (x *ResourceDescriptor) GetNameField() string { + if x != nil { + return x.NameField } return "" } -func (m *ResourceDescriptor) GetHistory() ResourceDescriptor_History { - if m != nil { - return m.History +func (x *ResourceDescriptor) GetHistory() ResourceDescriptor_History { + if x != nil { + return x.History } return ResourceDescriptor_HISTORY_UNSPECIFIED } -// Defines a proto annotation that describes a field that refers to a resource. +func (x *ResourceDescriptor) GetPlural() string { + if x != nil { + return x.Plural + } + return "" +} + +func (x *ResourceDescriptor) GetSingular() string { + if x != nil { + return x.Singular + } + return "" +} + +// Defines a proto annotation that describes a string field that refers to +// an API resource. type ResourceReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The resource type that the annotated field references. 
// // Example: // - // message Subscription { - // string topic = 2 [(google.api.resource_reference) = { - // type = "pubsub.googleapis.com/Topic" - // }]; - // } + // message Subscription { + // string topic = 2 [(google.api.resource_reference) = { + // type: "pubsub.googleapis.com/Topic" + // }]; + // } + // + // Occasionally, a field may reference an arbitrary resource. In this case, + // APIs use the special value * in their resource reference. + // + // Example: + // + // message GetIamPolicyRequest { + // string resource = 2 [(google.api.resource_reference) = { + // type: "*" + // }]; + // } Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // The resource type of a child collection that the annotated field - // references. This is useful for `parent` fields where a resource has more - // than one possible type of parent. + // references. This is useful for annotating the `parent` field that + // doesn't have a fixed resource type. // // Example: // - // message ListLogEntriesRequest { - // string parent = 1 [(google.api.resource_reference) = { - // child_type: "logging.googleapis.com/LogEntry" - // }; - // } - ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // message ListLogEntriesRequest { + // string parent = 1 [(google.api.resource_reference) = { + // child_type: "logging.googleapis.com/LogEntry" + // }; + // } + ChildType string `protobuf:"bytes,2,opt,name=child_type,json=childType,proto3" json:"child_type,omitempty"` } -func (m *ResourceReference) Reset() { *m = ResourceReference{} } -func (m *ResourceReference) String() string { return proto.CompactTextString(m) } -func (*ResourceReference) ProtoMessage() {} -func (*ResourceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_465e9122405d1bb5, []int{1} +func (x *ResourceReference) Reset() { + *x = ResourceReference{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_resource_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ResourceReference) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResourceReference.Unmarshal(m, b) +func (x *ResourceReference) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ResourceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResourceReference.Marshal(b, m, deterministic) -} -func (m *ResourceReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceReference.Merge(m, src) -} -func (m *ResourceReference) XXX_Size() int { - return xxx_messageInfo_ResourceReference.Size(m) -} -func (m *ResourceReference) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceReference.DiscardUnknown(m) + +func (*ResourceReference) ProtoMessage() {} + +func (x *ResourceReference) ProtoReflect() protoreflect.Message { + mi := &file_google_api_resource_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ResourceReference proto.InternalMessageInfo +// Deprecated: Use ResourceReference.ProtoReflect.Descriptor instead. 
+func (*ResourceReference) Descriptor() ([]byte, []int) { + return file_google_api_resource_proto_rawDescGZIP(), []int{1} +} -func (m *ResourceReference) GetType() string { - if m != nil { - return m.Type +func (x *ResourceReference) GetType() string { + if x != nil { + return x.Type } return "" } -func (m *ResourceReference) GetChildType() string { - if m != nil { - return m.ChildType +func (x *ResourceReference) GetChildType() string { + if x != nil { + return x.ChildType } return "" } -var E_ResourceReference = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*ResourceReference)(nil), - Field: 1055, - Name: "google.api.resource_reference", - Tag: "bytes,1055,opt,name=resource_reference", - Filename: "google/api/resource.proto", +var file_google_api_resource_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*ResourceReference)(nil), + Field: 1055, + Name: "google.api.resource_reference", + Tag: "bytes,1055,opt,name=resource_reference", + Filename: "google/api/resource.proto", + }, + { + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: ([]*ResourceDescriptor)(nil), + Field: 1053, + Name: "google.api.resource_definition", + Tag: "bytes,1053,rep,name=resource_definition", + Filename: "google/api/resource.proto", + }, + { + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*ResourceDescriptor)(nil), + Field: 1053, + Name: "google.api.resource", + Tag: "bytes,1053,opt,name=resource", + Filename: "google/api/resource.proto", + }, +} + +// Extension fields to descriptor.FieldOptions. +var ( + // An annotation that describes a resource reference, see + // [ResourceReference][]. + // + // optional google.api.ResourceReference resource_reference = 1055; + E_ResourceReference = &file_google_api_resource_proto_extTypes[0] +) + +// Extension fields to descriptor.FileOptions. +var ( + // An annotation that describes a resource definition without a corresponding + // message; see [ResourceDescriptor][]. + // + // repeated google.api.ResourceDescriptor resource_definition = 1053; + E_ResourceDefinition = &file_google_api_resource_proto_extTypes[1] +) + +// Extension fields to descriptor.MessageOptions. +var ( + // An annotation that describes a resource definition, see + // [ResourceDescriptor][]. 
+ // + // optional google.api.ResourceDescriptor resource = 1053; + E_Resource = &file_google_api_resource_proto_extTypes[2] +) + +var File_google_api_resource_proto protoreflect.FileDescriptor + +var file_google_api_resource_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb4, 0x02, 0x0a, 0x12, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1d, + 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x40, 0x0a, + 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x48, + 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x6c, 0x75, 0x72, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, + 0x6c, 0x61, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x69, 0x6e, 0x67, 0x75, + 0x6c, 0x61, 0x72, 0x22, 0x5b, 0x0a, 0x07, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x17, + 0x0a, 0x13, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x4f, 0x52, 0x49, 0x47, 0x49, + 0x4e, 0x41, 0x4c, 0x4c, 0x59, 0x5f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x41, 0x54, + 0x54, 0x45, 0x52, 0x4e, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x46, 0x55, 0x54, 0x55, 0x52, 0x45, + 0x5f, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x5f, 0x50, 0x41, 0x54, 0x54, 0x45, 0x52, 0x4e, 0x10, 0x02, + 0x22, 0x46, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x69, + 0x6c, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, + 0x68, 0x69, 0x6c, 0x64, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9f, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 
0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x11, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x3a, 0x6e, 0x0a, 0x13, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x52, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x66, 0x69, + 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x5c, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, + 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var E_Resource = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*ResourceDescriptor)(nil), - Field: 1053, - Name: "google.api.resource", - Tag: "bytes,1053,opt,name=resource", - Filename: "google/api/resource.proto", +var ( + file_google_api_resource_proto_rawDescOnce sync.Once + file_google_api_resource_proto_rawDescData = file_google_api_resource_proto_rawDesc +) + +func file_google_api_resource_proto_rawDescGZIP() []byte { + file_google_api_resource_proto_rawDescOnce.Do(func() { + file_google_api_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_resource_proto_rawDescData) + }) + return file_google_api_resource_proto_rawDescData } -func init() { - proto.RegisterEnum("google.api.ResourceDescriptor_History", ResourceDescriptor_History_name, ResourceDescriptor_History_value) - proto.RegisterType((*ResourceDescriptor)(nil), "google.api.ResourceDescriptor") - proto.RegisterType((*ResourceReference)(nil), "google.api.ResourceReference") - proto.RegisterExtension(E_ResourceReference) - proto.RegisterExtension(E_Resource) +var file_google_api_resource_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_google_api_resource_proto_goTypes = []interface{}{ + (ResourceDescriptor_History)(0), // 0: 
google.api.ResourceDescriptor.History + (*ResourceDescriptor)(nil), // 1: google.api.ResourceDescriptor + (*ResourceReference)(nil), // 2: google.api.ResourceReference + (*descriptor.FieldOptions)(nil), // 3: google.protobuf.FieldOptions + (*descriptor.FileOptions)(nil), // 4: google.protobuf.FileOptions + (*descriptor.MessageOptions)(nil), // 5: google.protobuf.MessageOptions +} +var file_google_api_resource_proto_depIdxs = []int32{ + 0, // 0: google.api.ResourceDescriptor.history:type_name -> google.api.ResourceDescriptor.History + 3, // 1: google.api.resource_reference:extendee -> google.protobuf.FieldOptions + 4, // 2: google.api.resource_definition:extendee -> google.protobuf.FileOptions + 5, // 3: google.api.resource:extendee -> google.protobuf.MessageOptions + 2, // 4: google.api.resource_reference:type_name -> google.api.ResourceReference + 1, // 5: google.api.resource_definition:type_name -> google.api.ResourceDescriptor + 1, // 6: google.api.resource:type_name -> google.api.ResourceDescriptor + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 4, // [4:7] is the sub-list for extension type_name + 1, // [1:4] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } -func init() { proto.RegisterFile("google/api/resource.proto", fileDescriptor_465e9122405d1bb5) } - -var fileDescriptor_465e9122405d1bb5 = []byte{ - // 430 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x41, 0x6f, 0xd3, 0x30, - 0x18, 0x25, 0x59, 0x45, 0xd7, 0x0f, 0x31, 0x6d, 0x06, 0x89, 0x0c, 0x29, 0x10, 0xf5, 0x80, 0x7a, - 0x4a, 0xa4, 0x71, 0x1b, 0x17, 0x3a, 0x96, 0x76, 0x91, 0xba, 0x36, 0x72, 0xd3, 0xc3, 0x00, 0x29, - 0xf2, 0xd2, 0xaf, 0x59, 0xa4, 0xcc, 0xb6, 0x9c, 0xec, 0xd0, 0x1b, 0x7f, 0x04, 0x21, 0xf1, 0x2b, - 0x39, 0xa2, 0x3a, 0x71, 0x98, 0xd8, 0xb4, 0x9b, 0xf3, 0xde, 0xfb, 0xbe, 0xf7, 0xfc, 0x1c, 0x38, - 0xce, 0x85, 0xc8, 0x4b, 0x0c, 0x98, 0x2c, 0x02, 0x85, 0x95, 0xb8, 0x53, 0x19, 0xfa, 0x52, 0x89, - 0x5a, 0x10, 0x68, 0x28, 0x9f, 0xc9, 0xe2, 0xad, 0xd7, 0xca, 0x34, 0x73, 0x7d, 0xb7, 0x09, 0xd6, - 0x58, 0x65, 0xaa, 0x90, 0xb5, 0x50, 0x8d, 0x7a, 0xf8, 0xc3, 0x06, 0x42, 0xdb, 0x05, 0xe7, 0x1d, - 0x49, 0x08, 0xf4, 0xea, 0xad, 0x44, 0xc7, 0xf2, 0xac, 0xd1, 0x80, 0xea, 0x33, 0x71, 0xa0, 0x2f, - 0x59, 0x5d, 0xa3, 0xe2, 0x8e, 0xed, 0xed, 0x8d, 0x06, 0xd4, 0x7c, 0x12, 0x17, 0x80, 0xb3, 0x5b, - 0x4c, 0x37, 0x05, 0x96, 0x6b, 0x67, 0x4f, 0xcf, 0x0c, 0x76, 0xc8, 0x64, 0x07, 0x90, 0xcf, 0xd0, - 0xbf, 0x29, 0xaa, 0x5a, 0xa8, 0xad, 0xd3, 0xf3, 0xac, 0xd1, 0xc1, 0xc9, 0x07, 0xff, 0x5f, 0x46, - 0xff, 0xa1, 0xbb, 0x7f, 0xd1, 0xa8, 0xa9, 0x19, 0x1b, 0x7e, 0x83, 0x7e, 0x8b, 0x91, 0x37, 0xf0, - 0xea, 0x22, 0x5a, 0x26, 0x0b, 0x7a, 0x95, 0xae, 0xe6, 0xcb, 0x38, 0xfc, 0x12, 0x4d, 0xa2, 0xf0, - 0xfc, 0xf0, 0x19, 0x71, 0xe1, 0x78, 0x41, 0xa3, 0x69, 0x34, 0x1f, 0xcf, 0x66, 0x57, 0xe9, 0x32, - 0x9a, 0x4f, 0x67, 0x61, 0x1a, 0x8f, 0x93, 0x24, 0xa4, 0xf3, 0x43, 0x8b, 0x38, 0xf0, 0x7a, 0xb2, - 0x4a, 0x56, 0x34, 0x4c, 0x2f, 0x57, 0xb3, 0x24, 0xea, 0x18, 0x7b, 0x38, 0x81, 0x23, 0x93, 0x81, - 0xe2, 0x06, 0x15, 0xf2, 0x0c, 0x1f, 0x2d, 0xc0, 0x05, 0xc8, 0x6e, 0x8a, 0x72, 0x9d, 0x6a, 0xc6, - 0x6e, 0xae, 0xa9, 0x91, 0x64, 0x2b, 0xf1, 0xb4, 0x04, 0x62, 0x9e, 0x22, 0x55, 0xdd, 0x22, 0xd7, - 0xdc, 0xd5, 0xbc, 0x81, 0xaf, 0x4b, 0x59, 0xc8, 0xba, 0x10, 0xbc, 0x72, 0x7e, 0xed, 0x7b, 0xd6, - 0xe8, 0xc5, 0x89, 0xfb, 0x58, 0x23, 0x5d, 0x1a, 0x7a, 0xa4, 0xfe, 0x87, 0x4e, 0xbf, 0xc3, 0xbe, - 0x01, 0xc9, 0xfb, 
0x07, 0x1e, 0x97, 0x58, 0x55, 0x2c, 0x47, 0xe3, 0xf2, 0xb3, 0x71, 0x79, 0xf7, - 0x74, 0xef, 0xb4, 0xdb, 0x78, 0xc6, 0xe1, 0x20, 0x13, 0xb7, 0xf7, 0xe4, 0x67, 0x2f, 0x8d, 0x3e, - 0xde, 0x79, 0xc4, 0xd6, 0xd7, 0x71, 0x4b, 0xe6, 0xa2, 0x64, 0x3c, 0xf7, 0x85, 0xca, 0x83, 0x1c, - 0xb9, 0x4e, 0x10, 0x34, 0x14, 0x93, 0x45, 0xa5, 0xff, 0x50, 0xc6, 0xb9, 0xa8, 0x99, 0x8e, 0xf2, - 0xe9, 0xde, 0xf9, 0x8f, 0x65, 0xfd, 0xb6, 0x7b, 0xd3, 0x71, 0x1c, 0x5d, 0x3f, 0xd7, 0x73, 0x1f, - 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x1e, 0x07, 0x80, 0xd8, 0x02, 0x00, 0x00, +func init() { file_google_api_resource_proto_init() } +func file_google_api_resource_proto_init() { + if File_google_api_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceDescriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_resource_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_resource_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 3, + NumServices: 0, + }, + GoTypes: file_google_api_resource_proto_goTypes, + DependencyIndexes: file_google_api_resource_proto_depIdxs, + EnumInfos: file_google_api_resource_proto_enumTypes, + MessageInfos: file_google_api_resource_proto_msgTypes, + ExtensionInfos: file_google_api_resource_proto_extTypes, + }.Build() + File_google_api_resource_proto = out.File + file_google_api_resource_proto_rawDesc = nil + file_google_api_resource_proto_goTypes = nil + file_google_api_resource_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go index 0499510d7..375d38765 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go @@ -1,31 +1,57 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/iam/v1/iam_policy.proto package iam import ( context "context" - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // REQUIRED: The resource for which the policy is being specified. // See the operation documentation for the appropriate value for this field. Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` @@ -33,105 +59,121 @@ type SetIamPolicyRequest struct { // the policy is limited to a few 10s of KB. An empty policy is a // valid policy but certain Cloud Platform services (such as Projects) // might reject them. 
- Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Policy *Policy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"` } -func (m *SetIamPolicyRequest) Reset() { *m = SetIamPolicyRequest{} } -func (m *SetIamPolicyRequest) String() string { return proto.CompactTextString(m) } -func (*SetIamPolicyRequest) ProtoMessage() {} -func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_d2728eb97d748a32, []int{0} +func (x *SetIamPolicyRequest) Reset() { + *x = SetIamPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *SetIamPolicyRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetIamPolicyRequest.Unmarshal(m, b) -} -func (m *SetIamPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetIamPolicyRequest.Marshal(b, m, deterministic) +func (x *SetIamPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SetIamPolicyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetIamPolicyRequest.Merge(m, src) -} -func (m *SetIamPolicyRequest) XXX_Size() int { - return xxx_messageInfo_SetIamPolicyRequest.Size(m) -} -func (m *SetIamPolicyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetIamPolicyRequest.DiscardUnknown(m) + +func (*SetIamPolicyRequest) ProtoMessage() {} + +func (x *SetIamPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_SetIamPolicyRequest proto.InternalMessageInfo +// Deprecated: Use SetIamPolicyRequest.ProtoReflect.Descriptor instead. +func (*SetIamPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{0} +} -func (m *SetIamPolicyRequest) GetResource() string { - if m != nil { - return m.Resource +func (x *SetIamPolicyRequest) GetResource() string { + if x != nil { + return x.Resource } return "" } -func (m *SetIamPolicyRequest) GetPolicy() *Policy { - if m != nil { - return m.Policy +func (x *SetIamPolicyRequest) GetPolicy() *Policy { + if x != nil { + return x.Policy } return nil } // Request message for `GetIamPolicy` method. type GetIamPolicyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // REQUIRED: The resource for which the policy is being requested. // See the operation documentation for the appropriate value for this field. Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` // OPTIONAL: A `GetPolicyOptions` object for specifying options to // `GetIamPolicy`. This field is only used by Cloud IAM. 
- Options *GetPolicyOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Options *GetPolicyOptions `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` } -func (m *GetIamPolicyRequest) Reset() { *m = GetIamPolicyRequest{} } -func (m *GetIamPolicyRequest) String() string { return proto.CompactTextString(m) } -func (*GetIamPolicyRequest) ProtoMessage() {} -func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_d2728eb97d748a32, []int{1} +func (x *GetIamPolicyRequest) Reset() { + *x = GetIamPolicyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetIamPolicyRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetIamPolicyRequest.Unmarshal(m, b) -} -func (m *GetIamPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetIamPolicyRequest.Marshal(b, m, deterministic) -} -func (m *GetIamPolicyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetIamPolicyRequest.Merge(m, src) +func (x *GetIamPolicyRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetIamPolicyRequest) XXX_Size() int { - return xxx_messageInfo_GetIamPolicyRequest.Size(m) -} -func (m *GetIamPolicyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetIamPolicyRequest.DiscardUnknown(m) + +func (*GetIamPolicyRequest) ProtoMessage() {} + +func (x *GetIamPolicyRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_GetIamPolicyRequest proto.InternalMessageInfo +// Deprecated: Use GetIamPolicyRequest.ProtoReflect.Descriptor instead. +func (*GetIamPolicyRequest) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{1} +} -func (m *GetIamPolicyRequest) GetResource() string { - if m != nil { - return m.Resource +func (x *GetIamPolicyRequest) GetResource() string { + if x != nil { + return x.Resource } return "" } -func (m *GetIamPolicyRequest) GetOptions() *GetPolicyOptions { - if m != nil { - return m.Options +func (x *GetIamPolicyRequest) GetOptions() *GetPolicyOptions { + if x != nil { + return x.Options } return nil } // Request message for `TestIamPermissions` method. type TestIamPermissionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // REQUIRED: The resource for which the policy detail is being requested. // See the operation documentation for the appropriate value for this field. Resource string `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` @@ -139,143 +181,307 @@ type TestIamPermissionsRequest struct { // wildcards (such as '*' or 'storage.*') are not allowed. For more // information see // [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). 
- Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Permissions []string `protobuf:"bytes,2,rep,name=permissions,proto3" json:"permissions,omitempty"` } -func (m *TestIamPermissionsRequest) Reset() { *m = TestIamPermissionsRequest{} } -func (m *TestIamPermissionsRequest) String() string { return proto.CompactTextString(m) } -func (*TestIamPermissionsRequest) ProtoMessage() {} -func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_d2728eb97d748a32, []int{2} +func (x *TestIamPermissionsRequest) Reset() { + *x = TestIamPermissionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *TestIamPermissionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TestIamPermissionsRequest.Unmarshal(m, b) -} -func (m *TestIamPermissionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TestIamPermissionsRequest.Marshal(b, m, deterministic) -} -func (m *TestIamPermissionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_TestIamPermissionsRequest.Merge(m, src) -} -func (m *TestIamPermissionsRequest) XXX_Size() int { - return xxx_messageInfo_TestIamPermissionsRequest.Size(m) +func (x *TestIamPermissionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TestIamPermissionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_TestIamPermissionsRequest.DiscardUnknown(m) + +func (*TestIamPermissionsRequest) ProtoMessage() {} + +func (x *TestIamPermissionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_TestIamPermissionsRequest proto.InternalMessageInfo +// Deprecated: Use TestIamPermissionsRequest.ProtoReflect.Descriptor instead. +func (*TestIamPermissionsRequest) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{2} +} -func (m *TestIamPermissionsRequest) GetResource() string { - if m != nil { - return m.Resource +func (x *TestIamPermissionsRequest) GetResource() string { + if x != nil { + return x.Resource } return "" } -func (m *TestIamPermissionsRequest) GetPermissions() []string { - if m != nil { - return m.Permissions +func (x *TestIamPermissionsRequest) GetPermissions() []string { + if x != nil { + return x.Permissions } return nil } // Response message for `TestIamPermissions` method. type TestIamPermissionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A subset of `TestPermissionsRequest.permissions` that the caller is // allowed. 
- Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Permissions []string `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` } -func (m *TestIamPermissionsResponse) Reset() { *m = TestIamPermissionsResponse{} } -func (m *TestIamPermissionsResponse) String() string { return proto.CompactTextString(m) } -func (*TestIamPermissionsResponse) ProtoMessage() {} -func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_d2728eb97d748a32, []int{3} +func (x *TestIamPermissionsResponse) Reset() { + *x = TestIamPermissionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *TestIamPermissionsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TestIamPermissionsResponse.Unmarshal(m, b) +func (x *TestIamPermissionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TestIamPermissionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TestIamPermissionsResponse.Marshal(b, m, deterministic) -} -func (m *TestIamPermissionsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TestIamPermissionsResponse.Merge(m, src) -} -func (m *TestIamPermissionsResponse) XXX_Size() int { - return xxx_messageInfo_TestIamPermissionsResponse.Size(m) -} -func (m *TestIamPermissionsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TestIamPermissionsResponse.DiscardUnknown(m) + +func (*TestIamPermissionsResponse) ProtoMessage() {} + +func (x *TestIamPermissionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_TestIamPermissionsResponse proto.InternalMessageInfo +// Deprecated: Use TestIamPermissionsResponse.ProtoReflect.Descriptor instead. 
+func (*TestIamPermissionsResponse) Descriptor() ([]byte, []int) { + return file_google_iam_v1_iam_policy_proto_rawDescGZIP(), []int{3} +} -func (m *TestIamPermissionsResponse) GetPermissions() []string { - if m != nil { - return m.Permissions +func (x *TestIamPermissionsResponse) GetPermissions() []string { + if x != nil { + return x.Permissions } return nil } -func init() { - proto.RegisterType((*SetIamPolicyRequest)(nil), "google.iam.v1.SetIamPolicyRequest") - proto.RegisterType((*GetIamPolicyRequest)(nil), "google.iam.v1.GetIamPolicyRequest") - proto.RegisterType((*TestIamPermissionsRequest)(nil), "google.iam.v1.TestIamPermissionsRequest") - proto.RegisterType((*TestIamPermissionsResponse)(nil), "google.iam.v1.TestIamPermissionsResponse") -} - -func init() { proto.RegisterFile("google/iam/v1/iam_policy.proto", fileDescriptor_d2728eb97d748a32) } - -var fileDescriptor_d2728eb97d748a32 = []byte{ - // 465 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x53, 0xcd, 0x8a, 0x13, 0x31, - 0x1c, 0x27, 0x5d, 0x58, 0x6d, 0x56, 0x05, 0xa7, 0x88, 0x35, 0x2b, 0xb5, 0x44, 0x0f, 0x6d, 0xa1, - 0x19, 0xbb, 0x9e, 0xac, 0x28, 0xec, 0x7a, 0x18, 0xe6, 0x20, 0x96, 0x51, 0x16, 0x94, 0x82, 0xc6, - 0x31, 0x0c, 0x81, 0xc9, 0x24, 0x4e, 0xd2, 0x05, 0x11, 0x2f, 0x1e, 0x7c, 0x01, 0x6f, 0x3e, 0x82, - 0x67, 0x9f, 0x62, 0xaf, 0xbe, 0x82, 0x0f, 0xe1, 0x51, 0x66, 0x92, 0xee, 0xce, 0x47, 0x95, 0x0a, - 0x9e, 0x4a, 0xf3, 0xfb, 0xfa, 0x7f, 0xcc, 0x1f, 0x0e, 0x12, 0x29, 0x93, 0x94, 0xf9, 0x9c, 0x0a, - 0xff, 0x64, 0x56, 0xfc, 0xbc, 0x52, 0x32, 0xe5, 0xf1, 0x7b, 0xa2, 0x72, 0x69, 0xa4, 0x77, 0xd9, - 0xe2, 0x84, 0x53, 0x41, 0x4e, 0x66, 0x68, 0xbf, 0x4e, 0x97, 0xca, 0x70, 0x99, 0x69, 0xcb, 0x45, - 0xa8, 0x0e, 0x56, 0x7d, 0xd0, 0x4d, 0x87, 0x51, 0xc5, 0x7d, 0x9a, 0x65, 0xd2, 0xd0, 0xaa, 0xf2, - 0x7a, 0x05, 0x8d, 0x53, 0xce, 0x32, 0x63, 0x01, 0xfc, 0x1a, 0xf6, 0x9e, 0x31, 0x13, 0x52, 0xb1, - 0x28, 0xcd, 0x22, 0xf6, 0x6e, 0xc5, 0xb4, 0xf1, 0x10, 0xbc, 0x98, 0x33, 0x2d, 0x57, 0x79, 0xcc, - 0xfa, 0x60, 0x08, 0x46, 0xdd, 0xe8, 0xec, 0xbf, 0x37, 0x85, 0xbb, 0x36, 0xb9, 0xdf, 0x19, 0x82, - 0xd1, 0xde, 0xc1, 0x35, 0x52, 0x6b, 0x81, 0x38, 0x27, 0x47, 0xc2, 0x29, 0xec, 0x05, 0xff, 0x98, - 0x70, 0x1f, 0x5e, 0x70, 0x8d, 0xbb, 0x88, 0x5b, 0x8d, 0x88, 0x80, 0x19, 0xeb, 0xf6, 0xd4, 0xd2, - 0xa2, 0x35, 0x1f, 0xbf, 0x80, 0x37, 0x9e, 0x33, 0x5d, 0xc6, 0xb1, 0x5c, 0x70, 0xad, 0x4b, 0x78, - 0x8b, 0xcc, 0x21, 0xdc, 0x53, 0xe7, 0x8a, 0x7e, 0x67, 0xb8, 0x33, 0xea, 0x46, 0xd5, 0x27, 0xfc, - 0x08, 0xa2, 0x4d, 0xd6, 0x5a, 0xc9, 0x4c, 0xb7, 0xf4, 0xa0, 0xa5, 0x3f, 0xf8, 0xbe, 0x03, 0xbb, - 0xe1, 0xe1, 0x13, 0x5b, 0xb8, 0x67, 0xe0, 0xa5, 0xea, 0xe0, 0x3d, 0xdc, 0x68, 0x71, 0xc3, 0x56, - 0xd0, 0xe6, 0x49, 0xe3, 0xf1, 0xa7, 0x1f, 0x3f, 0xbf, 0x74, 0x6e, 0xe3, 0x41, 0xf1, 0x51, 0x7c, - 0x58, 0x77, 0xf4, 0x70, 0x32, 0xf9, 0x38, 0xd7, 0x15, 0x97, 0x39, 0x98, 0x14, 0xa9, 0xc1, 0xdf, - 0x52, 0x83, 0xff, 0x92, 0x9a, 0x34, 0x52, 0xbf, 0x02, 0xe8, 0xb5, 0x47, 0xe7, 0x8d, 0x1a, 0xc6, - 0x7f, 0x5c, 0x1c, 0x1a, 0x6f, 0xc1, 0xb4, 0x7b, 0xc0, 0x7e, 0x59, 0xd6, 0x18, 0xdf, 0x69, 0x97, - 0x65, 0x5a, 0xaa, 0x39, 0x98, 0xa0, 0xc1, 0xe9, 0xe1, 0x3e, 0xa7, 0x62, 0x2a, 0x98, 0xa1, 0x53, - 0xaa, 0xb8, 0x8b, 0xa2, 0x8a, 0x6b, 0x12, 0x4b, 0x71, 0xf4, 0x19, 0xc0, 0xab, 0xb1, 0x14, 0xf5, - 0x0a, 0x8e, 0xae, 0x9c, 0x35, 0xb8, 0x28, 0xee, 0x68, 0x01, 0x5e, 0xde, 0x75, 0x84, 0x44, 0xa6, - 0x34, 0x4b, 0x88, 0xcc, 0x13, 0x3f, 0x61, 0x59, 0x79, 0x65, 0xfe, 0xb9, 0xa5, 0xbb, 0xdd, 0x07, - 0x9c, 0x8a, 0x5f, 0x00, 0x7c, 0xeb, 0xf4, 
0x02, 0xab, 0x7a, 0x9c, 0xca, 0xd5, 0x5b, 0x12, 0x52, - 0x41, 0x8e, 0x67, 0xa7, 0xeb, 0xd7, 0x65, 0xf9, 0xba, 0x0c, 0xa9, 0x58, 0x1e, 0xcf, 0xde, 0xec, - 0x96, 0x5e, 0xf7, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x57, 0xb0, 0xe9, 0x52, 0x04, 0x00, - 0x00, +var File_google_iam_v1_iam_policy_proto protoreflect.FileDescriptor + +var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, + 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x70, 0x0a, 0x13, 0x53, + 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, 0x52, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x32, 0x0a, 0x06, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x77, 0x0a, + 0x13, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, + 0x2a, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x69, 0x0a, 0x19, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x09, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x03, 0x0a, 0x01, 0x2a, + 0x52, 0x08, 0x72, 0x65, 
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65, + 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x22, 0x3e, 0x0a, 0x1a, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x32, 0xb4, 0x03, 0x0a, 0x09, 0x49, 0x41, 0x4d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x74, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, + 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0x29, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x22, 0x1e, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x12, 0x9a, 0x01, 0x0a, 0x12, + 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, + 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x22, + 0x24, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x2a, + 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d, + 0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x86, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 
0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, + 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, + 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_iam_v1_iam_policy_proto_rawDescOnce sync.Once + file_google_iam_v1_iam_policy_proto_rawDescData = file_google_iam_v1_iam_policy_proto_rawDesc +) + +func file_google_iam_v1_iam_policy_proto_rawDescGZIP() []byte { + file_google_iam_v1_iam_policy_proto_rawDescOnce.Do(func() { + file_google_iam_v1_iam_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_iam_policy_proto_rawDescData) + }) + return file_google_iam_v1_iam_policy_proto_rawDescData +} + +var file_google_iam_v1_iam_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_iam_v1_iam_policy_proto_goTypes = []interface{}{ + (*SetIamPolicyRequest)(nil), // 0: google.iam.v1.SetIamPolicyRequest + (*GetIamPolicyRequest)(nil), // 1: google.iam.v1.GetIamPolicyRequest + (*TestIamPermissionsRequest)(nil), // 2: google.iam.v1.TestIamPermissionsRequest + (*TestIamPermissionsResponse)(nil), // 3: google.iam.v1.TestIamPermissionsResponse + (*Policy)(nil), // 4: google.iam.v1.Policy + (*GetPolicyOptions)(nil), // 5: google.iam.v1.GetPolicyOptions +} +var file_google_iam_v1_iam_policy_proto_depIdxs = []int32{ + 4, // 0: google.iam.v1.SetIamPolicyRequest.policy:type_name -> google.iam.v1.Policy + 5, // 1: google.iam.v1.GetIamPolicyRequest.options:type_name -> google.iam.v1.GetPolicyOptions + 0, // 2: google.iam.v1.IAMPolicy.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 1, // 3: google.iam.v1.IAMPolicy.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 2, // 4: google.iam.v1.IAMPolicy.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 4, // 5: google.iam.v1.IAMPolicy.SetIamPolicy:output_type -> google.iam.v1.Policy + 4, // 6: google.iam.v1.IAMPolicy.GetIamPolicy:output_type -> google.iam.v1.Policy + 3, // 7: google.iam.v1.IAMPolicy.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 5, // [5:8] is the sub-list for method output_type + 2, // [2:5] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_google_iam_v1_iam_policy_proto_init() } +func file_google_iam_v1_iam_policy_proto_init() { + if File_google_iam_v1_iam_policy_proto != nil { + return + } + file_google_iam_v1_options_proto_init() + file_google_iam_v1_policy_proto_init() + if !protoimpl.UnsafeEnabled { + file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SetIamPolicyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetIamPolicyRequest); i 
{ + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TestIamPermissionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TestIamPermissionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_iam_policy_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_google_iam_v1_iam_policy_proto_goTypes, + DependencyIndexes: file_google_iam_v1_iam_policy_proto_depIdxs, + MessageInfos: file_google_iam_v1_iam_policy_proto_msgTypes, + }.Build() + File_google_iam_v1_iam_policy_proto = out.File + file_google_iam_v1_iam_policy_proto_rawDesc = nil + file_google_iam_v1_iam_policy_proto_goTypes = nil + file_google_iam_v1_iam_policy_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context -var _ grpc.ClientConn +var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +const _ = grpc.SupportPackageIsVersion6 // IAMPolicyClient is the client API for IAMPolicy service. // @@ -299,10 +505,10 @@ type IAMPolicyClient interface { } type iAMPolicyClient struct { - cc *grpc.ClientConn + cc grpc.ClientConnInterface } -func NewIAMPolicyClient(cc *grpc.ClientConn) IAMPolicyClient { +func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient { return &iAMPolicyClient{cc} } @@ -352,6 +558,20 @@ type IAMPolicyServer interface { TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) } +// UnimplementedIAMPolicyServer can be embedded to have forward compatible implementations. +type UnimplementedIAMPolicyServer struct { +} + +func (*UnimplementedIAMPolicyServer) SetIamPolicy(context.Context, *SetIamPolicyRequest) (*Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented") +} +func (*UnimplementedIAMPolicyServer) GetIamPolicy(context.Context, *GetIamPolicyRequest) (*Policy, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented") +} +func (*UnimplementedIAMPolicyServer) TestIamPermissions(context.Context, *TestIamPermissionsRequest) (*TestIamPermissionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented") +} + func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) { s.RegisterService(&_IAMPolicy_serviceDesc, srv) } diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go index 8f02bd3c4..e6c2bfcba 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go @@ -1,92 +1,186 @@ +// Copyright 2019 Google LLC. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/iam/v1/options.proto package iam import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // Encapsulates settings provided to GetIamPolicy. type GetPolicyOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Optional. The policy format version to be returned. - // Acceptable values are 0 and 1. - // If the value is 0, or the field is omitted, policy format version 1 will be - // returned. - RequestedPolicyVersion int32 `protobuf:"varint,1,opt,name=requested_policy_version,json=requestedPolicyVersion,proto3" json:"requested_policy_version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // + // Valid values are 0, 1, and 3. Requests specifying an invalid value will be + // rejected. + // + // Requests for policies with any conditional bindings must specify version 3. + // Policies without any conditional bindings may specify any valid value or + // leave the field unset. 
+ RequestedPolicyVersion int32 `protobuf:"varint,1,opt,name=requested_policy_version,json=requestedPolicyVersion,proto3" json:"requested_policy_version,omitempty"` } -func (m *GetPolicyOptions) Reset() { *m = GetPolicyOptions{} } -func (m *GetPolicyOptions) String() string { return proto.CompactTextString(m) } -func (*GetPolicyOptions) ProtoMessage() {} -func (*GetPolicyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_19aa09e909092bd1, []int{0} +func (x *GetPolicyOptions) Reset() { + *x = GetPolicyOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_options_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *GetPolicyOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPolicyOptions.Unmarshal(m, b) +func (x *GetPolicyOptions) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetPolicyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPolicyOptions.Marshal(b, m, deterministic) + +func (*GetPolicyOptions) ProtoMessage() {} + +func (x *GetPolicyOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_options_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *GetPolicyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPolicyOptions.Merge(m, src) + +// Deprecated: Use GetPolicyOptions.ProtoReflect.Descriptor instead. +func (*GetPolicyOptions) Descriptor() ([]byte, []int) { + return file_google_iam_v1_options_proto_rawDescGZIP(), []int{0} } -func (m *GetPolicyOptions) XXX_Size() int { - return xxx_messageInfo_GetPolicyOptions.Size(m) + +func (x *GetPolicyOptions) GetRequestedPolicyVersion() int32 { + if x != nil { + return x.RequestedPolicyVersion + } + return 0 } -func (m *GetPolicyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_GetPolicyOptions.DiscardUnknown(m) + +var File_google_iam_v1_options_proto protoreflect.FileDescriptor + +var file_google_iam_v1_options_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x1c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4c, 0x0a, 0x10, 0x47, 0x65, + 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x38, + 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0c, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 
0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, + 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -var xxx_messageInfo_GetPolicyOptions proto.InternalMessageInfo +var ( + file_google_iam_v1_options_proto_rawDescOnce sync.Once + file_google_iam_v1_options_proto_rawDescData = file_google_iam_v1_options_proto_rawDesc +) -func (m *GetPolicyOptions) GetRequestedPolicyVersion() int32 { - if m != nil { - return m.RequestedPolicyVersion - } - return 0 +func file_google_iam_v1_options_proto_rawDescGZIP() []byte { + file_google_iam_v1_options_proto_rawDescOnce.Do(func() { + file_google_iam_v1_options_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_options_proto_rawDescData) + }) + return file_google_iam_v1_options_proto_rawDescData } -func init() { - proto.RegisterType((*GetPolicyOptions)(nil), "google.iam.v1.GetPolicyOptions") +var file_google_iam_v1_options_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_iam_v1_options_proto_goTypes = []interface{}{ + (*GetPolicyOptions)(nil), // 0: google.iam.v1.GetPolicyOptions +} +var file_google_iam_v1_options_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func init() { proto.RegisterFile("google/iam/v1/options.proto", fileDescriptor_19aa09e909092bd1) } - -var fileDescriptor_19aa09e909092bd1 = []byte{ - // 229 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0xcf, 0x4c, 0xcc, 0xd5, 0x2f, 0x33, 0xd4, 0xcf, 0x2f, 0x28, 0xc9, 0xcc, 0xcf, - 0x2b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x85, 0x48, 0xea, 0x65, 0x26, 0xe6, 0xea, - 0x95, 0x19, 0x4a, 0xc9, 0x40, 0xd5, 0x26, 0x16, 0x64, 0xea, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, - 0x22, 0x29, 0x56, 0xf2, 0xe1, 0x12, 0x70, 0x4f, 0x2d, 0x09, 0xc8, 0xcf, 0xc9, 0x4c, 0xae, 0xf4, - 0x87, 0x18, 0x23, 0x64, 0xc1, 0x25, 0x51, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x92, 0x9a, 0x12, - 0x5f, 0x00, 0x96, 0x8a, 0x2f, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0x93, 0x60, 0x54, 0x60, 0xd4, - 0x60, 0x0d, 0x12, 0x83, 0xcb, 0x43, 0x74, 0x86, 0x41, 0x64, 0x9d, 0x5a, 0x18, 0xb9, 0x04, 0x93, - 0xf3, 0x73, 0xf5, 0x50, 0x5c, 0xe0, 0xc4, 0x03, 0x35, 0x38, 0x00, 0x64, 0x63, 0x00, 0x63, 0x94, - 0x01, 0x54, 0x3a, 0x3d, 0x3f, 0x27, 0x31, 0x2f, 0x5d, 0x2f, 0xbf, 0x28, 0x5d, 0x3f, 0x3d, 0x35, - 0x0f, 0xec, 0x1e, 0x7d, 0x88, 0x54, 0x62, 0x41, 0x66, 0x31, 0xd4, 0x73, 0xd6, 0x99, 0x89, 0xb9, - 0x3f, 0x18, 0x19, 0x57, 0x31, 0x09, 0xbb, 0x43, 0x74, 0x39, 0xe7, 0xe4, 0x97, 0xa6, 0xe8, 0x79, - 0x26, 0xe6, 0xea, 0x85, 0x19, 0x9e, 0x82, 0x89, 0xc6, 0x80, 0x45, 0x63, 0x3c, 0x13, 0x73, 0x63, - 0xc2, 0x0c, 0x93, 0xd8, 0xc0, 0x66, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x96, 0x0c, - 0x8b, 0x27, 0x01, 0x00, 0x00, +func init() { file_google_iam_v1_options_proto_init() } +func file_google_iam_v1_options_proto_init() { + if File_google_iam_v1_options_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + 
file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetPolicyOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_options_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_iam_v1_options_proto_goTypes, + DependencyIndexes: file_google_iam_v1_options_proto_depIdxs, + MessageInfos: file_google_iam_v1_options_proto_msgTypes, + }.Build() + File_google_iam_v1_options_proto = out.File + file_google_iam_v1_options_proto_rawDesc = nil + file_google_iam_v1_options_proto_goTypes = nil + file_google_iam_v1_options_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go index 275cfcea8..ba64dcc46 100644 --- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go @@ -1,27 +1,47 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/iam/v1/policy.proto package iam import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" _ "google.golang.org/genproto/googleapis/api/annotations" expr "google.golang.org/genproto/googleapis/type/expr" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // The type of action performed on a Binding in a policy. 
type BindingDelta_Action int32 @@ -35,24 +55,45 @@ const ( BindingDelta_REMOVE BindingDelta_Action = 2 ) -var BindingDelta_Action_name = map[int32]string{ - 0: "ACTION_UNSPECIFIED", - 1: "ADD", - 2: "REMOVE", -} +// Enum value maps for BindingDelta_Action. +var ( + BindingDelta_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", + } + BindingDelta_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, + } +) -var BindingDelta_Action_value = map[string]int32{ - "ACTION_UNSPECIFIED": 0, - "ADD": 1, - "REMOVE": 2, +func (x BindingDelta_Action) Enum() *BindingDelta_Action { + p := new(BindingDelta_Action) + *p = x + return p } func (x BindingDelta_Action) String() string { - return proto.EnumName(BindingDelta_Action_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } +func (BindingDelta_Action) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[0].Descriptor() +} + +func (BindingDelta_Action) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[0] +} + +func (x BindingDelta_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use BindingDelta_Action.Descriptor instead. func (BindingDelta_Action) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{3, 0} + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3, 0} } // The type of action performed on an audit configuration in a policy. @@ -67,51 +108,81 @@ const ( AuditConfigDelta_REMOVE AuditConfigDelta_Action = 2 ) -var AuditConfigDelta_Action_name = map[int32]string{ - 0: "ACTION_UNSPECIFIED", - 1: "ADD", - 2: "REMOVE", -} +// Enum value maps for AuditConfigDelta_Action. +var ( + AuditConfigDelta_Action_name = map[int32]string{ + 0: "ACTION_UNSPECIFIED", + 1: "ADD", + 2: "REMOVE", + } + AuditConfigDelta_Action_value = map[string]int32{ + "ACTION_UNSPECIFIED": 0, + "ADD": 1, + "REMOVE": 2, + } +) -var AuditConfigDelta_Action_value = map[string]int32{ - "ACTION_UNSPECIFIED": 0, - "ADD": 1, - "REMOVE": 2, +func (x AuditConfigDelta_Action) Enum() *AuditConfigDelta_Action { + p := new(AuditConfigDelta_Action) + *p = x + return p } func (x AuditConfigDelta_Action) String() string { - return proto.EnumName(AuditConfigDelta_Action_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AuditConfigDelta_Action) Descriptor() protoreflect.EnumDescriptor { + return file_google_iam_v1_policy_proto_enumTypes[1].Descriptor() } +func (AuditConfigDelta_Action) Type() protoreflect.EnumType { + return &file_google_iam_v1_policy_proto_enumTypes[1] +} + +func (x AuditConfigDelta_Action) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AuditConfigDelta_Action.Descriptor instead. func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{4, 0} + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{4, 0} } // Defines an Identity and Access Management (IAM) policy. It is used to // specify access control policies for Cloud Platform resources. // // -// A `Policy` consists of a list of `bindings`. A `binding` binds a list of -// `members` to a `role`, where the members can be user accounts, Google groups, -// Google domains, and service accounts. A `role` is a named list of permissions -// defined by IAM. 
+// A `Policy` is a collection of `bindings`. A `binding` binds one or more +// `members` to a single `role`. Members can be user accounts, service accounts, +// Google groups, and domains (such as G Suite). A `role` is a named list of +// permissions (defined by IAM or configured by users). A `binding` can +// optionally specify a `condition`, which is a logic expression that further +// constrains the role binding based on attributes about the request and/or +// target resource. // // **JSON Example** // // { // "bindings": [ // { -// "role": "roles/owner", +// "role": "roles/resourcemanager.organizationAdmin", // "members": [ // "user:mike@example.com", // "group:admins@example.com", // "domain:google.com", -// "serviceAccount:my-other-app@appspot.gserviceaccount.com" +// "serviceAccount:my-project-id@appspot.gserviceaccount.com" // ] // }, // { -// "role": "roles/viewer", -// "members": ["user:sean@example.com"] +// "role": "roles/resourcemanager.organizationViewer", +// "members": ["user:eve@example.com"], +// "condition": { +// "title": "expirable access", +// "description": "Does not grant access after Sep 2020", +// "expression": "request.time < +// timestamp('2020-10-01T00:00:00.000Z')", +// } // } // ] // } @@ -123,19 +194,40 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) { // - user:mike@example.com // - group:admins@example.com // - domain:google.com -// - serviceAccount:my-other-app@appspot.gserviceaccount.com -// role: roles/owner +// - serviceAccount:my-project-id@appspot.gserviceaccount.com +// role: roles/resourcemanager.organizationAdmin // - members: -// - user:sean@example.com -// role: roles/viewer -// +// - user:eve@example.com +// role: roles/resourcemanager.organizationViewer +// condition: +// title: expirable access +// description: Does not grant access after Sep 2020 +// expression: request.time < timestamp('2020-10-01T00:00:00.000Z') // // For a description of IAM and its features, see the // [IAM developer's guide](https://cloud.google.com/iam/docs). type Policy struct { - // Deprecated. + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the format of the policy. + // + // Valid values are 0, 1, and 3. Requests specifying an invalid value will be + // rejected. + // + // Operations affecting conditional bindings must specify version 3. This can + // be either setting a conditional policy, modifying a conditional binding, + // or removing a binding (conditional or unconditional) from the stored + // conditional policy. + // Operations on non-conditional policies may specify any valid value or + // leave the field unset. + // + // If no etag is provided in the call to `setIamPolicy`, version compliance + // checks against the stored policy is skipped. Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - // Associates a list of `members` to a `role`. + // Associates a list of `members` to a `role`. Optionally may specify a + // `condition` that determines when binding is in effect. // `bindings` with no members will result in an error. Bindings []*Binding `protobuf:"bytes,4,rep,name=bindings,proto3" json:"bindings,omitempty"` // `etag` is used for optimistic concurrency control as a way to help @@ -147,61 +239,71 @@ type Policy struct { // ensure that their change will be applied to the same version of the policy. // // If no `etag` is provided in the call to `setIamPolicy`, then the existing - // policy is overwritten. 
- Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // policy is overwritten. Due to blind-set semantics of an etag-less policy, + // 'setIamPolicy' will not fail even if the incoming policy version does not + // meet the requirements for modifying the stored policy. + Etag []byte `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` +} + +func (x *Policy) Reset() { + *x = Policy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Policy) Reset() { *m = Policy{} } -func (m *Policy) String() string { return proto.CompactTextString(m) } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{0} +func (x *Policy) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Policy) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Policy.Unmarshal(m, b) -} -func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Policy.Marshal(b, m, deterministic) -} -func (m *Policy) XXX_Merge(src proto.Message) { - xxx_messageInfo_Policy.Merge(m, src) -} -func (m *Policy) XXX_Size() int { - return xxx_messageInfo_Policy.Size(m) -} -func (m *Policy) XXX_DiscardUnknown() { - xxx_messageInfo_Policy.DiscardUnknown(m) +func (*Policy) ProtoMessage() {} + +func (x *Policy) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Policy proto.InternalMessageInfo +// Deprecated: Use Policy.ProtoReflect.Descriptor instead. +func (*Policy) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{0} +} -func (m *Policy) GetVersion() int32 { - if m != nil { - return m.Version +func (x *Policy) GetVersion() int32 { + if x != nil { + return x.Version } return 0 } -func (m *Policy) GetBindings() []*Binding { - if m != nil { - return m.Bindings +func (x *Policy) GetBindings() []*Binding { + if x != nil { + return x.Bindings } return nil } -func (m *Policy) GetEtag() []byte { - if m != nil { - return m.Etag +func (x *Policy) GetEtag() []byte { + if x != nil { + return x.Etag } return nil } // Associates `members` with a `role`. type Binding struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Role that is assigned to `members`. // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` @@ -234,104 +336,116 @@ type Binding struct { // NOTE: An unsatisfied condition will not allow user access via current // binding. Different bindings, including their conditions, are examined // independently. 
- Condition *expr.Expr `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Condition *expr.Expr `protobuf:"bytes,3,opt,name=condition,proto3" json:"condition,omitempty"` } -func (m *Binding) Reset() { *m = Binding{} } -func (m *Binding) String() string { return proto.CompactTextString(m) } -func (*Binding) ProtoMessage() {} -func (*Binding) Descriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{1} +func (x *Binding) Reset() { + *x = Binding{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Binding) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Binding.Unmarshal(m, b) -} -func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Binding.Marshal(b, m, deterministic) +func (x *Binding) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Binding) XXX_Merge(src proto.Message) { - xxx_messageInfo_Binding.Merge(m, src) -} -func (m *Binding) XXX_Size() int { - return xxx_messageInfo_Binding.Size(m) -} -func (m *Binding) XXX_DiscardUnknown() { - xxx_messageInfo_Binding.DiscardUnknown(m) + +func (*Binding) ProtoMessage() {} + +func (x *Binding) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Binding proto.InternalMessageInfo +// Deprecated: Use Binding.ProtoReflect.Descriptor instead. +func (*Binding) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{1} +} -func (m *Binding) GetRole() string { - if m != nil { - return m.Role +func (x *Binding) GetRole() string { + if x != nil { + return x.Role } return "" } -func (m *Binding) GetMembers() []string { - if m != nil { - return m.Members +func (x *Binding) GetMembers() []string { + if x != nil { + return x.Members } return nil } -func (m *Binding) GetCondition() *expr.Expr { - if m != nil { - return m.Condition +func (x *Binding) GetCondition() *expr.Expr { + if x != nil { + return x.Condition } return nil } // The difference delta between two policies. type PolicyDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The delta for Bindings between two policies. BindingDeltas []*BindingDelta `protobuf:"bytes,1,rep,name=binding_deltas,json=bindingDeltas,proto3" json:"binding_deltas,omitempty"` // The delta for AuditConfigs between two policies. 
- AuditConfigDeltas []*AuditConfigDelta `protobuf:"bytes,2,rep,name=audit_config_deltas,json=auditConfigDeltas,proto3" json:"audit_config_deltas,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + AuditConfigDeltas []*AuditConfigDelta `protobuf:"bytes,2,rep,name=audit_config_deltas,json=auditConfigDeltas,proto3" json:"audit_config_deltas,omitempty"` } -func (m *PolicyDelta) Reset() { *m = PolicyDelta{} } -func (m *PolicyDelta) String() string { return proto.CompactTextString(m) } -func (*PolicyDelta) ProtoMessage() {} -func (*PolicyDelta) Descriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{2} +func (x *PolicyDelta) Reset() { + *x = PolicyDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *PolicyDelta) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PolicyDelta.Unmarshal(m, b) -} -func (m *PolicyDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PolicyDelta.Marshal(b, m, deterministic) -} -func (m *PolicyDelta) XXX_Merge(src proto.Message) { - xxx_messageInfo_PolicyDelta.Merge(m, src) +func (x *PolicyDelta) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PolicyDelta) XXX_Size() int { - return xxx_messageInfo_PolicyDelta.Size(m) -} -func (m *PolicyDelta) XXX_DiscardUnknown() { - xxx_messageInfo_PolicyDelta.DiscardUnknown(m) + +func (*PolicyDelta) ProtoMessage() {} + +func (x *PolicyDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_PolicyDelta proto.InternalMessageInfo +// Deprecated: Use PolicyDelta.ProtoReflect.Descriptor instead. +func (*PolicyDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{2} +} -func (m *PolicyDelta) GetBindingDeltas() []*BindingDelta { - if m != nil { - return m.BindingDeltas +func (x *PolicyDelta) GetBindingDeltas() []*BindingDelta { + if x != nil { + return x.BindingDeltas } return nil } -func (m *PolicyDelta) GetAuditConfigDeltas() []*AuditConfigDelta { - if m != nil { - return m.AuditConfigDeltas +func (x *PolicyDelta) GetAuditConfigDeltas() []*AuditConfigDelta { + if x != nil { + return x.AuditConfigDeltas } return nil } @@ -339,6 +453,10 @@ func (m *PolicyDelta) GetAuditConfigDeltas() []*AuditConfigDelta { // One delta entry for Binding. Each individual change (only one member in each // entry) to a binding will be a separate entry. type BindingDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The action that was performed on a Binding. // Required Action BindingDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.BindingDelta_Action" json:"action,omitempty"` @@ -350,63 +468,66 @@ type BindingDelta struct { // Follows the same format of Binding.members. // Required Member string `protobuf:"bytes,3,opt,name=member,proto3" json:"member,omitempty"` - // The condition that is associated with this binding. This field is logged - // only for Cloud Audit Logging. 
- Condition *expr.Expr `protobuf:"bytes,4,opt,name=condition,proto3" json:"condition,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // The condition that is associated with this binding. + Condition *expr.Expr `protobuf:"bytes,4,opt,name=condition,proto3" json:"condition,omitempty"` } -func (m *BindingDelta) Reset() { *m = BindingDelta{} } -func (m *BindingDelta) String() string { return proto.CompactTextString(m) } -func (*BindingDelta) ProtoMessage() {} -func (*BindingDelta) Descriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{3} +func (x *BindingDelta) Reset() { + *x = BindingDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *BindingDelta) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BindingDelta.Unmarshal(m, b) -} -func (m *BindingDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindingDelta.Marshal(b, m, deterministic) +func (x *BindingDelta) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *BindingDelta) XXX_Merge(src proto.Message) { - xxx_messageInfo_BindingDelta.Merge(m, src) -} -func (m *BindingDelta) XXX_Size() int { - return xxx_messageInfo_BindingDelta.Size(m) -} -func (m *BindingDelta) XXX_DiscardUnknown() { - xxx_messageInfo_BindingDelta.DiscardUnknown(m) + +func (*BindingDelta) ProtoMessage() {} + +func (x *BindingDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_BindingDelta proto.InternalMessageInfo +// Deprecated: Use BindingDelta.ProtoReflect.Descriptor instead. +func (*BindingDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{3} +} -func (m *BindingDelta) GetAction() BindingDelta_Action { - if m != nil { - return m.Action +func (x *BindingDelta) GetAction() BindingDelta_Action { + if x != nil { + return x.Action } return BindingDelta_ACTION_UNSPECIFIED } -func (m *BindingDelta) GetRole() string { - if m != nil { - return m.Role +func (x *BindingDelta) GetRole() string { + if x != nil { + return x.Role } return "" } -func (m *BindingDelta) GetMember() string { - if m != nil { - return m.Member +func (x *BindingDelta) GetMember() string { + if x != nil { + return x.Member } return "" } -func (m *BindingDelta) GetCondition() *expr.Expr { - if m != nil { - return m.Condition +func (x *BindingDelta) GetCondition() *expr.Expr { + if x != nil { + return x.Condition } return nil } @@ -414,6 +535,10 @@ func (m *BindingDelta) GetCondition() *expr.Expr { // One delta entry for AuditConfig. Each individual change (only one // exempted_member in each entry) to a AuditConfig will be a separate entry. type AuditConfigDelta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The action that was performed on an audit configuration in a policy. 
// Required Action AuditConfigDelta_Action `protobuf:"varint,1,opt,name=action,proto3,enum=google.iam.v1.AuditConfigDelta_Action" json:"action,omitempty"` @@ -429,112 +554,265 @@ type AuditConfigDelta struct { // Specifies the log_type that was be enabled. ADMIN_ACTIVITY is always // enabled, and cannot be configured. // Required - LogType string `protobuf:"bytes,4,opt,name=log_type,json=logType,proto3" json:"log_type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + LogType string `protobuf:"bytes,4,opt,name=log_type,json=logType,proto3" json:"log_type,omitempty"` } -func (m *AuditConfigDelta) Reset() { *m = AuditConfigDelta{} } -func (m *AuditConfigDelta) String() string { return proto.CompactTextString(m) } -func (*AuditConfigDelta) ProtoMessage() {} -func (*AuditConfigDelta) Descriptor() ([]byte, []int) { - return fileDescriptor_a3cd40b8a66b2a99, []int{4} +func (x *AuditConfigDelta) Reset() { + *x = AuditConfigDelta{} + if protoimpl.UnsafeEnabled { + mi := &file_google_iam_v1_policy_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *AuditConfigDelta) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AuditConfigDelta.Unmarshal(m, b) -} -func (m *AuditConfigDelta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AuditConfigDelta.Marshal(b, m, deterministic) +func (x *AuditConfigDelta) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AuditConfigDelta) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuditConfigDelta.Merge(m, src) -} -func (m *AuditConfigDelta) XXX_Size() int { - return xxx_messageInfo_AuditConfigDelta.Size(m) -} -func (m *AuditConfigDelta) XXX_DiscardUnknown() { - xxx_messageInfo_AuditConfigDelta.DiscardUnknown(m) + +func (*AuditConfigDelta) ProtoMessage() {} + +func (x *AuditConfigDelta) ProtoReflect() protoreflect.Message { + mi := &file_google_iam_v1_policy_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_AuditConfigDelta proto.InternalMessageInfo +// Deprecated: Use AuditConfigDelta.ProtoReflect.Descriptor instead. 
+func (*AuditConfigDelta) Descriptor() ([]byte, []int) { + return file_google_iam_v1_policy_proto_rawDescGZIP(), []int{4} +} -func (m *AuditConfigDelta) GetAction() AuditConfigDelta_Action { - if m != nil { - return m.Action +func (x *AuditConfigDelta) GetAction() AuditConfigDelta_Action { + if x != nil { + return x.Action } return AuditConfigDelta_ACTION_UNSPECIFIED } -func (m *AuditConfigDelta) GetService() string { - if m != nil { - return m.Service +func (x *AuditConfigDelta) GetService() string { + if x != nil { + return x.Service } return "" } -func (m *AuditConfigDelta) GetExemptedMember() string { - if m != nil { - return m.ExemptedMember +func (x *AuditConfigDelta) GetExemptedMember() string { + if x != nil { + return x.ExemptedMember } return "" } -func (m *AuditConfigDelta) GetLogType() string { - if m != nil { - return m.LogType +func (x *AuditConfigDelta) GetLogType() string { + if x != nil { + return x.LogType } return "" } -func init() { - proto.RegisterEnum("google.iam.v1.BindingDelta_Action", BindingDelta_Action_name, BindingDelta_Action_value) - proto.RegisterEnum("google.iam.v1.AuditConfigDelta_Action", AuditConfigDelta_Action_name, AuditConfigDelta_Action_value) - proto.RegisterType((*Policy)(nil), "google.iam.v1.Policy") - proto.RegisterType((*Binding)(nil), "google.iam.v1.Binding") - proto.RegisterType((*PolicyDelta)(nil), "google.iam.v1.PolicyDelta") - proto.RegisterType((*BindingDelta)(nil), "google.iam.v1.BindingDelta") - proto.RegisterType((*AuditConfigDelta)(nil), "google.iam.v1.AuditConfigDelta") -} - -func init() { proto.RegisterFile("google/iam/v1/policy.proto", fileDescriptor_a3cd40b8a66b2a99) } - -var fileDescriptor_a3cd40b8a66b2a99 = []byte{ - // 550 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0xae, 0xd2, 0x4e, - 0x14, 0xc7, 0x7f, 0x03, 0xfc, 0xca, 0xe5, 0x70, 0x2f, 0xc2, 0xdc, 0x84, 0x54, 0x34, 0x91, 0x74, - 0xa1, 0xac, 0x5a, 0xc1, 0xb8, 0xd1, 0xc4, 0x84, 0x7f, 0x1a, 0x16, 0xf7, 0x42, 0xc6, 0x2b, 0x0b, - 0x43, 0x42, 0x86, 0x76, 0xac, 0x63, 0xda, 0x4e, 0xd3, 0xf6, 0x12, 0x58, 0xfb, 0x26, 0x2e, 0x7d, - 0x14, 0x1f, 0xc2, 0xad, 0xaf, 0xe0, 0xd2, 0x74, 0xa6, 0x45, 0x68, 0x8c, 0x1a, 0x77, 0x73, 0xce, - 0xf9, 0xce, 0x39, 0xe7, 0xf3, 0x6d, 0x07, 0x3a, 0xae, 0x10, 0xae, 0xc7, 0x2c, 0x4e, 0x7d, 0x6b, - 0xdb, 0xb7, 0x42, 0xe1, 0x71, 0x7b, 0x6f, 0x86, 0x91, 0x48, 0x04, 0xbe, 0x50, 0x35, 0x93, 0x53, - 0xdf, 0xdc, 0xf6, 0x3b, 0xed, 0x4c, 0x9a, 0xec, 0x43, 0x66, 0xb1, 0x5d, 0x18, 0x29, 0x59, 0xe7, - 0x7e, 0x96, 0xa7, 0x21, 0xb7, 0x68, 0x10, 0x88, 0x84, 0x26, 0x5c, 0x04, 0xb1, 0xaa, 0x1a, 0x1f, - 0x40, 0x5b, 0xc8, 0xa6, 0x58, 0x87, 0xea, 0x96, 0x45, 0x31, 0x17, 0x81, 0x8e, 0xba, 0xa8, 0xf7, - 0x3f, 0xc9, 0x43, 0x3c, 0x80, 0xb3, 0x0d, 0x0f, 0x1c, 0x1e, 0xb8, 0xb1, 0x5e, 0xe9, 0x96, 0x7b, - 0xf5, 0x41, 0xdb, 0x3c, 0x99, 0x6d, 0x8e, 0x54, 0x99, 0x1c, 0x74, 0x18, 0x43, 0x85, 0x25, 0xd4, - 0xd5, 0xcb, 0x5d, 0xd4, 0x3b, 0x27, 0xf2, 0x6c, 0xbc, 0x87, 0x6a, 0x26, 0x4c, 0xcb, 0x91, 0xf0, - 0x98, 0x9c, 0x54, 0x23, 0xf2, 0x9c, 0x2e, 0xe0, 0x33, 0x7f, 0xc3, 0xa2, 0x58, 0x2f, 0x75, 0xcb, - 0xbd, 0x1a, 0xc9, 0x43, 0x6c, 0x41, 0xcd, 0x16, 0x81, 0xc3, 0xd3, 0xc5, 0x65, 0xc7, 0xfa, 0xa0, - 0x95, 0x6f, 0x90, 0xe2, 0x9a, 0xd3, 0x5d, 0x18, 0x91, 0x9f, 0x1a, 0xe3, 0x13, 0x82, 0xba, 0xc2, - 0x9a, 0x30, 0x2f, 0xa1, 0x78, 0x04, 0x8d, 0x6c, 0xb3, 0xb5, 0x93, 0x26, 0x62, 0x1d, 0x49, 0x8e, - 0x7b, 0xbf, 0xe6, 0x90, 0x97, 0xc8, 0xc5, 0xe6, 0x28, 0x8a, 0xf1, 0x1c, 0x2e, 0xe9, 0xad, 0xc3, - 0x93, 0xb5, 0x2d, 0x82, 0x77, 0xfc, 
0xd0, 0xa8, 0x24, 0x1b, 0x3d, 0x28, 0x34, 0x1a, 0xa6, 0xca, - 0xb1, 0x14, 0xaa, 0x66, 0x2d, 0x5a, 0xc8, 0xc4, 0xc6, 0x57, 0x04, 0xe7, 0xc7, 0x03, 0xf1, 0x33, - 0xd0, 0xa8, 0x9d, 0xe4, 0x1f, 0xa0, 0x31, 0x30, 0x7e, 0xb3, 0x9d, 0x39, 0x94, 0x4a, 0x92, 0xdd, - 0x38, 0x18, 0x5a, 0x3a, 0x32, 0xb4, 0x0d, 0x9a, 0x72, 0x50, 0x7a, 0x56, 0x23, 0x59, 0x74, 0x6a, - 0x67, 0xe5, 0x2f, 0xec, 0x7c, 0x0a, 0x9a, 0x1a, 0x87, 0xdb, 0x80, 0x87, 0xe3, 0x9b, 0xd9, 0xfc, - 0x7a, 0xfd, 0xe6, 0xfa, 0xf5, 0x62, 0x3a, 0x9e, 0xbd, 0x9c, 0x4d, 0x27, 0xcd, 0xff, 0x70, 0x15, - 0xca, 0xc3, 0xc9, 0xa4, 0x89, 0x30, 0x80, 0x46, 0xa6, 0x57, 0xf3, 0xe5, 0xb4, 0x59, 0x32, 0xbe, - 0x21, 0x68, 0x16, 0x8d, 0xc0, 0x2f, 0x0a, 0x90, 0x0f, 0xff, 0xe0, 0x5c, 0x11, 0x54, 0x87, 0x6a, - 0xcc, 0xa2, 0x2d, 0xb7, 0x73, 0xd6, 0x3c, 0xc4, 0x8f, 0xe0, 0x0e, 0xdb, 0x31, 0x3f, 0x4c, 0x98, - 0xb3, 0x3e, 0xe1, 0x6e, 0xe4, 0xe9, 0x2b, 0xc5, 0x7f, 0x17, 0xce, 0x3c, 0xe1, 0xae, 0x53, 0x54, - 0x89, 0x5f, 0x23, 0x55, 0x4f, 0xb8, 0x37, 0xfb, 0x90, 0xfd, 0x23, 0xe9, 0xe8, 0x23, 0x82, 0x96, - 0x2d, 0xfc, 0x53, 0x94, 0x51, 0xf6, 0x0b, 0x2e, 0xd2, 0x87, 0xb6, 0x40, 0x6f, 0x1f, 0x67, 0x55, - 0x57, 0x78, 0x34, 0x70, 0x4d, 0x11, 0xb9, 0x96, 0xcb, 0x02, 0xf9, 0x0c, 0x2d, 0x55, 0xa2, 0x21, - 0x8f, 0xb3, 0xa7, 0xfe, 0x9c, 0x53, 0xff, 0x3b, 0x42, 0x9f, 0x4b, 0x97, 0xaf, 0xd4, 0xad, 0xb1, - 0x27, 0x6e, 0x1d, 0x73, 0x46, 0x7d, 0x73, 0xd9, 0xff, 0x92, 0x67, 0x57, 0x32, 0xbb, 0x9a, 0x51, - 0x7f, 0xb5, 0xec, 0x6f, 0x34, 0xd9, 0xeb, 0xc9, 0x8f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x29, 0x86, - 0x8f, 0x3e, 0x35, 0x04, 0x00, 0x00, +var File_google_iam_v1_policy_proto protoreflect.FileDescriptor + +var file_google_iam_v1_policy_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x16, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x6a, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x08, 0x62, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x68, 0x0a, + 0x07, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x63, 0x6f, + 
0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa2, 0x01, 0x0a, 0x0b, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x69, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, + 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x0d, 0x62, 0x69, + 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x12, 0x4f, 0x0a, 0x13, 0x61, + 0x75, 0x64, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x74, + 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x52, 0x11, 0x61, 0x75, 0x64, 0x69, 0x74, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x73, 0x22, 0xde, 0x01, 0x0a, + 0x0c, 0x42, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x3a, 0x0a, + 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x69, + 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x35, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x02, 0x22, 0xe7, 0x01, + 0x0a, 0x10, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, 0x6c, + 0x74, 0x61, 0x12, 0x3e, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x41, 0x75, 0x64, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x65, + 0x6c, 0x74, 0x61, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, + 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x74, 0x65, 0x64, 0x4d, + 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, + 0x22, 0x35, 0x0a, 0x06, 
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, + 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x02, 0x42, 0x83, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, 0xf8, 0x01, + 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, + 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_iam_v1_policy_proto_rawDescOnce sync.Once + file_google_iam_v1_policy_proto_rawDescData = file_google_iam_v1_policy_proto_rawDesc +) + +func file_google_iam_v1_policy_proto_rawDescGZIP() []byte { + file_google_iam_v1_policy_proto_rawDescOnce.Do(func() { + file_google_iam_v1_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_policy_proto_rawDescData) + }) + return file_google_iam_v1_policy_proto_rawDescData +} + +var file_google_iam_v1_policy_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_iam_v1_policy_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_google_iam_v1_policy_proto_goTypes = []interface{}{ + (BindingDelta_Action)(0), // 0: google.iam.v1.BindingDelta.Action + (AuditConfigDelta_Action)(0), // 1: google.iam.v1.AuditConfigDelta.Action + (*Policy)(nil), // 2: google.iam.v1.Policy + (*Binding)(nil), // 3: google.iam.v1.Binding + (*PolicyDelta)(nil), // 4: google.iam.v1.PolicyDelta + (*BindingDelta)(nil), // 5: google.iam.v1.BindingDelta + (*AuditConfigDelta)(nil), // 6: google.iam.v1.AuditConfigDelta + (*expr.Expr)(nil), // 7: google.type.Expr +} +var file_google_iam_v1_policy_proto_depIdxs = []int32{ + 3, // 0: google.iam.v1.Policy.bindings:type_name -> google.iam.v1.Binding + 7, // 1: google.iam.v1.Binding.condition:type_name -> google.type.Expr + 5, // 2: google.iam.v1.PolicyDelta.binding_deltas:type_name -> google.iam.v1.BindingDelta + 6, // 3: google.iam.v1.PolicyDelta.audit_config_deltas:type_name -> google.iam.v1.AuditConfigDelta + 0, // 4: google.iam.v1.BindingDelta.action:type_name -> google.iam.v1.BindingDelta.Action + 7, // 5: google.iam.v1.BindingDelta.condition:type_name -> google.type.Expr + 1, // 6: google.iam.v1.AuditConfigDelta.action:type_name -> google.iam.v1.AuditConfigDelta.Action + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_google_iam_v1_policy_proto_init() } +func file_google_iam_v1_policy_proto_init() { + if File_google_iam_v1_policy_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*Policy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Binding); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PolicyDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BindingDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuditConfigDelta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_iam_v1_policy_proto_rawDesc, + NumEnums: 2, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_iam_v1_policy_proto_goTypes, + DependencyIndexes: file_google_iam_v1_policy_proto_depIdxs, + EnumInfos: file_google_iam_v1_policy_proto_enumTypes, + MessageInfos: file_google_iam_v1_policy_proto_msgTypes, + }.Build() + File_google_iam_v1_policy_proto = out.File + file_google_iam_v1_policy_proto_rawDesc = nil + file_google_iam_v1_policy_proto_goTypes = nil + file_google_iam_v1_policy_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index 9ff770b5f..9034439ad 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -1,27 +1,46 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/rpc/code.proto package code import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 -// The canonical error codes for Google APIs. +// The canonical error codes for gRPC APIs. // // // Sometimes multiple error codes may apply. Services should return @@ -156,7 +175,8 @@ const ( Code_INTERNAL Code = 13 // The service is currently unavailable. This is most likely a // transient condition, which can be corrected by retrying with - // a backoff. + // a backoff. Note that it is not always safe to retry + // non-idempotent operations. // // See the guidelines above for deciding between `FAILED_PRECONDITION`, // `ABORTED`, and `UNAVAILABLE`. @@ -169,83 +189,153 @@ const ( Code_DATA_LOSS Code = 15 ) -var Code_name = map[int32]string{ - 0: "OK", - 1: "CANCELLED", - 2: "UNKNOWN", - 3: "INVALID_ARGUMENT", - 4: "DEADLINE_EXCEEDED", - 5: "NOT_FOUND", - 6: "ALREADY_EXISTS", - 7: "PERMISSION_DENIED", - 16: "UNAUTHENTICATED", - 8: "RESOURCE_EXHAUSTED", - 9: "FAILED_PRECONDITION", - 10: "ABORTED", - 11: "OUT_OF_RANGE", - 12: "UNIMPLEMENTED", - 13: "INTERNAL", - 14: "UNAVAILABLE", - 15: "DATA_LOSS", -} +// Enum value maps for Code. +var ( + Code_name = map[int32]string{ + 0: "OK", + 1: "CANCELLED", + 2: "UNKNOWN", + 3: "INVALID_ARGUMENT", + 4: "DEADLINE_EXCEEDED", + 5: "NOT_FOUND", + 6: "ALREADY_EXISTS", + 7: "PERMISSION_DENIED", + 16: "UNAUTHENTICATED", + 8: "RESOURCE_EXHAUSTED", + 9: "FAILED_PRECONDITION", + 10: "ABORTED", + 11: "OUT_OF_RANGE", + 12: "UNIMPLEMENTED", + 13: "INTERNAL", + 14: "UNAVAILABLE", + 15: "DATA_LOSS", + } + Code_value = map[string]int32{ + "OK": 0, + "CANCELLED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "UNAUTHENTICATED": 16, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, + } +) -var Code_value = map[string]int32{ - "OK": 0, - "CANCELLED": 1, - "UNKNOWN": 2, - "INVALID_ARGUMENT": 3, - "DEADLINE_EXCEEDED": 4, - "NOT_FOUND": 5, - "ALREADY_EXISTS": 6, - "PERMISSION_DENIED": 7, - "UNAUTHENTICATED": 16, - "RESOURCE_EXHAUSTED": 8, - "FAILED_PRECONDITION": 9, - "ABORTED": 10, - "OUT_OF_RANGE": 11, - "UNIMPLEMENTED": 12, - "INTERNAL": 13, - "UNAVAILABLE": 14, - "DATA_LOSS": 15, +func (x Code) Enum() *Code { + p := new(Code) + *p = x + return p } func (x Code) String() string { - return proto.EnumName(Code_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Code) Descriptor() protoreflect.EnumDescriptor { + return file_google_rpc_code_proto_enumTypes[0].Descriptor() +} + +func (Code) Type() protoreflect.EnumType { + return &file_google_rpc_code_proto_enumTypes[0] +} + +func (x Code) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } +// Deprecated: Use Code.Descriptor instead. 
func (Code) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_fe593a732623ccf0, []int{0} + return file_google_rpc_code_proto_rawDescGZIP(), []int{0} +} + +var File_google_rpc_code_proto protoreflect.FileDescriptor + +var file_google_rpc_code_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2a, 0xb7, 0x02, 0x0a, 0x04, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x06, 0x0a, 0x02, + 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, + 0x12, 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, + 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, + 0x4e, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, + 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, + 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, + 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, + 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, + 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x12, 0x16, 0x0a, 0x12, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, + 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, + 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, + 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, + 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, + 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, + 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, + 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, + 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x42, 0x58, 0x0a, + 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, + 0x09, 0x43, 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3b, 0x63, 0x6f, 0x64, + 0x65, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { - proto.RegisterEnum("google.rpc.Code", Code_name, Code_value) +var ( + file_google_rpc_code_proto_rawDescOnce sync.Once + file_google_rpc_code_proto_rawDescData = file_google_rpc_code_proto_rawDesc +) + +func file_google_rpc_code_proto_rawDescGZIP() []byte { + file_google_rpc_code_proto_rawDescOnce.Do(func() { + file_google_rpc_code_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_code_proto_rawDescData) + }) + return file_google_rpc_code_proto_rawDescData +} + +var file_google_rpc_code_proto_enumTypes = 
make([]protoimpl.EnumInfo, 1) +var file_google_rpc_code_proto_goTypes = []interface{}{ + (Code)(0), // 0: google.rpc.Code +} +var file_google_rpc_code_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func init() { proto.RegisterFile("google/rpc/code.proto", fileDescriptor_fe593a732623ccf0) } - -var fileDescriptor_fe593a732623ccf0 = []byte{ - // 362 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x51, 0xcd, 0x6e, 0x93, 0x31, - 0x10, 0xa4, 0x69, 0x49, 0x9b, 0xcd, 0xdf, 0xd6, 0xa5, 0xf0, 0x0e, 0x1c, 0x92, 0x43, 0x8f, 0x9c, - 0x36, 0x9f, 0x37, 0xad, 0x55, 0x67, 0xfd, 0xc9, 0x3f, 0x25, 0x70, 0xb1, 0x4a, 0x1a, 0x7d, 0x42, - 0x2a, 0x75, 0xf4, 0xc1, 0x13, 0xf1, 0x12, 0xbc, 0x1e, 0x72, 0x8b, 0xe8, 0xc5, 0x87, 0x99, 0xf1, - 0xee, 0xce, 0x0c, 0x5c, 0x76, 0xa5, 0x74, 0x8f, 0xfb, 0x65, 0x7f, 0xd8, 0x2d, 0x77, 0xe5, 0x61, - 0xbf, 0x38, 0xf4, 0xe5, 0x57, 0x51, 0xf0, 0x02, 0x2f, 0xfa, 0xc3, 0xee, 0xe3, 0x9f, 0x01, 0x9c, - 0x34, 0xe5, 0x61, 0xaf, 0x86, 0x30, 0x70, 0xb7, 0xf8, 0x46, 0x4d, 0x61, 0xd4, 0x90, 0x34, 0x6c, - 0x2d, 0x6b, 0x3c, 0x52, 0x63, 0x38, 0x4d, 0x72, 0x2b, 0xee, 0xb3, 0xe0, 0x40, 0xbd, 0x03, 0x34, - 0x72, 0x47, 0xd6, 0xe8, 0x4c, 0xfe, 0x3a, 0x6d, 0x58, 0x22, 0x1e, 0xab, 0x4b, 0x38, 0xd7, 0x4c, - 0xda, 0x1a, 0xe1, 0xcc, 0xdb, 0x86, 0x59, 0xb3, 0xc6, 0x93, 0x3a, 0x48, 0x5c, 0xcc, 0x6b, 0x97, - 0x44, 0xe3, 0x5b, 0xa5, 0x60, 0x46, 0xd6, 0x33, 0xe9, 0x2f, 0x99, 0xb7, 0x26, 0xc4, 0x80, 0xc3, - 0xfa, 0xb3, 0x65, 0xbf, 0x31, 0x21, 0x18, 0x27, 0x59, 0xb3, 0x18, 0xd6, 0x78, 0xaa, 0x2e, 0x60, - 0x9e, 0x84, 0x52, 0xbc, 0x61, 0x89, 0xa6, 0xa1, 0xc8, 0x1a, 0x51, 0xbd, 0x07, 0xe5, 0x39, 0xb8, - 0xe4, 0x9b, 0xba, 0xe5, 0x86, 0x52, 0xa8, 0xf8, 0x99, 0xfa, 0x00, 0x17, 0x6b, 0x32, 0x96, 0x75, - 0x6e, 0x3d, 0x37, 0x4e, 0xb4, 0x89, 0xc6, 0x09, 0x8e, 0xea, 0xe5, 0xb4, 0x72, 0xbe, 0xaa, 0x40, - 0x21, 0x4c, 0x5c, 0x8a, 0xd9, 0xad, 0xb3, 0x27, 0xb9, 0x66, 0x1c, 0xab, 0x73, 0x98, 0x26, 0x31, - 0x9b, 0xd6, 0x72, 0xb5, 0xc1, 0x1a, 0x27, 0x6a, 0x02, 0x67, 0x46, 0x22, 0x7b, 0x21, 0x8b, 0x53, - 0x35, 0x87, 0x71, 0x12, 0xba, 0x23, 0x63, 0x69, 0x65, 0x19, 0x67, 0xd5, 0x90, 0xa6, 0x48, 0xd9, - 0xba, 0x10, 0x70, 0xbe, 0xda, 0xc2, 0x6c, 0x57, 0x7e, 0x2c, 0x5e, 0xb3, 0x5c, 0x8d, 0x6a, 0x90, - 0x6d, 0x8d, 0xb8, 0x3d, 0xfa, 0x7a, 0xf5, 0x8f, 0xe8, 0xca, 0xe3, 0xfd, 0x53, 0xb7, 0x28, 0x7d, - 0xb7, 0xec, 0xf6, 0x4f, 0xcf, 0x05, 0x2c, 0x5f, 0xa8, 0xfb, 0xc3, 0xf7, 0x9f, 0xff, 0xab, 0xf9, - 0x54, 0x9f, 0xdf, 0x83, 0x63, 0xdf, 0x36, 0xdf, 0x86, 0xcf, 0xaa, 0xab, 0xbf, 0x01, 0x00, 0x00, - 0xff, 0xff, 0x8e, 0x97, 0x77, 0xc2, 0xbf, 0x01, 0x00, 0x00, +func init() { file_google_rpc_code_proto_init() } +func file_google_rpc_code_proto_init() { + if File_google_rpc_code_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_code_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_code_proto_goTypes, + DependencyIndexes: file_google_rpc_code_proto_depIdxs, + EnumInfos: file_google_rpc_code_proto_enumTypes, + }.Build() + File_google_rpc_code_proto = out.File + file_google_rpc_code_proto_rawDesc = nil + file_google_rpc_code_proto_goTypes = nil + 
file_google_rpc_code_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index 0b9907f89..5dfabd640 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,163 +1,206 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/rpc/status.proto package status import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // The `Status` type defines a logical error model that is suitable for // different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). The error model is designed to be: -// -// - Simple to use and understand for most users -// - Flexible enough to meet unexpected needs -// -// # Overview -// -// The `Status` message contains three pieces of data: error code, error -// message, and error details. The error code should be an enum value of -// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes -// if needed. The error message should be a developer-facing English message -// that helps developers *understand* and *resolve* the error. If a localized -// user-facing error message is needed, put the localized message in the error -// details or localize it in the client. The optional error details may contain -// arbitrary information about the error. There is a predefined set of error -// detail types in the package `google.rpc` that can be used for common error -// conditions. -// -// # Language mapping -// -// The `Status` message is the logical representation of the error model, but it -// is not necessarily the actual wire format. 
When the `Status` message is -// exposed in different client libraries and different wire protocols, it can be -// mapped differently. For example, it will likely be mapped to some exceptions -// in Java, but more likely mapped to some error codes in C. +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. // -// # Other uses -// -// The error model and the `Status` message can be used in a variety of -// environments, either with or without APIs, to provide a -// consistent developer experience across different environments. -// -// Example uses of this error model include: -// -// - Partial errors. If a service needs to return partial errors to the client, -// it may embed the `Status` in the normal response to indicate the partial -// errors. -// -// - Workflow errors. A typical workflow has multiple steps. Each step may -// have a `Status` message for error reporting. -// -// - Batch operations. If a client uses batch request and batch response, the -// `Status` message should be used directly inside batch response, one for -// each error sub-response. -// -// - Asynchronous operations. If an API call embeds asynchronous operation -// results in its response, the status of those operations should be -// represented directly using the `Status` message. -// -// - Logging. If some API errors are stored in logs, the message `Status` could -// be used directly after any stripping needed for security/privacy reasons. +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). type Status struct { - // The status code, which should be an enum value of - // [google.rpc.Code][google.rpc.Code]. + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized - // by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. 
- Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Details []*any.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` } -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_24d244abaf643bfe, []int{0} +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Status) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Status.Unmarshal(m, b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) -} -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Status) XXX_Size() int { - return xxx_messageInfo_Status.Size(m) -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Status proto.InternalMessageInfo +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_google_rpc_status_proto_rawDescGZIP(), []int{0} +} -func (m *Status) GetCode() int32 { - if m != nil { - return m.Code +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code } return 0 } -func (m *Status) GetMessage() string { - if m != nil { - return m.Message +func (x *Status) GetMessage() string { + if x != nil { + return x.Message } return "" } -func (m *Status) GetDetails() []*any.Any { - if m != nil { - return m.Details +func (x *Status) GetDetails() []*any.Any { + if x != nil { + return x.Details } return nil } -func init() { - proto.RegisterType((*Status)(nil), "google.rpc.Status") +var File_google_rpc_status_proto protoreflect.FileDescriptor + +var file_google_rpc_status_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } -func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) } - -var fileDescriptor_24d244abaf643bfe = []byte{ - // 209 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, - 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, - 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, - 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, - 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, - 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, - 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, - 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, - 
0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, - 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, - 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, - 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, - 0x00, +var ( + file_google_rpc_status_proto_rawDescOnce sync.Once + file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc +) + +func file_google_rpc_status_proto_rawDescGZIP() []byte { + file_google_rpc_status_proto_rawDescOnce.Do(func() { + file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) + }) + return file_google_rpc_status_proto_rawDescData +} + +var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_rpc_status_proto_goTypes = []interface{}{ + (*Status)(nil), // 0: google.rpc.Status + (*any.Any)(nil), // 1: google.protobuf.Any +} +var file_google_rpc_status_proto_depIdxs = []int32{ + 1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_rpc_status_proto_init() } +func file_google_rpc_status_proto_init() { + if File_google_rpc_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_status_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_status_proto_goTypes, + DependencyIndexes: file_google_rpc_status_proto_depIdxs, + MessageInfos: file_google_rpc_status_proto_msgTypes, + }.Build() + File_google_rpc_status_proto = out.File + file_google_rpc_status_proto_rawDesc = nil + file_google_rpc_status_proto_goTypes = nil + file_google_rpc_status_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go index 27d46fe00..4d9a47dca 100644 --- a/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/type/expr/expr.pb.go @@ -1,25 +1,45 @@ +// Copyright 2019 Google LLC. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + // Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.22.0 +// protoc v3.12.3 // source: google/type/expr.proto package expr import ( - fmt "fmt" - math "math" + reflect "reflect" + sync "sync" proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 // Represents an expression text. Example: // @@ -27,6 +47,10 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // description: "Determines whether the request has a user account" // expression: "size(request.user) > 0" type Expr struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Textual representation of an expression in // Common Expression Language syntax. // @@ -42,84 +66,150 @@ type Expr struct { Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // An optional string indicating the location of the expression for error // reporting, e.g. a file name and a position in the file. 
- Location string `protobuf:"bytes,4,opt,name=location,proto3" json:"location,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Location string `protobuf:"bytes,4,opt,name=location,proto3" json:"location,omitempty"` } -func (m *Expr) Reset() { *m = Expr{} } -func (m *Expr) String() string { return proto.CompactTextString(m) } -func (*Expr) ProtoMessage() {} -func (*Expr) Descriptor() ([]byte, []int) { - return fileDescriptor_d7920f1ae7a2722f, []int{0} +func (x *Expr) Reset() { + *x = Expr{} + if protoimpl.UnsafeEnabled { + mi := &file_google_type_expr_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Expr) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Expr.Unmarshal(m, b) -} -func (m *Expr) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Expr.Marshal(b, m, deterministic) +func (x *Expr) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Expr) XXX_Merge(src proto.Message) { - xxx_messageInfo_Expr.Merge(m, src) -} -func (m *Expr) XXX_Size() int { - return xxx_messageInfo_Expr.Size(m) -} -func (m *Expr) XXX_DiscardUnknown() { - xxx_messageInfo_Expr.DiscardUnknown(m) + +func (*Expr) ProtoMessage() {} + +func (x *Expr) ProtoReflect() protoreflect.Message { + mi := &file_google_type_expr_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_Expr proto.InternalMessageInfo +// Deprecated: Use Expr.ProtoReflect.Descriptor instead. +func (*Expr) Descriptor() ([]byte, []int) { + return file_google_type_expr_proto_rawDescGZIP(), []int{0} +} -func (m *Expr) GetExpression() string { - if m != nil { - return m.Expression +func (x *Expr) GetExpression() string { + if x != nil { + return x.Expression } return "" } -func (m *Expr) GetTitle() string { - if m != nil { - return m.Title +func (x *Expr) GetTitle() string { + if x != nil { + return x.Title } return "" } -func (m *Expr) GetDescription() string { - if m != nil { - return m.Description +func (x *Expr) GetDescription() string { + if x != nil { + return x.Description } return "" } -func (m *Expr) GetLocation() string { - if m != nil { - return m.Location +func (x *Expr) GetLocation() string { + if x != nil { + return x.Location } return "" } -func init() { - proto.RegisterType((*Expr)(nil), "google.type.Expr") +var File_google_type_expr_proto protoreflect.FileDescriptor + +var file_google_type_expr_proto_rawDesc = []byte{ + 0x0a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, + 0x70, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x22, 0x7a, 0x0a, 0x04, 0x45, 0x78, 0x70, 0x72, 0x12, 0x1e, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, + 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, + 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 
0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x5a, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x42, 0x09, 0x45, 0x78, 0x70, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x34, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x65, 0x78, + 0x70, 0x72, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xa2, 0x02, 0x03, 0x47, 0x54, 0x50, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func init() { proto.RegisterFile("google/type/expr.proto", fileDescriptor_d7920f1ae7a2722f) } - -var fileDescriptor_d7920f1ae7a2722f = []byte{ - // 195 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0xa9, 0x2c, 0x48, 0xd5, 0x4f, 0xad, 0x28, 0x28, 0xd2, 0x2b, 0x28, 0xca, - 0x2f, 0xc9, 0x17, 0xe2, 0x86, 0x88, 0xeb, 0x81, 0xc4, 0x95, 0xaa, 0xb8, 0x58, 0x5c, 0x2b, 0x0a, - 0x8a, 0x84, 0xe4, 0xb8, 0xb8, 0x40, 0x4a, 0x52, 0x8b, 0x8b, 0x33, 0xf3, 0xf3, 0x24, 0x18, 0x15, - 0x18, 0x35, 0x38, 0x83, 0x90, 0x44, 0x84, 0x44, 0xb8, 0x58, 0x4b, 0x32, 0x4b, 0x72, 0x52, 0x25, - 0x98, 0xc0, 0x52, 0x10, 0x8e, 0x90, 0x02, 0x17, 0x77, 0x4a, 0x6a, 0x71, 0x72, 0x51, 0x66, 0x41, - 0x09, 0x48, 0x1b, 0x33, 0x58, 0x0e, 0x59, 0x48, 0x48, 0x8a, 0x8b, 0x23, 0x27, 0x3f, 0x39, 0x11, - 0x2c, 0xcd, 0x02, 0x96, 0x86, 0xf3, 0x9d, 0xa2, 0xb8, 0xf8, 0x93, 0xf3, 0x73, 0xf5, 0x90, 0x9c, - 0xe3, 0xc4, 0x09, 0x72, 0x4c, 0x00, 0xc8, 0x99, 0x01, 0x8c, 0x51, 0x26, 0x50, 0x99, 0xf4, 0xfc, - 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74, 0xfd, 0xf4, 0xd4, 0x3c, 0xb0, 0x27, 0xf4, 0x21, - 0x52, 0x89, 0x05, 0x99, 0xc5, 0x08, 0xff, 0x59, 0x83, 0x88, 0x45, 0x4c, 0xcc, 0xee, 0x21, 0x01, - 0x49, 0x6c, 0x60, 0x65, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x67, 0x9e, 0xf5, 0x05, - 0x01, 0x00, 0x00, +var ( + file_google_type_expr_proto_rawDescOnce sync.Once + file_google_type_expr_proto_rawDescData = file_google_type_expr_proto_rawDesc +) + +func file_google_type_expr_proto_rawDescGZIP() []byte { + file_google_type_expr_proto_rawDescOnce.Do(func() { + file_google_type_expr_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_type_expr_proto_rawDescData) + }) + return file_google_type_expr_proto_rawDescData +} + +var file_google_type_expr_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_type_expr_proto_goTypes = []interface{}{ + (*Expr)(nil), // 0: google.type.Expr +} +var file_google_type_expr_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_type_expr_proto_init() } +func file_google_type_expr_proto_init() { + if File_google_type_expr_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_type_expr_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Expr); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + 
File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_type_expr_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_type_expr_proto_goTypes, + DependencyIndexes: file_google_type_expr_proto_depIdxs, + MessageInfos: file_google_type_expr_proto_msgTypes, + }.Build() + File_google_type_expr_proto = out.File + file_google_type_expr_proto_rawDesc = nil + file_google_type_expr_proto_goTypes = nil + file_google_type_expr_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index 024408e64..0e24e59f0 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -2,18 +2,20 @@ language: go matrix: include: - - go: 1.12.x + - go: 1.14.x env: VET=1 GO111MODULE=on - - go: 1.12.x + - go: 1.14.x env: RACE=1 GO111MODULE=on - - go: 1.12.x + - go: 1.14.x env: RUN386=1 - - go: 1.12.x + - go: 1.14.x env: GRPC_GO_RETRY=on - - go: 1.11.x + - go: 1.14.x + env: TESTEXTRAS=1 + - go: 1.13.x + env: GO111MODULE=on + - go: 1.12.x env: GO111MODULE=on - - go: 1.10.x - - go: 1.9.x - go: 1.9.x env: GAE=1 @@ -23,17 +25,18 @@ before_install: - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi - - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then VET_SKIP_PROTO=1; fi + - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi install: - try3() { eval "$*" || eval "$*" || eval "$*"; } - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi' - - if [[ "${GAE}" = 1 ]]; then source ./install_gae.sh; make testappenginedeps; fi - - if [[ "${VET}" = 1 ]]; then ./vet.sh -install; fi + - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi + - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi script: - set -e - - if [[ "${VET}" = 1 ]]; then ./vet.sh; fi - - if [[ "${GAE}" = 1 ]]; then make testappengine; exit 0; fi - - if [[ "${RACE}" = 1 ]]; then make testrace; exit 0; fi + - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi + - if [[ -n "${VET}" ]]; then ./vet.sh; fi + - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi + - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi - make test diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md new file mode 100644 index 000000000..9d4213ebc --- /dev/null +++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index 6e69b28c2..4f1567e2f 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -1,6 +1,8 @@ # How to contribute -We definitely welcome your patches and contributions to gRPC! +We definitely welcome your patches and contributions to gRPC! 
Please read the gRPC +organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md) +and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding. If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md new file mode 100644 index 000000000..d6ff26747 --- /dev/null +++ b/vendor/google.golang.org/grpc/GOVERNANCE.md @@ -0,0 +1 @@ +This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md). diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md new file mode 100644 index 000000000..093c82b3a --- /dev/null +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -0,0 +1,27 @@ +This page lists all active maintainers of this repository. If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. + +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. + +## Maintainers (in alphabetical order) +- [canguler](https://github.com/canguler), Google LLC +- [cesarghali](https://github.com/cesarghali), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index db982aabd..410f7d56d 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -19,6 +19,9 @@ proto: test: testdeps go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... +testsubmodule: testdeps + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + testappengine: testappenginedeps goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index afbc43db5..249cd2063 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -7,7 +7,7 @@ The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: -Go](https://grpc.io/docs/quickstart/go.html) guide. +Go](https://grpc.io/docs/languages/go/quickstart/) guide. 
Installation ------------ @@ -29,7 +29,8 @@ If you are trying to access grpc-go from within China, please see the Prerequisites ------------- -gRPC-Go requires Go 1.9 or later. +gRPC-Go officially supports the +[three latest major releases of Go](https://golang.org/doc/devel/release.html). Documentation ------------- @@ -93,6 +94,22 @@ To build Go code, there are several options: #### Compiling error, undefined: grpc.SupportPackageIsVersion +##### If you are using Go modules: + +Please ensure your gRPC-Go version is `require`d at the appropriate version in +the same module containing the generated `.pb.go` files. For example, +`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: + +``` +module + +require ( + google.golang.org/grpc v1.27.0 +) +``` + +##### If you are *not* using Go modules: + Please update proto package, gRPC package and rebuild the proto files: - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` - `go get -u google.golang.org/grpc` @@ -114,6 +131,10 @@ possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown + 1. Keepalive parameters caused connection shutdown, for example if you have configured + your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + to allow longer RPC calls to finish. It can be tricky to debug this because the error happens on the client side but the root cause of the connection being closed is on the server side. Turn on diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go new file mode 100644 index 000000000..ee5c51e6c --- /dev/null +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -0,0 +1,76 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package attributes defines a generic key/value store used in various gRPC +// components. +// +// All APIs in this package are EXPERIMENTAL. +package attributes + +import "fmt" + +// Attributes is an immutable struct for storing and retrieving generic +// key/value pairs. Keys must be hashable, and users should define their own +// types for keys. +type Attributes struct { + m map[interface{}]interface{} +} + +// New returns a new Attributes containing all key/value pairs in kvs. If the +// same key appears multiple times, the last value overwrites all previous +// values for that key. Panics if len(kvs) is not even. 
+func New(kvs ...interface{}) *Attributes { + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)} + for i := 0; i < len(kvs)/2; i++ { + a.m[kvs[i*2]] = kvs[i*2+1] + } + return a +} + +// WithValues returns a new Attributes containing all key/value pairs in a and +// kvs. Panics if len(kvs) is not even. If the same key appears multiple +// times, the last value overwrites all previous values for that key. To +// remove an existing key, use a nil value. +func (a *Attributes) WithValues(kvs ...interface{}) *Attributes { + if a == nil { + return New(kvs...) + } + if len(kvs)%2 != 0 { + panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs))) + } + n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)} + for k, v := range a.m { + n.m[k] = v + } + for i := 0; i < len(kvs)/2; i++ { + n.m[kvs[i*2]] = kvs[i*2+1] + } + return n +} + +// Value returns the value associated with these attributes for key, or nil if +// no value is associated with key. +func (a *Attributes) Value(key interface{}) interface{} { + if a == nil { + return nil + } + return a.m[key] +} diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 97c6e2568..ff7c3ee6f 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -23,16 +23,36 @@ package grpc import ( "time" + + "google.golang.org/grpc/backoff" ) // DefaultBackoffConfig uses values specified for backoff in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. var DefaultBackoffConfig = BackoffConfig{ MaxDelay: 120 * time.Second, } // BackoffConfig defines the parameters for the default gRPC backoff strategy. +// +// Deprecated: use ConnectParams instead. Will be supported throughout 1.x. type BackoffConfig struct { // MaxDelay is the upper bound of backoff delay. MaxDelay time.Duration } + +// ConnectParams defines the parameters for connecting and retrying. Users are +// encouraged to use this instead of the BackoffConfig type defined above. See +// here for more details: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This API is EXPERIMENTAL. +type ConnectParams struct { + // Backoff specifies the configuration options for connection backoff. + Backoff backoff.Config + // MinConnectTimeout is the minimum amount of time we are willing to give a + // connection to complete. + MinConnectTimeout time.Duration +} diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go new file mode 100644 index 000000000..0787d0b50 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
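Since the new `attributes` package above is consumed by resolvers and balancers elsewhere in this vendored tree, a minimal usage sketch may help reviewers; the key type and values below are illustrative only and not part of this change.

```go
// Sketch only: exercising attributes.New / WithValues / Value as documented above.
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// regionKey is a user-defined, hashable key type, as the package docs recommend.
type regionKey struct{}

func main() {
	a := attributes.New(regionKey{}, "us-east-1")
	b := a.WithValues(regionKey{}, "eu-west-1") // last value for a key wins; a itself is immutable

	fmt.Println(a.Value(regionKey{})) // us-east-1
	fmt.Println(b.Value(regionKey{})) // eu-west-1
}
```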
+ * + */ + +// Package backoff provides configuration options for backoff. +// +// More details can be found at: +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// All APIs in this package are experimental. +package backoff + +import "time" + +// Config defines the configuration options for backoff. +type Config struct { + // BaseDelay is the amount of time to backoff after the first failure. + BaseDelay time.Duration + // Multiplier is the factor with which to multiply backoffs after a + // failed retry. Should ideally be greater than 1. + Multiplier float64 + // Jitter is the factor with which backoffs are randomized. + Jitter float64 + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration +} + +// DefaultConfig is a backoff configuration with the default values specfied +// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +// +// This should be useful for callers who want to configure backoff with +// non-default values only for a subset of the options. +var DefaultConfig = Config{ + BaseDelay: 1.0 * time.Second, + Multiplier: 1.6, + Jitter: 0.2, + MaxDelay: 120 * time.Second, +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go deleted file mode 100644 index a8eb0f476..000000000 --- a/vendor/google.golang.org/grpc/balancer.go +++ /dev/null @@ -1,391 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "net" - "sync" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/naming" - "google.golang.org/grpc/status" -) - -// Address represents a server the client connects to. -// -// Deprecated: please use package balancer. -type Address struct { - // Addr is the server address on which a connection will be established. - Addr string - // Metadata is the information associated with Addr, which may be used - // to make load balancing decision. - Metadata interface{} -} - -// BalancerConfig specifies the configurations for Balancer. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type BalancerConfig struct { - // DialCreds is the transport credential the Balancer implementation can - // use to dial to a remote load balancer server. The Balancer implementations - // can ignore this if it does not need to talk to another party securely. - DialCreds credentials.TransportCredentials - // Dialer is the custom dialer the Balancer implementation can use to dial - // to a remote load balancer server. The Balancer implementations - // can ignore this if it doesn't need to talk to remote balancer. - Dialer func(context.Context, string) (net.Conn, error) -} - -// BalancerGetOptions configures a Get call. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. 
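The hunks above deprecate `DefaultBackoffConfig`/`BackoffConfig` in favor of `ConnectParams` plus the new `backoff.Config`. A hedged migration sketch, assuming `grpc.WithConnectParams` is available in the gRPC version being vendored here (it is not part of these hunks), could look like:

```go
// Sketch only: dialing with the new ConnectParams/backoff.Config instead of
// the deprecated BackoffConfig. Target and options are placeholders.
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	bc := backoff.DefaultConfig // BaseDelay 1s, Multiplier 1.6, Jitter 0.2, MaxDelay 120s
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial(
		"example.invalid:443", // placeholder target
		grpc.WithInsecure(),   // illustration only; use real credentials in practice
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 5 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```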
-type BalancerGetOptions struct { - // BlockingWait specifies whether Get should block when there is no - // connected address. - BlockingWait bool -} - -// Balancer chooses network addresses for RPCs. -// -// Deprecated: please use package balancer. May be removed in a future 1.x release. -type Balancer interface { - // Start does the initialization work to bootstrap a Balancer. For example, - // this function may start the name resolution and watch the updates. It will - // be called when dialing. - Start(target string, config BalancerConfig) error - // Up informs the Balancer that gRPC has a connection to the server at - // addr. It returns down which is called once the connection to addr gets - // lost or closed. - // TODO: It is not clear how to construct and take advantage of the meaningful error - // parameter for down. Need realistic demands to guide. - Up(addr Address) (down func(error)) - // Get gets the address of a server for the RPC corresponding to ctx. - // i) If it returns a connected address, gRPC internals issues the RPC on the - // connection to this address; - // ii) If it returns an address on which the connection is under construction - // (initiated by Notify(...)) but not connected, gRPC internals - // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or - // Shutdown state; - // or - // * issues RPC on the connection otherwise. - // iii) If it returns an address on which the connection does not exist, gRPC - // internals treats it as an error and will fail the corresponding RPC. - // - // Therefore, the following is the recommended rule when writing a custom Balancer. - // If opts.BlockingWait is true, it should return a connected address or - // block if there is no connected address. It should respect the timeout or - // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast - // RPCs), it should return an address it has notified via Notify(...) immediately - // instead of blocking. - // - // The function returns put which is called once the rpc has completed or failed. - // put can collect and report RPC stats to a remote load balancer. - // - // This function should only return the errors Balancer cannot recover by itself. - // gRPC internals will fail the RPC if an error is returned. - Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) - // Notify returns a channel that is used by gRPC internals to watch the addresses - // gRPC needs to connect. The addresses might be from a name resolver or remote - // load balancer. gRPC internals will compare it with the existing connected - // addresses. If the address Balancer notified is not in the existing connected - // addresses, gRPC starts to connect the address. If an address in the existing - // connected addresses is not in the notification list, the corresponding connection - // is shutdown gracefully. Otherwise, there are no operations to take. Note that - // the Address slice must be the full list of the Addresses which should be connected. - // It is NOT delta. - Notify() <-chan []Address - // Close shuts down the balancer. - Close() error -} - -// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch -// the name resolution updates and updates the addresses available correspondingly. -// -// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release. 
-func RoundRobin(r naming.Resolver) Balancer { - return &roundRobin{r: r} -} - -type addrInfo struct { - addr Address - connected bool -} - -type roundRobin struct { - r naming.Resolver - w naming.Watcher - addrs []*addrInfo // all the addresses the client should potentially connect - mu sync.Mutex - addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. - next int // index of the next address to return for Get() - waitCh chan struct{} // the channel to block when there is no connected address available - done bool // The Balancer is closed. -} - -func (rr *roundRobin) watchAddrUpdates() error { - updates, err := rr.w.Next() - if err != nil { - grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) - return err - } - rr.mu.Lock() - defer rr.mu.Unlock() - for _, update := range updates { - addr := Address{ - Addr: update.Addr, - Metadata: update.Metadata, - } - switch update.Op { - case naming.Add: - var exist bool - for _, v := range rr.addrs { - if addr == v.addr { - exist = true - grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) - break - } - } - if exist { - continue - } - rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) - case naming.Delete: - for i, v := range rr.addrs { - if addr == v.addr { - copy(rr.addrs[i:], rr.addrs[i+1:]) - rr.addrs = rr.addrs[:len(rr.addrs)-1] - break - } - } - default: - grpclog.Errorln("Unknown update.Op ", update.Op) - } - } - // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. - open := make([]Address, len(rr.addrs)) - for i, v := range rr.addrs { - open[i] = v.addr - } - if rr.done { - return ErrClientConnClosing - } - select { - case <-rr.addrCh: - default: - } - rr.addrCh <- open - return nil -} - -func (rr *roundRobin) Start(target string, config BalancerConfig) error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return ErrClientConnClosing - } - if rr.r == nil { - // If there is no name resolver installed, it is not needed to - // do name resolution. In this case, target is added into rr.addrs - // as the only address available and rr.addrCh stays nil. - rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) - return nil - } - w, err := rr.r.Resolve(target) - if err != nil { - return err - } - rr.w = w - rr.addrCh = make(chan []Address, 1) - go func() { - for { - if err := rr.watchAddrUpdates(); err != nil { - return - } - } - }() - return nil -} - -// Up sets the connected state of addr and sends notification if there are pending -// Get() calls. -func (rr *roundRobin) Up(addr Address) func(error) { - rr.mu.Lock() - defer rr.mu.Unlock() - var cnt int - for _, a := range rr.addrs { - if a.addr == addr { - if a.connected { - return nil - } - a.connected = true - } - if a.connected { - cnt++ - } - } - // addr is only one which is connected. Notify the Get() callers who are blocking. - if cnt == 1 && rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - return func(err error) { - rr.down(addr, err) - } -} - -// down unsets the connected state of addr. -func (rr *roundRobin) down(addr Address, err error) { - rr.mu.Lock() - defer rr.mu.Unlock() - for _, a := range rr.addrs { - if addr == a.addr { - a.connected = false - break - } - } -} - -// Get returns the next addr in the rotation. 
-func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { - var ch chan struct{} - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - if !opts.BlockingWait { - if len(rr.addrs) == 0 { - rr.mu.Unlock() - err = status.Errorf(codes.Unavailable, "there is no address available") - return - } - // Returns the next addr on rr.addrs for failfast RPCs. - addr = rr.addrs[rr.next].addr - rr.next++ - rr.mu.Unlock() - return - } - // Wait on rr.waitCh for non-failfast RPCs. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - for { - select { - case <-ctx.Done(): - err = ctx.Err() - return - case <-ch: - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - // The newly added addr got removed by Down() again. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - } - } -} - -func (rr *roundRobin) Notify() <-chan []Address { - return rr.addrCh -} - -func (rr *roundRobin) Close() error { - rr.mu.Lock() - defer rr.mu.Unlock() - if rr.done { - return errBalancerClosed - } - rr.done = true - if rr.w != nil { - rr.w.Close() - } - if rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - if rr.addrCh != nil { - close(rr.addrCh) - } - return nil -} - -// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. -// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() -// returns the only address Up by resetTransport(). -type pickFirst struct { - *roundRobin -} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index c266f4ec1..e75b28436 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -111,12 +111,24 @@ type NewSubConnOptions struct { // CredsBundle is the credentials bundle that will be used in the created // SubConn. If it's nil, the original creds from grpc DialOptions will be // used. + // + // Deprecated: Use the Attributes field in resolver.Address to pass + // arbitrary data to the credential handshaker. CredsBundle credentials.Bundle // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool } +// State contains the balancer's state relevant to the gRPC ClientConn. +type State struct { + // State contains the connectivity state of the balancer, which is used to + // determine the state of the ClientConn. + ConnectivityState connectivity.State + // Picker is used to choose connections (SubConns) for RPCs. 
+ Picker Picker +} + // ClientConn represents a gRPC ClientConn. // // This interface is to be implemented by gRPC. Users should not need a @@ -132,15 +144,15 @@ type ClientConn interface { // The SubConn will be shutdown. RemoveSubConn(SubConn) - // UpdateBalancerState is called by balancer to notify gRPC that some internal - // state in balancer has changed. + // UpdateState notifies gRPC that the balancer's internal state has + // changed. // - // gRPC will update the connectivity state of the ClientConn, and will call pick - // on the new picker to pick new SubConn. - UpdateBalancerState(s connectivity.State, p Picker) + // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. + UpdateState(State) // ResolveNow is called by balancer to notify gRPC to do a name resolving. - ResolveNow(resolver.ResolveNowOption) + ResolveNow(resolver.ResolveNowOptions) // Target returns the dial target for this ClientConn. // @@ -185,11 +197,14 @@ type ConfigParser interface { ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) } -// PickOptions contains addition information for the Pick operation. -type PickOptions struct { +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { // FullMethodName is the method name that NewClientStream() is called // with. The canonical format is /service/Method. FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context } // DoneInfo contains additional information for done. @@ -211,50 +226,65 @@ type DoneInfo struct { var ( // ErrNoSubConnAvailable indicates no SubConn is available for pick(). - // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + // gRPC will block the RPC until a new picker is available via UpdateState(). ErrNoSubConnAvailable = errors.New("no SubConn is available") // ErrTransientFailure indicates all SubConns are in TransientFailure. // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + // + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. ErrTransientFailure = errors.New("all SubConns are in TransientFailure") ) +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. + // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) +} + +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } + // Picker is used by gRPC to pick a SubConn to send an RPC. // Balancer is expected to generate a new picker from its snapshot every time its // internal state has changed. 
// -// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). type Picker interface { - // Pick returns the SubConn to be used to send the RPC. - // The returned SubConn must be one returned by NewSubConn(). + // Pick returns the connection to use for this RPC and related information. // - // This functions is expected to return: - // - a SubConn that is known to be READY; - // - ErrNoSubConnAvailable if no SubConn is available, but progress is being - // made (for example, some SubConn is in CONNECTING mode); - // - other errors if no active connecting is happening (for example, all SubConn - // are in TRANSIENT_FAILURE mode). + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). // - // If a SubConn is returned: - // - If it is READY, gRPC will send the RPC on it; - // - If it is not ready, or becomes not ready after it's returned, gRPC will - // block until UpdateBalancerState() is called and will call pick on the - // new picker. The done function returned from Pick(), if not nil, will be - // called with nil error, no bytes sent and no bytes received. + // If an error is returned: // - // If the returned error is not nil: - // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() - // - If the error is ErrTransientFailure: - // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() - // is called to pick again; - // - Otherwise, RPC will fail with unavailable error. - // - Else (error is other non-nil error): - // - The RPC will fail with unavailable error. + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). // - // The returned done() function will be called once the rpc has finished, - // with the final status of that RPC. If the SubConn returned is not a - // valid SubConn type, done may not be called. done may be nil if balancer - // doesn't care about the RPC status. - Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. + // + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. + Pick(info PickInfo) (PickResult, error) } // Balancer takes input from gRPC, manages SubConns, and collects and aggregates @@ -262,38 +292,43 @@ type Picker interface { // // It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. // -// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed -// to be called synchronously from the same goroutine. -// There's no guarantee on picker.Pick, it may be called anytime. +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. type Balancer interface { - // HandleSubConnStateChange is called by gRPC when the connectivity state - // of sc has changed. 
- // Balancer is expected to aggregate all the state of SubConn and report - // that back to gRPC. - // Balancer should also generate and update Pickers when its internal state has - // been changed by the new state. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateSubConnState will be called instead. - HandleSubConnStateChange(sc SubConn, state connectivity.State) - // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to - // balancers. - // Balancer can create new SubConn or remove SubConn with the addresses. - // An empty address slice and a non-nil error will be passed if the resolver returns - // non-nil error to gRPC. - // - // Deprecated: if V2Balancer is implemented by the Balancer, - // UpdateClientConnState will be called instead. - HandleResolvedAddrs([]resolver.Address, error) + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + UpdateSubConnState(SubConn, SubConnState) // Close closes the balancer. The balancer is not required to call // ClientConn.RemoveSubConn for its existing SubConns. Close() } +// V2Balancer is temporarily defined for backward compatibility reasons. +// +// Deprecated: use Balancer directly instead. +type V2Balancer = Balancer + +// V2Picker is temporarily defined for backward compatibility reasons. +// +// Deprecated: use Picker directly instead. +type V2Picker = Picker + // SubConnState describes the state of a SubConn. type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. ConnectivityState connectivity.State - // TODO: add last connection error + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error } // ClientConnState describes the state of a ClientConn relevant to the @@ -305,30 +340,17 @@ type ClientConnState struct { BalancerConfig serviceconfig.LoadBalancingConfig } -// V2Balancer is defined for documentation purposes. If a Balancer also -// implements V2Balancer, its UpdateClientConnState method will be called -// instead of HandleResolvedAddrs and its UpdateSubConnState will be called -// instead of HandleSubConnStateChange. -type V2Balancer interface { - // UpdateClientConnState is called by gRPC when the state of the ClientConn - // changes. - UpdateClientConnState(ClientConnState) - // UpdateSubConnState is called by gRPC when the state of a SubConn - // changes. - UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. - Close() -} +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") // ConnectivityStateEvaluator takes the connectivity states of multiple SubConns // and returns one aggregated connectivity state. // // It's not thread safe. 
type ConnectivityStateEvaluator struct { - numReady uint64 // Number of addrConns in ready state. - numConnecting uint64 // Number of addrConns in connecting state. - numTransientFailure uint64 // Number of addrConns in transientFailure. + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. } // RecordTransition records state change happening in subConn and based on that @@ -348,8 +370,6 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numReady += updateVal case connectivity.Connecting: cse.numConnecting += updateVal - case connectivity.TransientFailure: - cse.numTransientFailure += updateVal } } diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 1af88f0a3..d62b4b606 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -19,7 +19,8 @@ package base import ( - "context" + "errors" + "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" @@ -34,19 +35,20 @@ type baseBuilder struct { } func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &baseBalancer{ + bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, subConns: make(map[resolver.Address]balancer.SubConn), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, - // Initialize picker to a picker that always return - // ErrNoSubConnAvailable, because when state of a SubConn changes, we - // may call UpdateBalancerState with this picker. - picker: NewErrPicker(balancer.ErrNoSubConnAvailable), - config: bb.config, + config: bb.config, } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. + bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable) + return bal } func (bb *baseBuilder) Name() string { @@ -64,18 +66,36 @@ type baseBalancer struct { scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure } -func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - panic("not implemented") +func (b *baseBalancer) ResolverError(err error) { + b.resolverErr = err + if len(b.subConns) == 0 { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.picker, + }) } -func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) { - // TODO: handle s.ResolverState.Err (log if not nil) once implemented. +func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // TODO: handle s.ResolverState.ServiceConfig? if grpclog.V(2) { grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) } + // Successful resolution; clear resolver error and ensure we return nil. + b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. 
addrsSet := make(map[resolver.Address]struct{}) for _, a := range s.ResolverState.Addresses { @@ -98,33 +118,52 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) { b.cc.RemoveSubConn(sc) delete(b.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in HandleSubConnStateChange. + // The entry will be deleted in UpdateSubConnState. } } + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as an resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + return nil +} + +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) } // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker with ErrTransientFailure if the balancer is in TransientFailure, +// - errPicker if the balancer is in TransientFailure, // - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { - b.picker = NewErrPicker(balancer.ErrTransientFailure) + b.picker = NewErrPicker(b.mergeErrors()) return } - readySCs := make(map[resolver.Address]balancer.SubConn) + readySCs := make(map[balancer.SubConn]SubConnInfo) // Filter out all ready SCs from full subConn map. for addr, sc := range b.subConns { if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { - readySCs[addr] = sc + readySCs[sc] = SubConnInfo{Address: addr} } } - b.picker = b.pickerBuilder.Build(readySCs) -} - -func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - panic("not implemented") + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { @@ -139,6 +178,12 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } return } + if oldS == connectivity.TransientFailure && s == connectivity.Connecting { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return + } b.scStates[sc] = s switch s { case connectivity.Idle: @@ -147,22 +192,23 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. 
+ b.connErr = state.ConnectionError } - oldAggrState := b.state b.state = b.csEvltr.RecordTransition(oldS, s) // Regenerate picker when one of the following happens: - // - this sc became ready from not-ready - // - this sc became not-ready from ready - // - the aggregated state of balancer became TransientFailure from non-TransientFailure - // - the aggregated state of balancer became non-TransientFailure from TransientFailure + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) if (s == connectivity.Ready) != (oldS == connectivity.Ready) || - (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.state == connectivity.TransientFailure { b.regeneratePicker() } - b.cc.UpdateBalancerState(b.state, b.picker) + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) } // Close is a nop because base balancer doesn't have internal state to clean up, @@ -170,15 +216,20 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su func (b *baseBalancer) Close() { } -// NewErrPicker returns a picker that always returns err on Pick(). +// NewErrPicker returns a Picker that always returns err on Pick(). func NewErrPicker(err error) balancer.Picker { return &errPicker{err: err} } +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. +// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker + type errPicker struct { err error // Pick() always returns this err. } -func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - return nil, nil, p.err +func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go index 34b1f2994..c4fc89111 100644 --- a/vendor/google.golang.org/grpc/balancer/base/base.go +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -37,15 +37,22 @@ import ( // PickerBuilder creates balancer.Picker. type PickerBuilder interface { - // Build takes a slice of ready SubConns, and returns a picker that will be - // used by gRPC to pick a SubConn. - Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.Picker } -// NewBalancerBuilder returns a balancer builder. The balancers -// built by this builder will use the picker builder to build pickers. -func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { - return NewBalancerBuilderWithConfig(name, pb, Config{}) +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. + ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn } // Config contains the config info about the base balancer builder. @@ -54,11 +61,22 @@ type Config struct { HealthCheck bool } -// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config. 
-func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder { +// NewBalancerBuilder returns a base balancer builder configured by the provided config. +func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { return &baseBuilder{ name: name, pickerBuilder: pb, config: config, } } + +// NewBalancerBuilderV2 is temporarily defined for backward compatibility +// reasons. +// +// Deprecated: use NewBalancerBuilder instead. +var NewBalancerBuilderV2 = NewBalancerBuilder + +// V2PickerBuilder is temporarily defined for backward compatibility reasons. +// +// Deprecated: use PickerBuilder instead. +type V2PickerBuilder = PickerBuilder diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 000000000..a24264a34 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. +package state + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. +type State struct { + // BalancerAddresses contains the remote load balancer address(es). If + // set, overrides any resolver-provided addresses with Type of GRPCLB. + BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s. s's +// data should not be mutated after calling Set. +func Set(state resolver.State, s *State) resolver.State { + state.Attributes = state.Attributes.WithValues(key, s) + return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. +func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 29f7a4ddd..a02b372cf 100644 --- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,14 +22,12 @@ package roundrobin import ( - "context" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/grpcrand" - "google.golang.org/grpc/resolver" ) // Name is the name of round_robin balancer. @@ -37,7 +35,7 @@ const Name = "round_robin" // newBuilder creates a new roundrobin balancer builder. 
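The new `balancer/grpclb/state` package added above is meant to be populated by name resolvers via `resolver.State` attributes. A small sketch of that flow, with made-up addresses, under the assumption that `resolver.State` carries the `Attributes` field introduced alongside the attributes package:

```go
// Sketch only: a resolver attaching grpclb balancer addresses to the state it reports.
package main

import (
	"fmt"

	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
	"google.golang.org/grpc/resolver"
)

func main() {
	s := resolver.State{
		Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}}, // backend addresses
	}
	// Attach the remote load balancer address(es) via Attributes.
	s = grpclbstate.Set(s, &grpclbstate.State{
		BalancerAddresses: []resolver.Address{{Addr: "10.0.0.254:443"}},
	})

	if gs := grpclbstate.Get(s); gs != nil {
		fmt.Println(gs.BalancerAddresses[0].Addr) // 10.0.0.254:443
	}
}
```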
func newBuilder() balancer.Builder { - return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) } func init() { @@ -46,13 +44,13 @@ func init() { type rrPickerBuilder struct{} -func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker { - grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs) - if len(readySCs) == 0 { +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { + grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info) + if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } var scs []balancer.SubConn - for _, sc := range readySCs { + for sc := range info.ReadySCs { scs = append(scs, sc) } return &rrPicker{ @@ -74,10 +72,10 @@ type rrPicker struct { next int } -func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { +func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { p.mu.Lock() sc := p.subConns[p.next] p.next = (p.next + 1) % len(p.subConns) p.mu.Unlock() - return sc, nil, nil + return balancer.PickResult{SubConn: sc}, nil } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 8df4095ca..807d19197 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -24,7 +24,9 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) @@ -32,64 +34,17 @@ import ( type scStateUpdate struct { sc balancer.SubConn state connectivity.State -} - -// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple. -// TODO make a general purpose buffer that uses interface{}. -type scStateUpdateBuffer struct { - c chan *scStateUpdate - mu sync.Mutex - backlog []*scStateUpdate -} - -func newSCStateUpdateBuffer() *scStateUpdateBuffer { - return &scStateUpdateBuffer{ - c: make(chan *scStateUpdate, 1), - } -} - -func (b *scStateUpdateBuffer) put(t *scStateUpdate) { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) == 0 { - select { - case b.c <- t: - return - default: - } - } - b.backlog = append(b.backlog, t) -} - -func (b *scStateUpdateBuffer) load() { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) > 0 { - select { - case b.c <- b.backlog[0]: - b.backlog[0] = nil - b.backlog = b.backlog[1:] - default: - } - } -} - -// get returns the channel that the scStateUpdate will be sent to. -// -// Upon receiving, the caller should call load to send another -// scStateChangeTuple onto the channel if there is any. -func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { - return b.c + err error } // ccBalancerWrapper is a wrapper on top of cc for balancers. // It implements balancer.ClientConn interface. 
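The roundrobin changes above show the shape of the new `base.PickerBuilder` / `balancer.Picker` API. For reviewers writing custom policies against this vendored version, a minimal sketch follows; the "first_ready" policy and all names are illustrative, and `balancer.Register` is assumed from the balancer package (not part of these hunks).

```go
// Sketch only: a trivial picker built on the new PickerBuildInfo/PickResult API.
package firstready

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

const Name = "first_ready"

func init() {
	balancer.Register(base.NewBalancerBuilder(Name, &pickerBuilder{}, base.Config{HealthCheck: true}))
}

type pickerBuilder struct{}

func (*pickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	var scs []balancer.SubConn
	for sc := range info.ReadySCs { // keys are the ready SubConns; values carry the Address used to create them
		scs = append(scs, sc)
	}
	return &picker{scs: scs}
}

type picker struct {
	scs []balancer.SubConn
}

// Pick never blocks; it always hands back the first ready SubConn it was built with.
func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.scs[0]}, nil
}
```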
type ccBalancerWrapper struct { - cc *ClientConn - balancer balancer.Balancer - stateChangeQueue *scStateUpdateBuffer - ccUpdateCh chan *balancer.ClientConnState - done chan struct{} + cc *ClientConn + balancerMu sync.Mutex // synchronizes calls to the balancer + balancer balancer.Balancer + scBuffer *buffer.Unbounded + done *grpcsync.Event mu sync.Mutex subConns map[*acBalancerWrapper]struct{} @@ -97,11 +52,10 @@ type ccBalancerWrapper struct { func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ - cc: cc, - stateChangeQueue: newSCStateUpdateBuffer(), - ccUpdateCh: make(chan *balancer.ClientConnState, 1), - done: make(chan struct{}), - subConns: make(map[*acBalancerWrapper]struct{}), + cc: cc, + scBuffer: buffer.NewUnbounded(), + done: grpcsync.NewEvent(), + subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() ccb.balancer = b.Build(ccb, bopts) @@ -113,36 +67,19 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.stateChangeQueue.get(): - ccb.stateChangeQueue.load() - select { - case <-ccb.done: - ccb.balancer.Close() - return - default: - } - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateSubConnState(t.sc, balancer.SubConnState{ConnectivityState: t.state}) - } else { - ccb.balancer.HandleSubConnStateChange(t.sc, t.state) - } - case s := <-ccb.ccUpdateCh: - select { - case <-ccb.done: - ccb.balancer.Close() - return - default: - } - if ub, ok := ccb.balancer.(balancer.V2Balancer); ok { - ub.UpdateClientConnState(*s) - } else { - ccb.balancer.HandleResolvedAddrs(s.ResolverState.Addresses, nil) + case t := <-ccb.scBuffer.Get(): + ccb.scBuffer.Load() + if ccb.done.HasFired() { + break } - case <-ccb.done: + ccb.balancerMu.Lock() + su := t.(*scStateUpdate) + ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err}) + ccb.balancerMu.Unlock() + case <-ccb.done.Done(): } - select { - case <-ccb.done: + if ccb.done.HasFired() { ccb.balancer.Close() ccb.mu.Lock() scs := ccb.subConns @@ -151,19 +88,17 @@ func (ccb *ccBalancerWrapper) watcher() { for acbw := range scs { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } - ccb.UpdateBalancerState(connectivity.Connecting, nil) + ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) return - default: } - ccb.cc.firstResolveEvent.Fire() } } func (ccb *ccBalancerWrapper) close() { - close(ccb.done) + ccb.done.Fire() } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. tearDown() generates a state change with Shutdown state, we @@ -174,30 +109,23 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co if sc == nil { return } - ccb.stateChangeQueue.put(&scStateUpdate{ + ccb.scBuffer.Put(&scStateUpdate{ sc: sc, state: s, + err: err, }) } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) { - if ccb.cc.curBalancerName != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. 
- s := &ccs.ResolverState - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } - select { - case <-ccb.ccUpdateCh: - default: - } - ccb.ccUpdateCh <- ccs +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.balancerMu.Lock() + defer ccb.balancerMu.Unlock() + return ccb.balancer.UpdateClientConnState(*ccs) +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.balancerMu.Lock() + ccb.balancer.ResolverError(err) + ccb.balancerMu.Unlock() } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { @@ -235,7 +163,7 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) } -func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { +func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { ccb.mu.Lock() defer ccb.mu.Unlock() if ccb.subConns == nil { @@ -246,11 +174,11 @@ func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balanc // case where we wait for ready and then perform an RPC. If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(p) - ccb.cc.csMgr.updateState(s) + ccb.cc.blockingpicker.updatePicker(s.Picker) + ccb.cc.csMgr.updateState(s.ConnectivityState) } -func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) { +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { ccb.cc.resolveNow(o) } @@ -292,7 +220,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { ac, err := cc.newAddrConn(addrs, opts) if err != nil { - grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } acbw.ac = ac diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go deleted file mode 100644 index 66e9a44ac..000000000 --- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go +++ /dev/null @@ -1,334 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" -) - -type balancerWrapperBuilder struct { - b Balancer // The v1 balancer. 
-} - -func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { - bwb.b.Start(opts.Target.Endpoint, BalancerConfig{ - DialCreds: opts.DialCreds, - Dialer: opts.Dialer, - }) - _, pickfirst := bwb.b.(*pickFirst) - bw := &balancerWrapper{ - balancer: bwb.b, - pickfirst: pickfirst, - cc: cc, - targetAddr: opts.Target.Endpoint, - startCh: make(chan struct{}), - conns: make(map[resolver.Address]balancer.SubConn), - connSt: make(map[balancer.SubConn]*scState), - csEvltr: &balancer.ConnectivityStateEvaluator{}, - state: connectivity.Idle, - } - cc.UpdateBalancerState(connectivity.Idle, bw) - go bw.lbWatcher() - return bw -} - -func (bwb *balancerWrapperBuilder) Name() string { - return "wrapper" -} - -type scState struct { - addr Address // The v1 address type. - s connectivity.State - down func(error) -} - -type balancerWrapper struct { - balancer Balancer // The v1 balancer. - pickfirst bool - - cc balancer.ClientConn - targetAddr string // Target without the scheme. - - mu sync.Mutex - conns map[resolver.Address]balancer.SubConn - connSt map[balancer.SubConn]*scState - // This channel is closed when handling the first resolver result. - // lbWatcher blocks until this is closed, to avoid race between - // - NewSubConn is created, cc wants to notify balancer of state changes; - // - Build hasn't return, cc doesn't have access to balancer. - startCh chan struct{} - - // To aggregate the connectivity state. - csEvltr *balancer.ConnectivityStateEvaluator - state connectivity.State -} - -// lbWatcher watches the Notify channel of the balancer and manages -// connections accordingly. -func (bw *balancerWrapper) lbWatcher() { - <-bw.startCh - notifyCh := bw.balancer.Notify() - if notifyCh == nil { - // There's no resolver in the balancer. Connect directly. - a := resolver.Address{ - Addr: bw.targetAddr, - Type: resolver.Backend, - } - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: Address{Addr: bw.targetAddr}, - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - return - } - - for addrs := range notifyCh { - grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs) - if bw.pickfirst { - var ( - oldA resolver.Address - oldSC balancer.SubConn - ) - bw.mu.Lock() - for oldA, oldSC = range bw.conns { - break - } - bw.mu.Unlock() - if len(addrs) <= 0 { - if oldSC != nil { - // Teardown old sc. - bw.mu.Lock() - delete(bw.conns, oldA) - delete(bw.connSt, oldSC) - bw.mu.Unlock() - bw.cc.RemoveSubConn(oldSC) - } - continue - } - - var newAddrs []resolver.Address - for _, a := range addrs { - newAddr := resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - } - newAddrs = append(newAddrs, newAddr) - } - if oldSC == nil { - // Create new sc. - sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) - } else { - bw.mu.Lock() - // For pickfirst, there should be only one SubConn, so the - // address doesn't matter. All states updating (up and down) - // and picking should all happen on that only SubConn. - bw.conns[resolver.Address{}] = sc - bw.connSt[sc] = &scState{ - addr: addrs[0], // Use the first address. 
- s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } else { - bw.mu.Lock() - bw.connSt[oldSC].addr = addrs[0] - bw.mu.Unlock() - oldSC.UpdateAddresses(newAddrs) - } - } else { - var ( - add []resolver.Address // Addresses need to setup connections. - del []balancer.SubConn // Connections need to tear down. - ) - resAddrs := make(map[resolver.Address]Address) - for _, a := range addrs { - resAddrs[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, // All addresses from balancer are all backends. - ServerName: "", - Metadata: a.Metadata, - }] = a - } - bw.mu.Lock() - for a := range resAddrs { - if _, ok := bw.conns[a]; !ok { - add = append(add, a) - } - } - for a, c := range bw.conns { - if _, ok := resAddrs[a]; !ok { - del = append(del, c) - delete(bw.conns, a) - // Keep the state of this sc in bw.connSt until its state becomes Shutdown. - } - } - bw.mu.Unlock() - for _, a := range add { - sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) - if err != nil { - grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) - } else { - bw.mu.Lock() - bw.conns[a] = sc - bw.connSt[sc] = &scState{ - addr: resAddrs[a], - s: connectivity.Idle, - } - bw.mu.Unlock() - sc.Connect() - } - } - for _, c := range del { - bw.cc.RemoveSubConn(c) - } - } - } -} - -func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { - bw.mu.Lock() - defer bw.mu.Unlock() - scSt, ok := bw.connSt[sc] - if !ok { - return - } - if s == connectivity.Idle { - sc.Connect() - } - oldS := scSt.s - scSt.s = s - if oldS != connectivity.Ready && s == connectivity.Ready { - scSt.down = bw.balancer.Up(scSt.addr) - } else if oldS == connectivity.Ready && s != connectivity.Ready { - if scSt.down != nil { - scSt.down(errConnClosing) - } - } - sa := bw.csEvltr.RecordTransition(oldS, s) - if bw.state != sa { - bw.state = sa - } - bw.cc.UpdateBalancerState(bw.state, bw) - if s == connectivity.Shutdown { - // Remove state for this sc. - delete(bw.connSt, sc) - } -} - -func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - // There should be a resolver inside the balancer. - // All updates here, if any, are ignored. -} - -func (bw *balancerWrapper) Close() { - bw.mu.Lock() - defer bw.mu.Unlock() - select { - case <-bw.startCh: - default: - close(bw.startCh) - } - bw.balancer.Close() -} - -// The picker is the balancerWrapper itself. -// It either blocks or returns error, consistent with v1 balancer Get(). -func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) { - failfast := true // Default failfast is true. - if ss, ok := rpcInfoFromContext(ctx); ok { - failfast = ss.failfast - } - a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) - if err != nil { - return nil, nil, err - } - if p != nil { - done = func(balancer.DoneInfo) { p() } - defer func() { - if err != nil { - p() - } - }() - } - - bw.mu.Lock() - defer bw.mu.Unlock() - if bw.pickfirst { - // Get the first sc in conns. 
- for _, sc := range bw.conns { - return sc, done, nil - } - return nil, nil, balancer.ErrNoSubConnAvailable - } - sc, ok1 := bw.conns[resolver.Address{ - Addr: a.Addr, - Type: resolver.Backend, - ServerName: "", - Metadata: a.Metadata, - }] - s, ok2 := bw.connSt[sc] - if !ok1 || !ok2 { - // This can only happen due to a race where Get() returned an address - // that was subsequently removed by Notify. In this case we should - // retry always. - return nil, nil, balancer.ErrNoSubConnAvailable - } - switch s.s { - case connectivity.Ready, connectivity.Idle: - return sc, done, nil - case connectivity.Shutdown, connectivity.TransientFailure: - // If the returned sc has been shut down or is in transient failure, - // return error, and this RPC will fail or wait for another picker (if - // non-failfast). - return nil, nil, balancer.ErrTransientFailure - default: - // For other states (connecting or unknown), the v1 balancer would - // traditionally wait until ready and then issue the RPC. Returning - // ErrNoSubConnAvailable will be a slight improvement in that it will - // allow the balancer to choose another address in case others are - // connected. - return nil, nil, balancer.ErrNoSubConnAvailable - } -} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index d8bba16a4..f826ec769 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -1,18 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto +// source: grpc/binlog/v1/binarylog.proto -package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1" +package grpc_binarylog_v1 import ( fmt "fmt" - proto "github.com/golang/protobuf/proto" - - math "math" - duration "github.com/golang/protobuf/ptypes/duration" - timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. @@ -24,7 +20,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package // Enumerates the type of event // Note the terminology is different from the RPC semantics @@ -70,6 +66,7 @@ var GrpcLogEntry_EventType_name = map[int32]string{ 6: "EVENT_TYPE_SERVER_TRAILER", 7: "EVENT_TYPE_CANCEL", } + var GrpcLogEntry_EventType_value = map[string]int32{ "EVENT_TYPE_UNKNOWN": 0, "EVENT_TYPE_CLIENT_HEADER": 1, @@ -84,8 +81,9 @@ var GrpcLogEntry_EventType_value = map[string]int32{ func (x GrpcLogEntry_EventType) String() string { return proto.EnumName(GrpcLogEntry_EventType_name, int32(x)) } + func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0} + return fileDescriptor_b7972e58de45083a, []int{0, 0} } // Enumerates the entity that generates the log entry @@ -102,6 +100,7 @@ var GrpcLogEntry_Logger_name = map[int32]string{ 1: "LOGGER_CLIENT", 2: "LOGGER_SERVER", } + var GrpcLogEntry_Logger_value = map[string]int32{ "LOGGER_UNKNOWN": 0, "LOGGER_CLIENT": 1, @@ -111,8 +110,9 @@ var GrpcLogEntry_Logger_value = map[string]int32{ func (x GrpcLogEntry_Logger) String() string { return proto.EnumName(GrpcLogEntry_Logger_name, int32(x)) } + func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1} + return fileDescriptor_b7972e58de45083a, []int{0, 1} } type Address_Type int32 @@ -134,6 +134,7 @@ var Address_Type_name = map[int32]string{ 2: "TYPE_IPV6", 3: "TYPE_UNIX", } + var Address_Type_value = map[string]int32{ "TYPE_UNKNOWN": 0, "TYPE_IPV4": 1, @@ -144,8 +145,9 @@ var Address_Type_value = map[string]int32{ func (x Address_Type) String() string { return proto.EnumName(Address_Type_name, int32(x)) } + func (Address_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0} + return fileDescriptor_b7972e58de45083a, []int{7, 0} } // Log entry we store in binary logs @@ -191,16 +193,17 @@ func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} } func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) } func (*GrpcLogEntry) ProtoMessage() {} func (*GrpcLogEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{0} + return fileDescriptor_b7972e58de45083a, []int{0} } + func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b) } func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic) } -func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_GrpcLogEntry.Merge(dst, src) +func (m *GrpcLogEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_GrpcLogEntry.Merge(m, src) } func (m *GrpcLogEntry) XXX_Size() int { return xxx_messageInfo_GrpcLogEntry.Size(m) @@ -323,9 +326,9 @@ func (m *GrpcLogEntry) GetPeer() *Address { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*GrpcLogEntry) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), @@ -333,108 +336,6 @@ func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) } } -func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - b.EncodeVarint(6<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ClientHeader); err != nil { - return err - } - case *GrpcLogEntry_ServerHeader: - b.EncodeVarint(7<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ServerHeader); err != nil { - return err - } - case *GrpcLogEntry_Message: - b.EncodeVarint(8<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Message); err != nil { - return err - } - case *GrpcLogEntry_Trailer: - b.EncodeVarint(9<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Trailer); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x) - } - return nil -} - -func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*GrpcLogEntry) - switch tag { - case 6: // payload.client_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ClientHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ClientHeader{msg} - return true, err - case 7: // payload.server_header - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ServerHeader) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_ServerHeader{msg} - return true, err - case 8: // payload.message - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Message) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Message{msg} - return true, err - case 9: // payload.trailer - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Trailer) - err := b.DecodeMessage(msg) - m.Payload = &GrpcLogEntry_Trailer{msg} - return true, err - default: - return false, nil - } -} - -func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) { - m := msg.(*GrpcLogEntry) - // payload - switch x := m.Payload.(type) { - case *GrpcLogEntry_ClientHeader: - s := proto.Size(x.ClientHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_ServerHeader: - s := proto.Size(x.ServerHeader) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Message: - s := proto.Size(x.Message) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case *GrpcLogEntry_Trailer: - s := proto.Size(x.Trailer) - n += 1 // tag and wire - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - type ClientHeader struct { // This contains only the metadata from the application. 
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` @@ -459,16 +360,17 @@ func (m *ClientHeader) Reset() { *m = ClientHeader{} } func (m *ClientHeader) String() string { return proto.CompactTextString(m) } func (*ClientHeader) ProtoMessage() {} func (*ClientHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{1} + return fileDescriptor_b7972e58de45083a, []int{1} } + func (m *ClientHeader) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ClientHeader.Unmarshal(m, b) } func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic) } -func (dst *ClientHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientHeader.Merge(dst, src) +func (m *ClientHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientHeader.Merge(m, src) } func (m *ClientHeader) XXX_Size() int { return xxx_messageInfo_ClientHeader.Size(m) @@ -519,16 +421,17 @@ func (m *ServerHeader) Reset() { *m = ServerHeader{} } func (m *ServerHeader) String() string { return proto.CompactTextString(m) } func (*ServerHeader) ProtoMessage() {} func (*ServerHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{2} + return fileDescriptor_b7972e58de45083a, []int{2} } + func (m *ServerHeader) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerHeader.Unmarshal(m, b) } func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic) } -func (dst *ServerHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerHeader.Merge(dst, src) +func (m *ServerHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerHeader.Merge(m, src) } func (m *ServerHeader) XXX_Size() int { return xxx_messageInfo_ServerHeader.Size(m) @@ -566,16 +469,17 @@ func (m *Trailer) Reset() { *m = Trailer{} } func (m *Trailer) String() string { return proto.CompactTextString(m) } func (*Trailer) ProtoMessage() {} func (*Trailer) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{3} + return fileDescriptor_b7972e58de45083a, []int{3} } + func (m *Trailer) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Trailer.Unmarshal(m, b) } func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Trailer.Marshal(b, m, deterministic) } -func (dst *Trailer) XXX_Merge(src proto.Message) { - xxx_messageInfo_Trailer.Merge(dst, src) +func (m *Trailer) XXX_Merge(src proto.Message) { + xxx_messageInfo_Trailer.Merge(m, src) } func (m *Trailer) XXX_Size() int { return xxx_messageInfo_Trailer.Size(m) @@ -630,16 +534,17 @@ func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (*Message) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{4} + return fileDescriptor_b7972e58de45083a, []int{4} } + func (m *Message) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Message.Unmarshal(m, b) } func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Message.Marshal(b, m, deterministic) } -func (dst *Message) XXX_Merge(src proto.Message) { - xxx_messageInfo_Message.Merge(dst, src) +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) } func (m *Message) XXX_Size() int { return 
xxx_messageInfo_Message.Size(m) @@ -696,16 +601,17 @@ func (m *Metadata) Reset() { *m = Metadata{} } func (m *Metadata) String() string { return proto.CompactTextString(m) } func (*Metadata) ProtoMessage() {} func (*Metadata) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{5} + return fileDescriptor_b7972e58de45083a, []int{5} } + func (m *Metadata) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Metadata.Unmarshal(m, b) } func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Metadata.Marshal(b, m, deterministic) } -func (dst *Metadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metadata.Merge(dst, src) +func (m *Metadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metadata.Merge(m, src) } func (m *Metadata) XXX_Size() int { return xxx_messageInfo_Metadata.Size(m) @@ -736,16 +642,17 @@ func (m *MetadataEntry) Reset() { *m = MetadataEntry{} } func (m *MetadataEntry) String() string { return proto.CompactTextString(m) } func (*MetadataEntry) ProtoMessage() {} func (*MetadataEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{6} + return fileDescriptor_b7972e58de45083a, []int{6} } + func (m *MetadataEntry) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MetadataEntry.Unmarshal(m, b) } func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic) } -func (dst *MetadataEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetadataEntry.Merge(dst, src) +func (m *MetadataEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetadataEntry.Merge(m, src) } func (m *MetadataEntry) XXX_Size() int { return xxx_messageInfo_MetadataEntry.Size(m) @@ -785,16 +692,17 @@ func (m *Address) Reset() { *m = Address{} } func (m *Address) String() string { return proto.CompactTextString(m) } func (*Address) ProtoMessage() {} func (*Address) Descriptor() ([]byte, []int) { - return fileDescriptor_binarylog_264c8c9c551ce911, []int{7} + return fileDescriptor_b7972e58de45083a, []int{7} } + func (m *Address) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Address.Unmarshal(m, b) } func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Address.Marshal(b, m, deterministic) } -func (dst *Address) XXX_Merge(src proto.Message) { - xxx_messageInfo_Address.Merge(dst, src) +func (m *Address) XXX_Merge(src proto.Message) { + xxx_messageInfo_Address.Merge(m, src) } func (m *Address) XXX_Size() int { return xxx_messageInfo_Address.Size(m) @@ -827,6 +735,9 @@ func (m *Address) GetIpPort() uint32 { } func init() { + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) + proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) + proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry") proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader") proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader") @@ -835,72 +746,67 @@ func init() { proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata") proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry") proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address") - 
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value) - proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value) - proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value) } -func init() { - proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911) -} +func init() { proto.RegisterFile("grpc/binlog/v1/binarylog.proto", fileDescriptor_b7972e58de45083a) } -var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{ - // 900 bytes of a gzipped FileDescriptorProto +var fileDescriptor_b7972e58de45083a = []byte{ + // 904 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44, - 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04, - 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d, - 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c, - 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf, - 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2, - 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09, - 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e, - 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef, - 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36, - 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5, - 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46, - 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84, - 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72, - 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa, - 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb, - 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84, - 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1, - 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c, - 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24, - 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba, - 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8, - 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5, - 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1, - 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94, - 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f, - 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec, - 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b, - 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 
0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1, - 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5, - 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b, - 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d, - 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42, - 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4, - 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd, - 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51, - 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01, - 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58, - 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5, - 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff, - 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26, - 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23, - 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44, - 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46, - 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf, - 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab, - 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32, - 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49, - 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb, - 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c, - 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0, - 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed, - 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f, - 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, - 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e, - 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50, - 0xd4, 0x07, 0x00, 0x00, + 0x10, 0xae, 0xdb, 0x34, 0x6e, 0x26, 0x49, 0xe5, 0xae, 0xca, 0x9d, 0xaf, 0x94, 0x6b, 0x64, 0x09, + 0x14, 0x84, 0xe4, 0xa8, 0x29, 0xd7, 0xe3, 0x05, 0xa4, 0x24, 0xf5, 0xa5, 0x11, 0xb9, 0x34, 0xda, + 0xe4, 0x7a, 0x80, 0x90, 0xac, 0x6d, 0xbc, 0x38, 0x16, 0x8e, 0xd7, 0xac, 0x37, 0x41, 0xf9, 0x59, + 0xbc, 0x21, 0xdd, 0xef, 0xe2, 0x1d, 0x79, 0xd7, 0x4e, 0x4d, 0xd3, 0x82, 0xc4, 0xbd, 0xed, 0x7c, + 0xf3, 0xcd, 0x37, 0xbb, 0xe3, 0x99, 0x31, 0xbc, 0xf4, 0x79, 0x3c, 0x6b, 0xdd, 0x05, 0x51, 0xc8, + 0xfc, 0xd6, 0xea, 0x3c, 0x3d, 0x11, 0xbe, 0x0e, 0x99, 0x6f, 0xc7, 0x9c, 0x09, 0x86, 0x8e, 0x52, + 0xbf, 0x7d, 0x8f, 0xae, 0xce, 0x4f, 0x5e, 0xfa, 0x8c, 0xf9, 0x21, 0x6d, 0x49, 0xc2, 0xdd, 0xf2, + 0x97, 0x96, 0xb7, 0xe4, 0x44, 0x04, 0x2c, 0x52, 0x21, 0x27, 0x67, 0x0f, 0xfd, 0x22, 0x58, 0xd0, + 0x44, 0x90, 0x45, 0xac, 0x08, 0xd6, 
0x07, 0x1d, 0x6a, 0x7d, 0x1e, 0xcf, 0x86, 0xcc, 0x77, 0x22, + 0xc1, 0xd7, 0xe8, 0x1b, 0xa8, 0x6c, 0x38, 0xa6, 0xd6, 0xd0, 0x9a, 0xd5, 0xf6, 0x89, 0xad, 0x54, + 0xec, 0x5c, 0xc5, 0x9e, 0xe6, 0x0c, 0x7c, 0x4f, 0x46, 0xcf, 0x41, 0x9f, 0x91, 0x30, 0x74, 0x03, + 0xcf, 0xdc, 0x6d, 0x68, 0xcd, 0x12, 0x2e, 0xa7, 0xe6, 0xc0, 0x43, 0xaf, 0xe0, 0x79, 0x42, 0x7f, + 0x5b, 0xd2, 0x68, 0x46, 0xdd, 0xc0, 0x73, 0x7f, 0x0f, 0xc4, 0x3c, 0x88, 0xdc, 0xd4, 0x69, 0xee, + 0x49, 0xe2, 0x71, 0xee, 0x1e, 0x78, 0xef, 0xa5, 0xb3, 0x47, 0xc2, 0x10, 0x7d, 0x0b, 0x25, 0xb1, + 0x8e, 0xa9, 0x59, 0x6a, 0x68, 0xcd, 0xc3, 0xf6, 0x97, 0xf6, 0xd6, 0xeb, 0xed, 0xe2, 0xc5, 0x6d, + 0x67, 0x45, 0x23, 0x31, 0x5d, 0xc7, 0x14, 0xcb, 0x30, 0xf4, 0x1d, 0x94, 0x43, 0xe6, 0xfb, 0x94, + 0x9b, 0xfb, 0x52, 0xe0, 0x8b, 0xff, 0x12, 0x18, 0x4a, 0x36, 0xce, 0xa2, 0xd0, 0x1b, 0xa8, 0xcf, + 0xc2, 0x80, 0x46, 0xc2, 0x9d, 0x53, 0xe2, 0x51, 0x6e, 0x96, 0x65, 0x31, 0xce, 0x1e, 0x91, 0xe9, + 0x49, 0xde, 0xb5, 0xa4, 0x5d, 0xef, 0xe0, 0xda, 0xac, 0x60, 0xa7, 0x3a, 0x09, 0xe5, 0x2b, 0xca, + 0x73, 0x1d, 0xfd, 0x49, 0x9d, 0x89, 0xe4, 0xdd, 0xeb, 0x24, 0x05, 0x1b, 0x5d, 0x82, 0xbe, 0xa0, + 0x49, 0x42, 0x7c, 0x6a, 0x1e, 0xe4, 0x9f, 0x65, 0x4b, 0xe1, 0xad, 0x62, 0x5c, 0xef, 0xe0, 0x9c, + 0x9c, 0xc6, 0x09, 0x4e, 0x82, 0x90, 0x72, 0xb3, 0xf2, 0x64, 0xdc, 0x54, 0x31, 0xd2, 0xb8, 0x8c, + 0x8c, 0xbe, 0x82, 0xa3, 0x98, 0xac, 0x43, 0x46, 0x3c, 0x57, 0xf0, 0x65, 0x34, 0x23, 0x82, 0x7a, + 0x26, 0x34, 0xb4, 0xe6, 0x01, 0x36, 0x32, 0xc7, 0x34, 0xc7, 0x91, 0x0d, 0xa5, 0x98, 0x52, 0x6e, + 0x56, 0x9f, 0xcc, 0xd0, 0xf1, 0x3c, 0x4e, 0x93, 0x04, 0x4b, 0x9e, 0xf5, 0x97, 0x06, 0x95, 0xcd, + 0x07, 0x43, 0xcf, 0x00, 0x39, 0xb7, 0xce, 0x68, 0xea, 0x4e, 0x7f, 0x1c, 0x3b, 0xee, 0xbb, 0xd1, + 0xf7, 0xa3, 0x9b, 0xf7, 0x23, 0x63, 0x07, 0x9d, 0x82, 0x59, 0xc0, 0x7b, 0xc3, 0x41, 0x7a, 0xbe, + 0x76, 0x3a, 0x57, 0x0e, 0x36, 0xb4, 0x07, 0xde, 0x89, 0x83, 0x6f, 0x1d, 0x9c, 0x7b, 0x77, 0xd1, + 0x67, 0xf0, 0x62, 0x3b, 0xf6, 0xad, 0x33, 0x99, 0x74, 0xfa, 0x8e, 0xb1, 0xf7, 0xc0, 0x9d, 0x05, + 0xe7, 0xee, 0x12, 0x6a, 0xc0, 0xe9, 0x23, 0x99, 0x3b, 0xc3, 0x37, 0x6e, 0x6f, 0x78, 0x33, 0x71, + 0x8c, 0xfd, 0xc7, 0x05, 0xa6, 0xb8, 0x33, 0x18, 0x3a, 0xd8, 0x28, 0xa3, 0x4f, 0xe0, 0xa8, 0x28, + 0xd0, 0x19, 0xf5, 0x9c, 0xa1, 0xa1, 0x5b, 0x5d, 0x28, 0xab, 0x36, 0x43, 0x08, 0x0e, 0x87, 0x37, + 0xfd, 0xbe, 0x83, 0x0b, 0xef, 0x3d, 0x82, 0x7a, 0x86, 0xa9, 0x8c, 0x86, 0x56, 0x80, 0x54, 0x0a, + 0x63, 0xb7, 0x5b, 0x01, 0x3d, 0xab, 0xbf, 0xf5, 0x41, 0x83, 0x5a, 0xb1, 0xf9, 0xd0, 0x6b, 0x38, + 0x58, 0x50, 0x41, 0x3c, 0x22, 0x48, 0x36, 0xbc, 0x9f, 0x3e, 0xda, 0x25, 0x8a, 0x82, 0x37, 0x64, + 0x74, 0x06, 0xd5, 0x05, 0x15, 0x73, 0xe6, 0xb9, 0x11, 0x59, 0x50, 0x39, 0xc0, 0x15, 0x0c, 0x0a, + 0x1a, 0x91, 0x05, 0x45, 0xa7, 0x50, 0x21, 0x4b, 0x31, 0x67, 0x3c, 0x10, 0x6b, 0x39, 0xb6, 0x15, + 0x7c, 0x0f, 0xa0, 0x0b, 0xd0, 0xd3, 0x45, 0xc0, 0x96, 0x42, 0x8e, 0x6b, 0xb5, 0xfd, 0x62, 0x6b, + 0x67, 0x5c, 0x65, 0x9b, 0x09, 0xe7, 0x4c, 0xab, 0x0f, 0xb5, 0x62, 0xc7, 0xff, 0xef, 0xcb, 0x5b, + 0x7f, 0x68, 0xa0, 0x67, 0x1d, 0xfc, 0x51, 0x15, 0x48, 0x04, 0x11, 0xcb, 0xc4, 0x9d, 0x31, 0x4f, + 0x55, 0xa0, 0x8e, 0x41, 0x41, 0x3d, 0xe6, 0x51, 0xf4, 0x39, 0x1c, 0x66, 0x84, 0x7c, 0x0e, 0x55, + 0x19, 0xea, 0x0a, 0xcd, 0x46, 0xaf, 0x40, 0xf3, 0xa8, 0x20, 0x41, 0x98, 0xc8, 0x8a, 0xd4, 0x72, + 0xda, 0x95, 0x02, 0xad, 0x57, 0xa0, 0xe7, 0x11, 0xcf, 0xa0, 0x1c, 0xd2, 0xc8, 0x17, 0x73, 0x79, + 0xe1, 0x3a, 0xce, 0x2c, 0x84, 0xa0, 0x24, 0x9f, 0xb1, 0x2b, 0xe3, 0xe5, 0xd9, 0xea, 0xc2, 0x41, + 0x7e, 0x77, 0x74, 0x09, 0xfb, 0x34, 0xdd, 0x5c, 0xa6, 0xd6, 
0xd8, 0x6b, 0x56, 0xdb, 0x8d, 0x7f, + 0x79, 0xa7, 0xdc, 0x70, 0x58, 0xd1, 0xad, 0xd7, 0x50, 0xff, 0x07, 0x8e, 0x0c, 0xd8, 0xfb, 0x95, + 0xae, 0x65, 0xf6, 0x0a, 0x4e, 0x8f, 0xe8, 0x18, 0xf6, 0x57, 0x24, 0x5c, 0xd2, 0x2c, 0xb7, 0x32, + 0xac, 0x3f, 0x35, 0xd0, 0xb3, 0x39, 0x46, 0x17, 0xd9, 0x76, 0xd6, 0xe4, 0x72, 0x3d, 0x7b, 0x7a, + 0xe2, 0xed, 0xc2, 0x4e, 0x36, 0x41, 0x27, 0x0a, 0xcd, 0x3a, 0x2c, 0x37, 0xd3, 0x9f, 0x47, 0x10, + 0xbb, 0x31, 0xe3, 0x42, 0x56, 0xb5, 0x8e, 0xcb, 0x41, 0x3c, 0x66, 0x5c, 0x58, 0x0e, 0x94, 0xe4, + 0x8e, 0x30, 0xa0, 0xf6, 0x60, 0x3b, 0xd4, 0xa1, 0x22, 0x91, 0xc1, 0xf8, 0xf6, 0x6b, 0x43, 0x2b, + 0x9a, 0x97, 0xc6, 0xee, 0xc6, 0x7c, 0x37, 0x1a, 0xfc, 0x60, 0xec, 0x75, 0x7f, 0x86, 0xe3, 0x80, + 0x6d, 0x5f, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, 0xda, 0x4f, 0xed, + 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0xf7, 0x5b, 0xf9, 0x7f, 0x59, 0x85, 0x49, 0xd3, + 0xdd, 0x98, 0xee, 0xea, 0xfc, 0xae, 0x2c, 0xbb, 0xfc, 0xe2, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x10, 0x93, 0x68, 0x41, 0xc2, 0x07, 0x00, 0x00, } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index a7643df7d..ef327e8af 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -31,21 +31,23 @@ import ( "time" "google.golang.org/grpc/balancer" - _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" - _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. - _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. "google.golang.org/grpc/serviceconfig" "google.golang.org/grpc/status" + + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. ) const ( @@ -66,8 +68,6 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") - // errBalancerClosed indicates that the balancer is closed. - errBalancerClosed = errors.New("grpc: balancer is closed") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. 
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -149,7 +149,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if channelz.IsOn() { if cc.dopts.channelzParentID != 0 { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -159,10 +159,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * }) } else { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtINFO, - }) + channelz.Info(cc.channelzID, "Channel Created") } cc.csMgr.channelzID = cc.channelzID } @@ -186,21 +183,22 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } if cc.dopts.defaultServiceConfigRawJSON != nil { - sc, err := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) - if err != nil { - return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, err) + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) } - cc.dopts.defaultServiceConfig = sc + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) } cc.mkp = cc.dopts.copts.KeepaliveParams if cc.dopts.copts.Dialer == nil { - cc.dopts.copts.Dialer = newProxyDialer( - func(ctx context.Context, addr string) (net.Conn, error) { - network, addr := parseDialTarget(addr) - return (&net.Dialer{}).DialContext(ctx, network, addr) - }, - ) + cc.dopts.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { + network, addr := parseDialTarget(addr) + return (&net.Dialer{}).DialContext(ctx, network, addr) + } + if cc.dopts.withProxy { + cc.dopts.copts.Dialer = newProxyDialer(cc.dopts.copts.Dialer) + } } if cc.dopts.copts.UserAgent != "" { @@ -217,7 +215,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * defer func() { select { case <-ctx.Done(): - conn, err = nil, ctx.Err() + switch { + case ctx.Err() == err: + conn = nil + case err == nil || !cc.dopts.returnLastError: + conn, err = nil, ctx.Err() + default: + conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) + } default: } }() @@ -235,29 +240,28 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } } if cc.dopts.bs == nil { - cc.dopts.bs = backoff.Exponential{ - MaxDelay: DefaultBackoffConfig.MaxDelay, + cc.dopts.bs = backoff.DefaultExponential + } + + // Determine the resolver to use. + cc.parsedTarget = grpcutil.ParseTarget(cc.target) + channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) + resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + // If resolver builder is still nil, the parsed target's scheme is + // not registered. Fallback to default resolver and set Endpoint to + // the original target. 
+ channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, } - } - if cc.dopts.resolverBuilder == nil { - // Only try to parse target when resolver builder is not already set. - cc.parsedTarget = parseTarget(cc.target) - grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) - if cc.dopts.resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. - grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) + resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) } - } else { - cc.parsedTarget = resolver.Target{Endpoint: target} } + creds := cc.dopts.copts.TransportCredentials if creds != nil && creds.Info().ServerName != "" { cc.authority = creds.Info().ServerName @@ -297,14 +301,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc) + rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) if err != nil { return nil, fmt.Errorf("failed to build resolver: %v", err) } - cc.mu.Lock() cc.resolverWrapper = rWrapper cc.mu.Unlock() + // A blocking dial blocks until the clientConn is ready. if cc.dopts.block { for { @@ -312,7 +316,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if s == connectivity.Ready { break } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.blockingpicker.connectionError(); err != nil { + if err = cc.connectionError(); err != nil { terr, ok := err.(interface { Temporary() bool }) @@ -323,6 +327,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } if !cc.WaitForStateChange(ctx, s) { // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } return nil, ctx.Err() } } @@ -415,12 +422,7 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state - if channelz.IsOn() { - channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel Connectivity change to %v", state), - Severity: channelz.CtINFO, - }) - } + channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) @@ -443,7 +445,32 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { return csm.notifyChan } -// ClientConn represents a client connection to an RPC server. +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. 
+ Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. type ClientConn struct { ctx context.Context cancel context.CancelFunc @@ -471,6 +498,9 @@ type ClientConn struct { channelzID int64 // channelz unique identification number czData *channelzData + + lceMu sync.Mutex // protects lastConnectionError + lastConnectionError error } // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or @@ -532,58 +562,104 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { } } -func (cc *ClientConn) updateResolverState(s resolver.State) error { +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}") + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, addrs) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs) + } +} + +func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() cc.mu.Lock() - defer cc.mu.Unlock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. if cc.conns == nil { + cc.mu.Unlock() return nil } - if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { - if cc.dopts.defaultServiceConfig != nil && cc.sc == nil { - cc.applyServiceConfig(cc.dopts.defaultServiceConfig) + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. + cc.maybeApplyDefaultServiceConfig(nil) + + if cc.balancerWrapper != nil { + cc.balancerWrapper.resolverError(err) } - } else if sc, ok := s.ServiceConfig.(*ServiceConfig); ok { - cc.applyServiceConfig(sc) + + // No addresses are valid with err set; return early. + cc.mu.Unlock() + return balancer.ErrBadResolverState } - var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. 
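The ClientConnInterface introduced above is the narrow surface generated clients are meant to compile against; *ClientConn satisfies it, and so can a test double, which is the main practical payoff. A hedged sketch of that usage follows; the fake type, method path, and nil messages are illustrative placeholders rather than anything defined in this patch.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
)

// loggingConn is a test double that satisfies grpc.ClientConnInterface
// without opening a network connection.
type loggingConn struct{}

func (loggingConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error {
	fmt.Println("unary call:", method)
	return nil
}

func (loggingConn) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	return nil, fmt.Errorf("streaming not supported by this fake")
}

// callService stands in for generated client code: it depends only on the
// interface, so either a real *grpc.ClientConn or the fake above can be
// passed in. The method path and nil messages are placeholders.
func callService(cc grpc.ClientConnInterface) error {
	return cc.Invoke(context.Background(), "/example.Greeter/SayHello", nil, nil)
}

func main() {
	var cc grpc.ClientConnInterface = loggingConn{}
	if err := callService(cc); err != nil {
		fmt.Println("error:", err)
	}
}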
- var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - balCfg = cc.sc.lbConfig.cfg + var ret error + if cc.dopts.disableServiceConfig || s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig(s.Addresses) + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? + } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + cc.applyServiceConfigAndBalancer(sc, s.Addresses) } else { - var isGRPCLB bool - for _, a := range s.Addresses { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break + ret = balancer.ErrBadResolverState + if cc.balancerWrapper == nil { + var err error + if s.ServiceConfig.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) + } else { + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) + cc.mu.Unlock() + return ret } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) } - cc.balancerWrapper.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) - return nil + var balCfg serviceconfig.LoadBalancingConfig + if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig.cfg + } + + cbn := cc.curBalancerName + bw := cc.balancerWrapper + cc.mu.Unlock() + if cbn != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + for i := 0; i < len(s.Addresses); { + if s.Addresses[i].Type == resolver.GRPCLB { + copy(s.Addresses[i:], s.Addresses[i+1:]) + s.Addresses = s.Addresses[:len(s.Addresses)-1] + continue + } + i++ + } + } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + if ret == nil { + ret = uccsErr // prefer ErrBadResolver state since any other error is + // currently meaningless to the caller. 
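The new updateResolverState still strips grpclb addresses when the grpclb balancer is not in play, compacting the resolver's slice in place rather than allocating a new one. A small self-contained illustration of that copy-and-reslice idiom, with a hypothetical address type standing in for resolver.Address:

package main

import "fmt"

// address is a stand-in for resolver.Address; only the fields needed for
// the illustration are present.
type address struct {
	addr   string
	grpclb bool
}

// filterGRPCLB compacts the slice in place with the same copy-and-reslice
// idiom as the hunk above: surviving entries keep their order and the
// backing array is reused.
func filterGRPCLB(addrs []address) []address {
	for i := 0; i < len(addrs); {
		if addrs[i].grpclb {
			copy(addrs[i:], addrs[i+1:])
			addrs = addrs[:len(addrs)-1]
			continue
		}
		i++
	}
	return addrs
}

func main() {
	addrs := []address{
		{addr: "10.0.0.1:443"},
		{addr: "lb.example.internal:443", grpclb: true},
		{addr: "10.0.0.2:443"},
	}
	fmt.Println(filterGRPCLB(addrs)) // [{10.0.0.1:443 false} {10.0.0.2:443 false}]
}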
+ } + return ret } // switchBalancer starts the switching from current balancer to the balancer @@ -599,9 +675,9 @@ func (cc *ClientConn) switchBalancer(name string) { return } - grpclog.Infof("ClientConn switching balancer to %q", name) + channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name) if cc.dopts.balancerBuilder != nil { - grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") + channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") return } if cc.balancerWrapper != nil { @@ -609,29 +685,19 @@ func (cc *ClientConn) switchBalancer(name string) { } builder := balancer.Get(name) - if channelz.IsOn() { - if builder == nil { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), - Severity: channelz.CtWarning, - }) - } else { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), - Severity: channelz.CtINFO, - }) - } - } if builder == nil { - grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) + channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) + channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) builder = newPickfirstBuilder() + } else { + channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name) } cc.curBalancerName = builder.Name() cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() @@ -639,7 +705,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi } // TODO(bar switching) send updates to all balancer wrappers when balancer // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s) + cc.balancerWrapper.handleSubConnStateChange(sc, s, err) cc.mu.Unlock() } @@ -648,6 +714,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi // Caller needs to make sure len(addrs) > 0. func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { ac := &addrConn{ + state: connectivity.Idle, cc: cc, addrs: addrs, scopts: opts, @@ -664,7 +731,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub } if channelz.IsOn() { ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -736,7 +803,7 @@ func (ac *addrConn) connect() error { } // Update connectivity state within the lock to prevent subsequent or // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting) + ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() // Start a goroutine connecting to the server asynchronously. 
@@ -762,7 +829,7 @@ func (ac *addrConn) connect() error { func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() - grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { @@ -782,7 +849,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { break } } - grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) if curAddrFound { ac.addrs = addrs } @@ -822,7 +889,8 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{ + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, FullMethodName: method, }) if err != nil { @@ -831,10 +899,10 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st return t, done, nil } -func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error { +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) { if sc == nil { // should never reach here. - return fmt.Errorf("got nil pointer for service config") + return } cc.sc = sc @@ -850,10 +918,38 @@ func (cc *ClientConn) applyServiceConfig(sc *ServiceConfig) error { cc.retryThrottler.Store((*retryThrottler)(nil)) } - return nil + if cc.dopts.balancerBuilder == nil { + // Only look at balancer types and switch balancer if balancer dial + // option is not set. + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } + } + cc.switchBalancer(newBalancerName) + } else if cc.balancerWrapper == nil { + // Balancer dial option was set, and this is the first time handling + // resolved addresses. Build a balancer with dopts.balancerBuilder. + cc.curBalancerName = cc.dopts.balancerBuilder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } } -func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() r := cc.resolverWrapper cc.mu.RUnlock() @@ -875,8 +971,9 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { // This API is EXPERIMENTAL. 
func (cc *ClientConn) ResetConnectBackoff() { cc.mu.Lock() - defer cc.mu.Unlock() - for ac := range cc.conns { + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { ac.resetConnectBackoff() } } @@ -923,7 +1020,7 @@ func (cc *ClientConn) Close() error { Severity: channelz.CtINFO, } } - channelz.AddTraceEvent(cc.channelzID, ted) + channelz.AddTraceEvent(cc.channelzID, 0, ted) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to // the entity being deleted, and thus prevent it from being deleted right away. channelz.RemoveEntry(cc.channelzID) @@ -962,20 +1059,13 @@ type addrConn struct { } // Note: this requires a lock on ac.mu. -func (ac *addrConn) updateConnectivityState(s connectivity.State) { +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { if ac.state == s { return } - - updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) ac.state = s - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: updateMsg, - Severity: channelz.CtINFO, - }) - } - ac.cc.handleSubConnStateChange(ac.acbw, s) + channelz.Infof(ac.channelzID, "Subchannel Connectivity change to %v", s) + ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } // adjustParams updates parameters used to create transports upon @@ -995,7 +1085,7 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { func (ac *addrConn) resetTransport() { for i := 0; ; i++ { if i > 0 { - ac.cc.resolveNow(resolver.ResolveNowOption{}) + ac.cc.resolveNow(resolver.ResolveNowOptions{}) } ac.mu.Lock() @@ -1024,7 +1114,7 @@ func (ac *addrConn) resetTransport() { // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm connectDeadline := time.Now().Add(dialDuration) - ac.updateConnectivityState(connectivity.Connecting) + ac.updateConnectivityState(connectivity.Connecting, nil) ac.transport = nil ac.mu.Unlock() @@ -1037,7 +1127,7 @@ func (ac *addrConn) resetTransport() { ac.mu.Unlock() return } - ac.updateConnectivityState(connectivity.TransientFailure) + ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. b := ac.resetBackoff @@ -1093,6 +1183,7 @@ func (ac *addrConn) resetTransport() { // first successful one. It returns the transport, the address and a Event in // the successful case. The Event fires when the returned transport disconnects. func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) { + var firstConnErr error for _, addr := range addrs { ac.mu.Lock() if ac.state == connectivity.Shutdown { @@ -1110,22 +1201,20 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } ac.mu.Unlock() - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), - Severity: channelz.CtINFO, - }) - } + channelz.Infof(ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) if err == nil { return newTr, addr, reconnect, nil } - ac.cc.blockingpicker.updateConnectionError(err) + if firstConnErr == nil { + firstConnErr = err + } + ac.cc.updateConnectionError(err) } // Couldn't connect to any address. 
- return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address") + return nil, resolver.Address{}, nil, firstConnErr } // createTransport creates a connection to addr. It returns the transport and a @@ -1136,10 +1225,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne onCloseCalled := make(chan struct{}) reconnect := grpcsync.NewEvent() - target := transport.TargetInfo{ - Addr: addr.Addr, - Metadata: addr.Metadata, - Authority: ac.cc.authority, + // addr.ServerName takes precedent over ClientConn authority, if present. + if addr.ServerName == "" { + addr.ServerName = ac.cc.authority } once := sync.Once{} @@ -1152,7 +1240,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // state to Connecting. // // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting) + ac.updateConnectivityState(connectivity.Connecting, nil) } }) ac.mu.Unlock() @@ -1167,7 +1255,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // state to Connecting. // // TODO: this should be Idle when grpc-go properly supports it. - ac.updateConnectivityState(connectivity.Connecting) + ac.updateConnectivityState(connectivity.Connecting, nil) } }) ac.mu.Unlock() @@ -1185,18 +1273,18 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne copts.ChannelzParentID = ac.channelzID } - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) + channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) return nil, nil, err } select { - case <-time.After(connectDeadline.Sub(time.Now())): + case <-time.After(time.Until(connectDeadline)): // We didn't get the preface in time. newTr.Close() - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) + channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) return nil, nil, errors.New("timed out waiting for server handshake") case <-prefaceReceived: // We got the preface - huzzah! things are good. @@ -1224,7 +1312,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { var healthcheckManagingState bool defer func() { if !healthcheckManagingState { - ac.updateConnectivityState(connectivity.Ready) + ac.updateConnectivityState(connectivity.Ready, nil) } }() @@ -1243,7 +1331,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. 
- grpclog.Error("Health check is requested but health check function is not set.") + channelz.Error(ac.channelzID, "Health check is requested but health check function is not set.") return } @@ -1260,28 +1348,22 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { ac.mu.Unlock() return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) } - setConnectivityState := func(s connectivity.State) { + setConnectivityState := func(s connectivity.State, lastErr error) { ac.mu.Lock() defer ac.mu.Unlock() if ac.transport != currentTr { return } - ac.updateConnectivityState(s) + ac.updateConnectivityState(s, lastErr) } // Start the health checking stream. go func() { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", - Severity: channelz.CtError, - }) - } - grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") + channelz.Error(ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) } } }() @@ -1331,8 +1413,8 @@ func (ac *addrConn) tearDown(err error) { curTr := ac.transport ac.transport = nil // We have to set the state to Shutdown before anything else to prevent races - // between setting the state and logic that waits on context cancelation / etc. - ac.updateConnectivityState(connectivity.Shutdown) + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} if err == errConnDrain && curTr != nil { @@ -1346,7 +1428,7 @@ func (ac *addrConn) tearDown(err error) { ac.mu.Lock() } if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Deleted", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -1355,7 +1437,7 @@ func (ac *addrConn) tearDown(err error) { }, }) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity beng deleted, and thus prevent it from being deleted right away. + // the entity being deleted, and thus prevent it from being deleted right away. channelz.RemoveEntry(ac.channelzID) } ac.mu.Unlock() @@ -1445,3 +1527,24 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // Deprecated: This error is never returned by grpc and should not be // referenced by users. 
var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 8ea3d4a1d..02766443a 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -24,16 +24,13 @@ package credentials // import "google.golang.org/grpc/credentials" import ( "context" - "crypto/tls" - "crypto/x509" "errors" "fmt" - "io/ioutil" "net" - "strings" "github.com/golang/protobuf/proto" - "google.golang.org/grpc/credentials/internal" + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/internal" ) // PerRPCCredentials defines the common interface for the credentials which need to @@ -45,7 +42,8 @@ type PerRPCCredentials interface { // context. If a status code is returned, it will be used as the status // for the RPC. uri is the URI of the entry point for the request. // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. + // timeout and cancellation. Additionally, RequestInfo data will be + // available via ctx to this call. // TODO(zhaoq): Define the set of the qualified keys instead of leaving // it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) @@ -54,6 +52,50 @@ type PerRPCCredentials interface { RequireTransportSecurity() bool } +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // Invalid indicates an invalid security level. + // The zero SecurityLevel value is invalid for backward compatibility. + Invalid SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo { + return c +} + // ProtocolInfo provides information regarding the gRPC wire protocol version, // security protocol, security protocol version in use, server name, etc. 
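The CommonAuthInfo and SecurityLevel additions earlier in this hunk are meant to be embedded by AuthInfo implementations. A hypothetical sketch for a custom (non-TLS) transport credential; the package and type names are illustrative, and the handshake code that would return it is omitted.

    package customcreds // hypothetical

    import "google.golang.org/grpc/credentials"

    // customAuthInfo embeds CommonAuthInfo so helpers such as
    // CheckSecurityLevel can discover the connection's protection level.
    type customAuthInfo struct {
        credentials.CommonAuthInfo
        // credential-specific fields would go here
    }

    func (*customAuthInfo) AuthType() string { return "custom" }

    // newAuthInfo is what a TransportCredentials handshake would return as its
    // AuthInfo result. A pointer is returned so the promoted pointer-receiver
    // GetCommonAuthInfo method stays visible to the credentials helpers.
    func newAuthInfo() credentials.AuthInfo {
        return &customAuthInfo{
            CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.IntegrityOnly},
        }
    }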
type ProtocolInfo struct { @@ -61,13 +103,19 @@ type ProtocolInfo struct { ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string - // SecurityVersion is the security protocol version. + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. SecurityVersion string // ServerName is the user-configured server name. ServerName string } // AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. type AuthInfo interface { AuthType() string } @@ -79,20 +127,25 @@ var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gR // TransportCredentials defines the common interface for all the live gRPC wire // protocols and supported transport security protocols (e.g., TLS, SSL). type TransportCredentials interface { - // ClientHandshake does the authentication handshake specified by the corresponding - // authentication protocol on rawConn for clients. It returns the authenticated - // connection and the corresponding auth information about the connection. - // Implementations must use the provided context to implement timely cancellation. - // gRPC will try to reconnect if the error returned is a temporary error - // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). - // If the returned error is a wrapper error, implementations should make sure that + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that // the error implements Temporary() to have the correct retry behaviors. + // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about - // the connection. + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) @@ -125,145 +178,91 @@ type Bundle interface { NewWithMode(mode string) (Bundle, error) } -// TLSInfo contains the auth information for a TLS authenticated connection. -// It implements the AuthInfo interface. 
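The SecurityVersion deprecation above points at Peer.AuthInfo as the replacement. A short sketch of that pattern on the server side, assuming a TLS-backed connection; the function name is illustrative.

    package example // hypothetical

    import (
        "context"
        "fmt"

        "google.golang.org/grpc/credentials"
        "google.golang.org/grpc/peer"
    )

    // tlsVersionFromPeer reads the negotiated TLS version for the current RPC
    // from the per-connection AuthInfo instead of the static ProtocolInfo.
    func tlsVersionFromPeer(ctx context.Context) (uint16, error) {
        p, ok := peer.FromContext(ctx)
        if !ok || p.AuthInfo == nil {
            return 0, fmt.Errorf("no peer auth info in context")
        }
        tlsInfo, ok := p.AuthInfo.(credentials.TLSInfo)
        if !ok {
            return 0, fmt.Errorf("peer is not using TLS credentials")
        }
        return tlsInfo.State.Version, nil
    }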
-type TLSInfo struct { - State tls.ConnectionState +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") + Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo } -// AuthType returns the type of TLSInfo as a string. -func (t TLSInfo) AuthType() string { - return "tls" -} +// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. +type requestInfoKey struct{} -// GetSecurityValue returns security info requested by channelz. -func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { - v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], - } - // Currently there's no way to get LocalCertificate info from tls package. - if len(t.State.PeerCertificates) > 0 { - v.RemoteCertificate = t.State.PeerCertificates[0].Raw - } - return v +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. +func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { + ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo) + return } -// tlsCreds is the credentials required for authenticating a connection using TLS. -type tlsCreds struct { - // TLS configuration - config *tls.Config +// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes +// it possible to pass arbitrary data to the handshaker from gRPC, resolver, +// balancer etc. Individual credential implementations control the actual +// format of the data that they are willing to receive. +// +// This API is experimental. +type ClientHandshakeInfo struct { + // Attributes contains the attributes for the address. It could be provided + // by the gRPC, resolver, balancer etc. + Attributes *attributes.Attributes } -func (c tlsCreds) Info() ProtocolInfo { - return ProtocolInfo{ - SecurityProtocol: "tls", - SecurityVersion: "1.2", - ServerName: c.config.ServerName, - } -} +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} -func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { - // use local cfg to avoid clobbering ServerName if using multiple endpoints - cfg := cloneTLSConfig(c.config) - if cfg.ServerName == "" { - colonPos := strings.LastIndex(authority, ":") - if colonPos == -1 { - colonPos = len(authority) - } - cfg.ServerName = authority[:colonPos] - } - conn := tls.Client(rawConn, cfg) - errChannel := make(chan error, 1) - go func() { - errChannel <- conn.Handshake() - }() - select { - case err := <-errChannel: - if err != nil { - return nil, nil, err - } - case <-ctx.Done(): - return nil, nil, ctx.Err() - } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil +// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored +// in ctx. +// +// This API is experimental. 
+func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo { + chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo) + return chi } -func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { - conn := tls.Server(rawConn, c.config) - if err := conn.Handshake(); err != nil { - return nil, nil, err +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. +// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. +func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() *CommonAuthInfo } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil -} - -func (c *tlsCreds) Clone() TransportCredentials { - return NewTLS(c.config) -} - -func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { - c.config.ServerName = serverNameOverride - return nil -} - -const alpnProtoStrH2 = "h2" - -func appendH2ToNextProtos(ps []string) []string { - for _, p := range ps { - if p == alpnProtoStrH2 { - return ps + ri, _ := RequestInfoFromContext(ctx) + if ri.AuthInfo == nil { + return errors.New("unable to obtain SecurityLevel from context") + } + if ci, ok := ri.AuthInfo.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. + if ci.GetCommonAuthInfo().SecurityLevel == Invalid { + return nil + } + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) } } - ret := make([]string, 0, len(ps)+1) - ret = append(ret, ps...) - return append(ret, alpnProtoStrH2) -} - -// NewTLS uses c to construct a TransportCredentials based on TLS. -func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{cloneTLSConfig(c)} - tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) - return tc -} - -// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. -func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { - return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. + return nil } -// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. -// serverNameOverride is for testing only. If set to a non empty string, -// it will override the virtual host name of authority (e.g. :authority header field) in requests. 
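The RequestInfoFromContext and CheckSecurityLevel helpers added in this hunk are aimed at PerRPCCredentials implementations. A hypothetical token credential using both, written against the ctx-based CheckSecurityLevel signature in this vendored version:

    package example // hypothetical

    import (
        "context"

        "google.golang.org/grpc/credentials"
    )

    // tokenCreds attaches a bearer token, but only over connections that
    // provide both privacy and integrity protection.
    type tokenCreds struct {
        token string // assumed to be obtained elsewhere
    }

    func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
        if err := credentials.CheckSecurityLevel(ctx, credentials.PrivacyAndIntegrity); err != nil {
            return nil, err
        }
        ri, _ := credentials.RequestInfoFromContext(ctx)
        _ = ri.Method // e.g. "/some.Service/Method"; could drive per-method tokens
        return map[string]string{"authorization": "Bearer " + c.token}, nil
    }

    func (c tokenCreds) RequireTransportSecurity() bool { return true }

The credential would then be supplied per connection with grpc.WithPerRPCCredentials(tokenCreds{token: ...}).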
-func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) - if err != nil { - return nil, err - } - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM(b) { - return nil, fmt.Errorf("credentials: failed to append certificates") +func init() { + internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) } - return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil -} - -// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. -func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { - return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) -} - -// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key -// file for server. -func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, err + internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) } - return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil } // ChannelzSecurityInfo defines the interface that security protocols should implement // in order to provide security info to channelz. +// +// This API is experimental. type ChannelzSecurityInfo interface { GetSecurityValue() ChannelzSecurityValue } @@ -271,66 +270,20 @@ type ChannelzSecurityInfo interface { // ChannelzSecurityValue defines the interface that GetSecurityValue() return value // should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue // and *OtherChannelzSecurityValue. +// +// This API is experimental. type ChannelzSecurityValue interface { isChannelzSecurityValue() } -// TLSChannelzSecurityValue defines the struct that TLS protocol should return -// from GetSecurityValue(), containing security info like cipher and certificate used. -type TLSChannelzSecurityValue struct { - ChannelzSecurityValue - StandardName string - LocalCertificate []byte - RemoteCertificate []byte -} - // OtherChannelzSecurityValue defines the struct that non-TLS protocol should return // from GetSecurityValue(), which contains protocol specific security info. Note // the Value field will be sent to users of channelz requesting channel info, and // thus sensitive info should better be avoided. +// +// This API is experimental. 
type OtherChannelzSecurityValue struct { ChannelzSecurityValue Name string Value proto.Message } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", -} - -// cloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO: inline this function if possible. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - - return cfg.Clone() -} diff --git a/vendor/google.golang.org/grpc/credentials/tls13.go b/vendor/google.golang.org/grpc/credentials/go12.go similarity index 100% rename from vendor/google.golang.org/grpc/credentials/tls13.go rename to vendor/google.golang.org/grpc/credentials/go12.go diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go new file mode 100644 index 000000000..86e956bc8 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -0,0 +1,235 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package credentials + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + + "google.golang.org/grpc/credentials/internal" +) + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. + serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +const alpnProtoStrH2 = "h2" + +func appendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} + tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos) + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. 
+// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// This API is EXPERIMENTAL. 
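As a usage note for the constructors above: server-only authentication keeps using the From* helpers, while the updated doc comments steer mutual TLS toward NewTLS with a complete tls.Config. A sketch with placeholder file paths and target:

    package main

    import (
        "crypto/tls"
        "log"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
    )

    func main() {
        // Server authentication only: trust roots loaded from a file.
        creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
        if err != nil {
            log.Fatalf("load CA: %v", err)
        }

        // Mutual TLS: a client certificate requires a full tls.Config via NewTLS.
        cert, err := tls.LoadX509KeyPair("client.pem", "client-key.pem")
        if err != nil {
            log.Fatalf("load client key pair: %v", err)
        }
        mtlsCreds := credentials.NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}})
        _ = mtlsCreds // would replace creds below when the server requires client certs

        conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
        if err != nil {
            log.Fatalf("dial: %v", err)
        }
        defer conn.Close()
    }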
+type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", +} + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index e8f34d0d6..d5030c076 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -24,11 +24,12 @@ import ( "net" "time" + "google.golang.org/grpc/backoff" "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" + internalbackoff "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -45,21 +46,19 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs backoff.Strategy - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by v1 balancer dial option WithBalancer to support v1 - // balancer, and also by WithBalancerName dial option. 
- balancerBuilder balancer.Builder - // This is to support grpclb. - resolverBuilder resolver.Builder + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by WithBalancerName dial option. + balancerBuilder balancer.Builder channelzParentID int64 disableServiceConfig bool disableRetry bool @@ -68,6 +67,12 @@ type dialOptions struct { minConnectTimeout func() time.Duration defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string + // This is used by ccResolverWrapper to backoff between successive calls to + // resolver.ResolveNow(). The user will have no need to configure this, but + // we need to be able to configure this in tests. + resolveNowBackoff func(int) time.Duration + resolvers []resolver.Builder + withProxy bool } // DialOption configures how we set up the connection. @@ -194,19 +199,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancer returns a DialOption which sets a load balancer with the v1 API. -// Name resolver will be ignored if this DialOption is specified. -// -// Deprecated: use the new balancer APIs in balancer package and -// WithBalancerName. Will be removed in a future 1.x release. -func WithBalancer(b Balancer) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = &balancerWrapperBuilder{ - b: b, - } - }) -} - // WithBalancerName sets the balancer that the ClientConn will be initialized // with. Balancer registered with balancerName will be used. This function // panics if no balancer was registered by balancerName. @@ -226,13 +218,6 @@ func WithBalancerName(balancerName string) DialOption { }) } -// withResolverBuilder is only for grpclb. -func withResolverBuilder(b resolver.Builder) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolverBuilder = b - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -246,8 +231,28 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption { }) } +// WithConnectParams configures the dialer to use the provided ConnectParams. +// +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. +// +// This API is EXPERIMENTAL. +func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} + o.minConnectTimeout = func() time.Duration { + return p.MinConnectTimeout + } + }) +} + // WithBackoffMaxDelay configures the dialer to use the provided maximum delay // when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. func WithBackoffMaxDelay(md time.Duration) DialOption { return WithBackoffConfig(BackoffConfig{MaxDelay: md}) } @@ -255,19 +260,18 @@ func WithBackoffMaxDelay(md time.Duration) DialOption { // WithBackoffConfig configures the dialer to use the provided backoff // parameters after connection failures. 
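For the WithConnectParams option introduced above, and the WithBackoffConfig/WithBackoffMaxDelay deprecations that point to it, a minimal sketch that starts from backoff.DefaultConfig and overrides only MaxDelay, as the doc comment suggests; the target and durations are placeholders.

    package main

    import (
        "log"
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/backoff"
    )

    func main() {
        // Start from the package defaults and override only the maximum delay.
        bc := backoff.DefaultConfig
        bc.MaxDelay = 30 * time.Second

        conn, err := grpc.Dial(
            "example.com:443",
            grpc.WithInsecure(),
            grpc.WithConnectParams(grpc.ConnectParams{
                Backoff:           bc,
                MinConnectTimeout: 10 * time.Second,
            }),
        )
        if err != nil {
            log.Fatalf("dial: %v", err)
        }
        defer conn.Close()
    }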
// -// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up -// for use. +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. func WithBackoffConfig(b BackoffConfig) DialOption { - return withBackoff(backoff.Exponential{ - MaxDelay: b.MaxDelay, - }) + bc := backoff.DefaultConfig + bc.MaxDelay = b.MaxDelay + return withBackoff(internalbackoff.Exponential{Config: bc}) } // withBackoff sets the backoff strategy used for connectRetryNum after a failed // connection attempt. // // This can be exported if arbitrary backoff strategies are allowed by gRPC. -func withBackoff(bs backoff.Strategy) DialOption { +func withBackoff(bs internalbackoff.Strategy) DialOption { return newFuncDialOption(func(o *dialOptions) { o.bs = bs }) @@ -282,6 +286,19 @@ func WithBlock() DialOption { }) } +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock() +// +// This API is EXPERIMENTAL. +func WithReturnConnectionError() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + o.returnLastError = true + }) +} + // WithInsecure returns a DialOption which disables transport security for this // ClientConn. Note that transport security is required unless WithInsecure is // set. @@ -291,6 +308,16 @@ func WithInsecure() DialOption { }) } +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// This API is EXPERIMENTAL. +func WithNoProxy() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.withProxy = false + }) +} + // WithTransportCredentials returns a DialOption which configures a connection // level security credentials (e.g., TLS/SSL). This should not be used together // with WithCredentialsBundle. @@ -322,8 +349,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption { // WithTimeout returns a DialOption that configures a timeout for dialing a // ClientConn initially. This is valid if and only if WithBlock() is present. // -// Deprecated: use DialContext and context.WithTimeout instead. Will be -// supported throughout 1.x. +// Deprecated: use DialContext instead of Dial and context.WithTimeout +// instead. Will be supported throughout 1.x. func WithTimeout(d time.Duration) DialOption { return newFuncDialOption(func(o *dialOptions) { o.timeout = d @@ -341,7 +368,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp } func init() { - internal.WithResolverBuilder = withResolverBuilder internal.WithHealthCheckFunc = withHealthCheckFunc } @@ -433,7 +459,7 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption { } // WithChainStreamInterceptor returns a DialOption that specifies the chained -// interceptor for unary RPCs. The first interceptor will be the outer most, +// interceptor for streaming RPCs. The first interceptor will be the outer most, // while the last interceptor will be the inner most wrapper around the real call. // All interceptors added by this method will be chained, and the interceptor // defined by WithStreamInterceptor will always be prepended to the chain. @@ -455,6 +481,8 @@ func WithAuthority(a string) DialOption { // WithChannelzParentID returns a DialOption that specifies the channelz ID of // current ClientConn's parent. 
This function is used in nested channel creation // (e.g. grpclb dial). +// +// This API is EXPERIMENTAL. func WithChannelzParentID(id int64) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id @@ -539,6 +567,8 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, }, + resolveNowBackoff: internalbackoff.DefaultExponential.Backoff, + withProxy: true, } } @@ -552,3 +582,25 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { o.minConnectTimeout = f }) } + +// withResolveNowBackoff specifies the function that clientconn uses to backoff +// between successive calls to resolver.ResolveNow(). +// +// For testing purpose only. +func withResolveNowBackoff(f func(int) time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolveNowBackoff = f + }) +} + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// This API is EXPERIMENTAL. +func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) + }) +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 187adbb11..0022859ad 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,6 +16,8 @@ * */ +//go:generate ./regenerate.sh + /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 30a75da99..195e8448b 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -46,6 +46,10 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string + // EXPERIMENTAL: if a Compressor implements + // DecompressedSize(compressedBytes []byte) int, gRPC will call it + // to determine the size of the buffer allocated for the result of decompression. + // Return -1 to indicate unknown size. 
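The experimental note above says gRPC will consult an optional DecompressedSize method when sizing the decompression buffer. A sketch of a gzip-based Compressor implementing it, under the assumption that the gzip trailer's ISIZE field (uncompressed length mod 2^32) is sufficient for buffer sizing; the coding name is hypothetical so it does not clash with the built-in gzip codec.

    package gzipcodec // hypothetical; grpc ships its own "gzip" codec

    import (
        "compress/gzip"
        "encoding/binary"
        "io"

        "google.golang.org/grpc/encoding"
    )

    const name = "gzip-sketch" // hypothetical coding name

    func init() { encoding.RegisterCompressor(&compressor{}) }

    type compressor struct{}

    func (*compressor) Name() string { return name }

    func (*compressor) Compress(w io.Writer) (io.WriteCloser, error) {
        return gzip.NewWriter(w), nil
    }

    func (*compressor) Decompress(r io.Reader) (io.Reader, error) {
        return gzip.NewReader(r)
    }

    // DecompressedSize implements the optional method described above: the
    // gzip trailer stores the uncompressed length (mod 2^32) in its last
    // four bytes, little-endian.
    func (*compressor) DecompressedSize(compressed []byte) int {
        if len(compressed) < 4 {
            return -1 // unknown
        }
        return int(binary.LittleEndian.Uint32(compressed[len(compressed)-4:]))
    }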
} var registeredCompressor = make(map[string]Compressor) diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index c1a8340c5..31f2b01f6 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -1,19 +1,15 @@ module google.golang.org/grpc +go 1.11 + require ( - cloud.google.com/go v0.26.0 // indirect - github.com/BurntSushi/toml v0.3.1 // indirect - github.com/client9/misspell v0.3.4 + github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f + github.com/envoyproxy/go-control-plane v0.9.4 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/mock v1.1.1 - github.com/golang/protobuf v1.2.0 - github.com/google/go-cmp v0.2.0 - golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 + github.com/golang/protobuf v1.3.3 + github.com/google/go-cmp v0.4.0 golang.org/x/net v0.0.0-20190311183353-d8887717615a golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a - golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 - google.golang.org/appengine v1.1.0 // indirect - google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 - honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 ) diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index 741677d2e..be8078eac 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -2,36 +2,67 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 51bb9457c..c8bb2be34 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -26,72 +26,78 @@ // verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. package grpclog // import "google.golang.org/grpc/grpclog" -import "os" +import ( + "os" -var logger = newLoggerV2() + "google.golang.org/grpc/internal/grpclog" +) + +func init() { + SetLoggerV2(newLoggerV2()) +} // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return logger.V(l) + return grpclog.Logger.V(l) } // Info logs to the INFO log. func Info(args ...interface{}) { - logger.Info(args...) + grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...interface{}) { - logger.Infof(format, args...) + grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...interface{}) { - logger.Infoln(args...) + grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...interface{}) { - logger.Warning(args...) + grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...interface{}) { - logger.Warningf(format, args...) + grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...interface{}) { - logger.Warningln(args...) + grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...interface{}) { - logger.Error(args...) 
+ grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...interface{}) { - logger.Errorf(format, args...) + grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...interface{}) { - logger.Errorln(args...) + grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...interface{}) { - logger.Fatal(args...) + grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. -// It calles os.Exit() with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) + grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -99,7 +105,7 @@ func Fatalf(format string, args ...interface{}) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. func Fatalln(args ...interface{}) { - logger.Fatalln(args...) + grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -108,19 +114,19 @@ func Fatalln(args ...interface{}) { // // Deprecated: use Info. func Print(args ...interface{}) { - logger.Info(args...) + grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...interface{}) { - logger.Infof(format, args...) + grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...interface{}) { - logger.Infoln(args...) + grpclog.Logger.Infoln(args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 097494f71..ef06a4822 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,6 +18,8 @@ package grpclog +import "google.golang.org/grpc/internal/grpclog" + // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. @@ -35,7 +37,7 @@ type Logger interface { // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - logger = &loggerWrapper{Logger: l} + grpclog.Logger = &loggerWrapper{Logger: l} } // loggerWrapper wraps Logger into a LoggerV2. diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index d49325776..23612b7c4 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -24,6 +24,8 @@ import ( "log" "os" "strconv" + + "google.golang.org/grpc/internal/grpclog" ) // LoggerV2 does underlying logging work for grpclog. @@ -65,7 +67,8 @@ type LoggerV2 interface { // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. 
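As the comment above says, the logger must be installed before any other gRPC call; with this change it is stored in internal/grpclog, where the channelz-aware logging helpers used elsewhere in this patch also read it. A minimal sketch using the stock NewLoggerV2 constructor; the writer choices are illustrative.

    package app // hypothetical

    import (
        "io/ioutil"
        "os"

        "google.golang.org/grpc/grpclog"
    )

    func init() {
        // Discard INFO, send WARNING and ERROR to stderr.
        grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
    }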
func SetLoggerV2(l LoggerV2) { - logger = l + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) } const ( @@ -193,3 +196,19 @@ func (g *loggerT) Fatalf(format string, args ...interface{}) { func (g *loggerT) V(l int) bool { return l <= g.v } + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// This API is EXPERIMENTAL. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + WarningDepth(depth int, args ...interface{}) + // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go index b43746e61..b5bee4838 100644 --- a/vendor/google.golang.org/grpc/health/client.go +++ b/vendor/google.golang.org/grpc/health/client.go @@ -33,20 +33,20 @@ import ( "google.golang.org/grpc/status" ) -const maxDelay = 120 * time.Second - -var backoffStrategy = backoff.Exponential{MaxDelay: maxDelay} -var backoffFunc = func(ctx context.Context, retries int) bool { - d := backoffStrategy.Backoff(retries) - timer := time.NewTimer(d) - select { - case <-timer.C: - return true - case <-ctx.Done(): - timer.Stop() - return false +var ( + backoffStrategy = backoff.DefaultExponential + backoffFunc = func(ctx context.Context, retries int) bool { + d := backoffStrategy.Backoff(retries) + timer := time.NewTimer(d) + select { + case <-timer.C: + return true + case <-ctx.Done(): + timer.Stop() + return false + } } -} +) func init() { internal.HealthCheckFunc = clientHealthCheck @@ -56,7 +56,7 @@ const healthCheckMethod = "/grpc.health.v1.Health/Watch" // This function implements the protocol defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error { +func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error { tryCnt := 0 retryConnection: @@ -70,7 +70,7 @@ retryConnection: if ctx.Err() != nil { return nil } - setConnectivityState(connectivity.Connecting) + setConnectivityState(connectivity.Connecting, nil) rawS, err := newStream(healthCheckMethod) if err != nil { continue retryConnection @@ -79,7 +79,7 @@ retryConnection: s, ok := rawS.(grpc.ClientStream) // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes. if !ok { - setConnectivityState(connectivity.Ready) + setConnectivityState(connectivity.Ready, nil) return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) } @@ -95,22 +95,22 @@ retryConnection: // Reports healthy for the LBing purposes if health check is not implemented in the server. 
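For context on the grpclog changes above: the package-level helpers now route through the internal grpclog.Logger, and SetLoggerV2 also captures a DepthLoggerV2 when the supplied logger happens to implement it. A minimal sketch of how an application installs a logger (the choice of writers here is illustrative, not part of this change):

    package main

    import (
        "io/ioutil"
        "os"

        "google.golang.org/grpc/grpclog"
    )

    func main() {
        // Discard INFO, keep WARNING and ERROR on stderr. If the installed
        // logger also implemented DepthLoggerV2, the *Depth helpers in
        // internal/grpclog would use it; otherwise they fall back to the
        // plain Info/Warning/Error/Fatal methods.
        grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))

        grpclog.Info("dropped by the discard writer")
        grpclog.Warning("written to stderr")
    }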
if status.Code(err) == codes.Unimplemented { - setConnectivityState(connectivity.Ready) + setConnectivityState(connectivity.Ready, nil) return err } // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. if err != nil { - setConnectivityState(connectivity.TransientFailure) + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err)) continue retryConnection } - // As a message has been received, removes the need for backoff for the next retry by reseting the try count. + // As a message has been received, removes the need for backoff for the next retry by resetting the try count. tryCnt = 0 if resp.Status == healthpb.HealthCheckResponse_SERVING { - setConnectivityState(connectivity.Ready) + setConnectivityState(connectivity.Ready, nil) } else { - setConnectivityState(connectivity.TransientFailure) + setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status)) } } } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 6eeefdafb..e9919c007 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -1,18 +1,12 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: grpc/health/v1/health.proto -package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1" +package grpc_health_v1 import ( fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" - - context "golang.org/x/net/context" - - grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. @@ -24,7 +18,7 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
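The clientHealthCheck function above only runs when a channel opts in through its service config and uses a balancer that supports health checking (pick_first does not). A rough sketch of enabling it on the client side; the backend address is a placeholder and the connection is left insecure for brevity:

    package main

    import (
        "log"

        "google.golang.org/grpc"
        _ "google.golang.org/grpc/health" // init() registers internal.HealthCheckFunc
    )

    func main() {
        conn, err := grpc.Dial("localhost:50051",
            grpc.WithInsecure(),
            // round_robin enables per-subchannel health checking; an empty
            // serviceName asks about the server's overall health.
            grpc.WithDefaultServiceConfig(`{"loadBalancingPolicy":"round_robin","healthCheckConfig":{"serviceName":""}}`),
        )
        if err != nil {
            log.Fatalf("dial: %v", err)
        }
        defer conn.Close()
    }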
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type HealthCheckResponse_ServingStatus int32 @@ -41,6 +35,7 @@ var HealthCheckResponse_ServingStatus_name = map[int32]string{ 2: "NOT_SERVING", 3: "SERVICE_UNKNOWN", } + var HealthCheckResponse_ServingStatus_value = map[string]int32{ "UNKNOWN": 0, "SERVING": 1, @@ -51,8 +46,9 @@ var HealthCheckResponse_ServingStatus_value = map[string]int32{ func (x HealthCheckResponse_ServingStatus) String() string { return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) } + func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0} + return fileDescriptor_e265fd9d4e077217, []int{1, 0} } type HealthCheckRequest struct { @@ -66,16 +62,17 @@ func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } func (*HealthCheckRequest) ProtoMessage() {} func (*HealthCheckRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_health_6b1a06aa67f91efd, []int{0} + return fileDescriptor_e265fd9d4e077217, []int{0} } + func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b) } func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic) } -func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_HealthCheckRequest.Merge(dst, src) +func (m *HealthCheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HealthCheckRequest.Merge(m, src) } func (m *HealthCheckRequest) XXX_Size() int { return xxx_messageInfo_HealthCheckRequest.Size(m) @@ -104,16 +101,17 @@ func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } func (*HealthCheckResponse) ProtoMessage() {} func (*HealthCheckResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_health_6b1a06aa67f91efd, []int{1} + return fileDescriptor_e265fd9d4e077217, []int{1} } + func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b) } func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic) } -func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_HealthCheckResponse.Merge(dst, src) +func (m *HealthCheckResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HealthCheckResponse.Merge(m, src) } func (m *HealthCheckResponse) XXX_Size() int { return xxx_messageInfo_HealthCheckResponse.Size(m) @@ -132,181 +130,14 @@ func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { } func init() { + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") - proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// HealthClient is the client API for Health service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type HealthClient interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. - Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) - // Performs a watch for the serving status of the requested service. - // The server will immediately send back a message indicating the current - // serving status. It will then subsequently send a new message whenever - // the service's serving status changes. - // - // If the requested service is unknown when the call is received, the - // server will send a message setting the serving status to - // SERVICE_UNKNOWN but will *not* terminate the call. If at some - // future point, the serving status of the service becomes known, the - // server will send a new message with the service's serving status. - // - // If the call terminates with status UNIMPLEMENTED, then clients - // should assume this method is not supported and should not retry the - // call. If the call terminates with any other status (including OK), - // clients should retry the call with appropriate exponential backoff. - Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) -} - -type healthClient struct { - cc *grpc.ClientConn -} - -func NewHealthClient(cc *grpc.ClientConn) HealthClient { - return &healthClient{cc} -} - -func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { - out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) - if err != nil { - return nil, err - } - x := &healthWatchClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// HealthServer is the server API for Health service. -type HealthServer interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. - Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) - // Performs a watch for the serving status of the requested service. - // The server will immediately send back a message indicating the current - // serving status. It will then subsequently send a new message whenever - // the service's serving status changes. 
- // - // If the requested service is unknown when the call is received, the - // server will send a message setting the serving status to - // SERVICE_UNKNOWN but will *not* terminate the call. If at some - // future point, the serving status of the service becomes known, the - // server will send a new message with the service's serving status. - // - // If the call terminates with status UNIMPLEMENTED, then clients - // should assume this method is not supported and should not retry the - // call. If the call terminates with any other status (including OK), - // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error -} - -func RegisterHealthServer(s *grpc.Server, srv HealthServer) { - s.RegisterService(&_Health_serviceDesc, srv) -} - -func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(HealthServer).Check(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.health.v1.Health/Check", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(HealthCheckRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream -} - -type healthWatchServer struct { - grpc.ServerStream -} - -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} - -var _Health_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.health.v1.Health", - HandlerType: (*HealthServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Check", - Handler: _Health_Check_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Watch", - Handler: _Health_Watch_Handler, - ServerStreams: true, - }, - }, - Metadata: "grpc/health/v1/health.proto", } -func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) } +func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_e265fd9d4e077217) } -var fileDescriptor_health_6b1a06aa67f91efd = []byte{ +var fileDescriptor_e265fd9d4e077217 = []byte{ // 297 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2, diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go new file mode 100644 index 000000000..f87e3c92a --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -0,0 +1,186 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package grpc_health_v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion6 + +// HealthClient is the client API for Health service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type HealthClient interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. + // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) +} + +type healthClient struct { + cc grpc.ClientConnInterface +} + +func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { + stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) + if err != nil { + return nil, err + } + x := &healthWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Health_WatchClient interface { + Recv() (*HealthCheckResponse, error) + grpc.ClientStream +} + +type healthWatchClient struct { + grpc.ClientStream +} + +func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { + m := new(HealthCheckResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// HealthServer is the server API for Health service. +// All implementations should embed UnimplementedHealthServer +// for forward compatibility +type HealthServer interface { + // If the requested service is unknown, the call will fail with status + // NOT_FOUND. + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // Performs a watch for the serving status of the requested service. + // The server will immediately send back a message indicating the current + // serving status. It will then subsequently send a new message whenever + // the service's serving status changes. 
+ // + // If the requested service is unknown when the call is received, the + // server will send a message setting the serving status to + // SERVICE_UNKNOWN but will *not* terminate the call. If at some + // future point, the serving status of the service becomes known, the + // server will send a new message with the service's serving status. + // + // If the call terminates with status UNIMPLEMENTED, then clients + // should assume this method is not supported and should not retry the + // call. If the call terminates with any other status (including OK), + // clients should retry the call with appropriate exponential backoff. + Watch(*HealthCheckRequest, Health_WatchServer) error +} + +// UnimplementedHealthServer should be embedded to have forward compatible implementations. +type UnimplementedHealthServer struct { +} + +func (*UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") +} +func (*UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { + return status.Errorf(codes.Unimplemented, "method Watch not implemented") +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(HealthCheckRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) +} + +type Health_WatchServer interface { + Send(*HealthCheckResponse) error + grpc.ServerStream +} + +type healthWatchServer struct { + grpc.ServerStream +} + +func (x *healthWatchServer) Send(m *HealthCheckResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Health_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "grpc/health/v1/health.proto", +} diff --git a/vendor/google.golang.org/grpc/health/regenerate.sh b/vendor/google.golang.org/grpc/health/regenerate.sh deleted file mode 100644 index b11eccb29..000000000 --- a/vendor/google.golang.org/grpc/health/regenerate.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Copyright 2018 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
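Beyond the automatic channel health checking, the generated HealthClient above can be used directly. A small sketch that watches a hypothetical service name over an existing connection:

    package healthwatch

    import (
        "context"
        "log"

        "google.golang.org/grpc"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    // watchHealth logs serving-status updates until the stream ends. A server
    // without Watch support ends the stream with codes.Unimplemented.
    func watchHealth(ctx context.Context, conn *grpc.ClientConn, service string) error {
        stream, err := healthpb.NewHealthClient(conn).Watch(ctx, &healthpb.HealthCheckRequest{Service: service})
        if err != nil {
            return err
        }
        for {
            resp, err := stream.Recv()
            if err != nil {
                return err
            }
            log.Printf("%s is %s", service, resp.Status)
        }
    }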
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eux -o pipefail - -TMP=$(mktemp -d) - -function finish { - rm -rf "$TMP" -} -trap finish EXIT - -pushd "$TMP" -mkdir -p grpc/health/v1 -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto - -protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/health/v1/*.proto -popd -rm -f grpc_health_v1/*.pb.go -cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/ - diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go index c79f9d2ab..ed2b3df70 100644 --- a/vendor/google.golang.org/grpc/health/server.go +++ b/vendor/google.golang.org/grpc/health/server.go @@ -16,8 +16,6 @@ * */ -//go:generate ./regenerate.sh - // Package health provides a service that exposes server's health and it must be // imported to enable support for client-side health checks. package health @@ -35,7 +33,8 @@ import ( // Server implements `service Health`. type Server struct { - mu sync.Mutex + healthgrpc.UnimplementedHealthServer + mu sync.RWMutex // If shutdown is true, it's expected all serving status is NOT_SERVING, and // will stay in NOT_SERVING. shutdown bool @@ -54,8 +53,8 @@ func NewServer() *Server { // Check implements `service Health`. func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { - s.mu.Lock() - defer s.mu.Unlock() + s.mu.RLock() + defer s.mu.RUnlock() if servingStatus, ok := s.statusMap[in.Service]; ok { return &healthpb.HealthCheckResponse{ Status: servingStatus, @@ -139,7 +138,7 @@ func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.H // Shutdown sets all serving status to NOT_SERVING, and configures the server to // ignore all future status changes. // -// This changes serving status for all services. To set status for a perticular +// This changes serving status for all services. To set status for a particular // services, call SetServingStatus(). func (s *Server) Shutdown() { s.mu.Lock() @@ -153,7 +152,7 @@ func (s *Server) Shutdown() { // Resume sets all serving status to SERVING, and configures the server to // accept all future status changes. // -// This changes serving status for all services. To set status for a perticular +// This changes serving status for all services. To set status for a particular // services, call SetServingStatus(). func (s *Server) Resume() { s.mu.Lock() diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 1bd0cce5a..5fc0ee3da 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -25,44 +25,39 @@ package backoff import ( "time" + grpcbackoff "google.golang.org/grpc/backoff" "google.golang.org/grpc/internal/grpcrand" ) // Strategy defines the methodology for backing off after a grpc connection // failure. -// type Strategy interface { // Backoff returns the amount of time to wait before the next retry given // the number of consecutive failures. 
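On the server side, the health.Server touched above (now embedding UnimplementedHealthServer and serving Check under an RWMutex) is registered like any other service. A sketch with an illustrative port and service name:

    package main

    import (
        "log"
        "net"

        "google.golang.org/grpc"
        "google.golang.org/grpc/health"
        healthpb "google.golang.org/grpc/health/grpc_health_v1"
    )

    func main() {
        lis, err := net.Listen("tcp", ":50051")
        if err != nil {
            log.Fatalf("listen: %v", err)
        }

        s := grpc.NewServer()
        hs := health.NewServer()
        healthpb.RegisterHealthServer(s, hs)

        // Per-service status; Shutdown()/Resume() later flip every registered
        // service to NOT_SERVING/SERVING in one call, as documented above.
        hs.SetServingStatus("my.pkg.MyService", healthpb.HealthCheckResponse_SERVING)

        if err := s.Serve(lis); err != nil {
            log.Fatalf("serve: %v", err)
        }
    }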
Backoff(retries int) time.Duration } -const ( - // baseDelay is the amount of time to wait before retrying after the first - // failure. - baseDelay = 1.0 * time.Second - // factor is applied to the backoff after each retry. - factor = 1.6 - // jitter provides a range to randomize backoff delays. - jitter = 0.2 -) +// DefaultExponential is an exponential backoff implementation using the +// default values for all the configurable knobs defined in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig} // Exponential implements exponential backoff algorithm as defined in // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. type Exponential struct { - // MaxDelay is the upper bound of backoff delay. - MaxDelay time.Duration + // Config contains all options to configure the backoff algorithm. + Config grpcbackoff.Config } // Backoff returns the amount of time to wait before the next retry given the // number of retries. func (bc Exponential) Backoff(retries int) time.Duration { if retries == 0 { - return baseDelay + return bc.Config.BaseDelay } - backoff, max := float64(baseDelay), float64(bc.MaxDelay) + backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay) for backoff < max && retries > 0 { - backoff *= factor + backoff *= bc.Config.Multiplier retries-- } if backoff > max { @@ -70,7 +65,7 @@ func (bc Exponential) Backoff(retries int) time.Duration { } // Randomize backoff delays so that if a cluster of requests start at // the same time, they won't operate in lockstep. - backoff *= 1 + jitter*(grpcrand.Float64()*2-1) + backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1) if backoff < 0 { return 0 } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index fee6aecd0..b7a3dd8f9 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -25,6 +25,7 @@ import ( "os" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcutil" ) // Logger is the global binary logger. It can be used to get binary logger for @@ -34,7 +35,7 @@ type Logger interface { } // binLogger is the global binary logger for the binary. One of this should be -// built at init time from the configuration (environment varialbe or flags). +// built at init time from the configuration (environment variable or flags). // // It is used to get a methodLogger for each individual method. var binLogger Logger @@ -98,7 +99,7 @@ func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { // New methodLogger with same service overrides the old one. func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { if _, ok := l.services[service]; ok { - return fmt.Errorf("conflicting rules for service %v found", service) + return fmt.Errorf("conflicting service rules for service %v found", service) } if l.services == nil { l.services = make(map[string]*methodLoggerConfig) @@ -112,10 +113,10 @@ func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) // New methodLogger with same method overrides the old one. 
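The internal backoff now takes its knobs from the public backoff.Config instead of package constants, so applications tune reconnect behaviour through dial options. A sketch, with an arbitrary 30s cap in place of the 120s default:

    package dialopts

    import (
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/backoff"
    )

    // withCustomBackoff caps the reconnect backoff; BaseDelay, Multiplier and
    // Jitter keep their defaults from backoff.DefaultConfig.
    func withCustomBackoff() grpc.DialOption {
        cfg := backoff.DefaultConfig
        cfg.MaxDelay = 30 * time.Second
        return grpc.WithConnectParams(grpc.ConnectParams{Backoff: cfg})
    }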
func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { if _, ok := l.blacklist[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) + return fmt.Errorf("conflicting blacklist rules for method %v found", method) } if _, ok := l.methods[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) + return fmt.Errorf("conflicting method rules for method %v found", method) } if l.methods == nil { l.methods = make(map[string]*methodLoggerConfig) @@ -127,10 +128,10 @@ func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) er // Set blacklist method for "-service/method". func (l *logger) setBlacklist(method string) error { if _, ok := l.blacklist[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) + return fmt.Errorf("conflicting blacklist rules for method %v found", method) } if _, ok := l.methods[method]; ok { - return fmt.Errorf("conflicting rules for method %v found", method) + return fmt.Errorf("conflicting method rules for method %v found", method) } if l.blacklist == nil { l.blacklist = make(map[string]struct{}) @@ -146,7 +147,7 @@ func (l *logger) setBlacklist(method string) error { // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. func (l *logger) getMethodLogger(methodName string) *MethodLogger { - s, m, err := parseMethodName(methodName) + s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index 4cc2525df..be30d0e65 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -43,7 +43,7 @@ import ( // Foo. // // If two configs exist for one certain method or service, the one specified -// later overrides the privous config. +// later overrides the previous config. func NewLoggerFromConfigString(s string) Logger { if s == "" { return nil @@ -74,7 +74,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid config: %q, %v", config, err) } if m == "*" { - return fmt.Errorf("invalid config: %q, %v", config, "* not allowd in blacklist config") + return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config") } if suffix != "" { return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config") diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh deleted file mode 100644 index 113d40cbe..000000000 --- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Copyright 2018 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -set -eux -o pipefail - -TMP=$(mktemp -d) - -function finish { - rm -rf "$TMP" -} -trap finish EXIT - -pushd "$TMP" -mkdir -p grpc/binarylog/grpc_binarylog_v1 -curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto - -protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto -popd -rm -f ./grpc_binarylog_v1/*.pb.go -cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/ - diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index 20d044f0f..a2e7c346d 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -63,7 +63,7 @@ func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. // -// Write() marshalls the proto message and writes it to the given writer. Each +// Write() marshals the proto message and writes it to the given writer. Each // message is prefixed with a 4 byte big endian unsigned integer as the length. // // No buffer is done, Close() doesn't try to close the writer. diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 000000000..9f6a0c120 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,85 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import "sync" + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `interface{}`. This means that a call to Put() incurs an extra memory +// allocation, and also that users need a type assertion while reading. For +// performance critical code paths, using Unbounded is strongly discouraged and +// defining a new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan interface{} + mu sync.Mutex + backlog []interface{} +} + +// NewUnbounded returns a new instance of Unbounded. +func NewUnbounded() *Unbounded { + return &Unbounded{c: make(chan interface{}, 1)} +} + +// Put adds t to the unbounded buffer. 
+func (b *Unbounded) Put(t interface{}) { + b.mu.Lock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, t) + b.mu.Unlock() +} + +// Load sends the earliest buffered data, if any, onto the read channel +// returned by Get(). Users are expected to call this every time they read a +// value from the read channel. +func (b *Unbounded) Load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// Get returns a read channel on which values added to the buffer, via Put(), +// are sent on. +// +// Upon reading a value from this channel, users are expected to call Load() to +// send the next buffered value onto the channel if there is any. +func (b *Unbounded) Get() <-chan interface{} { + return b.c +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f0744f993..e4252e5be 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -30,7 +30,7 @@ import ( "sync/atomic" "time" - "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpclog" ) const ( @@ -216,7 +216,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 { // by pid). It returns the unique channelz tracking id assigned to this subchannel. func RegisterSubChannel(c Channel, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a SubChannel's parent id cannot be 0") + grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0") return 0 } id := idGen.genID() @@ -253,7 +253,7 @@ func RegisterServer(s Server, ref string) int64 { // this listen socket. func RegisterListenSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a ListenSocket's parent id cannot be 0") + grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -268,7 +268,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 { // this normal socket. func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a NormalSocket's parent id cannot be 0") + grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -294,7 +294,19 @@ type TraceEventDesc struct { } // AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(id int64, desc *TraceEventDesc) { +func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) { + for d := desc; d != nil; d = d.Parent { + switch d.Severity { + case CtUNKNOWN: + grpclog.InfoDepth(depth+1, d.Desc) + case CtINFO: + grpclog.InfoDepth(depth+1, d.Desc) + case CtWarning: + grpclog.WarningDepth(depth+1, d.Desc) + case CtError: + grpclog.ErrorDepth(depth+1, d.Desc) + } + } if getMaxTraceEntry() == 0 { return } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 000000000..59c7bedec --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,100 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
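The Put/Load/Get contract of the new buffer.Unbounded is easiest to see in a consumer loop. internal/buffer cannot be imported from outside the grpc module, so the sketch below only illustrates the intended call pattern:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/internal/buffer"
    )

    func main() {
        b := buffer.NewUnbounded()

        // Put never blocks: the first value lands on the 1-element channel,
        // the rest queue up in the backlog slice.
        for i := 0; i < 3; i++ {
            b.Put(i)
        }

        for i := 0; i < 3; i++ {
            v := (<-b.Get()).(int) // values come back as interface{}
            b.Load()               // push the next backlogged value, if any
            fmt.Println(v)
        }
    }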
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// Info logs through grpclog.Info and adds a trace event if channelz is on. +func Info(id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtINFO, + }) + } else { + grpclog.InfoDepth(1, args...) + } +} + +// Infof logs through grpclog.Infof and adds a trace event if channelz is on. +func Infof(id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtINFO, + }) + } else { + grpclog.InfoDepth(1, msg) + } +} + +// Warning logs through grpclog.Warning and adds a trace event if channelz is on. +func Warning(id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) + } else { + grpclog.WarningDepth(1, args...) + } +} + +// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on. +func Warningf(id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtWarning, + }) + } else { + grpclog.WarningDepth(1, msg) + } +} + +// Error logs through grpclog.Error and adds a trace event if channelz is on. +func Error(id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) + } else { + grpclog.ErrorDepth(1, args...) + } +} + +// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on. +func Errorf(id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtError, + }) + } else { + grpclog.ErrorDepth(1, msg) + } +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 3ee8740f1..73931a94b 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -25,11 +25,14 @@ import ( ) const ( - prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" ) var ( // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 000000000..8c8e19fce --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,118 @@ +/* + * + * Copyright 2020 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Info(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +func WarningDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warning(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { + Logger.Error(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatal(args...) + } +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. 
Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// This is a copy of the DepthLoggerV2 defined in the external grpclog package. +// It is defined here to avoid a circular dependency. +// +// This API is EXPERIMENTAL. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + WarningDepth(depth int, args ...interface{}) + // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go new file mode 100644 index 000000000..f6e0dc1da --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +// PrefixLogger does logging with a prefix. +// +// Logging method on a nil logs without any prefix. +type PrefixLogger struct { + prefix string +} + +// Infof does info logging. +func (pl *PrefixLogger) Infof(format string, args ...interface{}) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + } + Logger.Infof(format, args...) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + } + Logger.Warningf(format, args...) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + } + Logger.Errorf(format, args...) +} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + if Logger.V(2) { + pl.Infof(format, args...) + } +} + +// NewPrefixLogger creates a prefix logger with the given prefix. 
+func NewPrefixLogger(prefix string) *PrefixLogger { + return &PrefixLogger{prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go similarity index 82% rename from vendor/google.golang.org/grpc/internal/binarylog/util.go rename to vendor/google.golang.org/grpc/internal/grpcutil/method.go index 15dc7803d..2c2ff7732 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/util.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -16,18 +16,17 @@ * */ -package binarylog +package grpcutil import ( "errors" "strings" ) -// parseMethodName splits service and method from the input. It expects format +// ParseMethod splits service and method from the input. It expects format // "/service/method". // -// TODO: move to internal/grpcutil. -func parseMethodName(methodName string) (service, method string, _ error) { +func ParseMethod(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") } diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go new file mode 100644 index 000000000..80b33cdaf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/target.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides a bunch of utility functions to be used across the +// gRPC codebase. +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/resolver" +) + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// ParseTarget splits target into a resolver.Target struct containing scheme, +// authority and endpoint. +// +// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: +// target}. 
+func ParseTarget(target string) (ret resolver.Target) { + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index bc1f99ac8..818ca8579 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -25,12 +25,11 @@ import ( "time" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/serviceconfig" ) var ( - // WithResolverBuilder is exported by dialoptions.go - WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption - // WithHealthCheckFunc is not exported by dialoptions.go + // WithHealthCheckFunc is set by dialoptions.go WithHealthCheckFunc interface{} // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker @@ -39,14 +38,20 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfig is a function to parse JSON service configs into - // opaque data structures. - ParseServiceConfig func(sc string) (interface{}, error) - // StatusRawProto is exported by status/status.go. This func returns a - // pointer to the wrapped Status proto for a given status.Status without a - // call to proto.Clone(). The returned Status proto should not be mutated by - // the caller. - StatusRawProto interface{} // func (*status.Status) *spb.Status + // NewRequestInfoContext creates a new context based on the argument context attaching + // the passed in RequestInfo to the new context. + NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context + // NewClientHandshakeInfoContext returns a copy of the input context with + // the passed in ClientHandshakeInfo struct added to it. + NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context + // ParseServiceConfigForTesting is for creating a fake + // ClientConn for resolver testing only + ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // EqualServiceConfigForTesting is for testing service config generation and + // parsing. Both a and b should be returned by ParseServiceConfigForTesting. + // This function compares the config without rawJSON stripped, in case the + // there's difference in white space. + EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -57,7 +62,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. 
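ParseTarget above implements the scheme://authority/endpoint target syntax, falling back to a bare endpoint when the string does not match. Since grpcutil is internal, here is a standalone sketch of the same splitting rule with arbitrary sample targets:

    package main

    import (
        "fmt"
        "strings"
    )

    // splitTarget mirrors ParseTarget from the diff: split once on "://" for
    // the scheme, then once on "/" for authority vs endpoint; anything that
    // does not match is treated as a bare endpoint.
    func splitTarget(target string) (scheme, authority, endpoint string) {
        p := strings.SplitN(target, "://", 2)
        if len(p) < 2 {
            return "", "", target
        }
        q := strings.SplitN(p[1], "/", 2)
        if len(q) < 2 {
            return "", "", target
        }
        return p[0], q[0], q[1]
    }

    func main() {
        fmt.Println(splitTarget("dns://8.8.8.8/example.com:443")) // dns 8.8.8.8 example.com:443
        fmt.Println(splitTarget("example.com:443"))               // bare endpoint, no scheme/authority
    }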
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go similarity index 71% rename from vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go rename to vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 297492e87..9d08dd8ab 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -32,19 +32,24 @@ import ( "sync" "time" + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" ) +// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB +// addresses from SRV records. Must not be changed after init time. +var EnableSRVLookups = false + func init() { resolver.Register(NewBuilder()) } const ( defaultPort = "443" - defaultFreq = time.Minute * 30 defaultDNSSvrPort = "53" golang = "GO" // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. @@ -94,47 +99,33 @@ var customAuthorityResolver = func(authority string) (netResolver, error) { // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. func NewBuilder() resolver.Builder { - return &dnsBuilder{minFreq: defaultFreq} + return &dnsBuilder{} } -type dnsBuilder struct { - // minimum frequency of polling the DNS server. - minFreq time.Duration -} +type dnsBuilder struct{} // Build creates and starts a DNS resolver that watches the name resolution of the target. -func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { host, port, err := parseTarget(target.Endpoint, defaultPort) if err != nil { return nil, err } // IP address. - if net.ParseIP(host) != nil { - host, _ = formatIP(host) - addr := []resolver.Address{{Addr: host + ":" + port}} - i := &ipResolver{ - cc: cc, - ip: addr, - rn: make(chan struct{}, 1), - q: make(chan struct{}), - } - cc.NewAddress(addr) - go i.watcher() - return i, nil + if ipAddr, ok := formatIP(host); ok { + addr := []resolver.Address{{Addr: ipAddr + ":" + port}} + cc.UpdateState(resolver.State{Addresses: addr}) + return deadResolver{}, nil } // DNS address (non-IP). ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ - freq: b.minFreq, - backoff: backoff.Exponential{MaxDelay: b.minFreq}, host: host, port: port, ctx: ctx, cancel: cancel, cc: cc, - t: time.NewTimer(0), rn: make(chan struct{}, 1), disableServiceConfig: opts.DisableServiceConfig, } @@ -150,6 +141,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts d.wg.Add(1) go d.watcher() + d.ResolveNow(resolver.ResolveNowOptions{}) return d, nil } @@ -164,53 +156,23 @@ type netResolver interface { LookupTXT(ctx context.Context, name string) (txts []string, err error) } -// ipResolver watches for the name resolution update for an IP address. -type ipResolver struct { - cc resolver.ClientConn - ip []resolver.Address - // rn channel is used by ResolveNow() to force an immediate resolution of the target. 
- rn chan struct{} - q chan struct{} -} - -// ResolveNow resend the address it stores, no resolution is needed. -func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) { - select { - case i.rn <- struct{}{}: - default: - } -} +// deadResolver is a resolver that does nothing. +type deadResolver struct{} -// Close closes the ipResolver. -func (i *ipResolver) Close() { - close(i.q) -} +func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} -func (i *ipResolver) watcher() { - for { - select { - case <-i.rn: - i.cc.NewAddress(i.ip) - case <-i.q: - return - } - } -} +func (deadResolver) Close() {} // dnsResolver watches for the name resolution update for a non-IP target. type dnsResolver struct { - freq time.Duration - backoff backoff.Exponential - retryCount int - host string - port string - resolver netResolver - ctx context.Context - cancel context.CancelFunc - cc resolver.ClientConn + host string + port string + resolver netResolver + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn // rn channel is used by ResolveNow() to force an immediate resolution of the target. rn chan struct{} - t *time.Timer // wg is used to enforce Close() to return after the watcher() goroutine has finished. // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we // replace the real lookup functions with mocked ones to facilitate testing. @@ -222,7 +184,7 @@ type dnsResolver struct { } // ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. -func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) { +func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: default: @@ -233,7 +195,6 @@ func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) { func (d *dnsResolver) Close() { d.cancel() d.wg.Wait() - d.t.Stop() } func (d *dnsResolver) watcher() { @@ -242,27 +203,15 @@ func (d *dnsResolver) watcher() { select { case <-d.ctx.Done(): return - case <-d.t.C: case <-d.rn: - if !d.t.Stop() { - // Before resetting a timer, it should be stopped to prevent racing with - // reads on it's channel. - <-d.t.C - } } - result, sc := d.lookup() - // Next lookup should happen within an interval defined by d.freq. It may be - // more often due to exponential retry on empty address list. - if len(result) == 0 { - d.retryCount++ - d.t.Reset(d.backoff.Backoff(d.retryCount)) + state, err := d.lookup() + if err != nil { + d.cc.ReportError(err) } else { - d.retryCount = 0 - d.t.Reset(d.freq) + d.cc.UpdateState(*state) } - d.cc.NewServiceConfig(sc) - d.cc.NewAddress(result) // Sleep to prevent excessive re-resolutions. Incoming resolution requests // will be queued in d.rn. 
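The rewritten watcher drops the old timer/backoff machinery: it re-resolves only when ResolveNow is signalled, reports lookup failures through the ClientConn, and otherwise pushes a fresh State. A rough standalone sketch of that push-style loop; the watcher type and its callback fields are stand-ins for the real resolver plumbing, not gRPC API:

package main

import (
    "context"
    "fmt"
    "time"
)

// state stands in for resolver.State; updateState and reportError stand in
// for ClientConn.UpdateState and ClientConn.ReportError.
type state struct{ addrs []string }

type watcher struct {
    ctx         context.Context
    rn          chan struct{} // ResolveNow requests are coalesced here.
    lookup      func() (*state, error)
    updateState func(state)
    reportError func(error)
}

// run mirrors the shape of the rewritten dnsResolver.watcher: resolve on
// demand, push either the result or the error, then rate-limit re-resolution.
func (w *watcher) run(minInterval time.Duration) {
    for {
        select {
        case <-w.ctx.Done():
            return
        case <-w.rn:
        }
        if s, err := w.lookup(); err != nil {
            w.reportError(err)
        } else {
            w.updateState(*s)
        }
        time.Sleep(minInterval) // stand-in for the "prevent excessive re-resolutions" wait
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    w := &watcher{
        ctx:         ctx,
        rn:          make(chan struct{}, 1),
        lookup:      func() (*state, error) { return &state{addrs: []string{"10.0.0.1:443"}}, nil },
        updateState: func(s state) { fmt.Println("update:", s.addrs) },
        reportError: func(err error) { fmt.Println("error:", err) },
    }
    go w.run(10 * time.Millisecond)
    w.rn <- struct{}{} // what ResolveNow does
    time.Sleep(50 * time.Millisecond)
}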
@@ -276,37 +225,68 @@ func (d *dnsResolver) watcher() { } } -func (d *dnsResolver) lookupSRV() []resolver.Address { +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + if !EnableSRVLookups { + return nil, nil + } var newAddrs []resolver.Address _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil + err = handleDNSError(err, "SRV") // may become nil + return nil, err } for _, s := range srvs { lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) if err != nil { - grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err) - continue + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. + continue + } + return nil, err } for _, a := range lbAddrs { - a, ok := formatIP(a) + ip, ok := formatIP(a) if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } - addr := a + ":" + strconv.Itoa(int(s.Port)) - newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) } } - return newAddrs + return newAddrs, nil +} + +var filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + return err +} + +func handleDNSError(err error, lookupType string) error { + err = filterError(err) + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + grpclog.Infoln(err) + } + return err } -func (d *dnsResolver) lookupTXT() string { +func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) if err != nil { - grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err) - return "" + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { + return &serviceconfig.ParseResult{Err: err} + } + return nil } var res string for _, s := range ss { @@ -315,40 +295,47 @@ func (d *dnsResolver) lookupTXT() string { // TXT record must have "grpc_config=" attribute in order to be used as service config. if !strings.HasPrefix(res, txtAttribute) { - grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute) - return "" + grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service config. 
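handleDNSError and filterError decide which DNS failures are worth retrying: timeouts and temporary errors are surfaced so the channel can retry with backoff, while anything else is treated as "record absent" and dropped. A small sketch of that check against net.DNSError, assuming the pre-Go 1.13 behaviour shown in this hunk (the go113.go file added later in the diff switches the test to IsNotFound):

package main

import (
    "fmt"
    "net"
)

// filterDNSError mirrors the vendored filterError: keep timeouts and
// temporary failures so the caller can retry with backoff, and suppress
// everything else, which is taken to mean the record simply does not exist.
func filterDNSError(err error) error {
    if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
        return nil
    }
    return err
}

func main() {
    timeout := &net.DNSError{Err: "i/o timeout", Name: "example.com", IsTimeout: true}
    missing := &net.DNSError{Err: "no such host", Name: "example.com"}

    fmt.Println(filterDNSError(timeout)) // kept: lookup example.com: i/o timeout
    fmt.Println(filterDNSError(missing)) // suppressed: <nil>
}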
+ return nil } - return strings.TrimPrefix(res, txtAttribute) + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) } -func (d *dnsResolver) lookupHost() []resolver.Address { +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil + err = handleDNSError(err, "A") + return nil, err } for _, a := range addrs { - a, ok := formatIP(a) + ip, ok := formatIP(a) if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } - addr := a + ":" + d.port + addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) } - return newAddrs + return newAddrs, nil } -func (d *dnsResolver) lookup() ([]resolver.Address, string) { - newAddrs := d.lookupSRV() - // Support fallback to non-balancer address. - newAddrs = append(newAddrs, d.lookupHost()...) - if d.disableServiceConfig { - return newAddrs, "" +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT() } - sc := d.lookupTXT() - return newAddrs, canaryingSC(sc) + return &state, nil } // formatIP returns ok = false if addr is not a valid textual representation of an IP address. @@ -434,12 +421,12 @@ func canaryingSC(js string) string { var rcs []rawChoice err := json.Unmarshal([]byte(js), &rcs) if err != nil { - grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) + grpclog.Warningf("dns: error parsing service config json: %v", err) return "" } cliHostname, err := os.Hostname() if err != nil { - grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) + grpclog.Warningf("dns: error getting client hostname: %v", err) return "" } var sc string diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go new file mode 100644 index 000000000..8783a8cf8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/go113.go @@ -0,0 +1,33 @@ +// +build go1.13 + +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import "net" + +func init() { + filterError = func(err error) error { + if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound { + // The name does not exist; not an error. 
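The new lookup folds the three queries into a single resolver.State: A-record addresses, grpclb balancer addresses from SRV, and a parsed service config from TXT, with the host-lookup error only being fatal when SRV cannot supply balancer addresses either. A toy sketch of that merge rule; the state struct and its field names are illustrative, not the real resolver.State:

package main

import (
    "errors"
    "fmt"
)

// state is a local stand-in for resolver.State; the field names are illustrative.
type state struct {
    Addresses         []string
    BalancerAddresses []string
    ServiceConfigJSON string
}

// merge follows the shape of the rewritten dnsResolver.lookup: the A-record
// error is fatal only if the SRV lookup cannot supply balancer addresses
// either; otherwise a partial state is still published.
func merge(addrs []string, hostErr error, srv []string, srvErr error, scJSON string) (*state, error) {
    if hostErr != nil && (srvErr != nil || len(srv) == 0) {
        return nil, hostErr
    }
    s := &state{Addresses: addrs, ServiceConfigJSON: scJSON}
    if len(srv) > 0 {
        s.BalancerAddresses = srv
    }
    return s, nil
}

func main() {
    // A-record lookup failed but SRV produced grpclb addresses: still usable.
    fmt.Println(merge(nil, errors.New("A lookup failed"), []string{"lb.example.com:443"}, nil, ""))

    // Both lookups failed: surface the host error.
    fmt.Println(merge(nil, errors.New("A lookup failed"), nil, errors.New("SRV lookup failed"), ""))
}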
+ return nil + } + return err + } +} diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go similarity index 94% rename from vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go rename to vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 893d5d12c..520d9229e 100644 --- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -26,7 +26,7 @@ const scheme = "passthrough" type passthroughBuilder struct{} -func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { r := &passthroughResolver{ target: target, cc: cc, @@ -48,7 +48,7 @@ func (r *passthroughResolver) start() { r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) } -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 000000000..9b26414d4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,90 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +// BalancerConfig is the balancer config part that service config's +// loadBalancingConfig fields can be unmarshalled to. It's a json unmarshaller. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// UnmarshalJSON implements json unmarshaller. +func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. 
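BalancerConfig.UnmarshalJSON expects loadBalancingConfig to be a list of single-entry objects and selects the first entry whose policy name is registered, skipping unknown ones. A stripped-down sketch of that selection rule; the registered map here stands in for the balancer registry consulted via balancer.Get:

package main

import (
    "encoding/json"
    "fmt"
)

// registered stands in for the balancer registry; the entries are hypothetical.
var registered = map[string]bool{"round_robin": true}

// pickPolicy follows the same contract as BalancerConfig.UnmarshalJSON:
// loadBalancingConfig is a list of single-entry objects, the first entry
// whose policy name is registered wins, and unknown policies are skipped.
func pickPolicy(js string) (string, json.RawMessage, error) {
    var entries []map[string]json.RawMessage
    if err := json.Unmarshal([]byte(js), &entries); err != nil {
        return "", nil, err
    }
    for i, e := range entries {
        if len(e) != 1 {
            return "", nil, fmt.Errorf("entry %d does not contain exactly one policy", i)
        }
        for name, cfg := range e {
            if registered[name] {
                return name, cfg, nil
            }
        }
    }
    return "", nil, fmt.Errorf("no supported policies found")
}

func main() {
    js := `[{"does_not_exist": {}}, {"round_robin": {}}]`
    name, cfg, err := pickPolicy(js)
    fmt.Println(name, string(cfg), err) // round_robin {} <nil>
}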
+ for name, jsonCfg = range lbcfg { + } + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found") +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 000000000..710223b8d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). 
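The internal status package introduced here backs the public google.golang.org/grpc/status API: an OK code maps to a nil error, any other code to an error carrying the code and message. A short usage sketch against the public status and codes packages (not the vendored internal one):

package main

import (
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

func main() {
    // A Status whose code is OK always yields a nil error.
    if err := status.New(codes.OK, "ignored").Err(); err == nil {
        fmt.Println("OK maps to a nil error")
    }

    // Any other code round-trips through the error and back to a Status.
    err := status.Error(codes.NotFound, "object missing")
    if st, ok := status.FromError(err); ok {
        fmt.Println(st.Code(), st.Message()) // NotFound object missing
    }
}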
+func Errorf(c codes.Code, format string, a ...interface{}) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return &Error{e: s.Proto()} +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Error wraps a pointer of a status proto. It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + e *spb.Status +} + +func (e *Error) Error() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage()) +} + +// GRPCStatus returns the Status represented by se. +func (e *Error) GRPCStatus() *Status { + return FromProto(e.e) +} + +// Is implements future error.Is functionality. +// A Error is equivalent if the code and message are identical. +func (e *Error) Is(target error) bool { + tse, ok := target.(*Error) + if !ok { + return false + } + return proto.Equal(e.e, tse.e) +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index d3fd9dab3..ae0a9117e 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -18,6 +18,8 @@ * */ +// Package syscall provides functionalities that grpc uses to get low-level +// operating system stats/info. 
package syscall import ( diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index b8e0aa4db..d4bb19c3b 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -107,8 +107,8 @@ func (*registerStream) isTransportResponseFrame() bool { return false } type headerFrame struct { streamID uint32 hf []hpack.HeaderField - endStream bool // Valid on server side. - initStream func(uint32) (bool, error) // Used only on the client side. + endStream bool // Valid on server side. + initStream func(uint32) error // Used only on the client side. onWrite func() wq *writeQuota // write quota for the stream created. cleanup *cleanupStream // Valid on the server side. @@ -637,21 +637,17 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { func (l *loopyWriter) originateStream(str *outStream) error { hdr := str.itl.dequeue().(*headerFrame) - sendPing, err := hdr.initStream(str.id) - if err != nil { + if err := hdr.initStream(str.id); err != nil { if err == ErrConnClosing { return err } // Other errors(errStreamDrain) need not close transport. return nil } - if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } l.estdStreams[str.id] = str - if sendPing { - return l.pingHandler(&ping{data: [8]byte{}}) - } return nil } @@ -861,38 +857,45 @@ func (l *loopyWriter) processData() (bool, error) { return false, nil } var ( - idx int buf []byte ) - if len(dataItem.h) != 0 { // data header has not been written out yet. - buf = dataItem.h - } else { - idx = 1 - buf = dataItem.d - } - size := http2MaxFrameLen - if len(buf) < size { - size = len(buf) - } + // Figure out the maximum size we can send + maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. str.state = waitingOnStreamQuota return false, nil - } else if strQuota < size { - size = strQuota + } else if maxSize > strQuota { + maxSize = strQuota + } + if maxSize > int(l.sendQuota) { // connection-level flow control. + maxSize = int(l.sendQuota) + } + // Compute how much of the header and data we can send within quota and max frame length + hSize := min(maxSize, len(dataItem.h)) + dSize := min(maxSize-hSize, len(dataItem.d)) + if hSize != 0 { + if dSize == 0 { + buf = dataItem.h + } else { + // We can add some data to grpc message header to distribute bytes more equally across frames. + // Copy on the stack to avoid generating garbage + var localBuf [http2MaxFrameLen]byte + copy(localBuf[:hSize], dataItem.h) + copy(localBuf[hSize:], dataItem.d[:dSize]) + buf = localBuf[:hSize+dSize] + } + } else { + buf = dataItem.d } - if l.sendQuota < uint32(size) { // connection-level flow control. - size = int(l.sendQuota) - } + size := hSize + dSize + // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && size == len(buf) { - // buf contains either data or it contains header but data is empty. 
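The rewritten processData sizes each DATA frame as the minimum of the HTTP/2 max frame length, the stream-level quota and the connection-level quota, sending as much of the gRPC message header as fits and topping the frame up with payload bytes. A small sketch of that arithmetic; frameSizes is a hypothetical helper, not part of the transport:

package main

import "fmt"

const http2MaxFrameLen = 16384 // the max frame size the transport uses

func min(a, b int) int {
    if a < b {
        return a
    }
    return b
}

// frameSizes reproduces the quota arithmetic in the rewritten processData:
// cap the frame by the max frame length and both flow-control quotas, put in
// as much of the gRPC message header as fits, then fill with data bytes.
func frameSizes(hdrLen, dataLen, streamQuota, connQuota int) (hSize, dSize int) {
    maxSize := http2MaxFrameLen
    maxSize = min(maxSize, streamQuota)
    maxSize = min(maxSize, connQuota)
    hSize = min(maxSize, hdrLen)
    dSize = min(maxSize-hSize, dataLen)
    return hSize, dSize
}

func main() {
    // 5-byte gRPC header plus 100 KB of data, plenty of quota: the first
    // frame carries the header and 16379 bytes of data.
    fmt.Println(frameSizes(5, 100*1024, 1<<20, 1<<20)) // 5 16379

    // Tight stream quota: only part of the header fits this round.
    fmt.Println(frameSizes(5, 100*1024, 3, 1<<20)) // 3 0
}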
- if idx == 1 || len(dataItem.d) == 0 { - endStream = true - } + if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() @@ -900,14 +903,10 @@ func (l *loopyWriter) processData() (bool, error) { if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { return false, err } - buf = buf[size:] str.bytesOutStanding += size l.sendQuota -= uint32(size) - if idx == 0 { - dataItem.h = buf - } else { - dataItem.d = buf - } + dataItem.h = dataItem.h[hSize:] + dataItem.d = dataItem.d[dSize:] if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. str.itl.dequeue() @@ -928,3 +927,10 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 78f9ddc3d..fc44e9761 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -112,11 +112,10 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta // at this point to be speaking over HTTP/2, so it's able to speak valid // gRPC. type serverHandlerTransport struct { - rw http.ResponseWriter - req *http.Request - timeoutSet bool - timeout time.Duration - didCommonHeaders bool + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration headerMD metadata.MD @@ -186,8 +185,11 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro ht.writeStatusMu.Lock() defer ht.writeStatusMu.Unlock() + headersWritten := s.updateHeaderSent() err := ht.do(func() { - ht.writeCommonHeaders(s) + if !headersWritten { + ht.writePendingHeaders(s) + } // And flush, in case no header or body has been sent yet. // This forces a separation of headers and trailers if this is the @@ -227,21 +229,27 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro if err == nil { // transport has not been closed if ht.stats != nil { - ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) } } ht.Close() return err } +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + // writeCommonHeaders sets common headers on the first write // call (Write, WriteHeader, or WriteStatus). func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { - if ht.didCommonHeaders { - return - } - ht.didCommonHeaders = true - h := ht.rw.Header() h["Date"] = nil // suppress Date to make tests happy; TODO: restore h.Set("Content-Type", ht.contentType) @@ -260,9 +268,30 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } } +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus). 
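The handler-server changes funnel header emission through updateHeaderSent, so whichever of Write, WriteHeader or WriteStatus runs first writes the pending common and custom headers exactly once. A minimal sketch of that write-once latch using an atomic swap; the names here are illustrative, not the transport's actual Stream:

package main

import (
    "fmt"
    "sync/atomic"
)

// stream carries the same kind of "headers already sent" latch the transport
// keeps on its Stream.
type stream struct {
    headerSent uint32
}

// updateHeaderSent sets the latch and reports whether it was already set, so
// exactly one of Write, WriteHeader or WriteStatus emits the pending headers.
func (s *stream) updateHeaderSent() bool {
    return atomic.SwapUint32(&s.headerSent, 1) == 1
}

func writePendingHeaders() { fmt.Println("headers written") }

func main() {
    s := &stream{}
    for i := 0; i < 3; i++ { // Write, WriteHeader and WriteStatus all pass through here
        if !s.updateHeaderSent() {
            writePendingHeaders() // runs exactly once
        }
    }
}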
+func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + headersWritten := s.updateHeaderSent() return ht.do(func() { - ht.writeCommonHeaders(s) + if !headersWritten { + ht.writePendingHeaders(s) + } ht.rw.Write(hdr) ht.rw.Write(data) ht.rw.(http.Flusher).Flush() @@ -270,26 +299,28 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() err := ht.do(func() { - ht.writeCommonHeaders(s) - h := ht.rw.Header() - for k, vv := range md { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - v = encodeMetadataHeader(k, v) - h.Add(k, v) - } + if !headersWritten { + ht.writePendingHeaders(s) } + ht.rw.WriteHeader(200) ht.rw.(http.Flusher).Flush() }) if err == nil { if ht.stats != nil { - ht.stats.HandleRPC(s.Context(), &stats.OutHeader{}) + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) } } return err @@ -334,7 +365,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace Addr: ht.RemoteAddr(), } if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 41a79c567..b43e21ffa 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -35,17 +35,25 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) +// clientConnectionCounter counts the number of connections a client has +// initiated (equal to the number of http2Clients created). Must be accessed +// atomically. +var clientConnectionCounter uint64 + // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. ctx context.Context cancel context.CancelFunc ctxDone <-chan struct{} // Cache the ctx.Done() chan. @@ -62,8 +70,6 @@ type http2Client struct { // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) // that the server sent GoAway on this transport. goAway chan struct{} - // awakenKeepalive is used to wake up keepalive when after it has gone dormant. 
- awakenKeepalive chan struct{} framer *framer // controlBuf delivers all the control related tasks (e.g., window @@ -77,9 +83,6 @@ type http2Client struct { perRPCCreds []credentials.PerRPCCredentials - // Boolean to keep track of reading activity on transport. - // 1 is true and 0 is false. - activity uint32 // Accessed atomically. kp keepalive.ClientParameters keepaliveEnabled bool @@ -110,6 +113,16 @@ type http2Client struct { // goAwayReason records the http2.ErrCode and debug data received with the // GoAway frame. goAwayReason GoAwayReason + // A condition variable used to signal when the keepalive goroutine should + // go dormant. The condition for dormancy is based on the number of active + // streams and the `PermitWithoutStream` keepalive client parameter. And + // since the number of active streams is guarded by the above mutex, we use + // the same for this condition variable as well. + kpDormancyCond *sync.Cond + // A boolean to track whether the keepalive goroutine is dormant or not. + // This is checked before attempting to signal the above condition + // variable. + kpDormant bool // Fields below are for channelz metric collection. channelzID int64 // channelz unique identification number @@ -119,6 +132,8 @@ type http2Client struct { onClose func() bufferPool *bufferPool + + connectionID uint64 } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { @@ -147,7 +162,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -200,12 +215,20 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne } } if transportCreds != nil { - scheme = "https" - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn) + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the credential handshaker. This makes it possible for + // address specific arbitrary data to reach the credential handshaker. 
+ contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context) + connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } } dynamicWindow := true icwz := int32(initialWindowSize) @@ -232,7 +255,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - awakenKeepalive: make(chan struct{}, 1), framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, @@ -264,9 +286,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne updateFlowControl: t.updateFlowControl, } } - // Make sure awakenKeepalive can't be written upon. - // keepalive routine will make it writable, if need be. - t.awakenKeepalive <- struct{}{} if t.statsHandler != nil { t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ RemoteAddr: t.remoteAddr, @@ -281,6 +300,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) } if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } // Start the reader goroutine for incoming message. Each transport has @@ -325,6 +345,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne } } + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + if err := t.framer.writer.Flush(); err != nil { return nil, err } @@ -347,6 +369,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { // TODO(zhaoq): Handle uint32 overflow of Stream.id. s := &Stream{ + ct: t, done: make(chan struct{}), method: callHdr.Method, sendCompress: callHdr.SendCompress, @@ -380,23 +403,24 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { } func (t *http2Client) getPeer() *peer.Peer { - pr := &peer.Peer{ - Addr: t.remoteAddr, + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, } - // Attach Auth info if there is any. 
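The handshake-info plumbing above relies on a pattern used throughout this diff: the package that owns the concrete function stores it in an exported interface{} variable inside the internal package, and callers type-assert it back to its concrete func type, avoiding an import cycle without exposing the symbol publicly. A compressed single-package illustration of the pattern; requestInfo and NewRequestInfoContext here are local stand-ins, not the real credentials types:

package main

import (
    "context"
    "fmt"
)

// requestInfo and ctxKey stand in for credentials.RequestInfo and the
// unexported context key used by the real packages.
type requestInfo struct{ Method string }

type ctxKey struct{}

// NewRequestInfoContext is deliberately declared as interface{}; callers
// assert it back to its concrete func type before calling.
var NewRequestInfoContext interface{}

func init() {
    NewRequestInfoContext = func(ctx context.Context, ri requestInfo) context.Context {
        return context.WithValue(ctx, ctxKey{}, ri)
    }
}

func main() {
    attach := NewRequestInfoContext.(func(context.Context, requestInfo) context.Context)
    ctx := attach(context.Background(), requestInfo{Method: "/pkg.Service/Method"})
    fmt.Println(ctx.Value(ctxKey{})) // {/pkg.Service/Method}
}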
- if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - return pr } func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { aud := t.createAudience(callHdr) - authData, err := t.getTrAuthData(ctx, aud) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) if err != nil { return nil, err } - callAuthData, err := t.getCallAuthData(ctx, aud, callHdr) + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) if err != nil { return nil, err } @@ -419,6 +443,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. @@ -538,13 +563,26 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call return callAuthData, nil } +// PerformedIOError wraps an error to indicate IO may have been performed +// before the error occurred. +type PerformedIOError struct { + Err error +} + +// Error implements error. +func (p PerformedIOError) Error() string { + return p.Err.Error() +} + // NewStream creates a stream and registers it into the transport as "active" // streams. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { ctx = peer.NewContext(ctx, t.getPeer()) headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { - return nil, err + // We may have performed I/O in the per-RPC creds callback, so do not + // allow transparent retry. + return nil, PerformedIOError{err} } s := t.newStream(ctx, callHdr) cleanup := func(err error) { @@ -564,7 +602,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) (bool, error) { + initStream: func(id uint32) error { t.mu.Lock() if state := t.state; state != reachable { t.mu.Unlock() @@ -574,29 +612,19 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea err = ErrConnClosing } cleanup(err) - return false, err + return err } t.activeStreams[id] = s if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) } - var sendPing bool - // If the number of active streams change from 0 to 1, then check if keepalive - // has gone dormant. If so, wake it up. - if len(t.activeStreams) == 1 && t.keepaliveEnabled { - select { - case t.awakenKeepalive <- struct{}{}: - sendPing = true - // Fill the awakenKeepalive channel again as this channel must be - // kept non-writable except at the point that the keepalive() - // goroutine is waiting either to be awaken or shutdown. - t.awakenKeepalive <- struct{}{} - default: - } + // If the keepalive goroutine has gone dormant, wake it up. 
+ if t.kpDormant { + t.kpDormancyCond.Signal() } t.mu.Unlock() - return sendPing, nil + return nil }, onOrphaned: cleanup, wq: s.wq, @@ -674,12 +702,21 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } } if t.statsHandler != nil { + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. outHeader := &stats.OutHeader{ Client: true, FullMethod: callHdr.Method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: callHdr.SendCompress, + Header: header, } t.statsHandler.HandleRPC(s.ctx, outHeader) } @@ -778,6 +815,11 @@ func (t *http2Client) Close() error { t.state = closing streams := t.activeStreams t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. + t.kpDormancyCond.Signal() + } t.mu.Unlock() t.controlBuf.finish() t.cancel() @@ -834,18 +876,10 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e df := &dataFrame{ streamID: s.id, endStream: opts.Last, + h: hdr, + d: data, } - if hdr != nil || data != nil { // If it's not an empty data frame. - // Add some data to grpc message header so that we can equally - // distribute bytes across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] - df.h, df.d = hdr, data - // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. + if hdr != nil || data != nil { // If it's not an empty data frame, check quota. if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { return err } @@ -853,11 +887,11 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e return t.controlBuf.put(df) } -func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { +func (t *http2Client) getStream(f http2.Frame) *Stream { t.mu.Lock() - defer t.mu.Unlock() - s, ok := t.activeStreams[f.Header().StreamID] - return s, ok + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s } // adjustWindow sends out extra window update over the initial window size @@ -937,8 +971,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) { t.controlBuf.put(bdpPing) } // Select the right stream to dispatch. - s, ok := t.getStream(f) - if !ok { + s := t.getStream(f) + if s == nil { return } if size > 0 { @@ -969,8 +1003,8 @@ func (t *http2Client) handleData(f *http2.DataFrame) { } func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { - s, ok := t.getStream(f) - if !ok { + s := t.getStream(f) + if s == nil { return } if f.ErrCode == http2.ErrCodeRefusedStream { @@ -1147,8 +1181,8 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { // operateHeaders takes action on the decoded headers. 
func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { - s, ok := t.getStream(frame) - if !ok { + s := t.getStream(frame) + if s == nil { return } endStream := frame.StreamEnded() @@ -1175,14 +1209,17 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if t.statsHandler != nil { if isHeader { inHeader := &stats.InHeader{ - Client: true, - WireLength: int(frame.Header().Length), + Client: true, + WireLength: int(frame.Header().Length), + Header: s.header.Copy(), + Compression: s.recvCompress, } t.statsHandler.HandleRPC(s.ctx, inHeader) } else { inTrailer := &stats.InTrailer{ Client: true, WireLength: int(frame.Header().Length), + Trailer: s.trailer.Copy(), } t.statsHandler.HandleRPC(s.ctx, inTrailer) } @@ -1191,6 +1228,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { // If headerChan hasn't been closed yet if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true if !endStream { // HEADERS frame block carries a Response-Headers. isHeader = true @@ -1233,7 +1271,7 @@ func (t *http2Client) reader() { } t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) if t.keepaliveEnabled { - atomic.CompareAndSwapUint32(&t.activity, 0, 1) + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } sf, ok := frame.(*http2.SettingsFrame) if !ok { @@ -1248,7 +1286,7 @@ func (t *http2Client) reader() { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() if t.keepaliveEnabled { - atomic.CompareAndSwapUint32(&t.activity, 0, 1) + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) } if err != nil { // Abort an active stream if the http2.Framer returns a @@ -1292,56 +1330,83 @@ func (t *http2Client) reader() { } } +func minTime(a, b time.Duration) time.Duration { + if a < b { + return a + } + return b +} + // keepalive running in a separate goroutune makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + timeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() timer := time.NewTimer(t.kp.Time) for { select { case <-timer.C: - if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { - timer.Reset(t.kp.Time) + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were here. + outstandingPing = false + // Next timer should fire at kp.Time seconds from lastRead time. + timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead continue } - // Check if keepalive should go dormant. + if outstandingPing && timeoutLeft <= 0 { + t.Close() + return + } t.mu.Lock() - if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { - // Make awakenKeepalive writable. - <-t.awakenKeepalive - t.mu.Unlock() - select { - case <-t.awakenKeepalive: - // If the control gets here a ping has been sent - // need to reset the timer with keepalive.Timeout. - case <-t.ctx.Done(): - return - } - } else { + if t.state == closing { + // If the transport is closing, we should exit from the + // keepalive goroutine here. 
If not, we could have a race + // between the call to Signal() from Close() and the call to + // Wait() here, whereby the keepalive goroutine ends up + // blocking on the condition variable which will never be + // signalled again. t.mu.Unlock() + return + } + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // If a ping was sent out previously (because there were active + // streams at that point) which wasn't acked and its timeout + // hadn't fired, but we got here and are about to go dormant, + // we should make sure that we unconditionally send a ping once + // we awaken. + outstandingPing = false + t.kpDormant = true + t.kpDormancyCond.Wait() + } + t.kpDormant = false + t.mu.Unlock() + + // We get here either because we were dormant and a new stream was + // created which unblocked the Wait() call, or because the + // keepalive timer expired. In both cases, we need to send a ping. + if !outstandingPing { if channelz.IsOn() { atomic.AddInt64(&t.czData.kpCount, 1) } - // Send ping. t.controlBuf.put(p) + timeoutLeft = t.kp.Timeout + outstandingPing = true } - - // By the time control gets here a ping has been sent one way or the other. - timer.Reset(t.kp.Timeout) - select { - case <-timer.C: - if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { - timer.Reset(t.kp.Time) - continue - } - infof("transport: closing client transport due to idleness.") - t.Close() - return - case <-t.ctx.Done(): - if !timer.Stop() { - <-timer.C - } - return - } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) case <-t.ctx.Done(): if !timer.Stop() { <-timer.C diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 83439b562..e8c757321 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,11 +35,9 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/keepalive" @@ -57,16 +55,17 @@ var ( // ErrHeaderListSizeLimitViolation indicates that the header list size is larger // than the limit set by peer. ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") - // statusRawProto is a function to get to the raw status proto wrapped in a - // status.Status without a proto.Clone(). - statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status) ) +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. 
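The reworked keepalive replaces the old activity flag and nested timers with a single timer driven by the last-read timestamp, an outstanding-ping flag and a remaining-timeout budget. A self-contained sketch of that loop shape; sendPing and onDead stand in for controlBuf.put(ping) and Close, and the dormancy/condition-variable handling is omitted:

package main

import (
    "fmt"
    "sync/atomic"
    "time"
)

// keepaliveLoop mirrors the shape of the rewritten keepalive goroutines: the
// next firing is derived from the last observed read timestamp, a ping is
// sent only when there is none outstanding, and an unacknowledged ping that
// exhausts its timeout budget kills the transport.
func keepaliveLoop(lastRead *int64, kpTime, kpTimeout time.Duration, sendPing, onDead func(), done <-chan struct{}) {
    outstandingPing := false
    timeoutLeft := time.Duration(0)
    prevNano := time.Now().UnixNano()
    timer := time.NewTimer(kpTime)
    defer timer.Stop()
    for {
        select {
        case <-timer.C:
            if lr := atomic.LoadInt64(lastRead); lr > prevNano {
                // Read activity since the last wakeup: push the deadline out.
                outstandingPing = false
                timer.Reset(time.Duration(lr) + kpTime - time.Duration(time.Now().UnixNano()))
                prevNano = lr
                continue
            }
            if outstandingPing && timeoutLeft <= 0 {
                onDead() // the ping was never acknowledged
                return
            }
            if !outstandingPing {
                sendPing()
                timeoutLeft = kpTimeout
                outstandingPing = true
            }
            // Sleep for the smaller of kpTime and the remaining timeout budget.
            sleep := kpTime
            if timeoutLeft < sleep {
                sleep = timeoutLeft
            }
            timeoutLeft -= sleep
            timer.Reset(sleep)
        case <-done:
            return
        }
    }
}

func main() {
    // No reads ever arrive in this demo, so the loop pings once and then closes.
    var lastRead int64
    done := make(chan struct{})
    go keepaliveLoop(&lastRead, 20*time.Millisecond, 30*time.Millisecond,
        func() { fmt.Println("ping") },
        func() { fmt.Println("closing: ping not acknowledged"); close(done) },
        done)
    <-done
}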
ctx context.Context - ctxDone <-chan struct{} // Cache the context.Done() chan - cancel context.CancelFunc + done chan struct{} conn net.Conn loopy *loopyWriter readerDone chan struct{} // sync point to enable testing. @@ -84,12 +83,8 @@ type http2Server struct { controlBuf *controlBuffer fc *trInFlow stats stats.Handler - // Flag to keep track of reading activity on transport. - // 1 is true and 0 is false. - activity uint32 // Accessed atomically. // Keepalive and max-age parameters for the server. kp keepalive.ServerParameters - // Keepalive enforcement policy. kep keepalive.EnforcementPolicy // The time instance last ping was received. @@ -125,6 +120,8 @@ type http2Server struct { channelzID int64 // channelz unique identification number czData *channelzData bufferPool *bufferPool + + connectionID uint64 } // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is @@ -138,7 +135,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err } framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) // Send initial settings as connection preface to client. - var isettings []http2.Setting + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} // TODO(zhaoq): Have a better way to signal "no limit" because 0 is // permitted in the HTTP2 spec. maxStreams := config.MaxStreams @@ -172,6 +172,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err Val: *config.MaxHeaderListSize, }) } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } if err := framer.fr.WriteSettings(isettings...); err != nil { return nil, connectionErrorf(false, err, "transport: %v", err) } @@ -203,11 +209,10 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime } - ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) t := &http2Server{ - ctx: ctx, - cancel: cancel, - ctxDone: ctx.Done(), + ctx: context.Background(), + done: done, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -228,7 +233,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err czData: new(channelzData), bufferPool: newBufferPool(), } - t.controlBuf = newControlBuffer(t.ctxDone) + t.controlBuf = newControlBuffer(t.done) if dynamicWindow { t.bdpEst = &bdpEstimator{ bdp: initialWindowSize, @@ -246,6 +251,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if channelz.IsOn() { t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + t.framer.writer.Flush() defer func() { @@ -270,7 +278,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err if err != nil { return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) } - atomic.StoreUint32(&t.activity, 1) + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) sf, ok := frame.(*http2.SettingsFrame) if !ok { return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) @@ -359,12 +367,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, 
handle func( rstCode: http2.ErrCodeRefusedStream, onWrite: func() {}, }) + s.cancel() return false } } t.mu.Lock() if t.state != reachable { t.mu.Unlock() + s.cancel() return false } if uint32(len(t.activeStreams)) >= t.maxStreams { @@ -375,12 +385,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeRefusedStream, onWrite: func() {}, }) + s.cancel() return false } if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() // illegal gRPC stream id. errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + s.cancel() return true } t.maxStreamID = streamID @@ -405,6 +417,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), + Header: metadata.MD(state.data.mdata).Copy(), } t.stats.HandleRPC(s.ctx, inHeader) } @@ -438,7 +451,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() - atomic.StoreUint32(&t.activity, 1) + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) @@ -746,7 +759,7 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { return true } -// WriteHeader sends the header metedata md back to the client. +// WriteHeader sends the header metadata md back to the client. func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { if s.updateHeaderSent() || s.getState() == streamDone { return ErrIllegalHeaderWrite @@ -795,9 +808,12 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error { return ErrHeaderListSizeLimitViolation } if t.stats != nil { - // Note: WireLength is not set in outHeader. - // TODO(mmukhi): Revisit this later, if needed. - outHeader := &stats.OutHeader{} + // Note: Headers are compressed with hpack after this call returns. + // No WireLength field is set here. + outHeader := &stats.OutHeader{ + Header: s.header.Copy(), + Compression: s.sendCompress, + } t.stats.HandleRPC(s.Context(), outHeader) } return nil @@ -829,7 +845,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) - if p := statusRawProto(st); p != nil && len(p.Details) > 0 { + if p := st.Proto(); p != nil && len(p.Details) > 0 { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. @@ -860,7 +876,11 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { rst := s.getState() == streamActive t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true) if t.stats != nil { - t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + // Note: The trailer fields are compressed with hpack after this call returns. + // No WireLength field is set here. + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) } return nil } @@ -882,20 +902,13 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e // TODO(mmukhi, dfawley): Should the server write also return io.EOF? 
s.cancel() select { - case <-t.ctx.Done(): + case <-t.done: return ErrConnClosing default: } return ContextErr(s.ctx.Err()) } } - // Add some data to header frame so that we can equally distribute bytes across frames. - emptyLen := http2MaxFrameLen - len(hdr) - if emptyLen > len(data) { - emptyLen = len(data) - } - hdr = append(hdr, data[:emptyLen]...) - data = data[emptyLen:] df := &dataFrame{ streamID: s.id, h: hdr, @@ -904,7 +917,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e } if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { select { - case <-t.ctx.Done(): + case <-t.done: return ErrConnClosing default: } @@ -921,32 +934,35 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e // after an additional duration of keepalive.Timeout. func (t *http2Server) keepalive() { p := &ping{} - var pingSent bool - maxIdle := time.NewTimer(t.kp.MaxConnectionIdle) - maxAge := time.NewTimer(t.kp.MaxConnectionAge) - keepalive := time.NewTimer(t.kp.Time) - // NOTE: All exit paths of this function should reset their - // respective timers. A failure to do so will cause the - // following clean-up to deadlock and eventually leak. + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + kpTimeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + // Initialize the different timers to their default values. + idleTimer := time.NewTimer(t.kp.MaxConnectionIdle) + ageTimer := time.NewTimer(t.kp.MaxConnectionAge) + kpTimer := time.NewTimer(t.kp.Time) defer func() { - if !maxIdle.Stop() { - <-maxIdle.C - } - if !maxAge.Stop() { - <-maxAge.C - } - if !keepalive.Stop() { - <-keepalive.C - } + // We need to drain the underlying channel in these timers after a call + // to Stop(), only if we are interested in resetting them. Clearly we + // are not interested in resetting them here. + idleTimer.Stop() + ageTimer.Stop() + kpTimer.Stop() }() + for { select { - case <-maxIdle.C: + case <-idleTimer.C: t.mu.Lock() idle := t.idle if idle.IsZero() { // The connection is non-idle. t.mu.Unlock() - maxIdle.Reset(t.kp.MaxConnectionIdle) + idleTimer.Reset(t.kp.MaxConnectionIdle) continue } val := t.kp.MaxConnectionIdle - time.Since(idle) @@ -955,44 +971,52 @@ func (t *http2Server) keepalive() { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. t.drain(http2.ErrCodeNo, []byte{}) - // Resetting the timer so that the clean-up doesn't deadlock. - maxIdle.Reset(infinity) return } - maxIdle.Reset(val) - case <-maxAge.C: + idleTimer.Reset(val) + case <-ageTimer.C: t.drain(http2.ErrCodeNo, []byte{}) - maxAge.Reset(t.kp.MaxConnectionAgeGrace) + ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { - case <-maxAge.C: + case <-ageTimer.C: // Close the connection after grace period. infof("transport: closing server transport due to maximum connection age.") t.Close() - // Resetting the timer so that the clean-up doesn't deadlock. 
- maxAge.Reset(infinity) - case <-t.ctx.Done(): + case <-t.done: } return - case <-keepalive.C: - if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { - pingSent = false - keepalive.Reset(t.kp.Time) + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead continue } - if pingSent { + if outstandingPing && kpTimeoutLeft <= 0 { infof("transport: closing server transport due to idleness.") t.Close() - // Resetting the timer so that the clean-up doesn't deadlock. - keepalive.Reset(infinity) return } - pingSent = true - if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true } - t.controlBuf.put(p) - keepalive.Reset(t.kp.Timeout) - case <-t.ctx.Done(): + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: return } } @@ -1012,7 +1036,7 @@ func (t *http2Server) Close() error { t.activeStreams = nil t.mu.Unlock() t.controlBuf.finish() - t.cancel() + close(t.done) err := t.conn.Close() if channelz.IsOn() { channelz.RemoveEntry(t.channelzID) @@ -1152,7 +1176,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { select { case <-t.drainChan: case <-timer.C: - case <-t.ctx.Done(): + case <-t.done: return } t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) @@ -1202,7 +1226,7 @@ func (t *http2Server) getOutFlowWindow() int64 { select { case sz := <-resp: return int64(sz) - case <-t.ctxDone: + case <-t.done: return -1 case <-timer.C: return -2 diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 9d212867c..8f5f3349d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -667,6 +667,7 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList writer: w, fr: http2.NewFramer(w, r), } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) // Opt-in to Frame reuse API on framer to reduce garbage. // Frames aren't safe to read from after a subsequent call to ReadFrame. f.fr.SetReuseFrames() diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 1c1d10670..1ffd96ff4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -35,6 +35,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" @@ -73,10 +74,11 @@ type recvMsg struct { } // recvBuffer is an unbounded channel of recvMsg structs. 
-// Note recvBuffer differs from controlBuffer only in that recvBuffer -// holds a channel of only recvMsg structs instead of objects implementing "item" interface. -// recvBuffer is written to much more often than -// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put" +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" type recvBuffer struct { c chan recvMsg mu sync.Mutex @@ -233,6 +235,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream + ct *http2Client // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. @@ -251,6 +254,10 @@ type Stream struct { headerChan chan struct{} // closed to indicate the end of header metadata. headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + // headerValid indicates whether a valid header was received. Only + // meaningful after headerChan is closed (always call waitOnHeader() before + // reading its value). Not valid on server side. + headerValid bool // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -303,34 +310,28 @@ func (s *Stream) getState() streamState { return streamState(atomic.LoadUint32((*uint32)(&s.state))) } -func (s *Stream) waitOnHeader() error { +func (s *Stream) waitOnHeader() { if s.headerChan == nil { // On the server headerChan is always nil since a stream originates // only after having received headers. - return nil + return } select { case <-s.ctx.Done(): - // We prefer success over failure when reading messages because we delay - // context error in stream.Read(). To keep behavior consistent, we also - // prefer success here. - select { - case <-s.headerChan: - return nil - default: - } - return ContextErr(s.ctx.Err()) + // Close the stream to prevent headers/trailers from changing after + // this function returns. + s.ct.CloseStream(s, ContextErr(s.ctx.Err())) + // headerChan could possibly not be closed yet if closeStream raced + // with operateHeaders; wait until it is closed explicitly here. + <-s.headerChan case <-s.headerChan: - return nil } } // RecvCompress returns the compression algorithm applied to the inbound // message. It is empty string if there is no compression applied. func (s *Stream) RecvCompress() string { - if err := s.waitOnHeader(); err != nil { - return "" - } + s.waitOnHeader() return s.recvCompress } @@ -351,36 +352,27 @@ func (s *Stream) Done() <-chan struct{} { // available. It blocks until i) the metadata is ready or ii) there is no header // metadata or iii) the stream is canceled/expired. // -// On server side, it returns the out header after t.WriteHeader is called. +// On server side, it returns the out header after t.WriteHeader is called. It +// does not block and must not be called until after WriteHeader. func (s *Stream) Header() (metadata.MD, error) { - if s.headerChan == nil && s.header != nil { + if s.headerChan == nil { // On server side, return the header in stream. It will be the out // header after t.WriteHeader is called. 
return s.header.Copy(), nil } - err := s.waitOnHeader() - // Even if the stream is closed, header is returned if available. - select { - case <-s.headerChan: - if s.header == nil { - return nil, nil - } - return s.header.Copy(), nil - default: + s.waitOnHeader() + if !s.headerValid { + return nil, s.status.Err() } - return nil, err + return s.header.Copy(), nil } // TrailersOnly blocks until a header or trailers-only frame is received and // then returns true if the stream was trailers-only. If the stream ends -// before headers are received, returns true, nil. If a context error happens -// first, returns it as a status error. Client-side only. -func (s *Stream) TrailersOnly() (bool, error) { - err := s.waitOnHeader() - if err != nil { - return false, err - } - return s.noHeaders, nil +// before headers are received, returns true, nil. Client-side only. +func (s *Stream) TrailersOnly() bool { + s.waitOnHeader() + return s.noHeaders } // Trailer returns the cached trailer metedata. Note that if it is not called @@ -534,6 +526,7 @@ type ServerConfig struct { ReadBufferSize int ChannelzParentID int64 MaxHeaderListSize *uint32 + HeaderTableSize *uint32 } // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -576,17 +569,10 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 } -// TargetInfo contains the information of the target such as network address and metadata. -type TargetInfo struct { - Addr string - Metadata interface{} - Authority string -} - // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) } // Options provides additional hints and information for message diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go deleted file mode 100644 index c9f79dc53..000000000 --- a/vendor/google.golang.org/grpc/naming/dns_resolver.go +++ /dev/null @@ -1,293 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package naming - -import ( - "context" - "errors" - "fmt" - "net" - "strconv" - "time" - - "google.golang.org/grpc/grpclog" -) - -const ( - defaultPort = "443" - defaultFreq = time.Minute * 30 -) - -var ( - errMissingAddr = errors.New("missing address") - errWatcherClose = errors.New("watcher has been closed") - - lookupHost = net.DefaultResolver.LookupHost - lookupSRV = net.DefaultResolver.LookupSRV -) - -// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and -// create watchers that poll the DNS server using the frequency set by freq. -func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { - return &dnsResolver{freq: freq}, nil -} - -// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create -// watchers that poll the DNS server using the default frequency defined by defaultFreq. -func NewDNSResolver() (Resolver, error) { - return NewDNSResolverWithFreq(defaultFreq) -} - -// dnsResolver handles name resolution for names following the DNS scheme -type dnsResolver struct { - // frequency of polling the DNS server that the watchers created by this resolver will use. - freq time.Duration -} - -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. -func formatIP(addr string) (addrIP string, ok bool) { - ip := net.ParseIP(addr) - if ip == nil { - return "", false - } - if ip.To4() != nil { - return addr, true - } - return "[" + addr + "]", true -} - -// parseTarget takes the user input target string, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. -// examples: -// target: "www.google.com" returns host: "www.google.com", port: "443" -// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" -// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" -// target: ":80" returns host: "localhost", port: "80" -// target: ":" returns host: "localhost", port: "443" -func parseTarget(target string) (host, port string, err error) { - if target == "" { - return "", "", errMissingAddr - } - - if ip := net.ParseIP(target); ip != nil { - // target is an IPv4 or IPv6(without brackets) address - return target, defaultPort, nil - } - if host, port, err := net.SplitHostPort(target); err == nil { - // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port - if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. - host = "localhost" - } - if port == "" { - // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. - port = defaultPort - } - return host, port, nil - } - if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { - // target doesn't have port - return host, port, nil - } - return "", "", fmt.Errorf("invalid target address %v", target) -} - -// Resolve creates a watcher that watches the name resolution of the target. 
-func (r *dnsResolver) Resolve(target string) (Watcher, error) { - host, port, err := parseTarget(target) - if err != nil { - return nil, err - } - - if net.ParseIP(host) != nil { - ipWatcher := &ipWatcher{ - updateChan: make(chan *Update, 1), - } - host, _ = formatIP(host) - ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} - return ipWatcher, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - return &dnsWatcher{ - r: r, - host: host, - port: port, - ctx: ctx, - cancel: cancel, - t: time.NewTimer(0), - }, nil -} - -// dnsWatcher watches for the name resolution update for a specific target -type dnsWatcher struct { - r *dnsResolver - host string - port string - // The latest resolved address set - curAddrs map[string]*Update - ctx context.Context - cancel context.CancelFunc - t *time.Timer -} - -// ipWatcher watches for the name resolution update for an IP address. -type ipWatcher struct { - updateChan chan *Update -} - -// Next returns the address resolution Update for the target. For IP address, -// the resolution is itself, thus polling name server is unnecessary. Therefore, -// Next() will return an Update the first time it is called, and will be blocked -// for all following calls as no Update exists until watcher is closed. -func (i *ipWatcher) Next() ([]*Update, error) { - u, ok := <-i.updateChan - if !ok { - return nil, errWatcherClose - } - return []*Update{u}, nil -} - -// Close closes the ipWatcher. -func (i *ipWatcher) Close() { - close(i.updateChan) -} - -// AddressType indicates the address type returned by name resolution. -type AddressType uint8 - -const ( - // Backend indicates the server is a backend server. - Backend AddressType = iota - // GRPCLB indicates the server is a grpclb load balancer. - GRPCLB -) - -// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The -// name resolver used by the grpclb balancer is required to provide this type of metadata in -// its address updates. -type AddrMetadataGRPCLB struct { - // AddrType is the type of server (grpc load balancer or backend). - AddrType AddressType - // ServerName is the name of the grpc load balancer. Used for authentication. 
- ServerName string -} - -// compileUpdate compares the old resolved addresses and newly resolved addresses, -// and generates an update list -func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { - var res []*Update - for a, u := range w.curAddrs { - if _, ok := newAddrs[a]; !ok { - u.Op = Delete - res = append(res, u) - } - } - for a, u := range newAddrs { - if _, ok := w.curAddrs[a]; !ok { - res = append(res, u) - } - } - return res -} - -func (w *dnsWatcher) lookupSRV() map[string]*Update { - newAddrs := make(map[string]*Update) - _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) - if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil - } - for _, s := range srvs { - lbAddrs, err := lookupHost(w.ctx, s.Target) - if err != nil { - grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) - continue - } - for _, a := range lbAddrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + strconv.Itoa(int(s.Port)) - newAddrs[addr] = &Update{Addr: addr, - Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} - } - } - return newAddrs -} - -func (w *dnsWatcher) lookupHost() map[string]*Update { - newAddrs := make(map[string]*Update) - addrs, err := lookupHost(w.ctx, w.host) - if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil - } - for _, a := range addrs { - a, ok := formatIP(a) - if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue - } - addr := a + ":" + w.port - newAddrs[addr] = &Update{Addr: addr} - } - return newAddrs -} - -func (w *dnsWatcher) lookup() []*Update { - newAddrs := w.lookupSRV() - if newAddrs == nil { - // If failed to get any balancer address (either no corresponding SRV for the - // target, or caused by failure during resolution/parsing of the balancer target), - // return any A record info available. - newAddrs = w.lookupHost() - } - result := w.compileUpdate(newAddrs) - w.curAddrs = newAddrs - return result -} - -// Next returns the resolved address update(delta) for the target. If there's no -// change, it will sleep for 30 mins and try to resolve again after that. -func (w *dnsWatcher) Next() ([]*Update, error) { - for { - select { - case <-w.ctx.Done(): - return nil, errWatcherClose - case <-w.t.C: - } - result := w.lookup() - // Next lookup should happen after an interval defined by w.r.freq. - w.t.Reset(w.r.freq) - if len(result) > 0 { - return result, nil - } - } -} - -func (w *dnsWatcher) Close() { - w.cancel() -} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go deleted file mode 100644 index f4c1c8b68..000000000 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -// Package naming defines the naming API and related data structures for gRPC. -// -// This package is deprecated: please use package resolver instead. -package naming - -// Operation defines the corresponding operations for a name resolution change. -// -// Deprecated: please use package resolver. -type Operation uint8 - -const ( - // Add indicates a new address is added. - Add Operation = iota - // Delete indicates an existing address is deleted. - Delete -) - -// Update defines a name resolution update. Notice that it is not valid having both -// empty string Addr and nil Metadata in an Update. -// -// Deprecated: please use package resolver. -type Update struct { - // Op indicates the operation of the update. - Op Operation - // Addr is the updated address. It is empty string if there is no address update. - Addr string - // Metadata is the updated metadata. It is nil if there is no metadata update. - // Metadata is not required for a custom naming implementation. - Metadata interface{} -} - -// Resolver creates a Watcher for a target to track its resolution changes. -// -// Deprecated: please use package resolver. -type Resolver interface { - // Resolve creates a Watcher for target. - Resolve(target string) (Watcher, error) -} - -// Watcher watches for the updates on the specified target. -// -// Deprecated: please use package resolver. -type Watcher interface { - // Next blocks until an update or error happens. It may return one or more - // updates. The first call should get the full set of the results. It should - // return an error if and only if Watcher cannot recover. - Next() ([]*Update, error) - // Close closes the Watcher. - Close() -} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 45baa2ae1..7f3edaaed 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -38,42 +38,24 @@ type pickerWrapper struct { done bool blockingCh chan struct{} picker balancer.Picker - - // The latest connection happened. - connErrMu sync.Mutex - connErr error } func newPickerWrapper() *pickerWrapper { - bp := &pickerWrapper{blockingCh: make(chan struct{})} - return bp -} - -func (bp *pickerWrapper) updateConnectionError(err error) { - bp.connErrMu.Lock() - bp.connErr = err - bp.connErrMu.Unlock() -} - -func (bp *pickerWrapper) connectionError() error { - bp.connErrMu.Lock() - err := bp.connErr - bp.connErrMu.Unlock() - return err + return &pickerWrapper{blockingCh: make(chan struct{})} } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. -func (bp *pickerWrapper) updatePicker(p balancer.Picker) { - bp.mu.Lock() - if bp.done { - bp.mu.Unlock() +func (pw *pickerWrapper) updatePicker(p balancer.Picker) { + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() return } - bp.picker = p - // bp.blockingCh should never be nil. - close(bp.blockingCh) - bp.blockingCh = make(chan struct{}) - bp.mu.Unlock() + pw.picker = p + // pw.blockingCh should never be nil. + close(pw.blockingCh) + pw.blockingCh = make(chan struct{}) + pw.mu.Unlock() } func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { @@ -100,83 +82,82 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the current picker returns other errors and failfast is false. 
// - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { var ch chan struct{} + var lastPickErr error for { - bp.mu.Lock() - if bp.done { - bp.mu.Unlock() + pw.mu.Lock() + if pw.done { + pw.mu.Unlock() return nil, nil, ErrClientConnClosing } - if bp.picker == nil { - ch = bp.blockingCh + if pw.picker == nil { + ch = pw.blockingCh } - if ch == bp.blockingCh { + if ch == pw.blockingCh { // This could happen when either: - // - bp.picker is nil (the previous if condition), or + // - pw.picker is nil (the previous if condition), or // - has called pick on the current picker. - bp.mu.Unlock() + pw.mu.Unlock() select { case <-ctx.Done(): - if connectionErr := bp.connectionError(); connectionErr != nil { - switch ctx.Err() { - case context.DeadlineExceeded: - return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr) - case context.Canceled: - return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr) - } + var errStr string + if lastPickErr != nil { + errStr = "latest balancer error: " + lastPickErr.Error() + } else { + errStr = ctx.Err().Error() + } + switch ctx.Err() { + case context.DeadlineExceeded: + return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + case context.Canceled: + return nil, nil, status.Error(codes.Canceled, errStr) } - return nil, nil, ctx.Err() case <-ch: } continue } - ch = bp.blockingCh - p := bp.picker - bp.mu.Unlock() + ch = pw.blockingCh + p := pw.picker + pw.mu.Unlock() - subConn, done, err := p.Pick(ctx, opts) + pickResult, err := p.Pick(info) if err != nil { - switch err { - case balancer.ErrNoSubConnAvailable: + if err == balancer.ErrNoSubConnAvailable { continue - case balancer.ErrTransientFailure: - if !failfast { - continue - } - return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError()) - case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, err.Error()) - case context.Canceled: - return nil, nil, status.Error(codes.Canceled, err.Error()) - default: - if _, ok := status.FromError(err); ok { - return nil, nil, err - } - // err is some other error. - return nil, nil, status.Error(codes.Unknown, err.Error()) } + if _, ok := status.FromError(err); ok { + // Status error: end the RPC unconditionally with this status. + return nil, nil, err + } + // For all other errors, wait for ready RPCs should block and other + // RPCs should fail with unavailable. + if !failfast { + lastPickErr = err + continue + } + return nil, nil, status.Error(codes.Unavailable, err.Error()) } - acw, ok := subConn.(*acBalancerWrapper) + acw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { grpclog.Error("subconn returned from pick is not *acBalancerWrapper") continue } if t, ok := acw.getAddrConn().getReadyTransport(); ok { if channelz.IsOn() { - return t, doneChannelzWrapper(acw, done), nil + return t, doneChannelzWrapper(acw, pickResult.Done), nil } - return t, done, nil + return t, pickResult.Done, nil } - if done != nil { + if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. 
// DoneInfo with default value works. - done(balancer.DoneInfo{}) + pickResult.Done(balancer.DoneInfo{}) } grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") // If ok == false, ac.state is not READY. @@ -186,12 +167,12 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. } } -func (bp *pickerWrapper) close() { - bp.mu.Lock() - defer bp.mu.Unlock() - if bp.done { +func (pw *pickerWrapper) close() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { return } - bp.done = true - close(bp.blockingCh) + pw.done = true + close(pw.blockingCh) } diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index ed05b02ed..4b7340ad3 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -19,12 +19,12 @@ package grpc import ( - "context" + "errors" + "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/resolver" ) // PickFirstBalancerName is the name of the pick_first balancer. @@ -45,37 +45,55 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + sc balancer.SubConn } -func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { - if err != nil { - if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) - } - return +func (b *pickfirstBalancer) ResolverError(err error) { + switch b.state { + case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: + // Set a failing picker if we don't have a good picker. + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) + } + if grpclog.V(2) { + grpclog.Infof("pickfirstBalancer: ResolverError called with error %v", err) + } +} + +func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { + if len(cs.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState } if b.sc == nil { - b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + var err error + b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) if err != nil { - //TODO(yuxuanli): why not change the cc state to Idle? 
if grpclog.V(2) { grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - return + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } - b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) b.sc.Connect() } else { - b.sc.UpdateAddresses(addrs) + b.sc.UpdateAddresses(cs.ResolverState.Addresses) b.sc.Connect() } + return nil } -func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { +func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { if grpclog.V(2) { - grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + grpclog.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) } if b.sc != sc { if grpclog.V(2) { @@ -83,18 +101,22 @@ func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s conn } return } - if s == connectivity.Shutdown { + b.state = s.ConnectivityState + if s.ConnectivityState == connectivity.Shutdown { b.sc = nil return } - switch s { + switch s.ConnectivityState { case connectivity.Ready, connectivity.Idle: - b.cc.UpdateBalancerState(s, &picker{sc: sc}) + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) case connectivity.Connecting: - b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) + b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) case connectivity.TransientFailure: - b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: s.ConnectivityState, + Picker: &picker{err: s.ConnectionError}, + }) } } @@ -102,15 +124,12 @@ func (b *pickfirstBalancer) Close() { } type picker struct { - err error - sc balancer.SubConn + result balancer.PickResult + err error } -func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { - if p.err != nil { - return nil, nil, p.err - } - return p.sc, nil, nil +func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err } func init() { diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md new file mode 100644 index 000000000..04b6371af --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/README.md @@ -0,0 +1,18 @@ +# Reflection + +Package reflection implements server reflection service. + +The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: +```go +import "google.golang.org/grpc/reflection" + +s := grpc.NewServer() +pb.RegisterYourOwnServer(s, &server{}) + +// Register reflection service on gRPC server. 
+reflection.Register(s) + +s.Serve(lis) +``` diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go new file mode 100644 index 000000000..382612d52 --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -0,0 +1,634 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: reflection/grpc_reflection_v1alpha/reflection.proto + +package grpc_reflection_v1alpha + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The message sent by the client when calling ServerReflectionInfo method. +type ServerReflectionRequest struct { + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + // + // Types that are valid to be assigned to MessageRequest: + // *ServerReflectionRequest_FileByFilename + // *ServerReflectionRequest_FileContainingSymbol + // *ServerReflectionRequest_FileContainingExtension + // *ServerReflectionRequest_AllExtensionNumbersOfType + // *ServerReflectionRequest_ListServices + MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerReflectionRequest) Reset() { *m = ServerReflectionRequest{} } +func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) } +func (*ServerReflectionRequest) ProtoMessage() {} +func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_e8cf9f2921ad6c95, []int{0} +} + +func (m *ServerReflectionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerReflectionRequest.Unmarshal(m, b) +} +func (m *ServerReflectionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerReflectionRequest.Marshal(b, m, deterministic) +} +func (m *ServerReflectionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerReflectionRequest.Merge(m, src) +} +func (m *ServerReflectionRequest) XXX_Size() int { + return xxx_messageInfo_ServerReflectionRequest.Size(m) +} +func (m *ServerReflectionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ServerReflectionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerReflectionRequest proto.InternalMessageInfo + +func (m *ServerReflectionRequest) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +type isServerReflectionRequest_MessageRequest interface { + isServerReflectionRequest_MessageRequest() +} + +type ServerReflectionRequest_FileByFilename struct { + FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingSymbol struct { + 
FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"` +} + +type ServerReflectionRequest_FileContainingExtension struct { + FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"` +} + +type ServerReflectionRequest_AllExtensionNumbersOfType struct { + AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"` +} + +type ServerReflectionRequest_ListServices struct { + ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"` +} + +func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {} + +func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() { +} + +func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {} + +func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest { + if m != nil { + return m.MessageRequest + } + return nil +} + +func (m *ServerReflectionRequest) GetFileByFilename() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok { + return x.FileByFilename + } + return "" +} + +func (m *ServerReflectionRequest) GetFileContainingSymbol() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok { + return x.FileContainingSymbol + } + return "" +} + +func (m *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok { + return x.FileContainingExtension + } + return nil +} + +func (m *ServerReflectionRequest) GetAllExtensionNumbersOfType() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok { + return x.AllExtensionNumbersOfType + } + return "" +} + +func (m *ServerReflectionRequest) GetListServices() string { + if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok { + return x.ListServices + } + return "" +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ServerReflectionRequest) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ServerReflectionRequest_FileByFilename)(nil), + (*ServerReflectionRequest_FileContainingSymbol)(nil), + (*ServerReflectionRequest_FileContainingExtension)(nil), + (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil), + (*ServerReflectionRequest_ListServices)(nil), + } +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +type ExtensionRequest struct { + // Fully-qualified type name. The format should be . 
+ ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"` + ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRequest) Reset() { *m = ExtensionRequest{} } +func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionRequest) ProtoMessage() {} +func (*ExtensionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_e8cf9f2921ad6c95, []int{1} +} + +func (m *ExtensionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRequest.Unmarshal(m, b) +} +func (m *ExtensionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRequest.Marshal(b, m, deterministic) +} +func (m *ExtensionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRequest.Merge(m, src) +} +func (m *ExtensionRequest) XXX_Size() int { + return xxx_messageInfo_ExtensionRequest.Size(m) +} +func (m *ExtensionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRequest proto.InternalMessageInfo + +func (m *ExtensionRequest) GetContainingType() string { + if m != nil { + return m.ContainingType + } + return "" +} + +func (m *ExtensionRequest) GetExtensionNumber() int32 { + if m != nil { + return m.ExtensionNumber + } + return 0 +} + +// The message sent by the server to answer ServerReflectionInfo method. +type ServerReflectionResponse struct { + ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"` + OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"` + // The server sets one of the following fields according to the + // message_request in the request. 
+ // + // Types that are valid to be assigned to MessageResponse: + // *ServerReflectionResponse_FileDescriptorResponse + // *ServerReflectionResponse_AllExtensionNumbersResponse + // *ServerReflectionResponse_ListServicesResponse + // *ServerReflectionResponse_ErrorResponse + MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerReflectionResponse) Reset() { *m = ServerReflectionResponse{} } +func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) } +func (*ServerReflectionResponse) ProtoMessage() {} +func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e8cf9f2921ad6c95, []int{2} +} + +func (m *ServerReflectionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerReflectionResponse.Unmarshal(m, b) +} +func (m *ServerReflectionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerReflectionResponse.Marshal(b, m, deterministic) +} +func (m *ServerReflectionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerReflectionResponse.Merge(m, src) +} +func (m *ServerReflectionResponse) XXX_Size() int { + return xxx_messageInfo_ServerReflectionResponse.Size(m) +} +func (m *ServerReflectionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ServerReflectionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerReflectionResponse proto.InternalMessageInfo + +func (m *ServerReflectionResponse) GetValidHost() string { + if m != nil { + return m.ValidHost + } + return "" +} + +func (m *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest { + if m != nil { + return m.OriginalRequest + } + return nil +} + +type isServerReflectionResponse_MessageResponse interface { + isServerReflectionResponse_MessageResponse() +} + +type ServerReflectionResponse_FileDescriptorResponse struct { + FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"` +} + +type ServerReflectionResponse_AllExtensionNumbersResponse struct { + AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ListServicesResponse struct { + ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"` +} + +type ServerReflectionResponse_ErrorResponse struct { + ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"` +} + +func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() { +} + +func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {} + +func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {} + +func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse { + if m != nil { + return m.MessageResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse { + if x, ok := 
m.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok { + return x.FileDescriptorResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok { + return x.AllExtensionNumbersResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok { + return x.ListServicesResponse + } + return nil +} + +func (m *ServerReflectionResponse) GetErrorResponse() *ErrorResponse { + if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok { + return x.ErrorResponse + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ServerReflectionResponse) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ServerReflectionResponse_FileDescriptorResponse)(nil), + (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil), + (*ServerReflectionResponse_ListServicesResponse)(nil), + (*ServerReflectionResponse_ErrorResponse)(nil), + } +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +type FileDescriptorResponse struct { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorResponse) Reset() { *m = FileDescriptorResponse{} } +func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorResponse) ProtoMessage() {} +func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e8cf9f2921ad6c95, []int{3} +} + +func (m *FileDescriptorResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorResponse.Unmarshal(m, b) +} +func (m *FileDescriptorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorResponse.Marshal(b, m, deterministic) +} +func (m *FileDescriptorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorResponse.Merge(m, src) +} +func (m *FileDescriptorResponse) XXX_Size() int { + return xxx_messageInfo_FileDescriptorResponse.Size(m) +} +func (m *FileDescriptorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorResponse proto.InternalMessageInfo + +func (m *FileDescriptorResponse) GetFileDescriptorProto() [][]byte { + if m != nil { + return m.FileDescriptorProto + } + return nil +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +type ExtensionNumberResponse struct { + // Full name of the base type, including the package name. The format + // is . 
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"` + ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse{} } +func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionNumberResponse) ProtoMessage() {} +func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e8cf9f2921ad6c95, []int{4} +} + +func (m *ExtensionNumberResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionNumberResponse.Unmarshal(m, b) +} +func (m *ExtensionNumberResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionNumberResponse.Marshal(b, m, deterministic) +} +func (m *ExtensionNumberResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionNumberResponse.Merge(m, src) +} +func (m *ExtensionNumberResponse) XXX_Size() int { + return xxx_messageInfo_ExtensionNumberResponse.Size(m) +} +func (m *ExtensionNumberResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionNumberResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionNumberResponse proto.InternalMessageInfo + +func (m *ExtensionNumberResponse) GetBaseTypeName() string { + if m != nil { + return m.BaseTypeName + } + return "" +} + +func (m *ExtensionNumberResponse) GetExtensionNumber() []int32 { + if m != nil { + return m.ExtensionNumber + } + return nil +} + +// A list of ServiceResponse sent by the server answering list_services request. +type ListServiceResponse struct { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListServiceResponse) Reset() { *m = ListServiceResponse{} } +func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ListServiceResponse) ProtoMessage() {} +func (*ListServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e8cf9f2921ad6c95, []int{5} +} + +func (m *ListServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListServiceResponse.Unmarshal(m, b) +} +func (m *ListServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListServiceResponse.Marshal(b, m, deterministic) +} +func (m *ListServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListServiceResponse.Merge(m, src) +} +func (m *ListServiceResponse) XXX_Size() int { + return xxx_messageInfo_ListServiceResponse.Size(m) +} +func (m *ListServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListServiceResponse proto.InternalMessageInfo + +func (m *ListServiceResponse) GetService() []*ServiceResponse { + if m != nil { + return m.Service + } + return nil +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +type ServiceResponse struct { + // Full name of a registered service, including its package name. The format + // is . 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceResponse) Reset() { *m = ServiceResponse{} } +func (m *ServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ServiceResponse) ProtoMessage() {} +func (*ServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e8cf9f2921ad6c95, []int{6} +} + +func (m *ServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceResponse.Unmarshal(m, b) +} +func (m *ServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceResponse.Marshal(b, m, deterministic) +} +func (m *ServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceResponse.Merge(m, src) +} +func (m *ServiceResponse) XXX_Size() int { + return xxx_messageInfo_ServiceResponse.Size(m) +} +func (m *ServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceResponse proto.InternalMessageInfo + +func (m *ServiceResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The error code and error message sent by the server when an error occurs. +type ErrorResponse struct { + // This field uses the error codes defined in grpc::StatusCode. + ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"` + ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ErrorResponse) Reset() { *m = ErrorResponse{} } +func (m *ErrorResponse) String() string { return proto.CompactTextString(m) } +func (*ErrorResponse) ProtoMessage() {} +func (*ErrorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_e8cf9f2921ad6c95, []int{7} +} + +func (m *ErrorResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ErrorResponse.Unmarshal(m, b) +} +func (m *ErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ErrorResponse.Marshal(b, m, deterministic) +} +func (m *ErrorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ErrorResponse.Merge(m, src) +} +func (m *ErrorResponse) XXX_Size() int { + return xxx_messageInfo_ErrorResponse.Size(m) +} +func (m *ErrorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ErrorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ErrorResponse proto.InternalMessageInfo + +func (m *ErrorResponse) GetErrorCode() int32 { + if m != nil { + return m.ErrorCode + } + return 0 +} + +func (m *ErrorResponse) GetErrorMessage() string { + if m != nil { + return m.ErrorMessage + } + return "" +} + +func init() { + proto.RegisterType((*ServerReflectionRequest)(nil), "grpc.reflection.v1alpha.ServerReflectionRequest") + proto.RegisterType((*ExtensionRequest)(nil), "grpc.reflection.v1alpha.ExtensionRequest") + proto.RegisterType((*ServerReflectionResponse)(nil), "grpc.reflection.v1alpha.ServerReflectionResponse") + proto.RegisterType((*FileDescriptorResponse)(nil), "grpc.reflection.v1alpha.FileDescriptorResponse") + proto.RegisterType((*ExtensionNumberResponse)(nil), "grpc.reflection.v1alpha.ExtensionNumberResponse") + proto.RegisterType((*ListServiceResponse)(nil), "grpc.reflection.v1alpha.ListServiceResponse") + 
proto.RegisterType((*ServiceResponse)(nil), "grpc.reflection.v1alpha.ServiceResponse") + proto.RegisterType((*ErrorResponse)(nil), "grpc.reflection.v1alpha.ErrorResponse") +} + +func init() { + proto.RegisterFile("reflection/grpc_reflection_v1alpha/reflection.proto", fileDescriptor_e8cf9f2921ad6c95) +} + +var fileDescriptor_e8cf9f2921ad6c95 = []byte{ + // 686 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0xad, 0xdb, 0xa4, 0x55, 0x26, 0x69, 0x92, 0x6f, 0xdb, 0xaf, 0x71, 0x41, 0x45, 0x91, 0xa1, + 0x90, 0x22, 0x94, 0xb4, 0xa9, 0x84, 0x84, 0xb8, 0xa5, 0x80, 0x82, 0x54, 0x5a, 0xe4, 0x70, 0x01, + 0x0e, 0x2b, 0x27, 0x99, 0xb8, 0x06, 0xc7, 0x6b, 0x76, 0xdd, 0x40, 0x4e, 0xfc, 0x08, 0x7e, 0x14, + 0x7f, 0x89, 0x23, 0xda, 0xb5, 0x63, 0x3b, 0x6e, 0x4c, 0xd5, 0x53, 0x9c, 0x37, 0x33, 0xfb, 0x66, + 0xf6, 0xbd, 0xb1, 0xe1, 0x94, 0xe3, 0xc4, 0xc5, 0x51, 0xe0, 0x30, 0xaf, 0x63, 0x73, 0x7f, 0x44, + 0x93, 0xff, 0x74, 0x76, 0x62, 0xb9, 0xfe, 0x95, 0xd5, 0x49, 0xa0, 0xb6, 0xcf, 0x59, 0xc0, 0x48, + 0x43, 0x66, 0xb6, 0x53, 0x70, 0x94, 0x69, 0xfc, 0x59, 0x87, 0xc6, 0x00, 0xf9, 0x0c, 0xb9, 0x19, + 0x07, 0x4d, 0xfc, 0x76, 0x8d, 0x22, 0x20, 0x04, 0x0a, 0x57, 0x4c, 0x04, 0xba, 0xd6, 0xd4, 0x5a, + 0x25, 0x53, 0x3d, 0x93, 0xa7, 0x50, 0x9f, 0x38, 0x2e, 0xd2, 0xe1, 0x9c, 0xca, 0x5f, 0xcf, 0x9a, + 0xa2, 0xbe, 0x21, 0xe3, 0xfd, 0x35, 0xb3, 0x2a, 0x91, 0xde, 0xfc, 0x4d, 0x84, 0x93, 0xe7, 0xb0, + 0xa7, 0x72, 0x47, 0xcc, 0x0b, 0x2c, 0xc7, 0x73, 0x3c, 0x9b, 0x8a, 0xf9, 0x74, 0xc8, 0x5c, 0xbd, + 0x10, 0x55, 0xec, 0xca, 0xf8, 0x59, 0x1c, 0x1e, 0xa8, 0x28, 0xb1, 0x61, 0x3f, 0x5b, 0x87, 0x3f, + 0x02, 0xf4, 0x84, 0xc3, 0x3c, 0xbd, 0xd8, 0xd4, 0x5a, 0xe5, 0xee, 0x51, 0x3b, 0x67, 0xa0, 0xf6, + 0xeb, 0x45, 0x66, 0x34, 0x45, 0x7f, 0xcd, 0x6c, 0x2c, 0xb3, 0xc4, 0x19, 0xa4, 0x07, 0x07, 0x96, + 0xeb, 0x26, 0x87, 0x53, 0xef, 0x7a, 0x3a, 0x44, 0x2e, 0x28, 0x9b, 0xd0, 0x60, 0xee, 0xa3, 0xbe, + 0x19, 0xf5, 0xb9, 0x6f, 0xb9, 0x6e, 0x5c, 0x76, 0x11, 0x26, 0x5d, 0x4e, 0x3e, 0xcc, 0x7d, 0x24, + 0x87, 0xb0, 0xed, 0x3a, 0x22, 0xa0, 0x02, 0xf9, 0xcc, 0x19, 0xa1, 0xd0, 0xb7, 0xa2, 0x9a, 0x8a, + 0x84, 0x07, 0x11, 0xda, 0xfb, 0x0f, 0x6a, 0x53, 0x14, 0xc2, 0xb2, 0x91, 0xf2, 0xb0, 0x31, 0x63, + 0x02, 0xf5, 0x6c, 0xb3, 0xe4, 0x09, 0xd4, 0x52, 0x53, 0xab, 0x1e, 0xc2, 0xdb, 0xaf, 0x26, 0xb0, + 0xa2, 0x3d, 0x82, 0x7a, 0xb6, 0x6d, 0x7d, 0xbd, 0xa9, 0xb5, 0x8a, 0x66, 0x0d, 0x97, 0x1b, 0x35, + 0x7e, 0x17, 0x40, 0xbf, 0x29, 0xb1, 0xf0, 0x99, 0x27, 0x90, 0x1c, 0x00, 0xcc, 0x2c, 0xd7, 0x19, + 0xd3, 0x94, 0xd2, 0x25, 0x85, 0xf4, 0xa5, 0xdc, 0x9f, 0xa1, 0xce, 0xb8, 0x63, 0x3b, 0x9e, 0xe5, + 0x2e, 0xfa, 0x56, 0x34, 0xe5, 0xee, 0x71, 0xae, 0x02, 0x39, 0x76, 0x32, 0x6b, 0x8b, 0x93, 0x16, + 0xc3, 0x7e, 0x05, 0x5d, 0xe9, 0x3c, 0x46, 0x31, 0xe2, 0x8e, 0x1f, 0x30, 0x4e, 0x79, 0xd4, 0x97, + 0x72, 0x48, 0xb9, 0xdb, 0xc9, 0x25, 0x91, 0x26, 0x7b, 0x15, 0xd7, 0x2d, 0xc6, 0xe9, 0xaf, 0x99, + 0xca, 0x72, 0x37, 0x23, 0xe4, 0x3b, 0x3c, 0x58, 0xad, 0x75, 0x4c, 0x59, 0xbc, 0x65, 0xae, 0x8c, + 0x01, 0x52, 0x9c, 0xf7, 0x57, 0xd8, 0x23, 0x26, 0x1e, 0xc3, 0xde, 0x92, 0x41, 0x12, 0xc2, 0x4d, + 0x45, 0xf8, 0x2c, 0x97, 0xf0, 0x3c, 0x31, 0x50, 0x8a, 0x6c, 0x37, 0xed, 0xab, 0x98, 0xe5, 0x12, + 0xaa, 0xc8, 0x79, 0xfa, 0x06, 0xb7, 0xd4, 0xe9, 0x8f, 0xf3, 0xc7, 0x91, 0xe9, 0xa9, 0x73, 0xb7, + 0x31, 0x0d, 0xf4, 0x08, 0xd4, 0x13, 0xc3, 0x86, 0x98, 0x71, 0x0e, 0x7b, 0xab, 0xef, 0x9d, 0x74, + 0xe1, 0xff, 0xac, 0x94, 0xea, 0xc5, 0xa3, 0x6b, 0xcd, 0x8d, 0x56, 0xc5, 0xdc, 0x59, 0x16, 0xe5, + 0xbd, 0x0c, 
0x19, 0x5f, 0xa0, 0x91, 0x73, 0xa5, 0xe4, 0x11, 0x54, 0x87, 0x96, 0x40, 0xb5, 0x00, + 0x54, 0xbd, 0x63, 0x42, 0x67, 0x56, 0x24, 0x2a, 0xfd, 0x7f, 0x21, 0xdf, 0x2f, 0xab, 0x77, 0x60, + 0x63, 0xd5, 0x0e, 0x7c, 0x84, 0x9d, 0x15, 0xb7, 0x49, 0x7a, 0xb0, 0x15, 0xc9, 0xa2, 0x1a, 0x2d, + 0x77, 0x5b, 0xff, 0x74, 0x75, 0xaa, 0xd4, 0x5c, 0x14, 0x1a, 0x87, 0x50, 0xcb, 0x1e, 0x4b, 0xa0, + 0x90, 0x6a, 0x5a, 0x3d, 0x1b, 0x03, 0xd8, 0x5e, 0xba, 0x71, 0xb9, 0x79, 0xa1, 0x62, 0x23, 0x36, + 0x0e, 0x53, 0x8b, 0x66, 0x49, 0x21, 0x67, 0x6c, 0x8c, 0xe4, 0x21, 0x84, 0x82, 0xd0, 0x48, 0x05, + 0xb5, 0x76, 0x25, 0xb3, 0xa2, 0xc0, 0x77, 0x21, 0xd6, 0xfd, 0xa5, 0x41, 0x3d, 0xbb, 0x6e, 0xe4, + 0x27, 0xec, 0x66, 0xb1, 0xb7, 0xde, 0x84, 0x91, 0x3b, 0x6f, 0xec, 0xbd, 0x93, 0x3b, 0x54, 0x84, + 0x53, 0xb5, 0xb4, 0x63, 0xad, 0xf7, 0xf2, 0xd3, 0x0b, 0x9b, 0x31, 0xdb, 0xc5, 0xb6, 0xcd, 0x5c, + 0xcb, 0xb3, 0xdb, 0x8c, 0xdb, 0xea, 0x53, 0xd5, 0xb9, 0xfd, 0xd3, 0x35, 0xdc, 0x54, 0xbe, 0x39, + 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x6c, 0x74, 0x3a, 0x67, 0xe7, 0x06, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto new file mode 100644 index 000000000..ee2b82c0a --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto @@ -0,0 +1,138 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Service exported by server reflection + +syntax = "proto3"; + +option go_package = "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"; + +package grpc.reflection.v1alpha; + +service ServerReflection { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + rpc ServerReflectionInfo(stream ServerReflectionRequest) + returns (stream ServerReflectionResponse); +} + +// The message sent by the client when calling ServerReflectionInfo method. +message ServerReflectionRequest { + string host = 1; + // To use reflection service, the client should set one of the following + // fields in message_request. The server distinguishes requests by their + // defined field and then handles them using corresponding methods. + oneof message_request { + // Find a proto file by the file name. + string file_by_filename = 3; + + // Find the proto file that declares the given fully-qualified symbol name. + // This field should be a fully-qualified symbol name + // (e.g. .[.] or .). + string file_containing_symbol = 4; + + // Find the proto file which defines an extension extending the given + // message type with the given field number. + ExtensionRequest file_containing_extension = 5; + + // Finds the tag numbers used by all known extensions of extendee_type, and + // appends them to ExtensionNumberResponse in an undefined order. 
+ // Its corresponding method is best-effort: it's not guaranteed that the + // reflection service will implement this method, and it's not guaranteed + // that this method will provide all extensions. Returns + // StatusCode::UNIMPLEMENTED if it's not implemented. + // This field should be a fully-qualified type name. The format is + // <package>.<type> + string all_extension_numbers_of_type = 6; + + // List the full names of registered services. The content will not be + // checked. + string list_services = 7; + } +} + +// The type name and extension number sent by the client when requesting +// file_containing_extension. +message ExtensionRequest { + // Fully-qualified type name. The format should be <package>.<type> + string containing_type = 1; + int32 extension_number = 2; +} + +// The message sent by the server to answer ServerReflectionInfo method. +message ServerReflectionResponse { + string valid_host = 1; + ServerReflectionRequest original_request = 2; + // The server sets one of the following fields according to the + // message_request in the request. + oneof message_response { + // This message is used to answer file_by_filename, file_containing_symbol, + // file_containing_extension requests with transitive dependencies. + // As the repeated label is not allowed in oneof fields, we use a + // FileDescriptorResponse message to encapsulate the repeated fields. + // The reflection service is allowed to avoid sending FileDescriptorProtos + // that were previously sent in response to earlier requests in the stream. + FileDescriptorResponse file_descriptor_response = 4; + + // This message is used to answer all_extension_numbers_of_type requests. + ExtensionNumberResponse all_extension_numbers_response = 5; + + // This message is used to answer list_services requests. + ListServiceResponse list_services_response = 6; + + // This message is used when an error occurs. + ErrorResponse error_response = 7; + } +} + +// Serialized FileDescriptorProto messages sent by the server answering +// a file_by_filename, file_containing_symbol, or file_containing_extension +// request. +message FileDescriptorResponse { + // Serialized FileDescriptorProto messages. We avoid taking a dependency on + // descriptor.proto, which uses proto2 only features, by making them opaque + // bytes instead. + repeated bytes file_descriptor_proto = 1; +} + +// A list of extension numbers sent by the server answering +// all_extension_numbers_of_type request. +message ExtensionNumberResponse { + // Full name of the base type, including the package name. The format + // is <package>.<type> + string base_type_name = 1; + repeated int32 extension_number = 2; +} + +// A list of ServiceResponse sent by the server answering list_services request. +message ListServiceResponse { + // The information of each service may be expanded in the future, so we use + // ServiceResponse message to encapsulate it. + repeated ServiceResponse service = 1; +} + +// The information of a single service used by ListServiceResponse to answer +// list_services request. +message ServiceResponse { + // Full name of a registered service, including its package name. The format + // is <package>.<service> + string name = 1; +} + +// The error code and error message sent by the server when an error occurs. +message ErrorResponse { + // This field uses the error codes defined in grpc::StatusCode. 
+ int32 error_code = 1; + string error_message = 2; +} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go new file mode 100644 index 000000000..2294b2c6c --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go @@ -0,0 +1,124 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package grpc_reflection_v1alpha + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ServerReflectionClient is the client API for ServerReflection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ServerReflectionClient interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) +} + +type serverReflectionClient struct { + cc grpc.ClientConnInterface +} + +func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient { + return &serverReflectionClient{cc} +} + +func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) { + stream, err := c.cc.NewStream(ctx, &_ServerReflection_serviceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...) + if err != nil { + return nil, err + } + x := &serverReflectionServerReflectionInfoClient{stream} + return x, nil +} + +type ServerReflection_ServerReflectionInfoClient interface { + Send(*ServerReflectionRequest) error + Recv() (*ServerReflectionResponse, error) + grpc.ClientStream +} + +type serverReflectionServerReflectionInfoClient struct { + grpc.ClientStream +} + +func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) { + m := new(ServerReflectionResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// ServerReflectionServer is the server API for ServerReflection service. +// All implementations should embed UnimplementedServerReflectionServer +// for forward compatibility +type ServerReflectionServer interface { + // The reflection service is structured as a bidirectional stream, ensuring + // all related requests go to a single server. + ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error +} + +// UnimplementedServerReflectionServer should be embedded to have forward compatible implementations. 
+type UnimplementedServerReflectionServer struct { +} + +func (*UnimplementedServerReflectionServer) ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error { + return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented") +} + +func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) { + s.RegisterService(&_ServerReflection_serviceDesc, srv) +} + +func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream}) +} + +type ServerReflection_ServerReflectionInfoServer interface { + Send(*ServerReflectionResponse) error + Recv() (*ServerReflectionRequest, error) + grpc.ServerStream +} + +type serverReflectionServerReflectionInfoServer struct { + grpc.ServerStream +} + +func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) { + m := new(ServerReflectionRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _ServerReflection_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.reflection.v1alpha.ServerReflection", + HandlerType: (*ServerReflectionServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ServerReflectionInfo", + Handler: _ServerReflection_ServerReflectionInfo_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "reflection/grpc_reflection_v1alpha/reflection.proto", +} diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go new file mode 100644 index 000000000..7b6dd414a --- /dev/null +++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go @@ -0,0 +1,453 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package reflection implements server reflection service. + +The service implemented is defined in: +https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. + +To register server reflection on a gRPC server: + import "google.golang.org/grpc/reflection" + + s := grpc.NewServer() + pb.RegisterYourOwnServer(s, &server{}) + + // Register reflection service on gRPC server. 
+ reflection.Register(s) + + s.Serve(lis) + +*/ +package reflection // import "google.golang.org/grpc/reflection" + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "reflect" + "sort" + "sync" + + "github.com/golang/protobuf/proto" + dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + "google.golang.org/grpc/status" +) + +type serverReflectionServer struct { + rpb.UnimplementedServerReflectionServer + s *grpc.Server + + initSymbols sync.Once + serviceNames []string + symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files +} + +// Register registers the server reflection service on the given gRPC server. +func Register(s *grpc.Server) { + rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ + s: s, + }) +} + +// protoMessage is used for type assertion on proto messages. +// Generated proto message implements function Descriptor(), but Descriptor() +// is not part of interface proto.Message. This interface is needed to +// call Descriptor(). +type protoMessage interface { + Descriptor() ([]byte, []int) +} + +func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { + s.initSymbols.Do(func() { + serviceInfo := s.s.GetServiceInfo() + + s.symbols = map[string]*dpb.FileDescriptorProto{} + s.serviceNames = make([]string, 0, len(serviceInfo)) + processed := map[string]struct{}{} + for svc, info := range serviceInfo { + s.serviceNames = append(s.serviceNames, svc) + fdenc, ok := parseMetadata(info.Metadata) + if !ok { + continue + } + fd, err := decodeFileDesc(fdenc) + if err != nil { + continue + } + s.processFile(fd, processed) + } + sort.Strings(s.serviceNames) + }) + + return s.serviceNames, s.symbols +} + +func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) { + filename := fd.GetName() + if _, ok := processed[filename]; ok { + return + } + processed[filename] = struct{}{} + + prefix := fd.GetPackage() + + for _, msg := range fd.MessageType { + s.processMessage(fd, prefix, msg) + } + for _, en := range fd.EnumType { + s.processEnum(fd, prefix, en) + } + for _, ext := range fd.Extension { + s.processField(fd, prefix, ext) + } + for _, svc := range fd.Service { + svcName := fqn(prefix, svc.GetName()) + s.symbols[svcName] = fd + for _, meth := range svc.Method { + name := fqn(svcName, meth.GetName()) + s.symbols[name] = fd + } + } + + for _, dep := range fd.Dependency { + fdenc := proto.FileDescriptor(dep) + fdDep, err := decodeFileDesc(fdenc) + if err != nil { + continue + } + s.processFile(fdDep, processed) + } +} + +func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) { + msgName := fqn(prefix, msg.GetName()) + s.symbols[msgName] = fd + + for _, nested := range msg.NestedType { + s.processMessage(fd, msgName, nested) + } + for _, en := range msg.EnumType { + s.processEnum(fd, msgName, en) + } + for _, ext := range msg.Extension { + s.processField(fd, msgName, ext) + } + for _, fld := range msg.Field { + s.processField(fd, msgName, fld) + } + for _, oneof := range msg.OneofDecl { + oneofName := fqn(msgName, oneof.GetName()) + s.symbols[oneofName] = fd + } +} + +func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) { + enName := fqn(prefix, en.GetName()) + 
s.symbols[enName] = fd + + for _, val := range en.Value { + valName := fqn(enName, val.GetName()) + s.symbols[valName] = fd + } +} + +func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) { + fldName := fqn(prefix, fld.GetName()) + s.symbols[fldName] = fd +} + +func fqn(prefix, name string) string { + if prefix == "" { + return name + } + return prefix + "." + name +} + +// fileDescForType gets the file descriptor for the given type. +// The given type should be a proto message. +func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + enc, _ := m.Descriptor() + + return decodeFileDesc(enc) +} + +// decodeFileDesc does decompression and unmarshalling on the given +// file descriptor byte slice. +func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) { + raw, err := decompress(enc) + if err != nil { + return nil, fmt.Errorf("failed to decompress enc: %v", err) + } + + fd := new(dpb.FileDescriptorProto) + if err := proto.Unmarshal(raw, fd); err != nil { + return nil, fmt.Errorf("bad descriptor: %v", err) + } + return fd, nil +} + +// decompress does gzip decompression. +func decompress(b []byte) ([]byte, error) { + r, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v", err) + } + out, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("bad gzipped descriptor: %v", err) + } + return out, nil +} + +func typeForName(name string) (reflect.Type, error) { + pt := proto.MessageType(name) + if pt == nil { + return nil, fmt.Errorf("unknown type: %q", name) + } + st := pt.Elem() + + return st, nil +} + +func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + + var extDesc *proto.ExtensionDesc + for id, desc := range proto.RegisteredExtensions(m) { + if id == ext { + extDesc = desc + break + } + } + + if extDesc == nil { + return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext) + } + + return decodeFileDesc(proto.FileDescriptor(extDesc.Filename)) +} + +func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { + m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to create message from type: %v", st) + } + + exts := proto.RegisteredExtensions(m) + out := make([]int32, 0, len(exts)) + for id := range exts { + out = append(out, id) + } + return out, nil +} + +// fileDescEncodingByFilename finds the file descriptor for given filename, +// does marshalling on it and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingByFilename(name string) ([]byte, error) { + enc := proto.FileDescriptor(name) + if enc == nil { + return nil, fmt.Errorf("unknown file: %v", name) + } + fd, err := decodeFileDesc(enc) + if err != nil { + return nil, err + } + return proto.Marshal(fd) +} + +// parseMetadata finds the file descriptor bytes specified meta. +// For SupportPackageIsVersion4, m is the name of the proto file, we +// call proto.FileDescriptor to get the byte slice. 
+// For SupportPackageIsVersion3, m is a byte slice itself. +func parseMetadata(meta interface{}) ([]byte, bool) { + // Check if meta is the file name. + if fileNameForMeta, ok := meta.(string); ok { + return proto.FileDescriptor(fileNameForMeta), true + } + + // Check if meta is the byte slice. + if enc, ok := meta.([]byte); ok { + return enc, true + } + + return nil, false +} + +// fileDescEncodingContainingSymbol finds the file descriptor containing the given symbol, +// does marshalling on it and returns the marshalled result. +// The given symbol can be a type, a service or a method. +func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string) ([]byte, error) { + _, symbols := s.getSymbols() + fd := symbols[name] + if fd == nil { + // Check if it's a type name that was not present in the + // transitive dependencies of the registered services. + if st, err := typeForName(name); err == nil { + fd, err = s.fileDescForType(st) + if err != nil { + return nil, err + } + } + } + + if fd == nil { + return nil, fmt.Errorf("unknown symbol: %v", name) + } + + return proto.Marshal(fd) +} + +// fileDescEncodingContainingExtension finds the file descriptor containing given extension, +// does marshalling on it and returns the marshalled result. +func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32) ([]byte, error) { + st, err := typeForName(typeName) + if err != nil { + return nil, err + } + fd, err := fileDescContainingExtension(st, extNum) + if err != nil { + return nil, err + } + return proto.Marshal(fd) +} + +// allExtensionNumbersForTypeName returns all extension numbers for the given type. +func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) { + st, err := typeForName(name) + if err != nil { + return nil, err + } + extNums, err := s.allExtensionNumbersForType(st) + if err != nil { + return nil, err + } + return extNums, nil +} + +// ServerReflectionInfo is the reflection service handler. 
+func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { + for { + in, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + + out := &rpb.ServerReflectionResponse{ + ValidHost: in.Host, + OriginalRequest: in, + } + switch req := in.MessageRequest.(type) { + case *rpb.ServerReflectionRequest_FileByFilename: + b, err := s.fileDescEncodingByFilename(req.FileByFilename) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_FileContainingSymbol: + b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_FileContainingExtension: + typeName := req.FileContainingExtension.ContainingType + extNum := req.FileContainingExtension.ExtensionNumber + b, err := s.fileDescEncodingContainingExtension(typeName, extNum) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{ + FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}}, + } + } + case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType: + extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType) + if err != nil { + out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{ + ErrorResponse: &rpb.ErrorResponse{ + ErrorCode: int32(codes.NotFound), + ErrorMessage: err.Error(), + }, + } + } else { + out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{ + AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{ + BaseTypeName: req.AllExtensionNumbersOfType, + ExtensionNumber: extNums, + }, + } + } + case *rpb.ServerReflectionRequest_ListServices: + svcNames, _ := s.getSymbols() + serviceResponses := make([]*rpb.ServiceResponse, len(svcNames)) + for i, n := range svcNames { + serviceResponses[i] = &rpb.ServiceResponse{ + Name: n, + } + } + out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{ + ListServicesResponse: &rpb.ListServiceResponse{ + Service: serviceResponses, + }, + } + default: + return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest) + } + + if err := stream.Send(out); err != nil { + return err + } + } +} diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh new file mode 100644 index 000000000..987bc2025 --- /dev/null +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# Copyright 2020 gRPC authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu -o pipefail + +WORKDIR=$(mktemp -d) + +function finish { + rm -rf "$WORKDIR" +} +trap finish EXIT + +export GOBIN=${WORKDIR}/bin +export PATH=${GOBIN}:${PATH} +mkdir -p ${GOBIN} + +echo "go install github.com/golang/protobuf/protoc-gen-go" +(cd test/tools && go install github.com/golang/protobuf/protoc-gen-go) + +echo "go install cmd/protoc-gen-go-grpc" +(cd cmd/protoc-gen-go-grpc && go install .) + +echo "git clone https://github.com/grpc/grpc-proto" +git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto + +mkdir -p ${WORKDIR}/googleapis/google/rpc +echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" +curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto + +mkdir -p ${WORKDIR}/out + +SOURCES=( + ${WORKDIR}/googleapis/google/rpc/code.proto + ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto + ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto + ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto + ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto + ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto + ${WORKDIR}/grpc-proto/grpc/service_config/service_config.proto + $(git ls-files --exclude-standard --cached --others "*.proto") +) +OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config +for src in ${SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},requireUnimplementedServers=false:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + ${src} +done + +# The go_package option in grpc/lookup/v1/rls.proto doesn't match the +# current location. Move it into the right place. +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1 + +# grpc_testingv3/testv3.pb.go is not re-generated because it was +# intentionally generated by an older version of protoc-gen-go. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go + +# grpc/service_config/service_config.proto does not have a go_package option. +cp ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config + +cp -R ${WORKDIR}/out/google.golang.org/grpc/* . 
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e83da346a..379275a2d 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -21,6 +21,11 @@ package resolver import ( + "context" + "net" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/serviceconfig" ) @@ -69,12 +74,21 @@ func GetDefaultScheme() string { } // AddressType indicates the address type returned by name resolution. +// +// Deprecated: use Attributes in Address instead. type AddressType uint8 const ( // Backend indicates the address is for a backend server. + // + // Deprecated: use Attributes in Address instead. Backend AddressType = iota // GRPCLB indicates the address is for a grpclb load balancer. + // + // Deprecated: to select the GRPCLB load balancing policy, use a service + // config with a corresponding loadBalancingConfig. To supply balancer + // addresses to the GRPCLB load balancing policy, set State.Attributes + // using balancer/grpclb/state.Set. GRPCLB ) @@ -83,33 +97,75 @@ const ( type Address struct { // Addr is the server address on which a connection will be established. Addr string - // Type is the type of this address. - Type AddressType + // ServerName is the name of this address. + // If non-empty, the ServerName is used as the transport certification authority for + // the address, instead of the hostname from the Dial target string. In most cases, + // this should not be set. // - // e.g. if Type is GRPCLB, ServerName should be the name of the remote load + // If Type is GRPCLB, ServerName should be the name of the remote load // balancer, not the name of the backend. + // + // WARNING: ServerName must only be populated with trusted values. It + // is insecure to populate it with data from untrusted inputs since untrusted + // values could be used to bypass the authority checks performed by TLS. ServerName string + + // Attributes contains arbitrary data about this address intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes + + // Type is the type of this address. + // + // Deprecated: use Attributes instead. + Type AddressType + // Metadata is the information associated with Addr, which may be used // to make load balancing decision. + // + // Deprecated: use Attributes instead. Metadata interface{} } -// BuildOption includes additional information for the builder to create +// BuildOptions includes additional information for the builder to create // the resolver. -type BuildOption struct { - // DisableServiceConfig indicates whether resolver should fetch service config data. +type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). 
In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) } // State contains the current Resolver state relevant to the ClientConn. type State struct { - Addresses []Address // Resolved addresses for the target - // ServiceConfig is the parsed service config; obtained from - // serviceconfig.Parse. - ServiceConfig serviceconfig.Config + // Addresses is the latest set of resolved addresses for the target. + Addresses []Address + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. + ServiceConfig *serviceconfig.ParseResult - // TODO: add Err error + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes } // ClientConn contains the callbacks for resolver to notify any updates @@ -122,6 +178,10 @@ type State struct { type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. UpdateState(State) + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) // NewAddress is called by resolver to notify ClientConn a new list // of resolved addresses. // The address list should be the complete list of resolved addresses. @@ -133,6 +193,9 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewServiceConfig(serviceConfig string) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. + ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult } // Target represents a target for gRPC, as specified in: @@ -164,14 +227,14 @@ type Builder interface { // // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. - Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) // Scheme returns the scheme supported by this resolver. // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. Scheme() string } -// ResolveNowOption includes additional information for ResolveNow. -type ResolveNowOption struct{} +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} // Resolver watches for the updates on the specified target. // Updates include address updates and service config updates. @@ -180,7 +243,7 @@ type Resolver interface { // again. It's just a hint, resolver can ignore this if it's not necessary. // // It could be called multiple times concurrently. - ResolveNow(ResolveNowOption) + ResolveNow(ResolveNowOptions) // Close closes the resolver. 
Close() } diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 6934905b0..edfda866c 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -21,138 +21,192 @@ package grpc import ( "fmt" "strings" - "sync/atomic" + "sync" + "time" - "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" ) // ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConnection interface. +// It implements resolver.ClientConn interface. type ccResolverWrapper struct { - cc *ClientConn - resolver resolver.Resolver - addrCh chan []resolver.Address - scCh chan string - done uint32 // accessed atomically; set to 1 when closed. - curState resolver.State + cc *ClientConn + resolverMu sync.Mutex + resolver resolver.Resolver + done *grpcsync.Event + curState resolver.State + + pollingMu sync.Mutex + polling chan struct{} } -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. +func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { + ccr := &ccResolverWrapper{ + cc: cc, + done: grpcsync.NewEvent(), } - return spl[0], spl[1], true -} - -// parseTarget splits target into a struct containing scheme, authority and -// endpoint. -// -// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: -// target}. -func parseTarget(target string) (ret resolver.Target) { - var ok bool - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - return ret -} -// newCCResolverWrapper parses cc.target for scheme and gets the resolver -// builder for this scheme and builds the resolver. The monitoring goroutine -// for it is not started yet and can be created by calling start(). -// -// If withResolverBuilder dial option is set, the specified resolver will be -// used instead. 
-func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { - rb := cc.dopts.resolverBuilder - if rb == nil { - return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() } - - ccr := &ccResolverWrapper{ - cc: cc, - addrCh: make(chan []resolver.Address, 1), - scCh: make(chan string, 1), + rbo := resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, } var err error - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig}) + // We need to hold the lock here while we assign to the ccr.resolver field + // to guard against a data race caused by the following code path, + // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up + // accessing ccr.resolver which is being assigned here. + ccr.resolverMu.Lock() + defer ccr.resolverMu.Unlock() + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) if err != nil { return nil, err } return ccr, nil } -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) { - ccr.resolver.ResolveNow(o) +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.resolverMu.Lock() + if !ccr.done.HasFired() { + ccr.resolver.ResolveNow(o) + } + ccr.resolverMu.Unlock() } func (ccr *ccResolverWrapper) close() { + ccr.resolverMu.Lock() ccr.resolver.Close() - atomic.StoreUint32(&ccr.done, 1) + ccr.done.Fire() + ccr.resolverMu.Unlock() } -func (ccr *ccResolverWrapper) isDone() bool { - return atomic.LoadUint32(&ccr.done) == 1 +// poll begins or ends asynchronous polling of the resolver based on whether +// err is ErrBadResolverState. +func (ccr *ccResolverWrapper) poll(err error) { + ccr.pollingMu.Lock() + defer ccr.pollingMu.Unlock() + if err != balancer.ErrBadResolverState { + // stop polling + if ccr.polling != nil { + close(ccr.polling) + ccr.polling = nil + } + return + } + if ccr.polling != nil { + // already polling + return + } + p := make(chan struct{}) + ccr.polling = p + go func() { + for i := 0; ; i++ { + ccr.resolveNow(resolver.ResolveNowOptions{}) + t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i)) + select { + case <-p: + t.Stop() + return + case <-ccr.done.Done(): + // Resolver has been closed. + t.Stop() + return + case <-t.C: + select { + case <-p: + return + default: + } + // Timer expired; re-resolve. + } + } + }() } func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { - if ccr.isDone() { + if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) + channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) if channelz.IsOn() { ccr.addChannelzTraceEvent(s) } - ccr.cc.updateResolverState(s) ccr.curState = s + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +func (ccr *ccResolverWrapper) ReportError(err error) { + if ccr.done.HasFired() { + return + } + channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) } // NewAddress is called by the resolver implementation to send addresses to gRPC. 
func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - if ccr.isDone() { + if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) if channelz.IsOn() { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) } ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState) + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC. func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - if ccr.isDone() { + if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) - c, err := parseServiceConfig(sc) - if err != nil { + channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + if ccr.cc.dopts.disableServiceConfig { + channelz.Info(ccr.cc.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + ccr.poll(balancer.ErrBadResolverState) return } if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: c}) + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) } - ccr.curState.ServiceConfig = c - ccr.cc.updateResolverState(ccr.curState) + ccr.curState.ServiceConfig = scpr + ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil)) +} + +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) } func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string - oldSC, oldOK := ccr.curState.ServiceConfig.(*ServiceConfig) - newSC, newOK := s.ServiceConfig.(*ServiceConfig) + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { updates = append(updates, "service config updated") } @@ -161,7 +215,7 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), Severity: channelz.CtINFO, }) diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 088c3f1b2..8644b8a7d 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -155,7 +155,6 @@ func (d *gzipDecompressor) Type() string { type callInfo struct { compressorType string failFast bool - stream ClientStream maxReceiveMessageSize *int maxSendMessageSize *int creds credentials.PerRPCCredentials @@ -180,7 +179,7 @@ type CallOption interface { // after is called after the call has completed. 
after cannot return an // error, so any failures should be reported via output parameters. - after(*callInfo) + after(*callInfo, *csAttempt) } // EmptyCallOption does not alter the Call configuration. @@ -188,8 +187,8 @@ type CallOption interface { // by interceptors. type EmptyCallOption struct{} -func (EmptyCallOption) before(*callInfo) error { return nil } -func (EmptyCallOption) after(*callInfo) {} +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo, *csAttempt) {} // Header returns a CallOptions that retrieves the header metadata // for a unary RPC. @@ -205,10 +204,8 @@ type HeaderCallOption struct { } func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo) { - if c.stream != nil { - *o.HeaderAddr, _ = c.stream.Header() - } +func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { + *o.HeaderAddr, _ = attempt.s.Header() } // Trailer returns a CallOptions that retrieves the trailer metadata @@ -225,10 +222,8 @@ type TrailerCallOption struct { } func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo) { - if c.stream != nil { - *o.TrailerAddr = c.stream.Trailer() - } +func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { + *o.TrailerAddr = attempt.s.Trailer() } // Peer returns a CallOption that retrieves peer information for a unary RPC. @@ -245,11 +240,9 @@ type PeerCallOption struct { } func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo) { - if c.stream != nil { - if x, ok := peer.FromContext(c.stream.Context()); ok { - *o.PeerAddr = *x - } +func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { + if x, ok := peer.FromContext(attempt.s.Context()); ok { + *o.PeerAddr = *x } } @@ -285,15 +278,16 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo) {} +func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} -// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. -func MaxCallRecvMsgSize(s int) CallOption { - return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. +func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can receive. +// size in bytes the client can receive. // This is an EXPERIMENTAL API. type MaxRecvMsgSizeCallOption struct { MaxRecvMsgSize int @@ -303,15 +297,16 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} -// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. -func MaxCallSendMsgSize(s int) CallOption { - return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. 
+func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can send. +// size in bytes the client can send. // This is an EXPERIMENTAL API. type MaxSendMsgSizeCallOption struct { MaxSendMsgSize int @@ -321,7 +316,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} +func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -340,7 +335,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo) {} +func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -361,7 +356,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo) {} +func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -394,7 +389,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo) {} +func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // ForceCodec returns a CallOption that will set the given Codec to be // used for all request and response messages for a call. The result of calling @@ -426,7 +421,7 @@ func (o ForceCodecCallOption) before(c *callInfo) error { c.codec = o.Codec return nil } -func (o ForceCodecCallOption) after(c *callInfo) {} +func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -448,7 +443,7 @@ func (o CustomCodecCallOption) before(c *callInfo) error { c.codec = o.Codec return nil } -func (o CustomCodecCallOption) after(c *callInfo) {} +func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -469,7 +464,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -648,35 +643,58 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, st.Err() } + var size int if pf == compressionMade { // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. 
if dc != nil { d, err = dc.Do(bytes.NewReader(d)) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } + size = len(d) } else { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } + d, size, err = decompress(compressor, d, maxReceiveMessageSize) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) } + } else { + size = len(d) } - if len(d) > maxReceiveMessageSize { + if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize) } return d, nil } +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. +func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return nil, 0, err + } + if sizer, ok := compressor.(interface { + DecompressedSize(compressedBytes []byte) int + }); ok { + if size := sizer.DecompressedSize(d); size >= 0 { + if size > maxReceiveMessageSize { + return nil, size, nil + } + // size is used as an estimate to size the buffer, but we + // will read more data if available. + // +MinRead so ReadFrom will not reallocate if size is correct. + buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return buf.Bytes(), int(bytesRead), err + } + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err +} + // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? @@ -848,7 +866,7 @@ type channelzData struct { // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 5. +// support package version is 6. // // Older versions are kept for compatibility. They may be removed if // compatibility cannot be maintained. 
@@ -858,6 +876,7 @@ const ( SupportPackageIsVersion3 = true SupportPackageIsVersion4 = true SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index f064b73e5..c2c7cae6c 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -42,6 +42,7 @@ import ( "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -87,6 +88,12 @@ type service struct { mdata interface{} } +type serverWorkerData struct { + st transport.ServerTransport + wg *sync.WaitGroup + stream *transport.Stream +} + // Server is a gRPC server to serve RPC requests. type Server struct { opts serverOptions @@ -107,6 +114,8 @@ type Server struct { channelzID int64 // channelz unique identification number czData *channelzData + + serverWorkerChannels []chan *serverWorkerData } type serverOptions struct { @@ -116,6 +125,8 @@ type serverOptions struct { dc Decompressor unaryInt UnaryServerInterceptor streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor inTapHandle tap.ServerInHandle statsHandler stats.Handler maxConcurrentStreams uint32 @@ -130,6 +141,8 @@ type serverOptions struct { readBufferSize int connectionTimeout time.Duration maxHeaderListSize *uint32 + headerTableSize *uint32 + numServerWorkers uint32 } var defaultServerOptions = serverOptions{ @@ -310,6 +323,16 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { }) } +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the // server. Only one stream interceptor can be installed. func StreamInterceptor(i StreamServerInterceptor) ServerOption { @@ -321,6 +344,16 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption { }) } +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. func InTapHandle(h tap.ServerInHandle) ServerOption { @@ -343,8 +376,8 @@ func StatsHandler(h stats.Handler) ServerOption { // unknown service handler. 
The provided method is a bidi-streaming RPC service // handler that will be invoked instead of returning the "unimplemented" gRPC // error whenever a request is received for an unregistered service or method. -// The handling function has full access to the Context of the request and the -// stream, and the invocation bypasses interceptors. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.unknownStreamDesc = &StreamDesc{ @@ -377,6 +410,76 @@ func MaxHeaderListSize(s uint32) ServerOption { }) } +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// This API is EXPERIMENTAL. +func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// This API is EXPERIMENTAL. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. + return newFuncServerOption(func(o *serverOptions) { + o.numServerWorkers = numServerWorkers + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorkers blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows different requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker(ch chan *serverWorkerData) { + // To make sure all server workers don't reset at the same time, choose a + // random number of iterations before resetting. + threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) + for completed := 0; completed < threshold; completed++ { + data, ok := <-ch + if !ok { + return + } + s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) + data.wg.Done() + } + go s.serverWorker(ch) +} + +// initServerWorkers creates worker goroutines and channels to process incoming +// connections to reduce the time spent overall on runtime.morestack. 
+func (s *Server) initServerWorkers() { + s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + s.serverWorkerChannels[i] = make(chan *serverWorkerData) + go s.serverWorker(s.serverWorkerChannels[i]) + } +} + +func (s *Server) stopServerWorkers() { + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + close(s.serverWorkerChannels[i]) + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -393,12 +496,18 @@ func NewServer(opt ...ServerOption) *Server { done: grpcsync.NewEvent(), czData: new(channelzData), } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) s.cv = sync.NewCond(&s.mu) if EnableTracing { _, file, line, _ := runtime.Caller(1) s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) } + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() + } + if channelz.IsOn() { s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") } @@ -647,7 +756,7 @@ func (s *Server) handleRawConn(rawConn net.Conn) { s.mu.Lock() s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() - grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + channelz.Warningf(s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) rawConn.Close() } rawConn.SetDeadline(time.Time{}) @@ -686,6 +795,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr ReadBufferSize: s.opts.readBufferSize, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, } st, err := transport.NewServerTransport("http2", c, config) if err != nil { @@ -693,7 +803,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() c.Close() - grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Warning(s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) return nil } @@ -703,12 +813,27 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr func (s *Server) serveStreams(st transport.ServerTransport) { defer st.Close() var wg sync.WaitGroup + + var roundRobinCounter uint32 st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() + if s.opts.numServerWorkers > 0 { + data := &serverWorkerData{st: st, wg: &wg, stream: stream} + select { + case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + default: + // If all stream workers are busy, fallback to the default code path. 
+ go func() { + s.handleStream(st, stream, s.traceInfo(st, stream)) + wg.Done() + }() + } + } else { + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + } }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -832,12 +957,12 @@ func (s *Server) incrCallsFailed() { func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { - grpclog.Errorln("grpc: server failed to encode response: ", err) + channelz.Error(s.channelzID, "grpc: server failed to encode response: ", err) return err } compData, err := compress(data, cp, comp) if err != nil { - grpclog.Errorln("grpc: server failed to compress response: ", err) + channelz.Error(s.channelzID, "grpc: server failed to compress response: ", err) return err } hdr, payload := msgHeader(data, compData) @@ -852,42 +977,93 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return err } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { - if channelz.IsOn() { - s.incrCallsStarted() - defer func() { - if err != nil && err != io.EOF { - s.incrCallsFailed() - } else { - s.incrCallsSucceeded() - } - }() +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) 
+ } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } + } + + s.opts.unaryInt = chainedInt +} + +// getChainUnaryHandler recursively generate the chained UnaryHandler +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { sh := s.opts.statsHandler - if sh != nil { - beginTime := time.Now() - begin := &stats.Begin{ - BeginTime: beginTime, + if sh != nil || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() } - sh.HandleRPC(stream.Context(), begin) - defer func() { - end := &stats.End{ + var statsBegin *stats.Begin + if sh != nil { + beginTime := time.Now() + statsBegin = &stats.Begin{ BeginTime: beginTime, - EndTime: time.Now(), - } - if err != nil && err != io.EOF { - end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) - }() - } - if trInfo != nil { - defer trInfo.tr.Finish() - trInfo.tr.LazyLog(&trInfo.firstLine, false) + sh.HandleRPC(stream.Context(), statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. defer func() { - if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } } }() } @@ -960,7 +1136,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
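ChainUnaryInterceptor and ChainStreamInterceptor are collapsed into a single interceptor by the chaining helpers above, with the first interceptor outermost, while NumStreamWorkers and HeaderTableSize feed the worker pool and HTTP/2 settings introduced earlier in this file. A minimal sketch of how a caller might combine these options; logUnary and authUnary are hypothetical interceptors, and the worker and header-table options are marked experimental in this release.

// Sketch: wiring the new server options together. Interceptors run
// outermost-first, exactly as chainUnaryServerInterceptors documents.
package main

import (
    "context"
    "log"
    "net"

    "google.golang.org/grpc"
)

func logUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
    log.Printf("start %s", info.FullMethod)
    resp, err := handler(ctx, req)
    log.Printf("end %s err=%v", info.FullMethod, err)
    return resp, err
}

func authUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
    // A real implementation would inspect incoming metadata here.
    return handler(ctx, req)
}

func main() {
    lis, err := net.Listen("tcp", ":50051")
    if err != nil {
        log.Fatal(err)
    }
    srv := grpc.NewServer(
        // logUnary wraps authUnary, which wraps the service handler.
        grpc.ChainUnaryInterceptor(logUnary, authUnary),
        // Experimental: process streams on 8 long-lived worker goroutines
        // instead of spawning one goroutine per stream.
        grpc.NumStreamWorkers(8),
        // Experimental: HPACK dynamic table size for response headers.
        grpc.HeaderTableSize(4096),
    )
    // Service registration omitted; a generated RegisterXxxServer(srv, &impl{})
    // call would normally go here.
    if err := srv.Serve(lis); err != nil {
        log.Fatal(err)
    }
}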
if err != nil { if st, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, st); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) } } return err @@ -1005,7 +1181,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.SetError() } if e := t.WriteStatus(stream, appStatus); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if binlog != nil { if h, _ := stream.Header(); h.Len() > 0 { @@ -1032,9 +1208,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // The entire stream is done (for unary RPC only). return err } - if s, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, s); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { switch st := err.(type) { @@ -1084,34 +1260,52 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return err } +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) 
+ } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } + } + + s.opts.streamInt = chainedInt +} + +// getChainStreamHandler recursively generate the chained StreamHandler +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(srv interface{}, ss ServerStream) error { + return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } +} + func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() - defer func() { - if err != nil && err != io.EOF { - s.incrCallsFailed() - } else { - s.incrCallsSucceeded() - } - }() } sh := s.opts.statsHandler + var statsBegin *stats.Begin if sh != nil { beginTime := time.Now() - begin := &stats.Begin{ + statsBegin = &stats.Begin{ BeginTime: beginTime, } - sh.HandleRPC(stream.Context(), begin) - defer func() { - end := &stats.End{ - BeginTime: beginTime, - EndTime: time.Now(), - } - if err != nil && err != io.EOF { - end.Error = toRPCErr(err) - } - sh.HandleRPC(stream.Context(), end) - }() + sh.HandleRPC(stream.Context(), statsBegin) } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ @@ -1126,6 +1320,41 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp statsHandler: sh, } + if sh != nil || trInfo != nil || channelz.IsOn() { + // See comment in processUnaryRPC on defers. 
+ defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if sh != nil { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + ss.binlog = binarylog.GetMethodLogger(stream.Method()) if ss.binlog != nil { md, _ := metadata.FromIncomingContext(ctx) @@ -1179,16 +1408,6 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) - defer func() { - ss.mu.Lock() - if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - ss.trInfo.tr.Finish() - ss.trInfo.tr = nil - ss.mu.Unlock() - }() } var appErr error var server interface{} @@ -1259,7 +1478,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1300,7 +1519,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1377,6 +1596,9 @@ func (s *Server) Stop() { for c := range st { c.Close() } + if s.opts.numServerWorkers > 0 { + s.stopServerWorkers() + } s.mu.Lock() if s.events != nil { diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index d0787f1e2..3132a66cd 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -21,14 +21,15 @@ package grpc import ( "encoding/json" "fmt" + "reflect" "strconv" "strings" "time" - "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -79,7 +80,7 @@ type ServiceConfig struct { serviceconfig.Config // LB is the load balancer the service providers recommends. The balancer - // specified via grpc.WithBalancer will override this. This is deprecated; + // specified via grpc.WithBalancerName will override this. This is deprecated; // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig // will be used. LB *string @@ -136,9 +137,9 @@ type retryPolicy struct { maxAttempts int // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initialBackoffMS). In general, the nth attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at // random(0, - // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)). 
+ // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). // // These fields are required and must be greater than zero. initialBackoff time.Duration @@ -249,32 +250,27 @@ type jsonMC struct { RetryPolicy *jsonRetryPolicy } -type loadBalancingConfig map[string]json.RawMessage - // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. type jsonSC struct { LoadBalancingPolicy *string - LoadBalancingConfig *[]loadBalancingConfig + LoadBalancingConfig *internalserviceconfig.BalancerConfig MethodConfig *[]jsonMC RetryThrottling *retryThrottlingPolicy HealthCheckConfig *healthCheckConfig } func init() { - internal.ParseServiceConfig = func(sc string) (interface{}, error) { - return parseServiceConfig(sc) - } + internal.ParseServiceConfigForTesting = parseServiceConfig } - -func parseServiceConfig(js string) (*ServiceConfig, error) { +func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { - return nil, fmt.Errorf("no JSON service config provided") + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} } var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ LB: rsc.LoadBalancingPolicy, @@ -283,37 +279,15 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { healthCheckConfig: rsc.HealthCheckConfig, rawJSONString: js, } - if rsc.LoadBalancingConfig != nil { - for i, lbcfg := range *rsc.LoadBalancingConfig { - if len(lbcfg) != 1 { - err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) - grpclog.Warningf(err.Error()) - return nil, err - } - var name string - var jsonCfg json.RawMessage - for name, jsonCfg = range lbcfg { - } - builder := balancer.Get(name) - if builder == nil { - continue - } - sc.lbConfig = &lbConfig{name: name} - if parser, ok := builder.(balancer.ConfigParser); ok { - var err error - sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg) - if err != nil { - return nil, fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) - } - } else if string(jsonCfg) != "{}" { - grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) - } - break + if c := rsc.LoadBalancingConfig; c != nil { + sc.lbConfig = &lbConfig{ + name: c.Name, + cfg: c.Config, } } if rsc.MethodConfig == nil { - return &sc, nil + return &serviceconfig.ParseResult{Config: &sc} } for _, m := range *rsc.MethodConfig { if m.Name == nil { @@ -322,7 +296,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { d, err := parseDuration(m.Timeout) if err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + return &serviceconfig.ParseResult{Err: err} } mc := MethodConfig{ @@ -331,7 +305,7 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { } if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return nil, err + return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { if *m.MaxRequestMessageBytes > int64(maxInt) { @@ -356,13 +330,13 @@ func parseServiceConfig(js string) (*ServiceConfig, error) { if sc.retryThrottling != nil { if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 
1000 { - return nil, fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt) + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} } if tr := sc.retryThrottling.TokenRatio; tr <= 0 { - return nil, fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr) + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} } } - return &sc, nil + return &serviceconfig.ParseResult{Config: &sc} } func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { @@ -427,3 +401,34 @@ func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { func newInt(b int) *int { return &b } + +func init() { + internal.EqualServiceConfigForTesting = equalServiceConfig +} + +// equalServiceConfig compares two configs. The rawJSONString field is ignored, +// because they may diff in white spaces. +// +// If any of them is NOT *ServiceConfig, return false. +func equalServiceConfig(a, b serviceconfig.Config) bool { + aa, ok := a.(*ServiceConfig) + if !ok { + return false + } + bb, ok := b.(*ServiceConfig) + if !ok { + return false + } + aaRaw := aa.rawJSONString + aa.rawJSONString = "" + bbRaw := bb.rawJSONString + bb.rawJSONString = "" + defer func() { + aa.rawJSONString = aaRaw + bb.rawJSONString = bbRaw + }() + // Using reflect.DeepEqual instead of cmp.Equal because many balancer + // configs are unexported, and cmp.Equal cannot compare unexported fields + // from unexported structs. + return reflect.DeepEqual(aa, bb) +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 53b27875a..187c30442 100644 --- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -22,27 +22,20 @@ // This package is EXPERIMENTAL. package serviceconfig -import ( - "google.golang.org/grpc/internal" -) - // Config represents an opaque data structure holding a service config. type Config interface { - isConfig() + isServiceConfig() } // LoadBalancingConfig represents an opaque data structure holding a load -// balancer config. +// balancing config. type LoadBalancingConfig interface { isLoadBalancingConfig() } -// Parse parses the JSON service config provided into an internal form or -// returns an error if the config is invalid. -func Parse(ServiceConfigJSON string) (Config, error) { - c, err := internal.ParseServiceConfig(ServiceConfigJSON) - if err != nil { - return nil, err - } - return c.(Config), err +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. +type ParseResult struct { + Config Config + Err error } diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index f3f593c84..63e476ee7 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -16,8 +16,6 @@ * */ -//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto - // Package stats is for collecting and reporting various network and RPC stats. // This package is for monitoring purpose only. All fields are read-only. // All APIs are experimental. @@ -81,6 +79,10 @@ type InHeader struct { Client bool // WireLength is the wire length of header. WireLength int + // Compression is the compression algorithm used for the RPC. 
+ Compression string + // Header contains the header metadata received. + Header metadata.MD // The following fields are valid only if Client is false. // FullMethod is the full RPC method string, i.e., /package.service/method. @@ -89,8 +91,6 @@ type InHeader struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // Compression is the compression algorithm used for the RPC. - Compression string } // IsClient indicates if the stats information is from client side. @@ -104,6 +104,9 @@ type InTrailer struct { Client bool // WireLength is the wire length of trailer. WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD } // IsClient indicates if the stats information is from client side. @@ -136,6 +139,10 @@ func (s *OutPayload) isRPCStats() {} type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD // The following fields are valid only if Client is true. // FullMethod is the full RPC method string, i.e., /package.service/method. @@ -144,8 +151,6 @@ type OutHeader struct { RemoteAddr net.Addr // LocalAddr is the local address of the corresponding connection. LocalAddr net.Addr - // Compression is the compression algorithm used for the RPC. - Compression string } // IsClient indicates if this stats information is from client side. @@ -158,7 +163,13 @@ type OutTrailer struct { // Client is true if this OutTrailer is from client side. Client bool // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD } // IsClient indicates if this stats information is from client side. @@ -176,6 +187,7 @@ type End struct { EndTime time.Time // Trailer contains the trailer metadata received from the server. This // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. Trailer metadata.MD // Error is the error the RPC ended with. It is an error generated from // status.Status and can be converted back to status.Status using diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index a1348e9b1..01e182c30 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -29,88 +29,23 @@ package status import ( "context" - "errors" "fmt" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" - "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/status" ) -func init() { - internal.StatusRawProto = statusRawProto -} - -func statusRawProto(s *Status) *spb.Status { return s.s } - -// statusError is an alias of a status proto. It implements error and Status, -// and a nil statusError should never be returned by this package. 
-type statusError spb.Status - -func (se *statusError) Error() string { - p := (*spb.Status)(se) - return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) -} - -func (se *statusError) GRPCStatus() *Status { - return &Status{s: (*spb.Status)(se)} -} - -// Is implements future error.Is functionality. -// A statusError is equivalent if the code and message are identical. -func (se *statusError) Is(target error) bool { - tse, ok := target.(*statusError) - if !ok { - return false - } - - return proto.Equal((*spb.Status)(se), (*spb.Status)(tse)) -} - -// Status represents an RPC status code, message, and details. It is immutable -// and should be created with New, Newf, or FromProto. -type Status struct { - s *spb.Status -} - -// Code returns the status code contained in s. -func (s *Status) Code() codes.Code { - if s == nil || s.s == nil { - return codes.OK - } - return codes.Code(s.s.Code) -} - -// Message returns the message contained in s. -func (s *Status) Message() string { - if s == nil || s.s == nil { - return "" - } - return s.s.Message -} - -// Proto returns s's status as an spb.Status proto message. -func (s *Status) Proto() *spb.Status { - if s == nil { - return nil - } - return proto.Clone(s.s).(*spb.Status) -} - -// Err returns an immutable error representing s; returns nil if s.Code() is -// OK. -func (s *Status) Err() error { - if s.Code() == codes.OK { - return nil - } - return (*statusError)(s.s) -} +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { - return &Status{s: &spb.Status{Code: int32(c), Message: msg}} + return status.New(c, msg) } // Newf returns New(c, fmt.Sprintf(format, a...)). @@ -135,7 +70,7 @@ func ErrorProto(s *spb.Status) error { // FromProto returns a Status representing s. func FromProto(s *spb.Status) *Status { - return &Status{s: proto.Clone(s).(*spb.Status)} + return status.FromProto(s) } // FromError returns a Status representing err if it was produced from this @@ -160,42 +95,6 @@ func Convert(err error) *Status { return s } -// WithDetails returns a new status with the provided details messages appended to the status. -// If any errors are encountered, it returns nil and the first error encountered. -func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { - if s.Code() == codes.OK { - return nil, errors.New("no error details for status with code OK") - } - // s.Code() != OK implies that s.Proto() != nil. - p := s.Proto() - for _, detail := range details { - any, err := ptypes.MarshalAny(detail) - if err != nil { - return nil, err - } - p.Details = append(p.Details, any) - } - return &Status{s: p}, nil -} - -// Details returns a slice of details messages attached to the status. -// If a detail cannot be decoded, the error is returned in place of the detail. 
-func (s *Status) Details() []interface{} { - if s == nil || s.s == nil { - return nil - } - details := make([]interface{}, 0, len(s.s.Details)) - for _, any := range s.s.Details { - detail := &ptypes.DynamicAny{} - if err := ptypes.UnmarshalAny(any, detail); err != nil { - details = append(details, err) - continue - } - details = append(details, detail.Message) - } - return details -} - // Code returns the Code of the error if it is a Status error, codes.OK if err // is nil, or codes.Unknown otherwise. func Code(err error) codes.Code { diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 134a624a1..629af76bd 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,7 +31,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -278,7 +277,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } cs.binlog = binarylog.GetMethodLogger(method) - cs.callInfo.stream = cs // Only this initial attempt has stats/tracing. // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. if err := cs.newAttemptLocked(sh, trInfo); err != nil { @@ -366,6 +364,11 @@ func (a *csAttempt) newStream() error { cs.callHdr.PreviousAttempts = cs.numRetries s, err := a.t.NewStream(cs.ctx, cs.callHdr) if err != nil { + if _, ok := err.(transport.PerformedIOError); ok { + // Return without converting to an RPC error so retry code can + // inspect. + return err + } return toRPCErr(err) } cs.attempt.s = s @@ -461,11 +464,21 @@ func (cs *clientStream) commitAttempt() { // shouldRetry returns nil if the RPC should be retried; otherwise it returns // the error that should be returned by the operation. func (cs *clientStream) shouldRetry(err error) error { - if cs.attempt.s == nil && !cs.callInfo.failFast { - // In the event of any error from NewStream (attempt.s == nil), we - // never attempted to write anything to the wire, so we can retry - // indefinitely for non-fail-fast RPCs. - return nil + unprocessed := false + if cs.attempt.s == nil { + pioErr, ok := err.(transport.PerformedIOError) + if ok { + // Unwrap error. + err = toRPCErr(pioErr.Err) + } else { + unprocessed = true + } + if !ok && !cs.callInfo.failFast { + // In the event of a non-IO operation error from NewStream, we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } } if cs.finished || cs.committed { // RPC is finished or committed; cannot retry. @@ -474,13 +487,12 @@ func (cs *clientStream) shouldRetry(err error) error { // Wait for the trailers. if cs.attempt.s != nil { <-cs.attempt.s.Done() + unprocessed = cs.attempt.s.Unprocessed() } - if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. 
- cs.firstAttempt = false return nil } - cs.firstAttempt = false if cs.cc.dopts.disableRetry { return err } @@ -488,7 +500,7 @@ func (cs *clientStream) shouldRetry(err error) error { pushback := 0 hasPushback := false if cs.attempt.s != nil { - if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil || !to { + if !cs.attempt.s.TrailersOnly() { return err } @@ -498,13 +510,13 @@ func (cs *clientStream) shouldRetry(err error) error { if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) + channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } hasPushback = true } else if len(sps) > 1 { - grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) + channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } @@ -566,6 +578,7 @@ func (cs *clientStream) retryLocked(lastErr error) error { cs.commitAttemptLocked() return err } + cs.firstAttempt = false if err := cs.newAttemptLocked(nil, nil); err != nil { return err } @@ -800,6 +813,15 @@ func (cs *clientStream) finish(err error) { } cs.finished = true cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo, cs.attempt) + } + } + } cs.mu.Unlock() // For binary logging. only log cancel in finish (could be caused by RPC ctx // canceled or ClientConn closed). Trailer will be logged in RecvMsg. @@ -821,15 +843,6 @@ func (cs *clientStream) finish(err error) { cs.cc.incrCallsSucceeded() } } - if cs.attempt != nil { - cs.attempt.finish(err) - // after functions all rely upon having a stream. - if cs.attempt.s != nil { - for _, o := range cs.opts { - o.after(cs.callInfo) - } - } - } cs.cancel() } @@ -1067,7 +1080,6 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin t: t, } - as.callInfo.stream = as s, err := as.t.NewStream(as.ctx, as.callHdr) if err != nil { err = toRPCErr(err) diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go index 60ae770f5..168cdb857 100644 --- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go +++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go @@ -37,7 +37,16 @@ type Listener struct { done chan struct{} } +// Implementation of net.Error providing timeout +type netErrorTimeout struct { + error +} + +func (e netErrorTimeout) Timeout() bool { return true } +func (e netErrorTimeout) Temporary() bool { return false } + var errClosed = fmt.Errorf("closed") +var errTimeout net.Error = netErrorTimeout{error: fmt.Errorf("i/o timeout")} // Listen returns a Listener that can only be contacted by its own Dialers and // creates buffered connections between the two. 
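parseServiceConfig now returns a *serviceconfig.ParseResult, and the retryPolicy it yields is what the reworked shouldRetry in stream.go consults, together with the grpc-retry-pushback-ms trailer. A minimal sketch of a client that supplies such a config as default JSON; the target address and the echo.Echo/UnaryEcho names are placeholders, and note (as an assumption to verify) that retry support in this release is still gated behind the GRPC_GO_RETRY=on environment variable.

// Sketch: a default service config whose methodConfig/retryPolicy fields
// exercise the parsing path rewritten above.
package main

import (
    "log"

    "google.golang.org/grpc"
)

const retryServiceConfig = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo", "method": "UnaryEcho"}],
    "timeout": "2s",
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
    conn, err := grpc.Dial(
        "localhost:50051",
        grpc.WithInsecure(),
        grpc.WithDefaultServiceConfig(retryServiceConfig),
    )
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
    // Stubs created from conn pick up the timeout and retry policy; servers
    // can still steer retries with the grpc-retry-pushback-ms trailer that
    // shouldRetry above inspects.
}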
@@ -104,6 +113,13 @@ type pipe struct { wwait sync.Cond rwait sync.Cond + // Indicate that a write/read timeout has occurred + wtimedout bool + rtimedout bool + + wtimer *time.Timer + rtimer *time.Timer + closed bool writeClosed bool } @@ -112,6 +128,9 @@ func newPipe(sz int) *pipe { p := &pipe{buf: make([]byte, 0, sz)} p.wwait.L = &p.mu p.rwait.L = &p.mu + + p.wtimer = time.AfterFunc(0, func() {}) + p.rtimer = time.AfterFunc(0, func() {}) return p } @@ -137,6 +156,10 @@ func (p *pipe) Read(b []byte) (n int, err error) { if p.writeClosed { return 0, io.EOF } + if p.rtimedout { + return 0, errTimeout + } + p.rwait.Wait() } wasFull := p.full() @@ -171,6 +194,10 @@ func (p *pipe) Write(b []byte) (n int, err error) { if !p.full() { break } + if p.wtimedout { + return 0, errTimeout + } + p.wwait.Wait() } wasEmpty := p.empty() @@ -232,11 +259,48 @@ func (c *conn) Close() error { return err2 } -func (*conn) LocalAddr() net.Addr { return addr{} } -func (*conn) RemoteAddr() net.Addr { return addr{} } -func (c *conn) SetDeadline(t time.Time) error { return fmt.Errorf("unsupported") } -func (c *conn) SetReadDeadline(t time.Time) error { return fmt.Errorf("unsupported") } -func (c *conn) SetWriteDeadline(t time.Time) error { return fmt.Errorf("unsupported") } +func (c *conn) SetDeadline(t time.Time) error { + c.SetReadDeadline(t) + c.SetWriteDeadline(t) + return nil +} + +func (c *conn) SetReadDeadline(t time.Time) error { + p := c.Reader.(*pipe) + p.mu.Lock() + defer p.mu.Unlock() + p.rtimer.Stop() + p.rtimedout = false + if !t.IsZero() { + p.rtimer = time.AfterFunc(time.Until(t), func() { + p.mu.Lock() + defer p.mu.Unlock() + p.rtimedout = true + p.rwait.Broadcast() + }) + } + return nil +} + +func (c *conn) SetWriteDeadline(t time.Time) error { + p := c.Writer.(*pipe) + p.mu.Lock() + defer p.mu.Unlock() + p.wtimer.Stop() + p.wtimedout = false + if !t.IsZero() { + p.wtimer = time.AfterFunc(time.Until(t), func() { + p.mu.Lock() + defer p.mu.Unlock() + p.wtimedout = true + p.wwait.Broadcast() + }) + } + return nil +} + +func (*conn) LocalAddr() net.Addr { return addr{} } +func (*conn) RemoteAddr() net.Addr { return addr{} } type addr struct{} diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 0a57b9994..07a2d26b3 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -41,9 +41,6 @@ func methodFamily(m string) string { if i := strings.Index(m, "/"); i >= 0 { m = m[:i] // remove everything from second slash } - if i := strings.LastIndex(m, "."); i >= 0 { - m = m[i+1:] // cut down to last dotted component - } return m } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 5411a73a2..c7e67b847 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.23.0" +const Version = "1.30.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 661e1e1de..f0a67298a 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -1,20 +1,22 @@ #!/bin/bash -if [[ `uname -a` = *"Darwin"* ]]; then - echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047" - exit 1 -fi - set -ex # Exit on error; debugging enabled. set -o pipefail # Fail a pipe if any sub-command fails. 
+# not makes sure the command passed to it does not exit with a return code of 0. +not() { + # This is required instead of the earlier (! $COMMAND) because subshells and + # pipefail don't work the same on Darwin as in Linux. + ! "$@" +} + die() { echo "$@" >&2 exit 1 } fail_on_output() { - tee /dev/stderr | (! read) + tee /dev/stderr | not read } # Check to make sure it's safe to modify the user's git repo. @@ -31,12 +33,14 @@ PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}" if [[ "$1" = "-install" ]]; then # Check for module support if go help mod >& /dev/null; then + # Install the pinned versions as defined in module tools. + pushd ./test/tools go install \ golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell \ - github.com/golang/protobuf/protoc-gen-go + github.com/client9/misspell/cmd/misspell + popd else # Ye olde `go get` incantation. # Note: this gets the latest version of all tools (vs. the pinned versions @@ -45,8 +49,7 @@ if [[ "$1" = "-install" ]]; then golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ - github.com/client9/misspell/cmd/misspell \ - github.com/golang/protobuf/protoc-gen-go + github.com/client9/misspell/cmd/misspell fi if [[ -z "${VET_SKIP_PROTO}" ]]; then if [[ "${TRAVIS}" = "true" ]]; then @@ -57,7 +60,7 @@ if [[ "$1" = "-install" ]]; then unzip ${PROTOC_FILENAME} bin/protoc --version popd - elif ! which protoc > /dev/null; then + elif not which protoc > /dev/null; then die "Please install protoc into your path" fi fi @@ -67,18 +70,24 @@ elif [[ "$#" -ne 0 ]]; then fi # - Ensure all source files contain a copyright message. -git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | fail_on_output +not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. -(! grep 'func Test[^(]' *_test.go) -(! grep 'func Test[^(]' test/*.go) +not grep 'func Test[^(]' *_test.go +not grep 'func Test[^(]' test/*.go + +# - Do not import x/net/context. +not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|wrr_test') +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Ensure all ptypes proto packages are renamed when importing. -git ls-files "*.go" | (! xargs grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/") +not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" + +# - Ensure all xds proto imports are renamed to *pb or *grpc. +git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' | not grep -v 'pb "\|grpc "' # - Check imports that are illegal in appengine (until Go 1.11). # TODO: Remove when we drop Go 1.10 support @@ -86,9 +95,11 @@ go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go # - gofmt, goimports, golint (with exceptions for generated code), go vet. gofmt -s -d -l . 2>&1 | fail_on_output -goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") | fail_on_output -golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") -go vet -all . +goimports -l . 2>&1 | not grep -vE "(_mock|\.pb)\.go" +golint ./... 2>&1 | not grep -vE "(_mock|\.pb)\.go:" +go vet -all ./... + +misspell -error . 
# - Check that generated proto files are up to date. if [[ -z "${VET_SKIP_PROTO}" ]]; then @@ -105,30 +116,74 @@ if go help mod >& /dev/null; then fi # - Collection of static analysis checks -# TODO(dfawley): don't use deprecated functions in examples. -staticcheck -go 1.9 -checks 'inherit,-ST1015' -ignore ' -google.golang.org/grpc/balancer.go:SA1019 -google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go:SA1019 -google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019 -google.golang.org/grpc/xds/internal/balancer/edsbalancer/balancergroup.go:SA1019 -google.golang.org/grpc/xds/internal/balancer/xds.go:SA1019 -google.golang.org/grpc/xds/internal/balancer/xds_client.go:SA1019 -google.golang.org/grpc/balancer_conn_wrappers.go:SA1019 -google.golang.org/grpc/balancer_test.go:SA1019 -google.golang.org/grpc/benchmark/benchmain/main.go:SA1019 -google.golang.org/grpc/benchmark/worker/benchmark_client.go:SA1019 -google.golang.org/grpc/clientconn.go:S1024 -google.golang.org/grpc/clientconn_state_transition_test.go:SA1019 -google.golang.org/grpc/clientconn_test.go:SA1019 -google.golang.org/grpc/examples/features/debugging/client/main.go:SA1019 -google.golang.org/grpc/examples/features/load_balancing/client/main.go:SA1019 -google.golang.org/grpc/internal/transport/handler_server.go:SA1019 -google.golang.org/grpc/internal/transport/handler_server_test.go:SA1019 -google.golang.org/grpc/resolver/dns/dns_resolver.go:SA1019 -google.golang.org/grpc/stats/stats_test.go:SA1019 -google.golang.org/grpc/test/balancer_test.go:SA1019 -google.golang.org/grpc/test/channelz_test.go:SA1019 -google.golang.org/grpc/test/end2end_test.go:SA1019 -google.golang.org/grpc/test/healthcheck_test.go:SA1019 -' ./... -misspell -error . +# +# TODO(dfawley): don't use deprecated functions in examples or first-party +# plugins. +SC_OUT="$(mktemp)" +staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +# Error if anything other than deprecation warnings are printed. +not grep -v "is deprecated:.*SA1019" "${SC_OUT}" +# Only ignore the following deprecated types/fields/functions. +not grep -Fv '.CredsBundle +.HeaderMap +.Metadata is deprecated: use Attributes +.NewAddress +.NewServiceConfig +.Type is deprecated: use Attributes +balancer.ErrTransientFailure +balancer.Picker +grpc.CallCustomCodec +grpc.Code +grpc.Compressor +grpc.Decompressor +grpc.MaxMsgSize +grpc.MethodConfig +grpc.NewGZIPCompressor +grpc.NewGZIPDecompressor +grpc.RPCCompressor +grpc.RPCDecompressor +grpc.ServiceConfig +grpc.WithBalancerName +grpc.WithCompressor +grpc.WithDecompressor +grpc.WithDialer +grpc.WithMaxMsgSize +grpc.WithServiceConfig +grpc.WithTimeout +http.CloseNotifier +info.SecurityVersion +resolver.Backend +resolver.GRPCLB' "${SC_OUT}" + +# - special golint on package comments. +lint_package_comment_per_package() { + # Number of files in this go package. + fileCount=$(go list -f '{{len .GoFiles}}' $1) + if [ ${fileCount} -eq 0 ]; then + return 0 + fi + # Number of package errors generated by golint. + lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") + # golint complains about every file that's missing the package comment. If the + # number of files for this package is greater than the number of errors, there's + # at least one file with package comment, good. Otherwise, fail. 
+ if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then + echo "Package $1 (with ${fileCount} files) is missing package comment" + return 1 + fi +} +lint_package_comment() { + set +ex + + count=0 + for i in $(go list ./...); do + lint_package_comment_per_package "$i" + ((count += $?)) + done + + set -ex + return $count +} +lint_package_comment + +echo SUCCESS diff --git a/vendor/github.com/keybase/go-crypto/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS similarity index 74% rename from vendor/github.com/keybase/go-crypto/AUTHORS rename to vendor/google.golang.org/protobuf/AUTHORS index 15167cd74..2b00ddba0 100644 --- a/vendor/github.com/keybase/go-crypto/AUTHORS +++ b/vendor/google.golang.org/protobuf/AUTHORS @@ -1,3 +1,3 @@ # This source code refers to The Go Authors for copyright purposes. # The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/keybase/go-crypto/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS similarity index 70% rename from vendor/github.com/keybase/go-crypto/CONTRIBUTORS rename to vendor/google.golang.org/protobuf/CONTRIBUTORS index 1c4577e96..1fbd3e976 100644 --- a/vendor/github.com/keybase/go-crypto/CONTRIBUTORS +++ b/vendor/google.golang.org/protobuf/CONTRIBUTORS @@ -1,3 +1,3 @@ # This source code was written by the Go contributors. # The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/google.golang.org/protobuf/LICENSE similarity index 95% rename from vendor/github.com/golang/snappy/LICENSE rename to vendor/google.golang.org/protobuf/LICENSE index 6050c10f4..49ea0f928 100644 --- a/vendor/github.com/golang/snappy/LICENSE +++ b/vendor/google.golang.org/protobuf/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. +Copyright (c) 2018 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/keybase/go-crypto/PATENTS b/vendor/google.golang.org/protobuf/PATENTS similarity index 100% rename from vendor/github.com/keybase/go-crypto/PATENTS rename to vendor/google.golang.org/protobuf/PATENTS diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go new file mode 100644 index 000000000..369df13da --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/init.go @@ -0,0 +1,168 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package internal_gengo + +import ( + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/encoding/protowire" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type fileInfo struct { + *protogen.File + + allEnums []*enumInfo + allMessages []*messageInfo + allExtensions []*extensionInfo + + allEnumsByPtr map[*enumInfo]int // value is index into allEnums + allMessagesByPtr map[*messageInfo]int // value is index into allMessages + allMessageFieldsByPtr map[*messageInfo]*structFields + + // needRawDesc specifies whether the generator should emit logic to provide + // the legacy raw descriptor in GZIP'd form. + // This is updated by enum and message generation logic as necessary, + // and checked at the end of file generation. + needRawDesc bool +} + +type structFields struct { + count int + unexported map[int]string +} + +func (sf *structFields) append(name string) { + if r, _ := utf8.DecodeRuneInString(name); !unicode.IsUpper(r) { + if sf.unexported == nil { + sf.unexported = make(map[int]string) + } + sf.unexported[sf.count] = name + } + sf.count++ +} + +func newFileInfo(file *protogen.File) *fileInfo { + f := &fileInfo{File: file} + + // Collect all enums, messages, and extensions in "flattened ordering". + // See filetype.TypeBuilder. + var walkMessages func([]*protogen.Message, func(*protogen.Message)) + walkMessages = func(messages []*protogen.Message, f func(*protogen.Message)) { + for _, m := range messages { + f(m) + walkMessages(m.Messages, f) + } + } + initEnumInfos := func(enums []*protogen.Enum) { + for _, enum := range enums { + f.allEnums = append(f.allEnums, newEnumInfo(f, enum)) + } + } + initMessageInfos := func(messages []*protogen.Message) { + for _, message := range messages { + f.allMessages = append(f.allMessages, newMessageInfo(f, message)) + } + } + initExtensionInfos := func(extensions []*protogen.Extension) { + for _, extension := range extensions { + f.allExtensions = append(f.allExtensions, newExtensionInfo(f, extension)) + } + } + initEnumInfos(f.Enums) + initMessageInfos(f.Messages) + initExtensionInfos(f.Extensions) + walkMessages(f.Messages, func(m *protogen.Message) { + initEnumInfos(m.Enums) + initMessageInfos(m.Messages) + initExtensionInfos(m.Extensions) + }) + + // Derive a reverse mapping of enum and message pointers to their index + // in allEnums and allMessages. 
+ if len(f.allEnums) > 0 { + f.allEnumsByPtr = make(map[*enumInfo]int) + for i, e := range f.allEnums { + f.allEnumsByPtr[e] = i + } + } + if len(f.allMessages) > 0 { + f.allMessagesByPtr = make(map[*messageInfo]int) + f.allMessageFieldsByPtr = make(map[*messageInfo]*structFields) + for i, m := range f.allMessages { + f.allMessagesByPtr[m] = i + f.allMessageFieldsByPtr[m] = new(structFields) + } + } + + return f +} + +type enumInfo struct { + *protogen.Enum + + genJSONMethod bool + genRawDescMethod bool +} + +func newEnumInfo(f *fileInfo, enum *protogen.Enum) *enumInfo { + e := &enumInfo{Enum: enum} + e.genJSONMethod = true + e.genRawDescMethod = true + return e +} + +type messageInfo struct { + *protogen.Message + + genRawDescMethod bool + genExtRangeMethod bool + + isTracked bool + hasWeak bool +} + +func newMessageInfo(f *fileInfo, message *protogen.Message) *messageInfo { + m := &messageInfo{Message: message} + m.genRawDescMethod = true + m.genExtRangeMethod = true + m.isTracked = isTrackedMessage(m) + for _, field := range m.Fields { + m.hasWeak = m.hasWeak || field.Desc.IsWeak() + } + return m +} + +// isTrackedMessage reports whether field tracking is enabled on the message. +func isTrackedMessage(m *messageInfo) (tracked bool) { + const trackFieldUse_fieldNumber = 37383685 + + // Decode the option from unknown fields to avoid a dependency on the + // annotation proto from protoc-gen-go. + b := m.Desc.Options().(*descriptorpb.MessageOptions).ProtoReflect().GetUnknown() + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + if num == trackFieldUse_fieldNumber && typ == protowire.VarintType { + v, _ := protowire.ConsumeVarint(b) + tracked = protowire.DecodeBool(v) + } + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + return tracked +} + +type extensionInfo struct { + *protogen.Extension +} + +func newExtensionInfo(f *fileInfo, extension *protogen.Extension) *extensionInfo { + x := &extensionInfo{Extension: extension} + return x +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go new file mode 100644 index 000000000..b2e3930fb --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/main.go @@ -0,0 +1,901 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal_gengo is internal to the protobuf module. +package internal_gengo + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "math" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/version" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" + + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/pluginpb" +) + +// SupportedFeatures reports the set of supported protobuf language features. +var SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL) + +// GenerateVersionMarkers specifies whether to generate version markers. +var GenerateVersionMarkers = true + +// Standard library dependencies. 
+const ( + base64Package = protogen.GoImportPath("encoding/base64") + mathPackage = protogen.GoImportPath("math") + reflectPackage = protogen.GoImportPath("reflect") + sortPackage = protogen.GoImportPath("sort") + stringsPackage = protogen.GoImportPath("strings") + syncPackage = protogen.GoImportPath("sync") + timePackage = protogen.GoImportPath("time") + utf8Package = protogen.GoImportPath("unicode/utf8") +) + +// Protobuf library dependencies. +// +// These are declared as an interface type so that they can be more easily +// patched to support unique build environments that impose restrictions +// on the dependencies of generated source code. +var ( + protoPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/proto") + protoifacePackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoiface") + protoimplPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/runtime/protoimpl") + protojsonPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/encoding/protojson") + protoreflectPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoreflect") + protoregistryPackage goImportPath = protogen.GoImportPath("google.golang.org/protobuf/reflect/protoregistry") + protoV1Package goImportPath = protogen.GoImportPath("github.com/golang/protobuf/proto") +) + +type goImportPath interface { + String() string + Ident(string) protogen.GoIdent +} + +// GenerateFile generates the contents of a .pb.go file. +func GenerateFile(gen *protogen.Plugin, file *protogen.File) *protogen.GeneratedFile { + filename := file.GeneratedFilenamePrefix + ".pb.go" + g := gen.NewGeneratedFile(filename, file.GoImportPath) + f := newFileInfo(file) + + genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Syntax_field_number)) + genGeneratedHeader(gen, g, f) + genStandaloneComments(g, f, int32(genid.FileDescriptorProto_Package_field_number)) + + packageDoc := genPackageKnownComment(f) + g.P(packageDoc, "package ", f.GoPackageName) + g.P() + + // Emit a static check that enforces a minimum version of the proto package. + if GenerateVersionMarkers { + g.P("const (") + g.P("// Verify that this generated code is sufficiently up-to-date.") + g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimpl.GenVersion, " - ", protoimplPackage.Ident("MinVersion"), ")") + g.P("// Verify that runtime/protoimpl is sufficiently up-to-date.") + g.P("_ = ", protoimplPackage.Ident("EnforceVersion"), "(", protoimplPackage.Ident("MaxVersion"), " - ", protoimpl.GenVersion, ")") + g.P(")") + g.P() + + // TODO: Remove this after some soak-in period after the v2 release. + g.P("// This is a compile-time assertion that a sufficiently up-to-date version") + g.P("// of the legacy proto package is being used.") + g.P("const _ = ", protoV1Package.Ident("ProtoPackageIsVersion4")) + g.P() + } + + for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ { + genImport(gen, g, f, imps.Get(i)) + } + for _, enum := range f.allEnums { + genEnum(g, f, enum) + } + for _, message := range f.allMessages { + genMessage(g, f, message) + } + genExtensions(g, f) + + genReflectFileDescriptor(gen, g, f) + + return g +} + +// genStandaloneComments prints all leading comments for a FileDescriptorProto +// location identified by the field number n. 
+func genStandaloneComments(g *protogen.GeneratedFile, f *fileInfo, n int32) { + for _, loc := range f.Proto.GetSourceCodeInfo().GetLocation() { + if len(loc.Path) == 1 && loc.Path[0] == n { + for _, s := range loc.GetLeadingDetachedComments() { + g.P(protogen.Comments(s)) + g.P() + } + if s := loc.GetLeadingComments(); s != "" { + g.P(protogen.Comments(s)) + g.P() + } + } + } +} + +func genGeneratedHeader(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") + + if GenerateVersionMarkers { + g.P("// versions:") + protocGenGoVersion := version.String() + protocVersion := "(unknown)" + if v := gen.Request.GetCompilerVersion(); v != nil { + protocVersion = fmt.Sprintf("v%v.%v.%v", v.GetMajor(), v.GetMinor(), v.GetPatch()) + } + g.P("// \tprotoc-gen-go ", protocGenGoVersion) + g.P("// \tprotoc ", protocVersion) + } + + if f.Proto.GetOptions().GetDeprecated() { + g.P("// ", f.Desc.Path(), " is a deprecated file.") + } else { + g.P("// source: ", f.Desc.Path()) + } + g.P() +} + +func genImport(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo, imp protoreflect.FileImport) { + impFile, ok := gen.FilesByPath[imp.Path()] + if !ok { + return + } + if impFile.GoImportPath == f.GoImportPath { + // Don't generate imports or aliases for types in the same Go package. + return + } + // Generate imports for all non-weak dependencies, even if they are not + // referenced, because other code and tools depend on having the + // full transitive closure of protocol buffer types in the binary. + if !imp.IsWeak { + g.Import(impFile.GoImportPath) + } + if !imp.IsPublic { + return + } + + // Generate public imports by generating the imported file, parsing it, + // and extracting every symbol that should receive a forwarding declaration. + impGen := GenerateFile(gen, impFile) + impGen.Skip() + b, err := impGen.Content() + if err != nil { + gen.Error(err) + return + } + fset := token.NewFileSet() + astFile, err := parser.ParseFile(fset, "", b, parser.ParseComments) + if err != nil { + gen.Error(err) + return + } + genForward := func(tok token.Token, name string, expr ast.Expr) { + // Don't import unexported symbols. + r, _ := utf8.DecodeRuneInString(name) + if !unicode.IsUpper(r) { + return + } + // Don't import the FileDescriptor. + if name == impFile.GoDescriptorIdent.GoName { + return + } + // Don't import decls referencing a symbol defined in another package. + // i.e., don't import decls which are themselves public imports: + // + // type T = somepackage.T + if _, ok := expr.(*ast.SelectorExpr); ok { + return + } + g.P(tok, " ", name, " = ", impFile.GoImportPath.Ident(name)) + } + g.P("// Symbols defined in public import of ", imp.Path(), ".") + g.P() + for _, decl := range astFile.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + genForward(decl.Tok, spec.Name.Name, spec.Type) + case *ast.ValueSpec: + for i, name := range spec.Names { + var expr ast.Expr + if i < len(spec.Values) { + expr = spec.Values[i] + } + genForward(decl.Tok, name.Name, expr) + } + case *ast.ImportSpec: + default: + panic(fmt.Sprintf("can't generate forward for spec type %T", spec)) + } + } + } + } + g.P() +} + +func genEnum(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) { + // Enum type declaration. 
+ g.Annotate(e.GoIdent.GoName, e.Location) + leadingComments := appendDeprecationSuffix(e.Comments.Leading, + e.Desc.Options().(*descriptorpb.EnumOptions).GetDeprecated()) + g.P(leadingComments, + "type ", e.GoIdent, " int32") + + // Enum value constants. + g.P("const (") + for _, value := range e.Values { + g.Annotate(value.GoIdent.GoName, value.Location) + leadingComments := appendDeprecationSuffix(value.Comments.Leading, + value.Desc.Options().(*descriptorpb.EnumValueOptions).GetDeprecated()) + g.P(leadingComments, + value.GoIdent, " ", e.GoIdent, " = ", value.Desc.Number(), + trailingComment(value.Comments.Trailing)) + } + g.P(")") + g.P() + + // Enum value maps. + g.P("// Enum value maps for ", e.GoIdent, ".") + g.P("var (") + g.P(e.GoIdent.GoName+"_name", " = map[int32]string{") + for _, value := range e.Values { + duplicate := "" + if value.Desc != e.Desc.Values().ByNumber(value.Desc.Number()) { + duplicate = "// Duplicate value: " + } + g.P(duplicate, value.Desc.Number(), ": ", strconv.Quote(string(value.Desc.Name())), ",") + } + g.P("}") + g.P(e.GoIdent.GoName+"_value", " = map[string]int32{") + for _, value := range e.Values { + g.P(strconv.Quote(string(value.Desc.Name())), ": ", value.Desc.Number(), ",") + } + g.P("}") + g.P(")") + g.P() + + // Enum method. + // + // NOTE: A pointer value is needed to represent presence in proto2. + // Since a proto2 message can reference a proto3 enum, it is useful to + // always generate this method (even on proto3 enums) to support that case. + g.P("func (x ", e.GoIdent, ") Enum() *", e.GoIdent, " {") + g.P("p := new(", e.GoIdent, ")") + g.P("*p = x") + g.P("return p") + g.P("}") + g.P() + + // String method. + g.P("func (x ", e.GoIdent, ") String() string {") + g.P("return ", protoimplPackage.Ident("X"), ".EnumStringOf(x.Descriptor(), ", protoreflectPackage.Ident("EnumNumber"), "(x))") + g.P("}") + g.P() + + genEnumReflectMethods(g, f, e) + + // UnmarshalJSON method. + if e.genJSONMethod && e.Desc.Syntax() == protoreflect.Proto2 { + g.P("// Deprecated: Do not use.") + g.P("func (x *", e.GoIdent, ") UnmarshalJSON(b []byte) error {") + g.P("num, err := ", protoimplPackage.Ident("X"), ".UnmarshalJSONEnum(x.Descriptor(), b)") + g.P("if err != nil {") + g.P("return err") + g.P("}") + g.P("*x = ", e.GoIdent, "(num)") + g.P("return nil") + g.P("}") + g.P() + } + + // EnumDescriptor method. + if e.genRawDescMethod { + var indexes []string + for i := 1; i < len(e.Location.Path); i += 2 { + indexes = append(indexes, strconv.Itoa(int(e.Location.Path[i]))) + } + g.P("// Deprecated: Use ", e.GoIdent, ".Descriptor instead.") + g.P("func (", e.GoIdent, ") EnumDescriptor() ([]byte, []int) {") + g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}") + g.P("}") + g.P() + f.needRawDesc = true + } +} + +func genMessage(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + if m.Desc.IsMapEntry() { + return + } + + // Message type declaration. 
+ g.Annotate(m.GoIdent.GoName, m.Location) + leadingComments := appendDeprecationSuffix(m.Comments.Leading, + m.Desc.Options().(*descriptorpb.MessageOptions).GetDeprecated()) + g.P(leadingComments, + "type ", m.GoIdent, " struct {") + genMessageFields(g, f, m) + g.P("}") + g.P() + + genMessageKnownFunctions(g, f, m) + genMessageDefaultDecls(g, f, m) + genMessageMethods(g, f, m) + genMessageOneofWrapperTypes(g, f, m) +} + +func genMessageFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + sf := f.allMessageFieldsByPtr[m] + genMessageInternalFields(g, f, m, sf) + for _, field := range m.Fields { + genMessageField(g, f, m, field, sf) + } +} + +func genMessageInternalFields(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, sf *structFields) { + g.P(genid.State_goname, " ", protoimplPackage.Ident("MessageState")) + sf.append(genid.State_goname) + g.P(genid.SizeCache_goname, " ", protoimplPackage.Ident("SizeCache")) + sf.append(genid.SizeCache_goname) + if m.hasWeak { + g.P(genid.WeakFields_goname, " ", protoimplPackage.Ident("WeakFields")) + sf.append(genid.WeakFields_goname) + } + g.P(genid.UnknownFields_goname, " ", protoimplPackage.Ident("UnknownFields")) + sf.append(genid.UnknownFields_goname) + if m.Desc.ExtensionRanges().Len() > 0 { + g.P(genid.ExtensionFields_goname, " ", protoimplPackage.Ident("ExtensionFields")) + sf.append(genid.ExtensionFields_goname) + } + if sf.count > 0 { + g.P() + } +} + +func genMessageField(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo, field *protogen.Field, sf *structFields) { + if oneof := field.Oneof; oneof != nil && !oneof.Desc.IsSynthetic() { + // It would be a bit simpler to iterate over the oneofs below, + // but generating the field here keeps the contents of the Go + // struct in the same order as the contents of the source + // .proto file. + if oneof.Fields[0] != field { + return // only generate for first appearance + } + + tags := structTags{ + {"protobuf_oneof", string(oneof.Desc.Name())}, + } + if m.isTracked { + tags = append(tags, gotrackTags...) + } + + g.Annotate(m.GoIdent.GoName+"."+oneof.GoName, oneof.Location) + leadingComments := oneof.Comments.Leading + if leadingComments != "" { + leadingComments += "\n" + } + ss := []string{fmt.Sprintf(" Types that are assignable to %s:\n", oneof.GoName)} + for _, field := range oneof.Fields { + ss = append(ss, "\t*"+field.GoIdent.GoName+"\n") + } + leadingComments += protogen.Comments(strings.Join(ss, "")) + g.P(leadingComments, + oneof.GoName, " ", oneofInterfaceName(oneof), tags) + sf.append(oneof.GoName) + return + } + goType, pointer := fieldGoType(g, f, field) + if pointer { + goType = "*" + goType + } + tags := structTags{ + {"protobuf", fieldProtobufTagValue(field)}, + {"json", fieldJSONTagValue(field)}, + } + if field.Desc.IsMap() { + key := field.Message.Fields[0] + val := field.Message.Fields[1] + tags = append(tags, structTags{ + {"protobuf_key", fieldProtobufTagValue(key)}, + {"protobuf_val", fieldProtobufTagValue(val)}, + }...) + } + if m.isTracked { + tags = append(tags, gotrackTags...) 
+ } + + name := field.GoName + if field.Desc.IsWeak() { + name = genid.WeakFieldPrefix_goname + name + } + g.Annotate(m.GoIdent.GoName+"."+name, field.Location) + leadingComments := appendDeprecationSuffix(field.Comments.Leading, + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + name, " ", goType, tags, + trailingComment(field.Comments.Trailing)) + sf.append(field.GoName) +} + +// genMessageDefaultDecls generates consts and vars holding the default +// values of fields. +func genMessageDefaultDecls(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + var consts, vars []string + for _, field := range m.Fields { + if !field.Desc.HasDefault() { + continue + } + name := "Default_" + m.GoIdent.GoName + "_" + field.GoName + goType, _ := fieldGoType(g, f, field) + defVal := field.Desc.Default() + switch field.Desc.Kind() { + case protoreflect.StringKind: + consts = append(consts, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.String())) + case protoreflect.BytesKind: + vars = append(vars, fmt.Sprintf("%s = %s(%q)", name, goType, defVal.Bytes())) + case protoreflect.EnumKind: + idx := field.Desc.DefaultEnumValue().Index() + val := field.Enum.Values[idx] + consts = append(consts, fmt.Sprintf("%s = %s", name, g.QualifiedGoIdent(val.GoIdent))) + case protoreflect.FloatKind, protoreflect.DoubleKind: + if f := defVal.Float(); math.IsNaN(f) || math.IsInf(f, 0) { + var fn, arg string + switch f := defVal.Float(); { + case math.IsInf(f, -1): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "-1" + case math.IsInf(f, +1): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("Inf")), "+1" + case math.IsNaN(f): + fn, arg = g.QualifiedGoIdent(mathPackage.Ident("NaN")), "" + } + vars = append(vars, fmt.Sprintf("%s = %s(%s(%s))", name, goType, fn, arg)) + } else { + consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, f)) + } + default: + consts = append(consts, fmt.Sprintf("%s = %s(%v)", name, goType, defVal.Interface())) + } + } + if len(consts) > 0 { + g.P("// Default values for ", m.GoIdent, " fields.") + g.P("const (") + for _, s := range consts { + g.P(s) + } + g.P(")") + } + if len(vars) > 0 { + g.P("// Default values for ", m.GoIdent, " fields.") + g.P("var (") + for _, s := range vars { + g.P(s) + } + g.P(")") + } + g.P() +} + +func genMessageMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + genMessageBaseMethods(g, f, m) + genMessageGetterMethods(g, f, m) + genMessageSetterMethods(g, f, m) +} + +func genMessageBaseMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + // Reset method. + g.P("func (x *", m.GoIdent, ") Reset() {") + g.P("*x = ", m.GoIdent, "{}") + g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " {") + g.P("mi := &", messageTypesVarName(f), "[", f.allMessagesByPtr[m], "]") + g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))") + g.P("ms.StoreMessageInfo(mi)") + g.P("}") + g.P("}") + g.P() + + // String method. + g.P("func (x *", m.GoIdent, ") String() string {") + g.P("return ", protoimplPackage.Ident("X"), ".MessageStringOf(x)") + g.P("}") + g.P() + + // ProtoMessage method. + g.P("func (*", m.GoIdent, ") ProtoMessage() {}") + g.P() + + // ProtoReflect method. + genMessageReflectMethods(g, f, m) + + // Descriptor method. 
+ if m.genRawDescMethod { + var indexes []string + for i := 1; i < len(m.Location.Path); i += 2 { + indexes = append(indexes, strconv.Itoa(int(m.Location.Path[i]))) + } + g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor instead.") + g.P("func (*", m.GoIdent, ") Descriptor() ([]byte, []int) {") + g.P("return ", rawDescVarName(f), "GZIP(), []int{", strings.Join(indexes, ","), "}") + g.P("}") + g.P() + f.needRawDesc = true + } + + // ExtensionRangeArray method. + extRanges := m.Desc.ExtensionRanges() + if m.genExtRangeMethod && extRanges.Len() > 0 { + protoExtRange := protoifacePackage.Ident("ExtensionRangeV1") + extRangeVar := "extRange_" + m.GoIdent.GoName + g.P("var ", extRangeVar, " = []", protoExtRange, " {") + for i := 0; i < extRanges.Len(); i++ { + r := extRanges.Get(i) + g.P("{Start:", r[0], ", End:", r[1]-1 /* inclusive */, "},") + } + g.P("}") + g.P() + g.P("// Deprecated: Use ", m.GoIdent, ".ProtoReflect.Descriptor.ExtensionRanges instead.") + g.P("func (*", m.GoIdent, ") ExtensionRangeArray() []", protoExtRange, " {") + g.P("return ", extRangeVar) + g.P("}") + g.P() + } +} + +func genMessageGetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, field := range m.Fields { + genNoInterfacePragma(g, m.isTracked) + + // Getter for parent oneof. + if oneof := field.Oneof; oneof != nil && oneof.Fields[0] == field && !oneof.Desc.IsSynthetic() { + g.Annotate(m.GoIdent.GoName+".Get"+oneof.GoName, oneof.Location) + g.P("func (m *", m.GoIdent.GoName, ") Get", oneof.GoName, "() ", oneofInterfaceName(oneof), " {") + g.P("if m != nil {") + g.P("return m.", oneof.GoName) + g.P("}") + g.P("return nil") + g.P("}") + g.P() + } + + // Getter for message field. + goType, pointer := fieldGoType(g, f, field) + defaultValue := fieldDefaultValue(g, m, field) + g.Annotate(m.GoIdent.GoName+".Get"+field.GoName, field.Location) + leadingComments := appendDeprecationSuffix("", + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + switch { + case field.Desc.IsWeak(): + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", protoPackage.Ident("Message"), "{") + g.P("var w ", protoimplPackage.Ident("WeakFields")) + g.P("if x != nil {") + g.P("w = x.", genid.WeakFields_goname) + if m.isTracked { + g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName) + } + g.P("}") + g.P("return ", protoimplPackage.Ident("X"), ".GetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ")") + g.P("}") + case field.Oneof != nil && !field.Oneof.Desc.IsSynthetic(): + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {") + g.P("if x, ok := x.Get", field.Oneof.GoName, "().(*", field.GoIdent, "); ok {") + g.P("return x.", field.GoName) + g.P("}") + g.P("return ", defaultValue) + g.P("}") + default: + g.P(leadingComments, "func (x *", m.GoIdent, ") Get", field.GoName, "() ", goType, " {") + if !field.Desc.HasPresence() || defaultValue == "nil" { + g.P("if x != nil {") + } else { + g.P("if x != nil && x.", field.GoName, " != nil {") + } + star := "" + if pointer { + star = "*" + } + g.P("return ", star, " x.", field.GoName) + g.P("}") + g.P("return ", defaultValue) + g.P("}") + } + g.P() + } +} + +func genMessageSetterMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, field := range m.Fields { + if !field.Desc.IsWeak() { + continue + } + + genNoInterfacePragma(g, m.isTracked) + + g.Annotate(m.GoIdent.GoName+".Set"+field.GoName, field.Location) + leadingComments := 
appendDeprecationSuffix("", + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, "func (x *", m.GoIdent, ") Set", field.GoName, "(v ", protoPackage.Ident("Message"), ") {") + g.P("var w *", protoimplPackage.Ident("WeakFields")) + g.P("if x != nil {") + g.P("w = &x.", genid.WeakFields_goname) + if m.isTracked { + g.P("_ = x.", genid.WeakFieldPrefix_goname+field.GoName) + } + g.P("}") + g.P(protoimplPackage.Ident("X"), ".SetWeak(w, ", field.Desc.Number(), ", ", strconv.Quote(string(field.Message.Desc.FullName())), ", v)") + g.P("}") + g.P() + } +} + +// fieldGoType returns the Go type used for a field. +// +// If it returns pointer=true, the struct field is a pointer to the type. +func fieldGoType(g *protogen.GeneratedFile, f *fileInfo, field *protogen.Field) (goType string, pointer bool) { + if field.Desc.IsWeak() { + return "struct{}", false + } + + pointer = field.Desc.HasPresence() + switch field.Desc.Kind() { + case protoreflect.BoolKind: + goType = "bool" + case protoreflect.EnumKind: + goType = g.QualifiedGoIdent(field.Enum.GoIdent) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + goType = "int32" + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + goType = "uint32" + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + goType = "int64" + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + goType = "uint64" + case protoreflect.FloatKind: + goType = "float32" + case protoreflect.DoubleKind: + goType = "float64" + case protoreflect.StringKind: + goType = "string" + case protoreflect.BytesKind: + goType = "[]byte" + pointer = false // rely on nullability of slices for presence + case protoreflect.MessageKind, protoreflect.GroupKind: + goType = "*" + g.QualifiedGoIdent(field.Message.GoIdent) + pointer = false // pointer captured as part of the type + } + switch { + case field.Desc.IsList(): + return "[]" + goType, false + case field.Desc.IsMap(): + keyType, _ := fieldGoType(g, f, field.Message.Fields[0]) + valType, _ := fieldGoType(g, f, field.Message.Fields[1]) + return fmt.Sprintf("map[%v]%v", keyType, valType), false + } + return goType, pointer +} + +func fieldProtobufTagValue(field *protogen.Field) string { + var enumName string + if field.Desc.Kind() == protoreflect.EnumKind { + enumName = protoimpl.X.LegacyEnumName(field.Enum.Desc) + } + return tag.Marshal(field.Desc, enumName) +} + +func fieldDefaultValue(g *protogen.GeneratedFile, m *messageInfo, field *protogen.Field) string { + if field.Desc.IsList() { + return "nil" + } + if field.Desc.HasDefault() { + defVarName := "Default_" + m.GoIdent.GoName + "_" + field.GoName + if field.Desc.Kind() == protoreflect.BytesKind { + return "append([]byte(nil), " + defVarName + "...)" + } + return defVarName + } + switch field.Desc.Kind() { + case protoreflect.BoolKind: + return "false" + case protoreflect.StringKind: + return `""` + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.BytesKind: + return "nil" + case protoreflect.EnumKind: + return g.QualifiedGoIdent(field.Enum.Values[0].GoIdent) + default: + return "0" + } +} + +func fieldJSONTagValue(field *protogen.Field) string { + return string(field.Desc.Name()) + ",omitempty" +} + +func genExtensions(g *protogen.GeneratedFile, f *fileInfo) { + if len(f.allExtensions) == 0 { + return + } + + g.P("var ", extensionTypesVarName(f), " = []", protoimplPackage.Ident("ExtensionInfo"), "{") + for _, x := range f.allExtensions { + // For MessageSet extensions, 
the name used is the parent message. + name := x.Desc.FullName() + if messageset.IsMessageSetExtension(x.Desc) { + name = name.Parent() + } + + g.P("{") + g.P("ExtendedType: (*", x.Extendee.GoIdent, ")(nil),") + goType, pointer := fieldGoType(g, f, x.Extension) + if pointer { + goType = "*" + goType + } + g.P("ExtensionType: (", goType, ")(nil),") + g.P("Field: ", x.Desc.Number(), ",") + g.P("Name: ", strconv.Quote(string(name)), ",") + g.P("Tag: ", strconv.Quote(fieldProtobufTagValue(x.Extension)), ",") + g.P("Filename: ", strconv.Quote(f.Desc.Path()), ",") + g.P("},") + } + g.P("}") + g.P() + + // Group extensions by the target message. + var orderedTargets []protogen.GoIdent + allExtensionsByTarget := make(map[protogen.GoIdent][]*extensionInfo) + allExtensionsByPtr := make(map[*extensionInfo]int) + for i, x := range f.allExtensions { + target := x.Extendee.GoIdent + if len(allExtensionsByTarget[target]) == 0 { + orderedTargets = append(orderedTargets, target) + } + allExtensionsByTarget[target] = append(allExtensionsByTarget[target], x) + allExtensionsByPtr[x] = i + } + for _, target := range orderedTargets { + g.P("// Extension fields to ", target, ".") + g.P("var (") + for _, x := range allExtensionsByTarget[target] { + xd := x.Desc + typeName := xd.Kind().String() + switch xd.Kind() { + case protoreflect.EnumKind: + typeName = string(xd.Enum().FullName()) + case protoreflect.MessageKind, protoreflect.GroupKind: + typeName = string(xd.Message().FullName()) + } + fieldName := string(xd.Name()) + + leadingComments := x.Comments.Leading + if leadingComments != "" { + leadingComments += "\n" + } + leadingComments += protogen.Comments(fmt.Sprintf(" %v %v %v = %v;\n", + xd.Cardinality(), typeName, fieldName, xd.Number())) + leadingComments = appendDeprecationSuffix(leadingComments, + x.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + "E_", x.GoIdent, " = &", extensionTypesVarName(f), "[", allExtensionsByPtr[x], "]", + trailingComment(x.Comments.Trailing)) + } + g.P(")") + g.P() + } +} + +// genMessageOneofWrapperTypes generates the oneof wrapper types and +// associates the types with the parent message type. +func genMessageOneofWrapperTypes(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + for _, oneof := range m.Oneofs { + if oneof.Desc.IsSynthetic() { + continue + } + ifName := oneofInterfaceName(oneof) + g.P("type ", ifName, " interface {") + g.P(ifName, "()") + g.P("}") + g.P() + for _, field := range oneof.Fields { + g.Annotate(field.GoIdent.GoName, field.Location) + g.Annotate(field.GoIdent.GoName+"."+field.GoName, field.Location) + g.P("type ", field.GoIdent, " struct {") + goType, _ := fieldGoType(g, f, field) + tags := structTags{ + {"protobuf", fieldProtobufTagValue(field)}, + } + if m.isTracked { + tags = append(tags, gotrackTags...) + } + leadingComments := appendDeprecationSuffix(field.Comments.Leading, + field.Desc.Options().(*descriptorpb.FieldOptions).GetDeprecated()) + g.P(leadingComments, + field.GoName, " ", goType, tags, + trailingComment(field.Comments.Trailing)) + g.P("}") + g.P() + } + for _, field := range oneof.Fields { + g.P("func (*", field.GoIdent, ") ", ifName, "() {}") + g.P() + } + } +} + +// oneofInterfaceName returns the name of the interface type implemented by +// the oneof field value types. 
+func oneofInterfaceName(oneof *protogen.Oneof) string { + return "is" + oneof.GoIdent.GoName +} + +// genNoInterfacePragma generates a standalone "nointerface" pragma to +// decorate methods with field-tracking support. +func genNoInterfacePragma(g *protogen.GeneratedFile, tracked bool) { + if tracked { + g.P("//go:nointerface") + g.P() + } +} + +var gotrackTags = structTags{{"go", "track"}} + +// structTags is a data structure for build idiomatic Go struct tags. +// Each [2]string is a key-value pair, where value is the unescaped string. +// +// Example: structTags{{"key", "value"}}.String() -> `key:"value"` +type structTags [][2]string + +func (tags structTags) String() string { + if len(tags) == 0 { + return "" + } + var ss []string + for _, tag := range tags { + // NOTE: When quoting the value, we need to make sure the backtick + // character does not appear. Convert all cases to the escaped hex form. + key := tag[0] + val := strings.Replace(strconv.Quote(tag[1]), "`", `\x60`, -1) + ss = append(ss, fmt.Sprintf("%s:%s", key, val)) + } + return "`" + strings.Join(ss, " ") + "`" +} + +// appendDeprecationSuffix optionally appends a deprecation notice as a suffix. +func appendDeprecationSuffix(prefix protogen.Comments, deprecated bool) protogen.Comments { + if !deprecated { + return prefix + } + if prefix != "" { + prefix += "\n" + } + return prefix + " Deprecated: Do not use.\n" +} + +// trailingComment is like protogen.Comments, but lacks a trailing newline. +type trailingComment protogen.Comments + +func (c trailingComment) String() string { + s := strings.TrimSuffix(protogen.Comments(c).String(), "\n") + if strings.Contains(s, "\n") { + // We don't support multi-lined trailing comments as it is unclear + // how to best render them in the generated code. + return "" + } + return s +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go new file mode 100644 index 000000000..1319a1267 --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/reflect.go @@ -0,0 +1,351 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal_gengo + +import ( + "fmt" + "math" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func genReflectFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + g.P("var ", f.GoDescriptorIdent, " ", protoreflectPackage.Ident("FileDescriptor")) + g.P() + + genFileDescriptor(gen, g, f) + if len(f.allEnums) > 0 { + g.P("var ", enumTypesVarName(f), " = make([]", protoimplPackage.Ident("EnumInfo"), ",", len(f.allEnums), ")") + } + if len(f.allMessages) > 0 { + g.P("var ", messageTypesVarName(f), " = make([]", protoimplPackage.Ident("MessageInfo"), ",", len(f.allMessages), ")") + } + + // Generate a unique list of Go types for all declarations and dependencies, + // and the associated index into the type list for all dependencies. 
+ var goTypes []string + var depIdxs []string + seen := map[protoreflect.FullName]int{} + genDep := func(name protoreflect.FullName, depSource string) { + if depSource != "" { + line := fmt.Sprintf("%d, // %d: %s -> %s", seen[name], len(depIdxs), depSource, name) + depIdxs = append(depIdxs, line) + } + } + genEnum := func(e *protogen.Enum, depSource string) { + if e != nil { + name := e.Desc.FullName() + if _, ok := seen[name]; !ok { + line := fmt.Sprintf("(%s)(0), // %d: %s", g.QualifiedGoIdent(e.GoIdent), len(goTypes), name) + goTypes = append(goTypes, line) + seen[name] = len(seen) + } + if depSource != "" { + genDep(name, depSource) + } + } + } + genMessage := func(m *protogen.Message, depSource string) { + if m != nil { + name := m.Desc.FullName() + if _, ok := seen[name]; !ok { + line := fmt.Sprintf("(*%s)(nil), // %d: %s", g.QualifiedGoIdent(m.GoIdent), len(goTypes), name) + if m.Desc.IsMapEntry() { + // Map entry messages have no associated Go type. + line = fmt.Sprintf("nil, // %d: %s", len(goTypes), name) + } + goTypes = append(goTypes, line) + seen[name] = len(seen) + } + if depSource != "" { + genDep(name, depSource) + } + } + } + + // This ordering is significant. + // See filetype.TypeBuilder.DependencyIndexes. + type offsetEntry struct { + start int + name string + } + var depOffsets []offsetEntry + for _, enum := range f.allEnums { + genEnum(enum.Enum, "") + } + for _, message := range f.allMessages { + genMessage(message.Message, "") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "field type_name"}) + for _, message := range f.allMessages { + for _, field := range message.Fields { + if field.Desc.IsWeak() { + continue + } + source := string(field.Desc.FullName()) + genEnum(field.Enum, source+":type_name") + genMessage(field.Message, source+":type_name") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension extendee"}) + for _, extension := range f.allExtensions { + source := string(extension.Desc.FullName()) + genMessage(extension.Extendee, source+":extendee") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "extension type_name"}) + for _, extension := range f.allExtensions { + source := string(extension.Desc.FullName()) + genEnum(extension.Enum, source+":type_name") + genMessage(extension.Message, source+":type_name") + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method input_type"}) + for _, service := range f.Services { + for _, method := range service.Methods { + source := string(method.Desc.FullName()) + genMessage(method.Input, source+":input_type") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), "method output_type"}) + for _, service := range f.Services { + for _, method := range service.Methods { + source := string(method.Desc.FullName()) + genMessage(method.Output, source+":output_type") + } + } + depOffsets = append(depOffsets, offsetEntry{len(depIdxs), ""}) + for i := len(depOffsets) - 2; i >= 0; i-- { + curr, next := depOffsets[i], depOffsets[i+1] + depIdxs = append(depIdxs, fmt.Sprintf("%d, // [%d:%d] is the sub-list for %s", + curr.start, curr.start, next.start, curr.name)) + } + if len(depIdxs) > math.MaxInt32 { + panic("too many dependencies") // sanity check + } + + g.P("var ", goTypesVarName(f), " = []interface{}{") + for _, s := range goTypes { + g.P(s) + } + g.P("}") + + g.P("var ", depIdxsVarName(f), " = []int32{") + for _, s := range depIdxs { + g.P(s) + } + g.P("}") + + g.P("func init() { ", initFuncName(f.File), "() }") + + g.P("func ", 
initFuncName(f.File), "() {") + g.P("if ", f.GoDescriptorIdent, " != nil {") + g.P("return") + g.P("}") + + // Ensure that initialization functions for different files in the same Go + // package run in the correct order: Call the init funcs for every .proto file + // imported by this one that is in the same Go package. + for i, imps := 0, f.Desc.Imports(); i < imps.Len(); i++ { + impFile := gen.FilesByPath[imps.Get(i).Path()] + if impFile.GoImportPath != f.GoImportPath { + continue + } + g.P(initFuncName(impFile), "()") + } + + if len(f.allMessages) > 0 { + // Populate MessageInfo.Exporters. + g.P("if !", protoimplPackage.Ident("UnsafeEnabled"), " {") + for _, message := range f.allMessages { + if sf := f.allMessageFieldsByPtr[message]; len(sf.unexported) > 0 { + idx := f.allMessagesByPtr[message] + typesVar := messageTypesVarName(f) + + g.P(typesVar, "[", idx, "].Exporter = func(v interface{}, i int) interface{} {") + g.P("switch v := v.(*", message.GoIdent, "); i {") + for i := 0; i < sf.count; i++ { + if name := sf.unexported[i]; name != "" { + g.P("case ", i, ": return &v.", name) + } + } + g.P("default: return nil") + g.P("}") + g.P("}") + } + } + g.P("}") + + // Populate MessageInfo.OneofWrappers. + for _, message := range f.allMessages { + if len(message.Oneofs) > 0 { + idx := f.allMessagesByPtr[message] + typesVar := messageTypesVarName(f) + + // Associate the wrapper types by directly passing them to the MessageInfo. + g.P(typesVar, "[", idx, "].OneofWrappers = []interface{} {") + for _, oneof := range message.Oneofs { + if !oneof.Desc.IsSynthetic() { + for _, field := range oneof.Fields { + g.P("(*", field.GoIdent, ")(nil),") + } + } + } + g.P("}") + } + } + } + + g.P("type x struct{}") + g.P("out := ", protoimplPackage.Ident("TypeBuilder"), "{") + g.P("File: ", protoimplPackage.Ident("DescBuilder"), "{") + g.P("GoPackagePath: ", reflectPackage.Ident("TypeOf"), "(x{}).PkgPath(),") + g.P("RawDescriptor: ", rawDescVarName(f), ",") + g.P("NumEnums: ", len(f.allEnums), ",") + g.P("NumMessages: ", len(f.allMessages), ",") + g.P("NumExtensions: ", len(f.allExtensions), ",") + g.P("NumServices: ", len(f.Services), ",") + g.P("},") + g.P("GoTypes: ", goTypesVarName(f), ",") + g.P("DependencyIndexes: ", depIdxsVarName(f), ",") + if len(f.allEnums) > 0 { + g.P("EnumInfos: ", enumTypesVarName(f), ",") + } + if len(f.allMessages) > 0 { + g.P("MessageInfos: ", messageTypesVarName(f), ",") + } + if len(f.allExtensions) > 0 { + g.P("ExtensionInfos: ", extensionTypesVarName(f), ",") + } + g.P("}.Build()") + g.P(f.GoDescriptorIdent, " = out.File") + + // Set inputs to nil to allow GC to reclaim resources. 
+ g.P(rawDescVarName(f), " = nil") + g.P(goTypesVarName(f), " = nil") + g.P(depIdxsVarName(f), " = nil") + g.P("}") +} + +func genFileDescriptor(gen *protogen.Plugin, g *protogen.GeneratedFile, f *fileInfo) { + descProto := proto.Clone(f.Proto).(*descriptorpb.FileDescriptorProto) + descProto.SourceCodeInfo = nil // drop source code information + + b, err := proto.MarshalOptions{AllowPartial: true, Deterministic: true}.Marshal(descProto) + if err != nil { + gen.Error(err) + return + } + + g.P("var ", rawDescVarName(f), " = []byte{") + for len(b) > 0 { + n := 16 + if n > len(b) { + n = len(b) + } + + s := "" + for _, c := range b[:n] { + s += fmt.Sprintf("0x%02x,", c) + } + g.P(s) + + b = b[n:] + } + g.P("}") + g.P() + + if f.needRawDesc { + onceVar := rawDescVarName(f) + "Once" + dataVar := rawDescVarName(f) + "Data" + g.P("var (") + g.P(onceVar, " ", syncPackage.Ident("Once")) + g.P(dataVar, " = ", rawDescVarName(f)) + g.P(")") + g.P() + + g.P("func ", rawDescVarName(f), "GZIP() []byte {") + g.P(onceVar, ".Do(func() {") + g.P(dataVar, " = ", protoimplPackage.Ident("X"), ".CompressGZIP(", dataVar, ")") + g.P("})") + g.P("return ", dataVar) + g.P("}") + g.P() + } +} + +func genEnumReflectMethods(g *protogen.GeneratedFile, f *fileInfo, e *enumInfo) { + idx := f.allEnumsByPtr[e] + typesVar := enumTypesVarName(f) + + // Descriptor method. + g.P("func (", e.GoIdent, ") Descriptor() ", protoreflectPackage.Ident("EnumDescriptor"), " {") + g.P("return ", typesVar, "[", idx, "].Descriptor()") + g.P("}") + g.P() + + // Type method. + g.P("func (", e.GoIdent, ") Type() ", protoreflectPackage.Ident("EnumType"), " {") + g.P("return &", typesVar, "[", idx, "]") + g.P("}") + g.P() + + // Number method. + g.P("func (x ", e.GoIdent, ") Number() ", protoreflectPackage.Ident("EnumNumber"), " {") + g.P("return ", protoreflectPackage.Ident("EnumNumber"), "(x)") + g.P("}") + g.P() +} + +func genMessageReflectMethods(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + idx := f.allMessagesByPtr[m] + typesVar := messageTypesVarName(f) + + // ProtoReflect method. 
+ g.P("func (x *", m.GoIdent, ") ProtoReflect() ", protoreflectPackage.Ident("Message"), " {") + g.P("mi := &", typesVar, "[", idx, "]") + g.P("if ", protoimplPackage.Ident("UnsafeEnabled"), " && x != nil {") + g.P("ms := ", protoimplPackage.Ident("X"), ".MessageStateOf(", protoimplPackage.Ident("Pointer"), "(x))") + g.P("if ms.LoadMessageInfo() == nil {") + g.P("ms.StoreMessageInfo(mi)") + g.P("}") + g.P("return ms") + g.P("}") + g.P("return mi.MessageOf(x)") + g.P("}") + g.P() +} + +func fileVarName(f *protogen.File, suffix string) string { + prefix := f.GoDescriptorIdent.GoName + _, n := utf8.DecodeRuneInString(prefix) + prefix = strings.ToLower(prefix[:n]) + prefix[n:] + return prefix + "_" + suffix +} +func rawDescVarName(f *fileInfo) string { + return fileVarName(f.File, "rawDesc") +} +func goTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "goTypes") +} +func depIdxsVarName(f *fileInfo) string { + return fileVarName(f.File, "depIdxs") +} +func enumTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "enumTypes") +} +func messageTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "msgTypes") +} +func extensionTypesVarName(f *fileInfo) string { + return fileVarName(f.File, "extTypes") +} +func initFuncName(f *protogen.File) string { + return fileVarName(f, "init") +} diff --git a/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go new file mode 100644 index 000000000..9a1b7bdfa --- /dev/null +++ b/vendor/google.golang.org/protobuf/cmd/protoc-gen-go/internal_gengo/well_known_types.go @@ -0,0 +1,1077 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal_gengo + +import ( + "strings" + + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/internal/genid" +) + +// Specialized support for well-known types are hard-coded into the generator +// as opposed to being injected in adjacent .go sources in the generated package +// in order to support specialized build systems like Bazel that always generate +// dynamically from the source .proto files. + +func genPackageKnownComment(f *fileInfo) protogen.Comments { + switch f.Desc.Path() { + case genid.File_google_protobuf_any_proto: + return ` Package anypb contains generated types for ` + genid.File_google_protobuf_any_proto + `. + + The Any message is a dynamic representation of any other message value. + It is functionally a tuple of the full name of the remote message type and + the serialized bytes of the remote message value. + + + Constructing an Any + + An Any message containing another message value is constructed using New: + + any, err := anypb.New(m) + if err != nil { + ... // handle error + } + ... // make use of any + + + Unmarshaling an Any + + With a populated Any message, the underlying message can be serialized into + a remote concrete message value in a few ways. + + If the exact concrete type is known, then a new (or pre-existing) instance + of that message can be passed to the UnmarshalTo method: + + m := new(foopb.MyMessage) + if err := any.UnmarshalTo(m); err != nil { + ... // handle error + } + ... 
// make use of m + + If the exact concrete type is not known, then the UnmarshalNew method can be + used to unmarshal the contents into a new instance of the remote message type: + + m, err := any.UnmarshalNew() + if err != nil { + ... // handle error + } + ... // make use of m + + UnmarshalNew uses the global type registry to resolve the message type and + construct a new instance of that message to unmarshal into. In order for a + message type to appear in the global registry, the Go type representing that + protobuf message type must be linked into the Go binary. For messages + generated by protoc-gen-go, this is achieved through an import of the + generated Go package representing a .proto file. + + A common pattern with UnmarshalNew is to use a type switch with the resulting + proto.Message value: + + switch m := m.(type) { + case *foopb.MyMessage: + ... // make use of m as a *foopb.MyMessage + case *barpb.OtherMessage: + ... // make use of m as a *barpb.OtherMessage + case *bazpb.SomeMessage: + ... // make use of m as a *bazpb.SomeMessage + } + + This pattern ensures that the generated packages containing the message types + listed in the case clauses are linked into the Go binary and therefore also + registered in the global registry. + + + Type checking an Any + + In order to type check whether an Any message represents some other message, + then use the MessageIs method: + + if any.MessageIs((*foopb.MyMessage)(nil)) { + ... // make use of any, knowing that it contains a foopb.MyMessage + } + + The MessageIs method can also be used with an allocated instance of the target + message type if the intention is to unmarshal into it if the type matches: + + m := new(foopb.MyMessage) + if any.MessageIs(m) { + if err := any.UnmarshalTo(m); err != nil { + ... // handle error + } + ... // make use of m + } + +` + case genid.File_google_protobuf_timestamp_proto: + return ` Package timestamppb contains generated types for ` + genid.File_google_protobuf_timestamp_proto + `. + + The Timestamp message represents a timestamp, + an instant in time since the Unix epoch (January 1st, 1970). + + + Conversion to a Go Time + + The AsTime method can be used to convert a Timestamp message to a + standard Go time.Time value in UTC: + + t := ts.AsTime() + ... // make use of t as a time.Time + + Converting to a time.Time is a common operation so that the extensive + set of time-based operations provided by the time package can be leveraged. + See https://golang.org/pkg/time for more information. + + The AsTime method performs the conversion on a best-effort basis. Timestamps + with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) + are normalized during the conversion to a time.Time. To manually check for + invalid Timestamps per the documented limitations in timestamp.proto, + additionally call the CheckValid method: + + if err := ts.CheckValid(); err != nil { + ... // handle error + } + + + Conversion from a Go Time + + The timestamppb.New function can be used to construct a Timestamp message + from a standard Go time.Time value: + + ts := timestamppb.New(t) + ... // make use of ts as a *timestamppb.Timestamp + + In order to construct a Timestamp representing the current time, use Now: + + ts := timestamppb.Now() + ... // make use of ts as a *timestamppb.Timestamp + +` + case genid.File_google_protobuf_duration_proto: + return ` Package durationpb contains generated types for ` + genid.File_google_protobuf_duration_proto + `. + + The Duration message represents a signed span of time. 
+ + + Conversion to a Go Duration + + The AsDuration method can be used to convert a Duration message to a + standard Go time.Duration value: + + d := dur.AsDuration() + ... // make use of d as a time.Duration + + Converting to a time.Duration is a common operation so that the extensive + set of time-based operations provided by the time package can be leveraged. + See https://golang.org/pkg/time for more information. + + The AsDuration method performs the conversion on a best-effort basis. + Durations with denormal values (e.g., nanoseconds beyond -99999999 and + +99999999, inclusive; or seconds and nanoseconds with opposite signs) + are normalized during the conversion to a time.Duration. To manually check for + invalid Duration per the documented limitations in duration.proto, + additionally call the CheckValid method: + + if err := dur.CheckValid(); err != nil { + ... // handle error + } + + Note that the documented limitations in duration.proto does not protect a + Duration from overflowing the representable range of a time.Duration in Go. + The AsDuration method uses saturation arithmetic such that an overflow clamps + the resulting value to the closest representable value (e.g., math.MaxInt64 + for positive overflow and math.MinInt64 for negative overflow). + + + Conversion from a Go Duration + + The durationpb.New function can be used to construct a Duration message + from a standard Go time.Duration value: + + dur := durationpb.New(d) + ... // make use of d as a *durationpb.Duration + +` + case genid.File_google_protobuf_struct_proto: + return ` Package structpb contains generated types for ` + genid.File_google_protobuf_struct_proto + `. + + The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are + used to represent arbitrary JSON. The Value message represents a JSON value, + the Struct message represents a JSON object, and the ListValue message + represents a JSON array. See https://json.org for more information. + + The Value, Struct, and ListValue types have generated MarshalJSON and + UnmarshalJSON methods such that they serialize JSON equivalent to what the + messages themselves represent. Use of these types with the + "google.golang.org/protobuf/encoding/protojson" package + ensures that they will be serialized as their JSON equivalent. + + + Conversion to and from a Go interface + + The standard Go "encoding/json" package has functionality to serialize + arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and + ListValue.AsSlice methods can convert the protobuf message representation into + a form represented by interface{}, map[string]interface{}, and []interface{}. + This form can be used with other packages that operate on such data structures + and also directly with the standard json package. + + In order to convert the interface{}, map[string]interface{}, and []interface{} + forms back as Value, Struct, and ListValue messages, use the NewStruct, + NewList, and NewValue constructor functions. 
+ + + Example usage + + Consider the following example JSON object: + + { + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": { + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100" + }, + "phoneNumbers": [ + { + "type": "home", + "number": "212 555-1234" + }, + { + "type": "office", + "number": "646 555-4567" + } + ], + "children": [], + "spouse": null + } + + To construct a Value message representing the above JSON object: + + m, err := structpb.NewValue(map[string]interface{}{ + "firstName": "John", + "lastName": "Smith", + "isAlive": true, + "age": 27, + "address": map[string]interface{}{ + "streetAddress": "21 2nd Street", + "city": "New York", + "state": "NY", + "postalCode": "10021-3100", + }, + "phoneNumbers": []interface{}{ + map[string]interface{}{ + "type": "home", + "number": "212 555-1234", + }, + map[string]interface{}{ + "type": "office", + "number": "646 555-4567", + }, + }, + "children": []interface{}{}, + "spouse": nil, + }) + if err != nil { + ... // handle error + } + ... // make use of m as a *structpb.Value + +` + case genid.File_google_protobuf_field_mask_proto: + return ` Package fieldmaskpb contains generated types for ` + genid.File_google_protobuf_field_mask_proto + `. + + The FieldMask message represents a set of symbolic field paths. + The paths are specific to some target message type, + which is not stored within the FieldMask message itself. + + + Constructing a FieldMask + + The New function is used construct a FieldMask: + + var messageType *descriptorpb.DescriptorProto + fm, err := fieldmaskpb.New(messageType, "field.name", "field.number") + if err != nil { + ... // handle error + } + ... // make use of fm + + The "field.name" and "field.number" paths are valid paths according to the + google.protobuf.DescriptorProto message. Use of a path that does not correlate + to valid fields reachable from DescriptorProto would result in an error. + + Once a FieldMask message has been constructed, + the Append method can be used to insert additional paths to the path set: + + var messageType *descriptorpb.DescriptorProto + if err := fm.Append(messageType, "options"); err != nil { + ... // handle error + } + + + Type checking a FieldMask + + In order to verify that a FieldMask represents a set of fields that are + reachable from some target message type, use the IsValid method: + + var messageType *descriptorpb.DescriptorProto + if fm.IsValid(messageType) { + ... // make use of fm + } + + IsValid needs to be passed the target message type as an input since the + FieldMask message itself does not store the message type that the set of paths + are for. 
+` + default: + return "" + } +} + +func genMessageKnownFunctions(g *protogen.GeneratedFile, f *fileInfo, m *messageInfo) { + switch m.Desc.FullName() { + case genid.Any_message_fullname: + g.P("// New marshals src into a new Any instance.") + g.P("func New(src ", protoPackage.Ident("Message"), ") (*Any, error) {") + g.P(" dst := new(Any)") + g.P(" if err := dst.MarshalFrom(src); err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return dst, nil") + g.P("}") + g.P() + + g.P("// MarshalFrom marshals src into dst as the underlying message") + g.P("// using the provided marshal options.") + g.P("//") + g.P("// If no options are specified, call dst.MarshalFrom instead.") + g.P("func MarshalFrom(dst *Any, src ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("MarshalOptions"), ") error {") + g.P(" const urlPrefix = \"type.googleapis.com/\"") + g.P(" if src == nil {") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")") + g.P(" }") + g.P(" b, err := opts.Marshal(src)") + g.P(" if err != nil {") + g.P(" return err") + g.P(" }") + g.P(" dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName())") + g.P(" dst.Value = b") + g.P(" return nil") + g.P("}") + g.P() + + g.P("// UnmarshalTo unmarshals the underlying message from src into dst") + g.P("// using the provided unmarshal options.") + g.P("// It reports an error if dst is not of the right message type.") + g.P("//") + g.P("// If no options are specified, call src.UnmarshalTo instead.") + g.P("func UnmarshalTo(src *Any, dst ", protoPackage.Ident("Message"), ", opts ", protoPackage.Ident("UnmarshalOptions"), ") error {") + g.P(" if src == nil {") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil source message\")") + g.P(" }") + g.P(" if !src.MessageIs(dst) {") + g.P(" got := dst.ProtoReflect().Descriptor().FullName()") + g.P(" want := src.MessageName()") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"mismatched message type: got %q, want %q\", got, want)") + g.P(" }") + g.P(" return opts.Unmarshal(src.GetValue(), dst)") + g.P("}") + g.P() + + g.P("// UnmarshalNew unmarshals the underlying message from src into dst,") + g.P("// which is newly created message using a type resolved from the type URL.") + g.P("// The message type is resolved according to opt.Resolver,") + g.P("// which should implement protoregistry.MessageTypeResolver.") + g.P("// It reports an error if the underlying message type could not be resolved.") + g.P("//") + g.P("// If no options are specified, call src.UnmarshalNew instead.") + g.P("func UnmarshalNew(src *Any, opts ", protoPackage.Ident("UnmarshalOptions"), ") (dst ", protoPackage.Ident("Message"), ", err error) {") + g.P(" if src.GetTypeUrl() == \"\" {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid empty type URL\")") + g.P(" }") + g.P(" if opts.Resolver == nil {") + g.P(" opts.Resolver = ", protoregistryPackage.Ident("GlobalTypes")) + g.P(" }") + g.P(" r, ok := opts.Resolver.(", protoregistryPackage.Ident("MessageTypeResolver"), ")") + g.P(" if !ok {") + g.P(" return nil, ", protoregistryPackage.Ident("NotFound")) + g.P(" }") + g.P(" mt, err := r.FindMessageByURL(src.GetTypeUrl())") + g.P(" if err != nil {") + g.P(" if err == ", protoregistryPackage.Ident("NotFound"), " {") + g.P(" return nil, err") + g.P(" }") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"could not resolve %q: %v\", src.GetTypeUrl(), err)") + g.P(" }") + g.P(" dst = mt.New().Interface()") + 
g.P(" return dst, opts.Unmarshal(src.GetValue(), dst)") + g.P("}") + g.P() + + g.P("// MessageIs reports whether the underlying message is of the same type as m.") + g.P("func (x *Any) MessageIs(m ", protoPackage.Ident("Message"), ") bool {") + g.P(" if m == nil {") + g.P(" return false") + g.P(" }") + g.P(" url := x.GetTypeUrl()") + g.P(" name := string(m.ProtoReflect().Descriptor().FullName())") + g.P(" if !", stringsPackage.Ident("HasSuffix"), "(url, name) {") + g.P(" return false") + g.P(" }") + g.P(" return len(url) == len(name) || url[len(url)-len(name)-1] == '/'") + g.P("}") + g.P() + + g.P("// MessageName reports the full name of the underlying message,") + g.P("// returning an empty string if invalid.") + g.P("func (x *Any) MessageName() ", protoreflectPackage.Ident("FullName"), " {") + g.P(" url := x.GetTypeUrl()") + g.P(" name := ", protoreflectPackage.Ident("FullName"), "(url)") + g.P(" if i := ", stringsPackage.Ident("LastIndexByte"), "(url, '/'); i >= 0 {") + g.P(" name = name[i+len(\"/\"):]") + g.P(" }") + g.P(" if !name.IsValid() {") + g.P(" return \"\"") + g.P(" }") + g.P(" return name") + g.P("}") + g.P() + + g.P("// MarshalFrom marshals m into x as the underlying message.") + g.P("func (x *Any) MarshalFrom(m ", protoPackage.Ident("Message"), ") error {") + g.P(" return MarshalFrom(x, m, ", protoPackage.Ident("MarshalOptions"), "{})") + g.P("}") + g.P() + + g.P("// UnmarshalTo unmarshals the contents of the underlying message of x into m.") + g.P("// It resets m before performing the unmarshal operation.") + g.P("// It reports an error if m is not of the right message type.") + g.P("func (x *Any) UnmarshalTo(m ", protoPackage.Ident("Message"), ") error {") + g.P(" return UnmarshalTo(x, m, ", protoPackage.Ident("UnmarshalOptions"), "{})") + g.P("}") + g.P() + + g.P("// UnmarshalNew unmarshals the contents of the underlying message of x into") + g.P("// a newly allocated message of the specified type.") + g.P("// It reports an error if the underlying message type could not be resolved.") + g.P("func (x *Any) UnmarshalNew() (", protoPackage.Ident("Message"), ", error) {") + g.P(" return UnmarshalNew(x, ", protoPackage.Ident("UnmarshalOptions"), "{})") + g.P("}") + g.P() + + case genid.Timestamp_message_fullname: + g.P("// Now constructs a new Timestamp from the current time.") + g.P("func Now() *Timestamp {") + g.P(" return New(", timePackage.Ident("Now"), "())") + g.P("}") + g.P() + + g.P("// New constructs a new Timestamp from the provided time.Time.") + g.P("func New(t ", timePackage.Ident("Time"), ") *Timestamp {") + g.P(" return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())}") + g.P("}") + g.P() + + g.P("// AsTime converts x to a time.Time.") + g.P("func (x *Timestamp) AsTime() ", timePackage.Ident("Time"), " {") + g.P(" return ", timePackage.Ident("Unix"), "(int64(x.GetSeconds()), int64(x.GetNanos())).UTC()") + g.P("}") + g.P() + + g.P("// IsValid reports whether the timestamp is valid.") + g.P("// It is equivalent to CheckValid == nil.") + g.P("func (x *Timestamp) IsValid() bool {") + g.P(" return x.check() == 0") + g.P("}") + g.P() + + g.P("// CheckValid returns an error if the timestamp is invalid.") + g.P("// In particular, it checks whether the value represents a date that is") + g.P("// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.") + g.P("// An error is reported for a nil Timestamp.") + g.P("func (x *Timestamp) CheckValid() error {") + g.P(" switch x.check() {") + g.P(" case invalidNil:") + g.P(" return ", 
protoimplPackage.Ident("X"), ".NewError(\"invalid nil Timestamp\")") + g.P(" case invalidUnderflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) before 0001-01-01\", x)") + g.P(" case invalidOverflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) after 9999-12-31\", x)") + g.P(" case invalidNanos:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"timestamp (%v) has out-of-range nanos\", x)") + g.P(" default:") + g.P(" return nil") + g.P(" }") + g.P("}") + g.P() + + g.P("const (") + g.P(" _ = iota") + g.P(" invalidNil") + g.P(" invalidUnderflow") + g.P(" invalidOverflow") + g.P(" invalidNanos") + g.P(")") + g.P() + + g.P("func (x *Timestamp) check() uint {") + g.P(" const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive") + g.P(" const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" switch {") + g.P(" case x == nil:") + g.P(" return invalidNil") + g.P(" case secs < minTimestamp:") + g.P(" return invalidUnderflow") + g.P(" case secs > maxTimestamp:") + g.P(" return invalidOverflow") + g.P(" case nanos < 0 || nanos >= 1e9:") + g.P(" return invalidNanos") + g.P(" default:") + g.P(" return 0") + g.P(" }") + g.P("}") + g.P() + + case genid.Duration_message_fullname: + g.P("// New constructs a new Duration from the provided time.Duration.") + g.P("func New(d ", timePackage.Ident("Duration"), ") *Duration {") + g.P(" nanos := d.Nanoseconds()") + g.P(" secs := nanos / 1e9") + g.P(" nanos -= secs * 1e9") + g.P(" return &Duration{Seconds: int64(secs), Nanos: int32(nanos)}") + g.P("}") + g.P() + + g.P("// AsDuration converts x to a time.Duration,") + g.P("// returning the closest duration value in the event of overflow.") + g.P("func (x *Duration) AsDuration() ", timePackage.Ident("Duration"), " {") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" d := ", timePackage.Ident("Duration"), "(secs) * ", timePackage.Ident("Second")) + g.P(" overflow := d/", timePackage.Ident("Second"), " != ", timePackage.Ident("Duration"), "(secs)") + g.P(" d += ", timePackage.Ident("Duration"), "(nanos) * ", timePackage.Ident("Nanosecond")) + g.P(" overflow = overflow || (secs < 0 && nanos < 0 && d > 0)") + g.P(" overflow = overflow || (secs > 0 && nanos > 0 && d < 0)") + g.P(" if overflow {") + g.P(" switch {") + g.P(" case secs < 0:") + g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MinInt64"), ")") + g.P(" case secs > 0:") + g.P(" return ", timePackage.Ident("Duration"), "(", mathPackage.Ident("MaxInt64"), ")") + g.P(" }") + g.P(" }") + g.P(" return d") + g.P("}") + g.P() + + g.P("// IsValid reports whether the duration is valid.") + g.P("// It is equivalent to CheckValid == nil.") + g.P("func (x *Duration) IsValid() bool {") + g.P(" return x.check() == 0") + g.P("}") + g.P() + + g.P("// CheckValid returns an error if the duration is invalid.") + g.P("// In particular, it checks whether the value is within the range of") + g.P("// -10000 years to +10000 years inclusive.") + g.P("// An error is reported for a nil Duration.") + g.P("func (x *Duration) CheckValid() error {") + g.P(" switch x.check() {") + g.P(" case invalidNil:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid nil Duration\")") + g.P(" case invalidUnderflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration 
(%v) exceeds -10000 years\", x)") + g.P(" case invalidOverflow:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) exceeds +10000 years\", x)") + g.P(" case invalidNanosRange:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has out-of-range nanos\", x)") + g.P(" case invalidNanosSign:") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"duration (%v) has seconds and nanos with different signs\", x)") + g.P(" default:") + g.P(" return nil") + g.P(" }") + g.P("}") + g.P() + + g.P("const (") + g.P(" _ = iota") + g.P(" invalidNil") + g.P(" invalidUnderflow") + g.P(" invalidOverflow") + g.P(" invalidNanosRange") + g.P(" invalidNanosSign") + g.P(")") + g.P() + + g.P("func (x *Duration) check() uint {") + g.P(" const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min") + g.P(" secs := x.GetSeconds()") + g.P(" nanos := x.GetNanos()") + g.P(" switch {") + g.P(" case x == nil:") + g.P(" return invalidNil") + g.P(" case secs < -absDuration:") + g.P(" return invalidUnderflow") + g.P(" case secs > +absDuration:") + g.P(" return invalidOverflow") + g.P(" case nanos <= -1e9 || nanos >= +1e9:") + g.P(" return invalidNanosRange") + g.P(" case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0):") + g.P(" return invalidNanosSign") + g.P(" default:") + g.P(" return 0") + g.P(" }") + g.P("}") + g.P() + + case genid.Struct_message_fullname: + g.P("// NewStruct constructs a Struct from a general-purpose Go map.") + g.P("// The map keys must be valid UTF-8.") + g.P("// The map values are converted using NewValue.") + g.P("func NewStruct(v map[string]interface{}) (*Struct, error) {") + g.P(" x := &Struct{Fields: make(map[string]*Value, len(v))}") + g.P(" for k, v := range v {") + g.P(" if !", utf8Package.Ident("ValidString"), "(k) {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", k)") + g.P(" }") + g.P(" var err error") + g.P(" x.Fields[k], err = NewValue(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" }") + g.P(" return x, nil") + g.P("}") + g.P() + + g.P("// AsMap converts x to a general-purpose Go map.") + g.P("// The map values are converted by calling Value.AsInterface.") + g.P("func (x *Struct) AsMap() map[string]interface{} {") + g.P(" vs := make(map[string]interface{})") + g.P(" for k, v := range x.GetFields() {") + g.P(" vs[k] = v.AsInterface()") + g.P(" }") + g.P(" return vs") + g.P("}") + g.P() + + g.P("func (x *Struct) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *Struct) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.ListValue_message_fullname: + g.P("// NewList constructs a ListValue from a general-purpose Go slice.") + g.P("// The slice elements are converted using NewValue.") + g.P("func NewList(v []interface{}) (*ListValue, error) {") + g.P(" x := &ListValue{Values: make([]*Value, len(v))}") + g.P(" for i, v := range v {") + g.P(" var err error") + g.P(" x.Values[i], err = NewValue(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" }") + g.P(" return x, nil") + g.P("}") + g.P() + + g.P("// AsSlice converts x to a general-purpose Go slice.") + g.P("// The slice elements are converted by calling Value.AsInterface.") + g.P("func (x *ListValue) AsSlice() []interface{} {") + g.P(" vs := make([]interface{}, len(x.GetValues()))") + g.P(" 
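A detail worth noting in the Duration code above: AsDuration saturates at math.MinInt64/MaxInt64 nanoseconds rather than wrapping, because a valid protobuf Duration (up to roughly ±10000 years) can exceed what time.Duration holds. An illustrative sketch against the published durationpb package:

package main

import (
    "fmt"
    "math"
    "time"

    "google.golang.org/protobuf/types/known/durationpb"
)

func main() {
    d := durationpb.New(90 * time.Second)
    fmt.Println(d.AsDuration()) // 1m30s

    // ~10000 years is a valid protobuf Duration but overflows int64 nanoseconds,
    // so AsDuration clamps to math.MaxInt64 instead of wrapping around.
    big := &durationpb.Duration{Seconds: 315576000000}
    fmt.Println(big.IsValid())                                    // true
    fmt.Println(big.AsDuration() == time.Duration(math.MaxInt64)) // true
}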
for i, v := range x.GetValues() {") + g.P(" vs[i] = v.AsInterface()") + g.P(" }") + g.P(" return vs") + g.P("}") + g.P() + + g.P("func (x *ListValue) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *ListValue) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.Value_message_fullname: + g.P("// NewValue constructs a Value from a general-purpose Go interface.") + g.P("//") + g.P("// ╔════════════════════════╤════════════════════════════════════════════╗") + g.P("// ║ Go type │ Conversion ║") + g.P("// ╠════════════════════════╪════════════════════════════════════════════╣") + g.P("// ║ nil │ stored as NullValue ║") + g.P("// ║ bool │ stored as BoolValue ║") + g.P("// ║ int, int32, int64 │ stored as NumberValue ║") + g.P("// ║ uint, uint32, uint64 │ stored as NumberValue ║") + g.P("// ║ float32, float64 │ stored as NumberValue ║") + g.P("// ║ string │ stored as StringValue; must be valid UTF-8 ║") + g.P("// ║ []byte │ stored as StringValue; base64-encoded ║") + g.P("// ║ map[string]interface{} │ stored as StructValue ║") + g.P("// ║ []interface{} │ stored as ListValue ║") + g.P("// ╚════════════════════════╧════════════════════════════════════════════╝") + g.P("//") + g.P("// When converting an int64 or uint64 to a NumberValue, numeric precision loss") + g.P("// is possible since they are stored as a float64.") + g.P("func NewValue(v interface{}) (*Value, error) {") + g.P(" switch v := v.(type) {") + g.P(" case nil:") + g.P(" return NewNullValue(), nil") + g.P(" case bool:") + g.P(" return NewBoolValue(v), nil") + g.P(" case int:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case int32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case int64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case uint64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case float32:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case float64:") + g.P(" return NewNumberValue(float64(v)), nil") + g.P(" case string:") + g.P(" if !", utf8Package.Ident("ValidString"), "(v) {") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid UTF-8 in string: %q\", v)") + g.P(" }") + g.P(" return NewStringValue(v), nil") + g.P(" case []byte:") + g.P(" s := ", base64Package.Ident("StdEncoding"), ".EncodeToString(v)") + g.P(" return NewStringValue(s), nil") + g.P(" case map[string]interface{}:") + g.P(" v2, err := NewStruct(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return NewStructValue(v2), nil") + g.P(" case []interface{}:") + g.P(" v2, err := NewList(v)") + g.P(" if err != nil {") + g.P(" return nil, err") + g.P(" }") + g.P(" return NewListValue(v2), nil") + g.P(" default:") + g.P(" return nil, ", protoimplPackage.Ident("X"), ".NewError(\"invalid type: %T\", v)") + g.P(" }") + g.P("}") + g.P() + + g.P("// NewNullValue constructs a new null Value.") + g.P("func NewNullValue() *Value {") + g.P(" return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}") + g.P("}") + g.P() + + g.P("// NewBoolValue constructs a new boolean Value.") + g.P("func NewBoolValue(v bool) *Value {") + g.P(" return &Value{Kind: &Value_BoolValue{BoolValue: v}}") + g.P("}") + g.P() + + g.P("// NewNumberValue constructs a new number 
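The conversion table above is what the structpb constructors implement; a brief sketch of the round trip between plain Go values, Struct/Value, and JSON (illustrative only; the sample keys and values are invented):

package main

import (
    "fmt"

    "google.golang.org/protobuf/types/known/structpb"
)

func main() {
    // Arbitrary JSON-like data built from plain Go values via NewValue.
    s, err := structpb.NewStruct(map[string]interface{}{
        "name":  "example",
        "count": 3, // stored as NumberValue (float64)
        "tags":  []interface{}{"a", "b"},
    })
    if err != nil {
        panic(err)
    }

    b, _ := s.MarshalJSON()
    fmt.Println(string(b)) // JSON text such as {"count":3,"name":"example","tags":["a","b"]}

    // AsMap converts back to map[string]interface{} via Value.AsInterface.
    fmt.Println(s.AsMap()["count"]) // 3
}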
Value.") + g.P("func NewNumberValue(v float64) *Value {") + g.P(" return &Value{Kind: &Value_NumberValue{NumberValue: v}}") + g.P("}") + g.P() + + g.P("// NewStringValue constructs a new string Value.") + g.P("func NewStringValue(v string) *Value {") + g.P(" return &Value{Kind: &Value_StringValue{StringValue: v}}") + g.P("}") + g.P() + + g.P("// NewStructValue constructs a new struct Value.") + g.P("func NewStructValue(v *Struct) *Value {") + g.P(" return &Value{Kind: &Value_StructValue{StructValue: v}}") + g.P("}") + g.P() + + g.P("// NewListValue constructs a new list Value.") + g.P("func NewListValue(v *ListValue) *Value {") + g.P(" return &Value{Kind: &Value_ListValue{ListValue: v}}") + g.P("}") + g.P() + + g.P("// AsInterface converts x to a general-purpose Go interface.") + g.P("//") + g.P("// Calling Value.MarshalJSON and \"encoding/json\".Marshal on this output produce") + g.P("// semantically equivalent JSON (assuming no errors occur).") + g.P("//") + g.P("// Floating-point values (i.e., \"NaN\", \"Infinity\", and \"-Infinity\") are") + g.P("// converted as strings to remain compatible with MarshalJSON.") + g.P("func (x *Value) AsInterface() interface{} {") + g.P(" switch v := x.GetKind().(type) {") + g.P(" case *Value_NumberValue:") + g.P(" if v != nil {") + g.P(" switch {") + g.P(" case ", mathPackage.Ident("IsNaN"), "(v.NumberValue):") + g.P(" return \"NaN\"") + g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, +1):") + g.P(" return \"Infinity\"") + g.P(" case ", mathPackage.Ident("IsInf"), "(v.NumberValue, -1):") + g.P(" return \"-Infinity\"") + g.P(" default:") + g.P(" return v.NumberValue") + g.P(" }") + g.P(" }") + g.P(" case *Value_StringValue:") + g.P(" if v != nil {") + g.P(" return v.StringValue") + g.P(" }") + g.P(" case *Value_BoolValue:") + g.P(" if v != nil {") + g.P(" return v.BoolValue") + g.P(" }") + g.P(" case *Value_StructValue:") + g.P(" if v != nil {") + g.P(" return v.StructValue.AsMap()") + g.P(" }") + g.P(" case *Value_ListValue:") + g.P(" if v != nil {") + g.P(" return v.ListValue.AsSlice()") + g.P(" }") + g.P(" }") + g.P(" return nil") + g.P("}") + g.P() + + g.P("func (x *Value) MarshalJSON() ([]byte, error) {") + g.P(" return ", protojsonPackage.Ident("Marshal"), "(x)") + g.P("}") + g.P() + + g.P("func (x *Value) UnmarshalJSON(b []byte) error {") + g.P(" return ", protojsonPackage.Ident("Unmarshal"), "(b, x)") + g.P("}") + g.P() + + case genid.FieldMask_message_fullname: + g.P("// New constructs a field mask from a list of paths and verifies that") + g.P("// each one is valid according to the specified message type.") + g.P("func New(m ", protoPackage.Ident("Message"), ", paths ...string) (*FieldMask, error) {") + g.P(" x := new(FieldMask)") + g.P(" return x, x.Append(m, paths...)") + g.P("}") + g.P() + + g.P("// Union returns the union of all the paths in the input field masks.") + g.P("func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {") + g.P(" var out []string") + g.P(" out = append(out, mx.GetPaths()...)") + g.P(" out = append(out, my.GetPaths()...)") + g.P(" for _, m := range ms {") + g.P(" out = append(out, m.GetPaths()...)") + g.P(" }") + g.P(" return &FieldMask{Paths: normalizePaths(out)}") + g.P("}") + g.P() + + g.P("// Intersect returns the intersection of all the paths in the input field masks.") + g.P("func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask {") + g.P(" var ss1, ss2 []string // reused buffers for performance") + g.P(" intersect := func(out, in []string) []string {") + g.P(" ss1 
= normalizePaths(append(ss1[:0], in...))") + g.P(" ss2 = normalizePaths(append(ss2[:0], out...))") + g.P(" out = out[:0]") + g.P(" for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); {") + g.P(" switch s1, s2 := ss1[i1], ss2[i2]; {") + g.P(" case hasPathPrefix(s1, s2):") + g.P(" out = append(out, s1)") + g.P(" i1++") + g.P(" case hasPathPrefix(s2, s1):") + g.P(" out = append(out, s2)") + g.P(" i2++") + g.P(" case lessPath(s1, s2):") + g.P(" i1++") + g.P(" case lessPath(s2, s1):") + g.P(" i2++") + g.P(" }") + g.P(" }") + g.P(" return out") + g.P(" }") + g.P() + g.P(" out := Union(mx, my, ms...).GetPaths()") + g.P(" out = intersect(out, mx.GetPaths())") + g.P(" out = intersect(out, my.GetPaths())") + g.P(" for _, m := range ms {") + g.P(" out = intersect(out, m.GetPaths())") + g.P(" }") + g.P(" return &FieldMask{Paths: normalizePaths(out)}") + g.P("}") + g.P() + + g.P("// IsValid reports whether all the paths are syntactically valid and") + g.P("// refer to known fields in the specified message type.") + g.P("// It reports false for a nil FieldMask.") + g.P("func (x *FieldMask) IsValid(m ", protoPackage.Ident("Message"), ") bool {") + g.P(" paths := x.GetPaths()") + g.P(" return x != nil && numValidPaths(m, paths) == len(paths)") + g.P("}") + g.P() + + g.P("// Append appends a list of paths to the mask and verifies that each one") + g.P("// is valid according to the specified message type.") + g.P("// An invalid path is not appended and breaks insertion of subsequent paths.") + g.P("func (x *FieldMask) Append(m ", protoPackage.Ident("Message"), ", paths ...string) error {") + g.P(" numValid := numValidPaths(m, paths)") + g.P(" x.Paths = append(x.Paths, paths[:numValid]...)") + g.P(" paths = paths[numValid:]") + g.P(" if len(paths) > 0 {") + g.P(" name := m.ProtoReflect().Descriptor().FullName()") + g.P(" return ", protoimplPackage.Ident("X"), ".NewError(\"invalid path %q for message %q\", paths[0], name)") + g.P(" }") + g.P(" return nil") + g.P("}") + g.P() + + g.P("func numValidPaths(m ", protoPackage.Ident("Message"), ", paths []string) int {") + g.P(" md0 := m.ProtoReflect().Descriptor()") + g.P(" for i, path := range paths {") + g.P(" md := md0") + g.P(" if !rangeFields(path, func(field string) bool {") + g.P(" // Search the field within the message.") + g.P(" if md == nil {") + g.P(" return false // not within a message") + g.P(" }") + g.P(" fd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(field))") + g.P(" // The real field name of a group is the message name.") + g.P(" if fd == nil {") + g.P(" gd := md.Fields().ByName(", protoreflectPackage.Ident("Name"), "(", stringsPackage.Ident("ToLower"), "(field)))") + g.P(" if gd != nil && gd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(gd.Message().Name()) == field {") + g.P(" fd = gd") + g.P(" }") + g.P(" } else if fd.Kind() == ", protoreflectPackage.Ident("GroupKind"), " && string(fd.Message().Name()) != field {") + g.P(" fd = nil") + g.P(" }") + g.P(" if fd == nil {") + g.P(" return false // message has does not have this field") + g.P(" }") + g.P() + g.P(" // Identify the next message to search within.") + g.P(" md = fd.Message() // may be nil") + g.P(" if fd.IsMap() {") + g.P(" md = fd.MapValue().Message() // may be nil") + g.P(" }") + g.P(" return true") + g.P(" }) {") + g.P(" return i") + g.P(" }") + g.P(" }") + g.P(" return len(paths)") + g.P("}") + g.P() + + g.P("// Normalize converts the mask to its canonical form where all paths are sorted") + g.P("// and redundant paths are removed.") + g.P("func 
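The FieldMask helpers being generated here surface as google.golang.org/protobuf/types/known/fieldmaskpb. A small sketch using a well-known message so the paths can actually be validated (illustrative, not part of this patch):

package main

import (
    "fmt"

    "google.golang.org/protobuf/types/known/fieldmaskpb"
    "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
    // New validates each path against the message descriptor.
    m1, err := fieldmaskpb.New(&timestamppb.Timestamp{}, "seconds")
    if err != nil {
        panic(err)
    }
    m2, _ := fieldmaskpb.New(&timestamppb.Timestamp{}, "seconds", "nanos")

    fmt.Println(fieldmaskpb.Union(m1, m2).GetPaths())     // [nanos seconds]
    fmt.Println(fieldmaskpb.Intersect(m1, m2).GetPaths()) // [seconds]

    // An unknown field is rejected and not appended.
    if err := m1.Append(&timestamppb.Timestamp{}, "no_such_field"); err != nil {
        fmt.Println(err) // reports an invalid-path error; m1 is left unchanged
    }
}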
(x *FieldMask) Normalize() {") + g.P(" x.Paths = normalizePaths(x.Paths)") + g.P("}") + g.P() + g.P("func normalizePaths(paths []string) []string {") + g.P(" ", sortPackage.Ident("Slice"), "(paths, func(i, j int) bool {") + g.P(" return lessPath(paths[i], paths[j])") + g.P(" })") + g.P() + g.P(" // Elide any path that is a prefix match on the previous.") + g.P(" out := paths[:0]") + g.P(" for _, path := range paths {") + g.P(" if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) {") + g.P(" continue") + g.P(" }") + g.P(" out = append(out, path)") + g.P(" }") + g.P(" return out") + g.P("}") + g.P() + + g.P("// hasPathPrefix is like strings.HasPrefix, but further checks for either") + g.P("// an exact matche or that the prefix is delimited by a dot.") + g.P("func hasPathPrefix(path, prefix string) bool {") + g.P(" return ", stringsPackage.Ident("HasPrefix"), "(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.')") + g.P("}") + g.P() + + g.P("// lessPath is a lexicographical comparison where dot is specially treated") + g.P("// as the smallest symbol.") + g.P("func lessPath(x, y string) bool {") + g.P(" for i := 0; i < len(x) && i < len(y); i++ {") + g.P(" if x[i] != y[i] {") + g.P(" return (x[i] - '.') < (y[i] - '.')") + g.P(" }") + g.P(" }") + g.P(" return len(x) < len(y)") + g.P("}") + g.P() + + g.P("// rangeFields is like strings.Split(path, \".\"), but avoids allocations by") + g.P("// iterating over each field in place and calling a iterator function.") + g.P("func rangeFields(path string, f func(field string) bool) bool {") + g.P(" for {") + g.P(" var field string") + g.P(" if i := ", stringsPackage.Ident("IndexByte"), "(path, '.'); i >= 0 {") + g.P(" field, path = path[:i], path[i:]") + g.P(" } else {") + g.P(" field, path = path, \"\"") + g.P(" }") + g.P() + g.P(" if !f(field) {") + g.P(" return false") + g.P(" }") + g.P() + g.P(" if len(path) == 0 {") + g.P(" return true") + g.P(" }") + g.P(" path = ", stringsPackage.Ident("TrimPrefix"), "(path, \".\")") + g.P(" }") + g.P("}") + g.P() + + case genid.BoolValue_message_fullname, + genid.Int32Value_message_fullname, + genid.Int64Value_message_fullname, + genid.UInt32Value_message_fullname, + genid.UInt64Value_message_fullname, + genid.FloatValue_message_fullname, + genid.DoubleValue_message_fullname, + genid.StringValue_message_fullname, + genid.BytesValue_message_fullname: + funcName := strings.TrimSuffix(m.GoIdent.GoName, "Value") + typeName := strings.ToLower(funcName) + switch typeName { + case "float": + typeName = "float32" + case "double": + typeName = "float64" + case "bytes": + typeName = "[]byte" + } + + g.P("// ", funcName, " stores v in a new ", m.GoIdent, " and returns a pointer to it.") + g.P("func ", funcName, "(v ", typeName, ") *", m.GoIdent, " {") + g.P(" return &", m.GoIdent, "{Value: v}") + g.P("}") + g.P() + } +} diff --git a/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go new file mode 100644 index 000000000..3892d0583 --- /dev/null +++ b/vendor/google.golang.org/protobuf/compiler/protogen/protogen.go @@ -0,0 +1,1419 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protogen provides support for writing protoc plugins. 
+// +// Plugins for protoc, the Protocol Buffer compiler, +// are programs which read a CodeGeneratorRequest message from standard input +// and write a CodeGeneratorResponse message to standard output. +// This package provides support for writing plugins which generate Go code. +package protogen + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/pluginpb" +) + +const goPackageDocURL = "https://developers.google.com/protocol-buffers/docs/reference/go-generated#package" + +// Run executes a function as a protoc plugin. +// +// It reads a CodeGeneratorRequest message from os.Stdin, invokes the plugin +// function, and writes a CodeGeneratorResponse message to os.Stdout. +// +// If a failure occurs while reading or writing, Run prints an error to +// os.Stderr and calls os.Exit(1). +func (opts Options) Run(f func(*Plugin) error) { + if err := run(opts, f); err != nil { + fmt.Fprintf(os.Stderr, "%s: %v\n", filepath.Base(os.Args[0]), err) + os.Exit(1) + } +} + +func run(opts Options, f func(*Plugin) error) error { + if len(os.Args) > 1 { + return fmt.Errorf("unknown argument %q (this program should be run by protoc, not directly)", os.Args[1]) + } + in, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return err + } + req := &pluginpb.CodeGeneratorRequest{} + if err := proto.Unmarshal(in, req); err != nil { + return err + } + gen, err := opts.New(req) + if err != nil { + return err + } + if err := f(gen); err != nil { + // Errors from the plugin function are reported by setting the + // error field in the CodeGeneratorResponse. + // + // In contrast, errors that indicate a problem in protoc + // itself (unparsable input, I/O errors, etc.) are reported + // to stderr. + gen.Error(err) + } + resp := gen.Response() + out, err := proto.Marshal(resp) + if err != nil { + return err + } + if _, err := os.Stdout.Write(out); err != nil { + return err + } + return nil +} + +// A Plugin is a protoc plugin invocation. +type Plugin struct { + // Request is the CodeGeneratorRequest provided by protoc. + Request *pluginpb.CodeGeneratorRequest + + // Files is the set of files to generate and everything they import. + // Files appear in topological order, so each file appears before any + // file that imports it. + Files []*File + FilesByPath map[string]*File + + // SupportedFeatures is the set of protobuf language features supported by + // this generator plugin. See the documentation for + // google.protobuf.CodeGeneratorResponse.supported_features for details. + SupportedFeatures uint64 + + fileReg *protoregistry.Files + enumsByName map[protoreflect.FullName]*Enum + messagesByName map[protoreflect.FullName]*Message + annotateCode bool + pathType pathType + module string + genFiles []*GeneratedFile + opts Options + err error +} + +type Options struct { + // If ParamFunc is non-nil, it will be called with each unknown + // generator parameter. 
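To make the flow above concrete: a plugin built on this package is a main function that hands a callback to Options.Run and writes generated files through the Plugin. A minimal sketch; the plugin name and the "_example.pb.go" suffix are invented for illustration:

package main

import (
    "google.golang.org/protobuf/compiler/protogen"
    "google.golang.org/protobuf/types/pluginpb"
)

func main() {
    protogen.Options{}.Run(func(gen *protogen.Plugin) error {
        // Advertise support for proto3 optional fields.
        gen.SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL)
        for _, f := range gen.Files {
            if !f.Generate {
                continue // imported dependency, not listed in FileToGenerate
            }
            g := gen.NewGeneratedFile(f.GeneratedFilenamePrefix+"_example.pb.go", f.GoImportPath)
            g.P("// Code generated by protoc-gen-example. DO NOT EDIT.")
            g.P("package ", f.GoPackageName)
        }
        return nil
    })
}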
+ // + // Plugins for protoc can accept parameters from the command line, + // passed in the --<lang>_out protoc, separated from the output + // directory with a colon; e.g., + // + // --go_out=<param1>=<value1>,<param2>=<value2>:<output_directory> + // + // Parameters passed in this fashion as a comma-separated list of + // key=value pairs will be passed to the ParamFunc. + // + // The (flag.FlagSet).Set method matches this function signature, + // so parameters can be converted into flags as in the following: + // + // var flags flag.FlagSet + // value := flags.Bool("param", false, "") + // opts := &protogen.Options{ + // ParamFunc: flags.Set, + // } + // protogen.Run(opts, func(p *protogen.Plugin) error { + // if *value { ... } + // }) + ParamFunc func(name, value string) error + + // ImportRewriteFunc is called with the import path of each package + // imported by a generated file. It returns the import path to use + // for this package. + ImportRewriteFunc func(GoImportPath) GoImportPath +} + +// New returns a new Plugin. +func (opts Options) New(req *pluginpb.CodeGeneratorRequest) (*Plugin, error) { + gen := &Plugin{ + Request: req, + FilesByPath: make(map[string]*File), + fileReg: new(protoregistry.Files), + enumsByName: make(map[protoreflect.FullName]*Enum), + messagesByName: make(map[protoreflect.FullName]*Message), + opts: opts, + } + + packageNames := make(map[string]GoPackageName) // filename -> package name + importPaths := make(map[string]GoImportPath) // filename -> import path + mfiles := make(map[string]bool) // filename set + var packageImportPath GoImportPath + for _, param := range strings.Split(req.GetParameter(), ",") { + var value string + if i := strings.Index(param, "="); i >= 0 { + value = param[i+1:] + param = param[0:i] + } + switch param { + case "": + // Ignore. + case "import_path": + packageImportPath = GoImportPath(value) + case "module": + gen.module = value + case "paths": + switch value { + case "import": + gen.pathType = pathTypeImport + case "source_relative": + gen.pathType = pathTypeSourceRelative + default: + return nil, fmt.Errorf(`unknown path type %q: want "import" or "source_relative"`, value) + } + case "annotate_code": + switch value { + case "true", "": + gen.annotateCode = true + case "false": + default: + return nil, fmt.Errorf(`bad value for parameter %q: want "true" or "false"`, param) + } + default: + if param[0] == 'M' { + if i := strings.Index(value, ";"); i >= 0 { + pkgName := GoPackageName(value[i+1:]) + if otherName, ok := packageNames[param[1:]]; ok && pkgName != otherName { + return nil, fmt.Errorf("inconsistent package names for %q: %q != %q", value[:i], pkgName, otherName) + } + packageNames[param[1:]] = pkgName + value = value[:i] + } + importPaths[param[1:]] = GoImportPath(value) + mfiles[param[1:]] = true + continue + } + if opts.ParamFunc != nil { + if err := opts.ParamFunc(param, value); err != nil { + return nil, err + } + } + } + } + if gen.module != "" { + // When the module= option is provided, we strip the module name + // prefix from generated files. This only makes sense if generated + // filenames are based on the import path, so default to paths=import + // and complain if source_relative was selected manually. + switch gen.pathType { + case pathTypeLegacy: + gen.pathType = pathTypeImport + case pathTypeSourceRelative: + return nil, fmt.Errorf("cannot use module= with paths=source_relative") + } + } + + // Figure out the import path and package name for each file. + // + // The rules here are complicated and have grown organically over time.
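ImportRewriteFunc, declared above, is the hook for redirecting import paths in generated code, and ParamFunc is how unknown key=value parameters reach the plugin. A hedged sketch wiring both up; the flag name and the rewritten module prefixes are invented:

package main

import (
    "flag"
    "strings"

    "google.golang.org/protobuf/compiler/protogen"
)

func main() {
    var flags flag.FlagSet
    verbose := flags.Bool("verbose", false, "log extra information")

    opts := protogen.Options{
        // Unknown key=value parameters from --xxx_out/--xxx_opt land here.
        ParamFunc: flags.Set,
        // Rewrite one import prefix; every other path passes through unchanged.
        ImportRewriteFunc: func(p protogen.GoImportPath) protogen.GoImportPath {
            const from, to = "github.com/example/protos", "example.internal/protos" // hypothetical paths
            return protogen.GoImportPath(strings.Replace(string(p), from, to, 1))
        },
    }
    opts.Run(func(gen *protogen.Plugin) error {
        _ = *verbose // a real plugin would act on the flag here
        return nil
    })
}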
+ // Interactions between different ways of specifying package information + // may be surprising. + // + // The recommended approach is to include a go_package option in every + // .proto source file specifying the full import path of the Go package + // associated with this file. + // + // option go_package = "google.golang.org/protobuf/types/known/anypb"; + // + // Build systems which want to exert full control over import paths may + // specify M<filename>=<import_path> flags. + // + // Other approaches are not recommended. + generatedFileNames := make(map[string]bool) + for _, name := range gen.Request.FileToGenerate { + generatedFileNames[name] = true + } + // We need to determine the import paths before the package names, + // because the Go package name for a file is sometimes derived from a + // different file in the same package. + packageNameForImportPath := make(map[GoImportPath]GoPackageName) + for _, fdesc := range gen.Request.ProtoFile { + filename := fdesc.GetName() + packageName, importPath := goPackageOption(fdesc) + switch { + case importPaths[filename] != "": + // Command line: Mfoo.proto=quux/bar + // + // Explicit mapping of source file to import path. + case generatedFileNames[filename] && packageImportPath != "": + // Command line: import_path=quux/bar + // + // The import_path flag sets the import path for every file that + // we generate code for. + importPaths[filename] = packageImportPath + case importPath != "": + // Source file: option go_package = "quux/bar"; + // + // The go_package option sets the import path. Most users should use this. + importPaths[filename] = importPath + default: + // Source filename. + // + // Last resort when nothing else is available. + importPaths[filename] = GoImportPath(path.Dir(filename)) + } + if packageName != "" { + packageNameForImportPath[importPaths[filename]] = packageName + } + } + for _, fdesc := range gen.Request.ProtoFile { + filename := fdesc.GetName() + packageName, importPath := goPackageOption(fdesc) + defaultPackageName := packageNameForImportPath[importPaths[filename]] + switch { + case packageNames[filename] != "": + // A package name specified by the "M" command-line argument. + case packageName != "": + // TODO: For the "M" command-line argument, this means that the + // package name can be derived from the go_package option. + // Go package information should either consistently come from the + // command-line or the .proto source file, but not both. + // See how to make this consistent. + + // Source file: option go_package = "quux/bar"; + packageNames[filename] = packageName + case defaultPackageName != "": + // A go_package option in another file in the same package. + // + // This is a poor choice in general, since every source file should + // contain a go_package option. Supported mainly for historical + // compatibility. + packageNames[filename] = defaultPackageName + case generatedFileNames[filename] && packageImportPath != "": + // Command line: import_path=quux/bar + packageNames[filename] = cleanPackageName(path.Base(string(packageImportPath))) + case fdesc.GetPackage() != "": + // Source file: package quux.bar; + packageNames[filename] = cleanPackageName(fdesc.GetPackage()) + default: + // Source filename.
+ packageNames[filename] = cleanPackageName(baseName(filename)) + } + + goPkgOpt := string(importPaths[filename]) + if path.Base(string(goPkgOpt)) != string(packageNames[filename]) { + goPkgOpt += ";" + string(packageNames[filename]) + } + switch { + case packageImportPath != "": + // Command line: import_path=quux/bar + warn("Deprecated use of the 'import_path' command-line argument. In %q, please specify:\n"+ + "\toption go_package = %q;\n"+ + "A future release of protoc-gen-go will no longer support the 'import_path' argument.\n"+ + "See "+goPackageDocURL+" for more information.\n"+ + "\n", fdesc.GetName(), goPkgOpt) + case mfiles[filename]: + // Command line: M=foo.proto=quux/bar + case packageName != "" && importPath == "": + // Source file: option go_package = "quux"; + warn("Deprecated use of 'go_package' option without a full import path in %q, please specify:\n"+ + "\toption go_package = %q;\n"+ + "A future release of protoc-gen-go will require the import path be specified.\n"+ + "See "+goPackageDocURL+" for more information.\n"+ + "\n", fdesc.GetName(), goPkgOpt) + case packageName == "" && importPath == "": + // No Go package information provided. + dotIdx := strings.Index(goPkgOpt, ".") // heuristic for top-level domain + slashIdx := strings.Index(goPkgOpt, "/") // heuristic for multi-segment path + if isFull := 0 <= dotIdx && dotIdx <= slashIdx; isFull { + warn("Missing 'go_package' option in %q, please specify:\n"+ + "\toption go_package = %q;\n"+ + "A future release of protoc-gen-go will require this be specified.\n"+ + "See "+goPackageDocURL+" for more information.\n"+ + "\n", fdesc.GetName(), goPkgOpt) + } else { + warn("Missing 'go_package' option in %q,\n"+ + "please specify it with the full Go package path as\n"+ + "a future release of protoc-gen-go will require this be specified.\n"+ + "See "+goPackageDocURL+" for more information.\n"+ + "\n", fdesc.GetName()) + } + } + } + + // Consistency check: Every file with the same Go import path should have + // the same Go package name. + packageFiles := make(map[GoImportPath][]string) + for filename, importPath := range importPaths { + if _, ok := packageNames[filename]; !ok { + // Skip files mentioned in a M= parameter + // but which do not appear in the CodeGeneratorRequest. + continue + } + packageFiles[importPath] = append(packageFiles[importPath], filename) + } + for importPath, filenames := range packageFiles { + for i := 1; i < len(filenames); i++ { + if a, b := packageNames[filenames[0]], packageNames[filenames[i]]; a != b { + return nil, fmt.Errorf("Go package %v has inconsistent names %v (%v) and %v (%v)", + importPath, a, filenames[0], b, filenames[i]) + } + } + } + + for _, fdesc := range gen.Request.ProtoFile { + filename := fdesc.GetName() + if gen.FilesByPath[filename] != nil { + return nil, fmt.Errorf("duplicate file name: %q", filename) + } + f, err := newFile(gen, fdesc, packageNames[filename], importPaths[filename]) + if err != nil { + return nil, err + } + gen.Files = append(gen.Files, f) + gen.FilesByPath[filename] = f + } + for _, filename := range gen.Request.FileToGenerate { + f, ok := gen.FilesByPath[filename] + if !ok { + return nil, fmt.Errorf("no descriptor for generated file: %v", filename) + } + f.Generate = true + } + return gen, nil +} + +// Error records an error in code generation. The generator will report the +// error back to protoc and will not produce output. +func (gen *Plugin) Error(err error) { + if gen.err == nil { + gen.err = err + } +} + +// Response returns the generator output. 
+func (gen *Plugin) Response() *pluginpb.CodeGeneratorResponse { + resp := &pluginpb.CodeGeneratorResponse{} + if gen.err != nil { + resp.Error = proto.String(gen.err.Error()) + return resp + } + for _, g := range gen.genFiles { + if g.skip { + continue + } + content, err := g.Content() + if err != nil { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(err.Error()), + } + } + filename := g.filename + if gen.module != "" { + trim := gen.module + "/" + if !strings.HasPrefix(filename, trim) { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(fmt.Sprintf("%v: generated file does not match prefix %q", filename, gen.module)), + } + } + filename = strings.TrimPrefix(filename, trim) + } + resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{ + Name: proto.String(filename), + Content: proto.String(string(content)), + }) + if gen.annotateCode && strings.HasSuffix(g.filename, ".go") { + meta, err := g.metaFile(content) + if err != nil { + return &pluginpb.CodeGeneratorResponse{ + Error: proto.String(err.Error()), + } + } + resp.File = append(resp.File, &pluginpb.CodeGeneratorResponse_File{ + Name: proto.String(filename + ".meta"), + Content: proto.String(meta), + }) + } + } + if gen.SupportedFeatures > 0 { + resp.SupportedFeatures = proto.Uint64(gen.SupportedFeatures) + } + return resp +} + +// A File describes a .proto source file. +type File struct { + Desc protoreflect.FileDescriptor + Proto *descriptorpb.FileDescriptorProto + + GoDescriptorIdent GoIdent // name of Go variable for the file descriptor + GoPackageName GoPackageName // name of this file's Go package + GoImportPath GoImportPath // import path of this file's Go package + + Enums []*Enum // top-level enum declarations + Messages []*Message // top-level message declarations + Extensions []*Extension // top-level extension declarations + Services []*Service // top-level service declarations + + Generate bool // true if we should generate code for this file + + // GeneratedFilenamePrefix is used to construct filenames for generated + // files associated with this source file. + // + // For example, the source file "dir/foo.proto" might have a filename prefix + // of "dir/foo". Appending ".pb.go" produces an output file of "dir/foo.pb.go". + GeneratedFilenamePrefix string + + comments map[pathKey]CommentSet +} + +func newFile(gen *Plugin, p *descriptorpb.FileDescriptorProto, packageName GoPackageName, importPath GoImportPath) (*File, error) { + desc, err := protodesc.NewFile(p, gen.fileReg) + if err != nil { + return nil, fmt.Errorf("invalid FileDescriptorProto %q: %v", p.GetName(), err) + } + if err := gen.fileReg.RegisterFile(desc); err != nil { + return nil, fmt.Errorf("cannot register descriptor %q: %v", p.GetName(), err) + } + f := &File{ + Desc: desc, + Proto: p, + GoPackageName: packageName, + GoImportPath: importPath, + comments: make(map[pathKey]CommentSet), + } + + // Determine the prefix for generated Go files. + prefix := p.GetName() + if ext := path.Ext(prefix); ext == ".proto" || ext == ".protodevel" { + prefix = prefix[:len(prefix)-len(ext)] + } + switch gen.pathType { + case pathTypeLegacy: + // The default is to derive the output filename from the Go import path + // if the file contains a go_package option,or from the input filename instead. + if _, importPath := goPackageOption(p); importPath != "" { + prefix = path.Join(string(importPath), path.Base(prefix)) + } + case pathTypeImport: + // If paths=import, the output filename is derived from the Go import path. 
+ prefix = path.Join(string(f.GoImportPath), path.Base(prefix)) + case pathTypeSourceRelative: + // If paths=source_relative, the output filename is derived from + // the input filename. + } + f.GoDescriptorIdent = GoIdent{ + GoName: "File_" + strs.GoSanitized(p.GetName()), + GoImportPath: f.GoImportPath, + } + f.GeneratedFilenamePrefix = prefix + + for _, loc := range p.GetSourceCodeInfo().GetLocation() { + // Descriptors declarations are guaranteed to have unique comment sets. + // Other locations may not be unique, but we don't use them. + var leadingDetached []Comments + for _, s := range loc.GetLeadingDetachedComments() { + leadingDetached = append(leadingDetached, Comments(s)) + } + f.comments[newPathKey(loc.Path)] = CommentSet{ + LeadingDetached: leadingDetached, + Leading: Comments(loc.GetLeadingComments()), + Trailing: Comments(loc.GetTrailingComments()), + } + } + for i, eds := 0, desc.Enums(); i < eds.Len(); i++ { + f.Enums = append(f.Enums, newEnum(gen, f, nil, eds.Get(i))) + } + for i, mds := 0, desc.Messages(); i < mds.Len(); i++ { + f.Messages = append(f.Messages, newMessage(gen, f, nil, mds.Get(i))) + } + for i, xds := 0, desc.Extensions(); i < xds.Len(); i++ { + f.Extensions = append(f.Extensions, newField(gen, f, nil, xds.Get(i))) + } + for i, sds := 0, desc.Services(); i < sds.Len(); i++ { + f.Services = append(f.Services, newService(gen, f, sds.Get(i))) + } + for _, message := range f.Messages { + if err := message.resolveDependencies(gen); err != nil { + return nil, err + } + } + for _, extension := range f.Extensions { + if err := extension.resolveDependencies(gen); err != nil { + return nil, err + } + } + for _, service := range f.Services { + for _, method := range service.Methods { + if err := method.resolveDependencies(gen); err != nil { + return nil, err + } + } + } + return f, nil +} + +func (f *File) location(idxPath ...int32) Location { + return Location{ + SourceFile: f.Desc.Path(), + Path: idxPath, + } +} + +// goPackageOption interprets a file's go_package option. +// If there is no go_package, it returns ("", ""). +// If there's a simple name, it returns (pkg, ""). +// If the option implies an import path, it returns (pkg, impPath). +func goPackageOption(d *descriptorpb.FileDescriptorProto) (pkg GoPackageName, impPath GoImportPath) { + opt := d.GetOptions().GetGoPackage() + if opt == "" { + return "", "" + } + rawPkg, impPath := goPackageOptionRaw(opt) + pkg = cleanPackageName(rawPkg) + if string(pkg) != rawPkg && impPath != "" { + warn("Malformed 'go_package' option in %q, please specify:\n"+ + "\toption go_package = %q;\n"+ + "A future release of protoc-gen-go will reject this.\n"+ + "See "+goPackageDocURL+" for more information.\n"+ + "\n", d.GetName(), string(impPath)+";"+string(pkg)) + } + return pkg, impPath +} +func goPackageOptionRaw(opt string) (rawPkg string, impPath GoImportPath) { + // A semicolon-delimited suffix delimits the import path and package name. + if i := strings.Index(opt, ";"); i >= 0 { + return opt[i+1:], GoImportPath(opt[:i]) + } + // The presence of a slash implies there's an import path. + if i := strings.LastIndex(opt, "/"); i >= 0 { + return opt[i+1:], GoImportPath(opt) + } + return opt, "" +} + +// An Enum describes an enum. 
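goPackageOptionRaw above encodes two conventions: a semicolon splits "import/path;package_name", and otherwise the last path element after a slash doubles as the package name. A standalone restatement of that rule for illustration (splitGoPackage is a hypothetical helper, not the patch's code):

package main

import (
    "fmt"
    "strings"
)

// splitGoPackage mirrors the semicolon/slash rules documented above.
func splitGoPackage(opt string) (pkg, importPath string) {
    if i := strings.Index(opt, ";"); i >= 0 {
        return opt[i+1:], opt[:i] // "example.com/foo/bar;baz" -> ("baz", "example.com/foo/bar")
    }
    if i := strings.LastIndex(opt, "/"); i >= 0 {
        return opt[i+1:], opt // "example.com/foo/bar" -> ("bar", "example.com/foo/bar")
    }
    return opt, "" // "bar" -> package name only, no import path
}

func main() {
    fmt.Println(splitGoPackage("google.golang.org/protobuf/types/known/anypb"))
    fmt.Println(splitGoPackage("example.com/foo/bar;baz"))
    fmt.Println(splitGoPackage("bar"))
}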
+type Enum struct { + Desc protoreflect.EnumDescriptor + + GoIdent GoIdent // name of the generated Go type + + Values []*EnumValue // enum value declarations + + Location Location // location of this enum + Comments CommentSet // comments associated with this enum +} + +func newEnum(gen *Plugin, f *File, parent *Message, desc protoreflect.EnumDescriptor) *Enum { + var loc Location + if parent != nil { + loc = parent.Location.appendPath(int32(genid.DescriptorProto_EnumType_field_number), int32(desc.Index())) + } else { + loc = f.location(int32(genid.FileDescriptorProto_EnumType_field_number), int32(desc.Index())) + } + enum := &Enum{ + Desc: desc, + GoIdent: newGoIdent(f, desc), + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } + gen.enumsByName[desc.FullName()] = enum + for i, vds := 0, enum.Desc.Values(); i < vds.Len(); i++ { + enum.Values = append(enum.Values, newEnumValue(gen, f, parent, enum, vds.Get(i))) + } + return enum +} + +// An EnumValue describes an enum value. +type EnumValue struct { + Desc protoreflect.EnumValueDescriptor + + GoIdent GoIdent // name of the generated Go declaration + + Parent *Enum // enum in which this value is declared + + Location Location // location of this enum value + Comments CommentSet // comments associated with this enum value +} + +func newEnumValue(gen *Plugin, f *File, message *Message, enum *Enum, desc protoreflect.EnumValueDescriptor) *EnumValue { + // A top-level enum value's name is: EnumName_ValueName + // An enum value contained in a message is: MessageName_ValueName + // + // For historical reasons, enum value names are not camel-cased. + parentIdent := enum.GoIdent + if message != nil { + parentIdent = message.GoIdent + } + name := parentIdent.GoName + "_" + string(desc.Name()) + loc := enum.Location.appendPath(int32(genid.EnumDescriptorProto_Value_field_number), int32(desc.Index())) + return &EnumValue{ + Desc: desc, + GoIdent: f.GoImportPath.Ident(name), + Parent: enum, + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } +} + +// A Message describes a message. 
+type Message struct { + Desc protoreflect.MessageDescriptor + + GoIdent GoIdent // name of the generated Go type + + Fields []*Field // message field declarations + Oneofs []*Oneof // message oneof declarations + + Enums []*Enum // nested enum declarations + Messages []*Message // nested message declarations + Extensions []*Extension // nested extension declarations + + Location Location // location of this message + Comments CommentSet // comments associated with this message +} + +func newMessage(gen *Plugin, f *File, parent *Message, desc protoreflect.MessageDescriptor) *Message { + var loc Location + if parent != nil { + loc = parent.Location.appendPath(int32(genid.DescriptorProto_NestedType_field_number), int32(desc.Index())) + } else { + loc = f.location(int32(genid.FileDescriptorProto_MessageType_field_number), int32(desc.Index())) + } + message := &Message{ + Desc: desc, + GoIdent: newGoIdent(f, desc), + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } + gen.messagesByName[desc.FullName()] = message + for i, eds := 0, desc.Enums(); i < eds.Len(); i++ { + message.Enums = append(message.Enums, newEnum(gen, f, message, eds.Get(i))) + } + for i, mds := 0, desc.Messages(); i < mds.Len(); i++ { + message.Messages = append(message.Messages, newMessage(gen, f, message, mds.Get(i))) + } + for i, fds := 0, desc.Fields(); i < fds.Len(); i++ { + message.Fields = append(message.Fields, newField(gen, f, message, fds.Get(i))) + } + for i, ods := 0, desc.Oneofs(); i < ods.Len(); i++ { + message.Oneofs = append(message.Oneofs, newOneof(gen, f, message, ods.Get(i))) + } + for i, xds := 0, desc.Extensions(); i < xds.Len(); i++ { + message.Extensions = append(message.Extensions, newField(gen, f, message, xds.Get(i))) + } + + // Resolve local references between fields and oneofs. + for _, field := range message.Fields { + if od := field.Desc.ContainingOneof(); od != nil { + oneof := message.Oneofs[od.Index()] + field.Oneof = oneof + oneof.Fields = append(oneof.Fields, field) + } + } + + // Field name conflict resolution. + // + // We assume well-known method names that may be attached to a generated + // message type, as well as a 'Get*' method for each field. For each + // field in turn, we add _s to its name until there are no conflicts. + // + // Any change to the following set of method names is a potential + // incompatible API change because it may change generated field names. + // + // TODO: If we ever support a 'go_name' option to set the Go name of a + // field, we should consider dropping this entirely. The conflict + // resolution algorithm is subtle and surprising (changing the order + // in which fields appear in the .proto source file can change the + // names of fields in generated code), and does not adapt well to + // adding new per-field methods such as setters. + usedNames := map[string]bool{ + "Reset": true, + "String": true, + "ProtoMessage": true, + "Marshal": true, + "Unmarshal": true, + "ExtensionRangeArray": true, + "ExtensionMap": true, + "Descriptor": true, + } + makeNameUnique := func(name string, hasGetter bool) string { + for usedNames[name] || (hasGetter && usedNames["Get"+name]) { + name += "_" + } + usedNames[name] = true + usedNames["Get"+name] = hasGetter + return name + } + for _, field := range message.Fields { + field.GoName = makeNameUnique(field.GoName, true) + field.GoIdent.GoName = message.GoIdent.GoName + "_" + field.GoName + if field.Oneof != nil && field.Oneof.Fields[0] == field { + // Make the name for a oneof unique as well. 
For historical reasons, + // this assumes that a getter method is not generated for oneofs. + // This is incorrect, but fixing it breaks existing code. + field.Oneof.GoName = makeNameUnique(field.Oneof.GoName, false) + field.Oneof.GoIdent.GoName = message.GoIdent.GoName + "_" + field.Oneof.GoName + } + } + + // Oneof field name conflict resolution. + // + // This conflict resolution is incomplete as it does not consider collisions + // with other oneof field types, but fixing it breaks existing code. + for _, field := range message.Fields { + if field.Oneof != nil { + Loop: + for { + for _, nestedMessage := range message.Messages { + if nestedMessage.GoIdent == field.GoIdent { + field.GoIdent.GoName += "_" + continue Loop + } + } + for _, nestedEnum := range message.Enums { + if nestedEnum.GoIdent == field.GoIdent { + field.GoIdent.GoName += "_" + continue Loop + } + } + break Loop + } + } + } + + return message +} + +func (message *Message) resolveDependencies(gen *Plugin) error { + for _, field := range message.Fields { + if err := field.resolveDependencies(gen); err != nil { + return err + } + } + for _, message := range message.Messages { + if err := message.resolveDependencies(gen); err != nil { + return err + } + } + for _, extension := range message.Extensions { + if err := extension.resolveDependencies(gen); err != nil { + return err + } + } + return nil +} + +// A Field describes a message field. +type Field struct { + Desc protoreflect.FieldDescriptor + + // GoName is the base name of this field's Go field and methods. + // For code generated by protoc-gen-go, this means a field named + // '{{GoName}}' and a getter method named 'Get{{GoName}}'. + GoName string // e.g., "FieldName" + + // GoIdent is the base name of a top-level declaration for this field. + // For code generated by protoc-gen-go, this means a wrapper type named + // '{{GoIdent}}' for members fields of a oneof, and a variable named + // 'E_{{GoIdent}}' for extension fields. 
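The conflict-resolution loop above keeps appending underscores until a field name no longer collides with a reserved method name or another field's getter. A small standalone sketch of that scheme (uniqueName is a hypothetical helper mirroring makeNameUnique, not the generator's code):

package main

import "fmt"

// uniqueName mirrors the underscore-appending scheme described above:
// a candidate is rejected while it, or its would-be getter, is already taken.
func uniqueName(name string, hasGetter bool, used map[string]bool) string {
    for used[name] || (hasGetter && used["Get"+name]) {
        name += "_"
    }
    used[name] = true
    used["Get"+name] = hasGetter
    return name
}

func main() {
    used := map[string]bool{"Reset": true, "String": true, "Descriptor": true}
    fmt.Println(uniqueName("String", true, used))     // String_
    fmt.Println(uniqueName("Descriptor", true, used)) // Descriptor_
    fmt.Println(uniqueName("Name", true, used))       // Name
}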
+ GoIdent GoIdent // e.g., "MessageName_FieldName" + + Parent *Message // message in which this field is declared; nil if top-level extension + Oneof *Oneof // containing oneof; nil if not part of a oneof + Extendee *Message // extended message for extension fields; nil otherwise + + Enum *Enum // type for enum fields; nil otherwise + Message *Message // type for message or group fields; nil otherwise + + Location Location // location of this field + Comments CommentSet // comments associated with this field +} + +func newField(gen *Plugin, f *File, message *Message, desc protoreflect.FieldDescriptor) *Field { + var loc Location + switch { + case desc.IsExtension() && message == nil: + loc = f.location(int32(genid.FileDescriptorProto_Extension_field_number), int32(desc.Index())) + case desc.IsExtension() && message != nil: + loc = message.Location.appendPath(int32(genid.DescriptorProto_Extension_field_number), int32(desc.Index())) + default: + loc = message.Location.appendPath(int32(genid.DescriptorProto_Field_field_number), int32(desc.Index())) + } + camelCased := strs.GoCamelCase(string(desc.Name())) + var parentPrefix string + if message != nil { + parentPrefix = message.GoIdent.GoName + "_" + } + field := &Field{ + Desc: desc, + GoName: camelCased, + GoIdent: GoIdent{ + GoImportPath: f.GoImportPath, + GoName: parentPrefix + camelCased, + }, + Parent: message, + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } + return field +} + +func (field *Field) resolveDependencies(gen *Plugin) error { + desc := field.Desc + switch desc.Kind() { + case protoreflect.EnumKind: + name := field.Desc.Enum().FullName() + enum, ok := gen.enumsByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for enum %v", desc.FullName(), name) + } + field.Enum = enum + case protoreflect.MessageKind, protoreflect.GroupKind: + name := desc.Message().FullName() + message, ok := gen.messagesByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for type %v", desc.FullName(), name) + } + field.Message = message + } + if desc.IsExtension() { + name := desc.ContainingMessage().FullName() + message, ok := gen.messagesByName[name] + if !ok { + return fmt.Errorf("field %v: no descriptor for type %v", desc.FullName(), name) + } + field.Extendee = message + } + return nil +} + +// A Oneof describes a message oneof. +type Oneof struct { + Desc protoreflect.OneofDescriptor + + // GoName is the base name of this oneof's Go field and methods. + // For code generated by protoc-gen-go, this means a field named + // '{{GoName}}' and a getter method named 'Get{{GoName}}'. + GoName string // e.g., "OneofName" + + // GoIdent is the base name of a top-level declaration for this oneof. 
+ GoIdent GoIdent // e.g., "MessageName_OneofName" + + Parent *Message // message in which this oneof is declared + + Fields []*Field // fields that are part of this oneof + + Location Location // location of this oneof + Comments CommentSet // comments associated with this oneof +} + +func newOneof(gen *Plugin, f *File, message *Message, desc protoreflect.OneofDescriptor) *Oneof { + loc := message.Location.appendPath(int32(genid.DescriptorProto_OneofDecl_field_number), int32(desc.Index())) + camelCased := strs.GoCamelCase(string(desc.Name())) + parentPrefix := message.GoIdent.GoName + "_" + return &Oneof{ + Desc: desc, + Parent: message, + GoName: camelCased, + GoIdent: GoIdent{ + GoImportPath: f.GoImportPath, + GoName: parentPrefix + camelCased, + }, + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } +} + +// Extension is an alias of Field for documentation. +type Extension = Field + +// A Service describes a service. +type Service struct { + Desc protoreflect.ServiceDescriptor + + GoName string + + Methods []*Method // service method declarations + + Location Location // location of this service + Comments CommentSet // comments associated with this service +} + +func newService(gen *Plugin, f *File, desc protoreflect.ServiceDescriptor) *Service { + loc := f.location(int32(genid.FileDescriptorProto_Service_field_number), int32(desc.Index())) + service := &Service{ + Desc: desc, + GoName: strs.GoCamelCase(string(desc.Name())), + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } + for i, mds := 0, desc.Methods(); i < mds.Len(); i++ { + service.Methods = append(service.Methods, newMethod(gen, f, service, mds.Get(i))) + } + return service +} + +// A Method describes a method in a service. +type Method struct { + Desc protoreflect.MethodDescriptor + + GoName string + + Parent *Service // service in which this method is declared + + Input *Message + Output *Message + + Location Location // location of this method + Comments CommentSet // comments associated with this method +} + +func newMethod(gen *Plugin, f *File, service *Service, desc protoreflect.MethodDescriptor) *Method { + loc := service.Location.appendPath(int32(genid.ServiceDescriptorProto_Method_field_number), int32(desc.Index())) + method := &Method{ + Desc: desc, + GoName: strs.GoCamelCase(string(desc.Name())), + Parent: service, + Location: loc, + Comments: f.comments[newPathKey(loc.Path)], + } + return method +} + +func (method *Method) resolveDependencies(gen *Plugin) error { + desc := method.Desc + + inName := desc.Input().FullName() + in, ok := gen.messagesByName[inName] + if !ok { + return fmt.Errorf("method %v: no descriptor for type %v", desc.FullName(), inName) + } + method.Input = in + + outName := desc.Output().FullName() + out, ok := gen.messagesByName[outName] + if !ok { + return fmt.Errorf("method %v: no descriptor for type %v", desc.FullName(), outName) + } + method.Output = out + + return nil +} + +// A GeneratedFile is a generated file. +type GeneratedFile struct { + gen *Plugin + skip bool + filename string + goImportPath GoImportPath + buf bytes.Buffer + packageNames map[GoImportPath]GoPackageName + usedPackageNames map[GoPackageName]bool + manualImports map[GoImportPath]bool + annotations map[string][]Location +} + +// NewGeneratedFile creates a new generated file with the given filename +// and import path. 
+func (gen *Plugin) NewGeneratedFile(filename string, goImportPath GoImportPath) *GeneratedFile { + g := &GeneratedFile{ + gen: gen, + filename: filename, + goImportPath: goImportPath, + packageNames: make(map[GoImportPath]GoPackageName), + usedPackageNames: make(map[GoPackageName]bool), + manualImports: make(map[GoImportPath]bool), + annotations: make(map[string][]Location), + } + + // All predeclared identifiers in Go are already used. + for _, s := range types.Universe.Names() { + g.usedPackageNames[GoPackageName(s)] = true + } + + gen.genFiles = append(gen.genFiles, g) + return g +} + +// P prints a line to the generated output. It converts each parameter to a +// string following the same rules as fmt.Print. It never inserts spaces +// between parameters. +func (g *GeneratedFile) P(v ...interface{}) { + for _, x := range v { + switch x := x.(type) { + case GoIdent: + fmt.Fprint(&g.buf, g.QualifiedGoIdent(x)) + default: + fmt.Fprint(&g.buf, x) + } + } + fmt.Fprintln(&g.buf) +} + +// QualifiedGoIdent returns the string to use for a Go identifier. +// +// If the identifier is from a different Go package than the generated file, +// the returned name will be qualified (package.name) and an import statement +// for the identifier's package will be included in the file. +func (g *GeneratedFile) QualifiedGoIdent(ident GoIdent) string { + if ident.GoImportPath == g.goImportPath { + return ident.GoName + } + if packageName, ok := g.packageNames[ident.GoImportPath]; ok { + return string(packageName) + "." + ident.GoName + } + packageName := cleanPackageName(baseName(string(ident.GoImportPath))) + for i, orig := 1, packageName; g.usedPackageNames[packageName]; i++ { + packageName = orig + GoPackageName(strconv.Itoa(i)) + } + g.packageNames[ident.GoImportPath] = packageName + g.usedPackageNames[packageName] = true + return string(packageName) + "." + ident.GoName +} + +// Import ensures a package is imported by the generated file. +// +// Packages referenced by QualifiedGoIdent are automatically imported. +// Explicitly importing a package with Import is generally only necessary +// when the import will be blank (import _ "package"). +func (g *GeneratedFile) Import(importPath GoImportPath) { + g.manualImports[importPath] = true +} + +// Write implements io.Writer. +func (g *GeneratedFile) Write(p []byte) (n int, err error) { + return g.buf.Write(p) +} + +// Skip removes the generated file from the plugin output. +func (g *GeneratedFile) Skip() { + g.skip = true +} + +// Unskip reverts a previous call to Skip, re-including the generated file in +// the plugin output. +func (g *GeneratedFile) Unskip() { + g.skip = false +} + +// Annotate associates a symbol in a generated Go file with a location in a +// source .proto file. +// +// The symbol may refer to a type, constant, variable, function, method, or +// struct field. The "T.sel" syntax is used to identify the method or field +// 'sel' on type 'T'. +func (g *GeneratedFile) Annotate(symbol string, loc Location) { + g.annotations[symbol] = append(g.annotations[symbol], loc) +} + +// Content returns the contents of the generated file. +func (g *GeneratedFile) Content() ([]byte, error) { + if !strings.HasSuffix(g.filename, ".go") { + return g.buf.Bytes(), nil + } + + // Reformat generated code. + original := g.buf.Bytes() + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, "", original, parser.ParseComments) + if err != nil { + // Print out the bad code with line numbers. 
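Taken together, the GeneratedFile methods above are used roughly as follows from inside a plugin callback; passing a GoIdent to P triggers QualifiedGoIdent, which qualifies the name and records the import. A hedged sketch; the output suffix and the emitted helper are invented:

package example

import "google.golang.org/protobuf/compiler/protogen"

// generateFile emits a trivial helper per message; illustrative only.
func generateFile(gen *protogen.Plugin, f *protogen.File) {
    g := gen.NewGeneratedFile(f.GeneratedFilenamePrefix+"_names.pb.go", f.GoImportPath)
    g.P("// Code generated by protoc-gen-names. DO NOT EDIT.")
    g.P("package ", f.GoPackageName)
    g.P()
    fmtPackage := protogen.GoImportPath("fmt")
    for _, m := range f.Messages {
        // Passing a GoIdent to P qualifies the name and records the import.
        g.P("func TypeNameOf_", m.GoIdent.GoName, "() string {")
        g.P("    return ", fmtPackage.Ident("Sprintf"), "(\"%T\", (*", m.GoIdent, ")(nil))")
        g.P("}")
        g.P()
    }
}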
+ // This should never happen in practice, but it can while changing generated code + // so consider this a debugging aid. + var src bytes.Buffer + s := bufio.NewScanner(bytes.NewReader(original)) + for line := 1; s.Scan(); line++ { + fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) + } + return nil, fmt.Errorf("%v: unparsable Go source: %v\n%v", g.filename, err, src.String()) + } + + // Collect a sorted list of all imports. + var importPaths [][2]string + rewriteImport := func(importPath string) string { + if f := g.gen.opts.ImportRewriteFunc; f != nil { + return string(f(GoImportPath(importPath))) + } + return importPath + } + for importPath := range g.packageNames { + pkgName := string(g.packageNames[GoImportPath(importPath)]) + pkgPath := rewriteImport(string(importPath)) + importPaths = append(importPaths, [2]string{pkgName, pkgPath}) + } + for importPath := range g.manualImports { + if _, ok := g.packageNames[importPath]; !ok { + pkgPath := rewriteImport(string(importPath)) + importPaths = append(importPaths, [2]string{"_", pkgPath}) + } + } + sort.Slice(importPaths, func(i, j int) bool { + return importPaths[i][1] < importPaths[j][1] + }) + + // Modify the AST to include a new import block. + if len(importPaths) > 0 { + // Insert block after package statement or + // possible comment attached to the end of the package statement. + pos := file.Package + tokFile := fset.File(file.Package) + pkgLine := tokFile.Line(file.Package) + for _, c := range file.Comments { + if tokFile.Line(c.Pos()) > pkgLine { + break + } + pos = c.End() + } + + // Construct the import block. + impDecl := &ast.GenDecl{ + Tok: token.IMPORT, + TokPos: pos, + Lparen: pos, + Rparen: pos, + } + for _, importPath := range importPaths { + impDecl.Specs = append(impDecl.Specs, &ast.ImportSpec{ + Name: &ast.Ident{ + Name: importPath[0], + NamePos: pos, + }, + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(importPath[1]), + ValuePos: pos, + }, + EndPos: pos, + }) + } + file.Decls = append([]ast.Decl{impDecl}, file.Decls...) + } + + var out bytes.Buffer + if err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(&out, fset, file); err != nil { + return nil, fmt.Errorf("%v: can not reformat Go source: %v", g.filename, err) + } + return out.Bytes(), nil +} + +// metaFile returns the contents of the file's metadata file, which is a +// text formatted string of the google.protobuf.GeneratedCodeInfo. 
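Before the metadata helper below, a minimal end-to-end sketch (not drawn from this patch) of the GeneratedFile API above: a protoc plugin that creates a file, prints lines with P, and lets QualifiedGoIdent pull in an import. The Options.Run entry point and the File fields Generate, GeneratedFilenamePrefix, and GoPackageName come from elsewhere in the protogen package and are assumed here.

package main

import "google.golang.org/protobuf/compiler/protogen"

func main() {
	protogen.Options{}.Run(func(gen *protogen.Plugin) error {
		for _, f := range gen.Files {
			if !f.Generate {
				continue
			}
			g := gen.NewGeneratedFile(f.GeneratedFilenamePrefix+"_hello.pb.go", f.GoImportPath)
			g.P("// Code generated by protoc-gen-hello. DO NOT EDIT.")
			g.P("package ", f.GoPackageName)
			g.P()
			// Passing a GoIdent to P routes through QualifiedGoIdent, which
			// qualifies the name and records the "fmt" import automatically.
			g.P("func Hello() { ", protogen.GoImportPath("fmt").Ident("Println"), `("hello") }`)
		}
		return nil
	})
}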
+func (g *GeneratedFile) metaFile(content []byte) (string, error) { + fset := token.NewFileSet() + astFile, err := parser.ParseFile(fset, "", content, 0) + if err != nil { + return "", err + } + info := &descriptorpb.GeneratedCodeInfo{} + + seenAnnotations := make(map[string]bool) + annotate := func(s string, ident *ast.Ident) { + seenAnnotations[s] = true + for _, loc := range g.annotations[s] { + info.Annotation = append(info.Annotation, &descriptorpb.GeneratedCodeInfo_Annotation{ + SourceFile: proto.String(loc.SourceFile), + Path: loc.Path, + Begin: proto.Int32(int32(fset.Position(ident.Pos()).Offset)), + End: proto.Int32(int32(fset.Position(ident.End()).Offset)), + }) + } + } + for _, decl := range astFile.Decls { + switch decl := decl.(type) { + case *ast.GenDecl: + for _, spec := range decl.Specs { + switch spec := spec.(type) { + case *ast.TypeSpec: + annotate(spec.Name.Name, spec.Name) + switch st := spec.Type.(type) { + case *ast.StructType: + for _, field := range st.Fields.List { + for _, name := range field.Names { + annotate(spec.Name.Name+"."+name.Name, name) + } + } + case *ast.InterfaceType: + for _, field := range st.Methods.List { + for _, name := range field.Names { + annotate(spec.Name.Name+"."+name.Name, name) + } + } + } + case *ast.ValueSpec: + for _, name := range spec.Names { + annotate(name.Name, name) + } + } + } + case *ast.FuncDecl: + if decl.Recv == nil { + annotate(decl.Name.Name, decl.Name) + } else { + recv := decl.Recv.List[0].Type + if s, ok := recv.(*ast.StarExpr); ok { + recv = s.X + } + if id, ok := recv.(*ast.Ident); ok { + annotate(id.Name+"."+decl.Name.Name, decl.Name) + } + } + } + } + for a := range g.annotations { + if !seenAnnotations[a] { + return "", fmt.Errorf("%v: no symbol matching annotation %q", g.filename, a) + } + } + + b, err := prototext.Marshal(info) + if err != nil { + return "", err + } + return string(b), nil +} + +// A GoIdent is a Go identifier, consisting of a name and import path. +// The name is a single identifier and may not be a dot-qualified selector. +type GoIdent struct { + GoName string + GoImportPath GoImportPath +} + +func (id GoIdent) String() string { return fmt.Sprintf("%q.%v", id.GoImportPath, id.GoName) } + +// newGoIdent returns the Go identifier for a descriptor. +func newGoIdent(f *File, d protoreflect.Descriptor) GoIdent { + name := strings.TrimPrefix(string(d.FullName()), string(f.Desc.Package())+".") + return GoIdent{ + GoName: strs.GoCamelCase(name), + GoImportPath: f.GoImportPath, + } +} + +// A GoImportPath is the import path of a Go package. +// For example: "google.golang.org/protobuf/compiler/protogen" +type GoImportPath string + +func (p GoImportPath) String() string { return strconv.Quote(string(p)) } + +// Ident returns a GoIdent with s as the GoName and p as the GoImportPath. +func (p GoImportPath) Ident(s string) GoIdent { + return GoIdent{GoName: s, GoImportPath: p} +} + +// A GoPackageName is the name of a Go package. e.g., "protobuf". +type GoPackageName string + +// cleanPackageName converts a string to a valid Go package name. +func cleanPackageName(name string) GoPackageName { + return GoPackageName(strs.GoSanitized(name)) +} + +// baseName returns the last path element of the name, with the last dotted suffix removed. 
+func baseName(name string) string { + // First, find the last element + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i+1:] + } + // Now drop the suffix + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[:i] + } + return name +} + +type pathType int + +const ( + pathTypeLegacy pathType = iota + pathTypeImport + pathTypeSourceRelative +) + +// A Location is a location in a .proto source file. +// +// See the google.protobuf.SourceCodeInfo documentation in descriptor.proto +// for details. +type Location struct { + SourceFile string + Path protoreflect.SourcePath +} + +// appendPath add elements to a Location's path, returning a new Location. +func (loc Location) appendPath(a ...int32) Location { + var n protoreflect.SourcePath + n = append(n, loc.Path...) + n = append(n, a...) + return Location{ + SourceFile: loc.SourceFile, + Path: n, + } +} + +// A pathKey is a representation of a location path suitable for use as a map key. +type pathKey struct { + s string +} + +// newPathKey converts a location path to a pathKey. +func newPathKey(idxPath []int32) pathKey { + buf := make([]byte, 4*len(idxPath)) + for i, x := range idxPath { + binary.LittleEndian.PutUint32(buf[i*4:], uint32(x)) + } + return pathKey{string(buf)} +} + +// CommentSet is a set of leading and trailing comments associated +// with a .proto descriptor declaration. +type CommentSet struct { + LeadingDetached []Comments + Leading Comments + Trailing Comments +} + +// Comments is a comments string as provided by protoc. +type Comments string + +// String formats the comments by inserting // to the start of each line, +// ensuring that there is a trailing newline. +// An empty comment is formatted as an empty string. +func (c Comments) String() string { + if c == "" { + return "" + } + var b []byte + for _, line := range strings.Split(strings.TrimSuffix(string(c), "\n"), "\n") { + b = append(b, "//"...) + b = append(b, line...) + b = append(b, "\n"...) + } + return string(b) +} + +var warnings = true + +func warn(format string, a ...interface{}) { + if warnings { + log.Printf("WARNING: "+format, a...) + } +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go new file mode 100644 index 000000000..cab95a427 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -0,0 +1,791 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package prototext + +import ( + "fmt" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given proto.Message. +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable textproto format unmarshaler. 
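Before the options struct itself, a short usage sketch (not part of the vendored code) of Unmarshal and the DiscardUnknown option described just below; the well-known Duration message is assumed here only to provide a concrete type.

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := &durationpb.Duration{}

	// Default options reject unknown fields with an error.
	if err := prototext.Unmarshal([]byte("seconds: 5 nanos: 100"), d); err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.Seconds, d.Nanos) // 5 100

	// DiscardUnknown silently skips fields that resolve to nothing.
	lenient := prototext.UnmarshalOptions{DiscardUnknown: true}
	if err := lenient.Unmarshal([]byte("seconds: 7 not_a_field: 1"), d); err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.Seconds) // 7
}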
+type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // AllowPartial accepts input for messages that will result in missing + // required fields. If AllowPartial is false (the default), Unmarshal will + // return error if there are any missing required fields. + AllowPartial bool + + // DiscardUnknown specifies whether to ignore unknown fields when parsing. + // An unknown field is any field whose field name or field number does not + // resolve to any known or extension field in the message. + // By default, unmarshal rejects unknown fields as an error. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } +} + +// Unmarshal reads the given []byte and populates the given proto.Message using options in +// UnmarshalOptions object. +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + dec := decoder{text.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *text.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok text.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token: %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + if messageDesc.FullName() == genid.Any_message_fullname { + return d.unmarshalAny(m, checkDelims) + } + + if checkDelims { + tok, err := d.Read() + if err != nil { + return err + } + + if tok.Kind() != text.MessageOpen { + return d.unexpectedTokenError(tok) + } + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch typ := tok.Kind(); typ { + case text.Name: + // Continue below. 
+ case text.EOF: + if checkDelims { + return text.ErrUnexpectedEOF + } + return nil + default: + if checkDelims && typ == text.MessageClose { + return nil + } + return d.unexpectedTokenError(tok) + } + + // Resolve the field descriptor. + var name pref.Name + var fd pref.FieldDescriptor + var xt pref.ExtensionType + var xtErr error + var isFieldNumberName bool + + switch tok.NameKind() { + case text.IdentName: + name = pref.Name(tok.IdentName()) + fd = fieldDescs.ByName(name) + if fd == nil { + // The proto name of a group field is in all lowercase, + // while the textproto field name is the group message name. + gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name { + fd = gd + } + } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name { + fd = nil // reset since field name is actually the message name + } + + case text.TypeName: + // Handle extensions only. This code path is not for Any. + xt, xtErr = d.findExtension(pref.FullName(tok.TypeName())) + + case text.FieldNumber: + isFieldNumberName = true + num := pref.FieldNumber(tok.FieldNumber()) + if !num.IsValid() { + return d.newError(tok.Pos(), "invalid field number: %d", num) + } + fd = fieldDescs.ByNumber(num) + if fd == nil { + xt, xtErr = d.opts.Resolver.FindExtensionByNumber(messageDesc.FullName(), num) + } + } + + if xt != nil { + fd = xt.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } else if xtErr != nil && xtErr != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + // Handle unknown fields. + if fd == nil { + if d.opts.DiscardUnknown || messageDesc.ReservedNames().Has(name) { + d.skipValue() + continue + } + return d.newError(tok.Pos(), "unknown field: %v", tok.RawString()) + } + + // Handle fields identified by field number. + if isFieldNumberName { + // TODO: Add an option to permit parsing field numbers. + // + // This requires careful thought as the MarshalOptions.EmitUnknown + // option allows formatting unknown fields as the field number and the + // best-effort textual representation of the field value. In that case, + // it may not be possible to unmarshal the value from a parser that does + // have information about the unknown field. + return d.newError(tok.Pos(), "cannot specify field by number: %v", tok.RawString()) + } + + switch { + case fd.IsList(): + kind := fd.Kind() + if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + list := m.Mutable(fd).List() + if err := d.unmarshalList(fd, list); err != nil { + return err + } + + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(fd, mmap); err != nil { + return err + } + + default: + kind := fd.Kind() + if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + // If field is a oneof, check if it has already been set. 
+ if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %q, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "non-repeated field %q is repeated", tok.RawString()) + } + + if err := d.unmarshalSingular(fd, m); err != nil { + return err + } + seenNums.Set(num) + } + } + + return nil +} + +// findExtension returns protoreflect.ExtensionType from the Resolver if found. +func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) { + xt, err := d.opts.Resolver.FindExtensionByName(xtName) + if err == nil { + return xt, nil + } + return messageset.FindMessageSetExtension(d.opts.Resolver, xtName) +} + +// unmarshalSingular unmarshals a non-repeated field value specified by the +// given FieldDescriptor. +func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { + var val pref.Value + var err error + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), true) + default: + val, err = d.unmarshalScalar(fd) + } + if err == nil { + m.Set(fd, val) + } + return err +} + +// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the +// given FieldDescriptor. +func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { + tok, err := d.Read() + if err != nil { + return pref.Value{}, err + } + + if tok.Kind() != text.Scalar { + return pref.Value{}, d.unexpectedTokenError(tok) + } + + kind := fd.Kind() + switch kind { + case pref.BoolKind: + if b, ok := tok.Bool(); ok { + return pref.ValueOfBool(b), nil + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if n, ok := tok.Int32(); ok { + return pref.ValueOfInt32(n), nil + } + + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if n, ok := tok.Int64(); ok { + return pref.ValueOfInt64(n), nil + } + + case pref.Uint32Kind, pref.Fixed32Kind: + if n, ok := tok.Uint32(); ok { + return pref.ValueOfUint32(n), nil + } + + case pref.Uint64Kind, pref.Fixed64Kind: + if n, ok := tok.Uint64(); ok { + return pref.ValueOfUint64(n), nil + } + + case pref.FloatKind: + if n, ok := tok.Float32(); ok { + return pref.ValueOfFloat32(n), nil + } + + case pref.DoubleKind: + if n, ok := tok.Float64(); ok { + return pref.ValueOfFloat64(n), nil + } + + case pref.StringKind: + if s, ok := tok.String(); ok { + if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") + } + return pref.ValueOfString(s), nil + } + + case pref.BytesKind: + if b, ok := tok.String(); ok { + return pref.ValueOfBytes([]byte(b)), nil + } + + case pref.EnumKind: + if lit, ok := tok.Enum(); ok { + // Lookup EnumNumber based on name. + if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil { + return pref.ValueOfEnum(enumVal.Number()), nil + } + } + if num, ok := tok.Int32(); ok { + return pref.ValueOfEnum(pref.EnumNumber(num)), nil + } + + default: + panic(fmt.Sprintf("invalid scalar kind %v", kind)) + } + + return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +// unmarshalList unmarshals into given protoreflect.List. A list value can +// either be in [] syntax or simply just a single scalar/message value. 
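Before the implementation that follows, a small sketch (not from the patch) of the two accepted list forms as seen from the caller's side, using the well-known FieldMask message, whose repeated string field is named paths.

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	for _, in := range []string{
		`paths: "a" paths: "b.c"`, // one scalar per field occurrence
		`paths: ["a", "b.c"]`,     // explicit [] list syntax
	} {
		m := &fieldmaskpb.FieldMask{}
		if err := prototext.Unmarshal([]byte(in), m); err != nil {
			log.Fatal(err)
		}
		fmt.Println(m.Paths) // [a b.c] in both cases
	}
}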
+func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { + tok, err := d.Peek() + if err != nil { + return err + } + + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + switch tok.Kind() { + case text.ListOpen: + d.Read() + for { + tok, err := d.Peek() + if err != nil { + return err + } + + switch tok.Kind() { + case text.ListClose: + d.Read() + return nil + case text.MessageOpen: + pval := list.NewElement() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return err + } + list.Append(pval) + default: + return d.unexpectedTokenError(tok) + } + } + + case text.MessageOpen: + pval := list.NewElement() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return err + } + list.Append(pval) + return nil + } + + default: + switch tok.Kind() { + case text.ListOpen: + d.Read() + for { + tok, err := d.Peek() + if err != nil { + return err + } + + switch tok.Kind() { + case text.ListClose: + d.Read() + return nil + case text.Scalar: + pval, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(pval) + default: + return d.unexpectedTokenError(tok) + } + } + + case text.Scalar: + pval, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(pval) + return nil + } + } + + return d.unexpectedTokenError(tok) +} + +// unmarshalMap unmarshals into given protoreflect.Map. A map value is a +// textproto message containing {key: , value: }. +func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside + // unmarshalMapEntry. + var unmarshalMapValue func() (pref.Value, error) + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + unmarshalMapValue = func() (pref.Value, error) { + pval := mmap.NewValue() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return pref.Value{}, err + } + return pval, nil + } + default: + unmarshalMapValue = func() (pref.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.MessageOpen: + return d.unmarshalMapEntry(fd, mmap, unmarshalMapValue) + + case text.ListOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.ListClose: + return nil + case text.MessageOpen: + if err := d.unmarshalMapEntry(fd, mmap, unmarshalMapValue); err != nil { + return err + } + default: + return d.unexpectedTokenError(tok) + } + } + + default: + return d.unexpectedTokenError(tok) + } +} + +// unmarshalMap unmarshals into given protoreflect.Map. A map value is a +// textproto message containing {key: , value: }. +func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error { + var key pref.MapKey + var pval pref.Value +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.Name: + if tok.NameKind() != text.IdentName { + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "unknown map entry field %q", tok.RawString()) + } + d.skipValue() + continue Loop + } + // Continue below. 
+ case text.MessageClose: + break Loop + default: + return d.unexpectedTokenError(tok) + } + + switch name := pref.Name(tok.IdentName()); name { + case genid.MapEntry_Key_field_name: + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + if key.IsValid() { + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) + } + val, err := d.unmarshalScalar(fd.MapKey()) + if err != nil { + return err + } + key = val.MapKey() + + case genid.MapEntry_Value_field_name: + if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + } + if pval.IsValid() { + return d.newError(tok.Pos(), "map entry %q cannot be repeated", name) + } + pval, err = unmarshalMapValue() + if err != nil { + return err + } + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "unknown map entry field %q", name) + } + d.skipValue() + } + } + + if !key.IsValid() { + key = fd.MapKey().Default().MapKey() + } + if !pval.IsValid() { + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + // If value field is not set for message/group types, construct an + // empty one as default. + pval = mmap.NewValue() + default: + pval = fd.MapValue().Default() + } + } + mmap.Set(key, pval) + return nil +} + +// unmarshalAny unmarshals an Any textproto. It can either be in expanded form +// or non-expanded form. +func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { + var typeURL string + var bValue []byte + var seenTypeUrl bool + var seenValue bool + var isExpanded bool + + if checkDelims { + tok, err := d.Read() + if err != nil { + return err + } + + if tok.Kind() != text.MessageOpen { + return d.unexpectedTokenError(tok) + } + } + +Loop: + for { + // Read field name. Can only have 3 possible field names, i.e. type_url, + // value and type URL name inside []. + tok, err := d.Read() + if err != nil { + return err + } + if typ := tok.Kind(); typ != text.Name { + if checkDelims { + if typ == text.MessageClose { + break Loop + } + } else if typ == text.EOF { + break Loop + } + return d.unexpectedTokenError(tok) + } + + switch tok.NameKind() { + case text.IdentName: + // Both type_url and value fields require field separator :. 
+ if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + switch name := pref.Name(tok.IdentName()); name { + case genid.Any_TypeUrl_field_name: + if seenTypeUrl { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) + } + if isExpanded { + return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) + } + tok, err := d.Read() + if err != nil { + return err + } + var ok bool + typeURL, ok = tok.String() + if !ok { + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_TypeUrl_field_fullname, tok.RawString()) + } + seenTypeUrl = true + + case genid.Any_Value_field_name: + if seenValue { + return d.newError(tok.Pos(), "duplicate %v field", genid.Any_Value_field_fullname) + } + if isExpanded { + return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) + } + tok, err := d.Read() + if err != nil { + return err + } + s, ok := tok.String() + if !ok { + return d.newError(tok.Pos(), "invalid %v field value: %v", genid.Any_Value_field_fullname, tok.RawString()) + } + bValue = []byte(s) + seenValue = true + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) + } + } + + case text.TypeName: + if isExpanded { + return d.newError(tok.Pos(), "cannot have more than one type") + } + if seenTypeUrl { + return d.newError(tok.Pos(), "conflict with type_url field") + } + typeURL = tok.TypeName() + var err error + bValue, err = d.unmarshalExpandedAny(typeURL, tok.Pos()) + if err != nil { + return err + } + isExpanded = true + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "invalid field name %q in %v message", tok.RawString(), genid.Any_message_fullname) + } + } + } + + fds := m.Descriptor().Fields() + if len(typeURL) > 0 { + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) + } + if len(bValue) > 0 { + m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) + } + return nil +} + +func (d decoder) unmarshalExpandedAny(typeURL string, pos int) ([]byte, error) { + mt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return nil, d.newError(pos, "unable to resolve message [%v]: %v", typeURL, err) + } + // Create new message for the embedded message type and unmarshal the value + // field into it. + m := mt.New() + if err := d.unmarshalMessage(m, true); err != nil { + return nil, err + } + // Serialize the embedded message and return the resulting bytes. + b, err := proto.MarshalOptions{ + AllowPartial: true, // Never check required fields inside an Any. + Deterministic: true, + }.Marshal(m.Interface()) + if err != nil { + return nil, d.newError(pos, "error in marshaling message into Any.value: %v", err) + } + return b, nil +} + +// skipValue makes the decoder parse a field value in order to advance the read +// to the next field. It relies on Read returning an error if the types are not +// in valid sequence. +func (d decoder) skipValue() error { + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for messages and lists. + switch tok.Kind() { + case text.MessageOpen: + return d.skipMessageValue() + + case text.ListOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.ListClose: + return nil + case text.MessageOpen: + return d.skipMessageValue() + default: + // Skip items. 
This will not validate whether skipped values are + // of the same type or not, same behavior as C++ + // TextFormat::Parser::AllowUnknownField(true) version 3.8.0. + if err := d.skipValue(); err != nil { + return err + } + } + } + } + return nil +} + +// skipMessageValue makes the decoder parse and skip over all fields in a +// message. It assumes that the previous read type is MessageOpen. +func (d decoder) skipMessageValue() error { + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.MessageClose: + return nil + case text.Name: + if err := d.skipValue(); err != nil { + return err + } + } + } +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/doc.go b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go new file mode 100644 index 000000000..162b4f98a --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package prototext marshals and unmarshals protocol buffer messages as the +// textproto format. +package prototext diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go new file mode 100644 index 000000000..0877d71c5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -0,0 +1,433 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package prototext + +import ( + "fmt" + "sort" + "strconv" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given proto.Message in textproto format using default +// options. Do not depend on the output being stable. It may change over time +// across different versions of the program. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable text format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. 
If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // EmitASCII specifies whether to format strings and bytes as ASCII only + // as opposed to using UTF-8 encoding when possible. + EmitASCII bool + + // allowInvalidUTF8 specifies whether to permit the encoding of strings + // with invalid UTF-8. This is unexported as it is intended to only + // be specified by the Format method. + allowInvalidUTF8 bool + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // EmitUnknown specifies whether to emit unknown fields in the output. + // If specified, the unmarshaler may be unable to parse the output. + // The default is to exclude unknown fields. + EmitUnknown bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.allowInvalidUTF8 = true + o.AllowPartial = true + o.EmitUnknown = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal writes the given proto.Message in textproto format using options in +// MarshalOptions object. Do not depend on the output being stable. It may +// change over time across different versions of the program. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(m) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. +func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { + var delims = [2]byte{'{', '}'} + + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case there is nothing to output. + if m == nil { + return []byte{}, nil + } + + enc := encoder{internalEnc, o} + err = enc.marshalMessage(m.ProtoReflect(), false) + if err != nil { + return nil, err + } + out := enc.Bytes() + if len(o.Indent) > 0 && len(out) > 0 { + out = append(out, '\n') + } + if o.AllowPartial { + return out, nil + } + return out, proto.CheckInitialized(m) +} + +type encoder struct { + *text.Encoder + opts MarshalOptions +} + +// marshalMessage marshals the given protoreflect.Message. +func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + if inclDelims { + e.StartMessage() + defer e.EndMessage() + } + + // Handle Any expansion. 
+ if messageDesc.FullName() == genid.Any_message_fullname { + if e.marshalAny(m) { + return nil + } + // If unable to expand, continue on to marshal Any as a regular message. + } + + // Marshal known fields. + fieldDescs := messageDesc.Fields() + size := fieldDescs.Len() + for i := 0; i < size; { + fd := fieldDescs.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + + if fd == nil || !m.Has(fd) { + continue + } + + name := fd.Name() + // Use type name for group field name. + if fd.Kind() == pref.GroupKind { + name = fd.Message().Name() + } + val := m.Get(fd) + if err := e.marshalField(string(name), val, fd); err != nil { + return err + } + } + + // Marshal extensions. + if err := e.marshalExtensions(m); err != nil { + return err + } + + // Marshal unknown fields. + if e.opts.EmitUnknown { + e.marshalUnknown(m.GetUnknown()) + } + + return nil +} + +// marshalField marshals the given field with protoreflect.Value. +func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(name, val.List(), fd) + case fd.IsMap(): + return e.marshalMap(name, val.Map(), fd) + default: + e.WriteName(name) + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { + kind := fd.Kind() + switch kind { + case pref.BoolKind: + e.WriteBool(val.Bool()) + + case pref.StringKind: + s := val.String() + if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return errors.InvalidUTF8(string(fd.FullName())) + } + e.WriteString(s) + + case pref.Int32Kind, pref.Int64Kind, + pref.Sint32Kind, pref.Sint64Kind, + pref.Sfixed32Kind, pref.Sfixed64Kind: + e.WriteInt(val.Int()) + + case pref.Uint32Kind, pref.Uint64Kind, + pref.Fixed32Kind, pref.Fixed64Kind: + e.WriteUint(val.Uint()) + + case pref.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case pref.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 64) + + case pref.BytesKind: + e.WriteString(string(val.Bytes())) + + case pref.EnumKind: + num := val.Enum() + if desc := fd.Enum().Values().ByNumber(num); desc != nil { + e.WriteLiteral(string(desc.Name())) + } else { + // Use numeric value if there is no enum description. + e.WriteInt(int64(num)) + } + + case pref.MessageKind, pref.GroupKind: + return e.marshalMessage(val.Message(), true) + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List as multiple name-value fields. +func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error { + size := list.Len() + for i := 0; i < size; i++ { + e.WriteName(name) + if err := e.marshalSingular(list.Get(i), fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals the given protoreflect.Map as multiple name-value fields. 
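Before the map marshaling below, a brief sketch (not part of the vendored file) of the Marshal, Format, and MarshalOptions entry points defined earlier in this file, plus the Any expansion performed by marshalAny further down; durationpb and anypb are assumed only to supply concrete messages.

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := &durationpb.Duration{Seconds: 5, Nanos: 100}

	// Compact single-line form; the exact output is not guaranteed to be stable.
	b, err := prototext.Marshal(d)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // e.g. seconds:5 nanos:100

	// A non-empty Indent implies Multiline, giving one field per line.
	fmt.Print(prototext.MarshalOptions{Indent: "  "}.Format(d))

	// Any values are expanded when the embedded type can be resolved, e.g.
	// [type.googleapis.com/google.protobuf.Duration]:{seconds:5 nanos:100}
	a, err := anypb.New(d)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(prototext.Format(a))
}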
+func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { + var err error + mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool { + e.WriteName(name) + e.StartMessage() + defer e.EndMessage() + + e.WriteName(string(genid.MapEntry_Key_field_name)) + err = e.marshalSingular(key.Value(), fd.MapKey()) + if err != nil { + return false + } + + e.WriteName(string(genid.MapEntry_Value_field_name)) + err = e.marshalSingular(val, fd.MapValue()) + if err != nil { + return false + } + return true + }) + return err +} + +// marshalExtensions marshals extension fields. +func (e encoder) marshalExtensions(m pref.Message) error { + type entry struct { + key string + value pref.Value + desc pref.FieldDescriptor + } + + // Get a sorted list based on field key first. + var entries []entry + m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + if !fd.IsExtension() { + return true + } + // For MessageSet extensions, the name used is the parent message. + name := fd.FullName() + if messageset.IsMessageSetExtension(fd) { + name = name.Parent() + } + entries = append(entries, entry{ + key: string(name), + value: v, + desc: fd, + }) + return true + }) + // Sort extensions lexicographically. + sort.Slice(entries, func(i, j int) bool { + return entries[i].key < entries[j].key + }) + + // Write out sorted list. + for _, entry := range entries { + // Extension field name is the proto field name enclosed in []. + name := "[" + entry.key + "]" + if err := e.marshalField(name, entry.value, entry.desc); err != nil { + return err + } + } + return nil +} + +// marshalUnknown parses the given []byte and marshals fields out. +// This function assumes proper encoding in the given []byte. +func (e encoder) marshalUnknown(b []byte) { + const dec = 10 + const hex = 16 + for len(b) > 0 { + num, wtype, n := protowire.ConsumeTag(b) + b = b[n:] + e.WriteName(strconv.FormatInt(int64(num), dec)) + + switch wtype { + case protowire.VarintType: + var v uint64 + v, n = protowire.ConsumeVarint(b) + e.WriteUint(v) + case protowire.Fixed32Type: + var v uint32 + v, n = protowire.ConsumeFixed32(b) + e.WriteLiteral("0x" + strconv.FormatUint(uint64(v), hex)) + case protowire.Fixed64Type: + var v uint64 + v, n = protowire.ConsumeFixed64(b) + e.WriteLiteral("0x" + strconv.FormatUint(v, hex)) + case protowire.BytesType: + var v []byte + v, n = protowire.ConsumeBytes(b) + e.WriteString(string(v)) + case protowire.StartGroupType: + e.StartMessage() + var v []byte + v, n = protowire.ConsumeGroup(num, b) + e.marshalUnknown(v) + e.EndMessage() + default: + panic(fmt.Sprintf("prototext: error parsing unknown field wire type: %v", wtype)) + } + + b = b[n:] + } +} + +// marshalAny marshals the given google.protobuf.Any message in expanded form. +// It returns true if it was able to marshal, else false. +func (e encoder) marshalAny(any pref.Message) bool { + // Construct the embedded message. + fds := any.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + typeURL := any.Get(fdType).String() + mt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return false + } + m := mt.New().Interface() + + // Unmarshal bytes into embedded message. + fdValue := fds.ByNumber(genid.Any_Value_field_number) + value := any.Get(fdValue) + err = proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: e.opts.Resolver, + }.Unmarshal(value.Bytes(), m) + if err != nil { + return false + } + + // Get current encoder position. 
If marshaling fails, reset encoder output + // back to this position. + pos := e.Snapshot() + + // Field name is the proto field name enclosed in []. + e.WriteName("[" + typeURL + "]") + err = e.marshalMessage(m.ProtoReflect(), true) + if err != nil { + e.Reset(pos) + return false + } + return true +} diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go new file mode 100644 index 000000000..a427f8b70 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -0,0 +1,538 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protowire parses and formats the raw wire encoding. +// See https://developers.google.com/protocol-buffers/docs/encoding. +// +// For marshaling and unmarshaling entire protobuf messages, +// use the "google.golang.org/protobuf/proto" package instead. +package protowire + +import ( + "io" + "math" + "math/bits" + + "google.golang.org/protobuf/internal/errors" +) + +// Number represents the field number. +type Number int32 + +const ( + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 +) + +// IsValid reports whether the field number is semantically valid. +// +// Note that while numbers within the reserved range are semantically invalid, +// they are syntactically valid in the wire format. +// Implementations may treat records with reserved field numbers as unknown. +func (n Number) IsValid() bool { + return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber +} + +// Type represents the wire type. +type Type int8 + +const ( + VarintType Type = 0 + Fixed32Type Type = 5 + Fixed64Type Type = 1 + BytesType Type = 2 + StartGroupType Type = 3 + EndGroupType Type = 4 +) + +const ( + _ = -iota + errCodeTruncated + errCodeFieldNumber + errCodeOverflow + errCodeReserved + errCodeEndGroup +) + +var ( + errFieldNumber = errors.New("invalid field number") + errOverflow = errors.New("variable length integer overflow") + errReserved = errors.New("cannot parse reserved wire type") + errEndGroup = errors.New("mismatching end group marker") + errParse = errors.New("parse error") +) + +// ParseError converts an error code into an error value. +// This returns nil if n is a non-negative number. +func ParseError(n int) error { + if n >= 0 { + return nil + } + switch n { + case errCodeTruncated: + return io.ErrUnexpectedEOF + case errCodeFieldNumber: + return errFieldNumber + case errCodeOverflow: + return errOverflow + case errCodeReserved: + return errReserved + case errCodeEndGroup: + return errEndGroup + default: + return errParse + } +} + +// ConsumeField parses an entire field record (both tag and value) and returns +// the field number, the wire type, and the total length. +// This returns a negative length upon an error (see ParseError). +// +// The total length includes the tag header and the end group marker (if the +// field is a group). +func ConsumeField(b []byte) (Number, Type, int) { + num, typ, n := ConsumeTag(b) + if n < 0 { + return 0, 0, n // forward error code + } + m := ConsumeFieldValue(num, typ, b[n:]) + if m < 0 { + return 0, 0, m // forward error code + } + return num, typ, n + m +} + +// ConsumeFieldValue parses a field value and returns its length. 
+// This assumes that the field Number and wire Type have already been parsed. +// This returns a negative length upon an error (see ParseError). +// +// When parsing a group, the length includes the end group marker and +// the end group is verified to match the starting field number. +func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + switch typ { + case VarintType: + _, n = ConsumeVarint(b) + return n + case Fixed32Type: + _, n = ConsumeFixed32(b) + return n + case Fixed64Type: + _, n = ConsumeFixed64(b) + return n + case BytesType: + _, n = ConsumeBytes(b) + return n + case StartGroupType: + n0 := len(b) + for { + num2, typ2, n := ConsumeTag(b) + if n < 0 { + return n // forward error code + } + b = b[n:] + if typ2 == EndGroupType { + if num != num2 { + return errCodeEndGroup + } + return n0 - len(b) + } + + n = ConsumeFieldValue(num2, typ2, b) + if n < 0 { + return n // forward error code + } + b = b[n:] + } + case EndGroupType: + return errCodeEndGroup + default: + return errCodeReserved + } +} + +// AppendTag encodes num and typ as a varint-encoded tag and appends it to b. +func AppendTag(b []byte, num Number, typ Type) []byte { + return AppendVarint(b, EncodeTag(num, typ)) +} + +// ConsumeTag parses b as a varint-encoded tag, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeTag(b []byte) (Number, Type, int) { + v, n := ConsumeVarint(b) + if n < 0 { + return 0, 0, n // forward error code + } + num, typ := DecodeTag(v) + if num < MinValidNumber { + return 0, 0, errCodeFieldNumber + } + return num, typ, n +} + +func SizeTag(num Number) int { + return SizeVarint(EncodeTag(num, 0)) // wire type has no effect on size +} + +// AppendVarint appends v to b as a varint-encoded uint64. 
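Before the varint writer below, a small round-trip sketch (not part of the vendored file) of the tag and varint helpers in this package, including the zig-zag helpers defined further down; errors are surfaced through ParseError as described above.

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode field number 1 as a varint field with value 300.
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 300)
	fmt.Printf("% x\n", b) // 08 ac 02

	// Decode it again, converting negative lengths into errors.
	num, typ, n := protowire.ConsumeTag(b)
	if n < 0 {
		log.Fatal(protowire.ParseError(n))
	}
	v, m := protowire.ConsumeVarint(b[n:])
	if m < 0 {
		log.Fatal(protowire.ParseError(m))
	}
	fmt.Println(num, typ, v) // 1 0 300

	// Sint fields use zig-zag encoding (EncodeZigZag/DecodeZigZag below).
	fmt.Println(protowire.EncodeZigZag(-3), protowire.DecodeZigZag(5)) // 5 -3
}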
+func AppendVarint(b []byte, v uint64) []byte { + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +// ConsumeVarint parses b as a varint-encoded uint64, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeVarint(b []byte) (v uint64, n int) { + var y uint64 + if len(b) <= 0 { + return 0, errCodeTruncated + } + v = uint64(b[0]) + if v < 0x80 { + return v, 1 + } + v -= 0x80 + + if len(b) <= 1 { + return 0, errCodeTruncated + } + y = uint64(b[1]) + v += y << 7 + if y < 0x80 { + return v, 2 + } + v -= 0x80 << 7 + + if len(b) <= 2 { + return 0, errCodeTruncated + } + y = uint64(b[2]) + v += y << 14 + if y < 0x80 { + return v, 3 + } + v -= 0x80 << 14 + + if len(b) <= 3 { + return 0, errCodeTruncated + } + y = uint64(b[3]) + v += y << 21 + if y < 0x80 { + return v, 4 + } + v -= 0x80 << 21 + + if len(b) <= 4 { + return 0, errCodeTruncated + } + y = uint64(b[4]) + v += y << 28 + if y < 0x80 { + return v, 5 + } + v -= 0x80 << 28 + + if len(b) <= 5 { + return 0, errCodeTruncated + } + y = uint64(b[5]) + v += y << 35 + if y < 0x80 { + return v, 6 + } + v -= 0x80 << 35 + + if len(b) <= 6 { + return 0, errCodeTruncated + } + y = uint64(b[6]) + v += y << 42 + if y < 0x80 { + return v, 7 + } + v -= 0x80 << 42 + + if len(b) <= 7 { + return 0, errCodeTruncated + } + y = uint64(b[7]) + v += y << 49 + if y < 0x80 { + return v, 8 + } + v -= 0x80 << 49 + + if len(b) <= 8 { + return 0, errCodeTruncated + } + y = uint64(b[8]) + v += y << 56 + if y < 0x80 { + return v, 9 + } + v -= 0x80 << 56 + + if len(b) <= 9 { + return 0, errCodeTruncated + } + y = uint64(b[9]) + v += y << 63 + if y < 2 { + return v, 10 + } + return 0, errCodeOverflow +} + +// SizeVarint returns the encoded size of a varint. +// The size is guaranteed to be within 1 and 10, inclusive. +func SizeVarint(v uint64) int { + // This computes 1 + (bits.Len64(v)-1)/7. 
+ // 9/64 is a good enough approximation of 1/7 + return int(9*uint32(bits.Len64(v))+64) / 64 +} + +// AppendFixed32 appends v to b as a little-endian uint32. +func AppendFixed32(b []byte, v uint32) []byte { + return append(b, + byte(v>>0), + byte(v>>8), + byte(v>>16), + byte(v>>24)) +} + +// ConsumeFixed32 parses b as a little-endian uint32, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeFixed32(b []byte) (v uint32, n int) { + if len(b) < 4 { + return 0, errCodeTruncated + } + v = uint32(b[0])<<0 | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return v, 4 +} + +// SizeFixed32 returns the encoded size of a fixed32; which is always 4. +func SizeFixed32() int { + return 4 +} + +// AppendFixed64 appends v to b as a little-endian uint64. +func AppendFixed64(b []byte, v uint64) []byte { + return append(b, + byte(v>>0), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) +} + +// ConsumeFixed64 parses b as a little-endian uint64, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeFixed64(b []byte) (v uint64, n int) { + if len(b) < 8 { + return 0, errCodeTruncated + } + v = uint64(b[0])<<0 | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return v, 8 +} + +// SizeFixed64 returns the encoded size of a fixed64; which is always 8. +func SizeFixed64() int { + return 8 +} + +// AppendBytes appends v to b as a length-prefixed bytes value. +func AppendBytes(b []byte, v []byte) []byte { + return append(AppendVarint(b, uint64(len(v))), v...) +} + +// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeBytes(b []byte) (v []byte, n int) { + m, n := ConsumeVarint(b) + if n < 0 { + return nil, n // forward error code + } + if m > uint64(len(b[n:])) { + return nil, errCodeTruncated + } + return b[n:][:m], n + int(m) +} + +// SizeBytes returns the encoded size of a length-prefixed bytes value, +// given only the length. +func SizeBytes(n int) int { + return SizeVarint(uint64(n)) + n +} + +// AppendString appends v to b as a length-prefixed bytes value. +func AppendString(b []byte, v string) []byte { + return append(AppendVarint(b, uint64(len(v))), v...) +} + +// ConsumeString parses b as a length-prefixed bytes value, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeString(b []byte) (v string, n int) { + bb, n := ConsumeBytes(b) + return string(bb), n +} + +// AppendGroup appends v to b as group value, with a trailing end group marker. +// The value v must not contain the end marker. +func AppendGroup(b []byte, num Number, v []byte) []byte { + return AppendVarint(append(b, v...), EncodeTag(num, EndGroupType)) +} + +// ConsumeGroup parses b as a group value until the trailing end group marker, +// and verifies that the end marker matches the provided num. The value v +// does not contain the end marker, while the length does contain the end marker. +// This returns a negative length upon an error (see ParseError). +func ConsumeGroup(num Number, b []byte) (v []byte, n int) { + n = ConsumeFieldValue(num, StartGroupType, b) + if n < 0 { + return nil, n // forward error code + } + b = b[:n] + + // Truncate off end group marker, but need to handle denormalized varints. 
+ // Assuming end marker is never 0 (which is always the case since + // EndGroupType is non-zero), we can truncate all trailing bytes where the + // lower 7 bits are all zero (implying that the varint is denormalized). + for len(b) > 0 && b[len(b)-1]&0x7f == 0 { + b = b[:len(b)-1] + } + b = b[:len(b)-SizeTag(num)] + return b, n +} + +// SizeGroup returns the encoded size of a group, given only the length. +func SizeGroup(num Number, n int) int { + return n + SizeTag(num) +} + +// DecodeTag decodes the field Number and wire Type from its unified form. +// The Number is -1 if the decoded field number overflows int32. +// Other than overflow, this does not check for field number validity. +func DecodeTag(x uint64) (Number, Type) { + // NOTE: MessageSet allows for larger field numbers than normal. + if x>>3 > uint64(math.MaxInt32) { + return -1, 0 + } + return Number(x >> 3), Type(x & 7) +} + +// EncodeTag encodes the field Number and wire Type into its unified form. +func EncodeTag(num Number, typ Type) uint64 { + return uint64(num)<<3 | uint64(typ&7) +} + +// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. +// Input: {…, 5, 3, 1, 0, 2, 4, 6, …} +// Output: {…, -3, -2, -1, 0, +1, +2, +3, …} +func DecodeZigZag(x uint64) int64 { + return int64(x>>1) ^ int64(x)<<63>>63 +} + +// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. +// Input: {…, -3, -2, -1, 0, +1, +2, +3, …} +// Output: {…, 5, 3, 1, 0, 2, 4, 6, …} +func EncodeZigZag(x int64) uint64 { + return uint64(x<<1) ^ uint64(x>>63) +} + +// DecodeBool decodes a uint64 as a bool. +// Input: { 0, 1, 2, …} +// Output: {false, true, true, …} +func DecodeBool(x uint64) bool { + return x != 0 +} + +// EncodeBool encodes a bool as a uint64. +// Input: {false, true} +// Output: { 0, 1} +func EncodeBool(x bool) uint64 { + if x { + return 1 + } + return 0 +} diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go new file mode 100644 index 000000000..e7af0fe0d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -0,0 +1,316 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descfmt provides functionality to format descriptors. 
+package descfmt + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type list interface { + Len() int + pragma.DoNotImplement +} + +func FormatList(s fmt.State, r rune, vs list) { + io.WriteString(s, formatListOpt(vs, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) +} +func formatListOpt(vs list, isRoot, allowMulti bool) string { + start, end := "[", "]" + if isRoot { + var name string + switch vs.(type) { + case pref.Names: + name = "Names" + case pref.FieldNumbers: + name = "FieldNumbers" + case pref.FieldRanges: + name = "FieldRanges" + case pref.EnumRanges: + name = "EnumRanges" + case pref.FileImports: + name = "FileImports" + case pref.Descriptor: + name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" + } + start, end = name+"{", "}" + } + + var ss []string + switch vs := vs.(type) { + case pref.Names: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end + case pref.FieldNumbers: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end + case pref.FieldRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0]+1 == r[1] { + ss = append(ss, fmt.Sprintf("%d", r[0])) + } else { + ss = append(ss, fmt.Sprintf("%d:%d", r[0], r[1])) // enum ranges are end exclusive + } + } + return start + joinStrings(ss, false) + end + case pref.EnumRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0] == r[1] { + ss = append(ss, fmt.Sprintf("%d", r[0])) + } else { + ss = append(ss, fmt.Sprintf("%d:%d", r[0], int64(r[1])+1)) // enum ranges are end inclusive + } + } + return start + joinStrings(ss, false) + end + case pref.FileImports: + for i := 0; i < vs.Len(); i++ { + var rs records + rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + ss = append(ss, "{"+rs.Join()+"}") + } + return start + joinStrings(ss, allowMulti) + end + default: + _, isEnumValue := vs.(pref.EnumValueDescriptors) + for i := 0; i < vs.Len(); i++ { + m := reflect.ValueOf(vs).MethodByName("Get") + v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() + ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue)) + } + return start + joinStrings(ss, allowMulti && isEnumValue) + end + } +} + +// descriptorAccessors is a list of accessors to print for each descriptor. +// +// Do not print all accessors since some contain redundant information, +// while others are pointers that we do not want to follow since the descriptor +// is actually a cyclic graph. +// +// Using a list allows us to print the accessors in a sensible order. 
+var descriptorAccessors = map[reflect.Type][]string{ + reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, + reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, + reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, + reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt + reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, + reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, + reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"}, + reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +} + +func FormatDesc(s fmt.State, r rune, t pref.Descriptor) { + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) +} +func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { + rv := reflect.ValueOf(t) + rt := rv.MethodByName("ProtoType").Type().In(0) + + start, end := "{", "}" + if isRoot { + start = rt.Name() + "{" + } + + _, isFile := t.(pref.FileDescriptor) + rs := records{allowMulti: allowMulti} + if t.IsPlaceholder() { + if isFile { + rs.Append(rv, "Path", "Package", "IsPlaceholder") + } else { + rs.Append(rv, "FullName", "IsPlaceholder") + } + } else { + switch { + case isFile: + rs.Append(rv, "Syntax") + case isRoot: + rs.Append(rv, "Syntax", "FullName") + default: + rs.Append(rv, "Name") + } + switch t := t.(type) { + case pref.FieldDescriptor: + for _, s := range descriptorAccessors[rt] { + switch s { + case "MapKey": + if k := t.MapKey(); k != nil { + rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) + } + case "MapValue": + if v := t.MapValue(); v != nil { + switch v.Kind() { + case pref.EnumKind: + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + case pref.MessageKind, pref.GroupKind: + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + default: + rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + } + } + case "ContainingOneof": + if od := t.ContainingOneof(); od != nil { + rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + } + case "ContainingMessage": + if t.IsExtension() { + rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + } + case "Message": + if !t.IsMap() { + rs.Append(rv, s) + } + default: + rs.Append(rv, s) + } + } + case pref.OneofDescriptor: + var ss []string + fs := t.Fields() + for i := 0; i < fs.Len(); i++ { + ss = append(ss, string(fs.Get(i).Name())) + } + if len(ss) > 0 { + rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + } + default: + rs.Append(rv, descriptorAccessors[rt]...) 
+ } + if rv.MethodByName("GoType").IsValid() { + rs.Append(rv, "GoType") + } + } + return start + rs.Join() + end +} + +type records struct { + recs [][2]string + allowMulti bool +} + +func (rs *records) Append(v reflect.Value, accessors ...string) { + for _, a := range accessors { + var rv reflect.Value + if m := v.MethodByName(a); m.IsValid() { + rv = m.Call(nil)[0] + } + if v.Kind() == reflect.Struct && !rv.IsValid() { + rv = v.FieldByName(a) + } + if !rv.IsValid() { + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + } + if _, ok := rv.Interface().(pref.Value); ok { + rv = rv.MethodByName("Interface").Call(nil)[0] + if !rv.IsNil() { + rv = rv.Elem() + } + } + + // Ignore zero values. + var isZero bool + switch rv.Kind() { + case reflect.Interface, reflect.Slice: + isZero = rv.IsNil() + case reflect.Bool: + isZero = rv.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + isZero = rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + isZero = rv.Uint() == 0 + case reflect.String: + isZero = rv.String() == "" + } + if n, ok := rv.Interface().(list); ok { + isZero = n.Len() == 0 + } + if isZero { + continue + } + + // Format the value. + var s string + v := rv.Interface() + switch v := v.(type) { + case list: + s = formatListOpt(v, false, rs.allowMulti) + case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor: + s = string(v.(pref.Descriptor).Name()) + case pref.Descriptor: + s = string(v.FullName()) + case string: + s = strconv.Quote(v) + case []byte: + s = fmt.Sprintf("%q", v) + default: + s = fmt.Sprint(v) + } + rs.recs = append(rs.recs, [2]string{a, s}) + } +} + +func (rs *records) Join() string { + var ss []string + + // In single line mode, simply join all records with commas. + if !rs.allowMulti { + for _, r := range rs.recs { + ss = append(ss, r[0]+formatColon(0)+r[1]) + } + return joinStrings(ss, false) + } + + // In allowMulti line mode, align single line records for more readable output. + var maxLen int + flush := func(i int) { + for _, r := range rs.recs[len(ss):i] { + ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1]) + } + maxLen = 0 + } + for i, r := range rs.recs { + if isMulti := strings.Contains(r[1], "\n"); isMulti { + flush(i) + ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t")) + } else if maxLen < len(r[0]) { + maxLen = len(r[0]) + } + } + flush(len(rs.recs)) + return joinStrings(ss, true) +} + +func formatColon(padding int) string { + // Deliberately introduce instability into the debug output to + // discourage users from performing string comparisons. + // This provides us flexibility to change the output in the future. + if detrand.Bool() { + return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0) + } else { + return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020) + } +} + +func joinStrings(ss []string, isMulti bool) string { + if len(ss) == 0 { + return "" + } + if isMulti { + return "\n\t" + strings.Join(ss, "\n\t") + "\n" + } + return strings.Join(ss, ", ") +} diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go new file mode 100644 index 000000000..8401be8c8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descopts contains the nil pointers to concrete descriptor options. +// +// This package exists as a form of reverse dependency injection so that certain +// packages (e.g., internal/filedesc and internal/filetype can avoid a direct +// dependency on the descriptor proto package). +package descopts + +import pref "google.golang.org/protobuf/reflect/protoreflect" + +// These variables are set by the init function in descriptor.pb.go via logic +// in internal/filetype. In other words, so long as the descriptor proto package +// is linked in, these variables will be populated. +// +// Each variable is populated with a nil pointer to the options struct. +var ( + File pref.ProtoMessage + Enum pref.ProtoMessage + EnumValue pref.ProtoMessage + Message pref.ProtoMessage + Field pref.ProtoMessage + Oneof pref.ProtoMessage + ExtensionRange pref.ProtoMessage + Service pref.ProtoMessage + Method pref.ProtoMessage +) diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go new file mode 100644 index 000000000..a904dd1f9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go @@ -0,0 +1,61 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package detrand provides deterministically random functionality. +// +// The pseudo-randomness of these functions is seeded by the program binary +// itself and guarantees that the output does not change within a program, +// while ensuring that the output is unstable across different builds. +package detrand + +import ( + "encoding/binary" + "hash/fnv" + "os" +) + +// Disable disables detrand such that all functions returns the zero value. +// This function is not concurrent-safe and must be called during program init. +func Disable() { + randSeed = 0 +} + +// Bool returns a deterministically random boolean. +func Bool() bool { + return randSeed%2 == 1 +} + +// randSeed is a best-effort at an approximate hash of the Go binary. +var randSeed = binaryHash() + +func binaryHash() uint64 { + // Open the Go binary. + s, err := os.Executable() + if err != nil { + return 0 + } + f, err := os.Open(s) + if err != nil { + return 0 + } + defer f.Close() + + // Hash the size and several samples of the Go binary. + const numSamples = 8 + var buf [64]byte + h := fnv.New64() + fi, err := f.Stat() + if err != nil { + return 0 + } + binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size())) + h.Write(buf[:8]) + for i := int64(0); i < numSamples; i++ { + if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil { + return 0 + } + h.Write(buf[:]) + } + return h.Sum64() +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go new file mode 100644 index 000000000..fdd9b13f2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go @@ -0,0 +1,213 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package defval marshals and unmarshals textual forms of default values. 
+// +// This package handles both the form historically used in Go struct field tags +// and also the form used by google.protobuf.FieldDescriptorProto.default_value +// since they differ in superficial ways. +package defval + +import ( + "fmt" + "math" + "strconv" + + ptext "google.golang.org/protobuf/internal/encoding/text" + errors "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// Format is the serialization format used to represent the default value. +type Format int + +const ( + _ Format = iota + + // Descriptor uses the serialization format that protoc uses with the + // google.protobuf.FieldDescriptorProto.default_value field. + Descriptor + + // GoTag uses the historical serialization format in Go struct field tags. + GoTag +) + +// Unmarshal deserializes the default string s according to the given kind k. +// When k is an enum, a list of enum value descriptors must be provided. +func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) { + switch k { + case pref.BoolKind: + if f == GoTag { + switch s { + case "1": + return pref.ValueOfBool(true), nil, nil + case "0": + return pref.ValueOfBool(false), nil, nil + } + } else { + switch s { + case "true": + return pref.ValueOfBool(true), nil, nil + case "false": + return pref.ValueOfBool(false), nil, nil + } + } + case pref.EnumKind: + if f == GoTag { + // Go tags use the numeric form of the enum value. + if n, err := strconv.ParseInt(s, 10, 32); err == nil { + if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil { + return pref.ValueOfEnum(ev.Number()), ev, nil + } + } + } else { + // Descriptor default_value use the enum identifier. + ev := evs.ByName(pref.Name(s)) + if ev != nil { + return pref.ValueOfEnum(ev.Number()), ev, nil + } + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if v, err := strconv.ParseInt(s, 10, 32); err == nil { + return pref.ValueOfInt32(int32(v)), nil, nil + } + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if v, err := strconv.ParseInt(s, 10, 64); err == nil { + return pref.ValueOfInt64(int64(v)), nil, nil + } + case pref.Uint32Kind, pref.Fixed32Kind: + if v, err := strconv.ParseUint(s, 10, 32); err == nil { + return pref.ValueOfUint32(uint32(v)), nil, nil + } + case pref.Uint64Kind, pref.Fixed64Kind: + if v, err := strconv.ParseUint(s, 10, 64); err == nil { + return pref.ValueOfUint64(uint64(v)), nil, nil + } + case pref.FloatKind, pref.DoubleKind: + var v float64 + var err error + switch s { + case "-inf": + v = math.Inf(-1) + case "inf": + v = math.Inf(+1) + case "nan": + v = math.NaN() + default: + v, err = strconv.ParseFloat(s, 64) + } + if err == nil { + if k == pref.FloatKind { + return pref.ValueOfFloat32(float32(v)), nil, nil + } else { + return pref.ValueOfFloat64(float64(v)), nil, nil + } + } + case pref.StringKind: + // String values are already unescaped and can be used as is. + return pref.ValueOfString(s), nil, nil + case pref.BytesKind: + if b, ok := unmarshalBytes(s); ok { + return pref.ValueOfBytes(b), nil, nil + } + } + return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) +} + +// Marshal serializes v as the default string according to the given kind k. +// When specifying the Descriptor format for an enum kind, the associated +// enum value descriptor must be provided. 
+func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) { + switch k { + case pref.BoolKind: + if f == GoTag { + if v.Bool() { + return "1", nil + } else { + return "0", nil + } + } else { + if v.Bool() { + return "true", nil + } else { + return "false", nil + } + } + case pref.EnumKind: + if f == GoTag { + return strconv.FormatInt(int64(v.Enum()), 10), nil + } else { + return string(ev.Name()), nil + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + return strconv.FormatInt(v.Int(), 10), nil + case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind: + return strconv.FormatUint(v.Uint(), 10), nil + case pref.FloatKind, pref.DoubleKind: + f := v.Float() + switch { + case math.IsInf(f, -1): + return "-inf", nil + case math.IsInf(f, +1): + return "inf", nil + case math.IsNaN(f): + return "nan", nil + default: + if k == pref.FloatKind { + return strconv.FormatFloat(f, 'g', -1, 32), nil + } else { + return strconv.FormatFloat(f, 'g', -1, 64), nil + } + } + case pref.StringKind: + // String values are serialized as is without any escaping. + return v.String(), nil + case pref.BytesKind: + if s, ok := marshalBytes(v.Bytes()); ok { + return s, nil + } + } + return "", errors.New("could not format value for %v: %v", k, v) +} + +// unmarshalBytes deserializes bytes by applying C unescaping. +func unmarshalBytes(s string) ([]byte, bool) { + // Bytes values use the same escaping as the text format, + // however they lack the surrounding double quotes. + v, err := ptext.UnmarshalString(`"` + s + `"`) + if err != nil { + return nil, false + } + return []byte(v), true +} + +// marshalBytes serializes bytes by using C escaping. +// To match the exact output of protoc, this is identical to the +// CEscape function in strutil.cc of the protoc source code. +func marshalBytes(b []byte) (string, bool) { + var s []byte + for _, c := range b { + switch c { + case '\n': + s = append(s, `\n`...) + case '\r': + s = append(s, `\r`...) + case '\t': + s = append(s, `\t`...) + case '"': + s = append(s, `\"`...) + case '\'': + s = append(s, `\'`...) + case '\\': + s = append(s, `\\`...) + default: + if printableASCII := c >= 0x20 && c <= 0x7e; printableASCII { + s = append(s, c) + } else { + s = append(s, fmt.Sprintf(`\%03o`, c)...) + } + } + } + return string(s), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go new file mode 100644 index 000000000..b1eeea507 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -0,0 +1,258 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package messageset encodes and decodes the obsolete MessageSet wire format. +package messageset + +import ( + "math" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// The MessageSet wire format is equivalent to a message defiend as follows, +// where each Item defines an extension field with a field number of 'type_id' +// and content of 'message'. MessageSet extensions must be non-repeated message +// fields. 
+// +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// } +// } +const ( + FieldItem = protowire.Number(1) + FieldTypeID = protowire.Number(2) + FieldMessage = protowire.Number(3) +) + +// ExtensionName is the field name for extensions of MessageSet. +// +// A valid MessageSet extension must be of the form: +// message MyMessage { +// extend proto2.bridge.MessageSet { +// optional MyMessage message_set_extension = 1234; +// } +// ... +// } +const ExtensionName = "message_set_extension" + +// IsMessageSet returns whether the message uses the MessageSet wire format. +func IsMessageSet(md pref.MessageDescriptor) bool { + xmd, ok := md.(interface{ IsMessageSet() bool }) + return ok && xmd.IsMessageSet() +} + +// IsMessageSetExtension reports this field extends a MessageSet. +func IsMessageSetExtension(fd pref.FieldDescriptor) bool { + if fd.Name() != ExtensionName { + return false + } + if fd.FullName().Parent() != fd.Message().FullName() { + return false + } + return IsMessageSet(fd.ContainingMessage()) +} + +// FindMessageSetExtension locates a MessageSet extension field by name. +// In text and JSON formats, the extension name used is the message itself. +// The extension field name is derived by appending ExtensionName. +func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) { + name := s.Append(ExtensionName) + xt, err := r.FindExtensionByName(name) + if err != nil { + if err == preg.NotFound { + return nil, err + } + return nil, errors.Wrap(err, "%q", name) + } + if !IsMessageSetExtension(xt.TypeDescriptor()) { + return nil, preg.NotFound + } + return xt, nil +} + +// SizeField returns the size of a MessageSet item field containing an extension +// with the given field number, not counting the contents of the message subfield. +func SizeField(num protowire.Number) int { + return 2*protowire.SizeTag(FieldItem) + protowire.SizeTag(FieldTypeID) + protowire.SizeVarint(uint64(num)) +} + +// Unmarshal parses a MessageSet. +// +// It calls fn with the type ID and value of each item in the MessageSet. +// Unknown fields are discarded. +// +// If wantLen is true, the item values include the varint length prefix. +// This is ugly, but simplifies the fast-path decoder in internal/impl. +func Unmarshal(b []byte, wantLen bool, fn func(typeID protowire.Number, value []byte) error) error { + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return protowire.ParseError(n) + } + b = b[n:] + if num != FieldItem || wtyp != protowire.StartGroupType { + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return protowire.ParseError(n) + } + b = b[n:] + continue + } + typeID, value, n, err := ConsumeFieldValue(b, wantLen) + if err != nil { + return err + } + b = b[n:] + if typeID == 0 { + continue + } + if err := fn(typeID, value); err != nil { + return err + } + } + return nil +} + +// ConsumeFieldValue parses b as a MessageSet item field value until and including +// the trailing end group marker. It assumes the start group tag has already been parsed. +// It returns the contents of the type_id and message subfields and the total +// item length. +// +// If wantLen is true, the returned message value includes the length prefix. 
+func ConsumeFieldValue(b []byte, wantLen bool) (typeid protowire.Number, message []byte, n int, err error) { + ilen := len(b) + for { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + switch { + case num == FieldItem && wtyp == protowire.EndGroupType: + if wantLen && len(message) == 0 { + // The message field was missing, which should never happen. + // Be prepared for this case anyway. + message = protowire.AppendVarint(message, 0) + } + return typeid, message, ilen - len(b), nil + case num == FieldTypeID && wtyp == protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + if v < 1 || v > math.MaxInt32 { + return 0, nil, 0, errors.New("invalid type_id in message set") + } + typeid = protowire.Number(v) + case num == FieldMessage && wtyp == protowire.BytesType: + m, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + if message == nil { + if wantLen { + message = b[:n:n] + } else { + message = m[:len(m):len(m)] + } + } else { + // This case should never happen in practice, but handle it for + // correctness: The MessageSet item contains multiple message + // fields, which need to be merged. + // + // In the case where we're returning the length, this becomes + // quite inefficient since we need to strip the length off + // the existing data and reconstruct it with the combined length. + if wantLen { + _, nn := protowire.ConsumeVarint(message) + m0 := message[nn:] + message = nil + message = protowire.AppendVarint(message, uint64(len(m0)+len(m))) + message = append(message, m0...) + message = append(message, m...) + } else { + message = append(message, m...) + } + } + b = b[n:] + default: + // We have no place to put it, so we just ignore unknown fields. + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + } + } +} + +// AppendFieldStart appends the start of a MessageSet item field containing +// an extension with the given number. The caller must add the message +// subfield (including the tag). +func AppendFieldStart(b []byte, num protowire.Number) []byte { + b = protowire.AppendTag(b, FieldItem, protowire.StartGroupType) + b = protowire.AppendTag(b, FieldTypeID, protowire.VarintType) + b = protowire.AppendVarint(b, uint64(num)) + return b +} + +// AppendFieldEnd appends the trailing end group marker for a MessageSet item field. +func AppendFieldEnd(b []byte) []byte { + return protowire.AppendTag(b, FieldItem, protowire.EndGroupType) +} + +// SizeUnknown returns the size of an unknown fields section in MessageSet format. +// +// See AppendUnknown. +func SizeUnknown(unknown []byte) (size int) { + for len(unknown) > 0 { + num, typ, n := protowire.ConsumeTag(unknown) + if n < 0 || typ != protowire.BytesType { + return 0 + } + unknown = unknown[n:] + _, n = protowire.ConsumeBytes(unknown) + if n < 0 { + return 0 + } + unknown = unknown[n:] + size += SizeField(num) + protowire.SizeTag(FieldMessage) + n + } + return size +} + +// AppendUnknown appends unknown fields to b in MessageSet format. +// +// For historic reasons, unresolved items in a MessageSet are stored in a +// message's unknown fields section in non-MessageSet format. That is, an +// unknown item with typeID T and value V appears in the unknown fields as +// a field with number T and value V. +// +// This function converts the unknown fields back into MessageSet form. 
+func AppendUnknown(b, unknown []byte) ([]byte, error) { + for len(unknown) > 0 { + num, typ, n := protowire.ConsumeTag(unknown) + if n < 0 || typ != protowire.BytesType { + return nil, errors.New("invalid data in message set unknown fields") + } + unknown = unknown[n:] + _, n = protowire.ConsumeBytes(unknown) + if n < 0 { + return nil, errors.New("invalid data in message set unknown fields") + } + b = AppendFieldStart(b, num) + b = protowire.AppendTag(b, FieldMessage, protowire.BytesType) + b = append(b, unknown[:n]...) + b = AppendFieldEnd(b) + unknown = unknown[n:] + } + return b, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go new file mode 100644 index 000000000..16c02d7b6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -0,0 +1,207 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag marshals and unmarshals the legacy struct tags as generated +// by historical versions of protoc-gen-go. +package tag + +import ( + "reflect" + "strconv" + "strings" + + defval "google.golang.org/protobuf/internal/encoding/defval" + fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +var byteType = reflect.TypeOf(byte(0)) + +// Unmarshal decodes the tag into a prototype.Field. +// +// The goType is needed to determine the original protoreflect.Kind since the +// tag does not record sufficient information to determine that. +// The type is the underlying field type (e.g., a repeated field may be +// represented by []T, but the Go type passed in is just T). +// A list of enum value descriptors must be provided for enum fields. +// This does not populate the Enum or Message (except for weak message). +// +// This function is a best effort attempt; parsing errors are ignored. 
+func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor { + f := new(fdesc.Field) + f.L0.ParentFile = fdesc.SurrogateProto2 + for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + f.L0.FullName = pref.FullName(s[len("name="):]) + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + f.L1.Number = pref.FieldNumber(n) + case s == "opt": + f.L1.Cardinality = pref.Optional + case s == "req": + f.L1.Cardinality = pref.Required + case s == "rep": + f.L1.Cardinality = pref.Repeated + case s == "varint": + switch goType.Kind() { + case reflect.Bool: + f.L1.Kind = pref.BoolKind + case reflect.Int32: + f.L1.Kind = pref.Int32Kind + case reflect.Int64: + f.L1.Kind = pref.Int64Kind + case reflect.Uint32: + f.L1.Kind = pref.Uint32Kind + case reflect.Uint64: + f.L1.Kind = pref.Uint64Kind + } + case s == "zigzag32": + if goType.Kind() == reflect.Int32 { + f.L1.Kind = pref.Sint32Kind + } + case s == "zigzag64": + if goType.Kind() == reflect.Int64 { + f.L1.Kind = pref.Sint64Kind + } + case s == "fixed32": + switch goType.Kind() { + case reflect.Int32: + f.L1.Kind = pref.Sfixed32Kind + case reflect.Uint32: + f.L1.Kind = pref.Fixed32Kind + case reflect.Float32: + f.L1.Kind = pref.FloatKind + } + case s == "fixed64": + switch goType.Kind() { + case reflect.Int64: + f.L1.Kind = pref.Sfixed64Kind + case reflect.Uint64: + f.L1.Kind = pref.Fixed64Kind + case reflect.Float64: + f.L1.Kind = pref.DoubleKind + } + case s == "bytes": + switch { + case goType.Kind() == reflect.String: + f.L1.Kind = pref.StringKind + case goType.Kind() == reflect.Slice && goType.Elem() == byteType: + f.L1.Kind = pref.BytesKind + default: + f.L1.Kind = pref.MessageKind + } + case s == "group": + f.L1.Kind = pref.GroupKind + case strings.HasPrefix(s, "enum="): + f.L1.Kind = pref.EnumKind + case strings.HasPrefix(s, "json="): + jsonName := s[len("json="):] + if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { + f.L1.JSONName.Init(jsonName) + } + case s == "packed": + f.L1.HasPacked = true + f.L1.IsPacked = true + case strings.HasPrefix(s, "weak="): + f.L1.IsWeak = true + f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):])) + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. + s, i = tag[len("def="):], len(tag) + v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) + f.L1.Default = fdesc.DefaultValue(v, ev) + case s == "proto3": + f.L0.ParentFile = fdesc.SurrogateProto3 + } + tag = strings.TrimPrefix(tag[i:], ",") + } + + // The generator uses the group message name instead of the field name. + // We obtain the real field name by lowercasing the group name. + if f.L1.Kind == pref.GroupKind { + f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName))) + } + return f +} + +// Marshal encodes the protoreflect.FieldDescriptor as a tag. +// +// The enumName must be provided if the kind is an enum. +// Historically, the formulation of the enum "name" was the proto package +// dot-concatenated with the generated Go identifier for the enum type. +// Depending on the context on how Marshal is called, there are different ways +// through which that information is determined. As such it is the caller's +// responsibility to provide a function to obtain that information. 
+func Marshal(fd pref.FieldDescriptor, enumName string) string { + var tag []string + switch fd.Kind() { + case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind: + tag = append(tag, "varint") + case pref.Sint32Kind: + tag = append(tag, "zigzag32") + case pref.Sint64Kind: + tag = append(tag, "zigzag64") + case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind: + tag = append(tag, "fixed32") + case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind: + tag = append(tag, "fixed64") + case pref.StringKind, pref.BytesKind, pref.MessageKind: + tag = append(tag, "bytes") + case pref.GroupKind: + tag = append(tag, "group") + } + tag = append(tag, strconv.Itoa(int(fd.Number()))) + switch fd.Cardinality() { + case pref.Optional: + tag = append(tag, "opt") + case pref.Required: + tag = append(tag, "req") + case pref.Repeated: + tag = append(tag, "rep") + } + if fd.IsPacked() { + tag = append(tag, "packed") + } + name := string(fd.Name()) + if fd.Kind() == pref.GroupKind { + // The name of the FieldDescriptor for a group field is + // lowercased. To find the original capitalization, we + // look in the field's MessageType. + name = string(fd.Message().Name()) + } + tag = append(tag, "name="+name) + if jsonName := fd.JSONName(); jsonName != "" && jsonName != name && !fd.IsExtension() { + // NOTE: The jsonName != name condition is suspect, but it preserve + // the exact same semantics from the previous generator. + tag = append(tag, "json="+jsonName) + } + if fd.IsWeak() { + tag = append(tag, "weak="+string(fd.Message().FullName())) + } + // The previous implementation does not tag extension fields as proto3, + // even when the field is defined in a proto3 file. Match that behavior + // for consistency. + if fd.Syntax() == pref.Proto3 && !fd.IsExtension() { + tag = append(tag, "proto3") + } + if fd.Kind() == pref.EnumKind && enumName != "" { + tag = append(tag, "enum="+enumName) + } + if fd.ContainingOneof() != nil { + tag = append(tag, "oneof") + } + // This must appear last in the tag, since commas in strings aren't escaped. + if fd.HasDefault() { + def, _ := defval.Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), defval.GoTag) + tag = append(tag, "def="+def) + } + return strings.Join(tag, ",") +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go new file mode 100644 index 000000000..eb10ea102 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -0,0 +1,665 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// Decoder is a token-based textproto decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing the byte characters for MessageOpen and + // ListOpen kinds. The top of stack represents the message or the list that + // the current token is nested in. An empty stack means the current token is + // at the top level message. The characters '{' and '<' both represent the + // MessageOpen kind. 
+ openStack []byte + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +// Peek looks ahead and returns the next token and error without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext(d.lastToken.kind) + if err != nil { + return Token{}, err + } + + switch tok.kind { + case comma, semicolon: + tok, err = d.parseNext(tok.kind) + if err != nil { + return Token{}, err + } + } + d.lastToken = tok + return tok, nil +} + +const ( + mismatchedFmt = "mismatched close character %q" + unexpectedFmt = "unexpected character %q" +) + +// parseNext parses the next Token based on given last kind. +func (d *Decoder) parseNext(lastKind Kind) (Token, error) { + // Trim leading spaces. + d.consume(0) + isEOF := false + if len(d.in) == 0 { + isEOF = true + } + + switch lastKind { + case EOF: + return d.consumeToken(EOF, 0, 0), nil + + case bof: + // Start of top level message. Next token can be EOF or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + return d.parseFieldName() + + case Name: + // Next token can be MessageOpen, ListOpen or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + case '[': + d.pushOpenStack(ch) + return d.consumeToken(ListOpen, 1, 0), nil + default: + return d.parseScalar() + } + + case Scalar: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch d.in[0] { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case ListOpen: + // Next token can be ListClose or comma. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case ']': + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case ',': + return d.consumeToken(comma, 1, 0), nil + default: + return Token{}, d.newSyntaxError(unexpectedFmt, ch) + } + } + + case MessageOpen: + // Next token can be MessageClose or Name. 
+ if isEOF { + return Token{}, ErrUnexpectedEOF + } + _, closeCh := d.currentOpenKind() + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + default: + return d.parseFieldName() + } + + case MessageClose: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch ch := d.in[0]; ch { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case ListOpen: + // Next token can be ListClose or comma + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case ',': + return d.consumeToken(comma, 1, 0), nil + default: + return Token{}, d.newSyntaxError(unexpectedFmt, ch) + } + } + + case ListOpen: + // Next token can be ListClose, MessageStart or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case ']': + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + default: + return d.parseScalar() + } + + case ListClose: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch ch := d.in[0]; ch { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + default: + // It is not possible to have this case. Let it panic below. + } + + case comma, semicolon: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. Next token can be EOF or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + return d.parseFieldName() + + case MessageOpen: + // Next token can be MessageClose or Name. 
+ if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + default: + return d.parseFieldName() + } + + case ListOpen: + if lastKind == semicolon { + // It is not be possible to have this case as logic here + // should not have produced a semicolon Token when inside a + // list. Let it panic below. + break + } + // Next token can be MessageOpen or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + default: + return d.parseScalar() + } + } + } + + line, column := d.Position(len(d.orig) - len(d.in)) + panic(fmt.Sprintf("Decoder.parseNext: bug at handling line %d:%d with lastKind=%v", line, column, lastKind)) +} + +var otherCloseChar = map[byte]byte{ + '}': '>', + '>': '}', +} + +// currentOpenKind indicates whether current position is inside a message, list +// or top-level message by returning MessageOpen, ListOpen or bof respectively. +// If the returned kind is either a MessageOpen or ListOpen, it also returns the +// corresponding closing character. +func (d *Decoder) currentOpenKind() (Kind, byte) { + if len(d.openStack) == 0 { + return bof, 0 + } + openCh := d.openStack[len(d.openStack)-1] + switch openCh { + case '{': + return MessageOpen, '}' + case '<': + return MessageOpen, '>' + case '[': + return ListOpen, ']' + } + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) +} + +func (d *Decoder) pushOpenStack(ch byte) { + d.openStack = append(d.openStack, ch) +} + +func (d *Decoder) popOpenStack() { + d.openStack = d.openStack[:len(d.openStack)-1] +} + +// parseFieldName parses field name and separator. +func (d *Decoder) parseFieldName() (tok Token, err error) { + defer func() { + if err == nil && d.tryConsumeChar(':') { + tok.attrs |= hasSeparator + } + }() + + // Extension or Any type URL. + if d.in[0] == '[' { + return d.parseTypeName() + } + + // Identifier. + if size := parseIdent(d.in, false); size > 0 { + return d.consumeToken(Name, size, uint8(IdentName)), nil + } + + // Field number. Identify if input is a valid number that is not negative + // and is decimal integer within 32-bit range. + if num := parseNumber(d.in); num.size > 0 { + if !num.neg && num.kind == numDec { + if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil { + return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil + } + } + return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) + } + + return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in)) +} + +// parseTypeName parses Any type URL or extension field name. The name is +// enclosed in [ and ] characters. The C++ parser does not handle many legal URL +// strings. This implementation is more liberal and allows for the pattern +// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed +// in between [ ], '.', '/' and the sub names. +func (d *Decoder) parseTypeName() (Token, error) { + startPos := len(d.orig) - len(d.in) + // Use alias s to advance first in order to use d.in for error handling. + // Caller already checks for [ as first character. 
+ s := consume(d.in[1:], 0) + if len(s) == 0 { + return Token{}, ErrUnexpectedEOF + } + + var name []byte + for len(s) > 0 && isTypeNameChar(s[0]) { + name = append(name, s[0]) + s = s[1:] + } + s = consume(s, 0) + + var closed bool + for len(s) > 0 && !closed { + switch { + case s[0] == ']': + s = s[1:] + closed = true + + case s[0] == '/', s[0] == '.': + if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') { + return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", + d.orig[startPos:len(d.orig)-len(s)+1]) + } + name = append(name, s[0]) + s = s[1:] + s = consume(s, 0) + for len(s) > 0 && isTypeNameChar(s[0]) { + name = append(name, s[0]) + s = s[1:] + } + s = consume(s, 0) + + default: + return Token{}, d.newSyntaxError( + "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1]) + } + } + + if !closed { + return Token{}, ErrUnexpectedEOF + } + + // First character cannot be '.'. Last character cannot be '.' or '/'. + size := len(name) + if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' { + return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", + d.orig[startPos:len(d.orig)-len(s)]) + } + + d.in = s + endPos := len(d.orig) - len(d.in) + d.consume(0) + + return Token{ + kind: Name, + attrs: uint8(TypeName), + pos: startPos, + raw: d.orig[startPos:endPos], + str: string(name), + }, nil +} + +func isTypeNameChar(b byte) bool { + return (b == '-' || b == '_' || + ('0' <= b && b <= '9') || + ('a' <= b && b <= 'z') || + ('A' <= b && b <= 'Z')) +} + +func isWhiteSpace(b byte) bool { + switch b { + case ' ', '\n', '\r', '\t': + return true + default: + return false + } +} + +// parseIdent parses an unquoted proto identifier and returns size. +// If allowNeg is true, it allows '-' to be the first character in the +// identifier. This is used when parsing literal values like -infinity, etc. +// Regular expression matches an identifier: `^[_a-zA-Z][_a-zA-Z0-9]*` +func parseIdent(input []byte, allowNeg bool) int { + var size int + + s := input + if len(s) == 0 { + return 0 + } + + if allowNeg && s[0] == '-' { + s = s[1:] + size++ + if len(s) == 0 { + return 0 + } + } + + switch { + case s[0] == '_', + 'a' <= s[0] && s[0] <= 'z', + 'A' <= s[0] && s[0] <= 'Z': + s = s[1:] + size++ + default: + return 0 + } + + for len(s) > 0 && (s[0] == '_' || + 'a' <= s[0] && s[0] <= 'z' || + 'A' <= s[0] && s[0] <= 'Z' || + '0' <= s[0] && s[0] <= '9') { + s = s[1:] + size++ + } + + if len(s) > 0 && !isDelim(s[0]) { + return 0 + } + + return size +} + +// parseScalar parses for a string, literal or number value. +func (d *Decoder) parseScalar() (Token, error) { + if d.in[0] == '"' || d.in[0] == '\'' { + return d.parseStringValue() + } + + if tok, ok := d.parseLiteralValue(); ok { + return tok, nil + } + + if tok, ok := d.parseNumberValue(); ok { + return tok, nil + } + + return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in)) +} + +// parseLiteralValue parses a literal value. A literal value is used for +// bools, special floats and enums. This function simply identifies that the +// field value is a literal. +func (d *Decoder) parseLiteralValue() (Token, bool) { + size := parseIdent(d.in, true) + if size == 0 { + return Token{}, false + } + return d.consumeToken(Scalar, size, literalValue), true +} + +// consumeToken constructs a Token for given Kind from d.in and consumes given +// size-length from it. 
+func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { + // Important to compute raw and pos before consuming. + tok := Token{ + kind: kind, + attrs: attrs, + pos: len(d.orig) - len(d.in), + raw: d.in[:size], + } + d.consume(size) + return tok +} + +// newSyntaxError returns a syntax error with line and column information for +// current position. +func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { + e := errors.New(f, x...) + line, column := d.Position(len(d.orig) - len(d.in)) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +func (d *Decoder) tryConsumeChar(c byte) bool { + if len(d.in) > 0 && d.in[0] == c { + d.consume(1) + return true + } + return false +} + +// consume consumes n bytes of input and any subsequent whitespace or comments. +func (d *Decoder) consume(n int) { + d.in = consume(d.in, n) + return +} + +// consume consumes n bytes of input and any subsequent whitespace or comments. +func consume(b []byte, n int) []byte { + b = b[n:] + for len(b) > 0 { + switch b[0] { + case ' ', '\n', '\r', '\t': + b = b[1:] + case '#': + if i := bytes.IndexByte(b, '\n'); i >= 0 { + b = b[i+len("\n"):] + } else { + b = nil + } + default: + return b + } + } + return b +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`) + +// isDelim returns true if given byte is a delimiter character. +func isDelim(c byte) bool { + return !(c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go new file mode 100644 index 000000000..f2d90b789 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -0,0 +1,190 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +// parseNumberValue parses a number from the input and returns a Token object. +func (d *Decoder) parseNumberValue() (Token, bool) { + in := d.in + num := parseNumber(in) + if num.size == 0 { + return Token{}, false + } + numAttrs := num.kind + if num.neg { + numAttrs |= isNegative + } + strSize := num.size + last := num.size - 1 + if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') { + strSize = last + } + tok := Token{ + kind: Scalar, + attrs: numberValue, + pos: len(d.orig) - len(d.in), + raw: d.in[:num.size], + str: string(d.in[:strSize]), + numAttrs: numAttrs, + } + d.consume(num.size) + return tok, true +} + +const ( + numDec uint8 = (1 << iota) / 2 + numHex + numOct + numFloat +) + +// number is the result of parsing out a valid number from parseNumber. It +// contains data for doing float or integer conversion via the strconv package +// in conjunction with the input bytes. 
+type number struct { + kind uint8 + neg bool + size int +} + +// parseNumber constructs a number object from given input. It allows for the +// following patterns: +// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) +// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// It also returns the number of parsed bytes for the given number, 0 if it is +// not a number. +func parseNumber(input []byte) number { + kind := numDec + var size int + var neg bool + + s := input + if len(s) == 0 { + return number{} + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + size++ + if len(s) == 0 { + return number{} + } + } + + // C++ allows for whitespace and comments in between the negative sign and + // the rest of the number. This logic currently does not but is consistent + // with v1. + + switch { + case s[0] == '0': + if len(s) > 1 { + switch { + case s[1] == 'x' || s[1] == 'X': + // Parse as hex number. + kind = numHex + n := 2 + s = s[2:] + for len(s) > 0 && (('0' <= s[0] && s[0] <= '9') || + ('a' <= s[0] && s[0] <= 'f') || + ('A' <= s[0] && s[0] <= 'F')) { + s = s[1:] + n++ + } + if n == 2 { + return number{} + } + size += n + + case '0' <= s[1] && s[1] <= '7': + // Parse as octal number. + kind = numOct + n := 2 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '7' { + s = s[1:] + n++ + } + size += n + } + + if kind&(numHex|numOct) > 0 { + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } + return number{kind: kind, neg: neg, size: size} + } + } + s = s[1:] + size++ + + case '1' <= s[0] && s[0] <= '9': + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + + case s[0] == '.': + // Set kind to numFloat to signify the intent to parse as float. And + // that it needs to have other digits after '.'. + kind = numFloat + + default: + return number{} + } + + // . followed by 0 or more digits. + if len(s) > 0 && s[0] == '.' { + n := 1 + s = s[1:] + // If decimal point was before any digits, it should be followed by + // other digits. + if len(s) == 0 && kind == numFloat { + return number{} + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + kind = numFloat + } + + // e or E followed by an optional - or + and 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + kind = numFloat + s = s[1:] + n := 1 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return number{} + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + } + + // Optional suffix f or F for floats. + if len(s) > 0 && (s[0] == 'f' || s[0] == 'F') { + kind = numFloat + s = s[1:] + size++ + } + + // Check that next byte is a delimiter or it is at the end. + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } + + return number{kind: kind, neg: neg, size: size} +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go new file mode 100644 index 000000000..d4d349023 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go @@ -0,0 +1,161 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package text + +import ( + "bytes" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +// parseStringValue parses string field token. +// This differs from parseString since the text format allows +// multiple back-to-back string literals where they are semantically treated +// as a single large string with all values concatenated. +// +// E.g., `"foo" "bar" "baz"` => "foobarbaz" +func (d *Decoder) parseStringValue() (Token, error) { + // Note that the ending quote is sufficient to unambiguously mark the end + // of a string. Thus, the text grammar does not require intervening + // whitespace or control characters in-between strings. + // Thus, the following is valid: + // `"foo"'bar'"baz"` => "foobarbaz" + in0 := d.in + var ss []string + for len(d.in) > 0 && (d.in[0] == '"' || d.in[0] == '\'') { + s, err := d.parseString() + if err != nil { + return Token{}, err + } + ss = append(ss, s) + } + // d.in already points to the end of the value at this point. + return Token{ + kind: Scalar, + attrs: stringValue, + pos: len(d.orig) - len(in0), + raw: in0[:len(in0)-len(d.in)], + str: strings.Join(ss, ""), + }, nil +} + +// parseString parses a string value enclosed in " or '. +func (d *Decoder) parseString() (string, error) { + in := d.in + if len(in) == 0 { + return "", ErrUnexpectedEOF + } + quote := in[0] + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", d.newSyntaxError("invalid UTF-8 detected") + case r == 0 || r == '\n': + return "", d.newSyntaxError("invalid character %q in string", r) + case r == rune(quote): + in = in[1:] + d.consume(len(d.in) - len(in)) + return string(out), nil + case r == '\\': + if len(in) < 2 { + return "", ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\'', '\\', '?': + in, out = in[2:], append(out, r) + case 'a': + in, out = in[2:], append(out, '\a') + case 'b': + in, out = in[2:], append(out, '\b') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'v': + in, out = in[2:], append(out, '\v') + case 'f': + in, out = in[2:], append(out, '\f') + case '0', '1', '2', '3', '4', '5', '6', '7': + // One, two, or three octal characters. + n := len(in[1:]) - len(bytes.TrimLeft(in[1:], "01234567")) + if n > 3 { + n = 3 + } + v, err := strconv.ParseUint(string(in[1:1+n]), 8, 8) + if err != nil { + return "", d.newSyntaxError("invalid octal escape code %q in string", in[:1+n]) + } + in, out = in[1+n:], append(out, byte(v)) + case 'x': + // One or two hexadecimal characters. 
+ n := len(in[2:]) - len(bytes.TrimLeft(in[2:], "0123456789abcdefABCDEF")) + if n > 2 { + n = 2 + } + v, err := strconv.ParseUint(string(in[2:2+n]), 16, 8) + if err != nil { + return "", d.newSyntaxError("invalid hex escape code %q in string", in[:2+n]) + } + in, out = in[2+n:], append(out, byte(v)) + case 'u', 'U': + // Four or eight hexadecimal characters + n := 6 + if r == 'U' { + n = 10 + } + if len(in) < n { + return "", ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:n]), 16, 32) + if utf8.MaxRune < v || err != nil { + return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:n]) + } + in = in[n:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || r == unicode.ReplacementChar || err != nil { + return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", d.newSyntaxError("invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", ErrUnexpectedEOF +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } + +// UnmarshalString returns an unescaped string given a textproto string value. +// String value needs to contain single or double quotes. This is only used by +// internal/encoding/defval package for unmarshaling bytes. +func UnmarshalString(s string) (string, error) { + d := NewDecoder([]byte(s)) + return d.parseString() +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go new file mode 100644 index 000000000..83d2b0d5a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go @@ -0,0 +1,373 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/flags" +) + +// Kind represents a token kind expressible in the textproto format. +type Kind uint8 + +// Kind values. +const ( + Invalid Kind = iota + EOF + Name // Name indicates the field name. + Scalar // Scalar are scalar values, e.g. "string", 47, ENUM_LITERAL, true. + MessageOpen + MessageClose + ListOpen + ListClose + + // comma and semi-colon are only for parsing in between values and should not be exposed. + comma + semicolon + + // bof indicates beginning of file, which is the default token + // kind at the beginning of parsing. + bof = Invalid +) + +func (t Kind) String() string { + switch t { + case Invalid: + return "" + case EOF: + return "eof" + case Scalar: + return "scalar" + case Name: + return "name" + case MessageOpen: + return "{" + case MessageClose: + return "}" + case ListOpen: + return "[" + case ListClose: + return "]" + case comma: + return "," + case semicolon: + return ";" + default: + return fmt.Sprintf("", uint8(t)) + } +} + +// NameKind represents different types of field names. +type NameKind uint8 + +// NameKind values. 
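The \u/\U escape handling above combines a high surrogate with the following \uXXXX escape via utf16.DecodeRune. A minimal standalone sketch of that technique (decodeU and the sample input are assumptions for illustration, not the vendored code):

package main

import (
	"fmt"
	"strconv"
	"unicode/utf16"
)

// decodeU decodes a leading \uXXXX escape, pairing it with a second \uXXXX
// escape when the first value is a UTF-16 surrogate, and returns the rune
// plus the remaining input. Error handling is deliberately minimal.
func decodeU(s string) (rune, string, error) {
	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
		return 0, s, fmt.Errorf("not a \\u escape: %q", s)
	}
	v, err := strconv.ParseUint(s[2:6], 16, 32)
	if err != nil {
		return 0, s, err
	}
	r, rest := rune(v), s[6:]
	if utf16.IsSurrogate(r) {
		if len(rest) < 6 || rest[0] != '\\' || rest[1] != 'u' {
			return 0, s, fmt.Errorf("unpaired surrogate in %q", s)
		}
		v2, err := strconv.ParseUint(rest[2:6], 16, 32)
		if err != nil {
			return 0, s, err
		}
		r = utf16.DecodeRune(r, rune(v2))
		rest = rest[6:]
	}
	return r, rest, nil
}

func main() {
	// U+1F600 encoded as a UTF-16 surrogate pair.
	r, rest, err := decodeU(`\uD83D\uDE00 tail`)
	fmt.Printf("%q %q %v\n", r, rest, err)
}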
+const ( + IdentName NameKind = iota + 1 + TypeName + FieldNumber +) + +func (t NameKind) String() string { + switch t { + case IdentName: + return "IdentName" + case TypeName: + return "TypeName" + case FieldNumber: + return "FieldNumber" + default: + return fmt.Sprintf("", uint8(t)) + } +} + +// Bit mask in Token.attrs to indicate if a Name token is followed by the +// separator char ':'. The field name separator char is optional for message +// field or repeated message field, but required for all other types. Decoder +// simply indicates whether a Name token is followed by separator or not. It is +// up to the prototext package to validate. +const hasSeparator = 1 << 7 + +// Scalar value types. +const ( + numberValue = iota + 1 + stringValue + literalValue +) + +// Bit mask in Token.numAttrs to indicate that the number is a negative. +const isNegative = 1 << 7 + +// Token provides a parsed token kind and value. Values are provided by the +// different accessor methods. +type Token struct { + // Kind of the Token object. + kind Kind + // attrs contains metadata for the following Kinds: + // Name: hasSeparator bit and one of NameKind. + // Scalar: one of numberValue, stringValue, literalValue. + attrs uint8 + // numAttrs contains metadata for numberValue: + // - highest bit is whether negative or positive. + // - lower bits indicate one of numDec, numHex, numOct, numFloat. + numAttrs uint8 + // pos provides the position of the token in the original input. + pos int + // raw bytes of the serialized token. + // This is a subslice into the original input. + raw []byte + // str contains parsed string for the following: + // - stringValue of Scalar kind + // - numberValue of Scalar kind + // - TypeName of Name kind + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// NameKind returns IdentName, TypeName or FieldNumber. +// It panics if type is not Name. +func (t Token) NameKind() NameKind { + if t.kind == Name { + return NameKind(t.attrs &^ hasSeparator) + } + panic(fmt.Sprintf("Token is not a Name type: %s", t.kind)) +} + +// HasSeparator returns true if the field name is followed by the separator char +// ':', else false. It panics if type is not Name. +func (t Token) HasSeparator() bool { + if t.kind == Name { + return t.attrs&hasSeparator != 0 + } + panic(fmt.Sprintf("Token is not a Name type: %s", t.kind)) +} + +// IdentName returns the value for IdentName type. +func (t Token) IdentName() string { + if t.kind == Name && t.attrs&uint8(IdentName) != 0 { + return string(t.raw) + } + panic(fmt.Sprintf("Token is not an IdentName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) +} + +// TypeName returns the value for TypeName type. +func (t Token) TypeName() string { + if t.kind == Name && t.attrs&uint8(TypeName) != 0 { + return t.str + } + panic(fmt.Sprintf("Token is not a TypeName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) +} + +// FieldNumber returns the value for FieldNumber type. It returns a +// non-negative int32 value. Caller will still need to validate for the correct +// field number range. 
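Token.attrs above packs a NameKind into the low bits and the hasSeparator flag into bit 7 of a single byte. A toy sketch of that packing scheme (pack and unpack are illustrative names, not part of the package):

package main

import "fmt"

const hasSep = 1 << 7 // high bit: field name was followed by ':'

// pack stores a small enum value in the low 7 bits and a boolean flag in the
// high bit of one byte, mirroring the Token.attrs layout described above.
func pack(kind uint8, sep bool) uint8 {
	attrs := kind
	if sep {
		attrs |= hasSep
	}
	return attrs
}

// unpack reverses pack, clearing the flag bit to recover the enum value.
func unpack(attrs uint8) (kind uint8, sep bool) {
	return attrs &^ hasSep, attrs&hasSep != 0
}

func main() {
	attrs := pack(3 /* e.g. a FieldNumber-like kind */, true)
	kind, sep := unpack(attrs)
	fmt.Printf("attrs=%08b kind=%d separator=%t\n", attrs, kind, sep)
}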
+func (t Token) FieldNumber() int32 { + if t.kind != Name || t.attrs&uint8(FieldNumber) == 0 { + panic(fmt.Sprintf("Token is not a FieldNumber: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) + } + // Following should not return an error as it had already been called right + // before this Token was constructed. + num, _ := strconv.ParseInt(string(t.raw), 10, 32) + return int32(num) +} + +// String returns the string value for a Scalar type. +func (t Token) String() (string, bool) { + if t.kind != Scalar || t.attrs != stringValue { + return "", false + } + return t.str, true +} + +// Enum returns the literal value for a Scalar type for use as enum literals. +func (t Token) Enum() (string, bool) { + if t.kind != Scalar || t.attrs != literalValue || (len(t.raw) > 0 && t.raw[0] == '-') { + return "", false + } + return string(t.raw), true +} + +// Bool returns the bool value for a Scalar type. +func (t Token) Bool() (bool, bool) { + if t.kind != Scalar { + return false, false + } + switch t.attrs { + case literalValue: + if b, ok := boolLits[string(t.raw)]; ok { + return b, true + } + case numberValue: + // Unsigned integer representation of 0 or 1 is permitted: 00, 0x0, 01, + // 0x1, etc. + n, err := strconv.ParseUint(t.str, 0, 64) + if err == nil { + switch n { + case 0: + return false, true + case 1: + return true, true + } + } + } + return false, false +} + +// These exact boolean literals are the ones supported in C++. +var boolLits = map[string]bool{ + "t": true, + "true": true, + "True": true, + "f": false, + "false": false, + "False": false, +} + +// Uint64 returns the uint64 value for a Scalar type. +func (t Token) Uint64() (uint64, bool) { + if t.kind != Scalar || t.attrs != numberValue || + t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { + return 0, false + } + n, err := strconv.ParseUint(t.str, 0, 64) + if err != nil { + return 0, false + } + return n, true +} + +// Uint32 returns the uint32 value for a Scalar type. +func (t Token) Uint32() (uint32, bool) { + if t.kind != Scalar || t.attrs != numberValue || + t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { + return 0, false + } + n, err := strconv.ParseUint(t.str, 0, 32) + if err != nil { + return 0, false + } + return uint32(n), true +} + +// Int64 returns the int64 value for a Scalar type. +func (t Token) Int64() (int64, bool) { + if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { + return 0, false + } + if n, err := strconv.ParseInt(t.str, 0, 64); err == nil { + return n, true + } + // C++ accepts large positive hex numbers as negative values. + // This feature is here for proto1 backwards compatibility purposes. + if flags.ProtoLegacy && (t.numAttrs == numHex) { + if n, err := strconv.ParseUint(t.str, 0, 64); err == nil { + return int64(n), true + } + } + return 0, false +} + +// Int32 returns the int32 value for a Scalar type. +func (t Token) Int32() (int32, bool) { + if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { + return 0, false + } + if n, err := strconv.ParseInt(t.str, 0, 32); err == nil { + return int32(n), true + } + // C++ accepts large positive hex numbers as negative values. + // This feature is here for proto1 backwards compatibility purposes. + if flags.ProtoLegacy && (t.numAttrs == numHex) { + if n, err := strconv.ParseUint(t.str, 0, 32); err == nil { + return int32(n), true + } + } + return 0, false +} + +// Float64 returns the float64 value for a Scalar type. 
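Two details of the numeric accessors above are easy to miss: strconv with base 0 accepts the same decimal, octal, and hex spellings the lexer recognizes, and the ProtoLegacy branch reinterprets a large positive hex literal as a negative value by converting the unsigned result. A standalone sketch of both behaviors (illustrative only):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Base 0 lets ParseUint infer the base from the prefix, which is how the
	// Bool/Uint/Int accessors above accept "1", "01", "0x1", and so on.
	for _, s := range []string{"1", "01", "0x1", "0xff"} {
		n, err := strconv.ParseUint(s, 0, 64)
		fmt.Printf("%-5s => %d (err=%v)\n", s, n, err)
	}

	// A large positive hex value overflows int64, but its unsigned parse can
	// be converted to obtain the two's-complement negative value, which is
	// the effect of the ProtoLegacy branch above.
	s := "0xffffffffffffffff"
	if _, err := strconv.ParseInt(s, 0, 64); err != nil {
		u, _ := strconv.ParseUint(s, 0, 64)
		fmt.Printf("%s => int64(%d)\n", s, int64(u)) // prints -1
	}
}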
+func (t Token) Float64() (float64, bool) { + if t.kind != Scalar { + return 0, false + } + switch t.attrs { + case literalValue: + if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { + return f, true + } + case numberValue: + n, err := strconv.ParseFloat(t.str, 64) + if err == nil { + return n, true + } + nerr := err.(*strconv.NumError) + if nerr.Err == strconv.ErrRange { + return n, true + } + } + return 0, false +} + +// Float32 returns the float32 value for a Scalar type. +func (t Token) Float32() (float32, bool) { + if t.kind != Scalar { + return 0, false + } + switch t.attrs { + case literalValue: + if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { + return float32(f), true + } + case numberValue: + n, err := strconv.ParseFloat(t.str, 64) + if err == nil { + // Overflows are treated as (-)infinity. + return float32(n), true + } + nerr := err.(*strconv.NumError) + if nerr.Err == strconv.ErrRange { + return float32(n), true + } + } + return 0, false +} + +// These are the supported float literals which C++ permits case-insensitive +// variants of these. +var floatLits = map[string]float64{ + "nan": math.NaN(), + "inf": math.Inf(1), + "infinity": math.Inf(1), + "-inf": math.Inf(-1), + "-infinity": math.Inf(-1), +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.attrs == y.attrs && + x.numAttrs == y.numAttrs && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go new file mode 100644 index 000000000..0ce8d6fb8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package text implements the text format for protocol buffers. +// This package has no semantic understanding for protocol buffers and is only +// a parser and composer for the format. +// +// There is no formal specification for the protobuf text format, as such the +// C++ implementation (see google::protobuf::TextFormat) is the reference +// implementation of the text format. +// +// This package is neither a superset nor a subset of the C++ implementation. +// This implementation permits a more liberal grammar in some cases to be +// backwards compatible with the historical Go implementation. +// Future parsings unique to Go should not be added. +// Some grammars allowed by the C++ implementation are deliberately +// not implemented here because they are considered a bug by the protobuf team +// and should not be replicated. +// +// The Go implementation should implement a sufficient amount of the C++ +// grammar such that the default text serialization by C++ can be parsed by Go. +// However, just because the C++ parser accepts some input does not mean that +// the Go implementation should as well. 
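The Float64/Float32 accessors above match named literals case-insensitively and treat a strconv range error as the +/-Inf value that ParseFloat already returned. A small standalone sketch of those two behaviors (parseFloatLike is an assumed name for illustration):

package main

import (
	"errors"
	"fmt"
	"math"
	"strconv"
	"strings"
)

var floatWords = map[string]float64{
	"nan": math.NaN(), "inf": math.Inf(1), "-inf": math.Inf(-1),
}

// parseFloatLike mirrors the behavior described above: named literals are
// matched case-insensitively, and an out-of-range numeric value keeps the
// +/-Inf that strconv returned alongside ErrRange.
func parseFloatLike(s string) (float64, bool) {
	if f, ok := floatWords[strings.ToLower(s)]; ok {
		return f, true
	}
	f, err := strconv.ParseFloat(s, 64)
	if err == nil {
		return f, true
	}
	var nerr *strconv.NumError
	if errors.As(err, &nerr) && nerr.Err == strconv.ErrRange {
		return f, true // overflow: f is already +Inf or -Inf
	}
	return 0, false
}

func main() {
	for _, s := range []string{"NaN", "1e999", "-1e999", "3.5", "oops"} {
		f, ok := parseFloatLike(s)
		fmt.Printf("%-7s => %v %t\n", s, f, ok)
	}
}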
+// +// The text format is almost a superset of JSON except: +// * message keys are not quoted strings, but identifiers +// * the top-level value must be a message without the delimiters +package text diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go new file mode 100644 index 000000000..c4ba1c598 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -0,0 +1,267 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// encType represents an encoding type. +type encType uint8 + +const ( + _ encType = (1 << iota) / 2 + name + scalar + messageOpen + messageClose +) + +// Encoder provides methods to write out textproto constructs and values. The user is +// responsible for producing valid sequences of constructs and values. +type Encoder struct { + encoderState + + indent string + newline string // set to "\n" if len(indent) > 0 + delims [2]byte + outputASCII bool +} + +type encoderState struct { + lastType encType + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry in a List or Message +// to be preceded by the indent and trailed by a newline. +// +// If delims is not the zero value, it controls the delimiter characters used +// for messages (e.g., "{}" vs "<>"). +// +// If outputASCII is true, strings will be serialized in such a way that +// multi-byte UTF-8 sequences are escaped. This property ensures that the +// overall output is ASCII (as opposed to UTF-8). +func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { + e := &Encoder{} + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space and tab characters") + } + e.indent = indent + e.newline = "\n" + } + switch delims { + case [2]byte{0, 0}: + e.delims = [2]byte{'{', '}'} + case [2]byte{'{', '}'}, [2]byte{'<', '>'}: + e.delims = delims + default: + return nil, errors.New("delimiters may only be \"{}\" or \"<>\"") + } + e.outputASCII = outputASCII + + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// StartMessage writes out the '{' or '<' symbol. +func (e *Encoder) StartMessage() { + e.prepareNext(messageOpen) + e.out = append(e.out, e.delims[0]) +} + +// EndMessage writes out the '}' or '>' symbol. +func (e *Encoder) EndMessage() { + e.prepareNext(messageClose) + e.out = append(e.out, e.delims[1]) +} + +// WriteName writes out the field name and the separator ':'. +func (e *Encoder) WriteName(s string) { + e.prepareNext(name) + e.out = append(e.out, s...) + e.out = append(e.out, ':') +} + +// WriteBool writes out the given boolean value. +func (e *Encoder) WriteBool(b bool) { + if b { + e.WriteLiteral("true") + } else { + e.WriteLiteral("false") + } +} + +// WriteString writes out the given string value. 
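NewEncoder above restricts indent to spaces and tabs and the delimiter pair to "{}" (the default for the zero value) or "<>". Because this is an internal package that cannot be imported directly, the sketch below is a standalone restatement of just that validation (checkEncoderOptions is an assumed name, not the real API):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// checkEncoderOptions mirrors the argument validation performed by NewEncoder
// above: indent may contain only spaces and tabs, and the delimiter pair must
// be "{}" (used when the zero value is passed) or "<>".
func checkEncoderOptions(indent string, delims [2]byte) ([2]byte, error) {
	if strings.Trim(indent, " \t") != "" {
		return delims, errors.New("indent may only be composed of space and tab characters")
	}
	switch delims {
	case [2]byte{0, 0}:
		return [2]byte{'{', '}'}, nil
	case [2]byte{'{', '}'}, [2]byte{'<', '>'}:
		return delims, nil
	default:
		return delims, errors.New(`delimiters may only be "{}" or "<>"`)
	}
}

func main() {
	d, err := checkEncoderOptions("  ", [2]byte{})
	fmt.Println(d, err)
	_, err = checkEncoderOptions("->", [2]byte{'<', '>'})
	fmt.Println(err)
}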
+func (e *Encoder) WriteString(s string) { + e.prepareNext(scalar) + e.out = appendString(e.out, s, e.outputASCII) +} + +func appendString(out []byte, in string, outputASCII bool) []byte { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + // We do not report invalid UTF-8 because strings in the text format + // are used to represent both the proto string and bytes type. + r = rune(in[0]) + fallthrough + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'x') + out = append(out, "00"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + case outputASCII && r >= utf8.RuneSelf: + out = append(out, '\\') + if r <= math.MaxUint16 { + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } else { + out = append(out, 'U') + out = append(out, "00000000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i := 0; i < len(s); i++ { + if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float value for given bitSize. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, "nan"...) + case math.IsInf(n, +1): + return append(out, "inf"...) + case math.IsInf(n, -1): + return append(out, "-inf"...) + default: + return strconv.AppendFloat(out, n, 'g', -1, bitSize) + } +} + +// WriteInt writes out the given signed integer value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatInt(n, 10)...) +} + +// WriteUint writes out the given unsigned integer value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatUint(n, 10)...) +} + +// WriteLiteral writes out the given string as a literal value without quotes. +// This is used for writing enum literal strings. +func (e *Encoder) WriteLiteral(s string) { + e.prepareNext(scalar) + e.out = append(e.out, s...) +} + +// prepareNext adds possible space and indentation for the next value based +// on last encType and indent option. It also updates e.lastType to next. +func (e *Encoder) prepareNext(next encType) { + defer func() { + e.lastType = next + }() + + // Single line. + if len(e.indent) == 0 { + // Add space after each field before the next one. + if e.lastType&(scalar|messageClose) != 0 && next == name { + e.out = append(e.out, ' ') + // Add a random extra space to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + // Multi-line. 
+ switch { + case e.lastType == name: + e.out = append(e.out, ' ') + // Add a random extra space after name: to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + + case e.lastType == messageOpen && next != messageClose: + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + + case e.lastType&(scalar|messageClose) != 0: + if next == messageClose { + e.indents = e.indents[:len(e.indents)-len(e.indent)] + } + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } +} + +// Snapshot returns the current snapshot for use in Reset. +func (e *Encoder) Snapshot() encoderState { + return e.encoderState +} + +// Reset resets the Encoder to the given encoderState from a Snapshot. +func (e *Encoder) Reset(es encoderState) { + e.encoderState = es +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go new file mode 100644 index 000000000..20c17b35e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors implements functions to manipulate errors. +package errors + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/internal/detrand" +) + +// Error is a sentinel matching all errors produced by this package. +var Error = errors.New("protobuf error") + +// New formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func New(f string, x ...interface{}) error { + return &prefixError{s: format(f, x...)} +} + +type prefixError struct{ s string } + +var prefix = func() string { + // Deliberately introduce instability into the error message string to + // discourage users from performing error string comparisons. + if detrand.Bool() { + return "proto: " // use non-breaking spaces (U+00a0) + } else { + return "proto: " // use regular spaces (U+0020) + } +}() + +func (e *prefixError) Error() string { + return prefix + e.s +} + +func (e *prefixError) Unwrap() error { + return Error +} + +// Wrap returns an error that has a "proto" prefix, the formatted string described +// by the format specifier and arguments, and a suffix of err. The error wraps err. +func Wrap(err error, f string, x ...interface{}) error { + return &wrapError{ + s: format(f, x...), + err: err, + } +} + +type wrapError struct { + s string + err error +} + +func (e *wrapError) Error() string { + return format("%v%v: %v", prefix, e.s, e.err) +} + +func (e *wrapError) Unwrap() error { + return e.err +} + +func (e *wrapError) Is(target error) bool { + return target == Error +} + +func format(f string, x ...interface{}) string { + // avoid "proto: " prefix when chaining + for i := 0; i < len(x); i++ { + switch e := x[i].(type) { + case *prefixError: + x[i] = e.s + case *wrapError: + x[i] = format("%v: %v", e.s, e.err) + } + } + return fmt.Sprintf(f, x...) 
+} + +func InvalidUTF8(name string) error { + return New("field %v contains invalid UTF-8", name) +} + +func RequiredNotSet(name string) error { + return New("required field %v not set", name) +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go new file mode 100644 index 000000000..f90e909b3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -0,0 +1,39 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +package errors + +import "reflect" + +// Is is a copy of Go 1.13's errors.Is for use with older Go versions. +func Is(err, target error) bool { + if target == nil { + return err == target + } + + isComparable := reflect.TypeOf(target).Comparable() + for { + if isComparable && err == target { + return true + } + if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { + return true + } + if err = unwrap(err); err == nil { + return false + } + } +} + +func unwrap(err error) error { + u, ok := err.(interface { + Unwrap() error + }) + if !ok { + return nil + } + return u.Unwrap() +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go new file mode 100644 index 000000000..dc05f4191 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -0,0 +1,12 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package errors + +import "errors" + +// Is is errors.Is. +func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go new file mode 100644 index 000000000..517c4e2a0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fieldsort defines an ordering of fields. +// +// The ordering defined by this package matches the historic behavior of the proto +// package, placing extensions first and oneofs last. +// +// There is no guarantee about stability of the wire encoding, and users should not +// depend on the order defined in this package as it is subject to change without +// notice. +package fieldsort + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Less returns true if field a comes before field j in ordered wire marshal output. 
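As a toy illustration of the ordering described above (extensions first, oneof members grouped at the end by oneof index, everything else by field number), the sketch below sorts hypothetical fields with sort.Slice. The field struct and less helper are assumptions for illustration and ignore the synthetic-oneof case that the real Less below handles:

package main

import (
	"fmt"
	"sort"
)

// field is a toy stand-in for protoreflect.FieldDescriptor with just enough
// information to show the ordering: extension flag, oneof index, and number.
type field struct {
	name       string
	number     int
	extension  bool
	oneofIndex int // -1 if not part of a oneof
}

func less(a, b field) bool {
	switch {
	case a.extension != b.extension:
		return a.extension // extensions sort first
	case a.oneofIndex >= 0 && b.oneofIndex >= 0:
		if a.oneofIndex == b.oneofIndex {
			return a.number < b.number
		}
		return a.oneofIndex < b.oneofIndex
	case a.oneofIndex >= 0:
		return false // oneof members sort after plain fields
	case b.oneofIndex >= 0:
		return true
	default:
		return a.number < b.number
	}
}

func main() {
	fs := []field{
		{"choice_b", 7, false, 1},
		{"plain", 2, false, -1},
		{"ext", 100, true, -1},
		{"choice_a", 5, false, 0},
	}
	sort.Slice(fs, func(i, j int) bool { return less(fs[i], fs[j]) })
	for _, f := range fs {
		fmt.Println(f.name) // ext, plain, choice_a, choice_b
	}
}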
+func Less(a, b protoreflect.FieldDescriptor) bool { + ea := a.IsExtension() + eb := b.IsExtension() + oa := a.ContainingOneof() + ob := b.ContainingOneof() + switch { + case ea != eb: + return ea + case oa != nil && ob != nil: + if oa == ob { + return a.Number() < b.Number() + } + return oa.Index() < ob.Index() + case oa != nil && !oa.IsSynthetic(): + return false + case ob != nil && !ob.IsSynthetic(): + return true + default: + return a.Number() < b.Number() + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go new file mode 100644 index 000000000..d02d770c9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -0,0 +1,155 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filedesc provides functionality for constructing descriptors. +package filedesc + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// Builder construct a protoreflect.FileDescriptor from the raw descriptor. +type Builder struct { + // GoPackagePath is the Go package path that is invoking this builder. + GoPackagePath string + + // RawDescriptor is the wire-encoded bytes of FileDescriptorProto + // and must be populated. + RawDescriptor []byte + + // NumEnums is the total number of enums declared in the file. + NumEnums int32 + // NumMessages is the total number of messages declared in the file. + // It includes the implicit message declarations for map entries. + NumMessages int32 + // NumExtensions is the total number of extensions declared in the file. + NumExtensions int32 + // NumServices is the total number of services declared in the file. + NumServices int32 + + // TypeResolver resolves extension field types for descriptor options. + // If nil, it uses protoregistry.GlobalTypes. + TypeResolver interface { + preg.ExtensionTypeResolver + } + + // FileRegistry is use to lookup file, enum, and message dependencies. + // Once constructed, the file descriptor is registered here. + // If nil, it uses protoregistry.GlobalFiles. + FileRegistry interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(pref.FullName) (pref.Descriptor, error) + RegisterFile(pref.FileDescriptor) error + } +} + +// resolverByIndex is an interface Builder.FileRegistry may implement. +// If so, it permits looking up an enum or message dependency based on the +// sub-list and element index into filetype.Builder.DependencyIndexes. +type resolverByIndex interface { + FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor + FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor +} + +// Indexes of each sub-list in filetype.Builder.DependencyIndexes. +const ( + listFieldDeps int32 = iota + listExtTargets + listExtDeps + listMethInDeps + listMethOutDeps +) + +// Out is the output of the Builder. +type Out struct { + File pref.FileDescriptor + + // Enums is all enum descriptors in "flattened ordering". + Enums []Enum + // Messages is all message descriptors in "flattened ordering". + // It includes the implicit message declarations for map entries. 
+ Messages []Message + // Extensions is all extension descriptors in "flattened ordering". + Extensions []Extension + // Service is all service descriptors in "flattened ordering". + Services []Service +} + +// Build constructs a FileDescriptor given the parameters set in Builder. +// It assumes that the inputs are well-formed and panics if any inconsistencies +// are encountered. +// +// If NumEnums+NumMessages+NumExtensions+NumServices is zero, +// then Build automatically derives them from the raw descriptor. +func (db Builder) Build() (out Out) { + // Populate the counts if uninitialized. + if db.NumEnums+db.NumMessages+db.NumExtensions+db.NumServices == 0 { + db.unmarshalCounts(db.RawDescriptor, true) + } + + // Initialize resolvers and registries if unpopulated. + if db.TypeResolver == nil { + db.TypeResolver = preg.GlobalTypes + } + if db.FileRegistry == nil { + db.FileRegistry = preg.GlobalFiles + } + + fd := newRawFile(db) + out.File = fd + out.Enums = fd.allEnums + out.Messages = fd.allMessages + out.Extensions = fd.allExtensions + out.Services = fd.allServices + + if err := db.FileRegistry.RegisterFile(fd); err != nil { + panic(err) + } + return out +} + +// unmarshalCounts counts the number of enum, message, extension, and service +// declarations in the raw message, which is either a FileDescriptorProto +// or a MessageDescriptorProto depending on whether isFile is set. +func (db *Builder) unmarshalCounts(b []byte, isFile bool) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + if isFile { + switch num { + case genid.FileDescriptorProto_EnumType_field_number: + db.NumEnums++ + case genid.FileDescriptorProto_MessageType_field_number: + db.unmarshalCounts(v, false) + db.NumMessages++ + case genid.FileDescriptorProto_Extension_field_number: + db.NumExtensions++ + case genid.FileDescriptorProto_Service_field_number: + db.NumServices++ + } + } else { + switch num { + case genid.DescriptorProto_EnumType_field_number: + db.NumEnums++ + case genid.DescriptorProto_NestedType_field_number: + db.unmarshalCounts(v, false) + db.NumMessages++ + case genid.DescriptorProto_Extension_field_number: + db.NumExtensions++ + } + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go new file mode 100644 index 000000000..9385126fb --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -0,0 +1,614 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "bytes" + "fmt" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// The types in this file may have a suffix: +// • L0: Contains fields common to all descriptors (except File) and +// must be initialized up front. +// • L1: Contains fields specific to a descriptor and +// must be initialized up front. 
+// • L2: Contains fields that are lazily initialized when constructing +// from the raw file descriptor. When constructing as a literal, the L2 +// fields must be initialized up front. +// +// The types are exported so that packages like reflect/protodesc can +// directly construct descriptors. + +type ( + File struct { + fileRaw + L1 FileL1 + + once uint32 // atomically set if L2 is valid + mu sync.Mutex // protects L2 + L2 *FileL2 + } + FileL1 struct { + Syntax pref.Syntax + Path string + Package pref.FullName + + Enums Enums + Messages Messages + Extensions Extensions + Services Services + } + FileL2 struct { + Options func() pref.ProtoMessage + Imports FileImports + Locations SourceLocations + } +) + +func (fd *File) ParentFile() pref.FileDescriptor { return fd } +func (fd *File) Parent() pref.Descriptor { return nil } +func (fd *File) Index() int { return 0 } +func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax } +func (fd *File) Name() pref.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() pref.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Options() pref.ProtoMessage { + if f := fd.lazyInit().Options; f != nil { + return f() + } + return descopts.File +} +func (fd *File) Path() string { return fd.L1.Path } +func (fd *File) Package() pref.FullName { return fd.L1.Package } +func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports } +func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } +func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } +func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } +func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } +func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } +func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *File) ProtoType(pref.FileDescriptor) {} +func (fd *File) ProtoInternal(pragma.DoNotImplement) {} + +func (fd *File) lazyInit() *FileL2 { + if atomic.LoadUint32(&fd.once) == 0 { + fd.lazyInitOnce() + } + return fd.L2 +} + +func (fd *File) lazyInitOnce() { + fd.mu.Lock() + if fd.L2 == nil { + fd.lazyRawInit() // recursively initializes all L2 structures + } + atomic.StoreUint32(&fd.once, 1) + fd.mu.Unlock() +} + +// ProtoLegacyRawDesc is a pseudo-internal API for allowing the v1 code +// to be able to retrieve the raw descriptor. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (fd *File) ProtoLegacyRawDesc() []byte { + return fd.builder.RawDescriptor +} + +// GoPackagePath is a pseudo-internal API for determining the Go package path +// that this file descriptor is declared in. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. 
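File.lazyInit and lazyInitOnce above use a double-checked pattern: an atomic flag on the fast path and a mutex only while initialization may still be needed. A standalone sketch of the same idea (lazyBox is an assumed type for illustration, not part of the package):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// lazyBox initializes its value at most once, checking an atomic flag on the
// fast path and taking the mutex only when initialization may still be
// required, in the style of File.lazyInit/lazyInitOnce above.
type lazyBox struct {
	done uint32
	mu   sync.Mutex
	val  string
}

func (b *lazyBox) get() string {
	if atomic.LoadUint32(&b.done) == 0 {
		b.mu.Lock()
		if b.done == 0 {
			b.val = "expensive result" // stand-in for decoding the raw descriptor
			atomic.StoreUint32(&b.done, 1)
		}
		b.mu.Unlock()
	}
	return b.val
}

func main() {
	var b lazyBox
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); _ = b.get() }()
	}
	wg.Wait()
	fmt.Println(b.get())
}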
+func (fd *File) GoPackagePath() string { + return fd.builder.GoPackagePath +} + +type ( + Enum struct { + Base + L1 EnumL1 + L2 *EnumL2 // protected by fileDesc.once + } + EnumL1 struct { + eagerValues bool // controls whether EnumL2.Values is already populated + } + EnumL2 struct { + Options func() pref.ProtoMessage + Values EnumValues + ReservedNames Names + ReservedRanges EnumRanges + } + + EnumValue struct { + Base + L1 EnumValueL1 + } + EnumValueL1 struct { + Options func() pref.ProtoMessage + Number pref.EnumNumber + } +) + +func (ed *Enum) Options() pref.ProtoMessage { + if f := ed.lazyInit().Options; f != nil { + return f() + } + return descopts.Enum +} +func (ed *Enum) Values() pref.EnumValueDescriptors { + if ed.L1.eagerValues { + return &ed.L2.Values + } + return &ed.lazyInit().Values +} +func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames } +func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges } +func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *Enum) ProtoType(pref.EnumDescriptor) {} +func (ed *Enum) lazyInit() *EnumL2 { + ed.L0.ParentFile.lazyInit() // implicitly initializes L2 + return ed.L2 +} + +func (ed *EnumValue) Options() pref.ProtoMessage { + if f := ed.L1.Options; f != nil { + return f() + } + return descopts.EnumValue +} +func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number } +func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {} + +type ( + Message struct { + Base + L1 MessageL1 + L2 *MessageL2 // protected by fileDesc.once + } + MessageL1 struct { + Enums Enums + Messages Messages + Extensions Extensions + IsMapEntry bool // promoted from google.protobuf.MessageOptions + IsMessageSet bool // promoted from google.protobuf.MessageOptions + } + MessageL2 struct { + Options func() pref.ProtoMessage + Fields Fields + Oneofs Oneofs + ReservedNames Names + ReservedRanges FieldRanges + RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality + ExtensionRanges FieldRanges + ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges + } + + Field struct { + Base + L1 FieldL1 + } + FieldL1 struct { + Options func() pref.ProtoMessage + Number pref.FieldNumber + Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers + Kind pref.Kind + JSONName jsonName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsWeak bool // promoted from google.protobuf.FieldOptions + HasPacked bool // promoted from google.protobuf.FieldOptions + IsPacked bool // promoted from google.protobuf.FieldOptions + HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions + EnforceUTF8 bool // promoted from google.protobuf.FieldOptions + Default defaultValue + ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields + Enum pref.EnumDescriptor + Message pref.MessageDescriptor + } + + Oneof struct { + Base + L1 OneofL1 + } + OneofL1 struct { + Options func() pref.ProtoMessage + Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + } +) + +func (md *Message) Options() pref.ProtoMessage { + if f := md.lazyInit().Options; f != nil { + return f() + } + return descopts.Message +} +func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } +func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields } +func (md *Message) 
Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs } +func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames } +func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges } +func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers } +func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges } +func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage { + if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { + return f() + } + return descopts.ExtensionRange +} +func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums } +func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages } +func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions } +func (md *Message) ProtoType(pref.MessageDescriptor) {} +func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Message) lazyInit() *MessageL2 { + md.L0.ParentFile.lazyInit() // implicitly initializes L2 + return md.L2 +} + +// IsMessageSet is a pseudo-internal API for checking whether a message +// should serialize in the proto1 message format. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (md *Message) IsMessageSet() bool { + return md.L1.IsMessageSet +} + +func (fd *Field) Options() pref.ProtoMessage { + if f := fd.L1.Options; f != nil { + return f() + } + return descopts.Field +} +func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } +func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } +func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } +func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has } +func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) } +func (fd *Field) HasPresence() bool { + return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) +} +func (fd *Field) HasOptionalKeyword() bool { + return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional +} +func (fd *Field) IsPacked() bool { + if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { + switch fd.L1.Kind { + case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind: + default: + return true + } + } + return fd.L1.IsPacked +} +func (fd *Field) IsExtension() bool { return false } +func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() } +func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } +func (fd *Field) MapKey() pref.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) +} +func (fd *Field) MapValue() pref.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) +} +func (fd *Field) HasDefault() bool { return fd.L1.Default.has } +func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } +func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum } +func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof } +func (fd 
*Field) ContainingMessage() pref.MessageDescriptor { + return fd.L0.Parent.(pref.MessageDescriptor) +} +func (fd *Field) Enum() pref.EnumDescriptor { + return fd.L1.Enum +} +func (fd *Field) Message() pref.MessageDescriptor { + if fd.L1.IsWeak { + if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { + return d.(pref.MessageDescriptor) + } + } + return fd.L1.Message +} +func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *Field) ProtoType(pref.FieldDescriptor) {} + +// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 +// validation for the string field. This exists for Google-internal use only +// since proto3 did not enforce UTF-8 validity prior to the open-source release. +// If this method does not exist, the default is to enforce valid UTF-8. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (fd *Field) EnforceUTF8() bool { + if fd.L1.HasEnforceUTF8 { + return fd.L1.EnforceUTF8 + } + return fd.L0.ParentFile.L1.Syntax == pref.Proto3 +} + +func (od *Oneof) IsSynthetic() bool { + return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() +} +func (od *Oneof) Options() pref.ProtoMessage { + if f := od.L1.Options; f != nil { + return f() + } + return descopts.Oneof +} +func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields } +func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } +func (od *Oneof) ProtoType(pref.OneofDescriptor) {} + +type ( + Extension struct { + Base + L1 ExtensionL1 + L2 *ExtensionL2 // protected by fileDesc.once + } + ExtensionL1 struct { + Number pref.FieldNumber + Extendee pref.MessageDescriptor + Cardinality pref.Cardinality + Kind pref.Kind + } + ExtensionL2 struct { + Options func() pref.ProtoMessage + JSONName jsonName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsPacked bool // promoted from google.protobuf.FieldOptions + Default defaultValue + Enum pref.EnumDescriptor + Message pref.MessageDescriptor + } +) + +func (xd *Extension) Options() pref.ProtoMessage { + if f := xd.lazyInit().Options; f != nil { + return f() + } + return descopts.Field +} +func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } +func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } +func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has } +func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) } +func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } +func (xd *Extension) HasOptionalKeyword() bool { + return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional +} +func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsExtension() bool { return true } +func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated } +func (xd *Extension) IsMap() bool { return false } +func (xd *Extension) MapKey() pref.FieldDescriptor { return nil } +func (xd *Extension) MapValue() pref.FieldDescriptor { return nil } +func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } +func (xd *Extension) Default() pref.Value { return 
xd.lazyInit().Default.get(xd) } +func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum } +func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil } +func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee } +func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum } +func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message } +func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } +func (xd *Extension) ProtoType(pref.FieldDescriptor) {} +func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} +func (xd *Extension) lazyInit() *ExtensionL2 { + xd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return xd.L2 +} + +type ( + Service struct { + Base + L1 ServiceL1 + L2 *ServiceL2 // protected by fileDesc.once + } + ServiceL1 struct{} + ServiceL2 struct { + Options func() pref.ProtoMessage + Methods Methods + } + + Method struct { + Base + L1 MethodL1 + } + MethodL1 struct { + Options func() pref.ProtoMessage + Input pref.MessageDescriptor + Output pref.MessageDescriptor + IsStreamingClient bool + IsStreamingServer bool + } +) + +func (sd *Service) Options() pref.ProtoMessage { + if f := sd.lazyInit().Options; f != nil { + return f() + } + return descopts.Service +} +func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods } +func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } +func (sd *Service) ProtoType(pref.ServiceDescriptor) {} +func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} +func (sd *Service) lazyInit() *ServiceL2 { + sd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return sd.L2 +} + +func (md *Method) Options() pref.ProtoMessage { + if f := md.L1.Options; f != nil { + return f() + } + return descopts.Method +} +func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input } +func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output } +func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } +func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } +func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Method) ProtoType(pref.MethodDescriptor) {} +func (md *Method) ProtoInternal(pragma.DoNotImplement) {} + +// Surrogate files are can be used to create standalone descriptors +// where the syntax is only information derived from the parent file. 
+var ( + SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}} +) + +type ( + Base struct { + L0 BaseL0 + } + BaseL0 struct { + FullName pref.FullName // must be populated + ParentFile *File // must be populated + Parent pref.Descriptor + Index int + } +) + +func (d *Base) Name() pref.Name { return d.L0.FullName.Name() } +func (d *Base) FullName() pref.FullName { return d.L0.FullName } +func (d *Base) ParentFile() pref.FileDescriptor { + if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { + return nil // surrogate files are not real parents + } + return d.L0.ParentFile +} +func (d *Base) Parent() pref.Descriptor { return d.L0.Parent } +func (d *Base) Index() int { return d.L0.Index } +func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() } +func (d *Base) IsPlaceholder() bool { return false } +func (d *Base) ProtoInternal(pragma.DoNotImplement) {} + +type jsonName struct { + has bool + once sync.Once + name string +} + +// Init initializes the name. It is exported for use by other internal packages. +func (js *jsonName) Init(s string) { + js.has = true + js.name = s +} + +func (js *jsonName) get(fd pref.FieldDescriptor) string { + if !js.has { + js.once.Do(func() { + js.name = strs.JSONCamelCase(string(fd.Name())) + }) + } + return js.name +} + +func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { + dv := defaultValue{has: v.IsValid(), val: v, enum: ev} + if b, ok := v.Interface().([]byte); ok { + // Store a copy of the default bytes, so that we can detect + // accidental mutations of the original value. + dv.bytes = append([]byte(nil), b...) + } + return dv +} + +func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue { + var evs pref.EnumValueDescriptors + if k == pref.EnumKind { + // If the enum is declared within the same file, be careful not to + // blindly call the Values method, lest we bind ourselves in a deadlock. + if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { + evs = &e.L2.Values + } else { + evs = ed.Values() + } + + // If we are unable to resolve the enum dependency, use a placeholder + // enum value since we will not be able to parse the default value. + if ed.IsPlaceholder() && pref.Name(b).IsValid() { + v := pref.ValueOfEnum(0) + ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b))) + return DefaultValue(v, ev) + } + } + + v, ev, err := defval.Unmarshal(string(b), k, evs, defval.Descriptor) + if err != nil { + panic(err) + } + return DefaultValue(v, ev) +} + +type defaultValue struct { + has bool + val pref.Value + enum pref.EnumValueDescriptor + bytes []byte +} + +func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value { + // Return the zero value as the default if unpopulated. 
+ if !dv.has { + if fd.Cardinality() == pref.Repeated { + return pref.Value{} + } + switch fd.Kind() { + case pref.BoolKind: + return pref.ValueOfBool(false) + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + return pref.ValueOfInt32(0) + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + return pref.ValueOfInt64(0) + case pref.Uint32Kind, pref.Fixed32Kind: + return pref.ValueOfUint32(0) + case pref.Uint64Kind, pref.Fixed64Kind: + return pref.ValueOfUint64(0) + case pref.FloatKind: + return pref.ValueOfFloat32(0) + case pref.DoubleKind: + return pref.ValueOfFloat64(0) + case pref.StringKind: + return pref.ValueOfString("") + case pref.BytesKind: + return pref.ValueOfBytes(nil) + case pref.EnumKind: + if evs := fd.Enum().Values(); evs.Len() > 0 { + return pref.ValueOfEnum(evs.Get(0).Number()) + } + return pref.ValueOfEnum(0) + } + } + + if len(dv.bytes) > 0 && !bytes.Equal(dv.bytes, dv.val.Bytes()) { + // TODO: Avoid panic if we're running with the race detector + // and instead spawn a goroutine that periodically resets + // this value back to the original to induce a race. + panic(fmt.Sprintf("detected mutation on the default bytes for %v", fd.FullName())) + } + return dv.val +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go new file mode 100644 index 000000000..66e1fee52 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -0,0 +1,471 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// fileRaw is a data struct used when initializing a file descriptor from +// a raw FileDescriptorProto. +type fileRaw struct { + builder Builder + allEnums []Enum + allMessages []Message + allExtensions []Extension + allServices []Service +} + +func newRawFile(db Builder) *File { + fd := &File{fileRaw: fileRaw{builder: db}} + fd.initDecls(db.NumEnums, db.NumMessages, db.NumExtensions, db.NumServices) + fd.unmarshalSeed(db.RawDescriptor) + + // Extended message targets are eagerly resolved since registration + // needs this information at program init time. + for i := range fd.allExtensions { + xd := &fd.allExtensions[i] + xd.L1.Extendee = fd.resolveMessageDependency(xd.L1.Extendee, listExtTargets, int32(i)) + } + + fd.checkDecls() + return fd +} + +// initDecls pre-allocates slices for the exact number of enums, messages +// (including map entries), extensions, and services declared in the proto file. +// This is done to avoid regrowing the slice, which would change the address +// for any previously seen declaration. +// +// The alloc methods "allocates" slices by pulling from the capacity. 
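The comment above describes how initDecls pre-allocates one backing slice per declaration type so that later alloc calls never move previously handed-out elements. A toy version of that pattern (pool and alloc are illustrative names only):

package main

import "fmt"

type decl struct{ name string }

// pool hands out contiguous chunks of a single pre-allocated backing array,
// so the address of a previously returned element never changes, mirroring
// the File.alloc* helpers below.
type pool struct{ all []decl }

func newPool(total int) *pool { return &pool{all: make([]decl, 0, total)} }

func (p *pool) alloc(n int) []decl {
	start := len(p.all)
	p.all = p.all[:start+n] // pull from capacity; never reallocates
	return p.all[start : start+n]
}

func main() {
	p := newPool(3)
	first := p.alloc(1)
	first[0].name = "A"
	addr := &first[0]

	second := p.alloc(2)
	second[0].name = "B"
	second[1].name = "C"

	// The earlier pointer is still valid because the backing array never grew.
	fmt.Println(addr == &p.all[0], p.all)
}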
+func (fd *File) initDecls(numEnums, numMessages, numExtensions, numServices int32) { + fd.allEnums = make([]Enum, 0, numEnums) + fd.allMessages = make([]Message, 0, numMessages) + fd.allExtensions = make([]Extension, 0, numExtensions) + fd.allServices = make([]Service, 0, numServices) +} + +func (fd *File) allocEnums(n int) []Enum { + total := len(fd.allEnums) + es := fd.allEnums[total : total+n] + fd.allEnums = fd.allEnums[:total+n] + return es +} +func (fd *File) allocMessages(n int) []Message { + total := len(fd.allMessages) + ms := fd.allMessages[total : total+n] + fd.allMessages = fd.allMessages[:total+n] + return ms +} +func (fd *File) allocExtensions(n int) []Extension { + total := len(fd.allExtensions) + xs := fd.allExtensions[total : total+n] + fd.allExtensions = fd.allExtensions[:total+n] + return xs +} +func (fd *File) allocServices(n int) []Service { + total := len(fd.allServices) + xs := fd.allServices[total : total+n] + fd.allServices = fd.allServices[:total+n] + return xs +} + +// checkDecls performs a sanity check that the expected number of expected +// declarations matches the number that were found in the descriptor proto. +func (fd *File) checkDecls() { + switch { + case len(fd.allEnums) != cap(fd.allEnums): + case len(fd.allMessages) != cap(fd.allMessages): + case len(fd.allExtensions) != cap(fd.allExtensions): + case len(fd.allServices) != cap(fd.allServices): + default: + return + } + panic("mismatching cardinality") +} + +func (fd *File) unmarshalSeed(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + + var prevField pref.FieldNumber + var numEnums, numMessages, numExtensions, numServices int + var posEnums, posMessages, posExtensions, posServices int + b0 := b + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Syntax_field_number: + switch string(v) { + case "proto2": + fd.L1.Syntax = pref.Proto2 + case "proto3": + fd.L1.Syntax = pref.Proto3 + default: + panic("invalid syntax") + } + case genid.FileDescriptorProto_Name_field_number: + fd.L1.Path = sb.MakeString(v) + case genid.FileDescriptorProto_Package_field_number: + fd.L1.Package = pref.FullName(sb.MakeString(v)) + case genid.FileDescriptorProto_EnumType_field_number: + if prevField != genid.FileDescriptorProto_EnumType_field_number { + if numEnums > 0 { + panic("non-contiguous repeated field") + } + posEnums = len(b0) - len(b) - n - m + } + numEnums++ + case genid.FileDescriptorProto_MessageType_field_number: + if prevField != genid.FileDescriptorProto_MessageType_field_number { + if numMessages > 0 { + panic("non-contiguous repeated field") + } + posMessages = len(b0) - len(b) - n - m + } + numMessages++ + case genid.FileDescriptorProto_Extension_field_number: + if prevField != genid.FileDescriptorProto_Extension_field_number { + if numExtensions > 0 { + panic("non-contiguous repeated field") + } + posExtensions = len(b0) - len(b) - n - m + } + numExtensions++ + case genid.FileDescriptorProto_Service_field_number: + if prevField != genid.FileDescriptorProto_Service_field_number { + if numServices > 0 { + panic("non-contiguous repeated field") + } + posServices = len(b0) - len(b) - n - m + } + numServices++ + } + prevField = num + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + prevField = -1 // ignore known field numbers of unknown wire type + } + } + + // If syntax is missing, it is assumed to be proto2. 
+ if fd.L1.Syntax == 0 { + fd.L1.Syntax = pref.Proto2 + } + + // Must allocate all declarations before parsing each descriptor type + // to ensure we handled all descriptors in "flattened ordering". + if numEnums > 0 { + fd.L1.Enums.List = fd.allocEnums(numEnums) + } + if numMessages > 0 { + fd.L1.Messages.List = fd.allocMessages(numMessages) + } + if numExtensions > 0 { + fd.L1.Extensions.List = fd.allocExtensions(numExtensions) + } + if numServices > 0 { + fd.L1.Services.List = fd.allocServices(numServices) + } + + if numEnums > 0 { + b := b0[posEnums:] + for i := range fd.L1.Enums.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Enums.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numMessages > 0 { + b := b0[posMessages:] + for i := range fd.L1.Messages.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Messages.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numExtensions > 0 { + b := b0[posExtensions:] + for i := range fd.L1.Extensions.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Extensions.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numServices > 0 { + b := b0[posServices:] + for i := range fd.L1.Services.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Services.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } +} + +func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + ed.L0.ParentFile = pf + ed.L0.Parent = pd + ed.L0.Index = i + + var numValues int + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Name_field_number: + ed.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.EnumDescriptorProto_Value_field_number: + numValues++ + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + // Only construct enum value descriptors for top-level enums since + // they are needed for registration. 
+ if pd != pf { + return + } + ed.L1.eagerValues = true + ed.L2 = new(EnumL2) + ed.L2.Values.List = make([]EnumValue, numValues) + for i := 0; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Value_field_number: + ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i) + i++ + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + + var prevField pref.FieldNumber + var numEnums, numMessages, numExtensions int + var posEnums, posMessages, posExtensions int + b0 := b + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Name_field_number: + md.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.DescriptorProto_EnumType_field_number: + if prevField != genid.DescriptorProto_EnumType_field_number { + if numEnums > 0 { + panic("non-contiguous repeated field") + } + posEnums = len(b0) - len(b) - n - m + } + numEnums++ + case genid.DescriptorProto_NestedType_field_number: + if prevField != genid.DescriptorProto_NestedType_field_number { + if numMessages > 0 { + panic("non-contiguous repeated field") + } + posMessages = len(b0) - len(b) - n - m + } + numMessages++ + case genid.DescriptorProto_Extension_field_number: + if prevField != genid.DescriptorProto_Extension_field_number { + if numExtensions > 0 { + panic("non-contiguous repeated field") + } + posExtensions = len(b0) - len(b) - n - m + } + numExtensions++ + case genid.DescriptorProto_Options_field_number: + md.unmarshalSeedOptions(v) + } + prevField = num + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + prevField = -1 // ignore known field numbers of unknown wire type + } + } + + // Must allocate all declarations before parsing each descriptor type + // to ensure we handled all descriptors in "flattened ordering". 
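The "flattened ordering" comment above refers to a two-pass wire scan: the first pass only counts each repeated declaration field and records the byte offset where its contiguous run begins, and the second pass allocates exact-size slices and then decodes from those offsets. A small standalone sketch of the same idea using the public protowire package; the field numbers and values are invented for illustration:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Build a toy message: field 1 (a name) followed by two field-4 entries.
	var msg []byte
	msg = protowire.AppendTag(msg, 1, protowire.BytesType)
	msg = protowire.AppendBytes(msg, []byte("example"))
	for _, s := range []string{"first", "second"} {
		msg = protowire.AppendTag(msg, 4, protowire.BytesType)
		msg = protowire.AppendBytes(msg, []byte(s))
	}

	// Pass 1: count field 4 and record the offset of its first occurrence.
	var count, pos int
	b0, b := msg, msg
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		b = b[n:]
		if typ == protowire.BytesType {
			_, m := protowire.ConsumeBytes(b)
			b = b[m:]
			if num == 4 {
				if count == 0 {
					pos = len(b0) - len(b) - n - m // start of this tag
				}
				count++
			}
		} else {
			b = b[protowire.ConsumeFieldValue(num, typ, b):]
		}
	}

	// Pass 2: allocate exactly count entries, then decode from pos.
	out := make([][]byte, 0, count)
	for b := b0[pos:]; len(out) < count; {
		_, n := protowire.ConsumeVarint(b) // the tag is itself a varint
		v, m := protowire.ConsumeBytes(b[n:])
		out = append(out, v)
		b = b[n+m:]
	}
	fmt.Printf("%d entries: %q\n", count, out)
}
```

As in the vendored code, the second pass only works because each repeated field is assumed to appear as one contiguous run; the code above panics with "non-contiguous repeated field" when that assumption is violated.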
+ if numEnums > 0 { + md.L1.Enums.List = pf.allocEnums(numEnums) + } + if numMessages > 0 { + md.L1.Messages.List = pf.allocMessages(numMessages) + } + if numExtensions > 0 { + md.L1.Extensions.List = pf.allocExtensions(numExtensions) + } + + if numEnums > 0 { + b := b0[posEnums:] + for i := range md.L1.Enums.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Enums.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } + if numMessages > 0 { + b := b0[posMessages:] + for i := range md.L1.Messages.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Messages.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } + if numExtensions > 0 { + b := b0[posExtensions:] + for i := range md.L1.Extensions.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Extensions.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } +} + +func (md *Message) unmarshalSeedOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MessageOptions_MapEntry_field_number: + md.L1.IsMapEntry = protowire.DecodeBool(v) + case genid.MessageOptions_MessageSetWireFormat_field_number: + md.L1.IsMessageSet = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + xd.L0.ParentFile = pf + xd.L0.Parent = pd + xd.L0.Index = i + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Number_field_number: + xd.L1.Number = pref.FieldNumber(v) + case genid.FieldDescriptorProto_Label_field_number: + xd.L1.Cardinality = pref.Cardinality(v) + case genid.FieldDescriptorProto_Type_field_number: + xd.L1.Kind = pref.Kind(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Name_field_number: + xd.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.FieldDescriptorProto_Extendee_field_number: + xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + sd.L0.ParentFile = pf + sd.L0.Parent = pd + sd.L0.Index = i + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.ServiceDescriptorProto_Name_field_number: + sd.L0.FullName = appendFullName(sb, pd.FullName(), v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +var nameBuilderPool = sync.Pool{ + New: func() interface{} { return new(strs.Builder) }, +} + +func getBuilder() *strs.Builder { + return nameBuilderPool.Get().(*strs.Builder) +} +func putBuilder(b *strs.Builder) { + nameBuilderPool.Put(b) +} + +// makeFullName converts b to a protoreflect.FullName, +// where b must start with a leading dot. +func makeFullName(sb *strs.Builder, b []byte) pref.FullName { + if len(b) == 0 || b[0] != '.' 
{ + panic("name reference must be fully qualified") + } + return pref.FullName(sb.MakeString(b[1:])) +} + +func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName { + return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix))) +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go new file mode 100644 index 000000000..e672233e7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -0,0 +1,704 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "reflect" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +func (fd *File) lazyRawInit() { + fd.unmarshalFull(fd.builder.RawDescriptor) + fd.resolveMessages() + fd.resolveExtensions() + fd.resolveServices() +} + +func (file *File) resolveMessages() { + var depIdx int32 + for i := range file.allMessages { + md := &file.allMessages[i] + + // Resolve message field dependencies. + for j := range md.L2.Fields.List { + fd := &md.L2.Fields.List[j] + + // Weak fields are resolved upon actual use. + if fd.L1.IsWeak { + continue + } + + // Resolve message field dependency. + switch fd.L1.Kind { + case pref.EnumKind: + fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) + depIdx++ + case pref.MessageKind, pref.GroupKind: + fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) + depIdx++ + } + + // Default is resolved here since it depends on Enum being resolved. + if v := fd.L1.Default.val; v.IsValid() { + fd.L1.Default = unmarshalDefault(v.Bytes(), fd.L1.Kind, file, fd.L1.Enum) + } + } + } +} + +func (file *File) resolveExtensions() { + var depIdx int32 + for i := range file.allExtensions { + xd := &file.allExtensions[i] + + // Resolve extension field dependency. + switch xd.L1.Kind { + case pref.EnumKind: + xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) + depIdx++ + case pref.MessageKind, pref.GroupKind: + xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) + depIdx++ + } + + // Default is resolved here since it depends on Enum being resolved. + if v := xd.L2.Default.val; v.IsValid() { + xd.L2.Default = unmarshalDefault(v.Bytes(), xd.L1.Kind, file, xd.L2.Enum) + } + } +} + +func (file *File) resolveServices() { + var depIdx int32 + for i := range file.allServices { + sd := &file.allServices[i] + + // Resolve method dependencies. 
+ for j := range sd.L2.Methods.List { + md := &sd.L2.Methods.List[j] + md.L1.Input = file.resolveMessageDependency(md.L1.Input, listMethInDeps, depIdx) + md.L1.Output = file.resolveMessageDependency(md.L1.Output, listMethOutDeps, depIdx) + depIdx++ + } + } +} + +func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { + return ed2 + } + } + for i := range file.allEnums { + if ed2 := &file.allEnums[i]; ed2.L0.FullName == ed.FullName() { + return ed2 + } + } + if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { + return d.(pref.EnumDescriptor) + } + return ed +} + +func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { + return md2 + } + } + for i := range file.allMessages { + if md2 := &file.allMessages[i]; md2.L0.FullName == md.FullName() { + return md2 + } + } + if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { + return d.(pref.MessageDescriptor) + } + return md +} + +func (fd *File) unmarshalFull(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + + var enumIdx, messageIdx, extensionIdx, serviceIdx int + var rawOptions []byte + fd.L2 = new(FileL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_PublicDependency_field_number: + fd.L2.Imports[v].IsPublic = true + case genid.FileDescriptorProto_WeakDependency_field_number: + fd.L2.Imports[v].IsWeak = true + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FileDescriptorProto_Dependency_field_number: + path := sb.MakeString(v) + imp, _ := fd.builder.FileRegistry.FindFileByPath(path) + if imp == nil { + imp = PlaceholderFile(path) + } + fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) + case genid.FileDescriptorProto_EnumType_field_number: + fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ + case genid.FileDescriptorProto_MessageType_field_number: + fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb) + messageIdx++ + case genid.FileDescriptorProto_Extension_field_number: + fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) + extensionIdx++ + case genid.FileDescriptorProto_Service_field_number: + fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb) + serviceIdx++ + case genid.FileDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions) +} + +func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { + var rawValues [][]byte + var rawOptions []byte + if !ed.L1.eagerValues { + ed.L2 = new(EnumL2) + } + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_Value_field_number: + rawValues = append(rawValues, v) + case genid.EnumDescriptorProto_ReservedName_field_number: + ed.L2.ReservedNames.List = 
append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + case genid.EnumDescriptorProto_ReservedRange_field_number: + ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) + case genid.EnumDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if !ed.L1.eagerValues && len(rawValues) > 0 { + ed.L2.Values.List = make([]EnumValue, len(rawValues)) + for i, b := range rawValues { + ed.L2.Values.List[i].unmarshalFull(b, sb, ed.L0.ParentFile, ed, i) + } + } + ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) +} + +func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: + r[0] = pref.EnumNumber(v) + case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: + r[1] = pref.EnumNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r +} + +func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + vd.L0.ParentFile = pf + vd.L0.Parent = pd + vd.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.EnumValueDescriptorProto_Number_field_number: + vd.L1.Number = pref.EnumNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.EnumValueDescriptorProto_Name_field_number: + // NOTE: Enum values are in the same scope as the enum parent. 
+ vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v) + case genid.EnumValueDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + vd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.EnumValue, rawOptions) +} + +func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { + var rawFields, rawOneofs [][]byte + var enumIdx, messageIdx, extensionIdx int + var rawOptions []byte + md.L2 = new(MessageL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_Field_field_number: + rawFields = append(rawFields, v) + case genid.DescriptorProto_OneofDecl_field_number: + rawOneofs = append(rawOneofs, v) + case genid.DescriptorProto_ReservedName_field_number: + md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + case genid.DescriptorProto_ReservedRange_field_number: + md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) + case genid.DescriptorProto_ExtensionRange_field_number: + r, rawOptions := unmarshalMessageExtensionRange(v) + opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions) + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts) + case genid.DescriptorProto_EnumType_field_number: + md.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ + case genid.DescriptorProto_NestedType_field_number: + md.L1.Messages.List[messageIdx].unmarshalFull(v, sb) + messageIdx++ + case genid.DescriptorProto_Extension_field_number: + md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) + extensionIdx++ + case genid.DescriptorProto_Options_field_number: + md.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if len(rawFields) > 0 || len(rawOneofs) > 0 { + md.L2.Fields.List = make([]Field, len(rawFields)) + md.L2.Oneofs.List = make([]Oneof, len(rawOneofs)) + for i, b := range rawFields { + fd := &md.L2.Fields.List[i] + fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) + if fd.L1.Cardinality == pref.Required { + md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) + } + } + for i, b := range rawOneofs { + od := &md.L2.Oneofs.List[i] + od.unmarshalFull(b, sb, md.L0.ParentFile, md, i) + } + } + md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions) +} + +func (md *Message) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MessageOptions_MapEntry_field_number: + md.L1.IsMapEntry = protowire.DecodeBool(v) + case genid.MessageOptions_MessageSetWireFormat_field_number: + md.L1.IsMessageSet = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ReservedRange_Start_field_number: + 
r[0] = pref.FieldNumber(v) + case genid.DescriptorProto_ReservedRange_End_field_number: + r[1] = pref.FieldNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r +} + +func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ExtensionRange_Start_field_number: + r[0] = pref.FieldNumber(v) + case genid.DescriptorProto_ExtensionRange_End_field_number: + r[1] = pref.FieldNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.DescriptorProto_ExtensionRange_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r, rawOptions +} + +func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + fd.L0.ParentFile = pf + fd.L0.Parent = pd + fd.L0.Index = i + + var rawTypeName []byte + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Number_field_number: + fd.L1.Number = pref.FieldNumber(v) + case genid.FieldDescriptorProto_Label_field_number: + fd.L1.Cardinality = pref.Cardinality(v) + case genid.FieldDescriptorProto_Type_field_number: + fd.L1.Kind = pref.Kind(v) + case genid.FieldDescriptorProto_OneofIndex_field_number: + // In Message.unmarshalFull, we allocate slices for both + // the field and oneof descriptors before unmarshaling either + // of them. This ensures pointers to slice elements are stable. 
+ od := &pd.(*Message).L2.Oneofs.List[v] + od.L1.Fields.List = append(od.L1.Fields.List, fd) + if fd.L1.ContainingOneof != nil { + panic("oneof type already set") + } + fd.L1.ContainingOneof = od + case genid.FieldDescriptorProto_Proto3Optional_field_number: + fd.L1.IsProto3Optional = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Name_field_number: + fd.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.FieldDescriptorProto_JsonName_field_number: + fd.L1.JSONName.Init(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: + fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages + case genid.FieldDescriptorProto_TypeName_field_number: + rawTypeName = v + case genid.FieldDescriptorProto_Options_field_number: + fd.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch fd.L1.Kind { + case pref.EnumKind: + fd.L1.Enum = PlaceholderEnum(name) + case pref.MessageKind, pref.GroupKind: + fd.L1.Message = PlaceholderMessage(name) + } + } + fd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Field, rawOptions) +} + +func (fd *Field) unmarshalOptions(b []byte) { + const FieldOptions_EnforceUTF8 = 13 + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + fd.L1.HasPacked = true + fd.L1.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Weak_field_number: + fd.L1.IsWeak = protowire.DecodeBool(v) + case FieldOptions_EnforceUTF8: + fd.L1.HasEnforceUTF8 = true + fd.L1.EnforceUTF8 = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + od.L0.ParentFile = pf + od.L0.Parent = pd + od.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.OneofDescriptorProto_Name_field_number: + od.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.OneofDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + od.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Oneof, rawOptions) +} + +func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { + var rawTypeName []byte + var rawOptions []byte + xd.L2 = new(ExtensionL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_Proto3Optional_field_number: + xd.L2.IsProto3Optional = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.FieldDescriptorProto_JsonName_field_number: + xd.L2.JSONName.Init(sb.MakeString(v)) + case genid.FieldDescriptorProto_DefaultValue_field_number: + xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; 
later resolved in resolveExtensions + case genid.FieldDescriptorProto_TypeName_field_number: + rawTypeName = v + case genid.FieldDescriptorProto_Options_field_number: + xd.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch xd.L1.Kind { + case pref.EnumKind: + xd.L2.Enum = PlaceholderEnum(name) + case pref.MessageKind, pref.GroupKind: + xd.L2.Message = PlaceholderMessage(name) + } + } + xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.FieldOptions_Packed_field_number: + xd.L2.IsPacked = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { + var rawMethods [][]byte + var rawOptions []byte + sd.L2 = new(ServiceL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.ServiceDescriptorProto_Method_field_number: + rawMethods = append(rawMethods, v) + case genid.ServiceDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if len(rawMethods) > 0 { + sd.L2.Methods.List = make([]Method, len(rawMethods)) + for i, b := range rawMethods { + sd.L2.Methods.List[i].unmarshalFull(b, sb, sd.L0.ParentFile, sd, i) + } + } + sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) +} + +func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case genid.MethodDescriptorProto_ClientStreaming_field_number: + md.L1.IsStreamingClient = protowire.DecodeBool(v) + case genid.MethodDescriptorProto_ServerStreaming_field_number: + md.L1.IsStreamingServer = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case genid.MethodDescriptorProto_Name_field_number: + md.L0.FullName = appendFullName(sb, pd.FullName(), v) + case genid.MethodDescriptorProto_InputType_field_number: + md.L1.Input = PlaceholderMessage(makeFullName(sb, v)) + case genid.MethodDescriptorProto_OutputType_field_number: + md.L1.Output = PlaceholderMessage(makeFullName(sb, v)) + case genid.MethodDescriptorProto_Options_field_number: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + md.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Method, rawOptions) +} + +// appendOptions appends src to dst, where the returned slice is never nil. +// This is necessary to distinguish between empty and unpopulated options. +func appendOptions(dst, src []byte) []byte { + if dst == nil { + dst = []byte{} + } + return append(dst, src...) 
+} + +// optionsUnmarshaler constructs a lazy unmarshal function for an options message. +// +// The type of message to unmarshal to is passed as a pointer since the +// vars in descopts may not yet be populated at the time this function is called. +func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage { + if b == nil { + return nil + } + var opts pref.ProtoMessage + var once sync.Once + return func() pref.ProtoMessage { + once.Do(func() { + if *p == nil { + panic("Descriptor.Options called without importing the descriptor package") + } + opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage) + if err := (proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: db.TypeResolver, + }).Unmarshal(b, opts); err != nil { + panic(err) + } + }) + return opts + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go new file mode 100644 index 000000000..c876cd34d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -0,0 +1,282 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + "sort" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type FileImports []pref.FileImport + +func (p *FileImports) Len() int { return len(*p) } +func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] } +func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} + +type Names struct { + List []pref.Name + once sync.Once + has map[pref.Name]int // protected by once +} + +func (p *Names) Len() int { return len(p.List) } +func (p *Names) Get(i int) pref.Name { return p.List[i] } +func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 } +func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *Names) ProtoInternal(pragma.DoNotImplement) {} +func (p *Names) lazyInit() *Names { + p.once.Do(func() { + if len(p.List) > 0 { + p.has = make(map[pref.Name]int, len(p.List)) + for _, s := range p.List { + p.has[s] = p.has[s] + 1 + } + } + }) + return p +} + +// CheckValid reports any errors with the set of names with an error message +// that completes the sentence: "ranges is invalid because it has ..." +func (p *Names) CheckValid() error { + for s, n := range p.lazyInit().has { + switch { + case n > 1: + return errors.New("duplicate name: %q", s) + case false && !s.IsValid(): + // NOTE: The C++ implementation does not validate the identifier. + // See https://github.com/protocolbuffers/protobuf/issues/6335. 
+ return errors.New("invalid name: %q", s) + } + } + return nil +} + +type EnumRanges struct { + List [][2]pref.EnumNumber // start inclusive; end inclusive + once sync.Once + sorted [][2]pref.EnumNumber // protected by once +} + +func (p *EnumRanges) Len() int { return len(p.List) } +func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] } +func (p *EnumRanges) Has(n pref.EnumNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := enumRange(ls[i]); { + case n < r.Start(): + ls = ls[:i] // search lower + case n > r.End(): + ls = ls[i+1:] // search upper + default: + return true + } + } + return false +} +func (p *EnumRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *EnumRanges) ProtoInternal(pragma.DoNotImplement) {} +func (p *EnumRanges) lazyInit() *EnumRanges { + p.once.Do(func() { + p.sorted = append(p.sorted, p.List...) + sort.Slice(p.sorted, func(i, j int) bool { + return p.sorted[i][0] < p.sorted[j][0] + }) + }) + return p +} + +// CheckValid reports any errors with the set of names with an error message +// that completes the sentence: "ranges is invalid because it has ..." +func (p *EnumRanges) CheckValid() error { + var rp enumRange + for i, r := range p.lazyInit().sorted { + r := enumRange(r) + switch { + case !(r.Start() <= r.End()): + return errors.New("invalid range: %v", r) + case !(rp.End() < r.Start()) && i > 0: + return errors.New("overlapping ranges: %v with %v", rp, r) + } + rp = r + } + return nil +} + +type enumRange [2]protoreflect.EnumNumber + +func (r enumRange) Start() protoreflect.EnumNumber { return r[0] } // inclusive +func (r enumRange) End() protoreflect.EnumNumber { return r[1] } // inclusive +func (r enumRange) String() string { + if r.Start() == r.End() { + return fmt.Sprintf("%d", r.Start()) + } + return fmt.Sprintf("%d to %d", r.Start(), r.End()) +} + +type FieldRanges struct { + List [][2]pref.FieldNumber // start inclusive; end exclusive + once sync.Once + sorted [][2]pref.FieldNumber // protected by once +} + +func (p *FieldRanges) Len() int { return len(p.List) } +func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] } +func (p *FieldRanges) Has(n pref.FieldNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := fieldRange(ls[i]); { + case n < r.Start(): + ls = ls[:i] // search lower + case n > r.End(): + ls = ls[i+1:] // search upper + default: + return true + } + } + return false +} +func (p *FieldRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FieldRanges) ProtoInternal(pragma.DoNotImplement) {} +func (p *FieldRanges) lazyInit() *FieldRanges { + p.once.Do(func() { + p.sorted = append(p.sorted, p.List...) + sort.Slice(p.sorted, func(i, j int) bool { + return p.sorted[i][0] < p.sorted[j][0] + }) + }) + return p +} + +// CheckValid reports any errors with the set of ranges with an error message +// that completes the sentence: "ranges is invalid because it has ..." 
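EnumRanges and FieldRanges answer membership queries by sorting the ranges once (guarded by sync.Once) and then binary-searching the sorted copy. Note the differing conventions: enum ranges store both ends inclusive, while field ranges store an exclusive end, which the fieldRange helper below converts back to inclusive by subtracting one. A standalone sketch of the sorted-range lookup with inclusive ends and invented values:

```go
package main

import (
	"fmt"
	"sort"
)

// rng holds start and end, both inclusive (the EnumRanges convention above).
type rng [2]int32

// has performs the same binary search over ranges sorted by start.
func has(sorted []rng, n int32) bool {
	for ls := sorted; len(ls) > 0; {
		i := len(ls) / 2
		switch r := ls[i]; {
		case n < r[0]:
			ls = ls[:i] // search lower half
		case n > r[1]:
			ls = ls[i+1:] // search upper half
		default:
			return true
		}
	}
	return false
}

func main() {
	ranges := []rng{{100, 199}, {5, 5}, {20, 29}}
	sort.Slice(ranges, func(i, j int) bool { return ranges[i][0] < ranges[j][0] })
	fmt.Println(has(ranges, 25), has(ranges, 30), has(ranges, 150)) // true false true
}
```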
+func (p *FieldRanges) CheckValid(isMessageSet bool) error { + var rp fieldRange + for i, r := range p.lazyInit().sorted { + r := fieldRange(r) + switch { + case !isValidFieldNumber(r.Start(), isMessageSet): + return errors.New("invalid field number: %d", r.Start()) + case !isValidFieldNumber(r.End(), isMessageSet): + return errors.New("invalid field number: %d", r.End()) + case !(r.Start() <= r.End()): + return errors.New("invalid range: %v", r) + case !(rp.End() < r.Start()) && i > 0: + return errors.New("overlapping ranges: %v with %v", rp, r) + } + rp = r + } + return nil +} + +// isValidFieldNumber reports whether the field number is valid. +// Unlike the FieldNumber.IsValid method, it allows ranges that cover the +// reserved number range. +func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool { + return protowire.MinValidNumber <= n && (n <= protowire.MaxValidNumber || isMessageSet) +} + +// CheckOverlap reports an error if p and q overlap. +func (p *FieldRanges) CheckOverlap(q *FieldRanges) error { + rps := p.lazyInit().sorted + rqs := q.lazyInit().sorted + for pi, qi := 0, 0; pi < len(rps) && qi < len(rqs); { + rp := fieldRange(rps[pi]) + rq := fieldRange(rqs[qi]) + if !(rp.End() < rq.Start() || rq.End() < rp.Start()) { + return errors.New("overlapping ranges: %v with %v", rp, rq) + } + if rp.Start() < rq.Start() { + pi++ + } else { + qi++ + } + } + return nil +} + +type fieldRange [2]protoreflect.FieldNumber + +func (r fieldRange) Start() protoreflect.FieldNumber { return r[0] } // inclusive +func (r fieldRange) End() protoreflect.FieldNumber { return r[1] - 1 } // inclusive +func (r fieldRange) String() string { + if r.Start() == r.End() { + return fmt.Sprintf("%d", r.Start()) + } + return fmt.Sprintf("%d to %d", r.Start(), r.End()) +} + +type FieldNumbers struct { + List []pref.FieldNumber + once sync.Once + has map[pref.FieldNumber]struct{} // protected by once +} + +func (p *FieldNumbers) Len() int { return len(p.List) } +func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] } +func (p *FieldNumbers) Has(n pref.FieldNumber) bool { + p.once.Do(func() { + if len(p.List) > 0 { + p.has = make(map[pref.FieldNumber]struct{}, len(p.List)) + for _, n := range p.List { + p.has[n] = struct{}{} + } + } + }) + _, ok := p.has[n] + return ok +} +func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} + +type OneofFields struct { + List []pref.FieldDescriptor + once sync.Once + byName map[pref.Name]pref.FieldDescriptor // protected by once + byJSON map[string]pref.FieldDescriptor // protected by once + byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once +} + +func (p *OneofFields) Len() int { return len(p.List) } +func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } +func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } +func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } +func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } +func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} + +func (p *OneofFields) lazyInit() *OneofFields { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) + p.byJSON = make(map[string]pref.FieldDescriptor, 
len(p.List)) + p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) + for _, f := range p.List { + // Field names and numbers are guaranteed to be unique. + p.byName[f.Name()] = f + p.byJSON[f.JSONName()] = f + p.byNum[f.Number()] = f + } + } + }) + return p +} + +type SourceLocations struct { + List []pref.SourceLocation +} + +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.List[i] } +func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go new file mode 100644 index 000000000..6a8825e80 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -0,0 +1,345 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package filedesc + +import ( + "fmt" + "sync" + + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type Enums struct { + List []Enum + once sync.Once + byName map[protoreflect.Name]*Enum // protected by once +} + +func (p *Enums) Len() int { + return len(p.List) +} +func (p *Enums) Get(i int) protoreflect.EnumDescriptor { + return &p.List[i] +} +func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Enums) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Enums) ProtoInternal(pragma.DoNotImplement) {} +func (p *Enums) lazyInit() *Enums { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Enum, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type EnumValues struct { + List []EnumValue + once sync.Once + byName map[protoreflect.Name]*EnumValue // protected by once + byNum map[protoreflect.EnumNumber]*EnumValue // protected by once +} + +func (p *EnumValues) Len() int { + return len(p.List) +} +func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor { + return &p.List[i] +} +func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + if d := p.lazyInit().byNum[n]; d != nil { + return d + } + return nil +} +func (p *EnumValues) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {} +func (p *EnumValues) lazyInit() *EnumValues { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List)) + p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + if _, ok := p.byNum[d.Number()]; !ok { + p.byNum[d.Number()] = d + } + } + } + }) + return p +} + +type Messages struct { + List []Message + once sync.Once + byName map[protoreflect.Name]*Message // protected by once +} + +func (p *Messages) Len() int { + return len(p.List) +} +func (p 
*Messages) Get(i int) protoreflect.MessageDescriptor { + return &p.List[i] +} +func (p *Messages) ByName(s protoreflect.Name) protoreflect.MessageDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Messages) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Messages) ProtoInternal(pragma.DoNotImplement) {} +func (p *Messages) lazyInit() *Messages { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Message, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Fields struct { + List []Field + once sync.Once + byName map[protoreflect.Name]*Field // protected by once + byJSON map[string]*Field // protected by once + byNum map[protoreflect.FieldNumber]*Field // protected by once +} + +func (p *Fields) Len() int { + return len(p.List) +} +func (p *Fields) Get(i int) protoreflect.FieldDescriptor { + return &p.List[i] +} +func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byJSON[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + if d := p.lazyInit().byNum[n]; d != nil { + return d + } + return nil +} +func (p *Fields) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Fields) ProtoInternal(pragma.DoNotImplement) {} +func (p *Fields) lazyInit() *Fields { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Field, len(p.List)) + p.byJSON = make(map[string]*Field, len(p.List)) + p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + if _, ok := p.byJSON[d.JSONName()]; !ok { + p.byJSON[d.JSONName()] = d + } + if _, ok := p.byNum[d.Number()]; !ok { + p.byNum[d.Number()] = d + } + } + } + }) + return p +} + +type Oneofs struct { + List []Oneof + once sync.Once + byName map[protoreflect.Name]*Oneof // protected by once +} + +func (p *Oneofs) Len() int { + return len(p.List) +} +func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor { + return &p.List[i] +} +func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Oneofs) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {} +func (p *Oneofs) lazyInit() *Oneofs { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Oneof, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Extensions struct { + List []Extension + once sync.Once + byName map[protoreflect.Name]*Extension // protected by once +} + +func (p *Extensions) Len() int { + return len(p.List) +} +func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor { + return &p.List[i] +} +func (p *Extensions) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Extensions) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p 
*Extensions) ProtoInternal(pragma.DoNotImplement) {} +func (p *Extensions) lazyInit() *Extensions { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Extension, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Services struct { + List []Service + once sync.Once + byName map[protoreflect.Name]*Service // protected by once +} + +func (p *Services) Len() int { + return len(p.List) +} +func (p *Services) Get(i int) protoreflect.ServiceDescriptor { + return &p.List[i] +} +func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Services) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Services) ProtoInternal(pragma.DoNotImplement) {} +func (p *Services) lazyInit() *Services { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Service, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Methods struct { + List []Method + once sync.Once + byName map[protoreflect.Name]*Method // protected by once +} + +func (p *Methods) Len() int { + return len(p.List) +} +func (p *Methods) Get(i int) protoreflect.MethodDescriptor { + return &p.List[i] +} +func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Methods) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Methods) ProtoInternal(pragma.DoNotImplement) {} +func (p *Methods) lazyInit() *Methods { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Method, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go new file mode 100644 index 000000000..dbf2c605b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -0,0 +1,107 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +var ( + emptyNames = new(Names) + emptyEnumRanges = new(EnumRanges) + emptyFieldRanges = new(FieldRanges) + emptyFieldNumbers = new(FieldNumbers) + emptySourceLocations = new(SourceLocations) + + emptyFiles = new(FileImports) + emptyMessages = new(Messages) + emptyFields = new(Fields) + emptyOneofs = new(Oneofs) + emptyEnums = new(Enums) + emptyEnumValues = new(EnumValues) + emptyExtensions = new(Extensions) + emptyServices = new(Services) +) + +// PlaceholderFile is a placeholder, representing only the file path. 
+type PlaceholderFile string + +func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f } +func (f PlaceholderFile) Parent() pref.Descriptor { return nil } +func (f PlaceholderFile) Index() int { return 0 } +func (f PlaceholderFile) Syntax() pref.Syntax { return 0 } +func (f PlaceholderFile) Name() pref.Name { return "" } +func (f PlaceholderFile) FullName() pref.FullName { return "" } +func (f PlaceholderFile) IsPlaceholder() bool { return true } +func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File } +func (f PlaceholderFile) Path() string { return string(f) } +func (f PlaceholderFile) Package() pref.FullName { return "" } +func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles } +func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages } +func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums } +func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions } +func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices } +func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations } +func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return } +func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderEnum is a placeholder, representing only the full name. +type PlaceholderEnum pref.FullName + +func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil } +func (e PlaceholderEnum) Parent() pref.Descriptor { return nil } +func (e PlaceholderEnum) Index() int { return 0 } +func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 } +func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() } +func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) } +func (e PlaceholderEnum) IsPlaceholder() bool { return true } +func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum } +func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues } +func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames } +func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return } +func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderEnumValue is a placeholder, representing only the full name. +type PlaceholderEnumValue pref.FullName + +func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil } +func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil } +func (e PlaceholderEnumValue) Index() int { return 0 } +func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 } +func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() } +func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) } +func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } +func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue } +func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 } +func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return } +func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderMessage is a placeholder, representing only the full name. 
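The Placeholder* types here are essentially the full name (or file path) stored as a string, with every other accessor returning an empty or zero value and IsPlaceholder reporting true; the resolve* methods in desc_lazy.go later replace them with real descriptors when the dependency can be found. A cut-down sketch of the same "string as descriptor" trick, using invented names rather than the protoreflect interfaces:

```go
package main

import (
	"fmt"
	"strings"
)

// FullName mimics a dotted protobuf full name.
type FullName string

// Name returns the last dotted component.
func (n FullName) Name() string {
	if i := strings.LastIndexByte(string(n), '.'); i >= 0 {
		return string(n)[i+1:]
	}
	return string(n)
}

// PlaceholderMessage carries nothing but its full name.
type PlaceholderMessage FullName

func (m PlaceholderMessage) FullName() FullName  { return FullName(m) }
func (m PlaceholderMessage) Name() string        { return FullName(m).Name() }
func (m PlaceholderMessage) IsPlaceholder() bool { return true }

func main() {
	m := PlaceholderMessage("example.v1.Order")
	fmt.Println(m.FullName(), m.Name(), m.IsPlaceholder()) // example.v1.Order Order true
}
```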
+type PlaceholderMessage pref.FullName + +func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil } +func (m PlaceholderMessage) Parent() pref.Descriptor { return nil } +func (m PlaceholderMessage) Index() int { return 0 } +func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 } +func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() } +func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) } +func (m PlaceholderMessage) IsPlaceholder() bool { return true } +func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message } +func (m PlaceholderMessage) IsMapEntry() bool { return false } +func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields } +func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs } +func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames } +func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers } +func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") } +func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages } +func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums } +func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions } +func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return } +func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go new file mode 100644 index 000000000..0a0dd35de --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -0,0 +1,297 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filetype provides functionality for wrapping descriptors +// with Go type information. +package filetype + +import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" + fdesc "google.golang.org/protobuf/internal/filedesc" + pimpl "google.golang.org/protobuf/internal/impl" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// Builder constructs type descriptors from a raw file descriptor +// and associated Go types for each enum and message declaration. +// +// +// Flattened Ordering +// +// The protobuf type system represents declarations as a tree. Certain nodes in +// the tree require us to either associate it with a concrete Go type or to +// resolve a dependency, which is information that must be provided separately +// since it cannot be derived from the file descriptor alone. +// +// However, representing a tree as Go literals is difficult to simply do in a +// space and time efficient way. Thus, we store them as a flattened list of +// objects where the serialization order from the tree-based form is important. 
+// +// The "flattened ordering" is defined as a tree traversal of all enum, message, +// extension, and service declarations using the following algorithm: +// +// def VisitFileDecls(fd): +// for e in fd.Enums: yield e +// for m in fd.Messages: yield m +// for x in fd.Extensions: yield x +// for s in fd.Services: yield s +// for m in fd.Messages: yield from VisitMessageDecls(m) +// +// def VisitMessageDecls(md): +// for e in md.Enums: yield e +// for m in md.Messages: yield m +// for x in md.Extensions: yield x +// for m in md.Messages: yield from VisitMessageDecls(m) +// +// The traversal starts at the root file descriptor and yields each direct +// declaration within each node before traversing into sub-declarations +// that children themselves may have. +type Builder struct { + // File is the underlying file descriptor builder. + File fdesc.Builder + + // GoTypes is a unique set of the Go types for all declarations and + // dependencies. Each type is represented as a zero value of the Go type. + // + // Declarations are Go types generated for enums and messages directly + // declared (not publicly imported) in the proto source file. + // Messages for map entries are accounted for, but represented by nil. + // Enum declarations in "flattened ordering" come first, followed by + // message declarations in "flattened ordering". + // + // Dependencies are Go types for enums or messages referenced by + // message fields (excluding weak fields), for parent extended messages of + // extension fields, for enums or messages referenced by extension fields, + // and for input and output messages referenced by service methods. + // Dependencies must come after declarations, but the ordering of + // dependencies themselves is unspecified. + GoTypes []interface{} + + // DependencyIndexes is an ordered list of indexes into GoTypes for the + // dependencies of messages, extensions, or services. + // + // There are 5 sub-lists in "flattened ordering" concatenated back-to-back: + // 0. Message field dependencies: list of the enum or message type + // referred to by every message field. + // 1. Extension field targets: list of the extended parent message of + // every extension. + // 2. Extension field dependencies: list of the enum or message type + // referred to by every extension field. + // 3. Service method inputs: list of the input message type + // referred to by every service method. + // 4. Service method outputs: list of the output message type + // referred to by every service method. + // + // The offset into DependencyIndexes for the start of each sub-list + // is appended to the end in reverse order. + DependencyIndexes []int32 + + // EnumInfos is a list of enum infos in "flattened ordering". + EnumInfos []pimpl.EnumInfo + + // MessageInfos is a list of message infos in "flattened ordering". + // If provided, the GoType and PBType for each element is populated. + // + // Requirement: len(MessageInfos) == len(Build.Messages) + MessageInfos []pimpl.MessageInfo + + // ExtensionInfos is a list of extension infos in "flattened ordering". + // Each element is initialized and registered with the protoregistry package. + // + // Requirement: len(LegacyExtensions) == len(Build.Extensions) + ExtensionInfos []pimpl.ExtensionInfo + + // TypeRegistry is the registry to register each type descriptor. + // If nil, it uses protoregistry.GlobalTypes. 
+	TypeRegistry interface {
+		RegisterMessage(pref.MessageType) error
+		RegisterEnum(pref.EnumType) error
+		RegisterExtension(pref.ExtensionType) error
+	}
+}
+
+// Out is the output of the builder.
+type Out struct {
+	File pref.FileDescriptor
+}
+
+func (tb Builder) Build() (out Out) {
+	// Replace the resolver with one that resolves dependencies by index,
+	// which is faster and more reliable than relying on the global registry.
+	if tb.File.FileRegistry == nil {
+		tb.File.FileRegistry = preg.GlobalFiles
+	}
+	tb.File.FileRegistry = &resolverByIndex{
+		goTypes: tb.GoTypes,
+		depIdxs: tb.DependencyIndexes,
+		fileRegistry: tb.File.FileRegistry,
+	}
+
+	// Initialize registry if unpopulated.
+	if tb.TypeRegistry == nil {
+		tb.TypeRegistry = preg.GlobalTypes
+	}
+
+	fbOut := tb.File.Build()
+	out.File = fbOut.File
+
+	// Process enums.
+	enumGoTypes := tb.GoTypes[:len(fbOut.Enums)]
+	if len(tb.EnumInfos) != len(fbOut.Enums) {
+		panic("mismatching enum lengths")
+	}
+	if len(fbOut.Enums) > 0 {
+		for i := range fbOut.Enums {
+			tb.EnumInfos[i] = pimpl.EnumInfo{
+				GoReflectType: reflect.TypeOf(enumGoTypes[i]),
+				Desc: &fbOut.Enums[i],
+			}
+			// Register enum types.
+			if err := tb.TypeRegistry.RegisterEnum(&tb.EnumInfos[i]); err != nil {
+				panic(err)
+			}
+		}
+	}
+
+	// Process messages.
+	messageGoTypes := tb.GoTypes[len(fbOut.Enums):][:len(fbOut.Messages)]
+	if len(tb.MessageInfos) != len(fbOut.Messages) {
+		panic("mismatching message lengths")
+	}
+	if len(fbOut.Messages) > 0 {
+		for i := range fbOut.Messages {
+			if messageGoTypes[i] == nil {
+				continue // skip map entry
+			}
+
+			tb.MessageInfos[i].GoReflectType = reflect.TypeOf(messageGoTypes[i])
+			tb.MessageInfos[i].Desc = &fbOut.Messages[i]
+
+			// Register message types.
+			if err := tb.TypeRegistry.RegisterMessage(&tb.MessageInfos[i]); err != nil {
+				panic(err)
+			}
+		}
+
+		// As a special-case for descriptor.proto,
+		// locally register concrete message type for the options.
+		if out.File.Path() == "google/protobuf/descriptor.proto" && out.File.Package() == "google.protobuf" {
+			for i := range fbOut.Messages {
+				switch fbOut.Messages[i].Name() {
+				case "FileOptions":
+					descopts.File = messageGoTypes[i].(pref.ProtoMessage)
+				case "EnumOptions":
+					descopts.Enum = messageGoTypes[i].(pref.ProtoMessage)
+				case "EnumValueOptions":
+					descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage)
+				case "MessageOptions":
+					descopts.Message = messageGoTypes[i].(pref.ProtoMessage)
+				case "FieldOptions":
+					descopts.Field = messageGoTypes[i].(pref.ProtoMessage)
+				case "OneofOptions":
+					descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage)
+				case "ExtensionRangeOptions":
+					descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage)
+				case "ServiceOptions":
+					descopts.Service = messageGoTypes[i].(pref.ProtoMessage)
+				case "MethodOptions":
+					descopts.Method = messageGoTypes[i].(pref.ProtoMessage)
+				}
+			}
+		}
+	}
+
+	// Process extensions.
+	if len(tb.ExtensionInfos) != len(fbOut.Extensions) {
+		panic("mismatching extension lengths")
+	}
+	var depIdx int32
+	for i := range fbOut.Extensions {
+		// For enum and message kinds, determine the referent Go type so
+		// that we can construct their constructors.
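+		//
+		// For illustration only (hypothetical values, not taken from any
+		// generated file): sub-list 2 of DependencyIndexes holds, for each
+		// extension of enum or message kind, an index into GoTypes. depIdxs.Get
+		// reads the sub-list start offsets stored in reverse at the tail of the
+		// slice, so Get(deps, 2, n) == deps[deps[len(deps)-3]+n]. With
+		// deps = [7, 8, 5, 9, 4, 4, 3, 2, 0] (two message-field deps, one
+		// extension target, one extension dep, no service deps),
+		// Get(deps, 2, 0) == deps[3] == 9, i.e. GoTypes[9] supplies the Go type
+		// for the first such extension.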
+		const listExtDeps = 2
+		var goType reflect.Type
+		switch fbOut.Extensions[i].L1.Kind {
+		case pref.EnumKind:
+			j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx)
+			goType = reflect.TypeOf(tb.GoTypes[j])
+			depIdx++
+		case pref.MessageKind, pref.GroupKind:
+			j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx)
+			goType = reflect.TypeOf(tb.GoTypes[j])
+			depIdx++
+		default:
+			goType = goTypeForPBKind[fbOut.Extensions[i].L1.Kind]
+		}
+		if fbOut.Extensions[i].IsList() {
+			goType = reflect.SliceOf(goType)
+		}
+
+		pimpl.InitExtensionInfo(&tb.ExtensionInfos[i], &fbOut.Extensions[i], goType)
+
+		// Register extension types.
+		if err := tb.TypeRegistry.RegisterExtension(&tb.ExtensionInfos[i]); err != nil {
+			panic(err)
+		}
+	}
+
+	return out
+}
+
+var goTypeForPBKind = map[pref.Kind]reflect.Type{
+	pref.BoolKind: reflect.TypeOf(bool(false)),
+	pref.Int32Kind: reflect.TypeOf(int32(0)),
+	pref.Sint32Kind: reflect.TypeOf(int32(0)),
+	pref.Sfixed32Kind: reflect.TypeOf(int32(0)),
+	pref.Int64Kind: reflect.TypeOf(int64(0)),
+	pref.Sint64Kind: reflect.TypeOf(int64(0)),
+	pref.Sfixed64Kind: reflect.TypeOf(int64(0)),
+	pref.Uint32Kind: reflect.TypeOf(uint32(0)),
+	pref.Fixed32Kind: reflect.TypeOf(uint32(0)),
+	pref.Uint64Kind: reflect.TypeOf(uint64(0)),
+	pref.Fixed64Kind: reflect.TypeOf(uint64(0)),
+	pref.FloatKind: reflect.TypeOf(float32(0)),
+	pref.DoubleKind: reflect.TypeOf(float64(0)),
+	pref.StringKind: reflect.TypeOf(string("")),
+	pref.BytesKind: reflect.TypeOf([]byte(nil)),
+}
+
+type depIdxs []int32
+
+// Get retrieves the jth element of the ith sub-list.
+func (x depIdxs) Get(i, j int32) int32 {
+	return x[x[int32(len(x))-i-1]+j]
+}
+
+type (
+	resolverByIndex struct {
+		goTypes []interface{}
+		depIdxs depIdxs
+		fileRegistry
+	}
+	fileRegistry interface {
+		FindFileByPath(string) (pref.FileDescriptor, error)
+		FindDescriptorByName(pref.FullName) (pref.Descriptor, error)
+		RegisterFile(pref.FileDescriptor) error
+	}
+)
+
+func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor {
+	if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
+		return &es[depIdx]
+	} else {
+		return pimpl.Export{}.EnumDescriptorOf(r.goTypes[depIdx])
+	}
+}
+
+func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor {
+	if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) {
+		return &ms[depIdx-len(es)]
+	} else {
+		return pimpl.Export{}.MessageDescriptorOf(r.goTypes[depIdx])
+	}
+}
diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go
new file mode 100644
index 000000000..58372dd34
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go
@@ -0,0 +1,24 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flags provides a set of flags controlled by build tags.
+package flags
+
+// ProtoLegacy specifies whether to enable support for legacy functionality
+// such as MessageSets, weak fields, and various other obscure behavior
+// that is necessary to maintain backwards compatibility with proto1 or
+// the pre-release variants of proto2 and proto3.
+//
+// This is disabled by default unless built with the "protolegacy" tag.
+//
+// WARNING: The compatibility agreement covers nothing provided by this flag.
+// As such, functionality may suddenly be removed or changed at our discretion. +const ProtoLegacy = protoLegacy + +// LazyUnmarshalExtensions specifies whether to lazily unmarshal extensions. +// +// Lazy extension unmarshaling validates the contents of message-valued +// extension fields at unmarshal time, but defers creating the message +// structure until the extension is first accessed. +const LazyUnmarshalExtensions = ProtoLegacy diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go new file mode 100644 index 000000000..a72995f02 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !protolegacy + +package flags + +const protoLegacy = false diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go new file mode 100644 index 000000000..772e2f0e4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build protolegacy + +package flags + +const protoLegacy = true diff --git a/vendor/google.golang.org/protobuf/internal/genid/any_gen.go b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go new file mode 100644 index 000000000..e6f7d47ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/any_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_any_proto = "google/protobuf/any.proto" + +// Names for google.protobuf.Any. +const ( + Any_message_name protoreflect.Name = "Any" + Any_message_fullname protoreflect.FullName = "google.protobuf.Any" +) + +// Field names for google.protobuf.Any. +const ( + Any_TypeUrl_field_name protoreflect.Name = "type_url" + Any_Value_field_name protoreflect.Name = "value" + + Any_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Any.type_url" + Any_Value_field_fullname protoreflect.FullName = "google.protobuf.Any.value" +) + +// Field numbers for google.protobuf.Any. +const ( + Any_TypeUrl_field_number protoreflect.FieldNumber = 1 + Any_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go new file mode 100644 index 000000000..df8f91850 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go @@ -0,0 +1,106 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_api_proto = "google/protobuf/api.proto" + +// Names for google.protobuf.Api. +const ( + Api_message_name protoreflect.Name = "Api" + Api_message_fullname protoreflect.FullName = "google.protobuf.Api" +) + +// Field names for google.protobuf.Api. +const ( + Api_Name_field_name protoreflect.Name = "name" + Api_Methods_field_name protoreflect.Name = "methods" + Api_Options_field_name protoreflect.Name = "options" + Api_Version_field_name protoreflect.Name = "version" + Api_SourceContext_field_name protoreflect.Name = "source_context" + Api_Mixins_field_name protoreflect.Name = "mixins" + Api_Syntax_field_name protoreflect.Name = "syntax" + + Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name" + Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods" + Api_Options_field_fullname protoreflect.FullName = "google.protobuf.Api.options" + Api_Version_field_fullname protoreflect.FullName = "google.protobuf.Api.version" + Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context" + Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins" + Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax" +) + +// Field numbers for google.protobuf.Api. +const ( + Api_Name_field_number protoreflect.FieldNumber = 1 + Api_Methods_field_number protoreflect.FieldNumber = 2 + Api_Options_field_number protoreflect.FieldNumber = 3 + Api_Version_field_number protoreflect.FieldNumber = 4 + Api_SourceContext_field_number protoreflect.FieldNumber = 5 + Api_Mixins_field_number protoreflect.FieldNumber = 6 + Api_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Method. +const ( + Method_message_name protoreflect.Name = "Method" + Method_message_fullname protoreflect.FullName = "google.protobuf.Method" +) + +// Field names for google.protobuf.Method. +const ( + Method_Name_field_name protoreflect.Name = "name" + Method_RequestTypeUrl_field_name protoreflect.Name = "request_type_url" + Method_RequestStreaming_field_name protoreflect.Name = "request_streaming" + Method_ResponseTypeUrl_field_name protoreflect.Name = "response_type_url" + Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming" + Method_Options_field_name protoreflect.Name = "options" + Method_Syntax_field_name protoreflect.Name = "syntax" + + Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name" + Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url" + Method_RequestStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.request_streaming" + Method_ResponseTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.response_type_url" + Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming" + Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options" + Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax" +) + +// Field numbers for google.protobuf.Method. 
+const ( + Method_Name_field_number protoreflect.FieldNumber = 1 + Method_RequestTypeUrl_field_number protoreflect.FieldNumber = 2 + Method_RequestStreaming_field_number protoreflect.FieldNumber = 3 + Method_ResponseTypeUrl_field_number protoreflect.FieldNumber = 4 + Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5 + Method_Options_field_number protoreflect.FieldNumber = 6 + Method_Syntax_field_number protoreflect.FieldNumber = 7 +) + +// Names for google.protobuf.Mixin. +const ( + Mixin_message_name protoreflect.Name = "Mixin" + Mixin_message_fullname protoreflect.FullName = "google.protobuf.Mixin" +) + +// Field names for google.protobuf.Mixin. +const ( + Mixin_Name_field_name protoreflect.Name = "name" + Mixin_Root_field_name protoreflect.Name = "root" + + Mixin_Name_field_fullname protoreflect.FullName = "google.protobuf.Mixin.name" + Mixin_Root_field_fullname protoreflect.FullName = "google.protobuf.Mixin.root" +) + +// Field numbers for google.protobuf.Mixin. +const ( + Mixin_Name_field_number protoreflect.FieldNumber = 1 + Mixin_Root_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go new file mode 100644 index 000000000..e3cdf1c20 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -0,0 +1,829 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto" + +// Names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet" + FileDescriptorSet_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet" +) + +// Field names for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_name protoreflect.Name = "file" + + FileDescriptorSet_File_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorSet.file" +) + +// Field numbers for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_message_name protoreflect.Name = "FileDescriptorProto" + FileDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto" +) + +// Field names for google.protobuf.FileDescriptorProto. 
+const ( + FileDescriptorProto_Name_field_name protoreflect.Name = "name" + FileDescriptorProto_Package_field_name protoreflect.Name = "package" + FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency" + FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency" + FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency" + FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type" + FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + FileDescriptorProto_Service_field_name protoreflect.Name = "service" + FileDescriptorProto_Extension_field_name protoreflect.Name = "extension" + FileDescriptorProto_Options_field_name protoreflect.Name = "options" + FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" + FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + + FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" + FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" + FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency" + FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency" + FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency" + FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type" + FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type" + FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service" + FileDescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.extension" + FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" + FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" + FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" +) + +// Field numbers for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FileDescriptorProto_Package_field_number protoreflect.FieldNumber = 2 + FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3 + FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10 + FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11 + FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4 + FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5 + FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6 + FileDescriptorProto_Extension_field_number protoreflect.FieldNumber = 7 + FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 + FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 +) + +// Names for google.protobuf.DescriptorProto. 
+const ( + DescriptorProto_message_name protoreflect.Name = "DescriptorProto" + DescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto" +) + +// Field names for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_name protoreflect.Name = "name" + DescriptorProto_Field_field_name protoreflect.Name = "field" + DescriptorProto_Extension_field_name protoreflect.Name = "extension" + DescriptorProto_NestedType_field_name protoreflect.Name = "nested_type" + DescriptorProto_EnumType_field_name protoreflect.Name = "enum_type" + DescriptorProto_ExtensionRange_field_name protoreflect.Name = "extension_range" + DescriptorProto_OneofDecl_field_name protoreflect.Name = "oneof_decl" + DescriptorProto_Options_field_name protoreflect.Name = "options" + DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name" + DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field" + DescriptorProto_Extension_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension" + DescriptorProto_NestedType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.nested_type" + DescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.enum_type" + DescriptorProto_ExtensionRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.extension_range" + DescriptorProto_OneofDecl_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.oneof_decl" + DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options" + DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range" + DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + DescriptorProto_Field_field_number protoreflect.FieldNumber = 2 + DescriptorProto_Extension_field_number protoreflect.FieldNumber = 6 + DescriptorProto_NestedType_field_number protoreflect.FieldNumber = 3 + DescriptorProto_EnumType_field_number protoreflect.FieldNumber = 4 + DescriptorProto_ExtensionRange_field_number protoreflect.FieldNumber = 5 + DescriptorProto_OneofDecl_field_number protoreflect.FieldNumber = 8 + DescriptorProto_Options_field_number protoreflect.FieldNumber = 7 + DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9 + DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10 +) + +// Names for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_message_name protoreflect.Name = "ExtensionRange" + DescriptorProto_ExtensionRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange" +) + +// Field names for google.protobuf.DescriptorProto.ExtensionRange. 
+const ( + DescriptorProto_ExtensionRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ExtensionRange_End_field_name protoreflect.Name = "end" + DescriptorProto_ExtensionRange_Options_field_name protoreflect.Name = "options" + + DescriptorProto_ExtensionRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.start" + DescriptorProto_ExtensionRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.end" + DescriptorProto_ExtensionRange_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ExtensionRange.options" +) + +// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ExtensionRange_End_field_number protoreflect.FieldNumber = 2 + DescriptorProto_ExtensionRange_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_message_name protoreflect.Name = "ReservedRange" + DescriptorProto_ReservedRange_message_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange" +) + +// Field names for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_name protoreflect.Name = "start" + DescriptorProto_ReservedRange_End_field_name protoreflect.Name = "end" + + DescriptorProto_ReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.start" + DescriptorProto_ReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.ReservedRange.end" +) + +// Field numbers for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start_field_number protoreflect.FieldNumber = 1 + DescriptorProto_ReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_message_name protoreflect.Name = "ExtensionRangeOptions" + ExtensionRangeOptions_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions" +) + +// Field names for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions. +const ( + ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_message_name protoreflect.Name = "FieldDescriptorProto" + FieldDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto" +) + +// Field names for google.protobuf.FieldDescriptorProto. 
+const ( + FieldDescriptorProto_Name_field_name protoreflect.Name = "name" + FieldDescriptorProto_Number_field_name protoreflect.Name = "number" + FieldDescriptorProto_Label_field_name protoreflect.Name = "label" + FieldDescriptorProto_Type_field_name protoreflect.Name = "type" + FieldDescriptorProto_TypeName_field_name protoreflect.Name = "type_name" + FieldDescriptorProto_Extendee_field_name protoreflect.Name = "extendee" + FieldDescriptorProto_DefaultValue_field_name protoreflect.Name = "default_value" + FieldDescriptorProto_OneofIndex_field_name protoreflect.Name = "oneof_index" + FieldDescriptorProto_JsonName_field_name protoreflect.Name = "json_name" + FieldDescriptorProto_Options_field_name protoreflect.Name = "options" + FieldDescriptorProto_Proto3Optional_field_name protoreflect.Name = "proto3_optional" + + FieldDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.name" + FieldDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.number" + FieldDescriptorProto_Label_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.label" + FieldDescriptorProto_Type_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type" + FieldDescriptorProto_TypeName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.type_name" + FieldDescriptorProto_Extendee_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.extendee" + FieldDescriptorProto_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.default_value" + FieldDescriptorProto_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.oneof_index" + FieldDescriptorProto_JsonName_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.json_name" + FieldDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.options" + FieldDescriptorProto_Proto3Optional_field_fullname protoreflect.FullName = "google.protobuf.FieldDescriptorProto.proto3_optional" +) + +// Field numbers for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + FieldDescriptorProto_Number_field_number protoreflect.FieldNumber = 3 + FieldDescriptorProto_Label_field_number protoreflect.FieldNumber = 4 + FieldDescriptorProto_Type_field_number protoreflect.FieldNumber = 5 + FieldDescriptorProto_TypeName_field_number protoreflect.FieldNumber = 6 + FieldDescriptorProto_Extendee_field_number protoreflect.FieldNumber = 2 + FieldDescriptorProto_DefaultValue_field_number protoreflect.FieldNumber = 7 + FieldDescriptorProto_OneofIndex_field_number protoreflect.FieldNumber = 9 + FieldDescriptorProto_JsonName_field_number protoreflect.FieldNumber = 10 + FieldDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 + FieldDescriptorProto_Proto3Optional_field_number protoreflect.FieldNumber = 17 +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Type. +const ( + FieldDescriptorProto_Type_enum_fullname = "google.protobuf.FieldDescriptorProto.Type" + FieldDescriptorProto_Type_enum_name = "Type" +) + +// Full and short names for google.protobuf.FieldDescriptorProto.Label. +const ( + FieldDescriptorProto_Label_enum_fullname = "google.protobuf.FieldDescriptorProto.Label" + FieldDescriptorProto_Label_enum_name = "Label" +) + +// Names for google.protobuf.OneofDescriptorProto. 
+const ( + OneofDescriptorProto_message_name protoreflect.Name = "OneofDescriptorProto" + OneofDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto" +) + +// Field names for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_name protoreflect.Name = "name" + OneofDescriptorProto_Options_field_name protoreflect.Name = "options" + + OneofDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.name" + OneofDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.OneofDescriptorProto.options" +) + +// Field numbers for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + OneofDescriptorProto_Options_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_message_name protoreflect.Name = "EnumDescriptorProto" + EnumDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto" +) + +// Field names for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumDescriptorProto_Value_field_name protoreflect.Name = "value" + EnumDescriptorProto_Options_field_name protoreflect.Name = "options" + EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range" + EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name" + + EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name" + EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value" + EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options" + EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range" + EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name" +) + +// Field numbers for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_Value_field_number protoreflect.FieldNumber = 2 + EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 + EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4 + EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_message_name protoreflect.Name = "EnumReservedRange" + EnumDescriptorProto_EnumReservedRange_message_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange" +) + +// Field names for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start_field_name protoreflect.Name = "start" + EnumDescriptorProto_EnumReservedRange_End_field_name protoreflect.Name = "end" + + EnumDescriptorProto_EnumReservedRange_Start_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.start" + EnumDescriptorProto_EnumReservedRange_End_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.EnumReservedRange.end" +) + +// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. 
+const ( + EnumDescriptorProto_EnumReservedRange_Start_field_number protoreflect.FieldNumber = 1 + EnumDescriptorProto_EnumReservedRange_End_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_message_name protoreflect.Name = "EnumValueDescriptorProto" + EnumValueDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto" +) + +// Field names for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_name protoreflect.Name = "name" + EnumValueDescriptorProto_Number_field_name protoreflect.Name = "number" + EnumValueDescriptorProto_Options_field_name protoreflect.Name = "options" + + EnumValueDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.name" + EnumValueDescriptorProto_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.number" + EnumValueDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValueDescriptorProto.options" +) + +// Field numbers for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + EnumValueDescriptorProto_Number_field_number protoreflect.FieldNumber = 2 + EnumValueDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_message_name protoreflect.Name = "ServiceDescriptorProto" + ServiceDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto" +) + +// Field names for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_name protoreflect.Name = "name" + ServiceDescriptorProto_Method_field_name protoreflect.Name = "method" + ServiceDescriptorProto_Options_field_name protoreflect.Name = "options" + + ServiceDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.name" + ServiceDescriptorProto_Method_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.method" + ServiceDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.ServiceDescriptorProto.options" +) + +// Field numbers for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + ServiceDescriptorProto_Method_field_number protoreflect.FieldNumber = 2 + ServiceDescriptorProto_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_message_name protoreflect.Name = "MethodDescriptorProto" + MethodDescriptorProto_message_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto" +) + +// Field names for google.protobuf.MethodDescriptorProto. 
+const ( + MethodDescriptorProto_Name_field_name protoreflect.Name = "name" + MethodDescriptorProto_InputType_field_name protoreflect.Name = "input_type" + MethodDescriptorProto_OutputType_field_name protoreflect.Name = "output_type" + MethodDescriptorProto_Options_field_name protoreflect.Name = "options" + MethodDescriptorProto_ClientStreaming_field_name protoreflect.Name = "client_streaming" + MethodDescriptorProto_ServerStreaming_field_name protoreflect.Name = "server_streaming" + + MethodDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.name" + MethodDescriptorProto_InputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.input_type" + MethodDescriptorProto_OutputType_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.output_type" + MethodDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.options" + MethodDescriptorProto_ClientStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.client_streaming" + MethodDescriptorProto_ServerStreaming_field_fullname protoreflect.FullName = "google.protobuf.MethodDescriptorProto.server_streaming" +) + +// Field numbers for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name_field_number protoreflect.FieldNumber = 1 + MethodDescriptorProto_InputType_field_number protoreflect.FieldNumber = 2 + MethodDescriptorProto_OutputType_field_number protoreflect.FieldNumber = 3 + MethodDescriptorProto_Options_field_number protoreflect.FieldNumber = 4 + MethodDescriptorProto_ClientStreaming_field_number protoreflect.FieldNumber = 5 + MethodDescriptorProto_ServerStreaming_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.FileOptions. +const ( + FileOptions_message_name protoreflect.Name = "FileOptions" + FileOptions_message_fullname protoreflect.FullName = "google.protobuf.FileOptions" +) + +// Field names for google.protobuf.FileOptions. 
+const ( + FileOptions_JavaPackage_field_name protoreflect.Name = "java_package" + FileOptions_JavaOuterClassname_field_name protoreflect.Name = "java_outer_classname" + FileOptions_JavaMultipleFiles_field_name protoreflect.Name = "java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_name protoreflect.Name = "java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_name protoreflect.Name = "java_string_check_utf8" + FileOptions_OptimizeFor_field_name protoreflect.Name = "optimize_for" + FileOptions_GoPackage_field_name protoreflect.Name = "go_package" + FileOptions_CcGenericServices_field_name protoreflect.Name = "cc_generic_services" + FileOptions_JavaGenericServices_field_name protoreflect.Name = "java_generic_services" + FileOptions_PyGenericServices_field_name protoreflect.Name = "py_generic_services" + FileOptions_PhpGenericServices_field_name protoreflect.Name = "php_generic_services" + FileOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FileOptions_CcEnableArenas_field_name protoreflect.Name = "cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_name protoreflect.Name = "objc_class_prefix" + FileOptions_CsharpNamespace_field_name protoreflect.Name = "csharp_namespace" + FileOptions_SwiftPrefix_field_name protoreflect.Name = "swift_prefix" + FileOptions_PhpClassPrefix_field_name protoreflect.Name = "php_class_prefix" + FileOptions_PhpNamespace_field_name protoreflect.Name = "php_namespace" + FileOptions_PhpMetadataNamespace_field_name protoreflect.Name = "php_metadata_namespace" + FileOptions_RubyPackage_field_name protoreflect.Name = "ruby_package" + FileOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FileOptions_JavaPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_package" + FileOptions_JavaOuterClassname_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_outer_classname" + FileOptions_JavaMultipleFiles_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_multiple_files" + FileOptions_JavaGenerateEqualsAndHash_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generate_equals_and_hash" + FileOptions_JavaStringCheckUtf8_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_string_check_utf8" + FileOptions_OptimizeFor_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.optimize_for" + FileOptions_GoPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.go_package" + FileOptions_CcGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_generic_services" + FileOptions_JavaGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.java_generic_services" + FileOptions_PyGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.py_generic_services" + FileOptions_PhpGenericServices_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_generic_services" + FileOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.deprecated" + FileOptions_CcEnableArenas_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.cc_enable_arenas" + FileOptions_ObjcClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.objc_class_prefix" + FileOptions_CsharpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.csharp_namespace" + FileOptions_SwiftPrefix_field_fullname protoreflect.FullName 
= "google.protobuf.FileOptions.swift_prefix" + FileOptions_PhpClassPrefix_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_class_prefix" + FileOptions_PhpNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_namespace" + FileOptions_PhpMetadataNamespace_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace" + FileOptions_RubyPackage_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.ruby_package" + FileOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FileOptions. +const ( + FileOptions_JavaPackage_field_number protoreflect.FieldNumber = 1 + FileOptions_JavaOuterClassname_field_number protoreflect.FieldNumber = 8 + FileOptions_JavaMultipleFiles_field_number protoreflect.FieldNumber = 10 + FileOptions_JavaGenerateEqualsAndHash_field_number protoreflect.FieldNumber = 20 + FileOptions_JavaStringCheckUtf8_field_number protoreflect.FieldNumber = 27 + FileOptions_OptimizeFor_field_number protoreflect.FieldNumber = 9 + FileOptions_GoPackage_field_number protoreflect.FieldNumber = 11 + FileOptions_CcGenericServices_field_number protoreflect.FieldNumber = 16 + FileOptions_JavaGenericServices_field_number protoreflect.FieldNumber = 17 + FileOptions_PyGenericServices_field_number protoreflect.FieldNumber = 18 + FileOptions_PhpGenericServices_field_number protoreflect.FieldNumber = 42 + FileOptions_Deprecated_field_number protoreflect.FieldNumber = 23 + FileOptions_CcEnableArenas_field_number protoreflect.FieldNumber = 31 + FileOptions_ObjcClassPrefix_field_number protoreflect.FieldNumber = 36 + FileOptions_CsharpNamespace_field_number protoreflect.FieldNumber = 37 + FileOptions_SwiftPrefix_field_number protoreflect.FieldNumber = 39 + FileOptions_PhpClassPrefix_field_number protoreflect.FieldNumber = 40 + FileOptions_PhpNamespace_field_number protoreflect.FieldNumber = 41 + FileOptions_PhpMetadataNamespace_field_number protoreflect.FieldNumber = 44 + FileOptions_RubyPackage_field_number protoreflect.FieldNumber = 45 + FileOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FileOptions.OptimizeMode. +const ( + FileOptions_OptimizeMode_enum_fullname = "google.protobuf.FileOptions.OptimizeMode" + FileOptions_OptimizeMode_enum_name = "OptimizeMode" +) + +// Names for google.protobuf.MessageOptions. +const ( + MessageOptions_message_name protoreflect.Name = "MessageOptions" + MessageOptions_message_fullname protoreflect.FullName = "google.protobuf.MessageOptions" +) + +// Field names for google.protobuf.MessageOptions. 
+const ( + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MessageOptions. +const ( + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.FieldOptions. +const ( + FieldOptions_message_name protoreflect.Name = "FieldOptions" + FieldOptions_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions" +) + +// Field names for google.protobuf.FieldOptions. +const ( + FieldOptions_Ctype_field_name protoreflect.Name = "ctype" + FieldOptions_Packed_field_name protoreflect.Name = "packed" + FieldOptions_Jstype_field_name protoreflect.Name = "jstype" + FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" + FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" + FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" + FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" + FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" + FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.FieldOptions. 
+const ( + FieldOptions_Ctype_field_number protoreflect.FieldNumber = 1 + FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 + FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 + FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.FieldOptions.CType. +const ( + FieldOptions_CType_enum_fullname = "google.protobuf.FieldOptions.CType" + FieldOptions_CType_enum_name = "CType" +) + +// Full and short names for google.protobuf.FieldOptions.JSType. +const ( + FieldOptions_JSType_enum_fullname = "google.protobuf.FieldOptions.JSType" + FieldOptions_JSType_enum_name = "JSType" +) + +// Names for google.protobuf.OneofOptions. +const ( + OneofOptions_message_name protoreflect.Name = "OneofOptions" + OneofOptions_message_fullname protoreflect.FullName = "google.protobuf.OneofOptions" +) + +// Field names for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumOptions. +const ( + EnumOptions_message_name protoreflect.Name = "EnumOptions" + EnumOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumOptions" +) + +// Field names for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_message_name protoreflect.Name = "EnumValueOptions" + EnumValueOptions_message_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions" +) + +// Field names for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated" + EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.EnumValueOptions. 
+const ( + EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1 + EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_message_name protoreflect.Name = "ServiceOptions" + ServiceOptions_message_fullname protoreflect.FullName = "google.protobuf.ServiceOptions" +) + +// Field names for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_name protoreflect.Name = "deprecated" + ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + ServiceOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated" + ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Names for google.protobuf.MethodOptions. +const ( + MethodOptions_message_name protoreflect.Name = "MethodOptions" + MethodOptions_message_fullname protoreflect.FullName = "google.protobuf.MethodOptions" +) + +// Field names for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MethodOptions_IdempotencyLevel_field_name protoreflect.Name = "idempotency_level" + MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + + MethodOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.deprecated" + MethodOptions_IdempotencyLevel_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level" + MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option" +) + +// Field numbers for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated_field_number protoreflect.FieldNumber = 33 + MethodOptions_IdempotencyLevel_field_number protoreflect.FieldNumber = 34 + MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 +) + +// Full and short names for google.protobuf.MethodOptions.IdempotencyLevel. +const ( + MethodOptions_IdempotencyLevel_enum_fullname = "google.protobuf.MethodOptions.IdempotencyLevel" + MethodOptions_IdempotencyLevel_enum_name = "IdempotencyLevel" +) + +// Names for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_message_name protoreflect.Name = "UninterpretedOption" + UninterpretedOption_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption" +) + +// Field names for google.protobuf.UninterpretedOption. 
+const ( + UninterpretedOption_Name_field_name protoreflect.Name = "name" + UninterpretedOption_IdentifierValue_field_name protoreflect.Name = "identifier_value" + UninterpretedOption_PositiveIntValue_field_name protoreflect.Name = "positive_int_value" + UninterpretedOption_NegativeIntValue_field_name protoreflect.Name = "negative_int_value" + UninterpretedOption_DoubleValue_field_name protoreflect.Name = "double_value" + UninterpretedOption_StringValue_field_name protoreflect.Name = "string_value" + UninterpretedOption_AggregateValue_field_name protoreflect.Name = "aggregate_value" + + UninterpretedOption_Name_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.name" + UninterpretedOption_IdentifierValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.identifier_value" + UninterpretedOption_PositiveIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.positive_int_value" + UninterpretedOption_NegativeIntValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.negative_int_value" + UninterpretedOption_DoubleValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.double_value" + UninterpretedOption_StringValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.string_value" + UninterpretedOption_AggregateValue_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.aggregate_value" +) + +// Field numbers for google.protobuf.UninterpretedOption. +const ( + UninterpretedOption_Name_field_number protoreflect.FieldNumber = 2 + UninterpretedOption_IdentifierValue_field_number protoreflect.FieldNumber = 3 + UninterpretedOption_PositiveIntValue_field_number protoreflect.FieldNumber = 4 + UninterpretedOption_NegativeIntValue_field_number protoreflect.FieldNumber = 5 + UninterpretedOption_DoubleValue_field_number protoreflect.FieldNumber = 6 + UninterpretedOption_StringValue_field_number protoreflect.FieldNumber = 7 + UninterpretedOption_AggregateValue_field_number protoreflect.FieldNumber = 8 +) + +// Names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_message_name protoreflect.Name = "NamePart" + UninterpretedOption_NamePart_message_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart" +) + +// Field names for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_name protoreflect.Name = "name_part" + UninterpretedOption_NamePart_IsExtension_field_name protoreflect.Name = "is_extension" + + UninterpretedOption_NamePart_NamePart_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.name_part" + UninterpretedOption_NamePart_IsExtension_field_fullname protoreflect.FullName = "google.protobuf.UninterpretedOption.NamePart.is_extension" +) + +// Field numbers for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart_field_number protoreflect.FieldNumber = 1 + UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_message_name protoreflect.Name = "SourceCodeInfo" + SourceCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo" +) + +// Field names for google.protobuf.SourceCodeInfo. 
+const ( + SourceCodeInfo_Location_field_name protoreflect.Name = "location" + + SourceCodeInfo_Location_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.location" +) + +// Field numbers for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_Location_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_message_name protoreflect.Name = "Location" + SourceCodeInfo_Location_message_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location" +) + +// Field names for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_name protoreflect.Name = "path" + SourceCodeInfo_Location_Span_field_name protoreflect.Name = "span" + SourceCodeInfo_Location_LeadingComments_field_name protoreflect.Name = "leading_comments" + SourceCodeInfo_Location_TrailingComments_field_name protoreflect.Name = "trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_name protoreflect.Name = "leading_detached_comments" + + SourceCodeInfo_Location_Path_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.path" + SourceCodeInfo_Location_Span_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.span" + SourceCodeInfo_Location_LeadingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_comments" + SourceCodeInfo_Location_TrailingComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.trailing_comments" + SourceCodeInfo_Location_LeadingDetachedComments_field_fullname protoreflect.FullName = "google.protobuf.SourceCodeInfo.Location.leading_detached_comments" +) + +// Field numbers for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path_field_number protoreflect.FieldNumber = 1 + SourceCodeInfo_Location_Span_field_number protoreflect.FieldNumber = 2 + SourceCodeInfo_Location_LeadingComments_field_number protoreflect.FieldNumber = 3 + SourceCodeInfo_Location_TrailingComments_field_number protoreflect.FieldNumber = 4 + SourceCodeInfo_Location_LeadingDetachedComments_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_message_name protoreflect.Name = "GeneratedCodeInfo" + GeneratedCodeInfo_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo" +) + +// Field names for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_name protoreflect.Name = "annotation" + + GeneratedCodeInfo_Annotation_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.annotation" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_message_name protoreflect.Name = "Annotation" + GeneratedCodeInfo_Annotation_message_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation" +) + +// Field names for google.protobuf.GeneratedCodeInfo.Annotation. 
+const ( + GeneratedCodeInfo_Annotation_Path_field_name protoreflect.Name = "path" + GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" + GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" + GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + + GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" + GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" + GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" + GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" +) + +// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_Path_field_number protoreflect.FieldNumber = 1 + GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 + GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 + GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go new file mode 100644 index 000000000..45ccd0121 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package genid contains constants for declarations in descriptor.proto +// and the well-known types. +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go new file mode 100644 index 000000000..b070ef4fd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/duration_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_duration_proto = "google/protobuf/duration.proto" + +// Names for google.protobuf.Duration. +const ( + Duration_message_name protoreflect.Name = "Duration" + Duration_message_fullname protoreflect.FullName = "google.protobuf.Duration" +) + +// Field names for google.protobuf.Duration. +const ( + Duration_Seconds_field_name protoreflect.Name = "seconds" + Duration_Nanos_field_name protoreflect.Name = "nanos" + + Duration_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Duration.seconds" + Duration_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Duration.nanos" +) + +// Field numbers for google.protobuf.Duration. 
+const ( + Duration_Seconds_field_number protoreflect.FieldNumber = 1 + Duration_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go new file mode 100644 index 000000000..762abb34a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/empty_gen.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_empty_proto = "google/protobuf/empty.proto" + +// Names for google.protobuf.Empty. +const ( + Empty_message_name protoreflect.Name = "Empty" + Empty_message_fullname protoreflect.FullName = "google.protobuf.Empty" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go new file mode 100644 index 000000000..70bed453f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/field_mask_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_field_mask_proto = "google/protobuf/field_mask.proto" + +// Names for google.protobuf.FieldMask. +const ( + FieldMask_message_name protoreflect.Name = "FieldMask" + FieldMask_message_fullname protoreflect.FullName = "google.protobuf.FieldMask" +) + +// Field names for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_name protoreflect.Name = "paths" + + FieldMask_Paths_field_fullname protoreflect.FullName = "google.protobuf.FieldMask.paths" +) + +// Field numbers for google.protobuf.FieldMask. +const ( + FieldMask_Paths_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go new file mode 100644 index 000000000..693d2e9e1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +// Go names of implementation-specific struct fields in generated messages. +const ( + State_goname = "state" + + SizeCache_goname = "sizeCache" + SizeCacheA_goname = "XXX_sizecache" + + WeakFields_goname = "weakFields" + WeakFieldsA_goname = "XXX_weak" + + UnknownFields_goname = "unknownFields" + UnknownFieldsA_goname = "XXX_unrecognized" + + ExtensionFields_goname = "extensionFields" + ExtensionFieldsA_goname = "XXX_InternalExtensions" + ExtensionFieldsB_goname = "XXX_extensions" + + WeakFieldPrefix_goname = "XXX_weak_" +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go new file mode 100644 index 000000000..8f9ea02ff --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
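A minimal illustrative sketch (not part of the vendored sources): genid is an internal package, so application code reaches the same declarations through the public protoreflect API instead. The durationpb well-known-type package is assumed here purely as a convenient generated message.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
	fmt.Println(md.FullName())          // google.protobuf.Duration, as in Duration_message_fullname
	fd := md.Fields().ByName("seconds") // the name mirrored by Duration_Seconds_field_name
	fmt.Println(fd.Number())            // 1, as in Duration_Seconds_field_number
}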
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field names and numbers for synthetic map entry messages. +const ( + MapEntry_Key_field_name protoreflect.Name = "key" + MapEntry_Value_field_name protoreflect.Name = "value" + + MapEntry_Key_field_number protoreflect.FieldNumber = 1 + MapEntry_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go new file mode 100644 index 000000000..3e99ae16c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/source_context_gen.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_source_context_proto = "google/protobuf/source_context.proto" + +// Names for google.protobuf.SourceContext. +const ( + SourceContext_message_name protoreflect.Name = "SourceContext" + SourceContext_message_fullname protoreflect.FullName = "google.protobuf.SourceContext" +) + +// Field names for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_name protoreflect.Name = "file_name" + + SourceContext_FileName_field_fullname protoreflect.FullName = "google.protobuf.SourceContext.file_name" +) + +// Field numbers for google.protobuf.SourceContext. +const ( + SourceContext_FileName_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go new file mode 100644 index 000000000..1a38944b2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/struct_gen.go @@ -0,0 +1,116 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_struct_proto = "google/protobuf/struct.proto" + +// Full and short names for google.protobuf.NullValue. +const ( + NullValue_enum_fullname = "google.protobuf.NullValue" + NullValue_enum_name = "NullValue" +) + +// Names for google.protobuf.Struct. +const ( + Struct_message_name protoreflect.Name = "Struct" + Struct_message_fullname protoreflect.FullName = "google.protobuf.Struct" +) + +// Field names for google.protobuf.Struct. +const ( + Struct_Fields_field_name protoreflect.Name = "fields" + + Struct_Fields_field_fullname protoreflect.FullName = "google.protobuf.Struct.fields" +) + +// Field numbers for google.protobuf.Struct. +const ( + Struct_Fields_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_message_name protoreflect.Name = "FieldsEntry" + Struct_FieldsEntry_message_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry" +) + +// Field names for google.protobuf.Struct.FieldsEntry. 
+const ( + Struct_FieldsEntry_Key_field_name protoreflect.Name = "key" + Struct_FieldsEntry_Value_field_name protoreflect.Name = "value" + + Struct_FieldsEntry_Key_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.key" + Struct_FieldsEntry_Value_field_fullname protoreflect.FullName = "google.protobuf.Struct.FieldsEntry.value" +) + +// Field numbers for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_Key_field_number protoreflect.FieldNumber = 1 + Struct_FieldsEntry_Value_field_number protoreflect.FieldNumber = 2 +) + +// Names for google.protobuf.Value. +const ( + Value_message_name protoreflect.Name = "Value" + Value_message_fullname protoreflect.FullName = "google.protobuf.Value" +) + +// Field names for google.protobuf.Value. +const ( + Value_NullValue_field_name protoreflect.Name = "null_value" + Value_NumberValue_field_name protoreflect.Name = "number_value" + Value_StringValue_field_name protoreflect.Name = "string_value" + Value_BoolValue_field_name protoreflect.Name = "bool_value" + Value_StructValue_field_name protoreflect.Name = "struct_value" + Value_ListValue_field_name protoreflect.Name = "list_value" + + Value_NullValue_field_fullname protoreflect.FullName = "google.protobuf.Value.null_value" + Value_NumberValue_field_fullname protoreflect.FullName = "google.protobuf.Value.number_value" + Value_StringValue_field_fullname protoreflect.FullName = "google.protobuf.Value.string_value" + Value_BoolValue_field_fullname protoreflect.FullName = "google.protobuf.Value.bool_value" + Value_StructValue_field_fullname protoreflect.FullName = "google.protobuf.Value.struct_value" + Value_ListValue_field_fullname protoreflect.FullName = "google.protobuf.Value.list_value" +) + +// Field numbers for google.protobuf.Value. +const ( + Value_NullValue_field_number protoreflect.FieldNumber = 1 + Value_NumberValue_field_number protoreflect.FieldNumber = 2 + Value_StringValue_field_number protoreflect.FieldNumber = 3 + Value_BoolValue_field_number protoreflect.FieldNumber = 4 + Value_StructValue_field_number protoreflect.FieldNumber = 5 + Value_ListValue_field_number protoreflect.FieldNumber = 6 +) + +// Oneof names for google.protobuf.Value. +const ( + Value_Kind_oneof_name protoreflect.Name = "kind" + + Value_Kind_oneof_fullname protoreflect.FullName = "google.protobuf.Value.kind" +) + +// Names for google.protobuf.ListValue. +const ( + ListValue_message_name protoreflect.Name = "ListValue" + ListValue_message_fullname protoreflect.FullName = "google.protobuf.ListValue" +) + +// Field names for google.protobuf.ListValue. +const ( + ListValue_Values_field_name protoreflect.Name = "values" + + ListValue_Values_field_fullname protoreflect.FullName = "google.protobuf.ListValue.values" +) + +// Field numbers for google.protobuf.ListValue. +const ( + ListValue_Values_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go new file mode 100644 index 000000000..f5cd5634c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/timestamp_gen.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. 
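A small sketch (not from the vendored sources) of what the google.protobuf.Value identifiers above describe: the six value fields form a single "kind" oneof, and the populated case can be inspected through the public descriptor API. The structpb package is assumed only as a convenient way to build a Value.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	v := structpb.NewStringValue("hello")
	m := v.ProtoReflect()
	od := m.Descriptor().Oneofs().ByName("kind") // the oneof mirrored by Value_Kind_oneof_name
	fd := m.WhichOneof(od)                       // which member of the oneof is set
	fmt.Println(fd.Name(), fd.Number())          // string_value 3
}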
+ +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_timestamp_proto = "google/protobuf/timestamp.proto" + +// Names for google.protobuf.Timestamp. +const ( + Timestamp_message_name protoreflect.Name = "Timestamp" + Timestamp_message_fullname protoreflect.FullName = "google.protobuf.Timestamp" +) + +// Field names for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_name protoreflect.Name = "seconds" + Timestamp_Nanos_field_name protoreflect.Name = "nanos" + + Timestamp_Seconds_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.seconds" + Timestamp_Nanos_field_fullname protoreflect.FullName = "google.protobuf.Timestamp.nanos" +) + +// Field numbers for google.protobuf.Timestamp. +const ( + Timestamp_Seconds_field_number protoreflect.FieldNumber = 1 + Timestamp_Nanos_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go new file mode 100644 index 000000000..3bc710138 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -0,0 +1,184 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_type_proto = "google/protobuf/type.proto" + +// Full and short names for google.protobuf.Syntax. +const ( + Syntax_enum_fullname = "google.protobuf.Syntax" + Syntax_enum_name = "Syntax" +) + +// Names for google.protobuf.Type. +const ( + Type_message_name protoreflect.Name = "Type" + Type_message_fullname protoreflect.FullName = "google.protobuf.Type" +) + +// Field names for google.protobuf.Type. +const ( + Type_Name_field_name protoreflect.Name = "name" + Type_Fields_field_name protoreflect.Name = "fields" + Type_Oneofs_field_name protoreflect.Name = "oneofs" + Type_Options_field_name protoreflect.Name = "options" + Type_SourceContext_field_name protoreflect.Name = "source_context" + Type_Syntax_field_name protoreflect.Name = "syntax" + + Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" + Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" + Type_Oneofs_field_fullname protoreflect.FullName = "google.protobuf.Type.oneofs" + Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" + Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" + Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" +) + +// Field numbers for google.protobuf.Type. +const ( + Type_Name_field_number protoreflect.FieldNumber = 1 + Type_Fields_field_number protoreflect.FieldNumber = 2 + Type_Oneofs_field_number protoreflect.FieldNumber = 3 + Type_Options_field_number protoreflect.FieldNumber = 4 + Type_SourceContext_field_number protoreflect.FieldNumber = 5 + Type_Syntax_field_number protoreflect.FieldNumber = 6 +) + +// Names for google.protobuf.Field. +const ( + Field_message_name protoreflect.Name = "Field" + Field_message_fullname protoreflect.FullName = "google.protobuf.Field" +) + +// Field names for google.protobuf.Field. 
+const ( + Field_Kind_field_name protoreflect.Name = "kind" + Field_Cardinality_field_name protoreflect.Name = "cardinality" + Field_Number_field_name protoreflect.Name = "number" + Field_Name_field_name protoreflect.Name = "name" + Field_TypeUrl_field_name protoreflect.Name = "type_url" + Field_OneofIndex_field_name protoreflect.Name = "oneof_index" + Field_Packed_field_name protoreflect.Name = "packed" + Field_Options_field_name protoreflect.Name = "options" + Field_JsonName_field_name protoreflect.Name = "json_name" + Field_DefaultValue_field_name protoreflect.Name = "default_value" + + Field_Kind_field_fullname protoreflect.FullName = "google.protobuf.Field.kind" + Field_Cardinality_field_fullname protoreflect.FullName = "google.protobuf.Field.cardinality" + Field_Number_field_fullname protoreflect.FullName = "google.protobuf.Field.number" + Field_Name_field_fullname protoreflect.FullName = "google.protobuf.Field.name" + Field_TypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Field.type_url" + Field_OneofIndex_field_fullname protoreflect.FullName = "google.protobuf.Field.oneof_index" + Field_Packed_field_fullname protoreflect.FullName = "google.protobuf.Field.packed" + Field_Options_field_fullname protoreflect.FullName = "google.protobuf.Field.options" + Field_JsonName_field_fullname protoreflect.FullName = "google.protobuf.Field.json_name" + Field_DefaultValue_field_fullname protoreflect.FullName = "google.protobuf.Field.default_value" +) + +// Field numbers for google.protobuf.Field. +const ( + Field_Kind_field_number protoreflect.FieldNumber = 1 + Field_Cardinality_field_number protoreflect.FieldNumber = 2 + Field_Number_field_number protoreflect.FieldNumber = 3 + Field_Name_field_number protoreflect.FieldNumber = 4 + Field_TypeUrl_field_number protoreflect.FieldNumber = 6 + Field_OneofIndex_field_number protoreflect.FieldNumber = 7 + Field_Packed_field_number protoreflect.FieldNumber = 8 + Field_Options_field_number protoreflect.FieldNumber = 9 + Field_JsonName_field_number protoreflect.FieldNumber = 10 + Field_DefaultValue_field_number protoreflect.FieldNumber = 11 +) + +// Full and short names for google.protobuf.Field.Kind. +const ( + Field_Kind_enum_fullname = "google.protobuf.Field.Kind" + Field_Kind_enum_name = "Kind" +) + +// Full and short names for google.protobuf.Field.Cardinality. +const ( + Field_Cardinality_enum_fullname = "google.protobuf.Field.Cardinality" + Field_Cardinality_enum_name = "Cardinality" +) + +// Names for google.protobuf.Enum. +const ( + Enum_message_name protoreflect.Name = "Enum" + Enum_message_fullname protoreflect.FullName = "google.protobuf.Enum" +) + +// Field names for google.protobuf.Enum. +const ( + Enum_Name_field_name protoreflect.Name = "name" + Enum_Enumvalue_field_name protoreflect.Name = "enumvalue" + Enum_Options_field_name protoreflect.Name = "options" + Enum_SourceContext_field_name protoreflect.Name = "source_context" + Enum_Syntax_field_name protoreflect.Name = "syntax" + + Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" + Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" + Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" + Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" + Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" +) + +// Field numbers for google.protobuf.Enum. 
+const ( + Enum_Name_field_number protoreflect.FieldNumber = 1 + Enum_Enumvalue_field_number protoreflect.FieldNumber = 2 + Enum_Options_field_number protoreflect.FieldNumber = 3 + Enum_SourceContext_field_number protoreflect.FieldNumber = 4 + Enum_Syntax_field_number protoreflect.FieldNumber = 5 +) + +// Names for google.protobuf.EnumValue. +const ( + EnumValue_message_name protoreflect.Name = "EnumValue" + EnumValue_message_fullname protoreflect.FullName = "google.protobuf.EnumValue" +) + +// Field names for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_name protoreflect.Name = "name" + EnumValue_Number_field_name protoreflect.Name = "number" + EnumValue_Options_field_name protoreflect.Name = "options" + + EnumValue_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.name" + EnumValue_Number_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.number" + EnumValue_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumValue.options" +) + +// Field numbers for google.protobuf.EnumValue. +const ( + EnumValue_Name_field_number protoreflect.FieldNumber = 1 + EnumValue_Number_field_number protoreflect.FieldNumber = 2 + EnumValue_Options_field_number protoreflect.FieldNumber = 3 +) + +// Names for google.protobuf.Option. +const ( + Option_message_name protoreflect.Name = "Option" + Option_message_fullname protoreflect.FullName = "google.protobuf.Option" +) + +// Field names for google.protobuf.Option. +const ( + Option_Name_field_name protoreflect.Name = "name" + Option_Value_field_name protoreflect.Name = "value" + + Option_Name_field_fullname protoreflect.FullName = "google.protobuf.Option.name" + Option_Value_field_fullname protoreflect.FullName = "google.protobuf.Option.value" +) + +// Field numbers for google.protobuf.Option. +const ( + Option_Name_field_number protoreflect.FieldNumber = 1 + Option_Value_field_number protoreflect.FieldNumber = 2 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go new file mode 100644 index 000000000..429384b85 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package genid + +import protoreflect "google.golang.org/protobuf/reflect/protoreflect" + +// Generic field name and number for messages in wrappers.proto. +const ( + WrapperValue_Value_field_name protoreflect.Name = "value" + WrapperValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go new file mode 100644 index 000000000..72527d2ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers_gen.go @@ -0,0 +1,175 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package genid + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" +) + +const File_google_protobuf_wrappers_proto = "google/protobuf/wrappers.proto" + +// Names for google.protobuf.DoubleValue. 
+const ( + DoubleValue_message_name protoreflect.Name = "DoubleValue" + DoubleValue_message_fullname protoreflect.FullName = "google.protobuf.DoubleValue" +) + +// Field names for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_name protoreflect.Name = "value" + + DoubleValue_Value_field_fullname protoreflect.FullName = "google.protobuf.DoubleValue.value" +) + +// Field numbers for google.protobuf.DoubleValue. +const ( + DoubleValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.FloatValue. +const ( + FloatValue_message_name protoreflect.Name = "FloatValue" + FloatValue_message_fullname protoreflect.FullName = "google.protobuf.FloatValue" +) + +// Field names for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_name protoreflect.Name = "value" + + FloatValue_Value_field_fullname protoreflect.FullName = "google.protobuf.FloatValue.value" +) + +// Field numbers for google.protobuf.FloatValue. +const ( + FloatValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int64Value. +const ( + Int64Value_message_name protoreflect.Name = "Int64Value" + Int64Value_message_fullname protoreflect.FullName = "google.protobuf.Int64Value" +) + +// Field names for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_name protoreflect.Name = "value" + + Int64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int64Value.value" +) + +// Field numbers for google.protobuf.Int64Value. +const ( + Int64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt64Value. +const ( + UInt64Value_message_name protoreflect.Name = "UInt64Value" + UInt64Value_message_fullname protoreflect.FullName = "google.protobuf.UInt64Value" +) + +// Field names for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_name protoreflect.Name = "value" + + UInt64Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt64Value.value" +) + +// Field numbers for google.protobuf.UInt64Value. +const ( + UInt64Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.Int32Value. +const ( + Int32Value_message_name protoreflect.Name = "Int32Value" + Int32Value_message_fullname protoreflect.FullName = "google.protobuf.Int32Value" +) + +// Field names for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_name protoreflect.Name = "value" + + Int32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.Int32Value.value" +) + +// Field numbers for google.protobuf.Int32Value. +const ( + Int32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.UInt32Value. +const ( + UInt32Value_message_name protoreflect.Name = "UInt32Value" + UInt32Value_message_fullname protoreflect.FullName = "google.protobuf.UInt32Value" +) + +// Field names for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_name protoreflect.Name = "value" + + UInt32Value_Value_field_fullname protoreflect.FullName = "google.protobuf.UInt32Value.value" +) + +// Field numbers for google.protobuf.UInt32Value. +const ( + UInt32Value_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BoolValue. +const ( + BoolValue_message_name protoreflect.Name = "BoolValue" + BoolValue_message_fullname protoreflect.FullName = "google.protobuf.BoolValue" +) + +// Field names for google.protobuf.BoolValue. 
+const ( + BoolValue_Value_field_name protoreflect.Name = "value" + + BoolValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BoolValue.value" +) + +// Field numbers for google.protobuf.BoolValue. +const ( + BoolValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.StringValue. +const ( + StringValue_message_name protoreflect.Name = "StringValue" + StringValue_message_fullname protoreflect.FullName = "google.protobuf.StringValue" +) + +// Field names for google.protobuf.StringValue. +const ( + StringValue_Value_field_name protoreflect.Name = "value" + + StringValue_Value_field_fullname protoreflect.FullName = "google.protobuf.StringValue.value" +) + +// Field numbers for google.protobuf.StringValue. +const ( + StringValue_Value_field_number protoreflect.FieldNumber = 1 +) + +// Names for google.protobuf.BytesValue. +const ( + BytesValue_message_name protoreflect.Name = "BytesValue" + BytesValue_message_fullname protoreflect.FullName = "google.protobuf.BytesValue" +) + +// Field names for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_name protoreflect.Name = "value" + + BytesValue_Value_field_fullname protoreflect.FullName = "google.protobuf.BytesValue.value" +) + +// Field numbers for google.protobuf.BytesValue. +const ( + BytesValue_Value_field_number protoreflect.FieldNumber = 1 +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go new file mode 100644 index 000000000..b5974528d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -0,0 +1,177 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strconv" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// Export is a zero-length named type that exists only to export a set of +// functions that we do not want to appear in godoc. +type Export struct{} + +// NewError formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func (Export) NewError(f string, x ...interface{}) error { + return errors.New(f, x...) +} + +// enum is any enum type generated by protoc-gen-go +// and must be a named int32 type. +type enum = interface{} + +// EnumOf returns the protoreflect.Enum interface over e. +// It returns nil if e is nil. +func (Export) EnumOf(e enum) pref.Enum { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e + default: + return legacyWrapEnum(reflect.ValueOf(e)) + } +} + +// EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. +// It returns nil if e is nil. +func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e.Descriptor() + default: + return LegacyLoadEnumDesc(reflect.TypeOf(e)) + } +} + +// EnumTypeOf returns the protoreflect.EnumType for e. +// It returns nil if e is nil. 
+func (Export) EnumTypeOf(e enum) pref.EnumType { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e.Type() + default: + return legacyLoadEnumType(reflect.TypeOf(e)) + } +} + +// EnumStringOf returns the enum value as a string, either as the name if +// the number is resolvable, or the number formatted as a string. +func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { + ev := ed.Values().ByNumber(n) + if ev != nil { + return string(ev.Name()) + } + return strconv.Itoa(int(n)) +} + +// message is any message type generated by protoc-gen-go +// and must be a pointer to a named struct type. +type message = interface{} + +// legacyMessageWrapper wraps a v2 message as a v1 message. +type legacyMessageWrapper struct{ m pref.ProtoMessage } + +func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } +func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } +func (m legacyMessageWrapper) ProtoMessage() {} + +// ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func (Export) ProtoMessageV1Of(m message) piface.MessageV1 { + switch mv := m.(type) { + case nil: + return nil + case piface.MessageV1: + return mv + case unwrapper: + return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) + case pref.ProtoMessage: + return legacyMessageWrapper{mv} + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } +} + +func (Export) protoMessageV2Of(m message) pref.ProtoMessage { + switch mv := m.(type) { + case nil: + return nil + case pref.ProtoMessage: + return mv + case legacyMessageWrapper: + return mv.m + case piface.MessageV1: + return nil + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } +} + +// ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv + } + return legacyWrapMessage(reflect.ValueOf(m)).Interface() +} + +// MessageOf returns the protoreflect.Message interface over m. +// It returns nil if m is nil. +func (Export) MessageOf(m message) pref.Message { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect() + } + return legacyWrapMessage(reflect.ValueOf(m)) +} + +// MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. +// It returns nil if m is nil. +func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Descriptor() + } + return LegacyLoadMessageDesc(reflect.TypeOf(m)) +} + +// MessageTypeOf returns the protoreflect.MessageType for m. +// It returns nil if m is nil. +func (Export) MessageTypeOf(m message) pref.MessageType { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } + return legacyLoadMessageInfo(reflect.TypeOf(m), "") +} + +// MessageStringOf returns the message value as a string, +// which is the message serialized in the protobuf text format. 
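A hedged sketch (not part of the vendored code) of the behaviour EnumStringOf implements above: print the declared name when the number resolves, otherwise fall back to the decimal number. structpb.NullValue is assumed only because it is a readily available generated enum.

package main

import (
	"fmt"
	"strconv"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/structpb"
)

// enumString mirrors the name-or-number fallback described above.
func enumString(n protoreflect.EnumNumber) string {
	ed := structpb.NullValue(0).Descriptor()
	if ev := ed.Values().ByNumber(n); ev != nil {
		return string(ev.Name())
	}
	return strconv.Itoa(int(n))
}

func main() {
	fmt.Println(enumString(0))  // NULL_VALUE
	fmt.Println(enumString(42)) // 42 (unknown number, printed as digits)
}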
+func (Export) MessageStringOf(m pref.ProtoMessage) string { + return prototext.MarshalOptions{Multiline: false}.Format(m) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go new file mode 100644 index 000000000..b82341e57 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -0,0 +1,141 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync" + + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) +} + +func (mi *MessageInfo) checkInitializedPointer(p pointer) error { + mi.init() + if !mi.needsInitCheck { + return nil + } + if p.IsNil() { + for _, f := range mi.orderedCoderFields { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + } + return nil + } + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + if err := mi.isInitExtensions(e); err != nil { + return err + } + } + for _, f := range mi.orderedCoderFields { + if !f.isRequired && f.funcs.isInit == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + continue + } + if f.funcs.isInit == nil { + continue + } + if err := f.funcs.isInit(fptr, f); err != nil { + return err + } + } + return nil +} + +func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { + if ext == nil { + return nil + } + for _, x := range *ext { + ei := getExtensionFieldInfo(x.Type()) + if ei.funcs.isInit == nil { + continue + } + v := x.Value() + if !v.IsValid() { + continue + } + if err := ei.funcs.isInit(v); err != nil { + return err + } + } + return nil +} + +var ( + needsInitCheckMu sync.Mutex + needsInitCheckMap sync.Map +) + +// needsInitCheck reports whether a message needs to be checked for partial initialization. +// +// It returns true if the message transitively includes any required or extension fields. +func needsInitCheck(md pref.MessageDescriptor) bool { + if v, ok := needsInitCheckMap.Load(md); ok { + if has, ok := v.(bool); ok { + return has + } + } + needsInitCheckMu.Lock() + defer needsInitCheckMu.Unlock() + return needsInitCheckLocked(md) +} + +func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) { + if v, ok := needsInitCheckMap.Load(md); ok { + // If has is true, we've previously determined that this message + // needs init checks. + // + // If has is false, we've previously determined that it can never + // be uninitialized. + // + // If has is not a bool, we've just encountered a cycle in the + // message graph. In this case, it is safe to return false: If + // the message does have required fields, we'll detect them later + // in the graph traversal. 
+ has, ok := v.(bool) + return ok && has + } + needsInitCheckMap.Store(md, struct{}{}) // avoid cycles while descending into this message + defer func() { + needsInitCheckMap.Store(md, has) + }() + if md.RequiredNumbers().Len() > 0 { + return true + } + if md.ExtensionRanges().Len() > 0 { + return true + } + for i := 0; i < md.Fields().Len(); i++ { + fd := md.Fields().Get(i) + // Map keys are never messages, so just consider the map value. + if fd.IsMap() { + fd = fd.MapValue() + } + fmd := fd.Message() + if fmd != nil && needsInitCheckLocked(fmd) { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go new file mode 100644 index 000000000..08d35170b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -0,0 +1,223 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync" + "sync/atomic" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type extensionFieldInfo struct { + wiretag uint64 + tagsize int + unmarshalNeedsValue bool + funcs valueCoderFuncs + validation validationInfo +} + +var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo + +func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { + if xi, ok := xt.(*ExtensionInfo); ok { + xi.lazyInit() + return xi.info + } + return legacyLoadExtensionFieldInfo(xt) +} + +// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. +func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { + if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { + return xi.(*extensionFieldInfo) + } + e := makeExtensionFieldInfo(xt.TypeDescriptor()) + if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { + return e.(*extensionFieldInfo) + } + return e +} + +func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { + var wiretag uint64 + if !xd.IsPacked() { + wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) + } else { + wiretag = protowire.EncodeTag(xd.Number(), protowire.BytesType) + } + e := &extensionFieldInfo{ + wiretag: wiretag, + tagsize: protowire.SizeVarint(wiretag), + funcs: encoderFuncsForValue(xd), + } + // Does the unmarshal function need a value passed to it? + // This is true for composite types, where we pass in a message, list, or map to fill in, + // and for enums, where we pass in a prototype value to specify the concrete enum type. + switch xd.Kind() { + case pref.MessageKind, pref.GroupKind, pref.EnumKind: + e.unmarshalNeedsValue = true + default: + if xd.Cardinality() == pref.Repeated { + e.unmarshalNeedsValue = true + } + } + return e +} + +type lazyExtensionValue struct { + atomicOnce uint32 // atomically set if value is valid + mu sync.Mutex + xi *extensionFieldInfo + value pref.Value + b []byte + fn func() pref.Value +} + +type ExtensionField struct { + typ pref.ExtensionType + + // value is either the value of GetValue, + // or a *lazyExtensionValue that then returns the value of GetValue. 
+ value pref.Value + lazy *lazyExtensionValue +} + +func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { + if f.lazy == nil { + f.lazy = &lazyExtensionValue{xi: xi} + } + f.typ = xt + f.lazy.xi = xi + f.lazy.b = protowire.AppendTag(f.lazy.b, num, wtyp) + f.lazy.b = append(f.lazy.b, b...) +} + +func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool { + if f.typ == nil { + return true + } + if f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + return true + } + return false +} + +func (f *ExtensionField) lazyInit() { + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + if atomic.LoadUint32(&f.lazy.atomicOnce) == 1 { + return + } + if f.lazy.xi != nil { + b := f.lazy.b + val := f.typ.New() + for len(b) > 0 { + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + panic(errors.New("bad tag in lazy extension decoding")) + } + b = b[n:] + } + num := protowire.Number(tag >> 3) + wtyp := protowire.Type(tag & 7) + var out unmarshalOutput + var err error + val, out, err = f.lazy.xi.funcs.unmarshal(b, val, num, wtyp, lazyUnmarshalOptions) + if err != nil { + panic(errors.New("decode failure in lazy extension decoding: %v", err)) + } + b = b[out.n:] + } + f.lazy.value = val + } else { + f.lazy.value = f.lazy.fn() + } + f.lazy.xi = nil + f.lazy.fn = nil + f.lazy.b = nil + atomic.StoreUint32(&f.lazy.atomicOnce, 1) +} + +// Set sets the type and value of the extension field. +// This must not be called concurrently. +func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { + f.typ = t + f.value = v + f.lazy = nil +} + +// SetLazy sets the type and a value that is to be lazily evaluated upon first use. +// This must not be called concurrently. +func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) { + f.typ = t + f.lazy = &lazyExtensionValue{fn: fn} +} + +// Value returns the value of the extension field. +// This may be called concurrently. +func (f *ExtensionField) Value() pref.Value { + if f.lazy != nil { + if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + f.lazyInit() + } + return f.lazy.value + } + return f.value +} + +// Type returns the type of the extension field. +// This may be called concurrently. +func (f ExtensionField) Type() pref.ExtensionType { + return f.typ +} + +// IsSet returns whether the extension field is set. +// This may be called concurrently. +func (f ExtensionField) IsSet() bool { + return f.typ != nil +} + +// IsLazy reports whether a field is lazily encoded. +// It is exported for testing. 
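A brief sketch (not from the vendored sources) of the tag arithmetic that lazyInit above hand-rolls for its one- and two-byte fast paths: a field tag is the varint of number<<3 | wire type, and the public protowire package exposes the same encode/decode helpers.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	tag := protowire.EncodeTag(5, protowire.BytesType) // field 5, length-delimited
	b := protowire.AppendVarint(nil, tag)

	v, n := protowire.ConsumeVarint(b)
	if n < 0 {
		panic(protowire.ParseError(n))
	}
	num, typ := protowire.DecodeTag(v)           // equivalent to v>>3 and v&7
	fmt.Println(num, typ == protowire.BytesType) // 5 true
}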
+func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { + var mi *MessageInfo + var p pointer + switch m := m.(type) { + case *messageState: + mi = m.messageInfo() + p = m.pointer() + case *messageReflectWrapper: + mi = m.messageInfo() + p = m.pointer() + default: + return false + } + xd, ok := fd.(pref.ExtensionTypeDescriptor) + if !ok { + return false + } + xt := xd.Type() + ext := mi.extensionMap(p) + if ext == nil { + return false + } + f, ok := (*ext)[int32(fd.Number())] + if !ok { + return false + } + return f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go new file mode 100644 index 000000000..c00744d38 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -0,0 +1,828 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type errInvalidUTF8 struct{} + +func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" } +func (errInvalidUTF8) InvalidUTF8() bool { return true } + +// initOneofFieldCoders initializes the fast-path functions for the fields in a oneof. +// +// For size, marshal, and isInit operations, functions are set only on the first field +// in the oneof. The functions are called when the oneof is non-nil, and will dispatch +// to the appropriate field-specific function as necessary. +// +// The unmarshal function is set on each field individually as usual. +func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) { + fs := si.oneofsByName[od.Name()] + ft := fs.Type + oneofFields := make(map[reflect.Type]*coderFieldInfo) + needIsInit := false + fields := od.Fields() + for i, lim := 0, fields.Len(); i < lim; i++ { + fd := od.Fields().Get(i) + num := fd.Number() + // Make a copy of the original coderFieldInfo for use in unmarshaling. + // + // oneofFields[oneofType].funcs.marshal is the field-specific marshal function. + // + // mi.coderFields[num].marshal is set on only the first field in the oneof, + // and dispatches to the field-specific marshaler in oneofFields. 
+ cf := *mi.coderFields[num] + ot := si.oneofWrappersByNumber[num] + cf.ft = ot.Field(0).Type + cf.mi, cf.funcs = fieldCoder(fd, cf.ft) + oneofFields[ot] = &cf + if cf.funcs.isInit != nil { + needIsInit = true + } + mi.coderFields[num].funcs.unmarshal = func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + var vw reflect.Value // pointer to wrapper type + vi := p.AsValueOf(ft).Elem() // oneof field value of interface kind + if !vi.IsNil() && !vi.Elem().IsNil() && vi.Elem().Elem().Type() == ot { + vw = vi.Elem() + } else { + vw = reflect.New(ot) + } + out, err := cf.funcs.unmarshal(b, pointerOfValue(vw).Apply(zeroOffset), wtyp, &cf, opts) + if err != nil { + return out, err + } + vi.Set(vw) + return out, nil + } + } + getInfo := func(p pointer) (pointer, *coderFieldInfo) { + v := p.AsValueOf(ft).Elem() + if v.IsNil() { + return pointer{}, nil + } + v = v.Elem() // interface -> *struct + if v.IsNil() { + return pointer{}, nil + } + return pointerOfValue(v).Apply(zeroOffset), oneofFields[v.Elem().Type()] + } + first := mi.coderFields[od.Fields().Get(0).Number()] + first.funcs.size = func(p pointer, _ *coderFieldInfo, opts marshalOptions) int { + p, info := getInfo(p) + if info == nil || info.funcs.size == nil { + return 0 + } + return info.funcs.size(p, info, opts) + } + first.funcs.marshal = func(b []byte, p pointer, _ *coderFieldInfo, opts marshalOptions) ([]byte, error) { + p, info := getInfo(p) + if info == nil || info.funcs.marshal == nil { + return b, nil + } + return info.funcs.marshal(b, p, info, opts) + } + first.funcs.merge = func(dst, src pointer, _ *coderFieldInfo, opts mergeOptions) { + srcp, srcinfo := getInfo(src) + if srcinfo == nil || srcinfo.funcs.merge == nil { + return + } + dstp, dstinfo := getInfo(dst) + if dstinfo != srcinfo { + dst.AsValueOf(ft).Elem().Set(reflect.New(src.AsValueOf(ft).Elem().Elem().Elem().Type())) + dstp = pointerOfValue(dst.AsValueOf(ft).Elem().Elem()).Apply(zeroOffset) + } + srcinfo.funcs.merge(dstp, srcp, srcinfo, opts) + } + if needIsInit { + first.funcs.isInit = func(p pointer, _ *coderFieldInfo) error { + p, info := getInfo(p) + if info == nil || info.funcs.isInit == nil { + return nil + } + return info.funcs.isInit(p, info) + } + } +} + +func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { + var once sync.Once + var messageType pref.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() + messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + }) + } + + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m, ok := p.WeakFields().get(f.num) + if !ok { + return 0 + } + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + return sizeMessage(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m, ok := p.WeakFields().get(f.num) + if !ok { + return b, nil + } + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + return appendMessage(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + fs := p.WeakFields() + m, ok := fs.get(f.num) + if !ok { + lazyInit() + if messageType == nil { + return unmarshalOutput{}, errUnknown + } + m = 
messageType.New().Interface() + fs.set(f.num, m) + } + return consumeMessage(b, m, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m, ok := p.WeakFields().get(f.num) + if !ok { + return nil + } + return proto.CheckInitialized(m) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + sm, ok := src.WeakFields().get(f.num) + if !ok { + return + } + dm, ok := dst.WeakFields().get(f.num) + if !ok { + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + dm = messageType.New().Interface() + dst.WeakFields().set(f.num, dm) + } + opts.Merge(dm, sm) + }, + } +} + +func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageInfo, + marshal: appendMessageInfo, + unmarshal: consumeMessageInfo, + merge: mergeMessage, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageInfo + } + return funcs + } else { + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m := asMessage(p.AsValueOf(ft).Elem()) + return sizeMessage(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m := asMessage(p.AsValueOf(ft).Elem()) + return appendMessage(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft).Elem() + if mp.IsNil() { + mp.Set(reflect.New(ft.Elem())) + } + return consumeMessage(b, asMessage(mp), wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m := asMessage(p.AsValueOf(ft).Elem()) + return proto.CheckInitialized(m) + }, + merge: mergeMessage, + } + } +} + +func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return protowire.SizeBytes(f.mi.sizePointer(p.Elem(), opts)) + f.tagsize +} + +func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) + return f.mi.marshalAppendPointer(b, p.Elem(), opts) +} + +func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if p.Elem().IsNil() { + p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, err := f.mi.unmarshalPointer(v, p.Elem(), 0, opts) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitMessageInfo(p pointer, f *coderFieldInfo) error { + return f.mi.checkInitializedPointer(p.Elem()) +} + +func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { + return protowire.SizeBytes(proto.Size(m)) + tagsize +} + +func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(proto.Size(m))) + return opts.Options().MarshalAppend(b, m) +} + +func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } 
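A self-contained sketch (not part of the vendored code) of the framing used by the message field coders in this file: a sub-message is written as tag, varint length, then body, and read back the same way; the group coders instead bracket the body with the start-group tag and wiretag+1, the matching end-group tag. durationpb is assumed only to have a concrete message to serialize.

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	m := durationpb.New(3 * time.Second)

	// Write: tag, then length, then the serialized message body.
	b := protowire.AppendTag(nil, 1, protowire.BytesType)
	b = protowire.AppendVarint(b, uint64(proto.Size(m)))
	b, err := proto.MarshalOptions{}.MarshalAppend(b, m)
	if err != nil {
		panic(err)
	}

	// Read: skip the tag, take the length-delimited payload, unmarshal it.
	_, _, n := protowire.ConsumeTag(b)
	payload, _ := protowire.ConsumeBytes(b[n:])
	var got durationpb.Duration
	if err := proto.Unmarshal(payload, &got); err != nil {
		panic(err)
	}
	fmt.Println(got.GetSeconds()) // 3
}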
+ v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: m.ProtoReflect(), + }) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeMessage(m, tagsize, opts) +} + +func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendMessage(b, m, wiretag, opts) +} + +func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeMessage(b, m, wtyp, opts) + return v, out, err +} + +func isInitMessageValue(v pref.Value) error { + m := v.Message().Interface() + return proto.CheckInitialized(m) +} + +var coderMessageValue = valueCoderFuncs{ + size: sizeMessageValue, + marshal: appendMessageValue, + unmarshal: consumeMessageValue, + isInit: isInitMessageValue, + merge: mergeMessageValue, +} + +func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeGroup(m, tagsize, opts) +} + +func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendGroup(b, m, wiretag, opts) +} + +func consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeGroup(b, m, num, wtyp, opts) + return v, out, err +} + +var coderGroupValue = valueCoderFuncs{ + size: sizeGroupValue, + marshal: appendGroupValue, + unmarshal: consumeGroupValue, + isInit: isInitMessageValue, + merge: mergeMessageValue, +} + +func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeGroupType, + marshal: appendGroupType, + unmarshal: consumeGroupType, + merge: mergeMessage, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageInfo + } + return funcs + } else { + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m := asMessage(p.AsValueOf(ft).Elem()) + return sizeGroup(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m := asMessage(p.AsValueOf(ft).Elem()) + return appendGroup(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft).Elem() + if mp.IsNil() { + mp.Set(reflect.New(ft.Elem())) + } + return consumeGroup(b, asMessage(mp), num, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m := asMessage(p.AsValueOf(ft).Elem()) + return proto.CheckInitialized(m) + }, + merge: mergeMessage, + } + } +} + +func sizeGroupType(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 2*f.tagsize + f.mi.sizePointer(p.Elem(), opts) +} + +func appendGroupType(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err := 
f.mi.marshalAppendPointer(b, p.Elem(), opts) + b = protowire.AppendVarint(b, f.wiretag+1) // end group + return b, err +} + +func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + if p.Elem().IsNil() { + p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) +} + +func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { + return 2*tagsize + proto.Size(m) +} + +func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) // start group + b, err := opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, wiretag+1) // end group + return b, err +} + +func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return out, protowire.ParseError(n) + } + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: m.ProtoReflect(), + }) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageSliceInfo, + marshal: appendMessageSliceInfo, + unmarshal: consumeMessageSliceInfo, + merge: mergeMessageSlice, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageSliceInfo + } + return funcs + } + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeMessageSlice(p, ft, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendMessageSlice(b, p, f.wiretag, ft, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + return consumeMessageSlice(b, p, ft, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + return isInitMessageSlice(p, ft) + }, + merge: mergeMessageSlice, + } +} + +func sizeMessageSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize + } + return n +} + +func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + siz := f.mi.sizePointer(v, opts) + b = protowire.AppendVarint(b, uint64(siz)) + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + m := reflect.New(f.mi.GoReflectType.Elem()).Interface() + mp := pointerOfIface(m) + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + 
return out, err + } + p.AppendPointerSlice(mp) + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { + s := p.PointerSlice() + for _, v := range s { + if err := f.mi.checkInitializedPointer(v); err != nil { + return err + } + } + return nil +} + +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + n += protowire.SizeBytes(proto.Size(m)) + tagsize + } + return n +} + +func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + b = protowire.AppendVarint(b, wiretag) + siz := proto.Size(m) + b = protowire.AppendVarint(b, uint64(siz)) + b, err = opts.Options().MarshalAppend(b, m) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + mp := reflect.New(goType.Elem()) + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: asMessage(mp).ProtoReflect(), + }) + if err != nil { + return out, err + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func isInitMessageSlice(p pointer, goType reflect.Type) error { + s := p.PointerSlice() + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + if err := proto.CheckInitialized(m); err != nil { + return err + } + } + return nil +} + +// Slices of messages + +func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + n += protowire.SizeBytes(proto.Size(m)) + tagsize + } + return n +} + +func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + b = protowire.AppendVarint(b, wiretag) + siz := proto.Size(m) + b = protowire.AppendVarint(b, uint64(siz)) + var err error + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return pref.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return pref.Value{}, out, protowire.ParseError(n) + } + m := list.NewElement() + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: m.Message(), + }) + if err != nil { + return pref.Value{}, out, err + } + list.Append(m) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return listv, out, nil +} + +func isInitMessageSliceValue(listv pref.Value) error { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + m := 
list.Get(i).Message().Interface() + if err := proto.CheckInitialized(m); err != nil { + return err + } + } + return nil +} + +var coderMessageSliceValue = valueCoderFuncs{ + size: sizeMessageSliceValue, + marshal: appendMessageSliceValue, + unmarshal: consumeMessageSliceValue, + isInit: isInitMessageSliceValue, + merge: mergeMessageListValue, +} + +func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + n += 2*tagsize + proto.Size(m) + } + return n +} + +func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + b = protowire.AppendVarint(b, wiretag) // start group + var err error + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.StartGroupType { + return pref.Value{}, out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return pref.Value{}, out, protowire.ParseError(n) + } + m := list.NewElement() + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: m.Message(), + }) + if err != nil { + return pref.Value{}, out, err + } + list.Append(m) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return listv, out, nil +} + +var coderGroupSliceValue = valueCoderFuncs{ + size: sizeGroupSliceValue, + marshal: appendGroupSliceValue, + unmarshal: consumeGroupSliceValue, + isInit: isInitMessageSliceValue, + merge: mergeMessageListValue, +} + +func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeGroupSliceInfo, + marshal: appendGroupSliceInfo, + unmarshal: consumeGroupSliceInfo, + merge: mergeMessageSlice, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageSliceInfo + } + return funcs + } + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeGroupSlice(p, ft, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendGroupSlice(b, p, f.wiretag, ft, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + return consumeGroupSlice(b, p, num, wtyp, ft, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + return isInitMessageSlice(p, ft) + }, + merge: mergeMessageSlice, + } +} + +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + m := asMessage(v.AsValueOf(messageType.Elem())) + n += 2*tagsize + proto.Size(m) + } + return n +} + +func appendGroupSlice(b []byte, p pointer, wiretag uint64, messageType reflect.Type, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + m := asMessage(v.AsValueOf(messageType.Elem())) + b = protowire.AppendVarint(b, 
wiretag) // start group + b, err = opts.Options().MarshalAppend(b, m) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire.Type, goType reflect.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return out, protowire.ParseError(n) + } + mp := reflect.New(goType.Elem()) + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: asMessage(mp).ProtoReflect(), + }) + if err != nil { + return out, err + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func sizeGroupSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + n += 2*f.tagsize + f.mi.sizePointer(v, opts) + } + return n +} + +func appendGroupSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, f.wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + if wtyp != protowire.StartGroupType { + return unmarshalOutput{}, errUnknown + } + m := reflect.New(f.mi.GoReflectType.Elem()).Interface() + mp := pointerOfIface(m) + out, err := f.mi.unmarshalPointer(b, mp, f.num, opts) + if err != nil { + return out, err + } + p.AppendPointerSlice(mp) + return out, nil +} + +func asMessage(v reflect.Value) pref.ProtoMessage { + if m, ok := v.Interface().(pref.ProtoMessage); ok { + return m + } + return legacyWrapMessage(v).Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go new file mode 100644 index 000000000..ff198d0a1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -0,0 +1,5637 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// sizeBool returns the size of wire encoding a bool pointer as a Bool. +func sizeBool(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Bool() + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBool wire encodes a bool pointer as a Bool. +func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bool() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +// consumeBool wire decodes a bool pointer as a Bool. 
+func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Bool() = protowire.DecodeBool(v) + out.n = n + return out, nil +} + +var coderBool = pointerCoderFuncs{ + size: sizeBool, + marshal: appendBool, + unmarshal: consumeBool, + merge: mergeBool, +} + +// sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool. +// The zero value is not encoded. +func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Bool() + if v == false { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBoolNoZero wire encodes a bool pointer as a Bool. +// The zero value is not encoded. +func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bool() + if v == false { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +var coderBoolNoZero = pointerCoderFuncs{ + size: sizeBoolNoZero, + marshal: appendBoolNoZero, + unmarshal: consumeBool, + merge: mergeBoolNoZero, +} + +// sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool. +// It panics if the pointer is nil. +func sizeBoolPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.BoolPtr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBoolPtr wire encodes a *bool pointer as a Bool. +// It panics if the pointer is nil. +func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.BoolPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +// consumeBoolPtr wire decodes a *bool pointer as a Bool. +func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.BoolPtr() + if *vp == nil { + *vp = new(bool) + } + **vp = protowire.DecodeBool(v) + out.n = n + return out, nil +} + +var coderBoolPtr = pointerCoderFuncs{ + size: sizeBoolPtr, + marshal: appendBoolPtr, + unmarshal: consumeBoolPtr, + merge: mergeBoolPtr, +} + +// sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool. +func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.BoolSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) + } + return size +} + +// appendBoolSlice encodes a []bool pointer as a repeated Bool. 
+func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.BoolSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + } + return b, nil +} + +// consumeBoolSlice wire decodes a []bool pointer as a repeated Bool. +func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BoolSlice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, protowire.DecodeBool(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, protowire.DecodeBool(v)) + out.n = n + return out, nil +} + +var coderBoolSlice = pointerCoderFuncs{ + size: sizeBoolSlice, + marshal: appendBoolSlice, + unmarshal: consumeBoolSlice, + merge: mergeBoolSlice, +} + +// sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool. +func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.BoolSlice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeBool(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool. +func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.BoolSlice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeBool(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + } + return b, nil +} + +var coderBoolPackedSlice = pointerCoderFuncs{ + size: sizeBoolPackedSlice, + marshal: appendBoolPackedSlice, + unmarshal: consumeBoolSlice, + merge: mergeBoolSlice, +} + +// sizeBoolValue returns the size of wire encoding a bool value as a Bool. +func sizeBoolValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) +} + +// appendBoolValue encodes a bool value as a Bool. +func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + return b, nil +} + +// consumeBoolValue decodes a bool value as a Bool. 
+func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil +} + +var coderBoolValue = valueCoderFuncs{ + size: sizeBoolValue, + marshal: appendBoolValue, + unmarshal: consumeBoolValue, + merge: mergeScalarValue, +} + +// sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool. +func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + return size +} + +// appendBoolSliceValue encodes a []bool value as a repeated Bool. +func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + } + return b, nil +} + +// consumeBoolSliceValue wire decodes a []bool value as a repeated Bool. +func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + out.n = n + return listv, out, nil +} + +var coderBoolSliceValue = valueCoderFuncs{ + size: sizeBoolSliceValue, + marshal: appendBoolSliceValue, + unmarshal: consumeBoolSliceValue, + merge: mergeListValue, +} + +// sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool. 
+func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool. +func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + } + return b, nil +} + +var coderBoolPackedSliceValue = valueCoderFuncs{ + size: sizeBoolPackedSliceValue, + marshal: appendBoolPackedSliceValue, + unmarshal: consumeBoolSliceValue, + merge: mergeListValue, +} + +// sizeEnumValue returns the size of wire encoding a value as a Enum. +func sizeEnumValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(v.Enum())) +} + +// appendEnumValue encodes a value as a Enum. +func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Enum())) + return b, nil +} + +// consumeEnumValue decodes a value as a Enum. +func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil +} + +var coderEnumValue = valueCoderFuncs{ + size: sizeEnumValue, + marshal: appendEnumValue, + unmarshal: consumeEnumValue, + merge: mergeScalarValue, +} + +// sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum. +func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(v.Enum())) + } + return size +} + +// appendEnumSliceValue encodes a [] value as a repeated Enum. +func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Enum())) + } + return b, nil +} + +// consumeEnumSliceValue wire decodes a [] value as a repeated Enum. 
+func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + out.n = n + return listv, out, nil +} + +var coderEnumSliceValue = valueCoderFuncs{ + size: sizeEnumSliceValue, + marshal: appendEnumSliceValue, + unmarshal: consumeEnumSliceValue, + merge: mergeListValue, +} + +// sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum. +func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Enum())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum. +func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Enum())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(v.Enum())) + } + return b, nil +} + +var coderEnumPackedSliceValue = valueCoderFuncs{ + size: sizeEnumPackedSliceValue, + marshal: appendEnumPackedSliceValue, + unmarshal: consumeEnumSliceValue, + merge: mergeListValue, +} + +// sizeInt32 returns the size of wire encoding a int32 pointer as a Int32. +func sizeInt32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32 wire encodes a int32 pointer as a Int32. +func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt32 wire decodes a int32 pointer as a Int32. 
+func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int32() = int32(v) + out.n = n + return out, nil +} + +var coderInt32 = pointerCoderFuncs{ + size: sizeInt32, + marshal: appendInt32, + unmarshal: consumeInt32, + merge: mergeInt32, +} + +// sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32. +// The zero value is not encoded. +func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32NoZero wire encodes a int32 pointer as a Int32. +// The zero value is not encoded. +func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderInt32NoZero = pointerCoderFuncs{ + size: sizeInt32NoZero, + marshal: appendInt32NoZero, + unmarshal: consumeInt32, + merge: mergeInt32NoZero, +} + +// sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32. +// It panics if the pointer is nil. +func sizeInt32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Int32Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32Ptr wire encodes a *int32 pointer as a Int32. +// It panics if the pointer is nil. +func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt32Ptr wire decodes a *int32 pointer as a Int32. +func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(v) + out.n = n + return out, nil +} + +var coderInt32Ptr = pointerCoderFuncs{ + size: sizeInt32Ptr, + marshal: appendInt32Ptr, + unmarshal: consumeInt32Ptr, + merge: mergeInt32Ptr, +} + +// sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32. +func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendInt32Slice encodes a []int32 pointer as a repeated Int32. 
+func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32. +func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int32(v)) + out.n = n + return out, nil +} + +var coderInt32Slice = pointerCoderFuncs{ + size: sizeInt32Slice, + marshal: appendInt32Slice, + unmarshal: consumeInt32Slice, + merge: mergeInt32Slice, +} + +// sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32. +func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32. +func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderInt32PackedSlice = pointerCoderFuncs{ + size: sizeInt32PackedSlice, + marshal: appendInt32PackedSlice, + unmarshal: consumeInt32Slice, + merge: mergeInt32Slice, +} + +// sizeInt32Value returns the size of wire encoding a int32 value as a Int32. +func sizeInt32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) +} + +// appendInt32Value encodes a int32 value as a Int32. +func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + return b, nil +} + +// consumeInt32Value decodes a int32 value as a Int32. 
+func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt32(int32(v)), out, nil +} + +var coderInt32Value = valueCoderFuncs{ + size: sizeInt32Value, + marshal: appendInt32Value, + unmarshal: consumeInt32Value, + merge: mergeScalarValue, +} + +// sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32. +func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) + } + return size +} + +// appendInt32SliceValue encodes a []int32 value as a repeated Int32. +func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + } + return b, nil +} + +// consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32. +func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + out.n = n + return listv, out, nil +} + +var coderInt32SliceValue = valueCoderFuncs{ + size: sizeInt32SliceValue, + marshal: appendInt32SliceValue, + unmarshal: consumeInt32SliceValue, + merge: mergeListValue, +} + +// sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32. 
+func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(int32(v.Int()))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32. +func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(int32(v.Int()))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + } + return b, nil +} + +var coderInt32PackedSliceValue = valueCoderFuncs{ + size: sizeInt32PackedSliceValue, + marshal: appendInt32PackedSliceValue, + unmarshal: consumeInt32SliceValue, + merge: mergeListValue, +} + +// sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32. +func sizeSint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32 wire encodes a int32 pointer as a Sint32. +func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +// consumeSint32 wire decodes a int32 pointer as a Sint32. +func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32)) + out.n = n + return out, nil +} + +var coderSint32 = pointerCoderFuncs{ + size: sizeSint32, + marshal: appendSint32, + unmarshal: consumeSint32, + merge: mergeInt32, +} + +// sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32. +// The zero value is not encoded. +func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32NoZero wire encodes a int32 pointer as a Sint32. +// The zero value is not encoded. +func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +var coderSint32NoZero = pointerCoderFuncs{ + size: sizeSint32NoZero, + marshal: appendSint32NoZero, + unmarshal: consumeSint32, + merge: mergeInt32NoZero, +} + +// sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32. +// It panics if the pointer is nil. 
+func sizeSint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Int32Ptr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32Ptr wire encodes a *int32 pointer as a Sint32. +// It panics if the pointer is nil. +func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +// consumeSint32Ptr wire decodes a *int32 pointer as a Sint32. +func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(protowire.DecodeZigZag(v & math.MaxUint32)) + out.n = n + return out, nil +} + +var coderSint32Ptr = pointerCoderFuncs{ + size: sizeSint32Ptr, + marshal: appendSint32Ptr, + unmarshal: consumeSint32Ptr, + merge: mergeInt32Ptr, +} + +// sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32. +func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + return size +} + +// appendSint32Slice encodes a []int32 pointer as a repeated Sint32. +func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + } + return b, nil +} + +// consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32. 
+func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32))) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32))) + out.n = n + return out, nil +} + +var coderSint32Slice = pointerCoderFuncs{ + size: sizeSint32Slice, + marshal: appendSint32Slice, + unmarshal: consumeSint32Slice, + merge: mergeInt32Slice, +} + +// sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32. +func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32. +func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + } + return b, nil +} + +var coderSint32PackedSlice = pointerCoderFuncs{ + size: sizeSint32PackedSlice, + marshal: appendSint32PackedSlice, + unmarshal: consumeSint32Slice, + merge: mergeInt32Slice, +} + +// sizeSint32Value returns the size of wire encoding a int32 value as a Sint32. +func sizeSint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) +} + +// appendSint32Value encodes a int32 value as a Sint32. +func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + return b, nil +} + +// consumeSint32Value decodes a int32 value as a Sint32. 
+func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil +} + +var coderSint32Value = valueCoderFuncs{ + size: sizeSint32Value, + marshal: appendSint32Value, + unmarshal: consumeSint32Value, + merge: mergeScalarValue, +} + +// sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32. +func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return size +} + +// appendSint32SliceValue encodes a []int32 value as a repeated Sint32. +func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return b, nil +} + +// consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32. +func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + out.n = n + return listv, out, nil +} + +var coderSint32SliceValue = valueCoderFuncs{ + size: sizeSint32SliceValue, + marshal: appendSint32SliceValue, + unmarshal: consumeSint32SliceValue, + merge: mergeListValue, +} + +// sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32. 
+func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32. +func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return b, nil +} + +var coderSint32PackedSliceValue = valueCoderFuncs{ + size: sizeSint32PackedSliceValue, + marshal: appendSint32PackedSliceValue, + unmarshal: consumeSint32SliceValue, + merge: mergeListValue, +} + +// sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32. +func sizeUint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint32() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32 wire encodes a uint32 pointer as a Uint32. +func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeUint32 wire decodes a uint32 pointer as a Uint32. +func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Uint32() = uint32(v) + out.n = n + return out, nil +} + +var coderUint32 = pointerCoderFuncs{ + size: sizeUint32, + marshal: appendUint32, + unmarshal: consumeUint32, + merge: mergeUint32, +} + +// sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32. +// The zero value is not encoded. +func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32NoZero wire encodes a uint32 pointer as a Uint32. +// The zero value is not encoded. +func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderUint32NoZero = pointerCoderFuncs{ + size: sizeUint32NoZero, + marshal: appendUint32NoZero, + unmarshal: consumeUint32, + merge: mergeUint32NoZero, +} + +// sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32. +// It panics if the pointer is nil. 
+func sizeUint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Uint32Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32Ptr wire encodes a *uint32 pointer as a Uint32. +// It panics if the pointer is nil. +func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Uint32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32. +func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Uint32Ptr() + if *vp == nil { + *vp = new(uint32) + } + **vp = uint32(v) + out.n = n + return out, nil +} + +var coderUint32Ptr = pointerCoderFuncs{ + size: sizeUint32Ptr, + marshal: appendUint32Ptr, + unmarshal: consumeUint32Ptr, + merge: mergeUint32Ptr, +} + +// sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32. +func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendUint32Slice encodes a []uint32 pointer as a repeated Uint32. +func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32. +func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, uint32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, uint32(v)) + out.n = n + return out, nil +} + +var coderUint32Slice = pointerCoderFuncs{ + size: sizeUint32Slice, + marshal: appendUint32Slice, + unmarshal: consumeUint32Slice, + merge: mergeUint32Slice, +} + +// sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32. 
+func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32. +func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderUint32PackedSlice = pointerCoderFuncs{ + size: sizeUint32PackedSlice, + marshal: appendUint32PackedSlice, + unmarshal: consumeUint32Slice, + merge: mergeUint32Slice, +} + +// sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32. +func sizeUint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) +} + +// appendUint32Value encodes a uint32 value as a Uint32. +func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + return b, nil +} + +// consumeUint32Value decodes a uint32 value as a Uint32. +func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfUint32(uint32(v)), out, nil +} + +var coderUint32Value = valueCoderFuncs{ + size: sizeUint32Value, + marshal: appendUint32Value, + unmarshal: consumeUint32Value, + merge: mergeScalarValue, +} + +// sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32. +func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + return size +} + +// appendUint32SliceValue encodes a []uint32 value as a repeated Uint32. +func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + } + return b, nil +} + +// consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32. 
+func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + out.n = n + return listv, out, nil +} + +var coderUint32SliceValue = valueCoderFuncs{ + size: sizeUint32SliceValue, + marshal: appendUint32SliceValue, + unmarshal: consumeUint32SliceValue, + merge: mergeListValue, +} + +// sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32. +func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32. +func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + } + return b, nil +} + +var coderUint32PackedSliceValue = valueCoderFuncs{ + size: sizeUint32PackedSliceValue, + marshal: appendUint32PackedSliceValue, + unmarshal: consumeUint32SliceValue, + merge: mergeListValue, +} + +// sizeInt64 returns the size of wire encoding a int64 pointer as a Int64. +func sizeInt64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64 wire encodes a int64 pointer as a Int64. +func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt64 wire decodes a int64 pointer as a Int64. 
+func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int64() = int64(v) + out.n = n + return out, nil +} + +var coderInt64 = pointerCoderFuncs{ + size: sizeInt64, + marshal: appendInt64, + unmarshal: consumeInt64, + merge: mergeInt64, +} + +// sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64. +// The zero value is not encoded. +func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64NoZero wire encodes a int64 pointer as a Int64. +// The zero value is not encoded. +func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderInt64NoZero = pointerCoderFuncs{ + size: sizeInt64NoZero, + marshal: appendInt64NoZero, + unmarshal: consumeInt64, + merge: mergeInt64NoZero, +} + +// sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64. +// It panics if the pointer is nil. +func sizeInt64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Int64Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64Ptr wire encodes a *int64 pointer as a Int64. +// It panics if the pointer is nil. +func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt64Ptr wire decodes a *int64 pointer as a Int64. +func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = int64(v) + out.n = n + return out, nil +} + +var coderInt64Ptr = pointerCoderFuncs{ + size: sizeInt64Ptr, + marshal: appendInt64Ptr, + unmarshal: consumeInt64Ptr, + merge: mergeInt64Ptr, +} + +// sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64. +func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendInt64Slice encodes a []int64 pointer as a repeated Int64. 
+func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64. +func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int64(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int64(v)) + out.n = n + return out, nil +} + +var coderInt64Slice = pointerCoderFuncs{ + size: sizeInt64Slice, + marshal: appendInt64Slice, + unmarshal: consumeInt64Slice, + merge: mergeInt64Slice, +} + +// sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64. +func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64. +func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderInt64PackedSlice = pointerCoderFuncs{ + size: sizeInt64PackedSlice, + marshal: appendInt64PackedSlice, + unmarshal: consumeInt64Slice, + merge: mergeInt64Slice, +} + +// sizeInt64Value returns the size of wire encoding a int64 value as a Int64. +func sizeInt64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(v.Int())) +} + +// appendInt64Value encodes a int64 value as a Int64. +func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Int())) + return b, nil +} + +// consumeInt64Value decodes a int64 value as a Int64. 
+func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt64(int64(v)), out, nil +} + +var coderInt64Value = valueCoderFuncs{ + size: sizeInt64Value, + marshal: appendInt64Value, + unmarshal: consumeInt64Value, + merge: mergeScalarValue, +} + +// sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64. +func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(v.Int())) + } + return size +} + +// appendInt64SliceValue encodes a []int64 value as a repeated Int64. +func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Int())) + } + return b, nil +} + +// consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64. +func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + out.n = n + return listv, out, nil +} + +var coderInt64SliceValue = valueCoderFuncs{ + size: sizeInt64SliceValue, + marshal: appendInt64SliceValue, + unmarshal: consumeInt64SliceValue, + merge: mergeListValue, +} + +// sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64. 
+func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Int())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64. +func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(v.Int())) + } + return b, nil +} + +var coderInt64PackedSliceValue = valueCoderFuncs{ + size: sizeInt64PackedSliceValue, + marshal: appendInt64PackedSliceValue, + unmarshal: consumeInt64SliceValue, + merge: mergeListValue, +} + +// sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64. +func sizeSint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64 wire encodes a int64 pointer as a Sint64. +func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +// consumeSint64 wire decodes a int64 pointer as a Sint64. +func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int64() = protowire.DecodeZigZag(v) + out.n = n + return out, nil +} + +var coderSint64 = pointerCoderFuncs{ + size: sizeSint64, + marshal: appendSint64, + unmarshal: consumeSint64, + merge: mergeInt64, +} + +// sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64. +// The zero value is not encoded. +func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64NoZero wire encodes a int64 pointer as a Sint64. +// The zero value is not encoded. +func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +var coderSint64NoZero = pointerCoderFuncs{ + size: sizeSint64NoZero, + marshal: appendSint64NoZero, + unmarshal: consumeSint64, + merge: mergeInt64NoZero, +} + +// sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64. +// It panics if the pointer is nil. 
+func sizeSint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Int64Ptr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64Ptr wire encodes a *int64 pointer as a Sint64. +// It panics if the pointer is nil. +func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +// consumeSint64Ptr wire decodes a *int64 pointer as a Sint64. +func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = protowire.DecodeZigZag(v) + out.n = n + return out, nil +} + +var coderSint64Ptr = pointerCoderFuncs{ + size: sizeSint64Ptr, + marshal: appendSint64Ptr, + unmarshal: consumeSint64Ptr, + merge: mergeInt64Ptr, +} + +// sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64. +func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + return size +} + +// appendSint64Slice encodes a []int64 pointer as a repeated Sint64. +func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + } + return b, nil +} + +// consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64. +func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, protowire.DecodeZigZag(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, protowire.DecodeZigZag(v)) + out.n = n + return out, nil +} + +var coderSint64Slice = pointerCoderFuncs{ + size: sizeSint64Slice, + marshal: appendSint64Slice, + unmarshal: consumeSint64Slice, + merge: mergeInt64Slice, +} + +// sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64. 
+func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64. +func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + } + return b, nil +} + +var coderSint64PackedSlice = pointerCoderFuncs{ + size: sizeSint64PackedSlice, + marshal: appendSint64PackedSlice, + unmarshal: consumeSint64Slice, + merge: mergeInt64Slice, +} + +// sizeSint64Value returns the size of wire encoding a int64 value as a Sint64. +func sizeSint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) +} + +// appendSint64Value encodes a int64 value as a Sint64. +func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + return b, nil +} + +// consumeSint64Value decodes a int64 value as a Sint64. +func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil +} + +var coderSint64Value = valueCoderFuncs{ + size: sizeSint64Value, + marshal: appendSint64Value, + unmarshal: consumeSint64Value, + merge: mergeScalarValue, +} + +// sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64. +func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + return size +} + +// appendSint64SliceValue encodes a []int64 value as a repeated Sint64. +func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + } + return b, nil +} + +// consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64. 
+func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + out.n = n + return listv, out, nil +} + +var coderSint64SliceValue = valueCoderFuncs{ + size: sizeSint64SliceValue, + marshal: appendSint64SliceValue, + unmarshal: consumeSint64SliceValue, + merge: mergeListValue, +} + +// sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64. +func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64. +func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + } + return b, nil +} + +var coderSint64PackedSliceValue = valueCoderFuncs{ + size: sizeSint64PackedSliceValue, + marshal: appendSint64PackedSliceValue, + unmarshal: consumeSint64SliceValue, + merge: mergeListValue, +} + +// sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64. +func sizeUint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint64() + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64 wire encodes a uint64 pointer as a Uint64. +func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +// consumeUint64 wire decodes a uint64 pointer as a Uint64. 
+func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Uint64() = v + out.n = n + return out, nil +} + +var coderUint64 = pointerCoderFuncs{ + size: sizeUint64, + marshal: appendUint64, + unmarshal: consumeUint64, + merge: mergeUint64, +} + +// sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64. +// The zero value is not encoded. +func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64NoZero wire encodes a uint64 pointer as a Uint64. +// The zero value is not encoded. +func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +var coderUint64NoZero = pointerCoderFuncs{ + size: sizeUint64NoZero, + marshal: appendUint64NoZero, + unmarshal: consumeUint64, + merge: mergeUint64NoZero, +} + +// sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64. +// It panics if the pointer is nil. +func sizeUint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Uint64Ptr() + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64Ptr wire encodes a *uint64 pointer as a Uint64. +// It panics if the pointer is nil. +func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Uint64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +// consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64. +func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Uint64Ptr() + if *vp == nil { + *vp = new(uint64) + } + **vp = v + out.n = n + return out, nil +} + +var coderUint64Ptr = pointerCoderFuncs{ + size: sizeUint64Ptr, + marshal: appendUint64Ptr, + unmarshal: consumeUint64Ptr, + merge: mergeUint64Ptr, +} + +// sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64. +func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(v) + } + return size +} + +// appendUint64Slice encodes a []uint64 pointer as a repeated Uint64. 
+func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + } + return b, nil +} + +// consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64. +func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderUint64Slice = pointerCoderFuncs{ + size: sizeUint64Slice, + marshal: appendUint64Slice, + unmarshal: consumeUint64Slice, + merge: mergeUint64Slice, +} + +// sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64. +func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(v) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64. +func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(v) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, v) + } + return b, nil +} + +var coderUint64PackedSlice = pointerCoderFuncs{ + size: sizeUint64PackedSlice, + marshal: appendUint64PackedSlice, + unmarshal: consumeUint64Slice, + merge: mergeUint64Slice, +} + +// sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64. +func sizeUint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(v.Uint()) +} + +// appendUint64Value encodes a uint64 value as a Uint64. +func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, v.Uint()) + return b, nil +} + +// consumeUint64Value decodes a uint64 value as a Uint64. 
+func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfUint64(v), out, nil +} + +var coderUint64Value = valueCoderFuncs{ + size: sizeUint64Value, + marshal: appendUint64Value, + unmarshal: consumeUint64Value, + merge: mergeScalarValue, +} + +// sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64. +func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(v.Uint()) + } + return size +} + +// appendUint64SliceValue encodes a []uint64 value as a repeated Uint64. +func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, v.Uint()) + } + return b, nil +} + +// consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64. +func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + out.n = n + return listv, out, nil +} + +var coderUint64SliceValue = valueCoderFuncs{ + size: sizeUint64SliceValue, + marshal: appendUint64SliceValue, + unmarshal: consumeUint64SliceValue, + merge: mergeListValue, +} + +// sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64. 
+func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(v.Uint()) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64. +func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(v.Uint()) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, v.Uint()) + } + return b, nil +} + +var coderUint64PackedSliceValue = valueCoderFuncs{ + size: sizeUint64PackedSliceValue, + marshal: appendUint64PackedSliceValue, + unmarshal: consumeUint64SliceValue, + merge: mergeListValue, +} + +// sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32. +func sizeSfixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32 wire encodes a int32 pointer as a Sfixed32. +func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +// consumeSfixed32 wire decodes a int32 pointer as a Sfixed32. +func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int32() = int32(v) + out.n = n + return out, nil +} + +var coderSfixed32 = pointerCoderFuncs{ + size: sizeSfixed32, + marshal: appendSfixed32, + unmarshal: consumeSfixed32, + merge: mergeInt32, +} + +// sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32. +// The zero value is not encoded. +func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32. +// The zero value is not encoded. +func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +var coderSfixed32NoZero = pointerCoderFuncs{ + size: sizeSfixed32NoZero, + marshal: appendSfixed32NoZero, + unmarshal: consumeSfixed32, + merge: mergeInt32NoZero, +} + +// sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32. +// It panics if the pointer is nil. +func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32. +// It panics if the pointer is nil. 
+func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +// consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32. +func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(v) + out.n = n + return out, nil +} + +var coderSfixed32Ptr = pointerCoderFuncs{ + size: sizeSfixed32Ptr, + marshal: appendSfixed32Ptr, + unmarshal: consumeSfixed32Ptr, + merge: mergeInt32Ptr, +} + +// sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32. +func sizeSfixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32. +func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + } + return b, nil +} + +// consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32. +func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int32(v)) + out.n = n + return out, nil +} + +var coderSfixed32Slice = pointerCoderFuncs{ + size: sizeSfixed32Slice, + marshal: appendSfixed32Slice, + unmarshal: consumeSfixed32Slice, + merge: mergeInt32Slice, +} + +// sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32. +func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32. 
+func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, uint32(v)) + } + return b, nil +} + +var coderSfixed32PackedSlice = pointerCoderFuncs{ + size: sizeSfixed32PackedSlice, + marshal: appendSfixed32PackedSlice, + unmarshal: consumeSfixed32Slice, + merge: mergeInt32Slice, +} + +// sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32. +func sizeSfixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendSfixed32Value encodes a int32 value as a Sfixed32. +func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Int())) + return b, nil +} + +// consumeSfixed32Value decodes a int32 value as a Sfixed32. +func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt32(int32(v)), out, nil +} + +var coderSfixed32Value = valueCoderFuncs{ + size: sizeSfixed32Value, + marshal: appendSfixed32Value, + unmarshal: consumeSfixed32Value, + merge: mergeScalarValue, +} + +// sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32. +func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32. +func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Int())) + } + return b, nil +} + +// consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32. 
+func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + out.n = n + return listv, out, nil +} + +var coderSfixed32SliceValue = valueCoderFuncs{ + size: sizeSfixed32SliceValue, + marshal: appendSfixed32SliceValue, + unmarshal: consumeSfixed32SliceValue, + merge: mergeListValue, +} + +// sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32. +func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32. +func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, uint32(v.Int())) + } + return b, nil +} + +var coderSfixed32PackedSliceValue = valueCoderFuncs{ + size: sizeSfixed32PackedSliceValue, + marshal: appendSfixed32PackedSliceValue, + unmarshal: consumeSfixed32SliceValue, + merge: mergeListValue, +} + +// sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32. +func sizeFixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32 wire encodes a uint32 pointer as a Fixed32. +func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +// consumeFixed32 wire decodes a uint32 pointer as a Fixed32. +func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Uint32() = v + out.n = n + return out, nil +} + +var coderFixed32 = pointerCoderFuncs{ + size: sizeFixed32, + marshal: appendFixed32, + unmarshal: consumeFixed32, + merge: mergeUint32, +} + +// sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32. +// The zero value is not encoded. 
+func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32. +// The zero value is not encoded. +func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +var coderFixed32NoZero = pointerCoderFuncs{ + size: sizeFixed32NoZero, + marshal: appendFixed32NoZero, + unmarshal: consumeFixed32, + merge: mergeUint32NoZero, +} + +// sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32. +// It panics if the pointer is nil. +func sizeFixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32. +// It panics if the pointer is nil. +func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Uint32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +// consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32. +func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Uint32Ptr() + if *vp == nil { + *vp = new(uint32) + } + **vp = v + out.n = n + return out, nil +} + +var coderFixed32Ptr = pointerCoderFuncs{ + size: sizeFixed32Ptr, + marshal: appendFixed32Ptr, + unmarshal: consumeFixed32Ptr, + merge: mergeUint32Ptr, +} + +// sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32. +func sizeFixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32. +func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + } + return b, nil +} + +// consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32. 
+func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderFixed32Slice = pointerCoderFuncs{ + size: sizeFixed32Slice, + marshal: appendFixed32Slice, + unmarshal: consumeFixed32Slice, + merge: mergeUint32Slice, +} + +// sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32. +func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32. +func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, v) + } + return b, nil +} + +var coderFixed32PackedSlice = pointerCoderFuncs{ + size: sizeFixed32PackedSlice, + marshal: appendFixed32PackedSlice, + unmarshal: consumeFixed32Slice, + merge: mergeUint32Slice, +} + +// sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32. +func sizeFixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendFixed32Value encodes a uint32 value as a Fixed32. +func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + return b, nil +} + +// consumeFixed32Value decodes a uint32 value as a Fixed32. +func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfUint32(uint32(v)), out, nil +} + +var coderFixed32Value = valueCoderFuncs{ + size: sizeFixed32Value, + marshal: appendFixed32Value, + unmarshal: consumeFixed32Value, + merge: mergeScalarValue, +} + +// sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32. +func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32. 
+func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + } + return b, nil +} + +// consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32. +func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + out.n = n + return listv, out, nil +} + +var coderFixed32SliceValue = valueCoderFuncs{ + size: sizeFixed32SliceValue, + marshal: appendFixed32SliceValue, + unmarshal: consumeFixed32SliceValue, + merge: mergeListValue, +} + +// sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32. +func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32. +func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + } + return b, nil +} + +var coderFixed32PackedSliceValue = valueCoderFuncs{ + size: sizeFixed32PackedSliceValue, + marshal: appendFixed32PackedSliceValue, + unmarshal: consumeFixed32SliceValue, + merge: mergeListValue, +} + +// sizeFloat returns the size of wire encoding a float32 pointer as a Float. +func sizeFloat(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloat wire encodes a float32 pointer as a Float. +func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Float32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +// consumeFloat wire decodes a float32 pointer as a Float. 
+func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Float32() = math.Float32frombits(v) + out.n = n + return out, nil +} + +var coderFloat = pointerCoderFuncs{ + size: sizeFloat, + marshal: appendFloat, + unmarshal: consumeFloat, + merge: mergeFloat32, +} + +// sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float. +// The zero value is not encoded. +func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Float32() + if v == 0 && !math.Signbit(float64(v)) { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloatNoZero wire encodes a float32 pointer as a Float. +// The zero value is not encoded. +func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Float32() + if v == 0 && !math.Signbit(float64(v)) { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +var coderFloatNoZero = pointerCoderFuncs{ + size: sizeFloatNoZero, + marshal: appendFloatNoZero, + unmarshal: consumeFloat, + merge: mergeFloat32NoZero, +} + +// sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float. +// It panics if the pointer is nil. +func sizeFloatPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloatPtr wire encodes a *float32 pointer as a Float. +// It panics if the pointer is nil. +func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Float32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +// consumeFloatPtr wire decodes a *float32 pointer as a Float. +func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Float32Ptr() + if *vp == nil { + *vp = new(float32) + } + **vp = math.Float32frombits(v) + out.n = n + return out, nil +} + +var coderFloatPtr = pointerCoderFuncs{ + size: sizeFloatPtr, + marshal: appendFloatPtr, + unmarshal: consumeFloatPtr, + merge: mergeFloat32Ptr, +} + +// sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float. +func sizeFloatSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Float32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendFloatSlice encodes a []float32 pointer as a repeated Float. +func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Float32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + } + return b, nil +} + +// consumeFloatSlice wire decodes a []float32 pointer as a repeated Float. 
+func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Float32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, math.Float32frombits(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, math.Float32frombits(v)) + out.n = n + return out, nil +} + +var coderFloatSlice = pointerCoderFuncs{ + size: sizeFloatSlice, + marshal: appendFloatSlice, + unmarshal: consumeFloatSlice, + merge: mergeFloat32Slice, +} + +// sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float. +func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Float32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float. +func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Float32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, math.Float32bits(v)) + } + return b, nil +} + +var coderFloatPackedSlice = pointerCoderFuncs{ + size: sizeFloatPackedSlice, + marshal: appendFloatPackedSlice, + unmarshal: consumeFloatSlice, + merge: mergeFloat32Slice, +} + +// sizeFloatValue returns the size of wire encoding a float32 value as a Float. +func sizeFloatValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendFloatValue encodes a float32 value as a Float. +func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + return b, nil +} + +// consumeFloatValue decodes a float32 value as a Float. +func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil +} + +var coderFloatValue = valueCoderFuncs{ + size: sizeFloatValue, + marshal: appendFloatValue, + unmarshal: consumeFloatValue, + merge: mergeScalarValue, +} + +// sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float. +func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendFloatSliceValue encodes a []float32 value as a repeated Float. 
+func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + } + return b, nil +} + +// consumeFloatSliceValue wire decodes a []float32 value as a repeated Float. +func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + out.n = n + return listv, out, nil +} + +var coderFloatSliceValue = valueCoderFuncs{ + size: sizeFloatSliceValue, + marshal: appendFloatSliceValue, + unmarshal: consumeFloatSliceValue, + merge: mergeListValue, +} + +// sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float. +func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float. +func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + } + return b, nil +} + +var coderFloatPackedSliceValue = valueCoderFuncs{ + size: sizeFloatPackedSliceValue, + marshal: appendFloatPackedSliceValue, + unmarshal: consumeFloatSliceValue, + merge: mergeListValue, +} + +// sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64. +func sizeSfixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64 wire encodes a int64 pointer as a Sfixed64. +func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +// consumeSfixed64 wire decodes a int64 pointer as a Sfixed64. 
+func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int64() = int64(v) + out.n = n + return out, nil +} + +var coderSfixed64 = pointerCoderFuncs{ + size: sizeSfixed64, + marshal: appendSfixed64, + unmarshal: consumeSfixed64, + merge: mergeInt64, +} + +// sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64. +// The zero value is not encoded. +func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64. +// The zero value is not encoded. +func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +var coderSfixed64NoZero = pointerCoderFuncs{ + size: sizeSfixed64NoZero, + marshal: appendSfixed64NoZero, + unmarshal: consumeSfixed64, + merge: mergeInt64NoZero, +} + +// sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64. +// It panics if the pointer is nil. +func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64. +// It panics if the pointer is nil. +func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +// consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64. +func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = int64(v) + out.n = n + return out, nil +} + +var coderSfixed64Ptr = pointerCoderFuncs{ + size: sizeSfixed64Ptr, + marshal: appendSfixed64Ptr, + unmarshal: consumeSfixed64Ptr, + merge: mergeInt64Ptr, +} + +// sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64. +func sizeSfixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64. +func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + } + return b, nil +} + +// consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64. 
+func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int64(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int64(v)) + out.n = n + return out, nil +} + +var coderSfixed64Slice = pointerCoderFuncs{ + size: sizeSfixed64Slice, + marshal: appendSfixed64Slice, + unmarshal: consumeSfixed64Slice, + merge: mergeInt64Slice, +} + +// sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64. +func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64. +func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, uint64(v)) + } + return b, nil +} + +var coderSfixed64PackedSlice = pointerCoderFuncs{ + size: sizeSfixed64PackedSlice, + marshal: appendSfixed64PackedSlice, + unmarshal: consumeSfixed64Slice, + merge: mergeInt64Slice, +} + +// sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64. +func sizeSfixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendSfixed64Value encodes a int64 value as a Sfixed64. +func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, uint64(v.Int())) + return b, nil +} + +// consumeSfixed64Value decodes a int64 value as a Sfixed64. +func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt64(int64(v)), out, nil +} + +var coderSfixed64Value = valueCoderFuncs{ + size: sizeSfixed64Value, + marshal: appendSfixed64Value, + unmarshal: consumeSfixed64Value, + merge: mergeScalarValue, +} + +// sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64. +func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64. 
+func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, uint64(v.Int())) + } + return b, nil +} + +// consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64. +func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + out.n = n + return listv, out, nil +} + +var coderSfixed64SliceValue = valueCoderFuncs{ + size: sizeSfixed64SliceValue, + marshal: appendSfixed64SliceValue, + unmarshal: consumeSfixed64SliceValue, + merge: mergeListValue, +} + +// sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64. +func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64. +func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, uint64(v.Int())) + } + return b, nil +} + +var coderSfixed64PackedSliceValue = valueCoderFuncs{ + size: sizeSfixed64PackedSliceValue, + marshal: appendSfixed64PackedSliceValue, + unmarshal: consumeSfixed64SliceValue, + merge: mergeListValue, +} + +// sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64. +func sizeFixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64 wire encodes a uint64 pointer as a Fixed64. +func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +// consumeFixed64 wire decodes a uint64 pointer as a Fixed64. 
+func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Uint64() = v + out.n = n + return out, nil +} + +var coderFixed64 = pointerCoderFuncs{ + size: sizeFixed64, + marshal: appendFixed64, + unmarshal: consumeFixed64, + merge: mergeUint64, +} + +// sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64. +// The zero value is not encoded. +func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64. +// The zero value is not encoded. +func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +var coderFixed64NoZero = pointerCoderFuncs{ + size: sizeFixed64NoZero, + marshal: appendFixed64NoZero, + unmarshal: consumeFixed64, + merge: mergeUint64NoZero, +} + +// sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64. +// It panics if the pointer is nil. +func sizeFixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64. +// It panics if the pointer is nil. +func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Uint64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +// consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64. +func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Uint64Ptr() + if *vp == nil { + *vp = new(uint64) + } + **vp = v + out.n = n + return out, nil +} + +var coderFixed64Ptr = pointerCoderFuncs{ + size: sizeFixed64Ptr, + marshal: appendFixed64Ptr, + unmarshal: consumeFixed64Ptr, + merge: mergeUint64Ptr, +} + +// sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64. +func sizeFixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64. +func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + } + return b, nil +} + +// consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64. 
+func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderFixed64Slice = pointerCoderFuncs{ + size: sizeFixed64Slice, + marshal: appendFixed64Slice, + unmarshal: consumeFixed64Slice, + merge: mergeUint64Slice, +} + +// sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64. +func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64. +func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, v) + } + return b, nil +} + +var coderFixed64PackedSlice = pointerCoderFuncs{ + size: sizeFixed64PackedSlice, + marshal: appendFixed64PackedSlice, + unmarshal: consumeFixed64Slice, + merge: mergeUint64Slice, +} + +// sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64. +func sizeFixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendFixed64Value encodes a uint64 value as a Fixed64. +func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, v.Uint()) + return b, nil +} + +// consumeFixed64Value decodes a uint64 value as a Fixed64. +func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfUint64(v), out, nil +} + +var coderFixed64Value = valueCoderFuncs{ + size: sizeFixed64Value, + marshal: appendFixed64Value, + unmarshal: consumeFixed64Value, + merge: mergeScalarValue, +} + +// sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64. +func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64. 
+func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, v.Uint()) + } + return b, nil +} + +// consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64. +func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + out.n = n + return listv, out, nil +} + +var coderFixed64SliceValue = valueCoderFuncs{ + size: sizeFixed64SliceValue, + marshal: appendFixed64SliceValue, + unmarshal: consumeFixed64SliceValue, + merge: mergeListValue, +} + +// sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64. +func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64. +func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, v.Uint()) + } + return b, nil +} + +var coderFixed64PackedSliceValue = valueCoderFuncs{ + size: sizeFixed64PackedSliceValue, + marshal: appendFixed64PackedSliceValue, + unmarshal: consumeFixed64SliceValue, + merge: mergeListValue, +} + +// sizeDouble returns the size of wire encoding a float64 pointer as a Double. +func sizeDouble(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendDouble wire encodes a float64 pointer as a Double. +func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Float64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +// consumeDouble wire decodes a float64 pointer as a Double. 
+func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Float64() = math.Float64frombits(v) + out.n = n + return out, nil +} + +var coderDouble = pointerCoderFuncs{ + size: sizeDouble, + marshal: appendDouble, + unmarshal: consumeDouble, + merge: mergeFloat64, +} + +// sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double. +// The zero value is not encoded. +func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Float64() + if v == 0 && !math.Signbit(float64(v)) { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendDoubleNoZero wire encodes a float64 pointer as a Double. +// The zero value is not encoded. +func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Float64() + if v == 0 && !math.Signbit(float64(v)) { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +var coderDoubleNoZero = pointerCoderFuncs{ + size: sizeDoubleNoZero, + marshal: appendDoubleNoZero, + unmarshal: consumeDouble, + merge: mergeFloat64NoZero, +} + +// sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double. +// It panics if the pointer is nil. +func sizeDoublePtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendDoublePtr wire encodes a *float64 pointer as a Double. +// It panics if the pointer is nil. +func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Float64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +// consumeDoublePtr wire decodes a *float64 pointer as a Double. +func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Float64Ptr() + if *vp == nil { + *vp = new(float64) + } + **vp = math.Float64frombits(v) + out.n = n + return out, nil +} + +var coderDoublePtr = pointerCoderFuncs{ + size: sizeDoublePtr, + marshal: appendDoublePtr, + unmarshal: consumeDoublePtr, + merge: mergeFloat64Ptr, +} + +// sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double. +func sizeDoubleSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Float64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendDoubleSlice encodes a []float64 pointer as a repeated Double. +func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Float64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + } + return b, nil +} + +// consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double. 
+func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Float64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, math.Float64frombits(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, math.Float64frombits(v)) + out.n = n + return out, nil +} + +var coderDoubleSlice = pointerCoderFuncs{ + size: sizeDoubleSlice, + marshal: appendDoubleSlice, + unmarshal: consumeDoubleSlice, + merge: mergeFloat64Slice, +} + +// sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double. +func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Float64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double. +func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Float64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, math.Float64bits(v)) + } + return b, nil +} + +var coderDoublePackedSlice = pointerCoderFuncs{ + size: sizeDoublePackedSlice, + marshal: appendDoublePackedSlice, + unmarshal: consumeDoubleSlice, + merge: mergeFloat64Slice, +} + +// sizeDoubleValue returns the size of wire encoding a float64 value as a Double. +func sizeDoubleValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendDoubleValue encodes a float64 value as a Double. +func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + return b, nil +} + +// consumeDoubleValue decodes a float64 value as a Double. +func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil +} + +var coderDoubleValue = valueCoderFuncs{ + size: sizeDoubleValue, + marshal: appendDoubleValue, + unmarshal: consumeDoubleValue, + merge: mergeScalarValue, +} + +// sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double. +func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendDoubleSliceValue encodes a []float64 value as a repeated Double. 
+func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + } + return b, nil +} + +// consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double. +func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + out.n = n + return listv, out, nil +} + +var coderDoubleSliceValue = valueCoderFuncs{ + size: sizeDoubleSliceValue, + marshal: appendDoubleSliceValue, + unmarshal: consumeDoubleSliceValue, + merge: mergeListValue, +} + +// sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double. +func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double. +func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + } + return b, nil +} + +var coderDoublePackedSliceValue = valueCoderFuncs{ + size: sizeDoublePackedSliceValue, + marshal: appendDoublePackedSliceValue, + unmarshal: consumeDoubleSliceValue, + merge: mergeListValue, +} + +// sizeString returns the size of wire encoding a string pointer as a String. +func sizeString(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.String() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendString wire encodes a string pointer as a String. +func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.String() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +// consumeString wire decodes a string pointer as a String. 
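+// UTF-8 validity is not checked here; the ValidateUTF8 variants below are
+// used for fields that require valid UTF-8 (for example, proto3 strings).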
+func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.String() = v + out.n = n + return out, nil +} + +var coderString = pointerCoderFuncs{ + size: sizeString, + marshal: appendString, + unmarshal: consumeString, + merge: mergeString, +} + +// appendStringValidateUTF8 wire encodes a string pointer as a String. +func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.String() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringValidateUTF8 wire decodes a string pointer as a String. +func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return out, errInvalidUTF8{} + } + *p.String() = v + out.n = n + return out, nil +} + +var coderStringValidateUTF8 = pointerCoderFuncs{ + size: sizeString, + marshal: appendStringValidateUTF8, + unmarshal: consumeStringValidateUTF8, + merge: mergeString, +} + +// sizeStringNoZero returns the size of wire encoding a string pointer as a String. +// The zero value is not encoded. +func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.String() + if len(v) == 0 { + return 0 + } + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendStringNoZero wire encodes a string pointer as a String. +// The zero value is not encoded. +func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.String() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +var coderStringNoZero = pointerCoderFuncs{ + size: sizeStringNoZero, + marshal: appendStringNoZero, + unmarshal: consumeString, + merge: mergeStringNoZero, +} + +// appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String. +// The zero value is not encoded. +func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.String() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{ + size: sizeStringNoZero, + marshal: appendStringNoZeroValidateUTF8, + unmarshal: consumeStringValidateUTF8, + merge: mergeStringNoZero, +} + +// sizeStringPtr returns the size of wire encoding a *string pointer as a String. +// It panics if the pointer is nil. +func sizeStringPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.StringPtr() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendStringPtr wire encodes a *string pointer as a String. +// It panics if the pointer is nil. 
+func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +// consumeStringPtr wire decodes a *string pointer as a String. +func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = v + out.n = n + return out, nil +} + +var coderStringPtr = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtr, + unmarshal: consumeStringPtr, + merge: mergeStringPtr, +} + +// appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. +// It panics if the pointer is nil. +func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. +func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return out, errInvalidUTF8{} + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = v + out.n = n + return out, nil +} + +var coderStringPtrValidateUTF8 = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtrValidateUTF8, + unmarshal: consumeStringPtrValidateUTF8, + merge: mergeStringPtr, +} + +// sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. +func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.StringSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeBytes(len(v)) + } + return size +} + +// appendStringSlice encodes a []string pointer as a repeated String. +func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.StringSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + } + return b, nil +} + +// consumeStringSlice wire decodes a []string pointer as a repeated String. +func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.StringSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderStringSlice = pointerCoderFuncs{ + size: sizeStringSlice, + marshal: appendStringSlice, + unmarshal: consumeStringSlice, + merge: mergeStringSlice, +} + +// appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String. 
+func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.StringSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + } + return b, nil +} + +// consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String. +func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.StringSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return out, errInvalidUTF8{} + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderStringSliceValidateUTF8 = pointerCoderFuncs{ + size: sizeStringSlice, + marshal: appendStringSliceValidateUTF8, + unmarshal: consumeStringSliceValidateUTF8, + merge: mergeStringSlice, +} + +// sizeStringValue returns the size of wire encoding a string value as a String. +func sizeStringValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeBytes(len(v.String())) +} + +// appendStringValue encodes a string value as a String. +func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + return b, nil +} + +// consumeStringValue decodes a string value as a String. +func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfString(string(v)), out, nil +} + +var coderStringValue = valueCoderFuncs{ + size: sizeStringValue, + marshal: appendStringValue, + unmarshal: consumeStringValue, + merge: mergeScalarValue, +} + +// appendStringValueValidateUTF8 encodes a string value as a String. +func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + if !utf8.ValidString(v.String()) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringValueValidateUTF8 decodes a string value as a String. +func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return protoreflect.Value{}, out, errInvalidUTF8{} + } + out.n = n + return protoreflect.ValueOfString(string(v)), out, nil +} + +var coderStringValueValidateUTF8 = valueCoderFuncs{ + size: sizeStringValue, + marshal: appendStringValueValidateUTF8, + unmarshal: consumeStringValueValidateUTF8, + merge: mergeScalarValue, +} + +// sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String. 
+func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeBytes(len(v.String())) + } + return size +} + +// appendStringSliceValue encodes a []string value as a repeated String. +func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + } + return b, nil +} + +// consumeStringSliceValue wire decodes a []string value as a repeated String. +func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfString(string(v))) + out.n = n + return listv, out, nil +} + +var coderStringSliceValue = valueCoderFuncs{ + size: sizeStringSliceValue, + marshal: appendStringSliceValue, + unmarshal: consumeStringSliceValue, + merge: mergeListValue, +} + +// sizeBytes returns the size of wire encoding a []byte pointer as a Bytes. +func sizeBytes(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Bytes() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendBytes wire encodes a []byte pointer as a Bytes. +func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bytes() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + return b, nil +} + +// consumeBytes wire decodes a []byte pointer as a Bytes. +func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Bytes() = append(emptyBuf[:], v...) + out.n = n + return out, nil +} + +var coderBytes = pointerCoderFuncs{ + size: sizeBytes, + marshal: appendBytes, + unmarshal: consumeBytes, + merge: mergeBytes, +} + +// appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes. +func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bytes() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes. +func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.Bytes() = append(emptyBuf[:], v...) 
+ out.n = n + return out, nil +} + +var coderBytesValidateUTF8 = pointerCoderFuncs{ + size: sizeBytes, + marshal: appendBytesValidateUTF8, + unmarshal: consumeBytesValidateUTF8, + merge: mergeBytes, +} + +// sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes. +// The zero value is not encoded. +func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Bytes() + if len(v) == 0 { + return 0 + } + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendBytesNoZero wire encodes a []byte pointer as a Bytes. +// The zero value is not encoded. +func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bytes() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + return b, nil +} + +// consumeBytesNoZero wire decodes a []byte pointer as a Bytes. +// The zero value is not decoded. +func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Bytes() = append(([]byte)(nil), v...) + out.n = n + return out, nil +} + +var coderBytesNoZero = pointerCoderFuncs{ + size: sizeBytesNoZero, + marshal: appendBytesNoZero, + unmarshal: consumeBytesNoZero, + merge: mergeBytesNoZero, +} + +// appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes. +// The zero value is not encoded. +func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bytes() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes. +func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.Bytes() = append(([]byte)(nil), v...) + out.n = n + return out, nil +} + +var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{ + size: sizeBytesNoZero, + marshal: appendBytesNoZeroValidateUTF8, + unmarshal: consumeBytesNoZeroValidateUTF8, + merge: mergeBytesNoZero, +} + +// sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes. +func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.BytesSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeBytes(len(v)) + } + return size +} + +// appendBytesSlice encodes a [][]byte pointer as a repeated Bytes. +func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.BytesSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + } + return b, nil +} + +// consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes. 
+func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BytesSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, append(emptyBuf[:], v...)) + out.n = n + return out, nil +} + +var coderBytesSlice = pointerCoderFuncs{ + size: sizeBytesSlice, + marshal: appendBytesSlice, + unmarshal: consumeBytesSlice, + merge: mergeBytesSlice, +} + +// appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes. +func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.BytesSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + } + return b, nil +} + +// consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes. +func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BytesSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *sp = append(*sp, append(emptyBuf[:], v...)) + out.n = n + return out, nil +} + +var coderBytesSliceValidateUTF8 = pointerCoderFuncs{ + size: sizeBytesSlice, + marshal: appendBytesSliceValidateUTF8, + unmarshal: consumeBytesSliceValidateUTF8, + merge: mergeBytesSlice, +} + +// sizeBytesValue returns the size of wire encoding a []byte value as a Bytes. +func sizeBytesValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeBytes(len(v.Bytes())) +} + +// appendBytesValue encodes a []byte value as a Bytes. +func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendBytes(b, v.Bytes()) + return b, nil +} + +// consumeBytesValue decodes a []byte value as a Bytes. +func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil +} + +var coderBytesValue = valueCoderFuncs{ + size: sizeBytesValue, + marshal: appendBytesValue, + unmarshal: consumeBytesValue, + merge: mergeBytesValue, +} + +// sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes. +func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeBytes(len(v.Bytes())) + } + return size +} + +// appendBytesSliceValue encodes a [][]byte value as a repeated Bytes. 
+func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendBytes(b, v.Bytes()) + } + return b, nil +} + +// consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes. +func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) + out.n = n + return listv, out, nil +} + +var coderBytesSliceValue = valueCoderFuncs{ + size: sizeBytesSliceValue, + marshal: appendBytesSliceValue, + unmarshal: consumeBytesSliceValue, + merge: mergeBytesListValue, +} + +// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. +var emptyBuf [0]byte + +var wireTypes = map[protoreflect.Kind]protowire.Type{ + protoreflect.BoolKind: protowire.VarintType, + protoreflect.EnumKind: protowire.VarintType, + protoreflect.Int32Kind: protowire.VarintType, + protoreflect.Sint32Kind: protowire.VarintType, + protoreflect.Uint32Kind: protowire.VarintType, + protoreflect.Int64Kind: protowire.VarintType, + protoreflect.Sint64Kind: protowire.VarintType, + protoreflect.Uint64Kind: protowire.VarintType, + protoreflect.Sfixed32Kind: protowire.Fixed32Type, + protoreflect.Fixed32Kind: protowire.Fixed32Type, + protoreflect.FloatKind: protowire.Fixed32Type, + protoreflect.Sfixed64Kind: protowire.Fixed64Type, + protoreflect.Fixed64Kind: protowire.Fixed64Type, + protoreflect.DoubleKind: protowire.Fixed64Type, + protoreflect.StringKind: protowire.BytesType, + protoreflect.BytesKind: protowire.BytesType, + protoreflect.MessageKind: protowire.BytesType, + protoreflect.GroupKind: protowire.StartGroupType, +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go new file mode 100644 index 000000000..44885a761 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -0,0 +1,389 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "errors" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/genid" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type mapInfo struct { + goType reflect.Type + keyWiretag uint64 + valWiretag uint64 + keyFuncs valueCoderFuncs + valFuncs valueCoderFuncs + keyZero pref.Value + keyKind pref.Kind + conv *mapConverter +} + +func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { + // TODO: Consider generating specialized map coders. 
+	keyField := fd.MapKey()
+	valField := fd.MapValue()
+	keyWiretag := protowire.EncodeTag(1, wireTypes[keyField.Kind()])
+	valWiretag := protowire.EncodeTag(2, wireTypes[valField.Kind()])
+	keyFuncs := encoderFuncsForValue(keyField)
+	valFuncs := encoderFuncsForValue(valField)
+	conv := newMapConverter(ft, fd)
+
+	mapi := &mapInfo{
+		goType:     ft,
+		keyWiretag: keyWiretag,
+		valWiretag: valWiretag,
+		keyFuncs:   keyFuncs,
+		valFuncs:   valFuncs,
+		keyZero:    keyField.Default(),
+		keyKind:    keyField.Kind(),
+		conv:       conv,
+	}
+	if valField.Kind() == pref.MessageKind {
+		valueMessage = getMessageInfo(ft.Elem())
+	}
+
+	funcs = pointerCoderFuncs{
+		size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
+			return sizeMap(p.AsValueOf(ft).Elem(), mapi, f, opts)
+		},
+		marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+			return appendMap(b, p.AsValueOf(ft).Elem(), mapi, f, opts)
+		},
+		unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
+			mp := p.AsValueOf(ft)
+			if mp.Elem().IsNil() {
+				mp.Elem().Set(reflect.MakeMap(mapi.goType))
+			}
+			if f.mi == nil {
+				return consumeMap(b, mp.Elem(), wtyp, mapi, f, opts)
+			} else {
+				return consumeMapOfMessage(b, mp.Elem(), wtyp, mapi, f, opts)
+			}
+		},
+	}
+	switch valField.Kind() {
+	case pref.MessageKind:
+		funcs.merge = mergeMapOfMessage
+	case pref.BytesKind:
+		funcs.merge = mergeMapOfBytes
+	default:
+		funcs.merge = mergeMap
+	}
+	if valFuncs.isInit != nil {
+		funcs.isInit = func(p pointer, f *coderFieldInfo) error {
+			return isInitMap(p.AsValueOf(ft).Elem(), mapi, f)
+		}
+	}
+	return valueMessage, funcs
+}
+
+const (
+	mapKeyTagSize = 1 // field 1, tag size 1.
+	mapValTagSize = 1 // field 2, tag size 1.
+) + +func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) int { + if mapv.Len() == 0 { + return 0 + } + n := 0 + iter := mapRange(mapv) + for iter.Next() { + key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() + keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + var valSize int + value := mapi.conv.valConv.PBValueOf(iter.Value()) + if f.mi == nil { + valSize = mapi.valFuncs.size(value, mapValTagSize, opts) + } else { + p := pointerOfValue(iter.Value()) + valSize += mapValTagSize + valSize += protowire.SizeBytes(f.mi.sizePointer(p, opts)) + } + n += f.tagsize + protowire.SizeBytes(keySize+valSize) + } + return n +} + +func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + var ( + key = mapi.keyZero + val = mapi.conv.valConv.New() + ) + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if num > protowire.MaxValidNumber { + return out, errors.New("invalid field number") + } + b = b[n:] + err := errUnknown + switch num { + case genid.MapEntry_Key_field_number: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { + break + } + key = v + n = o.n + case genid.MapEntry_Value_field_number: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) + if err != nil { + break + } + val = v + n = o.n + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, protowire.ParseError(n) + } + } else if err != nil { + return out, err + } + b = b[n:] + } + mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), mapi.conv.valConv.GoValueOf(val)) + out.n = n + return out, nil +} + +func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + var ( + key = mapi.keyZero + val = reflect.New(f.mi.GoReflectType.Elem()) + ) + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if num > protowire.MaxValidNumber { + return out, errors.New("invalid field number") + } + b = b[n:] + err := errUnknown + switch num { + case 1: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { + break + } + key = v + n = o.n + case 2: + if wtyp != protowire.BytesType { + break + } + var v []byte + v, n = protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + var o unmarshalOutput + o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts) + if o.initialized { + // Consider this map item initialized so long as we see + // an initialized value. 
+ out.initialized = true + } + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, protowire.ParseError(n) + } + } else if err != nil { + return out, err + } + b = b[n:] + } + mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), val) + out.n = n + return out, nil +} + +func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if f.mi == nil { + key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() + val := mapi.conv.valConv.PBValueOf(valrv) + size := 0 + size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + size += mapi.valFuncs.size(val, mapValTagSize, opts) + b = protowire.AppendVarint(b, uint64(size)) + b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) + if err != nil { + return nil, err + } + return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + } else { + key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() + val := pointerOfValue(valrv) + valSize := f.mi.sizePointer(val, opts) + size := 0 + size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + size += mapValTagSize + protowire.SizeBytes(valSize) + b = protowire.AppendVarint(b, uint64(size)) + b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) + if err != nil { + return nil, err + } + b = protowire.AppendVarint(b, mapi.valWiretag) + b = protowire.AppendVarint(b, uint64(valSize)) + return f.mi.marshalAppendPointer(b, val, opts) + } +} + +func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if mapv.Len() == 0 { + return b, nil + } + if opts.Deterministic() { + return appendMapDeterministic(b, mapv, mapi, f, opts) + } + iter := mapRange(mapv) + for iter.Next() { + var err error + b = protowire.AppendVarint(b, f.wiretag) + b, err = appendMapItem(b, iter.Key(), iter.Value(), mapi, f, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func appendMapDeterministic(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + keys := mapv.MapKeys() + sort.Slice(keys, func(i, j int) bool { + switch keys[i].Kind() { + case reflect.Bool: + return !keys[i].Bool() && keys[j].Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return keys[i].Int() < keys[j].Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return keys[i].Uint() < keys[j].Uint() + case reflect.Float32, reflect.Float64: + return keys[i].Float() < keys[j].Float() + case reflect.String: + return keys[i].String() < keys[j].String() + default: + panic("invalid kind: " + keys[i].Kind().String()) + } + }) + for _, key := range keys { + var err error + b = protowire.AppendVarint(b, f.wiretag) + b, err = appendMapItem(b, key, mapv.MapIndex(key), mapi, f, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { + if mi := f.mi; mi != nil { + mi.init() + if !mi.needsInitCheck { + return nil + } + iter := mapRange(mapv) + for iter.Next() { + val := pointerOfValue(iter.Value()) + if err := mi.checkInitializedPointer(val); err != nil { + return err + } + } + } else { + iter := mapRange(mapv) + for iter.Next() { + val := mapi.conv.valConv.PBValueOf(iter.Value()) + if err := mapi.valFuncs.isInit(val); err != nil { + return err + } + } + } + return nil +} + +func mergeMap(dst, src pointer, f *coderFieldInfo, 
opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + dstm.SetMapIndex(iter.Key(), iter.Value()) + } +} + +func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) + } +} + +func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + val := reflect.New(f.ft.Elem().Elem()) + if f.mi != nil { + f.mi.mergePointer(pointerOfValue(val), pointerOfValue(iter.Value()), opts) + } else { + opts.Merge(asMessage(val), asMessage(iter.Value())) + } + dstm.SetMapIndex(iter.Key(), val) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go new file mode 100644 index 000000000..2706bb67f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.12 + +package impl + +import "reflect" + +type mapIter struct { + v reflect.Value + keys []reflect.Value +} + +// mapRange provides a less-efficient equivalent to +// the Go 1.12 reflect.Value.MapRange method. +func mapRange(v reflect.Value) *mapIter { + return &mapIter{v: v} +} + +func (i *mapIter) Next() bool { + if i.keys == nil { + i.keys = i.v.MapKeys() + } else { + i.keys = i.keys[1:] + } + return len(i.keys) > 0 +} + +func (i *mapIter) Key() reflect.Value { + return i.keys[0] +} + +func (i *mapIter) Value() reflect.Value { + return i.v.MapIndex(i.keys[0]) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go new file mode 100644 index 000000000..1533ef600 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.12 + +package impl + +import "reflect" + +func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go new file mode 100644 index 000000000..0e176d565 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -0,0 +1,159 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/fieldsort" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// coderMessageInfo contains per-message information used by the fast-path functions. +// This is a different type from MessageInfo to keep MessageInfo as general-purpose as +// possible. +type coderMessageInfo struct { + methods piface.Methods + + orderedCoderFields []*coderFieldInfo + denseCoderFields []*coderFieldInfo + coderFields map[protowire.Number]*coderFieldInfo + sizecacheOffset offset + unknownOffset offset + extensionOffset offset + needsInitCheck bool + isMessageSet bool + numRequiredFields uint8 +} + +type coderFieldInfo struct { + funcs pointerCoderFuncs // fast-path per-field functions + mi *MessageInfo // field's message + ft reflect.Type + validation validationInfo // information used by message validation + num pref.FieldNumber // field number + offset offset // struct field offset + wiretag uint64 // field tag (number + wire type) + tagsize int // size of the varint-encoded tag + isPointer bool // true if IsNil may be called on the struct field + isRequired bool // true if field is required +} + +func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { + mi.sizecacheOffset = si.sizecacheOffset + mi.unknownOffset = si.unknownOffset + mi.extensionOffset = si.extensionOffset + + mi.coderFields = make(map[protowire.Number]*coderFieldInfo) + fields := mi.Desc.Fields() + preallocFields := make([]coderFieldInfo, fields.Len()) + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + ft := fs.Type + var wiretag uint64 + if !fd.IsPacked() { + wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) + } else { + wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) + } + var fieldOffset offset + var funcs pointerCoderFuncs + var childMessage *MessageInfo + switch { + case isOneof: + fieldOffset = offsetOf(fs, mi.Exporter) + case fd.IsWeak(): + fieldOffset = si.weakOffset + funcs = makeWeakMessageFieldCoder(fd) + default: + fieldOffset = offsetOf(fs, mi.Exporter) + childMessage, funcs = fieldCoder(fd, ft) + } + cf := &preallocFields[i] + *cf = coderFieldInfo{ + num: fd.Number(), + offset: fieldOffset, + wiretag: wiretag, + ft: ft, + tagsize: protowire.SizeVarint(wiretag), + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si, fd, ft), + isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), + isRequired: fd.Cardinality() == pref.Required, + } + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf + } + for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si) + } + } + if messageset.IsMessageSet(mi.Desc) { + if !mi.extensionOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) + } + if !mi.unknownOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) + } + mi.isMessageSet = true + } + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + return mi.orderedCoderFields[i].num 
< mi.orderedCoderFields[j].num + }) + + var maxDense pref.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break + } + maxDense = cf.num + } + mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) + for _, cf := range mi.orderedCoderFields { + if int(cf.num) >= len(mi.denseCoderFields) { + break + } + mi.denseCoderFields[cf.num] = cf + } + + // To preserve compatibility with historic wire output, marshal oneofs last. + if mi.Desc.Oneofs().Len() > 0 { + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + fi := fields.ByNumber(mi.orderedCoderFields[i].num) + fj := fields.ByNumber(mi.orderedCoderFields[j].num) + return fieldsort.Less(fi, fj) + }) + } + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { + mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { + mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { + mi.methods.CheckInitialized = mi.checkInitialized + } + if mi.methods.Merge == nil { + mi.methods.Merge = mi.merge + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go new file mode 100644 index 000000000..cfb68e12f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -0,0 +1,120 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" +) + +func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) { + if !flags.ProtoLegacy { + return 0 + } + + ext := *p.Apply(mi.extensionOffset).Extensions() + for _, x := range ext { + xi := getExtensionFieldInfo(x.Type()) + if xi.funcs.size == nil { + continue + } + num, _ := protowire.DecodeTag(xi.wiretag) + size += messageset.SizeField(num) + size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) + } + + unknown := *p.Apply(mi.unknownOffset).Bytes() + size += messageset.SizeUnknown(unknown) + + return size +} + +func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions) ([]byte, error) { + if !flags.ProtoLegacy { + return b, errors.New("no support for message_set_wire_format") + } + + ext := *p.Apply(mi.extensionOffset).Extensions() + switch len(ext) { + case 0: + case 1: + // Fast-path for one extension: Don't bother sorting the keys. + for _, x := range ext { + var err error + b, err = marshalMessageSetField(mi, b, x, opts) + if err != nil { + return b, err + } + } + default: + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. 
+ keys := make([]int, 0, len(ext)) + for k := range ext { + keys = append(keys, int(k)) + } + sort.Ints(keys) + for _, k := range keys { + var err error + b, err = marshalMessageSetField(mi, b, ext[int32(k)], opts) + if err != nil { + return b, err + } + } + } + + unknown := *p.Apply(mi.unknownOffset).Bytes() + b, err := messageset.AppendUnknown(b, unknown) + if err != nil { + return b, err + } + + return b, nil +} + +func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts marshalOptions) ([]byte, error) { + xi := getExtensionFieldInfo(x.Type()) + num, _ := protowire.DecodeTag(xi.wiretag) + b = messageset.AppendFieldStart(b, num) + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) + if err != nil { + return b, err + } + b = messageset.AppendFieldEnd(b) + return b, nil +} + +func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOptions) (out unmarshalOutput, err error) { + if !flags.ProtoLegacy { + return out, errors.New("no support for message_set_wire_format") + } + + ep := p.Apply(mi.extensionOffset).Extensions() + if *ep == nil { + *ep = make(map[int32]ExtensionField) + } + ext := *ep + unknown := p.Apply(mi.unknownOffset).Bytes() + initialized := true + err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error { + o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts) + if err == errUnknown { + *unknown = protowire.AppendTag(*unknown, num, protowire.BytesType) + *unknown = append(*unknown, v...) + return nil + } + if !o.initialized { + initialized = false + } + return err + }) + out.n = len(b) + out.initialized = initialized + return out, err +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go new file mode 100644 index 000000000..86f7dc3c9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -0,0 +1,209 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build purego appengine + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := p.v.Elem().Int() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := p.v.Elem().Int() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, protowire.ParseError(n) + } + p.v.Elem().SetInt(int64(v)) + out.n = n + return out, nil +} + +func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + dst.v.Elem().Set(src.v.Elem()) +} + +var coderEnum = pointerCoderFuncs{ + size: sizeEnum, + marshal: appendEnum, + unmarshal: consumeEnum, + merge: mergeEnum, +} + +func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + if p.v.Elem().Int() == 0 { + return 0 + } + return sizeEnum(p, f, opts) +} + +func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if p.v.Elem().Int() == 0 { + return b, nil + } + return appendEnum(b, p, f, opts) +} + +func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + if src.v.Elem().Int() != 0 { + dst.v.Elem().Set(src.v.Elem()) + } +} + +var coderEnumNoZero = pointerCoderFuncs{ + size: sizeEnumNoZero, + marshal: appendEnumNoZero, + unmarshal: consumeEnum, + merge: mergeEnumNoZero, +} + +func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return sizeEnum(pointer{p.v.Elem()}, f, opts) +} + +func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendEnum(b, pointer{p.v.Elem()}, f, opts) +} + +func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + if p.v.Elem().IsNil() { + p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) + } + return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) +} + +func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + if !src.v.Elem().IsNil() { + v := reflect.New(dst.v.Type().Elem().Elem()) + v.Elem().Set(src.v.Elem().Elem()) + dst.v.Elem().Set(v) + } +} + +var coderEnumPtr = pointerCoderFuncs{ + size: sizeEnumPtr, + marshal: appendEnumPtr, + unmarshal: consumeEnumPtr, + merge: mergeEnumPtr, +} + +func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.v.Elem() + for i, llen := 0, s.Len(); i < llen; i++ { + size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize + } + return size +} + +func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.v.Elem() + for i, llen := 0, s.Len(); i < llen; i++ { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) + } + return b, nil +} + +func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + s := p.v.Elem() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return 
out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, protowire.ParseError(n) + } + rv := reflect.New(s.Type().Elem()).Elem() + rv.SetInt(int64(v)) + s.Set(reflect.Append(s, rv)) + b = b[n:] + } + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, protowire.ParseError(n) + } + rv := reflect.New(s.Type().Elem()).Elem() + rv.SetInt(int64(v)) + s.Set(reflect.Append(s, rv)) + out.n = n + return out, nil +} + +func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) +} + +var coderEnumSlice = pointerCoderFuncs{ + size: sizeEnumSlice, + marshal: appendEnumSlice, + unmarshal: consumeEnumSlice, + merge: mergeEnumSlice, +} + +func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.v.Elem() + llen := s.Len() + if llen == 0 { + return 0 + } + n := 0 + for i := 0; i < llen; i++ { + n += protowire.SizeVarint(uint64(s.Index(i).Int())) + } + return f.tagsize + protowire.SizeBytes(n) +} + +func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.v.Elem() + llen := s.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for i := 0; i < llen; i++ { + n += protowire.SizeVarint(uint64(s.Index(i).Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) + } + return b, nil +} + +var coderEnumPackedSlice = pointerCoderFuncs{ + size: sizeEnumPackedSlice, + marshal: appendEnumPackedSlice, + unmarshal: consumeEnumSlice, + merge: mergeEnumSlice, +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go new file mode 100644 index 000000000..e89971238 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -0,0 +1,557 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// pointerCoderFuncs is a set of pointer encoding functions. +type pointerCoderFuncs struct { + mi *MessageInfo + size func(p pointer, f *coderFieldInfo, opts marshalOptions) int + marshal func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) + isInit func(p pointer, f *coderFieldInfo) error + merge func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) +} + +// valueCoderFuncs is a set of protoreflect.Value encoding functions. 
+type valueCoderFuncs struct { + size func(v pref.Value, tagsize int, opts marshalOptions) int + marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) + isInit func(v pref.Value) error + merge func(dst, src pref.Value, opts mergeOptions) pref.Value +} + +// fieldCoder returns pointer functions for a field, used for operating on +// struct fields. +func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + switch { + case fd.IsMap(): + return encoderFuncsForMap(fd, ft) + case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + // Repeated fields (not packed). + if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolSlice + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumSlice + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Slice + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Slice + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Slice + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Slice + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Slice + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Slice + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Slice + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Slice + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatSlice + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Slice + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Slice + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleSlice + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringSliceValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesSliceValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } + case pref.MessageKind: + return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) + case pref.GroupKind: + return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) + } + case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + // Packed repeated fields. + // + // Only repeated fields of primitive numeric types + // (Varint, Fixed32, or Fixed64 wire type) can be packed. 
+ if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPackedSlice + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPackedSlice + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32PackedSlice + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32PackedSlice + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32PackedSlice + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64PackedSlice + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64PackedSlice + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64PackedSlice + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32PackedSlice + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32PackedSlice + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPackedSlice + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64PackedSlice + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64PackedSlice + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePackedSlice + } + } + case fd.Kind() == pref.MessageKind: + return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) + case fd.Kind() == pref.GroupKind: + return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) + case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil: + // Populated oneof fields always encode even if set to the zero value, + // which normally are not encoded in proto3. 
+ switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolNoZero + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumNoZero + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32NoZero + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32NoZero + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32NoZero + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64NoZero + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64NoZero + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64NoZero + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32NoZero + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32NoZero + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatNoZero + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64NoZero + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64NoZero + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleNoZero + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringNoZeroValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesNoZeroValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } + } + case ft.Kind() == reflect.Ptr: + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPtr + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPtr + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Ptr + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Ptr + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Ptr + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Ptr + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Ptr + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Ptr + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Ptr + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Ptr + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPtr + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Ptr + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Ptr + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePtr + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringPtrValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + case 
pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + } + default: + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBool + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnum + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32 + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32 + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32 + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64 + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64 + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64 + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32 + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32 + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloat + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64 + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64 + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDouble + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderString + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderString + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } + } + } + panic(fmt.Sprintf("invalid type: no encoder for %v %v %v/%v", fd.FullName(), fd.Cardinality(), fd.Kind(), ft)) +} + +// encoderFuncsForValue returns value functions for a field, used for +// extension values and map encoding. +func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs { + switch { + case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + switch fd.Kind() { + case pref.BoolKind: + return coderBoolSliceValue + case pref.EnumKind: + return coderEnumSliceValue + case pref.Int32Kind: + return coderInt32SliceValue + case pref.Sint32Kind: + return coderSint32SliceValue + case pref.Uint32Kind: + return coderUint32SliceValue + case pref.Int64Kind: + return coderInt64SliceValue + case pref.Sint64Kind: + return coderSint64SliceValue + case pref.Uint64Kind: + return coderUint64SliceValue + case pref.Sfixed32Kind: + return coderSfixed32SliceValue + case pref.Fixed32Kind: + return coderFixed32SliceValue + case pref.FloatKind: + return coderFloatSliceValue + case pref.Sfixed64Kind: + return coderSfixed64SliceValue + case pref.Fixed64Kind: + return coderFixed64SliceValue + case pref.DoubleKind: + return coderDoubleSliceValue + case pref.StringKind: + // We don't have a UTF-8 validating coder for repeated string fields. + // Value coders are used for extensions and maps. + // Extensions are never proto3, and maps never contain lists. 
+ return coderStringSliceValue + case pref.BytesKind: + return coderBytesSliceValue + case pref.MessageKind: + return coderMessageSliceValue + case pref.GroupKind: + return coderGroupSliceValue + } + case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + switch fd.Kind() { + case pref.BoolKind: + return coderBoolPackedSliceValue + case pref.EnumKind: + return coderEnumPackedSliceValue + case pref.Int32Kind: + return coderInt32PackedSliceValue + case pref.Sint32Kind: + return coderSint32PackedSliceValue + case pref.Uint32Kind: + return coderUint32PackedSliceValue + case pref.Int64Kind: + return coderInt64PackedSliceValue + case pref.Sint64Kind: + return coderSint64PackedSliceValue + case pref.Uint64Kind: + return coderUint64PackedSliceValue + case pref.Sfixed32Kind: + return coderSfixed32PackedSliceValue + case pref.Fixed32Kind: + return coderFixed32PackedSliceValue + case pref.FloatKind: + return coderFloatPackedSliceValue + case pref.Sfixed64Kind: + return coderSfixed64PackedSliceValue + case pref.Fixed64Kind: + return coderFixed64PackedSliceValue + case pref.DoubleKind: + return coderDoublePackedSliceValue + } + default: + switch fd.Kind() { + default: + case pref.BoolKind: + return coderBoolValue + case pref.EnumKind: + return coderEnumValue + case pref.Int32Kind: + return coderInt32Value + case pref.Sint32Kind: + return coderSint32Value + case pref.Uint32Kind: + return coderUint32Value + case pref.Int64Kind: + return coderInt64Value + case pref.Sint64Kind: + return coderSint64Value + case pref.Uint64Kind: + return coderUint64Value + case pref.Sfixed32Kind: + return coderSfixed32Value + case pref.Fixed32Kind: + return coderFixed32Value + case pref.FloatKind: + return coderFloatValue + case pref.Sfixed64Kind: + return coderSfixed64Value + case pref.Fixed64Kind: + return coderFixed64Value + case pref.DoubleKind: + return coderDoubleValue + case pref.StringKind: + if strs.EnforceUTF8(fd) { + return coderStringValueValidateUTF8 + } + return coderStringValue + case pref.BytesKind: + return coderBytesValue + case pref.MessageKind: + return coderMessageValue + case pref.GroupKind: + return coderGroupValue + } + } + panic(fmt.Sprintf("invalid field: no encoder for %v %v %v", fd.FullName(), fd.Cardinality(), fd.Kind())) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go new file mode 100644 index 000000000..e118af1e2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package impl + +// When using unsafe pointers, we can just treat enum values as int32s. + +var ( + coderEnumNoZero = coderInt32NoZero + coderEnum = coderInt32 + coderEnumPtr = coderInt32Ptr + coderEnumSlice = coderInt32Slice + coderEnumPackedSlice = coderInt32PackedSlice +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go new file mode 100644 index 000000000..36a90dff3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -0,0 +1,467 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package impl
+
+import (
+	"fmt"
+	"reflect"
+
+	pref "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// unwrapper unwraps the value to the underlying value.
+// This is implemented by List and Map.
+type unwrapper interface {
+	protoUnwrap() interface{}
+}
+
+// A Converter converts to/from Go reflect.Value types and protobuf protoreflect.Value types.
+type Converter interface {
+	// PBValueOf converts a reflect.Value to a protoreflect.Value.
+	PBValueOf(reflect.Value) pref.Value
+
+	// GoValueOf converts a protoreflect.Value to a reflect.Value.
+	GoValueOf(pref.Value) reflect.Value
+
+	// IsValidPB returns whether a protoreflect.Value is compatible with this type.
+	IsValidPB(pref.Value) bool
+
+	// IsValidGo returns whether a reflect.Value is compatible with this type.
+	IsValidGo(reflect.Value) bool
+
+	// New returns a new field value.
+	// For scalars, it returns the default value of the field.
+	// For composite types, it returns a new mutable value.
+	New() pref.Value
+
+	// Zero returns a new field value.
+	// For scalars, it returns the default value of the field.
+	// For composite types, it returns an immutable, empty value.
+	Zero() pref.Value
+}
+
+// NewConverter matches a Go type with a protobuf field and returns a Converter
+// that converts between the two. Enums must be a named int32 kind that
+// implements protoreflect.Enum, and messages must be a pointer to a named
+// struct type that implements protoreflect.ProtoMessage.
+//
+// This matcher deliberately supports a wider range of Go types than what
+// protoc-gen-go historically generated to be able to automatically wrap some
+// v1 messages generated by other forks of protoc-gen-go.
+func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+	switch {
+	case fd.IsList():
+		return newListConverter(t, fd)
+	case fd.IsMap():
+		return newMapConverter(t, fd)
+	default:
+		return newSingularConverter(t, fd)
+	}
+	panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName()))
+}
+
+var (
+	boolType    = reflect.TypeOf(bool(false))
+	int32Type   = reflect.TypeOf(int32(0))
+	int64Type   = reflect.TypeOf(int64(0))
+	uint32Type  = reflect.TypeOf(uint32(0))
+	uint64Type  = reflect.TypeOf(uint64(0))
+	float32Type = reflect.TypeOf(float32(0))
+	float64Type = reflect.TypeOf(float64(0))
+	stringType  = reflect.TypeOf(string(""))
+	bytesType   = reflect.TypeOf([]byte(nil))
+	byteType    = reflect.TypeOf(byte(0))
+)
+
+var (
+	boolZero    = pref.ValueOfBool(false)
+	int32Zero   = pref.ValueOfInt32(0)
+	int64Zero   = pref.ValueOfInt64(0)
+	uint32Zero  = pref.ValueOfUint32(0)
+	uint64Zero  = pref.ValueOfUint64(0)
+	float32Zero = pref.ValueOfFloat32(0)
+	float64Zero = pref.ValueOfFloat64(0)
+	stringZero  = pref.ValueOfString("")
+	bytesZero   = pref.ValueOfBytes(nil)
+)
+
+func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter {
+	defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value {
+		if fd.Cardinality() == pref.Repeated {
+			// Default isn't defined for repeated fields.
+ return zero + } + return fd.Default() + } + switch fd.Kind() { + case pref.BoolKind: + if t.Kind() == reflect.Bool { + return &boolConverter{t, defVal(fd, boolZero)} + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if t.Kind() == reflect.Int32 { + return &int32Converter{t, defVal(fd, int32Zero)} + } + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if t.Kind() == reflect.Int64 { + return &int64Converter{t, defVal(fd, int64Zero)} + } + case pref.Uint32Kind, pref.Fixed32Kind: + if t.Kind() == reflect.Uint32 { + return &uint32Converter{t, defVal(fd, uint32Zero)} + } + case pref.Uint64Kind, pref.Fixed64Kind: + if t.Kind() == reflect.Uint64 { + return &uint64Converter{t, defVal(fd, uint64Zero)} + } + case pref.FloatKind: + if t.Kind() == reflect.Float32 { + return &float32Converter{t, defVal(fd, float32Zero)} + } + case pref.DoubleKind: + if t.Kind() == reflect.Float64 { + return &float64Converter{t, defVal(fd, float64Zero)} + } + case pref.StringKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &stringConverter{t, defVal(fd, stringZero)} + } + case pref.BytesKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &bytesConverter{t, defVal(fd, bytesZero)} + } + case pref.EnumKind: + // Handle enums, which must be a named int32 type. + if t.Kind() == reflect.Int32 { + return newEnumConverter(t, fd) + } + case pref.MessageKind, pref.GroupKind: + return newMessageConverter(t) + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +type boolConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfBool(v.Bool()) +} +func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Bool()).Convert(c.goType) +} +func (c *boolConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(bool) + return ok +} +func (c *boolConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *boolConverter) New() pref.Value { return c.def } +func (c *boolConverter) Zero() pref.Value { return c.def } + +type int32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfInt32(int32(v.Int())) +} +func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(int32(v.Int())).Convert(c.goType) +} +func (c *int32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(int32) + return ok +} +func (c *int32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *int32Converter) New() pref.Value { return c.def } +func (c *int32Converter) Zero() pref.Value { return c.def } + +type int64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfInt64(int64(v.Int())) +} +func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(int64(v.Int())).Convert(c.goType) +} +func (c *int64Converter) IsValidPB(v 
pref.Value) bool { + _, ok := v.Interface().(int64) + return ok +} +func (c *int64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *int64Converter) New() pref.Value { return c.def } +func (c *int64Converter) Zero() pref.Value { return c.def } + +type uint32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfUint32(uint32(v.Uint())) +} +func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) +} +func (c *uint32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(uint32) + return ok +} +func (c *uint32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *uint32Converter) New() pref.Value { return c.def } +func (c *uint32Converter) Zero() pref.Value { return c.def } + +type uint64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfUint64(uint64(v.Uint())) +} +func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) +} +func (c *uint64Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(uint64) + return ok +} +func (c *uint64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *uint64Converter) New() pref.Value { return c.def } +func (c *uint64Converter) Zero() pref.Value { return c.def } + +type float32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfFloat32(float32(v.Float())) +} +func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(float32(v.Float())).Convert(c.goType) +} +func (c *float32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(float32) + return ok +} +func (c *float32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *float32Converter) New() pref.Value { return c.def } +func (c *float32Converter) Zero() pref.Value { return c.def } + +type float64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfFloat64(float64(v.Float())) +} +func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(float64(v.Float())).Convert(c.goType) +} +func (c *float64Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(float64) + return ok +} +func (c *float64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *float64Converter) New() pref.Value { return c.def } +func (c *float64Converter) Zero() pref.Value { return c.def } + +type stringConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value { + if 
v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfString(v.Convert(stringType).String()) +} +func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { + // pref.Value.String never panics, so we go through an interface + // conversion here to check the type. + s := v.Interface().(string) + if c.goType.Kind() == reflect.Slice && s == "" { + return reflect.Zero(c.goType) // ensure empty string is []byte(nil) + } + return reflect.ValueOf(s).Convert(c.goType) +} +func (c *stringConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(string) + return ok +} +func (c *stringConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *stringConverter) New() pref.Value { return c.def } +func (c *stringConverter) Zero() pref.Value { return c.def } + +type bytesConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if c.goType.Kind() == reflect.String && v.Len() == 0 { + return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil) + } + return pref.ValueOfBytes(v.Convert(bytesType).Bytes()) +} +func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Bytes()).Convert(c.goType) +} +func (c *bytesConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().([]byte) + return ok +} +func (c *bytesConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *bytesConverter) New() pref.Value { return c.def } +func (c *bytesConverter) Zero() pref.Value { return c.def } + +type enumConverter struct { + goType reflect.Type + def pref.Value +} + +func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter { + var def pref.Value + if fd.Cardinality() == pref.Repeated { + def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) + } else { + def = fd.Default() + } + return &enumConverter{goType, def} +} + +func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfEnum(pref.EnumNumber(v.Int())) +} + +func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Enum()).Convert(c.goType) +} + +func (c *enumConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(pref.EnumNumber) + return ok +} + +func (c *enumConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *enumConverter) New() pref.Value { + return c.def +} + +func (c *enumConverter) Zero() pref.Value { + return c.def +} + +type messageConverter struct { + goType reflect.Type +} + +func newMessageConverter(goType reflect.Type) Converter { + return &messageConverter{goType} +} + +func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if m, ok := v.Interface().(pref.ProtoMessage); ok { + return pref.ValueOfMessage(m.ProtoReflect()) + } + return pref.ValueOfMessage(legacyWrapMessage(v)) +} + +func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { + m := v.Message() + var rv reflect.Value + if u, ok := m.(unwrapper); ok { + rv = reflect.ValueOf(u.protoUnwrap()) + } else { + rv = 
reflect.ValueOf(m.Interface()) + } + if rv.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType)) + } + return rv +} + +func (c *messageConverter) IsValidPB(v pref.Value) bool { + m := v.Message() + var rv reflect.Value + if u, ok := m.(unwrapper); ok { + rv = reflect.ValueOf(u.protoUnwrap()) + } else { + rv = reflect.ValueOf(m.Interface()) + } + return rv.Type() == c.goType +} + +func (c *messageConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *messageConverter) New() pref.Value { + return c.PBValueOf(reflect.New(c.goType.Elem())) +} + +func (c *messageConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go new file mode 100644 index 000000000..6fccab520 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -0,0 +1,141 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + switch { + case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: + return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} + case t.Kind() == reflect.Slice: + return &listConverter{t, newSingularConverter(t.Elem(), fd)} + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +type listConverter struct { + goType reflect.Type // []T + c Converter +} + +func (c *listConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + pv := reflect.New(c.goType) + pv.Elem().Set(v) + return pref.ValueOfList(&listReflect{pv, c.c}) +} + +func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { + rv := v.List().(*listReflect).v + if rv.IsNil() { + return reflect.Zero(c.goType) + } + return rv.Elem() +} + +func (c *listConverter) IsValidPB(v pref.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false + } + return list.v.Type().Elem() == c.goType +} + +func (c *listConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *listConverter) New() pref.Value { + return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) +} + +func (c *listConverter) Zero() pref.Value { + return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) +} + +type listPtrConverter struct { + goType reflect.Type // *[]T + c Converter +} + +func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfList(&listReflect{v, c.c}) +} + +func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value { + return v.List().(*listReflect).v +} + +func (c *listPtrConverter) IsValidPB(v pref.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false + } + return list.v.Type() == c.goType +} + +func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *listPtrConverter) New() pref.Value { + return 
c.PBValueOf(reflect.New(c.goType.Elem())) +} + +func (c *listPtrConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +type listReflect struct { + v reflect.Value // *[]T + conv Converter +} + +func (ls *listReflect) Len() int { + if ls.v.IsNil() { + return 0 + } + return ls.v.Elem().Len() +} +func (ls *listReflect) Get(i int) pref.Value { + return ls.conv.PBValueOf(ls.v.Elem().Index(i)) +} +func (ls *listReflect) Set(i int, v pref.Value) { + ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) +} +func (ls *listReflect) Append(v pref.Value) { + ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) +} +func (ls *listReflect) AppendMutable() pref.Value { + if _, ok := ls.conv.(*messageConverter); !ok { + panic("invalid AppendMutable on list with non-message type") + } + v := ls.NewElement() + ls.Append(v) + return v +} +func (ls *listReflect) Truncate(i int) { + ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) +} +func (ls *listReflect) NewElement() pref.Value { + return ls.conv.New() +} +func (ls *listReflect) IsValid() bool { + return !ls.v.IsNil() +} +func (ls *listReflect) protoUnwrap() interface{} { + return ls.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go new file mode 100644 index 000000000..de06b2593 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -0,0 +1,121 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type mapConverter struct { + goType reflect.Type // map[K]V + keyConv, valConv Converter +} + +func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) + } + return &mapConverter{ + goType: t, + keyConv: newSingularConverter(t.Key(), fd.MapKey()), + valConv: newSingularConverter(t.Elem(), fd.MapValue()), + } +} + +func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) +} + +func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value { + return v.Map().(*mapReflect).v +} + +func (c *mapConverter) IsValidPB(v pref.Value) bool { + mapv, ok := v.Interface().(*mapReflect) + if !ok { + return false + } + return mapv.v.Type() == c.goType +} + +func (c *mapConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *mapConverter) New() pref.Value { + return c.PBValueOf(reflect.MakeMap(c.goType)) +} + +func (c *mapConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +type mapReflect struct { + v reflect.Value // map[K]V + keyConv Converter + valConv Converter +} + +func (ms *mapReflect) Len() int { + return ms.v.Len() +} +func (ms *mapReflect) Has(k pref.MapKey) bool { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + return rv.IsValid() +} +func (ms *mapReflect) Get(k pref.MapKey) pref.Value { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + if !rv.IsValid() { + return pref.Value{} + } + return ms.valConv.PBValueOf(rv) +} +func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) { + rk 
:= ms.keyConv.GoValueOf(k.Value()) + rv := ms.valConv.GoValueOf(v) + ms.v.SetMapIndex(rk, rv) +} +func (ms *mapReflect) Clear(k pref.MapKey) { + rk := ms.keyConv.GoValueOf(k.Value()) + ms.v.SetMapIndex(rk, reflect.Value{}) +} +func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { + if _, ok := ms.valConv.(*messageConverter); !ok { + panic("invalid Mutable on map with non-message value type") + } + v := ms.Get(k) + if !v.IsValid() { + v = ms.NewValue() + ms.Set(k, v) + } + return v +} +func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { + iter := mapRange(ms.v) + for iter.Next() { + k := ms.keyConv.PBValueOf(iter.Key()).MapKey() + v := ms.valConv.PBValueOf(iter.Value()) + if !f(k, v) { + return + } + } +} +func (ms *mapReflect) NewValue() pref.Value { + return ms.valConv.New() +} +func (ms *mapReflect) IsValid() bool { + return !ms.v.IsNil() +} +func (ms *mapReflect) protoUnwrap() interface{} { + return ms.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go new file mode 100644 index 000000000..85ba1d3b3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -0,0 +1,274 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "math/bits" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type unmarshalOptions struct { + flags protoiface.UnmarshalInputFlags + resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } +} + +func (o unmarshalOptions) Options() proto.UnmarshalOptions { + return proto.UnmarshalOptions{ + Merge: true, + AllowPartial: true, + DiscardUnknown: o.DiscardUnknown(), + Resolver: o.resolver, + } +} + +func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 } + +func (o unmarshalOptions) IsDefault() bool { + return o.flags == 0 && o.resolver == preg.GlobalTypes +} + +var lazyUnmarshalOptions = unmarshalOptions{ + resolver: preg.GlobalTypes, +} + +type unmarshalOutput struct { + n int // number of bytes consumed + initialized bool +} + +// unmarshal is protoreflect.Methods.Unmarshal. 
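The tag fast path in unmarshalPointer just below special-cases one- and two-byte varint tags before falling back to protowire.ConsumeVarint. The following standalone sketch is not part of the vendored file; it reproduces that fast path with the public protowire package and checks it against protowire.ConsumeTag. All identifiers in it are illustrative.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// fastTag mirrors the one-/two-byte varint fast path used when parsing a
// field tag, then falls back to protowire.ConsumeVarint; it is a sketch,
// not the vendored implementation.
func fastTag(b []byte) (protowire.Number, protowire.Type, int) {
	var tag uint64
	var n int
	switch {
	case len(b) > 0 && b[0] < 0x80:
		tag, n = uint64(b[0]), 1
	case len(b) >= 2 && b[1] < 0x80:
		tag, n = uint64(b[0]&0x7f)|uint64(b[1])<<7, 2
	default:
		tag, n = protowire.ConsumeVarint(b)
		if n < 0 {
			return 0, 0, n
		}
	}
	return protowire.Number(tag >> 3), protowire.Type(tag & 7), n
}

func main() {
	b := protowire.AppendTag(nil, 150, protowire.BytesType) // field 150, length-delimited
	num, typ, n := fastTag(b)
	wantNum, wantTyp, wantN := protowire.ConsumeTag(b)
	fmt.Println(num == wantNum, typ == wantTyp, n == wantN) // true true true
}
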
+func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + }) + var flags piface.UnmarshalOutputFlags + if out.initialized { + flags |= piface.UnmarshalInitialized + } + return piface.UnmarshalOutput{ + Flags: flags, + }, err +} + +// errUnknown is returned during unmarshaling to indicate a parse error that +// should result in a field being placed in the unknown fields section (for example, +// when the wire type doesn't match) as opposed to the entire unmarshal operation +// failing (for example, when a field extends past the available input). +// +// This is a sentinel error which should never be visible to the user. +var errUnknown = errors.New("unknown") + +func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + mi.init() + if flags.ProtoLegacy && mi.isMessageSet { + return unmarshalMessageSet(mi, b, p, opts) + } + initialized := true + var requiredMask uint64 + var exts *map[int32]ExtensionField + start := len(b) + for len(b) > 0 { + // Parse the tag (field number and wire type). + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, protowire.ParseError(n) + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, errors.New("invalid field number") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if num != groupTag { + return out, errors.New("mismatching end group marker") + } + groupTag = 0 + break + } + + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + var n int + err := errUnknown + switch { + case f != nil: + if f.funcs.unmarshal == nil { + break + } + var o unmarshalOutput + o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) + n = o.n + if err != nil { + break + } + requiredMask |= f.validation.requiredBit + if f.funcs.isInit != nil && !o.initialized { + initialized = false + } + default: + // Possible extension. + if exts == nil && mi.extensionOffset.IsValid() { + exts = p.Apply(mi.extensionOffset).Extensions() + if *exts == nil { + *exts = make(map[int32]ExtensionField) + } + } + if exts == nil { + break + } + var o unmarshalOutput + o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) + if err != nil { + break + } + n = o.n + if !o.initialized { + initialized = false + } + } + if err != nil { + if err != errUnknown { + return out, err + } + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { + u := p.Apply(mi.unknownOffset).Bytes() + *u = protowire.AppendTag(*u, num, wtyp) + *u = append(*u, b[:n]...) 
+ } + } + b = b[n:] + } + if groupTag != 0 { + return out, errors.New("missing end group marker") + } + if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { + initialized = false + } + if initialized { + out.initialized = true + } + out.n = start - len(b) + return out, nil +} + +func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp protowire.Type, exts map[int32]ExtensionField, opts unmarshalOptions) (out unmarshalOutput, err error) { + x := exts[int32(num)] + xt := x.Type() + if xt == nil { + var err error + xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) + if err != nil { + if err == preg.NotFound { + return out, errUnknown + } + return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) + } + } + xi := getExtensionFieldInfo(xt) + if xi.funcs.unmarshal == nil { + return out, errUnknown + } + if flags.LazyUnmarshalExtensions { + if opts.IsDefault() && x.canLazy(xt) { + out, valid := skipExtension(b, xi, num, wtyp, opts) + switch valid { + case ValidationValid: + if out.initialized { + x.appendLazyBytes(xt, xi, num, wtyp, b[:out.n]) + exts[int32(num)] = x + return out, nil + } + case ValidationInvalid: + return out, errors.New("invalid wire format") + case ValidationUnknown: + } + } + } + ival := x.Value() + if !ival.IsValid() && xi.unmarshalNeedsValue { + // Create a new message, list, or map value to fill in. + // For enums, create a prototype value to let the unmarshal func know the + // concrete type. + ival = xt.New() + } + v, out, err := xi.funcs.unmarshal(b, ival, num, wtyp, opts) + if err != nil { + return out, err + } + if xi.funcs.isInit == nil { + out.initialized = true + } + x.Set(xt, v) + exts[int32(num)] = x + return out, nil +} + +func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) { + if xi.validation.mi == nil { + return out, ValidationUnknown + } + xi.validation.mi.init() + switch xi.validation.typ { + case validationTypeMessage: + if wtyp != protowire.BytesType { + return out, ValidationUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, ValidationUnknown + } + out, st := xi.validation.mi.validate(v, 0, opts) + out.n = n + return out, st + case validationTypeGroup: + if wtyp != protowire.StartGroupType { + return out, ValidationUnknown + } + out, st := xi.validation.mi.validate(b, num, opts) + return out, st + default: + return out, ValidationUnknown + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go new file mode 100644 index 000000000..8c8a794c6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -0,0 +1,199 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
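When the parsing loop above hits errUnknown, the field's bytes are re-consumed with protowire.ConsumeFieldValue and appended to the message's unknown section unless DiscardUnknown is set. The sketch below, which is not part of the patch, shows that behavior through the public proto API; emptypb.Empty is chosen only because it declares no fields, so any field number is unknown to it.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Encode a varint field with number 100, which Empty does not declare.
	var b []byte
	b = protowire.AppendTag(b, 100, protowire.VarintType)
	b = protowire.AppendVarint(b, 42)

	// Default unmarshal keeps the unrecognized bytes in the unknown section.
	m := &emptypb.Empty{}
	if err := proto.Unmarshal(b, m); err != nil {
		panic(err)
	}
	fmt.Printf("kept unknown: %x\n", m.ProtoReflect().GetUnknown())

	// With DiscardUnknown the same bytes are parsed and dropped.
	m2 := &emptypb.Empty{}
	if err := (proto.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(b, m2); err != nil {
		panic(err)
	}
	fmt.Println("discarded:", len(m2.ProtoReflect().GetUnknown()) == 0)
}
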
+ +package impl + +import ( + "math" + "sort" + "sync/atomic" + + "google.golang.org/protobuf/internal/flags" + proto "google.golang.org/protobuf/proto" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type marshalOptions struct { + flags piface.MarshalInputFlags +} + +func (o marshalOptions) Options() proto.MarshalOptions { + return proto.MarshalOptions{ + AllowPartial: true, + Deterministic: o.Deterministic(), + UseCachedSize: o.UseCachedSize(), + } +} + +func (o marshalOptions) Deterministic() bool { return o.flags&piface.MarshalDeterministic != 0 } +func (o marshalOptions) UseCachedSize() bool { return o.flags&piface.MarshalUseCachedSize != 0 } + +// size is protoreflect.Methods.Size. +func (mi *MessageInfo) size(in piface.SizeInput) piface.SizeOutput { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + size := mi.sizePointer(p, marshalOptions{ + flags: in.Flags, + }) + return piface.SizeOutput{Size: size} +} + +func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { + mi.init() + if p.IsNil() { + return 0 + } + if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { + if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { + return int(size) + } + } + return mi.sizePointerSlow(p, opts) +} + +func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int) { + if flags.ProtoLegacy && mi.isMessageSet { + size = sizeMessageSet(mi, p, opts) + if mi.sizecacheOffset.IsValid() { + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + } + return size + } + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + size += mi.sizeExtensions(e, opts) + } + for _, f := range mi.orderedCoderFields { + if f.funcs.size == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + continue + } + size += f.funcs.size(fptr, f, opts) + } + if mi.unknownOffset.IsValid() { + u := *p.Apply(mi.unknownOffset).Bytes() + size += len(u) + } + if mi.sizecacheOffset.IsValid() { + if size > math.MaxInt32 { + // The size is too large for the int32 sizecache field. + // We will need to recompute the size when encoding; + // unfortunately expensive, but better than invalid output. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + } else { + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + } + } + return size +} + +// marshal is protoreflect.Methods.Marshal. +func (mi *MessageInfo) marshal(in piface.MarshalInput) (out piface.MarshalOutput, err error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + b, err := mi.marshalAppendPointer(in.Buf, p, marshalOptions{ + flags: in.Flags, + }) + return piface.MarshalOutput{Buf: b}, err +} + +func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOptions) ([]byte, error) { + mi.init() + if p.IsNil() { + return b, nil + } + if flags.ProtoLegacy && mi.isMessageSet { + return marshalMessageSet(mi, b, p, opts) + } + var err error + // The old marshaler encodes extensions at beginning. + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + // TODO: Special handling for MessageSet? 
+ b, err = mi.appendExtensions(b, e, opts) + if err != nil { + return b, err + } + } + for _, f := range mi.orderedCoderFields { + if f.funcs.marshal == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + continue + } + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + } + if mi.unknownOffset.IsValid() && !mi.isMessageSet { + u := *p.Apply(mi.unknownOffset).Bytes() + b = append(b, u...) + } + return b, nil +} + +func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { + if ext == nil { + return 0 + } + for _, x := range *ext { + xi := getExtensionFieldInfo(x.Type()) + if xi.funcs.size == nil { + continue + } + n += xi.funcs.size(x.Value(), xi.tagsize, opts) + } + return n +} + +func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, opts marshalOptions) ([]byte, error) { + if ext == nil { + return b, nil + } + + switch len(*ext) { + case 0: + return b, nil + case 1: + // Fast-path for one extension: Don't bother sorting the keys. + var err error + for _, x := range *ext { + xi := getExtensionFieldInfo(x.Type()) + b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) + } + return b, err + default: + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(*ext)) + for k := range *ext { + keys = append(keys, int(k)) + } + sort.Ints(keys) + var err error + for _, k := range keys { + x := (*ext)[int32(k)] + xi := getExtensionFieldInfo(x.Type()) + b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) + if err != nil { + return b, err + } + } + return b, nil + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go new file mode 100644 index 000000000..8c1eab4bf --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type EnumInfo struct { + GoReflectType reflect.Type // int32 kind + Desc pref.EnumDescriptor +} + +func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum { + return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum) +} +func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go new file mode 100644 index 000000000..e904fd993 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -0,0 +1,156 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + "sync" + "sync/atomic" + + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// ExtensionInfo implements ExtensionType. +// +// This type contains a number of exported fields for legacy compatibility. +// The only non-deprecated use of this type is through the methods of the +// ExtensionType interface. +type ExtensionInfo struct { + // An ExtensionInfo may exist in several stages of initialization. 
+ // + // extensionInfoUninitialized: Some or all of the legacy exported + // fields may be set, but none of the unexported fields have been + // initialized. This is the starting state for an ExtensionInfo + // in legacy generated code. + // + // extensionInfoDescInit: The desc field is set, but other unexported fields + // may not be initialized. Legacy exported fields may or may not be set. + // This is the starting state for an ExtensionInfo in newly generated code. + // + // extensionInfoFullInit: The ExtensionInfo is fully initialized. + // This state is only entered after lazy initialization is complete. + init uint32 + mu sync.Mutex + + goType reflect.Type + desc extensionTypeDescriptor + conv Converter + info *extensionFieldInfo // for fast-path method implementations + + // ExtendedType is a typed nil-pointer to the parent message type that + // is being extended. It is possible for this to be unpopulated in v2 + // since the message may no longer implement the MessageV1 interface. + // + // Deprecated: Use the ExtendedType method instead. + ExtendedType piface.MessageV1 + + // ExtensionType is the zero value of the extension type. + // + // For historical reasons, reflect.TypeOf(ExtensionType) and the + // type returned by InterfaceOf may not be identical. + // + // Deprecated: Use InterfaceOf(xt.Zero()) instead. + ExtensionType interface{} + + // Field is the field number of the extension. + // + // Deprecated: Use the Descriptor().Number method instead. + Field int32 + + // Name is the fully qualified name of extension. + // + // Deprecated: Use the Descriptor().FullName method instead. + Name string + + // Tag is the protobuf struct tag used in the v1 API. + // + // Deprecated: Do not use. + Tag string + + // Filename is the proto filename in which the extension is defined. + // + // Deprecated: Use Descriptor().ParentFile().Path() instead. + Filename string +} + +// Stages of initialization: See the ExtensionInfo.init field. 
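The init field above, together with the lazyInit and lazyInitSlow methods that follow, is a double-checked initialization: an atomic load guards the fast path, a mutex serializes the slow path, and the final stage is stored only after every derived field is populated. The generic sketch below illustrates the pattern; its type and values are made up for illustration and are not part of this package.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// lazyBox demonstrates the same double-checked pattern: readers atomically
// check a stage word, and only losers of the race take the mutex.
type lazyBox struct {
	init uint32 // 0 = uninitialized, 1 = fully initialized
	mu   sync.Mutex
	val  string
}

func (b *lazyBox) get() string {
	if atomic.LoadUint32(&b.init) == 0 {
		b.slowInit()
	}
	return b.val
}

func (b *lazyBox) slowInit() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.init == 1 {
		return // another goroutine initialized while we waited
	}
	b.val = "expensive derived state" // compute everything under the lock
	atomic.StoreUint32(&b.init, 1)    // publish the stage last
}

func main() {
	var b lazyBox
	fmt.Println(b.get())
}
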
+const ( + extensionInfoUninitialized = 0 + extensionInfoDescInit = 1 + extensionInfoFullInit = 2 +) + +func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) { + xi.goType = goType + xi.desc = extensionTypeDescriptor{xd, xi} + xi.init = extensionInfoDescInit +} + +func (xi *ExtensionInfo) New() pref.Value { + return xi.lazyInit().New() +} +func (xi *ExtensionInfo) Zero() pref.Value { + return xi.lazyInit().Zero() +} +func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value { + return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) +} +func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} { + return xi.lazyInit().GoValueOf(v).Interface() +} +func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool { + return xi.lazyInit().IsValidPB(v) +} +func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { + return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) +} +func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor { + if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { + xi.lazyInitSlow() + } + return &xi.desc +} + +func (xi *ExtensionInfo) lazyInit() Converter { + if atomic.LoadUint32(&xi.init) < extensionInfoFullInit { + xi.lazyInitSlow() + } + return xi.conv +} + +func (xi *ExtensionInfo) lazyInitSlow() { + xi.mu.Lock() + defer xi.mu.Unlock() + + if xi.init == extensionInfoFullInit { + return + } + defer atomic.StoreUint32(&xi.init, extensionInfoFullInit) + + if xi.desc.ExtensionDescriptor == nil { + xi.initFromLegacy() + } + if !xi.desc.ExtensionDescriptor.IsPlaceholder() { + if xi.ExtensionType == nil { + xi.initToLegacy() + } + xi.conv = NewConverter(xi.goType, xi.desc.ExtensionDescriptor) + xi.info = makeExtensionFieldInfo(xi.desc.ExtensionDescriptor) + xi.info.validation = newValidationInfo(xi.desc.ExtensionDescriptor, xi.goType) + } +} + +type extensionTypeDescriptor struct { + pref.ExtensionDescriptor + xi *ExtensionInfo +} + +func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType { + return xtd.xi +} +func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor { + return xtd.ExtensionDescriptor +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go new file mode 100644 index 000000000..f7d7ffb51 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -0,0 +1,219 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// legacyEnumName returns the name of enums used in legacy code. +// It is neither the protobuf full name nor the qualified Go name, +// but rather an odd hybrid of both. +func legacyEnumName(ed pref.EnumDescriptor) string { + var protoPkg string + enumName := string(ed.FullName()) + if fd := ed.ParentFile(); fd != nil { + protoPkg = string(fd.Package()) + enumName = strings.TrimPrefix(enumName, protoPkg+".") + } + if protoPkg == "" { + return strs.GoCamelCase(enumName) + } + return protoPkg + "." + strs.GoCamelCase(enumName) +} + +// legacyWrapEnum wraps v as a protoreflect.Enum, +// where v must be a int32 kind and not implement the v2 API already. 
+func legacyWrapEnum(v reflect.Value) pref.Enum { + et := legacyLoadEnumType(v.Type()) + return et.New(pref.EnumNumber(v.Int())) +} + +var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType + +// legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, +// where t must be an int32 kind and not implement the v2 API already. +func legacyLoadEnumType(t reflect.Type) pref.EnumType { + // Fast-path: check if a EnumType is cached for this concrete type. + if et, ok := legacyEnumTypeCache.Load(t); ok { + return et.(pref.EnumType) + } + + // Slow-path: derive enum descriptor and initialize EnumType. + var et pref.EnumType + ed := LegacyLoadEnumDesc(t) + et = &legacyEnumType{ + desc: ed, + goType: t, + } + if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { + return et.(pref.EnumType) + } + return et +} + +type legacyEnumType struct { + desc pref.EnumDescriptor + goType reflect.Type + m sync.Map // map[protoreflect.EnumNumber]proto.Enum +} + +func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum { + if e, ok := t.m.Load(n); ok { + return e.(pref.Enum) + } + e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} + t.m.Store(n, e) + return e +} +func (t *legacyEnumType) Descriptor() pref.EnumDescriptor { + return t.desc +} + +type legacyEnumWrapper struct { + num pref.EnumNumber + pbTyp pref.EnumType + goTyp reflect.Type +} + +func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor { + return e.pbTyp.Descriptor() +} +func (e *legacyEnumWrapper) Type() pref.EnumType { + return e.pbTyp +} +func (e *legacyEnumWrapper) Number() pref.EnumNumber { + return e.num +} +func (e *legacyEnumWrapper) ProtoReflect() pref.Enum { + return e +} +func (e *legacyEnumWrapper) protoUnwrap() interface{} { + v := reflect.New(e.goTyp).Elem() + v.SetInt(int64(e.num)) + return v.Interface() +} + +var ( + _ pref.Enum = (*legacyEnumWrapper)(nil) + _ unwrapper = (*legacyEnumWrapper)(nil) +) + +var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + +// LegacyLoadEnumDesc returns an EnumDescriptor derived from the Go type, +// which must be an int32 kind and not implement the v2 API already. +// +// This is exported for testing purposes. +func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := legacyEnumDescCache.Load(t); ok { + return ed.(pref.EnumDescriptor) + } + + // Slow-path: initialize EnumDescriptor from the raw descriptor. + ev := reflect.Zero(t).Interface() + if _, ok := ev.(pref.Enum); ok { + panic(fmt.Sprintf("%v already implements proto.Enum", t)) + } + edV1, ok := ev.(enumV1) + if !ok { + return aberrantLoadEnumDesc(t) + } + b, idxs := edV1.EnumDescriptor() + + var ed pref.EnumDescriptor + if len(idxs) == 1 { + ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) + } else { + md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) + for _, i := range idxs[1 : len(idxs)-1] { + md = md.Messages().Get(i) + } + ed = md.Enums().Get(idxs[len(idxs)-1]) + } + if ed, ok := legacyEnumDescCache.LoadOrStore(t, ed); ok { + return ed.(protoreflect.EnumDescriptor) + } + return ed +} + +var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + +// aberrantLoadEnumDesc returns an EnumDescriptor derived from the Go type, +// which must not implement protoreflect.Enum or enumV1. +// +// If the type does not implement enumV1, then there is no reliable +// way to derive the original protobuf type information. 
+// We are unable to use the global enum registry since it is +// unfortunately keyed by the protobuf full name, which we also do not know. +// Thus, this produces some bogus enum descriptor based on the Go type name. +func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := aberrantEnumDescCache.Load(t); ok { + return ed.(pref.EnumDescriptor) + } + + // Slow-path: construct a bogus, but unique EnumDescriptor. + ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} + ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum + ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) + + // TODO: Use the presence of a UnmarshalJSON method to determine proto2? + + vd := &ed.L2.Values.List[0] + vd.L0.FullName = ed.L0.FullName + "_UNKNOWN" // e.g., github_com.user.repo.MyEnum_UNKNOWN + vd.L0.ParentFile = ed.L0.ParentFile + vd.L0.Parent = ed + + // TODO: We could use the String method to obtain some enum value names by + // starting at 0 and print the enum until it produces invalid identifiers. + // An exhaustive query is clearly impractical, but can be best-effort. + + if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { + return ed.(pref.EnumDescriptor) + } + return ed +} + +// AberrantDeriveFullName derives a fully qualified protobuf name for the given Go type +// The provided name is not guaranteed to be stable nor universally unique. +// It should be sufficiently unique within a program. +// +// This is exported for testing purposes. +func AberrantDeriveFullName(t reflect.Type) pref.FullName { + sanitize := func(r rune) rune { + switch { + case r == '/': + return '.' + case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9': + return r + default: + return '_' + } + } + prefix := strings.Map(sanitize, t.PkgPath()) + suffix := strings.Map(sanitize, t.Name()) + if suffix == "" { + suffix = fmt.Sprintf("UnknownX%X", reflect.ValueOf(t).Pointer()) + } + + ss := append(strings.Split(prefix, "."), suffix) + for i, s := range ss { + if s == "" || ('0' <= s[0] && s[0] <= '9') { + ss[i] = "x" + s + } + } + return pref.FullName(strings.Join(ss, ".")) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go new file mode 100644 index 000000000..c3d741c2f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "encoding/binary" + "encoding/json" + "hash/crc32" + "math" + "reflect" + + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// These functions exist to support exported APIs in generated protobufs. +// While these are deprecated, they cannot be removed for compatibility reasons. + +// LegacyEnumName returns the name of enums used in legacy code. +func (Export) LegacyEnumName(ed pref.EnumDescriptor) string { + return legacyEnumName(ed) +} + +// LegacyMessageTypeOf returns the protoreflect.MessageType for m, +// with name used as the message name if necessary. 
+func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType { + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } + return legacyLoadMessageInfo(reflect.TypeOf(m), name) +} + +// UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. +// The input can either be a string representing the enum value by name, +// or a number representing the enum number itself. +func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) { + if b[0] == '"' { + var name pref.Name + if err := json.Unmarshal(b, &name); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } + ev := ed.Values().ByName(name) + if ev == nil { + return 0, errors.New("invalid value for enum %v: %s", ed.FullName(), name) + } + return ev.Number(), nil + } else { + var num pref.EnumNumber + if err := json.Unmarshal(b, &num); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } + return num, nil + } +} + +// CompressGZIP compresses the input as a GZIP-encoded file. +// The current implementation does no compression. +func (Export) CompressGZIP(in []byte) (out []byte) { + // RFC 1952, section 2.3.1. + var gzipHeader = [10]byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff} + + // RFC 1951, section 3.2.4. + var blockHeader [5]byte + const maxBlockSize = math.MaxUint16 + numBlocks := 1 + len(in)/maxBlockSize + + // RFC 1952, section 2.3.1. + var gzipFooter [8]byte + binary.LittleEndian.PutUint32(gzipFooter[0:4], crc32.ChecksumIEEE(in)) + binary.LittleEndian.PutUint32(gzipFooter[4:8], uint32(len(in))) + + // Encode the input without compression using raw DEFLATE blocks. + out = make([]byte, 0, len(gzipHeader)+len(blockHeader)*numBlocks+len(in)+len(gzipFooter)) + out = append(out, gzipHeader[:]...) + for blockHeader[0] == 0 { + blockSize := maxBlockSize + if blockSize > len(in) { + blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. + blockSize = len(in) + } + binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) + binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) + out = append(out, blockHeader[:]...) + out = append(out, in[:blockSize]...) + in = in[blockSize:] + } + out = append(out, gzipFooter[:]...) + return out +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go new file mode 100644 index 000000000..61757ce50 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -0,0 +1,175 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
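CompressGZIP above emits a valid GZIP member whose DEFLATE payload is a sequence of uncompressed ("stored") blocks, so standard readers can decompress it even though nothing is actually compressed. The sketch below is not part of the patch; it rebuilds that framing for a single small block and round-trips it through compress/gzip. The helper name and payload are illustrative.

package main

import (
	"bytes"
	"compress/gzip"
	"encoding/binary"
	"fmt"
	"hash/crc32"
	"io/ioutil"
)

// storedGZIP sketches the framing CompressGZIP produces: a GZIP member whose
// DEFLATE stream is an uncompressed ("stored") block. It assumes len(in)
// fits in one block (< 64 KiB); the real code loops over blocks.
func storedGZIP(in []byte) []byte {
	out := []byte{0x1f, 0x8b, 0x08, 0, 0, 0, 0, 0, 0, 0xff} // RFC 1952 header
	out = append(out, 0x01)                                 // BFINAL=1, BTYPE=00 (stored)
	var lens [4]byte
	binary.LittleEndian.PutUint16(lens[0:2], uint16(len(in)))  // LEN
	binary.LittleEndian.PutUint16(lens[2:4], ^uint16(len(in))) // NLEN = LEN^0xffff
	out = append(out, lens[:]...)
	out = append(out, in...)
	var footer [8]byte
	binary.LittleEndian.PutUint32(footer[0:4], crc32.ChecksumIEEE(in)) // CRC32
	binary.LittleEndian.PutUint32(footer[4:8], uint32(len(in)))        // ISIZE
	return append(out, footer[:]...)
}

func main() {
	in := []byte("raw FileDescriptorProto bytes would go here")
	zr, err := gzip.NewReader(bytes.NewReader(storedGZIP(in)))
	if err != nil {
		panic(err)
	}
	got, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(got, in)) // true
}
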
+ +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/encoding/messageset" + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (xi *ExtensionInfo) initToLegacy() { + xd := xi.desc + var parent piface.MessageV1 + messageName := xd.ContainingMessage().FullName() + if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil { + // Create a new parent message and unwrap it if possible. + mv := mt.New().Interface() + t := reflect.TypeOf(mv) + if mv, ok := mv.(unwrapper); ok { + t = reflect.TypeOf(mv.protoUnwrap()) + } + + // Check whether the message implements the legacy v1 Message interface. + mz := reflect.Zero(t).Interface() + if mz, ok := mz.(piface.MessageV1); ok { + parent = mz + } + } + + // Determine the v1 extension type, which is unfortunately not the same as + // the v2 ExtensionType.GoType. + extType := xi.goType + switch extType.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + extType = reflect.PtrTo(extType) // T -> *T for singular scalar fields + } + + // Reconstruct the legacy enum full name. + var enumName string + if xd.Kind() == pref.EnumKind { + enumName = legacyEnumName(xd.Enum()) + } + + // Derive the proto file that the extension was declared within. + var filename string + if fd := xd.ParentFile(); fd != nil { + filename = fd.Path() + } + + // For MessageSet extensions, the name used is the parent message. + name := xd.FullName() + if messageset.IsMessageSetExtension(xd) { + name = name.Parent() + } + + xi.ExtendedType = parent + xi.ExtensionType = reflect.Zero(extType).Interface() + xi.Field = int32(xd.Number()) + xi.Name = string(name) + xi.Tag = ptag.Marshal(xd, enumName) + xi.Filename = filename +} + +// initFromLegacy initializes an ExtensionInfo from +// the contents of the deprecated exported fields of the type. +func (xi *ExtensionInfo) initFromLegacy() { + // The v1 API returns "type incomplete" descriptors where only the + // field number is specified. In such a case, use a placeholder. + if xi.ExtendedType == nil || xi.ExtensionType == nil { + xd := placeholderExtension{ + name: pref.FullName(xi.Name), + number: pref.FieldNumber(xi.Field), + } + xi.desc = extensionTypeDescriptor{xd, xi} + return + } + + // Resolve enum or message dependencies. + var ed pref.EnumDescriptor + var md pref.MessageDescriptor + t := reflect.TypeOf(xi.ExtensionType) + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 + if isOptional || isRepeated { + t = t.Elem() + } + switch v := reflect.Zero(t).Interface().(type) { + case pref.Enum: + ed = v.Descriptor() + case enumV1: + ed = LegacyLoadEnumDesc(t) + case pref.ProtoMessage: + md = v.ProtoReflect().Descriptor() + case messageV1: + md = LegacyLoadMessageDesc(t) + } + + // Derive basic field information from the struct tag. + var evs pref.EnumValueDescriptors + if ed != nil { + evs = ed.Values() + } + fd := ptag.Unmarshal(xi.Tag, t, evs).(*filedesc.Field) + + // Construct a v2 ExtensionType. 
+ xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} + xd.L0.ParentFile = filedesc.SurrogateProto2 + xd.L0.FullName = pref.FullName(xi.Name) + xd.L1.Number = pref.FieldNumber(xi.Field) + xd.L1.Cardinality = fd.L1.Cardinality + xd.L1.Kind = fd.L1.Kind + xd.L2.IsPacked = fd.L1.IsPacked + xd.L2.Default = fd.L1.Default + xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) + xd.L2.Enum = ed + xd.L2.Message = md + + // Derive real extension field name for MessageSets. + if messageset.IsMessageSet(xd.L1.Extendee) && md.FullName() == xd.L0.FullName { + xd.L0.FullName = xd.L0.FullName.Append(messageset.ExtensionName) + } + + tt := reflect.TypeOf(xi.ExtensionType) + if isOptional { + tt = tt.Elem() + } + xi.goType = tt + xi.desc = extensionTypeDescriptor{xd, xi} +} + +type placeholderExtension struct { + name pref.FullName + number pref.FieldNumber +} + +func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil } +func (x placeholderExtension) Parent() pref.Descriptor { return nil } +func (x placeholderExtension) Index() int { return 0 } +func (x placeholderExtension) Syntax() pref.Syntax { return 0 } +func (x placeholderExtension) Name() pref.Name { return x.name.Name() } +func (x placeholderExtension) FullName() pref.FullName { return x.name } +func (x placeholderExtension) IsPlaceholder() bool { return true } +func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field } +func (x placeholderExtension) Number() pref.FieldNumber { return x.number } +func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } +func (x placeholderExtension) Kind() pref.Kind { return 0 } +func (x placeholderExtension) HasJSONName() bool { return false } +func (x placeholderExtension) JSONName() string { return "" } +func (x placeholderExtension) HasPresence() bool { return false } +func (x placeholderExtension) HasOptionalKeyword() bool { return false } +func (x placeholderExtension) IsExtension() bool { return true } +func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsPacked() bool { return false } +func (x placeholderExtension) IsList() bool { return false } +func (x placeholderExtension) IsMap() bool { return false } +func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil } +func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil } +func (x placeholderExtension) HasDefault() bool { return false } +func (x placeholderExtension) Default() pref.Value { return pref.Value{} } +func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil } +func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil } +func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil } +func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil } +func (x placeholderExtension) Message() pref.MessageDescriptor { return nil } +func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return } +func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go new file mode 100644 index 000000000..9ab091086 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -0,0 +1,81 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Every enum and message type generated by protoc-gen-go since commit 2fc053c5 +// on February 25th, 2016 has had a method to get the raw descriptor. +// Types that were not generated by protoc-gen-go or were generated prior +// to that version are not supported. +// +// The []byte returned is the encoded form of a FileDescriptorProto message +// compressed using GZIP. The []int is the path from the top-level file +// to the specific message or enum declaration. +type ( + enumV1 interface { + EnumDescriptor() ([]byte, []int) + } + messageV1 interface { + Descriptor() ([]byte, []int) + } +) + +var legacyFileDescCache sync.Map // map[*byte]protoreflect.FileDescriptor + +// legacyLoadFileDesc unmarshals b as a compressed FileDescriptorProto message. +// +// This assumes that b is immutable and that b does not refer to part of a +// concatenated series of GZIP files (which would require shenanigans that +// rely on the concatenation properties of both protobufs and GZIP). +// File descriptors generated by protoc-gen-go do not rely on that property. +func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { + // Fast-path: check whether we already have a cached file descriptor. + if fd, ok := legacyFileDescCache.Load(&b[0]); ok { + return fd.(protoreflect.FileDescriptor) + } + + // Slow-path: decompress and unmarshal the file descriptor proto. + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + panic(err) + } + b2, err := ioutil.ReadAll(zr) + if err != nil { + panic(err) + } + + fd := filedesc.Builder{ + RawDescriptor: b2, + FileRegistry: resolverOnly{protoregistry.GlobalFiles}, // do not register back to global registry + }.Build().File + if fd, ok := legacyFileDescCache.LoadOrStore(&b[0], fd); ok { + return fd.(protoreflect.FileDescriptor) + } + return fd +} + +type resolverOnly struct { + reg *protoregistry.Files +} + +func (r resolverOnly) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + return r.reg.FindFileByPath(path) +} +func (r resolverOnly) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + return r.reg.FindDescriptorByName(name) +} +func (resolverOnly) RegisterFile(protoreflect.FileDescriptor) error { + return nil +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go new file mode 100644 index 000000000..06c68e117 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -0,0 +1,502 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
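legacyLoadFileDesc above gunzips the embedded descriptor and rebuilds a FileDescriptor with the internal filedesc.Builder. The sketch below shows the same idea using only public API, taking the compressed bytes from the deprecated Descriptor accessor that generated code still carries; timestamppb is used purely as a convenient, dependency-free source of such bytes and is an assumption of the sketch, not something this patch relies on.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Deprecated legacy accessor: gzip-compressed FileDescriptorProto plus the
	// index path to the Timestamp message within that file.
	b, idxs := (&timestamppb.Timestamp{}).Descriptor()

	zr, err := gzip.NewReader(bytes.NewReader(b))
	if err != nil {
		panic(err)
	}
	raw, err := ioutil.ReadAll(zr)
	if err != nil {
		panic(err)
	}

	fdp := &descriptorpb.FileDescriptorProto{}
	if err := proto.Unmarshal(raw, fdp); err != nil {
		panic(err)
	}
	fd, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles)
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.Path(), fd.Messages().Get(idxs[0]).FullName())
	// Expected: google/protobuf/timestamp.proto google.protobuf.Timestamp
}
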
+ +package impl + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/internal/descopts" + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// legacyWrapMessage wraps v as a protoreflect.Message, +// where v must be a *struct kind and not implement the v2 API already. +func legacyWrapMessage(v reflect.Value) pref.Message { + typ := v.Type() + if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct { + return aberrantMessage{v: v} + } + mt := legacyLoadMessageInfo(typ, "") + return mt.MessageOf(v.Interface()) +} + +var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo + +// legacyLoadMessageInfo dynamically loads a *MessageInfo for t, +// where t must be a *struct kind and not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. +func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { + // Fast-path: check if a MessageInfo is cached for this concrete type. + if mt, ok := legacyMessageTypeCache.Load(t); ok { + return mt.(*MessageInfo) + } + + // Slow-path: derive message descriptor and initialize MessageInfo. + mi := &MessageInfo{ + Desc: legacyLoadMessageDesc(t, name), + GoReflectType: t, + } + + v := reflect.Zero(t).Interface() + if _, ok := v.(legacyMarshaler); ok { + mi.methods.Marshal = legacyMarshal + + // We have no way to tell whether the type's Marshal method + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. + mi.methods.Flags |= piface.SupportMarshalDeterministic + } + if _, ok := v.(legacyUnmarshaler); ok { + mi.methods.Unmarshal = legacyUnmarshal + } + if _, ok := v.(legacyMerger); ok { + mi.methods.Merge = legacyMerge + } + + if mi, ok := legacyMessageTypeCache.LoadOrStore(t, mi); ok { + return mi.(*MessageInfo) + } + return mi +} + +var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor + +// LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type, +// which must be a *struct kind and not implement the v2 API already. +// +// This is exported for testing purposes. +func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { + return legacyLoadMessageDesc(t, "") +} +func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + // Fast-path: check if a MessageDescriptor is cached for this concrete type. + if mi, ok := legacyMessageDescCache.Load(t); ok { + return mi.(pref.MessageDescriptor) + } + + // Slow-path: initialize MessageDescriptor from the raw descriptor. + mv := reflect.Zero(t).Interface() + if _, ok := mv.(pref.ProtoMessage); ok { + panic(fmt.Sprintf("%v already implements proto.Message", t)) + } + mdV1, ok := mv.(messageV1) + if !ok { + return aberrantLoadMessageDesc(t, name) + } + + // If this is a dynamic message type where there isn't a 1-1 mapping between + // Go and protobuf types, calling the Descriptor method on the zero value of + // the message type isn't likely to work. If it panics, swallow the panic and + // continue as if the Descriptor method wasn't present. 
+ b, idxs := func() ([]byte, []int) { + defer func() { + recover() + }() + return mdV1.Descriptor() + }() + if b == nil { + return aberrantLoadMessageDesc(t, name) + } + + // If the Go type has no fields, then this might be a proto3 empty message + // from before the size cache was added. If there are any fields, check to + // see that at least one of them looks like something we generated. + if nfield := t.Elem().NumField(); nfield > 0 { + hasProtoField := false + for i := 0; i < nfield; i++ { + f := t.Elem().Field(i) + if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { + hasProtoField = true + break + } + } + if !hasProtoField { + return aberrantLoadMessageDesc(t, name) + } + } + + md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) + for _, i := range idxs[1:] { + md = md.Messages().Get(i) + } + if name != "" && md.FullName() != name { + panic(fmt.Sprintf("mismatching message name: got %v, want %v", md.FullName(), name)) + } + if md, ok := legacyMessageDescCache.LoadOrStore(t, md); ok { + return md.(protoreflect.MessageDescriptor) + } + return md +} + +var ( + aberrantMessageDescLock sync.Mutex + aberrantMessageDescCache map[reflect.Type]protoreflect.MessageDescriptor +) + +// aberrantLoadMessageDesc returns an MessageDescriptor derived from the Go type, +// which must not implement protoreflect.ProtoMessage or messageV1. +// +// This is a best-effort derivation of the message descriptor using the protobuf +// tags on the struct fields. +func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + aberrantMessageDescLock.Lock() + defer aberrantMessageDescLock.Unlock() + if aberrantMessageDescCache == nil { + aberrantMessageDescCache = make(map[reflect.Type]protoreflect.MessageDescriptor) + } + return aberrantLoadMessageDescReentrant(t, name) +} +func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + // Fast-path: check if an MessageDescriptor is cached for this concrete type. + if md, ok := aberrantMessageDescCache[t]; ok { + return md + } + + // Slow-path: construct a descriptor from the Go struct type (best-effort). + // Cache the MessageDescriptor early on so that we can resolve internal + // cyclic references. + md := &filedesc.Message{L2: new(filedesc.MessageL2)} + md.L0.FullName = aberrantDeriveMessageName(t, name) + md.L0.ParentFile = filedesc.SurrogateProto2 + aberrantMessageDescCache[t] = md + + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return md + } + + // Try to determine if the message is using proto3 by checking scalars. + for i := 0; i < t.Elem().NumField(); i++ { + f := t.Elem().Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" { + switch f.Type.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + md.L0.ParentFile = filedesc.SurrogateProto3 + } + for _, s := range strings.Split(tag, ",") { + if s == "proto3" { + md.L0.ParentFile = filedesc.SurrogateProto3 + } + } + } + } + + // Obtain a list of oneof wrapper types. 
+ var oneofWrappers []reflect.Type + for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { + if fn, ok := t.MethodByName(method); ok { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) + } + } + } + } + } + + // Obtain a list of the extension ranges. + if fn, ok := t.MethodByName("ExtensionRangeArray"); ok { + vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] + for i := 0; i < vs.Len(); i++ { + v := vs.Index(i) + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{ + pref.FieldNumber(v.FieldByName("Start").Int()), + pref.FieldNumber(v.FieldByName("End").Int() + 1), + }) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) + } + } + + // Derive the message fields by inspecting the struct fields. + for i := 0; i < t.Elem().NumField(); i++ { + f := t.Elem().Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" { + tagKey := f.Tag.Get("protobuf_key") + tagVal := f.Tag.Get("protobuf_val") + aberrantAppendField(md, f.Type, tag, tagKey, tagVal) + } + if tag := f.Tag.Get("protobuf_oneof"); tag != "" { + n := len(md.L2.Oneofs.List) + md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) + od := &md.L2.Oneofs.List[n] + od.L0.FullName = md.FullName().Append(pref.Name(tag)) + od.L0.ParentFile = md.L0.ParentFile + od.L0.Parent = md + od.L0.Index = n + + for _, t := range oneofWrappers { + if t.Implements(f.Type) { + f := t.Elem().Field(0) + if tag := f.Tag.Get("protobuf"); tag != "" { + aberrantAppendField(md, f.Type, tag, "", "") + fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] + fd.L1.ContainingOneof = od + od.L1.Fields.List = append(od.L1.Fields.List, fd) + } + } + } + } + } + + return md +} + +func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName { + if name.IsValid() { + return name + } + func() { + defer func() { recover() }() // swallow possible nil panics + if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { + name = pref.FullName(m.XXX_MessageName()) + } + }() + if name.IsValid() { + return name + } + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return AberrantDeriveFullName(t) +} + +func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) { + t := goType + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 + if isOptional || isRepeated { + t = t.Elem() + } + fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field) + + // Append field descriptor to the message. + n := len(md.L2.Fields.List) + md.L2.Fields.List = append(md.L2.Fields.List, *fd) + fd = &md.L2.Fields.List[n] + fd.L0.FullName = md.FullName().Append(fd.Name()) + fd.L0.ParentFile = md.L0.ParentFile + fd.L0.Parent = md + fd.L0.Index = n + + if fd.L1.IsWeak || fd.L1.HasPacked { + fd.L1.Options = func() pref.ProtoMessage { + opts := descopts.Field.ProtoReflect().New() + if fd.L1.IsWeak { + opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) + } + if fd.L1.HasPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) + } + return opts.Interface() + } + } + + // Populate Enum and Message. 
+ if fd.Enum() == nil && fd.Kind() == pref.EnumKind { + switch v := reflect.Zero(t).Interface().(type) { + case pref.Enum: + fd.L1.Enum = v.Descriptor() + default: + fd.L1.Enum = LegacyLoadEnumDesc(t) + } + } + if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) { + switch v := reflect.Zero(t).Interface().(type) { + case pref.ProtoMessage: + fd.L1.Message = v.ProtoReflect().Descriptor() + case messageV1: + fd.L1.Message = LegacyLoadMessageDesc(t) + default: + if t.Kind() == reflect.Map { + n := len(md.L1.Messages.List) + md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) + md2 := &md.L1.Messages.List[n] + md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name())))) + md2.L0.ParentFile = md.L0.ParentFile + md2.L0.Parent = md + md2.L0.Index = n + + md2.L1.IsMapEntry = true + md2.L2.Options = func() pref.ProtoMessage { + opts := descopts.Message.ProtoReflect().New() + opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) + return opts.Interface() + } + + aberrantAppendField(md2, t.Key(), tagKey, "", "") + aberrantAppendField(md2, t.Elem(), tagVal, "", "") + + fd.L1.Message = md2 + break + } + fd.L1.Message = aberrantLoadMessageDescReentrant(t, "") + } + } +} + +type placeholderEnumValues struct { + protoreflect.EnumValueDescriptors +} + +func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor { + return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n))) +} + +// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. +type legacyMarshaler interface { + Marshal() ([]byte, error) +} + +// legacyUnmarshaler is the proto.Unmarshaler interface superseded by protoiface.Methoder. +type legacyUnmarshaler interface { + Unmarshal([]byte) error +} + +// legacyMerger is the proto.Merger interface superseded by protoiface.Methoder. +type legacyMerger interface { + Merge(protoiface.MessageV1) +} + +var legacyProtoMethods = &piface.Methods{ + Marshal: legacyMarshal, + Unmarshal: legacyUnmarshal, + Merge: legacyMerge, + + // We have no way to tell whether the type's Marshal method + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. + Flags: piface.SupportMarshalDeterministic, +} + +func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + marshaler, ok := v.(legacyMarshaler) + if !ok { + return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) + } + out, err := marshaler.Marshal() + if in.Buf != nil { + out = append(in.Buf, out...) 
+ } + return piface.MarshalOutput{ + Buf: out, + }, err +} + +func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + unmarshaler, ok := v.(legacyUnmarshaler) + if !ok { + return piface.UnmarshalOutput{}, errors.New("%T does not implement Marshal", v) + } + return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) +} + +func legacyMerge(in piface.MergeInput) piface.MergeOutput { + dstv := in.Destination.(unwrapper).protoUnwrap() + merger, ok := dstv.(legacyMerger) + if !ok { + return piface.MergeOutput{} + } + merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + return piface.MergeOutput{Flags: piface.MergeComplete} +} + +// aberrantMessageType implements MessageType for all types other than pointer-to-struct. +type aberrantMessageType struct { + t reflect.Type +} + +func (mt aberrantMessageType) New() pref.Message { + return aberrantMessage{reflect.Zero(mt.t)} +} +func (mt aberrantMessageType) Zero() pref.Message { + return aberrantMessage{reflect.Zero(mt.t)} +} +func (mt aberrantMessageType) GoType() reflect.Type { + return mt.t +} +func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor { + return LegacyLoadMessageDesc(mt.t) +} + +// aberrantMessage implements Message for all types other than pointer-to-struct. +// +// When the underlying type implements legacyMarshaler or legacyUnmarshaler, +// the aberrant Message can be marshaled or unmarshaled. Otherwise, there is +// not much that can be done with values of this type. +type aberrantMessage struct { + v reflect.Value +} + +func (m aberrantMessage) ProtoReflect() pref.Message { + return m +} + +func (m aberrantMessage) Descriptor() pref.MessageDescriptor { + return LegacyLoadMessageDesc(m.v.Type()) +} +func (m aberrantMessage) Type() pref.MessageType { + return aberrantMessageType{m.v.Type()} +} +func (m aberrantMessage) New() pref.Message { + return aberrantMessage{reflect.Zero(m.v.Type())} +} +func (m aberrantMessage) Interface() pref.ProtoMessage { + return m +} +func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +} +func (m aberrantMessage) Has(pref.FieldDescriptor) bool { + panic("invalid field descriptor") +} +func (m aberrantMessage) Clear(pref.FieldDescriptor) { + panic("invalid field descriptor") +} +func (m aberrantMessage) Get(pref.FieldDescriptor) pref.Value { + panic("invalid field descriptor") +} +func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { + panic("invalid field descriptor") +} +func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { + panic("invalid field descriptor") +} +func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { + panic("invalid field descriptor") +} +func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { + panic("invalid oneof descriptor") +} +func (m aberrantMessage) GetUnknown() pref.RawFields { + return nil +} +func (m aberrantMessage) SetUnknown(pref.RawFields) { + // SetUnknown discards its input on messages which don't support unknown field storage. +} +func (m aberrantMessage) IsValid() bool { + // An invalid message is a read-only, empty message. Since we don't know anything + // about the alleged contents of this message, we can't say with confidence that + // it is invalid in this sense. Therefore, report it as valid. 
+ return true +} +func (m aberrantMessage) ProtoMethods() *piface.Methods { + return legacyProtoMethods +} +func (m aberrantMessage) protoUnwrap() interface{} { + return m.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go new file mode 100644 index 000000000..cdc4267df --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -0,0 +1,176 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type mergeOptions struct{} + +func (o mergeOptions) Merge(dst, src proto.Message) { + proto.Merge(dst, src) +} + +// merge is protoreflect.Methods.Merge. +func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput { + dp, ok := mi.getPointer(in.Destination) + if !ok { + return piface.MergeOutput{} + } + sp, ok := mi.getPointer(in.Source) + if !ok { + return piface.MergeOutput{} + } + mi.mergePointer(dp, sp, mergeOptions{}) + return piface.MergeOutput{Flags: piface.MergeComplete} +} + +func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { + mi.init() + if dst.IsNil() { + panic(fmt.Sprintf("invalid value: merging into nil message")) + } + if src.IsNil() { + return + } + for _, f := range mi.orderedCoderFields { + if f.funcs.merge == nil { + continue + } + sfptr := src.Apply(f.offset) + if f.isPointer && sfptr.Elem().IsNil() { + continue + } + f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) + } + if mi.extensionOffset.IsValid() { + sext := src.Apply(mi.extensionOffset).Extensions() + dext := dst.Apply(mi.extensionOffset).Extensions() + if *dext == nil { + *dext = make(map[int32]ExtensionField) + } + for num, sx := range *sext { + xt := sx.Type() + xi := getExtensionFieldInfo(xt) + if xi.funcs.merge == nil { + continue + } + dx := (*dext)[num] + var dv pref.Value + if dx.Type() == sx.Type() { + dv = dx.Value() + } + if !dv.IsValid() && xi.unmarshalNeedsValue { + dv = xt.New() + } + dv = xi.funcs.merge(dv, sx.Value(), opts) + dx.Set(sx.Type(), dv) + (*dext)[num] = dx + } + } + if mi.unknownOffset.IsValid() { + du := dst.Apply(mi.unknownOffset).Bytes() + su := src.Apply(mi.unknownOffset).Bytes() + if len(*su) > 0 { + *du = append(*du, *su...) + } + } +} + +func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value { + return src +} + +func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value { + return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) +} + +func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + dstl.Append(srcl.Get(i)) + } + return dst +} + +func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sb := srcl.Get(i).Bytes() + db := append(emptyBuf[:], sb...) 
+ dstl.Append(pref.ValueOfBytes(db)) + } + return dst +} + +func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sm := srcl.Get(i).Message() + dm := proto.Clone(sm.Interface()).ProtoReflect() + dstl.Append(pref.ValueOfMessage(dm)) + } + return dst +} + +func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value { + opts.Merge(dst.Message().Interface(), src.Message().Interface()) + return dst +} + +func mergeMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + if f.mi != nil { + if dst.Elem().IsNil() { + dst.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + f.mi.mergePointer(dst.Elem(), src.Elem(), opts) + } else { + dm := dst.AsValueOf(f.ft).Elem() + sm := src.AsValueOf(f.ft).Elem() + if dm.IsNil() { + dm.Set(reflect.New(f.ft.Elem())) + } + opts.Merge(asMessage(dm), asMessage(sm)) + } +} + +func mergeMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + for _, sp := range src.PointerSlice() { + dm := reflect.New(f.ft.Elem().Elem()) + if f.mi != nil { + f.mi.mergePointer(pointerOfValue(dm), sp, opts) + } else { + opts.Merge(asMessage(dm), asMessage(sp.AsValueOf(f.ft.Elem().Elem()))) + } + dst.AppendPointerSlice(pointerOfValue(dm)) + } +} + +func mergeBytes(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Bytes() = append(emptyBuf[:], *src.Bytes()...) +} + +func mergeBytesNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Bytes() + if len(v) > 0 { + *dst.Bytes() = append(emptyBuf[:], v...) + } +} + +func mergeBytesSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.BytesSlice() + for _, v := range *src.BytesSlice() { + *ds = append(*ds, append(emptyBuf[:], v...)) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go new file mode 100644 index 000000000..8816c274d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go @@ -0,0 +1,209 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import () + +func mergeBool(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Bool() = *src.Bool() +} + +func mergeBoolNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Bool() + if v != false { + *dst.Bool() = v + } +} + +func mergeBoolPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.BoolPtr() + if p != nil { + v := *p + *dst.BoolPtr() = &v + } +} + +func mergeBoolSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.BoolSlice() + ss := src.BoolSlice() + *ds = append(*ds, *ss...) +} + +func mergeInt32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Int32() = *src.Int32() +} + +func mergeInt32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Int32() + if v != 0 { + *dst.Int32() = v + } +} + +func mergeInt32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Int32Ptr() + if p != nil { + v := *p + *dst.Int32Ptr() = &v + } +} + +func mergeInt32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Int32Slice() + ss := src.Int32Slice() + *ds = append(*ds, *ss...) 
+} + +func mergeUint32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Uint32() = *src.Uint32() +} + +func mergeUint32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Uint32() + if v != 0 { + *dst.Uint32() = v + } +} + +func mergeUint32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Uint32Ptr() + if p != nil { + v := *p + *dst.Uint32Ptr() = &v + } +} + +func mergeUint32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Uint32Slice() + ss := src.Uint32Slice() + *ds = append(*ds, *ss...) +} + +func mergeInt64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Int64() = *src.Int64() +} + +func mergeInt64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Int64() + if v != 0 { + *dst.Int64() = v + } +} + +func mergeInt64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Int64Ptr() + if p != nil { + v := *p + *dst.Int64Ptr() = &v + } +} + +func mergeInt64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Int64Slice() + ss := src.Int64Slice() + *ds = append(*ds, *ss...) +} + +func mergeUint64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Uint64() = *src.Uint64() +} + +func mergeUint64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Uint64() + if v != 0 { + *dst.Uint64() = v + } +} + +func mergeUint64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Uint64Ptr() + if p != nil { + v := *p + *dst.Uint64Ptr() = &v + } +} + +func mergeUint64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Uint64Slice() + ss := src.Uint64Slice() + *ds = append(*ds, *ss...) +} + +func mergeFloat32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Float32() = *src.Float32() +} + +func mergeFloat32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Float32() + if v != 0 { + *dst.Float32() = v + } +} + +func mergeFloat32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Float32Ptr() + if p != nil { + v := *p + *dst.Float32Ptr() = &v + } +} + +func mergeFloat32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Float32Slice() + ss := src.Float32Slice() + *ds = append(*ds, *ss...) +} + +func mergeFloat64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Float64() = *src.Float64() +} + +func mergeFloat64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Float64() + if v != 0 { + *dst.Float64() = v + } +} + +func mergeFloat64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Float64Ptr() + if p != nil { + v := *p + *dst.Float64Ptr() = &v + } +} + +func mergeFloat64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Float64Slice() + ss := src.Float64Slice() + *ds = append(*ds, *ss...) +} + +func mergeString(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.String() = *src.String() +} + +func mergeStringNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.String() + if v != "" { + *dst.String() = v + } +} + +func mergeStringPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.StringPtr() + if p != nil { + v := *p + *dst.StringPtr() = &v + } +} + +func mergeStringSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.StringSlice() + ss := src.StringSlice() + *ds = append(*ds, *ss...) 
+} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go new file mode 100644 index 000000000..c026a9818 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -0,0 +1,215 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// MessageInfo provides protobuf related functionality for a given Go type +// that represents a message. A given instance of MessageInfo is tied to +// exactly one Go type, which must be a pointer to a struct type. +// +// The exported fields must be populated before any methods are called +// and cannot be mutated after set. +type MessageInfo struct { + // GoReflectType is the underlying message Go type and must be populated. + GoReflectType reflect.Type // pointer to struct + + // Desc is the underlying message descriptor type and must be populated. + Desc pref.MessageDescriptor + + // Exporter must be provided in a purego environment in order to provide + // access to unexported fields. + Exporter exporter + + // OneofWrappers is list of pointers to oneof wrapper struct types. + OneofWrappers []interface{} + + initMu sync.Mutex // protects all unexported fields + initDone uint32 + + reflectMessageInfo // for reflection implementation + coderMessageInfo // for fast-path method implementations +} + +// exporter is a function that returns a reference to the ith field of v, +// where v is a pointer to a struct. It returns nil if it does not support +// exporting the requested field (e.g., already exported). +type exporter func(v interface{}, i int) interface{} + +// getMessageInfo returns the MessageInfo for any message type that +// is generated by our implementation of protoc-gen-go (for v2 and on). +// If it is unable to obtain a MessageInfo, it returns nil. +func getMessageInfo(mt reflect.Type) *MessageInfo { + m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage) + if !ok { + return nil + } + mr, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *MessageInfo }) + if !ok { + return nil + } + return mr.ProtoMessageInfo() +} + +func (mi *MessageInfo) init() { + // This function is called in the hot path. Inline the sync.Once logic, + // since allocating a closure for Once.Do is expensive. + // Keep init small to ensure that it can be inlined. + if atomic.LoadUint32(&mi.initDone) == 0 { + mi.initOnce() + } +} + +func (mi *MessageInfo) initOnce() { + mi.initMu.Lock() + defer mi.initMu.Unlock() + if mi.initDone == 1 { + return + } + + t := mi.GoReflectType + if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { + panic(fmt.Sprintf("got %v, want *struct kind", t)) + } + t = t.Elem() + + si := mi.makeStructInfo(t) + mi.makeReflectFuncs(t, si) + mi.makeCoderMethods(t, si) + + atomic.StoreUint32(&mi.initDone, 1) +} + +// getPointer returns the pointer for a message, which should be of +// the type of the MessageInfo. If the message is of a different type, +// it returns ok==false. 
+func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { + switch m := m.(type) { + case *messageState: + return m.pointer(), m.messageInfo() == mi + case *messageReflectWrapper: + return m.pointer(), m.messageInfo() == mi + } + return pointer{}, false +} + +type ( + SizeCache = int32 + WeakFields = map[int32]protoreflect.ProtoMessage + UnknownFields = []byte + ExtensionFields = map[int32]ExtensionField +) + +var ( + sizecacheType = reflect.TypeOf(SizeCache(0)) + weakFieldsType = reflect.TypeOf(WeakFields(nil)) + unknownFieldsType = reflect.TypeOf(UnknownFields(nil)) + extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) +) + +type structInfo struct { + sizecacheOffset offset + weakOffset offset + unknownOffset offset + extensionOffset offset + + fieldsByNumber map[pref.FieldNumber]reflect.StructField + oneofsByName map[pref.Name]reflect.StructField + oneofWrappersByType map[reflect.Type]pref.FieldNumber + oneofWrappersByNumber map[pref.FieldNumber]reflect.Type +} + +func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { + si := structInfo{ + sizecacheOffset: invalidOffset, + weakOffset: invalidOffset, + unknownOffset: invalidOffset, + extensionOffset: invalidOffset, + + fieldsByNumber: map[pref.FieldNumber]reflect.StructField{}, + oneofsByName: map[pref.Name]reflect.StructField{}, + oneofWrappersByType: map[reflect.Type]pref.FieldNumber{}, + oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{}, + } + +fieldLoop: + for i := 0; i < t.NumField(); i++ { + switch f := t.Field(i); f.Name { + case genid.SizeCache_goname, genid.SizeCacheA_goname: + if f.Type == sizecacheType { + si.sizecacheOffset = offsetOf(f, mi.Exporter) + } + case genid.WeakFields_goname, genid.WeakFieldsA_goname: + if f.Type == weakFieldsType { + si.weakOffset = offsetOf(f, mi.Exporter) + } + case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: + if f.Type == unknownFieldsType { + si.unknownOffset = offsetOf(f, mi.Exporter) + } + case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: + if f.Type == extensionFieldsType { + si.extensionOffset = offsetOf(f, mi.Exporter) + } + default: + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) + si.fieldsByNumber[pref.FieldNumber(n)] = f + continue fieldLoop + } + } + if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { + si.oneofsByName[pref.Name(s)] = f + continue fieldLoop + } + } + } + + // Derive a mapping of oneof wrappers to fields. 
+ oneofWrappers := mi.OneofWrappers + for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { + if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs + } + } + } + } + for _, v := range oneofWrappers { + tf := reflect.TypeOf(v).Elem() + f := tf.Field(0) + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) + si.oneofWrappersByType[tf] = pref.FieldNumber(n) + si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf + break + } + } + } + + return si +} + +func (mi *MessageInfo) New() protoreflect.Message { + return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface()) +} +func (mi *MessageInfo) Zero() protoreflect.Message { + return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) +} +func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { return mi.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go new file mode 100644 index 000000000..0f4b8db76 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -0,0 +1,364 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type reflectMessageInfo struct { + fields map[pref.FieldNumber]*fieldInfo + oneofs map[pref.Name]*oneofInfo + + // denseFields is a subset of fields where: + // 0 < fieldDesc.Number() < len(denseFields) + // It provides faster access to the fieldInfo, but may be incomplete. + denseFields []*fieldInfo + + // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. + rangeInfos []interface{} // either *fieldInfo or *oneofInfo + + getUnknown func(pointer) pref.RawFields + setUnknown func(pointer, pref.RawFields) + extensionMap func(pointer) *extensionMap + + nilMessage atomicNilMessage +} + +// makeReflectFuncs generates the set of functions to support reflection. +func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { + mi.makeKnownFieldsFunc(si) + mi.makeUnknownFieldsFunc(t, si) + mi.makeExtensionFieldsFunc(t, si) +} + +// makeKnownFieldsFunc generates functions for operations that can be performed +// on each protobuf message field. It takes in a reflect.Type representing the +// Go struct and matches message fields with struct fields. +// +// This code assumes that the struct is well-formed and panics if there are +// any discrepancies. 
+func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { + mi.fields = map[pref.FieldNumber]*fieldInfo{} + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + var fi fieldInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fd.IsMap(): + fi = fieldInfoForMap(fd, fs, mi.Exporter) + case fd.IsList(): + fi = fieldInfoForList(fd, fs, mi.Exporter) + case fd.IsWeak(): + fi = fieldInfoForWeakMessage(fd, si.weakOffset) + case fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind: + fi = fieldInfoForMessage(fd, fs, mi.Exporter) + default: + fi = fieldInfoForScalar(fd, fs, mi.Exporter) + } + mi.fields[fd.Number()] = &fi + } + + mi.oneofs = map[pref.Name]*oneofInfo{} + for i := 0; i < md.Oneofs().Len(); i++ { + od := md.Oneofs().Get(i) + mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) + } + + mi.denseFields = make([]*fieldInfo, fds.Len()*2) + for i := 0; i < fds.Len(); i++ { + if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) { + mi.denseFields[fd.Number()] = mi.fields[fd.Number()] + } + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil && !od.IsSynthetic() { + mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) + i += od.Fields().Len() + } else { + mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()]) + i++ + } + } +} + +func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { + mi.getUnknown = func(pointer) pref.RawFields { return nil } + mi.setUnknown = func(pointer, pref.RawFields) { return } + if si.unknownOffset.IsValid() { + mi.getUnknown = func(p pointer) pref.RawFields { + if p.IsNil() { + return nil + } + rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) + return pref.RawFields(*rv.Interface().(*[]byte)) + } + mi.setUnknown = func(p pointer, b pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) + *rv.Interface().(*[]byte) = []byte(b) + } + } else { + mi.getUnknown = func(pointer) pref.RawFields { + return nil + } + mi.setUnknown = func(p pointer, _ pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + } + } +} + +func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) { + if si.extensionOffset.IsValid() { + mi.extensionMap = func(p pointer) *extensionMap { + if p.IsNil() { + return (*extensionMap)(nil) + } + v := p.Apply(si.extensionOffset).AsValueOf(extensionFieldsType) + return (*extensionMap)(v.Interface().(*map[int32]ExtensionField)) + } + } else { + mi.extensionMap = func(pointer) *extensionMap { + return (*extensionMap)(nil) + } + } +} + +type extensionMap map[int32]ExtensionField + +func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + if m != nil { + for _, x := range *m { + xd := x.Type().TypeDescriptor() + v := x.Value() + if xd.IsList() && v.List().Len() == 0 { + continue + } + if !f(xd, v) { + return + } + } + } +} +func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { + if m == nil { + return false + } + xd := xt.TypeDescriptor() + x, ok := (*m)[int32(xd.Number())] + if !ok { + return false + } + switch { + case xd.IsList(): + return x.Value().List().Len() > 0 + case xd.IsMap(): + return x.Value().Map().Len() > 0 + case 
xd.Message() != nil: + return x.Value().Message().IsValid() + } + return true +} +func (m *extensionMap) Clear(xt pref.ExtensionType) { + delete(*m, int32(xt.TypeDescriptor().Number())) +} +func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { + xd := xt.TypeDescriptor() + if m != nil { + if x, ok := (*m)[int32(xd.Number())]; ok { + return x.Value() + } + } + return xt.Zero() +} +func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { + xd := xt.TypeDescriptor() + isValid := true + switch { + case !xt.IsValidValue(v): + isValid = false + case xd.IsList(): + isValid = v.List().IsValid() + case xd.IsMap(): + isValid = v.Map().IsValid() + case xd.Message() != nil: + isValid = v.Message().IsValid() + } + if !isValid { + panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) + } + + if *m == nil { + *m = make(map[int32]ExtensionField) + } + var x ExtensionField + x.Set(xt, v) + (*m)[int32(xd.Number())] = x +} +func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { + xd := xt.TypeDescriptor() + if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() { + panic("invalid Mutable on field with non-composite type") + } + if x, ok := (*m)[int32(xd.Number())]; ok { + return x.Value() + } + v := xt.New() + m.Set(xt, v) + return v +} + +// MessageState is a data structure that is nested as the first field in a +// concrete message. It provides a way to implement the ProtoReflect method +// in an allocation-free way without needing to have a shadow Go type generated +// for every message type. This technique only works using unsafe. +// +// +// Example generated code: +// +// type M struct { +// state protoimpl.MessageState +// +// Field1 int32 +// Field2 string +// Field3 *BarMessage +// ... +// } +// +// func (m *M) ProtoReflect() protoreflect.Message { +// mi := &file_fizz_buzz_proto_msgInfos[5] +// if protoimpl.UnsafeEnabled && m != nil { +// ms := protoimpl.X.MessageStateOf(Pointer(m)) +// if ms.LoadMessageInfo() == nil { +// ms.StoreMessageInfo(mi) +// } +// return ms +// } +// return mi.MessageOf(m) +// } +// +// The MessageState type holds a *MessageInfo, which must be atomically set to +// the message info associated with a given message instance. +// By unsafely converting a *M into a *MessageState, the MessageState object +// has access to all the information needed to implement protobuf reflection. +// It has access to the message info as its first field, and a pointer to the +// MessageState is identical to a pointer to the concrete message value. +// +// +// Requirements: +// • The type M must implement protoreflect.ProtoMessage. +// • The address of m must not be nil. +// • The address of m and the address of m.state must be equal, +// even though they are different Go types. +type MessageState struct { + pragma.NoUnkeyedLiterals + pragma.DoNotCompare + pragma.DoNotCopy + + atomicMessageInfo *MessageInfo +} + +type messageState MessageState + +var ( + _ pref.Message = (*messageState)(nil) + _ unwrapper = (*messageState)(nil) +) + +// messageDataType is a tuple of a pointer to the message data and +// a pointer to the message type. It is a generalized way of providing a +// reflective view over a message instance. The disadvantage of this approach +// is the need to allocate this tuple of 16B. 
+type messageDataType struct { + p pointer + mi *MessageInfo +} + +type ( + messageReflectWrapper messageDataType + messageIfaceWrapper messageDataType +) + +var ( + _ pref.Message = (*messageReflectWrapper)(nil) + _ unwrapper = (*messageReflectWrapper)(nil) + _ pref.ProtoMessage = (*messageIfaceWrapper)(nil) + _ unwrapper = (*messageIfaceWrapper)(nil) +) + +// MessageOf returns a reflective view over a message. The input must be a +// pointer to a named Go struct. If the provided type has a ProtoReflect method, +// it must be implemented by calling this method. +func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { + // TODO: Switch the input to be an opaque Pointer. + if reflect.TypeOf(m) != mi.GoReflectType { + panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) + } + p := pointerOfIface(m) + if p.IsNil() { + return mi.nilMessage.Init(mi) + } + return &messageReflectWrapper{p, mi} +} + +func (m *messageReflectWrapper) pointer() pointer { return m.p } +func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi } + +func (m *messageIfaceWrapper) ProtoReflect() pref.Message { + return (*messageReflectWrapper)(m) +} +func (m *messageIfaceWrapper) protoUnwrap() interface{} { + return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) +} + +// checkField verifies that the provided field descriptor is valid. +// Exactly one of the returned values is populated. +func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) { + var fi *fieldInfo + if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { + fi = mi.denseFields[n] + } else { + fi = mi.fields[n] + } + if fi != nil { + if fi.fieldDesc != fd { + if got, want := fd.FullName(), fi.fieldDesc.FullName(); got != want { + panic(fmt.Sprintf("mismatching field: got %v, want %v", got, want)) + } + panic(fmt.Sprintf("mismatching field: %v", fd.FullName())) + } + return fi, nil + } + + if fd.IsExtension() { + if got, want := fd.ContainingMessage().FullName(), mi.Desc.FullName(); got != want { + // TODO: Should this be exact containing message descriptor match? + panic(fmt.Sprintf("extension %v has mismatching containing message: got %v, want %v", fd.FullName(), got, want)) + } + if !mi.Desc.ExtensionRanges().Has(fd.Number()) { + panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) + } + xtd, ok := fd.(pref.ExtensionTypeDescriptor) + if !ok { + panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) + } + return nil, xtd.Type() + } + panic(fmt.Sprintf("field %v is invalid", fd.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go new file mode 100644 index 000000000..23124a86e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -0,0 +1,466 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "reflect" + "sync" + + "google.golang.org/protobuf/internal/flags" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +type fieldInfo struct { + fieldDesc pref.FieldDescriptor + + // These fields are used for protobuf reflection support. 
+ has func(pointer) bool + clear func(pointer) + get func(pointer) pref.Value + set func(pointer, pref.Value) + mutable func(pointer) pref.Value + newMessage func() pref.Message + newField func() pref.Value +} + +func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Interface { + panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) + } + if ot.Kind() != reflect.Struct { + panic(fmt.Sprintf("field %v has invalid type: got %v, want struct kind", fd.FullName(), ot)) + } + if !reflect.PtrTo(ot).Implements(ft) { + panic(fmt.Sprintf("field %v has invalid type: %v does not implement %v", fd.FullName(), ot, ft)) + } + conv := NewConverter(ot.Field(0).Type, fd) + isMessage := fd.Message() != nil + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + // NOTE: The logic below intentionally assumes that oneof fields are + // well-formatted. That is, the oneof interface never contains a + // typed nil pointer to one of the wrapper structs. + + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + return false + } + return true + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot { + // NOTE: We intentionally don't check for rv.Elem().IsNil() + // so that (*OneofWrapperType)(nil) gets cleared to nil. + return + } + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + return conv.Zero() + } + rv = rv.Elem().Elem().Field(0) + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) + } + rv = rv.Elem().Elem().Field(0) + rv.Set(conv.GoValueOf(v)) + }, + mutable: func(p pointer) pref.Value { + if !isMessage { + panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) + } + rv = rv.Elem().Elem().Field(0) + if rv.IsNil() { + rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) + } + return conv.PBValueOf(rv) + }, + newMessage: func() pref.Message { + return conv.New().Message() + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Map { + panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) + } + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? 
+ fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("map field %v cannot be set with read-only value", fd.FullName())) + } + rv.Set(pv) + }, + mutable: func(p pointer) pref.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if v.IsNil() { + v.Set(reflect.MakeMap(fs.Type)) + } + return conv.PBValueOf(v) + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) + } + conv := NewConverter(reflect.PtrTo(ft), fd) + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("list field %v cannot be set with read-only value", fd.FullName())) + } + rv.Set(pv.Elem()) + }, + mutable: func(p pointer) pref.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type) + return conv.PBValueOf(v) + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +var ( + nilBytes = reflect.ValueOf([]byte(nil)) + emptyBytes = reflect.ValueOf([]byte{}) +) + +func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + nullable := fd.HasPresence() + isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 + if nullable { + if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("field %v has invalid type: got %v, want pointer", fd.FullName(), ft)) + } + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + } + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? 
+ fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable { + return !rv.IsNil() + } + switch rv.Kind() { + case reflect.Bool: + return rv.Bool() + case reflect.Int32, reflect.Int64: + return rv.Int() != 0 + case reflect.Uint32, reflect.Uint64: + return rv.Uint() != 0 + case reflect.Float32, reflect.Float64: + return rv.Float() != 0 || math.Signbit(rv.Float()) + case reflect.String, reflect.Slice: + return rv.Len() > 0 + default: + panic(fmt.Sprintf("field %v has invalid type: %v", fd.FullName(), rv.Type())) // should never happen + } + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable { + if rv.IsNil() { + return conv.Zero() + } + if rv.Kind() == reflect.Ptr { + rv = rv.Elem() + } + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable && rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(ft)) + } + rv = rv.Elem() + } + rv.Set(conv.GoValueOf(v)) + if isBytes && rv.Len() == 0 { + if nullable { + rv.Set(emptyBytes) // preserve presence + } else { + rv.Set(nilBytes) // do not preserve presence + } + } + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo { + if !flags.ProtoLegacy { + panic("no support for proto1 weak fields") + } + + var once sync.Once + var messageType pref.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() + messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + if messageType == nil { + panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) + } + }) + } + + num := fd.Number() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + _, ok := p.Apply(weakOffset).WeakFields().get(num) + return ok + }, + clear: func(p pointer) { + p.Apply(weakOffset).WeakFields().clear(num) + }, + get: func(p pointer) pref.Value { + lazyInit() + if p.IsNil() { + return pref.ValueOfMessage(messageType.Zero()) + } + m, ok := p.Apply(weakOffset).WeakFields().get(num) + if !ok { + return pref.ValueOfMessage(messageType.Zero()) + } + return pref.ValueOfMessage(m.ProtoReflect()) + }, + set: func(p pointer, v pref.Value) { + lazyInit() + m := v.Message() + if m.Descriptor() != messageType.Descriptor() { + if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want { + panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) + } + panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) + } + p.Apply(weakOffset).WeakFields().set(num, m.Interface()) + }, + mutable: func(p pointer) pref.Value { + lazyInit() + fs := p.Apply(weakOffset).WeakFields() + m, ok := fs.get(num) + if !ok { + m = messageType.New().Interface() + fs.set(num, m) + } + return pref.ValueOfMessage(m.ProtoReflect()) + }, + newMessage: func() pref.Message { + lazyInit() + return messageType.New() + }, + newField: func() pref.Value { + lazyInit() + return pref.ValueOfMessage(messageType.New()) + 
}, + } +} + +func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return !rv.IsNil() + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(conv.GoValueOf(v)) + if rv.IsNil() { + panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) + } + }, + mutable: func(p pointer) pref.Value { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + rv.Set(conv.GoValueOf(conv.New())) + } + return conv.PBValueOf(rv) + }, + newMessage: func() pref.Message { + return conv.New().Message() + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +type oneofInfo struct { + oneofDesc pref.OneofDescriptor + which func(pointer) pref.FieldNumber +} + +func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fs := si.fieldsByNumber[od.Fields().Get(0).Number()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) pref.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { // valid on either *T or []byte + return 0 + } + return od.Fields().Get(0).Number() + } + } else { + fs := si.oneofsByName[od.Name()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) pref.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + return 0 + } + rv = rv.Elem() + if rv.IsNil() { + return 0 + } + return si.oneofWrappersByType[rv.Type().Elem()] + } + } + return oi +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go new file mode 100644 index 000000000..741d6e5b6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -0,0 +1,249 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. 
+ +package impl + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func (m *messageState) Descriptor() protoreflect.MessageDescriptor { + return m.messageInfo().Desc +} +func (m *messageState) Type() protoreflect.MessageType { + return m.messageInfo() +} +func (m *messageState) New() protoreflect.Message { + return m.messageInfo().New() +} +func (m *messageState) Interface() protoreflect.ProtoMessage { + return m.protoUnwrap().(protoreflect.ProtoMessage) +} +func (m *messageState) protoUnwrap() interface{} { + return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) +} +func (m *messageState) ProtoMethods() *protoiface.Methods { + m.messageInfo().init() + return &m.messageInfo().methods +} + +// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code +// to be able to retrieve a v2 MessageInfo struct. +// +// WARNING: This method is exempt from the compatibility promise and +// may be removed in the future without warning. +func (m *messageState) ProtoMessageInfo() *MessageInfo { + return m.messageInfo() +} + +func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + m.messageInfo().init() + for _, ri := range m.messageInfo().rangeInfos { + switch ri := ri.(type) { + case *fieldInfo: + if ri.has(m.pointer()) { + if !f(ri.fieldDesc, ri.get(m.pointer())) { + return + } + } + case *oneofInfo: + if n := ri.which(m.pointer()); n > 0 { + fi := m.messageInfo().fields[n] + if !f(fi.fieldDesc, fi.get(m.pointer())) { + return + } + } + } + } + m.messageInfo().extensionMap(m.pointer()).Range(f) +} +func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.has(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Has(xt) + } +} +func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.clear(m.pointer()) + } else { + m.messageInfo().extensionMap(m.pointer()).Clear(xt) + } +} +func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.get(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Get(xt) + } +} +func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.set(m.pointer(), v) + } else { + m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + } +} +func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.mutable(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + } +} +func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.newField() + } else { + return xt.New() + } +} +func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + m.messageInfo().init() + if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + return od.Fields().ByNumber(oi.which(m.pointer())) + } + panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + 
string(m.Descriptor().FullName())) +} +func (m *messageState) GetUnknown() protoreflect.RawFields { + m.messageInfo().init() + return m.messageInfo().getUnknown(m.pointer()) +} +func (m *messageState) SetUnknown(b protoreflect.RawFields) { + m.messageInfo().init() + m.messageInfo().setUnknown(m.pointer(), b) +} +func (m *messageState) IsValid() bool { + return !m.pointer().IsNil() +} + +func (m *messageReflectWrapper) Descriptor() protoreflect.MessageDescriptor { + return m.messageInfo().Desc +} +func (m *messageReflectWrapper) Type() protoreflect.MessageType { + return m.messageInfo() +} +func (m *messageReflectWrapper) New() protoreflect.Message { + return m.messageInfo().New() +} +func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { + if m, ok := m.protoUnwrap().(protoreflect.ProtoMessage); ok { + return m + } + return (*messageIfaceWrapper)(m) +} +func (m *messageReflectWrapper) protoUnwrap() interface{} { + return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) +} +func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { + m.messageInfo().init() + return &m.messageInfo().methods +} + +// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code +// to be able to retrieve a v2 MessageInfo struct. +// +// WARNING: This method is exempt from the compatibility promise and +// may be removed in the future without warning. +func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { + return m.messageInfo() +} + +func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + m.messageInfo().init() + for _, ri := range m.messageInfo().rangeInfos { + switch ri := ri.(type) { + case *fieldInfo: + if ri.has(m.pointer()) { + if !f(ri.fieldDesc, ri.get(m.pointer())) { + return + } + } + case *oneofInfo: + if n := ri.which(m.pointer()); n > 0 { + fi := m.messageInfo().fields[n] + if !f(fi.fieldDesc, fi.get(m.pointer())) { + return + } + } + } + } + m.messageInfo().extensionMap(m.pointer()).Range(f) +} +func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.has(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Has(xt) + } +} +func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.clear(m.pointer()) + } else { + m.messageInfo().extensionMap(m.pointer()).Clear(xt) + } +} +func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.get(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Get(xt) + } +} +func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.set(m.pointer(), v) + } else { + m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + } +} +func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.mutable(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + } +} +func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := 
m.messageInfo().checkField(fd); fi != nil { + return fi.newField() + } else { + return xt.New() + } +} +func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + m.messageInfo().init() + if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + return od.Fields().ByNumber(oi.which(m.pointer())) + } + panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) +} +func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { + m.messageInfo().init() + return m.messageInfo().getUnknown(m.pointer()) +} +func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { + m.messageInfo().init() + m.messageInfo().setUnknown(m.pointer(), b) +} +func (m *messageReflectWrapper) IsValid() bool { + return !m.pointer().IsNil() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go new file mode 100644 index 000000000..67b4ede67 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -0,0 +1,177 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego appengine + +package impl + +import ( + "fmt" + "reflect" + "sync" +) + +const UnsafeEnabled = false + +// Pointer is an opaque pointer type. +type Pointer interface{} + +// offset represents the offset to a struct field, accessible from a pointer. +// The offset is the field index into a struct. +type offset struct { + index int + export exporter +} + +// offsetOf returns a field offset for the struct field. +func offsetOf(f reflect.StructField, x exporter) offset { + if len(f.Index) != 1 { + panic("embedded structs are not supported") + } + if f.PkgPath == "" { + return offset{index: f.Index[0]} // field is already exported + } + if x == nil { + panic("exporter must be provided for unexported field") + } + return offset{index: f.Index[0], export: x} +} + +// IsValid reports whether the offset is valid. +func (f offset) IsValid() bool { return f.index >= 0 } + +// invalidOffset is an invalid field offset. +var invalidOffset = offset{index: -1} + +// zeroOffset is a noop when calling pointer.Apply. +var zeroOffset = offset{index: 0} + +// pointer is an abstract representation of a pointer to a struct or field. +type pointer struct{ v reflect.Value } + +// pointerOf returns p as a pointer. +func pointerOf(p Pointer) pointer { + return pointerOfIface(p) +} + +// pointerOfValue returns v as a pointer. +func pointerOfValue(v reflect.Value) pointer { + return pointer{v: v} +} + +// pointerOfIface returns the pointer portion of an interface. +func pointerOfIface(v interface{}) pointer { + return pointer{v: reflect.ValueOf(v)} +} + +// IsNil reports whether the pointer is nil. +func (p pointer) IsNil() bool { + return p.v.IsNil() +} + +// Apply adds an offset to the pointer to derive a new pointer +// to a specified field. The current pointer must be pointing at a struct. +func (p pointer) Apply(f offset) pointer { + if f.export != nil { + if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { + return pointer{v: v} + } + } + return pointer{v: p.v.Elem().Field(f.index).Addr()} +} + +// AsValueOf treats p as a pointer to an object of type t and returns the value. 
+// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) +func (p pointer) AsValueOf(t reflect.Type) reflect.Value { + if got := p.v.Type().Elem(); got != t { + panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) + } + return p.v +} + +// AsIfaceOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to p.AsValueOf(t).Interface() +func (p pointer) AsIfaceOf(t reflect.Type) interface{} { + return p.AsValueOf(t).Interface() +} + +func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } +func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } +func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } +func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } +func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } +func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } +func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } +func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } +func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } +func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } +func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } +func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } +func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } +func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } +func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } +func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } +func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } +func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } +func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } +func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } +func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } +func (p pointer) String() *string { return p.v.Interface().(*string) } +func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } +func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } +func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } +func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } +func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } +func (p pointer) Extensions() *map[int32]ExtensionField { + return p.v.Interface().(*map[int32]ExtensionField) +} + +func (p pointer) Elem() pointer { + return pointer{v: p.v.Elem()} +} + +// PointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) PointerSlice() []pointer { + // TODO: reconsider this + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// AppendPointerSlice appends v to p, which must be a []*T. +func (p pointer) AppendPointerSlice(v pointer) { + sp := p.v.Elem() + sp.Set(reflect.Append(sp, v.v)) +} + +// SetPointer sets *p to v. 
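// A minimal standalone sketch (editor's addition, not part of the vendored upstream file):
// the purego pointer_reflect.go above addresses a struct field by reflect field index,
// while the pointer_unsafe.go variant further below uses the raw byte offset. The struct
// T here is hypothetical; both expressions yield the same *int32.
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type T struct {
	A bool
	B int32
}

func main() {
	t := &T{B: 7}

	// reflect-based addressing, roughly what the purego offset{index: 1} amounts to.
	byIndex := reflect.ValueOf(t).Elem().Field(1).Addr().Interface().(*int32)

	// byte-offset addressing, roughly what offset(unsafe.Offsetof) amounts to;
	// the uintptr arithmetic is kept in a single expression as Go requires.
	byOffset := (*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(t)) + unsafe.Offsetof(t.B)))

	fmt.Println(*byIndex, *byOffset) // 7 7
}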
+func (p pointer) SetPointer(v pointer) { + p.v.Elem().Set(v.v) +} + +func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } +func (ms *messageState) pointer() pointer { panic("not supported") } +func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } +func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } +func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } + +type atomicNilMessage struct { + once sync.Once + m messageReflectWrapper +} + +func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { + m.once.Do(func() { + m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) + m.m.mi = mi + }) + return &m.m +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go new file mode 100644 index 000000000..088aa85d4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -0,0 +1,173 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package impl + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const UnsafeEnabled = true + +// Pointer is an opaque pointer type. +type Pointer unsafe.Pointer + +// offset represents the offset to a struct field, accessible from a pointer. +// The offset is the byte offset to the field from the start of the struct. +type offset uintptr + +// offsetOf returns a field offset for the struct field. +func offsetOf(f reflect.StructField, x exporter) offset { + return offset(f.Offset) +} + +// IsValid reports whether the offset is valid. +func (f offset) IsValid() bool { return f != invalidOffset } + +// invalidOffset is an invalid field offset. +var invalidOffset = ^offset(0) + +// zeroOffset is a noop when calling pointer.Apply. +var zeroOffset = offset(0) + +// pointer is a pointer to a message struct or field. +type pointer struct{ p unsafe.Pointer } + +// pointerOf returns p as a pointer. +func pointerOf(p Pointer) pointer { + return pointer{p: unsafe.Pointer(p)} +} + +// pointerOfValue returns v as a pointer. +func pointerOfValue(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// pointerOfIface returns the pointer portion of an interface. +func pointerOfIface(v interface{}) pointer { + type ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } + return pointer{p: (*ifaceHeader)(unsafe.Pointer(&v)).Data} +} + +// IsNil reports whether the pointer is nil. +func (p pointer) IsNil() bool { + return p.p == nil +} + +// Apply adds an offset to the pointer to derive a new pointer +// to a specified field. The pointer must be valid and pointing at a struct. +func (p pointer) Apply(f offset) pointer { + if p.IsNil() { + panic("invalid nil pointer") + } + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +// AsValueOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) +func (p pointer) AsValueOf(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +// AsIfaceOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to p.AsValueOf(t).Interface() +func (p pointer) AsIfaceOf(t reflect.Type) interface{} { + // TODO: Use tricky unsafe magic to directly create ifaceHeader. 
+ return p.AsValueOf(t).Interface() +} + +func (p pointer) Bool() *bool { return (*bool)(p.p) } +func (p pointer) BoolPtr() **bool { return (**bool)(p.p) } +func (p pointer) BoolSlice() *[]bool { return (*[]bool)(p.p) } +func (p pointer) Int32() *int32 { return (*int32)(p.p) } +func (p pointer) Int32Ptr() **int32 { return (**int32)(p.p) } +func (p pointer) Int32Slice() *[]int32 { return (*[]int32)(p.p) } +func (p pointer) Int64() *int64 { return (*int64)(p.p) } +func (p pointer) Int64Ptr() **int64 { return (**int64)(p.p) } +func (p pointer) Int64Slice() *[]int64 { return (*[]int64)(p.p) } +func (p pointer) Uint32() *uint32 { return (*uint32)(p.p) } +func (p pointer) Uint32Ptr() **uint32 { return (**uint32)(p.p) } +func (p pointer) Uint32Slice() *[]uint32 { return (*[]uint32)(p.p) } +func (p pointer) Uint64() *uint64 { return (*uint64)(p.p) } +func (p pointer) Uint64Ptr() **uint64 { return (**uint64)(p.p) } +func (p pointer) Uint64Slice() *[]uint64 { return (*[]uint64)(p.p) } +func (p pointer) Float32() *float32 { return (*float32)(p.p) } +func (p pointer) Float32Ptr() **float32 { return (**float32)(p.p) } +func (p pointer) Float32Slice() *[]float32 { return (*[]float32)(p.p) } +func (p pointer) Float64() *float64 { return (*float64)(p.p) } +func (p pointer) Float64Ptr() **float64 { return (**float64)(p.p) } +func (p pointer) Float64Slice() *[]float64 { return (*[]float64)(p.p) } +func (p pointer) String() *string { return (*string)(p.p) } +func (p pointer) StringPtr() **string { return (**string)(p.p) } +func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) } +func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } +func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } +func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } +func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } + +func (p pointer) Elem() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// PointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) PointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// AppendPointerSlice appends v to p, which must be a []*T. +func (p pointer) AppendPointerSlice(v pointer) { + *(*[]pointer)(p.p) = append(*(*[]pointer)(p.p), v) +} + +// SetPointer sets *p to v. +func (p pointer) SetPointer(v pointer) { + *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) +} + +// Static check that MessageState does not exceed the size of a pointer. +const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) + +func (Export) MessageStateOf(p Pointer) *messageState { + // Super-tricky - see documentation on MessageState. + return (*messageState)(unsafe.Pointer(p)) +} +func (ms *messageState) pointer() pointer { + // Super-tricky - see documentation on MessageState. 
+ return pointer{p: unsafe.Pointer(ms)} +} +func (ms *messageState) messageInfo() *MessageInfo { + mi := ms.LoadMessageInfo() + if mi == nil { + panic("invalid nil message info; this suggests memory corruption due to a race or shallow copy on the message struct") + } + return mi +} +func (ms *messageState) LoadMessageInfo() *MessageInfo { + return (*MessageInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)))) +} +func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)), unsafe.Pointer(mi)) +} + +type atomicNilMessage struct{ p unsafe.Pointer } // p is a *messageReflectWrapper + +func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { + if p := atomic.LoadPointer(&m.p); p != nil { + return (*messageReflectWrapper)(p) + } + w := &messageReflectWrapper{mi: mi} + atomic.CompareAndSwapPointer(&m.p, nil, (unsafe.Pointer)(w)) + return (*messageReflectWrapper)(atomic.LoadPointer(&m.p)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go new file mode 100644 index 000000000..08cfb6054 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -0,0 +1,576 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "math/bits" + "reflect" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// ValidationStatus is the result of validating the wire-format encoding of a message. +type ValidationStatus int + +const ( + // ValidationUnknown indicates that unmarshaling the message might succeed or fail. + // The validator was unable to render a judgement. + // + // The only causes of this status are an aberrant message type appearing somewhere + // in the message or a failure in the extension resolver. + ValidationUnknown ValidationStatus = iota + 1 + + // ValidationInvalid indicates that unmarshaling the message will fail. + ValidationInvalid + + // ValidationValid indicates that unmarshaling the message will succeed. + ValidationValid +) + +func (v ValidationStatus) String() string { + switch v { + case ValidationUnknown: + return "ValidationUnknown" + case ValidationInvalid: + return "ValidationInvalid" + case ValidationValid: + return "ValidationValid" + default: + return fmt.Sprintf("ValidationStatus(%d)", int(v)) + } +} + +// Validate determines whether the contents of the buffer are a valid wire encoding +// of the message type. +// +// This function is exposed for testing. 
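// A minimal sketch (editor's addition, not part of the vendored upstream file): the
// impl validator is internal, but its ValidationValid/ValidationInvalid outcomes mirror
// what the public proto.Unmarshal reports for well-formed versus malformed wire data.
// wrapperspb.Int32Value is used only as a convenient message with a varint field 1.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// A complete varint field 1: unmarshaling succeeds.
	good := protowire.AppendTag(nil, 1, protowire.VarintType)
	good = protowire.AppendVarint(good, 42)
	fmt.Println(proto.Unmarshal(good, &wrapperspb.Int32Value{})) // nil error

	// A tag with no value bytes after it: unmarshaling fails.
	bad := protowire.AppendTag(nil, 1, protowire.VarintType)
	fmt.Println(proto.Unmarshal(bad, &wrapperspb.Int32Value{})) // non-nil error
}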
+func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) { + mi, ok := mt.(*MessageInfo) + if !ok { + return out, ValidationUnknown + } + if in.Resolver == nil { + in.Resolver = preg.GlobalTypes + } + o, st := mi.validate(in.Buf, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + }) + if o.initialized { + out.Flags |= piface.UnmarshalInitialized + } + return out, st +} + +type validationInfo struct { + mi *MessageInfo + typ validationType + keyType, valType validationType + + // For non-required fields, requiredBit is 0. + // + // For required fields, requiredBit's nth bit is set, where n is a + // unique index in the range [0, MessageInfo.numRequiredFields). + // + // If there are more than 64 required fields, requiredBit is 0. + requiredBit uint64 +} + +type validationType uint8 + +const ( + validationTypeOther validationType = iota + validationTypeMessage + validationTypeGroup + validationTypeMap + validationTypeRepeatedVarint + validationTypeRepeatedFixed32 + validationTypeRepeatedFixed64 + validationTypeVarint + validationTypeFixed32 + validationTypeFixed64 + validationTypeBytes + validationTypeUTF8String + validationTypeMessageSetItem +) + +func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + } + default: + vi = newValidationInfo(fd, ft) + } + if fd.Cardinality() == pref.Required { + // Avoid overflow. The required field check is done with a 64-bit mask, with + // any message containing more than 64 required fields always reported as + // potentially uninitialized, so it is not important to get a precise count + // of the required fields past 64. 
+ if mi.numRequiredFields < math.MaxUint8 { + mi.numRequiredFields++ + vi.requiredBit = 1 << (mi.numRequiredFields - 1) + } + } + return vi +} + +func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.IsList(): + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + default: + switch wireTypes[fd.Kind()] { + case protowire.VarintType: + vi.typ = validationTypeRepeatedVarint + case protowire.Fixed32Type: + vi.typ = validationTypeRepeatedFixed32 + case protowire.Fixed64Type: + vi.typ = validationTypeRepeatedFixed64 + } + } + case fd.IsMap(): + vi.typ = validationTypeMap + switch fd.MapKey().Kind() { + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.keyType = validationTypeUTF8String + } + } + switch fd.MapValue().Kind() { + case pref.MessageKind: + vi.valType = validationTypeMessage + if ft.Kind() == reflect.Map { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.valType = validationTypeUTF8String + } + } + default: + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if !fd.IsWeak() { + vi.mi = getMessageInfo(ft) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + vi.mi = getMessageInfo(ft) + case pref.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + default: + switch wireTypes[fd.Kind()] { + case protowire.VarintType: + vi.typ = validationTypeVarint + case protowire.Fixed32Type: + vi.typ = validationTypeFixed32 + case protowire.Fixed64Type: + vi.typ = validationTypeFixed64 + case protowire.BytesType: + vi.typ = validationTypeBytes + } + } + } + return vi +} + +func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, result ValidationStatus) { + mi.init() + type validationState struct { + typ validationType + keyType, valType validationType + endGroup protowire.Number + mi *MessageInfo + tail []byte + requiredMask uint64 + } + + // Pre-allocate some slots to avoid repeated slice reallocation. + states := make([]validationState, 0, 16) + states = append(states, validationState{ + typ: validationTypeMessage, + mi: mi, + }) + if groupTag > 0 { + states[0].typ = validationTypeGroup + states[0].endGroup = groupTag + } + initialized := true + start := len(b) +State: + for len(states) > 0 { + st := &states[len(states)-1] + for len(b) > 0 { + // Parse the tag (field number and wire type). 
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, ValidationInvalid + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if st.endGroup == num { + goto PopState + } + return out, ValidationInvalid + } + var vi validationInfo + switch { + case st.typ == validationTypeMap: + switch num { + case genid.MapEntry_Key_field_number: + vi.typ = st.keyType + case genid.MapEntry_Value_field_number: + vi.typ = st.valType + vi.mi = st.mi + vi.requiredBit = 1 + } + case flags.ProtoLegacy && st.mi.isMessageSet: + switch num { + case messageset.FieldItem: + vi.typ = validationTypeMessageSetItem + } + default: + var f *coderFieldInfo + if int(num) < len(st.mi.denseCoderFields) { + f = st.mi.denseCoderFields[num] + } else { + f = st.mi.coderFields[num] + } + if f != nil { + vi = f.validation + if vi.typ == validationTypeMessage && vi.mi == nil { + // Probable weak field. + // + // TODO: Consider storing the results of this lookup somewhere + // rather than recomputing it on every validation. + fd := st.mi.Desc.Fields().ByNumber(num) + if fd == nil || !fd.IsWeak() { + break + } + messageName := fd.Message().FullName() + messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + switch err { + case nil: + vi.mi, _ = messageType.(*MessageInfo) + case preg.NotFound: + vi.typ = validationTypeBytes + default: + return out, ValidationUnknown + } + } + break + } + // Possible extension field. + // + // TODO: We should return ValidationUnknown when: + // 1. The resolver is not frozen. (More extensions may be added to it.) + // 2. The resolver returns preg.NotFound. + // In this case, a type added to the resolver in the future could cause + // unmarshaling to begin failing. Supporting this requires some way to + // determine if the resolver is frozen. + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) + if err != nil && err != preg.NotFound { + return out, ValidationUnknown + } + if err == nil { + vi = getExtensionFieldInfo(xt).validation + } + } + if vi.requiredBit != 0 { + // Check that the field has a compatible wire type. + // We only need to consider non-repeated field types, + // since repeated fields (and maps) can never be required. 
+ ok := false + switch vi.typ { + case validationTypeVarint: + ok = wtyp == protowire.VarintType + case validationTypeFixed32: + ok = wtyp == protowire.Fixed32Type + case validationTypeFixed64: + ok = wtyp == protowire.Fixed64Type + case validationTypeBytes, validationTypeUTF8String, validationTypeMessage: + ok = wtyp == protowire.BytesType + case validationTypeGroup: + ok = wtyp == protowire.StartGroupType + } + if ok { + st.requiredMask |= vi.requiredBit + } + } + + switch wtyp { + case protowire.VarintType: + if len(b) >= 10 { + switch { + case b[0] < 0x80: + b = b[1:] + case b[1] < 0x80: + b = b[2:] + case b[2] < 0x80: + b = b[3:] + case b[3] < 0x80: + b = b[4:] + case b[4] < 0x80: + b = b[5:] + case b[5] < 0x80: + b = b[6:] + case b[6] < 0x80: + b = b[7:] + case b[7] < 0x80: + b = b[8:] + case b[8] < 0x80: + b = b[9:] + case b[9] < 0x80 && b[9] < 2: + b = b[10:] + default: + return out, ValidationInvalid + } + } else { + switch { + case len(b) > 0 && b[0] < 0x80: + b = b[1:] + case len(b) > 1 && b[1] < 0x80: + b = b[2:] + case len(b) > 2 && b[2] < 0x80: + b = b[3:] + case len(b) > 3 && b[3] < 0x80: + b = b[4:] + case len(b) > 4 && b[4] < 0x80: + b = b[5:] + case len(b) > 5 && b[5] < 0x80: + b = b[6:] + case len(b) > 6 && b[6] < 0x80: + b = b[7:] + case len(b) > 7 && b[7] < 0x80: + b = b[8:] + case len(b) > 8 && b[8] < 0x80: + b = b[9:] + case len(b) > 9 && b[9] < 2: + b = b[10:] + default: + return out, ValidationInvalid + } + } + continue State + case protowire.BytesType: + var size uint64 + if len(b) >= 1 && b[0] < 0x80 { + size = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + size = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + size, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + if size > uint64(len(b)) { + return out, ValidationInvalid + } + v := b[:size] + b = b[size:] + switch vi.typ { + case validationTypeMessage: + if vi.mi == nil { + return out, ValidationUnknown + } + vi.mi.init() + fallthrough + case validationTypeMap: + if vi.mi != nil { + vi.mi.init() + } + states = append(states, validationState{ + typ: vi.typ, + keyType: vi.keyType, + valType: vi.valType, + mi: vi.mi, + tail: b, + }) + b = v + continue State + case validationTypeRepeatedVarint: + // Packed field. + for len(v) > 0 { + _, n := protowire.ConsumeVarint(v) + if n < 0 { + return out, ValidationInvalid + } + v = v[n:] + } + case validationTypeRepeatedFixed32: + // Packed field. + if len(v)%4 != 0 { + return out, ValidationInvalid + } + case validationTypeRepeatedFixed64: + // Packed field. 
+ if len(v)%8 != 0 { + return out, ValidationInvalid + } + case validationTypeUTF8String: + if !utf8.Valid(v) { + return out, ValidationInvalid + } + } + case protowire.Fixed32Type: + if len(b) < 4 { + return out, ValidationInvalid + } + b = b[4:] + case protowire.Fixed64Type: + if len(b) < 8 { + return out, ValidationInvalid + } + b = b[8:] + case protowire.StartGroupType: + switch { + case vi.typ == validationTypeGroup: + if vi.mi == nil { + return out, ValidationUnknown + } + vi.mi.init() + states = append(states, validationState{ + typ: validationTypeGroup, + mi: vi.mi, + endGroup: num, + }) + continue State + case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem: + typeid, v, n, err := messageset.ConsumeFieldValue(b, false) + if err != nil { + return out, ValidationInvalid + } + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) + switch { + case err == preg.NotFound: + b = b[n:] + case err != nil: + return out, ValidationUnknown + default: + xvi := getExtensionFieldInfo(xt).validation + if xvi.mi != nil { + xvi.mi.init() + } + states = append(states, validationState{ + typ: xvi.typ, + mi: xvi.mi, + tail: b[n:], + }) + b = v + continue State + } + default: + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + default: + return out, ValidationInvalid + } + } + if st.endGroup != 0 { + return out, ValidationInvalid + } + if len(b) != 0 { + return out, ValidationInvalid + } + b = st.tail + PopState: + numRequiredFields := 0 + switch st.typ { + case validationTypeMessage, validationTypeGroup: + numRequiredFields = int(st.mi.numRequiredFields) + case validationTypeMap: + // If this is a map field with a message value that contains + // required fields, require that the value be present. + if st.mi != nil && st.mi.numRequiredFields > 0 { + numRequiredFields = 1 + } + } + // If there are more than 64 required fields, this check will + // always fail and we will report that the message is potentially + // uninitialized. + if numRequiredFields > 0 && bits.OnesCount64(st.requiredMask) != numRequiredFields { + initialized = false + } + states = states[:len(states)-1] + } + out.n = start - len(b) + if initialized { + out.initialized = true + } + return out, ValidationValid +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go new file mode 100644 index 000000000..009cbefd1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go @@ -0,0 +1,74 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// weakFields adds methods to the exported WeakFields type for internal use. +// +// The exported type is an alias to an unnamed type, so methods can't be +// defined directly on it. 
+type weakFields WeakFields + +func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) { + m, ok := w[int32(num)] + return m, ok +} + +func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) { + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m +} + +func (w *weakFields) clear(num pref.FieldNumber) { + delete(*w, int32(num)) +} + +func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool { + _, ok := w[int32(num)] + return ok +} + +func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) { + delete(*w, int32(num)) +} + +func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage { + if m, ok := w[int32(num)]; ok { + return m + } + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { + panic(fmt.Sprintf("message %v for weak field is not linked in", name)) + } + return mt.Zero().Interface() +} + +func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) { + if m != nil { + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { + panic(fmt.Sprintf("message %v for weak field is not linked in", name)) + } + if mt != m.ProtoReflect().Type() { + panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) + } + } + if m == nil || !m.ProtoReflect().IsValid() { + delete(*w, int32(num)) + return + } + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m +} diff --git a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go new file mode 100644 index 000000000..a3de1cf32 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mapsort provides sorted access to maps. +package mapsort + +import ( + "sort" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Range iterates over every map entry in sorted key order, +// calling f for each key and value encountered. +func Range(mapv protoreflect.Map, keyKind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { + var keys []protoreflect.MapKey + mapv.Range(func(key protoreflect.MapKey, _ protoreflect.Value) bool { + keys = append(keys, key) + return true + }) + sort.Slice(keys, func(i, j int) bool { + switch keyKind { + case protoreflect.BoolKind: + return !keys[i].Bool() && keys[j].Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, + protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return keys[i].Int() < keys[j].Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, + protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return keys[i].Uint() < keys[j].Uint() + case protoreflect.StringKind: + return keys[i].String() < keys[j].String() + default: + panic("invalid kind: " + keyKind.String()) + } + }) + for _, key := range keys { + if !f(key, mapv.Get(key)) { + break + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/pragma/pragma.go b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go new file mode 100644 index 000000000..49dc4fcd9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pragma provides types that can be embedded into a struct to +// statically enforce or prevent certain language properties. +package pragma + +import "sync" + +// NoUnkeyedLiterals can be embedded in a struct to prevent unkeyed literals. +type NoUnkeyedLiterals struct{} + +// DoNotImplement can be embedded in an interface to prevent trivial +// implementations of the interface. +// +// This is useful to prevent unauthorized implementations of an interface +// so that it can be extended in the future for any protobuf language changes. +type DoNotImplement interface{ ProtoInternal(DoNotImplement) } + +// DoNotCompare can be embedded in a struct to prevent comparability. +type DoNotCompare [0]func() + +// DoNotCopy can be embedded in a struct to help prevent shallow copies. +// This does not rely on a Go language feature, but rather a special case +// within the vet checker. +// +// See https://golang.org/issues/8005. +type DoNotCopy [0]sync.Mutex diff --git a/vendor/google.golang.org/protobuf/internal/set/ints.go b/vendor/google.golang.org/protobuf/internal/set/ints.go new file mode 100644 index 000000000..d3d7f89ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/set/ints.go @@ -0,0 +1,58 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package set provides simple set data structures for uint64s. +package set + +import "math/bits" + +// int64s represents a set of integers within the range of 0..63. +type int64s uint64 + +func (bs *int64s) Len() int { + return bits.OnesCount64(uint64(*bs)) +} +func (bs *int64s) Has(n uint64) bool { + return uint64(*bs)&(uint64(1)<<n) > 0 +} +func (bs *int64s) Set(n uint64) { + *(*uint64)(bs) |= uint64(1) << n +} +func (bs *int64s) Clear(n uint64) { + *(*uint64)(bs) &^= uint64(1) << n +} + +// Ints represents a set of integers within the range of 0..math.MaxUint64. +type Ints struct { + lo int64s + hi map[uint64]struct{} +} + +func (bs *Ints) Len() int { + return bs.lo.Len() + len(bs.hi) +} +func (bs *Ints) Has(n uint64) bool { + if n < 64 { + return bs.lo.Has(n) + } + _, ok := bs.hi[n] + return ok +} +func (bs *Ints) Set(n uint64) { + if n < 64 { + bs.lo.Set(n) + return + } + if bs.hi == nil { + bs.hi = make(map[uint64]struct{}) + } + bs.hi[n] = struct{}{} +} +func (bs *Ints) Clear(n uint64) { + if n < 64 { + bs.lo.Clear(n) + return + } + delete(bs.hi, n) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go new file mode 100644 index 000000000..0b74e7658 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go @@ -0,0 +1,196 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package strs provides string manipulation functionality specific to protobuf. +package strs + +import ( + "go/token" + "strings" + "unicode" + "unicode/utf8" + + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// EnforceUTF8 reports whether to enforce strict UTF-8 validation.
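// A minimal sketch (editor's addition, not part of the vendored upstream file): the
// small-value half of set.Ints above is just a uint64 bit set, using the same
// shift/test/clear pattern as its Has, Set, and Clear methods.
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	var lo uint64

	lo |= 1 << 3                      // Set(3)
	lo |= 1 << 40                     // Set(40)
	fmt.Println(lo&(1<<3) > 0)        // Has(3): true
	lo &^= 1 << 3                     // Clear(3)
	fmt.Println(lo&(1<<3) > 0)        // Has(3): false
	fmt.Println(bits.OnesCount64(lo)) // Len(): 1
}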
+func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { + if flags.ProtoLegacy { + if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { + return fd.EnforceUTF8() + } + } + return fd.Syntax() == protoreflect.Proto3 +} + +// GoCamelCase camel-cases a protobuf name for use as a Go identifier. +// +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +func GoCamelCase(s string) string { + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + var b []byte + for i := 0; i < len(s); i++ { + c := s[i] + switch { + case c == '.' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '.' in ".{{lowercase}}". + case c == '.': + b = append(b, '_') // convert '.' to '_' + case c == '_' && (i == 0 || s[i-1] == '.'): + // Convert initial '_' to ensure we start with a capital letter. + // Do the same for '_' after '.' to match historic behavior. + b = append(b, 'X') // convert '_' to 'X' + case c == '_' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '_' in "_{{lowercase}}". + case isASCIIDigit(c): + b = append(b, c) + default: + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c -= 'a' - 'A' // convert lowercase to uppercase + } + b = append(b, c) + + // Accept lower case sequence that follows. + for ; i+1 < len(s) && isASCIILower(s[i+1]); i++ { + b = append(b, s[i+1]) + } + } + } + return string(b) +} + +// GoSanitized converts a string to a valid Go identifier. +func GoSanitized(s string) string { + // Sanitize the input to the set of valid characters, + // which must be '_' or be in the Unicode L or N categories. + s = strings.Map(func(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + return '_' + }, s) + + // Prepend '_' in the event of a Go keyword conflict or if + // the identifier is invalid (does not start in the Unicode L category). + r, _ := utf8.DecodeRuneInString(s) + if token.Lookup(s).IsKeyword() || !unicode.IsLetter(r) { + return "_" + s + } + return s +} + +// JSONCamelCase converts a snake_case identifier to a camelCase identifier, +// according to the protobuf JSON specification. +func JSONCamelCase(s string) string { + var b []byte + var wasUnderscore bool + for i := 0; i < len(s); i++ { // proto identifiers are always ASCII + c := s[i] + if c != '_' { + if wasUnderscore && isASCIILower(c) { + c -= 'a' - 'A' // convert to uppercase + } + b = append(b, c) + } + wasUnderscore = c == '_' + } + return string(b) +} + +// JSONSnakeCase converts a camelCase identifier to a snake_case identifier, +// according to the protobuf JSON specification. +func JSONSnakeCase(s string) string { + var b []byte + for i := 0; i < len(s); i++ { // proto identifiers are always ASCII + c := s[i] + if isASCIIUpper(c) { + b = append(b, '_') + c += 'a' - 'A' // convert to lowercase + } + b = append(b, c) + } + return string(b) +} + +// MapEntryName derives the name of the map entry message given the field name. 
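// A minimal sketch (editor's addition, not part of the vendored upstream file): a
// re-derivation of the snake_case to camelCase mapping described for JSONCamelCase
// above, with a couple of expected outputs. The vendored strs package is internal
// and cannot be imported directly, so this standalone copy is for illustration only.
package main

import "fmt"

func jsonCamelCase(s string) string {
	var b []byte
	var wasUnderscore bool
	for i := 0; i < len(s); i++ { // proto identifiers are ASCII
		c := s[i]
		if c != '_' {
			if wasUnderscore && 'a' <= c && c <= 'z' {
				c -= 'a' - 'A' // uppercase the letter that follows an underscore
			}
			b = append(b, c)
		}
		wasUnderscore = c == '_'
	}
	return string(b)
}

func main() {
	fmt.Println(jsonCamelCase("foo_bar_baz")) // fooBarBaz
	fmt.Println(jsonCamelCase("foo_1bar"))    // foo1bar (digits are not uppercased)
}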
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:254-276,6057 +func MapEntryName(s string) string { + var b []byte + upperNext := true + for _, c := range s { + switch { + case c == '_': + upperNext = true + case upperNext: + b = append(b, byte(unicode.ToUpper(c))) + upperNext = false + default: + b = append(b, byte(c)) + } + } + b = append(b, "Entry"...) + return string(b) +} + +// EnumValueName derives the camel-cased enum value name. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:297-313 +func EnumValueName(s string) string { + var b []byte + upperNext := true + for _, c := range s { + switch { + case c == '_': + upperNext = true + case upperNext: + b = append(b, byte(unicode.ToUpper(c))) + upperNext = false + default: + b = append(b, byte(unicode.ToLower(c))) + upperNext = false + } + } + return string(b) +} + +// TrimEnumPrefix trims the enum name prefix from an enum value name, +// where the prefix is all lowercase without underscores. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:330-375 +func TrimEnumPrefix(s, prefix string) string { + s0 := s // original input + for len(s) > 0 && len(prefix) > 0 { + if s[0] == '_' { + s = s[1:] + continue + } + if unicode.ToLower(rune(s[0])) != rune(prefix[0]) { + return s0 // no prefix match + } + s, prefix = s[1:], prefix[1:] + } + if len(prefix) > 0 { + return s0 // no prefix match + } + s = strings.TrimLeft(s, "_") + if len(s) == 0 { + return s0 // avoid returning empty string + } + return s +} + +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} +func isASCIIUpper(c byte) bool { + return 'A' <= c && c <= 'Z' +} +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go new file mode 100644 index 000000000..85e074c97 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego appengine + +package strs + +import pref "google.golang.org/protobuf/reflect/protoreflect" + +func UnsafeString(b []byte) string { + return string(b) +} + +func UnsafeBytes(s string) []byte { + return []byte(s) +} + +type Builder struct{} + +func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { + return prefix.Append(name) +} + +func (*Builder) MakeString(b []byte) string { + return string(b) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go new file mode 100644 index 000000000..2160c7019 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -0,0 +1,94 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package strs + +import ( + "unsafe" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. 
The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) (s string) { + src := (*sliceHeader)(unsafe.Pointer(&b)) + dst := (*stringHeader)(unsafe.Pointer(&s)) + dst.Data = src.Data + dst.Len = src.Len + return s +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) (b []byte) { + src := (*stringHeader)(unsafe.Pointer(&s)) + dst := (*sliceHeader)(unsafe.Pointer(&b)) + dst.Data = src.Data + dst.Len = src.Len + dst.Cap = src.Len + return b +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return pref.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go new file mode 100644 index 000000000..72cf770b4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package version records versioning information about this module. +package version + +import ( + "fmt" + "strings" +) + +// These constants determine the current version of this module. +// +// +// For our release process, we enforce the following rules: +// * Tagged releases use a tag that is identical to String. +// * Tagged releases never reference a commit where the String +// contains "devel". +// * The set of all commits in this repository where String +// does not contain "devel" must have a unique String. +// +// +// Steps for tagging a new release: +// 1. Create a new CL. +// +// 2. Update Minor, Patch, and/or PreRelease as necessary. +// PreRelease must not contain the string "devel". +// +// 3. Since the last released minor version, have there been any changes to +// generator that relies on new functionality in the runtime? +// If yes, then increment RequiredGenerated. +// +// 4. Since the last released minor version, have there been any changes to +// the runtime that removes support for old .pb.go source code? +// If yes, then increment SupportMinimum. +// +// 5. 
Send out the CL for review and submit it. +// Note that the next CL in step 8 must be submitted after this CL +// without any other CLs in-between. +// +// 6. Tag a new version, where the tag is is the current String. +// +// 7. Write release notes for all notable changes +// between this release and the last release. +// +// 8. Create a new CL. +// +// 9. Update PreRelease to include the string "devel". +// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// +// 10. Send out the CL for review and submit it. +const ( + Major = 1 + Minor = 25 + Patch = 0 + PreRelease = "" +) + +// String formats the version string for this module in semver format. +// +// Examples: +// v1.20.1 +// v1.21.0-rc.1 +func String() string { + v := fmt.Sprintf("v%d.%d.%d", Major, Minor, Patch) + if PreRelease != "" { + v += "-" + PreRelease + + // TODO: Add metadata about the commit or build hash. + // See https://golang.org/issue/29814 + // See https://golang.org/issue/33533 + var metadata string + if strings.Contains(PreRelease, "devel") && metadata != "" { + v += "+" + metadata + } + } + return v +} diff --git a/vendor/google.golang.org/protobuf/proto/checkinit.go b/vendor/google.golang.org/protobuf/proto/checkinit.go new file mode 100644 index 000000000..3e9a6a2f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/checkinit.go @@ -0,0 +1,71 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// CheckInitialized returns an error if any required fields in m are not set. +func CheckInitialized(m Message) error { + // Treat a nil message interface as an "untyped" empty message, + // which we assume to have no required fields. + if m == nil { + return nil + } + + return checkInitialized(m.ProtoReflect()) +} + +// CheckInitialized returns an error if any required fields in m are not set. 
+func checkInitialized(m protoreflect.Message) error { + if methods := protoMethods(m); methods != nil && methods.CheckInitialized != nil { + _, err := methods.CheckInitialized(protoiface.CheckInitializedInput{ + Message: m, + }) + return err + } + return checkInitializedSlow(m) +} + +func checkInitializedSlow(m protoreflect.Message) error { + md := m.Descriptor() + fds := md.Fields() + for i, nums := 0, md.RequiredNumbers(); i < nums.Len(); i++ { + fd := fds.ByNumber(nums.Get(i)) + if !m.Has(fd) { + return errors.RequiredNotSet(string(fd.FullName())) + } + } + var err error + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsList(): + if fd.Message() == nil { + return true + } + for i, list := 0, v.List(); i < list.Len() && err == nil; i++ { + err = checkInitialized(list.Get(i).Message()) + } + case fd.IsMap(): + if fd.MapValue().Message() == nil { + return true + } + v.Map().Range(func(key protoreflect.MapKey, v protoreflect.Value) bool { + err = checkInitialized(v.Message()) + return err == nil + }) + default: + if fd.Message() == nil { + return true + } + err = checkInitialized(v.Message()) + } + return err == nil + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go new file mode 100644 index 000000000..42fc5195e --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -0,0 +1,274 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" +) + +// UnmarshalOptions configures the unmarshaler. +// +// Example usage: +// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Merge merges the input into the destination message. + // The default behavior is to always reset the message before unmarshaling, + // unless Merge is specified. + Merge bool + + // AllowPartial accepts input for messages that will result in missing + // required fields. If AllowPartial is false (the default), Unmarshal will + // return an error if there are any missing required fields. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } +} + +// Unmarshal parses the wire-format message in b and places the result in m. +func Unmarshal(b []byte, m Message) error { + _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) + return err +} + +// Unmarshal parses the wire-format message in b and places the result in m. 
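// A minimal sketch (editor's addition, not part of the vendored upstream file): how the
// UnmarshalOptions defined above behave for a buffer carrying a field the target message
// does not declare. Field number 99 is arbitrary; google.protobuf.Empty has no fields.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Encode a varint field with number 99, which Empty lacks.
	b := protowire.AppendTag(nil, 99, protowire.VarintType)
	b = protowire.AppendVarint(b, 7)

	var kept emptypb.Empty
	if err := proto.Unmarshal(b, &kept); err != nil {
		panic(err)
	}
	fmt.Println(len(kept.ProtoReflect().GetUnknown())) // > 0: bytes retained as unknown fields

	var dropped emptypb.Empty
	if err := (proto.UnmarshalOptions{DiscardUnknown: true}).Unmarshal(b, &dropped); err != nil {
		panic(err)
	}
	fmt.Println(len(dropped.ProtoReflect().GetUnknown())) // 0: unknown field discarded
}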
+func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + _, err := o.unmarshal(b, m.ProtoReflect()) + return err +} + +// UnmarshalState parses a wire-format message and places the result in m. +// +// This method permits fine-grained control over the unmarshaler. +// Most users should use Unmarshal instead. +func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + return o.unmarshal(in.Buf, in.Message) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) { + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + if !o.Merge { + Reset(m.Interface()) + } + allowPartial := o.AllowPartial + o.Merge = true + o.AllowPartial = true + methods := protoMethods(m) + if methods != nil && methods.Unmarshal != nil && + !(o.DiscardUnknown && methods.Flags&protoiface.SupportUnmarshalDiscardUnknown == 0) { + in := protoiface.UnmarshalInput{ + Message: m, + Buf: b, + Resolver: o.Resolver, + } + if o.DiscardUnknown { + in.Flags |= protoiface.UnmarshalDiscardUnknown + } + out, err = methods.Unmarshal(in) + } else { + err = o.unmarshalMessageSlow(b, m) + } + if err != nil { + return out, err + } + if allowPartial || (out.Flags&protoiface.UnmarshalInitialized != 0) { + return out, nil + } + return out, checkInitialized(m) +} + +func (o UnmarshalOptions) unmarshalMessage(b []byte, m protoreflect.Message) error { + _, err := o.unmarshal(b, m) + return err +} + +func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error { + md := m.Descriptor() + if messageset.IsMessageSet(md) { + return o.unmarshalMessageSet(b, m) + } + fields := md.Fields() + for len(b) > 0 { + // Parse the tag (field number and wire type). + num, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return protowire.ParseError(tagLen) + } + if num > protowire.MaxValidNumber { + return errors.New("invalid field number") + } + + // Find the field descriptor for this field number. + fd := fields.ByNumber(num) + if fd == nil && md.ExtensionRanges().Has(num) { + extType, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) + if err != nil && err != protoregistry.NotFound { + return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) + } + if extType != nil { + fd = extType.TypeDescriptor() + } + } + var err error + if fd == nil { + err = errUnknown + } else if flags.ProtoLegacy { + if fd.IsWeak() && fd.Message().IsPlaceholder() { + err = errUnknown // weak referent is not linked in + } + } + + // Parse the field value. 
+ var valLen int + switch { + case err != nil: + case fd.IsList(): + valLen, err = o.unmarshalList(b[tagLen:], wtyp, m.Mutable(fd).List(), fd) + case fd.IsMap(): + valLen, err = o.unmarshalMap(b[tagLen:], wtyp, m.Mutable(fd).Map(), fd) + default: + valLen, err = o.unmarshalSingular(b[tagLen:], wtyp, m, fd) + } + if err != nil { + if err != errUnknown { + return err + } + valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:]) + if valLen < 0 { + return protowire.ParseError(valLen) + } + if !o.DiscardUnknown { + m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...)) + } + } + b = b[tagLen+valLen:] + } + return nil +} + +func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m protoreflect.Message, fd protoreflect.FieldDescriptor) (n int, err error) { + v, n, err := o.unmarshalScalar(b, wtyp, fd) + if err != nil { + return 0, err + } + switch fd.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + m2 := m.Mutable(fd).Message() + if err := o.unmarshalMessage(v.Bytes(), m2); err != nil { + return n, err + } + default: + // Non-message scalars replace the previous value. + m.Set(fd, v) + } + return n, nil +} + +func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) { + if wtyp != protowire.BytesType { + return 0, errUnknown + } + b, n = protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + var ( + keyField = fd.MapKey() + valField = fd.MapValue() + key protoreflect.Value + val protoreflect.Value + haveKey bool + haveVal bool + ) + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + val = mapv.NewValue() + } + // Map entries are represented as a two-element message with fields + // containing the key and value. + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + if num > protowire.MaxValidNumber { + return 0, errors.New("invalid field number") + } + b = b[n:] + err = errUnknown + switch num { + case genid.MapEntry_Key_field_number: + key, n, err = o.unmarshalScalar(b, wtyp, keyField) + if err != nil { + break + } + haveKey = true + case genid.MapEntry_Value_field_number: + var v protoreflect.Value + v, n, err = o.unmarshalScalar(b, wtyp, valField) + if err != nil { + break + } + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + if err := o.unmarshalMessage(v.Bytes(), val.Message()); err != nil { + return 0, err + } + default: + val = v + } + haveVal = true + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return 0, protowire.ParseError(n) + } + } else if err != nil { + return 0, err + } + b = b[n:] + } + // Every map entry should have entries for key and value, but this is not strictly required. + if !haveKey { + key = keyField.Default() + } + if !haveVal { + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + default: + val = valField.Default() + } + } + mapv.Set(key.MapKey(), val) + return n, nil +} + +// errUnknown is used internally to indicate fields which should be added +// to the unknown field set of a message. It is never returned from an exported +// function. 
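// A minimal sketch (editor's addition, not part of the vendored upstream file): the
// two-field map-entry encoding that unmarshalMap parses above, built by hand with
// protowire. google.protobuf.Struct is used because its fields member is a
// map<string, Value> at field number 1; map entries use key = field 1, value = field 2,
// and Value.string_value is field 3.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Inner google.protobuf.Value message carrying string_value (field 3).
	val := protowire.AppendTag(nil, 3, protowire.BytesType)
	val = protowire.AppendString(val, "hi")

	// Map entry: key (field 1, string) and value (field 2, message).
	entry := protowire.AppendTag(nil, 1, protowire.BytesType)
	entry = protowire.AppendString(entry, "greeting")
	entry = protowire.AppendTag(entry, 2, protowire.BytesType)
	entry = protowire.AppendBytes(entry, val)

	// Struct.fields map at field number 1; each entry is a nested message.
	buf := protowire.AppendTag(nil, 1, protowire.BytesType)
	buf = protowire.AppendBytes(buf, entry)

	var s structpb.Struct
	if err := proto.Unmarshal(buf, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Fields["greeting"].GetStringValue()) // hi
}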
+var errUnknown = errors.New("BUG: internal error (unknown)") diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go new file mode 100644 index 000000000..d6dc904dc --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go @@ -0,0 +1,603 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package proto + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// unmarshalScalar decodes a value of the given kind. +// +// Message values are decoded into a []byte which aliases the input data. +func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd protoreflect.FieldDescriptor) (val protoreflect.Value, n int, err error) { + switch fd.Kind() { + case protoreflect.BoolKind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil + case protoreflect.EnumKind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil + case protoreflect.Int32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt32(int32(v)), n, nil + case protoreflect.Sint32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil + case protoreflect.Uint32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfUint32(uint32(v)), n, nil + case protoreflect.Int64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt64(int64(v)), n, nil + case protoreflect.Sint64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil + case protoreflect.Uint64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfUint64(v), n, nil + case protoreflect.Sfixed32Kind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt32(int32(v)), n, nil + case protoreflect.Fixed32Kind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := 
protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfUint32(uint32(v)), n, nil + case protoreflect.FloatKind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil + case protoreflect.Sfixed64Kind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt64(int64(v)), n, nil + case protoreflect.Fixed64Kind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfUint64(v), n, nil + case protoreflect.DoubleKind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil + case protoreflect.StringKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + if strs.EnforceUTF8(fd) && !utf8.Valid(v) { + return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName())) + } + return protoreflect.ValueOfString(string(v)), n, nil + case protoreflect.BytesKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil + case protoreflect.MessageKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfBytes(v), n, nil + case protoreflect.GroupKind: + if wtyp != protowire.StartGroupType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeGroup(fd.Number(), b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfBytes(v), n, nil + default: + return val, 0, errUnknown + } +} + +func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list protoreflect.List, fd protoreflect.FieldDescriptor) (n int, err error) { + switch fd.Kind() { + case protoreflect.BoolKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + return n, nil + case protoreflect.EnumKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + 
return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + return n, nil + case protoreflect.Int32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + return n, nil + case protoreflect.Sint32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + return n, nil + case protoreflect.Uint32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint32(uint32(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + return n, nil + case protoreflect.Int64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(int64(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + return n, nil + case protoreflect.Sint64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + return n, nil + case protoreflect.Uint64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + 
list.Append(protoreflect.ValueOfUint64(v)) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + return n, nil + case protoreflect.Sfixed32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(v))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + return n, nil + case protoreflect.Fixed32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint32(uint32(v))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + return n, nil + case protoreflect.FloatKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + return n, nil + case protoreflect.Sfixed64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(int64(v))) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + return n, nil + case protoreflect.Fixed64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint64(v)) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + return n, nil + case protoreflect.DoubleKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + 
list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + return n, nil + case protoreflect.StringKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + if strs.EnforceUTF8(fd) && !utf8.Valid(v) { + return 0, errors.InvalidUTF8(string(fd.FullName())) + } + list.Append(protoreflect.ValueOfString(string(v))) + return n, nil + case protoreflect.BytesKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) + return n, nil + case protoreflect.MessageKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + m := list.NewElement() + if err := o.unmarshalMessage(v, m.Message()); err != nil { + return 0, err + } + list.Append(m) + return n, nil + case protoreflect.GroupKind: + if wtyp != protowire.StartGroupType { + return 0, errUnknown + } + v, n := protowire.ConsumeGroup(fd.Number(), b) + if n < 0 { + return 0, protowire.ParseError(n) + } + m := list.NewElement() + if err := o.unmarshalMessage(v, m.Message()); err != nil { + return 0, err + } + list.Append(m) + return n, nil + default: + return 0, errUnknown + } +} + +// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. +var emptyBuf [0]byte diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go new file mode 100644 index 000000000..c52d8c4ab --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -0,0 +1,94 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functions operating on protocol buffer messages. +// +// For documentation on protocol buffers in general, see: +// +// https://developers.google.com/protocol-buffers +// +// For a tutorial on using protocol buffers with Go, see: +// +// https://developers.google.com/protocol-buffers/docs/gotutorial +// +// For a guide to generated Go protocol buffer code, see: +// +// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// +// +// Binary serialization +// +// This package contains functions to convert to and from the wire format, +// an efficient binary serialization of protocol buffers. +// +// • Size reports the size of a message in the wire format. +// +// • Marshal converts a message to the wire format. +// The MarshalOptions type provides more control over wire marshaling. +// +// • Unmarshal converts a message from the wire format. +// The UnmarshalOptions type provides more control over wire unmarshaling. +// +// +// Basic message operations +// +// • Clone makes a deep copy of a message. +// +// • Merge merges the content of a message into another. +// +// • Equal compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// "google.golang.org/protobuf/testing/protocmp". +// +// • Reset clears the content of a message. 
+// +// • CheckInitialized reports whether all required fields in a message are set. +// +// +// Optional scalar constructors +// +// The API for some generated messages represents optional scalar fields +// as pointers to a value. For example, an optional string field has the +// Go type *string. +// +// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. +// +// Generated enum types usually have an Enum method which performs the +// same operation. +// +// Optional scalar fields are only supported in proto2. +// +// +// Extension accessors +// +// • HasExtension, GetExtension, SetExtension, and ClearExtension +// access extension field values in a protocol buffer message. +// +// Extension fields are only supported in proto2. +// +// +// Related packages +// +// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to +// and from JSON. +// +// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to +// and from the text format. +// +// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a +// reflection interface for protocol buffer data types. +// +// • Package "google.golang.org/protobuf/testing/protocmp" provides features +// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" +// package. +// +// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. +// +// This module contains additional packages for more specialized use cases. +// Consult the individual package documentation for details. +package proto diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go new file mode 100644 index 000000000..7b47a1180 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -0,0 +1,346 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/fieldsort" + "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// MarshalOptions configures the marshaler. +// +// Example usage: +// b, err := MarshalOptions{Deterministic: true}.Marshal(m) +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return an error if there are any missing required fields. + AllowPartial bool + + // Deterministic controls whether the same message will always be + // serialized to the same bytes within the same binary. + // + // Setting this option guarantees that repeated serialization of + // the same message will return the same bytes, and that different + // processes of the same binary (which may be executing on different + // machines) will serialize equal messages to the same bytes. 
+ // It has no effect on the resulting size of the encoded message compared + // to a non-deterministic marshal. + // + // Note that the deterministic serialization is NOT canonical across + // languages. It is not guaranteed to remain stable over time. It is + // unstable across different builds with schema changes due to unknown + // fields. Users who need canonical serialization (e.g., persistent + // storage in a canonical form, fingerprinting, etc.) must define + // their own canonicalization specification and implement their own + // serializer rather than relying on this API. + // + // If deterministic serialization is requested, map entries will be + // sorted by keys in lexicographical order. This is an implementation + // detail and subject to change. + Deterministic bool + + // UseCachedSize indicates that the result of a previous Size call + // may be reused. + // + // Setting this option asserts that: + // + // 1. Size has previously been called on this message with identical + // options (except for UseCachedSize itself). + // + // 2. The message and all its submessages have not changed in any + // way since the Size call. + // + // If either of these invariants is violated, + // the results are undefined and may include panics or corrupted output. + // + // Implementations MAY take this option into account to provide + // better performance, but there is no guarantee that they will do so. + // There is absolutely no guarantee that Size followed by Marshal with + // UseCachedSize set will perform equivalently to Marshal alone. + UseCachedSize bool +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to output. + if m == nil { + return nil, nil + } + + out, err := MarshalOptions{}.marshal(nil, m.ProtoReflect()) + if len(out.Buf) == 0 && err == nil { + out.Buf = emptyBytesForMessage(m) + } + return out.Buf, err +} + +// Marshal returns the wire-format encoding of m. +func (o MarshalOptions) Marshal(m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to output. + if m == nil { + return nil, nil + } + + out, err := o.marshal(nil, m.ProtoReflect()) + if len(out.Buf) == 0 && err == nil { + out.Buf = emptyBytesForMessage(m) + } + return out.Buf, err +} + +// emptyBytesForMessage returns a nil buffer if and only if m is invalid, +// otherwise it returns a non-nil empty buffer. +// +// This is to assist the edge-case where user-code does the following: +// m1.OptionalBytes, _ = proto.Marshal(m2) +// where they expect the proto2 "optional_bytes" field to be populated +// if and only if m2 is a valid message. +func emptyBytesForMessage(m Message) []byte { + if m == nil || !m.ProtoReflect().IsValid() { + return nil + } + return emptyBuf[:] +} + +// MarshalAppend appends the wire-format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to append. + if m == nil { + return b, nil + } + + out, err := o.marshal(b, m.ProtoReflect()) + return out.Buf, err +} + +// MarshalState returns the wire-format encoding of a message. +// +// This method permits fine-grained control over the marshaler. +// Most users should use Marshal instead.
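As a quick orientation for reviewers of this vendored package (a sketch, not part of the upstream diff): a minimal round-trip using MarshalOptions and UnmarshalOptions. It assumes the well-known wrapperspb types, which may or may not be present in this vendor tree.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	msg := wrapperspb.String("hello")

	// Deterministic marshaling sorts map entries by key; the output is
	// stable only within the same binary, not across languages or releases.
	b, err := proto.MarshalOptions{Deterministic: true}.Marshal(msg)
	if err != nil {
		panic(err)
	}

	out := &wrapperspb.StringValue{}
	// DiscardUnknown drops unrecognized fields instead of retaining them.
	if err := proto.UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetValue()) // hello
}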
+func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + return o.marshal(in.Buf, in.Message) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. +func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) { + allowPartial := o.AllowPartial + o.AllowPartial = true + if methods := protoMethods(m); methods != nil && methods.Marshal != nil && + !(o.Deterministic && methods.Flags&protoiface.SupportMarshalDeterministic == 0) { + in := protoiface.MarshalInput{ + Message: m, + Buf: b, + } + if o.Deterministic { + in.Flags |= protoiface.MarshalDeterministic + } + if o.UseCachedSize { + in.Flags |= protoiface.MarshalUseCachedSize + } + if methods.Size != nil { + sout := methods.Size(protoiface.SizeInput{ + Message: m, + Flags: in.Flags, + }) + if cap(b) < len(b)+sout.Size { + in.Buf = make([]byte, len(b), growcap(cap(b), len(b)+sout.Size)) + copy(in.Buf, b) + } + in.Flags |= protoiface.MarshalUseCachedSize + } + out, err = methods.Marshal(in) + } else { + out.Buf, err = o.marshalMessageSlow(b, m) + } + if err != nil { + return out, err + } + if allowPartial { + return out, nil + } + return out, checkInitialized(m) +} + +func (o MarshalOptions) marshalMessage(b []byte, m protoreflect.Message) ([]byte, error) { + out, err := o.marshal(b, m) + return out.Buf, err +} + +// growcap scales up the capacity of a slice. +// +// Given a slice with a current capacity of oldcap and a desired +// capacity of wantcap, growcap returns a new capacity >= wantcap. +// +// The algorithm is mostly identical to the one used by append as of Go 1.14. +func growcap(oldcap, wantcap int) (newcap int) { + if wantcap > oldcap*2 { + newcap = wantcap + } else if oldcap < 1024 { + // The Go 1.14 runtime takes this case when len(s) < 1024, + // not when cap(s) < 1024. The difference doesn't seem + // significant here. + newcap = oldcap * 2 + } else { + newcap = oldcap + for 0 < newcap && newcap < wantcap { + newcap += newcap / 4 + } + if newcap <= 0 { + newcap = wantcap + } + } + return newcap +} + +func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) { + if messageset.IsMessageSet(m.Descriptor()) { + return o.marshalMessageSet(b, m) + } + // There are many choices for what order we visit fields in. The default one here + // is chosen for reasonable efficiency and simplicity given the protoreflect API. + // It is not deterministic, since Message.Range does not return fields in any + // defined order. + // + // When using deterministic serialization, we sort the known fields. + var err error + o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b, err = o.marshalField(b, fd, v) + return err == nil + }) + if err != nil { + return b, err + } + b = append(b, m.GetUnknown()...) + return b, nil +} + +// rangeFields visits fields in a defined order when deterministic serialization is enabled. 
+func (o MarshalOptions) rangeFields(m protoreflect.Message, f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if !o.Deterministic { + m.Range(f) + return + } + var fds []protoreflect.FieldDescriptor + m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + fds = append(fds, fd) + return true + }) + sort.Slice(fds, func(a, b int) bool { + return fieldsort.Less(fds[a], fds[b]) + }) + for _, fd := range fds { + if !f(fd, m.Get(fd)) { + break + } + } +} + +func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { + switch { + case fd.IsList(): + return o.marshalList(b, fd, value.List()) + case fd.IsMap(): + return o.marshalMap(b, fd, value.Map()) + default: + b = protowire.AppendTag(b, fd.Number(), wireTypes[fd.Kind()]) + return o.marshalSingular(b, fd, value) + } +} + +func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, list protoreflect.List) ([]byte, error) { + if fd.IsPacked() && list.Len() > 0 { + b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) + b, pos := appendSpeculativeLength(b) + for i, llen := 0, list.Len(); i < llen; i++ { + var err error + b, err = o.marshalSingular(b, fd, list.Get(i)) + if err != nil { + return b, err + } + } + b = finishSpeculativeLength(b, pos) + return b, nil + } + + kind := fd.Kind() + for i, llen := 0, list.Len(); i < llen; i++ { + var err error + b = protowire.AppendTag(b, fd.Number(), wireTypes[kind]) + b, err = o.marshalSingular(b, fd, list.Get(i)) + if err != nil { + return b, err + } + } + return b, nil +} + +func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) { + keyf := fd.MapKey() + valf := fd.MapValue() + var err error + o.rangeMap(mapv, keyf.Kind(), func(key protoreflect.MapKey, value protoreflect.Value) bool { + b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) + var pos int + b, pos = appendSpeculativeLength(b) + + b, err = o.marshalField(b, keyf, key.Value()) + if err != nil { + return false + } + b, err = o.marshalField(b, valf, value) + if err != nil { + return false + } + b = finishSpeculativeLength(b, pos) + return true + }) + return b, err +} + +func (o MarshalOptions) rangeMap(mapv protoreflect.Map, kind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { + if !o.Deterministic { + mapv.Range(f) + return + } + mapsort.Range(mapv, kind, f) +} + +// When encoding length-prefixed fields, we speculatively set aside some number of bytes +// for the length, encode the data, and then encode the length (shifting the data if necessary +// to make room). +const speculativeLength = 1 + +func appendSpeculativeLength(b []byte) ([]byte, int) { + pos := len(b) + b = append(b, "\x00\x00\x00\x00"[:speculativeLength]...) + return b, pos +} + +func finishSpeculativeLength(b []byte, pos int) []byte { + mlen := len(b) - pos - speculativeLength + msiz := protowire.SizeVarint(uint64(mlen)) + if msiz != speculativeLength { + for i := 0; i < msiz-speculativeLength; i++ { + b = append(b, 0) + } + copy(b[pos+msiz:], b[pos+speculativeLength:]) + b = b[:pos+msiz+mlen] + } + protowire.AppendVarint(b[:pos], uint64(mlen)) + return b +} diff --git a/vendor/google.golang.org/protobuf/proto/encode_gen.go b/vendor/google.golang.org/protobuf/proto/encode_gen.go new file mode 100644 index 000000000..185dacfb4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/encode_gen.go @@ -0,0 +1,97 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package proto + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var wireTypes = map[protoreflect.Kind]protowire.Type{ + protoreflect.BoolKind: protowire.VarintType, + protoreflect.EnumKind: protowire.VarintType, + protoreflect.Int32Kind: protowire.VarintType, + protoreflect.Sint32Kind: protowire.VarintType, + protoreflect.Uint32Kind: protowire.VarintType, + protoreflect.Int64Kind: protowire.VarintType, + protoreflect.Sint64Kind: protowire.VarintType, + protoreflect.Uint64Kind: protowire.VarintType, + protoreflect.Sfixed32Kind: protowire.Fixed32Type, + protoreflect.Fixed32Kind: protowire.Fixed32Type, + protoreflect.FloatKind: protowire.Fixed32Type, + protoreflect.Sfixed64Kind: protowire.Fixed64Type, + protoreflect.Fixed64Kind: protowire.Fixed64Type, + protoreflect.DoubleKind: protowire.Fixed64Type, + protoreflect.StringKind: protowire.BytesType, + protoreflect.BytesKind: protowire.BytesType, + protoreflect.MessageKind: protowire.BytesType, + protoreflect.GroupKind: protowire.StartGroupType, +} + +func (o MarshalOptions) marshalSingular(b []byte, fd protoreflect.FieldDescriptor, v protoreflect.Value) ([]byte, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + case protoreflect.EnumKind: + b = protowire.AppendVarint(b, uint64(v.Enum())) + case protoreflect.Int32Kind: + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + case protoreflect.Sint32Kind: + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + case protoreflect.Uint32Kind: + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + case protoreflect.Int64Kind: + b = protowire.AppendVarint(b, uint64(v.Int())) + case protoreflect.Sint64Kind: + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + case protoreflect.Uint64Kind: + b = protowire.AppendVarint(b, v.Uint()) + case protoreflect.Sfixed32Kind: + b = protowire.AppendFixed32(b, uint32(v.Int())) + case protoreflect.Fixed32Kind: + b = protowire.AppendFixed32(b, uint32(v.Uint())) + case protoreflect.FloatKind: + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + case protoreflect.Sfixed64Kind: + b = protowire.AppendFixed64(b, uint64(v.Int())) + case protoreflect.Fixed64Kind: + b = protowire.AppendFixed64(b, v.Uint()) + case protoreflect.DoubleKind: + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + case protoreflect.StringKind: + if strs.EnforceUTF8(fd) && !utf8.ValidString(v.String()) { + return b, errors.InvalidUTF8(string(fd.FullName())) + } + b = protowire.AppendString(b, v.String()) + case protoreflect.BytesKind: + b = protowire.AppendBytes(b, v.Bytes()) + case protoreflect.MessageKind: + var pos int + var err error + b, pos = appendSpeculativeLength(b) + b, err = o.marshalMessage(b, v.Message()) + if err != nil { + return b, err + } + b = finishSpeculativeLength(b, pos) + case protoreflect.GroupKind: + var err error + b, err = o.marshalMessage(b, v.Message()) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, protowire.EncodeTag(fd.Number(), protowire.EndGroupType)) + default: + return b, errors.New("invalid kind %v", fd.Kind()) + } + return b, 
nil +} diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go new file mode 100644 index 000000000..10902bd85 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -0,0 +1,154 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they belong to the same message descriptor, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. If either of the top-level +// messages are invalid, then Equal reports true only if both are invalid. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + if x == nil || y == nil { + return x == nil && y == nil + } + mx := x.ProtoReflect() + my := y.ProtoReflect() + if mx.IsValid() != my.IsValid() { + return false + } + return equalMessage(mx, my) +} + +// equalMessage compares two messages. +func equalMessage(mx, my pref.Message) bool { + if mx.Descriptor() != my.Descriptor() { + return false + } + + nx := 0 + equal := true + mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + nx++ + vy := my.Get(fd) + equal = my.Has(fd) && equalField(fd, vx, vy) + return equal + }) + if !equal { + return false + } + ny := 0 + my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + ny++ + return true + }) + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +// equalField compares two fields. +func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { + switch { + case fd.IsList(): + return equalList(fd, x.List(), y.List()) + case fd.IsMap(): + return equalMap(fd, x.Map(), y.Map()) + default: + return equalValue(fd, x, y) + } +} + +// equalMap compares two maps. +func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k pref.MapKey, vx pref.Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) + return equal + }) + return equal +} + +// equalList compares two lists. +func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(fd, x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +// equalValue compares two singular values. 
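A small illustration of the Equal semantics documented above (a sketch, not part of the patch; it assumes the wrapperspb well-known types): NaN values in the same field compare equal to each other, unlike with the == operator, and messages with different descriptors are never equal.

package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.Double(math.NaN())
	b := wrapperspb.Double(math.NaN())

	// equalValue treats NaN as equal to NaN, so these messages are equal.
	fmt.Println(proto.Equal(a, b)) // true

	// Different message descriptors are never equal.
	fmt.Println(proto.Equal(a, wrapperspb.String("x"))) // false
}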
+func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { + switch { + case fd.Message() != nil: + return equalMessage(x.Message(), y.Message()) + case fd.Kind() == pref.BytesKind: + return bytes.Equal(x.Bytes(), y.Bytes()) + case fd.Kind() == pref.FloatKind, fd.Kind() == pref.DoubleKind: + fx := x.Float() + fy := y.Float() + if math.IsNaN(fx) || math.IsNaN(fy) { + return math.IsNaN(fx) && math.IsNaN(fy) + } + return fx == fy + default: + return x.Interface() == y.Interface() + } +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +func equalUnknown(x, y pref.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[pref.FieldNumber]pref.RawFields) + my := make(map[pref.FieldNumber]pref.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + return reflect.DeepEqual(mx, my) +} diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go new file mode 100644 index 000000000..5f293cda8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// HasExtension reports whether an extension field is populated. +// It returns false if m is invalid or if xt does not extend m. +func HasExtension(m Message, xt protoreflect.ExtensionType) bool { + // Treat nil message interface as an empty message; no populated fields. + if m == nil { + return false + } + + // As a special case, we report invalid or mismatching descriptors + // as always not being populated (since they aren't). + if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + return false + } + + return m.ProtoReflect().Has(xt.TypeDescriptor()) +} + +// ClearExtension clears an extension field such that subsequent +// HasExtension calls return false. +// It panics if m is invalid or if xt does not extend m. +func ClearExtension(m Message, xt protoreflect.ExtensionType) { + m.ProtoReflect().Clear(xt.TypeDescriptor()) +} + +// GetExtension retrieves the value for an extension field. +// If the field is unpopulated, it returns the default value for +// scalars and an immutable, empty value for lists or messages. +// It panics if xt does not extend m. +func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { + // Treat nil message interface as an empty message; return the default. + if m == nil { + return xt.InterfaceOf(xt.Zero()) + } + + return xt.InterfaceOf(m.ProtoReflect().Get(xt.TypeDescriptor())) } + +// SetExtension stores the value of an extension field. +// It panics if m is invalid, xt does not extend m, or if the type of v +// is invalid for the specified extension field. +func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { + xd := xt.TypeDescriptor() + pv := xt.ValueOf(v) + + // Specially treat an invalid list, map, or message as clear.
+ isValid := true + switch { + case xd.IsList(): + isValid = pv.List().IsValid() + case xd.IsMap(): + isValid = pv.Map().IsValid() + case xd.Message() != nil: + isValid = pv.Message().IsValid() + } + if !isValid { + m.ProtoReflect().Clear(xd) + return + } + + m.ProtoReflect().Set(xd, pv) +} + +// RangeExtensions iterates over every populated extension field in m in an +// undefined order, calling f for each extension type and value encountered. +// It returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current extension field. +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { + // Treat nil message interface as an empty message; nothing to range over. + if m == nil { + return + } + + m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor).Type() + vi := xt.InterfaceOf(v) + return f(xt, vi) + } + return true + }) +} diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go new file mode 100644 index 000000000..d761ab331 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -0,0 +1,139 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Merge merges src into dst, which must be a message with the same descriptor. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src are appended to the corresponding +// list fields in dst. The entries of every map field in src are copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +// +// It is semantically equivalent to unmarshaling the encoded form of src +// into dst with the UnmarshalOptions.Merge option specified. +func Merge(dst, src Message) { + // TODO: Should nil src be treated as semantically equivalent to an + // untyped, read-only, empty message? What about a nil dst? + + dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect() + if dstMsg.Descriptor() != srcMsg.Descriptor() { + if got, want := dstMsg.Descriptor().FullName(), srcMsg.Descriptor().FullName(); got != want { + panic(fmt.Sprintf("descriptor mismatch: %v != %v", got, want)) + } + panic("descriptor mismatch") + } + mergeOptions{}.mergeMessage(dstMsg, srcMsg) +} + +// Clone returns a deep copy of m. +// If the top-level message is invalid, it returns an invalid message as well. +func Clone(m Message) Message { + // NOTE: Most usages of Clone assume the following properties: + // t := reflect.TypeOf(m) + // t == reflect.TypeOf(m.ProtoReflect().New().Interface()) + // t == reflect.TypeOf(m.ProtoReflect().Type().Zero().Interface()) + // + // Embedding protobuf messages breaks this since the parent type will have + // a forwarded ProtoReflect method, but the Interface method will return + // the underlying embedded message type.
+ if m == nil { + return nil + } + src := m.ProtoReflect() + if !src.IsValid() { + return src.Type().Zero().Interface() + } + dst := src.New() + mergeOptions{}.mergeMessage(dst, src) + return dst.Interface() +} + +// mergeOptions provides a namespace for merge functions, and can be +// exported in the future if we add user-visible merge options. +type mergeOptions struct{} + +func (o mergeOptions) mergeMessage(dst, src protoreflect.Message) { + methods := protoMethods(dst) + if methods != nil && methods.Merge != nil { + in := protoiface.MergeInput{ + Destination: dst, + Source: src, + } + out := methods.Merge(in) + if out.Flags&protoiface.MergeComplete != 0 { + return + } + } + + if !dst.IsValid() { + panic(fmt.Sprintf("cannot merge into invalid %v message", dst.Descriptor().FullName())) + } + + src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsList(): + o.mergeList(dst.Mutable(fd).List(), v.List(), fd) + case fd.IsMap(): + o.mergeMap(dst.Mutable(fd).Map(), v.Map(), fd.MapValue()) + case fd.Message() != nil: + o.mergeMessage(dst.Mutable(fd).Message(), v.Message()) + case fd.Kind() == protoreflect.BytesKind: + dst.Set(fd, o.cloneBytes(v)) + default: + dst.Set(fd, v) + } + return true + }) + + if len(src.GetUnknown()) > 0 { + dst.SetUnknown(append(dst.GetUnknown(), src.GetUnknown()...)) + } +} + +func (o mergeOptions) mergeList(dst, src protoreflect.List, fd protoreflect.FieldDescriptor) { + // Merge semantics appends to the end of the existing list. + for i, n := 0, src.Len(); i < n; i++ { + switch v := src.Get(i); { + case fd.Message() != nil: + dstv := dst.NewElement() + o.mergeMessage(dstv.Message(), v.Message()) + dst.Append(dstv) + case fd.Kind() == protoreflect.BytesKind: + dst.Append(o.cloneBytes(v)) + default: + dst.Append(v) + } + } +} + +func (o mergeOptions) mergeMap(dst, src protoreflect.Map, fd protoreflect.FieldDescriptor) { + // Merge semantics replaces, rather than merges into existing entries. + src.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + switch { + case fd.Message() != nil: + dstv := dst.NewValue() + o.mergeMessage(dstv.Message(), v.Message()) + dst.Set(k, dstv) + case fd.Kind() == protoreflect.BytesKind: + dst.Set(k, o.cloneBytes(v)) + default: + dst.Set(k, v) + } + return true + }) +} + +func (o mergeOptions) cloneBytes(v protoreflect.Value) protoreflect.Value { + return protoreflect.ValueOfBytes(append([]byte{}, v.Bytes()...)) +} diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go new file mode 100644 index 000000000..1d692c3a8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -0,0 +1,88 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
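To make the Merge and Clone semantics above concrete, a short sketch (not part of the patch; it assumes the structpb well-known type is available): map entries from src are copied into dst key by key, and Clone yields an independent deep copy.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	dst, _ := structpb.NewStruct(map[string]interface{}{"a": "1"})
	src, _ := structpb.NewStruct(map[string]interface{}{"b": "2"})

	// Map fields merge entry by entry; entries with existing keys would be replaced.
	proto.Merge(dst, src)
	fmt.Println(len(dst.GetFields())) // 2

	// Clone is a deep copy; mutating the copy does not affect dst.
	cp := proto.Clone(dst).(*structpb.Struct)
	delete(cp.GetFields(), "a")
	fmt.Println(len(dst.GetFields())) // still 2
}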
+ +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +func (o MarshalOptions) sizeMessageSet(m protoreflect.Message) (size int) { + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + size += messageset.SizeField(fd.Number()) + size += protowire.SizeTag(messageset.FieldMessage) + size += protowire.SizeBytes(o.size(v.Message())) + return true + }) + size += messageset.SizeUnknown(m.GetUnknown()) + return size +} + +func (o MarshalOptions) marshalMessageSet(b []byte, m protoreflect.Message) ([]byte, error) { + if !flags.ProtoLegacy { + return b, errors.New("no support for message_set_wire_format") + } + var err error + o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b, err = o.marshalMessageSetField(b, fd, v) + return err == nil + }) + if err != nil { + return b, err + } + return messageset.AppendUnknown(b, m.GetUnknown()) +} + +func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { + b = messageset.AppendFieldStart(b, fd.Number()) + b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) + b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + b, err := o.marshalMessage(b, value.Message()) + if err != nil { + return b, err + } + b = messageset.AppendFieldEnd(b) + return b, nil +} + +func (o UnmarshalOptions) unmarshalMessageSet(b []byte, m protoreflect.Message) error { + if !flags.ProtoLegacy { + return errors.New("no support for message_set_wire_format") + } + return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error { + err := o.unmarshalMessageSetField(m, num, v) + if err == errUnknown { + unknown := m.GetUnknown() + unknown = protowire.AppendTag(unknown, num, protowire.BytesType) + unknown = protowire.AppendBytes(unknown, v) + m.SetUnknown(unknown) + return nil + } + return err + }) +} + +func (o UnmarshalOptions) unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte) error { + md := m.Descriptor() + if !md.ExtensionRanges().Has(num) { + return errUnknown + } + xt, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) + if err == protoregistry.NotFound { + return errUnknown + } + if err != nil { + return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) + } + xd := xt.TypeDescriptor() + if err := o.unmarshalMessage(v, m.Mutable(xd).Message()); err != nil { + return err + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go new file mode 100644 index 000000000..ca14b09c3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Message is the top-level interface that all messages must implement. +// It provides access to a reflective view of a message. 
+// Any implementation of this interface may be used with all functions in the +// protobuf module that accept a Message, except where otherwise specified. +// +// This is the v2 interface definition for protobuf messages. +// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// +// To convert a v1 message to a v2 message, +// use "github.com/golang/protobuf/proto".MessageV2. +// To convert a v2 message to a v1 message, +// use "github.com/golang/protobuf/proto".MessageV1. +type Message = protoreflect.ProtoMessage + +// Error matches all errors produced by packages in the protobuf module. +// +// That is, errors.Is(err, Error) reports whether an error is produced +// by this module. +var Error error + +func init() { + Error = errors.Error +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go new file mode 100644 index 000000000..d8dd604f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoreflect build tag disables use of fast-path methods. +// +build !protoreflect + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +const hasProtoMethods = true + +func protoMethods(m protoreflect.Message) *protoiface.Methods { + return m.ProtoMethods() +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go new file mode 100644 index 000000000..b103d4320 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoreflect build tag disables use of fast-path methods. +// +build protoreflect + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +const hasProtoMethods = false + +func protoMethods(m protoreflect.Message) *protoiface.Methods { + return nil +} diff --git a/vendor/google.golang.org/protobuf/proto/reset.go b/vendor/google.golang.org/protobuf/proto/reset.go new file mode 100644 index 000000000..3d7f89436 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/reset.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Reset clears every field in the message. +// The resulting message shares no observable memory with its previous state +// other than the memory for the message itself. +func Reset(m Message) { + if mr, ok := m.(interface{ Reset() }); ok && hasProtoMethods { + mr.Reset() + return + } + resetMessage(m.ProtoReflect()) +} + +func resetMessage(m protoreflect.Message) { + if !m.IsValid() { + panic(fmt.Sprintf("cannot reset invalid %v message", m.Descriptor().FullName())) + } + + // Clear all known fields. + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + m.Clear(fds.Get(i)) + } + + // Clear extension fields. 
+ m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + m.Clear(fd) + return true + }) + + // Clear unknown fields. + m.SetUnknown(nil) +} diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go new file mode 100644 index 000000000..554b9c6c0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -0,0 +1,97 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + return MarshalOptions{}.Size(m) +} + +// Size returns the size in bytes of the wire-format encoding of m. +func (o MarshalOptions) Size(m Message) int { + // Treat a nil message interface as an empty message; nothing to output. + if m == nil { + return 0 + } + + return o.size(m.ProtoReflect()) +} + +// size is a centralized function that all size operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for size that do not go through this. +func (o MarshalOptions) size(m protoreflect.Message) (size int) { + methods := protoMethods(m) + if methods != nil && methods.Size != nil { + out := methods.Size(protoiface.SizeInput{ + Message: m, + }) + return out.Size + } + if methods != nil && methods.Marshal != nil { + // This is not efficient, but we don't have any choice. + // This case is mainly used for legacy types with a Marshal method. 
+ out, _ := methods.Marshal(protoiface.MarshalInput{ + Message: m, + }) + return len(out.Buf) + } + return o.sizeMessageSlow(m) +} + +func (o MarshalOptions) sizeMessageSlow(m protoreflect.Message) (size int) { + if messageset.IsMessageSet(m.Descriptor()) { + return o.sizeMessageSet(m) + } + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + size += o.sizeField(fd, v) + return true + }) + size += len(m.GetUnknown()) + return size +} + +func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) { + num := fd.Number() + switch { + case fd.IsList(): + return o.sizeList(num, fd, value.List()) + case fd.IsMap(): + return o.sizeMap(num, fd, value.Map()) + default: + return protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), value) + } +} + +func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { + if fd.IsPacked() && list.Len() > 0 { + content := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + content += o.sizeSingular(num, fd.Kind(), list.Get(i)) + } + return protowire.SizeTag(num) + protowire.SizeBytes(content) + } + + for i, llen := 0, list.Len(); i < llen; i++ { + size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i)) + } + return size +} + +func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { + mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { + size += protowire.SizeTag(num) + size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value)) + return true + }) + return size +} diff --git a/vendor/google.golang.org/protobuf/proto/size_gen.go b/vendor/google.golang.org/protobuf/proto/size_gen.go new file mode 100644 index 000000000..3cf61a824 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/size_gen.go @@ -0,0 +1,55 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. 
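A brief sketch of how Size relates to Marshal (illustrative only; assumes the wrapperspb types): the reported size matches the length of the encoded output, without allocating the output buffer.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.String("terraform")

	// Size walks the message to compute its wire-format length.
	n := proto.Size(m)

	b, err := proto.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(n == len(b)) // true
}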
+ +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func (o MarshalOptions) sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int { + switch kind { + case protoreflect.BoolKind: + return protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + case protoreflect.EnumKind: + return protowire.SizeVarint(uint64(v.Enum())) + case protoreflect.Int32Kind: + return protowire.SizeVarint(uint64(int32(v.Int()))) + case protoreflect.Sint32Kind: + return protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + case protoreflect.Uint32Kind: + return protowire.SizeVarint(uint64(uint32(v.Uint()))) + case protoreflect.Int64Kind: + return protowire.SizeVarint(uint64(v.Int())) + case protoreflect.Sint64Kind: + return protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + case protoreflect.Uint64Kind: + return protowire.SizeVarint(v.Uint()) + case protoreflect.Sfixed32Kind: + return protowire.SizeFixed32() + case protoreflect.Fixed32Kind: + return protowire.SizeFixed32() + case protoreflect.FloatKind: + return protowire.SizeFixed32() + case protoreflect.Sfixed64Kind: + return protowire.SizeFixed64() + case protoreflect.Fixed64Kind: + return protowire.SizeFixed64() + case protoreflect.DoubleKind: + return protowire.SizeFixed64() + case protoreflect.StringKind: + return protowire.SizeBytes(len(v.String())) + case protoreflect.BytesKind: + return protowire.SizeBytes(len(v.Bytes())) + case protoreflect.MessageKind: + return protowire.SizeBytes(o.size(v.Message())) + case protoreflect.GroupKind: + return protowire.SizeGroup(num, o.size(v.Message())) + default: + return 0 + } +} diff --git a/vendor/google.golang.org/protobuf/proto/wrappers.go b/vendor/google.golang.org/protobuf/proto/wrappers.go new file mode 100644 index 000000000..653b12c3a --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/wrappers.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go new file mode 100644 index 000000000..37f254d4c --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -0,0 +1,275 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package protodesc provides functionality for converting +// FileDescriptorProto messages to/from protoreflect.FileDescriptor values. +// +// The google.protobuf.FileDescriptorProto is a protobuf message that describes +// the type information for a .proto file in a form that is easily serializable. +// The protoreflect.FileDescriptor is a more structured representation of +// the FileDescriptorProto message where references and remote dependencies +// can be directly followed. +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// Resolver is the resolver used by NewFile to resolve dependencies. +// The enums and messages provided must belong to some parent file, +// which is also registered. +// +// It is implemented by protoregistry.Files. +type Resolver interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) +} + +// FileOptions configures the construction of file descriptors. +type FileOptions struct { + pragma.NoUnkeyedLiterals + + // AllowUnresolvable configures New to permissively allow unresolvable + // file, enum, or message dependencies. Unresolved dependencies are replaced + // by placeholder equivalents. + // + // The following dependencies may be left unresolved: + // • Resolving an imported file. + // • Resolving the type for a message field or extension field. + // If the kind of the field is unknown, then a placeholder is used for both + // the Enum and Message accessors on the protoreflect.FieldDescriptor. + // • Resolving an enum value set as the default for an optional enum field. + // If unresolvable, the protoreflect.FieldDescriptor.Default is set to the + // first value in the associated enum (or zero if the also enum dependency + // is also unresolvable). The protoreflect.FieldDescriptor.DefaultEnumValue + // is populated with a placeholder. + // • Resolving the extended message type for an extension field. + // • Resolving the input or output message type for a service method. + // + // If the unresolved dependency uses a relative name, + // then the placeholder will contain an invalid FullName with a "*." prefix, + // indicating that the starting prefix of the full name is unknown. + AllowUnresolvable bool +} + +// NewFile creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. See FileOptions.New for more information. +func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + return FileOptions{}.New(fd, r) +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. See FileOptions.NewFiles for more information. +func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + return FileOptions{}.NewFiles(fd) +} + +// New creates a new protoreflect.FileDescriptor from the provided +// file descriptor message. The file must represent a valid proto file according +// to protobuf semantics. The returned descriptor is a deep copy of the input. +// +// Any imported files, enum types, or message types referenced in the file are +// resolved using the provided registry. 
When looking up an import file path, +// the path must be unique. The newly created file descriptor is not registered +// back into the provided file registry. +func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) { + if r == nil { + r = (*protoregistry.Files)(nil) // empty resolver + } + + // Handle the file descriptor content. + f := &filedesc.File{L2: &filedesc.FileL2{}} + switch fd.GetSyntax() { + case "proto2", "": + f.L1.Syntax = protoreflect.Proto2 + case "proto3": + f.L1.Syntax = protoreflect.Proto3 + default: + return nil, errors.New("invalid syntax: %q", fd.GetSyntax()) + } + f.L1.Path = fd.GetName() + if f.L1.Path == "" { + return nil, errors.New("file path must be populated") + } + f.L1.Package = protoreflect.FullName(fd.GetPackage()) + if !f.L1.Package.IsValid() && f.L1.Package != "" { + return nil, errors.New("invalid package: %q", f.L1.Package) + } + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FileOptions) + f.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + + f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency())) + for _, i := range fd.GetPublicDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsPublic { + return nil, errors.New("invalid or duplicate public import index: %d", i) + } + f.L2.Imports[i].IsPublic = true + } + for _, i := range fd.GetWeakDependency() { + if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { + return nil, errors.New("invalid or duplicate weak import index: %d", i) + } + f.L2.Imports[i].IsWeak = true + } + imps := importSet{f.Path(): true} + for i, path := range fd.GetDependency() { + imp := &f.L2.Imports[i] + f, err := r.FindFileByPath(path) + if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + f = filedesc.PlaceholderFile(path) + } else if err != nil { + return nil, errors.New("could not resolve import %q: %v", path, err) + } + imp.FileDescriptor = f + + if imps[imp.Path()] { + return nil, errors.New("already imported %q", path) + } + imps[imp.Path()] = true + } + for i := range fd.GetDependency() { + imp := &f.L2.Imports[i] + imps.importPublic(imp.Imports()) + } + + // Handle source locations. + for _, loc := range fd.GetSourceCodeInfo().GetLocation() { + var l protoreflect.SourceLocation + // TODO: Validate that the path points to an actual declaration? + l.Path = protoreflect.SourcePath(loc.GetPath()) + s := loc.GetSpan() + switch len(s) { + case 3: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[0]), int(s[2]) + case 4: + l.StartLine, l.StartColumn, l.EndLine, l.EndColumn = int(s[0]), int(s[1]), int(s[2]), int(s[3]) + default: + return nil, errors.New("invalid span: %v", s) + } + // TODO: Validate that the span information is sensible? + // See https://github.com/protocolbuffers/protobuf/issues/6378. + if false && (l.EndLine < l.StartLine || l.StartLine < 0 || l.StartColumn < 0 || l.EndColumn < 0 || + (l.StartLine == l.EndLine && l.EndColumn <= l.StartColumn)) { + return nil, errors.New("invalid span: %v", s) + } + l.LeadingDetachedComments = loc.GetLeadingDetachedComments() + l.LeadingComments = loc.GetLeadingComments() + l.TrailingComments = loc.GetTrailingComments() + f.L2.Locations.List = append(f.L2.Locations.List, l) + } + + // Step 1: Allocate and derive the names for all declarations. 
+ // This copies all fields from the descriptor proto except: + // google.protobuf.FieldDescriptorProto.type_name + // google.protobuf.FieldDescriptorProto.default_value + // google.protobuf.FieldDescriptorProto.oneof_index + // google.protobuf.FieldDescriptorProto.extendee + // google.protobuf.MethodDescriptorProto.input + // google.protobuf.MethodDescriptorProto.output + var err error + sb := new(strs.Builder) + r1 := make(descsByName) + if f.L1.Enums.List, err = r1.initEnumDeclarations(fd.GetEnumType(), f, sb); err != nil { + return nil, err + } + if f.L1.Messages.List, err = r1.initMessagesDeclarations(fd.GetMessageType(), f, sb); err != nil { + return nil, err + } + if f.L1.Extensions.List, err = r1.initExtensionDeclarations(fd.GetExtension(), f, sb); err != nil { + return nil, err + } + if f.L1.Services.List, err = r1.initServiceDeclarations(fd.GetService(), f, sb); err != nil { + return nil, err + } + + // Step 2: Resolve every dependency reference not handled by step 1. + r2 := &resolver{local: r1, remote: r, imports: imps, allowUnresolvable: o.AllowUnresolvable} + if err := r2.resolveMessageDependencies(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := r2.resolveExtensionDependencies(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + if err := r2.resolveServiceDependencies(f.L1.Services.List, fd.GetService()); err != nil { + return nil, err + } + + // Step 3: Validate every enum, message, and extension declaration. + if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil { + return nil, err + } + if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil { + return nil, err + } + if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil { + return nil, err + } + + return f, nil +} + +type importSet map[string]bool + +func (is importSet) importPublic(imps protoreflect.FileImports) { + for i := 0; i < imps.Len(); i++ { + if imp := imps.Get(i); imp.IsPublic { + is[imp.Path()] = true + is.importPublic(imp.Imports()) + } + } +} + +// NewFiles creates a new protoregistry.Files from the provided +// FileDescriptorSet message. The descriptor set must include only +// valid files according to protobuf semantics. The returned descriptors +// are a deep copy of the input. +func (o FileOptions) NewFiles(fds *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) { + files := make(map[string]*descriptorpb.FileDescriptorProto) + for _, fd := range fds.File { + if _, ok := files[fd.GetName()]; ok { + return nil, errors.New("file appears multiple times: %q", fd.GetName()) + } + files[fd.GetName()] = fd + } + r := &protoregistry.Files{} + for _, fd := range files { + if err := o.addFileDeps(r, fd, files); err != nil { + return nil, err + } + } + return r, nil +} +func (o FileOptions) addFileDeps(r *protoregistry.Files, fd *descriptorpb.FileDescriptorProto, files map[string]*descriptorpb.FileDescriptorProto) error { + // Set the entry to nil while descending into a file's dependencies to detect cycles. + files[fd.GetName()] = nil + for _, dep := range fd.Dependency { + depfd, ok := files[dep] + if depfd == nil { + if ok { + return errors.New("import cycle in file: %q", dep) + } + continue + } + if err := o.addFileDeps(r, depfd, files); err != nil { + return err + } + } + // Delete the entry once dependencies are processed. 
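+	// At this point every dependency of fd that appears in the set has been
+	// built and registered into r, so the o.New call below can resolve those
+	// imports through the registry.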
+ delete(files, fd.GetName()) + f, err := o.New(fd, r) + if err != nil { + return err + } + return r.RegisterFile(f) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go new file mode 100644 index 000000000..673a230e7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -0,0 +1,248 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +type descsByName map[protoreflect.FullName]protoreflect.Descriptor + +func (r descsByName) initEnumDeclarations(eds []*descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (es []filedesc.Enum, err error) { + es = make([]filedesc.Enum, len(eds)) // allocate up-front to ensure stable pointers + for i, ed := range eds { + e := &es[i] + e.L2 = new(filedesc.EnumL2) + if e.L0, err = r.makeBase(e, parent, ed.GetName(), i, sb); err != nil { + return nil, err + } + if opts := ed.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumOptions) + e.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + for _, s := range ed.GetReservedName() { + e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s)) + } + for _, rr := range ed.GetReservedRange() { + e.L2.ReservedRanges.List = append(e.L2.ReservedRanges.List, [2]protoreflect.EnumNumber{ + protoreflect.EnumNumber(rr.GetStart()), + protoreflect.EnumNumber(rr.GetEnd()), + }) + } + if e.L2.Values.List, err = r.initEnumValuesFromDescriptorProto(ed.GetValue(), e, sb); err != nil { + return nil, err + } + } + return es, nil +} + +func (r descsByName) initEnumValuesFromDescriptorProto(vds []*descriptorpb.EnumValueDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (vs []filedesc.EnumValue, err error) { + vs = make([]filedesc.EnumValue, len(vds)) // allocate up-front to ensure stable pointers + for i, vd := range vds { + v := &vs[i] + if v.L0, err = r.makeBase(v, parent, vd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := vd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.EnumValueOptions) + v.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + v.L1.Number = protoreflect.EnumNumber(vd.GetNumber()) + } + return vs, nil +} + +func (r descsByName) initMessagesDeclarations(mds []*descriptorpb.DescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Message, err error) { + ms = make([]filedesc.Message, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + m.L2 = new(filedesc.MessageL2) + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MessageOptions) + m.L2.Options = func() protoreflect.ProtoMessage { return opts } + m.L1.IsMapEntry = opts.GetMapEntry() + m.L1.IsMessageSet = opts.GetMessageSetWireFormat() + } + for _, s := range md.GetReservedName() { + m.L2.ReservedNames.List = append(m.L2.ReservedNames.List, 
protoreflect.Name(s)) + } + for _, rr := range md.GetReservedRange() { + m.L2.ReservedRanges.List = append(m.L2.ReservedRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(rr.GetStart()), + protoreflect.FieldNumber(rr.GetEnd()), + }) + } + for _, xr := range md.GetExtensionRange() { + m.L2.ExtensionRanges.List = append(m.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(xr.GetStart()), + protoreflect.FieldNumber(xr.GetEnd()), + }) + var optsFunc func() protoreflect.ProtoMessage + if opts := xr.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ExtensionRangeOptions) + optsFunc = func() protoreflect.ProtoMessage { return opts } + } + m.L2.ExtensionRangeOptions = append(m.L2.ExtensionRangeOptions, optsFunc) + } + if m.L2.Fields.List, err = r.initFieldsFromDescriptorProto(md.GetField(), m, sb); err != nil { + return nil, err + } + if m.L2.Oneofs.List, err = r.initOneofsFromDescriptorProto(md.GetOneofDecl(), m, sb); err != nil { + return nil, err + } + if m.L1.Enums.List, err = r.initEnumDeclarations(md.GetEnumType(), m, sb); err != nil { + return nil, err + } + if m.L1.Messages.List, err = r.initMessagesDeclarations(md.GetNestedType(), m, sb); err != nil { + return nil, err + } + if m.L1.Extensions.List, err = r.initExtensionDeclarations(md.GetExtension(), m, sb); err != nil { + return nil, err + } + } + return ms, nil +} + +func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (fs []filedesc.Field, err error) { + fs = make([]filedesc.Field, len(fds)) // allocate up-front to ensure stable pointers + for i, fd := range fds { + f := &fs[i] + if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil { + return nil, err + } + f.L1.IsProto3Optional = fd.GetProto3Optional() + if opts := fd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.FieldOptions) + f.L1.Options = func() protoreflect.ProtoMessage { return opts } + f.L1.IsWeak = opts.GetWeak() + f.L1.HasPacked = opts.Packed != nil + f.L1.IsPacked = opts.GetPacked() + } + f.L1.Number = protoreflect.FieldNumber(fd.GetNumber()) + f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel()) + if fd.Type != nil { + f.L1.Kind = protoreflect.Kind(fd.GetType()) + } + if fd.JsonName != nil { + f.L1.JSONName.Init(fd.GetJsonName()) + } + } + return fs, nil +} + +func (r descsByName) initOneofsFromDescriptorProto(ods []*descriptorpb.OneofDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (os []filedesc.Oneof, err error) { + os = make([]filedesc.Oneof, len(ods)) // allocate up-front to ensure stable pointers + for i, od := range ods { + o := &os[i] + if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil { + return nil, err + } + if opts := od.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.OneofOptions) + o.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + } + return os, nil +} + +func (r descsByName) initExtensionDeclarations(xds []*descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (xs []filedesc.Extension, err error) { + xs = make([]filedesc.Extension, len(xds)) // allocate up-front to ensure stable pointers + for i, xd := range xds { + x := &xs[i] + x.L2 = new(filedesc.ExtensionL2) + if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := xd.GetOptions(); opts != nil { + opts = 
proto.Clone(opts).(*descriptorpb.FieldOptions) + x.L2.Options = func() protoreflect.ProtoMessage { return opts } + x.L2.IsPacked = opts.GetPacked() + } + x.L1.Number = protoreflect.FieldNumber(xd.GetNumber()) + x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel()) + if xd.Type != nil { + x.L1.Kind = protoreflect.Kind(xd.GetType()) + } + if xd.JsonName != nil { + x.L2.JSONName.Init(xd.GetJsonName()) + } + } + return xs, nil +} + +func (r descsByName) initServiceDeclarations(sds []*descriptorpb.ServiceDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ss []filedesc.Service, err error) { + ss = make([]filedesc.Service, len(sds)) // allocate up-front to ensure stable pointers + for i, sd := range sds { + s := &ss[i] + s.L2 = new(filedesc.ServiceL2) + if s.L0, err = r.makeBase(s, parent, sd.GetName(), i, sb); err != nil { + return nil, err + } + if opts := sd.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.ServiceOptions) + s.L2.Options = func() protoreflect.ProtoMessage { return opts } + } + if s.L2.Methods.List, err = r.initMethodsFromDescriptorProto(sd.GetMethod(), s, sb); err != nil { + return nil, err + } + } + return ss, nil +} + +func (r descsByName) initMethodsFromDescriptorProto(mds []*descriptorpb.MethodDescriptorProto, parent protoreflect.Descriptor, sb *strs.Builder) (ms []filedesc.Method, err error) { + ms = make([]filedesc.Method, len(mds)) // allocate up-front to ensure stable pointers + for i, md := range mds { + m := &ms[i] + if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil { + return nil, err + } + if opts := md.GetOptions(); opts != nil { + opts = proto.Clone(opts).(*descriptorpb.MethodOptions) + m.L1.Options = func() protoreflect.ProtoMessage { return opts } + } + m.L1.IsStreamingClient = md.GetClientStreaming() + m.L1.IsStreamingServer = md.GetServerStreaming() + } + return ms, nil +} + +func (r descsByName) makeBase(child, parent protoreflect.Descriptor, name string, idx int, sb *strs.Builder) (filedesc.BaseL0, error) { + if !protoreflect.Name(name).IsValid() { + return filedesc.BaseL0{}, errors.New("descriptor %q has an invalid nested name: %q", parent.FullName(), name) + } + + // Derive the full name of the child. + // Note that enum values are a sibling to the enum parent in the namespace. + var fullName protoreflect.FullName + if _, ok := parent.(protoreflect.EnumDescriptor); ok { + fullName = sb.AppendFullName(parent.FullName().Parent(), protoreflect.Name(name)) + } else { + fullName = sb.AppendFullName(parent.FullName(), protoreflect.Name(name)) + } + if _, ok := r[fullName]; ok { + return filedesc.BaseL0{}, errors.New("descriptor %q already declared", fullName) + } + r[fullName] = child + + // TODO: Verify that the full name does not already exist in the resolver? + // This is not as critical since most usages of NewFile will register + // the created file back into the registry, which will perform this check. + + return filedesc.BaseL0{ + FullName: fullName, + ParentFile: parent.ParentFile().(*filedesc.File), + Parent: parent, + Index: idx, + }, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go new file mode 100644 index 000000000..cebb36cda --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -0,0 +1,286 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// resolver is a wrapper around a local registry of declarations within the file +// and the remote resolver. The remote resolver is restricted to only return +// descriptors that have been imported. +type resolver struct { + local descsByName + remote Resolver + imports importSet + + allowUnresolvable bool +} + +func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) (err error) { + for i, md := range mds { + m := &ms[i] + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if f.L1.Cardinality == protoreflect.Required { + m.L2.RequiredNumbers.List = append(m.L2.RequiredNumbers.List, f.L1.Number) + } + if fd.OneofIndex != nil { + k := int(fd.GetOneofIndex()) + if !(0 <= k && k < len(md.GetOneofDecl())) { + return errors.New("message field %q has an invalid oneof index: %d", f.FullName(), k) + } + o := &m.L2.Oneofs.List[k] + f.L1.ContainingOneof = o + o.L1.Fields.List = append(o.L1.Fields.List, f) + } + + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) + } + if fd.DefaultValue != nil { + v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable) + if err != nil { + return errors.New("message field %q has invalid default: %v", f.FullName(), err) + } + f.L1.Default = filedesc.DefaultValue(v, ev) + } + } + + if err := r.resolveMessageDependencies(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := r.resolveExtensionDependencies(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { + for i, xd := range xds { + x := &xs[i] + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) + } + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) + } + if xd.DefaultValue != nil { + v, ev, err := unmarshalDefault(xd.GetDefaultValue(), x, r.allowUnresolvable) + if err != nil { + return errors.New("extension field %q has invalid default: %v", x.FullName(), err) + } + x.L2.Default = filedesc.DefaultValue(v, ev) + } + } + return nil +} + +func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*descriptorpb.ServiceDescriptorProto) (err error) { + for i, sd := range sds { + s := &ss[i] + for j, md := range sd.GetMethod() { + m := &s.L2.Methods.List[j] + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) + } + 
m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + if err != nil { + return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) + } + } + } + return nil +} + +// findTarget finds an enum or message descriptor if k is an enum, message, +// group, or unknown. If unknown, and the name could be resolved, the kind +// returned kind is set based on the type of the resolved descriptor. +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { + switch k { + case protoreflect.EnumKind: + ed, err := r.findEnumDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, ed, nil, nil + case protoreflect.MessageKind, protoreflect.GroupKind: + md, err := r.findMessageDescriptor(scope, ref, isWeak) + if err != nil { + return 0, nil, nil, err + } + return k, nil, md, nil + case 0: + // Handle unspecified kinds (possible with parsers that operate + // on a per-file basis without knowledge of dependencies). + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return 0, nil, nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return 0, nil, nil, err + } + switch d := d.(type) { + case protoreflect.EnumDescriptor: + return protoreflect.EnumKind, d, nil, nil + case protoreflect.MessageDescriptor: + return protoreflect.MessageKind, nil, d, nil + default: + return 0, nil, nil, errors.New("unknown kind") + } + default: + if ref != "" { + return 0, nil, nil, errors.New("target name cannot be specified for %v", k) + } + if !k.IsValid() { + return 0, nil, nil, errors.New("invalid kind: %d", k) + } + return k, nil, nil, nil + } +} + +// findDescriptor finds the descriptor by name, +// which may be a relative name within some scope. +// +// Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", +// then the following full names are searched: +// * fizz.buzz.Foo.Bar +// * fizz.Foo.Bar +// * Foo.Bar +func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { + if !ref.IsValid() { + return nil, errors.New("invalid name reference: %q", ref) + } + if ref.IsFull() { + scope, ref = "", ref[1:] + } + var foundButNotImported protoreflect.Descriptor + for { + // Derive the full name to search. + s := protoreflect.FullName(ref) + if scope != "" { + s = scope + "." + s + } + + // Check the current file for the descriptor. + if d, ok := r.local[s]; ok { + return d, nil + } + + // Check the remote registry for the descriptor. + d, err := r.remote.FindDescriptorByName(s) + if err == nil { + // Only allow descriptors covered by one of the imports. + if r.imports[d.ParentFile().Path()] { + return d, nil + } + foundButNotImported = d + } else if err != protoregistry.NotFound { + return nil, errors.Wrap(err, "%q", s) + } + + // Continue on at a higher level of scoping. 
+ if scope == "" { + if d := foundButNotImported; d != nil { + return nil, errors.New("resolved %q, but %q is not imported", d.FullName(), d.ParentFile().Path()) + } + return nil, protoregistry.NotFound + } + scope = scope.Parent() + } +} + +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderEnum(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + ed, ok := d.(protoreflect.EnumDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an enum", d.FullName()) + } + return ed, nil +} + +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { + d, err := r.findDescriptor(scope, ref) + if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + return filedesc.PlaceholderMessage(ref.FullName()), nil + } else if err == protoregistry.NotFound { + return nil, errors.New("%q not found", ref.FullName()) + } else if err != nil { + return nil, err + } + md, ok := d.(protoreflect.MessageDescriptor) + if !ok { + return nil, errors.New("resolved %q, but it is not an message", d.FullName()) + } + return md, nil +} + +// partialName is the partial name. A leading dot means that the name is full, +// otherwise the name is relative to some current scope. +// See google.protobuf.FieldDescriptorProto.type_name. +type partialName string + +func (s partialName) IsFull() bool { + return len(s) > 0 && s[0] == '.' +} + +func (s partialName) IsValid() bool { + if s.IsFull() { + return protoreflect.FullName(s[1:]).IsValid() + } + return protoreflect.FullName(s).IsValid() +} + +const unknownPrefix = "*." + +// FullName converts the partial name to a full name on a best-effort basis. +// If relative, it creates an invalid full name, using a "*." prefix +// to indicate that the start of the full name is unknown. 
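+// For example, the full reference ".fizz.buzz.Foo" converts to "fizz.buzz.Foo",
+// while the relative reference "Foo.Bar" converts to "*.Foo.Bar".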
+func (s partialName) FullName() protoreflect.FullName { + if s.IsFull() { + return protoreflect.FullName(s[1:]) + } + return protoreflect.FullName(unknownPrefix + s) +} + +func unmarshalDefault(s string, fd protoreflect.FieldDescriptor, allowUnresolvable bool) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { + var evs protoreflect.EnumValueDescriptors + if fd.Enum() != nil { + evs = fd.Enum().Values() + } + v, ev, err := defval.Unmarshal(s, fd.Kind(), evs, defval.Descriptor) + if err != nil && allowUnresolvable && evs != nil && protoreflect.Name(s).IsValid() { + v = protoreflect.ValueOfEnum(0) + if evs.Len() > 0 { + v = protoreflect.ValueOfEnum(evs.Get(0).Number()) + } + ev = filedesc.PlaceholderEnumValue(fd.Enum().FullName().Parent().Append(protoreflect.Name(s))) + } else if err != nil { + return v, ev, err + } + if fd.Syntax() == protoreflect.Proto3 { + return v, ev, errors.New("cannot be specified under proto3 semantics") + } + if fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind || fd.Cardinality() == protoreflect.Repeated { + return v, ev, errors.New("cannot be specified on composite types") + } + return v, ev, nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go new file mode 100644 index 000000000..2d5fa9936 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -0,0 +1,371 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "strings" + "unicode" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +func validateEnumDeclarations(es []filedesc.Enum, eds []*descriptorpb.EnumDescriptorProto) error { + for i, ed := range eds { + e := &es[i] + if err := e.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("enum %q reserved names has %v", e.FullName(), err) + } + if err := e.L2.ReservedRanges.CheckValid(); err != nil { + return errors.New("enum %q reserved ranges has %v", e.FullName(), err) + } + if len(ed.GetValue()) == 0 { + return errors.New("enum %q must contain at least one value declaration", e.FullName()) + } + allowAlias := ed.GetOptions().GetAllowAlias() + foundAlias := false + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + if v2 := e.Values().ByNumber(v1.Number()); v1 != v2 { + foundAlias = true + if !allowAlias { + return errors.New("enum %q has conflicting non-aliased values on number %d: %q with %q", e.FullName(), v1.Number(), v1.Name(), v2.Name()) + } + } + } + if allowAlias && !foundAlias { + return errors.New("enum %q allows aliases, but none were found", e.FullName()) + } + if e.Syntax() == protoreflect.Proto3 { + if v := e.Values().Get(0); v.Number() != 0 { + return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName()) + } + // Verify that value names in proto3 do not conflict if the + // case-insensitive prefix is removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055 + names := map[string]protoreflect.EnumValueDescriptor{} + prefix := strings.Replace(strings.ToLower(string(e.Name())), "_", "", -1) + for i := 0; i < e.Values().Len(); i++ { + v1 := e.Values().Get(i) + s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix)) + if v2, ok := names[s]; ok && v1.Number() != v2.Number() { + return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name()) + } + names[s] = v1 + } + } + + for j, vd := range ed.GetValue() { + v := &e.L2.Values.List[j] + if vd.Number == nil { + return errors.New("enum value %q must have a specified number", v.FullName()) + } + if e.L2.ReservedNames.Has(v.Name()) { + return errors.New("enum value %q must not use reserved name", v.FullName()) + } + if e.L2.ReservedRanges.Has(v.Number()) { + return errors.New("enum value %q must not use reserved number %d", v.FullName(), v.Number()) + } + } + } + return nil +} + +func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error { + for i, md := range mds { + m := &ms[i] + + // Handle the message descriptor itself. + isMessageSet := md.GetOptions().GetMessageSetWireFormat() + if err := m.L2.ReservedNames.CheckValid(); err != nil { + return errors.New("message %q reserved names has %v", m.FullName(), err) + } + if err := m.L2.ReservedRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q reserved ranges has %v", m.FullName(), err) + } + if err := m.L2.ExtensionRanges.CheckValid(isMessageSet); err != nil { + return errors.New("message %q extension ranges has %v", m.FullName(), err) + } + if err := (*filedesc.FieldRanges).CheckOverlap(&m.L2.ReservedRanges, &m.L2.ExtensionRanges); err != nil { + return errors.New("message %q reserved and extension ranges has %v", m.FullName(), err) + } + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + if f2 := m.Fields().ByNumber(f1.Number()); f1 != f2 { + return errors.New("message %q has conflicting fields: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + } + if isMessageSet && !flags.ProtoLegacy { + return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName()) + } + if isMessageSet && (m.Syntax() != protoreflect.Proto2 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) { + return errors.New("message %q is an invalid proto1 MessageSet", m.FullName()) + } + if m.Syntax() == protoreflect.Proto3 { + if m.ExtensionRanges().Len() > 0 { + return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName()) + } + // Verify that field names in proto3 do not conflict if lowercased + // with all underscores removed. 
+ // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847 + names := map[string]protoreflect.FieldDescriptor{} + for i := 0; i < m.Fields().Len(); i++ { + f1 := m.Fields().Get(i) + s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1) + if f2, ok := names[s]; ok { + return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name()) + } + names[s] = f1 + } + } + + for j, fd := range md.GetField() { + f := &m.L2.Fields.List[j] + if m.L2.ReservedNames.Has(f.Name()) { + return errors.New("message field %q must not use reserved name", f.FullName()) + } + if !f.Number().IsValid() { + return errors.New("message field %q has an invalid number: %d", f.FullName(), f.Number()) + } + if !f.Cardinality().IsValid() { + return errors.New("message field %q has an invalid cardinality: %d", f.FullName(), f.Cardinality()) + } + if m.L2.ReservedRanges.Has(f.Number()) { + return errors.New("message field %q must not use reserved number %d", f.FullName(), f.Number()) + } + if m.L2.ExtensionRanges.Has(f.Number()) { + return errors.New("message field %q with number %d in extension range", f.FullName(), f.Number()) + } + if fd.Extendee != nil { + return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee()) + } + if f.L1.IsProto3Optional { + if f.Syntax() != protoreflect.Proto3 { + return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName()) + } + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q under proto3 optional semantics must have optional cardinality", f.FullName()) + } + if f.ContainingOneof() != nil && f.ContainingOneof().Fields().Len() != 1 { + return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) + } + } + if f.IsWeak() && !flags.ProtoLegacy { + return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) + } + if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) { + return errors.New("message field %q may only be weak for an optional message", f.FullName()) + } + if f.IsPacked() && !isPackable(f) { + return errors.New("message field %q is not packable", f.FullName()) + } + if err := checkValidGroup(f); err != nil { + return errors.New("message field %q is an invalid group: %v", f.FullName(), err) + } + if err := checkValidMap(f); err != nil { + return errors.New("message field %q is an invalid map: %v", f.FullName(), err) + } + if f.Syntax() == protoreflect.Proto3 { + if f.Cardinality() == protoreflect.Required { + return errors.New("message field %q using proto3 semantics cannot be required", f.FullName()) + } + if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 { + return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName()) + } + } + } + seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs + for j := range md.GetOneofDecl() { + o := &m.L2.Oneofs.List[j] + if o.Fields().Len() == 0 { + return errors.New("message oneof %q must contain at least one field declaration", o.FullName()) + } + if n := o.Fields().Len(); n-1 != (o.Fields().Get(n-1).Index() - o.Fields().Get(0).Index()) { + return errors.New("message oneof %q must have consecutively declared fields", o.FullName()) + } + + if 
o.IsSynthetic() { + seenSynthetic = true + continue + } + if !o.IsSynthetic() && seenSynthetic { + return errors.New("message oneof %q must be declared before synthetic oneofs", o.FullName()) + } + + for i := 0; i < o.Fields().Len(); i++ { + f := o.Fields().Get(i) + if f.Cardinality() != protoreflect.Optional { + return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) + } + if f.IsWeak() { + return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) + } + } + } + + if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil { + return err + } + if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil { + return err + } + if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil { + return err + } + } + return nil +} + +func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error { + for i, xd := range xds { + x := &xs[i] + // NOTE: Avoid using the IsValid method since extensions to MessageSet + // may have a field number higher than normal. This check only verifies + // that the number is not negative or reserved. We check again later + // if we know that the extendee is definitely not a MessageSet. + if n := x.Number(); n < 0 || (protowire.FirstReservedNumber <= n && n <= protowire.LastReservedNumber) { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + if !x.Cardinality().IsValid() || x.Cardinality() == protoreflect.Required { + return errors.New("extension field %q has an invalid cardinality: %d", x.FullName(), x.Cardinality()) + } + if xd.JsonName != nil { + if xd.GetJsonName() != strs.JSONCamelCase(string(x.Name())) { + return errors.New("extension field %q may not have an explicitly set JSON name: %q", x.FullName(), xd.GetJsonName()) + } + } + if xd.OneofIndex != nil { + return errors.New("extension field %q may not be part of a oneof", x.FullName()) + } + if md := x.ContainingMessage(); !md.IsPlaceholder() { + if !md.ExtensionRanges().Has(x.Number()) { + return errors.New("extension field %q extends %q with non-extension field number: %d", x.FullName(), md.FullName(), x.Number()) + } + isMessageSet := md.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() + if isMessageSet && !isOptionalMessage(x) { + return errors.New("extension field %q extends MessageSet and must be an optional message", x.FullName()) + } + if !isMessageSet && !x.Number().IsValid() { + return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) + } + } + if xd.GetOptions().GetWeak() { + return errors.New("extension field %q cannot be a weak reference", x.FullName()) + } + if x.IsPacked() && !isPackable(x) { + return errors.New("extension field %q is not packable", x.FullName()) + } + if err := checkValidGroup(x); err != nil { + return errors.New("extension field %q is an invalid group: %v", x.FullName(), err) + } + if md := x.Message(); md != nil && md.IsMapEntry() { + return errors.New("extension field %q cannot be a map entry", x.FullName()) + } + if x.Syntax() == protoreflect.Proto3 { + switch x.ContainingMessage().FullName() { + case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.EnumValueOptions)(nil).ProtoReflect().Descriptor().FullName(): + case 
(*descriptorpb.MessageOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.FieldOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.OneofOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ExtensionRangeOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.ServiceOptions)(nil).ProtoReflect().Descriptor().FullName(): + case (*descriptorpb.MethodOptions)(nil).ProtoReflect().Descriptor().FullName(): + default: + return errors.New("extension field %q cannot be declared in proto3 unless extended descriptor options", x.FullName()) + } + } + } + return nil +} + +// isOptionalMessage reports whether this is an optional message. +// If the kind is unknown, it is assumed to be a message. +func isOptionalMessage(fd protoreflect.FieldDescriptor) bool { + return (fd.Kind() == 0 || fd.Kind() == protoreflect.MessageKind) && fd.Cardinality() == protoreflect.Optional +} + +// isPackable checks whether the pack option can be specified. +func isPackable(fd protoreflect.FieldDescriptor) bool { + switch fd.Kind() { + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: + return false + } + return fd.IsList() +} + +// checkValidGroup reports whether fd is a valid group according to the same +// rules that protoc imposes. +func checkValidGroup(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case fd.Kind() != protoreflect.GroupKind: + return nil + case fd.Syntax() != protoreflect.Proto2: + return errors.New("invalid under proto2 semantics") + case md == nil || md.IsPlaceholder(): + return errors.New("message must be resolvable") + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case !unicode.IsUpper(rune(md.Name()[0])): + return errors.New("message name must start with an uppercase") + case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))): + return errors.New("field name must be lowercased form of the message name") + } + return nil +} + +// checkValidMap checks whether the field is a valid map according to the same +// rules that protoc imposes. 
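+// In generated descriptors a map<K, V> field is a repeated field of an
+// implicit map-entry message containing exactly a key field (number 1) and a
+// value field (number 2); the checks below verify that shape.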
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:6045-6115 +func checkValidMap(fd protoreflect.FieldDescriptor) error { + md := fd.Message() + switch { + case md == nil || !md.IsMapEntry(): + return nil + case fd.FullName().Parent() != md.FullName().Parent(): + return errors.New("message and field must be declared in the same scope") + case md.Name() != protoreflect.Name(strs.MapEntryName(string(fd.Name()))): + return errors.New("incorrect implicit map entry name") + case fd.Cardinality() != protoreflect.Repeated: + return errors.New("field must be repeated") + case md.Fields().Len() != 2: + return errors.New("message must have exactly two fields") + case md.ExtensionRanges().Len() > 0: + return errors.New("message must not have any extension ranges") + case md.Enums().Len()+md.Messages().Len()+md.Extensions().Len() > 0: + return errors.New("message must not have any nested declarations") + } + kf := md.Fields().Get(0) + vf := md.Fields().Get(1) + switch { + case kf.Name() != genid.MapEntry_Key_field_name || kf.Number() != genid.MapEntry_Key_field_number || kf.Cardinality() != protoreflect.Optional || kf.ContainingOneof() != nil || kf.HasDefault(): + return errors.New("invalid key field") + case vf.Name() != genid.MapEntry_Value_field_name || vf.Number() != genid.MapEntry_Value_field_number || vf.Cardinality() != protoreflect.Optional || vf.ContainingOneof() != nil || vf.HasDefault(): + return errors.New("invalid value field") + } + switch kf.Kind() { + case protoreflect.BoolKind: // bool + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: // int32 + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: // int64 + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: // uint32 + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: // uint64 + case protoreflect.StringKind: // string + default: + return errors.New("invalid key kind: %v", kf.Kind()) + } + if e := vf.Enum(); e != nil && e.Values().Len() > 0 && e.Values().Get(0).Number() != 0 { + return errors.New("map enum value must have zero number for the first value") + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go new file mode 100644 index 000000000..00d35e02e --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -0,0 +1,242 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protodesc + +import ( + "fmt" + "strings" + + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + + "google.golang.org/protobuf/types/descriptorpb" +) + +// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a +// google.protobuf.FileDescriptorProto message. 
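+//
+// For example (a sketch, where mypb.M stands for any generated message type),
+// the descriptor proto of the file declaring a message can be recovered with:
+//
+//	fd := (&mypb.M{}).ProtoReflect().Descriptor().ParentFile()
+//	fdp := ToFileDescriptorProto(fd)
+//
+// The result can be converted back with NewFile, given a resolver such as
+// protoregistry.GlobalFiles that can supply the file's imports.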
+func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto { + p := &descriptorpb.FileDescriptorProto{ + Name: proto.String(file.Path()), + Package: proto.String(string(file.Package())), + Options: proto.Clone(file.Options()).(*descriptorpb.FileOptions), + } + for i, imports := 0, file.Imports(); i < imports.Len(); i++ { + imp := imports.Get(i) + p.Dependency = append(p.Dependency, imp.Path()) + if imp.IsPublic { + p.PublicDependency = append(p.PublicDependency, int32(i)) + } + if imp.IsWeak { + p.WeakDependency = append(p.WeakDependency, int32(i)) + } + } + for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { + loc := locs.Get(i) + l := &descriptorpb.SourceCodeInfo_Location{} + l.Path = append(l.Path, loc.Path...) + if loc.StartLine == loc.EndLine { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndColumn)} + } else { + l.Span = []int32{int32(loc.StartLine), int32(loc.StartColumn), int32(loc.EndLine), int32(loc.EndColumn)} + } + l.LeadingDetachedComments = append([]string(nil), loc.LeadingDetachedComments...) + if loc.LeadingComments != "" { + l.LeadingComments = proto.String(loc.LeadingComments) + } + if loc.TrailingComments != "" { + l.TrailingComments = proto.String(loc.TrailingComments) + } + if p.SourceCodeInfo == nil { + p.SourceCodeInfo = &descriptorpb.SourceCodeInfo{} + } + p.SourceCodeInfo.Location = append(p.SourceCodeInfo.Location, l) + + } + for i, messages := 0, file.Messages(); i < messages.Len(); i++ { + p.MessageType = append(p.MessageType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, file.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, services := 0, file.Services(); i < services.Len(); i++ { + p.Service = append(p.Service, ToServiceDescriptorProto(services.Get(i))) + } + for i, exts := 0, file.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + if syntax := file.Syntax(); syntax != protoreflect.Proto2 { + p.Syntax = proto.String(file.Syntax().String()) + } + return p +} + +// ToDescriptorProto copies a protoreflect.MessageDescriptor into a +// google.protobuf.DescriptorProto message. 
+func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto { + p := &descriptorpb.DescriptorProto{ + Name: proto.String(string(message.Name())), + Options: proto.Clone(message.Options()).(*descriptorpb.MessageOptions), + } + for i, fields := 0, message.Fields(); i < fields.Len(); i++ { + p.Field = append(p.Field, ToFieldDescriptorProto(fields.Get(i))) + } + for i, exts := 0, message.Extensions(); i < exts.Len(); i++ { + p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i))) + } + for i, messages := 0, message.Messages(); i < messages.Len(); i++ { + p.NestedType = append(p.NestedType, ToDescriptorProto(messages.Get(i))) + } + for i, enums := 0, message.Enums(); i < enums.Len(); i++ { + p.EnumType = append(p.EnumType, ToEnumDescriptorProto(enums.Get(i))) + } + for i, xranges := 0, message.ExtensionRanges(); i < xranges.Len(); i++ { + xrange := xranges.Get(i) + p.ExtensionRange = append(p.ExtensionRange, &descriptorpb.DescriptorProto_ExtensionRange{ + Start: proto.Int32(int32(xrange[0])), + End: proto.Int32(int32(xrange[1])), + Options: proto.Clone(message.ExtensionRangeOptions(i)).(*descriptorpb.ExtensionRangeOptions), + }) + } + for i, oneofs := 0, message.Oneofs(); i < oneofs.Len(); i++ { + p.OneofDecl = append(p.OneofDecl, ToOneofDescriptorProto(oneofs.Get(i))) + } + for i, ranges := 0, message.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.DescriptorProto_ReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, message.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a +// google.protobuf.FieldDescriptorProto message. +func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto { + p := &descriptorpb.FieldDescriptorProto{ + Name: proto.String(string(field.Name())), + Number: proto.Int32(int32(field.Number())), + Label: descriptorpb.FieldDescriptorProto_Label(field.Cardinality()).Enum(), + Options: proto.Clone(field.Options()).(*descriptorpb.FieldOptions), + } + if field.IsExtension() { + p.Extendee = fullNameOf(field.ContainingMessage()) + } + if field.Kind().IsValid() { + p.Type = descriptorpb.FieldDescriptorProto_Type(field.Kind()).Enum() + } + if field.Enum() != nil { + p.TypeName = fullNameOf(field.Enum()) + } + if field.Message() != nil { + p.TypeName = fullNameOf(field.Message()) + } + if field.HasJSONName() { + p.JsonName = proto.String(field.JSONName()) + } + if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() { + p.Proto3Optional = proto.Bool(true) + } + if field.HasDefault() { + def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor) + if err != nil && field.DefaultEnumValue() != nil { + def = string(field.DefaultEnumValue().Name()) // occurs for unresolved enum values + } else if err != nil { + panic(fmt.Sprintf("%v: %v", field.FullName(), err)) + } + p.DefaultValue = proto.String(def) + } + if oneof := field.ContainingOneof(); oneof != nil { + p.OneofIndex = proto.Int32(int32(oneof.Index())) + } + return p +} + +// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a +// google.protobuf.OneofDescriptorProto message. 
+func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto { + return &descriptorpb.OneofDescriptorProto{ + Name: proto.String(string(oneof.Name())), + Options: proto.Clone(oneof.Options()).(*descriptorpb.OneofOptions), + } +} + +// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a +// google.protobuf.EnumDescriptorProto message. +func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto { + p := &descriptorpb.EnumDescriptorProto{ + Name: proto.String(string(enum.Name())), + Options: proto.Clone(enum.Options()).(*descriptorpb.EnumOptions), + } + for i, values := 0, enum.Values(); i < values.Len(); i++ { + p.Value = append(p.Value, ToEnumValueDescriptorProto(values.Get(i))) + } + for i, ranges := 0, enum.ReservedRanges(); i < ranges.Len(); i++ { + rrange := ranges.Get(i) + p.ReservedRange = append(p.ReservedRange, &descriptorpb.EnumDescriptorProto_EnumReservedRange{ + Start: proto.Int32(int32(rrange[0])), + End: proto.Int32(int32(rrange[1])), + }) + } + for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ { + p.ReservedName = append(p.ReservedName, string(names.Get(i))) + } + return p +} + +// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a +// google.protobuf.EnumValueDescriptorProto message. +func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto { + return &descriptorpb.EnumValueDescriptorProto{ + Name: proto.String(string(value.Name())), + Number: proto.Int32(int32(value.Number())), + Options: proto.Clone(value.Options()).(*descriptorpb.EnumValueOptions), + } +} + +// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a +// google.protobuf.ServiceDescriptorProto message. +func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto { + p := &descriptorpb.ServiceDescriptorProto{ + Name: proto.String(string(service.Name())), + Options: proto.Clone(service.Options()).(*descriptorpb.ServiceOptions), + } + for i, methods := 0, service.Methods(); i < methods.Len(); i++ { + p.Method = append(p.Method, ToMethodDescriptorProto(methods.Get(i))) + } + return p +} + +// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a +// google.protobuf.MethodDescriptorProto message. +func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto { + p := &descriptorpb.MethodDescriptorProto{ + Name: proto.String(string(method.Name())), + InputType: fullNameOf(method.Input()), + OutputType: fullNameOf(method.Output()), + Options: proto.Clone(method.Options()).(*descriptorpb.MethodOptions), + } + if method.IsStreamingClient() { + p.ClientStreaming = proto.Bool(true) + } + if method.IsStreamingServer() { + p.ServerStreaming = proto.Bool(true) + } + return p +} + +func fullNameOf(d protoreflect.Descriptor) *string { + if d == nil { + return nil + } + if strings.HasPrefix(string(d.FullName()), unknownPrefix) { + return proto.String(string(d.FullName()[len(unknownPrefix):])) + } + return proto.String("." + string(d.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go new file mode 100644 index 000000000..6be5d16e9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -0,0 +1,77 @@ +// Copyright 2020 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "google.golang.org/protobuf/internal/pragma" +) + +// The following types are used by the fast-path Message.ProtoMethods method. +// +// To avoid polluting the public protoreflect API with types used only by +// low-level implementations, the canonical definitions of these types are +// in the runtime/protoiface package. The definitions here and in protoiface +// must be kept in sync. +type ( + methods = struct { + pragma.NoUnkeyedLiterals + Flags supportFlags + Size func(sizeInput) sizeOutput + Marshal func(marshalInput) (marshalOutput, error) + Unmarshal func(unmarshalInput) (unmarshalOutput, error) + Merge func(mergeInput) mergeOutput + CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + } + supportFlags = uint64 + sizeInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Flags uint8 + } + sizeOutput = struct { + pragma.NoUnkeyedLiterals + Size int + } + marshalInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Buf []byte + Flags uint8 + } + marshalOutput = struct { + pragma.NoUnkeyedLiterals + Buf []byte + } + unmarshalInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Buf []byte + Flags uint8 + Resolver interface { + FindExtensionByName(field FullName) (ExtensionType, error) + FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) + } + } + unmarshalOutput = struct { + pragma.NoUnkeyedLiterals + Flags uint8 + } + mergeInput = struct { + pragma.NoUnkeyedLiterals + Source Message + Destination Message + } + mergeOutput = struct { + pragma.NoUnkeyedLiterals + Flags uint8 + } + checkInitializedInput = struct { + pragma.NoUnkeyedLiterals + Message Message + } + checkInitializedOutput = struct { + pragma.NoUnkeyedLiterals + } +) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go new file mode 100644 index 000000000..dd85915bd --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -0,0 +1,504 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoreflect provides interfaces to dynamically manipulate messages. +// +// This package includes type descriptors which describe the structure of types +// defined in proto source files and value interfaces which provide the +// ability to examine and manipulate the contents of messages. +// +// +// Protocol Buffer Descriptors +// +// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// are immutable objects that represent protobuf type information. +// They are wrappers around the messages declared in descriptor.proto. +// Protobuf descriptors alone lack any information regarding Go types. +// +// Enums and messages generated by this module implement Enum and ProtoMessage, +// where the Descriptor and ProtoReflect.Descriptor accessors respectively +// return the protobuf descriptor for the values. +// +// The protobuf descriptor interfaces are not meant to be implemented by +// user code since they might need to be extended in the future to support +// additions to the protobuf language. +// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// google.protobuf.DescriptorProto messages and protobuf descriptors. 
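As an illustrative sketch of the descriptor accessors described above: a generated message exposes its MessageDescriptor through ProtoReflect().Descriptor(). The well-known timestamppb type used here is an assumption for the example, not part of this file:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Descriptors carry only protobuf type information: names, numbers, kinds.
	md := (&timestamppb.Timestamp{}).ProtoReflect().Descriptor()
	fmt.Println(md.FullName()) // google.protobuf.Timestamp

	fields := md.Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		fmt.Println(fd.Number(), fd.Name(), fd.Kind()) // 1 seconds int64, then 2 nanos int32
	}
}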
+// +// +// Go Type Descriptors +// +// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// a concrete Go type that represents the associated protobuf descriptor. +// There is commonly a one-to-one relationship between protobuf descriptors and +// Go type descriptors, but it can potentially be a one-to-many relationship. +// +// Enums and messages generated by this module implement Enum and ProtoMessage, +// where the Type and ProtoReflect.Type accessors respectively +// return the protobuf descriptor for the values. +// +// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// create Go type descriptors from protobuf descriptors. +// +// +// Value Interfaces +// +// The Enum and Message interfaces provide a reflective view over an +// enum or message instance. For enums, it provides the ability to retrieve +// the enum value number for any concrete enum type. For messages, it provides +// the ability to access or manipulate fields of the message. +// +// To convert a proto.Message to a protoreflect.Message, use the +// former's ProtoReflect method. Since the ProtoReflect method is new to the +// v2 message interface, it may not be present on older message implementations. +// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// to obtain a reflective view on older messages. +// +// +// Relationships +// +// The following diagrams demonstrate the relationships between +// various types declared in this package. +// +// +// ┌───────────────────────────────────┐ +// V │ +// ┌────────────── New(n) ─────────────┐ │ +// │ │ │ +// │ ┌──── Descriptor() ──┐ │ ┌── Number() ──┐ │ +// │ │ V V │ V │ +// ╔════════════╗ ╔════════════════╗ ╔════════╗ ╔════════════╗ +// ║ EnumType ║ ║ EnumDescriptor ║ ║ Enum ║ ║ EnumNumber ║ +// ╚════════════╝ ╚════════════════╝ ╚════════╝ ╚════════════╝ +// Λ Λ │ │ +// │ └─── Descriptor() ──┘ │ +// │ │ +// └────────────────── Type() ───────┘ +// +// • An EnumType describes a concrete Go enum type. +// It has an EnumDescriptor and can construct an Enum instance. +// +// • An EnumDescriptor describes an abstract protobuf enum type. +// +// • An Enum is a concrete enum instance. Generated enums implement Enum. +// +// +// ┌──────────────── New() ─────────────────┐ +// │ │ +// │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐ +// │ │ V V │ V +// ╔═════════════╗ ╔═══════════════════╗ ╔═════════╗ ╔══════════════╗ +// ║ MessageType ║ ║ MessageDescriptor ║ ║ Message ║ ║ ProtoMessage ║ +// ╚═════════════╝ ╚═══════════════════╝ ╚═════════╝ ╚══════════════╝ +// Λ Λ │ │ Λ │ +// │ └──── Descriptor() ────┘ │ └─ ProtoReflect() ─┘ +// │ │ +// └─────────────────── Type() ─────────┘ +// +// • A MessageType describes a concrete Go message type. +// It has a MessageDescriptor and can construct a Message instance. +// +// • A MessageDescriptor describes an abstract protobuf message type. +// +// • A Message is a concrete message instance. Generated messages implement +// ProtoMessage, which can convert to/from a Message. +// +// +// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ +// │ V │ V +// ╔═══════════════╗ ╔═════════════════════════╗ ╔═════════════════════╗ +// ║ ExtensionType ║ ║ ExtensionTypeDescriptor ║ ║ ExtensionDescriptor ║ +// ╚═══════════════╝ ╚═════════════════════════╝ ╚═════════════════════╝ +// Λ │ │ Λ │ Λ +// └─────── Type() ───────┘ │ └─── may implement ────┘ │ +// │ │ +// └────── implements ────────┘ +// +// • An ExtensionType describes a concrete Go implementation of an extension. 
+// It has an ExtensionTypeDescriptor and can convert to/from
+// abstract Values and Go values.
+//
+// • An ExtensionTypeDescriptor is an ExtensionDescriptor
+// which also has an ExtensionType.
+//
+// • An ExtensionDescriptor describes an abstract protobuf extension field and
+// may not always be an ExtensionTypeDescriptor.
+package protoreflect
+
+import (
+	"fmt"
+	"strings"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/pragma"
+)
+
+type doNotImplement pragma.DoNotImplement
+
+// ProtoMessage is the top-level interface that all proto messages implement.
+// This is declared in the protoreflect package to avoid a cyclic dependency;
+// use the proto.Message type instead, which aliases this type.
+type ProtoMessage interface{ ProtoReflect() Message }
+
+// Syntax is the language version of the proto file.
+type Syntax syntax
+
+type syntax int8 // keep exact type opaque as the int type may change
+
+const (
+	Proto2 Syntax = 2
+	Proto3 Syntax = 3
+)
+
+// IsValid reports whether the syntax is valid.
+func (s Syntax) IsValid() bool {
+	switch s {
+	case Proto2, Proto3:
+		return true
+	default:
+		return false
+	}
+}
+
+// String returns s as a proto source identifier (e.g., "proto2").
+func (s Syntax) String() string {
+	switch s {
+	case Proto2:
+		return "proto2"
+	case Proto3:
+		return "proto3"
+	default:
+		return fmt.Sprintf("<unknown:%d>", s)
+	}
+}
+
+// GoString returns s as a Go source identifier (e.g., "Proto2").
+func (s Syntax) GoString() string {
+	switch s {
+	case Proto2:
+		return "Proto2"
+	case Proto3:
+		return "Proto3"
+	default:
+		return fmt.Sprintf("Syntax(%d)", s)
+	}
+}
+
+// Cardinality determines whether a field is optional, required, or repeated.
+type Cardinality cardinality
+
+type cardinality int8 // keep exact type opaque as the int type may change
+
+// Constants as defined by the google.protobuf.Cardinality enumeration.
+const (
+	Optional Cardinality = 1 // appears zero or one times
+	Required Cardinality = 2 // appears exactly one time; invalid with Proto3
+	Repeated Cardinality = 3 // appears zero or more times
+)
+
+// IsValid reports whether the cardinality is valid.
+func (c Cardinality) IsValid() bool {
+	switch c {
+	case Optional, Required, Repeated:
+		return true
+	default:
+		return false
+	}
+}
+
+// String returns c as a proto source identifier (e.g., "optional").
+func (c Cardinality) String() string {
+	switch c {
+	case Optional:
+		return "optional"
+	case Required:
+		return "required"
+	case Repeated:
+		return "repeated"
+	default:
+		return fmt.Sprintf("<unknown:%d>", c)
+	}
+}
+
+// GoString returns c as a Go source identifier (e.g., "Optional").
+func (c Cardinality) GoString() string {
+	switch c {
+	case Optional:
+		return "Optional"
+	case Required:
+		return "Required"
+	case Repeated:
+		return "Repeated"
+	default:
+		return fmt.Sprintf("Cardinality(%d)", c)
+	}
+}
+
+// Kind indicates the basic proto kind of a field.
+type Kind kind
+
+type kind int8 // keep exact type opaque as the int type may change
+
+// Constants as defined by the google.protobuf.Field.Kind enumeration.
+const (
+	BoolKind     Kind = 8
+	EnumKind     Kind = 14
+	Int32Kind    Kind = 5
+	Sint32Kind   Kind = 17
+	Uint32Kind   Kind = 13
+	Int64Kind    Kind = 3
+	Sint64Kind   Kind = 18
+	Uint64Kind   Kind = 4
+	Sfixed32Kind Kind = 15
+	Fixed32Kind  Kind = 7
+	FloatKind    Kind = 2
+	Sfixed64Kind Kind = 16
+	Fixed64Kind  Kind = 6
+	DoubleKind   Kind = 1
+	StringKind   Kind = 9
+	BytesKind    Kind = 12
+	MessageKind  Kind = 11
+	GroupKind    Kind = 10
+)
+
+// IsValid reports whether the kind is valid.
+func (k Kind) IsValid() bool {
+	switch k {
+	case BoolKind, EnumKind,
+		Int32Kind, Sint32Kind, Uint32Kind,
+		Int64Kind, Sint64Kind, Uint64Kind,
+		Sfixed32Kind, Fixed32Kind, FloatKind,
+		Sfixed64Kind, Fixed64Kind, DoubleKind,
+		StringKind, BytesKind, MessageKind, GroupKind:
+		return true
+	default:
+		return false
+	}
+}
+
+// String returns k as a proto source identifier (e.g., "bool").
+func (k Kind) String() string {
+	switch k {
+	case BoolKind:
+		return "bool"
+	case EnumKind:
+		return "enum"
+	case Int32Kind:
+		return "int32"
+	case Sint32Kind:
+		return "sint32"
+	case Uint32Kind:
+		return "uint32"
+	case Int64Kind:
+		return "int64"
+	case Sint64Kind:
+		return "sint64"
+	case Uint64Kind:
+		return "uint64"
+	case Sfixed32Kind:
+		return "sfixed32"
+	case Fixed32Kind:
+		return "fixed32"
+	case FloatKind:
+		return "float"
+	case Sfixed64Kind:
+		return "sfixed64"
+	case Fixed64Kind:
+		return "fixed64"
+	case DoubleKind:
+		return "double"
+	case StringKind:
+		return "string"
+	case BytesKind:
+		return "bytes"
+	case MessageKind:
+		return "message"
+	case GroupKind:
+		return "group"
+	default:
+		return fmt.Sprintf("<unknown:%d>", k)
+	}
+}
+
+// GoString returns k as a Go source identifier (e.g., "BoolKind").
+func (k Kind) GoString() string {
+	switch k {
+	case BoolKind:
+		return "BoolKind"
+	case EnumKind:
+		return "EnumKind"
+	case Int32Kind:
+		return "Int32Kind"
+	case Sint32Kind:
+		return "Sint32Kind"
+	case Uint32Kind:
+		return "Uint32Kind"
+	case Int64Kind:
+		return "Int64Kind"
+	case Sint64Kind:
+		return "Sint64Kind"
+	case Uint64Kind:
+		return "Uint64Kind"
+	case Sfixed32Kind:
+		return "Sfixed32Kind"
+	case Fixed32Kind:
+		return "Fixed32Kind"
+	case FloatKind:
+		return "FloatKind"
+	case Sfixed64Kind:
+		return "Sfixed64Kind"
+	case Fixed64Kind:
+		return "Fixed64Kind"
+	case DoubleKind:
+		return "DoubleKind"
+	case StringKind:
+		return "StringKind"
+	case BytesKind:
+		return "BytesKind"
+	case MessageKind:
+		return "MessageKind"
+	case GroupKind:
+		return "GroupKind"
+	default:
+		return fmt.Sprintf("Kind(%d)", k)
+	}
+}
+
+// FieldNumber is the field number in a message.
+type FieldNumber = protowire.Number
+
+// FieldNumbers represent a list of field numbers.
+type FieldNumbers interface {
+	// Len reports the number of fields in the list.
+	Len() int
+	// Get returns the ith field number. It panics if out of bounds.
+	Get(i int) FieldNumber
+	// Has reports whether n is within the list of fields.
+	Has(n FieldNumber) bool
+
+	doNotImplement
+}
+
+// FieldRanges represent a list of field number ranges.
+type FieldRanges interface {
+	// Len reports the number of ranges in the list.
+	Len() int
+	// Get returns the ith range. It panics if out of bounds.
+	Get(i int) [2]FieldNumber // start inclusive; end exclusive
+	// Has reports whether n is within any of the ranges.
+	Has(n FieldNumber) bool
+
+	doNotImplement
+}
+
+// EnumNumber is the numeric value for an enum.
+type EnumNumber int32
+
+// EnumRanges represent a list of enum number ranges.
+type EnumRanges interface {
+	// Len reports the number of ranges in the list.
+ Len() int + // Get returns the ith range. It panics if out of bounds. + Get(i int) [2]EnumNumber // start inclusive; end inclusive + // Has reports whether n is within any of the ranges. + Has(n EnumNumber) bool + + doNotImplement +} + +// Name is the short name for a proto declaration. This is not the name +// as used in Go source code, which might not be identical to the proto name. +type Name string // e.g., "Kind" + +// IsValid reports whether s is a syntactically valid name. +// An empty name is invalid. +func (s Name) IsValid() bool { + return consumeIdent(string(s)) == len(s) +} + +// Names represent a list of names. +type Names interface { + // Len reports the number of names in the list. + Len() int + // Get returns the ith name. It panics if out of bounds. + Get(i int) Name + // Has reports whether s matches any names in the list. + Has(s Name) bool + + doNotImplement +} + +// FullName is a qualified name that uniquely identifies a proto declaration. +// A qualified name is the concatenation of the proto package along with the +// fully-declared name (i.e., name of parent preceding the name of the child), +// with a '.' delimiter placed between each Name. +// +// This should not have any leading or trailing dots. +type FullName string // e.g., "google.protobuf.Field.Kind" + +// IsValid reports whether s is a syntactically valid full name. +// An empty full name is invalid. +func (s FullName) IsValid() bool { + i := consumeIdent(string(s)) + if i < 0 { + return false + } + for len(s) > i { + if s[i] != '.' { + return false + } + i++ + n := consumeIdent(string(s[i:])) + if n < 0 { + return false + } + i += n + } + return true +} + +func consumeIdent(s string) (i int) { + if len(s) == 0 || !isLetter(s[i]) { + return -1 + } + i++ + for len(s) > i && isLetterDigit(s[i]) { + i++ + } + return i +} +func isLetter(c byte) bool { + return c == '_' || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') +} +func isLetterDigit(c byte) bool { + return isLetter(c) || ('0' <= c && c <= '9') +} + +// Name returns the short name, which is the last identifier segment. +// A single segment FullName is the Name itself. +func (n FullName) Name() Name { + if i := strings.LastIndexByte(string(n), '.'); i >= 0 { + return Name(n[i+1:]) + } + return Name(n) +} + +// Parent returns the full name with the trailing identifier removed. +// A single segment FullName has no parent. +func (n FullName) Parent() FullName { + if i := strings.LastIndexByte(string(n), '.'); i >= 0 { + return n[:i] + } + return "" +} + +// Append returns the qualified name appended with the provided short name. +// +// Invariant: n == n.Parent().Append(n.Name()) // assuming n is valid +func (n FullName) Append(s Name) FullName { + if n == "" { + return FullName(s) + } + return n + "." + FullName(s) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go new file mode 100644 index 000000000..32ea3d98c --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -0,0 +1,52 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +// SourceLocations is a list of source locations. +type SourceLocations interface { + // Len reports the number of source locations in the proto file. + Len() int + // Get returns the ith SourceLocation. It panics if out of bounds. 
+ Get(int) SourceLocation + + doNotImplement + + // TODO: Add ByPath and ByDescriptor helper methods. +} + +// SourceLocation describes a source location and +// corresponds with the google.protobuf.SourceCodeInfo.Location message. +type SourceLocation struct { + // Path is the path to the declaration from the root file descriptor. + // The contents of this slice must not be mutated. + Path SourcePath + + // StartLine and StartColumn are the zero-indexed starting location + // in the source file for the declaration. + StartLine, StartColumn int + // EndLine and EndColumn are the zero-indexed ending location + // in the source file for the declaration. + // In the descriptor.proto, the end line may be omitted if it is identical + // to the start line. Here, it is always populated. + EndLine, EndColumn int + + // LeadingDetachedComments are the leading detached comments + // for the declaration. The contents of this slice must not be mutated. + LeadingDetachedComments []string + // LeadingComments is the leading attached comment for the declaration. + LeadingComments string + // TrailingComments is the trailing attached comment for the declaration. + TrailingComments string +} + +// SourcePath identifies part of a file descriptor for a source location. +// The SourcePath is a sequence of either field numbers or indexes into +// a repeated field that form a path starting from the root file descriptor. +// +// See google.protobuf.SourceCodeInfo.Location.path. +type SourcePath []int32 + +// TODO: Add SourcePath.String method to pretty-print the path. For example: +// ".message_type[6].nested_type[15].field[3]" diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go new file mode 100644 index 000000000..5be14a725 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -0,0 +1,631 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +// Descriptor provides a set of accessors that are common to every descriptor. +// Each descriptor type wraps the equivalent google.protobuf.XXXDescriptorProto, +// but provides efficient lookup and immutability. +// +// Each descriptor is comparable. Equality implies that the two types are +// exactly identical. However, it is possible for the same semantically +// identical proto type to be represented by multiple type descriptors. +// +// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// If t1 == t2, then the types are definitely equal and all accessors return +// the same information. However, if t1 != t2, then it is still possible that +// they still represent the same proto type (e.g., t1.FullName == t2.FullName). +// This can occur if a descriptor type is created dynamically, or multiple +// versions of the same proto type are accidentally linked into the Go binary. +type Descriptor interface { + // ParentFile returns the parent file descriptor that this descriptor + // is declared within. The parent file for the file descriptor is itself. + // + // Support for this functionality is optional and may return nil. + ParentFile() FileDescriptor + + // Parent returns the parent containing this descriptor declaration. 
+ // The following shows the mapping from child type to possible parent types: + // + // ╔═════════════════════╤═══════════════════════════════════╗ + // ║ Child type │ Possible parent types ║ + // ╠═════════════════════╪═══════════════════════════════════╣ + // ║ FileDescriptor │ nil ║ + // ║ MessageDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ FieldDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ OneofDescriptor │ MessageDescriptor ║ + // ║ EnumDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ EnumValueDescriptor │ EnumDescriptor ║ + // ║ ServiceDescriptor │ FileDescriptor ║ + // ║ MethodDescriptor │ ServiceDescriptor ║ + // ╚═════════════════════╧═══════════════════════════════════╝ + // + // Support for this functionality is optional and may return nil. + Parent() Descriptor + + // Index returns the index of this descriptor within its parent. + // It returns 0 if the descriptor does not have a parent or if the parent + // is unknown. + Index() int + + // Syntax is the protobuf syntax. + Syntax() Syntax // e.g., Proto2 or Proto3 + + // Name is the short name of the declaration (i.e., FullName.Name). + Name() Name // e.g., "Any" + + // FullName is the fully-qualified name of the declaration. + // + // The FullName is a concatenation of the full name of the type that this + // type is declared within and the declaration name. For example, + // field "foo_field" in message "proto.package.MyMessage" is + // uniquely identified as "proto.package.MyMessage.foo_field". + // Enum values are an exception to the rule (see EnumValueDescriptor). + FullName() FullName // e.g., "google.protobuf.Any" + + // IsPlaceholder reports whether type information is missing since a + // dependency is not resolved, in which case only name information is known. + // + // Placeholder types may only be returned by the following accessors + // as a result of unresolved dependencies or weak imports: + // + // ╔═══════════════════════════════════╤═════════════════════╗ + // ║ Accessor │ Descriptor ║ + // ╠═══════════════════════════════════╪═════════════════════╣ + // ║ FileImports.FileDescriptor │ FileDescriptor ║ + // ║ FieldDescriptor.Enum │ EnumDescriptor ║ + // ║ FieldDescriptor.Message │ MessageDescriptor ║ + // ║ FieldDescriptor.DefaultEnumValue │ EnumValueDescriptor ║ + // ║ FieldDescriptor.ContainingMessage │ MessageDescriptor ║ + // ║ MethodDescriptor.Input │ MessageDescriptor ║ + // ║ MethodDescriptor.Output │ MessageDescriptor ║ + // ╚═══════════════════════════════════╧═════════════════════╝ + // + // If true, only Name and FullName are valid. + // For FileDescriptor, the Path is also valid. + IsPlaceholder() bool + + // Options returns the descriptor options. The caller must not modify + // the returned value. + // + // To avoid a dependency cycle, this function returns a proto.Message value. 
+ // The proto message type returned for each descriptor type is as follows: + // ╔═════════════════════╤══════════════════════════════════════════╗ + // ║ Go type │ Protobuf message type ║ + // ╠═════════════════════╪══════════════════════════════════════════╣ + // ║ FileDescriptor │ google.protobuf.FileOptions ║ + // ║ EnumDescriptor │ google.protobuf.EnumOptions ║ + // ║ EnumValueDescriptor │ google.protobuf.EnumValueOptions ║ + // ║ MessageDescriptor │ google.protobuf.MessageOptions ║ + // ║ FieldDescriptor │ google.protobuf.FieldOptions ║ + // ║ OneofDescriptor │ google.protobuf.OneofOptions ║ + // ║ ServiceDescriptor │ google.protobuf.ServiceOptions ║ + // ║ MethodDescriptor │ google.protobuf.MethodOptions ║ + // ╚═════════════════════╧══════════════════════════════════════════╝ + // + // This method returns a typed nil-pointer if no options are present. + // The caller must import the descriptorpb package to use this. + Options() ProtoMessage + + doNotImplement +} + +// FileDescriptor describes the types in a complete proto file and +// corresponds with the google.protobuf.FileDescriptorProto message. +// +// Top-level declarations: +// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +type FileDescriptor interface { + Descriptor // Descriptor.FullName is identical to Package + + // Path returns the file name, relative to the source tree root. + Path() string // e.g., "path/to/file.proto" + // Package returns the protobuf package namespace. + Package() FullName // e.g., "google.protobuf" + + // Imports is a list of imported proto files. + Imports() FileImports + + // Enums is a list of the top-level enum declarations. + Enums() EnumDescriptors + // Messages is a list of the top-level message declarations. + Messages() MessageDescriptors + // Extensions is a list of the top-level extension declarations. + Extensions() ExtensionDescriptors + // Services is a list of the top-level service declarations. + Services() ServiceDescriptors + + // SourceLocations is a list of source locations. + SourceLocations() SourceLocations + + isFileDescriptor +} +type isFileDescriptor interface{ ProtoType(FileDescriptor) } + +// FileImports is a list of file imports. +type FileImports interface { + // Len reports the number of files imported by this proto file. + Len() int + // Get returns the ith FileImport. It panics if out of bounds. + Get(i int) FileImport + + doNotImplement +} + +// FileImport is the declaration for a proto file import. +type FileImport struct { + // FileDescriptor is the file type for the given import. + // It is a placeholder descriptor if IsWeak is set or if a dependency has + // not been regenerated to implement the new reflection APIs. + FileDescriptor + + // IsPublic reports whether this is a public import, which causes this file + // to alias declarations within the imported file. The intended use cases + // for this feature is the ability to move proto files without breaking + // existing dependencies. + // + // The current file and the imported file must be within proto package. + IsPublic bool + + // IsWeak reports whether this is a weak import, which does not impose + // a direct dependency on the target file. + // + // Weak imports are a legacy proto1 feature. Equivalent behavior is + // achieved using proto2 extension fields or proto3 Any messages. + IsWeak bool +} + +// MessageDescriptor describes a message and +// corresponds with the google.protobuf.DescriptorProto message. 
+// +// Nested declarations: +// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, +// and/or MessageDescriptor. +type MessageDescriptor interface { + Descriptor + + // IsMapEntry indicates that this is an auto-generated message type to + // represent the entry type for a map field. + // + // Map entry messages have only two fields: + // • a "key" field with a field number of 1 + // • a "value" field with a field number of 2 + // The key and value types are determined by these two fields. + // + // If IsMapEntry is true, it implies that FieldDescriptor.IsMap is true + // for some field with this message type. + IsMapEntry() bool + + // Fields is a list of nested field declarations. + Fields() FieldDescriptors + // Oneofs is a list of nested oneof declarations. + Oneofs() OneofDescriptors + + // ReservedNames is a list of reserved field names. + ReservedNames() Names + // ReservedRanges is a list of reserved ranges of field numbers. + ReservedRanges() FieldRanges + // RequiredNumbers is a list of required field numbers. + // In Proto3, it is always an empty list. + RequiredNumbers() FieldNumbers + // ExtensionRanges is the field ranges used for extension fields. + // In Proto3, it is always an empty ranges. + ExtensionRanges() FieldRanges + // ExtensionRangeOptions returns the ith extension range options. + // + // To avoid a dependency cycle, this method returns a proto.Message value, + // which always contains a google.protobuf.ExtensionRangeOptions message. + // This method returns a typed nil-pointer if no options are present. + // The caller must import the descriptorpb package to use this. + ExtensionRangeOptions(i int) ProtoMessage + + // Enums is a list of nested enum declarations. + Enums() EnumDescriptors + // Messages is a list of nested message declarations. + Messages() MessageDescriptors + // Extensions is a list of nested extension declarations. + Extensions() ExtensionDescriptors + + isMessageDescriptor +} +type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } + +// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +type MessageType interface { + // New returns a newly allocated empty message. + New() Message + + // Zero returns an empty, read-only message. + Zero() Message + + // Descriptor returns the message descriptor. + // + // Invariant: t.Descriptor() == t.New().Descriptor() + Descriptor() MessageDescriptor +} + +// MessageDescriptors is a list of message declarations. +type MessageDescriptors interface { + // Len reports the number of messages. + Len() int + // Get returns the ith MessageDescriptor. It panics if out of bounds. + Get(i int) MessageDescriptor + // ByName returns the MessageDescriptor for a message named s. + // It returns nil if not found. + ByName(s Name) MessageDescriptor + + doNotImplement +} + +// FieldDescriptor describes a field within a message and +// corresponds with the google.protobuf.FieldDescriptorProto message. +// +// It is used for both normal fields defined within the parent message +// (e.g., MessageDescriptor.Fields) and fields that extend some remote message +// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +type FieldDescriptor interface { + Descriptor + + // Number reports the unique number for this field. + Number() FieldNumber + // Cardinality reports the cardinality for this field. + Cardinality() Cardinality + // Kind reports the basic kind for this field. + Kind() Kind + + // HasJSONName reports whether this field has an explicitly set JSON name. 
+ HasJSONName() bool + + // JSONName reports the name used for JSON serialization. + // It is usually the camel-cased form of the field name. + JSONName() string + + // HasPresence reports whether the field distinguishes between unpopulated + // and default values. + HasPresence() bool + + // IsExtension reports whether this is an extension field. If false, + // then Parent and ContainingMessage refer to the same message. + // Otherwise, ContainingMessage and Parent likely differ. + IsExtension() bool + + // HasOptionalKeyword reports whether the "optional" keyword was explicitly + // specified in the source .proto file. + HasOptionalKeyword() bool + + // IsWeak reports whether this is a weak field, which does not impose a + // direct dependency on the target type. + // If true, then Message returns a placeholder type. + IsWeak() bool + + // IsPacked reports whether repeated primitive numeric kinds should be + // serialized using a packed encoding. + // If true, then it implies Cardinality is Repeated. + IsPacked() bool + + // IsList reports whether this field represents a list, + // where the value type for the associated field is a List. + // It is equivalent to checking whether Cardinality is Repeated and + // that IsMap reports false. + IsList() bool + + // IsMap reports whether this field represents a map, + // where the value type for the associated field is a Map. + // It is equivalent to checking whether Cardinality is Repeated, + // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + IsMap() bool + + // MapKey returns the field descriptor for the key in the map entry. + // It returns nil if IsMap reports false. + MapKey() FieldDescriptor + + // MapValue returns the field descriptor for the value in the map entry. + // It returns nil if IsMap reports false. + MapValue() FieldDescriptor + + // HasDefault reports whether this field has a default value. + HasDefault() bool + + // Default returns the default value for scalar fields. + // For proto2, it is the default value as specified in the proto file, + // or the zero value if unspecified. + // For proto3, it is always the zero value of the scalar. + // The Value type is determined by the Kind. + Default() Value + + // DefaultEnumValue returns the enum value descriptor for the default value + // of an enum field, and is nil for any other kind of field. + DefaultEnumValue() EnumValueDescriptor + + // ContainingOneof is the containing oneof that this field belongs to, + // and is nil if this field is not part of a oneof. + ContainingOneof() OneofDescriptor + + // ContainingMessage is the containing message that this field belongs to. + // For extension fields, this may not necessarily be the parent message + // that the field is declared within. + ContainingMessage() MessageDescriptor + + // Enum is the enum descriptor if Kind is EnumKind. + // It returns nil for any other Kind. + Enum() EnumDescriptor + + // Message is the message descriptor if Kind is + // MessageKind or GroupKind. It returns nil for any other Kind. + Message() MessageDescriptor + + isFieldDescriptor +} +type isFieldDescriptor interface{ ProtoType(FieldDescriptor) } + +// FieldDescriptors is a list of field declarations. +type FieldDescriptors interface { + // Len reports the number of fields. + Len() int + // Get returns the ith FieldDescriptor. It panics if out of bounds. + Get(i int) FieldDescriptor + // ByName returns the FieldDescriptor for a field named s. + // It returns nil if not found. 
+ ByName(s Name) FieldDescriptor + // ByJSONName returns the FieldDescriptor for a field with s as the JSON name. + // It returns nil if not found. + ByJSONName(s string) FieldDescriptor + // ByNumber returns the FieldDescriptor for a field numbered n. + // It returns nil if not found. + ByNumber(n FieldNumber) FieldDescriptor + + doNotImplement +} + +// OneofDescriptor describes a oneof field set within a given message and +// corresponds with the google.protobuf.OneofDescriptorProto message. +type OneofDescriptor interface { + Descriptor + + // IsSynthetic reports whether this is a synthetic oneof created to support + // proto3 optional semantics. If true, Fields contains exactly one field + // with HasOptionalKeyword specified. + IsSynthetic() bool + + // Fields is a list of fields belonging to this oneof. + Fields() FieldDescriptors + + isOneofDescriptor +} +type isOneofDescriptor interface{ ProtoType(OneofDescriptor) } + +// OneofDescriptors is a list of oneof declarations. +type OneofDescriptors interface { + // Len reports the number of oneof fields. + Len() int + // Get returns the ith OneofDescriptor. It panics if out of bounds. + Get(i int) OneofDescriptor + // ByName returns the OneofDescriptor for a oneof named s. + // It returns nil if not found. + ByName(s Name) OneofDescriptor + + doNotImplement +} + +// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +type ExtensionDescriptor = FieldDescriptor + +// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +type ExtensionTypeDescriptor interface { + ExtensionDescriptor + + // Type returns the associated ExtensionType. + Type() ExtensionType + + // Descriptor returns the plain ExtensionDescriptor without the + // associated ExtensionType. + Descriptor() ExtensionDescriptor +} + +// ExtensionDescriptors is a list of field declarations. +type ExtensionDescriptors interface { + // Len reports the number of fields. + Len() int + // Get returns the ith ExtensionDescriptor. It panics if out of bounds. + Get(i int) ExtensionDescriptor + // ByName returns the ExtensionDescriptor for a field named s. + // It returns nil if not found. + ByName(s Name) ExtensionDescriptor + + doNotImplement +} + +// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// Go implementation. The nested field descriptor must be for a extension field. +// +// While a normal field is a member of the parent message that it is declared +// within (see Descriptor.Parent), an extension field is a member of some other +// target message (see ExtensionDescriptor.Extendee) and may have no +// relationship with the parent. However, the full name of an extension field is +// relative to the parent that it is declared within. +// +// For example: +// syntax = "proto2"; +// package example; +// message FooMessage { +// extensions 100 to max; +// } +// message BarMessage { +// extends FooMessage { optional BarMessage bar_field = 100; } +// } +// +// Field "bar_field" is an extension of FooMessage, but its full name is +// "example.BarMessage.bar_field" instead of "example.FooMessage.bar_field". +type ExtensionType interface { + // New returns a new value for the field. + // For scalars, this returns the default value in native Go form. + New() Value + + // Zero returns a new value for the field. + // For scalars, this returns the default value in native Go form. + // For composite types, this returns an empty, read-only message, list, or map. 
+ Zero() Value + + // TypeDescriptor returns the extension type descriptor. + TypeDescriptor() ExtensionTypeDescriptor + + // ValueOf wraps the input and returns it as a Value. + // ValueOf panics if the input value is invalid or not the appropriate type. + // + // ValueOf is more extensive than protoreflect.ValueOf for a given field's + // value as it has more type information available. + ValueOf(interface{}) Value + + // InterfaceOf completely unwraps the Value to the underlying Go type. + // InterfaceOf panics if the input is nil or does not represent the + // appropriate underlying Go type. For composite types, it panics if the + // value is not mutable. + // + // InterfaceOf is able to unwrap the Value further than Value.Interface + // as it has more type information available. + InterfaceOf(Value) interface{} + + // IsValidValue reports whether the Value is valid to assign to the field. + IsValidValue(Value) bool + + // IsValidInterface reports whether the input is valid to assign to the field. + IsValidInterface(interface{}) bool +} + +// EnumDescriptor describes an enum and +// corresponds with the google.protobuf.EnumDescriptorProto message. +// +// Nested declarations: +// EnumValueDescriptor. +type EnumDescriptor interface { + Descriptor + + // Values is a list of nested enum value declarations. + Values() EnumValueDescriptors + + // ReservedNames is a list of reserved enum names. + ReservedNames() Names + // ReservedRanges is a list of reserved ranges of enum numbers. + ReservedRanges() EnumRanges + + isEnumDescriptor +} +type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } + +// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +type EnumType interface { + // New returns an instance of this enum type with its value set to n. + New(n EnumNumber) Enum + + // Descriptor returns the enum descriptor. + // + // Invariant: t.Descriptor() == t.New(0).Descriptor() + Descriptor() EnumDescriptor +} + +// EnumDescriptors is a list of enum declarations. +type EnumDescriptors interface { + // Len reports the number of enum types. + Len() int + // Get returns the ith EnumDescriptor. It panics if out of bounds. + Get(i int) EnumDescriptor + // ByName returns the EnumDescriptor for an enum named s. + // It returns nil if not found. + ByName(s Name) EnumDescriptor + + doNotImplement +} + +// EnumValueDescriptor describes an enum value and +// corresponds with the google.protobuf.EnumValueDescriptorProto message. +// +// All other proto declarations are in the namespace of the parent. +// However, enum values do not follow this rule and are within the namespace +// of the parent's parent (i.e., they are a sibling of the containing enum). +// Thus, a value named "FOO_VALUE" declared within an enum uniquely identified +// as "proto.package.MyEnum" has a full name of "proto.package.FOO_VALUE". +type EnumValueDescriptor interface { + Descriptor + + // Number returns the enum value as an integer. + Number() EnumNumber + + isEnumValueDescriptor +} +type isEnumValueDescriptor interface{ ProtoType(EnumValueDescriptor) } + +// EnumValueDescriptors is a list of enum value declarations. +type EnumValueDescriptors interface { + // Len reports the number of enum values. + Len() int + // Get returns the ith EnumValueDescriptor. It panics if out of bounds. + Get(i int) EnumValueDescriptor + // ByName returns the EnumValueDescriptor for the enum value named s. + // It returns nil if not found. 
+ ByName(s Name) EnumValueDescriptor + // ByNumber returns the EnumValueDescriptor for the enum value numbered n. + // If multiple have the same number, the first one defined is returned + // It returns nil if not found. + ByNumber(n EnumNumber) EnumValueDescriptor + + doNotImplement +} + +// ServiceDescriptor describes a service and +// corresponds with the google.protobuf.ServiceDescriptorProto message. +// +// Nested declarations: MethodDescriptor. +type ServiceDescriptor interface { + Descriptor + + // Methods is a list of nested message declarations. + Methods() MethodDescriptors + + isServiceDescriptor +} +type isServiceDescriptor interface{ ProtoType(ServiceDescriptor) } + +// ServiceDescriptors is a list of service declarations. +type ServiceDescriptors interface { + // Len reports the number of services. + Len() int + // Get returns the ith ServiceDescriptor. It panics if out of bounds. + Get(i int) ServiceDescriptor + // ByName returns the ServiceDescriptor for a service named s. + // It returns nil if not found. + ByName(s Name) ServiceDescriptor + + doNotImplement +} + +// MethodDescriptor describes a method and +// corresponds with the google.protobuf.MethodDescriptorProto message. +type MethodDescriptor interface { + Descriptor + + // Input is the input message descriptor. + Input() MessageDescriptor + // Output is the output message descriptor. + Output() MessageDescriptor + // IsStreamingClient reports whether the client streams multiple messages. + IsStreamingClient() bool + // IsStreamingServer reports whether the server streams multiple messages. + IsStreamingServer() bool + + isMethodDescriptor +} +type isMethodDescriptor interface{ ProtoType(MethodDescriptor) } + +// MethodDescriptors is a list of method declarations. +type MethodDescriptors interface { + // Len reports the number of methods. + Len() int + // Get returns the ith MethodDescriptor. It panics if out of bounds. + Get(i int) MethodDescriptor + // ByName returns the MethodDescriptor for a service method named s. + // It returns nil if not found. + ByName(s Name) MethodDescriptor + + doNotImplement +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go new file mode 100644 index 000000000..f31981077 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -0,0 +1,285 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import "google.golang.org/protobuf/encoding/protowire" + +// Enum is a reflection interface for a concrete enum value, +// which provides type information and a getter for the enum number. +// Enum does not provide a mutable API since enums are commonly backed by +// Go constants, which are not addressable. +type Enum interface { + // Descriptor returns enum descriptor, which contains only the protobuf + // type information for the enum. + Descriptor() EnumDescriptor + + // Type returns the enum type, which encapsulates both Go and protobuf + // type information. If the Go type information is not needed, + // it is recommended that the enum descriptor be used instead. + Type() EnumType + + // Number returns the enum value as an integer. + Number() EnumNumber +} + +// Message is a reflective interface for a concrete message value, +// encapsulating both type and value information for the message. 
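A small sketch of the Enum interface defined above, assuming the well-known structpb package (an assumption for illustration, not part of this file):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Any generated enum value satisfies protoreflect.Enum.
	var e protoreflect.Enum = structpb.NullValue_NULL_VALUE
	fmt.Println(e.Number())                    // 0
	fmt.Println(e.Descriptor().FullName())     // google.protobuf.NullValue
	fmt.Println(e.Descriptor().Values().Len()) // 1
}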
+// +// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// For non-extension fields, the descriptor must exactly match the +// field known by the parent message. +// For extension fields, the descriptor must implement ExtensionTypeDescriptor, +// extend the parent message (i.e., have the same message FullName), and +// be within the parent's extension range. +// +// Each field Value can be a scalar or a composite type (Message, List, or Map). +// See Value for the Go types associated with a FieldDescriptor. +// Providing a Value that is invalid or of an incorrect type panics. +type Message interface { + // Descriptor returns message descriptor, which contains only the protobuf + // type information for the message. + Descriptor() MessageDescriptor + + // Type returns the message type, which encapsulates both Go and protobuf + // type information. If the Go type information is not needed, + // it is recommended that the message descriptor be used instead. + Type() MessageType + + // New returns a newly allocated and mutable empty message. + New() Message + + // Interface unwraps the message reflection interface and + // returns the underlying ProtoMessage interface. + Interface() ProtoMessage + + // Range iterates over every populated field in an undefined order, + // calling f for each field descriptor and value encountered. + // Range returns immediately if f returns false. + // While iterating, mutating operations may only be performed + // on the current field descriptor. + Range(f func(FieldDescriptor, Value) bool) + + // Has reports whether a field is populated. + // + // Some fields have the property of nullability where it is possible to + // distinguish between the default value of a field and whether the field + // was explicitly populated with the default value. Singular message fields, + // member fields of a oneof, and proto2 scalar fields are nullable. Such + // fields are populated only if explicitly set. + // + // In other cases (aside from the nullable cases above), + // a proto3 scalar field is populated if it contains a non-zero value, and + // a repeated field is populated if it is non-empty. + Has(FieldDescriptor) bool + + // Clear clears the field such that a subsequent Has call reports false. + // + // Clearing an extension field clears both the extension type and value + // associated with the given field number. + // + // Clear is a mutating operation and unsafe for concurrent use. + Clear(FieldDescriptor) + + // Get retrieves the value for a field. + // + // For unpopulated scalars, it returns the default value, where + // the default value of a bytes scalar is guaranteed to be a copy. + // For unpopulated composite types, it returns an empty, read-only view + // of the value; to obtain a mutable reference, use Mutable. + Get(FieldDescriptor) Value + + // Set stores the value for a field. + // + // For a field belonging to a oneof, it implicitly clears any other field + // that may be currently set within the same oneof. + // For extension fields, it implicitly stores the provided ExtensionType. + // When setting a composite type, it is unspecified whether the stored value + // aliases the source's memory in any way. If the composite value is an + // empty, read-only value, then it panics. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(FieldDescriptor, Value) + + // Mutable returns a mutable reference to a composite type. + // + // If the field is unpopulated, it may allocate a composite value. 
+ // For a field belonging to a oneof, it implicitly clears any other field + // that may be currently set within the same oneof. + // For extension fields, it implicitly stores the provided ExtensionType + // if not already stored. + // It panics if the field does not contain a composite type. + // + // Mutable is a mutating operation and unsafe for concurrent use. + Mutable(FieldDescriptor) Value + + // NewField returns a new value that is assignable to the field + // for the given descriptor. For scalars, this returns the default value. + // For lists, maps, and messages, this returns a new, empty, mutable value. + NewField(FieldDescriptor) Value + + // WhichOneof reports which field within the oneof is populated, + // returning nil if none are populated. + // It panics if the oneof descriptor does not belong to this message. + WhichOneof(OneofDescriptor) FieldDescriptor + + // GetUnknown retrieves the entire list of unknown fields. + // The caller may only mutate the contents of the RawFields + // if the mutated bytes are stored back into the message with SetUnknown. + GetUnknown() RawFields + + // SetUnknown stores an entire list of unknown fields. + // The raw fields must be syntactically valid according to the wire format. + // An implementation may panic if this is not the case. + // Once stored, the caller must not mutate the content of the RawFields. + // An empty RawFields may be passed to clear the fields. + // + // SetUnknown is a mutating operation and unsafe for concurrent use. + SetUnknown(RawFields) + + // IsValid reports whether the message is valid. + // + // An invalid message is an empty, read-only value. + // + // An invalid message often corresponds to a nil pointer of the concrete + // message type, but the details are implementation dependent. + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool + + // ProtoMethods returns optional fast-path implementions of various operations. + // This method may return nil. + // + // The returned methods type is identical to + // "google.golang.org/protobuf/runtime/protoiface".Methods. + // Consult the protoiface package documentation for details. + ProtoMethods() *methods +} + +// RawFields is the raw bytes for an ordered sequence of fields. +// Each field contains both the tag (representing field number and wire type), +// and also the wire data itself. +type RawFields []byte + +// IsValid reports whether b is syntactically correct wire format. +func (b RawFields) IsValid() bool { + for len(b) > 0 { + _, _, n := protowire.ConsumeField(b) + if n < 0 { + return false + } + b = b[n:] + } + return true +} + +// List is a zero-indexed, ordered list. +// The element Value type is determined by FieldDescriptor.Kind. +// Providing a Value that is invalid or of an incorrect type panics. +type List interface { + // Len reports the number of entries in the List. + // Get, Set, and Truncate panic with out of bound indexes. + Len() int + + // Get retrieves the value at the given index. + // It never returns an invalid value. + Get(int) Value + + // Set stores a value for the given index. + // When setting a composite type, it is unspecified whether the set + // value aliases the source's memory in any way. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(int, Value) + + // Append appends the provided value to the end of the list. 
+ // When appending a composite type, it is unspecified whether the appended + // value aliases the source's memory in any way. + // + // Append is a mutating operation and unsafe for concurrent use. + Append(Value) + + // AppendMutable appends a new, empty, mutable message value to the end + // of the list and returns it. + // It panics if the list does not contain a message type. + AppendMutable() Value + + // Truncate truncates the list to a smaller length. + // + // Truncate is a mutating operation and unsafe for concurrent use. + Truncate(int) + + // NewElement returns a new value for a list element. + // For enums, this returns the first enum value. + // For other scalars, this returns the zero value. + // For messages, this returns a new, empty, mutable value. + NewElement() Value + + // IsValid reports whether the list is valid. + // + // An invalid list is an empty, read-only value. + // + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool +} + +// Map is an unordered, associative map. +// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. +// The entry Value type is determined by FieldDescriptor.MapValue.Kind. +// Providing a MapKey or Value that is invalid or of an incorrect type panics. +type Map interface { + // Len reports the number of elements in the map. + Len() int + + // Range iterates over every map entry in an undefined order, + // calling f for each key and value encountered. + // Range calls f Len times unless f returns false, which stops iteration. + // While iterating, mutating operations may only be performed + // on the current map key. + Range(f func(MapKey, Value) bool) + + // Has reports whether an entry with the given key is in the map. + Has(MapKey) bool + + // Clear clears the entry associated with they given key. + // The operation does nothing if there is no entry associated with the key. + // + // Clear is a mutating operation and unsafe for concurrent use. + Clear(MapKey) + + // Get retrieves the value for an entry with the given key. + // It returns an invalid value for non-existent entries. + Get(MapKey) Value + + // Set stores the value for an entry with the given key. + // It panics when given a key or value that is invalid or the wrong type. + // When setting a composite type, it is unspecified whether the set + // value aliases the source's memory in any way. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(MapKey, Value) + + // Mutable retrieves a mutable reference to the entry for the given key. + // If no entry exists for the key, it creates a new, empty, mutable value + // and stores it as the entry for the key. + // It panics if the map value is not a message. + Mutable(MapKey) Value + + // NewValue returns a new value assignable as a map value. + // For enums, this returns the first enum value. + // For other scalars, this returns the zero value. + // For messages, this returns a new, empty, mutable value. + NewValue() Value + + // IsValid reports whether the map is valid. + // + // An invalid map is an empty, read-only value. + // + // An invalid message often corresponds to a nil Go map value, + // but the details are implementation dependent. + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. 
+ IsValid() bool +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go new file mode 100644 index 000000000..918e685e1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego appengine + +package protoreflect + +import "google.golang.org/protobuf/internal/pragma" + +type valueType int + +const ( + nilType valueType = iota + boolType + int32Type + int64Type + uint32Type + uint64Type + float32Type + float64Type + stringType + bytesType + enumType + ifaceType +) + +// value is a union where only one type can be represented at a time. +// This uses a distinct field for each type. This is type safe in Go, but +// occupies more memory than necessary (72B). +type value struct { + pragma.DoNotCompare // 0B + + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface interface{} // 16B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, str: v} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, bin: v} +} +func valueOfIface(v interface{}) Value { + return Value{typ: ifaceType, iface: v} +} + +func (v Value) getString() string { + return v.str +} +func (v Value) getBytes() []byte { + return v.bin +} +func (v Value) getIface() interface{} { + return v.iface +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go new file mode 100644 index 000000000..5a3414724 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -0,0 +1,411 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "fmt" + "math" +) + +// Value is a union where only one Go type may be set at a time. +// The Value is used to represent all possible values a field may take. +// The following shows which Go type is used to represent each proto Kind: +// +// ╔════════════╤═════════════════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠════════════╪═════════════════════════════════════╣ +// ║ bool │ BoolKind ║ +// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ +// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ +// ║ uint32 │ Uint32Kind, Fixed32Kind ║ +// ║ uint64 │ Uint64Kind, Fixed64Kind ║ +// ║ float32 │ FloatKind ║ +// ║ float64 │ DoubleKind ║ +// ║ string │ StringKind ║ +// ║ []byte │ BytesKind ║ +// ║ EnumNumber │ EnumKind ║ +// ║ Message │ MessageKind, GroupKind ║ +// ╚════════════╧═════════════════════════════════════╝ +// +// Multiple protobuf Kinds may be represented by a single Go type if the type +// can losslessly represent the information for the proto kind. For example, +// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// but use different integer encoding methods. +// +// The List or Map types are used if the field cardinality is repeated. +// A field is a List if FieldDescriptor.IsList reports true. +// A field is a Map if FieldDescriptor.IsMap reports true. +// +// Converting to/from a Value and a concrete Go value panics on type mismatch. 
+// For example, ValueOf("hello").Int() panics because this attempts to +// retrieve an int64 from a string. +type Value value + +// The protoreflect API uses a custom Value union type instead of interface{} +// to keep the future open for performance optimizations. Using an interface{} +// always incurs an allocation for primitives (e.g., int64) since it needs to +// be boxed on the heap (as interfaces can only contain pointers natively). +// Instead, we represent the Value union as a flat struct that internally keeps +// track of which type is set. Using unsafe, the Value union can be reduced +// down to 24B, which is identical in size to a slice. +// +// The latest compiler (Go1.11) currently suffers from some limitations: +// • With inlining, the compiler should be able to statically prove that +// only one of these switch cases are taken and inline one specific case. +// See https://golang.org/issue/22310. + +// ValueOf returns a Value initialized with the concrete value stored in v. +// This panics if the type does not match one of the allowed types in the +// Value union. +func ValueOf(v interface{}) Value { + switch v := v.(type) { + case nil: + return Value{} + case bool: + return ValueOfBool(v) + case int32: + return ValueOfInt32(v) + case int64: + return ValueOfInt64(v) + case uint32: + return ValueOfUint32(v) + case uint64: + return ValueOfUint64(v) + case float32: + return ValueOfFloat32(v) + case float64: + return ValueOfFloat64(v) + case string: + return ValueOfString(v) + case []byte: + return ValueOfBytes(v) + case EnumNumber: + return ValueOfEnum(v) + case Message, List, Map: + return valueOfIface(v) + case ProtoMessage: + panic(fmt.Sprintf("invalid proto.Message(%T) type, expected a protoreflect.Message type", v)) + default: + panic(fmt.Sprintf("invalid type: %T", v)) + } +} + +// ValueOfBool returns a new boolean value. +func ValueOfBool(v bool) Value { + if v { + return Value{typ: boolType, num: 1} + } else { + return Value{typ: boolType, num: 0} + } +} + +// ValueOfInt32 returns a new int32 value. +func ValueOfInt32(v int32) Value { + return Value{typ: int32Type, num: uint64(v)} +} + +// ValueOfInt64 returns a new int64 value. +func ValueOfInt64(v int64) Value { + return Value{typ: int64Type, num: uint64(v)} +} + +// ValueOfUint32 returns a new uint32 value. +func ValueOfUint32(v uint32) Value { + return Value{typ: uint32Type, num: uint64(v)} +} + +// ValueOfUint64 returns a new uint64 value. +func ValueOfUint64(v uint64) Value { + return Value{typ: uint64Type, num: v} +} + +// ValueOfFloat32 returns a new float32 value. +func ValueOfFloat32(v float32) Value { + return Value{typ: float32Type, num: uint64(math.Float64bits(float64(v)))} +} + +// ValueOfFloat64 returns a new float64 value. +func ValueOfFloat64(v float64) Value { + return Value{typ: float64Type, num: uint64(math.Float64bits(float64(v)))} +} + +// ValueOfString returns a new string value. +func ValueOfString(v string) Value { + return valueOfString(v) +} + +// ValueOfBytes returns a new bytes value. +func ValueOfBytes(v []byte) Value { + return valueOfBytes(v[:len(v):len(v)]) +} + +// ValueOfEnum returns a new enum value. +func ValueOfEnum(v EnumNumber) Value { + return Value{typ: enumType, num: uint64(v)} +} + +// ValueOfMessage returns a new Message value. +func ValueOfMessage(v Message) Value { + return valueOfIface(v) +} + +// ValueOfList returns a new List value. +func ValueOfList(v List) Value { + return valueOfIface(v) +} + +// ValueOfMap returns a new Map value. 
+func ValueOfMap(v Map) Value {
+	return valueOfIface(v)
+}
+
+// IsValid reports whether v is populated with a value.
+func (v Value) IsValid() bool {
+	return v.typ != nilType
+}
+
+// Interface returns v as an interface{}.
+//
+// Invariant: v == ValueOf(v).Interface()
+func (v Value) Interface() interface{} {
+	switch v.typ {
+	case nilType:
+		return nil
+	case boolType:
+		return v.Bool()
+	case int32Type:
+		return int32(v.Int())
+	case int64Type:
+		return int64(v.Int())
+	case uint32Type:
+		return uint32(v.Uint())
+	case uint64Type:
+		return uint64(v.Uint())
+	case float32Type:
+		return float32(v.Float())
+	case float64Type:
+		return float64(v.Float())
+	case stringType:
+		return v.String()
+	case bytesType:
+		return v.Bytes()
+	case enumType:
+		return v.Enum()
+	default:
+		return v.getIface()
+	}
+}
+
+func (v Value) typeName() string {
+	switch v.typ {
+	case nilType:
+		return "nil"
+	case boolType:
+		return "bool"
+	case int32Type:
+		return "int32"
+	case int64Type:
+		return "int64"
+	case uint32Type:
+		return "uint32"
+	case uint64Type:
+		return "uint64"
+	case float32Type:
+		return "float32"
+	case float64Type:
+		return "float64"
+	case stringType:
+		return "string"
+	case bytesType:
+		return "bytes"
+	case enumType:
+		return "enum"
+	default:
+		switch v := v.getIface().(type) {
+		case Message:
+			return "message"
+		case List:
+			return "list"
+		case Map:
+			return "map"
+		default:
+			return fmt.Sprintf("<unknown: %v>", v)
+		}
+	}
+}
+
+func (v Value) panicMessage(what string) string {
+	return fmt.Sprintf("type mismatch: cannot convert %v to %s", v.typeName(), what)
+}
+
+// Bool returns v as a bool and panics if the type is not a bool.
+func (v Value) Bool() bool {
+	switch v.typ {
+	case boolType:
+		return v.num > 0
+	default:
+		panic(v.panicMessage("bool"))
+	}
+}
+
+// Int returns v as an int64 and panics if the type is not an int32 or int64.
+func (v Value) Int() int64 {
+	switch v.typ {
+	case int32Type, int64Type:
+		return int64(v.num)
+	default:
+		panic(v.panicMessage("int"))
+	}
+}
+
+// Uint returns v as a uint64 and panics if the type is not a uint32 or uint64.
+func (v Value) Uint() uint64 {
+	switch v.typ {
+	case uint32Type, uint64Type:
+		return uint64(v.num)
+	default:
+		panic(v.panicMessage("uint"))
+	}
+}
+
+// Float returns v as a float64 and panics if the type is not a float32 or float64.
+func (v Value) Float() float64 {
+	switch v.typ {
+	case float32Type, float64Type:
+		return math.Float64frombits(uint64(v.num))
+	default:
+		panic(v.panicMessage("float"))
+	}
+}
+
+// String returns v as a string. Since this method implements fmt.Stringer,
+// this returns the formatted string value for any non-string type.
+func (v Value) String() string {
+	switch v.typ {
+	case stringType:
+		return v.getString()
+	default:
+		return fmt.Sprint(v.Interface())
+	}
+}
+
+// Bytes returns v as a []byte and panics if the type is not a []byte.
+func (v Value) Bytes() []byte {
+	switch v.typ {
+	case bytesType:
+		return v.getBytes()
+	default:
+		panic(v.panicMessage("bytes"))
+	}
+}
+
+// Enum returns v as an EnumNumber and panics if the type is not an EnumNumber.
+func (v Value) Enum() EnumNumber {
+	switch v.typ {
+	case enumType:
+		return EnumNumber(v.num)
+	default:
+		panic(v.panicMessage("enum"))
+	}
+}
+
+// Message returns v as a Message and panics if the type is not a Message.
+func (v Value) Message() Message { + switch vi := v.getIface().(type) { + case Message: + return vi + default: + panic(v.panicMessage("message")) + } +} + +// List returns v as a List and panics if the type is not a List. +func (v Value) List() List { + switch vi := v.getIface().(type) { + case List: + return vi + default: + panic(v.panicMessage("list")) + } +} + +// Map returns v as a Map and panics if the type is not a Map. +func (v Value) Map() Map { + switch vi := v.getIface().(type) { + case Map: + return vi + default: + panic(v.panicMessage("map")) + } +} + +// MapKey returns v as a MapKey and panics for invalid MapKey types. +func (v Value) MapKey() MapKey { + switch v.typ { + case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: + return MapKey(v) + default: + panic(v.panicMessage("map key")) + } +} + +// MapKey is used to index maps, where the Go type of the MapKey must match +// the specified key Kind (see MessageDescriptor.IsMapEntry). +// The following shows what Go type is used to represent each proto Kind: +// +// ╔═════════╤═════════════════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═════════╪═════════════════════════════════════╣ +// ║ bool │ BoolKind ║ +// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ +// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ +// ║ uint32 │ Uint32Kind, Fixed32Kind ║ +// ║ uint64 │ Uint64Kind, Fixed64Kind ║ +// ║ string │ StringKind ║ +// ╚═════════╧═════════════════════════════════════╝ +// +// A MapKey is constructed and accessed through a Value: +// k := ValueOf("hash").MapKey() // convert string to MapKey +// s := k.String() // convert MapKey to string +// +// The MapKey is a strict subset of valid types used in Value; +// converting a Value to a MapKey with an invalid type panics. +type MapKey value + +// IsValid reports whether k is populated with a value. +func (k MapKey) IsValid() bool { + return Value(k).IsValid() +} + +// Interface returns k as an interface{}. +func (k MapKey) Interface() interface{} { + return Value(k).Interface() +} + +// Bool returns k as a bool and panics if the type is not a bool. +func (k MapKey) Bool() bool { + return Value(k).Bool() +} + +// Int returns k as a int64 and panics if the type is not a int32 or int64. +func (k MapKey) Int() int64 { + return Value(k).Int() +} + +// Uint returns k as a uint64 and panics if the type is not a uint32 or uint64. +func (k MapKey) Uint() uint64 { + return Value(k).Uint() +} + +// String returns k as a string. Since this method implements fmt.Stringer, +// this returns the formatted string value for any non-string type. +func (k MapKey) String() string { + return Value(k).String() +} + +// Value returns k as a Value. +func (k MapKey) Value() Value { + return Value(k) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go new file mode 100644 index 000000000..c45debdca --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -0,0 +1,98 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !purego,!appengine + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } + ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. + num uint64 // 8B +} + +func valueOfString(v string) Value { + p := (*stringHeader)(unsafe.Pointer(&v)) + return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + p := (*sliceHeader)(unsafe.Pointer(&v)) + return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() (x string) { + *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} + return x +} +func (v Value) getBytes() (x []byte) { + *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} + return x +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go new file mode 100644 index 000000000..5e5f96716 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -0,0 +1,800 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoregistry provides data structures to register and lookup +// protobuf descriptor types. +// +// The Files registry contains file descriptors and provides the ability +// to iterate over the files or lookup a specific descriptor within the files. +// Files only contains protobuf descriptors and has no understanding of Go +// type information that may be associated with each descriptor. 
+// +// The Types registry contains descriptor types for which there is a known +// Go type associated with that descriptor. It provides the ability to iterate +// over the registered types or lookup a type by name. +package protoregistry + +import ( + "fmt" + "log" + "strings" + "sync" + + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// ignoreConflict reports whether to ignore a registration conflict +// given the descriptor being registered and the error. +// It is a variable so that the behavior is easily overridden in another file. +var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { + log.Printf(""+ + "WARNING: %v\n"+ + "A future release will panic on registration conflicts. See:\n"+ + "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict\n"+ + "\n", err) + return true +} + +var globalMutex sync.RWMutex + +// GlobalFiles is a global registry of file descriptors. +var GlobalFiles *Files = new(Files) + +// GlobalTypes is the registry used by default for type lookups +// unless a local registry is provided by the user. +var GlobalTypes *Types = new(Types) + +// NotFound is a sentinel error value to indicate that the type was not found. +// +// Since registry lookup can happen in the critical performance path, resolvers +// must return this exact error value, not an error wrapping it. +var NotFound = errors.New("not found") + +// Files is a registry for looking up or iterating over files and the +// descriptors contained within them. +// The Find and Range methods are safe for concurrent use. +type Files struct { + // The map of descsByName contains: + // EnumDescriptor + // EnumValueDescriptor + // MessageDescriptor + // ExtensionDescriptor + // ServiceDescriptor + // *packageDescriptor + // + // Note that files are stored as a slice, since a package may contain + // multiple files. Only top-level declarations are registered. + // Note that enum values are in the top-level since that are in the same + // scope as the parent enum. + descsByName map[protoreflect.FullName]interface{} + filesByPath map[string]protoreflect.FileDescriptor +} + +type packageDescriptor struct { + files []protoreflect.FileDescriptor +} + +// RegisterFile registers the provided file descriptor. +// +// If any descriptor within the file conflicts with the descriptor of any +// previously registered file (e.g., two enums with the same full name), +// then the file is not registered and an error is returned. +// +// It is permitted for multiple files to have the same file path. +func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { + if r == GlobalFiles { + globalMutex.Lock() + defer globalMutex.Unlock() + } + if r.descsByName == nil { + r.descsByName = map[protoreflect.FullName]interface{}{ + "": &packageDescriptor{}, + } + r.filesByPath = make(map[string]protoreflect.FileDescriptor) + } + path := file.Path() + if prev := r.filesByPath[path]; prev != nil { + // TODO: Remove this after some soak-in period after moving these types. 
+ var prevPath string + const prevModule = "google.golang.org/genproto" + const prevVersion = "cb27e3aa (May 26th, 2020)" + switch path { + case "google/protobuf/field_mask.proto": + prevPath = prevModule + "/protobuf/field_mask" + case "google/protobuf/api.proto": + prevPath = prevModule + "/protobuf/api" + case "google/protobuf/type.proto": + prevPath = prevModule + "/protobuf/ptype" + case "google/protobuf/source_context.proto": + prevPath = prevModule + "/protobuf/source_context" + } + if r == GlobalFiles && prevPath != "" { + pkgName := strings.TrimSuffix(strings.TrimPrefix(path, "google/protobuf/"), ".proto") + pkgName = strings.Replace(pkgName, "_", "", -1) + "pb" + currPath := "google.golang.org/protobuf/types/known/" + pkgName + panic(fmt.Sprintf(""+ + "duplicate registration of %q\n"+ + "\n"+ + "The generated definition for this file has moved:\n"+ + "\tfrom: %q\n"+ + "\tto: %q\n"+ + "A dependency on the %q module must\n"+ + "be at version %v or higher.\n"+ + "\n"+ + "Upgrade the dependency by running:\n"+ + "\tgo get -u %v\n", + path, prevPath, currPath, prevModule, prevVersion, prevPath)) + } + + err := errors.New("file %q is already registered", file.Path()) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(file, err) { + err = nil + } + return err + } + + for name := file.Package(); name != ""; name = name.Parent() { + switch prev := r.descsByName[name]; prev.(type) { + case nil, *packageDescriptor: + default: + err := errors.New("file %q has a package name conflict over %v", file.Path(), name) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(file, err) { + err = nil + } + return err + } + } + var err error + var hasConflict bool + rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { + if prev := r.descsByName[d.FullName()]; prev != nil { + hasConflict = true + err = errors.New("file %q has a name conflict over %v", file.Path(), d.FullName()) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(d, err) { + err = nil + } + } + }) + if hasConflict { + return err + } + + for name := file.Package(); name != ""; name = name.Parent() { + if r.descsByName[name] == nil { + r.descsByName[name] = &packageDescriptor{} + } + } + p := r.descsByName[file.Package()].(*packageDescriptor) + p.files = append(p.files, file) + rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { + r.descsByName[d.FullName()] = d + }) + r.filesByPath[path] = file + return nil +} + +// FindDescriptorByName looks up a descriptor by the full name. +// +// This returns (nil, NotFound) if not found. 
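+//
+// A minimal usage sketch (the full name "example.Foo" is illustrative only and
+// assumes some registered file declares such a message):
+//
+//	desc, err := protoregistry.GlobalFiles.FindDescriptorByName("example.Foo")
+//	if err == protoregistry.NotFound {
+//		// "example.Foo" was never registered.
+//	}
+//	_ = desc // use desc as a protoreflect.Descriptor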
+func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + prefix := name + suffix := nameSuffix("") + for prefix != "" { + if d, ok := r.descsByName[prefix]; ok { + switch d := d.(type) { + case protoreflect.EnumDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.EnumValueDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.MessageDescriptor: + if d.FullName() == name { + return d, nil + } + if d := findDescriptorInMessage(d, suffix); d != nil && d.FullName() == name { + return d, nil + } + case protoreflect.ExtensionDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.ServiceDescriptor: + if d.FullName() == name { + return d, nil + } + if d := d.Methods().ByName(suffix.Pop()); d != nil && d.FullName() == name { + return d, nil + } + } + return nil, NotFound + } + prefix = prefix.Parent() + suffix = nameSuffix(name[len(prefix)+len("."):]) + } + return nil, NotFound +} + +func findDescriptorInMessage(md protoreflect.MessageDescriptor, suffix nameSuffix) protoreflect.Descriptor { + name := suffix.Pop() + if suffix == "" { + if ed := md.Enums().ByName(name); ed != nil { + return ed + } + for i := md.Enums().Len() - 1; i >= 0; i-- { + if vd := md.Enums().Get(i).Values().ByName(name); vd != nil { + return vd + } + } + if xd := md.Extensions().ByName(name); xd != nil { + return xd + } + if fd := md.Fields().ByName(name); fd != nil { + return fd + } + if od := md.Oneofs().ByName(name); od != nil { + return od + } + } + if md := md.Messages().ByName(name); md != nil { + if suffix == "" { + return md + } + return findDescriptorInMessage(md, suffix) + } + return nil +} + +type nameSuffix string + +func (s *nameSuffix) Pop() (name protoreflect.Name) { + if i := strings.IndexByte(string(*s), '.'); i >= 0 { + name, *s = protoreflect.Name((*s)[:i]), (*s)[i+1:] + } else { + name, *s = protoreflect.Name((*s)), "" + } + return name +} + +// FindFileByPath looks up a file by the path. +// +// This returns (nil, NotFound) if not found. +func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if fd, ok := r.filesByPath[path]; ok { + return fd, nil + } + return nil, NotFound +} + +// NumFiles reports the number of registered files. +func (r *Files) NumFiles() int { + if r == nil { + return 0 + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return len(r.filesByPath) +} + +// RangeFiles iterates over all registered files while f returns true. +// The iteration order is undefined. +func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { + if r == nil { + return + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, file := range r.filesByPath { + if !f(file) { + return + } + } +} + +// NumFilesByPackage reports the number of registered files in a proto package. 
+func (r *Files) NumFilesByPackage(name protoreflect.FullName) int { + if r == nil { + return 0 + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + p, ok := r.descsByName[name].(*packageDescriptor) + if !ok { + return 0 + } + return len(p.files) +} + +// RangeFilesByPackage iterates over all registered files in a given proto package +// while f returns true. The iteration order is undefined. +func (r *Files) RangeFilesByPackage(name protoreflect.FullName, f func(protoreflect.FileDescriptor) bool) { + if r == nil { + return + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + p, ok := r.descsByName[name].(*packageDescriptor) + if !ok { + return + } + for _, file := range p.files { + if !f(file) { + return + } + } +} + +// rangeTopLevelDescriptors iterates over all top-level descriptors in a file +// which will be directly entered into the registry. +func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflect.Descriptor)) { + eds := fd.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + vds := eds.Get(i).Values() + for i := vds.Len() - 1; i >= 0; i-- { + f(vds.Get(i)) + } + } + mds := fd.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + f(mds.Get(i)) + } + xds := fd.Extensions() + for i := xds.Len() - 1; i >= 0; i-- { + f(xds.Get(i)) + } + sds := fd.Services() + for i := sds.Len() - 1; i >= 0; i-- { + f(sds.Get(i)) + } +} + +// MessageTypeResolver is an interface for looking up messages. +// +// A compliant implementation must deterministically return the same type +// if no error is encountered. +// +// The Types type implements this interface. +type MessageTypeResolver interface { + // FindMessageByName looks up a message by its full name. + // E.g., "google.protobuf.Any" + // + // This return (nil, NotFound) if not found. + FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) + + // FindMessageByURL looks up a message by a URL identifier. + // See documentation on google.protobuf.Any.type_url for the URL format. + // + // This returns (nil, NotFound) if not found. + FindMessageByURL(url string) (protoreflect.MessageType, error) +} + +// ExtensionTypeResolver is an interface for looking up extensions. +// +// A compliant implementation must deterministically return the same type +// if no error is encountered. +// +// The Types type implements this interface. +type ExtensionTypeResolver interface { + // FindExtensionByName looks up a extension field by the field's full name. + // Note that this is the full name of the field as determined by + // where the extension is declared and is unrelated to the full name of the + // message being extended. + // + // This returns (nil, NotFound) if not found. + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + + // FindExtensionByNumber looks up a extension field by the field number + // within some parent message, identified by full name. + // + // This returns (nil, NotFound) if not found. + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) +} + +var ( + _ MessageTypeResolver = (*Types)(nil) + _ ExtensionTypeResolver = (*Types)(nil) +) + +// Types is a registry for looking up or iterating over descriptor types. +// The Find and Range methods are safe for concurrent use. 
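+//
+// A minimal usage sketch (mt is an assumed, generated protoreflect.MessageType
+// and "example.Foo" an assumed full name; both are illustrative only):
+//
+//	var reg protoregistry.Types
+//	if err := reg.RegisterMessage(mt); err != nil {
+//		// handle the registration conflict
+//	}
+//	found, err := reg.FindMessageByName("example.Foo")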
+type Types struct { + typesByName typesByName + extensionsByMessage extensionsByMessage + + numEnums int + numMessages int + numExtensions int +} + +type ( + typesByName map[protoreflect.FullName]interface{} + extensionsByMessage map[protoreflect.FullName]extensionsByNumber + extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType +) + +// RegisterMessage registers the provided message type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterMessage(mt protoreflect.MessageType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + md := mt.Descriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + if err := r.register("message", md, mt); err != nil { + return err + } + r.numMessages++ + return nil +} + +// RegisterEnum registers the provided enum type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterEnum(et protoreflect.EnumType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + ed := et.Descriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + if err := r.register("enum", ed, et); err != nil { + return err + } + r.numEnums++ + return nil +} + +// RegisterExtension registers the provided extension type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + // + // A known case where this can happen: Fetching the TypeDescriptor for a + // legacy ExtensionDesc can consult the global registry. + xd := xt.TypeDescriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + field := xd.Number() + message := xd.ContainingMessage().FullName() + if prev := r.extensionsByMessage[message][field]; prev != nil { + err := errors.New("extension number %d is already registered on message %v", field, message) + err = amendErrorWithCaller(err, prev, xt) + if !(r == GlobalTypes && ignoreConflict(xd, err)) { + return err + } + } + + if err := r.register("extension", xd, xt); err != nil { + return err + } + if r.extensionsByMessage == nil { + r.extensionsByMessage = make(extensionsByMessage) + } + if r.extensionsByMessage[message] == nil { + r.extensionsByMessage[message] = make(extensionsByNumber) + } + r.extensionsByMessage[message][field] = xt + r.numExtensions++ + return nil +} + +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { + name := desc.FullName() + prev := r.typesByName[name] + if prev != nil { + err := errors.New("%v %v is already registered", kind, name) + err = amendErrorWithCaller(err, prev, typ) + if !(r == GlobalTypes && ignoreConflict(desc, err)) { + return err + } + } + if r.typesByName == nil { + r.typesByName = make(typesByName) + } + r.typesByName[name] = typ + return nil +} + +// FindEnumByName looks up an enum by its full name. +// E.g., "google.protobuf.Field.Kind". +// +// This returns (nil, NotFound) if not found. 
+func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[enum]; v != nil { + if et, _ := v.(protoreflect.EnumType); et != nil { + return et, nil + } + return nil, errors.New("found wrong type: got %v, want enum", typeName(v)) + } + return nil, NotFound +} + +// FindMessageByName looks up a message by its full name. +// E.g., "google.protobuf.Any" +// +// This return (nil, NotFound) if not found. +func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + // The full name by itself is a valid URL. + return r.FindMessageByURL(string(message)) +} + +// FindMessageByURL looks up a message by a URL identifier. +// See documentation on google.protobuf.Any.type_url for the URL format. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + message := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + message = message[i+len("/"):] + } + + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound +} + +// FindExtensionByName looks up a extension field by the field's full name. +// Note that this is the full name of the field as determined by +// where the extension is declared and is unrelated to the full name of the +// message being extended. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + return xt, nil + } + return nil, errors.New("found wrong type: got %v, want extension", typeName(v)) + } + return nil, NotFound +} + +// FindExtensionByNumber looks up a extension field by the field number +// within some parent message, identified by full name. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if xt, ok := r.extensionsByMessage[message][field]; ok { + return xt, nil + } + return nil, NotFound +} + +// NumEnums reports the number of registered enums. +func (r *Types) NumEnums() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numEnums +} + +// RangeEnums iterates over all registered enums while f returns true. +// Iteration order is undefined. +func (r *Types) RangeEnums(f func(protoreflect.EnumType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if et, ok := typ.(protoreflect.EnumType); ok { + if !f(et) { + return + } + } + } +} + +// NumMessages reports the number of registered messages. 
+func (r *Types) NumMessages() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numMessages +} + +// RangeMessages iterates over all registered messages while f returns true. +// Iteration order is undefined. +func (r *Types) RangeMessages(f func(protoreflect.MessageType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if mt, ok := typ.(protoreflect.MessageType); ok { + if !f(mt) { + return + } + } + } +} + +// NumExtensions reports the number of registered extensions. +func (r *Types) NumExtensions() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numExtensions +} + +// RangeExtensions iterates over all registered extensions while f returns true. +// Iteration order is undefined. +func (r *Types) RangeExtensions(f func(protoreflect.ExtensionType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if xt, ok := typ.(protoreflect.ExtensionType); ok { + if !f(xt) { + return + } + } + } +} + +// NumExtensionsByMessage reports the number of registered extensions for +// a given message type. +func (r *Types) NumExtensionsByMessage(message protoreflect.FullName) int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return len(r.extensionsByMessage[message]) +} + +// RangeExtensionsByMessage iterates over all registered extensions filtered +// by a given message type while f returns true. Iteration order is undefined. +func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, xt := range r.extensionsByMessage[message] { + if !f(xt) { + return + } + } +} + +func typeName(t interface{}) string { + switch t.(type) { + case protoreflect.EnumType: + return "enum" + case protoreflect.MessageType: + return "message" + case protoreflect.ExtensionType: + return "extension" + default: + return fmt.Sprintf("%T", t) + } +} + +func amendErrorWithCaller(err error, prev, curr interface{}) error { + prevPkg := goPackage(prev) + currPkg := goPackage(curr) + if prevPkg == "" || currPkg == "" || prevPkg == currPkg { + return err + } + return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) +} + +func goPackage(v interface{}) string { + switch d := v.(type) { + case protoreflect.EnumType: + v = d.Descriptor() + case protoreflect.MessageType: + v = d.Descriptor() + case protoreflect.ExtensionType: + v = d.TypeDescriptor() + } + if d, ok := v.(protoreflect.Descriptor); ok { + v = d.ParentFile() + } + if d, ok := v.(interface{ GoPackagePath() string }); ok { + return d.GoPackagePath() + } + return "" +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go new file mode 100644 index 000000000..c58727675 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protoiface + +type MessageV1 interface { + Reset() + String() string + ProtoMessage() +} + +type ExtensionRangeV1 struct { + Start, End int32 // both inclusive +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go new file mode 100644 index 000000000..32c04f67e --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoiface contains types referenced or implemented by messages. +// +// WARNING: This package should only be imported by message implementations. +// The functionality found in this package should be accessed through +// higher-level abstractions provided by the proto package. +package protoiface + +import ( + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Methods is a set of optional fast-path implementations of various operations. +type Methods = struct { + pragma.NoUnkeyedLiterals + + // Flags indicate support for optional features. + Flags SupportFlags + + // Size returns the size in bytes of the wire-format encoding of a message. + // Marshal must be provided if a custom Size is provided. + Size func(SizeInput) SizeOutput + + // Marshal formats a message in the wire-format encoding to the provided buffer. + // Size should be provided if a custom Marshal is provided. + // It must not return an error for a partial message. + Marshal func(MarshalInput) (MarshalOutput, error) + + // Unmarshal parses the wire-format encoding and merges the result into a message. + // It must not reset the target message or return an error for a partial message. + Unmarshal func(UnmarshalInput) (UnmarshalOutput, error) + + // Merge merges the contents of a source message into a destination message. + Merge func(MergeInput) MergeOutput + + // CheckInitialized returns an error if any required fields in the message are not set. + CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) +} + +// SupportFlags indicate support for optional features. +type SupportFlags = uint64 + +const ( + // SupportMarshalDeterministic reports whether MarshalOptions.Deterministic is supported. + SupportMarshalDeterministic SupportFlags = 1 << iota + + // SupportUnmarshalDiscardUnknown reports whether UnmarshalOptions.DiscardUnknown is supported. + SupportUnmarshalDiscardUnknown +) + +// SizeInput is input to the Size method. +type SizeInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Flags MarshalInputFlags +} + +// SizeOutput is output from the Size method. +type SizeOutput = struct { + pragma.NoUnkeyedLiterals + + Size int +} + +// MarshalInput is input to the Marshal method. +type MarshalInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Buf []byte // output is appended to this buffer + Flags MarshalInputFlags +} + +// MarshalOutput is output from the Marshal method. +type MarshalOutput = struct { + pragma.NoUnkeyedLiterals + + Buf []byte // contains marshaled message +} + +// MarshalInputFlags configure the marshaler. +// Most flags correspond to fields in proto.MarshalOptions. +type MarshalInputFlags = uint8 + +const ( + MarshalDeterministic MarshalInputFlags = 1 << iota + MarshalUseCachedSize +) + +// UnmarshalInput is input to the Unmarshal method. 
+type UnmarshalInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Buf []byte // input buffer + Flags UnmarshalInputFlags + Resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } +} + +// UnmarshalOutput is output from the Unmarshal method. +type UnmarshalOutput = struct { + pragma.NoUnkeyedLiterals + + Flags UnmarshalOutputFlags +} + +// UnmarshalInputFlags configure the unmarshaler. +// Most flags correspond to fields in proto.UnmarshalOptions. +type UnmarshalInputFlags = uint8 + +const ( + UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota +) + +// UnmarshalOutputFlags are output from the Unmarshal method. +type UnmarshalOutputFlags = uint8 + +const ( + // UnmarshalInitialized may be set on return if all required fields are known to be set. + // If unset, then it does not necessarily indicate that the message is uninitialized, + // only that its status could not be confirmed. + UnmarshalInitialized UnmarshalOutputFlags = 1 << iota +) + +// MergeInput is input to the Merge method. +type MergeInput = struct { + pragma.NoUnkeyedLiterals + + Source protoreflect.Message + Destination protoreflect.Message +} + +// MergeOutput is output from the Merge method. +type MergeOutput = struct { + pragma.NoUnkeyedLiterals + + Flags MergeOutputFlags +} + +// MergeOutputFlags are output from the Merge method. +type MergeOutputFlags = uint8 + +const ( + // MergeComplete reports whether the merge was performed. + // If unset, the merger must have made no changes to the destination. + MergeComplete MergeOutputFlags = 1 << iota +) + +// CheckInitializedInput is input to the CheckInitialized method. +type CheckInitializedInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message +} + +// CheckInitializedOutput is output from the CheckInitialized method. +type CheckInitializedOutput = struct { + pragma.NoUnkeyedLiterals +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go new file mode 100644 index 000000000..4a1ab7fb3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoimpl contains the default implementation for messages +// generated by protoc-gen-go. +// +// WARNING: This package should only ever be imported by generated messages. +// The compatibility agreement covers nothing except for functionality needed +// to keep existing generated messages operational. Breakages that occur due +// to unauthorized usages of this package are not the author's responsibility. +package protoimpl + +import ( + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/filetype" + "google.golang.org/protobuf/internal/impl" +) + +// UnsafeEnabled specifies whether package unsafe can be used. +const UnsafeEnabled = impl.UnsafeEnabled + +type ( + // Types used by generated code in init functions. + DescBuilder = filedesc.Builder + TypeBuilder = filetype.Builder + + // Types used by generated code to implement EnumType, MessageType, and ExtensionType. 
+ EnumInfo = impl.EnumInfo + MessageInfo = impl.MessageInfo + ExtensionInfo = impl.ExtensionInfo + + // Types embedded in generated messages. + MessageState = impl.MessageState + SizeCache = impl.SizeCache + WeakFields = impl.WeakFields + UnknownFields = impl.UnknownFields + ExtensionFields = impl.ExtensionFields + ExtensionFieldV1 = impl.ExtensionField + + Pointer = impl.Pointer +) + +var X impl.Export diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go new file mode 100644 index 000000000..ff094e1ba --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go @@ -0,0 +1,56 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoimpl + +import ( + "google.golang.org/protobuf/internal/version" +) + +const ( + // MaxVersion is the maximum supported version for generated .pb.go files. + // It is always the current version of the module. + MaxVersion = version.Minor + + // GenVersion is the runtime version required by generated .pb.go files. + // This is incremented when generated code relies on new functionality + // in the runtime. + GenVersion = 20 + + // MinVersion is the minimum supported version for generated .pb.go files. + // This is incremented when the runtime drops support for old code. + MinVersion = 0 +) + +// EnforceVersion is used by code generated by protoc-gen-go +// to statically enforce minimum and maximum versions of this package. +// A compilation failure implies either that: +// * the runtime package is too old and needs to be updated OR +// * the generated code is too old and needs to be regenerated. +// +// The runtime package can be upgraded by running: +// go get google.golang.org/protobuf +// +// The generated code can be regenerated by running: +// protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} +// +// Example usage by generated code: +// const ( +// // Verify that this generated code is sufficiently up-to-date. +// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) +// // Verify that runtime/protoimpl is sufficiently up-to-date. +// _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - genVersion) +// ) +// +// The genVersion is the current minor version used to generated the code. +// This compile-time check relies on negative integer overflow of a uint +// being a compilation failure (guaranteed by the Go specification). +type EnforceVersion uint + +// This enforces the following invariant: +// MinVersion ≤ GenVersion ≤ MaxVersion +const ( + _ = EnforceVersion(GenVersion - MinVersion) + _ = EnforceVersion(MaxVersion - GenVersion) +) diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go new file mode 100644 index 000000000..824237856 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -0,0 +1,4040 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +package descriptorpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoiface "google.golang.org/protobuf/runtime/protoiface" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate. + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 // Uses ZigZag encoding. + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 // Uses ZigZag encoding. +) + +// Enum value maps for FieldDescriptorProto_Type. +var ( + FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", + } + FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, + } +) + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldDescriptorProto_Type) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Type.Descriptor instead. +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +// Enum value maps for FieldDescriptorProto_Label. 
+var ( + FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", + } + FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, + } +) + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() +} + +func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[1] +} + +func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldDescriptorProto_Label) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(num) + return nil +} + +// Deprecated: Use FieldDescriptorProto_Label.Descriptor instead. +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 // Generate complete code for parsing, serialization, + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 // Use ReflectionOps to implement these methods. + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 // Generate code using MessageLite and the lite runtime. +) + +// Enum value maps for FileOptions_OptimizeMode. +var ( + FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", + } + FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, + } +) + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() +} + +func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[2] +} + +func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FileOptions_OptimizeMode) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(num) + return nil +} + +// Deprecated: Use FileOptions_OptimizeMode.Descriptor instead. +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +// Enum value maps for FieldOptions_CType. 
+var ( + FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", + } + FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, + } +) + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() +} + +func (FieldOptions_CType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[3] +} + +func (x FieldOptions_CType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_CType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_CType(num) + return nil +} + +// Deprecated: Use FieldOptions_CType.Descriptor instead. +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +// Enum value maps for FieldOptions_JSType. +var ( + FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", + } + FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, + } +) + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() +} + +func (FieldOptions_JSType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[4] +} + +func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_JSType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_JSType(num) + return nil +} + +// Deprecated: Use FieldOptions_JSType.Descriptor instead. +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 // implies idempotent + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 // idempotent, but may have side effects +) + +// Enum value maps for MethodOptions_IdempotencyLevel. 
+var ( + MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", + } + MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, + } +) + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() +} + +func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[5] +} + +func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(num) + return nil +} + +// Deprecated: Use MethodOptions_IdempotencyLevel.Descriptor instead. +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` +} + +func (x *FileDescriptorSet) Reset() { + *x = FileDescriptorSet{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorSet) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorSet) ProtoMessage() {} + +func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorSet.ProtoReflect.Descriptor instead. +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0} +} + +func (x *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if x != nil { + return x.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc. + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. 
+ PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` +} + +func (x *FileDescriptorProto) Reset() { + *x = FileDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileDescriptorProto) ProtoMessage() {} + +func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1} +} + +func (x *FileDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FileDescriptorProto) GetPackage() string { + if x != nil && x.Package != nil { + return *x.Package + } + return "" +} + +func (x *FileDescriptorProto) GetDependency() []string { + if x != nil { + return x.Dependency + } + return nil +} + +func (x *FileDescriptorProto) GetPublicDependency() []int32 { + if x != nil { + return x.PublicDependency + } + return nil +} + +func (x *FileDescriptorProto) GetWeakDependency() []int32 { + if x != nil { + return x.WeakDependency + } + return nil +} + +func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if x != nil { + return x.MessageType + } + return nil +} + +func (x *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if x != nil { + return x.Service + } + return nil +} + +func (x *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *FileDescriptorProto) GetOptions() *FileOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if x != nil { + return x.SourceCodeInfo + } + return nil +} + +func (x *FileDescriptorProto) GetSyntax() string { + if x != nil && x.Syntax != nil { + return *x.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. 
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *DescriptorProto) Reset() { + *x = DescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto) ProtoMessage() {} + +func (x *DescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto.ProtoReflect.Descriptor instead. +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2} +} + +func (x *DescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *DescriptorProto) GetField() []*FieldDescriptorProto { + if x != nil { + return x.Field + } + return nil +} + +func (x *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if x != nil { + return x.Extension + } + return nil +} + +func (x *DescriptorProto) GetNestedType() []*DescriptorProto { + if x != nil { + return x.NestedType + } + return nil +} + +func (x *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if x != nil { + return x.EnumType + } + return nil +} + +func (x *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if x != nil { + return x.ExtensionRange + } + return nil +} + +func (x *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if x != nil { + return x.OneofDecl + } + return nil +} + +func (x *DescriptorProto) GetOptions() *MessageOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *DescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +type ExtensionRangeOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *ExtensionRangeOptions) Reset() { + *x = ExtensionRangeOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions) ProtoMessage() {} + +func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor instead. 
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3} +} + +var extRange_ExtensionRangeOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use ExtensionRangeOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*ExtensionRangeOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_ExtensionRangeOptions +} + +func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // If true, this is a proto3 "optional". When a proto3 field is optional, it + // tracks presence regardless of field type. + // + // When proto3_optional is true, this field must be belong to a oneof to + // signal to old proto3 clients that presence is tracked for this field. This + // oneof is known as a "synthetic" oneof, and this field must be its sole + // member (each proto3 optional field gets its own synthetic oneof). 
Synthetic + // oneofs exist in the descriptor only, and do not generate any API. Synthetic + // oneofs must be ordered after all "real" oneofs. + // + // For message fields, proto3_optional doesn't create any semantic change, + // since non-repeated message fields always track presence. However it still + // indicates the semantic detail of whether the user wrote "optional" or not. + // This can be useful for round-tripping the .proto file. For consistency we + // give message fields a synthetic oneof also, even though it is not required + // to track presence. This is especially important because the parser can't + // tell if a field is a message or an enum, so it must always create a + // synthetic oneof. + // + // Proto2 optional fields do not set this flag, because they already indicate + // optional with `LABEL_OPTIONAL`. + Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"` +} + +func (x *FieldDescriptorProto) Reset() { + *x = FieldDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldDescriptorProto) ProtoMessage() {} + +func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldDescriptorProto.ProtoReflect.Descriptor instead. +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{4} +} + +func (x *FieldDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *FieldDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if x != nil && x.Label != nil { + return *x.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (x *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if x != nil && x.Type != nil { + return *x.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (x *FieldDescriptorProto) GetTypeName() string { + if x != nil && x.TypeName != nil { + return *x.TypeName + } + return "" +} + +func (x *FieldDescriptorProto) GetExtendee() string { + if x != nil && x.Extendee != nil { + return *x.Extendee + } + return "" +} + +func (x *FieldDescriptorProto) GetDefaultValue() string { + if x != nil && x.DefaultValue != nil { + return *x.DefaultValue + } + return "" +} + +func (x *FieldDescriptorProto) GetOneofIndex() int32 { + if x != nil && x.OneofIndex != nil { + return *x.OneofIndex + } + return 0 +} + +func (x *FieldDescriptorProto) GetJsonName() string { + if x != nil && x.JsonName != nil { + return *x.JsonName + } + return "" +} + +func (x *FieldDescriptorProto) GetOptions() *FieldOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *FieldDescriptorProto) GetProto3Optional() bool { + if x != nil && x.Proto3Optional != nil { + return *x.Proto3Optional + } + return false +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (x *OneofDescriptorProto) Reset() { + *x = OneofDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofDescriptorProto) ProtoMessage() {} + +func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofDescriptorProto.ProtoReflect.Descriptor instead. +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{5} +} + +func (x *OneofDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *OneofDescriptorProto) GetOptions() *OneofOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` +} + +func (x *EnumDescriptorProto) Reset() { + *x = EnumDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto) ProtoMessage() {} + +func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto.ProtoReflect.Descriptor instead. 
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6} +} + +func (x *EnumDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if x != nil { + return x.Value + } + return nil +} + +func (x *EnumDescriptorProto) GetOptions() *EnumOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if x != nil { + return x.ReservedRange + } + return nil +} + +func (x *EnumDescriptorProto) GetReservedName() []string { + if x != nil { + return x.ReservedName + } + return nil +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *EnumValueDescriptorProto) Reset() { + *x = EnumValueDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueDescriptorProto) ProtoMessage() {} + +func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueDescriptorProto.ProtoReflect.Descriptor instead. +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{7} +} + +func (x *EnumValueDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *EnumValueDescriptorProto) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *ServiceDescriptorProto) Reset() { + *x = ServiceDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceDescriptorProto) ProtoMessage() {} + +func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceDescriptorProto.ProtoReflect.Descriptor instead. +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{8} +} + +func (x *ServiceDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if x != nil { + return x.Method + } + return nil +} + +func (x *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if x != nil { + return x.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` +} + +// Default values for MethodDescriptorProto fields. 
+const ( + Default_MethodDescriptorProto_ClientStreaming = bool(false) + Default_MethodDescriptorProto_ServerStreaming = bool(false) +) + +func (x *MethodDescriptorProto) Reset() { + *x = MethodDescriptorProto{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodDescriptorProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodDescriptorProto) ProtoMessage() {} + +func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodDescriptorProto.ProtoReflect.Descriptor instead. +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{9} +} + +func (x *MethodDescriptorProto) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *MethodDescriptorProto) GetInputType() string { + if x != nil && x.InputType != nil { + return *x.InputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOutputType() string { + if x != nil && x.OutputType != nil { + return *x.OutputType + } + return "" +} + +func (x *MethodDescriptorProto) GetOptions() *MethodOptions { + if x != nil { + return x.Options + } + return nil +} + +func (x *MethodDescriptorProto) GetClientStreaming() bool { + if x != nil && x.ClientStreaming != nil { + return *x.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (x *MethodDescriptorProto) GetServerStreaming() bool { + if x != nil && x.ServerStreaming != nil { + return *x.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. 
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + // + // Deprecated: Do not use. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect on when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=1" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. 
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FileOptions fields. +const ( + Default_FileOptions_JavaMultipleFiles = bool(false) + Default_FileOptions_JavaStringCheckUtf8 = bool(false) + Default_FileOptions_OptimizeFor = FileOptions_SPEED + Default_FileOptions_CcGenericServices = bool(false) + Default_FileOptions_JavaGenericServices = bool(false) + Default_FileOptions_PyGenericServices = bool(false) + Default_FileOptions_PhpGenericServices = bool(false) + Default_FileOptions_Deprecated = bool(false) + Default_FileOptions_CcEnableArenas = bool(true) +) + +func (x *FileOptions) Reset() { + *x = FileOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileOptions) ProtoMessage() {} + +func (x *FileOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileOptions.ProtoReflect.Descriptor instead. 
+func (*FileOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{10} +} + +var extRange_FileOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use FileOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*FileOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_FileOptions +} + +func (x *FileOptions) GetJavaPackage() string { + if x != nil && x.JavaPackage != nil { + return *x.JavaPackage + } + return "" +} + +func (x *FileOptions) GetJavaOuterClassname() string { + if x != nil && x.JavaOuterClassname != nil { + return *x.JavaOuterClassname + } + return "" +} + +func (x *FileOptions) GetJavaMultipleFiles() bool { + if x != nil && x.JavaMultipleFiles != nil { + return *x.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. +func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if x != nil && x.JavaGenerateEqualsAndHash != nil { + return *x.JavaGenerateEqualsAndHash + } + return false +} + +func (x *FileOptions) GetJavaStringCheckUtf8() bool { + if x != nil && x.JavaStringCheckUtf8 != nil { + return *x.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (x *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if x != nil && x.OptimizeFor != nil { + return *x.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (x *FileOptions) GetGoPackage() string { + if x != nil && x.GoPackage != nil { + return *x.GoPackage + } + return "" +} + +func (x *FileOptions) GetCcGenericServices() bool { + if x != nil && x.CcGenericServices != nil { + return *x.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (x *FileOptions) GetJavaGenericServices() bool { + if x != nil && x.JavaGenericServices != nil { + return *x.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (x *FileOptions) GetPyGenericServices() bool { + if x != nil && x.PyGenericServices != nil { + return *x.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (x *FileOptions) GetPhpGenericServices() bool { + if x != nil && x.PhpGenericServices != nil { + return *x.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (x *FileOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (x *FileOptions) GetCcEnableArenas() bool { + if x != nil && x.CcEnableArenas != nil { + return *x.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (x *FileOptions) GetObjcClassPrefix() string { + if x != nil && x.ObjcClassPrefix != nil { + return *x.ObjcClassPrefix + } + return "" +} + +func (x *FileOptions) GetCsharpNamespace() string { + if x != nil && x.CsharpNamespace != nil { + return *x.CsharpNamespace + } + return "" +} + +func (x *FileOptions) GetSwiftPrefix() string { + if x != nil && x.SwiftPrefix != nil { + return *x.SwiftPrefix + } + return "" +} + +func (x *FileOptions) GetPhpClassPrefix() string { + if x != nil && x.PhpClassPrefix != nil { + return *x.PhpClassPrefix + } + return "" +} + +func (x *FileOptions) GetPhpNamespace() string { + if x != nil && x.PhpNamespace != nil { + return *x.PhpNamespace + } + return "" +} + +func (x *FileOptions) GetPhpMetadataNamespace() string { + if x != nil && x.PhpMetadataNamespace != nil { + return *x.PhpMetadataNamespace + } + return "" +} + +func 
(x *FileOptions) GetRubyPackage() string { + if x != nil && x.RubyPackage != nil { + return *x.RubyPackage + } + return "" +} + +func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MessageOptions fields. 
+const ( + Default_MessageOptions_MessageSetWireFormat = bool(false) + Default_MessageOptions_NoStandardDescriptorAccessor = bool(false) + Default_MessageOptions_Deprecated = bool(false) +) + +func (x *MessageOptions) Reset() { + *x = MessageOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageOptions) ProtoMessage() {} + +func (x *MessageOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageOptions.ProtoReflect.Descriptor instead. +func (*MessageOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{11} +} + +var extRange_MessageOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use MessageOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*MessageOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_MessageOptions +} + +func (x *MessageOptions) GetMessageSetWireFormat() bool { + if x != nil && x.MessageSetWireFormat != nil { + return *x.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (x *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if x != nil && x.NoStandardDescriptorAccessor != nil { + return *x.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (x *MessageOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (x *MessageOptions) GetMapEntry() bool { + if x != nil && x.MapEntry != nil { + return *x.MapEntry + } + return false +} + +func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). 
A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for FieldOptions fields. 
+const ( + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) +) + +func (x *FieldOptions) Reset() { + *x = FieldOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldOptions) ProtoMessage() {} + +func (x *FieldOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldOptions.ProtoReflect.Descriptor instead. +func (*FieldOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12} +} + +var extRange_FieldOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use FieldOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*FieldOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_FieldOptions +} + +func (x *FieldOptions) GetCtype() FieldOptions_CType { + if x != nil && x.Ctype != nil { + return *x.Ctype + } + return Default_FieldOptions_Ctype +} + +func (x *FieldOptions) GetPacked() bool { + if x != nil && x.Packed != nil { + return *x.Packed + } + return false +} + +func (x *FieldOptions) GetJstype() FieldOptions_JSType { + if x != nil && x.Jstype != nil { + return *x.Jstype + } + return Default_FieldOptions_Jstype +} + +func (x *FieldOptions) GetLazy() bool { + if x != nil && x.Lazy != nil { + return *x.Lazy + } + return Default_FieldOptions_Lazy +} + +func (x *FieldOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (x *FieldOptions) GetWeak() bool { + if x != nil && x.Weak != nil { + return *x.Weak + } + return Default_FieldOptions_Weak +} + +func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +func (x *OneofOptions) Reset() { + *x = OneofOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OneofOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OneofOptions) ProtoMessage() {} + +func (x *OneofOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OneofOptions.ProtoReflect.Descriptor instead. +func (*OneofOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13} +} + +var extRange_OneofOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use OneofOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*OneofOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_OneofOptions +} + +func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumOptions fields. +const ( + Default_EnumOptions_Deprecated = bool(false) +) + +func (x *EnumOptions) Reset() { + *x = EnumOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumOptions) ProtoMessage() {} + +func (x *EnumOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumOptions.ProtoReflect.Descriptor instead. 
+func (*EnumOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{14} +} + +var extRange_EnumOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use EnumOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*EnumOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_EnumOptions +} + +func (x *EnumOptions) GetAllowAlias() bool { + if x != nil && x.AllowAlias != nil { + return *x.AllowAlias + } + return false +} + +func (x *EnumOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for EnumValueOptions fields. +const ( + Default_EnumValueOptions_Deprecated = bool(false) +) + +func (x *EnumValueOptions) Reset() { + *x = EnumValueOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumValueOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumValueOptions) ProtoMessage() {} + +func (x *EnumValueOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor instead. +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{15} +} + +var extRange_EnumValueOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use EnumValueOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*EnumValueOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_EnumValueOptions +} + +func (x *EnumValueOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this service deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for ServiceOptions fields. +const ( + Default_ServiceOptions_Deprecated = bool(false) +) + +func (x *ServiceOptions) Reset() { + *x = ServiceOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServiceOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceOptions) ProtoMessage() {} + +func (x *ServiceOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor instead. +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16} +} + +var extRange_ServiceOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use ServiceOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*ServiceOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_ServiceOptions +} + +func (x *ServiceOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (x *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` +} + +// Default values for MethodOptions fields. 
+const ( + Default_MethodOptions_Deprecated = bool(false) + Default_MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN +) + +func (x *MethodOptions) Reset() { + *x = MethodOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodOptions) ProtoMessage() {} + +func (x *MethodOptions) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodOptions.ProtoReflect.Descriptor instead. +func (*MethodOptions) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17} +} + +var extRange_MethodOptions = []protoiface.ExtensionRangeV1{ + {Start: 1000, End: 536870911}, +} + +// Deprecated: Use MethodOptions.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*MethodOptions) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_MethodOptions +} + +func (x *MethodOptions) GetDeprecated() bool { + if x != nil && x.Deprecated != nil { + return *x.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if x != nil && x.IdempotencyLevel != nil { + return *x.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if x != nil { + return x.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` +} + +func (x *UninterpretedOption) Reset() { + *x = UninterpretedOption{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption) ProtoMessage() {} + +func (x *UninterpretedOption) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption.ProtoReflect.Descriptor instead. +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18} +} + +func (x *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if x != nil { + return x.Name + } + return nil +} + +func (x *UninterpretedOption) GetIdentifierValue() string { + if x != nil && x.IdentifierValue != nil { + return *x.IdentifierValue + } + return "" +} + +func (x *UninterpretedOption) GetPositiveIntValue() uint64 { + if x != nil && x.PositiveIntValue != nil { + return *x.PositiveIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetNegativeIntValue() int64 { + if x != nil && x.NegativeIntValue != nil { + return *x.NegativeIntValue + } + return 0 +} + +func (x *UninterpretedOption) GetDoubleValue() float64 { + if x != nil && x.DoubleValue != nil { + return *x.DoubleValue + } + return 0 +} + +func (x *UninterpretedOption) GetStringValue() []byte { + if x != nil { + return x.StringValue + } + return nil +} + +func (x *UninterpretedOption) GetAggregateValue() string { + if x != nil && x.AggregateValue != nil { + return *x.AggregateValue + } + return "" +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. 
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` +} + +func (x *SourceCodeInfo) Reset() { + *x = SourceCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo) ProtoMessage() {} + +func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead. +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19} +} + +func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if x != nil { + return x.Location + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. 
+ Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` +} + +func (x *GeneratedCodeInfo) Reset() { + *x = GeneratedCodeInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo) ProtoMessage() {} + +func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead. +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20} +} + +func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if x != nil { + return x.Annotation + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` +} + +func (x *DescriptorProto_ExtensionRange) Reset() { + *x = DescriptorProto_ExtensionRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ExtensionRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} + +func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ExtensionRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *DescriptorProto_ExtensionRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +func (x *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if x != nil { + return x.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive. 
+} + +func (x *DescriptorProto_ReservedRange) Reset() { + *x = DescriptorProto_ReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DescriptorProto_ReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescriptorProto_ReservedRange) ProtoMessage() {} + +func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescriptorProto_ReservedRange.ProtoReflect.Descriptor instead. +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *DescriptorProto_ReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *DescriptorProto_ReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. +type EnumDescriptorProto_EnumReservedRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive. + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive. +} + +func (x *EnumDescriptorProto_EnumReservedRange) Reset() { + *x = EnumDescriptorProto_EnumReservedRange{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EnumDescriptorProto_EnumReservedRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} + +func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EnumDescriptorProto_EnumReservedRange.ProtoReflect.Descriptor instead. +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if x != nil && x.Start != nil { + return *x.Start + } + return 0 +} + +func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). 
+// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". +type UninterpretedOption_NamePart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` +} + +func (x *UninterpretedOption_NamePart) Reset() { + *x = UninterpretedOption_NamePart{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UninterpretedOption_NamePart) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UninterpretedOption_NamePart) ProtoMessage() {} + +func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UninterpretedOption_NamePart.ProtoReflect.Descriptor instead. +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{18, 0} +} + +func (x *UninterpretedOption_NamePart) GetNamePart() string { + if x != nil && x.NamePart != nil { + return *x.NamePart + } + return "" +} + +func (x *UninterpretedOption_NamePart) GetIsExtension() bool { + if x != nil && x.IsExtension != nil { + return *x.IsExtension + } + return false +} + +type SourceCodeInfo_Location struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. 
+ // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` +} + +func (x *SourceCodeInfo_Location) Reset() { + *x = SourceCodeInfo_Location{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SourceCodeInfo_Location) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceCodeInfo_Location) ProtoMessage() {} + +func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead. 
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0} +} + +func (x *SourceCodeInfo_Location) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *SourceCodeInfo_Location) GetSpan() []int32 { + if x != nil { + return x.Span + } + return nil +} + +func (x *SourceCodeInfo_Location) GetLeadingComments() string { + if x != nil && x.LeadingComments != nil { + return *x.LeadingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetTrailingComments() string { + if x != nil && x.TrailingComments != nil { + return *x.TrailingComments + } + return "" +} + +func (x *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if x != nil { + return x.LeadingDetachedComments + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` +} + +func (x *GeneratedCodeInfo_Annotation) Reset() { + *x = GeneratedCodeInfo_Annotation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneratedCodeInfo_Annotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} + +func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead. 
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if x != nil { + return x.Path + } + return nil +} + +func (x *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if x != nil && x.SourceFile != nil { + return *x.SourceFile + } + return "" +} + +func (x *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if x != nil && x.Begin != nil { + return *x.Begin + } + return 0 +} + +func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if x != nil && x.End != nil { + return *x.End + } + return 0 +} + +var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor + +var file_google_protobuf_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, + 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, + 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, + 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, + 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, + 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, + 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, + 0x75, 
0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, + 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, + 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, + 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, + 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, + 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, + 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, + 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, + 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, + 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 
0x4c, 0x4f, 0x41, + 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, + 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, + 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, + 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, + 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, + 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, + 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, + 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, + 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, + 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, + 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, + 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, + 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, + 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, + 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, + 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1d, 0x0a, 0x0a, 0x69, 
0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, + 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, + 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, + 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, + 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, + 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 
0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, + 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, + 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, + 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, + 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, + 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, + 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, + 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, + 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, + 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 
0x2c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, + 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, + 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, + 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, + 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, + 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, + 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, + 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, + 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, + 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, + 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, + 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, + 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, + 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, + 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, + 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, + 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, + 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, + 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, + 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, + 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, + 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, + 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, + 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, + 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, + 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, + 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, + 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, + 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 
0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, + 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, + 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, + 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, + 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, + 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, + 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, + 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, + 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, + 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, + 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, + 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, + 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, + 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, + 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 
0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, + 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, + 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, + 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, + 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, + 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, + 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, + 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, + 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, + 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, + 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, + 0x63, 
0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, + 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, + 0x64, 0x42, 0x8f, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x3e, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x3b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0xf8, 0x01, 0x01, + 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, +} + +var ( + file_google_protobuf_descriptor_proto_rawDescOnce sync.Once + file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc +) + +func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { + file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + }) + return file_google_protobuf_descriptor_proto_rawDescData +} + +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ + (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label + (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType + (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel + (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 16: google.protobuf.FileOptions + (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 20: 
google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange + (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation +} +var file_google_protobuf_descriptor_proto_depIdxs = []int32{ + 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 10, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 21, // 23: 
google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 15, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 43, // [43:43] is the sub-list for method output_type + 43, // [43:43] is the sub-list for method input_type + 43, // [43:43] is the sub-list for extension type_name + 43, // [43:43] is the sub-list for extension extendee + 0, // [0:43] is the sub-list for field type_name +} + +func init() { file_google_protobuf_descriptor_proto_init() } +func file_google_protobuf_descriptor_proto_init() { + if File_google_protobuf_descriptor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorSet); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionRangeOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OneofDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValueDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodDescriptorProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OneofOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + 
file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumValueOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServiceOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratedCodeInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto_ExtensionRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DescriptorProto_ReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UninterpretedOption_NamePart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneratedCodeInfo_Annotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + NumEnums: 6, + NumMessages: 27, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_descriptor_proto_goTypes, + DependencyIndexes: file_google_protobuf_descriptor_proto_depIdxs, + EnumInfos: file_google_protobuf_descriptor_proto_enumTypes, + MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, + }.Build() + File_google_protobuf_descriptor_proto = out.File + file_google_protobuf_descriptor_proto_rawDesc = nil + file_google_protobuf_descriptor_proto_goTypes = nil + file_google_protobuf_descriptor_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go new file mode 100644 index 000000000..82a473e26 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -0,0 +1,494 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto + +// Package anypb contains generated types for google/protobuf/any.proto. +// +// The Any message is a dynamic representation of any other message value. +// It is functionally a tuple of the full name of the remote message type and +// the serialized bytes of the remote message value. +// +// +// Constructing an Any +// +// An Any message containing another message value is constructed using New: +// +// any, err := anypb.New(m) +// if err != nil { +// ... // handle error +// } +// ... // make use of any +// +// +// Unmarshaling an Any +// +// With a populated Any message, the underlying message can be serialized into +// a remote concrete message value in a few ways. 
+// +// If the exact concrete type is known, then a new (or pre-existing) instance +// of that message can be passed to the UnmarshalTo method: +// +// m := new(foopb.MyMessage) +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// +// If the exact concrete type is not known, then the UnmarshalNew method can be +// used to unmarshal the contents into a new instance of the remote message type: +// +// m, err := any.UnmarshalNew() +// if err != nil { +// ... // handle error +// } +// ... // make use of m +// +// UnmarshalNew uses the global type registry to resolve the message type and +// construct a new instance of that message to unmarshal into. In order for a +// message type to appear in the global registry, the Go type representing that +// protobuf message type must be linked into the Go binary. For messages +// generated by protoc-gen-go, this is achieved through an import of the +// generated Go package representing a .proto file. +// +// A common pattern with UnmarshalNew is to use a type switch with the resulting +// proto.Message value: +// +// switch m := m.(type) { +// case *foopb.MyMessage: +// ... // make use of m as a *foopb.MyMessage +// case *barpb.OtherMessage: +// ... // make use of m as a *barpb.OtherMessage +// case *bazpb.SomeMessage: +// ... // make use of m as a *bazpb.SomeMessage +// } +// +// This pattern ensures that the generated packages containing the message types +// listed in the case clauses are linked into the Go binary and therefore also +// registered in the global registry. +// +// +// Type checking an Any +// +// In order to type check whether an Any message represents some other message, +// then use the MessageIs method: +// +// if any.MessageIs((*foopb.MyMessage)(nil)) { +// ... // make use of any, knowing that it contains a foopb.MyMessage +// } +// +// The MessageIs method can also be used with an allocated instance of the target +// message type if the intention is to unmarshal into it if the type matches: +// +// m := new(foopb.MyMessage) +// if any.MessageIs(m) { +// if err := any.UnmarshalTo(m); err != nil { +// ... // handle error +// } +// ... // make use of m +// } +// +package anypb + +import ( + proto "google.golang.org/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoregistry "google.golang.org/protobuf/reflect/protoregistry" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + strings "strings" + sync "sync" +) + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... 
+// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +// New marshals src into a new Any instance. +func New(src proto.Message) (*Any, error) { + dst := new(Any) + if err := dst.MarshalFrom(src); err != nil { + return nil, err + } + return dst, nil +} + +// MarshalFrom marshals src into dst as the underlying message +// using the provided marshal options. +// +// If no options are specified, call dst.MarshalFrom instead. 
+func MarshalFrom(dst *Any, src proto.Message, opts proto.MarshalOptions) error { + const urlPrefix = "type.googleapis.com/" + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + b, err := opts.Marshal(src) + if err != nil { + return err + } + dst.TypeUrl = urlPrefix + string(src.ProtoReflect().Descriptor().FullName()) + dst.Value = b + return nil +} + +// UnmarshalTo unmarshals the underlying message from src into dst +// using the provided unmarshal options. +// It reports an error if dst is not of the right message type. +// +// If no options are specified, call src.UnmarshalTo instead. +func UnmarshalTo(src *Any, dst proto.Message, opts proto.UnmarshalOptions) error { + if src == nil { + return protoimpl.X.NewError("invalid nil source message") + } + if !src.MessageIs(dst) { + got := dst.ProtoReflect().Descriptor().FullName() + want := src.MessageName() + return protoimpl.X.NewError("mismatched message type: got %q, want %q", got, want) + } + return opts.Unmarshal(src.GetValue(), dst) +} + +// UnmarshalNew unmarshals the underlying message from src into dst, +// which is newly created message using a type resolved from the type URL. +// The message type is resolved according to opt.Resolver, +// which should implement protoregistry.MessageTypeResolver. +// It reports an error if the underlying message type could not be resolved. +// +// If no options are specified, call src.UnmarshalNew instead. +func UnmarshalNew(src *Any, opts proto.UnmarshalOptions) (dst proto.Message, err error) { + if src.GetTypeUrl() == "" { + return nil, protoimpl.X.NewError("invalid empty type URL") + } + if opts.Resolver == nil { + opts.Resolver = protoregistry.GlobalTypes + } + r, ok := opts.Resolver.(protoregistry.MessageTypeResolver) + if !ok { + return nil, protoregistry.NotFound + } + mt, err := r.FindMessageByURL(src.GetTypeUrl()) + if err != nil { + if err == protoregistry.NotFound { + return nil, err + } + return nil, protoimpl.X.NewError("could not resolve %q: %v", src.GetTypeUrl(), err) + } + dst = mt.New().Interface() + return dst, opts.Unmarshal(src.GetValue(), dst) +} + +// MessageIs reports whether the underlying message is of the same type as m. +func (x *Any) MessageIs(m proto.Message) bool { + if m == nil { + return false + } + url := x.GetTypeUrl() + name := string(m.ProtoReflect().Descriptor().FullName()) + if !strings.HasSuffix(url, name) { + return false + } + return len(url) == len(name) || url[len(url)-len(name)-1] == '/' +} + +// MessageName reports the full name of the underlying message, +// returning an empty string if invalid. +func (x *Any) MessageName() protoreflect.FullName { + url := x.GetTypeUrl() + name := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "" + } + return name +} + +// MarshalFrom marshals m into x as the underlying message. +func (x *Any) MarshalFrom(m proto.Message) error { + return MarshalFrom(x, m, proto.MarshalOptions{}) +} + +// UnmarshalTo unmarshals the contents of the underlying message of x into m. +// It resets m before performing the unmarshal operation. +// It reports an error if m is not of the right message type. +func (x *Any) UnmarshalTo(m proto.Message) error { + return UnmarshalTo(x, m, proto.UnmarshalOptions{}) +} + +// UnmarshalNew unmarshals the contents of the underlying message of x into +// a newly allocated message of the specified type. +// It reports an error if the underlying message type could not be resolved. 
+func (x *Any) UnmarshalNew() (proto.Message, error) { + return UnmarshalNew(x, proto.UnmarshalOptions{}) +} + +func (x *Any) Reset() { + *x = Any{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Any) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Any) ProtoMessage() {} + +func (x *Any) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_any_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Any.ProtoReflect.Descriptor instead. +func (*Any) Descriptor() ([]byte, []int) { + return file_google_protobuf_any_proto_rawDescGZIP(), []int{0} +} + +func (x *Any) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *Any) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_any_proto protoreflect.FileDescriptor + +var file_google_protobuf_any_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, + 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02, + 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_any_proto_rawDescOnce sync.Once + file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc +) + +func file_google_protobuf_any_proto_rawDescGZIP() []byte { + file_google_protobuf_any_proto_rawDescOnce.Do(func() { + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + }) + return file_google_protobuf_any_proto_rawDescData +} + +var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_any_proto_goTypes = []interface{}{ + (*Any)(nil), // 0: google.protobuf.Any +} +var file_google_protobuf_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { 
file_google_protobuf_any_proto_init() } +func file_google_protobuf_any_proto_init() { + if File_google_protobuf_any_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_any_proto_goTypes, + DependencyIndexes: file_google_protobuf_any_proto_depIdxs, + MessageInfos: file_google_protobuf_any_proto_msgTypes, + }.Build() + File_google_protobuf_any_proto = out.File + file_google_protobuf_any_proto_rawDesc = nil + file_google_protobuf_any_proto_goTypes = nil + file_google_protobuf_any_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go new file mode 100644 index 000000000..f7a110994 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -0,0 +1,379 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +// Package durationpb contains generated types for google/protobuf/duration.proto. +// +// The Duration message represents a signed span of time. +// +// +// Conversion to a Go Duration +// +// The AsDuration method can be used to convert a Duration message to a +// standard Go time.Duration value: +// +// d := dur.AsDuration() +// ... 
// make use of d as a time.Duration +// +// Converting to a time.Duration is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsDuration method performs the conversion on a best-effort basis. +// Durations with denormal values (e.g., nanoseconds beyond -99999999 and +// +99999999, inclusive; or seconds and nanoseconds with opposite signs) +// are normalized during the conversion to a time.Duration. To manually check for +// invalid Duration per the documented limitations in duration.proto, +// additionally call the CheckValid method: +// +// if err := dur.CheckValid(); err != nil { +// ... // handle error +// } +// +// Note that the documented limitations in duration.proto does not protect a +// Duration from overflowing the representable range of a time.Duration in Go. +// The AsDuration method uses saturation arithmetic such that an overflow clamps +// the resulting value to the closest representable value (e.g., math.MaxInt64 +// for positive overflow and math.MinInt64 for negative overflow). +// +// +// Conversion from a Go Duration +// +// The durationpb.New function can be used to construct a Duration message +// from a standard Go time.Duration value: +// +// dur := durationpb.New(d) +// ... // make use of d as a *durationpb.Duration +// +package durationpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + time "time" +) + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +// New constructs a new Duration from the provided time.Duration. +func New(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{Seconds: int64(secs), Nanos: int32(nanos)} +} + +// AsDuration converts x to a time.Duration, +// returning the closest duration value in the event of overflow. +func (x *Duration) AsDuration() time.Duration { + secs := x.GetSeconds() + nanos := x.GetNanos() + d := time.Duration(secs) * time.Second + overflow := d/time.Second != time.Duration(secs) + d += time.Duration(nanos) * time.Nanosecond + overflow = overflow || (secs < 0 && nanos < 0 && d > 0) + overflow = overflow || (secs > 0 && nanos > 0 && d < 0) + if overflow { + switch { + case secs < 0: + return time.Duration(math.MinInt64) + case secs > 0: + return time.Duration(math.MaxInt64) + } + } + return d +} + +// IsValid reports whether the duration is valid. +// It is equivalent to CheckValid == nil. +func (x *Duration) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the duration is invalid. +// In particular, it checks whether the value is within the range of +// -10000 years to +10000 years inclusive. +// An error is reported for a nil Duration. 
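As a quick illustration of the conversion and validation API documented in the package comment above, here is a minimal, self-contained sketch. It is not part of the vendored file; it only assumes the durationpb import path added by this patch.

// durationpb_example.go -- hypothetical usage sketch, not part of the vendored code.
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Build a Duration message from a Go time.Duration.
	dur := durationpb.New(90 * time.Second) // Seconds: 90, Nanos: 0

	// Explicitly check the documented duration.proto limitations.
	if err := dur.CheckValid(); err != nil {
		fmt.Println("invalid duration:", err)
		return
	}

	// Convert back to a time.Duration (saturates on overflow).
	fmt.Println(dur.AsDuration()) // 1m30s
}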
+func (x *Duration) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Duration") + case invalidUnderflow: + return protoimpl.X.NewError("duration (%v) exceeds -10000 years", x) + case invalidOverflow: + return protoimpl.X.NewError("duration (%v) exceeds +10000 years", x) + case invalidNanosRange: + return protoimpl.X.NewError("duration (%v) has out-of-range nanos", x) + case invalidNanosSign: + return protoimpl.X.NewError("duration (%v) has seconds and nanos with different signs", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanosRange + invalidNanosSign +) + +func (x *Duration) check() uint { + const absDuration = 315576000000 // 10000yr * 365.25day/yr * 24hr/day * 60min/hr * 60sec/min + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < -absDuration: + return invalidUnderflow + case secs > +absDuration: + return invalidOverflow + case nanos <= -1e9 || nanos >= +1e9: + return invalidNanosRange + case (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0): + return invalidNanosSign + default: + return 0 + } +} + +func (x *Duration) Reset() { + *x = Duration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Duration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Duration) ProtoMessage() {} + +func (x *Duration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Duration.ProtoReflect.Descriptor instead. 
+func (*Duration) Descriptor() ([]byte, []int) { + return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} +} + +func (x *Duration) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Duration) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_duration_proto protoreflect.FileDescriptor + +var file_google_protobuf_duration_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a, + 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, + 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_duration_proto_rawDescOnce sync.Once + file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc +) + +func file_google_protobuf_duration_proto_rawDescGZIP() []byte { + file_google_protobuf_duration_proto_rawDescOnce.Do(func() { + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + }) + return file_google_protobuf_duration_proto_rawDescData +} + +var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_duration_proto_goTypes = []interface{}{ + (*Duration)(nil), // 0: google.protobuf.Duration +} +var file_google_protobuf_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_duration_proto_init() } +func file_google_protobuf_duration_proto_init() { + if File_google_protobuf_duration_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Duration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_duration_proto_goTypes, + DependencyIndexes: file_google_protobuf_duration_proto_depIdxs, + MessageInfos: file_google_protobuf_duration_proto_msgTypes, + }.Build() + File_google_protobuf_duration_proto = out.File + file_google_protobuf_duration_proto_rawDesc = nil + file_google_protobuf_duration_proto_goTypes = nil + file_google_protobuf_duration_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go new file mode 100644 index 000000000..32a583df5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -0,0 +1,168 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package emptypb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. 
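Because google.protobuf.Empty carries no fields, its wire encoding is zero bytes; the short sketch below (purely illustrative, not part of the vendored file) makes that concrete.

// emptypb_example.go -- hypothetical sketch, not part of the vendored code.
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Marshaling an Empty message produces no bytes on the wire.
	b, err := proto.Marshal(&emptypb.Empty{})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b)) // 0
}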
+type Empty struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Empty) ProtoMessage() {} + +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_empty_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_google_protobuf_empty_proto_rawDescGZIP(), []int{0} +} + +var File_google_protobuf_empty_proto protoreflect.FileDescriptor + +var file_google_protobuf_empty_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, + 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, + 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_empty_proto_rawDescOnce sync.Once + file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc +) + +func file_google_protobuf_empty_proto_rawDescGZIP() []byte { + file_google_protobuf_empty_proto_rawDescOnce.Do(func() { + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + }) + return file_google_protobuf_empty_proto_rawDescData +} + +var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_empty_proto_goTypes = []interface{}{ + (*Empty)(nil), // 0: google.protobuf.Empty +} +var file_google_protobuf_empty_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_empty_proto_init() } +func file_google_protobuf_empty_proto_init() { + if File_google_protobuf_empty_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: 
+ return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_empty_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_empty_proto_goTypes, + DependencyIndexes: file_google_protobuf_empty_proto_depIdxs, + MessageInfos: file_google_protobuf_empty_proto_msgTypes, + }.Build() + File_google_protobuf_empty_proto = out.File + file_google_protobuf_empty_proto_rawDesc = nil + file_google_protobuf_empty_proto_goTypes = nil + file_google_protobuf_empty_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go new file mode 100644 index 000000000..c25e4bd7d --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -0,0 +1,381 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +// Package timestamppb contains generated types for google/protobuf/timestamp.proto. +// +// The Timestamp message represents a timestamp, +// an instant in time since the Unix epoch (January 1st, 1970). +// +// +// Conversion to a Go Time +// +// The AsTime method can be used to convert a Timestamp message to a +// standard Go time.Time value in UTC: +// +// t := ts.AsTime() +// ... // make use of t as a time.Time +// +// Converting to a time.Time is a common operation so that the extensive +// set of time-based operations provided by the time package can be leveraged. +// See https://golang.org/pkg/time for more information. +// +// The AsTime method performs the conversion on a best-effort basis. 
Timestamps +// with denormal values (e.g., nanoseconds beyond 0 and 99999999, inclusive) +// are normalized during the conversion to a time.Time. To manually check for +// invalid Timestamps per the documented limitations in timestamp.proto, +// additionally call the CheckValid method: +// +// if err := ts.CheckValid(); err != nil { +// ... // handle error +// } +// +// +// Conversion from a Go Time +// +// The timestamppb.New function can be used to construct a Timestamp message +// from a standard Go time.Time value: +// +// ts := timestamppb.New(t) +// ... // make use of ts as a *timestamppb.Timestamp +// +// In order to construct a Timestamp representing the current time, use Now: +// +// ts := timestamppb.Now() +// ... // make use of ts as a *timestamppb.Timestamp +// +package timestamppb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + time "time" +) + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. 
up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +// Now constructs a new Timestamp from the current time. +func Now() *Timestamp { + return New(time.Now()) +} + +// New constructs a new Timestamp from the provided time.Time. +func New(t time.Time) *Timestamp { + return &Timestamp{Seconds: int64(t.Unix()), Nanos: int32(t.Nanosecond())} +} + +// AsTime converts x to a time.Time. +func (x *Timestamp) AsTime() time.Time { + return time.Unix(int64(x.GetSeconds()), int64(x.GetNanos())).UTC() +} + +// IsValid reports whether the timestamp is valid. +// It is equivalent to CheckValid == nil. +func (x *Timestamp) IsValid() bool { + return x.check() == 0 +} + +// CheckValid returns an error if the timestamp is invalid. +// In particular, it checks whether the value represents a date that is +// in the range of 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive. +// An error is reported for a nil Timestamp. 
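Mirroring the durationpb sketch earlier, the following is a minimal illustration of the Timestamp conversion and validation API described in the package comment above. It is not part of the vendored file and assumes only the timestamppb import path added by this patch.

// timestamppb_example.go -- hypothetical usage sketch, not part of the vendored code.
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Build a Timestamp message from a Go time.Time.
	ts := timestamppb.New(time.Date(2021, time.July, 20, 13, 29, 41, 0, time.UTC))

	// Explicitly check the documented timestamp.proto limitations.
	if err := ts.CheckValid(); err != nil {
		fmt.Println("invalid timestamp:", err)
		return
	}

	// Convert back to a time.Time in UTC.
	fmt.Println(ts.AsTime()) // 2021-07-20 13:29:41 +0000 UTC

	// Now() is shorthand for New(time.Now()).
	fmt.Println(timestamppb.Now().IsValid()) // true
}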
+func (x *Timestamp) CheckValid() error { + switch x.check() { + case invalidNil: + return protoimpl.X.NewError("invalid nil Timestamp") + case invalidUnderflow: + return protoimpl.X.NewError("timestamp (%v) before 0001-01-01", x) + case invalidOverflow: + return protoimpl.X.NewError("timestamp (%v) after 9999-12-31", x) + case invalidNanos: + return protoimpl.X.NewError("timestamp (%v) has out-of-range nanos", x) + default: + return nil + } +} + +const ( + _ = iota + invalidNil + invalidUnderflow + invalidOverflow + invalidNanos +) + +func (x *Timestamp) check() uint { + const minTimestamp = -62135596800 // Seconds between 1970-01-01T00:00:00Z and 0001-01-01T00:00:00Z, inclusive + const maxTimestamp = +253402300799 // Seconds between 1970-01-01T00:00:00Z and 9999-12-31T23:59:59Z, inclusive + secs := x.GetSeconds() + nanos := x.GetNanos() + switch { + case x == nil: + return invalidNil + case secs < minTimestamp: + return invalidUnderflow + case secs > maxTimestamp: + return invalidOverflow + case nanos < 0 || nanos >= 1e9: + return invalidNanos + default: + return 0 + } +} + +func (x *Timestamp) Reset() { + *x = Timestamp{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Timestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Timestamp) ProtoMessage() {} + +func (x *Timestamp) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. 
+func (*Timestamp) Descriptor() ([]byte, []int) { + return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0} +} + +func (x *Timestamp) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Timestamp) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor + +var file_google_protobuf_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, + 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, + 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, + 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_timestamp_proto_rawDescOnce sync.Once + file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc +) + +func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { + file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { + file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) + }) + return file_google_protobuf_timestamp_proto_rawDescData +} + +var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_timestamp_proto_goTypes = []interface{}{ + (*Timestamp)(nil), // 0: google.protobuf.Timestamp +} +var file_google_protobuf_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_timestamp_proto_init() } +func file_google_protobuf_timestamp_proto_init() { + if File_google_protobuf_timestamp_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Timestamp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_timestamp_proto_goTypes, + DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs, + MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, + }.Build() + File_google_protobuf_timestamp_proto = out.File + file_google_protobuf_timestamp_proto_rawDesc = nil + file_google_protobuf_timestamp_proto_goTypes = nil + file_google_protobuf_timestamp_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go new file mode 100644 index 000000000..43ef7cb14 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/pluginpb/plugin.pb.go @@ -0,0 +1,636 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
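The header comment above describes the plugin protocol: protoc writes a serialized CodeGeneratorRequest to the plugin's stdin and reads a serialized CodeGeneratorResponse from its stdout. A minimal plugin built on the pluginpb types vendored below could look like the following sketch; the binary name, output file name, and contents are invented for illustration, and the code is not part of the vendored file.

// protoc-gen-example -- hypothetical plugin sketch, not part of the vendored code.
package main

import (
	"io/ioutil"
	"log"
	"os"
	"strings"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/pluginpb"
)

func main() {
	// Read the serialized CodeGeneratorRequest that protoc writes to stdin.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err)
	}
	req := &pluginpb.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		log.Fatal(err)
	}

	// Emit one generated file listing the inputs protoc asked this plugin to handle.
	resp := &pluginpb.CodeGeneratorResponse{
		File: []*pluginpb.CodeGeneratorResponse_File{{
			Name:    proto.String("inputs.txt"),
			Content: proto.String(strings.Join(req.GetFileToGenerate(), "\n")),
		}},
	}

	// Write the serialized CodeGeneratorResponse back to protoc on stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := os.Stdout.Write(out); err != nil {
		log.Fatal(err)
	}
}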
+// source: google/protobuf/compiler/plugin.proto + +package pluginpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" + sync "sync" +) + +// Sync with code_generator.h. +type CodeGeneratorResponse_Feature int32 + +const ( + CodeGeneratorResponse_FEATURE_NONE CodeGeneratorResponse_Feature = 0 + CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL CodeGeneratorResponse_Feature = 1 +) + +// Enum value maps for CodeGeneratorResponse_Feature. +var ( + CodeGeneratorResponse_Feature_name = map[int32]string{ + 0: "FEATURE_NONE", + 1: "FEATURE_PROTO3_OPTIONAL", + } + CodeGeneratorResponse_Feature_value = map[string]int32{ + "FEATURE_NONE": 0, + "FEATURE_PROTO3_OPTIONAL": 1, + } +) + +func (x CodeGeneratorResponse_Feature) Enum() *CodeGeneratorResponse_Feature { + p := new(CodeGeneratorResponse_Feature) + *p = x + return p +} + +func (x CodeGeneratorResponse_Feature) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CodeGeneratorResponse_Feature) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_compiler_plugin_proto_enumTypes[0].Descriptor() +} + +func (CodeGeneratorResponse_Feature) Type() protoreflect.EnumType { + return &file_google_protobuf_compiler_plugin_proto_enumTypes[0] +} + +func (x CodeGeneratorResponse_Feature) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *CodeGeneratorResponse_Feature) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = CodeGeneratorResponse_Feature(num) + return nil +} + +// Deprecated: Use CodeGeneratorResponse_Feature.Descriptor instead. +func (CodeGeneratorResponse_Feature) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +// The version number of protocol compiler. +type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. 
+func (*Version) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{0} +} + +func (x *Version) GetMajor() int32 { + if x != nil && x.Major != nil { + return *x.Major + } + return 0 +} + +func (x *Version) GetMinor() int32 { + if x != nil && x.Minor != nil { + return *x.Minor + } + return 0 +} + +func (x *Version) GetPatch() int32 { + if x != nil && x.Patch != nil { + return *x.Patch + } + return 0 +} + +func (x *Version) GetSuffix() string { + if x != nil && x.Suffix != nil { + return *x.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*descriptorpb.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (x *CodeGeneratorRequest) Reset() { + *x = CodeGeneratorRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (x *CodeGeneratorRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorRequest.ProtoReflect.Descriptor instead. 
+func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{1} +} + +func (x *CodeGeneratorRequest) GetFileToGenerate() []string { + if x != nil { + return x.FileToGenerate + } + return nil +} + +func (x *CodeGeneratorRequest) GetParameter() string { + if x != nil && x.Parameter != nil { + return *x.Parameter + } + return "" +} + +func (x *CodeGeneratorRequest) GetProtoFile() []*descriptorpb.FileDescriptorProto { + if x != nil { + return x.ProtoFile + } + return nil +} + +func (x *CodeGeneratorRequest) GetCompilerVersion() *Version { + if x != nil { + return x.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + // A bitmask of supported features that the code generator supports. + // This is a bitwise "or" of values from the Feature enum. + SupportedFeatures *uint64 `protobuf:"varint,2,opt,name=supported_features,json=supportedFeatures" json:"supported_features,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` +} + +func (x *CodeGeneratorResponse) Reset() { + *x = CodeGeneratorResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (x *CodeGeneratorResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse.ProtoReflect.Descriptor instead. +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2} +} + +func (x *CodeGeneratorResponse) GetError() string { + if x != nil && x.Error != nil { + return *x.Error + } + return "" +} + +func (x *CodeGeneratorResponse) GetSupportedFeatures() uint64 { + if x != nil && x.SupportedFeatures != nil { + return *x.SupportedFeatures + } + return 0 +} + +func (x *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if x != nil { + return x.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The file name, relative to the output directory. The name must not + // contain "." or ".." 
components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. 
+ Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` +} + +func (x *CodeGeneratorResponse_File) Reset() { + *x = CodeGeneratorResponse_File{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CodeGeneratorResponse_File) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (x *CodeGeneratorResponse_File) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_compiler_plugin_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CodeGeneratorResponse_File.ProtoReflect.Descriptor instead. +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { + return file_google_protobuf_compiler_plugin_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *CodeGeneratorResponse_File) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetInsertionPoint() string { + if x != nil && x.InsertionPoint != nil { + return *x.InsertionPoint + } + return "" +} + +func (x *CodeGeneratorResponse_File) GetContent() string { + if x != nil && x.Content != nil { + return *x.Content + } + return "" +} + +var File_google_protobuf_compiler_plugin_proto protoreflect.FileDescriptor + +var file_google_protobuf_compiler_plugin_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, + 0x72, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, + 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xf1, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x64, + 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x28, 0x0a, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x66, 0x69, 0x6c, + 0x65, 0x54, 0x6f, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x5f, 0x66, 
0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x4c, + 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x63, 0x6f, 0x6d, + 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xbf, 0x02, 0x0a, + 0x15, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2d, 0x0a, 0x12, + 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x04, 0x66, + 0x69, 0x6c, 0x65, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, + 0x69, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, + 0x04, 0x66, 0x69, 0x6c, 0x65, 0x1a, 0x5d, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x73, 0x65, + 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x22, 0x38, 0x0a, 0x07, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, + 0x10, 0x0a, 0x0c, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, + 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x46, 0x45, 0x41, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, + 0x54, 0x4f, 0x33, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x42, 0x67, + 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x42, 0x0c, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x5a, 0x39, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, + 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x3b, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x67, 0x6f, +} + +var ( + file_google_protobuf_compiler_plugin_proto_rawDescOnce sync.Once + 
file_google_protobuf_compiler_plugin_proto_rawDescData = file_google_protobuf_compiler_plugin_proto_rawDesc +) + +func file_google_protobuf_compiler_plugin_proto_rawDescGZIP() []byte { + file_google_protobuf_compiler_plugin_proto_rawDescOnce.Do(func() { + file_google_protobuf_compiler_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_compiler_plugin_proto_rawDescData) + }) + return file_google_protobuf_compiler_plugin_proto_rawDescData +} + +var file_google_protobuf_compiler_plugin_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_compiler_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_compiler_plugin_proto_goTypes = []interface{}{ + (CodeGeneratorResponse_Feature)(0), // 0: google.protobuf.compiler.CodeGeneratorResponse.Feature + (*Version)(nil), // 1: google.protobuf.compiler.Version + (*CodeGeneratorRequest)(nil), // 2: google.protobuf.compiler.CodeGeneratorRequest + (*CodeGeneratorResponse)(nil), // 3: google.protobuf.compiler.CodeGeneratorResponse + (*CodeGeneratorResponse_File)(nil), // 4: google.protobuf.compiler.CodeGeneratorResponse.File + (*descriptorpb.FileDescriptorProto)(nil), // 5: google.protobuf.FileDescriptorProto +} +var file_google_protobuf_compiler_plugin_proto_depIdxs = []int32{ + 5, // 0: google.protobuf.compiler.CodeGeneratorRequest.proto_file:type_name -> google.protobuf.FileDescriptorProto + 1, // 1: google.protobuf.compiler.CodeGeneratorRequest.compiler_version:type_name -> google.protobuf.compiler.Version + 4, // 2: google.protobuf.compiler.CodeGeneratorResponse.file:type_name -> google.protobuf.compiler.CodeGeneratorResponse.File + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_google_protobuf_compiler_plugin_proto_init() } +func file_google_protobuf_compiler_plugin_proto_init() { + if File_google_protobuf_compiler_plugin_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_compiler_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_compiler_plugin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CodeGeneratorResponse_File); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_compiler_plugin_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 
0, + }, + GoTypes: file_google_protobuf_compiler_plugin_proto_goTypes, + DependencyIndexes: file_google_protobuf_compiler_plugin_proto_depIdxs, + EnumInfos: file_google_protobuf_compiler_plugin_proto_enumTypes, + MessageInfos: file_google_protobuf_compiler_plugin_proto_msgTypes, + }.Build() + File_google_protobuf_compiler_plugin_proto = out.File + file_google_protobuf_compiler_plugin_proto_rawDesc = nil + file_google_protobuf_compiler_plugin_proto_goTypes = nil + file_google_protobuf_compiler_plugin_proto_depIdxs = nil +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go index b0c436c4a..7c1f5fac3 100644 --- a/vendor/gopkg.in/yaml.v2/readerc.go +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -95,7 +95,7 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { // [Go] This function was changed to guarantee the requested length size at EOF. // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests + // for that to be the case, and there are tests // If the EOF flag is set and the raw buffer is empty, do nothing. if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go index e29c364b3..4120e0c91 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -180,7 +180,7 @@ func resolve(tag string, in string) (rtag string, out interface{}) { return yaml_INT_TAG, uintv } } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) if err == nil { if true || intv == int64(int(intv)) { return yaml_INT_TAG, int(intv) diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go index 2edd73405..4c45e660a 100644 --- a/vendor/gopkg.in/yaml.v2/sorter.go +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -52,7 +52,7 @@ func (l keyList) Less(i, j int) bool { var ai, bi int var an, bn int64 if ar[i] == '0' || br[i] == '0' { - for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { if ar[j] != '0' { an = 1 bn = 1 diff --git a/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY b/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY index 7c241b71a..623d85e85 100644 --- a/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY +++ b/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY @@ -75,7 +75,7 @@ resulting binaries. These projects are: limitations under the License. -* github.com/kisielk/gotool – https://github.com/kisielk/gotool +* github.com/kisielk/gotool - https://github.com/kisielk/gotool Copyright (c) 2013 Kamil Kisiel @@ -224,3 +224,61 @@ resulting binaries. These projects are: (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* gogrep - https://github.com/mvdan/gogrep + + Copyright (c) 2017, Daniel Martí. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. 
+ * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +* gosmith - https://github.com/dvyukov/gosmith + + Copyright (c) 2014 Dmitry Vyukov. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * The name of Dmitry Vyukov may be used to endorse or promote + products derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/honnef.co/go/tools/code/code.go b/vendor/honnef.co/go/tools/code/code.go new file mode 100644 index 000000000..6f4df8b9a --- /dev/null +++ b/vendor/honnef.co/go/tools/code/code.go @@ -0,0 +1,481 @@ +// Package code answers structural and type questions about Go code. 
+package code + +import ( + "flag" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/analysis/passes/inspect" + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/ast/inspector" + "honnef.co/go/tools/facts" + "honnef.co/go/tools/go/types/typeutil" + "honnef.co/go/tools/ir" + "honnef.co/go/tools/lint" +) + +type Positioner interface { + Pos() token.Pos +} + +func CallName(call *ir.CallCommon) string { + if call.IsInvoke() { + return "" + } + switch v := call.Value.(type) { + case *ir.Function: + fn, ok := v.Object().(*types.Func) + if !ok { + return "" + } + return lint.FuncName(fn) + case *ir.Builtin: + return v.Name() + } + return "" +} + +func IsCallTo(call *ir.CallCommon, name string) bool { return CallName(call) == name } + +func IsCallToAny(call *ir.CallCommon, names ...string) bool { + q := CallName(call) + for _, name := range names { + if q == name { + return true + } + } + return false +} + +func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name } + +func FilterDebug(instr []ir.Instruction) []ir.Instruction { + var out []ir.Instruction + for _, ins := range instr { + if _, ok := ins.(*ir.DebugRef); !ok { + out = append(out, ins) + } + } + return out +} + +func IsExample(fn *ir.Function) bool { + if !strings.HasPrefix(fn.Name(), "Example") { + return false + } + f := fn.Prog.Fset.File(fn.Pos()) + if f == nil { + return false + } + return strings.HasSuffix(f.Name(), "_test.go") +} + +func IsPointerLike(T types.Type) bool { + switch T := T.Underlying().(type) { + case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer: + return true + case *types.Basic: + return T.Kind() == types.UnsafePointer + } + return false +} + +func IsIdent(expr ast.Expr, ident string) bool { + id, ok := expr.(*ast.Ident) + return ok && id.Name == ident +} + +// isBlank returns whether id is the blank identifier "_". +// If id == nil, the answer is false. +func IsBlank(id ast.Expr) bool { + ident, _ := id.(*ast.Ident) + return ident != nil && ident.Name == "_" +} + +func IsIntLiteral(expr ast.Expr, literal string) bool { + lit, ok := expr.(*ast.BasicLit) + return ok && lit.Kind == token.INT && lit.Value == literal +} + +// Deprecated: use IsIntLiteral instead +func IsZero(expr ast.Expr) bool { + return IsIntLiteral(expr, "0") +} + +func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool { + return IsType(pass.TypesInfo.TypeOf(expr), name) +} + +func IsInTest(pass *analysis.Pass, node Positioner) bool { + // FIXME(dh): this doesn't work for global variables with + // initializers + f := pass.Fset.File(node.Pos()) + return f != nil && strings.HasSuffix(f.Name(), "_test.go") +} + +// IsMain reports whether the package being processed is a package +// main. +func IsMain(pass *analysis.Pass) bool { + return pass.Pkg.Name() == "main" +} + +// IsMainLike reports whether the package being processed is a +// main-like package. A main-like package is a package that is +// package main, or that is intended to be used by a tool framework +// such as cobra to implement a command. +// +// Note that this function errs on the side of false positives; it may +// return true for packages that aren't main-like. IsMainLike is +// intended for analyses that wish to suppress diagnostics for +// main-like packages to avoid false positives. 
+func IsMainLike(pass *analysis.Pass) bool { + if pass.Pkg.Name() == "main" { + return true + } + for _, imp := range pass.Pkg.Imports() { + if imp.Path() == "github.com/spf13/cobra" { + return true + } + } + return false +} + +func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string { + info := pass.TypesInfo + sel := info.Selections[expr] + if sel == nil { + if x, ok := expr.X.(*ast.Ident); ok { + pkg, ok := info.ObjectOf(x).(*types.PkgName) + if !ok { + // This shouldn't happen + return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name) + } + return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name) + } + panic(fmt.Sprintf("unsupported selector: %v", expr)) + } + return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name()) +} + +func IsNil(pass *analysis.Pass, expr ast.Expr) bool { + return pass.TypesInfo.Types[expr].IsNil() +} + +func BoolConst(pass *analysis.Pass, expr ast.Expr) bool { + val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val() + return constant.BoolVal(val) +} + +func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool { + // We explicitly don't support typed bools because more often than + // not, custom bool types are used as binary enums and the + // explicit comparison is desired. + + ident, ok := expr.(*ast.Ident) + if !ok { + return false + } + obj := pass.TypesInfo.ObjectOf(ident) + c, ok := obj.(*types.Const) + if !ok { + return false + } + basic, ok := c.Type().(*types.Basic) + if !ok { + return false + } + if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool { + return false + } + return true +} + +func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) { + tv := pass.TypesInfo.Types[expr] + if tv.Value == nil { + return 0, false + } + if tv.Value.Kind() != constant.Int { + return 0, false + } + return constant.Int64Val(tv.Value) +} + +func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) { + val := pass.TypesInfo.Types[expr].Value + if val == nil { + return "", false + } + if val.Kind() != constant.String { + return "", false + } + return constant.StringVal(val), true +} + +// Dereference returns a pointer's element type; otherwise it returns +// T. +func Dereference(T types.Type) types.Type { + if p, ok := T.Underlying().(*types.Pointer); ok { + return p.Elem() + } + return T +} + +// DereferenceR returns a pointer's element type; otherwise it returns +// T. If the element type is itself a pointer, DereferenceR will be +// applied recursively. 
+func DereferenceR(T types.Type) types.Type { + if p, ok := T.Underlying().(*types.Pointer); ok { + return DereferenceR(p.Elem()) + } + return T +} + +func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string { + switch fun := astutil.Unparen(call.Fun).(type) { + case *ast.SelectorExpr: + fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func) + if !ok { + return "" + } + return lint.FuncName(fn) + case *ast.Ident: + obj := pass.TypesInfo.ObjectOf(fun) + switch obj := obj.(type) { + case *types.Func: + return lint.FuncName(obj) + case *types.Builtin: + return obj.Name() + default: + return "" + } + default: + return "" + } +} + +func IsCallToAST(pass *analysis.Pass, node ast.Node, name string) bool { + call, ok := node.(*ast.CallExpr) + if !ok { + return false + } + return CallNameAST(pass, call) == name +} + +func IsCallToAnyAST(pass *analysis.Pass, node ast.Node, names ...string) bool { + call, ok := node.(*ast.CallExpr) + if !ok { + return false + } + q := CallNameAST(pass, call) + for _, name := range names { + if q == name { + return true + } + } + return false +} + +func Preamble(f *ast.File) string { + cutoff := f.Package + if f.Doc != nil { + cutoff = f.Doc.Pos() + } + var out []string + for _, cmt := range f.Comments { + if cmt.Pos() >= cutoff { + break + } + out = append(out, cmt.Text()) + } + return strings.Join(out, "\n") +} + +func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec { + if len(specs) == 0 { + return nil + } + groups := make([][]ast.Spec, 1) + groups[0] = append(groups[0], specs[0]) + + for _, spec := range specs[1:] { + g := groups[len(groups)-1] + if fset.PositionFor(spec.Pos(), false).Line-1 != + fset.PositionFor(g[len(g)-1].End(), false).Line { + + groups = append(groups, nil) + } + + groups[len(groups)-1] = append(groups[len(groups)-1], spec) + } + + return groups +} + +func IsObject(obj types.Object, name string) bool { + var path string + if pkg := obj.Pkg(); pkg != nil { + path = pkg.Path() + "." + } + return path+obj.Name() == name +} + +type Field struct { + Var *types.Var + Tag string + Path []int +} + +// FlattenFields recursively flattens T and embedded structs, +// returning a list of fields. If multiple fields with the same name +// exist, all will be returned. +func FlattenFields(T *types.Struct) []Field { + return flattenFields(T, nil, nil) +} + +func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field { + if seen == nil { + seen = map[types.Type]bool{} + } + if seen[T] { + return nil + } + seen[T] = true + var out []Field + for i := 0; i < T.NumFields(); i++ { + field := T.Field(i) + tag := T.Tag(i) + np := append(path[:len(path):len(path)], i) + if field.Anonymous() { + if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok { + out = append(out, flattenFields(s, np, seen)...) + } + } else { + out = append(out, Field{field, tag, np}) + } + } + return out +} + +func File(pass *analysis.Pass, node Positioner) *ast.File { + m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File) + return m[pass.Fset.File(node.Pos())] +} + +// IsGenerated reports whether pos is in a generated file, It ignores +// //line directives. +func IsGenerated(pass *analysis.Pass, pos token.Pos) bool { + _, ok := Generator(pass, pos) + return ok +} + +// Generator returns the generator that generated the file containing +// pos. It ignores //line directives. 
+func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) { + file := pass.Fset.PositionFor(pos, false).Filename + m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) + g, ok := m[file] + return g, ok +} + +// MayHaveSideEffects reports whether expr may have side effects. If +// the purity argument is nil, this function implements a purely +// syntactic check, meaning that any function call may have side +// effects, regardless of the called function's body. Otherwise, +// purity will be consulted to determine the purity of function calls. +func MayHaveSideEffects(pass *analysis.Pass, expr ast.Expr, purity facts.PurityResult) bool { + switch expr := expr.(type) { + case *ast.BadExpr: + return true + case *ast.Ellipsis: + return MayHaveSideEffects(pass, expr.Elt, purity) + case *ast.FuncLit: + // the literal itself cannot have side ffects, only calling it + // might, which is handled by CallExpr. + return false + case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType: + // types cannot have side effects + return false + case *ast.BasicLit: + return false + case *ast.BinaryExpr: + return MayHaveSideEffects(pass, expr.X, purity) || MayHaveSideEffects(pass, expr.Y, purity) + case *ast.CallExpr: + if purity == nil { + return true + } + switch obj := typeutil.Callee(pass.TypesInfo, expr).(type) { + case *types.Func: + if _, ok := purity[obj]; !ok { + return true + } + case *types.Builtin: + switch obj.Name() { + case "len", "cap": + default: + return true + } + default: + return true + } + for _, arg := range expr.Args { + if MayHaveSideEffects(pass, arg, purity) { + return true + } + } + return false + case *ast.CompositeLit: + if MayHaveSideEffects(pass, expr.Type, purity) { + return true + } + for _, elt := range expr.Elts { + if MayHaveSideEffects(pass, elt, purity) { + return true + } + } + return false + case *ast.Ident: + return false + case *ast.IndexExpr: + return MayHaveSideEffects(pass, expr.X, purity) || MayHaveSideEffects(pass, expr.Index, purity) + case *ast.KeyValueExpr: + return MayHaveSideEffects(pass, expr.Key, purity) || MayHaveSideEffects(pass, expr.Value, purity) + case *ast.SelectorExpr: + return MayHaveSideEffects(pass, expr.X, purity) + case *ast.SliceExpr: + return MayHaveSideEffects(pass, expr.X, purity) || + MayHaveSideEffects(pass, expr.Low, purity) || + MayHaveSideEffects(pass, expr.High, purity) || + MayHaveSideEffects(pass, expr.Max, purity) + case *ast.StarExpr: + return MayHaveSideEffects(pass, expr.X, purity) + case *ast.TypeAssertExpr: + return MayHaveSideEffects(pass, expr.X, purity) + case *ast.UnaryExpr: + if MayHaveSideEffects(pass, expr.X, purity) { + return true + } + return expr.Op == token.ARROW + case *ast.ParenExpr: + return MayHaveSideEffects(pass, expr.X, purity) + case nil: + return false + default: + panic(fmt.Sprintf("internal error: unhandled type %T", expr)) + } +} + +func IsGoVersion(pass *analysis.Pass, minor int) bool { + version := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter).Get().(int) + return version >= minor +} + +func Preorder(pass *analysis.Pass, fn func(ast.Node), types ...ast.Node) { + pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder(types, fn) +} diff --git a/vendor/honnef.co/go/tools/config/config.go b/vendor/honnef.co/go/tools/config/config.go index c22093a6d..55115371b 100644 --- a/vendor/honnef.co/go/tools/config/config.go +++ b/vendor/honnef.co/go/tools/config/config.go @@ -3,6 +3,8 @@ package config import ( "bytes" "fmt" + 
"go/ast" + "go/token" "os" "path/filepath" "reflect" @@ -12,38 +14,57 @@ import ( "golang.org/x/tools/go/analysis" ) +// Dir looks at a list of absolute file names, which should make up a +// single package, and returns the path of the directory that may +// contain a staticcheck.conf file. It returns the empty string if no +// such directory could be determined, for example because all files +// were located in Go's build cache. +func Dir(files []string) string { + if len(files) == 0 { + return "" + } + cache, err := os.UserCacheDir() + if err != nil { + cache = "" + } + var path string + for _, p := range files { + // FIXME(dh): using strings.HasPrefix isn't technically + // correct, but it should be good enough for now. + if cache != "" && strings.HasPrefix(p, cache) { + // File in the build cache of the standard Go build system + continue + } + path = p + break + } + + if path == "" { + // The package only consists of generated files. + return "" + } + + dir := filepath.Dir(path) + return dir +} + +func dirAST(files []*ast.File, fset *token.FileSet) string { + names := make([]string, len(files)) + for i, f := range files { + names[i] = fset.PositionFor(f.Pos(), true).Filename + } + return Dir(names) +} + var Analyzer = &analysis.Analyzer{ Name: "config", Doc: "loads configuration for the current package tree", Run: func(pass *analysis.Pass) (interface{}, error) { - if len(pass.Files) == 0 { - cfg := DefaultConfig - return &cfg, nil - } - cache, err := os.UserCacheDir() - if err != nil { - cache = "" - } - var path string - for _, f := range pass.Files { - p := pass.Fset.PositionFor(f.Pos(), true).Filename - // FIXME(dh): using strings.HasPrefix isn't technically - // correct, but it should be good enough for now. - if cache != "" && strings.HasPrefix(p, cache) { - // File in the build cache of the standard Go build system - continue - } - path = p - break - } - - if path == "" { - // The package only consists of generated files. 
+ dir := dirAST(pass.Files, pass.Fset) + if dir == "" { cfg := DefaultConfig return &cfg, nil } - - dir := filepath.Dir(path) cfg, err := Load(dir) if err != nil { return nil, fmt.Errorf("error loading staticcheck.conf: %s", err) @@ -136,7 +157,7 @@ func (c Config) String() string { } var DefaultConfig = Config{ - Checks: []string{"all", "-ST1000", "-ST1003", "-ST1016"}, + Checks: []string{"all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022"}, Initialisms: []string{ "ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", @@ -144,20 +165,20 @@ var DefaultConfig = Config{ "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", - "XSS", "SIP", "RTP", + "XSS", "SIP", "RTP", "AMQP", "DB", "TS", }, DotImportWhitelist: []string{}, HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"}, } -const configName = "staticcheck.conf" +const ConfigName = "staticcheck.conf" func parseConfigs(dir string) ([]Config, error) { var out []Config // TODO(dh): consider stopping at the GOPATH/module boundary for dir != "" { - f, err := os.Open(filepath.Join(dir, configName)) + f, err := os.Open(filepath.Join(dir, ConfigName)) if os.IsNotExist(err) { ndir := filepath.Dir(dir) if ndir == dir { diff --git a/vendor/honnef.co/go/tools/deprecated/stdlib.go b/vendor/honnef.co/go/tools/deprecated/stdlib.go index 5d8ce186b..cabb8500a 100644 --- a/vendor/honnef.co/go/tools/deprecated/stdlib.go +++ b/vendor/honnef.co/go/tools/deprecated/stdlib.go @@ -6,7 +6,6 @@ type Deprecation struct { } var Stdlib = map[string]Deprecation{ - "image/jpeg.Reader": {4, 0}, // FIXME(dh): AllowBinary isn't being detected as deprecated // because the comment has a newline right after "Deprecated:" "go/build.AllowBinary": {7, 7}, @@ -73,40 +72,48 @@ var Stdlib = map[string]Deprecation{ // This function has no alternative, but also no purpose. 
"(*crypto/rc4.Cipher).Reset": {12, 0}, "(net/http/httptest.ResponseRecorder).HeaderMap": {11, 7}, + "image.ZP": {13, 0}, + "image.ZR": {13, 0}, + "(*debug/gosym.LineTable).LineToPC": {2, 2}, + "(*debug/gosym.LineTable).PCToLine": {2, 2}, + "crypto/tls.VersionSSL30": {13, 0}, + "(crypto/tls.Config).NameToCertificate": {14, 14}, + "(*crypto/tls.Config).BuildNameToCertificate": {14, 14}, + "image/jpeg.Reader": {4, 0}, // All of these have been deprecated in favour of external libraries - "syscall.AttachLsf": {7, 0}, - "syscall.DetachLsf": {7, 0}, - "syscall.LsfSocket": {7, 0}, - "syscall.SetLsfPromisc": {7, 0}, - "syscall.LsfJump": {7, 0}, - "syscall.LsfStmt": {7, 0}, - "syscall.BpfStmt": {7, 0}, - "syscall.BpfJump": {7, 0}, - "syscall.BpfBuflen": {7, 0}, - "syscall.SetBpfBuflen": {7, 0}, - "syscall.BpfDatalink": {7, 0}, - "syscall.SetBpfDatalink": {7, 0}, - "syscall.SetBpfPromisc": {7, 0}, - "syscall.FlushBpf": {7, 0}, - "syscall.BpfInterface": {7, 0}, - "syscall.SetBpfInterface": {7, 0}, - "syscall.BpfTimeout": {7, 0}, - "syscall.SetBpfTimeout": {7, 0}, - "syscall.BpfStats": {7, 0}, - "syscall.SetBpfImmediate": {7, 0}, - "syscall.SetBpf": {7, 0}, - "syscall.CheckBpfVersion": {7, 0}, - "syscall.BpfHeadercmpl": {7, 0}, - "syscall.SetBpfHeadercmpl": {7, 0}, - "syscall.RouteRIB": {8, 0}, - "syscall.RoutingMessage": {8, 0}, - "syscall.RouteMessage": {8, 0}, - "syscall.InterfaceMessage": {8, 0}, - "syscall.InterfaceAddrMessage": {8, 0}, - "syscall.ParseRoutingMessage": {8, 0}, - "syscall.ParseRoutingSockaddr": {8, 0}, - "InterfaceAnnounceMessage": {7, 0}, - "InterfaceMulticastAddrMessage": {7, 0}, - "syscall.FormatMessage": {5, 0}, + "syscall.AttachLsf": {7, 0}, + "syscall.DetachLsf": {7, 0}, + "syscall.LsfSocket": {7, 0}, + "syscall.SetLsfPromisc": {7, 0}, + "syscall.LsfJump": {7, 0}, + "syscall.LsfStmt": {7, 0}, + "syscall.BpfStmt": {7, 0}, + "syscall.BpfJump": {7, 0}, + "syscall.BpfBuflen": {7, 0}, + "syscall.SetBpfBuflen": {7, 0}, + "syscall.BpfDatalink": {7, 0}, + "syscall.SetBpfDatalink": {7, 0}, + "syscall.SetBpfPromisc": {7, 0}, + "syscall.FlushBpf": {7, 0}, + "syscall.BpfInterface": {7, 0}, + "syscall.SetBpfInterface": {7, 0}, + "syscall.BpfTimeout": {7, 0}, + "syscall.SetBpfTimeout": {7, 0}, + "syscall.BpfStats": {7, 0}, + "syscall.SetBpfImmediate": {7, 0}, + "syscall.SetBpf": {7, 0}, + "syscall.CheckBpfVersion": {7, 0}, + "syscall.BpfHeadercmpl": {7, 0}, + "syscall.SetBpfHeadercmpl": {7, 0}, + "syscall.RouteRIB": {8, 0}, + "syscall.RoutingMessage": {8, 0}, + "syscall.RouteMessage": {8, 0}, + "syscall.InterfaceMessage": {8, 0}, + "syscall.InterfaceAddrMessage": {8, 0}, + "syscall.ParseRoutingMessage": {8, 0}, + "syscall.ParseRoutingSockaddr": {8, 0}, + "syscall.InterfaceAnnounceMessage": {7, 0}, + "syscall.InterfaceMulticastAddrMessage": {7, 0}, + "syscall.FormatMessage": {5, 0}, } diff --git a/vendor/honnef.co/go/tools/edit/edit.go b/vendor/honnef.co/go/tools/edit/edit.go new file mode 100644 index 000000000..f4cfba234 --- /dev/null +++ b/vendor/honnef.co/go/tools/edit/edit.go @@ -0,0 +1,67 @@ +package edit + +import ( + "bytes" + "go/ast" + "go/format" + "go/token" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/pattern" +) + +type Ranger interface { + Pos() token.Pos + End() token.Pos +} + +type Range [2]token.Pos + +func (r Range) Pos() token.Pos { return r[0] } +func (r Range) End() token.Pos { return r[1] } + +func ReplaceWithString(fset *token.FileSet, old Ranger, new string) analysis.TextEdit { + return analysis.TextEdit{ + Pos: old.Pos(), + End: old.End(), + NewText: 
[]byte(new), + } +} + +func ReplaceWithNode(fset *token.FileSet, old Ranger, new ast.Node) analysis.TextEdit { + buf := &bytes.Buffer{} + if err := format.Node(buf, fset, new); err != nil { + panic("internal error: " + err.Error()) + } + return analysis.TextEdit{ + Pos: old.Pos(), + End: old.End(), + NewText: buf.Bytes(), + } +} + +func ReplaceWithPattern(pass *analysis.Pass, after pattern.Pattern, state pattern.State, node Ranger) analysis.TextEdit { + r := pattern.NodeToAST(after.Root, state) + buf := &bytes.Buffer{} + format.Node(buf, pass.Fset, r) + return analysis.TextEdit{ + Pos: node.Pos(), + End: node.End(), + NewText: buf.Bytes(), + } +} + +func Delete(old Ranger) analysis.TextEdit { + return analysis.TextEdit{ + Pos: old.Pos(), + End: old.End(), + NewText: nil, + } +} + +func Fix(msg string, edits ...analysis.TextEdit) analysis.SuggestedFix { + return analysis.SuggestedFix{ + Message: msg, + TextEdits: edits, + } +} diff --git a/vendor/honnef.co/go/tools/facts/generated.go b/vendor/honnef.co/go/tools/facts/generated.go index 1ed9563a3..18cbb49bd 100644 --- a/vendor/honnef.co/go/tools/facts/generated.go +++ b/vendor/honnef.co/go/tools/facts/generated.go @@ -19,6 +19,7 @@ const ( Goyacc Cgo Stringer + ProtocGenGo ) var ( @@ -51,10 +52,16 @@ func isGenerated(path string) (Generator, bool) { return Goyacc, true case "by cmd/cgo;": return Cgo, true + case "by protoc-gen-go.": + return ProtocGenGo, true } if strings.HasPrefix(text, `by "stringer `) { return Stringer, true } + if strings.HasPrefix(text, `by goyacc `) { + return Goyacc, true + } + return Unknown, true } if bytes.Equal(s, oldCgo) { diff --git a/vendor/honnef.co/go/tools/facts/purity.go b/vendor/honnef.co/go/tools/facts/purity.go index 861ca4110..099ee23e3 100644 --- a/vendor/honnef.co/go/tools/facts/purity.go +++ b/vendor/honnef.co/go/tools/facts/purity.go @@ -1,14 +1,13 @@ package facts import ( - "go/token" "go/types" "reflect" "golang.org/x/tools/go/analysis" "honnef.co/go/tools/functions" - "honnef.co/go/tools/internal/passes/buildssa" - "honnef.co/go/tools/ssa" + "honnef.co/go/tools/internal/passes/buildir" + "honnef.co/go/tools/ir" ) type IsPure struct{} @@ -22,7 +21,7 @@ var Purity = &analysis.Analyzer{ Name: "fact_purity", Doc: "Mark pure functions", Run: purity, - Requires: []*analysis.Analyzer{buildssa.Analyzer}, + Requires: []*analysis.Analyzer{buildir.Analyzer}, FactTypes: []analysis.Fact{(*IsPure)(nil)}, ResultType: reflect.TypeOf(PurityResult{}), } @@ -56,65 +55,68 @@ var pureStdlib = map[string]struct{}{ } func purity(pass *analysis.Pass) (interface{}, error) { - seen := map[*ssa.Function]struct{}{} - ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).Pkg - var check func(ssafn *ssa.Function) (ret bool) - check = func(ssafn *ssa.Function) (ret bool) { - if ssafn.Object() == nil { + seen := map[*ir.Function]struct{}{} + irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg + var check func(fn *ir.Function) (ret bool) + check = func(fn *ir.Function) (ret bool) { + if fn.Object() == nil { // TODO(dh): support closures return false } - if pass.ImportObjectFact(ssafn.Object(), new(IsPure)) { + if pass.ImportObjectFact(fn.Object(), new(IsPure)) { return true } - if ssafn.Pkg != ssapkg { + if fn.Pkg != irpkg { // Function is in another package but wasn't marked as // pure, ergo it isn't pure return false } // Break recursion - if _, ok := seen[ssafn]; ok { + if _, ok := seen[fn]; ok { return false } - seen[ssafn] = struct{}{} + seen[fn] = struct{}{} defer func() { if ret { - 
pass.ExportObjectFact(ssafn.Object(), &IsPure{}) + pass.ExportObjectFact(fn.Object(), &IsPure{}) } }() - if functions.IsStub(ssafn) { + if functions.IsStub(fn) { return false } - if _, ok := pureStdlib[ssafn.Object().(*types.Func).FullName()]; ok { + if _, ok := pureStdlib[fn.Object().(*types.Func).FullName()]; ok { return true } - if ssafn.Signature.Results().Len() == 0 { + if fn.Signature.Results().Len() == 0 { // A function with no return values is empty or is doing some // work we cannot see (for example because of build tags); // don't consider it pure. return false } - for _, param := range ssafn.Params { + for _, param := range fn.Params { + // TODO(dh): this may not be strictly correct. pure code + // can, to an extent, operate on non-basic types. if _, ok := param.Type().Underlying().(*types.Basic); !ok { return false } } - if ssafn.Blocks == nil { + // Don't consider external functions pure. + if fn.Blocks == nil { return false } - checkCall := func(common *ssa.CallCommon) bool { + checkCall := func(common *ir.CallCommon) bool { if common.IsInvoke() { return false } - builtin, ok := common.Value.(*ssa.Builtin) + builtin, ok := common.Value.(*ir.Builtin) if !ok { - if common.StaticCallee() != ssafn { + if common.StaticCallee() != fn { if common.StaticCallee() == nil { return false } @@ -124,47 +126,47 @@ func purity(pass *analysis.Pass) (interface{}, error) { } } else { switch builtin.Name() { - case "len", "cap", "make", "new": + case "len", "cap": default: return false } } return true } - for _, b := range ssafn.Blocks { + for _, b := range fn.Blocks { for _, ins := range b.Instrs { switch ins := ins.(type) { - case *ssa.Call: + case *ir.Call: if !checkCall(ins.Common()) { return false } - case *ssa.Defer: + case *ir.Defer: if !checkCall(&ins.Call) { return false } - case *ssa.Select: + case *ir.Select: return false - case *ssa.Send: + case *ir.Send: return false - case *ssa.Go: + case *ir.Go: return false - case *ssa.Panic: + case *ir.Panic: return false - case *ssa.Store: + case *ir.Store: return false - case *ssa.FieldAddr: + case *ir.FieldAddr: + return false + case *ir.Alloc: + return false + case *ir.Load: return false - case *ssa.UnOp: - if ins.Op == token.MUL || ins.Op == token.AND { - return false - } } } } return true } - for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { - check(ssafn) + for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs { + check(fn) } out := PurityResult{} diff --git a/vendor/honnef.co/go/tools/functions/loops.go b/vendor/honnef.co/go/tools/functions/loops.go index 15877a2f9..a8af70100 100644 --- a/vendor/honnef.co/go/tools/functions/loops.go +++ b/vendor/honnef.co/go/tools/functions/loops.go @@ -1,10 +1,10 @@ package functions -import "honnef.co/go/tools/ssa" +import "honnef.co/go/tools/ir" -type Loop struct{ ssa.BlockSet } +type Loop struct{ *ir.BlockSet } -func FindLoops(fn *ssa.Function) []Loop { +func FindLoops(fn *ir.Function) []Loop { if fn.Blocks == nil { return nil } @@ -18,12 +18,12 @@ func FindLoops(fn *ssa.Function) []Loop { // n is a back-edge to h // h is the loop header if n == h { - set := Loop{} + set := Loop{ir.NewBlockSet(len(fn.Blocks))} set.Add(n) sets = append(sets, set) continue } - set := Loop{} + set := Loop{ir.NewBlockSet(len(fn.Blocks))} set.Add(h) set.Add(n) for _, b := range allPredsBut(n, h, nil) { @@ -35,7 +35,7 @@ func FindLoops(fn *ssa.Function) []Loop { return sets } -func allPredsBut(b, but *ssa.BasicBlock, list []*ssa.BasicBlock) []*ssa.BasicBlock { +func 
allPredsBut(b, but *ir.BasicBlock, list []*ir.BasicBlock) []*ir.BasicBlock { outer: for _, pred := range b.Preds { if pred == but { diff --git a/vendor/honnef.co/go/tools/functions/pure.go b/vendor/honnef.co/go/tools/functions/pure.go deleted file mode 100644 index 8bc558771..000000000 --- a/vendor/honnef.co/go/tools/functions/pure.go +++ /dev/null @@ -1,46 +0,0 @@ -package functions - -import ( - "honnef.co/go/tools/ssa" -) - -func filterDebug(instr []ssa.Instruction) []ssa.Instruction { - var out []ssa.Instruction - for _, ins := range instr { - if _, ok := ins.(*ssa.DebugRef); !ok { - out = append(out, ins) - } - } - return out -} - -// IsStub reports whether a function is a stub. A function is -// considered a stub if it has no instructions or exactly one -// instruction, which must be either returning only constant values or -// a panic. -func IsStub(fn *ssa.Function) bool { - if len(fn.Blocks) == 0 { - return true - } - if len(fn.Blocks) > 1 { - return false - } - instrs := filterDebug(fn.Blocks[0].Instrs) - if len(instrs) != 1 { - return false - } - - switch instrs[0].(type) { - case *ssa.Return: - // Since this is the only instruction, the return value must - // be a constant. We consider all constants as stubs, not just - // the zero value. This does not, unfortunately, cover zero - // initialised structs, as these cause additional - // instructions. - return true - case *ssa.Panic: - return true - default: - return false - } -} diff --git a/vendor/honnef.co/go/tools/functions/stub.go b/vendor/honnef.co/go/tools/functions/stub.go new file mode 100644 index 000000000..4d5de10b8 --- /dev/null +++ b/vendor/honnef.co/go/tools/functions/stub.go @@ -0,0 +1,32 @@ +package functions + +import ( + "honnef.co/go/tools/ir" +) + +// IsStub reports whether a function is a stub. A function is +// considered a stub if it has no instructions or if all it does is +// return a constant value. +func IsStub(fn *ir.Function) bool { + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + switch instr.(type) { + case *ir.Const: + // const naturally has no side-effects + case *ir.Panic: + // panic is a stub if it only uses constants + case *ir.Return: + // return is a stub if it only uses constants + case *ir.DebugRef: + case *ir.Jump: + // if there are no disallowed instructions, then we're + // only jumping to the exit block (or possibly + // somewhere else that's stubby?) + default: + // all other instructions are assumed to do actual work + return false + } + } + } + return true +} diff --git a/vendor/honnef.co/go/tools/functions/terminates.go b/vendor/honnef.co/go/tools/functions/terminates.go index 3e9c3a23f..c4984673f 100644 --- a/vendor/honnef.co/go/tools/functions/terminates.go +++ b/vendor/honnef.co/go/tools/functions/terminates.go @@ -1,11 +1,15 @@ package functions -import "honnef.co/go/tools/ssa" +import ( + "go/types" + + "honnef.co/go/tools/ir" +) // Terminates reports whether fn is supposed to return, that is if it // has at least one theoretic path that returns from the function. // Explicit panics do not count as terminating. 
-func Terminates(fn *ssa.Function) bool { +func Terminates(fn *ir.Function) bool { if fn.Blocks == nil { // assuming that a function terminates is the conservative // choice @@ -13,11 +17,53 @@ func Terminates(fn *ssa.Function) bool { } for _, block := range fn.Blocks { - if len(block.Instrs) == 0 { - continue - } - if _, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return); ok { - return true + if _, ok := block.Control().(*ir.Return); ok { + if len(block.Preds) == 0 { + return true + } + for _, pred := range block.Preds { + switch ctrl := pred.Control().(type) { + case *ir.Panic: + // explicit panics do not count as terminating + case *ir.If: + // Check if we got here by receiving from a closed + // time.Tick channel – this cannot happen at + // runtime and thus doesn't constitute termination + iff := ctrl + if !ok { + return true + } + ex, ok := iff.Cond.(*ir.Extract) + if !ok { + return true + } + if ex.Index != 1 { + return true + } + recv, ok := ex.Tuple.(*ir.Recv) + if !ok { + return true + } + call, ok := recv.Chan.(*ir.Call) + if !ok { + return true + } + fn, ok := call.Common().Value.(*ir.Function) + if !ok { + return true + } + fn2, ok := fn.Object().(*types.Func) + if !ok { + return true + } + if fn2.FullName() != "time.Tick" { + return true + } + default: + // we've reached the exit block + return true + } + } } } return false diff --git a/vendor/honnef.co/go/tools/internal/cache/cache.go b/vendor/honnef.co/go/tools/internal/cache/cache.go index 2b33ca106..6b41811cf 100644 --- a/vendor/honnef.co/go/tools/internal/cache/cache.go +++ b/vendor/honnef.co/go/tools/internal/cache/cache.go @@ -177,7 +177,7 @@ func (c *Cache) get(id ActionID) (Entry, error) { i++ } tm, err := strconv.ParseInt(string(etime[i:]), 10, 64) - if err != nil || size < 0 { + if err != nil || tm < 0 { return missing() } @@ -265,7 +265,7 @@ func (c *Cache) Trim() { // We maintain in dir/trim.txt the time of the last completed cache trim. // If the cache has been trimmed recently enough, do nothing. // This is the common case. - data, _ := ioutil.ReadFile(filepath.Join(c.dir, "trim.txt")) + data, _ := renameio.ReadFile(filepath.Join(c.dir, "trim.txt")) t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval { return @@ -282,7 +282,7 @@ func (c *Cache) Trim() { // Ignore errors from here: if we don't write the complete timestamp, the // cache will appear older than it is, and we'll trim it again next time. - renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix()))) + renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666) } // trimSubdir trims a single cache subdirectory. @@ -326,7 +326,8 @@ func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify // in verify mode we are double-checking that the cache entries // are entirely reproducible. As just noted, this may be unrealistic // in some cases but the check is also useful for shaking out real bugs. 
- entry := []byte(fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())) + entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano()) + if verify && allowVerify { old, err := c.get(id) if err == nil && (old.OutputID != out || old.Size != size) { @@ -336,7 +337,28 @@ func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify } } file := c.fileName(id, "a") - if err := ioutil.WriteFile(file, entry, 0666); err != nil { + + // Copy file to cache directory. + mode := os.O_WRONLY | os.O_CREATE + f, err := os.OpenFile(file, mode, 0666) + if err != nil { + return err + } + _, err = f.WriteString(entry) + if err == nil { + // Truncate the file only *after* writing it. + // (This should be a no-op, but truncate just in case of previous corruption.) + // + // This differs from ioutil.WriteFile, which truncates to 0 *before* writing + // via os.O_TRUNC. Truncating only after writing ensures that a second write + // of the same content to the same file is idempotent, and does not — even + // temporarily! — undo the effect of the first write. + err = f.Truncate(int64(len(entry))) + } + if closeErr := f.Close(); err == nil { + err = closeErr + } + if err != nil { // TODO(bcmills): This Remove potentially races with another go command writing to file. // Can we eliminate it? os.Remove(file) diff --git a/vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go b/vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go new file mode 100644 index 000000000..394697702 --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go @@ -0,0 +1,113 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package buildir defines an Analyzer that constructs the IR +// of an error-free package and returns the set of all +// functions within it. It does not report any diagnostics itself but +// may be used as an input to other analyzers. +// +// THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE. +package buildir + +import ( + "go/ast" + "go/types" + "reflect" + + "golang.org/x/tools/go/analysis" + "honnef.co/go/tools/ir" +) + +type willExit struct{} +type willUnwind struct{} + +func (*willExit) AFact() {} +func (*willUnwind) AFact() {} + +var Analyzer = &analysis.Analyzer{ + Name: "buildir", + Doc: "build IR for later passes", + Run: run, + ResultType: reflect.TypeOf(new(IR)), + FactTypes: []analysis.Fact{new(willExit), new(willUnwind)}, +} + +// IR provides intermediate representation for all the +// non-blank source functions in the current package. +type IR struct { + Pkg *ir.Package + SrcFuncs []*ir.Function +} + +func run(pass *analysis.Pass) (interface{}, error) { + // Plundered from ssautil.BuildPackage. + + // We must create a new Program for each Package because the + // analysis API provides no place to hang a Program shared by + // all Packages. Consequently, IR Packages and Functions do not + // have a canonical representation across an analysis session of + // multiple packages. This is unlikely to be a problem in + // practice because the analysis API essentially forces all + // packages to be analysed independently, so any given call to + // Analysis.Run on a package will see only IR objects belonging + // to a single Program. + + mode := ir.GlobalDebug + + prog := ir.NewProgram(pass.Fset, mode) + + // Create IR packages for all imports. + // Order is not significant. 
+ created := make(map[*types.Package]bool) + var createAll func(pkgs []*types.Package) + createAll = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !created[p] { + created[p] = true + irpkg := prog.CreatePackage(p, nil, nil, true) + for _, fn := range irpkg.Functions { + if ast.IsExported(fn.Name()) { + var exit willExit + var unwind willUnwind + if pass.ImportObjectFact(fn.Object(), &exit) { + fn.WillExit = true + } + if pass.ImportObjectFact(fn.Object(), &unwind) { + fn.WillUnwind = true + } + } + } + createAll(p.Imports()) + } + } + } + createAll(pass.Pkg.Imports()) + + // Create and build the primary package. + irpkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false) + irpkg.Build() + + // Compute list of source functions, including literals, + // in source order. + var addAnons func(f *ir.Function) + funcs := make([]*ir.Function, len(irpkg.Functions)) + copy(funcs, irpkg.Functions) + addAnons = func(f *ir.Function) { + for _, anon := range f.AnonFuncs { + funcs = append(funcs, anon) + addAnons(anon) + } + } + for _, fn := range irpkg.Functions { + addAnons(fn) + if fn.WillExit { + pass.ExportObjectFact(fn.Object(), new(willExit)) + } + if fn.WillUnwind { + pass.ExportObjectFact(fn.Object(), new(willUnwind)) + } + } + + return &IR{Pkg: irpkg, SrcFuncs: funcs}, nil +} diff --git a/vendor/honnef.co/go/tools/internal/passes/buildssa/buildssa.go b/vendor/honnef.co/go/tools/internal/passes/buildssa/buildssa.go deleted file mode 100644 index fde918d12..000000000 --- a/vendor/honnef.co/go/tools/internal/passes/buildssa/buildssa.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package buildssa defines an Analyzer that constructs the SSA -// representation of an error-free package and returns the set of all -// functions within it. It does not report any diagnostics itself but -// may be used as an input to other analyzers. -// -// THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE. -package buildssa - -import ( - "go/ast" - "go/types" - "reflect" - - "golang.org/x/tools/go/analysis" - "honnef.co/go/tools/ssa" -) - -var Analyzer = &analysis.Analyzer{ - Name: "buildssa", - Doc: "build SSA-form IR for later passes", - Run: run, - ResultType: reflect.TypeOf(new(SSA)), -} - -// SSA provides SSA-form intermediate representation for all the -// non-blank source functions in the current package. -type SSA struct { - Pkg *ssa.Package - SrcFuncs []*ssa.Function -} - -func run(pass *analysis.Pass) (interface{}, error) { - // Plundered from ssautil.BuildPackage. - - // We must create a new Program for each Package because the - // analysis API provides no place to hang a Program shared by - // all Packages. Consequently, SSA Packages and Functions do not - // have a canonical representation across an analysis session of - // multiple packages. This is unlikely to be a problem in - // practice because the analysis API essentially forces all - // packages to be analysed independently, so any given call to - // Analysis.Run on a package will see only SSA objects belonging - // to a single Program. - - mode := ssa.GlobalDebug - - prog := ssa.NewProgram(pass.Fset, mode) - - // Create SSA packages for all imports. - // Order is not significant. 
- created := make(map[*types.Package]bool) - var createAll func(pkgs []*types.Package) - createAll = func(pkgs []*types.Package) { - for _, p := range pkgs { - if !created[p] { - created[p] = true - prog.CreatePackage(p, nil, nil, true) - createAll(p.Imports()) - } - } - } - createAll(pass.Pkg.Imports()) - - // Create and build the primary package. - ssapkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false) - ssapkg.Build() - - // Compute list of source functions, including literals, - // in source order. - var funcs []*ssa.Function - var addAnons func(f *ssa.Function) - addAnons = func(f *ssa.Function) { - funcs = append(funcs, f) - for _, anon := range f.AnonFuncs { - addAnons(anon) - } - } - addAnons(ssapkg.Members["init"].(*ssa.Function)) - for _, f := range pass.Files { - for _, decl := range f.Decls { - if fdecl, ok := decl.(*ast.FuncDecl); ok { - - // SSA will not build a Function - // for a FuncDecl named blank. - // That's arguably too strict but - // relaxing it would break uniqueness of - // names of package members. - if fdecl.Name.Name == "_" { - continue - } - - // (init functions have distinct Func - // objects named "init" and distinct - // ssa.Functions named "init#1", ...) - - fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func) - if fn == nil { - panic(fn) - } - - f := ssapkg.Prog.FuncValue(fn) - if f == nil { - panic(fn) - } - - addAnons(f) - } - } - } - - return &SSA{Pkg: ssapkg, SrcFuncs: funcs}, nil -} diff --git a/vendor/honnef.co/go/tools/internal/renameio/renameio.go b/vendor/honnef.co/go/tools/internal/renameio/renameio.go index 3f3f1708f..a279d1a1e 100644 --- a/vendor/honnef.co/go/tools/internal/renameio/renameio.go +++ b/vendor/honnef.co/go/tools/internal/renameio/renameio.go @@ -8,15 +8,15 @@ package renameio import ( "bytes" "io" - "io/ioutil" + "math/rand" "os" "path/filepath" - "runtime" - "strings" - "time" + "strconv" + + "honnef.co/go/tools/internal/robustio" ) -const patternSuffix = "*.tmp" +const patternSuffix = ".tmp" // Pattern returns a glob pattern that matches the unrenamed temporary files // created when writing to filename. @@ -29,14 +29,14 @@ func Pattern(filename string) string { // final name. // // That ensures that the final location, if it exists, is always a complete file. -func WriteFile(filename string, data []byte) (err error) { - return WriteToFile(filename, bytes.NewReader(data)) +func WriteFile(filename string, data []byte, perm os.FileMode) (err error) { + return WriteToFile(filename, bytes.NewReader(data), perm) } // WriteToFile is a variant of WriteFile that accepts the data as an io.Reader // instead of a slice. -func WriteToFile(filename string, data io.Reader) (err error) { - f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+patternSuffix) +func WriteToFile(filename string, data io.Reader, perm os.FileMode) (err error) { + f, err := tempFile(filepath.Dir(filename), filepath.Base(filename), perm) if err != nil { return err } @@ -63,21 +63,31 @@ func WriteToFile(filename string, data io.Reader) (err error) { return err } - var start time.Time - for { - err := os.Rename(f.Name(), filename) - if err == nil || runtime.GOOS != "windows" || !strings.HasSuffix(err.Error(), "Access is denied.") { - return err - } + return robustio.Rename(f.Name(), filename) +} - // Windows seems to occasionally trigger spurious "Access is denied" errors - // here (see golang.org/issue/31247). We're not sure why. It's probably - // worth a little extra latency to avoid propagating the spurious errors. 
- if start.IsZero() { - start = time.Now() - } else if time.Since(start) >= 500*time.Millisecond { - return err +// tempFile creates a new temporary file with given permission bits. +func tempFile(dir, prefix string, perm os.FileMode) (f *os.File, err error) { + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+patternSuffix) + f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm) + if os.IsExist(err) { + continue } - time.Sleep(5 * time.Millisecond) + break } + return +} + +// ReadFile is like ioutil.ReadFile, but on Windows retries spurious errors that +// may occur if the file is concurrently replaced. +// +// Errors are classified heuristically and retries are bounded, so even this +// function may occasionally return a spurious error on Windows. +// If so, the error will likely wrap one of: +// - syscall.ERROR_ACCESS_DENIED +// - syscall.ERROR_FILE_NOT_FOUND +// - internal/syscall/windows.ERROR_SHARING_VIOLATION +func ReadFile(filename string) ([]byte, error) { + return robustio.ReadFile(filename) } diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio.go b/vendor/honnef.co/go/tools/internal/robustio/robustio.go new file mode 100644 index 000000000..76e47ad1f --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/robustio/robustio.go @@ -0,0 +1,53 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package robustio wraps I/O functions that are prone to failure on Windows, +// transparently retrying errors up to an arbitrary timeout. +// +// Errors are classified heuristically and retries are bounded, so the functions +// in this package do not completely eliminate spurious errors. However, they do +// significantly reduce the rate of failure in practice. +// +// If so, the error will likely wrap one of: +// The functions in this package do not completely eliminate spurious errors, +// but substantially reduce their rate of occurrence in practice. +package robustio + +// Rename is like os.Rename, but on Windows retries errors that may occur if the +// file is concurrently read or overwritten. +// +// (See golang.org/issue/31247 and golang.org/issue/32188.) +func Rename(oldpath, newpath string) error { + return rename(oldpath, newpath) +} + +// ReadFile is like ioutil.ReadFile, but on Windows retries errors that may +// occur if the file is concurrently replaced. +// +// (See golang.org/issue/31247 and golang.org/issue/32188.) +func ReadFile(filename string) ([]byte, error) { + return readFile(filename) +} + +// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur +// if an executable file in the directory has recently been executed. +// +// (See golang.org/issue/19491.) +func RemoveAll(path string) error { + return removeAll(path) +} + +// IsEphemeralError reports whether err is one of the errors that the functions +// in this package attempt to mitigate. +// +// Errors considered ephemeral include: +// - syscall.ERROR_ACCESS_DENIED +// - syscall.ERROR_FILE_NOT_FOUND +// - internal/syscall/windows.ERROR_SHARING_VIOLATION +// +// This set may be expanded in the future; programs must not rely on the +// non-ephemerality of any given error. 
+func IsEphemeralError(err error) bool { + return isEphemeralError(err) +} diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio_darwin.go b/vendor/honnef.co/go/tools/internal/robustio/robustio_darwin.go new file mode 100644 index 000000000..1ac0d10d7 --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/robustio/robustio_darwin.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "os" + "syscall" +) + +const errFileNotFound = syscall.ENOENT + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + switch werr := err.(type) { + case *os.PathError: + err = werr.Err + case *os.LinkError: + err = werr.Err + case *os.SyscallError: + err = werr.Err + + } + if errno, ok := err.(syscall.Errno); ok { + return errno == errFileNotFound + } + return false +} diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio_flaky.go b/vendor/honnef.co/go/tools/internal/robustio/robustio_flaky.go new file mode 100644 index 000000000..e0bf5b9b3 --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/robustio/robustio_flaky.go @@ -0,0 +1,93 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows darwin + +package robustio + +import ( + "io/ioutil" + "math/rand" + "os" + "syscall" + "time" +) + +const arbitraryTimeout = 500 * time.Millisecond + +const ERROR_SHARING_VIOLATION = 32 + +// retry retries ephemeral errors from f up to an arbitrary timeout +// to work around filesystem flakiness on Windows and Darwin. +func retry(f func() (err error, mayRetry bool)) error { + var ( + bestErr error + lowestErrno syscall.Errno + start time.Time + nextSleep time.Duration = 1 * time.Millisecond + ) + for { + err, mayRetry := f() + if err == nil || !mayRetry { + return err + } + + if errno, ok := err.(syscall.Errno); ok && (lowestErrno == 0 || errno < lowestErrno) { + bestErr = err + lowestErrno = errno + } else if bestErr == nil { + bestErr = err + } + + if start.IsZero() { + start = time.Now() + } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout { + break + } + time.Sleep(nextSleep) + nextSleep += time.Duration(rand.Int63n(int64(nextSleep))) + } + + return bestErr +} + +// rename is like os.Rename, but retries ephemeral errors. +// +// On windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with +// MOVEFILE_REPLACE_EXISTING. +// +// Windows also provides a different system call, ReplaceFile, +// that provides similar semantics, but perhaps preserves more metadata. (The +// documentation on the differences between the two is very sparse.) +// +// Empirical error rates with MoveFileEx are lower under modest concurrency, so +// for now we're sticking with what the os package already provides. +func rename(oldpath, newpath string) (err error) { + return retry(func() (err error, mayRetry bool) { + err = os.Rename(oldpath, newpath) + return err, isEphemeralError(err) + }) +} + +// readFile is like ioutil.ReadFile, but retries ephemeral errors. 
+func readFile(filename string) ([]byte, error) { + var b []byte + err := retry(func() (err error, mayRetry bool) { + b, err = ioutil.ReadFile(filename) + + // Unlike in rename, we do not retry errFileNotFound here: it can occur + // as a spurious error, but the file may also genuinely not exist, so the + // increase in robustness is probably not worth the extra latency. + + return err, isEphemeralError(err) && err != errFileNotFound + }) + return b, err +} + +func removeAll(path string) error { + return retry(func() (err error, mayRetry bool) { + err = os.RemoveAll(path) + return err, isEphemeralError(err) + }) +} diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio_other.go b/vendor/honnef.co/go/tools/internal/robustio/robustio_other.go new file mode 100644 index 000000000..a2428856f --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/robustio/robustio_other.go @@ -0,0 +1,28 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !windows,!darwin + +package robustio + +import ( + "io/ioutil" + "os" +) + +func rename(oldpath, newpath string) error { + return os.Rename(oldpath, newpath) +} + +func readFile(filename string) ([]byte, error) { + return ioutil.ReadFile(filename) +} + +func removeAll(path string) error { + return os.RemoveAll(path) +} + +func isEphemeralError(err error) bool { + return false +} diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio_windows.go b/vendor/honnef.co/go/tools/internal/robustio/robustio_windows.go new file mode 100644 index 000000000..a35237d44 --- /dev/null +++ b/vendor/honnef.co/go/tools/internal/robustio/robustio_windows.go @@ -0,0 +1,33 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package robustio + +import ( + "os" + "syscall" +) + +const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND + +// isEphemeralError returns true if err may be resolved by waiting. +func isEphemeralError(err error) bool { + switch werr := err.(type) { + case *os.PathError: + err = werr.Err + case *os.LinkError: + err = werr.Err + case *os.SyscallError: + err = werr.Err + } + if errno, ok := err.(syscall.Errno); ok { + switch errno { + case syscall.ERROR_ACCESS_DENIED, + syscall.ERROR_FILE_NOT_FOUND, + ERROR_SHARING_VIOLATION: + return true + } + } + return false +} diff --git a/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go b/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go index affee6607..e9abf0d89 100644 --- a/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go +++ b/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go @@ -5,23 +5,24 @@ import ( "go/types" "golang.org/x/tools/go/analysis" - "honnef.co/go/tools/internal/passes/buildssa" + "honnef.co/go/tools/code" + "honnef.co/go/tools/internal/passes/buildir" + "honnef.co/go/tools/ir" . 
"honnef.co/go/tools/lint/lintdsl" - "honnef.co/go/tools/ssa" ) func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) { - for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs { - fn := func(node ast.Node) bool { + for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs { + cb := func(node ast.Node) bool { rng, ok := node.(*ast.RangeStmt) - if !ok || !IsBlank(rng.Key) { + if !ok || !code.IsBlank(rng.Key) { return true } - v, _ := ssafn.ValueForExpr(rng.X) + v, _ := fn.ValueForExpr(rng.X) // Check that we're converting from string to []rune - val, _ := v.(*ssa.Convert) + val, _ := v.(*ir.Convert) if val == nil { return true } @@ -47,13 +48,13 @@ func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) { // Expect two refs: one for obtaining the length of the slice, // one for accessing the elements - if len(FilterDebug(*refs)) != 2 { + if len(code.FilterDebug(*refs)) != 2 { // TODO(dh): right now, we check that only one place // refers to our slice. This will miss cases such as // ranging over the slice twice. Ideally, we'd ensure that // the slice is only used for ranging over (without // accessing the key), but that is harder to do because in - // SSA form, ranging over a slice looks like an ordinary + // IR form, ranging over a slice looks like an ordinary // loop with index increments and slice accesses. We'd // have to look at the associated AST node to check that // it's a range statement. @@ -64,7 +65,7 @@ func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) { return true } - Inspect(ssafn.Syntax(), fn) + Inspect(fn.Source(), cb) } return nil, nil } diff --git a/vendor/honnef.co/go/tools/ssa/LICENSE b/vendor/honnef.co/go/tools/ir/LICENSE similarity index 100% rename from vendor/honnef.co/go/tools/ssa/LICENSE rename to vendor/honnef.co/go/tools/ir/LICENSE diff --git a/vendor/honnef.co/go/tools/ssa/blockopt.go b/vendor/honnef.co/go/tools/ir/blockopt.go similarity index 83% rename from vendor/honnef.co/go/tools/ssa/blockopt.go rename to vendor/honnef.co/go/tools/ir/blockopt.go index 22c9a4c0d..d7a0e3567 100644 --- a/vendor/honnef.co/go/tools/ssa/blockopt.go +++ b/vendor/honnef.co/go/tools/ir/blockopt.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir // Simple block optimizations to simplify the control flow graph. @@ -21,35 +21,34 @@ const debugBlockOpt = false // markReachable sets Index=-1 for all blocks reachable from b. func markReachable(b *BasicBlock) { - b.Index = -1 + b.gaps = -1 for _, succ := range b.Succs { - if succ.Index == 0 { + if succ.gaps == 0 { markReachable(succ) } } } -func DeleteUnreachableBlocks(f *Function) { - deleteUnreachableBlocks(f) -} - // deleteUnreachableBlocks marks all reachable blocks of f and // eliminates (nils) all others, including possibly cyclic subgraphs. // func deleteUnreachableBlocks(f *Function) { const white, black = 0, -1 - // We borrow b.Index temporarily as the mark bit. + // We borrow b.gaps temporarily as the mark bit. for _, b := range f.Blocks { - b.Index = white + b.gaps = white } markReachable(f.Blocks[0]) - if f.Recover != nil { - markReachable(f.Recover) - } + // In SSI form, we need the exit to be reachable for correct + // post-dominance information. In original form, however, we + // cannot unconditionally mark it reachable because we won't + // be adding fake edges, and this breaks the calculation of + // dominance information. 
+ markReachable(f.Exit) for i, b := range f.Blocks { - if b.Index == white { + if b.gaps == white { for _, c := range b.Succs { - if c.Index == black { + if c.gaps == black { c.removePred(b) // delete white->black edge } } @@ -73,6 +72,13 @@ func jumpThreading(f *Function, b *BasicBlock) bool { if b.Instrs == nil { return false } + for _, pred := range b.Preds { + switch pred.Control().(type) { + case *ConstantSwitch: + // don't optimize away the head blocks of switch statements + return false + } + } if _, ok := b.Instrs[0].(*Jump); !ok { return false // not just a jump } @@ -117,10 +123,17 @@ func fuseBlocks(f *Function, a *BasicBlock) bool { if len(a.Succs) != 1 { return false } + if a.Succs[0] == f.Exit { + return false + } b := a.Succs[0] if len(b.Preds) != 1 { return false } + if _, ok := a.Instrs[len(a.Instrs)-1].(*Panic); ok { + // panics aren't simple jumps, they have side effects. + return false + } // Degenerate &&/|| ops may result in a straight-line CFG // containing φ-nodes. (Ideally we'd replace such them with @@ -151,15 +164,16 @@ func fuseBlocks(f *Function, a *BasicBlock) bool { return true } -func OptimizeBlocks(f *Function) { - optimizeBlocks(f) -} - // optimizeBlocks() performs some simple block optimizations on a // completed function: dead block elimination, block fusion, jump // threading. // func optimizeBlocks(f *Function) { + if debugBlockOpt { + f.WriteTo(os.Stderr) + mustSanityCheck(f, nil) + } + deleteUnreachableBlocks(f) // Loop until no further progress. diff --git a/vendor/honnef.co/go/tools/ssa/builder.go b/vendor/honnef.co/go/tools/ir/builder.go similarity index 71% rename from vendor/honnef.co/go/tools/ssa/builder.go rename to vendor/honnef.co/go/tools/ir/builder.go index 317ac0611..fdf4cb1a9 100644 --- a/vendor/honnef.co/go/tools/ssa/builder.go +++ b/vendor/honnef.co/go/tools/ir/builder.go @@ -2,27 +2,22 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir -// This file implements the BUILD phase of SSA construction. +// This file implements the BUILD phase of IR construction. // -// SSA construction has two phases, CREATE and BUILD. In the CREATE phase +// IR construction has two phases, CREATE and BUILD. In the CREATE phase // (create.go), all packages are constructed and type-checked and // definitions of all package members are created, method-sets are // computed, and wrapper methods are synthesized. -// ssa.Packages are created in arbitrary order. +// ir.Packages are created in arbitrary order. // // In the BUILD phase (builder.go), the builder traverses the AST of -// each Go source function and generates SSA instructions for the +// each Go source function and generates IR instructions for the // function body. Initializer expressions for package-level variables // are emitted to the package's init() function in the order specified // by go/types.Info.InitOrder, then code for each function in the // package is generated in lexical order. -// The BUILD phases for distinct packages are independent and are -// executed in parallel. -// -// TODO(adonovan): indeed, building functions is now embarrassingly parallel. -// Audit for concurrency then benchmark using more goroutines. 
// // The builder's and Program's indices (maps) are populated and // mutated during the CREATE phase, but during the BUILD phase they @@ -36,7 +31,6 @@ import ( "go/token" "go/types" "os" - "sync" ) type opaqueType struct { @@ -59,27 +53,25 @@ var ( tUntypedNil = types.Typ[types.UntypedNil] tRangeIter = &opaqueType{nil, "iter"} // the type of all "range" iterators tEface = types.NewInterfaceType(nil, nil).Complete() - - // SSA Value constants. - vZero = intConst(0) - vOne = intConst(1) - vTrue = NewConst(constant.MakeBool(true), tBool) ) // builder holds state associated with the package currently being built. -// Its methods contain all the logic for AST-to-SSA conversion. -type builder struct{} +// Its methods contain all the logic for AST-to-IR conversion. +type builder struct { + printFunc string + + blocksets [5]BlockSet +} // cond emits to fn code to evaluate boolean condition e and jump // to t or f depending on its value, performing various simplifications. // // Postcondition: fn.currentBlock is nil. // -func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) { +func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) *If { switch e := e.(type) { case *ast.ParenExpr: - b.cond(fn, e.X, t, f) - return + return b.cond(fn, e.X, t, f) case *ast.BinaryExpr: switch e.Op { @@ -87,21 +79,18 @@ func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) { ltrue := fn.newBasicBlock("cond.true") b.cond(fn, e.X, ltrue, f) fn.currentBlock = ltrue - b.cond(fn, e.Y, t, f) - return + return b.cond(fn, e.Y, t, f) case token.LOR: lfalse := fn.newBasicBlock("cond.false") b.cond(fn, e.X, t, lfalse) fn.currentBlock = lfalse - b.cond(fn, e.Y, t, f) - return + return b.cond(fn, e.Y, t, f) } case *ast.UnaryExpr: if e.Op == token.NOT { - b.cond(fn, e.X, f, t) - return + return b.cond(fn, e.X, f, t) } } @@ -111,7 +100,7 @@ func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) { // The value of a constant condition may be platform-specific, // and may cause blocks that are reachable in some configuration // to be hidden from subsequent analyses such as bug-finding tools. - emitIf(fn, b.expr(fn, e), t, f) + return emitIf(fn, b.expr(fn, e), t, f, e) } // logicalBinop emits code to fn to evaluate e, a &&- or @@ -131,11 +120,11 @@ func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value { switch e.Op { case token.LAND: b.cond(fn, e.X, rhs, done) - short = NewConst(constant.MakeBool(false), t) + short = emitConst(fn, NewConst(constant.MakeBool(false), t)) case token.LOR: b.cond(fn, e.X, done, rhs) - short = NewConst(constant.MakeBool(true), t) + short = emitConst(fn, NewConst(constant.MakeBool(true), t)) } // Is rhs unreachable? @@ -161,23 +150,21 @@ func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value { // The edge from e.Y to done carries the value of e.Y. fn.currentBlock = rhs edges = append(edges, b.expr(fn, e.Y)) - emitJump(fn, done) + emitJump(fn, done, e) fn.currentBlock = done - phi := &Phi{Edges: edges, Comment: e.Op.String()} - phi.pos = e.OpPos + phi := &Phi{Edges: edges} phi.typ = t - return done.emit(phi) + return done.emit(phi, e) } -// exprN lowers a multi-result expression e to SSA form, emitting code +// exprN lowers a multi-result expression e to IR form, emitting code // to fn and returning a single Value whose type is a *types.Tuple. // The caller must access the components via Extract. 
// // Multi-result expressions include CallExprs in a multi-value // assignment or return statement, and "value,ok" uses of -// TypeAssertExpr, IndexExpr (when X is a map), and UnaryExpr (when Op -// is token.ARROW). +// TypeAssertExpr, IndexExpr (when X is a map), and Recv. // func (b *builder) exprN(fn *Function, e ast.Expr) Value { typ := fn.Pkg.typeOf(e).(*types.Tuple) @@ -192,36 +179,28 @@ func (b *builder) exprN(fn *Function, e ast.Expr) Value { var c Call b.setCall(fn, e, &c.Call) c.typ = typ - return fn.emit(&c) + return fn.emit(&c, e) case *ast.IndexExpr: mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) - lookup := &Lookup{ + lookup := &MapLookup{ X: b.expr(fn, e.X), - Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()), + Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key(), e), CommaOk: true, } lookup.setType(typ) - lookup.setPos(e.Lbrack) - return fn.emit(lookup) + return fn.emit(lookup, e) case *ast.TypeAssertExpr: - return emitTypeTest(fn, b.expr(fn, e.X), typ.At(0).Type(), e.Lparen) + return emitTypeTest(fn, b.expr(fn, e.X), typ.At(0).Type(), e) case *ast.UnaryExpr: // must be receive <- - unop := &UnOp{ - Op: token.ARROW, - X: b.expr(fn, e.X), - CommaOk: true, - } - unop.setType(typ) - unop.setPos(e.OpPos) - return fn.emit(unop) + return emitRecv(fn, b.expr(fn, e.X), true, typ, e) } panic(fmt.Sprintf("exprN(%T) in %s", e, fn)) } -// builtin emits to fn SSA instructions to implement a call to the +// builtin emits to fn IR instructions to implement a call to the // built-in function obj with the specified arguments // and return type. It returns the value defined by the result. // @@ -229,7 +208,7 @@ func (b *builder) exprN(fn *Function, e ast.Expr) Value { // the caller should treat this like an ordinary library function // call. 
// -func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, pos token.Pos) Value { +func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, source ast.Node) Value { switch obj.Name() { case "make": switch typ.Underlying().(type) { @@ -243,23 +222,20 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ // treat make([]T, n, m) as new([m]T)[:n] cap := m.Int64() at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap) - alloc := emitNew(fn, at, pos) - alloc.Comment = "makeslice" + alloc := emitNew(fn, at, source) v := &Slice{ X: alloc, High: n, } - v.setPos(pos) v.setType(typ) - return fn.emit(v) + return fn.emit(v, source) } v := &MakeSlice{ Len: n, Cap: m, } - v.setPos(pos) v.setType(typ) - return fn.emit(v) + return fn.emit(v, source) case *types.Map: var res Value @@ -267,24 +243,21 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ res = b.expr(fn, args[1]) } v := &MakeMap{Reserve: res} - v.setPos(pos) v.setType(typ) - return fn.emit(v) + return fn.emit(v, source) case *types.Chan: - var sz Value = vZero + var sz Value = emitConst(fn, intConst(0)) if len(args) == 2 { sz = b.expr(fn, args[1]) } v := &MakeChan{Size: sz} - v.setPos(pos) v.setType(typ) - return fn.emit(v) + return fn.emit(v, source) } case "new": - alloc := emitNew(fn, deref(typ), pos) - alloc.Comment = "new" + alloc := emitNew(fn, deref(typ), source) return alloc case "len", "cap": @@ -296,22 +269,22 @@ func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ t := deref(fn.Pkg.typeOf(args[0])).Underlying() if at, ok := t.(*types.Array); ok { b.expr(fn, args[0]) // for effects only - return intConst(at.Len()) + return emitConst(fn, intConst(at.Len())) } // Otherwise treat as normal. case "panic": fn.emit(&Panic{ - X: emitConv(fn, b.expr(fn, args[0]), tEface), - pos: pos, - }) + X: emitConv(fn, b.expr(fn, args[0]), tEface, source), + }, source) + addEdge(fn.currentBlock, fn.Exit) fn.currentBlock = fn.newBasicBlock("unreachable") - return vTrue // any non-nil Value will do + return emitConst(fn, NewConst(constant.MakeBool(true), tBool)) // any non-nil Value will do } return nil // treat all others as a regular function call } -// addr lowers a single-result addressable expression e to SSA form, +// addr lowers a single-result addressable expression e to IR form, // emitting code to fn and returning the location (an lvalue) defined // by the expression. 
// @@ -345,21 +318,20 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { if v == nil { v = fn.lookup(obj, escaping) } - return &address{addr: v, pos: e.Pos(), expr: e} + return &address{addr: v, expr: e} case *ast.CompositeLit: t := deref(fn.Pkg.typeOf(e)) var v *Alloc if escaping { - v = emitNew(fn, t, e.Lbrace) + v = emitNew(fn, t, e) } else { - v = fn.addLocal(t, e.Lbrace) + v = fn.addLocal(t, e) } - v.Comment = "complit" var sb storebuf b.compLit(fn, v, e, true, &sb) sb.emit(fn) - return &address{addr: v, pos: e.Lbrace, expr: e} + return &address{addr: v, expr: e} case *ast.ParenExpr: return b.addr(fn, e.X, escaping) @@ -374,11 +346,10 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { panic(sel) } wantAddr := true - v := b.receiver(fn, e.X, wantAddr, escaping, sel) + v := b.receiver(fn, e.X, wantAddr, escaping, sel, e) last := len(sel.Index()) - 1 return &address{ addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel), - pos: e.Sel.Pos(), expr: e.Sel, } @@ -397,43 +368,42 @@ func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue { et = types.NewPointer(t.Elem()) case *types.Map: return &element{ - m: b.expr(fn, e.X), - k: emitConv(fn, b.expr(fn, e.Index), t.Key()), - t: t.Elem(), - pos: e.Lbrack, + m: b.expr(fn, e.X), + k: emitConv(fn, b.expr(fn, e.Index), t.Key(), e.Index), + t: t.Elem(), } default: panic("unexpected container type in IndexExpr: " + t.String()) } v := &IndexAddr{ X: x, - Index: emitConv(fn, b.expr(fn, e.Index), tInt), + Index: emitConv(fn, b.expr(fn, e.Index), tInt, e.Index), } - v.setPos(e.Lbrack) v.setType(et) - return &address{addr: fn.emit(v), pos: e.Lbrack, expr: e} + return &address{addr: fn.emit(v, e), expr: e} case *ast.StarExpr: - return &address{addr: b.expr(fn, e.X), pos: e.Star, expr: e} + return &address{addr: b.expr(fn, e.X), expr: e} } panic(fmt.Sprintf("unexpected address expression: %T", e)) } type store struct { - lhs lvalue - rhs Value + lhs lvalue + rhs Value + source ast.Node } type storebuf struct{ stores []store } -func (sb *storebuf) store(lhs lvalue, rhs Value) { - sb.stores = append(sb.stores, store{lhs, rhs}) +func (sb *storebuf) store(lhs lvalue, rhs Value, source ast.Node) { + sb.stores = append(sb.stores, store{lhs, rhs, source}) } func (sb *storebuf) emit(fn *Function) { for _, s := range sb.stores { - s.lhs.store(fn, s.rhs) + s.lhs.store(fn, s.rhs, s.source) } } @@ -451,7 +421,7 @@ func (sb *storebuf) emit(fn *Function) { // in-place update of existing variables when the RHS is a composite // literal that may reference parts of the LHS. // -func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf) { +func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf, source ast.Node) { // Can we initialize it in place? 
if e, ok := unparen(e).(*ast.CompositeLit); ok { // A CompositeLit never evaluates to a pointer, @@ -462,9 +432,9 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb * ptr := b.addr(fn, e, true).address(fn) // copy address if sb != nil { - sb.store(loc, ptr) + sb.store(loc, ptr, source) } else { - loc.store(fn, ptr) + loc.store(fn, ptr, source) } return } @@ -501,13 +471,13 @@ func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb * // simple case: just copy rhs := b.expr(fn, e) if sb != nil { - sb.store(loc, rhs) + sb.store(loc, rhs, source) } else { - loc.store(fn, rhs) + loc.store(fn, rhs, source) } } -// expr lowers a single-result expression e to SSA form, emitting code +// expr lowers a single-result expression e to IR form, emitting code // to fn and returning the Value defined by the expression. // func (b *builder) expr(fn *Function, e ast.Expr) Value { @@ -517,7 +487,7 @@ func (b *builder) expr(fn *Function, e ast.Expr) Value { // Is expression a constant? if tv.Value != nil { - return NewConst(tv.Value, tv.Type) + return emitConst(fn, NewConst(tv.Value, tv.Type)) } var v Value @@ -525,7 +495,7 @@ func (b *builder) expr(fn *Function, e ast.Expr) Value { // Prefer pointer arithmetic ({Index,Field}Addr) followed // by Load over subelement extraction (e.g. Index, Field), // to avoid large copies. - v = b.addr(fn, e, false).load(fn) + v = b.addr(fn, e, false).load(fn, e) } else { v = b.expr0(fn, e, tv) } @@ -542,15 +512,16 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { case *ast.FuncLit: fn2 := &Function{ - name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)), - Signature: fn.Pkg.typeOf(e.Type).Underlying().(*types.Signature), - pos: e.Type.Func, - parent: fn, - Pkg: fn.Pkg, - Prog: fn.Prog, - syntax: e, - } + name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)), + Signature: fn.Pkg.typeOf(e.Type).Underlying().(*types.Signature), + parent: fn, + Pkg: fn.Pkg, + Prog: fn.Prog, + functionBody: new(functionBody), + } + fn2.source = e fn.AnonFuncs = append(fn.AnonFuncs, fn2) + fn2.initHTML(b.printFunc) b.buildFunction(fn2) if fn2.FreeVars == nil { return fn2 @@ -561,32 +532,22 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { v.Bindings = append(v.Bindings, fv.outer) fv.outer = nil } - return fn.emit(v) + return fn.emit(v, e) case *ast.TypeAssertExpr: // single-result form only - return emitTypeAssert(fn, b.expr(fn, e.X), tv.Type, e.Lparen) + return emitTypeAssert(fn, b.expr(fn, e.X), tv.Type, e) case *ast.CallExpr: if fn.Pkg.info.Types[e.Fun].IsType() { // Explicit type conversion, e.g. string(x) or big.Int(x) x := b.expr(fn, e.Args[0]) - y := emitConv(fn, x, tv.Type) - if y != x { - switch y := y.(type) { - case *Convert: - y.pos = e.Lparen - case *ChangeType: - y.pos = e.Lparen - case *MakeInterface: - y.pos = e.Lparen - } - } + y := emitConv(fn, x, tv.Type, e) return y } // Call to "intrinsic" built-ins, e.g. new, make, panic. 
if id, ok := unparen(e.Fun).(*ast.Ident); ok { if obj, ok := fn.Pkg.info.Uses[id].(*types.Builtin); ok { - if v := b.builtin(fn, obj, e.Args, tv.Type, e.Lparen); v != nil { + if v := b.builtin(fn, obj, e.Args, tv.Type, e); v != nil { return v } } @@ -595,7 +556,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { var v Call b.setCall(fn, e, &v.Call) v.setType(tv.Type) - return fn.emit(&v) + return fn.emit(&v, e) case *ast.UnaryExpr: switch e.Op { @@ -606,19 +567,20 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { // For simplicity, we'll just (suboptimally) rely // on the side effects of a load. // TODO(adonovan): emit dedicated nilcheck. - addr.load(fn) + addr.load(fn, e) } return addr.address(fn) case token.ADD: return b.expr(fn, e.X) - case token.NOT, token.ARROW, token.SUB, token.XOR: // ! <- - ^ + case token.NOT, token.SUB, token.XOR: // ! <- - ^ v := &UnOp{ Op: e.Op, X: b.expr(fn, e.X), } - v.setPos(e.OpPos) v.setType(tv.Type) - return fn.emit(v) + return fn.emit(v, e) + case token.ARROW: + return emitRecv(fn, b.expr(fn, e.X), false, tv.Type, e) default: panic(e.Op) } @@ -630,12 +592,12 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { case token.SHL, token.SHR: fallthrough case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: - return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), tv.Type, e.OpPos) + return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), tv.Type, e) case token.EQL, token.NEQ, token.GTR, token.LSS, token.LEQ, token.GEQ: - cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e.OpPos) + cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e) // The type of x==y may be UntypedBool. - return emitConv(fn, cmp, DefaultType(tv.Type)) + return emitConv(fn, cmp, types.Default(tv.Type), e) default: panic("illegal op in BinaryExpr: " + e.Op.String()) } @@ -667,9 +629,8 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { High: high, Max: max, } - v.setPos(e.Lbrack) v.setType(tv.Type) - return fn.emit(v) + return fn.emit(v, e) case *ast.Ident: obj := fn.Pkg.info.Uses[e] @@ -678,17 +639,17 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { case *types.Builtin: return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)} case *types.Nil: - return nilConst(tv.Type) + return emitConst(fn, nilConst(tv.Type)) } // Package-level func or var? if v := fn.Prog.packageLevelValue(obj); v != nil { if _, ok := obj.(*types.Var); ok { - return emitLoad(fn, v) // var (address) + return emitLoad(fn, v, e) // var (address) } return v // (func) } // Local var. - return emitLoad(fn, fn.lookup(obj, false)) // var (address) + return emitLoad(fn, fn.lookup(obj, false), e) // var (address) case *ast.SelectorExpr: sel, ok := fn.Pkg.info.Selections[e] @@ -700,7 +661,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { case types.MethodExpr: // (*T).f or T.f, the method f from the method-set of type T. // The result is a "thunk". - return emitConv(fn, makeThunk(fn.Prog, sel), tv.Type) + return emitConv(fn, makeThunk(fn.Prog, sel), tv.Type, e) case types.MethodVal: // e.f where e is an expression and f is a method. 
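Illustrative aside (not part of the vendored files): the MethodExpr and MethodVal cases handled above correspond to ordinary Go method expressions and method values. A method value captures its receiver at the point of evaluation, which is why the builder emits a MakeClosure with the receiver in Bindings; a method expression takes the receiver as an explicit first argument, which is why the builder goes through makeThunk. The sketch below shows the source-level constructs being lowered, using a hypothetical Counter type.

package main

import "fmt"

type Counter struct{ n int }

func (c *Counter) Inc() { c.n++ }

func main() {
	c := &Counter{}

	// Method value (types.MethodVal): the receiver c is bound now, so the
	// builder emits a MakeClosure whose Bindings carry c.
	inc := c.Inc
	inc()

	// Method expression (types.MethodExpr): no receiver is bound; the builder
	// lowers it via makeThunk to a function taking the receiver explicitly.
	incExpr := (*Counter).Inc
	incExpr(c)

	fmt.Println(c.n) // prints 2
}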
@@ -709,26 +670,26 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { rt := recvType(obj) wantAddr := isPointer(rt) escaping := true - v := b.receiver(fn, e.X, wantAddr, escaping, sel) + v := b.receiver(fn, e.X, wantAddr, escaping, sel, e) if isInterface(rt) { // If v has interface type I, // we must emit a check that v is non-nil. // We use: typeassert v.(I). - emitTypeAssert(fn, v, rt, token.NoPos) + emitTypeAssert(fn, v, rt, e) } c := &MakeClosure{ Fn: makeBound(fn.Prog, obj), Bindings: []Value{v}, } - c.setPos(e.Sel.Pos()) + c.source = e.Sel c.setType(tv.Type) - return fn.emit(c) + return fn.emit(c, e) case types.FieldVal: indices := sel.Index() last := len(indices) - 1 v := b.expr(fn, e.X) - v = emitImplicitSelections(fn, v, indices[:last]) + v = emitImplicitSelections(fn, v, indices[:last], e) v = emitFieldSelection(fn, v, indices[last], false, e.Sel) return v } @@ -741,36 +702,33 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { // Non-addressable array (in a register). v := &Index{ X: b.expr(fn, e.X), - Index: emitConv(fn, b.expr(fn, e.Index), tInt), + Index: emitConv(fn, b.expr(fn, e.Index), tInt, e.Index), } - v.setPos(e.Lbrack) v.setType(t.Elem()) - return fn.emit(v) + return fn.emit(v, e) case *types.Map: // Maps are not addressable. mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map) - v := &Lookup{ + v := &MapLookup{ X: b.expr(fn, e.X), - Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key()), + Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key(), e.Index), } - v.setPos(e.Lbrack) v.setType(mapt.Elem()) - return fn.emit(v) + return fn.emit(v, e) case *types.Basic: // => string // Strings are not addressable. - v := &Lookup{ + v := &StringLookup{ X: b.expr(fn, e.X), Index: b.expr(fn, e.Index), } - v.setPos(e.Lbrack) v.setType(tByte) - return fn.emit(v) + return fn.emit(v, e) case *types.Slice, *types.Pointer: // *array // Addressable slice/array; use IndexAddr and Load. - return b.addr(fn, e, false).load(fn) + return b.addr(fn, e, false).load(fn, e) default: panic("unexpected container type in IndexExpr: " + t.String()) @@ -778,7 +736,7 @@ func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value { case *ast.CompositeLit, *ast.StarExpr: // Addressable types (lvalues) - return b.addr(fn, e, false).load(fn) + return b.addr(fn, e, false).load(fn, e) } panic(fmt.Sprintf("unexpected expr: %T", e)) @@ -802,7 +760,7 @@ func (b *builder) stmtList(fn *Function, list []ast.Stmt) { // // escaping is defined as per builder.addr(). // -func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection) Value { +func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection, source ast.Node) Value { var v Value if wantAddr && !sel.Indirect() && !isPointer(fn.Pkg.typeOf(e)) { v = b.addr(fn, e, escaping).address(fn) @@ -811,9 +769,9 @@ func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, se } last := len(sel.Index()) - 1 - v = emitImplicitSelections(fn, v, sel.Index()[:last]) + v = emitImplicitSelections(fn, v, sel.Index()[:last], source) if !wantAddr && isPointer(v.Type()) { - v = emitLoad(fn, v) + v = emitLoad(fn, v, e) } return v } @@ -823,8 +781,6 @@ func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, se // occurring in e. // func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { - c.pos = e.Lparen - // Is this a method call? 
if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok { sel, ok := fn.Pkg.info.Selections[selector] @@ -833,7 +789,7 @@ func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) { recv := recvType(obj) wantAddr := isPointer(recv) escaping := true - v := b.receiver(fn, selector.X, wantAddr, escaping, sel) + v := b.receiver(fn, selector.X, wantAddr, escaping, sel, selector) if isInterface(recv) { // Invoke-mode call. c.Value = v @@ -889,7 +845,7 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx // f(x, y, z...): pass slice z straight through. if e.Ellipsis != 0 { for i, arg := range e.Args { - v := emitConv(fn, b.expr(fn, arg), sig.Params().At(i).Type()) + v := emitConv(fn, b.expr(fn, arg), sig.Params().At(i).Type(), arg) args = append(args, v) } return args @@ -906,7 +862,7 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx v := b.expr(fn, arg) if ttuple, ok := v.Type().(*types.Tuple); ok { // MRV chain for i, n := 0, ttuple.Len(); i < n; i++ { - args = append(args, emitExtract(fn, v, i)) + args = append(args, emitExtract(fn, v, i, arg)) } } else { args = append(args, v) @@ -919,7 +875,7 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx np-- } for i := 0; i < np; i++ { - args[offset+i] = emitConv(fn, args[offset+i], sig.Params().At(i).Type()) + args[offset+i] = emitConv(fn, args[offset+i], sig.Params().At(i).Type(), args[offset+i].Source()) } // Actual->formal assignability conversions for variadic parameter, @@ -929,25 +885,24 @@ func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallEx st := sig.Params().At(np).Type().(*types.Slice) vt := st.Elem() if len(varargs) == 0 { - args = append(args, nilConst(st)) + args = append(args, emitConst(fn, nilConst(st))) } else { // Replace a suffix of args with a slice containing it. at := types.NewArray(vt, int64(len(varargs))) - a := emitNew(fn, at, token.NoPos) - a.setPos(e.Rparen) - a.Comment = "varargs" + a := emitNew(fn, at, e) + a.source = e for i, arg := range varargs { iaddr := &IndexAddr{ X: a, - Index: intConst(int64(i)), + Index: emitConst(fn, intConst(int64(i))), } iaddr.setType(types.NewPointer(vt)) - fn.emit(iaddr) - emitStore(fn, iaddr, arg, arg.Pos()) + fn.emit(iaddr, e) + emitStore(fn, iaddr, arg, arg.Source()) } s := &Slice{X: a} s.setType(st) - args[offset+np] = fn.emit(s) + args[offset+np] = fn.emit(s, args[offset+np].Source()) args = args[:offset+np+1] } } @@ -970,9 +925,9 @@ func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) { } // assignOp emits to fn code to perform loc = val. 
-func (b *builder) assignOp(fn *Function, loc lvalue, val Value, op token.Token, pos token.Pos) { - oldv := loc.load(fn) - loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, val, oldv.Type()), loc.typ(), pos)) +func (b *builder) assignOp(fn *Function, loc lvalue, val Value, op token.Token, source ast.Node) { + oldv := loc.load(fn, source) + loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, val, oldv.Type(), source), loc.typ(), source), source) } // localValueSpec emits to fn code to define all of the vars in the @@ -988,7 +943,7 @@ func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { fn.addLocalForIdent(id) } lval := b.addr(fn, id, false) // non-escaping - b.assign(fn, lval, spec.Values[i], true, nil) + b.assign(fn, lval, spec.Values[i], true, nil, spec) } case len(spec.Values) == 0: @@ -1010,7 +965,7 @@ func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { if !isBlankIdent(id) { fn.addLocalForIdent(id) lhs := b.addr(fn, id, false) // non-escaping - lhs.store(fn, emitExtract(fn, tuple, i)) + lhs.store(fn, emitExtract(fn, tuple, i, id), id) } } } @@ -1021,7 +976,7 @@ func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) { // // Note the similarity with localValueSpec. // -func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) { +func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool, source ast.Node) { // Side effects of all LHSs and RHSs must occur in left-to-right order. lvals := make([]lvalue, len(lhss)) isZero := make([]bool, len(lhss)) @@ -1030,7 +985,7 @@ func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) { if !isBlankIdent(lhs) { if isDef { if obj := fn.Pkg.info.Defs[lhs.(*ast.Ident)]; obj != nil { - fn.addNamedLocal(obj) + fn.addNamedLocal(obj, lhs) isZero[i] = true } } @@ -1047,7 +1002,7 @@ func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) { // so we need a storebuf. 
var sb storebuf for i := range rhss { - b.assign(fn, lvals[i], rhss[i], isZero[i], &sb) + b.assign(fn, lvals[i], rhss[i], isZero[i], &sb, source) } sb.emit(fn) } else { @@ -1055,7 +1010,7 @@ func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool) { tuple := b.exprN(fn, rhss[0]) emitDebugRef(fn, rhss[0], tuple, false) for i, lval := range lvals { - lval.store(fn, emitExtract(fn, tuple, i)) + lval.store(fn, emitExtract(fn, tuple, i, source), source) } } } @@ -1102,20 +1057,17 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero case *types.Struct: if !isZero && len(e.Elts) != t.NumFields() { // memclear - sb.store(&address{addr, e.Lbrace, nil}, - zeroValue(fn, deref(addr.Type()))) + sb.store(&address{addr, nil}, zeroValue(fn, deref(addr.Type()), e), e) isZero = true } for i, e := range e.Elts { fieldIndex := i - pos := e.Pos() if kv, ok := e.(*ast.KeyValueExpr); ok { fname := kv.Key.(*ast.Ident).Name for i, n := 0, t.NumFields(); i < n; i++ { sf := t.Field(i) if sf.Name() == fname { fieldIndex = i - pos = kv.Colon e = kv.Value break } @@ -1127,8 +1079,8 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero Field: fieldIndex, } faddr.setType(types.NewPointer(sf.Type())) - fn.emit(faddr) - b.assign(fn, &address{addr: faddr, pos: pos, expr: e}, e, isZero, sb) + fn.emit(faddr, e) + b.assign(fn, &address{addr: faddr, expr: e}, e, isZero, sb, e) } case *types.Array, *types.Slice: @@ -1137,8 +1089,7 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero switch t := t.(type) { case *types.Slice: at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts)) - alloc := emitNew(fn, at, e.Lbrace) - alloc.Comment = "slicelit" + alloc := emitNew(fn, at, e) array = alloc case *types.Array: at = t @@ -1146,51 +1097,46 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero if !isZero && int64(len(e.Elts)) != at.Len() { // memclear - sb.store(&address{array, e.Lbrace, nil}, - zeroValue(fn, deref(array.Type()))) + sb.store(&address{array, nil}, zeroValue(fn, deref(array.Type()), e), e) } } var idx *Const for _, e := range e.Elts { - pos := e.Pos() if kv, ok := e.(*ast.KeyValueExpr); ok { idx = b.expr(fn, kv.Key).(*Const) - pos = kv.Colon e = kv.Value } else { var idxval int64 if idx != nil { idxval = idx.Int64() + 1 } - idx = intConst(idxval) + idx = emitConst(fn, intConst(idxval)) } iaddr := &IndexAddr{ X: array, Index: idx, } iaddr.setType(types.NewPointer(at.Elem())) - fn.emit(iaddr) + fn.emit(iaddr, e) if t != at { // slice // backing array is unaliased => storebuf not needed. 
- b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, nil) + b.assign(fn, &address{addr: iaddr, expr: e}, e, true, nil, e) } else { - b.assign(fn, &address{addr: iaddr, pos: pos, expr: e}, e, true, sb) + b.assign(fn, &address{addr: iaddr, expr: e}, e, true, sb, e) } } if t != at { // slice s := &Slice{X: array} - s.setPos(e.Lbrace) s.setType(typ) - sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, fn.emit(s)) + sb.store(&address{addr: addr, expr: e}, fn.emit(s, e), e) } case *types.Map: - m := &MakeMap{Reserve: intConst(int64(len(e.Elts)))} - m.setPos(e.Lbrace) + m := &MakeMap{Reserve: emitConst(fn, intConst(int64(len(e.Elts))))} m.setType(typ) - fn.emit(m) + fn.emit(m, e) for _, e := range e.Elts { e := e.(*ast.KeyValueExpr) @@ -1211,10 +1157,9 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero } loc := element{ - m: m, - k: emitConv(fn, key, t.Key()), - t: t.Elem(), - pos: e.Colon, + m: m, + k: emitConv(fn, key, t.Key(), e), + t: t.Elem(), } // We call assign() only because it takes care @@ -1223,29 +1168,142 @@ func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero // map[int]*struct{}{0: {}} implies &struct{}{}. // In-place update is of course impossible, // and no storebuf is needed. - b.assign(fn, &loc, e.Value, true, nil) + b.assign(fn, &loc, e.Value, true, nil, e) } - sb.store(&address{addr: addr, pos: e.Lbrace, expr: e}, m) + sb.store(&address{addr: addr, expr: e}, m, e) default: panic("unexpected CompositeLit type: " + t.String()) } } +func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { + if s.Tag == nil { + b.switchStmtDynamic(fn, s, label) + return + } + dynamic := false + for _, iclause := range s.Body.List { + clause := iclause.(*ast.CaseClause) + for _, cond := range clause.List { + if fn.Pkg.info.Types[unparen(cond)].Value == nil { + dynamic = true + break + } + } + } + + if dynamic { + b.switchStmtDynamic(fn, s, label) + return + } + + if s.Init != nil { + b.stmt(fn, s.Init) + } + + entry := fn.currentBlock + tag := b.expr(fn, s.Tag) + + heads := make([]*BasicBlock, 0, len(s.Body.List)) + bodies := make([]*BasicBlock, len(s.Body.List)) + conds := make([]Value, 0, len(s.Body.List)) + + hasDefault := false + done := fn.newBasicBlock(fmt.Sprintf("switch.done")) + if label != nil { + label._break = done + } + for i, stmt := range s.Body.List { + body := fn.newBasicBlock(fmt.Sprintf("switch.body.%d", i)) + bodies[i] = body + cas := stmt.(*ast.CaseClause) + if cas.List == nil { + // default branch + hasDefault = true + head := fn.newBasicBlock(fmt.Sprintf("switch.head.%d", i)) + conds = append(conds, nil) + heads = append(heads, head) + fn.currentBlock = head + emitJump(fn, body, cas) + } + for j, cond := range stmt.(*ast.CaseClause).List { + fn.currentBlock = entry + head := fn.newBasicBlock(fmt.Sprintf("switch.head.%d.%d", i, j)) + conds = append(conds, b.expr(fn, cond)) + heads = append(heads, head) + fn.currentBlock = head + emitJump(fn, body, cond) + } + } + + for i, stmt := range s.Body.List { + clause := stmt.(*ast.CaseClause) + body := bodies[i] + fn.currentBlock = body + fallthru := done + if i+1 < len(bodies) { + fallthru = bodies[i+1] + } + fn.targets = &targets{ + tail: fn.targets, + _break: done, + _fallthrough: fallthru, + } + b.stmtList(fn, clause.Body) + fn.targets = fn.targets.tail + emitJump(fn, done, stmt) + } + + if !hasDefault { + head := fn.newBasicBlock(fmt.Sprintf("switch.head.implicit-default")) + body := fn.newBasicBlock("switch.body.implicit-default") + 
fn.currentBlock = head + emitJump(fn, body, s) + fn.currentBlock = body + emitJump(fn, done, s) + heads = append(heads, head) + conds = append(conds, nil) + } + + if len(heads) != len(conds) { + panic(fmt.Sprintf("internal error: %d heads for %d conds", len(heads), len(conds))) + } + for _, head := range heads { + addEdge(entry, head) + } + fn.currentBlock = entry + entry.emit(&ConstantSwitch{ + Tag: tag, + Conds: conds, + }, s) + fn.currentBlock = done +} + // switchStmt emits to fn code for the switch statement s, optionally // labelled by label. // -func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { +func (b *builder) switchStmtDynamic(fn *Function, s *ast.SwitchStmt, label *lblock) { // We treat SwitchStmt like a sequential if-else chain. - // Multiway dispatch can be recovered later by ssautil.Switches() + // Multiway dispatch can be recovered later by irutil.Switches() // to those cases that are free of side effects. if s.Init != nil { b.stmt(fn, s.Init) } - var tag Value = vTrue + kTrue := emitConst(fn, NewConst(constant.MakeBool(true), tBool)) + + var tagv Value = kTrue + var tagSource ast.Node = s if s.Tag != nil { - tag = b.expr(fn, s.Tag) + tagv = b.expr(fn, s.Tag) + tagSource = s.Tag } + // lifting only considers loads and stores, but we want different + // sigma nodes for the different comparisons. use a temporary and + // load it in every branch. + tag := fn.addLocal(tagv.Type(), tagSource) + emitStore(fn, tag, tagv, tagSource) + done := fn.newBasicBlock("switch.done") if label != nil { label._break = done @@ -1283,13 +1341,23 @@ func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { var nextCond *BasicBlock for _, cond := range cc.List { nextCond = fn.newBasicBlock("switch.next") - // TODO(adonovan): opt: when tag==vTrue, we'd - // get better code if we use b.cond(cond) - // instead of BinOp(EQL, tag, b.expr(cond)) - // followed by If. Don't forget conversions - // though. - cond := emitCompare(fn, token.EQL, tag, b.expr(fn, cond), cond.Pos()) - emitIf(fn, cond, body, nextCond) + if tagv == kTrue { + // emit a proper if/else chain instead of a comparison + // of a value against true. + // + // NOTE(dh): adonovan had a todo saying "don't forget + // conversions though". As far as I can tell, there + // aren't any conversions that we need to take care of + // here. `case bool(a) && bool(b)` as well as `case + // bool(a && b)` are being taken care of by b.cond, + // and `case a` where a is not of type bool is + // invalid. + b.cond(fn, cond, body, nextCond) + } else { + cond := emitCompare(fn, token.EQL, emitLoad(fn, tag, cond), b.expr(fn, cond), cond) + emitIf(fn, cond, body, nextCond, cond.Source()) + } + fn.currentBlock = nextCond } fn.currentBlock = body @@ -1300,11 +1368,14 @@ func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { } b.stmtList(fn, cc.Body) fn.targets = fn.targets.tail - emitJump(fn, done) + emitJump(fn, done, s) fn.currentBlock = nextCond } if dfltBlock != nil { - emitJump(fn, dfltBlock) + // The lack of a Source for the jump doesn't matter, block + // fusing will get rid of the jump later. 
+ + emitJump(fn, dfltBlock, s) fn.currentBlock = dfltBlock fn.targets = &targets{ tail: fn.targets, @@ -1314,138 +1385,175 @@ func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) { b.stmtList(fn, *dfltBody) fn.targets = fn.targets.tail } - emitJump(fn, done) + emitJump(fn, done, s) fn.currentBlock = done } -// typeSwitchStmt emits to fn code for the type switch statement s, optionally -// labelled by label. -// func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lblock) { - // We treat TypeSwitchStmt like a sequential if-else chain. - // Multiway dispatch can be recovered later by ssautil.Switches(). - - // Typeswitch lowering: - // - // var x X - // switch y := x.(type) { - // case T1, T2: S1 // >1 (y := x) - // case nil: SN // nil (y := x) - // default: SD // 0 types (y := x) - // case T3: S3 // 1 type (y := x.(T3)) - // } - // - // ...s.Init... - // x := eval x - // .caseT1: - // t1, ok1 := typeswitch,ok x - // if ok1 then goto S1 else goto .caseT2 - // .caseT2: - // t2, ok2 := typeswitch,ok x - // if ok2 then goto S1 else goto .caseNil - // .S1: - // y := x - // ...S1... - // goto done - // .caseNil: - // if t2, ok2 := typeswitch,ok x - // if x == nil then goto SN else goto .caseT3 - // .SN: - // y := x - // ...SN... - // goto done - // .caseT3: - // t3, ok3 := typeswitch,ok x - // if ok3 then goto S3 else goto default - // .S3: - // y := t3 - // ...S3... - // goto done - // .default: - // y := x - // ...SD... - // goto done - // .done: - if s.Init != nil { b.stmt(fn, s.Init) } - var x Value - switch ass := s.Assign.(type) { + var tag Value + switch e := s.Assign.(type) { case *ast.ExprStmt: // x.(type) - x = b.expr(fn, unparen(ass.X).(*ast.TypeAssertExpr).X) + tag = b.expr(fn, unparen(e.X).(*ast.TypeAssertExpr).X) case *ast.AssignStmt: // y := x.(type) - x = b.expr(fn, unparen(ass.Rhs[0]).(*ast.TypeAssertExpr).X) + tag = b.expr(fn, unparen(e.Rhs[0]).(*ast.TypeAssertExpr).X) + default: + panic("unreachable") } + tagPtr := fn.addLocal(tag.Type(), tag.Source()) + emitStore(fn, tagPtr, tag, tag.Source()) - done := fn.newBasicBlock("typeswitch.done") + // +1 in case there's no explicit default case + heads := make([]*BasicBlock, 0, len(s.Body.List)+1) + + entry := fn.currentBlock + done := fn.newBasicBlock("done") if label != nil { label._break = done } + + // set up type switch and constant switch, populate their conditions + tswtch := &TypeSwitch{ + Tag: emitLoad(fn, tagPtr, tag.Source()), + Conds: make([]types.Type, 0, len(s.Body.List)+1), + } + cswtch := &ConstantSwitch{ + Conds: make([]Value, 0, len(s.Body.List)+1), + } + + rets := make([]types.Type, 0, len(s.Body.List)+1) + index := 0 var default_ *ast.CaseClause for _, clause := range s.Body.List { cc := clause.(*ast.CaseClause) + if obj := fn.Pkg.info.Implicits[cc]; obj != nil { + fn.addNamedLocal(obj, cc) + } if cc.List == nil { + // default case default_ = cc + } else { + for _, expr := range cc.List { + tswtch.Conds = append(tswtch.Conds, fn.Pkg.typeOf(expr)) + cswtch.Conds = append(cswtch.Conds, emitConst(fn, intConst(int64(index)))) + index++ + } + if len(cc.List) == 1 { + rets = append(rets, fn.Pkg.typeOf(cc.List[0])) + } else { + for range cc.List { + rets = append(rets, tag.Type()) + } + } + } + } + + // default branch + rets = append(rets, tag.Type()) + + var vars []*types.Var + vars = append(vars, varIndex) + for _, typ := range rets { + vars = append(vars, anonVar(typ)) + } + tswtch.setType(types.NewTuple(vars...)) + // default branch + fn.currentBlock = entry + 
fn.emit(tswtch, s) + cswtch.Conds = append(cswtch.Conds, emitConst(fn, intConst(int64(-1)))) + // in theory we should add a local and stores/loads for tswtch, to + // generate sigma nodes in the branches. however, there isn't any + // useful information we could possibly attach to it. + cswtch.Tag = emitExtract(fn, tswtch, 0, s) + fn.emit(cswtch, s) + + // build heads and bodies + index = 0 + for _, clause := range s.Body.List { + cc := clause.(*ast.CaseClause) + if cc.List == nil { continue } + body := fn.newBasicBlock("typeswitch.body") - var next *BasicBlock - var casetype types.Type - var ti Value // ti, ok := typeassert,ok x - for _, cond := range cc.List { - next = fn.newBasicBlock("typeswitch.next") - casetype = fn.Pkg.typeOf(cond) - var condv Value - if casetype == tUntypedNil { - condv = emitCompare(fn, token.EQL, x, nilConst(x.Type()), token.NoPos) - ti = x - } else { - yok := emitTypeTest(fn, x, casetype, cc.Case) - ti = emitExtract(fn, yok, 0) - condv = emitExtract(fn, yok, 1) + for _, expr := range cc.List { + head := fn.newBasicBlock("typeswitch.head") + heads = append(heads, head) + fn.currentBlock = head + + if obj := fn.Pkg.info.Implicits[cc]; obj != nil { + // In a switch y := x.(type), each case clause + // implicitly declares a distinct object y. + // In a single-type case, y has that type. + // In multi-type cases, 'case nil' and default, + // y has the same type as the interface operand. + + l := fn.objects[obj] + if rets[index] == tUntypedNil { + emitStore(fn, l, emitConst(fn, nilConst(tswtch.Tag.Type())), s.Assign) + } else { + x := emitExtract(fn, tswtch, index+1, s.Assign) + emitStore(fn, l, x, nil) + } } - emitIf(fn, condv, body, next) - fn.currentBlock = next - } - if len(cc.List) != 1 { - ti = x + + emitJump(fn, body, expr) + index++ } fn.currentBlock = body - b.typeCaseBody(fn, cc, ti, done) - fn.currentBlock = next + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, cc.Body) + fn.targets = fn.targets.tail + emitJump(fn, done, clause) } - if default_ != nil { - b.typeCaseBody(fn, default_, x, done) + + if default_ == nil { + // implicit default + heads = append(heads, done) } else { - emitJump(fn, done) + body := fn.newBasicBlock("typeswitch.default") + heads = append(heads, body) + fn.currentBlock = body + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + if obj := fn.Pkg.info.Implicits[default_]; obj != nil { + l := fn.objects[obj] + x := emitExtract(fn, tswtch, index+1, s.Assign) + emitStore(fn, l, x, s) + } + b.stmtList(fn, default_.Body) + fn.targets = fn.targets.tail + emitJump(fn, done, s) } - fn.currentBlock = done -} -func (b *builder) typeCaseBody(fn *Function, cc *ast.CaseClause, x Value, done *BasicBlock) { - if obj := fn.Pkg.info.Implicits[cc]; obj != nil { - // In a switch y := x.(type), each case clause - // implicitly declares a distinct object y. - // In a single-type case, y has that type. - // In multi-type cases, 'case nil' and default, - // y has the same type as the interface operand. - emitStore(fn, fn.addNamedLocal(obj), x, obj.Pos()) - } - fn.targets = &targets{ - tail: fn.targets, - _break: done, + fn.currentBlock = entry + for _, head := range heads { + addEdge(entry, head) } - b.stmtList(fn, cc.Body) - fn.targets = fn.targets.tail - emitJump(fn, done) + fn.currentBlock = done } // selectStmt emits to fn code for the select statement s, optionally // labelled by label. 
// -func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { +func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) (noreturn bool) { + if len(s.Body.List) == 0 { + instr := &Select{Blocking: true} + instr.setType(types.NewTuple(varIndex, varOk)) + fn.emit(instr, s) + fn.emit(new(Unreachable), s) + addEdge(fn.currentBlock, fn.Exit) + return true + } + // A blocking select of a single case degenerates to a // simple send or receive. // TODO(adonovan): opt: is this optimization worth its weight? @@ -1463,9 +1571,9 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { } b.stmtList(fn, clause.Body) fn.targets = fn.targets.tail - emitJump(fn, done) + emitJump(fn, done, clause) fn.currentBlock = done - return + return false } } @@ -1487,7 +1595,7 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { Dir: types.SendOnly, Chan: ch, Send: emitConv(fn, b.expr(fn, comm.Value), - ch.Type().Underlying().(*types.Chan).Elem()), + ch.Type().Underlying().(*types.Chan).Elem(), comm), Pos: comm.Arrow, } if debugInfo { @@ -1520,22 +1628,12 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { } // We dispatch on the (fair) result of Select using a - // sequential if-else chain, in effect: - // - // idx, recvOk, r0...r_n-1 := select(...) - // if idx == 0 { // receive on channel 0 (first receive => r0) - // x, ok := r0, recvOk - // ...state0... - // } else if v == 1 { // send on channel 1 - // ...state1... - // } else { - // ...default... - // } + // switch on the returned index. sel := &Select{ States: states, Blocking: blocking, } - sel.setPos(s.Select) + sel.source = s var vars []*types.Var vars = append(vars, varIndex, varOk) for _, st := range states { @@ -1545,28 +1643,45 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { } } sel.setType(types.NewTuple(vars...)) - - fn.emit(sel) - idx := emitExtract(fn, sel, 0) + fn.emit(sel, s) + idx := emitExtract(fn, sel, 0, s) done := fn.newBasicBlock("select.done") if label != nil { label._break = done } - var defaultBody *[]ast.Stmt + entry := fn.currentBlock + swtch := &ConstantSwitch{ + Tag: idx, + // one condition per case + Conds: make([]Value, 0, len(s.Body.List)+1), + } + // note that we don't need heads; a select case can only have a single condition + var bodies []*BasicBlock + state := 0 r := 2 // index in 'sel' tuple of value; increments if st.Dir==RECV for _, cc := range s.Body.List { clause := cc.(*ast.CommClause) if clause.Comm == nil { - defaultBody = &clause.Body + body := fn.newBasicBlock("select.default") + fn.currentBlock = body + bodies = append(bodies, body) + fn.targets = &targets{ + tail: fn.targets, + _break: done, + } + b.stmtList(fn, clause.Body) + emitJump(fn, done, s) + fn.targets = fn.targets.tail + swtch.Conds = append(swtch.Conds, emitConst(fn, intConst(-1))) continue } + swtch.Conds = append(swtch.Conds, emitConst(fn, intConst(int64(state)))) body := fn.newBasicBlock("select.body") - next := fn.newBasicBlock("select.next") - emitIf(fn, emitCompare(fn, token.EQL, idx, intConst(int64(state)), token.NoPos), body, next) fn.currentBlock = body + bodies = append(bodies, body) fn.targets = &targets{ tail: fn.targets, _break: done, @@ -1574,7 +1689,7 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { switch comm := clause.Comm.(type) { case *ast.ExprStmt: // <-ch if debugInfo { - v := emitExtract(fn, sel, r) + v := emitExtract(fn, sel, r, comm) 
emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) } r++ @@ -1584,44 +1699,33 @@ func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) { fn.addLocalForIdent(comm.Lhs[0].(*ast.Ident)) } x := b.addr(fn, comm.Lhs[0], false) // non-escaping - v := emitExtract(fn, sel, r) + v := emitExtract(fn, sel, r, comm) if debugInfo { emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false) } - x.store(fn, v) + x.store(fn, v, comm) if len(comm.Lhs) == 2 { // x, ok := ... if comm.Tok == token.DEFINE { fn.addLocalForIdent(comm.Lhs[1].(*ast.Ident)) } ok := b.addr(fn, comm.Lhs[1], false) // non-escaping - ok.store(fn, emitExtract(fn, sel, 1)) + ok.store(fn, emitExtract(fn, sel, 1, comm), comm) } r++ } b.stmtList(fn, clause.Body) fn.targets = fn.targets.tail - emitJump(fn, done) - fn.currentBlock = next + emitJump(fn, done, s) state++ } - if defaultBody != nil { - fn.targets = &targets{ - tail: fn.targets, - _break: done, - } - b.stmtList(fn, *defaultBody) - fn.targets = fn.targets.tail - } else { - // A blocking select must match some case. - // (This should really be a runtime.errorString, not a string.) - fn.emit(&Panic{ - X: emitConv(fn, stringConst("blocking select matched no case"), tEface), - }) - fn.currentBlock = fn.newBasicBlock("unreachable") + fn.currentBlock = entry + fn.emit(swtch, s) + for _, body := range bodies { + addEdge(entry, body) } - emitJump(fn, done) fn.currentBlock = done + return false } // forStmt emits to fn code for the for statement s, optionally @@ -1656,7 +1760,7 @@ func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { label._break = done label._continue = cont } - emitJump(fn, loop) + emitJump(fn, loop, s) fn.currentBlock = loop if loop != body { b.cond(fn, s.Cond, body, done) @@ -1669,12 +1773,12 @@ func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { } b.stmt(fn, s.Body) fn.targets = fn.targets.tail - emitJump(fn, cont) + emitJump(fn, cont, s) if s.Post != nil { fn.currentBlock = cont b.stmt(fn, s.Post) - emitJump(fn, loop) // back-edge + emitJump(fn, loop, s) // back-edge } fn.currentBlock = done } @@ -1684,7 +1788,7 @@ func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) { // The v result is defined only if tv is non-nil. // forPos is the position of the "for" token. // -func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { +func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, source ast.Node) (k, v Value, loop, done *BasicBlock) { // // length = len(x) // index = -1 @@ -1707,37 +1811,37 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P // elimination if x is pure, static unrolling, etc. // Ranging over a nil *array may have >0 iterations. // We still generate code for x, in case it has effects. - length = intConst(arr.Len()) + length = emitConst(fn, intConst(arr.Len())) } else { // length = len(x). 
var c Call c.Call.Value = makeLen(x.Type()) c.Call.Args = []Value{x} c.setType(tInt) - length = fn.emit(&c) + length = fn.emit(&c, source) } - index := fn.addLocal(tInt, token.NoPos) - emitStore(fn, index, intConst(-1), pos) + index := fn.addLocal(tInt, source) + emitStore(fn, index, emitConst(fn, intConst(-1)), source) loop = fn.newBasicBlock("rangeindex.loop") - emitJump(fn, loop) + emitJump(fn, loop, source) fn.currentBlock = loop incr := &BinOp{ Op: token.ADD, - X: emitLoad(fn, index), - Y: vOne, + X: emitLoad(fn, index, source), + Y: emitConst(fn, intConst(1)), } incr.setType(tInt) - emitStore(fn, index, fn.emit(incr), pos) + emitStore(fn, index, fn.emit(incr, source), source) body := fn.newBasicBlock("rangeindex.body") done = fn.newBasicBlock("rangeindex.done") - emitIf(fn, emitCompare(fn, token.LSS, incr, length, token.NoPos), body, done) + emitIf(fn, emitCompare(fn, token.LSS, incr, length, source), body, done, source) fn.currentBlock = body - k = emitLoad(fn, index) + k = emitLoad(fn, index, source) if tv != nil { switch t := x.Type().Underlying().(type) { case *types.Array: @@ -1746,7 +1850,7 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P Index: k, } instr.setType(t.Elem()) - v = fn.emit(instr) + v = fn.emit(instr, source) case *types.Pointer: // *array instr := &IndexAddr{ @@ -1754,7 +1858,7 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P Index: k, } instr.setType(types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())) - v = emitLoad(fn, fn.emit(instr)) + v = emitLoad(fn, fn.emit(instr, source), source) case *types.Slice: instr := &IndexAddr{ @@ -1762,7 +1866,7 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P Index: k, } instr.setType(types.NewPointer(t.Elem())) - v = emitLoad(fn, fn.emit(instr)) + v = emitLoad(fn, fn.emit(instr, source), source) default: panic("rangeIndexed x:" + t.String()) @@ -1776,7 +1880,7 @@ func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, pos token.P // tk and tv are the types of the key/value results k and v, or nil // if the respective component is not wanted. // -func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token.Pos) (k, v Value, loop, done *BasicBlock) { +func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, source ast.Node) (k, v Value, loop, done *BasicBlock) { // // it = range x // loop: (target of continue) @@ -1799,12 +1903,11 @@ func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token. } rng := &Range{X: x} - rng.setPos(pos) rng.setType(tRangeIter) - it := fn.emit(rng) + it := fn.emit(rng, source) loop = fn.newBasicBlock("rangeiter.loop") - emitJump(fn, loop) + emitJump(fn, loop, source) fn.currentBlock = loop _, isString := x.Type().Underlying().(*types.Basic) @@ -1818,18 +1921,18 @@ func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token. newVar("k", tk), newVar("v", tv), )) - fn.emit(okv) + fn.emit(okv, source) body := fn.newBasicBlock("rangeiter.body") done = fn.newBasicBlock("rangeiter.done") - emitIf(fn, emitExtract(fn, okv, 0), body, done) + emitIf(fn, emitExtract(fn, okv, 0, source), body, done, source) fn.currentBlock = body if tk != tInvalid { - k = emitExtract(fn, okv, 1) + k = emitExtract(fn, okv, 1, source) } if tv != tInvalid { - v = emitExtract(fn, okv, 2) + v = emitExtract(fn, okv, 2, source) } return } @@ -1840,7 +1943,7 @@ func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, pos token. 
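rangeChan below now goes through emitRecv with CommaOk set instead of hand-building a UnOp ARROW, but the documented lowering is unchanged: a comma-ok receive whose ok component chooses between rangechan.body and rangechan.done. Written out by hand as ordinary Go (illustrative only, not taken from the diff):

// The loop shape rangeChan documents, spelled out manually. The ok value of
// the comma-ok receive plays the role of the branch to body or done.
package main

import "fmt"

func sum(ch <-chan int) int {
	total := 0
	for { // rangechan.loop
		k, ok := <-ch // the comma-ok receive emitRecv now produces
		if !ok {
			break // rangechan.done
		}
		total += k // rangechan.body
	}
	return total
}

func main() {
	ch := make(chan int, 3)
	for _, v := range []int{1, 2, 3} {
		ch <- v
	}
	close(ch)
	fmt.Println(sum(ch)) // 6
}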
// not wanted // pos is the position of the '=' or ':=' token. // -func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) (k Value, loop, done *BasicBlock) { +func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, source ast.Node) (k Value, loop, done *BasicBlock) { // // loop: (target of continue) // ko = <-x (key, ok) @@ -1853,25 +1956,15 @@ func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) // done: (target of break) loop = fn.newBasicBlock("rangechan.loop") - emitJump(fn, loop) + emitJump(fn, loop, source) fn.currentBlock = loop - recv := &UnOp{ - Op: token.ARROW, - X: x, - CommaOk: true, - } - recv.setPos(pos) - recv.setType(types.NewTuple( - newVar("k", x.Type().Underlying().(*types.Chan).Elem()), - varOk, - )) - ko := fn.emit(recv) + retv := emitRecv(fn, x, true, types.NewTuple(newVar("k", x.Type().Underlying().(*types.Chan).Elem()), varOk), source) body := fn.newBasicBlock("rangechan.body") done = fn.newBasicBlock("rangechan.done") - emitIf(fn, emitExtract(fn, ko, 1), body, done) + emitIf(fn, emitExtract(fn, retv, 1, source), body, done, source) fn.currentBlock = body if tk != nil { - k = emitExtract(fn, ko, 0) + k = emitExtract(fn, retv, 0, source) } return } @@ -1879,7 +1972,7 @@ func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, pos token.Pos) // rangeStmt emits to fn code for the range statement s, optionally // labelled by label. // -func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { +func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock, source ast.Node) { var tk, tv types.Type if s.Key != nil && !isBlankIdent(s.Key) { tk = fn.Pkg.typeOf(s.Key) @@ -1909,13 +2002,13 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { var loop, done *BasicBlock switch rt := x.Type().Underlying().(type) { case *types.Slice, *types.Array, *types.Pointer: // *array - k, v, loop, done = b.rangeIndexed(fn, x, tv, s.For) + k, v, loop, done = b.rangeIndexed(fn, x, tv, source) case *types.Chan: - k, loop, done = b.rangeChan(fn, x, tk, s.For) + k, loop, done = b.rangeChan(fn, x, tk, source) case *types.Map, *types.Basic: // string - k, v, loop, done = b.rangeIter(fn, x, tk, tv, s.For) + k, v, loop, done = b.rangeIter(fn, x, tk, tv, source) default: panic("Cannot range over: " + rt.String()) @@ -1930,10 +2023,10 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { vl = b.addr(fn, s.Value, false) // non-escaping } if tk != nil { - kl.store(fn, k) + kl.store(fn, k, s) } if tv != nil { - vl.store(fn, v) + vl.store(fn, v, s) } if label != nil { @@ -1948,11 +2041,11 @@ func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock) { } b.stmt(fn, s.Body) fn.targets = fn.targets.tail - emitJump(fn, loop) // back-edge + emitJump(fn, loop, source) // back-edge fn.currentBlock = done } -// stmt lowers statement s to SSA form, emitting code to fn. +// stmt lowers statement s to IR form, emitting code to fn. func (b *builder) stmt(fn *Function, _s ast.Stmt) { // The label of the current statement. 
If non-nil, its _goto // target is always set; its _break and _continue are set only @@ -1976,7 +2069,7 @@ start: case *ast.LabeledStmt: label = fn.labelledBlock(s.Label) - emitJump(fn, label._goto) + emitJump(fn, label._goto, s) fn.currentBlock = label._goto _s = s.Stmt goto start // effectively: tailcall stmt(fn, s.Stmt, label) @@ -1985,12 +2078,12 @@ start: b.expr(fn, s.X) case *ast.SendStmt: - fn.emit(&Send{ + instr := &Send{ Chan: b.expr(fn, s.Chan), X: emitConv(fn, b.expr(fn, s.Value), - fn.Pkg.typeOf(s.Chan).Underlying().(*types.Chan).Elem()), - pos: s.Arrow, - }) + fn.Pkg.typeOf(s.Chan).Underlying().(*types.Chan).Elem(), s), + } + fn.emit(instr, s) case *ast.IncDecStmt: op := token.ADD @@ -1998,37 +2091,37 @@ start: op = token.SUB } loc := b.addr(fn, s.X, false) - b.assignOp(fn, loc, NewConst(constant.MakeInt64(1), loc.typ()), op, s.Pos()) + b.assignOp(fn, loc, emitConst(fn, NewConst(constant.MakeInt64(1), loc.typ())), op, s) case *ast.AssignStmt: switch s.Tok { case token.ASSIGN, token.DEFINE: - b.assignStmt(fn, s.Lhs, s.Rhs, s.Tok == token.DEFINE) + b.assignStmt(fn, s.Lhs, s.Rhs, s.Tok == token.DEFINE, _s) default: // +=, etc. op := s.Tok + token.ADD - token.ADD_ASSIGN - b.assignOp(fn, b.addr(fn, s.Lhs[0], false), b.expr(fn, s.Rhs[0]), op, s.Pos()) + b.assignOp(fn, b.addr(fn, s.Lhs[0], false), b.expr(fn, s.Rhs[0]), op, s) } case *ast.GoStmt: // The "intrinsics" new/make/len/cap are forbidden here. // panic is treated like an ordinary function call. - v := Go{pos: s.Go} + v := Go{} b.setCall(fn, s.Call, &v.Call) - fn.emit(&v) + fn.emit(&v, s) case *ast.DeferStmt: // The "intrinsics" new/make/len/cap are forbidden here. // panic is treated like an ordinary function call. - v := Defer{pos: s.Defer} + v := Defer{} b.setCall(fn, s.Call, &v.Call) - fn.emit(&v) - - // A deferred call can cause recovery from panic, - // and control resumes at the Recover block. - createRecoverBlock(fn) + fn.hasDefer = true + fn.emit(&v, s) case *ast.ReturnStmt: + // TODO(dh): we could emit tigher position information by + // using the ith returned expression + var results []Value if len(s.Results) == 1 && fn.Signature.Results().Len() > 1 { // Return of one expression in a multi-valued function. @@ -2036,34 +2129,23 @@ start: ttuple := tuple.Type().(*types.Tuple) for i, n := 0, ttuple.Len(); i < n; i++ { results = append(results, - emitConv(fn, emitExtract(fn, tuple, i), - fn.Signature.Results().At(i).Type())) + emitConv(fn, emitExtract(fn, tuple, i, s), + fn.Signature.Results().At(i).Type(), s)) } } else { // 1:1 return, or no-arg return in non-void function. for i, r := range s.Results { - v := emitConv(fn, b.expr(fn, r), fn.Signature.Results().At(i).Type()) + v := emitConv(fn, b.expr(fn, r), fn.Signature.Results().At(i).Type(), s) results = append(results, v) } } - if fn.namedResults != nil { - // Function has named result parameters (NRPs). - // Perform parallel assignment of return operands to NRPs. - for i, r := range results { - emitStore(fn, fn.namedResults[i], r, s.Return) - } - } - // Run function calls deferred in this - // function when explicitly returning from it. - fn.emit(new(RunDefers)) - if fn.namedResults != nil { - // Reload NRPs to form the result tuple. 
- results = results[:0] - for _, r := range fn.namedResults { - results = append(results, emitLoad(fn, r)) - } + + ret := fn.results() + for i, r := range results { + emitStore(fn, ret[i], r, s) } - fn.emit(&Return{Results: results, pos: s.Return}) + + emitJump(fn, fn.Exit, s) fn.currentBlock = fn.newBasicBlock("unreachable") case *ast.BranchStmt: @@ -2095,7 +2177,8 @@ start: case token.GOTO: block = fn.labelledBlock(s.Label)._goto } - emitJump(fn, block) + j := emitJump(fn, block, s) + j.Comment = s.Tok.String() fn.currentBlock = fn.newBasicBlock("unreachable") case *ast.BlockStmt: @@ -2111,15 +2194,16 @@ start: if s.Else != nil { els = fn.newBasicBlock("if.else") } - b.cond(fn, s.Cond, then, els) + instr := b.cond(fn, s.Cond, then, els) + instr.source = s fn.currentBlock = then b.stmt(fn, s.Body) - emitJump(fn, done) + emitJump(fn, done, s) if s.Else != nil { fn.currentBlock = els b.stmt(fn, s.Else) - emitJump(fn, done) + emitJump(fn, done, s) } fn.currentBlock = done @@ -2131,20 +2215,23 @@ start: b.typeSwitchStmt(fn, s, label) case *ast.SelectStmt: - b.selectStmt(fn, s, label) + if b.selectStmt(fn, s, label) { + // the select has no cases, it blocks forever + fn.currentBlock = fn.newBasicBlock("unreachable") + } case *ast.ForStmt: b.forStmt(fn, s, label) case *ast.RangeStmt: - b.rangeStmt(fn, s, label) + b.rangeStmt(fn, s, label, s) default: panic(fmt.Sprintf("unexpected statement kind: %T", s)) } } -// buildFunction builds SSA code for the body of function fn. Idempotent. +// buildFunction builds IR code for the body of function fn. Idempotent. func (b *builder) buildFunction(fn *Function) { if fn.Blocks != nil { return // building already started @@ -2153,7 +2240,7 @@ func (b *builder) buildFunction(fn *Function) { var recvField *ast.FieldList var body *ast.BlockStmt var functype *ast.FuncType - switch n := fn.syntax.(type) { + switch n := fn.source.(type) { case nil: return // not a Go source function. (Synthetic, or from object file.) case *ast.FuncDecl: @@ -2167,6 +2254,16 @@ func (b *builder) buildFunction(fn *Function) { panic(n) } + if fn.Package().Pkg.Path() == "syscall" && fn.Name() == "Exit" { + // syscall.Exit is a stub and the way os.Exit terminates the + // process. Note that there are other functions in the runtime + // that also terminate or unwind that we cannot analyze. + // However, they aren't stubs, so buildExits ends up getting + // called on them, so that's where we handle those special + // cases. + fn.WillExit = true + } + if body == nil { // External function. if fn.Params == nil { @@ -2178,22 +2275,26 @@ func (b *builder) buildFunction(fn *Function) { // We set Function.Params even though there is no body // code to reference them. This simplifies clients. 
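In the *ast.ReturnStmt case above, the builder no longer emits RunDefers/Return inline; each result is converted, stored into the slots from fn.results(), and control jumps to the function's shared Exit block, where (judging by the surrounding changes) the single Return for the function lives. Roughly, at the source level (illustrative only; addOrZero is a made-up example):

// With the single-exit scheme, each return below stores into the named
// result and jumps to the shared Exit block instead of carrying its own
// Return instruction.
package main

import "fmt"

func addOrZero(a, b int) (sum int) {
	if a < 0 || b < 0 {
		return 0 // store 0 into sum; jump to Exit
	}
	return a + b // store a+b into sum; jump to Exit
}

func main() {
	fmt.Println(addOrZero(3, 4))
	fmt.Println(addOrZero(-1, 4))
}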
if recv := fn.Signature.Recv(); recv != nil { - fn.addParamObj(recv) + // XXX synthesize an ast.Node + fn.addParamObj(recv, nil) } params := fn.Signature.Params() for i, n := 0, params.Len(); i < n; i++ { - fn.addParamObj(params.At(i)) + // XXX synthesize an ast.Node + fn.addParamObj(params.At(i), nil) } } return } if fn.Prog.mode&LogSource != 0 { - defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.pos))() + defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.Pos()))() } + fn.blocksets = b.blocksets fn.startBody() fn.createSyntacticParams(recvField, functype) + fn.exitBlock() b.stmt(fn, body) - if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb == fn.Recover || cb.Preds != nil) { + if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb.Preds != nil) { // Control fell off the end of the function's body block. // // Block optimizations eliminate the current block, if @@ -2201,13 +2302,20 @@ func (b *builder) buildFunction(fn *Function) { // if this no-arg return is ill-typed for // fn.Signature.Results, this block must be // unreachable. The sanity checker checks this. - fn.emit(new(RunDefers)) - fn.emit(new(Return)) - } + // fn.emit(new(RunDefers)) + // fn.emit(new(Return)) + emitJump(fn, fn.Exit, nil) + } + optimizeBlocks(fn) + buildFakeExits(fn) + b.buildExits(fn) + b.addUnreachables(fn) fn.finishBody() + b.blocksets = fn.blocksets + fn.functionBody = nil } -// buildFuncDecl builds SSA code for the function or method declared +// buildFuncDecl builds IR code for the function or method declared // by decl in package pkg. // func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { @@ -2220,13 +2328,13 @@ func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { var v Call v.Call.Value = fn v.setType(types.NewTuple()) - pkg.init.emit(&v) + pkg.init.emit(&v, decl) } + fn.source = decl b.buildFunction(fn) } // Build calls Package.Build for each package in prog. -// Building occurs in parallel unless the BuildSerially mode flag was set. // // Build is intended for whole-program analysis; a typical compiler // need only build a single package. @@ -2234,22 +2342,12 @@ func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) { // Build is idempotent and thread-safe. // func (prog *Program) Build() { - var wg sync.WaitGroup for _, p := range prog.packages { - if prog.mode&BuildSerially != 0 { - p.Build() - } else { - wg.Add(1) - go func(p *Package) { - p.Build() - wg.Done() - }(p) - } + p.Build() } - wg.Wait() } -// Build builds SSA code for all functions and vars in package p. +// Build builds IR code for all functions and vars in package p. // // Precondition: CreatePackage must have been called for all of p's // direct imports (and hence its direct imports must have been @@ -2277,33 +2375,33 @@ func (p *Package) build() { } init := p.init init.startBody() + init.exitBlock() var done *BasicBlock - if p.Prog.mode&BareInits == 0 { - // Make init() skip if package is already initialized. - initguard := p.Var("init$guard") - doinit := init.newBasicBlock("init.start") - done = init.newBasicBlock("init.done") - emitIf(init, emitLoad(init, initguard), done, doinit) - init.currentBlock = doinit - emitStore(init, initguard, vTrue, token.NoPos) + // Make init() skip if package is already initialized. 
+ initguard := p.Var("init$guard") + doinit := init.newBasicBlock("init.start") + done = init.Exit + emitIf(init, emitLoad(init, initguard, nil), done, doinit, nil) + init.currentBlock = doinit + emitStore(init, initguard, emitConst(init, NewConst(constant.MakeBool(true), tBool)), nil) - // Call the init() function of each package we import. - for _, pkg := range p.Pkg.Imports() { - prereq := p.Prog.packages[pkg] - if prereq == nil { - panic(fmt.Sprintf("Package(%q).Build(): unsatisfied import: Program.CreatePackage(%q) was not called", p.Pkg.Path(), pkg.Path())) - } - var v Call - v.Call.Value = prereq.init - v.Call.pos = init.pos - v.setType(types.NewTuple()) - init.emit(&v) + // Call the init() function of each package we import. + for _, pkg := range p.Pkg.Imports() { + prereq := p.Prog.packages[pkg] + if prereq == nil { + panic(fmt.Sprintf("Package(%q).Build(): unsatisfied import: Program.CreatePackage(%q) was not called", p.Pkg.Path(), pkg.Path())) } + var v Call + v.Call.Value = prereq.init + v.setType(types.NewTuple()) + init.emit(&v, nil) } - var b builder + b := builder{ + printFunc: p.printFunc, + } // Initialize package-level vars in correct order. for _, varinit := range p.info.InitOrder { @@ -2315,11 +2413,12 @@ func (p *Package) build() { // 1:1 initialization: var x, y = a(), b() var lval lvalue if v := varinit.Lhs[0]; v.Name() != "_" { - lval = &address{addr: p.values[v].(*Global), pos: v.Pos()} + lval = &address{addr: p.values[v].(*Global)} } else { lval = blank{} } - b.assign(init, lval, varinit.Rhs, true, nil) + // TODO(dh): do emit position information + b.assign(init, lval, varinit.Rhs, true, nil, nil) } else { // n:1 initialization: var x, y := f() tuple := b.exprN(init, varinit.Rhs) @@ -2327,7 +2426,7 @@ func (p *Package) build() { if v.Name() == "_" { continue } - emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i), v.Pos()) + emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i, nil), nil) } } } @@ -2344,11 +2443,7 @@ func (p *Package) build() { } // Finish up init(). - if p.Prog.mode&BareInits == 0 { - emitJump(init, done) - init.currentBlock = done - } - init.emit(new(Return)) + emitJump(init, done, nil) init.finishBody() p.info = nil // We no longer need ASTs or go/types deductions. diff --git a/vendor/honnef.co/go/tools/ssa/const.go b/vendor/honnef.co/go/tools/ir/const.go similarity index 85% rename from vendor/honnef.co/go/tools/ssa/const.go rename to vendor/honnef.co/go/tools/ir/const.go index f95d9e114..7cdf006e8 100644 --- a/vendor/honnef.co/go/tools/ssa/const.go +++ b/vendor/honnef.co/go/tools/ir/const.go @@ -2,14 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir // This file defines the Const SSA value type. import ( "fmt" "go/constant" - "go/token" "go/types" "strconv" ) @@ -18,7 +17,12 @@ import ( // val must be valid according to the specification of Const.Value. // func NewConst(val constant.Value, typ types.Type) *Const { - return &Const{typ, val} + return &Const{ + register: register{ + typ: typ, + }, + Value: val, + } } // intConst returns an 'int' constant that evaluates to i. 
@@ -71,43 +75,25 @@ func zeroConst(t types.Type) *Const { } func (c *Const) RelString(from *types.Package) string { - var s string + var p string if c.Value == nil { - s = "nil" + p = "nil" } else if c.Value.Kind() == constant.String { - s = constant.StringVal(c.Value) + v := constant.StringVal(c.Value) const max = 20 // TODO(adonovan): don't cut a rune in half. - if len(s) > max { - s = s[:max-3] + "..." // abbreviate + if len(v) > max { + v = v[:max-3] + "..." // abbreviate } - s = strconv.Quote(s) + p = strconv.Quote(v) } else { - s = c.Value.String() + p = c.Value.String() } - return s + ":" + relType(c.Type(), from) -} - -func (c *Const) Name() string { - return c.RelString(nil) + return fmt.Sprintf("Const <%s> {%s}", relType(c.Type(), from), p) } func (c *Const) String() string { - return c.Name() -} - -func (c *Const) Type() types.Type { - return c.typ -} - -func (c *Const) Referrers() *[]Instruction { - return nil -} - -func (c *Const) Parent() *Function { return nil } - -func (c *Const) Pos() token.Pos { - return token.NoPos + return c.RelString(c.Parent().pkg()) } // IsNil returns true if this constant represents a typed or untyped nil value. @@ -115,8 +101,6 @@ func (c *Const) IsNil() bool { return c.Value == nil } -// TODO(adonovan): move everything below into honnef.co/go/tools/ssa/interp. - // Int64 returns the numeric value of this constant truncated to fit // a signed 64-bit integer. // diff --git a/vendor/honnef.co/go/tools/ssa/create.go b/vendor/honnef.co/go/tools/ir/create.go similarity index 82% rename from vendor/honnef.co/go/tools/ssa/create.go rename to vendor/honnef.co/go/tools/ir/create.go index 85163a0c5..ff81a244b 100644 --- a/vendor/honnef.co/go/tools/ssa/create.go +++ b/vendor/honnef.co/go/tools/ir/create.go @@ -2,9 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir -// This file implements the CREATE phase of SSA construction. +// This file implements the CREATE phase of IR construction. // See builder.go for explanation. import ( @@ -18,9 +18,9 @@ import ( "golang.org/x/tools/go/types/typeutil" ) -// NewProgram returns a new SSA Program. +// NewProgram returns a new IR Program. // -// mode controls diagnostics and checking during SSA construction. +// mode controls diagnostics and checking during IR construction. // func NewProgram(fset *token.FileSet, mode BuilderMode) *Program { prog := &Program{ @@ -75,7 +75,6 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { name: name, object: obj, typ: types.NewPointer(obj.Type()), // address - pos: obj.Pos(), } pkg.values[obj] = g pkg.Members[name] = g @@ -90,16 +89,20 @@ func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) { name: name, object: obj, Signature: sig, - syntax: syntax, - pos: obj.Pos(), Pkg: pkg, Prog: pkg.Prog, } + + fn.source = syntax + fn.initHTML(pkg.printFunc) if syntax == nil { fn.Synthetic = "loaded from gc object file" + } else { + fn.functionBody = new(functionBody) } pkg.values[obj] = fn + pkg.Functions = append(pkg.Functions, fn) if sig.Recv() == nil { pkg.Members[name] = fn // package-level function } @@ -152,35 +155,39 @@ func membersFromDecl(pkg *Package, decl ast.Decl) { } } -// CreatePackage constructs and returns an SSA Package from the +// CreatePackage constructs and returns an IR Package from the // specified type-checked, error-free file ASTs, and populates its // Members mapping. 
// // importable determines whether this package should be returned by a // subsequent call to ImportedPackage(pkg.Path()). // -// The real work of building SSA form for each function is not done +// The real work of building IR form for each function is not done // until a subsequent call to Package.Build(). // func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package { p := &Package{ - Prog: prog, - Members: make(map[string]Member), - values: make(map[types.Object]Value), - Pkg: pkg, - info: info, // transient (CREATE and BUILD phases) - files: files, // transient (CREATE and BUILD phases) + Prog: prog, + Members: make(map[string]Member), + values: make(map[types.Object]Value), + Pkg: pkg, + info: info, // transient (CREATE and BUILD phases) + files: files, // transient (CREATE and BUILD phases) + printFunc: prog.PrintFunc, } // Add init() function. p.init = &Function{ - name: "init", - Signature: new(types.Signature), - Synthetic: "package initializer", - Pkg: p, - Prog: prog, + name: "init", + Signature: new(types.Signature), + Synthetic: "package initializer", + Pkg: p, + Prog: prog, + functionBody: new(functionBody), } + p.init.initHTML(prog.PrintFunc) p.Members[p.init.name] = p.init + p.Functions = append(p.Functions, p.init) // CREATE phase. // Allocate all package members: vars, funcs, consts and types. @@ -209,15 +216,13 @@ func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info * } } - if prog.mode&BareInits == 0 { - // Add initializer guard variable. - initguard := &Global{ - Pkg: p, - name: "init$guard", - typ: types.NewPointer(tBool), - } - p.Members[initguard.Name()] = initguard + // Add initializer guard variable. + initguard := &Global{ + Pkg: p, + name: "init$guard", + typ: types.NewPointer(tBool), } + p.Members[initguard.Name()] = initguard if prog.mode&GlobalDebug != 0 { p.SetDebugMode(true) @@ -260,10 +265,10 @@ func (prog *Program) AllPackages() []*Package { // // TODO(adonovan): rethink this function and the "importable" concept; // most packages are importable. This function assumes that all -// types.Package.Path values are unique within the ssa.Program, which is +// types.Package.Path values are unique within the ir.Program, which is // false---yet this function remains very convenient. // Clients should use (*Program).Package instead where possible. -// SSA doesn't really need a string-keyed map of packages. +// IR doesn't really need a string-keyed map of packages. // func (prog *Program) ImportedPackage(path string) *Package { return prog.imported[path] diff --git a/vendor/honnef.co/go/tools/ssa/doc.go b/vendor/honnef.co/go/tools/ir/doc.go similarity index 74% rename from vendor/honnef.co/go/tools/ssa/doc.go rename to vendor/honnef.co/go/tools/ir/doc.go index 0f71fda00..a5f42e4f4 100644 --- a/vendor/honnef.co/go/tools/ssa/doc.go +++ b/vendor/honnef.co/go/tools/ir/doc.go @@ -2,36 +2,34 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package ssa defines a representation of the elements of Go programs +// Package ir defines a representation of the elements of Go programs // (packages, types, functions, variables and constants) using a -// static single-assignment (SSA) form intermediate representation +// static single-information (SSI) form intermediate representation // (IR) for the bodies of functions. // // THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE. 
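create.go keeps the two-phase CREATE/BUILD flow of the old ssa package: CreatePackage allocates members, Package.Build and Program.Build fill in function bodies. A rough usage sketch against the signatures visible in this diff (NewProgram, CreatePackage, Build, AllPackages); it assumes the go/packages loader and the vendored honnef.co/go/tools/ir import path, and in practice the irutil helpers mentioned a few lines below do this wiring for you:

// Illustrative only: wire up the CREATE and BUILD phases by hand.
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
	"honnef.co/go/tools/ir"
)

func main() {
	cfg := &packages.Config{Mode: packages.LoadAllSyntax}
	initial, err := packages.Load(cfg, "fmt")
	if err != nil || len(initial) == 0 {
		log.Fatalf("load failed: %v", err)
	}
	prog := ir.NewProgram(initial[0].Fset, 0) // 0: default BuilderMode

	// CreatePackage must have run for a package's imports before that
	// package is built; visiting the import graph in postorder
	// (dependencies first) satisfies that precondition.
	packages.Visit(initial, nil, func(p *packages.Package) {
		if p.Types != nil && !p.IllTyped {
			prog.CreatePackage(p.Types, p.Syntax, p.TypesInfo, true)
		}
	})

	prog.Build() // BUILD phase: emit IR for every function body
	for _, p := range prog.AllPackages() {
		fmt.Println(p.Pkg.Path())
	}
}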
// -// For an introduction to SSA form, see +// For an introduction to SSA form, upon which SSI builds, see // http://en.wikipedia.org/wiki/Static_single_assignment_form. // This page provides a broader reading list: // http://www.dcs.gla.ac.uk/~jsinger/ssa.html. // -// The level of abstraction of the SSA form is intentionally close to +// For an introduction to SSI form, see The static single information +// form by C. Scott Ananian. +// +// The level of abstraction of the IR form is intentionally close to // the source language to facilitate construction of source analysis // tools. It is not intended for machine code generation. // -// All looping, branching and switching constructs are replaced with -// unstructured control flow. Higher-level control flow constructs -// such as multi-way branch can be reconstructed as needed; see -// ssautil.Switches() for an example. -// -// The simplest way to create the SSA representation of a package is +// The simplest way to create the IR of a package is // to load typed syntax trees using golang.org/x/tools/go/packages, then -// invoke the ssautil.Packages helper function. See ExampleLoadPackages +// invoke the irutil.Packages helper function. See ExampleLoadPackages // and ExampleWholeProgram for examples. -// The resulting ssa.Program contains all the packages and their -// members, but SSA code is not created for function bodies until a +// The resulting ir.Program contains all the packages and their +// members, but IR code is not created for function bodies until a // subsequent call to (*Package).Build or (*Program).Build. // -// The builder initially builds a naive SSA form in which all local +// The builder initially builds a naive IR form in which all local // variables are addresses of stack locations with explicit loads and // stores. Registerisation of eligible locals and φ-node insertion // using dominance and dataflow are then performed as a second pass @@ -44,7 +42,7 @@ // - Member: a named member of a Go package. // - Value: an expression that yields a value. // - Instruction: a statement that consumes values and performs computation. -// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph) +// - Node: a Value or Instruction (emphasizing its membership in the IR value graph) // // A computation that yields a result implements both the Value and // Instruction interfaces. The following table shows for each @@ -53,47 +51,53 @@ // Value? Instruction? Member? // *Alloc ✔ ✔ // *BinOp ✔ ✔ +// *BlankStore ✔ // *Builtin ✔ // *Call ✔ ✔ // *ChangeInterface ✔ ✔ // *ChangeType ✔ ✔ -// *Const ✔ +// *Const ✔ ✔ // *Convert ✔ ✔ // *DebugRef ✔ -// *Defer ✔ +// *Defer ✔ ✔ // *Extract ✔ ✔ // *Field ✔ ✔ // *FieldAddr ✔ ✔ // *FreeVar ✔ // *Function ✔ ✔ (func) // *Global ✔ ✔ (var) -// *Go ✔ +// *Go ✔ ✔ // *If ✔ // *Index ✔ ✔ // *IndexAddr ✔ ✔ // *Jump ✔ -// *Lookup ✔ ✔ +// *Load ✔ ✔ // *MakeChan ✔ ✔ // *MakeClosure ✔ ✔ // *MakeInterface ✔ ✔ // *MakeMap ✔ ✔ // *MakeSlice ✔ ✔ -// *MapUpdate ✔ +// *MapLookup ✔ ✔ +// *MapUpdate ✔ ✔ // *NamedConst ✔ (const) // *Next ✔ ✔ // *Panic ✔ -// *Parameter ✔ +// *Parameter ✔ ✔ // *Phi ✔ ✔ // *Range ✔ ✔ +// *Recv ✔ ✔ // *Return ✔ // *RunDefers ✔ // *Select ✔ ✔ -// *Send ✔ +// *Send ✔ ✔ +// *Sigma ✔ ✔ // *Slice ✔ ✔ -// *Store ✔ +// *Store ✔ ✔ +// *StringLookup ✔ ✔ // *Type ✔ (type) // *TypeAssert ✔ ✔ // *UnOp ✔ ✔ +// *Unreachable ✔ // // Other key types in this package include: Program, Package, Function // and BasicBlock. @@ -102,7 +106,7 @@ // resolved internally, i.e. 
it does not rely on the names of Values, // Packages, Functions, Types or BasicBlocks for the correct // interpretation of the program. Only the identities of objects and -// the topology of the SSA and type graphs are semantically +// the topology of the IR and type graphs are semantically // significant. (There is one exception: Ids, used to identify field // and method names, contain strings.) Avoidance of name-based // operations simplifies the implementation of subsequent passes and @@ -111,7 +115,7 @@ // either accurate or unambiguous. The public API exposes a number of // name-based maps for client convenience. // -// The ssa/ssautil package provides various utilities that depend only +// The ir/irutil package provides various utilities that depend only // on the public API of this package. // // TODO(adonovan): Consider the exceptional control-flow implications @@ -120,6 +124,6 @@ // TODO(adonovan): write a how-to document for all the various cases // of trying to determine corresponding elements across the four // domains of source locations, ast.Nodes, types.Objects, -// ssa.Values/Instructions. +// ir.Values/Instructions. // -package ssa // import "honnef.co/go/tools/ssa" +package ir // import "honnef.co/go/tools/ir" diff --git a/vendor/honnef.co/go/tools/ssa/dom.go b/vendor/honnef.co/go/tools/ir/dom.go similarity index 51% rename from vendor/honnef.co/go/tools/ssa/dom.go rename to vendor/honnef.co/go/tools/ir/dom.go index a036be87c..08c147df9 100644 --- a/vendor/honnef.co/go/tools/ssa/dom.go +++ b/vendor/honnef.co/go/tools/ir/dom.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir // This file defines algorithms related to dominance. @@ -20,6 +20,7 @@ package ssa import ( "bytes" "fmt" + "io" "math/big" "os" "sort" @@ -27,8 +28,7 @@ import ( // Idom returns the block that immediately dominates b: // its parent in the dominator tree, if any. -// Neither the entry node (b.Index==0) nor recover node -// (b==b.Parent().Recover()) have a parent. +// The entry node (b.Index==0) does not have a parent. // func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom } @@ -66,144 +66,211 @@ type domInfo struct { pre, post int32 // pre- and post-order numbering within domtree } -// ltState holds the working state for Lengauer-Tarjan algorithm -// (during which domInfo.pre is repurposed for CFG DFS preorder number). -type ltState struct { - // Each slice is indexed by b.Index. - sdom []*BasicBlock // b's semidominator - parent []*BasicBlock // b's parent in DFS traversal of CFG - ancestor []*BasicBlock // b's ancestor with least sdom -} - -// dfs implements the depth-first search part of the LT algorithm. -func (lt *ltState) dfs(v *BasicBlock, i int32, preorder []*BasicBlock) int32 { - preorder[i] = v - v.dom.pre = i // For now: DFS preorder of spanning tree of CFG - i++ - lt.sdom[v.Index] = v - lt.link(nil, v) - for _, w := range v.Succs { - if lt.sdom[w.Index] == nil { - lt.parent[w.Index] = v - i = lt.dfs(w, i, preorder) - } - } - return i -} - -// eval implements the EVAL part of the LT algorithm. -func (lt *ltState) eval(v *BasicBlock) *BasicBlock { - // TODO(adonovan): opt: do path compression per simple LT. - u := v - for ; lt.ancestor[v.Index] != nil; v = lt.ancestor[v.Index] { - if lt.sdom[v.Index].dom.pre < lt.sdom[u.Index].dom.pre { - u = v - } - } - return u -} - -// link implements the LINK part of the LT algorithm. 
-func (lt *ltState) link(v, w *BasicBlock) { - lt.ancestor[w.Index] = v -} - // buildDomTree computes the dominator tree of f using the LT algorithm. // Precondition: all blocks are reachable (e.g. optimizeBlocks has been run). // -func buildDomTree(f *Function) { +func buildDomTree(fn *Function) { // The step numbers refer to the original LT paper; the // reordering is due to Georgiadis. // Clear any previous domInfo. - for _, b := range f.Blocks { + for _, b := range fn.Blocks { b.dom = domInfo{} } - n := len(f.Blocks) - // Allocate space for 5 contiguous [n]*BasicBlock arrays: - // sdom, parent, ancestor, preorder, buckets. - space := make([]*BasicBlock, 5*n) - lt := ltState{ - sdom: space[0:n], - parent: space[n : 2*n], - ancestor: space[2*n : 3*n], - } + idoms := make([]*BasicBlock, len(fn.Blocks)) - // Step 1. Number vertices by depth-first preorder. - preorder := space[3*n : 4*n] - root := f.Blocks[0] - prenum := lt.dfs(root, 0, preorder) - recover := f.Recover - if recover != nil { - lt.dfs(recover, prenum, preorder) + order := make([]*BasicBlock, 0, len(fn.Blocks)) + seen := fn.blockset(0) + var dfs func(b *BasicBlock) + dfs = func(b *BasicBlock) { + if !seen.Add(b) { + return + } + for _, succ := range b.Succs { + dfs(succ) + } + if fn.fakeExits.Has(b) { + dfs(fn.Exit) + } + order = append(order, b) + b.post = len(order) - 1 } + dfs(fn.Blocks[0]) - buckets := space[4*n : 5*n] - copy(buckets, preorder) + for i := 0; i < len(order)/2; i++ { + o := len(order) - i - 1 + order[i], order[o] = order[o], order[i] + } - // In reverse preorder... - for i := int32(n) - 1; i > 0; i-- { - w := preorder[i] + idoms[fn.Blocks[0].Index] = fn.Blocks[0] + changed := true + for changed { + changed = false + // iterate over all nodes in reverse postorder, except for the + // entry node + for _, b := range order[1:] { + var newIdom *BasicBlock + do := func(p *BasicBlock) { + if idoms[p.Index] == nil { + return + } + if newIdom == nil { + newIdom = p + } else { + finger1 := p + finger2 := newIdom + for finger1 != finger2 { + for finger1.post < finger2.post { + finger1 = idoms[finger1.Index] + } + for finger2.post < finger1.post { + finger2 = idoms[finger2.Index] + } + } + newIdom = finger1 + } + } + for _, p := range b.Preds { + do(p) + } + if b == fn.Exit { + for _, p := range fn.Blocks { + if fn.fakeExits.Has(p) { + do(p) + } + } + } - // Step 3. Implicitly define the immediate dominator of each node. - for v := buckets[i]; v != w; v = buckets[v.dom.pre] { - u := lt.eval(v) - if lt.sdom[u.Index].dom.pre < i { - v.dom.idom = u - } else { - v.dom.idom = w + if idoms[b.Index] != newIdom { + idoms[b.Index] = newIdom + changed = true } } + } - // Step 2. Compute the semidominators of all nodes. 
- lt.sdom[w.Index] = lt.parent[w.Index] - for _, v := range w.Preds { - u := lt.eval(v) - if lt.sdom[u.Index].dom.pre < lt.sdom[w.Index].dom.pre { - lt.sdom[w.Index] = lt.sdom[u.Index] - } + for i, b := range idoms { + fn.Blocks[i].dom.idom = b + if b == nil { + // malformed CFG + continue } + if i == b.Index { + continue + } + b.dom.children = append(b.dom.children, fn.Blocks[i]) + } - lt.link(lt.parent[w.Index], w) + numberDomTree(fn.Blocks[0], 0, 0) - if lt.parent[w.Index] == lt.sdom[w.Index] { - w.dom.idom = lt.parent[w.Index] - } else { - buckets[i] = buckets[lt.sdom[w.Index].dom.pre] - buckets[lt.sdom[w.Index].dom.pre] = w + // printDomTreeDot(os.Stderr, fn) // debugging + // printDomTreeText(os.Stderr, root, 0) // debugging + + if fn.Prog.mode&SanityCheckFunctions != 0 { + sanityCheckDomTree(fn) + } +} + +// buildPostDomTree is like buildDomTree, but builds the post-dominator tree instead. +func buildPostDomTree(fn *Function) { + // The step numbers refer to the original LT paper; the + // reordering is due to Georgiadis. + + // Clear any previous domInfo. + for _, b := range fn.Blocks { + b.pdom = domInfo{} + } + + idoms := make([]*BasicBlock, len(fn.Blocks)) + + order := make([]*BasicBlock, 0, len(fn.Blocks)) + seen := fn.blockset(0) + var dfs func(b *BasicBlock) + dfs = func(b *BasicBlock) { + if !seen.Add(b) { + return } + for _, pred := range b.Preds { + dfs(pred) + } + if b == fn.Exit { + for _, p := range fn.Blocks { + if fn.fakeExits.Has(p) { + dfs(p) + } + } + } + order = append(order, b) + b.post = len(order) - 1 } + dfs(fn.Exit) - // The final 'Step 3' is now outside the loop. - for v := buckets[0]; v != root; v = buckets[v.dom.pre] { - v.dom.idom = root + for i := 0; i < len(order)/2; i++ { + o := len(order) - i - 1 + order[i], order[o] = order[o], order[i] } - // Step 4. Explicitly define the immediate dominator of each - // node, in preorder. - for _, w := range preorder[1:] { - if w == root || w == recover { - w.dom.idom = nil - } else { - if w.dom.idom != lt.sdom[w.Index] { - w.dom.idom = w.dom.idom.dom.idom + idoms[fn.Exit.Index] = fn.Exit + changed := true + for changed { + changed = false + // iterate over all nodes in reverse postorder, except for the + // exit node + for _, b := range order[1:] { + var newIdom *BasicBlock + do := func(p *BasicBlock) { + if idoms[p.Index] == nil { + return + } + if newIdom == nil { + newIdom = p + } else { + finger1 := p + finger2 := newIdom + for finger1 != finger2 { + for finger1.post < finger2.post { + finger1 = idoms[finger1.Index] + } + for finger2.post < finger1.post { + finger2 = idoms[finger2.Index] + } + } + newIdom = finger1 + } + } + for _, p := range b.Succs { + do(p) + } + if fn.fakeExits.Has(b) { + do(fn.Exit) + } + + if idoms[b.Index] != newIdom { + idoms[b.Index] = newIdom + changed = true } - // Calculate Children relation as inverse of Idom. 
- w.dom.idom.dom.children = append(w.dom.idom.dom.children, w) } } - pre, post := numberDomTree(root, 0, 0) - if recover != nil { - numberDomTree(recover, pre, post) + for i, b := range idoms { + fn.Blocks[i].pdom.idom = b + if b == nil { + // malformed CFG + continue + } + if i == b.Index { + continue + } + b.pdom.children = append(b.pdom.children, fn.Blocks[i]) } - // printDomTreeDot(os.Stderr, f) // debugging - // printDomTreeText(os.Stderr, root, 0) // debugging + numberPostDomTree(fn.Exit, 0, 0) - if f.Prog.mode&SanityCheckFunctions != 0 { - sanityCheckDomTree(f) + // printPostDomTreeDot(os.Stderr, fn) // debugging + // printPostDomTreeText(os.Stderr, fn.Exit, 0) // debugging + + if fn.Prog.mode&SanityCheckFunctions != 0 { // XXX + sanityCheckDomTree(fn) // XXX } } @@ -222,6 +289,21 @@ func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) { return pre, post } +// numberPostDomTree sets the pre- and post-order numbers of a depth-first +// traversal of the post-dominator tree rooted at v. These are used to +// answer post-dominance queries in constant time. +// +func numberPostDomTree(v *BasicBlock, pre, post int32) (int32, int32) { + v.pdom.pre = pre + pre++ + for _, child := range v.pdom.children { + pre, post = numberPostDomTree(child, pre, post) + } + v.pdom.post = post + post++ + return pre, post +} + // Testing utilities ---------------------------------------- // sanityCheckDomTree checks the correctness of the dominator tree @@ -243,8 +325,8 @@ func sanityCheckDomTree(f *Function) { all.Set(one).Lsh(&all, uint(n)).Sub(&all, one) // Initialization. - for i, b := range f.Blocks { - if i == 0 || b == f.Recover { + for i := range f.Blocks { + if i == 0 { // A root is dominated only by itself. D[i].SetBit(&D[0], 0, 1) } else { @@ -258,7 +340,7 @@ func sanityCheckDomTree(f *Function) { for changed := true; changed; { changed = false for i, b := range f.Blocks { - if i == 0 || b == f.Recover { + if i == 0 { continue } // Compute intersection across predecessors. @@ -267,6 +349,13 @@ func sanityCheckDomTree(f *Function) { for _, pred := range b.Preds { x.And(&x, &D[pred.Index]) } + if b == f.Exit { + for _, p := range f.Blocks { + if f.fakeExits.Has(p) { + x.And(&x, &D[p.Index]) + } + } + } x.SetBit(&x, i, 1) // a block always dominates itself. if D[i].Cmp(&x) != 0 { D[i].Set(&x) @@ -276,14 +365,10 @@ func sanityCheckDomTree(f *Function) { } // Check the entire relation. O(n^2). - // The Recover block (if any) must be treated specially so we skip it. ok := true for i := 0; i < n; i++ { for j := 0; j < n; j++ { b, c := f.Blocks[i], f.Blocks[j] - if c == f.Recover { - continue - } actual := b.Dominates(c) expected := D[j].Bit(i) == 1 if actual != expected { @@ -321,7 +406,7 @@ func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) { // printDomTreeDot prints the dominator tree of f in AT&T GraphViz // (.dot) format. //lint:ignore U1000 used during debugging -func printDomTreeDot(buf *bytes.Buffer, f *Function) { +func printDomTreeDot(buf io.Writer, f *Function) { fmt.Fprintln(buf, "//", f) fmt.Fprintln(buf, "digraph domtree {") for i, b := range f.Blocks { @@ -341,3 +426,36 @@ func printDomTreeDot(buf *bytes.Buffer, f *Function) { } fmt.Fprintln(buf, "}") } + +// printDomTree prints the dominator tree as text, using indentation. 
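buildDomTree (and the new buildPostDomTree) replace the Lengauer-Tarjan machinery with the iterative two-finger scheme of Cooper, Harvey and Kennedy ("A Simple, Fast Dominance Algorithm"): number blocks in reverse postorder, then repeatedly intersect the candidate dominators of each block's predecessors until nothing changes. The intersection step as a standalone sketch (types and names here are illustrative, not the package's):

// intersect walks the two "fingers" up the partially built dominator tree,
// always advancing the one with the smaller postorder number, until they
// meet at the nearest common dominator.
package main

import "fmt"

type block struct {
	name string
	post int    // postorder number; higher means earlier in reverse postorder
	idom *block // immediate dominator computed so far (root points at itself)
}

func intersect(b1, b2 *block) *block {
	f1, f2 := b1, b2
	for f1 != f2 {
		for f1.post < f2.post {
			f1 = f1.idom
		}
		for f2.post < f1.post {
			f2 = f2.idom
		}
	}
	return f1
}

func main() {
	entry := &block{name: "entry", post: 3}
	entry.idom = entry // by convention the root dominates itself
	left := &block{name: "left", post: 2, idom: entry}
	right := &block{name: "right", post: 1, idom: entry}
	fmt.Println(intersect(left, right).name) // entry
}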
+//lint:ignore U1000 used during debugging +func printPostDomTreeText(buf io.Writer, v *BasicBlock, indent int) { + fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v) + for _, child := range v.pdom.children { + printPostDomTreeText(buf, child, indent+1) + } +} + +// printDomTreeDot prints the dominator tree of f in AT&T GraphViz +// (.dot) format. +//lint:ignore U1000 used during debugging +func printPostDomTreeDot(buf io.Writer, f *Function) { + fmt.Fprintln(buf, "//", f) + fmt.Fprintln(buf, "digraph pdomtree {") + for _, b := range f.Blocks { + v := b.pdom + fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post) + // TODO(adonovan): improve appearance of edges + // belonging to both dominator tree and CFG. + + // Dominator tree edge. + if b != f.Exit { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.pdom.pre, v.pre) + } + // CFG edges. + for _, pred := range b.Preds { + fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.pdom.pre, v.pre) + } + } + fmt.Fprintln(buf, "}") +} diff --git a/vendor/honnef.co/go/tools/ssa/emit.go b/vendor/honnef.co/go/tools/ir/emit.go similarity index 75% rename from vendor/honnef.co/go/tools/ssa/emit.go rename to vendor/honnef.co/go/tools/ir/emit.go index 6bf9ec32d..5fa137af9 100644 --- a/vendor/honnef.co/go/tools/ssa/emit.go +++ b/vendor/honnef.co/go/tools/ir/emit.go @@ -2,13 +2,14 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir -// Helpers for emitting SSA instructions. +// Helpers for emitting IR instructions. import ( "fmt" "go/ast" + "go/constant" "go/token" "go/types" ) @@ -16,24 +17,32 @@ import ( // emitNew emits to f a new (heap Alloc) instruction allocating an // object of type typ. pos is the optional source location. // -func emitNew(f *Function, typ types.Type, pos token.Pos) *Alloc { +func emitNew(f *Function, typ types.Type, source ast.Node) *Alloc { v := &Alloc{Heap: true} v.setType(types.NewPointer(typ)) - v.setPos(pos) - f.emit(v) + f.emit(v, source) return v } // emitLoad emits to f an instruction to load the address addr into a // new temporary, and returns the value so defined. // -func emitLoad(f *Function, addr Value) *UnOp { - v := &UnOp{Op: token.MUL, X: addr} +func emitLoad(f *Function, addr Value, source ast.Node) *Load { + v := &Load{X: addr} v.setType(deref(addr.Type())) - f.emit(v) + f.emit(v, source) return v } +func emitRecv(f *Function, ch Value, commaOk bool, typ types.Type, source ast.Node) Value { + recv := &Recv{ + Chan: ch, + CommaOk: commaOk, + } + recv.setType(typ) + return f.emit(recv, source) +} + // emitDebugRef emits to f a DebugRef pseudo-instruction associating // expression e with value v. // @@ -61,7 +70,7 @@ func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) { Expr: e, IsAddr: isAddr, object: obj, - }) + }, nil) } // emitArith emits to f code to compute the binary operation op(x, y) @@ -69,19 +78,19 @@ func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) { // (Use emitCompare() for comparisons and Builder.logicalBinop() for // non-eager operations.) // -func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token.Pos) Value { +func emitArith(f *Function, op token.Token, x, y Value, t types.Type, source ast.Node) Value { switch op { case token.SHL, token.SHR: - x = emitConv(f, x, t) + x = emitConv(f, x, t, source) // y may be signed or an 'untyped' constant. // TODO(adonovan): whence signed values? 
if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUnsigned == 0 { - y = emitConv(f, y, types.Typ[types.Uint64]) + y = emitConv(f, y, types.Typ[types.Uint64], source) } case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT: - x = emitConv(f, x, t) - y = emitConv(f, y, t) + x = emitConv(f, x, t, source) + y = emitConv(f, y, t, source) default: panic("illegal op in emitArith: " + op.String()) @@ -92,15 +101,14 @@ func emitArith(f *Function, op token.Token, x, y Value, t types.Type, pos token. X: x, Y: y, } - v.setPos(pos) v.setType(t) - return f.emit(v) + return f.emit(v, source) } // emitCompare emits to f code compute the boolean result of // comparison comparison 'x op y'. // -func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { +func emitCompare(f *Function, op token.Token, x, y Value, source ast.Node) Value { xt := x.Type().Underlying() yt := y.Type().Underlying() @@ -111,7 +119,7 @@ func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { // if e==true { ... } // even in the case when e's type is an interface. // TODO(adonovan): opt: generalise to x==true, false!=y, etc. - if x == vTrue && op == token.EQL { + if x, ok := x.(*Const); ok && op == token.EQL && x.Value != nil && x.Value.Kind() == constant.Bool && constant.BoolVal(x.Value) { if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 { return y } @@ -120,13 +128,13 @@ func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { if types.Identical(xt, yt) { // no conversion necessary } else if _, ok := xt.(*types.Interface); ok { - y = emitConv(f, y, x.Type()) + y = emitConv(f, y, x.Type(), source) } else if _, ok := yt.(*types.Interface); ok { - x = emitConv(f, x, y.Type()) + x = emitConv(f, x, y.Type(), source) } else if _, ok := x.(*Const); ok { - x = emitConv(f, x, y.Type()) + x = emitConv(f, x, y.Type(), source) } else if _, ok := y.(*Const); ok { - y = emitConv(f, y, x.Type()) + y = emitConv(f, y, x.Type(), source) //lint:ignore SA9003 no-op } else { // other cases, e.g. channels. No-op. @@ -137,9 +145,8 @@ func emitCompare(f *Function, op token.Token, x, y Value, pos token.Pos) Value { X: x, Y: y, } - v.setPos(pos) v.setType(tBool) - return f.emit(v) + return f.emit(v, source) } // isValuePreserving returns true if a conversion from ut_src to @@ -171,7 +178,7 @@ func isValuePreserving(ut_src, ut_dst types.Type) bool { // by language assignability rules in assignments, parameter passing, // etc. Conversions cannot fail dynamically. // -func emitConv(f *Function, val Value, typ types.Type) Value { +func emitConv(f *Function, val Value, typ types.Type, source ast.Node) Value { t_src := val.Type() // Identical types? Conversion is a no-op. @@ -186,7 +193,7 @@ func emitConv(f *Function, val Value, typ types.Type) Value { if isValuePreserving(ut_src, ut_dst) { c := &ChangeType{X: val} c.setType(typ) - return f.emit(c) + return f.emit(c, source) } // Conversion to, or construction of a value of, an interface type? @@ -195,23 +202,23 @@ func emitConv(f *Function, val Value, typ types.Type) Value { if _, ok := ut_src.(*types.Interface); ok { c := &ChangeInterface{X: val} c.setType(typ) - return f.emit(c) + return f.emit(c, source) } // Untyped nil constant? Return interface-typed nil constant. if ut_src == tUntypedNil { - return nilConst(typ) + return emitConst(f, nilConst(typ)) } // Convert (non-nil) "untyped" literals to their default type. 
if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 { - val = emitConv(f, val, DefaultType(ut_src)) + val = emitConv(f, val, types.Default(ut_src), source) } f.Pkg.Prog.needMethodsOf(val.Type()) mi := &MakeInterface{X: val} mi.setType(typ) - return f.emit(mi) + return f.emit(mi, source) } // Conversion of a compile-time constant value? @@ -222,7 +229,7 @@ func emitConv(f *Function, val Value, typ types.Type) Value { // constant of the destination type and // (initially) the same abstract value. // We don't truncate the value yet. - return NewConst(c.Value, typ) + return emitConst(f, NewConst(c.Value, typ)) } // We're converting from constant to non-constant type, @@ -237,7 +244,7 @@ func emitConv(f *Function, val Value, typ types.Type) Value { if ok1 || ok2 { c := &Convert{X: val} c.setType(typ) - return f.emit(c) + return f.emit(c, source) } panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ)) @@ -246,72 +253,75 @@ func emitConv(f *Function, val Value, typ types.Type) Value { // emitStore emits to f an instruction to store value val at location // addr, applying implicit conversions as required by assignability rules. // -func emitStore(f *Function, addr, val Value, pos token.Pos) *Store { +func emitStore(f *Function, addr, val Value, source ast.Node) *Store { s := &Store{ Addr: addr, - Val: emitConv(f, val, deref(addr.Type())), - pos: pos, + Val: emitConv(f, val, deref(addr.Type()), source), } - f.emit(s) + // make sure we call getMem after the call to emitConv, which may + // itself update the memory state + f.emit(s, source) return s } // emitJump emits to f a jump to target, and updates the control-flow graph. // Postcondition: f.currentBlock is nil. // -func emitJump(f *Function, target *BasicBlock) { +func emitJump(f *Function, target *BasicBlock, source ast.Node) *Jump { b := f.currentBlock - b.emit(new(Jump)) + j := new(Jump) + b.emit(j, source) addEdge(b, target) f.currentBlock = nil + return j } // emitIf emits to f a conditional jump to tblock or fblock based on // cond, and updates the control-flow graph. // Postcondition: f.currentBlock is nil. // -func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock) { +func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock, source ast.Node) *If { b := f.currentBlock - b.emit(&If{Cond: cond}) + stmt := &If{Cond: cond} + b.emit(stmt, source) addEdge(b, tblock) addEdge(b, fblock) f.currentBlock = nil + return stmt } // emitExtract emits to f an instruction to extract the index'th // component of tuple. It returns the extracted value. // -func emitExtract(f *Function, tuple Value, index int) Value { +func emitExtract(f *Function, tuple Value, index int, source ast.Node) Value { e := &Extract{Tuple: tuple, Index: index} e.setType(tuple.Type().(*types.Tuple).At(index).Type()) - return f.emit(e) + return f.emit(e, source) } // emitTypeAssert emits to f a type assertion value := x.(t) and // returns the value. x.Type() must be an interface. // -func emitTypeAssert(f *Function, x Value, t types.Type, pos token.Pos) Value { +func emitTypeAssert(f *Function, x Value, t types.Type, source ast.Node) Value { a := &TypeAssert{X: x, AssertedType: t} - a.setPos(pos) a.setType(t) - return f.emit(a) + return f.emit(a, source) } // emitTypeTest emits to f a type test value,ok := x.(t) and returns // a (value, ok) tuple. x.Type() must be an interface. 
// -func emitTypeTest(f *Function, x Value, t types.Type, pos token.Pos) Value { +func emitTypeTest(f *Function, x Value, t types.Type, source ast.Node) Value { a := &TypeAssert{ X: x, AssertedType: t, CommaOk: true, } - a.setPos(pos) a.setType(types.NewTuple( newVar("value", t), varOk, )) - return f.emit(a) + return f.emit(a, source) } // emitTailCall emits to f a function call in tail position. The @@ -320,7 +330,7 @@ func emitTypeTest(f *Function, x Value, t types.Type, pos token.Pos) Value { // Precondition: f does/will not use deferred procedure calls. // Postcondition: f.currentBlock is nil. // -func emitTailCall(f *Function, call *Call) { +func emitTailCall(f *Function, call *Call, source ast.Node) { tresults := f.Signature.Results() nr := tresults.Len() if nr == 1 { @@ -328,7 +338,7 @@ func emitTailCall(f *Function, call *Call) { } else { call.typ = tresults } - tuple := f.emit(call) + tuple := f.emit(call, source) var ret Return switch nr { case 0: @@ -337,7 +347,7 @@ func emitTailCall(f *Function, call *Call) { ret.Results = []Value{tuple} default: for i := 0; i < nr; i++ { - v := emitExtract(f, tuple, i) + v := emitExtract(f, tuple, i, source) // TODO(adonovan): in principle, this is required: // v = emitConv(f, o.Type, f.Signature.Results[i].Type) // but in practice emitTailCall is only used when @@ -345,7 +355,11 @@ func emitTailCall(f *Function, call *Call) { ret.Results = append(ret.Results, v) } } - f.emit(&ret) + + f.Exit = f.newBasicBlock("exit") + emitJump(f, f.Exit, source) + f.currentBlock = f.Exit + f.emit(&ret, source) f.currentBlock = nil } @@ -357,7 +371,7 @@ func emitTailCall(f *Function, call *Call) { // a field; if it is the value of a struct, the result will be the // value of a field. // -func emitImplicitSelections(f *Function, v Value, indices []int) Value { +func emitImplicitSelections(f *Function, v Value, indices []int, source ast.Node) Value { for _, index := range indices { fld := deref(v.Type()).Underlying().(*types.Struct).Field(index) @@ -367,10 +381,10 @@ func emitImplicitSelections(f *Function, v Value, indices []int) Value { Field: index, } instr.setType(types.NewPointer(fld.Type())) - v = f.emit(instr) + v = f.emit(instr, source) // Load the field's value iff indirectly embedded. if isPointer(fld.Type()) { - v = emitLoad(f, v) + v = emitLoad(f, v, source) } } else { instr := &Field{ @@ -378,7 +392,7 @@ func emitImplicitSelections(f *Function, v Value, indices []int) Value { Field: index, } instr.setType(fld.Type()) - v = f.emit(instr) + v = f.emit(instr, source) } } return v @@ -398,21 +412,21 @@ func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast. X: v, Field: index, } - instr.setPos(id.Pos()) + instr.setSource(id) instr.setType(types.NewPointer(fld.Type())) - v = f.emit(instr) + v = f.emit(instr, id) // Load the field's value iff we don't want its address. if !wantAddr { - v = emitLoad(f, v) + v = emitLoad(f, v, id) } } else { instr := &Field{ X: v, Field: index, } - instr.setPos(id.Pos()) + instr.setSource(id) instr.setType(fld.Type()) - v = f.emit(instr) + v = f.emit(instr, id) } emitDebugRef(f, id, v, wantAddr) return v @@ -421,49 +435,16 @@ func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast. // zeroValue emits to f code to produce a zero value of type t, // and returns it. 
// -func zeroValue(f *Function, t types.Type) Value { +func zeroValue(f *Function, t types.Type, source ast.Node) Value { switch t.Underlying().(type) { case *types.Struct, *types.Array: - return emitLoad(f, f.addLocal(t, token.NoPos)) + return emitLoad(f, f.addLocal(t, source), source) default: - return zeroConst(t) + return emitConst(f, zeroConst(t)) } } -// createRecoverBlock emits to f a block of code to return after a -// recovered panic, and sets f.Recover to it. -// -// If f's result parameters are named, the code loads and returns -// their current values, otherwise it returns the zero values of their -// type. -// -// Idempotent. -// -func createRecoverBlock(f *Function) { - if f.Recover != nil { - return // already created - } - saved := f.currentBlock - - f.Recover = f.newBasicBlock("recover") - f.currentBlock = f.Recover - - var results []Value - if f.namedResults != nil { - // Reload NRPs to form value tuple. - for _, r := range f.namedResults { - results = append(results, emitLoad(f, r)) - } - } else { - R := f.Signature.Results() - for i, n := 0, R.Len(); i < n; i++ { - T := R.At(i).Type() - - // Return zero value of each result type. - results = append(results, zeroValue(f, T)) - } - } - f.emit(&Return{Results: results}) - - f.currentBlock = saved +func emitConst(f *Function, c *Const) *Const { + f.consts = append(f.consts, c) + return c } diff --git a/vendor/honnef.co/go/tools/ir/exits.go b/vendor/honnef.co/go/tools/ir/exits.go new file mode 100644 index 000000000..10cda7bb6 --- /dev/null +++ b/vendor/honnef.co/go/tools/ir/exits.go @@ -0,0 +1,271 @@ +package ir + +import ( + "go/types" +) + +func (b *builder) buildExits(fn *Function) { + if obj := fn.Object(); obj != nil { + switch obj.Pkg().Path() { + case "runtime": + switch obj.Name() { + case "exit": + fn.WillExit = true + return + case "throw": + fn.WillExit = true + return + case "Goexit": + fn.WillUnwind = true + return + } + case "github.com/sirupsen/logrus": + switch obj.(*types.Func).FullName() { + case "(*github.com/sirupsen/logrus.Logger).Exit": + // Technically, this method does not unconditionally exit + // the process. It dynamically calls a function stored in + // the logger. If the function is nil, it defaults to + // os.Exit. + // + // The main intent of this method is to terminate the + // process, and that's what the vast majority of people + // will use it for. We'll happily accept some false + // negatives to avoid a lot of false positives. + fn.WillExit = true + return + case "(*github.com/sirupsen/logrus.Logger).Panic", + "(*github.com/sirupsen/logrus.Logger).Panicf", + "(*github.com/sirupsen/logrus.Logger).Panicln": + + // These methods will always panic, but that's not + // statically known from the code alone, because they + // take a detour through the generic Log methods. + fn.WillUnwind = true + return + case "(*github.com/sirupsen/logrus.Entry).Panicf", + "(*github.com/sirupsen/logrus.Entry).Panicln": + + // Entry.Panic has an explicit panic, but Panicf and + // Panicln do not, relying fully on the generic Log + // method. + fn.WillUnwind = true + return + case "(*github.com/sirupsen/logrus.Logger).Log", + "(*github.com/sirupsen/logrus.Logger).Logf", + "(*github.com/sirupsen/logrus.Logger).Logln": + // TODO(dh): we cannot handle these case. Whether they + // exit or unwind depends on the level, which is set + // via the first argument. We don't currently support + // call-site-specific exit information. 
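The special cases above seed two per-function facts: WillExit for calls that terminate the whole process, and WillUnwind for calls that only terminate the goroutine while still running defers. As a rough illustration of the distinction, the snippet below is ordinary Go source, not part of the patch, and the classifications in the comments are what an analysis along these lines aims to produce.

package main

import (
	"fmt"
	"runtime"
)

// fail panics on every path, so the goroutine unwinds: a WillUnwind
// candidate (compare the logrus Panic cases listed above).
func fail(msg string) {
	panic(msg)
}

// runtime.Goexit is one of the explicitly seeded cases above: the
// goroutine terminates, defers still run, and the process keeps going.
func stopWorker() {
	runtime.Goexit()
}

func main() {
	var _ = fail // referenced but not called, to keep the example runnable

	done := make(chan struct{})
	go func() {
		defer close(done) // still runs after Goexit
		stopWorker()
		fmt.Println("never reached")
	}()
	<-done
	fmt.Println("worker goroutine unwound cleanly")
}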
+ } + } + } + + buildDomTree(fn) + + isRecoverCall := func(instr Instruction) bool { + if instr, ok := instr.(*Call); ok { + if builtin, ok := instr.Call.Value.(*Builtin); ok { + if builtin.Name() == "recover" { + return true + } + } + } + return false + } + + // All panics branch to the exit block, which means that if every + // possible path through the function panics, then all + // predecessors of the exit block must panic. + willPanic := true + for _, pred := range fn.Exit.Preds { + if _, ok := pred.Control().(*Panic); !ok { + willPanic = false + } + } + if willPanic { + recovers := false + recoverLoop: + for _, u := range fn.Blocks { + for _, instr := range u.Instrs { + if instr, ok := instr.(*Defer); ok { + call := instr.Call.StaticCallee() + if call == nil { + // not a static call, so we can't be sure the + // deferred call isn't calling recover + recovers = true + break recoverLoop + } + if len(call.Blocks) == 0 { + // external function, we don't know what's + // happening inside it + // + // TODO(dh): this includes functions from + // imported packages, due to how go/analysis + // works. We could introduce another fact, + // like we've done for exiting and unwinding, + // but it doesn't seem worth it. Virtually all + // uses of recover will be in closures. + recovers = true + break recoverLoop + } + for _, y := range call.Blocks { + for _, instr2 := range y.Instrs { + if isRecoverCall(instr2) { + recovers = true + break recoverLoop + } + } + } + } + } + } + if !recovers { + fn.WillUnwind = true + return + } + } + + // TODO(dh): don't check that any specific call dominates the exit + // block. instead, check that all calls combined cover every + // possible path through the function. + exits := NewBlockSet(len(fn.Blocks)) + unwinds := NewBlockSet(len(fn.Blocks)) + for _, u := range fn.Blocks { + for _, instr := range u.Instrs { + if instr, ok := instr.(CallInstruction); ok { + switch instr.(type) { + case *Defer, *Call: + default: + continue + } + if instr.Common().IsInvoke() { + // give up + return + } + var call *Function + switch instr.Common().Value.(type) { + case *Function, *MakeClosure: + call = instr.Common().StaticCallee() + case *Builtin: + // the only builtins that affect control flow are + // panic and recover, and we've already handled + // those + continue + default: + // dynamic dispatch + return + } + // buildFunction is idempotent. if we're part of a + // (mutually) recursive call chain, then buildFunction + // will immediately return, and fn.WillExit will be false. 
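The recover scan above matters because "every predecessor of the exit block panics" is not by itself enough to conclude that the function unwinds: a deferred call may recover and turn the panic into a normal return. A minimal, hypothetical pair (the classifications in the comments describe what this logic is aiming for, not output produced by the patch itself):

package main

import "fmt"

// Both paths reach the exit block via Panic and nothing recovers,
// so the analysis above may mark this function as WillUnwind.
func parseSign(s string) int {
	if s == "+" {
		panic("plus not supported")
	}
	panic("unknown sign: " + s)
}

// Every path still panics, but the deferred closure calls recover,
// so callers can observe a normal return; WillUnwind must stay false.
func safeParseSign(s string) (n int, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("recovered: %v", r)
		}
	}()
	return parseSign(s), nil
}

func main() {
	_, err := safeParseSign("+")
	fmt.Println(err)
}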
+ if call.Package() == fn.Package() { + b.buildFunction(call) + } + dom := u.Dominates(fn.Exit) + if call.WillExit { + if dom { + fn.WillExit = true + return + } + exits.Add(u) + } else if call.WillUnwind { + if dom { + fn.WillUnwind = true + return + } + unwinds.Add(u) + } + } + } + } + + // depth-first search trying to find a path to the exit block that + // doesn't cross any of the blacklisted blocks + seen := NewBlockSet(len(fn.Blocks)) + var findPath func(root *BasicBlock, bl *BlockSet) bool + findPath = func(root *BasicBlock, bl *BlockSet) bool { + if root == fn.Exit { + return true + } + if seen.Has(root) { + return false + } + if bl.Has(root) { + return false + } + seen.Add(root) + for _, succ := range root.Succs { + if findPath(succ, bl) { + return true + } + } + return false + } + + if exits.Num() > 0 { + if !findPath(fn.Blocks[0], exits) { + fn.WillExit = true + return + } + } + if unwinds.Num() > 0 { + seen.Clear() + if !findPath(fn.Blocks[0], unwinds) { + fn.WillUnwind = true + return + } + } +} + +func (b *builder) addUnreachables(fn *Function) { + for _, bb := range fn.Blocks { + for i, instr := range bb.Instrs { + if instr, ok := instr.(*Call); ok { + var call *Function + switch v := instr.Common().Value.(type) { + case *Function: + call = v + case *MakeClosure: + call = v.Fn.(*Function) + } + if call == nil { + continue + } + if call.Package() == fn.Package() { + // make sure we have information on all functions in this package + b.buildFunction(call) + } + if call.WillExit { + // This call will cause the process to terminate. + // Remove remaining instructions in the block and + // replace any control flow with Unreachable. + for _, succ := range bb.Succs { + succ.removePred(bb) + } + bb.Succs = bb.Succs[:0] + + bb.Instrs = bb.Instrs[:i+1] + bb.emit(new(Unreachable), instr.Source()) + addEdge(bb, fn.Exit) + break + } else if call.WillUnwind { + // This call will cause the goroutine to terminate + // and defers to run (i.e. a panic or + // runtime.Goexit). Remove remaining instructions + // in the block and replace any control flow with + // an unconditional jump to the exit block. + for _, succ := range bb.Succs { + succ.removePred(bb) + } + bb.Succs = bb.Succs[:0] + + bb.Instrs = bb.Instrs[:i+1] + bb.emit(new(Jump), instr.Source()) + addEdge(bb, fn.Exit) + break + } + } + } + } +} diff --git a/vendor/honnef.co/go/tools/ssa/func.go b/vendor/honnef.co/go/tools/ir/func.go similarity index 65% rename from vendor/honnef.co/go/tools/ssa/func.go rename to vendor/honnef.co/go/tools/ir/func.go index 222eea641..386d82b67 100644 --- a/vendor/honnef.co/go/tools/ssa/func.go +++ b/vendor/honnef.co/go/tools/ir/func.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir // This file implements the Function and BasicBlock types. @@ -10,6 +10,8 @@ import ( "bytes" "fmt" "go/ast" + "go/constant" + "go/format" "go/token" "go/types" "io" @@ -23,6 +25,29 @@ func addEdge(from, to *BasicBlock) { to.Preds = append(to.Preds, from) } +// Control returns the last instruction in the block. +func (b *BasicBlock) Control() Instruction { + if len(b.Instrs) == 0 { + return nil + } + return b.Instrs[len(b.Instrs)-1] +} + +// SIgmaFor returns the sigma node for v coming from pred. 
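addUnreachables above truncates a block after a call that is known to terminate the process, replacing the old successors with an Unreachable instruction (or, for unwinding calls, a Jump to the exit block). In source terms, this is the transformation that lets later checks treat the code after the hypothetical die call below as dead. die is only a stand-in for any function the builder has marked WillExit.

package main

import (
	"fmt"
	"os"
)

// die stands in for any function the builder has marked WillExit.
func die(msg string) {
	fmt.Fprintln(os.Stderr, msg)
	os.Exit(1)
}

func run(ok bool) {
	if !ok {
		die("giving up")
		// Everything from here to the end of this basic block is dropped
		// by addUnreachables and replaced with an Unreachable instruction.
		fmt.Println("never printed")
	}
	fmt.Println("still reachable when ok is true")
}

func main() {
	run(true)
}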
+func (b *BasicBlock) SigmaFor(v Value, pred *BasicBlock) *Sigma { + for _, instr := range b.Instrs { + sigma, ok := instr.(*Sigma) + if !ok { + // no more sigmas + return nil + } + if sigma.From == pred && sigma.X == v { + return sigma + } + } + return nil +} + // Parent returns the function that contains block b. func (b *BasicBlock) Parent() *Function { return b.parent } @@ -36,7 +61,8 @@ func (b *BasicBlock) String() string { // emit appends an instruction to the current basic block. // If the instruction defines a Value, it is returned. // -func (b *BasicBlock) emit(i Instruction) Value { +func (b *BasicBlock) emit(i Instruction, source ast.Node) Value { + i.setSource(source) i.setBlock(b) b.Instrs = append(b.Instrs, i) v, _ := i.(Value) @@ -54,6 +80,16 @@ func (b *BasicBlock) predIndex(c *BasicBlock) int { panic(fmt.Sprintf("no edge %s -> %s", c, b)) } +// succIndex returns the i such that b.Succs[i] == c or -1 if there is none. +func (b *BasicBlock) succIndex(c *BasicBlock) int { + for i, succ := range b.Succs { + if succ == c { + return i + } + } + return -1 +} + // hasPhi returns true if b.Instrs contains φ-nodes. func (b *BasicBlock) hasPhi() bool { _, ok := b.Instrs[0].(*Phi) @@ -96,10 +132,6 @@ func (b *BasicBlock) replaceSucc(p, q *BasicBlock) { } } -func (b *BasicBlock) RemovePred(p *BasicBlock) { - b.removePred(p) -} - // removePred removes all occurrences of p in b's // predecessor list and φ-nodes. // Ordinarily there should be at most one. @@ -173,23 +205,33 @@ func (f *Function) labelledBlock(label *ast.Ident) *lblock { // addParam adds a (non-escaping) parameter to f.Params of the // specified name, type and source position. // -func (f *Function) addParam(name string, typ types.Type, pos token.Pos) *Parameter { +func (f *Function) addParam(name string, typ types.Type, source ast.Node) *Parameter { + var b *BasicBlock + if len(f.Blocks) > 0 { + b = f.Blocks[0] + } v := &Parameter{ - name: name, - typ: typ, - pos: pos, - parent: f, + name: name, } + v.setBlock(b) + v.setType(typ) + v.setSource(source) f.Params = append(f.Params, v) + if b != nil { + // There may be no blocks if this function has no body. We + // still create params, but aren't interested in the + // instruction. + f.Blocks[0].Instrs = append(f.Blocks[0].Instrs, v) + } return v } -func (f *Function) addParamObj(obj types.Object) *Parameter { +func (f *Function) addParamObj(obj types.Object, source ast.Node) *Parameter { name := obj.Name() if name == "" { name = fmt.Sprintf("arg%d", len(f.Params)) } - param := f.addParam(name, obj.Type(), obj.Pos()) + param := f.addParam(name, obj.Type(), source) param.object = obj return param } @@ -198,25 +240,61 @@ func (f *Function) addParamObj(obj types.Object) *Parameter { // stack; the function body will load/store the spilled location. // Subsequent lifting will eliminate spills where possible. // -func (f *Function) addSpilledParam(obj types.Object) { - param := f.addParamObj(obj) - spill := &Alloc{Comment: obj.Name()} +func (f *Function) addSpilledParam(obj types.Object, source ast.Node) { + param := f.addParamObj(obj, source) + spill := &Alloc{} spill.setType(types.NewPointer(obj.Type())) - spill.setPos(obj.Pos()) + spill.source = source f.objects[obj] = spill f.Locals = append(f.Locals, spill) - f.emit(spill) - f.emit(&Store{Addr: spill, Val: param}) + f.emit(spill, source) + emitStore(f, spill, param, source) + // f.emit(&Store{Addr: spill, Val: param}) } -// startBody initializes the function prior to generating SSA code for its body. 
+// startBody initializes the function prior to generating IR code for its body. // Precondition: f.Type() already set. // func (f *Function) startBody() { - f.currentBlock = f.newBasicBlock("entry") + entry := f.newBasicBlock("entry") + f.currentBlock = entry f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init } +func (f *Function) blockset(i int) *BlockSet { + bs := &f.blocksets[i] + if len(bs.values) != len(f.Blocks) { + if cap(bs.values) >= len(f.Blocks) { + bs.values = bs.values[:len(f.Blocks)] + bs.Clear() + } else { + bs.values = make([]bool, len(f.Blocks)) + } + } else { + bs.Clear() + } + return bs +} + +func (f *Function) exitBlock() { + old := f.currentBlock + + f.Exit = f.newBasicBlock("exit") + f.currentBlock = f.Exit + + ret := f.results() + results := make([]Value, len(ret)) + // Run function calls deferred in this + // function when explicitly returning from it. + f.emit(new(RunDefers), nil) + for i, r := range ret { + results[i] = emitLoad(f, r, nil) + } + + f.emit(&Return{Results: results}, nil) + f.currentBlock = old +} + // createSyntacticParams populates f.Params and generates code (spills // and named result locals) for all the parameters declared in the // syntax. In addition it populates the f.objects mapping. @@ -231,11 +309,11 @@ func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.Func if recv != nil { for _, field := range recv.List { for _, n := range field.Names { - f.addSpilledParam(f.Pkg.info.Defs[n]) + f.addSpilledParam(f.Pkg.info.Defs[n], n) } // Anonymous receiver? No need to spill. if field.Names == nil { - f.addParamObj(f.Signature.Recv()) + f.addParamObj(f.Signature.Recv(), field) } } } @@ -245,11 +323,11 @@ func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.Func n := len(f.Params) // 1 if has recv, 0 otherwise for _, field := range functype.Params.List { for _, n := range field.Names { - f.addSpilledParam(f.Pkg.info.Defs[n]) + f.addSpilledParam(f.Pkg.info.Defs[n], n) } // Anonymous parameter? No need to spill. if field.Names == nil { - f.addParamObj(f.Signature.Params().At(len(f.Params) - n)) + f.addParamObj(f.Signature.Params().At(len(f.Params)-n), field) } } } @@ -262,24 +340,27 @@ func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.Func f.namedResults = append(f.namedResults, f.addLocalForIdent(n)) } } + + if len(f.namedResults) == 0 { + sig := f.Signature.Results() + for i := 0; i < sig.Len(); i++ { + // XXX position information + v := f.addLocal(sig.At(i).Type(), nil) + f.implicitResults = append(f.implicitResults, v) + } + } } } -// numberRegisters assigns numbers to all SSA registers -// (value-defining Instructions) in f, to aid debugging. -// (Non-Instruction Values are named at construction.) -// -func numberRegisters(f *Function) { - v := 0 +func numberNodes(f *Function) { + var base ID for _, b := range f.Blocks { for _, instr := range b.Instrs { - switch instr.(type) { - case Value: - instr.(interface { - setNum(int) - }).setNum(v) - v++ + if instr == nil { + continue } + base++ + instr.setID(base) } } } @@ -303,111 +384,197 @@ func buildReferrers(f *Function) { } } -// finishBody() finalizes the function after SSA code generation of its body. -func (f *Function) finishBody() { - f.objects = nil - f.currentBlock = nil - f.lblocks = nil +func (f *Function) emitConsts() { + if len(f.Blocks) == 0 { + f.consts = nil + return + } - // Don't pin the AST in memory (except in debug mode). 
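exitBlock above emits RunDefers before reloading the result locals, which is what makes deferred writes to named results visible to the caller. The Go snippet below (not part of the patch) is the classic case that ordering protects:

package main

import (
	"errors"
	"fmt"
)

// The deferred closure runs between the return statement and the
// actual function exit. Because the exit block loads err only after
// RunDefers, the value written by the defer is the one returned.
func work() (err error) {
	defer func() {
		err = errors.New("overridden in defer")
	}()
	return nil
}

func main() {
	fmt.Println(work()) // prints the error set by the deferred closure
}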
- if n := f.syntax; n != nil && !f.debugInfo() { - f.syntax = extentNode{n.Pos(), n.End()} + // TODO(dh): our deduplication only works on booleans and + // integers. other constants are represented as pointers to + // things. + if len(f.consts) == 0 { + return + } else if len(f.consts) <= 32 { + f.emitConstsFew() + } else { + f.emitConstsMany() } +} - // Remove from f.Locals any Allocs that escape to the heap. - j := 0 - for _, l := range f.Locals { - if !l.Heap { - f.Locals[j] = l - j++ +func (f *Function) emitConstsFew() { + dedup := make([]*Const, 0, 32) + for _, c := range f.consts { + if len(*c.Referrers()) == 0 { + continue + } + found := false + for _, d := range dedup { + if c.typ == d.typ && c.Value == d.Value { + replaceAll(c, d) + found = true + break + } + } + if !found { + dedup = append(dedup, c) } } - // Nil out f.Locals[j:] to aid GC. - for i := j; i < len(f.Locals); i++ { - f.Locals[i] = nil + + instrs := make([]Instruction, len(f.Blocks[0].Instrs)+len(dedup)) + for i, c := range dedup { + instrs[i] = c + c.setBlock(f.Blocks[0]) + } + copy(instrs[len(dedup):], f.Blocks[0].Instrs) + f.Blocks[0].Instrs = instrs + f.consts = nil +} + +func (f *Function) emitConstsMany() { + type constKey struct { + typ types.Type + value constant.Value } - f.Locals = f.Locals[:j] - // comma-ok receiving from a time.Tick channel will never return - // ok == false, so any branching on the value of ok can be - // replaced with an unconditional jump. This will primarily match - // `for range time.Tick(x)` loops, but it can also match - // user-written code. - for _, block := range f.Blocks { - if len(block.Instrs) < 3 { + m := make(map[constKey]Value, len(f.consts)) + areNil := 0 + for i, c := range f.consts { + if len(*c.Referrers()) == 0 { + f.consts[i] = nil + areNil++ continue } - if len(block.Succs) != 2 { - continue + + k := constKey{ + typ: c.typ, + value: c.Value, } - var instrs []*Instruction - for i, ins := range block.Instrs { - if _, ok := ins.(*DebugRef); ok { - continue - } - instrs = append(instrs, &block.Instrs[i]) + if dup, ok := m[k]; !ok { + m[k] = c + } else { + f.consts[i] = nil + areNil++ + replaceAll(c, dup) } + } - for i, ins := range instrs { - unop, ok := (*ins).(*UnOp) - if !ok || unop.Op != token.ARROW { - continue - } - call, ok := unop.X.(*Call) - if !ok { - continue - } - if call.Common().IsInvoke() { - continue - } - - // OPT(dh): surely there is a more efficient way of doing - // this, than using FullName. We should already have - // resolved time.Tick somewhere? - v, ok := call.Common().Value.(*Function) - if !ok { - continue + instrs := make([]Instruction, len(f.Blocks[0].Instrs)+len(f.consts)-areNil) + i := 0 + for _, c := range f.consts { + if c != nil { + instrs[i] = c + c.setBlock(f.Blocks[0]) + i++ + } + } + copy(instrs[i:], f.Blocks[0].Instrs) + f.Blocks[0].Instrs = instrs + f.consts = nil +} + +// buildFakeExits ensures that every block in the function is +// reachable in reverse from the Exit block. This is required to build +// a full post-dominator tree, and to ensure the exit block's +// inclusion in the dominator tree. 
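The fake exit edges described above are easiest to motivate with an infinite loop: no block inside the loop reaches the function's exit block, so without a synthetic edge the post-dominator tree built later in finishBody could not cover it. A tiny, self-contained illustration of the kind of function that triggers a fake edge:

package main

import (
	"fmt"
	"time"
)

// The loop body has no path to spin's exit block. buildFakeExits picks a
// block with a back edge and adds a fake edge to the exit block so the
// whole CFG is reachable in reverse from it.
func spin(ticks chan<- struct{}) {
	for {
		ticks <- struct{}{}
		time.Sleep(10 * time.Millisecond)
	}
}

func main() {
	ticks := make(chan struct{})
	go spin(ticks)
	<-ticks
	fmt.Println("got a tick from the infinite loop")
}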
+func buildFakeExits(fn *Function) { + // Find back-edges via forward DFS + fn.fakeExits = BlockSet{values: make([]bool, len(fn.Blocks))} + seen := fn.blockset(0) + backEdges := fn.blockset(1) + + var dfs func(b *BasicBlock) + dfs = func(b *BasicBlock) { + if !seen.Add(b) { + backEdges.Add(b) + return + } + for _, pred := range b.Succs { + dfs(pred) + } + } + dfs(fn.Blocks[0]) +buildLoop: + for { + seen := fn.blockset(2) + var dfs func(b *BasicBlock) + dfs = func(b *BasicBlock) { + if !seen.Add(b) { + return } - t, ok := v.Object().(*types.Func) - if !ok { - continue + for _, pred := range b.Preds { + dfs(pred) } - if t.FullName() != "time.Tick" { - continue + if b == fn.Exit { + for _, b := range fn.Blocks { + if fn.fakeExits.Has(b) { + dfs(b) + } + } } - ex, ok := (*instrs[i+1]).(*Extract) - if !ok || ex.Tuple != unop || ex.Index != 1 { - continue + } + dfs(fn.Exit) + + for _, b := range fn.Blocks { + if !seen.Has(b) && backEdges.Has(b) { + // Block b is not reachable from the exit block. Add a + // fake jump from b to exit, then try again. Note that we + // only add one fake edge at a time, as it may make + // multiple blocks reachable. + // + // We only consider those blocks that have back edges. + // Any unreachable block that doesn't have a back edge + // must flow into a loop, which by definition has a + // back edge. Thus, by looking for loops, we should + // need fewer fake edges overall. + fn.fakeExits.Add(b) + continue buildLoop } + } - ifstmt, ok := (*instrs[i+2]).(*If) - if !ok || ifstmt.Cond != ex { - continue - } + break + } +} - *instrs[i+2] = NewJump(block) - succ := block.Succs[1] - block.Succs = block.Succs[0:1] - succ.RemovePred(block) +// finishBody() finalizes the function after IR code generation of its body. +func (f *Function) finishBody() { + f.objects = nil + f.currentBlock = nil + f.lblocks = nil + + // Remove from f.Locals any Allocs that escape to the heap. + j := 0 + for _, l := range f.Locals { + if !l.Heap { + f.Locals[j] = l + j++ } } + // Nil out f.Locals[j:] to aid GC. + for i := j; i < len(f.Locals); i++ { + f.Locals[i] = nil + } + f.Locals = f.Locals[:j] optimizeBlocks(f) - buildReferrers(f) - buildDomTree(f) + buildPostDomTree(f) if f.Prog.mode&NaiveForm == 0 { - // For debugging pre-state of lifting pass: - // numberRegisters(f) - // f.WriteTo(os.Stderr) lift(f) } + // emit constants after lifting, because lifting may produce new constants. + f.emitConsts() + f.namedResults = nil // (used by lifting) + f.implicitResults = nil + + numberNodes(f) - numberRegisters(f) + defer f.wr.Close() + f.wr.WriteFunc("start", "start", f) if f.Prog.mode&PrintFunctions != 0 { printMu.Lock() @@ -420,6 +587,29 @@ func (f *Function) finishBody() { } } +func isUselessPhi(phi *Phi) (Value, bool) { + var v0 Value + for _, e := range phi.Edges { + if e == phi { + continue + } + if v0 == nil { + v0 = e + } + if v0 != e { + if v0, ok := v0.(*Const); ok { + if e, ok := e.(*Const); ok { + if v0.typ == e.typ && v0.Value == e.Value { + continue + } + } + } + return nil, false + } + } + return v0, true +} + func (f *Function) RemoveNilBlocks() { f.removeNilBlocks() } @@ -462,26 +652,24 @@ func (f *Function) debugInfo() bool { // returns it. Its name and type are taken from obj. Subsequent // calls to f.lookup(obj) will return the same local. 
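isUselessPhi above folds φ-nodes whose incoming edges all carry the same value, including distinct but equal constants. In source form the pattern is a merge point where every branch stored the same thing; a hypothetical example:

package main

import (
	"fmt"
	"os"
)

func main() {
	mode := 0
	if len(os.Args) > 1 {
		mode = 1
	} else {
		mode = 1
	}
	// After lifting, the φ-node merging mode here has two edges that are
	// both the constant 1, so isUselessPhi lets the pass replace the φ
	// with that constant and drop the node.
	fmt.Println(mode)
}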
// -func (f *Function) addNamedLocal(obj types.Object) *Alloc { - l := f.addLocal(obj.Type(), obj.Pos()) - l.Comment = obj.Name() +func (f *Function) addNamedLocal(obj types.Object, source ast.Node) *Alloc { + l := f.addLocal(obj.Type(), source) f.objects[obj] = l return l } func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc { - return f.addNamedLocal(f.Pkg.info.Defs[id]) + return f.addNamedLocal(f.Pkg.info.Defs[id], id) } // addLocal creates an anonymous local variable of type typ, adds it // to function f and returns it. pos is the optional source location. // -func (f *Function) addLocal(typ types.Type, pos token.Pos) *Alloc { +func (f *Function) addLocal(typ types.Type, source ast.Node) *Alloc { v := &Alloc{} v.setType(types.NewPointer(typ)) - v.setPos(pos) f.Locals = append(f.Locals, v) - f.emit(v) + f.emit(v, source) return v } @@ -501,13 +689,12 @@ func (f *Function) lookup(obj types.Object, escaping bool) Value { // Definition must be in an enclosing function; // plumb it through intervening closures. if f.parent == nil { - panic("no ssa.Value for " + obj.String()) + panic("no ir.Value for " + obj.String()) } outer := f.parent.lookup(obj, true) // escaping v := &FreeVar{ name: obj.Name(), typ: outer.Type(), - pos: outer.Pos(), outer: outer, parent: f, } @@ -517,8 +704,8 @@ func (f *Function) lookup(obj types.Object, escaping bool) Value { } // emit emits the specified instruction to function f. -func (f *Function) emit(instr Instruction) Value { - return f.currentBlock.emit(instr) +func (f *Function) emit(instr Instruction, source ast.Node) Value { + return f.currentBlock.emit(instr, source) } // RelString returns the full name of this function, qualified by @@ -637,10 +824,6 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { fmt.Fprintf(buf, "# Parent: %s\n", f.parent.Name()) } - if f.Recover != nil { - fmt.Fprintf(buf, "# Recover: %s\n", f.Recover) - } - from := f.pkg() if f.FreeVars != nil { @@ -663,45 +846,38 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { buf.WriteString("\t(external)\n") } - // NB. column calculations are confused by non-ASCII - // characters and assume 8-space tabs. - const punchcard = 80 // for old time's sake. - const tabwidth = 8 for _, b := range f.Blocks { if b == nil { // Corrupt CFG. fmt.Fprintf(buf, ".nil:\n") continue } - n, _ := fmt.Fprintf(buf, "%d:", b.Index) - bmsg := fmt.Sprintf("%s P:%d S:%d", b.Comment, len(b.Preds), len(b.Succs)) - fmt.Fprintf(buf, "%*s%s\n", punchcard-1-n-len(bmsg), "", bmsg) + fmt.Fprintf(buf, "b%d:", b.Index) + if len(b.Preds) > 0 { + fmt.Fprint(buf, " ←") + for _, pred := range b.Preds { + fmt.Fprintf(buf, " b%d", pred.Index) + } + } + if b.Comment != "" { + fmt.Fprintf(buf, " # %s", b.Comment) + } + buf.WriteByte('\n') if false { // CFG debugging fmt.Fprintf(buf, "\t# CFG: %s --> %s --> %s\n", b.Preds, b, b.Succs) } + + buf2 := &bytes.Buffer{} for _, instr := range b.Instrs { buf.WriteString("\t") switch v := instr.(type) { case Value: - l := punchcard - tabwidth // Left-align the instruction. if name := v.Name(); name != "" { - n, _ := fmt.Fprintf(buf, "%s = ", name) - l -= n - } - n, _ := buf.WriteString(instr.String()) - l -= n - // Right-align the type if there's space. - if t := v.Type(); t != nil { - buf.WriteByte(' ') - ts := relType(t, from) - l -= len(ts) + len(" ") // (spaces before and after type) - if l > 0 { - fmt.Fprintf(buf, "%*s", l, "") - } - buf.WriteString(ts) + fmt.Fprintf(buf, "%s = ", name) } + buf.WriteString(instr.String()) case nil: // Be robust against bad transforms. 
buf.WriteString("") @@ -709,9 +885,30 @@ func WriteFunction(buf *bytes.Buffer, f *Function) { buf.WriteString(instr.String()) } buf.WriteString("\n") + + if f.Prog.mode&PrintSource != 0 { + if s := instr.Source(); s != nil { + buf2.Reset() + format.Node(buf2, f.Prog.Fset, s) + for { + line, err := buf2.ReadString('\n') + if len(line) == 0 { + break + } + buf.WriteString("\t\t> ") + buf.WriteString(line) + if line[len(line)-1] != '\n' { + buf.WriteString("\n") + } + if err != nil { + break + } + } + } + } } + buf.WriteString("\n") } - fmt.Fprintf(buf, "\n") } // newBasicBlock adds to f a new basic block and returns it. It does @@ -736,7 +933,7 @@ func (f *Function) newBasicBlock(comment string) *BasicBlock { // the function object, e.g. Pkg, Params, Blocks. // // It is practically impossible for clients to construct well-formed -// SSA functions/packages/programs directly, so we assume this is the +// IR functions/packages/programs directly, so we assume this is the // job of the Builder alone. NewFunction exists to provide clients a // little flexibility. For example, analysis tools may wish to // construct fake Functions for the root of the callgraph, a fake @@ -748,18 +945,17 @@ func (prog *Program) NewFunction(name string, sig *types.Signature, provenance s return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance} } +//lint:ignore U1000 we may make use of this for functions loaded from export data type extentNode [2]token.Pos func (n extentNode) Pos() token.Pos { return n[0] } func (n extentNode) End() token.Pos { return n[1] } -// Syntax returns an ast.Node whose Pos/End methods provide the -// lexical extent of the function if it was defined by Go source code -// (f.Synthetic==""), or nil otherwise. -// -// If f was built with debug information (see Package.SetDebugRef), -// the result is the *ast.FuncDecl or *ast.FuncLit that declared the -// function. Otherwise, it is an opaque Node providing only position -// information; this avoids pinning the AST in memory. -// -func (f *Function) Syntax() ast.Node { return f.syntax } +func (f *Function) initHTML(name string) { + if name == "" { + return + } + if rel := f.RelString(nil); rel == name { + f.wr = NewHTMLWriter("ir.html", rel, "") + } +} diff --git a/vendor/honnef.co/go/tools/ir/html.go b/vendor/honnef.co/go/tools/ir/html.go new file mode 100644 index 000000000..c18375333 --- /dev/null +++ b/vendor/honnef.co/go/tools/ir/html.go @@ -0,0 +1,1124 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Copyright 2019 Dominik Honnef. All rights reserved. 
+ +package ir + +import ( + "bytes" + "fmt" + "go/types" + "html" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "reflect" + "sort" + "strings" +) + +func live(f *Function) []bool { + max := 0 + var ops []*Value + + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + if int(instr.ID()) > max { + max = int(instr.ID()) + } + } + } + + out := make([]bool, max+1) + var q []Node + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + switch instr.(type) { + case *BlankStore, *Call, *ConstantSwitch, *Defer, *Go, *If, *Jump, *MapUpdate, *Next, *Panic, *Recv, *Return, *RunDefers, *Send, *Store, *Unreachable: + out[instr.ID()] = true + q = append(q, instr) + } + } + } + + for len(q) > 0 { + v := q[len(q)-1] + q = q[:len(q)-1] + for _, op := range v.Operands(ops) { + if *op == nil { + continue + } + if !out[(*op).ID()] { + out[(*op).ID()] = true + q = append(q, *op) + } + } + } + + return out +} + +type funcPrinter interface { + startBlock(b *BasicBlock, reachable bool) + endBlock(b *BasicBlock) + value(v Node, live bool) + startDepCycle() + endDepCycle() + named(n string, vals []Value) +} + +func namedValues(f *Function) map[types.Object][]Value { + names := map[types.Object][]Value{} + for _, b := range f.Blocks { + for _, instr := range b.Instrs { + if instr, ok := instr.(*DebugRef); ok { + if obj := instr.object; obj != nil { + names[obj] = append(names[obj], instr.X) + } + } + } + } + // XXX deduplicate values + return names +} + +func fprintFunc(p funcPrinter, f *Function) { + // XXX does our IR form preserve unreachable blocks? + // reachable, live := findlive(f) + + l := live(f) + for _, b := range f.Blocks { + // XXX + // p.startBlock(b, reachable[b.Index]) + p.startBlock(b, true) + + end := len(b.Instrs) - 1 + if end < 0 { + end = 0 + } + for _, v := range b.Instrs[:end] { + if _, ok := v.(*DebugRef); !ok { + p.value(v, l[v.ID()]) + } + } + p.endBlock(b) + } + + names := namedValues(f) + keys := make([]types.Object, 0, len(names)) + for key := range names { + keys = append(keys, key) + } + sort.Slice(keys, func(i, j int) bool { + return keys[i].Pos() < keys[j].Pos() + }) + for _, key := range keys { + p.named(key.Name(), names[key]) + } +} + +func opName(v Node) string { + switch v := v.(type) { + case *Call: + if v.Common().IsInvoke() { + return "Invoke" + } + return "Call" + case *Alloc: + if v.Heap { + return "HeapAlloc" + } + return "StackAlloc" + case *Select: + if v.Blocking { + return "SelectBlocking" + } + return "SelectNonBlocking" + default: + return reflect.ValueOf(v).Type().Elem().Name() + } +} + +type HTMLWriter struct { + w io.WriteCloser + path string + dot *dotWriter +} + +func NewHTMLWriter(path string, funcname, cfgMask string) *HTMLWriter { + out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + log.Fatalf("%v", err) + } + pwd, err := os.Getwd() + if err != nil { + log.Fatalf("%v", err) + } + html := HTMLWriter{w: out, path: filepath.Join(pwd, path)} + html.dot = newDotWriter() + html.start(funcname) + return &html +} + +func (w *HTMLWriter) start(name string) { + if w == nil { + return + } + w.WriteString("") + w.WriteString(` + + + + + +`) + w.WriteString("") + w.WriteString("

") + w.WriteString(html.EscapeString(name)) + w.WriteString("

") + w.WriteString(` +help +
+ +

+Click on a value or block to toggle highlighting of that value/block +and its uses. (Values and blocks are highlighted by ID, and IDs of +dead items may be reused, so not all highlights necessarily correspond +to the clicked item.) +

+ +

+Faded out values and blocks are dead code that has not been eliminated. +

+ +

+Values printed in italics have a dependency cycle. +

+ +

+CFG: Dashed edge is for unlikely branches. Blue color is for backward edges. +Edge with a dot means that this edge follows the order in which blocks were laidout. +

+ +
+`) + w.WriteString("
") + w.WriteString("") +} + +func (w *HTMLWriter) Close() { + if w == nil { + return + } + io.WriteString(w.w, "") + io.WriteString(w.w, "
") + io.WriteString(w.w, "") + io.WriteString(w.w, "") + w.w.Close() + fmt.Printf("dumped IR to %v\n", w.path) +} + +// WriteFunc writes f in a column headed by title. +// phase is used for collapsing columns and should be unique across the table. +func (w *HTMLWriter) WriteFunc(phase, title string, f *Function) { + if w == nil { + return + } + w.WriteColumn(phase, title, "", funcHTML(f, phase, w.dot)) +} + +// WriteColumn writes raw HTML in a column headed by title. +// It is intended for pre- and post-compilation log output. +func (w *HTMLWriter) WriteColumn(phase, title, class, html string) { + if w == nil { + return + } + id := strings.Replace(phase, " ", "-", -1) + // collapsed column + w.Printf("
%v
", id, phase) + + if class == "" { + w.Printf("", id) + } else { + w.Printf("", id, class) + } + w.WriteString("

" + title + "

") + w.WriteString(html) + w.WriteString("") +} + +func (w *HTMLWriter) Printf(msg string, v ...interface{}) { + if _, err := fmt.Fprintf(w.w, msg, v...); err != nil { + log.Fatalf("%v", err) + } +} + +func (w *HTMLWriter) WriteString(s string) { + if _, err := io.WriteString(w.w, s); err != nil { + log.Fatalf("%v", err) + } +} + +func valueHTML(v Node) string { + if v == nil { + return "<nil>" + } + // TODO: Using the value ID as the class ignores the fact + // that value IDs get recycled and that some values + // are transmuted into other values. + class := fmt.Sprintf("t%d", v.ID()) + var label string + switch v := v.(type) { + case *Function: + label = v.RelString(nil) + case *Builtin: + label = v.Name() + default: + label = class + } + return fmt.Sprintf("%s", class, label) +} + +func valueLongHTML(v Node) string { + // TODO: Any intra-value formatting? + // I'm wary of adding too much visual noise, + // but a little bit might be valuable. + // We already have visual noise in the form of punctuation + // maybe we could replace some of that with formatting. + s := fmt.Sprintf("", v.ID()) + + linenumber := "(?)" + if v.Pos().IsValid() { + line := v.Parent().Prog.Fset.Position(v.Pos()).Line + linenumber = fmt.Sprintf("(%d)", line, line) + } + + s += fmt.Sprintf("%s %s = %s", valueHTML(v), linenumber, opName(v)) + + if v, ok := v.(Value); ok { + s += " <" + html.EscapeString(v.Type().String()) + ">" + } + + switch v := v.(type) { + case *Parameter: + s += fmt.Sprintf(" {%s}", html.EscapeString(v.name)) + case *BinOp: + s += fmt.Sprintf(" {%s}", html.EscapeString(v.Op.String())) + case *UnOp: + s += fmt.Sprintf(" {%s}", html.EscapeString(v.Op.String())) + case *Extract: + name := v.Tuple.Type().(*types.Tuple).At(v.Index).Name() + s += fmt.Sprintf(" [%d] (%s)", v.Index, name) + case *Field: + st := v.X.Type().Underlying().(*types.Struct) + // Be robust against a bad index. + name := "?" + if 0 <= v.Field && v.Field < st.NumFields() { + name = st.Field(v.Field).Name() + } + s += fmt.Sprintf(" [%d] (%s)", v.Field, name) + case *FieldAddr: + st := deref(v.X.Type()).Underlying().(*types.Struct) + // Be robust against a bad index. + name := "?" + if 0 <= v.Field && v.Field < st.NumFields() { + name = st.Field(v.Field).Name() + } + + s += fmt.Sprintf(" [%d] (%s)", v.Field, name) + case *Recv: + s += fmt.Sprintf(" {%t}", v.CommaOk) + case *Call: + if v.Common().IsInvoke() { + s += fmt.Sprintf(" {%s}", html.EscapeString(v.Common().Method.FullName())) + } + case *Const: + if v.Value == nil { + s += " {<nil>}" + } else { + s += fmt.Sprintf(" {%s}", html.EscapeString(v.Value.String())) + } + case *Sigma: + s += fmt.Sprintf(" [#%s]", v.From) + } + for _, a := range v.Operands(nil) { + s += fmt.Sprintf(" %s", valueHTML(*a)) + } + + // OPT(dh): we're calling namedValues many times on the same function. + allNames := namedValues(v.Parent()) + var names []string + for name, values := range allNames { + for _, value := range values { + if v == value { + names = append(names, name.Name()) + break + } + } + } + if len(names) != 0 { + s += " (" + strings.Join(names, ", ") + ")" + } + + s += "" + return s +} + +func blockHTML(b *BasicBlock) string { + // TODO: Using the value ID as the class ignores the fact + // that value IDs get recycled and that some values + // are transmuted into other values. 
+ s := html.EscapeString(b.String()) + return fmt.Sprintf("%s", s, s) +} + +func blockLongHTML(b *BasicBlock) string { + var kind string + var term Instruction + if len(b.Instrs) > 0 { + term = b.Control() + kind = opName(term) + } + // TODO: improve this for HTML? + s := fmt.Sprintf("%s", b.Index, kind) + + if term != nil { + ops := term.Operands(nil) + if len(ops) > 0 { + var ss []string + for _, op := range ops { + ss = append(ss, valueHTML(*op)) + } + s += " " + strings.Join(ss, ", ") + } + } + if len(b.Succs) > 0 { + s += " →" // right arrow + for _, c := range b.Succs { + s += " " + blockHTML(c) + } + } + return s +} + +func funcHTML(f *Function, phase string, dot *dotWriter) string { + buf := new(bytes.Buffer) + if dot != nil { + dot.writeFuncSVG(buf, phase, f) + } + fmt.Fprint(buf, "") + p := htmlFuncPrinter{w: buf} + fprintFunc(p, f) + + // fprintFunc(&buf, f) // TODO: HTML, not text,
for line breaks, etc. + fmt.Fprint(buf, "
") + return buf.String() +} + +type htmlFuncPrinter struct { + w io.Writer +} + +func (p htmlFuncPrinter) startBlock(b *BasicBlock, reachable bool) { + var dead string + if !reachable { + dead = "dead-block" + } + fmt.Fprintf(p.w, "
    ", b, dead) + fmt.Fprintf(p.w, "
  • %s:", blockHTML(b)) + if len(b.Preds) > 0 { + io.WriteString(p.w, " ←") // left arrow + for _, pred := range b.Preds { + fmt.Fprintf(p.w, " %s", blockHTML(pred)) + } + } + if len(b.Instrs) > 0 { + io.WriteString(p.w, ``) + } + io.WriteString(p.w, "
  • ") + if len(b.Instrs) > 0 { // start list of values + io.WriteString(p.w, "
  • ") + io.WriteString(p.w, "
      ") + } +} + +func (p htmlFuncPrinter) endBlock(b *BasicBlock) { + if len(b.Instrs) > 0 { // end list of values + io.WriteString(p.w, "
    ") + io.WriteString(p.w, "
  • ") + } + io.WriteString(p.w, "
  • ") + fmt.Fprint(p.w, blockLongHTML(b)) + io.WriteString(p.w, "
  • ") + io.WriteString(p.w, "
") +} + +func (p htmlFuncPrinter) value(v Node, live bool) { + var dead string + if !live { + dead = "dead-value" + } + fmt.Fprintf(p.w, "
  • ", dead) + fmt.Fprint(p.w, valueLongHTML(v)) + io.WriteString(p.w, "
  • ") +} + +func (p htmlFuncPrinter) startDepCycle() { + fmt.Fprintln(p.w, "") +} + +func (p htmlFuncPrinter) endDepCycle() { + fmt.Fprintln(p.w, "") +} + +func (p htmlFuncPrinter) named(n string, vals []Value) { + fmt.Fprintf(p.w, "
  • name %s: ", n) + for _, val := range vals { + fmt.Fprintf(p.w, "%s ", valueHTML(val)) + } + fmt.Fprintf(p.w, "
  • ") +} + +type dotWriter struct { + path string + broken bool +} + +// newDotWriter returns non-nil value when mask is valid. +// dotWriter will generate SVGs only for the phases specified in the mask. +// mask can contain following patterns and combinations of them: +// * - all of them; +// x-y - x through y, inclusive; +// x,y - x and y, but not the passes between. +func newDotWriter() *dotWriter { + path, err := exec.LookPath("dot") + if err != nil { + fmt.Println(err) + return nil + } + return &dotWriter{path: path} +} + +func (d *dotWriter) writeFuncSVG(w io.Writer, phase string, f *Function) { + if d.broken { + return + } + cmd := exec.Command(d.path, "-Tsvg") + pipe, err := cmd.StdinPipe() + if err != nil { + d.broken = true + fmt.Println(err) + return + } + buf := new(bytes.Buffer) + cmd.Stdout = buf + bufErr := new(bytes.Buffer) + cmd.Stderr = bufErr + err = cmd.Start() + if err != nil { + d.broken = true + fmt.Println(err) + return + } + fmt.Fprint(pipe, `digraph "" { margin=0; size="4,40"; ranksep=.2; `) + id := strings.Replace(phase, " ", "-", -1) + fmt.Fprintf(pipe, `id="g_graph_%s";`, id) + fmt.Fprintf(pipe, `node [style=filled,fillcolor=white,fontsize=16,fontname="Menlo,Times,serif",margin="0.01,0.03"];`) + fmt.Fprintf(pipe, `edge [fontsize=16,fontname="Menlo,Times,serif"];`) + for _, b := range f.Blocks { + layout := "" + fmt.Fprintf(pipe, `%v [label="%v%s\n%v",id="graph_node_%v_%v"];`, b, b, layout, b.Control().String(), id, b) + } + indexOf := make([]int, len(f.Blocks)) + for i, b := range f.Blocks { + indexOf[b.Index] = i + } + + // XXX + /* + ponums := make([]int32, len(f.Blocks)) + _ = postorderWithNumbering(f, ponums) + isBackEdge := func(from, to int) bool { + return ponums[from] <= ponums[to] + } + */ + isBackEdge := func(from, to int) bool { return false } + + for _, b := range f.Blocks { + for i, s := range b.Succs { + style := "solid" + color := "black" + arrow := "vee" + if isBackEdge(b.Index, s.Index) { + color = "blue" + } + fmt.Fprintf(pipe, `%v -> %v [label=" %d ",style="%s",color="%s",arrowhead="%s"];`, b, s, i, style, color, arrow) + } + } + fmt.Fprint(pipe, "}") + pipe.Close() + err = cmd.Wait() + if err != nil { + d.broken = true + fmt.Printf("dot: %v\n%v\n", err, bufErr.String()) + return + } + + svgID := "svg_graph_" + id + fmt.Fprintf(w, `
    `, svgID, svgID) + // For now, an awful hack: edit the html as it passes through + // our fingers, finding ' 0 { + fset = initial[0].Fset + } + + prog := ir.NewProgram(fset, mode) + if opts != nil { + prog.PrintFunc = opts.PrintFunc + } + + isInitial := make(map[*packages.Package]bool, len(initial)) + for _, p := range initial { + isInitial[p] = true + } + + irmap := make(map[*packages.Package]*ir.Package) + packages.Visit(initial, nil, func(p *packages.Package) { + if p.Types != nil && !p.IllTyped { + var files []*ast.File + if deps || isInitial[p] { + files = p.Syntax + } + irmap[p] = prog.CreatePackage(p.Types, files, p.TypesInfo, true) + } + }) + + var irpkgs []*ir.Package + for _, p := range initial { + irpkgs = append(irpkgs, irmap[p]) // may be nil + } + return prog, irpkgs +} + +// CreateProgram returns a new program in IR form, given a program +// loaded from source. An IR package is created for each transitively +// error-free package of lprog. +// +// Code for bodies of functions is not built until Build is called +// on the result. +// +// The mode parameter controls diagnostics and checking during IR construction. +// +// Deprecated: use golang.org/x/tools/go/packages and the Packages +// function instead; see ir.ExampleLoadPackages. +// +func CreateProgram(lprog *loader.Program, mode ir.BuilderMode) *ir.Program { + prog := ir.NewProgram(lprog.Fset, mode) + + for _, info := range lprog.AllPackages { + if info.TransitivelyErrorFree { + prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable) + } + } + + return prog +} + +// BuildPackage builds an IR program with IR for a single package. +// +// It populates pkg by type-checking the specified file ASTs. All +// dependencies are loaded using the importer specified by tc, which +// typically loads compiler export data; IR code cannot be built for +// those packages. BuildPackage then constructs an ir.Program with all +// dependency packages created, and builds and returns the IR package +// corresponding to pkg. +// +// The caller must have set pkg.Path() to the import path. +// +// The operation fails if there were any type-checking or import errors. +// +// See ../ir/example_test.go for an example. +// +func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ir.BuilderMode) (*ir.Package, *types.Info, error) { + if fset == nil { + panic("no token.FileSet") + } + if pkg.Path() == "" { + panic("package has no import path") + } + + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil { + return nil, nil, err + } + + prog := ir.NewProgram(fset, mode) + + // Create IR packages for all imports. + // Order is not significant. + created := make(map[*types.Package]bool) + var createAll func(pkgs []*types.Package) + createAll = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !created[p] { + created[p] = true + prog.CreatePackage(p, nil, nil, true) + createAll(p.Imports()) + } + } + } + createAll(pkg.Imports()) + + // Create and build the primary package. 
+ irpkg := prog.CreatePackage(pkg, files, info, false) + irpkg.Build() + return irpkg, info, nil +} diff --git a/vendor/honnef.co/go/tools/ir/irutil/switch.go b/vendor/honnef.co/go/tools/ir/irutil/switch.go new file mode 100644 index 000000000..f44cbca9e --- /dev/null +++ b/vendor/honnef.co/go/tools/ir/irutil/switch.go @@ -0,0 +1,264 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package irutil + +// This file implements discovery of switch and type-switch constructs +// from low-level control flow. +// +// Many techniques exist for compiling a high-level switch with +// constant cases to efficient machine code. The optimal choice will +// depend on the data type, the specific case values, the code in the +// body of each case, and the hardware. +// Some examples: +// - a lookup table (for a switch that maps constants to constants) +// - a computed goto +// - a binary tree +// - a perfect hash +// - a two-level switch (to partition constant strings by their first byte). + +import ( + "bytes" + "fmt" + "go/token" + "go/types" + + "honnef.co/go/tools/ir" +) + +// A ConstCase represents a single constant comparison. +// It is part of a Switch. +type ConstCase struct { + Block *ir.BasicBlock // block performing the comparison + Body *ir.BasicBlock // body of the case + Value *ir.Const // case comparand +} + +// A TypeCase represents a single type assertion. +// It is part of a Switch. +type TypeCase struct { + Block *ir.BasicBlock // block performing the type assert + Body *ir.BasicBlock // body of the case + Type types.Type // case type + Binding ir.Value // value bound by this case +} + +// A Switch is a logical high-level control flow operation +// (a multiway branch) discovered by analysis of a CFG containing +// only if/else chains. It is not part of the ir.Instruction set. +// +// One of ConstCases and TypeCases has length >= 2; +// the other is nil. +// +// In a value switch, the list of cases may contain duplicate constants. +// A type switch may contain duplicate types, or types assignable +// to an interface type also in the list. +// TODO(adonovan): eliminate such duplicates. +// +type Switch struct { + Start *ir.BasicBlock // block containing start of if/else chain + X ir.Value // the switch operand + ConstCases []ConstCase // ordered list of constant comparisons + TypeCases []TypeCase // ordered list of type assertions + Default *ir.BasicBlock // successor if all comparisons fail +} + +func (sw *Switch) String() string { + // We represent each block by the String() of its + // first Instruction, e.g. "print(42:int)". + var buf bytes.Buffer + if sw.ConstCases != nil { + fmt.Fprintf(&buf, "switch %s {\n", sw.X.Name()) + for _, c := range sw.ConstCases { + fmt.Fprintf(&buf, "case %s: %s\n", c.Value.Name(), c.Body.Instrs[0]) + } + } else { + fmt.Fprintf(&buf, "switch %s.(type) {\n", sw.X.Name()) + for _, c := range sw.TypeCases { + fmt.Fprintf(&buf, "case %s %s: %s\n", + c.Binding.Name(), c.Type, c.Body.Instrs[0]) + } + } + if sw.Default != nil { + fmt.Fprintf(&buf, "default: %s\n", sw.Default.Instrs[0]) + } + fmt.Fprintf(&buf, "}") + return buf.String() +} + +// Switches examines the control-flow graph of fn and returns the +// set of inferred value and type switches. A value switch tests an +// ir.Value for equality against two or more compile-time constant +// values. Switches involving link-time constants (addresses) are +// ignored. 
A type switch type-asserts an ir.Value against two or +// more types. +// +// The switches are returned in dominance order. +// +// The resulting switches do not necessarily correspond to uses of the +// 'switch' keyword in the source: for example, a single source-level +// switch statement with non-constant cases may result in zero, one or +// many Switches, one per plural sequence of constant cases. +// Switches may even be inferred from if/else- or goto-based control flow. +// (In general, the control flow constructs of the source program +// cannot be faithfully reproduced from the IR.) +// +func Switches(fn *ir.Function) []Switch { + // Traverse the CFG in dominance order, so we don't + // enter an if/else-chain in the middle. + var switches []Switch + seen := make(map[*ir.BasicBlock]bool) // TODO(adonovan): opt: use ir.blockSet + for _, b := range fn.DomPreorder() { + if x, k := isComparisonBlock(b); x != nil { + // Block b starts a switch. + sw := Switch{Start: b, X: x} + valueSwitch(&sw, k, seen) + if len(sw.ConstCases) > 1 { + switches = append(switches, sw) + } + } + + if y, x, T := isTypeAssertBlock(b); y != nil { + // Block b starts a type switch. + sw := Switch{Start: b, X: x} + typeSwitch(&sw, y, T, seen) + if len(sw.TypeCases) > 1 { + switches = append(switches, sw) + } + } + } + return switches +} + +func isSameX(x1 ir.Value, x2 ir.Value) bool { + if x1 == x2 { + return true + } + if x2, ok := x2.(*ir.Sigma); ok { + return isSameX(x1, x2.X) + } + return false +} + +func valueSwitch(sw *Switch, k *ir.Const, seen map[*ir.BasicBlock]bool) { + b := sw.Start + x := sw.X + for isSameX(sw.X, x) { + if seen[b] { + break + } + seen[b] = true + + sw.ConstCases = append(sw.ConstCases, ConstCase{ + Block: b, + Body: b.Succs[0], + Value: k, + }) + b = b.Succs[1] + n := 0 + for _, instr := range b.Instrs { + switch instr.(type) { + case *ir.If, *ir.BinOp: + n++ + case *ir.Sigma, *ir.Phi, *ir.DebugRef: + default: + n += 1000 + } + } + if n != 2 { + // Block b contains not just 'if x == k' and σ/ϕ nodes, + // so it may have side effects that + // make it unsafe to elide. + break + } + if len(b.Preds) != 1 { + // Block b has multiple predecessors, + // so it cannot be treated as a case. + break + } + x, k = isComparisonBlock(b) + } + sw.Default = b +} + +func typeSwitch(sw *Switch, y ir.Value, T types.Type, seen map[*ir.BasicBlock]bool) { + b := sw.Start + x := sw.X + for isSameX(sw.X, x) { + if seen[b] { + break + } + seen[b] = true + + sw.TypeCases = append(sw.TypeCases, TypeCase{ + Block: b, + Body: b.Succs[0], + Type: T, + Binding: y, + }) + b = b.Succs[1] + n := 0 + for _, instr := range b.Instrs { + switch instr.(type) { + case *ir.TypeAssert, *ir.Extract, *ir.If: + n++ + case *ir.Sigma, *ir.Phi: + default: + n += 1000 + } + } + if n != 4 { + // Block b contains not just + // {TypeAssert; Extract #0; Extract #1; If} + // so it may have side effects that + // make it unsafe to elide. + break + } + if len(b.Preds) != 1 { + // Block b has multiple predecessors, + // so it cannot be treated as a case. + break + } + y, x, T = isTypeAssertBlock(b) + } + sw.Default = b +} + +// isComparisonBlock returns the operands (v, k) if a block ends with +// a comparison v==k, where k is a compile-time constant. 
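Switches above recovers multiway branches from the if/else chains the builder produces. Below is a hedged usage sketch: it assumes an *ir.Function has already been built (for example via the BuildPackage helper from load.go earlier in this patch) and simply reports each reconstructed switch using the String method defined above. The package and helper names are illustrative, not from the patch.

package example

import (
	"fmt"

	"honnef.co/go/tools/ir"
	"honnef.co/go/tools/ir/irutil"
)

// printSwitches reports every value and type switch that irutil.Switches
// can reconstruct from fn's control-flow graph.
func printSwitches(fn *ir.Function) {
	for _, sw := range irutil.Switches(fn) {
		if sw.ConstCases != nil {
			fmt.Printf("value switch on %s with %d cases\n",
				sw.X.Name(), len(sw.ConstCases))
		} else {
			fmt.Printf("type switch on %s with %d cases\n",
				sw.X.Name(), len(sw.TypeCases))
		}
		fmt.Println(sw.String())
	}
}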
+// +func isComparisonBlock(b *ir.BasicBlock) (v ir.Value, k *ir.Const) { + if n := len(b.Instrs); n >= 2 { + if i, ok := b.Instrs[n-1].(*ir.If); ok { + if binop, ok := i.Cond.(*ir.BinOp); ok && binop.Block() == b && binop.Op == token.EQL { + if k, ok := binop.Y.(*ir.Const); ok { + return binop.X, k + } + if k, ok := binop.X.(*ir.Const); ok { + return binop.Y, k + } + } + } + } + return +} + +// isTypeAssertBlock returns the operands (y, x, T) if a block ends with +// a type assertion "if y, ok := x.(T); ok {". +// +func isTypeAssertBlock(b *ir.BasicBlock) (y, x ir.Value, T types.Type) { + if n := len(b.Instrs); n >= 4 { + if i, ok := b.Instrs[n-1].(*ir.If); ok { + if ext1, ok := i.Cond.(*ir.Extract); ok && ext1.Block() == b && ext1.Index == 1 { + if ta, ok := ext1.Tuple.(*ir.TypeAssert); ok && ta.Block() == b { + // hack: relies upon instruction ordering. + if ext0, ok := b.Instrs[n-3].(*ir.Extract); ok { + return ext0, ta.X, ta.AssertedType + } + } + } + } + } + return +} diff --git a/vendor/honnef.co/go/tools/ir/irutil/util.go b/vendor/honnef.co/go/tools/ir/irutil/util.go new file mode 100644 index 000000000..04b25f5f9 --- /dev/null +++ b/vendor/honnef.co/go/tools/ir/irutil/util.go @@ -0,0 +1,70 @@ +package irutil + +import ( + "honnef.co/go/tools/ir" +) + +func Reachable(from, to *ir.BasicBlock) bool { + if from == to { + return true + } + if from.Dominates(to) { + return true + } + + found := false + Walk(from, func(b *ir.BasicBlock) bool { + if b == to { + found = true + return false + } + return true + }) + return found +} + +func Walk(b *ir.BasicBlock, fn func(*ir.BasicBlock) bool) { + seen := map[*ir.BasicBlock]bool{} + wl := []*ir.BasicBlock{b} + for len(wl) > 0 { + b := wl[len(wl)-1] + wl = wl[:len(wl)-1] + if seen[b] { + continue + } + seen[b] = true + if !fn(b) { + continue + } + wl = append(wl, b.Succs...) + } +} + +func Vararg(x *ir.Slice) ([]ir.Value, bool) { + var out []ir.Value + slice, ok := x.X.(*ir.Alloc) + if !ok { + return nil, false + } + for _, ref := range *slice.Referrers() { + if ref == x { + continue + } + if ref.Block() != x.Block() { + return nil, false + } + idx, ok := ref.(*ir.IndexAddr) + if !ok { + return nil, false + } + if len(*idx.Referrers()) != 1 { + return nil, false + } + store, ok := (*idx.Referrers())[0].(*ir.Store) + if !ok { + return nil, false + } + out = append(out, store.Val) + } + return out, true +} diff --git a/vendor/honnef.co/go/tools/ir/irutil/visit.go b/vendor/honnef.co/go/tools/ir/irutil/visit.go new file mode 100644 index 000000000..657c9cde7 --- /dev/null +++ b/vendor/honnef.co/go/tools/ir/irutil/visit.go @@ -0,0 +1,79 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package irutil // import "honnef.co/go/tools/ir/irutil" + +import "honnef.co/go/tools/ir" + +// This file defines utilities for visiting the IR of +// a Program. +// +// TODO(adonovan): test coverage. + +// AllFunctions finds and returns the set of functions potentially +// needed by program prog, as determined by a simple linker-style +// reachability algorithm starting from the members and method-sets of +// each package. The result may include anonymous functions and +// synthetic wrappers. +// +// Precondition: all packages are built. 
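The Walk, Reachable, and Vararg helpers above are small building blocks for checks. The sketch below shows one plausible way to use them; the package and function names are illustrative, and it assumes ir.CallCommon exposes its argument list as an Args field, as the ssa package this code derives from does.

package example

import (
	"honnef.co/go/tools/ir"
	"honnef.co/go/tools/ir/irutil"
)

// countReachable counts the basic blocks reachable from fn's entry block
// using the Walk helper above. Illustrative only.
func countReachable(fn *ir.Function) int {
	if len(fn.Blocks) == 0 {
		return 0 // external function: no body
	}
	n := 0
	irutil.Walk(fn.Blocks[0], func(*ir.BasicBlock) bool {
		n++
		return true // keep walking into successors
	})
	return n
}

// variadicArgs returns the values passed in the variadic position of call,
// if the builder materialized them as a local slice that irutil.Vararg
// understands; ok is false otherwise.
func variadicArgs(call *ir.Call) ([]ir.Value, bool) {
	args := call.Common().Args // assumption: Args mirrors ssa.CallCommon
	if len(args) == 0 {
		return nil, false
	}
	slice, ok := args[len(args)-1].(*ir.Slice)
	if !ok {
		return nil, false
	}
	return irutil.Vararg(slice)
}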
+// +func AllFunctions(prog *ir.Program) map[*ir.Function]bool { + visit := visitor{ + prog: prog, + seen: make(map[*ir.Function]bool), + } + visit.program() + return visit.seen +} + +type visitor struct { + prog *ir.Program + seen map[*ir.Function]bool +} + +func (visit *visitor) program() { + for _, pkg := range visit.prog.AllPackages() { + for _, mem := range pkg.Members { + if fn, ok := mem.(*ir.Function); ok { + visit.function(fn) + } + } + } + for _, T := range visit.prog.RuntimeTypes() { + mset := visit.prog.MethodSets.MethodSet(T) + for i, n := 0, mset.Len(); i < n; i++ { + visit.function(visit.prog.MethodValue(mset.At(i))) + } + } +} + +func (visit *visitor) function(fn *ir.Function) { + if !visit.seen[fn] { + visit.seen[fn] = true + var buf [10]*ir.Value // avoid alloc in common case + for _, b := range fn.Blocks { + for _, instr := range b.Instrs { + for _, op := range instr.Operands(buf[:0]) { + if fn, ok := (*op).(*ir.Function); ok { + visit.function(fn) + } + } + } + } + } +} + +// MainPackages returns the subset of the specified packages +// named "main" that define a main function. +// The result may include synthetic "testmain" packages. +func MainPackages(pkgs []*ir.Package) []*ir.Package { + var mains []*ir.Package + for _, pkg := range pkgs { + if pkg.Pkg.Name() == "main" && pkg.Func("main") != nil { + mains = append(mains, pkg) + } + } + return mains +} diff --git a/vendor/honnef.co/go/tools/ir/lift.go b/vendor/honnef.co/go/tools/ir/lift.go new file mode 100644 index 000000000..71d5c8cb0 --- /dev/null +++ b/vendor/honnef.co/go/tools/ir/lift.go @@ -0,0 +1,1063 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +// This file defines the lifting pass which tries to "lift" Alloc +// cells (new/local variables) into SSA registers, replacing loads +// with the dominating stored value, eliminating loads and stores, and +// inserting φ- and σ-nodes as needed. + +// Cited papers and resources: +// +// Ron Cytron et al. 1991. Efficiently computing SSA form... +// http://doi.acm.org/10.1145/115372.115320 +// +// Cooper, Harvey, Kennedy. 2001. A Simple, Fast Dominance Algorithm. +// Software Practice and Experience 2001, 4:1-10. +// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf +// +// Daniel Berlin, llvmdev mailing list, 2012. +// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html +// (Be sure to expand the whole thread.) +// +// C. Scott Ananian. 1997. The static single information form. +// +// Jeremy Singer. 2006. Static program analysis based on virtual register renaming. + +// TODO(adonovan): opt: there are many optimizations worth evaluating, and +// the conventional wisdom for SSA construction is that a simple +// algorithm well engineered often beats those of better asymptotic +// complexity on all but the most egregious inputs. +// +// Danny Berlin suggests that the Cooper et al. algorithm for +// computing the dominance frontier is superior to Cytron et al. +// Furthermore he recommends that rather than computing the DF for the +// whole function then renaming all alloc cells, it may be cheaper to +// compute the DF for each alloc cell separately and throw it away. +// +// Consider exploiting liveness information to avoid creating dead +// φ-nodes which we then immediately remove. +// +// Also see many other "TODO: opt" suggestions in the code. 
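To tie the loader and visitor helpers together, here is a hedged end-to-end sketch: it parses one file from source, builds IR for it with the BuildPackage helper from load.go, and then applies AllFunctions and MainPackages from above. The zero BuilderMode, the use of importer.Default(), and the Prog field on ir.Package are assumptions carried over from the ssa package this code is derived from, not requirements stated by the patch.

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"honnef.co/go/tools/ir"
	"honnef.co/go/tools/ir/irutil"
)

const src = `package main

import "fmt"

func main() { fmt.Println("hello") }
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "hello.go", src, 0)
	if err != nil {
		log.Fatal(err)
	}

	pkg := types.NewPackage("hello", "main")
	tc := &types.Config{Importer: importer.Default()}

	// BuildPackage type-checks the file, creates IR packages for the
	// dependencies from export data, and builds IR for pkg itself.
	irpkg, _, err := irutil.BuildPackage(tc, fset, pkg, []*ast.File{file}, ir.BuilderMode(0))
	if err != nil {
		log.Fatal(err)
	}

	// Prog on ir.Package mirrors the ssa package's field (assumption).
	fns := irutil.AllFunctions(irpkg.Prog)
	fmt.Printf("built %d reachable functions\n", len(fns))

	for _, m := range irutil.MainPackages([]*ir.Package{irpkg}) {
		fmt.Println("main package:", m.Pkg.Path())
	}
}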
+ +import ( + "fmt" + "go/types" + "os" +) + +// If true, show diagnostic information at each step of lifting. +// Very verbose. +const debugLifting = false + +// domFrontier maps each block to the set of blocks in its dominance +// frontier. The outer slice is conceptually a map keyed by +// Block.Index. The inner slice is conceptually a set, possibly +// containing duplicates. +// +// TODO(adonovan): opt: measure impact of dups; consider a packed bit +// representation, e.g. big.Int, and bitwise parallel operations for +// the union step in the Children loop. +// +// domFrontier's methods mutate the slice's elements but not its +// length, so their receivers needn't be pointers. +// +type domFrontier [][]*BasicBlock + +func (df domFrontier) add(u, v *BasicBlock) { + df[u.Index] = append(df[u.Index], v) +} + +// build builds the dominance frontier df for the dominator tree of +// fn, using the algorithm found in A Simple, Fast Dominance +// Algorithm, Figure 5. +// +// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA +// by pruning the entire IDF computation, rather than merely pruning +// the DF -> IDF step. +func (df domFrontier) build(fn *Function) { + for _, b := range fn.Blocks { + if len(b.Preds) >= 2 { + for _, p := range b.Preds { + runner := p + for runner != b.dom.idom { + df.add(runner, b) + runner = runner.dom.idom + } + } + } + } +} + +func buildDomFrontier(fn *Function) domFrontier { + df := make(domFrontier, len(fn.Blocks)) + df.build(fn) + return df +} + +type postDomFrontier [][]*BasicBlock + +func (rdf postDomFrontier) add(u, v *BasicBlock) { + rdf[u.Index] = append(rdf[u.Index], v) +} + +func (rdf postDomFrontier) build(fn *Function) { + for _, b := range fn.Blocks { + if len(b.Succs) >= 2 { + for _, s := range b.Succs { + runner := s + for runner != b.pdom.idom { + rdf.add(runner, b) + runner = runner.pdom.idom + } + } + } + } +} + +func buildPostDomFrontier(fn *Function) postDomFrontier { + rdf := make(postDomFrontier, len(fn.Blocks)) + rdf.build(fn) + return rdf +} + +func removeInstr(refs []Instruction, instr Instruction) []Instruction { + i := 0 + for _, ref := range refs { + if ref == instr { + continue + } + refs[i] = ref + i++ + } + for j := i; j != len(refs); j++ { + refs[j] = nil // aid GC + } + return refs[:i] +} + +func clearInstrs(instrs []Instruction) { + for i := range instrs { + instrs[i] = nil + } +} + +// lift replaces local and new Allocs accessed only with +// load/store by IR registers, inserting φ- and σ-nodes where necessary. +// The result is a program in pruned SSI form. +// +// Preconditions: +// - fn has no dead blocks (blockopt has run). +// - Def/use info (Operands and Referrers) is up-to-date. +// - The dominator tree is up-to-date. +// +func lift(fn *Function) { + // TODO(adonovan): opt: lots of little optimizations may be + // worthwhile here, especially if they cause us to avoid + // buildDomFrontier. For example: + // + // - Alloc never loaded? Eliminate. + // - Alloc never stored? Replace all loads with a zero constant. + // - Alloc stored once? Replace loads with dominating store; + // don't forget that an Alloc is itself an effective store + // of zero. + // - Alloc used only within a single block? + // Use degenerate algorithm avoiding φ-nodes. + // - Consider synergy with scalar replacement of aggregates (SRA). + // e.g. *(&x.f) where x is an Alloc. + // Perhaps we'd get better results if we generated this as x.f + // i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)). + // Unclear. 
+ // + // But we will start with the simplest correct code. + var df domFrontier + var rdf postDomFrontier + var closure *closure + var newPhis newPhiMap + var newSigmas newSigmaMap + + // During this pass we will replace some BasicBlock.Instrs + // (allocs, loads and stores) with nil, keeping a count in + // BasicBlock.gaps. At the end we will reset Instrs to the + // concatenation of all non-dead newPhis and non-nil Instrs + // for the block, reusing the original array if space permits. + + // While we're here, we also eliminate 'rundefers' + // instructions in functions that contain no 'defer' + // instructions. + usesDefer := false + + // Determine which allocs we can lift and number them densely. + // The renaming phase uses this numbering for compact maps. + numAllocs := 0 + for _, b := range fn.Blocks { + b.gaps = 0 + b.rundefers = 0 + for _, instr := range b.Instrs { + switch instr := instr.(type) { + case *Alloc: + if !liftable(instr) { + instr.index = -1 + continue + } + index := -1 + if numAllocs == 0 { + df = buildDomFrontier(fn) + rdf = buildPostDomFrontier(fn) + if len(fn.Blocks) > 2 { + closure = transitiveClosure(fn) + } + newPhis = make(newPhiMap, len(fn.Blocks)) + newSigmas = make(newSigmaMap, len(fn.Blocks)) + + if debugLifting { + title := false + for i, blocks := range df { + if blocks != nil { + if !title { + fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn) + title = true + } + fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks) + } + } + } + } + liftAlloc(closure, df, rdf, instr, newPhis, newSigmas) + index = numAllocs + numAllocs++ + instr.index = index + case *Defer: + usesDefer = true + case *RunDefers: + b.rundefers++ + } + } + } + + if numAllocs > 0 { + // renaming maps an alloc (keyed by index) to its replacement + // value. Initially the renaming contains nil, signifying the + // zero constant of the appropriate type; we construct the + // Const lazily at most once on each path through the domtree. + // TODO(adonovan): opt: cache per-function not per subtree. + renaming := make([]Value, numAllocs) + + // Renaming. + rename(fn.Blocks[0], renaming, newPhis, newSigmas) + + simplifyPhis(newPhis) + + // Eliminate dead φ- and σ-nodes. + markLiveNodes(fn.Blocks, newPhis, newSigmas) + } + + // Prepend remaining live φ-nodes to each block and possibly kill rundefers. + for _, b := range fn.Blocks { + var head []Instruction + if numAllocs > 0 { + nps := newPhis[b.Index] + head = make([]Instruction, 0, len(nps)) + for _, pred := range b.Preds { + nss := newSigmas[pred.Index] + idx := pred.succIndex(b) + for _, newSigma := range nss { + if sigma := newSigma.sigmas[idx]; sigma != nil && sigma.live { + head = append(head, sigma) + + // we didn't populate referrers before, as most + // sigma nodes will be killed + if refs := sigma.X.Referrers(); refs != nil { + *refs = append(*refs, sigma) + } + } else if sigma != nil { + sigma.block = nil + } + } + } + for _, np := range nps { + if np.phi.live { + head = append(head, np.phi) + } else { + for _, edge := range np.phi.Edges { + if refs := edge.Referrers(); refs != nil { + *refs = removeInstr(*refs, np.phi) + } + } + np.phi.block = nil + } + } + } + + rundefersToKill := b.rundefers + if usesDefer { + rundefersToKill = 0 + } + + j := len(head) + if j+b.gaps+rundefersToKill == 0 { + continue // fast path: no new phis or gaps + } + + // We could do straight copies instead of element-wise copies + // when both b.gaps and rundefersToKill are zero. 
However, + // that seems to only be the case ~1% of the time, which + // doesn't seem worth the extra branch. + + // Remove dead instructions, add phis and sigmas + ns := len(b.Instrs) + j - b.gaps - rundefersToKill + if ns <= cap(b.Instrs) { + // b.Instrs has enough capacity to store all instructions + + // OPT(dh): check cap vs the actually required space; if + // there is a big enough difference, it may be worth + // allocating a new slice, to avoid pinning memory. + dst := b.Instrs[:cap(b.Instrs)] + i := len(dst) - 1 + for n := len(b.Instrs) - 1; n >= 0; n-- { + instr := dst[n] + if instr == nil { + continue + } + if !usesDefer { + if _, ok := instr.(*RunDefers); ok { + continue + } + } + dst[i] = instr + i-- + } + off := i + 1 - len(head) + // aid GC + clearInstrs(dst[:off]) + dst = dst[off:] + copy(dst, head) + b.Instrs = dst + } else { + // not enough space, so allocate a new slice and copy + // over. + dst := make([]Instruction, ns) + copy(dst, head) + + for _, instr := range b.Instrs { + if instr == nil { + continue + } + if !usesDefer { + if _, ok := instr.(*RunDefers); ok { + continue + } + } + dst[j] = instr + j++ + } + b.Instrs = dst + } + } + + // Remove any fn.Locals that were lifted. + j := 0 + for _, l := range fn.Locals { + if l.index < 0 { + fn.Locals[j] = l + j++ + } + } + // Nil out fn.Locals[j:] to aid GC. + for i := j; i < len(fn.Locals); i++ { + fn.Locals[i] = nil + } + fn.Locals = fn.Locals[:j] +} + +func hasDirectReferrer(instr Instruction) bool { + for _, instr := range *instr.Referrers() { + switch instr.(type) { + case *Phi, *Sigma: + // ignore + default: + return true + } + } + return false +} + +func markLiveNodes(blocks []*BasicBlock, newPhis newPhiMap, newSigmas newSigmaMap) { + // Phi and sigma nodes are considered live if a non-phi, non-sigma + // node uses them. Once we find a node that is live, we mark all + // of its operands as used, too. + for _, npList := range newPhis { + for _, np := range npList { + phi := np.phi + if !phi.live && hasDirectReferrer(phi) { + markLivePhi(phi) + } + } + } + for _, npList := range newSigmas { + for _, np := range npList { + for _, sigma := range np.sigmas { + if sigma != nil && !sigma.live && hasDirectReferrer(sigma) { + markLiveSigma(sigma) + } + } + } + } + // Existing φ-nodes due to && and || operators + // are all considered live (see Go issue 19622). + for _, b := range blocks { + for _, phi := range b.phis() { + markLivePhi(phi.(*Phi)) + } + } +} + +func markLivePhi(phi *Phi) { + phi.live = true + for _, rand := range phi.Edges { + switch rand := rand.(type) { + case *Phi: + if !rand.live { + markLivePhi(rand) + } + case *Sigma: + if !rand.live { + markLiveSigma(rand) + } + } + } +} + +func markLiveSigma(sigma *Sigma) { + sigma.live = true + switch rand := sigma.X.(type) { + case *Phi: + if !rand.live { + markLivePhi(rand) + } + case *Sigma: + if !rand.live { + markLiveSigma(rand) + } + } +} + +// simplifyPhis replaces trivial phis with non-phi alternatives. Phi +// nodes where all edges are identical, or consist of only the phi +// itself and one other value, may be replaced with the value. +func simplifyPhis(newPhis newPhiMap) { + // find all phis that are trivial and can be replaced with a + // non-phi value. run until we reach a fixpoint, because replacing + // a phi may make other phis trivial. 
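	// (Concrete example, added for illustration: a node x = φ(y, y), or
	// x = φ(x, y) where one edge is the phi itself, conveys no extra
	// information and can be replaced by y; that replacement may in turn
	// make another phi trivial, hence the fixpoint iteration below.)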
+ for changed := true; changed; { + changed = false + for _, npList := range newPhis { + for _, np := range npList { + if np.phi.live { + // we're reusing 'live' to mean 'dead' in the context of simplifyPhis + continue + } + if r, ok := isUselessPhi(np.phi); ok { + // useless phi, replace its uses with the + // replacement value. the dead phi pass will clean + // up the phi afterwards. + replaceAll(np.phi, r) + np.phi.live = true + changed = true + } + } + } + } + + for _, npList := range newPhis { + for _, np := range npList { + np.phi.live = false + } + } +} + +type BlockSet struct { + idx int + values []bool + count int +} + +func NewBlockSet(size int) *BlockSet { + return &BlockSet{values: make([]bool, size)} +} + +func (s *BlockSet) Set(s2 *BlockSet) { + copy(s.values, s2.values) + s.count = 0 + for _, v := range s.values { + if v { + s.count++ + } + } +} + +func (s *BlockSet) Num() int { + return s.count +} + +func (s *BlockSet) Has(b *BasicBlock) bool { + if b.Index >= len(s.values) { + return false + } + return s.values[b.Index] +} + +// add adds b to the set and returns true if the set changed. +func (s *BlockSet) Add(b *BasicBlock) bool { + if s.values[b.Index] { + return false + } + s.count++ + s.values[b.Index] = true + s.idx = b.Index + + return true +} + +func (s *BlockSet) Clear() { + for j := range s.values { + s.values[j] = false + } + s.count = 0 +} + +// take removes an arbitrary element from a set s and +// returns its index, or returns -1 if empty. +func (s *BlockSet) Take() int { + // [i, end] + for i := s.idx; i < len(s.values); i++ { + if s.values[i] { + s.values[i] = false + s.idx = i + s.count-- + return i + } + } + + // [start, i) + for i := 0; i < s.idx; i++ { + if s.values[i] { + s.values[i] = false + s.idx = i + s.count-- + return i + } + } + + return -1 +} + +type closure struct { + span []uint32 + reachables []interval +} + +type interval uint32 + +const ( + flagMask = 1 << 31 + numBits = 20 + lengthBits = 32 - numBits - 1 + lengthMask = (1<>numBits + } else { + // large interval + i++ + start = uint32(inv & numMask) + end = uint32(r[i]) + } + if idx >= start && idx <= end { + return true + } + } + return false +} + +func (c closure) reachable(id int) []interval { + return c.reachables[c.span[id]:c.span[id+1]] +} + +func (c closure) walk(current *BasicBlock, b *BasicBlock, visited []bool) { + visited[b.Index] = true + for _, succ := range b.Succs { + if visited[succ.Index] { + continue + } + visited[succ.Index] = true + c.walk(current, succ, visited) + } +} + +func transitiveClosure(fn *Function) *closure { + reachable := make([]bool, len(fn.Blocks)) + c := &closure{} + c.span = make([]uint32, len(fn.Blocks)+1) + + addInterval := func(start, end uint32) { + if l := end - start; l <= 1<= 0 { // store of zero to Alloc cell + // Replace dominated loads by the zero value. + renaming[instr.index] = nil + if debugLifting { + fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr) + } + // Delete the Alloc. + u.Instrs[i] = nil + u.gaps++ + } + + case *Store: + if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell + // Replace dominated loads by the stored value. + renaming[alloc.index] = instr.Val + if debugLifting { + fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n", + instr, instr.Val.Name()) + } + if refs := instr.Addr.Referrers(); refs != nil { + *refs = removeInstr(*refs, instr) + } + if refs := instr.Val.Referrers(); refs != nil { + *refs = removeInstr(*refs, instr) + } + // Delete the Store. 
+ u.Instrs[i] = nil + u.gaps++ + } + + case *Load: + if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell + // In theory, we wouldn't be able to replace loads + // directly, because a loaded value could be used in + // different branches, in which case it should be + // replaced with different sigma nodes. But we can't + // simply defer replacement, either, because then + // later stores might incorrectly affect this load. + // + // To avoid doing renaming on _all_ values (instead of + // just loads and stores like we're doing), we make + // sure during code generation that each load is only + // used in one block. For example, in constant switch + // statements, where the tag is only evaluated once, + // we store it in a temporary and load it for each + // comparison, so that we have individual loads to + // replace. + newval := renamed(u.Parent(), renaming, alloc) + if debugLifting { + fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n", + instr.Name(), instr, newval) + } + replaceAll(instr, newval) + u.Instrs[i] = nil + u.gaps++ + } + + case *DebugRef: + if x, ok := instr.X.(*Alloc); ok && x.index >= 0 { + if instr.IsAddr { + instr.X = renamed(u.Parent(), renaming, x) + instr.IsAddr = false + + // Add DebugRef to instr.X's referrers. + if refs := instr.X.Referrers(); refs != nil { + *refs = append(*refs, instr) + } + } else { + // A source expression denotes the address + // of an Alloc that was optimized away. + instr.X = nil + + // Delete the DebugRef. + u.Instrs[i] = nil + u.gaps++ + } + } + } + } + + // update all outgoing sigma nodes with the dominating store + for _, sigmas := range newSigmas[u.Index] { + for _, sigma := range sigmas.sigmas { + if sigma == nil { + continue + } + sigma.X = renamed(u.Parent(), renaming, sigmas.alloc) + } + } + + // For each φ-node in a CFG successor, rename the edge. + for succi, v := range u.Succs { + phis := newPhis[v.Index] + if len(phis) == 0 { + continue + } + i := v.predIndex(u) + for _, np := range phis { + phi := np.phi + alloc := np.alloc + // if there's a sigma node, use it, else use the dominating value + var newval Value + for _, sigmas := range newSigmas[u.Index] { + if sigmas.alloc == alloc && sigmas.sigmas[succi] != nil { + newval = sigmas.sigmas[succi] + break + } + } + if newval == nil { + newval = renamed(u.Parent(), renaming, alloc) + } + if debugLifting { + fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n", + phi.Name(), u, v, i, alloc.Name(), newval.Name()) + } + phi.Edges[i] = newval + if prefs := newval.Referrers(); prefs != nil { + *prefs = append(*prefs, phi) + } + } + } + + // Continue depth-first recursion over domtree, pushing a + // fresh copy of the renaming map for each subtree. 
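	// (Illustrative note, not from the original source: copying the map
	// per child keeps renamings made while visiting one subtree from
	// leaking into its siblings; a block may only observe definitions
	// that dominate it.)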
+ r := make([]Value, len(renaming)) + for _, v := range u.dom.children { + // XXX add debugging + copy(r, renaming) + + // on entry to a block, the incoming sigma nodes become the new values for their alloc + if idx := u.succIndex(v); idx != -1 { + for _, sigma := range newSigmas[u.Index] { + if sigma.sigmas[idx] != nil { + r[sigma.alloc.index] = sigma.sigmas[idx] + } + } + } + rename(v, r, newPhis, newSigmas) + } + +} diff --git a/vendor/honnef.co/go/tools/ssa/lvalue.go b/vendor/honnef.co/go/tools/ir/lvalue.go similarity index 59% rename from vendor/honnef.co/go/tools/ssa/lvalue.go rename to vendor/honnef.co/go/tools/ir/lvalue.go index eb5d71e18..f676a1f7a 100644 --- a/vendor/honnef.co/go/tools/ssa/lvalue.go +++ b/vendor/honnef.co/go/tools/ir/lvalue.go @@ -2,14 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir // lvalues are the union of addressable expressions and map-index // expressions. import ( "go/ast" - "go/token" "go/types" ) @@ -18,27 +17,24 @@ import ( // pointer to permit updates to elements of maps. // type lvalue interface { - store(fn *Function, v Value) // stores v into the location - load(fn *Function) Value // loads the contents of the location - address(fn *Function) Value // address of the location - typ() types.Type // returns the type of the location + store(fn *Function, v Value, source ast.Node) // stores v into the location + load(fn *Function, source ast.Node) Value // loads the contents of the location + address(fn *Function) Value // address of the location + typ() types.Type // returns the type of the location } // An address is an lvalue represented by a true pointer. type address struct { addr Value - pos token.Pos // source position - expr ast.Expr // source syntax of the value (not address) [debug mode] + expr ast.Expr // source syntax of the value (not address) [debug mode] } -func (a *address) load(fn *Function) Value { - load := emitLoad(fn, a.addr) - load.pos = a.pos - return load +func (a *address) load(fn *Function, source ast.Node) Value { + return emitLoad(fn, a.addr, source) } -func (a *address) store(fn *Function, v Value) { - store := emitStore(fn, a.addr, v, a.pos) +func (a *address) store(fn *Function, v Value, source ast.Node) { + store := emitStore(fn, a.addr, v, source) if a.expr != nil { // store.Val is v, converted for assignability. emitDebugRef(fn, a.expr, store.Val, false) @@ -57,38 +53,35 @@ func (a *address) typ() types.Type { } // An element is an lvalue represented by m[k], the location of an -// element of a map or string. These locations are not addressable +// element of a map. These locations are not addressable // since pointers cannot be formed from them, but they do support -// load(), and in the case of maps, store(). +// load() and store(). 
// type element struct { - m, k Value // map or string - t types.Type // map element type or string byte type - pos token.Pos // source position of colon ({k:v}) or lbrack (m[k]=v) + m, k Value // map + t types.Type // map element type } -func (e *element) load(fn *Function) Value { - l := &Lookup{ +func (e *element) load(fn *Function, source ast.Node) Value { + l := &MapLookup{ X: e.m, Index: e.k, } - l.setPos(e.pos) l.setType(e.t) - return fn.emit(l) + return fn.emit(l, source) } -func (e *element) store(fn *Function, v Value) { +func (e *element) store(fn *Function, v Value, source ast.Node) { up := &MapUpdate{ Map: e.m, Key: e.k, - Value: emitConv(fn, v, e.t), + Value: emitConv(fn, v, e.t, source), } - up.pos = e.pos - fn.emit(up) + fn.emit(up, source) } func (e *element) address(fn *Function) Value { - panic("map/string elements are not addressable") + panic("map elements are not addressable") } func (e *element) typ() types.Type { @@ -100,15 +93,15 @@ func (e *element) typ() types.Type { // type blank struct{} -func (bl blank) load(fn *Function) Value { +func (bl blank) load(fn *Function, source ast.Node) Value { panic("blank.load is illegal") } -func (bl blank) store(fn *Function, v Value) { +func (bl blank) store(fn *Function, v Value, source ast.Node) { s := &BlankStore{ Val: v, } - fn.emit(s) + fn.emit(s, source) } func (bl blank) address(fn *Function) Value { diff --git a/vendor/honnef.co/go/tools/ssa/methods.go b/vendor/honnef.co/go/tools/ir/methods.go similarity index 99% rename from vendor/honnef.co/go/tools/ssa/methods.go rename to vendor/honnef.co/go/tools/ir/methods.go index 9cf383916..517f448b8 100644 --- a/vendor/honnef.co/go/tools/ssa/methods.go +++ b/vendor/honnef.co/go/tools/ir/methods.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir // This file defines utilities for population of method sets. diff --git a/vendor/honnef.co/go/tools/ssa/mode.go b/vendor/honnef.co/go/tools/ir/mode.go similarity index 65% rename from vendor/honnef.co/go/tools/ssa/mode.go rename to vendor/honnef.co/go/tools/ir/mode.go index d2a269893..da548fdbb 100644 --- a/vendor/honnef.co/go/tools/ssa/mode.go +++ b/vendor/honnef.co/go/tools/ir/mode.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir // This file defines the BuilderMode type and its command-line flag. @@ -15,32 +15,30 @@ import ( // // *BuilderMode satisfies the flag.Value interface. Example: // -// var mode = ssa.BuilderMode(0) -// func init() { flag.Var(&mode, "build", ssa.BuilderModeDoc) } +// var mode = ir.BuilderMode(0) +// func init() { flag.Var(&mode, "build", ir.BuilderModeDoc) } // type BuilderMode uint const ( PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout - PrintFunctions // Print function SSA code to stdout - LogSource // Log source locations as SSA builder progresses + PrintFunctions // Print function IR code to stdout + PrintSource // Print source code when printing function IR + LogSource // Log source locations as IR builder progresses SanityCheckFunctions // Perform sanity checking of function bodies - NaiveForm // Build naïve SSA form: don't replace local loads/stores with registers - BuildSerially // Build packages serially, not in parallel. 
+ NaiveForm // Build naïve IR form: don't replace local loads/stores with registers GlobalDebug // Enable debug info for all packages - BareInits // Build init functions without guards or calls to dependent inits ) -const BuilderModeDoc = `Options controlling the SSA builder. +const BuilderModeDoc = `Options controlling the IR builder. The value is a sequence of zero or more of these letters: -C perform sanity [C]hecking of the SSA form. +C perform sanity [C]hecking of the IR form. D include [D]ebug info for every function. P print [P]ackage inventory. -F print [F]unction SSA code. -S log [S]ource locations as SSA builder progresses. -L build distinct packages seria[L]ly instead of in parallel. -N build [N]aive SSA form: don't replace local loads/stores with registers. -I build bare [I]nit functions: no init guards or calls to dependent inits. +F print [F]unction IR code. +A print [A]ST nodes responsible for IR instructions +S log [S]ource locations as IR builder progresses. +N build [N]aive IR form: don't replace local loads/stores with registers. ` func (m BuilderMode) String() string { @@ -54,6 +52,9 @@ func (m BuilderMode) String() string { if m&PrintFunctions != 0 { buf.WriteByte('F') } + if m&PrintSource != 0 { + buf.WriteByte('A') + } if m&LogSource != 0 { buf.WriteByte('S') } @@ -63,9 +64,6 @@ func (m BuilderMode) String() string { if m&NaiveForm != 0 { buf.WriteByte('N') } - if m&BuildSerially != 0 { - buf.WriteByte('L') - } return buf.String() } @@ -80,14 +78,14 @@ func (m *BuilderMode) Set(s string) error { mode |= PrintPackages case 'F': mode |= PrintFunctions + case 'A': + mode |= PrintSource case 'S': - mode |= LogSource | BuildSerially + mode |= LogSource case 'C': mode |= SanityCheckFunctions case 'N': mode |= NaiveForm - case 'L': - mode |= BuildSerially default: return fmt.Errorf("unknown BuilderMode option: %q", c) } diff --git a/vendor/honnef.co/go/tools/ssa/print.go b/vendor/honnef.co/go/tools/ir/print.go similarity index 54% rename from vendor/honnef.co/go/tools/ssa/print.go rename to vendor/honnef.co/go/tools/ir/print.go index 6fd277277..c16c08efa 100644 --- a/vendor/honnef.co/go/tools/ssa/print.go +++ b/vendor/honnef.co/go/tools/ir/print.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir // This file implements the String() methods for all Value and // Instruction types. @@ -25,6 +25,9 @@ import ( // references are package-qualified. 
// func relName(v Value, i Instruction) string { + if v == nil { + return "" + } var from *types.Package if i != nil { from = i.Parent().pkg() @@ -32,8 +35,6 @@ func relName(v Value, i Instruction) string { switch v := v.(type) { case Member: // *Function or *Global return v.RelString(from) - case *Const: - return v.RelString(from) } return v.Name() } @@ -58,36 +59,40 @@ func relString(m Member, from *types.Package) string { func (v *Parameter) String() string { from := v.Parent().pkg() - return fmt.Sprintf("parameter %s : %s", v.Name(), relType(v.Type(), from)) + return fmt.Sprintf("Parameter <%s> {%s}", relType(v.Type(), from), v.name) } func (v *FreeVar) String() string { from := v.Parent().pkg() - return fmt.Sprintf("freevar %s : %s", v.Name(), relType(v.Type(), from)) + return fmt.Sprintf("FreeVar <%s> %s", relType(v.Type(), from), v.Name()) } func (v *Builtin) String() string { - return fmt.Sprintf("builtin %s", v.Name()) + return fmt.Sprintf("Builtin %s", v.Name()) } // Instruction.String() func (v *Alloc) String() string { - op := "local" + from := v.Parent().pkg() + storage := "Stack" if v.Heap { - op = "new" + storage = "Heap" } + return fmt.Sprintf("%sAlloc <%s>", storage, relType(v.Type(), from)) +} + +func (v *Sigma) String() string { from := v.Parent().pkg() - return fmt.Sprintf("%s %s (%s)", op, relType(deref(v.Type()), from), v.Comment) + s := fmt.Sprintf("Sigma <%s> [b%d] %s", relType(v.Type(), from), v.From.Index, v.X.Name()) + return s } func (v *Phi) String() string { var b bytes.Buffer - b.WriteString("phi [") + fmt.Fprintf(&b, "Phi <%s>", v.Type()) for i, edge := range v.Edges { - if i > 0 { - b.WriteString(", ") - } + b.WriteString(" ") // Be robust against malformed CFG. if v.block == nil { b.WriteString("??") @@ -97,40 +102,35 @@ func (v *Phi) String() string { if i < len(v.block.Preds) { block = v.block.Preds[i].Index } - fmt.Fprintf(&b, "%d: ", block) + fmt.Fprintf(&b, "%d:", block) edgeVal := "" // be robust if edge != nil { edgeVal = relName(edge, v) } b.WriteString(edgeVal) } - b.WriteString("]") - if v.Comment != "" { - b.WriteString(" #") - b.WriteString(v.Comment) - } return b.String() } func printCall(v *CallCommon, prefix string, instr Instruction) string { var b bytes.Buffer - b.WriteString(prefix) if !v.IsInvoke() { - b.WriteString(relName(v.Value, instr)) + if value, ok := instr.(Value); ok { + fmt.Fprintf(&b, "%s <%s> %s", prefix, relType(value.Type(), instr.Parent().pkg()), relName(v.Value, instr)) + } else { + fmt.Fprintf(&b, "%s %s", prefix, relName(v.Value, instr)) + } } else { - fmt.Fprintf(&b, "invoke %s.%s", relName(v.Value, instr), v.Method.Name()) - } - b.WriteString("(") - for i, arg := range v.Args { - if i > 0 { - b.WriteString(", ") + if value, ok := instr.(Value); ok { + fmt.Fprintf(&b, "%sInvoke <%s> %s.%s", prefix, relType(value.Type(), instr.Parent().pkg()), relName(v.Value, instr), v.Method.Name()) + } else { + fmt.Fprintf(&b, "%sInvoke %s.%s", prefix, relName(v.Value, instr), v.Method.Name()) } - b.WriteString(relName(arg, instr)) } - if v.Signature().Variadic() { - b.WriteString("...") + for _, arg := range v.Args { + b.WriteString(" ") + b.WriteString(relName(arg, instr)) } - b.WriteString(")") return b.String() } @@ -139,73 +139,59 @@ func (c *CallCommon) String() string { } func (v *Call) String() string { - return printCall(&v.Call, "", v) + return printCall(&v.Call, "Call", v) } func (v *BinOp) String() string { - return fmt.Sprintf("%s %s %s", relName(v.X, v), v.Op.String(), relName(v.Y, v)) + return fmt.Sprintf("BinOp <%s> 
{%s} %s %s", relType(v.Type(), v.Parent().pkg()), v.Op.String(), relName(v.X, v), relName(v.Y, v)) } func (v *UnOp) String() string { - return fmt.Sprintf("%s%s%s", v.Op, relName(v.X, v), commaOk(v.CommaOk)) + return fmt.Sprintf("UnOp <%s> {%s} %s", relType(v.Type(), v.Parent().pkg()), v.Op.String(), relName(v.X, v)) +} + +func (v *Load) String() string { + return fmt.Sprintf("Load <%s> %s", relType(v.Type(), v.Parent().pkg()), relName(v.X, v)) } func printConv(prefix string, v, x Value) string { from := v.Parent().pkg() - return fmt.Sprintf("%s %s <- %s (%s)", + return fmt.Sprintf("%s <%s> %s", prefix, relType(v.Type(), from), - relType(x.Type(), from), relName(x, v.(Instruction))) } -func (v *ChangeType) String() string { return printConv("changetype", v, v.X) } -func (v *Convert) String() string { return printConv("convert", v, v.X) } -func (v *ChangeInterface) String() string { return printConv("change interface", v, v.X) } -func (v *MakeInterface) String() string { return printConv("make", v, v.X) } +func (v *ChangeType) String() string { return printConv("ChangeType", v, v.X) } +func (v *Convert) String() string { return printConv("Convert", v, v.X) } +func (v *ChangeInterface) String() string { return printConv("ChangeInterface", v, v.X) } +func (v *MakeInterface) String() string { return printConv("MakeInterface", v, v.X) } func (v *MakeClosure) String() string { + from := v.Parent().pkg() var b bytes.Buffer - fmt.Fprintf(&b, "make closure %s", relName(v.Fn, v)) + fmt.Fprintf(&b, "MakeClosure <%s> %s", relType(v.Type(), from), relName(v.Fn, v)) if v.Bindings != nil { - b.WriteString(" [") - for i, c := range v.Bindings { - if i > 0 { - b.WriteString(", ") - } + for _, c := range v.Bindings { + b.WriteString(" ") b.WriteString(relName(c, v)) } - b.WriteString("]") } return b.String() } func (v *MakeSlice) String() string { from := v.Parent().pkg() - return fmt.Sprintf("make %s %s %s", + return fmt.Sprintf("MakeSlice <%s> %s %s", relType(v.Type(), from), relName(v.Len, v), relName(v.Cap, v)) } func (v *Slice) String() string { - var b bytes.Buffer - b.WriteString("slice ") - b.WriteString(relName(v.X, v)) - b.WriteString("[") - if v.Low != nil { - b.WriteString(relName(v.Low, v)) - } - b.WriteString(":") - if v.High != nil { - b.WriteString(relName(v.High, v)) - } - if v.Max != nil { - b.WriteString(":") - b.WriteString(relName(v.Max, v)) - } - b.WriteString("]") - return b.String() + from := v.Parent().pkg() + return fmt.Sprintf("Slice <%s> %s %s %s %s", + relType(v.Type(), from), relName(v.X, v), relName(v.Low, v), relName(v.High, v), relName(v.Max, v)) } func (v *MakeMap) String() string { @@ -214,22 +200,23 @@ func (v *MakeMap) String() string { res = relName(v.Reserve, v) } from := v.Parent().pkg() - return fmt.Sprintf("make %s %s", relType(v.Type(), from), res) + return fmt.Sprintf("MakeMap <%s> %s", relType(v.Type(), from), res) } func (v *MakeChan) String() string { from := v.Parent().pkg() - return fmt.Sprintf("make %s %s", relType(v.Type(), from), relName(v.Size, v)) + return fmt.Sprintf("MakeChan <%s> %s", relType(v.Type(), from), relName(v.Size, v)) } func (v *FieldAddr) String() string { + from := v.Parent().pkg() st := deref(v.X.Type()).Underlying().(*types.Struct) // Be robust against a bad index. name := "?" 
if 0 <= v.Field && v.Field < st.NumFields() { name = st.Field(v.Field).Name() } - return fmt.Sprintf("&%s.%s [#%d]", relName(v.X, v), name, v.Field) + return fmt.Sprintf("FieldAddr <%s> [%d] (%s) %s", relType(v.Type(), from), v.Field, name, relName(v.X, v)) } func (v *Field) String() string { @@ -239,36 +226,49 @@ func (v *Field) String() string { if 0 <= v.Field && v.Field < st.NumFields() { name = st.Field(v.Field).Name() } - return fmt.Sprintf("%s.%s [#%d]", relName(v.X, v), name, v.Field) + from := v.Parent().pkg() + return fmt.Sprintf("Field <%s> [%d] (%s) %s", relType(v.Type(), from), v.Field, name, relName(v.X, v)) } func (v *IndexAddr) String() string { - return fmt.Sprintf("&%s[%s]", relName(v.X, v), relName(v.Index, v)) + from := v.Parent().pkg() + return fmt.Sprintf("IndexAddr <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v)) } func (v *Index) String() string { - return fmt.Sprintf("%s[%s]", relName(v.X, v), relName(v.Index, v)) + from := v.Parent().pkg() + return fmt.Sprintf("Index <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v)) } -func (v *Lookup) String() string { - return fmt.Sprintf("%s[%s]%s", relName(v.X, v), relName(v.Index, v), commaOk(v.CommaOk)) +func (v *MapLookup) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("MapLookup <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v)) +} + +func (v *StringLookup) String() string { + from := v.Parent().pkg() + return fmt.Sprintf("StringLookup <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v)) } func (v *Range) String() string { - return "range " + relName(v.X, v) + from := v.Parent().pkg() + return fmt.Sprintf("Range <%s> %s", relType(v.Type(), from), relName(v.X, v)) } func (v *Next) String() string { - return "next " + relName(v.Iter, v) + from := v.Parent().pkg() + return fmt.Sprintf("Next <%s> %s", relType(v.Type(), from), relName(v.Iter, v)) } func (v *TypeAssert) String() string { from := v.Parent().pkg() - return fmt.Sprintf("typeassert%s %s.(%s)", commaOk(v.CommaOk), relName(v.X, v), relType(v.AssertedType, from)) + return fmt.Sprintf("TypeAssert <%s> %s", relType(v.Type(), from), relName(v.X, v)) } func (v *Extract) String() string { - return fmt.Sprintf("extract %s #%d", relName(v.Tuple, v), v.Index) + from := v.Parent().pkg() + name := v.Tuple.Type().(*types.Tuple).At(v.Index).Name() + return fmt.Sprintf("Extract <%s> [%d] (%s) %s", relType(v.Type(), from), v.Index, name, relName(v.Tuple, v)) } func (s *Jump) String() string { @@ -277,7 +277,20 @@ func (s *Jump) String() string { if s.block != nil && len(s.block.Succs) == 1 { block = s.block.Succs[0].Index } - return fmt.Sprintf("jump %d", block) + str := fmt.Sprintf("Jump → b%d", block) + if s.Comment != "" { + str = fmt.Sprintf("%s # %s", str, s.Comment) + } + return str +} + +func (s *Unreachable) String() string { + // Be robust against malformed CFG. 
+ block := -1 + if s.block != nil && len(s.block.Succs) == 1 { + block = s.block.Succs[0].Index + } + return fmt.Sprintf("Unreachable → b%d", block) } func (s *If) String() string { @@ -287,41 +300,70 @@ func (s *If) String() string { tblock = s.block.Succs[0].Index fblock = s.block.Succs[1].Index } - return fmt.Sprintf("if %s goto %d else %d", relName(s.Cond, s), tblock, fblock) + return fmt.Sprintf("If %s → b%d b%d", relName(s.Cond, s), tblock, fblock) +} + +func (s *ConstantSwitch) String() string { + var b bytes.Buffer + fmt.Fprintf(&b, "ConstantSwitch %s", relName(s.Tag, s)) + for _, cond := range s.Conds { + fmt.Fprintf(&b, " %s", relName(cond, s)) + } + fmt.Fprint(&b, " →") + for _, succ := range s.block.Succs { + fmt.Fprintf(&b, " b%d", succ.Index) + } + return b.String() +} + +func (s *TypeSwitch) String() string { + from := s.Parent().pkg() + var b bytes.Buffer + fmt.Fprintf(&b, "TypeSwitch <%s> %s", relType(s.typ, from), relName(s.Tag, s)) + for _, cond := range s.Conds { + fmt.Fprintf(&b, " %q", relType(cond, s.block.parent.pkg())) + } + return b.String() } func (s *Go) String() string { - return printCall(&s.Call, "go ", s) + return printCall(&s.Call, "Go", s) } func (s *Panic) String() string { - return "panic " + relName(s.X, s) + // Be robust against malformed CFG. + block := -1 + if s.block != nil && len(s.block.Succs) == 1 { + block = s.block.Succs[0].Index + } + return fmt.Sprintf("Panic %s → b%d", relName(s.X, s), block) } func (s *Return) String() string { var b bytes.Buffer - b.WriteString("return") - for i, r := range s.Results { - if i == 0 { - b.WriteString(" ") - } else { - b.WriteString(", ") - } + b.WriteString("Return") + for _, r := range s.Results { + b.WriteString(" ") b.WriteString(relName(r, s)) } return b.String() } func (*RunDefers) String() string { - return "rundefers" + return "RunDefers" } func (s *Send) String() string { - return fmt.Sprintf("send %s <- %s", relName(s.Chan, s), relName(s.X, s)) + return fmt.Sprintf("Send %s %s", relName(s.Chan, s), relName(s.X, s)) +} + +func (recv *Recv) String() string { + from := recv.Parent().pkg() + return fmt.Sprintf("Recv <%s> %s", relType(recv.Type(), from), relName(recv.Chan, recv)) } func (s *Defer) String() string { - return printCall(&s.Call, "defer ", s) + return printCall(&s.Call, "Defer", s) } func (s *Select) String() string { @@ -341,21 +383,23 @@ func (s *Select) String() string { } non := "" if !s.Blocking { - non = "non" + non = "Non" } - return fmt.Sprintf("select %sblocking [%s]", non, b.String()) + from := s.Parent().pkg() + return fmt.Sprintf("Select%sBlocking <%s> [%s]", non, relType(s.Type(), from), b.String()) } func (s *Store) String() string { - return fmt.Sprintf("*%s = %s", relName(s.Addr, s), relName(s.Val, s)) + return fmt.Sprintf("Store {%s} %s %s", + s.Val.Type(), relName(s.Addr, s), relName(s.Val, s)) } func (s *BlankStore) String() string { - return fmt.Sprintf("_ = %s", relName(s.Val, s)) + return fmt.Sprintf("BlankStore %s", relName(s.Val, s)) } func (s *MapUpdate) String() string { - return fmt.Sprintf("%s[%s] = %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s)) + return fmt.Sprintf("MapUpdate %s %s %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s)) } func (s *DebugRef) String() string { @@ -426,10 +470,3 @@ func WritePackage(buf *bytes.Buffer, p *Package) { fmt.Fprintf(buf, "\n") } - -func commaOk(x bool) string { - if x { - return ",ok" - } - return "" -} diff --git a/vendor/honnef.co/go/tools/ssa/sanity.go b/vendor/honnef.co/go/tools/ir/sanity.go 
similarity index 89% rename from vendor/honnef.co/go/tools/ssa/sanity.go rename to vendor/honnef.co/go/tools/ir/sanity.go index 1d29b66b0..ff9edbc64 100644 --- a/vendor/honnef.co/go/tools/ssa/sanity.go +++ b/vendor/honnef.co/go/tools/ir/sanity.go @@ -2,9 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir -// An optional pass for sanity-checking invariants of the SSA representation. +// An optional pass for sanity-checking invariants of the IR representation. // Currently it checks CFG invariants but little at the instruction level. import ( @@ -23,7 +23,7 @@ type sanity struct { insane bool } -// sanityCheck performs integrity checking of the SSA representation +// sanityCheck performs integrity checking of the IR representation // of the function fn and returns true if it was valid. Diagnostics // are written to reporter if non-nil, os.Stderr otherwise. Some // diagnostics are only warnings and do not imply a negative result. @@ -89,8 +89,15 @@ func findDuplicate(blocks []*BasicBlock) *BasicBlock { func (s *sanity) checkInstr(idx int, instr Instruction) { switch instr := instr.(type) { - case *If, *Jump, *Return, *Panic: + case *If, *Jump, *Return, *Panic, *Unreachable, *ConstantSwitch: s.errorf("control flow instruction not at end of block") + case *Sigma: + if idx > 0 { + prev := s.block.Instrs[idx-1] + if _, ok := prev.(*Sigma); !ok { + s.errorf("Sigma instruction follows a non-Sigma: %T", prev) + } + } case *Phi: if idx == 0 { // It suffices to apply this check to just the first phi node. @@ -99,8 +106,10 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { } } else { prev := s.block.Instrs[idx-1] - if _, ok := prev.(*Phi); !ok { - s.errorf("Phi instruction follows a non-Phi: %T", prev) + switch prev.(type) { + case *Phi, *Sigma: + default: + s.errorf("Phi instruction follows a non-Phi, non-Sigma: %T", prev) } } if ne, np := len(instr.Edges), len(s.block.Preds); ne != np { @@ -109,7 +118,7 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { } else { for i, e := range instr.Edges { if e == nil { - s.errorf("phi node '%s' has no value for edge #%d from %s", instr.Comment, i, s.block.Preds[i]) + s.errorf("phi node '%v' has no value for edge #%d from %s", instr, i, s.block.Preds[i]) } } } @@ -146,7 +155,8 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { case *Go: case *Index: case *IndexAddr: - case *Lookup: + case *MapLookup: + case *StringLookup: case *MakeChan: case *MakeClosure: numFree := len(instr.Fn.(*Function).FreeVars) @@ -175,8 +185,11 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { case *UnOp: case *DebugRef: case *BlankStore: - case *Sigma: - // TODO(adonovan): implement checks. + case *Load: + case *Parameter: + case *Const: + case *Recv: + case *TypeSwitch: default: panic(fmt.Sprintf("Unknown instruction type: %T", instr)) } @@ -196,7 +209,9 @@ func (s *sanity) checkInstr(idx int, instr Instruction) { } else if t == tRangeIter { // not a proper type; ignore. 
} else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 { - s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t) + if _, ok := v.(*Const); !ok { + s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t) + } } s.checkReferrerList(v) } @@ -239,11 +254,19 @@ func (s *sanity) checkFinalInstr(instr Instruction) { } case *Panic: - if nsuccs := len(s.block.Succs); nsuccs != 0 { - s.errorf("Panic-terminated block has %d successors; expected none", nsuccs) + if nsuccs := len(s.block.Succs); nsuccs != 1 { + s.errorf("Panic-terminated block has %d successors; expected one", nsuccs) return } + case *Unreachable: + if nsuccs := len(s.block.Succs); nsuccs != 1 { + s.errorf("Unreachable-terminated block has %d successors; expected one", nsuccs) + return + } + + case *ConstantSwitch: + default: s.errorf("non-control flow instruction at end of block") } @@ -260,9 +283,8 @@ func (s *sanity) checkBlock(b *BasicBlock, index int) { } // Check all blocks are reachable. - // (The entry block is always implicitly reachable, - // as is the Recover block, if any.) - if (index > 0 && b != b.parent.Recover) && len(b.Preds) == 0 { + // (The entry block is always implicitly reachable, the exit block may be unreachable.) + if index > 1 && len(b.Preds) == 0 { s.warnf("unreachable block") if b.Instrs == nil { // Since this block is about to be pruned, @@ -395,7 +417,11 @@ func (s *sanity) checkReferrerList(v Value) { } for i, ref := range *refs { if _, ok := s.instrs[ref]; !ok { - s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref) + if val, ok := ref.(Value); ok { + s.errorf("%s.Referrers()[%d] = %s = %s is not an instruction belonging to this function", v.Name(), i, val.Name(), val) + } else { + s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref) + } } } } @@ -426,7 +452,7 @@ func (s *sanity) checkFunction(fn *Function) bool { s.errorf("nil Pkg") } } - if src, syn := fn.Synthetic == "", fn.Syntax() != nil; src != syn { + if src, syn := fn.Synthetic == "", fn.source != nil; src != syn { s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn) } for i, l := range fn.Locals { @@ -481,9 +507,6 @@ func (s *sanity) checkFunction(fn *Function) bool { } s.checkBlock(b, i) } - if fn.Recover != nil && fn.Blocks[fn.Recover.Index] != fn.Recover { - s.errorf("Recover block is not in Blocks slice") - } s.block = nil for i, anon := range fn.AnonFuncs { @@ -522,14 +545,11 @@ func sanityCheckPackage(pkg *Package) { if obj.Name() != name { if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") { // Ok. The name of a declared init function varies between - // its types.Func ("init") and its ssa.Function ("init#%d"). + // its types.Func ("init") and its ir.Function ("init#%d"). 
} else { panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s", pkg.Pkg.Path(), mem, obj.Name(), name)) } } - if obj.Pos() != mem.Pos() { - panic(fmt.Sprintf("%s Pos=%d obj.Pos=%d", mem, mem.Pos(), obj.Pos())) - } } } diff --git a/vendor/honnef.co/go/tools/ssa/source.go b/vendor/honnef.co/go/tools/ir/source.go similarity index 80% rename from vendor/honnef.co/go/tools/ssa/source.go rename to vendor/honnef.co/go/tools/ir/source.go index 8d9cca170..93d1ccbd2 100644 --- a/vendor/honnef.co/go/tools/ssa/source.go +++ b/vendor/honnef.co/go/tools/ir/source.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir // This file defines utilities for working with source positions // or source-level named entities ("objects"). @@ -25,7 +25,7 @@ import ( // Returns nil if not found; reasons might include: // - the node is not enclosed by any function. // - the node is within an anonymous function (FuncLit) and -// its SSA function has not been created yet +// its IR function has not been created yet // (pkg.Build() has not yet been called). // func EnclosingFunction(pkg *Package, path []ast.Node) *Function { @@ -46,7 +46,7 @@ outer: continue outer } } - // SSA function not found: + // IR function not found: // - package not yet built, or maybe // - builder skipped FuncLit in dead block // (in principle; but currently the Builder @@ -62,9 +62,9 @@ outer: // package-level variable. // // Unlike EnclosingFunction, the behaviour of this function does not -// depend on whether SSA code for pkg has been built, so it can be +// depend on whether IR code for pkg has been built, so it can be // used to quickly reject check inputs that will cause -// EnclosingFunction to fail, prior to SSA building. +// EnclosingFunction to fail, prior to IR building. // func HasEnclosingFunction(pkg *Package, path []ast.Node) bool { return findEnclosingPackageLevelFunction(pkg, path) != nil @@ -83,23 +83,14 @@ func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function } case *ast.FuncDecl: - if decl.Recv == nil && decl.Name.Name == "init" { - // Explicit init() function. - for _, b := range pkg.init.Blocks { - for _, instr := range b.Instrs { - if instr, ok := instr.(*Call); ok { - if callee, ok := instr.Call.Value.(*Function); ok && callee.Pkg == pkg && callee.Pos() == decl.Name.NamePos { - return callee - } - } - } - } - // Hack: return non-nil when SSA is not yet + // Declared function/method. + fn := findNamedFunc(pkg, decl.Pos()) + if fn == nil && decl.Recv == nil && decl.Name.Name == "init" { + // Hack: return non-nil when IR is not yet // built so that HasEnclosingFunction works. return pkg.init } - // Declared function/method. - return findNamedFunc(pkg, decl.Name.NamePos) + return fn } } return nil // not in any function @@ -109,29 +100,15 @@ func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function // position pos. // func findNamedFunc(pkg *Package, pos token.Pos) *Function { - // Look at all package members and method sets of named types. - // Not very efficient. - for _, mem := range pkg.Members { - switch mem := mem.(type) { - case *Function: - if mem.Pos() == pos { - return mem - } - case *Type: - mset := pkg.Prog.MethodSets.MethodSet(types.NewPointer(mem.Type())) - for i, n := 0, mset.Len(); i < n; i++ { - // Don't call Program.Method: avoid creating wrappers. 
- obj := mset.At(i).Obj().(*types.Func) - if obj.Pos() == pos { - return pkg.values[obj].(*Function) - } - } + for _, fn := range pkg.Functions { + if fn.Pos() == pos { + return fn } } return nil } -// ValueForExpr returns the SSA Value that corresponds to non-constant +// ValueForExpr returns the IR Value that corresponds to non-constant // expression e. // // It returns nil if no value was found, e.g. @@ -149,10 +126,10 @@ func findNamedFunc(pkg *Package, pos token.Pos) *Function { // The types of e (or &e, if isAddr) and the result are equal // (modulo "untyped" bools resulting from comparisons). // -// (Tip: to find the ssa.Value given a source position, use +// (Tip: to find the ir.Value given a source position, use // astutil.PathEnclosingInterval to locate the ast.Node, then // EnclosingFunction to locate the Function, then ValueForExpr to find -// the ssa.Value.) +// the ir.Value.) // func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { if f.debugInfo() { // (opt) @@ -172,9 +149,9 @@ func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) { // --- Lookup functions for source-level named entities (types.Objects) --- -// Package returns the SSA Package corresponding to the specified +// Package returns the IR Package corresponding to the specified // type-checker package object. -// It returns nil if no such SSA package has been created. +// It returns nil if no such IR package has been created. // func (prog *Program) Package(obj *types.Package) *Package { return prog.packages[obj] @@ -203,7 +180,7 @@ func (prog *Program) FuncValue(obj *types.Func) *Function { return fn } -// ConstValue returns the SSA Value denoted by the source-level named +// ConstValue returns the IR Value denoted by the source-level named // constant obj. // func (prog *Program) ConstValue(obj *types.Const) *Const { @@ -221,12 +198,12 @@ func (prog *Program) ConstValue(obj *types.Const) *Const { return NewConst(obj.Val(), obj.Type()) } -// VarValue returns the SSA Value that corresponds to a specific +// VarValue returns the IR Value that corresponds to a specific // identifier denoting the source-level named variable obj. // // VarValue returns nil if a local variable was not found, perhaps // because its package was not built, the debug information was not -// requested during SSA construction, or the value was optimized away. +// requested during IR construction, or the value was optimized away. // // ref is the path to an ast.Ident (e.g. from PathEnclosingInterval), // and that ident must resolve to obj. @@ -252,14 +229,14 @@ func (prog *Program) ConstValue(obj *types.Const) *Const { // // It is not specified whether the value or the address is returned in // any particular case, as it may depend upon optimizations performed -// during SSA code generation, such as registerization, constant +// during IR code generation, such as registerization, constant // folding, avoidance of materialization of subexpressions, etc. // func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) { // All references to a var are local to some function, possibly init. fn := EnclosingFunction(pkg, ref) if fn == nil { - return // e.g. def of struct field; SSA not built? + return // e.g. def of struct field; IR not built? 
} id := ref[0].(*ast.Ident) diff --git a/vendor/honnef.co/go/tools/ssa/ssa.go b/vendor/honnef.co/go/tools/ir/ssa.go similarity index 78% rename from vendor/honnef.co/go/tools/ssa/ssa.go rename to vendor/honnef.co/go/tools/ir/ssa.go index aeddd65e5..49693045f 100644 --- a/vendor/honnef.co/go/tools/ssa/ssa.go +++ b/vendor/honnef.co/go/tools/ir/ssa.go @@ -2,10 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir // This package defines a high-level intermediate representation for -// Go programs using static single-assignment (SSA) form. +// Go programs using static single-information (SSI) form. import ( "fmt" @@ -18,12 +18,15 @@ import ( "golang.org/x/tools/go/types/typeutil" ) -// A Program is a partial or complete Go program converted to SSA form. +type ID int + +// A Program is a partial or complete Go program converted to IR form. type Program struct { Fset *token.FileSet // position information for the files of this Program + PrintFunc string // create ir.html for function specified in PrintFunc imported map[string]*Package // all importable Packages, keyed by import path packages map[*types.Package]*Package // all loaded Packages, keyed by object - mode BuilderMode // set of mode bits for SSA construction + mode BuilderMode // set of mode bits for IR construction MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets methodsMu sync.Mutex // guards the following maps: @@ -44,12 +47,14 @@ type Program struct { // and unspecified other things too. // type Package struct { - Prog *Program // the owning program - Pkg *types.Package // the corresponding go/types.Package - Members map[string]Member // all package members keyed by name (incl. init and init#%d) - values map[types.Object]Value // package members (incl. types and methods), keyed by object - init *Function // Func("init"); the package's init function - debug bool // include full debug info in this package + Prog *Program // the owning program + Pkg *types.Package // the corresponding go/types.Package + Members map[string]Member // all package members keyed by name (incl. init and init#%d) + Functions []*Function // all functions, excluding anonymous ones + values map[types.Object]Value // package members (incl. types and methods), keyed by object + init *Function // Func("init"); the package's init function + debug bool // include full debug info in this package + printFunc string // which function to print in HTML form // The following fields are set transiently, then cleared // after building. @@ -68,7 +73,6 @@ type Member interface { String() string // package-qualified name of the package member RelString(*types.Package) string // like String, but relative refs are unqualified Object() types.Object // typechecker's object for this member, if any - Pos() token.Pos // position of member's declaration, if known Type() types.Type // type of the package member Token() token.Token // token.{VAR,FUNC,CONST,TYPE} Package() *Package // the containing package @@ -95,8 +99,10 @@ type NamedConst struct { pkg *Package } -// A Value is an SSA value that can be referenced by an instruction. +// A Value is an IR value that can be referenced by an instruction. type Value interface { + setID(ID) + // Name returns the name of this value, and determines how // this Value appears when used as an operand of an // Instruction. @@ -107,10 +113,21 @@ type Value interface { // and type. 
For all other Values this is the name of the // virtual register defined by the instruction. // - // The name of an SSA Value is not semantically significant, + // The name of an IR Value is not semantically significant, // and may not even be unique within a function. Name() string + // ID returns the ID of this value. IDs are unique within a single + // function and are densely numbered, but may contain gaps. + // Values and other Instructions share the same ID space. + // Globally, values are identified by their addresses. However, + // IDs exist to facilitate efficient storage of mappings between + // values and data when analysing functions. + // + // NB: IDs are allocated late in the IR construction process and + // are not available to early stages of said process. + ID() ID + // If this value is an Instruction, String returns its // disassembled form; otherwise it returns unspecified // human-readable information about the Value, such as its @@ -123,7 +140,7 @@ type Value interface { Type() types.Type // Parent returns the function to which this Value belongs. - // It returns nil for named Functions, Builtin, Const and Global. + // It returns nil for named Functions, Builtin and Global. Parent() *Function // Referrers returns the list of instructions that have this @@ -136,29 +153,27 @@ type Value interface { // Referrers is currently only defined if Parent()!=nil, // i.e. for the function-local values FreeVar, Parameter, // Functions (iff anonymous) and all value-defining instructions. - // It returns nil for named Functions, Builtin, Const and Global. + // It returns nil for named Functions, Builtin and Global. // // Instruction.Operands contains the inverse of this relation. Referrers() *[]Instruction - // Pos returns the location of the AST token most closely - // associated with the operation that gave rise to this value, - // or token.NoPos if it was not explicit in the source. - // - // For each ast.Node type, a particular token is designated as - // the closest location for the expression, e.g. the Lparen - // for an *ast.CallExpr. This permits a compact but - // approximate mapping from Values to source positions for use - // in diagnostic messages, for example. + Operands(rands []*Value) []*Value // nil for non-Instructions + + // Source returns the AST node responsible for creating this + // value. A single AST node may be responsible for more than one + // value, and not all values have an associated AST node. // - // (Do not use this position to determine which Value - // corresponds to an ast.Expr; use Function.ValueForExpr - // instead. NB: it requires that the function was built with - // debug information.) + // Do not use this method to find a Value given an ast.Expr; use + // ValueForExpr instead. + Source() ast.Node + + // Pos returns Source().Pos() if Source is not nil, else it + // returns token.NoPos. Pos() token.Pos } -// An Instruction is an SSA instruction that computes a new Value or +// An Instruction is an IR instruction that computes a new Value or // has some effect. // // An Instruction that defines a value (e.g. BinOp) also implements @@ -166,23 +181,36 @@ type Value interface { // does not. // type Instruction interface { + setSource(ast.Node) + setID(ID) + // String returns the disassembled form of this value. // // Examples of Instructions that are Values: - // "x + y" (BinOp) - // "len([])" (Call) + // "BinOp {+} t1 t2" (BinOp) + // "Call len t1" (Call) // Note that the name of the Value is not printed. 
// // Examples of Instructions that are not Values: - // "return x" (Return) - // "*y = x" (Store) + // "Return t1" (Return) + // "Store {int} t2 t1" (Store) // - // (The separation Value.Name() from Value.String() is useful + // (The separation of Value.Name() from Value.String() is useful // for some analyses which distinguish the operation from the // value it defines, e.g., 'y = local int' is both an allocation // of memory 'local int' and a definition of a pointer y.) String() string + // ID returns the ID of this instruction. IDs are unique within a single + // function and are densely numbered, but may contain gaps. + // Globally, instructions are identified by their addresses. However, + // IDs exist to facilitate efficient storage of mappings between + // instructions and data when analysing functions. + // + // NB: IDs are allocated late in the IR construction process and + // are not available to early stages of said process. + ID() ID + // Parent returns the function to which this instruction // belongs. Parent() *Function @@ -212,39 +240,37 @@ type Instruction interface { // Values.) Operands(rands []*Value) []*Value - // Pos returns the location of the AST token most closely - // associated with the operation that gave rise to this - // instruction, or token.NoPos if it was not explicit in the - // source. - // - // For each ast.Node type, a particular token is designated as - // the closest location for the expression, e.g. the Go token - // for an *ast.GoStmt. This permits a compact but approximate - // mapping from Instructions to source positions for use in - // diagnostic messages, for example. - // - // (Do not use this position to determine which Instruction - // corresponds to an ast.Expr; see the notes for Value.Pos. - // This position may be used to determine which non-Value - // Instruction corresponds to some ast.Stmts, but not all: If - // and Jump instructions have no Pos(), for example.) + Referrers() *[]Instruction // nil for non-Values + + // Source returns the AST node responsible for creating this + // instruction. A single AST node may be responsible for more than + // one instruction, and not all instructions have an associated + // AST node. + Source() ast.Node + + // Pos returns Source().Pos() if Source is not nil, else it + // returns token.NoPos. Pos() token.Pos } -// A Node is a node in the SSA value graph. Every concrete type that +// A Node is a node in the IR value graph. Every concrete type that // implements Node is also either a Value, an Instruction, or both. // // Node contains the methods common to Value and Instruction, plus the // Operands and Referrers methods generalized to return nil for // non-Instructions and non-Values, respectively. // -// Node is provided to simplify SSA graph algorithms. Clients should +// Node is provided to simplify IR graph algorithms. Clients should // use the more specific and informative Value or Instruction // interfaces where appropriate. // type Node interface { + setID(ID) + // Common methods: + ID() ID String() string + Source() ast.Node Pos() token.Pos Parent() *Function @@ -267,11 +293,6 @@ type Node interface { // the disassembly. // To iterate over the blocks in dominance order, use DomPreorder(). // -// Recover is an optional second entry point to which control resumes -// after a recovered panic. The Recover block may contain only a return -// statement, preceded by a load of the function's named return -// parameters, if any. 
-// // A nested function (Parent()!=nil) that refers to one or more // lexically enclosing local variables ("free variables") has FreeVars. // Such functions cannot be called directly but require a @@ -294,44 +315,63 @@ type Node interface { // Type() returns the function's Signature. // type Function struct { + node + name string object types.Object // a declared *types.Func or one of its wrappers method *types.Selection // info about provenance of synthetic methods Signature *types.Signature - pos token.Pos - - Synthetic string // provenance of synthetic function; "" for true source functions - syntax ast.Node // *ast.Func{Decl,Lit}; replaced with simple ast.Node after build, unless debug mode - parent *Function // enclosing function if anon; nil if global - Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error) - Prog *Program // enclosing program - Params []*Parameter // function parameters; for methods, includes receiver - FreeVars []*FreeVar // free variables whose values must be supplied by closure - Locals []*Alloc // local variables of this function - Blocks []*BasicBlock // basic blocks of the function; nil => external - Recover *BasicBlock // optional; control transfers here after recovered panic - AnonFuncs []*Function // anonymous functions directly beneath this one - referrers []Instruction // referring instructions (iff Parent() != nil) + Synthetic string // provenance of synthetic function; "" for true source functions + parent *Function // enclosing function if anon; nil if global + Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error) + Prog *Program // enclosing program + Params []*Parameter // function parameters; for methods, includes receiver + FreeVars []*FreeVar // free variables whose values must be supplied by closure + Locals []*Alloc // local variables of this function + Blocks []*BasicBlock // basic blocks of the function; nil => external + Exit *BasicBlock // The function's exit block + AnonFuncs []*Function // anonymous functions directly beneath this one + referrers []Instruction // referring instructions (iff Parent() != nil) + WillExit bool // Calling this function will always terminate the process + WillUnwind bool // Calling this function will always unwind (it will call runtime.Goexit or panic) + + *functionBody +} + +type functionBody struct { // The following fields are set transiently during building, // then cleared. - currentBlock *BasicBlock // where to emit code - objects map[types.Object]Value // addresses of local variables - namedResults []*Alloc // tuple of named results - targets *targets // linked stack of branch targets - lblocks map[*ast.Object]*lblock // labelled blocks + currentBlock *BasicBlock // where to emit code + objects map[types.Object]Value // addresses of local variables + namedResults []*Alloc // tuple of named results + implicitResults []*Alloc // tuple of results + targets *targets // linked stack of branch targets + lblocks map[*ast.Object]*lblock // labelled blocks + consts []*Const + wr *HTMLWriter + fakeExits BlockSet + blocksets [5]BlockSet + hasDefer bool +} + +func (fn *Function) results() []*Alloc { + if len(fn.namedResults) > 0 { + return fn.namedResults + } + return fn.implicitResults } -// BasicBlock represents an SSA basic block. +// BasicBlock represents an IR basic block. // // The final element of Instrs is always an explicit transfer of -// control (If, Jump, Return, or Panic). +// control (If, Jump, Return, Panic, or Unreachable). 
// // A block may contain no Instructions only if it is unreachable, // i.e., Preds is nil. Empty blocks are typically pruned. // // BasicBlocks and their Preds/Succs relation form a (possibly cyclic) -// graph independent of the SSA Value graph: the control-flow graph or +// graph independent of the IR Value graph: the control-flow graph or // CFG. It is illegal for multiple edges to exist between the same // pair of blocks. // @@ -350,8 +390,10 @@ type BasicBlock struct { Preds, Succs []*BasicBlock // predecessors and successors succs2 [2]*BasicBlock // initial space for Succs dom domInfo // dominator tree info - gaps int // number of nil Instrs (transient) - rundefers int // number of rundefers (transient) + pdom domInfo // post-dominator tree info + post int + gaps int // number of nil Instrs (transient) + rundefers int // number of rundefers (transient) } // Pure values ---------------------------------------- @@ -373,9 +415,10 @@ type BasicBlock struct { // belongs to an enclosing function. // type FreeVar struct { + node + name string typ types.Type - pos token.Pos parent *Function referrers []Instruction @@ -386,12 +429,10 @@ type FreeVar struct { // A Parameter represents an input parameter of a function. // type Parameter struct { - name string - object types.Object // a *types.Var; nil for non-source locals - typ types.Type - pos token.Pos - parent *Function - referrers []Instruction + register + + name string + object types.Object // a *types.Var; nil for non-source locals } // A Const represents the value of a constant expression. @@ -411,12 +452,13 @@ type Parameter struct { // Pos() returns token.NoPos. // // Example printed form: -// 42:int -// "hello":untyped string -// 3+4i:MyComplex +// Const {42} +// Const {"test"} +// Const {(3 + 4i)} // type Const struct { - typ types.Type + register + Value constant.Value } @@ -427,10 +469,11 @@ type Const struct { // identifier. // type Global struct { + node + name string object types.Object // a *types.Var; may be nil for synthetics e.g. init$guard typ types.Type - pos token.Pos Pkg *Package } @@ -441,12 +484,12 @@ type Global struct { // Builtins can only appear in CallCommon.Func. // // Name() indicates the function: one of the built-in functions from the -// Go spec (excluding "make" and "new") or one of these ssa-defined +// Go spec (excluding "make" and "new") or one of these ir-defined // intrinsics: // // // wrapnilchk returns ptr if non-nil, panics otherwise. // // (For use in indirection wrappers.) -// func ssa:wrapnilchk(ptr *T, recvType, methodName string) *T +// func ir:wrapnilchk(ptr *T, recvType, methodName string) *T // // Object() returns a *types.Builtin for built-ins defined by the spec, // nil for others. @@ -455,6 +498,8 @@ type Global struct { // signature of the built-in for this call. // type Builtin struct { + node + name string sig *types.Signature } @@ -470,12 +515,12 @@ type Builtin struct { // // If Heap is false, Alloc allocates space in the function's // activation record (frame); we refer to an Alloc(Heap=false) as a -// "local" alloc. Each local Alloc returns the same address each time +// "stack" alloc. Each stack Alloc returns the same address each time // it is executed within the same activation; the space is // re-initialized to zero. // // If Heap is true, Alloc allocates space in the heap; we -// refer to an Alloc(Heap=true) as a "new" alloc. Each new Alloc +// refer to an Alloc(Heap=true) as a "heap" alloc. Each heap Alloc // returns a different address each time it is executed. 
// // When Alloc is applied to a channel, map or slice type, it returns @@ -488,44 +533,43 @@ type Builtin struct { // allocates a varargs slice. // // Example printed form: -// t0 = local int -// t1 = new int +// t1 = StackAlloc <*int> +// t2 = HeapAlloc <*int> (new) // type Alloc struct { register - Comment string - Heap bool - index int // dense numbering; for lifting + Heap bool + index int // dense numbering; for lifting } var _ Instruction = (*Sigma)(nil) var _ Value = (*Sigma)(nil) +// The Sigma instruction represents an SSI σ-node, which splits values +// at branches in the control flow. +// +// Conceptually, σ-nodes exist at the end of blocks that branch and +// constitute parallel assignments to one value per destination block. +// However, such a representation would be awkward to work with, so +// instead we place σ-nodes at the beginning of branch targets. The +// From field denotes to which incoming edge the node applies. +// +// Within a block, all σ-nodes must appear before all non-σ nodes. +// +// Example printed form: +// t2 = Sigma [#0] t1 (x) +// type Sigma struct { register - X Value - Branch bool -} - -func (p *Sigma) Value() Value { - v := p.X - for { - sigma, ok := v.(*Sigma) - if !ok { - break - } - v = sigma - } - return v -} + From *BasicBlock + X Value -func (p *Sigma) String() string { - return fmt.Sprintf("σ [%s.%t]", relName(p.X, p), p.Branch) + live bool // used during lifting } // The Phi instruction represents an SSA φ-node, which combines values // that differ across incoming control-flow edges and yields a new -// value. Within a block, all φ-nodes must appear before all non-φ +// value. Within a block, all φ-nodes must appear before all non-φ, non-σ // nodes. // // Pos() returns the position of the && or || for short-circuit @@ -533,12 +577,13 @@ func (p *Sigma) String() string { // during SSA renaming. // // Example printed form: -// t2 = phi [0: t0, 1: t1] +// t3 = Phi 2:t1 4:t2 (x) // type Phi struct { register - Comment string // a hint as to its purpose - Edges []Value // Edges[i] is value for Block().Preds[i] + Edges []Value // Edges[i] is value for Block().Preds[i] + + live bool // used during lifting } // The Call instruction represents a function or method call. @@ -552,9 +597,9 @@ type Phi struct { // Pos() returns the ast.CallExpr.Lparen, if explicit in the source. // // Example printed form: -// t2 = println(t0, t1) -// t4 = t3() -// t7 = invoke t5.Println(...t6) +// t3 = Call <()> println t1 t2 +// t4 = Call <()> foo$1 +// t6 = Invoke t5.String // type Call struct { register @@ -566,7 +611,7 @@ type Call struct { // Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source. // // Example printed form: -// t1 = t0 + 1:int +// t3 = BinOp {+} t2 t1 // type BinOp struct { register @@ -579,32 +624,32 @@ type BinOp struct { } // The UnOp instruction yields the result of Op X. -// ARROW is channel receive. -// MUL is pointer indirection (load). // XOR is bitwise complement. // SUB is negation. // NOT is logical negation. // -// If CommaOk and Op=ARROW, the result is a 2-tuple of the value above -// and a boolean indicating the success of the receive. The -// components of the tuple are accessed using Extract. // -// Pos() returns the ast.UnaryExpr.OpPos, if explicit in the source. -// For receive operations (ARROW) implicit in ranging over a channel, -// Pos() returns the ast.RangeStmt.For. 
-// For implicit memory loads (STAR), Pos() returns the position of the +// Example printed form: +// t2 = UnOp {^} t1 +// +type UnOp struct { + register + Op token.Token // One of: NOT SUB XOR ! - ^ + X Value +} + +// The Load instruction loads a value from a memory address. +// +// For implicit memory loads, Pos() returns the position of the // most closely associated source-level construct; the details are not // specified. // // Example printed form: -// t0 = *x -// t2 = <-t1,ok +// t2 = Load t1 // -type UnOp struct { +type Load struct { register - Op token.Token // One of: NOT SUB ARROW MUL XOR ! - <- * ^ - X Value - CommaOk bool + X Value } // The ChangeType instruction applies to X a value-preserving type @@ -623,7 +668,7 @@ type UnOp struct { // from an explicit conversion in the source. // // Example printed form: -// t1 = changetype *int <- IntPtr (t0) +// t2 = ChangeType <*T> t1 // type ChangeType struct { register @@ -646,13 +691,13 @@ type ChangeType struct { // This operation cannot fail dynamically. // // Conversions of untyped string/number/bool constants to a specific -// representation are eliminated during SSA construction. +// representation are eliminated during IR construction. // // Pos() returns the ast.CallExpr.Lparen, if the instruction arose // from an explicit conversion in the source. // // Example printed form: -// t1 = convert []byte <- string (t0) +// t2 = Convert <[]byte> t1 // type Convert struct { register @@ -669,7 +714,7 @@ type Convert struct { // otherwise. // // Example printed form: -// t1 = change interface interface{} <- I (t0) +// t2 = ChangeInterface t1 // type ChangeInterface struct { register @@ -689,8 +734,7 @@ type ChangeInterface struct { // from an explicit conversion in the source. // // Example printed form: -// t1 = make interface{} <- int (42:int) -// t2 = make Stringer <- t0 +// t2 = MakeInterface t1 // type MakeInterface struct { register @@ -706,8 +750,8 @@ type MakeInterface struct { // closure or the ast.SelectorExpr.Sel for a bound method closure. // // Example printed form: -// t0 = make closure anon@1.2 [x y z] -// t1 = make closure bound$(main.I).add [i] +// t1 = MakeClosure foo$1 t1 t2 +// t5 = MakeClosure (T).foo$bound t4 // type MakeClosure struct { register @@ -724,8 +768,8 @@ type MakeClosure struct { // the ast.CompositeLit.Lbrack if created by a literal. // // Example printed form: -// t1 = make map[string]int t0 -// t1 = make StringIntMap t0 +// t1 = MakeMap +// t2 = MakeMap t1 // type MakeMap struct { register @@ -741,8 +785,8 @@ type MakeMap struct { // created it. // // Example printed form: -// t0 = make chan int 0 -// t0 = make IntChan 0 +// t3 = MakeChan t1 +// t4 = MakeChan t2 // type MakeChan struct { register @@ -763,8 +807,8 @@ type MakeChan struct { // created it. // // Example printed form: -// t1 = make []string 1:int t0 -// t1 = make StringSlice 1:int t0 +// t3 = MakeSlice <[]string> t1 t2 +// t4 = MakeSlice t1 t2 // type MakeSlice struct { register @@ -786,7 +830,7 @@ type MakeSlice struct { // NoPos if not explicit in the source (e.g. a variadic argument slice). // // Example printed form: -// t1 = slice t0[1:] +// t4 = Slice <[]int> t3 t2 t1 // type Slice struct { register @@ -808,7 +852,7 @@ type Slice struct { // field, if explicit in the source. // // Example printed form: -// t1 = &t0.name [#1] +// t2 = FieldAddr <*int> [0] (X) t1 // type FieldAddr struct { register @@ -826,7 +870,7 @@ type FieldAddr struct { // field, if explicit in the source. 
// // Example printed form: -// t1 = t0.name [#1] +// t2 = FieldAddr [0] (X) t1 // type Field struct { register @@ -837,7 +881,7 @@ type Field struct { // The IndexAddr instruction yields the address of the element at // index Index of collection X. Index is an integer expression. // -// The elements of maps and strings are not addressable; use Lookup or +// The elements of maps and strings are not addressable; use StringLookup, MapLookup or // MapUpdate instead. // // Dynamically, this instruction panics if X evaluates to a nil *array @@ -849,7 +893,7 @@ type Field struct { // explicit in the source. // // Example printed form: -// t2 = &t0[t1] +// t3 = IndexAddr <*int> t2 t1 // type IndexAddr struct { register @@ -863,7 +907,7 @@ type IndexAddr struct { // explicit in the source. // // Example printed form: -// t2 = t0[t1] +// t3 = Index t2 t1 // type Index struct { register @@ -871,9 +915,7 @@ type Index struct { Index Value // integer index } -// The Lookup instruction yields element Index of collection X, a map -// or string. Index is an integer expression if X is a string or the -// appropriate key type if X is a map. +// The MapLookup instruction yields element Index of collection X, a map. // // If CommaOk, the result is a 2-tuple of the value above and a // boolean indicating the result of a map membership test for the key. @@ -882,16 +924,30 @@ type Index struct { // Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source. // // Example printed form: -// t2 = t0[t1] -// t5 = t3[t4],ok +// t4 = MapLookup t3 t1 +// t6 = MapLookup <(string, bool)> t3 t2 // -type Lookup struct { +type MapLookup struct { register - X Value // string or map - Index Value // numeric or key-typed index + X Value // map + Index Value // key-typed index CommaOk bool // return a value,ok pair } +// The StringLookup instruction yields element Index of collection X, a string. +// Index is an integer expression. +// +// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source. +// +// Example printed form: +// t3 = StringLookup t2 t1 +// +type StringLookup struct { + register + X Value // string + Index Value // numeric index +} + // SelectState is a helper for Select. // It represents one goal state and its corresponding communication. // @@ -906,10 +962,10 @@ type SelectState struct { // The Select instruction tests whether (or blocks until) one // of the specified sent or received states is entered. // -// Let n be the number of States for which Dir==RECV and T_i (0<=i [<-t4, t5<-t1] +// t11 = SelectBlocking <(index int, ok bool)> [] // type Select struct { register @@ -954,7 +1010,7 @@ type Select struct { // Pos() returns the ast.RangeStmt.For. // // Example printed form: -// t0 = range "hello":string +// t2 = Range t1 // type Range struct { register @@ -977,7 +1033,8 @@ type Range struct { // The types of k and/or v may be types.Invalid. // // Example printed form: -// t1 = next t0 +// t5 = Next <(ok bool, k int, v rune)> t2 +// t5 = Next <(ok bool, k invalid type, v invalid type)> t2 // type Next struct { register @@ -1017,8 +1074,8 @@ type Next struct { // type-switch statement. // // Example printed form: -// t1 = typeassert t0.(int) -// t3 = typeassert,ok t2.(T) +// t2 = TypeAssert t1 +// t4 = TypeAssert <(value fmt.Stringer, ok bool)> t1 // type TypeAssert struct { register @@ -1030,11 +1087,11 @@ type TypeAssert struct { // The Extract instruction yields component Index of Tuple. 
// // This is used to access the results of instructions with multiple -// return values, such as Call, TypeAssert, Next, UnOp(ARROW) and -// IndexExpr(Map). +// return values, such as Call, TypeAssert, Next, Recv, +// MapLookup and others. // // Example printed form: -// t1 = extract t0 #1 +// t7 = Extract [1] (ok) t4 // type Extract struct { register @@ -1052,10 +1109,28 @@ type Extract struct { // Pos() returns NoPos. // // Example printed form: -// jump done +// Jump → b1 // type Jump struct { anInstruction + Comment string +} + +// The Unreachable pseudo-instruction signals that execution cannot +// continue after the preceding function call because it terminates +// the process. +// +// The instruction acts as a control instruction, jumping to the exit +// block. However, this jump will never execute. +// +// An Unreachable instruction must be the last instruction of its +// containing BasicBlock. +// +// Example printed form: +// Unreachable → b1 +// +type Unreachable struct { + anInstruction } // The If instruction transfers control to one of the two successors @@ -1065,16 +1140,30 @@ type Jump struct { // An If instruction must be the last instruction of its containing // BasicBlock. // -// Pos() returns NoPos. +// Pos() returns the *ast.IfStmt, if explicit in the source. // // Example printed form: -// if t0 goto done else body +// If t2 → b1 b2 // type If struct { anInstruction Cond Value } +type ConstantSwitch struct { + anInstruction + Tag Value + // Constant branch conditions. A nil Value denotes the (implicit + // or explicit) default branch. + Conds []Value +} + +type TypeSwitch struct { + register + Tag Value + Conds []types.Type +} + // The Return instruction returns values and control back to the calling // function. // @@ -1085,7 +1174,7 @@ type If struct { // components which the caller must access using Extract instructions. // // There is no instruction to return a ready-made tuple like those -// returned by a "value,ok"-mode TypeAssert, Lookup or UnOp(ARROW) or +// returned by a "value,ok"-mode TypeAssert, MapLookup or Recv or // a tail-call to a function with multiple result parameters. // // Return must be the last instruction of its containing BasicBlock. @@ -1094,13 +1183,12 @@ type If struct { // Pos() returns the ast.ReturnStmt.Return, if explicit in the source. // // Example printed form: -// return -// return nil:I, 2:int +// Return +// Return t1 t2 // type Return struct { anInstruction Results []Value - pos token.Pos } // The RunDefers instruction pops and invokes the entire stack of @@ -1113,7 +1201,7 @@ type Return struct { // Pos() returns NoPos. // // Example printed form: -// rundefers +// RunDefers // type RunDefers struct { anInstruction @@ -1122,7 +1210,7 @@ type RunDefers struct { // The Panic instruction initiates a panic with value X. // // A Panic instruction must be the last instruction of its containing -// BasicBlock, which must have no successors. +// BasicBlock, which must have one successor, the exit block. // // NB: 'go panic(x)' and 'defer panic(x)' do not use this instruction; // they are treated as calls to a built-in function. @@ -1131,12 +1219,11 @@ type RunDefers struct { // in the source. // // Example printed form: -// panic t0 +// Panic t1 // type Panic struct { anInstruction - X Value // an interface{} - pos token.Pos + X Value // an interface{} } // The Go instruction creates a new goroutine and calls the specified @@ -1147,14 +1234,13 @@ type Panic struct { // Pos() returns the ast.GoStmt.Go. 
// // Example printed form: -// go println(t0, t1) -// go t3() -// go invoke t5.Println(...t6) +// Go println t1 +// Go t3 +// GoInvoke t4.Bar t2 // type Go struct { anInstruction Call CallCommon - pos token.Pos } // The Defer instruction pushes the specified call onto a stack of @@ -1165,14 +1251,13 @@ type Go struct { // Pos() returns the ast.DeferStmt.Defer. // // Example printed form: -// defer println(t0, t1) -// defer t3() -// defer invoke t5.Println(...t6) +// Defer println t1 +// Defer t3 +// DeferInvoke t4.Bar t2 // type Defer struct { anInstruction Call CallCommon - pos token.Pos } // The Send instruction sends X on channel Chan. @@ -1180,12 +1265,30 @@ type Defer struct { // Pos() returns the ast.SendStmt.Arrow, if explicit in the source. // // Example printed form: -// send t0 <- t1 +// Send t2 t1 // type Send struct { anInstruction Chan, X Value - pos token.Pos +} + +// The Recv instruction receives from channel Chan. +// +// If CommaOk, the result is a 2-tuple of the value above +// and a boolean indicating the success of the receive. The +// components of the tuple are accessed using Extract. +// +// Pos() returns the ast.UnaryExpr.OpPos, if explicit in the source. +// For receive operations implicit in ranging over a channel, +// Pos() returns the ast.RangeStmt.For. +// +// Example printed form: +// t2 = Recv t1 +// t3 = Recv <(int, bool)> t1 +type Recv struct { + register + Chan Value + CommaOk bool } // The Store instruction stores Val at address Addr. @@ -1197,13 +1300,12 @@ type Send struct { // implementation choices, the details are not specified. // // Example printed form: -// *x = y +// Store {int} t2 t1 // type Store struct { anInstruction Addr Value Val Value - pos token.Pos } // The BlankStore instruction is emitted for assignments to the blank @@ -1214,7 +1316,7 @@ type Store struct { // Pos() returns NoPos. // // Example printed form: -// _ = t0 +// BlankStore t1 // type BlankStore struct { anInstruction @@ -1228,18 +1330,17 @@ type BlankStore struct { // if explicit in the source. // // Example printed form: -// t0[t1] = t2 +// MapUpdate t3 t1 t2 // type MapUpdate struct { anInstruction Map Value Key Value Value Value - pos token.Pos } // A DebugRef instruction maps a source-level expression Expr to the -// SSA value X that represents the value (!IsAddr) or address (IsAddr) +// IR value X that represents the value (!IsAddr) or address (IsAddr) // of that expression. // // DebugRef is a pseudo-instruction: it has no dynamic effect. @@ -1249,11 +1350,6 @@ type MapUpdate struct { // documented at Value.Pos(). e.g. CallExpr.Pos() does not return the // position of the ("designated") Lparen token. // -// If Expr is an *ast.Ident denoting a var or func, Object() returns -// the object; though this information can be obtained from the type -// checker, including it here greatly facilitates debugging. -// For non-Ident expressions, Object() returns nil. -// // DebugRefs are generated only for functions built with debugging // enabled; see Package.SetDebugMode() and the GlobalDebug builder // mode flag. @@ -1281,30 +1377,42 @@ type DebugRef struct { // Embeddable mix-ins and helpers for common parts of other structs. ----------- -// register is a mix-in embedded by all SSA values that are also +// register is a mix-in embedded by all IR values that are also // instructions, i.e. virtual registers, and provides a uniform // implementation of most of the Value interface: Value.Name() is a // numbered register (e.g. "t0"); the other methods are field accessors. 
// // Temporary names are automatically assigned to each register on -// completion of building a function in SSA form. -// -// Clients must not assume that the 'id' value (and the Name() derived -// from it) is unique within a function. As always in this API, -// semantics are determined only by identity; names exist only to -// facilitate debugging. +// completion of building a function in IR form. // type register struct { anInstruction - num int // "name" of virtual register, e.g. "t0". Not guaranteed unique. typ types.Type // type of virtual register - pos token.Pos // position of source expression, or NoPos referrers []Instruction } +type node struct { + source ast.Node + id ID +} + +func (n *node) setID(id ID) { n.id = id } +func (n node) ID() ID { return n.id } + +func (n *node) setSource(source ast.Node) { n.source = source } +func (n *node) Source() ast.Node { return n.source } + +func (n *node) Pos() token.Pos { + if n.source != nil { + return n.source.Pos() + } + return token.NoPos +} + // anInstruction is a mix-in embedded by all Instructions. // It provides the implementations of the Block and setBlock methods. type anInstruction struct { + node block *BasicBlock // the basic block of this instruction } @@ -1336,9 +1444,9 @@ type anInstruction struct { // Args[0] contains the receiver parameter. // // Example printed form: -// t2 = println(t0, t1) -// go t3() -// defer t5(...t6) +// t3 = Call <()> println t1 t2 +// Go t3 +// Defer t3 // // 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon // represents a dynamically dispatched call to an interface method. @@ -1352,18 +1460,18 @@ type anInstruction struct { // receiver but the first true argument. // // Example printed form: -// t1 = invoke t0.String() -// go invoke t3.Run(t2) -// defer invoke t4.Handle(...t5) +// t6 = Invoke t5.String +// GoInvoke t4.Bar t2 +// DeferInvoke t4.Bar t2 // // For all calls to variadic functions (Signature().Variadic()), // the last element of Args is a slice. // type CallCommon struct { - Value Value // receiver (invoke mode) or func value (call mode) - Method *types.Func // abstract method (invoke mode) - Args []Value // actual parameters (in static method call, includes receiver) - pos token.Pos // position of CallExpr.Lparen, iff explicit in source + Value Value // receiver (invoke mode) or func value (call mode) + Method *types.Func // abstract method (invoke mode) + Args []Value // actual parameters (in static method call, includes receiver) + Results Value } // IsInvoke returns true if this call has "invoke" (not "call") mode. @@ -1371,8 +1479,6 @@ func (c *CallCommon) IsInvoke() bool { return c.Method != nil } -func (c *CallCommon) Pos() token.Pos { return c.pos } - // Signature returns the signature of the called function. 
// // For an "invoke"-mode call, the signature of the interface method is @@ -1427,7 +1533,7 @@ func (c *CallCommon) Description() string { type CallInstruction interface { Instruction Common() *CallCommon // returns the common parts of the call - Value() *Call // returns the result value of the call (*Call) or nil (*Go, *Defer) + Value() *Call } func (s *Call) Common() *CallCommon { return &s.Call } @@ -1448,13 +1554,11 @@ func (v *Builtin) Parent() *Function { return nil } func (v *FreeVar) Type() types.Type { return v.typ } func (v *FreeVar) Name() string { return v.name } func (v *FreeVar) Referrers() *[]Instruction { return &v.referrers } -func (v *FreeVar) Pos() token.Pos { return v.pos } func (v *FreeVar) Parent() *Function { return v.parent } func (v *Global) Type() types.Type { return v.typ } func (v *Global) Name() string { return v.name } func (v *Global) Parent() *Function { return nil } -func (v *Global) Pos() token.Pos { return v.pos } func (v *Global) Referrers() *[]Instruction { return nil } func (v *Global) Token() token.Token { return token.VAR } func (v *Global) Object() types.Object { return v.object } @@ -1464,7 +1568,6 @@ func (v *Global) RelString(from *types.Package) string { return relString(v, fro func (v *Function) Name() string { return v.name } func (v *Function) Type() types.Type { return v.Signature } -func (v *Function) Pos() token.Pos { return v.pos } func (v *Function) Token() token.Token { return token.FUNC } func (v *Function) Object() types.Object { return v.object } func (v *Function) String() string { return v.RelString(nil) } @@ -1477,24 +1580,15 @@ func (v *Function) Referrers() *[]Instruction { return nil } -func (v *Parameter) Type() types.Type { return v.typ } -func (v *Parameter) Name() string { return v.name } -func (v *Parameter) Object() types.Object { return v.object } -func (v *Parameter) Referrers() *[]Instruction { return &v.referrers } -func (v *Parameter) Pos() token.Pos { return v.pos } -func (v *Parameter) Parent() *Function { return v.parent } +func (v *Parameter) Object() types.Object { return v.object } func (v *Alloc) Type() types.Type { return v.typ } func (v *Alloc) Referrers() *[]Instruction { return &v.referrers } -func (v *Alloc) Pos() token.Pos { return v.pos } func (v *register) Type() types.Type { return v.typ } func (v *register) setType(typ types.Type) { v.typ = typ } -func (v *register) Name() string { return fmt.Sprintf("t%d", v.num) } -func (v *register) setNum(num int) { v.num = num } +func (v *register) Name() string { return fmt.Sprintf("t%d", v.id) } func (v *register) Referrers() *[]Instruction { return &v.referrers } -func (v *register) Pos() token.Pos { return v.pos } -func (v *register) setPos(pos token.Pos) { v.pos = pos } func (v *anInstruction) Parent() *Function { return v.block.parent } func (v *anInstruction) Block() *BasicBlock { return v.block } @@ -1551,19 +1645,7 @@ func (p *Package) Type(name string) (t *Type) { return } -func (v *Call) Pos() token.Pos { return v.Call.pos } -func (s *Defer) Pos() token.Pos { return s.pos } -func (s *Go) Pos() token.Pos { return s.pos } -func (s *MapUpdate) Pos() token.Pos { return s.pos } -func (s *Panic) Pos() token.Pos { return s.pos } -func (s *Return) Pos() token.Pos { return s.pos } -func (s *Send) Pos() token.Pos { return s.pos } -func (s *Store) Pos() token.Pos { return s.pos } -func (s *BlankStore) Pos() token.Pos { return token.NoPos } -func (s *If) Pos() token.Pos { return token.NoPos } -func (s *Jump) Pos() token.Pos { return token.NoPos } -func (s 
*RunDefers) Pos() token.Pos { return token.NoPos } -func (s *DebugRef) Pos() token.Pos { return s.Expr.Pos() } +func (s *DebugRef) Pos() token.Pos { return s.Expr.Pos() } // Operands. @@ -1627,6 +1709,19 @@ func (s *If) Operands(rands []*Value) []*Value { return append(rands, &s.Cond) } +func (s *ConstantSwitch) Operands(rands []*Value) []*Value { + rands = append(rands, &s.Tag) + for i := range s.Conds { + rands = append(rands, &s.Conds[i]) + } + return rands +} + +func (s *TypeSwitch) Operands(rands []*Value) []*Value { + rands = append(rands, &s.Tag) + return rands +} + func (v *Index) Operands(rands []*Value) []*Value { return append(rands, &v.X, &v.Index) } @@ -1639,7 +1734,15 @@ func (*Jump) Operands(rands []*Value) []*Value { return rands } -func (v *Lookup) Operands(rands []*Value) []*Value { +func (*Unreachable) Operands(rands []*Value) []*Value { + return rands +} + +func (v *MapLookup) Operands(rands []*Value) []*Value { + return append(rands, &v.X, &v.Index) +} + +func (v *StringLookup) Operands(rands []*Value) []*Value { return append(rands, &v.X, &v.Index) } @@ -1716,6 +1819,10 @@ func (s *Send) Operands(rands []*Value) []*Value { return append(rands, &s.Chan, &s.X) } +func (recv *Recv) Operands(rands []*Value) []*Value { + return append(rands, &recv.Chan) +} + func (v *Slice) Operands(rands []*Value) []*Value { return append(rands, &v.X, &v.Low, &v.High, &v.Max) } @@ -1736,6 +1843,10 @@ func (v *UnOp) Operands(rands []*Value) []*Value { return append(rands, &v.X) } +func (v *Load) Operands(rands []*Value) []*Value { + return append(rands, &v.X) +} + // Non-Instruction Values: func (v *Builtin) Operands(rands []*Value) []*Value { return rands } func (v *FreeVar) Operands(rands []*Value) []*Value { return rands } diff --git a/vendor/honnef.co/go/tools/ssa/staticcheck.conf b/vendor/honnef.co/go/tools/ir/staticcheck.conf similarity index 100% rename from vendor/honnef.co/go/tools/ssa/staticcheck.conf rename to vendor/honnef.co/go/tools/ir/staticcheck.conf diff --git a/vendor/honnef.co/go/tools/ssa/util.go b/vendor/honnef.co/go/tools/ir/util.go similarity index 76% rename from vendor/honnef.co/go/tools/ssa/util.go rename to vendor/honnef.co/go/tools/ir/util.go index ddb118460..df0f8bf97 100644 --- a/vendor/honnef.co/go/tools/ssa/util.go +++ b/vendor/honnef.co/go/tools/ir/util.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir // This file defines a number of miscellaneous utility functions. @@ -52,36 +52,6 @@ func recvType(obj *types.Func) types.Type { return obj.Type().(*types.Signature).Recv().Type() } -// DefaultType returns the default "typed" type for an "untyped" type; -// it returns the incoming type for all other types. The default type -// for untyped nil is untyped nil. -// -// Exported to ssa/interp. -// -// TODO(adonovan): use go/types.DefaultType after 1.8. -// -func DefaultType(typ types.Type) types.Type { - if t, ok := typ.(*types.Basic); ok { - k := t.Kind() - switch k { - case types.UntypedBool: - k = types.Bool - case types.UntypedInt: - k = types.Int - case types.UntypedRune: - k = types.Rune - case types.UntypedFloat: - k = types.Float64 - case types.UntypedComplex: - k = types.Complex128 - case types.UntypedString: - k = types.String - } - typ = types.Typ[k] - } - return typ -} - // logStack prints the formatted "start" message to stderr and // returns a closure that prints the corresponding "end" message. 
// Call using 'defer logStack(...)()' to show builder stack on panic. diff --git a/vendor/honnef.co/go/tools/ssa/wrappers.go b/vendor/honnef.co/go/tools/ir/wrappers.go similarity index 89% rename from vendor/honnef.co/go/tools/ssa/wrappers.go rename to vendor/honnef.co/go/tools/ir/wrappers.go index a4ae71d8c..7dd334748 100644 --- a/vendor/honnef.co/go/tools/ssa/wrappers.go +++ b/vendor/honnef.co/go/tools/ir/wrappers.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package ssa +package ir // This file defines synthesis of Functions that delegate to declared // methods; they come in three kinds: @@ -65,41 +65,42 @@ func makeWrapper(prog *Program, sel *types.Selection) *Function { defer logStack("make %s to (%s)", description, recv.Type())() } fn := &Function{ - name: name, - method: sel, - object: obj, - Signature: sig, - Synthetic: description, - Prog: prog, - pos: obj.Pos(), + name: name, + method: sel, + object: obj, + Signature: sig, + Synthetic: description, + Prog: prog, + functionBody: new(functionBody), } + fn.initHTML(prog.PrintFunc) fn.startBody() - fn.addSpilledParam(recv) + fn.addSpilledParam(recv, nil) createParams(fn, start) indices := sel.Index() var v Value = fn.Locals[0] // spilled receiver if isPointer(sel.Recv()) { - v = emitLoad(fn, v) + v = emitLoad(fn, v, nil) // For simple indirection wrappers, perform an informative nil-check: // "value method (T).f called using nil *T pointer" if len(indices) == 1 && !isPointer(recvType(obj)) { var c Call c.Call.Value = &Builtin{ - name: "ssa:wrapnilchk", + name: "ir:wrapnilchk", sig: types.NewSignature(nil, types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)), types.NewTuple(anonVar(sel.Recv())), false), } c.Call.Args = []Value{ v, - stringConst(deref(sel.Recv()).String()), - stringConst(sel.Obj().Name()), + emitConst(fn, stringConst(deref(sel.Recv()).String())), + emitConst(fn, stringConst(sel.Obj().Name())), } c.setType(v.Type()) - v = fn.emit(&c) + v = fn.emit(&c, nil) } } @@ -111,7 +112,7 @@ func makeWrapper(prog *Program, sel *types.Selection) *Function { // Load) in preference to value extraction (Field possibly // preceded by Load). 
- v = emitImplicitSelections(fn, v, indices[:len(indices)-1]) + v = emitImplicitSelections(fn, v, indices[:len(indices)-1], nil) // Invariant: v is a pointer, either // value of implicit *C field, or @@ -120,18 +121,18 @@ func makeWrapper(prog *Program, sel *types.Selection) *Function { var c Call if r := recvType(obj); !isInterface(r) { // concrete method if !isPointer(r) { - v = emitLoad(fn, v) + v = emitLoad(fn, v, nil) } c.Call.Value = prog.declaredFunc(obj) c.Call.Args = append(c.Call.Args, v) } else { c.Call.Method = obj - c.Call.Value = emitLoad(fn, v) + c.Call.Value = emitLoad(fn, v, nil) } for _, arg := range fn.Params[1:] { c.Call.Args = append(c.Call.Args, arg) } - emitTailCall(fn, &c) + emitTailCall(fn, &c, nil) fn.finishBody() return fn } @@ -143,7 +144,7 @@ func makeWrapper(prog *Program, sel *types.Selection) *Function { func createParams(fn *Function, start int) { tparams := fn.Signature.Params() for i, n := start, tparams.Len(); i < n; i++ { - fn.addParamObj(tparams.At(i)) + fn.addParamObj(tparams.At(i), nil) } } @@ -184,13 +185,14 @@ func makeBound(prog *Program, obj *types.Func) *Function { defer logStack("%s", description)() } fn = &Function{ - name: obj.Name() + "$bound", - object: obj, - Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver - Synthetic: description, - Prog: prog, - pos: obj.Pos(), + name: obj.Name() + "$bound", + object: obj, + Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver + Synthetic: description, + Prog: prog, + functionBody: new(functionBody), } + fn.initHTML(prog.PrintFunc) fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn} fn.FreeVars = []*FreeVar{fv} @@ -208,7 +210,7 @@ func makeBound(prog *Program, obj *types.Func) *Function { for _, arg := range fn.Params { c.Call.Args = append(c.Call.Args, arg) } - emitTailCall(fn, &c) + emitTailCall(fn, &c, nil) fn.finishBody() prog.bounds[obj] = fn diff --git a/vendor/honnef.co/go/tools/ir/write.go b/vendor/honnef.co/go/tools/ir/write.go new file mode 100644 index 000000000..b936bc985 --- /dev/null +++ b/vendor/honnef.co/go/tools/ir/write.go @@ -0,0 +1,5 @@ +package ir + +func NewJump(parent *BasicBlock) *Jump { + return &Jump{anInstruction{block: parent}, ""} +} diff --git a/vendor/honnef.co/go/tools/lint/lint.go b/vendor/honnef.co/go/tools/lint/lint.go index de5a8f128..1a70e0c29 100644 --- a/vendor/honnef.co/go/tools/lint/lint.go +++ b/vendor/honnef.co/go/tools/lint/lint.go @@ -3,6 +3,7 @@ package lint // import "honnef.co/go/tools/lint" import ( "bytes" + "encoding/gob" "fmt" "go/scanner" "go/token" @@ -17,6 +18,7 @@ import ( "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" "honnef.co/go/tools/config" + "honnef.co/go/tools/internal/cache" ) type Documentation struct { @@ -62,7 +64,7 @@ type LineIgnore struct { Line int Checks []string Matched bool - Pos token.Pos + Pos token.Position } func (li *LineIgnore) Match(p Problem) bool { @@ -119,6 +121,21 @@ type Problem struct { Message string Check string Severity Severity + Related []Related +} + +type Related struct { + Pos token.Position + End token.Position + Message string +} + +func (p Problem) Equal(o Problem) bool { + return p.Pos == o.Pos && + p.End == o.End && + p.Message == o.Message && + p.Check == o.Check && + p.Severity == o.Severity } func (p *Problem) String() string { @@ -132,6 +149,7 @@ type Linter struct { GoVersion int Config config.Config Stats Stats + RepeatAnalyzers uint } type CumulativeChecker interface { @@ -184,6 +202,7 @@ func (l *Linter) Lint(cfg 
*packages.Config, patterns []string) ([]Problem, error return nil, err } r.goVersion = l.GoVersion + r.repeatAnalyzers = l.RepeatAnalyzers pkgs, err := r.Run(cfg, patterns, allowedAnalyzers, hasCumulative) if err != nil { @@ -264,10 +283,12 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error } atomic.StoreUint32(&r.stats.State, StateCumulative) - var problems []Problem for _, cum := range l.CumulativeCheckers { for _, res := range cum.Result() { pkg := tpkgToPkg[res.Pkg()] + if pkg == nil { + panic(fmt.Sprintf("analyzer %s flagged object %s in package %s, a package that we aren't tracking", cum.Analyzer(), res, res.Pkg())) + } allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks) if allowedChecks[cum.Analyzer().Name] { pos := DisplayPosition(pkg.Fset, res.Pos()) @@ -278,21 +299,51 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error continue } p := cum.ProblemObject(pkg.Fset, res) - problems = append(problems, p) + pkg.problems = append(pkg.problems, p) } } } + for _, pkg := range pkgs { + if !pkg.fromSource { + // Don't cache packages that we loaded from the cache + continue + } + cpkg := cachedPackage{ + Problems: pkg.problems, + Ignores: pkg.ignores, + Config: pkg.cfg, + } + buf := &bytes.Buffer{} + if err := gob.NewEncoder(buf).Encode(cpkg); err != nil { + return nil, err + } + id := cache.Subkey(pkg.actionID, "data "+r.problemsCacheKey) + if err := r.cache.PutBytes(id, buf.Bytes()); err != nil { + return nil, err + } + } + + var problems []Problem + // Deduplicate line ignores. When U1000 processes a package and + // its test variant, it will only emit a single problem for an + // unused object, not two problems. We will, however, have two + // line ignores, one per package. Without deduplication, one line + // ignore will be marked as matched, while the other one won't, + // subsequently reporting a "this linter directive didn't match + // anything" error. + ignores := map[token.Position]Ignore{} for _, pkg := range pkgs { for _, ig := range pkg.ignores { - for i := range pkg.problems { - p := &pkg.problems[i] - if ig.Match(*p) { - p.Severity = Ignored + if lig, ok := ig.(*LineIgnore); ok { + ig = ignores[lig.Pos] + if ig == nil { + ignores[lig.Pos] = lig + ig = lig } } - for i := range problems { - p := &problems[i] + for i := range pkg.problems { + p := &pkg.problems[i] if ig.Match(*p) { p.Severity = Ignored } @@ -318,6 +369,7 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error if !ok { continue } + ig = ignores[ig.Pos].(*LineIgnore) if ig.Matched { continue } @@ -338,7 +390,7 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error continue } p := Problem{ - Pos: DisplayPosition(pkg.Fset, ig.Pos), + Pos: ig.Pos, Message: "this linter directive didn't match anything; should it be removed?", Check: "", } @@ -372,7 +424,7 @@ func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error for i, p := range problems[1:] { // We may encounter duplicate problems because one file // can be part of many packages. 
- if problems[i] != p { + if !problems[i].Equal(p) { out = append(out, p) } } @@ -422,10 +474,6 @@ func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bo return allowedChecks } -type Positioner interface { - Pos() token.Pos -} - func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position { if p == token.NoPos { return token.Position{} diff --git a/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go b/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go index 3b939e95f..4408aff25 100644 --- a/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go +++ b/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go @@ -4,283 +4,14 @@ package lintdsl import ( "bytes" - "flag" "fmt" "go/ast" - "go/constant" - "go/printer" - "go/token" - "go/types" - "strings" + "go/format" "golang.org/x/tools/go/analysis" - "honnef.co/go/tools/facts" - "honnef.co/go/tools/lint" - "honnef.co/go/tools/ssa" + "honnef.co/go/tools/pattern" ) -type packager interface { - Package() *ssa.Package -} - -func CallName(call *ssa.CallCommon) string { - if call.IsInvoke() { - return "" - } - switch v := call.Value.(type) { - case *ssa.Function: - fn, ok := v.Object().(*types.Func) - if !ok { - return "" - } - return lint.FuncName(fn) - case *ssa.Builtin: - return v.Name() - } - return "" -} - -func IsCallTo(call *ssa.CallCommon, name string) bool { return CallName(call) == name } -func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name } - -func FilterDebug(instr []ssa.Instruction) []ssa.Instruction { - var out []ssa.Instruction - for _, ins := range instr { - if _, ok := ins.(*ssa.DebugRef); !ok { - out = append(out, ins) - } - } - return out -} - -func IsExample(fn *ssa.Function) bool { - if !strings.HasPrefix(fn.Name(), "Example") { - return false - } - f := fn.Prog.Fset.File(fn.Pos()) - if f == nil { - return false - } - return strings.HasSuffix(f.Name(), "_test.go") -} - -func IsPointerLike(T types.Type) bool { - switch T := T.Underlying().(type) { - case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer: - return true - case *types.Basic: - return T.Kind() == types.UnsafePointer - } - return false -} - -func IsIdent(expr ast.Expr, ident string) bool { - id, ok := expr.(*ast.Ident) - return ok && id.Name == ident -} - -// isBlank returns whether id is the blank identifier "_". -// If id == nil, the answer is false. 
-func IsBlank(id ast.Expr) bool { - ident, _ := id.(*ast.Ident) - return ident != nil && ident.Name == "_" -} - -func IsIntLiteral(expr ast.Expr, literal string) bool { - lit, ok := expr.(*ast.BasicLit) - return ok && lit.Kind == token.INT && lit.Value == literal -} - -// Deprecated: use IsIntLiteral instead -func IsZero(expr ast.Expr) bool { - return IsIntLiteral(expr, "0") -} - -func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool { - return IsType(pass.TypesInfo.TypeOf(expr), name) -} - -func IsInTest(pass *analysis.Pass, node lint.Positioner) bool { - // FIXME(dh): this doesn't work for global variables with - // initializers - f := pass.Fset.File(node.Pos()) - return f != nil && strings.HasSuffix(f.Name(), "_test.go") -} - -func IsInMain(pass *analysis.Pass, node lint.Positioner) bool { - if node, ok := node.(packager); ok { - return node.Package().Pkg.Name() == "main" - } - return pass.Pkg.Name() == "main" -} - -func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string { - info := pass.TypesInfo - sel := info.Selections[expr] - if sel == nil { - if x, ok := expr.X.(*ast.Ident); ok { - pkg, ok := info.ObjectOf(x).(*types.PkgName) - if !ok { - // This shouldn't happen - return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name) - } - return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name) - } - panic(fmt.Sprintf("unsupported selector: %v", expr)) - } - return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name()) -} - -func IsNil(pass *analysis.Pass, expr ast.Expr) bool { - return pass.TypesInfo.Types[expr].IsNil() -} - -func BoolConst(pass *analysis.Pass, expr ast.Expr) bool { - val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val() - return constant.BoolVal(val) -} - -func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool { - // We explicitly don't support typed bools because more often than - // not, custom bool types are used as binary enums and the - // explicit comparison is desired. - - ident, ok := expr.(*ast.Ident) - if !ok { - return false - } - obj := pass.TypesInfo.ObjectOf(ident) - c, ok := obj.(*types.Const) - if !ok { - return false - } - basic, ok := c.Type().(*types.Basic) - if !ok { - return false - } - if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool { - return false - } - return true -} - -func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) { - tv := pass.TypesInfo.Types[expr] - if tv.Value == nil { - return 0, false - } - if tv.Value.Kind() != constant.Int { - return 0, false - } - return constant.Int64Val(tv.Value) -} - -func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) { - val := pass.TypesInfo.Types[expr].Value - if val == nil { - return "", false - } - if val.Kind() != constant.String { - return "", false - } - return constant.StringVal(val), true -} - -// Dereference returns a pointer's element type; otherwise it returns -// T. -func Dereference(T types.Type) types.Type { - if p, ok := T.Underlying().(*types.Pointer); ok { - return p.Elem() - } - return T -} - -// DereferenceR returns a pointer's element type; otherwise it returns -// T. If the element type is itself a pointer, DereferenceR will be -// applied recursively. 
-func DereferenceR(T types.Type) types.Type { - if p, ok := T.Underlying().(*types.Pointer); ok { - return DereferenceR(p.Elem()) - } - return T -} - -func IsGoVersion(pass *analysis.Pass, minor int) bool { - version := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter).Get().(int) - return version >= minor -} - -func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string { - switch fun := call.Fun.(type) { - case *ast.SelectorExpr: - fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func) - if !ok { - return "" - } - return lint.FuncName(fn) - case *ast.Ident: - obj := pass.TypesInfo.ObjectOf(fun) - switch obj := obj.(type) { - case *types.Func: - return lint.FuncName(obj) - case *types.Builtin: - return obj.Name() - default: - return "" - } - default: - return "" - } -} - -func IsCallToAST(pass *analysis.Pass, node ast.Node, name string) bool { - call, ok := node.(*ast.CallExpr) - if !ok { - return false - } - return CallNameAST(pass, call) == name -} - -func IsCallToAnyAST(pass *analysis.Pass, node ast.Node, names ...string) bool { - for _, name := range names { - if IsCallToAST(pass, node, name) { - return true - } - } - return false -} - -func Render(pass *analysis.Pass, x interface{}) string { - var buf bytes.Buffer - if err := printer.Fprint(&buf, pass.Fset, x); err != nil { - panic(err) - } - return buf.String() -} - -func RenderArgs(pass *analysis.Pass, args []ast.Expr) string { - var ss []string - for _, arg := range args { - ss = append(ss, Render(pass, arg)) - } - return strings.Join(ss, ", ") -} - -func Preamble(f *ast.File) string { - cutoff := f.Package - if f.Doc != nil { - cutoff = f.Doc.Pos() - } - var out []string - for _, cmt := range f.Comments { - if cmt.Pos() >= cutoff { - break - } - out = append(out, cmt.Text()) - } - return strings.Join(out, "\n") -} - func Inspect(node ast.Node, fn func(node ast.Node) bool) { if node == nil { return @@ -288,113 +19,40 @@ func Inspect(node ast.Node, fn func(node ast.Node) bool) { ast.Inspect(node, fn) } -func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec { - if len(specs) == 0 { - return nil - } - groups := make([][]ast.Spec, 1) - groups[0] = append(groups[0], specs[0]) - - for _, spec := range specs[1:] { - g := groups[len(groups)-1] - if fset.PositionFor(spec.Pos(), false).Line-1 != - fset.PositionFor(g[len(g)-1].End(), false).Line { - - groups = append(groups, nil) - } - - groups[len(groups)-1] = append(groups[len(groups)-1], spec) - } - - return groups -} - -func IsObject(obj types.Object, name string) bool { - var path string - if pkg := obj.Pkg(); pkg != nil { - path = pkg.Path() + "." - } - return path+obj.Name() == name -} - -type Field struct { - Var *types.Var - Tag string - Path []int -} - -// FlattenFields recursively flattens T and embedded structs, -// returning a list of fields. If multiple fields with the same name -// exist, all will be returned. -func FlattenFields(T *types.Struct) []Field { - return flattenFields(T, nil, nil) +func Match(pass *analysis.Pass, q pattern.Pattern, node ast.Node) (*pattern.Matcher, bool) { + // Note that we ignore q.Relevant – callers of Match usually use + // AST inspectors that already filter on nodes we're interested + // in. 
+ m := &pattern.Matcher{TypesInfo: pass.TypesInfo} + ok := m.Match(q.Root, node) + return m, ok } -func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field { - if seen == nil { - seen = map[types.Type]bool{} - } - if seen[T] { - return nil - } - seen[T] = true - var out []Field - for i := 0; i < T.NumFields(); i++ { - field := T.Field(i) - tag := T.Tag(i) - np := append(path[:len(path):len(path)], i) - if field.Anonymous() { - if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok { - out = append(out, flattenFields(s, np, seen)...) - } - } else { - out = append(out, Field{field, tag, np}) - } +func MatchAndEdit(pass *analysis.Pass, before, after pattern.Pattern, node ast.Node) (*pattern.Matcher, []analysis.TextEdit, bool) { + m, ok := Match(pass, before, node) + if !ok { + return m, nil, false } - return out -} - -func File(pass *analysis.Pass, node lint.Positioner) *ast.File { - pass.Fset.PositionFor(node.Pos(), true) - m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File) - return m[pass.Fset.File(node.Pos())] -} - -// IsGenerated reports whether pos is in a generated file, It ignores -// //line directives. -func IsGenerated(pass *analysis.Pass, pos token.Pos) bool { - _, ok := Generator(pass, pos) - return ok + r := pattern.NodeToAST(after.Root, m.State) + buf := &bytes.Buffer{} + format.Node(buf, pass.Fset, r) + edit := []analysis.TextEdit{{ + Pos: node.Pos(), + End: node.End(), + NewText: buf.Bytes(), + }} + return m, edit, true } -// Generator returns the generator that generated the file containing -// pos. It ignores //line directives. -func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) { - file := pass.Fset.PositionFor(pos, false).Filename - m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) - g, ok := m[file] - return g, ok -} - -func ReportfFG(pass *analysis.Pass, pos token.Pos, f string, args ...interface{}) { - file := lint.DisplayPosition(pass.Fset, pos).Filename - m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) - if _, ok := m[file]; ok { - return +func Selector(x, sel string) *ast.SelectorExpr { + return &ast.SelectorExpr{ + X: &ast.Ident{Name: x}, + Sel: &ast.Ident{Name: sel}, } - pass.Reportf(pos, f, args...) } -func ReportNodef(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - pass.Report(analysis.Diagnostic{Pos: node.Pos(), End: node.End(), Message: msg}) -} - -func ReportNodefFG(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) { - file := lint.DisplayPosition(pass.Fset, node.Pos()).Filename - m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) - if _, ok := m[file]; ok { - return - } - ReportNodef(pass, node, format, args...) +// ExhaustiveTypeSwitch panics when called. It can be used to ensure +// that type switches are exhaustive. 
+func ExhaustiveTypeSwitch(v interface{}) { + panic(fmt.Sprintf("internal error: unhandled case %T", v)) } diff --git a/vendor/honnef.co/go/tools/lint/lintutil/format/format.go b/vendor/honnef.co/go/tools/lint/lintutil/format/format.go index 9385431f8..b28f8885b 100644 --- a/vendor/honnef.co/go/tools/lint/lintutil/format/format.go +++ b/vendor/honnef.co/go/tools/lint/lintutil/format/format.go @@ -39,7 +39,7 @@ func relativePositionString(pos token.Position) string { } type Statter interface { - Stats(total, errors, warnings int) + Stats(total, errors, warnings, ignored int) } type Formatter interface { @@ -51,7 +51,10 @@ type Text struct { } func (o Text) Format(p lint.Problem) { - fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Pos), p.String()) + fmt.Fprintf(o.W, "%s: %s\n", relativePositionString(p.Pos), p.String()) + for _, r := range p.Related { + fmt.Fprintf(o.W, "\t%s: %s\n", relativePositionString(r.Pos), r.Message) + } } type JSON struct { @@ -76,12 +79,18 @@ func (o JSON) Format(p lint.Problem) { Line int `json:"line"` Column int `json:"column"` } - jp := struct { - Code string `json:"code"` - Severity string `json:"severity,omitempty"` + type related struct { Location location `json:"location"` End location `json:"end"` Message string `json:"message"` + } + jp := struct { + Code string `json:"code"` + Severity string `json:"severity,omitempty"` + Location location `json:"location"` + End location `json:"end"` + Message string `json:"message"` + Related []related `json:"related,omitempty"` }{ Code: p.Check, Severity: severity(p.Severity), @@ -97,6 +106,21 @@ func (o JSON) Format(p lint.Problem) { }, Message: p.Message, } + for _, r := range p.Related { + jp.Related = append(jp.Related, related{ + Location: location{ + File: r.Pos.Filename, + Line: r.Pos.Line, + Column: r.Pos.Column, + }, + End: location{ + File: r.End.Filename, + Line: r.End.Line, + Column: r.End.Column, + }, + Message: r.Message, + }) + } _ = json.NewEncoder(o.W).Encode(jp) } @@ -123,13 +147,16 @@ func (o *Stylish) Format(p lint.Problem) { o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0) } fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Check, p.Message) + for _, r := range p.Related { + fmt.Fprintf(o.tw, " (%d, %d)\t\t %s\n", r.Pos.Line, r.Pos.Column, r.Message) + } } -func (o *Stylish) Stats(total, errors, warnings int) { +func (o *Stylish) Stats(total, errors, warnings, ignored int) { if o.tw != nil { o.tw.Flush() fmt.Fprintln(o.W) } - fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings)\n", - total, errors, warnings) + fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings, %d ignored)\n", + total, errors, warnings, ignored) } diff --git a/vendor/honnef.co/go/tools/lint/lintutil/util.go b/vendor/honnef.co/go/tools/lint/lintutil/util.go index fe0279f92..7c3dbdec1 100644 --- a/vendor/honnef.co/go/tools/lint/lintutil/util.go +++ b/vendor/honnef.co/go/tools/lint/lintutil/util.go @@ -23,7 +23,9 @@ import ( "runtime/pprof" "strconv" "strings" + "sync" "sync/atomic" + "time" "honnef.co/go/tools/config" "honnef.co/go/tools/internal/cache" @@ -114,6 +116,8 @@ func FlagSet(name string) *flag.FlagSet { flags.String("debug.memprofile", "", "Write memory profile to `file`") flags.Bool("debug.version", false, "Print detailed version information about this program") flags.Bool("debug.no-compile-errors", false, "Don't print compile errors") + flags.String("debug.measure-analyzers", "", "Write analysis measurements to `file`. 
`file` will be opened for appending if it already exists.") + flags.Uint("debug.repeat-analyzers", 0, "Run analyzers `num` times") checks := list{"inherit"} fail := list{"all"} @@ -153,6 +157,24 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs * memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string) debugVersion := fs.Lookup("debug.version").Value.(flag.Getter).Get().(bool) debugNoCompile := fs.Lookup("debug.no-compile-errors").Value.(flag.Getter).Get().(bool) + debugRepeat := fs.Lookup("debug.repeat-analyzers").Value.(flag.Getter).Get().(uint) + + var measureAnalyzers func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration) + if path := fs.Lookup("debug.measure-analyzers").Value.(flag.Getter).Get().(string); path != "" { + f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + log.Fatal(err) + } + + mu := &sync.Mutex{} + measureAnalyzers = func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration) { + mu.Lock() + defer mu.Unlock() + if _, err := fmt.Fprintf(f, "%s\t%s\t%d\n", analysis.Name, pkg.ID, d.Nanoseconds()); err != nil { + log.Println("error writing analysis measurements:", err) + } + } + } cfg := config.Config{} cfg.Checks = *fs.Lookup("checks").Value.(*list) @@ -218,10 +240,12 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs * } ps, err := Lint(cs, cums, fs.Args(), &Options{ - Tags: tags, - LintTests: tests, - GoVersion: goVersion, - Config: cfg, + Tags: tags, + LintTests: tests, + GoVersion: goVersion, + Config: cfg, + PrintAnalyzerMeasurement: measureAnalyzers, + RepeatAnalyzers: debugRepeat, }) if err != nil { fmt.Fprintln(os.Stderr, err) @@ -245,6 +269,7 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs * total int errors int warnings int + ignored int ) fail := *fs.Lookup("fail").Value.(*list) @@ -262,6 +287,7 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs * continue } if p.Severity == lint.Ignored && !showIgnored { + ignored++ continue } if shouldExit[p.Check] { @@ -273,7 +299,7 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs * f.Format(p) } if f, ok := f.(format.Statter); ok { - f.Stats(total, errors, warnings) + f.Stats(total, errors, warnings, ignored) } if errors > 0 { exit(1) @@ -284,9 +310,11 @@ func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs * type Options struct { Config config.Config - Tags string - LintTests bool - GoVersion int + Tags string + LintTests bool + GoVersion int + PrintAnalyzerMeasurement func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration) + RepeatAnalyzers uint } func computeSalt() ([]byte, error) { @@ -325,7 +353,9 @@ func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string CumulativeCheckers: cums, GoVersion: opt.GoVersion, Config: opt.Config, + RepeatAnalyzers: opt.RepeatAnalyzers, } + l.Stats.PrintAnalyzerMeasurement = opt.PrintAnalyzerMeasurement cfg := &packages.Config{} if opt.LintTests { cfg.Tests = true @@ -368,7 +398,8 @@ func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string }() } - return l.Lint(cfg, paths) + ps, err := l.Lint(cfg, paths) + return ps, err } var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`) @@ -390,3 +421,24 @@ func parsePos(pos string) token.Position { Column: col, } } + +func InitializeAnalyzers(docs map[string]*lint.Documentation, analyzers 
map[string]*analysis.Analyzer) map[string]*analysis.Analyzer { + out := make(map[string]*analysis.Analyzer, len(analyzers)) + for k, v := range analyzers { + vc := *v + out[k] = &vc + + vc.Name = k + doc, ok := docs[k] + if !ok { + panic(fmt.Sprintf("missing documentation for check %s", k)) + } + vc.Doc = doc.String() + if vc.Flags.Usage == nil { + fs := flag.NewFlagSet("", flag.PanicOnError) + fs.Var(NewVersionFlag(), "go", "Target Go version") + vc.Flags = *fs + } + } + return out +} diff --git a/vendor/honnef.co/go/tools/lint/runner.go b/vendor/honnef.co/go/tools/lint/runner.go index 3b22a63fa..74106ced8 100644 --- a/vendor/honnef.co/go/tools/lint/runner.go +++ b/vendor/honnef.co/go/tools/lint/runner.go @@ -1,6 +1,30 @@ package lint /* +Package loading + +Conceptually, package loading in the runner can be imagined as a +graph-shaped work list. We iteratively pop off leaf nodes (packages +that have no unloaded dependencies) and load data from export data, +our cache, or source. + +Specifically, non-initial packages are loaded from export data and the +fact cache if possible, otherwise from source. Initial packages are +loaded from export data, the fact cache and the (problems, ignores, +config) cache if possible, otherwise from source. + +The appeal of this approach is that it is both simple to implement and +easily parallelizable. Each leaf node can be processed independently, +and new leaf nodes appear as their dependencies are being processed. + +The downside of this approach, however, is that we're doing more work +than necessary. Imagine an initial package A, which has the following +dependency chain: A->B->C->D – in the current implementation, we will +load all 4 packages. However, if package A can be loaded fully from +cached information, then none of its dependencies are necessary, and +we could avoid loading them. + + Parallelism Runner implements parallel processing of packages by spawning one @@ -19,6 +43,34 @@ all execute in parallel, while not wasting resources for long linear chains or trying to process more subgraphs in parallel than the system can handle. + +Caching + +We make use of several caches. These caches are Go's export data, our +facts cache, and our (problems, ignores, config) cache. + +Initial packages will either be loaded from a combination of all three +caches, or from source. Non-initial packages will either be loaded +from a combination of export data and facts cache, or from source. + +The facts cache is separate from the (problems, ignores, config) cache +because when we process non-initial packages, we generate facts, but +we discard problems and ignores. + +The facts cache is keyed by (package, analyzer), whereas the +(problems, ignores, config) cache is keyed by (package, list of +analyzes). The difference between the two exists because there are +only a handful of analyses that produce facts, but hundreds of +analyses that don't. Creating one cache entry per fact-generating +analysis is feasible, creating one cache entry per normal analysis has +significant performance and storage overheads. + +The downside of keying by the list of analyzes is, naturally, that a +change in list of analyzes changes the cache key. `staticcheck -checks +A` and `staticcheck -checks A,B` will therefore need their own cache +entries and not reuse each other's work. This problem does not affect +the facts cache. 
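+
+In terms of the helpers added further down in this file, the two
+kinds of cache keys are derived roughly as follows (a sketch, not a
+verbatim excerpt; a is an *analysis.Analyzer and problemsCacheKey is
+the runner's sorted, space-joined list of checker names):
+
+	actionID := packageActionID(pkg)                               // hashes pkgpath and the package hash
+	factsID := cache.Subkey(passActionID(pkg, a), "facts")         // keyed by (package, analyzer)
+	problemsID := cache.Subkey(actionID, "data "+problemsCacheKey) // keyed by (package, list of analyses)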
+ */ import ( @@ -37,6 +89,7 @@ import ( "strings" "sync" "sync/atomic" + "time" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/packages" @@ -47,6 +100,11 @@ import ( "honnef.co/go/tools/loader" ) +func init() { + gob.Register(&FileIgnore{}) + gob.Register(&LineIgnore{}) +} + // If enabled, abuse of the go/analysis API will lead to panics const sanityCheck = true @@ -58,21 +116,43 @@ const sanityCheck = true // This may change unused's behavior, however, as it may observe fewer // interfaces from transitive dependencies. +// OPT(dh): every single package will have the same value for +// canClearTypes. We could move the Package.decUse method to runner to +// eliminate this field. This is probably not worth it, though. There +// are only thousands of packages, so the field only takes up +// kilobytes of memory. + +// OPT(dh): do we really need the Package.gen field? it's based +// trivially on pkg.results and merely caches the result of a type +// assertion. How often do we actually use the field? + type Package struct { + // dependents is initially set to 1 plus the number of packages + // that directly import this package. It is atomically decreased + // by 1 every time a dependent has been processed or when the + // package itself has been processed. Once the value reaches zero, + // the package is no longer needed. dependents uint64 *packages.Package - Imports []*Package - initial bool + Imports []*Package + initial bool + // fromSource is set to true for packages that have been loaded + // from source. This is the case for initial packages, packages + // with missing export data, and packages with no cached facts. fromSource bool - hash string - done chan struct{} + // hash stores the package hash, as computed by packageHash + hash string + actionID cache.ActionID + done chan struct{} resultsMu sync.Mutex - // results maps analyzer IDs to analyzer results + // results maps analyzer IDs to analyzer results. it is + // implemented as a deduplicating concurrent cache. results []*result - cfg *config.Config + cfg *config.Config + // gen maps file names to the code generator that created them gen map[string]facts.Generator problems []Problem ignores []Ignore @@ -82,12 +162,22 @@ type Package struct { facts []map[types.Object][]analysis.Fact pkgFacts [][]analysis.Fact + // canClearTypes is set to true if we can discard type + // information after the package and its dependents have been + // processed. This is the case when no cumulative checkers are + // being run. 
canClearTypes bool } +type cachedPackage struct { + Problems []Problem + Ignores []Ignore + Config *config.Config +} + func (pkg *Package) decUse() { - atomic.AddUint64(&pkg.dependents, ^uint64(0)) - if atomic.LoadUint64(&pkg.dependents) == 0 { + ret := atomic.AddUint64(&pkg.dependents, ^uint64(0)) + if ret == 0 { // nobody depends on this package anymore if pkg.canClearTypes { pkg.Types = nil @@ -108,16 +198,16 @@ type result struct { } type Runner struct { - ld loader.Loader - cache *cache.Cache + cache *cache.Cache + goVersion int + stats *Stats + repeatAnalyzers uint - analyzerIDs analyzerIDs + analyzerIDs analyzerIDs + problemsCacheKey string // limits parallelism of loading packages loadSem chan struct{} - - goVersion int - stats *Stats } type analyzerIDs struct { @@ -225,6 +315,13 @@ func (ac *analysisAction) report(pass *analysis.Pass, d analysis.Diagnostic) { Message: d.Message, Check: pass.Analyzer.Name, } + for _, r := range d.Related { + p.Related = append(p.Related, Related{ + Pos: DisplayPosition(pass.Fset, r.Pos), + End: DisplayPosition(pass.Fset, r.End), + Message: r.Message, + }) + } ac.problems = append(ac.problems, p) } @@ -278,6 +375,21 @@ func (r *Runner) runAnalysis(ac *analysisAction) (ret interface{}, err error) { } } +func (r *Runner) loadCachedPackage(pkg *Package, analyzers []*analysis.Analyzer) (cachedPackage, bool) { + // OPT(dh): we can cache this computation, it'll be the same for all packages + id := cache.Subkey(pkg.actionID, "data "+r.problemsCacheKey) + + b, _, err := r.cache.GetBytes(id) + if err != nil { + return cachedPackage{}, false + } + var cpkg cachedPackage + if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&cpkg); err != nil { + return cachedPackage{}, false + } + return cpkg, true +} + func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bool) { if len(a.FactTypes) == 0 { return nil, true @@ -285,10 +397,7 @@ func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bo var facts []Fact // Look in the cache for facts - aID, err := passActionID(pkg, a) - if err != nil { - return nil, false - } + aID := passActionID(pkg, a) aID = cache.Subkey(aID, "facts") b, _, err := r.cache.GetBytes(aID) if err != nil { @@ -378,9 +487,15 @@ func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (inter } // Then with this analyzer - ret, err := ac.analyzer.Run(pass) - if err != nil { - return nil, err + var ret interface{} + for i := uint(0); i < r.repeatAnalyzers+1; i++ { + var err error + t := time.Now() + ret, err = ac.analyzer.Run(pass) + r.stats.MeasureAnalyzer(ac.analyzer, ac.pkg, time.Since(t)) + if err != nil { + return nil, err + } } if len(ac.analyzer.FactTypes) > 0 { @@ -404,16 +519,7 @@ func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (inter } } - buf := &bytes.Buffer{} - if err := gob.NewEncoder(buf).Encode(facts); err != nil { - return nil, err - } - aID, err := passActionID(ac.pkg, ac.analyzer) - if err != nil { - return nil, err - } - aID = cache.Subkey(aID, "facts") - if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil { + if err := r.cacheData(facts, ac.pkg, ac.analyzer, "facts"); err != nil { return nil, err } } @@ -421,6 +527,19 @@ func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (inter return ret, nil } +func (r *Runner) cacheData(v interface{}, pkg *Package, a *analysis.Analyzer, subkey string) error { + buf := &bytes.Buffer{} + if err := gob.NewEncoder(buf).Encode(v); err != nil { + return err + } + aID := 
passActionID(pkg, a) + aID = cache.Subkey(aID, subkey) + if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil { + return err + } + return nil +} + func NewRunner(stats *Stats) (*Runner, error) { cache, err := cache.Default() if err != nil { @@ -438,9 +557,17 @@ func NewRunner(stats *Stats) (*Runner, error) { // diagnostics as well as extracted ignore directives. // // Note that diagnostics have not been filtered at this point yet, to -// accomodate cumulative analyzes that require additional steps to +// accommodate cumulative analyzes that require additional steps to // produce diagnostics. func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analysis.Analyzer, hasCumulative bool) ([]*Package, error) { + checkerNames := make([]string, len(analyzers)) + for i, a := range analyzers { + checkerNames[i] = a.Name + } + sort.Strings(checkerNames) + r.problemsCacheKey = strings.Join(checkerNames, " ") + + var allAnalyzers []*analysis.Analyzer r.analyzerIDs = analyzerIDs{m: map[*analysis.Analyzer]int{}} id := 0 seen := map[*analysis.Analyzer]struct{}{} @@ -450,6 +577,7 @@ func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analy return } seen[a] = struct{}{} + allAnalyzers = append(allAnalyzers, a) r.analyzerIDs.m[a] = id id++ for _, f := range a.FactTypes { @@ -468,6 +596,11 @@ func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analy for _, a := range injectedAnalyses { dfs(a) } + // Run all analyzers on all packages (subject to further + // restrictions enforced later). This guarantees that if analyzer + // A1 depends on A2, and A2 has facts, that A2 will run on the + // dependencies of user-provided packages, even though A1 won't. + analyzers = allAnalyzers var dcfg packages.Config if cfg != nil { @@ -475,11 +608,10 @@ func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analy } atomic.StoreUint32(&r.stats.State, StateGraph) - initialPkgs, err := r.ld.Graph(dcfg, patterns...) + initialPkgs, err := loader.Graph(dcfg, patterns...) if err != nil { return nil, err } - defer r.cache.Trim() var allPkgs []*Package @@ -507,7 +639,8 @@ func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analy m[l].Imports = append(m[l].Imports, m[v]) } - m[l].hash, err = packageHash(m[l]) + m[l].hash, err = r.packageHash(m[l]) + m[l].actionID = packageActionID(m[l]) if err != nil { m[l].errs = append(m[l].errs, err) } @@ -564,27 +697,36 @@ func parsePos(pos string) (token.Position, int, error) { }, len(parts[0]), nil } -// loadPkg loads a Go package. If the package is in the set of initial -// packages, it will be loaded from source, otherwise it will be -// loaded from export data. In the case that the package was loaded -// from export data, cached facts will also be loaded. -// -// Currently, only cached facts for this package will be loaded, not -// for any of its dependencies. +// loadPkg loads a Go package. It may be loaded from a combination of +// caches, or from source. 
func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error { if pkg.Types != nil { panic(fmt.Sprintf("internal error: %s has already been loaded", pkg.Package)) } - // Load type information if pkg.initial { - // Load package from source - pkg.fromSource = true - return r.ld.LoadFromSource(pkg.Package) + // Try to load cached package + cpkg, ok := r.loadCachedPackage(pkg, analyzers) + if ok { + pkg.problems = cpkg.Problems + pkg.ignores = cpkg.Ignores + pkg.cfg = cpkg.Config + } else { + pkg.fromSource = true + return loader.LoadFromSource(pkg.Package) + } } + // At this point we're either working with a non-initial package, + // or we managed to load cached problems for the package. We still + // need export data and facts. + + // OPT(dh): we don't need type information for this package if no + // other package depends on it. this may be the case for initial + // packages. + // Load package from export data - if err := r.ld.LoadFromExport(pkg.Package); err != nil { + if err := loader.LoadFromExport(pkg.Package); err != nil { // We asked Go to give us up to date export data, yet // we can't load it. There must be something wrong. // @@ -597,7 +739,7 @@ func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error { // FIXME(dh): we no longer reload from export data, so // theoretically we should be able to continue pkg.fromSource = true - if err := r.ld.LoadFromSource(pkg.Package); err != nil { + if err := loader.LoadFromSource(pkg.Package); err != nil { return err } // Make sure this package can't be imported successfully @@ -658,13 +800,14 @@ func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error { dfs(a) } - if failed { - pkg.fromSource = true - // XXX we added facts to the maps, we need to get rid of those - return r.ld.LoadFromSource(pkg.Package) + if !failed { + return nil } - return nil + // We failed to load some cached facts + pkg.fromSource = true + // XXX we added facts to the maps, we need to get rid of those + return loader.LoadFromSource(pkg.Package) } type analysisError struct { @@ -695,7 +838,7 @@ func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) { }() // Ensure all packages have the generated map and config. This is - // required by interna of the runner. Analyses that themselves + // required by internals of the runner. Analyses that themselves // make use of either have an explicit dependency so that other // runners work correctly, too. analyzers = append(analyzers[0:len(analyzers):len(analyzers)], injectedAnalyses...) @@ -766,7 +909,7 @@ func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) { defer wg.Done() // Only initial packages and packages with missing // facts will have been loaded from source. - if pkg.initial || r.hasFacts(a) { + if pkg.initial || len(a.FactTypes) > 0 { if _, err := r.runAnalysis(ac); err != nil { errs[i] = analysisError{a, pkg, err} return @@ -800,6 +943,8 @@ func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) { // We can't process ignores at this point because `unused` needs // to see more than one package to make its decision. + // + // OPT(dh): can't we guard this block of code by pkg.initial? ignores, problems := parseDirectives(pkg.Package) pkg.ignores = append(pkg.ignores, ignores...) pkg.problems = append(pkg.problems, problems...) @@ -824,32 +969,6 @@ func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) { // from processPkg. } -// hasFacts reports whether an analysis exports any facts. 
An analysis -// that has a transitive dependency that exports facts is considered -// to be exporting facts. -func (r *Runner) hasFacts(a *analysis.Analyzer) bool { - ret := false - seen := make([]bool, len(r.analyzerIDs.m)) - var dfs func(*analysis.Analyzer) - dfs = func(a *analysis.Analyzer) { - if seen[r.analyzerIDs.get(a)] { - return - } - seen[r.analyzerIDs.get(a)] = true - if len(a.FactTypes) > 0 { - ret = true - } - for _, req := range a.Requires { - if ret { - break - } - dfs(req) - } - } - dfs(a) - return ret -} - func parseDirective(s string) (cmd string, args []string) { if !strings.HasPrefix(s, "//lint:") { return "", nil @@ -912,7 +1031,7 @@ func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) { File: pos.Filename, Line: pos.Line, Checks: checks, - Pos: c.Pos(), + Pos: DisplayPosition(pkg.Fset, c.Pos()), } case "file-ignore": ig = &FileIgnore{ @@ -932,9 +1051,10 @@ func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) { // packageHash computes a package's hash. The hash is based on all Go // files that make up the package, as well as the hashes of imported // packages. -func packageHash(pkg *Package) (string, error) { +func (r *Runner) packageHash(pkg *Package) (string, error) { key := cache.NewHash("package hash") fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) + fmt.Fprintf(key, "go %d\n", r.goVersion) for _, f := range pkg.CompiledGoFiles { h, err := cache.FileHash(f) if err != nil { @@ -943,6 +1063,28 @@ func packageHash(pkg *Package) (string, error) { fmt.Fprintf(key, "file %s %x\n", f, h) } + // Actually load the configuration to calculate its hash. This + // will take into consideration inheritance of configuration + // files, as well as the default configuration. + // + // OPT(dh): doing this means we'll load the config twice: once for + // computing the hash, and once when analyzing the package from + // source. + cdir := config.Dir(pkg.GoFiles) + if cdir == "" { + fmt.Fprintf(key, "file %s %x\n", config.ConfigName, [cache.HashSize]byte{}) + } else { + cfg, err := config.Load(cdir) + if err != nil { + return "", err + } + h := cache.NewHash(config.ConfigName) + if _, err := h.Write([]byte(cfg.String())); err != nil { + return "", err + } + fmt.Fprintf(key, "file %s %x\n", config.ConfigName, h.Sum()) + } + imps := make([]*Package, len(pkg.Imports)) copy(imps, pkg.Imports) sort.Slice(imps, func(i, j int) bool { @@ -959,12 +1101,14 @@ func packageHash(pkg *Package) (string, error) { return hex.EncodeToString(h[:]), nil } -// passActionID computes an ActionID for an analysis pass. -func passActionID(pkg *Package, analyzer *analysis.Analyzer) (cache.ActionID, error) { - key := cache.NewHash("action ID") +func packageActionID(pkg *Package) cache.ActionID { + key := cache.NewHash("package ID") fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) fmt.Fprintf(key, "pkghash %s\n", pkg.hash) - fmt.Fprintf(key, "analyzer %s\n", analyzer.Name) + return key.Sum() +} - return key.Sum(), nil +// passActionID computes an ActionID for an analysis pass. 
+func passActionID(pkg *Package, analyzer *analysis.Analyzer) cache.ActionID { + return cache.Subkey(pkg.actionID, fmt.Sprintf("analyzer %s", analyzer.Name)) } diff --git a/vendor/honnef.co/go/tools/lint/stats.go b/vendor/honnef.co/go/tools/lint/stats.go index 2f6508559..85eb97844 100644 --- a/vendor/honnef.co/go/tools/lint/stats.go +++ b/vendor/honnef.co/go/tools/lint/stats.go @@ -1,5 +1,11 @@ package lint +import ( + "time" + + "golang.org/x/tools/go/analysis" +) + const ( StateInitializing = 0 StateGraph = 1 @@ -17,4 +23,16 @@ type Stats struct { Problems uint32 ActiveWorkers uint32 TotalWorkers uint32 + PrintAnalyzerMeasurement func(*analysis.Analyzer, *Package, time.Duration) +} + +type AnalysisMeasurementKey struct { + Analysis string + Pkg string +} + +func (s *Stats) MeasureAnalyzer(analysis *analysis.Analyzer, pkg *Package, d time.Duration) { + if s.PrintAnalyzerMeasurement != nil { + s.PrintAnalyzerMeasurement(analysis, pkg, d) + } } diff --git a/vendor/honnef.co/go/tools/loader/loader.go b/vendor/honnef.co/go/tools/loader/loader.go index 9c6885d48..a14f274d2 100644 --- a/vendor/honnef.co/go/tools/loader/loader.go +++ b/vendor/honnef.co/go/tools/loader/loader.go @@ -1,6 +1,7 @@ package loader import ( + "errors" "fmt" "go/ast" "go/parser" @@ -9,22 +10,17 @@ import ( "go/types" "log" "os" - "sync" "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/go/packages" ) -type Loader struct { - exportMu sync.RWMutex -} - // Graph resolves patterns and returns packages with all the // information required to later load type information, and optionally // syntax trees. // // The provided config can set any setting with the exception of Mode. -func (ld *Loader) Graph(cfg packages.Config, patterns ...string) ([]*packages.Package, error) { +func Graph(cfg packages.Config, patterns ...string) ([]*packages.Package, error) { cfg.Mode = packages.NeedName | packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedTypesSizes pkgs, err := packages.Load(&cfg, patterns...) if err != nil { @@ -34,15 +30,29 @@ func (ld *Loader) Graph(cfg packages.Config, patterns ...string) ([]*packages.Pa packages.Visit(pkgs, nil, func(pkg *packages.Package) { pkg.Fset = fset }) - return pkgs, nil + + n := 0 + for _, pkg := range pkgs { + if len(pkg.CompiledGoFiles) == 0 && len(pkg.Errors) == 0 && pkg.PkgPath != "unsafe" { + // If a package consists only of test files, then + // go/packages incorrectly(?) returns an empty package for + // the non-test variant. Get rid of those packages. See + // #646. + // + // Do not, however, skip packages that have errors. Those, + // too, may have no files, but we want to print the + // errors. + continue + } + pkgs[n] = pkg + n++ + } + return pkgs[:n], nil } // LoadFromExport loads a package from export data. All of its // dependencies must have been loaded already. -func (ld *Loader) LoadFromExport(pkg *packages.Package) error { - ld.exportMu.Lock() - defer ld.exportMu.Unlock() - +func LoadFromExport(pkg *packages.Package) error { pkg.IllTyped = true for path, pkg := range pkg.Imports { if pkg.Types == nil { @@ -87,10 +97,7 @@ func (ld *Loader) LoadFromExport(pkg *packages.Package) error { // LoadFromSource loads a package from source. All of its dependencies // must have been loaded already. 
-func (ld *Loader) LoadFromSource(pkg *packages.Package) error { - ld.exportMu.RLock() - defer ld.exportMu.RUnlock() - +func LoadFromSource(pkg *packages.Package) error { pkg.IllTyped = true pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) @@ -121,6 +128,12 @@ func (ld *Loader) LoadFromSource(pkg *packages.Package) error { if path == "unsafe" { return types.Unsafe, nil } + if path == "C" { + // go/packages doesn't tell us that cgo preprocessing + // failed. When we subsequently try to parse the package, + // we'll encounter the raw C import. + return nil, errors.New("cgo preprocessing failed") + } imp := pkg.Imports[path] if imp == nil { return nil, nil diff --git a/vendor/honnef.co/go/tools/pattern/convert.go b/vendor/honnef.co/go/tools/pattern/convert.go new file mode 100644 index 000000000..dfcd1560d --- /dev/null +++ b/vendor/honnef.co/go/tools/pattern/convert.go @@ -0,0 +1,242 @@ +package pattern + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" +) + +var astTypes = map[string]reflect.Type{ + "Ellipsis": reflect.TypeOf(ast.Ellipsis{}), + "RangeStmt": reflect.TypeOf(ast.RangeStmt{}), + "AssignStmt": reflect.TypeOf(ast.AssignStmt{}), + "IndexExpr": reflect.TypeOf(ast.IndexExpr{}), + "Ident": reflect.TypeOf(ast.Ident{}), + "ValueSpec": reflect.TypeOf(ast.ValueSpec{}), + "GenDecl": reflect.TypeOf(ast.GenDecl{}), + "BinaryExpr": reflect.TypeOf(ast.BinaryExpr{}), + "ForStmt": reflect.TypeOf(ast.ForStmt{}), + "ArrayType": reflect.TypeOf(ast.ArrayType{}), + "DeferStmt": reflect.TypeOf(ast.DeferStmt{}), + "MapType": reflect.TypeOf(ast.MapType{}), + "ReturnStmt": reflect.TypeOf(ast.ReturnStmt{}), + "SliceExpr": reflect.TypeOf(ast.SliceExpr{}), + "StarExpr": reflect.TypeOf(ast.StarExpr{}), + "UnaryExpr": reflect.TypeOf(ast.UnaryExpr{}), + "SendStmt": reflect.TypeOf(ast.SendStmt{}), + "SelectStmt": reflect.TypeOf(ast.SelectStmt{}), + "ImportSpec": reflect.TypeOf(ast.ImportSpec{}), + "IfStmt": reflect.TypeOf(ast.IfStmt{}), + "GoStmt": reflect.TypeOf(ast.GoStmt{}), + "Field": reflect.TypeOf(ast.Field{}), + "SelectorExpr": reflect.TypeOf(ast.SelectorExpr{}), + "StructType": reflect.TypeOf(ast.StructType{}), + "KeyValueExpr": reflect.TypeOf(ast.KeyValueExpr{}), + "FuncType": reflect.TypeOf(ast.FuncType{}), + "FuncLit": reflect.TypeOf(ast.FuncLit{}), + "FuncDecl": reflect.TypeOf(ast.FuncDecl{}), + "ChanType": reflect.TypeOf(ast.ChanType{}), + "CallExpr": reflect.TypeOf(ast.CallExpr{}), + "CaseClause": reflect.TypeOf(ast.CaseClause{}), + "CommClause": reflect.TypeOf(ast.CommClause{}), + "CompositeLit": reflect.TypeOf(ast.CompositeLit{}), + "EmptyStmt": reflect.TypeOf(ast.EmptyStmt{}), + "SwitchStmt": reflect.TypeOf(ast.SwitchStmt{}), + "TypeSwitchStmt": reflect.TypeOf(ast.TypeSwitchStmt{}), + "TypeAssertExpr": reflect.TypeOf(ast.TypeAssertExpr{}), + "TypeSpec": reflect.TypeOf(ast.TypeSpec{}), + "InterfaceType": reflect.TypeOf(ast.InterfaceType{}), + "BranchStmt": reflect.TypeOf(ast.BranchStmt{}), + "IncDecStmt": reflect.TypeOf(ast.IncDecStmt{}), + "BasicLit": reflect.TypeOf(ast.BasicLit{}), +} + +func ASTToNode(node interface{}) Node { + switch node := node.(type) { + case *ast.File: + panic("cannot convert *ast.File to Node") + case nil: + return Nil{} + case string: + return String(node) + case token.Token: + return Token(node) + case *ast.ExprStmt: + return ASTToNode(node.X) + case *ast.BlockStmt: + if node == nil { + return Nil{} + } + return ASTToNode(node.List) + case *ast.FieldList: + if node == nil { + return Nil{} + } + return ASTToNode(node.List) + case *ast.BasicLit: + 
if node == nil { + return Nil{} + } + case *ast.ParenExpr: + return ASTToNode(node.X) + } + + if node, ok := node.(ast.Node); ok { + name := reflect.TypeOf(node).Elem().Name() + T, ok := structNodes[name] + if !ok { + panic(fmt.Sprintf("internal error: unhandled type %T", node)) + } + + if reflect.ValueOf(node).IsNil() { + return Nil{} + } + v := reflect.ValueOf(node).Elem() + objs := make([]Node, T.NumField()) + for i := 0; i < T.NumField(); i++ { + f := v.FieldByName(T.Field(i).Name) + objs[i] = ASTToNode(f.Interface()) + } + + n, err := populateNode(name, objs, false) + if err != nil { + panic(fmt.Sprintf("internal error: %s", err)) + } + return n + } + + s := reflect.ValueOf(node) + if s.Kind() == reflect.Slice { + if s.Len() == 0 { + return List{} + } + if s.Len() == 1 { + return ASTToNode(s.Index(0).Interface()) + } + + tail := List{} + for i := s.Len() - 1; i >= 0; i-- { + head := ASTToNode(s.Index(i).Interface()) + l := List{ + Head: head, + Tail: tail, + } + tail = l + } + return tail + } + + panic(fmt.Sprintf("internal error: unhandled type %T", node)) +} + +func NodeToAST(node Node, state State) interface{} { + switch node := node.(type) { + case Binding: + v, ok := state[node.Name] + if !ok { + // really we want to return an error here + panic("XXX") + } + switch v := v.(type) { + case types.Object: + return &ast.Ident{Name: v.Name()} + default: + return v + } + case Builtin, Any, Object, Function, Not, Or: + panic("XXX") + case List: + if (node == List{}) { + return []ast.Node{} + } + x := []ast.Node{NodeToAST(node.Head, state).(ast.Node)} + x = append(x, NodeToAST(node.Tail, state).([]ast.Node)...) + return x + case Token: + return token.Token(node) + case String: + return string(node) + case Nil: + return nil + } + + name := reflect.TypeOf(node).Name() + T, ok := astTypes[name] + if !ok { + panic(fmt.Sprintf("internal error: unhandled type %T", node)) + } + v := reflect.ValueOf(node) + out := reflect.New(T) + for i := 0; i < T.NumField(); i++ { + fNode := v.FieldByName(T.Field(i).Name) + if (fNode == reflect.Value{}) { + continue + } + fAST := out.Elem().FieldByName(T.Field(i).Name) + switch fAST.Type().Kind() { + case reflect.Slice: + c := reflect.ValueOf(NodeToAST(fNode.Interface().(Node), state)) + if c.Kind() != reflect.Slice { + // it's a single node in the pattern, we have to wrap + // it in a slice + slice := reflect.MakeSlice(fAST.Type(), 1, 1) + slice.Index(0).Set(c) + c = slice + } + switch fAST.Interface().(type) { + case []ast.Node: + switch cc := c.Interface().(type) { + case []ast.Node: + fAST.Set(c) + case []ast.Expr: + var slice []ast.Node + for _, el := range cc { + slice = append(slice, el) + } + fAST.Set(reflect.ValueOf(slice)) + default: + panic("XXX") + } + case []ast.Expr: + switch cc := c.Interface().(type) { + case []ast.Node: + var slice []ast.Expr + for _, el := range cc { + slice = append(slice, el.(ast.Expr)) + } + fAST.Set(reflect.ValueOf(slice)) + case []ast.Expr: + fAST.Set(c) + default: + panic("XXX") + } + default: + panic("XXX") + } + case reflect.Int: + c := reflect.ValueOf(NodeToAST(fNode.Interface().(Node), state)) + switch c.Kind() { + case reflect.String: + tok, ok := tokensByString[c.Interface().(string)] + if !ok { + // really we want to return an error here + panic("XXX") + } + fAST.SetInt(int64(tok)) + case reflect.Int: + fAST.Set(c) + default: + panic(fmt.Sprintf("internal error: unexpected kind %s", c.Kind())) + } + default: + r := NodeToAST(fNode.Interface().(Node), state) + if r != nil { + fAST.Set(reflect.ValueOf(r)) + } + } + 
} + + return out.Interface().(ast.Node) +} diff --git a/vendor/honnef.co/go/tools/pattern/doc.go b/vendor/honnef.co/go/tools/pattern/doc.go new file mode 100644 index 000000000..05d86c251 --- /dev/null +++ b/vendor/honnef.co/go/tools/pattern/doc.go @@ -0,0 +1,273 @@ +/* +Package pattern implements a simple language for pattern matching Go ASTs. + +Design decisions and trade-offs + +The language is designed specifically for the task of filtering ASTs +to simplify the implementation of analyses in staticcheck. +It is also intended to be trivial to parse and execute. + +To that end, we make certain decisions that make the language more +suited to its task, while making certain queries infeasible. + +Furthermore, it is fully expected that the majority of analyses will still require ordinary Go code +to further process the filtered AST, to make use of type information and to enforce complex invariants. +It is not our goal to design a scripting language for writing entire checks in. + +The language + +At its core, patterns are a representation of Go ASTs, allowing for the use of placeholders to enable pattern matching. +Their syntax is inspired by LISP and Haskell, but unlike LISP, the core unit of patterns isn't the list, but the node. +There is a fixed set of nodes, identified by name, and with the exception of the Or node, all nodes have a fixed number of arguments. +In addition to nodes, there are atoms, which represent basic units such as strings or the nil value. + +Pattern matching is implemented via bindings, represented by the Binding node. +A Binding can match nodes and associate them with names, to later recall the nodes. +This allows for expressing "this node must be equal to that node" constraints. + +To simplify writing and reading patterns, a small amount of additional syntax exists on top of nodes and atoms. +This additional syntax doesn't add any new features of its own, it simply provides shortcuts to creating nodes and atoms. + +To show an example of a pattern, first consider this snippet of Go code: + + if x := fn(); x != nil { + for _, v := range x { + println(v, x) + } + } + +The corresponding AST expressed as an idiomatic pattern would look as follows: + + (IfStmt + (AssignStmt (Ident "x") ":=" (CallExpr (Ident "fn") [])) + (BinaryExpr (Ident "x") "!=" (Ident "nil")) + (RangeStmt + (Ident "_") (Ident "v") ":=" (Ident "x") + (CallExpr (Ident "println") [(Ident "v") (Ident "x")])) + nil) + +Two things are worth noting about this representation. +First, the [el1 el2 ...] syntax is a short-hand for creating lists. +It is a short-hand for el1:el2:[], which itself is a short-hand for (List el1 (List el2 (List nil nil)). +Second, note the absence of a lot of lists in places that normally accept lists. +For example, assignment assigns a number of right-hands to a number of left-hands, yet our AssignStmt is lacking any form of list. +This is due to the fact that a single node can match a list of exactly one element. +Thus, the two following forms have identical matching behavior: + + (AssignStmt (Ident "x") ":=" (CallExpr (Ident "fn") [])) + (AssignStmt [(Ident "x")] ":=" [(CallExpr (Ident "fn") [])]) + +This section serves as an overview of the language's syntax. +More in-depth explanations of the matching behavior as well as an exhaustive list of node types follows in the coming sections. 
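+
+As a brief illustration of driving a pattern from Go code (a sketch
+only, using the Parser and Match defined later in this package; the
+node variable stands in for an ast.Node obtained elsewhere, for
+example from go/parser):
+
+	var node ast.Node // the AST node to match against
+	p := &Parser{}
+	pat, err := p.Parse(`(BinaryExpr (Ident "x") "!=" (Ident "nil"))`)
+	if err != nil {
+		// handle invalid pattern syntax
+	}
+	if m, ok := Match(pat.Root, node); ok {
+		_ = m.State // bindings created during the match, if any
+	}
+
+Purely syntactic patterns like this one need no type information;
+type-aware nodes such as (Builtin ...), (Object ...) and (Function ...)
+require a Matcher whose TypesInfo field is set.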
+ +Pattern matching + +TODO write about pattern matching + +- inspired by haskell syntax, but much, much simpler and naive + +Node types + +The language contains two kinds of nodes: those that map to nodes in the AST, and those that implement additional logic. + +Nodes that map directly to AST nodes are named identically to the types in the go/ast package. +What follows is an exhaustive list of these nodes: + + (ArrayType len elt) + (AssignStmt lhs tok rhs) + (BasicLit kind value) + (BinaryExpr x op y) + (BranchStmt tok label) + (CallExpr fun args) + (CaseClause list body) + (ChanType dir value) + (CommClause comm body) + (CompositeLit type elts) + (DeferStmt call) + (Ellipsis elt) + (EmptyStmt) + (Field names type tag) + (ForStmt init cond post body) + (FuncDecl recv name type body) + (FuncLit type body) + (FuncType params results) + (GenDecl specs) + (GoStmt call) + (Ident name) + (IfStmt init cond body else) + (ImportSpec name path) + (IncDecStmt x tok) + (IndexExpr x index) + (InterfaceType methods) + (KeyValueExpr key value) + (MapType key value) + (RangeStmt key value tok x body) + (ReturnStmt results) + (SelectStmt body) + (SelectorExpr x sel) + (SendStmt chan value) + (SliceExpr x low high max) + (StarExpr x) + (StructType fields) + (SwitchStmt init tag body) + (TypeAssertExpr) + (TypeSpec name type) + (TypeSwitchStmt init assign body) + (UnaryExpr op x) + (ValueSpec names type values) + +Additionally, there are the String, Token and nil atoms. +Strings are double-quoted string literals, as in (Ident "someName"). +Tokens are also represented as double-quoted string literals, but are converted to token.Token values in contexts that require tokens, +such as in (BinaryExpr x "<" y), where "<" is transparently converted to token.LSS during matching. +The keyword 'nil' denotes the nil value, which represents the absence of any value. + +We also defines the (List head tail) node, which is used to represent sequences of elements as a singly linked list. +The head is a single element, and the tail is the remainder of the list. +For example, + + (List "foo" (List "bar" (List "baz" (List nil nil)))) + +represents a list of three elements, "foo", "bar" and "baz". There is dedicated syntax for writing lists, which looks as follows: + + ["foo" "bar" "baz"] + +This syntax is itself syntactic sugar for the following form: + + "foo":"bar":"baz":[] + +This form is of particular interest for pattern matching, as it allows matching on the head and tail. For example, + + "foo":"bar":_ + +would match any list with at least two elements, where the first two elements are "foo" and "bar". This is equivalent to writing + + (List "foo" (List "bar" _)) + +Note that it is not possible to match from the end of the list. +That is, there is no way to express a query such as "a list of any length where the last element is foo". + +Note that unlike in LISP, nil and empty lists are distinct from one another. +In patterns, with respect to lists, nil is akin to Go's untyped nil. +It will match a nil ast.Node, but it will not match a nil []ast.Expr. Nil will, however, match pointers to named types such as *ast.Ident. +Similarly, lists are akin to Go's +slices. An empty list will match both a nil and an empty []ast.Expr, but it will not match a nil ast.Node. + +Due to the difference between nil and empty lists, an empty list is represented as (List nil nil), i.e. a list with no head or tail. +Similarly, a list of one element is represented as (List el (List nil nil)). 
Unlike in LISP, it cannot be represented by (List el nil). + +Finally, there are nodes that implement special logic or matching behavior. + +(Any) matches any value. The underscore (_) maps to this node, making the following two forms equivalent: + + (Ident _) + (Ident (Any)) + +(Builtin name) matches a built-in identifier or function by name. +This is a type-aware variant of (Ident name). +Instead of only comparing the name, it resolves the object behind the name and makes sure it's a pre-declared identifier. + +For example, in the following piece of code + + func fn() { + println(true) + true := false + println(true) + } + +the pattern + + (Builtin "true") + +will match exactly once, on the first use of 'true' in the function. +Subsequent occurrences of 'true' no longer refer to the pre-declared identifier. + +(Object name) matches an identifier by name, but yields the +types.Object it refers to. + +(Function name) matches ast.Idents and ast.SelectorExprs that refer to a function with a given fully qualified name. +For example, "net/url.PathEscape" matches the PathEscape function in the net/url package, +and "(net/url.EscapeError).Error" refers to the Error method on the net/url.EscapeError type, +either on an instance of the type, or on the type itself. + +For example, the following patterns match the following lines of code: + + (CallExpr (Function "fmt.Println") _) // pattern 1 + (CallExpr (Function "(net/url.EscapeError).Error") _) // pattern 2 + + fmt.Println("hello, world") // matches pattern 1 + var x url.EscapeError + x.Error() // matches pattern 2 + (url.EscapeError).Error(x) // also matches pattern 2 + +(Binding name node) creates or uses a binding. +Bindings work like variable assignments, allowing referring to already matched nodes. +As an example, bindings are necessary to match self-assignment of the form "x = x", +since we need to express that the right-hand side is identical to the left-hand side. + +If a binding's node is not nil, the matcher will attempt to match a node according to the pattern. +If a binding's node is nil, the binding will either recall an existing value, or match the Any node. +It is an error to provide a non-nil node to a binding that has already been bound. + +Referring back to the earlier example, the following pattern will match self-assignment of idents: + + (AssignStmt (Binding "lhs" (Ident _)) "=" (Binding "lhs" nil)) + +Because bindings are a crucial component of pattern matching, there is special syntax for creating and recalling bindings. +Lower-case names refer to bindings. If standing on its own, the name "foo" will be equivalent to (Binding "foo" nil). +If a name is followed by an at-sign (@) then it will create a binding for the node that follows. +Together, this allows us to rewrite the earlier example as follows: + + (AssignStmt lhs@(Ident _) "=" lhs) + +(Or nodes...) is a variadic node that tries matching each node until one succeeds. For example, the following pattern matches all idents of name "foo" or "bar": + + (Ident (Or "foo" "bar")) + +We could also have written + + (Or (Ident "foo") (Ident "bar")) + +and achieved the same result. We can also mix different kinds of nodes: + + (Or (Ident "foo") (CallExpr (Ident "bar") _)) + +When using bindings inside of nodes used inside Or, all or none of the bindings will be bound. +That is, partially matched nodes that ultimately failed to match will not produce any bindings observable outside of the matching attempt. 
+We can thus write + + (Or (Ident name) (CallExpr name)) + +and 'name' will either be a String if the first option matched, or an Ident or SelectorExpr if the second option matched. + +(Not node) + +The Not node negates a match. For example, (Not (Ident _)) will match all nodes that aren't identifiers. + +ChanDir(0) + +Automatic unnesting of AST nodes + +The Go AST has several types of nodes that wrap other nodes. +To simplify matching, we automatically unwrap some of these nodes. + +These nodes are ExprStmt (for using expressions in a statement context), +ParenExpr (for parenthesized expressions), +DeclStmt (for declarations in a statement context), +and LabeledStmt (for labeled statements). + +Thus, the query + + (FuncLit _ [(CallExpr _ _)] + +will match a function literal containing a single function call, +even though in the actual Go AST, the CallExpr is nested inside an ExprStmt, +as function bodies are made up of sequences of statements. + +On the flip-side, there is no way to specifically match these wrapper nodes. +For example, there is no way of searching for unnecessary parentheses, like in the following piece of Go code: + + ((x)) += 2 + +*/ +package pattern diff --git a/vendor/honnef.co/go/tools/pattern/fuzz.go b/vendor/honnef.co/go/tools/pattern/fuzz.go new file mode 100644 index 000000000..52e7df974 --- /dev/null +++ b/vendor/honnef.co/go/tools/pattern/fuzz.go @@ -0,0 +1,50 @@ +// +build gofuzz + +package pattern + +import ( + "go/ast" + goparser "go/parser" + "go/token" + "os" + "path/filepath" + "strings" +) + +var files []*ast.File + +func init() { + fset := token.NewFileSet() + filepath.Walk("/usr/lib/go/src", func(path string, info os.FileInfo, err error) error { + if err != nil { + // XXX error handling + panic(err) + } + if !strings.HasSuffix(path, ".go") { + return nil + } + f, err := goparser.ParseFile(fset, path, nil, 0) + if err != nil { + return nil + } + files = append(files, f) + return nil + }) +} + +func Fuzz(data []byte) int { + p := &Parser{} + pat, err := p.Parse(string(data)) + if err != nil { + if strings.Contains(err.Error(), "internal error") { + panic(err) + } + return 0 + } + _ = pat.Root.String() + + for _, f := range files { + Match(pat.Root, f) + } + return 1 +} diff --git a/vendor/honnef.co/go/tools/pattern/lexer.go b/vendor/honnef.co/go/tools/pattern/lexer.go new file mode 100644 index 000000000..fb72e392b --- /dev/null +++ b/vendor/honnef.co/go/tools/pattern/lexer.go @@ -0,0 +1,221 @@ +package pattern + +import ( + "fmt" + "go/token" + "unicode" + "unicode/utf8" +) + +type lexer struct { + f *token.File + + input string + start int + pos int + width int + items chan item +} + +type itemType int + +const eof = -1 + +const ( + itemError itemType = iota + itemLeftParen + itemRightParen + itemLeftBracket + itemRightBracket + itemTypeName + itemVariable + itemAt + itemColon + itemBlank + itemString + itemEOF +) + +func (typ itemType) String() string { + switch typ { + case itemError: + return "ERROR" + case itemLeftParen: + return "(" + case itemRightParen: + return ")" + case itemLeftBracket: + return "[" + case itemRightBracket: + return "]" + case itemTypeName: + return "TYPE" + case itemVariable: + return "VAR" + case itemAt: + return "@" + case itemColon: + return ":" + case itemBlank: + return "_" + case itemString: + return "STRING" + case itemEOF: + return "EOF" + default: + return fmt.Sprintf("itemType(%d)", typ) + } +} + +type item struct { + typ itemType + val string + pos int +} + +type stateFn func(*lexer) stateFn + +func (l *lexer) 
run() { + for state := lexStart; state != nil; { + state = state(l) + } + close(l.items) +} + +func (l *lexer) emitValue(t itemType, value string) { + l.items <- item{t, value, l.start} + l.start = l.pos +} + +func (l *lexer) emit(t itemType) { + l.items <- item{t, l.input[l.start:l.pos], l.start} + l.start = l.pos +} + +func lexStart(l *lexer) stateFn { + switch r := l.next(); { + case r == eof: + l.emit(itemEOF) + return nil + case unicode.IsSpace(r): + l.ignore() + case r == '(': + l.emit(itemLeftParen) + case r == ')': + l.emit(itemRightParen) + case r == '[': + l.emit(itemLeftBracket) + case r == ']': + l.emit(itemRightBracket) + case r == '@': + l.emit(itemAt) + case r == ':': + l.emit(itemColon) + case r == '_': + l.emit(itemBlank) + case r == '"': + l.backup() + return lexString + case unicode.IsUpper(r): + l.backup() + return lexType + case unicode.IsLower(r): + l.backup() + return lexVariable + default: + return l.errorf("unexpected character %c", r) + } + return lexStart +} + +func (l *lexer) next() (r rune) { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + + if r == '\n' { + l.f.AddLine(l.pos) + } + + l.pos += l.width + + return r +} + +func (l *lexer) ignore() { + l.start = l.pos +} + +func (l *lexer) backup() { + l.pos -= l.width +} + +func (l *lexer) errorf(format string, args ...interface{}) stateFn { + // TODO(dh): emit position information in errors + l.items <- item{ + itemError, + fmt.Sprintf(format, args...), + l.start, + } + return nil +} + +func isAlphaNumeric(r rune) bool { + return r >= '0' && r <= '9' || + r >= 'a' && r <= 'z' || + r >= 'A' && r <= 'Z' +} + +func lexString(l *lexer) stateFn { + l.next() // skip quote + escape := false + + var runes []rune + for { + switch r := l.next(); r { + case eof: + return l.errorf("unterminated string") + case '"': + if !escape { + l.emitValue(itemString, string(runes)) + return lexStart + } else { + runes = append(runes, '"') + escape = false + } + case '\\': + if escape { + runes = append(runes, '\\') + escape = false + } else { + escape = true + } + default: + runes = append(runes, r) + } + } +} + +func lexType(l *lexer) stateFn { + l.next() + for { + if !isAlphaNumeric(l.next()) { + l.backup() + l.emit(itemTypeName) + return lexStart + } + } +} + +func lexVariable(l *lexer) stateFn { + l.next() + for { + if !isAlphaNumeric(l.next()) { + l.backup() + l.emit(itemVariable) + return lexStart + } + } +} diff --git a/vendor/honnef.co/go/tools/pattern/match.go b/vendor/honnef.co/go/tools/pattern/match.go new file mode 100644 index 000000000..ff039baa7 --- /dev/null +++ b/vendor/honnef.co/go/tools/pattern/match.go @@ -0,0 +1,513 @@ +package pattern + +import ( + "fmt" + "go/ast" + "go/token" + "go/types" + "reflect" + + "honnef.co/go/tools/lint" +) + +var tokensByString = map[string]Token{ + "INT": Token(token.INT), + "FLOAT": Token(token.FLOAT), + "IMAG": Token(token.IMAG), + "CHAR": Token(token.CHAR), + "STRING": Token(token.STRING), + "+": Token(token.ADD), + "-": Token(token.SUB), + "*": Token(token.MUL), + "/": Token(token.QUO), + "%": Token(token.REM), + "&": Token(token.AND), + "|": Token(token.OR), + "^": Token(token.XOR), + "<<": Token(token.SHL), + ">>": Token(token.SHR), + "&^": Token(token.AND_NOT), + "+=": Token(token.ADD_ASSIGN), + "-=": Token(token.SUB_ASSIGN), + "*=": Token(token.MUL_ASSIGN), + "/=": Token(token.QUO_ASSIGN), + "%=": Token(token.REM_ASSIGN), + "&=": Token(token.AND_ASSIGN), + "|=": Token(token.OR_ASSIGN), + "^=": 
Token(token.XOR_ASSIGN),
+	"<<=": Token(token.SHL_ASSIGN),
+	">>=": Token(token.SHR_ASSIGN),
+	"&^=": Token(token.AND_NOT_ASSIGN),
+	"&&": Token(token.LAND),
+	"||": Token(token.LOR),
+	"<-": Token(token.ARROW),
+	"++": Token(token.INC),
+	"--": Token(token.DEC),
+	"==": Token(token.EQL),
+	"<": Token(token.LSS),
+	">": Token(token.GTR),
+	"=": Token(token.ASSIGN),
+	"!": Token(token.NOT),
+	"!=": Token(token.NEQ),
+	"<=": Token(token.LEQ),
+	">=": Token(token.GEQ),
+	":=": Token(token.DEFINE),
+	"...": Token(token.ELLIPSIS),
+	"IMPORT": Token(token.IMPORT),
+	"VAR": Token(token.VAR),
+	"TYPE": Token(token.TYPE),
+	"CONST": Token(token.CONST),
+}
+
+func maybeToken(node Node) (Node, bool) {
+	if node, ok := node.(String); ok {
+		if tok, ok := tokensByString[string(node)]; ok {
+			return tok, true
+		}
+		return node, false
+	}
+	return node, false
+}
+
+func isNil(v interface{}) bool {
+	if v == nil {
+		return true
+	}
+	if _, ok := v.(Nil); ok {
+		return true
+	}
+	return false
+}
+
+type matcher interface {
+	Match(*Matcher, interface{}) (interface{}, bool)
+}
+
+type State = map[string]interface{}
+
+type Matcher struct {
+	TypesInfo *types.Info
+	State     State
+}
+
+func (m *Matcher) fork() *Matcher {
+	state := make(State, len(m.State))
+	for k, v := range m.State {
+		state[k] = v
+	}
+	return &Matcher{
+		TypesInfo: m.TypesInfo,
+		State:     state,
+	}
+}
+
+func (m *Matcher) merge(mc *Matcher) {
+	m.State = mc.State
+}
+
+func (m *Matcher) Match(a Node, b ast.Node) bool {
+	m.State = State{}
+	_, ok := match(m, a, b)
+	return ok
+}
+
+func Match(a Node, b ast.Node) (*Matcher, bool) {
+	m := &Matcher{}
+	ret := m.Match(a, b)
+	return m, ret
+}
+
+// Match two items, which may be (Node, AST) or (AST, AST)
+func match(m *Matcher, l, r interface{}) (interface{}, bool) {
+	if _, ok := r.(Node); ok {
+		panic("Node mustn't be on right side of match")
+	}
+
+	switch l := l.(type) {
+	case *ast.ParenExpr:
+		return match(m, l.X, r)
+	case *ast.ExprStmt:
+		return match(m, l.X, r)
+	case *ast.DeclStmt:
+		return match(m, l.Decl, r)
+	case *ast.LabeledStmt:
+		return match(m, l.Stmt, r)
+	case *ast.BlockStmt:
+		return match(m, l.List, r)
+	case *ast.FieldList:
+		return match(m, l.List, r)
+	}
+
+	switch r := r.(type) {
+	case *ast.ParenExpr:
+		return match(m, l, r.X)
+	case *ast.ExprStmt:
+		return match(m, l, r.X)
+	case *ast.DeclStmt:
+		return match(m, l, r.Decl)
+	case *ast.LabeledStmt:
+		return match(m, l, r.Stmt)
+	case *ast.BlockStmt:
+		if r == nil {
+			return match(m, l, nil)
+		}
+		return match(m, l, r.List)
+	case *ast.FieldList:
+		if r == nil {
+			return match(m, l, nil)
+		}
+		return match(m, l, r.List)
+	case *ast.BasicLit:
+		if r == nil {
+			return match(m, l, nil)
+		}
+	}
+
+	if l, ok := l.(matcher); ok {
+		return l.Match(m, r)
+	}
+
+	if l, ok := l.(Node); ok {
+		// Matching of pattern with concrete value
+		return matchNodeAST(m, l, r)
+	}
+
+	if l == nil || r == nil {
+		return nil, l == r
+	}
+
+	{
+		ln, ok1 := l.(ast.Node)
+		rn, ok2 := r.(ast.Node)
+		if ok1 && ok2 {
+			return matchAST(m, ln, rn)
+		}
+	}
+
+	{
+		obj, ok := l.(types.Object)
+		if ok {
+			switch r := r.(type) {
+			case *ast.Ident:
+				return obj, obj == m.TypesInfo.ObjectOf(r)
+			case *ast.SelectorExpr:
+				return obj, obj == m.TypesInfo.ObjectOf(r.Sel)
+			default:
+				return obj, false
+			}
+		}
+	}
+
+	{
+		ln, ok1 := l.([]ast.Expr)
+		rn, ok2 := r.([]ast.Expr)
+		if ok1 || ok2 {
+			if ok1 && !ok2 {
+				rn = []ast.Expr{r.(ast.Expr)}
+			} else if !ok1 && ok2 {
+				ln = []ast.Expr{l.(ast.Expr)}
+			}
+
+			if len(ln) != len(rn) {
+				return nil, false
+			}
+			for i, ll := range ln {
+				if _, ok := match(m, ll, rn[i]); !ok {
+					return nil, false
+				}
+			}
+			return r, true
+		}
+	}
+
+	{
+		ln, ok1 := l.([]ast.Stmt)
+		rn, ok2 := r.([]ast.Stmt)
+		if ok1 || ok2 {
+			if ok1 && !ok2 {
+				rn = []ast.Stmt{r.(ast.Stmt)}
+			} else if !ok1 && ok2 {
+				ln = []ast.Stmt{l.(ast.Stmt)}
+			}
+
+			if len(ln) != len(rn) {
+				return nil, false
+			}
+			for i, ll := range ln {
+				if _, ok := match(m, ll, rn[i]); !ok {
+					return nil, false
+				}
+			}
+			return r, true
+		}
+	}
+
+	panic(fmt.Sprintf("unsupported comparison: %T and %T", l, r))
+}
+
+// Match a Node with an AST node
+func matchNodeAST(m *Matcher, a Node, b interface{}) (interface{}, bool) {
+	switch b := b.(type) {
+	case []ast.Stmt:
+		// 'a' is not a List or we'd be using its Match
+		// implementation.
+
+		if len(b) != 1 {
+			return nil, false
+		}
+		return match(m, a, b[0])
+	case []ast.Expr:
+		// 'a' is not a List or we'd be using its Match
+		// implementation.
+
+		if len(b) != 1 {
+			return nil, false
+		}
+		return match(m, a, b[0])
+	case ast.Node:
+		ra := reflect.ValueOf(a)
+		rb := reflect.ValueOf(b).Elem()
+
+		if ra.Type().Name() != rb.Type().Name() {
+			return nil, false
+		}
+
+		for i := 0; i < ra.NumField(); i++ {
+			af := ra.Field(i)
+			fieldName := ra.Type().Field(i).Name
+			bf := rb.FieldByName(fieldName)
+			if (bf == reflect.Value{}) {
+				panic(fmt.Sprintf("internal error: could not find field %s in type %t when comparing with %T", fieldName, b, a))
+			}
+			ai := af.Interface()
+			bi := bf.Interface()
+			if ai == nil {
+				return b, bi == nil
+			}
+			if _, ok := match(m, ai.(Node), bi); !ok {
+				return b, false
+			}
+		}
+		return b, true
+	case nil:
+		return nil, a == Nil{}
+	default:
+		panic(fmt.Sprintf("unhandled type %T", b))
+	}
+}
+
+// Match two AST nodes
+func matchAST(m *Matcher, a, b ast.Node) (interface{}, bool) {
+	ra := reflect.ValueOf(a)
+	rb := reflect.ValueOf(b)
+
+	if ra.Type() != rb.Type() {
+		return nil, false
+	}
+	if ra.IsNil() || rb.IsNil() {
+		return rb, ra.IsNil() == rb.IsNil()
+	}
+
+	ra = ra.Elem()
+	rb = rb.Elem()
+	for i := 0; i < ra.NumField(); i++ {
+		af := ra.Field(i)
+		bf := rb.Field(i)
+		if af.Type() == rtTokPos || af.Type() == rtObject || af.Type() == rtCommentGroup {
+			continue
+		}
+
+		switch af.Kind() {
+		case reflect.Slice:
+			if af.Len() != bf.Len() {
+				return nil, false
+			}
+			for j := 0; j < af.Len(); j++ {
+				if _, ok := match(m, af.Index(j).Interface().(ast.Node), bf.Index(j).Interface().(ast.Node)); !ok {
+					return nil, false
+				}
+			}
+		case reflect.String:
+			if af.String() != bf.String() {
+				return nil, false
+			}
+		case reflect.Int:
+			if af.Int() != bf.Int() {
+				return nil, false
+			}
+		case reflect.Bool:
+			if af.Bool() != bf.Bool() {
+				return nil, false
+			}
+		case reflect.Ptr, reflect.Interface:
+			if _, ok := match(m, af.Interface(), bf.Interface()); !ok {
+				return nil, false
+			}
+		default:
+			panic(fmt.Sprintf("internal error: unhandled kind %s (%T)", af.Kind(), af.Interface()))
+		}
+	}
+	return b, true
+}
+
+func (b Binding) Match(m *Matcher, node interface{}) (interface{}, bool) {
+	if isNil(b.Node) {
+		v, ok := m.State[b.Name]
+		if ok {
+			// Recall value
+			return match(m, v, node)
+		}
+		// Matching anything
+		b.Node = Any{}
+	}
+
+	// Store value
+	if _, ok := m.State[b.Name]; ok {
+		panic(fmt.Sprintf("binding already created: %s", b.Name))
+	}
+	new, ret := match(m, b.Node, node)
+	if ret {
+		m.State[b.Name] = new
+	}
+	return new, ret
+}
+
+func (Any) Match(m *Matcher, node interface{}) (interface{}, bool) {
+	return node, true
+}
+
+func (l List) Match(m *Matcher, node interface{}) (interface{}, bool) {
+	v := reflect.ValueOf(node)
+	if v.Kind() == reflect.Slice {
+		if isNil(l.Head) {
+			return node, v.Len() == 0
+		}
+		if v.Len() == 0 {
+			return nil, false
+		}
+		// OPT(dh): don't check the entire tail if head didn't match
+		_, ok1 := match(m, l.Head, v.Index(0).Interface())
+		_, ok2 := match(m, l.Tail, v.Slice(1, v.Len()).Interface())
+		return node, ok1 && ok2
+	}
+	// Our empty list does not equal an untyped Go nil. This way, we can
+	// tell apart an if with no else and an if with an empty else.
+	return nil, false
+}
+
+func (s String) Match(m *Matcher, node interface{}) (interface{}, bool) {
+	switch o := node.(type) {
+	case token.Token:
+		if tok, ok := maybeToken(s); ok {
+			return match(m, tok, node)
+		}
+		return nil, false
+	case string:
+		return o, string(s) == o
+	default:
+		return nil, false
+	}
+}
+
+func (tok Token) Match(m *Matcher, node interface{}) (interface{}, bool) {
+	o, ok := node.(token.Token)
+	if !ok {
+		return nil, false
+	}
+	return o, token.Token(tok) == o
+}
+
+func (Nil) Match(m *Matcher, node interface{}) (interface{}, bool) {
+	return nil, isNil(node)
+}
+
+func (builtin Builtin) Match(m *Matcher, node interface{}) (interface{}, bool) {
+	ident, ok := node.(*ast.Ident)
+	if !ok {
+		return nil, false
+	}
+	obj := m.TypesInfo.ObjectOf(ident)
+	if obj != types.Universe.Lookup(ident.Name) {
+		return nil, false
+	}
+	return match(m, builtin.Name, ident.Name)
+}
+
+func (obj Object) Match(m *Matcher, node interface{}) (interface{}, bool) {
+	ident, ok := node.(*ast.Ident)
+	if !ok {
+		return nil, false
+	}
+
+	id := m.TypesInfo.ObjectOf(ident)
+	_, ok = match(m, obj.Name, ident.Name)
+	return id, ok
+}
+
+func (fn Function) Match(m *Matcher, node interface{}) (interface{}, bool) {
+	var name string
+	var obj types.Object
+	switch node := node.(type) {
+	case *ast.Ident:
+		obj = m.TypesInfo.ObjectOf(node)
+		switch obj := obj.(type) {
+		case *types.Func:
+			name = lint.FuncName(obj)
+		case *types.Builtin:
+			name = obj.Name()
+		default:
+			return nil, false
+		}
+	case *ast.SelectorExpr:
+		var ok bool
+		obj, ok = m.TypesInfo.ObjectOf(node.Sel).(*types.Func)
+		if !ok {
+			return nil, false
+		}
+		name = lint.FuncName(obj.(*types.Func))
+	default:
+		return nil, false
+	}
+	_, ok := match(m, fn.Name, name)
+	return obj, ok
+}
+
+func (or Or) Match(m *Matcher, node interface{}) (interface{}, bool) {
+	for _, opt := range or.Nodes {
+		mc := m.fork()
+		if ret, ok := match(mc, opt, node); ok {
+			m.merge(mc)
+			return ret, true
+		}
+	}
+	return nil, false
+}
+
+func (not Not) Match(m *Matcher, node interface{}) (interface{}, bool) {
+	_, ok := match(m, not.Node, node)
+	if ok {
+		return nil, false
+	}
+	return node, true
+}
+
+var (
+	// Types of fields in go/ast structs that we want to skip
+	rtTokPos       = reflect.TypeOf(token.Pos(0))
+	rtObject       = reflect.TypeOf((*ast.Object)(nil))
+	rtCommentGroup = reflect.TypeOf((*ast.CommentGroup)(nil))
+)
+
+var (
+	_ matcher = Binding{}
+	_ matcher = Any{}
+	_ matcher = List{}
+	_ matcher = String("")
+	_ matcher = Token(0)
+	_ matcher = Nil{}
+	_ matcher = Builtin{}
+	_ matcher = Object{}
+	_ matcher = Function{}
+	_ matcher = Or{}
+	_ matcher = Not{}
+)
diff --git a/vendor/honnef.co/go/tools/pattern/parser.go b/vendor/honnef.co/go/tools/pattern/parser.go
new file mode 100644
index 000000000..009238b86
--- /dev/null
+++ b/vendor/honnef.co/go/tools/pattern/parser.go
@@ -0,0 +1,455 @@
+package pattern
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"reflect"
+)
+
+type Pattern struct {
+	Root Node
+	// Relevant contains instances of ast.Node that could potentially
+	// initiate a successful match of the pattern.
+	Relevant []reflect.Type
+}
+
+func MustParse(s string) Pattern {
+	p := &Parser{AllowTypeInfo: true}
+	pat, err := p.Parse(s)
+	if err != nil {
+		panic(err)
+	}
+	return pat
+}
+
+func roots(node Node) []reflect.Type {
+	switch node := node.(type) {
+	case Or:
+		var out []reflect.Type
+		for _, el := range node.Nodes {
+			out = append(out, roots(el)...)
+		}
+		return out
+	case Not:
+		return roots(node.Node)
+	case Binding:
+		return roots(node.Node)
+	case Nil, nil:
+		// this branch is reached via bindings
+		return allTypes
+	default:
+		Ts, ok := nodeToASTTypes[reflect.TypeOf(node)]
+		if !ok {
+			panic(fmt.Sprintf("internal error: unhandled type %T", node))
+		}
+		return Ts
+	}
+}
+
+var allTypes = []reflect.Type{
+	reflect.TypeOf((*ast.RangeStmt)(nil)),
+	reflect.TypeOf((*ast.AssignStmt)(nil)),
+	reflect.TypeOf((*ast.IndexExpr)(nil)),
+	reflect.TypeOf((*ast.Ident)(nil)),
+	reflect.TypeOf((*ast.ValueSpec)(nil)),
+	reflect.TypeOf((*ast.GenDecl)(nil)),
+	reflect.TypeOf((*ast.BinaryExpr)(nil)),
+	reflect.TypeOf((*ast.ForStmt)(nil)),
+	reflect.TypeOf((*ast.ArrayType)(nil)),
+	reflect.TypeOf((*ast.DeferStmt)(nil)),
+	reflect.TypeOf((*ast.MapType)(nil)),
+	reflect.TypeOf((*ast.ReturnStmt)(nil)),
+	reflect.TypeOf((*ast.SliceExpr)(nil)),
+	reflect.TypeOf((*ast.StarExpr)(nil)),
+	reflect.TypeOf((*ast.UnaryExpr)(nil)),
+	reflect.TypeOf((*ast.SendStmt)(nil)),
+	reflect.TypeOf((*ast.SelectStmt)(nil)),
+	reflect.TypeOf((*ast.ImportSpec)(nil)),
+	reflect.TypeOf((*ast.IfStmt)(nil)),
+	reflect.TypeOf((*ast.GoStmt)(nil)),
+	reflect.TypeOf((*ast.Field)(nil)),
+	reflect.TypeOf((*ast.SelectorExpr)(nil)),
+	reflect.TypeOf((*ast.StructType)(nil)),
+	reflect.TypeOf((*ast.KeyValueExpr)(nil)),
+	reflect.TypeOf((*ast.FuncType)(nil)),
+	reflect.TypeOf((*ast.FuncLit)(nil)),
+	reflect.TypeOf((*ast.FuncDecl)(nil)),
+	reflect.TypeOf((*ast.ChanType)(nil)),
+	reflect.TypeOf((*ast.CallExpr)(nil)),
+	reflect.TypeOf((*ast.CaseClause)(nil)),
+	reflect.TypeOf((*ast.CommClause)(nil)),
+	reflect.TypeOf((*ast.CompositeLit)(nil)),
+	reflect.TypeOf((*ast.EmptyStmt)(nil)),
+	reflect.TypeOf((*ast.SwitchStmt)(nil)),
+	reflect.TypeOf((*ast.TypeSwitchStmt)(nil)),
+	reflect.TypeOf((*ast.TypeAssertExpr)(nil)),
+	reflect.TypeOf((*ast.TypeSpec)(nil)),
+	reflect.TypeOf((*ast.InterfaceType)(nil)),
+	reflect.TypeOf((*ast.BranchStmt)(nil)),
+	reflect.TypeOf((*ast.IncDecStmt)(nil)),
+	reflect.TypeOf((*ast.BasicLit)(nil)),
+}
+
+var nodeToASTTypes = map[reflect.Type][]reflect.Type{
+	reflect.TypeOf(String("")): nil,
+	reflect.TypeOf(Token(0)): nil,
+	reflect.TypeOf(List{}): {reflect.TypeOf((*ast.BlockStmt)(nil)), reflect.TypeOf((*ast.FieldList)(nil))},
+	reflect.TypeOf(Builtin{}): {reflect.TypeOf((*ast.Ident)(nil))},
+	reflect.TypeOf(Object{}): {reflect.TypeOf((*ast.Ident)(nil))},
+	reflect.TypeOf(Function{}): {reflect.TypeOf((*ast.Ident)(nil)), reflect.TypeOf((*ast.SelectorExpr)(nil))},
+	reflect.TypeOf(Any{}): allTypes,
+	reflect.TypeOf(RangeStmt{}): {reflect.TypeOf((*ast.RangeStmt)(nil))},
+	reflect.TypeOf(AssignStmt{}): {reflect.TypeOf((*ast.AssignStmt)(nil))},
+	reflect.TypeOf(IndexExpr{}): {reflect.TypeOf((*ast.IndexExpr)(nil))},
+	reflect.TypeOf(Ident{}): {reflect.TypeOf((*ast.Ident)(nil))},
+	reflect.TypeOf(ValueSpec{}): {reflect.TypeOf((*ast.ValueSpec)(nil))},
+	reflect.TypeOf(GenDecl{}): {reflect.TypeOf((*ast.GenDecl)(nil))},
+	reflect.TypeOf(BinaryExpr{}): {reflect.TypeOf((*ast.BinaryExpr)(nil))},
+	reflect.TypeOf(ForStmt{}): {reflect.TypeOf((*ast.ForStmt)(nil))},
+	reflect.TypeOf(ArrayType{}): {reflect.TypeOf((*ast.ArrayType)(nil))},
+	reflect.TypeOf(DeferStmt{}): {reflect.TypeOf((*ast.DeferStmt)(nil))},
+	reflect.TypeOf(MapType{}): {reflect.TypeOf((*ast.MapType)(nil))},
+	reflect.TypeOf(ReturnStmt{}): {reflect.TypeOf((*ast.ReturnStmt)(nil))},
+	reflect.TypeOf(SliceExpr{}): {reflect.TypeOf((*ast.SliceExpr)(nil))},
+	reflect.TypeOf(StarExpr{}): {reflect.TypeOf((*ast.StarExpr)(nil))},
+	reflect.TypeOf(UnaryExpr{}): {reflect.TypeOf((*ast.UnaryExpr)(nil))},
+	reflect.TypeOf(SendStmt{}): {reflect.TypeOf((*ast.SendStmt)(nil))},
+	reflect.TypeOf(SelectStmt{}): {reflect.TypeOf((*ast.SelectStmt)(nil))},
+	reflect.TypeOf(ImportSpec{}): {reflect.TypeOf((*ast.ImportSpec)(nil))},
+	reflect.TypeOf(IfStmt{}): {reflect.TypeOf((*ast.IfStmt)(nil))},
+	reflect.TypeOf(GoStmt{}): {reflect.TypeOf((*ast.GoStmt)(nil))},
+	reflect.TypeOf(Field{}): {reflect.TypeOf((*ast.Field)(nil))},
+	reflect.TypeOf(SelectorExpr{}): {reflect.TypeOf((*ast.SelectorExpr)(nil))},
+	reflect.TypeOf(StructType{}): {reflect.TypeOf((*ast.StructType)(nil))},
+	reflect.TypeOf(KeyValueExpr{}): {reflect.TypeOf((*ast.KeyValueExpr)(nil))},
+	reflect.TypeOf(FuncType{}): {reflect.TypeOf((*ast.FuncType)(nil))},
+	reflect.TypeOf(FuncLit{}): {reflect.TypeOf((*ast.FuncLit)(nil))},
+	reflect.TypeOf(FuncDecl{}): {reflect.TypeOf((*ast.FuncDecl)(nil))},
+	reflect.TypeOf(ChanType{}): {reflect.TypeOf((*ast.ChanType)(nil))},
+	reflect.TypeOf(CallExpr{}): {reflect.TypeOf((*ast.CallExpr)(nil))},
+	reflect.TypeOf(CaseClause{}): {reflect.TypeOf((*ast.CaseClause)(nil))},
+	reflect.TypeOf(CommClause{}): {reflect.TypeOf((*ast.CommClause)(nil))},
+	reflect.TypeOf(CompositeLit{}): {reflect.TypeOf((*ast.CompositeLit)(nil))},
+	reflect.TypeOf(EmptyStmt{}): {reflect.TypeOf((*ast.EmptyStmt)(nil))},
+	reflect.TypeOf(SwitchStmt{}): {reflect.TypeOf((*ast.SwitchStmt)(nil))},
+	reflect.TypeOf(TypeSwitchStmt{}): {reflect.TypeOf((*ast.TypeSwitchStmt)(nil))},
+	reflect.TypeOf(TypeAssertExpr{}): {reflect.TypeOf((*ast.TypeAssertExpr)(nil))},
+	reflect.TypeOf(TypeSpec{}): {reflect.TypeOf((*ast.TypeSpec)(nil))},
+	reflect.TypeOf(InterfaceType{}): {reflect.TypeOf((*ast.InterfaceType)(nil))},
+	reflect.TypeOf(BranchStmt{}): {reflect.TypeOf((*ast.BranchStmt)(nil))},
+	reflect.TypeOf(IncDecStmt{}): {reflect.TypeOf((*ast.IncDecStmt)(nil))},
+	reflect.TypeOf(BasicLit{}): {reflect.TypeOf((*ast.BasicLit)(nil))},
+}
+
+var requiresTypeInfo = map[string]bool{
+	"Function": true,
+	"Builtin": true,
+	"Object": true,
+}
+
+type Parser struct {
+	// Allow nodes that rely on type information
+	AllowTypeInfo bool
+
+	lex *lexer
+	cur item
+	last *item
+	items chan item
+}
+
+func (p *Parser) Parse(s string) (Pattern, error) {
+	p.cur = item{}
+	p.last = nil
+	p.items = nil
+
+	fset := token.NewFileSet()
+	p.lex = &lexer{
+		f: fset.AddFile("